From f1f748108a0b4dd69b16d7f9f7d9adb357b43325 Mon Sep 17 00:00:00 2001 From: Emma Harper Smith Date: Fri, 14 Nov 2025 15:34:24 -0800 Subject: [PATCH 01/20] MVP Rust in CPython --- .gitignore | 3 + Cargo.lock | 242 ++++++++++++++++++++++++++++ Cargo.toml | 6 + Makefile.pre.in | 16 +- Modules/Setup | 1 + Modules/Setup.stdlib.in | 5 + Modules/_base64/Cargo.toml | 12 ++ Modules/_base64/src/lib.rs | 95 +++++++++++ Modules/cpython-sys/Cargo.toml | 9 ++ Modules/cpython-sys/build.rs | 27 ++++ Modules/cpython-sys/src/lib.rs | 123 ++++++++++++++ Modules/cpython-sys/wrapper.h | 221 +++++++++++++++++++++++++ Modules/makesetup | 104 +++++++----- Tools/build/regen-rust-wrapper-h.py | 25 +++ configure | 136 ++++++++++++++++ configure.ac | 41 +++++ rust-toolchain.toml | 2 + 17 files changed, 1029 insertions(+), 39 deletions(-) create mode 100644 Cargo.lock create mode 100644 Cargo.toml create mode 100644 Modules/_base64/Cargo.toml create mode 100644 Modules/_base64/src/lib.rs create mode 100644 Modules/cpython-sys/Cargo.toml create mode 100644 Modules/cpython-sys/build.rs create mode 100644 Modules/cpython-sys/src/lib.rs create mode 100644 Modules/cpython-sys/wrapper.h create mode 100644 Tools/build/regen-rust-wrapper-h.py create mode 100644 rust-toolchain.toml diff --git a/.gitignore b/.gitignore index 2bf4925647ddcd..228455787e5fdf 100644 --- a/.gitignore +++ b/.gitignore @@ -178,3 +178,6 @@ CLAUDE.local.md #### main branch only stuff below this line, things to backport go above. #### # main branch only: ABI files are not checked/maintained. Doc/data/python*.abi + +# Rust build artifacts +/target/ \ No newline at end of file diff --git a/Cargo.lock b/Cargo.lock new file mode 100644 index 00000000000000..5acffd0fac3baa --- /dev/null +++ b/Cargo.lock @@ -0,0 +1,242 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. 
+version = 4 + +[[package]] +name = "_base64" +version = "0.1.0" +dependencies = [ + "base64", + "cpython-sys", +] + +[[package]] +name = "aho-corasick" +version = "1.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ddd31a130427c27518df266943a5308ed92d4b226cc639f5a8f1002816174301" +dependencies = [ + "memchr", +] + +[[package]] +name = "base64" +version = "0.22.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6" + +[[package]] +name = "bindgen" +version = "0.72.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "993776b509cfb49c750f11b8f07a46fa23e0a1386ffc01fb1e7d343efc387895" +dependencies = [ + "bitflags", + "cexpr", + "clang-sys", + "itertools", + "log", + "prettyplease", + "proc-macro2", + "quote", + "regex", + "rustc-hash", + "shlex", + "syn", +] + +[[package]] +name = "bitflags" +version = "2.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "812e12b5285cc515a9c72a5c1d3b6d46a19dac5acfef5265968c166106e31dd3" + +[[package]] +name = "cexpr" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6fac387a98bb7c37292057cffc56d62ecb629900026402633ae9160df93a8766" +dependencies = [ + "nom", +] + +[[package]] +name = "cfg-if" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9330f8b2ff13f34540b44e946ef35111825727b38d33286ef986142615121801" + +[[package]] +name = "clang-sys" +version = "1.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b023947811758c97c59bf9d1c188fd619ad4718dcaa767947df1cadb14f39f4" +dependencies = [ + "glob", + "libc", + "libloading", +] + +[[package]] +name = "cpython-sys" +version = "0.1.0" +dependencies = [ + "bindgen", +] + +[[package]] +name = "either" +version = "1.15.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "48c757948c5ede0e46177b7add2e67155f70e33c07fea8284df6576da70b3719" + +[[package]] +name = "glob" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0cc23270f6e1808e30a928bdc84dea0b9b4136a8bc82338574f23baf47bbd280" + +[[package]] +name = "itertools" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "413ee7dfc52ee1a4949ceeb7dbc8a33f2d6c088194d9f922fb8318faf1f01186" +dependencies = [ + "either", +] + +[[package]] +name = "libc" +version = "0.2.177" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2874a2af47a2325c2001a6e6fad9b16a53b802102b528163885171cf92b15976" + +[[package]] +name = "libloading" +version = "0.8.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d7c4b02199fee7c5d21a5ae7d8cfa79a6ef5bb2fc834d6e9058e89c825efdc55" +dependencies = [ + "cfg-if", + "windows-link", +] + +[[package]] +name = "log" +version = "0.4.28" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "34080505efa8e45a4b816c349525ebe327ceaa8559756f0356cba97ef3bf7432" + +[[package]] +name = "memchr" +version = "2.7.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f52b00d39961fc5b2736ea853c9cc86238e165017a493d1d5c8eac6bdc4cc273" + +[[package]] +name = "minimal-lexical" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" 
+ +[[package]] +name = "nom" +version = "7.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d273983c5a657a70a3e8f2a01329822f3b8c8172b73826411a55751e404a0a4a" +dependencies = [ + "memchr", + "minimal-lexical", +] + +[[package]] +name = "prettyplease" +version = "0.2.37" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "479ca8adacdd7ce8f1fb39ce9ecccbfe93a3f1344b3d0d97f20bc0196208f62b" +dependencies = [ + "proc-macro2", + "syn", +] + +[[package]] +name = "proc-macro2" +version = "1.0.103" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5ee95bc4ef87b8d5ba32e8b7714ccc834865276eab0aed5c9958d00ec45f49e8" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "quote" +version = "1.0.42" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a338cc41d27e6cc6dce6cefc13a0729dfbb81c262b1f519331575dd80ef3067f" +dependencies = [ + "proc-macro2", +] + +[[package]] +name = "regex" +version = "1.12.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "843bc0191f75f3e22651ae5f1e72939ab2f72a4bc30fa80a066bd66edefc24d4" +dependencies = [ + "aho-corasick", + "memchr", + "regex-automata", + "regex-syntax", +] + +[[package]] +name = "regex-automata" +version = "0.4.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5276caf25ac86c8d810222b3dbb938e512c55c6831a10f3e6ed1c93b84041f1c" +dependencies = [ + "aho-corasick", + "memchr", + "regex-syntax", +] + +[[package]] +name = "regex-syntax" +version = "0.8.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a2d987857b319362043e95f5353c0535c1f58eec5336fdfcf626430af7def58" + +[[package]] +name = "rustc-hash" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "357703d41365b4b27c590e3ed91eabb1b663f07c4c084095e60cbed4362dff0d" + +[[package]] +name = "shlex" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" + +[[package]] +name = "syn" +version = "2.0.110" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a99801b5bd34ede4cf3fc688c5919368fea4e4814a4664359503e6015b280aea" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "unicode-ident" +version = "1.0.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9312f7c4f6ff9069b165498234ce8be658059c6728633667c526e27dc2cf1df5" + +[[package]] +name = "windows-link" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0805222e57f7521d6a62e36fa9163bc891acd422f971defe97d64e70d0a4fe5" diff --git a/Cargo.toml b/Cargo.toml new file mode 100644 index 00000000000000..8b75182b7a596a --- /dev/null +++ b/Cargo.toml @@ -0,0 +1,6 @@ +[workspace] +resolver = "3" +members = [ + "Modules/_base64", + "Modules/cpython-sys" +] diff --git a/Makefile.pre.in b/Makefile.pre.in index dd28ff5d2a3ed1..6d113d14f31630 100644 --- a/Makefile.pre.in +++ b/Makefile.pre.in @@ -58,6 +58,9 @@ DTRACE_HEADERS= @DTRACE_HEADERS@ DTRACE_OBJS= @DTRACE_OBJS@ DSYMUTIL= @DSYMUTIL@ DSYMUTIL_PATH= @DSYMUTIL_PATH@ +CARGO_HOME=@CARGO_HOME@ +CARGO_TARGET_DIR=@CARGO_TARGET_DIR@ +CARGO_PROFILE=@CARGO_PROFILE@ GNULD= @GNULD@ @@ -1649,6 +1652,10 @@ Makefile Modules/config.c: Makefile.pre \ @mv config.c Modules @echo "The Makefile was updated, you may need to re-run make." 
+.PHONY: regen-rust-wrapper-h +regen-rust-wrapper-h: $(PYTHON_HEADERS) + PYTHON_HEADERS="$(PYTHON_HEADERS)" $(PYTHON_FOR_REGEN) $(srcdir)/Tools/build/regen-rust-wrapper-h.py + .PHONY: regen-test-frozenmain regen-test-frozenmain: $(BUILDPYTHON) # Regenerate Programs/test_frozenmain.h @@ -3254,8 +3261,15 @@ profile-removal: rm -f profile-run-stamp rm -f profile-bolt-stamp +.PHONY: clean-rust +clean-rust: + @if test @CARGO_HOME@ != ''; then \ + echo Running cargo clean...; \ + $(CARGO_HOME)/bin/cargo clean; \ + fi + .PHONY: clean -clean: clean-retain-profile clean-bolt +clean: clean-retain-profile clean-bolt clean-rust @if test @DEF_MAKE_ALL_RULE@ = profile-opt -o @DEF_MAKE_ALL_RULE@ = bolt-opt; then \ rm -f profile-gen-stamp profile-clean-stamp; \ $(MAKE) profile-removal; \ diff --git a/Modules/Setup b/Modules/Setup index 8a54c0aaec60a0..86b43541f37ecc 100644 --- a/Modules/Setup +++ b/Modules/Setup @@ -182,6 +182,7 @@ PYTHONPATH=$(COREPYTHONPATH) #_codecs_tw cjkcodecs/_codecs_tw.c #_multibytecodec cjkcodecs/multibytecodec.c #unicodedata unicodedata.c +#_base64 _base64/Cargo.toml _base64/src/lib.rs lib_base64.a # Modules with some UNIX dependencies diff --git a/Modules/Setup.stdlib.in b/Modules/Setup.stdlib.in index 2c3013e3d0c144..635be3cbe8dbc0 100644 --- a/Modules/Setup.stdlib.in +++ b/Modules/Setup.stdlib.in @@ -117,6 +117,11 @@ @MODULE__MULTIBYTECODEC_TRUE@_multibytecodec cjkcodecs/multibytecodec.c @MODULE_UNICODEDATA_TRUE@unicodedata unicodedata.c +############################################################################ +# Rust modules +# +@MODULE__BASE64_TRUE@_base64 _base64/Cargo.toml _base64/src/lib.rs lib_base64.a + ############################################################################ # Modules with some UNIX dependencies # diff --git a/Modules/_base64/Cargo.toml b/Modules/_base64/Cargo.toml new file mode 100644 index 00000000000000..038ec4bb02eb02 --- /dev/null +++ b/Modules/_base64/Cargo.toml @@ -0,0 +1,12 @@ +[package] +name = "_base64" +version = "0.1.0" +edition = "2024" + +[dependencies] +base64 = "0.22.1" +cpython-sys ={ path = "../cpython-sys" } + +[lib] +name = "_base64" +crate-type = ["staticlib"] \ No newline at end of file diff --git a/Modules/_base64/src/lib.rs b/Modules/_base64/src/lib.rs new file mode 100644 index 00000000000000..f9c122314a3a32 --- /dev/null +++ b/Modules/_base64/src/lib.rs @@ -0,0 +1,95 @@ +use std::cell::UnsafeCell; + +use std::ffi::CStr; +use std::ffi::CString; +use std::ffi::c_char; +use std::ffi::c_int; +use std::ffi::c_void; + +use cpython_sys::METH_FASTCALL; +use cpython_sys::Py_ssize_t; +use cpython_sys::PyBytes_AsString; +use cpython_sys::PyBytes_FromString; +use cpython_sys::PyMethodDef; +use cpython_sys::PyMethodDefFuncPointer; +use cpython_sys::PyModuleDef; +use cpython_sys::PyModuleDef_HEAD_INIT; +use cpython_sys::PyModuleDef_Init; +use cpython_sys::PyObject; + +use base64::prelude::*; + +#[unsafe(no_mangle)] +pub unsafe extern "C" fn standard_b64encode( + _module: *mut PyObject, + args: *mut *mut PyObject, + _nargs: Py_ssize_t, +) -> *mut PyObject { + let buff = unsafe { *args }; + let ptr = unsafe { PyBytes_AsString(buff) }; + if ptr.is_null() { + // Error handling omitted for now + unimplemented!("Error handling goes here...") + } + let cdata = unsafe { CStr::from_ptr(ptr) }; + let res = BASE64_STANDARD.encode(cdata.to_bytes()); + unsafe { PyBytes_FromString(CString::new(res).unwrap().as_ptr()) } +} + +#[unsafe(no_mangle)] +pub extern "C" fn _base64_clear(_obj: *mut PyObject) -> c_int { + //TODO + 0 +} + 
+#[unsafe(no_mangle)] +pub extern "C" fn _base64_free(_o: *mut c_void) { + //TODO +} + +pub struct ModuleDef { + ffi: UnsafeCell, +} + +impl ModuleDef { + fn init_multi_phase(&'static self) -> *mut PyObject { + unsafe { PyModuleDef_Init(self.ffi.get()) } + } +} + +unsafe impl Sync for ModuleDef {} + +pub static _BASE64_MODULE_METHODS: [PyMethodDef; 2] = { + [ + PyMethodDef { + ml_name: c"standard_b64encode".as_ptr() as *mut c_char, + ml_meth: PyMethodDefFuncPointer { + PyCFunctionFast: standard_b64encode, + }, + ml_flags: METH_FASTCALL, + ml_doc: c"Demo for the _base64 module".as_ptr() as *mut c_char, + }, + PyMethodDef::zeroed(), + ] +}; + +pub static _BASE64_MODULE: ModuleDef = { + ModuleDef { + ffi: UnsafeCell::new(PyModuleDef { + m_base: PyModuleDef_HEAD_INIT, + m_name: c"_base64".as_ptr() as *mut _, + m_doc: c"A test Rust module".as_ptr() as *mut _, + m_size: 0, + m_methods: &_BASE64_MODULE_METHODS as *const PyMethodDef as *mut _, + m_slots: std::ptr::null_mut(), + m_traverse: None, + m_clear: Some(_base64_clear), + m_free: Some(_base64_free), + }), + } +}; + +#[unsafe(no_mangle)] +pub extern "C" fn PyInit__base64() -> *mut PyObject { + _BASE64_MODULE.init_multi_phase() +} diff --git a/Modules/cpython-sys/Cargo.toml b/Modules/cpython-sys/Cargo.toml new file mode 100644 index 00000000000000..de0b88d5d5b723 --- /dev/null +++ b/Modules/cpython-sys/Cargo.toml @@ -0,0 +1,9 @@ +[package] +name = "cpython-sys" +version = "0.1.0" +edition = "2024" + +[dependencies] + +[build-dependencies] +bindgen = "0.72.1" \ No newline at end of file diff --git a/Modules/cpython-sys/build.rs b/Modules/cpython-sys/build.rs new file mode 100644 index 00000000000000..b55f03c5b066b5 --- /dev/null +++ b/Modules/cpython-sys/build.rs @@ -0,0 +1,27 @@ +use std::env; +use std::path::{Path, PathBuf}; + +fn main() { + let curdir = std::env::current_dir().unwrap(); + let srcdir = curdir.parent().and_then(Path::parent).unwrap(); + let bindings = bindgen::Builder::default() + .header("wrapper.h") + .clang_arg(format!("-I{}", srcdir.as_os_str().to_str().unwrap())) + .clang_arg(format!("-I{}/Include", srcdir.as_os_str().to_str().unwrap())) + .allowlist_function("Py.*") + .allowlist_function("_Py.*") + .allowlist_type("Py.*") + .allowlist_type("_Py.*") + .allowlist_var("Py.*") + .allowlist_var("_Py.*") + .blocklist_type("^PyMethodDef$") + .parse_callbacks(Box::new(bindgen::CargoCallbacks::new())) + .generate() + .expect("Unable to generate bindings"); + + // Write the bindings to the $OUT_DIR/bindings.rs file. + let out_path = PathBuf::from(env::var("OUT_DIR").unwrap()); + bindings + .write_to_file(out_path.join("bindings.rs")) + .expect("Couldn't write bindings!"); +} \ No newline at end of file diff --git a/Modules/cpython-sys/src/lib.rs b/Modules/cpython-sys/src/lib.rs new file mode 100644 index 00000000000000..6c0b84d70ebc87 --- /dev/null +++ b/Modules/cpython-sys/src/lib.rs @@ -0,0 +1,123 @@ +#![allow(non_upper_case_globals)] +#![allow(non_camel_case_types)] +#![allow(non_snake_case)] +#![allow(unsafe_op_in_unsafe_fn)] +#![allow(unnecessary_transmutes)] + +use std::ffi::{c_char, c_int, c_void}; + +include!(concat!(env!("OUT_DIR"), "/bindings.rs")); +/* Flag passed to newmethodobject */ +/* #define METH_OLDARGS 0x0000 -- unsupported now */ +pub const METH_VARARGS: c_int = 0x0001; +pub const METH_KEYWORDS: c_int = 0x0002; +/* METH_NOARGS and METH_O must not be combined with the flags above. 
*/ +pub const METH_NOARGS: c_int = 0x0004; +pub const METH_O: c_int = 0x0008; + +/* METH_CLASS and METH_STATIC are a little different; these control +the construction of methods for a class. These cannot be used for +functions in modules. */ +pub const METH_CLASS: c_int = 0x0010; +pub const METH_STATIC: c_int = 0x0020; + +/* METH_COEXIST allows a method to be entered even though a slot has +already filled the entry. When defined, the flag allows a separate +method, "__contains__" for example, to coexist with a defined +slot like sq_contains. */ + +pub const METH_COEXIST: c_int = 0x0040; + +pub const METH_FASTCALL: c_int = 0x0080; + +/* This bit is preserved for Stackless Python +pub const METH_STACKLESS: c_int = 0x0100; +pub const METH_STACKLESS: c_int = 0x0000; +*/ + +/* METH_METHOD means the function stores an + * additional reference to the class that defines it; + * both self and class are passed to it. + * It uses PyCMethodObject instead of PyCFunctionObject. + * May not be combined with METH_NOARGS, METH_O, METH_CLASS or METH_STATIC. + */ + +pub const METH_METHOD: c_int = 0x0200; + +#[cfg(target_pointer_width = "64")] +pub const _Py_STATIC_FLAG_BITS: Py_ssize_t = + (_Py_STATICALLY_ALLOCATED_FLAG | _Py_IMMORTAL_FLAGS) as Py_ssize_t; +#[cfg(target_pointer_width = "64")] +pub const _Py_STATIC_IMMORTAL_INITIAL_REFCNT: Py_ssize_t = + (_Py_IMMORTAL_INITIAL_REFCNT as Py_ssize_t) | (_Py_STATIC_FLAG_BITS << 48); +#[cfg(not(target_pointer_width = "64"))] +pub const _Py_STATIC_IMMORTAL_INITIAL_REFCNT: Py_ssize_t = 7u32 << 28; + +#[repr(C)] +pub union PyMethodDefFuncPointer { + pub PyCFunction: unsafe extern "C" fn(slf: *mut PyObject, args: *mut PyObject) -> *mut PyObject, + pub PyCFunctionFast: unsafe extern "C" fn( + slf: *mut PyObject, + args: *mut *mut PyObject, + nargs: Py_ssize_t, + ) -> *mut PyObject, + pub PyCFunctionWithKeywords: unsafe extern "C" fn( + slf: *mut PyObject, + args: *mut PyObject, + kwargs: *mut PyObject, + ) -> *mut PyObject, + pub PyCFunctionFastWithKeywords: unsafe extern "C" fn( + slf: *mut PyObject, + args: *mut *mut PyObject, + nargs: Py_ssize_t, + kwargs: *mut PyObject, + ) -> *mut PyObject, + pub PyCMethod: unsafe extern "C" fn( + slf: *mut PyObject, + typ: *mut PyTypeObject, + args: *mut *mut PyObject, + nargs: Py_ssize_t, + kwargs: *mut PyObject, + ) -> *mut PyObject, + pub Void: *mut c_void, +} + +#[repr(C)] +pub struct PyMethodDef { + pub ml_name: *mut c_char, + pub ml_meth: PyMethodDefFuncPointer, + pub ml_flags: c_int, + pub ml_doc: *mut c_char, +} + +impl PyMethodDef { + pub const fn zeroed() -> Self { + Self { + ml_name: std::ptr::null_mut(), + ml_meth: PyMethodDefFuncPointer { + Void: std::ptr::null_mut(), + }, + ml_flags: 0, + ml_doc: std::ptr::null_mut(), + } + } +} + +// TODO: this is pretty unsafe, we should probably wrap this in a nicer +// abstraction +unsafe impl Sync for PyMethodDef {} +unsafe impl Send for PyMethodDef {} + +pub const PyObject_HEAD_INIT: PyObject = PyObject { + __bindgen_anon_1: _object__bindgen_ty_1 { + ob_refcnt_full: _Py_STATIC_IMMORTAL_INITIAL_REFCNT as i64, + }, + ob_type: std::ptr::null_mut(), +}; + +pub const PyModuleDef_HEAD_INIT: PyModuleDef_Base = PyModuleDef_Base { + ob_base: PyObject_HEAD_INIT, + m_init: None, + m_index: 0, + m_copy: std::ptr::null_mut(), +}; diff --git a/Modules/cpython-sys/wrapper.h b/Modules/cpython-sys/wrapper.h new file mode 100644 index 00000000000000..1b0176a905b962 --- /dev/null +++ b/Modules/cpython-sys/wrapper.h @@ -0,0 +1,221 @@ +#define Py_BUILD_CORE +#include "Modules/expat/expat.h" 
+#include "Python.h" +#include "abstract.h" +#include "audit.h" +#include "bltinmodule.h" +#include "boolobject.h" +#include "bytearrayobject.h" +#include "bytesobject.h" +#include "ceval.h" +#include "codecs.h" +#include "compile.h" +#include "complexobject.h" +#include "critical_section.h" +#include "descrobject.h" +#include "dictobject.h" +#include "dynamic_annotations.h" +#include "enumobject.h" +#include "errcode.h" +#include "exports.h" +#include "fileobject.h" +#include "fileutils.h" +#include "floatobject.h" +#include "frameobject.h" +#include "genericaliasobject.h" +#include "import.h" +#include "intrcheck.h" +#include "iterobject.h" +#include "listobject.h" +#include "longobject.h" +#include "marshal.h" +#include "memoryobject.h" +#include "methodobject.h" +#include "modsupport.h" +#include "moduleobject.h" +#include "object.h" +#include "objimpl.h" +#include "opcode.h" +#include "opcode_ids.h" +#include "osdefs.h" +#include "osmodule.h" +#include "patchlevel.h" +#include "pyatomic.h" +#include "pybuffer.h" +#include "pycapsule.h" +#include "pydtrace.h" +#include "pyerrors.h" +#include "pyexpat.h" +#include "pyframe.h" +#include "pyhash.h" +#include "pylifecycle.h" +#include "pymacconfig.h" +#include "pymacro.h" +#include "pymath.h" +#include "pymem.h" +#include "pyport.h" +#include "pystate.h" +#include "pystats.h" +#include "pystrcmp.h" +#include "pystrtod.h" +#include "pythonrun.h" +#include "pythread.h" +#include "pytypedefs.h" +#include "rangeobject.h" +#include "refcount.h" +#include "setobject.h" +#include "sliceobject.h" +#include "structmember.h" +#include "structseq.h" +#include "sysmodule.h" +#include "traceback.h" +#include "tupleobject.h" +#include "typeslots.h" +#include "unicodeobject.h" +#include "warnings.h" +#include "weakrefobject.h" +#include "pyconfig.h" +#include "internal/pycore_parser.h" +#include "internal/pycore_mimalloc.h" +#include "internal/mimalloc/mimalloc.h" +#include "internal/mimalloc/mimalloc/atomic.h" +#include "internal/mimalloc/mimalloc/internal.h" +#include "internal/mimalloc/mimalloc/prim.h" +#include "internal/mimalloc/mimalloc/track.h" +#include "internal/mimalloc/mimalloc/types.h" +#include "internal/pycore_abstract.h" +#include "internal/pycore_asdl.h" +#include "internal/pycore_ast.h" +#include "internal/pycore_ast_state.h" +#include "internal/pycore_atexit.h" +#include "internal/pycore_audit.h" +#include "internal/pycore_backoff.h" +#include "internal/pycore_bitutils.h" +#include "internal/pycore_blocks_output_buffer.h" +#include "internal/pycore_brc.h" +#include "internal/pycore_bytes_methods.h" +#include "internal/pycore_bytesobject.h" +#include "internal/pycore_call.h" +#include "internal/pycore_capsule.h" +#include "internal/pycore_cell.h" +#include "internal/pycore_ceval.h" +#include "internal/pycore_ceval_state.h" +#include "internal/pycore_code.h" +#include "internal/pycore_codecs.h" +#include "internal/pycore_compile.h" +#include "internal/pycore_complexobject.h" +#include "internal/pycore_condvar.h" +#include "internal/pycore_context.h" +#include "internal/pycore_critical_section.h" +#include "internal/pycore_crossinterp.h" +#include "internal/pycore_debug_offsets.h" +#include "internal/pycore_descrobject.h" +#include "internal/pycore_dict.h" +#include "internal/pycore_dict_state.h" +#include "internal/pycore_dtoa.h" +#include "internal/pycore_exceptions.h" +#include "internal/pycore_faulthandler.h" +#include "internal/pycore_fileutils.h" +#include "internal/pycore_floatobject.h" +#include "internal/pycore_flowgraph.h" 
+#include "internal/pycore_format.h" +#include "internal/pycore_frame.h" +#include "internal/pycore_freelist.h" +#include "internal/pycore_freelist_state.h" +#include "internal/pycore_function.h" +#include "internal/pycore_gc.h" +#include "internal/pycore_genobject.h" +#include "internal/pycore_getopt.h" +#include "internal/pycore_gil.h" +#include "internal/pycore_global_objects.h" +#include "internal/pycore_global_objects_fini_generated.h" +#include "internal/pycore_global_strings.h" +#include "internal/pycore_hamt.h" +#include "internal/pycore_hashtable.h" +#include "internal/pycore_import.h" +#include "internal/pycore_importdl.h" +#include "internal/pycore_index_pool.h" +#include "internal/pycore_initconfig.h" +#include "internal/pycore_instruments.h" +#include "internal/pycore_instruction_sequence.h" +#include "internal/pycore_interp.h" +#include "internal/pycore_interp_structs.h" +#include "internal/pycore_interpframe.h" +#include "internal/pycore_interpframe_structs.h" +#include "internal/pycore_interpolation.h" +#include "internal/pycore_intrinsics.h" +#include "internal/pycore_jit.h" +#include "internal/pycore_list.h" +#include "internal/pycore_llist.h" +#include "internal/pycore_lock.h" +#include "internal/pycore_long.h" +#include "internal/pycore_memoryobject.h" +#include "internal/pycore_mimalloc.h" +#include "internal/pycore_modsupport.h" +#include "internal/pycore_moduleobject.h" +#include "internal/pycore_namespace.h" +#include "internal/pycore_object.h" +#include "internal/pycore_object_alloc.h" +#include "internal/pycore_object_deferred.h" +#include "internal/pycore_object_stack.h" +#include "internal/pycore_object_state.h" +#include "internal/pycore_obmalloc.h" +#include "internal/pycore_obmalloc_init.h" +#include "internal/pycore_opcode_metadata.h" +#include "internal/pycore_opcode_utils.h" +#include "internal/pycore_optimizer.h" +#include "internal/pycore_parking_lot.h" +#include "internal/pycore_parser.h" +#include "internal/pycore_pathconfig.h" +#include "internal/pycore_pyarena.h" +#include "internal/pycore_pyatomic_ft_wrappers.h" +#include "internal/pycore_pybuffer.h" +#include "internal/pycore_pyerrors.h" +#include "internal/pycore_pyhash.h" +#include "internal/pycore_pylifecycle.h" +#include "internal/pycore_pymath.h" +#include "internal/pycore_pymem.h" +#include "internal/pycore_pymem_init.h" +#include "internal/pycore_pystate.h" +#include "internal/pycore_pystats.h" +#include "internal/pycore_pythonrun.h" +#include "internal/pycore_pythread.h" +#include "internal/pycore_qsbr.h" +#include "internal/pycore_range.h" +#include "internal/pycore_runtime.h" +#include "internal/pycore_runtime_init.h" +#include "internal/pycore_runtime_init_generated.h" +#include "internal/pycore_runtime_structs.h" +#include "internal/pycore_semaphore.h" +#include "internal/pycore_setobject.h" +#include "internal/pycore_signal.h" +#include "internal/pycore_sliceobject.h" +#include "internal/pycore_stats.h" +#include "internal/pycore_strhex.h" +#include "internal/pycore_stackref.h" +#include "internal/pycore_structs.h" +#include "internal/pycore_structseq.h" +#include "internal/pycore_symtable.h" +#include "internal/pycore_sysmodule.h" +#include "internal/pycore_template.h" +#include "internal/pycore_time.h" +#include "internal/pycore_token.h" +#include "internal/pycore_traceback.h" +#include "internal/pycore_tracemalloc.h" +#include "internal/pycore_tstate.h" +#include "internal/pycore_tuple.h" +#include "internal/pycore_typedefs.h" +#include "internal/pycore_typeobject.h" +#include 
"internal/pycore_typevarobject.h" +#include "internal/pycore_ucnhash.h" +#include "internal/pycore_unicodectype.h" +#include "internal/pycore_unicodeobject.h" +#include "internal/pycore_unicodeobject_generated.h" +#include "internal/pycore_unionobject.h" +#include "internal/pycore_uniqueid.h" +#include "internal/pycore_uop.h" +#include "internal/pycore_uop_ids.h" +#include "internal/pycore_uop_metadata.h" +#include "internal/pycore_warnings.h" +#include "internal/pycore_weakref.h" +#include "Python/stdlib_module_names.h" diff --git a/Modules/makesetup b/Modules/makesetup index 104c824b846540..b701a61a548bae 100755 --- a/Modules/makesetup +++ b/Modules/makesetup @@ -144,6 +144,8 @@ sed -e 's/[ ]*#.*//' -e '/^[ ]*$/d' | srcs= cpps= libs= + rust= + manifest= mods= mods_upper= skip= @@ -176,6 +178,8 @@ sed -e 's/[ ]*#.*//' -e '/^[ ]*$/d' | *.c++) srcs="$srcs $arg";; *.cxx) srcs="$srcs $arg";; *.cpp) srcs="$srcs $arg";; + *.rs) srcs="$srcs $arg"; rust="true";; + *.toml) manifest="$arg";; \$\(*_CFLAGS\)) cpps="$cpps $arg";; \$\(*_INCLUDES\)) cpps="$cpps $arg";; \$\(*_LIBS\)) libs="$libs $arg";; @@ -226,44 +230,68 @@ sed -e 's/[ ]*#.*//' -e '/^[ ]*$/d' | yes) continue;; esac objs='' - for src in $srcs - do - case $src in - *.c) obj=`basename $src .c`.o; cc='$(CC)';; - *.cc) obj=`basename $src .cc`.o; cc='$(CXX)';; - *.c++) obj=`basename $src .c++`.o; cc='$(CXX)';; - *.C) obj=`basename $src .C`.o; cc='$(CXX)';; - *.cxx) obj=`basename $src .cxx`.o; cc='$(CXX)';; - *.cpp) obj=`basename $src .cpp`.o; cc='$(CXX)';; - *.m) obj=`basename $src .m`.o; cc='$(CC)';; # Obj-C - *) continue;; - esac - case $src in - */*) obj="$srcdir/`dirname $src`/$obj";; - *) obj="$srcdir/$obj";; - esac - objs="$objs $obj" - case $src in - glmodule.c) ;; - /*) ;; - \$*) ;; - *) src='$(srcdir)/'"$srcdir/$src";; - esac - # custom flags first, PY_STDMODULE_CFLAGS may contain -I with system libmpdec - case $doconfig in - no) - cc="$cc $cpps \$(PY_STDMODULE_CFLAGS) \$(CCSHARED)" - rule="$obj: $src \$(MODULE_${mods_upper}_DEPS) \$(MODULE_DEPS_SHARED) \$(PYTHON_HEADERS)" - rule="$rule; $cc -c $src -o $obj" - ;; - *) - cc="$cc $cpps \$(PY_BUILTIN_MODULE_CFLAGS)" - rule="$obj: $src \$(MODULE_${mods_upper}_DEPS) \$(MODULE_DEPS_STATIC) \$(PYTHON_HEADERS)" - rule="$rule; $cc -c $src -o $obj" - ;; - esac + custom_ldflags='' + if test "x$rust" = "x"; then + for src in $srcs + do + case $src in + *.c) obj=`basename $src .c`.o; cc='$(CC)';; + *.cc) obj=`basename $src .cc`.o; cc='$(CXX)';; + *.c++) obj=`basename $src .c++`.o; cc='$(CXX)';; + *.C) obj=`basename $src .C`.o; cc='$(CXX)';; + *.cxx) obj=`basename $src .cxx`.o; cc='$(CXX)';; + *.cpp) obj=`basename $src .cpp`.o; cc='$(CXX)';; + *.m) obj=`basename $src .m`.o; cc='$(CC)';; # Obj-C + *) continue;; + esac + case $src in + */*) obj="$srcdir/`dirname $src`/$obj";; + *) obj="$srcdir/$obj";; + esac + objs="$objs $obj" + case $src in + glmodule.c) ;; + /*) ;; + \$*) ;; + *) src='$(srcdir)/'"$srcdir/$src";; + esac + # custom flags first, PY_STDMODULE_CFLAGS may contain -I with system libmpdec + case $doconfig in + no) + cc="$cc $cpps \$(PY_STDMODULE_CFLAGS) \$(CCSHARED)" + rule="$obj: $src \$(MODULE_${mods_upper}_DEPS) \$(MODULE_DEPS_SHARED) \$(PYTHON_HEADERS)" + rule="$rule; $cc -c $src -o $obj" + ;; + *) + cc="$cc $cpps \$(PY_BUILTIN_MODULE_CFLAGS)" + rule="$obj: $src \$(MODULE_${mods_upper}_DEPS) \$(MODULE_DEPS_STATIC) \$(PYTHON_HEADERS)" + rule="$rule; $cc -c $src -o $obj" + ;; + esac + echo "$rule" >>$rulesf + done + else + prefixed_srcs= + for src in $srcs + do + 
prefixed_srcs="$prefixed_srcs $srcdir/$src" + done + objs= + # there's actually only one obj, so just set it to the lib + for lib in $libs + do + objs="target/\$(CARGO_TARGET_DIR)/$lib" + done + libs= + # depends on the headers through cpython-sys + rule="$objs: \$(srcdir)/Cargo.toml \$(srcdir)/Cargo.lock \$(srcdir)/$srcdir/$manifest Modules/cpython-sys/wrapper.h $prefixed_srcs \$(PYTHON_HEADERS)" + rule="$rule; cargo build --lib --locked --package ${mods} --profile \$(CARGO_PROFILE)" echo "$rule" >>$rulesf - done + for mod in $mods + do + custom_ldflags="-Wl,--defsym=PyInit_$mod=PyInit_$mod" + done + fi case $doconfig in yes) OBJS="$OBJS $objs";; esac @@ -277,7 +305,7 @@ sed -e 's/[ ]*#.*//' -e '/^[ ]*$/d' | ;; esac rule="$file: $objs \$(MODULE_${mods_upper}_LDEPS)" - rule="$rule; \$(BLDSHARED) $objs $libs \$(LIBPYTHON) -o $file" + rule="$rule; \$(BLDSHARED) $custom_ldflags $objs $libs \$(LIBPYTHON) -o $file" echo "$rule" >>$rulesf done done diff --git a/Tools/build/regen-rust-wrapper-h.py b/Tools/build/regen-rust-wrapper-h.py new file mode 100644 index 00000000000000..763bf1133d4ecb --- /dev/null +++ b/Tools/build/regen-rust-wrapper-h.py @@ -0,0 +1,25 @@ +import os +import re +from pathlib import Path + +ROOT = Path(__file__).resolve().parents[2] +INCLUDE = ROOT / "Include" +WRAPPER_H = ROOT / "Modules" / "cpython-sys" / "wrapper.h" + +def normalize_path(header: str) -> str: + return re.sub(r'(:?\.\/)(:?Include\/)?', '', header) + +def main(output: str = WRAPPER_H) -> None: + headers = os.environ.get("PYTHON_HEADERS") + if headers is None: + raise RuntimeError("Unable to read $PYTHON_HEADERS!") + with open(output, "w") as f: + f.write("#define Py_BUILD_CORE\n") + f.write("#include \"Modules/expat/expat.h\"\n") + for header in headers.split(): + normalized_path = normalize_path(header) + f.write(f"#include \"{normalized_path}\"\n") + +if __name__ == "__main__": + import sys + main(*sys.argv[1:]) diff --git a/configure b/configure index eeb24c1d844e86..0a114e20c00c30 100755 --- a/configure +++ b/configure @@ -687,6 +687,8 @@ MODULE_BINASCII_FALSE MODULE_BINASCII_TRUE MODULE_ZLIB_FALSE MODULE_ZLIB_TRUE +MODULE__BASE64_FALSE +MODULE__BASE64_TRUE MODULE__UUID_FALSE MODULE__UUID_TRUE MODULE__TKINTER_FALSE @@ -885,6 +887,10 @@ TCLTK_LIBS TCLTK_CFLAGS LIBSQLITE3_LIBS LIBSQLITE3_CFLAGS +CARGO_PROFILE +CARGO_TARGET_DIR +CARGO_HOME +HAS_CARGO LIBMPDEC_INTERNAL LIBMPDEC_LIBS LIBMPDEC_CFLAGS @@ -1116,6 +1122,7 @@ with_libs with_system_expat with_system_libmpdec with_decimal_contextvar +with_rust_base64 enable_loadable_sqlite_extensions with_dbmliborder enable_ipv6 @@ -1917,6 +1924,8 @@ Optional Packages: --with-decimal-contextvar build _decimal module using a coroutine-local rather than a thread-local context (default is yes) + --with-rust-base64 build _base64 module using the SIMD accelerated Rust + implementation --with-dbmliborder=db1:db2:... override order to check db backends for dbm; a valid value is a colon separated string with the backend @@ -16034,6 +16043,88 @@ fi +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for --with-rust-base64" >&5 +printf %s "checking for --with-rust-base64... " >&6; } + +# Check whether --with-rust_base64 was given. 
+if test ${with_rust_base64+y} +then : + withval=$with_rust_base64; rust_base64="yes" +else case e in #( + e) rust_base64="no" ;; +esac +fi + +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $rust_base64" >&5 +printf "%s\n" "$rust_base64" >&6; } + +if test "x$rust_base64" = xyes +then : + + if test "$CARGO_HOME+set" != "set"; then + CARGO_HOME="$HOME/.cargo" + fi + # Extract the first word of "cargo", so it can be a program name with args. +set dummy cargo; ac_word=$2 +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +printf %s "checking for $ac_word... " >&6; } +if test ${ac_cv_prog_HAS_CARGO+y} +then : + printf %s "(cached) " >&6 +else case e in #( + e) if test -n "$HAS_CARGO"; then + ac_cv_prog_HAS_CARGO="$HAS_CARGO" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in not-found +do + IFS=$as_save_IFS + case $as_dir in #((( + '') as_dir=./ ;; + */) ;; + *) as_dir=$as_dir/ ;; + esac + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then + ac_cv_prog_HAS_CARGO=""$CARGO_HOME"" + printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + + test -z "$ac_cv_prog_HAS_CARGO" && ac_cv_prog_HAS_CARGO="found" +fi ;; +esac +fi +HAS_CARGO=$ac_cv_prog_HAS_CARGO +if test -n "$HAS_CARGO"; then + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $HAS_CARGO" >&5 +printf "%s\n" "$HAS_CARGO" >&6; } +else + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +printf "%s\n" "no" >&6; } +fi + + + if test $HAS_CARGO = "not-found"; then + as_fn_error $? "Could not find cargo. Please re-run configure with \$CARGO_HOME set" "$LINENO" 5 + fi + if test "$Py_OPT" = 'true'; then + CARGO_TARGET_DIR='release' + CARGO_PROFILE='release' + else + CARGO_TARGET_DIR='debug' + CARGO_PROFILE='dev' + fi + + + + + +fi + @@ -33479,6 +33570,47 @@ printf "%s\n" "$py_cv_module__uuid" >&6; } + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for stdlib extension module _base64" >&5 +printf %s "checking for stdlib extension module _base64... " >&6; } + if test "$py_cv_module__base64" != "n/a" +then : + + if true +then : + if test "$rust_base64" = "yes" +then : + py_cv_module__base64=yes +else case e in #( + e) py_cv_module__base64=missing ;; +esac +fi +else case e in #( + e) py_cv_module__base64=disabled ;; +esac +fi + +fi + as_fn_append MODULE_BLOCK "MODULE__BASE64_STATE=$py_cv_module__base64$as_nl" + if test "x$py_cv_module__base64" = xyes +then : + + + + +fi + if test "$py_cv_module__base64" = yes; then + MODULE__BASE64_TRUE= + MODULE__BASE64_FALSE='#' +else + MODULE__BASE64_TRUE='#' + MODULE__BASE64_FALSE= +fi + + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $py_cv_module__base64" >&5 +printf "%s\n" "$py_cv_module__base64" >&6; } + + + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for stdlib extension module zlib" >&5 printf %s "checking for stdlib extension module zlib... " >&6; } if test "$py_cv_module_zlib" != "n/a" @@ -34715,6 +34847,10 @@ if test -z "${MODULE__UUID_TRUE}" && test -z "${MODULE__UUID_FALSE}"; then as_fn_error $? "conditional \"MODULE__UUID\" was never defined. Usually this means the macro was only invoked conditionally." "$LINENO" 5 fi +if test -z "${MODULE__BASE64_TRUE}" && test -z "${MODULE__BASE64_FALSE}"; then + as_fn_error $? "conditional \"MODULE__BASE64\" was never defined. +Usually this means the macro was only invoked conditionally." 
"$LINENO" 5 +fi if test -z "${MODULE_ZLIB_TRUE}" && test -z "${MODULE_ZLIB_FALSE}"; then as_fn_error $? "conditional \"MODULE_ZLIB\" was never defined. Usually this means the macro was only invoked conditionally." "$LINENO" 5 diff --git a/configure.ac b/configure.ac index 92adc44da0d6fe..9b1a035f0ccc2f 100644 --- a/configure.ac +++ b/configure.ac @@ -4307,6 +4307,43 @@ fi AC_SUBST([LIBMPDEC_CFLAGS]) AC_SUBST([LIBMPDEC_INTERNAL]) +dnl Try to detect cargo in the environment. Cargo and rustup +dnl install into CARGO_HOME and RUSTUP_HOME, so check for those initially +AC_MSG_CHECKING([for --with-rust-base64]) +AC_ARG_WITH( + [rust_base64], + [AS_HELP_STRING( + [--with-rust-base64], + [build _base64 module using the SIMD accelerated Rust implementation] + )], + [rust_base64="yes"], + [rust_base64="no"]) +AC_MSG_RESULT([$rust_base64]) + +AS_VAR_IF( + [rust_base64], [yes], + [ + if test "$CARGO_HOME+set" != "set"; then + dnl try to guess the default UNIX value of ~/.cargo + CARGO_HOME="$HOME/.cargo" + fi + AC_CHECK_PROG(HAS_CARGO, [cargo], ["$CARGO_HOME"], [found], [not-found]) + if test $HAS_CARGO = "not-found"; then + AC_MSG_ERROR([Could not find cargo. Please re-run configure with \$CARGO_HOME set]) + fi + if test "$Py_OPT" = 'true'; then + CARGO_TARGET_DIR='release' + CARGO_PROFILE='release' + else + CARGO_TARGET_DIR='debug' + CARGO_PROFILE='dev' + fi + AC_SUBST([CARGO_HOME]) + AC_SUBST([CARGO_TARGET_DIR]) + AC_SUBST([CARGO_PROFILE]) + ] +) + dnl detect sqlite3 from Emscripten emport PY_CHECK_EMSCRIPTEN_PORT([LIBSQLITE3], [-sUSE_SQLITE3]) @@ -8156,6 +8193,10 @@ PY_STDLIB_MOD([_uuid], [], [test "$have_uuid" = "yes"], [$LIBUUID_CFLAGS], [$LIBUUID_LIBS]) +PY_STDLIB_MOD([_base64], + [], [test "$rust_base64" = "yes"], + [], []) + dnl compression libs PY_STDLIB_MOD([zlib], [], [test "$have_zlib" = yes], [$ZLIB_CFLAGS], [$ZLIB_LIBS]) diff --git a/rust-toolchain.toml b/rust-toolchain.toml new file mode 100644 index 00000000000000..da064b583d29e7 --- /dev/null +++ b/rust-toolchain.toml @@ -0,0 +1,2 @@ +[toolchain] +channel = "1.91.1" \ No newline at end of file From 3234e735824581b262c2d4620834149135d9e9bc Mon Sep 17 00:00:00 2001 From: Emma Harper Smith Date: Fri, 14 Nov 2025 15:34:54 -0800 Subject: [PATCH 02/20] Add vendored sources --- vendor/aho-corasick/.cargo-checksum.json | 1 + vendor/aho-corasick/.cargo_vcs_info.json | 6 + vendor/aho-corasick/.github/FUNDING.yml | 1 + vendor/aho-corasick/.github/workflows/ci.yml | 148 + vendor/aho-corasick/.vim/coc-settings.json | 12 + vendor/aho-corasick/COPYING | 3 + vendor/aho-corasick/Cargo.lock | 39 + vendor/aho-corasick/Cargo.toml | 80 + vendor/aho-corasick/DESIGN.md | 481 + vendor/aho-corasick/LICENSE-MIT | 21 + vendor/aho-corasick/README.md | 174 + vendor/aho-corasick/UNLICENSE | 24 + vendor/aho-corasick/rustfmt.toml | 2 + vendor/aho-corasick/src/ahocorasick.rs | 2789 ++++ vendor/aho-corasick/src/automaton.rs | 1608 ++ vendor/aho-corasick/src/dfa.rs | 835 ++ vendor/aho-corasick/src/lib.rs | 326 + vendor/aho-corasick/src/macros.rs | 18 + vendor/aho-corasick/src/nfa/contiguous.rs | 1141 ++ vendor/aho-corasick/src/nfa/mod.rs | 40 + vendor/aho-corasick/src/nfa/noncontiguous.rs | 1762 +++ vendor/aho-corasick/src/packed/api.rs | 687 + vendor/aho-corasick/src/packed/ext.rs | 39 + vendor/aho-corasick/src/packed/mod.rs | 120 + vendor/aho-corasick/src/packed/pattern.rs | 480 + vendor/aho-corasick/src/packed/rabinkarp.rs | 168 + .../aho-corasick/src/packed/teddy/README.md | 386 + .../aho-corasick/src/packed/teddy/builder.rs | 792 + 
.../aho-corasick/src/packed/teddy/generic.rs | 1382 ++ vendor/aho-corasick/src/packed/teddy/mod.rs | 9 + vendor/aho-corasick/src/packed/tests.rs | 583 + vendor/aho-corasick/src/packed/vector.rs | 1757 +++ vendor/aho-corasick/src/tests.rs | 1664 +++ vendor/aho-corasick/src/transducer.rs | 270 + vendor/aho-corasick/src/util/alphabet.rs | 409 + vendor/aho-corasick/src/util/buffer.rs | 124 + .../aho-corasick/src/util/byte_frequencies.rs | 258 + vendor/aho-corasick/src/util/debug.rs | 26 + vendor/aho-corasick/src/util/error.rs | 259 + vendor/aho-corasick/src/util/int.rs | 278 + vendor/aho-corasick/src/util/mod.rs | 12 + vendor/aho-corasick/src/util/prefilter.rs | 924 ++ vendor/aho-corasick/src/util/primitives.rs | 759 + vendor/aho-corasick/src/util/remapper.rs | 214 + vendor/aho-corasick/src/util/search.rs | 1148 ++ vendor/aho-corasick/src/util/special.rs | 42 + vendor/base64/.cargo-checksum.json | 1 + vendor/base64/.cargo_vcs_info.json | 6 + vendor/base64/.circleci/config.yml | 135 + .../ISSUE_TEMPLATE/general-purpose-issue.md | 21 + vendor/base64/Cargo.lock | 1515 ++ vendor/base64/Cargo.toml | 85 + vendor/base64/LICENSE-APACHE | 201 + vendor/base64/LICENSE-MIT | 21 + vendor/base64/README.md | 154 + vendor/base64/RELEASE-NOTES.md | 271 + vendor/base64/benches/benchmarks.rs | 238 + vendor/base64/clippy.toml | 1 + vendor/base64/examples/base64.rs | 81 + vendor/base64/icon_CLion.svg | 34 + vendor/base64/src/alphabet.rs | 285 + vendor/base64/src/chunked_encoder.rs | 172 + vendor/base64/src/decode.rs | 386 + vendor/base64/src/display.rs | 88 + vendor/base64/src/encode.rs | 492 + .../src/engine/general_purpose/decode.rs | 357 + .../engine/general_purpose/decode_suffix.rs | 162 + .../base64/src/engine/general_purpose/mod.rs | 352 + vendor/base64/src/engine/mod.rs | 478 + vendor/base64/src/engine/naive.rs | 195 + vendor/base64/src/engine/tests.rs | 1579 ++ vendor/base64/src/lib.rs | 277 + vendor/base64/src/prelude.rs | 20 + vendor/base64/src/read/decoder.rs | 335 + vendor/base64/src/read/decoder_tests.rs | 487 + vendor/base64/src/read/mod.rs | 6 + vendor/base64/src/tests.rs | 117 + vendor/base64/src/write/encoder.rs | 407 + .../base64/src/write/encoder_string_writer.rs | 207 + vendor/base64/src/write/encoder_tests.rs | 554 + vendor/base64/src/write/mod.rs | 11 + vendor/base64/tests/encode.rs | 77 + vendor/base64/tests/tests.rs | 161 + vendor/bindgen/.cargo-checksum.json | 1 + vendor/bindgen/.cargo_vcs_info.json | 6 + vendor/bindgen/Cargo.lock | 485 + vendor/bindgen/Cargo.toml | 189 + vendor/bindgen/LICENSE | 29 + vendor/bindgen/README.md | 89 + vendor/bindgen/build.rs | 29 + vendor/bindgen/callbacks.rs | 317 + vendor/bindgen/clang.rs | 2448 ++++ vendor/bindgen/codegen/bitfield_unit.rs | 112 + .../codegen/bitfield_unit_raw_ref_macros.rs | 191 + vendor/bindgen/codegen/bitfield_unit_tests.rs | 260 + vendor/bindgen/codegen/dyngen.rs | 258 + vendor/bindgen/codegen/error.rs | 52 + vendor/bindgen/codegen/helpers.rs | 395 + vendor/bindgen/codegen/impl_debug.rs | 243 + vendor/bindgen/codegen/impl_partialeq.rs | 142 + vendor/bindgen/codegen/mod.rs | 5991 ++++++++ .../postprocessing/merge_extern_blocks.rs | 72 + vendor/bindgen/codegen/postprocessing/mod.rs | 57 + .../postprocessing/sort_semantically.rs | 46 + vendor/bindgen/codegen/serialize.rs | 443 + vendor/bindgen/codegen/struct_layout.rs | 458 + vendor/bindgen/deps.rs | 61 + vendor/bindgen/diagnostics.rs | 146 + vendor/bindgen/extra_assertions.rs | 17 + vendor/bindgen/features.rs | 570 + vendor/bindgen/ir/analysis/derive.rs | 726 + 
vendor/bindgen/ir/analysis/has_destructor.rs | 175 + vendor/bindgen/ir/analysis/has_float.rs | 248 + .../ir/analysis/has_type_param_in_array.rs | 242 + vendor/bindgen/ir/analysis/has_vtable.rs | 235 + vendor/bindgen/ir/analysis/mod.rs | 395 + vendor/bindgen/ir/analysis/sizedness.rs | 353 + vendor/bindgen/ir/analysis/template_params.rs | 601 + vendor/bindgen/ir/annotations.rs | 259 + vendor/bindgen/ir/comment.rs | 100 + vendor/bindgen/ir/comp.rs | 1921 +++ vendor/bindgen/ir/context.rs | 3107 ++++ vendor/bindgen/ir/derive.rs | 130 + vendor/bindgen/ir/dot.rs | 85 + vendor/bindgen/ir/enum_ty.rs | 321 + vendor/bindgen/ir/function.rs | 838 ++ vendor/bindgen/ir/int.rs | 128 + vendor/bindgen/ir/item.rs | 1994 +++ vendor/bindgen/ir/item_kind.rs | 135 + vendor/bindgen/ir/layout.rs | 126 + vendor/bindgen/ir/mod.rs | 25 + vendor/bindgen/ir/module.rs | 96 + vendor/bindgen/ir/objc.rs | 343 + vendor/bindgen/ir/template.rs | 335 + vendor/bindgen/ir/traversal.rs | 478 + vendor/bindgen/ir/ty.rs | 1256 ++ vendor/bindgen/ir/var.rs | 523 + vendor/bindgen/lib.rs | 1422 ++ vendor/bindgen/log_stubs.rs | 38 + vendor/bindgen/options/as_args.rs | 52 + vendor/bindgen/options/cli.rs | 1151 ++ vendor/bindgen/options/helpers.rs | 43 + vendor/bindgen/options/mod.rs | 2286 +++ vendor/bindgen/parse.rs | 41 + vendor/bindgen/regex_set.rs | 199 + vendor/bindgen/time.rs | 52 + vendor/bitflags/.cargo-checksum.json | 1 + vendor/bitflags/.cargo_vcs_info.json | 6 + vendor/bitflags/CHANGELOG.md | 636 + vendor/bitflags/CODE_OF_CONDUCT.md | 73 + vendor/bitflags/CONTRIBUTING.md | 9 + vendor/bitflags/Cargo.lock | 325 + vendor/bitflags/Cargo.toml | 120 + vendor/bitflags/LICENSE-APACHE | 201 + vendor/bitflags/LICENSE-MIT | 25 + vendor/bitflags/README.md | 88 + vendor/bitflags/SECURITY.md | 13 + vendor/bitflags/benches/parse.rs | 96 + vendor/bitflags/examples/custom_bits_type.rs | 97 + vendor/bitflags/examples/custom_derive.rs | 23 + vendor/bitflags/examples/fmt.rs | 49 + vendor/bitflags/examples/macro_free.rs | 61 + vendor/bitflags/examples/serde.rs | 39 + vendor/bitflags/spec.md | 556 + vendor/bitflags/src/example_generated.rs | 65 + vendor/bitflags/src/external.rs | 262 + vendor/bitflags/src/external/arbitrary.rs | 33 + vendor/bitflags/src/external/bytemuck.rs | 19 + vendor/bitflags/src/external/serde.rs | 94 + vendor/bitflags/src/internal.rs | 125 + vendor/bitflags/src/iter.rs | 182 + vendor/bitflags/src/lib.rs | 997 ++ vendor/bitflags/src/parser.rs | 332 + vendor/bitflags/src/public.rs | 580 + vendor/bitflags/src/tests.rs | 135 + vendor/bitflags/src/tests/all.rs | 23 + vendor/bitflags/src/tests/bitflags_match.rs | 93 + vendor/bitflags/src/tests/bits.rs | 36 + vendor/bitflags/src/tests/clear.rs | 27 + vendor/bitflags/src/tests/complement.rs | 53 + vendor/bitflags/src/tests/contains.rs | 108 + vendor/bitflags/src/tests/difference.rs | 92 + vendor/bitflags/src/tests/empty.rs | 23 + vendor/bitflags/src/tests/eq.rs | 10 + vendor/bitflags/src/tests/extend.rs | 42 + vendor/bitflags/src/tests/flags.rs | 46 + vendor/bitflags/src/tests/fmt.rs | 97 + vendor/bitflags/src/tests/from_bits.rs | 45 + vendor/bitflags/src/tests/from_bits_retain.rs | 38 + .../bitflags/src/tests/from_bits_truncate.rs | 42 + vendor/bitflags/src/tests/from_name.rs | 42 + vendor/bitflags/src/tests/insert.rs | 91 + vendor/bitflags/src/tests/intersection.rs | 79 + vendor/bitflags/src/tests/intersects.rs | 91 + vendor/bitflags/src/tests/is_all.rs | 32 + vendor/bitflags/src/tests/is_empty.rs | 31 + vendor/bitflags/src/tests/iter.rs | 299 + vendor/bitflags/src/tests/parser.rs | 
332 + vendor/bitflags/src/tests/remove.rs | 100 + .../src/tests/symmetric_difference.rs | 110 + vendor/bitflags/src/tests/truncate.rs | 29 + vendor/bitflags/src/tests/union.rs | 71 + vendor/bitflags/src/tests/unknown.rs | 40 + vendor/bitflags/src/traits.rs | 457 + vendor/cexpr/.cargo-checksum.json | 1 + vendor/cexpr/.cargo_vcs_info.json | 5 + vendor/cexpr/.github/workflows/ci.yml | 31 + vendor/cexpr/Cargo.toml | 29 + vendor/cexpr/LICENSE-APACHE | 201 + vendor/cexpr/LICENSE-MIT | 25 + vendor/cexpr/bors.toml | 3 + vendor/cexpr/rustfmt.toml | 1 + vendor/cexpr/src/expr.rs | 610 + vendor/cexpr/src/lib.rs | 149 + vendor/cexpr/src/literal.rs | 361 + vendor/cexpr/src/token.rs | 44 + vendor/cexpr/tests/clang.rs | 339 + vendor/cexpr/tests/input/chars.h | 3 + vendor/cexpr/tests/input/fail.h | 9 + vendor/cexpr/tests/input/floats.h | 8 + vendor/cexpr/tests/input/int_signed.h | 3 + vendor/cexpr/tests/input/int_unsigned.h | 29 + vendor/cexpr/tests/input/strings.h | 17 + vendor/cexpr/tests/input/test_llvm_bug_9069.h | 4 + vendor/cfg-if/.cargo-checksum.json | 1 + vendor/cfg-if/.cargo_vcs_info.json | 6 + vendor/cfg-if/.github/dependabot.yml | 14 + vendor/cfg-if/.github/workflows/main.yaml | 48 + vendor/cfg-if/.github/workflows/publish.yaml | 25 + vendor/cfg-if/CHANGELOG.md | 29 + vendor/cfg-if/Cargo.lock | 16 + vendor/cfg-if/Cargo.toml | 47 + vendor/cfg-if/LICENSE-APACHE | 201 + vendor/cfg-if/LICENSE-MIT | 25 + vendor/cfg-if/README.md | 56 + vendor/cfg-if/src/lib.rs | 212 + vendor/cfg-if/tests/xcrate.rs | 16 + vendor/clang-sys/.cargo-checksum.json | 1 + vendor/clang-sys/.cargo_vcs_info.json | 6 + vendor/clang-sys/.github/workflows/ci.yml | 56 + vendor/clang-sys/.github/workflows/ssh.yml | 40 + vendor/clang-sys/CHANGELOG.md | 552 + vendor/clang-sys/Cargo.toml | 77 + vendor/clang-sys/LICENSE.txt | 202 + vendor/clang-sys/README.md | 116 + vendor/clang-sys/build.rs | 77 + vendor/clang-sys/build/common.rs | 355 + vendor/clang-sys/build/dynamic.rs | 276 + vendor/clang-sys/build/macros.rs | 49 + vendor/clang-sys/build/static.rs | 146 + vendor/clang-sys/clippy.toml | 1 + vendor/clang-sys/src/lib.rs | 2433 ++++ vendor/clang-sys/src/link.rs | 322 + vendor/clang-sys/src/support.rs | 238 + vendor/clang-sys/tests/build.rs | 356 + vendor/clang-sys/tests/header.h | 6 + vendor/clang-sys/tests/lib.rs | 52 + vendor/either/.cargo-checksum.json | 1 + vendor/either/.cargo_vcs_info.json | 6 + vendor/either/.github/workflows/ci.yml | 83 + vendor/either/Cargo.lock | 96 + vendor/either/Cargo.toml | 70 + vendor/either/LICENSE-APACHE | 201 + vendor/either/LICENSE-MIT | 25 + vendor/either/README-crates.io.md | 10 + vendor/either/README.rst | 204 + vendor/either/src/into_either.rs | 64 + vendor/either/src/iterator.rs | 315 + vendor/either/src/lib.rs | 1561 ++ vendor/either/src/serde_untagged.rs | 69 + vendor/either/src/serde_untagged_optional.rs | 74 + vendor/glob/.cargo-checksum.json | 1 + vendor/glob/.cargo_vcs_info.json | 6 + vendor/glob/.github/dependabot.yml | 13 + vendor/glob/.github/workflows/publish.yml | 27 + vendor/glob/.github/workflows/rust.yml | 99 + vendor/glob/CHANGELOG.md | 44 + vendor/glob/Cargo.lock | 107 + vendor/glob/Cargo.toml | 45 + vendor/glob/LICENSE-APACHE | 201 + vendor/glob/LICENSE-MIT | 25 + vendor/glob/README.md | 38 + vendor/glob/src/lib.rs | 1511 ++ vendor/glob/tests/glob-std.rs | 477 + vendor/glob/triagebot.toml | 1 + vendor/itertools/.cargo-checksum.json | 1 + vendor/itertools/.cargo_vcs_info.json | 6 + vendor/itertools/.codecov.yml | 7 + vendor/itertools/.github/dependabot.yml | 6 + 
vendor/itertools/.github/workflows/ci.yml | 85 + .../itertools/.github/workflows/coverage.yml | 34 + vendor/itertools/CHANGELOG.md | 539 + vendor/itertools/CONTRIBUTING.md | 189 + vendor/itertools/Cargo.lock | 740 + vendor/itertools/Cargo.toml | 105 + vendor/itertools/LICENSE-APACHE | 201 + vendor/itertools/LICENSE-MIT | 25 + vendor/itertools/README.md | 33 + vendor/itertools/benches/bench1.rs | 767 + vendor/itertools/benches/combinations.rs | 117 + .../benches/combinations_with_replacement.rs | 40 + .../itertools/benches/fold_specialization.rs | 75 + vendor/itertools/benches/powerset.rs | 97 + vendor/itertools/benches/specializations.rs | 667 + vendor/itertools/benches/tree_reduce.rs | 150 + .../itertools/benches/tuple_combinations.rs | 113 + vendor/itertools/benches/tuples.rs | 208 + vendor/itertools/examples/iris.data | 150 + vendor/itertools/examples/iris.rs | 140 + vendor/itertools/src/adaptors/coalesce.rs | 286 + vendor/itertools/src/adaptors/map.rs | 130 + vendor/itertools/src/adaptors/mod.rs | 1208 ++ .../itertools/src/adaptors/multi_product.rs | 231 + vendor/itertools/src/combinations.rs | 243 + .../src/combinations_with_replacement.rs | 192 + vendor/itertools/src/concat_impl.rs | 30 + vendor/itertools/src/cons_tuples_impl.rs | 58 + vendor/itertools/src/diff.rs | 104 + vendor/itertools/src/duplicates_impl.rs | 216 + vendor/itertools/src/either_or_both.rs | 514 + vendor/itertools/src/exactly_one_err.rs | 125 + vendor/itertools/src/extrema_set.rs | 50 + vendor/itertools/src/flatten_ok.rs | 205 + vendor/itertools/src/format.rs | 178 + vendor/itertools/src/free.rs | 317 + vendor/itertools/src/group_map.rs | 32 + vendor/itertools/src/groupbylazy.rs | 613 + vendor/itertools/src/grouping_map.rs | 614 + vendor/itertools/src/impl_macros.rs | 34 + vendor/itertools/src/intersperse.rs | 142 + vendor/itertools/src/iter_index.rs | 116 + vendor/itertools/src/k_smallest.rs | 98 + vendor/itertools/src/kmerge_impl.rs | 240 + vendor/itertools/src/lazy_buffer.rs | 75 + vendor/itertools/src/lib.rs | 4365 ++++++ vendor/itertools/src/merge_join.rs | 347 + vendor/itertools/src/minmax.rs | 116 + vendor/itertools/src/multipeek_impl.rs | 116 + vendor/itertools/src/pad_tail.rs | 124 + vendor/itertools/src/peek_nth.rs | 178 + vendor/itertools/src/peeking_take_while.rs | 201 + vendor/itertools/src/permutations.rs | 186 + vendor/itertools/src/powerset.rs | 131 + vendor/itertools/src/process_results_impl.rs | 108 + vendor/itertools/src/put_back_n_impl.rs | 71 + vendor/itertools/src/rciter_impl.rs | 102 + vendor/itertools/src/repeatn.rs | 83 + vendor/itertools/src/size_hint.rs | 94 + vendor/itertools/src/sources.rs | 153 + vendor/itertools/src/take_while_inclusive.rs | 96 + vendor/itertools/src/tee.rs | 93 + vendor/itertools/src/tuple_impl.rs | 401 + vendor/itertools/src/unique_impl.rs | 188 + vendor/itertools/src/unziptuple.rs | 80 + vendor/itertools/src/with_position.rs | 124 + vendor/itertools/src/zip_eq_impl.rs | 64 + vendor/itertools/src/zip_longest.rs | 139 + vendor/itertools/src/ziptuple.rs | 137 + vendor/itertools/tests/adaptors_no_collect.rs | 51 + vendor/itertools/tests/flatten_ok.rs | 76 + vendor/itertools/tests/laziness.rs | 283 + vendor/itertools/tests/macros_hygiene.rs | 14 + vendor/itertools/tests/merge_join.rs | 101 + vendor/itertools/tests/peeking_take_while.rs | 69 + vendor/itertools/tests/quick.rs | 1967 +++ vendor/itertools/tests/specializations.rs | 582 + vendor/itertools/tests/test_core.rs | 374 + vendor/itertools/tests/test_std.rs | 1523 ++ vendor/itertools/tests/tuples.rs | 86 + 
vendor/itertools/tests/zip.rs | 56 + vendor/libc/.cargo-checksum.json | 1 + vendor/libc/.cargo_vcs_info.json | 6 + vendor/libc/.editorconfig | 7 + vendor/libc/.git-blame-ignore-revs | 6 + vendor/libc/.release-plz.toml | 49 + vendor/libc/CHANGELOG.md | 747 + vendor/libc/CONTRIBUTING.md | 126 + vendor/libc/Cargo.lock | 16 + vendor/libc/Cargo.toml | 201 + vendor/libc/LICENSE-APACHE | 176 + vendor/libc/LICENSE-MIT | 25 + vendor/libc/README.md | 117 + vendor/libc/build.rs | 298 + vendor/libc/cherry-pick-stable.sh | 150 + vendor/libc/rustfmt.toml | 4 + vendor/libc/src/fuchsia/aarch64.rs | 69 + vendor/libc/src/fuchsia/mod.rs | 4322 ++++++ vendor/libc/src/fuchsia/riscv64.rs | 46 + vendor/libc/src/fuchsia/x86_64.rs | 142 + vendor/libc/src/hermit.rs | 561 + vendor/libc/src/lib.rs | 159 + vendor/libc/src/macros.rs | 446 + vendor/libc/src/new/bionic/mod.rs | 2 + vendor/libc/src/new/bionic/sys/mod.rs | 2 + vendor/libc/src/new/bionic/sys/socket.rs | 51 + vendor/libc/src/new/linux_uapi/linux/can.rs | 136 + .../src/new/linux_uapi/linux/can/j1939.rs | 60 + .../libc/src/new/linux_uapi/linux/can/raw.rs | 15 + vendor/libc/src/new/linux_uapi/linux/mod.rs | 4 + vendor/libc/src/new/linux_uapi/mod.rs | 4 + vendor/libc/src/new/mod.rs | 15 + vendor/libc/src/primitives.rs | 95 + vendor/libc/src/psp.rs | 4131 ++++++ vendor/libc/src/sgx.rs | 15 + vendor/libc/src/solid/aarch64.rs | 1 + vendor/libc/src/solid/arm.rs | 1 + vendor/libc/src/solid/mod.rs | 876 ++ vendor/libc/src/switch.rs | 16 + vendor/libc/src/teeos/mod.rs | 1355 ++ vendor/libc/src/trusty.rs | 72 + vendor/libc/src/types.rs | 39 + vendor/libc/src/unix/aix/mod.rs | 3382 +++++ vendor/libc/src/unix/aix/powerpc64.rs | 477 + vendor/libc/src/unix/bsd/apple/b32/mod.rs | 135 + .../src/unix/bsd/apple/b64/aarch64/mod.rs | 53 + vendor/libc/src/unix/bsd/apple/b64/mod.rs | 141 + .../libc/src/unix/bsd/apple/b64/x86_64/mod.rs | 179 + vendor/libc/src/unix/bsd/apple/mod.rs | 6245 ++++++++ .../unix/bsd/freebsdlike/dragonfly/errno.rs | 17 + .../src/unix/bsd/freebsdlike/dragonfly/mod.rs | 1635 +++ .../unix/bsd/freebsdlike/freebsd/aarch64.rs | 110 + .../src/unix/bsd/freebsdlike/freebsd/arm.rs | 53 + .../bsd/freebsdlike/freebsd/freebsd11/b32.rs | 37 + .../bsd/freebsdlike/freebsd/freebsd11/b64.rs | 36 + .../bsd/freebsdlike/freebsd/freebsd11/mod.rs | 449 + .../bsd/freebsdlike/freebsd/freebsd12/mod.rs | 487 + .../freebsdlike/freebsd/freebsd12/x86_64.rs | 7 + .../bsd/freebsdlike/freebsd/freebsd13/mod.rs | 531 + .../freebsdlike/freebsd/freebsd13/x86_64.rs | 7 + .../bsd/freebsdlike/freebsd/freebsd14/mod.rs | 532 + .../freebsdlike/freebsd/freebsd14/x86_64.rs | 14 + .../bsd/freebsdlike/freebsd/freebsd15/mod.rs | 534 + .../freebsdlike/freebsd/freebsd15/x86_64.rs | 14 + .../src/unix/bsd/freebsdlike/freebsd/mod.rs | 5659 ++++++++ .../unix/bsd/freebsdlike/freebsd/powerpc.rs | 62 + .../unix/bsd/freebsdlike/freebsd/powerpc64.rs | 63 + .../unix/bsd/freebsdlike/freebsd/riscv64.rs | 116 + .../src/unix/bsd/freebsdlike/freebsd/x86.rs | 134 + .../bsd/freebsdlike/freebsd/x86_64/mod.rs | 346 + vendor/libc/src/unix/bsd/freebsdlike/mod.rs | 2009 +++ vendor/libc/src/unix/bsd/mod.rs | 969 ++ vendor/libc/src/unix/bsd/netbsdlike/mod.rs | 905 ++ .../src/unix/bsd/netbsdlike/netbsd/aarch64.rs | 132 + .../src/unix/bsd/netbsdlike/netbsd/arm.rs | 70 + .../src/unix/bsd/netbsdlike/netbsd/mips.rs | 11 + .../src/unix/bsd/netbsdlike/netbsd/mod.rs | 3007 ++++ .../src/unix/bsd/netbsdlike/netbsd/powerpc.rs | 10 + .../src/unix/bsd/netbsdlike/netbsd/riscv64.rs | 77 + .../src/unix/bsd/netbsdlike/netbsd/sparc64.rs | 7 + 
.../src/unix/bsd/netbsdlike/netbsd/x86.rs | 5 + .../src/unix/bsd/netbsdlike/netbsd/x86_64.rs | 56 + .../unix/bsd/netbsdlike/openbsd/aarch64.rs | 20 + .../src/unix/bsd/netbsdlike/openbsd/arm.rs | 5 + .../src/unix/bsd/netbsdlike/openbsd/mips64.rs | 4 + .../src/unix/bsd/netbsdlike/openbsd/mod.rs | 2149 +++ .../unix/bsd/netbsdlike/openbsd/powerpc.rs | 5 + .../unix/bsd/netbsdlike/openbsd/powerpc64.rs | 5 + .../unix/bsd/netbsdlike/openbsd/riscv64.rs | 25 + .../unix/bsd/netbsdlike/openbsd/sparc64.rs | 4 + .../src/unix/bsd/netbsdlike/openbsd/x86.rs | 5 + .../src/unix/bsd/netbsdlike/openbsd/x86_64.rs | 109 + vendor/libc/src/unix/cygwin/mod.rs | 2477 ++++ vendor/libc/src/unix/haiku/b32.rs | 18 + vendor/libc/src/unix/haiku/b64.rs | 18 + vendor/libc/src/unix/haiku/bsd.rs | 151 + vendor/libc/src/unix/haiku/mod.rs | 2097 +++ vendor/libc/src/unix/haiku/native.rs | 1388 ++ vendor/libc/src/unix/haiku/x86_64.rs | 208 + vendor/libc/src/unix/hurd/b32.rs | 92 + vendor/libc/src/unix/hurd/b64.rs | 94 + vendor/libc/src/unix/hurd/mod.rs | 4623 ++++++ .../src/unix/linux_like/android/b32/arm.rs | 532 + .../src/unix/linux_like/android/b32/mod.rs | 239 + .../unix/linux_like/android/b32/x86/mod.rs | 604 + .../linux_like/android/b64/aarch64/mod.rs | 473 + .../src/unix/linux_like/android/b64/mod.rs | 292 + .../linux_like/android/b64/riscv64/mod.rs | 384 + .../unix/linux_like/android/b64/x86_64/mod.rs | 748 + .../libc/src/unix/linux_like/android/mod.rs | 4157 ++++++ .../src/unix/linux_like/emscripten/lfs64.rs | 211 + .../src/unix/linux_like/emscripten/mod.rs | 1589 ++ .../unix/linux_like/linux/arch/generic/mod.rs | 334 + .../unix/linux_like/linux/arch/mips/mod.rs | 333 + .../src/unix/linux_like/linux/arch/mod.rs | 20 + .../unix/linux_like/linux/arch/powerpc/mod.rs | 280 + .../unix/linux_like/linux/arch/sparc/mod.rs | 247 + .../unix/linux_like/linux/gnu/b32/arm/mod.rs | 928 ++ .../unix/linux_like/linux/gnu/b32/csky/mod.rs | 745 + .../unix/linux_like/linux/gnu/b32/m68k/mod.rs | 863 ++ .../unix/linux_like/linux/gnu/b32/mips/mod.rs | 925 ++ .../src/unix/linux_like/linux/gnu/b32/mod.rs | 491 + .../unix/linux_like/linux/gnu/b32/powerpc.rs | 892 ++ .../linux_like/linux/gnu/b32/riscv32/mod.rs | 808 ++ .../linux_like/linux/gnu/b32/sparc/mod.rs | 865 ++ .../unix/linux_like/linux/gnu/b32/x86/mod.rs | 1098 ++ .../linux_like/linux/gnu/b64/aarch64/ilp32.rs | 54 + .../linux_like/linux/gnu/b64/aarch64/lp64.rs | 57 + .../linux_like/linux/gnu/b64/aarch64/mod.rs | 973 ++ .../linux/gnu/b64/loongarch64/mod.rs | 922 ++ .../linux_like/linux/gnu/b64/mips64/mod.rs | 930 ++ .../src/unix/linux_like/linux/gnu/b64/mod.rs | 213 + .../linux_like/linux/gnu/b64/powerpc64/mod.rs | 974 ++ .../linux_like/linux/gnu/b64/riscv64/mod.rs | 910 ++ .../unix/linux_like/linux/gnu/b64/s390x.rs | 955 ++ .../linux_like/linux/gnu/b64/sparc64/mod.rs | 930 ++ .../linux_like/linux/gnu/b64/x86_64/mod.rs | 809 ++ .../linux/gnu/b64/x86_64/not_x32.rs | 446 + .../linux_like/linux/gnu/b64/x86_64/x32.rs | 398 + .../libc/src/unix/linux_like/linux/gnu/mod.rs | 1382 ++ vendor/libc/src/unix/linux_like/linux/mod.rs | 6830 +++++++++ .../unix/linux_like/linux/musl/b32/arm/mod.rs | 792 + .../unix/linux_like/linux/musl/b32/hexagon.rs | 621 + .../linux_like/linux/musl/b32/mips/mod.rs | 775 + .../src/unix/linux_like/linux/musl/b32/mod.rs | 65 + .../unix/linux_like/linux/musl/b32/powerpc.rs | 766 + .../linux_like/linux/musl/b32/riscv32/mod.rs | 655 + .../unix/linux_like/linux/musl/b32/x86/mod.rs | 889 ++ .../linux_like/linux/musl/b64/aarch64/mod.rs | 712 + 
.../linux/musl/b64/loongarch64/mod.rs | 667 + .../unix/linux_like/linux/musl/b64/mips64.rs | 708 + .../src/unix/linux_like/linux/musl/b64/mod.rs | 116 + .../linux_like/linux/musl/b64/powerpc64.rs | 752 + .../linux_like/linux/musl/b64/riscv64/mod.rs | 672 + .../unix/linux_like/linux/musl/b64/s390x.rs | 732 + .../linux_like/linux/musl/b64/wasm32/mod.rs | 688 + .../linux_like/linux/musl/b64/wasm32/wali.rs | 441 + .../linux_like/linux/musl/b64/x86_64/mod.rs | 915 ++ .../src/unix/linux_like/linux/musl/lfs64.rs | 239 + .../src/unix/linux_like/linux/musl/mod.rs | 1006 ++ .../unix/linux_like/linux/uclibc/arm/mod.rs | 925 ++ .../linux/uclibc/mips/mips32/mod.rs | 695 + .../linux/uclibc/mips/mips64/mod.rs | 204 + .../unix/linux_like/linux/uclibc/mips/mod.rs | 312 + .../src/unix/linux_like/linux/uclibc/mod.rs | 517 + .../linux_like/linux/uclibc/x86_64/l4re.rs | 53 + .../linux_like/linux/uclibc/x86_64/mod.rs | 355 + .../linux_like/linux/uclibc/x86_64/other.rs | 7 + vendor/libc/src/unix/linux_like/mod.rs | 2214 +++ vendor/libc/src/unix/mod.rs | 1901 +++ vendor/libc/src/unix/newlib/aarch64/mod.rs | 52 + vendor/libc/src/unix/newlib/arm/mod.rs | 54 + vendor/libc/src/unix/newlib/espidf/mod.rs | 120 + vendor/libc/src/unix/newlib/generic.rs | 39 + vendor/libc/src/unix/newlib/horizon/mod.rs | 278 + vendor/libc/src/unix/newlib/mod.rs | 997 ++ vendor/libc/src/unix/newlib/powerpc/mod.rs | 14 + vendor/libc/src/unix/newlib/rtems/mod.rs | 146 + vendor/libc/src/unix/newlib/vita/mod.rs | 235 + vendor/libc/src/unix/nto/aarch64.rs | 35 + vendor/libc/src/unix/nto/mod.rs | 3406 +++++ vendor/libc/src/unix/nto/neutrino.rs | 1270 ++ vendor/libc/src/unix/nto/x86_64.rs | 111 + vendor/libc/src/unix/nuttx/mod.rs | 597 + vendor/libc/src/unix/redox/mod.rs | 1496 ++ vendor/libc/src/unix/solarish/compat.rs | 218 + vendor/libc/src/unix/solarish/illumos.rs | 343 + vendor/libc/src/unix/solarish/mod.rs | 3240 +++++ vendor/libc/src/unix/solarish/solaris.rs | 239 + vendor/libc/src/unix/solarish/x86.rs | 31 + vendor/libc/src/unix/solarish/x86_64.rs | 170 + vendor/libc/src/unix/solarish/x86_common.rs | 69 + vendor/libc/src/vxworks/aarch64.rs | 1 + vendor/libc/src/vxworks/arm.rs | 1 + vendor/libc/src/vxworks/mod.rs | 2018 +++ vendor/libc/src/vxworks/powerpc.rs | 1 + vendor/libc/src/vxworks/powerpc64.rs | 1 + vendor/libc/src/vxworks/riscv32.rs | 1 + vendor/libc/src/vxworks/riscv64.rs | 1 + vendor/libc/src/vxworks/x86.rs | 1 + vendor/libc/src/vxworks/x86_64.rs | 1 + vendor/libc/src/wasi/mod.rs | 853 ++ vendor/libc/src/wasi/p2.rs | 188 + vendor/libc/src/windows/gnu/mod.rs | 36 + vendor/libc/src/windows/mod.rs | 611 + vendor/libc/src/windows/msvc/mod.rs | 17 + vendor/libc/src/xous.rs | 18 + vendor/libc/tests/const_fn.rs | 3 + vendor/libloading/.cargo-checksum.json | 1 + vendor/libloading/.cargo_vcs_info.json | 6 + .../.github/workflows/libloading.yml | 126 + vendor/libloading/Cargo.lock | 47 + vendor/libloading/Cargo.toml | 90 + vendor/libloading/LICENSE | 12 + vendor/libloading/README.mkd | 16 + vendor/libloading/src/changelog.rs | 405 + vendor/libloading/src/error.rs | 146 + vendor/libloading/src/lib.rs | 81 + vendor/libloading/src/os/mod.rs | 27 + vendor/libloading/src/os/unix/consts.rs | 265 + vendor/libloading/src/os/unix/mod.rs | 485 + vendor/libloading/src/os/windows/mod.rs | 590 + vendor/libloading/src/safe.rs | 318 + vendor/libloading/src/test_helpers.rs | 37 + vendor/libloading/src/util.rs | 34 + vendor/libloading/tests/constants.rs | 13 + vendor/libloading/tests/functions.rs | 312 + vendor/libloading/tests/library_filename.rs | 17 + 
vendor/libloading/tests/markers.rs | 96 + vendor/libloading/tests/windows.rs | 71 + vendor/log/.cargo-checksum.json | 1 + vendor/log/.cargo_vcs_info.json | 6 + vendor/log/.github/workflows/main.yml | 134 + vendor/log/CHANGELOG.md | 410 + vendor/log/Cargo.lock | 270 + vendor/log/Cargo.toml | 151 + vendor/log/LICENSE-APACHE | 201 + vendor/log/LICENSE-MIT | 25 + vendor/log/README.md | 134 + vendor/log/benches/value.rs | 27 + vendor/log/src/__private_api.rs | 151 + vendor/log/src/kv/error.rs | 94 + vendor/log/src/kv/key.rs | 163 + vendor/log/src/kv/mod.rs | 265 + vendor/log/src/kv/source.rs | 514 + vendor/log/src/kv/value.rs | 1395 ++ vendor/log/src/lib.rs | 2005 +++ vendor/log/src/macros.rs | 579 + vendor/log/src/serde.rs | 397 + vendor/log/tests/integration.rs | 101 + vendor/log/tests/macros.rs | 429 + vendor/log/triagebot.toml | 1 + vendor/memchr/.cargo-checksum.json | 1 + vendor/memchr/.cargo_vcs_info.json | 6 + vendor/memchr/.ignore | 1 + vendor/memchr/.vim/coc-settings.json | 16 + vendor/memchr/COPYING | 3 + vendor/memchr/Cargo.lock | 80 + vendor/memchr/Cargo.toml | 89 + vendor/memchr/LICENSE-MIT | 21 + vendor/memchr/README.md | 196 + vendor/memchr/UNLICENSE | 24 + vendor/memchr/rustfmt.toml | 2 + vendor/memchr/src/arch/aarch64/memchr.rs | 137 + vendor/memchr/src/arch/aarch64/mod.rs | 7 + vendor/memchr/src/arch/aarch64/neon/memchr.rs | 1031 ++ vendor/memchr/src/arch/aarch64/neon/mod.rs | 6 + .../src/arch/aarch64/neon/packedpair.rs | 236 + vendor/memchr/src/arch/all/memchr.rs | 1022 ++ vendor/memchr/src/arch/all/mod.rs | 234 + .../src/arch/all/packedpair/default_rank.rs | 258 + vendor/memchr/src/arch/all/packedpair/mod.rs | 359 + vendor/memchr/src/arch/all/rabinkarp.rs | 390 + vendor/memchr/src/arch/all/shiftor.rs | 89 + vendor/memchr/src/arch/all/twoway.rs | 877 ++ vendor/memchr/src/arch/generic/memchr.rs | 1214 ++ vendor/memchr/src/arch/generic/mod.rs | 14 + vendor/memchr/src/arch/generic/packedpair.rs | 317 + vendor/memchr/src/arch/mod.rs | 16 + vendor/memchr/src/arch/wasm32/memchr.rs | 124 + vendor/memchr/src/arch/wasm32/mod.rs | 7 + .../memchr/src/arch/wasm32/simd128/memchr.rs | 1020 ++ vendor/memchr/src/arch/wasm32/simd128/mod.rs | 6 + .../src/arch/wasm32/simd128/packedpair.rs | 228 + vendor/memchr/src/arch/x86_64/avx2/memchr.rs | 1352 ++ vendor/memchr/src/arch/x86_64/avx2/mod.rs | 6 + .../memchr/src/arch/x86_64/avx2/packedpair.rs | 272 + vendor/memchr/src/arch/x86_64/memchr.rs | 335 + vendor/memchr/src/arch/x86_64/mod.rs | 8 + vendor/memchr/src/arch/x86_64/sse2/memchr.rs | 1077 ++ vendor/memchr/src/arch/x86_64/sse2/mod.rs | 6 + .../memchr/src/arch/x86_64/sse2/packedpair.rs | 232 + vendor/memchr/src/cow.rs | 107 + vendor/memchr/src/ext.rs | 54 + vendor/memchr/src/lib.rs | 221 + vendor/memchr/src/macros.rs | 20 + vendor/memchr/src/memchr.rs | 903 ++ vendor/memchr/src/memmem/mod.rs | 737 + vendor/memchr/src/memmem/searcher.rs | 1030 ++ vendor/memchr/src/tests/memchr/mod.rs | 307 + vendor/memchr/src/tests/memchr/naive.rs | 33 + vendor/memchr/src/tests/memchr/prop.rs | 323 + vendor/memchr/src/tests/mod.rs | 15 + vendor/memchr/src/tests/packedpair.rs | 216 + vendor/memchr/src/tests/substring/mod.rs | 232 + vendor/memchr/src/tests/substring/naive.rs | 45 + vendor/memchr/src/tests/substring/prop.rs | 126 + vendor/memchr/src/vector.rs | 501 + vendor/minimal-lexical/.cargo-checksum.json | 1 + vendor/minimal-lexical/.cargo_vcs_info.json | 5 + .../.github/ISSUE_TEMPLATE/bug_report.md | 41 + .../.github/ISSUE_TEMPLATE/custom.md | 21 + .../.github/ISSUE_TEMPLATE/documentation.md | 16 + 
.../.github/ISSUE_TEMPLATE/feature_request.md | 27 + .../.github/ISSUE_TEMPLATE/question.md | 11 + .../.github/PULL_REQUEST_TEMPLATE/bug_fix.md | 27 + .../.github/PULL_REQUEST_TEMPLATE/custom.md | 22 + .../PULL_REQUEST_TEMPLATE/documentation.md | 21 + .../.github/workflows/Cross.yml | 90 + .../.github/workflows/Features.yml | 23 + .../minimal-lexical/.github/workflows/OSX.yml | 40 + .../.github/workflows/Simple.yml | 41 + .../.github/workflows/Valgrind.yml | 24 + vendor/minimal-lexical/.gitmodules | 4 + vendor/minimal-lexical/CHANGELOG | 38 + vendor/minimal-lexical/CODE_OF_CONDUCT.md | 141 + vendor/minimal-lexical/Cargo.toml | 33 + vendor/minimal-lexical/LICENSE-APACHE | 201 + vendor/minimal-lexical/LICENSE-MIT | 23 + vendor/minimal-lexical/LICENSE.md | 37 + vendor/minimal-lexical/README.md | 102 + vendor/minimal-lexical/clippy.toml | 1 + vendor/minimal-lexical/rustfmt.toml | 16 + vendor/minimal-lexical/src/bellerophon.rs | 391 + vendor/minimal-lexical/src/bigint.rs | 788 + vendor/minimal-lexical/src/extended_float.rs | 24 + vendor/minimal-lexical/src/fpu.rs | 98 + vendor/minimal-lexical/src/heapvec.rs | 190 + vendor/minimal-lexical/src/lemire.rs | 225 + vendor/minimal-lexical/src/lib.rs | 68 + vendor/minimal-lexical/src/libm.rs | 1238 ++ vendor/minimal-lexical/src/mask.rs | 60 + vendor/minimal-lexical/src/num.rs | 308 + vendor/minimal-lexical/src/number.rs | 83 + vendor/minimal-lexical/src/parse.rs | 201 + vendor/minimal-lexical/src/rounding.rs | 131 + vendor/minimal-lexical/src/slow.rs | 403 + vendor/minimal-lexical/src/stackvec.rs | 308 + vendor/minimal-lexical/src/table.rs | 11 + .../minimal-lexical/src/table_bellerophon.rs | 119 + vendor/minimal-lexical/src/table_lemire.rs | 676 + vendor/minimal-lexical/src/table_small.rs | 90 + vendor/minimal-lexical/tests/bellerophon.rs | 59 + .../tests/bellerophon_tests.rs | 231 + .../tests/integration_tests.rs | 228 + vendor/minimal-lexical/tests/lemire_tests.rs | 378 + vendor/minimal-lexical/tests/libm_tests.rs | 289 + vendor/minimal-lexical/tests/mask_tests.rs | 16 + vendor/minimal-lexical/tests/number_tests.rs | 88 + vendor/minimal-lexical/tests/parse_tests.rs | 189 + .../minimal-lexical/tests/rounding_tests.rs | 64 + vendor/minimal-lexical/tests/slow_tests.rs | 337 + vendor/minimal-lexical/tests/stackvec.rs | 32 + vendor/minimal-lexical/tests/vec_tests.rs | 395 + vendor/nom/.cargo-checksum.json | 1 + vendor/nom/.cargo_vcs_info.json | 6 + vendor/nom/CHANGELOG.md | 1551 ++ vendor/nom/Cargo.lock | 282 + vendor/nom/Cargo.toml | 168 + vendor/nom/LICENSE | 20 + vendor/nom/README.md | 331 + vendor/nom/doc/nom_recipes.md | 395 + vendor/nom/src/bits/complete.rs | 197 + vendor/nom/src/bits/mod.rs | 179 + vendor/nom/src/bits/streaming.rs | 170 + vendor/nom/src/branch/mod.rs | 267 + vendor/nom/src/branch/tests.rs | 142 + vendor/nom/src/bytes/complete.rs | 756 + vendor/nom/src/bytes/mod.rs | 6 + vendor/nom/src/bytes/streaming.rs | 700 + vendor/nom/src/bytes/tests.rs | 636 + vendor/nom/src/character/complete.rs | 1227 ++ vendor/nom/src/character/mod.rs | 116 + vendor/nom/src/character/streaming.rs | 1182 ++ vendor/nom/src/character/tests.rs | 62 + vendor/nom/src/combinator/mod.rs | 809 ++ vendor/nom/src/combinator/tests.rs | 275 + vendor/nom/src/error.rs | 831 ++ vendor/nom/src/internal.rs | 489 + vendor/nom/src/lib.rs | 464 + vendor/nom/src/macros.rs | 23 + vendor/nom/src/multi/mod.rs | 1049 ++ vendor/nom/src/multi/tests.rs | 534 + vendor/nom/src/number/complete.rs | 2126 +++ vendor/nom/src/number/mod.rs | 15 + vendor/nom/src/number/streaming.rs | 2206 +++ 
vendor/nom/src/sequence/mod.rs | 279 + vendor/nom/src/sequence/tests.rs | 290 + vendor/nom/src/str.rs | 536 + vendor/nom/src/traits.rs | 1441 ++ vendor/nom/tests/arithmetic.rs | 94 + vendor/nom/tests/arithmetic_ast.rs | 161 + vendor/nom/tests/css.rs | 45 + vendor/nom/tests/custom_errors.rs | 48 + vendor/nom/tests/escaped.rs | 28 + vendor/nom/tests/float.rs | 46 + vendor/nom/tests/fnmut.rs | 39 + vendor/nom/tests/ini.rs | 207 + vendor/nom/tests/ini_str.rs | 217 + vendor/nom/tests/issues.rs | 242 + vendor/nom/tests/json.rs | 236 + vendor/nom/tests/mp4.rs | 320 + vendor/nom/tests/multiline.rs | 31 + vendor/nom/tests/overflow.rs | 145 + vendor/nom/tests/reborrow_fold.rs | 31 + vendor/prettyplease/.cargo-checksum.json | 1 + vendor/prettyplease/.cargo_vcs_info.json | 6 + vendor/prettyplease/.github/FUNDING.yml | 1 + vendor/prettyplease/.github/workflows/ci.yml | 123 + vendor/prettyplease/Cargo.lock | 54 + vendor/prettyplease/Cargo.toml | 90 + vendor/prettyplease/LICENSE-APACHE | 176 + vendor/prettyplease/LICENSE-MIT | 23 + vendor/prettyplease/README.md | 312 + vendor/prettyplease/build.rs | 21 + vendor/prettyplease/examples/.tokeignore | 1 + vendor/prettyplease/examples/input.rs | 1 + .../examples/output.prettyplease.rs | 593 + vendor/prettyplease/examples/output.rustc.rs | 506 + .../prettyplease/examples/output.rustfmt.rs | 552 + vendor/prettyplease/src/algorithm.rs | 386 + vendor/prettyplease/src/attr.rs | 288 + vendor/prettyplease/src/classify.rs | 324 + vendor/prettyplease/src/convenience.rs | 98 + vendor/prettyplease/src/data.rs | 79 + vendor/prettyplease/src/expr.rs | 1533 ++ vendor/prettyplease/src/file.rs | 17 + vendor/prettyplease/src/fixup.rs | 676 + vendor/prettyplease/src/generics.rs | 426 + vendor/prettyplease/src/item.rs | 1813 +++ vendor/prettyplease/src/iter.rs | 46 + vendor/prettyplease/src/lib.rs | 385 + vendor/prettyplease/src/lifetime.rs | 9 + vendor/prettyplease/src/lit.rs | 57 + vendor/prettyplease/src/mac.rs | 706 + vendor/prettyplease/src/pat.rs | 254 + vendor/prettyplease/src/path.rs | 194 + vendor/prettyplease/src/precedence.rs | 148 + vendor/prettyplease/src/ring.rs | 81 + vendor/prettyplease/src/stmt.rs | 221 + vendor/prettyplease/src/token.rs | 80 + vendor/prettyplease/src/ty.rs | 326 + vendor/prettyplease/tests/test.rs | 51 + vendor/prettyplease/tests/test_precedence.rs | 900 ++ vendor/proc-macro2/.cargo-checksum.json | 1 + vendor/proc-macro2/.cargo_vcs_info.json | 6 + vendor/proc-macro2/.github/FUNDING.yml | 1 + vendor/proc-macro2/.github/workflows/ci.yml | 232 + vendor/proc-macro2/Cargo.lock | 326 + vendor/proc-macro2/Cargo.toml | 105 + vendor/proc-macro2/LICENSE-APACHE | 176 + vendor/proc-macro2/LICENSE-MIT | 23 + vendor/proc-macro2/README.md | 94 + vendor/proc-macro2/build.rs | 267 + vendor/proc-macro2/rust-toolchain.toml | 2 + vendor/proc-macro2/src/detection.rs | 75 + vendor/proc-macro2/src/extra.rs | 151 + vendor/proc-macro2/src/fallback.rs | 1256 ++ vendor/proc-macro2/src/lib.rs | 1495 ++ vendor/proc-macro2/src/location.rs | 29 + vendor/proc-macro2/src/marker.rs | 17 + vendor/proc-macro2/src/num.rs | 17 + vendor/proc-macro2/src/parse.rs | 995 ++ vendor/proc-macro2/src/probe.rs | 10 + .../proc-macro2/src/probe/proc_macro_span.rs | 51 + .../src/probe/proc_macro_span_file.rs | 14 + .../src/probe/proc_macro_span_location.rs | 21 + vendor/proc-macro2/src/rcvec.rs | 146 + .../proc-macro2/src/rustc_literal_escaper.rs | 701 + vendor/proc-macro2/src/wrapper.rs | 984 ++ vendor/proc-macro2/tests/comments.rs | 105 + vendor/proc-macro2/tests/features.rs | 10 + 
vendor/proc-macro2/tests/marker.rs | 97 + vendor/proc-macro2/tests/test.rs | 1094 ++ vendor/proc-macro2/tests/test_fmt.rs | 28 + vendor/proc-macro2/tests/test_size.rs | 81 + vendor/quote/.cargo-checksum.json | 1 + vendor/quote/.cargo_vcs_info.json | 6 + vendor/quote/.github/FUNDING.yml | 1 + vendor/quote/.github/workflows/ci.yml | 112 + vendor/quote/Cargo.lock | 256 + vendor/quote/Cargo.toml | 70 + vendor/quote/LICENSE-APACHE | 176 + vendor/quote/LICENSE-MIT | 23 + vendor/quote/README.md | 271 + vendor/quote/build.rs | 32 + vendor/quote/rust-toolchain.toml | 2 + vendor/quote/src/ext.rs | 136 + vendor/quote/src/format.rs | 168 + vendor/quote/src/ident_fragment.rs | 88 + vendor/quote/src/lib.rs | 1455 ++ vendor/quote/src/runtime.rs | 503 + vendor/quote/src/spanned.rs | 49 + vendor/quote/src/to_tokens.rs | 271 + vendor/quote/tests/compiletest.rs | 7 + vendor/quote/tests/test.rs | 568 + .../ui/does-not-have-iter-interpolated-dup.rs | 9 + ...does-not-have-iter-interpolated-dup.stderr | 13 + .../ui/does-not-have-iter-interpolated.rs | 9 + .../ui/does-not-have-iter-interpolated.stderr | 13 + .../tests/ui/does-not-have-iter-separated.rs | 5 + .../ui/does-not-have-iter-separated.stderr | 13 + vendor/quote/tests/ui/does-not-have-iter.rs | 5 + .../quote/tests/ui/does-not-have-iter.stderr | 13 + vendor/quote/tests/ui/not-quotable.rs | 7 + vendor/quote/tests/ui/not-quotable.stderr | 20 + vendor/quote/tests/ui/not-repeatable.rs | 8 + vendor/quote/tests/ui/not-repeatable.stderr | 42 + vendor/quote/tests/ui/wrong-type-span.rs | 7 + vendor/quote/tests/ui/wrong-type-span.stderr | 10 + vendor/regex-automata/.cargo-checksum.json | 1 + vendor/regex-automata/.cargo_vcs_info.json | 6 + vendor/regex-automata/Cargo.lock | 372 + vendor/regex-automata/Cargo.toml | 200 + vendor/regex-automata/LICENSE-APACHE | 201 + vendor/regex-automata/LICENSE-MIT | 25 + vendor/regex-automata/README.md | 117 + vendor/regex-automata/src/dfa/accel.rs | 517 + vendor/regex-automata/src/dfa/automaton.rs | 2260 +++ vendor/regex-automata/src/dfa/dense.rs | 5260 +++++++ vendor/regex-automata/src/dfa/determinize.rs | 599 + vendor/regex-automata/src/dfa/minimize.rs | 463 + vendor/regex-automata/src/dfa/mod.rs | 360 + vendor/regex-automata/src/dfa/onepass.rs | 3192 ++++ vendor/regex-automata/src/dfa/regex.rs | 870 ++ vendor/regex-automata/src/dfa/remapper.rs | 242 + vendor/regex-automata/src/dfa/search.rs | 644 + vendor/regex-automata/src/dfa/sparse.rs | 2655 ++++ vendor/regex-automata/src/dfa/special.rs | 494 + vendor/regex-automata/src/dfa/start.rs | 74 + vendor/regex-automata/src/hybrid/dfa.rs | 4434 ++++++ vendor/regex-automata/src/hybrid/error.rs | 241 + vendor/regex-automata/src/hybrid/id.rs | 354 + vendor/regex-automata/src/hybrid/mod.rs | 144 + vendor/regex-automata/src/hybrid/regex.rs | 895 ++ vendor/regex-automata/src/hybrid/search.rs | 802 + vendor/regex-automata/src/lib.rs | 651 + vendor/regex-automata/src/macros.rs | 20 + vendor/regex-automata/src/meta/error.rs | 241 + vendor/regex-automata/src/meta/limited.rs | 251 + vendor/regex-automata/src/meta/literal.rs | 81 + vendor/regex-automata/src/meta/mod.rs | 62 + vendor/regex-automata/src/meta/regex.rs | 3706 +++++ .../regex-automata/src/meta/reverse_inner.rs | 220 + vendor/regex-automata/src/meta/stopat.rs | 212 + vendor/regex-automata/src/meta/strategy.rs | 1905 +++ vendor/regex-automata/src/meta/wrappers.rs | 1336 ++ vendor/regex-automata/src/nfa/mod.rs | 55 + .../src/nfa/thompson/backtrack.rs | 1908 +++ .../src/nfa/thompson/builder.rs | 1337 ++ .../src/nfa/thompson/compiler.rs | 
2368 +++ .../regex-automata/src/nfa/thompson/error.rs | 182 + .../src/nfa/thompson/literal_trie.rs | 528 + vendor/regex-automata/src/nfa/thompson/map.rs | 296 + vendor/regex-automata/src/nfa/thompson/mod.rs | 81 + vendor/regex-automata/src/nfa/thompson/nfa.rs | 2098 +++ .../regex-automata/src/nfa/thompson/pikevm.rs | 2359 +++ .../src/nfa/thompson/range_trie.rs | 1051 ++ vendor/regex-automata/src/util/alphabet.rs | 1139 ++ vendor/regex-automata/src/util/captures.rs | 2551 ++++ .../src/util/determinize/mod.rs | 682 + .../src/util/determinize/state.rs | 907 ++ vendor/regex-automata/src/util/empty.rs | 265 + vendor/regex-automata/src/util/escape.rs | 84 + vendor/regex-automata/src/util/int.rs | 246 + vendor/regex-automata/src/util/interpolate.rs | 576 + vendor/regex-automata/src/util/iter.rs | 1022 ++ vendor/regex-automata/src/util/lazy.rs | 461 + vendor/regex-automata/src/util/look.rs | 2547 ++++ vendor/regex-automata/src/util/memchr.rs | 93 + vendor/regex-automata/src/util/mod.rs | 57 + vendor/regex-automata/src/util/pool.rs | 1199 ++ .../src/util/prefilter/aho_corasick.rs | 149 + .../src/util/prefilter/byteset.rs | 58 + .../src/util/prefilter/memchr.rs | 186 + .../src/util/prefilter/memmem.rs | 88 + .../regex-automata/src/util/prefilter/mod.rs | 719 + .../src/util/prefilter/teddy.rs | 160 + vendor/regex-automata/src/util/primitives.rs | 776 + vendor/regex-automata/src/util/search.rs | 1988 +++ vendor/regex-automata/src/util/sparse_set.rs | 239 + vendor/regex-automata/src/util/start.rs | 479 + vendor/regex-automata/src/util/syntax.rs | 482 + .../src/util/unicode_data/mod.rs | 17 + .../src/util/unicode_data/perl_word.rs | 806 + vendor/regex-automata/src/util/utf8.rs | 191 + vendor/regex-automata/src/util/wire.rs | 947 ++ vendor/regex-automata/test | 95 + vendor/regex-automata/tests/dfa/api.rs | 162 + vendor/regex-automata/tests/dfa/mod.rs | 8 + .../regex-automata/tests/dfa/onepass/mod.rs | 2 + .../regex-automata/tests/dfa/onepass/suite.rs | 197 + vendor/regex-automata/tests/dfa/regression.rs | 48 + vendor/regex-automata/tests/dfa/suite.rs | 443 + vendor/regex-automata/tests/fuzz/dense.rs | 52 + vendor/regex-automata/tests/fuzz/mod.rs | 2 + vendor/regex-automata/tests/fuzz/sparse.rs | 132 + ...h-9486fb7c8a93b12c12a62166b43d31640c0208a9 | Bin 0 -> 1894 bytes ...m-9486fb7c8a93b12c12a62166b43d31640c0208a9 | Bin 0 -> 1882 bytes ...h-0da59c0434eaf35e5a6b470fa9244bb79c72b000 | Bin 0 -> 941 bytes ...h-18cfc246f2ddfc3dfc92b0c7893178c7cf65efa9 | Bin 0 -> 924 bytes ...h-61fd8e3003bf9d99f6c1e5a8488727eefd234b98 | Bin 0 -> 933 bytes ...h-a1b839d899ced76d5d7d0f78f9edb7a421505838 | Bin 0 -> 802 bytes ...h-c383ae07ec5e191422eadc492117439011816570 | Bin 0 -> 924 bytes ...h-d07703ceb94b10dcd9e4acb809f2051420449e2b | Bin 0 -> 922 bytes ...h-dbb8172d3984e7e7d03f4b5f8bb86ecd1460eff9 | Bin 0 -> 728 bytes vendor/regex-automata/tests/gen/README.md | 65 + vendor/regex-automata/tests/gen/dense/mod.rs | 22 + .../tests/gen/dense/multi_pattern_v2.rs | 43 + .../dense/multi_pattern_v2_fwd.bigendian.dfa | Bin 0 -> 11100 bytes .../multi_pattern_v2_fwd.littleendian.dfa | Bin 0 -> 11100 bytes .../dense/multi_pattern_v2_rev.bigendian.dfa | Bin 0 -> 7584 bytes .../multi_pattern_v2_rev.littleendian.dfa | Bin 0 -> 7584 bytes vendor/regex-automata/tests/gen/mod.rs | 2 + vendor/regex-automata/tests/gen/sparse/mod.rs | 22 + .../tests/gen/sparse/multi_pattern_v2.rs | 37 + .../sparse/multi_pattern_v2_fwd.bigendian.dfa | Bin 0 -> 3476 bytes .../multi_pattern_v2_fwd.littleendian.dfa | Bin 0 -> 3476 bytes 
.../sparse/multi_pattern_v2_rev.bigendian.dfa | Bin 0 -> 1920 bytes .../multi_pattern_v2_rev.littleendian.dfa | Bin 0 -> 1920 bytes vendor/regex-automata/tests/hybrid/api.rs | 171 + vendor/regex-automata/tests/hybrid/mod.rs | 3 + vendor/regex-automata/tests/hybrid/suite.rs | 347 + vendor/regex-automata/tests/lib.rs | 115 + vendor/regex-automata/tests/meta/mod.rs | 2 + vendor/regex-automata/tests/meta/suite.rs | 200 + vendor/regex-automata/tests/nfa/mod.rs | 1 + .../tests/nfa/thompson/backtrack/mod.rs | 2 + .../tests/nfa/thompson/backtrack/suite.rs | 213 + .../regex-automata/tests/nfa/thompson/mod.rs | 4 + .../tests/nfa/thompson/pikevm/mod.rs | 2 + .../tests/nfa/thompson/pikevm/suite.rs | 162 + vendor/regex-syntax/.cargo-checksum.json | 1 + vendor/regex-syntax/.cargo_vcs_info.json | 6 + vendor/regex-syntax/Cargo.lock | 65 + vendor/regex-syntax/Cargo.toml | 81 + vendor/regex-syntax/LICENSE-APACHE | 201 + vendor/regex-syntax/LICENSE-MIT | 25 + vendor/regex-syntax/README.md | 96 + vendor/regex-syntax/benches/bench.rs | 63 + vendor/regex-syntax/src/ast/mod.rs | 1807 +++ vendor/regex-syntax/src/ast/parse.rs | 6377 ++++++++ vendor/regex-syntax/src/ast/print.rs | 577 + vendor/regex-syntax/src/ast/visitor.rs | 522 + vendor/regex-syntax/src/debug.rs | 107 + vendor/regex-syntax/src/either.rs | 8 + vendor/regex-syntax/src/error.rs | 311 + vendor/regex-syntax/src/hir/interval.rs | 564 + vendor/regex-syntax/src/hir/literal.rs | 3214 ++++ vendor/regex-syntax/src/hir/mod.rs | 3873 +++++ vendor/regex-syntax/src/hir/print.rs | 608 + vendor/regex-syntax/src/hir/translate.rs | 3740 +++++ vendor/regex-syntax/src/hir/visitor.rs | 215 + vendor/regex-syntax/src/lib.rs | 433 + vendor/regex-syntax/src/parser.rs | 254 + vendor/regex-syntax/src/rank.rs | 258 + vendor/regex-syntax/src/unicode.rs | 1041 ++ .../src/unicode_tables/LICENSE-UNICODE | 57 + vendor/regex-syntax/src/unicode_tables/age.rs | 1846 +++ .../src/unicode_tables/case_folding_simple.rs | 2948 ++++ .../src/unicode_tables/general_category.rs | 6717 +++++++++ .../unicode_tables/grapheme_cluster_break.rs | 1420 ++ vendor/regex-syntax/src/unicode_tables/mod.rs | 57 + .../src/unicode_tables/perl_decimal.rs | 84 + .../src/unicode_tables/perl_space.rs | 23 + .../src/unicode_tables/perl_word.rs | 806 + .../src/unicode_tables/property_bool.rs | 12095 ++++++++++++++++ .../src/unicode_tables/property_names.rs | 281 + .../src/unicode_tables/property_values.rs | 956 ++ .../regex-syntax/src/unicode_tables/script.rs | 1300 ++ .../src/unicode_tables/script_extension.rs | 1718 +++ .../src/unicode_tables/sentence_break.rs | 2530 ++++ .../src/unicode_tables/word_break.rs | 1152 ++ vendor/regex-syntax/src/utf8.rs | 592 + vendor/regex-syntax/test | 30 + vendor/regex/.cargo-checksum.json | 1 + vendor/regex/.cargo_vcs_info.json | 6 + vendor/regex/.vim/coc-settings.json | 6 + vendor/regex/CHANGELOG.md | 1742 +++ vendor/regex/Cargo.lock | 383 + vendor/regex/Cargo.toml | 207 + vendor/regex/Cross.toml | 7 + vendor/regex/LICENSE-APACHE | 201 + vendor/regex/LICENSE-MIT | 25 + vendor/regex/README.md | 336 + vendor/regex/UNICODE.md | 258 + vendor/regex/bench/README.md | 2 + vendor/regex/rustfmt.toml | 2 + vendor/regex/src/builders.rs | 2539 ++++ vendor/regex/src/bytes.rs | 91 + vendor/regex/src/error.rs | 101 + vendor/regex/src/find_byte.rs | 17 + vendor/regex/src/lib.rs | 1353 ++ vendor/regex/src/pattern.rs | 67 + vendor/regex/src/regex/bytes.rs | 2722 ++++ vendor/regex/src/regex/mod.rs | 2 + vendor/regex/src/regex/string.rs | 2625 ++++ vendor/regex/src/regexset/bytes.rs | 728 + 
vendor/regex/src/regexset/mod.rs | 2 + vendor/regex/src/regexset/string.rs | 724 + vendor/regex/test | 46 + vendor/regex/testdata/README.md | 22 + vendor/regex/testdata/anchored.toml | 127 + vendor/regex/testdata/bytes.toml | 235 + vendor/regex/testdata/crazy.toml | 315 + vendor/regex/testdata/crlf.toml | 117 + vendor/regex/testdata/earliest.toml | 52 + vendor/regex/testdata/empty.toml | 113 + vendor/regex/testdata/expensive.toml | 23 + vendor/regex/testdata/flags.toml | 68 + vendor/regex/testdata/fowler/basic.toml | 1611 ++ vendor/regex/testdata/fowler/dat/README | 25 + vendor/regex/testdata/fowler/dat/basic.dat | 223 + .../regex/testdata/fowler/dat/nullsubexpr.dat | 74 + .../regex/testdata/fowler/dat/repetition.dat | 169 + vendor/regex/testdata/fowler/nullsubexpr.toml | 405 + vendor/regex/testdata/fowler/repetition.toml | 746 + vendor/regex/testdata/iter.toml | 143 + vendor/regex/testdata/leftmost-all.toml | 25 + vendor/regex/testdata/line-terminator.toml | 109 + vendor/regex/testdata/misc.toml | 99 + vendor/regex/testdata/multiline.toml | 845 ++ vendor/regex/testdata/no-unicode.toml | 222 + vendor/regex/testdata/overlapping.toml | 280 + vendor/regex/testdata/regex-lite.toml | 98 + vendor/regex/testdata/regression.toml | 830 ++ vendor/regex/testdata/set.toml | 641 + vendor/regex/testdata/substring.toml | 36 + vendor/regex/testdata/unicode.toml | 517 + vendor/regex/testdata/utf8.toml | 399 + .../regex/testdata/word-boundary-special.toml | 687 + vendor/regex/testdata/word-boundary.toml | 781 + vendor/regex/tests/lib.rs | 58 + vendor/regex/tests/misc.rs | 143 + vendor/regex/tests/regression.rs | 94 + vendor/regex/tests/regression_fuzz.rs | 61 + vendor/regex/tests/replace.rs | 183 + vendor/regex/tests/searcher.rs | 93 + vendor/regex/tests/suite_bytes.rs | 108 + vendor/regex/tests/suite_bytes_set.rs | 71 + vendor/regex/tests/suite_string.rs | 113 + vendor/regex/tests/suite_string_set.rs | 78 + vendor/rustc-hash/.cargo-checksum.json | 1 + vendor/rustc-hash/.cargo_vcs_info.json | 6 + vendor/rustc-hash/.github/workflows/rust.yml | 73 + vendor/rustc-hash/CHANGELOG.md | 32 + vendor/rustc-hash/CODE_OF_CONDUCT.md | 3 + vendor/rustc-hash/Cargo.lock | 75 + vendor/rustc-hash/Cargo.toml | 49 + vendor/rustc-hash/LICENSE-APACHE | 176 + vendor/rustc-hash/LICENSE-MIT | 23 + vendor/rustc-hash/README.md | 42 + vendor/rustc-hash/src/lib.rs | 459 + vendor/rustc-hash/src/random_state.rs | 101 + vendor/rustc-hash/src/seeded_state.rs | 76 + vendor/shlex/.cargo-checksum.json | 1 + vendor/shlex/.cargo_vcs_info.json | 6 + vendor/shlex/.github/workflows/test.yml | 36 + vendor/shlex/CHANGELOG.md | 21 + vendor/shlex/Cargo.toml | 35 + vendor/shlex/LICENSE-APACHE | 13 + vendor/shlex/LICENSE-MIT | 21 + vendor/shlex/README.md | 39 + vendor/shlex/src/bytes.rs | 576 + vendor/shlex/src/lib.rs | 358 + vendor/shlex/src/quoting_warning.md | 365 + vendor/syn/.cargo-checksum.json | 1 + vendor/syn/.cargo_vcs_info.json | 6 + vendor/syn/Cargo.lock | 1819 +++ vendor/syn/Cargo.toml | 272 + vendor/syn/LICENSE-APACHE | 176 + vendor/syn/LICENSE-MIT | 23 + vendor/syn/README.md | 284 + vendor/syn/benches/file.rs | 59 + vendor/syn/benches/rust.rs | 194 + vendor/syn/src/attr.rs | 836 ++ vendor/syn/src/bigint.rs | 66 + vendor/syn/src/buffer.rs | 435 + vendor/syn/src/classify.rs | 311 + vendor/syn/src/custom_keyword.rs | 260 + vendor/syn/src/custom_punctuation.rs | 305 + vendor/syn/src/data.rs | 424 + vendor/syn/src/derive.rs | 259 + vendor/syn/src/discouraged.rs | 225 + vendor/syn/src/drops.rs | 58 + vendor/syn/src/error.rs | 468 + 
vendor/syn/src/export.rs | 73 + vendor/syn/src/expr.rs | 4173 ++++++ vendor/syn/src/ext.rs | 179 + vendor/syn/src/file.rs | 125 + vendor/syn/src/fixup.rs | 773 + vendor/syn/src/gen/clone.rs | 2267 +++ vendor/syn/src/gen/debug.rs | 3238 +++++ vendor/syn/src/gen/eq.rs | 2306 +++ vendor/syn/src/gen/fold.rs | 3902 +++++ vendor/syn/src/gen/hash.rs | 2876 ++++ vendor/syn/src/gen/token.css | 737 + vendor/syn/src/gen/visit.rs | 3941 +++++ vendor/syn/src/gen/visit_mut.rs | 3759 +++++ vendor/syn/src/generics.rs | 1477 ++ vendor/syn/src/group.rs | 291 + vendor/syn/src/ident.rs | 108 + vendor/syn/src/item.rs | 3490 +++++ vendor/syn/src/lib.rs | 1009 ++ vendor/syn/src/lifetime.rs | 155 + vendor/syn/src/lit.rs | 1918 +++ vendor/syn/src/lookahead.rs | 348 + vendor/syn/src/mac.rs | 225 + vendor/syn/src/macros.rs | 182 + vendor/syn/src/meta.rs | 427 + vendor/syn/src/op.rs | 219 + vendor/syn/src/parse.rs | 1419 ++ vendor/syn/src/parse_macro_input.rs | 128 + vendor/syn/src/parse_quote.rs | 240 + vendor/syn/src/pat.rs | 955 ++ vendor/syn/src/path.rs | 966 ++ vendor/syn/src/precedence.rs | 210 + vendor/syn/src/print.rs | 16 + vendor/syn/src/punctuated.rs | 1169 ++ vendor/syn/src/restriction.rs | 178 + vendor/syn/src/scan_expr.rs | 268 + vendor/syn/src/sealed.rs | 4 + vendor/syn/src/span.rs | 63 + vendor/syn/src/spanned.rs | 118 + vendor/syn/src/stmt.rs | 484 + vendor/syn/src/thread.rs | 60 + vendor/syn/src/token.rs | 1093 ++ vendor/syn/src/tt.rs | 96 + vendor/syn/src/ty.rs | 1271 ++ vendor/syn/src/verbatim.rs | 33 + vendor/syn/src/whitespace.rs | 65 + vendor/syn/tests/common/eq.rs | 898 ++ vendor/syn/tests/common/mod.rs | 6 + vendor/syn/tests/common/parse.rs | 52 + vendor/syn/tests/common/visit.rs | 119 + vendor/syn/tests/debug/gen.rs | 5239 +++++++ vendor/syn/tests/debug/mod.rs | 147 + vendor/syn/tests/macros/mod.rs | 7 + vendor/syn/tests/regression.rs | 5 + vendor/syn/tests/regression/issue1108.rs | 5 + vendor/syn/tests/regression/issue1235.rs | 32 + vendor/syn/tests/repo/mod.rs | 630 + vendor/syn/tests/repo/progress.rs | 37 + vendor/syn/tests/snapshot/mod.rs | 68 + vendor/syn/tests/test_asyncness.rs | 49 + vendor/syn/tests/test_attribute.rs | 231 + vendor/syn/tests/test_derive_input.rs | 785 + vendor/syn/tests/test_expr.rs | 1702 +++ vendor/syn/tests/test_generics.rs | 345 + vendor/syn/tests/test_grouping.rs | 59 + vendor/syn/tests/test_ident.rs | 87 + vendor/syn/tests/test_item.rs | 316 + vendor/syn/tests/test_lit.rs | 335 + vendor/syn/tests/test_meta.rs | 180 + vendor/syn/tests/test_parse_buffer.rs | 103 + vendor/syn/tests/test_parse_quote.rs | 172 + vendor/syn/tests/test_parse_stream.rs | 187 + vendor/syn/tests/test_pat.rs | 158 + vendor/syn/tests/test_path.rs | 116 + vendor/syn/tests/test_precedence.rs | 558 + vendor/syn/tests/test_punctuated.rs | 92 + vendor/syn/tests/test_receiver.rs | 327 + vendor/syn/tests/test_round_trip.rs | 256 + vendor/syn/tests/test_shebang.rs | 73 + vendor/syn/tests/test_size.rs | 54 + vendor/syn/tests/test_stmt.rs | 337 + vendor/syn/tests/test_token_trees.rs | 38 + vendor/syn/tests/test_ty.rs | 471 + vendor/syn/tests/test_unparenthesize.rs | 70 + vendor/syn/tests/test_visibility.rs | 191 + vendor/syn/tests/zzz_stable.rs | 33 + vendor/unicode-ident/.cargo-checksum.json | 1 + vendor/unicode-ident/.cargo_vcs_info.json | 6 + vendor/unicode-ident/.github/FUNDING.yml | 1 + vendor/unicode-ident/.github/workflows/ci.yml | 110 + vendor/unicode-ident/Cargo.lock | 499 + vendor/unicode-ident/Cargo.toml | 84 + vendor/unicode-ident/LICENSE-APACHE | 176 + vendor/unicode-ident/LICENSE-MIT | 
23 + vendor/unicode-ident/LICENSE-UNICODE | 39 + vendor/unicode-ident/README.md | 274 + vendor/unicode-ident/benches/xid.rs | 126 + vendor/unicode-ident/src/lib.rs | 281 + vendor/unicode-ident/src/tables.rs | 663 + vendor/unicode-ident/tests/compare.rs | 68 + vendor/unicode-ident/tests/fst/.gitignore | 1 + vendor/unicode-ident/tests/fst/mod.rs | 11 + .../unicode-ident/tests/fst/xid_continue.fst | Bin 0 -> 76143 bytes vendor/unicode-ident/tests/fst/xid_start.fst | Bin 0 -> 67370 bytes vendor/unicode-ident/tests/roaring/mod.rs | 23 + vendor/unicode-ident/tests/static_size.rs | 95 + vendor/unicode-ident/tests/tables/mod.rs | 7 + vendor/unicode-ident/tests/tables/tables.rs | 361 + vendor/unicode-ident/tests/trie/mod.rs | 7 + vendor/unicode-ident/tests/trie/trie.rs | 453 + vendor/windows-link/.cargo-checksum.json | 1 + vendor/windows-link/.cargo_vcs_info.json | 6 + vendor/windows-link/Cargo.lock | 7 + vendor/windows-link/Cargo.toml | 39 + vendor/windows-link/license-apache-2.0 | 201 + vendor/windows-link/license-mit | 21 + vendor/windows-link/readme.md | 26 + vendor/windows-link/src/lib.rs | 39 + 1282 files changed, 546705 insertions(+) create mode 100644 vendor/aho-corasick/.cargo-checksum.json create mode 100644 vendor/aho-corasick/.cargo_vcs_info.json create mode 100644 vendor/aho-corasick/.github/FUNDING.yml create mode 100644 vendor/aho-corasick/.github/workflows/ci.yml create mode 100644 vendor/aho-corasick/.vim/coc-settings.json create mode 100644 vendor/aho-corasick/COPYING create mode 100644 vendor/aho-corasick/Cargo.lock create mode 100644 vendor/aho-corasick/Cargo.toml create mode 100644 vendor/aho-corasick/DESIGN.md create mode 100644 vendor/aho-corasick/LICENSE-MIT create mode 100644 vendor/aho-corasick/README.md create mode 100644 vendor/aho-corasick/UNLICENSE create mode 100644 vendor/aho-corasick/rustfmt.toml create mode 100644 vendor/aho-corasick/src/ahocorasick.rs create mode 100644 vendor/aho-corasick/src/automaton.rs create mode 100644 vendor/aho-corasick/src/dfa.rs create mode 100644 vendor/aho-corasick/src/lib.rs create mode 100644 vendor/aho-corasick/src/macros.rs create mode 100644 vendor/aho-corasick/src/nfa/contiguous.rs create mode 100644 vendor/aho-corasick/src/nfa/mod.rs create mode 100644 vendor/aho-corasick/src/nfa/noncontiguous.rs create mode 100644 vendor/aho-corasick/src/packed/api.rs create mode 100644 vendor/aho-corasick/src/packed/ext.rs create mode 100644 vendor/aho-corasick/src/packed/mod.rs create mode 100644 vendor/aho-corasick/src/packed/pattern.rs create mode 100644 vendor/aho-corasick/src/packed/rabinkarp.rs create mode 100644 vendor/aho-corasick/src/packed/teddy/README.md create mode 100644 vendor/aho-corasick/src/packed/teddy/builder.rs create mode 100644 vendor/aho-corasick/src/packed/teddy/generic.rs create mode 100644 vendor/aho-corasick/src/packed/teddy/mod.rs create mode 100644 vendor/aho-corasick/src/packed/tests.rs create mode 100644 vendor/aho-corasick/src/packed/vector.rs create mode 100644 vendor/aho-corasick/src/tests.rs create mode 100644 vendor/aho-corasick/src/transducer.rs create mode 100644 vendor/aho-corasick/src/util/alphabet.rs create mode 100644 vendor/aho-corasick/src/util/buffer.rs create mode 100644 vendor/aho-corasick/src/util/byte_frequencies.rs create mode 100644 vendor/aho-corasick/src/util/debug.rs create mode 100644 vendor/aho-corasick/src/util/error.rs create mode 100644 vendor/aho-corasick/src/util/int.rs create mode 100644 vendor/aho-corasick/src/util/mod.rs create mode 100644 vendor/aho-corasick/src/util/prefilter.rs 
create mode 100644 vendor/aho-corasick/src/util/primitives.rs create mode 100644 vendor/aho-corasick/src/util/remapper.rs create mode 100644 vendor/aho-corasick/src/util/search.rs create mode 100644 vendor/aho-corasick/src/util/special.rs create mode 100644 vendor/base64/.cargo-checksum.json create mode 100644 vendor/base64/.cargo_vcs_info.json create mode 100644 vendor/base64/.circleci/config.yml create mode 100644 vendor/base64/.github/ISSUE_TEMPLATE/general-purpose-issue.md create mode 100644 vendor/base64/Cargo.lock create mode 100644 vendor/base64/Cargo.toml create mode 100644 vendor/base64/LICENSE-APACHE create mode 100644 vendor/base64/LICENSE-MIT create mode 100644 vendor/base64/README.md create mode 100644 vendor/base64/RELEASE-NOTES.md create mode 100644 vendor/base64/benches/benchmarks.rs create mode 100644 vendor/base64/clippy.toml create mode 100644 vendor/base64/examples/base64.rs create mode 100644 vendor/base64/icon_CLion.svg create mode 100644 vendor/base64/src/alphabet.rs create mode 100644 vendor/base64/src/chunked_encoder.rs create mode 100644 vendor/base64/src/decode.rs create mode 100644 vendor/base64/src/display.rs create mode 100644 vendor/base64/src/encode.rs create mode 100644 vendor/base64/src/engine/general_purpose/decode.rs create mode 100644 vendor/base64/src/engine/general_purpose/decode_suffix.rs create mode 100644 vendor/base64/src/engine/general_purpose/mod.rs create mode 100644 vendor/base64/src/engine/mod.rs create mode 100644 vendor/base64/src/engine/naive.rs create mode 100644 vendor/base64/src/engine/tests.rs create mode 100644 vendor/base64/src/lib.rs create mode 100644 vendor/base64/src/prelude.rs create mode 100644 vendor/base64/src/read/decoder.rs create mode 100644 vendor/base64/src/read/decoder_tests.rs create mode 100644 vendor/base64/src/read/mod.rs create mode 100644 vendor/base64/src/tests.rs create mode 100644 vendor/base64/src/write/encoder.rs create mode 100644 vendor/base64/src/write/encoder_string_writer.rs create mode 100644 vendor/base64/src/write/encoder_tests.rs create mode 100644 vendor/base64/src/write/mod.rs create mode 100644 vendor/base64/tests/encode.rs create mode 100644 vendor/base64/tests/tests.rs create mode 100644 vendor/bindgen/.cargo-checksum.json create mode 100644 vendor/bindgen/.cargo_vcs_info.json create mode 100644 vendor/bindgen/Cargo.lock create mode 100644 vendor/bindgen/Cargo.toml create mode 100644 vendor/bindgen/LICENSE create mode 100644 vendor/bindgen/README.md create mode 100644 vendor/bindgen/build.rs create mode 100644 vendor/bindgen/callbacks.rs create mode 100644 vendor/bindgen/clang.rs create mode 100644 vendor/bindgen/codegen/bitfield_unit.rs create mode 100644 vendor/bindgen/codegen/bitfield_unit_raw_ref_macros.rs create mode 100644 vendor/bindgen/codegen/bitfield_unit_tests.rs create mode 100644 vendor/bindgen/codegen/dyngen.rs create mode 100644 vendor/bindgen/codegen/error.rs create mode 100644 vendor/bindgen/codegen/helpers.rs create mode 100644 vendor/bindgen/codegen/impl_debug.rs create mode 100644 vendor/bindgen/codegen/impl_partialeq.rs create mode 100644 vendor/bindgen/codegen/mod.rs create mode 100644 vendor/bindgen/codegen/postprocessing/merge_extern_blocks.rs create mode 100644 vendor/bindgen/codegen/postprocessing/mod.rs create mode 100644 vendor/bindgen/codegen/postprocessing/sort_semantically.rs create mode 100644 vendor/bindgen/codegen/serialize.rs create mode 100644 vendor/bindgen/codegen/struct_layout.rs create mode 100644 vendor/bindgen/deps.rs create mode 100644 
vendor/bindgen/diagnostics.rs create mode 100644 vendor/bindgen/extra_assertions.rs create mode 100644 vendor/bindgen/features.rs create mode 100644 vendor/bindgen/ir/analysis/derive.rs create mode 100644 vendor/bindgen/ir/analysis/has_destructor.rs create mode 100644 vendor/bindgen/ir/analysis/has_float.rs create mode 100644 vendor/bindgen/ir/analysis/has_type_param_in_array.rs create mode 100644 vendor/bindgen/ir/analysis/has_vtable.rs create mode 100644 vendor/bindgen/ir/analysis/mod.rs create mode 100644 vendor/bindgen/ir/analysis/sizedness.rs create mode 100644 vendor/bindgen/ir/analysis/template_params.rs create mode 100644 vendor/bindgen/ir/annotations.rs create mode 100644 vendor/bindgen/ir/comment.rs create mode 100644 vendor/bindgen/ir/comp.rs create mode 100644 vendor/bindgen/ir/context.rs create mode 100644 vendor/bindgen/ir/derive.rs create mode 100644 vendor/bindgen/ir/dot.rs create mode 100644 vendor/bindgen/ir/enum_ty.rs create mode 100644 vendor/bindgen/ir/function.rs create mode 100644 vendor/bindgen/ir/int.rs create mode 100644 vendor/bindgen/ir/item.rs create mode 100644 vendor/bindgen/ir/item_kind.rs create mode 100644 vendor/bindgen/ir/layout.rs create mode 100644 vendor/bindgen/ir/mod.rs create mode 100644 vendor/bindgen/ir/module.rs create mode 100644 vendor/bindgen/ir/objc.rs create mode 100644 vendor/bindgen/ir/template.rs create mode 100644 vendor/bindgen/ir/traversal.rs create mode 100644 vendor/bindgen/ir/ty.rs create mode 100644 vendor/bindgen/ir/var.rs create mode 100644 vendor/bindgen/lib.rs create mode 100644 vendor/bindgen/log_stubs.rs create mode 100644 vendor/bindgen/options/as_args.rs create mode 100644 vendor/bindgen/options/cli.rs create mode 100644 vendor/bindgen/options/helpers.rs create mode 100644 vendor/bindgen/options/mod.rs create mode 100644 vendor/bindgen/parse.rs create mode 100644 vendor/bindgen/regex_set.rs create mode 100644 vendor/bindgen/time.rs create mode 100644 vendor/bitflags/.cargo-checksum.json create mode 100644 vendor/bitflags/.cargo_vcs_info.json create mode 100644 vendor/bitflags/CHANGELOG.md create mode 100644 vendor/bitflags/CODE_OF_CONDUCT.md create mode 100644 vendor/bitflags/CONTRIBUTING.md create mode 100644 vendor/bitflags/Cargo.lock create mode 100644 vendor/bitflags/Cargo.toml create mode 100644 vendor/bitflags/LICENSE-APACHE create mode 100644 vendor/bitflags/LICENSE-MIT create mode 100644 vendor/bitflags/README.md create mode 100644 vendor/bitflags/SECURITY.md create mode 100644 vendor/bitflags/benches/parse.rs create mode 100644 vendor/bitflags/examples/custom_bits_type.rs create mode 100644 vendor/bitflags/examples/custom_derive.rs create mode 100644 vendor/bitflags/examples/fmt.rs create mode 100644 vendor/bitflags/examples/macro_free.rs create mode 100644 vendor/bitflags/examples/serde.rs create mode 100644 vendor/bitflags/spec.md create mode 100644 vendor/bitflags/src/example_generated.rs create mode 100644 vendor/bitflags/src/external.rs create mode 100644 vendor/bitflags/src/external/arbitrary.rs create mode 100644 vendor/bitflags/src/external/bytemuck.rs create mode 100644 vendor/bitflags/src/external/serde.rs create mode 100644 vendor/bitflags/src/internal.rs create mode 100644 vendor/bitflags/src/iter.rs create mode 100644 vendor/bitflags/src/lib.rs create mode 100644 vendor/bitflags/src/parser.rs create mode 100644 vendor/bitflags/src/public.rs create mode 100644 vendor/bitflags/src/tests.rs create mode 100644 vendor/bitflags/src/tests/all.rs create mode 100644 vendor/bitflags/src/tests/bitflags_match.rs 
create mode 100644 vendor/bitflags/src/tests/bits.rs create mode 100644 vendor/bitflags/src/tests/clear.rs create mode 100644 vendor/bitflags/src/tests/complement.rs create mode 100644 vendor/bitflags/src/tests/contains.rs create mode 100644 vendor/bitflags/src/tests/difference.rs create mode 100644 vendor/bitflags/src/tests/empty.rs create mode 100644 vendor/bitflags/src/tests/eq.rs create mode 100644 vendor/bitflags/src/tests/extend.rs create mode 100644 vendor/bitflags/src/tests/flags.rs create mode 100644 vendor/bitflags/src/tests/fmt.rs create mode 100644 vendor/bitflags/src/tests/from_bits.rs create mode 100644 vendor/bitflags/src/tests/from_bits_retain.rs create mode 100644 vendor/bitflags/src/tests/from_bits_truncate.rs create mode 100644 vendor/bitflags/src/tests/from_name.rs create mode 100644 vendor/bitflags/src/tests/insert.rs create mode 100644 vendor/bitflags/src/tests/intersection.rs create mode 100644 vendor/bitflags/src/tests/intersects.rs create mode 100644 vendor/bitflags/src/tests/is_all.rs create mode 100644 vendor/bitflags/src/tests/is_empty.rs create mode 100644 vendor/bitflags/src/tests/iter.rs create mode 100644 vendor/bitflags/src/tests/parser.rs create mode 100644 vendor/bitflags/src/tests/remove.rs create mode 100644 vendor/bitflags/src/tests/symmetric_difference.rs create mode 100644 vendor/bitflags/src/tests/truncate.rs create mode 100644 vendor/bitflags/src/tests/union.rs create mode 100644 vendor/bitflags/src/tests/unknown.rs create mode 100644 vendor/bitflags/src/traits.rs create mode 100644 vendor/cexpr/.cargo-checksum.json create mode 100644 vendor/cexpr/.cargo_vcs_info.json create mode 100644 vendor/cexpr/.github/workflows/ci.yml create mode 100644 vendor/cexpr/Cargo.toml create mode 100644 vendor/cexpr/LICENSE-APACHE create mode 100644 vendor/cexpr/LICENSE-MIT create mode 100644 vendor/cexpr/bors.toml create mode 100644 vendor/cexpr/rustfmt.toml create mode 100644 vendor/cexpr/src/expr.rs create mode 100644 vendor/cexpr/src/lib.rs create mode 100644 vendor/cexpr/src/literal.rs create mode 100644 vendor/cexpr/src/token.rs create mode 100644 vendor/cexpr/tests/clang.rs create mode 100644 vendor/cexpr/tests/input/chars.h create mode 100644 vendor/cexpr/tests/input/fail.h create mode 100644 vendor/cexpr/tests/input/floats.h create mode 100644 vendor/cexpr/tests/input/int_signed.h create mode 100644 vendor/cexpr/tests/input/int_unsigned.h create mode 100644 vendor/cexpr/tests/input/strings.h create mode 100644 vendor/cexpr/tests/input/test_llvm_bug_9069.h create mode 100644 vendor/cfg-if/.cargo-checksum.json create mode 100644 vendor/cfg-if/.cargo_vcs_info.json create mode 100644 vendor/cfg-if/.github/dependabot.yml create mode 100644 vendor/cfg-if/.github/workflows/main.yaml create mode 100644 vendor/cfg-if/.github/workflows/publish.yaml create mode 100644 vendor/cfg-if/CHANGELOG.md create mode 100644 vendor/cfg-if/Cargo.lock create mode 100644 vendor/cfg-if/Cargo.toml create mode 100644 vendor/cfg-if/LICENSE-APACHE create mode 100644 vendor/cfg-if/LICENSE-MIT create mode 100644 vendor/cfg-if/README.md create mode 100644 vendor/cfg-if/src/lib.rs create mode 100644 vendor/cfg-if/tests/xcrate.rs create mode 100644 vendor/clang-sys/.cargo-checksum.json create mode 100644 vendor/clang-sys/.cargo_vcs_info.json create mode 100644 vendor/clang-sys/.github/workflows/ci.yml create mode 100644 vendor/clang-sys/.github/workflows/ssh.yml create mode 100644 vendor/clang-sys/CHANGELOG.md create mode 100644 vendor/clang-sys/Cargo.toml create mode 100644 
vendor/clang-sys/LICENSE.txt create mode 100644 vendor/clang-sys/README.md create mode 100644 vendor/clang-sys/build.rs create mode 100644 vendor/clang-sys/build/common.rs create mode 100644 vendor/clang-sys/build/dynamic.rs create mode 100644 vendor/clang-sys/build/macros.rs create mode 100644 vendor/clang-sys/build/static.rs create mode 100644 vendor/clang-sys/clippy.toml create mode 100644 vendor/clang-sys/src/lib.rs create mode 100644 vendor/clang-sys/src/link.rs create mode 100644 vendor/clang-sys/src/support.rs create mode 100644 vendor/clang-sys/tests/build.rs create mode 100644 vendor/clang-sys/tests/header.h create mode 100644 vendor/clang-sys/tests/lib.rs create mode 100644 vendor/either/.cargo-checksum.json create mode 100644 vendor/either/.cargo_vcs_info.json create mode 100644 vendor/either/.github/workflows/ci.yml create mode 100644 vendor/either/Cargo.lock create mode 100644 vendor/either/Cargo.toml create mode 100644 vendor/either/LICENSE-APACHE create mode 100644 vendor/either/LICENSE-MIT create mode 100644 vendor/either/README-crates.io.md create mode 100644 vendor/either/README.rst create mode 100644 vendor/either/src/into_either.rs create mode 100644 vendor/either/src/iterator.rs create mode 100644 vendor/either/src/lib.rs create mode 100644 vendor/either/src/serde_untagged.rs create mode 100644 vendor/either/src/serde_untagged_optional.rs create mode 100644 vendor/glob/.cargo-checksum.json create mode 100644 vendor/glob/.cargo_vcs_info.json create mode 100644 vendor/glob/.github/dependabot.yml create mode 100644 vendor/glob/.github/workflows/publish.yml create mode 100644 vendor/glob/.github/workflows/rust.yml create mode 100644 vendor/glob/CHANGELOG.md create mode 100644 vendor/glob/Cargo.lock create mode 100644 vendor/glob/Cargo.toml create mode 100644 vendor/glob/LICENSE-APACHE create mode 100644 vendor/glob/LICENSE-MIT create mode 100644 vendor/glob/README.md create mode 100644 vendor/glob/src/lib.rs create mode 100644 vendor/glob/tests/glob-std.rs create mode 100644 vendor/glob/triagebot.toml create mode 100644 vendor/itertools/.cargo-checksum.json create mode 100644 vendor/itertools/.cargo_vcs_info.json create mode 100644 vendor/itertools/.codecov.yml create mode 100644 vendor/itertools/.github/dependabot.yml create mode 100644 vendor/itertools/.github/workflows/ci.yml create mode 100644 vendor/itertools/.github/workflows/coverage.yml create mode 100644 vendor/itertools/CHANGELOG.md create mode 100644 vendor/itertools/CONTRIBUTING.md create mode 100644 vendor/itertools/Cargo.lock create mode 100644 vendor/itertools/Cargo.toml create mode 100644 vendor/itertools/LICENSE-APACHE create mode 100644 vendor/itertools/LICENSE-MIT create mode 100644 vendor/itertools/README.md create mode 100644 vendor/itertools/benches/bench1.rs create mode 100644 vendor/itertools/benches/combinations.rs create mode 100644 vendor/itertools/benches/combinations_with_replacement.rs create mode 100644 vendor/itertools/benches/fold_specialization.rs create mode 100644 vendor/itertools/benches/powerset.rs create mode 100644 vendor/itertools/benches/specializations.rs create mode 100644 vendor/itertools/benches/tree_reduce.rs create mode 100644 vendor/itertools/benches/tuple_combinations.rs create mode 100644 vendor/itertools/benches/tuples.rs create mode 100644 vendor/itertools/examples/iris.data create mode 100644 vendor/itertools/examples/iris.rs create mode 100644 vendor/itertools/src/adaptors/coalesce.rs create mode 100644 vendor/itertools/src/adaptors/map.rs create mode 100644 
vendor/itertools/src/adaptors/mod.rs create mode 100644 vendor/itertools/src/adaptors/multi_product.rs create mode 100644 vendor/itertools/src/combinations.rs create mode 100644 vendor/itertools/src/combinations_with_replacement.rs create mode 100644 vendor/itertools/src/concat_impl.rs create mode 100644 vendor/itertools/src/cons_tuples_impl.rs create mode 100644 vendor/itertools/src/diff.rs create mode 100644 vendor/itertools/src/duplicates_impl.rs create mode 100644 vendor/itertools/src/either_or_both.rs create mode 100644 vendor/itertools/src/exactly_one_err.rs create mode 100644 vendor/itertools/src/extrema_set.rs create mode 100644 vendor/itertools/src/flatten_ok.rs create mode 100644 vendor/itertools/src/format.rs create mode 100644 vendor/itertools/src/free.rs create mode 100644 vendor/itertools/src/group_map.rs create mode 100644 vendor/itertools/src/groupbylazy.rs create mode 100644 vendor/itertools/src/grouping_map.rs create mode 100644 vendor/itertools/src/impl_macros.rs create mode 100644 vendor/itertools/src/intersperse.rs create mode 100644 vendor/itertools/src/iter_index.rs create mode 100644 vendor/itertools/src/k_smallest.rs create mode 100644 vendor/itertools/src/kmerge_impl.rs create mode 100644 vendor/itertools/src/lazy_buffer.rs create mode 100644 vendor/itertools/src/lib.rs create mode 100644 vendor/itertools/src/merge_join.rs create mode 100644 vendor/itertools/src/minmax.rs create mode 100644 vendor/itertools/src/multipeek_impl.rs create mode 100644 vendor/itertools/src/pad_tail.rs create mode 100644 vendor/itertools/src/peek_nth.rs create mode 100644 vendor/itertools/src/peeking_take_while.rs create mode 100644 vendor/itertools/src/permutations.rs create mode 100644 vendor/itertools/src/powerset.rs create mode 100644 vendor/itertools/src/process_results_impl.rs create mode 100644 vendor/itertools/src/put_back_n_impl.rs create mode 100644 vendor/itertools/src/rciter_impl.rs create mode 100644 vendor/itertools/src/repeatn.rs create mode 100644 vendor/itertools/src/size_hint.rs create mode 100644 vendor/itertools/src/sources.rs create mode 100644 vendor/itertools/src/take_while_inclusive.rs create mode 100644 vendor/itertools/src/tee.rs create mode 100644 vendor/itertools/src/tuple_impl.rs create mode 100644 vendor/itertools/src/unique_impl.rs create mode 100644 vendor/itertools/src/unziptuple.rs create mode 100644 vendor/itertools/src/with_position.rs create mode 100644 vendor/itertools/src/zip_eq_impl.rs create mode 100644 vendor/itertools/src/zip_longest.rs create mode 100644 vendor/itertools/src/ziptuple.rs create mode 100644 vendor/itertools/tests/adaptors_no_collect.rs create mode 100644 vendor/itertools/tests/flatten_ok.rs create mode 100644 vendor/itertools/tests/laziness.rs create mode 100644 vendor/itertools/tests/macros_hygiene.rs create mode 100644 vendor/itertools/tests/merge_join.rs create mode 100644 vendor/itertools/tests/peeking_take_while.rs create mode 100644 vendor/itertools/tests/quick.rs create mode 100644 vendor/itertools/tests/specializations.rs create mode 100644 vendor/itertools/tests/test_core.rs create mode 100644 vendor/itertools/tests/test_std.rs create mode 100644 vendor/itertools/tests/tuples.rs create mode 100644 vendor/itertools/tests/zip.rs create mode 100644 vendor/libc/.cargo-checksum.json create mode 100644 vendor/libc/.cargo_vcs_info.json create mode 100644 vendor/libc/.editorconfig create mode 100644 vendor/libc/.git-blame-ignore-revs create mode 100644 vendor/libc/.release-plz.toml create mode 100644 vendor/libc/CHANGELOG.md 
create mode 100644 vendor/libc/CONTRIBUTING.md create mode 100644 vendor/libc/Cargo.lock create mode 100644 vendor/libc/Cargo.toml create mode 100644 vendor/libc/LICENSE-APACHE create mode 100644 vendor/libc/LICENSE-MIT create mode 100644 vendor/libc/README.md create mode 100644 vendor/libc/build.rs create mode 100755 vendor/libc/cherry-pick-stable.sh create mode 100644 vendor/libc/rustfmt.toml create mode 100644 vendor/libc/src/fuchsia/aarch64.rs create mode 100644 vendor/libc/src/fuchsia/mod.rs create mode 100644 vendor/libc/src/fuchsia/riscv64.rs create mode 100644 vendor/libc/src/fuchsia/x86_64.rs create mode 100644 vendor/libc/src/hermit.rs create mode 100644 vendor/libc/src/lib.rs create mode 100644 vendor/libc/src/macros.rs create mode 100644 vendor/libc/src/new/bionic/mod.rs create mode 100644 vendor/libc/src/new/bionic/sys/mod.rs create mode 100644 vendor/libc/src/new/bionic/sys/socket.rs create mode 100644 vendor/libc/src/new/linux_uapi/linux/can.rs create mode 100644 vendor/libc/src/new/linux_uapi/linux/can/j1939.rs create mode 100644 vendor/libc/src/new/linux_uapi/linux/can/raw.rs create mode 100644 vendor/libc/src/new/linux_uapi/linux/mod.rs create mode 100644 vendor/libc/src/new/linux_uapi/mod.rs create mode 100644 vendor/libc/src/new/mod.rs create mode 100644 vendor/libc/src/primitives.rs create mode 100644 vendor/libc/src/psp.rs create mode 100644 vendor/libc/src/sgx.rs create mode 100644 vendor/libc/src/solid/aarch64.rs create mode 100644 vendor/libc/src/solid/arm.rs create mode 100644 vendor/libc/src/solid/mod.rs create mode 100644 vendor/libc/src/switch.rs create mode 100644 vendor/libc/src/teeos/mod.rs create mode 100644 vendor/libc/src/trusty.rs create mode 100644 vendor/libc/src/types.rs create mode 100644 vendor/libc/src/unix/aix/mod.rs create mode 100644 vendor/libc/src/unix/aix/powerpc64.rs create mode 100644 vendor/libc/src/unix/bsd/apple/b32/mod.rs create mode 100644 vendor/libc/src/unix/bsd/apple/b64/aarch64/mod.rs create mode 100644 vendor/libc/src/unix/bsd/apple/b64/mod.rs create mode 100644 vendor/libc/src/unix/bsd/apple/b64/x86_64/mod.rs create mode 100644 vendor/libc/src/unix/bsd/apple/mod.rs create mode 100644 vendor/libc/src/unix/bsd/freebsdlike/dragonfly/errno.rs create mode 100644 vendor/libc/src/unix/bsd/freebsdlike/dragonfly/mod.rs create mode 100644 vendor/libc/src/unix/bsd/freebsdlike/freebsd/aarch64.rs create mode 100644 vendor/libc/src/unix/bsd/freebsdlike/freebsd/arm.rs create mode 100644 vendor/libc/src/unix/bsd/freebsdlike/freebsd/freebsd11/b32.rs create mode 100644 vendor/libc/src/unix/bsd/freebsdlike/freebsd/freebsd11/b64.rs create mode 100644 vendor/libc/src/unix/bsd/freebsdlike/freebsd/freebsd11/mod.rs create mode 100644 vendor/libc/src/unix/bsd/freebsdlike/freebsd/freebsd12/mod.rs create mode 100644 vendor/libc/src/unix/bsd/freebsdlike/freebsd/freebsd12/x86_64.rs create mode 100644 vendor/libc/src/unix/bsd/freebsdlike/freebsd/freebsd13/mod.rs create mode 100644 vendor/libc/src/unix/bsd/freebsdlike/freebsd/freebsd13/x86_64.rs create mode 100644 vendor/libc/src/unix/bsd/freebsdlike/freebsd/freebsd14/mod.rs create mode 100644 vendor/libc/src/unix/bsd/freebsdlike/freebsd/freebsd14/x86_64.rs create mode 100644 vendor/libc/src/unix/bsd/freebsdlike/freebsd/freebsd15/mod.rs create mode 100644 vendor/libc/src/unix/bsd/freebsdlike/freebsd/freebsd15/x86_64.rs create mode 100644 vendor/libc/src/unix/bsd/freebsdlike/freebsd/mod.rs create mode 100644 vendor/libc/src/unix/bsd/freebsdlike/freebsd/powerpc.rs create mode 100644 
vendor/libc/src/unix/bsd/freebsdlike/freebsd/powerpc64.rs create mode 100644 vendor/libc/src/unix/bsd/freebsdlike/freebsd/riscv64.rs create mode 100644 vendor/libc/src/unix/bsd/freebsdlike/freebsd/x86.rs create mode 100644 vendor/libc/src/unix/bsd/freebsdlike/freebsd/x86_64/mod.rs create mode 100644 vendor/libc/src/unix/bsd/freebsdlike/mod.rs create mode 100644 vendor/libc/src/unix/bsd/mod.rs create mode 100644 vendor/libc/src/unix/bsd/netbsdlike/mod.rs create mode 100644 vendor/libc/src/unix/bsd/netbsdlike/netbsd/aarch64.rs create mode 100644 vendor/libc/src/unix/bsd/netbsdlike/netbsd/arm.rs create mode 100644 vendor/libc/src/unix/bsd/netbsdlike/netbsd/mips.rs create mode 100644 vendor/libc/src/unix/bsd/netbsdlike/netbsd/mod.rs create mode 100644 vendor/libc/src/unix/bsd/netbsdlike/netbsd/powerpc.rs create mode 100644 vendor/libc/src/unix/bsd/netbsdlike/netbsd/riscv64.rs create mode 100644 vendor/libc/src/unix/bsd/netbsdlike/netbsd/sparc64.rs create mode 100644 vendor/libc/src/unix/bsd/netbsdlike/netbsd/x86.rs create mode 100644 vendor/libc/src/unix/bsd/netbsdlike/netbsd/x86_64.rs create mode 100644 vendor/libc/src/unix/bsd/netbsdlike/openbsd/aarch64.rs create mode 100644 vendor/libc/src/unix/bsd/netbsdlike/openbsd/arm.rs create mode 100644 vendor/libc/src/unix/bsd/netbsdlike/openbsd/mips64.rs create mode 100644 vendor/libc/src/unix/bsd/netbsdlike/openbsd/mod.rs create mode 100644 vendor/libc/src/unix/bsd/netbsdlike/openbsd/powerpc.rs create mode 100644 vendor/libc/src/unix/bsd/netbsdlike/openbsd/powerpc64.rs create mode 100644 vendor/libc/src/unix/bsd/netbsdlike/openbsd/riscv64.rs create mode 100644 vendor/libc/src/unix/bsd/netbsdlike/openbsd/sparc64.rs create mode 100644 vendor/libc/src/unix/bsd/netbsdlike/openbsd/x86.rs create mode 100644 vendor/libc/src/unix/bsd/netbsdlike/openbsd/x86_64.rs create mode 100644 vendor/libc/src/unix/cygwin/mod.rs create mode 100644 vendor/libc/src/unix/haiku/b32.rs create mode 100644 vendor/libc/src/unix/haiku/b64.rs create mode 100644 vendor/libc/src/unix/haiku/bsd.rs create mode 100644 vendor/libc/src/unix/haiku/mod.rs create mode 100644 vendor/libc/src/unix/haiku/native.rs create mode 100644 vendor/libc/src/unix/haiku/x86_64.rs create mode 100644 vendor/libc/src/unix/hurd/b32.rs create mode 100644 vendor/libc/src/unix/hurd/b64.rs create mode 100644 vendor/libc/src/unix/hurd/mod.rs create mode 100644 vendor/libc/src/unix/linux_like/android/b32/arm.rs create mode 100644 vendor/libc/src/unix/linux_like/android/b32/mod.rs create mode 100644 vendor/libc/src/unix/linux_like/android/b32/x86/mod.rs create mode 100644 vendor/libc/src/unix/linux_like/android/b64/aarch64/mod.rs create mode 100644 vendor/libc/src/unix/linux_like/android/b64/mod.rs create mode 100644 vendor/libc/src/unix/linux_like/android/b64/riscv64/mod.rs create mode 100644 vendor/libc/src/unix/linux_like/android/b64/x86_64/mod.rs create mode 100644 vendor/libc/src/unix/linux_like/android/mod.rs create mode 100644 vendor/libc/src/unix/linux_like/emscripten/lfs64.rs create mode 100644 vendor/libc/src/unix/linux_like/emscripten/mod.rs create mode 100644 vendor/libc/src/unix/linux_like/linux/arch/generic/mod.rs create mode 100644 vendor/libc/src/unix/linux_like/linux/arch/mips/mod.rs create mode 100644 vendor/libc/src/unix/linux_like/linux/arch/mod.rs create mode 100644 vendor/libc/src/unix/linux_like/linux/arch/powerpc/mod.rs create mode 100644 vendor/libc/src/unix/linux_like/linux/arch/sparc/mod.rs create mode 100644 vendor/libc/src/unix/linux_like/linux/gnu/b32/arm/mod.rs create mode 100644 
vendor/libc/src/unix/linux_like/linux/gnu/b32/csky/mod.rs create mode 100644 vendor/libc/src/unix/linux_like/linux/gnu/b32/m68k/mod.rs create mode 100644 vendor/libc/src/unix/linux_like/linux/gnu/b32/mips/mod.rs create mode 100644 vendor/libc/src/unix/linux_like/linux/gnu/b32/mod.rs create mode 100644 vendor/libc/src/unix/linux_like/linux/gnu/b32/powerpc.rs create mode 100644 vendor/libc/src/unix/linux_like/linux/gnu/b32/riscv32/mod.rs create mode 100644 vendor/libc/src/unix/linux_like/linux/gnu/b32/sparc/mod.rs create mode 100644 vendor/libc/src/unix/linux_like/linux/gnu/b32/x86/mod.rs create mode 100644 vendor/libc/src/unix/linux_like/linux/gnu/b64/aarch64/ilp32.rs create mode 100644 vendor/libc/src/unix/linux_like/linux/gnu/b64/aarch64/lp64.rs create mode 100644 vendor/libc/src/unix/linux_like/linux/gnu/b64/aarch64/mod.rs create mode 100644 vendor/libc/src/unix/linux_like/linux/gnu/b64/loongarch64/mod.rs create mode 100644 vendor/libc/src/unix/linux_like/linux/gnu/b64/mips64/mod.rs create mode 100644 vendor/libc/src/unix/linux_like/linux/gnu/b64/mod.rs create mode 100644 vendor/libc/src/unix/linux_like/linux/gnu/b64/powerpc64/mod.rs create mode 100644 vendor/libc/src/unix/linux_like/linux/gnu/b64/riscv64/mod.rs create mode 100644 vendor/libc/src/unix/linux_like/linux/gnu/b64/s390x.rs create mode 100644 vendor/libc/src/unix/linux_like/linux/gnu/b64/sparc64/mod.rs create mode 100644 vendor/libc/src/unix/linux_like/linux/gnu/b64/x86_64/mod.rs create mode 100644 vendor/libc/src/unix/linux_like/linux/gnu/b64/x86_64/not_x32.rs create mode 100644 vendor/libc/src/unix/linux_like/linux/gnu/b64/x86_64/x32.rs create mode 100644 vendor/libc/src/unix/linux_like/linux/gnu/mod.rs create mode 100644 vendor/libc/src/unix/linux_like/linux/mod.rs create mode 100644 vendor/libc/src/unix/linux_like/linux/musl/b32/arm/mod.rs create mode 100644 vendor/libc/src/unix/linux_like/linux/musl/b32/hexagon.rs create mode 100644 vendor/libc/src/unix/linux_like/linux/musl/b32/mips/mod.rs create mode 100644 vendor/libc/src/unix/linux_like/linux/musl/b32/mod.rs create mode 100644 vendor/libc/src/unix/linux_like/linux/musl/b32/powerpc.rs create mode 100644 vendor/libc/src/unix/linux_like/linux/musl/b32/riscv32/mod.rs create mode 100644 vendor/libc/src/unix/linux_like/linux/musl/b32/x86/mod.rs create mode 100644 vendor/libc/src/unix/linux_like/linux/musl/b64/aarch64/mod.rs create mode 100644 vendor/libc/src/unix/linux_like/linux/musl/b64/loongarch64/mod.rs create mode 100644 vendor/libc/src/unix/linux_like/linux/musl/b64/mips64.rs create mode 100644 vendor/libc/src/unix/linux_like/linux/musl/b64/mod.rs create mode 100644 vendor/libc/src/unix/linux_like/linux/musl/b64/powerpc64.rs create mode 100644 vendor/libc/src/unix/linux_like/linux/musl/b64/riscv64/mod.rs create mode 100644 vendor/libc/src/unix/linux_like/linux/musl/b64/s390x.rs create mode 100644 vendor/libc/src/unix/linux_like/linux/musl/b64/wasm32/mod.rs create mode 100644 vendor/libc/src/unix/linux_like/linux/musl/b64/wasm32/wali.rs create mode 100644 vendor/libc/src/unix/linux_like/linux/musl/b64/x86_64/mod.rs create mode 100644 vendor/libc/src/unix/linux_like/linux/musl/lfs64.rs create mode 100644 vendor/libc/src/unix/linux_like/linux/musl/mod.rs create mode 100644 vendor/libc/src/unix/linux_like/linux/uclibc/arm/mod.rs create mode 100644 vendor/libc/src/unix/linux_like/linux/uclibc/mips/mips32/mod.rs create mode 100644 vendor/libc/src/unix/linux_like/linux/uclibc/mips/mips64/mod.rs create mode 100644 vendor/libc/src/unix/linux_like/linux/uclibc/mips/mod.rs 
create mode 100644 vendor/libc/src/unix/linux_like/linux/uclibc/mod.rs create mode 100644 vendor/libc/src/unix/linux_like/linux/uclibc/x86_64/l4re.rs create mode 100644 vendor/libc/src/unix/linux_like/linux/uclibc/x86_64/mod.rs create mode 100644 vendor/libc/src/unix/linux_like/linux/uclibc/x86_64/other.rs create mode 100644 vendor/libc/src/unix/linux_like/mod.rs create mode 100644 vendor/libc/src/unix/mod.rs create mode 100644 vendor/libc/src/unix/newlib/aarch64/mod.rs create mode 100644 vendor/libc/src/unix/newlib/arm/mod.rs create mode 100644 vendor/libc/src/unix/newlib/espidf/mod.rs create mode 100644 vendor/libc/src/unix/newlib/generic.rs create mode 100644 vendor/libc/src/unix/newlib/horizon/mod.rs create mode 100644 vendor/libc/src/unix/newlib/mod.rs create mode 100644 vendor/libc/src/unix/newlib/powerpc/mod.rs create mode 100644 vendor/libc/src/unix/newlib/rtems/mod.rs create mode 100644 vendor/libc/src/unix/newlib/vita/mod.rs create mode 100644 vendor/libc/src/unix/nto/aarch64.rs create mode 100644 vendor/libc/src/unix/nto/mod.rs create mode 100644 vendor/libc/src/unix/nto/neutrino.rs create mode 100644 vendor/libc/src/unix/nto/x86_64.rs create mode 100644 vendor/libc/src/unix/nuttx/mod.rs create mode 100644 vendor/libc/src/unix/redox/mod.rs create mode 100644 vendor/libc/src/unix/solarish/compat.rs create mode 100644 vendor/libc/src/unix/solarish/illumos.rs create mode 100644 vendor/libc/src/unix/solarish/mod.rs create mode 100644 vendor/libc/src/unix/solarish/solaris.rs create mode 100644 vendor/libc/src/unix/solarish/x86.rs create mode 100644 vendor/libc/src/unix/solarish/x86_64.rs create mode 100644 vendor/libc/src/unix/solarish/x86_common.rs create mode 100644 vendor/libc/src/vxworks/aarch64.rs create mode 100644 vendor/libc/src/vxworks/arm.rs create mode 100644 vendor/libc/src/vxworks/mod.rs create mode 100644 vendor/libc/src/vxworks/powerpc.rs create mode 100644 vendor/libc/src/vxworks/powerpc64.rs create mode 100644 vendor/libc/src/vxworks/riscv32.rs create mode 100644 vendor/libc/src/vxworks/riscv64.rs create mode 100644 vendor/libc/src/vxworks/x86.rs create mode 100644 vendor/libc/src/vxworks/x86_64.rs create mode 100644 vendor/libc/src/wasi/mod.rs create mode 100644 vendor/libc/src/wasi/p2.rs create mode 100644 vendor/libc/src/windows/gnu/mod.rs create mode 100644 vendor/libc/src/windows/mod.rs create mode 100644 vendor/libc/src/windows/msvc/mod.rs create mode 100644 vendor/libc/src/xous.rs create mode 100644 vendor/libc/tests/const_fn.rs create mode 100644 vendor/libloading/.cargo-checksum.json create mode 100644 vendor/libloading/.cargo_vcs_info.json create mode 100644 vendor/libloading/.github/workflows/libloading.yml create mode 100644 vendor/libloading/Cargo.lock create mode 100644 vendor/libloading/Cargo.toml create mode 100644 vendor/libloading/LICENSE create mode 100644 vendor/libloading/README.mkd create mode 100644 vendor/libloading/src/changelog.rs create mode 100644 vendor/libloading/src/error.rs create mode 100644 vendor/libloading/src/lib.rs create mode 100644 vendor/libloading/src/os/mod.rs create mode 100644 vendor/libloading/src/os/unix/consts.rs create mode 100644 vendor/libloading/src/os/unix/mod.rs create mode 100644 vendor/libloading/src/os/windows/mod.rs create mode 100644 vendor/libloading/src/safe.rs create mode 100644 vendor/libloading/src/test_helpers.rs create mode 100644 vendor/libloading/src/util.rs create mode 100644 vendor/libloading/tests/constants.rs create mode 100644 vendor/libloading/tests/functions.rs create mode 100644 
vendor/libloading/tests/library_filename.rs create mode 100644 vendor/libloading/tests/markers.rs create mode 100644 vendor/libloading/tests/windows.rs create mode 100644 vendor/log/.cargo-checksum.json create mode 100644 vendor/log/.cargo_vcs_info.json create mode 100644 vendor/log/.github/workflows/main.yml create mode 100644 vendor/log/CHANGELOG.md create mode 100644 vendor/log/Cargo.lock create mode 100644 vendor/log/Cargo.toml create mode 100644 vendor/log/LICENSE-APACHE create mode 100644 vendor/log/LICENSE-MIT create mode 100644 vendor/log/README.md create mode 100644 vendor/log/benches/value.rs create mode 100644 vendor/log/src/__private_api.rs create mode 100644 vendor/log/src/kv/error.rs create mode 100644 vendor/log/src/kv/key.rs create mode 100644 vendor/log/src/kv/mod.rs create mode 100644 vendor/log/src/kv/source.rs create mode 100644 vendor/log/src/kv/value.rs create mode 100644 vendor/log/src/lib.rs create mode 100644 vendor/log/src/macros.rs create mode 100644 vendor/log/src/serde.rs create mode 100644 vendor/log/tests/integration.rs create mode 100644 vendor/log/tests/macros.rs create mode 100644 vendor/log/triagebot.toml create mode 100644 vendor/memchr/.cargo-checksum.json create mode 100644 vendor/memchr/.cargo_vcs_info.json create mode 100644 vendor/memchr/.ignore create mode 100644 vendor/memchr/.vim/coc-settings.json create mode 100644 vendor/memchr/COPYING create mode 100644 vendor/memchr/Cargo.lock create mode 100644 vendor/memchr/Cargo.toml create mode 100644 vendor/memchr/LICENSE-MIT create mode 100644 vendor/memchr/README.md create mode 100644 vendor/memchr/UNLICENSE create mode 100644 vendor/memchr/rustfmt.toml create mode 100644 vendor/memchr/src/arch/aarch64/memchr.rs create mode 100644 vendor/memchr/src/arch/aarch64/mod.rs create mode 100644 vendor/memchr/src/arch/aarch64/neon/memchr.rs create mode 100644 vendor/memchr/src/arch/aarch64/neon/mod.rs create mode 100644 vendor/memchr/src/arch/aarch64/neon/packedpair.rs create mode 100644 vendor/memchr/src/arch/all/memchr.rs create mode 100644 vendor/memchr/src/arch/all/mod.rs create mode 100644 vendor/memchr/src/arch/all/packedpair/default_rank.rs create mode 100644 vendor/memchr/src/arch/all/packedpair/mod.rs create mode 100644 vendor/memchr/src/arch/all/rabinkarp.rs create mode 100644 vendor/memchr/src/arch/all/shiftor.rs create mode 100644 vendor/memchr/src/arch/all/twoway.rs create mode 100644 vendor/memchr/src/arch/generic/memchr.rs create mode 100644 vendor/memchr/src/arch/generic/mod.rs create mode 100644 vendor/memchr/src/arch/generic/packedpair.rs create mode 100644 vendor/memchr/src/arch/mod.rs create mode 100644 vendor/memchr/src/arch/wasm32/memchr.rs create mode 100644 vendor/memchr/src/arch/wasm32/mod.rs create mode 100644 vendor/memchr/src/arch/wasm32/simd128/memchr.rs create mode 100644 vendor/memchr/src/arch/wasm32/simd128/mod.rs create mode 100644 vendor/memchr/src/arch/wasm32/simd128/packedpair.rs create mode 100644 vendor/memchr/src/arch/x86_64/avx2/memchr.rs create mode 100644 vendor/memchr/src/arch/x86_64/avx2/mod.rs create mode 100644 vendor/memchr/src/arch/x86_64/avx2/packedpair.rs create mode 100644 vendor/memchr/src/arch/x86_64/memchr.rs create mode 100644 vendor/memchr/src/arch/x86_64/mod.rs create mode 100644 vendor/memchr/src/arch/x86_64/sse2/memchr.rs create mode 100644 vendor/memchr/src/arch/x86_64/sse2/mod.rs create mode 100644 vendor/memchr/src/arch/x86_64/sse2/packedpair.rs create mode 100644 vendor/memchr/src/cow.rs create mode 100644 vendor/memchr/src/ext.rs create mode 100644 
vendor/memchr/src/lib.rs create mode 100644 vendor/memchr/src/macros.rs create mode 100644 vendor/memchr/src/memchr.rs create mode 100644 vendor/memchr/src/memmem/mod.rs create mode 100644 vendor/memchr/src/memmem/searcher.rs create mode 100644 vendor/memchr/src/tests/memchr/mod.rs create mode 100644 vendor/memchr/src/tests/memchr/naive.rs create mode 100644 vendor/memchr/src/tests/memchr/prop.rs create mode 100644 vendor/memchr/src/tests/mod.rs create mode 100644 vendor/memchr/src/tests/packedpair.rs create mode 100644 vendor/memchr/src/tests/substring/mod.rs create mode 100644 vendor/memchr/src/tests/substring/naive.rs create mode 100644 vendor/memchr/src/tests/substring/prop.rs create mode 100644 vendor/memchr/src/vector.rs create mode 100644 vendor/minimal-lexical/.cargo-checksum.json create mode 100644 vendor/minimal-lexical/.cargo_vcs_info.json create mode 100644 vendor/minimal-lexical/.github/ISSUE_TEMPLATE/bug_report.md create mode 100644 vendor/minimal-lexical/.github/ISSUE_TEMPLATE/custom.md create mode 100644 vendor/minimal-lexical/.github/ISSUE_TEMPLATE/documentation.md create mode 100644 vendor/minimal-lexical/.github/ISSUE_TEMPLATE/feature_request.md create mode 100644 vendor/minimal-lexical/.github/ISSUE_TEMPLATE/question.md create mode 100644 vendor/minimal-lexical/.github/PULL_REQUEST_TEMPLATE/bug_fix.md create mode 100644 vendor/minimal-lexical/.github/PULL_REQUEST_TEMPLATE/custom.md create mode 100644 vendor/minimal-lexical/.github/PULL_REQUEST_TEMPLATE/documentation.md create mode 100644 vendor/minimal-lexical/.github/workflows/Cross.yml create mode 100644 vendor/minimal-lexical/.github/workflows/Features.yml create mode 100644 vendor/minimal-lexical/.github/workflows/OSX.yml create mode 100644 vendor/minimal-lexical/.github/workflows/Simple.yml create mode 100644 vendor/minimal-lexical/.github/workflows/Valgrind.yml create mode 100644 vendor/minimal-lexical/.gitmodules create mode 100644 vendor/minimal-lexical/CHANGELOG create mode 100644 vendor/minimal-lexical/CODE_OF_CONDUCT.md create mode 100644 vendor/minimal-lexical/Cargo.toml create mode 100644 vendor/minimal-lexical/LICENSE-APACHE create mode 100644 vendor/minimal-lexical/LICENSE-MIT create mode 100644 vendor/minimal-lexical/LICENSE.md create mode 100644 vendor/minimal-lexical/README.md create mode 100644 vendor/minimal-lexical/clippy.toml create mode 100644 vendor/minimal-lexical/rustfmt.toml create mode 100644 vendor/minimal-lexical/src/bellerophon.rs create mode 100644 vendor/minimal-lexical/src/bigint.rs create mode 100644 vendor/minimal-lexical/src/extended_float.rs create mode 100644 vendor/minimal-lexical/src/fpu.rs create mode 100644 vendor/minimal-lexical/src/heapvec.rs create mode 100644 vendor/minimal-lexical/src/lemire.rs create mode 100644 vendor/minimal-lexical/src/lib.rs create mode 100644 vendor/minimal-lexical/src/libm.rs create mode 100644 vendor/minimal-lexical/src/mask.rs create mode 100644 vendor/minimal-lexical/src/num.rs create mode 100644 vendor/minimal-lexical/src/number.rs create mode 100644 vendor/minimal-lexical/src/parse.rs create mode 100644 vendor/minimal-lexical/src/rounding.rs create mode 100644 vendor/minimal-lexical/src/slow.rs create mode 100644 vendor/minimal-lexical/src/stackvec.rs create mode 100644 vendor/minimal-lexical/src/table.rs create mode 100644 vendor/minimal-lexical/src/table_bellerophon.rs create mode 100644 vendor/minimal-lexical/src/table_lemire.rs create mode 100644 vendor/minimal-lexical/src/table_small.rs create mode 100644 
vendor/minimal-lexical/tests/bellerophon.rs create mode 100644 vendor/minimal-lexical/tests/bellerophon_tests.rs create mode 100644 vendor/minimal-lexical/tests/integration_tests.rs create mode 100644 vendor/minimal-lexical/tests/lemire_tests.rs create mode 100644 vendor/minimal-lexical/tests/libm_tests.rs create mode 100644 vendor/minimal-lexical/tests/mask_tests.rs create mode 100644 vendor/minimal-lexical/tests/number_tests.rs create mode 100644 vendor/minimal-lexical/tests/parse_tests.rs create mode 100644 vendor/minimal-lexical/tests/rounding_tests.rs create mode 100644 vendor/minimal-lexical/tests/slow_tests.rs create mode 100644 vendor/minimal-lexical/tests/stackvec.rs create mode 100644 vendor/minimal-lexical/tests/vec_tests.rs create mode 100644 vendor/nom/.cargo-checksum.json create mode 100644 vendor/nom/.cargo_vcs_info.json create mode 100644 vendor/nom/CHANGELOG.md create mode 100644 vendor/nom/Cargo.lock create mode 100644 vendor/nom/Cargo.toml create mode 100644 vendor/nom/LICENSE create mode 100644 vendor/nom/README.md create mode 100644 vendor/nom/doc/nom_recipes.md create mode 100644 vendor/nom/src/bits/complete.rs create mode 100644 vendor/nom/src/bits/mod.rs create mode 100644 vendor/nom/src/bits/streaming.rs create mode 100644 vendor/nom/src/branch/mod.rs create mode 100644 vendor/nom/src/branch/tests.rs create mode 100644 vendor/nom/src/bytes/complete.rs create mode 100644 vendor/nom/src/bytes/mod.rs create mode 100644 vendor/nom/src/bytes/streaming.rs create mode 100644 vendor/nom/src/bytes/tests.rs create mode 100644 vendor/nom/src/character/complete.rs create mode 100644 vendor/nom/src/character/mod.rs create mode 100644 vendor/nom/src/character/streaming.rs create mode 100644 vendor/nom/src/character/tests.rs create mode 100644 vendor/nom/src/combinator/mod.rs create mode 100644 vendor/nom/src/combinator/tests.rs create mode 100644 vendor/nom/src/error.rs create mode 100644 vendor/nom/src/internal.rs create mode 100644 vendor/nom/src/lib.rs create mode 100644 vendor/nom/src/macros.rs create mode 100644 vendor/nom/src/multi/mod.rs create mode 100644 vendor/nom/src/multi/tests.rs create mode 100644 vendor/nom/src/number/complete.rs create mode 100644 vendor/nom/src/number/mod.rs create mode 100644 vendor/nom/src/number/streaming.rs create mode 100644 vendor/nom/src/sequence/mod.rs create mode 100644 vendor/nom/src/sequence/tests.rs create mode 100644 vendor/nom/src/str.rs create mode 100644 vendor/nom/src/traits.rs create mode 100644 vendor/nom/tests/arithmetic.rs create mode 100644 vendor/nom/tests/arithmetic_ast.rs create mode 100644 vendor/nom/tests/css.rs create mode 100644 vendor/nom/tests/custom_errors.rs create mode 100644 vendor/nom/tests/escaped.rs create mode 100644 vendor/nom/tests/float.rs create mode 100644 vendor/nom/tests/fnmut.rs create mode 100644 vendor/nom/tests/ini.rs create mode 100644 vendor/nom/tests/ini_str.rs create mode 100644 vendor/nom/tests/issues.rs create mode 100644 vendor/nom/tests/json.rs create mode 100644 vendor/nom/tests/mp4.rs create mode 100644 vendor/nom/tests/multiline.rs create mode 100644 vendor/nom/tests/overflow.rs create mode 100644 vendor/nom/tests/reborrow_fold.rs create mode 100644 vendor/prettyplease/.cargo-checksum.json create mode 100644 vendor/prettyplease/.cargo_vcs_info.json create mode 100644 vendor/prettyplease/.github/FUNDING.yml create mode 100644 vendor/prettyplease/.github/workflows/ci.yml create mode 100644 vendor/prettyplease/Cargo.lock create mode 100644 vendor/prettyplease/Cargo.toml create mode 100644 
vendor/prettyplease/LICENSE-APACHE create mode 100644 vendor/prettyplease/LICENSE-MIT create mode 100644 vendor/prettyplease/README.md create mode 100644 vendor/prettyplease/build.rs create mode 100644 vendor/prettyplease/examples/.tokeignore create mode 100644 vendor/prettyplease/examples/input.rs create mode 100644 vendor/prettyplease/examples/output.prettyplease.rs create mode 100644 vendor/prettyplease/examples/output.rustc.rs create mode 100644 vendor/prettyplease/examples/output.rustfmt.rs create mode 100644 vendor/prettyplease/src/algorithm.rs create mode 100644 vendor/prettyplease/src/attr.rs create mode 100644 vendor/prettyplease/src/classify.rs create mode 100644 vendor/prettyplease/src/convenience.rs create mode 100644 vendor/prettyplease/src/data.rs create mode 100644 vendor/prettyplease/src/expr.rs create mode 100644 vendor/prettyplease/src/file.rs create mode 100644 vendor/prettyplease/src/fixup.rs create mode 100644 vendor/prettyplease/src/generics.rs create mode 100644 vendor/prettyplease/src/item.rs create mode 100644 vendor/prettyplease/src/iter.rs create mode 100644 vendor/prettyplease/src/lib.rs create mode 100644 vendor/prettyplease/src/lifetime.rs create mode 100644 vendor/prettyplease/src/lit.rs create mode 100644 vendor/prettyplease/src/mac.rs create mode 100644 vendor/prettyplease/src/pat.rs create mode 100644 vendor/prettyplease/src/path.rs create mode 100644 vendor/prettyplease/src/precedence.rs create mode 100644 vendor/prettyplease/src/ring.rs create mode 100644 vendor/prettyplease/src/stmt.rs create mode 100644 vendor/prettyplease/src/token.rs create mode 100644 vendor/prettyplease/src/ty.rs create mode 100644 vendor/prettyplease/tests/test.rs create mode 100644 vendor/prettyplease/tests/test_precedence.rs create mode 100644 vendor/proc-macro2/.cargo-checksum.json create mode 100644 vendor/proc-macro2/.cargo_vcs_info.json create mode 100644 vendor/proc-macro2/.github/FUNDING.yml create mode 100644 vendor/proc-macro2/.github/workflows/ci.yml create mode 100644 vendor/proc-macro2/Cargo.lock create mode 100644 vendor/proc-macro2/Cargo.toml create mode 100644 vendor/proc-macro2/LICENSE-APACHE create mode 100644 vendor/proc-macro2/LICENSE-MIT create mode 100644 vendor/proc-macro2/README.md create mode 100644 vendor/proc-macro2/build.rs create mode 100644 vendor/proc-macro2/rust-toolchain.toml create mode 100644 vendor/proc-macro2/src/detection.rs create mode 100644 vendor/proc-macro2/src/extra.rs create mode 100644 vendor/proc-macro2/src/fallback.rs create mode 100644 vendor/proc-macro2/src/lib.rs create mode 100644 vendor/proc-macro2/src/location.rs create mode 100644 vendor/proc-macro2/src/marker.rs create mode 100644 vendor/proc-macro2/src/num.rs create mode 100644 vendor/proc-macro2/src/parse.rs create mode 100644 vendor/proc-macro2/src/probe.rs create mode 100644 vendor/proc-macro2/src/probe/proc_macro_span.rs create mode 100644 vendor/proc-macro2/src/probe/proc_macro_span_file.rs create mode 100644 vendor/proc-macro2/src/probe/proc_macro_span_location.rs create mode 100644 vendor/proc-macro2/src/rcvec.rs create mode 100644 vendor/proc-macro2/src/rustc_literal_escaper.rs create mode 100644 vendor/proc-macro2/src/wrapper.rs create mode 100644 vendor/proc-macro2/tests/comments.rs create mode 100644 vendor/proc-macro2/tests/features.rs create mode 100644 vendor/proc-macro2/tests/marker.rs create mode 100644 vendor/proc-macro2/tests/test.rs create mode 100644 vendor/proc-macro2/tests/test_fmt.rs create mode 100644 vendor/proc-macro2/tests/test_size.rs create mode 
100644 vendor/quote/.cargo-checksum.json create mode 100644 vendor/quote/.cargo_vcs_info.json create mode 100644 vendor/quote/.github/FUNDING.yml create mode 100644 vendor/quote/.github/workflows/ci.yml create mode 100644 vendor/quote/Cargo.lock create mode 100644 vendor/quote/Cargo.toml create mode 100644 vendor/quote/LICENSE-APACHE create mode 100644 vendor/quote/LICENSE-MIT create mode 100644 vendor/quote/README.md create mode 100644 vendor/quote/build.rs create mode 100644 vendor/quote/rust-toolchain.toml create mode 100644 vendor/quote/src/ext.rs create mode 100644 vendor/quote/src/format.rs create mode 100644 vendor/quote/src/ident_fragment.rs create mode 100644 vendor/quote/src/lib.rs create mode 100644 vendor/quote/src/runtime.rs create mode 100644 vendor/quote/src/spanned.rs create mode 100644 vendor/quote/src/to_tokens.rs create mode 100644 vendor/quote/tests/compiletest.rs create mode 100644 vendor/quote/tests/test.rs create mode 100644 vendor/quote/tests/ui/does-not-have-iter-interpolated-dup.rs create mode 100644 vendor/quote/tests/ui/does-not-have-iter-interpolated-dup.stderr create mode 100644 vendor/quote/tests/ui/does-not-have-iter-interpolated.rs create mode 100644 vendor/quote/tests/ui/does-not-have-iter-interpolated.stderr create mode 100644 vendor/quote/tests/ui/does-not-have-iter-separated.rs create mode 100644 vendor/quote/tests/ui/does-not-have-iter-separated.stderr create mode 100644 vendor/quote/tests/ui/does-not-have-iter.rs create mode 100644 vendor/quote/tests/ui/does-not-have-iter.stderr create mode 100644 vendor/quote/tests/ui/not-quotable.rs create mode 100644 vendor/quote/tests/ui/not-quotable.stderr create mode 100644 vendor/quote/tests/ui/not-repeatable.rs create mode 100644 vendor/quote/tests/ui/not-repeatable.stderr create mode 100644 vendor/quote/tests/ui/wrong-type-span.rs create mode 100644 vendor/quote/tests/ui/wrong-type-span.stderr create mode 100644 vendor/regex-automata/.cargo-checksum.json create mode 100644 vendor/regex-automata/.cargo_vcs_info.json create mode 100644 vendor/regex-automata/Cargo.lock create mode 100644 vendor/regex-automata/Cargo.toml create mode 100644 vendor/regex-automata/LICENSE-APACHE create mode 100644 vendor/regex-automata/LICENSE-MIT create mode 100644 vendor/regex-automata/README.md create mode 100644 vendor/regex-automata/src/dfa/accel.rs create mode 100644 vendor/regex-automata/src/dfa/automaton.rs create mode 100644 vendor/regex-automata/src/dfa/dense.rs create mode 100644 vendor/regex-automata/src/dfa/determinize.rs create mode 100644 vendor/regex-automata/src/dfa/minimize.rs create mode 100644 vendor/regex-automata/src/dfa/mod.rs create mode 100644 vendor/regex-automata/src/dfa/onepass.rs create mode 100644 vendor/regex-automata/src/dfa/regex.rs create mode 100644 vendor/regex-automata/src/dfa/remapper.rs create mode 100644 vendor/regex-automata/src/dfa/search.rs create mode 100644 vendor/regex-automata/src/dfa/sparse.rs create mode 100644 vendor/regex-automata/src/dfa/special.rs create mode 100644 vendor/regex-automata/src/dfa/start.rs create mode 100644 vendor/regex-automata/src/hybrid/dfa.rs create mode 100644 vendor/regex-automata/src/hybrid/error.rs create mode 100644 vendor/regex-automata/src/hybrid/id.rs create mode 100644 vendor/regex-automata/src/hybrid/mod.rs create mode 100644 vendor/regex-automata/src/hybrid/regex.rs create mode 100644 vendor/regex-automata/src/hybrid/search.rs create mode 100644 vendor/regex-automata/src/lib.rs create mode 100644 vendor/regex-automata/src/macros.rs create mode 100644 
vendor/regex-automata/src/meta/error.rs create mode 100644 vendor/regex-automata/src/meta/limited.rs create mode 100644 vendor/regex-automata/src/meta/literal.rs create mode 100644 vendor/regex-automata/src/meta/mod.rs create mode 100644 vendor/regex-automata/src/meta/regex.rs create mode 100644 vendor/regex-automata/src/meta/reverse_inner.rs create mode 100644 vendor/regex-automata/src/meta/stopat.rs create mode 100644 vendor/regex-automata/src/meta/strategy.rs create mode 100644 vendor/regex-automata/src/meta/wrappers.rs create mode 100644 vendor/regex-automata/src/nfa/mod.rs create mode 100644 vendor/regex-automata/src/nfa/thompson/backtrack.rs create mode 100644 vendor/regex-automata/src/nfa/thompson/builder.rs create mode 100644 vendor/regex-automata/src/nfa/thompson/compiler.rs create mode 100644 vendor/regex-automata/src/nfa/thompson/error.rs create mode 100644 vendor/regex-automata/src/nfa/thompson/literal_trie.rs create mode 100644 vendor/regex-automata/src/nfa/thompson/map.rs create mode 100644 vendor/regex-automata/src/nfa/thompson/mod.rs create mode 100644 vendor/regex-automata/src/nfa/thompson/nfa.rs create mode 100644 vendor/regex-automata/src/nfa/thompson/pikevm.rs create mode 100644 vendor/regex-automata/src/nfa/thompson/range_trie.rs create mode 100644 vendor/regex-automata/src/util/alphabet.rs create mode 100644 vendor/regex-automata/src/util/captures.rs create mode 100644 vendor/regex-automata/src/util/determinize/mod.rs create mode 100644 vendor/regex-automata/src/util/determinize/state.rs create mode 100644 vendor/regex-automata/src/util/empty.rs create mode 100644 vendor/regex-automata/src/util/escape.rs create mode 100644 vendor/regex-automata/src/util/int.rs create mode 100644 vendor/regex-automata/src/util/interpolate.rs create mode 100644 vendor/regex-automata/src/util/iter.rs create mode 100644 vendor/regex-automata/src/util/lazy.rs create mode 100644 vendor/regex-automata/src/util/look.rs create mode 100644 vendor/regex-automata/src/util/memchr.rs create mode 100644 vendor/regex-automata/src/util/mod.rs create mode 100644 vendor/regex-automata/src/util/pool.rs create mode 100644 vendor/regex-automata/src/util/prefilter/aho_corasick.rs create mode 100644 vendor/regex-automata/src/util/prefilter/byteset.rs create mode 100644 vendor/regex-automata/src/util/prefilter/memchr.rs create mode 100644 vendor/regex-automata/src/util/prefilter/memmem.rs create mode 100644 vendor/regex-automata/src/util/prefilter/mod.rs create mode 100644 vendor/regex-automata/src/util/prefilter/teddy.rs create mode 100644 vendor/regex-automata/src/util/primitives.rs create mode 100644 vendor/regex-automata/src/util/search.rs create mode 100644 vendor/regex-automata/src/util/sparse_set.rs create mode 100644 vendor/regex-automata/src/util/start.rs create mode 100644 vendor/regex-automata/src/util/syntax.rs create mode 100644 vendor/regex-automata/src/util/unicode_data/mod.rs create mode 100644 vendor/regex-automata/src/util/unicode_data/perl_word.rs create mode 100644 vendor/regex-automata/src/util/utf8.rs create mode 100644 vendor/regex-automata/src/util/wire.rs create mode 100755 vendor/regex-automata/test create mode 100644 vendor/regex-automata/tests/dfa/api.rs create mode 100644 vendor/regex-automata/tests/dfa/mod.rs create mode 100644 vendor/regex-automata/tests/dfa/onepass/mod.rs create mode 100644 vendor/regex-automata/tests/dfa/onepass/suite.rs create mode 100644 vendor/regex-automata/tests/dfa/regression.rs create mode 100644 vendor/regex-automata/tests/dfa/suite.rs create mode 
100644 vendor/regex-automata/tests/fuzz/dense.rs create mode 100644 vendor/regex-automata/tests/fuzz/mod.rs create mode 100644 vendor/regex-automata/tests/fuzz/sparse.rs create mode 100644 vendor/regex-automata/tests/fuzz/testdata/deserialize_dense_crash-9486fb7c8a93b12c12a62166b43d31640c0208a9 create mode 100644 vendor/regex-automata/tests/fuzz/testdata/deserialize_dense_minimized-from-9486fb7c8a93b12c12a62166b43d31640c0208a9 create mode 100644 vendor/regex-automata/tests/fuzz/testdata/deserialize_sparse_crash-0da59c0434eaf35e5a6b470fa9244bb79c72b000 create mode 100644 vendor/regex-automata/tests/fuzz/testdata/deserialize_sparse_crash-18cfc246f2ddfc3dfc92b0c7893178c7cf65efa9 create mode 100644 vendor/regex-automata/tests/fuzz/testdata/deserialize_sparse_crash-61fd8e3003bf9d99f6c1e5a8488727eefd234b98 create mode 100644 vendor/regex-automata/tests/fuzz/testdata/deserialize_sparse_crash-a1b839d899ced76d5d7d0f78f9edb7a421505838 create mode 100644 vendor/regex-automata/tests/fuzz/testdata/deserialize_sparse_crash-c383ae07ec5e191422eadc492117439011816570 create mode 100644 vendor/regex-automata/tests/fuzz/testdata/deserialize_sparse_crash-d07703ceb94b10dcd9e4acb809f2051420449e2b create mode 100644 vendor/regex-automata/tests/fuzz/testdata/deserialize_sparse_crash-dbb8172d3984e7e7d03f4b5f8bb86ecd1460eff9 create mode 100644 vendor/regex-automata/tests/gen/README.md create mode 100644 vendor/regex-automata/tests/gen/dense/mod.rs create mode 100644 vendor/regex-automata/tests/gen/dense/multi_pattern_v2.rs create mode 100644 vendor/regex-automata/tests/gen/dense/multi_pattern_v2_fwd.bigendian.dfa create mode 100644 vendor/regex-automata/tests/gen/dense/multi_pattern_v2_fwd.littleendian.dfa create mode 100644 vendor/regex-automata/tests/gen/dense/multi_pattern_v2_rev.bigendian.dfa create mode 100644 vendor/regex-automata/tests/gen/dense/multi_pattern_v2_rev.littleendian.dfa create mode 100644 vendor/regex-automata/tests/gen/mod.rs create mode 100644 vendor/regex-automata/tests/gen/sparse/mod.rs create mode 100644 vendor/regex-automata/tests/gen/sparse/multi_pattern_v2.rs create mode 100644 vendor/regex-automata/tests/gen/sparse/multi_pattern_v2_fwd.bigendian.dfa create mode 100644 vendor/regex-automata/tests/gen/sparse/multi_pattern_v2_fwd.littleendian.dfa create mode 100644 vendor/regex-automata/tests/gen/sparse/multi_pattern_v2_rev.bigendian.dfa create mode 100644 vendor/regex-automata/tests/gen/sparse/multi_pattern_v2_rev.littleendian.dfa create mode 100644 vendor/regex-automata/tests/hybrid/api.rs create mode 100644 vendor/regex-automata/tests/hybrid/mod.rs create mode 100644 vendor/regex-automata/tests/hybrid/suite.rs create mode 100644 vendor/regex-automata/tests/lib.rs create mode 100644 vendor/regex-automata/tests/meta/mod.rs create mode 100644 vendor/regex-automata/tests/meta/suite.rs create mode 100644 vendor/regex-automata/tests/nfa/mod.rs create mode 100644 vendor/regex-automata/tests/nfa/thompson/backtrack/mod.rs create mode 100644 vendor/regex-automata/tests/nfa/thompson/backtrack/suite.rs create mode 100644 vendor/regex-automata/tests/nfa/thompson/mod.rs create mode 100644 vendor/regex-automata/tests/nfa/thompson/pikevm/mod.rs create mode 100644 vendor/regex-automata/tests/nfa/thompson/pikevm/suite.rs create mode 100644 vendor/regex-syntax/.cargo-checksum.json create mode 100644 vendor/regex-syntax/.cargo_vcs_info.json create mode 100644 vendor/regex-syntax/Cargo.lock create mode 100644 vendor/regex-syntax/Cargo.toml create mode 100644 vendor/regex-syntax/LICENSE-APACHE create mode 
100644 vendor/regex-syntax/LICENSE-MIT create mode 100644 vendor/regex-syntax/README.md create mode 100644 vendor/regex-syntax/benches/bench.rs create mode 100644 vendor/regex-syntax/src/ast/mod.rs create mode 100644 vendor/regex-syntax/src/ast/parse.rs create mode 100644 vendor/regex-syntax/src/ast/print.rs create mode 100644 vendor/regex-syntax/src/ast/visitor.rs create mode 100644 vendor/regex-syntax/src/debug.rs create mode 100644 vendor/regex-syntax/src/either.rs create mode 100644 vendor/regex-syntax/src/error.rs create mode 100644 vendor/regex-syntax/src/hir/interval.rs create mode 100644 vendor/regex-syntax/src/hir/literal.rs create mode 100644 vendor/regex-syntax/src/hir/mod.rs create mode 100644 vendor/regex-syntax/src/hir/print.rs create mode 100644 vendor/regex-syntax/src/hir/translate.rs create mode 100644 vendor/regex-syntax/src/hir/visitor.rs create mode 100644 vendor/regex-syntax/src/lib.rs create mode 100644 vendor/regex-syntax/src/parser.rs create mode 100644 vendor/regex-syntax/src/rank.rs create mode 100644 vendor/regex-syntax/src/unicode.rs create mode 100644 vendor/regex-syntax/src/unicode_tables/LICENSE-UNICODE create mode 100644 vendor/regex-syntax/src/unicode_tables/age.rs create mode 100644 vendor/regex-syntax/src/unicode_tables/case_folding_simple.rs create mode 100644 vendor/regex-syntax/src/unicode_tables/general_category.rs create mode 100644 vendor/regex-syntax/src/unicode_tables/grapheme_cluster_break.rs create mode 100644 vendor/regex-syntax/src/unicode_tables/mod.rs create mode 100644 vendor/regex-syntax/src/unicode_tables/perl_decimal.rs create mode 100644 vendor/regex-syntax/src/unicode_tables/perl_space.rs create mode 100644 vendor/regex-syntax/src/unicode_tables/perl_word.rs create mode 100644 vendor/regex-syntax/src/unicode_tables/property_bool.rs create mode 100644 vendor/regex-syntax/src/unicode_tables/property_names.rs create mode 100644 vendor/regex-syntax/src/unicode_tables/property_values.rs create mode 100644 vendor/regex-syntax/src/unicode_tables/script.rs create mode 100644 vendor/regex-syntax/src/unicode_tables/script_extension.rs create mode 100644 vendor/regex-syntax/src/unicode_tables/sentence_break.rs create mode 100644 vendor/regex-syntax/src/unicode_tables/word_break.rs create mode 100644 vendor/regex-syntax/src/utf8.rs create mode 100755 vendor/regex-syntax/test create mode 100644 vendor/regex/.cargo-checksum.json create mode 100644 vendor/regex/.cargo_vcs_info.json create mode 100644 vendor/regex/.vim/coc-settings.json create mode 100644 vendor/regex/CHANGELOG.md create mode 100644 vendor/regex/Cargo.lock create mode 100644 vendor/regex/Cargo.toml create mode 100644 vendor/regex/Cross.toml create mode 100644 vendor/regex/LICENSE-APACHE create mode 100644 vendor/regex/LICENSE-MIT create mode 100644 vendor/regex/README.md create mode 100644 vendor/regex/UNICODE.md create mode 100644 vendor/regex/bench/README.md create mode 100644 vendor/regex/rustfmt.toml create mode 100644 vendor/regex/src/builders.rs create mode 100644 vendor/regex/src/bytes.rs create mode 100644 vendor/regex/src/error.rs create mode 100644 vendor/regex/src/find_byte.rs create mode 100644 vendor/regex/src/lib.rs create mode 100644 vendor/regex/src/pattern.rs create mode 100644 vendor/regex/src/regex/bytes.rs create mode 100644 vendor/regex/src/regex/mod.rs create mode 100644 vendor/regex/src/regex/string.rs create mode 100644 vendor/regex/src/regexset/bytes.rs create mode 100644 vendor/regex/src/regexset/mod.rs create mode 100644 vendor/regex/src/regexset/string.rs 
create mode 100755 vendor/regex/test create mode 100644 vendor/regex/testdata/README.md create mode 100644 vendor/regex/testdata/anchored.toml create mode 100644 vendor/regex/testdata/bytes.toml create mode 100644 vendor/regex/testdata/crazy.toml create mode 100644 vendor/regex/testdata/crlf.toml create mode 100644 vendor/regex/testdata/earliest.toml create mode 100644 vendor/regex/testdata/empty.toml create mode 100644 vendor/regex/testdata/expensive.toml create mode 100644 vendor/regex/testdata/flags.toml create mode 100644 vendor/regex/testdata/fowler/basic.toml create mode 100644 vendor/regex/testdata/fowler/dat/README create mode 100644 vendor/regex/testdata/fowler/dat/basic.dat create mode 100644 vendor/regex/testdata/fowler/dat/nullsubexpr.dat create mode 100644 vendor/regex/testdata/fowler/dat/repetition.dat create mode 100644 vendor/regex/testdata/fowler/nullsubexpr.toml create mode 100644 vendor/regex/testdata/fowler/repetition.toml create mode 100644 vendor/regex/testdata/iter.toml create mode 100644 vendor/regex/testdata/leftmost-all.toml create mode 100644 vendor/regex/testdata/line-terminator.toml create mode 100644 vendor/regex/testdata/misc.toml create mode 100644 vendor/regex/testdata/multiline.toml create mode 100644 vendor/regex/testdata/no-unicode.toml create mode 100644 vendor/regex/testdata/overlapping.toml create mode 100644 vendor/regex/testdata/regex-lite.toml create mode 100644 vendor/regex/testdata/regression.toml create mode 100644 vendor/regex/testdata/set.toml create mode 100644 vendor/regex/testdata/substring.toml create mode 100644 vendor/regex/testdata/unicode.toml create mode 100644 vendor/regex/testdata/utf8.toml create mode 100644 vendor/regex/testdata/word-boundary-special.toml create mode 100644 vendor/regex/testdata/word-boundary.toml create mode 100644 vendor/regex/tests/lib.rs create mode 100644 vendor/regex/tests/misc.rs create mode 100644 vendor/regex/tests/regression.rs create mode 100644 vendor/regex/tests/regression_fuzz.rs create mode 100644 vendor/regex/tests/replace.rs create mode 100644 vendor/regex/tests/searcher.rs create mode 100644 vendor/regex/tests/suite_bytes.rs create mode 100644 vendor/regex/tests/suite_bytes_set.rs create mode 100644 vendor/regex/tests/suite_string.rs create mode 100644 vendor/regex/tests/suite_string_set.rs create mode 100644 vendor/rustc-hash/.cargo-checksum.json create mode 100644 vendor/rustc-hash/.cargo_vcs_info.json create mode 100644 vendor/rustc-hash/.github/workflows/rust.yml create mode 100644 vendor/rustc-hash/CHANGELOG.md create mode 100644 vendor/rustc-hash/CODE_OF_CONDUCT.md create mode 100644 vendor/rustc-hash/Cargo.lock create mode 100644 vendor/rustc-hash/Cargo.toml create mode 100644 vendor/rustc-hash/LICENSE-APACHE create mode 100644 vendor/rustc-hash/LICENSE-MIT create mode 100644 vendor/rustc-hash/README.md create mode 100644 vendor/rustc-hash/src/lib.rs create mode 100644 vendor/rustc-hash/src/random_state.rs create mode 100644 vendor/rustc-hash/src/seeded_state.rs create mode 100644 vendor/shlex/.cargo-checksum.json create mode 100644 vendor/shlex/.cargo_vcs_info.json create mode 100644 vendor/shlex/.github/workflows/test.yml create mode 100644 vendor/shlex/CHANGELOG.md create mode 100644 vendor/shlex/Cargo.toml create mode 100644 vendor/shlex/LICENSE-APACHE create mode 100644 vendor/shlex/LICENSE-MIT create mode 100644 vendor/shlex/README.md create mode 100644 vendor/shlex/src/bytes.rs create mode 100644 vendor/shlex/src/lib.rs create mode 100644 vendor/shlex/src/quoting_warning.md create 
mode 100644 vendor/syn/.cargo-checksum.json create mode 100644 vendor/syn/.cargo_vcs_info.json create mode 100644 vendor/syn/Cargo.lock create mode 100644 vendor/syn/Cargo.toml create mode 100644 vendor/syn/LICENSE-APACHE create mode 100644 vendor/syn/LICENSE-MIT create mode 100644 vendor/syn/README.md create mode 100644 vendor/syn/benches/file.rs create mode 100644 vendor/syn/benches/rust.rs create mode 100644 vendor/syn/src/attr.rs create mode 100644 vendor/syn/src/bigint.rs create mode 100644 vendor/syn/src/buffer.rs create mode 100644 vendor/syn/src/classify.rs create mode 100644 vendor/syn/src/custom_keyword.rs create mode 100644 vendor/syn/src/custom_punctuation.rs create mode 100644 vendor/syn/src/data.rs create mode 100644 vendor/syn/src/derive.rs create mode 100644 vendor/syn/src/discouraged.rs create mode 100644 vendor/syn/src/drops.rs create mode 100644 vendor/syn/src/error.rs create mode 100644 vendor/syn/src/export.rs create mode 100644 vendor/syn/src/expr.rs create mode 100644 vendor/syn/src/ext.rs create mode 100644 vendor/syn/src/file.rs create mode 100644 vendor/syn/src/fixup.rs create mode 100644 vendor/syn/src/gen/clone.rs create mode 100644 vendor/syn/src/gen/debug.rs create mode 100644 vendor/syn/src/gen/eq.rs create mode 100644 vendor/syn/src/gen/fold.rs create mode 100644 vendor/syn/src/gen/hash.rs create mode 100644 vendor/syn/src/gen/token.css create mode 100644 vendor/syn/src/gen/visit.rs create mode 100644 vendor/syn/src/gen/visit_mut.rs create mode 100644 vendor/syn/src/generics.rs create mode 100644 vendor/syn/src/group.rs create mode 100644 vendor/syn/src/ident.rs create mode 100644 vendor/syn/src/item.rs create mode 100644 vendor/syn/src/lib.rs create mode 100644 vendor/syn/src/lifetime.rs create mode 100644 vendor/syn/src/lit.rs create mode 100644 vendor/syn/src/lookahead.rs create mode 100644 vendor/syn/src/mac.rs create mode 100644 vendor/syn/src/macros.rs create mode 100644 vendor/syn/src/meta.rs create mode 100644 vendor/syn/src/op.rs create mode 100644 vendor/syn/src/parse.rs create mode 100644 vendor/syn/src/parse_macro_input.rs create mode 100644 vendor/syn/src/parse_quote.rs create mode 100644 vendor/syn/src/pat.rs create mode 100644 vendor/syn/src/path.rs create mode 100644 vendor/syn/src/precedence.rs create mode 100644 vendor/syn/src/print.rs create mode 100644 vendor/syn/src/punctuated.rs create mode 100644 vendor/syn/src/restriction.rs create mode 100644 vendor/syn/src/scan_expr.rs create mode 100644 vendor/syn/src/sealed.rs create mode 100644 vendor/syn/src/span.rs create mode 100644 vendor/syn/src/spanned.rs create mode 100644 vendor/syn/src/stmt.rs create mode 100644 vendor/syn/src/thread.rs create mode 100644 vendor/syn/src/token.rs create mode 100644 vendor/syn/src/tt.rs create mode 100644 vendor/syn/src/ty.rs create mode 100644 vendor/syn/src/verbatim.rs create mode 100644 vendor/syn/src/whitespace.rs create mode 100644 vendor/syn/tests/common/eq.rs create mode 100644 vendor/syn/tests/common/mod.rs create mode 100644 vendor/syn/tests/common/parse.rs create mode 100644 vendor/syn/tests/common/visit.rs create mode 100644 vendor/syn/tests/debug/gen.rs create mode 100644 vendor/syn/tests/debug/mod.rs create mode 100644 vendor/syn/tests/macros/mod.rs create mode 100644 vendor/syn/tests/regression.rs create mode 100644 vendor/syn/tests/regression/issue1108.rs create mode 100644 vendor/syn/tests/regression/issue1235.rs create mode 100644 vendor/syn/tests/repo/mod.rs create mode 100644 vendor/syn/tests/repo/progress.rs create mode 100644 
vendor/syn/tests/snapshot/mod.rs create mode 100644 vendor/syn/tests/test_asyncness.rs create mode 100644 vendor/syn/tests/test_attribute.rs create mode 100644 vendor/syn/tests/test_derive_input.rs create mode 100644 vendor/syn/tests/test_expr.rs create mode 100644 vendor/syn/tests/test_generics.rs create mode 100644 vendor/syn/tests/test_grouping.rs create mode 100644 vendor/syn/tests/test_ident.rs create mode 100644 vendor/syn/tests/test_item.rs create mode 100644 vendor/syn/tests/test_lit.rs create mode 100644 vendor/syn/tests/test_meta.rs create mode 100644 vendor/syn/tests/test_parse_buffer.rs create mode 100644 vendor/syn/tests/test_parse_quote.rs create mode 100644 vendor/syn/tests/test_parse_stream.rs create mode 100644 vendor/syn/tests/test_pat.rs create mode 100644 vendor/syn/tests/test_path.rs create mode 100644 vendor/syn/tests/test_precedence.rs create mode 100644 vendor/syn/tests/test_punctuated.rs create mode 100644 vendor/syn/tests/test_receiver.rs create mode 100644 vendor/syn/tests/test_round_trip.rs create mode 100644 vendor/syn/tests/test_shebang.rs create mode 100644 vendor/syn/tests/test_size.rs create mode 100644 vendor/syn/tests/test_stmt.rs create mode 100644 vendor/syn/tests/test_token_trees.rs create mode 100644 vendor/syn/tests/test_ty.rs create mode 100644 vendor/syn/tests/test_unparenthesize.rs create mode 100644 vendor/syn/tests/test_visibility.rs create mode 100644 vendor/syn/tests/zzz_stable.rs create mode 100644 vendor/unicode-ident/.cargo-checksum.json create mode 100644 vendor/unicode-ident/.cargo_vcs_info.json create mode 100644 vendor/unicode-ident/.github/FUNDING.yml create mode 100644 vendor/unicode-ident/.github/workflows/ci.yml create mode 100644 vendor/unicode-ident/Cargo.lock create mode 100644 vendor/unicode-ident/Cargo.toml create mode 100644 vendor/unicode-ident/LICENSE-APACHE create mode 100644 vendor/unicode-ident/LICENSE-MIT create mode 100644 vendor/unicode-ident/LICENSE-UNICODE create mode 100644 vendor/unicode-ident/README.md create mode 100644 vendor/unicode-ident/benches/xid.rs create mode 100644 vendor/unicode-ident/src/lib.rs create mode 100644 vendor/unicode-ident/src/tables.rs create mode 100644 vendor/unicode-ident/tests/compare.rs create mode 100644 vendor/unicode-ident/tests/fst/.gitignore create mode 100644 vendor/unicode-ident/tests/fst/mod.rs create mode 100644 vendor/unicode-ident/tests/fst/xid_continue.fst create mode 100644 vendor/unicode-ident/tests/fst/xid_start.fst create mode 100644 vendor/unicode-ident/tests/roaring/mod.rs create mode 100644 vendor/unicode-ident/tests/static_size.rs create mode 100644 vendor/unicode-ident/tests/tables/mod.rs create mode 100644 vendor/unicode-ident/tests/tables/tables.rs create mode 100644 vendor/unicode-ident/tests/trie/mod.rs create mode 100644 vendor/unicode-ident/tests/trie/trie.rs create mode 100644 vendor/windows-link/.cargo-checksum.json create mode 100644 vendor/windows-link/.cargo_vcs_info.json create mode 100644 vendor/windows-link/Cargo.lock create mode 100644 vendor/windows-link/Cargo.toml create mode 100644 vendor/windows-link/license-apache-2.0 create mode 100644 vendor/windows-link/license-mit create mode 100644 vendor/windows-link/readme.md create mode 100644 vendor/windows-link/src/lib.rs diff --git a/vendor/aho-corasick/.cargo-checksum.json b/vendor/aho-corasick/.cargo-checksum.json new file mode 100644 index 00000000000000..720dfa888c8cbc --- /dev/null +++ b/vendor/aho-corasick/.cargo-checksum.json @@ -0,0 +1 @@ 
+{"files":{".cargo_vcs_info.json":"ace303bf25937bf488919e5461f3994d1b88a19a40465ec8342c63af89cbebf7",".github/FUNDING.yml":"0c65f392d32a8639ba7986bbb42ca124505b462122382f314c89d84c95dd27f1",".github/workflows/ci.yml":"0605d9327a4633916dc789008d5686c692656bb3e1ee57f821f8537e9ad7d7b4",".vim/coc-settings.json":"8237c8f41db352b0d83f1bb10a60bc2f60f56f3234afbf696b4075c8d4d62d9b","COPYING":"01c266bced4a434da0051174d6bee16a4c82cf634e2679b6155d40d75012390f","Cargo.lock":"395d3e76f284190cef50c807ab2f00b9a5d388fde7a7bf88b73b02ed9fd346d1","Cargo.toml":"9384d7c725c5c2ebc8adc602081e7cbce8b214693e9e27edef1c40f33e925810","Cargo.toml.orig":"05304eb8b8821d48c0c4d2e991b9ed0f1a0b68cb70afb8881b81c4c317969663","DESIGN.md":"59c960e1b73b1d7fb41e4df6c0c1b1fcf44dd2ebc8a349597a7d0595f8cb5130","LICENSE-MIT":"0f96a83840e146e43c0ec96a22ec1f392e0680e6c1226e6f3ba87e0740af850f","README.md":"afc4d559a98cf190029af0bf320fc0022725e349cd2a303aac860254e28f3c53","UNLICENSE":"7e12e5df4bae12cb21581ba157ced20e1986a0508dd10d0e8a4ab9a4cf94e85c","rustfmt.toml":"1ca600239a27401c4a43f363cf3f38183a212affc1f31bff3ae93234bbaec228","src/ahocorasick.rs":"c699c07df70be45c666e128509ad571a7649d2073e4ae16ac1efd6793c9c6890","src/automaton.rs":"22258a3e118672413119f8f543a9b912cce954e63524575c0ebfdf9011f9c2dd","src/dfa.rs":"197075923eb9d760a552f4e8652310fd4f657736613a9b1444ae05ef5d525da3","src/lib.rs":"66dea84d227f269b2f14ecc8109a97e96245b56c22eef0e8ce03b2343b8d6e66","src/macros.rs":"c6c52ae05b24433cffaca7b78b3645d797862c5d5feffddf9f54909095ed6e05","src/nfa/contiguous.rs":"f435c131ce84927e5600109722d006533ea21442dddaf18e03286d8caed82389","src/nfa/mod.rs":"ee7b3109774d14bbad5239c16bb980dd6b8185ec136d94fbaf2f0dc27d5ffa15","src/nfa/noncontiguous.rs":"de94f02b04efd8744fb096759a8897c22012b0e0ca3ace161fd87c71befefe04","src/packed/api.rs":"2197077ff7d7c731ae03a72bed0ae52d89fee56c5564be076313c9a573ce5013","src/packed/ext.rs":"66be06fde8558429da23a290584d4b9fae665bf64c2578db4fe5f5f3ee864869","src/packed/mod.rs":"0020cd6f07ba5c8955923a9516d7f758864260eda53a6b6f629131c45ddeec62","src/packed/pattern.rs":"0e4bca57d4b941495d31fc8246ad32904eed0cd89e3cda732ad35f4deeba3bef","src/packed/rabinkarp.rs":"403146eb1d838a84601d171393542340513cd1ee7ff750f2372161dd47746586","src/packed/teddy/README.md":"3a43194b64e221543d885176aba3beb1224a927385a20eca842daf6b0ea2f342","src/packed/teddy/builder.rs":"08ec116a4a842a2bb1221d296a2515ef3672c54906bed588fb733364c07855d3","src/packed/teddy/generic.rs":"ea252ab05b32cea7dd9d71e332071d243db7dd0362e049252a27e5881ba2bf39","src/packed/teddy/mod.rs":"17d741f7e2fb9dbac5ba7d1bd4542cf1e35e9f146ace728e23fe6bbed20028b2","src/packed/tests.rs":"8e2f56eb3890ed3876ecb47d3121996e416563127b6430110d7b516df3f83b4b","src/packed/vector.rs":"70c325cfa6f7c5c4c9a6af7b133b75a29e65990a7fe0b9a4c4ce3c3d5a0fe587","src/tests.rs":"c68192ab97b6161d0d6ee96fefd80cc7d14e4486ddcd8d1f82b5c92432c24ed5","src/transducer.rs":"02daa33a5d6dac41dcfd67f51df7c0d4a91c5131c781fb54c4de3520c585a6e1","src/util/alphabet.rs":"6dc22658a38deddc0279892035b18870d4585069e35ba7c7e649a24509acfbcc","src/util/buffer.rs":"f9e37f662c46c6ecd734458dedbe76c3bb0e84a93b6b0117c0d4ad3042413891","src/util/byte_frequencies.rs":"2fb85b381c038c1e44ce94294531cdcd339dca48b1e61f41455666e802cbbc9e","src/util/debug.rs":"ab301ad59aa912529cb97233a54a05914dd3cb2ec43e6fec7334170b97ac5998","src/util/error.rs":"ecccd60e7406305023efcc6adcc826eeeb083ab8f7fbfe3d97469438cd4c4e5c","src/util/int.rs":"e264e6abebf5622b59f6500210773db36048371c4e509c930263334095959a52","src/util/mod.rs":"7ab28d11323ecdbd982087f32eb8bceeee84f1a25
83f3aae27039c36d58cf12c","src/util/prefilter.rs":"183e32aa9951d9957f89062e4a6ae7235df7060722a3c91995a3d36db5a98111","src/util/primitives.rs":"f89f3fa1d8db4e37de9ca767c6d05e346404837cade6d063bba68972fafa610b","src/util/remapper.rs":"9f12d911583a325c11806eeceb46d0dfec863cfcfa241aed84d31af73da746e5","src/util/search.rs":"6af803e08b8b8c8a33db100623f1621b0d741616524ce40893d8316897f27ffe","src/util/special.rs":"7d2f9cb9dd9771f59816e829b2d96b1239996f32939ba98764e121696c52b146"},"package":"ddd31a130427c27518df266943a5308ed92d4b226cc639f5a8f1002816174301"} \ No newline at end of file diff --git a/vendor/aho-corasick/.cargo_vcs_info.json b/vendor/aho-corasick/.cargo_vcs_info.json new file mode 100644 index 00000000000000..51b411079c4244 --- /dev/null +++ b/vendor/aho-corasick/.cargo_vcs_info.json @@ -0,0 +1,6 @@ +{ + "git": { + "sha1": "17f8b32e3b7c845ef3c5429b823804f552f14ec9" + }, + "path_in_vcs": "" +} \ No newline at end of file diff --git a/vendor/aho-corasick/.github/FUNDING.yml b/vendor/aho-corasick/.github/FUNDING.yml new file mode 100644 index 00000000000000..2869fec98f72fb --- /dev/null +++ b/vendor/aho-corasick/.github/FUNDING.yml @@ -0,0 +1 @@ +github: [BurntSushi] diff --git a/vendor/aho-corasick/.github/workflows/ci.yml b/vendor/aho-corasick/.github/workflows/ci.yml new file mode 100644 index 00000000000000..f1b34cf80418b5 --- /dev/null +++ b/vendor/aho-corasick/.github/workflows/ci.yml @@ -0,0 +1,148 @@ +name: ci +on: + pull_request: + push: + branches: + - master + schedule: + - cron: '00 01 * * *' + +# The section is needed to drop write-all permissions that are granted on +# `schedule` event. By specifying any permission explicitly all others are set +# to none. By using the principle of least privilege the damage a compromised +# workflow can do (because of an injection or compromised third party tool or +# action) is restricted. Currently the worklow doesn't need any additional +# permission except for pulling the code. Adding labels to issues, commenting +# on pull-requests, etc. may need additional permissions: +# +# Syntax for this section: +# https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#permissions +# +# Reference for how to assign permissions on a job-by-job basis: +# https://docs.github.com/en/actions/using-jobs/assigning-permissions-to-jobs +# +# Reference for available permissions that we can enable if needed: +# https://docs.github.com/en/actions/security-guides/automatic-token-authentication#permissions-for-the-github_token +permissions: + # to fetch code (actions/checkout) + contents: read + +jobs: + test: + name: test + env: + # For some builds, we use cross to test on 32-bit and big-endian + # systems. + CARGO: cargo + # When CARGO is set to CROSS, TARGET is set to `--target matrix.target`. + # Note that we only use cross on Linux, so setting a target on a + # different OS will just use normal cargo. + TARGET: + # Bump this as appropriate. We pin to a version to make sure CI + # continues to work as cross releases in the past have broken things + # in subtle ways. 
+ CROSS_VERSION: v0.2.5 + runs-on: ${{ matrix.os }} + strategy: + fail-fast: false + matrix: + include: + - build: pinned + os: ubuntu-latest + rust: 1.60.0 + - build: stable + os: ubuntu-latest + rust: stable + - build: stable-x86 + os: ubuntu-latest + rust: stable + target: i686-unknown-linux-gnu + - build: stable-aarch64 + os: ubuntu-latest + rust: stable + target: aarch64-unknown-linux-gnu + - build: stable-powerpc64 + os: ubuntu-latest + rust: stable + target: powerpc64-unknown-linux-gnu + - build: stable-s390x + os: ubuntu-latest + rust: stable + target: s390x-unknown-linux-gnu + - build: beta + os: ubuntu-latest + rust: beta + - build: nightly + os: ubuntu-latest + rust: nightly + - build: macos + os: macos-latest + rust: stable + - build: win-msvc + os: windows-latest + rust: stable + - build: win-gnu + os: windows-latest + rust: stable-x86_64-gnu + steps: + - name: Checkout repository + uses: actions/checkout@v3 + - name: Install Rust + uses: dtolnay/rust-toolchain@master + with: + toolchain: ${{ matrix.rust }} + - name: Install and configure Cross + if: matrix.os == 'ubuntu-latest' && matrix.target != '' + run: | + # In the past, new releases of 'cross' have broken CI. So for now, we + # pin it. We also use their pre-compiled binary releases because cross + # has over 100 dependencies and takes a bit to compile. + dir="$RUNNER_TEMP/cross-download" + mkdir "$dir" + echo "$dir" >> $GITHUB_PATH + cd "$dir" + curl -LO "https://github.com/cross-rs/cross/releases/download/$CROSS_VERSION/cross-x86_64-unknown-linux-musl.tar.gz" + tar xf cross-x86_64-unknown-linux-musl.tar.gz + + # We used to install 'cross' from master, but it kept failing. So now + # we build from a known-good version until 'cross' becomes more stable + # or we find an alternative. Notably, between v0.2.1 and current + # master (2022-06-14), the number of Cross's dependencies has doubled. 
+ echo "CARGO=cross" >> $GITHUB_ENV + echo "TARGET=--target ${{ matrix.target }}" >> $GITHUB_ENV + - name: Show command used for Cargo + run: | + echo "cargo command is: ${{ env.CARGO }}" + echo "target flag is: ${{ env.TARGET }}" + - name: Show CPU info for debugging + if: matrix.os == 'ubuntu-latest' + run: lscpu + # See: https://github.com/rust-lang/regex/blob/a2887636930156023172e4b376a6febad4e49120/.github/workflows/ci.yml#L145-L163 + - name: Pin memchr to 2.6.2 + if: matrix.build == 'pinned' + run: cargo update -p memchr --precise 2.6.2 + - run: ${{ env.CARGO }} build --verbose $TARGET + - run: ${{ env.CARGO }} doc --verbose $TARGET + - run: ${{ env.CARGO }} test --verbose $TARGET + - run: ${{ env.CARGO }} test --lib --verbose --no-default-features --features std,perf-literal $TARGET + - run: ${{ env.CARGO }} test --lib --verbose --no-default-features $TARGET + - run: ${{ env.CARGO }} test --lib --verbose --no-default-features --features std $TARGET + - run: ${{ env.CARGO }} test --lib --verbose --no-default-features --features perf-literal $TARGET + - run: ${{ env.CARGO }} test --lib --verbose --no-default-features --features std,perf-literal,logging $TARGET + - if: matrix.build == 'nightly' + run: ${{ env.CARGO }} build --manifest-path aho-corasick-debug/Cargo.toml $TARGET + + rustfmt: + name: rustfmt + runs-on: ubuntu-latest + steps: + - name: Checkout repository + uses: actions/checkout@v3 + - name: Install Rust + uses: dtolnay/rust-toolchain@master + with: + toolchain: stable + components: rustfmt + - name: Check formatting + run: | + cargo fmt --all -- --check diff --git a/vendor/aho-corasick/.vim/coc-settings.json b/vendor/aho-corasick/.vim/coc-settings.json new file mode 100644 index 00000000000000..887eb6fab6f5e8 --- /dev/null +++ b/vendor/aho-corasick/.vim/coc-settings.json @@ -0,0 +1,12 @@ +{ + "rust-analyzer.linkedProjects": [ + "aho-corasick-debug/Cargo.toml", + "benchmarks/engines/rust-aho-corasick/Cargo.toml", + "benchmarks/engines/rust-daachorse/Cargo.toml", + "benchmarks/engines/rust-jetscii/Cargo.toml", + "benchmarks/engines/naive/Cargo.toml", + "benchmarks/shared/Cargo.toml", + "fuzz/Cargo.toml", + "Cargo.toml" + ] +} diff --git a/vendor/aho-corasick/COPYING b/vendor/aho-corasick/COPYING new file mode 100644 index 00000000000000..bb9c20a094e41b --- /dev/null +++ b/vendor/aho-corasick/COPYING @@ -0,0 +1,3 @@ +This project is dual-licensed under the Unlicense and MIT licenses. + +You may use this code under the terms of either license. diff --git a/vendor/aho-corasick/Cargo.lock b/vendor/aho-corasick/Cargo.lock new file mode 100644 index 00000000000000..597fa43801cfe7 --- /dev/null +++ b/vendor/aho-corasick/Cargo.lock @@ -0,0 +1,39 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. 
+version = 3 + +[[package]] +name = "aho-corasick" +version = "1.1.4" +dependencies = [ + "doc-comment", + "log", + "memchr", +] + +[[package]] +name = "cfg-if" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" + +[[package]] +name = "doc-comment" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fea41bba32d969b513997752735605054bc0dfa92b4c56bf1189f2e174be7a10" + +[[package]] +name = "log" +version = "0.4.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "abb12e687cfb44aa40f41fc3978ef76448f9b6038cad6aef4259d3c095a2382e" +dependencies = [ + "cfg-if", +] + +[[package]] +name = "memchr" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d" diff --git a/vendor/aho-corasick/Cargo.toml b/vendor/aho-corasick/Cargo.toml new file mode 100644 index 00000000000000..c4492a0170b083 --- /dev/null +++ b/vendor/aho-corasick/Cargo.toml @@ -0,0 +1,80 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies. +# +# If you are reading this file be aware that the original Cargo.toml +# will likely look very different (and much more reasonable). +# See Cargo.toml.orig for the original contents. + +[package] +edition = "2021" +rust-version = "1.60.0" +name = "aho-corasick" +version = "1.1.4" +authors = ["Andrew Gallant "] +build = false +exclude = [ + "/aho-corasick-debug", + "/benchmarks", + "/tmp", +] +autolib = false +autobins = false +autoexamples = false +autotests = false +autobenches = false +description = "Fast multiple substring searching." +homepage = "https://github.com/BurntSushi/aho-corasick" +readme = "README.md" +keywords = [ + "string", + "search", + "text", + "pattern", + "multi", +] +categories = ["text-processing"] +license = "Unlicense OR MIT" +repository = "https://github.com/BurntSushi/aho-corasick" + +[package.metadata.docs.rs] +all-features = true +rustdoc-args = [ + "--cfg", + "docsrs", + "--generate-link-to-definition", +] + +[features] +default = [ + "std", + "perf-literal", +] +logging = ["dep:log"] +perf-literal = ["dep:memchr"] +std = ["memchr?/std"] + +[lib] +name = "aho_corasick" +path = "src/lib.rs" + +[dependencies.log] +version = "0.4.17" +optional = true + +[dependencies.memchr] +version = "2.4.0" +optional = true +default-features = false + +[dev-dependencies.doc-comment] +version = "0.3.3" + +[profile.bench] +debug = 2 + +[profile.release] +debug = 2 diff --git a/vendor/aho-corasick/DESIGN.md b/vendor/aho-corasick/DESIGN.md new file mode 100644 index 00000000000000..f911f0c3ada977 --- /dev/null +++ b/vendor/aho-corasick/DESIGN.md @@ -0,0 +1,481 @@ +This document describes the internal design of this crate, which is an object +lesson in what happens when you take a fairly simple old algorithm like +Aho-Corasick and make it fast and production ready. + +The target audience of this document is Rust programmers that have some +familiarity with string searching, however, one does not need to know the +Aho-Corasick algorithm in order to read this (it is explained below). One +should, however, know what a trie is. 
(If you don't, go read its Wikipedia +article.) + +The center-piece of this crate is an implementation of Aho-Corasick. On its +own, Aho-Corasick isn't that complicated. The complex pieces come from the +different variants of Aho-Corasick implemented in this crate. Specifically, +they are: + +* Aho-Corasick as a noncontiguous NFA. States have their transitions + represented sparsely, and each state puts its transitions in its own separate + allocation. Hence the same "noncontiguous." +* Aho-Corasick as a contiguous NFA. This NFA uses a single allocation to + represent the transitions of all states. That is, transitions are laid out + contiguously in memory. Moreover, states near the starting state are + represented densely, such that finding the next state ID takes a constant + number of instructions. +* Aho-Corasick as a DFA. In this case, all states are represented densely in + a transition table that uses one allocation. +* Supporting "standard" match semantics, along with its overlapping variant, + in addition to leftmost-first and leftmost-longest semantics. The "standard" + semantics are typically what you see in a textbook description of + Aho-Corasick. However, Aho-Corasick is also useful as an optimization in + regex engines, which often use leftmost-first or leftmost-longest semantics. + Thus, it is useful to implement those semantics here. The "standard" and + "leftmost" search algorithms are subtly different, and also require slightly + different construction algorithms. +* Support for ASCII case insensitive matching. +* Support for accelerating searches when the patterns all start with a small + number of fixed bytes. Or alternatively, when the patterns all contain a + small number of rare bytes. (Searching for these bytes uses SIMD vectorized + code courtesy of `memchr`.) +* Transparent support for alternative SIMD vectorized search routines for + smaller number of literals, such as the Teddy algorithm. We called these + "packed" search routines because they use SIMD. They can often be an order of + magnitude faster than just Aho-Corasick, but don't scale as well. +* Support for searching streams. This can reuse most of the underlying code, + but does require careful buffering support. +* Support for anchored searches, which permit efficient "is prefix" checks for + a large number of patterns. + +When you combine all of this together along with trying to make everything as +fast as possible, what you end up with is enitrely too much code with too much +`unsafe`. Alas, I was not smart enough to figure out how to reduce it. Instead, +we will explain it. + + +# Basics + +The fundamental problem this crate is trying to solve is to determine the +occurrences of possibly many patterns in a haystack. The naive way to solve +this is to look for a match for each pattern at each position in the haystack: + + for i in 0..haystack.len(): + for p in patterns.iter(): + if haystack[i..].starts_with(p.bytes()): + return Match(p.id(), i, i + p.bytes().len()) + +Those four lines are effectively all this crate does. The problem with those +four lines is that they are very slow, especially when you're searching for a +large number of patterns. + +While there are many different algorithms available to solve this, a popular +one is Aho-Corasick. It's a common solution because it's not too hard to +implement, scales quite well even when searching for thousands of patterns and +is generally pretty fast. 
Aho-Corasick does well here because, regardless of +the number of patterns you're searching for, it always visits each byte in the +haystack exactly once. This means, generally speaking, adding more patterns to +an Aho-Corasick automaton does not make it slower. (Strictly speaking, however, +this is not true, since a larger automaton will make less effective use of the +CPU's cache.) + +Aho-Corasick can be succinctly described as a trie with state transitions +between some of the nodes that efficiently instruct the search algorithm to +try matching alternative keys in the trie. The trick is that these state +transitions are arranged such that each byte of input needs to be inspected +only once. These state transitions are typically called "failure transitions," +because they instruct the searcher (the thing traversing the automaton while +reading from the haystack) what to do when a byte in the haystack does not +correspond to a valid transition in the current state of the trie. + +More formally, a failure transition points to a state in the automaton that may +lead to a match whose prefix is a proper suffix of the path traversed through +the trie so far. (If no such proper suffix exists, then the failure transition +points back to the start state of the trie, effectively restarting the search.) +This is perhaps simpler to explain pictorally. For example, let's say we built +an Aho-Corasick automaton with the following patterns: 'abcd' and 'cef'. The +trie looks like this: + + a - S1 - b - S2 - c - S3 - d - S4* + / + S0 - c - S5 - e - S6 - f - S7* + +where states marked with a `*` are match states (meaning, the search algorithm +should stop and report a match to the caller). + +So given this trie, it should be somewhat straight-forward to see how it can +be used to determine whether any particular haystack *starts* with either +`abcd` or `cef`. It's easy to express this in code: + + fn has_prefix(trie: &Trie, haystack: &[u8]) -> bool { + let mut state_id = trie.start(); + // If the empty pattern is in trie, then state_id is a match state. + if trie.is_match(state_id) { + return true; + } + for (i, &b) in haystack.iter().enumerate() { + state_id = match trie.next_state(state_id, b) { + Some(id) => id, + // If there was no transition for this state and byte, then we know + // the haystack does not start with one of the patterns in our trie. + None => return false, + }; + if trie.is_match(state_id) { + return true; + } + } + false + } + +And that's pretty much it. All we do is move through the trie starting with the +bytes at the beginning of the haystack. If we find ourselves in a position +where we can't move, or if we've looked through the entire haystack without +seeing a match state, then we know the haystack does not start with any of the +patterns in the trie. + +The meat of the Aho-Corasick algorithm is in how we add failure transitions to +our trie to keep searching efficient. Specifically, it permits us to not only +check whether a haystack *starts* with any one of a number of patterns, but +rather, whether the haystack contains any of a number of patterns *anywhere* in +the haystack. + +As mentioned before, failure transitions connect a proper suffix of the path +traversed through the trie before, with a path that leads to a match that has a +prefix corresponding to that proper suffix. So in our case, for patterns `abcd` +and `cef`, with a haystack `abcef`, we want to transition to state `S5` (from +the diagram above) from `S3` upon seeing that the byte following `c` is not +`d`. 
Namely, the proper suffix in this example is `c`, which is a prefix of +`cef`. So the modified diagram looks like this: + + + a - S1 - b - S2 - c - S3 - d - S4* + / / + / ---------------- + / / + S0 - c - S5 - e - S6 - f - S7* + +One thing that isn't shown in this diagram is that *all* states have a failure +transition, but only `S3` has a *non-trivial* failure transition. That is, all +other states have a failure transition back to the start state. So if our +haystack was `abzabcd`, then the searcher would transition back to `S0` after +seeing `z`, which effectively restarts the search. (Because there is no pattern +in our trie that has a prefix of `bz` or `z`.) + +The code for traversing this *automaton* or *finite state machine* (it is no +longer just a trie) is not that much different from the `has_prefix` code +above: + + fn contains(fsm: &FiniteStateMachine, haystack: &[u8]) -> bool { + let mut state_id = fsm.start(); + // If the empty pattern is in fsm, then state_id is a match state. + if fsm.is_match(state_id) { + return true; + } + for (i, &b) in haystack.iter().enumerate() { + // While the diagram above doesn't show this, we may wind up needing + // to follow multiple failure transitions before we land on a state + // in which we can advance. Therefore, when searching for the next + // state, we need to loop until we don't see a failure transition. + // + // This loop terminates because the start state has no empty + // transitions. Every transition from the start state either points to + // another state, or loops back to the start state. + loop { + match fsm.next_state(state_id, b) { + Some(id) => { + state_id = id; + break; + } + // Unlike our code above, if there was no transition for this + // state, then we don't quit. Instead, we look for this state's + // failure transition and follow that instead. + None => { + state_id = fsm.next_fail_state(state_id); + } + }; + } + if fsm.is_match(state_id) { + return true; + } + } + false + } + +Other than the complication around traversing failure transitions, this code +is still roughly "traverse the automaton with bytes from the haystack, and quit +when a match is seen." + +And that concludes our section on the basics. While we didn't go deep into how +the automaton is built (see `src/nfa/noncontiguous.rs`, which has detailed +comments about that), the basic structure of Aho-Corasick should be reasonably +clear. + + +# NFAs and DFAs + +There are generally two types of finite automata: non-deterministic finite +automata (NFA) and deterministic finite automata (DFA). The difference between +them is, principally, that an NFA can be in multiple states at once. This is +typically accomplished by things called _epsilon_ transitions, where one could +move to a new state without consuming any bytes from the input. (The other +mechanism by which NFAs can be in more than one state is where the same byte in +a particular state transitions to multiple distinct states.) In contrast, a DFA +can only ever be in one state at a time. A DFA has no epsilon transitions, and +for any given state, a byte transitions to at most one other state. + +By this formulation, the Aho-Corasick automaton described in the previous +section is an NFA. This is because failure transitions are, effectively, +epsilon transitions. That is, whenever the automaton is in state `S`, it is +actually in the set of states that are reachable by recursively following +failure transitions from `S` until you reach the start state. 
(This means +that, for example, the start state is always active since the start state is +reachable via failure transitions from any state in the automaton.) + +NFAs have a lot of nice properties. They tend to be easier to construct, and +also tend to use less memory. However, their primary downside is that they are +typically slower to execute a search with. For example, the code above showing +how to search with an Aho-Corasick automaton needs to potentially iterate +through many failure transitions for every byte of input. While this is a +fairly small amount of overhead, this can add up, especially if the automaton +has a lot of overlapping patterns with a lot of failure transitions. + +A DFA's search code, by contrast, looks like this: + + fn contains(dfa: &DFA, haystack: &[u8]) -> bool { + let mut state_id = dfa.start(); + // If the empty pattern is in dfa, then state_id is a match state. + if dfa.is_match(state_id) { + return true; + } + for (i, &b) in haystack.iter().enumerate() { + // An Aho-Corasick DFA *never* has a missing state that requires + // failure transitions to be followed. One byte of input advances the + // automaton by one state. Always. + state_id = dfa.next_state(state_id, b); + if dfa.is_match(state_id) { + return true; + } + } + false + } + +The search logic here is much simpler than for the NFA, and this tends to +translate into significant performance benefits as well, since there's a lot +less work being done for each byte in the haystack. How is this accomplished? +It's done by pre-following all failure transitions for all states for all bytes +in the alphabet, and then building a single state transition table. Building +this DFA can be much more costly than building the NFA, and use much more +memory, but the better performance can be worth it. + +Users of this crate can actually choose between using one of two possible NFAs +(noncontiguous or contiguous) or a DFA. By default, a contiguous NFA is used, +in most circumstances, but if the number of patterns is small enough a DFA will +be used. A contiguous NFA is chosen because it uses orders of magnitude less +memory than a DFA, takes only a little longer to build than a noncontiguous +NFA and usually gets pretty close to the search speed of a DFA. (Callers can +override this automatic selection via the `AhoCorasickBuilder::start_kind` +configuration.) + + +# More DFA tricks + +As described in the previous section, one of the downsides of using a DFA +is that it uses more memory and can take longer to build. One small way of +mitigating these concerns is to map the alphabet used by the automaton into +a smaller space. Typically, the alphabet of a DFA has 256 elements in it: +one element for each possible value that fits into a byte. However, in many +cases, one does not need the full alphabet. For example, if all patterns in an +Aho-Corasick automaton are ASCII letters, then this only uses up 52 distinct +bytes. As far as the automaton is concerned, the rest of the 204 bytes are +indistinguishable from one another: they will never disrciminate between a +match or a non-match. Therefore, in cases like that, the alphabet can be shrunk +to just 53 elements. One for each ASCII letter, and then another to serve as a +placeholder for every other unused byte. + +In practice, this library doesn't quite compute the optimal set of equivalence +classes, but it's close enough in most cases. The key idea is that this then +allows the transition table for the DFA to be potentially much smaller. 
The +downside of doing this, however, is that since the transition table is defined +in terms of this smaller alphabet space, every byte in the haystack must be +re-mapped to this smaller space. This requires an additional 256-byte table. +In practice, this can lead to a small search time hit, but it can be difficult +to measure. Moreover, it can sometimes lead to faster search times for bigger +automata, since it could be difference between more parts of the automaton +staying in the CPU cache or not. + +One other trick for DFAs employed by this crate is the notion of premultiplying +state identifiers. Specifically, the normal way to compute the next transition +in a DFA is via the following (assuming that the transition table is laid out +sequentially in memory, in row-major order, where the rows are states): + + next_state_id = dfa.transitions[current_state_id * 256 + current_byte] + +However, since the value `256` is a fixed constant, we can actually premultiply +the state identifiers in the table when we build the table initially. Then, the +next transition computation simply becomes: + + next_state_id = dfa.transitions[current_state_id + current_byte] + +This doesn't seem like much, but when this is being executed for every byte of +input that you're searching, saving that extra multiplication instruction can +add up. + +The same optimization works even when equivalence classes are enabled, as +described above. The only difference is that the premultiplication is by the +total number of equivalence classes instead of 256. + +There isn't much downside to premultiplying state identifiers, other than it +imposes a smaller limit on the total number of states in the DFA. Namely, with +premultiplied state identifiers, you run out of room in your state identifier +representation more rapidly than if the identifiers are just state indices. + +Both equivalence classes and premultiplication are always enabled. There is a +`AhoCorasickBuilder::byte_classes` configuration, but disabling this just makes +it so there are always 256 equivalence classes, i.e., every class corresponds +to precisely one byte. When it's disabled, the equivalence class map itself is +still used. The purpose of disabling it is when one is debugging the underlying +automaton. It can be easier to comprehend when it uses actual byte values for +its transitions instead of equivalence classes. + + +# Match semantics + +One of the more interesting things about this implementation of Aho-Corasick +that (as far as this author knows) separates it from other implementations, is +that it natively supports leftmost-first and leftmost-longest match semantics. +Briefly, match semantics refer to the decision procedure by which searching +will disambiguate matches when there are multiple to choose from: + +* **standard** match semantics emits matches as soon as they are detected by + the automaton. This is typically equivalent to the textbook non-overlapping + formulation of Aho-Corasick. +* **leftmost-first** match semantics means that 1) the next match is the match + starting at the leftmost position and 2) among multiple matches starting at + the same leftmost position, the match corresponding to the pattern provided + first by the caller is reported. +* **leftmost-longest** is like leftmost-first, except when there are multiple + matches starting at the same leftmost position, the pattern corresponding to + the longest match is returned. 
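+
+For a quick illustration of how these semantics differ in practice (a brief
+sketch using the crate's public `AhoCorasick` and `MatchKind` API as shown in
+the crate's README), consider the patterns `Sam` and `Samwise` run against the
+haystack `Samwise`:
+
+    use aho_corasick::{AhoCorasick, MatchKind};
+
+    fn compare_semantics() {
+        let patterns = &["Sam", "Samwise"];
+        let haystack = "Samwise";
+
+        // Standard semantics: "Sam" is detected first (after three bytes of
+        // input), so it is the match that gets reported.
+        let standard = AhoCorasick::new(patterns).unwrap();
+        let m = standard.find(haystack).unwrap();
+        assert_eq!("Sam", &haystack[m.start()..m.end()]);
+
+        // Leftmost-longest semantics: both matches start at offset 0, so the
+        // longer pattern "Samwise" wins.
+        let longest = AhoCorasick::builder()
+            .match_kind(MatchKind::LeftmostLongest)
+            .build(patterns)
+            .unwrap();
+        let m = longest.find(haystack).unwrap();
+        assert_eq!("Samwise", &haystack[m.start()..m.end()]);
+    }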
+ +(The crate API documentation discusses these differences, with examples, in +more depth on the `MatchKind` type.) + +The reason why supporting these match semantics is important is because it +gives the user more control over the match procedure. For example, +leftmost-first permits users to implement match priority by simply putting the +higher priority patterns first. Leftmost-longest, on the other hand, permits +finding the longest possible match, which might be useful when trying to find +words matching a dictionary. Additionally, regex engines often want to use +Aho-Corasick as an optimization when searching for an alternation of literals. +In order to preserve correct match semantics, regex engines typically can't use +the standard textbook definition directly, since regex engines will implement +either leftmost-first (Perl-like) or leftmost-longest (POSIX) match semantics. + +Supporting leftmost semantics requires a couple key changes: + +* Constructing the Aho-Corasick automaton changes a bit in both how the trie is + constructed and how failure transitions are found. Namely, only a subset + of the failure transitions are added. Specifically, only the failure + transitions that either do not occur after a match or do occur after a match + but preserve that match are kept. (More details on this can be found in + `src/nfa/noncontiguous.rs`.) +* The search algorithm changes slightly. Since we are looking for the leftmost + match, we cannot quit as soon as a match is detected. Instead, after a match + is detected, we must keep searching until either the end of the input or + until a dead state is seen. (Dead states are not used for standard match + semantics. Dead states mean that searching should stop after a match has been + found.) + +Most other implementations of Aho-Corasick do support leftmost match semantics, +but they do it with more overhead at search time, or even worse, with a queue +of matches and sophisticated hijinks to disambiguate the matches. While our +construction algorithm becomes a bit more complicated, the correct match +semantics fall out from the structure of the automaton itself. + + +# Overlapping matches + +One of the nice properties of an Aho-Corasick automaton is that it can report +all possible matches, even when they overlap with one another. In this mode, +the match semantics don't matter, since all possible matches are reported. +Overlapping searches work just like regular searches, except the state +identifier at which the previous search left off is carried over to the next +search, so that it can pick up where it left off. If there are additional +matches at that state, then they are reported before resuming the search. + +Enabling leftmost-first or leftmost-longest match semantics causes the +automaton to use a subset of all failure transitions, which means that +overlapping searches cannot be used. Therefore, if leftmost match semantics are +used, attempting to do an overlapping search will return an error (or panic +when using the infallible APIs). Thus, to get overlapping searches, the caller +must use the default standard match semantics. This behavior was chosen because +there are only two alternatives, which were deemed worse: + +* Compile two automatons internally, one for standard semantics and one for + the semantics requested by the caller (if not standard). +* Create a new type, distinct from the `AhoCorasick` type, which has different + capabilities based on the configuration options. 
+ +The first is untenable because of the amount of memory used by the automaton. +The second increases the complexity of the API too much by adding too many +types that do similar things. It is conceptually much simpler to keep all +searching isolated to a single type. + + +# Stream searching + +Since Aho-Corasick is an automaton, it is possible to do partial searches on +partial parts of the haystack, and then resume that search on subsequent pieces +of the haystack. This is useful when the haystack you're trying to search is +not stored contiguously in memory, or if one does not want to read the entire +haystack into memory at once. + +Currently, only standard semantics are supported for stream searching. This is +some of the more complicated code in this crate, and is something I would very +much like to improve. In particular, it currently has the restriction that it +must buffer at least enough of the haystack in memory in order to fit the +longest possible match. The difficulty in getting stream searching right is +that the implementation choices (such as the buffer size) often impact what the +API looks like and what it's allowed to do. + + +# Prefilters + +In some cases, Aho-Corasick is not the fastest way to find matches containing +multiple patterns. Sometimes, the search can be accelerated using highly +optimized SIMD routines. For example, consider searching the following +patterns: + + Sherlock + Moriarty + Watson + +It is plausible that it would be much faster to quickly look for occurrences of +the leading bytes, `S`, `M` or `W`, before trying to start searching via the +automaton. Indeed, this is exactly what this crate will do. + +When there are more than three distinct starting bytes, then this crate will +look for three distinct bytes occurring at any position in the patterns, while +preferring bytes that are heuristically determined to be rare over others. For +example: + + Abuzz + Sanchez + Vasquez + Topaz + Waltz + +Here, we have more than 3 distinct starting bytes, but all of the patterns +contain `z`, which is typically a rare byte. In this case, the prefilter will +scan for `z`, back up a bit, and then execute the Aho-Corasick automaton. + +If all of that fails, then a packed multiple substring algorithm will be +attempted. Currently, the only algorithm available for this is Teddy, but more +may be added in the future. Teddy is unlike the above prefilters in that it +confirms its own matches, so when Teddy is active, it might not be necessary +for Aho-Corasick to run at all. However, the current Teddy implementation +only works in `x86_64` when SSSE3 or AVX2 are available or in `aarch64` +(using NEON), and moreover, only works _well_ when there are a small number +of patterns (say, less than 100). Teddy also requires the haystack to be of a +certain length (more than 16-34 bytes). When the haystack is shorter than that, +Rabin-Karp is used instead. (See `src/packed/rabinkarp.rs`.) + +There is a more thorough description of Teddy at +[`src/packed/teddy/README.md`](src/packed/teddy/README.md). 
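+
+To make the rare-byte scan described above a bit more concrete, here is a
+minimal sketch of its general shape. This is not the crate's implementation:
+the `run_automaton_at` closure is a hypothetical stand-in for resuming the
+Aho-Corasick search at a given position, and the real prefilters add candidate
+verification and more careful heuristics for choosing which bytes to scan for.
+
+    // A sketch of a single-rare-byte prefilter. `rare` is the byte chosen
+    // heuristically (e.g. `z` in the example above) and `max_pattern_len` is
+    // the length of the longest pattern.
+    fn prefilter_scan(
+        haystack: &[u8],
+        rare: u8,
+        max_pattern_len: usize,
+        run_automaton_at: impl Fn(&[u8], usize) -> Option<(usize, usize)>,
+    ) -> Option<(usize, usize)> {
+        let mut at = 0;
+        while at < haystack.len() {
+            // Find the next occurrence of the rare byte. (The real prefilter
+            // uses SIMD-accelerated scanning via `memchr` for this step.)
+            let offset = haystack[at..].iter().position(|&b| b == rare)?;
+            let candidate = at + offset;
+            // Back up far enough that any pattern containing the rare byte
+            // could still start before the candidate position, then hand
+            // control back to the automaton (the hypothetical closure here).
+            let start = candidate.saturating_sub(max_pattern_len);
+            if let Some(m) = run_automaton_at(haystack, start) {
+                return Some(m);
+            }
+            at = candidate + 1;
+        }
+        None
+    }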
diff --git a/vendor/aho-corasick/LICENSE-MIT b/vendor/aho-corasick/LICENSE-MIT new file mode 100644 index 00000000000000..3b0a5dc09c1e16 --- /dev/null +++ b/vendor/aho-corasick/LICENSE-MIT @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2015 Andrew Gallant + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/aho-corasick/README.md b/vendor/aho-corasick/README.md new file mode 100644 index 00000000000000..c0f525fdc6be62 --- /dev/null +++ b/vendor/aho-corasick/README.md @@ -0,0 +1,174 @@ +aho-corasick +============ +A library for finding occurrences of many patterns at once with SIMD +acceleration in some cases. This library provides multiple pattern +search principally through an implementation of the +[Aho-Corasick algorithm](https://en.wikipedia.org/wiki/Aho%E2%80%93Corasick_algorithm), +which builds a finite state machine for executing searches in linear time. +Features include case insensitive matching, overlapping matches, fast searching +via SIMD and optional full DFA construction and search & replace in streams. + +[![Build status](https://github.com/BurntSushi/aho-corasick/workflows/ci/badge.svg)](https://github.com/BurntSushi/aho-corasick/actions) +[![crates.io](https://img.shields.io/crates/v/aho-corasick.svg)](https://crates.io/crates/aho-corasick) + +Dual-licensed under MIT or the [UNLICENSE](https://unlicense.org/). + + +### Documentation + +https://docs.rs/aho-corasick + + +### Usage + +Run `cargo add aho-corasick` to automatically add this crate as a dependency +in your `Cargo.toml` file. + + +### Example: basic searching + +This example shows how to search for occurrences of multiple patterns +simultaneously. Each match includes the pattern that matched along with the +byte offsets of the match. 
+ +```rust +use aho_corasick::{AhoCorasick, PatternID}; + +let patterns = &["apple", "maple", "Snapple"]; +let haystack = "Nobody likes maple in their apple flavored Snapple."; + +let ac = AhoCorasick::new(patterns).unwrap(); +let mut matches = vec![]; +for mat in ac.find_iter(haystack) { + matches.push((mat.pattern(), mat.start(), mat.end())); +} +assert_eq!(matches, vec![ + (PatternID::must(1), 13, 18), + (PatternID::must(0), 28, 33), + (PatternID::must(2), 43, 50), +]); +``` + + +### Example: ASCII case insensitivity + +This is like the previous example, but matches `Snapple` case insensitively +using `AhoCorasickBuilder`: + +```rust +use aho_corasick::{AhoCorasick, PatternID}; + +let patterns = &["apple", "maple", "snapple"]; +let haystack = "Nobody likes maple in their apple flavored Snapple."; + +let ac = AhoCorasick::builder() + .ascii_case_insensitive(true) + .build(patterns) + .unwrap(); +let mut matches = vec![]; +for mat in ac.find_iter(haystack) { + matches.push((mat.pattern(), mat.start(), mat.end())); +} +assert_eq!(matches, vec![ + (PatternID::must(1), 13, 18), + (PatternID::must(0), 28, 33), + (PatternID::must(2), 43, 50), +]); +``` + + +### Example: replacing matches in a stream + +This example shows how to execute a search and replace on a stream without +loading the entire stream into memory first. + +```rust,ignore +use aho_corasick::AhoCorasick; + +let patterns = &["fox", "brown", "quick"]; +let replace_with = &["sloth", "grey", "slow"]; + +// In a real example, these might be `std::fs::File`s instead. All you need to +// do is supply a pair of `std::io::Read` and `std::io::Write` implementations. +let rdr = "The quick brown fox."; +let mut wtr = vec![]; + +let ac = AhoCorasick::new(patterns).unwrap(); +ac.stream_replace_all(rdr.as_bytes(), &mut wtr, replace_with) + .expect("stream_replace_all failed"); +assert_eq!(b"The slow grey sloth.".to_vec(), wtr); +``` + + +### Example: finding the leftmost first match + +In the textbook description of Aho-Corasick, its formulation is typically +structured such that it reports all possible matches, even when they overlap +with another. In many cases, overlapping matches may not be desired, such as +the case of finding all successive non-overlapping matches like you might with +a standard regular expression. + +Unfortunately the "obvious" way to modify the Aho-Corasick algorithm to do +this doesn't always work in the expected way, since it will report matches as +soon as they are seen. For example, consider matching the regex `Samwise|Sam` +against the text `Samwise`. Most regex engines (that are Perl-like, or +non-POSIX) will report `Samwise` as a match, but the standard Aho-Corasick +algorithm modified for reporting non-overlapping matches will report `Sam`. + +A novel contribution of this library is the ability to change the match +semantics of Aho-Corasick (without additional search time overhead) such that +`Samwise` is reported instead. 
For example, here's the standard approach: + +```rust +use aho_corasick::AhoCorasick; + +let patterns = &["Samwise", "Sam"]; +let haystack = "Samwise"; + +let ac = AhoCorasick::new(patterns).unwrap(); +let mat = ac.find(haystack).expect("should have a match"); +assert_eq!("Sam", &haystack[mat.start()..mat.end()]); +``` + +And now here's the leftmost-first version, which matches how a Perl-like +regex will work: + +```rust +use aho_corasick::{AhoCorasick, MatchKind}; + +let patterns = &["Samwise", "Sam"]; +let haystack = "Samwise"; + +let ac = AhoCorasick::builder() + .match_kind(MatchKind::LeftmostFirst) + .build(patterns) + .unwrap(); +let mat = ac.find(haystack).expect("should have a match"); +assert_eq!("Samwise", &haystack[mat.start()..mat.end()]); +``` + +In addition to leftmost-first semantics, this library also supports +leftmost-longest semantics, which match the POSIX behavior of a regular +expression alternation. See `MatchKind` in the docs for more details. + + +### Minimum Rust version policy + +This crate's minimum supported `rustc` version is `1.60.0`. + +The current policy is that the minimum Rust version required to use this crate +can be increased in minor version updates. For example, if `crate 1.0` requires +Rust 1.20.0, then `crate 1.0.z` for all values of `z` will also require Rust +1.20.0 or newer. However, `crate 1.y` for `y > 0` may require a newer minimum +version of Rust. + +In general, this crate will be conservative with respect to the minimum +supported version of Rust. + + +### FFI bindings + +* [G-Research/ahocorasick_rs](https://github.com/G-Research/ahocorasick_rs/) +is a Python wrapper for this library. +* [tmikus/ahocorasick_rs](https://github.com/tmikus/ahocorasick_rs) is a Go + wrapper for this library. diff --git a/vendor/aho-corasick/UNLICENSE b/vendor/aho-corasick/UNLICENSE new file mode 100644 index 00000000000000..68a49daad8ff7e --- /dev/null +++ b/vendor/aho-corasick/UNLICENSE @@ -0,0 +1,24 @@ +This is free and unencumbered software released into the public domain. + +Anyone is free to copy, modify, publish, use, compile, sell, or +distribute this software, either in source code form or as a compiled +binary, for any purpose, commercial or non-commercial, and by any +means. + +In jurisdictions that recognize copyright laws, the author or authors +of this software dedicate any and all copyright interest in the +software to the public domain. We make this dedication for the benefit +of the public at large and to the detriment of our heirs and +successors. We intend this dedication to be an overt act of +relinquishment in perpetuity of all present and future rights to this +software under copyright law. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR +OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, +ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +OTHER DEALINGS IN THE SOFTWARE. 
+ +For more information, please refer to diff --git a/vendor/aho-corasick/rustfmt.toml b/vendor/aho-corasick/rustfmt.toml new file mode 100644 index 00000000000000..aa37a218b97e5f --- /dev/null +++ b/vendor/aho-corasick/rustfmt.toml @@ -0,0 +1,2 @@ +max_width = 79 +use_small_heuristics = "max" diff --git a/vendor/aho-corasick/src/ahocorasick.rs b/vendor/aho-corasick/src/ahocorasick.rs new file mode 100644 index 00000000000000..2947627704d3d0 --- /dev/null +++ b/vendor/aho-corasick/src/ahocorasick.rs @@ -0,0 +1,2789 @@ +use core::{ + fmt::Debug, + panic::{RefUnwindSafe, UnwindSafe}, +}; + +use alloc::{string::String, sync::Arc, vec::Vec}; + +use crate::{ + automaton::{self, Automaton, OverlappingState}, + dfa, + nfa::{contiguous, noncontiguous}, + util::{ + error::{BuildError, MatchError}, + prefilter::Prefilter, + primitives::{PatternID, StateID}, + search::{Anchored, Input, Match, MatchKind, StartKind}, + }, +}; + +/// An automaton for searching multiple strings in linear time. +/// +/// The `AhoCorasick` type supports a few basic ways of constructing an +/// automaton, with the default being [`AhoCorasick::new`]. However, there +/// are a fair number of configurable options that can be set by using +/// [`AhoCorasickBuilder`] instead. Such options include, but are not limited +/// to, how matches are determined, simple case insensitivity, whether to use a +/// DFA or not and various knobs for controlling the space-vs-time trade offs +/// taken when building the automaton. +/// +/// # Resource usage +/// +/// Aho-Corasick automatons are always constructed in `O(p)` time, where +/// `p` is the combined length of all patterns being searched. With that +/// said, building an automaton can be fairly costly because of high constant +/// factors, particularly when enabling the [DFA](AhoCorasickKind::DFA) option +/// with [`AhoCorasickBuilder::kind`]. For this reason, it's generally a good +/// idea to build an automaton once and reuse it as much as possible. +/// +/// Aho-Corasick automatons can also use a fair bit of memory. To get +/// a concrete idea of how much memory is being used, try using the +/// [`AhoCorasick::memory_usage`] method. +/// +/// To give a quick idea of the differences between Aho-Corasick +/// implementations and their resource usage, here's a sample of construction +/// times and heap memory used after building an automaton from 100,000 +/// randomly selected titles from Wikipedia: +/// +/// * 99MB for a [`noncontiguous::NFA`] in 240ms. +/// * 21MB for a [`contiguous::NFA`] in 275ms. +/// * 1.6GB for a [`dfa::DFA`] in 1.88s. +/// +/// (Note that the memory usage above reflects the size of each automaton and +/// not peak memory usage. For example, building a contiguous NFA requires +/// first building a noncontiguous NFA. Once the contiguous NFA is built, the +/// noncontiguous NFA is freed.) +/// +/// This experiment very strongly argues that a contiguous NFA is often the +/// best balance in terms of resource usage. It takes a little longer to build, +/// but its memory usage is quite small. Its search speed (not listed) is +/// also often faster than a noncontiguous NFA, but a little slower than a +/// DFA. Indeed, when no specific [`AhoCorasickKind`] is used (which is the +/// default), a contiguous NFA is used in most cases. +/// +/// The only "catch" to using a contiguous NFA is that, because of its variety +/// of compression tricks, it may not be able to support automatons as large as +/// what the noncontiguous NFA supports. 
In which case, building a contiguous +/// NFA will fail and (by default) `AhoCorasick` will automatically fall +/// back to a noncontiguous NFA. (This typically only happens when building +/// automatons from millions of patterns.) Otherwise, the small additional time +/// for building a contiguous NFA is almost certainly worth it. +/// +/// # Cloning +/// +/// The `AhoCorasick` type uses thread safe reference counting internally. It +/// is guaranteed that it is cheap to clone. +/// +/// # Search configuration +/// +/// Most of the search routines accept anything that can be cheaply converted +/// to an [`Input`]. This includes `&[u8]`, `&str` and `Input` itself. +/// +/// # Construction failure +/// +/// It is generally possible for building an Aho-Corasick automaton to fail. +/// Construction can fail in generally one way: when the inputs provided are +/// too big. Whether that's a pattern that is too long, too many patterns +/// or some combination of both. A first approximation for the scale at which +/// construction can fail is somewhere around "millions of patterns." +/// +/// For that reason, if you're building an Aho-Corasick automaton from +/// untrusted input (or input that doesn't have any reasonable bounds on its +/// size), then it is strongly recommended to handle the possibility of an +/// error. +/// +/// If you're constructing an Aho-Corasick automaton from static or trusted +/// data, then it is likely acceptable to panic (by calling `unwrap()` or +/// `expect()`) if construction fails. +/// +/// # Fallibility +/// +/// The `AhoCorasick` type provides a number of methods for searching, as one +/// might expect. Depending on how the Aho-Corasick automaton was built and +/// depending on the search configuration, it is possible for a search to +/// return an error. Since an error is _never_ dependent on the actual contents +/// of the haystack, this type provides both infallible and fallible methods +/// for searching. The infallible methods panic if an error occurs, and can be +/// used for convenience and when you know the search will never return an +/// error. +/// +/// For example, the [`AhoCorasick::find_iter`] method is the infallible +/// version of the [`AhoCorasick::try_find_iter`] method. +/// +/// Examples of errors that can occur: +/// +/// * Running a search that requires [`MatchKind::Standard`] semantics (such +/// as a stream or overlapping search) with an automaton that was built with +/// [`MatchKind::LeftmostFirst`] or [`MatchKind::LeftmostLongest`] semantics. +/// * Running an anchored search with an automaton that only supports +/// unanchored searches. (By default, `AhoCorasick` only supports unanchored +/// searches. But this can be toggled with [`AhoCorasickBuilder::start_kind`].) +/// * Running an unanchored search with an automaton that only supports +/// anchored searches. +/// +/// The common thread between the different types of errors is that they are +/// all rooted in the automaton construction and search configurations. If +/// those configurations are a static property of your program, then it is +/// reasonable to call infallible routines since you know an error will never +/// occur. And if one _does_ occur, then it's a bug in your program. +/// +/// To re-iterate, if the patterns, build or search configuration come from +/// user or untrusted data, then you should handle errors at build or search +/// time. 
If only the haystack comes from user or untrusted data, then there +/// should be no need to handle errors anywhere and it is generally encouraged +/// to `unwrap()` (or `expect()`) both build and search time calls. +/// +/// # Examples +/// +/// This example shows how to search for occurrences of multiple patterns +/// simultaneously in a case insensitive fashion. Each match includes the +/// pattern that matched along with the byte offsets of the match. +/// +/// ``` +/// use aho_corasick::{AhoCorasick, PatternID}; +/// +/// let patterns = &["apple", "maple", "snapple"]; +/// let haystack = "Nobody likes maple in their apple flavored Snapple."; +/// +/// let ac = AhoCorasick::builder() +/// .ascii_case_insensitive(true) +/// .build(patterns) +/// .unwrap(); +/// let mut matches = vec![]; +/// for mat in ac.find_iter(haystack) { +/// matches.push((mat.pattern(), mat.start(), mat.end())); +/// } +/// assert_eq!(matches, vec![ +/// (PatternID::must(1), 13, 18), +/// (PatternID::must(0), 28, 33), +/// (PatternID::must(2), 43, 50), +/// ]); +/// ``` +/// +/// This example shows how to replace matches with some other string: +/// +/// ``` +/// use aho_corasick::AhoCorasick; +/// +/// let patterns = &["fox", "brown", "quick"]; +/// let haystack = "The quick brown fox."; +/// let replace_with = &["sloth", "grey", "slow"]; +/// +/// let ac = AhoCorasick::new(patterns).unwrap(); +/// let result = ac.replace_all(haystack, replace_with); +/// assert_eq!(result, "The slow grey sloth."); +/// ``` +#[derive(Clone)] +pub struct AhoCorasick { + /// The underlying Aho-Corasick automaton. It's one of + /// nfa::noncontiguous::NFA, nfa::contiguous::NFA or dfa::DFA. + aut: Arc, + /// The specific Aho-Corasick kind chosen. This makes it possible to + /// inspect any `AhoCorasick` and know what kind of search strategy it + /// uses. + kind: AhoCorasickKind, + /// The start kind of this automaton as configured by the caller. + /// + /// We don't really *need* to put this here, since the underlying automaton + /// will correctly return errors if the caller requests an unsupported + /// search type. But we do keep this here for API behavior consistency. + /// Namely, the NFAs in this crate support both unanchored and anchored + /// searches unconditionally. There's no way to disable one or the other. + /// They always both work. But the DFA in this crate specifically only + /// supports both unanchored and anchored searches if it's configured to + /// do so. Why? Because for the DFA, supporting both essentially requires + /// two copies of the transition table: one generated by following failure + /// transitions from the original NFA and one generated by not following + /// those failure transitions. + /// + /// So why record the start kind here? Well, consider what happens + /// when no specific 'AhoCorasickKind' is selected by the caller and + /// 'StartKind::Unanchored' is used (both are the default). It *might* + /// result in using a DFA or it might pick an NFA. If it picks an NFA, the + /// caller would then be able to run anchored searches, even though the + /// caller only asked for support for unanchored searches. Maybe that's + /// fine, but what if the DFA was chosen instead? Oops, the caller would + /// get an error. + /// + /// Basically, it seems bad to return an error or not based on some + /// internal implementation choice. 
So we smooth things out and ensure + /// anchored searches *always* report an error when only unanchored support + /// was asked for (and vice versa), even if the underlying automaton + /// supports it. + start_kind: StartKind, +} + +/// Convenience constructors for an Aho-Corasick searcher. To configure the +/// searcher, use an [`AhoCorasickBuilder`] instead. +impl AhoCorasick { + /// Create a new Aho-Corasick automaton using the default configuration. + /// + /// The default configuration optimizes for less space usage, but at the + /// expense of longer search times. To change the configuration, use + /// [`AhoCorasickBuilder`]. + /// + /// This uses the default [`MatchKind::Standard`] match semantics, which + /// reports a match as soon as it is found. This corresponds to the + /// standard match semantics supported by textbook descriptions of the + /// Aho-Corasick algorithm. + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// use aho_corasick::{AhoCorasick, PatternID}; + /// + /// let ac = AhoCorasick::new(&["foo", "bar", "baz"]).unwrap(); + /// assert_eq!( + /// Some(PatternID::must(1)), + /// ac.find("xxx bar xxx").map(|m| m.pattern()), + /// ); + /// ``` + pub fn new(patterns: I) -> Result + where + I: IntoIterator, + P: AsRef<[u8]>, + { + AhoCorasickBuilder::new().build(patterns) + } + + /// A convenience method for returning a new Aho-Corasick builder. + /// + /// This usually permits one to just import the `AhoCorasick` type. + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// use aho_corasick::{AhoCorasick, Match, MatchKind}; + /// + /// let ac = AhoCorasick::builder() + /// .match_kind(MatchKind::LeftmostFirst) + /// .build(&["samwise", "sam"]) + /// .unwrap(); + /// assert_eq!(Some(Match::must(0, 0..7)), ac.find("samwise")); + /// ``` + pub fn builder() -> AhoCorasickBuilder { + AhoCorasickBuilder::new() + } +} + +/// Infallible search routines. These APIs panic when the underlying search +/// would otherwise fail. Infallible routines are useful because the errors are +/// a result of both search-time configuration and what configuration is used +/// to build the Aho-Corasick searcher. Both of these things are not usually +/// the result of user input, and thus, an error is typically indicative of a +/// programmer error. In cases where callers want errors instead of panics, use +/// the corresponding `try` method in the section below. +impl AhoCorasick { + /// Returns true if and only if this automaton matches the haystack at any + /// position. + /// + /// `input` may be any type that is cheaply convertible to an `Input`. This + /// includes, but is not limited to, `&str` and `&[u8]`. + /// + /// Aside from convenience, when `AhoCorasick` was built with + /// leftmost-first or leftmost-longest semantics, this might result in a + /// search that visits less of the haystack than [`AhoCorasick::find`] + /// would otherwise. (For standard semantics, matches are always + /// immediately returned once they are seen, so there is no way for this to + /// do less work in that case.) + /// + /// Note that there is no corresponding fallible routine for this method. + /// If you need a fallible version of this, then [`AhoCorasick::try_find`] + /// can be used with [`Input::earliest`] enabled. 
+ /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// use aho_corasick::AhoCorasick; + /// + /// let ac = AhoCorasick::new(&[ + /// "foo", "bar", "quux", "baz", + /// ]).unwrap(); + /// assert!(ac.is_match("xxx bar xxx")); + /// assert!(!ac.is_match("xxx qux xxx")); + /// ``` + pub fn is_match<'h, I: Into>>(&self, input: I) -> bool { + self.aut + .try_find(&input.into().earliest(true)) + .expect("AhoCorasick::try_find is not expected to fail") + .is_some() + } + + /// Returns the location of the first match according to the match + /// semantics that this automaton was constructed with. + /// + /// `input` may be any type that is cheaply convertible to an `Input`. This + /// includes, but is not limited to, `&str` and `&[u8]`. + /// + /// This is the infallible version of [`AhoCorasick::try_find`]. + /// + /// # Panics + /// + /// This panics when [`AhoCorasick::try_find`] would return an error. + /// + /// # Examples + /// + /// Basic usage, with standard semantics: + /// + /// ``` + /// use aho_corasick::{AhoCorasick, MatchKind}; + /// + /// let patterns = &["b", "abc", "abcd"]; + /// let haystack = "abcd"; + /// + /// let ac = AhoCorasick::builder() + /// .match_kind(MatchKind::Standard) // default, not necessary + /// .build(patterns) + /// .unwrap(); + /// let mat = ac.find(haystack).expect("should have a match"); + /// assert_eq!("b", &haystack[mat.start()..mat.end()]); + /// ``` + /// + /// Now with leftmost-first semantics: + /// + /// ``` + /// use aho_corasick::{AhoCorasick, MatchKind}; + /// + /// let patterns = &["b", "abc", "abcd"]; + /// let haystack = "abcd"; + /// + /// let ac = AhoCorasick::builder() + /// .match_kind(MatchKind::LeftmostFirst) + /// .build(patterns) + /// .unwrap(); + /// let mat = ac.find(haystack).expect("should have a match"); + /// assert_eq!("abc", &haystack[mat.start()..mat.end()]); + /// ``` + /// + /// And finally, leftmost-longest semantics: + /// + /// ``` + /// use aho_corasick::{AhoCorasick, MatchKind}; + /// + /// let patterns = &["b", "abc", "abcd"]; + /// let haystack = "abcd"; + /// + /// let ac = AhoCorasick::builder() + /// .match_kind(MatchKind::LeftmostLongest) + /// .build(patterns) + /// .unwrap(); + /// let mat = ac.find(haystack).expect("should have a match"); + /// ``` + /// + /// # Example: configuring a search + /// + /// Because this method accepts anything that can be turned into an + /// [`Input`], it's possible to provide an `Input` directly in order to + /// configure the search. In this example, we show how to use the + /// `earliest` option to force the search to return as soon as it knows + /// a match has occurred. + /// + /// ``` + /// use aho_corasick::{AhoCorasick, Input, MatchKind}; + /// + /// let patterns = &["b", "abc", "abcd"]; + /// let haystack = "abcd"; + /// + /// let ac = AhoCorasick::builder() + /// .match_kind(MatchKind::LeftmostLongest) + /// .build(patterns) + /// .unwrap(); + /// let mat = ac.find(Input::new(haystack).earliest(true)) + /// .expect("should have a match"); + /// // The correct leftmost-longest match here is 'abcd', but since we + /// // told the search to quit as soon as it knows a match has occurred, + /// // we get a different match back. 
+ /// assert_eq!("b", &haystack[mat.start()..mat.end()]); + /// ``` + pub fn find<'h, I: Into>>(&self, input: I) -> Option { + self.try_find(input) + .expect("AhoCorasick::try_find is not expected to fail") + } + + /// Returns the location of the first overlapping match in the given + /// input with respect to the current state of the underlying searcher. + /// + /// `input` may be any type that is cheaply convertible to an `Input`. This + /// includes, but is not limited to, `&str` and `&[u8]`. + /// + /// Overlapping searches do not report matches in their return value. + /// Instead, matches can be accessed via [`OverlappingState::get_match`] + /// after a search call. + /// + /// This is the infallible version of + /// [`AhoCorasick::try_find_overlapping`]. + /// + /// # Panics + /// + /// This panics when [`AhoCorasick::try_find_overlapping`] would + /// return an error. For example, when the Aho-Corasick searcher + /// doesn't support overlapping searches. (Only searchers built with + /// [`MatchKind::Standard`] semantics support overlapping searches.) + /// + /// # Example + /// + /// This shows how we can repeatedly call an overlapping search without + /// ever needing to explicitly re-slice the haystack. Overlapping search + /// works this way because searches depend on state saved during the + /// previous search. + /// + /// ``` + /// use aho_corasick::{ + /// automaton::OverlappingState, + /// AhoCorasick, Input, Match, + /// }; + /// + /// let patterns = &["append", "appendage", "app"]; + /// let haystack = "append the app to the appendage"; + /// + /// let ac = AhoCorasick::new(patterns).unwrap(); + /// let mut state = OverlappingState::start(); + /// + /// ac.find_overlapping(haystack, &mut state); + /// assert_eq!(Some(Match::must(2, 0..3)), state.get_match()); + /// + /// ac.find_overlapping(haystack, &mut state); + /// assert_eq!(Some(Match::must(0, 0..6)), state.get_match()); + /// + /// ac.find_overlapping(haystack, &mut state); + /// assert_eq!(Some(Match::must(2, 11..14)), state.get_match()); + /// + /// ac.find_overlapping(haystack, &mut state); + /// assert_eq!(Some(Match::must(2, 22..25)), state.get_match()); + /// + /// ac.find_overlapping(haystack, &mut state); + /// assert_eq!(Some(Match::must(0, 22..28)), state.get_match()); + /// + /// ac.find_overlapping(haystack, &mut state); + /// assert_eq!(Some(Match::must(1, 22..31)), state.get_match()); + /// + /// // No more match matches to be found. + /// ac.find_overlapping(haystack, &mut state); + /// assert_eq!(None, state.get_match()); + /// ``` + pub fn find_overlapping<'h, I: Into>>( + &self, + input: I, + state: &mut OverlappingState, + ) { + self.try_find_overlapping(input, state).expect( + "AhoCorasick::try_find_overlapping is not expected to fail", + ) + } + + /// Returns an iterator of non-overlapping matches, using the match + /// semantics that this automaton was constructed with. + /// + /// `input` may be any type that is cheaply convertible to an `Input`. This + /// includes, but is not limited to, `&str` and `&[u8]`. + /// + /// This is the infallible version of [`AhoCorasick::try_find_iter`]. + /// + /// # Panics + /// + /// This panics when [`AhoCorasick::try_find_iter`] would return an error. 
+ /// + /// # Examples + /// + /// Basic usage, with standard semantics: + /// + /// ``` + /// use aho_corasick::{AhoCorasick, MatchKind, PatternID}; + /// + /// let patterns = &["append", "appendage", "app"]; + /// let haystack = "append the app to the appendage"; + /// + /// let ac = AhoCorasick::builder() + /// .match_kind(MatchKind::Standard) // default, not necessary + /// .build(patterns) + /// .unwrap(); + /// let matches: Vec = ac + /// .find_iter(haystack) + /// .map(|mat| mat.pattern()) + /// .collect(); + /// assert_eq!(vec![ + /// PatternID::must(2), + /// PatternID::must(2), + /// PatternID::must(2), + /// ], matches); + /// ``` + /// + /// Now with leftmost-first semantics: + /// + /// ``` + /// use aho_corasick::{AhoCorasick, MatchKind, PatternID}; + /// + /// let patterns = &["append", "appendage", "app"]; + /// let haystack = "append the app to the appendage"; + /// + /// let ac = AhoCorasick::builder() + /// .match_kind(MatchKind::LeftmostFirst) + /// .build(patterns) + /// .unwrap(); + /// let matches: Vec = ac + /// .find_iter(haystack) + /// .map(|mat| mat.pattern()) + /// .collect(); + /// assert_eq!(vec![ + /// PatternID::must(0), + /// PatternID::must(2), + /// PatternID::must(0), + /// ], matches); + /// ``` + /// + /// And finally, leftmost-longest semantics: + /// + /// ``` + /// use aho_corasick::{AhoCorasick, MatchKind, PatternID}; + /// + /// let patterns = &["append", "appendage", "app"]; + /// let haystack = "append the app to the appendage"; + /// + /// let ac = AhoCorasick::builder() + /// .match_kind(MatchKind::LeftmostLongest) + /// .build(patterns) + /// .unwrap(); + /// let matches: Vec = ac + /// .find_iter(haystack) + /// .map(|mat| mat.pattern()) + /// .collect(); + /// assert_eq!(vec![ + /// PatternID::must(0), + /// PatternID::must(2), + /// PatternID::must(1), + /// ], matches); + /// ``` + pub fn find_iter<'a, 'h, I: Into>>( + &'a self, + input: I, + ) -> FindIter<'a, 'h> { + self.try_find_iter(input) + .expect("AhoCorasick::try_find_iter is not expected to fail") + } + + /// Returns an iterator of overlapping matches. Stated differently, this + /// returns an iterator of all possible matches at every position. + /// + /// `input` may be any type that is cheaply convertible to an `Input`. This + /// includes, but is not limited to, `&str` and `&[u8]`. + /// + /// This is the infallible version of + /// [`AhoCorasick::try_find_overlapping_iter`]. + /// + /// # Panics + /// + /// This panics when `AhoCorasick::try_find_overlapping_iter` would return + /// an error. For example, when the Aho-Corasick searcher is built with + /// either leftmost-first or leftmost-longest match semantics. Stated + /// differently, overlapping searches require one to build the searcher + /// with [`MatchKind::Standard`] (it is the default). 
+ /// + /// # Example: basic usage + /// + /// ``` + /// use aho_corasick::{AhoCorasick, PatternID}; + /// + /// let patterns = &["append", "appendage", "app"]; + /// let haystack = "append the app to the appendage"; + /// + /// let ac = AhoCorasick::new(patterns).unwrap(); + /// let matches: Vec = ac + /// .find_overlapping_iter(haystack) + /// .map(|mat| mat.pattern()) + /// .collect(); + /// assert_eq!(vec![ + /// PatternID::must(2), + /// PatternID::must(0), + /// PatternID::must(2), + /// PatternID::must(2), + /// PatternID::must(0), + /// PatternID::must(1), + /// ], matches); + /// ``` + pub fn find_overlapping_iter<'a, 'h, I: Into>>( + &'a self, + input: I, + ) -> FindOverlappingIter<'a, 'h> { + self.try_find_overlapping_iter(input).expect( + "AhoCorasick::try_find_overlapping_iter is not expected to fail", + ) + } + + /// Replace all matches with a corresponding value in the `replace_with` + /// slice given. Matches correspond to the same matches as reported by + /// [`AhoCorasick::find_iter`]. + /// + /// Replacements are determined by the index of the matching pattern. + /// For example, if the pattern with index `2` is found, then it is + /// replaced by `replace_with[2]`. + /// + /// This is the infallible version of [`AhoCorasick::try_replace_all`]. + /// + /// # Panics + /// + /// This panics when [`AhoCorasick::try_replace_all`] would return an + /// error. + /// + /// This also panics when `replace_with.len()` does not equal + /// [`AhoCorasick::patterns_len`]. + /// + /// # Example: basic usage + /// + /// ``` + /// use aho_corasick::{AhoCorasick, MatchKind}; + /// + /// let patterns = &["append", "appendage", "app"]; + /// let haystack = "append the app to the appendage"; + /// + /// let ac = AhoCorasick::builder() + /// .match_kind(MatchKind::LeftmostFirst) + /// .build(patterns) + /// .unwrap(); + /// let result = ac.replace_all(haystack, &["x", "y", "z"]); + /// assert_eq!("x the z to the xage", result); + /// ``` + pub fn replace_all(&self, haystack: &str, replace_with: &[B]) -> String + where + B: AsRef, + { + self.try_replace_all(haystack, replace_with) + .expect("AhoCorasick::try_replace_all is not expected to fail") + } + + /// Replace all matches using raw bytes with a corresponding value in the + /// `replace_with` slice given. Matches correspond to the same matches as + /// reported by [`AhoCorasick::find_iter`]. + /// + /// Replacements are determined by the index of the matching pattern. + /// For example, if the pattern with index `2` is found, then it is + /// replaced by `replace_with[2]`. + /// + /// This is the infallible version of + /// [`AhoCorasick::try_replace_all_bytes`]. + /// + /// # Panics + /// + /// This panics when [`AhoCorasick::try_replace_all_bytes`] would return an + /// error. + /// + /// This also panics when `replace_with.len()` does not equal + /// [`AhoCorasick::patterns_len`]. 
+ /// + /// # Example: basic usage + /// + /// ``` + /// use aho_corasick::{AhoCorasick, MatchKind}; + /// + /// let patterns = &["append", "appendage", "app"]; + /// let haystack = b"append the app to the appendage"; + /// + /// let ac = AhoCorasick::builder() + /// .match_kind(MatchKind::LeftmostFirst) + /// .build(patterns) + /// .unwrap(); + /// let result = ac.replace_all_bytes(haystack, &["x", "y", "z"]); + /// assert_eq!(b"x the z to the xage".to_vec(), result); + /// ``` + pub fn replace_all_bytes( + &self, + haystack: &[u8], + replace_with: &[B], + ) -> Vec + where + B: AsRef<[u8]>, + { + self.try_replace_all_bytes(haystack, replace_with) + .expect("AhoCorasick::try_replace_all_bytes should not fail") + } + + /// Replace all matches using a closure called on each match. + /// Matches correspond to the same matches as reported by + /// [`AhoCorasick::find_iter`]. + /// + /// The closure accepts three parameters: the match found, the text of + /// the match and a string buffer with which to write the replaced text + /// (if any). If the closure returns `true`, then it continues to the next + /// match. If the closure returns `false`, then searching is stopped. + /// + /// Note that any matches with boundaries that don't fall on a valid UTF-8 + /// boundary are silently skipped. + /// + /// This is the infallible version of + /// [`AhoCorasick::try_replace_all_with`]. + /// + /// # Panics + /// + /// This panics when [`AhoCorasick::try_replace_all_with`] would return an + /// error. + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// use aho_corasick::{AhoCorasick, MatchKind}; + /// + /// let patterns = &["append", "appendage", "app"]; + /// let haystack = "append the app to the appendage"; + /// + /// let ac = AhoCorasick::builder() + /// .match_kind(MatchKind::LeftmostFirst) + /// .build(patterns) + /// .unwrap(); + /// let mut result = String::new(); + /// ac.replace_all_with(haystack, &mut result, |mat, _, dst| { + /// dst.push_str(&mat.pattern().as_usize().to_string()); + /// true + /// }); + /// assert_eq!("0 the 2 to the 0age", result); + /// ``` + /// + /// Stopping the replacement by returning `false` (continued from the + /// example above): + /// + /// ``` + /// # use aho_corasick::{AhoCorasick, MatchKind, PatternID}; + /// # let patterns = &["append", "appendage", "app"]; + /// # let haystack = "append the app to the appendage"; + /// # let ac = AhoCorasick::builder() + /// # .match_kind(MatchKind::LeftmostFirst) + /// # .build(patterns) + /// # .unwrap(); + /// let mut result = String::new(); + /// ac.replace_all_with(haystack, &mut result, |mat, _, dst| { + /// dst.push_str(&mat.pattern().as_usize().to_string()); + /// mat.pattern() != PatternID::must(2) + /// }); + /// assert_eq!("0 the 2 to the appendage", result); + /// ``` + pub fn replace_all_with( + &self, + haystack: &str, + dst: &mut String, + replace_with: F, + ) where + F: FnMut(&Match, &str, &mut String) -> bool, + { + self.try_replace_all_with(haystack, dst, replace_with) + .expect("AhoCorasick::try_replace_all_with should not fail") + } + + /// Replace all matches using raw bytes with a closure called on each + /// match. Matches correspond to the same matches as reported by + /// [`AhoCorasick::find_iter`]. + /// + /// The closure accepts three parameters: the match found, the text of + /// the match and a byte buffer with which to write the replaced text + /// (if any). If the closure returns `true`, then it continues to the next + /// match. 
If the closure returns `false`, then searching is stopped. + /// + /// This is the infallible version of + /// [`AhoCorasick::try_replace_all_with_bytes`]. + /// + /// # Panics + /// + /// This panics when [`AhoCorasick::try_replace_all_with_bytes`] would + /// return an error. + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// use aho_corasick::{AhoCorasick, MatchKind}; + /// + /// let patterns = &["append", "appendage", "app"]; + /// let haystack = b"append the app to the appendage"; + /// + /// let ac = AhoCorasick::builder() + /// .match_kind(MatchKind::LeftmostFirst) + /// .build(patterns) + /// .unwrap(); + /// let mut result = vec![]; + /// ac.replace_all_with_bytes(haystack, &mut result, |mat, _, dst| { + /// dst.extend(mat.pattern().as_usize().to_string().bytes()); + /// true + /// }); + /// assert_eq!(b"0 the 2 to the 0age".to_vec(), result); + /// ``` + /// + /// Stopping the replacement by returning `false` (continued from the + /// example above): + /// + /// ``` + /// # use aho_corasick::{AhoCorasick, MatchKind, PatternID}; + /// # let patterns = &["append", "appendage", "app"]; + /// # let haystack = b"append the app to the appendage"; + /// # let ac = AhoCorasick::builder() + /// # .match_kind(MatchKind::LeftmostFirst) + /// # .build(patterns) + /// # .unwrap(); + /// let mut result = vec![]; + /// ac.replace_all_with_bytes(haystack, &mut result, |mat, _, dst| { + /// dst.extend(mat.pattern().as_usize().to_string().bytes()); + /// mat.pattern() != PatternID::must(2) + /// }); + /// assert_eq!(b"0 the 2 to the appendage".to_vec(), result); + /// ``` + pub fn replace_all_with_bytes( + &self, + haystack: &[u8], + dst: &mut Vec, + replace_with: F, + ) where + F: FnMut(&Match, &[u8], &mut Vec) -> bool, + { + self.try_replace_all_with_bytes(haystack, dst, replace_with) + .expect("AhoCorasick::try_replace_all_with_bytes should not fail") + } + + /// Returns an iterator of non-overlapping matches in the given + /// stream. Matches correspond to the same matches as reported by + /// [`AhoCorasick::find_iter`]. + /// + /// The matches yielded by this iterator use absolute position offsets in + /// the stream given, where the first byte has index `0`. Matches are + /// yieled until the stream is exhausted. + /// + /// Each item yielded by the iterator is an `Result`, where an error is yielded if there was a problem + /// reading from the reader given. + /// + /// When searching a stream, an internal buffer is used. Therefore, callers + /// should avoiding providing a buffered reader, if possible. + /// + /// This is the infallible version of + /// [`AhoCorasick::try_stream_find_iter`]. Note that both methods return + /// iterators that produce `Result` values. The difference is that this + /// routine panics if _construction_ of the iterator failed. The `Result` + /// values yield by the iterator come from whether the given reader returns + /// an error or not during the search. + /// + /// # Memory usage + /// + /// In general, searching streams will use a constant amount of memory for + /// its internal buffer. The one requirement is that the internal buffer + /// must be at least the size of the longest possible match. In most use + /// cases, the default buffer size will be much larger than any individual + /// match. + /// + /// # Panics + /// + /// This panics when [`AhoCorasick::try_stream_find_iter`] would return + /// an error. For example, when the Aho-Corasick searcher doesn't support + /// stream searches. 
(Only searchers built with [`MatchKind::Standard`] + /// semantics support stream searches.) + /// + /// # Example: basic usage + /// + /// ``` + /// use aho_corasick::{AhoCorasick, PatternID}; + /// + /// let patterns = &["append", "appendage", "app"]; + /// let haystack = "append the app to the appendage"; + /// + /// let ac = AhoCorasick::new(patterns).unwrap(); + /// let mut matches = vec![]; + /// for result in ac.stream_find_iter(haystack.as_bytes()) { + /// let mat = result?; + /// matches.push(mat.pattern()); + /// } + /// assert_eq!(vec![ + /// PatternID::must(2), + /// PatternID::must(2), + /// PatternID::must(2), + /// ], matches); + /// + /// # Ok::<(), Box>(()) + /// ``` + #[cfg(feature = "std")] + pub fn stream_find_iter<'a, R: std::io::Read>( + &'a self, + rdr: R, + ) -> StreamFindIter<'a, R> { + self.try_stream_find_iter(rdr) + .expect("AhoCorasick::try_stream_find_iter should not fail") + } +} + +/// Fallible search routines. These APIs return an error in cases where the +/// infallible routines would panic. +impl AhoCorasick { + /// Returns the location of the first match according to the match + /// semantics that this automaton was constructed with, and according + /// to the given `Input` configuration. + /// + /// This is the fallible version of [`AhoCorasick::find`]. + /// + /// # Errors + /// + /// This returns an error when this Aho-Corasick searcher does not support + /// the given `Input` configuration. + /// + /// For example, if the Aho-Corasick searcher only supports anchored + /// searches or only supports unanchored searches, then providing an + /// `Input` that requests an anchored (or unanchored) search when it isn't + /// supported would result in an error. + /// + /// # Example: leftmost-first searching + /// + /// Basic usage with leftmost-first semantics: + /// + /// ``` + /// use aho_corasick::{AhoCorasick, MatchKind, Input}; + /// + /// let patterns = &["b", "abc", "abcd"]; + /// let haystack = "foo abcd"; + /// + /// let ac = AhoCorasick::builder() + /// .match_kind(MatchKind::LeftmostFirst) + /// .build(patterns) + /// .unwrap(); + /// let mat = ac.try_find(haystack)?.expect("should have a match"); + /// assert_eq!("abc", &haystack[mat.span()]); + /// + /// # Ok::<(), Box>(()) + /// ``` + /// + /// # Example: anchored leftmost-first searching + /// + /// This shows how to anchor the search, so that even if the haystack + /// contains a match somewhere, a match won't be reported unless one can + /// be found that starts at the beginning of the search: + /// + /// ``` + /// use aho_corasick::{AhoCorasick, Anchored, Input, MatchKind, StartKind}; + /// + /// let patterns = &["b", "abc", "abcd"]; + /// let haystack = "foo abcd"; + /// + /// let ac = AhoCorasick::builder() + /// .match_kind(MatchKind::LeftmostFirst) + /// .start_kind(StartKind::Anchored) + /// .build(patterns) + /// .unwrap(); + /// let input = Input::new(haystack).anchored(Anchored::Yes); + /// assert_eq!(None, ac.try_find(input)?); + /// + /// # Ok::<(), Box>(()) + /// ``` + /// + /// If the beginning of the search is changed to where a match begins, then + /// it will be found: + /// + /// ``` + /// use aho_corasick::{AhoCorasick, Anchored, Input, MatchKind, StartKind}; + /// + /// let patterns = &["b", "abc", "abcd"]; + /// let haystack = "foo abcd"; + /// + /// let ac = AhoCorasick::builder() + /// .match_kind(MatchKind::LeftmostFirst) + /// .start_kind(StartKind::Anchored) + /// .build(patterns) + /// .unwrap(); + /// let input = 
Input::new(haystack).range(4..).anchored(Anchored::Yes); + /// let mat = ac.try_find(input)?.expect("should have a match"); + /// assert_eq!("abc", &haystack[mat.span()]); + /// + /// # Ok::<(), Box>(()) + /// ``` + /// + /// # Example: earliest leftmost-first searching + /// + /// This shows how to run an "earliest" search even when the Aho-Corasick + /// searcher was compiled with leftmost-first match semantics. In this + /// case, the search is stopped as soon as it is known that a match has + /// occurred, even if it doesn't correspond to the leftmost-first match. + /// + /// ``` + /// use aho_corasick::{AhoCorasick, Input, MatchKind}; + /// + /// let patterns = &["b", "abc", "abcd"]; + /// let haystack = "foo abcd"; + /// + /// let ac = AhoCorasick::builder() + /// .match_kind(MatchKind::LeftmostFirst) + /// .build(patterns) + /// .unwrap(); + /// let input = Input::new(haystack).earliest(true); + /// let mat = ac.try_find(input)?.expect("should have a match"); + /// assert_eq!("b", &haystack[mat.span()]); + /// + /// # Ok::<(), Box>(()) + /// ``` + pub fn try_find<'h, I: Into>>( + &self, + input: I, + ) -> Result, MatchError> { + let input = input.into(); + enforce_anchored_consistency(self.start_kind, input.get_anchored())?; + self.aut.try_find(&input) + } + + /// Returns the location of the first overlapping match in the given + /// input with respect to the current state of the underlying searcher. + /// + /// Overlapping searches do not report matches in their return value. + /// Instead, matches can be accessed via [`OverlappingState::get_match`] + /// after a search call. + /// + /// This is the fallible version of [`AhoCorasick::find_overlapping`]. + /// + /// # Errors + /// + /// This returns an error when this Aho-Corasick searcher does not support + /// the given `Input` configuration or if overlapping search is not + /// supported. + /// + /// One example is that only Aho-Corasicker searchers built with + /// [`MatchKind::Standard`] semantics support overlapping searches. Using + /// any other match semantics will result in this returning an error. + /// + /// # Example: basic usage + /// + /// This shows how we can repeatedly call an overlapping search without + /// ever needing to explicitly re-slice the haystack. Overlapping search + /// works this way because searches depend on state saved during the + /// previous search. + /// + /// ``` + /// use aho_corasick::{ + /// automaton::OverlappingState, + /// AhoCorasick, Input, Match, + /// }; + /// + /// let patterns = &["append", "appendage", "app"]; + /// let haystack = "append the app to the appendage"; + /// + /// let ac = AhoCorasick::new(patterns).unwrap(); + /// let mut state = OverlappingState::start(); + /// + /// ac.try_find_overlapping(haystack, &mut state)?; + /// assert_eq!(Some(Match::must(2, 0..3)), state.get_match()); + /// + /// ac.try_find_overlapping(haystack, &mut state)?; + /// assert_eq!(Some(Match::must(0, 0..6)), state.get_match()); + /// + /// ac.try_find_overlapping(haystack, &mut state)?; + /// assert_eq!(Some(Match::must(2, 11..14)), state.get_match()); + /// + /// ac.try_find_overlapping(haystack, &mut state)?; + /// assert_eq!(Some(Match::must(2, 22..25)), state.get_match()); + /// + /// ac.try_find_overlapping(haystack, &mut state)?; + /// assert_eq!(Some(Match::must(0, 22..28)), state.get_match()); + /// + /// ac.try_find_overlapping(haystack, &mut state)?; + /// assert_eq!(Some(Match::must(1, 22..31)), state.get_match()); + /// + /// // No more match matches to be found. 
+ /// ac.try_find_overlapping(haystack, &mut state)?; + /// assert_eq!(None, state.get_match()); + /// + /// # Ok::<(), Box>(()) + /// ``` + /// + /// # Example: implementing your own overlapping iteration + /// + /// The previous example can be easily adapted to implement your own + /// iteration by repeatedly calling `try_find_overlapping` until either + /// an error occurs or no more matches are reported. + /// + /// This is effectively equivalent to the iterator returned by + /// [`AhoCorasick::try_find_overlapping_iter`], with the only difference + /// being that the iterator checks for errors before construction and + /// absolves the caller of needing to check for errors on every search + /// call. (Indeed, if the first `try_find_overlapping` call succeeds and + /// the same `Input` is given to subsequent calls, then all subsequent + /// calls are guaranteed to succeed.) + /// + /// ``` + /// use aho_corasick::{ + /// automaton::OverlappingState, + /// AhoCorasick, Input, Match, + /// }; + /// + /// let patterns = &["append", "appendage", "app"]; + /// let haystack = "append the app to the appendage"; + /// + /// let ac = AhoCorasick::new(patterns).unwrap(); + /// let mut state = OverlappingState::start(); + /// let mut matches = vec![]; + /// + /// loop { + /// ac.try_find_overlapping(haystack, &mut state)?; + /// let mat = match state.get_match() { + /// None => break, + /// Some(mat) => mat, + /// }; + /// matches.push(mat); + /// } + /// let expected = vec![ + /// Match::must(2, 0..3), + /// Match::must(0, 0..6), + /// Match::must(2, 11..14), + /// Match::must(2, 22..25), + /// Match::must(0, 22..28), + /// Match::must(1, 22..31), + /// ]; + /// assert_eq!(expected, matches); + /// + /// # Ok::<(), Box>(()) + /// ``` + /// + /// # Example: anchored iteration + /// + /// The previous example can also be adapted to implement + /// iteration over all anchored matches. In particular, + /// [`AhoCorasick::try_find_overlapping_iter`] does not support this + /// because it isn't totally clear what the match semantics ought to be. + /// + /// In this example, we will find all overlapping matches that start at + /// the beginning of our search. + /// + /// ``` + /// use aho_corasick::{ + /// automaton::OverlappingState, + /// AhoCorasick, Anchored, Input, Match, StartKind, + /// }; + /// + /// let patterns = &["append", "appendage", "app"]; + /// let haystack = "append the app to the appendage"; + /// + /// let ac = AhoCorasick::builder() + /// .start_kind(StartKind::Anchored) + /// .build(patterns) + /// .unwrap(); + /// let input = Input::new(haystack).anchored(Anchored::Yes); + /// let mut state = OverlappingState::start(); + /// let mut matches = vec![]; + /// + /// loop { + /// ac.try_find_overlapping(input.clone(), &mut state)?; + /// let mat = match state.get_match() { + /// None => break, + /// Some(mat) => mat, + /// }; + /// matches.push(mat); + /// } + /// let expected = vec![ + /// Match::must(2, 0..3), + /// Match::must(0, 0..6), + /// ]; + /// assert_eq!(expected, matches); + /// + /// # Ok::<(), Box>(()) + /// ``` + pub fn try_find_overlapping<'h, I: Into>>( + &self, + input: I, + state: &mut OverlappingState, + ) -> Result<(), MatchError> { + let input = input.into(); + enforce_anchored_consistency(self.start_kind, input.get_anchored())?; + self.aut.try_find_overlapping(&input, state) + } + + /// Returns an iterator of non-overlapping matches, using the match + /// semantics that this automaton was constructed with. 
+ /// + /// This is the fallible version of [`AhoCorasick::find_iter`]. + /// + /// Note that the error returned by this method occurs during construction + /// of the iterator. The iterator itself yields `Match` values. That is, + /// once the iterator is constructed, the iteration itself will never + /// report an error. + /// + /// # Errors + /// + /// This returns an error when this Aho-Corasick searcher does not support + /// the given `Input` configuration. + /// + /// For example, if the Aho-Corasick searcher only supports anchored + /// searches or only supports unanchored searches, then providing an + /// `Input` that requests an anchored (or unanchored) search when it isn't + /// supported would result in an error. + /// + /// # Example: leftmost-first searching + /// + /// Basic usage with leftmost-first semantics: + /// + /// ``` + /// use aho_corasick::{AhoCorasick, Input, MatchKind, PatternID}; + /// + /// let patterns = &["append", "appendage", "app"]; + /// let haystack = "append the app to the appendage"; + /// + /// let ac = AhoCorasick::builder() + /// .match_kind(MatchKind::LeftmostFirst) + /// .build(patterns) + /// .unwrap(); + /// let matches: Vec = ac + /// .try_find_iter(Input::new(haystack))? + /// .map(|mat| mat.pattern()) + /// .collect(); + /// assert_eq!(vec![ + /// PatternID::must(0), + /// PatternID::must(2), + /// PatternID::must(0), + /// ], matches); + /// + /// # Ok::<(), Box>(()) + /// ``` + /// + /// # Example: anchored leftmost-first searching + /// + /// This shows how to anchor the search, such that all matches must begin + /// at the starting location of the search. For an iterator, an anchored + /// search implies that all matches are adjacent. + /// + /// ``` + /// use aho_corasick::{ + /// AhoCorasick, Anchored, Input, MatchKind, PatternID, StartKind, + /// }; + /// + /// let patterns = &["foo", "bar", "quux"]; + /// let haystack = "fooquuxbar foo"; + /// + /// let ac = AhoCorasick::builder() + /// .match_kind(MatchKind::LeftmostFirst) + /// .start_kind(StartKind::Anchored) + /// .build(patterns) + /// .unwrap(); + /// let matches: Vec = ac + /// .try_find_iter(Input::new(haystack).anchored(Anchored::Yes))? + /// .map(|mat| mat.pattern()) + /// .collect(); + /// assert_eq!(vec![ + /// PatternID::must(0), + /// PatternID::must(2), + /// PatternID::must(1), + /// // The final 'foo' is not found because it is not adjacent to the + /// // 'bar' match. It needs to be adjacent because our search is + /// // anchored. + /// ], matches); + /// + /// # Ok::<(), Box>(()) + /// ``` + pub fn try_find_iter<'a, 'h, I: Into>>( + &'a self, + input: I, + ) -> Result, MatchError> { + let input = input.into(); + enforce_anchored_consistency(self.start_kind, input.get_anchored())?; + Ok(FindIter(self.aut.try_find_iter(input)?)) + } + + /// Returns an iterator of overlapping matches. + /// + /// This is the fallible version of [`AhoCorasick::find_overlapping_iter`]. + /// + /// Note that the error returned by this method occurs during construction + /// of the iterator. The iterator itself yields `Match` values. That is, + /// once the iterator is constructed, the iteration itself will never + /// report an error. + /// + /// # Errors + /// + /// This returns an error when this Aho-Corasick searcher does not support + /// the given `Input` configuration or does not support overlapping + /// searches. + /// + /// One example is that only Aho-Corasicker searchers built with + /// [`MatchKind::Standard`] semantics support overlapping searches. 
Using + /// any other match semantics will result in this returning an error. + /// + /// # Example: basic usage + /// + /// ``` + /// use aho_corasick::{AhoCorasick, Input, PatternID}; + /// + /// let patterns = &["append", "appendage", "app"]; + /// let haystack = "append the app to the appendage"; + /// + /// let ac = AhoCorasick::new(patterns).unwrap(); + /// let matches: Vec = ac + /// .try_find_overlapping_iter(Input::new(haystack))? + /// .map(|mat| mat.pattern()) + /// .collect(); + /// assert_eq!(vec![ + /// PatternID::must(2), + /// PatternID::must(0), + /// PatternID::must(2), + /// PatternID::must(2), + /// PatternID::must(0), + /// PatternID::must(1), + /// ], matches); + /// + /// # Ok::<(), Box>(()) + /// ``` + /// + /// # Example: anchored overlapping search returns an error + /// + /// It isn't clear what the match semantics for anchored overlapping + /// iterators *ought* to be, so currently an error is returned. Callers + /// may use [`AhoCorasick::try_find_overlapping`] to implement their own + /// semantics if desired. + /// + /// ``` + /// use aho_corasick::{AhoCorasick, Anchored, Input, StartKind}; + /// + /// let patterns = &["append", "appendage", "app"]; + /// let haystack = "appendappendage app"; + /// + /// let ac = AhoCorasick::builder() + /// .start_kind(StartKind::Anchored) + /// .build(patterns) + /// .unwrap(); + /// let input = Input::new(haystack).anchored(Anchored::Yes); + /// assert!(ac.try_find_overlapping_iter(input).is_err()); + /// + /// # Ok::<(), Box>(()) + /// ``` + pub fn try_find_overlapping_iter<'a, 'h, I: Into>>( + &'a self, + input: I, + ) -> Result, MatchError> { + let input = input.into(); + enforce_anchored_consistency(self.start_kind, input.get_anchored())?; + Ok(FindOverlappingIter(self.aut.try_find_overlapping_iter(input)?)) + } + + /// Replace all matches with a corresponding value in the `replace_with` + /// slice given. Matches correspond to the same matches as reported by + /// [`AhoCorasick::try_find_iter`]. + /// + /// Replacements are determined by the index of the matching pattern. + /// For example, if the pattern with index `2` is found, then it is + /// replaced by `replace_with[2]`. + /// + /// # Panics + /// + /// This panics when `replace_with.len()` does not equal + /// [`AhoCorasick::patterns_len`]. + /// + /// # Errors + /// + /// This returns an error when this Aho-Corasick searcher does not support + /// the default `Input` configuration. More specifically, this occurs only + /// when the Aho-Corasick searcher does not support unanchored searches + /// since this replacement routine always does an unanchored search. + /// + /// # Example: basic usage + /// + /// ``` + /// use aho_corasick::{AhoCorasick, MatchKind}; + /// + /// let patterns = &["append", "appendage", "app"]; + /// let haystack = "append the app to the appendage"; + /// + /// let ac = AhoCorasick::builder() + /// .match_kind(MatchKind::LeftmostFirst) + /// .build(patterns) + /// .unwrap(); + /// let result = ac.try_replace_all(haystack, &["x", "y", "z"])?; + /// assert_eq!("x the z to the xage", result); + /// + /// # Ok::<(), Box>(()) + /// ``` + pub fn try_replace_all( + &self, + haystack: &str, + replace_with: &[B], + ) -> Result + where + B: AsRef, + { + enforce_anchored_consistency(self.start_kind, Anchored::No)?; + self.aut.try_replace_all(haystack, replace_with) + } + + /// Replace all matches using raw bytes with a corresponding value in the + /// `replace_with` slice given. 
Matches correspond to the same matches as + /// reported by [`AhoCorasick::try_find_iter`]. + /// + /// Replacements are determined by the index of the matching pattern. + /// For example, if the pattern with index `2` is found, then it is + /// replaced by `replace_with[2]`. + /// + /// This is the fallible version of [`AhoCorasick::replace_all_bytes`]. + /// + /// # Panics + /// + /// This panics when `replace_with.len()` does not equal + /// [`AhoCorasick::patterns_len`]. + /// + /// # Errors + /// + /// This returns an error when this Aho-Corasick searcher does not support + /// the default `Input` configuration. More specifically, this occurs only + /// when the Aho-Corasick searcher does not support unanchored searches + /// since this replacement routine always does an unanchored search. + /// + /// # Example: basic usage + /// + /// ``` + /// use aho_corasick::{AhoCorasick, MatchKind}; + /// + /// let patterns = &["append", "appendage", "app"]; + /// let haystack = b"append the app to the appendage"; + /// + /// let ac = AhoCorasick::builder() + /// .match_kind(MatchKind::LeftmostFirst) + /// .build(patterns) + /// .unwrap(); + /// let result = ac.try_replace_all_bytes(haystack, &["x", "y", "z"])?; + /// assert_eq!(b"x the z to the xage".to_vec(), result); + /// + /// # Ok::<(), Box>(()) + /// ``` + pub fn try_replace_all_bytes( + &self, + haystack: &[u8], + replace_with: &[B], + ) -> Result, MatchError> + where + B: AsRef<[u8]>, + { + enforce_anchored_consistency(self.start_kind, Anchored::No)?; + self.aut.try_replace_all_bytes(haystack, replace_with) + } + + /// Replace all matches using a closure called on each match. + /// Matches correspond to the same matches as reported by + /// [`AhoCorasick::try_find_iter`]. + /// + /// The closure accepts three parameters: the match found, the text of + /// the match and a string buffer with which to write the replaced text + /// (if any). If the closure returns `true`, then it continues to the next + /// match. If the closure returns `false`, then searching is stopped. + /// + /// Note that any matches with boundaries that don't fall on a valid UTF-8 + /// boundary are silently skipped. + /// + /// This is the fallible version of [`AhoCorasick::replace_all_with`]. + /// + /// # Errors + /// + /// This returns an error when this Aho-Corasick searcher does not support + /// the default `Input` configuration. More specifically, this occurs only + /// when the Aho-Corasick searcher does not support unanchored searches + /// since this replacement routine always does an unanchored search. 
+ /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// use aho_corasick::{AhoCorasick, MatchKind}; + /// + /// let patterns = &["append", "appendage", "app"]; + /// let haystack = "append the app to the appendage"; + /// + /// let ac = AhoCorasick::builder() + /// .match_kind(MatchKind::LeftmostFirst) + /// .build(patterns) + /// .unwrap(); + /// let mut result = String::new(); + /// ac.try_replace_all_with(haystack, &mut result, |mat, _, dst| { + /// dst.push_str(&mat.pattern().as_usize().to_string()); + /// true + /// })?; + /// assert_eq!("0 the 2 to the 0age", result); + /// + /// # Ok::<(), Box>(()) + /// ``` + /// + /// Stopping the replacement by returning `false` (continued from the + /// example above): + /// + /// ``` + /// # use aho_corasick::{AhoCorasick, MatchKind, PatternID}; + /// # let patterns = &["append", "appendage", "app"]; + /// # let haystack = "append the app to the appendage"; + /// # let ac = AhoCorasick::builder() + /// # .match_kind(MatchKind::LeftmostFirst) + /// # .build(patterns) + /// # .unwrap(); + /// let mut result = String::new(); + /// ac.try_replace_all_with(haystack, &mut result, |mat, _, dst| { + /// dst.push_str(&mat.pattern().as_usize().to_string()); + /// mat.pattern() != PatternID::must(2) + /// })?; + /// assert_eq!("0 the 2 to the appendage", result); + /// + /// # Ok::<(), Box>(()) + /// ``` + pub fn try_replace_all_with( + &self, + haystack: &str, + dst: &mut String, + replace_with: F, + ) -> Result<(), MatchError> + where + F: FnMut(&Match, &str, &mut String) -> bool, + { + enforce_anchored_consistency(self.start_kind, Anchored::No)?; + self.aut.try_replace_all_with(haystack, dst, replace_with) + } + + /// Replace all matches using raw bytes with a closure called on each + /// match. Matches correspond to the same matches as reported by + /// [`AhoCorasick::try_find_iter`]. + /// + /// The closure accepts three parameters: the match found, the text of + /// the match and a byte buffer with which to write the replaced text + /// (if any). If the closure returns `true`, then it continues to the next + /// match. If the closure returns `false`, then searching is stopped. + /// + /// This is the fallible version of + /// [`AhoCorasick::replace_all_with_bytes`]. + /// + /// # Errors + /// + /// This returns an error when this Aho-Corasick searcher does not support + /// the default `Input` configuration. More specifically, this occurs only + /// when the Aho-Corasick searcher does not support unanchored searches + /// since this replacement routine always does an unanchored search. 
+ /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// use aho_corasick::{AhoCorasick, MatchKind}; + /// + /// let patterns = &["append", "appendage", "app"]; + /// let haystack = b"append the app to the appendage"; + /// + /// let ac = AhoCorasick::builder() + /// .match_kind(MatchKind::LeftmostFirst) + /// .build(patterns) + /// .unwrap(); + /// let mut result = vec![]; + /// ac.try_replace_all_with_bytes(haystack, &mut result, |mat, _, dst| { + /// dst.extend(mat.pattern().as_usize().to_string().bytes()); + /// true + /// })?; + /// assert_eq!(b"0 the 2 to the 0age".to_vec(), result); + /// + /// # Ok::<(), Box>(()) + /// ``` + /// + /// Stopping the replacement by returning `false` (continued from the + /// example above): + /// + /// ``` + /// # use aho_corasick::{AhoCorasick, MatchKind, PatternID}; + /// # let patterns = &["append", "appendage", "app"]; + /// # let haystack = b"append the app to the appendage"; + /// # let ac = AhoCorasick::builder() + /// # .match_kind(MatchKind::LeftmostFirst) + /// # .build(patterns) + /// # .unwrap(); + /// let mut result = vec![]; + /// ac.try_replace_all_with_bytes(haystack, &mut result, |mat, _, dst| { + /// dst.extend(mat.pattern().as_usize().to_string().bytes()); + /// mat.pattern() != PatternID::must(2) + /// })?; + /// assert_eq!(b"0 the 2 to the appendage".to_vec(), result); + /// + /// # Ok::<(), Box>(()) + /// ``` + pub fn try_replace_all_with_bytes( + &self, + haystack: &[u8], + dst: &mut Vec, + replace_with: F, + ) -> Result<(), MatchError> + where + F: FnMut(&Match, &[u8], &mut Vec) -> bool, + { + enforce_anchored_consistency(self.start_kind, Anchored::No)?; + self.aut.try_replace_all_with_bytes(haystack, dst, replace_with) + } + + /// Returns an iterator of non-overlapping matches in the given + /// stream. Matches correspond to the same matches as reported by + /// [`AhoCorasick::try_find_iter`]. + /// + /// The matches yielded by this iterator use absolute position offsets in + /// the stream given, where the first byte has index `0`. Matches are + /// yieled until the stream is exhausted. + /// + /// Each item yielded by the iterator is an `Result`, where an error is yielded if there was a problem + /// reading from the reader given. + /// + /// When searching a stream, an internal buffer is used. Therefore, callers + /// should avoiding providing a buffered reader, if possible. + /// + /// This is the fallible version of [`AhoCorasick::stream_find_iter`]. + /// Note that both methods return iterators that produce `Result` values. + /// The difference is that this routine returns an error if _construction_ + /// of the iterator failed. The `Result` values yield by the iterator + /// come from whether the given reader returns an error or not during the + /// search. + /// + /// # Memory usage + /// + /// In general, searching streams will use a constant amount of memory for + /// its internal buffer. The one requirement is that the internal buffer + /// must be at least the size of the longest possible match. In most use + /// cases, the default buffer size will be much larger than any individual + /// match. + /// + /// # Errors + /// + /// This returns an error when this Aho-Corasick searcher does not support + /// the default `Input` configuration. More specifically, this occurs only + /// when the Aho-Corasick searcher does not support unanchored searches + /// since this stream searching routine always does an unanchored search. 
+ /// + /// This also returns an error if the searcher does not support stream + /// searches. Only searchers built with [`MatchKind::Standard`] semantics + /// support stream searches. + /// + /// # Example: basic usage + /// + /// ``` + /// use aho_corasick::{AhoCorasick, PatternID}; + /// + /// let patterns = &["append", "appendage", "app"]; + /// let haystack = "append the app to the appendage"; + /// + /// let ac = AhoCorasick::new(patterns).unwrap(); + /// let mut matches = vec![]; + /// for result in ac.try_stream_find_iter(haystack.as_bytes())? { + /// let mat = result?; + /// matches.push(mat.pattern()); + /// } + /// assert_eq!(vec![ + /// PatternID::must(2), + /// PatternID::must(2), + /// PatternID::must(2), + /// ], matches); + /// + /// # Ok::<(), Box>(()) + /// ``` + #[cfg(feature = "std")] + pub fn try_stream_find_iter<'a, R: std::io::Read>( + &'a self, + rdr: R, + ) -> Result, MatchError> { + enforce_anchored_consistency(self.start_kind, Anchored::No)?; + self.aut.try_stream_find_iter(rdr).map(StreamFindIter) + } + + /// Search for and replace all matches of this automaton in + /// the given reader, and write the replacements to the given + /// writer. Matches correspond to the same matches as reported by + /// [`AhoCorasick::try_find_iter`]. + /// + /// Replacements are determined by the index of the matching pattern. For + /// example, if the pattern with index `2` is found, then it is replaced by + /// `replace_with[2]`. + /// + /// After all matches are replaced, the writer is _not_ flushed. + /// + /// If there was a problem reading from the given reader or writing to the + /// given writer, then the corresponding `io::Error` is returned and all + /// replacement is stopped. + /// + /// When searching a stream, an internal buffer is used. Therefore, callers + /// should avoiding providing a buffered reader, if possible. However, + /// callers may want to provide a buffered writer. + /// + /// Note that there is currently no infallible version of this routine. + /// + /// # Memory usage + /// + /// In general, searching streams will use a constant amount of memory for + /// its internal buffer. The one requirement is that the internal buffer + /// must be at least the size of the longest possible match. In most use + /// cases, the default buffer size will be much larger than any individual + /// match. + /// + /// # Panics + /// + /// This panics when `replace_with.len()` does not equal + /// [`AhoCorasick::patterns_len`]. + /// + /// # Errors + /// + /// This returns an error when this Aho-Corasick searcher does not support + /// the default `Input` configuration. More specifically, this occurs only + /// when the Aho-Corasick searcher does not support unanchored searches + /// since this stream searching routine always does an unanchored search. + /// + /// This also returns an error if the searcher does not support stream + /// searches. Only searchers built with [`MatchKind::Standard`] semantics + /// support stream searches. 
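+    /// Note that in that case the underlying configuration error is wrapped
+    /// in a `std::io::Error` with kind `std::io::ErrorKind::Other`, since
+    /// this routine reports all failures through its `std::io::Error` return
+    /// type.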
+ /// + /// # Example: basic usage + /// + /// ``` + /// use aho_corasick::AhoCorasick; + /// + /// let patterns = &["fox", "brown", "quick"]; + /// let haystack = "The quick brown fox."; + /// let replace_with = &["sloth", "grey", "slow"]; + /// + /// let ac = AhoCorasick::new(patterns).unwrap(); + /// let mut result = vec![]; + /// ac.try_stream_replace_all( + /// haystack.as_bytes(), + /// &mut result, + /// replace_with, + /// )?; + /// assert_eq!(b"The slow grey sloth.".to_vec(), result); + /// + /// # Ok::<(), Box>(()) + /// ``` + #[cfg(feature = "std")] + pub fn try_stream_replace_all( + &self, + rdr: R, + wtr: W, + replace_with: &[B], + ) -> Result<(), std::io::Error> + where + R: std::io::Read, + W: std::io::Write, + B: AsRef<[u8]>, + { + enforce_anchored_consistency(self.start_kind, Anchored::No) + .map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e))?; + self.aut.try_stream_replace_all(rdr, wtr, replace_with) + } + + /// Search the given reader and replace all matches of this automaton + /// using the given closure. The result is written to the given + /// writer. Matches correspond to the same matches as reported by + /// [`AhoCorasick::try_find_iter`]. + /// + /// The closure accepts three parameters: the match found, the text of + /// the match and the writer with which to write the replaced text (if any). + /// + /// After all matches are replaced, the writer is _not_ flushed. + /// + /// If there was a problem reading from the given reader or writing to the + /// given writer, then the corresponding `io::Error` is returned and all + /// replacement is stopped. + /// + /// When searching a stream, an internal buffer is used. Therefore, callers + /// should avoiding providing a buffered reader, if possible. However, + /// callers may want to provide a buffered writer. + /// + /// Note that there is currently no infallible version of this routine. + /// + /// # Memory usage + /// + /// In general, searching streams will use a constant amount of memory for + /// its internal buffer. The one requirement is that the internal buffer + /// must be at least the size of the longest possible match. In most use + /// cases, the default buffer size will be much larger than any individual + /// match. + /// + /// # Errors + /// + /// This returns an error when this Aho-Corasick searcher does not support + /// the default `Input` configuration. More specifically, this occurs only + /// when the Aho-Corasick searcher does not support unanchored searches + /// since this stream searching routine always does an unanchored search. + /// + /// This also returns an error if the searcher does not support stream + /// searches. Only searchers built with [`MatchKind::Standard`] semantics + /// support stream searches. 
+ /// + /// # Example: basic usage + /// + /// ``` + /// use std::io::Write; + /// use aho_corasick::AhoCorasick; + /// + /// let patterns = &["fox", "brown", "quick"]; + /// let haystack = "The quick brown fox."; + /// + /// let ac = AhoCorasick::new(patterns).unwrap(); + /// let mut result = vec![]; + /// ac.try_stream_replace_all_with( + /// haystack.as_bytes(), + /// &mut result, + /// |mat, _, wtr| { + /// wtr.write_all(mat.pattern().as_usize().to_string().as_bytes()) + /// }, + /// )?; + /// assert_eq!(b"The 2 1 0.".to_vec(), result); + /// + /// # Ok::<(), Box>(()) + /// ``` + #[cfg(feature = "std")] + pub fn try_stream_replace_all_with( + &self, + rdr: R, + wtr: W, + replace_with: F, + ) -> Result<(), std::io::Error> + where + R: std::io::Read, + W: std::io::Write, + F: FnMut(&Match, &[u8], &mut W) -> Result<(), std::io::Error>, + { + enforce_anchored_consistency(self.start_kind, Anchored::No) + .map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e))?; + self.aut.try_stream_replace_all_with(rdr, wtr, replace_with) + } +} + +/// Routines for querying information about the Aho-Corasick automaton. +impl AhoCorasick { + /// Returns the kind of the Aho-Corasick automaton used by this searcher. + /// + /// Knowing the Aho-Corasick kind is principally useful for diagnostic + /// purposes. In particular, if no specific kind was given to + /// [`AhoCorasickBuilder::kind`], then one is automatically chosen and + /// this routine will report which one. + /// + /// Note that the heuristics used for choosing which `AhoCorasickKind` + /// may be changed in a semver compatible release. + /// + /// # Examples + /// + /// ``` + /// use aho_corasick::{AhoCorasick, AhoCorasickKind}; + /// + /// let ac = AhoCorasick::new(&["foo", "bar", "quux", "baz"]).unwrap(); + /// // The specific Aho-Corasick kind chosen is not guaranteed! + /// assert_eq!(AhoCorasickKind::DFA, ac.kind()); + /// ``` + pub fn kind(&self) -> AhoCorasickKind { + self.kind + } + + /// Returns the type of starting search configuration supported by this + /// Aho-Corasick automaton. + /// + /// # Examples + /// + /// ``` + /// use aho_corasick::{AhoCorasick, StartKind}; + /// + /// let ac = AhoCorasick::new(&["foo", "bar", "quux", "baz"]).unwrap(); + /// assert_eq!(StartKind::Unanchored, ac.start_kind()); + /// ``` + pub fn start_kind(&self) -> StartKind { + self.start_kind + } + + /// Returns the match kind used by this automaton. + /// + /// The match kind is important because it determines what kinds of + /// matches are returned. Also, some operations (such as overlapping + /// search and stream searching) are only supported when using the + /// [`MatchKind::Standard`] match kind. + /// + /// # Examples + /// + /// ``` + /// use aho_corasick::{AhoCorasick, MatchKind}; + /// + /// let ac = AhoCorasick::new(&["foo", "bar", "quux", "baz"]).unwrap(); + /// assert_eq!(MatchKind::Standard, ac.match_kind()); + /// ``` + pub fn match_kind(&self) -> MatchKind { + self.aut.match_kind() + } + + /// Returns the length of the shortest pattern matched by this automaton. 
+ /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// use aho_corasick::AhoCorasick; + /// + /// let ac = AhoCorasick::new(&["foo", "bar", "quux", "baz"]).unwrap(); + /// assert_eq!(3, ac.min_pattern_len()); + /// ``` + /// + /// Note that an `AhoCorasick` automaton has a minimum length of `0` if + /// and only if it can match the empty string: + /// + /// ``` + /// use aho_corasick::AhoCorasick; + /// + /// let ac = AhoCorasick::new(&["foo", "", "quux", "baz"]).unwrap(); + /// assert_eq!(0, ac.min_pattern_len()); + /// ``` + pub fn min_pattern_len(&self) -> usize { + self.aut.min_pattern_len() + } + + /// Returns the length of the longest pattern matched by this automaton. + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// use aho_corasick::AhoCorasick; + /// + /// let ac = AhoCorasick::new(&["foo", "bar", "quux", "baz"]).unwrap(); + /// assert_eq!(4, ac.max_pattern_len()); + /// ``` + pub fn max_pattern_len(&self) -> usize { + self.aut.max_pattern_len() + } + + /// Return the total number of patterns matched by this automaton. + /// + /// This includes patterns that may never participate in a match. For + /// example, if [`MatchKind::LeftmostFirst`] match semantics are used, and + /// the patterns `Sam` and `Samwise` were used to build the automaton (in + /// that order), then `Samwise` can never participate in a match because + /// `Sam` will always take priority. + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// use aho_corasick::AhoCorasick; + /// + /// let ac = AhoCorasick::new(&["foo", "bar", "baz"]).unwrap(); + /// assert_eq!(3, ac.patterns_len()); + /// ``` + pub fn patterns_len(&self) -> usize { + self.aut.patterns_len() + } + + /// Returns the approximate total amount of heap used by this automaton, in + /// units of bytes. + /// + /// # Examples + /// + /// This example shows the difference in heap usage between a few + /// configurations: + /// + /// ``` + /// # if !cfg!(target_pointer_width = "64") { return; } + /// use aho_corasick::{AhoCorasick, AhoCorasickKind, MatchKind}; + /// + /// let ac = AhoCorasick::builder() + /// .kind(None) // default + /// .build(&["foobar", "bruce", "triskaidekaphobia", "springsteen"]) + /// .unwrap(); + /// assert_eq!(5_632, ac.memory_usage()); + /// + /// let ac = AhoCorasick::builder() + /// .kind(None) // default + /// .ascii_case_insensitive(true) + /// .build(&["foobar", "bruce", "triskaidekaphobia", "springsteen"]) + /// .unwrap(); + /// assert_eq!(11_136, ac.memory_usage()); + /// + /// let ac = AhoCorasick::builder() + /// .kind(Some(AhoCorasickKind::NoncontiguousNFA)) + /// .ascii_case_insensitive(true) + /// .build(&["foobar", "bruce", "triskaidekaphobia", "springsteen"]) + /// .unwrap(); + /// assert_eq!(10_879, ac.memory_usage()); + /// + /// let ac = AhoCorasick::builder() + /// .kind(Some(AhoCorasickKind::ContiguousNFA)) + /// .ascii_case_insensitive(true) + /// .build(&["foobar", "bruce", "triskaidekaphobia", "springsteen"]) + /// .unwrap(); + /// assert_eq!(2_584, ac.memory_usage()); + /// + /// let ac = AhoCorasick::builder() + /// .kind(Some(AhoCorasickKind::DFA)) + /// .ascii_case_insensitive(true) + /// .build(&["foobar", "bruce", "triskaidekaphobia", "springsteen"]) + /// .unwrap(); + /// // While this shows the DFA being the biggest here by a small margin, + /// // don't let the difference fool you. 
With such a small number of
+    /// // patterns, the difference is small, but a bigger number of patterns
+    /// // will reveal that the rate of growth of the DFA is far bigger than
+    /// // the NFAs above. For a large number of patterns, it is easy for the
+    /// // DFA to take an order of magnitude more heap space (or more!).
+    /// assert_eq!(11_136, ac.memory_usage());
+    /// ```
+    pub fn memory_usage(&self) -> usize {
+        self.aut.memory_usage()
+    }
+}
+
+// We provide a manual debug impl so that we don't include the 'start_kind',
+// principally because it's kind of weird to do so and because it screws with
+// the carefully curated debug output for the underlying automaton.
+impl core::fmt::Debug for AhoCorasick {
+    fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
+        f.debug_tuple("AhoCorasick").field(&self.aut).finish()
+    }
+}
+
+/// An iterator of non-overlapping matches in a particular haystack.
+///
+/// This iterator yields matches according to the [`MatchKind`] used by this
+/// automaton.
+///
+/// This iterator is constructed via the [`AhoCorasick::find_iter`] and
+/// [`AhoCorasick::try_find_iter`] methods.
+///
+/// The lifetime `'a` refers to the lifetime of the `AhoCorasick` automaton.
+///
+/// The lifetime `'h` refers to the lifetime of the haystack being searched.
+#[derive(Debug)]
+pub struct FindIter<'a, 'h>(automaton::FindIter<'a, 'h, Arc<dyn AcAutomaton>>);
+
+impl<'a, 'h> Iterator for FindIter<'a, 'h> {
+    type Item = Match;
+
+    #[inline]
+    fn next(&mut self) -> Option<Match> {
+        self.0.next()
+    }
+}
+
+/// An iterator of overlapping matches in a particular haystack.
+///
+/// This iterator will report all possible matches in a particular haystack,
+/// even when the matches overlap.
+///
+/// This iterator is constructed via the [`AhoCorasick::find_overlapping_iter`]
+/// and [`AhoCorasick::try_find_overlapping_iter`] methods.
+///
+/// The lifetime `'a` refers to the lifetime of the `AhoCorasick` automaton.
+///
+/// The lifetime `'h` refers to the lifetime of the haystack being searched.
+#[derive(Debug)]
+pub struct FindOverlappingIter<'a, 'h>(
+    automaton::FindOverlappingIter<'a, 'h, Arc<dyn AcAutomaton>>,
+);
+
+impl<'a, 'h> Iterator for FindOverlappingIter<'a, 'h> {
+    type Item = Match;
+
+    #[inline]
+    fn next(&mut self) -> Option<Match> {
+        self.0.next()
+    }
+}
+
+/// An iterator that reports Aho-Corasick matches in a stream.
+///
+/// This iterator yields elements of type `Result<Match, std::io::Error>`,
+/// where an error is reported if there was a problem reading from the
+/// underlying stream. The iterator terminates only when the underlying stream
+/// reaches `EOF`.
+///
+/// This iterator is constructed via the [`AhoCorasick::stream_find_iter`] and
+/// [`AhoCorasick::try_stream_find_iter`] methods.
+///
+/// The type variable `R` refers to the `io::Read` stream that is being read
+/// from.
+///
+/// The lifetime `'a` refers to the lifetime of the corresponding
+/// [`AhoCorasick`] searcher.
+#[cfg(feature = "std")]
+#[derive(Debug)]
+pub struct StreamFindIter<'a, R>(
+    automaton::StreamFindIter<'a, Arc<dyn AcAutomaton>, R>,
+);
+
+#[cfg(feature = "std")]
+impl<'a, R: std::io::Read> Iterator for StreamFindIter<'a, R> {
+    type Item = Result<Match, std::io::Error>;
+
+    fn next(&mut self) -> Option<Result<Match, std::io::Error>> {
+        self.0.next()
+    }
+}
+
+/// A builder for configuring an Aho-Corasick automaton.
+///
+/// # Quick advice
+///
+/// * Use [`AhoCorasickBuilder::match_kind`] to configure your searcher
+/// with [`MatchKind::LeftmostFirst`] if you want to match how backtracking
+/// regex engines execute searches for `pat1|pat2|..|patN`. Use
+/// [`MatchKind::LeftmostLongest`] if you want to match how POSIX regex engines
+/// do it.
+/// * If you need an anchored search, use [`AhoCorasickBuilder::start_kind`] to
+/// set the [`StartKind::Anchored`] mode since [`StartKind::Unanchored`] is the
+/// default. Or just use [`StartKind::Both`] to support both types of searches.
+/// * You might want to use [`AhoCorasickBuilder::kind`] to set your searcher
+/// to always use an [`AhoCorasickKind::DFA`] if search speed is critical and
+/// memory usage isn't a concern. Otherwise, not setting a kind will probably
+/// make the right choice for you. Beware that if you use [`StartKind::Both`]
+/// to build a searcher that supports both unanchored and anchored searches
+/// _and_ you set [`AhoCorasickKind::DFA`], then the DFA will essentially be
+/// duplicated to support both simultaneously. This results in very high memory
+/// usage.
+/// * For all other options, their defaults are almost certainly what you want.
+#[derive(Clone, Debug, Default)]
+pub struct AhoCorasickBuilder {
+    nfa_noncontiguous: noncontiguous::Builder,
+    nfa_contiguous: contiguous::Builder,
+    dfa: dfa::Builder,
+    kind: Option<AhoCorasickKind>,
+    start_kind: StartKind,
+}
+
+impl AhoCorasickBuilder {
+    /// Create a new builder for configuring an Aho-Corasick automaton.
+    ///
+    /// The builder provides a way to configure a number of things, including
+    /// ASCII case insensitivity and what kind of match semantics are used.
+    pub fn new() -> AhoCorasickBuilder {
+        AhoCorasickBuilder::default()
+    }
+
+    /// Build an Aho-Corasick automaton using the configuration set on this
+    /// builder.
+    ///
+    /// A builder may be reused to create more automatons.
+    ///
+    /// # Examples
+    ///
+    /// Basic usage:
+    ///
+    /// ```
+    /// use aho_corasick::{AhoCorasickBuilder, PatternID};
+    ///
+    /// let patterns = &["foo", "bar", "baz"];
+    /// let ac = AhoCorasickBuilder::new().build(patterns).unwrap();
+    /// assert_eq!(
+    ///     Some(PatternID::must(1)),
+    ///     ac.find("xxx bar xxx").map(|m| m.pattern()),
+    /// );
+    /// ```
+    pub fn build<I, P>(&self, patterns: I) -> Result<AhoCorasick, BuildError>
+    where
+        I: IntoIterator<Item = P>,
+        P: AsRef<[u8]>,
+    {
+        let nfa = self.nfa_noncontiguous.build(patterns)?;
+        let (aut, kind): (Arc<dyn AcAutomaton>, AhoCorasickKind) =
+            match self.kind {
+                None => {
+                    debug!(
+                        "asked for automatic Aho-Corasick implementation, \
+                         criteria: <patterns = {:?}, max pattern len = {:?}, \
+                         start kind = {:?}>",
+                        nfa.patterns_len(),
+                        nfa.max_pattern_len(),
+                        self.start_kind,
+                    );
+                    self.build_auto(nfa)
+                }
+                Some(AhoCorasickKind::NoncontiguousNFA) => {
+                    debug!("forcefully chose noncontiguous NFA");
+                    (Arc::new(nfa), AhoCorasickKind::NoncontiguousNFA)
+                }
+                Some(AhoCorasickKind::ContiguousNFA) => {
+                    debug!("forcefully chose contiguous NFA");
+                    let cnfa =
+                        self.nfa_contiguous.build_from_noncontiguous(&nfa)?;
+                    (Arc::new(cnfa), AhoCorasickKind::ContiguousNFA)
+                }
+                Some(AhoCorasickKind::DFA) => {
+                    debug!("forcefully chose DFA");
+                    let dfa = self.dfa.build_from_noncontiguous(&nfa)?;
+                    (Arc::new(dfa), AhoCorasickKind::DFA)
+                }
+            };
+        Ok(AhoCorasick { aut, kind, start_kind: self.start_kind })
+    }
+
+    /// Implements the automatic selection logic for the Aho-Corasick
+    /// implementation to use. Since all Aho-Corasick automatons are built
+    /// from a non-contiguous NFA, the caller is responsible for building
+    /// that first.
+    fn build_auto(
+        &self,
+        nfa: noncontiguous::NFA,
+    ) -> (Arc<dyn AcAutomaton>, AhoCorasickKind) {
+        // We try to build a DFA if we have a very small number of patterns,
+        // otherwise the memory usage just gets too crazy.
We also only do it + // when the start kind is unanchored or anchored, but not both, because + // both implies two full copies of the transition table. + let try_dfa = !matches!(self.start_kind, StartKind::Both) + && nfa.patterns_len() <= 100; + if try_dfa { + match self.dfa.build_from_noncontiguous(&nfa) { + Ok(dfa) => { + debug!("chose a DFA"); + return (Arc::new(dfa), AhoCorasickKind::DFA); + } + Err(_err) => { + debug!( + "failed to build DFA, trying something else: {}", + _err + ); + } + } + } + // We basically always want a contiguous NFA if the limited + // circumstances in which we use a DFA are not true. It is quite fast + // and has excellent memory usage. The only way we don't use it is if + // there are so many states that it can't fit in a contiguous NFA. + // And the only way to know that is to try to build it. Building a + // contiguous NFA is mostly just reshuffling data from a noncontiguous + // NFA, so it isn't too expensive, especially relative to building a + // noncontiguous NFA in the first place. + match self.nfa_contiguous.build_from_noncontiguous(&nfa) { + Ok(nfa) => { + debug!("chose contiguous NFA"); + return (Arc::new(nfa), AhoCorasickKind::ContiguousNFA); + } + #[allow(unused_variables)] // unused when 'logging' is disabled + Err(_err) => { + debug!( + "failed to build contiguous NFA, \ + trying something else: {}", + _err + ); + } + } + debug!("chose non-contiguous NFA"); + (Arc::new(nfa), AhoCorasickKind::NoncontiguousNFA) + } + + /// Set the desired match semantics. + /// + /// The default is [`MatchKind::Standard`], which corresponds to the match + /// semantics supported by the standard textbook description of the + /// Aho-Corasick algorithm. Namely, matches are reported as soon as they + /// are found. Moreover, this is the only way to get overlapping matches + /// or do stream searching. + /// + /// The other kinds of match semantics that are supported are + /// [`MatchKind::LeftmostFirst`] and [`MatchKind::LeftmostLongest`]. The + /// former corresponds to the match you would get if you were to try to + /// match each pattern at each position in the haystack in the same order + /// that you give to the automaton. That is, it returns the leftmost match + /// corresponding to the earliest pattern given to the automaton. The + /// latter corresponds to finding the longest possible match among all + /// leftmost matches. + /// + /// For more details on match semantics, see the [documentation for + /// `MatchKind`](MatchKind). + /// + /// Note that setting this to [`MatchKind::LeftmostFirst`] or + /// [`MatchKind::LeftmostLongest`] will cause some search routines on + /// [`AhoCorasick`] to return an error (or panic if you're using the + /// infallible API). Notably, this includes stream and overlapping + /// searches. + /// + /// # Examples + /// + /// In these examples, we demonstrate the differences between match + /// semantics for a particular set of patterns in a specific order: + /// `b`, `abc`, `abcd`. 
+ /// + /// Standard semantics: + /// + /// ``` + /// use aho_corasick::{AhoCorasick, MatchKind}; + /// + /// let patterns = &["b", "abc", "abcd"]; + /// let haystack = "abcd"; + /// + /// let ac = AhoCorasick::builder() + /// .match_kind(MatchKind::Standard) // default, not necessary + /// .build(patterns) + /// .unwrap(); + /// let mat = ac.find(haystack).expect("should have a match"); + /// assert_eq!("b", &haystack[mat.start()..mat.end()]); + /// ``` + /// + /// Leftmost-first semantics: + /// + /// ``` + /// use aho_corasick::{AhoCorasick, MatchKind}; + /// + /// let patterns = &["b", "abc", "abcd"]; + /// let haystack = "abcd"; + /// + /// let ac = AhoCorasick::builder() + /// .match_kind(MatchKind::LeftmostFirst) + /// .build(patterns) + /// .unwrap(); + /// let mat = ac.find(haystack).expect("should have a match"); + /// assert_eq!("abc", &haystack[mat.start()..mat.end()]); + /// ``` + /// + /// Leftmost-longest semantics: + /// + /// ``` + /// use aho_corasick::{AhoCorasick, MatchKind}; + /// + /// let patterns = &["b", "abc", "abcd"]; + /// let haystack = "abcd"; + /// + /// let ac = AhoCorasick::builder() + /// .match_kind(MatchKind::LeftmostLongest) + /// .build(patterns) + /// .unwrap(); + /// let mat = ac.find(haystack).expect("should have a match"); + /// assert_eq!("abcd", &haystack[mat.start()..mat.end()]); + /// ``` + pub fn match_kind(&mut self, kind: MatchKind) -> &mut AhoCorasickBuilder { + self.nfa_noncontiguous.match_kind(kind); + self.nfa_contiguous.match_kind(kind); + self.dfa.match_kind(kind); + self + } + + /// Sets the starting state configuration for the automaton. + /// + /// Every Aho-Corasick automaton is capable of having two start states: one + /// that is used for unanchored searches and one that is used for anchored + /// searches. Some automatons, like the NFAs, support this with almost zero + /// additional cost. Other automatons, like the DFA, require two copies of + /// the underlying transition table to support both simultaneously. + /// + /// Because there may be an added non-trivial cost to supporting both, it + /// is possible to configure which starting state configuration is needed. + /// + /// Indeed, since anchored searches tend to be somewhat more rare, + /// _only_ unanchored searches are supported by default. Thus, + /// [`StartKind::Unanchored`] is the default. + /// + /// Note that when this is set to [`StartKind::Unanchored`], then + /// running an anchored search will result in an error (or a panic + /// if using the infallible APIs). Similarly, when this is set to + /// [`StartKind::Anchored`], then running an unanchored search will + /// result in an error (or a panic if using the infallible APIs). When + /// [`StartKind::Both`] is used, then both unanchored and anchored searches + /// are always supported. + /// + /// Also note that even if an `AhoCorasick` searcher is using an NFA + /// internally (which always supports both unanchored and anchored + /// searches), an error will still be reported for a search that isn't + /// supported by the configuration set via this method. This means, + /// for example, that an error is never dependent on which internal + /// implementation of Aho-Corasick is used. 
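As a minimal doctest-style sketch of the restriction noted above (leftmost match kinds rule out overlapping and stream searches), assuming only the public builder API already shown in this file:

```
use aho_corasick::{AhoCorasick, MatchKind};

let ac = AhoCorasick::builder()
    .match_kind(MatchKind::LeftmostFirst)
    .build(&["b", "abc", "abcd"])
    .unwrap();
// Overlapping searches require MatchKind::Standard, so the fallible API
// reports an error rather than panicking.
assert!(ac.try_find_overlapping_iter("abcd").is_err());
// Ordinary non-overlapping searches are unaffected: leftmost-first
// reports the single match "abc" here.
assert_eq!(1, ac.find_iter("abcd").count());
```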
+ /// + /// # Example: anchored search + /// + /// This shows how to build a searcher that only supports anchored + /// searches: + /// + /// ``` + /// use aho_corasick::{ + /// AhoCorasick, Anchored, Input, Match, MatchKind, StartKind, + /// }; + /// + /// let ac = AhoCorasick::builder() + /// .match_kind(MatchKind::LeftmostFirst) + /// .start_kind(StartKind::Anchored) + /// .build(&["b", "abc", "abcd"]) + /// .unwrap(); + /// + /// // An unanchored search is not supported! An error here is guaranteed + /// // given the configuration above regardless of which kind of + /// // Aho-Corasick implementation ends up being used internally. + /// let input = Input::new("foo abcd").anchored(Anchored::No); + /// assert!(ac.try_find(input).is_err()); + /// + /// let input = Input::new("foo abcd").anchored(Anchored::Yes); + /// assert_eq!(None, ac.try_find(input)?); + /// + /// let input = Input::new("abcd").anchored(Anchored::Yes); + /// assert_eq!(Some(Match::must(1, 0..3)), ac.try_find(input)?); + /// + /// # Ok::<(), Box>(()) + /// ``` + /// + /// # Example: unanchored and anchored searches + /// + /// This shows how to build a searcher that supports both unanchored and + /// anchored searches: + /// + /// ``` + /// use aho_corasick::{ + /// AhoCorasick, Anchored, Input, Match, MatchKind, StartKind, + /// }; + /// + /// let ac = AhoCorasick::builder() + /// .match_kind(MatchKind::LeftmostFirst) + /// .start_kind(StartKind::Both) + /// .build(&["b", "abc", "abcd"]) + /// .unwrap(); + /// + /// let input = Input::new("foo abcd").anchored(Anchored::No); + /// assert_eq!(Some(Match::must(1, 4..7)), ac.try_find(input)?); + /// + /// let input = Input::new("foo abcd").anchored(Anchored::Yes); + /// assert_eq!(None, ac.try_find(input)?); + /// + /// let input = Input::new("abcd").anchored(Anchored::Yes); + /// assert_eq!(Some(Match::must(1, 0..3)), ac.try_find(input)?); + /// + /// # Ok::<(), Box>(()) + /// ``` + pub fn start_kind(&mut self, kind: StartKind) -> &mut AhoCorasickBuilder { + self.dfa.start_kind(kind); + self.start_kind = kind; + self + } + + /// Enable ASCII-aware case insensitive matching. + /// + /// When this option is enabled, searching will be performed without + /// respect to case for ASCII letters (`a-z` and `A-Z`) only. + /// + /// Enabling this option does not change the search algorithm, but it may + /// increase the size of the automaton. + /// + /// **NOTE:** It is unlikely that support for Unicode case folding will + /// be added in the future. The ASCII case works via a simple hack to the + /// underlying automaton, but full Unicode handling requires a fair bit of + /// sophistication. If you do need Unicode handling, you might consider + /// using the [`regex` crate](https://docs.rs/regex) or the lower level + /// [`regex-automata` crate](https://docs.rs/regex-automata). + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// use aho_corasick::AhoCorasick; + /// + /// let patterns = &["FOO", "bAr", "BaZ"]; + /// let haystack = "foo bar baz"; + /// + /// let ac = AhoCorasick::builder() + /// .ascii_case_insensitive(true) + /// .build(patterns) + /// .unwrap(); + /// assert_eq!(3, ac.find_iter(haystack).count()); + /// ``` + pub fn ascii_case_insensitive( + &mut self, + yes: bool, + ) -> &mut AhoCorasickBuilder { + self.nfa_noncontiguous.ascii_case_insensitive(yes); + self.nfa_contiguous.ascii_case_insensitive(yes); + self.dfa.ascii_case_insensitive(yes); + self + } + + /// Choose the type of underlying automaton to use. 
+ /// + /// Currently, there are four choices: + /// + /// * [`AhoCorasickKind::NoncontiguousNFA`] instructs the searcher to + /// use a [`noncontiguous::NFA`]. A noncontiguous NFA is the fastest to + /// be built, has moderate memory usage and is typically the slowest to + /// execute a search. + /// * [`AhoCorasickKind::ContiguousNFA`] instructs the searcher to use a + /// [`contiguous::NFA`]. A contiguous NFA is a little slower to build than + /// a noncontiguous NFA, has excellent memory usage and is typically a + /// little slower than a DFA for a search. + /// * [`AhoCorasickKind::DFA`] instructs the searcher to use a + /// [`dfa::DFA`]. A DFA is very slow to build, uses exorbitant amounts of + /// memory, but will typically execute searches the fastest. + /// * `None` (the default) instructs the searcher to choose the "best" + /// Aho-Corasick implementation. This choice is typically based primarily + /// on the number of patterns. + /// + /// Setting this configuration does not change the time complexity for + /// constructing the Aho-Corasick automaton (which is `O(p)` where `p` + /// is the total number of patterns being compiled). Setting this to + /// [`AhoCorasickKind::DFA`] does however reduce the time complexity of + /// non-overlapping searches from `O(n + p)` to `O(n)`, where `n` is the + /// length of the haystack. + /// + /// In general, you should probably stick to the default unless you have + /// some kind of reason to use a specific Aho-Corasick implementation. For + /// example, you might choose `AhoCorasickKind::DFA` if you don't care + /// about memory usage and want the fastest possible search times. + /// + /// Setting this guarantees that the searcher returned uses the chosen + /// implementation. If that implementation could not be constructed, then + /// an error will be returned. In contrast, when `None` is used, it is + /// possible for it to attempt to construct, for example, a contiguous + /// NFA and have it fail. In which case, it will fall back to using a + /// noncontiguous NFA. + /// + /// If `None` is given, then one may use [`AhoCorasick::kind`] to determine + /// which Aho-Corasick implementation was chosen. + /// + /// Note that the heuristics used for choosing which `AhoCorasickKind` + /// may be changed in a semver compatible release. + pub fn kind( + &mut self, + kind: Option, + ) -> &mut AhoCorasickBuilder { + self.kind = kind; + self + } + + /// Enable heuristic prefilter optimizations. + /// + /// When enabled, searching will attempt to quickly skip to match + /// candidates using specialized literal search routines. A prefilter + /// cannot always be used, and is generally treated as a heuristic. It + /// can be useful to disable this if the prefilter is observed to be + /// sub-optimal for a particular workload. + /// + /// Currently, prefilters are typically only active when building searchers + /// with a small (less than 100) number of patterns. + /// + /// This is enabled by default. + pub fn prefilter(&mut self, yes: bool) -> &mut AhoCorasickBuilder { + self.nfa_noncontiguous.prefilter(yes); + self.nfa_contiguous.prefilter(yes); + self.dfa.prefilter(yes); + self + } + + /// Set the limit on how many states use a dense representation for their + /// transitions. Other states will generally use a sparse representation. + /// + /// A dense representation uses more memory but is generally faster, since + /// the next transition in a dense representation can be computed in a + /// constant number of instructions. 
A sparse representation uses less + /// memory but is generally slower, since the next transition in a sparse + /// representation requires executing a variable number of instructions. + /// + /// This setting is only used when an Aho-Corasick implementation is used + /// that supports the dense versus sparse representation trade off. Not all + /// do. + /// + /// This limit is expressed in terms of the depth of a state, i.e., the + /// number of transitions from the starting state of the automaton. The + /// idea is that most of the time searching will be spent near the starting + /// state of the automaton, so states near the start state should use a + /// dense representation. States further away from the start state would + /// then use a sparse representation. + /// + /// By default, this is set to a low but non-zero number. Setting this to + /// `0` is almost never what you want, since it is likely to make searches + /// very slow due to the start state itself being forced to use a sparse + /// representation. However, it is unlikely that increasing this number + /// will help things much, since the most active states have a small depth. + /// More to the point, the memory usage increases superlinearly as this + /// number increases. + pub fn dense_depth(&mut self, depth: usize) -> &mut AhoCorasickBuilder { + self.nfa_noncontiguous.dense_depth(depth); + self.nfa_contiguous.dense_depth(depth); + self + } + + /// A debug settting for whether to attempt to shrink the size of the + /// automaton's alphabet or not. + /// + /// This option is enabled by default and should never be disabled unless + /// one is debugging the underlying automaton. + /// + /// When enabled, some (but not all) Aho-Corasick automatons will use a map + /// from all possible bytes to their corresponding equivalence class. Each + /// equivalence class represents a set of bytes that does not discriminate + /// between a match and a non-match in the automaton. + /// + /// The advantage of this map is that the size of the transition table can + /// be reduced drastically from `#states * 256 * sizeof(u32)` to + /// `#states * k * sizeof(u32)` where `k` is the number of equivalence + /// classes (rounded up to the nearest power of 2). As a result, total + /// space usage can decrease substantially. Moreover, since a smaller + /// alphabet is used, automaton compilation becomes faster as well. + /// + /// **WARNING:** This is only useful for debugging automatons. Disabling + /// this does not yield any speed advantages. Namely, even when this is + /// disabled, a byte class map is still used while searching. The only + /// difference is that every byte will be forced into its own distinct + /// equivalence class. This is useful for debugging the actual generated + /// transitions because it lets one see the transitions defined on actual + /// bytes instead of the equivalence classes. + pub fn byte_classes(&mut self, yes: bool) -> &mut AhoCorasickBuilder { + self.nfa_contiguous.byte_classes(yes); + self.dfa.byte_classes(yes); + self + } +} + +/// The type of Aho-Corasick implementation to use in an [`AhoCorasick`] +/// searcher. +/// +/// This is principally used as an input to the +/// [`AhoCorasickBuilder::start_kind`] method. Its documentation goes into more +/// detail about each choice. +#[non_exhaustive] +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub enum AhoCorasickKind { + /// Use a noncontiguous NFA. + NoncontiguousNFA, + /// Use a contiguous NFA. + ContiguousNFA, + /// Use a DFA. 
Warning: DFAs typically use a large amount of memory. + DFA, +} + +/// A trait that effectively gives us practical dynamic dispatch over anything +/// that impls `Automaton`, but without needing to add a bunch of bounds to +/// the core `Automaton` trait. Basically, we provide all of the marker traits +/// that our automatons have, in addition to `Debug` impls and requiring that +/// there is no borrowed data. Without these, the main `AhoCorasick` type would +/// not be able to meaningfully impl `Debug` or the marker traits without also +/// requiring that all impls of `Automaton` do so, which would be not great. +trait AcAutomaton: + Automaton + Debug + Send + Sync + UnwindSafe + RefUnwindSafe + 'static +{ +} + +impl AcAutomaton for A where + A: Automaton + Debug + Send + Sync + UnwindSafe + RefUnwindSafe + 'static +{ +} + +impl crate::automaton::private::Sealed for Arc {} + +// I'm not sure why this trait impl shows up in the docs, as the AcAutomaton +// trait is not exported. So we forcefully hide it. +// +// SAFETY: This just defers to the underlying 'AcAutomaton' and thus inherits +// its safety properties. +#[doc(hidden)] +unsafe impl Automaton for Arc { + #[inline(always)] + fn start_state(&self, anchored: Anchored) -> Result { + (**self).start_state(anchored) + } + + #[inline(always)] + fn next_state( + &self, + anchored: Anchored, + sid: StateID, + byte: u8, + ) -> StateID { + (**self).next_state(anchored, sid, byte) + } + + #[inline(always)] + fn is_special(&self, sid: StateID) -> bool { + (**self).is_special(sid) + } + + #[inline(always)] + fn is_dead(&self, sid: StateID) -> bool { + (**self).is_dead(sid) + } + + #[inline(always)] + fn is_match(&self, sid: StateID) -> bool { + (**self).is_match(sid) + } + + #[inline(always)] + fn is_start(&self, sid: StateID) -> bool { + (**self).is_start(sid) + } + + #[inline(always)] + fn match_kind(&self) -> MatchKind { + (**self).match_kind() + } + + #[inline(always)] + fn match_len(&self, sid: StateID) -> usize { + (**self).match_len(sid) + } + + #[inline(always)] + fn match_pattern(&self, sid: StateID, index: usize) -> PatternID { + (**self).match_pattern(sid, index) + } + + #[inline(always)] + fn patterns_len(&self) -> usize { + (**self).patterns_len() + } + + #[inline(always)] + fn pattern_len(&self, pid: PatternID) -> usize { + (**self).pattern_len(pid) + } + + #[inline(always)] + fn min_pattern_len(&self) -> usize { + (**self).min_pattern_len() + } + + #[inline(always)] + fn max_pattern_len(&self) -> usize { + (**self).max_pattern_len() + } + + #[inline(always)] + fn memory_usage(&self) -> usize { + (**self).memory_usage() + } + + #[inline(always)] + fn prefilter(&self) -> Option<&Prefilter> { + (**self).prefilter() + } + + // Even though 'try_find' and 'try_find_overlapping' each have their + // own default impls, we explicitly define them here to fix a perf bug. + // Without these explicit definitions, the default impl will wind up using + // dynamic dispatch for all 'Automaton' method calls, including things like + // 'next_state' that absolutely must get inlined or else perf is trashed. + // Defining them explicitly here like this still requires dynamic dispatch + // to call 'try_find' itself, but all uses of 'Automaton' within 'try_find' + // are monomorphized. + // + // We don't need to explicitly impl any other methods, I think, because + // they are all implemented themselves in terms of 'try_find' and + // 'try_find_overlapping'. 
We still might wind up with an extra virtual + // call here or there, but that's okay since it's outside of any perf + // critical areas. + + #[inline(always)] + fn try_find( + &self, + input: &Input<'_>, + ) -> Result, MatchError> { + (**self).try_find(input) + } + + #[inline(always)] + fn try_find_overlapping( + &self, + input: &Input<'_>, + state: &mut OverlappingState, + ) -> Result<(), MatchError> { + (**self).try_find_overlapping(input, state) + } +} + +/// Returns an error if the start state configuration does not support the +/// desired search configuration. See the internal 'AhoCorasick::start_kind' +/// field docs for more details. +fn enforce_anchored_consistency( + have: StartKind, + want: Anchored, +) -> Result<(), MatchError> { + match have { + StartKind::Both => Ok(()), + StartKind::Unanchored if !want.is_anchored() => Ok(()), + StartKind::Unanchored => Err(MatchError::invalid_input_anchored()), + StartKind::Anchored if want.is_anchored() => Ok(()), + StartKind::Anchored => Err(MatchError::invalid_input_unanchored()), + } +} diff --git a/vendor/aho-corasick/src/automaton.rs b/vendor/aho-corasick/src/automaton.rs new file mode 100644 index 00000000000000..c41dc6e1db305e --- /dev/null +++ b/vendor/aho-corasick/src/automaton.rs @@ -0,0 +1,1608 @@ +/*! +Provides [`Automaton`] trait for abstracting over Aho-Corasick automata. + +The `Automaton` trait provides a way to write generic code over any +Aho-Corasick automaton. It also provides access to lower level APIs that +permit walking the state transitions of an Aho-Corasick automaton manually. +*/ + +use alloc::{string::String, vec::Vec}; + +use crate::util::{ + error::MatchError, + primitives::PatternID, + search::{Anchored, Input, Match, MatchKind, Span}, +}; + +pub use crate::util::{ + prefilter::{Candidate, Prefilter}, + primitives::{StateID, StateIDError}, +}; + +/// We seal the `Automaton` trait for now. It's a big trait, and it's +/// conceivable that I might want to add new required methods, and sealing the +/// trait permits doing that in a backwards compatible fashion. On other the +/// hand, if you have a solid use case for implementing the trait yourself, +/// please file an issue and we can discuss it. This was *mostly* done as a +/// conservative step. +pub(crate) mod private { + pub trait Sealed {} +} +impl private::Sealed for crate::nfa::noncontiguous::NFA {} +impl private::Sealed for crate::nfa::contiguous::NFA {} +impl private::Sealed for crate::dfa::DFA {} + +impl<'a, T: private::Sealed + ?Sized> private::Sealed for &'a T {} + +/// A trait that abstracts over Aho-Corasick automata. +/// +/// This trait primarily exists for niche use cases such as: +/// +/// * Using an NFA or DFA directly, bypassing the top-level +/// [`AhoCorasick`](crate::AhoCorasick) searcher. Currently, these include +/// [`noncontiguous::NFA`](crate::nfa::noncontiguous::NFA), +/// [`contiguous::NFA`](crate::nfa::contiguous::NFA) and +/// [`dfa::DFA`](crate::dfa::DFA). +/// * Implementing your own custom search routine by walking the automaton +/// yourself. This might be useful for implementing search on non-contiguous +/// strings or streams. +/// +/// For most use cases, it is not expected that users will need +/// to use or even know about this trait. Indeed, the top level +/// [`AhoCorasick`](crate::AhoCorasick) searcher does not expose any details +/// about this trait, nor does it implement it itself. 
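To make the first use case above concrete, here is a minimal sketch of searching a concrete automaton directly through this trait, bypassing the top-level searcher; it assumes the public `nfa::noncontiguous::NFA` constructor and the `Automaton::try_find` default method defined later in this file:

```
use aho_corasick::{automaton::Automaton, nfa::noncontiguous::NFA, Input, Match};

// Build a noncontiguous NFA directly and search it via the trait.
let nfa = NFA::new(&["samwise", "sam"]).unwrap();
let mat = nfa
    .try_find(&Input::new("samwise"))
    .expect("unanchored searches are supported by default")
    .expect("a match exists");
// Standard (default) match semantics report the first match seen: "sam".
assert_eq!(Match::must(1, 0..3), mat);
```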
+/// +/// Note that this trait defines a number of default methods, such as +/// [`Automaton::try_find`] and [`Automaton::try_find_iter`], which implement +/// higher level search routines in terms of the lower level automata API. +/// +/// # Sealed +/// +/// Currently, this trait is sealed. That means users of this crate can write +/// generic routines over this trait but cannot implement it themselves. This +/// restriction may be lifted in the future, but sealing the trait permits +/// adding new required methods in a backwards compatible fashion. +/// +/// # Special states +/// +/// This trait encodes a notion of "special" states in an automaton. Namely, +/// a state is treated as special if it is a dead, match or start state: +/// +/// * A dead state is a state that cannot be left once entered. All transitions +/// on a dead state lead back to itself. The dead state is meant to be treated +/// as a sentinel indicating that the search should stop and return a match if +/// one has been found, and nothing otherwise. +/// * A match state is a state that indicates one or more patterns have +/// matched. Depending on the [`MatchKind`] of the automaton, a search may +/// stop once a match is seen, or it may continue looking for matches until +/// it enters a dead state or sees the end of the haystack. +/// * A start state is a state that a search begins in. It is useful to know +/// when a search enters a start state because it may mean that a prefilter can +/// be used to skip ahead and quickly look for candidate matches. Unlike dead +/// and match states, it is never necessary to explicitly handle start states +/// for correctness. Indeed, in this crate, implementations of `Automaton` +/// will only treat start states as "special" when a prefilter is enabled and +/// active. Otherwise, treating it as special has no purpose and winds up +/// slowing down the overall search because it results in ping-ponging between +/// the main state transition and the "special" state logic. +/// +/// Since checking whether a state is special by doing three different +/// checks would be too expensive inside a fast search loop, the +/// [`Automaton::is_special`] method is provided for quickly checking whether +/// the state is special. The `Automaton::is_dead`, `Automaton::is_match` and +/// `Automaton::is_start` predicates can then be used to determine which kind +/// of special state it is. +/// +/// # Panics +/// +/// Most of the APIs on this trait should panic or give incorrect results +/// if invalid inputs are given to it. For example, `Automaton::next_state` +/// has unspecified behavior if the state ID given to it is not a valid +/// state ID for the underlying automaton. Valid state IDs can only be +/// retrieved in one of two ways: calling `Automaton::start_state` or calling +/// `Automaton::next_state` with a valid state ID. +/// +/// # Safety +/// +/// This trait is not safe to implement so that code may rely on the +/// correctness of implementations of this trait to avoid undefined behavior. +/// The primary correctness guarantees are: +/// +/// * `Automaton::start_state` always returns a valid state ID or an error or +/// panics. +/// * `Automaton::next_state`, when given a valid state ID, always returns +/// a valid state ID for all values of `anchored` and `byte`, or otherwise +/// panics. +/// +/// In general, the rest of the methods on `Automaton` need to uphold their +/// contracts as well. 
For example, `Automaton::is_dead` should only returns +/// true if the given state ID is actually a dead state. +/// +/// Note that currently this crate does not rely on the safety property defined +/// here to avoid undefined behavior. Instead, this was done to make it +/// _possible_ to do in the future. +/// +/// # Example +/// +/// This example shows how one might implement a basic but correct search +/// routine. We keep things simple by not using prefilters or worrying about +/// anchored searches, but do make sure our search is correct for all possible +/// [`MatchKind`] semantics. (The comments in the code below note the parts +/// that are needed to support certain `MatchKind` semantics.) +/// +/// ``` +/// use aho_corasick::{ +/// automaton::Automaton, +/// nfa::noncontiguous::NFA, +/// Anchored, Match, MatchError, MatchKind, +/// }; +/// +/// // Run an unanchored search for 'aut' in 'haystack'. Return the first match +/// // seen according to the automaton's match semantics. This returns an error +/// // if the given automaton does not support unanchored searches. +/// fn find( +/// aut: A, +/// haystack: &[u8], +/// ) -> Result, MatchError> { +/// let mut sid = aut.start_state(Anchored::No)?; +/// let mut at = 0; +/// let mut mat = None; +/// let get_match = |sid, at| { +/// let pid = aut.match_pattern(sid, 0); +/// let len = aut.pattern_len(pid); +/// Match::new(pid, (at - len)..at) +/// }; +/// // Start states can be match states! +/// if aut.is_match(sid) { +/// mat = Some(get_match(sid, at)); +/// // Standard semantics require matches to be reported as soon as +/// // they're seen. Otherwise, we continue until we see a dead state +/// // or the end of the haystack. +/// if matches!(aut.match_kind(), MatchKind::Standard) { +/// return Ok(mat); +/// } +/// } +/// while at < haystack.len() { +/// sid = aut.next_state(Anchored::No, sid, haystack[at]); +/// if aut.is_special(sid) { +/// if aut.is_dead(sid) { +/// return Ok(mat); +/// } else if aut.is_match(sid) { +/// mat = Some(get_match(sid, at + 1)); +/// // As above, standard semantics require that we return +/// // immediately once a match is found. +/// if matches!(aut.match_kind(), MatchKind::Standard) { +/// return Ok(mat); +/// } +/// } +/// } +/// at += 1; +/// } +/// Ok(mat) +/// } +/// +/// // Show that it works for standard searches. +/// let nfa = NFA::new(&["samwise", "sam"]).unwrap(); +/// assert_eq!(Some(Match::must(1, 0..3)), find(&nfa, b"samwise")?); +/// +/// // But also works when using leftmost-first. Notice how the match result +/// // has changed! +/// let nfa = NFA::builder() +/// .match_kind(MatchKind::LeftmostFirst) +/// .build(&["samwise", "sam"]) +/// .unwrap(); +/// assert_eq!(Some(Match::must(0, 0..7)), find(&nfa, b"samwise")?); +/// +/// # Ok::<(), Box>(()) +/// ``` +pub unsafe trait Automaton: private::Sealed { + /// Returns the starting state for the given anchor mode. + /// + /// Upon success, the state ID returned is guaranteed to be valid for + /// this automaton. + /// + /// # Errors + /// + /// This returns an error when the given search configuration is not + /// supported by the underlying automaton. For example, if the underlying + /// automaton only supports unanchored searches but the given configuration + /// was set to an anchored search, then this must return an error. + fn start_state(&self, anchored: Anchored) -> Result; + + /// Performs a state transition from `sid` for `byte` and returns the next + /// state. 
+ /// + /// `anchored` should be [`Anchored::Yes`] when executing an anchored + /// search and [`Anchored::No`] otherwise. For some implementations of + /// `Automaton`, it is required to know whether the search is anchored + /// or not in order to avoid following failure transitions. Other + /// implementations may ignore `anchored` altogether and depend on + /// `Automaton::start_state` returning a state that walks a different path + /// through the automaton depending on whether the search is anchored or + /// not. + /// + /// # Panics + /// + /// This routine may panic or return incorrect results when the given state + /// ID is invalid. A state ID is valid if and only if: + /// + /// 1. It came from a call to `Automaton::start_state`, or + /// 2. It came from a previous call to `Automaton::next_state` with a + /// valid state ID. + /// + /// Implementations must treat all possible values of `byte` as valid. + /// + /// Implementations may panic on unsupported values of `anchored`, but are + /// not required to do so. + fn next_state( + &self, + anchored: Anchored, + sid: StateID, + byte: u8, + ) -> StateID; + + /// Returns true if the given ID represents a "special" state. A special + /// state is a dead, match or start state. + /// + /// Note that implementations may choose to return false when the given ID + /// corresponds to a start state. Namely, it always correct to treat start + /// states as non-special. Implementations must return true for states that + /// are dead or contain matches. + /// + /// This has unspecified behavior when given an invalid state ID. + fn is_special(&self, sid: StateID) -> bool; + + /// Returns true if the given ID represents a dead state. + /// + /// A dead state is a type of "sink" in a finite state machine. It + /// corresponds to a state whose transitions all loop back to itself. That + /// is, once entered, it can never be left. In practice, it serves as a + /// sentinel indicating that the search should terminate. + /// + /// This has unspecified behavior when given an invalid state ID. + fn is_dead(&self, sid: StateID) -> bool; + + /// Returns true if the given ID represents a match state. + /// + /// A match state is always associated with one or more pattern IDs that + /// matched at the position in the haystack when the match state was + /// entered. When a match state is entered, the match semantics dictate + /// whether it should be returned immediately (for `MatchKind::Standard`) + /// or if the search should continue (for `MatchKind::LeftmostFirst` and + /// `MatchKind::LeftmostLongest`) until a dead state is seen or the end of + /// the haystack has been reached. + /// + /// This has unspecified behavior when given an invalid state ID. + fn is_match(&self, sid: StateID) -> bool; + + /// Returns true if the given ID represents a start state. + /// + /// While it is never incorrect to ignore start states during a search + /// (except for the start of the search of course), knowing whether one has + /// entered a start state can be useful for certain classes of performance + /// optimizations. For example, if one is in a start state, it may be legal + /// to try to skip ahead and look for match candidates more quickly than + /// would otherwise be accomplished by walking the automaton. + /// + /// Implementations of `Automaton` in this crate "unspecialize" start + /// states when a prefilter is not active or enabled. 
In this case, it + /// is possible for `Automaton::is_special(sid)` to return false while + /// `Automaton::is_start(sid)` returns true. + /// + /// This has unspecified behavior when given an invalid state ID. + fn is_start(&self, sid: StateID) -> bool; + + /// Returns the match semantics that this automaton was built with. + fn match_kind(&self) -> MatchKind; + + /// Returns the total number of matches for the given state ID. + /// + /// This has unspecified behavior if the given ID does not refer to a match + /// state. + fn match_len(&self, sid: StateID) -> usize; + + /// Returns the pattern ID for the match state given by `sid` at the + /// `index` given. + /// + /// Typically, `index` is only ever greater than `0` when implementing an + /// overlapping search. Otherwise, it's likely that your search only cares + /// about reporting the first pattern ID in a match state. + /// + /// This has unspecified behavior if the given ID does not refer to a match + /// state, or if the index is greater than or equal to the total number of + /// matches in this match state. + fn match_pattern(&self, sid: StateID, index: usize) -> PatternID; + + /// Returns the total number of patterns compiled into this automaton. + fn patterns_len(&self) -> usize; + + /// Returns the length of the pattern for the given ID. + /// + /// This has unspecified behavior when given an invalid pattern + /// ID. A pattern ID is valid if and only if it is less than + /// `Automaton::patterns_len`. + fn pattern_len(&self, pid: PatternID) -> usize; + + /// Returns the length, in bytes, of the shortest pattern in this + /// automaton. + fn min_pattern_len(&self) -> usize; + + /// Returns the length, in bytes, of the longest pattern in this automaton. + fn max_pattern_len(&self) -> usize; + + /// Returns the heap memory usage, in bytes, used by this automaton. + fn memory_usage(&self) -> usize; + + /// Returns a prefilter, if available, that can be used to accelerate + /// searches for this automaton. + /// + /// The typical way this is used is when the start state is entered during + /// a search. When that happens, one can use a prefilter to skip ahead and + /// look for candidate matches without having to walk the automaton on the + /// bytes between candidates. + /// + /// Typically a prefilter is only available when there are a small (<100) + /// number of patterns built into the automaton. + fn prefilter(&self) -> Option<&Prefilter>; + + /// Executes a non-overlapping search with this automaton using the given + /// configuration. + /// + /// See + /// [`AhoCorasick::try_find`](crate::AhoCorasick::try_find) + /// for more documentation and examples. + fn try_find( + &self, + input: &Input<'_>, + ) -> Result, MatchError> { + try_find_fwd(&self, input) + } + + /// Executes a overlapping search with this automaton using the given + /// configuration. + /// + /// See + /// [`AhoCorasick::try_find_overlapping`](crate::AhoCorasick::try_find_overlapping) + /// for more documentation and examples. + fn try_find_overlapping( + &self, + input: &Input<'_>, + state: &mut OverlappingState, + ) -> Result<(), MatchError> { + try_find_overlapping_fwd(&self, input, state) + } + + /// Returns an iterator of non-overlapping matches with this automaton + /// using the given configuration. + /// + /// See + /// [`AhoCorasick::try_find_iter`](crate::AhoCorasick::try_find_iter) + /// for more documentation and examples. 
+ fn try_find_iter<'a, 'h>( + &'a self, + input: Input<'h>, + ) -> Result, MatchError> + where + Self: Sized, + { + FindIter::new(self, input) + } + + /// Returns an iterator of overlapping matches with this automaton + /// using the given configuration. + /// + /// See + /// [`AhoCorasick::try_find_overlapping_iter`](crate::AhoCorasick::try_find_overlapping_iter) + /// for more documentation and examples. + fn try_find_overlapping_iter<'a, 'h>( + &'a self, + input: Input<'h>, + ) -> Result, MatchError> + where + Self: Sized, + { + if !self.match_kind().is_standard() { + return Err(MatchError::unsupported_overlapping( + self.match_kind(), + )); + } + // We might consider lifting this restriction. The reason why I added + // it was to ban the combination of "anchored search" and "overlapping + // iteration." The match semantics aren't totally clear in that case. + // Should we allow *any* matches that are adjacent to *any* previous + // match? Or only following the most recent one? Or only matches + // that start at the beginning of the search? We might also elect to + // just keep this restriction in place, as callers should be able to + // implement it themselves if they want to. + if input.get_anchored().is_anchored() { + return Err(MatchError::invalid_input_anchored()); + } + let _ = self.start_state(input.get_anchored())?; + let state = OverlappingState::start(); + Ok(FindOverlappingIter { aut: self, input, state }) + } + + /// Replaces all non-overlapping matches in `haystack` with + /// strings from `replace_with` depending on the pattern that + /// matched. The `replace_with` slice must have length equal to + /// `Automaton::patterns_len`. + /// + /// See + /// [`AhoCorasick::try_replace_all`](crate::AhoCorasick::try_replace_all) + /// for more documentation and examples. + fn try_replace_all( + &self, + haystack: &str, + replace_with: &[B], + ) -> Result + where + Self: Sized, + B: AsRef, + { + assert_eq!( + replace_with.len(), + self.patterns_len(), + "replace_all requires a replacement for every pattern \ + in the automaton" + ); + let mut dst = String::with_capacity(haystack.len()); + self.try_replace_all_with(haystack, &mut dst, |mat, _, dst| { + dst.push_str(replace_with[mat.pattern()].as_ref()); + true + })?; + Ok(dst) + } + + /// Replaces all non-overlapping matches in `haystack` with + /// strings from `replace_with` depending on the pattern that + /// matched. The `replace_with` slice must have length equal to + /// `Automaton::patterns_len`. + /// + /// See + /// [`AhoCorasick::try_replace_all_bytes`](crate::AhoCorasick::try_replace_all_bytes) + /// for more documentation and examples. + fn try_replace_all_bytes( + &self, + haystack: &[u8], + replace_with: &[B], + ) -> Result, MatchError> + where + Self: Sized, + B: AsRef<[u8]>, + { + assert_eq!( + replace_with.len(), + self.patterns_len(), + "replace_all requires a replacement for every pattern \ + in the automaton" + ); + let mut dst = Vec::with_capacity(haystack.len()); + self.try_replace_all_with_bytes(haystack, &mut dst, |mat, _, dst| { + dst.extend(replace_with[mat.pattern()].as_ref()); + true + })?; + Ok(dst) + } + + /// Replaces all non-overlapping matches in `haystack` by calling the + /// `replace_with` closure given. + /// + /// See + /// [`AhoCorasick::try_replace_all_with`](crate::AhoCorasick::try_replace_all_with) + /// for more documentation and examples. 
+ fn try_replace_all_with( + &self, + haystack: &str, + dst: &mut String, + mut replace_with: F, + ) -> Result<(), MatchError> + where + Self: Sized, + F: FnMut(&Match, &str, &mut String) -> bool, + { + let mut last_match = 0; + for m in self.try_find_iter(Input::new(haystack))? { + // Since there are no restrictions on what kinds of patterns are + // in an Aho-Corasick automaton, we might get matches that split + // a codepoint, or even matches of a partial codepoint. When that + // happens, we just skip the match. + if !haystack.is_char_boundary(m.start()) + || !haystack.is_char_boundary(m.end()) + { + continue; + } + dst.push_str(&haystack[last_match..m.start()]); + last_match = m.end(); + if !replace_with(&m, &haystack[m.start()..m.end()], dst) { + break; + }; + } + dst.push_str(&haystack[last_match..]); + Ok(()) + } + + /// Replaces all non-overlapping matches in `haystack` by calling the + /// `replace_with` closure given. + /// + /// See + /// [`AhoCorasick::try_replace_all_with_bytes`](crate::AhoCorasick::try_replace_all_with_bytes) + /// for more documentation and examples. + fn try_replace_all_with_bytes( + &self, + haystack: &[u8], + dst: &mut Vec, + mut replace_with: F, + ) -> Result<(), MatchError> + where + Self: Sized, + F: FnMut(&Match, &[u8], &mut Vec) -> bool, + { + let mut last_match = 0; + for m in self.try_find_iter(Input::new(haystack))? { + dst.extend(&haystack[last_match..m.start()]); + last_match = m.end(); + if !replace_with(&m, &haystack[m.start()..m.end()], dst) { + break; + }; + } + dst.extend(&haystack[last_match..]); + Ok(()) + } + + /// Returns an iterator of non-overlapping matches with this automaton + /// from the stream given. + /// + /// See + /// [`AhoCorasick::try_stream_find_iter`](crate::AhoCorasick::try_stream_find_iter) + /// for more documentation and examples. + #[cfg(feature = "std")] + fn try_stream_find_iter<'a, R: std::io::Read>( + &'a self, + rdr: R, + ) -> Result, MatchError> + where + Self: Sized, + { + Ok(StreamFindIter { it: StreamChunkIter::new(self, rdr)? }) + } + + /// Replaces all non-overlapping matches in `rdr` with strings from + /// `replace_with` depending on the pattern that matched, and writes the + /// result to `wtr`. The `replace_with` slice must have length equal to + /// `Automaton::patterns_len`. + /// + /// See + /// [`AhoCorasick::try_stream_replace_all`](crate::AhoCorasick::try_stream_replace_all) + /// for more documentation and examples. + #[cfg(feature = "std")] + fn try_stream_replace_all( + &self, + rdr: R, + wtr: W, + replace_with: &[B], + ) -> std::io::Result<()> + where + Self: Sized, + R: std::io::Read, + W: std::io::Write, + B: AsRef<[u8]>, + { + assert_eq!( + replace_with.len(), + self.patterns_len(), + "streaming replace_all requires a replacement for every pattern \ + in the automaton", + ); + self.try_stream_replace_all_with(rdr, wtr, |mat, _, wtr| { + wtr.write_all(replace_with[mat.pattern()].as_ref()) + }) + } + + /// Replaces all non-overlapping matches in `rdr` by calling the + /// `replace_with` closure given and writing the result to `wtr`. + /// + /// See + /// [`AhoCorasick::try_stream_replace_all_with`](crate::AhoCorasick::try_stream_replace_all_with) + /// for more documentation and examples. 
+ #[cfg(feature = "std")] + fn try_stream_replace_all_with( + &self, + rdr: R, + mut wtr: W, + mut replace_with: F, + ) -> std::io::Result<()> + where + Self: Sized, + R: std::io::Read, + W: std::io::Write, + F: FnMut(&Match, &[u8], &mut W) -> std::io::Result<()>, + { + let mut it = StreamChunkIter::new(self, rdr).map_err(|e| { + let kind = std::io::ErrorKind::Other; + std::io::Error::new(kind, e) + })?; + while let Some(result) = it.next() { + let chunk = result?; + match chunk { + StreamChunk::NonMatch { bytes, .. } => { + wtr.write_all(bytes)?; + } + StreamChunk::Match { bytes, mat } => { + replace_with(&mat, bytes, &mut wtr)?; + } + } + } + Ok(()) + } +} + +// SAFETY: This just defers to the underlying 'AcAutomaton' and thus inherits +// its safety properties. +unsafe impl<'a, A: Automaton + ?Sized> Automaton for &'a A { + #[inline(always)] + fn start_state(&self, anchored: Anchored) -> Result { + (**self).start_state(anchored) + } + + #[inline(always)] + fn next_state( + &self, + anchored: Anchored, + sid: StateID, + byte: u8, + ) -> StateID { + (**self).next_state(anchored, sid, byte) + } + + #[inline(always)] + fn is_special(&self, sid: StateID) -> bool { + (**self).is_special(sid) + } + + #[inline(always)] + fn is_dead(&self, sid: StateID) -> bool { + (**self).is_dead(sid) + } + + #[inline(always)] + fn is_match(&self, sid: StateID) -> bool { + (**self).is_match(sid) + } + + #[inline(always)] + fn is_start(&self, sid: StateID) -> bool { + (**self).is_start(sid) + } + + #[inline(always)] + fn match_kind(&self) -> MatchKind { + (**self).match_kind() + } + + #[inline(always)] + fn match_len(&self, sid: StateID) -> usize { + (**self).match_len(sid) + } + + #[inline(always)] + fn match_pattern(&self, sid: StateID, index: usize) -> PatternID { + (**self).match_pattern(sid, index) + } + + #[inline(always)] + fn patterns_len(&self) -> usize { + (**self).patterns_len() + } + + #[inline(always)] + fn pattern_len(&self, pid: PatternID) -> usize { + (**self).pattern_len(pid) + } + + #[inline(always)] + fn min_pattern_len(&self) -> usize { + (**self).min_pattern_len() + } + + #[inline(always)] + fn max_pattern_len(&self) -> usize { + (**self).max_pattern_len() + } + + #[inline(always)] + fn memory_usage(&self) -> usize { + (**self).memory_usage() + } + + #[inline(always)] + fn prefilter(&self) -> Option<&Prefilter> { + (**self).prefilter() + } +} + +/// Represents the current state of an overlapping search. +/// +/// This is used for overlapping searches since they need to know something +/// about the previous search. For example, when multiple patterns match at the +/// same position, this state tracks the last reported pattern so that the next +/// search knows whether to report another matching pattern or continue with +/// the search at the next position. Additionally, it also tracks which state +/// the last search call terminated in and the current offset of the search +/// in the haystack. +/// +/// This type provides limited introspection capabilities. The only thing a +/// caller can do is construct it and pass it around to permit search routines +/// to use it to track state, and to ask whether a match has been found. +/// +/// Callers should always provide a fresh state constructed via +/// [`OverlappingState::start`] when starting a new search. That same state +/// should be reused for subsequent searches on the same `Input`. The state +/// given will advance through the haystack itself. Callers can detect the end +/// of a search when neither an error nor a match is returned. 
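The crate's own example just below drives an overlapping search through the top-level `AhoCorasick` searcher; as a complementary sketch, the same loop can be written against an automaton directly using the trait-level `try_find_overlapping` defined earlier in this module:

```
use aho_corasick::{
    automaton::{Automaton, OverlappingState},
    nfa::noncontiguous::NFA,
    Input, Match,
};

let nfa = NFA::new(&["samwise", "sam"]).unwrap();
let input = Input::new("samwise");
let mut state = OverlappingState::start();
let mut matches = vec![];
loop {
    // Each call either records the next overlapping match in `state` or
    // clears it, which signals that the search is finished.
    nfa.try_find_overlapping(&input, &mut state).unwrap();
    match state.get_match() {
        None => break,
        Some(mat) => matches.push(mat),
    }
}
assert_eq!(vec![Match::must(1, 0..3), Match::must(0, 0..7)], matches);
```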
+/// +/// # Example +/// +/// This example shows how to manually iterate over all overlapping matches. If +/// you need this, you might consider using +/// [`AhoCorasick::find_overlapping_iter`](crate::AhoCorasick::find_overlapping_iter) +/// instead, but this shows how to correctly use an `OverlappingState`. +/// +/// ``` +/// use aho_corasick::{ +/// automaton::OverlappingState, +/// AhoCorasick, Input, Match, +/// }; +/// +/// let patterns = &["append", "appendage", "app"]; +/// let haystack = "append the app to the appendage"; +/// +/// let ac = AhoCorasick::new(patterns).unwrap(); +/// let mut state = OverlappingState::start(); +/// let mut matches = vec![]; +/// +/// loop { +/// ac.find_overlapping(haystack, &mut state); +/// let mat = match state.get_match() { +/// None => break, +/// Some(mat) => mat, +/// }; +/// matches.push(mat); +/// } +/// let expected = vec![ +/// Match::must(2, 0..3), +/// Match::must(0, 0..6), +/// Match::must(2, 11..14), +/// Match::must(2, 22..25), +/// Match::must(0, 22..28), +/// Match::must(1, 22..31), +/// ]; +/// assert_eq!(expected, matches); +/// ``` +#[derive(Clone, Debug)] +pub struct OverlappingState { + /// The match reported by the most recent overlapping search to use this + /// state. + /// + /// If a search does not find any matches, then it is expected to clear + /// this value. + mat: Option, + /// The state ID of the state at which the search was in when the call + /// terminated. When this is a match state, `last_match` must be set to a + /// non-None value. + /// + /// A `None` value indicates the start state of the corresponding + /// automaton. We cannot use the actual ID, since any one automaton may + /// have many start states, and which one is in use depends on search-time + /// factors (such as whether the search is anchored or not). + id: Option, + /// The position of the search. + /// + /// When `id` is None (i.e., we are starting a search), this is set to + /// the beginning of the search as given by the caller regardless of its + /// current value. Subsequent calls to an overlapping search pick up at + /// this offset. + at: usize, + /// The index into the matching patterns of the next match to report if the + /// current state is a match state. Note that this may be 1 greater than + /// the total number of matches to report for the current match state. (In + /// which case, no more matches should be reported at the current position + /// and the search should advance to the next position.) + next_match_index: Option, +} + +impl OverlappingState { + /// Create a new overlapping state that begins at the start state. + pub fn start() -> OverlappingState { + OverlappingState { mat: None, id: None, at: 0, next_match_index: None } + } + + /// Return the match result of the most recent search to execute with this + /// state. + /// + /// Every search will clear this result automatically, such that if no + /// match is found, this will always correctly report `None`. + pub fn get_match(&self) -> Option { + self.mat + } +} + +/// An iterator of non-overlapping matches in a particular haystack. +/// +/// This iterator yields matches according to the [`MatchKind`] used by this +/// automaton. +/// +/// This iterator is constructed via the [`Automaton::try_find_iter`] method. +/// +/// The type variable `A` refers to the implementation of the [`Automaton`] +/// trait used to execute the search. +/// +/// The lifetime `'a` refers to the lifetime of the [`Automaton`] +/// implementation. 
+/// +/// The lifetime `'h` refers to the lifetime of the haystack being searched. +#[derive(Debug)] +pub struct FindIter<'a, 'h, A> { + /// The automaton used to drive the search. + aut: &'a A, + /// The input parameters to give to each search call. + /// + /// The start position of the search is mutated during iteration. + input: Input<'h>, + /// Records the end offset of the most recent match. This is necessary to + /// handle a corner case for preventing empty matches from overlapping with + /// the ending bounds of a prior match. + last_match_end: Option, +} + +impl<'a, 'h, A: Automaton> FindIter<'a, 'h, A> { + /// Creates a new non-overlapping iterator. If the given automaton would + /// return an error on a search with the given input configuration, then + /// that error is returned here. + fn new( + aut: &'a A, + input: Input<'h>, + ) -> Result, MatchError> { + // The only way this search can fail is if we cannot retrieve the start + // state. e.g., Asking for an anchored search when only unanchored + // searches are supported. + let _ = aut.start_state(input.get_anchored())?; + Ok(FindIter { aut, input, last_match_end: None }) + } + + /// Executes a search and returns a match if one is found. + /// + /// This does not advance the input forward. It just executes a search + /// based on the current configuration/offsets. + fn search(&self) -> Option { + // The unwrap is OK here because we check at iterator construction time + // that no subsequent search call (using the same configuration) will + // ever return an error. + self.aut + .try_find(&self.input) + .expect("already checked that no match error can occur") + } + + /// Handles the special case of an empty match by ensuring that 1) the + /// iterator always advances and 2) empty matches never overlap with other + /// matches. + /// + /// (1) is necessary because we principally make progress by setting the + /// starting location of the next search to the ending location of the last + /// match. But if a match is empty, then this results in a search that does + /// not advance and thus does not terminate. + /// + /// (2) is not strictly necessary, but makes intuitive sense and matches + /// the presiding behavior of most general purpose regex engines. + /// (Obviously this crate isn't a regex engine, but we choose to match + /// their semantics.) The "intuitive sense" here is that we want to report + /// NON-overlapping matches. So for example, given the patterns 'a' and + /// '' (an empty string) against the haystack 'a', without the special + /// handling, you'd get the matches [0, 1) and [1, 1), where the latter + /// overlaps with the end bounds of the former. + /// + /// Note that we mark this cold and forcefully prevent inlining because + /// handling empty matches like this is extremely rare and does require + /// quite a bit of code, comparatively. Keeping this code out of the main + /// iterator function keeps it smaller and more amenable to inlining + /// itself. 
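The progress rule described in point (1), setting the next search's start to the previous match's end, is also what makes this iterator non-overlapping; a small sketch of that behavior from the public API's perspective, assuming the default standard match semantics:

```
use aho_corasick::AhoCorasick;

// Each search resumes at the end offset of the previous match, so the
// non-overlapping iterator finds "aa" twice in "aaaa", not three times.
let ac = AhoCorasick::new(&["aa"]).unwrap();
let ends: Vec<usize> = ac.find_iter("aaaa").map(|m| m.end()).collect();
assert_eq!(vec![2, 4], ends);
```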
+ #[cold] + #[inline(never)] + fn handle_overlapping_empty_match( + &mut self, + mut m: Match, + ) -> Option { + assert!(m.is_empty()); + if Some(m.end()) == self.last_match_end { + self.input.set_start(self.input.start().checked_add(1).unwrap()); + m = self.search()?; + } + Some(m) + } +} + +impl<'a, 'h, A: Automaton> Iterator for FindIter<'a, 'h, A> { + type Item = Match; + + #[inline(always)] + fn next(&mut self) -> Option { + let mut m = self.search()?; + if m.is_empty() { + m = self.handle_overlapping_empty_match(m)?; + } + self.input.set_start(m.end()); + self.last_match_end = Some(m.end()); + Some(m) + } +} + +/// An iterator of overlapping matches in a particular haystack. +/// +/// This iterator will report all possible matches in a particular haystack, +/// even when the matches overlap. +/// +/// This iterator is constructed via the +/// [`Automaton::try_find_overlapping_iter`] method. +/// +/// The type variable `A` refers to the implementation of the [`Automaton`] +/// trait used to execute the search. +/// +/// The lifetime `'a` refers to the lifetime of the [`Automaton`] +/// implementation. +/// +/// The lifetime `'h` refers to the lifetime of the haystack being searched. +#[derive(Debug)] +pub struct FindOverlappingIter<'a, 'h, A> { + aut: &'a A, + input: Input<'h>, + state: OverlappingState, +} + +impl<'a, 'h, A: Automaton> Iterator for FindOverlappingIter<'a, 'h, A> { + type Item = Match; + + #[inline(always)] + fn next(&mut self) -> Option { + self.aut + .try_find_overlapping(&self.input, &mut self.state) + .expect("already checked that no match error can occur here"); + self.state.get_match() + } +} + +/// An iterator that reports matches in a stream. +/// +/// This iterator yields elements of type `io::Result`, where an error +/// is reported if there was a problem reading from the underlying stream. +/// The iterator terminates only when the underlying stream reaches `EOF`. +/// +/// This iterator is constructed via the [`Automaton::try_stream_find_iter`] +/// method. +/// +/// The type variable `A` refers to the implementation of the [`Automaton`] +/// trait used to execute the search. +/// +/// The type variable `R` refers to the `io::Read` stream that is being read +/// from. +/// +/// The lifetime `'a` refers to the lifetime of the [`Automaton`] +/// implementation. +#[cfg(feature = "std")] +#[derive(Debug)] +pub struct StreamFindIter<'a, A, R> { + it: StreamChunkIter<'a, A, R>, +} + +#[cfg(feature = "std")] +impl<'a, A: Automaton, R: std::io::Read> Iterator + for StreamFindIter<'a, A, R> +{ + type Item = std::io::Result; + + fn next(&mut self) -> Option> { + loop { + match self.it.next() { + None => return None, + Some(Err(err)) => return Some(Err(err)), + Some(Ok(StreamChunk::NonMatch { .. })) => {} + Some(Ok(StreamChunk::Match { mat, .. })) => { + return Some(Ok(mat)); + } + } + } + } +} + +/// An iterator that reports matches in a stream. +/// +/// (This doesn't actually implement the `Iterator` trait because it returns +/// something with a lifetime attached to a buffer it owns, but that's OK. It +/// still has a `next` method and is iterator-like enough to be fine.) +/// +/// This iterator yields elements of type `io::Result`, where +/// an error is reported if there was a problem reading from the underlying +/// stream. The iterator terminates only when the underlying stream reaches +/// `EOF`. 
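From the caller's side, this chunking machinery backs the public stream search entry points; a minimal sketch using `AhoCorasick::stream_find_iter` over an in-memory reader, assuming the `std` feature and the default standard match semantics:

```
use aho_corasick::AhoCorasick;

let ac = AhoCorasick::new(&["bar"]).unwrap();
// Any `io::Read` implementation works; a Cursor keeps the sketch self-contained.
let rdr = std::io::Cursor::new("foo bar baz");
let spans: Vec<(usize, usize)> = ac
    .stream_find_iter(rdr)
    .map(|result| {
        let mat = result.expect("reading from an in-memory cursor cannot fail");
        // Offsets are absolute positions within the stream.
        (mat.start(), mat.end())
    })
    .collect();
assert_eq!(vec![(4, 7)], spans);
```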
+/// +/// The idea here is that each chunk represents either a match or a non-match, +/// and if you concatenated all of the chunks together, you'd reproduce the +/// entire contents of the stream, byte-for-byte. +/// +/// This chunk machinery is a bit complicated and it isn't strictly required +/// for a stream searcher that just reports matches. But we do need something +/// like this to deal with the "replacement" API, which needs to know which +/// chunks it can copy and which it needs to replace. +#[cfg(feature = "std")] +#[derive(Debug)] +struct StreamChunkIter<'a, A, R> { + /// The underlying automaton to do the search. + aut: &'a A, + /// The source of bytes we read from. + rdr: R, + /// A roll buffer for managing bytes from `rdr`. Basically, this is used + /// to handle the case of a match that is split by two different + /// calls to `rdr.read()`. This isn't strictly needed if all we needed to + /// do was report matches, but here we are reporting chunks of non-matches + /// and matches and in order to do that, we really just cannot treat our + /// stream as non-overlapping blocks of bytes. We need to permit some + /// overlap while we retain bytes from a previous `read` call in memory. + buf: crate::util::buffer::Buffer, + /// The unanchored starting state of this automaton. + start: StateID, + /// The state of the automaton. + sid: StateID, + /// The absolute position over the entire stream. + absolute_pos: usize, + /// The position we're currently at within `buf`. + buffer_pos: usize, + /// The buffer position of the end of the bytes that we last returned + /// to the caller. Basically, whenever we find a match, we look to see if + /// there is a difference between where the match started and the position + /// of the last byte we returned to the caller. If there's a difference, + /// then we need to return a 'NonMatch' chunk. + buffer_reported_pos: usize, +} + +#[cfg(feature = "std")] +impl<'a, A: Automaton, R: std::io::Read> StreamChunkIter<'a, A, R> { + fn new( + aut: &'a A, + rdr: R, + ) -> Result, MatchError> { + // This restriction is a carry-over from older versions of this crate. + // I didn't have the bandwidth to think through how to handle, say, + // leftmost-first or leftmost-longest matching, but... it should be + // possible? The main problem is that once you see a match state in + // leftmost-first semantics, you can't just stop at that point and + // report a match. You have to keep going until you either hit a dead + // state or EOF. So how do you know when you'll hit a dead state? Well, + // you don't. With Aho-Corasick, I believe you can put a bound on it + // and say, "once a match has been seen, you'll need to scan forward at + // most N bytes" where N=aut.max_pattern_len(). + // + // Which is fine, but it does mean that state about whether we're still + // looking for a dead state or not needs to persist across buffer + // refills. Which this code doesn't really handle. It does preserve + // *some* state across buffer refills, basically ensuring that a match + // span is always in memory. + if !aut.match_kind().is_standard() { + return Err(MatchError::unsupported_stream(aut.match_kind())); + } + // This is kind of a cop-out, but empty matches are SUPER annoying. + // If we know they can't happen (which is what we enforce here), then + // it makes a lot of logic much simpler. With that said, I'm open to + // supporting this case, but we need to define proper semantics for it + // first. 
It wasn't totally clear to me what it should do at the time + // of writing, so I decided to just be conservative. + // + // It also seems like a very weird case to support anyway. Why search a + // stream if you're just going to get a match at every position? + // + // ¯\_(ツ)_/¯ + if aut.min_pattern_len() == 0 { + return Err(MatchError::unsupported_empty()); + } + let start = aut.start_state(Anchored::No)?; + Ok(StreamChunkIter { + aut, + rdr, + buf: crate::util::buffer::Buffer::new(aut.max_pattern_len()), + start, + sid: start, + absolute_pos: 0, + buffer_pos: 0, + buffer_reported_pos: 0, + }) + } + + fn next(&mut self) -> Option> { + // This code is pretty gnarly. It IS simpler than the equivalent code + // in the previous aho-corasick release, in part because we inline + // automaton traversal here and also in part because we have abdicated + // support for automatons that contain an empty pattern. + // + // I suspect this code could be made a bit simpler by designing a + // better buffer abstraction. + // + // But in general, this code is basically write-only. So you'll need + // to go through it step-by-step to grok it. One of the key bits of + // complexity is tracking a few different offsets. 'buffer_pos' is + // where we are in the buffer for search. 'buffer_reported_pos' is the + // position immediately following the last byte in the buffer that + // we've returned to the caller. And 'absolute_pos' is the overall + // current absolute position of the search in the entire stream, and + // this is what match spans are reported in terms of. + loop { + if self.aut.is_match(self.sid) { + let mat = self.get_match(); + if let Some(r) = self.get_non_match_chunk(mat) { + self.buffer_reported_pos += r.len(); + let bytes = &self.buf.buffer()[r]; + return Some(Ok(StreamChunk::NonMatch { bytes })); + } + self.sid = self.start; + let r = self.get_match_chunk(mat); + self.buffer_reported_pos += r.len(); + let bytes = &self.buf.buffer()[r]; + return Some(Ok(StreamChunk::Match { bytes, mat })); + } + if self.buffer_pos >= self.buf.buffer().len() { + if let Some(r) = self.get_pre_roll_non_match_chunk() { + self.buffer_reported_pos += r.len(); + let bytes = &self.buf.buffer()[r]; + return Some(Ok(StreamChunk::NonMatch { bytes })); + } + if self.buf.buffer().len() >= self.buf.min_buffer_len() { + self.buffer_pos = self.buf.min_buffer_len(); + self.buffer_reported_pos -= + self.buf.buffer().len() - self.buf.min_buffer_len(); + self.buf.roll(); + } + match self.buf.fill(&mut self.rdr) { + Err(err) => return Some(Err(err)), + Ok(true) => {} + Ok(false) => { + // We've hit EOF, but if there are still some + // unreported bytes remaining, return them now. + if let Some(r) = self.get_eof_non_match_chunk() { + self.buffer_reported_pos += r.len(); + let bytes = &self.buf.buffer()[r]; + return Some(Ok(StreamChunk::NonMatch { bytes })); + } + // We've reported everything! + return None; + } + } + } + let start = self.absolute_pos; + for &byte in self.buf.buffer()[self.buffer_pos..].iter() { + self.sid = self.aut.next_state(Anchored::No, self.sid, byte); + self.absolute_pos += 1; + if self.aut.is_match(self.sid) { + break; + } + } + self.buffer_pos += self.absolute_pos - start; + } + } + + /// Return a match chunk for the given match. It is assumed that the match + /// ends at the current `buffer_pos`. 
+    fn get_match_chunk(&self, mat: Match) -> core::ops::Range<usize> {
+        let start = self.buffer_pos - mat.len();
+        let end = self.buffer_pos;
+        start..end
+    }
+
+    /// Return a non-match chunk, if necessary, just before reporting a match.
+    /// This returns `None` if there is nothing to report. Otherwise, this
+    /// assumes that the given match ends at the current `buffer_pos`.
+    fn get_non_match_chunk(
+        &self,
+        mat: Match,
+    ) -> Option<core::ops::Range<usize>> {
+        let buffer_mat_start = self.buffer_pos - mat.len();
+        if buffer_mat_start > self.buffer_reported_pos {
+            let start = self.buffer_reported_pos;
+            let end = buffer_mat_start;
+            return Some(start..end);
+        }
+        None
+    }
+
+    /// Look for any bytes that should be reported as a non-match just before
+    /// rolling the buffer.
+    ///
+    /// Note that this only reports bytes up to `buffer.len() -
+    /// min_buffer_len`, as it's not possible to know whether the bytes
+    /// following that will participate in a match or not.
+    fn get_pre_roll_non_match_chunk(&self) -> Option<core::ops::Range<usize>> {
+        let end =
+            self.buf.buffer().len().saturating_sub(self.buf.min_buffer_len());
+        if self.buffer_reported_pos < end {
+            return Some(self.buffer_reported_pos..end);
+        }
+        None
+    }
+
+    /// Return any unreported bytes as a non-match up to the end of the buffer.
+    ///
+    /// This should only be called when the entire contents of the buffer have
+    /// been searched and EOF has been hit when trying to fill the buffer.
+    fn get_eof_non_match_chunk(&self) -> Option<core::ops::Range<usize>> {
+        if self.buffer_reported_pos < self.buf.buffer().len() {
+            return Some(self.buffer_reported_pos..self.buf.buffer().len());
+        }
+        None
+    }
+
+    /// Return the match at the current position for the current state.
+    ///
+    /// This panics if `self.aut.is_match(self.sid)` isn't true.
+    fn get_match(&self) -> Match {
+        get_match(self.aut, self.sid, 0, self.absolute_pos)
+    }
+}
+
+/// A single chunk yielded by the stream chunk iterator.
+///
+/// The `'r` lifetime refers to the lifetime of the stream chunk iterator.
+#[cfg(feature = "std")]
+#[derive(Debug)]
+enum StreamChunk<'r> {
+    /// A chunk that does not contain any matches.
+    NonMatch { bytes: &'r [u8] },
+    /// A chunk that precisely contains a match.
+ Match { bytes: &'r [u8], mat: Match }, +} + +#[inline(never)] +pub(crate) fn try_find_fwd( + aut: &A, + input: &Input<'_>, +) -> Result, MatchError> { + if input.is_done() { + return Ok(None); + } + let earliest = aut.match_kind().is_standard() || input.get_earliest(); + if input.get_anchored().is_anchored() { + try_find_fwd_imp(aut, input, None, Anchored::Yes, earliest) + } else if let Some(pre) = aut.prefilter() { + if earliest { + try_find_fwd_imp(aut, input, Some(pre), Anchored::No, true) + } else { + try_find_fwd_imp(aut, input, Some(pre), Anchored::No, false) + } + } else { + if earliest { + try_find_fwd_imp(aut, input, None, Anchored::No, true) + } else { + try_find_fwd_imp(aut, input, None, Anchored::No, false) + } + } +} + +#[inline(always)] +fn try_find_fwd_imp( + aut: &A, + input: &Input<'_>, + pre: Option<&Prefilter>, + anchored: Anchored, + earliest: bool, +) -> Result, MatchError> { + let mut sid = aut.start_state(input.get_anchored())?; + let mut at = input.start(); + let mut mat = None; + if aut.is_match(sid) { + mat = Some(get_match(aut, sid, 0, at)); + if earliest { + return Ok(mat); + } + } + if let Some(pre) = pre { + match pre.find_in(input.haystack(), input.get_span()) { + Candidate::None => return Ok(None), + Candidate::Match(m) => return Ok(Some(m)), + Candidate::PossibleStartOfMatch(i) => { + at = i; + } + } + } + while at < input.end() { + // I've tried unrolling this loop and eliding bounds checks, but no + // matter what I did, I could not observe a consistent improvement on + // any benchmark I could devise. (If someone wants to re-litigate this, + // the way to do it is to add an 'next_state_unchecked' method to the + // 'Automaton' trait with a default impl that uses 'next_state'. Then + // use 'aut.next_state_unchecked' here and implement it on DFA using + // unchecked slice index acces.) + sid = aut.next_state(anchored, sid, input.haystack()[at]); + if aut.is_special(sid) { + if aut.is_dead(sid) { + return Ok(mat); + } else if aut.is_match(sid) { + // We use 'at + 1' here because the match state is entered + // at the last byte of the pattern. Since we use half-open + // intervals, the end of the range of the match is one past the + // last byte. + let m = get_match(aut, sid, 0, at + 1); + // For the automata in this crate, we make a size trade off + // where we reuse the same automaton for both anchored and + // unanchored searches. We achieve this, principally, by simply + // not following failure transitions while computing the next + // state. Instead, if we fail to find the next state, we return + // a dead state, which instructs the search to stop. (This + // is why 'next_state' needs to know whether the search is + // anchored or not.) In addition, we have different start + // states for anchored and unanchored searches. The latter has + // a self-loop where as the former does not. + // + // In this way, we can use the same trie to execute both + // anchored and unanchored searches. There is a catch though. + // When building an Aho-Corasick automaton for unanchored + // searches, we copy matches from match states to other states + // (which would otherwise not be match states) if they are + // reachable via a failure transition. In the case of an + // anchored search, we *specifically* do not want to report + // these matches because they represent matches that start past + // the beginning of the search. 
+ // + // Now we could tweak the automaton somehow to differentiate + // anchored from unanchored match states, but this would make + // 'aut.is_match' and potentially 'aut.is_special' slower. And + // also make the automaton itself more complex. + // + // Instead, we insert a special hack: if the search is + // anchored, we simply ignore matches that don't begin at + // the start of the search. This is not quite ideal, but we + // do specialize this function in such a way that unanchored + // searches don't pay for this additional branch. While this + // might cause a search to continue on for more than it + // otherwise optimally would, it will be no more than the + // longest pattern in the automaton. The reason for this is + // that we ensure we don't follow failure transitions during + // an anchored search. Combined with using a different anchored + // starting state with no self-loop, we guarantee that we'll + // at worst move through a number of transitions equal to the + // longest pattern. + // + // Now for DFAs, the whole point of them is to eliminate + // failure transitions entirely. So there is no way to say "if + // it's an anchored search don't follow failure transitions." + // Instead, we actually have to build two entirely separate + // automatons into the transition table. One with failure + // transitions built into it and another that is effectively + // just an encoding of the base trie into a transition table. + // DFAs still need this check though, because the match states + // still carry matches only reachable via a failure transition. + // Why? Because removing them seems difficult, although I + // haven't given it a lot of thought. + if !(anchored.is_anchored() && m.start() > input.start()) { + mat = Some(m); + if earliest { + return Ok(mat); + } + } + } else if let Some(pre) = pre { + // If we're here, we know it's a special state that is not a + // dead or a match state AND that a prefilter is active. Thus, + // it must be a start state. + debug_assert!(aut.is_start(sid)); + // We don't care about 'Candidate::Match' here because if such + // a match were possible, it would have been returned above + // when we run the prefilter before walking the automaton. + let span = Span::from(at..input.end()); + match pre.find_in(input.haystack(), span).into_option() { + None => return Ok(None), + Some(i) => { + if i > at { + at = i; + continue; + } + } + } + } else { + // When pre.is_none(), then starting states should not be + // treated as special. That is, without a prefilter, is_special + // should only return true when the state is a dead or a match + // state. + // + // It is possible to execute a search without a prefilter even + // when the underlying searcher has one: an anchored search. + // But in this case, the automaton makes it impossible to move + // back to the start state by construction, and thus, we should + // never reach this branch. + debug_assert!(false, "unreachable"); + } + } + at += 1; + } + Ok(mat) +} + +#[inline(never)] +fn try_find_overlapping_fwd( + aut: &A, + input: &Input<'_>, + state: &mut OverlappingState, +) -> Result<(), MatchError> { + state.mat = None; + if input.is_done() { + return Ok(()); + } + // Searching with a pattern ID is always anchored, so we should only ever + // use a prefilter when no pattern ID is given. 
+ if aut.prefilter().is_some() && !input.get_anchored().is_anchored() { + let pre = aut.prefilter().unwrap(); + try_find_overlapping_fwd_imp(aut, input, Some(pre), state) + } else { + try_find_overlapping_fwd_imp(aut, input, None, state) + } +} + +#[inline(always)] +fn try_find_overlapping_fwd_imp( + aut: &A, + input: &Input<'_>, + pre: Option<&Prefilter>, + state: &mut OverlappingState, +) -> Result<(), MatchError> { + let mut sid = match state.id { + None => { + let sid = aut.start_state(input.get_anchored())?; + // Handle the case where the start state is a match state. That is, + // the empty string is in our automaton. We report every match we + // can here before moving on and updating 'state.at' and 'state.id' + // to find more matches in other parts of the haystack. + if aut.is_match(sid) { + let i = state.next_match_index.unwrap_or(0); + let len = aut.match_len(sid); + if i < len { + state.next_match_index = Some(i + 1); + state.mat = Some(get_match(aut, sid, i, input.start())); + return Ok(()); + } + } + state.at = input.start(); + state.id = Some(sid); + state.next_match_index = None; + state.mat = None; + sid + } + Some(sid) => { + // If we still have matches left to report in this state then + // report them until we've exhausted them. Only after that do we + // advance to the next offset in the haystack. + if let Some(i) = state.next_match_index { + let len = aut.match_len(sid); + if i < len { + state.next_match_index = Some(i + 1); + state.mat = Some(get_match(aut, sid, i, state.at + 1)); + return Ok(()); + } + // Once we've reported all matches at a given position, we need + // to advance the search to the next position. + state.at += 1; + state.next_match_index = None; + state.mat = None; + } + sid + } + }; + while state.at < input.end() { + sid = aut.next_state( + input.get_anchored(), + sid, + input.haystack()[state.at], + ); + if aut.is_special(sid) { + state.id = Some(sid); + if aut.is_dead(sid) { + return Ok(()); + } else if aut.is_match(sid) { + state.next_match_index = Some(1); + state.mat = Some(get_match(aut, sid, 0, state.at + 1)); + return Ok(()); + } else if let Some(pre) = pre { + // If we're here, we know it's a special state that is not a + // dead or a match state AND that a prefilter is active. Thus, + // it must be a start state. + debug_assert!(aut.is_start(sid)); + let span = Span::from(state.at..input.end()); + match pre.find_in(input.haystack(), span).into_option() { + None => return Ok(()), + Some(i) => { + if i > state.at { + state.at = i; + continue; + } + } + } + } else { + // When pre.is_none(), then starting states should not be + // treated as special. That is, without a prefilter, is_special + // should only return true when the state is a dead or a match + // state. + // + // ... except for one special case: in stream searching, we + // currently call overlapping search with a 'None' prefilter, + // regardless of whether one exists or not, because stream + // searching can't currently deal with prefilters correctly in + // all cases. + } + } + state.at += 1; + } + state.id = Some(sid); + Ok(()) +} + +#[inline(always)] +fn get_match( + aut: &A, + sid: StateID, + index: usize, + at: usize, +) -> Match { + let pid = aut.match_pattern(sid, index); + let len = aut.pattern_len(pid); + Match::new(pid, (at - len)..at) +} + +/// Write a prefix "state" indicator for fmt::Debug impls. It always writes +/// exactly two printable bytes to the given formatter. 
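+// Illustrative sketch (not from the upstream crate): the overlapping search
+// above corresponds to what the high-level `AhoCorasick::find_overlapping_iter`
+// exposes. The patterns and haystack here are made up for illustration.
+//
+// ```
+// use aho_corasick::AhoCorasick;
+//
+// let ac = AhoCorasick::new(&["abc", "bc", "c"]).unwrap();
+// let mut spans: Vec<(usize, usize)> = ac
+//     .find_overlapping_iter("abc")
+//     .map(|m| (m.start(), m.end()))
+//     .collect();
+// spans.sort();
+// // Every overlapping occurrence is reported, not just the leftmost one.
+// assert_eq!(spans, vec![(0, 3), (1, 3), (2, 3)]);
+// ```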
+/// +/// Specifically, this tries to succinctly distinguish the different types of +/// states: dead states, start states and match states. It even accounts for +/// the possible overlappings of different state types. (The only possible +/// overlapping is that of match and start states.) +pub(crate) fn fmt_state_indicator( + f: &mut core::fmt::Formatter<'_>, + aut: A, + id: StateID, +) -> core::fmt::Result { + if aut.is_dead(id) { + write!(f, "D ")?; + } else if aut.is_match(id) { + if aut.is_start(id) { + write!(f, "*>")?; + } else { + write!(f, "* ")?; + } + } else if aut.is_start(id) { + write!(f, " >")?; + } else { + write!(f, " ")?; + } + Ok(()) +} + +/// Return an iterator of transitions in a sparse format given an iterator +/// of all explicitly defined transitions. The iterator yields ranges of +/// transitions, such that any adjacent transitions mapped to the same +/// state are combined into a single range. +pub(crate) fn sparse_transitions<'a>( + mut it: impl Iterator + 'a, +) -> impl Iterator + 'a { + let mut cur: Option<(u8, u8, StateID)> = None; + core::iter::from_fn(move || { + while let Some((class, next)) = it.next() { + let (prev_start, prev_end, prev_next) = match cur { + Some(x) => x, + None => { + cur = Some((class, class, next)); + continue; + } + }; + if prev_next == next { + cur = Some((prev_start, class, prev_next)); + } else { + cur = Some((class, class, next)); + return Some((prev_start, prev_end, prev_next)); + } + } + if let Some((start, end, next)) = cur.take() { + return Some((start, end, next)); + } + None + }) +} diff --git a/vendor/aho-corasick/src/dfa.rs b/vendor/aho-corasick/src/dfa.rs new file mode 100644 index 00000000000000..1aa4f0e5cff1df --- /dev/null +++ b/vendor/aho-corasick/src/dfa.rs @@ -0,0 +1,835 @@ +/*! +Provides direct access to a DFA implementation of Aho-Corasick. + +This is a low-level API that generally only needs to be used in niche +circumstances. When possible, prefer using [`AhoCorasick`](crate::AhoCorasick) +instead of a DFA directly. Using an `DFA` directly is typically only necessary +when one needs access to the [`Automaton`] trait implementation. +*/ + +use alloc::{vec, vec::Vec}; + +use crate::{ + automaton::Automaton, + nfa::noncontiguous, + util::{ + alphabet::ByteClasses, + error::{BuildError, MatchError}, + int::{Usize, U32}, + prefilter::Prefilter, + primitives::{IteratorIndexExt, PatternID, SmallIndex, StateID}, + search::{Anchored, MatchKind, StartKind}, + special::Special, + }, +}; + +/// A DFA implementation of Aho-Corasick. +/// +/// When possible, prefer using [`AhoCorasick`](crate::AhoCorasick) instead of +/// this type directly. Using a `DFA` directly is typically only necessary when +/// one needs access to the [`Automaton`] trait implementation. +/// +/// This DFA can only be built by first constructing a [`noncontiguous::NFA`]. +/// Both [`DFA::new`] and [`Builder::build`] do this for you automatically, but +/// [`Builder::build_from_noncontiguous`] permits doing it explicitly. +/// +/// A DFA provides the best possible search performance (in this crate) via two +/// mechanisms: +/// +/// * All states use a dense representation for their transitions. +/// * All failure transitions are pre-computed such that they are never +/// explicitly handled at search time. +/// +/// These two facts combined mean that every state transition is performed +/// using a constant number of instructions. However, this comes at +/// great cost. The memory usage of a DFA can be quite exorbitant. 
+/// It is potentially multiple orders of magnitude greater than a +/// [`contiguous::NFA`](crate::nfa::contiguous::NFA) for example. In exchange, +/// a DFA will typically have better search speed than a `contiguous::NFA`, but +/// not by orders of magnitude. +/// +/// Unless you have a small number of patterns or memory usage is not a concern +/// and search performance is critical, a DFA is usually not the best choice. +/// +/// Moreover, unlike the NFAs in this crate, it is costly for a DFA to +/// support for anchored and unanchored search configurations. Namely, +/// since failure transitions are pre-computed, supporting both anchored +/// and unanchored searches requires a duplication of the transition table, +/// making the memory usage of such a DFA ever bigger. (The NFAs in this crate +/// unconditionally support both anchored and unanchored searches because there +/// is essentially no added cost for doing so.) It is for this reason that +/// a DFA's support for anchored and unanchored searches can be configured +/// via [`Builder::start_kind`]. By default, a DFA only supports unanchored +/// searches. +/// +/// # Example +/// +/// This example shows how to build an `DFA` directly and use it to execute +/// [`Automaton::try_find`]: +/// +/// ``` +/// use aho_corasick::{ +/// automaton::Automaton, +/// dfa::DFA, +/// Input, Match, +/// }; +/// +/// let patterns = &["b", "abc", "abcd"]; +/// let haystack = "abcd"; +/// +/// let nfa = DFA::new(patterns).unwrap(); +/// assert_eq!( +/// Some(Match::must(0, 1..2)), +/// nfa.try_find(&Input::new(haystack))?, +/// ); +/// # Ok::<(), Box>(()) +/// ``` +/// +/// It is also possible to implement your own version of `try_find`. See the +/// [`Automaton`] documentation for an example. +#[derive(Clone)] +pub struct DFA { + /// The DFA transition table. IDs in this table are pre-multiplied. So + /// instead of the IDs being 0, 1, 2, 3, ..., they are 0*stride, 1*stride, + /// 2*stride, 3*stride, ... + trans: Vec, + /// The matches for every match state in this DFA. This is first indexed by + /// state index (so that's `sid >> stride2`) and then by order in which the + /// matches are meant to occur. + matches: Vec>, + /// The amount of heap memory used, in bytes, by the inner Vecs of + /// 'matches'. + matches_memory_usage: usize, + /// The length of each pattern. This is used to compute the start offset + /// of a match. + pattern_lens: Vec, + /// A prefilter for accelerating searches, if one exists. + prefilter: Option, + /// The match semantics built into this DFA. + match_kind: MatchKind, + /// The total number of states in this DFA. + state_len: usize, + /// The alphabet size, or total number of equivalence classes, for this + /// DFA. Note that the actual number of transitions in each state is + /// stride=2^stride2, where stride is the smallest power of 2 greater than + /// or equal to alphabet_len. We do things this way so that we can use + /// bitshifting to go from a state ID to an index into 'matches'. + alphabet_len: usize, + /// The exponent with a base 2, such that stride=2^stride2. Given a state + /// index 'i', its state identifier is 'i << stride2'. Given a state + /// identifier 'sid', its state index is 'sid >> stride2'. + stride2: usize, + /// The equivalence classes for this DFA. All transitions are defined on + /// equivalence classes and not on the 256 distinct byte values. + byte_classes: ByteClasses, + /// The length of the shortest pattern in this automaton. 
+    min_pattern_len: usize,
+    /// The length of the longest pattern in this automaton.
+    max_pattern_len: usize,
+    /// The information required to deduce which states are "special" in this
+    /// DFA.
+    special: Special,
+}
+
+impl DFA {
+    /// Create a new Aho-Corasick DFA using the default configuration.
+    ///
+    /// Use a [`Builder`] if you want to change the configuration.
+    pub fn new<I, P>(patterns: I) -> Result<DFA, BuildError>
+    where
+        I: IntoIterator<Item = P>,
+        P: AsRef<[u8]>,
+    {
+        DFA::builder().build(patterns)
+    }
+
+    /// A convenience method for returning a new Aho-Corasick DFA builder.
+    ///
+    /// This usually permits one to just import the `DFA` type.
+    pub fn builder() -> Builder {
+        Builder::new()
+    }
+}
+
+impl DFA {
+    /// A sentinel state ID indicating that a search should stop once it has
+    /// entered this state. When a search stops, it returns a match if one has
+    /// been found, otherwise no match. A DFA always has an actual dead state
+    /// at this ID.
+    ///
+    /// N.B. DFAs, unlike NFAs, do not have any notion of a FAIL state.
+    /// Namely, the whole point of a DFA is that the FAIL state is completely
+    /// compiled away. That is, DFA construction involves pre-computing the
+    /// failure transitions everywhere, such that failure transitions are no
+    /// longer used at search time. This, combined with its uniformly dense
+    /// representation, are the two most important factors in why it's faster
+    /// than the NFAs in this crate.
+    const DEAD: StateID = StateID::new_unchecked(0);
+
+    /// Adds the given pattern IDs as matches to the given state and also
+    /// records the added memory usage.
+    fn set_matches(
+        &mut self,
+        sid: StateID,
+        pids: impl Iterator<Item = PatternID>,
+    ) {
+        let index = (sid.as_usize() >> self.stride2).checked_sub(2).unwrap();
+        let mut at_least_one = false;
+        for pid in pids {
+            self.matches[index].push(pid);
+            self.matches_memory_usage += PatternID::SIZE;
+            at_least_one = true;
+        }
+        assert!(at_least_one, "match state must have non-empty pids");
+    }
+}
+
+// SAFETY: 'start_state' always returns a valid state ID, 'next_state' always
+// returns a valid state ID given a valid state ID. We otherwise claim that
+// all other methods are correct as well.
+unsafe impl Automaton for DFA {
+    #[inline(always)]
+    fn start_state(&self, anchored: Anchored) -> Result<StateID, MatchError> {
+        // Either of the start state IDs can be DEAD, in which case, support
+        // for that type of search is not provided by this DFA. Which start
+        // state IDs are inactive depends on the 'StartKind' configuration at
+        // DFA construction time.
+ match anchored { + Anchored::No => { + let start = self.special.start_unanchored_id; + if start == DFA::DEAD { + Err(MatchError::invalid_input_unanchored()) + } else { + Ok(start) + } + } + Anchored::Yes => { + let start = self.special.start_anchored_id; + if start == DFA::DEAD { + Err(MatchError::invalid_input_anchored()) + } else { + Ok(start) + } + } + } + } + + #[inline(always)] + fn next_state( + &self, + _anchored: Anchored, + sid: StateID, + byte: u8, + ) -> StateID { + let class = self.byte_classes.get(byte); + self.trans[(sid.as_u32() + u32::from(class)).as_usize()] + } + + #[inline(always)] + fn is_special(&self, sid: StateID) -> bool { + sid <= self.special.max_special_id + } + + #[inline(always)] + fn is_dead(&self, sid: StateID) -> bool { + sid == DFA::DEAD + } + + #[inline(always)] + fn is_match(&self, sid: StateID) -> bool { + !self.is_dead(sid) && sid <= self.special.max_match_id + } + + #[inline(always)] + fn is_start(&self, sid: StateID) -> bool { + sid == self.special.start_unanchored_id + || sid == self.special.start_anchored_id + } + + #[inline(always)] + fn match_kind(&self) -> MatchKind { + self.match_kind + } + + #[inline(always)] + fn patterns_len(&self) -> usize { + self.pattern_lens.len() + } + + #[inline(always)] + fn pattern_len(&self, pid: PatternID) -> usize { + self.pattern_lens[pid].as_usize() + } + + #[inline(always)] + fn min_pattern_len(&self) -> usize { + self.min_pattern_len + } + + #[inline(always)] + fn max_pattern_len(&self) -> usize { + self.max_pattern_len + } + + #[inline(always)] + fn match_len(&self, sid: StateID) -> usize { + debug_assert!(self.is_match(sid)); + let offset = (sid.as_usize() >> self.stride2) - 2; + self.matches[offset].len() + } + + #[inline(always)] + fn match_pattern(&self, sid: StateID, index: usize) -> PatternID { + debug_assert!(self.is_match(sid)); + let offset = (sid.as_usize() >> self.stride2) - 2; + self.matches[offset][index] + } + + #[inline(always)] + fn memory_usage(&self) -> usize { + use core::mem::size_of; + + (self.trans.len() * size_of::()) + + (self.matches.len() * size_of::>()) + + self.matches_memory_usage + + (self.pattern_lens.len() * size_of::()) + + self.prefilter.as_ref().map_or(0, |p| p.memory_usage()) + } + + #[inline(always)] + fn prefilter(&self) -> Option<&Prefilter> { + self.prefilter.as_ref() + } +} + +impl core::fmt::Debug for DFA { + fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { + use crate::{ + automaton::{fmt_state_indicator, sparse_transitions}, + util::debug::DebugByte, + }; + + writeln!(f, "dfa::DFA(")?; + for index in 0..self.state_len { + let sid = StateID::new_unchecked(index << self.stride2); + // While we do currently include the FAIL state in the transition + // table (to simplify construction), it is never actually used. It + // poses problems with the code below because it gets treated as + // a match state incidentally when it is, of course, not. So we + // special case it. The fail state is always the first state after + // the dead state. + // + // If the construction is changed to remove the fail state (it + // probably should be), then this special case should be updated. 
+ if index == 1 { + writeln!(f, "F {:06}:", sid.as_usize())?; + continue; + } + fmt_state_indicator(f, self, sid)?; + write!(f, "{:06}: ", sid.as_usize())?; + + let it = (0..self.byte_classes.alphabet_len()).map(|class| { + (class.as_u8(), self.trans[sid.as_usize() + class]) + }); + for (i, (start, end, next)) in sparse_transitions(it).enumerate() { + if i > 0 { + write!(f, ", ")?; + } + if start == end { + write!( + f, + "{:?} => {:?}", + DebugByte(start), + next.as_usize() + )?; + } else { + write!( + f, + "{:?}-{:?} => {:?}", + DebugByte(start), + DebugByte(end), + next.as_usize() + )?; + } + } + write!(f, "\n")?; + if self.is_match(sid) { + write!(f, " matches: ")?; + for i in 0..self.match_len(sid) { + if i > 0 { + write!(f, ", ")?; + } + let pid = self.match_pattern(sid, i); + write!(f, "{}", pid.as_usize())?; + } + write!(f, "\n")?; + } + } + writeln!(f, "match kind: {:?}", self.match_kind)?; + writeln!(f, "prefilter: {:?}", self.prefilter.is_some())?; + writeln!(f, "state length: {:?}", self.state_len)?; + writeln!(f, "pattern length: {:?}", self.patterns_len())?; + writeln!(f, "shortest pattern length: {:?}", self.min_pattern_len)?; + writeln!(f, "longest pattern length: {:?}", self.max_pattern_len)?; + writeln!(f, "alphabet length: {:?}", self.alphabet_len)?; + writeln!(f, "stride: {:?}", 1 << self.stride2)?; + writeln!(f, "byte classes: {:?}", self.byte_classes)?; + writeln!(f, "memory usage: {:?}", self.memory_usage())?; + writeln!(f, ")")?; + Ok(()) + } +} + +/// A builder for configuring an Aho-Corasick DFA. +/// +/// This builder has a subset of the options available to a +/// [`AhoCorasickBuilder`](crate::AhoCorasickBuilder). Of the shared options, +/// their behavior is identical. +#[derive(Clone, Debug)] +pub struct Builder { + noncontiguous: noncontiguous::Builder, + start_kind: StartKind, + byte_classes: bool, +} + +impl Default for Builder { + fn default() -> Builder { + Builder { + noncontiguous: noncontiguous::Builder::new(), + start_kind: StartKind::Unanchored, + byte_classes: true, + } + } +} + +impl Builder { + /// Create a new builder for configuring an Aho-Corasick DFA. + pub fn new() -> Builder { + Builder::default() + } + + /// Build an Aho-Corasick DFA from the given iterator of patterns. + /// + /// A builder may be reused to create more DFAs. + pub fn build(&self, patterns: I) -> Result + where + I: IntoIterator, + P: AsRef<[u8]>, + { + let nnfa = self.noncontiguous.build(patterns)?; + self.build_from_noncontiguous(&nnfa) + } + + /// Build an Aho-Corasick DFA from the given noncontiguous NFA. + /// + /// Note that when this method is used, only the `start_kind` and + /// `byte_classes` settings on this builder are respected. The other + /// settings only apply to the initial construction of the Aho-Corasick + /// automaton. Since using this method requires that initial construction + /// has already completed, all settings impacting only initial construction + /// are no longer relevant. + pub fn build_from_noncontiguous( + &self, + nnfa: &noncontiguous::NFA, + ) -> Result { + debug!("building DFA"); + let byte_classes = if self.byte_classes { + nnfa.byte_classes().clone() + } else { + ByteClasses::singletons() + }; + let state_len = match self.start_kind { + StartKind::Unanchored | StartKind::Anchored => nnfa.states().len(), + StartKind::Both => { + // These unwraps are OK because we know that the number of + // NFA states is < StateID::LIMIT which is in turn less than + // i32::MAX. Thus, there is always room to multiply by 2. 
+ // Finally, the number of states is always at least 4 in the + // NFA (DEAD, FAIL, START-UNANCHORED, START-ANCHORED), so the + // subtraction of 4 is okay. + // + // Note that we subtract 4 because the "anchored" part of + // the DFA duplicates the unanchored part (without failure + // transitions), but reuses the DEAD, FAIL and START states. + nnfa.states() + .len() + .checked_mul(2) + .unwrap() + .checked_sub(4) + .unwrap() + } + }; + let trans_len = + match state_len.checked_shl(byte_classes.stride2().as_u32()) { + Some(trans_len) => trans_len, + None => { + return Err(BuildError::state_id_overflow( + StateID::MAX.as_u64(), + usize::MAX.as_u64(), + )) + } + }; + StateID::new(trans_len.checked_sub(byte_classes.stride()).unwrap()) + .map_err(|e| { + BuildError::state_id_overflow( + StateID::MAX.as_u64(), + e.attempted(), + ) + })?; + let num_match_states = match self.start_kind { + StartKind::Unanchored | StartKind::Anchored => { + nnfa.special().max_match_id.as_usize().checked_sub(1).unwrap() + } + StartKind::Both => nnfa + .special() + .max_match_id + .as_usize() + .checked_sub(1) + .unwrap() + .checked_mul(2) + .unwrap(), + }; + let mut dfa = DFA { + trans: vec![DFA::DEAD; trans_len], + matches: vec![vec![]; num_match_states], + matches_memory_usage: 0, + pattern_lens: nnfa.pattern_lens_raw().to_vec(), + prefilter: nnfa.prefilter().cloned(), + match_kind: nnfa.match_kind(), + state_len, + alphabet_len: byte_classes.alphabet_len(), + stride2: byte_classes.stride2(), + byte_classes, + min_pattern_len: nnfa.min_pattern_len(), + max_pattern_len: nnfa.max_pattern_len(), + // The special state IDs are set later. + special: Special::zero(), + }; + match self.start_kind { + StartKind::Both => { + self.finish_build_both_starts(nnfa, &mut dfa); + } + StartKind::Unanchored => { + self.finish_build_one_start(Anchored::No, nnfa, &mut dfa); + } + StartKind::Anchored => { + self.finish_build_one_start(Anchored::Yes, nnfa, &mut dfa) + } + } + debug!( + "DFA built, ", + dfa.state_len, + dfa.memory_usage(), + dfa.byte_classes.alphabet_len(), + dfa.byte_classes.stride(), + ); + // The vectors can grow ~twice as big during construction because a + // Vec amortizes growth. But here, let's shrink things back down to + // what we actually need since we're never going to add more to it. + dfa.trans.shrink_to_fit(); + dfa.pattern_lens.shrink_to_fit(); + dfa.matches.shrink_to_fit(); + // TODO: We might also want to shrink each Vec inside of `dfa.matches`, + // or even better, convert it to one contiguous allocation. But I think + // I went with nested allocs for good reason (can't remember), so this + // may be tricky to do. I decided not to shrink them here because it + // might require a fair bit of work to do. It's unclear whether it's + // worth it. + Ok(dfa) + } + + /// Finishes building a DFA for either unanchored or anchored searches, + /// but NOT both. + fn finish_build_one_start( + &self, + anchored: Anchored, + nnfa: &noncontiguous::NFA, + dfa: &mut DFA, + ) { + // This function always succeeds because we check above that all of the + // states in the NFA can be mapped to DFA state IDs. 
+ let stride2 = dfa.stride2; + let old2new = |oldsid: StateID| { + StateID::new_unchecked(oldsid.as_usize() << stride2) + }; + for (oldsid, state) in nnfa.states().iter().with_state_ids() { + let newsid = old2new(oldsid); + if state.is_match() { + dfa.set_matches(newsid, nnfa.iter_matches(oldsid)); + } + sparse_iter( + nnfa, + oldsid, + &dfa.byte_classes, + |byte, class, mut oldnextsid| { + if oldnextsid == noncontiguous::NFA::FAIL { + if anchored.is_anchored() { + oldnextsid = noncontiguous::NFA::DEAD; + } else if state.fail() == noncontiguous::NFA::DEAD { + // This is a special case that avoids following + // DEAD transitions in a non-contiguous NFA. + // Following these transitions is pretty slow + // because the non-contiguous NFA will always use + // a sparse representation for it (because the + // DEAD state is usually treated as a sentinel). + // The *vast* majority of failure states are DEAD + // states, so this winds up being pretty slow if + // we go through the non-contiguous NFA state + // transition logic. Instead, just do it ourselves. + oldnextsid = noncontiguous::NFA::DEAD; + } else { + oldnextsid = nnfa.next_state( + Anchored::No, + state.fail(), + byte, + ); + } + } + dfa.trans[newsid.as_usize() + usize::from(class)] = + old2new(oldnextsid); + }, + ); + } + // Now that we've remapped all the IDs in our states, all that's left + // is remapping the special state IDs. + let old = nnfa.special(); + let new = &mut dfa.special; + new.max_special_id = old2new(old.max_special_id); + new.max_match_id = old2new(old.max_match_id); + if anchored.is_anchored() { + new.start_unanchored_id = DFA::DEAD; + new.start_anchored_id = old2new(old.start_anchored_id); + } else { + new.start_unanchored_id = old2new(old.start_unanchored_id); + new.start_anchored_id = DFA::DEAD; + } + } + + /// Finishes building a DFA that supports BOTH unanchored and anchored + /// searches. It works by inter-leaving unanchored states with anchored + /// states in the same transition table. This way, we avoid needing to + /// re-shuffle states afterward to ensure that our states still look like + /// DEAD, MATCH, ..., START-UNANCHORED, START-ANCHORED, NON-MATCH, ... + /// + /// Honestly this is pretty inscrutable... Simplifications are most + /// welcome. 
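+    // Illustrative sketch (not from the upstream crate) of what the dual-start
+    // construction below buys at the API level: a single DFA serving both
+    // anchored and unanchored searches, at the cost of a larger transition
+    // table.
+    //
+    // ```
+    // use aho_corasick::{
+    //     automaton::Automaton, dfa::DFA, Anchored, Input, StartKind,
+    // };
+    //
+    // let dfa = DFA::builder()
+    //     .start_kind(StartKind::Both)
+    //     .build(&["abc"])
+    //     .unwrap();
+    // // Unanchored: the pattern may start anywhere in the haystack.
+    // let unanchored = Input::new("xabc").anchored(Anchored::No);
+    // assert!(dfa.try_find(&unanchored).unwrap().is_some());
+    // // Anchored: the pattern must begin where the search begins.
+    // let anchored = Input::new("xabc").anchored(Anchored::Yes);
+    // assert!(dfa.try_find(&anchored).unwrap().is_none());
+    // ```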
+ fn finish_build_both_starts( + &self, + nnfa: &noncontiguous::NFA, + dfa: &mut DFA, + ) { + let stride2 = dfa.stride2; + let stride = 1 << stride2; + let mut remap_unanchored = vec![DFA::DEAD; nnfa.states().len()]; + let mut remap_anchored = vec![DFA::DEAD; nnfa.states().len()]; + let mut is_anchored = vec![false; dfa.state_len]; + let mut newsid = DFA::DEAD; + let next_dfa_id = + |sid: StateID| StateID::new_unchecked(sid.as_usize() + stride); + for (oldsid, state) in nnfa.states().iter().with_state_ids() { + if oldsid == noncontiguous::NFA::DEAD + || oldsid == noncontiguous::NFA::FAIL + { + remap_unanchored[oldsid] = newsid; + remap_anchored[oldsid] = newsid; + newsid = next_dfa_id(newsid); + } else if oldsid == nnfa.special().start_unanchored_id + || oldsid == nnfa.special().start_anchored_id + { + if oldsid == nnfa.special().start_unanchored_id { + remap_unanchored[oldsid] = newsid; + remap_anchored[oldsid] = DFA::DEAD; + } else { + remap_unanchored[oldsid] = DFA::DEAD; + remap_anchored[oldsid] = newsid; + is_anchored[newsid.as_usize() >> stride2] = true; + } + if state.is_match() { + dfa.set_matches(newsid, nnfa.iter_matches(oldsid)); + } + sparse_iter( + nnfa, + oldsid, + &dfa.byte_classes, + |_, class, oldnextsid| { + let class = usize::from(class); + if oldnextsid == noncontiguous::NFA::FAIL { + dfa.trans[newsid.as_usize() + class] = DFA::DEAD; + } else { + dfa.trans[newsid.as_usize() + class] = oldnextsid; + } + }, + ); + newsid = next_dfa_id(newsid); + } else { + let unewsid = newsid; + newsid = next_dfa_id(newsid); + let anewsid = newsid; + newsid = next_dfa_id(newsid); + + remap_unanchored[oldsid] = unewsid; + remap_anchored[oldsid] = anewsid; + is_anchored[anewsid.as_usize() >> stride2] = true; + if state.is_match() { + dfa.set_matches(unewsid, nnfa.iter_matches(oldsid)); + dfa.set_matches(anewsid, nnfa.iter_matches(oldsid)); + } + sparse_iter( + nnfa, + oldsid, + &dfa.byte_classes, + |byte, class, oldnextsid| { + let class = usize::from(class); + if oldnextsid == noncontiguous::NFA::FAIL { + let oldnextsid = + if state.fail() == noncontiguous::NFA::DEAD { + noncontiguous::NFA::DEAD + } else { + nnfa.next_state( + Anchored::No, + state.fail(), + byte, + ) + }; + dfa.trans[unewsid.as_usize() + class] = oldnextsid; + } else { + dfa.trans[unewsid.as_usize() + class] = oldnextsid; + dfa.trans[anewsid.as_usize() + class] = oldnextsid; + } + }, + ); + } + } + for i in 0..dfa.state_len { + let sid = i << stride2; + if is_anchored[i] { + for next in dfa.trans[sid..][..stride].iter_mut() { + *next = remap_anchored[*next]; + } + } else { + for next in dfa.trans[sid..][..stride].iter_mut() { + *next = remap_unanchored[*next]; + } + } + } + // Now that we've remapped all the IDs in our states, all that's left + // is remapping the special state IDs. + let old = nnfa.special(); + let new = &mut dfa.special; + new.max_special_id = remap_anchored[old.max_special_id]; + new.max_match_id = remap_anchored[old.max_match_id]; + new.start_unanchored_id = remap_unanchored[old.start_unanchored_id]; + new.start_anchored_id = remap_anchored[old.start_anchored_id]; + } + + /// Set the desired match semantics. + /// + /// This only applies when using [`Builder::build`] and not + /// [`Builder::build_from_noncontiguous`]. + /// + /// See + /// [`AhoCorasickBuilder::match_kind`](crate::AhoCorasickBuilder::match_kind) + /// for more documentation and examples. 
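+    // Illustrative sketch (not from the upstream crate) of the builder option
+    // documented above, mirroring the crate-level `Samwise`/`Sam` example but
+    // going through the DFA builder.
+    //
+    // ```
+    // use aho_corasick::{automaton::Automaton, dfa::DFA, Input, MatchKind};
+    //
+    // let dfa = DFA::builder()
+    //     .match_kind(MatchKind::LeftmostFirst)
+    //     .build(&["Samwise", "Sam"])
+    //     .unwrap();
+    // let mat = dfa.try_find(&Input::new("Samwise")).unwrap().unwrap();
+    // // Leftmost-first semantics prefer the pattern listed first, "Samwise".
+    // assert_eq!((0, 7), (mat.start(), mat.end()));
+    // ```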
+ pub fn match_kind(&mut self, kind: MatchKind) -> &mut Builder { + self.noncontiguous.match_kind(kind); + self + } + + /// Enable ASCII-aware case insensitive matching. + /// + /// This only applies when using [`Builder::build`] and not + /// [`Builder::build_from_noncontiguous`]. + /// + /// See + /// [`AhoCorasickBuilder::ascii_case_insensitive`](crate::AhoCorasickBuilder::ascii_case_insensitive) + /// for more documentation and examples. + pub fn ascii_case_insensitive(&mut self, yes: bool) -> &mut Builder { + self.noncontiguous.ascii_case_insensitive(yes); + self + } + + /// Enable heuristic prefilter optimizations. + /// + /// This only applies when using [`Builder::build`] and not + /// [`Builder::build_from_noncontiguous`]. + /// + /// See + /// [`AhoCorasickBuilder::prefilter`](crate::AhoCorasickBuilder::prefilter) + /// for more documentation and examples. + pub fn prefilter(&mut self, yes: bool) -> &mut Builder { + self.noncontiguous.prefilter(yes); + self + } + + /// Sets the starting state configuration for the automaton. + /// + /// See + /// [`AhoCorasickBuilder::start_kind`](crate::AhoCorasickBuilder::start_kind) + /// for more documentation and examples. + pub fn start_kind(&mut self, kind: StartKind) -> &mut Builder { + self.start_kind = kind; + self + } + + /// A debug setting for whether to attempt to shrink the size of the + /// automaton's alphabet or not. + /// + /// This should never be enabled unless you're debugging an automaton. + /// Namely, disabling byte classes makes transitions easier to reason + /// about, since they use the actual bytes instead of equivalence classes. + /// Disabling this confers no performance benefit at search time. + /// + /// See + /// [`AhoCorasickBuilder::byte_classes`](crate::AhoCorasickBuilder::byte_classes) + /// for more documentation and examples. + pub fn byte_classes(&mut self, yes: bool) -> &mut Builder { + self.byte_classes = yes; + self + } +} + +/// Iterate over all possible equivalence class transitions in this state. +/// The closure is called for all transitions with a distinct equivalence +/// class, even those not explicitly represented in this sparse state. For +/// any implicitly defined transitions, the given closure is called with +/// the fail state ID. +/// +/// The closure is guaranteed to be called precisely +/// `byte_classes.alphabet_len()` times, once for every possible class in +/// ascending order. +fn sparse_iter( + nnfa: &noncontiguous::NFA, + oldsid: StateID, + classes: &ByteClasses, + mut f: F, +) { + let mut prev_class = None; + let mut byte = 0usize; + for t in nnfa.iter_trans(oldsid) { + while byte < usize::from(t.byte()) { + let rep = byte.as_u8(); + let class = classes.get(rep); + byte += 1; + if prev_class != Some(class) { + f(rep, class, noncontiguous::NFA::FAIL); + prev_class = Some(class); + } + } + let rep = t.byte(); + let class = classes.get(rep); + byte += 1; + if prev_class != Some(class) { + f(rep, class, t.next()); + prev_class = Some(class); + } + } + for b in byte..=255 { + let rep = b.as_u8(); + let class = classes.get(rep); + if prev_class != Some(class) { + f(rep, class, noncontiguous::NFA::FAIL); + prev_class = Some(class); + } + } +} diff --git a/vendor/aho-corasick/src/lib.rs b/vendor/aho-corasick/src/lib.rs new file mode 100644 index 00000000000000..273dd3bfe95781 --- /dev/null +++ b/vendor/aho-corasick/src/lib.rs @@ -0,0 +1,326 @@ +/*! +A library for finding occurrences of many patterns at once. 
This library +provides multiple pattern search principally through an implementation of the +[Aho-Corasick algorithm](https://en.wikipedia.org/wiki/Aho%E2%80%93Corasick_algorithm), +which builds a fast finite state machine for executing searches in linear time. + +Additionally, this library provides a number of configuration options for +building the automaton that permit controlling the space versus time trade +off. Other features include simple ASCII case insensitive matching, finding +overlapping matches, replacements, searching streams and even searching and +replacing text in streams. + +Finally, unlike most other Aho-Corasick implementations, this one +supports enabling [leftmost-first](MatchKind::LeftmostFirst) or +[leftmost-longest](MatchKind::LeftmostLongest) match semantics, using a +(seemingly) novel alternative construction algorithm. For more details on what +match semantics means, see the [`MatchKind`] type. + +# Overview + +This section gives a brief overview of the primary types in this crate: + +* [`AhoCorasick`] is the primary type and represents an Aho-Corasick automaton. +This is the type you use to execute searches. +* [`AhoCorasickBuilder`] can be used to build an Aho-Corasick automaton, and +supports configuring a number of options. +* [`Match`] represents a single match reported by an Aho-Corasick automaton. +Each match has two pieces of information: the pattern that matched and the +start and end byte offsets corresponding to the position in the haystack at +which it matched. + +# Example: basic searching + +This example shows how to search for occurrences of multiple patterns +simultaneously. Each match includes the pattern that matched along with the +byte offsets of the match. + +``` +use aho_corasick::{AhoCorasick, PatternID}; + +let patterns = &["apple", "maple", "Snapple"]; +let haystack = "Nobody likes maple in their apple flavored Snapple."; + +let ac = AhoCorasick::new(patterns).unwrap(); +let mut matches = vec![]; +for mat in ac.find_iter(haystack) { + matches.push((mat.pattern(), mat.start(), mat.end())); +} +assert_eq!(matches, vec![ + (PatternID::must(1), 13, 18), + (PatternID::must(0), 28, 33), + (PatternID::must(2), 43, 50), +]); +``` + +# Example: case insensitivity + +This is like the previous example, but matches `Snapple` case insensitively +using `AhoCorasickBuilder`: + +``` +use aho_corasick::{AhoCorasick, PatternID}; + +let patterns = &["apple", "maple", "snapple"]; +let haystack = "Nobody likes maple in their apple flavored Snapple."; + +let ac = AhoCorasick::builder() + .ascii_case_insensitive(true) + .build(patterns) + .unwrap(); +let mut matches = vec![]; +for mat in ac.find_iter(haystack) { + matches.push((mat.pattern(), mat.start(), mat.end())); +} +assert_eq!(matches, vec![ + (PatternID::must(1), 13, 18), + (PatternID::must(0), 28, 33), + (PatternID::must(2), 43, 50), +]); +``` + +# Example: replacing matches in a stream + +This example shows how to execute a search and replace on a stream without +loading the entire stream into memory first. + +``` +# #[cfg(feature = "std")] { +use aho_corasick::AhoCorasick; + +# fn example() -> Result<(), std::io::Error> { +let patterns = &["fox", "brown", "quick"]; +let replace_with = &["sloth", "grey", "slow"]; + +// In a real example, these might be `std::fs::File`s instead. All you need to +// do is supply a pair of `std::io::Read` and `std::io::Write` implementations. 
+let rdr = "The quick brown fox."; +let mut wtr = vec![]; + +let ac = AhoCorasick::new(patterns).unwrap(); +ac.try_stream_replace_all(rdr.as_bytes(), &mut wtr, replace_with)?; +assert_eq!(b"The slow grey sloth.".to_vec(), wtr); +# Ok(()) }; example().unwrap() +# } +``` + +# Example: finding the leftmost first match + +In the textbook description of Aho-Corasick, its formulation is typically +structured such that it reports all possible matches, even when they overlap +with another. In many cases, overlapping matches may not be desired, such as +the case of finding all successive non-overlapping matches like you might with +a standard regular expression. + +Unfortunately the "obvious" way to modify the Aho-Corasick algorithm to do +this doesn't always work in the expected way, since it will report matches as +soon as they are seen. For example, consider matching the regex `Samwise|Sam` +against the text `Samwise`. Most regex engines (that are Perl-like, or +non-POSIX) will report `Samwise` as a match, but the standard Aho-Corasick +algorithm modified for reporting non-overlapping matches will report `Sam`. + +A novel contribution of this library is the ability to change the match +semantics of Aho-Corasick (without additional search time overhead) such that +`Samwise` is reported instead. For example, here's the standard approach: + +``` +use aho_corasick::AhoCorasick; + +let patterns = &["Samwise", "Sam"]; +let haystack = "Samwise"; + +let ac = AhoCorasick::new(patterns).unwrap(); +let mat = ac.find(haystack).expect("should have a match"); +assert_eq!("Sam", &haystack[mat.start()..mat.end()]); +``` + +And now here's the leftmost-first version, which matches how a Perl-like +regex will work: + +``` +use aho_corasick::{AhoCorasick, MatchKind}; + +let patterns = &["Samwise", "Sam"]; +let haystack = "Samwise"; + +let ac = AhoCorasick::builder() + .match_kind(MatchKind::LeftmostFirst) + .build(patterns) + .unwrap(); +let mat = ac.find(haystack).expect("should have a match"); +assert_eq!("Samwise", &haystack[mat.start()..mat.end()]); +``` + +In addition to leftmost-first semantics, this library also supports +leftmost-longest semantics, which match the POSIX behavior of a regular +expression alternation. See [`MatchKind`] for more details. + +# Prefilters + +While an Aho-Corasick automaton can perform admirably when compared to more +naive solutions, it is generally slower than more specialized algorithms that +are accelerated using vector instructions such as SIMD. + +For that reason, this library will internally use a "prefilter" to attempt +to accelerate searches when possible. Currently, this library has several +different algorithms it might use depending on the patterns provided. Once the +number of patterns gets too big, prefilters are no longer used. + +While a prefilter is generally good to have on by default since it works +well in the common case, it can lead to less predictable or even sub-optimal +performance in some cases. For that reason, prefilters can be explicitly +disabled via [`AhoCorasickBuilder::prefilter`]. + +# Lower level APIs + +This crate also provides several sub-modules that collectively expose many of +the implementation details of the main [`AhoCorasick`] type. Most users of this +library can completely ignore the submodules and their contents, but if you +needed finer grained control, some parts of them may be useful to you. 
Here is +a brief overview of each and why you might want to use them: + +* The [`packed`] sub-module contains a lower level API for using fast +vectorized routines for finding a small number of patterns in a haystack. +You might want to use this API when you want to completely side-step using +Aho-Corasick automata. Otherwise, the fast vectorized routines are used +automatically as prefilters for `AhoCorasick` searches whenever possible. +* The [`automaton`] sub-module provides a lower level finite state +machine interface that the various Aho-Corasick implementations in +this crate implement. This sub-module's main contribution is the +[`Automaton`](automaton::Automaton) trait, which permits manually walking the +state transitions of an Aho-Corasick automaton. +* The [`dfa`] and [`nfa`] sub-modules provide DFA and NFA implementations of +the aforementioned `Automaton` trait. The main reason one might want to use +these sub-modules is to get access to a type that implements the `Automaton` +trait. (The top-level `AhoCorasick` type does not implement the `Automaton` +trait.) + +As mentioned above, if you aren't sure whether you need these sub-modules, +you should be able to safely ignore them and just focus on the [`AhoCorasick`] +type. + +# Crate features + +This crate exposes a few features for controlling dependency usage and whether +this crate can be used without the standard library. + +* **std** - + Enables support for the standard library. This feature is enabled by + default. When disabled, only `core` and `alloc` are used. At an API + level, enabling `std` enables `std::error::Error` trait impls for the + various error types, and higher level stream search routines such as + [`AhoCorasick::try_stream_find_iter`]. But the `std` feature is also required + to enable vectorized prefilters. Prefilters can greatly accelerate searches, + but generally only apply when the number of patterns is small (less than + ~100). +* **perf-literal** - + Enables support for literal prefilters that use vectorized routines from + external crates. This feature is enabled by default. If you're only using + Aho-Corasick for large numbers of patterns or otherwise can abide lower + throughput when searching with a small number of patterns, then it is + reasonable to disable this feature. +* **logging** - + Enables a dependency on the `log` crate and emits messages to aide in + diagnostics. This feature is disabled by default. +*/ + +#![no_std] +#![deny(missing_docs)] +#![deny(rustdoc::broken_intra_doc_links)] +#![cfg_attr(docsrs, feature(doc_cfg))] + +extern crate alloc; +#[cfg(any(test, feature = "std"))] +extern crate std; + +#[cfg(doctest)] +doc_comment::doctest!("../README.md"); + +#[cfg(feature = "std")] +pub use crate::ahocorasick::StreamFindIter; +pub use crate::{ + ahocorasick::{ + AhoCorasick, AhoCorasickBuilder, AhoCorasickKind, FindIter, + FindOverlappingIter, + }, + util::{ + error::{BuildError, MatchError, MatchErrorKind}, + primitives::{PatternID, PatternIDError}, + search::{Anchored, Input, Match, MatchKind, Span, StartKind}, + }, +}; + +#[macro_use] +mod macros; + +mod ahocorasick; +pub mod automaton; +pub mod dfa; +pub mod nfa; +pub mod packed; +#[cfg(test)] +mod tests; +// I wrote out the module for implementing fst::Automaton only to later realize +// that this would make fst a public dependency and fst is not at 1.0 yet. I +// decided to just keep the code in tree, but build it only during tests. +// +// TODO: I think I've changed my mind again. 
I'm considering pushing it out +// into either a separate crate or into 'fst' directly as an optional feature. +// #[cfg(test)] +// #[allow(dead_code)] +// mod transducer; +pub(crate) mod util; + +#[cfg(test)] +mod testoibits { + use std::panic::{RefUnwindSafe, UnwindSafe}; + + use super::*; + + fn assert_all() {} + + #[test] + fn oibits_main() { + assert_all::(); + assert_all::(); + assert_all::(); + assert_all::(); + assert_all::(); + + assert_all::(); + assert_all::(); + assert_all::(); + + assert_all::(); + assert_all::(); + assert_all::(); + assert_all::(); + assert_all::(); + assert_all::(); + } + + #[test] + fn oibits_automaton() { + use crate::{automaton, dfa::DFA}; + + assert_all::>(); + assert_all::>(); + #[cfg(feature = "std")] + assert_all::>(); + assert_all::(); + + assert_all::(); + assert_all::(); + } + + #[test] + fn oibits_packed() { + use crate::packed; + + assert_all::(); + assert_all::(); + assert_all::(); + assert_all::(); + assert_all::(); + } +} diff --git a/vendor/aho-corasick/src/macros.rs b/vendor/aho-corasick/src/macros.rs new file mode 100644 index 00000000000000..fc73e6eddd82ef --- /dev/null +++ b/vendor/aho-corasick/src/macros.rs @@ -0,0 +1,18 @@ +#![allow(unused_macros)] + +macro_rules! log { + ($($tt:tt)*) => { + #[cfg(feature = "logging")] + { + $($tt)* + } + } +} + +macro_rules! debug { + ($($tt:tt)*) => { log!(log::debug!($($tt)*)) } +} + +macro_rules! trace { + ($($tt:tt)*) => { log!(log::trace!($($tt)*)) } +} diff --git a/vendor/aho-corasick/src/nfa/contiguous.rs b/vendor/aho-corasick/src/nfa/contiguous.rs new file mode 100644 index 00000000000000..6ea2a47f849ee2 --- /dev/null +++ b/vendor/aho-corasick/src/nfa/contiguous.rs @@ -0,0 +1,1141 @@ +/*! +Provides a contiguous NFA implementation of Aho-Corasick. + +This is a low-level API that generally only needs to be used in niche +circumstances. When possible, prefer using [`AhoCorasick`](crate::AhoCorasick) +instead of a contiguous NFA directly. Using an `NFA` directly is typically only +necessary when one needs access to the [`Automaton`] trait implementation. +*/ + +use alloc::{vec, vec::Vec}; + +use crate::{ + automaton::Automaton, + nfa::noncontiguous, + util::{ + alphabet::ByteClasses, + error::{BuildError, MatchError}, + int::{Usize, U16, U32}, + prefilter::Prefilter, + primitives::{IteratorIndexExt, PatternID, SmallIndex, StateID}, + search::{Anchored, MatchKind}, + special::Special, + }, +}; + +/// A contiguous NFA implementation of Aho-Corasick. +/// +/// When possible, prefer using [`AhoCorasick`](crate::AhoCorasick) instead of +/// this type directly. Using an `NFA` directly is typically only necessary +/// when one needs access to the [`Automaton`] trait implementation. +/// +/// This NFA can only be built by first constructing a [`noncontiguous::NFA`]. +/// Both [`NFA::new`] and [`Builder::build`] do this for you automatically, but +/// [`Builder::build_from_noncontiguous`] permits doing it explicitly. +/// +/// The main difference between a noncontiguous NFA and a contiguous NFA is +/// that the latter represents all of its states and transitions in a single +/// allocation, where as the former uses a separate allocation for each state. 
+///
+/// Doing this at construction time while keeping a low memory footprint isn't
+/// feasible, which is primarily why there are two different NFA types: one
+/// that does the least amount of work possible to build itself, and another
+/// that does a little extra work to compact itself and make state transitions
+/// faster by making some states use a dense representation.
+///
+/// Because a contiguous NFA uses a single allocation, there is a lot more
+/// opportunity for compression tricks to reduce the heap memory used. Indeed,
+/// it is not uncommon for a contiguous NFA to use an order of magnitude less
+/// heap memory than a noncontiguous NFA. Since building a contiguous NFA
+/// usually only takes a fraction of the time it takes to build a noncontiguous
+/// NFA, the overall build time is not much slower. Thus, in most cases, a
+/// contiguous NFA is the best choice.
+///
+/// Since a contiguous NFA uses various tricks for compression and to achieve
+/// faster state transitions, currently, its limit on the number of states
+/// is somewhat smaller than what a noncontiguous NFA can achieve. Generally
+/// speaking, you shouldn't expect to run into this limit if the number of
+/// patterns is under 1 million. It is plausible that this limit will be
+/// increased in the future. If the limit is reached, building a contiguous NFA
+/// will return an error. Often, since building a contiguous NFA is relatively
+/// cheap, it can make sense to always try it even if you aren't sure if it
+/// will fail or not. If it does, you can always fall back to a noncontiguous
+/// NFA. (Indeed, the main [`AhoCorasick`](crate::AhoCorasick) type employs a
+/// strategy similar to this at construction time.)
+///
+/// # Example
+///
+/// This example shows how to build an `NFA` directly and use it to execute
+/// [`Automaton::try_find`]:
+///
+/// ```
+/// use aho_corasick::{
+///     automaton::Automaton,
+///     nfa::contiguous::NFA,
+///     Input, Match,
+/// };
+///
+/// let patterns = &["b", "abc", "abcd"];
+/// let haystack = "abcd";
+///
+/// let nfa = NFA::new(patterns).unwrap();
+/// assert_eq!(
+///     Some(Match::must(0, 1..2)),
+///     nfa.try_find(&Input::new(haystack))?,
+/// );
+/// # Ok::<(), Box<dyn std::error::Error>>(())
+/// ```
+///
+/// It is also possible to implement your own version of `try_find`. See the
+/// [`Automaton`] documentation for an example.
+#[derive(Clone)]
+pub struct NFA {
+    /// The raw NFA representation. Each state is packed with a header
+    /// (containing the format of the state, the failure transition and, for
+    /// a sparse state, the number of transitions), its transitions and any
+    /// matching pattern IDs for match states.
+    repr: Vec<u32>,
+    /// The length of each pattern. This is used to compute the start offset
+    /// of a match.
+    pattern_lens: Vec<SmallIndex>,
+    /// The total number of states in this NFA.
+    state_len: usize,
+    /// A prefilter for accelerating searches, if one exists.
+    prefilter: Option<Prefilter>,
+    /// The match semantics built into this NFA.
+    match_kind: MatchKind,
+    /// The alphabet size, or total number of equivalence classes, for this
+    /// NFA. Dense states always have this many transitions.
+    alphabet_len: usize,
+    /// The equivalence classes for this NFA. All transitions, dense and
+    /// sparse, are defined on equivalence classes and not on the 256 distinct
+    /// byte values.
+    byte_classes: ByteClasses,
+    /// The length of the shortest pattern in this automaton.
+    min_pattern_len: usize,
+    /// The length of the longest pattern in this automaton.
+ max_pattern_len: usize, + /// The information required to deduce which states are "special" in this + /// NFA. + special: Special, +} + +impl NFA { + /// Create a new Aho-Corasick contiguous NFA using the default + /// configuration. + /// + /// Use a [`Builder`] if you want to change the configuration. + pub fn new(patterns: I) -> Result + where + I: IntoIterator, + P: AsRef<[u8]>, + { + NFA::builder().build(patterns) + } + + /// A convenience method for returning a new Aho-Corasick contiguous NFA + /// builder. + /// + /// This usually permits one to just import the `NFA` type. + pub fn builder() -> Builder { + Builder::new() + } +} + +impl NFA { + /// A sentinel state ID indicating that a search should stop once it has + /// entered this state. When a search stops, it returns a match if one + /// has been found, otherwise no match. A contiguous NFA always has an + /// actual dead state at this ID. + const DEAD: StateID = StateID::new_unchecked(0); + /// Another sentinel state ID indicating that a search should move through + /// current state's failure transition. + /// + /// Note that unlike DEAD, this does not actually point to a valid state + /// in a contiguous NFA. (noncontiguous::NFA::FAIL does point to a valid + /// state.) Instead, this points to the position that is guaranteed to + /// never be a valid state ID (by making sure it points to a place in the + /// middle of the encoding of the DEAD state). Since we never need to + /// actually look at the FAIL state itself, this works out. + /// + /// By why do it this way? So that FAIL is a constant. I don't have any + /// concrete evidence that this materially helps matters, but it's easy to + /// do. The alternative would be making the FAIL ID point to the second + /// state, which could be made a constant but is a little trickier to do. + /// The easiest path is to just make the FAIL state a runtime value, but + /// since comparisons with FAIL occur in perf critical parts of the search, + /// we want it to be as tight as possible and not waste any registers. + /// + /// Very hand wavy... But the code complexity that results from this is + /// very mild. + const FAIL: StateID = StateID::new_unchecked(1); +} + +// SAFETY: 'start_state' always returns a valid state ID, 'next_state' always +// returns a valid state ID given a valid state ID. We otherwise claim that +// all other methods are correct as well. +unsafe impl Automaton for NFA { + #[inline(always)] + fn start_state(&self, anchored: Anchored) -> Result { + match anchored { + Anchored::No => Ok(self.special.start_unanchored_id), + Anchored::Yes => Ok(self.special.start_anchored_id), + } + } + + #[inline(always)] + fn next_state( + &self, + anchored: Anchored, + mut sid: StateID, + byte: u8, + ) -> StateID { + let repr = &self.repr; + let class = self.byte_classes.get(byte); + let u32tosid = StateID::from_u32_unchecked; + loop { + let o = sid.as_usize(); + let kind = repr[o] & 0xFF; + // I tried to encapsulate the "next transition" logic into its own + // function, but it seemed to always result in sub-optimal codegen + // that led to real and significant slowdowns. So we just inline + // the logic here. + // + // I've also tried a lot of different ways to speed up this + // routine, and most of them have failed. 
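+            // As a reminder of the encoding produced by State::write: the low
+            // byte of repr[o] is the state's kind (KIND_DENSE, KIND_ONE or,
+            // for a sparse state, its transition count), repr[o + 1] is the
+            // failure transition, and the transitions themselves follow.
+            // Sparse states pack their byte classes four to a u32 ahead of
+            // the corresponding next state IDs.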
+ if kind == State::KIND_DENSE { + let next = u32tosid(repr[o + 2 + usize::from(class)]); + if next != NFA::FAIL { + return next; + } + } else if kind == State::KIND_ONE { + if class == repr[o].low_u16().high_u8() { + return u32tosid(repr[o + 2]); + } + } else { + // NOTE: I tried a SWAR technique in the loop below, but found + // it slower. See the 'swar' test in the tests for this module. + let trans_len = kind.as_usize(); + let classes_len = u32_len(trans_len); + let trans_offset = o + 2 + classes_len; + for (i, &chunk) in + repr[o + 2..][..classes_len].iter().enumerate() + { + let classes = chunk.to_ne_bytes(); + if classes[0] == class { + return u32tosid(repr[trans_offset + i * 4]); + } + if classes[1] == class { + return u32tosid(repr[trans_offset + i * 4 + 1]); + } + if classes[2] == class { + return u32tosid(repr[trans_offset + i * 4 + 2]); + } + if classes[3] == class { + return u32tosid(repr[trans_offset + i * 4 + 3]); + } + } + } + // For an anchored search, we never follow failure transitions + // because failure transitions lead us down a path to matching + // a *proper* suffix of the path we were on. Thus, it can only + // produce matches that appear after the beginning of the search. + if anchored.is_anchored() { + return NFA::DEAD; + } + sid = u32tosid(repr[o + 1]); + } + } + + #[inline(always)] + fn is_special(&self, sid: StateID) -> bool { + sid <= self.special.max_special_id + } + + #[inline(always)] + fn is_dead(&self, sid: StateID) -> bool { + sid == NFA::DEAD + } + + #[inline(always)] + fn is_match(&self, sid: StateID) -> bool { + !self.is_dead(sid) && sid <= self.special.max_match_id + } + + #[inline(always)] + fn is_start(&self, sid: StateID) -> bool { + sid == self.special.start_unanchored_id + || sid == self.special.start_anchored_id + } + + #[inline(always)] + fn match_kind(&self) -> MatchKind { + self.match_kind + } + + #[inline(always)] + fn patterns_len(&self) -> usize { + self.pattern_lens.len() + } + + #[inline(always)] + fn pattern_len(&self, pid: PatternID) -> usize { + self.pattern_lens[pid].as_usize() + } + + #[inline(always)] + fn min_pattern_len(&self) -> usize { + self.min_pattern_len + } + + #[inline(always)] + fn max_pattern_len(&self) -> usize { + self.max_pattern_len + } + + #[inline(always)] + fn match_len(&self, sid: StateID) -> usize { + State::match_len(self.alphabet_len, &self.repr[sid.as_usize()..]) + } + + #[inline(always)] + fn match_pattern(&self, sid: StateID, index: usize) -> PatternID { + State::match_pattern( + self.alphabet_len, + &self.repr[sid.as_usize()..], + index, + ) + } + + #[inline(always)] + fn memory_usage(&self) -> usize { + use core::mem::size_of; + + (self.repr.len() * size_of::()) + + (self.pattern_lens.len() * size_of::()) + + self.prefilter.as_ref().map_or(0, |p| p.memory_usage()) + } + + #[inline(always)] + fn prefilter(&self) -> Option<&Prefilter> { + self.prefilter.as_ref() + } +} + +impl core::fmt::Debug for NFA { + fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { + use crate::automaton::fmt_state_indicator; + + writeln!(f, "contiguous::NFA(")?; + let mut sid = NFA::DEAD; // always the first state and always present + loop { + let raw = &self.repr[sid.as_usize()..]; + if raw.is_empty() { + break; + } + let is_match = self.is_match(sid); + let state = State::read(self.alphabet_len, is_match, raw); + fmt_state_indicator(f, self, sid)?; + write!( + f, + "{:06}({:06}): ", + sid.as_usize(), + state.fail.as_usize() + )?; + state.fmt(f)?; + write!(f, "\n")?; + if self.is_match(sid) { + write!(f, " 
matches: ")?; + for i in 0..state.match_len { + let pid = State::match_pattern(self.alphabet_len, raw, i); + if i > 0 { + write!(f, ", ")?; + } + write!(f, "{}", pid.as_usize())?; + } + write!(f, "\n")?; + } + // The FAIL state doesn't actually have space for a state allocated + // for it, so we have to treat it as a special case. write below + // the DEAD state. + if sid == NFA::DEAD { + writeln!(f, "F {:06}:", NFA::FAIL.as_usize())?; + } + let len = State::len(self.alphabet_len, is_match, raw); + sid = StateID::new(sid.as_usize().checked_add(len).unwrap()) + .unwrap(); + } + writeln!(f, "match kind: {:?}", self.match_kind)?; + writeln!(f, "prefilter: {:?}", self.prefilter.is_some())?; + writeln!(f, "state length: {:?}", self.state_len)?; + writeln!(f, "pattern length: {:?}", self.patterns_len())?; + writeln!(f, "shortest pattern length: {:?}", self.min_pattern_len)?; + writeln!(f, "longest pattern length: {:?}", self.max_pattern_len)?; + writeln!(f, "alphabet length: {:?}", self.alphabet_len)?; + writeln!(f, "byte classes: {:?}", self.byte_classes)?; + writeln!(f, "memory usage: {:?}", self.memory_usage())?; + writeln!(f, ")")?; + + Ok(()) + } +} + +/// The "in memory" representation a single dense or sparse state. +/// +/// A `State`'s in memory representation is not ever actually materialized +/// during a search with a contiguous NFA. Doing so would be too slow. (Indeed, +/// the only time a `State` is actually constructed is in `Debug` impls.) +/// Instead, a `State` exposes a number of static methods for reading certain +/// things from the raw binary encoding of the state. +#[derive(Clone)] +struct State<'a> { + /// The state to transition to when 'class_to_next' yields a transition + /// to the FAIL state. + fail: StateID, + /// The number of pattern IDs in this state. For a non-match state, this is + /// always zero. Otherwise it is always bigger than zero. + match_len: usize, + /// The sparse or dense representation of the transitions for this state. + trans: StateTrans<'a>, +} + +/// The underlying representation of sparse or dense transitions for a state. +/// +/// Note that like `State`, we don't typically construct values of this type +/// during a search since we don't always need all values and thus would +/// represent a lot of wasteful work. +#[derive(Clone)] +enum StateTrans<'a> { + /// A sparse representation of transitions for a state, where only non-FAIL + /// transitions are explicitly represented. + Sparse { + classes: &'a [u32], + /// The transitions for this state, where each transition is packed + /// into a u32. The low 8 bits correspond to the byte class for the + /// transition, and the high 24 bits correspond to the next state ID. + /// + /// This packing is why the max state ID allowed for a contiguous + /// NFA is 2^24-1. + nexts: &'a [u32], + }, + /// A "one transition" state that is never a match state. + /// + /// These are by far the most common state, so we use a specialized and + /// very compact representation for them. + One { + /// The element of this NFA's alphabet that this transition is + /// defined for. + class: u8, + /// The state this should transition to if the current symbol is + /// equal to 'class'. + next: u32, + }, + /// A dense representation of transitions for a state, where all + /// transitions are explicitly represented, including transitions to the + /// FAIL state. + Dense { + /// A dense set of transitions to other states. 
The transitions may + /// point to a FAIL state, in which case, the search should try the + /// same transition lookup at 'fail'. + /// + /// Note that this is indexed by byte equivalence classes and not + /// byte values. That means 'class_to_next[byte]' is wrong and + /// 'class_to_next[classes.get(byte)]' is correct. The number of + /// transitions is always equivalent to 'classes.alphabet_len()'. + class_to_next: &'a [u32], + }, +} + +impl<'a> State<'a> { + /// The offset of where the "kind" of a state is stored. If it isn't one + /// of the sentinel values below, then it's a sparse state and the kind + /// corresponds to the number of transitions in the state. + const KIND: usize = 0; + + /// A sentinel value indicating that the state uses a dense representation. + const KIND_DENSE: u32 = 0xFF; + /// A sentinel value indicating that the state uses a special "one + /// transition" encoding. In practice, non-match states with one transition + /// make up the overwhelming majority of all states in any given + /// Aho-Corasick automaton, so we can specialize them using a very compact + /// representation. + const KIND_ONE: u32 = 0xFE; + + /// The maximum number of transitions to encode as a sparse state. Usually + /// states with a lot of transitions are either very rare, or occur near + /// the start state. In the latter case, they are probably dense already + /// anyway. In the former case, making them dense is fine because they're + /// rare. + /// + /// This needs to be small enough to permit each of the sentinel values for + /// 'KIND' above. Namely, a sparse state embeds the number of transitions + /// into the 'KIND'. Basically, "sparse" is a state kind too, but it's the + /// "else" branch. + /// + /// N.B. There isn't anything particularly magical about 127 here. I + /// just picked it because I figured any sparse state with this many + /// transitions is going to be exceptionally rare, and if it did have this + /// many transitions, then it would be quite slow to do a linear scan on + /// the transitions during a search anyway. + const MAX_SPARSE_TRANSITIONS: usize = 127; + + /// Remap state IDs in-place. + /// + /// `state` should be the the raw binary encoding of a state. (The start + /// of the slice must correspond to the start of the state, but the slice + /// may extend past the end of the encoding of the state.) + fn remap( + alphabet_len: usize, + old_to_new: &[StateID], + state: &mut [u32], + ) -> Result<(), BuildError> { + let kind = State::kind(state); + if kind == State::KIND_DENSE { + state[1] = old_to_new[state[1].as_usize()].as_u32(); + for next in state[2..][..alphabet_len].iter_mut() { + *next = old_to_new[next.as_usize()].as_u32(); + } + } else if kind == State::KIND_ONE { + state[1] = old_to_new[state[1].as_usize()].as_u32(); + state[2] = old_to_new[state[2].as_usize()].as_u32(); + } else { + let trans_len = State::sparse_trans_len(state); + let classes_len = u32_len(trans_len); + state[1] = old_to_new[state[1].as_usize()].as_u32(); + for next in state[2 + classes_len..][..trans_len].iter_mut() { + *next = old_to_new[next.as_usize()].as_u32(); + } + } + Ok(()) + } + + /// Returns the length, in number of u32s, of this state. + /// + /// This is useful for reading states consecutively, e.g., in the Debug + /// impl without needing to store a separate map from state index to state + /// identifier. + /// + /// `state` should be the the raw binary encoding of a state. 
(The start + /// of the slice must correspond to the start of the state, but the slice + /// may extend past the end of the encoding of the state.) + fn len(alphabet_len: usize, is_match: bool, state: &[u32]) -> usize { + let kind_len = 1; + let fail_len = 1; + let kind = State::kind(state); + let (classes_len, trans_len) = if kind == State::KIND_DENSE { + (0, alphabet_len) + } else if kind == State::KIND_ONE { + (0, 1) + } else { + let trans_len = State::sparse_trans_len(state); + let classes_len = u32_len(trans_len); + (classes_len, trans_len) + }; + let match_len = if !is_match { + 0 + } else if State::match_len(alphabet_len, state) == 1 { + // This is a special case because when there is one pattern ID for + // a match state, it is represented by a single u32 with its high + // bit set (which is impossible for a valid pattern ID). + 1 + } else { + // We add 1 to include the u32 that indicates the number of + // pattern IDs that follow. + 1 + State::match_len(alphabet_len, state) + }; + kind_len + fail_len + classes_len + trans_len + match_len + } + + /// Returns the kind of this state. + /// + /// This only includes the low byte. + #[inline(always)] + fn kind(state: &[u32]) -> u32 { + state[State::KIND] & 0xFF + } + + /// Get the number of sparse transitions in this state. This can never + /// be more than State::MAX_SPARSE_TRANSITIONS, as all states with more + /// transitions are encoded as dense states. + /// + /// `state` should be the the raw binary encoding of a sparse state. (The + /// start of the slice must correspond to the start of the state, but the + /// slice may extend past the end of the encoding of the state.) If this + /// isn't a sparse state, then the return value is unspecified. + /// + /// Do note that this is only legal to call on a sparse state. So for + /// example, "one transition" state is not a sparse state, so it would not + /// be legal to call this method on such a state. + #[inline(always)] + fn sparse_trans_len(state: &[u32]) -> usize { + (state[State::KIND] & 0xFF).as_usize() + } + + /// Returns the total number of matching pattern IDs in this state. Calling + /// this on a state that isn't a match results in unspecified behavior. + /// Thus, the returned number is never 0 for all correct calls. + /// + /// `state` should be the the raw binary encoding of a state. (The start + /// of the slice must correspond to the start of the state, but the slice + /// may extend past the end of the encoding of the state.) + #[inline(always)] + fn match_len(alphabet_len: usize, state: &[u32]) -> usize { + // We don't need to handle KIND_ONE here because it can never be a + // match state. + let packed = if State::kind(state) == State::KIND_DENSE { + let start = 2 + alphabet_len; + state[start].as_usize() + } else { + let trans_len = State::sparse_trans_len(state); + let classes_len = u32_len(trans_len); + let start = 2 + classes_len + trans_len; + state[start].as_usize() + }; + if packed & (1 << 31) == 0 { + packed + } else { + 1 + } + } + + /// Returns the pattern ID corresponding to the given index for the state + /// given. The `index` provided must be less than the number of pattern IDs + /// in this state. + /// + /// `state` should be the the raw binary encoding of a state. (The start of + /// the slice must correspond to the start of the state, but the slice may + /// extend past the end of the encoding of the state.) + /// + /// If the given state is not a match state or if the index is out of + /// bounds, then this has unspecified behavior. 
+ #[inline(always)] + fn match_pattern( + alphabet_len: usize, + state: &[u32], + index: usize, + ) -> PatternID { + // We don't need to handle KIND_ONE here because it can never be a + // match state. + let start = if State::kind(state) == State::KIND_DENSE { + 2 + alphabet_len + } else { + let trans_len = State::sparse_trans_len(state); + let classes_len = u32_len(trans_len); + 2 + classes_len + trans_len + }; + let packed = state[start]; + let pid = if packed & (1 << 31) == 0 { + state[start + 1 + index] + } else { + assert_eq!(0, index); + packed & !(1 << 31) + }; + PatternID::from_u32_unchecked(pid) + } + + /// Read a state's binary encoding to its in-memory representation. + /// + /// `alphabet_len` should be the total number of transitions defined for + /// dense states. + /// + /// `is_match` should be true if this state is a match state and false + /// otherwise. + /// + /// `state` should be the the raw binary encoding of a state. (The start + /// of the slice must correspond to the start of the state, but the slice + /// may extend past the end of the encoding of the state.) + fn read( + alphabet_len: usize, + is_match: bool, + state: &'a [u32], + ) -> State<'a> { + let kind = State::kind(state); + let match_len = + if !is_match { 0 } else { State::match_len(alphabet_len, state) }; + let (trans, fail) = if kind == State::KIND_DENSE { + let fail = StateID::from_u32_unchecked(state[1]); + let class_to_next = &state[2..][..alphabet_len]; + (StateTrans::Dense { class_to_next }, fail) + } else if kind == State::KIND_ONE { + let fail = StateID::from_u32_unchecked(state[1]); + let class = state[State::KIND].low_u16().high_u8(); + let next = state[2]; + (StateTrans::One { class, next }, fail) + } else { + let fail = StateID::from_u32_unchecked(state[1]); + let trans_len = State::sparse_trans_len(state); + let classes_len = u32_len(trans_len); + let classes = &state[2..][..classes_len]; + let nexts = &state[2 + classes_len..][..trans_len]; + (StateTrans::Sparse { classes, nexts }, fail) + }; + State { fail, match_len, trans } + } + + /// Encode the "old" state from a noncontiguous NFA to its binary + /// representation to the given `dst` slice. `classes` should be the byte + /// classes computed for the noncontiguous NFA that the given state came + /// from. + /// + /// This returns an error if `dst` became so big that `StateID`s can no + /// longer be created for new states. Otherwise, it returns the state ID of + /// the new state created. + /// + /// When `force_dense` is true, then the encoded state will always use a + /// dense format. Otherwise, the choice between dense and sparse will be + /// automatically chosen based on the old state. + fn write( + nnfa: &noncontiguous::NFA, + oldsid: StateID, + old: &noncontiguous::State, + classes: &ByteClasses, + dst: &mut Vec, + force_dense: bool, + ) -> Result { + let sid = StateID::new(dst.len()).map_err(|e| { + BuildError::state_id_overflow(StateID::MAX.as_u64(), e.attempted()) + })?; + let old_len = nnfa.iter_trans(oldsid).count(); + // For states with a lot of transitions, we might as well just make + // them dense. These kinds of hot states tend to be very rare, so we're + // okay with it. This also gives us more sentinels in the state's + // 'kind', which lets us create different state kinds to save on + // space. 
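+        // Note that MAX_SPARSE_TRANSITIONS (127) is strictly less than both
+        // KIND_ONE (0xFE) and KIND_DENSE (0xFF), so a sparse state's
+        // transition count in the low byte can never collide with either
+        // sentinel value.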
+ let kind = if force_dense || old_len > State::MAX_SPARSE_TRANSITIONS { + State::KIND_DENSE + } else if old_len == 1 && !old.is_match() { + State::KIND_ONE + } else { + // For a sparse state, the kind is just the number of transitions. + u32::try_from(old_len).unwrap() + }; + if kind == State::KIND_DENSE { + dst.push(kind); + dst.push(old.fail().as_u32()); + State::write_dense_trans(nnfa, oldsid, classes, dst)?; + } else if kind == State::KIND_ONE { + let t = nnfa.iter_trans(oldsid).next().unwrap(); + let class = u32::from(classes.get(t.byte())); + dst.push(kind | (class << 8)); + dst.push(old.fail().as_u32()); + dst.push(t.next().as_u32()); + } else { + dst.push(kind); + dst.push(old.fail().as_u32()); + State::write_sparse_trans(nnfa, oldsid, classes, dst)?; + } + // Now finally write the number of matches and the matches themselves. + if old.is_match() { + let matches_len = nnfa.iter_matches(oldsid).count(); + if matches_len == 1 { + let pid = nnfa.iter_matches(oldsid).next().unwrap().as_u32(); + assert_eq!(0, pid & (1 << 31)); + dst.push((1 << 31) | pid); + } else { + assert_eq!(0, matches_len & (1 << 31)); + dst.push(matches_len.as_u32()); + dst.extend(nnfa.iter_matches(oldsid).map(|pid| pid.as_u32())); + } + } + Ok(sid) + } + + /// Encode the "old" state transitions from a noncontiguous NFA to its + /// binary sparse representation to the given `dst` slice. `classes` should + /// be the byte classes computed for the noncontiguous NFA that the given + /// state came from. + /// + /// This returns an error if `dst` became so big that `StateID`s can no + /// longer be created for new states. + fn write_sparse_trans( + nnfa: &noncontiguous::NFA, + oldsid: StateID, + classes: &ByteClasses, + dst: &mut Vec, + ) -> Result<(), BuildError> { + let (mut chunk, mut len) = ([0; 4], 0); + for t in nnfa.iter_trans(oldsid) { + chunk[len] = classes.get(t.byte()); + len += 1; + if len == 4 { + dst.push(u32::from_ne_bytes(chunk)); + chunk = [0; 4]; + len = 0; + } + } + if len > 0 { + // In the case where the number of transitions isn't divisible + // by 4, the last u32 chunk will have some left over room. In + // this case, we "just" repeat the last equivalence class. By + // doing this, we know the leftover faux transitions will never + // be followed because if they were, it would have been followed + // prior to it in the last equivalence class. This saves us some + // branching in the search time state transition code. + let repeat = chunk[len - 1]; + while len < 4 { + chunk[len] = repeat; + len += 1; + } + dst.push(u32::from_ne_bytes(chunk)); + } + for t in nnfa.iter_trans(oldsid) { + dst.push(t.next().as_u32()); + } + Ok(()) + } + + /// Encode the "old" state transitions from a noncontiguous NFA to its + /// binary dense representation to the given `dst` slice. `classes` should + /// be the byte classes computed for the noncontiguous NFA that the given + /// state came from. + /// + /// This returns an error if `dst` became so big that `StateID`s can no + /// longer be created for new states. + fn write_dense_trans( + nnfa: &noncontiguous::NFA, + oldsid: StateID, + classes: &ByteClasses, + dst: &mut Vec, + ) -> Result<(), BuildError> { + // Our byte classes let us shrink the size of our dense states to the + // number of equivalence classes instead of just fixing it to 256. + // Any non-explicitly defined transition is just a transition to the + // FAIL state, so we fill that in first and then overwrite them with + // explicitly defined transitions. 
(Most states probably only have one + // or two explicitly defined transitions.) + // + // N.B. Remember that while building the contiguous NFA, we use state + // IDs from the noncontiguous NFA. It isn't until we've added all + // states that we go back and map noncontiguous IDs to contiguous IDs. + let start = dst.len(); + dst.extend( + core::iter::repeat(noncontiguous::NFA::FAIL.as_u32()) + .take(classes.alphabet_len()), + ); + assert!(start < dst.len(), "equivalence classes are never empty"); + for t in nnfa.iter_trans(oldsid) { + dst[start + usize::from(classes.get(t.byte()))] = + t.next().as_u32(); + } + Ok(()) + } + + /// Return an iterator over every explicitly defined transition in this + /// state. + fn transitions(&self) -> impl Iterator + '_ { + let mut i = 0; + core::iter::from_fn(move || match self.trans { + StateTrans::Sparse { classes, nexts } => { + if i >= nexts.len() { + return None; + } + let chunk = classes[i / 4]; + let class = chunk.to_ne_bytes()[i % 4]; + let next = StateID::from_u32_unchecked(nexts[i]); + i += 1; + Some((class, next)) + } + StateTrans::One { class, next } => { + if i == 0 { + i += 1; + Some((class, StateID::from_u32_unchecked(next))) + } else { + None + } + } + StateTrans::Dense { class_to_next } => { + if i >= class_to_next.len() { + return None; + } + let class = i.as_u8(); + let next = StateID::from_u32_unchecked(class_to_next[i]); + i += 1; + Some((class, next)) + } + }) + } +} + +impl<'a> core::fmt::Debug for State<'a> { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + use crate::{automaton::sparse_transitions, util::debug::DebugByte}; + + let it = sparse_transitions(self.transitions()) + // Writing out all FAIL transitions is quite noisy. Instead, we + // just require readers of the output to assume anything absent + // maps to the FAIL transition. + .filter(|&(_, _, sid)| sid != NFA::FAIL) + .enumerate(); + for (i, (start, end, sid)) in it { + if i > 0 { + write!(f, ", ")?; + } + if start == end { + write!(f, "{:?} => {:?}", DebugByte(start), sid.as_usize())?; + } else { + write!( + f, + "{:?}-{:?} => {:?}", + DebugByte(start), + DebugByte(end), + sid.as_usize() + )?; + } + } + Ok(()) + } +} + +/// A builder for configuring an Aho-Corasick contiguous NFA. +/// +/// This builder has a subset of the options available to a +/// [`AhoCorasickBuilder`](crate::AhoCorasickBuilder). Of the shared options, +/// their behavior is identical. +#[derive(Clone, Debug)] +pub struct Builder { + noncontiguous: noncontiguous::Builder, + dense_depth: usize, + byte_classes: bool, +} + +impl Default for Builder { + fn default() -> Builder { + Builder { + noncontiguous: noncontiguous::Builder::new(), + dense_depth: 2, + byte_classes: true, + } + } +} + +impl Builder { + /// Create a new builder for configuring an Aho-Corasick contiguous NFA. + pub fn new() -> Builder { + Builder::default() + } + + /// Build an Aho-Corasick contiguous NFA from the given iterator of + /// patterns. + /// + /// A builder may be reused to create more NFAs. + pub fn build(&self, patterns: I) -> Result + where + I: IntoIterator, + P: AsRef<[u8]>, + { + let nnfa = self.noncontiguous.build(patterns)?; + self.build_from_noncontiguous(&nnfa) + } + + /// Build an Aho-Corasick contiguous NFA from the given noncontiguous NFA. + /// + /// Note that when this method is used, only the `dense_depth` and + /// `byte_classes` settings on this builder are respected. The other + /// settings only apply to the initial construction of the Aho-Corasick + /// automaton. 
Since using this method requires that initial construction + /// has already completed, all settings impacting only initial construction + /// are no longer relevant. + pub fn build_from_noncontiguous( + &self, + nnfa: &noncontiguous::NFA, + ) -> Result { + debug!("building contiguous NFA"); + let byte_classes = if self.byte_classes { + nnfa.byte_classes().clone() + } else { + ByteClasses::singletons() + }; + let mut index_to_state_id = vec![NFA::DEAD; nnfa.states().len()]; + let mut nfa = NFA { + repr: vec![], + pattern_lens: nnfa.pattern_lens_raw().to_vec(), + state_len: nnfa.states().len(), + prefilter: nnfa.prefilter().map(|p| p.clone()), + match_kind: nnfa.match_kind(), + alphabet_len: byte_classes.alphabet_len(), + byte_classes, + min_pattern_len: nnfa.min_pattern_len(), + max_pattern_len: nnfa.max_pattern_len(), + // The special state IDs are set later. + special: Special::zero(), + }; + for (oldsid, state) in nnfa.states().iter().with_state_ids() { + // We don't actually encode a fail state since it isn't necessary. + // But we still want to make sure any FAIL ids are mapped + // correctly. + if oldsid == noncontiguous::NFA::FAIL { + index_to_state_id[oldsid] = NFA::FAIL; + continue; + } + let force_dense = state.depth().as_usize() < self.dense_depth; + let newsid = State::write( + nnfa, + oldsid, + state, + &nfa.byte_classes, + &mut nfa.repr, + force_dense, + )?; + index_to_state_id[oldsid] = newsid; + } + for &newsid in index_to_state_id.iter() { + if newsid == NFA::FAIL { + continue; + } + let state = &mut nfa.repr[newsid.as_usize()..]; + State::remap(nfa.alphabet_len, &index_to_state_id, state)?; + } + // Now that we've remapped all the IDs in our states, all that's left + // is remapping the special state IDs. + let remap = &index_to_state_id; + let old = nnfa.special(); + let new = &mut nfa.special; + new.max_special_id = remap[old.max_special_id]; + new.max_match_id = remap[old.max_match_id]; + new.start_unanchored_id = remap[old.start_unanchored_id]; + new.start_anchored_id = remap[old.start_anchored_id]; + debug!( + "contiguous NFA built, ", + nfa.state_len, + nfa.memory_usage(), + nfa.byte_classes.alphabet_len(), + ); + // The vectors can grow ~twice as big during construction because a + // Vec amortizes growth. But here, let's shrink things back down to + // what we actually need since we're never going to add more to it. + nfa.repr.shrink_to_fit(); + nfa.pattern_lens.shrink_to_fit(); + Ok(nfa) + } + + /// Set the desired match semantics. + /// + /// This only applies when using [`Builder::build`] and not + /// [`Builder::build_from_noncontiguous`]. + /// + /// See + /// [`AhoCorasickBuilder::match_kind`](crate::AhoCorasickBuilder::match_kind) + /// for more documentation and examples. + pub fn match_kind(&mut self, kind: MatchKind) -> &mut Builder { + self.noncontiguous.match_kind(kind); + self + } + + /// Enable ASCII-aware case insensitive matching. + /// + /// This only applies when using [`Builder::build`] and not + /// [`Builder::build_from_noncontiguous`]. + /// + /// See + /// [`AhoCorasickBuilder::ascii_case_insensitive`](crate::AhoCorasickBuilder::ascii_case_insensitive) + /// for more documentation and examples. + pub fn ascii_case_insensitive(&mut self, yes: bool) -> &mut Builder { + self.noncontiguous.ascii_case_insensitive(yes); + self + } + + /// Enable heuristic prefilter optimizations. + /// + /// This only applies when using [`Builder::build`] and not + /// [`Builder::build_from_noncontiguous`]. 
+ /// + /// See + /// [`AhoCorasickBuilder::prefilter`](crate::AhoCorasickBuilder::prefilter) + /// for more documentation and examples. + pub fn prefilter(&mut self, yes: bool) -> &mut Builder { + self.noncontiguous.prefilter(yes); + self + } + + /// Set the limit on how many states use a dense representation for their + /// transitions. Other states will generally use a sparse representation. + /// + /// See + /// [`AhoCorasickBuilder::dense_depth`](crate::AhoCorasickBuilder::dense_depth) + /// for more documentation and examples. + pub fn dense_depth(&mut self, depth: usize) -> &mut Builder { + self.dense_depth = depth; + self + } + + /// A debug setting for whether to attempt to shrink the size of the + /// automaton's alphabet or not. + /// + /// This should never be enabled unless you're debugging an automaton. + /// Namely, disabling byte classes makes transitions easier to reason + /// about, since they use the actual bytes instead of equivalence classes. + /// Disabling this confers no performance benefit at search time. + /// + /// See + /// [`AhoCorasickBuilder::byte_classes`](crate::AhoCorasickBuilder::byte_classes) + /// for more documentation and examples. + pub fn byte_classes(&mut self, yes: bool) -> &mut Builder { + self.byte_classes = yes; + self + } +} + +/// Computes the number of u32 values needed to represent one byte per the +/// number of transitions given. +fn u32_len(ntrans: usize) -> usize { + if ntrans % 4 == 0 { + ntrans >> 2 + } else { + (ntrans >> 2) + 1 + } +} + +#[cfg(test)] +mod tests { + // This test demonstrates a SWAR technique I tried in the sparse transition + // code inside of 'next_state'. Namely, sparse transitions work by + // iterating over u32 chunks, with each chunk containing up to 4 classes + // corresponding to 4 transitions. This SWAR technique lets us find a + // matching transition without converting the u32 to a [u8; 4]. + // + // It turned out to be a little slower unfortunately, which isn't too + // surprising, since this is likely a throughput oriented optimization. + // Loop unrolling doesn't really help us because the vast majority of + // states have very few transitions. + // + // Anyway, this code was a little tricky to write, so I converted it to a + // test in case someone figures out how to use it more effectively than + // I could. + // + // (This also only works on little endian. So big endian would need to be + // accounted for if we ever decided to use this I think.) 
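+    //
+    // For reference, the key identity is the classic "has zero byte" test:
+    // 'x.wrapping_sub(0x01010101) & !x & 0x80808080' is non-zero exactly when
+    // some byte of 'x' is zero. XORing a chunk of four packed byte classes
+    // with a broadcast of the needle class zeroes out any matching byte, so
+    // that test doubles as a "does this chunk contain the class" check.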
+ #[cfg(target_endian = "little")] + #[test] + fn swar() { + use super::*; + + fn has_zero_byte(x: u32) -> u32 { + const LO_U32: u32 = 0x01010101; + const HI_U32: u32 = 0x80808080; + + x.wrapping_sub(LO_U32) & !x & HI_U32 + } + + fn broadcast(b: u8) -> u32 { + (u32::from(b)) * (u32::MAX / 255) + } + + fn index_of(x: u32) -> usize { + let o = + (((x - 1) & 0x01010101).wrapping_mul(0x01010101) >> 24) - 1; + o.as_usize() + } + + let bytes: [u8; 4] = [b'1', b'A', b'a', b'z']; + let chunk = u32::from_ne_bytes(bytes); + + let needle = broadcast(b'1'); + assert_eq!(0, index_of(has_zero_byte(needle ^ chunk))); + let needle = broadcast(b'A'); + assert_eq!(1, index_of(has_zero_byte(needle ^ chunk))); + let needle = broadcast(b'a'); + assert_eq!(2, index_of(has_zero_byte(needle ^ chunk))); + let needle = broadcast(b'z'); + assert_eq!(3, index_of(has_zero_byte(needle ^ chunk))); + } +} diff --git a/vendor/aho-corasick/src/nfa/mod.rs b/vendor/aho-corasick/src/nfa/mod.rs new file mode 100644 index 00000000000000..93f4dc25c21f6c --- /dev/null +++ b/vendor/aho-corasick/src/nfa/mod.rs @@ -0,0 +1,40 @@ +/*! +Provides direct access to NFA implementations of Aho-Corasick. + +The principle characteristic of an NFA in this crate is that it may +transition through multiple states per byte of haystack. In Aho-Corasick +parlance, NFAs follow failure transitions during a search. In contrast, +a [`DFA`](crate::dfa::DFA) pre-computes all failure transitions during +compilation at the expense of a much bigger memory footprint. + +Currently, there are two NFA implementations provided: noncontiguous and +contiguous. The names reflect their internal representation, and consequently, +the trade offs associated with them: + +* A [`noncontiguous::NFA`] uses a separate allocation for every NFA state to +represent its transitions in a sparse format. This is ideal for building an +NFA, since it cheaply permits different states to have a different number of +transitions. A noncontiguous NFA is where the main Aho-Corasick construction +algorithm is implemented. All other Aho-Corasick implementations are built by +first constructing a noncontiguous NFA. +* A [`contiguous::NFA`] is uses a single allocation to represent all states, +while still encoding most states as sparse states but permitting states near +the starting state to have a dense representation. The dense representation +uses more memory, but permits computing transitions during a search more +quickly. By only making the most active states dense (the states near the +starting state), a contiguous NFA better balances memory usage with search +speed. The single contiguous allocation also uses less overhead per state and +enables compression tricks where most states only use 8 bytes of heap memory. + +When given the choice between these two, you almost always want to pick a +contiguous NFA. It takes only a little longer to build, but both its memory +usage and search speed are typically much better than a noncontiguous NFA. A +noncontiguous NFA is useful when prioritizing build times, or when there are +so many patterns that a contiguous NFA could not be built. (Currently, because +of both memory and search speed improvements, a contiguous NFA has a smaller +internal limit on the total number of NFA states it can represent. But you +would likely need to have hundreds of thousands or even millions of patterns +before you hit this limit.) 
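+
+As a quick, minimal sketch of that trade-off (mirroring the examples on each
+`NFA` type), note that both implementations expose the same
+[`Automaton`](crate::automaton::Automaton) trait and report identical matches;
+they differ in construction cost, memory usage and search speed:
+
+```
+use aho_corasick::{
+    automaton::Automaton,
+    nfa::{contiguous, noncontiguous},
+    Input, Match,
+};
+
+let patterns = &["b", "abc", "abcd"];
+let haystack = "abcd";
+
+let noncontig = noncontiguous::NFA::new(patterns).unwrap();
+let contig = contiguous::NFA::new(patterns).unwrap();
+// The same search yields the same match with either representation.
+assert_eq!(
+    Some(Match::must(0, 1..2)),
+    noncontig.try_find(&Input::new(haystack))?,
+);
+assert_eq!(
+    Some(Match::must(0, 1..2)),
+    contig.try_find(&Input::new(haystack))?,
+);
+# Ok::<(), Box<dyn std::error::Error>>(())
+```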
+*/ +pub mod contiguous; +pub mod noncontiguous; diff --git a/vendor/aho-corasick/src/nfa/noncontiguous.rs b/vendor/aho-corasick/src/nfa/noncontiguous.rs new file mode 100644 index 00000000000000..af32617c900745 --- /dev/null +++ b/vendor/aho-corasick/src/nfa/noncontiguous.rs @@ -0,0 +1,1762 @@ +/*! +Provides a noncontiguous NFA implementation of Aho-Corasick. + +This is a low-level API that generally only needs to be used in niche +circumstances. When possible, prefer using [`AhoCorasick`](crate::AhoCorasick) +instead of a noncontiguous NFA directly. Using an `NFA` directly is typically +only necessary when one needs access to the [`Automaton`] trait implementation. +*/ + +use alloc::{ + collections::{BTreeSet, VecDeque}, + vec, + vec::Vec, +}; + +use crate::{ + automaton::Automaton, + util::{ + alphabet::{ByteClassSet, ByteClasses}, + error::{BuildError, MatchError}, + prefilter::{self, opposite_ascii_case, Prefilter}, + primitives::{IteratorIndexExt, PatternID, SmallIndex, StateID}, + remapper::Remapper, + search::{Anchored, MatchKind}, + special::Special, + }, +}; + +/// A noncontiguous NFA implementation of Aho-Corasick. +/// +/// When possible, prefer using [`AhoCorasick`](crate::AhoCorasick) instead of +/// this type directly. Using an `NFA` directly is typically only necessary +/// when one needs access to the [`Automaton`] trait implementation. +/// +/// This NFA represents the "core" implementation of Aho-Corasick in this +/// crate. Namely, constructing this NFA involving building a trie and then +/// filling in the failure transitions between states, similar to what is +/// described in any standard textbook description of Aho-Corasick. +/// +/// In order to minimize heap usage and to avoid additional construction costs, +/// this implementation represents the transitions of all states as distinct +/// sparse memory allocations. This is where it gets its name from. That is, +/// this NFA has no contiguous memory allocation for its transition table. Each +/// state gets its own allocation. +/// +/// While the sparse representation keeps memory usage to somewhat reasonable +/// levels, it is still quite large and also results in somewhat mediocre +/// search performance. For this reason, it is almost always a good idea to +/// use a [`contiguous::NFA`](crate::nfa::contiguous::NFA) instead. It is +/// marginally slower to build, but has higher throughput and can sometimes use +/// an order of magnitude less memory. The main reason to use a noncontiguous +/// NFA is when you need the fastest possible construction time, or when a +/// contiguous NFA does not have the desired capacity. (The total number of NFA +/// states it can have is fewer than a noncontiguous NFA.) +/// +/// # Example +/// +/// This example shows how to build an `NFA` directly and use it to execute +/// [`Automaton::try_find`]: +/// +/// ``` +/// use aho_corasick::{ +/// automaton::Automaton, +/// nfa::noncontiguous::NFA, +/// Input, Match, +/// }; +/// +/// let patterns = &["b", "abc", "abcd"]; +/// let haystack = "abcd"; +/// +/// let nfa = NFA::new(patterns).unwrap(); +/// assert_eq!( +/// Some(Match::must(0, 1..2)), +/// nfa.try_find(&Input::new(haystack))?, +/// ); +/// # Ok::<(), Box>(()) +/// ``` +/// +/// It is also possible to implement your own version of `try_find`. See the +/// [`Automaton`] documentation for an example. +#[derive(Clone)] +pub struct NFA { + /// The match semantics built into this NFA. + match_kind: MatchKind, + /// A set of states. 
Each state defines its own transitions, a fail
+    /// transition and a set of indices corresponding to matches.
+    ///
+    /// The first state is always the fail state, which is used only as a
+    /// sentinel. Namely, in the final NFA, no transition into the fail state
+    /// exists. (Well, they do, but they aren't followed. Instead, the state's
+    /// failure transition is followed.)
+    ///
+    /// The second state (index 1) is always the dead state. Dead states are
+    /// in every automaton, but only used when leftmost-{first,longest} match
+    /// semantics are enabled. Specifically, they instruct search to stop
+    /// at specific points in order to report the correct match location. In
+    /// the standard Aho-Corasick construction, there are no transitions to
+    /// the dead state.
+    ///
+    /// The third state (index 2) is generally intended to be the starting or
+    /// "root" state.
+    states: Vec<State>,
+    /// Transitions stored in a sparse representation via a linked list.
+    ///
+    /// Each transition contains three pieces of information: the byte it
+    /// is defined for, the state it transitions to and a link to the next
+    /// transition in the same state (or `StateID::ZERO` if it is the last
+    /// transition).
+    ///
+    /// The first transition for each state is determined by `State::sparse`.
+    ///
+    /// Note that this contains a complete set of all transitions in this NFA,
+    /// including states that have a dense representation for transitions.
+    /// (Adding dense transitions for a state doesn't remove its sparse
+    /// transitions, since deleting transitions from this particular sparse
+    /// representation would be fairly expensive.)
+    sparse: Vec<Transition>,
+    /// Transitions stored in a dense representation.
+    ///
+    /// A state has a row in this table if and only if `State::dense` is
+    /// not equal to `StateID::ZERO`. When not zero, there are precisely
+    /// `NFA::byte_classes::alphabet_len()` entries beginning at `State::dense`
+    /// in this table.
+    ///
+    /// Generally a very small minority of states have a dense representation
+    /// since it uses so much memory.
+    dense: Vec<StateID>,
+    /// Matches stored in a linked list for each state.
+    ///
+    /// Like sparse transitions, each match has a link to the next match in the
+    /// state.
+    ///
+    /// The first match for each state is determined by `State::matches`.
+    matches: Vec<Match>,
+    /// The length, in bytes, of each pattern in this NFA. This slice is
+    /// indexed by `PatternID`.
+    ///
+    /// The number of entries in this vector corresponds to the total number of
+    /// patterns in this automaton.
+    pattern_lens: Vec<SmallIndex>,
+    /// A prefilter for quickly skipping to candidate matches, if pertinent.
+    prefilter: Option<Prefilter>,
+    /// A set of equivalence classes in terms of bytes. We compute this while
+    /// building the NFA, but don't use it in the NFA's states. Instead, we
+    /// use this for building the DFA. We store it on the NFA since it's easy
+    /// to compute while visiting the patterns.
+    byte_classes: ByteClasses,
+    /// The length, in bytes, of the shortest pattern in this automaton. This
+    /// information is useful for detecting whether an automaton matches the
+    /// empty string or not.
+    min_pattern_len: usize,
+    /// The length, in bytes, of the longest pattern in this automaton. This
+    /// information is useful for keeping correct buffer sizes when searching
+    /// on streams.
+    max_pattern_len: usize,
+    /// The information required to deduce which states are "special" in this
+    /// NFA.
+ /// + /// Since the DEAD and FAIL states are always the first two states and + /// there are only ever two start states (which follow all of the match + /// states), it follows that we can determine whether a state is a fail, + /// dead, match or start with just a few comparisons on the ID itself: + /// + /// is_dead(sid): sid == NFA::DEAD + /// is_fail(sid): sid == NFA::FAIL + /// is_match(sid): NFA::FAIL < sid && sid <= max_match_id + /// is_start(sid): sid == start_unanchored_id || sid == start_anchored_id + /// + /// Note that this only applies to the NFA after it has been constructed. + /// During construction, the start states are the first ones added and the + /// match states are inter-leaved with non-match states. Once all of the + /// states have been added, the states are shuffled such that the above + /// predicates hold. + special: Special, +} + +impl NFA { + /// Create a new Aho-Corasick noncontiguous NFA using the default + /// configuration. + /// + /// Use a [`Builder`] if you want to change the configuration. + pub fn new(patterns: I) -> Result + where + I: IntoIterator, + P: AsRef<[u8]>, + { + NFA::builder().build(patterns) + } + + /// A convenience method for returning a new Aho-Corasick noncontiguous NFA + /// builder. + /// + /// This usually permits one to just import the `NFA` type. + pub fn builder() -> Builder { + Builder::new() + } +} + +impl NFA { + /// The DEAD state is a sentinel state like the FAIL state. The DEAD state + /// instructs any search to stop and return any currently recorded match, + /// or no match otherwise. Generally speaking, it is impossible for an + /// unanchored standard search to enter a DEAD state. But an anchored + /// search can, and so to can a leftmost search. + /// + /// We put DEAD before FAIL so that DEAD is always 0. We repeat this + /// decision across the other Aho-Corasicm automata, so that DEAD + /// states there are always 0 too. It's not that we need all of the + /// implementations to agree, but rather, the contiguous NFA and the DFA + /// use a sort of "premultiplied" state identifier where the only state + /// whose ID is always known and constant is the first state. Subsequent + /// state IDs depend on how much space has already been used in the + /// transition table. + pub(crate) const DEAD: StateID = StateID::new_unchecked(0); + /// The FAIL state mostly just corresponds to the ID of any transition on a + /// state that isn't explicitly defined. When one transitions into the FAIL + /// state, one must follow the previous state's failure transition before + /// doing the next state lookup. In this way, FAIL is more of a sentinel + /// than a state that one actually transitions into. In particular, it is + /// never exposed in the `Automaton` interface. + pub(crate) const FAIL: StateID = StateID::new_unchecked(1); + + /// Returns the equivalence classes of bytes found while constructing + /// this NFA. + /// + /// Note that the NFA doesn't actually make use of these equivalence + /// classes. Instead, these are useful for building the DFA when desired. + pub(crate) fn byte_classes(&self) -> &ByteClasses { + &self.byte_classes + } + + /// Returns a slice containing the length of each pattern in this searcher. + /// It is indexed by `PatternID` and has length `NFA::patterns_len`. + /// + /// This is exposed for convenience when building a contiguous NFA. But it + /// can be reconstructed from the `Automaton` API if necessary. 
+ pub(crate) fn pattern_lens_raw(&self) -> &[SmallIndex] { + &self.pattern_lens + } + + /// Returns a slice of all states in this non-contiguous NFA. + pub(crate) fn states(&self) -> &[State] { + &self.states + } + + /// Returns the underlying "special" state information for this NFA. + pub(crate) fn special(&self) -> &Special { + &self.special + } + + /// Swaps the states at `id1` and `id2`. + /// + /// This does not update the transitions of any state to account for the + /// state swap. + pub(crate) fn swap_states(&mut self, id1: StateID, id2: StateID) { + self.states.swap(id1.as_usize(), id2.as_usize()); + } + + /// Re-maps all state IDs in this NFA according to the `map` function + /// given. + pub(crate) fn remap(&mut self, map: impl Fn(StateID) -> StateID) { + let alphabet_len = self.byte_classes.alphabet_len(); + for state in self.states.iter_mut() { + state.fail = map(state.fail); + let mut link = state.sparse; + while link != StateID::ZERO { + let t = &mut self.sparse[link]; + t.next = map(t.next); + link = t.link; + } + if state.dense != StateID::ZERO { + let start = state.dense.as_usize(); + for next in self.dense[start..][..alphabet_len].iter_mut() { + *next = map(*next); + } + } + } + } + + /// Iterate over all of the transitions for the given state ID. + pub(crate) fn iter_trans( + &self, + sid: StateID, + ) -> impl Iterator + '_ { + let mut link = self.states[sid].sparse; + core::iter::from_fn(move || { + if link == StateID::ZERO { + return None; + } + let t = self.sparse[link]; + link = t.link; + Some(t) + }) + } + + /// Iterate over all of the matches for the given state ID. + pub(crate) fn iter_matches( + &self, + sid: StateID, + ) -> impl Iterator + '_ { + let mut link = self.states[sid].matches; + core::iter::from_fn(move || { + if link == StateID::ZERO { + return None; + } + let m = self.matches[link]; + link = m.link; + Some(m.pid) + }) + } + + /// Return the link following the one given. If the one given is the last + /// link for the given state, then return `None`. + /// + /// If no previous link is given, then this returns the first link in the + /// state, if one exists. + /// + /// This is useful for manually iterating over the transitions in a single + /// state without borrowing the NFA. This permits mutating other parts of + /// the NFA during iteration. Namely, one can access the transition pointed + /// to by the link via `self.sparse[link]`. + fn next_link( + &self, + sid: StateID, + prev: Option, + ) -> Option { + let link = + prev.map_or(self.states[sid].sparse, |p| self.sparse[p].link); + if link == StateID::ZERO { + None + } else { + Some(link) + } + } + + /// Follow the transition for the given byte in the given state. If no such + /// transition exists, then the FAIL state ID is returned. + #[inline(always)] + fn follow_transition(&self, sid: StateID, byte: u8) -> StateID { + let s = &self.states[sid]; + // This is a special case that targets starting states and states + // near a start state. Namely, after the initial trie is constructed, + // we look for states close to the start state to convert to a dense + // representation for their transitions. This winds up using a lot more + // memory per state in exchange for faster transition lookups. But + // since we only do this for a small number of states (by default), the + // memory usage is usually minimal. + // + // This has *massive* benefit when executing searches because the + // unanchored starting state is by far the hottest state and is + // frequently visited. 
Moreover, the 'for' loop below that works + // decently on an actually sparse state is disastrous on a state that + // is nearly or completely dense. + if s.dense == StateID::ZERO { + self.follow_transition_sparse(sid, byte) + } else { + let class = usize::from(self.byte_classes.get(byte)); + self.dense[s.dense.as_usize() + class] + } + } + + /// Like `follow_transition`, but always uses the sparse representation. + #[inline(always)] + fn follow_transition_sparse(&self, sid: StateID, byte: u8) -> StateID { + for t in self.iter_trans(sid) { + if byte <= t.byte { + if byte == t.byte { + return t.next; + } + break; + } + } + NFA::FAIL + } + + /// Set the transition for the given byte to the state ID given. + /// + /// Note that one should not set transitions to the FAIL state. It is not + /// technically incorrect, but it wastes space. If a transition is not + /// defined, then it is automatically assumed to lead to the FAIL state. + fn add_transition( + &mut self, + prev: StateID, + byte: u8, + next: StateID, + ) -> Result<(), BuildError> { + if self.states[prev].dense != StateID::ZERO { + let dense = self.states[prev].dense; + let class = usize::from(self.byte_classes.get(byte)); + self.dense[dense.as_usize() + class] = next; + } + + let head = self.states[prev].sparse; + if head == StateID::ZERO || byte < self.sparse[head].byte { + let new_link = self.alloc_transition()?; + self.sparse[new_link] = Transition { byte, next, link: head }; + self.states[prev].sparse = new_link; + return Ok(()); + } else if byte == self.sparse[head].byte { + self.sparse[head].next = next; + return Ok(()); + } + + // We handled the only cases where the beginning of the transition + // chain needs to change. At this point, we now know that there is + // at least one entry in the transition chain and the byte for that + // transition is less than the byte for the transition we're adding. + let (mut link_prev, mut link_next) = (head, self.sparse[head].link); + while link_next != StateID::ZERO && byte > self.sparse[link_next].byte + { + link_prev = link_next; + link_next = self.sparse[link_next].link; + } + if link_next == StateID::ZERO || byte < self.sparse[link_next].byte { + let link = self.alloc_transition()?; + self.sparse[link] = Transition { byte, next, link: link_next }; + self.sparse[link_prev].link = link; + } else { + assert_eq!(byte, self.sparse[link_next].byte); + self.sparse[link_next].next = next; + } + Ok(()) + } + + /// This sets every possible transition (all 255 of them) for the given + /// state to the name `next` value. + /// + /// This is useful for efficiently initializing start/dead states. + /// + /// # Panics + /// + /// This requires that the state has no transitions added to it already. + /// If it has any transitions, then this panics. It will also panic if + /// the state has been densified prior to calling this. 
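+    ///
+    /// For orientation, the compiler below uses this in two ways (see
+    /// `add_dead_state_loop` and `init_unanchored_start_state`); a condensed
+    /// sketch of those call sites:
+    ///
+    /// ```ignore
+    /// // the DEAD state loops back to itself on every byte ...
+    /// self.nfa.init_full_state(NFA::DEAD, NFA::DEAD)?;
+    /// // ... while a start state is filled with FAIL transitions so that it
+    /// // can later be represented densely.
+    /// self.nfa.init_full_state(start_uid, NFA::FAIL)?;
+    /// ```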
+ fn init_full_state( + &mut self, + prev: StateID, + next: StateID, + ) -> Result<(), BuildError> { + assert_eq!( + StateID::ZERO, + self.states[prev].dense, + "state must not be dense yet" + ); + assert_eq!( + StateID::ZERO, + self.states[prev].sparse, + "state must have zero transitions" + ); + let mut prev_link = StateID::ZERO; + for byte in 0..=255 { + let new_link = self.alloc_transition()?; + self.sparse[new_link] = + Transition { byte, next, link: StateID::ZERO }; + if prev_link == StateID::ZERO { + self.states[prev].sparse = new_link; + } else { + self.sparse[prev_link].link = new_link; + } + prev_link = new_link; + } + Ok(()) + } + + /// Add a match for the given pattern ID to the state for the given ID. + fn add_match( + &mut self, + sid: StateID, + pid: PatternID, + ) -> Result<(), BuildError> { + let head = self.states[sid].matches; + let mut link = head; + while self.matches[link].link != StateID::ZERO { + link = self.matches[link].link; + } + let new_match_link = self.alloc_match()?; + self.matches[new_match_link].pid = pid; + if link == StateID::ZERO { + self.states[sid].matches = new_match_link; + } else { + self.matches[link].link = new_match_link; + } + Ok(()) + } + + /// Copy matches from the `src` state to the `dst` state. This is useful + /// when a match state can be reached via a failure transition. In which + /// case, you'll want to copy the matches (if any) from the state reached + /// by the failure transition to the original state you were at. + fn copy_matches( + &mut self, + src: StateID, + dst: StateID, + ) -> Result<(), BuildError> { + let head_dst = self.states[dst].matches; + let mut link_dst = head_dst; + while self.matches[link_dst].link != StateID::ZERO { + link_dst = self.matches[link_dst].link; + } + let mut link_src = self.states[src].matches; + while link_src != StateID::ZERO { + let new_match_link = + StateID::new(self.matches.len()).map_err(|e| { + BuildError::state_id_overflow( + StateID::MAX.as_u64(), + e.attempted(), + ) + })?; + self.matches.push(Match { + pid: self.matches[link_src].pid, + link: StateID::ZERO, + }); + if link_dst == StateID::ZERO { + self.states[dst].matches = new_match_link; + } else { + self.matches[link_dst].link = new_match_link; + } + + link_dst = new_match_link; + link_src = self.matches[link_src].link; + } + Ok(()) + } + + /// Create a new entry in `NFA::trans`, if there's room, and return that + /// entry's ID. If there's no room, then an error is returned. + fn alloc_transition(&mut self) -> Result { + let id = StateID::new(self.sparse.len()).map_err(|e| { + BuildError::state_id_overflow(StateID::MAX.as_u64(), e.attempted()) + })?; + self.sparse.push(Transition::default()); + Ok(id) + } + + /// Create a new entry in `NFA::matches`, if there's room, and return that + /// entry's ID. If there's no room, then an error is returned. + fn alloc_match(&mut self) -> Result { + let id = StateID::new(self.matches.len()).map_err(|e| { + BuildError::state_id_overflow(StateID::MAX.as_u64(), e.attempted()) + })?; + self.matches.push(Match::default()); + Ok(id) + } + + /// Create a new set of `N` transitions in this NFA's dense transition + /// table. The ID return corresponds to the index at which the `N` + /// transitions begin. So `id+0` is the first transition and `id+(N-1)` is + /// the last. + /// + /// `N` is determined via `NFA::byte_classes::alphabet_len`. 
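+    ///
+    /// As a sketch, a lookup in such a row (mirroring `follow_transition`
+    /// above) is just an index computation:
+    ///
+    /// ```ignore
+    /// let s = &self.states[sid];
+    /// let class = usize::from(self.byte_classes.get(byte));
+    /// let next = self.dense[s.dense.as_usize() + class];
+    /// ```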
+ fn alloc_dense_state(&mut self) -> Result { + let id = StateID::new(self.dense.len()).map_err(|e| { + BuildError::state_id_overflow(StateID::MAX.as_u64(), e.attempted()) + })?; + // We use FAIL because it's the correct default. If a state doesn't + // have a transition defined for every possible byte value, then the + // transition function should return NFA::FAIL. + self.dense.extend( + core::iter::repeat(NFA::FAIL) + .take(self.byte_classes.alphabet_len()), + ); + Ok(id) + } + + /// Allocate and add a fresh state to the underlying NFA and return its + /// ID (guaranteed to be one more than the ID of the previously allocated + /// state). If the ID would overflow `StateID`, then this returns an error. + fn alloc_state(&mut self, depth: usize) -> Result { + // This is OK because we error when building the trie if we see a + // pattern whose length cannot fit into a 'SmallIndex', and the longest + // possible depth corresponds to the length of the longest pattern. + let depth = SmallIndex::new(depth) + .expect("patterns longer than SmallIndex::MAX are not allowed"); + let id = StateID::new(self.states.len()).map_err(|e| { + BuildError::state_id_overflow(StateID::MAX.as_u64(), e.attempted()) + })?; + self.states.push(State { + sparse: StateID::ZERO, + dense: StateID::ZERO, + matches: StateID::ZERO, + fail: self.special.start_unanchored_id, + depth, + }); + Ok(id) + } +} + +// SAFETY: 'start_state' always returns a valid state ID, 'next_state' always +// returns a valid state ID given a valid state ID. We otherwise claim that +// all other methods are correct as well. +unsafe impl Automaton for NFA { + #[inline(always)] + fn start_state(&self, anchored: Anchored) -> Result { + match anchored { + Anchored::No => Ok(self.special.start_unanchored_id), + Anchored::Yes => Ok(self.special.start_anchored_id), + } + } + + #[inline(always)] + fn next_state( + &self, + anchored: Anchored, + mut sid: StateID, + byte: u8, + ) -> StateID { + // This terminates since: + // + // 1. state.fail never points to the FAIL state. + // 2. All state.fail values point to a state closer to the start state. + // 3. The start state has no transitions to the FAIL state. + loop { + let next = self.follow_transition(sid, byte); + if next != NFA::FAIL { + return next; + } + // For an anchored search, we never follow failure transitions + // because failure transitions lead us down a path to matching + // a *proper* suffix of the path we were on. Thus, it can only + // produce matches that appear after the beginning of the search. + if anchored.is_anchored() { + return NFA::DEAD; + } + sid = self.states[sid].fail(); + } + } + + #[inline(always)] + fn is_special(&self, sid: StateID) -> bool { + sid <= self.special.max_special_id + } + + #[inline(always)] + fn is_dead(&self, sid: StateID) -> bool { + sid == NFA::DEAD + } + + #[inline(always)] + fn is_match(&self, sid: StateID) -> bool { + // N.B. This returns true when sid==NFA::FAIL but that's okay because + // NFA::FAIL is not actually a valid state ID from the perspective of + // the Automaton trait. Namely, it is never returned by 'start_state' + // or by 'next_state'. So we don't need to care about it here. 
+ !self.is_dead(sid) && sid <= self.special.max_match_id + } + + #[inline(always)] + fn is_start(&self, sid: StateID) -> bool { + sid == self.special.start_unanchored_id + || sid == self.special.start_anchored_id + } + + #[inline(always)] + fn match_kind(&self) -> MatchKind { + self.match_kind + } + + #[inline(always)] + fn patterns_len(&self) -> usize { + self.pattern_lens.len() + } + + #[inline(always)] + fn pattern_len(&self, pid: PatternID) -> usize { + self.pattern_lens[pid].as_usize() + } + + #[inline(always)] + fn min_pattern_len(&self) -> usize { + self.min_pattern_len + } + + #[inline(always)] + fn max_pattern_len(&self) -> usize { + self.max_pattern_len + } + + #[inline(always)] + fn match_len(&self, sid: StateID) -> usize { + self.iter_matches(sid).count() + } + + #[inline(always)] + fn match_pattern(&self, sid: StateID, index: usize) -> PatternID { + self.iter_matches(sid).nth(index).unwrap() + } + + #[inline(always)] + fn memory_usage(&self) -> usize { + self.states.len() * core::mem::size_of::() + + self.sparse.len() * core::mem::size_of::() + + self.matches.len() * core::mem::size_of::() + + self.dense.len() * StateID::SIZE + + self.pattern_lens.len() * SmallIndex::SIZE + + self.prefilter.as_ref().map_or(0, |p| p.memory_usage()) + } + + #[inline(always)] + fn prefilter(&self) -> Option<&Prefilter> { + self.prefilter.as_ref() + } +} + +/// A representation of a sparse NFA state for an Aho-Corasick automaton. +/// +/// It contains the transitions to the next state, a failure transition for +/// cases where there exists no other transition for the current input byte +/// and the matches implied by visiting this state (if any). +#[derive(Clone, Debug)] +pub(crate) struct State { + /// A pointer to `NFA::trans` corresponding to the head of a linked list + /// containing all of the transitions for this state. + /// + /// This is `StateID::ZERO` if and only if this state has zero transitions. + sparse: StateID, + /// A pointer to a row of `N` transitions in `NFA::dense`. These + /// transitions correspond precisely to what is obtained by traversing + /// `sparse`, but permits constant time lookup. + /// + /// When this is zero (which is true for most states in the default + /// configuration), then this state has no dense representation. + /// + /// Note that `N` is equal to `NFA::byte_classes::alphabet_len()`. This is + /// typically much less than 256 (the maximum value). + dense: StateID, + /// A pointer to `NFA::matches` corresponding to the head of a linked list + /// containing all of the matches for this state. + /// + /// This is `StateID::ZERO` if and only if this state is not a match state. + matches: StateID, + /// The state that should be transitioned to if the current byte in the + /// haystack does not have a corresponding transition defined in this + /// state. + fail: StateID, + /// The depth of this state. Specifically, this is the distance from this + /// state to the starting state. (For the special sentinel states DEAD and + /// FAIL, their depth is always 0.) The depth of a starting state is 0. + /// + /// Note that depth is currently not used in this non-contiguous NFA. It + /// may in the future, but it is used in the contiguous NFA. Namely, it + /// permits an optimization where states near the starting state have their + /// transitions stored in a dense fashion, but all other states have their + /// transitions stored in a sparse fashion. (This non-contiguous NFA uses + /// a sparse representation for all states unconditionally.) 
In any case, + /// this is really the only convenient place to compute and store this + /// information, which we need when building the contiguous NFA. + depth: SmallIndex, +} + +impl State { + /// Return true if and only if this state is a match state. + pub(crate) fn is_match(&self) -> bool { + self.matches != StateID::ZERO + } + + /// Returns the failure transition for this state. + pub(crate) fn fail(&self) -> StateID { + self.fail + } + + /// Returns the depth of this state. That is, the number of transitions + /// this state is from the start state of the NFA. + pub(crate) fn depth(&self) -> SmallIndex { + self.depth + } +} + +/// A single transition in a non-contiguous NFA. +#[derive(Clone, Copy, Default)] +#[repr(packed)] +pub(crate) struct Transition { + byte: u8, + next: StateID, + link: StateID, +} + +impl Transition { + /// Return the byte for which this transition is defined. + pub(crate) fn byte(&self) -> u8 { + self.byte + } + + /// Return the ID of the state that this transition points to. + pub(crate) fn next(&self) -> StateID { + self.next + } + + /// Return the ID of the next transition. + fn link(&self) -> StateID { + self.link + } +} + +impl core::fmt::Debug for Transition { + fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { + write!( + f, + "Transition(byte: {:X?}, next: {:?}, link: {:?})", + self.byte, + self.next().as_usize(), + self.link().as_usize() + ) + } +} + +/// A single match in a non-contiguous NFA. +#[derive(Clone, Copy, Default)] +struct Match { + pid: PatternID, + link: StateID, +} + +impl Match { + /// Return the pattern ID for this match. + pub(crate) fn pattern(&self) -> PatternID { + self.pid + } + + /// Return the ID of the next match. + fn link(&self) -> StateID { + self.link + } +} + +impl core::fmt::Debug for Match { + fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { + write!( + f, + "Match(pid: {:?}, link: {:?})", + self.pattern().as_usize(), + self.link().as_usize() + ) + } +} + +/// A builder for configuring an Aho-Corasick noncontiguous NFA. +/// +/// This builder has a subset of the options available to a +/// [`AhoCorasickBuilder`](crate::AhoCorasickBuilder). Of the shared options, +/// their behavior is identical. +#[derive(Clone, Debug)] +pub struct Builder { + match_kind: MatchKind, + prefilter: bool, + ascii_case_insensitive: bool, + dense_depth: usize, +} + +impl Default for Builder { + fn default() -> Builder { + Builder { + match_kind: MatchKind::default(), + prefilter: true, + ascii_case_insensitive: false, + dense_depth: 3, + } + } +} + +impl Builder { + /// Create a new builder for configuring an Aho-Corasick noncontiguous NFA. + pub fn new() -> Builder { + Builder::default() + } + + /// Build an Aho-Corasick noncontiguous NFA from the given iterator of + /// patterns. + /// + /// A builder may be reused to create more NFAs. + pub fn build(&self, patterns: I) -> Result + where + I: IntoIterator, + P: AsRef<[u8]>, + { + debug!("building non-contiguous NFA"); + let nfa = Compiler::new(self)?.compile(patterns)?; + debug!( + "non-contiguous NFA built, ", + nfa.states.len(), + nfa.memory_usage() + ); + Ok(nfa) + } + + /// Set the desired match semantics. + /// + /// See + /// [`AhoCorasickBuilder::match_kind`](crate::AhoCorasickBuilder::match_kind) + /// for more documentation and examples. + pub fn match_kind(&mut self, kind: MatchKind) -> &mut Builder { + self.match_kind = kind; + self + } + + /// Enable ASCII-aware case insensitive matching. 
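+    ///
+    /// A brief sketch of the effect, using the crate's top-level public API
+    /// (the `AhoCorasick` name below comes from the crate root, not from this
+    /// module):
+    ///
+    /// ```ignore
+    /// use aho_corasick::AhoCorasick;
+    ///
+    /// let ac = AhoCorasick::builder()
+    ///     .ascii_case_insensitive(true)
+    ///     .build(&["FOO"])
+    ///     .unwrap();
+    /// assert!(ac.is_match("foobar"));
+    /// ```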
+ /// + /// See + /// [`AhoCorasickBuilder::ascii_case_insensitive`](crate::AhoCorasickBuilder::ascii_case_insensitive) + /// for more documentation and examples. + pub fn ascii_case_insensitive(&mut self, yes: bool) -> &mut Builder { + self.ascii_case_insensitive = yes; + self + } + + /// Set the limit on how many states use a dense representation for their + /// transitions. Other states will generally use a sparse representation. + /// + /// See + /// [`AhoCorasickBuilder::dense_depth`](crate::AhoCorasickBuilder::dense_depth) + /// for more documentation and examples. + pub fn dense_depth(&mut self, depth: usize) -> &mut Builder { + self.dense_depth = depth; + self + } + + /// Enable heuristic prefilter optimizations. + /// + /// See + /// [`AhoCorasickBuilder::prefilter`](crate::AhoCorasickBuilder::prefilter) + /// for more documentation and examples. + pub fn prefilter(&mut self, yes: bool) -> &mut Builder { + self.prefilter = yes; + self + } +} + +/// A compiler uses a builder configuration and builds up the NFA formulation +/// of an Aho-Corasick automaton. This roughly corresponds to the standard +/// formulation described in textbooks, with some tweaks to support leftmost +/// searching. +#[derive(Debug)] +struct Compiler<'a> { + builder: &'a Builder, + prefilter: prefilter::Builder, + nfa: NFA, + byteset: ByteClassSet, +} + +impl<'a> Compiler<'a> { + fn new(builder: &'a Builder) -> Result, BuildError> { + let prefilter = prefilter::Builder::new(builder.match_kind) + .ascii_case_insensitive(builder.ascii_case_insensitive); + Ok(Compiler { + builder, + prefilter, + nfa: NFA { + match_kind: builder.match_kind, + states: vec![], + sparse: vec![], + dense: vec![], + matches: vec![], + pattern_lens: vec![], + prefilter: None, + byte_classes: ByteClasses::singletons(), + min_pattern_len: usize::MAX, + max_pattern_len: 0, + special: Special::zero(), + }, + byteset: ByteClassSet::empty(), + }) + } + + fn compile(mut self, patterns: I) -> Result + where + I: IntoIterator, + P: AsRef<[u8]>, + { + // Add dummy transition/match links, so that no valid link will point + // to another link at index 0. + self.nfa.sparse.push(Transition::default()); + self.nfa.matches.push(Match::default()); + // Add a dummy dense transition so that no states can have dense==0 + // represent a valid pointer to dense transitions. This permits + // dense==0 to be a sentinel indicating "no dense transitions." + self.nfa.dense.push(NFA::DEAD); + // the dead state, only used for leftmost and fixed to id==0 + self.nfa.alloc_state(0)?; + // the fail state, which is never entered and fixed to id==1 + self.nfa.alloc_state(0)?; + // unanchored start state, initially fixed to id==2 but later shuffled + // to appear after all non-start match states. + self.nfa.special.start_unanchored_id = self.nfa.alloc_state(0)?; + // anchored start state, initially fixed to id==3 but later shuffled + // to appear after unanchored start state. + self.nfa.special.start_anchored_id = self.nfa.alloc_state(0)?; + // Initialize the unanchored starting state in order to make it dense, + // and thus make transition lookups on this state faster. + self.init_unanchored_start_state()?; + // Set all transitions on the DEAD state to point to itself. This way, + // the DEAD state can never be escaped. It MUST be used as a sentinel + // in any correct search. + self.add_dead_state_loop()?; + // Build the base trie from the given patterns. + self.build_trie(patterns)?; + self.nfa.states.shrink_to_fit(); + // Turn our set of bytes into equivalent classes. 
This NFA + // implementation uses byte classes only for states that use a dense + // representation of transitions. (And that's why this comes before + // `self.densify()`, as the byte classes need to be set first.) + self.nfa.byte_classes = self.byteset.byte_classes(); + // Add transitions (and maybe matches) to the anchored starting state. + // The anchored starting state is used for anchored searches. The only + // mechanical difference between it and the unanchored start state is + // that missing transitions map to the DEAD state instead of the FAIL + // state. + self.set_anchored_start_state()?; + // Rewrite transitions to the FAIL state on the unanchored start state + // as self-transitions. This keeps the start state active at all times. + self.add_unanchored_start_state_loop(); + // Make some (possibly zero) states use a dense representation for + // transitions. It's important to do this right after the states + // and non-failure transitions are solidified. That way, subsequent + // accesses (particularly `fill_failure_transitions`) will benefit from + // the faster transition lookup in densified states. + self.densify()?; + // The meat of the Aho-Corasick algorithm: compute and write failure + // transitions. i.e., the state to move to when a transition isn't + // defined in the current state. These are epsilon transitions and thus + // make this formulation an NFA. + self.fill_failure_transitions()?; + // Handle a special case under leftmost semantics when at least one + // of the patterns is the empty string. + self.close_start_state_loop_for_leftmost(); + // Shuffle states so that we have DEAD, FAIL, MATCH, ..., START, START, + // NON-MATCH, ... This permits us to very quickly query the type of + // the state we're currently in during a search. + self.shuffle(); + self.nfa.prefilter = self.prefilter.build(); + // Store the maximum ID of all *relevant* special states. Start states + // are only relevant when we have a prefilter, otherwise, there is zero + // reason to care about whether a state is a start state or not during + // a search. Indeed, without a prefilter, we are careful to explicitly + // NOT care about start states, otherwise the search can ping pong + // between the unrolled loop and the handling of special-status states + // and destroy perf. + self.nfa.special.max_special_id = if self.nfa.prefilter.is_some() { + // Why the anchored starting state? Because we always put it + // after the unanchored starting state and it is therefore the + // maximum. Why put unanchored followed by anchored? No particular + // reason, but that's how the states are logically organized in the + // Thompson NFA implementation found in regex-automata. ¯\_(ツ)_/¯ + self.nfa.special.start_anchored_id + } else { + self.nfa.special.max_match_id + }; + self.nfa.sparse.shrink_to_fit(); + self.nfa.dense.shrink_to_fit(); + self.nfa.matches.shrink_to_fit(); + self.nfa.pattern_lens.shrink_to_fit(); + Ok(self.nfa) + } + + /// This sets up the initial prefix trie that makes up the Aho-Corasick + /// automaton. Effectively, it creates the basic structure of the + /// automaton, where every pattern given has a path from the start state to + /// the end of the pattern. 
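+    ///
+    /// One consequence of how the trie is built (sketched here with the
+    /// crate's top-level public API rather than this module): under
+    /// leftmost-first semantics, a pattern that has an earlier pattern as a
+    /// prefix is never added to the trie and so can never match, while
+    /// leftmost-longest keeps it:
+    ///
+    /// ```ignore
+    /// use aho_corasick::{AhoCorasick, MatchKind};
+    ///
+    /// let ac = AhoCorasick::builder()
+    ///     .match_kind(MatchKind::LeftmostFirst)
+    ///     .build(&["foo", "foobar"])
+    ///     .unwrap();
+    /// assert_eq!(0, ac.find("foobar").unwrap().pattern().as_usize());
+    ///
+    /// let ac = AhoCorasick::builder()
+    ///     .match_kind(MatchKind::LeftmostLongest)
+    ///     .build(&["foo", "foobar"])
+    ///     .unwrap();
+    /// assert_eq!(1, ac.find("foobar").unwrap().pattern().as_usize());
+    /// ```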
+ fn build_trie(&mut self, patterns: I) -> Result<(), BuildError> + where + I: IntoIterator, + P: AsRef<[u8]>, + { + 'PATTERNS: for (i, pat) in patterns.into_iter().enumerate() { + let pid = PatternID::new(i).map_err(|e| { + BuildError::pattern_id_overflow( + PatternID::MAX.as_u64(), + e.attempted(), + ) + })?; + let pat = pat.as_ref(); + let patlen = SmallIndex::new(pat.len()) + .map_err(|_| BuildError::pattern_too_long(pid, pat.len()))?; + self.nfa.min_pattern_len = + core::cmp::min(self.nfa.min_pattern_len, pat.len()); + self.nfa.max_pattern_len = + core::cmp::max(self.nfa.max_pattern_len, pat.len()); + assert_eq!( + i, + self.nfa.pattern_lens.len(), + "expected number of patterns to match pattern ID" + ); + self.nfa.pattern_lens.push(patlen); + // We add the pattern to the prefilter here because the pattern + // ID in the prefilter is determined with respect to the patterns + // added to the prefilter. That is, it isn't the ID we have here, + // but the one determined by its own accounting of patterns. + // To ensure they line up, we add every pattern we see to the + // prefilter, even if some patterns ultimately are impossible to + // match (in leftmost-first semantics specifically). + // + // Another way of doing this would be to expose an API in the + // prefilter to permit setting your own pattern IDs. Or to just use + // our own map and go between them. But this case is sufficiently + // rare that we don't bother and just make sure they're in sync. + if self.builder.prefilter { + self.prefilter.add(pat); + } + + let mut prev = self.nfa.special.start_unanchored_id; + let mut saw_match = false; + for (depth, &b) in pat.iter().enumerate() { + // When leftmost-first match semantics are requested, we + // specifically stop adding patterns when a previously added + // pattern is a prefix of it. We avoid adding it because + // leftmost-first semantics imply that the pattern can never + // match. This is not just an optimization to save space! It + // is necessary for correctness. In fact, this is the only + // difference in the automaton between the implementations for + // leftmost-first and leftmost-longest. + saw_match = saw_match || self.nfa.states[prev].is_match(); + if self.builder.match_kind.is_leftmost_first() && saw_match { + // Skip to the next pattern immediately. This avoids + // incorrectly adding a match after this loop terminates. + continue 'PATTERNS; + } + + // Add this byte to our equivalence classes. These don't + // get used while building the trie, but other Aho-Corasick + // implementations may use them. + self.byteset.set_range(b, b); + if self.builder.ascii_case_insensitive { + let b = opposite_ascii_case(b); + self.byteset.set_range(b, b); + } + + // If the transition from prev using the current byte already + // exists, then just move through it. Otherwise, add a new + // state. We track the depth here so that we can determine + // how to represent transitions. States near the start state + // use a dense representation that uses more memory but is + // faster. Other states use a sparse representation that uses + // less memory but is slower. + let next = self.nfa.follow_transition(prev, b); + if next != NFA::FAIL { + prev = next; + } else { + let next = self.nfa.alloc_state(depth)?; + self.nfa.add_transition(prev, b, next)?; + if self.builder.ascii_case_insensitive { + let b = opposite_ascii_case(b); + self.nfa.add_transition(prev, b, next)?; + } + prev = next; + } + } + // Once the pattern has been added, log the match in the final + // state that it reached. 
+ self.nfa.add_match(prev, pid)?; + } + Ok(()) + } + + /// This routine creates failure transitions according to the standard + /// textbook formulation of the Aho-Corasick algorithm, with a couple small + /// tweaks to support "leftmost" semantics. + /// + /// Building failure transitions is the most interesting part of building + /// the Aho-Corasick automaton, because they are what allow searches to + /// be performed in linear time. Specifically, a failure transition is + /// a single transition associated with each state that points back to + /// the longest proper suffix of the pattern being searched. The failure + /// transition is followed whenever there exists no transition on the + /// current state for the current input byte. If there is no other proper + /// suffix, then the failure transition points back to the starting state. + /// + /// For example, let's say we built an Aho-Corasick automaton with the + /// following patterns: 'abcd' and 'cef'. The trie looks like this: + /// + /// ```ignore + /// a - S1 - b - S2 - c - S3 - d - S4* + /// / + /// S0 - c - S5 - e - S6 - f - S7* + /// ``` + /// + /// At this point, it should be fairly straight-forward to see how this + /// trie can be used in a simplistic way. At any given position in the + /// text we're searching (called the "subject" string), all we need to do + /// is follow the transitions in the trie by consuming one transition for + /// each byte in the subject string. If we reach a match state, then we can + /// report that location as a match. + /// + /// The trick comes when searching a subject string like 'abcef'. We'll + /// initially follow the transition from S0 to S1 and wind up in S3 after + /// observng the 'c' byte. At this point, the next byte is 'e' but state + /// S3 has no transition for 'e', so the search fails. We then would need + /// to restart the search at the next position in 'abcef', which + /// corresponds to 'b'. The match would fail, but the next search starting + /// at 'c' would finally succeed. The problem with this approach is that + /// we wind up searching the subject string potentially many times. In + /// effect, this makes the algorithm have worst case `O(n * m)` complexity, + /// where `n ~ len(subject)` and `m ~ len(all patterns)`. We would instead + /// like to achieve a `O(n + m)` worst case complexity. + /// + /// This is where failure transitions come in. Instead of dying at S3 in + /// the first search, the automaton can instruct the search to move to + /// another part of the automaton that corresponds to a suffix of what + /// we've seen so far. Recall that we've seen 'abc' in the subject string, + /// and the automaton does indeed have a non-empty suffix, 'c', that could + /// potentially lead to another match. Thus, the actual Aho-Corasick + /// automaton for our patterns in this case looks like this: + /// + /// ```ignore + /// a - S1 - b - S2 - c - S3 - d - S4* + /// / / + /// / ---------------- + /// / / + /// S0 - c - S5 - e - S6 - f - S7* + /// ``` + /// + /// That is, we have a failure transition from S3 to S5, which is followed + /// exactly in cases when we are in state S3 but see any byte other than + /// 'd' (that is, we've "failed" to find a match in this portion of our + /// trie). We know we can transition back to S5 because we've already seen + /// a 'c' byte, so we don't need to re-scan it. We can then pick back up + /// with the search starting at S5 and complete our match. + /// + /// Adding failure transitions to a trie is fairly simple, but subtle. 
The + /// key issue is that you might have multiple failure transition that you + /// need to follow. For example, look at the trie for the patterns + /// 'abcd', 'b', 'bcd' and 'cd': + /// + /// ```ignore + /// - a - S1 - b - S2* - c - S3 - d - S4* + /// / / / + /// / ------- ------- + /// / / / + /// S0 --- b - S5* - c - S6 - d - S7* + /// \ / + /// \ -------- + /// \ / + /// - c - S8 - d - S9* + /// ``` + /// + /// The failure transitions for this trie are defined from S2 to S5, + /// S3 to S6 and S6 to S8. Moreover, state S2 needs to track that it + /// corresponds to a match, since its failure transition to S5 is itself + /// a match state. + /// + /// Perhaps simplest way to think about adding these failure transitions + /// is recursively. That is, if you know the failure transitions for every + /// possible previous state that could be visited (e.g., when computing the + /// failure transition for S3, you already know the failure transitions + /// for S0, S1 and S2), then you can simply follow the failure transition + /// of the previous state and check whether the incoming transition is + /// defined after following the failure transition. + /// + /// For example, when determining the failure state for S3, by our + /// assumptions, we already know that there is a failure transition from + /// S2 (the previous state) to S5. So we follow that transition and check + /// whether the transition connecting S2 to S3 is defined. Indeed, it is, + /// as there is a transition from S5 to S6 for the byte 'c'. If no such + /// transition existed, we could keep following the failure transitions + /// until we reach the start state, which is the failure transition for + /// every state that has no corresponding proper suffix. + /// + /// We don't actually use recursion to implement this, but instead, use a + /// breadth first search of the automaton. Our base case is the start + /// state, whose failure transition is just a transition to itself. + /// + /// When building a leftmost automaton, we proceed as above, but only + /// include a subset of failure transitions. Namely, we omit any failure + /// transitions that appear after a match state in the trie. This is + /// because failure transitions always point back to a proper suffix of + /// what has been seen so far. Thus, following a failure transition after + /// a match implies looking for a match that starts after the one that has + /// already been seen, which is of course therefore not the leftmost match. + /// + /// N.B. I came up with this algorithm on my own, and after scouring all of + /// the other AC implementations I know of (Perl, Snort, many on GitHub). + /// I couldn't find any that implement leftmost semantics like this. + /// Perl of course needs leftmost-first semantics, but they implement it + /// with a seeming hack at *search* time instead of encoding it into the + /// automaton. There are also a couple Java libraries that support leftmost + /// longest semantics, but they do it by building a queue of matches at + /// search time, which is even worse than what Perl is doing. ---AG + fn fill_failure_transitions(&mut self) -> Result<(), BuildError> { + let is_leftmost = self.builder.match_kind.is_leftmost(); + let start_uid = self.nfa.special.start_unanchored_id; + // Initialize the queue for breadth first search with all transitions + // out of the start state. We handle the start state specially because + // we only want to follow non-self transitions. 
If we followed self + // transitions, then this would never terminate. + let mut queue = VecDeque::new(); + let mut seen = self.queued_set(); + let mut prev_link = None; + while let Some(link) = self.nfa.next_link(start_uid, prev_link) { + prev_link = Some(link); + let t = self.nfa.sparse[link]; + + // Skip anything we've seen before and any self-transitions on the + // start state. + if start_uid == t.next() || seen.contains(t.next) { + continue; + } + queue.push_back(t.next); + seen.insert(t.next); + // Under leftmost semantics, if a state immediately following + // the start state is a match state, then we never want to + // follow its failure transition since the failure transition + // necessarily leads back to the start state, which we never + // want to do for leftmost matching after a match has been + // found. + // + // We apply the same logic to non-start states below as well. + if is_leftmost && self.nfa.states[t.next].is_match() { + self.nfa.states[t.next].fail = NFA::DEAD; + } + } + while let Some(id) = queue.pop_front() { + let mut prev_link = None; + while let Some(link) = self.nfa.next_link(id, prev_link) { + prev_link = Some(link); + let t = self.nfa.sparse[link]; + + if seen.contains(t.next) { + // The only way to visit a duplicate state in a transition + // list is when ASCII case insensitivity is enabled. In + // this case, we want to skip it since it's redundant work. + // But it would also end up duplicating matches, which + // results in reporting duplicate matches in some cases. + // See the 'acasei010' regression test. + continue; + } + queue.push_back(t.next); + seen.insert(t.next); + + // As above for start states, under leftmost semantics, once + // we see a match all subsequent states should have no failure + // transitions because failure transitions always imply looking + // for a match that is a suffix of what has been seen so far + // (where "seen so far" corresponds to the string formed by + // following the transitions from the start state to the + // current state). Under leftmost semantics, we specifically do + // not want to allow this to happen because we always want to + // report the match found at the leftmost position. + // + // The difference between leftmost-first and leftmost-longest + // occurs previously while we build the trie. For + // leftmost-first, we simply omit any entries that would + // otherwise require passing through a match state. + // + // Note that for correctness, the failure transition has to be + // set to the dead state for ALL states following a match, not + // just the match state itself. However, by setting the failure + // transition to the dead state on all match states, the dead + // state will automatically propagate to all subsequent states + // via the failure state computation below. + if is_leftmost && self.nfa.states[t.next].is_match() { + self.nfa.states[t.next].fail = NFA::DEAD; + continue; + } + let mut fail = self.nfa.states[id].fail; + while self.nfa.follow_transition(fail, t.byte) == NFA::FAIL { + fail = self.nfa.states[fail].fail; + } + fail = self.nfa.follow_transition(fail, t.byte); + self.nfa.states[t.next].fail = fail; + self.nfa.copy_matches(fail, t.next)?; + } + // If the start state is a match state, then this automaton can + // match the empty string. This implies all states are match states + // since every position matches the empty string, so copy the + // matches from the start state to every state. 
Strictly speaking, + // this is only necessary for overlapping matches since each + // non-empty non-start match state needs to report empty matches + // in addition to its own. For the non-overlapping case, such + // states only report the first match, which is never empty since + // it isn't a start state. + if !is_leftmost { + self.nfa + .copy_matches(self.nfa.special.start_unanchored_id, id)?; + } + } + Ok(()) + } + + /// Shuffle the states so that they appear in this sequence: + /// + /// DEAD, FAIL, MATCH..., START, START, NON-MATCH... + /// + /// The idea here is that if we know how special states are laid out in our + /// transition table, then we can determine what "kind" of state we're in + /// just by comparing our current state ID with a particular value. In this + /// way, we avoid doing extra memory lookups. + /// + /// Before shuffling begins, our states look something like this: + /// + /// DEAD, FAIL, START, START, (MATCH | NON-MATCH)... + /// + /// So all we need to do is move all of the MATCH states so that they + /// all appear before any NON-MATCH state, like so: + /// + /// DEAD, FAIL, START, START, MATCH... NON-MATCH... + /// + /// Then it's just a simple matter of swapping the two START states with + /// the last two MATCH states. + /// + /// (This is the same technique used for fully compiled DFAs in + /// regex-automata.) + fn shuffle(&mut self) { + let old_start_uid = self.nfa.special.start_unanchored_id; + let old_start_aid = self.nfa.special.start_anchored_id; + assert!(old_start_uid < old_start_aid); + assert_eq!( + 3, + old_start_aid.as_usize(), + "anchored start state should be at index 3" + ); + // We implement shuffling by a sequence of pairwise swaps of states. + // Since we have a number of things referencing states via their + // IDs and swapping them changes their IDs, we need to record every + // swap we make so that we can remap IDs. The remapper handles this + // book-keeping for us. + let mut remapper = Remapper::new(&self.nfa, 0); + // The way we proceed here is by moving all match states so that + // they directly follow the start states. So it will go: DEAD, FAIL, + // START-UNANCHORED, START-ANCHORED, MATCH, ..., NON-MATCH, ... + // + // To do that, we proceed forward through all states after + // START-ANCHORED and swap match states so that they appear before all + // non-match states. + let mut next_avail = StateID::from(4u8); + for i in next_avail.as_usize()..self.nfa.states.len() { + let sid = StateID::new(i).unwrap(); + if !self.nfa.states[sid].is_match() { + continue; + } + remapper.swap(&mut self.nfa, sid, next_avail); + // The key invariant here is that only non-match states exist + // between 'next_avail' and 'sid' (with them being potentially + // equivalent). Thus, incrementing 'next_avail' by 1 is guaranteed + // to land on the leftmost non-match state. (Unless 'next_avail' + // and 'sid' are equivalent, in which case, a swap will occur but + // it is a no-op.) + next_avail = StateID::new(next_avail.one_more()).unwrap(); + } + // Now we'd like to move the start states to immediately following the + // match states. (The start states may themselves be match states, but + // we'll handle that later.) We arrange the states this way so that we + // don't necessarily need to check whether a state is a start state or + // not before checking whether a state is a match state. 
For example, + // we'd like to be able to write this as our state machine loop: + // + // sid = start() + // for byte in haystack: + // sid = next(sid, byte) + // if sid <= nfa.max_start_id: + // if sid <= nfa.max_dead_id: + // # search complete + // elif sid <= nfa.max_match_id: + // # found match + // + // The important context here is that we might not want to look for + // start states at all. Namely, if a searcher doesn't have a prefilter, + // then there is no reason to care about whether we're in a start state + // or not. And indeed, if we did check for it, this very hot loop would + // ping pong between the special state handling and the main state + // transition logic. This in turn stalls the CPU by killing branch + // prediction. + // + // So essentially, we really want to be able to "forget" that start + // states even exist and this is why we put them at the end. + let new_start_aid = + StateID::new(next_avail.as_usize().checked_sub(1).unwrap()) + .unwrap(); + remapper.swap(&mut self.nfa, old_start_aid, new_start_aid); + let new_start_uid = + StateID::new(next_avail.as_usize().checked_sub(2).unwrap()) + .unwrap(); + remapper.swap(&mut self.nfa, old_start_uid, new_start_uid); + let new_max_match_id = + StateID::new(next_avail.as_usize().checked_sub(3).unwrap()) + .unwrap(); + self.nfa.special.max_match_id = new_max_match_id; + self.nfa.special.start_unanchored_id = new_start_uid; + self.nfa.special.start_anchored_id = new_start_aid; + // If one start state is a match state, then they both are. + if self.nfa.states[self.nfa.special.start_anchored_id].is_match() { + self.nfa.special.max_match_id = self.nfa.special.start_anchored_id; + } + remapper.remap(&mut self.nfa); + } + + /// Attempts to convert the transition representation of a subset of states + /// in this NFA from sparse to dense. This can greatly improve search + /// performance since states with a higher number of transitions tend to + /// correlate with very active states. + /// + /// We generally only densify states that are close to the start state. + /// These tend to be the most active states and thus benefit from a dense + /// representation more than other states. + /// + /// This tends to best balance between memory usage and performance. In + /// particular, the *vast majority* of all states in a typical Aho-Corasick + /// automaton have only 1 transition and are usually farther from the start + /// state and thus don't get densified. + /// + /// Note that this doesn't remove the sparse representation of transitions + /// for states that are densified. It could be done, but actually removing + /// entries from `NFA::sparse` is likely more expensive than it's worth. + fn densify(&mut self) -> Result<(), BuildError> { + for i in 0..self.nfa.states.len() { + let sid = StateID::new(i).unwrap(); + // Don't bother densifying states that are only used as sentinels. + if sid == NFA::DEAD || sid == NFA::FAIL { + continue; + } + // Only densify states that are "close enough" to the start state. + if self.nfa.states[sid].depth.as_usize() + >= self.builder.dense_depth + { + continue; + } + let dense = self.nfa.alloc_dense_state()?; + let mut prev_link = None; + while let Some(link) = self.nfa.next_link(sid, prev_link) { + prev_link = Some(link); + let t = self.nfa.sparse[link]; + + let class = usize::from(self.nfa.byte_classes.get(t.byte)); + let index = dense.as_usize() + class; + self.nfa.dense[index] = t.next; + } + self.nfa.states[sid].dense = dense; + } + Ok(()) + } + + /// Returns a set that tracked queued states. 
+ /// + /// This is only necessary when ASCII case insensitivity is enabled, since + /// it is the only way to visit the same state twice. Otherwise, this + /// returns an inert set that nevers adds anything and always reports + /// `false` for every member test. + fn queued_set(&self) -> QueuedSet { + if self.builder.ascii_case_insensitive { + QueuedSet::active() + } else { + QueuedSet::inert() + } + } + + /// Initializes the unanchored start state by making it dense. This is + /// achieved by explicitly setting every transition to the FAIL state. + /// This isn't necessary for correctness, since any missing transition is + /// automatically assumed to be mapped to the FAIL state. We do this to + /// make the unanchored starting state dense, and thus in turn make + /// transition lookups on it faster. (Which is worth doing because it's + /// the most active state.) + fn init_unanchored_start_state(&mut self) -> Result<(), BuildError> { + let start_uid = self.nfa.special.start_unanchored_id; + let start_aid = self.nfa.special.start_anchored_id; + self.nfa.init_full_state(start_uid, NFA::FAIL)?; + self.nfa.init_full_state(start_aid, NFA::FAIL)?; + Ok(()) + } + + /// Setup the anchored start state by copying all of the transitions and + /// matches from the unanchored starting state with one change: the failure + /// transition is changed to the DEAD state, so that for any undefined + /// transitions, the search will stop. + fn set_anchored_start_state(&mut self) -> Result<(), BuildError> { + let start_uid = self.nfa.special.start_unanchored_id; + let start_aid = self.nfa.special.start_anchored_id; + let (mut uprev_link, mut aprev_link) = (None, None); + loop { + let unext = self.nfa.next_link(start_uid, uprev_link); + let anext = self.nfa.next_link(start_aid, aprev_link); + let (ulink, alink) = match (unext, anext) { + (Some(ulink), Some(alink)) => (ulink, alink), + (None, None) => break, + _ => unreachable!(), + }; + uprev_link = Some(ulink); + aprev_link = Some(alink); + self.nfa.sparse[alink].next = self.nfa.sparse[ulink].next; + } + self.nfa.copy_matches(start_uid, start_aid)?; + // This is the main difference between the unanchored and anchored + // starting states. If a lookup on an anchored starting state fails, + // then the search should stop. + // + // N.B. This assumes that the loop on the unanchored starting state + // hasn't been created yet. + self.nfa.states[start_aid].fail = NFA::DEAD; + Ok(()) + } + + /// Set the failure transitions on the start state to loop back to the + /// start state. This effectively permits the Aho-Corasick automaton to + /// match at any position. This is also required for finding the next + /// state to terminate, namely, finding the next state should never return + /// a fail_id. + /// + /// This must be done after building the initial trie, since trie + /// construction depends on transitions to `fail_id` to determine whether a + /// state already exists or not. + fn add_unanchored_start_state_loop(&mut self) { + let start_uid = self.nfa.special.start_unanchored_id; + let mut prev_link = None; + while let Some(link) = self.nfa.next_link(start_uid, prev_link) { + prev_link = Some(link); + if self.nfa.sparse[link].next() == NFA::FAIL { + self.nfa.sparse[link].next = start_uid; + } + } + } + + /// Remove the start state loop by rewriting any transitions on the start + /// state back to the start state with transitions to the dead state. 
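+    ///
+    /// The observable effect, sketched with the crate's top-level public API
+    /// (names assumed from the crate root): under leftmost semantics, an
+    /// empty pattern makes the start state a match state, so the search
+    /// reports the empty match and never restarts:
+    ///
+    /// ```ignore
+    /// use aho_corasick::{AhoCorasick, MatchKind};
+    ///
+    /// let ac = AhoCorasick::builder()
+    ///     .match_kind(MatchKind::LeftmostFirst)
+    ///     .build(&["", "foo"])
+    ///     .unwrap();
+    /// let mat = ac.find("foo").unwrap();
+    /// assert_eq!((0, 0..0), (mat.pattern().as_usize(), mat.range()));
+    /// ```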
+ /// + /// The loop is only closed when two conditions are met: the start state + /// is a match state and the match kind is leftmost-first or + /// leftmost-longest. + /// + /// The reason for this is that under leftmost semantics, a start state + /// that is also a match implies that we should never restart the search + /// process. We allow normal transitions out of the start state, but if + /// none exist, we transition to the dead state, which signals that + /// searching should stop. + fn close_start_state_loop_for_leftmost(&mut self) { + let start_uid = self.nfa.special.start_unanchored_id; + let start = &mut self.nfa.states[start_uid]; + let dense = start.dense; + if self.builder.match_kind.is_leftmost() && start.is_match() { + let mut prev_link = None; + while let Some(link) = self.nfa.next_link(start_uid, prev_link) { + prev_link = Some(link); + if self.nfa.sparse[link].next() == start_uid { + self.nfa.sparse[link].next = NFA::DEAD; + if dense != StateID::ZERO { + let b = self.nfa.sparse[link].byte; + let class = usize::from(self.nfa.byte_classes.get(b)); + self.nfa.dense[dense.as_usize() + class] = NFA::DEAD; + } + } + } + } + } + + /// Sets all transitions on the dead state to point back to the dead state. + /// Normally, missing transitions map back to the failure state, but the + /// point of the dead state is to act as a sink that can never be escaped. + fn add_dead_state_loop(&mut self) -> Result<(), BuildError> { + self.nfa.init_full_state(NFA::DEAD, NFA::DEAD)?; + Ok(()) + } +} + +/// A set of state identifiers used to avoid revisiting the same state multiple +/// times when filling in failure transitions. +/// +/// This set has an "inert" and an "active" mode. When inert, the set never +/// stores anything and always returns `false` for every member test. This is +/// useful to avoid the performance and memory overhead of maintaining this +/// set when it is not needed. +#[derive(Debug)] +struct QueuedSet { + set: Option>, +} + +impl QueuedSet { + /// Return an inert set that returns `false` for every state ID membership + /// test. + fn inert() -> QueuedSet { + QueuedSet { set: None } + } + + /// Return an active set that tracks state ID membership. + fn active() -> QueuedSet { + QueuedSet { set: Some(BTreeSet::new()) } + } + + /// Inserts the given state ID into this set. (If the set is inert, then + /// this is a no-op.) + fn insert(&mut self, state_id: StateID) { + if let Some(ref mut set) = self.set { + set.insert(state_id); + } + } + + /// Returns true if and only if the given state ID is in this set. If the + /// set is inert, this always returns false. + fn contains(&self, state_id: StateID) -> bool { + match self.set { + None => false, + Some(ref set) => set.contains(&state_id), + } + } +} + +impl core::fmt::Debug for NFA { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + use crate::{ + automaton::{fmt_state_indicator, sparse_transitions}, + util::debug::DebugByte, + }; + + writeln!(f, "noncontiguous::NFA(")?; + for (sid, state) in self.states.iter().with_state_ids() { + // The FAIL state doesn't actually have space for a state allocated + // for it, so we have to treat it as a special case. 
+ if sid == NFA::FAIL { + writeln!(f, "F {:06}:", sid.as_usize())?; + continue; + } + fmt_state_indicator(f, self, sid)?; + write!( + f, + "{:06}({:06}): ", + sid.as_usize(), + state.fail.as_usize() + )?; + + let it = sparse_transitions( + self.iter_trans(sid).map(|t| (t.byte, t.next)), + ) + .enumerate(); + for (i, (start, end, sid)) in it { + if i > 0 { + write!(f, ", ")?; + } + if start == end { + write!( + f, + "{:?} => {:?}", + DebugByte(start), + sid.as_usize() + )?; + } else { + write!( + f, + "{:?}-{:?} => {:?}", + DebugByte(start), + DebugByte(end), + sid.as_usize() + )?; + } + } + + write!(f, "\n")?; + if self.is_match(sid) { + write!(f, " matches: ")?; + for (i, pid) in self.iter_matches(sid).enumerate() { + if i > 0 { + write!(f, ", ")?; + } + write!(f, "{}", pid.as_usize())?; + } + write!(f, "\n")?; + } + } + writeln!(f, "match kind: {:?}", self.match_kind)?; + writeln!(f, "prefilter: {:?}", self.prefilter.is_some())?; + writeln!(f, "state length: {:?}", self.states.len())?; + writeln!(f, "pattern length: {:?}", self.patterns_len())?; + writeln!(f, "shortest pattern length: {:?}", self.min_pattern_len)?; + writeln!(f, "longest pattern length: {:?}", self.max_pattern_len)?; + writeln!(f, "memory usage: {:?}", self.memory_usage())?; + writeln!(f, ")")?; + Ok(()) + } +} diff --git a/vendor/aho-corasick/src/packed/api.rs b/vendor/aho-corasick/src/packed/api.rs new file mode 100644 index 00000000000000..35ebf7e334da30 --- /dev/null +++ b/vendor/aho-corasick/src/packed/api.rs @@ -0,0 +1,687 @@ +use alloc::sync::Arc; + +use crate::{ + packed::{pattern::Patterns, rabinkarp::RabinKarp, teddy}, + util::search::{Match, Span}, +}; + +/// This is a limit placed on the total number of patterns we're willing to try +/// and match at once. As more sophisticated algorithms are added, this number +/// may be increased. +const PATTERN_LIMIT: usize = 128; + +/// A knob for controlling the match semantics of a packed multiple string +/// searcher. +/// +/// This differs from the [`MatchKind`](crate::MatchKind) type in the top-level +/// crate module in that it doesn't support "standard" match semantics, +/// and instead only supports leftmost-first or leftmost-longest. Namely, +/// "standard" semantics cannot be easily supported by packed searchers. +/// +/// For more information on the distinction between leftmost-first and +/// leftmost-longest, see the docs on the top-level `MatchKind` type. +/// +/// Unlike the top-level `MatchKind` type, the default match semantics for this +/// type are leftmost-first. +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +#[non_exhaustive] +pub enum MatchKind { + /// Use leftmost-first match semantics, which reports leftmost matches. + /// When there are multiple possible leftmost matches, the match + /// corresponding to the pattern that appeared earlier when constructing + /// the automaton is reported. + /// + /// This is the default. + LeftmostFirst, + /// Use leftmost-longest match semantics, which reports leftmost matches. + /// When there are multiple possible leftmost matches, the longest match + /// is chosen. + LeftmostLongest, +} + +impl Default for MatchKind { + fn default() -> MatchKind { + MatchKind::LeftmostFirst + } +} + +/// The configuration for a packed multiple pattern searcher. +/// +/// The configuration is currently limited only to being able to select the +/// match semantics (leftmost-first or leftmost-longest) of a searcher. In the +/// future, more knobs may be made available. 
+/// +/// A configuration produces a [`packed::Builder`](Builder), which in turn can +/// be used to construct a [`packed::Searcher`](Searcher) for searching. +/// +/// # Example +/// +/// This example shows how to use leftmost-longest semantics instead of the +/// default (leftmost-first). +/// +/// ``` +/// use aho_corasick::{packed::{Config, MatchKind}, PatternID}; +/// +/// # fn example() -> Option<()> { +/// let searcher = Config::new() +/// .match_kind(MatchKind::LeftmostLongest) +/// .builder() +/// .add("foo") +/// .add("foobar") +/// .build()?; +/// let matches: Vec = searcher +/// .find_iter("foobar") +/// .map(|mat| mat.pattern()) +/// .collect(); +/// assert_eq!(vec![PatternID::must(1)], matches); +/// # Some(()) } +/// # if cfg!(all(feature = "std", any( +/// # target_arch = "x86_64", target_arch = "aarch64", +/// # ))) { +/// # example().unwrap() +/// # } else { +/// # assert!(example().is_none()); +/// # } +/// ``` +#[derive(Clone, Debug)] +pub struct Config { + kind: MatchKind, + force: Option, + only_teddy_fat: Option, + only_teddy_256bit: Option, + heuristic_pattern_limits: bool, +} + +/// An internal option for forcing the use of a particular packed algorithm. +/// +/// When an algorithm is forced, if a searcher could not be constructed for it, +/// then no searcher will be returned even if an alternative algorithm would +/// work. +#[derive(Clone, Debug)] +enum ForceAlgorithm { + Teddy, + RabinKarp, +} + +impl Default for Config { + fn default() -> Config { + Config::new() + } +} + +impl Config { + /// Create a new default configuration. A default configuration uses + /// leftmost-first match semantics. + pub fn new() -> Config { + Config { + kind: MatchKind::LeftmostFirst, + force: None, + only_teddy_fat: None, + only_teddy_256bit: None, + heuristic_pattern_limits: true, + } + } + + /// Create a packed builder from this configuration. The builder can be + /// used to accumulate patterns and create a [`Searcher`] from them. + pub fn builder(&self) -> Builder { + Builder::from_config(self.clone()) + } + + /// Set the match semantics for this configuration. + pub fn match_kind(&mut self, kind: MatchKind) -> &mut Config { + self.kind = kind; + self + } + + /// An undocumented method for forcing the use of the Teddy algorithm. + /// + /// This is only exposed for more precise testing and benchmarks. Callers + /// should not use it as it is not part of the API stability guarantees of + /// this crate. + #[doc(hidden)] + pub fn only_teddy(&mut self, yes: bool) -> &mut Config { + if yes { + self.force = Some(ForceAlgorithm::Teddy); + } else { + self.force = None; + } + self + } + + /// An undocumented method for forcing the use of the Fat Teddy algorithm. + /// + /// This is only exposed for more precise testing and benchmarks. Callers + /// should not use it as it is not part of the API stability guarantees of + /// this crate. + #[doc(hidden)] + pub fn only_teddy_fat(&mut self, yes: Option) -> &mut Config { + self.only_teddy_fat = yes; + self + } + + /// An undocumented method for forcing the use of SSE (`Some(false)`) or + /// AVX (`Some(true)`) algorithms. + /// + /// This is only exposed for more precise testing and benchmarks. Callers + /// should not use it as it is not part of the API stability guarantees of + /// this crate. + #[doc(hidden)] + pub fn only_teddy_256bit(&mut self, yes: Option) -> &mut Config { + self.only_teddy_256bit = yes; + self + } + + /// An undocumented method for forcing the use of the Rabin-Karp algorithm. 
+ /// + /// This is only exposed for more precise testing and benchmarks. Callers + /// should not use it as it is not part of the API stability guarantees of + /// this crate. + #[doc(hidden)] + pub fn only_rabin_karp(&mut self, yes: bool) -> &mut Config { + if yes { + self.force = Some(ForceAlgorithm::RabinKarp); + } else { + self.force = None; + } + self + } + + /// Request that heuristic limitations on the number of patterns be + /// employed. This useful to disable for benchmarking where one wants to + /// explore how Teddy performs on large number of patterns even if the + /// heuristics would otherwise refuse construction. + /// + /// This is enabled by default. + pub fn heuristic_pattern_limits(&mut self, yes: bool) -> &mut Config { + self.heuristic_pattern_limits = yes; + self + } +} + +/// A builder for constructing a packed searcher from a collection of patterns. +/// +/// # Example +/// +/// This example shows how to use a builder to construct a searcher. By +/// default, leftmost-first match semantics are used. +/// +/// ``` +/// use aho_corasick::{packed::{Builder, MatchKind}, PatternID}; +/// +/// # fn example() -> Option<()> { +/// let searcher = Builder::new() +/// .add("foobar") +/// .add("foo") +/// .build()?; +/// let matches: Vec = searcher +/// .find_iter("foobar") +/// .map(|mat| mat.pattern()) +/// .collect(); +/// assert_eq!(vec![PatternID::ZERO], matches); +/// # Some(()) } +/// # if cfg!(all(feature = "std", any( +/// # target_arch = "x86_64", target_arch = "aarch64", +/// # ))) { +/// # example().unwrap() +/// # } else { +/// # assert!(example().is_none()); +/// # } +/// ``` +#[derive(Clone, Debug)] +pub struct Builder { + /// The configuration of this builder and subsequent matcher. + config: Config, + /// Set to true if the builder detects that a matcher cannot be built. + inert: bool, + /// The patterns provided by the caller. + patterns: Patterns, +} + +impl Builder { + /// Create a new builder for constructing a multi-pattern searcher. This + /// constructor uses the default configuration. + pub fn new() -> Builder { + Builder::from_config(Config::new()) + } + + fn from_config(config: Config) -> Builder { + Builder { config, inert: false, patterns: Patterns::new() } + } + + /// Build a searcher from the patterns added to this builder so far. + pub fn build(&self) -> Option { + if self.inert || self.patterns.is_empty() { + return None; + } + let mut patterns = self.patterns.clone(); + patterns.set_match_kind(self.config.kind); + let patterns = Arc::new(patterns); + let rabinkarp = RabinKarp::new(&patterns); + // Effectively, we only want to return a searcher if we can use Teddy, + // since Teddy is our only fast packed searcher at the moment. + // Rabin-Karp is only used when searching haystacks smaller than what + // Teddy can support. Thus, the only way to get a Rabin-Karp searcher + // is to force it using undocumented APIs (for tests/benchmarks). 
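+        //
+        // The `minimum_len` recorded below is the shortest haystack the
+        // chosen algorithm can handle directly; `Searcher::find_in` falls
+        // back to the slower Rabin-Karp search for anything shorter.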
+ let (search_kind, minimum_len) = match self.config.force { + None | Some(ForceAlgorithm::Teddy) => { + debug!("trying to build Teddy packed matcher"); + let teddy = match self.build_teddy(Arc::clone(&patterns)) { + None => return None, + Some(teddy) => teddy, + }; + let minimum_len = teddy.minimum_len(); + (SearchKind::Teddy(teddy), minimum_len) + } + Some(ForceAlgorithm::RabinKarp) => { + debug!("using Rabin-Karp packed matcher"); + (SearchKind::RabinKarp, 0) + } + }; + Some(Searcher { patterns, rabinkarp, search_kind, minimum_len }) + } + + fn build_teddy(&self, patterns: Arc) -> Option { + teddy::Builder::new() + .only_256bit(self.config.only_teddy_256bit) + .only_fat(self.config.only_teddy_fat) + .heuristic_pattern_limits(self.config.heuristic_pattern_limits) + .build(patterns) + } + + /// Add the given pattern to this set to match. + /// + /// The order in which patterns are added is significant. Namely, when + /// using leftmost-first match semantics, then when multiple patterns can + /// match at a particular location, the pattern that was added first is + /// used as the match. + /// + /// If the number of patterns added exceeds the amount supported by packed + /// searchers, then the builder will stop accumulating patterns and render + /// itself inert. At this point, constructing a searcher will always return + /// `None`. + pub fn add>(&mut self, pattern: P) -> &mut Builder { + if self.inert { + return self; + } else if self.patterns.len() >= PATTERN_LIMIT { + self.inert = true; + self.patterns.reset(); + return self; + } + // Just in case PATTERN_LIMIT increases beyond u16::MAX. + assert!(self.patterns.len() <= core::u16::MAX as usize); + + let pattern = pattern.as_ref(); + if pattern.is_empty() { + self.inert = true; + self.patterns.reset(); + return self; + } + self.patterns.add(pattern); + self + } + + /// Add the given iterator of patterns to this set to match. + /// + /// The iterator must yield elements that can be converted into a `&[u8]`. + /// + /// The order in which patterns are added is significant. Namely, when + /// using leftmost-first match semantics, then when multiple patterns can + /// match at a particular location, the pattern that was added first is + /// used as the match. + /// + /// If the number of patterns added exceeds the amount supported by packed + /// searchers, then the builder will stop accumulating patterns and render + /// itself inert. At this point, constructing a searcher will always return + /// `None`. + pub fn extend(&mut self, patterns: I) -> &mut Builder + where + I: IntoIterator, + P: AsRef<[u8]>, + { + for p in patterns { + self.add(p); + } + self + } + + /// Returns the number of patterns added to this builder. + pub fn len(&self) -> usize { + self.patterns.len() + } + + /// Returns the length, in bytes, of the shortest pattern added. + pub fn minimum_len(&self) -> usize { + self.patterns.minimum_len() + } +} + +impl Default for Builder { + fn default() -> Builder { + Builder::new() + } +} + +/// A packed searcher for quickly finding occurrences of multiple patterns. +/// +/// If callers need more flexible construction, or if one wants to change the +/// match semantics (either leftmost-first or leftmost-longest), then one can +/// use the [`Config`] and/or [`Builder`] types for more fine grained control. +/// +/// # Example +/// +/// This example shows how to create a searcher from an iterator of patterns. +/// By default, leftmost-first match semantics are used. 
+/// +/// ``` +/// use aho_corasick::{packed::{MatchKind, Searcher}, PatternID}; +/// +/// # fn example() -> Option<()> { +/// let searcher = Searcher::new(["foobar", "foo"].iter().cloned())?; +/// let matches: Vec = searcher +/// .find_iter("foobar") +/// .map(|mat| mat.pattern()) +/// .collect(); +/// assert_eq!(vec![PatternID::ZERO], matches); +/// # Some(()) } +/// # if cfg!(all(feature = "std", any( +/// # target_arch = "x86_64", target_arch = "aarch64", +/// # ))) { +/// # example().unwrap() +/// # } else { +/// # assert!(example().is_none()); +/// # } +/// ``` +#[derive(Clone, Debug)] +pub struct Searcher { + patterns: Arc, + rabinkarp: RabinKarp, + search_kind: SearchKind, + minimum_len: usize, +} + +#[derive(Clone, Debug)] +enum SearchKind { + Teddy(teddy::Searcher), + RabinKarp, +} + +impl Searcher { + /// A convenience function for constructing a searcher from an iterator + /// of things that can be converted to a `&[u8]`. + /// + /// If a searcher could not be constructed (either because of an + /// unsupported CPU or because there are too many patterns), then `None` + /// is returned. + /// + /// # Example + /// + /// Basic usage: + /// + /// ``` + /// use aho_corasick::{packed::{MatchKind, Searcher}, PatternID}; + /// + /// # fn example() -> Option<()> { + /// let searcher = Searcher::new(["foobar", "foo"].iter().cloned())?; + /// let matches: Vec = searcher + /// .find_iter("foobar") + /// .map(|mat| mat.pattern()) + /// .collect(); + /// assert_eq!(vec![PatternID::ZERO], matches); + /// # Some(()) } + /// # if cfg!(all(feature = "std", any( + /// # target_arch = "x86_64", target_arch = "aarch64", + /// # ))) { + /// # example().unwrap() + /// # } else { + /// # assert!(example().is_none()); + /// # } + /// ``` + pub fn new(patterns: I) -> Option + where + I: IntoIterator, + P: AsRef<[u8]>, + { + Builder::new().extend(patterns).build() + } + + /// A convenience function for calling `Config::new()`. + /// + /// This is useful for avoiding an additional import. + pub fn config() -> Config { + Config::new() + } + + /// A convenience function for calling `Builder::new()`. + /// + /// This is useful for avoiding an additional import. + pub fn builder() -> Builder { + Builder::new() + } + + /// Return the first occurrence of any of the patterns in this searcher, + /// according to its match semantics, in the given haystack. The `Match` + /// returned will include the identifier of the pattern that matched, which + /// corresponds to the index of the pattern (starting from `0`) in which it + /// was added. + /// + /// # Example + /// + /// Basic usage: + /// + /// ``` + /// use aho_corasick::{packed::{MatchKind, Searcher}, PatternID}; + /// + /// # fn example() -> Option<()> { + /// let searcher = Searcher::new(["foobar", "foo"].iter().cloned())?; + /// let mat = searcher.find("foobar")?; + /// assert_eq!(PatternID::ZERO, mat.pattern()); + /// assert_eq!(0, mat.start()); + /// assert_eq!(6, mat.end()); + /// # Some(()) } + /// # if cfg!(all(feature = "std", any( + /// # target_arch = "x86_64", target_arch = "aarch64", + /// # ))) { + /// # example().unwrap() + /// # } else { + /// # assert!(example().is_none()); + /// # } + /// ``` + #[inline] + pub fn find>(&self, haystack: B) -> Option { + let haystack = haystack.as_ref(); + self.find_in(haystack, Span::from(0..haystack.len())) + } + + /// Return the first occurrence of any of the patterns in this searcher, + /// according to its match semantics, in the given haystack starting from + /// the given position. 
+ /// + /// The `Match` returned will include the identifier of the pattern that + /// matched, which corresponds to the index of the pattern (starting from + /// `0`) in which it was added. The offsets in the `Match` will be relative + /// to the start of `haystack` (and not `at`). + /// + /// # Example + /// + /// Basic usage: + /// + /// ``` + /// use aho_corasick::{packed::{MatchKind, Searcher}, PatternID, Span}; + /// + /// # fn example() -> Option<()> { + /// let haystack = "foofoobar"; + /// let searcher = Searcher::new(["foobar", "foo"].iter().cloned())?; + /// let mat = searcher.find_in(haystack, Span::from(3..haystack.len()))?; + /// assert_eq!(PatternID::ZERO, mat.pattern()); + /// assert_eq!(3, mat.start()); + /// assert_eq!(9, mat.end()); + /// # Some(()) } + /// # if cfg!(all(feature = "std", any( + /// # target_arch = "x86_64", target_arch = "aarch64", + /// # ))) { + /// # example().unwrap() + /// # } else { + /// # assert!(example().is_none()); + /// # } + /// ``` + #[inline] + pub fn find_in>( + &self, + haystack: B, + span: Span, + ) -> Option { + let haystack = haystack.as_ref(); + match self.search_kind { + SearchKind::Teddy(ref teddy) => { + if haystack[span].len() < teddy.minimum_len() { + return self.find_in_slow(haystack, span); + } + teddy.find(&haystack[..span.end], span.start) + } + SearchKind::RabinKarp => { + self.rabinkarp.find_at(&haystack[..span.end], span.start) + } + } + } + + /// Return an iterator of non-overlapping occurrences of the patterns in + /// this searcher, according to its match semantics, in the given haystack. + /// + /// # Example + /// + /// Basic usage: + /// + /// ``` + /// use aho_corasick::{packed::{MatchKind, Searcher}, PatternID}; + /// + /// # fn example() -> Option<()> { + /// let searcher = Searcher::new(["foobar", "foo"].iter().cloned())?; + /// let matches: Vec = searcher + /// .find_iter("foobar fooba foofoo") + /// .map(|mat| mat.pattern()) + /// .collect(); + /// assert_eq!(vec![ + /// PatternID::must(0), + /// PatternID::must(1), + /// PatternID::must(1), + /// PatternID::must(1), + /// ], matches); + /// # Some(()) } + /// # if cfg!(all(feature = "std", any( + /// # target_arch = "x86_64", target_arch = "aarch64", + /// # ))) { + /// # example().unwrap() + /// # } else { + /// # assert!(example().is_none()); + /// # } + /// ``` + #[inline] + pub fn find_iter<'a, 'b, B: ?Sized + AsRef<[u8]>>( + &'a self, + haystack: &'b B, + ) -> FindIter<'a, 'b> { + let haystack = haystack.as_ref(); + let span = Span::from(0..haystack.len()); + FindIter { searcher: self, haystack, span } + } + + /// Returns the match kind used by this packed searcher. + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// use aho_corasick::packed::{MatchKind, Searcher}; + /// + /// # fn example() -> Option<()> { + /// let searcher = Searcher::new(["foobar", "foo"].iter().cloned())?; + /// // leftmost-first is the default. + /// assert_eq!(&MatchKind::LeftmostFirst, searcher.match_kind()); + /// # Some(()) } + /// # if cfg!(all(feature = "std", any( + /// # target_arch = "x86_64", target_arch = "aarch64", + /// # ))) { + /// # example().unwrap() + /// # } else { + /// # assert!(example().is_none()); + /// # } + /// ``` + #[inline] + pub fn match_kind(&self) -> &MatchKind { + self.patterns.match_kind() + } + + /// Returns the minimum length of a haystack that is required in order for + /// packed searching to be effective. + /// + /// In some cases, the underlying packed searcher may not be able to search + /// very short haystacks. 
When that occurs, the implementation will defer + /// to a slower non-packed searcher (which is still generally faster than + /// Aho-Corasick for a small number of patterns). However, callers may + /// want to avoid ever using the slower variant, which one can do by + /// never passing a haystack shorter than the minimum length returned by + /// this method. + #[inline] + pub fn minimum_len(&self) -> usize { + self.minimum_len + } + + /// Returns the approximate total amount of heap used by this searcher, in + /// units of bytes. + #[inline] + pub fn memory_usage(&self) -> usize { + self.patterns.memory_usage() + + self.rabinkarp.memory_usage() + + self.search_kind.memory_usage() + } + + /// Use a slow (non-packed) searcher. + /// + /// This is useful when a packed searcher could be constructed, but could + /// not be used to search a specific haystack. For example, if Teddy was + /// built but the haystack is smaller than ~34 bytes, then Teddy might not + /// be able to run. + fn find_in_slow(&self, haystack: &[u8], span: Span) -> Option { + self.rabinkarp.find_at(&haystack[..span.end], span.start) + } +} + +impl SearchKind { + fn memory_usage(&self) -> usize { + match *self { + SearchKind::Teddy(ref ted) => ted.memory_usage(), + SearchKind::RabinKarp => 0, + } + } +} + +/// An iterator over non-overlapping matches from a packed searcher. +/// +/// The lifetime `'s` refers to the lifetime of the underlying [`Searcher`], +/// while the lifetime `'h` refers to the lifetime of the haystack being +/// searched. +#[derive(Debug)] +pub struct FindIter<'s, 'h> { + searcher: &'s Searcher, + haystack: &'h [u8], + span: Span, +} + +impl<'s, 'h> Iterator for FindIter<'s, 'h> { + type Item = Match; + + fn next(&mut self) -> Option { + if self.span.start > self.span.end { + return None; + } + match self.searcher.find_in(self.haystack, self.span) { + None => None, + Some(m) => { + self.span.start = m.end(); + Some(m) + } + } + } +} diff --git a/vendor/aho-corasick/src/packed/ext.rs b/vendor/aho-corasick/src/packed/ext.rs new file mode 100644 index 00000000000000..b689642bca351b --- /dev/null +++ b/vendor/aho-corasick/src/packed/ext.rs @@ -0,0 +1,39 @@ +/// A trait for adding some helper routines to pointers. +pub(crate) trait Pointer { + /// Returns the distance, in units of `T`, between `self` and `origin`. + /// + /// # Safety + /// + /// Same as `ptr::offset_from` in addition to `self >= origin`. + unsafe fn distance(self, origin: Self) -> usize; + + /// Casts this pointer to `usize`. + /// + /// Callers should not convert the `usize` back to a pointer if at all + /// possible. (And if you believe it's necessary, open an issue to discuss + /// why. Otherwise, it has the potential to violate pointer provenance.) + /// The purpose of this function is just to be able to do arithmetic, i.e., + /// computing offsets or alignments. + fn as_usize(self) -> usize; +} + +impl Pointer for *const T { + unsafe fn distance(self, origin: *const T) -> usize { + // TODO: Replace with `ptr::sub_ptr` once stabilized. 
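+        //
+        // SAFETY: the trait contract requires `self >= origin`, so
+        // `offset_from` returns a non-negative value and the conversion to
+        // `usize` cannot fail, which is what makes `unwrap_unchecked` sound.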
+ usize::try_from(self.offset_from(origin)).unwrap_unchecked() + } + + fn as_usize(self) -> usize { + self as usize + } +} + +impl Pointer for *mut T { + unsafe fn distance(self, origin: *mut T) -> usize { + (self as *const T).distance(origin as *const T) + } + + fn as_usize(self) -> usize { + (self as *const T).as_usize() + } +} diff --git a/vendor/aho-corasick/src/packed/mod.rs b/vendor/aho-corasick/src/packed/mod.rs new file mode 100644 index 00000000000000..3990bc9330f7fd --- /dev/null +++ b/vendor/aho-corasick/src/packed/mod.rs @@ -0,0 +1,120 @@ +/*! +Provides packed multiple substring search, principally for a small number of +patterns. + +This sub-module provides vectorized routines for quickly finding +matches of a small number of patterns. In general, users of this crate +shouldn't need to interface with this module directly, as the primary +[`AhoCorasick`](crate::AhoCorasick) searcher will use these routines +automatically as a prefilter when applicable. However, in some cases, callers +may want to bypass the Aho-Corasick machinery entirely and use this vectorized +searcher directly. + +# Overview + +The primary types in this sub-module are: + +* [`Searcher`] executes the actual search algorithm to report matches in a +haystack. +* [`Builder`] accumulates patterns incrementally and can construct a +`Searcher`. +* [`Config`] permits tuning the searcher, and itself will produce a `Builder` +(which can then be used to build a `Searcher`). Currently, the only tuneable +knob are the match semantics, but this may be expanded in the future. + +# Examples + +This example shows how to create a searcher from an iterator of patterns. +By default, leftmost-first match semantics are used. (See the top-level +[`MatchKind`] type for more details about match semantics, which apply +similarly to packed substring search.) + +``` +use aho_corasick::{packed::{MatchKind, Searcher}, PatternID}; + +# fn example() -> Option<()> { +let searcher = Searcher::new(["foobar", "foo"].iter().cloned())?; +let matches: Vec = searcher + .find_iter("foobar") + .map(|mat| mat.pattern()) + .collect(); +assert_eq!(vec![PatternID::ZERO], matches); +# Some(()) } +# if cfg!(all(feature = "std", any( +# target_arch = "x86_64", target_arch = "aarch64", +# ))) { +# example().unwrap() +# } else { +# assert!(example().is_none()); +# } +``` + +This example shows how to use [`Config`] to change the match semantics to +leftmost-longest: + +``` +use aho_corasick::{packed::{Config, MatchKind}, PatternID}; + +# fn example() -> Option<()> { +let searcher = Config::new() + .match_kind(MatchKind::LeftmostLongest) + .builder() + .add("foo") + .add("foobar") + .build()?; +let matches: Vec = searcher + .find_iter("foobar") + .map(|mat| mat.pattern()) + .collect(); +assert_eq!(vec![PatternID::must(1)], matches); +# Some(()) } +# if cfg!(all(feature = "std", any( +# target_arch = "x86_64", target_arch = "aarch64", +# ))) { +# example().unwrap() +# } else { +# assert!(example().is_none()); +# } +``` + +# Packed substring searching + +Packed substring searching refers to the use of SIMD (Single Instruction, +Multiple Data) to accelerate the detection of matches in a haystack. Unlike +conventional algorithms, such as Aho-Corasick, SIMD algorithms for substring +search tend to do better with a small number of patterns, where as Aho-Corasick +generally maintains reasonably consistent performance regardless of the number +of patterns you give it. 
Because of this, the vectorized searcher in this +sub-module cannot be used as a general purpose searcher, since building the +searcher may fail even when given a small number of patterns. However, in +exchange, when searching for a small number of patterns, searching can be quite +a bit faster than Aho-Corasick (sometimes by an order of magnitude). + +The key take away here is that constructing a searcher from a list of patterns +is a fallible operation with no clear rules for when it will fail. While the +precise conditions under which building a searcher can fail is specifically an +implementation detail, here are some common reasons: + +* Too many patterns were given. Typically, the limit is on the order of 100 or + so, but this limit may fluctuate based on available CPU features. +* The available packed algorithms require CPU features that aren't available. + For example, currently, this crate only provides packed algorithms for + `x86_64` and `aarch64`. Therefore, constructing a packed searcher on any + other target will always fail. +* Zero patterns were given, or one of the patterns given was empty. Packed + searchers require at least one pattern and that all patterns are non-empty. +* Something else about the nature of the patterns (typically based on + heuristics) suggests that a packed searcher would perform very poorly, so + no searcher is built. +*/ + +pub use crate::packed::api::{Builder, Config, FindIter, MatchKind, Searcher}; + +mod api; +mod ext; +mod pattern; +mod rabinkarp; +mod teddy; +#[cfg(all(feature = "std", test))] +mod tests; +mod vector; diff --git a/vendor/aho-corasick/src/packed/pattern.rs b/vendor/aho-corasick/src/packed/pattern.rs new file mode 100644 index 00000000000000..14da87aabc9e04 --- /dev/null +++ b/vendor/aho-corasick/src/packed/pattern.rs @@ -0,0 +1,480 @@ +use core::{cmp, fmt, mem, u16, usize}; + +use alloc::{boxed::Box, string::String, vec, vec::Vec}; + +use crate::{ + packed::{api::MatchKind, ext::Pointer}, + PatternID, +}; + +/// A non-empty collection of non-empty patterns to search for. +/// +/// This collection of patterns is what is passed around to both execute +/// searches and to construct the searchers themselves. Namely, this permits +/// searches to avoid copying all of the patterns, and allows us to keep only +/// one copy throughout all packed searchers. +/// +/// Note that this collection is not a set. The same pattern can appear more +/// than once. +#[derive(Clone, Debug)] +pub(crate) struct Patterns { + /// The match semantics supported by this collection of patterns. + /// + /// The match semantics determines the order of the iterator over patterns. + /// For leftmost-first, patterns are provided in the same order as were + /// provided by the caller. For leftmost-longest, patterns are provided in + /// descending order of length, with ties broken by the order in which they + /// were provided by the caller. + kind: MatchKind, + /// The collection of patterns, indexed by their identifier. + by_id: Vec>, + /// The order of patterns defined for iteration, given by pattern + /// identifiers. The order of `by_id` and `order` is always the same for + /// leftmost-first semantics, but may be different for leftmost-longest + /// semantics. + order: Vec, + /// The length of the smallest pattern, in bytes. + minimum_len: usize, + /// The total number of pattern bytes across the entire collection. This + /// is used for reporting total heap usage in constant time. 
+ total_pattern_bytes: usize, +} + +// BREADCRUMBS: I think we want to experiment with a different bucket +// representation. Basically, each bucket is just a Range to a single +// contiguous allocation? Maybe length-prefixed patterns or something? The +// idea is to try to get rid of the pointer chasing in verification. I don't +// know that that is the issue, but I suspect it is. + +impl Patterns { + /// Create a new collection of patterns for the given match semantics. The + /// ID of each pattern is the index of the pattern at which it occurs in + /// the `by_id` slice. + /// + /// If any of the patterns in the slice given are empty, then this panics. + /// Similarly, if the number of patterns given is zero, then this also + /// panics. + pub(crate) fn new() -> Patterns { + Patterns { + kind: MatchKind::default(), + by_id: vec![], + order: vec![], + minimum_len: usize::MAX, + total_pattern_bytes: 0, + } + } + + /// Add a pattern to this collection. + /// + /// This panics if the pattern given is empty. + pub(crate) fn add(&mut self, bytes: &[u8]) { + assert!(!bytes.is_empty()); + assert!(self.by_id.len() <= u16::MAX as usize); + + let id = PatternID::new(self.by_id.len()).unwrap(); + self.order.push(id); + self.by_id.push(bytes.to_vec()); + self.minimum_len = cmp::min(self.minimum_len, bytes.len()); + self.total_pattern_bytes += bytes.len(); + } + + /// Set the match kind semantics for this collection of patterns. + /// + /// If the kind is not set, then the default is leftmost-first. + pub(crate) fn set_match_kind(&mut self, kind: MatchKind) { + self.kind = kind; + match self.kind { + MatchKind::LeftmostFirst => { + self.order.sort(); + } + MatchKind::LeftmostLongest => { + let (order, by_id) = (&mut self.order, &mut self.by_id); + order.sort_by(|&id1, &id2| { + by_id[id1].len().cmp(&by_id[id2].len()).reverse() + }); + } + } + } + + /// Return the number of patterns in this collection. + /// + /// This is guaranteed to be greater than zero. + pub(crate) fn len(&self) -> usize { + self.by_id.len() + } + + /// Returns true if and only if this collection of patterns is empty. + pub(crate) fn is_empty(&self) -> bool { + self.len() == 0 + } + + /// Returns the approximate total amount of heap used by these patterns, in + /// units of bytes. + pub(crate) fn memory_usage(&self) -> usize { + self.order.len() * mem::size_of::() + + self.by_id.len() * mem::size_of::>() + + self.total_pattern_bytes + } + + /// Clears all heap memory associated with this collection of patterns and + /// resets all state such that it is a valid empty collection. + pub(crate) fn reset(&mut self) { + self.kind = MatchKind::default(); + self.by_id.clear(); + self.order.clear(); + self.minimum_len = usize::MAX; + } + + /// Returns the length, in bytes, of the smallest pattern. + /// + /// This is guaranteed to be at least one. + pub(crate) fn minimum_len(&self) -> usize { + self.minimum_len + } + + /// Returns the match semantics used by these patterns. + pub(crate) fn match_kind(&self) -> &MatchKind { + &self.kind + } + + /// Return the pattern with the given identifier. If such a pattern does + /// not exist, then this panics. + pub(crate) fn get(&self, id: PatternID) -> Pattern<'_> { + Pattern(&self.by_id[id]) + } + + /// Return the pattern with the given identifier without performing bounds + /// checks. + /// + /// # Safety + /// + /// Callers must ensure that a pattern with the given identifier exists + /// before using this method. 
+ pub(crate) unsafe fn get_unchecked(&self, id: PatternID) -> Pattern<'_> { + Pattern(self.by_id.get_unchecked(id.as_usize())) + } + + /// Return an iterator over all the patterns in this collection, in the + /// order in which they should be matched. + /// + /// Specifically, in a naive multi-pattern matcher, the following is + /// guaranteed to satisfy the match semantics of this collection of + /// patterns: + /// + /// ```ignore + /// for i in 0..haystack.len(): + /// for p in patterns.iter(): + /// if haystack[i..].starts_with(p.bytes()): + /// return Match(p.id(), i, i + p.bytes().len()) + /// ``` + /// + /// Namely, among the patterns in a collection, if they are matched in + /// the order provided by this iterator, then the result is guaranteed + /// to satisfy the correct match semantics. (Either leftmost-first or + /// leftmost-longest.) + pub(crate) fn iter(&self) -> PatternIter<'_> { + PatternIter { patterns: self, i: 0 } + } +} + +/// An iterator over the patterns in the `Patterns` collection. +/// +/// The order of the patterns provided by this iterator is consistent with the +/// match semantics of the originating collection of patterns. +/// +/// The lifetime `'p` corresponds to the lifetime of the collection of patterns +/// this is iterating over. +#[derive(Debug)] +pub(crate) struct PatternIter<'p> { + patterns: &'p Patterns, + i: usize, +} + +impl<'p> Iterator for PatternIter<'p> { + type Item = (PatternID, Pattern<'p>); + + fn next(&mut self) -> Option<(PatternID, Pattern<'p>)> { + if self.i >= self.patterns.len() { + return None; + } + let id = self.patterns.order[self.i]; + let p = self.patterns.get(id); + self.i += 1; + Some((id, p)) + } +} + +/// A pattern that is used in packed searching. +#[derive(Clone)] +pub(crate) struct Pattern<'a>(&'a [u8]); + +impl<'a> fmt::Debug for Pattern<'a> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("Pattern") + .field("lit", &String::from_utf8_lossy(self.0)) + .finish() + } +} + +impl<'p> Pattern<'p> { + /// Returns the length of this pattern, in bytes. + pub(crate) fn len(&self) -> usize { + self.0.len() + } + + /// Returns the bytes of this pattern. + pub(crate) fn bytes(&self) -> &[u8] { + self.0 + } + + /// Returns the first `len` low nybbles from this pattern. If this pattern + /// is shorter than `len`, then this panics. + pub(crate) fn low_nybbles(&self, len: usize) -> Box<[u8]> { + let mut nybs = vec![0; len].into_boxed_slice(); + for (i, byte) in self.bytes().iter().take(len).enumerate() { + nybs[i] = byte & 0xF; + } + nybs + } + + /// Returns true if this pattern is a prefix of the given bytes. + #[inline(always)] + pub(crate) fn is_prefix(&self, bytes: &[u8]) -> bool { + is_prefix(bytes, self.bytes()) + } + + /// Returns true if this pattern is a prefix of the haystack given by the + /// raw `start` and `end` pointers. + /// + /// # Safety + /// + /// * It must be the case that `start < end` and that the distance between + /// them is at least equal to `V::BYTES`. That is, it must always be valid + /// to do at least an unaligned load of `V` at `start`. + /// * Both `start` and `end` must be valid for reads. + /// * Both `start` and `end` must point to an initialized value. + /// * Both `start` and `end` must point to the same allocated object and + /// must either be in bounds or at most one byte past the end of the + /// allocated object. + /// * Both `start` and `end` must be _derived from_ a pointer to the same + /// object. 
+ /// * The distance between `start` and `end` must not overflow `isize`. + /// * The distance being in bounds must not rely on "wrapping around" the + /// address space. + #[inline(always)] + pub(crate) unsafe fn is_prefix_raw( + &self, + start: *const u8, + end: *const u8, + ) -> bool { + let patlen = self.bytes().len(); + let haylen = end.distance(start); + if patlen > haylen { + return false; + } + // SAFETY: We've checked that the haystack has length at least equal + // to this pattern. All other safety concerns are the responsibility + // of the caller. + is_equal_raw(start, self.bytes().as_ptr(), patlen) + } +} + +/// Returns true if and only if `needle` is a prefix of `haystack`. +/// +/// This uses a latency optimized variant of `memcmp` internally which *might* +/// make this faster for very short strings. +/// +/// # Inlining +/// +/// This routine is marked `inline(always)`. If you want to call this function +/// in a way that is not always inlined, you'll need to wrap a call to it in +/// another function that is marked as `inline(never)` or just `inline`. +#[inline(always)] +fn is_prefix(haystack: &[u8], needle: &[u8]) -> bool { + if needle.len() > haystack.len() { + return false; + } + // SAFETY: Our pointers are derived directly from borrowed slices which + // uphold all of our safety guarantees except for length. We account for + // length with the check above. + unsafe { is_equal_raw(haystack.as_ptr(), needle.as_ptr(), needle.len()) } +} + +/// Compare corresponding bytes in `x` and `y` for equality. +/// +/// That is, this returns true if and only if `x.len() == y.len()` and +/// `x[i] == y[i]` for all `0 <= i < x.len()`. +/// +/// Note that this isn't used. We only use it in tests as a convenient way +/// of testing `is_equal_raw`. +/// +/// # Inlining +/// +/// This routine is marked `inline(always)`. If you want to call this function +/// in a way that is not always inlined, you'll need to wrap a call to it in +/// another function that is marked as `inline(never)` or just `inline`. +/// +/// # Motivation +/// +/// Why not use slice equality instead? Well, slice equality usually results in +/// a call out to the current platform's `libc` which might not be inlineable +/// or have other overhead. This routine isn't guaranteed to be a win, but it +/// might be in some cases. +#[cfg(test)] +#[inline(always)] +fn is_equal(x: &[u8], y: &[u8]) -> bool { + if x.len() != y.len() { + return false; + } + // SAFETY: Our pointers are derived directly from borrowed slices which + // uphold all of our safety guarantees except for length. We account for + // length with the check above. + unsafe { is_equal_raw(x.as_ptr(), y.as_ptr(), x.len()) } +} + +/// Compare `n` bytes at the given pointers for equality. +/// +/// This returns true if and only if `*x.add(i) == *y.add(i)` for all +/// `0 <= i < n`. +/// +/// # Inlining +/// +/// This routine is marked `inline(always)`. If you want to call this function +/// in a way that is not always inlined, you'll need to wrap a call to it in +/// another function that is marked as `inline(never)` or just `inline`. +/// +/// # Motivation +/// +/// Why not use slice equality instead? Well, slice equality usually results in +/// a call out to the current platform's `libc` which might not be inlineable +/// or have other overhead. This routine isn't guaranteed to be a win, but it +/// might be in some cases. +/// +/// # Safety +/// +/// * Both `x` and `y` must be valid for reads of up to `n` bytes. 
+/// * Both `x` and `y` must point to an initialized value. +/// * Both `x` and `y` must each point to an allocated object and +/// must either be in bounds or at most one byte past the end of the +/// allocated object. `x` and `y` do not need to point to the same allocated +/// object, but they may. +/// * Both `x` and `y` must be _derived from_ a pointer to their respective +/// allocated objects. +/// * The distance between `x` and `x+n` must not overflow `isize`. Similarly +/// for `y` and `y+n`. +/// * The distance being in bounds must not rely on "wrapping around" the +/// address space. +#[inline(always)] +unsafe fn is_equal_raw(mut x: *const u8, mut y: *const u8, n: usize) -> bool { + // If we don't have enough bytes to do 4-byte at a time loads, then + // handle each possible length specially. Note that I used to have a + // byte-at-a-time loop here and that turned out to be quite a bit slower + // for the memmem/pathological/defeat-simple-vector-alphabet benchmark. + if n < 4 { + return match n { + 0 => true, + 1 => x.read() == y.read(), + 2 => { + x.cast::().read_unaligned() + == y.cast::().read_unaligned() + } + // I also tried copy_nonoverlapping here and it looks like the + // codegen is the same. + 3 => x.cast::<[u8; 3]>().read() == y.cast::<[u8; 3]>().read(), + _ => unreachable!(), + }; + } + // When we have 4 or more bytes to compare, then proceed in chunks of 4 at + // a time using unaligned loads. + // + // Also, why do 4 byte loads instead of, say, 8 byte loads? The reason is + // that this particular version of memcmp is likely to be called with tiny + // needles. That means that if we do 8 byte loads, then a higher proportion + // of memcmp calls will use the slower variant above. With that said, this + // is a hypothesis and is only loosely supported by benchmarks. There's + // likely some improvement that could be made here. The main thing here + // though is to optimize for latency, not throughput. + + // SAFETY: The caller is responsible for ensuring the pointers we get are + // valid and readable for at least `n` bytes. We also do unaligned loads, + // so there's no need to ensure we're aligned. (This is justified by this + // routine being specifically for short strings.) 
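+    //
+    // Note that the final loads at `xend`/`yend` below may overlap bytes
+    // already compared by the loop when `n` is not a multiple of 4.
+    // Re-comparing a few bytes is harmless for an equality check and avoids
+    // a scalar tail loop.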
+ let xend = x.add(n.wrapping_sub(4)); + let yend = y.add(n.wrapping_sub(4)); + while x < xend { + let vx = x.cast::().read_unaligned(); + let vy = y.cast::().read_unaligned(); + if vx != vy { + return false; + } + x = x.add(4); + y = y.add(4); + } + let vx = xend.cast::().read_unaligned(); + let vy = yend.cast::().read_unaligned(); + vx == vy +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn equals_different_lengths() { + assert!(!is_equal(b"", b"a")); + assert!(!is_equal(b"a", b"")); + assert!(!is_equal(b"ab", b"a")); + assert!(!is_equal(b"a", b"ab")); + } + + #[test] + fn equals_mismatch() { + let one_mismatch = [ + (&b"a"[..], &b"x"[..]), + (&b"ab"[..], &b"ax"[..]), + (&b"abc"[..], &b"abx"[..]), + (&b"abcd"[..], &b"abcx"[..]), + (&b"abcde"[..], &b"abcdx"[..]), + (&b"abcdef"[..], &b"abcdex"[..]), + (&b"abcdefg"[..], &b"abcdefx"[..]), + (&b"abcdefgh"[..], &b"abcdefgx"[..]), + (&b"abcdefghi"[..], &b"abcdefghx"[..]), + (&b"abcdefghij"[..], &b"abcdefghix"[..]), + (&b"abcdefghijk"[..], &b"abcdefghijx"[..]), + (&b"abcdefghijkl"[..], &b"abcdefghijkx"[..]), + (&b"abcdefghijklm"[..], &b"abcdefghijklx"[..]), + (&b"abcdefghijklmn"[..], &b"abcdefghijklmx"[..]), + ]; + for (x, y) in one_mismatch { + assert_eq!(x.len(), y.len(), "lengths should match"); + assert!(!is_equal(x, y)); + assert!(!is_equal(y, x)); + } + } + + #[test] + fn equals_yes() { + assert!(is_equal(b"", b"")); + assert!(is_equal(b"a", b"a")); + assert!(is_equal(b"ab", b"ab")); + assert!(is_equal(b"abc", b"abc")); + assert!(is_equal(b"abcd", b"abcd")); + assert!(is_equal(b"abcde", b"abcde")); + assert!(is_equal(b"abcdef", b"abcdef")); + assert!(is_equal(b"abcdefg", b"abcdefg")); + assert!(is_equal(b"abcdefgh", b"abcdefgh")); + assert!(is_equal(b"abcdefghi", b"abcdefghi")); + } + + #[test] + fn prefix() { + assert!(is_prefix(b"", b"")); + assert!(is_prefix(b"a", b"")); + assert!(is_prefix(b"ab", b"")); + assert!(is_prefix(b"foo", b"foo")); + assert!(is_prefix(b"foobar", b"foo")); + + assert!(!is_prefix(b"foo", b"fob")); + assert!(!is_prefix(b"foobar", b"fob")); + } +} diff --git a/vendor/aho-corasick/src/packed/rabinkarp.rs b/vendor/aho-corasick/src/packed/rabinkarp.rs new file mode 100644 index 00000000000000..fdd8a6f0b4d8fa --- /dev/null +++ b/vendor/aho-corasick/src/packed/rabinkarp.rs @@ -0,0 +1,168 @@ +use alloc::{sync::Arc, vec, vec::Vec}; + +use crate::{packed::pattern::Patterns, util::search::Match, PatternID}; + +/// The type of the rolling hash used in the Rabin-Karp algorithm. +type Hash = usize; + +/// The number of buckets to store our patterns in. We don't want this to be +/// too big in order to avoid wasting memory, but we don't want it to be too +/// small either to avoid spending too much time confirming literals. +/// +/// The number of buckets MUST be a power of two. Otherwise, determining the +/// bucket from a hash will slow down the code considerably. Using a power +/// of two means `hash % NUM_BUCKETS` can compile down to a simple `and` +/// instruction. +const NUM_BUCKETS: usize = 64; + +/// An implementation of the Rabin-Karp algorithm. The main idea of this +/// algorithm is to maintain a rolling hash as it moves through the input, and +/// then check whether that hash corresponds to the same hash for any of the +/// patterns we're looking for. +/// +/// A draw back of naively scaling Rabin-Karp to multiple patterns is that +/// it requires all of the patterns to be the same length, which in turn +/// corresponds to the number of bytes to hash. 
We adapt this to work for
+/// multiple patterns of varying size by fixing the number of bytes to hash
+/// to be the length of the smallest pattern. We also split the patterns into
+/// several buckets to hopefully make the confirmation step faster.
+///
+/// Wikipedia has a decent explanation, if a bit heavy on the theory:
+/// https://en.wikipedia.org/wiki/Rabin%E2%80%93Karp_algorithm
+///
+/// But ESMAJ provides something a bit more concrete:
+/// https://www-igm.univ-mlv.fr/~lecroq/string/node5.html
+#[derive(Clone, Debug)]
+pub(crate) struct RabinKarp {
+    /// The patterns we're searching for.
+    patterns: Arc<Patterns>,
+    /// The order of patterns in each bucket is significant. Namely, they are
+    /// arranged such that the first one to match is the correct match. This
+    /// may not necessarily correspond to the order provided by the caller.
+    /// For example, if leftmost-longest semantics are used, then the patterns
+    /// are sorted by their length in descending order. If leftmost-first
+    /// semantics are used, then the patterns are sorted by their pattern ID
+    /// in ascending order (which corresponds to the caller's order).
+    buckets: Vec<Vec<(Hash, PatternID)>>,
+    /// The length of the hashing window. Generally, this corresponds to the
+    /// length of the smallest pattern.
+    hash_len: usize,
+    /// The factor to subtract out of a hash before updating it with a new
+    /// byte.
+    hash_2pow: usize,
+}
+
+impl RabinKarp {
+    /// Compile a new Rabin-Karp matcher from the patterns given.
+    ///
+    /// This panics if any of the patterns in the collection are empty, or if
+    /// the collection is itself empty.
+    pub(crate) fn new(patterns: &Arc<Patterns>) -> RabinKarp {
+        assert!(patterns.len() >= 1);
+        let hash_len = patterns.minimum_len();
+        assert!(hash_len >= 1);
+
+        let mut hash_2pow = 1usize;
+        for _ in 1..hash_len {
+            hash_2pow = hash_2pow.wrapping_shl(1);
+        }
+
+        let mut rk = RabinKarp {
+            patterns: Arc::clone(patterns),
+            buckets: vec![vec![]; NUM_BUCKETS],
+            hash_len,
+            hash_2pow,
+        };
+        for (id, pat) in patterns.iter() {
+            let hash = rk.hash(&pat.bytes()[..rk.hash_len]);
+            let bucket = hash % NUM_BUCKETS;
+            rk.buckets[bucket].push((hash, id));
+        }
+        rk
+    }
+
+    /// Return the first matching pattern in the given haystack, beginning the
+    /// search at `at`.
+    pub(crate) fn find_at(
+        &self,
+        haystack: &[u8],
+        mut at: usize,
+    ) -> Option<Match> {
+        assert_eq!(NUM_BUCKETS, self.buckets.len());
+
+        if at + self.hash_len > haystack.len() {
+            return None;
+        }
+        let mut hash = self.hash(&haystack[at..at + self.hash_len]);
+        loop {
+            let bucket = &self.buckets[hash % NUM_BUCKETS];
+            for &(phash, pid) in bucket {
+                if phash == hash {
+                    if let Some(c) = self.verify(pid, haystack, at) {
+                        return Some(c);
+                    }
+                }
+            }
+            if at + self.hash_len >= haystack.len() {
+                return None;
+            }
+            hash = self.update_hash(
+                hash,
+                haystack[at],
+                haystack[at + self.hash_len],
+            );
+            at += 1;
+        }
+    }
+
+    /// Returns the approximate total amount of heap used by this searcher, in
+    /// units of bytes.
+    pub(crate) fn memory_usage(&self) -> usize {
+        self.buckets.len() * core::mem::size_of::<Vec<(Hash, PatternID)>>()
+            + self.patterns.len() * core::mem::size_of::<(Hash, PatternID)>()
+    }
+
+    /// Verify whether the pattern with the given id matches at
+    /// `haystack[at..]`.
+    ///
+    /// We tag this function as `cold` because it helps improve codegen.
+    /// Intuitively, it would seem like inlining it would be better.
However, + /// the only time this is called and a match is not found is when there + /// there is a hash collision, or when a prefix of a pattern matches but + /// the entire pattern doesn't match. This is hopefully fairly rare, and + /// if it does occur a lot, it's going to be slow no matter what we do. + #[cold] + fn verify( + &self, + id: PatternID, + haystack: &[u8], + at: usize, + ) -> Option { + let pat = self.patterns.get(id); + if pat.is_prefix(&haystack[at..]) { + Some(Match::new(id, at..at + pat.len())) + } else { + None + } + } + + /// Hash the given bytes. + fn hash(&self, bytes: &[u8]) -> Hash { + assert_eq!(self.hash_len, bytes.len()); + + let mut hash = 0usize; + for &b in bytes { + hash = hash.wrapping_shl(1).wrapping_add(b as usize); + } + hash + } + + /// Update the hash given based on removing `old_byte` at the beginning + /// of some byte string, and appending `new_byte` to the end of that same + /// byte string. + fn update_hash(&self, prev: Hash, old_byte: u8, new_byte: u8) -> Hash { + prev.wrapping_sub((old_byte as usize).wrapping_mul(self.hash_2pow)) + .wrapping_shl(1) + .wrapping_add(new_byte as usize) + } +} diff --git a/vendor/aho-corasick/src/packed/teddy/README.md b/vendor/aho-corasick/src/packed/teddy/README.md new file mode 100644 index 00000000000000..f0928cbe5ceca2 --- /dev/null +++ b/vendor/aho-corasick/src/packed/teddy/README.md @@ -0,0 +1,386 @@ +Teddy is a SIMD accelerated multiple substring matching algorithm. The name +and the core ideas in the algorithm were learned from the [Hyperscan][1_u] +project. The implementation in this repository was mostly motivated for use in +accelerating regex searches by searching for small sets of required literals +extracted from the regex. + + +# Background + +The key idea of Teddy is to do *packed* substring matching. In the literature, +packed substring matching is the idea of examining multiple bytes in a haystack +at a time to detect matches. Implementations of, for example, memchr (which +detects matches of a single byte) have been doing this for years. Only +recently, with the introduction of various SIMD instructions, has this been +extended to substring matching. The PCMPESTRI instruction (and its relatives), +for example, implements substring matching in hardware. It is, however, limited +to substrings of length 16 bytes or fewer, but this restriction is fine in a +regex engine, since we rarely care about the performance difference between +searching for a 16 byte literal and a 16 + N literal; 16 is already long +enough. The key downside of the PCMPESTRI instruction, on current (2016) CPUs +at least, is its latency and throughput. As a result, it is often faster to +do substring search with a Boyer-Moore (or Two-Way) variant and a well placed +memchr to quickly skip through the haystack. + +There are fewer results from the literature on packed substring matching, +and even fewer for packed multiple substring matching. Ben-Kiki et al. [2] +describes use of PCMPESTRI for substring matching, but is mostly theoretical +and hand-waves performance. There is other theoretical work done by Bille [3] +as well. + +The rest of the work in the field, as far as I'm aware, is by Faro and Kulekci +and is generally focused on multiple pattern search. Their first paper [4a] +introduces the concept of a fingerprint, which is computed for every block of +N bytes in every pattern. The haystack is then scanned N bytes at a time and +a fingerprint is computed in the same way it was computed for blocks in the +patterns. 
If the fingerprint corresponds to one that was found in a pattern, +then a verification step follows to confirm that one of the substrings with the +corresponding fingerprint actually matches at the current location. Various +implementation tricks are employed to make sure the fingerprint lookup is fast; +typically by truncating the fingerprint. (This may, of course, provoke more +steps in the verification process, so a balance must be struck.) + +The main downside of [4a] is that the minimum substring length is 32 bytes, +presumably because of how the algorithm uses certain SIMD instructions. This +essentially makes it useless for general purpose regex matching, where a small +number of short patterns is far more likely. + +Faro and Kulekci published another paper [4b] that is conceptually very similar +to [4a]. The key difference is that it uses the CRC32 instruction (introduced +as part of SSE 4.2) to compute fingerprint values. This also enables the +algorithm to work effectively on substrings as short as 7 bytes with 4 byte +windows. 7 bytes is unfortunately still too long. The window could be +technically shrunk to 2 bytes, thereby reducing minimum length to 3, but the +small window size ends up negating most performance benefits—and it's likely +the common case in a general purpose regex engine. + +Faro and Kulekci also published [4c] that appears to be intended as a +replacement to using PCMPESTRI. In particular, it is specifically motivated by +the high throughput/latency time of PCMPESTRI and therefore chooses other SIMD +instructions that are faster. While this approach works for short substrings, +I personally couldn't see a way to generalize it to multiple substring search. + +Faro and Kulekci have another paper [4d] that I haven't been able to read +because it is behind a paywall. + + +# Teddy + +Finally, we get to Teddy. If the above literature review is complete, then it +appears that Teddy is a novel algorithm. More than that, in my experience, it +completely blows away the competition for short substrings, which is exactly +what we want in a general purpose regex engine. Again, the algorithm appears +to be developed by the authors of [Hyperscan][1_u]. Hyperscan was open sourced +late 2015, and no earlier history could be found. Therefore, tracking the exact +provenance of the algorithm with respect to the published literature seems +difficult. + +At a high level, Teddy works somewhat similarly to the fingerprint algorithms +published by Faro and Kulekci, but Teddy does it in a way that scales a bit +better. Namely: + +1. Teddy's core algorithm scans the haystack in 16 (for SSE, or 32 for AVX) + byte chunks. 16 (or 32) is significant because it corresponds to the number + of bytes in a SIMD vector. +2. Bitwise operations are performed on each chunk to discover if any region of + it matches a set of precomputed fingerprints from the patterns. If there are + matches, then a verification step is performed. In this implementation, our + verification step is naive. This can be improved upon. + +The details to make this work are quite clever. First, we must choose how to +pick our fingerprints. In Hyperscan's implementation, I *believe* they use the +last N bytes of each substring, where N must be at least the minimum length of +any substring in the set being searched. In this implementation, we use the +first N bytes of each substring. (The tradeoffs between these choices aren't +yet clear to me.) 
We then must figure out how to quickly test whether an +occurrence of any fingerprint from the set of patterns appears in a 16 byte +block from the haystack. To keep things simple, let's assume N = 1 and examine +some examples to motivate the approach. Here are our patterns: + +```ignore +foo +bar +baz +``` + +The corresponding fingerprints, for N = 1, are `f`, `b` and `b`. Now let's set +our 16 byte block to: + +```ignore +bat cat foo bump +xxxxxxxxxxxxxxxx +``` + +To cut to the chase, Teddy works by using bitsets. In particular, Teddy creates +a mask that allows us to quickly compute membership of a fingerprint in a 16 +byte block that also tells which pattern the fingerprint corresponds to. In +this case, our fingerprint is a single byte, so an appropriate abstraction is +a map from a single byte to a list of patterns that contain that fingerprint: + +```ignore +f |--> foo +b |--> bar, baz +``` + +Now, all we need to do is figure out how to represent this map in vector space +and use normal SIMD operations to perform a lookup. The first simplification +we can make is to represent our patterns as bit fields occupying a single +byte. This is important, because a single SIMD vector can store 16 bytes. + +```ignore +f |--> 00000001 +b |--> 00000010, 00000100 +``` + +How do we perform lookup though? It turns out that SSSE3 introduced a very cool +instruction called PSHUFB. The instruction takes two SIMD vectors, `A` and `B`, +and returns a third vector `C`. All vectors are treated as 16 8-bit integers. +`C` is formed by `C[i] = A[B[i]]`. (This is a bit of a simplification, but true +for the purposes of this algorithm. For full details, see [Intel's Intrinsics +Guide][5_u].) This essentially lets us use the values in `B` to lookup values +in `A`. + +If we could somehow cause `B` to contain our 16 byte block from the haystack, +and if `A` could contain our bitmasks, then we'd end up with something like +this for `A`: + +```ignore + 0x00 0x01 ... 0x62 ... 0x66 ... 0xFF +A = 0 0 00000110 00000001 0 +``` + +And if `B` contains our window from our haystack, we could use shuffle to take +the values from `B` and use them to look up our bitsets in `A`. But of course, +we can't do this because `A` in the above example contains 256 bytes, which +is much larger than the size of a SIMD vector. + +Nybbles to the rescue! A nybble is 4 bits. Instead of one mask to hold all of +our bitsets, we can use two masks, where one mask corresponds to the lower four +bits of our fingerprint and the other mask corresponds to the upper four bits. +So our map now looks like: + +```ignore +'f' & 0xF = 0x6 |--> 00000001 +'f' >> 4 = 0x6 |--> 00000111 +'b' & 0xF = 0x2 |--> 00000110 +'b' >> 4 = 0x6 |--> 00000111 +``` + +Notice that the bitsets for each nybble correspond to the union of all +fingerprints that contain that nybble. For example, both `f` and `b` have the +same upper 4 bits but differ on the lower 4 bits. Putting this together, we +have `A0`, `A1` and `B`, where `A0` is our mask for the lower nybble, `A1` is +our mask for the upper nybble and `B` is our 16 byte block from the haystack: + +```ignore + 0x00 0x01 0x02 0x03 ... 0x06 ... 0xF +A0 = 0 0 00000110 0 00000001 0 +A1 = 0 0 0 0 00000111 0 +B = b a t _ t p +B = 0x62 0x61 0x74 0x20 0x74 0x70 +``` + +But of course, we can't use `B` with `PSHUFB` yet, since its values are 8 bits, +and we need indexes that are at most 4 bits (corresponding to one of 16 +values). We can apply the same transformation to split `B` into lower and upper +nybbles as we did `A`. 
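+In scalar terms, the split is just a mask and a shift applied to each byte of
+the block, which maps cleanly onto SIMD `AND` and shift instructions:
+
+```ignore
+B0[i] = B[i] & 0xF   // lower nybble
+B1[i] = B[i] >> 4    // upper nybble
+```
+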
As before, `B0` corresponds to the lower nybbles and +`B1` corresponds to the upper nybbles: + +```ignore + b a t _ c a t _ f o o _ b u m p +B0 = 0x2 0x1 0x4 0x0 0x3 0x1 0x4 0x0 0x6 0xF 0xF 0x0 0x2 0x5 0xD 0x0 +B1 = 0x6 0x6 0x7 0x2 0x6 0x6 0x7 0x2 0x6 0x6 0x6 0x2 0x6 0x7 0x6 0x7 +``` + +And now we have a nice correspondence. `B0` can index `A0` and `B1` can index +`A1`. Here's what we get when we apply `C0 = PSHUFB(A0, B0)`: + +```ignore + b a ... f o ... p + A0[0x2] A0[0x1] A0[0x6] A0[0xF] A0[0x0] +C0 = 00000110 0 00000001 0 0 +``` + +And `C1 = PSHUFB(A1, B1)`: + +```ignore + b a ... f o ... p + A1[0x6] A1[0x6] A1[0x6] A1[0x6] A1[0x7] +C1 = 00000111 00000111 00000111 00000111 0 +``` + +Notice how neither one of `C0` or `C1` is guaranteed to report fully correct +results all on its own. For example, `C1` claims that `b` is a fingerprint for +the pattern `foo` (since `A1[0x6] = 00000111`), and that `o` is a fingerprint +for all of our patterns. But if we combined `C0` and `C1` with an `AND` +operation: + +```ignore + b a ... f o ... p +C = 00000110 0 00000001 0 0 +``` + +Then we now have that `C[i]` contains a bitset corresponding to the matching +fingerprints in a haystack's 16 byte block, where `i` is the `ith` byte in that +block. + +Once we have that, we can look for the position of the least significant bit +in `C`. (Least significant because we only target little endian here. Thus, +the least significant bytes correspond to bytes in our haystack at a lower +address.) That position, modulo `8`, gives us the pattern that the fingerprint +matches. That position, integer divided by `8`, also gives us the byte offset +that the fingerprint occurs in inside the 16 byte haystack block. Using those +two pieces of information, we can run a verification procedure that tries +to match all substrings containing that fingerprint at that position in the +haystack. + + +# Implementation notes + +The problem with the algorithm as described above is that it uses a single byte +for a fingerprint. This will work well if the fingerprints are rare in the +haystack (e.g., capital letters or special characters in normal English text), +but if the fingerprints are common, you'll wind up spending too much time in +the verification step, which effectively negates the performance benefits of +scanning 16 bytes at a time. Remember, the key to the performance of this +algorithm is to do as little work as possible per 16 (or 32) bytes. + +This algorithm can be extrapolated in a relatively straight-forward way to use +larger fingerprints. That is, instead of a single byte prefix, we might use a +two or three byte prefix. The implementation here implements N = {1, 2, 3} +and always picks the largest N possible. The rationale is that the bigger the +fingerprint, the fewer verification steps we'll do. Of course, if N is too +large, then we'll end up doing too much on each step. + +The way to extend it is: + +1. Add a mask for each byte in the fingerprint. (Remember that each mask is + composed of two SIMD vectors.) This results in a value of `C` for each byte + in the fingerprint while searching. +2. When testing each 16 (or 32) byte block, each value of `C` must be shifted + so that they are aligned. Once aligned, they should all be `AND`'d together. + This will give you only the bitsets corresponding to the full match of the + fingerprint. 
+To do this, one needs to save the last byte (for N=2) or last
+two bytes (for N=3) from the previous iteration, and then line them up with
+the first one or two bytes of the next iteration.
+
+## Verification
+
+Verification generally follows the procedure outlined above. The tricky parts
+are in the right formulation of operations to get our bits out of our vectors.
+We have a limited set of operations available to us on SIMD vectors as 128-bit
+or 256-bit numbers, so we wind up needing to rip out 2 (or 4) 64-bit integers
+from our vectors, and then run our verification step on each of those. The
+verification step looks at the least significant bit set, and from its
+position, we can derive the byte offset and bucket. (Again, as described
+above.) Once we know the bucket, we do a fairly naive exhaustive search for
+every literal in that bucket. (Hyperscan is a bit smarter here and uses a hash
+table, but I haven't had time to thoroughly explore that. A few initial
+half-hearted attempts resulted in worse performance.)
+
+## AVX
+
+The AVX version of Teddy extrapolates almost perfectly from the SSE version.
+The only hiccup is that PALIGNR is used to align chunks in the 128-bit
+version, and there is no equivalent instruction in AVX. AVX does have
+VPALIGNR, but it only works within 128-bit lanes. So there's a bit of
+tomfoolery to get around this by shuffling the vectors before calling
+VPALIGNR.
+
+The only other aspect to AVX is that since our masks are still fundamentally
+16 bytes (0x0-0xF), they are duplicated to 32 bytes, so that they can apply to
+32-byte chunks.
+
+## Fat Teddy
+
+In the version of Teddy described above, 8 buckets are used to group patterns
+that we want to search for. However, when AVX is available, we can extend the
+number of buckets to 16 by permitting each byte in our masks to use 16 bits
+instead of 8 bits to represent the buckets it belongs to. (This variant is
+also in Hyperscan.) However, what we give up is the ability to scan 32 bytes
+at a time, even though we're using AVX. Instead, we have to scan 16 bytes at a
+time. What we gain, though, is (hopefully) less work in our verification
+routine. If patterns are spread out across more buckets, then there should be
+fewer false positives overall. In general, Fat Teddy permits us to grow our
+capacity a bit and search for more literals before Teddy gets overwhelmed.
+
+The tricky part of Fat Teddy is in how we adjust our masks and our verification
+procedure. For the masks, we simply represent the first 8 buckets in each of
+the low 16 bytes, and then the second 8 buckets in each of the high 16 bytes.
+Then, in the search loop, instead of loading 32 bytes from the haystack, we
+load the same 16 bytes from the haystack into both the low and high 16 byte
+portions of our 256-bit vector. So for example, a mask might look like this:
+
+       bits: 00100001 00000000 ... 11000000 00000000 00000001 ... 00000000
+       byte:       31       30           16       15       14            0
+     offset:       15       14            0       15       14            0
+    buckets:     8-15     8-15         8-15      0-7      0-7          0-7
+
+Where `byte` is the position in the vector (higher numbers corresponding to
+more significant bits), `offset` is the corresponding position in the haystack
+chunk, and `buckets` corresponds to the bucket assignments for that particular
+byte.
+
+In particular, notice that the bucket assignments for offset `0` are spread
+out between bytes `0` and `16`. This works well for the chunk-by-chunk search
+procedure, but verification really wants to process all bucket assignments for
+each offset at once.
Otherwise, we might wind up finding a match at offset +`1` in one the first 8 buckets, when we really should have reported a match +at offset `0` in one of the second 8 buckets. (Because we want the leftmost +match.) + +Thus, for verification, we rearrange the above vector such that it is a +sequence of 16-bit integers, where the least significant 16-bit integer +corresponds to all of the bucket assignments for offset `0`. So with the +above vector, the least significant 16-bit integer would be + + 11000000 000000 + +which was taken from bytes `16` and `0`. Then the verification step pretty much +runs as described, except with 16 buckets instead of 8. + + +# References + +- **[1]** [Hyperscan on GitHub](https://github.com/intel/hyperscan), + [webpage](https://www.hyperscan.io/) +- **[2a]** Ben-Kiki, O., Bille, P., Breslauer, D., Gasieniec, L., Grossi, R., + & Weimann, O. (2011). + _Optimal packed string matching_. + In LIPIcs-Leibniz International Proceedings in Informatics (Vol. 13). + Schloss Dagstuhl-Leibniz-Zentrum fuer Informatik. + DOI: 10.4230/LIPIcs.FSTTCS.2011.423. + [PDF](https://drops.dagstuhl.de/opus/volltexte/2011/3355/pdf/37.pdf). +- **[2b]** Ben-Kiki, O., Bille, P., Breslauer, D., Ga̧sieniec, L., Grossi, R., + & Weimann, O. (2014). + _Towards optimal packed string matching_. + Theoretical Computer Science, 525, 111-129. + DOI: 10.1016/j.tcs.2013.06.013. + [PDF](https://www.cs.haifa.ac.il/~oren/Publications/bpsm.pdf). +- **[3]** Bille, P. (2011). + _Fast searching in packed strings_. + Journal of Discrete Algorithms, 9(1), 49-56. + DOI: 10.1016/j.jda.2010.09.003. + [PDF](https://www.sciencedirect.com/science/article/pii/S1570866710000353). +- **[4a]** Faro, S., & Külekci, M. O. (2012, October). + _Fast multiple string matching using streaming SIMD extensions technology_. + In String Processing and Information Retrieval (pp. 217-228). + Springer Berlin Heidelberg. + DOI: 10.1007/978-3-642-34109-0_23. + [PDF](https://www.dmi.unict.it/faro/papers/conference/faro32.pdf). +- **[4b]** Faro, S., & Külekci, M. O. (2013, September). + _Towards a Very Fast Multiple String Matching Algorithm for Short Patterns_. + In Stringology (pp. 78-91). + [PDF](https://www.dmi.unict.it/faro/papers/conference/faro36.pdf). +- **[4c]** Faro, S., & Külekci, M. O. (2013, January). + _Fast packed string matching for short patterns_. + In Proceedings of the Meeting on Algorithm Engineering & Expermiments + (pp. 113-121). + Society for Industrial and Applied Mathematics. + [PDF](https://arxiv.org/pdf/1209.6449.pdf). +- **[4d]** Faro, S., & Külekci, M. O. (2014). + _Fast and flexible packed string matching_. + Journal of Discrete Algorithms, 28, 61-72. + DOI: 10.1016/j.jda.2014.07.003. + +[1_u]: https://github.com/intel/hyperscan +[5_u]: https://software.intel.com/sites/landingpage/IntrinsicsGuide diff --git a/vendor/aho-corasick/src/packed/teddy/builder.rs b/vendor/aho-corasick/src/packed/teddy/builder.rs new file mode 100644 index 00000000000000..e9bb68b299f081 --- /dev/null +++ b/vendor/aho-corasick/src/packed/teddy/builder.rs @@ -0,0 +1,792 @@ +use core::{ + fmt::Debug, + panic::{RefUnwindSafe, UnwindSafe}, +}; + +use alloc::sync::Arc; + +use crate::packed::{ext::Pointer, pattern::Patterns, teddy::generic::Match}; + +/// A builder for constructing a Teddy matcher. +/// +/// The builder primarily permits fine grained configuration of the Teddy +/// matcher. Most options are made only available for testing/benchmarking +/// purposes. 
In reality, options are automatically determined by the nature +/// and number of patterns given to the builder. +#[derive(Clone, Debug)] +pub(crate) struct Builder { + /// When none, this is automatically determined. Otherwise, `false` means + /// slim Teddy is used (8 buckets) and `true` means fat Teddy is used + /// (16 buckets). Fat Teddy requires AVX2, so if that CPU feature isn't + /// available and Fat Teddy was requested, no matcher will be built. + only_fat: Option, + /// When none, this is automatically determined. Otherwise, `false` means + /// that 128-bit vectors will be used (up to SSSE3 instructions) where as + /// `true` means that 256-bit vectors will be used. As with `fat`, if + /// 256-bit vectors are requested and they aren't available, then a + /// searcher will not be built. + only_256bit: Option, + /// When true (the default), the number of patterns will be used as a + /// heuristic for refusing construction of a Teddy searcher. The point here + /// is that too many patterns can overwhelm Teddy. But this can be disabled + /// in cases where the caller knows better. + heuristic_pattern_limits: bool, +} + +impl Default for Builder { + fn default() -> Builder { + Builder::new() + } +} + +impl Builder { + /// Create a new builder for configuring a Teddy matcher. + pub(crate) fn new() -> Builder { + Builder { + only_fat: None, + only_256bit: None, + heuristic_pattern_limits: true, + } + } + + /// Build a matcher for the set of patterns given. If a matcher could not + /// be built, then `None` is returned. + /// + /// Generally, a matcher isn't built if the necessary CPU features aren't + /// available, an unsupported target or if the searcher is believed to be + /// slower than standard techniques (i.e., if there are too many literals). + pub(crate) fn build(&self, patterns: Arc) -> Option { + self.build_imp(patterns) + } + + /// Require the use of Fat (true) or Slim (false) Teddy. Fat Teddy uses + /// 16 buckets where as Slim Teddy uses 8 buckets. More buckets are useful + /// for a larger set of literals. + /// + /// `None` is the default, which results in an automatic selection based + /// on the number of literals and available CPU features. + pub(crate) fn only_fat(&mut self, yes: Option) -> &mut Builder { + self.only_fat = yes; + self + } + + /// Request the use of 256-bit vectors (true) or 128-bit vectors (false). + /// Generally, a larger vector size is better since it either permits + /// matching more patterns or matching more bytes in the haystack at once. + /// + /// `None` is the default, which results in an automatic selection based on + /// the number of literals and available CPU features. + pub(crate) fn only_256bit(&mut self, yes: Option) -> &mut Builder { + self.only_256bit = yes; + self + } + + /// Request that heuristic limitations on the number of patterns be + /// employed. This useful to disable for benchmarking where one wants to + /// explore how Teddy performs on large number of patterns even if the + /// heuristics would otherwise refuse construction. + /// + /// This is enabled by default. 
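+    ///
+    /// For example, a benchmark harness might disable the limit like this
+    /// (illustrative only; `pats` stands in for an `Arc<Patterns>` built
+    /// elsewhere):
+    ///
+    /// ```ignore
+    /// let searcher = Builder::new()
+    ///     .heuristic_pattern_limits(false)
+    ///     .build(pats);
+    /// ```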
+ pub(crate) fn heuristic_pattern_limits( + &mut self, + yes: bool, + ) -> &mut Builder { + self.heuristic_pattern_limits = yes; + self + } + + fn build_imp(&self, patterns: Arc) -> Option { + let patlimit = self.heuristic_pattern_limits; + // There's no particular reason why we limit ourselves to little endian + // here, but it seems likely that some parts of Teddy as they are + // currently written (e.g., the uses of `trailing_zeros`) are likely + // wrong on non-little-endian targets. Such things are likely easy to + // fix, but at the time of writing (2023/09/18), I actually do not know + // how to test this code on a big-endian target. So for now, we're + // conservative and just bail out. + if !cfg!(target_endian = "little") { + debug!("skipping Teddy because target isn't little endian"); + return None; + } + // Too many patterns will overwhelm Teddy and likely lead to slow + // downs, typically in the verification step. + if patlimit && patterns.len() > 64 { + debug!("skipping Teddy because of too many patterns"); + return None; + } + + #[cfg(all(target_arch = "x86_64", target_feature = "sse2"))] + { + use self::x86_64::{FatAVX2, SlimAVX2, SlimSSSE3}; + + let mask_len = core::cmp::min(4, patterns.minimum_len()); + let beefy = patterns.len() > 32; + let has_avx2 = self::x86_64::is_available_avx2(); + let has_ssse3 = has_avx2 || self::x86_64::is_available_ssse3(); + let use_avx2 = if self.only_256bit == Some(true) { + if !has_avx2 { + debug!( + "skipping Teddy because avx2 was demanded but unavailable" + ); + return None; + } + true + } else if self.only_256bit == Some(false) { + if !has_ssse3 { + debug!( + "skipping Teddy because ssse3 was demanded but unavailable" + ); + return None; + } + false + } else if !has_ssse3 && !has_avx2 { + debug!( + "skipping Teddy because ssse3 and avx2 are unavailable" + ); + return None; + } else { + has_avx2 + }; + let fat = match self.only_fat { + None => use_avx2 && beefy, + Some(false) => false, + Some(true) if !use_avx2 => { + debug!( + "skipping Teddy because fat was demanded, but fat \ + Teddy requires avx2 which is unavailable" + ); + return None; + } + Some(true) => true, + }; + // Just like for aarch64, it's possible that too many patterns will + // overhwelm Teddy. Unlike aarch64 though, we have Fat teddy which + // helps things scale a bit more by spreading patterns over more + // buckets. + // + // These thresholds were determined by looking at the measurements + // for the rust/aho-corasick/packed/leftmost-first and + // rust/aho-corasick/dfa/leftmost-first engines on the `teddy/` + // benchmarks. 
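+            //
+            // Concretely, only the 1-byte mask case gets an extra limit here
+            // (16 patterns). Longer masks generate fewer candidates, so they
+            // are only subject to the global 64 pattern limit checked above.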
+ if patlimit && mask_len == 1 && patterns.len() > 16 { + debug!( + "skipping Teddy (mask len: 1) because there are \ + too many patterns", + ); + return None; + } + match (mask_len, use_avx2, fat) { + (1, false, _) => { + debug!("Teddy choice: 128-bit slim, 1 byte"); + SlimSSSE3::<1>::new(&patterns) + } + (1, true, false) => { + debug!("Teddy choice: 256-bit slim, 1 byte"); + SlimAVX2::<1>::new(&patterns) + } + (1, true, true) => { + debug!("Teddy choice: 256-bit fat, 1 byte"); + FatAVX2::<1>::new(&patterns) + } + (2, false, _) => { + debug!("Teddy choice: 128-bit slim, 2 bytes"); + SlimSSSE3::<2>::new(&patterns) + } + (2, true, false) => { + debug!("Teddy choice: 256-bit slim, 2 bytes"); + SlimAVX2::<2>::new(&patterns) + } + (2, true, true) => { + debug!("Teddy choice: 256-bit fat, 2 bytes"); + FatAVX2::<2>::new(&patterns) + } + (3, false, _) => { + debug!("Teddy choice: 128-bit slim, 3 bytes"); + SlimSSSE3::<3>::new(&patterns) + } + (3, true, false) => { + debug!("Teddy choice: 256-bit slim, 3 bytes"); + SlimAVX2::<3>::new(&patterns) + } + (3, true, true) => { + debug!("Teddy choice: 256-bit fat, 3 bytes"); + FatAVX2::<3>::new(&patterns) + } + (4, false, _) => { + debug!("Teddy choice: 128-bit slim, 4 bytes"); + SlimSSSE3::<4>::new(&patterns) + } + (4, true, false) => { + debug!("Teddy choice: 256-bit slim, 4 bytes"); + SlimAVX2::<4>::new(&patterns) + } + (4, true, true) => { + debug!("Teddy choice: 256-bit fat, 4 bytes"); + FatAVX2::<4>::new(&patterns) + } + _ => { + debug!("no supported Teddy configuration found"); + None + } + } + } + #[cfg(all( + target_arch = "aarch64", + target_feature = "neon", + target_endian = "little" + ))] + { + use self::aarch64::SlimNeon; + + let mask_len = core::cmp::min(4, patterns.minimum_len()); + if self.only_256bit == Some(true) { + debug!( + "skipping Teddy because 256-bits were demanded \ + but unavailable" + ); + return None; + } + if self.only_fat == Some(true) { + debug!( + "skipping Teddy because fat was demanded but unavailable" + ); + } + // Since we don't have Fat teddy in aarch64 (I think we'd want at + // least 256-bit vectors for that), we need to be careful not to + // allow too many patterns as it might overwhelm Teddy. Generally + // speaking, as the mask length goes up, the more patterns we can + // handle because the mask length results in fewer candidates + // generated. + // + // These thresholds were determined by looking at the measurements + // for the rust/aho-corasick/packed/leftmost-first and + // rust/aho-corasick/dfa/leftmost-first engines on the `teddy/` + // benchmarks. 
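+            //
+            // The per-mask-length thresholds used below are 16 patterns for
+            // a 1-byte mask, 32 for a 2-byte mask and 48 for a 3-byte mask;
+            // the 4-byte case gets no extra limit.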
+ match mask_len { + 1 => { + if patlimit && patterns.len() > 16 { + debug!( + "skipping Teddy (mask len: 1) because there are \ + too many patterns", + ); + } + debug!("Teddy choice: 128-bit slim, 1 byte"); + SlimNeon::<1>::new(&patterns) + } + 2 => { + if patlimit && patterns.len() > 32 { + debug!( + "skipping Teddy (mask len: 2) because there are \ + too many patterns", + ); + } + debug!("Teddy choice: 128-bit slim, 2 bytes"); + SlimNeon::<2>::new(&patterns) + } + 3 => { + if patlimit && patterns.len() > 48 { + debug!( + "skipping Teddy (mask len: 3) because there are \ + too many patterns", + ); + } + debug!("Teddy choice: 128-bit slim, 3 bytes"); + SlimNeon::<3>::new(&patterns) + } + 4 => { + debug!("Teddy choice: 128-bit slim, 4 bytes"); + SlimNeon::<4>::new(&patterns) + } + _ => { + debug!("no supported Teddy configuration found"); + None + } + } + } + #[cfg(not(any( + all(target_arch = "x86_64", target_feature = "sse2"), + all( + target_arch = "aarch64", + target_feature = "neon", + target_endian = "little" + ) + )))] + { + None + } + } +} + +/// A searcher that dispatches to one of several possible Teddy variants. +#[derive(Clone, Debug)] +pub(crate) struct Searcher { + /// The Teddy variant we use. We use dynamic dispatch under the theory that + /// it results in better codegen then a enum, although this is a specious + /// claim. + /// + /// This `Searcher` is essentially a wrapper for a `SearcherT` trait + /// object. We just make `memory_usage` and `minimum_len` available without + /// going through dynamic dispatch. + imp: Arc, + /// Total heap memory used by the Teddy variant. + memory_usage: usize, + /// The minimum haystack length this searcher can handle. It is intended + /// for callers to use some other search routine (such as Rabin-Karp) in + /// cases where the haystack (or remainer of the haystack) is too short. + minimum_len: usize, +} + +impl Searcher { + /// Look for the leftmost occurrence of any pattern in this search in the + /// given haystack starting at the given position. + /// + /// # Panics + /// + /// This panics when `haystack[at..].len()` is less than the minimum length + /// for this haystack. + #[inline(always)] + pub(crate) fn find( + &self, + haystack: &[u8], + at: usize, + ) -> Option { + // SAFETY: The Teddy implementations all require a minimum haystack + // length, and this is required for safety. Therefore, we assert it + // here in order to make this method sound. + assert!(haystack[at..].len() >= self.minimum_len); + let hayptr = haystack.as_ptr(); + // SAFETY: Construction of the searcher guarantees that we are able + // to run it in the current environment (i.e., we won't get an AVX2 + // searcher on a x86-64 CPU without AVX2 support). Also, the pointers + // are valid as they are derived directly from a borrowed slice. + let teddym = unsafe { + self.imp.find(hayptr.add(at), hayptr.add(haystack.len()))? + }; + let start = teddym.start().as_usize().wrapping_sub(hayptr.as_usize()); + let end = teddym.end().as_usize().wrapping_sub(hayptr.as_usize()); + let span = crate::Span { start, end }; + // OK because we won't permit the construction of a searcher that + // could report a pattern ID bigger than what can fit in the crate-wide + // PatternID type. + let pid = crate::PatternID::new_unchecked(teddym.pattern().as_usize()); + let m = crate::Match::new(pid, span); + Some(m) + } + + /// Returns the approximate total amount of heap used by this type, in + /// units of bytes. 
+ #[inline(always)] + pub(crate) fn memory_usage(&self) -> usize { + self.memory_usage + } + + /// Returns the minimum length, in bytes, that a haystack must be in order + /// to use it with this searcher. + #[inline(always)] + pub(crate) fn minimum_len(&self) -> usize { + self.minimum_len + } +} + +/// A trait that provides dynamic dispatch over the different possible Teddy +/// variants on the same algorithm. +/// +/// On `x86_64` for example, it isn't known until runtime which of 12 possible +/// variants will be used. One might use one of the four slim 128-bit vector +/// variants, or one of the four 256-bit vector variants or even one of the +/// four fat 256-bit vector variants. +/// +/// Since this choice is generally made when the Teddy searcher is constructed +/// and this choice is based on the patterns given and what the current CPU +/// supports, it follows that there must be some kind of indirection at search +/// time that "selects" the variant chosen at build time. +/// +/// There are a few different ways to go about this. One approach is to use an +/// enum. It works fine, but in my experiments, this generally results in worse +/// codegen. Another approach, which is what we use here, is dynamic dispatch +/// via a trait object. We basically implement this trait for each possible +/// variant, select the variant we want at build time and convert it to a +/// trait object for use at search time. +/// +/// Another approach is to use function pointers and stick each of the possible +/// variants into a union. This is essentially isomorphic to the dynamic +/// dispatch approach, but doesn't require any allocations. Since this crate +/// requires `alloc`, there's no real reason (AFAIK) to go down this path. (The +/// `memchr` crate does this.) +trait SearcherT: + Debug + Send + Sync + UnwindSafe + RefUnwindSafe + 'static +{ + /// Execute a search on the given haystack (identified by `start` and `end` + /// raw pointers). + /// + /// # Safety + /// + /// Essentially, the `start` and `end` pointers must be valid and point + /// to a haystack one can read. As long as you derive them from, for + /// example, a `&[u8]`, they should automatically satisfy all of the safety + /// obligations: + /// + /// * Both `start` and `end` must be valid for reads. + /// * Both `start` and `end` must point to an initialized value. + /// * Both `start` and `end` must point to the same allocated object and + /// must either be in bounds or at most one byte past the end of the + /// allocated object. + /// * Both `start` and `end` must be _derived from_ a pointer to the same + /// object. + /// * The distance between `start` and `end` must not overflow `isize`. + /// * The distance being in bounds must not rely on "wrapping around" the + /// address space. + /// * It must be the case that `start <= end`. + /// * `end - start` must be greater than the minimum length for this + /// searcher. + /// + /// Also, it is expected that implementations of this trait will tag this + /// method with a `target_feature` attribute. Callers must ensure that + /// they are executing this method in an environment where that attribute + /// is valid. 
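+    ///
+    /// (In this crate, `Searcher::find` discharges these obligations by
+    /// deriving both pointers from a borrowed `&[u8]` and asserting the
+    /// minimum haystack length before calling this method.)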
+ unsafe fn find(&self, start: *const u8, end: *const u8) -> Option; +} + +#[cfg(all(target_arch = "x86_64", target_feature = "sse2"))] +mod x86_64 { + use core::arch::x86_64::{__m128i, __m256i}; + + use alloc::sync::Arc; + + use crate::packed::{ + ext::Pointer, + pattern::Patterns, + teddy::generic::{self, Match}, + }; + + use super::{Searcher, SearcherT}; + + #[derive(Clone, Debug)] + pub(super) struct SlimSSSE3 { + slim128: generic::Slim<__m128i, BYTES>, + } + + // Defines SlimSSSE3 wrapper functions for 1, 2, 3 and 4 bytes. + macro_rules! slim_ssse3 { + ($len:expr) => { + impl SlimSSSE3<$len> { + /// Creates a new searcher using "slim" Teddy with 128-bit + /// vectors. If SSSE3 is not available in the current + /// environment, then this returns `None`. + pub(super) fn new( + patterns: &Arc, + ) -> Option { + if !is_available_ssse3() { + return None; + } + Some(unsafe { SlimSSSE3::<$len>::new_unchecked(patterns) }) + } + + /// Creates a new searcher using "slim" Teddy with 256-bit + /// vectors without checking whether SSSE3 is available or not. + /// + /// # Safety + /// + /// Callers must ensure that SSSE3 is available in the current + /// environment. + #[target_feature(enable = "ssse3")] + unsafe fn new_unchecked(patterns: &Arc) -> Searcher { + let slim128 = generic::Slim::<__m128i, $len>::new( + Arc::clone(patterns), + ); + let memory_usage = slim128.memory_usage(); + let minimum_len = slim128.minimum_len(); + let imp = Arc::new(SlimSSSE3 { slim128 }); + Searcher { imp, memory_usage, minimum_len } + } + } + + impl SearcherT for SlimSSSE3<$len> { + #[target_feature(enable = "ssse3")] + #[inline] + unsafe fn find( + &self, + start: *const u8, + end: *const u8, + ) -> Option { + // SAFETY: All obligations except for `target_feature` are + // passed to the caller. Our use of `target_feature` is + // safe because construction of this type requires that the + // requisite target features are available. + self.slim128.find(start, end) + } + } + }; + } + + slim_ssse3!(1); + slim_ssse3!(2); + slim_ssse3!(3); + slim_ssse3!(4); + + #[derive(Clone, Debug)] + pub(super) struct SlimAVX2 { + slim128: generic::Slim<__m128i, BYTES>, + slim256: generic::Slim<__m256i, BYTES>, + } + + // Defines SlimAVX2 wrapper functions for 1, 2, 3 and 4 bytes. + macro_rules! slim_avx2 { + ($len:expr) => { + impl SlimAVX2<$len> { + /// Creates a new searcher using "slim" Teddy with 256-bit + /// vectors. If AVX2 is not available in the current + /// environment, then this returns `None`. + pub(super) fn new( + patterns: &Arc, + ) -> Option { + if !is_available_avx2() { + return None; + } + Some(unsafe { SlimAVX2::<$len>::new_unchecked(patterns) }) + } + + /// Creates a new searcher using "slim" Teddy with 256-bit + /// vectors without checking whether AVX2 is available or not. + /// + /// # Safety + /// + /// Callers must ensure that AVX2 is available in the current + /// environment. 
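+                ///
+                /// (Within this module, `SlimAVX2::new` checks
+                /// `is_available_avx2` before calling this constructor.)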
+ #[target_feature(enable = "avx2")] + unsafe fn new_unchecked(patterns: &Arc) -> Searcher { + let slim128 = generic::Slim::<__m128i, $len>::new( + Arc::clone(&patterns), + ); + let slim256 = generic::Slim::<__m256i, $len>::new( + Arc::clone(&patterns), + ); + let memory_usage = + slim128.memory_usage() + slim256.memory_usage(); + let minimum_len = slim128.minimum_len(); + let imp = Arc::new(SlimAVX2 { slim128, slim256 }); + Searcher { imp, memory_usage, minimum_len } + } + } + + impl SearcherT for SlimAVX2<$len> { + #[target_feature(enable = "avx2")] + #[inline] + unsafe fn find( + &self, + start: *const u8, + end: *const u8, + ) -> Option { + // SAFETY: All obligations except for `target_feature` are + // passed to the caller. Our use of `target_feature` is + // safe because construction of this type requires that the + // requisite target features are available. + let len = end.distance(start); + if len < self.slim256.minimum_len() { + self.slim128.find(start, end) + } else { + self.slim256.find(start, end) + } + } + } + }; + } + + slim_avx2!(1); + slim_avx2!(2); + slim_avx2!(3); + slim_avx2!(4); + + #[derive(Clone, Debug)] + pub(super) struct FatAVX2 { + fat256: generic::Fat<__m256i, BYTES>, + } + + // Defines SlimAVX2 wrapper functions for 1, 2, 3 and 4 bytes. + macro_rules! fat_avx2 { + ($len:expr) => { + impl FatAVX2<$len> { + /// Creates a new searcher using "slim" Teddy with 256-bit + /// vectors. If AVX2 is not available in the current + /// environment, then this returns `None`. + pub(super) fn new( + patterns: &Arc, + ) -> Option { + if !is_available_avx2() { + return None; + } + Some(unsafe { FatAVX2::<$len>::new_unchecked(patterns) }) + } + + /// Creates a new searcher using "slim" Teddy with 256-bit + /// vectors without checking whether AVX2 is available or not. + /// + /// # Safety + /// + /// Callers must ensure that AVX2 is available in the current + /// environment. + #[target_feature(enable = "avx2")] + unsafe fn new_unchecked(patterns: &Arc) -> Searcher { + let fat256 = generic::Fat::<__m256i, $len>::new( + Arc::clone(&patterns), + ); + let memory_usage = fat256.memory_usage(); + let minimum_len = fat256.minimum_len(); + let imp = Arc::new(FatAVX2 { fat256 }); + Searcher { imp, memory_usage, minimum_len } + } + } + + impl SearcherT for FatAVX2<$len> { + #[target_feature(enable = "avx2")] + #[inline] + unsafe fn find( + &self, + start: *const u8, + end: *const u8, + ) -> Option { + // SAFETY: All obligations except for `target_feature` are + // passed to the caller. Our use of `target_feature` is + // safe because construction of this type requires that the + // requisite target features are available. 
+ self.fat256.find(start, end) + } + } + }; + } + + fat_avx2!(1); + fat_avx2!(2); + fat_avx2!(3); + fat_avx2!(4); + + #[inline] + pub(super) fn is_available_ssse3() -> bool { + #[cfg(not(target_feature = "sse2"))] + { + false + } + #[cfg(target_feature = "sse2")] + { + #[cfg(target_feature = "ssse3")] + { + true + } + #[cfg(not(target_feature = "ssse3"))] + { + #[cfg(feature = "std")] + { + std::is_x86_feature_detected!("ssse3") + } + #[cfg(not(feature = "std"))] + { + false + } + } + } + } + + #[inline] + pub(super) fn is_available_avx2() -> bool { + #[cfg(not(target_feature = "sse2"))] + { + false + } + #[cfg(target_feature = "sse2")] + { + #[cfg(target_feature = "avx2")] + { + true + } + #[cfg(not(target_feature = "avx2"))] + { + #[cfg(feature = "std")] + { + std::is_x86_feature_detected!("avx2") + } + #[cfg(not(feature = "std"))] + { + false + } + } + } + } +} + +#[cfg(all( + target_arch = "aarch64", + target_feature = "neon", + target_endian = "little" +))] +mod aarch64 { + use core::arch::aarch64::uint8x16_t; + + use alloc::sync::Arc; + + use crate::packed::{ + pattern::Patterns, + teddy::generic::{self, Match}, + }; + + use super::{Searcher, SearcherT}; + + #[derive(Clone, Debug)] + pub(super) struct SlimNeon { + slim128: generic::Slim, + } + + // Defines SlimSSSE3 wrapper functions for 1, 2, 3 and 4 bytes. + macro_rules! slim_neon { + ($len:expr) => { + impl SlimNeon<$len> { + /// Creates a new searcher using "slim" Teddy with 128-bit + /// vectors. If SSSE3 is not available in the current + /// environment, then this returns `None`. + pub(super) fn new( + patterns: &Arc, + ) -> Option { + Some(unsafe { SlimNeon::<$len>::new_unchecked(patterns) }) + } + + /// Creates a new searcher using "slim" Teddy with 256-bit + /// vectors without checking whether SSSE3 is available or not. + /// + /// # Safety + /// + /// Callers must ensure that SSSE3 is available in the current + /// environment. + #[target_feature(enable = "neon")] + unsafe fn new_unchecked(patterns: &Arc) -> Searcher { + let slim128 = generic::Slim::::new( + Arc::clone(patterns), + ); + let memory_usage = slim128.memory_usage(); + let minimum_len = slim128.minimum_len(); + let imp = Arc::new(SlimNeon { slim128 }); + Searcher { imp, memory_usage, minimum_len } + } + } + + impl SearcherT for SlimNeon<$len> { + #[target_feature(enable = "neon")] + #[inline] + unsafe fn find( + &self, + start: *const u8, + end: *const u8, + ) -> Option { + // SAFETY: All obligations except for `target_feature` are + // passed to the caller. Our use of `target_feature` is + // safe because construction of this type requires that the + // requisite target features are available. + self.slim128.find(start, end) + } + } + }; + } + + slim_neon!(1); + slim_neon!(2); + slim_neon!(3); + slim_neon!(4); +} diff --git a/vendor/aho-corasick/src/packed/teddy/generic.rs b/vendor/aho-corasick/src/packed/teddy/generic.rs new file mode 100644 index 00000000000000..2aacd003576069 --- /dev/null +++ b/vendor/aho-corasick/src/packed/teddy/generic.rs @@ -0,0 +1,1382 @@ +use core::fmt::Debug; + +use alloc::{ + boxed::Box, collections::BTreeMap, format, sync::Arc, vec, vec::Vec, +}; + +use crate::{ + packed::{ + ext::Pointer, + pattern::Patterns, + vector::{FatVector, Vector}, + }, + util::int::U32, + PatternID, +}; + +/// A match type specialized to the Teddy implementations below. +/// +/// Essentially, instead of representing a match at byte offsets, we use +/// raw pointers. 
This is because the implementations below operate on raw +/// pointers, and so this is a more natural return type based on how the +/// implementation works. +/// +/// Also, the `PatternID` used here is a `u16`. +#[derive(Clone, Copy, Debug)] +pub(crate) struct Match { + pid: PatternID, + start: *const u8, + end: *const u8, +} + +impl Match { + /// Returns the ID of the pattern that matched. + pub(crate) fn pattern(&self) -> PatternID { + self.pid + } + + /// Returns a pointer into the haystack at which the match starts. + pub(crate) fn start(&self) -> *const u8 { + self.start + } + + /// Returns a pointer into the haystack at which the match ends. + pub(crate) fn end(&self) -> *const u8 { + self.end + } +} + +/// A "slim" Teddy implementation that is generic over both the vector type +/// and the minimum length of the patterns being searched for. +/// +/// Only 1, 2, 3 and 4 bytes are supported as minimum lengths. +#[derive(Clone, Debug)] +pub(crate) struct Slim { + /// A generic data structure for doing "slim" Teddy verification. + teddy: Teddy<8>, + /// The masks used as inputs to the shuffle operation to generate + /// candidates (which are fed into the verification routines). + masks: [Mask; BYTES], +} + +impl Slim { + /// Create a new "slim" Teddy searcher for the given patterns. + /// + /// # Panics + /// + /// This panics when `BYTES` is any value other than 1, 2, 3 or 4. + /// + /// # Safety + /// + /// Callers must ensure that this is okay to call in the current target for + /// the current CPU. + #[inline(always)] + pub(crate) unsafe fn new(patterns: Arc) -> Slim { + assert!( + 1 <= BYTES && BYTES <= 4, + "only 1, 2, 3 or 4 bytes are supported" + ); + let teddy = Teddy::new(patterns); + let masks = SlimMaskBuilder::from_teddy(&teddy); + Slim { teddy, masks } + } + + /// Returns the approximate total amount of heap used by this type, in + /// units of bytes. + #[inline(always)] + pub(crate) fn memory_usage(&self) -> usize { + self.teddy.memory_usage() + } + + /// Returns the minimum length, in bytes, that a haystack must be in order + /// to use it with this searcher. + #[inline(always)] + pub(crate) fn minimum_len(&self) -> usize { + V::BYTES + (BYTES - 1) + } +} + +impl Slim { + /// Look for an occurrences of the patterns in this finder in the haystack + /// given by the `start` and `end` pointers. + /// + /// If no match could be found, then `None` is returned. + /// + /// # Safety + /// + /// The given pointers representing the haystack must be valid to read + /// from. They must also point to a region of memory that is at least the + /// minimum length required by this searcher. + /// + /// Callers must ensure that this is okay to call in the current target for + /// the current CPU. + #[inline(always)] + pub(crate) unsafe fn find( + &self, + start: *const u8, + end: *const u8, + ) -> Option { + let len = end.distance(start); + debug_assert!(len >= self.minimum_len()); + let mut cur = start; + while cur <= end.sub(V::BYTES) { + if let Some(m) = self.find_one(cur, end) { + return Some(m); + } + cur = cur.add(V::BYTES); + } + if cur < end { + cur = end.sub(V::BYTES); + if let Some(m) = self.find_one(cur, end) { + return Some(m); + } + } + None + } + + /// Look for a match starting at the `V::BYTES` at and after `cur`. If + /// there isn't one, then `None` is returned. + /// + /// # Safety + /// + /// The given pointers representing the haystack must be valid to read + /// from. 
They must also point to a region of memory that is at least the + /// minimum length required by this searcher. + /// + /// Callers must ensure that this is okay to call in the current target for + /// the current CPU. + #[inline(always)] + unsafe fn find_one( + &self, + cur: *const u8, + end: *const u8, + ) -> Option { + let c = self.candidate(cur); + if !c.is_zero() { + if let Some(m) = self.teddy.verify(cur, end, c) { + return Some(m); + } + } + None + } + + /// Look for a candidate match (represented as a vector) starting at the + /// `V::BYTES` at and after `cur`. If there isn't one, then a vector with + /// all bits set to zero is returned. + /// + /// # Safety + /// + /// The given pointer representing the haystack must be valid to read + /// from. + /// + /// Callers must ensure that this is okay to call in the current target for + /// the current CPU. + #[inline(always)] + unsafe fn candidate(&self, cur: *const u8) -> V { + let chunk = V::load_unaligned(cur); + Mask::members1(chunk, self.masks) + } +} + +impl Slim { + /// See Slim::find. + #[inline(always)] + pub(crate) unsafe fn find( + &self, + start: *const u8, + end: *const u8, + ) -> Option { + let len = end.distance(start); + debug_assert!(len >= self.minimum_len()); + let mut cur = start.add(1); + let mut prev0 = V::splat(0xFF); + while cur <= end.sub(V::BYTES) { + if let Some(m) = self.find_one(cur, end, &mut prev0) { + return Some(m); + } + cur = cur.add(V::BYTES); + } + if cur < end { + cur = end.sub(V::BYTES); + prev0 = V::splat(0xFF); + if let Some(m) = self.find_one(cur, end, &mut prev0) { + return Some(m); + } + } + None + } + + /// See Slim::find_one. + #[inline(always)] + unsafe fn find_one( + &self, + cur: *const u8, + end: *const u8, + prev0: &mut V, + ) -> Option { + let c = self.candidate(cur, prev0); + if !c.is_zero() { + if let Some(m) = self.teddy.verify(cur.sub(1), end, c) { + return Some(m); + } + } + None + } + + /// See Slim::candidate. + #[inline(always)] + unsafe fn candidate(&self, cur: *const u8, prev0: &mut V) -> V { + let chunk = V::load_unaligned(cur); + let (res0, res1) = Mask::members2(chunk, self.masks); + let res0prev0 = res0.shift_in_one_byte(*prev0); + let res = res0prev0.and(res1); + *prev0 = res0; + res + } +} + +impl Slim { + /// See Slim::find. + #[inline(always)] + pub(crate) unsafe fn find( + &self, + start: *const u8, + end: *const u8, + ) -> Option { + let len = end.distance(start); + debug_assert!(len >= self.minimum_len()); + let mut cur = start.add(2); + let mut prev0 = V::splat(0xFF); + let mut prev1 = V::splat(0xFF); + while cur <= end.sub(V::BYTES) { + if let Some(m) = self.find_one(cur, end, &mut prev0, &mut prev1) { + return Some(m); + } + cur = cur.add(V::BYTES); + } + if cur < end { + cur = end.sub(V::BYTES); + prev0 = V::splat(0xFF); + prev1 = V::splat(0xFF); + if let Some(m) = self.find_one(cur, end, &mut prev0, &mut prev1) { + return Some(m); + } + } + None + } + + /// See Slim::find_one. + #[inline(always)] + unsafe fn find_one( + &self, + cur: *const u8, + end: *const u8, + prev0: &mut V, + prev1: &mut V, + ) -> Option { + let c = self.candidate(cur, prev0, prev1); + if !c.is_zero() { + if let Some(m) = self.teddy.verify(cur.sub(2), end, c) { + return Some(m); + } + } + None + } + + /// See Slim::candidate. 
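+    ///
+    /// (The previous iteration's mask 0 and mask 1 results are shifted in by
+    /// two bytes and one byte respectively, so that all three candidate
+    /// vectors line up on the same haystack positions before being `AND`'d.)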
+ #[inline(always)] + unsafe fn candidate( + &self, + cur: *const u8, + prev0: &mut V, + prev1: &mut V, + ) -> V { + let chunk = V::load_unaligned(cur); + let (res0, res1, res2) = Mask::members3(chunk, self.masks); + let res0prev0 = res0.shift_in_two_bytes(*prev0); + let res1prev1 = res1.shift_in_one_byte(*prev1); + let res = res0prev0.and(res1prev1).and(res2); + *prev0 = res0; + *prev1 = res1; + res + } +} + +impl Slim { + /// See Slim::find. + #[inline(always)] + pub(crate) unsafe fn find( + &self, + start: *const u8, + end: *const u8, + ) -> Option { + let len = end.distance(start); + debug_assert!(len >= self.minimum_len()); + let mut cur = start.add(3); + let mut prev0 = V::splat(0xFF); + let mut prev1 = V::splat(0xFF); + let mut prev2 = V::splat(0xFF); + while cur <= end.sub(V::BYTES) { + if let Some(m) = + self.find_one(cur, end, &mut prev0, &mut prev1, &mut prev2) + { + return Some(m); + } + cur = cur.add(V::BYTES); + } + if cur < end { + cur = end.sub(V::BYTES); + prev0 = V::splat(0xFF); + prev1 = V::splat(0xFF); + prev2 = V::splat(0xFF); + if let Some(m) = + self.find_one(cur, end, &mut prev0, &mut prev1, &mut prev2) + { + return Some(m); + } + } + None + } + + /// See Slim::find_one. + #[inline(always)] + unsafe fn find_one( + &self, + cur: *const u8, + end: *const u8, + prev0: &mut V, + prev1: &mut V, + prev2: &mut V, + ) -> Option { + let c = self.candidate(cur, prev0, prev1, prev2); + if !c.is_zero() { + if let Some(m) = self.teddy.verify(cur.sub(3), end, c) { + return Some(m); + } + } + None + } + + /// See Slim::candidate. + #[inline(always)] + unsafe fn candidate( + &self, + cur: *const u8, + prev0: &mut V, + prev1: &mut V, + prev2: &mut V, + ) -> V { + let chunk = V::load_unaligned(cur); + let (res0, res1, res2, res3) = Mask::members4(chunk, self.masks); + let res0prev0 = res0.shift_in_three_bytes(*prev0); + let res1prev1 = res1.shift_in_two_bytes(*prev1); + let res2prev2 = res2.shift_in_one_byte(*prev2); + let res = res0prev0.and(res1prev1).and(res2prev2).and(res3); + *prev0 = res0; + *prev1 = res1; + *prev2 = res2; + res + } +} + +/// A "fat" Teddy implementation that is generic over both the vector type +/// and the minimum length of the patterns being searched for. +/// +/// Only 1, 2, 3 and 4 bytes are supported as minimum lengths. +#[derive(Clone, Debug)] +pub(crate) struct Fat { + /// A generic data structure for doing "fat" Teddy verification. + teddy: Teddy<16>, + /// The masks used as inputs to the shuffle operation to generate + /// candidates (which are fed into the verification routines). + masks: [Mask; BYTES], +} + +impl Fat { + /// Create a new "fat" Teddy searcher for the given patterns. + /// + /// # Panics + /// + /// This panics when `BYTES` is any value other than 1, 2, 3 or 4. + /// + /// # Safety + /// + /// Callers must ensure that this is okay to call in the current target for + /// the current CPU. + #[inline(always)] + pub(crate) unsafe fn new(patterns: Arc) -> Fat { + assert!( + 1 <= BYTES && BYTES <= 4, + "only 1, 2, 3 or 4 bytes are supported" + ); + let teddy = Teddy::new(patterns); + let masks = FatMaskBuilder::from_teddy(&teddy); + Fat { teddy, masks } + } + + /// Returns the approximate total amount of heap used by this type, in + /// units of bytes. + #[inline(always)] + pub(crate) fn memory_usage(&self) -> usize { + self.teddy.memory_usage() + } + + /// Returns the minimum length, in bytes, that a haystack must be in order + /// to use it with this searcher. 
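+    ///
+    /// (For fat Teddy, the haystack window scanned per iteration is only
+    /// half a vector, hence `V::Half::BYTES` plus the extra bytes implied by
+    /// the mask length.)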
+ #[inline(always)] + pub(crate) fn minimum_len(&self) -> usize { + V::Half::BYTES + (BYTES - 1) + } +} + +impl Fat { + /// Look for an occurrences of the patterns in this finder in the haystack + /// given by the `start` and `end` pointers. + /// + /// If no match could be found, then `None` is returned. + /// + /// # Safety + /// + /// The given pointers representing the haystack must be valid to read + /// from. They must also point to a region of memory that is at least the + /// minimum length required by this searcher. + /// + /// Callers must ensure that this is okay to call in the current target for + /// the current CPU. + #[inline(always)] + pub(crate) unsafe fn find( + &self, + start: *const u8, + end: *const u8, + ) -> Option { + let len = end.distance(start); + debug_assert!(len >= self.minimum_len()); + let mut cur = start; + while cur <= end.sub(V::Half::BYTES) { + if let Some(m) = self.find_one(cur, end) { + return Some(m); + } + cur = cur.add(V::Half::BYTES); + } + if cur < end { + cur = end.sub(V::Half::BYTES); + if let Some(m) = self.find_one(cur, end) { + return Some(m); + } + } + None + } + + /// Look for a match starting at the `V::BYTES` at and after `cur`. If + /// there isn't one, then `None` is returned. + /// + /// # Safety + /// + /// The given pointers representing the haystack must be valid to read + /// from. They must also point to a region of memory that is at least the + /// minimum length required by this searcher. + /// + /// Callers must ensure that this is okay to call in the current target for + /// the current CPU. + #[inline(always)] + unsafe fn find_one( + &self, + cur: *const u8, + end: *const u8, + ) -> Option { + let c = self.candidate(cur); + if !c.is_zero() { + if let Some(m) = self.teddy.verify(cur, end, c) { + return Some(m); + } + } + None + } + + /// Look for a candidate match (represented as a vector) starting at the + /// `V::BYTES` at and after `cur`. If there isn't one, then a vector with + /// all bits set to zero is returned. + /// + /// # Safety + /// + /// The given pointer representing the haystack must be valid to read + /// from. + /// + /// Callers must ensure that this is okay to call in the current target for + /// the current CPU. + #[inline(always)] + unsafe fn candidate(&self, cur: *const u8) -> V { + let chunk = V::load_half_unaligned(cur); + Mask::members1(chunk, self.masks) + } +} + +impl Fat { + /// See `Fat::find`. + #[inline(always)] + pub(crate) unsafe fn find( + &self, + start: *const u8, + end: *const u8, + ) -> Option { + let len = end.distance(start); + debug_assert!(len >= self.minimum_len()); + let mut cur = start.add(1); + let mut prev0 = V::splat(0xFF); + while cur <= end.sub(V::Half::BYTES) { + if let Some(m) = self.find_one(cur, end, &mut prev0) { + return Some(m); + } + cur = cur.add(V::Half::BYTES); + } + if cur < end { + cur = end.sub(V::Half::BYTES); + prev0 = V::splat(0xFF); + if let Some(m) = self.find_one(cur, end, &mut prev0) { + return Some(m); + } + } + None + } + + /// See `Fat::find_one`. + #[inline(always)] + unsafe fn find_one( + &self, + cur: *const u8, + end: *const u8, + prev0: &mut V, + ) -> Option { + let c = self.candidate(cur, prev0); + if !c.is_zero() { + if let Some(m) = self.teddy.verify(cur.sub(1), end, c) { + return Some(m); + } + } + None + } + + /// See `Fat::candidate`. 
+ #[inline(always)] + unsafe fn candidate(&self, cur: *const u8, prev0: &mut V) -> V { + let chunk = V::load_half_unaligned(cur); + let (res0, res1) = Mask::members2(chunk, self.masks); + let res0prev0 = res0.half_shift_in_one_byte(*prev0); + let res = res0prev0.and(res1); + *prev0 = res0; + res + } +} + +impl Fat { + /// See `Fat::find`. + #[inline(always)] + pub(crate) unsafe fn find( + &self, + start: *const u8, + end: *const u8, + ) -> Option { + let len = end.distance(start); + debug_assert!(len >= self.minimum_len()); + let mut cur = start.add(2); + let mut prev0 = V::splat(0xFF); + let mut prev1 = V::splat(0xFF); + while cur <= end.sub(V::Half::BYTES) { + if let Some(m) = self.find_one(cur, end, &mut prev0, &mut prev1) { + return Some(m); + } + cur = cur.add(V::Half::BYTES); + } + if cur < end { + cur = end.sub(V::Half::BYTES); + prev0 = V::splat(0xFF); + prev1 = V::splat(0xFF); + if let Some(m) = self.find_one(cur, end, &mut prev0, &mut prev1) { + return Some(m); + } + } + None + } + + /// See `Fat::find_one`. + #[inline(always)] + unsafe fn find_one( + &self, + cur: *const u8, + end: *const u8, + prev0: &mut V, + prev1: &mut V, + ) -> Option { + let c = self.candidate(cur, prev0, prev1); + if !c.is_zero() { + if let Some(m) = self.teddy.verify(cur.sub(2), end, c) { + return Some(m); + } + } + None + } + + /// See `Fat::candidate`. + #[inline(always)] + unsafe fn candidate( + &self, + cur: *const u8, + prev0: &mut V, + prev1: &mut V, + ) -> V { + let chunk = V::load_half_unaligned(cur); + let (res0, res1, res2) = Mask::members3(chunk, self.masks); + let res0prev0 = res0.half_shift_in_two_bytes(*prev0); + let res1prev1 = res1.half_shift_in_one_byte(*prev1); + let res = res0prev0.and(res1prev1).and(res2); + *prev0 = res0; + *prev1 = res1; + res + } +} + +impl Fat { + /// See `Fat::find`. + #[inline(always)] + pub(crate) unsafe fn find( + &self, + start: *const u8, + end: *const u8, + ) -> Option { + let len = end.distance(start); + debug_assert!(len >= self.minimum_len()); + let mut cur = start.add(3); + let mut prev0 = V::splat(0xFF); + let mut prev1 = V::splat(0xFF); + let mut prev2 = V::splat(0xFF); + while cur <= end.sub(V::Half::BYTES) { + if let Some(m) = + self.find_one(cur, end, &mut prev0, &mut prev1, &mut prev2) + { + return Some(m); + } + cur = cur.add(V::Half::BYTES); + } + if cur < end { + cur = end.sub(V::Half::BYTES); + prev0 = V::splat(0xFF); + prev1 = V::splat(0xFF); + prev2 = V::splat(0xFF); + if let Some(m) = + self.find_one(cur, end, &mut prev0, &mut prev1, &mut prev2) + { + return Some(m); + } + } + None + } + + /// See `Fat::find_one`. + #[inline(always)] + unsafe fn find_one( + &self, + cur: *const u8, + end: *const u8, + prev0: &mut V, + prev1: &mut V, + prev2: &mut V, + ) -> Option { + let c = self.candidate(cur, prev0, prev1, prev2); + if !c.is_zero() { + if let Some(m) = self.teddy.verify(cur.sub(3), end, c) { + return Some(m); + } + } + None + } + + /// See `Fat::candidate`. 
+ #[inline(always)] + unsafe fn candidate( + &self, + cur: *const u8, + prev0: &mut V, + prev1: &mut V, + prev2: &mut V, + ) -> V { + let chunk = V::load_half_unaligned(cur); + let (res0, res1, res2, res3) = Mask::members4(chunk, self.masks); + let res0prev0 = res0.half_shift_in_three_bytes(*prev0); + let res1prev1 = res1.half_shift_in_two_bytes(*prev1); + let res2prev2 = res2.half_shift_in_one_byte(*prev2); + let res = res0prev0.and(res1prev1).and(res2prev2).and(res3); + *prev0 = res0; + *prev1 = res1; + *prev2 = res2; + res + } +} + +/// The common elements of all "slim" and "fat" Teddy search implementations. +/// +/// Essentially, this contains the patterns and the buckets. Namely, it +/// contains enough to implement the verification step after candidates are +/// identified via the shuffle masks. +/// +/// It is generic over the number of buckets used. In general, the number of +/// buckets is either 8 (for "slim" Teddy) or 16 (for "fat" Teddy). The generic +/// parameter isn't really meant to be instantiated for any value other than +/// 8 or 16, although it is technically possible. The main hiccup is that there +/// is some bit-shifting done in the critical part of verification that could +/// be quite expensive if `N` is not a multiple of 2. +#[derive(Clone, Debug)] +struct Teddy { + /// The patterns we are searching for. + /// + /// A pattern string can be found by its `PatternID`. + patterns: Arc, + /// The allocation of patterns in buckets. This only contains the IDs of + /// patterns. In order to do full verification, callers must provide the + /// actual patterns when using Teddy. + buckets: [Vec; BUCKETS], + // N.B. The above representation is very simple, but it definitely results + // in ping-ponging between different allocations during verification. I've + // tried experimenting with other representations that flatten the pattern + // strings into a single allocation, but it doesn't seem to help much. + // Probably everything is small enough to fit into cache anyway, and so the + // pointer chasing isn't a big deal? + // + // One other avenue I haven't explored is some kind of hashing trick + // that let's us do another high-confidence check before launching into + // `memcmp`. +} + +impl Teddy { + /// Create a new generic data structure for Teddy verification. + fn new(patterns: Arc) -> Teddy { + assert_ne!(0, patterns.len(), "Teddy requires at least one pattern"); + assert_ne!( + 0, + patterns.minimum_len(), + "Teddy does not support zero-length patterns" + ); + assert!( + BUCKETS == 8 || BUCKETS == 16, + "Teddy only supports 8 or 16 buckets" + ); + // MSRV(1.63): Use core::array::from_fn below instead of allocating a + // superfluous outer Vec. Not a big deal (especially given the BTreeMap + // allocation below), but nice to not do it. + let buckets = + <[Vec; BUCKETS]>::try_from(vec![vec![]; BUCKETS]) + .unwrap(); + let mut t = Teddy { patterns, buckets }; + + let mut map: BTreeMap, usize> = BTreeMap::new(); + for (id, pattern) in t.patterns.iter() { + // We try to be slightly clever in how we assign patterns into + // buckets. Generally speaking, we want patterns with the same + // prefix to be in the same bucket, since it minimizes the amount + // of time we spend churning through buckets in the verification + // step. + // + // So we could assign patterns with the same N-prefix (where N is + // the size of the mask, which is one of {1, 2, 3}) to the same + // bucket. 
However, case insensitive searches are fairly common, so + // we'd for example, ideally want to treat `abc` and `ABC` as if + // they shared the same prefix. ASCII has the nice property that + // the lower 4 bits of A and a are the same, so we therefore group + // patterns with the same low-nybble-N-prefix into the same bucket. + // + // MOREOVER, this is actually necessary for correctness! In + // particular, by grouping patterns with the same prefix into the + // same bucket, we ensure that we preserve correct leftmost-first + // and leftmost-longest match semantics. In addition to the fact + // that `patterns.iter()` iterates in the correct order, this + // guarantees that all possible ambiguous matches will occur in + // the same bucket. The verification routine could be adjusted to + // support correct leftmost match semantics regardless of bucket + // allocation, but that results in a performance hit. It's much + // nicer to be able to just stop as soon as a match is found. + let lonybs = pattern.low_nybbles(t.mask_len()); + if let Some(&bucket) = map.get(&lonybs) { + t.buckets[bucket].push(id); + } else { + // N.B. We assign buckets in reverse because it shouldn't have + // any influence on performance, but it does make it harder to + // get leftmost match semantics accidentally correct. + let bucket = (BUCKETS - 1) - (id.as_usize() % BUCKETS); + t.buckets[bucket].push(id); + map.insert(lonybs, bucket); + } + } + t + } + + /// Verify whether there are any matches starting at or after `cur` in the + /// haystack. The candidate chunk given should correspond to 8-bit bitsets + /// for N buckets. + /// + /// # Safety + /// + /// The given pointers representing the haystack must be valid to read + /// from. + #[inline(always)] + unsafe fn verify64( + &self, + cur: *const u8, + end: *const u8, + mut candidate_chunk: u64, + ) -> Option { + while candidate_chunk != 0 { + let bit = candidate_chunk.trailing_zeros().as_usize(); + candidate_chunk &= !(1 << bit); + + let cur = cur.add(bit / BUCKETS); + let bucket = bit % BUCKETS; + if let Some(m) = self.verify_bucket(cur, end, bucket) { + return Some(m); + } + } + None + } + + /// Verify whether there are any matches starting at `at` in the given + /// `haystack` corresponding only to patterns in the given bucket. + /// + /// # Safety + /// + /// The given pointers representing the haystack must be valid to read + /// from. + /// + /// The bucket index must be less than or equal to `self.buckets.len()`. + #[inline(always)] + unsafe fn verify_bucket( + &self, + cur: *const u8, + end: *const u8, + bucket: usize, + ) -> Option { + debug_assert!(bucket < self.buckets.len()); + // SAFETY: The caller must ensure that the bucket index is correct. + for pid in self.buckets.get_unchecked(bucket).iter().copied() { + // SAFETY: This is safe because we are guaranteed that every + // index in a Teddy bucket is a valid index into `pats`, by + // construction. + debug_assert!(pid.as_usize() < self.patterns.len()); + let pat = self.patterns.get_unchecked(pid); + if pat.is_prefix_raw(cur, end) { + let start = cur; + let end = start.add(pat.len()); + return Some(Match { pid, start, end }); + } + } + None + } + + /// Returns the total number of masks required by the patterns in this + /// Teddy searcher. + /// + /// Basically, the mask length corresponds to the type of Teddy searcher + /// to use: a 1-byte, 2-byte, 3-byte or 4-byte searcher. 
The bigger the + /// better, typically, since searching for longer substrings usually + /// decreases the rate of false positives. Therefore, the number of masks + /// needed is the length of the shortest pattern in this searcher. If the + /// length of the shortest pattern (in bytes) is bigger than 4, then the + /// mask length is 4 since there are no Teddy searchers for more than 4 + /// bytes. + fn mask_len(&self) -> usize { + core::cmp::min(4, self.patterns.minimum_len()) + } + + /// Returns the approximate total amount of heap used by this type, in + /// units of bytes. + fn memory_usage(&self) -> usize { + // This is an upper bound rather than a precise accounting. No + // particular reason, other than it's probably very close to actual + // memory usage in practice. + self.patterns.len() * core::mem::size_of::() + } +} + +impl Teddy<8> { + /// Runs the verification routine for "slim" Teddy. + /// + /// The candidate given should be a collection of 8-bit bitsets (one bitset + /// per lane), where the ith bit is set in the jth lane if and only if the + /// byte occurring at `at + j` in `cur` is in the bucket `i`. + /// + /// # Safety + /// + /// Callers must ensure that this is okay to call in the current target for + /// the current CPU. + /// + /// The given pointers must be valid to read from. + #[inline(always)] + unsafe fn verify( + &self, + mut cur: *const u8, + end: *const u8, + candidate: V, + ) -> Option { + debug_assert!(!candidate.is_zero()); + // Convert the candidate into 64-bit chunks, and then verify each of + // those chunks. + candidate.for_each_64bit_lane( + #[inline(always)] + |_, chunk| { + let result = self.verify64(cur, end, chunk); + cur = cur.add(8); + result + }, + ) + } +} + +impl Teddy<16> { + /// Runs the verification routine for "fat" Teddy. + /// + /// The candidate given should be a collection of 8-bit bitsets (one bitset + /// per lane), where the ith bit is set in the jth lane if and only if the + /// byte occurring at `at + (j < 16 ? j : j - 16)` in `cur` is in the + /// bucket `j < 16 ? i : i + 8`. + /// + /// # Safety + /// + /// Callers must ensure that this is okay to call in the current target for + /// the current CPU. + /// + /// The given pointers must be valid to read from. + #[inline(always)] + unsafe fn verify( + &self, + mut cur: *const u8, + end: *const u8, + candidate: V, + ) -> Option { + // This is a bit tricky, but we basically want to convert our + // candidate, which looks like this (assuming a 256-bit vector): + // + // a31 a30 ... a17 a16 a15 a14 ... a01 a00 + // + // where each a(i) is an 8-bit bitset corresponding to the activated + // buckets, to this + // + // a31 a15 a30 a14 a29 a13 ... a18 a02 a17 a01 a16 a00 + // + // Namely, for Fat Teddy, the high 128-bits of the candidate correspond + // to the same bytes in the haystack in the low 128-bits (so we only + // scan 16 bytes at a time), but are for buckets 8-15 instead of 0-7. + // + // The verification routine wants to look at all potentially matching + // buckets before moving on to the next lane. So for example, both + // a16 and a00 both correspond to the first byte in our window; a00 + // contains buckets 0-7 and a16 contains buckets 8-15. Specifically, + // a16 should be checked before a01. So the transformation shown above + // allows us to use our normal verification procedure with one small + // change: we treat each bitset as 16 bits instead of 8 bits. + debug_assert!(!candidate.is_zero()); + + // Swap the 128-bit lanes in the candidate vector. 
+ let swapped = candidate.swap_halves(); + // Interleave the bytes from the low 128-bit lanes, starting with + // cand first. + let r1 = candidate.interleave_low_8bit_lanes(swapped); + // Interleave the bytes from the high 128-bit lanes, starting with + // cand first. + let r2 = candidate.interleave_high_8bit_lanes(swapped); + // Now just take the 2 low 64-bit integers from both r1 and r2. We + // can drop the high 64-bit integers because they are a mirror image + // of the low 64-bit integers. All we care about are the low 128-bit + // lanes of r1 and r2. Combined, they contain all our 16-bit bitsets + // laid out in the desired order, as described above. + r1.for_each_low_64bit_lane( + r2, + #[inline(always)] + |_, chunk| { + let result = self.verify64(cur, end, chunk); + cur = cur.add(4); + result + }, + ) + } +} + +/// A vector generic mask for the low and high nybbles in a set of patterns. +/// Each 8-bit lane `j` in a vector corresponds to a bitset where the `i`th bit +/// is set if and only if the nybble `j` is in the bucket `i` at a particular +/// position. +/// +/// This is slightly tweaked dependending on whether Slim or Fat Teddy is being +/// used. For Slim Teddy, the bitsets in the lower half are the same as the +/// bitsets in the higher half, so that we can search `V::BYTES` bytes at a +/// time. (Remember, the nybbles in the haystack are used as indices into these +/// masks, and 256-bit shuffles only operate on 128-bit lanes.) +/// +/// For Fat Teddy, the bitsets are not repeated, but instead, the high half +/// bits correspond to an addition 8 buckets. So that a bitset `00100010` has +/// buckets 1 and 5 set if it's in the lower half, but has buckets 9 and 13 set +/// if it's in the higher half. +#[derive(Clone, Copy, Debug)] +struct Mask { + lo: V, + hi: V, +} + +impl Mask { + /// Return a candidate for Teddy (fat or slim) that is searching for 1-byte + /// candidates. + /// + /// If a candidate is returned, it will be a collection of 8-bit bitsets + /// (one bitset per lane), where the ith bit is set in the jth lane if and + /// only if the byte occurring at the jth lane in `chunk` is in the bucket + /// `i`. If no candidate is found, then the vector returned will have all + /// lanes set to zero. + /// + /// `chunk` should correspond to a `V::BYTES` window of the haystack (where + /// the least significant byte corresponds to the start of the window). For + /// fat Teddy, the haystack window length should be `V::BYTES / 2`, with + /// the window repeated in each half of the vector. + /// + /// `mask1` should correspond to a low/high mask for the first byte of all + /// patterns that are being searched. + #[inline(always)] + unsafe fn members1(chunk: V, masks: [Mask; 1]) -> V { + let lomask = V::splat(0xF); + let hlo = chunk.and(lomask); + let hhi = chunk.shift_8bit_lane_right::<4>().and(lomask); + let locand = masks[0].lo.shuffle_bytes(hlo); + let hicand = masks[0].hi.shuffle_bytes(hhi); + locand.and(hicand) + } + + /// Return a candidate for Teddy (fat or slim) that is searching for 2-byte + /// candidates. + /// + /// If candidates are returned, each will be a collection of 8-bit bitsets + /// (one bitset per lane), where the ith bit is set in the jth lane if and + /// only if the byte occurring at the jth lane in `chunk` is in the bucket + /// `i`. Each candidate returned corresponds to the first and second bytes + /// of the patterns being searched. If no candidate is found, then all of + /// the lanes will be set to zero in at least one of the vectors returned. 
+ /// + /// `chunk` should correspond to a `V::BYTES` window of the haystack (where + /// the least significant byte corresponds to the start of the window). For + /// fat Teddy, the haystack window length should be `V::BYTES / 2`, with + /// the window repeated in each half of the vector. + /// + /// The masks should correspond to the masks computed for the first and + /// second bytes of all patterns that are being searched. + #[inline(always)] + unsafe fn members2(chunk: V, masks: [Mask; 2]) -> (V, V) { + let lomask = V::splat(0xF); + let hlo = chunk.and(lomask); + let hhi = chunk.shift_8bit_lane_right::<4>().and(lomask); + + let locand1 = masks[0].lo.shuffle_bytes(hlo); + let hicand1 = masks[0].hi.shuffle_bytes(hhi); + let cand1 = locand1.and(hicand1); + + let locand2 = masks[1].lo.shuffle_bytes(hlo); + let hicand2 = masks[1].hi.shuffle_bytes(hhi); + let cand2 = locand2.and(hicand2); + + (cand1, cand2) + } + + /// Return a candidate for Teddy (fat or slim) that is searching for 3-byte + /// candidates. + /// + /// If candidates are returned, each will be a collection of 8-bit bitsets + /// (one bitset per lane), where the ith bit is set in the jth lane if and + /// only if the byte occurring at the jth lane in `chunk` is in the bucket + /// `i`. Each candidate returned corresponds to the first, second and third + /// bytes of the patterns being searched. If no candidate is found, then + /// all of the lanes will be set to zero in at least one of the vectors + /// returned. + /// + /// `chunk` should correspond to a `V::BYTES` window of the haystack (where + /// the least significant byte corresponds to the start of the window). For + /// fat Teddy, the haystack window length should be `V::BYTES / 2`, with + /// the window repeated in each half of the vector. + /// + /// The masks should correspond to the masks computed for the first, second + /// and third bytes of all patterns that are being searched. + #[inline(always)] + unsafe fn members3(chunk: V, masks: [Mask; 3]) -> (V, V, V) { + let lomask = V::splat(0xF); + let hlo = chunk.and(lomask); + let hhi = chunk.shift_8bit_lane_right::<4>().and(lomask); + + let locand1 = masks[0].lo.shuffle_bytes(hlo); + let hicand1 = masks[0].hi.shuffle_bytes(hhi); + let cand1 = locand1.and(hicand1); + + let locand2 = masks[1].lo.shuffle_bytes(hlo); + let hicand2 = masks[1].hi.shuffle_bytes(hhi); + let cand2 = locand2.and(hicand2); + + let locand3 = masks[2].lo.shuffle_bytes(hlo); + let hicand3 = masks[2].hi.shuffle_bytes(hhi); + let cand3 = locand3.and(hicand3); + + (cand1, cand2, cand3) + } + + /// Return a candidate for Teddy (fat or slim) that is searching for 4-byte + /// candidates. + /// + /// If candidates are returned, each will be a collection of 8-bit bitsets + /// (one bitset per lane), where the ith bit is set in the jth lane if and + /// only if the byte occurring at the jth lane in `chunk` is in the bucket + /// `i`. Each candidate returned corresponds to the first, second, third + /// and fourth bytes of the patterns being searched. If no candidate is + /// found, then all of the lanes will be set to zero in at least one of the + /// vectors returned. + /// + /// `chunk` should correspond to a `V::BYTES` window of the haystack (where + /// the least significant byte corresponds to the start of the window). For + /// fat Teddy, the haystack window length should be `V::BYTES / 2`, with + /// the window repeated in each half of the vector. 
+ /// + /// The masks should correspond to the masks computed for the first, + /// second, third and fourth bytes of all patterns that are being searched. + #[inline(always)] + unsafe fn members4(chunk: V, masks: [Mask; 4]) -> (V, V, V, V) { + let lomask = V::splat(0xF); + let hlo = chunk.and(lomask); + let hhi = chunk.shift_8bit_lane_right::<4>().and(lomask); + + let locand1 = masks[0].lo.shuffle_bytes(hlo); + let hicand1 = masks[0].hi.shuffle_bytes(hhi); + let cand1 = locand1.and(hicand1); + + let locand2 = masks[1].lo.shuffle_bytes(hlo); + let hicand2 = masks[1].hi.shuffle_bytes(hhi); + let cand2 = locand2.and(hicand2); + + let locand3 = masks[2].lo.shuffle_bytes(hlo); + let hicand3 = masks[2].hi.shuffle_bytes(hhi); + let cand3 = locand3.and(hicand3); + + let locand4 = masks[3].lo.shuffle_bytes(hlo); + let hicand4 = masks[3].hi.shuffle_bytes(hhi); + let cand4 = locand4.and(hicand4); + + (cand1, cand2, cand3, cand4) + } +} + +/// Represents the low and high nybble masks that will be used during +/// search. Each mask is 32 bytes wide, although only the first 16 bytes are +/// used for 128-bit vectors. +/// +/// Each byte in the mask corresponds to a 8-bit bitset, where bit `i` is set +/// if and only if the corresponding nybble is in the ith bucket. The index of +/// the byte (0-15, inclusive) corresponds to the nybble. +/// +/// Each mask is used as the target of a shuffle, where the indices for the +/// shuffle are taken from the haystack. AND'ing the shuffles for both the +/// low and high masks together also results in 8-bit bitsets, but where bit +/// `i` is set if and only if the correspond *byte* is in the ith bucket. +#[derive(Clone, Default)] +struct SlimMaskBuilder { + lo: [u8; 32], + hi: [u8; 32], +} + +impl SlimMaskBuilder { + /// Update this mask by adding the given byte to the given bucket. The + /// given bucket must be in the range 0-7. + /// + /// # Panics + /// + /// When `bucket >= 8`. + fn add(&mut self, bucket: usize, byte: u8) { + assert!(bucket < 8); + + let bucket = u8::try_from(bucket).unwrap(); + let byte_lo = usize::from(byte & 0xF); + let byte_hi = usize::from((byte >> 4) & 0xF); + // When using 256-bit vectors, we need to set this bucket assignment in + // the low and high 128-bit portions of the mask. This allows us to + // process 32 bytes at a time. Namely, AVX2 shuffles operate on each + // of the 128-bit lanes, rather than the full 256-bit vector at once. + self.lo[byte_lo] |= 1 << bucket; + self.lo[byte_lo + 16] |= 1 << bucket; + self.hi[byte_hi] |= 1 << bucket; + self.hi[byte_hi + 16] |= 1 << bucket; + } + + /// Turn this builder into a vector mask. + /// + /// # Panics + /// + /// When `V` represents a vector bigger than what `MaskBytes` can contain. + /// + /// # Safety + /// + /// Callers must ensure that this is okay to call in the current target for + /// the current CPU. + #[inline(always)] + unsafe fn build(&self) -> Mask { + assert!(V::BYTES <= self.lo.len()); + assert!(V::BYTES <= self.hi.len()); + Mask { + lo: V::load_unaligned(self.lo[..].as_ptr()), + hi: V::load_unaligned(self.hi[..].as_ptr()), + } + } + + /// A convenience function for building `N` vector masks from a slim + /// `Teddy` value. + /// + /// # Panics + /// + /// When `V` represents a vector bigger than what `MaskBytes` can contain. + /// + /// # Safety + /// + /// Callers must ensure that this is okay to call in the current target for + /// the current CPU. 
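+ ///
+ /// As a small illustration (with a hypothetical bucket assignment): if
+ /// bucket 0 holds the pattern "foo" and bucket 1 holds "bar", then for
+ /// `BYTES = 2` the first mask records `b'f'` in bucket 0 and `b'b'` in
+ /// bucket 1, while the second mask records `b'o'` in bucket 0 and `b'a'`
+ /// in bucket 1.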
+ #[inline(always)] + unsafe fn from_teddy( + teddy: &Teddy<8>, + ) -> [Mask; BYTES] { + // MSRV(1.63): Use core::array::from_fn to just build the array here + // instead of creating a vector and turning it into an array. + let mut mask_builders = vec![SlimMaskBuilder::default(); BYTES]; + for (bucket_index, bucket) in teddy.buckets.iter().enumerate() { + for pid in bucket.iter().copied() { + let pat = teddy.patterns.get(pid); + for (i, builder) in mask_builders.iter_mut().enumerate() { + builder.add(bucket_index, pat.bytes()[i]); + } + } + } + let array = + <[SlimMaskBuilder; BYTES]>::try_from(mask_builders).unwrap(); + array.map(|builder| builder.build()) + } +} + +impl Debug for SlimMaskBuilder { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + let (mut parts_lo, mut parts_hi) = (vec![], vec![]); + for i in 0..32 { + parts_lo.push(format!("{:02}: {:08b}", i, self.lo[i])); + parts_hi.push(format!("{:02}: {:08b}", i, self.hi[i])); + } + f.debug_struct("SlimMaskBuilder") + .field("lo", &parts_lo) + .field("hi", &parts_hi) + .finish() + } +} + +/// Represents the low and high nybble masks that will be used during "fat" +/// Teddy search. +/// +/// Each mask is 32 bytes wide, and at the time of writing, only 256-bit vectors +/// support fat Teddy. +/// +/// A fat Teddy mask is like a slim Teddy mask, except that instead of +/// repeating the bitsets in the high and low 128-bits in 256-bit vectors, the +/// high and low 128-bit halves each represent distinct buckets. (Bringing the +/// total to 16 instead of 8.) This permits spreading the patterns out a bit +/// more and thus putting less pressure on verification to be fast. +/// +/// Each byte in the mask corresponds to a 8-bit bitset, where bit `i` is set +/// if and only if the corresponding nybble is in the ith bucket. The index of +/// the byte (0-15, inclusive) corresponds to the nybble. +#[derive(Clone, Copy, Default)] +struct FatMaskBuilder { + lo: [u8; 32], + hi: [u8; 32], +} + +impl FatMaskBuilder { + /// Update this mask by adding the given byte to the given bucket. The + /// given bucket must be in the range 0-15. + /// + /// # Panics + /// + /// When `bucket >= 16`. + fn add(&mut self, bucket: usize, byte: u8) { + assert!(bucket < 16); + + let bucket = u8::try_from(bucket).unwrap(); + let byte_lo = usize::from(byte & 0xF); + let byte_hi = usize::from((byte >> 4) & 0xF); + // Unlike slim teddy, fat teddy only works with AVX2. For fat teddy, + // the high 128 bits of our mask correspond to buckets 8-15, while the + // low 128 bits correspond to buckets 0-7. + if bucket < 8 { + self.lo[byte_lo] |= 1 << bucket; + self.hi[byte_hi] |= 1 << bucket; + } else { + self.lo[byte_lo + 16] |= 1 << (bucket % 8); + self.hi[byte_hi + 16] |= 1 << (bucket % 8); + } + } + + /// Turn this builder into a vector mask. + /// + /// # Panics + /// + /// When `V` represents a vector bigger than what `MaskBytes` can contain. + /// + /// # Safety + /// + /// Callers must ensure that this is okay to call in the current target for + /// the current CPU. + #[inline(always)] + unsafe fn build(&self) -> Mask { + assert!(V::BYTES <= self.lo.len()); + assert!(V::BYTES <= self.hi.len()); + Mask { + lo: V::load_unaligned(self.lo[..].as_ptr()), + hi: V::load_unaligned(self.hi[..].as_ptr()), + } + } + + /// A convenience function for building `N` vector masks from a fat + /// `Teddy` value. + /// + /// # Panics + /// + /// When `V` represents a vector bigger than what `MaskBytes` can contain. 
+ /// + /// # Safety + /// + /// Callers must ensure that this is okay to call in the current target for + /// the current CPU. + #[inline(always)] + unsafe fn from_teddy( + teddy: &Teddy<16>, + ) -> [Mask; BYTES] { + // MSRV(1.63): Use core::array::from_fn to just build the array here + // instead of creating a vector and turning it into an array. + let mut mask_builders = vec![FatMaskBuilder::default(); BYTES]; + for (bucket_index, bucket) in teddy.buckets.iter().enumerate() { + for pid in bucket.iter().copied() { + let pat = teddy.patterns.get(pid); + for (i, builder) in mask_builders.iter_mut().enumerate() { + builder.add(bucket_index, pat.bytes()[i]); + } + } + } + let array = + <[FatMaskBuilder; BYTES]>::try_from(mask_builders).unwrap(); + array.map(|builder| builder.build()) + } +} + +impl Debug for FatMaskBuilder { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + let (mut parts_lo, mut parts_hi) = (vec![], vec![]); + for i in 0..32 { + parts_lo.push(format!("{:02}: {:08b}", i, self.lo[i])); + parts_hi.push(format!("{:02}: {:08b}", i, self.hi[i])); + } + f.debug_struct("FatMaskBuilder") + .field("lo", &parts_lo) + .field("hi", &parts_hi) + .finish() + } +} diff --git a/vendor/aho-corasick/src/packed/teddy/mod.rs b/vendor/aho-corasick/src/packed/teddy/mod.rs new file mode 100644 index 00000000000000..26cfcdc450ff02 --- /dev/null +++ b/vendor/aho-corasick/src/packed/teddy/mod.rs @@ -0,0 +1,9 @@ +// Regrettable, but Teddy stuff just isn't used on all targets. And for some +// targets, like aarch64, only "slim" Teddy is used and so "fat" Teddy gets a +// bunch of dead-code warnings. Just not worth trying to squash them. Blech. +#![allow(dead_code)] + +pub(crate) use self::builder::{Builder, Searcher}; + +mod builder; +mod generic; diff --git a/vendor/aho-corasick/src/packed/tests.rs b/vendor/aho-corasick/src/packed/tests.rs new file mode 100644 index 00000000000000..2b0d44ee6f83ca --- /dev/null +++ b/vendor/aho-corasick/src/packed/tests.rs @@ -0,0 +1,583 @@ +use std::collections::HashMap; + +use alloc::{ + format, + string::{String, ToString}, + vec, + vec::Vec, +}; + +use crate::{ + packed::{Config, MatchKind}, + util::search::Match, +}; + +/// A description of a single test against a multi-pattern searcher. +/// +/// A single test may not necessarily pass on every configuration of a +/// searcher. The tests are categorized and grouped appropriately below. +#[derive(Clone, Debug, Eq, PartialEq)] +struct SearchTest { + /// The name of this test, for debugging. + name: &'static str, + /// The patterns to search for. + patterns: &'static [&'static str], + /// The text to search. + haystack: &'static str, + /// Each match is a triple of (pattern_index, start, end), where + /// pattern_index is an index into `patterns` and `start`/`end` are indices + /// into `haystack`. 
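+ ///
+ /// As a small illustration (hypothetical values, not taken from any test
+ /// below): with `patterns = &["foo", "bar"]` and `haystack = "xbar"`, the
+ /// expected matches would be `&[(1, 1, 4)]`, i.e. pattern 1 ("bar")
+ /// matched at byte offsets 1..4.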
+ matches: &'static [(usize, usize, usize)], +} + +struct SearchTestOwned { + offset: usize, + name: String, + patterns: Vec, + haystack: String, + matches: Vec<(usize, usize, usize)>, +} + +impl SearchTest { + fn variations(&self) -> Vec { + let count = if cfg!(miri) { 1 } else { 261 }; + let mut tests = vec![]; + for i in 0..count { + tests.push(self.offset_prefix(i)); + tests.push(self.offset_suffix(i)); + tests.push(self.offset_both(i)); + } + tests + } + + fn offset_both(&self, off: usize) -> SearchTestOwned { + SearchTestOwned { + offset: off, + name: self.name.to_string(), + patterns: self.patterns.iter().map(|s| s.to_string()).collect(), + haystack: format!( + "{}{}{}", + "Z".repeat(off), + self.haystack, + "Z".repeat(off) + ), + matches: self + .matches + .iter() + .map(|&(id, s, e)| (id, s + off, e + off)) + .collect(), + } + } + + fn offset_prefix(&self, off: usize) -> SearchTestOwned { + SearchTestOwned { + offset: off, + name: self.name.to_string(), + patterns: self.patterns.iter().map(|s| s.to_string()).collect(), + haystack: format!("{}{}", "Z".repeat(off), self.haystack), + matches: self + .matches + .iter() + .map(|&(id, s, e)| (id, s + off, e + off)) + .collect(), + } + } + + fn offset_suffix(&self, off: usize) -> SearchTestOwned { + SearchTestOwned { + offset: off, + name: self.name.to_string(), + patterns: self.patterns.iter().map(|s| s.to_string()).collect(), + haystack: format!("{}{}", self.haystack, "Z".repeat(off)), + matches: self.matches.to_vec(), + } + } +} + +/// Short-hand constructor for SearchTest. We use it a lot below. +macro_rules! t { + ($name:ident, $patterns:expr, $haystack:expr, $matches:expr) => { + SearchTest { + name: stringify!($name), + patterns: $patterns, + haystack: $haystack, + matches: $matches, + } + }; +} + +/// A collection of test groups. +type TestCollection = &'static [&'static [SearchTest]]; + +// Define several collections corresponding to the different type of match +// semantics supported. These collections have some overlap, but each +// collection should have some tests that no other collection has. + +/// Tests for leftmost-first match semantics. +const PACKED_LEFTMOST_FIRST: TestCollection = + &[BASICS, LEFTMOST, LEFTMOST_FIRST, REGRESSION, TEDDY]; + +/// Tests for leftmost-longest match semantics. +const PACKED_LEFTMOST_LONGEST: TestCollection = + &[BASICS, LEFTMOST, LEFTMOST_LONGEST, REGRESSION, TEDDY]; + +// Now define the individual tests that make up the collections above. + +/// A collection of tests for the that should always be true regardless of +/// match semantics. That is, all combinations of leftmost-{first, longest} +/// should produce the same answer. 
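+///
+/// For reference, each `t!(...)` entry below expands (via the macro above)
+/// to a `SearchTest` literal. For example,
+/// `t!(basic010, &["a"], "a", &[(0, 0, 1)])` is equivalent to:
+///
+/// ```text
+/// SearchTest {
+///     name: "basic010",
+///     patterns: &["a"],
+///     haystack: "a",
+///     matches: &[(0, 0, 1)],
+/// }
+/// ```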
+const BASICS: &'static [SearchTest] = &[ + t!(basic001, &["a"], "", &[]), + t!(basic010, &["a"], "a", &[(0, 0, 1)]), + t!(basic020, &["a"], "aa", &[(0, 0, 1), (0, 1, 2)]), + t!(basic030, &["a"], "aaa", &[(0, 0, 1), (0, 1, 2), (0, 2, 3)]), + t!(basic040, &["a"], "aba", &[(0, 0, 1), (0, 2, 3)]), + t!(basic050, &["a"], "bba", &[(0, 2, 3)]), + t!(basic060, &["a"], "bbb", &[]), + t!(basic070, &["a"], "bababbbba", &[(0, 1, 2), (0, 3, 4), (0, 8, 9)]), + t!(basic100, &["aa"], "", &[]), + t!(basic110, &["aa"], "aa", &[(0, 0, 2)]), + t!(basic120, &["aa"], "aabbaa", &[(0, 0, 2), (0, 4, 6)]), + t!(basic130, &["aa"], "abbab", &[]), + t!(basic140, &["aa"], "abbabaa", &[(0, 5, 7)]), + t!(basic150, &["aaa"], "aaa", &[(0, 0, 3)]), + t!(basic200, &["abc"], "abc", &[(0, 0, 3)]), + t!(basic210, &["abc"], "zazabzabcz", &[(0, 6, 9)]), + t!(basic220, &["abc"], "zazabczabcz", &[(0, 3, 6), (0, 7, 10)]), + t!(basic230, &["abcd"], "abcd", &[(0, 0, 4)]), + t!(basic240, &["abcd"], "zazabzabcdz", &[(0, 6, 10)]), + t!(basic250, &["abcd"], "zazabcdzabcdz", &[(0, 3, 7), (0, 8, 12)]), + t!(basic300, &["a", "b"], "", &[]), + t!(basic310, &["a", "b"], "z", &[]), + t!(basic320, &["a", "b"], "b", &[(1, 0, 1)]), + t!(basic330, &["a", "b"], "a", &[(0, 0, 1)]), + t!( + basic340, + &["a", "b"], + "abba", + &[(0, 0, 1), (1, 1, 2), (1, 2, 3), (0, 3, 4),] + ), + t!( + basic350, + &["b", "a"], + "abba", + &[(1, 0, 1), (0, 1, 2), (0, 2, 3), (1, 3, 4),] + ), + t!(basic360, &["abc", "bc"], "xbc", &[(1, 1, 3),]), + t!(basic400, &["foo", "bar"], "", &[]), + t!(basic410, &["foo", "bar"], "foobar", &[(0, 0, 3), (1, 3, 6),]), + t!(basic420, &["foo", "bar"], "barfoo", &[(1, 0, 3), (0, 3, 6),]), + t!(basic430, &["foo", "bar"], "foofoo", &[(0, 0, 3), (0, 3, 6),]), + t!(basic440, &["foo", "bar"], "barbar", &[(1, 0, 3), (1, 3, 6),]), + t!(basic450, &["foo", "bar"], "bafofoo", &[(0, 4, 7),]), + t!(basic460, &["bar", "foo"], "bafofoo", &[(1, 4, 7),]), + t!(basic470, &["foo", "bar"], "fobabar", &[(1, 4, 7),]), + t!(basic480, &["bar", "foo"], "fobabar", &[(0, 4, 7),]), + t!(basic700, &["yabcdef", "abcdezghi"], "yabcdefghi", &[(0, 0, 7),]), + t!(basic710, &["yabcdef", "abcdezghi"], "yabcdezghi", &[(1, 1, 10),]), + t!( + basic720, + &["yabcdef", "bcdeyabc", "abcdezghi"], + "yabcdezghi", + &[(2, 1, 10),] + ), + t!(basic810, &["abcd", "bcd", "cd"], "abcd", &[(0, 0, 4),]), + t!(basic820, &["bcd", "cd", "abcd"], "abcd", &[(2, 0, 4),]), + t!(basic830, &["abc", "bc"], "zazabcz", &[(0, 3, 6),]), + t!( + basic840, + &["ab", "ba"], + "abababa", + &[(0, 0, 2), (0, 2, 4), (0, 4, 6),] + ), + t!(basic850, &["foo", "foo"], "foobarfoo", &[(0, 0, 3), (0, 6, 9),]), +]; + +/// Tests for leftmost match semantics. These should pass for both +/// leftmost-first and leftmost-longest match kinds. Stated differently, among +/// ambiguous matches, the longest match and the match that appeared first when +/// constructing the automaton should always be the same. 
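+///
+/// For example, `leftmost030` below searches for `["a", "ab"]` in `"aa"`:
+/// `"ab"` never matches, so both leftmost-first and leftmost-longest report
+/// `[(0, 0, 1), (0, 1, 2)]`.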
+const LEFTMOST: &'static [SearchTest] = &[ + t!(leftmost000, &["ab", "ab"], "abcd", &[(0, 0, 2)]), + t!(leftmost030, &["a", "ab"], "aa", &[(0, 0, 1), (0, 1, 2)]), + t!(leftmost031, &["ab", "a"], "aa", &[(1, 0, 1), (1, 1, 2)]), + t!(leftmost032, &["ab", "a"], "xayabbbz", &[(1, 1, 2), (0, 3, 5)]), + t!(leftmost300, &["abcd", "bce", "b"], "abce", &[(1, 1, 4)]), + t!(leftmost310, &["abcd", "ce", "bc"], "abce", &[(2, 1, 3)]), + t!(leftmost320, &["abcd", "bce", "ce", "b"], "abce", &[(1, 1, 4)]), + t!(leftmost330, &["abcd", "bce", "cz", "bc"], "abcz", &[(3, 1, 3)]), + t!(leftmost340, &["bce", "cz", "bc"], "bcz", &[(2, 0, 2)]), + t!(leftmost350, &["abc", "bd", "ab"], "abd", &[(2, 0, 2)]), + t!( + leftmost360, + &["abcdefghi", "hz", "abcdefgh"], + "abcdefghz", + &[(2, 0, 8),] + ), + t!( + leftmost370, + &["abcdefghi", "cde", "hz", "abcdefgh"], + "abcdefghz", + &[(3, 0, 8),] + ), + t!( + leftmost380, + &["abcdefghi", "hz", "abcdefgh", "a"], + "abcdefghz", + &[(2, 0, 8),] + ), + t!( + leftmost390, + &["b", "abcdefghi", "hz", "abcdefgh"], + "abcdefghz", + &[(3, 0, 8),] + ), + t!( + leftmost400, + &["h", "abcdefghi", "hz", "abcdefgh"], + "abcdefghz", + &[(3, 0, 8),] + ), + t!( + leftmost410, + &["z", "abcdefghi", "hz", "abcdefgh"], + "abcdefghz", + &[(3, 0, 8), (0, 8, 9),] + ), +]; + +/// Tests for non-overlapping leftmost-first match semantics. These tests +/// should generally be specific to leftmost-first, which means they should +/// generally fail under leftmost-longest semantics. +const LEFTMOST_FIRST: &'static [SearchTest] = &[ + t!(leftfirst000, &["ab", "abcd"], "abcd", &[(0, 0, 2)]), + t!(leftfirst020, &["abcd", "ab"], "abcd", &[(0, 0, 4)]), + t!(leftfirst030, &["ab", "ab"], "abcd", &[(0, 0, 2)]), + t!(leftfirst040, &["a", "ab"], "xayabbbz", &[(0, 1, 2), (0, 3, 4)]), + t!(leftfirst100, &["abcdefg", "bcde", "bcdef"], "abcdef", &[(1, 1, 5)]), + t!(leftfirst110, &["abcdefg", "bcdef", "bcde"], "abcdef", &[(1, 1, 6)]), + t!(leftfirst300, &["abcd", "b", "bce"], "abce", &[(1, 1, 2)]), + t!( + leftfirst310, + &["abcd", "b", "bce", "ce"], + "abce", + &[(1, 1, 2), (3, 2, 4),] + ), + t!( + leftfirst320, + &["a", "abcdefghi", "hz", "abcdefgh"], + "abcdefghz", + &[(0, 0, 1), (2, 7, 9),] + ), + t!(leftfirst330, &["a", "abab"], "abab", &[(0, 0, 1), (0, 2, 3)]), + t!( + leftfirst340, + &["abcdef", "x", "x", "x", "x", "x", "x", "abcde"], + "abcdef", + &[(0, 0, 6)] + ), +]; + +/// Tests for non-overlapping leftmost-longest match semantics. These tests +/// should generally be specific to leftmost-longest, which means they should +/// generally fail under leftmost-first semantics. 
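+///
+/// A typical contrast with the `LEFTMOST_FIRST` group: for `["ab", "abcd"]`
+/// in `"abcd"`, leftmost-first reports `(0, 0, 2)` (`leftfirst000` above),
+/// while leftmost-longest reports `(1, 0, 4)` (`leftlong000` below).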
+const LEFTMOST_LONGEST: &'static [SearchTest] = &[ + t!(leftlong000, &["ab", "abcd"], "abcd", &[(1, 0, 4)]), + t!(leftlong010, &["abcd", "bcd", "cd", "b"], "abcd", &[(0, 0, 4),]), + t!(leftlong040, &["a", "ab"], "a", &[(0, 0, 1)]), + t!(leftlong050, &["a", "ab"], "ab", &[(1, 0, 2)]), + t!(leftlong060, &["ab", "a"], "a", &[(1, 0, 1)]), + t!(leftlong070, &["ab", "a"], "ab", &[(0, 0, 2)]), + t!(leftlong100, &["abcdefg", "bcde", "bcdef"], "abcdef", &[(2, 1, 6)]), + t!(leftlong110, &["abcdefg", "bcdef", "bcde"], "abcdef", &[(1, 1, 6)]), + t!(leftlong300, &["abcd", "b", "bce"], "abce", &[(2, 1, 4)]), + t!( + leftlong310, + &["a", "abcdefghi", "hz", "abcdefgh"], + "abcdefghz", + &[(3, 0, 8),] + ), + t!(leftlong320, &["a", "abab"], "abab", &[(1, 0, 4)]), + t!(leftlong330, &["abcd", "b", "ce"], "abce", &[(1, 1, 2), (2, 2, 4),]), + t!(leftlong340, &["a", "ab"], "xayabbbz", &[(0, 1, 2), (1, 3, 5)]), +]; + +/// Regression tests that are applied to all combinations. +/// +/// If regression tests are needed for specific match semantics, then add them +/// to the appropriate group above. +const REGRESSION: &'static [SearchTest] = &[ + t!(regression010, &["inf", "ind"], "infind", &[(0, 0, 3), (1, 3, 6),]), + t!(regression020, &["ind", "inf"], "infind", &[(1, 0, 3), (0, 3, 6),]), + t!( + regression030, + &["libcore/", "libstd/"], + "libcore/char/methods.rs", + &[(0, 0, 8),] + ), + t!( + regression040, + &["libstd/", "libcore/"], + "libcore/char/methods.rs", + &[(1, 0, 8),] + ), + t!( + regression050, + &["\x00\x00\x01", "\x00\x00\x00"], + "\x00\x00\x00", + &[(1, 0, 3),] + ), + t!( + regression060, + &["\x00\x00\x00", "\x00\x00\x01"], + "\x00\x00\x00", + &[(0, 0, 3),] + ), +]; + +const TEDDY: &'static [SearchTest] = &[ + t!( + teddy010, + &["a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k"], + "abcdefghijk", + &[ + (0, 0, 1), + (1, 1, 2), + (2, 2, 3), + (3, 3, 4), + (4, 4, 5), + (5, 5, 6), + (6, 6, 7), + (7, 7, 8), + (8, 8, 9), + (9, 9, 10), + (10, 10, 11) + ] + ), + t!( + teddy020, + &["ab", "bc", "cd", "de", "ef", "fg", "gh", "hi", "ij", "jk", "kl"], + "abcdefghijk", + &[(0, 0, 2), (2, 2, 4), (4, 4, 6), (6, 6, 8), (8, 8, 10),] + ), + t!( + teddy030, + &["abc"], + "abcdefghijklmnopqrstuvwxyzabcdefghijk", + &[(0, 0, 3), (0, 26, 29)] + ), +]; + +// Now define a test for each combination of things above that we want to run. +// Since there are a few different combinations for each collection of tests, +// we define a couple of macros to avoid repetition drudgery. The testconfig +// macro constructs the automaton from a given match kind, and runs the search +// tests one-by-one over the given collection. The `with` parameter allows one +// to configure the config with additional parameters. The testcombo macro +// invokes testconfig in precisely this way: it sets up several tests where +// each one turns a different knob on Config. + +macro_rules! testconfig { + ($name:ident, $collection:expr, $with:expr) => { + #[test] + fn $name() { + run_search_tests($collection, |test| { + let mut config = Config::new(); + $with(&mut config); + let mut builder = config.builder(); + builder.extend(test.patterns.iter().map(|p| p.as_bytes())); + let searcher = match builder.build() { + Some(searcher) => searcher, + None => { + // For x86-64 and aarch64, not building a searcher is + // probably a bug, so be loud. 
+ if cfg!(any( + target_arch = "x86_64", + target_arch = "aarch64" + )) { + panic!("failed to build packed searcher") + } + return None; + } + }; + Some(searcher.find_iter(&test.haystack).collect()) + }); + } + }; +} + +testconfig!( + search_default_leftmost_first, + PACKED_LEFTMOST_FIRST, + |_: &mut Config| {} +); + +testconfig!( + search_default_leftmost_longest, + PACKED_LEFTMOST_LONGEST, + |c: &mut Config| { + c.match_kind(MatchKind::LeftmostLongest); + } +); + +testconfig!( + search_teddy_leftmost_first, + PACKED_LEFTMOST_FIRST, + |c: &mut Config| { + c.only_teddy(true); + } +); + +testconfig!( + search_teddy_leftmost_longest, + PACKED_LEFTMOST_LONGEST, + |c: &mut Config| { + c.only_teddy(true).match_kind(MatchKind::LeftmostLongest); + } +); + +testconfig!( + search_teddy_ssse3_leftmost_first, + PACKED_LEFTMOST_FIRST, + |c: &mut Config| { + c.only_teddy(true); + #[cfg(target_arch = "x86_64")] + if std::is_x86_feature_detected!("ssse3") { + c.only_teddy_256bit(Some(false)); + } + } +); + +testconfig!( + search_teddy_ssse3_leftmost_longest, + PACKED_LEFTMOST_LONGEST, + |c: &mut Config| { + c.only_teddy(true).match_kind(MatchKind::LeftmostLongest); + #[cfg(target_arch = "x86_64")] + if std::is_x86_feature_detected!("ssse3") { + c.only_teddy_256bit(Some(false)); + } + } +); + +testconfig!( + search_teddy_avx2_leftmost_first, + PACKED_LEFTMOST_FIRST, + |c: &mut Config| { + c.only_teddy(true); + #[cfg(target_arch = "x86_64")] + if std::is_x86_feature_detected!("avx2") { + c.only_teddy_256bit(Some(true)); + } + } +); + +testconfig!( + search_teddy_avx2_leftmost_longest, + PACKED_LEFTMOST_LONGEST, + |c: &mut Config| { + c.only_teddy(true).match_kind(MatchKind::LeftmostLongest); + #[cfg(target_arch = "x86_64")] + if std::is_x86_feature_detected!("avx2") { + c.only_teddy_256bit(Some(true)); + } + } +); + +testconfig!( + search_teddy_fat_leftmost_first, + PACKED_LEFTMOST_FIRST, + |c: &mut Config| { + c.only_teddy(true); + #[cfg(target_arch = "x86_64")] + if std::is_x86_feature_detected!("avx2") { + c.only_teddy_fat(Some(true)); + } + } +); + +testconfig!( + search_teddy_fat_leftmost_longest, + PACKED_LEFTMOST_LONGEST, + |c: &mut Config| { + c.only_teddy(true).match_kind(MatchKind::LeftmostLongest); + #[cfg(target_arch = "x86_64")] + if std::is_x86_feature_detected!("avx2") { + c.only_teddy_fat(Some(true)); + } + } +); + +testconfig!( + search_rabinkarp_leftmost_first, + PACKED_LEFTMOST_FIRST, + |c: &mut Config| { + c.only_rabin_karp(true); + } +); + +testconfig!( + search_rabinkarp_leftmost_longest, + PACKED_LEFTMOST_LONGEST, + |c: &mut Config| { + c.only_rabin_karp(true).match_kind(MatchKind::LeftmostLongest); + } +); + +#[test] +fn search_tests_have_unique_names() { + let assert = |constname, tests: &[SearchTest]| { + let mut seen = HashMap::new(); // map from test name to position + for (i, test) in tests.iter().enumerate() { + if !seen.contains_key(test.name) { + seen.insert(test.name, i); + } else { + let last = seen[test.name]; + panic!( + "{} tests have duplicate names at positions {} and {}", + constname, last, i + ); + } + } + }; + assert("BASICS", BASICS); + assert("LEFTMOST", LEFTMOST); + assert("LEFTMOST_FIRST", LEFTMOST_FIRST); + assert("LEFTMOST_LONGEST", LEFTMOST_LONGEST); + assert("REGRESSION", REGRESSION); + assert("TEDDY", TEDDY); +} + +fn run_search_tests Option>>( + which: TestCollection, + mut f: F, +) { + let get_match_triples = + |matches: Vec| -> Vec<(usize, usize, usize)> { + matches + .into_iter() + .map(|m| (m.pattern().as_usize(), m.start(), m.end())) + .collect() + 
}; + for &tests in which { + for spec in tests { + for test in spec.variations() { + let results = match f(&test) { + None => continue, + Some(results) => results, + }; + assert_eq!( + test.matches, + get_match_triples(results).as_slice(), + "test: {}, patterns: {:?}, haystack(len={:?}): {:?}, \ + offset: {:?}", + test.name, + test.patterns, + test.haystack.len(), + test.haystack, + test.offset, + ); + } + } + } +} diff --git a/vendor/aho-corasick/src/packed/vector.rs b/vendor/aho-corasick/src/packed/vector.rs new file mode 100644 index 00000000000000..57c02ccf8f320a --- /dev/null +++ b/vendor/aho-corasick/src/packed/vector.rs @@ -0,0 +1,1757 @@ +// NOTE: The descriptions for each of the vector methods on the traits below +// are pretty inscrutable. For this reason, there are tests for every method +// on for every trait impl below. If you're confused about what an op does, +// consult its test. (They probably should be doc tests, but I couldn't figure +// out how to write them in a non-annoying way.) + +use core::{ + fmt::Debug, + panic::{RefUnwindSafe, UnwindSafe}, +}; + +/// A trait for describing vector operations used by vectorized searchers. +/// +/// The trait is highly constrained to low level vector operations needed for +/// the specific algorithms used in this crate. In general, it was invented +/// mostly to be generic over x86's __m128i and __m256i types. At time of +/// writing, it also supports wasm and aarch64 128-bit vector types as well. +/// +/// # Safety +/// +/// All methods are not safe since they are intended to be implemented using +/// vendor intrinsics, which are also not safe. Callers must ensure that +/// the appropriate target features are enabled in the calling function, +/// and that the current CPU supports them. All implementations should +/// avoid marking the routines with `#[target_feature]` and instead mark +/// them as `#[inline(always)]` to ensure they get appropriately inlined. +/// (`inline(always)` cannot be used with target_feature.) +pub(crate) trait Vector: + Copy + Debug + Send + Sync + UnwindSafe + RefUnwindSafe +{ + /// The number of bits in the vector. + const BITS: usize; + /// The number of bytes in the vector. That is, this is the size of the + /// vector in memory. + const BYTES: usize; + + /// Create a vector with 8-bit lanes with the given byte repeated into each + /// lane. + /// + /// # Safety + /// + /// Callers must ensure that this is okay to call in the current target for + /// the current CPU. + unsafe fn splat(byte: u8) -> Self; + + /// Read a vector-size number of bytes from the given pointer. The pointer + /// does not need to be aligned. + /// + /// # Safety + /// + /// Callers must ensure that this is okay to call in the current target for + /// the current CPU. + /// + /// Callers must guarantee that at least `BYTES` bytes are readable from + /// `data`. + unsafe fn load_unaligned(data: *const u8) -> Self; + + /// Returns true if and only if this vector has zero in all of its lanes. + /// + /// # Safety + /// + /// Callers must ensure that this is okay to call in the current target for + /// the current CPU. + unsafe fn is_zero(self) -> bool; + + /// Do an 8-bit pairwise equality check. If lane `i` is equal in this + /// vector and the one given, then lane `i` in the resulting vector is set + /// to `0xFF`. Otherwise, it is set to `0x00`. + /// + /// # Safety + /// + /// Callers must ensure that this is okay to call in the current target for + /// the current CPU. 
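+ ///
+ /// For example, comparing 8-bit lanes `[1, 2, 3, ...]` against
+ /// `[1, 9, 3, ...]` yields `[0xFF, 0x00, 0xFF, ...]`.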
+ unsafe fn cmpeq(self, vector2: Self) -> Self; + + /// Perform a bitwise 'and' of this vector and the one given and return + /// the result. + /// + /// # Safety + /// + /// Callers must ensure that this is okay to call in the current target for + /// the current CPU. + unsafe fn and(self, vector2: Self) -> Self; + + /// Perform a bitwise 'or' of this vector and the one given and return + /// the result. + /// + /// # Safety + /// + /// Callers must ensure that this is okay to call in the current target for + /// the current CPU. + #[allow(dead_code)] // unused, but useful enough to keep around? + unsafe fn or(self, vector2: Self) -> Self; + + /// Shift each 8-bit lane in this vector to the right by the number of + /// bits indictated by the `BITS` type parameter. + /// + /// # Safety + /// + /// Callers must ensure that this is okay to call in the current target for + /// the current CPU. + unsafe fn shift_8bit_lane_right(self) -> Self; + + /// Shift this vector to the left by one byte and shift the most + /// significant byte of `vector2` into the least significant position of + /// this vector. + /// + /// Stated differently, this behaves as if `self` and `vector2` were + /// concatenated into a `2 * Self::BITS` temporary buffer and then shifted + /// right by `Self::BYTES - 1` bytes. + /// + /// With respect to the Teddy algorithm, `vector2` is usually a previous + /// `Self::BYTES` chunk from the haystack and `self` is the chunk + /// immediately following it. This permits combining the last two bytes + /// from the previous chunk (`vector2`) with the first `Self::BYTES - 1` + /// bytes from the current chunk. This permits aligning the result of + /// various shuffles so that they can be and-ed together and a possible + /// candidate discovered. + /// + /// # Safety + /// + /// Callers must ensure that this is okay to call in the current target for + /// the current CPU. + unsafe fn shift_in_one_byte(self, vector2: Self) -> Self; + + /// Shift this vector to the left by two bytes and shift the two most + /// significant bytes of `vector2` into the least significant position of + /// this vector. + /// + /// Stated differently, this behaves as if `self` and `vector2` were + /// concatenated into a `2 * Self::BITS` temporary buffer and then shifted + /// right by `Self::BYTES - 2` bytes. + /// + /// With respect to the Teddy algorithm, `vector2` is usually a previous + /// `Self::BYTES` chunk from the haystack and `self` is the chunk + /// immediately following it. This permits combining the last two bytes + /// from the previous chunk (`vector2`) with the first `Self::BYTES - 2` + /// bytes from the current chunk. This permits aligning the result of + /// various shuffles so that they can be and-ed together and a possible + /// candidate discovered. + /// + /// # Safety + /// + /// Callers must ensure that this is okay to call in the current target for + /// the current CPU. + unsafe fn shift_in_two_bytes(self, vector2: Self) -> Self; + + /// Shift this vector to the left by three bytes and shift the three most + /// significant bytes of `vector2` into the least significant position of + /// this vector. + /// + /// Stated differently, this behaves as if `self` and `vector2` were + /// concatenated into a `2 * Self::BITS` temporary buffer and then shifted + /// right by `Self::BYTES - 3` bytes. + /// + /// With respect to the Teddy algorithm, `vector2` is usually a previous + /// `Self::BYTES` chunk from the haystack and `self` is the chunk + /// immediately following it. 
This permits combining the last three bytes + /// from the previous chunk (`vector2`) with the first `Self::BYTES - 3` + /// bytes from the current chunk. This permits aligning the result of + /// various shuffles so that they can be and-ed together and a possible + /// candidate discovered. + /// + /// # Safety + /// + /// Callers must ensure that this is okay to call in the current target for + /// the current CPU. + unsafe fn shift_in_three_bytes(self, vector2: Self) -> Self; + + /// Shuffles the bytes in this vector according to the indices in each of + /// the corresponding lanes in `indices`. + /// + /// If `i` is the index of corresponding lanes, `A` is this vector, `B` is + /// indices and `C` is the resulting vector, then `C = A[B[i]]`. + /// + /// # Safety + /// + /// Callers must ensure that this is okay to call in the current target for + /// the current CPU. + unsafe fn shuffle_bytes(self, indices: Self) -> Self; + + /// Call the provided function for each 64-bit lane in this vector. The + /// given function is provided the lane index and lane value as a `u64`. + /// + /// If `f` returns `Some`, then iteration over the lanes is stopped and the + /// value is returned. Otherwise, this returns `None`. + /// + /// # Notes + /// + /// Conceptually it would be nice if we could have a + /// `unpack64(self) -> [u64; BITS / 64]` method, but defining that is + /// tricky given Rust's [current support for const generics][support]. + /// And even if we could, it would be tricky to write generic code over + /// it. (Not impossible. We could introduce another layer that requires + /// `AsRef<[u64]>` or something.) + /// + /// [support]: https://github.com/rust-lang/rust/issues/60551 + /// + /// # Safety + /// + /// Callers must ensure that this is okay to call in the current target for + /// the current CPU. + unsafe fn for_each_64bit_lane( + self, + f: impl FnMut(usize, u64) -> Option, + ) -> Option; +} + +/// This trait extends the `Vector` trait with additional operations to support +/// Fat Teddy. +/// +/// Fat Teddy uses 16 buckets instead of 8, but reads half as many bytes (as +/// the vector size) instead of the full size of a vector per iteration. For +/// example, when using a 256-bit vector, Slim Teddy reads 32 bytes at a timr +/// but Fat Teddy reads 16 bytes at a time. +/// +/// Fat Teddy is useful when searching for a large number of literals. +/// The extra number of buckets spreads the literals out more and reduces +/// verification time. +/// +/// Currently we only implement this for AVX on x86_64. It would be nice to +/// implement this for SSE on x86_64 and NEON on aarch64, with the latter two +/// only reading 8 bytes at a time. It's not clear how well it would work, but +/// there are some tricky things to figure out in terms of implementation. The +/// `half_shift_in_{one,two,three}_bytes` methods in particular are probably +/// the trickiest of the bunch. For AVX2, these are implemented by taking +/// advantage of the fact that `_mm256_alignr_epi8` operates on each 128-bit +/// half instead of the full 256-bit vector. (Where as `_mm_alignr_epi8` +/// operates on the full 128-bit vector and not on each 64-bit half.) I didn't +/// do a careful survey of NEON to see if it could easily support these +/// operations. +pub(crate) trait FatVector: Vector { + type Half: Vector; + + /// Read a half-vector-size number of bytes from the given pointer, and + /// broadcast it across both halfs of a full vector. The pointer does not + /// need to be aligned. 
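+ ///
+ /// For example, with 256-bit vectors, reading bytes `b0..b15` from `data`
+ /// produces the vector `[b0..b15, b0..b15]`, i.e. the same 16 bytes in
+ /// both 128-bit halves.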
+ /// + /// # Safety + /// + /// Callers must ensure that this is okay to call in the current target for + /// the current CPU. + /// + /// Callers must guarantee that at least `Self::HALF::BYTES` bytes are + /// readable from `data`. + unsafe fn load_half_unaligned(data: *const u8) -> Self; + + /// Like `Vector::shift_in_one_byte`, except this is done for each half + /// of the vector instead. + /// + /// # Safety + /// + /// Callers must ensure that this is okay to call in the current target for + /// the current CPU. + unsafe fn half_shift_in_one_byte(self, vector2: Self) -> Self; + + /// Like `Vector::shift_in_two_bytes`, except this is done for each half + /// of the vector instead. + /// + /// # Safety + /// + /// Callers must ensure that this is okay to call in the current target for + /// the current CPU. + unsafe fn half_shift_in_two_bytes(self, vector2: Self) -> Self; + + /// Like `Vector::shift_in_two_bytes`, except this is done for each half + /// of the vector instead. + /// + /// # Safety + /// + /// Callers must ensure that this is okay to call in the current target for + /// the current CPU. + unsafe fn half_shift_in_three_bytes(self, vector2: Self) -> Self; + + /// Swap the 128-bit lanes in this vector. + /// + /// # Safety + /// + /// Callers must ensure that this is okay to call in the current target for + /// the current CPU. + unsafe fn swap_halves(self) -> Self; + + /// Unpack and interleave the 8-bit lanes from the low 128 bits of each + /// vector and return the result. + /// + /// # Safety + /// + /// Callers must ensure that this is okay to call in the current target for + /// the current CPU. + unsafe fn interleave_low_8bit_lanes(self, vector2: Self) -> Self; + + /// Unpack and interleave the 8-bit lanes from the high 128 bits of each + /// vector and return the result. + /// + /// # Safety + /// + /// Callers must ensure that this is okay to call in the current target for + /// the current CPU. + unsafe fn interleave_high_8bit_lanes(self, vector2: Self) -> Self; + + /// Call the provided function for each 64-bit lane in the lower half + /// of this vector and then in the other vector. The given function is + /// provided the lane index and lane value as a `u64`. (The high 128-bits + /// of each vector are ignored.) + /// + /// If `f` returns `Some`, then iteration over the lanes is stopped and the + /// value is returned. Otherwise, this returns `None`. + /// + /// # Safety + /// + /// Callers must ensure that this is okay to call in the current target for + /// the current CPU. 
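+ ///
+ /// In the AVX2 implementation below, the callback sees the two low 64-bit
+ /// lanes of `self` as indices 0 and 1, then the two low 64-bit lanes of
+ /// `vector2` as indices 2 and 3.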
+ unsafe fn for_each_low_64bit_lane( + self, + vector2: Self, + f: impl FnMut(usize, u64) -> Option, + ) -> Option; +} + +#[cfg(all(target_arch = "x86_64", target_feature = "sse2"))] +mod x86_64_ssse3 { + use core::arch::x86_64::*; + + use crate::util::int::{I32, I8}; + + use super::Vector; + + impl Vector for __m128i { + const BITS: usize = 128; + const BYTES: usize = 16; + + #[inline(always)] + unsafe fn splat(byte: u8) -> __m128i { + _mm_set1_epi8(i8::from_bits(byte)) + } + + #[inline(always)] + unsafe fn load_unaligned(data: *const u8) -> __m128i { + _mm_loadu_si128(data.cast::<__m128i>()) + } + + #[inline(always)] + unsafe fn is_zero(self) -> bool { + let cmp = self.cmpeq(Self::splat(0)); + _mm_movemask_epi8(cmp).to_bits() == 0xFFFF + } + + #[inline(always)] + unsafe fn cmpeq(self, vector2: Self) -> __m128i { + _mm_cmpeq_epi8(self, vector2) + } + + #[inline(always)] + unsafe fn and(self, vector2: Self) -> __m128i { + _mm_and_si128(self, vector2) + } + + #[inline(always)] + unsafe fn or(self, vector2: Self) -> __m128i { + _mm_or_si128(self, vector2) + } + + #[inline(always)] + unsafe fn shift_8bit_lane_right(self) -> Self { + // Apparently there is no _mm_srli_epi8, so we emulate it by + // shifting 16-bit integers and masking out the high nybble of each + // 8-bit lane (since that nybble will contain bits from the low + // nybble of the previous lane). + let lomask = Self::splat(0xF); + _mm_srli_epi16(self, BITS).and(lomask) + } + + #[inline(always)] + unsafe fn shift_in_one_byte(self, vector2: Self) -> Self { + _mm_alignr_epi8(self, vector2, 15) + } + + #[inline(always)] + unsafe fn shift_in_two_bytes(self, vector2: Self) -> Self { + _mm_alignr_epi8(self, vector2, 14) + } + + #[inline(always)] + unsafe fn shift_in_three_bytes(self, vector2: Self) -> Self { + _mm_alignr_epi8(self, vector2, 13) + } + + #[inline(always)] + unsafe fn shuffle_bytes(self, indices: Self) -> Self { + _mm_shuffle_epi8(self, indices) + } + + #[inline(always)] + unsafe fn for_each_64bit_lane( + self, + mut f: impl FnMut(usize, u64) -> Option, + ) -> Option { + // We could just use _mm_extract_epi64 here, but that requires + // SSE 4.1. It isn't necessarily a problem to just require SSE 4.1, + // but everything else works with SSSE3 so we stick to that subset. 
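+ // On little-endian x86-64, the transmute below yields `lanes[0]` =
+ // bytes 0..8 and `lanes[1]` = bytes 8..16 of the vector (see the
+ // `vector_for_each_64bit_lane` test at the bottom of this file).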
+ let lanes: [u64; 2] = core::mem::transmute(self); + if let Some(t) = f(0, lanes[0]) { + return Some(t); + } + if let Some(t) = f(1, lanes[1]) { + return Some(t); + } + None + } + } +} + +#[cfg(all(target_arch = "x86_64", target_feature = "sse2"))] +mod x86_64_avx2 { + use core::arch::x86_64::*; + + use crate::util::int::{I32, I64, I8}; + + use super::{FatVector, Vector}; + + impl Vector for __m256i { + const BITS: usize = 256; + const BYTES: usize = 32; + + #[inline(always)] + unsafe fn splat(byte: u8) -> __m256i { + _mm256_set1_epi8(i8::from_bits(byte)) + } + + #[inline(always)] + unsafe fn load_unaligned(data: *const u8) -> __m256i { + _mm256_loadu_si256(data.cast::<__m256i>()) + } + + #[inline(always)] + unsafe fn is_zero(self) -> bool { + let cmp = self.cmpeq(Self::splat(0)); + _mm256_movemask_epi8(cmp).to_bits() == 0xFFFFFFFF + } + + #[inline(always)] + unsafe fn cmpeq(self, vector2: Self) -> __m256i { + _mm256_cmpeq_epi8(self, vector2) + } + + #[inline(always)] + unsafe fn and(self, vector2: Self) -> __m256i { + _mm256_and_si256(self, vector2) + } + + #[inline(always)] + unsafe fn or(self, vector2: Self) -> __m256i { + _mm256_or_si256(self, vector2) + } + + #[inline(always)] + unsafe fn shift_8bit_lane_right(self) -> Self { + let lomask = Self::splat(0xF); + _mm256_srli_epi16(self, BITS).and(lomask) + } + + #[inline(always)] + unsafe fn shift_in_one_byte(self, vector2: Self) -> Self { + // Credit goes to jneem for figuring this out: + // https://github.com/jneem/teddy/blob/9ab5e899ad6ef6911aecd3cf1033f1abe6e1f66c/src/x86/teddy_simd.rs#L145-L184 + // + // TL;DR avx2's PALIGNR instruction is actually just two 128-bit + // PALIGNR instructions, which is not what we want, so we need to + // do some extra shuffling. + let v = _mm256_permute2x128_si256(vector2, self, 0x21); + _mm256_alignr_epi8(self, v, 15) + } + + #[inline(always)] + unsafe fn shift_in_two_bytes(self, vector2: Self) -> Self { + // Credit goes to jneem for figuring this out: + // https://github.com/jneem/teddy/blob/9ab5e899ad6ef6911aecd3cf1033f1abe6e1f66c/src/x86/teddy_simd.rs#L145-L184 + // + // TL;DR avx2's PALIGNR instruction is actually just two 128-bit + // PALIGNR instructions, which is not what we want, so we need to + // do some extra shuffling. + let v = _mm256_permute2x128_si256(vector2, self, 0x21); + _mm256_alignr_epi8(self, v, 14) + } + + #[inline(always)] + unsafe fn shift_in_three_bytes(self, vector2: Self) -> Self { + // Credit goes to jneem for figuring this out: + // https://github.com/jneem/teddy/blob/9ab5e899ad6ef6911aecd3cf1033f1abe6e1f66c/src/x86/teddy_simd.rs#L145-L184 + // + // TL;DR avx2's PALIGNR instruction is actually just two 128-bit + // PALIGNR instructions, which is not what we want, so we need to + // do some extra shuffling. + let v = _mm256_permute2x128_si256(vector2, self, 0x21); + _mm256_alignr_epi8(self, v, 13) + } + + #[inline(always)] + unsafe fn shuffle_bytes(self, indices: Self) -> Self { + _mm256_shuffle_epi8(self, indices) + } + + #[inline(always)] + unsafe fn for_each_64bit_lane( + self, + mut f: impl FnMut(usize, u64) -> Option, + ) -> Option { + // NOTE: At one point in the past, I used transmute to this to + // get a [u64; 4], but it turned out to lead to worse codegen IIRC. + // I've tried it more recently, and it looks like that's no longer + // the case. But since there's no difference, we stick with the + // slightly more complicated but transmute-free version. 
+ let lane = _mm256_extract_epi64(self, 0).to_bits(); + if let Some(t) = f(0, lane) { + return Some(t); + } + let lane = _mm256_extract_epi64(self, 1).to_bits(); + if let Some(t) = f(1, lane) { + return Some(t); + } + let lane = _mm256_extract_epi64(self, 2).to_bits(); + if let Some(t) = f(2, lane) { + return Some(t); + } + let lane = _mm256_extract_epi64(self, 3).to_bits(); + if let Some(t) = f(3, lane) { + return Some(t); + } + None + } + } + + impl FatVector for __m256i { + type Half = __m128i; + + #[inline(always)] + unsafe fn load_half_unaligned(data: *const u8) -> Self { + let half = Self::Half::load_unaligned(data); + _mm256_broadcastsi128_si256(half) + } + + #[inline(always)] + unsafe fn half_shift_in_one_byte(self, vector2: Self) -> Self { + _mm256_alignr_epi8(self, vector2, 15) + } + + #[inline(always)] + unsafe fn half_shift_in_two_bytes(self, vector2: Self) -> Self { + _mm256_alignr_epi8(self, vector2, 14) + } + + #[inline(always)] + unsafe fn half_shift_in_three_bytes(self, vector2: Self) -> Self { + _mm256_alignr_epi8(self, vector2, 13) + } + + #[inline(always)] + unsafe fn swap_halves(self) -> Self { + _mm256_permute4x64_epi64(self, 0x4E) + } + + #[inline(always)] + unsafe fn interleave_low_8bit_lanes(self, vector2: Self) -> Self { + _mm256_unpacklo_epi8(self, vector2) + } + + #[inline(always)] + unsafe fn interleave_high_8bit_lanes(self, vector2: Self) -> Self { + _mm256_unpackhi_epi8(self, vector2) + } + + #[inline(always)] + unsafe fn for_each_low_64bit_lane( + self, + vector2: Self, + mut f: impl FnMut(usize, u64) -> Option, + ) -> Option { + let lane = _mm256_extract_epi64(self, 0).to_bits(); + if let Some(t) = f(0, lane) { + return Some(t); + } + let lane = _mm256_extract_epi64(self, 1).to_bits(); + if let Some(t) = f(1, lane) { + return Some(t); + } + let lane = _mm256_extract_epi64(vector2, 0).to_bits(); + if let Some(t) = f(2, lane) { + return Some(t); + } + let lane = _mm256_extract_epi64(vector2, 1).to_bits(); + if let Some(t) = f(3, lane) { + return Some(t); + } + None + } + } +} + +#[cfg(all( + target_arch = "aarch64", + target_feature = "neon", + target_endian = "little" +))] +mod aarch64_neon { + use core::arch::aarch64::*; + + use super::Vector; + + impl Vector for uint8x16_t { + const BITS: usize = 128; + const BYTES: usize = 16; + + #[inline(always)] + unsafe fn splat(byte: u8) -> uint8x16_t { + vdupq_n_u8(byte) + } + + #[inline(always)] + unsafe fn load_unaligned(data: *const u8) -> uint8x16_t { + vld1q_u8(data) + } + + #[inline(always)] + unsafe fn is_zero(self) -> bool { + // Could also use vmaxvq_u8. + // ... I tried that and couldn't observe any meaningful difference + // in benchmarks. 
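+ // `vpmaxq_u8(self, self)` packs the pairwise maxima of `self` into the
+ // low 8 bytes of the result, so the low 64-bit lane is zero if and only
+ // if every byte of `self` is zero.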
+ let maxes = vreinterpretq_u64_u8(vpmaxq_u8(self, self)); + vgetq_lane_u64(maxes, 0) == 0 + } + + #[inline(always)] + unsafe fn cmpeq(self, vector2: Self) -> uint8x16_t { + vceqq_u8(self, vector2) + } + + #[inline(always)] + unsafe fn and(self, vector2: Self) -> uint8x16_t { + vandq_u8(self, vector2) + } + + #[inline(always)] + unsafe fn or(self, vector2: Self) -> uint8x16_t { + vorrq_u8(self, vector2) + } + + #[inline(always)] + unsafe fn shift_8bit_lane_right(self) -> Self { + debug_assert!(BITS <= 7); + vshrq_n_u8(self, BITS) + } + + #[inline(always)] + unsafe fn shift_in_one_byte(self, vector2: Self) -> Self { + vextq_u8(vector2, self, 15) + } + + #[inline(always)] + unsafe fn shift_in_two_bytes(self, vector2: Self) -> Self { + vextq_u8(vector2, self, 14) + } + + #[inline(always)] + unsafe fn shift_in_three_bytes(self, vector2: Self) -> Self { + vextq_u8(vector2, self, 13) + } + + #[inline(always)] + unsafe fn shuffle_bytes(self, indices: Self) -> Self { + vqtbl1q_u8(self, indices) + } + + #[inline(always)] + unsafe fn for_each_64bit_lane( + self, + mut f: impl FnMut(usize, u64) -> Option, + ) -> Option { + let this = vreinterpretq_u64_u8(self); + let lane = vgetq_lane_u64(this, 0); + if let Some(t) = f(0, lane) { + return Some(t); + } + let lane = vgetq_lane_u64(this, 1); + if let Some(t) = f(1, lane) { + return Some(t); + } + None + } + } +} + +#[cfg(all(test, target_arch = "x86_64", target_feature = "sse2"))] +mod tests_x86_64_ssse3 { + use core::arch::x86_64::*; + + use crate::util::int::{I32, U32}; + + use super::*; + + fn is_runnable() -> bool { + std::is_x86_feature_detected!("ssse3") + } + + #[target_feature(enable = "ssse3")] + unsafe fn load(lanes: [u8; 16]) -> __m128i { + __m128i::load_unaligned(&lanes as *const u8) + } + + #[target_feature(enable = "ssse3")] + unsafe fn unload(v: __m128i) -> [u8; 16] { + [ + _mm_extract_epi8(v, 0).to_bits().low_u8(), + _mm_extract_epi8(v, 1).to_bits().low_u8(), + _mm_extract_epi8(v, 2).to_bits().low_u8(), + _mm_extract_epi8(v, 3).to_bits().low_u8(), + _mm_extract_epi8(v, 4).to_bits().low_u8(), + _mm_extract_epi8(v, 5).to_bits().low_u8(), + _mm_extract_epi8(v, 6).to_bits().low_u8(), + _mm_extract_epi8(v, 7).to_bits().low_u8(), + _mm_extract_epi8(v, 8).to_bits().low_u8(), + _mm_extract_epi8(v, 9).to_bits().low_u8(), + _mm_extract_epi8(v, 10).to_bits().low_u8(), + _mm_extract_epi8(v, 11).to_bits().low_u8(), + _mm_extract_epi8(v, 12).to_bits().low_u8(), + _mm_extract_epi8(v, 13).to_bits().low_u8(), + _mm_extract_epi8(v, 14).to_bits().low_u8(), + _mm_extract_epi8(v, 15).to_bits().low_u8(), + ] + } + + #[test] + fn vector_splat() { + #[target_feature(enable = "ssse3")] + unsafe fn test() { + let v = __m128i::splat(0xAF); + assert_eq!( + unload(v), + [ + 0xAF, 0xAF, 0xAF, 0xAF, 0xAF, 0xAF, 0xAF, 0xAF, 0xAF, + 0xAF, 0xAF, 0xAF, 0xAF, 0xAF, 0xAF, 0xAF + ] + ); + } + if !is_runnable() { + return; + } + unsafe { test() } + } + + #[test] + fn vector_is_zero() { + #[target_feature(enable = "ssse3")] + unsafe fn test() { + let v = load([0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]); + assert!(!v.is_zero()); + let v = load([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]); + assert!(v.is_zero()); + } + if !is_runnable() { + return; + } + unsafe { test() } + } + + #[test] + fn vector_cmpeq() { + #[target_feature(enable = "ssse3")] + unsafe fn test() { + let v1 = + load([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 1]); + let v2 = + load([16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1]); + assert_eq!( + unload(v1.cmpeq(v2)), + [0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xFF] + ); + } + if !is_runnable() { + return; + } + unsafe { test() } + } + + #[test] + fn vector_and() { + #[target_feature(enable = "ssse3")] + unsafe fn test() { + let v1 = + load([0, 0, 0, 0, 0, 0b1001, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]); + let v2 = + load([0, 0, 0, 0, 0, 0b1010, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]); + assert_eq!( + unload(v1.and(v2)), + [0, 0, 0, 0, 0, 0b1000, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] + ); + } + if !is_runnable() { + return; + } + unsafe { test() } + } + + #[test] + fn vector_or() { + #[target_feature(enable = "ssse3")] + unsafe fn test() { + let v1 = + load([0, 0, 0, 0, 0, 0b1001, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]); + let v2 = + load([0, 0, 0, 0, 0, 0b1010, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]); + assert_eq!( + unload(v1.or(v2)), + [0, 0, 0, 0, 0, 0b1011, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] + ); + } + if !is_runnable() { + return; + } + unsafe { test() } + } + + #[test] + fn vector_shift_8bit_lane_right() { + #[target_feature(enable = "ssse3")] + unsafe fn test() { + let v = load([ + 0, 0, 0, 0, 0b1011, 0b0101, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ]); + assert_eq!( + unload(v.shift_8bit_lane_right::<2>()), + [0, 0, 0, 0, 0b0010, 0b0001, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] + ); + } + if !is_runnable() { + return; + } + unsafe { test() } + } + + #[test] + fn vector_shift_in_one_byte() { + #[target_feature(enable = "ssse3")] + unsafe fn test() { + let v1 = + load([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]); + let v2 = load([ + 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, + ]); + assert_eq!( + unload(v1.shift_in_one_byte(v2)), + [32, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15], + ); + } + if !is_runnable() { + return; + } + unsafe { test() } + } + + #[test] + fn vector_shift_in_two_bytes() { + #[target_feature(enable = "ssse3")] + unsafe fn test() { + let v1 = + load([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]); + let v2 = load([ + 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, + ]); + assert_eq!( + unload(v1.shift_in_two_bytes(v2)), + [31, 32, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14], + ); + } + if !is_runnable() { + return; + } + unsafe { test() } + } + + #[test] + fn vector_shift_in_three_bytes() { + #[target_feature(enable = "ssse3")] + unsafe fn test() { + let v1 = + load([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]); + let v2 = load([ + 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, + ]); + assert_eq!( + unload(v1.shift_in_three_bytes(v2)), + [30, 31, 32, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13], + ); + } + if !is_runnable() { + return; + } + unsafe { test() } + } + + #[test] + fn vector_shuffle_bytes() { + #[target_feature(enable = "ssse3")] + unsafe fn test() { + let v1 = + load([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]); + let v2 = + load([0, 0, 0, 0, 4, 4, 4, 4, 8, 8, 8, 8, 12, 12, 12, 12]); + assert_eq!( + unload(v1.shuffle_bytes(v2)), + [1, 1, 1, 1, 5, 5, 5, 5, 9, 9, 9, 9, 13, 13, 13, 13], + ); + } + if !is_runnable() { + return; + } + unsafe { test() } + } + + #[test] + fn vector_for_each_64bit_lane() { + #[target_feature(enable = "ssse3")] + unsafe fn test() { + let v = load([ + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, + 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x10, + ]); + let mut lanes = [0u64; 2]; + v.for_each_64bit_lane(|i, lane| { + lanes[i] = lane; + None::<()> + }); + assert_eq!(lanes, [0x0807060504030201, 0x100F0E0D0C0B0A09],); + } + if !is_runnable() { + return; + } + unsafe { test() } + } +} + +#[cfg(all(test, target_arch 
= "x86_64", target_feature = "sse2"))] +mod tests_x86_64_avx2 { + use core::arch::x86_64::*; + + use crate::util::int::{I32, U32}; + + use super::*; + + fn is_runnable() -> bool { + std::is_x86_feature_detected!("avx2") + } + + #[target_feature(enable = "avx2")] + unsafe fn load(lanes: [u8; 32]) -> __m256i { + __m256i::load_unaligned(&lanes as *const u8) + } + + #[target_feature(enable = "avx2")] + unsafe fn load_half(lanes: [u8; 16]) -> __m256i { + __m256i::load_half_unaligned(&lanes as *const u8) + } + + #[target_feature(enable = "avx2")] + unsafe fn unload(v: __m256i) -> [u8; 32] { + [ + _mm256_extract_epi8(v, 0).to_bits().low_u8(), + _mm256_extract_epi8(v, 1).to_bits().low_u8(), + _mm256_extract_epi8(v, 2).to_bits().low_u8(), + _mm256_extract_epi8(v, 3).to_bits().low_u8(), + _mm256_extract_epi8(v, 4).to_bits().low_u8(), + _mm256_extract_epi8(v, 5).to_bits().low_u8(), + _mm256_extract_epi8(v, 6).to_bits().low_u8(), + _mm256_extract_epi8(v, 7).to_bits().low_u8(), + _mm256_extract_epi8(v, 8).to_bits().low_u8(), + _mm256_extract_epi8(v, 9).to_bits().low_u8(), + _mm256_extract_epi8(v, 10).to_bits().low_u8(), + _mm256_extract_epi8(v, 11).to_bits().low_u8(), + _mm256_extract_epi8(v, 12).to_bits().low_u8(), + _mm256_extract_epi8(v, 13).to_bits().low_u8(), + _mm256_extract_epi8(v, 14).to_bits().low_u8(), + _mm256_extract_epi8(v, 15).to_bits().low_u8(), + _mm256_extract_epi8(v, 16).to_bits().low_u8(), + _mm256_extract_epi8(v, 17).to_bits().low_u8(), + _mm256_extract_epi8(v, 18).to_bits().low_u8(), + _mm256_extract_epi8(v, 19).to_bits().low_u8(), + _mm256_extract_epi8(v, 20).to_bits().low_u8(), + _mm256_extract_epi8(v, 21).to_bits().low_u8(), + _mm256_extract_epi8(v, 22).to_bits().low_u8(), + _mm256_extract_epi8(v, 23).to_bits().low_u8(), + _mm256_extract_epi8(v, 24).to_bits().low_u8(), + _mm256_extract_epi8(v, 25).to_bits().low_u8(), + _mm256_extract_epi8(v, 26).to_bits().low_u8(), + _mm256_extract_epi8(v, 27).to_bits().low_u8(), + _mm256_extract_epi8(v, 28).to_bits().low_u8(), + _mm256_extract_epi8(v, 29).to_bits().low_u8(), + _mm256_extract_epi8(v, 30).to_bits().low_u8(), + _mm256_extract_epi8(v, 31).to_bits().low_u8(), + ] + } + + #[test] + fn vector_splat() { + #[target_feature(enable = "avx2")] + unsafe fn test() { + let v = __m256i::splat(0xAF); + assert_eq!( + unload(v), + [ + 0xAF, 0xAF, 0xAF, 0xAF, 0xAF, 0xAF, 0xAF, 0xAF, 0xAF, + 0xAF, 0xAF, 0xAF, 0xAF, 0xAF, 0xAF, 0xAF, 0xAF, 0xAF, + 0xAF, 0xAF, 0xAF, 0xAF, 0xAF, 0xAF, 0xAF, 0xAF, 0xAF, + 0xAF, 0xAF, 0xAF, 0xAF, 0xAF, + ] + ); + } + if !is_runnable() { + return; + } + unsafe { test() } + } + + #[test] + fn vector_is_zero() { + #[target_feature(enable = "avx2")] + unsafe fn test() { + let v = load([ + 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ]); + assert!(!v.is_zero()); + let v = load([ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ]); + assert!(v.is_zero()); + } + if !is_runnable() { + return; + } + unsafe { test() } + } + + #[test] + fn vector_cmpeq() { + #[target_feature(enable = "avx2")] + unsafe fn test() { + let v1 = load([ + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, + 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 1, + ]); + let v2 = load([ + 32, 31, 30, 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, + 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, + ]); + assert_eq!( + unload(v1.cmpeq(v2)), + [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0xFF + ] + ); + } + if !is_runnable() { + return; + } + unsafe { test() } + } + + #[test] + fn vector_and() { + #[target_feature(enable = "avx2")] + unsafe fn test() { + let v1 = load([ + 0, 0, 0, 0, 0, 0b1001, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ]); + let v2 = load([ + 0, 0, 0, 0, 0, 0b1010, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ]); + assert_eq!( + unload(v1.and(v2)), + [ + 0, 0, 0, 0, 0, 0b1000, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ] + ); + } + if !is_runnable() { + return; + } + unsafe { test() } + } + + #[test] + fn vector_or() { + #[target_feature(enable = "avx2")] + unsafe fn test() { + let v1 = load([ + 0, 0, 0, 0, 0, 0b1001, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ]); + let v2 = load([ + 0, 0, 0, 0, 0, 0b1010, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ]); + assert_eq!( + unload(v1.or(v2)), + [ + 0, 0, 0, 0, 0, 0b1011, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ] + ); + } + if !is_runnable() { + return; + } + unsafe { test() } + } + + #[test] + fn vector_shift_8bit_lane_right() { + #[target_feature(enable = "avx2")] + unsafe fn test() { + let v = load([ + 0, 0, 0, 0, 0b1011, 0b0101, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ]); + assert_eq!( + unload(v.shift_8bit_lane_right::<2>()), + [ + 0, 0, 0, 0, 0b0010, 0b0001, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ] + ); + } + if !is_runnable() { + return; + } + unsafe { test() } + } + + #[test] + fn vector_shift_in_one_byte() { + #[target_feature(enable = "avx2")] + unsafe fn test() { + let v1 = load([ + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, + 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, + ]); + let v2 = load([ + 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, + 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, + 63, 64, + ]); + assert_eq!( + unload(v1.shift_in_one_byte(v2)), + [ + 64, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, + 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, + 31, + ], + ); + } + if !is_runnable() { + return; + } + unsafe { test() } + } + + #[test] + fn vector_shift_in_two_bytes() { + #[target_feature(enable = "avx2")] + unsafe fn test() { + let v1 = load([ + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, + 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, + ]); + let v2 = load([ + 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, + 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, + 63, 64, + ]); + assert_eq!( + unload(v1.shift_in_two_bytes(v2)), + [ + 63, 64, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, + 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, + 30, + ], + ); + } + if !is_runnable() { + return; + } + unsafe { test() } + } + + #[test] + fn vector_shift_in_three_bytes() { + #[target_feature(enable = "avx2")] + unsafe fn test() { + let v1 = load([ + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, + 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, + ]); + let v2 = load([ + 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, + 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, + 63, 64, + ]); + assert_eq!( + unload(v1.shift_in_three_bytes(v2)), + [ + 62, 63, 64, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 
13, 14, + 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, + 29, + ], + ); + } + if !is_runnable() { + return; + } + unsafe { test() } + } + + #[test] + fn vector_shuffle_bytes() { + #[target_feature(enable = "avx2")] + unsafe fn test() { + let v1 = load([ + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, + 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, + ]); + let v2 = load([ + 0, 0, 0, 0, 4, 4, 4, 4, 8, 8, 8, 8, 12, 12, 12, 12, 16, 16, + 16, 16, 20, 20, 20, 20, 24, 24, 24, 24, 28, 28, 28, 28, + ]); + assert_eq!( + unload(v1.shuffle_bytes(v2)), + [ + 1, 1, 1, 1, 5, 5, 5, 5, 9, 9, 9, 9, 13, 13, 13, 13, 17, + 17, 17, 17, 21, 21, 21, 21, 25, 25, 25, 25, 29, 29, 29, + 29 + ], + ); + } + if !is_runnable() { + return; + } + unsafe { test() } + } + + #[test] + fn vector_for_each_64bit_lane() { + #[target_feature(enable = "avx2")] + unsafe fn test() { + let v = load([ + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, + 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x10, 0x11, 0x12, 0x13, 0x14, + 0x15, 0x16, 0x17, 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, + 0x1F, 0x20, + ]); + let mut lanes = [0u64; 4]; + v.for_each_64bit_lane(|i, lane| { + lanes[i] = lane; + None::<()> + }); + assert_eq!( + lanes, + [ + 0x0807060504030201, + 0x100F0E0D0C0B0A09, + 0x1817161514131211, + 0x201F1E1D1C1B1A19 + ] + ); + } + if !is_runnable() { + return; + } + unsafe { test() } + } + + #[test] + fn fat_vector_half_shift_in_one_byte() { + #[target_feature(enable = "avx2")] + unsafe fn test() { + let v1 = load_half([ + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, + ]); + let v2 = load_half([ + 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, + ]); + assert_eq!( + unload(v1.half_shift_in_one_byte(v2)), + [ + 32, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 32, + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 + ], + ); + } + if !is_runnable() { + return; + } + unsafe { test() } + } + + #[test] + fn fat_vector_half_shift_in_two_bytes() { + #[target_feature(enable = "avx2")] + unsafe fn test() { + let v1 = load_half([ + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, + ]); + let v2 = load_half([ + 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, + ]); + assert_eq!( + unload(v1.half_shift_in_two_bytes(v2)), + [ + 31, 32, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 31, + 32, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, + ], + ); + } + if !is_runnable() { + return; + } + unsafe { test() } + } + + #[test] + fn fat_vector_half_shift_in_three_bytes() { + #[target_feature(enable = "avx2")] + unsafe fn test() { + let v1 = load_half([ + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, + ]); + let v2 = load_half([ + 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, + ]); + assert_eq!( + unload(v1.half_shift_in_three_bytes(v2)), + [ + 30, 31, 32, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 30, + 31, 32, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, + ], + ); + } + if !is_runnable() { + return; + } + unsafe { test() } + } + + #[test] + fn fat_vector_swap_halves() { + #[target_feature(enable = "avx2")] + unsafe fn test() { + let v = load([ + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, + 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, + ]); + assert_eq!( + unload(v.swap_halves()), + [ + 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, + 31, 32, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, + 16, + ], + ); + } + if !is_runnable() { + return; + } + unsafe { test() } + } + + #[test] + fn 
fat_vector_interleave_low_8bit_lanes() {
+        #[target_feature(enable = "avx2")]
+        unsafe fn test() {
+            let v1 = load([
+                1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
+                19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
+            ]);
+            let v2 = load([
+                33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
+                48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62,
+                63, 64,
+            ]);
+            assert_eq!(
+                unload(v1.interleave_low_8bit_lanes(v2)),
+                [
+                    1, 33, 2, 34, 3, 35, 4, 36, 5, 37, 6, 38, 7, 39, 8, 40,
+                    17, 49, 18, 50, 19, 51, 20, 52, 21, 53, 22, 54, 23, 55,
+                    24, 56,
+                ],
+            );
+        }
+        if !is_runnable() {
+            return;
+        }
+        unsafe { test() }
+    }
+
+    #[test]
+    fn fat_vector_interleave_high_8bit_lanes() {
+        #[target_feature(enable = "avx2")]
+        unsafe fn test() {
+            let v1 = load([
+                1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
+                19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
+            ]);
+            let v2 = load([
+                33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
+                48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62,
+                63, 64,
+            ]);
+            assert_eq!(
+                unload(v1.interleave_high_8bit_lanes(v2)),
+                [
+                    9, 41, 10, 42, 11, 43, 12, 44, 13, 45, 14, 46, 15, 47, 16,
+                    48, 25, 57, 26, 58, 27, 59, 28, 60, 29, 61, 30, 62, 31,
+                    63, 32, 64,
+                ],
+            );
+        }
+        if !is_runnable() {
+            return;
+        }
+        unsafe { test() }
+    }
+
+    #[test]
+    fn fat_vector_for_each_low_64bit_lane() {
+        #[target_feature(enable = "avx2")]
+        unsafe fn test() {
+            let v1 = load([
+                0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A,
+                0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x10, 0x11, 0x12, 0x13, 0x14,
+                0x15, 0x16, 0x17, 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E,
+                0x1F, 0x20,
+            ]);
+            let v2 = load([
+                0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2A,
+                0x2B, 0x2C, 0x2D, 0x2E, 0x2F, 0x30, 0x31, 0x32, 0x33, 0x34,
+                0x35, 0x36, 0x37, 0x38, 0x39, 0x3A, 0x3B, 0x3C, 0x3D, 0x3E,
+                0x3F, 0x40,
+            ]);
+            let mut lanes = [0u64; 4];
+            v1.for_each_low_64bit_lane(v2, |i, lane| {
+                lanes[i] = lane;
+                None::<()>
+            });
+            assert_eq!(
+                lanes,
+                [
+                    0x0807060504030201,
+                    0x100F0E0D0C0B0A09,
+                    0x2827262524232221,
+                    0x302F2E2D2C2B2A29
+                ]
+            );
+        }
+        if !is_runnable() {
+            return;
+        }
+        unsafe { test() }
+    }
+}
+
+#[cfg(all(test, target_arch = "aarch64", target_feature = "neon"))]
+mod tests_aarch64_neon {
+    use core::arch::aarch64::*;
+
+    use super::*;
+
+    #[target_feature(enable = "neon")]
+    unsafe fn load(lanes: [u8; 16]) -> uint8x16_t {
+        uint8x16_t::load_unaligned(&lanes as *const u8)
+    }
+
+    #[target_feature(enable = "neon")]
+    unsafe fn unload(v: uint8x16_t) -> [u8; 16] {
+        [
+            vgetq_lane_u8(v, 0),
+            vgetq_lane_u8(v, 1),
+            vgetq_lane_u8(v, 2),
+            vgetq_lane_u8(v, 3),
+            vgetq_lane_u8(v, 4),
+            vgetq_lane_u8(v, 5),
+            vgetq_lane_u8(v, 6),
+            vgetq_lane_u8(v, 7),
+            vgetq_lane_u8(v, 8),
+            vgetq_lane_u8(v, 9),
+            vgetq_lane_u8(v, 10),
+            vgetq_lane_u8(v, 11),
+            vgetq_lane_u8(v, 12),
+            vgetq_lane_u8(v, 13),
+            vgetq_lane_u8(v, 14),
+            vgetq_lane_u8(v, 15),
+        ]
+    }
+
+    // Example functions. These don't test the Vector traits, but rather,
+    // specific NEON instructions. They are basically little experiments I
+    // wrote to figure out what an instruction does since their descriptions
+    // are so dense. I decided to keep the experiments around as example tests
+    // in case they're useful.
+ + #[test] + fn example_vmaxvq_u8_non_zero() { + #[target_feature(enable = "neon")] + unsafe fn example() { + let v = load([0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]); + assert_eq!(vmaxvq_u8(v), 1); + } + unsafe { example() } + } + + #[test] + fn example_vmaxvq_u8_zero() { + #[target_feature(enable = "neon")] + unsafe fn example() { + let v = load([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]); + assert_eq!(vmaxvq_u8(v), 0); + } + unsafe { example() } + } + + #[test] + fn example_vpmaxq_u8_non_zero() { + #[target_feature(enable = "neon")] + unsafe fn example() { + let v = load([0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]); + let r = vpmaxq_u8(v, v); + assert_eq!( + unload(r), + [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0] + ); + } + unsafe { example() } + } + + #[test] + fn example_vpmaxq_u8_self() { + #[target_feature(enable = "neon")] + unsafe fn example() { + let v = + load([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]); + let r = vpmaxq_u8(v, v); + assert_eq!( + unload(r), + [2, 4, 6, 8, 10, 12, 14, 16, 2, 4, 6, 8, 10, 12, 14, 16] + ); + } + unsafe { example() } + } + + #[test] + fn example_vpmaxq_u8_other() { + #[target_feature(enable = "neon")] + unsafe fn example() { + let v1 = + load([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]); + let v2 = load([ + 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, + ]); + let r = vpmaxq_u8(v1, v2); + assert_eq!( + unload(r), + [2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32] + ); + } + unsafe { example() } + } + + // Now we test the actual methods on the Vector trait. + + #[test] + fn vector_splat() { + #[target_feature(enable = "neon")] + unsafe fn test() { + let v = uint8x16_t::splat(0xAF); + assert_eq!( + unload(v), + [ + 0xAF, 0xAF, 0xAF, 0xAF, 0xAF, 0xAF, 0xAF, 0xAF, 0xAF, + 0xAF, 0xAF, 0xAF, 0xAF, 0xAF, 0xAF, 0xAF + ] + ); + } + unsafe { test() } + } + + #[test] + fn vector_is_zero() { + #[target_feature(enable = "neon")] + unsafe fn test() { + let v = load([0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]); + assert!(!v.is_zero()); + let v = load([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]); + assert!(v.is_zero()); + } + unsafe { test() } + } + + #[test] + fn vector_cmpeq() { + #[target_feature(enable = "neon")] + unsafe fn test() { + let v1 = + load([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 1]); + let v2 = + load([16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1]); + assert_eq!( + unload(v1.cmpeq(v2)), + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xFF] + ); + } + unsafe { test() } + } + + #[test] + fn vector_and() { + #[target_feature(enable = "neon")] + unsafe fn test() { + let v1 = + load([0, 0, 0, 0, 0, 0b1001, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]); + let v2 = + load([0, 0, 0, 0, 0, 0b1010, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]); + assert_eq!( + unload(v1.and(v2)), + [0, 0, 0, 0, 0, 0b1000, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] + ); + } + unsafe { test() } + } + + #[test] + fn vector_or() { + #[target_feature(enable = "neon")] + unsafe fn test() { + let v1 = + load([0, 0, 0, 0, 0, 0b1001, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]); + let v2 = + load([0, 0, 0, 0, 0, 0b1010, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]); + assert_eq!( + unload(v1.or(v2)), + [0, 0, 0, 0, 0, 0b1011, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] + ); + } + unsafe { test() } + } + + #[test] + fn vector_shift_8bit_lane_right() { + #[target_feature(enable = "neon")] + unsafe fn test() { + let v = load([ + 0, 0, 0, 0, 0b1011, 0b0101, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ]); + assert_eq!( + unload(v.shift_8bit_lane_right::<2>()), + [0, 0, 0, 0, 
0b0010, 0b0001, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] + ); + } + unsafe { test() } + } + + #[test] + fn vector_shift_in_one_byte() { + #[target_feature(enable = "neon")] + unsafe fn test() { + let v1 = + load([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]); + let v2 = load([ + 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, + ]); + assert_eq!( + unload(v1.shift_in_one_byte(v2)), + [32, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15], + ); + } + unsafe { test() } + } + + #[test] + fn vector_shift_in_two_bytes() { + #[target_feature(enable = "neon")] + unsafe fn test() { + let v1 = + load([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]); + let v2 = load([ + 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, + ]); + assert_eq!( + unload(v1.shift_in_two_bytes(v2)), + [31, 32, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14], + ); + } + unsafe { test() } + } + + #[test] + fn vector_shift_in_three_bytes() { + #[target_feature(enable = "neon")] + unsafe fn test() { + let v1 = + load([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]); + let v2 = load([ + 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, + ]); + assert_eq!( + unload(v1.shift_in_three_bytes(v2)), + [30, 31, 32, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13], + ); + } + unsafe { test() } + } + + #[test] + fn vector_shuffle_bytes() { + #[target_feature(enable = "neon")] + unsafe fn test() { + let v1 = + load([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]); + let v2 = + load([0, 0, 0, 0, 4, 4, 4, 4, 8, 8, 8, 8, 12, 12, 12, 12]); + assert_eq!( + unload(v1.shuffle_bytes(v2)), + [1, 1, 1, 1, 5, 5, 5, 5, 9, 9, 9, 9, 13, 13, 13, 13], + ); + } + unsafe { test() } + } + + #[test] + fn vector_for_each_64bit_lane() { + #[target_feature(enable = "neon")] + unsafe fn test() { + let v = load([ + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, + 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x10, + ]); + let mut lanes = [0u64; 2]; + v.for_each_64bit_lane(|i, lane| { + lanes[i] = lane; + None::<()> + }); + assert_eq!(lanes, [0x0807060504030201, 0x100F0E0D0C0B0A09],); + } + unsafe { test() } + } +} diff --git a/vendor/aho-corasick/src/tests.rs b/vendor/aho-corasick/src/tests.rs new file mode 100644 index 00000000000000..a5276f85f6e0b6 --- /dev/null +++ b/vendor/aho-corasick/src/tests.rs @@ -0,0 +1,1664 @@ +use std::{collections::HashMap, format, string::String, vec::Vec}; + +use crate::{ + AhoCorasick, AhoCorasickBuilder, AhoCorasickKind, Anchored, Input, Match, + MatchKind, StartKind, +}; + +/// A description of a single test against an Aho-Corasick automaton. +/// +/// A single test may not necessarily pass on every configuration of an +/// Aho-Corasick automaton. The tests are categorized and grouped appropriately +/// below. +#[derive(Clone, Debug, Eq, PartialEq)] +struct SearchTest { + /// The name of this test, for debugging. + name: &'static str, + /// The patterns to search for. + patterns: &'static [&'static str], + /// The text to search. + haystack: &'static str, + /// Each match is a triple of (pattern_index, start, end), where + /// pattern_index is an index into `patterns` and `start`/`end` are indices + /// into `haystack`. + matches: &'static [(usize, usize, usize)], +} + +/// Short-hand constructor for SearchTest. We use it a lot below. +macro_rules! t { + ($name:ident, $patterns:expr, $haystack:expr, $matches:expr) => { + SearchTest { + name: stringify!($name), + patterns: $patterns, + haystack: $haystack, + matches: $matches, + } + }; +} + +/// A collection of test groups. 
+type TestCollection = &'static [&'static [SearchTest]]; + +// Define several collections corresponding to the different type of match +// semantics supported by Aho-Corasick. These collections have some overlap, +// but each collection should have some tests that no other collection has. + +/// Tests for Aho-Corasick's standard non-overlapping match semantics. +const AC_STANDARD_NON_OVERLAPPING: TestCollection = + &[BASICS, NON_OVERLAPPING, STANDARD, REGRESSION]; + +/// Tests for Aho-Corasick's anchored standard non-overlapping match semantics. +const AC_STANDARD_ANCHORED_NON_OVERLAPPING: TestCollection = + &[ANCHORED_BASICS, ANCHORED_NON_OVERLAPPING, STANDARD_ANCHORED]; + +/// Tests for Aho-Corasick's standard overlapping match semantics. +const AC_STANDARD_OVERLAPPING: TestCollection = + &[BASICS, OVERLAPPING, REGRESSION]; + +/* +Iterators of anchored overlapping searches were removed from the API in +after 0.7, but we leave the tests commented out for posterity. +/// Tests for Aho-Corasick's anchored standard overlapping match semantics. +const AC_STANDARD_ANCHORED_OVERLAPPING: TestCollection = + &[ANCHORED_BASICS, ANCHORED_OVERLAPPING]; +*/ + +/// Tests for Aho-Corasick's leftmost-first match semantics. +const AC_LEFTMOST_FIRST: TestCollection = + &[BASICS, NON_OVERLAPPING, LEFTMOST, LEFTMOST_FIRST, REGRESSION]; + +/// Tests for Aho-Corasick's anchored leftmost-first match semantics. +const AC_LEFTMOST_FIRST_ANCHORED: TestCollection = &[ + ANCHORED_BASICS, + ANCHORED_NON_OVERLAPPING, + ANCHORED_LEFTMOST, + ANCHORED_LEFTMOST_FIRST, +]; + +/// Tests for Aho-Corasick's leftmost-longest match semantics. +const AC_LEFTMOST_LONGEST: TestCollection = + &[BASICS, NON_OVERLAPPING, LEFTMOST, LEFTMOST_LONGEST, REGRESSION]; + +/// Tests for Aho-Corasick's anchored leftmost-longest match semantics. +const AC_LEFTMOST_LONGEST_ANCHORED: TestCollection = &[ + ANCHORED_BASICS, + ANCHORED_NON_OVERLAPPING, + ANCHORED_LEFTMOST, + ANCHORED_LEFTMOST_LONGEST, +]; + +// Now define the individual tests that make up the collections above. + +/// A collection of tests for the Aho-Corasick algorithm that should always be +/// true regardless of match semantics. That is, all combinations of +/// leftmost-{shortest, first, longest} x {overlapping, non-overlapping} +/// should produce the same answer. 
+const BASICS: &'static [SearchTest] = &[ + t!(basic000, &[], "", &[]), + t!(basic001, &[""], "a", &[(0, 0, 0), (0, 1, 1)]), + t!(basic002, &["a"], "", &[]), + t!(basic010, &["a"], "a", &[(0, 0, 1)]), + t!(basic020, &["a"], "aa", &[(0, 0, 1), (0, 1, 2)]), + t!(basic030, &["a"], "aaa", &[(0, 0, 1), (0, 1, 2), (0, 2, 3)]), + t!(basic040, &["a"], "aba", &[(0, 0, 1), (0, 2, 3)]), + t!(basic050, &["a"], "bba", &[(0, 2, 3)]), + t!(basic060, &["a"], "bbb", &[]), + t!(basic070, &["a"], "bababbbba", &[(0, 1, 2), (0, 3, 4), (0, 8, 9)]), + t!(basic100, &["aa"], "", &[]), + t!(basic110, &["aa"], "aa", &[(0, 0, 2)]), + t!(basic120, &["aa"], "aabbaa", &[(0, 0, 2), (0, 4, 6)]), + t!(basic130, &["aa"], "abbab", &[]), + t!(basic140, &["aa"], "abbabaa", &[(0, 5, 7)]), + t!(basic200, &["abc"], "abc", &[(0, 0, 3)]), + t!(basic210, &["abc"], "zazabzabcz", &[(0, 6, 9)]), + t!(basic220, &["abc"], "zazabczabcz", &[(0, 3, 6), (0, 7, 10)]), + t!(basic300, &["a", "b"], "", &[]), + t!(basic310, &["a", "b"], "z", &[]), + t!(basic320, &["a", "b"], "b", &[(1, 0, 1)]), + t!(basic330, &["a", "b"], "a", &[(0, 0, 1)]), + t!( + basic340, + &["a", "b"], + "abba", + &[(0, 0, 1), (1, 1, 2), (1, 2, 3), (0, 3, 4),] + ), + t!( + basic350, + &["b", "a"], + "abba", + &[(1, 0, 1), (0, 1, 2), (0, 2, 3), (1, 3, 4),] + ), + t!(basic360, &["abc", "bc"], "xbc", &[(1, 1, 3),]), + t!(basic400, &["foo", "bar"], "", &[]), + t!(basic410, &["foo", "bar"], "foobar", &[(0, 0, 3), (1, 3, 6),]), + t!(basic420, &["foo", "bar"], "barfoo", &[(1, 0, 3), (0, 3, 6),]), + t!(basic430, &["foo", "bar"], "foofoo", &[(0, 0, 3), (0, 3, 6),]), + t!(basic440, &["foo", "bar"], "barbar", &[(1, 0, 3), (1, 3, 6),]), + t!(basic450, &["foo", "bar"], "bafofoo", &[(0, 4, 7),]), + t!(basic460, &["bar", "foo"], "bafofoo", &[(1, 4, 7),]), + t!(basic470, &["foo", "bar"], "fobabar", &[(1, 4, 7),]), + t!(basic480, &["bar", "foo"], "fobabar", &[(0, 4, 7),]), + t!(basic600, &[""], "", &[(0, 0, 0)]), + t!(basic610, &[""], "a", &[(0, 0, 0), (0, 1, 1)]), + t!(basic620, &[""], "abc", &[(0, 0, 0), (0, 1, 1), (0, 2, 2), (0, 3, 3)]), + t!(basic700, &["yabcdef", "abcdezghi"], "yabcdefghi", &[(0, 0, 7),]), + t!(basic710, &["yabcdef", "abcdezghi"], "yabcdezghi", &[(1, 1, 10),]), + t!( + basic720, + &["yabcdef", "bcdeyabc", "abcdezghi"], + "yabcdezghi", + &[(2, 1, 10),] + ), +]; + +/// A collection of *anchored* tests for the Aho-Corasick algorithm that should +/// always be true regardless of match semantics. That is, all combinations of +/// leftmost-{shortest, first, longest} x {overlapping, non-overlapping} should +/// produce the same answer. +const ANCHORED_BASICS: &'static [SearchTest] = &[ + t!(abasic000, &[], "", &[]), + t!(abasic001, &[], "a", &[]), + t!(abasic002, &[], "abc", &[]), + t!(abasic010, &[""], "", &[(0, 0, 0)]), + t!(abasic020, &[""], "a", &[(0, 0, 0), (0, 1, 1)]), + t!(abasic030, &[""], "abc", &[(0, 0, 0), (0, 1, 1), (0, 2, 2), (0, 3, 3)]), + t!(abasic100, &["a"], "a", &[(0, 0, 1)]), + t!(abasic110, &["a"], "aa", &[(0, 0, 1), (0, 1, 2)]), + t!(abasic120, &["a", "b"], "ab", &[(0, 0, 1), (1, 1, 2)]), + t!(abasic130, &["a", "b"], "ba", &[(1, 0, 1), (0, 1, 2)]), + t!(abasic140, &["foo", "foofoo"], "foo", &[(0, 0, 3)]), + t!(abasic150, &["foofoo", "foo"], "foo", &[(1, 0, 3)]), + t!(abasic200, &["foo"], "foofoo foo", &[(0, 0, 3), (0, 3, 6)]), +]; + +/// Tests for non-overlapping standard match semantics. +/// +/// These tests generally shouldn't pass for leftmost-{first,longest}, although +/// some do in order to write clearer tests. 
For example, standard000 will +/// pass with leftmost-first semantics, but standard010 will not. We write +/// both to emphasize how the match semantics work. +const STANDARD: &'static [SearchTest] = &[ + t!(standard000, &["ab", "abcd"], "abcd", &[(0, 0, 2)]), + t!(standard010, &["abcd", "ab"], "abcd", &[(1, 0, 2)]), + t!(standard020, &["abcd", "ab", "abc"], "abcd", &[(1, 0, 2)]), + t!(standard030, &["abcd", "abc", "ab"], "abcd", &[(2, 0, 2)]), + t!(standard040, &["a", ""], "a", &[(1, 0, 0), (1, 1, 1)]), + t!( + standard400, + &["abcd", "bcd", "cd", "b"], + "abcd", + &[(3, 1, 2), (2, 2, 4),] + ), + t!(standard410, &["", "a"], "a", &[(0, 0, 0), (0, 1, 1),]), + t!(standard420, &["", "a"], "aa", &[(0, 0, 0), (0, 1, 1), (0, 2, 2),]), + t!(standard430, &["", "a", ""], "a", &[(0, 0, 0), (0, 1, 1),]), + t!(standard440, &["a", "", ""], "a", &[(1, 0, 0), (1, 1, 1),]), + t!(standard450, &["", "", "a"], "a", &[(0, 0, 0), (0, 1, 1),]), +]; + +/// Like STANDARD, but for anchored searches. +const STANDARD_ANCHORED: &'static [SearchTest] = &[ + t!(astandard000, &["ab", "abcd"], "abcd", &[(0, 0, 2)]), + t!(astandard010, &["abcd", "ab"], "abcd", &[(1, 0, 2)]), + t!(astandard020, &["abcd", "ab", "abc"], "abcd", &[(1, 0, 2)]), + t!(astandard030, &["abcd", "abc", "ab"], "abcd", &[(2, 0, 2)]), + t!(astandard040, &["a", ""], "a", &[(1, 0, 0), (1, 1, 1)]), + t!(astandard050, &["abcd", "bcd", "cd", "b"], "abcd", &[(0, 0, 4)]), + t!(astandard410, &["", "a"], "a", &[(0, 0, 0), (0, 1, 1)]), + t!(astandard420, &["", "a"], "aa", &[(0, 0, 0), (0, 1, 1), (0, 2, 2)]), + t!(astandard430, &["", "a", ""], "a", &[(0, 0, 0), (0, 1, 1)]), + t!(astandard440, &["a", "", ""], "a", &[(1, 0, 0), (1, 1, 1)]), + t!(astandard450, &["", "", "a"], "a", &[(0, 0, 0), (0, 1, 1)]), +]; + +/// Tests for non-overlapping leftmost match semantics. These should pass for +/// both leftmost-first and leftmost-longest match kinds. Stated differently, +/// among ambiguous matches, the longest match and the match that appeared +/// first when constructing the automaton should always be the same. 
+const LEFTMOST: &'static [SearchTest] = &[ + t!(leftmost000, &["ab", "ab"], "abcd", &[(0, 0, 2)]), + t!(leftmost010, &["a", ""], "a", &[(0, 0, 1)]), + t!(leftmost011, &["a", ""], "ab", &[(0, 0, 1), (1, 2, 2)]), + t!(leftmost020, &["", ""], "a", &[(0, 0, 0), (0, 1, 1)]), + t!(leftmost030, &["a", "ab"], "aa", &[(0, 0, 1), (0, 1, 2)]), + t!(leftmost031, &["ab", "a"], "aa", &[(1, 0, 1), (1, 1, 2)]), + t!(leftmost032, &["ab", "a"], "xayabbbz", &[(1, 1, 2), (0, 3, 5)]), + t!(leftmost300, &["abcd", "bce", "b"], "abce", &[(1, 1, 4)]), + t!(leftmost310, &["abcd", "ce", "bc"], "abce", &[(2, 1, 3)]), + t!(leftmost320, &["abcd", "bce", "ce", "b"], "abce", &[(1, 1, 4)]), + t!(leftmost330, &["abcd", "bce", "cz", "bc"], "abcz", &[(3, 1, 3)]), + t!(leftmost340, &["bce", "cz", "bc"], "bcz", &[(2, 0, 2)]), + t!(leftmost350, &["abc", "bd", "ab"], "abd", &[(2, 0, 2)]), + t!( + leftmost360, + &["abcdefghi", "hz", "abcdefgh"], + "abcdefghz", + &[(2, 0, 8),] + ), + t!( + leftmost370, + &["abcdefghi", "cde", "hz", "abcdefgh"], + "abcdefghz", + &[(3, 0, 8),] + ), + t!( + leftmost380, + &["abcdefghi", "hz", "abcdefgh", "a"], + "abcdefghz", + &[(2, 0, 8),] + ), + t!( + leftmost390, + &["b", "abcdefghi", "hz", "abcdefgh"], + "abcdefghz", + &[(3, 0, 8),] + ), + t!( + leftmost400, + &["h", "abcdefghi", "hz", "abcdefgh"], + "abcdefghz", + &[(3, 0, 8),] + ), + t!( + leftmost410, + &["z", "abcdefghi", "hz", "abcdefgh"], + "abcdefghz", + &[(3, 0, 8), (0, 8, 9),] + ), +]; + +/// Like LEFTMOST, but for anchored searches. +const ANCHORED_LEFTMOST: &'static [SearchTest] = &[ + t!(aleftmost000, &["ab", "ab"], "abcd", &[(0, 0, 2)]), + // We shouldn't allow an empty match immediately following a match, right? + t!(aleftmost010, &["a", ""], "a", &[(0, 0, 1)]), + t!(aleftmost020, &["", ""], "a", &[(0, 0, 0), (0, 1, 1)]), + t!(aleftmost030, &["a", "ab"], "aa", &[(0, 0, 1), (0, 1, 2)]), + t!(aleftmost031, &["ab", "a"], "aa", &[(1, 0, 1), (1, 1, 2)]), + t!(aleftmost032, &["ab", "a"], "xayabbbz", &[]), + t!(aleftmost300, &["abcd", "bce", "b"], "abce", &[]), + t!(aleftmost301, &["abcd", "bcd", "cd", "b"], "abcd", &[(0, 0, 4)]), + t!(aleftmost310, &["abcd", "ce", "bc"], "abce", &[]), + t!(aleftmost320, &["abcd", "bce", "ce", "b"], "abce", &[]), + t!(aleftmost330, &["abcd", "bce", "cz", "bc"], "abcz", &[]), + t!(aleftmost340, &["bce", "cz", "bc"], "bcz", &[(2, 0, 2)]), + t!(aleftmost350, &["abc", "bd", "ab"], "abd", &[(2, 0, 2)]), + t!( + aleftmost360, + &["abcdefghi", "hz", "abcdefgh"], + "abcdefghz", + &[(2, 0, 8),] + ), + t!( + aleftmost370, + &["abcdefghi", "cde", "hz", "abcdefgh"], + "abcdefghz", + &[(3, 0, 8),] + ), + t!( + aleftmost380, + &["abcdefghi", "hz", "abcdefgh", "a"], + "abcdefghz", + &[(2, 0, 8),] + ), + t!( + aleftmost390, + &["b", "abcdefghi", "hz", "abcdefgh"], + "abcdefghz", + &[(3, 0, 8),] + ), + t!( + aleftmost400, + &["h", "abcdefghi", "hz", "abcdefgh"], + "abcdefghz", + &[(3, 0, 8),] + ), + t!( + aleftmost410, + &["z", "abcdefghi", "hz", "abcdefgh"], + "abcdefghzyz", + &[(3, 0, 8), (0, 8, 9)] + ), +]; + +/// Tests for non-overlapping leftmost-first match semantics. These tests +/// should generally be specific to leftmost-first, which means they should +/// generally fail under leftmost-longest semantics. 
+const LEFTMOST_FIRST: &'static [SearchTest] = &[ + t!(leftfirst000, &["ab", "abcd"], "abcd", &[(0, 0, 2)]), + t!(leftfirst010, &["", "a"], "a", &[(0, 0, 0), (0, 1, 1)]), + t!(leftfirst011, &["", "a", ""], "a", &[(0, 0, 0), (0, 1, 1),]), + t!(leftfirst012, &["a", "", ""], "a", &[(0, 0, 1)]), + t!(leftfirst013, &["", "", "a"], "a", &[(0, 0, 0), (0, 1, 1)]), + t!(leftfirst014, &["a", ""], "a", &[(0, 0, 1)]), + t!(leftfirst015, &["a", ""], "ab", &[(0, 0, 1), (1, 2, 2)]), + t!(leftfirst020, &["abcd", "ab"], "abcd", &[(0, 0, 4)]), + t!(leftfirst030, &["ab", "ab"], "abcd", &[(0, 0, 2)]), + t!(leftfirst040, &["a", "ab"], "xayabbbz", &[(0, 1, 2), (0, 3, 4)]), + t!(leftfirst100, &["abcdefg", "bcde", "bcdef"], "abcdef", &[(1, 1, 5)]), + t!(leftfirst110, &["abcdefg", "bcdef", "bcde"], "abcdef", &[(1, 1, 6)]), + t!(leftfirst300, &["abcd", "b", "bce"], "abce", &[(1, 1, 2)]), + t!( + leftfirst310, + &["abcd", "b", "bce", "ce"], + "abce", + &[(1, 1, 2), (3, 2, 4),] + ), + t!( + leftfirst320, + &["a", "abcdefghi", "hz", "abcdefgh"], + "abcdefghz", + &[(0, 0, 1), (2, 7, 9),] + ), + t!(leftfirst330, &["a", "abab"], "abab", &[(0, 0, 1), (0, 2, 3)]), + t!(leftfirst400, &["amwix", "samwise", "sam"], "Zsamwix", &[(2, 1, 4)]), +]; + +/// Like LEFTMOST_FIRST, but for anchored searches. +const ANCHORED_LEFTMOST_FIRST: &'static [SearchTest] = &[ + t!(aleftfirst000, &["ab", "abcd"], "abcd", &[(0, 0, 2)]), + t!(aleftfirst010, &["", "a"], "a", &[(0, 0, 0), (0, 1, 1)]), + t!(aleftfirst011, &["", "a", ""], "a", &[(0, 0, 0), (0, 1, 1)]), + t!(aleftfirst012, &["a", "", ""], "a", &[(0, 0, 1)]), + t!(aleftfirst013, &["", "", "a"], "a", &[(0, 0, 0), (0, 1, 1)]), + t!(aleftfirst020, &["abcd", "ab"], "abcd", &[(0, 0, 4)]), + t!(aleftfirst030, &["ab", "ab"], "abcd", &[(0, 0, 2)]), + t!(aleftfirst040, &["a", "ab"], "xayabbbz", &[]), + t!(aleftfirst100, &["abcdefg", "bcde", "bcdef"], "abcdef", &[]), + t!(aleftfirst110, &["abcdefg", "bcdef", "bcde"], "abcdef", &[]), + t!(aleftfirst300, &["abcd", "b", "bce"], "abce", &[]), + t!(aleftfirst310, &["abcd", "b", "bce", "ce"], "abce", &[]), + t!( + aleftfirst320, + &["a", "abcdefghi", "hz", "abcdefgh"], + "abcdefghz", + &[(0, 0, 1)] + ), + t!(aleftfirst330, &["a", "abab"], "abab", &[(0, 0, 1)]), + t!(aleftfirst400, &["wise", "samwise", "sam"], "samwix", &[(2, 0, 3)]), +]; + +/// Tests for non-overlapping leftmost-longest match semantics. These tests +/// should generally be specific to leftmost-longest, which means they should +/// generally fail under leftmost-first semantics. 
+const LEFTMOST_LONGEST: &'static [SearchTest] = &[ + t!(leftlong000, &["ab", "abcd"], "abcd", &[(1, 0, 4)]), + t!(leftlong010, &["abcd", "bcd", "cd", "b"], "abcd", &[(0, 0, 4),]), + t!(leftlong020, &["", "a"], "a", &[(1, 0, 1)]), + t!(leftlong021, &["", "a", ""], "a", &[(1, 0, 1)]), + t!(leftlong022, &["a", "", ""], "a", &[(0, 0, 1)]), + t!(leftlong023, &["", "", "a"], "a", &[(2, 0, 1)]), + t!(leftlong024, &["", "a"], "ab", &[(1, 0, 1), (0, 2, 2)]), + t!(leftlong030, &["", "a"], "aa", &[(1, 0, 1), (1, 1, 2)]), + t!(leftlong040, &["a", "ab"], "a", &[(0, 0, 1)]), + t!(leftlong050, &["a", "ab"], "ab", &[(1, 0, 2)]), + t!(leftlong060, &["ab", "a"], "a", &[(1, 0, 1)]), + t!(leftlong070, &["ab", "a"], "ab", &[(0, 0, 2)]), + t!(leftlong100, &["abcdefg", "bcde", "bcdef"], "abcdef", &[(2, 1, 6)]), + t!(leftlong110, &["abcdefg", "bcdef", "bcde"], "abcdef", &[(1, 1, 6)]), + t!(leftlong300, &["abcd", "b", "bce"], "abce", &[(2, 1, 4)]), + t!( + leftlong310, + &["a", "abcdefghi", "hz", "abcdefgh"], + "abcdefghz", + &[(3, 0, 8),] + ), + t!(leftlong320, &["a", "abab"], "abab", &[(1, 0, 4)]), + t!(leftlong330, &["abcd", "b", "ce"], "abce", &[(1, 1, 2), (2, 2, 4),]), + t!(leftlong340, &["a", "ab"], "xayabbbz", &[(0, 1, 2), (1, 3, 5)]), +]; + +/// Like LEFTMOST_LONGEST, but for anchored searches. +const ANCHORED_LEFTMOST_LONGEST: &'static [SearchTest] = &[ + t!(aleftlong000, &["ab", "abcd"], "abcd", &[(1, 0, 4)]), + t!(aleftlong010, &["abcd", "bcd", "cd", "b"], "abcd", &[(0, 0, 4),]), + t!(aleftlong020, &["", "a"], "a", &[(1, 0, 1)]), + t!(aleftlong021, &["", "a", ""], "a", &[(1, 0, 1)]), + t!(aleftlong022, &["a", "", ""], "a", &[(0, 0, 1)]), + t!(aleftlong023, &["", "", "a"], "a", &[(2, 0, 1)]), + t!(aleftlong030, &["", "a"], "aa", &[(1, 0, 1), (1, 1, 2)]), + t!(aleftlong040, &["a", "ab"], "a", &[(0, 0, 1)]), + t!(aleftlong050, &["a", "ab"], "ab", &[(1, 0, 2)]), + t!(aleftlong060, &["ab", "a"], "a", &[(1, 0, 1)]), + t!(aleftlong070, &["ab", "a"], "ab", &[(0, 0, 2)]), + t!(aleftlong100, &["abcdefg", "bcde", "bcdef"], "abcdef", &[]), + t!(aleftlong110, &["abcdefg", "bcdef", "bcde"], "abcdef", &[]), + t!(aleftlong300, &["abcd", "b", "bce"], "abce", &[]), + t!( + aleftlong310, + &["a", "abcdefghi", "hz", "abcdefgh"], + "abcdefghz", + &[(3, 0, 8),] + ), + t!(aleftlong320, &["a", "abab"], "abab", &[(1, 0, 4)]), + t!(aleftlong330, &["abcd", "b", "ce"], "abce", &[]), + t!(aleftlong340, &["a", "ab"], "xayabbbz", &[]), +]; + +/// Tests for non-overlapping match semantics. +/// +/// Generally these tests shouldn't pass when using overlapping semantics. +/// These should pass for both standard and leftmost match semantics. +const NON_OVERLAPPING: &'static [SearchTest] = &[ + t!(nover010, &["abcd", "bcd", "cd"], "abcd", &[(0, 0, 4),]), + t!(nover020, &["bcd", "cd", "abcd"], "abcd", &[(2, 0, 4),]), + t!(nover030, &["abc", "bc"], "zazabcz", &[(0, 3, 6),]), + t!( + nover100, + &["ab", "ba"], + "abababa", + &[(0, 0, 2), (0, 2, 4), (0, 4, 6),] + ), + t!(nover200, &["foo", "foo"], "foobarfoo", &[(0, 0, 3), (0, 6, 9),]), + t!(nover300, &["", ""], "", &[(0, 0, 0),]), + t!(nover310, &["", ""], "a", &[(0, 0, 0), (0, 1, 1),]), +]; + +/// Like NON_OVERLAPPING, but for anchored searches. 
+const ANCHORED_NON_OVERLAPPING: &'static [SearchTest] = &[ + t!(anover010, &["abcd", "bcd", "cd"], "abcd", &[(0, 0, 4),]), + t!(anover020, &["bcd", "cd", "abcd"], "abcd", &[(2, 0, 4),]), + t!(anover030, &["abc", "bc"], "zazabcz", &[]), + t!( + anover100, + &["ab", "ba"], + "abababa", + &[(0, 0, 2), (0, 2, 4), (0, 4, 6)] + ), + t!(anover200, &["foo", "foo"], "foobarfoo", &[(0, 0, 3)]), + t!(anover300, &["", ""], "", &[(0, 0, 0)]), + t!(anover310, &["", ""], "a", &[(0, 0, 0), (0, 1, 1)]), +]; + +/// Tests for overlapping match semantics. +/// +/// This only supports standard match semantics, since leftmost-{first,longest} +/// do not support overlapping matches. +const OVERLAPPING: &'static [SearchTest] = &[ + t!( + over000, + &["abcd", "bcd", "cd", "b"], + "abcd", + &[(3, 1, 2), (0, 0, 4), (1, 1, 4), (2, 2, 4),] + ), + t!( + over010, + &["bcd", "cd", "b", "abcd"], + "abcd", + &[(2, 1, 2), (3, 0, 4), (0, 1, 4), (1, 2, 4),] + ), + t!( + over020, + &["abcd", "bcd", "cd"], + "abcd", + &[(0, 0, 4), (1, 1, 4), (2, 2, 4),] + ), + t!( + over030, + &["bcd", "abcd", "cd"], + "abcd", + &[(1, 0, 4), (0, 1, 4), (2, 2, 4),] + ), + t!( + over040, + &["bcd", "cd", "abcd"], + "abcd", + &[(2, 0, 4), (0, 1, 4), (1, 2, 4),] + ), + t!(over050, &["abc", "bc"], "zazabcz", &[(0, 3, 6), (1, 4, 6),]), + t!( + over100, + &["ab", "ba"], + "abababa", + &[(0, 0, 2), (1, 1, 3), (0, 2, 4), (1, 3, 5), (0, 4, 6), (1, 5, 7),] + ), + t!( + over200, + &["foo", "foo"], + "foobarfoo", + &[(0, 0, 3), (1, 0, 3), (0, 6, 9), (1, 6, 9),] + ), + t!(over300, &["", ""], "", &[(0, 0, 0), (1, 0, 0),]), + t!( + over310, + &["", ""], + "a", + &[(0, 0, 0), (1, 0, 0), (0, 1, 1), (1, 1, 1),] + ), + t!(over320, &["", "a"], "a", &[(0, 0, 0), (1, 0, 1), (0, 1, 1),]), + t!( + over330, + &["", "a", ""], + "a", + &[(0, 0, 0), (2, 0, 0), (1, 0, 1), (0, 1, 1), (2, 1, 1),] + ), + t!( + over340, + &["a", "", ""], + "a", + &[(1, 0, 0), (2, 0, 0), (0, 0, 1), (1, 1, 1), (2, 1, 1),] + ), + t!( + over350, + &["", "", "a"], + "a", + &[(0, 0, 0), (1, 0, 0), (2, 0, 1), (0, 1, 1), (1, 1, 1),] + ), + t!( + over360, + &["foo", "foofoo"], + "foofoo", + &[(0, 0, 3), (1, 0, 6), (0, 3, 6)] + ), +]; + +/* +Iterators of anchored overlapping searches were removed from the API in +after 0.7, but we leave the tests commented out for posterity. +/// Like OVERLAPPING, but for anchored searches. +const ANCHORED_OVERLAPPING: &'static [SearchTest] = &[ + t!(aover000, &["abcd", "bcd", "cd", "b"], "abcd", &[(0, 0, 4)]), + t!(aover010, &["bcd", "cd", "b", "abcd"], "abcd", &[(3, 0, 4)]), + t!(aover020, &["abcd", "bcd", "cd"], "abcd", &[(0, 0, 4)]), + t!(aover030, &["bcd", "abcd", "cd"], "abcd", &[(1, 0, 4)]), + t!(aover040, &["bcd", "cd", "abcd"], "abcd", &[(2, 0, 4)]), + t!(aover050, &["abc", "bc"], "zazabcz", &[]), + t!(aover100, &["ab", "ba"], "abababa", &[(0, 0, 2)]), + t!(aover200, &["foo", "foo"], "foobarfoo", &[(0, 0, 3), (1, 0, 3)]), + t!(aover300, &["", ""], "", &[(0, 0, 0), (1, 0, 0),]), + t!(aover310, &["", ""], "a", &[(0, 0, 0), (1, 0, 0)]), + t!(aover320, &["", "a"], "a", &[(0, 0, 0), (1, 0, 1)]), + t!(aover330, &["", "a", ""], "a", &[(0, 0, 0), (2, 0, 0), (1, 0, 1)]), + t!(aover340, &["a", "", ""], "a", &[(1, 0, 0), (2, 0, 0), (0, 0, 1)]), + t!(aover350, &["", "", "a"], "a", &[(0, 0, 0), (1, 0, 0), (2, 0, 1)]), + t!(aover360, &["foo", "foofoo"], "foofoo", &[(0, 0, 3), (1, 0, 6)]), +]; +*/ + +/// Tests for ASCII case insensitivity. +/// +/// These tests should all have the same behavior regardless of match semantics +/// or whether the search is overlapping. 
+const ASCII_CASE_INSENSITIVE: &'static [SearchTest] = &[ + t!(acasei000, &["a"], "A", &[(0, 0, 1)]), + t!(acasei010, &["Samwise"], "SAMWISE", &[(0, 0, 7)]), + t!(acasei011, &["Samwise"], "SAMWISE.abcd", &[(0, 0, 7)]), + t!(acasei020, &["fOoBaR"], "quux foobar baz", &[(0, 5, 11)]), +]; + +/// Like ASCII_CASE_INSENSITIVE, but specifically for non-overlapping tests. +const ASCII_CASE_INSENSITIVE_NON_OVERLAPPING: &'static [SearchTest] = &[ + t!(acasei000, &["foo", "FOO"], "fOo", &[(0, 0, 3)]), + t!(acasei000, &["FOO", "foo"], "fOo", &[(0, 0, 3)]), + t!(acasei010, &["abc", "def"], "abcdef", &[(0, 0, 3), (1, 3, 6)]), +]; + +/// Like ASCII_CASE_INSENSITIVE, but specifically for overlapping tests. +const ASCII_CASE_INSENSITIVE_OVERLAPPING: &'static [SearchTest] = &[ + t!(acasei000, &["foo", "FOO"], "fOo", &[(0, 0, 3), (1, 0, 3)]), + t!(acasei001, &["FOO", "foo"], "fOo", &[(0, 0, 3), (1, 0, 3)]), + // This is a regression test from: + // https://github.com/BurntSushi/aho-corasick/issues/68 + // Previously, it was reporting a duplicate (1, 3, 6) match. + t!( + acasei010, + &["abc", "def", "abcdef"], + "abcdef", + &[(0, 0, 3), (2, 0, 6), (1, 3, 6)] + ), +]; + +/// Regression tests that are applied to all Aho-Corasick combinations. +/// +/// If regression tests are needed for specific match semantics, then add them +/// to the appropriate group above. +const REGRESSION: &'static [SearchTest] = &[ + t!(regression010, &["inf", "ind"], "infind", &[(0, 0, 3), (1, 3, 6),]), + t!(regression020, &["ind", "inf"], "infind", &[(1, 0, 3), (0, 3, 6),]), + t!( + regression030, + &["libcore/", "libstd/"], + "libcore/char/methods.rs", + &[(0, 0, 8),] + ), + t!( + regression040, + &["libstd/", "libcore/"], + "libcore/char/methods.rs", + &[(1, 0, 8),] + ), + t!( + regression050, + &["\x00\x00\x01", "\x00\x00\x00"], + "\x00\x00\x00", + &[(1, 0, 3),] + ), + t!( + regression060, + &["\x00\x00\x00", "\x00\x00\x01"], + "\x00\x00\x00", + &[(0, 0, 3),] + ), +]; + +// Now define a test for each combination of things above that we want to run. +// Since there are a few different combinations for each collection of tests, +// we define a couple of macros to avoid repetition drudgery. The testconfig +// macro constructs the automaton from a given match kind, and runs the search +// tests one-by-one over the given collection. The `with` parameter allows one +// to configure the builder with additional parameters. The testcombo macro +// invokes testconfig in precisely this way: it sets up several tests where +// each one turns a different knob on AhoCorasickBuilder. + +macro_rules! 
testconfig { + (anchored, $name:ident, $collection:expr, $kind:ident, $with:expr) => { + #[test] + fn $name() { + run_search_tests($collection, |test| { + let mut builder = AhoCorasick::builder(); + $with(&mut builder); + let input = Input::new(test.haystack).anchored(Anchored::Yes); + builder + .match_kind(MatchKind::$kind) + .build(test.patterns) + .unwrap() + .try_find_iter(input) + .unwrap() + .collect() + }); + } + }; + (overlapping, $name:ident, $collection:expr, $kind:ident, $with:expr) => { + #[test] + fn $name() { + run_search_tests($collection, |test| { + let mut builder = AhoCorasick::builder(); + $with(&mut builder); + builder + .match_kind(MatchKind::$kind) + .build(test.patterns) + .unwrap() + .find_overlapping_iter(test.haystack) + .collect() + }); + } + }; + (stream, $name:ident, $collection:expr, $kind:ident, $with:expr) => { + #[test] + fn $name() { + run_stream_search_tests($collection, |test| { + let buf = std::io::BufReader::with_capacity( + 1, + test.haystack.as_bytes(), + ); + let mut builder = AhoCorasick::builder(); + $with(&mut builder); + builder + .match_kind(MatchKind::$kind) + .build(test.patterns) + .unwrap() + .stream_find_iter(buf) + .map(|result| result.unwrap()) + .collect() + }); + } + }; + ($name:ident, $collection:expr, $kind:ident, $with:expr) => { + #[test] + fn $name() { + run_search_tests($collection, |test| { + let mut builder = AhoCorasick::builder(); + $with(&mut builder); + builder + .match_kind(MatchKind::$kind) + .build(test.patterns) + .unwrap() + .find_iter(test.haystack) + .collect() + }); + } + }; +} + +macro_rules! testcombo { + ($name:ident, $collection:expr, $kind:ident) => { + mod $name { + use super::*; + + testconfig!(default, $collection, $kind, |_| ()); + testconfig!( + nfa_default, + $collection, + $kind, + |b: &mut AhoCorasickBuilder| { + b.kind(Some(AhoCorasickKind::NoncontiguousNFA)); + } + ); + testconfig!( + nfa_noncontig_no_prefilter, + $collection, + $kind, + |b: &mut AhoCorasickBuilder| { + b.kind(Some(AhoCorasickKind::NoncontiguousNFA)) + .prefilter(false); + } + ); + testconfig!( + nfa_noncontig_all_sparse, + $collection, + $kind, + |b: &mut AhoCorasickBuilder| { + b.kind(Some(AhoCorasickKind::NoncontiguousNFA)) + .dense_depth(0); + } + ); + testconfig!( + nfa_noncontig_all_dense, + $collection, + $kind, + |b: &mut AhoCorasickBuilder| { + b.kind(Some(AhoCorasickKind::NoncontiguousNFA)) + .dense_depth(usize::MAX); + } + ); + testconfig!( + nfa_contig_default, + $collection, + $kind, + |b: &mut AhoCorasickBuilder| { + b.kind(Some(AhoCorasickKind::ContiguousNFA)); + } + ); + testconfig!( + nfa_contig_no_prefilter, + $collection, + $kind, + |b: &mut AhoCorasickBuilder| { + b.kind(Some(AhoCorasickKind::ContiguousNFA)) + .prefilter(false); + } + ); + testconfig!( + nfa_contig_all_sparse, + $collection, + $kind, + |b: &mut AhoCorasickBuilder| { + b.kind(Some(AhoCorasickKind::ContiguousNFA)) + .dense_depth(0); + } + ); + testconfig!( + nfa_contig_all_dense, + $collection, + $kind, + |b: &mut AhoCorasickBuilder| { + b.kind(Some(AhoCorasickKind::ContiguousNFA)) + .dense_depth(usize::MAX); + } + ); + testconfig!( + nfa_contig_no_byte_class, + $collection, + $kind, + |b: &mut AhoCorasickBuilder| { + b.kind(Some(AhoCorasickKind::ContiguousNFA)) + .byte_classes(false); + } + ); + testconfig!( + dfa_default, + $collection, + $kind, + |b: &mut AhoCorasickBuilder| { + b.kind(Some(AhoCorasickKind::DFA)); + } + ); + testconfig!( + dfa_start_both, + $collection, + $kind, + |b: &mut AhoCorasickBuilder| { + b.kind(Some(AhoCorasickKind::DFA)) 
+ .start_kind(StartKind::Both); + } + ); + testconfig!( + dfa_no_prefilter, + $collection, + $kind, + |b: &mut AhoCorasickBuilder| { + b.kind(Some(AhoCorasickKind::DFA)).prefilter(false); + } + ); + testconfig!( + dfa_start_both_no_prefilter, + $collection, + $kind, + |b: &mut AhoCorasickBuilder| { + b.kind(Some(AhoCorasickKind::DFA)) + .start_kind(StartKind::Both) + .prefilter(false); + } + ); + testconfig!( + dfa_no_byte_class, + $collection, + $kind, + |b: &mut AhoCorasickBuilder| { + b.kind(Some(AhoCorasickKind::DFA)).byte_classes(false); + } + ); + testconfig!( + dfa_start_both_no_byte_class, + $collection, + $kind, + |b: &mut AhoCorasickBuilder| { + b.kind(Some(AhoCorasickKind::DFA)) + .start_kind(StartKind::Both) + .byte_classes(false); + } + ); + } + }; +} + +// Write out the various combinations of match semantics given the variety of +// configurations tested by 'testcombo!'. +testcombo!(search_leftmost_longest, AC_LEFTMOST_LONGEST, LeftmostLongest); +testcombo!(search_leftmost_first, AC_LEFTMOST_FIRST, LeftmostFirst); +testcombo!( + search_standard_nonoverlapping, + AC_STANDARD_NON_OVERLAPPING, + Standard +); + +// Write out the overlapping combo by hand since there is only one of them. +testconfig!( + overlapping, + search_standard_overlapping_default, + AC_STANDARD_OVERLAPPING, + Standard, + |_| () +); +testconfig!( + overlapping, + search_standard_overlapping_nfa_noncontig_default, + AC_STANDARD_OVERLAPPING, + Standard, + |b: &mut AhoCorasickBuilder| { + b.kind(Some(AhoCorasickKind::NoncontiguousNFA)); + } +); +testconfig!( + overlapping, + search_standard_overlapping_nfa_noncontig_no_prefilter, + AC_STANDARD_OVERLAPPING, + Standard, + |b: &mut AhoCorasickBuilder| { + b.kind(Some(AhoCorasickKind::NoncontiguousNFA)).prefilter(false); + } +); +testconfig!( + overlapping, + search_standard_overlapping_nfa_contig_default, + AC_STANDARD_OVERLAPPING, + Standard, + |b: &mut AhoCorasickBuilder| { + b.kind(Some(AhoCorasickKind::ContiguousNFA)); + } +); +testconfig!( + overlapping, + search_standard_overlapping_nfa_contig_no_prefilter, + AC_STANDARD_OVERLAPPING, + Standard, + |b: &mut AhoCorasickBuilder| { + b.kind(Some(AhoCorasickKind::ContiguousNFA)).prefilter(false); + } +); +testconfig!( + overlapping, + search_standard_overlapping_nfa_contig_all_sparse, + AC_STANDARD_OVERLAPPING, + Standard, + |b: &mut AhoCorasickBuilder| { + b.kind(Some(AhoCorasickKind::ContiguousNFA)).dense_depth(0); + } +); +testconfig!( + overlapping, + search_standard_overlapping_nfa_contig_all_dense, + AC_STANDARD_OVERLAPPING, + Standard, + |b: &mut AhoCorasickBuilder| { + b.kind(Some(AhoCorasickKind::ContiguousNFA)).dense_depth(usize::MAX); + } +); +testconfig!( + overlapping, + search_standard_overlapping_dfa_default, + AC_STANDARD_OVERLAPPING, + Standard, + |b: &mut AhoCorasickBuilder| { + b.kind(Some(AhoCorasickKind::DFA)); + } +); +testconfig!( + overlapping, + search_standard_overlapping_dfa_start_both, + AC_STANDARD_OVERLAPPING, + Standard, + |b: &mut AhoCorasickBuilder| { + b.kind(Some(AhoCorasickKind::DFA)).start_kind(StartKind::Both); + } +); +testconfig!( + overlapping, + search_standard_overlapping_dfa_no_prefilter, + AC_STANDARD_OVERLAPPING, + Standard, + |b: &mut AhoCorasickBuilder| { + b.kind(Some(AhoCorasickKind::DFA)).prefilter(false); + } +); +testconfig!( + overlapping, + search_standard_overlapping_dfa_start_both_no_prefilter, + AC_STANDARD_OVERLAPPING, + Standard, + |b: &mut AhoCorasickBuilder| { + b.kind(Some(AhoCorasickKind::DFA)) + .start_kind(StartKind::Both) + .prefilter(false); + } 
+); +testconfig!( + overlapping, + search_standard_overlapping_dfa_no_byte_class, + AC_STANDARD_OVERLAPPING, + Standard, + |b: &mut AhoCorasickBuilder| { + b.kind(Some(AhoCorasickKind::DFA)).byte_classes(false); + } +); +testconfig!( + overlapping, + search_standard_overlapping_dfa_start_both_no_byte_class, + AC_STANDARD_OVERLAPPING, + Standard, + |b: &mut AhoCorasickBuilder| { + b.kind(Some(AhoCorasickKind::DFA)) + .start_kind(StartKind::Both) + .byte_classes(false); + } +); + +// Also write out tests manually for streams, since we only test the standard +// match semantics. We also don't bother testing different automaton +// configurations, since those are well covered by tests above. +#[cfg(feature = "std")] +testconfig!( + stream, + search_standard_stream_default, + AC_STANDARD_NON_OVERLAPPING, + Standard, + |_| () +); +#[cfg(feature = "std")] +testconfig!( + stream, + search_standard_stream_nfa_noncontig_default, + AC_STANDARD_NON_OVERLAPPING, + Standard, + |b: &mut AhoCorasickBuilder| { + b.kind(Some(AhoCorasickKind::NoncontiguousNFA)); + } +); +#[cfg(feature = "std")] +testconfig!( + stream, + search_standard_stream_nfa_contig_default, + AC_STANDARD_NON_OVERLAPPING, + Standard, + |b: &mut AhoCorasickBuilder| { + b.kind(Some(AhoCorasickKind::ContiguousNFA)); + } +); +#[cfg(feature = "std")] +testconfig!( + stream, + search_standard_stream_dfa_default, + AC_STANDARD_NON_OVERLAPPING, + Standard, + |b: &mut AhoCorasickBuilder| { + b.kind(Some(AhoCorasickKind::DFA)); + } +); + +// Same thing for anchored searches. Write them out manually. +testconfig!( + anchored, + search_standard_anchored_default, + AC_STANDARD_ANCHORED_NON_OVERLAPPING, + Standard, + |b: &mut AhoCorasickBuilder| { + b.start_kind(StartKind::Anchored); + } +); +testconfig!( + anchored, + search_standard_anchored_nfa_noncontig_default, + AC_STANDARD_ANCHORED_NON_OVERLAPPING, + Standard, + |b: &mut AhoCorasickBuilder| { + b.start_kind(StartKind::Anchored) + .kind(Some(AhoCorasickKind::NoncontiguousNFA)); + } +); +testconfig!( + anchored, + search_standard_anchored_nfa_contig_default, + AC_STANDARD_ANCHORED_NON_OVERLAPPING, + Standard, + |b: &mut AhoCorasickBuilder| { + b.start_kind(StartKind::Anchored) + .kind(Some(AhoCorasickKind::ContiguousNFA)); + } +); +testconfig!( + anchored, + search_standard_anchored_dfa_default, + AC_STANDARD_ANCHORED_NON_OVERLAPPING, + Standard, + |b: &mut AhoCorasickBuilder| { + b.start_kind(StartKind::Anchored).kind(Some(AhoCorasickKind::DFA)); + } +); +testconfig!( + anchored, + search_standard_anchored_dfa_start_both, + AC_STANDARD_ANCHORED_NON_OVERLAPPING, + Standard, + |b: &mut AhoCorasickBuilder| { + b.start_kind(StartKind::Both).kind(Some(AhoCorasickKind::DFA)); + } +); +testconfig!( + anchored, + search_leftmost_first_anchored_default, + AC_LEFTMOST_FIRST_ANCHORED, + LeftmostFirst, + |b: &mut AhoCorasickBuilder| { + b.start_kind(StartKind::Anchored); + } +); +testconfig!( + anchored, + search_leftmost_first_anchored_nfa_noncontig_default, + AC_LEFTMOST_FIRST_ANCHORED, + LeftmostFirst, + |b: &mut AhoCorasickBuilder| { + b.start_kind(StartKind::Anchored) + .kind(Some(AhoCorasickKind::NoncontiguousNFA)); + } +); +testconfig!( + anchored, + search_leftmost_first_anchored_nfa_contig_default, + AC_LEFTMOST_FIRST_ANCHORED, + LeftmostFirst, + |b: &mut AhoCorasickBuilder| { + b.start_kind(StartKind::Anchored) + .kind(Some(AhoCorasickKind::ContiguousNFA)); + } +); +testconfig!( + anchored, + search_leftmost_first_anchored_dfa_default, + AC_LEFTMOST_FIRST_ANCHORED, + LeftmostFirst, + |b: &mut 
AhoCorasickBuilder| { + b.start_kind(StartKind::Anchored).kind(Some(AhoCorasickKind::DFA)); + } +); +testconfig!( + anchored, + search_leftmost_first_anchored_dfa_start_both, + AC_LEFTMOST_FIRST_ANCHORED, + LeftmostFirst, + |b: &mut AhoCorasickBuilder| { + b.start_kind(StartKind::Both).kind(Some(AhoCorasickKind::DFA)); + } +); +testconfig!( + anchored, + search_leftmost_longest_anchored_default, + AC_LEFTMOST_LONGEST_ANCHORED, + LeftmostLongest, + |b: &mut AhoCorasickBuilder| { + b.start_kind(StartKind::Anchored); + } +); +testconfig!( + anchored, + search_leftmost_longest_anchored_nfa_noncontig_default, + AC_LEFTMOST_LONGEST_ANCHORED, + LeftmostLongest, + |b: &mut AhoCorasickBuilder| { + b.start_kind(StartKind::Anchored) + .kind(Some(AhoCorasickKind::NoncontiguousNFA)); + } +); +testconfig!( + anchored, + search_leftmost_longest_anchored_nfa_contig_default, + AC_LEFTMOST_LONGEST_ANCHORED, + LeftmostLongest, + |b: &mut AhoCorasickBuilder| { + b.start_kind(StartKind::Anchored) + .kind(Some(AhoCorasickKind::ContiguousNFA)); + } +); +testconfig!( + anchored, + search_leftmost_longest_anchored_dfa_default, + AC_LEFTMOST_LONGEST_ANCHORED, + LeftmostLongest, + |b: &mut AhoCorasickBuilder| { + b.start_kind(StartKind::Anchored).kind(Some(AhoCorasickKind::DFA)); + } +); +testconfig!( + anchored, + search_leftmost_longest_anchored_dfa_start_both, + AC_LEFTMOST_LONGEST_ANCHORED, + LeftmostLongest, + |b: &mut AhoCorasickBuilder| { + b.start_kind(StartKind::Both).kind(Some(AhoCorasickKind::DFA)); + } +); + +// And also write out the test combinations for ASCII case insensitivity. +testconfig!( + acasei_standard_default, + &[ASCII_CASE_INSENSITIVE], + Standard, + |b: &mut AhoCorasickBuilder| { + b.prefilter(false).ascii_case_insensitive(true); + } +); +testconfig!( + acasei_standard_nfa_noncontig_default, + &[ASCII_CASE_INSENSITIVE], + Standard, + |b: &mut AhoCorasickBuilder| { + b.kind(Some(AhoCorasickKind::NoncontiguousNFA)) + .prefilter(false) + .ascii_case_insensitive(true); + } +); +testconfig!( + acasei_standard_nfa_contig_default, + &[ASCII_CASE_INSENSITIVE], + Standard, + |b: &mut AhoCorasickBuilder| { + b.kind(Some(AhoCorasickKind::ContiguousNFA)) + .prefilter(false) + .ascii_case_insensitive(true); + } +); +testconfig!( + acasei_standard_dfa_default, + &[ASCII_CASE_INSENSITIVE, ASCII_CASE_INSENSITIVE_NON_OVERLAPPING], + Standard, + |b: &mut AhoCorasickBuilder| { + b.kind(Some(AhoCorasickKind::DFA)).ascii_case_insensitive(true); + } +); +testconfig!( + overlapping, + acasei_standard_overlapping_default, + &[ASCII_CASE_INSENSITIVE, ASCII_CASE_INSENSITIVE_OVERLAPPING], + Standard, + |b: &mut AhoCorasickBuilder| { + b.ascii_case_insensitive(true); + } +); +testconfig!( + overlapping, + acasei_standard_overlapping_nfa_noncontig_default, + &[ASCII_CASE_INSENSITIVE, ASCII_CASE_INSENSITIVE_OVERLAPPING], + Standard, + |b: &mut AhoCorasickBuilder| { + b.kind(Some(AhoCorasickKind::NoncontiguousNFA)) + .ascii_case_insensitive(true); + } +); +testconfig!( + overlapping, + acasei_standard_overlapping_nfa_contig_default, + &[ASCII_CASE_INSENSITIVE, ASCII_CASE_INSENSITIVE_OVERLAPPING], + Standard, + |b: &mut AhoCorasickBuilder| { + b.kind(Some(AhoCorasickKind::ContiguousNFA)) + .ascii_case_insensitive(true); + } +); +testconfig!( + overlapping, + acasei_standard_overlapping_dfa_default, + &[ASCII_CASE_INSENSITIVE, ASCII_CASE_INSENSITIVE_OVERLAPPING], + Standard, + |b: &mut AhoCorasickBuilder| { + b.kind(Some(AhoCorasickKind::DFA)).ascii_case_insensitive(true); + } +); +testconfig!( + 
acasei_leftmost_first_default, + &[ASCII_CASE_INSENSITIVE, ASCII_CASE_INSENSITIVE_NON_OVERLAPPING], + LeftmostFirst, + |b: &mut AhoCorasickBuilder| { + b.ascii_case_insensitive(true); + } +); +testconfig!( + acasei_leftmost_first_nfa_noncontig_default, + &[ASCII_CASE_INSENSITIVE, ASCII_CASE_INSENSITIVE_NON_OVERLAPPING], + LeftmostFirst, + |b: &mut AhoCorasickBuilder| { + b.kind(Some(AhoCorasickKind::NoncontiguousNFA)) + .ascii_case_insensitive(true); + } +); +testconfig!( + acasei_leftmost_first_nfa_contig_default, + &[ASCII_CASE_INSENSITIVE, ASCII_CASE_INSENSITIVE_NON_OVERLAPPING], + LeftmostFirst, + |b: &mut AhoCorasickBuilder| { + b.kind(Some(AhoCorasickKind::ContiguousNFA)) + .ascii_case_insensitive(true); + } +); +testconfig!( + acasei_leftmost_first_dfa_default, + &[ASCII_CASE_INSENSITIVE, ASCII_CASE_INSENSITIVE_NON_OVERLAPPING], + LeftmostFirst, + |b: &mut AhoCorasickBuilder| { + b.kind(Some(AhoCorasickKind::DFA)).ascii_case_insensitive(true); + } +); +testconfig!( + acasei_leftmost_longest_default, + &[ASCII_CASE_INSENSITIVE, ASCII_CASE_INSENSITIVE_NON_OVERLAPPING], + LeftmostLongest, + |b: &mut AhoCorasickBuilder| { + b.ascii_case_insensitive(true); + } +); +testconfig!( + acasei_leftmost_longest_nfa_noncontig_default, + &[ASCII_CASE_INSENSITIVE, ASCII_CASE_INSENSITIVE_NON_OVERLAPPING], + LeftmostLongest, + |b: &mut AhoCorasickBuilder| { + b.kind(Some(AhoCorasickKind::NoncontiguousNFA)) + .ascii_case_insensitive(true); + } +); +testconfig!( + acasei_leftmost_longest_nfa_contig_default, + &[ASCII_CASE_INSENSITIVE, ASCII_CASE_INSENSITIVE_NON_OVERLAPPING], + LeftmostLongest, + |b: &mut AhoCorasickBuilder| { + b.kind(Some(AhoCorasickKind::ContiguousNFA)) + .ascii_case_insensitive(true); + } +); +testconfig!( + acasei_leftmost_longest_dfa_default, + &[ASCII_CASE_INSENSITIVE, ASCII_CASE_INSENSITIVE_NON_OVERLAPPING], + LeftmostLongest, + |b: &mut AhoCorasickBuilder| { + b.kind(Some(AhoCorasickKind::DFA)).ascii_case_insensitive(true); + } +); + +fn run_search_tests Vec>( + which: TestCollection, + mut f: F, +) { + let get_match_triples = + |matches: Vec| -> Vec<(usize, usize, usize)> { + matches + .into_iter() + .map(|m| (m.pattern().as_usize(), m.start(), m.end())) + .collect() + }; + for &tests in which { + for test in tests { + assert_eq!( + test.matches, + get_match_triples(f(&test)).as_slice(), + "test: {}, patterns: {:?}, haystack: {:?}", + test.name, + test.patterns, + test.haystack + ); + } + } +} + +// Like 'run_search_tests', but we skip any tests that contain the empty +// pattern because stream searching doesn't support it. 
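+//
+// (Editor's note: both runner functions here are driven through the
+// 'testconfig!' macro above rather than called directly. Written out by hand,
+// an illustrative, non-upstream, non-overlapping invocation of
+// 'run_search_tests' would look roughly like this:
+//
+//     run_search_tests(AC_STANDARD_NON_OVERLAPPING, |test| {
+//         let ac = AhoCorasick::builder()
+//             .match_kind(MatchKind::Standard)
+//             .build(test.patterns)
+//             .unwrap();
+//         ac.find_iter(test.haystack).collect()
+//     });
+//
+// The closure builds a fresh searcher per test case and returns its matches,
+// which 'run_search_tests' then compares against 'test.matches'.)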
+#[cfg(feature = "std")] +fn run_stream_search_tests Vec>( + which: TestCollection, + mut f: F, +) { + let get_match_triples = + |matches: Vec| -> Vec<(usize, usize, usize)> { + matches + .into_iter() + .map(|m| (m.pattern().as_usize(), m.start(), m.end())) + .collect() + }; + for &tests in which { + for test in tests { + if test.patterns.iter().any(|p| p.is_empty()) { + continue; + } + assert_eq!( + test.matches, + get_match_triples(f(&test)).as_slice(), + "test: {}, patterns: {:?}, haystack: {:?}", + test.name, + test.patterns, + test.haystack + ); + } + } +} + +#[test] +fn search_tests_have_unique_names() { + let assert = |constname, tests: &[SearchTest]| { + let mut seen = HashMap::new(); // map from test name to position + for (i, test) in tests.iter().enumerate() { + if !seen.contains_key(test.name) { + seen.insert(test.name, i); + } else { + let last = seen[test.name]; + panic!( + "{} tests have duplicate names at positions {} and {}", + constname, last, i + ); + } + } + }; + assert("BASICS", BASICS); + assert("STANDARD", STANDARD); + assert("LEFTMOST", LEFTMOST); + assert("LEFTMOST_FIRST", LEFTMOST_FIRST); + assert("LEFTMOST_LONGEST", LEFTMOST_LONGEST); + assert("NON_OVERLAPPING", NON_OVERLAPPING); + assert("OVERLAPPING", OVERLAPPING); + assert("REGRESSION", REGRESSION); +} + +#[cfg(feature = "std")] +#[test] +#[should_panic] +fn stream_not_allowed_leftmost_first() { + let fsm = AhoCorasick::builder() + .match_kind(MatchKind::LeftmostFirst) + .build(None::) + .unwrap(); + assert_eq!(fsm.stream_find_iter(&b""[..]).count(), 0); +} + +#[cfg(feature = "std")] +#[test] +#[should_panic] +fn stream_not_allowed_leftmost_longest() { + let fsm = AhoCorasick::builder() + .match_kind(MatchKind::LeftmostLongest) + .build(None::) + .unwrap(); + assert_eq!(fsm.stream_find_iter(&b""[..]).count(), 0); +} + +#[test] +#[should_panic] +fn overlapping_not_allowed_leftmost_first() { + let fsm = AhoCorasick::builder() + .match_kind(MatchKind::LeftmostFirst) + .build(None::) + .unwrap(); + assert_eq!(fsm.find_overlapping_iter("").count(), 0); +} + +#[test] +#[should_panic] +fn overlapping_not_allowed_leftmost_longest() { + let fsm = AhoCorasick::builder() + .match_kind(MatchKind::LeftmostLongest) + .build(None::) + .unwrap(); + assert_eq!(fsm.find_overlapping_iter("").count(), 0); +} + +// This tests that if we build an AC matcher with an "unanchored" start kind, +// then we can't run an anchored search even if the underlying searcher +// supports it. +// +// The key bit here is that both of the NFAs in this crate unconditionally +// support both unanchored and anchored searches, but the DFA does not because +// of the added cost of doing so. To avoid the top-level AC matcher sometimes +// supporting anchored and sometimes not (depending on which searcher it +// chooses to use internally), we ensure that the given 'StartKind' is always +// respected. +#[test] +fn anchored_not_allowed_even_if_technically_available() { + let ac = AhoCorasick::builder() + .kind(Some(AhoCorasickKind::NoncontiguousNFA)) + .start_kind(StartKind::Unanchored) + .build(&["foo"]) + .unwrap(); + assert!(ac.try_find(Input::new("foo").anchored(Anchored::Yes)).is_err()); + + let ac = AhoCorasick::builder() + .kind(Some(AhoCorasickKind::ContiguousNFA)) + .start_kind(StartKind::Unanchored) + .build(&["foo"]) + .unwrap(); + assert!(ac.try_find(Input::new("foo").anchored(Anchored::Yes)).is_err()); + + // For completeness, check that the DFA returns an error too. 
+ let ac = AhoCorasick::builder() + .kind(Some(AhoCorasickKind::DFA)) + .start_kind(StartKind::Unanchored) + .build(&["foo"]) + .unwrap(); + assert!(ac.try_find(Input::new("foo").anchored(Anchored::Yes)).is_err()); +} + +// This is like the test aboved, but with unanchored and anchored flipped. That +// is, we asked for an AC searcher with anchored support and we check that +// unanchored searches return an error even if the underlying searcher would +// technically support it. +#[test] +fn unanchored_not_allowed_even_if_technically_available() { + let ac = AhoCorasick::builder() + .kind(Some(AhoCorasickKind::NoncontiguousNFA)) + .start_kind(StartKind::Anchored) + .build(&["foo"]) + .unwrap(); + assert!(ac.try_find(Input::new("foo").anchored(Anchored::No)).is_err()); + + let ac = AhoCorasick::builder() + .kind(Some(AhoCorasickKind::ContiguousNFA)) + .start_kind(StartKind::Anchored) + .build(&["foo"]) + .unwrap(); + assert!(ac.try_find(Input::new("foo").anchored(Anchored::No)).is_err()); + + // For completeness, check that the DFA returns an error too. + let ac = AhoCorasick::builder() + .kind(Some(AhoCorasickKind::DFA)) + .start_kind(StartKind::Anchored) + .build(&["foo"]) + .unwrap(); + assert!(ac.try_find(Input::new("foo").anchored(Anchored::No)).is_err()); +} + +// This tests that a prefilter does not cause a search to report a match +// outside the bounds provided by the caller. +// +// This is a regression test for a bug I introduced during the rewrite of most +// of the crate after 0.7. It was never released. The tricky part here is +// ensuring we get a prefilter that can report matches on its own (such as the +// packed searcher). Otherwise, prefilters that report false positives might +// have searched past the bounds provided by the caller, but confirming the +// match would subsequently fail. +#[test] +fn prefilter_stays_in_bounds() { + let ac = AhoCorasick::builder() + .match_kind(MatchKind::LeftmostFirst) + .build(&["sam", "frodo", "pippin", "merry", "gandalf", "sauron"]) + .unwrap(); + let haystack = "foo gandalf"; + assert_eq!(None, ac.find(Input::new(haystack).range(0..10))); +} + +// See: https://github.com/BurntSushi/aho-corasick/issues/44 +// +// In short, this test ensures that enabling ASCII case insensitivity does not +// visit an exponential number of states when filling in failure transitions. +#[test] +fn regression_ascii_case_insensitive_no_exponential() { + let ac = AhoCorasick::builder() + .ascii_case_insensitive(true) + .build(&["Tsubaki House-Triple Shot Vol01校花三姐妹"]) + .unwrap(); + assert!(ac.find("").is_none()); +} + +// See: https://github.com/BurntSushi/aho-corasick/issues/53 +// +// This test ensures that the rare byte prefilter works in a particular corner +// case. In particular, the shift offset detected for '/' in the patterns below +// was incorrect, leading to a false negative. 
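+//
+// (Editor's note, background for readers unfamiliar with this prefilter: the
+// "rare byte" heuristic picks one or more uncommon bytes from the patterns,
+// ranked by the BYTE_FREQUENCIES table in util/byte_frequencies.rs, scans the
+// haystack for them, and rewinds by a precomputed per-byte offset to produce
+// a candidate position for the automaton to confirm. If that offset is
+// computed incorrectly, candidates start too late and genuine matches are
+// skipped, which is the false negative this test guards against.)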
+#[test] +fn regression_rare_byte_prefilter() { + use crate::AhoCorasick; + + let ac = AhoCorasick::new(&["ab/j/", "x/"]).unwrap(); + assert!(ac.is_match("ab/j/")); +} + +#[test] +fn regression_case_insensitive_prefilter() { + for c in b'a'..b'z' { + for c2 in b'a'..b'z' { + let c = c as char; + let c2 = c2 as char; + let needle = format!("{}{}", c, c2).to_lowercase(); + let haystack = needle.to_uppercase(); + let ac = AhoCorasick::builder() + .ascii_case_insensitive(true) + .prefilter(true) + .build(&[&needle]) + .unwrap(); + assert_eq!( + 1, + ac.find_iter(&haystack).count(), + "failed to find {:?} in {:?}\n\nautomaton:\n{:?}", + needle, + haystack, + ac, + ); + } + } +} + +// See: https://github.com/BurntSushi/aho-corasick/issues/64 +// +// This occurs when the rare byte prefilter is active. +#[cfg(feature = "std")] +#[test] +fn regression_stream_rare_byte_prefilter() { + use std::io::Read; + + // NOTE: The test only fails if this ends with j. + const MAGIC: [u8; 5] = *b"1234j"; + + // NOTE: The test fails for value in 8188..=8191 These value put the string + // to search accross two call to read because the buffer size is 64KB by + // default. + const BEGIN: usize = 65_535; + + /// This is just a structure that implements Reader. The reader + /// implementation will simulate a file filled with 0, except for the MAGIC + /// string at offset BEGIN. + #[derive(Default)] + struct R { + read: usize, + } + + impl Read for R { + fn read(&mut self, buf: &mut [u8]) -> std::io::Result { + if self.read > 100000 { + return Ok(0); + } + let mut from = 0; + if self.read < BEGIN { + from = buf.len().min(BEGIN - self.read); + for x in 0..from { + buf[x] = 0; + } + self.read += from; + } + if self.read >= BEGIN && self.read <= BEGIN + MAGIC.len() { + let to = buf.len().min(BEGIN + MAGIC.len() - self.read + from); + if to > from { + buf[from..to].copy_from_slice( + &MAGIC + [self.read - BEGIN..self.read - BEGIN + to - from], + ); + self.read += to - from; + from = to; + } + } + for x in from..buf.len() { + buf[x] = 0; + self.read += 1; + } + Ok(buf.len()) + } + } + + fn run() -> std::io::Result<()> { + let aut = AhoCorasick::builder() + // Enable byte classes to make debugging the automaton easier. It + // should have no effect on the test result. + .byte_classes(false) + .build(&[&MAGIC]) + .unwrap(); + + // While reading from a vector, it works: + let mut buf = alloc::vec![]; + R::default().read_to_end(&mut buf)?; + let from_whole = aut.find_iter(&buf).next().unwrap().start(); + + // But using stream_find_iter fails! + let mut file = std::io::BufReader::new(R::default()); + let begin = aut + .stream_find_iter(&mut file) + .next() + .expect("NOT FOUND!!!!")? // Panic here + .start(); + assert_eq!(from_whole, begin); + Ok(()) + } + + run().unwrap() +} diff --git a/vendor/aho-corasick/src/transducer.rs b/vendor/aho-corasick/src/transducer.rs new file mode 100644 index 00000000000000..39bb240f4461ba --- /dev/null +++ b/vendor/aho-corasick/src/transducer.rs @@ -0,0 +1,270 @@ +/*! +Provides implementations of `fst::Automaton` for Aho-Corasick automata. + +This works by providing two wrapper types, [`Anchored`] and [`Unanchored`]. +The former executes an anchored search on an FST while the latter executes +an unanchored search. Building these wrappers is fallible and will fail if +the underlying Aho-Corasick automaton does not support the type of search it +represents. 
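+
+For example, both NFAs in this crate support unanchored and anchored searches
+unconditionally, so either wrapper can be built around them, while a
+[`DFA`](crate::dfa::DFA) only supports the start kinds it was configured with
+via [`StartKind`](crate::StartKind); wrapping a DFA that was built without
+anchored start support in [`Anchored`] will therefore fail.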
+*/ + +use crate::{ + automaton::{Automaton, StateID}, + Anchored as AcAnchored, Input, MatchError, +}; + +/// Represents an unanchored Aho-Corasick search of a finite state transducer. +/// +/// Wrapping an Aho-Corasick automaton in `Unanchored` will fail if the +/// underlying automaton does not support unanchored searches. +/// +/// # Example +/// +/// This shows how to build an FST of keys and then run an unanchored search on +/// those keys using an Aho-Corasick automaton. +/// +/// ``` +/// use aho_corasick::{nfa::contiguous::NFA, transducer::Unanchored}; +/// use fst::{Automaton, IntoStreamer, Set, Streamer}; +/// +/// let set = Set::from_iter(&["abcd", "bc", "bcd", "xyz"]).unwrap(); +/// let nfa = NFA::new(&["bcd", "x"]).unwrap(); +/// // NFAs always support both unanchored and anchored searches. +/// let searcher = Unanchored::new(&nfa).unwrap(); +/// +/// let mut stream = set.search(searcher).into_stream(); +/// let mut results = vec![]; +/// while let Some(key) = stream.next() { +/// results.push(std::str::from_utf8(key).unwrap().to_string()); +/// } +/// assert_eq!(vec!["abcd", "bcd", "xyz"], results); +/// ``` +#[derive(Clone, Debug)] +pub struct Unanchored(A); + +impl Unanchored { + /// Create a new `Unanchored` implementation of the `fst::Automaton` trait. + /// + /// If the given Aho-Corasick automaton does not support unanchored + /// searches, then this returns an error. + pub fn new(aut: A) -> Result, MatchError> { + let input = Input::new("").anchored(AcAnchored::No); + let _ = aut.start_state(&input)?; + Ok(Unanchored(aut)) + } + + /// Returns a borrow to the underlying automaton. + pub fn as_ref(&self) -> &A { + &self.0 + } + + /// Unwrap this value and return the inner automaton. + pub fn into_inner(self) -> A { + self.0 + } +} + +impl fst::Automaton for Unanchored { + type State = StateID; + + #[inline] + fn start(&self) -> StateID { + let input = Input::new("").anchored(AcAnchored::No); + self.0.start_state(&input).expect("support for unanchored searches") + } + + #[inline] + fn is_match(&self, state: &StateID) -> bool { + self.0.is_match(*state) + } + + #[inline] + fn accept(&self, state: &StateID, byte: u8) -> StateID { + if fst::Automaton::is_match(self, state) { + return *state; + } + self.0.next_state(AcAnchored::No, *state, byte) + } + + #[inline] + fn can_match(&self, state: &StateID) -> bool { + !self.0.is_dead(*state) + } +} + +/// Represents an anchored Aho-Corasick search of a finite state transducer. +/// +/// Wrapping an Aho-Corasick automaton in `Unanchored` will fail if the +/// underlying automaton does not support unanchored searches. +/// +/// # Example +/// +/// This shows how to build an FST of keys and then run an anchored search on +/// those keys using an Aho-Corasick automaton. +/// +/// ``` +/// use aho_corasick::{nfa::contiguous::NFA, transducer::Anchored}; +/// use fst::{Automaton, IntoStreamer, Set, Streamer}; +/// +/// let set = Set::from_iter(&["abcd", "bc", "bcd", "xyz"]).unwrap(); +/// let nfa = NFA::new(&["bcd", "x"]).unwrap(); +/// // NFAs always support both unanchored and anchored searches. 
+/// let searcher = Anchored::new(&nfa).unwrap(); +/// +/// let mut stream = set.search(searcher).into_stream(); +/// let mut results = vec![]; +/// while let Some(key) = stream.next() { +/// results.push(std::str::from_utf8(key).unwrap().to_string()); +/// } +/// assert_eq!(vec!["bcd", "xyz"], results); +/// ``` +/// +/// This is like the example above, except we use an Aho-Corasick DFA, which +/// requires explicitly configuring it to support anchored searches. (NFAs +/// unconditionally support both unanchored and anchored searches.) +/// +/// ``` +/// use aho_corasick::{dfa::DFA, transducer::Anchored, StartKind}; +/// use fst::{Automaton, IntoStreamer, Set, Streamer}; +/// +/// let set = Set::from_iter(&["abcd", "bc", "bcd", "xyz"]).unwrap(); +/// let dfa = DFA::builder() +/// .start_kind(StartKind::Anchored) +/// .build(&["bcd", "x"]) +/// .unwrap(); +/// // We've explicitly configured our DFA to support anchored searches. +/// let searcher = Anchored::new(&dfa).unwrap(); +/// +/// let mut stream = set.search(searcher).into_stream(); +/// let mut results = vec![]; +/// while let Some(key) = stream.next() { +/// results.push(std::str::from_utf8(key).unwrap().to_string()); +/// } +/// assert_eq!(vec!["bcd", "xyz"], results); +/// ``` +#[derive(Clone, Debug)] +pub struct Anchored(A); + +impl Anchored { + /// Create a new `Anchored` implementation of the `fst::Automaton` trait. + /// + /// If the given Aho-Corasick automaton does not support anchored searches, + /// then this returns an error. + pub fn new(aut: A) -> Result, MatchError> { + let input = Input::new("").anchored(AcAnchored::Yes); + let _ = aut.start_state(&input)?; + Ok(Anchored(aut)) + } + + /// Returns a borrow to the underlying automaton. + pub fn as_ref(&self) -> &A { + &self.0 + } + + /// Unwrap this value and return the inner automaton. 
+ pub fn into_inner(self) -> A { + self.0 + } +} + +impl fst::Automaton for Anchored { + type State = StateID; + + #[inline] + fn start(&self) -> StateID { + let input = Input::new("").anchored(AcAnchored::Yes); + self.0.start_state(&input).expect("support for unanchored searches") + } + + #[inline] + fn is_match(&self, state: &StateID) -> bool { + self.0.is_match(*state) + } + + #[inline] + fn accept(&self, state: &StateID, byte: u8) -> StateID { + if fst::Automaton::is_match(self, state) { + return *state; + } + self.0.next_state(AcAnchored::Yes, *state, byte) + } + + #[inline] + fn can_match(&self, state: &StateID) -> bool { + !self.0.is_dead(*state) + } +} + +#[cfg(test)] +mod tests { + use alloc::{string::String, vec, vec::Vec}; + + use fst::{Automaton, IntoStreamer, Set, Streamer}; + + use crate::{ + dfa::DFA, + nfa::{contiguous, noncontiguous}, + StartKind, + }; + + use super::*; + + fn search>( + set: &Set, + aut: A, + ) -> Vec { + let mut stream = set.search(aut).into_stream(); + let mut results = vec![]; + while let Some(key) = stream.next() { + results.push(String::from(core::str::from_utf8(key).unwrap())); + } + results + } + + #[test] + fn unanchored() { + let set = + Set::from_iter(&["a", "bar", "baz", "wat", "xba", "xbax", "z"]) + .unwrap(); + let patterns = vec!["baz", "bax"]; + let expected = vec!["baz", "xbax"]; + + let aut = Unanchored(noncontiguous::NFA::new(&patterns).unwrap()); + let got = search(&set, &aut); + assert_eq!(got, expected); + + let aut = Unanchored(contiguous::NFA::new(&patterns).unwrap()); + let got = search(&set, &aut); + assert_eq!(got, expected); + + let aut = Unanchored(DFA::new(&patterns).unwrap()); + let got = search(&set, &aut); + assert_eq!(got, expected); + } + + #[test] + fn anchored() { + let set = + Set::from_iter(&["a", "bar", "baz", "wat", "xba", "xbax", "z"]) + .unwrap(); + let patterns = vec!["baz", "bax"]; + let expected = vec!["baz"]; + + let aut = Anchored(noncontiguous::NFA::new(&patterns).unwrap()); + let got = search(&set, &aut); + assert_eq!(got, expected); + + let aut = Anchored(contiguous::NFA::new(&patterns).unwrap()); + let got = search(&set, &aut); + assert_eq!(got, expected); + + let aut = Anchored( + DFA::builder() + .start_kind(StartKind::Anchored) + .build(&patterns) + .unwrap(), + ); + let got = search(&set, &aut); + assert_eq!(got, expected); + } +} diff --git a/vendor/aho-corasick/src/util/alphabet.rs b/vendor/aho-corasick/src/util/alphabet.rs new file mode 100644 index 00000000000000..69724fa3abe627 --- /dev/null +++ b/vendor/aho-corasick/src/util/alphabet.rs @@ -0,0 +1,409 @@ +use crate::util::int::Usize; + +/// A representation of byte oriented equivalence classes. +/// +/// This is used in finite state machines to reduce the size of the transition +/// table. This can have a particularly large impact not only on the total size +/// of an FSM, but also on FSM build times because it reduces the number of +/// transitions that need to be visited/set. +#[derive(Clone, Copy)] +pub(crate) struct ByteClasses([u8; 256]); + +impl ByteClasses { + /// Creates a new set of equivalence classes where all bytes are mapped to + /// the same class. + pub(crate) fn empty() -> ByteClasses { + ByteClasses([0; 256]) + } + + /// Creates a new set of equivalence classes where each byte belongs to + /// its own equivalence class. + pub(crate) fn singletons() -> ByteClasses { + let mut classes = ByteClasses::empty(); + for b in 0..=255 { + classes.set(b, b); + } + classes + } + + /// Set the equivalence class for the given byte. 
+ #[inline] + pub(crate) fn set(&mut self, byte: u8, class: u8) { + self.0[usize::from(byte)] = class; + } + + /// Get the equivalence class for the given byte. + #[inline] + pub(crate) fn get(&self, byte: u8) -> u8 { + self.0[usize::from(byte)] + } + + /// Return the total number of elements in the alphabet represented by + /// these equivalence classes. Equivalently, this returns the total number + /// of equivalence classes. + #[inline] + pub(crate) fn alphabet_len(&self) -> usize { + // Add one since the number of equivalence classes is one bigger than + // the last one. + usize::from(self.0[255]) + 1 + } + + /// Returns the stride, as a base-2 exponent, required for these + /// equivalence classes. + /// + /// The stride is always the smallest power of 2 that is greater than or + /// equal to the alphabet length. This is done so that converting between + /// state IDs and indices can be done with shifts alone, which is much + /// faster than integer division. The "stride2" is the exponent. i.e., + /// `2^stride2 = stride`. + pub(crate) fn stride2(&self) -> usize { + let zeros = self.alphabet_len().next_power_of_two().trailing_zeros(); + usize::try_from(zeros).unwrap() + } + + /// Returns the stride for these equivalence classes, which corresponds + /// to the smallest power of 2 greater than or equal to the number of + /// equivalence classes. + pub(crate) fn stride(&self) -> usize { + 1 << self.stride2() + } + + /// Returns true if and only if every byte in this class maps to its own + /// equivalence class. Equivalently, there are 257 equivalence classes + /// and each class contains exactly one byte (plus the special EOI class). + #[inline] + pub(crate) fn is_singleton(&self) -> bool { + self.alphabet_len() == 256 + } + + /// Returns an iterator over all equivalence classes in this set. + pub(crate) fn iter(&self) -> ByteClassIter { + ByteClassIter { it: 0..self.alphabet_len() } + } + + /// Returns an iterator of the bytes in the given equivalence class. + pub(crate) fn elements(&self, class: u8) -> ByteClassElements { + ByteClassElements { classes: self, class, bytes: 0..=255 } + } + + /// Returns an iterator of byte ranges in the given equivalence class. + /// + /// That is, a sequence of contiguous ranges are returned. Typically, every + /// class maps to a single contiguous range. + fn element_ranges(&self, class: u8) -> ByteClassElementRanges { + ByteClassElementRanges { elements: self.elements(class), range: None } + } +} + +impl core::fmt::Debug for ByteClasses { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + if self.is_singleton() { + write!(f, "ByteClasses()") + } else { + write!(f, "ByteClasses(")?; + for (i, class) in self.iter().enumerate() { + if i > 0 { + write!(f, ", ")?; + } + write!(f, "{:?} => [", class)?; + for (start, end) in self.element_ranges(class) { + if start == end { + write!(f, "{:?}", start)?; + } else { + write!(f, "{:?}-{:?}", start, end)?; + } + } + write!(f, "]")?; + } + write!(f, ")") + } + } +} + +/// An iterator over each equivalence class. +#[derive(Debug)] +pub(crate) struct ByteClassIter { + it: core::ops::Range, +} + +impl Iterator for ByteClassIter { + type Item = u8; + + fn next(&mut self) -> Option { + self.it.next().map(|class| class.as_u8()) + } +} + +/// An iterator over all elements in a specific equivalence class. 
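+//
+// (Editor's note, a concrete illustration of the classes these iterators walk
+// over: starting from an empty ByteClassSet (defined below) and calling
+// set_range(b'a', b'z') followed by byte_classes() yields three classes:
+// class 0 for \x00-\x60, class 1 for a-z and class 2 for \x7B-\xFF, as the
+// tests at the bottom of this file check. For that alphabet, alphabet_len()
+// is 3, stride2() is 2 and stride() is 4.)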
+#[derive(Debug)] +pub(crate) struct ByteClassElements<'a> { + classes: &'a ByteClasses, + class: u8, + bytes: core::ops::RangeInclusive, +} + +impl<'a> Iterator for ByteClassElements<'a> { + type Item = u8; + + fn next(&mut self) -> Option { + while let Some(byte) = self.bytes.next() { + if self.class == self.classes.get(byte) { + return Some(byte); + } + } + None + } +} + +/// An iterator over all elements in an equivalence class expressed as a +/// sequence of contiguous ranges. +#[derive(Debug)] +pub(crate) struct ByteClassElementRanges<'a> { + elements: ByteClassElements<'a>, + range: Option<(u8, u8)>, +} + +impl<'a> Iterator for ByteClassElementRanges<'a> { + type Item = (u8, u8); + + fn next(&mut self) -> Option<(u8, u8)> { + loop { + let element = match self.elements.next() { + None => return self.range.take(), + Some(element) => element, + }; + match self.range.take() { + None => { + self.range = Some((element, element)); + } + Some((start, end)) => { + if usize::from(end) + 1 != usize::from(element) { + self.range = Some((element, element)); + return Some((start, end)); + } + self.range = Some((start, element)); + } + } + } + } +} + +/// A partitioning of bytes into equivalence classes. +/// +/// A byte class set keeps track of an *approximation* of equivalence classes +/// of bytes during NFA construction. That is, every byte in an equivalence +/// class cannot discriminate between a match and a non-match. +/// +/// Note that this may not compute the minimal set of equivalence classes. +/// Basically, any byte in a pattern given to the noncontiguous NFA builder +/// will automatically be treated as its own equivalence class. All other +/// bytes---any byte not in any pattern---will be treated as their own +/// equivalence classes. In theory, all bytes not in any pattern should +/// be part of a single equivalence class, but in practice, we only treat +/// contiguous ranges of bytes as an equivalence class. So the number of +/// classes computed may be bigger than necessary. This usually doesn't make +/// much of a difference, and keeps the implementation simple. +#[derive(Clone, Debug)] +pub(crate) struct ByteClassSet(ByteSet); + +impl Default for ByteClassSet { + fn default() -> ByteClassSet { + ByteClassSet::empty() + } +} + +impl ByteClassSet { + /// Create a new set of byte classes where all bytes are part of the same + /// equivalence class. + pub(crate) fn empty() -> Self { + ByteClassSet(ByteSet::empty()) + } + + /// Indicate the the range of byte given (inclusive) can discriminate a + /// match between it and all other bytes outside of the range. + pub(crate) fn set_range(&mut self, start: u8, end: u8) { + debug_assert!(start <= end); + if start > 0 { + self.0.add(start - 1); + } + self.0.add(end); + } + + /// Convert this boolean set to a map that maps all byte values to their + /// corresponding equivalence class. The last mapping indicates the largest + /// equivalence class identifier (which is never bigger than 255). + pub(crate) fn byte_classes(&self) -> ByteClasses { + let mut classes = ByteClasses::empty(); + let mut class = 0u8; + let mut b = 0u8; + loop { + classes.set(b, class); + if b == 255 { + break; + } + if self.0.contains(b) { + class = class.checked_add(1).unwrap(); + } + b = b.checked_add(1).unwrap(); + } + classes + } +} + +/// A simple set of bytes that is reasonably cheap to copy and allocation free. +#[derive(Clone, Copy, Debug, Default, Eq, PartialEq)] +pub(crate) struct ByteSet { + bits: BitSet, +} + +/// The representation of a byte set. 
Split out so that we can define a +/// convenient Debug impl for it while keeping "ByteSet" in the output. +#[derive(Clone, Copy, Default, Eq, PartialEq)] +struct BitSet([u128; 2]); + +impl ByteSet { + /// Create an empty set of bytes. + pub(crate) fn empty() -> ByteSet { + ByteSet { bits: BitSet([0; 2]) } + } + + /// Add a byte to this set. + /// + /// If the given byte already belongs to this set, then this is a no-op. + pub(crate) fn add(&mut self, byte: u8) { + let bucket = byte / 128; + let bit = byte % 128; + self.bits.0[usize::from(bucket)] |= 1 << bit; + } + + /// Return true if and only if the given byte is in this set. + pub(crate) fn contains(&self, byte: u8) -> bool { + let bucket = byte / 128; + let bit = byte % 128; + self.bits.0[usize::from(bucket)] & (1 << bit) > 0 + } +} + +impl core::fmt::Debug for BitSet { + fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { + let mut fmtd = f.debug_set(); + for b in 0u8..=255 { + if (ByteSet { bits: *self }).contains(b) { + fmtd.entry(&b); + } + } + fmtd.finish() + } +} + +#[cfg(test)] +mod tests { + use alloc::{vec, vec::Vec}; + + use super::*; + + #[test] + fn byte_classes() { + let mut set = ByteClassSet::empty(); + set.set_range(b'a', b'z'); + + let classes = set.byte_classes(); + assert_eq!(classes.get(0), 0); + assert_eq!(classes.get(1), 0); + assert_eq!(classes.get(2), 0); + assert_eq!(classes.get(b'a' - 1), 0); + assert_eq!(classes.get(b'a'), 1); + assert_eq!(classes.get(b'm'), 1); + assert_eq!(classes.get(b'z'), 1); + assert_eq!(classes.get(b'z' + 1), 2); + assert_eq!(classes.get(254), 2); + assert_eq!(classes.get(255), 2); + + let mut set = ByteClassSet::empty(); + set.set_range(0, 2); + set.set_range(4, 6); + let classes = set.byte_classes(); + assert_eq!(classes.get(0), 0); + assert_eq!(classes.get(1), 0); + assert_eq!(classes.get(2), 0); + assert_eq!(classes.get(3), 1); + assert_eq!(classes.get(4), 2); + assert_eq!(classes.get(5), 2); + assert_eq!(classes.get(6), 2); + assert_eq!(classes.get(7), 3); + assert_eq!(classes.get(255), 3); + } + + #[test] + fn full_byte_classes() { + let mut set = ByteClassSet::empty(); + for b in 0u8..=255 { + set.set_range(b, b); + } + assert_eq!(set.byte_classes().alphabet_len(), 256); + } + + #[test] + fn elements_typical() { + let mut set = ByteClassSet::empty(); + set.set_range(b'b', b'd'); + set.set_range(b'g', b'm'); + set.set_range(b'z', b'z'); + let classes = set.byte_classes(); + // class 0: \x00-a + // class 1: b-d + // class 2: e-f + // class 3: g-m + // class 4: n-y + // class 5: z-z + // class 6: \x7B-\xFF + assert_eq!(classes.alphabet_len(), 7); + + let elements = classes.elements(0).collect::>(); + assert_eq!(elements.len(), 98); + assert_eq!(elements[0], b'\x00'); + assert_eq!(elements[97], b'a'); + + let elements = classes.elements(1).collect::>(); + assert_eq!(elements, vec![b'b', b'c', b'd'],); + + let elements = classes.elements(2).collect::>(); + assert_eq!(elements, vec![b'e', b'f'],); + + let elements = classes.elements(3).collect::>(); + assert_eq!(elements, vec![b'g', b'h', b'i', b'j', b'k', b'l', b'm',],); + + let elements = classes.elements(4).collect::>(); + assert_eq!(elements.len(), 12); + assert_eq!(elements[0], b'n'); + assert_eq!(elements[11], b'y'); + + let elements = classes.elements(5).collect::>(); + assert_eq!(elements, vec![b'z']); + + let elements = classes.elements(6).collect::>(); + assert_eq!(elements.len(), 133); + assert_eq!(elements[0], b'\x7B'); + assert_eq!(elements[132], b'\xFF'); + } + + #[test] + fn elements_singletons() { + let 
classes = ByteClasses::singletons(); + assert_eq!(classes.alphabet_len(), 256); + + let elements = classes.elements(b'a').collect::>(); + assert_eq!(elements, vec![b'a']); + } + + #[test] + fn elements_empty() { + let classes = ByteClasses::empty(); + assert_eq!(classes.alphabet_len(), 1); + + let elements = classes.elements(0).collect::>(); + assert_eq!(elements.len(), 256); + assert_eq!(elements[0], b'\x00'); + assert_eq!(elements[255], b'\xFF'); + } +} diff --git a/vendor/aho-corasick/src/util/buffer.rs b/vendor/aho-corasick/src/util/buffer.rs new file mode 100644 index 00000000000000..e9e982af588592 --- /dev/null +++ b/vendor/aho-corasick/src/util/buffer.rs @@ -0,0 +1,124 @@ +use alloc::{vec, vec::Vec}; + +/// The default buffer capacity that we use for the stream buffer. +const DEFAULT_BUFFER_CAPACITY: usize = 64 * (1 << 10); // 64 KB + +/// A fairly simple roll buffer for supporting stream searches. +/// +/// This buffer acts as a temporary place to store a fixed amount of data when +/// reading from a stream. Its central purpose is to allow "rolling" some +/// suffix of the data to the beginning of the buffer before refilling it with +/// more data from the stream. For example, let's say we are trying to match +/// "foobar" on a stream. When we report the match, we'd like to not only +/// report the correct offsets at which the match occurs, but also the matching +/// bytes themselves. So let's say our stream is a file with the following +/// contents: `test test foobar test test`. Now assume that we happen to read +/// the aforementioned file in two chunks: `test test foo` and `bar test test`. +/// Naively, it would not be possible to report a single contiguous `foobar` +/// match, but this roll buffer allows us to do that. Namely, after the second +/// read, the contents of the buffer should be `st foobar test test`, where the +/// search should ultimately resume immediately after `foo`. (The prefix `st ` +/// is included because the roll buffer saves N bytes at the end of the buffer, +/// where N is the maximum possible length of a match.) +/// +/// A lot of the logic for dealing with this is unfortunately split out between +/// this roll buffer and the `StreamChunkIter`. +/// +/// Note also that this buffer is not actually required to just report matches. +/// Because a `Match` is just some offsets. But it *is* required for supporting +/// things like `try_stream_replace_all` because that needs some mechanism for +/// knowing which bytes in the stream correspond to a match and which don't. So +/// when a match occurs across two `read` calls, *something* needs to retain +/// the bytes from the previous `read` call because you don't know before the +/// second read call whether a match exists or not. +#[derive(Debug)] +pub(crate) struct Buffer { + /// The raw buffer contents. This has a fixed size and never increases. + buf: Vec, + /// The minimum size of the buffer, which is equivalent to the maximum + /// possible length of a match. This corresponds to the amount that we + /// roll + min: usize, + /// The end of the contents of this buffer. + end: usize, +} + +impl Buffer { + /// Create a new buffer for stream searching. The minimum buffer length + /// given should be the size of the maximum possible match length. + pub(crate) fn new(min_buffer_len: usize) -> Buffer { + let min = core::cmp::max(1, min_buffer_len); + // The minimum buffer amount is also the amount that we roll our + // buffer in order to support incremental searching. 
To this end, + // our actual capacity needs to be at least 1 byte bigger than our + // minimum amount, otherwise we won't have any overlap. In actuality, + // we want our buffer to be a bit bigger than that for performance + // reasons, so we set a lower bound of `8 * min`. + // + // TODO: It would be good to find a way to test the streaming + // implementation with the minimal buffer size. For now, we just + // uncomment out the next line and comment out the subsequent line. + // let capacity = 1 + min; + let capacity = core::cmp::max(min * 8, DEFAULT_BUFFER_CAPACITY); + Buffer { buf: vec![0; capacity], min, end: 0 } + } + + /// Return the contents of this buffer. + #[inline] + pub(crate) fn buffer(&self) -> &[u8] { + &self.buf[..self.end] + } + + /// Return the minimum size of the buffer. The only way a buffer may be + /// smaller than this is if the stream itself contains less than the + /// minimum buffer amount. + #[inline] + pub(crate) fn min_buffer_len(&self) -> usize { + self.min + } + + /// Return all free capacity in this buffer. + fn free_buffer(&mut self) -> &mut [u8] { + &mut self.buf[self.end..] + } + + /// Refill the contents of this buffer by reading as much as possible into + /// this buffer's free capacity. If no more bytes could be read, then this + /// returns false. Otherwise, this reads until it has filled the buffer + /// past the minimum amount. + pub(crate) fn fill( + &mut self, + mut rdr: R, + ) -> std::io::Result { + let mut readany = false; + loop { + let readlen = rdr.read(self.free_buffer())?; + if readlen == 0 { + return Ok(readany); + } + readany = true; + self.end += readlen; + if self.buffer().len() >= self.min { + return Ok(true); + } + } + } + + /// Roll the contents of the buffer so that the suffix of this buffer is + /// moved to the front and all other contents are dropped. The size of the + /// suffix corresponds precisely to the minimum buffer length. + /// + /// This should only be called when the entire contents of this buffer have + /// been searched. + pub(crate) fn roll(&mut self) { + let roll_start = self + .end + .checked_sub(self.min) + .expect("buffer capacity should be bigger than minimum amount"); + let roll_end = roll_start + self.min; + + assert!(roll_end <= self.end); + self.buf.copy_within(roll_start..roll_end, 0); + self.end = self.min; + } +} diff --git a/vendor/aho-corasick/src/util/byte_frequencies.rs b/vendor/aho-corasick/src/util/byte_frequencies.rs new file mode 100644 index 00000000000000..c313b629db5d53 --- /dev/null +++ b/vendor/aho-corasick/src/util/byte_frequencies.rs @@ -0,0 +1,258 @@ +pub const BYTE_FREQUENCIES: [u8; 256] = [ + 55, // '\x00' + 52, // '\x01' + 51, // '\x02' + 50, // '\x03' + 49, // '\x04' + 48, // '\x05' + 47, // '\x06' + 46, // '\x07' + 45, // '\x08' + 103, // '\t' + 242, // '\n' + 66, // '\x0b' + 67, // '\x0c' + 229, // '\r' + 44, // '\x0e' + 43, // '\x0f' + 42, // '\x10' + 41, // '\x11' + 40, // '\x12' + 39, // '\x13' + 38, // '\x14' + 37, // '\x15' + 36, // '\x16' + 35, // '\x17' + 34, // '\x18' + 33, // '\x19' + 56, // '\x1a' + 32, // '\x1b' + 31, // '\x1c' + 30, // '\x1d' + 29, // '\x1e' + 28, // '\x1f' + 255, // ' ' + 148, // '!' + 164, // '"' + 149, // '#' + 136, // '$' + 160, // '%' + 155, // '&' + 173, // "'" + 221, // '(' + 222, // ')' + 134, // '*' + 122, // '+' + 232, // ',' + 202, // '-' + 215, // '.' 
+ 224, // '/' + 208, // '0' + 220, // '1' + 204, // '2' + 187, // '3' + 183, // '4' + 179, // '5' + 177, // '6' + 168, // '7' + 178, // '8' + 200, // '9' + 226, // ':' + 195, // ';' + 154, // '<' + 184, // '=' + 174, // '>' + 126, // '?' + 120, // '@' + 191, // 'A' + 157, // 'B' + 194, // 'C' + 170, // 'D' + 189, // 'E' + 162, // 'F' + 161, // 'G' + 150, // 'H' + 193, // 'I' + 142, // 'J' + 137, // 'K' + 171, // 'L' + 176, // 'M' + 185, // 'N' + 167, // 'O' + 186, // 'P' + 112, // 'Q' + 175, // 'R' + 192, // 'S' + 188, // 'T' + 156, // 'U' + 140, // 'V' + 143, // 'W' + 123, // 'X' + 133, // 'Y' + 128, // 'Z' + 147, // '[' + 138, // '\\' + 146, // ']' + 114, // '^' + 223, // '_' + 151, // '`' + 249, // 'a' + 216, // 'b' + 238, // 'c' + 236, // 'd' + 253, // 'e' + 227, // 'f' + 218, // 'g' + 230, // 'h' + 247, // 'i' + 135, // 'j' + 180, // 'k' + 241, // 'l' + 233, // 'm' + 246, // 'n' + 244, // 'o' + 231, // 'p' + 139, // 'q' + 245, // 'r' + 243, // 's' + 251, // 't' + 235, // 'u' + 201, // 'v' + 196, // 'w' + 240, // 'x' + 214, // 'y' + 152, // 'z' + 182, // '{' + 205, // '|' + 181, // '}' + 127, // '~' + 27, // '\x7f' + 212, // '\x80' + 211, // '\x81' + 210, // '\x82' + 213, // '\x83' + 228, // '\x84' + 197, // '\x85' + 169, // '\x86' + 159, // '\x87' + 131, // '\x88' + 172, // '\x89' + 105, // '\x8a' + 80, // '\x8b' + 98, // '\x8c' + 96, // '\x8d' + 97, // '\x8e' + 81, // '\x8f' + 207, // '\x90' + 145, // '\x91' + 116, // '\x92' + 115, // '\x93' + 144, // '\x94' + 130, // '\x95' + 153, // '\x96' + 121, // '\x97' + 107, // '\x98' + 132, // '\x99' + 109, // '\x9a' + 110, // '\x9b' + 124, // '\x9c' + 111, // '\x9d' + 82, // '\x9e' + 108, // '\x9f' + 118, // '\xa0' + 141, // '¡' + 113, // '¢' + 129, // '£' + 119, // '¤' + 125, // '¥' + 165, // '¦' + 117, // '§' + 92, // '¨' + 106, // '©' + 83, // 'ª' + 72, // '«' + 99, // '¬' + 93, // '\xad' + 65, // '®' + 79, // '¯' + 166, // '°' + 237, // '±' + 163, // '²' + 199, // '³' + 190, // '´' + 225, // 'µ' + 209, // '¶' + 203, // '·' + 198, // '¸' + 217, // '¹' + 219, // 'º' + 206, // '»' + 234, // '¼' + 248, // '½' + 158, // '¾' + 239, // '¿' + 255, // 'À' + 255, // 'Á' + 255, // 'Â' + 255, // 'Ã' + 255, // 'Ä' + 255, // 'Å' + 255, // 'Æ' + 255, // 'Ç' + 255, // 'È' + 255, // 'É' + 255, // 'Ê' + 255, // 'Ë' + 255, // 'Ì' + 255, // 'Í' + 255, // 'Î' + 255, // 'Ï' + 255, // 'Ð' + 255, // 'Ñ' + 255, // 'Ò' + 255, // 'Ó' + 255, // 'Ô' + 255, // 'Õ' + 255, // 'Ö' + 255, // '×' + 255, // 'Ø' + 255, // 'Ù' + 255, // 'Ú' + 255, // 'Û' + 255, // 'Ü' + 255, // 'Ý' + 255, // 'Þ' + 255, // 'ß' + 255, // 'à' + 255, // 'á' + 255, // 'â' + 255, // 'ã' + 255, // 'ä' + 255, // 'å' + 255, // 'æ' + 255, // 'ç' + 255, // 'è' + 255, // 'é' + 255, // 'ê' + 255, // 'ë' + 255, // 'ì' + 255, // 'í' + 255, // 'î' + 255, // 'ï' + 255, // 'ð' + 255, // 'ñ' + 255, // 'ò' + 255, // 'ó' + 255, // 'ô' + 255, // 'õ' + 255, // 'ö' + 255, // '÷' + 255, // 'ø' + 255, // 'ù' + 255, // 'ú' + 255, // 'û' + 255, // 'ü' + 255, // 'ý' + 255, // 'þ' + 255, // 'ÿ' +]; diff --git a/vendor/aho-corasick/src/util/debug.rs b/vendor/aho-corasick/src/util/debug.rs new file mode 100644 index 00000000000000..22b5f2231f282b --- /dev/null +++ b/vendor/aho-corasick/src/util/debug.rs @@ -0,0 +1,26 @@ +/// A type that wraps a single byte with a convenient fmt::Debug impl that +/// escapes the byte. +pub(crate) struct DebugByte(pub(crate) u8); + +impl core::fmt::Debug for DebugByte { + fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { + // Special case ASCII space. 
It's too hard to read otherwise, so + // put quotes around it. I sometimes wonder whether just '\x20' would + // be better... + if self.0 == b' ' { + return write!(f, "' '"); + } + // 10 bytes is enough to cover any output from ascii::escape_default. + let mut bytes = [0u8; 10]; + let mut len = 0; + for (i, mut b) in core::ascii::escape_default(self.0).enumerate() { + // capitalize \xab to \xAB + if i >= 2 && b'a' <= b && b <= b'f' { + b -= 32; + } + bytes[len] = b; + len += 1; + } + write!(f, "{}", core::str::from_utf8(&bytes[..len]).unwrap()) + } +} diff --git a/vendor/aho-corasick/src/util/error.rs b/vendor/aho-corasick/src/util/error.rs new file mode 100644 index 00000000000000..326d04657b2480 --- /dev/null +++ b/vendor/aho-corasick/src/util/error.rs @@ -0,0 +1,259 @@ +use crate::util::{ + primitives::{PatternID, SmallIndex}, + search::MatchKind, +}; + +/// An error that occurred during the construction of an Aho-Corasick +/// automaton. +/// +/// Build errors occur when some kind of limit has been exceeded, either in the +/// number of states, the number of patterns of the length of a pattern. These +/// limits aren't part of the public API, but they should generally be large +/// enough to handle most use cases. +/// +/// When the `std` feature is enabled, this implements the `std::error::Error` +/// trait. +#[derive(Clone, Debug)] +pub struct BuildError { + kind: ErrorKind, +} + +/// The kind of error that occurred. +#[derive(Clone, Debug)] +enum ErrorKind { + /// An error that occurs when allocating a new state would result in an + /// identifier that exceeds the capacity of a `StateID`. + StateIDOverflow { + /// The maximum possible id. + max: u64, + /// The maximum ID requested. + requested_max: u64, + }, + /// An error that occurs when adding a pattern to an Aho-Corasick + /// automaton would result in an identifier that exceeds the capacity of a + /// `PatternID`. + PatternIDOverflow { + /// The maximum possible id. + max: u64, + /// The maximum ID requested. + requested_max: u64, + }, + /// Occurs when a pattern string is given to the Aho-Corasick constructor + /// that is too long. + PatternTooLong { + /// The ID of the pattern that was too long. + pattern: PatternID, + /// The length that was too long. 
+ len: usize, + }, +} + +impl BuildError { + pub(crate) fn state_id_overflow( + max: u64, + requested_max: u64, + ) -> BuildError { + BuildError { kind: ErrorKind::StateIDOverflow { max, requested_max } } + } + + pub(crate) fn pattern_id_overflow( + max: u64, + requested_max: u64, + ) -> BuildError { + BuildError { + kind: ErrorKind::PatternIDOverflow { max, requested_max }, + } + } + + pub(crate) fn pattern_too_long( + pattern: PatternID, + len: usize, + ) -> BuildError { + BuildError { kind: ErrorKind::PatternTooLong { pattern, len } } + } +} + +#[cfg(feature = "std")] +impl std::error::Error for BuildError {} + +impl core::fmt::Display for BuildError { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + match self.kind { + ErrorKind::StateIDOverflow { max, requested_max } => { + write!( + f, + "state identifier overflow: failed to create state ID \ + from {}, which exceeds the max of {}", + requested_max, max, + ) + } + ErrorKind::PatternIDOverflow { max, requested_max } => { + write!( + f, + "pattern identifier overflow: failed to create pattern ID \ + from {}, which exceeds the max of {}", + requested_max, max, + ) + } + ErrorKind::PatternTooLong { pattern, len } => { + write!( + f, + "pattern {} with length {} exceeds \ + the maximum pattern length of {}", + pattern.as_usize(), + len, + SmallIndex::MAX.as_usize(), + ) + } + } + } +} + +/// An error that occurred during an Aho-Corasick search. +/// +/// An error that occurs during a search is limited to some kind of +/// misconfiguration that resulted in an illegal call. Stated differently, +/// whether an error occurs is not dependent on the specific bytes in the +/// haystack. +/// +/// Examples of misconfiguration: +/// +/// * Executing a stream or overlapping search on a searcher that was built was +/// something other than [`MatchKind::Standard`](crate::MatchKind::Standard) +/// semantics. +/// * Requested an anchored or an unanchored search on a searcher that doesn't +/// support unanchored or anchored searches, respectively. +/// +/// When the `std` feature is enabled, this implements the `std::error::Error` +/// trait. +#[derive(Clone, Debug, Eq, PartialEq)] +pub struct MatchError(alloc::boxed::Box); + +impl MatchError { + /// Create a new error value with the given kind. + /// + /// This is a more verbose version of the kind-specific constructors, e.g., + /// `MatchError::unsupported_stream`. + pub fn new(kind: MatchErrorKind) -> MatchError { + MatchError(alloc::boxed::Box::new(kind)) + } + + /// Returns a reference to the underlying error kind. + pub fn kind(&self) -> &MatchErrorKind { + &self.0 + } + + /// Create a new "invalid anchored search" error. This occurs when the + /// caller requests an anchored search but where anchored searches aren't + /// supported. + /// + /// This is the same as calling `MatchError::new` with a + /// [`MatchErrorKind::InvalidInputAnchored`] kind. + pub fn invalid_input_anchored() -> MatchError { + MatchError::new(MatchErrorKind::InvalidInputAnchored) + } + + /// Create a new "invalid unanchored search" error. This occurs when the + /// caller requests an unanchored search but where unanchored searches + /// aren't supported. + /// + /// This is the same as calling `MatchError::new` with a + /// [`MatchErrorKind::InvalidInputUnanchored`] kind. + pub fn invalid_input_unanchored() -> MatchError { + MatchError::new(MatchErrorKind::InvalidInputUnanchored) + } + + /// Create a new "unsupported stream search" error. 
This occurs when the + /// caller requests a stream search while using an Aho-Corasick automaton + /// with a match kind other than [`MatchKind::Standard`]. + /// + /// The match kind given should be the match kind of the automaton. It + /// should never be `MatchKind::Standard`. + pub fn unsupported_stream(got: MatchKind) -> MatchError { + MatchError::new(MatchErrorKind::UnsupportedStream { got }) + } + + /// Create a new "unsupported overlapping search" error. This occurs when + /// the caller requests an overlapping search while using an Aho-Corasick + /// automaton with a match kind other than [`MatchKind::Standard`]. + /// + /// The match kind given should be the match kind of the automaton. It + /// should never be `MatchKind::Standard`. + pub fn unsupported_overlapping(got: MatchKind) -> MatchError { + MatchError::new(MatchErrorKind::UnsupportedOverlapping { got }) + } + + /// Create a new "unsupported empty pattern" error. This occurs when the + /// caller requests a search for which matching an automaton that contains + /// an empty pattern string is not supported. + pub fn unsupported_empty() -> MatchError { + MatchError::new(MatchErrorKind::UnsupportedEmpty) + } +} + +/// The underlying kind of a [`MatchError`]. +/// +/// This is a **non-exhaustive** enum. That means new variants may be added in +/// a semver-compatible release. +#[non_exhaustive] +#[derive(Clone, Debug, Eq, PartialEq)] +pub enum MatchErrorKind { + /// An error indicating that an anchored search was requested, but from a + /// searcher that was built without anchored support. + InvalidInputAnchored, + /// An error indicating that an unanchored search was requested, but from a + /// searcher that was built without unanchored support. + InvalidInputUnanchored, + /// An error indicating that a stream search was attempted on an + /// Aho-Corasick automaton with an unsupported `MatchKind`. + UnsupportedStream { + /// The match semantics for the automaton that was used. + got: MatchKind, + }, + /// An error indicating that an overlapping search was attempted on an + /// Aho-Corasick automaton with an unsupported `MatchKind`. + UnsupportedOverlapping { + /// The match semantics for the automaton that was used. + got: MatchKind, + }, + /// An error indicating that the operation requested doesn't support + /// automatons that contain an empty pattern string. + UnsupportedEmpty, +} + +#[cfg(feature = "std")] +impl std::error::Error for MatchError {} + +impl core::fmt::Display for MatchError { + fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { + match *self.kind() { + MatchErrorKind::InvalidInputAnchored => { + write!(f, "anchored searches are not supported or enabled") + } + MatchErrorKind::InvalidInputUnanchored => { + write!(f, "unanchored searches are not supported or enabled") + } + MatchErrorKind::UnsupportedStream { got } => { + write!( + f, + "match kind {:?} does not support stream searching", + got, + ) + } + MatchErrorKind::UnsupportedOverlapping { got } => { + write!( + f, + "match kind {:?} does not support overlapping searches", + got, + ) + } + MatchErrorKind::UnsupportedEmpty => { + write!( + f, + "matching with an empty pattern string is not \ + supported for this operation", + ) + } + } + } +} diff --git a/vendor/aho-corasick/src/util/int.rs b/vendor/aho-corasick/src/util/int.rs new file mode 100644 index 00000000000000..54762b66046893 --- /dev/null +++ b/vendor/aho-corasick/src/util/int.rs @@ -0,0 +1,278 @@ +/*! 
+This module provides several integer oriented traits for converting between +both fixed size integers and integers whose size varies based on the target +(like `usize`). + +The main design principle for this module is to centralize all uses of `as`. +The thinking here is that `as` makes it very easy to perform accidental lossy +conversions, and if we centralize all its uses here under more descriptive +higher level operations, its use and correctness becomes easier to audit. + +This was copied mostly wholesale from `regex-automata`. + +NOTE: for simplicity, we don't take target pointer width into account here for +`usize` conversions. Since we currently only panic in debug mode, skipping the +check when it can be proven it isn't needed at compile time doesn't really +matter. Now, if we wind up wanting to do as many checks as possible in release +mode, then we would want to skip those when we know the conversions are always +non-lossy. +*/ + +// We define a little more than what we need, but I'd rather just have +// everything via a consistent and uniform API then have holes. +#![allow(dead_code)] + +pub(crate) trait U8 { + fn as_usize(self) -> usize; +} + +impl U8 for u8 { + fn as_usize(self) -> usize { + usize::from(self) + } +} + +pub(crate) trait U16 { + fn as_usize(self) -> usize; + fn low_u8(self) -> u8; + fn high_u8(self) -> u8; +} + +impl U16 for u16 { + fn as_usize(self) -> usize { + usize::from(self) + } + + fn low_u8(self) -> u8 { + self as u8 + } + + fn high_u8(self) -> u8 { + (self >> 8) as u8 + } +} + +pub(crate) trait U32 { + fn as_usize(self) -> usize; + fn low_u8(self) -> u8; + fn low_u16(self) -> u16; + fn high_u16(self) -> u16; +} + +impl U32 for u32 { + #[inline] + fn as_usize(self) -> usize { + #[cfg(debug_assertions)] + { + usize::try_from(self).expect("u32 overflowed usize") + } + #[cfg(not(debug_assertions))] + { + self as usize + } + } + + fn low_u8(self) -> u8 { + self as u8 + } + + fn low_u16(self) -> u16 { + self as u16 + } + + fn high_u16(self) -> u16 { + (self >> 16) as u16 + } +} + +pub(crate) trait U64 { + fn as_usize(self) -> usize; + fn low_u8(self) -> u8; + fn low_u16(self) -> u16; + fn low_u32(self) -> u32; + fn high_u32(self) -> u32; +} + +impl U64 for u64 { + fn as_usize(self) -> usize { + #[cfg(debug_assertions)] + { + usize::try_from(self).expect("u64 overflowed usize") + } + #[cfg(not(debug_assertions))] + { + self as usize + } + } + + fn low_u8(self) -> u8 { + self as u8 + } + + fn low_u16(self) -> u16 { + self as u16 + } + + fn low_u32(self) -> u32 { + self as u32 + } + + fn high_u32(self) -> u32 { + (self >> 32) as u32 + } +} + +pub(crate) trait I8 { + fn as_usize(self) -> usize; + fn to_bits(self) -> u8; + fn from_bits(n: u8) -> i8; +} + +impl I8 for i8 { + fn as_usize(self) -> usize { + #[cfg(debug_assertions)] + { + usize::try_from(self).expect("i8 overflowed usize") + } + #[cfg(not(debug_assertions))] + { + self as usize + } + } + + fn to_bits(self) -> u8 { + self as u8 + } + + fn from_bits(n: u8) -> i8 { + n as i8 + } +} + +pub(crate) trait I32 { + fn as_usize(self) -> usize; + fn to_bits(self) -> u32; + fn from_bits(n: u32) -> i32; +} + +impl I32 for i32 { + fn as_usize(self) -> usize { + #[cfg(debug_assertions)] + { + usize::try_from(self).expect("i32 overflowed usize") + } + #[cfg(not(debug_assertions))] + { + self as usize + } + } + + fn to_bits(self) -> u32 { + self as u32 + } + + fn from_bits(n: u32) -> i32 { + n as i32 + } +} + +pub(crate) trait I64 { + fn as_usize(self) -> usize; + fn to_bits(self) -> u64; + fn from_bits(n: u64) -> 
i64; +} + +impl I64 for i64 { + fn as_usize(self) -> usize { + #[cfg(debug_assertions)] + { + usize::try_from(self).expect("i64 overflowed usize") + } + #[cfg(not(debug_assertions))] + { + self as usize + } + } + + fn to_bits(self) -> u64 { + self as u64 + } + + fn from_bits(n: u64) -> i64 { + n as i64 + } +} + +pub(crate) trait Usize { + fn as_u8(self) -> u8; + fn as_u16(self) -> u16; + fn as_u32(self) -> u32; + fn as_u64(self) -> u64; +} + +impl Usize for usize { + fn as_u8(self) -> u8 { + #[cfg(debug_assertions)] + { + u8::try_from(self).expect("usize overflowed u8") + } + #[cfg(not(debug_assertions))] + { + self as u8 + } + } + + fn as_u16(self) -> u16 { + #[cfg(debug_assertions)] + { + u16::try_from(self).expect("usize overflowed u16") + } + #[cfg(not(debug_assertions))] + { + self as u16 + } + } + + fn as_u32(self) -> u32 { + #[cfg(debug_assertions)] + { + u32::try_from(self).expect("usize overflowed u32") + } + #[cfg(not(debug_assertions))] + { + self as u32 + } + } + + fn as_u64(self) -> u64 { + #[cfg(debug_assertions)] + { + u64::try_from(self).expect("usize overflowed u64") + } + #[cfg(not(debug_assertions))] + { + self as u64 + } + } +} + +// Pointers aren't integers, but we convert pointers to integers to perform +// offset arithmetic in some places. (And no, we don't convert the integers +// back to pointers.) So add 'as_usize' conversions here too for completeness. +// +// These 'as' casts are actually okay because they're always non-lossy. But the +// idea here is to just try and remove as much 'as' as possible, particularly +// in this crate where we are being really paranoid about offsets and making +// sure we don't panic on inputs that might be untrusted. This way, the 'as' +// casts become easier to audit if they're all in one place, even when some of +// them are actually okay 100% of the time. + +pub(crate) trait Pointer { + fn as_usize(self) -> usize; +} + +impl Pointer for *const T { + fn as_usize(self) -> usize { + self as usize + } +} diff --git a/vendor/aho-corasick/src/util/mod.rs b/vendor/aho-corasick/src/util/mod.rs new file mode 100644 index 00000000000000..f7a1ddd07b8f85 --- /dev/null +++ b/vendor/aho-corasick/src/util/mod.rs @@ -0,0 +1,12 @@ +pub(crate) mod alphabet; +#[cfg(feature = "std")] +pub(crate) mod buffer; +pub(crate) mod byte_frequencies; +pub(crate) mod debug; +pub(crate) mod error; +pub(crate) mod int; +pub(crate) mod prefilter; +pub(crate) mod primitives; +pub(crate) mod remapper; +pub(crate) mod search; +pub(crate) mod special; diff --git a/vendor/aho-corasick/src/util/prefilter.rs b/vendor/aho-corasick/src/util/prefilter.rs new file mode 100644 index 00000000000000..ec3171694f10cf --- /dev/null +++ b/vendor/aho-corasick/src/util/prefilter.rs @@ -0,0 +1,924 @@ +use core::{ + cmp, + fmt::Debug, + panic::{RefUnwindSafe, UnwindSafe}, + u8, +}; + +use alloc::{sync::Arc, vec, vec::Vec}; + +use crate::{ + packed, + util::{ + alphabet::ByteSet, + search::{Match, MatchKind, Span}, + }, +}; + +/// A prefilter for accelerating a search. +/// +/// This crate uses prefilters in the core search implementations to accelerate +/// common cases. They typically only apply to cases where there are a small +/// number of patterns (less than 100 or so), but when they do, thoughput can +/// be boosted considerably, perhaps by an order of magnitude. When a prefilter +/// is active, it is used whenever a search enters an automaton's start state. +/// +/// Currently, prefilters cannot be constructed by +/// callers. 
A `Prefilter` can only be accessed via the +/// [`Automaton::prefilter`](crate::automaton::Automaton::prefilter) +/// method and used to execute a search. In other words, a prefilter can be +/// used to optimize your own search implementation if necessary, but cannot do +/// much else. If you have a use case for more APIs, please submit an issue. +#[derive(Clone, Debug)] +pub struct Prefilter { + finder: Arc, + memory_usage: usize, +} + +impl Prefilter { + /// Execute a search in the haystack within the span given. If a match or + /// a possible match is returned, then it is guaranteed to occur within + /// the bounds of the span. + /// + /// If the span provided is invalid for the given haystack, then behavior + /// is unspecified. + #[inline] + pub fn find_in(&self, haystack: &[u8], span: Span) -> Candidate { + self.finder.find_in(haystack, span) + } + + #[inline] + pub(crate) fn memory_usage(&self) -> usize { + self.memory_usage + } +} + +/// A candidate is the result of running a prefilter on a haystack at a +/// particular position. +/// +/// The result is either no match, a confirmed match or a possible match. +/// +/// When no match is returned, the prefilter is guaranteeing that no possible +/// match can be found in the haystack, and the caller may trust this. That is, +/// all correct prefilters must never report false negatives. +/// +/// In some cases, a prefilter can confirm a match very quickly, in which case, +/// the caller may use this to stop what it's doing and report the match. In +/// this case, prefilter implementations must never report a false positive. +/// In other cases, the prefilter can only report a potential match, in which +/// case the callers must attempt to confirm the match. In this case, prefilter +/// implementations are permitted to return false positives. +#[derive(Clone, Debug)] +pub enum Candidate { + /// No match was found. Since false negatives are not possible, this means + /// the search can quit as it is guaranteed not to find another match. + None, + /// A confirmed match was found. Callers do not need to confirm it. + Match(Match), + /// The start of a possible match was found. Callers must confirm it before + /// reporting it as a match. + PossibleStartOfMatch(usize), +} + +impl Candidate { + /// Convert this candidate into an option. This is useful when callers + /// do not distinguish between true positives and false positives (i.e., + /// the caller must always confirm the match). + pub fn into_option(self) -> Option { + match self { + Candidate::None => None, + Candidate::Match(ref m) => Some(m.start()), + Candidate::PossibleStartOfMatch(start) => Some(start), + } + } +} + +/// A prefilter describes the behavior of fast literal scanners for quickly +/// skipping past bytes in the haystack that we know cannot possibly +/// participate in a match. +trait PrefilterI: + Send + Sync + RefUnwindSafe + UnwindSafe + Debug + 'static +{ + /// Returns the next possible match candidate. This may yield false + /// positives, so callers must confirm a match starting at the position + /// returned. This, however, must never produce false negatives. That is, + /// this must, at minimum, return the starting position of the next match + /// in the given haystack after or at the given position. + fn find_in(&self, haystack: &[u8], span: Span) -> Candidate; +} + +impl PrefilterI for Arc

{ + #[inline(always)] + fn find_in(&self, haystack: &[u8], span: Span) -> Candidate { + (**self).find_in(haystack, span) + } +} + +/// A builder for constructing the best possible prefilter. When constructed, +/// this builder will heuristically select the best prefilter it can build, +/// if any, and discard the rest. +#[derive(Debug)] +pub(crate) struct Builder { + count: usize, + ascii_case_insensitive: bool, + start_bytes: StartBytesBuilder, + rare_bytes: RareBytesBuilder, + memmem: MemmemBuilder, + packed: Option, + // If we run across a condition that suggests we shouldn't use a prefilter + // at all (like an empty pattern), then disable prefilters entirely. + enabled: bool, +} + +impl Builder { + /// Create a new builder for constructing the best possible prefilter. + pub(crate) fn new(kind: MatchKind) -> Builder { + let pbuilder = kind + .as_packed() + .map(|kind| packed::Config::new().match_kind(kind).builder()); + Builder { + count: 0, + ascii_case_insensitive: false, + start_bytes: StartBytesBuilder::new(), + rare_bytes: RareBytesBuilder::new(), + memmem: MemmemBuilder::default(), + packed: pbuilder, + enabled: true, + } + } + + /// Enable ASCII case insensitivity. When set, byte strings added to this + /// builder will be interpreted without respect to ASCII case. + pub(crate) fn ascii_case_insensitive(mut self, yes: bool) -> Builder { + self.ascii_case_insensitive = yes; + self.start_bytes = self.start_bytes.ascii_case_insensitive(yes); + self.rare_bytes = self.rare_bytes.ascii_case_insensitive(yes); + self + } + + /// Return a prefilter suitable for quickly finding potential matches. + /// + /// All patterns added to an Aho-Corasick automaton should be added to this + /// builder before attempting to construct the prefilter. + pub(crate) fn build(&self) -> Option { + if !self.enabled { + debug!("prefilter not enabled, skipping"); + return None; + } + // If we only have one pattern, then deferring to memmem is always + // the best choice. This is kind of a weird case, because, well, why + // use Aho-Corasick if you only have one pattern? But maybe you don't + // know exactly how many patterns you'll get up front, and you need to + // support the option of multiple patterns. So instead of relying on + // the caller to branch and use memmem explicitly, we just do it for + // them. + if !self.ascii_case_insensitive { + if let Some(pre) = self.memmem.build() { + debug!("using memmem prefilter"); + return Some(pre); + } + } + let (packed, patlen, minlen) = if self.ascii_case_insensitive { + (None, usize::MAX, 0) + } else { + let patlen = self.packed.as_ref().map_or(usize::MAX, |p| p.len()); + let minlen = self.packed.as_ref().map_or(0, |p| p.minimum_len()); + let packed = + self.packed.as_ref().and_then(|b| b.build()).map(|s| { + let memory_usage = s.memory_usage(); + debug!( + "built packed prefilter (len: {}, \ + minimum pattern len: {}, memory usage: {}) \ + for consideration", + patlen, minlen, memory_usage, + ); + Prefilter { finder: Arc::new(Packed(s)), memory_usage } + }); + (packed, patlen, minlen) + }; + match (self.start_bytes.build(), self.rare_bytes.build()) { + // If we could build both start and rare prefilters, then there are + // a few cases in which we'd want to use the start-byte prefilter + // over the rare-byte prefilter, since the former has lower + // overhead. 
+ (prestart @ Some(_), prerare @ Some(_)) => { + debug!( + "both start (len={}, rank={}) and \ + rare (len={}, rank={}) byte prefilters \ + are available", + self.start_bytes.count, + self.start_bytes.rank_sum, + self.rare_bytes.count, + self.rare_bytes.rank_sum, + ); + if patlen <= 16 + && minlen >= 2 + && self.start_bytes.count >= 3 + && self.rare_bytes.count >= 3 + { + debug!( + "start and rare byte prefilters available, but \ + they're probably slower than packed so using \ + packed" + ); + return packed; + } + // If the start-byte prefilter can scan for a smaller number + // of bytes than the rare-byte prefilter, then it's probably + // faster. + let has_fewer_bytes = + self.start_bytes.count < self.rare_bytes.count; + // Otherwise, if the combined frequency rank of the detected + // bytes in the start-byte prefilter is "close" to the combined + // frequency rank of the rare-byte prefilter, then we pick + // the start-byte prefilter even if the rare-byte prefilter + // heuristically searches for rare bytes. This is because the + // rare-byte prefilter has higher constant costs, so we tend to + // prefer the start-byte prefilter when we can. + let has_rarer_bytes = + self.start_bytes.rank_sum <= self.rare_bytes.rank_sum + 50; + if has_fewer_bytes { + debug!( + "using start byte prefilter because it has fewer + bytes to search for than the rare byte prefilter", + ); + prestart + } else if has_rarer_bytes { + debug!( + "using start byte prefilter because its byte \ + frequency rank was determined to be \ + \"good enough\" relative to the rare byte prefilter \ + byte frequency rank", + ); + prestart + } else { + debug!("using rare byte prefilter"); + prerare + } + } + (prestart @ Some(_), None) => { + if patlen <= 16 && minlen >= 2 && self.start_bytes.count >= 3 { + debug!( + "start byte prefilter available, but \ + it's probably slower than packed so using \ + packed" + ); + return packed; + } + debug!( + "have start byte prefilter but not rare byte prefilter, \ + so using start byte prefilter", + ); + prestart + } + (None, prerare @ Some(_)) => { + if patlen <= 16 && minlen >= 2 && self.rare_bytes.count >= 3 { + debug!( + "rare byte prefilter available, but \ + it's probably slower than packed so using \ + packed" + ); + return packed; + } + debug!( + "have rare byte prefilter but not start byte prefilter, \ + so using rare byte prefilter", + ); + prerare + } + (None, None) if self.ascii_case_insensitive => { + debug!( + "no start or rare byte prefilter and ASCII case \ + insensitivity was enabled, so skipping prefilter", + ); + None + } + (None, None) => { + if packed.is_some() { + debug!("falling back to packed prefilter"); + } else { + debug!("no prefilter available"); + } + packed + } + } + } + + /// Add a literal string to this prefilter builder. + pub(crate) fn add(&mut self, bytes: &[u8]) { + if bytes.is_empty() { + self.enabled = false; + } + if !self.enabled { + return; + } + self.count += 1; + self.start_bytes.add(bytes); + self.rare_bytes.add(bytes); + self.memmem.add(bytes); + if let Some(ref mut pbuilder) = self.packed { + pbuilder.add(bytes); + } + } +} + +/// A type that wraps a packed searcher and implements the `Prefilter` +/// interface. +#[derive(Clone, Debug)] +struct Packed(packed::Searcher); + +impl PrefilterI for Packed { + fn find_in(&self, haystack: &[u8], span: Span) -> Candidate { + self.0 + .find_in(haystack, span) + .map_or(Candidate::None, Candidate::Match) + } +} + +/// A builder for constructing a prefilter that uses memmem. 
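+// A minimal usage sketch (illustrative; `build` only returns `Some` when the
+// `std` and `perf-literal` features are enabled): the builder produces a
+// prefilter only when exactly one pattern has been added.
+//
+//     let mut b = MemmemBuilder::default();
+//     b.add(b"sherlock");
+//     assert!(b.build().is_some()); // one pattern => memmem prefilter
+//     b.add(b"holmes");
+//     assert!(b.build().is_none()); // several patterns => defer to other prefilters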
+#[derive(Debug, Default)] +struct MemmemBuilder { + /// The number of patterns that have been added. + count: usize, + /// The singular pattern to search for. This is only set when count==1. + one: Option>, +} + +impl MemmemBuilder { + fn build(&self) -> Option { + #[cfg(all(feature = "std", feature = "perf-literal"))] + fn imp(builder: &MemmemBuilder) -> Option { + let pattern = builder.one.as_ref()?; + assert_eq!(1, builder.count); + let finder = Arc::new(Memmem( + memchr::memmem::Finder::new(pattern).into_owned(), + )); + let memory_usage = pattern.len(); + Some(Prefilter { finder, memory_usage }) + } + + #[cfg(not(all(feature = "std", feature = "perf-literal")))] + fn imp(_: &MemmemBuilder) -> Option { + None + } + + imp(self) + } + + fn add(&mut self, bytes: &[u8]) { + self.count += 1; + if self.count == 1 { + self.one = Some(bytes.to_vec()); + } else { + self.one = None; + } + } +} + +/// A type that wraps a SIMD accelerated single substring search from the +/// `memchr` crate for use as a prefilter. +/// +/// Currently, this prefilter is only active for Aho-Corasick searchers with +/// a single pattern. In theory, this could be extended to support searchers +/// that have a common prefix of more than one byte (for one byte, we would use +/// memchr), but it's not clear if it's worth it or not. +/// +/// Also, unfortunately, this currently also requires the 'std' feature to +/// be enabled. That's because memchr doesn't have a no-std-but-with-alloc +/// mode, and so APIs like Finder::into_owned aren't available when 'std' is +/// disabled. But there should be an 'alloc' feature that brings in APIs like +/// Finder::into_owned but doesn't use std-only features like runtime CPU +/// feature detection. +#[cfg(all(feature = "std", feature = "perf-literal"))] +#[derive(Clone, Debug)] +struct Memmem(memchr::memmem::Finder<'static>); + +#[cfg(all(feature = "std", feature = "perf-literal"))] +impl PrefilterI for Memmem { + fn find_in(&self, haystack: &[u8], span: Span) -> Candidate { + use crate::util::primitives::PatternID; + + self.0.find(&haystack[span]).map_or(Candidate::None, |i| { + let start = span.start + i; + let end = start + self.0.needle().len(); + // N.B. We can declare a match and use a fixed pattern ID here + // because a Memmem prefilter is only ever created for searchers + // with exactly one pattern. Thus, every match is always a match + // and it is always for the first and only pattern. + Candidate::Match(Match::new(PatternID::ZERO, start..end)) + }) + } +} + +/// A builder for constructing a rare byte prefilter. +/// +/// A rare byte prefilter attempts to pick out a small set of rare bytes that +/// occurr in the patterns, and then quickly scan to matches of those rare +/// bytes. +#[derive(Clone, Debug)] +struct RareBytesBuilder { + /// Whether this prefilter should account for ASCII case insensitivity or + /// not. + ascii_case_insensitive: bool, + /// A set of rare bytes, indexed by byte value. + rare_set: ByteSet, + /// A set of byte offsets associated with bytes in a pattern. An entry + /// corresponds to a particular bytes (its index) and is only non-zero if + /// the byte occurred at an offset greater than 0 in at least one pattern. + /// + /// If a byte's offset is not representable in 8 bits, then the rare bytes + /// prefilter becomes inert. + byte_offsets: RareByteOffsets, + /// Whether this is available as a prefilter or not. This can be set to + /// false during construction if a condition is seen that invalidates the + /// use of the rare-byte prefilter. 
+ available: bool, + /// The number of bytes set to an active value in `byte_offsets`. + count: usize, + /// The sum of frequency ranks for the rare bytes detected. This is + /// intended to give a heuristic notion of how rare the bytes are. + rank_sum: u16, +} + +/// A set of byte offsets, keyed by byte. +#[derive(Clone, Copy)] +struct RareByteOffsets { + /// Each entry corresponds to the maximum offset of the corresponding + /// byte across all patterns seen. + set: [RareByteOffset; 256], +} + +impl RareByteOffsets { + /// Create a new empty set of rare byte offsets. + pub(crate) fn empty() -> RareByteOffsets { + RareByteOffsets { set: [RareByteOffset::default(); 256] } + } + + /// Add the given offset for the given byte to this set. If the offset is + /// greater than the existing offset, then it overwrites the previous + /// value and returns false. If there is no previous value set, then this + /// sets it and returns true. + pub(crate) fn set(&mut self, byte: u8, off: RareByteOffset) { + self.set[byte as usize].max = + cmp::max(self.set[byte as usize].max, off.max); + } +} + +impl core::fmt::Debug for RareByteOffsets { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + let mut offsets = vec![]; + for off in self.set.iter() { + if off.max > 0 { + offsets.push(off); + } + } + f.debug_struct("RareByteOffsets").field("set", &offsets).finish() + } +} + +/// Offsets associated with an occurrence of a "rare" byte in any of the +/// patterns used to construct a single Aho-Corasick automaton. +#[derive(Clone, Copy, Debug)] +struct RareByteOffset { + /// The maximum offset at which a particular byte occurs from the start + /// of any pattern. This is used as a shift amount. That is, when an + /// occurrence of this byte is found, the candidate position reported by + /// the prefilter is `position_of_byte - max`, such that the automaton + /// will begin its search at a position that is guaranteed to observe a + /// match. + /// + /// To avoid accidentally quadratic behavior, a prefilter is considered + /// ineffective when it is asked to start scanning from a position that it + /// has already scanned past. + /// + /// Using a `u8` here means that if we ever see a pattern that's longer + /// than 255 bytes, then the entire rare byte prefilter is disabled. + max: u8, +} + +impl Default for RareByteOffset { + fn default() -> RareByteOffset { + RareByteOffset { max: 0 } + } +} + +impl RareByteOffset { + /// Create a new rare byte offset. If the given offset is too big, then + /// None is returned. In that case, callers should render the rare bytes + /// prefilter inert. + fn new(max: usize) -> Option { + if max > u8::MAX as usize { + None + } else { + Some(RareByteOffset { max: max as u8 }) + } + } +} + +impl RareBytesBuilder { + /// Create a new builder for constructing a rare byte prefilter. + fn new() -> RareBytesBuilder { + RareBytesBuilder { + ascii_case_insensitive: false, + rare_set: ByteSet::empty(), + byte_offsets: RareByteOffsets::empty(), + available: true, + count: 0, + rank_sum: 0, + } + } + + /// Enable ASCII case insensitivity. When set, byte strings added to this + /// builder will be interpreted without respect to ASCII case. + fn ascii_case_insensitive(mut self, yes: bool) -> RareBytesBuilder { + self.ascii_case_insensitive = yes; + self + } + + /// Build the rare bytes prefilter. + /// + /// If there are more than 3 distinct rare bytes found, or if heuristics + /// otherwise determine that this prefilter should not be used, then `None` + /// is returned. 
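+    // For instance (an illustrative sketch, not a doctest): adding any pattern
+    // of 256 bytes or more makes the 8-bit offset table unusable, so the
+    // builder gives up and `build` returns `None` from then on.
+    //
+    //     let mut b = RareBytesBuilder::new();
+    //     b.add(&[b'a'; 300]);
+    //     assert!(b.build().is_none());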
+ fn build(&self) -> Option { + #[cfg(feature = "perf-literal")] + fn imp(builder: &RareBytesBuilder) -> Option { + if !builder.available || builder.count > 3 { + return None; + } + let (mut bytes, mut len) = ([0; 3], 0); + for b in 0..=255 { + if builder.rare_set.contains(b) { + bytes[len] = b; + len += 1; + } + } + let finder: Arc = match len { + 0 => return None, + 1 => Arc::new(RareBytesOne { + byte1: bytes[0], + offset: builder.byte_offsets.set[bytes[0] as usize], + }), + 2 => Arc::new(RareBytesTwo { + offsets: builder.byte_offsets, + byte1: bytes[0], + byte2: bytes[1], + }), + 3 => Arc::new(RareBytesThree { + offsets: builder.byte_offsets, + byte1: bytes[0], + byte2: bytes[1], + byte3: bytes[2], + }), + _ => unreachable!(), + }; + Some(Prefilter { finder, memory_usage: 0 }) + } + + #[cfg(not(feature = "perf-literal"))] + fn imp(_: &RareBytesBuilder) -> Option { + None + } + + imp(self) + } + + /// Add a byte string to this builder. + /// + /// All patterns added to an Aho-Corasick automaton should be added to this + /// builder before attempting to construct the prefilter. + fn add(&mut self, bytes: &[u8]) { + // If we've already given up, then do nothing. + if !self.available { + return; + } + // If we've already blown our budget, then don't waste time looking + // for more rare bytes. + if self.count > 3 { + self.available = false; + return; + } + // If the pattern is too long, then our offset table is bunk, so + // give up. + if bytes.len() >= 256 { + self.available = false; + return; + } + let mut rarest = match bytes.first() { + None => return, + Some(&b) => (b, freq_rank(b)), + }; + // The idea here is to look for the rarest byte in each pattern, and + // add that to our set. As a special exception, if we see a byte that + // we've already added, then we immediately stop and choose that byte, + // even if there's another rare byte in the pattern. This helps us + // apply the rare byte optimization in more cases by attempting to pick + // bytes that are in common between patterns. So for example, if we + // were searching for `Sherlock` and `lockjaw`, then this would pick + // `k` for both patterns, resulting in the use of `memchr` instead of + // `memchr2` for `k` and `j`. + let mut found = false; + for (pos, &b) in bytes.iter().enumerate() { + self.set_offset(pos, b); + if found { + continue; + } + if self.rare_set.contains(b) { + found = true; + continue; + } + let rank = freq_rank(b); + if rank < rarest.1 { + rarest = (b, rank); + } + } + if !found { + self.add_rare_byte(rarest.0); + } + } + + fn set_offset(&mut self, pos: usize, byte: u8) { + // This unwrap is OK because pos is never bigger than our max. + let offset = RareByteOffset::new(pos).unwrap(); + self.byte_offsets.set(byte, offset); + if self.ascii_case_insensitive { + self.byte_offsets.set(opposite_ascii_case(byte), offset); + } + } + + fn add_rare_byte(&mut self, byte: u8) { + self.add_one_rare_byte(byte); + if self.ascii_case_insensitive { + self.add_one_rare_byte(opposite_ascii_case(byte)); + } + } + + fn add_one_rare_byte(&mut self, byte: u8) { + if !self.rare_set.contains(byte) { + self.rare_set.add(byte); + self.count += 1; + self.rank_sum += freq_rank(byte) as u16; + } + } +} + +/// A prefilter for scanning for a single "rare" byte. 
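+// A worked example of the offset shift (values are illustrative): if the only
+// pattern is b"lockjaw" and the chosen rare byte is b'k' (offset 3 from the
+// start of the pattern), then finding b'k' at haystack position 10 reports the
+// candidate start 10 - 3 = 7, clamped so it never precedes `span.start`:
+//
+//     let (pos, max) = (10usize, 3u8); // byte position and RareByteOffset::max
+//     let candidate = cmp::max(span.start, pos.saturating_sub(usize::from(max)));
+//     assert_eq!(candidate, 7); // assuming span.start <= 7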
+#[cfg(feature = "perf-literal")] +#[derive(Clone, Debug)] +struct RareBytesOne { + byte1: u8, + offset: RareByteOffset, +} + +#[cfg(feature = "perf-literal")] +impl PrefilterI for RareBytesOne { + fn find_in(&self, haystack: &[u8], span: Span) -> Candidate { + memchr::memchr(self.byte1, &haystack[span]) + .map(|i| { + let pos = span.start + i; + cmp::max( + span.start, + pos.saturating_sub(usize::from(self.offset.max)), + ) + }) + .map_or(Candidate::None, Candidate::PossibleStartOfMatch) + } +} + +/// A prefilter for scanning for two "rare" bytes. +#[cfg(feature = "perf-literal")] +#[derive(Clone, Debug)] +struct RareBytesTwo { + offsets: RareByteOffsets, + byte1: u8, + byte2: u8, +} + +#[cfg(feature = "perf-literal")] +impl PrefilterI for RareBytesTwo { + fn find_in(&self, haystack: &[u8], span: Span) -> Candidate { + memchr::memchr2(self.byte1, self.byte2, &haystack[span]) + .map(|i| { + let pos = span.start + i; + let offset = self.offsets.set[usize::from(haystack[pos])].max; + cmp::max(span.start, pos.saturating_sub(usize::from(offset))) + }) + .map_or(Candidate::None, Candidate::PossibleStartOfMatch) + } +} + +/// A prefilter for scanning for three "rare" bytes. +#[cfg(feature = "perf-literal")] +#[derive(Clone, Debug)] +struct RareBytesThree { + offsets: RareByteOffsets, + byte1: u8, + byte2: u8, + byte3: u8, +} + +#[cfg(feature = "perf-literal")] +impl PrefilterI for RareBytesThree { + fn find_in(&self, haystack: &[u8], span: Span) -> Candidate { + memchr::memchr3(self.byte1, self.byte2, self.byte3, &haystack[span]) + .map(|i| { + let pos = span.start + i; + let offset = self.offsets.set[usize::from(haystack[pos])].max; + cmp::max(span.start, pos.saturating_sub(usize::from(offset))) + }) + .map_or(Candidate::None, Candidate::PossibleStartOfMatch) + } +} + +/// A builder for constructing a starting byte prefilter. +/// +/// A starting byte prefilter is a simplistic prefilter that looks for possible +/// matches by reporting all positions corresponding to a particular byte. This +/// generally only takes affect when there are at most 3 distinct possible +/// starting bytes. e.g., the patterns `foo`, `bar`, and `baz` have two +/// distinct starting bytes (`f` and `b`), and this prefilter returns all +/// occurrences of either `f` or `b`. +/// +/// In some cases, a heuristic frequency analysis may determine that it would +/// be better not to use this prefilter even when there are 3 or fewer distinct +/// starting bytes. +#[derive(Clone, Debug)] +struct StartBytesBuilder { + /// Whether this prefilter should account for ASCII case insensitivity or + /// not. + ascii_case_insensitive: bool, + /// The set of starting bytes observed. + byteset: Vec, + /// The number of bytes set to true in `byteset`. + count: usize, + /// The sum of frequency ranks for the rare bytes detected. This is + /// intended to give a heuristic notion of how rare the bytes are. + rank_sum: u16, +} + +impl StartBytesBuilder { + /// Create a new builder for constructing a start byte prefilter. + fn new() -> StartBytesBuilder { + StartBytesBuilder { + ascii_case_insensitive: false, + byteset: vec![false; 256], + count: 0, + rank_sum: 0, + } + } + + /// Enable ASCII case insensitivity. When set, byte strings added to this + /// builder will be interpreted without respect to ASCII case. + fn ascii_case_insensitive(mut self, yes: bool) -> StartBytesBuilder { + self.ascii_case_insensitive = yes; + self + } + + /// Build the starting bytes prefilter. 
+ /// + /// If there are more than 3 distinct starting bytes, or if heuristics + /// otherwise determine that this prefilter should not be used, then `None` + /// is returned. + fn build(&self) -> Option { + #[cfg(feature = "perf-literal")] + fn imp(builder: &StartBytesBuilder) -> Option { + if builder.count > 3 { + return None; + } + let (mut bytes, mut len) = ([0; 3], 0); + for b in 0..256 { + if !builder.byteset[b] { + continue; + } + // We don't handle non-ASCII bytes for now. Getting non-ASCII + // bytes right is trickier, since we generally don't want to put + // a leading UTF-8 code unit into a prefilter that isn't ASCII, + // since they can frequently. Instead, it would be better to use a + // continuation byte, but this requires more sophisticated analysis + // of the automaton and a richer prefilter API. + if b > 0x7F { + return None; + } + bytes[len] = b as u8; + len += 1; + } + let finder: Arc = match len { + 0 => return None, + 1 => Arc::new(StartBytesOne { byte1: bytes[0] }), + 2 => Arc::new(StartBytesTwo { + byte1: bytes[0], + byte2: bytes[1], + }), + 3 => Arc::new(StartBytesThree { + byte1: bytes[0], + byte2: bytes[1], + byte3: bytes[2], + }), + _ => unreachable!(), + }; + Some(Prefilter { finder, memory_usage: 0 }) + } + + #[cfg(not(feature = "perf-literal"))] + fn imp(_: &StartBytesBuilder) -> Option { + None + } + + imp(self) + } + + /// Add a byte string to this builder. + /// + /// All patterns added to an Aho-Corasick automaton should be added to this + /// builder before attempting to construct the prefilter. + fn add(&mut self, bytes: &[u8]) { + if self.count > 3 { + return; + } + if let Some(&byte) = bytes.first() { + self.add_one_byte(byte); + if self.ascii_case_insensitive { + self.add_one_byte(opposite_ascii_case(byte)); + } + } + } + + fn add_one_byte(&mut self, byte: u8) { + if !self.byteset[byte as usize] { + self.byteset[byte as usize] = true; + self.count += 1; + self.rank_sum += freq_rank(byte) as u16; + } + } +} + +/// A prefilter for scanning for a single starting byte. +#[cfg(feature = "perf-literal")] +#[derive(Clone, Debug)] +struct StartBytesOne { + byte1: u8, +} + +#[cfg(feature = "perf-literal")] +impl PrefilterI for StartBytesOne { + fn find_in(&self, haystack: &[u8], span: Span) -> Candidate { + memchr::memchr(self.byte1, &haystack[span]) + .map(|i| span.start + i) + .map_or(Candidate::None, Candidate::PossibleStartOfMatch) + } +} + +/// A prefilter for scanning for two starting bytes. +#[cfg(feature = "perf-literal")] +#[derive(Clone, Debug)] +struct StartBytesTwo { + byte1: u8, + byte2: u8, +} + +#[cfg(feature = "perf-literal")] +impl PrefilterI for StartBytesTwo { + fn find_in(&self, haystack: &[u8], span: Span) -> Candidate { + memchr::memchr2(self.byte1, self.byte2, &haystack[span]) + .map(|i| span.start + i) + .map_or(Candidate::None, Candidate::PossibleStartOfMatch) + } +} + +/// A prefilter for scanning for three starting bytes. +#[cfg(feature = "perf-literal")] +#[derive(Clone, Debug)] +struct StartBytesThree { + byte1: u8, + byte2: u8, + byte3: u8, +} + +#[cfg(feature = "perf-literal")] +impl PrefilterI for StartBytesThree { + fn find_in(&self, haystack: &[u8], span: Span) -> Candidate { + memchr::memchr3(self.byte1, self.byte2, self.byte3, &haystack[span]) + .map(|i| span.start + i) + .map_or(Candidate::None, Candidate::PossibleStartOfMatch) + } +} + +/// If the given byte is an ASCII letter, then return it in the opposite case. +/// e.g., Given `b'A'`, this returns `b'a'`, and given `b'a'`, this returns +/// `b'A'`. 
If a non-ASCII letter is given, then the given byte is returned. +pub(crate) fn opposite_ascii_case(b: u8) -> u8 { + if b'A' <= b && b <= b'Z' { + b.to_ascii_lowercase() + } else if b'a' <= b && b <= b'z' { + b.to_ascii_uppercase() + } else { + b + } +} + +/// Return the frequency rank of the given byte. The higher the rank, the more +/// common the byte (heuristically speaking). +fn freq_rank(b: u8) -> u8 { + use crate::util::byte_frequencies::BYTE_FREQUENCIES; + BYTE_FREQUENCIES[b as usize] +} diff --git a/vendor/aho-corasick/src/util/primitives.rs b/vendor/aho-corasick/src/util/primitives.rs new file mode 100644 index 00000000000000..784d3971713d10 --- /dev/null +++ b/vendor/aho-corasick/src/util/primitives.rs @@ -0,0 +1,759 @@ +/*! +Lower level primitive types that are useful in a variety of circumstances. + +# Overview + +This list represents the principle types in this module and briefly describes +when you might want to use them. + +* [`PatternID`] - A type that represents the identifier of a regex pattern. +This is probably the most widely used type in this module (which is why it's +also re-exported in the crate root). +* [`StateID`] - A type the represents the identifier of a finite automaton +state. This is used for both NFAs and DFAs, with the notable exception of +the hybrid NFA/DFA. (The hybrid NFA/DFA uses a special purpose "lazy" state +identifier.) +* [`SmallIndex`] - The internal representation of both a `PatternID` and a +`StateID`. Its purpose is to serve as a type that can index memory without +being as big as a `usize` on 64-bit targets. The main idea behind this type +is that there are many things in regex engines that will, in practice, never +overflow a 32-bit integer. (For example, like the number of patterns in a regex +or the number of states in an NFA.) Thus, a `SmallIndex` can be used to index +memory without peppering `as` casts everywhere. Moreover, it forces callers +to handle errors in the case where, somehow, the value would otherwise overflow +either a 32-bit integer or a `usize` (e.g., on 16-bit targets). +*/ + +// The macro we use to define some types below adds methods that we don't +// use on some of the types. There isn't much, so we just squash the warning. +#![allow(dead_code)] + +use alloc::vec::Vec; + +use crate::util::int::{Usize, U16, U32, U64}; + +/// A type that represents a "small" index. +/// +/// The main idea of this type is to provide something that can index memory, +/// but uses less memory than `usize` on 64-bit systems. Specifically, its +/// representation is always a `u32` and has `repr(transparent)` enabled. (So +/// it is safe to transmute between a `u32` and a `SmallIndex`.) +/// +/// A small index is typically useful in cases where there is no practical way +/// that the index will overflow a 32-bit integer. A good example of this is +/// an NFA state. If you could somehow build an NFA with `2^30` states, its +/// memory usage would be exorbitant and its runtime execution would be so +/// slow as to be completely worthless. Therefore, this crate generally deems +/// it acceptable to return an error if it would otherwise build an NFA that +/// requires a slice longer than what a 32-bit integer can index. In exchange, +/// we can use 32-bit indices instead of 64-bit indices in various places. +/// +/// This type ensures this by providing a constructor that will return an error +/// if its argument cannot fit into the type. This makes it much easier to +/// handle these sorts of boundary cases that are otherwise extremely subtle. 
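+// A brief illustration of that constructor contract (index values are
+// arbitrary examples):
+//
+//     assert_eq!(SmallIndex::new(5).unwrap().as_usize(), 5);
+//     // Values above SmallIndex::MAX are rejected instead of silently truncated.
+//     assert!(SmallIndex::new(SmallIndex::MAX.as_usize() + 1).is_err());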
+/// +/// On all targets, this type guarantees that its value will fit in a `u32`, +/// `i32`, `usize` and an `isize`. This means that on 16-bit targets, for +/// example, this type's maximum value will never overflow an `isize`, +/// which means it will never overflow a `i16` even though its internal +/// representation is still a `u32`. +/// +/// The purpose for making the type fit into even signed integer types like +/// `isize` is to guarantee that the difference between any two small indices +/// is itself also a small index. This is useful in certain contexts, e.g., +/// for delta encoding. +/// +/// # Other types +/// +/// The following types wrap `SmallIndex` to provide a more focused use case: +/// +/// * [`PatternID`] is for representing the identifiers of patterns. +/// * [`StateID`] is for representing the identifiers of states in finite +/// automata. It is used for both NFAs and DFAs. +/// +/// # Representation +/// +/// This type is always represented internally by a `u32` and is marked as +/// `repr(transparent)`. Thus, this type always has the same representation as +/// a `u32`. It is thus safe to transmute between a `u32` and a `SmallIndex`. +/// +/// # Indexing +/// +/// For convenience, callers may use a `SmallIndex` to index slices. +/// +/// # Safety +/// +/// While a `SmallIndex` is meant to guarantee that its value fits into `usize` +/// without using as much space as a `usize` on all targets, callers must +/// not rely on this property for safety. Callers may choose to rely on this +/// property for correctness however. For example, creating a `SmallIndex` with +/// an invalid value can be done in entirely safe code. This may in turn result +/// in panics or silent logical errors. +#[derive( + Clone, Copy, Debug, Default, Eq, Hash, PartialEq, PartialOrd, Ord, +)] +#[repr(transparent)] +pub(crate) struct SmallIndex(u32); + +impl SmallIndex { + /// The maximum index value. + #[cfg(any(target_pointer_width = "32", target_pointer_width = "64"))] + pub const MAX: SmallIndex = + // FIXME: Use as_usize() once const functions in traits are stable. + SmallIndex::new_unchecked(core::i32::MAX as usize - 1); + + /// The maximum index value. + #[cfg(target_pointer_width = "16")] + pub const MAX: SmallIndex = + SmallIndex::new_unchecked(core::isize::MAX - 1); + + /// The total number of values that can be represented as a small index. + pub const LIMIT: usize = SmallIndex::MAX.as_usize() + 1; + + /// The zero index value. + pub const ZERO: SmallIndex = SmallIndex::new_unchecked(0); + + /// The number of bytes that a single small index uses in memory. + pub const SIZE: usize = core::mem::size_of::(); + + /// Create a new small index. + /// + /// If the given index exceeds [`SmallIndex::MAX`], then this returns + /// an error. + #[inline] + pub fn new(index: usize) -> Result { + SmallIndex::try_from(index) + } + + /// Create a new small index without checking whether the given value + /// exceeds [`SmallIndex::MAX`]. + /// + /// Using this routine with an invalid index value will result in + /// unspecified behavior, but *not* undefined behavior. In particular, an + /// invalid index value is likely to cause panics or possibly even silent + /// logical errors. + /// + /// Callers must never rely on a `SmallIndex` to be within a certain range + /// for memory safety. + #[inline] + pub const fn new_unchecked(index: usize) -> SmallIndex { + // FIXME: Use as_u32() once const functions in traits are stable. 
+ SmallIndex::from_u32_unchecked(index as u32) + } + + /// Create a new small index from a `u32` without checking whether the + /// given value exceeds [`SmallIndex::MAX`]. + /// + /// Using this routine with an invalid index value will result in + /// unspecified behavior, but *not* undefined behavior. In particular, an + /// invalid index value is likely to cause panics or possibly even silent + /// logical errors. + /// + /// Callers must never rely on a `SmallIndex` to be within a certain range + /// for memory safety. + #[inline] + pub const fn from_u32_unchecked(index: u32) -> SmallIndex { + SmallIndex(index) + } + + /// Like [`SmallIndex::new`], but panics if the given index is not valid. + #[inline] + pub fn must(index: usize) -> SmallIndex { + SmallIndex::new(index).expect("invalid small index") + } + + /// Return this small index as a `usize`. This is guaranteed to never + /// overflow `usize`. + #[inline] + pub const fn as_usize(&self) -> usize { + // FIXME: Use as_usize() once const functions in traits are stable. + self.0 as usize + } + + /// Return this small index as a `u64`. This is guaranteed to never + /// overflow. + #[inline] + pub const fn as_u64(&self) -> u64 { + // FIXME: Use u64::from() once const functions in traits are stable. + self.0 as u64 + } + + /// Return the internal `u32` of this small index. This is guaranteed to + /// never overflow `u32`. + #[inline] + pub const fn as_u32(&self) -> u32 { + self.0 + } + + /// Return the internal `u32` of this small index represented as an `i32`. + /// This is guaranteed to never overflow an `i32`. + #[inline] + pub const fn as_i32(&self) -> i32 { + // This is OK because we guarantee that our max value is <= i32::MAX. + self.0 as i32 + } + + /// Returns one more than this small index as a usize. + /// + /// Since a small index has constraints on its maximum value, adding `1` to + /// it will always fit in a `usize`, `isize`, `u32` and a `i32`. + #[inline] + pub fn one_more(&self) -> usize { + self.as_usize() + 1 + } + + /// Decode this small index from the bytes given using the native endian + /// byte order for the current target. + /// + /// If the decoded integer is not representable as a small index for the + /// current target, then this returns an error. + #[inline] + pub fn from_ne_bytes( + bytes: [u8; 4], + ) -> Result { + let id = u32::from_ne_bytes(bytes); + if id > SmallIndex::MAX.as_u32() { + return Err(SmallIndexError { attempted: u64::from(id) }); + } + Ok(SmallIndex::new_unchecked(id.as_usize())) + } + + /// Decode this small index from the bytes given using the native endian + /// byte order for the current target. + /// + /// This is analogous to [`SmallIndex::new_unchecked`] in that is does not + /// check whether the decoded integer is representable as a small index. + #[inline] + pub fn from_ne_bytes_unchecked(bytes: [u8; 4]) -> SmallIndex { + SmallIndex::new_unchecked(u32::from_ne_bytes(bytes).as_usize()) + } + + /// Return the underlying small index integer as raw bytes in native endian + /// format. 
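+    // For valid values the byte conversions round-trip (illustrative):
+    //
+    //     let idx = SmallIndex::must(1234);
+    //     assert_eq!(SmallIndex::from_ne_bytes(idx.to_ne_bytes()), Ok(idx));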
+ #[inline] + pub fn to_ne_bytes(&self) -> [u8; 4] { + self.0.to_ne_bytes() + } +} + +impl core::ops::Index for [T] { + type Output = T; + + #[inline] + fn index(&self, index: SmallIndex) -> &T { + &self[index.as_usize()] + } +} + +impl core::ops::IndexMut for [T] { + #[inline] + fn index_mut(&mut self, index: SmallIndex) -> &mut T { + &mut self[index.as_usize()] + } +} + +impl core::ops::Index for Vec { + type Output = T; + + #[inline] + fn index(&self, index: SmallIndex) -> &T { + &self[index.as_usize()] + } +} + +impl core::ops::IndexMut for Vec { + #[inline] + fn index_mut(&mut self, index: SmallIndex) -> &mut T { + &mut self[index.as_usize()] + } +} + +impl From for SmallIndex { + fn from(sid: StateID) -> SmallIndex { + sid.0 + } +} + +impl From for SmallIndex { + fn from(pid: PatternID) -> SmallIndex { + pid.0 + } +} + +impl From for SmallIndex { + fn from(index: u8) -> SmallIndex { + SmallIndex::new_unchecked(usize::from(index)) + } +} + +impl TryFrom for SmallIndex { + type Error = SmallIndexError; + + fn try_from(index: u16) -> Result { + if u32::from(index) > SmallIndex::MAX.as_u32() { + return Err(SmallIndexError { attempted: u64::from(index) }); + } + Ok(SmallIndex::new_unchecked(index.as_usize())) + } +} + +impl TryFrom for SmallIndex { + type Error = SmallIndexError; + + fn try_from(index: u32) -> Result { + if index > SmallIndex::MAX.as_u32() { + return Err(SmallIndexError { attempted: u64::from(index) }); + } + Ok(SmallIndex::new_unchecked(index.as_usize())) + } +} + +impl TryFrom for SmallIndex { + type Error = SmallIndexError; + + fn try_from(index: u64) -> Result { + if index > SmallIndex::MAX.as_u64() { + return Err(SmallIndexError { attempted: index }); + } + Ok(SmallIndex::new_unchecked(index.as_usize())) + } +} + +impl TryFrom for SmallIndex { + type Error = SmallIndexError; + + fn try_from(index: usize) -> Result { + if index > SmallIndex::MAX.as_usize() { + return Err(SmallIndexError { attempted: index.as_u64() }); + } + Ok(SmallIndex::new_unchecked(index)) + } +} + +/// This error occurs when a small index could not be constructed. +/// +/// This occurs when given an integer exceeding the maximum small index value. +/// +/// When the `std` feature is enabled, this implements the `Error` trait. +#[derive(Clone, Debug, Eq, PartialEq)] +pub struct SmallIndexError { + attempted: u64, +} + +impl SmallIndexError { + /// Returns the value that could not be converted to a small index. + pub fn attempted(&self) -> u64 { + self.attempted + } +} + +#[cfg(feature = "std")] +impl std::error::Error for SmallIndexError {} + +impl core::fmt::Display for SmallIndexError { + fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { + write!( + f, + "failed to create small index from {:?}, which exceeds {:?}", + self.attempted(), + SmallIndex::MAX, + ) + } +} + +#[derive(Clone, Debug)] +pub(crate) struct SmallIndexIter { + rng: core::ops::Range, +} + +impl Iterator for SmallIndexIter { + type Item = SmallIndex; + + fn next(&mut self) -> Option { + if self.rng.start >= self.rng.end { + return None; + } + let next_id = self.rng.start + 1; + let id = core::mem::replace(&mut self.rng.start, next_id); + // new_unchecked is OK since we asserted that the number of + // elements in this iterator will fit in an ID at construction. + Some(SmallIndex::new_unchecked(id)) + } +} + +macro_rules! index_type_impls { + ($name:ident, $err:ident, $iter:ident, $withiter:ident) => { + impl $name { + /// The maximum value. 
+ pub const MAX: $name = $name(SmallIndex::MAX); + + /// The total number of values that can be represented. + pub const LIMIT: usize = SmallIndex::LIMIT; + + /// The zero value. + pub const ZERO: $name = $name(SmallIndex::ZERO); + + /// The number of bytes that a single value uses in memory. + pub const SIZE: usize = SmallIndex::SIZE; + + /// Create a new value that is represented by a "small index." + /// + /// If the given index exceeds the maximum allowed value, then this + /// returns an error. + #[inline] + pub fn new(value: usize) -> Result<$name, $err> { + SmallIndex::new(value).map($name).map_err($err) + } + + /// Create a new value without checking whether the given argument + /// exceeds the maximum. + /// + /// Using this routine with an invalid value will result in + /// unspecified behavior, but *not* undefined behavior. In + /// particular, an invalid ID value is likely to cause panics or + /// possibly even silent logical errors. + /// + /// Callers must never rely on this type to be within a certain + /// range for memory safety. + #[inline] + pub const fn new_unchecked(value: usize) -> $name { + $name(SmallIndex::new_unchecked(value)) + } + + /// Create a new value from a `u32` without checking whether the + /// given value exceeds the maximum. + /// + /// Using this routine with an invalid value will result in + /// unspecified behavior, but *not* undefined behavior. In + /// particular, an invalid ID value is likely to cause panics or + /// possibly even silent logical errors. + /// + /// Callers must never rely on this type to be within a certain + /// range for memory safety. + #[inline] + pub const fn from_u32_unchecked(index: u32) -> $name { + $name(SmallIndex::from_u32_unchecked(index)) + } + + /// Like `new`, but panics if the given value is not valid. + #[inline] + pub fn must(value: usize) -> $name { + $name::new(value).expect(concat!( + "invalid ", + stringify!($name), + " value" + )) + } + + /// Return the internal value as a `usize`. This is guaranteed to + /// never overflow `usize`. + #[inline] + pub const fn as_usize(&self) -> usize { + self.0.as_usize() + } + + /// Return the internal value as a `u64`. This is guaranteed to + /// never overflow. + #[inline] + pub const fn as_u64(&self) -> u64 { + self.0.as_u64() + } + + /// Return the internal value as a `u32`. This is guaranteed to + /// never overflow `u32`. + #[inline] + pub const fn as_u32(&self) -> u32 { + self.0.as_u32() + } + + /// Return the internal value as a `i32`. This is guaranteed to + /// never overflow an `i32`. + #[inline] + pub const fn as_i32(&self) -> i32 { + self.0.as_i32() + } + + /// Returns one more than this value as a usize. + /// + /// Since values represented by a "small index" have constraints + /// on their maximum value, adding `1` to it will always fit in a + /// `usize`, `u32` and a `i32`. + #[inline] + pub fn one_more(&self) -> usize { + self.0.one_more() + } + + /// Decode this value from the bytes given using the native endian + /// byte order for the current target. + /// + /// If the decoded integer is not representable as a small index + /// for the current target, then this returns an error. + #[inline] + pub fn from_ne_bytes(bytes: [u8; 4]) -> Result<$name, $err> { + SmallIndex::from_ne_bytes(bytes).map($name).map_err($err) + } + + /// Decode this value from the bytes given using the native endian + /// byte order for the current target. 
+ /// + /// This is analogous to `new_unchecked` in that is does not check + /// whether the decoded integer is representable as a small index. + #[inline] + pub fn from_ne_bytes_unchecked(bytes: [u8; 4]) -> $name { + $name(SmallIndex::from_ne_bytes_unchecked(bytes)) + } + + /// Return the underlying integer as raw bytes in native endian + /// format. + #[inline] + pub fn to_ne_bytes(&self) -> [u8; 4] { + self.0.to_ne_bytes() + } + + /// Returns an iterator over all values from 0 up to and not + /// including the given length. + /// + /// If the given length exceeds this type's limit, then this + /// panics. + pub(crate) fn iter(len: usize) -> $iter { + $iter::new(len) + } + } + + // We write our own Debug impl so that we get things like PatternID(5) + // instead of PatternID(SmallIndex(5)). + impl core::fmt::Debug for $name { + fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { + f.debug_tuple(stringify!($name)).field(&self.as_u32()).finish() + } + } + + impl core::ops::Index<$name> for [T] { + type Output = T; + + #[inline] + fn index(&self, index: $name) -> &T { + &self[index.as_usize()] + } + } + + impl core::ops::IndexMut<$name> for [T] { + #[inline] + fn index_mut(&mut self, index: $name) -> &mut T { + &mut self[index.as_usize()] + } + } + + impl core::ops::Index<$name> for Vec { + type Output = T; + + #[inline] + fn index(&self, index: $name) -> &T { + &self[index.as_usize()] + } + } + + impl core::ops::IndexMut<$name> for Vec { + #[inline] + fn index_mut(&mut self, index: $name) -> &mut T { + &mut self[index.as_usize()] + } + } + + impl From for $name { + fn from(index: SmallIndex) -> $name { + $name(index) + } + } + + impl From for $name { + fn from(value: u8) -> $name { + $name(SmallIndex::from(value)) + } + } + + impl TryFrom for $name { + type Error = $err; + + fn try_from(value: u16) -> Result<$name, $err> { + SmallIndex::try_from(value).map($name).map_err($err) + } + } + + impl TryFrom for $name { + type Error = $err; + + fn try_from(value: u32) -> Result<$name, $err> { + SmallIndex::try_from(value).map($name).map_err($err) + } + } + + impl TryFrom for $name { + type Error = $err; + + fn try_from(value: u64) -> Result<$name, $err> { + SmallIndex::try_from(value).map($name).map_err($err) + } + } + + impl TryFrom for $name { + type Error = $err; + + fn try_from(value: usize) -> Result<$name, $err> { + SmallIndex::try_from(value).map($name).map_err($err) + } + } + + /// This error occurs when an ID could not be constructed. + /// + /// This occurs when given an integer exceeding the maximum allowed + /// value. + /// + /// When the `std` feature is enabled, this implements the `Error` + /// trait. + #[derive(Clone, Debug, Eq, PartialEq)] + pub struct $err(SmallIndexError); + + impl $err { + /// Returns the value that could not be converted to an ID. 
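+            // For example (taking `PatternID` as the instantiated type; the
+            // value is illustrative):
+            //
+            //     let err = PatternID::try_from(u32::MAX).unwrap_err();
+            //     assert_eq!(err.attempted(), u64::from(u32::MAX));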
+ pub fn attempted(&self) -> u64 { + self.0.attempted() + } + } + + #[cfg(feature = "std")] + impl std::error::Error for $err {} + + impl core::fmt::Display for $err { + fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { + write!( + f, + "failed to create {} from {:?}, which exceeds {:?}", + stringify!($name), + self.attempted(), + $name::MAX, + ) + } + } + + #[derive(Clone, Debug)] + pub(crate) struct $iter(SmallIndexIter); + + impl $iter { + fn new(len: usize) -> $iter { + assert!( + len <= $name::LIMIT, + "cannot create iterator for {} when number of \ + elements exceed {:?}", + stringify!($name), + $name::LIMIT, + ); + $iter(SmallIndexIter { rng: 0..len }) + } + } + + impl Iterator for $iter { + type Item = $name; + + fn next(&mut self) -> Option<$name> { + self.0.next().map($name) + } + } + + /// An iterator adapter that is like std::iter::Enumerate, but attaches + /// small index values instead. It requires `ExactSizeIterator`. At + /// construction, it ensures that the index of each element in the + /// iterator is representable in the corresponding small index type. + #[derive(Clone, Debug)] + pub(crate) struct $withiter { + it: I, + ids: $iter, + } + + impl $withiter { + fn new(it: I) -> $withiter { + let ids = $name::iter(it.len()); + $withiter { it, ids } + } + } + + impl Iterator for $withiter { + type Item = ($name, I::Item); + + fn next(&mut self) -> Option<($name, I::Item)> { + let item = self.it.next()?; + // Number of elements in this iterator must match, according + // to contract of ExactSizeIterator. + let id = self.ids.next().unwrap(); + Some((id, item)) + } + } + }; +} + +/// The identifier of a pattern in an Aho-Corasick automaton. +/// +/// It is represented by a `u32` even on 64-bit systems in order to conserve +/// space. Namely, on all targets, this type guarantees that its value will +/// fit in a `u32`, `i32`, `usize` and an `isize`. This means that on 16-bit +/// targets, for example, this type's maximum value will never overflow an +/// `isize`, which means it will never overflow a `i16` even though its +/// internal representation is still a `u32`. +/// +/// # Safety +/// +/// While a `PatternID` is meant to guarantee that its value fits into `usize` +/// without using as much space as a `usize` on all targets, callers must +/// not rely on this property for safety. Callers may choose to rely on this +/// property for correctness however. For example, creating a `StateID` with an +/// invalid value can be done in entirely safe code. This may in turn result in +/// panics or silent logical errors. +#[derive(Clone, Copy, Default, Eq, Hash, PartialEq, PartialOrd, Ord)] +#[repr(transparent)] +pub struct PatternID(SmallIndex); + +/// The identifier of a finite automaton state. +/// +/// It is represented by a `u32` even on 64-bit systems in order to conserve +/// space. Namely, on all targets, this type guarantees that its value will +/// fit in a `u32`, `i32`, `usize` and an `isize`. This means that on 16-bit +/// targets, for example, this type's maximum value will never overflow an +/// `isize`, which means it will never overflow a `i16` even though its +/// internal representation is still a `u32`. +/// +/// # Safety +/// +/// While a `StateID` is meant to guarantee that its value fits into `usize` +/// without using as much space as a `usize` on all targets, callers must +/// not rely on this property for safety. Callers may choose to rely on this +/// property for correctness however. 
For example, creating a `StateID` with an +/// invalid value can be done in entirely safe code. This may in turn result in +/// panics or silent logical errors. +#[derive(Clone, Copy, Default, Eq, Hash, PartialEq, PartialOrd, Ord)] +#[repr(transparent)] +pub struct StateID(SmallIndex); + +index_type_impls!(PatternID, PatternIDError, PatternIDIter, WithPatternIDIter); +index_type_impls!(StateID, StateIDError, StateIDIter, WithStateIDIter); + +/// A utility trait that defines a couple of adapters for making it convenient +/// to access indices as "small index" types. We require ExactSizeIterator so +/// that iterator construction can do a single check to make sure the index of +/// each element is representable by its small index type. +pub(crate) trait IteratorIndexExt: Iterator { + fn with_pattern_ids(self) -> WithPatternIDIter + where + Self: Sized + ExactSizeIterator, + { + WithPatternIDIter::new(self) + } + + fn with_state_ids(self) -> WithStateIDIter + where + Self: Sized + ExactSizeIterator, + { + WithStateIDIter::new(self) + } +} + +impl IteratorIndexExt for I {} diff --git a/vendor/aho-corasick/src/util/remapper.rs b/vendor/aho-corasick/src/util/remapper.rs new file mode 100644 index 00000000000000..7c47a082cdd221 --- /dev/null +++ b/vendor/aho-corasick/src/util/remapper.rs @@ -0,0 +1,214 @@ +use alloc::vec::Vec; + +use crate::{nfa::noncontiguous, util::primitives::StateID}; + +/// Remappable is a tightly coupled abstraction that facilitates remapping +/// state identifiers in DFAs. +/// +/// The main idea behind remapping state IDs is that DFAs often need to check +/// if a certain state is a "special" state of some kind (like a match state) +/// during a search. Since this is extremely perf critical code, we want this +/// check to be as fast as possible. Partitioning state IDs into, for example, +/// into "non-match" and "match" states means one can tell if a state is a +/// match state via a simple comparison of the state ID. +/// +/// The issue is that during the DFA construction process, it's not +/// particularly easy to partition the states. Instead, the simplest thing is +/// to often just do a pass over all of the states and shuffle them into their +/// desired partitionings. To do that, we need a mechanism for swapping states. +/// Hence, this abstraction. +/// +/// Normally, for such little code, I would just duplicate it. But this is a +/// key optimization and the implementation is a bit subtle. So the abstraction +/// is basically a ham-fisted attempt at DRY. The only place we use this is in +/// the dense and one-pass DFAs. +/// +/// See also src/dfa/special.rs for a more detailed explanation of how dense +/// DFAs are partitioned. +pub(crate) trait Remappable: core::fmt::Debug { + /// Return the total number of states. + fn state_len(&self) -> usize; + + /// Swap the states pointed to by the given IDs. The underlying finite + /// state machine should be mutated such that all of the transitions in + /// `id1` are now in the memory region where the transitions for `id2` + /// were, and all of the transitions in `id2` are now in the memory region + /// where the transitions for `id1` were. + /// + /// Essentially, this "moves" `id1` to `id2` and `id2` to `id1`. + /// + /// It is expected that, after calling this, the underlying state machine + /// will be left in an inconsistent state, since any other transitions + /// pointing to, e.g., `id1` need to be updated to point to `id2`, since + /// that's where `id1` moved to. 
+ /// + /// In order to "fix" the underlying inconsistent state, a `Remapper` + /// should be used to guarantee that `remap` is called at the appropriate + /// time. + fn swap_states(&mut self, id1: StateID, id2: StateID); + + /// This must remap every single state ID in the underlying value according + /// to the function given. For example, in a DFA, this should remap every + /// transition and every starting state ID. + fn remap(&mut self, map: impl Fn(StateID) -> StateID); +} + +/// Remapper is an abstraction the manages the remapping of state IDs in a +/// finite state machine. This is useful when one wants to shuffle states into +/// different positions in the machine. +/// +/// One of the key complexities this manages is the ability to correctly move +/// one state multiple times. +/// +/// Once shuffling is complete, `remap` must be called, which will rewrite +/// all pertinent transitions to updated state IDs. Neglecting to call `remap` +/// will almost certainly result in a corrupt machine. +#[derive(Debug)] +pub(crate) struct Remapper { + /// A map from the index of a state to its pre-multiplied identifier. + /// + /// When a state is swapped with another, then their corresponding + /// locations in this map are also swapped. Thus, its new position will + /// still point to its old pre-multiplied StateID. + /// + /// While there is a bit more to it, this then allows us to rewrite the + /// state IDs in a DFA's transition table in a single pass. This is done + /// by iterating over every ID in this map, then iterating over each + /// transition for the state at that ID and re-mapping the transition from + /// `old_id` to `map[dfa.to_index(old_id)]`. That is, we find the position + /// in this map where `old_id` *started*, and set it to where it ended up + /// after all swaps have been completed. + map: Vec, + /// A way to map indices to state IDs (and back). + idx: IndexMapper, +} + +impl Remapper { + /// Create a new remapper from the given remappable implementation. The + /// remapper can then be used to swap states. The remappable value given + /// here must the same one given to `swap` and `remap`. + /// + /// The given stride should be the stride of the transition table expressed + /// as a power of 2. This stride is used to map between state IDs and state + /// indices. If state IDs and state indices are equivalent, then provide + /// a `stride2` of `0`, which acts as an identity. + pub(crate) fn new(r: &impl Remappable, stride2: usize) -> Remapper { + let idx = IndexMapper { stride2 }; + let map = (0..r.state_len()).map(|i| idx.to_state_id(i)).collect(); + Remapper { map, idx } + } + + /// Swap two states. Once this is called, callers must follow through to + /// call `remap`, or else it's possible for the underlying remappable + /// value to be in a corrupt state. + pub(crate) fn swap( + &mut self, + r: &mut impl Remappable, + id1: StateID, + id2: StateID, + ) { + if id1 == id2 { + return; + } + r.swap_states(id1, id2); + self.map.swap(self.idx.to_index(id1), self.idx.to_index(id2)); + } + + /// Complete the remapping process by rewriting all state IDs in the + /// remappable value according to the swaps performed. + pub(crate) fn remap(mut self, r: &mut impl Remappable) { + // Update the map to account for states that have been swapped + // multiple times. For example, if (A, C) and (C, G) are swapped, then + // transitions previously pointing to A should now point to G. But if + // we don't update our map, they will erroneously be set to C. 
All we + // do is follow the swaps in our map until we see our original state + // ID. + // + // The intuition here is to think about how changes are made to the + // map: only through pairwise swaps. That means that starting at any + // given state, it is always possible to find the loop back to that + // state by following the swaps represented in the map (which might be + // 0 swaps). + // + // We are also careful to clone the map before starting in order to + // freeze it. We use the frozen map to find our loops, since we need to + // update our map as well. Without freezing it, our updates could break + // the loops referenced above and produce incorrect results. + let oldmap = self.map.clone(); + for i in 0..r.state_len() { + let cur_id = self.idx.to_state_id(i); + let mut new_id = oldmap[i]; + if cur_id == new_id { + continue; + } + loop { + let id = oldmap[self.idx.to_index(new_id)]; + if cur_id == id { + self.map[i] = new_id; + break; + } + new_id = id; + } + } + r.remap(|sid| self.map[self.idx.to_index(sid)]); + } +} + +/// A simple type for mapping between state indices and state IDs. +/// +/// The reason why this exists is because state IDs are "premultiplied" in a +/// DFA. That is, in order to get to the transitions for a particular state, +/// one need only use the state ID as-is, instead of having to multiply it by +/// transition table's stride. +/// +/// The downside of this is that it's inconvenient to map between state IDs +/// using a dense map, e.g., Vec. That's because state IDs look like +/// `0`, `stride`, `2*stride`, `3*stride`, etc., instead of `0`, `1`, `2`, `3`, +/// etc. +/// +/// Since our state IDs are premultiplied, we can convert back-and-forth +/// between IDs and indices by simply unmultiplying the IDs and multiplying the +/// indices. +/// +/// Note that for a sparse NFA, state IDs and indices are equivalent. In this +/// case, we set the stride of the index mapped to be `0`, which acts as an +/// identity. +#[derive(Debug)] +struct IndexMapper { + /// The power of 2 corresponding to the stride of the corresponding + /// transition table. 'id >> stride2' de-multiplies an ID while 'index << + /// stride2' pre-multiplies an index to an ID. + stride2: usize, +} + +impl IndexMapper { + /// Convert a state ID to a state index. + fn to_index(&self, id: StateID) -> usize { + id.as_usize() >> self.stride2 + } + + /// Convert a state index to a state ID. + fn to_state_id(&self, index: usize) -> StateID { + // CORRECTNESS: If the given index is not valid, then it is not + // required for this to panic or return a valid state ID. We'll "just" + // wind up with panics or silent logic errors at some other point. But + // this is OK because if Remappable::state_len is correct and so is + // 'to_index', then all inputs to 'to_state_id' should be valid indices + // and thus transform into valid state IDs. 
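+        //
+        // As a concrete sketch: with stride2 == 2 (a stride of 4), index 3
+        // premultiplies to state ID 12, and to_index(12) shifts it back down
+        // to 3. With stride2 == 0, both conversions are the identity.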
+ StateID::new_unchecked(index << self.stride2) + } +} + +impl Remappable for noncontiguous::NFA { + fn state_len(&self) -> usize { + noncontiguous::NFA::states(self).len() + } + + fn swap_states(&mut self, id1: StateID, id2: StateID) { + noncontiguous::NFA::swap_states(self, id1, id2) + } + + fn remap(&mut self, map: impl Fn(StateID) -> StateID) { + noncontiguous::NFA::remap(self, map) + } +} diff --git a/vendor/aho-corasick/src/util/search.rs b/vendor/aho-corasick/src/util/search.rs new file mode 100644 index 00000000000000..59b7035e1ffd17 --- /dev/null +++ b/vendor/aho-corasick/src/util/search.rs @@ -0,0 +1,1148 @@ +use core::ops::{Range, RangeBounds}; + +use crate::util::primitives::PatternID; + +/// The configuration and the haystack to use for an Aho-Corasick search. +/// +/// When executing a search, there are a few parameters one might want to +/// configure: +/// +/// * The haystack to search, provided to the [`Input::new`] constructor. This +/// is the only required parameter. +/// * The span _within_ the haystack to limit a search to. (The default +/// is the entire haystack.) This is configured via [`Input::span`] or +/// [`Input::range`]. +/// * Whether to run an unanchored (matches can occur anywhere after the +/// start of the search) or anchored (matches can only occur beginning at +/// the start of the search) search. Unanchored search is the default. This is +/// configured via [`Input::anchored`]. +/// * Whether to quit the search as soon as a match has been found, regardless +/// of the [`MatchKind`] that the searcher was built with. This is configured +/// via [`Input::earliest`]. +/// +/// For most cases, the defaults for all optional parameters are appropriate. +/// The utility of this type is that it keeps the default or common case simple +/// while permitting tweaking parameters in more niche use cases while reusing +/// the same search APIs. +/// +/// # Valid bounds and search termination +/// +/// An `Input` permits setting the bounds of a search via either +/// [`Input::span`] or [`Input::range`]. The bounds set must be valid, or +/// else a panic will occur. Bounds are valid if and only if: +/// +/// * The bounds represent a valid range into the input's haystack. +/// * **or** the end bound is a valid ending bound for the haystack *and* +/// the start bound is exactly one greater than the end bound. +/// +/// In the latter case, [`Input::is_done`] will return true and indicates any +/// search receiving such an input should immediately return with no match. +/// +/// Other than representing "search is complete," the `Input::span` and +/// `Input::range` APIs are never necessary. Instead, callers can slice the +/// haystack instead, e.g., with `&haystack[start..end]`. With that said, they +/// can be more convenient than slicing because the match positions reported +/// when using `Input::span` or `Input::range` are in terms of the original +/// haystack. If you instead use `&haystack[start..end]`, then you'll need to +/// add `start` to any match position returned in order for it to be a correct +/// index into `haystack`. +/// +/// # Example: `&str` and `&[u8]` automatically convert to an `Input` +/// +/// There is a `From<&T> for Input` implementation for all `T: AsRef<[u8]>`. +/// Additionally, the [`AhoCorasick`](crate::AhoCorasick) search APIs accept +/// a `Into`. These two things combined together mean you can provide +/// things like `&str` and `&[u8]` to search APIs when the defaults are +/// suitable, but also an `Input` when they're not. 
For example: +/// +/// ``` +/// use aho_corasick::{AhoCorasick, Anchored, Input, Match, StartKind}; +/// +/// // Build a searcher that supports both unanchored and anchored modes. +/// let ac = AhoCorasick::builder() +/// .start_kind(StartKind::Both) +/// .build(&["abcd", "b"]) +/// .unwrap(); +/// let haystack = "abcd"; +/// +/// // A search using default parameters is unanchored. With standard +/// // semantics, this finds `b` first. +/// assert_eq!( +/// Some(Match::must(1, 1..2)), +/// ac.find(haystack), +/// ); +/// // Using the same 'find' routine, we can provide an 'Input' explicitly +/// // that is configured to do an anchored search. Since 'b' doesn't start +/// // at the beginning of the search, it is not reported as a match. +/// assert_eq!( +/// Some(Match::must(0, 0..4)), +/// ac.find(Input::new(haystack).anchored(Anchored::Yes)), +/// ); +/// ``` +#[derive(Clone)] +pub struct Input<'h> { + haystack: &'h [u8], + span: Span, + anchored: Anchored, + earliest: bool, +} + +impl<'h> Input<'h> { + /// Create a new search configuration for the given haystack. + #[inline] + pub fn new>(haystack: &'h H) -> Input<'h> { + Input { + haystack: haystack.as_ref(), + span: Span { start: 0, end: haystack.as_ref().len() }, + anchored: Anchored::No, + earliest: false, + } + } + + /// Set the span for this search. + /// + /// This routine is generic over how a span is provided. While + /// a [`Span`] may be given directly, one may also provide a + /// `std::ops::Range`. To provide anything supported by range + /// syntax, use the [`Input::range`] method. + /// + /// The default span is the entire haystack. + /// + /// Note that [`Input::range`] overrides this method and vice versa. + /// + /// # Panics + /// + /// This panics if the given span does not correspond to valid bounds in + /// the haystack or the termination of a search. + /// + /// # Example + /// + /// This example shows how the span of the search can impact whether a + /// match is reported or not. + /// + /// ``` + /// use aho_corasick::{AhoCorasick, Input, MatchKind}; + /// + /// let patterns = &["b", "abcd", "abc"]; + /// let haystack = "abcd"; + /// + /// let ac = AhoCorasick::builder() + /// .match_kind(MatchKind::LeftmostFirst) + /// .build(patterns) + /// .unwrap(); + /// let input = Input::new(haystack).span(0..3); + /// let mat = ac.try_find(input)?.expect("should have a match"); + /// // Without the span stopping the search early, 'abcd' would be reported + /// // because it is the correct leftmost-first match. + /// assert_eq!("abc", &haystack[mat.span()]); + /// + /// # Ok::<(), Box>(()) + /// ``` + #[inline] + pub fn span>(mut self, span: S) -> Input<'h> { + self.set_span(span); + self + } + + /// Like `Input::span`, but accepts any range instead. + /// + /// The default range is the entire haystack. + /// + /// Note that [`Input::span`] overrides this method and vice versa. + /// + /// # Panics + /// + /// This routine will panic if the given range could not be converted + /// to a valid [`Range`]. For example, this would panic when given + /// `0..=usize::MAX` since it cannot be represented using a half-open + /// interval in terms of `usize`. + /// + /// This routine also panics if the given range does not correspond to + /// valid bounds in the haystack or the termination of a search. 
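+    ///
+    /// As a brief sketch of the "termination" case mentioned above: a range
+    /// whose start is exactly one past its end (and whose end is still within
+    /// bounds) is accepted, and simply marks the search as done rather than
+    /// panicking.
+    ///
+    /// ```
+    /// use aho_corasick::Input;
+    ///
+    /// let input = Input::new("foobar").range(7..6);
+    /// assert!(input.is_done());
+    /// ```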
+ /// + /// # Example + /// + /// ``` + /// use aho_corasick::Input; + /// + /// let input = Input::new("foobar"); + /// assert_eq!(0..6, input.get_range()); + /// + /// let input = Input::new("foobar").range(2..=4); + /// assert_eq!(2..5, input.get_range()); + /// ``` + #[inline] + pub fn range>(mut self, range: R) -> Input<'h> { + self.set_range(range); + self + } + + /// Sets the anchor mode of a search. + /// + /// When a search is anchored (via [`Anchored::Yes`]), a match must begin + /// at the start of a search. When a search is not anchored (that's + /// [`Anchored::No`]), searchers will look for a match anywhere in the + /// haystack. + /// + /// By default, the anchored mode is [`Anchored::No`]. + /// + /// # Support for anchored searches + /// + /// Anchored or unanchored searches might not always be available, + /// depending on the type of searcher used and its configuration: + /// + /// * [`noncontiguous::NFA`](crate::nfa::noncontiguous::NFA) always + /// supports both unanchored and anchored searches. + /// * [`contiguous::NFA`](crate::nfa::contiguous::NFA) always supports both + /// unanchored and anchored searches. + /// * [`dfa::DFA`](crate::dfa::DFA) supports only unanchored + /// searches by default. + /// [`dfa::Builder::start_kind`](crate::dfa::Builder::start_kind) can + /// be used to change the default to supporting both kinds of searches + /// or even just anchored searches. + /// * [`AhoCorasick`](crate::AhoCorasick) inherits the same setup as a + /// `DFA`. Namely, it only supports unanchored searches by default, but + /// [`AhoCorasickBuilder::start_kind`](crate::AhoCorasickBuilder::start_kind) + /// can change this. + /// + /// If you try to execute a search using a `try_` ("fallible") method with + /// an unsupported anchor mode, then an error will be returned. For calls + /// to infallible search methods, a panic will result. + /// + /// # Example + /// + /// This demonstrates the differences between an anchored search and + /// an unanchored search. Notice that we build our `AhoCorasick` searcher + /// with [`StartKind::Both`] so that it supports both unanchored and + /// anchored searches simultaneously. + /// + /// ``` + /// use aho_corasick::{ + /// AhoCorasick, Anchored, Input, MatchKind, StartKind, + /// }; + /// + /// let patterns = &["bcd"]; + /// let haystack = "abcd"; + /// + /// let ac = AhoCorasick::builder() + /// .start_kind(StartKind::Both) + /// .build(patterns) + /// .unwrap(); + /// + /// // Note that 'Anchored::No' is the default, so it doesn't need to + /// // be explicitly specified here. + /// let input = Input::new(haystack); + /// let mat = ac.try_find(input)?.expect("should have a match"); + /// assert_eq!("bcd", &haystack[mat.span()]); + /// + /// // While 'bcd' occurs in the haystack, it does not begin where our + /// // search begins, so no match is found. + /// let input = Input::new(haystack).anchored(Anchored::Yes); + /// assert_eq!(None, ac.try_find(input)?); + /// + /// // However, if we start our search where 'bcd' starts, then we will + /// // find a match. + /// let input = Input::new(haystack).range(1..).anchored(Anchored::Yes); + /// let mat = ac.try_find(input)?.expect("should have a match"); + /// assert_eq!("bcd", &haystack[mat.span()]); + /// + /// # Ok::<(), Box>(()) + /// ``` + #[inline] + pub fn anchored(mut self, mode: Anchored) -> Input<'h> { + self.set_anchored(mode); + self + } + + /// Whether to execute an "earliest" search or not. 
+ /// + /// When running a non-overlapping search, an "earliest" search will + /// return the match location as early as possible. For example, given + /// the patterns `abc` and `b`, and a haystack of `abc`, a normal + /// leftmost-first search will return `abc` as a match. But an "earliest" + /// search will return as soon as it is known that a match occurs, which + /// happens once `b` is seen. + /// + /// Note that when using [`MatchKind::Standard`], the "earliest" option + /// has no effect since standard semantics are already "earliest." Note + /// also that this has no effect in overlapping searches, since overlapping + /// searches also use standard semantics and report all possible matches. + /// + /// This is disabled by default. + /// + /// # Example + /// + /// This example shows the difference between "earliest" searching and + /// normal leftmost searching. + /// + /// ``` + /// use aho_corasick::{AhoCorasick, Anchored, Input, MatchKind, StartKind}; + /// + /// let patterns = &["abc", "b"]; + /// let haystack = "abc"; + /// + /// let ac = AhoCorasick::builder() + /// .match_kind(MatchKind::LeftmostFirst) + /// .build(patterns) + /// .unwrap(); + /// + /// // The normal leftmost-first match. + /// let input = Input::new(haystack); + /// let mat = ac.try_find(input)?.expect("should have a match"); + /// assert_eq!("abc", &haystack[mat.span()]); + /// + /// // The "earliest" possible match, even if it isn't leftmost-first. + /// let input = Input::new(haystack).earliest(true); + /// let mat = ac.try_find(input)?.expect("should have a match"); + /// assert_eq!("b", &haystack[mat.span()]); + /// + /// # Ok::<(), Box>(()) + /// ``` + #[inline] + pub fn earliest(mut self, yes: bool) -> Input<'h> { + self.set_earliest(yes); + self + } + + /// Set the span for this search configuration. + /// + /// This is like the [`Input::span`] method, except this mutates the + /// span in place. + /// + /// This routine is generic over how a span is provided. While + /// a [`Span`] may be given directly, one may also provide a + /// `std::ops::Range`. + /// + /// # Panics + /// + /// This panics if the given span does not correspond to valid bounds in + /// the haystack or the termination of a search. + /// + /// # Example + /// + /// ``` + /// use aho_corasick::Input; + /// + /// let mut input = Input::new("foobar"); + /// assert_eq!(0..6, input.get_range()); + /// input.set_span(2..4); + /// assert_eq!(2..4, input.get_range()); + /// ``` + #[inline] + pub fn set_span>(&mut self, span: S) { + let span = span.into(); + assert!( + span.end <= self.haystack.len() + && span.start <= span.end.wrapping_add(1), + "invalid span {:?} for haystack of length {}", + span, + self.haystack.len(), + ); + self.span = span; + } + + /// Set the span for this search configuration given any range. + /// + /// This is like the [`Input::range`] method, except this mutates the + /// span in place. + /// + /// # Panics + /// + /// This routine will panic if the given range could not be converted + /// to a valid [`Range`]. For example, this would panic when given + /// `0..=usize::MAX` since it cannot be represented using a half-open + /// interval in terms of `usize`. + /// + /// This routine also panics if the given range does not correspond to + /// valid bounds in the haystack or the termination of a search. 
+ /// + /// # Example + /// + /// ``` + /// use aho_corasick::Input; + /// + /// let mut input = Input::new("foobar"); + /// assert_eq!(0..6, input.get_range()); + /// input.set_range(2..=4); + /// assert_eq!(2..5, input.get_range()); + /// ``` + #[inline] + pub fn set_range>(&mut self, range: R) { + use core::ops::Bound; + + // It's a little weird to convert ranges into spans, and then spans + // back into ranges when we actually slice the haystack. Because + // of that process, we always represent everything as a half-open + // internal. Therefore, handling things like m..=n is a little awkward. + let start = match range.start_bound() { + Bound::Included(&i) => i, + // Can this case ever happen? Range syntax doesn't support it... + Bound::Excluded(&i) => i.checked_add(1).unwrap(), + Bound::Unbounded => 0, + }; + let end = match range.end_bound() { + Bound::Included(&i) => i.checked_add(1).unwrap(), + Bound::Excluded(&i) => i, + Bound::Unbounded => self.haystack().len(), + }; + self.set_span(Span { start, end }); + } + + /// Set the starting offset for the span for this search configuration. + /// + /// This is a convenience routine for only mutating the start of a span + /// without having to set the entire span. + /// + /// # Panics + /// + /// This panics if the given span does not correspond to valid bounds in + /// the haystack or the termination of a search. + /// + /// # Example + /// + /// ``` + /// use aho_corasick::Input; + /// + /// let mut input = Input::new("foobar"); + /// assert_eq!(0..6, input.get_range()); + /// input.set_start(5); + /// assert_eq!(5..6, input.get_range()); + /// ``` + #[inline] + pub fn set_start(&mut self, start: usize) { + self.set_span(Span { start, ..self.get_span() }); + } + + /// Set the ending offset for the span for this search configuration. + /// + /// This is a convenience routine for only mutating the end of a span + /// without having to set the entire span. + /// + /// # Panics + /// + /// This panics if the given span does not correspond to valid bounds in + /// the haystack or the termination of a search. + /// + /// # Example + /// + /// ``` + /// use aho_corasick::Input; + /// + /// let mut input = Input::new("foobar"); + /// assert_eq!(0..6, input.get_range()); + /// input.set_end(5); + /// assert_eq!(0..5, input.get_range()); + /// ``` + #[inline] + pub fn set_end(&mut self, end: usize) { + self.set_span(Span { end, ..self.get_span() }); + } + + /// Set the anchor mode of a search. + /// + /// This is like [`Input::anchored`], except it mutates the search + /// configuration in place. + /// + /// # Example + /// + /// ``` + /// use aho_corasick::{Anchored, Input}; + /// + /// let mut input = Input::new("foobar"); + /// assert_eq!(Anchored::No, input.get_anchored()); + /// + /// input.set_anchored(Anchored::Yes); + /// assert_eq!(Anchored::Yes, input.get_anchored()); + /// ``` + #[inline] + pub fn set_anchored(&mut self, mode: Anchored) { + self.anchored = mode; + } + + /// Set whether the search should execute in "earliest" mode or not. + /// + /// This is like [`Input::earliest`], except it mutates the search + /// configuration in place. + /// + /// # Example + /// + /// ``` + /// use aho_corasick::Input; + /// + /// let mut input = Input::new("foobar"); + /// assert!(!input.get_earliest()); + /// input.set_earliest(true); + /// assert!(input.get_earliest()); + /// ``` + #[inline] + pub fn set_earliest(&mut self, yes: bool) { + self.earliest = yes; + } + + /// Return a borrow of the underlying haystack as a slice of bytes. 
+ /// + /// # Example + /// + /// ``` + /// use aho_corasick::Input; + /// + /// let input = Input::new("foobar"); + /// assert_eq!(b"foobar", input.haystack()); + /// ``` + #[inline] + pub fn haystack(&self) -> &[u8] { + self.haystack + } + + /// Return the start position of this search. + /// + /// This is a convenience routine for `search.get_span().start()`. + /// + /// # Example + /// + /// ``` + /// use aho_corasick::Input; + /// + /// let input = Input::new("foobar"); + /// assert_eq!(0, input.start()); + /// + /// let input = Input::new("foobar").span(2..4); + /// assert_eq!(2, input.start()); + /// ``` + #[inline] + pub fn start(&self) -> usize { + self.get_span().start + } + + /// Return the end position of this search. + /// + /// This is a convenience routine for `search.get_span().end()`. + /// + /// # Example + /// + /// ``` + /// use aho_corasick::Input; + /// + /// let input = Input::new("foobar"); + /// assert_eq!(6, input.end()); + /// + /// let input = Input::new("foobar").span(2..4); + /// assert_eq!(4, input.end()); + /// ``` + #[inline] + pub fn end(&self) -> usize { + self.get_span().end + } + + /// Return the span for this search configuration. + /// + /// If one was not explicitly set, then the span corresponds to the entire + /// range of the haystack. + /// + /// # Example + /// + /// ``` + /// use aho_corasick::{Input, Span}; + /// + /// let input = Input::new("foobar"); + /// assert_eq!(Span { start: 0, end: 6 }, input.get_span()); + /// ``` + #[inline] + pub fn get_span(&self) -> Span { + self.span + } + + /// Return the span as a range for this search configuration. + /// + /// If one was not explicitly set, then the span corresponds to the entire + /// range of the haystack. + /// + /// # Example + /// + /// ``` + /// use aho_corasick::Input; + /// + /// let input = Input::new("foobar"); + /// assert_eq!(0..6, input.get_range()); + /// ``` + #[inline] + pub fn get_range(&self) -> Range { + self.get_span().range() + } + + /// Return the anchored mode for this search configuration. + /// + /// If no anchored mode was set, then it defaults to [`Anchored::No`]. + /// + /// # Example + /// + /// ``` + /// use aho_corasick::{Anchored, Input}; + /// + /// let mut input = Input::new("foobar"); + /// assert_eq!(Anchored::No, input.get_anchored()); + /// + /// input.set_anchored(Anchored::Yes); + /// assert_eq!(Anchored::Yes, input.get_anchored()); + /// ``` + #[inline] + pub fn get_anchored(&self) -> Anchored { + self.anchored + } + + /// Return whether this search should execute in "earliest" mode. + /// + /// # Example + /// + /// ``` + /// use aho_corasick::Input; + /// + /// let input = Input::new("foobar"); + /// assert!(!input.get_earliest()); + /// ``` + #[inline] + pub fn get_earliest(&self) -> bool { + self.earliest + } + + /// Return true if this input has been exhausted, which in turn means all + /// subsequent searches will return no matches. + /// + /// This occurs precisely when the start position of this search is greater + /// than the end position of the search. 
+ /// + /// # Example + /// + /// ``` + /// use aho_corasick::Input; + /// + /// let mut input = Input::new("foobar"); + /// assert!(!input.is_done()); + /// input.set_start(6); + /// assert!(!input.is_done()); + /// input.set_start(7); + /// assert!(input.is_done()); + /// ``` + #[inline] + pub fn is_done(&self) -> bool { + self.get_span().start > self.get_span().end + } +} + +impl<'h> core::fmt::Debug for Input<'h> { + fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { + let mut fmter = f.debug_struct("Input"); + match core::str::from_utf8(self.haystack()) { + Ok(nice) => fmter.field("haystack", &nice), + Err(_) => fmter.field("haystack", &self.haystack()), + } + .field("span", &self.span) + .field("anchored", &self.anchored) + .field("earliest", &self.earliest) + .finish() + } +} + +impl<'h, H: ?Sized + AsRef<[u8]>> From<&'h H> for Input<'h> { + #[inline] + fn from(haystack: &'h H) -> Input<'h> { + Input::new(haystack) + } +} + +/// A representation of a range in a haystack. +/// +/// A span corresponds to the starting and ending _byte offsets_ of a +/// contiguous region of bytes. The starting offset is inclusive while the +/// ending offset is exclusive. That is, a span is a half-open interval. +/// +/// A span is used to report the offsets of a match, but it is also used to +/// convey which region of a haystack should be searched via routines like +/// [`Input::span`]. +/// +/// This is basically equivalent to a `std::ops::Range`, except this +/// type implements `Copy` which makes it more ergonomic to use in the context +/// of this crate. Indeed, `Span` exists only because `Range` does +/// not implement `Copy`. Like a range, this implements `Index` for `[u8]` +/// and `str`, and `IndexMut` for `[u8]`. For convenience, this also impls +/// `From`, which means things like `Span::from(5..10)` work. +/// +/// There are no constraints on the values of a span. It is, for example, legal +/// to create a span where `start > end`. +#[derive(Clone, Copy, Eq, Hash, PartialEq)] +pub struct Span { + /// The start offset of the span, inclusive. + pub start: usize, + /// The end offset of the span, exclusive. + pub end: usize, +} + +impl Span { + /// Returns this span as a range. + #[inline] + pub fn range(&self) -> Range { + Range::from(*self) + } + + /// Returns true when this span is empty. That is, when `start >= end`. + #[inline] + pub fn is_empty(&self) -> bool { + self.start >= self.end + } + + /// Returns the length of this span. + /// + /// This returns `0` in precisely the cases that `is_empty` returns `true`. + #[inline] + pub fn len(&self) -> usize { + self.end.saturating_sub(self.start) + } + + /// Returns true when the given offset is contained within this span. + /// + /// Note that an empty span contains no offsets and will always return + /// false. + #[inline] + pub fn contains(&self, offset: usize) -> bool { + !self.is_empty() && self.start <= offset && offset <= self.end + } + + /// Returns a new span with `offset` added to this span's `start` and `end` + /// values. 
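+    ///
+    /// # Example
+    ///
+    /// A minimal sketch of the shift:
+    ///
+    /// ```
+    /// use aho_corasick::Span;
+    ///
+    /// let span = Span::from(5..10);
+    /// // Both the start and the end move forward by 2.
+    /// assert_eq!(Span { start: 7, end: 12 }, span.offset(2));
+    /// ```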
+ #[inline] + pub fn offset(&self, offset: usize) -> Span { + Span { start: self.start + offset, end: self.end + offset } + } +} + +impl core::fmt::Debug for Span { + fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { + write!(f, "{}..{}", self.start, self.end) + } +} + +impl core::ops::Index for [u8] { + type Output = [u8]; + + #[inline] + fn index(&self, index: Span) -> &[u8] { + &self[index.range()] + } +} + +impl core::ops::IndexMut for [u8] { + #[inline] + fn index_mut(&mut self, index: Span) -> &mut [u8] { + &mut self[index.range()] + } +} + +impl core::ops::Index for str { + type Output = str; + + #[inline] + fn index(&self, index: Span) -> &str { + &self[index.range()] + } +} + +impl From> for Span { + #[inline] + fn from(range: Range) -> Span { + Span { start: range.start, end: range.end } + } +} + +impl From for Range { + #[inline] + fn from(span: Span) -> Range { + Range { start: span.start, end: span.end } + } +} + +impl PartialEq> for Span { + #[inline] + fn eq(&self, range: &Range) -> bool { + self.start == range.start && self.end == range.end + } +} + +impl PartialEq for Range { + #[inline] + fn eq(&self, span: &Span) -> bool { + self.start == span.start && self.end == span.end + } +} + +/// The type of anchored search to perform. +/// +/// If an Aho-Corasick searcher does not support the anchored mode selected, +/// then the search will return an error or panic, depending on whether a +/// fallible or an infallible routine was called. +#[non_exhaustive] +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub enum Anchored { + /// Run an unanchored search. This means a match may occur anywhere at or + /// after the start position of the search up until the end position of the + /// search. + No, + /// Run an anchored search. This means that a match must begin at the start + /// position of the search and end before the end position of the search. + Yes, +} + +impl Anchored { + /// Returns true if and only if this anchor mode corresponds to an anchored + /// search. + /// + /// # Example + /// + /// ``` + /// use aho_corasick::Anchored; + /// + /// assert!(!Anchored::No.is_anchored()); + /// assert!(Anchored::Yes.is_anchored()); + /// ``` + #[inline] + pub fn is_anchored(&self) -> bool { + matches!(*self, Anchored::Yes) + } +} + +/// A representation of a match reported by an Aho-Corasick searcher. +/// +/// A match has two essential pieces of information: the [`PatternID`] that +/// matches, and the [`Span`] of the match in a haystack. +/// +/// The pattern is identified by an ID, which corresponds to its position +/// (starting from `0`) relative to other patterns used to construct the +/// corresponding searcher. If only a single pattern is provided, then all +/// matches are guaranteed to have a pattern ID of `0`. +/// +/// Every match reported by a searcher guarantees that its span has its start +/// offset as less than or equal to its end offset. +#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)] +pub struct Match { + /// The pattern ID. + pattern: PatternID, + /// The underlying match span. + span: Span, +} + +impl Match { + /// Create a new match from a pattern ID and a span. + /// + /// This constructor is generic over how a span is provided. While + /// a [`Span`] may be given directly, one may also provide a + /// `std::ops::Range`. + /// + /// # Panics + /// + /// This panics if `end < start`. + /// + /// # Example + /// + /// This shows how to create a match for the first pattern in an + /// Aho-Corasick searcher using convenient range syntax. 
+ /// + /// ``` + /// use aho_corasick::{Match, PatternID}; + /// + /// let m = Match::new(PatternID::ZERO, 5..10); + /// assert_eq!(0, m.pattern().as_usize()); + /// assert_eq!(5, m.start()); + /// assert_eq!(10, m.end()); + /// ``` + #[inline] + pub fn new>(pattern: PatternID, span: S) -> Match { + let span = span.into(); + assert!(span.start <= span.end, "invalid match span"); + Match { pattern, span } + } + + /// Create a new match from a pattern ID and a byte offset span. + /// + /// This constructor is generic over how a span is provided. While + /// a [`Span`] may be given directly, one may also provide a + /// `std::ops::Range`. + /// + /// This is like [`Match::new`], but accepts a `usize` instead of a + /// [`PatternID`]. This panics if the given `usize` is not representable + /// as a `PatternID`. + /// + /// # Panics + /// + /// This panics if `end < start` or if `pattern > PatternID::MAX`. + /// + /// # Example + /// + /// This shows how to create a match for the third pattern in an + /// Aho-Corasick searcher using convenient range syntax. + /// + /// ``` + /// use aho_corasick::Match; + /// + /// let m = Match::must(3, 5..10); + /// assert_eq!(3, m.pattern().as_usize()); + /// assert_eq!(5, m.start()); + /// assert_eq!(10, m.end()); + /// ``` + #[inline] + pub fn must>(pattern: usize, span: S) -> Match { + Match::new(PatternID::must(pattern), span) + } + + /// Returns the ID of the pattern that matched. + /// + /// The ID of a pattern is derived from the position in which it was + /// originally inserted into the corresponding searcher. The first pattern + /// has identifier `0`, and each subsequent pattern is `1`, `2` and so on. + #[inline] + pub fn pattern(&self) -> PatternID { + self.pattern + } + + /// The starting position of the match. + /// + /// This is a convenience routine for `Match::span().start`. + #[inline] + pub fn start(&self) -> usize { + self.span().start + } + + /// The ending position of the match. + /// + /// This is a convenience routine for `Match::span().end`. + #[inline] + pub fn end(&self) -> usize { + self.span().end + } + + /// Returns the match span as a range. + /// + /// This is a convenience routine for `Match::span().range()`. + #[inline] + pub fn range(&self) -> core::ops::Range { + self.span().range() + } + + /// Returns the span for this match. + #[inline] + pub fn span(&self) -> Span { + self.span + } + + /// Returns true when the span in this match is empty. + /// + /// An empty match can only be returned when empty pattern is in the + /// Aho-Corasick searcher. + #[inline] + pub fn is_empty(&self) -> bool { + self.span().is_empty() + } + + /// Returns the length of this match. + /// + /// This returns `0` in precisely the cases that `is_empty` returns `true`. + #[inline] + pub fn len(&self) -> usize { + self.span().len() + } + + /// Returns a new match with `offset` added to its span's `start` and `end` + /// values. + #[inline] + pub fn offset(&self, offset: usize) -> Match { + Match { + pattern: self.pattern, + span: Span { + start: self.start() + offset, + end: self.end() + offset, + }, + } + } +} + +/// A knob for controlling the match semantics of an Aho-Corasick automaton. +/// +/// There are two generally different ways that Aho-Corasick automatons can +/// report matches. The first way is the "standard" approach that results from +/// implementing most textbook explanations of Aho-Corasick. The second way is +/// to report only the leftmost non-overlapping matches. 
The leftmost approach +/// is in turn split into two different ways of resolving ambiguous matches: +/// leftmost-first and leftmost-longest. +/// +/// The `Standard` match kind is the default and is the only one that supports +/// overlapping matches and stream searching. (Trying to find overlapping or +/// streaming matches using leftmost match semantics will result in an error in +/// fallible APIs and a panic when using infallibe APIs.) The `Standard` match +/// kind will report matches as they are seen. When searching for overlapping +/// matches, then all possible matches are reported. When searching for +/// non-overlapping matches, the first match seen is reported. For example, for +/// non-overlapping matches, given the patterns `abcd` and `b` and the haystack +/// `abcdef`, only a match for `b` is reported since it is detected first. The +/// `abcd` match is never reported since it overlaps with the `b` match. +/// +/// In contrast, the leftmost match kind always prefers the leftmost match +/// among all possible matches. Given the same example as above with `abcd` and +/// `b` as patterns and `abcdef` as the haystack, the leftmost match is `abcd` +/// since it begins before the `b` match, even though the `b` match is detected +/// before the `abcd` match. In this case, the `b` match is not reported at all +/// since it overlaps with the `abcd` match. +/// +/// The difference between leftmost-first and leftmost-longest is in how they +/// resolve ambiguous matches when there are multiple leftmost matches to +/// choose from. Leftmost-first always chooses the pattern that was provided +/// earliest, where as leftmost-longest always chooses the longest matching +/// pattern. For example, given the patterns `a` and `ab` and the subject +/// string `ab`, the leftmost-first match is `a` but the leftmost-longest match +/// is `ab`. Conversely, if the patterns were given in reverse order, i.e., +/// `ab` and `a`, then both the leftmost-first and leftmost-longest matches +/// would be `ab`. Stated differently, the leftmost-first match depends on the +/// order in which the patterns were given to the Aho-Corasick automaton. +/// Because of that, when leftmost-first matching is used, if a pattern `A` +/// that appears before a pattern `B` is a prefix of `B`, then it is impossible +/// to ever observe a match of `B`. +/// +/// If you're not sure which match kind to pick, then stick with the standard +/// kind, which is the default. In particular, if you need overlapping or +/// streaming matches, then you _must_ use the standard kind. The leftmost +/// kinds are useful in specific circumstances. For example, leftmost-first can +/// be very useful as a way to implement match priority based on the order of +/// patterns given and leftmost-longest can be useful for dictionary searching +/// such that only the longest matching words are reported. +/// +/// # Relationship with regular expression alternations +/// +/// Understanding match semantics can be a little tricky, and one easy way +/// to conceptualize non-overlapping matches from an Aho-Corasick automaton +/// is to think about them as a simple alternation of literals in a regular +/// expression. For example, let's say we wanted to match the strings +/// `Sam` and `Samwise`, which would turn into the regex `Sam|Samwise`. It +/// turns out that regular expression engines have two different ways of +/// matching this alternation. 
The first way, leftmost-longest, is commonly +/// found in POSIX compatible implementations of regular expressions (such as +/// `grep`). The second way, leftmost-first, is commonly found in backtracking +/// implementations such as Perl. (Some regex engines, such as RE2 and Rust's +/// regex engine do not use backtracking, but still implement leftmost-first +/// semantics in an effort to match the behavior of dominant backtracking +/// regex engines such as those found in Perl, Ruby, Python, Javascript and +/// PHP.) +/// +/// That is, when matching `Sam|Samwise` against `Samwise`, a POSIX regex +/// will match `Samwise` because it is the longest possible match, but a +/// Perl-like regex will match `Sam` since it appears earlier in the +/// alternation. Indeed, the regex `Sam|Samwise` in a Perl-like regex engine +/// will never match `Samwise` since `Sam` will always have higher priority. +/// Conversely, matching the regex `Samwise|Sam` against `Samwise` will lead to +/// a match of `Samwise` in both POSIX and Perl-like regexes since `Samwise` is +/// still longest match, but it also appears earlier than `Sam`. +/// +/// The "standard" match semantics of Aho-Corasick generally don't correspond +/// to the match semantics of any large group of regex implementations, so +/// there's no direct analogy that can be made here. Standard match semantics +/// are generally useful for overlapping matches, or if you just want to see +/// matches as they are detected. +/// +/// The main conclusion to draw from this section is that the match semantics +/// can be tweaked to precisely match either Perl-like regex alternations or +/// POSIX regex alternations. +#[non_exhaustive] +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub enum MatchKind { + /// Use standard match semantics, which support overlapping matches. When + /// used with non-overlapping matches, matches are reported as they are + /// seen. + Standard, + /// Use leftmost-first match semantics, which reports leftmost matches. + /// When there are multiple possible leftmost matches, the match + /// corresponding to the pattern that appeared earlier when constructing + /// the automaton is reported. + /// + /// This does **not** support overlapping matches or stream searching. If + /// this match kind is used, attempting to find overlapping matches or + /// stream matches will fail. + LeftmostFirst, + /// Use leftmost-longest match semantics, which reports leftmost matches. + /// When there are multiple possible leftmost matches, the longest match + /// is chosen. + /// + /// This does **not** support overlapping matches or stream searching. If + /// this match kind is used, attempting to find overlapping matches or + /// stream matches will fail. + LeftmostLongest, +} + +/// The default match kind is `MatchKind::Standard`. +impl Default for MatchKind { + fn default() -> MatchKind { + MatchKind::Standard + } +} + +impl MatchKind { + #[inline] + pub(crate) fn is_standard(&self) -> bool { + matches!(*self, MatchKind::Standard) + } + + #[inline] + pub(crate) fn is_leftmost(&self) -> bool { + matches!(*self, MatchKind::LeftmostFirst | MatchKind::LeftmostLongest) + } + + #[inline] + pub(crate) fn is_leftmost_first(&self) -> bool { + matches!(*self, MatchKind::LeftmostFirst) + } + + /// Convert this match kind into a packed match kind. If this match kind + /// corresponds to standard semantics, then this returns None, since + /// packed searching does not support standard semantics. 
+ #[inline] + pub(crate) fn as_packed(&self) -> Option { + match *self { + MatchKind::Standard => None, + MatchKind::LeftmostFirst => { + Some(crate::packed::MatchKind::LeftmostFirst) + } + MatchKind::LeftmostLongest => { + Some(crate::packed::MatchKind::LeftmostLongest) + } + } + } +} + +/// The kind of anchored starting configurations to support in an Aho-Corasick +/// searcher. +/// +/// Depending on which searcher is used internally by +/// [`AhoCorasick`](crate::AhoCorasick), supporting both unanchored +/// and anchored searches can be quite costly. For this reason, +/// [`AhoCorasickBuilder::start_kind`](crate::AhoCorasickBuilder::start_kind) +/// can be used to configure whether your searcher supports unanchored, +/// anchored or both kinds of searches. +/// +/// This searcher configuration knob works in concert with the search time +/// configuration [`Input::anchored`]. Namely, if one requests an unsupported +/// anchored mode, then the search will either panic or return an error, +/// depending on whether you're using infallible or fallibe APIs, respectively. +/// +/// `AhoCorasick` by default only supports unanchored searches. +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub enum StartKind { + /// Support both anchored and unanchored searches. + Both, + /// Support only unanchored searches. Requesting an anchored search will + /// return an error in fallible APIs and panic in infallible APIs. + Unanchored, + /// Support only anchored searches. Requesting an unanchored search will + /// return an error in fallible APIs and panic in infallible APIs. + Anchored, +} + +impl Default for StartKind { + fn default() -> StartKind { + StartKind::Unanchored + } +} diff --git a/vendor/aho-corasick/src/util/special.rs b/vendor/aho-corasick/src/util/special.rs new file mode 100644 index 00000000000000..beeba40c893107 --- /dev/null +++ b/vendor/aho-corasick/src/util/special.rs @@ -0,0 +1,42 @@ +use crate::util::primitives::StateID; + +/// A collection of sentinel state IDs for Aho-Corasick automata. +/// +/// This specifically enables the technique by which we determine which states +/// are dead, matches or start states. Namely, by arranging states in a +/// particular order, we can determine the type of a state simply by looking at +/// its ID. +#[derive(Clone, Debug)] +pub(crate) struct Special { + /// The maximum ID of all the "special" states. This corresponds either to + /// start_anchored_id when a prefilter is active and max_match_id when a + /// prefilter is not active. The idea here is that if there is no prefilter, + /// then there is no point in treating start states as special. + pub(crate) max_special_id: StateID, + /// The maximum ID of all the match states. Any state ID bigger than this + /// is guaranteed to be a non-match ID. + /// + /// It is possible and legal for max_match_id to be equal to + /// start_anchored_id, which occurs precisely in the case where the empty + /// string is a pattern that was added to the underlying automaton. + pub(crate) max_match_id: StateID, + /// The state ID of the start state used for unanchored searches. + pub(crate) start_unanchored_id: StateID, + /// The state ID of the start state used for anchored searches. This is + /// always start_unanchored_id+1. + pub(crate) start_anchored_id: StateID, +} + +impl Special { + /// Create a new set of "special" state IDs with all IDs initialized to + /// zero. The general idea here is that they will be updated and set to + /// correct values later. 
+ pub(crate) fn zero() -> Special { + Special { + max_special_id: StateID::ZERO, + max_match_id: StateID::ZERO, + start_unanchored_id: StateID::ZERO, + start_anchored_id: StateID::ZERO, + } + } +} diff --git a/vendor/base64/.cargo-checksum.json b/vendor/base64/.cargo-checksum.json new file mode 100644 index 00000000000000..b0b083ea550910 --- /dev/null +++ b/vendor/base64/.cargo-checksum.json @@ -0,0 +1 @@ +{"files":{".cargo_vcs_info.json":"4d505b174c7ecd854ad5668b1750be31305ca69e4634131e7bb262e98f3cf50e",".circleci/config.yml":"c44defbad42a19f8c5fb8aeb9e71beaf1d0e920d615a06f42e4936c29e53547f",".github/ISSUE_TEMPLATE/general-purpose-issue.md":"9e89c069e50dc24a09ece40bd6d02618ab044b2b53d2e5221defd6c884c96964","Cargo.lock":"cee37732975a1ffc1f956d3d05b6edf1baec72841cfabc384a21b02b3bfa0275","Cargo.toml":"52bee6a418e14918d37058fd15fccfd0f417a06fe4f9668b6f97866bf7f991e3","Cargo.toml.orig":"ff2d361bc5f6ec9b4738c293b3dfa65278e93a2664040f75ef6c944441818afe","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"0dd882e53de11566d50f8e8e2d5a651bcf3fabee4987d70f306233cf39094ba7","README.md":"df01f5b4317d601e7de86743f9818aec9196abf9e298f5e47679b7a966ecd945","RELEASE-NOTES.md":"997a5193317a8bff266ecfe4f015ba070b782b6df7d3a1738b9b52584d57f9c6","benches/benchmarks.rs":"cebbcc8649e760e569c6be04f5e727aee2c2568ced7faab580fc0aa0d0426d26","clippy.toml":"b26be4d15ed059985ce6994f11817fd7562046f46e460a0dc64dbb71cfc246d1","examples/base64.rs":"b75ead2199a9b4389c69fe6f1ae988176a263b8fc84e7a4fea1d7e5a41592078","icon_CLion.svg":"cffa044ba75cb998ee3306991dc4a3755ec2f39ab95ddd4b74bc21988389020f","src/alphabet.rs":"5de2beb8fcccb078c61cac2c0477ebbde145122d6c10a0f7ea2e57e8159318e0","src/chunked_encoder.rs":"edfdbb9a4329b80fb2c769ada81e234e00839e0fa85faaa70bacf40ce12e951c","src/decode.rs":"b046a72d62eaac58dc42efcf7848d9d96d022f6594e851cf87074b77ce45c04a","src/display.rs":"31bf3e19274a0b80dd8948a81ea535944f756ef5b88736124c940f5fe1e8c71c","src/encode.rs":"44ddcc162f3fe9817b6e857dda0a3b9197b90a657e5f71c44aacabf5431ccf7d","src/engine/general_purpose/decode.rs":"d865b057e5788e7fefd189cf57ec913df263e6a0742dfa52513f587e14fa1a92","src/engine/general_purpose/decode_suffix.rs":"689688f7bf442b232d3b9f56a1b41c56d9393ace88556a165c224b93dd19b74e","src/engine/general_purpose/mod.rs":"901760a7f5721ec3bafad5fea6251f57de0f767ecb2e1e2fdfe64d661404ec34","src/engine/mod.rs":"5e4a6c0e86417f3b62350264ef383f91e9864390f7c315d786ecd8e9c920ee9f","src/engine/naive.rs":"70de29d909c3fe7918d2965782088b05047b8b6e30d1d2bf11ba073d3f8633ff","src/engine/tests.rs":"2cc8d1431f40f5b9c3ad8970e6fb73bba8be3f2317553dd026539f41908aaa19","src/lib.rs":"c4db7bd31ace78aec2ecd151cef3ad90dfdc76097ba12027bde79d3c82612f7c","src/prelude.rs":"c1587138e5301ac797c5c362cb3638649b33f79c20c16db6f38ad44330540752","src/read/decoder.rs":"00aaa0553a54fcf12762658c4e56663a9705cc30c07af30976291e6f69d78c3d","src/read/decoder_tests.rs":"66ec39bf6e86f21f4db1afd6c5cd63d4a4931ab896b9c38de25d99b803804bbf","src/read/mod.rs":"e0b714eda02d16b1ffa6f78fd09b2f963e01c881b1f7c17b39db4e904be5e746","src/tests.rs":"90cb9f8a1ccb7c4ddc4f8618208e0031fc97e0df0e5aa466d6a5cf45d25967d8","src/write/encoder.rs":"c889c853249220fe2ddaeb77ee6e2ee2945f7db88cd6658ef89ff71b81255ea8","src/write/encoder_string_writer.rs":"0326c9d120369b9bbc35697b5b9b141bed24283374c93d5af1052eb042e47799","src/write/encoder_tests.rs":"28695a485b17cf5db73656aae5d90127f726e02c6d70efd83e5ab53a4cc17b38","src/write/mod.rs":"73cd98dadc9d712b3fefd9449d97e825e097397441b90588e0051e4d3b0911b9","tests/encode.
rs":"5309f4538b1df611436f7bfba7409c725161b6f841b1bbf8d9890ae185de7d88","tests/tests.rs":"78efcf0dc4bb6ae52f7a91fcad89e44e4dce578224c36b4e6c1c306459be8500"},"package":"72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6"} \ No newline at end of file diff --git a/vendor/base64/.cargo_vcs_info.json b/vendor/base64/.cargo_vcs_info.json new file mode 100644 index 00000000000000..50adb81ec205f5 --- /dev/null +++ b/vendor/base64/.cargo_vcs_info.json @@ -0,0 +1,6 @@ +{ + "git": { + "sha1": "e14400697453bcc85997119b874bc03d9601d0af" + }, + "path_in_vcs": "" +} \ No newline at end of file diff --git a/vendor/base64/.circleci/config.yml b/vendor/base64/.circleci/config.yml new file mode 100644 index 00000000000000..4d2576dc824a49 --- /dev/null +++ b/vendor/base64/.circleci/config.yml @@ -0,0 +1,135 @@ +version: '2.1' + +workflows: + version: 2 + build: + jobs: + - build: + matrix: + parameters: + rust_img: [ + # Yes, a single-parameter axis, but means it can be referred to as a cache parameter easily without + # duplicating the magic version number throughout this file. + # The default rust images (not -slim or -alpine) are based on buildpack-deps. Hopefully this will + # be easier on the CI hosts since presumably those fat lower layers will already be cached, and + # therefore faster than a minimal, customized alpine. + # MSRV + 'rust:1.48.0' + ] + # a hacky scheme to work around CircleCI's inability to deal with mutable docker tags, forcing us to + # get a nightly or stable toolchain via rustup instead of a mutable docker tag + toolchain_override: [ + '__msrv__', # won't add any other toolchains, just uses what's in the docker image + '1.70.0', # minimum needed to build dev-dependencies + 'stable', + 'beta', + 'nightly' + ] + +jobs: + build: + parameters: + rust_img: + type: string + toolchain_override: + type: string + docker: + - image: << parameters.rust_img >> + steps: + - checkout + - restore_cache: + key: project-cache-v5-<< parameters.rust_img >>-<< parameters.toolchain_override >>-{{ checksum "Cargo.toml" }} + - run: + name: Setup toolchain + command: | + if [[ '<< parameters.toolchain_override >>' != '__msrv__' ]] + then + rustup toolchain add '<< parameters.toolchain_override >>' + rustup default '<< parameters.toolchain_override >>' + fi + - run: + name: Log rustc version + command: rustc --version + - run: + name: Build main target + # update first to select dependencies appropriate for this toolchain + command: | + cargo update + cargo build + - run: + name: Check formatting + command: | + rustup component add rustfmt + cargo fmt -- --check + - run: + name: Check clippy lints + # we only care about stable clippy -- nightly clippy is a bit wild + command: | + if [[ '<< parameters.toolchain_override >>' == 'stable' ]] + then + rustup component add clippy + cargo clippy --all-targets + fi + - run: + name: Build all targets + command: | + if [[ '<< parameters.toolchain_override >>' != '__msrv__' ]] + then + cargo build --all-targets + fi + - run: + name: Build without default features + command: | + cargo build --no-default-features + if [[ '<< parameters.toolchain_override >>' != '__msrv__' ]] + then + cargo build --no-default-features --all-targets + fi + - run: + name: Build with only alloc + command: | + cargo build --no-default-features --features alloc + if [[ '<< parameters.toolchain_override >>' != '__msrv__' ]] + then + cargo build --no-default-features --features alloc --all-targets + fi + - run: + name: Add arm toolchain + command: rustup target add 
thumbv6m-none-eabi + - run: + name: Build ARM without default features (no_std) + command: cargo build --target thumbv6m-none-eabi --no-default-features + - run: + name: Build ARM with only alloc feature + command: cargo build --target thumbv6m-none-eabi --no-default-features --features alloc + - run: + # dev dependencies can't build on 1.48.0 + name: Run tests + command: | + if [[ '<< parameters.toolchain_override >>' != '__msrv__' ]] + then + cargo test --no-default-features + cargo test + fi + - run: + name: Build docs + command: cargo doc --verbose + - run: + name: Confirm fuzzers can run + # TERM=dumb prevents cargo fuzz list from printing with color + environment: + TERM: dumb + command: | + if [[ '<< parameters.toolchain_override >>' = 'nightly' ]] + then + cargo install cargo-fuzz + cargo fuzz list | xargs -I FUZZER cargo fuzz run FUZZER -- -max_total_time=1 + fi + + - save_cache: + key: project-cache-v5-<< parameters.rust_img >>-<< parameters.toolchain_override >>-{{ checksum "Cargo.toml" }} + paths: + # rust docker img doesn't use $HOME/[.cargo,.rustup] + - /usr/local/cargo + - /usr/local/rustup + - ./target diff --git a/vendor/base64/.github/ISSUE_TEMPLATE/general-purpose-issue.md b/vendor/base64/.github/ISSUE_TEMPLATE/general-purpose-issue.md new file mode 100644 index 00000000000000..b35b2f3eb65ead --- /dev/null +++ b/vendor/base64/.github/ISSUE_TEMPLATE/general-purpose-issue.md @@ -0,0 +1,21 @@ +--- +name: General purpose issue +about: General purpose issue +title: Default issue +labels: '' +assignees: '' + +--- + +# Before you file an issue + +- Did you read the docs? +- Did you read the README? + +# The problem + +- + +# How I, the issue filer, am going to help solve it + +- diff --git a/vendor/base64/Cargo.lock b/vendor/base64/Cargo.lock new file mode 100644 index 00000000000000..84e188d12c4b89 --- /dev/null +++ b/vendor/base64/Cargo.lock @@ -0,0 +1,1515 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. 
+version = 3 + +[[package]] +name = "anes" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299" + +[[package]] +name = "async-attributes" +version = "1.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a3203e79f4dd9bdda415ed03cf14dae5a2bf775c683a00f94e9cd1faf0f596e5" +dependencies = [ + "quote", + "syn 1.0.109", +] + +[[package]] +name = "async-channel" +version = "1.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "81953c529336010edd6d8e358f886d9581267795c61b19475b71314bffa46d35" +dependencies = [ + "concurrent-queue", + "event-listener 2.5.3", + "futures-core", +] + +[[package]] +name = "async-channel" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f28243a43d821d11341ab73c80bed182dc015c514b951616cf79bd4af39af0c3" +dependencies = [ + "concurrent-queue", + "event-listener 5.2.0", + "event-listener-strategy 0.5.0", + "futures-core", + "pin-project-lite", +] + +[[package]] +name = "async-executor" +version = "1.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "17ae5ebefcc48e7452b4987947920dac9450be1110cadf34d1b8c116bdbaf97c" +dependencies = [ + "async-lock 3.3.0", + "async-task", + "concurrent-queue", + "fastrand 2.0.1", + "futures-lite 2.2.0", + "slab", +] + +[[package]] +name = "async-global-executor" +version = "2.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05b1b633a2115cd122d73b955eadd9916c18c8f510ec9cd1686404c60ad1c29c" +dependencies = [ + "async-channel 2.2.0", + "async-executor", + "async-io 2.3.1", + "async-lock 3.3.0", + "blocking", + "futures-lite 2.2.0", + "once_cell", +] + +[[package]] +name = "async-io" +version = "1.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fc5b45d93ef0529756f812ca52e44c221b35341892d3dcc34132ac02f3dd2af" +dependencies = [ + "async-lock 2.8.0", + "autocfg", + "cfg-if", + "concurrent-queue", + "futures-lite 1.13.0", + "log", + "parking", + "polling 2.8.0", + "rustix 0.37.27", + "slab", + "socket2", + "waker-fn", +] + +[[package]] +name = "async-io" +version = "2.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f97ab0c5b00a7cdbe5a371b9a782ee7be1316095885c8a4ea1daf490eb0ef65" +dependencies = [ + "async-lock 3.3.0", + "cfg-if", + "concurrent-queue", + "futures-io", + "futures-lite 2.2.0", + "parking", + "polling 3.4.0", + "rustix 0.38.9", + "slab", + "tracing", + "windows-sys 0.52.0", +] + +[[package]] +name = "async-lock" +version = "2.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "287272293e9d8c41773cec55e365490fe034813a2f172f502d6ddcf75b2f582b" +dependencies = [ + "event-listener 2.5.3", +] + +[[package]] +name = "async-lock" +version = "3.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d034b430882f8381900d3fe6f0aaa3ad94f2cb4ac519b429692a1bc2dda4ae7b" +dependencies = [ + "event-listener 4.0.3", + "event-listener-strategy 0.4.0", + "pin-project-lite", +] + +[[package]] +name = "async-std" +version = "1.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "62565bb4402e926b29953c785397c6dc0391b7b446e45008b0049eb43cec6f5d" +dependencies = [ + "async-attributes", + "async-channel 1.9.0", + "async-global-executor", + "async-io 1.13.0", + "async-lock 2.8.0", + "crossbeam-utils", + 
"futures-channel", + "futures-core", + "futures-io", + "futures-lite 1.13.0", + "gloo-timers", + "kv-log-macro", + "log", + "memchr", + "once_cell", + "pin-project-lite", + "pin-utils", + "slab", + "wasm-bindgen-futures", +] + +[[package]] +name = "async-task" +version = "4.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fbb36e985947064623dbd357f727af08ffd077f93d696782f3c56365fa2e2799" + +[[package]] +name = "atomic-waker" +version = "1.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1505bd5d3d116872e7271a6d4e16d81d0c8570876c8de68093a09ac269d8aac0" + +[[package]] +name = "atty" +version = "0.2.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8" +dependencies = [ + "hermit-abi 0.1.19", + "libc", + "winapi", +] + +[[package]] +name = "autocfg" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" + +[[package]] +name = "base64" +version = "0.22.1" +dependencies = [ + "clap", + "criterion", + "once_cell", + "rand", + "rstest", + "rstest_reuse", + "strum", +] + +[[package]] +name = "bitflags" +version = "1.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" + +[[package]] +name = "bitflags" +version = "2.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ed570934406eb16438a4e976b1b4500774099c13b8cb96eec99f620f05090ddf" + +[[package]] +name = "blocking" +version = "1.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6a37913e8dc4ddcc604f0c6d3bf2887c995153af3611de9e23c352b44c1b9118" +dependencies = [ + "async-channel 2.2.0", + "async-lock 3.3.0", + "async-task", + "fastrand 2.0.1", + "futures-io", + "futures-lite 2.2.0", + "piper", + "tracing", +] + +[[package]] +name = "bumpalo" +version = "3.15.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ea184aa71bb362a1157c896979544cc23974e08fd265f29ea96b59f0b4a555b" + +[[package]] +name = "cast" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" + +[[package]] +name = "cfg-if" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" + +[[package]] +name = "ciborium" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42e69ffd6f0917f5c029256a24d0161db17cea3997d185db0d35926308770f0e" +dependencies = [ + "ciborium-io", + "ciborium-ll", + "serde", +] + +[[package]] +name = "ciborium-io" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05afea1e0a06c9be33d539b876f1ce3692f4afea2cb41f740e7743225ed1c757" + +[[package]] +name = "ciborium-ll" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "57663b653d948a338bfb3eeba9bb2fd5fcfaecb9e199e87e1eda4d9e8b240fd9" +dependencies = [ + "ciborium-io", + "half", +] + +[[package]] +name = "clap" +version = "3.2.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4ea181bf566f71cb9a5d17a59e1871af638180a18fb0035c92ae62b705207123" +dependencies = [ + "atty", + "bitflags 
1.3.2", + "clap_derive", + "clap_lex", + "indexmap", + "once_cell", + "strsim", + "termcolor", + "textwrap", +] + +[[package]] +name = "clap_derive" +version = "3.2.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ae6371b8bdc8b7d3959e9cf7b22d4435ef3e79e138688421ec654acf8c81b008" +dependencies = [ + "heck", + "proc-macro-error", + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "clap_lex" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2850f2f5a82cbf437dd5af4d49848fbdfc27c157c3d010345776f952765261c5" +dependencies = [ + "os_str_bytes", +] + +[[package]] +name = "concurrent-queue" +version = "2.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d16048cd947b08fa32c24458a22f5dc5e835264f689f4f5653210c69fd107363" +dependencies = [ + "crossbeam-utils", +] + +[[package]] +name = "criterion" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e7c76e09c1aae2bc52b3d2f29e13c6572553b30c4aa1b8a49fd70de6412654cb" +dependencies = [ + "anes", + "atty", + "cast", + "ciborium", + "clap", + "criterion-plot", + "itertools", + "lazy_static", + "num-traits", + "oorandom", + "plotters", + "rayon", + "regex", + "serde", + "serde_derive", + "serde_json", + "tinytemplate", + "walkdir", +] + +[[package]] +name = "criterion-plot" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6b50826342786a51a89e2da3a28f1c32b06e387201bc2d19791f622c673706b1" +dependencies = [ + "cast", + "itertools", +] + +[[package]] +name = "crossbeam-deque" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "613f8cc01fe9cf1a3eb3d7f488fd2fa8388403e97039e2f73692932e291a770d" +dependencies = [ + "crossbeam-epoch", + "crossbeam-utils", +] + +[[package]] +name = "crossbeam-epoch" +version = "0.9.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b82ac4a3c2ca9c3460964f020e1402edd5753411d7737aa39c3714ad1b5420e" +dependencies = [ + "crossbeam-utils", +] + +[[package]] +name = "crossbeam-utils" +version = "0.8.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "248e3bacc7dc6baa3b21e405ee045c3047101a49145e7e9eca583ab4c2ca5345" + +[[package]] +name = "crunchy" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7" + +[[package]] +name = "ctor" +version = "0.1.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d2301688392eb071b0bf1a37be05c469d3cc4dbbd95df672fe28ab021e6a096" +dependencies = [ + "quote", + "syn 1.0.109", +] + +[[package]] +name = "either" +version = "1.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "11157ac094ffbdde99aa67b23417ebdd801842852b500e395a45a9c0aac03e4a" + +[[package]] +name = "errno" +version = "0.3.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a258e46cdc063eb8519c00b9fc845fc47bcfca4130e2f08e88665ceda8474245" +dependencies = [ + "libc", + "windows-sys 0.52.0", +] + +[[package]] +name = "event-listener" +version = "2.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0206175f82b8d6bf6652ff7d71a1e27fd2e4efde587fd368662814d6ec1d9ce0" + +[[package]] +name = "event-listener" +version = "4.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "67b215c49b2b248c855fb73579eb1f4f26c38ffdc12973e20e07b91d78d5646e" +dependencies = [ + "concurrent-queue", + "parking", + "pin-project-lite", +] + +[[package]] +name = "event-listener" +version = "5.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b5fb89194fa3cad959b833185b3063ba881dbfc7030680b314250779fb4cc91" +dependencies = [ + "concurrent-queue", + "parking", + "pin-project-lite", +] + +[[package]] +name = "event-listener-strategy" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "958e4d70b6d5e81971bebec42271ec641e7ff4e170a6fa605f2b8a8b65cb97d3" +dependencies = [ + "event-listener 4.0.3", + "pin-project-lite", +] + +[[package]] +name = "event-listener-strategy" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "feedafcaa9b749175d5ac357452a9d41ea2911da598fde46ce1fe02c37751291" +dependencies = [ + "event-listener 5.2.0", + "pin-project-lite", +] + +[[package]] +name = "fastrand" +version = "1.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e51093e27b0797c359783294ca4f0a911c270184cb10f85783b118614a1501be" +dependencies = [ + "instant", +] + +[[package]] +name = "fastrand" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "25cbce373ec4653f1a01a31e8a5e5ec0c622dc27ff9c4e6606eefef5cbbed4a5" + +[[package]] +name = "futures" +version = "0.3.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "645c6916888f6cb6350d2550b80fb63e734897a8498abe35cfb732b6487804b0" +dependencies = [ + "futures-channel", + "futures-core", + "futures-executor", + "futures-io", + "futures-sink", + "futures-task", + "futures-util", +] + +[[package]] +name = "futures-channel" +version = "0.3.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eac8f7d7865dcb88bd4373ab671c8cf4508703796caa2b1985a9ca867b3fcb78" +dependencies = [ + "futures-core", + "futures-sink", +] + +[[package]] +name = "futures-core" +version = "0.3.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dfc6580bb841c5a68e9ef15c77ccc837b40a7504914d52e47b8b0e9bbda25a1d" + +[[package]] +name = "futures-executor" +version = "0.3.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a576fc72ae164fca6b9db127eaa9a9dda0d61316034f33a0a0d4eda41f02b01d" +dependencies = [ + "futures-core", + "futures-task", + "futures-util", +] + +[[package]] +name = "futures-io" +version = "0.3.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a44623e20b9681a318efdd71c299b6b222ed6f231972bfe2f224ebad6311f0c1" + +[[package]] +name = "futures-lite" +version = "1.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "49a9d51ce47660b1e808d3c990b4709f2f415d928835a17dfd16991515c46bce" +dependencies = [ + "fastrand 1.9.0", + "futures-core", + "futures-io", + "memchr", + "parking", + "pin-project-lite", + "waker-fn", +] + +[[package]] +name = "futures-lite" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "445ba825b27408685aaecefd65178908c36c6e96aaf6d8599419d46e624192ba" +dependencies = [ + "fastrand 2.0.1", + "futures-core", + "futures-io", + "parking", + "pin-project-lite", +] + +[[package]] +name = "futures-macro" +version = "0.3.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.52", +] + +[[package]] +name = "futures-sink" +version = "0.3.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9fb8e00e87438d937621c1c6269e53f536c14d3fbd6a042bb24879e57d474fb5" + +[[package]] +name = "futures-task" +version = "0.3.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "38d84fa142264698cdce1a9f9172cf383a0c82de1bddcf3092901442c4097004" + +[[package]] +name = "futures-timer" +version = "3.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f288b0a4f20f9a56b5d1da57e2227c661b7b16168e2f72365f57b63326e29b24" + +[[package]] +name = "futures-util" +version = "0.3.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3d6401deb83407ab3da39eba7e33987a73c3df0c82b4bb5813ee871c19c41d48" +dependencies = [ + "futures-channel", + "futures-core", + "futures-io", + "futures-macro", + "futures-sink", + "futures-task", + "memchr", + "pin-project-lite", + "pin-utils", + "slab", +] + +[[package]] +name = "getrandom" +version = "0.2.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "190092ea657667030ac6a35e305e62fc4dd69fd98ac98631e5d3a2b1575a12b5" +dependencies = [ + "cfg-if", + "libc", + "wasi", +] + +[[package]] +name = "gloo-timers" +version = "0.2.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b995a66bb87bebce9a0f4a95aed01daca4872c050bfcb21653361c03bc35e5c" +dependencies = [ + "futures-channel", + "futures-core", + "js-sys", + "wasm-bindgen", +] + +[[package]] +name = "half" +version = "2.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b5eceaaeec696539ddaf7b333340f1af35a5aa87ae3e4f3ead0532f72affab2e" +dependencies = [ + "cfg-if", + "crunchy", +] + +[[package]] +name = "hashbrown" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" + +[[package]] +name = "heck" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" + +[[package]] +name = "hermit-abi" +version = "0.1.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33" +dependencies = [ + "libc", +] + +[[package]] +name = "hermit-abi" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d231dfb89cfffdbc30e7fc41579ed6066ad03abda9e567ccafae602b97ec5024" + +[[package]] +name = "indexmap" +version = "1.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99" +dependencies = [ + "autocfg", + "hashbrown", +] + +[[package]] +name = "instant" +version = "0.1.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c" +dependencies = [ + "cfg-if", +] + +[[package]] +name = "io-lifetimes" +version = "1.0.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eae7b9aee968036d54dce06cebaefd919e4472e753296daccd6d344e3e2df0c2" +dependencies = [ + "hermit-abi 0.3.9", + "libc", + "windows-sys 0.48.0", +] + +[[package]] +name = "itertools" +version = 
"0.10.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473" +dependencies = [ + "either", +] + +[[package]] +name = "itoa" +version = "1.0.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b1a46d1a171d865aa5f83f92695765caa047a9b4cbae2cbf37dbd613a793fd4c" + +[[package]] +name = "js-sys" +version = "0.3.68" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "406cda4b368d531c842222cf9d2600a9a4acce8d29423695379c6868a143a9ee" +dependencies = [ + "wasm-bindgen", +] + +[[package]] +name = "kv-log-macro" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0de8b303297635ad57c9f5059fd9cee7a47f8e8daa09df0fcd07dd39fb22977f" +dependencies = [ + "log", +] + +[[package]] +name = "lazy_static" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" + +[[package]] +name = "libc" +version = "0.2.153" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c198f91728a82281a64e1f4f9eeb25d82cb32a5de251c6bd1b5154d63a8e7bd" + +[[package]] +name = "linux-raw-sys" +version = "0.3.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ef53942eb7bf7ff43a617b3e2c1c4a5ecf5944a7c1bc12d7ee39bbb15e5c1519" + +[[package]] +name = "linux-raw-sys" +version = "0.4.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "01cda141df6706de531b6c46c3a33ecca755538219bd484262fa09410c13539c" + +[[package]] +name = "log" +version = "0.4.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "abb12e687cfb44aa40f41fc3978ef76448f9b6038cad6aef4259d3c095a2382e" +dependencies = [ + "cfg-if", + "value-bag", +] + +[[package]] +name = "memchr" +version = "2.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "523dc4f511e55ab87b694dc30d0f820d60906ef06413f93d4d7a1385599cc149" + +[[package]] +name = "num-traits" +version = "0.2.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "da0df0e5185db44f69b44f26786fe401b6c293d1907744beaa7fa62b2e5a517a" +dependencies = [ + "autocfg", +] + +[[package]] +name = "once_cell" +version = "1.19.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92" + +[[package]] +name = "oorandom" +version = "11.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ab1bc2a289d34bd04a330323ac98a1b4bc82c9d9fcb1e66b63caa84da26b575" + +[[package]] +name = "os_str_bytes" +version = "6.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e2355d85b9a3786f481747ced0e0ff2ba35213a1f9bd406ed906554d7af805a1" + +[[package]] +name = "parking" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bb813b8af86854136c6922af0598d719255ecb2179515e6e7730d468f05c9cae" + +[[package]] +name = "pin-project-lite" +version = "0.2.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8afb450f006bf6385ca15ef45d71d2288452bc3683ce2e2cacc0d18e4be60b58" + +[[package]] +name = "pin-utils" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" + +[[package]] +name = 
"piper" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "668d31b1c4eba19242f2088b2bf3316b82ca31082a8335764db4e083db7485d4" +dependencies = [ + "atomic-waker", + "fastrand 2.0.1", + "futures-io", +] + +[[package]] +name = "plotters" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d2c224ba00d7cadd4d5c660deaf2098e5e80e07846537c51f9cfa4be50c1fd45" +dependencies = [ + "num-traits", + "plotters-backend", + "plotters-svg", + "wasm-bindgen", + "web-sys", +] + +[[package]] +name = "plotters-backend" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9e76628b4d3a7581389a35d5b6e2139607ad7c75b17aed325f210aa91f4a9609" + +[[package]] +name = "plotters-svg" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "38f6d39893cca0701371e3c27294f09797214b86f1fb951b89ade8ec04e2abab" +dependencies = [ + "plotters-backend", +] + +[[package]] +name = "polling" +version = "2.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4b2d323e8ca7996b3e23126511a523f7e62924d93ecd5ae73b333815b0eb3dce" +dependencies = [ + "autocfg", + "bitflags 1.3.2", + "cfg-if", + "concurrent-queue", + "libc", + "log", + "pin-project-lite", + "windows-sys 0.48.0", +] + +[[package]] +name = "polling" +version = "3.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "30054e72317ab98eddd8561db0f6524df3367636884b7b21b703e4b280a84a14" +dependencies = [ + "cfg-if", + "concurrent-queue", + "pin-project-lite", + "rustix 0.38.9", + "tracing", + "windows-sys 0.52.0", +] + +[[package]] +name = "ppv-lite86" +version = "0.2.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" + +[[package]] +name = "proc-macro-error" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" +dependencies = [ + "proc-macro-error-attr", + "proc-macro2", + "quote", + "syn 1.0.109", + "version_check", +] + +[[package]] +name = "proc-macro-error-attr" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" +dependencies = [ + "proc-macro2", + "quote", + "version_check", +] + +[[package]] +name = "proc-macro2" +version = "1.0.78" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e2422ad645d89c99f8f3e6b88a9fdeca7fabeac836b1002371c4367c8f984aae" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "quote" +version = "1.0.35" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "291ec9ab5efd934aaf503a6466c5d5251535d108ee747472c3977cc5acc868ef" +dependencies = [ + "proc-macro2", +] + +[[package]] +name = "rand" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" +dependencies = [ + "libc", + "rand_chacha", + "rand_core", +] + +[[package]] +name = "rand_chacha" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" +dependencies = [ + "ppv-lite86", + "rand_core", +] + +[[package]] +name = "rand_core" +version = "0.6.4" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" +dependencies = [ + "getrandom", +] + +[[package]] +name = "rayon" +version = "1.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e4963ed1bc86e4f3ee217022bd855b297cef07fb9eac5dfa1f788b220b49b3bd" +dependencies = [ + "either", + "rayon-core", +] + +[[package]] +name = "rayon-core" +version = "1.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1465873a3dfdaa8ae7cb14b4383657caab0b3e8a0aa9ae8e04b044854c8dfce2" +dependencies = [ + "crossbeam-deque", + "crossbeam-utils", +] + +[[package]] +name = "regex" +version = "1.8.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d0ab3ca65655bb1e41f2a8c8cd662eb4fb035e67c3f78da1d61dffe89d07300f" +dependencies = [ + "regex-syntax", +] + +[[package]] +name = "regex-syntax" +version = "0.7.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dbb5fb1acd8a1a18b3dd5be62d25485eb770e05afb408a9627d14d451bae12da" + +[[package]] +name = "rstest" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b939295f93cb1d12bc1a83cf9ee963199b133fb8a79832dd51b68bb9f59a04dc" +dependencies = [ + "async-std", + "futures", + "futures-timer", + "rstest_macros", + "rustc_version", +] + +[[package]] +name = "rstest_macros" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f78aba848123782ba59340928ec7d876ebe745aa0365d6af8a630f19a5c16116" +dependencies = [ + "cfg-if", + "proc-macro2", + "quote", + "rustc_version", + "syn 1.0.109", +] + +[[package]] +name = "rstest_reuse" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "88530b681abe67924d42cca181d070e3ac20e0740569441a9e35a7cedd2b34a4" +dependencies = [ + "quote", + "rand", + "rustc_version", + "syn 2.0.52", +] + +[[package]] +name = "rustc_version" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" +dependencies = [ + "semver", +] + +[[package]] +name = "rustix" +version = "0.37.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fea8ca367a3a01fe35e6943c400addf443c0f57670e6ec51196f71a4b8762dd2" +dependencies = [ + "bitflags 1.3.2", + "errno", + "io-lifetimes", + "libc", + "linux-raw-sys 0.3.8", + "windows-sys 0.48.0", +] + +[[package]] +name = "rustix" +version = "0.38.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9bfe0f2582b4931a45d1fa608f8a8722e8b3c7ac54dd6d5f3b3212791fedef49" +dependencies = [ + "bitflags 2.4.2", + "errno", + "libc", + "linux-raw-sys 0.4.13", + "windows-sys 0.48.0", +] + +[[package]] +name = "rustversion" +version = "1.0.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7ffc183a10b4478d04cbbbfc96d0873219d962dd5accaff2ffbd4ceb7df837f4" + +[[package]] +name = "ryu" +version = "1.0.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e86697c916019a8588c99b5fac3cead74ec0b4b819707a682fd4d23fa0ce1ba1" + +[[package]] +name = "same-file" +version = "1.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502" +dependencies = [ + "winapi-util", +] + +[[package]] +name = "semver" +version = "1.0.22" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "92d43fe69e652f3df9bdc2b85b2854a0825b86e4fb76bc44d945137d053639ca" + +[[package]] +name = "serde" +version = "1.0.197" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3fb1c873e1b9b056a4dc4c0c198b24c3ffa059243875552b2bd0933b1aee4ce2" +dependencies = [ + "serde_derive", +] + +[[package]] +name = "serde_derive" +version = "1.0.197" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7eb0b34b42edc17f6b7cac84a52a1c5f0e1bb2227e997ca9011ea3dd34e8610b" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.52", +] + +[[package]] +name = "serde_json" +version = "1.0.114" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c5f09b1bd632ef549eaa9f60a1f8de742bdbc698e6cee2095fc84dde5f549ae0" +dependencies = [ + "itoa", + "ryu", + "serde", +] + +[[package]] +name = "slab" +version = "0.4.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f92a496fb766b417c996b9c5e57daf2f7ad3b0bebe1ccfca4856390e3d3bb67" +dependencies = [ + "autocfg", +] + +[[package]] +name = "socket2" +version = "0.4.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9f7916fc008ca5542385b89a3d3ce689953c143e9304a9bf8beec1de48994c0d" +dependencies = [ + "libc", + "winapi", +] + +[[package]] +name = "strsim" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" + +[[package]] +name = "strum" +version = "0.25.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "290d54ea6f91c969195bdbcd7442c8c2a2ba87da8bf60a7ee86a235d4bc1e125" +dependencies = [ + "strum_macros", +] + +[[package]] +name = "strum_macros" +version = "0.25.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "23dc1fa9ac9c169a78ba62f0b841814b7abae11bdd047b9c58f893439e309ea0" +dependencies = [ + "heck", + "proc-macro2", + "quote", + "rustversion", + "syn 2.0.52", +] + +[[package]] +name = "syn" +version = "1.0.109" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "syn" +version = "2.0.52" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b699d15b36d1f02c3e7c69f8ffef53de37aefae075d8488d4ba1a7788d574a07" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "termcolor" +version = "1.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "06794f8f6c5c898b3275aebefa6b8a1cb24cd2c6c79397ab15774837a0bc5755" +dependencies = [ + "winapi-util", +] + +[[package]] +name = "textwrap" +version = "0.16.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "23d434d3f8967a09480fb04132ebe0a3e088c173e6d0ee7897abbdf4eab0f8b9" + +[[package]] +name = "tinytemplate" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "be4d6b5f19ff7664e8c98d03e2139cb510db9b0a60b55f8e8709b689d939b6bc" +dependencies = [ + "serde", + "serde_json", +] + +[[package]] +name = "tracing" +version = "0.1.40" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c3523ab5a71916ccf420eebdf5521fcef02141234bbc0b8a49f2fdc4544364ef" +dependencies = [ + "pin-project-lite", + 
"tracing-core", +] + +[[package]] +name = "tracing-core" +version = "0.1.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c06d3da6113f116aaee68e4d601191614c9053067f9ab7f6edbcb161237daa54" + +[[package]] +name = "unicode-ident" +version = "1.0.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b" + +[[package]] +name = "value-bag" +version = "1.0.0-alpha.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2209b78d1249f7e6f3293657c9779fe31ced465df091bbd433a1cf88e916ec55" +dependencies = [ + "ctor", + "version_check", +] + +[[package]] +name = "version_check" +version = "0.9.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" + +[[package]] +name = "waker-fn" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f3c4517f54858c779bbcbf228f4fca63d121bf85fbecb2dc578cdf4a39395690" + +[[package]] +name = "walkdir" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "29790946404f91d9c5d06f9874efddea1dc06c5efe94541a7d6863108e3a5e4b" +dependencies = [ + "same-file", + "winapi-util", +] + +[[package]] +name = "wasi" +version = "0.11.0+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" + +[[package]] +name = "wasm-bindgen" +version = "0.2.91" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c1e124130aee3fb58c5bdd6b639a0509486b0338acaaae0c84a5124b0f588b7f" +dependencies = [ + "cfg-if", + "wasm-bindgen-macro", +] + +[[package]] +name = "wasm-bindgen-backend" +version = "0.2.91" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c9e7e1900c352b609c8488ad12639a311045f40a35491fb69ba8c12f758af70b" +dependencies = [ + "bumpalo", + "log", + "once_cell", + "proc-macro2", + "quote", + "syn 2.0.52", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-futures" +version = "0.4.41" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "877b9c3f61ceea0e56331985743b13f3d25c406a7098d45180fb5f09bc19ed97" +dependencies = [ + "cfg-if", + "js-sys", + "wasm-bindgen", + "web-sys", +] + +[[package]] +name = "wasm-bindgen-macro" +version = "0.2.91" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b30af9e2d358182b5c7449424f017eba305ed32a7010509ede96cdc4696c46ed" +dependencies = [ + "quote", + "wasm-bindgen-macro-support", +] + +[[package]] +name = "wasm-bindgen-macro-support" +version = "0.2.91" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "642f325be6301eb8107a83d12a8ac6c1e1c54345a7ef1a9261962dfefda09e66" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.52", + "wasm-bindgen-backend", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-shared" +version = "0.2.91" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4f186bd2dcf04330886ce82d6f33dd75a7bfcf69ecf5763b89fcde53b6ac9838" + +[[package]] +name = "web-sys" +version = "0.3.68" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "96565907687f7aceb35bc5fc03770a8a0471d82e479f25832f54a0e3f4b28446" +dependencies = [ + "js-sys", + "wasm-bindgen", +] + +[[package]] +name = "winapi" +version = "0.3.9" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" +dependencies = [ + "winapi-i686-pc-windows-gnu", + "winapi-x86_64-pc-windows-gnu", +] + +[[package]] +name = "winapi-i686-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" + +[[package]] +name = "winapi-util" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f29e6f9198ba0d26b4c9f07dbe6f9ed633e1f3d5b8b414090084349e46a52596" +dependencies = [ + "winapi", +] + +[[package]] +name = "winapi-x86_64-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" + +[[package]] +name = "windows-sys" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" +dependencies = [ + "windows-targets 0.48.5", +] + +[[package]] +name = "windows-sys" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" +dependencies = [ + "windows-targets 0.52.4", +] + +[[package]] +name = "windows-targets" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a2fa6e2155d7247be68c096456083145c183cbbbc2764150dda45a87197940c" +dependencies = [ + "windows_aarch64_gnullvm 0.48.5", + "windows_aarch64_msvc 0.48.5", + "windows_i686_gnu 0.48.5", + "windows_i686_msvc 0.48.5", + "windows_x86_64_gnu 0.48.5", + "windows_x86_64_gnullvm 0.48.5", + "windows_x86_64_msvc 0.48.5", +] + +[[package]] +name = "windows-targets" +version = "0.52.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7dd37b7e5ab9018759f893a1952c9420d060016fc19a472b4bb20d1bdd694d1b" +dependencies = [ + "windows_aarch64_gnullvm 0.52.4", + "windows_aarch64_msvc 0.52.4", + "windows_i686_gnu 0.52.4", + "windows_i686_msvc 0.52.4", + "windows_x86_64_gnu 0.52.4", + "windows_x86_64_gnullvm 0.52.4", + "windows_x86_64_msvc 0.52.4", +] + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.52.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bcf46cf4c365c6f2d1cc93ce535f2c8b244591df96ceee75d8e83deb70a9cac9" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.52.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "da9f259dd3bcf6990b55bffd094c4f7235817ba4ceebde8e6d11cd0c5633b675" + +[[package]] +name = "windows_i686_gnu" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" + +[[package]] +name = "windows_i686_gnu" +version = "0.52.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"b474d8268f99e0995f25b9f095bc7434632601028cf86590aea5c8a5cb7801d3" + +[[package]] +name = "windows_i686_msvc" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" + +[[package]] +name = "windows_i686_msvc" +version = "0.52.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1515e9a29e5bed743cb4415a9ecf5dfca648ce85ee42e15873c3cd8610ff8e02" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.52.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5eee091590e89cc02ad514ffe3ead9eb6b660aedca2183455434b93546371a03" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.52.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77ca79f2451b49fa9e2af39f0747fe999fcda4f5e241b2898624dca97a1f2177" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.52.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32b752e52a2da0ddfbdbcc6fceadfeede4c939ed16d13e648833a61dfb611ed8" diff --git a/vendor/base64/Cargo.toml b/vendor/base64/Cargo.toml new file mode 100644 index 00000000000000..e1b35fc46c869c --- /dev/null +++ b/vendor/base64/Cargo.toml @@ -0,0 +1,85 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies. +# +# If you are reading this file be aware that the original Cargo.toml +# will likely look very different (and much more reasonable). +# See Cargo.toml.orig for the original contents. 
+ +[package] +edition = "2018" +rust-version = "1.48.0" +name = "base64" +version = "0.22.1" +authors = ["Marshall Pierce "] +description = "encodes and decodes base64 as bytes or utf8" +documentation = "https://docs.rs/base64" +readme = "README.md" +keywords = [ + "base64", + "utf8", + "encode", + "decode", + "no_std", +] +categories = ["encoding"] +license = "MIT OR Apache-2.0" +repository = "https://github.com/marshallpierce/rust-base64" + +[package.metadata.docs.rs] +rustdoc-args = ["--generate-link-to-definition"] + +[profile.bench] +debug = 2 + +[profile.test] +opt-level = 3 + +[[example]] +name = "base64" +required-features = ["std"] + +[[test]] +name = "tests" +required-features = ["alloc"] + +[[test]] +name = "encode" +required-features = ["alloc"] + +[[bench]] +name = "benchmarks" +harness = false +required-features = ["std"] + +[dev-dependencies.clap] +version = "3.2.25" +features = ["derive"] + +[dev-dependencies.criterion] +version = "0.4.0" + +[dev-dependencies.once_cell] +version = "1" + +[dev-dependencies.rand] +version = "0.8.5" +features = ["small_rng"] + +[dev-dependencies.rstest] +version = "0.13.0" + +[dev-dependencies.rstest_reuse] +version = "0.6.0" + +[dev-dependencies.strum] +version = "0.25" +features = ["derive"] + +[features] +alloc = [] +default = ["std"] +std = ["alloc"] diff --git a/vendor/base64/LICENSE-APACHE b/vendor/base64/LICENSE-APACHE new file mode 100644 index 00000000000000..16fe87b06e802f --- /dev/null +++ b/vendor/base64/LICENSE-APACHE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/vendor/base64/LICENSE-MIT b/vendor/base64/LICENSE-MIT new file mode 100644 index 00000000000000..7bc10f80a0499e --- /dev/null +++ b/vendor/base64/LICENSE-MIT @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2015 Alice Maz + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/base64/README.md b/vendor/base64/README.md new file mode 100644 index 00000000000000..f566756d51fa49 --- /dev/null +++ b/vendor/base64/README.md @@ -0,0 +1,154 @@ +# [base64](https://crates.io/crates/base64) + +[![](https://img.shields.io/crates/v/base64.svg)](https://crates.io/crates/base64) [![Docs](https://docs.rs/base64/badge.svg)](https://docs.rs/base64) [![CircleCI](https://circleci.com/gh/marshallpierce/rust-base64/tree/master.svg?style=shield)](https://circleci.com/gh/marshallpierce/rust-base64/tree/master) [![codecov](https://codecov.io/gh/marshallpierce/rust-base64/branch/master/graph/badge.svg)](https://codecov.io/gh/marshallpierce/rust-base64) [![unsafe forbidden](https://img.shields.io/badge/unsafe-forbidden-success.svg)](https://github.com/rust-secure-code/safety-dance/) + + + +Made with CLion. Thanks to JetBrains for supporting open source! + +It's base64. What more could anyone want? + +This library's goals are to be *correct* and *fast*. It's thoroughly tested and widely used. It exposes functionality at +multiple levels of abstraction so you can choose the level of convenience vs performance that you want, +e.g. `decode_engine_slice` decodes into an existing `&mut [u8]` and is pretty fast (2.6GiB/s for a 3 KiB input), +whereas `decode_engine` allocates a new `Vec` and returns it, which might be more convenient in some cases, but is +slower (although still fast enough for almost any purpose) at 2.1 GiB/s. + +See the [docs](https://docs.rs/base64) for all the details. + +## FAQ + +### I need to decode base64 with whitespace/null bytes/other random things interspersed in it. What should I do? + +Remove non-base64 characters from your input before decoding. + +If you have a `Vec` of base64, [retain](https://doc.rust-lang.org/std/vec/struct.Vec.html#method.retain) can be used to +strip out whatever you need removed. + +If you have a `Read` (e.g. reading a file or network socket), there are various approaches. + +- Use [iter_read](https://crates.io/crates/iter-read) together with `Read`'s `bytes()` to filter out unwanted bytes. +- Implement `Read` with a `read()` impl that delegates to your actual `Read`, and then drops any bytes you don't want. + +### I need to line-wrap base64, e.g. for MIME/PEM. + +[line-wrap](https://crates.io/crates/line-wrap) does just that. + +### I want canonical base64 encoding/decoding. + +First, don't do this. You should no more expect Base64 to be canonical than you should expect compression algorithms to +produce canonical output across all usage in the wild (hint: they don't). +However, [people are drawn to their own destruction like moths to a flame](https://eprint.iacr.org/2022/361), so here we +are. + +There are two opportunities for non-canonical encoding (and thus, detection of the same during decoding): the final bits +of the last encoded token in two or three token suffixes, and the `=` token used to inflate the suffix to a full four +tokens. + +The trailing bits issue is unavoidable: with 6 bits available in each encoded token, 1 input byte takes 2 tokens, +with the second one having some bits unused. Same for two input bytes: 16 bits, but 3 tokens have 18 bits. 
Unless we +decide to stop shipping whole bytes around, we're stuck with those extra bits that a sneaky or buggy encoder might set +to 1 instead of 0. + +The `=` pad bytes, on the other hand, are entirely a self-own by the Base64 standard. They do not affect decoding other +than to provide an opportunity to say "that padding is incorrect". Exabytes of storage and transfer have no doubt been +wasted on pointless `=` bytes. Somehow we all seem to be quite comfortable with, say, hex-encoded data just stopping +when it's done rather than requiring a confirmation that the author of the encoder could count to four. Anyway, there +are two ways to make pad bytes predictable: require canonical padding to the next multiple of four bytes as per the RFC, +or, if you control all producers and consumers, save a few bytes by requiring no padding (especially applicable to the +url-safe alphabet). + +All `Engine` implementations must at a minimum support treating non-canonical padding of both types as an error, and +optionally may allow other behaviors. + +## Rust version compatibility + +The minimum supported Rust version is 1.48.0. + +# Contributing + +Contributions are very welcome. However, because this library is used widely, and in security-sensitive contexts, all +PRs will be carefully scrutinized. Beyond that, this sort of low level library simply needs to be 100% correct. Nobody +wants to chase bugs in encoding of any sort. + +All this means that it takes me a fair amount of time to review each PR, so it might take quite a while to carve out the +free time to give each PR the attention it deserves. I will get to everyone eventually! + +## Developing + +Benchmarks are in `benches/`. + +```bash +cargo bench +``` + +## no_std + +This crate supports no_std. By default the crate targets std via the `std` feature. You can deactivate +the `default-features` to target `core` instead. In that case you lose out on all the functionality revolving +around `std::io`, `std::error::Error`, and heap allocations. There is an additional `alloc` feature that you can activate +to bring back the support for heap allocations. + +## Profiling + +On Linux, you can use [perf](https://perf.wiki.kernel.org/index.php/Main_Page) for profiling. Then compile the +benchmarks with `cargo bench --no-run`. + +Run the benchmark binary with `perf` (shown here filtering to one particular benchmark, which will make the results +easier to read). `perf` is only available to the root user on most systems as it fiddles with event counters in your +CPU, so use `sudo`. We need to run the actual benchmark binary, hence the path into `target`. You can see the actual +full path with `cargo bench -v`; it will print out the commands it runs. If you use the exact path +that `bench` outputs, make sure you get the one that's for the benchmarks, not the tests. You may also want +to `cargo clean` so you have only one `benchmarks-` binary (they tend to accumulate). + +```bash +sudo perf record target/release/deps/benchmarks-* --bench decode_10mib_reuse +``` + +Then analyze the results, again with perf: + +```bash +sudo perf annotate -l +``` + +You'll see a bunch of interleaved rust source and assembly like this. The section with `lib.rs:327` is telling us that +4.02% of samples saw the `movzbl` aka bit shift as the active instruction. However, this percentage is not as exact as +it seems due to a phenomenon called *skid*. 
Basically, a consequence of how fancy modern CPUs are is that this sort of +instruction profiling is inherently inaccurate, especially in branch-heavy code. + +```text + lib.rs:322 0.70 : 10698: mov %rdi,%rax + 2.82 : 1069b: shr $0x38,%rax + : if morsel == decode_tables::INVALID_VALUE { + : bad_byte_index = input_index; + : break; + : }; + : accum = (morsel as u64) << 58; + lib.rs:327 4.02 : 1069f: movzbl (%r9,%rax,1),%r15d + : // fast loop of 8 bytes at a time + : while input_index < length_of_full_chunks { + : let mut accum: u64; + : + : let input_chunk = BigEndian::read_u64(&input_bytes[input_index..(input_index + 8)]); + : morsel = decode_table[(input_chunk >> 56) as usize]; + lib.rs:322 3.68 : 106a4: cmp $0xff,%r15 + : if morsel == decode_tables::INVALID_VALUE { + 0.00 : 106ab: je 1090e +``` + +## Fuzzing + +This uses [cargo-fuzz](https://github.com/rust-fuzz/cargo-fuzz). See `fuzz/fuzzers` for the available fuzzing scripts. +To run, use an invocation like these: + +```bash +cargo +nightly fuzz run roundtrip +cargo +nightly fuzz run roundtrip_no_pad +cargo +nightly fuzz run roundtrip_random_config -- -max_len=10240 +cargo +nightly fuzz run decode_random +``` + +## License + +This project is dual-licensed under MIT and Apache 2.0. + diff --git a/vendor/base64/RELEASE-NOTES.md b/vendor/base64/RELEASE-NOTES.md new file mode 100644 index 00000000000000..91b68a6782095d --- /dev/null +++ b/vendor/base64/RELEASE-NOTES.md @@ -0,0 +1,271 @@ +# 0.22.1 + +- Correct the symbols used for the predefined `alphabet::BIN_HEX`. + +# 0.22.0 + +- `DecodeSliceError::OutputSliceTooSmall` is now conservative rather than precise. That is, the error will only occur if the decoded output _cannot_ fit, meaning that `Engine::decode_slice` can now be used with exactly-sized output slices. As part of this, `Engine::internal_decode` now returns `DecodeSliceError` instead of `DecodeError`, but that is not expected to affect any external callers. +- `DecodeError::InvalidLength` now refers specifically to the _number of valid symbols_ being invalid (i.e. `len % 4 == 1`), rather than just the number of input bytes. This avoids confusing scenarios when based on interpretation you could make a case for either `InvalidLength` or `InvalidByte` being appropriate. 
+- Decoding is somewhat faster (5-10%) + +# 0.21.7 + +- Support getting an alphabet's contents as a str via `Alphabet::as_str()` + +# 0.21.6 + +- Improved introductory documentation and example + +# 0.21.5 + +- Add `Debug` and `Clone` impls for the general purpose Engine + +# 0.21.4 + +- Make `encoded_len` `const`, allowing the creation of arrays sized to encode compile-time-known data lengths + +# 0.21.3 + +- Implement `source` instead of `cause` on Error types +- Roll back MSRV to 1.48.0 so Debian can continue to live in a time warp +- Slightly faster chunked encoding for short inputs +- Decrease binary size + +# 0.21.2 + +- Rollback MSRV to 1.57.0 -- only dev dependencies need 1.60, not the main code + +# 0.21.1 + +- Remove the possibility of panicking during decoded length calculations +- `DecoderReader` no longer sometimes erroneously ignores + padding [#226](https://github.com/marshallpierce/rust-base64/issues/226) + +## Breaking changes + +- `Engine.internal_decode` return type changed +- Update MSRV to 1.60.0 + +# 0.21.0 + +## Migration + +### Functions + +| < 0.20 function | 0.21 equivalent | +|-------------------------|-------------------------------------------------------------------------------------| +| `encode()` | `engine::general_purpose::STANDARD.encode()` or `prelude::BASE64_STANDARD.encode()` | +| `encode_config()` | `engine.encode()` | +| `encode_config_buf()` | `engine.encode_string()` | +| `encode_config_slice()` | `engine.encode_slice()` | +| `decode()` | `engine::general_purpose::STANDARD.decode()` or `prelude::BASE64_STANDARD.decode()` | +| `decode_config()` | `engine.decode()` | +| `decode_config_buf()` | `engine.decode_vec()` | +| `decode_config_slice()` | `engine.decode_slice()` | + +The short-lived 0.20 functions were the 0.13 functions with `config` replaced with `engine`. + +### Padding + +If applicable, use the preset engines `engine::STANDARD`, `engine::STANDARD_NO_PAD`, `engine::URL_SAFE`, +or `engine::URL_SAFE_NO_PAD`. +The `NO_PAD` ones require that padding is absent when decoding, and the others require that +canonical padding is present . + +If you need the < 0.20 behavior that did not care about padding, or want to recreate < 0.20.0's predefined `Config`s +precisely, see the following table. + +| 0.13.1 Config | 0.20.0+ alphabet | `encode_padding` | `decode_padding_mode` | +|-----------------|------------------|------------------|-----------------------| +| STANDARD | STANDARD | true | Indifferent | +| STANDARD_NO_PAD | STANDARD | false | Indifferent | +| URL_SAFE | URL_SAFE | true | Indifferent | +| URL_SAFE_NO_PAD | URL_SAFE | false | Indifferent | + +# 0.21.0-rc.1 + +- Restore the ability to decode into a slice of precisely the correct length with `Engine.decode_slice_unchecked`. +- Add `Engine` as a `pub use` in `prelude`. + +# 0.21.0-beta.2 + +## Breaking changes + +- Re-exports of preconfigured engines in `engine` are removed in favor of `base64::prelude::...` that are better suited + to those who wish to `use` the entire path to a name. + +# 0.21.0-beta.1 + +## Breaking changes + +- `FastPortable` was only meant to be an interim name, and shouldn't have shipped in 0.20. It is now `GeneralPurpose` to + make its intended usage more clear. +- `GeneralPurpose` and its config are now `pub use`'d in the `engine` module for convenience. +- Change a few `from()` functions to be `new()`. 
`from()` causes confusing compiler errors because of confusion + with `From::from`, and is a little misleading because some of those invocations are not very cheap as one would + usually expect from a `from` call. +- `encode*` and `decode*` top level functions are now methods on `Engine`. +- `DEFAULT_ENGINE` was replaced by `engine::general_purpose::STANDARD` +- Predefined engine consts `engine::general_purpose::{STANDARD, STANDARD_NO_PAD, URL_SAFE, URL_SAFE_NO_PAD}` + - These are `pub use`d into `engine` as well +- The `*_slice` decode/encode functions now return an error instead of panicking when the output slice is too small + - As part of this, there isn't now a public way to decode into a slice _exactly_ the size needed for inputs that + aren't multiples of 4 tokens. If adding up to 2 bytes to always be a multiple of 3 bytes for the decode buffer is + a problem, file an issue. + +## Other changes + +- `decoded_len_estimate()` is provided to make it easy to size decode buffers correctly. + +# 0.20.0 + +## Breaking changes + +- Update MSRV to 1.57.0 +- Decoding can now either ignore padding, require correct padding, or require no padding. The default is to require + correct padding. + - The `NO_PAD` config now requires that padding be absent when decoding. + +## 0.20.0-alpha.1 + +### Breaking changes + +- Extended the `Config` concept into the `Engine` abstraction, allowing the user to pick different encoding / decoding + implementations. + - What was formerly the only algorithm is now the `FastPortable` engine, so named because it's portable (works on + any CPU) and relatively fast. + - This opens the door to a portable constant-time + implementation ([#153](https://github.com/marshallpierce/rust-base64/pull/153), + presumably `ConstantTimePortable`?) for security-sensitive applications that need side-channel resistance, and + CPU-specific SIMD implementations for more speed. + - Standard base64 per the RFC is available via `DEFAULT_ENGINE`. To use different alphabets or other settings ( + padding, etc), create your own engine instance. +- `CharacterSet` is now `Alphabet` (per the RFC), and allows creating custom alphabets. The corresponding tables that + were previously code-generated are now built dynamically. +- Since there are already multiple breaking changes, various functions are renamed to be more consistent and + discoverable. +- MSRV is now 1.47.0 to allow various things to use `const fn`. +- `DecoderReader` now owns its inner reader, and can expose it via `into_inner()`. For symmetry, `EncoderWriter` can do + the same with its writer. +- `encoded_len` is now public so you can size encode buffers precisely. + +# 0.13.1 + +- More precise decode buffer sizing, avoiding unnecessary allocation in `decode_config`. + +# 0.13.0 + +- Config methods are const +- Added `EncoderStringWriter` to allow encoding directly to a String +- `EncoderWriter` now owns its delegate writer rather than keeping a reference to it (though refs still work) + - As a consequence, it is now possible to extract the delegate writer from an `EncoderWriter` via `finish()`, which + returns `Result` instead of `Result<()>`. If you were calling `finish()` explicitly, you will now need to + use `let _ = foo.finish()` instead of just `foo.finish()` to avoid a warning about the unused value. +- When decoding input that has both an invalid length and an invalid symbol as the last byte, `InvalidByte` will be + emitted instead of `InvalidLength` to make the problem more obvious. 
+ +# 0.12.2 + +- Add `BinHex` alphabet + +# 0.12.1 + +- Add `Bcrypt` alphabet + +# 0.12.0 + +- A `Read` implementation (`DecoderReader`) to let users transparently decoded data from a b64 input source +- IMAP's modified b64 alphabet +- Relaxed type restrictions to just `AsRef<[ut8]>` for main `encode*`/`decode*` functions +- A minor performance improvement in encoding + +# 0.11.0 + +- Minimum rust version 1.34.0 +- `no_std` is now supported via the two new features `alloc` and `std`. + +# 0.10.1 + +- Minimum rust version 1.27.2 +- Fix bug in streaming encoding ([#90](https://github.com/marshallpierce/rust-base64/pull/90)): if the underlying writer + didn't write all the bytes given to it, the remaining bytes would not be retried later. See the docs + on `EncoderWriter::write`. +- Make it configurable whether or not to return an error when decoding detects excess trailing bits. + +# 0.10.0 + +- Remove line wrapping. Line wrapping was never a great conceptual fit in this library, and other features (streaming + encoding, etc) either couldn't support it or could support only special cases of it with a great increase in + complexity. Line wrapping has been pulled out into a [line-wrap](https://crates.io/crates/line-wrap) crate, so it's + still available if you need it. + - `Base64Display` creation no longer uses a `Result` because it can't fail, which means its helper methods for + common + configs that `unwrap()` for you are no longer needed +- Add a streaming encoder `Write` impl to transparently base64 as you write. +- Remove the remaining `unsafe` code. +- Remove whitespace stripping to simplify `no_std` support. No out of the box configs use it, and it's trivial to do + yourself if needed: `filter(|b| !b" \n\t\r\x0b\x0c".contains(b)`. +- Detect invalid trailing symbols when decoding and return an error rather than silently ignoring them. + +# 0.9.3 + +- Update safemem + +# 0.9.2 + +- Derive `Clone` for `DecodeError`. + +# 0.9.1 + +- Add support for `crypt(3)`'s base64 variant. + +# 0.9.0 + +- `decode_config_slice` function for no-allocation decoding, analogous to `encode_config_slice` +- Decode performance optimization + +# 0.8.0 + +- `encode_config_slice` function for no-allocation encoding + +# 0.7.0 + +- `STANDARD_NO_PAD` config +- `Base64Display` heap-free wrapper for use in format strings, etc + +# 0.6.0 + +- Decode performance improvements +- Use `unsafe` in fewer places +- Added fuzzers + +# 0.5.2 + +- Avoid usize overflow when calculating length +- Better line wrapping performance + +# 0.5.1 + +- Temporarily disable line wrapping +- Add Apache 2.0 license + +# 0.5.0 + +- MIME support, including configurable line endings and line wrapping +- Removed `decode_ws` +- Renamed `Base64Error` to `DecodeError` + +# 0.4.1 + +- Allow decoding a `AsRef<[u8]>` instead of just a `&str` + +# 0.4.0 + +- Configurable padding +- Encode performance improvements + +# 0.3.0 + +- Added encode/decode functions that do not allocate their own storage +- Decode performance improvements +- Extraneous padding bytes are no longer ignored. Now, an error will be returned. 
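+
+As a minimal sketch of the `Engine`-based API that the 0.21.0 migration table above maps the old free functions onto (only `prelude::BASE64_STANDARD` and the `Engine` methods named there are assumed; the input bytes are purely illustrative):
+
+```rust
+use base64::prelude::{Engine as _, BASE64_STANDARD};
+
+fn main() {
+    // pre-0.21 `encode()` is now a method on a concrete engine
+    let encoded = BASE64_STANDARD.encode(b"hello world");
+    // pre-0.21 `decode()` likewise; it returns a Result<Vec<u8>, DecodeError>
+    let decoded = BASE64_STANDARD.decode(&encoded).expect("valid base64");
+    assert_eq!(decoded, b"hello world");
+}
+```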
diff --git a/vendor/base64/benches/benchmarks.rs b/vendor/base64/benches/benchmarks.rs new file mode 100644 index 00000000000000..8f041854e1085a --- /dev/null +++ b/vendor/base64/benches/benchmarks.rs @@ -0,0 +1,238 @@ +#[macro_use] +extern crate criterion; + +use base64::{ + display, + engine::{general_purpose::STANDARD, Engine}, + write, +}; +use criterion::{black_box, Bencher, BenchmarkId, Criterion, Throughput}; +use rand::{Rng, SeedableRng}; +use std::io::{self, Read, Write}; + +fn do_decode_bench(b: &mut Bencher, &size: &usize) { + let mut v: Vec = Vec::with_capacity(size * 3 / 4); + fill(&mut v); + let encoded = STANDARD.encode(&v); + + b.iter(|| { + let orig = STANDARD.decode(&encoded); + black_box(&orig); + }); +} + +fn do_decode_bench_reuse_buf(b: &mut Bencher, &size: &usize) { + let mut v: Vec = Vec::with_capacity(size * 3 / 4); + fill(&mut v); + let encoded = STANDARD.encode(&v); + + let mut buf = Vec::new(); + b.iter(|| { + STANDARD.decode_vec(&encoded, &mut buf).unwrap(); + black_box(&buf); + buf.clear(); + }); +} + +fn do_decode_bench_slice(b: &mut Bencher, &size: &usize) { + let mut v: Vec = Vec::with_capacity(size * 3 / 4); + fill(&mut v); + let encoded = STANDARD.encode(&v); + + let mut buf = vec![0; size]; + b.iter(|| { + STANDARD.decode_slice(&encoded, &mut buf).unwrap(); + black_box(&buf); + }); +} + +fn do_decode_bench_stream(b: &mut Bencher, &size: &usize) { + let mut v: Vec = Vec::with_capacity(size * 3 / 4); + fill(&mut v); + let encoded = STANDARD.encode(&v); + + let mut buf = vec![0; size]; + buf.truncate(0); + + b.iter(|| { + let mut cursor = io::Cursor::new(&encoded[..]); + let mut decoder = base64::read::DecoderReader::new(&mut cursor, &STANDARD); + decoder.read_to_end(&mut buf).unwrap(); + buf.clear(); + black_box(&buf); + }); +} + +fn do_encode_bench(b: &mut Bencher, &size: &usize) { + let mut v: Vec = Vec::with_capacity(size); + fill(&mut v); + b.iter(|| { + let e = STANDARD.encode(&v); + black_box(&e); + }); +} + +fn do_encode_bench_display(b: &mut Bencher, &size: &usize) { + let mut v: Vec = Vec::with_capacity(size); + fill(&mut v); + b.iter(|| { + let e = format!("{}", display::Base64Display::new(&v, &STANDARD)); + black_box(&e); + }); +} + +fn do_encode_bench_reuse_buf(b: &mut Bencher, &size: &usize) { + let mut v: Vec = Vec::with_capacity(size); + fill(&mut v); + let mut buf = String::new(); + b.iter(|| { + STANDARD.encode_string(&v, &mut buf); + buf.clear(); + }); +} + +fn do_encode_bench_slice(b: &mut Bencher, &size: &usize) { + let mut v: Vec = Vec::with_capacity(size); + fill(&mut v); + // conservative estimate of encoded size + let mut buf = vec![0; v.len() * 2]; + b.iter(|| STANDARD.encode_slice(&v, &mut buf).unwrap()); +} + +fn do_encode_bench_stream(b: &mut Bencher, &size: &usize) { + let mut v: Vec = Vec::with_capacity(size); + fill(&mut v); + let mut buf = Vec::with_capacity(size * 2); + + b.iter(|| { + buf.clear(); + let mut stream_enc = write::EncoderWriter::new(&mut buf, &STANDARD); + stream_enc.write_all(&v).unwrap(); + stream_enc.flush().unwrap(); + }); +} + +fn do_encode_bench_string_stream(b: &mut Bencher, &size: &usize) { + let mut v: Vec = Vec::with_capacity(size); + fill(&mut v); + + b.iter(|| { + let mut stream_enc = write::EncoderStringWriter::new(&STANDARD); + stream_enc.write_all(&v).unwrap(); + stream_enc.flush().unwrap(); + let _ = stream_enc.into_inner(); + }); +} + +fn do_encode_bench_string_reuse_buf_stream(b: &mut Bencher, &size: &usize) { + let mut v: Vec = Vec::with_capacity(size); + fill(&mut v); + + let mut buf = 
String::new(); + b.iter(|| { + buf.clear(); + let mut stream_enc = write::EncoderStringWriter::from_consumer(&mut buf, &STANDARD); + stream_enc.write_all(&v).unwrap(); + stream_enc.flush().unwrap(); + let _ = stream_enc.into_inner(); + }); +} + +fn fill(v: &mut Vec) { + let cap = v.capacity(); + // weak randomness is plenty; we just want to not be completely friendly to the branch predictor + let mut r = rand::rngs::SmallRng::from_entropy(); + while v.len() < cap { + v.push(r.gen::()); + } +} + +const BYTE_SIZES: [usize; 5] = [3, 50, 100, 500, 3 * 1024]; + +// Benchmarks over these byte sizes take longer so we will run fewer samples to +// keep the benchmark runtime reasonable. +const LARGE_BYTE_SIZES: [usize; 3] = [3 * 1024 * 1024, 10 * 1024 * 1024, 30 * 1024 * 1024]; + +fn encode_benchmarks(c: &mut Criterion, label: &str, byte_sizes: &[usize]) { + let mut group = c.benchmark_group(label); + group + .warm_up_time(std::time::Duration::from_millis(500)) + .measurement_time(std::time::Duration::from_secs(3)); + + for size in byte_sizes { + group + .throughput(Throughput::Bytes(*size as u64)) + .bench_with_input(BenchmarkId::new("encode", size), size, do_encode_bench) + .bench_with_input( + BenchmarkId::new("encode_display", size), + size, + do_encode_bench_display, + ) + .bench_with_input( + BenchmarkId::new("encode_reuse_buf", size), + size, + do_encode_bench_reuse_buf, + ) + .bench_with_input( + BenchmarkId::new("encode_slice", size), + size, + do_encode_bench_slice, + ) + .bench_with_input( + BenchmarkId::new("encode_reuse_buf_stream", size), + size, + do_encode_bench_stream, + ) + .bench_with_input( + BenchmarkId::new("encode_string_stream", size), + size, + do_encode_bench_string_stream, + ) + .bench_with_input( + BenchmarkId::new("encode_string_reuse_buf_stream", size), + size, + do_encode_bench_string_reuse_buf_stream, + ); + } + + group.finish(); +} + +fn decode_benchmarks(c: &mut Criterion, label: &str, byte_sizes: &[usize]) { + let mut group = c.benchmark_group(label); + + for size in byte_sizes { + group + .warm_up_time(std::time::Duration::from_millis(500)) + .measurement_time(std::time::Duration::from_secs(3)) + .throughput(Throughput::Bytes(*size as u64)) + .bench_with_input(BenchmarkId::new("decode", size), size, do_decode_bench) + .bench_with_input( + BenchmarkId::new("decode_reuse_buf", size), + size, + do_decode_bench_reuse_buf, + ) + .bench_with_input( + BenchmarkId::new("decode_slice", size), + size, + do_decode_bench_slice, + ) + .bench_with_input( + BenchmarkId::new("decode_stream", size), + size, + do_decode_bench_stream, + ); + } + + group.finish(); +} + +fn bench(c: &mut Criterion) { + encode_benchmarks(c, "encode_small_input", &BYTE_SIZES[..]); + encode_benchmarks(c, "encode_large_input", &LARGE_BYTE_SIZES[..]); + decode_benchmarks(c, "decode_small_input", &BYTE_SIZES[..]); + decode_benchmarks(c, "decode_large_input", &LARGE_BYTE_SIZES[..]); +} + +criterion_group!(benches, bench); +criterion_main!(benches); diff --git a/vendor/base64/clippy.toml b/vendor/base64/clippy.toml new file mode 100644 index 00000000000000..11d46a73f3328a --- /dev/null +++ b/vendor/base64/clippy.toml @@ -0,0 +1 @@ +msrv = "1.48.0" diff --git a/vendor/base64/examples/base64.rs b/vendor/base64/examples/base64.rs new file mode 100644 index 00000000000000..0c8aa3fe76c50f --- /dev/null +++ b/vendor/base64/examples/base64.rs @@ -0,0 +1,81 @@ +use std::fs::File; +use std::io::{self, Read}; +use std::path::PathBuf; +use std::process; + +use base64::{alphabet, engine, read, write}; +use clap::Parser; 
+ +#[derive(Clone, Debug, Parser, strum::EnumString, Default)] +#[strum(serialize_all = "kebab-case")] +enum Alphabet { + #[default] + Standard, + UrlSafe, +} + +/// Base64 encode or decode FILE (or standard input), to standard output. +#[derive(Debug, Parser)] +struct Opt { + /// Decode the base64-encoded input (default: encode the input as base64). + #[structopt(short = 'd', long = "decode")] + decode: bool, + + /// The encoding alphabet: "standard" (default) or "url-safe". + #[structopt(long = "alphabet")] + alphabet: Option, + + /// Omit padding characters while encoding, and reject them while decoding. + #[structopt(short = 'p', long = "no-padding")] + no_padding: bool, + + /// The file to encode or decode. + #[structopt(name = "FILE", parse(from_os_str))] + file: Option, +} + +fn main() { + let opt = Opt::parse(); + let stdin; + let mut input: Box = match opt.file { + None => { + stdin = io::stdin(); + Box::new(stdin.lock()) + } + Some(ref f) if f.as_os_str() == "-" => { + stdin = io::stdin(); + Box::new(stdin.lock()) + } + Some(f) => Box::new(File::open(f).unwrap()), + }; + + let alphabet = opt.alphabet.unwrap_or_default(); + let engine = engine::GeneralPurpose::new( + &match alphabet { + Alphabet::Standard => alphabet::STANDARD, + Alphabet::UrlSafe => alphabet::URL_SAFE, + }, + match opt.no_padding { + true => engine::general_purpose::NO_PAD, + false => engine::general_purpose::PAD, + }, + ); + + let stdout = io::stdout(); + let mut stdout = stdout.lock(); + let r = if opt.decode { + let mut decoder = read::DecoderReader::new(&mut input, &engine); + io::copy(&mut decoder, &mut stdout) + } else { + let mut encoder = write::EncoderWriter::new(&mut stdout, &engine); + io::copy(&mut input, &mut encoder) + }; + if let Err(e) = r { + eprintln!( + "Base64 {} failed with {}", + if opt.decode { "decode" } else { "encode" }, + e + ); + process::exit(1); + } +} diff --git a/vendor/base64/icon_CLion.svg b/vendor/base64/icon_CLion.svg new file mode 100644 index 00000000000000..e9edb0445ea387 --- /dev/null +++ b/vendor/base64/icon_CLion.svg @@ -0,0 +1,34 @@ + + + + + + + + + + + + + + + + + + + + + icon_CLion + + + + + + + + + + + + + diff --git a/vendor/base64/src/alphabet.rs b/vendor/base64/src/alphabet.rs new file mode 100644 index 00000000000000..b07bfdfe65823a --- /dev/null +++ b/vendor/base64/src/alphabet.rs @@ -0,0 +1,285 @@ +//! Provides [Alphabet] and constants for alphabets commonly used in the wild. + +use crate::PAD_BYTE; +use core::{convert, fmt}; +#[cfg(any(feature = "std", test))] +use std::error; + +const ALPHABET_SIZE: usize = 64; + +/// An alphabet defines the 64 ASCII characters (symbols) used for base64. +/// +/// Common alphabets are provided as constants, and custom alphabets +/// can be made via `from_str` or the `TryFrom` implementation. 
+/// +/// # Examples +/// +/// Building and using a custom Alphabet: +/// +/// ``` +/// let custom = base64::alphabet::Alphabet::new("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/").unwrap(); +/// +/// let engine = base64::engine::GeneralPurpose::new( +/// &custom, +/// base64::engine::general_purpose::PAD); +/// ``` +/// +/// Building a const: +/// +/// ``` +/// use base64::alphabet::Alphabet; +/// +/// static CUSTOM: Alphabet = { +/// // Result::unwrap() isn't const yet, but panic!() is OK +/// match Alphabet::new("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/") { +/// Ok(x) => x, +/// Err(_) => panic!("creation of alphabet failed"), +/// } +/// }; +/// ``` +/// +/// Building lazily: +/// +/// ``` +/// use base64::{ +/// alphabet::Alphabet, +/// engine::{general_purpose::GeneralPurpose, GeneralPurposeConfig}, +/// }; +/// use once_cell::sync::Lazy; +/// +/// static CUSTOM: Lazy = Lazy::new(|| +/// Alphabet::new("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/").unwrap() +/// ); +/// ``` +#[derive(Clone, Debug, Eq, PartialEq)] +pub struct Alphabet { + pub(crate) symbols: [u8; ALPHABET_SIZE], +} + +impl Alphabet { + /// Performs no checks so that it can be const. + /// Used only for known-valid strings. + const fn from_str_unchecked(alphabet: &str) -> Self { + let mut symbols = [0_u8; ALPHABET_SIZE]; + let source_bytes = alphabet.as_bytes(); + + // a way to copy that's allowed in const fn + let mut index = 0; + while index < ALPHABET_SIZE { + symbols[index] = source_bytes[index]; + index += 1; + } + + Self { symbols } + } + + /// Create an `Alphabet` from a string of 64 unique printable ASCII bytes. + /// + /// The `=` byte is not allowed as it is used for padding. + pub const fn new(alphabet: &str) -> Result { + let bytes = alphabet.as_bytes(); + if bytes.len() != ALPHABET_SIZE { + return Err(ParseAlphabetError::InvalidLength); + } + + { + let mut index = 0; + while index < ALPHABET_SIZE { + let byte = bytes[index]; + + // must be ascii printable. 127 (DEL) is commonly considered printable + // for some reason but clearly unsuitable for base64. + if !(byte >= 32_u8 && byte <= 126_u8) { + return Err(ParseAlphabetError::UnprintableByte(byte)); + } + // = is assumed to be padding, so cannot be used as a symbol + if byte == PAD_BYTE { + return Err(ParseAlphabetError::ReservedByte(byte)); + } + + // Check for duplicates while staying within what const allows. + // It's n^2, but only over 64 hot bytes, and only once, so it's likely in the single digit + // microsecond range. + + let mut probe_index = 0; + while probe_index < ALPHABET_SIZE { + if probe_index == index { + probe_index += 1; + continue; + } + + let probe_byte = bytes[probe_index]; + + if byte == probe_byte { + return Err(ParseAlphabetError::DuplicatedByte(byte)); + } + + probe_index += 1; + } + + index += 1; + } + } + + Ok(Self::from_str_unchecked(alphabet)) + } + + /// Create a `&str` from the symbols in the `Alphabet` + pub fn as_str(&self) -> &str { + core::str::from_utf8(&self.symbols).unwrap() + } +} + +impl convert::TryFrom<&str> for Alphabet { + type Error = ParseAlphabetError; + + fn try_from(value: &str) -> Result { + Self::new(value) + } +} + +/// Possible errors when constructing an [Alphabet] from a `str`. +#[derive(Debug, Eq, PartialEq)] +pub enum ParseAlphabetError { + /// Alphabets must be 64 ASCII bytes + InvalidLength, + /// All bytes must be unique + DuplicatedByte(u8), + /// All bytes must be printable (in the range `[32, 126]`). 
+ UnprintableByte(u8), + /// `=` cannot be used + ReservedByte(u8), +} + +impl fmt::Display for ParseAlphabetError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + Self::InvalidLength => write!(f, "Invalid length - must be 64 bytes"), + Self::DuplicatedByte(b) => write!(f, "Duplicated byte: {:#04x}", b), + Self::UnprintableByte(b) => write!(f, "Unprintable byte: {:#04x}", b), + Self::ReservedByte(b) => write!(f, "Reserved byte: {:#04x}", b), + } + } +} + +#[cfg(any(feature = "std", test))] +impl error::Error for ParseAlphabetError {} + +/// The standard alphabet (with `+` and `/`) specified in [RFC 4648][]. +/// +/// [RFC 4648]: https://datatracker.ietf.org/doc/html/rfc4648#section-4 +pub const STANDARD: Alphabet = Alphabet::from_str_unchecked( + "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/", +); + +/// The URL-safe alphabet (with `-` and `_`) specified in [RFC 4648][]. +/// +/// [RFC 4648]: https://datatracker.ietf.org/doc/html/rfc4648#section-5 +pub const URL_SAFE: Alphabet = Alphabet::from_str_unchecked( + "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_", +); + +/// The `crypt(3)` alphabet (with `.` and `/` as the _first_ two characters). +/// +/// Not standardized, but folk wisdom on the net asserts that this alphabet is what crypt uses. +pub const CRYPT: Alphabet = Alphabet::from_str_unchecked( + "./0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz", +); + +/// The bcrypt alphabet. +pub const BCRYPT: Alphabet = Alphabet::from_str_unchecked( + "./ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789", +); + +/// The alphabet used in IMAP-modified UTF-7 (with `+` and `,`). +/// +/// See [RFC 3501](https://tools.ietf.org/html/rfc3501#section-5.1.3) +pub const IMAP_MUTF7: Alphabet = Alphabet::from_str_unchecked( + "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+,", +); + +/// The alphabet used in BinHex 4.0 files. 
+/// +/// See [BinHex 4.0 Definition](http://files.stairways.com/other/binhex-40-specs-info.txt) +pub const BIN_HEX: Alphabet = Alphabet::from_str_unchecked( + "!\"#$%&'()*+,-012345689@ABCDEFGHIJKLMNPQRSTUVXYZ[`abcdefhijklmpqr", +); + +#[cfg(test)] +mod tests { + use crate::alphabet::*; + use core::convert::TryFrom as _; + + #[test] + fn detects_duplicate_start() { + assert_eq!( + ParseAlphabetError::DuplicatedByte(b'A'), + Alphabet::new("AACDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/") + .unwrap_err() + ); + } + + #[test] + fn detects_duplicate_end() { + assert_eq!( + ParseAlphabetError::DuplicatedByte(b'/'), + Alphabet::new("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789//") + .unwrap_err() + ); + } + + #[test] + fn detects_duplicate_middle() { + assert_eq!( + ParseAlphabetError::DuplicatedByte(b'Z'), + Alphabet::new("ABCDEFGHIJKLMNOPQRSTUVWXYZZbcdefghijklmnopqrstuvwxyz0123456789+/") + .unwrap_err() + ); + } + + #[test] + fn detects_length() { + assert_eq!( + ParseAlphabetError::InvalidLength, + Alphabet::new( + "xxxxxxxxxABCDEFGHIJKLMNOPQRSTUVWXYZZbcdefghijklmnopqrstuvwxyz0123456789+/", + ) + .unwrap_err() + ); + } + + #[test] + fn detects_padding() { + assert_eq!( + ParseAlphabetError::ReservedByte(b'='), + Alphabet::new("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+=") + .unwrap_err() + ); + } + + #[test] + fn detects_unprintable() { + // form feed + assert_eq!( + ParseAlphabetError::UnprintableByte(0xc), + Alphabet::new("\x0cBCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/") + .unwrap_err() + ); + } + + #[test] + fn same_as_unchecked() { + assert_eq!( + STANDARD, + Alphabet::try_from("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/") + .unwrap() + ); + } + + #[test] + fn str_same_as_input() { + let alphabet = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"; + let a = Alphabet::try_from(alphabet).unwrap(); + assert_eq!(alphabet, a.as_str()) + } +} diff --git a/vendor/base64/src/chunked_encoder.rs b/vendor/base64/src/chunked_encoder.rs new file mode 100644 index 00000000000000..817b339f3b8151 --- /dev/null +++ b/vendor/base64/src/chunked_encoder.rs @@ -0,0 +1,172 @@ +use crate::{ + encode::add_padding, + engine::{Config, Engine}, +}; +#[cfg(any(feature = "alloc", test))] +use alloc::string::String; +#[cfg(any(feature = "alloc", test))] +use core::str; + +/// The output mechanism for ChunkedEncoder's encoded bytes. +pub trait Sink { + type Error; + + /// Handle a chunk of encoded base64 data (as UTF-8 bytes) + fn write_encoded_bytes(&mut self, encoded: &[u8]) -> Result<(), Self::Error>; +} + +/// A base64 encoder that emits encoded bytes in chunks without heap allocation. +pub struct ChunkedEncoder<'e, E: Engine + ?Sized> { + engine: &'e E, +} + +impl<'e, E: Engine + ?Sized> ChunkedEncoder<'e, E> { + pub fn new(engine: &'e E) -> ChunkedEncoder<'e, E> { + ChunkedEncoder { engine } + } + + pub fn encode(&self, bytes: &[u8], sink: &mut S) -> Result<(), S::Error> { + const BUF_SIZE: usize = 1024; + const CHUNK_SIZE: usize = BUF_SIZE / 4 * 3; + + let mut buf = [0; BUF_SIZE]; + for chunk in bytes.chunks(CHUNK_SIZE) { + let mut len = self.engine.internal_encode(chunk, &mut buf); + if chunk.len() != CHUNK_SIZE && self.engine.config().encode_padding() { + // Final, potentially partial, chunk. + // Only need to consider if padding is needed on a partial chunk since full chunk + // is a multiple of 3, which therefore won't be padded. 
+ // Pad output to multiple of four bytes if required by config. + len += add_padding(len, &mut buf[len..]); + } + sink.write_encoded_bytes(&buf[..len])?; + } + + Ok(()) + } +} + +// A really simple sink that just appends to a string +#[cfg(any(feature = "alloc", test))] +pub(crate) struct StringSink<'a> { + string: &'a mut String, +} + +#[cfg(any(feature = "alloc", test))] +impl<'a> StringSink<'a> { + pub(crate) fn new(s: &mut String) -> StringSink { + StringSink { string: s } + } +} + +#[cfg(any(feature = "alloc", test))] +impl<'a> Sink for StringSink<'a> { + type Error = (); + + fn write_encoded_bytes(&mut self, s: &[u8]) -> Result<(), Self::Error> { + self.string.push_str(str::from_utf8(s).unwrap()); + + Ok(()) + } +} + +#[cfg(test)] +pub mod tests { + use rand::{ + distributions::{Distribution, Uniform}, + Rng, SeedableRng, + }; + + use crate::{ + alphabet::STANDARD, + engine::general_purpose::{GeneralPurpose, GeneralPurposeConfig, PAD}, + tests::random_engine, + }; + + use super::*; + + #[test] + fn chunked_encode_empty() { + assert_eq!("", chunked_encode_str(&[], PAD)); + } + + #[test] + fn chunked_encode_intermediate_fast_loop() { + // > 8 bytes input, will enter the pretty fast loop + assert_eq!("Zm9vYmFyYmF6cXV4", chunked_encode_str(b"foobarbazqux", PAD)); + } + + #[test] + fn chunked_encode_fast_loop() { + // > 32 bytes input, will enter the uber fast loop + assert_eq!( + "Zm9vYmFyYmF6cXV4cXV1eGNvcmdlZ3JhdWx0Z2FycGx5eg==", + chunked_encode_str(b"foobarbazquxquuxcorgegraultgarplyz", PAD) + ); + } + + #[test] + fn chunked_encode_slow_loop_only() { + // < 8 bytes input, slow loop only + assert_eq!("Zm9vYmFy", chunked_encode_str(b"foobar", PAD)); + } + + #[test] + fn chunked_encode_matches_normal_encode_random_string_sink() { + let helper = StringSinkTestHelper; + chunked_encode_matches_normal_encode_random(&helper); + } + + pub fn chunked_encode_matches_normal_encode_random(sink_test_helper: &S) { + let mut input_buf: Vec = Vec::new(); + let mut output_buf = String::new(); + let mut rng = rand::rngs::SmallRng::from_entropy(); + let input_len_range = Uniform::new(1, 10_000); + + for _ in 0..20_000 { + input_buf.clear(); + output_buf.clear(); + + let buf_len = input_len_range.sample(&mut rng); + for _ in 0..buf_len { + input_buf.push(rng.gen()); + } + + let engine = random_engine(&mut rng); + + let chunk_encoded_string = sink_test_helper.encode_to_string(&engine, &input_buf); + engine.encode_string(&input_buf, &mut output_buf); + + assert_eq!(output_buf, chunk_encoded_string, "input len={}", buf_len); + } + } + + fn chunked_encode_str(bytes: &[u8], config: GeneralPurposeConfig) -> String { + let mut s = String::new(); + + let mut sink = StringSink::new(&mut s); + let engine = GeneralPurpose::new(&STANDARD, config); + let encoder = ChunkedEncoder::new(&engine); + encoder.encode(bytes, &mut sink).unwrap(); + + s + } + + // An abstraction around sinks so that we can have tests that easily to any sink implementation + pub trait SinkTestHelper { + fn encode_to_string(&self, engine: &E, bytes: &[u8]) -> String; + } + + struct StringSinkTestHelper; + + impl SinkTestHelper for StringSinkTestHelper { + fn encode_to_string(&self, engine: &E, bytes: &[u8]) -> String { + let encoder = ChunkedEncoder::new(engine); + let mut s = String::new(); + let mut sink = StringSink::new(&mut s); + encoder.encode(bytes, &mut sink).unwrap(); + + s + } + } +} diff --git a/vendor/base64/src/decode.rs b/vendor/base64/src/decode.rs new file mode 100644 index 00000000000000..6df8abad2ca202 --- /dev/null +++ 
b/vendor/base64/src/decode.rs @@ -0,0 +1,386 @@ +use crate::engine::{general_purpose::STANDARD, DecodeEstimate, Engine}; +#[cfg(any(feature = "alloc", test))] +use alloc::vec::Vec; +use core::fmt; +#[cfg(any(feature = "std", test))] +use std::error; + +/// Errors that can occur while decoding. +#[derive(Clone, Debug, PartialEq, Eq)] +pub enum DecodeError { + /// An invalid byte was found in the input. The offset and offending byte are provided. + /// + /// Padding characters (`=`) interspersed in the encoded form are invalid, as they may only + /// be present as the last 0-2 bytes of input. + /// + /// This error may also indicate that extraneous trailing input bytes are present, causing + /// otherwise valid padding to no longer be the last bytes of input. + InvalidByte(usize, u8), + /// The length of the input, as measured in valid base64 symbols, is invalid. + /// There must be 2-4 symbols in the last input quad. + InvalidLength(usize), + /// The last non-padding input symbol's encoded 6 bits have nonzero bits that will be discarded. + /// This is indicative of corrupted or truncated Base64. + /// Unlike [DecodeError::InvalidByte], which reports symbols that aren't in the alphabet, + /// this error is for symbols that are in the alphabet but represent nonsensical encodings. + InvalidLastSymbol(usize, u8), + /// The nature of the padding was not as configured: absent or incorrect when it must be + /// canonical, or present when it must be absent, etc. + InvalidPadding, +} + +impl fmt::Display for DecodeError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match *self { + Self::InvalidByte(index, byte) => { + write!(f, "Invalid symbol {}, offset {}.", byte, index) + } + Self::InvalidLength(len) => write!(f, "Invalid input length: {}", len), + Self::InvalidLastSymbol(index, byte) => { + write!(f, "Invalid last symbol {}, offset {}.", byte, index) + } + Self::InvalidPadding => write!(f, "Invalid padding"), + } + } +} + +#[cfg(any(feature = "std", test))] +impl error::Error for DecodeError {} + +/// Errors that can occur while decoding into a slice. +#[derive(Clone, Debug, PartialEq, Eq)] +pub enum DecodeSliceError { + /// A [DecodeError] occurred + DecodeError(DecodeError), + /// The provided slice is too small. + OutputSliceTooSmall, +} + +impl fmt::Display for DecodeSliceError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + Self::DecodeError(e) => write!(f, "DecodeError: {}", e), + Self::OutputSliceTooSmall => write!(f, "Output slice too small"), + } + } +} + +#[cfg(any(feature = "std", test))] +impl error::Error for DecodeSliceError { + fn source(&self) -> Option<&(dyn error::Error + 'static)> { + match self { + DecodeSliceError::DecodeError(e) => Some(e), + DecodeSliceError::OutputSliceTooSmall => None, + } + } +} + +impl From for DecodeSliceError { + fn from(e: DecodeError) -> Self { + DecodeSliceError::DecodeError(e) + } +} + +/// Decode base64 using the [`STANDARD` engine](STANDARD). +/// +/// See [Engine::decode]. +#[deprecated(since = "0.21.0", note = "Use Engine::decode")] +#[cfg(any(feature = "alloc", test))] +pub fn decode>(input: T) -> Result, DecodeError> { + STANDARD.decode(input) +} + +/// Decode from string reference as octets using the specified [Engine]. +/// +/// See [Engine::decode]. +///Returns a `Result` containing a `Vec`. 
+#[deprecated(since = "0.21.0", note = "Use Engine::decode")] +#[cfg(any(feature = "alloc", test))] +pub fn decode_engine>( + input: T, + engine: &E, +) -> Result, DecodeError> { + engine.decode(input) +} + +/// Decode from string reference as octets. +/// +/// See [Engine::decode_vec]. +#[cfg(any(feature = "alloc", test))] +#[deprecated(since = "0.21.0", note = "Use Engine::decode_vec")] +pub fn decode_engine_vec>( + input: T, + buffer: &mut Vec, + engine: &E, +) -> Result<(), DecodeError> { + engine.decode_vec(input, buffer) +} + +/// Decode the input into the provided output slice. +/// +/// See [Engine::decode_slice]. +#[deprecated(since = "0.21.0", note = "Use Engine::decode_slice")] +pub fn decode_engine_slice>( + input: T, + output: &mut [u8], + engine: &E, +) -> Result { + engine.decode_slice(input, output) +} + +/// Returns a conservative estimate of the decoded size of `encoded_len` base64 symbols (rounded up +/// to the next group of 3 decoded bytes). +/// +/// The resulting length will be a safe choice for the size of a decode buffer, but may have up to +/// 2 trailing bytes that won't end up being needed. +/// +/// # Examples +/// +/// ``` +/// use base64::decoded_len_estimate; +/// +/// assert_eq!(3, decoded_len_estimate(1)); +/// assert_eq!(3, decoded_len_estimate(2)); +/// assert_eq!(3, decoded_len_estimate(3)); +/// assert_eq!(3, decoded_len_estimate(4)); +/// // start of the next quad of encoded symbols +/// assert_eq!(6, decoded_len_estimate(5)); +/// ``` +pub fn decoded_len_estimate(encoded_len: usize) -> usize { + STANDARD + .internal_decoded_len_estimate(encoded_len) + .decoded_len_estimate() +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::{ + alphabet, + engine::{general_purpose, Config, GeneralPurpose}, + tests::{assert_encode_sanity, random_engine}, + }; + use rand::{ + distributions::{Distribution, Uniform}, + Rng, SeedableRng, + }; + + #[test] + fn decode_into_nonempty_vec_doesnt_clobber_existing_prefix() { + let mut orig_data = Vec::new(); + let mut encoded_data = String::new(); + let mut decoded_with_prefix = Vec::new(); + let mut decoded_without_prefix = Vec::new(); + let mut prefix = Vec::new(); + + let prefix_len_range = Uniform::new(0, 1000); + let input_len_range = Uniform::new(0, 1000); + + let mut rng = rand::rngs::SmallRng::from_entropy(); + + for _ in 0..10_000 { + orig_data.clear(); + encoded_data.clear(); + decoded_with_prefix.clear(); + decoded_without_prefix.clear(); + prefix.clear(); + + let input_len = input_len_range.sample(&mut rng); + + for _ in 0..input_len { + orig_data.push(rng.gen()); + } + + let engine = random_engine(&mut rng); + engine.encode_string(&orig_data, &mut encoded_data); + assert_encode_sanity(&encoded_data, engine.config().encode_padding(), input_len); + + let prefix_len = prefix_len_range.sample(&mut rng); + + // fill the buf with a prefix + for _ in 0..prefix_len { + prefix.push(rng.gen()); + } + + decoded_with_prefix.resize(prefix_len, 0); + decoded_with_prefix.copy_from_slice(&prefix); + + // decode into the non-empty buf + engine + .decode_vec(&encoded_data, &mut decoded_with_prefix) + .unwrap(); + // also decode into the empty buf + engine + .decode_vec(&encoded_data, &mut decoded_without_prefix) + .unwrap(); + + assert_eq!( + prefix_len + decoded_without_prefix.len(), + decoded_with_prefix.len() + ); + assert_eq!(orig_data, decoded_without_prefix); + + // append plain decode onto prefix + prefix.append(&mut decoded_without_prefix); + + assert_eq!(prefix, decoded_with_prefix); + } + } + + #[test] + fn 
decode_slice_doesnt_clobber_existing_prefix_or_suffix() { + do_decode_slice_doesnt_clobber_existing_prefix_or_suffix(|e, input, output| { + e.decode_slice(input, output).unwrap() + }) + } + + #[test] + fn decode_slice_unchecked_doesnt_clobber_existing_prefix_or_suffix() { + do_decode_slice_doesnt_clobber_existing_prefix_or_suffix(|e, input, output| { + e.decode_slice_unchecked(input, output).unwrap() + }) + } + + #[test] + fn decode_engine_estimation_works_for_various_lengths() { + let engine = GeneralPurpose::new(&alphabet::STANDARD, general_purpose::NO_PAD); + for num_prefix_quads in 0..100 { + for suffix in &["AA", "AAA", "AAAA"] { + let mut prefix = "AAAA".repeat(num_prefix_quads); + prefix.push_str(suffix); + // make sure no overflow (and thus a panic) occurs + let res = engine.decode(prefix); + assert!(res.is_ok()); + } + } + } + + #[test] + fn decode_slice_output_length_errors() { + for num_quads in 1..100 { + let input = "AAAA".repeat(num_quads); + let mut vec = vec![0; (num_quads - 1) * 3]; + assert_eq!( + DecodeSliceError::OutputSliceTooSmall, + STANDARD.decode_slice(&input, &mut vec).unwrap_err() + ); + vec.push(0); + assert_eq!( + DecodeSliceError::OutputSliceTooSmall, + STANDARD.decode_slice(&input, &mut vec).unwrap_err() + ); + vec.push(0); + assert_eq!( + DecodeSliceError::OutputSliceTooSmall, + STANDARD.decode_slice(&input, &mut vec).unwrap_err() + ); + vec.push(0); + // now it works + assert_eq!( + num_quads * 3, + STANDARD.decode_slice(&input, &mut vec).unwrap() + ); + } + } + + fn do_decode_slice_doesnt_clobber_existing_prefix_or_suffix< + F: Fn(&GeneralPurpose, &[u8], &mut [u8]) -> usize, + >( + call_decode: F, + ) { + let mut orig_data = Vec::new(); + let mut encoded_data = String::new(); + let mut decode_buf = Vec::new(); + let mut decode_buf_copy: Vec = Vec::new(); + + let input_len_range = Uniform::new(0, 1000); + + let mut rng = rand::rngs::SmallRng::from_entropy(); + + for _ in 0..10_000 { + orig_data.clear(); + encoded_data.clear(); + decode_buf.clear(); + decode_buf_copy.clear(); + + let input_len = input_len_range.sample(&mut rng); + + for _ in 0..input_len { + orig_data.push(rng.gen()); + } + + let engine = random_engine(&mut rng); + engine.encode_string(&orig_data, &mut encoded_data); + assert_encode_sanity(&encoded_data, engine.config().encode_padding(), input_len); + + // fill the buffer with random garbage, long enough to have some room before and after + for _ in 0..5000 { + decode_buf.push(rng.gen()); + } + + // keep a copy for later comparison + decode_buf_copy.extend(decode_buf.iter()); + + let offset = 1000; + + // decode into the non-empty buf + let decode_bytes_written = + call_decode(&engine, encoded_data.as_bytes(), &mut decode_buf[offset..]); + + assert_eq!(orig_data.len(), decode_bytes_written); + assert_eq!( + orig_data, + &decode_buf[offset..(offset + decode_bytes_written)] + ); + assert_eq!(&decode_buf_copy[0..offset], &decode_buf[0..offset]); + assert_eq!( + &decode_buf_copy[offset + decode_bytes_written..], + &decode_buf[offset + decode_bytes_written..] 
+ ); + } + } +} + +#[allow(deprecated)] +#[cfg(test)] +mod coverage_gaming { + use super::*; + use std::error::Error; + + #[test] + fn decode_error() { + let _ = format!("{:?}", DecodeError::InvalidPadding.clone()); + let _ = format!( + "{} {} {} {}", + DecodeError::InvalidByte(0, 0), + DecodeError::InvalidLength(0), + DecodeError::InvalidLastSymbol(0, 0), + DecodeError::InvalidPadding, + ); + } + + #[test] + fn decode_slice_error() { + let _ = format!("{:?}", DecodeSliceError::OutputSliceTooSmall.clone()); + let _ = format!( + "{} {}", + DecodeSliceError::OutputSliceTooSmall, + DecodeSliceError::DecodeError(DecodeError::InvalidPadding) + ); + let _ = DecodeSliceError::OutputSliceTooSmall.source(); + let _ = DecodeSliceError::DecodeError(DecodeError::InvalidPadding).source(); + } + + #[test] + fn deprecated_fns() { + let _ = decode(""); + let _ = decode_engine("", &crate::prelude::BASE64_STANDARD); + let _ = decode_engine_vec("", &mut Vec::new(), &crate::prelude::BASE64_STANDARD); + let _ = decode_engine_slice("", &mut [], &crate::prelude::BASE64_STANDARD); + } + + #[test] + fn decoded_len_est() { + assert_eq!(3, decoded_len_estimate(4)); + } +} diff --git a/vendor/base64/src/display.rs b/vendor/base64/src/display.rs new file mode 100644 index 00000000000000..fc292f1b00a66a --- /dev/null +++ b/vendor/base64/src/display.rs @@ -0,0 +1,88 @@ +//! Enables base64'd output anywhere you might use a `Display` implementation, like a format string. +//! +//! ``` +//! use base64::{display::Base64Display, engine::general_purpose::STANDARD}; +//! +//! let data = vec![0x0, 0x1, 0x2, 0x3]; +//! let wrapper = Base64Display::new(&data, &STANDARD); +//! +//! assert_eq!("base64: AAECAw==", format!("base64: {}", wrapper)); +//! ``` + +use super::chunked_encoder::ChunkedEncoder; +use crate::engine::Engine; +use core::fmt::{Display, Formatter}; +use core::{fmt, str}; + +/// A convenience wrapper for base64'ing bytes into a format string without heap allocation. +pub struct Base64Display<'a, 'e, E: Engine> { + bytes: &'a [u8], + chunked_encoder: ChunkedEncoder<'e, E>, +} + +impl<'a, 'e, E: Engine> Base64Display<'a, 'e, E> { + /// Create a `Base64Display` with the provided engine. + pub fn new(bytes: &'a [u8], engine: &'e E) -> Base64Display<'a, 'e, E> { + Base64Display { + bytes, + chunked_encoder: ChunkedEncoder::new(engine), + } + } +} + +impl<'a, 'e, E: Engine> Display for Base64Display<'a, 'e, E> { + fn fmt(&self, formatter: &mut Formatter) -> Result<(), fmt::Error> { + let mut sink = FormatterSink { f: formatter }; + self.chunked_encoder.encode(self.bytes, &mut sink) + } +} + +struct FormatterSink<'a, 'b: 'a> { + f: &'a mut Formatter<'b>, +} + +impl<'a, 'b: 'a> super::chunked_encoder::Sink for FormatterSink<'a, 'b> { + type Error = fmt::Error; + + fn write_encoded_bytes(&mut self, encoded: &[u8]) -> Result<(), Self::Error> { + // Avoid unsafe. If max performance is needed, write your own display wrapper that uses + // unsafe here to gain about 10-15%. 
+ self.f + .write_str(str::from_utf8(encoded).expect("base64 data was not utf8")) + } +} + +#[cfg(test)] +mod tests { + use super::super::chunked_encoder::tests::{ + chunked_encode_matches_normal_encode_random, SinkTestHelper, + }; + use super::*; + use crate::engine::general_purpose::STANDARD; + + #[test] + fn basic_display() { + assert_eq!( + "~$Zm9vYmFy#*", + format!("~${}#*", Base64Display::new(b"foobar", &STANDARD)) + ); + assert_eq!( + "~$Zm9vYmFyZg==#*", + format!("~${}#*", Base64Display::new(b"foobarf", &STANDARD)) + ); + } + + #[test] + fn display_encode_matches_normal_encode() { + let helper = DisplaySinkTestHelper; + chunked_encode_matches_normal_encode_random(&helper); + } + + struct DisplaySinkTestHelper; + + impl SinkTestHelper for DisplaySinkTestHelper { + fn encode_to_string(&self, engine: &E, bytes: &[u8]) -> String { + format!("{}", Base64Display::new(bytes, engine)) + } + } +} diff --git a/vendor/base64/src/encode.rs b/vendor/base64/src/encode.rs new file mode 100644 index 00000000000000..ae6d79074d71f9 --- /dev/null +++ b/vendor/base64/src/encode.rs @@ -0,0 +1,492 @@ +#[cfg(any(feature = "alloc", test))] +use alloc::string::String; +use core::fmt; +#[cfg(any(feature = "std", test))] +use std::error; + +#[cfg(any(feature = "alloc", test))] +use crate::engine::general_purpose::STANDARD; +use crate::engine::{Config, Engine}; +use crate::PAD_BYTE; + +/// Encode arbitrary octets as base64 using the [`STANDARD` engine](STANDARD). +/// +/// See [Engine::encode]. +#[allow(unused)] +#[deprecated(since = "0.21.0", note = "Use Engine::encode")] +#[cfg(any(feature = "alloc", test))] +pub fn encode>(input: T) -> String { + STANDARD.encode(input) +} + +///Encode arbitrary octets as base64 using the provided `Engine` into a new `String`. +/// +/// See [Engine::encode]. +#[allow(unused)] +#[deprecated(since = "0.21.0", note = "Use Engine::encode")] +#[cfg(any(feature = "alloc", test))] +pub fn encode_engine>(input: T, engine: &E) -> String { + engine.encode(input) +} + +///Encode arbitrary octets as base64 into a supplied `String`. +/// +/// See [Engine::encode_string]. +#[allow(unused)] +#[deprecated(since = "0.21.0", note = "Use Engine::encode_string")] +#[cfg(any(feature = "alloc", test))] +pub fn encode_engine_string>( + input: T, + output_buf: &mut String, + engine: &E, +) { + engine.encode_string(input, output_buf) +} + +/// Encode arbitrary octets as base64 into a supplied slice. +/// +/// See [Engine::encode_slice]. +#[allow(unused)] +#[deprecated(since = "0.21.0", note = "Use Engine::encode_slice")] +pub fn encode_engine_slice>( + input: T, + output_buf: &mut [u8], + engine: &E, +) -> Result { + engine.encode_slice(input, output_buf) +} + +/// B64-encode and pad (if configured). +/// +/// This helper exists to avoid recalculating encoded_size, which is relatively expensive on short +/// inputs. +/// +/// `encoded_size` is the encoded size calculated for `input`. +/// +/// `output` must be of size `encoded_size`. +/// +/// All bytes in `output` will be written to since it is exactly the size of the output. 
+pub(crate) fn encode_with_padding( + input: &[u8], + output: &mut [u8], + engine: &E, + expected_encoded_size: usize, +) { + debug_assert_eq!(expected_encoded_size, output.len()); + + let b64_bytes_written = engine.internal_encode(input, output); + + let padding_bytes = if engine.config().encode_padding() { + add_padding(b64_bytes_written, &mut output[b64_bytes_written..]) + } else { + 0 + }; + + let encoded_bytes = b64_bytes_written + .checked_add(padding_bytes) + .expect("usize overflow when calculating b64 length"); + + debug_assert_eq!(expected_encoded_size, encoded_bytes); +} + +/// Calculate the base64 encoded length for a given input length, optionally including any +/// appropriate padding bytes. +/// +/// Returns `None` if the encoded length can't be represented in `usize`. This will happen for +/// input lengths in approximately the top quarter of the range of `usize`. +pub const fn encoded_len(bytes_len: usize, padding: bool) -> Option { + let rem = bytes_len % 3; + + let complete_input_chunks = bytes_len / 3; + // `?` is disallowed in const, and `let Some(_) = _ else` requires 1.65.0, whereas this + // messier syntax works on 1.48 + let complete_chunk_output = + if let Some(complete_chunk_output) = complete_input_chunks.checked_mul(4) { + complete_chunk_output + } else { + return None; + }; + + if rem > 0 { + if padding { + complete_chunk_output.checked_add(4) + } else { + let encoded_rem = match rem { + 1 => 2, + // only other possible remainder is 2 + // can't use a separate _ => unreachable!() in const fns in ancient rust versions + _ => 3, + }; + complete_chunk_output.checked_add(encoded_rem) + } + } else { + Some(complete_chunk_output) + } +} + +/// Write padding characters. +/// `unpadded_output_len` is the size of the unpadded but base64 encoded data. +/// `output` is the slice where padding should be written, of length at least 2. +/// +/// Returns the number of padding bytes written. +pub(crate) fn add_padding(unpadded_output_len: usize, output: &mut [u8]) -> usize { + let pad_bytes = (4 - (unpadded_output_len % 4)) % 4; + // for just a couple bytes, this has better performance than using + // .fill(), or iterating over mutable refs, which call memset() + #[allow(clippy::needless_range_loop)] + for i in 0..pad_bytes { + output[i] = PAD_BYTE; + } + + pad_bytes +} + +/// Errors that can occur while encoding into a slice. +#[derive(Clone, Debug, PartialEq, Eq)] +pub enum EncodeSliceError { + /// The provided slice is too small. 
+ OutputSliceTooSmall, +} + +impl fmt::Display for EncodeSliceError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + Self::OutputSliceTooSmall => write!(f, "Output slice too small"), + } + } +} + +#[cfg(any(feature = "std", test))] +impl error::Error for EncodeSliceError {} + +#[cfg(test)] +mod tests { + use super::*; + + use crate::{ + alphabet, + engine::general_purpose::{GeneralPurpose, NO_PAD, STANDARD}, + tests::{assert_encode_sanity, random_config, random_engine}, + }; + use rand::{ + distributions::{Distribution, Uniform}, + Rng, SeedableRng, + }; + use std::str; + + const URL_SAFE_NO_PAD_ENGINE: GeneralPurpose = GeneralPurpose::new(&alphabet::URL_SAFE, NO_PAD); + + #[test] + fn encoded_size_correct_standard() { + assert_encoded_length(0, 0, &STANDARD, true); + + assert_encoded_length(1, 4, &STANDARD, true); + assert_encoded_length(2, 4, &STANDARD, true); + assert_encoded_length(3, 4, &STANDARD, true); + + assert_encoded_length(4, 8, &STANDARD, true); + assert_encoded_length(5, 8, &STANDARD, true); + assert_encoded_length(6, 8, &STANDARD, true); + + assert_encoded_length(7, 12, &STANDARD, true); + assert_encoded_length(8, 12, &STANDARD, true); + assert_encoded_length(9, 12, &STANDARD, true); + + assert_encoded_length(54, 72, &STANDARD, true); + + assert_encoded_length(55, 76, &STANDARD, true); + assert_encoded_length(56, 76, &STANDARD, true); + assert_encoded_length(57, 76, &STANDARD, true); + + assert_encoded_length(58, 80, &STANDARD, true); + } + + #[test] + fn encoded_size_correct_no_pad() { + assert_encoded_length(0, 0, &URL_SAFE_NO_PAD_ENGINE, false); + + assert_encoded_length(1, 2, &URL_SAFE_NO_PAD_ENGINE, false); + assert_encoded_length(2, 3, &URL_SAFE_NO_PAD_ENGINE, false); + assert_encoded_length(3, 4, &URL_SAFE_NO_PAD_ENGINE, false); + + assert_encoded_length(4, 6, &URL_SAFE_NO_PAD_ENGINE, false); + assert_encoded_length(5, 7, &URL_SAFE_NO_PAD_ENGINE, false); + assert_encoded_length(6, 8, &URL_SAFE_NO_PAD_ENGINE, false); + + assert_encoded_length(7, 10, &URL_SAFE_NO_PAD_ENGINE, false); + assert_encoded_length(8, 11, &URL_SAFE_NO_PAD_ENGINE, false); + assert_encoded_length(9, 12, &URL_SAFE_NO_PAD_ENGINE, false); + + assert_encoded_length(54, 72, &URL_SAFE_NO_PAD_ENGINE, false); + + assert_encoded_length(55, 74, &URL_SAFE_NO_PAD_ENGINE, false); + assert_encoded_length(56, 75, &URL_SAFE_NO_PAD_ENGINE, false); + assert_encoded_length(57, 76, &URL_SAFE_NO_PAD_ENGINE, false); + + assert_encoded_length(58, 78, &URL_SAFE_NO_PAD_ENGINE, false); + } + + #[test] + fn encoded_size_overflow() { + assert_eq!(None, encoded_len(usize::MAX, true)); + } + + #[test] + fn encode_engine_string_into_nonempty_buffer_doesnt_clobber_prefix() { + let mut orig_data = Vec::new(); + let mut prefix = String::new(); + let mut encoded_data_no_prefix = String::new(); + let mut encoded_data_with_prefix = String::new(); + let mut decoded = Vec::new(); + + let prefix_len_range = Uniform::new(0, 1000); + let input_len_range = Uniform::new(0, 1000); + + let mut rng = rand::rngs::SmallRng::from_entropy(); + + for _ in 0..10_000 { + orig_data.clear(); + prefix.clear(); + encoded_data_no_prefix.clear(); + encoded_data_with_prefix.clear(); + decoded.clear(); + + let input_len = input_len_range.sample(&mut rng); + + for _ in 0..input_len { + orig_data.push(rng.gen()); + } + + let prefix_len = prefix_len_range.sample(&mut rng); + for _ in 0..prefix_len { + // getting convenient random single-byte printable chars that aren't base64 is + // annoying + prefix.push('#'); + } + 
encoded_data_with_prefix.push_str(&prefix); + + let engine = random_engine(&mut rng); + engine.encode_string(&orig_data, &mut encoded_data_no_prefix); + engine.encode_string(&orig_data, &mut encoded_data_with_prefix); + + assert_eq!( + encoded_data_no_prefix.len() + prefix_len, + encoded_data_with_prefix.len() + ); + assert_encode_sanity( + &encoded_data_no_prefix, + engine.config().encode_padding(), + input_len, + ); + assert_encode_sanity( + &encoded_data_with_prefix[prefix_len..], + engine.config().encode_padding(), + input_len, + ); + + // append plain encode onto prefix + prefix.push_str(&encoded_data_no_prefix); + + assert_eq!(prefix, encoded_data_with_prefix); + + engine + .decode_vec(&encoded_data_no_prefix, &mut decoded) + .unwrap(); + assert_eq!(orig_data, decoded); + } + } + + #[test] + fn encode_engine_slice_into_nonempty_buffer_doesnt_clobber_suffix() { + let mut orig_data = Vec::new(); + let mut encoded_data = Vec::new(); + let mut encoded_data_original_state = Vec::new(); + let mut decoded = Vec::new(); + + let input_len_range = Uniform::new(0, 1000); + + let mut rng = rand::rngs::SmallRng::from_entropy(); + + for _ in 0..10_000 { + orig_data.clear(); + encoded_data.clear(); + encoded_data_original_state.clear(); + decoded.clear(); + + let input_len = input_len_range.sample(&mut rng); + + for _ in 0..input_len { + orig_data.push(rng.gen()); + } + + // plenty of existing garbage in the encoded buffer + for _ in 0..10 * input_len { + encoded_data.push(rng.gen()); + } + + encoded_data_original_state.extend_from_slice(&encoded_data); + + let engine = random_engine(&mut rng); + + let encoded_size = encoded_len(input_len, engine.config().encode_padding()).unwrap(); + + assert_eq!( + encoded_size, + engine.encode_slice(&orig_data, &mut encoded_data).unwrap() + ); + + assert_encode_sanity( + str::from_utf8(&encoded_data[0..encoded_size]).unwrap(), + engine.config().encode_padding(), + input_len, + ); + + assert_eq!( + &encoded_data[encoded_size..], + &encoded_data_original_state[encoded_size..] 
+ ); + + engine + .decode_vec(&encoded_data[0..encoded_size], &mut decoded) + .unwrap(); + assert_eq!(orig_data, decoded); + } + } + + #[test] + fn encode_to_slice_random_valid_utf8() { + let mut input = Vec::new(); + let mut output = Vec::new(); + + let input_len_range = Uniform::new(0, 1000); + + let mut rng = rand::rngs::SmallRng::from_entropy(); + + for _ in 0..10_000 { + input.clear(); + output.clear(); + + let input_len = input_len_range.sample(&mut rng); + + for _ in 0..input_len { + input.push(rng.gen()); + } + + let config = random_config(&mut rng); + let engine = random_engine(&mut rng); + + // fill up the output buffer with garbage + let encoded_size = encoded_len(input_len, config.encode_padding()).unwrap(); + for _ in 0..encoded_size { + output.push(rng.gen()); + } + + let orig_output_buf = output.clone(); + + let bytes_written = engine.internal_encode(&input, &mut output); + + // make sure the part beyond bytes_written is the same garbage it was before + assert_eq!(orig_output_buf[bytes_written..], output[bytes_written..]); + + // make sure the encoded bytes are UTF-8 + let _ = str::from_utf8(&output[0..bytes_written]).unwrap(); + } + } + + #[test] + fn encode_with_padding_random_valid_utf8() { + let mut input = Vec::new(); + let mut output = Vec::new(); + + let input_len_range = Uniform::new(0, 1000); + + let mut rng = rand::rngs::SmallRng::from_entropy(); + + for _ in 0..10_000 { + input.clear(); + output.clear(); + + let input_len = input_len_range.sample(&mut rng); + + for _ in 0..input_len { + input.push(rng.gen()); + } + + let engine = random_engine(&mut rng); + + // fill up the output buffer with garbage + let encoded_size = encoded_len(input_len, engine.config().encode_padding()).unwrap(); + for _ in 0..encoded_size + 1000 { + output.push(rng.gen()); + } + + let orig_output_buf = output.clone(); + + encode_with_padding(&input, &mut output[0..encoded_size], &engine, encoded_size); + + // make sure the part beyond b64 is the same garbage it was before + assert_eq!(orig_output_buf[encoded_size..], output[encoded_size..]); + + // make sure the encoded bytes are UTF-8 + let _ = str::from_utf8(&output[0..encoded_size]).unwrap(); + } + } + + #[test] + fn add_padding_random_valid_utf8() { + let mut output = Vec::new(); + + let mut rng = rand::rngs::SmallRng::from_entropy(); + + // cover our bases for length % 4 + for unpadded_output_len in 0..20 { + output.clear(); + + // fill output with random + for _ in 0..100 { + output.push(rng.gen()); + } + + let orig_output_buf = output.clone(); + + let bytes_written = add_padding(unpadded_output_len, &mut output); + + // make sure the part beyond bytes_written is the same garbage it was before + assert_eq!(orig_output_buf[bytes_written..], output[bytes_written..]); + + // make sure the encoded bytes are UTF-8 + let _ = str::from_utf8(&output[0..bytes_written]).unwrap(); + } + } + + fn assert_encoded_length( + input_len: usize, + enc_len: usize, + engine: &E, + padded: bool, + ) { + assert_eq!(enc_len, encoded_len(input_len, padded).unwrap()); + + let mut bytes: Vec = Vec::new(); + let mut rng = rand::rngs::SmallRng::from_entropy(); + + for _ in 0..input_len { + bytes.push(rng.gen()); + } + + let encoded = engine.encode(&bytes); + assert_encode_sanity(&encoded, padded, input_len); + + assert_eq!(enc_len, encoded.len()); + } + + #[test] + fn encode_imap() { + assert_eq!( + &GeneralPurpose::new(&alphabet::IMAP_MUTF7, NO_PAD).encode(b"\xFB\xFF"), + &GeneralPurpose::new(&alphabet::STANDARD, NO_PAD) + .encode(b"\xFB\xFF") + .replace('/', 
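+                // IMAP's modified base64 (RFC 3501) uses ',' where the standard alphabet uses '/'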
",") + ); + } +} diff --git a/vendor/base64/src/engine/general_purpose/decode.rs b/vendor/base64/src/engine/general_purpose/decode.rs new file mode 100644 index 00000000000000..b55d3fc5c8f7e5 --- /dev/null +++ b/vendor/base64/src/engine/general_purpose/decode.rs @@ -0,0 +1,357 @@ +use crate::{ + engine::{general_purpose::INVALID_VALUE, DecodeEstimate, DecodeMetadata, DecodePaddingMode}, + DecodeError, DecodeSliceError, PAD_BYTE, +}; + +#[doc(hidden)] +pub struct GeneralPurposeEstimate { + /// input len % 4 + rem: usize, + conservative_decoded_len: usize, +} + +impl GeneralPurposeEstimate { + pub(crate) fn new(encoded_len: usize) -> Self { + let rem = encoded_len % 4; + Self { + rem, + conservative_decoded_len: (encoded_len / 4 + (rem > 0) as usize) * 3, + } + } +} + +impl DecodeEstimate for GeneralPurposeEstimate { + fn decoded_len_estimate(&self) -> usize { + self.conservative_decoded_len + } +} + +/// Helper to avoid duplicating num_chunks calculation, which is costly on short inputs. +/// Returns the decode metadata, or an error. +// We're on the fragile edge of compiler heuristics here. If this is not inlined, slow. If this is +// inlined(always), a different slow. plain ol' inline makes the benchmarks happiest at the moment, +// but this is fragile and the best setting changes with only minor code modifications. +#[inline] +pub(crate) fn decode_helper( + input: &[u8], + estimate: GeneralPurposeEstimate, + output: &mut [u8], + decode_table: &[u8; 256], + decode_allow_trailing_bits: bool, + padding_mode: DecodePaddingMode, +) -> Result { + let input_complete_nonterminal_quads_len = + complete_quads_len(input, estimate.rem, output.len(), decode_table)?; + + const UNROLLED_INPUT_CHUNK_SIZE: usize = 32; + const UNROLLED_OUTPUT_CHUNK_SIZE: usize = UNROLLED_INPUT_CHUNK_SIZE / 4 * 3; + + let input_complete_quads_after_unrolled_chunks_len = + input_complete_nonterminal_quads_len % UNROLLED_INPUT_CHUNK_SIZE; + + let input_unrolled_loop_len = + input_complete_nonterminal_quads_len - input_complete_quads_after_unrolled_chunks_len; + + // chunks of 32 bytes + for (chunk_index, chunk) in input[..input_unrolled_loop_len] + .chunks_exact(UNROLLED_INPUT_CHUNK_SIZE) + .enumerate() + { + let input_index = chunk_index * UNROLLED_INPUT_CHUNK_SIZE; + let chunk_output = &mut output[chunk_index * UNROLLED_OUTPUT_CHUNK_SIZE + ..(chunk_index + 1) * UNROLLED_OUTPUT_CHUNK_SIZE]; + + decode_chunk_8( + &chunk[0..8], + input_index, + decode_table, + &mut chunk_output[0..6], + )?; + decode_chunk_8( + &chunk[8..16], + input_index + 8, + decode_table, + &mut chunk_output[6..12], + )?; + decode_chunk_8( + &chunk[16..24], + input_index + 16, + decode_table, + &mut chunk_output[12..18], + )?; + decode_chunk_8( + &chunk[24..32], + input_index + 24, + decode_table, + &mut chunk_output[18..24], + )?; + } + + // remaining quads, except for the last possibly partial one, as it may have padding + let output_unrolled_loop_len = input_unrolled_loop_len / 4 * 3; + let output_complete_quad_len = input_complete_nonterminal_quads_len / 4 * 3; + { + let output_after_unroll = &mut output[output_unrolled_loop_len..output_complete_quad_len]; + + for (chunk_index, chunk) in input + [input_unrolled_loop_len..input_complete_nonterminal_quads_len] + .chunks_exact(4) + .enumerate() + { + let chunk_output = &mut output_after_unroll[chunk_index * 3..chunk_index * 3 + 3]; + + decode_chunk_4( + chunk, + input_unrolled_loop_len + chunk_index * 4, + decode_table, + chunk_output, + )?; + } + } + + super::decode_suffix::decode_suffix( + input, + 
input_complete_nonterminal_quads_len, + output, + output_complete_quad_len, + decode_table, + decode_allow_trailing_bits, + padding_mode, + ) +} + +/// Returns the length of complete quads, except for the last one, even if it is complete. +/// +/// Returns an error if the output len is not big enough for decoding those complete quads, or if +/// the input % 4 == 1, and that last byte is an invalid value other than a pad byte. +/// +/// - `input` is the base64 input +/// - `input_len_rem` is input len % 4 +/// - `output_len` is the length of the output slice +pub(crate) fn complete_quads_len( + input: &[u8], + input_len_rem: usize, + output_len: usize, + decode_table: &[u8; 256], +) -> Result { + debug_assert!(input.len() % 4 == input_len_rem); + + // detect a trailing invalid byte, like a newline, as a user convenience + if input_len_rem == 1 { + let last_byte = input[input.len() - 1]; + // exclude pad bytes; might be part of padding that extends from earlier in the input + if last_byte != PAD_BYTE && decode_table[usize::from(last_byte)] == INVALID_VALUE { + return Err(DecodeError::InvalidByte(input.len() - 1, last_byte).into()); + } + }; + + // skip last quad, even if it's complete, as it may have padding + let input_complete_nonterminal_quads_len = input + .len() + .saturating_sub(input_len_rem) + // if rem was 0, subtract 4 to avoid padding + .saturating_sub((input_len_rem == 0) as usize * 4); + debug_assert!( + input.is_empty() || (1..=4).contains(&(input.len() - input_complete_nonterminal_quads_len)) + ); + + // check that everything except the last quad handled by decode_suffix will fit + if output_len < input_complete_nonterminal_quads_len / 4 * 3 { + return Err(DecodeSliceError::OutputSliceTooSmall); + }; + Ok(input_complete_nonterminal_quads_len) +} + +/// Decode 8 bytes of input into 6 bytes of output. +/// +/// `input` is the 8 bytes to decode. +/// `index_at_start_of_input` is the offset in the overall input (used for reporting errors +/// accurately) +/// `decode_table` is the lookup table for the particular base64 alphabet. 
+/// `output` will have its first 6 bytes overwritten +// yes, really inline (worth 30-50% speedup) +#[inline(always)] +fn decode_chunk_8( + input: &[u8], + index_at_start_of_input: usize, + decode_table: &[u8; 256], + output: &mut [u8], +) -> Result<(), DecodeError> { + let morsel = decode_table[usize::from(input[0])]; + if morsel == INVALID_VALUE { + return Err(DecodeError::InvalidByte(index_at_start_of_input, input[0])); + } + let mut accum = u64::from(morsel) << 58; + + let morsel = decode_table[usize::from(input[1])]; + if morsel == INVALID_VALUE { + return Err(DecodeError::InvalidByte( + index_at_start_of_input + 1, + input[1], + )); + } + accum |= u64::from(morsel) << 52; + + let morsel = decode_table[usize::from(input[2])]; + if morsel == INVALID_VALUE { + return Err(DecodeError::InvalidByte( + index_at_start_of_input + 2, + input[2], + )); + } + accum |= u64::from(morsel) << 46; + + let morsel = decode_table[usize::from(input[3])]; + if morsel == INVALID_VALUE { + return Err(DecodeError::InvalidByte( + index_at_start_of_input + 3, + input[3], + )); + } + accum |= u64::from(morsel) << 40; + + let morsel = decode_table[usize::from(input[4])]; + if morsel == INVALID_VALUE { + return Err(DecodeError::InvalidByte( + index_at_start_of_input + 4, + input[4], + )); + } + accum |= u64::from(morsel) << 34; + + let morsel = decode_table[usize::from(input[5])]; + if morsel == INVALID_VALUE { + return Err(DecodeError::InvalidByte( + index_at_start_of_input + 5, + input[5], + )); + } + accum |= u64::from(morsel) << 28; + + let morsel = decode_table[usize::from(input[6])]; + if morsel == INVALID_VALUE { + return Err(DecodeError::InvalidByte( + index_at_start_of_input + 6, + input[6], + )); + } + accum |= u64::from(morsel) << 22; + + let morsel = decode_table[usize::from(input[7])]; + if morsel == INVALID_VALUE { + return Err(DecodeError::InvalidByte( + index_at_start_of_input + 7, + input[7], + )); + } + accum |= u64::from(morsel) << 16; + + output[..6].copy_from_slice(&accum.to_be_bytes()[..6]); + + Ok(()) +} + +/// Like [decode_chunk_8] but for 4 bytes of input and 3 bytes of output. 
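+/// For example, the quad `Zm9v` packs to the 24 bits `0x66_6f_6f` and writes the bytes `foo`.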
+#[inline(always)] +fn decode_chunk_4( + input: &[u8], + index_at_start_of_input: usize, + decode_table: &[u8; 256], + output: &mut [u8], +) -> Result<(), DecodeError> { + let morsel = decode_table[usize::from(input[0])]; + if morsel == INVALID_VALUE { + return Err(DecodeError::InvalidByte(index_at_start_of_input, input[0])); + } + let mut accum = u32::from(morsel) << 26; + + let morsel = decode_table[usize::from(input[1])]; + if morsel == INVALID_VALUE { + return Err(DecodeError::InvalidByte( + index_at_start_of_input + 1, + input[1], + )); + } + accum |= u32::from(morsel) << 20; + + let morsel = decode_table[usize::from(input[2])]; + if morsel == INVALID_VALUE { + return Err(DecodeError::InvalidByte( + index_at_start_of_input + 2, + input[2], + )); + } + accum |= u32::from(morsel) << 14; + + let morsel = decode_table[usize::from(input[3])]; + if morsel == INVALID_VALUE { + return Err(DecodeError::InvalidByte( + index_at_start_of_input + 3, + input[3], + )); + } + accum |= u32::from(morsel) << 8; + + output[..3].copy_from_slice(&accum.to_be_bytes()[..3]); + + Ok(()) +} + +#[cfg(test)] +mod tests { + use super::*; + + use crate::engine::general_purpose::STANDARD; + + #[test] + fn decode_chunk_8_writes_only_6_bytes() { + let input = b"Zm9vYmFy"; // "foobar" + let mut output = [0_u8, 1, 2, 3, 4, 5, 6, 7]; + + decode_chunk_8(&input[..], 0, &STANDARD.decode_table, &mut output).unwrap(); + assert_eq!(&vec![b'f', b'o', b'o', b'b', b'a', b'r', 6, 7], &output); + } + + #[test] + fn decode_chunk_4_writes_only_3_bytes() { + let input = b"Zm9v"; // "foobar" + let mut output = [0_u8, 1, 2, 3]; + + decode_chunk_4(&input[..], 0, &STANDARD.decode_table, &mut output).unwrap(); + assert_eq!(&vec![b'f', b'o', b'o', 3], &output); + } + + #[test] + fn estimate_short_lengths() { + for (range, decoded_len_estimate) in [ + (0..=0, 0), + (1..=4, 3), + (5..=8, 6), + (9..=12, 9), + (13..=16, 12), + (17..=20, 15), + ] { + for encoded_len in range { + let estimate = GeneralPurposeEstimate::new(encoded_len); + assert_eq!(decoded_len_estimate, estimate.decoded_len_estimate()); + } + } + } + + #[test] + fn estimate_via_u128_inflation() { + // cover both ends of usize + (0..1000) + .chain(usize::MAX - 1000..=usize::MAX) + .for_each(|encoded_len| { + // inflate to 128 bit type to be able to safely use the easy formulas + let len_128 = encoded_len as u128; + + let estimate = GeneralPurposeEstimate::new(encoded_len); + assert_eq!( + (len_128 + 3) / 4 * 3, + estimate.conservative_decoded_len as u128 + ); + }) + } +} diff --git a/vendor/base64/src/engine/general_purpose/decode_suffix.rs b/vendor/base64/src/engine/general_purpose/decode_suffix.rs new file mode 100644 index 00000000000000..02aaf5141e1a41 --- /dev/null +++ b/vendor/base64/src/engine/general_purpose/decode_suffix.rs @@ -0,0 +1,162 @@ +use crate::{ + engine::{general_purpose::INVALID_VALUE, DecodeMetadata, DecodePaddingMode}, + DecodeError, DecodeSliceError, PAD_BYTE, +}; + +/// Decode the last 0-4 bytes, checking for trailing set bits and padding per the provided +/// parameters. +/// +/// Returns the decode metadata representing the total number of bytes decoded, including the ones +/// indicated as already written by `output_index`. 
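+///
+/// For example, under `DecodePaddingMode::RequireCanonical` a final quad of `Zg==` decodes to the
+/// single byte `f`, while the truncated forms `Zg` and `Zg=` are rejected with `InvalidPadding`;
+/// under `DecodePaddingMode::Indifferent` all three decode to `f`.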
+pub(crate) fn decode_suffix( + input: &[u8], + input_index: usize, + output: &mut [u8], + mut output_index: usize, + decode_table: &[u8; 256], + decode_allow_trailing_bits: bool, + padding_mode: DecodePaddingMode, +) -> Result { + debug_assert!((input.len() - input_index) <= 4); + + // Decode any leftovers that might not be a complete input chunk of 4 bytes. + // Use a u32 as a stack-resident 4 byte buffer. + let mut morsels_in_leftover = 0; + let mut padding_bytes_count = 0; + // offset from input_index + let mut first_padding_offset: usize = 0; + let mut last_symbol = 0_u8; + let mut morsels = [0_u8; 4]; + + for (leftover_index, &b) in input[input_index..].iter().enumerate() { + // '=' padding + if b == PAD_BYTE { + // There can be bad padding bytes in a few ways: + // 1 - Padding with non-padding characters after it + // 2 - Padding after zero or one characters in the current quad (should only + // be after 2 or 3 chars) + // 3 - More than two characters of padding. If 3 or 4 padding chars + // are in the same quad, that implies it will be caught by #2. + // If it spreads from one quad to another, it will be an invalid byte + // in the first quad. + // 4 - Non-canonical padding -- 1 byte when it should be 2, etc. + // Per config, non-canonical but still functional non- or partially-padded base64 + // may be treated as an error condition. + + if leftover_index < 2 { + // Check for error #2. + // Either the previous byte was padding, in which case we would have already hit + // this case, or it wasn't, in which case this is the first such error. + debug_assert!( + leftover_index == 0 || (leftover_index == 1 && padding_bytes_count == 0) + ); + let bad_padding_index = input_index + leftover_index; + return Err(DecodeError::InvalidByte(bad_padding_index, b).into()); + } + + if padding_bytes_count == 0 { + first_padding_offset = leftover_index; + } + + padding_bytes_count += 1; + continue; + } + + // Check for case #1. + // To make '=' handling consistent with the main loop, don't allow + // non-suffix '=' in trailing chunk either. Report error as first + // erroneous padding. + if padding_bytes_count > 0 { + return Err( + DecodeError::InvalidByte(input_index + first_padding_offset, PAD_BYTE).into(), + ); + } + + last_symbol = b; + + // can use up to 8 * 6 = 48 bits of the u64, if last chunk has no padding. + // Pack the leftovers from left to right. + let morsel = decode_table[b as usize]; + if morsel == INVALID_VALUE { + return Err(DecodeError::InvalidByte(input_index + leftover_index, b).into()); + } + + morsels[morsels_in_leftover] = morsel; + morsels_in_leftover += 1; + } + + // If there was 1 trailing byte, and it was valid, and we got to this point without hitting + // an invalid byte, now we can report invalid length + if !input.is_empty() && morsels_in_leftover < 2 { + return Err(DecodeError::InvalidLength(input_index + morsels_in_leftover).into()); + } + + match padding_mode { + DecodePaddingMode::Indifferent => { /* everything we care about was already checked */ } + DecodePaddingMode::RequireCanonical => { + // allow empty input + if (padding_bytes_count + morsels_in_leftover) % 4 != 0 { + return Err(DecodeError::InvalidPadding.into()); + } + } + DecodePaddingMode::RequireNone => { + if padding_bytes_count > 0 { + // check at the end to make sure we let the cases of padding that should be InvalidByte + // get hit + return Err(DecodeError::InvalidPadding.into()); + } + } + } + + // When encoding 1 trailing byte (e.g. 0xFF), 2 base64 bytes ("/w") are needed. 
+ // / is the symbol for 63 (0x3F, bottom 6 bits all set) and w is 48 (0x30, top 2 bits + // of bottom 6 bits set). + // When decoding two symbols back to one trailing byte, any final symbol higher than + // w would still decode to the original byte because we only care about the top two + // bits in the bottom 6, but would be a non-canonical encoding. So, we calculate a + // mask based on how many bits are used for just the canonical encoding, and optionally + // error if any other bits are set. In the example of one encoded byte -> 2 symbols, + // 2 symbols can technically encode 12 bits, but the last 4 are non-canonical, and + // useless since there are no more symbols to provide the necessary 4 additional bits + // to finish the second original byte. + + let leftover_bytes_to_append = morsels_in_leftover * 6 / 8; + // Put the up to 6 complete bytes as the high bytes. + // Gain a couple percent speedup from nudging these ORs to use more ILP with a two-way split. + let mut leftover_num = (u32::from(morsels[0]) << 26) + | (u32::from(morsels[1]) << 20) + | (u32::from(morsels[2]) << 14) + | (u32::from(morsels[3]) << 8); + + // if there are bits set outside the bits we care about, last symbol encodes trailing bits that + // will not be included in the output + let mask = !0_u32 >> (leftover_bytes_to_append * 8); + if !decode_allow_trailing_bits && (leftover_num & mask) != 0 { + // last morsel is at `morsels_in_leftover` - 1 + return Err(DecodeError::InvalidLastSymbol( + input_index + morsels_in_leftover - 1, + last_symbol, + ) + .into()); + } + + // Strangely, this approach benchmarks better than writing bytes one at a time, + // or copy_from_slice into output. + for _ in 0..leftover_bytes_to_append { + let hi_byte = (leftover_num >> 24) as u8; + leftover_num <<= 8; + *output + .get_mut(output_index) + .ok_or(DecodeSliceError::OutputSliceTooSmall)? = hi_byte; + output_index += 1; + } + + Ok(DecodeMetadata::new( + output_index, + if padding_bytes_count > 0 { + Some(input_index + first_padding_offset) + } else { + None + }, + )) +} diff --git a/vendor/base64/src/engine/general_purpose/mod.rs b/vendor/base64/src/engine/general_purpose/mod.rs new file mode 100644 index 00000000000000..6fe958097b2878 --- /dev/null +++ b/vendor/base64/src/engine/general_purpose/mod.rs @@ -0,0 +1,352 @@ +//! Provides the [GeneralPurpose] engine and associated config types. +use crate::{ + alphabet, + alphabet::Alphabet, + engine::{Config, DecodeMetadata, DecodePaddingMode}, + DecodeSliceError, +}; +use core::convert::TryInto; + +pub(crate) mod decode; +pub(crate) mod decode_suffix; + +pub use decode::GeneralPurposeEstimate; + +pub(crate) const INVALID_VALUE: u8 = 255; + +/// A general-purpose base64 engine. +/// +/// - It uses no vector CPU instructions, so it will work on any system. +/// - It is reasonably fast (~2-3GiB/s). +/// - It is not constant-time, though, so it is vulnerable to timing side-channel attacks. For loading cryptographic keys, etc, it is suggested to use the forthcoming constant-time implementation. + +#[derive(Debug, Clone)] +pub struct GeneralPurpose { + encode_table: [u8; 64], + decode_table: [u8; 256], + config: GeneralPurposeConfig, +} + +impl GeneralPurpose { + /// Create a `GeneralPurpose` engine from an [Alphabet]. + /// + /// While not very expensive to initialize, ideally these should be cached + /// if the engine will be used repeatedly. 
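+    ///
+    /// Since `new` is `const`, an engine can be built once at compile time:
+    ///
+    /// ```
+    /// use base64::{alphabet, engine::general_purpose::{GeneralPurpose, NO_PAD}, Engine as _};
+    ///
+    /// const CUSTOM_ENGINE: GeneralPurpose = GeneralPurpose::new(&alphabet::URL_SAFE, NO_PAD);
+    ///
+    /// let mut buf = [0_u8; 4];
+    /// let len = CUSTOM_ENGINE.encode_slice(b"\xFB\xFF", &mut buf).unwrap();
+    /// assert_eq!(b"-_8", &buf[..len]);
+    /// ```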
+ pub const fn new(alphabet: &Alphabet, config: GeneralPurposeConfig) -> Self { + Self { + encode_table: encode_table(alphabet), + decode_table: decode_table(alphabet), + config, + } + } +} + +impl super::Engine for GeneralPurpose { + type Config = GeneralPurposeConfig; + type DecodeEstimate = GeneralPurposeEstimate; + + fn internal_encode(&self, input: &[u8], output: &mut [u8]) -> usize { + let mut input_index: usize = 0; + + const BLOCKS_PER_FAST_LOOP: usize = 4; + const LOW_SIX_BITS: u64 = 0x3F; + + // we read 8 bytes at a time (u64) but only actually consume 6 of those bytes. Thus, we need + // 2 trailing bytes to be available to read.. + let last_fast_index = input.len().saturating_sub(BLOCKS_PER_FAST_LOOP * 6 + 2); + let mut output_index = 0; + + if last_fast_index > 0 { + while input_index <= last_fast_index { + // Major performance wins from letting the optimizer do the bounds check once, mostly + // on the output side + let input_chunk = + &input[input_index..(input_index + (BLOCKS_PER_FAST_LOOP * 6 + 2))]; + let output_chunk = + &mut output[output_index..(output_index + BLOCKS_PER_FAST_LOOP * 8)]; + + // Hand-unrolling for 32 vs 16 or 8 bytes produces yields performance about equivalent + // to unsafe pointer code on a Xeon E5-1650v3. 64 byte unrolling was slightly better for + // large inputs but significantly worse for 50-byte input, unsurprisingly. I suspect + // that it's a not uncommon use case to encode smallish chunks of data (e.g. a 64-byte + // SHA-512 digest), so it would be nice if that fit in the unrolled loop at least once. + // Plus, single-digit percentage performance differences might well be quite different + // on different hardware. + + let input_u64 = read_u64(&input_chunk[0..]); + + output_chunk[0] = self.encode_table[((input_u64 >> 58) & LOW_SIX_BITS) as usize]; + output_chunk[1] = self.encode_table[((input_u64 >> 52) & LOW_SIX_BITS) as usize]; + output_chunk[2] = self.encode_table[((input_u64 >> 46) & LOW_SIX_BITS) as usize]; + output_chunk[3] = self.encode_table[((input_u64 >> 40) & LOW_SIX_BITS) as usize]; + output_chunk[4] = self.encode_table[((input_u64 >> 34) & LOW_SIX_BITS) as usize]; + output_chunk[5] = self.encode_table[((input_u64 >> 28) & LOW_SIX_BITS) as usize]; + output_chunk[6] = self.encode_table[((input_u64 >> 22) & LOW_SIX_BITS) as usize]; + output_chunk[7] = self.encode_table[((input_u64 >> 16) & LOW_SIX_BITS) as usize]; + + let input_u64 = read_u64(&input_chunk[6..]); + + output_chunk[8] = self.encode_table[((input_u64 >> 58) & LOW_SIX_BITS) as usize]; + output_chunk[9] = self.encode_table[((input_u64 >> 52) & LOW_SIX_BITS) as usize]; + output_chunk[10] = self.encode_table[((input_u64 >> 46) & LOW_SIX_BITS) as usize]; + output_chunk[11] = self.encode_table[((input_u64 >> 40) & LOW_SIX_BITS) as usize]; + output_chunk[12] = self.encode_table[((input_u64 >> 34) & LOW_SIX_BITS) as usize]; + output_chunk[13] = self.encode_table[((input_u64 >> 28) & LOW_SIX_BITS) as usize]; + output_chunk[14] = self.encode_table[((input_u64 >> 22) & LOW_SIX_BITS) as usize]; + output_chunk[15] = self.encode_table[((input_u64 >> 16) & LOW_SIX_BITS) as usize]; + + let input_u64 = read_u64(&input_chunk[12..]); + + output_chunk[16] = self.encode_table[((input_u64 >> 58) & LOW_SIX_BITS) as usize]; + output_chunk[17] = self.encode_table[((input_u64 >> 52) & LOW_SIX_BITS) as usize]; + output_chunk[18] = self.encode_table[((input_u64 >> 46) & LOW_SIX_BITS) as usize]; + output_chunk[19] = self.encode_table[((input_u64 >> 40) & LOW_SIX_BITS) as usize]; + output_chunk[20] = 
self.encode_table[((input_u64 >> 34) & LOW_SIX_BITS) as usize]; + output_chunk[21] = self.encode_table[((input_u64 >> 28) & LOW_SIX_BITS) as usize]; + output_chunk[22] = self.encode_table[((input_u64 >> 22) & LOW_SIX_BITS) as usize]; + output_chunk[23] = self.encode_table[((input_u64 >> 16) & LOW_SIX_BITS) as usize]; + + let input_u64 = read_u64(&input_chunk[18..]); + + output_chunk[24] = self.encode_table[((input_u64 >> 58) & LOW_SIX_BITS) as usize]; + output_chunk[25] = self.encode_table[((input_u64 >> 52) & LOW_SIX_BITS) as usize]; + output_chunk[26] = self.encode_table[((input_u64 >> 46) & LOW_SIX_BITS) as usize]; + output_chunk[27] = self.encode_table[((input_u64 >> 40) & LOW_SIX_BITS) as usize]; + output_chunk[28] = self.encode_table[((input_u64 >> 34) & LOW_SIX_BITS) as usize]; + output_chunk[29] = self.encode_table[((input_u64 >> 28) & LOW_SIX_BITS) as usize]; + output_chunk[30] = self.encode_table[((input_u64 >> 22) & LOW_SIX_BITS) as usize]; + output_chunk[31] = self.encode_table[((input_u64 >> 16) & LOW_SIX_BITS) as usize]; + + output_index += BLOCKS_PER_FAST_LOOP * 8; + input_index += BLOCKS_PER_FAST_LOOP * 6; + } + } + + // Encode what's left after the fast loop. + + const LOW_SIX_BITS_U8: u8 = 0x3F; + + let rem = input.len() % 3; + let start_of_rem = input.len() - rem; + + // start at the first index not handled by fast loop, which may be 0. + + while input_index < start_of_rem { + let input_chunk = &input[input_index..(input_index + 3)]; + let output_chunk = &mut output[output_index..(output_index + 4)]; + + output_chunk[0] = self.encode_table[(input_chunk[0] >> 2) as usize]; + output_chunk[1] = self.encode_table + [((input_chunk[0] << 4 | input_chunk[1] >> 4) & LOW_SIX_BITS_U8) as usize]; + output_chunk[2] = self.encode_table + [((input_chunk[1] << 2 | input_chunk[2] >> 6) & LOW_SIX_BITS_U8) as usize]; + output_chunk[3] = self.encode_table[(input_chunk[2] & LOW_SIX_BITS_U8) as usize]; + + input_index += 3; + output_index += 4; + } + + if rem == 2 { + output[output_index] = self.encode_table[(input[start_of_rem] >> 2) as usize]; + output[output_index + 1] = + self.encode_table[((input[start_of_rem] << 4 | input[start_of_rem + 1] >> 4) + & LOW_SIX_BITS_U8) as usize]; + output[output_index + 2] = + self.encode_table[((input[start_of_rem + 1] << 2) & LOW_SIX_BITS_U8) as usize]; + output_index += 3; + } else if rem == 1 { + output[output_index] = self.encode_table[(input[start_of_rem] >> 2) as usize]; + output[output_index + 1] = + self.encode_table[((input[start_of_rem] << 4) & LOW_SIX_BITS_U8) as usize]; + output_index += 2; + } + + output_index + } + + fn internal_decoded_len_estimate(&self, input_len: usize) -> Self::DecodeEstimate { + GeneralPurposeEstimate::new(input_len) + } + + fn internal_decode( + &self, + input: &[u8], + output: &mut [u8], + estimate: Self::DecodeEstimate, + ) -> Result { + decode::decode_helper( + input, + estimate, + output, + &self.decode_table, + self.config.decode_allow_trailing_bits, + self.config.decode_padding_mode, + ) + } + + fn config(&self) -> &Self::Config { + &self.config + } +} + +/// Returns a table mapping a 6-bit index to the ASCII byte encoding of the index +pub(crate) const fn encode_table(alphabet: &Alphabet) -> [u8; 64] { + // the encode table is just the alphabet: + // 6-bit index lookup -> printable byte + let mut encode_table = [0_u8; 64]; + { + let mut index = 0; + while index < 64 { + encode_table[index] = alphabet.symbols[index]; + index += 1; + } + } + + encode_table +} + +/// Returns a table mapping base64 bytes as the 
lookup index to either: +/// - [INVALID_VALUE] for bytes that aren't members of the alphabet +/// - a byte whose lower 6 bits are the value that was encoded into the index byte +pub(crate) const fn decode_table(alphabet: &Alphabet) -> [u8; 256] { + let mut decode_table = [INVALID_VALUE; 256]; + + // Since the table is full of `INVALID_VALUE` already, we only need to overwrite + // the parts that are valid. + let mut index = 0; + while index < 64 { + // The index in the alphabet is the 6-bit value we care about. + // Since the index is in 0-63, it is safe to cast to u8. + decode_table[alphabet.symbols[index] as usize] = index as u8; + index += 1; + } + + decode_table +} + +#[inline] +fn read_u64(s: &[u8]) -> u64 { + u64::from_be_bytes(s[..8].try_into().unwrap()) +} + +/// Contains configuration parameters for base64 encoding and decoding. +/// +/// ``` +/// # use base64::engine::GeneralPurposeConfig; +/// let config = GeneralPurposeConfig::new() +/// .with_encode_padding(false); +/// // further customize using `.with_*` methods as needed +/// ``` +/// +/// The constants [PAD] and [NO_PAD] cover most use cases. +/// +/// To specify the characters used, see [Alphabet]. +#[derive(Clone, Copy, Debug)] +pub struct GeneralPurposeConfig { + encode_padding: bool, + decode_allow_trailing_bits: bool, + decode_padding_mode: DecodePaddingMode, +} + +impl GeneralPurposeConfig { + /// Create a new config with `padding` = `true`, `decode_allow_trailing_bits` = `false`, and + /// `decode_padding_mode = DecodePaddingMode::RequireCanonicalPadding`. + /// + /// This probably matches most people's expectations, but consider disabling padding to save + /// a few bytes unless you specifically need it for compatibility with some legacy system. + pub const fn new() -> Self { + Self { + // RFC states that padding must be applied by default + encode_padding: true, + decode_allow_trailing_bits: false, + decode_padding_mode: DecodePaddingMode::RequireCanonical, + } + } + + /// Create a new config based on `self` with an updated `padding` setting. + /// + /// If `padding` is `true`, encoding will append either 1 or 2 `=` padding characters as needed + /// to produce an output whose length is a multiple of 4. + /// + /// Padding is not needed for correct decoding and only serves to waste bytes, but it's in the + /// [spec](https://datatracker.ietf.org/doc/html/rfc4648#section-3.2). + /// + /// For new applications, consider not using padding if the decoders you're using don't require + /// padding to be present. + pub const fn with_encode_padding(self, padding: bool) -> Self { + Self { + encode_padding: padding, + ..self + } + } + + /// Create a new config based on `self` with an updated `decode_allow_trailing_bits` setting. + /// + /// Most users will not need to configure this. It's useful if you need to decode base64 + /// produced by a buggy encoder that has bits set in the unused space on the last base64 + /// character as per [forgiving-base64 decode](https://infra.spec.whatwg.org/#forgiving-base64-decode). + /// If invalid trailing bits are present and this is `true`, those bits will + /// be silently ignored, else `DecodeError::InvalidLastSymbol` will be emitted. + pub const fn with_decode_allow_trailing_bits(self, allow: bool) -> Self { + Self { + decode_allow_trailing_bits: allow, + ..self + } + } + + /// Create a new config based on `self` with an updated `decode_padding_mode` setting. 
+ /// + /// Padding is not useful in terms of representing encoded data -- it makes no difference to + /// the decoder if padding is present or not, so if you have some un-padded input to decode, it + /// is perfectly fine to use `DecodePaddingMode::Indifferent` to prevent errors from being + /// emitted. + /// + /// However, since in practice + /// [people who learned nothing from BER vs DER seem to expect base64 to have one canonical encoding](https://eprint.iacr.org/2022/361), + /// the default setting is the stricter `DecodePaddingMode::RequireCanonicalPadding`. + /// + /// Or, if "canonical" in your circumstance means _no_ padding rather than padding to the + /// next multiple of four, there's `DecodePaddingMode::RequireNoPadding`. + pub const fn with_decode_padding_mode(self, mode: DecodePaddingMode) -> Self { + Self { + decode_padding_mode: mode, + ..self + } + } +} + +impl Default for GeneralPurposeConfig { + /// Delegates to [GeneralPurposeConfig::new]. + fn default() -> Self { + Self::new() + } +} + +impl Config for GeneralPurposeConfig { + fn encode_padding(&self) -> bool { + self.encode_padding + } +} + +/// A [GeneralPurpose] engine using the [alphabet::STANDARD] base64 alphabet and [PAD] config. +pub const STANDARD: GeneralPurpose = GeneralPurpose::new(&alphabet::STANDARD, PAD); + +/// A [GeneralPurpose] engine using the [alphabet::STANDARD] base64 alphabet and [NO_PAD] config. +pub const STANDARD_NO_PAD: GeneralPurpose = GeneralPurpose::new(&alphabet::STANDARD, NO_PAD); + +/// A [GeneralPurpose] engine using the [alphabet::URL_SAFE] base64 alphabet and [PAD] config. +pub const URL_SAFE: GeneralPurpose = GeneralPurpose::new(&alphabet::URL_SAFE, PAD); + +/// A [GeneralPurpose] engine using the [alphabet::URL_SAFE] base64 alphabet and [NO_PAD] config. +pub const URL_SAFE_NO_PAD: GeneralPurpose = GeneralPurpose::new(&alphabet::URL_SAFE, NO_PAD); + +/// Include padding bytes when encoding, and require that they be present when decoding. +/// +/// This is the standard per the base64 RFC, but consider using [NO_PAD] instead as padding serves +/// little purpose in practice. +pub const PAD: GeneralPurposeConfig = GeneralPurposeConfig::new(); + +/// Don't add padding when encoding, and require no padding when decoding. +pub const NO_PAD: GeneralPurposeConfig = GeneralPurposeConfig::new() + .with_encode_padding(false) + .with_decode_padding_mode(DecodePaddingMode::RequireNone); diff --git a/vendor/base64/src/engine/mod.rs b/vendor/base64/src/engine/mod.rs new file mode 100644 index 00000000000000..f2cc33f607c12e --- /dev/null +++ b/vendor/base64/src/engine/mod.rs @@ -0,0 +1,478 @@ +//! Provides the [Engine] abstraction and out of the box implementations. +#[cfg(any(feature = "alloc", test))] +use crate::chunked_encoder; +use crate::{ + encode::{encode_with_padding, EncodeSliceError}, + encoded_len, DecodeError, DecodeSliceError, +}; +#[cfg(any(feature = "alloc", test))] +use alloc::vec::Vec; + +#[cfg(any(feature = "alloc", test))] +use alloc::{string::String, vec}; + +pub mod general_purpose; + +#[cfg(test)] +mod naive; + +#[cfg(test)] +mod tests; + +pub use general_purpose::{GeneralPurpose, GeneralPurposeConfig}; + +/// An `Engine` provides low-level encoding and decoding operations that all other higher-level parts of the API use. Users of the library will generally not need to implement this. +/// +/// Different implementations offer different characteristics. 
The library currently ships with +/// [GeneralPurpose] that offers good speed and works on any CPU, with more choices +/// coming later, like a constant-time one when side channel resistance is called for, and vendor-specific vectorized ones for more speed. +/// +/// See [general_purpose::STANDARD_NO_PAD] if you just want standard base64. Otherwise, when possible, it's +/// recommended to store the engine in a `const` so that references to it won't pose any lifetime +/// issues, and to avoid repeating the cost of engine setup. +/// +/// Since almost nobody will need to implement `Engine`, docs for internal methods are hidden. +// When adding an implementation of Engine, include them in the engine test suite: +// - add an implementation of [engine::tests::EngineWrapper] +// - add the implementation to the `all_engines` macro +// All tests run on all engines listed in the macro. +pub trait Engine: Send + Sync { + /// The config type used by this engine + type Config: Config; + /// The decode estimate used by this engine + type DecodeEstimate: DecodeEstimate; + + /// This is not meant to be called directly; it is only for `Engine` implementors. + /// See the other `encode*` functions on this trait. + /// + /// Encode the `input` bytes into the `output` buffer based on the mapping in `encode_table`. + /// + /// `output` will be long enough to hold the encoded data. + /// + /// Returns the number of bytes written. + /// + /// No padding should be written; that is handled separately. + /// + /// Must not write any bytes into the output slice other than the encoded data. + #[doc(hidden)] + fn internal_encode(&self, input: &[u8], output: &mut [u8]) -> usize; + + /// This is not meant to be called directly; it is only for `Engine` implementors. + /// + /// As an optimization to prevent the decoded length from being calculated twice, it is + /// sometimes helpful to have a conservative estimate of the decoded size before doing the + /// decoding, so this calculation is done separately and passed to [Engine::decode()] as needed. + #[doc(hidden)] + fn internal_decoded_len_estimate(&self, input_len: usize) -> Self::DecodeEstimate; + + /// This is not meant to be called directly; it is only for `Engine` implementors. + /// See the other `decode*` functions on this trait. + /// + /// Decode `input` base64 bytes into the `output` buffer. + /// + /// `decode_estimate` is the result of [Engine::internal_decoded_len_estimate()], which is passed in to avoid + /// calculating it again (expensive on short inputs).` + /// + /// Each complete 4-byte chunk of encoded data decodes to 3 bytes of decoded data, but this + /// function must also handle the final possibly partial chunk. + /// If the input length is not a multiple of 4, or uses padding bytes to reach a multiple of 4, + /// the trailing 2 or 3 bytes must decode to 1 or 2 bytes, respectively, as per the + /// [RFC](https://tools.ietf.org/html/rfc4648#section-3.5). + /// + /// Decoding must not write any bytes into the output slice other than the decoded data. + /// + /// Non-canonical trailing bits in the final tokens or non-canonical padding must be reported as + /// errors unless the engine is configured otherwise. + #[doc(hidden)] + fn internal_decode( + &self, + input: &[u8], + output: &mut [u8], + decode_estimate: Self::DecodeEstimate, + ) -> Result; + + /// Returns the config for this engine. + fn config(&self) -> &Self::Config; + + /// Encode arbitrary octets as base64 using the provided `Engine`. + /// Returns a `String`. 
+ /// + /// # Example + /// + /// ```rust + /// use base64::{Engine as _, engine::{self, general_purpose}, alphabet}; + /// + /// let b64 = general_purpose::STANDARD.encode(b"hello world~"); + /// println!("{}", b64); + /// + /// const CUSTOM_ENGINE: engine::GeneralPurpose = + /// engine::GeneralPurpose::new(&alphabet::URL_SAFE, general_purpose::NO_PAD); + /// + /// let b64_url = CUSTOM_ENGINE.encode(b"hello internet~"); + /// ``` + #[cfg(any(feature = "alloc", test))] + #[inline] + fn encode>(&self, input: T) -> String { + fn inner(engine: &E, input_bytes: &[u8]) -> String + where + E: Engine + ?Sized, + { + let encoded_size = encoded_len(input_bytes.len(), engine.config().encode_padding()) + .expect("integer overflow when calculating buffer size"); + + let mut buf = vec![0; encoded_size]; + + encode_with_padding(input_bytes, &mut buf[..], engine, encoded_size); + + String::from_utf8(buf).expect("Invalid UTF8") + } + + inner(self, input.as_ref()) + } + + /// Encode arbitrary octets as base64 into a supplied `String`. + /// Writes into the supplied `String`, which may allocate if its internal buffer isn't big enough. + /// + /// # Example + /// + /// ```rust + /// use base64::{Engine as _, engine::{self, general_purpose}, alphabet}; + /// const CUSTOM_ENGINE: engine::GeneralPurpose = + /// engine::GeneralPurpose::new(&alphabet::URL_SAFE, general_purpose::NO_PAD); + /// + /// fn main() { + /// let mut buf = String::new(); + /// general_purpose::STANDARD.encode_string(b"hello world~", &mut buf); + /// println!("{}", buf); + /// + /// buf.clear(); + /// CUSTOM_ENGINE.encode_string(b"hello internet~", &mut buf); + /// println!("{}", buf); + /// } + /// ``` + #[cfg(any(feature = "alloc", test))] + #[inline] + fn encode_string>(&self, input: T, output_buf: &mut String) { + fn inner(engine: &E, input_bytes: &[u8], output_buf: &mut String) + where + E: Engine + ?Sized, + { + let mut sink = chunked_encoder::StringSink::new(output_buf); + + chunked_encoder::ChunkedEncoder::new(engine) + .encode(input_bytes, &mut sink) + .expect("Writing to a String shouldn't fail"); + } + + inner(self, input.as_ref(), output_buf) + } + + /// Encode arbitrary octets as base64 into a supplied slice. + /// Writes into the supplied output buffer. + /// + /// This is useful if you wish to avoid allocation entirely (e.g. encoding into a stack-resident + /// or statically-allocated buffer). 
+ /// + /// # Example + /// + #[cfg_attr(feature = "alloc", doc = "```")] + #[cfg_attr(not(feature = "alloc"), doc = "```ignore")] + /// use base64::{Engine as _, engine::general_purpose}; + /// let s = b"hello internet!"; + /// let mut buf = Vec::new(); + /// // make sure we'll have a slice big enough for base64 + padding + /// buf.resize(s.len() * 4 / 3 + 4, 0); + /// + /// let bytes_written = general_purpose::STANDARD.encode_slice(s, &mut buf).unwrap(); + /// + /// // shorten our vec down to just what was written + /// buf.truncate(bytes_written); + /// + /// assert_eq!(s, general_purpose::STANDARD.decode(&buf).unwrap().as_slice()); + /// ``` + #[inline] + fn encode_slice>( + &self, + input: T, + output_buf: &mut [u8], + ) -> Result { + fn inner( + engine: &E, + input_bytes: &[u8], + output_buf: &mut [u8], + ) -> Result + where + E: Engine + ?Sized, + { + let encoded_size = encoded_len(input_bytes.len(), engine.config().encode_padding()) + .expect("usize overflow when calculating buffer size"); + + if output_buf.len() < encoded_size { + return Err(EncodeSliceError::OutputSliceTooSmall); + } + + let b64_output = &mut output_buf[0..encoded_size]; + + encode_with_padding(input_bytes, b64_output, engine, encoded_size); + + Ok(encoded_size) + } + + inner(self, input.as_ref(), output_buf) + } + + /// Decode the input into a new `Vec`. + /// + /// # Example + /// + /// ```rust + /// use base64::{Engine as _, alphabet, engine::{self, general_purpose}}; + /// + /// let bytes = general_purpose::STANDARD + /// .decode("aGVsbG8gd29ybGR+Cg==").unwrap(); + /// println!("{:?}", bytes); + /// + /// // custom engine setup + /// let bytes_url = engine::GeneralPurpose::new( + /// &alphabet::URL_SAFE, + /// general_purpose::NO_PAD) + /// .decode("aGVsbG8gaW50ZXJuZXR-Cg").unwrap(); + /// println!("{:?}", bytes_url); + /// ``` + #[cfg(any(feature = "alloc", test))] + #[inline] + fn decode>(&self, input: T) -> Result, DecodeError> { + fn inner(engine: &E, input_bytes: &[u8]) -> Result, DecodeError> + where + E: Engine + ?Sized, + { + let estimate = engine.internal_decoded_len_estimate(input_bytes.len()); + let mut buffer = vec![0; estimate.decoded_len_estimate()]; + + let bytes_written = engine + .internal_decode(input_bytes, &mut buffer, estimate) + .map_err(|e| match e { + DecodeSliceError::DecodeError(e) => e, + DecodeSliceError::OutputSliceTooSmall => { + unreachable!("Vec is sized conservatively") + } + })? + .decoded_len; + + buffer.truncate(bytes_written); + + Ok(buffer) + } + + inner(self, input.as_ref()) + } + + /// Decode the `input` into the supplied `buffer`. + /// + /// Writes into the supplied `Vec`, which may allocate if its internal buffer isn't big enough. + /// Returns a `Result` containing an empty tuple, aka `()`. 
+ /// + /// # Example + /// + /// ```rust + /// use base64::{Engine as _, alphabet, engine::{self, general_purpose}}; + /// const CUSTOM_ENGINE: engine::GeneralPurpose = + /// engine::GeneralPurpose::new(&alphabet::URL_SAFE, general_purpose::PAD); + /// + /// fn main() { + /// use base64::Engine; + /// let mut buffer = Vec::::new(); + /// // with the default engine + /// general_purpose::STANDARD + /// .decode_vec("aGVsbG8gd29ybGR+Cg==", &mut buffer,).unwrap(); + /// println!("{:?}", buffer); + /// + /// buffer.clear(); + /// + /// // with a custom engine + /// CUSTOM_ENGINE.decode_vec( + /// "aGVsbG8gaW50ZXJuZXR-Cg==", + /// &mut buffer, + /// ).unwrap(); + /// println!("{:?}", buffer); + /// } + /// ``` + #[cfg(any(feature = "alloc", test))] + #[inline] + fn decode_vec>( + &self, + input: T, + buffer: &mut Vec, + ) -> Result<(), DecodeError> { + fn inner(engine: &E, input_bytes: &[u8], buffer: &mut Vec) -> Result<(), DecodeError> + where + E: Engine + ?Sized, + { + let starting_output_len = buffer.len(); + let estimate = engine.internal_decoded_len_estimate(input_bytes.len()); + + let total_len_estimate = estimate + .decoded_len_estimate() + .checked_add(starting_output_len) + .expect("Overflow when calculating output buffer length"); + + buffer.resize(total_len_estimate, 0); + + let buffer_slice = &mut buffer.as_mut_slice()[starting_output_len..]; + + let bytes_written = engine + .internal_decode(input_bytes, buffer_slice, estimate) + .map_err(|e| match e { + DecodeSliceError::DecodeError(e) => e, + DecodeSliceError::OutputSliceTooSmall => { + unreachable!("Vec is sized conservatively") + } + })? + .decoded_len; + + buffer.truncate(starting_output_len + bytes_written); + + Ok(()) + } + + inner(self, input.as_ref(), buffer) + } + + /// Decode the input into the provided output slice. + /// + /// Returns the number of bytes written to the slice, or an error if `output` is smaller than + /// the estimated decoded length. + /// + /// This will not write any bytes past exactly what is decoded (no stray garbage bytes at the end). + /// + /// See [crate::decoded_len_estimate] for calculating buffer sizes. + /// + /// See [Engine::decode_slice_unchecked] for a version that panics instead of returning an error + /// if the output buffer is too small. + #[inline] + fn decode_slice>( + &self, + input: T, + output: &mut [u8], + ) -> Result { + fn inner( + engine: &E, + input_bytes: &[u8], + output: &mut [u8], + ) -> Result + where + E: Engine + ?Sized, + { + engine + .internal_decode( + input_bytes, + output, + engine.internal_decoded_len_estimate(input_bytes.len()), + ) + .map(|dm| dm.decoded_len) + } + + inner(self, input.as_ref(), output) + } + + /// Decode the input into the provided output slice. + /// + /// Returns the number of bytes written to the slice. + /// + /// This will not write any bytes past exactly what is decoded (no stray garbage bytes at the end). + /// + /// See [crate::decoded_len_estimate] for calculating buffer sizes. + /// + /// See [Engine::decode_slice] for a version that returns an error instead of panicking if the output + /// buffer is too small. + /// + /// # Panics + /// + /// Panics if the provided output buffer is too small for the decoded data. 
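+    ///
+    /// # Example
+    ///
+    /// Decoding into a fixed-size buffer known to be large enough:
+    ///
+    /// ```
+    /// use base64::{engine::general_purpose::STANDARD, Engine as _};
+    ///
+    /// // decoded_len_estimate("aGVsbG8=".len()) is 6, so 6 bytes is always sufficient here
+    /// let mut buf = [0_u8; 6];
+    /// let len = STANDARD.decode_slice_unchecked("aGVsbG8=", &mut buf).unwrap();
+    /// assert_eq!(b"hello", &buf[..len]);
+    /// ```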
+ #[inline] + fn decode_slice_unchecked>( + &self, + input: T, + output: &mut [u8], + ) -> Result { + fn inner(engine: &E, input_bytes: &[u8], output: &mut [u8]) -> Result + where + E: Engine + ?Sized, + { + engine + .internal_decode( + input_bytes, + output, + engine.internal_decoded_len_estimate(input_bytes.len()), + ) + .map(|dm| dm.decoded_len) + .map_err(|e| match e { + DecodeSliceError::DecodeError(e) => e, + DecodeSliceError::OutputSliceTooSmall => { + panic!("Output slice is too small") + } + }) + } + + inner(self, input.as_ref(), output) + } +} + +/// The minimal level of configuration that engines must support. +pub trait Config { + /// Returns `true` if padding should be added after the encoded output. + /// + /// Padding is added outside the engine's encode() since the engine may be used + /// to encode only a chunk of the overall output, so it can't always know when + /// the output is "done" and would therefore need padding (if configured). + // It could be provided as a separate parameter when encoding, but that feels like + // leaking an implementation detail to the user, and it's hopefully more convenient + // to have to only pass one thing (the engine) to any part of the API. + fn encode_padding(&self) -> bool; +} + +/// The decode estimate used by an engine implementation. Users do not need to interact with this; +/// it is only for engine implementors. +/// +/// Implementors may store relevant data here when constructing this to avoid having to calculate +/// them again during actual decoding. +pub trait DecodeEstimate { + /// Returns a conservative (err on the side of too big) estimate of the decoded length to use + /// for pre-allocating buffers, etc. + /// + /// The estimate must be no larger than the next largest complete triple of decoded bytes. + /// That is, the final quad of tokens to decode may be assumed to be complete with no padding. + fn decoded_len_estimate(&self) -> usize; +} + +/// Controls how pad bytes are handled when decoding. +/// +/// Each [Engine] must support at least the behavior indicated by +/// [DecodePaddingMode::RequireCanonical], and may support other modes. +#[derive(Clone, Copy, Debug, PartialEq, Eq)] +pub enum DecodePaddingMode { + /// Canonical padding is allowed, but any fewer padding bytes than that is also allowed. + Indifferent, + /// Padding must be canonical (0, 1, or 2 `=` as needed to produce a 4 byte suffix). + RequireCanonical, + /// Padding must be absent -- for when you want predictable padding, without any wasted bytes. 
+ RequireNone, +} + +/// Metadata about the result of a decode operation +#[derive(PartialEq, Eq, Debug)] +pub struct DecodeMetadata { + /// Number of decoded bytes output + pub(crate) decoded_len: usize, + /// Offset of the first padding byte in the input, if any + pub(crate) padding_offset: Option, +} + +impl DecodeMetadata { + pub(crate) fn new(decoded_bytes: usize, padding_index: Option) -> Self { + Self { + decoded_len: decoded_bytes, + padding_offset: padding_index, + } + } +} diff --git a/vendor/base64/src/engine/naive.rs b/vendor/base64/src/engine/naive.rs new file mode 100644 index 00000000000000..af509bfa56b6a5 --- /dev/null +++ b/vendor/base64/src/engine/naive.rs @@ -0,0 +1,195 @@ +use crate::{ + alphabet::Alphabet, + engine::{ + general_purpose::{self, decode_table, encode_table}, + Config, DecodeEstimate, DecodeMetadata, DecodePaddingMode, Engine, + }, + DecodeError, DecodeSliceError, +}; +use std::ops::{BitAnd, BitOr, Shl, Shr}; + +/// Comparatively simple implementation that can be used as something to compare against in tests +pub struct Naive { + encode_table: [u8; 64], + decode_table: [u8; 256], + config: NaiveConfig, +} + +impl Naive { + const ENCODE_INPUT_CHUNK_SIZE: usize = 3; + const DECODE_INPUT_CHUNK_SIZE: usize = 4; + + pub const fn new(alphabet: &Alphabet, config: NaiveConfig) -> Self { + Self { + encode_table: encode_table(alphabet), + decode_table: decode_table(alphabet), + config, + } + } + + fn decode_byte_into_u32(&self, offset: usize, byte: u8) -> Result { + let decoded = self.decode_table[byte as usize]; + + if decoded == general_purpose::INVALID_VALUE { + return Err(DecodeError::InvalidByte(offset, byte)); + } + + Ok(decoded as u32) + } +} + +impl Engine for Naive { + type Config = NaiveConfig; + type DecodeEstimate = NaiveEstimate; + + fn internal_encode(&self, input: &[u8], output: &mut [u8]) -> usize { + // complete chunks first + + const LOW_SIX_BITS: u32 = 0x3F; + + let rem = input.len() % Self::ENCODE_INPUT_CHUNK_SIZE; + // will never underflow + let complete_chunk_len = input.len() - rem; + + let mut input_index = 0_usize; + let mut output_index = 0_usize; + if let Some(last_complete_chunk_index) = + complete_chunk_len.checked_sub(Self::ENCODE_INPUT_CHUNK_SIZE) + { + while input_index <= last_complete_chunk_index { + let chunk = &input[input_index..input_index + Self::ENCODE_INPUT_CHUNK_SIZE]; + + // populate low 24 bits from 3 bytes + let chunk_int: u32 = + (chunk[0] as u32).shl(16) | (chunk[1] as u32).shl(8) | (chunk[2] as u32); + // encode 4x 6-bit output bytes + output[output_index] = self.encode_table[chunk_int.shr(18) as usize]; + output[output_index + 1] = + self.encode_table[chunk_int.shr(12_u8).bitand(LOW_SIX_BITS) as usize]; + output[output_index + 2] = + self.encode_table[chunk_int.shr(6_u8).bitand(LOW_SIX_BITS) as usize]; + output[output_index + 3] = + self.encode_table[chunk_int.bitand(LOW_SIX_BITS) as usize]; + + input_index += Self::ENCODE_INPUT_CHUNK_SIZE; + output_index += 4; + } + } + + // then leftovers + if rem == 2 { + let chunk = &input[input_index..input_index + 2]; + + // high six bits of chunk[0] + output[output_index] = self.encode_table[chunk[0].shr(2) as usize]; + // bottom 2 bits of [0], high 4 bits of [1] + output[output_index + 1] = + self.encode_table[(chunk[0].shl(4_u8).bitor(chunk[1].shr(4_u8)) as u32) + .bitand(LOW_SIX_BITS) as usize]; + // bottom 4 bits of [1], with the 2 bottom bits as zero + output[output_index + 2] = + self.encode_table[(chunk[1].shl(2_u8) as u32).bitand(LOW_SIX_BITS) as usize]; + + 
output_index += 3; + } else if rem == 1 { + let byte = input[input_index]; + output[output_index] = self.encode_table[byte.shr(2) as usize]; + output[output_index + 1] = + self.encode_table[(byte.shl(4_u8) as u32).bitand(LOW_SIX_BITS) as usize]; + output_index += 2; + } + + output_index + } + + fn internal_decoded_len_estimate(&self, input_len: usize) -> Self::DecodeEstimate { + NaiveEstimate::new(input_len) + } + + fn internal_decode( + &self, + input: &[u8], + output: &mut [u8], + estimate: Self::DecodeEstimate, + ) -> Result { + let complete_nonterminal_quads_len = general_purpose::decode::complete_quads_len( + input, + estimate.rem, + output.len(), + &self.decode_table, + )?; + + const BOTTOM_BYTE: u32 = 0xFF; + + for (chunk_index, chunk) in input[..complete_nonterminal_quads_len] + .chunks_exact(4) + .enumerate() + { + let input_index = chunk_index * 4; + let output_index = chunk_index * 3; + + let decoded_int: u32 = self.decode_byte_into_u32(input_index, chunk[0])?.shl(18) + | self + .decode_byte_into_u32(input_index + 1, chunk[1])? + .shl(12) + | self.decode_byte_into_u32(input_index + 2, chunk[2])?.shl(6) + | self.decode_byte_into_u32(input_index + 3, chunk[3])?; + + output[output_index] = decoded_int.shr(16_u8).bitand(BOTTOM_BYTE) as u8; + output[output_index + 1] = decoded_int.shr(8_u8).bitand(BOTTOM_BYTE) as u8; + output[output_index + 2] = decoded_int.bitand(BOTTOM_BYTE) as u8; + } + + general_purpose::decode_suffix::decode_suffix( + input, + complete_nonterminal_quads_len, + output, + complete_nonterminal_quads_len / 4 * 3, + &self.decode_table, + self.config.decode_allow_trailing_bits, + self.config.decode_padding_mode, + ) + } + + fn config(&self) -> &Self::Config { + &self.config + } +} + +pub struct NaiveEstimate { + /// remainder from dividing input by `Naive::DECODE_CHUNK_SIZE` + rem: usize, + /// Length of input that is in complete `Naive::DECODE_CHUNK_SIZE`-length chunks + complete_chunk_len: usize, +} + +impl NaiveEstimate { + fn new(input_len: usize) -> Self { + let rem = input_len % Naive::DECODE_INPUT_CHUNK_SIZE; + let complete_chunk_len = input_len - rem; + + Self { + rem, + complete_chunk_len, + } + } +} + +impl DecodeEstimate for NaiveEstimate { + fn decoded_len_estimate(&self) -> usize { + ((self.complete_chunk_len / 4) + ((self.rem > 0) as usize)) * 3 + } +} + +#[derive(Clone, Copy, Debug)] +pub struct NaiveConfig { + pub encode_padding: bool, + pub decode_allow_trailing_bits: bool, + pub decode_padding_mode: DecodePaddingMode, +} + +impl Config for NaiveConfig { + fn encode_padding(&self) -> bool { + self.encode_padding + } +} diff --git a/vendor/base64/src/engine/tests.rs b/vendor/base64/src/engine/tests.rs new file mode 100644 index 00000000000000..72bbf4bb046d29 --- /dev/null +++ b/vendor/base64/src/engine/tests.rs @@ -0,0 +1,1579 @@ +// rstest_reuse template functions have unused variables +#![allow(unused_variables)] + +use rand::{ + self, + distributions::{self, Distribution as _}, + rngs, Rng as _, SeedableRng as _, +}; +use rstest::rstest; +use rstest_reuse::{apply, template}; +use std::{collections, fmt, io::Read as _}; + +use crate::{ + alphabet::{Alphabet, STANDARD}, + encode::add_padding, + encoded_len, + engine::{ + general_purpose, naive, Config, DecodeEstimate, DecodeMetadata, DecodePaddingMode, Engine, + }, + read::DecoderReader, + tests::{assert_encode_sanity, random_alphabet, random_config}, + DecodeError, DecodeSliceError, PAD_BYTE, +}; + +// the case::foo syntax includes the "foo" in the generated test method names +#[template] 
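+// each test annotated with #[apply(all_engines)] is instantiated once per case listed below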
+#[rstest(engine_wrapper,
+case::general_purpose(GeneralPurposeWrapper {}),
+case::naive(NaiveWrapper {}),
+case::decoder_reader(DecoderReaderEngineWrapper {}),
+)]
+fn all_engines<E: EngineWrapper>(engine_wrapper: E) {}
+
+/// Some decode tests don't make sense for use with `DecoderReader` as they are difficult to
+/// reason about or otherwise inapplicable given how DecoderReader slices up its input along
+/// chunk boundaries.
+#[template]
+#[rstest(engine_wrapper,
+case::general_purpose(GeneralPurposeWrapper {}),
+case::naive(NaiveWrapper {}),
+)]
+fn all_engines_except_decoder_reader<E: EngineWrapper>(engine_wrapper: E) {}
+
+#[apply(all_engines)]
+fn rfc_test_vectors_std_alphabet<E: EngineWrapper>(engine_wrapper: E) {
+    let data = vec![
+        ("", ""),
+        ("f", "Zg=="),
+        ("fo", "Zm8="),
+        ("foo", "Zm9v"),
+        ("foob", "Zm9vYg=="),
+        ("fooba", "Zm9vYmE="),
+        ("foobar", "Zm9vYmFy"),
+    ];
+
+    let engine = E::standard();
+    let engine_no_padding = E::standard_unpadded();
+
+    for (orig, encoded) in &data {
+        let encoded_without_padding = encoded.trim_end_matches('=');
+
+        // unpadded
+        {
+            let mut encode_buf = [0_u8; 8];
+            let mut decode_buf = [0_u8; 6];
+
+            let encode_len =
+                engine_no_padding.internal_encode(orig.as_bytes(), &mut encode_buf[..]);
+            assert_eq!(
+                &encoded_without_padding,
+                &std::str::from_utf8(&encode_buf[0..encode_len]).unwrap()
+            );
+            let decode_len = engine_no_padding
+                .decode_slice_unchecked(encoded_without_padding.as_bytes(), &mut decode_buf[..])
+                .unwrap();
+            assert_eq!(orig.len(), decode_len);
+
+            assert_eq!(
+                orig,
+                &std::str::from_utf8(&decode_buf[0..decode_len]).unwrap()
+            );
+
+            // if there was any padding originally, the no padding engine won't decode it
+            if encoded.as_bytes().contains(&PAD_BYTE) {
+                assert_eq!(
+                    Err(DecodeError::InvalidPadding),
+                    engine_no_padding.decode(encoded)
+                )
+            }
+        }
+
+        // padded
+        {
+            let mut encode_buf = [0_u8; 8];
+            let mut decode_buf = [0_u8; 6];
+
+            let encode_len = engine.internal_encode(orig.as_bytes(), &mut encode_buf[..]);
+            assert_eq!(
+                // doesn't have padding added yet
+                &encoded_without_padding,
+                &std::str::from_utf8(&encode_buf[0..encode_len]).unwrap()
+            );
+            let pad_len = add_padding(encode_len, &mut encode_buf[encode_len..]);
+            assert_eq!(encoded.as_bytes(), &encode_buf[..encode_len + pad_len]);
+
+            let decode_len = engine
+                .decode_slice_unchecked(encoded.as_bytes(), &mut decode_buf[..])
+                .unwrap();
+            assert_eq!(orig.len(), decode_len);
+
+            assert_eq!(
+                orig,
+                &std::str::from_utf8(&decode_buf[0..decode_len]).unwrap()
+            );
+
+            // if there was (canonical) padding, and we remove it, the standard engine won't decode
+            if encoded.as_bytes().contains(&PAD_BYTE) {
+                assert_eq!(
+                    Err(DecodeError::InvalidPadding),
+                    engine.decode(encoded_without_padding)
+                )
+            }
+        }
+    }
+}
+
+#[apply(all_engines)]
+fn roundtrip_random<E: EngineWrapper>(engine_wrapper: E) {
+    let mut rng = seeded_rng();
+
+    let mut orig_data = Vec::<u8>::new();
+    let mut encode_buf = Vec::<u8>::new();
+    let mut decode_buf = Vec::<u8>::new();
+
+    let len_range = distributions::Uniform::new(1, 1_000);
+
+    for _ in 0..10_000 {
+        let engine = E::random(&mut rng);
+
+        orig_data.clear();
+        encode_buf.clear();
+        decode_buf.clear();
+
+        let (orig_len, _, encoded_len) = generate_random_encoded_data(
+            &engine,
+            &mut orig_data,
+            &mut encode_buf,
+            &mut rng,
+            &len_range,
+        );
+
+        // exactly the right size
+        decode_buf.resize(orig_len, 0);
+
+        let dec_len = engine
+            .decode_slice_unchecked(&encode_buf[0..encoded_len], &mut decode_buf[..])
+            .unwrap();
+
+        assert_eq!(orig_len, dec_len);
+        assert_eq!(&orig_data[..], &decode_buf[..dec_len]);
+    }
+}
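+
+// Illustrative sketch, not part of the upstream base64 test suite: a quick sanity check of the
+// length arithmetic the RFC 4648 vectors above rely on. With padding enabled, every 3 input
+// bytes become 4 output symbols, so the padded length is 4 * ceil(n / 3). It uses only items
+// already imported in this module (`encoded_len` and the `EngineWrapper` fixtures); the test
+// name itself is new.
+#[apply(all_engines)]
+fn encoded_len_matches_rfc_vector_lengths<E: EngineWrapper>(engine_wrapper: E) {
+    let engine = E::standard();
+
+    for (input, expected_len) in [("", 0_usize), ("f", 4), ("fo", 4), ("foo", 4), ("foob", 8)] {
+        // length predicted from the 3-byte -> 4-symbol arithmetic
+        assert_eq!(Some(expected_len), encoded_len(input.len(), true));
+        // and the length actually produced by a padded engine
+        assert_eq!(expected_len, engine.encode(input).len());
+    }
+}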
+ +#[apply(all_engines)] +fn encode_doesnt_write_extra_bytes(engine_wrapper: E) { + let mut rng = seeded_rng(); + + let mut orig_data = Vec::::new(); + let mut encode_buf = Vec::::new(); + let mut encode_buf_backup = Vec::::new(); + + let input_len_range = distributions::Uniform::new(0, 1000); + + for _ in 0..10_000 { + let engine = E::random(&mut rng); + let padded = engine.config().encode_padding(); + + orig_data.clear(); + encode_buf.clear(); + encode_buf_backup.clear(); + + let orig_len = fill_rand(&mut orig_data, &mut rng, &input_len_range); + + let prefix_len = 1024; + // plenty of prefix and suffix + fill_rand_len(&mut encode_buf, &mut rng, prefix_len * 2 + orig_len * 2); + encode_buf_backup.extend_from_slice(&encode_buf[..]); + + let expected_encode_len_no_pad = encoded_len(orig_len, false).unwrap(); + + let encoded_len_no_pad = + engine.internal_encode(&orig_data[..], &mut encode_buf[prefix_len..]); + assert_eq!(expected_encode_len_no_pad, encoded_len_no_pad); + + // no writes past what it claimed to write + assert_eq!(&encode_buf_backup[..prefix_len], &encode_buf[..prefix_len]); + assert_eq!( + &encode_buf_backup[(prefix_len + encoded_len_no_pad)..], + &encode_buf[(prefix_len + encoded_len_no_pad)..] + ); + + let encoded_data = &encode_buf[prefix_len..(prefix_len + encoded_len_no_pad)]; + assert_encode_sanity( + std::str::from_utf8(encoded_data).unwrap(), + // engines don't pad + false, + orig_len, + ); + + // pad so we can decode it in case our random engine requires padding + let pad_len = if padded { + add_padding( + encoded_len_no_pad, + &mut encode_buf[prefix_len + encoded_len_no_pad..], + ) + } else { + 0 + }; + + assert_eq!( + orig_data, + engine + .decode(&encode_buf[prefix_len..(prefix_len + encoded_len_no_pad + pad_len)],) + .unwrap() + ); + } +} + +#[apply(all_engines)] +fn encode_engine_slice_fits_into_precisely_sized_slice(engine_wrapper: E) { + let mut orig_data = Vec::new(); + let mut encoded_data = Vec::new(); + let mut decoded = Vec::new(); + + let input_len_range = distributions::Uniform::new(0, 1000); + + let mut rng = rngs::SmallRng::from_entropy(); + + for _ in 0..10_000 { + orig_data.clear(); + encoded_data.clear(); + decoded.clear(); + + let input_len = input_len_range.sample(&mut rng); + + for _ in 0..input_len { + orig_data.push(rng.gen()); + } + + let engine = E::random(&mut rng); + + let encoded_size = encoded_len(input_len, engine.config().encode_padding()).unwrap(); + + encoded_data.resize(encoded_size, 0); + + assert_eq!( + encoded_size, + engine.encode_slice(&orig_data, &mut encoded_data).unwrap() + ); + + assert_encode_sanity( + std::str::from_utf8(&encoded_data[0..encoded_size]).unwrap(), + engine.config().encode_padding(), + input_len, + ); + + engine + .decode_vec(&encoded_data[0..encoded_size], &mut decoded) + .unwrap(); + assert_eq!(orig_data, decoded); + } +} + +#[apply(all_engines)] +fn decode_doesnt_write_extra_bytes(engine_wrapper: E) +where + E: EngineWrapper, + <::Engine as Engine>::Config: fmt::Debug, +{ + let mut rng = seeded_rng(); + + let mut orig_data = Vec::::new(); + let mut encode_buf = Vec::::new(); + let mut decode_buf = Vec::::new(); + let mut decode_buf_backup = Vec::::new(); + + let len_range = distributions::Uniform::new(1, 1_000); + + for _ in 0..10_000 { + let engine = E::random(&mut rng); + + orig_data.clear(); + encode_buf.clear(); + decode_buf.clear(); + decode_buf_backup.clear(); + + let orig_len = fill_rand(&mut orig_data, &mut rng, &len_range); + encode_buf.resize(orig_len * 2 + 100, 0); + + let encoded_len = engine 
+ .encode_slice(&orig_data[..], &mut encode_buf[..]) + .unwrap(); + encode_buf.truncate(encoded_len); + + // oversize decode buffer so we can easily tell if it writes anything more than + // just the decoded data + let prefix_len = 1024; + // plenty of prefix and suffix + fill_rand_len(&mut decode_buf, &mut rng, prefix_len * 2 + orig_len * 2); + decode_buf_backup.extend_from_slice(&decode_buf[..]); + + let dec_len = engine + .decode_slice_unchecked(&encode_buf, &mut decode_buf[prefix_len..]) + .unwrap(); + + assert_eq!(orig_len, dec_len); + assert_eq!( + &orig_data[..], + &decode_buf[prefix_len..prefix_len + dec_len] + ); + assert_eq!(&decode_buf_backup[..prefix_len], &decode_buf[..prefix_len]); + assert_eq!( + &decode_buf_backup[prefix_len + dec_len..], + &decode_buf[prefix_len + dec_len..] + ); + } +} + +#[apply(all_engines)] +fn decode_detect_invalid_last_symbol(engine_wrapper: E) { + // 0xFF -> "/w==", so all letters > w, 0-9, and '+', '/' should get InvalidLastSymbol + let engine = E::standard(); + + assert_eq!(Ok(vec![0x89, 0x85]), engine.decode("iYU=")); + assert_eq!(Ok(vec![0xFF]), engine.decode("/w==")); + + for (suffix, offset) in vec![ + // suffix, offset of bad byte from start of suffix + ("/x==", 1_usize), + ("/z==", 1_usize), + ("/0==", 1_usize), + ("/9==", 1_usize), + ("/+==", 1_usize), + ("//==", 1_usize), + // trailing 01 + ("iYV=", 2_usize), + // trailing 10 + ("iYW=", 2_usize), + // trailing 11 + ("iYX=", 2_usize), + ] { + for prefix_quads in 0..256 { + let mut encoded = "AAAA".repeat(prefix_quads); + encoded.push_str(suffix); + + assert_eq!( + Err(DecodeError::InvalidLastSymbol( + encoded.len() - 4 + offset, + suffix.as_bytes()[offset], + )), + engine.decode(encoded.as_str()) + ); + } + } +} + +#[apply(all_engines)] +fn decode_detect_1_valid_symbol_in_last_quad_invalid_length(engine_wrapper: E) { + for len in (0_usize..256).map(|len| len * 4 + 1) { + for mode in all_pad_modes() { + let mut input = vec![b'A'; len]; + + let engine = E::standard_with_pad_mode(true, mode); + + assert_eq!(Err(DecodeError::InvalidLength(len)), engine.decode(&input)); + // if we add padding, then the first pad byte in the quad is invalid because it should + // be the second symbol + for _ in 0..3 { + input.push(PAD_BYTE); + assert_eq!( + Err(DecodeError::InvalidByte(len, PAD_BYTE)), + engine.decode(&input) + ); + } + } + } +} + +#[apply(all_engines)] +fn decode_detect_1_invalid_byte_in_last_quad_invalid_byte(engine_wrapper: E) { + for prefix_len in (0_usize..256).map(|len| len * 4) { + for mode in all_pad_modes() { + let mut input = vec![b'A'; prefix_len]; + input.push(b'*'); + + let engine = E::standard_with_pad_mode(true, mode); + + assert_eq!( + Err(DecodeError::InvalidByte(prefix_len, b'*')), + engine.decode(&input) + ); + // adding padding doesn't matter + for _ in 0..3 { + input.push(PAD_BYTE); + assert_eq!( + Err(DecodeError::InvalidByte(prefix_len, b'*')), + engine.decode(&input) + ); + } + } + } +} + +#[apply(all_engines)] +fn decode_detect_invalid_last_symbol_every_possible_two_symbols( + engine_wrapper: E, +) { + let engine = E::standard(); + + let mut base64_to_bytes = collections::HashMap::new(); + + for b in 0_u8..=255 { + let mut b64 = vec![0_u8; 4]; + assert_eq!(2, engine.internal_encode(&[b], &mut b64[..])); + let _ = add_padding(2, &mut b64[2..]); + + assert!(base64_to_bytes.insert(b64, vec![b]).is_none()); + } + + // every possible combination of trailing symbols must either decode to 1 byte or get InvalidLastSymbol, with or without any leading chunks + + let mut prefix = 
Vec::new(); + for _ in 0..256 { + let mut clone = prefix.clone(); + + let mut symbols = [0_u8; 4]; + for &s1 in STANDARD.symbols.iter() { + symbols[0] = s1; + for &s2 in STANDARD.symbols.iter() { + symbols[1] = s2; + symbols[2] = PAD_BYTE; + symbols[3] = PAD_BYTE; + + // chop off previous symbols + clone.truncate(prefix.len()); + clone.extend_from_slice(&symbols[..]); + let decoded_prefix_len = prefix.len() / 4 * 3; + + match base64_to_bytes.get(&symbols[..]) { + Some(bytes) => { + let res = engine + .decode(&clone) + // remove prefix + .map(|decoded| decoded[decoded_prefix_len..].to_vec()); + + assert_eq!(Ok(bytes.clone()), res); + } + None => assert_eq!( + Err(DecodeError::InvalidLastSymbol(1, s2)), + engine.decode(&symbols[..]) + ), + } + } + } + + prefix.extend_from_slice(b"AAAA"); + } +} + +#[apply(all_engines)] +fn decode_detect_invalid_last_symbol_every_possible_three_symbols( + engine_wrapper: E, +) { + let engine = E::standard(); + + let mut base64_to_bytes = collections::HashMap::new(); + + let mut bytes = [0_u8; 2]; + for b1 in 0_u8..=255 { + bytes[0] = b1; + for b2 in 0_u8..=255 { + bytes[1] = b2; + let mut b64 = vec![0_u8; 4]; + assert_eq!(3, engine.internal_encode(&bytes, &mut b64[..])); + let _ = add_padding(3, &mut b64[3..]); + + let mut v = Vec::with_capacity(2); + v.extend_from_slice(&bytes[..]); + + assert!(base64_to_bytes.insert(b64, v).is_none()); + } + } + + // every possible combination of symbols must either decode to 2 bytes or get InvalidLastSymbol, with or without any leading chunks + + let mut prefix = Vec::new(); + let mut input = Vec::new(); + for _ in 0..256 { + input.clear(); + input.extend_from_slice(&prefix); + + let mut symbols = [0_u8; 4]; + for &s1 in STANDARD.symbols.iter() { + symbols[0] = s1; + for &s2 in STANDARD.symbols.iter() { + symbols[1] = s2; + for &s3 in STANDARD.symbols.iter() { + symbols[2] = s3; + symbols[3] = PAD_BYTE; + + // chop off previous symbols + input.truncate(prefix.len()); + input.extend_from_slice(&symbols[..]); + let decoded_prefix_len = prefix.len() / 4 * 3; + + match base64_to_bytes.get(&symbols[..]) { + Some(bytes) => { + let res = engine + .decode(&input) + // remove prefix + .map(|decoded| decoded[decoded_prefix_len..].to_vec()); + + assert_eq!(Ok(bytes.clone()), res); + } + None => assert_eq!( + Err(DecodeError::InvalidLastSymbol(2, s3)), + engine.decode(&symbols[..]) + ), + } + } + } + } + prefix.extend_from_slice(b"AAAA"); + } +} + +#[apply(all_engines)] +fn decode_invalid_trailing_bits_ignored_when_configured(engine_wrapper: E) { + let strict = E::standard(); + let forgiving = E::standard_allow_trailing_bits(); + + fn assert_tolerant_decode( + engine: &E, + input: &mut String, + b64_prefix_len: usize, + expected_decode_bytes: Vec, + data: &str, + ) { + let prefixed = prefixed_data(input, b64_prefix_len, data); + let decoded = engine.decode(prefixed); + // prefix is always complete chunks + let decoded_prefix_len = b64_prefix_len / 4 * 3; + assert_eq!( + Ok(expected_decode_bytes), + decoded.map(|v| v[decoded_prefix_len..].to_vec()) + ); + } + + let mut prefix = String::new(); + for _ in 0..256 { + let mut input = prefix.clone(); + + // example from https://github.com/marshallpierce/rust-base64/issues/75 + assert!(strict + .decode(prefixed_data(&mut input, prefix.len(), "/w==")) + .is_ok()); + assert!(strict + .decode(prefixed_data(&mut input, prefix.len(), "iYU=")) + .is_ok()); + // trailing 01 + assert_tolerant_decode(&forgiving, &mut input, prefix.len(), vec![255], "/x=="); + assert_tolerant_decode(&forgiving, &mut 
input, prefix.len(), vec![137, 133], "iYV="); + // trailing 10 + assert_tolerant_decode(&forgiving, &mut input, prefix.len(), vec![255], "/y=="); + assert_tolerant_decode(&forgiving, &mut input, prefix.len(), vec![137, 133], "iYW="); + // trailing 11 + assert_tolerant_decode(&forgiving, &mut input, prefix.len(), vec![255], "/z=="); + assert_tolerant_decode(&forgiving, &mut input, prefix.len(), vec![137, 133], "iYX="); + + prefix.push_str("AAAA"); + } +} + +#[apply(all_engines)] +fn decode_invalid_byte_error(engine_wrapper: E) { + let mut rng = seeded_rng(); + + let mut orig_data = Vec::::new(); + let mut encode_buf = Vec::::new(); + let mut decode_buf = Vec::::new(); + + let len_range = distributions::Uniform::new(1, 1_000); + + for _ in 0..100_000 { + let alphabet = random_alphabet(&mut rng); + let engine = E::random_alphabet(&mut rng, alphabet); + + orig_data.clear(); + encode_buf.clear(); + decode_buf.clear(); + + let (orig_len, encoded_len_just_data, encoded_len_with_padding) = + generate_random_encoded_data( + &engine, + &mut orig_data, + &mut encode_buf, + &mut rng, + &len_range, + ); + + // exactly the right size + decode_buf.resize(orig_len, 0); + + // replace one encoded byte with an invalid byte + let invalid_byte: u8 = loop { + let byte: u8 = rng.gen(); + + if alphabet.symbols.contains(&byte) || byte == PAD_BYTE { + continue; + } else { + break byte; + } + }; + + let invalid_range = distributions::Uniform::new(0, orig_len); + let invalid_index = invalid_range.sample(&mut rng); + encode_buf[invalid_index] = invalid_byte; + + assert_eq!( + Err(DecodeError::InvalidByte(invalid_index, invalid_byte)), + engine.decode_slice_unchecked( + &encode_buf[0..encoded_len_with_padding], + &mut decode_buf[..], + ) + ); + } +} + +/// Any amount of padding anywhere before the final non padding character = invalid byte at first +/// pad byte. +/// From this and [decode_padding_before_final_non_padding_char_error_invalid_byte_at_first_pad_non_canonical_padding_suffix_all_modes], +/// we know padding must extend contiguously to the end of the input. +#[apply(all_engines)] +fn decode_padding_before_final_non_padding_char_error_invalid_byte_at_first_pad_all_modes< + E: EngineWrapper, +>( + engine_wrapper: E, +) { + // Different amounts of padding, w/ offset from end for the last non-padding char. + // Only canonical padding, so Canonical mode will work. + let suffixes = &[("AA==", 2), ("AAA=", 1), ("AAAA", 0)]; + + for mode in pad_modes_allowing_padding() { + // We don't encode, so we don't care about encode padding. + let engine = E::standard_with_pad_mode(true, mode); + + decode_padding_before_final_non_padding_char_error_invalid_byte_at_first_pad( + engine, + suffixes.as_slice(), + ); + } +} + +/// See [decode_padding_before_final_non_padding_char_error_invalid_byte_at_first_pad_all_modes] +#[apply(all_engines)] +fn decode_padding_before_final_non_padding_char_error_invalid_byte_at_first_pad_non_canonical_padding_suffix< + E: EngineWrapper, +>( + engine_wrapper: E, +) { + // Different amounts of padding, w/ offset from end for the last non-padding char, and + // non-canonical padding. + let suffixes = [ + ("AA==", 2), + ("AA=", 1), + ("AA", 0), + ("AAA=", 1), + ("AAA", 0), + ("AAAA", 0), + ]; + + // We don't encode, so we don't care about encode padding. 
+ // Decoding is indifferent so that we don't get caught by missing padding on the last quad + let engine = E::standard_with_pad_mode(true, DecodePaddingMode::Indifferent); + + decode_padding_before_final_non_padding_char_error_invalid_byte_at_first_pad( + engine, + suffixes.as_slice(), + ) +} + +fn decode_padding_before_final_non_padding_char_error_invalid_byte_at_first_pad( + engine: impl Engine, + suffixes: &[(&str, usize)], +) { + let mut rng = seeded_rng(); + + let prefix_quads_range = distributions::Uniform::from(0..=256); + + for _ in 0..100_000 { + for (suffix, suffix_offset) in suffixes.iter() { + let mut s = "AAAA".repeat(prefix_quads_range.sample(&mut rng)); + s.push_str(suffix); + let mut encoded = s.into_bytes(); + + // calculate a range to write padding into that leaves at least one non padding char + let last_non_padding_offset = encoded.len() - 1 - suffix_offset; + + // don't include last non padding char as it must stay not padding + let padding_end = rng.gen_range(0..last_non_padding_offset); + + // don't use more than 100 bytes of padding, but also use shorter lengths when + // padding_end is near the start of the encoded data to avoid biasing to padding + // the entire prefix on short lengths + let padding_len = rng.gen_range(1..=usize::min(100, padding_end + 1)); + let padding_start = padding_end.saturating_sub(padding_len); + + encoded[padding_start..=padding_end].fill(PAD_BYTE); + + // should still have non-padding before any final padding + assert_ne!(PAD_BYTE, encoded[last_non_padding_offset]); + assert_eq!( + Err(DecodeError::InvalidByte(padding_start, PAD_BYTE)), + engine.decode(&encoded), + "len: {}, input: {}", + encoded.len(), + String::from_utf8(encoded).unwrap() + ); + } + } +} + +/// Any amount of padding before final chunk that crosses over into final chunk with 1-4 bytes = +/// invalid byte at first pad byte. +/// From this we know the padding must start in the final chunk. 
+#[apply(all_engines)] +fn decode_padding_starts_before_final_chunk_error_invalid_byte_at_first_pad( + engine_wrapper: E, +) { + let mut rng = seeded_rng(); + + // must have at least one prefix quad + let prefix_quads_range = distributions::Uniform::from(1..256); + let suffix_pad_len_range = distributions::Uniform::from(1..=4); + // don't use no-padding mode, as the reader decode might decode a block that ends with + // valid padding, which should then be referenced when encountering the later invalid byte + for mode in pad_modes_allowing_padding() { + // we don't encode so we don't care about encode padding + let engine = E::standard_with_pad_mode(true, mode); + for _ in 0..100_000 { + let suffix_len = suffix_pad_len_range.sample(&mut rng); + // all 0 bits so we don't hit InvalidLastSymbol with the reader decoder + let mut encoded = "AAAA" + .repeat(prefix_quads_range.sample(&mut rng)) + .into_bytes(); + encoded.resize(encoded.len() + suffix_len, PAD_BYTE); + + // amount of padding must be long enough to extend back from suffix into previous + // quads + let padding_len = rng.gen_range(suffix_len + 1..encoded.len()); + // no non-padding after padding in this test, so padding goes to the end + let padding_start = encoded.len() - padding_len; + encoded[padding_start..].fill(PAD_BYTE); + + assert_eq!( + Err(DecodeError::InvalidByte(padding_start, PAD_BYTE)), + engine.decode(&encoded), + "suffix_len: {}, padding_len: {}, b64: {}", + suffix_len, + padding_len, + std::str::from_utf8(&encoded).unwrap() + ); + } + } +} + +/// 0-1 bytes of data before any amount of padding in final chunk = invalid byte, since padding +/// is not valid data (consistent with error for pad bytes in earlier chunks). +/// From this we know there must be 2-3 bytes of data before padding +#[apply(all_engines)] +fn decode_too_little_data_before_padding_error_invalid_byte(engine_wrapper: E) { + let mut rng = seeded_rng(); + + // want to test no prefix quad case, so start at 0 + let prefix_quads_range = distributions::Uniform::from(0_usize..256); + let suffix_data_len_range = distributions::Uniform::from(0_usize..=1); + for mode in all_pad_modes() { + // we don't encode so we don't care about encode padding + let engine = E::standard_with_pad_mode(true, mode); + for _ in 0..100_000 { + let suffix_data_len = suffix_data_len_range.sample(&mut rng); + let prefix_quad_len = prefix_quads_range.sample(&mut rng); + + // for all possible padding lengths + for padding_len in 1..=(4 - suffix_data_len) { + let mut encoded = "ABCD".repeat(prefix_quad_len).into_bytes(); + encoded.resize(encoded.len() + suffix_data_len, b'A'); + encoded.resize(encoded.len() + padding_len, PAD_BYTE); + + assert_eq!( + Err(DecodeError::InvalidByte( + prefix_quad_len * 4 + suffix_data_len, + PAD_BYTE, + )), + engine.decode(&encoded), + "input {} suffix data len {} pad len {}", + String::from_utf8(encoded).unwrap(), + suffix_data_len, + padding_len + ); + } + } + } +} + +// https://eprint.iacr.org/2022/361.pdf table 2, test 1 +#[apply(all_engines)] +fn decode_malleability_test_case_3_byte_suffix_valid(engine_wrapper: E) { + assert_eq!( + b"Hello".as_slice(), + &E::standard().decode("SGVsbG8=").unwrap() + ); +} + +// https://eprint.iacr.org/2022/361.pdf table 2, test 2 +#[apply(all_engines)] +fn decode_malleability_test_case_3_byte_suffix_invalid_trailing_symbol( + engine_wrapper: E, +) { + assert_eq!( + DecodeError::InvalidLastSymbol(6, 0x39), + E::standard().decode("SGVsbG9=").unwrap_err() + ); +} + +// https://eprint.iacr.org/2022/361.pdf table 2, test 3 
+#[apply(all_engines)] +fn decode_malleability_test_case_3_byte_suffix_no_padding(engine_wrapper: E) { + assert_eq!( + DecodeError::InvalidPadding, + E::standard().decode("SGVsbG9").unwrap_err() + ); +} + +// https://eprint.iacr.org/2022/361.pdf table 2, test 4 +#[apply(all_engines)] +fn decode_malleability_test_case_2_byte_suffix_valid_two_padding_symbols( + engine_wrapper: E, +) { + assert_eq!( + b"Hell".as_slice(), + &E::standard().decode("SGVsbA==").unwrap() + ); +} + +// https://eprint.iacr.org/2022/361.pdf table 2, test 5 +#[apply(all_engines)] +fn decode_malleability_test_case_2_byte_suffix_short_padding(engine_wrapper: E) { + assert_eq!( + DecodeError::InvalidPadding, + E::standard().decode("SGVsbA=").unwrap_err() + ); +} + +// https://eprint.iacr.org/2022/361.pdf table 2, test 6 +#[apply(all_engines)] +fn decode_malleability_test_case_2_byte_suffix_no_padding(engine_wrapper: E) { + assert_eq!( + DecodeError::InvalidPadding, + E::standard().decode("SGVsbA").unwrap_err() + ); +} + +// https://eprint.iacr.org/2022/361.pdf table 2, test 7 +// DecoderReader pseudo-engine gets InvalidByte at 8 (extra padding) since it decodes the first +// two complete quads correctly. +#[apply(all_engines_except_decoder_reader)] +fn decode_malleability_test_case_2_byte_suffix_too_much_padding( + engine_wrapper: E, +) { + assert_eq!( + DecodeError::InvalidByte(6, PAD_BYTE), + E::standard().decode("SGVsbA====").unwrap_err() + ); +} + +/// Requires canonical padding -> accepts 2 + 2, 3 + 1, 4 + 0 final quad configurations +#[apply(all_engines)] +fn decode_pad_mode_requires_canonical_accepts_canonical(engine_wrapper: E) { + assert_all_suffixes_ok( + E::standard_with_pad_mode(true, DecodePaddingMode::RequireCanonical), + vec!["/w==", "iYU=", "AAAA"], + ); +} + +/// Requires canonical padding -> rejects 2 + 0-1, 3 + 0 final chunk configurations +#[apply(all_engines)] +fn decode_pad_mode_requires_canonical_rejects_non_canonical(engine_wrapper: E) { + let engine = E::standard_with_pad_mode(true, DecodePaddingMode::RequireCanonical); + + let suffixes = ["/w", "/w=", "iYU"]; + for num_prefix_quads in 0..256 { + for &suffix in suffixes.iter() { + let mut encoded = "AAAA".repeat(num_prefix_quads); + encoded.push_str(suffix); + + let res = engine.decode(&encoded); + + assert_eq!(Err(DecodeError::InvalidPadding), res); + } + } +} + +/// Requires no padding -> accepts 2 + 0, 3 + 0, 4 + 0 final chunk configuration +#[apply(all_engines)] +fn decode_pad_mode_requires_no_padding_accepts_no_padding(engine_wrapper: E) { + assert_all_suffixes_ok( + E::standard_with_pad_mode(true, DecodePaddingMode::RequireNone), + vec!["/w", "iYU", "AAAA"], + ); +} + +/// Requires no padding -> rejects 2 + 1-2, 3 + 1 final chunk configuration +#[apply(all_engines)] +fn decode_pad_mode_requires_no_padding_rejects_any_padding(engine_wrapper: E) { + let engine = E::standard_with_pad_mode(true, DecodePaddingMode::RequireNone); + + let suffixes = ["/w=", "/w==", "iYU="]; + for num_prefix_quads in 0..256 { + for &suffix in suffixes.iter() { + let mut encoded = "AAAA".repeat(num_prefix_quads); + encoded.push_str(suffix); + + let res = engine.decode(&encoded); + + assert_eq!(Err(DecodeError::InvalidPadding), res); + } + } +} + +/// Indifferent padding accepts 2 + 0-2, 3 + 0-1, 4 + 0 final chunk configuration +#[apply(all_engines)] +fn decode_pad_mode_indifferent_padding_accepts_anything(engine_wrapper: E) { + assert_all_suffixes_ok( + E::standard_with_pad_mode(true, DecodePaddingMode::Indifferent), + vec!["/w", "/w=", "/w==", "iYU", "iYU=", 
"AAAA"], + ); +} + +/// 1 trailing byte that's not padding is detected as invalid byte even though there's padding +/// in the middle of the input. This is essentially mandating the eager check for 1 trailing byte +/// to catch the \n suffix case. +// DecoderReader pseudo-engine can't handle DecodePaddingMode::RequireNone since it will decode +// a complete quad with padding in it before encountering the stray byte that makes it an invalid +// length +#[apply(all_engines_except_decoder_reader)] +fn decode_invalid_trailing_bytes_all_pad_modes_invalid_byte(engine_wrapper: E) { + for mode in all_pad_modes() { + do_invalid_trailing_byte(E::standard_with_pad_mode(true, mode), mode); + } +} + +#[apply(all_engines)] +fn decode_invalid_trailing_bytes_invalid_byte(engine_wrapper: E) { + // excluding no padding mode because the DecoderWrapper pseudo-engine will fail with + // InvalidPadding because it will decode the last complete quad with padding first + for mode in pad_modes_allowing_padding() { + do_invalid_trailing_byte(E::standard_with_pad_mode(true, mode), mode); + } +} +fn do_invalid_trailing_byte(engine: impl Engine, mode: DecodePaddingMode) { + for last_byte in [b'*', b'\n'] { + for num_prefix_quads in 0..256 { + let mut s: String = "ABCD".repeat(num_prefix_quads); + s.push_str("Cg=="); + let mut input = s.into_bytes(); + input.push(last_byte); + + // The case of trailing newlines is common enough to warrant a test for a good error + // message. + assert_eq!( + Err(DecodeError::InvalidByte( + num_prefix_quads * 4 + 4, + last_byte + )), + engine.decode(&input), + "mode: {:?}, input: {}", + mode, + String::from_utf8(input).unwrap() + ); + } + } +} + +/// When there's 1 trailing byte, but it's padding, it's only InvalidByte if there isn't padding +/// earlier. 
+#[apply(all_engines)] +fn decode_invalid_trailing_padding_as_invalid_byte_at_first_pad_byte( + engine_wrapper: E, +) { + // excluding no padding mode because the DecoderWrapper pseudo-engine will fail with + // InvalidPadding because it will decode the last complete quad with padding first + for mode in pad_modes_allowing_padding() { + do_invalid_trailing_padding_as_invalid_byte_at_first_padding( + E::standard_with_pad_mode(true, mode), + mode, + ); + } +} + +// DecoderReader pseudo-engine can't handle DecodePaddingMode::RequireNone since it will decode +// a complete quad with padding in it before encountering the stray byte that makes it an invalid +// length +#[apply(all_engines_except_decoder_reader)] +fn decode_invalid_trailing_padding_as_invalid_byte_at_first_byte_all_modes( + engine_wrapper: E, +) { + for mode in all_pad_modes() { + do_invalid_trailing_padding_as_invalid_byte_at_first_padding( + E::standard_with_pad_mode(true, mode), + mode, + ); + } +} +fn do_invalid_trailing_padding_as_invalid_byte_at_first_padding( + engine: impl Engine, + mode: DecodePaddingMode, +) { + for num_prefix_quads in 0..256 { + for (suffix, pad_offset) in [("AA===", 2), ("AAA==", 3), ("AAAA=", 4)] { + let mut s: String = "ABCD".repeat(num_prefix_quads); + s.push_str(suffix); + + assert_eq!( + // pad after `g`, not the last one + Err(DecodeError::InvalidByte( + num_prefix_quads * 4 + pad_offset, + PAD_BYTE + )), + engine.decode(&s), + "mode: {:?}, input: {}", + mode, + s + ); + } + } +} + +#[apply(all_engines)] +fn decode_into_slice_fits_in_precisely_sized_slice(engine_wrapper: E) { + let mut orig_data = Vec::new(); + let mut encoded_data = String::new(); + let mut decode_buf = Vec::new(); + + let input_len_range = distributions::Uniform::new(0, 1000); + let mut rng = rngs::SmallRng::from_entropy(); + + for _ in 0..10_000 { + orig_data.clear(); + encoded_data.clear(); + decode_buf.clear(); + + let input_len = input_len_range.sample(&mut rng); + + for _ in 0..input_len { + orig_data.push(rng.gen()); + } + + let engine = E::random(&mut rng); + engine.encode_string(&orig_data, &mut encoded_data); + assert_encode_sanity(&encoded_data, engine.config().encode_padding(), input_len); + + decode_buf.resize(input_len, 0); + // decode into the non-empty buf + let decode_bytes_written = engine + .decode_slice_unchecked(encoded_data.as_bytes(), &mut decode_buf[..]) + .unwrap(); + assert_eq!(orig_data.len(), decode_bytes_written); + assert_eq!(orig_data, decode_buf); + + // same for checked variant + decode_buf.clear(); + decode_buf.resize(input_len, 0); + // decode into the non-empty buf + let decode_bytes_written = engine + .decode_slice(encoded_data.as_bytes(), &mut decode_buf[..]) + .unwrap(); + assert_eq!(orig_data.len(), decode_bytes_written); + assert_eq!(orig_data, decode_buf); + } +} + +#[apply(all_engines)] +fn inner_decode_reports_padding_position(engine_wrapper: E) { + let mut b64 = String::new(); + let mut decoded = Vec::new(); + let engine = E::standard(); + + for pad_position in 1..10_000 { + b64.clear(); + decoded.clear(); + // plenty of room for original data + decoded.resize(pad_position, 0); + + for _ in 0..pad_position { + b64.push('A'); + } + // finish the quad with padding + for _ in 0..(4 - (pad_position % 4)) { + b64.push('='); + } + + let decode_res = engine.internal_decode( + b64.as_bytes(), + &mut decoded[..], + engine.internal_decoded_len_estimate(b64.len()), + ); + if pad_position % 4 < 2 { + // impossible padding + assert_eq!( + 
Err(DecodeSliceError::DecodeError(DecodeError::InvalidByte( + pad_position, + PAD_BYTE + ))), + decode_res + ); + } else { + let decoded_bytes = pad_position / 4 * 3 + + match pad_position % 4 { + 0 => 0, + 2 => 1, + 3 => 2, + _ => unreachable!(), + }; + assert_eq!( + Ok(DecodeMetadata::new(decoded_bytes, Some(pad_position))), + decode_res + ); + } + } +} + +#[apply(all_engines)] +fn decode_length_estimate_delta(engine_wrapper: E) { + for engine in [E::standard(), E::standard_unpadded()] { + for &padding in &[true, false] { + for orig_len in 0..1000 { + let encoded_len = encoded_len(orig_len, padding).unwrap(); + + let decoded_estimate = engine + .internal_decoded_len_estimate(encoded_len) + .decoded_len_estimate(); + assert!(decoded_estimate >= orig_len); + assert!( + decoded_estimate - orig_len < 3, + "estimate: {}, encoded: {}, orig: {}", + decoded_estimate, + encoded_len, + orig_len + ); + } + } + } +} + +#[apply(all_engines)] +fn estimate_via_u128_inflation(engine_wrapper: E) { + // cover both ends of usize + (0..1000) + .chain(usize::MAX - 1000..=usize::MAX) + .for_each(|encoded_len| { + // inflate to 128 bit type to be able to safely use the easy formulas + let len_128 = encoded_len as u128; + + let estimate = E::standard() + .internal_decoded_len_estimate(encoded_len) + .decoded_len_estimate(); + + // This check is a little too strict: it requires using the (len + 3) / 4 * 3 formula + // or equivalent, but until other engines come along that use a different formula + // requiring that we think more carefully about what the allowable criteria are, this + // will do. + assert_eq!( + ((len_128 + 3) / 4 * 3) as usize, + estimate, + "enc len {}", + encoded_len + ); + }) +} + +#[apply(all_engines)] +fn decode_slice_checked_fails_gracefully_at_all_output_lengths( + engine_wrapper: E, +) { + let mut rng = seeded_rng(); + for original_len in 0..1000 { + let mut original = vec![0; original_len]; + rng.fill(&mut original[..]); + + for mode in all_pad_modes() { + let engine = E::standard_with_pad_mode( + match mode { + DecodePaddingMode::Indifferent | DecodePaddingMode::RequireCanonical => true, + DecodePaddingMode::RequireNone => false, + }, + mode, + ); + + let encoded = engine.encode(&original); + let mut decode_buf = Vec::with_capacity(original_len); + for decode_buf_len in 0..original_len { + decode_buf.resize(decode_buf_len, 0); + assert_eq!( + DecodeSliceError::OutputSliceTooSmall, + engine + .decode_slice(&encoded, &mut decode_buf[..]) + .unwrap_err(), + "original len: {}, encoded len: {}, buf len: {}, mode: {:?}", + original_len, + encoded.len(), + decode_buf_len, + mode + ); + // internal method works the same + assert_eq!( + DecodeSliceError::OutputSliceTooSmall, + engine + .internal_decode( + encoded.as_bytes(), + &mut decode_buf[..], + engine.internal_decoded_len_estimate(encoded.len()) + ) + .unwrap_err() + ); + } + + decode_buf.resize(original_len, 0); + rng.fill(&mut decode_buf[..]); + assert_eq!( + original_len, + engine.decode_slice(&encoded, &mut decode_buf[..]).unwrap() + ); + assert_eq!(original, decode_buf); + } + } +} + +/// Returns a tuple of the original data length, the encoded data length (just data), and the length including padding. +/// +/// Vecs provided should be empty. 
+fn generate_random_encoded_data<E: Engine, R: rand::Rng, D: distributions::Distribution<usize>>(
+    engine: &E,
+    orig_data: &mut Vec<u8>,
+    encode_buf: &mut Vec<u8>,
+    rng: &mut R,
+    length_distribution: &D,
+) -> (usize, usize, usize) {
+    let padding: bool = engine.config().encode_padding();
+
+    let orig_len = fill_rand(orig_data, rng, length_distribution);
+    let expected_encoded_len = encoded_len(orig_len, padding).unwrap();
+    encode_buf.resize(expected_encoded_len, 0);
+
+    let base_encoded_len = engine.internal_encode(&orig_data[..], &mut encode_buf[..]);
+
+    let enc_len_with_padding = if padding {
+        base_encoded_len + add_padding(base_encoded_len, &mut encode_buf[base_encoded_len..])
+    } else {
+        base_encoded_len
+    };
+
+    assert_eq!(expected_encoded_len, enc_len_with_padding);
+
+    (orig_len, base_encoded_len, enc_len_with_padding)
+}
+
+// fill to a random length
+fn fill_rand<R: rand::Rng, D: distributions::Distribution<usize>>(
+    vec: &mut Vec<u8>,
+    rng: &mut R,
+    length_distribution: &D,
+) -> usize {
+    let len = length_distribution.sample(rng);
+    for _ in 0..len {
+        vec.push(rng.gen());
+    }
+
+    len
+}
+
+fn fill_rand_len<R: rand::Rng>(vec: &mut Vec<u8>, rng: &mut R, len: usize) {
+    for _ in 0..len {
+        vec.push(rng.gen());
+    }
+}
+
+fn prefixed_data<'i>(input_with_prefix: &'i mut String, prefix_len: usize, data: &str) -> &'i str {
+    input_with_prefix.truncate(prefix_len);
+    input_with_prefix.push_str(data);
+    input_with_prefix.as_str()
+}
+
+/// A wrapper to make using engines in rstest fixtures easier.
+/// The functions don't need to be instance methods, but rstest does seem
+/// to want an instance, so instances are passed to test functions and then ignored.
+trait EngineWrapper {
+    type Engine: Engine;
+
+    /// Return an engine configured for RFC standard base64
+    fn standard() -> Self::Engine;
+
+    /// Return an engine configured for RFC standard base64, except with no padding appended on
+    /// encode, and no padding required on decode.
+ fn standard_unpadded() -> Self::Engine; + + /// Return an engine configured for RFC standard alphabet with the provided encode and decode + /// pad settings + fn standard_with_pad_mode(encode_pad: bool, decode_pad_mode: DecodePaddingMode) + -> Self::Engine; + + /// Return an engine configured for RFC standard base64 that allows invalid trailing bits + fn standard_allow_trailing_bits() -> Self::Engine; + + /// Return an engine configured with a randomized alphabet and config + fn random(rng: &mut R) -> Self::Engine; + + /// Return an engine configured with the specified alphabet and randomized config + fn random_alphabet(rng: &mut R, alphabet: &Alphabet) -> Self::Engine; +} + +struct GeneralPurposeWrapper {} + +impl EngineWrapper for GeneralPurposeWrapper { + type Engine = general_purpose::GeneralPurpose; + + fn standard() -> Self::Engine { + general_purpose::GeneralPurpose::new(&STANDARD, general_purpose::PAD) + } + + fn standard_unpadded() -> Self::Engine { + general_purpose::GeneralPurpose::new(&STANDARD, general_purpose::NO_PAD) + } + + fn standard_with_pad_mode( + encode_pad: bool, + decode_pad_mode: DecodePaddingMode, + ) -> Self::Engine { + general_purpose::GeneralPurpose::new( + &STANDARD, + general_purpose::GeneralPurposeConfig::new() + .with_encode_padding(encode_pad) + .with_decode_padding_mode(decode_pad_mode), + ) + } + + fn standard_allow_trailing_bits() -> Self::Engine { + general_purpose::GeneralPurpose::new( + &STANDARD, + general_purpose::GeneralPurposeConfig::new().with_decode_allow_trailing_bits(true), + ) + } + + fn random(rng: &mut R) -> Self::Engine { + let alphabet = random_alphabet(rng); + + Self::random_alphabet(rng, alphabet) + } + + fn random_alphabet(rng: &mut R, alphabet: &Alphabet) -> Self::Engine { + general_purpose::GeneralPurpose::new(alphabet, random_config(rng)) + } +} + +struct NaiveWrapper {} + +impl EngineWrapper for NaiveWrapper { + type Engine = naive::Naive; + + fn standard() -> Self::Engine { + naive::Naive::new( + &STANDARD, + naive::NaiveConfig { + encode_padding: true, + decode_allow_trailing_bits: false, + decode_padding_mode: DecodePaddingMode::RequireCanonical, + }, + ) + } + + fn standard_unpadded() -> Self::Engine { + naive::Naive::new( + &STANDARD, + naive::NaiveConfig { + encode_padding: false, + decode_allow_trailing_bits: false, + decode_padding_mode: DecodePaddingMode::RequireNone, + }, + ) + } + + fn standard_with_pad_mode( + encode_pad: bool, + decode_pad_mode: DecodePaddingMode, + ) -> Self::Engine { + naive::Naive::new( + &STANDARD, + naive::NaiveConfig { + encode_padding: encode_pad, + decode_allow_trailing_bits: false, + decode_padding_mode: decode_pad_mode, + }, + ) + } + + fn standard_allow_trailing_bits() -> Self::Engine { + naive::Naive::new( + &STANDARD, + naive::NaiveConfig { + encode_padding: true, + decode_allow_trailing_bits: true, + decode_padding_mode: DecodePaddingMode::RequireCanonical, + }, + ) + } + + fn random(rng: &mut R) -> Self::Engine { + let alphabet = random_alphabet(rng); + + Self::random_alphabet(rng, alphabet) + } + + fn random_alphabet(rng: &mut R, alphabet: &Alphabet) -> Self::Engine { + let mode = rng.gen(); + + let config = naive::NaiveConfig { + encode_padding: match mode { + DecodePaddingMode::Indifferent => rng.gen(), + DecodePaddingMode::RequireCanonical => true, + DecodePaddingMode::RequireNone => false, + }, + decode_allow_trailing_bits: rng.gen(), + decode_padding_mode: mode, + }; + + naive::Naive::new(alphabet, config) + } +} + +/// A pseudo-Engine that routes all decoding through 
[DecoderReader] +struct DecoderReaderEngine { + engine: E, +} + +impl From for DecoderReaderEngine { + fn from(value: E) -> Self { + Self { engine: value } + } +} + +impl Engine for DecoderReaderEngine { + type Config = E::Config; + type DecodeEstimate = E::DecodeEstimate; + + fn internal_encode(&self, input: &[u8], output: &mut [u8]) -> usize { + self.engine.internal_encode(input, output) + } + + fn internal_decoded_len_estimate(&self, input_len: usize) -> Self::DecodeEstimate { + self.engine.internal_decoded_len_estimate(input_len) + } + + fn internal_decode( + &self, + input: &[u8], + output: &mut [u8], + decode_estimate: Self::DecodeEstimate, + ) -> Result { + let mut reader = DecoderReader::new(input, &self.engine); + let mut buf = vec![0; input.len()]; + // to avoid effects like not detecting invalid length due to progressively growing + // the output buffer in read_to_end etc, read into a big enough buffer in one go + // to make behavior more consistent with normal engines + let _ = reader + .read(&mut buf) + .and_then(|len| { + buf.truncate(len); + // make sure we got everything + reader.read_to_end(&mut buf) + }) + .map_err(|io_error| { + *io_error + .into_inner() + .and_then(|inner| inner.downcast::().ok()) + .unwrap() + })?; + if output.len() < buf.len() { + return Err(DecodeSliceError::OutputSliceTooSmall); + } + output[..buf.len()].copy_from_slice(&buf); + Ok(DecodeMetadata::new( + buf.len(), + input + .iter() + .enumerate() + .filter(|(_offset, byte)| **byte == PAD_BYTE) + .map(|(offset, _byte)| offset) + .next(), + )) + } + + fn config(&self) -> &Self::Config { + self.engine.config() + } +} + +struct DecoderReaderEngineWrapper {} + +impl EngineWrapper for DecoderReaderEngineWrapper { + type Engine = DecoderReaderEngine; + + fn standard() -> Self::Engine { + GeneralPurposeWrapper::standard().into() + } + + fn standard_unpadded() -> Self::Engine { + GeneralPurposeWrapper::standard_unpadded().into() + } + + fn standard_with_pad_mode( + encode_pad: bool, + decode_pad_mode: DecodePaddingMode, + ) -> Self::Engine { + GeneralPurposeWrapper::standard_with_pad_mode(encode_pad, decode_pad_mode).into() + } + + fn standard_allow_trailing_bits() -> Self::Engine { + GeneralPurposeWrapper::standard_allow_trailing_bits().into() + } + + fn random(rng: &mut R) -> Self::Engine { + GeneralPurposeWrapper::random(rng).into() + } + + fn random_alphabet(rng: &mut R, alphabet: &Alphabet) -> Self::Engine { + GeneralPurposeWrapper::random_alphabet(rng, alphabet).into() + } +} + +fn seeded_rng() -> impl rand::Rng { + rngs::SmallRng::from_entropy() +} + +fn all_pad_modes() -> Vec { + vec![ + DecodePaddingMode::Indifferent, + DecodePaddingMode::RequireCanonical, + DecodePaddingMode::RequireNone, + ] +} + +fn pad_modes_allowing_padding() -> Vec { + vec![ + DecodePaddingMode::Indifferent, + DecodePaddingMode::RequireCanonical, + ] +} + +fn assert_all_suffixes_ok(engine: E, suffixes: Vec<&str>) { + for num_prefix_quads in 0..256 { + for &suffix in suffixes.iter() { + let mut encoded = "AAAA".repeat(num_prefix_quads); + encoded.push_str(suffix); + + let res = &engine.decode(&encoded); + assert!(res.is_ok()); + } + } +} diff --git a/vendor/base64/src/lib.rs b/vendor/base64/src/lib.rs new file mode 100644 index 00000000000000..579a7225cb75ea --- /dev/null +++ b/vendor/base64/src/lib.rs @@ -0,0 +1,277 @@ +//! Correct, fast, and configurable [base64][] decoding and encoding. Base64 +//! transports binary data efficiently in contexts where only plain text is +//! allowed. +//! +//! 
[base64]: https://developer.mozilla.org/en-US/docs/Glossary/Base64 +//! +//! # Usage +//! +//! Use an [`Engine`] to decode or encode base64, configured with the base64 +//! alphabet and padding behavior best suited to your application. +//! +//! ## Engine setup +//! +//! There is more than one way to encode a stream of bytes as “base64”. +//! Different applications use different encoding +//! [alphabets][alphabet::Alphabet] and +//! [padding behaviors][engine::general_purpose::GeneralPurposeConfig]. +//! +//! ### Encoding alphabet +//! +//! Almost all base64 [alphabets][alphabet::Alphabet] use `A-Z`, `a-z`, and +//! `0-9`, which gives nearly 64 characters (26 + 26 + 10 = 62), but they differ +//! in their choice of their final 2. +//! +//! Most applications use the [standard][alphabet::STANDARD] alphabet specified +//! in [RFC 4648][rfc-alphabet]. If that’s all you need, you can get started +//! quickly by using the pre-configured +//! [`STANDARD`][engine::general_purpose::STANDARD] engine, which is also available +//! in the [`prelude`] module as shown here, if you prefer a minimal `use` +//! footprint. +//! +#![cfg_attr(feature = "alloc", doc = "```")] +#![cfg_attr(not(feature = "alloc"), doc = "```ignore")] +//! use base64::prelude::*; +//! +//! # fn main() -> Result<(), base64::DecodeError> { +//! assert_eq!(BASE64_STANDARD.decode(b"+uwgVQA=")?, b"\xFA\xEC\x20\x55\0"); +//! assert_eq!(BASE64_STANDARD.encode(b"\xFF\xEC\x20\x55\0"), "/+wgVQA="); +//! # Ok(()) +//! # } +//! ``` +//! +//! [rfc-alphabet]: https://datatracker.ietf.org/doc/html/rfc4648#section-4 +//! +//! Other common alphabets are available in the [`alphabet`] module. +//! +//! #### URL-safe alphabet +//! +//! The standard alphabet uses `+` and `/` as its two non-alphanumeric tokens, +//! which cannot be safely used in URL’s without encoding them as `%2B` and +//! `%2F`. +//! +//! To avoid that, some applications use a [“URL-safe” alphabet][alphabet::URL_SAFE], +//! which uses `-` and `_` instead. To use that alternative alphabet, use the +//! [`URL_SAFE`][engine::general_purpose::URL_SAFE] engine. This example doesn't +//! use [`prelude`] to show what a more explicit `use` would look like. +//! +#![cfg_attr(feature = "alloc", doc = "```")] +#![cfg_attr(not(feature = "alloc"), doc = "```ignore")] +//! use base64::{engine::general_purpose::URL_SAFE, Engine as _}; +//! +//! # fn main() -> Result<(), base64::DecodeError> { +//! assert_eq!(URL_SAFE.decode(b"-uwgVQA=")?, b"\xFA\xEC\x20\x55\0"); +//! assert_eq!(URL_SAFE.encode(b"\xFF\xEC\x20\x55\0"), "_-wgVQA="); +//! # Ok(()) +//! # } +//! ``` +//! +//! ### Padding characters +//! +//! Each base64 character represents 6 bits (2⁶ = 64) of the original binary +//! data, and every 3 bytes of input binary data will encode to 4 base64 +//! characters (8 bits × 3 = 6 bits × 4 = 24 bits). +//! +//! When the input is not an even multiple of 3 bytes in length, [canonical][] +//! base64 encoders insert padding characters at the end, so that the output +//! length is always a multiple of 4: +//! +//! [canonical]: https://datatracker.ietf.org/doc/html/rfc4648#section-3.5 +//! +#![cfg_attr(feature = "alloc", doc = "```")] +#![cfg_attr(not(feature = "alloc"), doc = "```ignore")] +//! use base64::{engine::general_purpose::STANDARD, Engine as _}; +//! +//! assert_eq!(STANDARD.encode(b""), ""); +//! assert_eq!(STANDARD.encode(b"f"), "Zg=="); +//! assert_eq!(STANDARD.encode(b"fo"), "Zm8="); +//! assert_eq!(STANDARD.encode(b"foo"), "Zm9v"); +//! ``` +//! +//! 
Canonical encoding ensures that base64 encodings will be exactly the same, +//! byte-for-byte, regardless of input length. But the `=` padding characters +//! aren’t necessary for decoding, and they may be omitted by using a +//! [`NO_PAD`][engine::general_purpose::NO_PAD] configuration: +//! +#![cfg_attr(feature = "alloc", doc = "```")] +#![cfg_attr(not(feature = "alloc"), doc = "```ignore")] +//! use base64::{engine::general_purpose::STANDARD_NO_PAD, Engine as _}; +//! +//! assert_eq!(STANDARD_NO_PAD.encode(b""), ""); +//! assert_eq!(STANDARD_NO_PAD.encode(b"f"), "Zg"); +//! assert_eq!(STANDARD_NO_PAD.encode(b"fo"), "Zm8"); +//! assert_eq!(STANDARD_NO_PAD.encode(b"foo"), "Zm9v"); +//! ``` +//! +//! The pre-configured `NO_PAD` engines will reject inputs containing padding +//! `=` characters. To encode without padding and still accept padding while +//! decoding, create an [engine][engine::general_purpose::GeneralPurpose] with +//! that [padding mode][engine::DecodePaddingMode]. +//! +#![cfg_attr(feature = "alloc", doc = "```")] +#![cfg_attr(not(feature = "alloc"), doc = "```ignore")] +//! # use base64::{engine::general_purpose::STANDARD_NO_PAD, Engine as _}; +//! assert_eq!(STANDARD_NO_PAD.decode(b"Zm8="), Err(base64::DecodeError::InvalidPadding)); +//! ``` +//! +//! ### Further customization +//! +//! Decoding and encoding behavior can be customized by creating an +//! [engine][engine::GeneralPurpose] with an [alphabet][alphabet::Alphabet] and +//! [padding configuration][engine::GeneralPurposeConfig]: +//! +#![cfg_attr(feature = "alloc", doc = "```")] +#![cfg_attr(not(feature = "alloc"), doc = "```ignore")] +//! use base64::{engine, alphabet, Engine as _}; +//! +//! // bizarro-world base64: +/ as the first symbols instead of the last +//! let alphabet = +//! alphabet::Alphabet::new("+/ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789") +//! .unwrap(); +//! +//! // a very weird config that encodes with padding but requires no padding when decoding...? +//! let crazy_config = engine::GeneralPurposeConfig::new() +//! .with_decode_allow_trailing_bits(true) +//! .with_encode_padding(true) +//! .with_decode_padding_mode(engine::DecodePaddingMode::RequireNone); +//! +//! let crazy_engine = engine::GeneralPurpose::new(&alphabet, crazy_config); +//! +//! let encoded = crazy_engine.encode(b"abc 123"); +//! +//! ``` +//! +//! ## Memory allocation +//! +//! The [decode][Engine::decode()] and [encode][Engine::encode()] engine methods +//! allocate memory for their results – `decode` returns a `Vec` and +//! `encode` returns a `String`. To instead decode or encode into a buffer that +//! you allocated, use one of the alternative methods: +//! +//! #### Decoding +//! +//! | Method | Output | Allocates memory | +//! | -------------------------- | ----------------------------- | ----------------------------- | +//! | [`Engine::decode`] | returns a new `Vec` | always | +//! | [`Engine::decode_vec`] | appends to provided `Vec` | if `Vec` lacks capacity | +//! | [`Engine::decode_slice`] | writes to provided `&[u8]` | never +//! +//! #### Encoding +//! +//! | Method | Output | Allocates memory | +//! | -------------------------- | ---------------------------- | ------------------------------ | +//! | [`Engine::encode`] | returns a new `String` | always | +//! | [`Engine::encode_string`] | appends to provided `String` | if `String` lacks capacity | +//! | [`Engine::encode_slice`] | writes to provided `&[u8]` | never | +//! +//! ## Input and output +//! +//! 
The `base64` crate can [decode][Engine::decode()] and +//! [encode][Engine::encode()] values in memory, or +//! [`DecoderReader`][read::DecoderReader] and +//! [`EncoderWriter`][write::EncoderWriter] provide streaming decoding and +//! encoding for any [readable][std::io::Read] or [writable][std::io::Write] +//! byte stream. +//! +//! #### Decoding +//! +#![cfg_attr(feature = "std", doc = "```")] +#![cfg_attr(not(feature = "std"), doc = "```ignore")] +//! # use std::io; +//! use base64::{engine::general_purpose::STANDARD, read::DecoderReader}; +//! +//! # fn main() -> Result<(), Box> { +//! let mut input = io::stdin(); +//! let mut decoder = DecoderReader::new(&mut input, &STANDARD); +//! io::copy(&mut decoder, &mut io::stdout())?; +//! # Ok(()) +//! # } +//! ``` +//! +//! #### Encoding +//! +#![cfg_attr(feature = "std", doc = "```")] +#![cfg_attr(not(feature = "std"), doc = "```ignore")] +//! # use std::io; +//! use base64::{engine::general_purpose::STANDARD, write::EncoderWriter}; +//! +//! # fn main() -> Result<(), Box> { +//! let mut output = io::stdout(); +//! let mut encoder = EncoderWriter::new(&mut output, &STANDARD); +//! io::copy(&mut io::stdin(), &mut encoder)?; +//! # Ok(()) +//! # } +//! ``` +//! +//! #### Display +//! +//! If you only need a base64 representation for implementing the +//! [`Display`][std::fmt::Display] trait, use +//! [`Base64Display`][display::Base64Display]: +//! +//! ``` +//! use base64::{display::Base64Display, engine::general_purpose::STANDARD}; +//! +//! let value = Base64Display::new(b"\0\x01\x02\x03", &STANDARD); +//! assert_eq!("base64: AAECAw==", format!("base64: {}", value)); +//! ``` +//! +//! # Panics +//! +//! If length calculations result in overflowing `usize`, a panic will result. + +#![cfg_attr(feature = "cargo-clippy", allow(clippy::cast_lossless))] +#![deny( + missing_docs, + trivial_casts, + trivial_numeric_casts, + unused_extern_crates, + unused_import_braces, + unused_results, + variant_size_differences +)] +#![forbid(unsafe_code)] +// Allow globally until https://github.com/rust-lang/rust-clippy/issues/8768 is resolved. +// The desired state is to allow it only for the rstest_reuse import. +#![allow(clippy::single_component_path_imports)] +#![cfg_attr(not(any(feature = "std", test)), no_std)] + +#[cfg(any(feature = "alloc", test))] +extern crate alloc; + +// has to be included at top level because of the way rstest_reuse defines its macros +#[cfg(test)] +use rstest_reuse; + +mod chunked_encoder; +pub mod display; +#[cfg(any(feature = "std", test))] +pub mod read; +#[cfg(any(feature = "std", test))] +pub mod write; + +pub mod engine; +pub use engine::Engine; + +pub mod alphabet; + +mod encode; +#[allow(deprecated)] +#[cfg(any(feature = "alloc", test))] +pub use crate::encode::{encode, encode_engine, encode_engine_string}; +#[allow(deprecated)] +pub use crate::encode::{encode_engine_slice, encoded_len, EncodeSliceError}; + +mod decode; +#[allow(deprecated)] +#[cfg(any(feature = "alloc", test))] +pub use crate::decode::{decode, decode_engine, decode_engine_vec}; +#[allow(deprecated)] +pub use crate::decode::{decode_engine_slice, decoded_len_estimate, DecodeError, DecodeSliceError}; + +pub mod prelude; + +#[cfg(test)] +mod tests; + +const PAD_BYTE: u8 = b'='; diff --git a/vendor/base64/src/prelude.rs b/vendor/base64/src/prelude.rs new file mode 100644 index 00000000000000..df5fdb497c6ce6 --- /dev/null +++ b/vendor/base64/src/prelude.rs @@ -0,0 +1,20 @@ +//! Preconfigured engines for common use cases. +//! +//! 
These are re-exports of `const` engines in [crate::engine::general_purpose], renamed with a `BASE64_` +//! prefix for those who prefer to `use` the entire path to a name. +//! +//! # Examples +//! +#![cfg_attr(feature = "alloc", doc = "```")] +#![cfg_attr(not(feature = "alloc"), doc = "```ignore")] +//! use base64::prelude::{Engine as _, BASE64_STANDARD_NO_PAD}; +//! +//! assert_eq!("c29tZSBieXRlcw", &BASE64_STANDARD_NO_PAD.encode(b"some bytes")); +//! ``` + +pub use crate::engine::Engine; + +pub use crate::engine::general_purpose::STANDARD as BASE64_STANDARD; +pub use crate::engine::general_purpose::STANDARD_NO_PAD as BASE64_STANDARD_NO_PAD; +pub use crate::engine::general_purpose::URL_SAFE as BASE64_URL_SAFE; +pub use crate::engine::general_purpose::URL_SAFE_NO_PAD as BASE64_URL_SAFE_NO_PAD; diff --git a/vendor/base64/src/read/decoder.rs b/vendor/base64/src/read/decoder.rs new file mode 100644 index 00000000000000..781f6f880e5537 --- /dev/null +++ b/vendor/base64/src/read/decoder.rs @@ -0,0 +1,335 @@ +use crate::{engine::Engine, DecodeError, DecodeSliceError, PAD_BYTE}; +use std::{cmp, fmt, io}; + +// This should be large, but it has to fit on the stack. +pub(crate) const BUF_SIZE: usize = 1024; + +// 4 bytes of base64 data encode 3 bytes of raw data (modulo padding). +const BASE64_CHUNK_SIZE: usize = 4; +const DECODED_CHUNK_SIZE: usize = 3; + +/// A `Read` implementation that decodes base64 data read from an underlying reader. +/// +/// # Examples +/// +/// ``` +/// use std::io::Read; +/// use std::io::Cursor; +/// use base64::engine::general_purpose; +/// +/// // use a cursor as the simplest possible `Read` -- in real code this is probably a file, etc. +/// let mut wrapped_reader = Cursor::new(b"YXNkZg=="); +/// let mut decoder = base64::read::DecoderReader::new( +/// &mut wrapped_reader, +/// &general_purpose::STANDARD); +/// +/// // handle errors as you normally would +/// let mut result = Vec::new(); +/// decoder.read_to_end(&mut result).unwrap(); +/// +/// assert_eq!(b"asdf", &result[..]); +/// +/// ``` +pub struct DecoderReader<'e, E: Engine, R: io::Read> { + engine: &'e E, + /// Where b64 data is read from + inner: R, + + /// Holds b64 data read from the delegate reader. + b64_buffer: [u8; BUF_SIZE], + /// The start of the pending buffered data in `b64_buffer`. + b64_offset: usize, + /// The amount of buffered b64 data after `b64_offset` in `b64_len`. + b64_len: usize, + /// Since the caller may provide us with a buffer of size 1 or 2 that's too small to copy a + /// decoded chunk in to, we have to be able to hang on to a few decoded bytes. + /// Technically we only need to hold 2 bytes, but then we'd need a separate temporary buffer to + /// decode 3 bytes into and then juggle copying one byte into the provided read buf and the rest + /// into here, which seems like a lot of complexity for 1 extra byte of storage. + decoded_chunk_buffer: [u8; DECODED_CHUNK_SIZE], + /// Index of start of decoded data in `decoded_chunk_buffer` + decoded_offset: usize, + /// Length of decoded data after `decoded_offset` in `decoded_chunk_buffer` + decoded_len: usize, + /// Input length consumed so far. 
+ /// Used to provide accurate offsets in errors + input_consumed_len: usize, + /// offset of previously seen padding, if any + padding_offset: Option, +} + +// exclude b64_buffer as it's uselessly large +impl<'e, E: Engine, R: io::Read> fmt::Debug for DecoderReader<'e, E, R> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("DecoderReader") + .field("b64_offset", &self.b64_offset) + .field("b64_len", &self.b64_len) + .field("decoded_chunk_buffer", &self.decoded_chunk_buffer) + .field("decoded_offset", &self.decoded_offset) + .field("decoded_len", &self.decoded_len) + .field("input_consumed_len", &self.input_consumed_len) + .field("padding_offset", &self.padding_offset) + .finish() + } +} + +impl<'e, E: Engine, R: io::Read> DecoderReader<'e, E, R> { + /// Create a new decoder that will read from the provided reader `r`. + pub fn new(reader: R, engine: &'e E) -> Self { + DecoderReader { + engine, + inner: reader, + b64_buffer: [0; BUF_SIZE], + b64_offset: 0, + b64_len: 0, + decoded_chunk_buffer: [0; DECODED_CHUNK_SIZE], + decoded_offset: 0, + decoded_len: 0, + input_consumed_len: 0, + padding_offset: None, + } + } + + /// Write as much as possible of the decoded buffer into the target buffer. + /// Must only be called when there is something to write and space to write into. + /// Returns a Result with the number of (decoded) bytes copied. + fn flush_decoded_buf(&mut self, buf: &mut [u8]) -> io::Result { + debug_assert!(self.decoded_len > 0); + debug_assert!(!buf.is_empty()); + + let copy_len = cmp::min(self.decoded_len, buf.len()); + debug_assert!(copy_len > 0); + debug_assert!(copy_len <= self.decoded_len); + + buf[..copy_len].copy_from_slice( + &self.decoded_chunk_buffer[self.decoded_offset..self.decoded_offset + copy_len], + ); + + self.decoded_offset += copy_len; + self.decoded_len -= copy_len; + + debug_assert!(self.decoded_len < DECODED_CHUNK_SIZE); + + Ok(copy_len) + } + + /// Read into the remaining space in the buffer after the current contents. + /// Must only be called when there is space to read into in the buffer. + /// Returns the number of bytes read. + fn read_from_delegate(&mut self) -> io::Result { + debug_assert!(self.b64_offset + self.b64_len < BUF_SIZE); + + let read = self + .inner + .read(&mut self.b64_buffer[self.b64_offset + self.b64_len..])?; + self.b64_len += read; + + debug_assert!(self.b64_offset + self.b64_len <= BUF_SIZE); + + Ok(read) + } + + /// Decode the requested number of bytes from the b64 buffer into the provided buffer. It's the + /// caller's responsibility to choose the number of b64 bytes to decode correctly. + /// + /// Returns a Result with the number of decoded bytes written to `buf`. 
+ /// + /// # Panics + /// + /// panics if `buf` is too small + fn decode_to_buf(&mut self, b64_len_to_decode: usize, buf: &mut [u8]) -> io::Result { + debug_assert!(self.b64_len >= b64_len_to_decode); + debug_assert!(self.b64_offset + self.b64_len <= BUF_SIZE); + debug_assert!(!buf.is_empty()); + + let b64_to_decode = &self.b64_buffer[self.b64_offset..self.b64_offset + b64_len_to_decode]; + let decode_metadata = self + .engine + .internal_decode( + b64_to_decode, + buf, + self.engine.internal_decoded_len_estimate(b64_len_to_decode), + ) + .map_err(|dse| match dse { + DecodeSliceError::DecodeError(de) => { + match de { + DecodeError::InvalidByte(offset, byte) => { + match (byte, self.padding_offset) { + // if there was padding in a previous block of decoding that happened to + // be correct, and we now find more padding that happens to be incorrect, + // to be consistent with non-reader decodes, record the error at the first + // padding + (PAD_BYTE, Some(first_pad_offset)) => { + DecodeError::InvalidByte(first_pad_offset, PAD_BYTE) + } + _ => { + DecodeError::InvalidByte(self.input_consumed_len + offset, byte) + } + } + } + DecodeError::InvalidLength(len) => { + DecodeError::InvalidLength(self.input_consumed_len + len) + } + DecodeError::InvalidLastSymbol(offset, byte) => { + DecodeError::InvalidLastSymbol(self.input_consumed_len + offset, byte) + } + DecodeError::InvalidPadding => DecodeError::InvalidPadding, + } + } + DecodeSliceError::OutputSliceTooSmall => { + unreachable!("buf is sized correctly in calling code") + } + }) + .map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?; + + if let Some(offset) = self.padding_offset { + // we've already seen padding + if decode_metadata.decoded_len > 0 { + // we read more after already finding padding; report error at first padding byte + return Err(io::Error::new( + io::ErrorKind::InvalidData, + DecodeError::InvalidByte(offset, PAD_BYTE), + )); + } + } + + self.padding_offset = self.padding_offset.or(decode_metadata + .padding_offset + .map(|offset| self.input_consumed_len + offset)); + self.input_consumed_len += b64_len_to_decode; + self.b64_offset += b64_len_to_decode; + self.b64_len -= b64_len_to_decode; + + debug_assert!(self.b64_offset + self.b64_len <= BUF_SIZE); + + Ok(decode_metadata.decoded_len) + } + + /// Unwraps this `DecoderReader`, returning the base reader which it reads base64 encoded + /// input from. + /// + /// Because `DecoderReader` performs internal buffering, the state of the inner reader is + /// unspecified. This function is mainly provided because the inner reader type may provide + /// additional functionality beyond the `Read` implementation which may still be useful. + pub fn into_inner(self) -> R { + self.inner + } +} + +impl<'e, E: Engine, R: io::Read> io::Read for DecoderReader<'e, E, R> { + /// Decode input from the wrapped reader. + /// + /// Under non-error circumstances, this returns `Ok` with the value being the number of bytes + /// written in `buf`. + /// + /// Where possible, this function buffers base64 to minimize the number of read() calls to the + /// delegate reader. + /// + /// # Errors + /// + /// Any errors emitted by the delegate reader are returned. Decoding errors due to invalid + /// base64 are also possible, and will have `io::ErrorKind::InvalidData`. 
+ fn read(&mut self, buf: &mut [u8]) -> io::Result { + if buf.is_empty() { + return Ok(0); + } + + // offset == BUF_SIZE when we copied it all last time + debug_assert!(self.b64_offset <= BUF_SIZE); + debug_assert!(self.b64_offset + self.b64_len <= BUF_SIZE); + debug_assert!(if self.b64_offset == BUF_SIZE { + self.b64_len == 0 + } else { + self.b64_len <= BUF_SIZE + }); + + debug_assert!(if self.decoded_len == 0 { + // can be = when we were able to copy the complete chunk + self.decoded_offset <= DECODED_CHUNK_SIZE + } else { + self.decoded_offset < DECODED_CHUNK_SIZE + }); + + // We shouldn't ever decode into decoded_buffer when we can't immediately write at least one + // byte into the provided buf, so the effective length should only be 3 momentarily between + // when we decode and when we copy into the target buffer. + debug_assert!(self.decoded_len < DECODED_CHUNK_SIZE); + debug_assert!(self.decoded_len + self.decoded_offset <= DECODED_CHUNK_SIZE); + + if self.decoded_len > 0 { + // we have a few leftover decoded bytes; flush that rather than pull in more b64 + self.flush_decoded_buf(buf) + } else { + let mut at_eof = false; + while self.b64_len < BASE64_CHUNK_SIZE { + // Copy any bytes we have to the start of the buffer. + self.b64_buffer + .copy_within(self.b64_offset..self.b64_offset + self.b64_len, 0); + self.b64_offset = 0; + + // then fill in more data + let read = self.read_from_delegate()?; + if read == 0 { + // we never read into an empty buf, so 0 => we've hit EOF + at_eof = true; + break; + } + } + + if self.b64_len == 0 { + debug_assert!(at_eof); + // we must be at EOF, and we have no data left to decode + return Ok(0); + }; + + debug_assert!(if at_eof { + // if we are at eof, we may not have a complete chunk + self.b64_len > 0 + } else { + // otherwise, we must have at least one chunk + self.b64_len >= BASE64_CHUNK_SIZE + }); + + debug_assert_eq!(0, self.decoded_len); + + if buf.len() < DECODED_CHUNK_SIZE { + // caller requested an annoyingly short read + // have to write to a tmp buf first to avoid double mutable borrow + let mut decoded_chunk = [0_u8; DECODED_CHUNK_SIZE]; + // if we are at eof, could have less than BASE64_CHUNK_SIZE, in which case we have + // to assume that these last few tokens are, in fact, valid (i.e. must be 2-4 b64 + // tokens, not 1, since 1 token can't decode to 1 byte). 
+ let to_decode = cmp::min(self.b64_len, BASE64_CHUNK_SIZE); + + let decoded = self.decode_to_buf(to_decode, &mut decoded_chunk[..])?; + self.decoded_chunk_buffer[..decoded].copy_from_slice(&decoded_chunk[..decoded]); + + self.decoded_offset = 0; + self.decoded_len = decoded; + + // can be less than 3 on last block due to padding + debug_assert!(decoded <= 3); + + self.flush_decoded_buf(buf) + } else { + let b64_bytes_that_can_decode_into_buf = (buf.len() / DECODED_CHUNK_SIZE) + .checked_mul(BASE64_CHUNK_SIZE) + .expect("too many chunks"); + debug_assert!(b64_bytes_that_can_decode_into_buf >= BASE64_CHUNK_SIZE); + + let b64_bytes_available_to_decode = if at_eof { + self.b64_len + } else { + // only use complete chunks + self.b64_len - self.b64_len % 4 + }; + + let actual_decode_len = cmp::min( + b64_bytes_that_can_decode_into_buf, + b64_bytes_available_to_decode, + ); + self.decode_to_buf(actual_decode_len, buf) + } + } + } +} diff --git a/vendor/base64/src/read/decoder_tests.rs b/vendor/base64/src/read/decoder_tests.rs new file mode 100644 index 00000000000000..f343145744815b --- /dev/null +++ b/vendor/base64/src/read/decoder_tests.rs @@ -0,0 +1,487 @@ +use std::{ + cmp, + io::{self, Read as _}, + iter, +}; + +use rand::{Rng as _, RngCore as _}; + +use super::decoder::{DecoderReader, BUF_SIZE}; +use crate::{ + alphabet, + engine::{general_purpose::STANDARD, Engine, GeneralPurpose}, + tests::{random_alphabet, random_config, random_engine}, + DecodeError, PAD_BYTE, +}; + +#[test] +fn simple() { + let tests: &[(&[u8], &[u8])] = &[ + (&b"0"[..], &b"MA=="[..]), + (b"01", b"MDE="), + (b"012", b"MDEy"), + (b"0123", b"MDEyMw=="), + (b"01234", b"MDEyMzQ="), + (b"012345", b"MDEyMzQ1"), + (b"0123456", b"MDEyMzQ1Ng=="), + (b"01234567", b"MDEyMzQ1Njc="), + (b"012345678", b"MDEyMzQ1Njc4"), + (b"0123456789", b"MDEyMzQ1Njc4OQ=="), + ][..]; + + for (text_expected, base64data) in tests.iter() { + // Read n bytes at a time. + for n in 1..base64data.len() + 1 { + let mut wrapped_reader = io::Cursor::new(base64data); + let mut decoder = DecoderReader::new(&mut wrapped_reader, &STANDARD); + + // handle errors as you normally would + let mut text_got = Vec::new(); + let mut buffer = vec![0u8; n]; + while let Ok(read) = decoder.read(&mut buffer[..]) { + if read == 0 { + break; + } + text_got.extend_from_slice(&buffer[..read]); + } + + assert_eq!( + text_got, + *text_expected, + "\nGot: {}\nExpected: {}", + String::from_utf8_lossy(&text_got[..]), + String::from_utf8_lossy(text_expected) + ); + } + } +} + +// Make sure we error out on trailing junk. +#[test] +fn trailing_junk() { + let tests: &[&[u8]] = &[&b"MDEyMzQ1Njc4*!@#$%^&"[..], b"MDEyMzQ1Njc4OQ== "][..]; + + for base64data in tests.iter() { + // Read n bytes at a time. 
+ for n in 1..base64data.len() + 1 { + let mut wrapped_reader = io::Cursor::new(base64data); + let mut decoder = DecoderReader::new(&mut wrapped_reader, &STANDARD); + + // handle errors as you normally would + let mut buffer = vec![0u8; n]; + let mut saw_error = false; + loop { + match decoder.read(&mut buffer[..]) { + Err(_) => { + saw_error = true; + break; + } + Ok(0) => break, + Ok(_len) => (), + } + } + + assert!(saw_error); + } + } +} + +#[test] +fn handles_short_read_from_delegate() { + let mut rng = rand::thread_rng(); + let mut bytes = Vec::new(); + let mut b64 = String::new(); + let mut decoded = Vec::new(); + + for _ in 0..10_000 { + bytes.clear(); + b64.clear(); + decoded.clear(); + + let size = rng.gen_range(0..(10 * BUF_SIZE)); + bytes.extend(iter::repeat(0).take(size)); + bytes.truncate(size); + rng.fill_bytes(&mut bytes[..size]); + assert_eq!(size, bytes.len()); + + let engine = random_engine(&mut rng); + engine.encode_string(&bytes[..], &mut b64); + + let mut wrapped_reader = io::Cursor::new(b64.as_bytes()); + let mut short_reader = RandomShortRead { + delegate: &mut wrapped_reader, + rng: &mut rng, + }; + + let mut decoder = DecoderReader::new(&mut short_reader, &engine); + + let decoded_len = decoder.read_to_end(&mut decoded).unwrap(); + assert_eq!(size, decoded_len); + assert_eq!(&bytes[..], &decoded[..]); + } +} + +#[test] +fn read_in_short_increments() { + let mut rng = rand::thread_rng(); + let mut bytes = Vec::new(); + let mut b64 = String::new(); + let mut decoded = Vec::new(); + + for _ in 0..10_000 { + bytes.clear(); + b64.clear(); + decoded.clear(); + + let size = rng.gen_range(0..(10 * BUF_SIZE)); + bytes.extend(iter::repeat(0).take(size)); + // leave room to play around with larger buffers + decoded.extend(iter::repeat(0).take(size * 3)); + + rng.fill_bytes(&mut bytes[..]); + assert_eq!(size, bytes.len()); + + let engine = random_engine(&mut rng); + + engine.encode_string(&bytes[..], &mut b64); + + let mut wrapped_reader = io::Cursor::new(&b64[..]); + let mut decoder = DecoderReader::new(&mut wrapped_reader, &engine); + + consume_with_short_reads_and_validate(&mut rng, &bytes[..], &mut decoded, &mut decoder); + } +} + +#[test] +fn read_in_short_increments_with_short_delegate_reads() { + let mut rng = rand::thread_rng(); + let mut bytes = Vec::new(); + let mut b64 = String::new(); + let mut decoded = Vec::new(); + + for _ in 0..10_000 { + bytes.clear(); + b64.clear(); + decoded.clear(); + + let size = rng.gen_range(0..(10 * BUF_SIZE)); + bytes.extend(iter::repeat(0).take(size)); + // leave room to play around with larger buffers + decoded.extend(iter::repeat(0).take(size * 3)); + + rng.fill_bytes(&mut bytes[..]); + assert_eq!(size, bytes.len()); + + let engine = random_engine(&mut rng); + + engine.encode_string(&bytes[..], &mut b64); + + let mut base_reader = io::Cursor::new(&b64[..]); + let mut decoder = DecoderReader::new(&mut base_reader, &engine); + let mut short_reader = RandomShortRead { + delegate: &mut decoder, + rng: &mut rand::thread_rng(), + }; + + consume_with_short_reads_and_validate( + &mut rng, + &bytes[..], + &mut decoded, + &mut short_reader, + ); + } +} + +#[test] +fn reports_invalid_last_symbol_correctly() { + let mut rng = rand::thread_rng(); + let mut bytes = Vec::new(); + let mut b64 = String::new(); + let mut b64_bytes = Vec::new(); + let mut decoded = Vec::new(); + let mut bulk_decoded = Vec::new(); + + for _ in 0..1_000 { + bytes.clear(); + b64.clear(); + b64_bytes.clear(); + + let size = rng.gen_range(1..(10 * BUF_SIZE)); + 
bytes.extend(iter::repeat(0).take(size)); + decoded.extend(iter::repeat(0).take(size)); + rng.fill_bytes(&mut bytes[..]); + assert_eq!(size, bytes.len()); + + let config = random_config(&mut rng); + let alphabet = random_alphabet(&mut rng); + // changing padding will cause invalid padding errors when we twiddle the last byte + let engine = GeneralPurpose::new(alphabet, config.with_encode_padding(false)); + engine.encode_string(&bytes[..], &mut b64); + b64_bytes.extend(b64.bytes()); + assert_eq!(b64_bytes.len(), b64.len()); + + // change the last character to every possible symbol. Should behave the same as bulk + // decoding whether invalid or valid. + for &s1 in alphabet.symbols.iter() { + decoded.clear(); + bulk_decoded.clear(); + + // replace the last + *b64_bytes.last_mut().unwrap() = s1; + let bulk_res = engine.decode_vec(&b64_bytes[..], &mut bulk_decoded); + + let mut wrapped_reader = io::Cursor::new(&b64_bytes[..]); + let mut decoder = DecoderReader::new(&mut wrapped_reader, &engine); + + let stream_res = decoder.read_to_end(&mut decoded).map(|_| ()).map_err(|e| { + e.into_inner() + .and_then(|e| e.downcast::().ok()) + }); + + assert_eq!(bulk_res.map_err(|e| Some(Box::new(e))), stream_res); + } + } +} + +#[test] +fn reports_invalid_byte_correctly() { + let mut rng = rand::thread_rng(); + let mut bytes = Vec::new(); + let mut b64 = String::new(); + let mut stream_decoded = Vec::new(); + let mut bulk_decoded = Vec::new(); + + for _ in 0..10_000 { + bytes.clear(); + b64.clear(); + stream_decoded.clear(); + bulk_decoded.clear(); + + let size = rng.gen_range(1..(10 * BUF_SIZE)); + bytes.extend(iter::repeat(0).take(size)); + rng.fill_bytes(&mut bytes[..size]); + assert_eq!(size, bytes.len()); + + let engine = GeneralPurpose::new(&alphabet::STANDARD, random_config(&mut rng)); + + engine.encode_string(&bytes[..], &mut b64); + // replace one byte, somewhere, with '*', which is invalid + let bad_byte_pos = rng.gen_range(0..b64.len()); + let mut b64_bytes = b64.bytes().collect::>(); + b64_bytes[bad_byte_pos] = b'*'; + + let mut wrapped_reader = io::Cursor::new(b64_bytes.clone()); + let mut decoder = DecoderReader::new(&mut wrapped_reader, &engine); + + let read_decode_err = decoder + .read_to_end(&mut stream_decoded) + .map_err(|e| { + let kind = e.kind(); + let inner = e + .into_inner() + .and_then(|e| e.downcast::().ok()); + inner.map(|i| (*i, kind)) + }) + .err() + .and_then(|o| o); + + let bulk_decode_err = engine.decode_vec(&b64_bytes[..], &mut bulk_decoded).err(); + + // it's tricky to predict where the invalid data's offset will be since if it's in the last + // chunk it will be reported at the first padding location because it's treated as invalid + // padding. So, we just check that it's the same as it is for decoding all at once. 
+ assert_eq!( + bulk_decode_err.map(|e| (e, io::ErrorKind::InvalidData)), + read_decode_err + ); + } +} + +#[test] +fn internal_padding_error_with_short_read_concatenated_texts_invalid_byte_error() { + let mut rng = rand::thread_rng(); + let mut bytes = Vec::new(); + let mut b64 = String::new(); + let mut reader_decoded = Vec::new(); + let mut bulk_decoded = Vec::new(); + + // encodes with padding, requires that padding be present so we don't get InvalidPadding + // just because padding is there at all + let engine = STANDARD; + + for _ in 0..10_000 { + bytes.clear(); + b64.clear(); + reader_decoded.clear(); + bulk_decoded.clear(); + + // at least 2 bytes so there can be a split point between bytes + let size = rng.gen_range(2..(10 * BUF_SIZE)); + bytes.resize(size, 0); + rng.fill_bytes(&mut bytes[..size]); + + // Concatenate two valid b64s, yielding padding in the middle. + // This avoids scenarios that are challenging to assert on, like random padding location + // that might be InvalidLastSymbol when decoded at certain buffer sizes but InvalidByte + // when done all at once. + let split = loop { + // find a split point that will produce padding on the first part + let s = rng.gen_range(1..size); + if s % 3 != 0 { + // short enough to need padding + break s; + }; + }; + + engine.encode_string(&bytes[..split], &mut b64); + assert!(b64.contains('='), "split: {}, b64: {}", split, b64); + let bad_byte_pos = b64.find('=').unwrap(); + engine.encode_string(&bytes[split..], &mut b64); + let b64_bytes = b64.as_bytes(); + + // short read to make it plausible for padding to happen on a read boundary + let read_len = rng.gen_range(1..10); + let mut wrapped_reader = ShortRead { + max_read_len: read_len, + delegate: io::Cursor::new(&b64_bytes), + }; + + let mut decoder = DecoderReader::new(&mut wrapped_reader, &engine); + + let read_decode_err = decoder + .read_to_end(&mut reader_decoded) + .map_err(|e| { + *e.into_inner() + .and_then(|e| e.downcast::().ok()) + .unwrap() + }) + .unwrap_err(); + + let bulk_decode_err = engine.decode_vec(b64_bytes, &mut bulk_decoded).unwrap_err(); + + assert_eq!( + bulk_decode_err, + read_decode_err, + "read len: {}, bad byte pos: {}, b64: {}", + read_len, + bad_byte_pos, + std::str::from_utf8(b64_bytes).unwrap() + ); + assert_eq!( + DecodeError::InvalidByte( + split / 3 * 4 + + match split % 3 { + 1 => 2, + 2 => 3, + _ => unreachable!(), + }, + PAD_BYTE + ), + read_decode_err + ); + } +} + +#[test] +fn internal_padding_anywhere_error() { + let mut rng = rand::thread_rng(); + let mut bytes = Vec::new(); + let mut b64 = String::new(); + let mut reader_decoded = Vec::new(); + + // encodes with padding, requires that padding be present so we don't get InvalidPadding + // just because padding is there at all + let engine = STANDARD; + + for _ in 0..10_000 { + bytes.clear(); + b64.clear(); + reader_decoded.clear(); + + bytes.resize(10 * BUF_SIZE, 0); + rng.fill_bytes(&mut bytes[..]); + + // Just shove a padding byte in there somewhere. + // The specific error to expect is challenging to predict precisely because it + // will vary based on the position of the padding in the quad and the read buffer + // length, but SOMETHING should go wrong. 
+ + engine.encode_string(&bytes[..], &mut b64); + let mut b64_bytes = b64.as_bytes().to_vec(); + // put padding somewhere other than the last quad + b64_bytes[rng.gen_range(0..bytes.len() - 4)] = PAD_BYTE; + + // short read to make it plausible for padding to happen on a read boundary + let read_len = rng.gen_range(1..10); + let mut wrapped_reader = ShortRead { + max_read_len: read_len, + delegate: io::Cursor::new(&b64_bytes), + }; + + let mut decoder = DecoderReader::new(&mut wrapped_reader, &engine); + + let result = decoder.read_to_end(&mut reader_decoded); + assert!(result.is_err()); + } +} + +fn consume_with_short_reads_and_validate( + rng: &mut rand::rngs::ThreadRng, + expected_bytes: &[u8], + decoded: &mut [u8], + short_reader: &mut R, +) { + let mut total_read = 0_usize; + loop { + assert!( + total_read <= expected_bytes.len(), + "tr {} size {}", + total_read, + expected_bytes.len() + ); + if total_read == expected_bytes.len() { + assert_eq!(expected_bytes, &decoded[..total_read]); + // should be done + assert_eq!(0, short_reader.read(&mut *decoded).unwrap()); + // didn't write anything + assert_eq!(expected_bytes, &decoded[..total_read]); + + break; + } + let decode_len = rng.gen_range(1..cmp::max(2, expected_bytes.len() * 2)); + + let read = short_reader + .read(&mut decoded[total_read..total_read + decode_len]) + .unwrap(); + total_read += read; + } +} + +/// Limits how many bytes a reader will provide in each read call. +/// Useful for shaking out code that may work fine only with typical input sources that always fill +/// the buffer. +struct RandomShortRead<'a, 'b, R: io::Read, N: rand::Rng> { + delegate: &'b mut R, + rng: &'a mut N, +} + +impl<'a, 'b, R: io::Read, N: rand::Rng> io::Read for RandomShortRead<'a, 'b, R, N> { + fn read(&mut self, buf: &mut [u8]) -> Result { + // avoid 0 since it means EOF for non-empty buffers + let effective_len = cmp::min(self.rng.gen_range(1..20), buf.len()); + + self.delegate.read(&mut buf[..effective_len]) + } +} + +struct ShortRead { + delegate: R, + max_read_len: usize, +} + +impl io::Read for ShortRead { + fn read(&mut self, buf: &mut [u8]) -> io::Result { + let len = self.max_read_len.max(buf.len()); + self.delegate.read(&mut buf[..len]) + } +} diff --git a/vendor/base64/src/read/mod.rs b/vendor/base64/src/read/mod.rs new file mode 100644 index 00000000000000..856064481cba17 --- /dev/null +++ b/vendor/base64/src/read/mod.rs @@ -0,0 +1,6 @@ +//! Implementations of `io::Read` to transparently decode base64. 
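+//!
+//! A minimal usage sketch (added here for illustration; assumes the `std` feature so that
+//! this module and [`DecoderReader`][DecoderReader] are available):
+//!
+//! ```
+//! use std::io::Read as _;
+//! use base64::{engine::general_purpose::STANDARD, read::DecoderReader};
+//!
+//! let mut decoder = DecoderReader::new(&b"aGVsbG8="[..], &STANDARD);
+//! let mut decoded = Vec::new();
+//! decoder.read_to_end(&mut decoded).unwrap();
+//! assert_eq!(b"hello", &decoded[..]);
+//! ```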
+mod decoder;
+pub use self::decoder::DecoderReader;
+
+#[cfg(test)]
+mod decoder_tests;
diff --git a/vendor/base64/src/tests.rs b/vendor/base64/src/tests.rs
new file mode 100644
index 00000000000000..7083b5433fe2ef
--- /dev/null
+++ b/vendor/base64/src/tests.rs
@@ -0,0 +1,117 @@
+use std::str;
+
+use rand::{
+    distributions,
+    distributions::{Distribution as _, Uniform},
+    seq::SliceRandom,
+    Rng, SeedableRng,
+};
+
+use crate::{
+    alphabet,
+    encode::encoded_len,
+    engine::{
+        general_purpose::{GeneralPurpose, GeneralPurposeConfig},
+        Config, DecodePaddingMode, Engine,
+    },
+};
+
+#[test]
+fn roundtrip_random_config_short() {
+    // exercise the slower encode/decode routines that operate on shorter buffers more vigorously
+    roundtrip_random_config(Uniform::new(0, 50), 10_000);
+}
+
+#[test]
+fn roundtrip_random_config_long() {
+    roundtrip_random_config(Uniform::new(0, 1000), 10_000);
+}
+
+pub fn assert_encode_sanity(encoded: &str, padded: bool, input_len: usize) {
+    let input_rem = input_len % 3;
+    let expected_padding_len = if input_rem > 0 {
+        if padded {
+            3 - input_rem
+        } else {
+            0
+        }
+    } else {
+        0
+    };
+
+    let expected_encoded_len = encoded_len(input_len, padded).unwrap();
+
+    assert_eq!(expected_encoded_len, encoded.len());
+
+    let padding_len = encoded.chars().filter(|&c| c == '=').count();
+
+    assert_eq!(expected_padding_len, padding_len);
+
+    let _ = str::from_utf8(encoded.as_bytes()).expect("Base64 should be valid utf8");
+}
+
+fn roundtrip_random_config(input_len_range: Uniform<usize>, iterations: u32) {
+    let mut input_buf: Vec<u8> = Vec::new();
+    let mut encoded_buf = String::new();
+    let mut rng = rand::rngs::SmallRng::from_entropy();
+
+    for _ in 0..iterations {
+        input_buf.clear();
+        encoded_buf.clear();
+
+        let input_len = input_len_range.sample(&mut rng);
+
+        let engine = random_engine(&mut rng);
+
+        for _ in 0..input_len {
+            input_buf.push(rng.gen());
+        }
+
+        engine.encode_string(&input_buf, &mut encoded_buf);
+
+        assert_encode_sanity(&encoded_buf, engine.config().encode_padding(), input_len);
+
+        assert_eq!(input_buf, engine.decode(&encoded_buf).unwrap());
+    }
+}
+
+pub fn random_config<R: Rng>(rng: &mut R) -> GeneralPurposeConfig {
+    let mode = rng.gen();
+    GeneralPurposeConfig::new()
+        .with_encode_padding(match mode {
+            DecodePaddingMode::Indifferent => rng.gen(),
+            DecodePaddingMode::RequireCanonical => true,
+            DecodePaddingMode::RequireNone => false,
+        })
+        .with_decode_padding_mode(mode)
+        .with_decode_allow_trailing_bits(rng.gen())
+}
+
+impl distributions::Distribution<DecodePaddingMode> for distributions::Standard {
+    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> DecodePaddingMode {
+        match rng.gen_range(0..=2) {
+            0 => DecodePaddingMode::Indifferent,
+            1 => DecodePaddingMode::RequireCanonical,
+            _ => DecodePaddingMode::RequireNone,
+        }
+    }
+}
+
+pub fn random_alphabet<R: Rng>(rng: &mut R) -> &'static alphabet::Alphabet {
+    ALPHABETS.choose(rng).unwrap()
+}
+
+pub fn random_engine<R: Rng>(rng: &mut R) -> GeneralPurpose {
+    let alphabet = random_alphabet(rng);
+    let config = random_config(rng);
+    GeneralPurpose::new(alphabet, config)
+}
+
+const ALPHABETS: &[alphabet::Alphabet] = &[
+    alphabet::URL_SAFE,
+    alphabet::STANDARD,
+    alphabet::CRYPT,
+    alphabet::BCRYPT,
+    alphabet::IMAP_MUTF7,
+    alphabet::BIN_HEX,
+];
diff --git a/vendor/base64/src/write/encoder.rs b/vendor/base64/src/write/encoder.rs
new file mode 100644
index 00000000000000..1c19bb42ab73a1
--- /dev/null
+++ b/vendor/base64/src/write/encoder.rs
@@ -0,0 +1,407 @@
+use crate::engine::Engine;
+use std::{
+    cmp, fmt, io,
+    io::{ErrorKind, Result},
+};
+
+pub(crate) 
const BUF_SIZE: usize = 1024; +/// The most bytes whose encoding will fit in `BUF_SIZE` +const MAX_INPUT_LEN: usize = BUF_SIZE / 4 * 3; +// 3 bytes of input = 4 bytes of base64, always (because we don't allow line wrapping) +const MIN_ENCODE_CHUNK_SIZE: usize = 3; + +/// A `Write` implementation that base64 encodes data before delegating to the wrapped writer. +/// +/// Because base64 has special handling for the end of the input data (padding, etc), there's a +/// `finish()` method on this type that encodes any leftover input bytes and adds padding if +/// appropriate. It's called automatically when deallocated (see the `Drop` implementation), but +/// any error that occurs when invoking the underlying writer will be suppressed. If you want to +/// handle such errors, call `finish()` yourself. +/// +/// # Examples +/// +/// ``` +/// use std::io::Write; +/// use base64::engine::general_purpose; +/// +/// // use a vec as the simplest possible `Write` -- in real code this is probably a file, etc. +/// let mut enc = base64::write::EncoderWriter::new(Vec::new(), &general_purpose::STANDARD); +/// +/// // handle errors as you normally would +/// enc.write_all(b"asdf").unwrap(); +/// +/// // could leave this out to be called by Drop, if you don't care +/// // about handling errors or getting the delegate writer back +/// let delegate = enc.finish().unwrap(); +/// +/// // base64 was written to the writer +/// assert_eq!(b"YXNkZg==", &delegate[..]); +/// +/// ``` +/// +/// # Panics +/// +/// Calling `write()` (or related methods) or `finish()` after `finish()` has completed without +/// error is invalid and will panic. +/// +/// # Errors +/// +/// Base64 encoding itself does not generate errors, but errors from the wrapped writer will be +/// returned as per the contract of `Write`. +/// +/// # Performance +/// +/// It has some minor performance loss compared to encoding slices (a couple percent). +/// It does not do any heap allocation. +/// +/// # Limitations +/// +/// Owing to the specification of the `write` and `flush` methods on the `Write` trait and their +/// implications for a buffering implementation, these methods may not behave as expected. In +/// particular, calling `write_all` on this interface may fail with `io::ErrorKind::WriteZero`. +/// See the documentation of the `Write` trait implementation for further details. +pub struct EncoderWriter<'e, E: Engine, W: io::Write> { + engine: &'e E, + /// Where encoded data is written to. It's an Option as it's None immediately before Drop is + /// called so that finish() can return the underlying writer. None implies that finish() has + /// been called successfully. + delegate: Option, + /// Holds a partial chunk, if any, after the last `write()`, so that we may then fill the chunk + /// with the next `write()`, encode it, then proceed with the rest of the input normally. + extra_input: [u8; MIN_ENCODE_CHUNK_SIZE], + /// How much of `extra` is occupied, in `[0, MIN_ENCODE_CHUNK_SIZE]`. + extra_input_occupied_len: usize, + /// Buffer to encode into. May hold leftover encoded bytes from a previous write call that the underlying writer + /// did not write last time. 
+ output: [u8; BUF_SIZE], + /// How much of `output` is occupied with encoded data that couldn't be written last time + output_occupied_len: usize, + /// panic safety: don't write again in destructor if writer panicked while we were writing to it + panicked: bool, +} + +impl<'e, E: Engine, W: io::Write> fmt::Debug for EncoderWriter<'e, E, W> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!( + f, + "extra_input: {:?} extra_input_occupied_len:{:?} output[..5]: {:?} output_occupied_len: {:?}", + self.extra_input, + self.extra_input_occupied_len, + &self.output[0..5], + self.output_occupied_len + ) + } +} + +impl<'e, E: Engine, W: io::Write> EncoderWriter<'e, E, W> { + /// Create a new encoder that will write to the provided delegate writer. + pub fn new(delegate: W, engine: &'e E) -> EncoderWriter<'e, E, W> { + EncoderWriter { + engine, + delegate: Some(delegate), + extra_input: [0u8; MIN_ENCODE_CHUNK_SIZE], + extra_input_occupied_len: 0, + output: [0u8; BUF_SIZE], + output_occupied_len: 0, + panicked: false, + } + } + + /// Encode all remaining buffered data and write it, including any trailing incomplete input + /// triples and associated padding. + /// + /// Once this succeeds, no further writes or calls to this method are allowed. + /// + /// This may write to the delegate writer multiple times if the delegate writer does not accept + /// all input provided to its `write` each invocation. + /// + /// If you don't care about error handling, it is not necessary to call this function, as the + /// equivalent finalization is done by the Drop impl. + /// + /// Returns the writer that this was constructed around. + /// + /// # Errors + /// + /// The first error that is not of `ErrorKind::Interrupted` will be returned. + pub fn finish(&mut self) -> Result { + // If we could consume self in finish(), we wouldn't have to worry about this case, but + // finish() is retryable in the face of I/O errors, so we can't consume here. + if self.delegate.is_none() { + panic!("Encoder has already had finish() called"); + }; + + self.write_final_leftovers()?; + + let writer = self.delegate.take().expect("Writer must be present"); + + Ok(writer) + } + + /// Write any remaining buffered data to the delegate writer. + fn write_final_leftovers(&mut self) -> Result<()> { + if self.delegate.is_none() { + // finish() has already successfully called this, and we are now in drop() with a None + // writer, so just no-op + return Ok(()); + } + + self.write_all_encoded_output()?; + + if self.extra_input_occupied_len > 0 { + let encoded_len = self + .engine + .encode_slice( + &self.extra_input[..self.extra_input_occupied_len], + &mut self.output[..], + ) + .expect("buffer is large enough"); + + self.output_occupied_len = encoded_len; + + self.write_all_encoded_output()?; + + // write succeeded, do not write the encoding of extra again if finish() is retried + self.extra_input_occupied_len = 0; + } + + Ok(()) + } + + /// Write as much of the encoded output to the delegate writer as it will accept, and store the + /// leftovers to be attempted at the next write() call. Updates `self.output_occupied_len`. + /// + /// # Errors + /// + /// Errors from the delegate writer are returned. In the case of an error, + /// `self.output_occupied_len` will not be updated, as errors from `write` are specified to mean + /// that no write took place. 
+ fn write_to_delegate(&mut self, current_output_len: usize) -> Result<()> { + self.panicked = true; + let res = self + .delegate + .as_mut() + .expect("Writer must be present") + .write(&self.output[..current_output_len]); + self.panicked = false; + + res.map(|consumed| { + debug_assert!(consumed <= current_output_len); + + if consumed < current_output_len { + self.output_occupied_len = current_output_len.checked_sub(consumed).unwrap(); + // If we're blocking on I/O, the minor inefficiency of copying bytes to the + // start of the buffer is the least of our concerns... + // TODO Rotate moves more than we need to; copy_within now stable. + self.output.rotate_left(consumed); + } else { + self.output_occupied_len = 0; + } + }) + } + + /// Write all buffered encoded output. If this returns `Ok`, `self.output_occupied_len` is `0`. + /// + /// This is basically write_all for the remaining buffered data but without the undesirable + /// abort-on-`Ok(0)` behavior. + /// + /// # Errors + /// + /// Any error emitted by the delegate writer abort the write loop and is returned, unless it's + /// `Interrupted`, in which case the error is ignored and writes will continue. + fn write_all_encoded_output(&mut self) -> Result<()> { + while self.output_occupied_len > 0 { + let remaining_len = self.output_occupied_len; + match self.write_to_delegate(remaining_len) { + // try again on interrupts ala write_all + Err(ref e) if e.kind() == ErrorKind::Interrupted => {} + // other errors return + Err(e) => return Err(e), + // success no-ops because remaining length is already updated + Ok(_) => {} + }; + } + + debug_assert_eq!(0, self.output_occupied_len); + Ok(()) + } + + /// Unwraps this `EncoderWriter`, returning the base writer it writes base64 encoded output + /// to. + /// + /// Normally this method should not be needed, since `finish()` returns the inner writer if + /// it completes successfully. That will also ensure all data has been flushed, which the + /// `into_inner()` function does *not* do. + /// + /// Calling this method after `finish()` has completed successfully will panic, since the + /// writer has already been returned. + /// + /// This method may be useful if the writer implements additional APIs beyond the `Write` + /// trait. Note that the inner writer might be in an error state or have an incomplete + /// base64 string written to it. + pub fn into_inner(mut self) -> W { + self.delegate + .take() + .expect("Encoder has already had finish() called") + } +} + +impl<'e, E: Engine, W: io::Write> io::Write for EncoderWriter<'e, E, W> { + /// Encode input and then write to the delegate writer. + /// + /// Under non-error circumstances, this returns `Ok` with the value being the number of bytes + /// of `input` consumed. The value may be `0`, which interacts poorly with `write_all`, which + /// interprets `Ok(0)` as an error, despite it being allowed by the contract of `write`. See + /// for more on that. + /// + /// If the previous call to `write` provided more (encoded) data than the delegate writer could + /// accept in a single call to its `write`, the remaining data is buffered. As long as buffered + /// data is present, subsequent calls to `write` will try to write the remaining buffered data + /// to the delegate and return either `Ok(0)` -- and therefore not consume any of `input` -- or + /// an error. + /// + /// # Errors + /// + /// Any errors emitted by the delegate writer are returned. 
+ fn write(&mut self, input: &[u8]) -> Result { + if self.delegate.is_none() { + panic!("Cannot write more after calling finish()"); + } + + if input.is_empty() { + return Ok(0); + } + + // The contract of `Write::write` places some constraints on this implementation: + // - a call to `write()` represents at most one call to a wrapped `Write`, so we can't + // iterate over the input and encode multiple chunks. + // - Errors mean that "no bytes were written to this writer", so we need to reset the + // internal state to what it was before the error occurred + + // before reading any input, write any leftover encoded output from last time + if self.output_occupied_len > 0 { + let current_len = self.output_occupied_len; + return self + .write_to_delegate(current_len) + // did not read any input + .map(|_| 0); + } + + debug_assert_eq!(0, self.output_occupied_len); + + // how many bytes, if any, were read into `extra` to create a triple to encode + let mut extra_input_read_len = 0; + let mut input = input; + + let orig_extra_len = self.extra_input_occupied_len; + + let mut encoded_size = 0; + // always a multiple of MIN_ENCODE_CHUNK_SIZE + let mut max_input_len = MAX_INPUT_LEN; + + // process leftover un-encoded input from last write + if self.extra_input_occupied_len > 0 { + debug_assert!(self.extra_input_occupied_len < 3); + if input.len() + self.extra_input_occupied_len >= MIN_ENCODE_CHUNK_SIZE { + // Fill up `extra`, encode that into `output`, and consume as much of the rest of + // `input` as possible. + // We could write just the encoding of `extra` by itself but then we'd have to + // return after writing only 4 bytes, which is inefficient if the underlying writer + // would make a syscall. + extra_input_read_len = MIN_ENCODE_CHUNK_SIZE - self.extra_input_occupied_len; + debug_assert!(extra_input_read_len > 0); + // overwrite only bytes that weren't already used. If we need to rollback extra_len + // (when the subsequent write errors), the old leading bytes will still be there. + self.extra_input[self.extra_input_occupied_len..MIN_ENCODE_CHUNK_SIZE] + .copy_from_slice(&input[0..extra_input_read_len]); + + let len = self.engine.internal_encode( + &self.extra_input[0..MIN_ENCODE_CHUNK_SIZE], + &mut self.output[..], + ); + debug_assert_eq!(4, len); + + input = &input[extra_input_read_len..]; + + // consider extra to be used up, since we encoded it + self.extra_input_occupied_len = 0; + // don't clobber where we just encoded to + encoded_size = 4; + // and don't read more than can be encoded + max_input_len = MAX_INPUT_LEN - MIN_ENCODE_CHUNK_SIZE; + + // fall through to normal encoding + } else { + // `extra` and `input` are non empty, but `|extra| + |input| < 3`, so there must be + // 1 byte in each. 
+ debug_assert_eq!(1, input.len()); + debug_assert_eq!(1, self.extra_input_occupied_len); + + self.extra_input[self.extra_input_occupied_len] = input[0]; + self.extra_input_occupied_len += 1; + return Ok(1); + }; + } else if input.len() < MIN_ENCODE_CHUNK_SIZE { + // `extra` is empty, and `input` fits inside it + self.extra_input[0..input.len()].copy_from_slice(input); + self.extra_input_occupied_len = input.len(); + return Ok(input.len()); + }; + + // either 0 or 1 complete chunks encoded from extra + debug_assert!(encoded_size == 0 || encoded_size == 4); + debug_assert!( + // didn't encode extra input + MAX_INPUT_LEN == max_input_len + // encoded one triple + || MAX_INPUT_LEN == max_input_len + MIN_ENCODE_CHUNK_SIZE + ); + + // encode complete triples only + let input_complete_chunks_len = input.len() - (input.len() % MIN_ENCODE_CHUNK_SIZE); + let input_chunks_to_encode_len = cmp::min(input_complete_chunks_len, max_input_len); + debug_assert_eq!(0, max_input_len % MIN_ENCODE_CHUNK_SIZE); + debug_assert_eq!(0, input_chunks_to_encode_len % MIN_ENCODE_CHUNK_SIZE); + + encoded_size += self.engine.internal_encode( + &input[..(input_chunks_to_encode_len)], + &mut self.output[encoded_size..], + ); + + // not updating `self.output_occupied_len` here because if the below write fails, it should + // "never take place" -- the buffer contents we encoded are ignored and perhaps retried + // later, if the consumer chooses. + + self.write_to_delegate(encoded_size) + // no matter whether we wrote the full encoded buffer or not, we consumed the same + // input + .map(|_| extra_input_read_len + input_chunks_to_encode_len) + .map_err(|e| { + // in case we filled and encoded `extra`, reset extra_len + self.extra_input_occupied_len = orig_extra_len; + + e + }) + } + + /// Because this is usually treated as OK to call multiple times, it will *not* flush any + /// incomplete chunks of input or write padding. + /// # Errors + /// + /// The first error that is not of [`ErrorKind::Interrupted`] will be returned. + fn flush(&mut self) -> Result<()> { + self.write_all_encoded_output()?; + self.delegate + .as_mut() + .expect("Writer must be present") + .flush() + } +} + +impl<'e, E: Engine, W: io::Write> Drop for EncoderWriter<'e, E, W> { + fn drop(&mut self) { + if !self.panicked { + // like `BufWriter`, ignore errors during drop + let _ = self.write_final_leftovers(); + } + } +} diff --git a/vendor/base64/src/write/encoder_string_writer.rs b/vendor/base64/src/write/encoder_string_writer.rs new file mode 100644 index 00000000000000..9c02bcde84fb4d --- /dev/null +++ b/vendor/base64/src/write/encoder_string_writer.rs @@ -0,0 +1,207 @@ +use super::encoder::EncoderWriter; +use crate::engine::Engine; +use std::io; + +/// A `Write` implementation that base64-encodes data using the provided config and accumulates the +/// resulting base64 utf8 `&str` in a [StrConsumer] implementation (typically `String`), which is +/// then exposed via `into_inner()`. 
+/// +/// # Examples +/// +/// Buffer base64 in a new String: +/// +/// ``` +/// use std::io::Write; +/// use base64::engine::general_purpose; +/// +/// let mut enc = base64::write::EncoderStringWriter::new(&general_purpose::STANDARD); +/// +/// enc.write_all(b"asdf").unwrap(); +/// +/// // get the resulting String +/// let b64_string = enc.into_inner(); +/// +/// assert_eq!("YXNkZg==", &b64_string); +/// ``` +/// +/// Or, append to an existing `String`, which implements `StrConsumer`: +/// +/// ``` +/// use std::io::Write; +/// use base64::engine::general_purpose; +/// +/// let mut buf = String::from("base64: "); +/// +/// let mut enc = base64::write::EncoderStringWriter::from_consumer( +/// &mut buf, +/// &general_purpose::STANDARD); +/// +/// enc.write_all(b"asdf").unwrap(); +/// +/// // release the &mut reference on buf +/// let _ = enc.into_inner(); +/// +/// assert_eq!("base64: YXNkZg==", &buf); +/// ``` +/// +/// # Performance +/// +/// Because it has to validate that the base64 is UTF-8, it is about 80% as fast as writing plain +/// bytes to a `io::Write`. +pub struct EncoderStringWriter<'e, E: Engine, S: StrConsumer> { + encoder: EncoderWriter<'e, E, Utf8SingleCodeUnitWriter>, +} + +impl<'e, E: Engine, S: StrConsumer> EncoderStringWriter<'e, E, S> { + /// Create a EncoderStringWriter that will append to the provided `StrConsumer`. + pub fn from_consumer(str_consumer: S, engine: &'e E) -> Self { + EncoderStringWriter { + encoder: EncoderWriter::new(Utf8SingleCodeUnitWriter { str_consumer }, engine), + } + } + + /// Encode all remaining buffered data, including any trailing incomplete input triples and + /// associated padding. + /// + /// Returns the base64-encoded form of the accumulated written data. + pub fn into_inner(mut self) -> S { + self.encoder + .finish() + .expect("Writing to a consumer should never fail") + .str_consumer + } +} + +impl<'e, E: Engine> EncoderStringWriter<'e, E, String> { + /// Create a EncoderStringWriter that will encode into a new `String` with the provided config. + pub fn new(engine: &'e E) -> Self { + EncoderStringWriter::from_consumer(String::new(), engine) + } +} + +impl<'e, E: Engine, S: StrConsumer> io::Write for EncoderStringWriter<'e, E, S> { + fn write(&mut self, buf: &[u8]) -> io::Result { + self.encoder.write(buf) + } + + fn flush(&mut self) -> io::Result<()> { + self.encoder.flush() + } +} + +/// An abstraction around consuming `str`s produced by base64 encoding. +pub trait StrConsumer { + /// Consume the base64 encoded data in `buf` + fn consume(&mut self, buf: &str); +} + +/// As for io::Write, `StrConsumer` is implemented automatically for `&mut S`. +impl StrConsumer for &mut S { + fn consume(&mut self, buf: &str) { + (**self).consume(buf); + } +} + +/// Pushes the str onto the end of the String +impl StrConsumer for String { + fn consume(&mut self, buf: &str) { + self.push_str(buf); + } +} + +/// A `Write` that only can handle bytes that are valid single-byte UTF-8 code units. +/// +/// This is safe because we only use it when writing base64, which is always valid UTF-8. 
+struct Utf8SingleCodeUnitWriter { + str_consumer: S, +} + +impl io::Write for Utf8SingleCodeUnitWriter { + fn write(&mut self, buf: &[u8]) -> io::Result { + // Because we expect all input to be valid utf-8 individual bytes, we can encode any buffer + // length + let s = std::str::from_utf8(buf).expect("Input must be valid UTF-8"); + + self.str_consumer.consume(s); + + Ok(buf.len()) + } + + fn flush(&mut self) -> io::Result<()> { + // no op + Ok(()) + } +} + +#[cfg(test)] +mod tests { + use crate::{ + engine::Engine, tests::random_engine, write::encoder_string_writer::EncoderStringWriter, + }; + use rand::Rng; + use std::cmp; + use std::io::Write; + + #[test] + fn every_possible_split_of_input() { + let mut rng = rand::thread_rng(); + let mut orig_data = Vec::::new(); + let mut normal_encoded = String::new(); + + let size = 5_000; + + for i in 0..size { + orig_data.clear(); + normal_encoded.clear(); + + orig_data.resize(size, 0); + rng.fill(&mut orig_data[..]); + + let engine = random_engine(&mut rng); + engine.encode_string(&orig_data, &mut normal_encoded); + + let mut stream_encoder = EncoderStringWriter::new(&engine); + // Write the first i bytes, then the rest + stream_encoder.write_all(&orig_data[0..i]).unwrap(); + stream_encoder.write_all(&orig_data[i..]).unwrap(); + + let stream_encoded = stream_encoder.into_inner(); + + assert_eq!(normal_encoded, stream_encoded); + } + } + #[test] + fn incremental_writes() { + let mut rng = rand::thread_rng(); + let mut orig_data = Vec::::new(); + let mut normal_encoded = String::new(); + + let size = 5_000; + + for _ in 0..size { + orig_data.clear(); + normal_encoded.clear(); + + orig_data.resize(size, 0); + rng.fill(&mut orig_data[..]); + + let engine = random_engine(&mut rng); + engine.encode_string(&orig_data, &mut normal_encoded); + + let mut stream_encoder = EncoderStringWriter::new(&engine); + // write small nibbles of data + let mut offset = 0; + while offset < size { + let nibble_size = cmp::min(rng.gen_range(0..=64), size - offset); + let len = stream_encoder + .write(&orig_data[offset..offset + nibble_size]) + .unwrap(); + offset += len; + } + + let stream_encoded = stream_encoder.into_inner(); + + assert_eq!(normal_encoded, stream_encoded); + } + } +} diff --git a/vendor/base64/src/write/encoder_tests.rs b/vendor/base64/src/write/encoder_tests.rs new file mode 100644 index 00000000000000..1f1a1650a6b47d --- /dev/null +++ b/vendor/base64/src/write/encoder_tests.rs @@ -0,0 +1,554 @@ +use std::io::{Cursor, Write}; +use std::{cmp, io, str}; + +use rand::Rng; + +use crate::{ + alphabet::{STANDARD, URL_SAFE}, + engine::{ + general_purpose::{GeneralPurpose, NO_PAD, PAD}, + Engine, + }, + tests::random_engine, +}; + +use super::EncoderWriter; + +const URL_SAFE_ENGINE: GeneralPurpose = GeneralPurpose::new(&URL_SAFE, PAD); +const NO_PAD_ENGINE: GeneralPurpose = GeneralPurpose::new(&STANDARD, NO_PAD); + +#[test] +fn encode_three_bytes() { + let mut c = Cursor::new(Vec::new()); + { + let mut enc = EncoderWriter::new(&mut c, &URL_SAFE_ENGINE); + + let sz = enc.write(b"abc").unwrap(); + assert_eq!(sz, 3); + } + assert_eq!(&c.get_ref()[..], URL_SAFE_ENGINE.encode("abc").as_bytes()); +} + +#[test] +fn encode_nine_bytes_two_writes() { + let mut c = Cursor::new(Vec::new()); + { + let mut enc = EncoderWriter::new(&mut c, &URL_SAFE_ENGINE); + + let sz = enc.write(b"abcdef").unwrap(); + assert_eq!(sz, 6); + let sz = enc.write(b"ghi").unwrap(); + assert_eq!(sz, 3); + } + assert_eq!( + &c.get_ref()[..], + URL_SAFE_ENGINE.encode("abcdefghi").as_bytes() + ); +} 
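+
+// Illustrative roundtrip sketch (not part of the upstream base64 test suite): data written
+// through `EncoderWriter` should decode back to the original bytes via `DecoderReader`.
+// The test name and input are arbitrary; only APIs shown elsewhere in this file are used.
+#[test]
+fn encoder_writer_output_roundtrips_through_decoder_reader() {
+    use std::io::Read as _;
+
+    let mut c = Cursor::new(Vec::new());
+    {
+        let mut enc = EncoderWriter::new(&mut c, &URL_SAFE_ENGINE);
+        enc.write_all(b"round trip me").unwrap();
+        // finish() encodes the trailing partial chunk and writes any padding
+        let _ = enc.finish().unwrap();
+    }
+
+    let mut decoder = crate::read::DecoderReader::new(&c.get_ref()[..], &URL_SAFE_ENGINE);
+    let mut decoded = Vec::new();
+    decoder.read_to_end(&mut decoded).unwrap();
+    assert_eq!(b"round trip me", &decoded[..]);
+}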
+ +#[test] +fn encode_one_then_two_bytes() { + let mut c = Cursor::new(Vec::new()); + { + let mut enc = EncoderWriter::new(&mut c, &URL_SAFE_ENGINE); + + let sz = enc.write(b"a").unwrap(); + assert_eq!(sz, 1); + let sz = enc.write(b"bc").unwrap(); + assert_eq!(sz, 2); + } + assert_eq!(&c.get_ref()[..], URL_SAFE_ENGINE.encode("abc").as_bytes()); +} + +#[test] +fn encode_one_then_five_bytes() { + let mut c = Cursor::new(Vec::new()); + { + let mut enc = EncoderWriter::new(&mut c, &URL_SAFE_ENGINE); + + let sz = enc.write(b"a").unwrap(); + assert_eq!(sz, 1); + let sz = enc.write(b"bcdef").unwrap(); + assert_eq!(sz, 5); + } + assert_eq!( + &c.get_ref()[..], + URL_SAFE_ENGINE.encode("abcdef").as_bytes() + ); +} + +#[test] +fn encode_1_2_3_bytes() { + let mut c = Cursor::new(Vec::new()); + { + let mut enc = EncoderWriter::new(&mut c, &URL_SAFE_ENGINE); + + let sz = enc.write(b"a").unwrap(); + assert_eq!(sz, 1); + let sz = enc.write(b"bc").unwrap(); + assert_eq!(sz, 2); + let sz = enc.write(b"def").unwrap(); + assert_eq!(sz, 3); + } + assert_eq!( + &c.get_ref()[..], + URL_SAFE_ENGINE.encode("abcdef").as_bytes() + ); +} + +#[test] +fn encode_with_padding() { + let mut c = Cursor::new(Vec::new()); + { + let mut enc = EncoderWriter::new(&mut c, &URL_SAFE_ENGINE); + + enc.write_all(b"abcd").unwrap(); + + enc.flush().unwrap(); + } + assert_eq!(&c.get_ref()[..], URL_SAFE_ENGINE.encode("abcd").as_bytes()); +} + +#[test] +fn encode_with_padding_multiple_writes() { + let mut c = Cursor::new(Vec::new()); + { + let mut enc = EncoderWriter::new(&mut c, &URL_SAFE_ENGINE); + + assert_eq!(1, enc.write(b"a").unwrap()); + assert_eq!(2, enc.write(b"bc").unwrap()); + assert_eq!(3, enc.write(b"def").unwrap()); + assert_eq!(1, enc.write(b"g").unwrap()); + + enc.flush().unwrap(); + } + assert_eq!( + &c.get_ref()[..], + URL_SAFE_ENGINE.encode("abcdefg").as_bytes() + ); +} + +#[test] +fn finish_writes_extra_byte() { + let mut c = Cursor::new(Vec::new()); + { + let mut enc = EncoderWriter::new(&mut c, &URL_SAFE_ENGINE); + + assert_eq!(6, enc.write(b"abcdef").unwrap()); + + // will be in extra + assert_eq!(1, enc.write(b"g").unwrap()); + + // 1 trailing byte = 2 encoded chars + let _ = enc.finish().unwrap(); + } + assert_eq!( + &c.get_ref()[..], + URL_SAFE_ENGINE.encode("abcdefg").as_bytes() + ); +} + +#[test] +fn write_partial_chunk_encodes_partial_chunk() { + let mut c = Cursor::new(Vec::new()); + { + let mut enc = EncoderWriter::new(&mut c, &NO_PAD_ENGINE); + + // nothing encoded yet + assert_eq!(2, enc.write(b"ab").unwrap()); + // encoded here + let _ = enc.finish().unwrap(); + } + assert_eq!(&c.get_ref()[..], NO_PAD_ENGINE.encode("ab").as_bytes()); + assert_eq!(3, c.get_ref().len()); +} + +#[test] +fn write_1_chunk_encodes_complete_chunk() { + let mut c = Cursor::new(Vec::new()); + { + let mut enc = EncoderWriter::new(&mut c, &NO_PAD_ENGINE); + + assert_eq!(3, enc.write(b"abc").unwrap()); + let _ = enc.finish().unwrap(); + } + assert_eq!(&c.get_ref()[..], NO_PAD_ENGINE.encode("abc").as_bytes()); + assert_eq!(4, c.get_ref().len()); +} + +#[test] +fn write_1_chunk_and_partial_encodes_only_complete_chunk() { + let mut c = Cursor::new(Vec::new()); + { + let mut enc = EncoderWriter::new(&mut c, &NO_PAD_ENGINE); + + // "d" not consumed since it's not a full chunk + assert_eq!(3, enc.write(b"abcd").unwrap()); + let _ = enc.finish().unwrap(); + } + assert_eq!(&c.get_ref()[..], NO_PAD_ENGINE.encode("abc").as_bytes()); + assert_eq!(4, c.get_ref().len()); +} + +#[test] +fn 
write_2_partials_to_exactly_complete_chunk_encodes_complete_chunk() { + let mut c = Cursor::new(Vec::new()); + { + let mut enc = EncoderWriter::new(&mut c, &NO_PAD_ENGINE); + + assert_eq!(1, enc.write(b"a").unwrap()); + assert_eq!(2, enc.write(b"bc").unwrap()); + let _ = enc.finish().unwrap(); + } + assert_eq!(&c.get_ref()[..], NO_PAD_ENGINE.encode("abc").as_bytes()); + assert_eq!(4, c.get_ref().len()); +} + +#[test] +fn write_partial_then_enough_to_complete_chunk_but_not_complete_another_chunk_encodes_complete_chunk_without_consuming_remaining( +) { + let mut c = Cursor::new(Vec::new()); + { + let mut enc = EncoderWriter::new(&mut c, &NO_PAD_ENGINE); + + assert_eq!(1, enc.write(b"a").unwrap()); + // doesn't consume "d" + assert_eq!(2, enc.write(b"bcd").unwrap()); + let _ = enc.finish().unwrap(); + } + assert_eq!(&c.get_ref()[..], NO_PAD_ENGINE.encode("abc").as_bytes()); + assert_eq!(4, c.get_ref().len()); +} + +#[test] +fn write_partial_then_enough_to_complete_chunk_and_another_chunk_encodes_complete_chunks() { + let mut c = Cursor::new(Vec::new()); + { + let mut enc = EncoderWriter::new(&mut c, &NO_PAD_ENGINE); + + assert_eq!(1, enc.write(b"a").unwrap()); + // completes partial chunk, and another chunk + assert_eq!(5, enc.write(b"bcdef").unwrap()); + let _ = enc.finish().unwrap(); + } + assert_eq!(&c.get_ref()[..], NO_PAD_ENGINE.encode("abcdef").as_bytes()); + assert_eq!(8, c.get_ref().len()); +} + +#[test] +fn write_partial_then_enough_to_complete_chunk_and_another_chunk_and_another_partial_chunk_encodes_only_complete_chunks( +) { + let mut c = Cursor::new(Vec::new()); + { + let mut enc = EncoderWriter::new(&mut c, &NO_PAD_ENGINE); + + assert_eq!(1, enc.write(b"a").unwrap()); + // completes partial chunk, and another chunk, with one more partial chunk that's not + // consumed + assert_eq!(5, enc.write(b"bcdefe").unwrap()); + let _ = enc.finish().unwrap(); + } + assert_eq!(&c.get_ref()[..], NO_PAD_ENGINE.encode("abcdef").as_bytes()); + assert_eq!(8, c.get_ref().len()); +} + +#[test] +fn drop_calls_finish_for_you() { + let mut c = Cursor::new(Vec::new()); + { + let mut enc = EncoderWriter::new(&mut c, &NO_PAD_ENGINE); + assert_eq!(1, enc.write(b"a").unwrap()); + } + assert_eq!(&c.get_ref()[..], NO_PAD_ENGINE.encode("a").as_bytes()); + assert_eq!(2, c.get_ref().len()); +} + +#[test] +fn every_possible_split_of_input() { + let mut rng = rand::thread_rng(); + let mut orig_data = Vec::::new(); + let mut stream_encoded = Vec::::new(); + let mut normal_encoded = String::new(); + + let size = 5_000; + + for i in 0..size { + orig_data.clear(); + stream_encoded.clear(); + normal_encoded.clear(); + + for _ in 0..size { + orig_data.push(rng.gen()); + } + + let engine = random_engine(&mut rng); + engine.encode_string(&orig_data, &mut normal_encoded); + + { + let mut stream_encoder = EncoderWriter::new(&mut stream_encoded, &engine); + // Write the first i bytes, then the rest + stream_encoder.write_all(&orig_data[0..i]).unwrap(); + stream_encoder.write_all(&orig_data[i..]).unwrap(); + } + + assert_eq!(normal_encoded, str::from_utf8(&stream_encoded).unwrap()); + } +} + +#[test] +fn encode_random_config_matches_normal_encode_reasonable_input_len() { + // choose up to 2 * buf size, so ~half the time it'll use a full buffer + do_encode_random_config_matches_normal_encode(super::encoder::BUF_SIZE * 2); +} + +#[test] +fn encode_random_config_matches_normal_encode_tiny_input_len() { + do_encode_random_config_matches_normal_encode(10); +} + +#[test] +fn retrying_writes_that_error_with_interrupted_works() { + 
let mut rng = rand::thread_rng(); + let mut orig_data = Vec::::new(); + let mut stream_encoded = Vec::::new(); + let mut normal_encoded = String::new(); + + for _ in 0..1_000 { + orig_data.clear(); + stream_encoded.clear(); + normal_encoded.clear(); + + let orig_len: usize = rng.gen_range(100..20_000); + for _ in 0..orig_len { + orig_data.push(rng.gen()); + } + + // encode the normal way + let engine = random_engine(&mut rng); + engine.encode_string(&orig_data, &mut normal_encoded); + + // encode via the stream encoder + { + let mut interrupt_rng = rand::thread_rng(); + let mut interrupting_writer = InterruptingWriter { + w: &mut stream_encoded, + rng: &mut interrupt_rng, + fraction: 0.8, + }; + + let mut stream_encoder = EncoderWriter::new(&mut interrupting_writer, &engine); + let mut bytes_consumed = 0; + while bytes_consumed < orig_len { + // use short inputs since we want to use `extra` a lot as that's what needs rollback + // when errors occur + let input_len: usize = cmp::min(rng.gen_range(0..10), orig_len - bytes_consumed); + + retry_interrupted_write_all( + &mut stream_encoder, + &orig_data[bytes_consumed..bytes_consumed + input_len], + ) + .unwrap(); + + bytes_consumed += input_len; + } + + loop { + let res = stream_encoder.finish(); + match res { + Ok(_) => break, + Err(e) => match e.kind() { + io::ErrorKind::Interrupted => continue, + _ => panic!("{:?}", e), // bail + }, + } + } + + assert_eq!(orig_len, bytes_consumed); + } + + assert_eq!(normal_encoded, str::from_utf8(&stream_encoded).unwrap()); + } +} + +#[test] +fn writes_that_only_write_part_of_input_and_sometimes_interrupt_produce_correct_encoded_data() { + let mut rng = rand::thread_rng(); + let mut orig_data = Vec::::new(); + let mut stream_encoded = Vec::::new(); + let mut normal_encoded = String::new(); + + for _ in 0..1_000 { + orig_data.clear(); + stream_encoded.clear(); + normal_encoded.clear(); + + let orig_len: usize = rng.gen_range(100..20_000); + for _ in 0..orig_len { + orig_data.push(rng.gen()); + } + + // encode the normal way + let engine = random_engine(&mut rng); + engine.encode_string(&orig_data, &mut normal_encoded); + + // encode via the stream encoder + { + let mut partial_rng = rand::thread_rng(); + let mut partial_writer = PartialInterruptingWriter { + w: &mut stream_encoded, + rng: &mut partial_rng, + full_input_fraction: 0.1, + no_interrupt_fraction: 0.1, + }; + + let mut stream_encoder = EncoderWriter::new(&mut partial_writer, &engine); + let mut bytes_consumed = 0; + while bytes_consumed < orig_len { + // use at most medium-length inputs to exercise retry logic more aggressively + let input_len: usize = cmp::min(rng.gen_range(0..100), orig_len - bytes_consumed); + + let res = + stream_encoder.write(&orig_data[bytes_consumed..bytes_consumed + input_len]); + + // retry on interrupt + match res { + Ok(len) => bytes_consumed += len, + Err(e) => match e.kind() { + io::ErrorKind::Interrupted => continue, + _ => { + panic!("should not see other errors"); + } + }, + } + } + + let _ = stream_encoder.finish().unwrap(); + + assert_eq!(orig_len, bytes_consumed); + } + + assert_eq!(normal_encoded, str::from_utf8(&stream_encoded).unwrap()); + } +} + +/// Retry writes until all the data is written or an error that isn't Interrupted is returned. 
+fn retry_interrupted_write_all(w: &mut W, buf: &[u8]) -> io::Result<()> { + let mut bytes_consumed = 0; + + while bytes_consumed < buf.len() { + let res = w.write(&buf[bytes_consumed..]); + + match res { + Ok(len) => bytes_consumed += len, + Err(e) => match e.kind() { + io::ErrorKind::Interrupted => continue, + _ => return Err(e), + }, + } + } + + Ok(()) +} + +fn do_encode_random_config_matches_normal_encode(max_input_len: usize) { + let mut rng = rand::thread_rng(); + let mut orig_data = Vec::::new(); + let mut stream_encoded = Vec::::new(); + let mut normal_encoded = String::new(); + + for _ in 0..1_000 { + orig_data.clear(); + stream_encoded.clear(); + normal_encoded.clear(); + + let orig_len: usize = rng.gen_range(100..20_000); + for _ in 0..orig_len { + orig_data.push(rng.gen()); + } + + // encode the normal way + let engine = random_engine(&mut rng); + engine.encode_string(&orig_data, &mut normal_encoded); + + // encode via the stream encoder + { + let mut stream_encoder = EncoderWriter::new(&mut stream_encoded, &engine); + let mut bytes_consumed = 0; + while bytes_consumed < orig_len { + let input_len: usize = + cmp::min(rng.gen_range(0..max_input_len), orig_len - bytes_consumed); + + // write a little bit of the data + stream_encoder + .write_all(&orig_data[bytes_consumed..bytes_consumed + input_len]) + .unwrap(); + + bytes_consumed += input_len; + } + + let _ = stream_encoder.finish().unwrap(); + + assert_eq!(orig_len, bytes_consumed); + } + + assert_eq!(normal_encoded, str::from_utf8(&stream_encoded).unwrap()); + } +} + +/// A `Write` implementation that returns Interrupted some fraction of the time, randomly. +struct InterruptingWriter<'a, W: 'a + Write, R: 'a + Rng> { + w: &'a mut W, + rng: &'a mut R, + /// In [0, 1]. If a random number in [0, 1] is `<= threshold`, `Write` methods will return + /// an `Interrupted` error + fraction: f64, +} + +impl<'a, W: Write, R: Rng> Write for InterruptingWriter<'a, W, R> { + fn write(&mut self, buf: &[u8]) -> io::Result { + if self.rng.gen_range(0.0..1.0) <= self.fraction { + return Err(io::Error::new(io::ErrorKind::Interrupted, "interrupted")); + } + + self.w.write(buf) + } + + fn flush(&mut self) -> io::Result<()> { + if self.rng.gen_range(0.0..1.0) <= self.fraction { + return Err(io::Error::new(io::ErrorKind::Interrupted, "interrupted")); + } + + self.w.flush() + } +} + +/// A `Write` implementation that sometimes will only write part of its input. +struct PartialInterruptingWriter<'a, W: 'a + Write, R: 'a + Rng> { + w: &'a mut W, + rng: &'a mut R, + /// In [0, 1]. If a random number in [0, 1] is `<= threshold`, `write()` will write all its + /// input. 
Otherwise, it will write a random substring + full_input_fraction: f64, + no_interrupt_fraction: f64, +} + +impl<'a, W: Write, R: Rng> Write for PartialInterruptingWriter<'a, W, R> { + fn write(&mut self, buf: &[u8]) -> io::Result { + if self.rng.gen_range(0.0..1.0) > self.no_interrupt_fraction { + return Err(io::Error::new(io::ErrorKind::Interrupted, "interrupted")); + } + + if self.rng.gen_range(0.0..1.0) <= self.full_input_fraction || buf.is_empty() { + // pass through the buf untouched + self.w.write(buf) + } else { + // only use a prefix of it + self.w + .write(&buf[0..(self.rng.gen_range(0..(buf.len() - 1)))]) + } + } + + fn flush(&mut self) -> io::Result<()> { + self.w.flush() + } +} diff --git a/vendor/base64/src/write/mod.rs b/vendor/base64/src/write/mod.rs new file mode 100644 index 00000000000000..2a617db9de7b2f --- /dev/null +++ b/vendor/base64/src/write/mod.rs @@ -0,0 +1,11 @@ +//! Implementations of `io::Write` to transparently handle base64. +mod encoder; +mod encoder_string_writer; + +pub use self::{ + encoder::EncoderWriter, + encoder_string_writer::{EncoderStringWriter, StrConsumer}, +}; + +#[cfg(test)] +mod encoder_tests; diff --git a/vendor/base64/tests/encode.rs b/vendor/base64/tests/encode.rs new file mode 100644 index 00000000000000..9d6944741aea4c --- /dev/null +++ b/vendor/base64/tests/encode.rs @@ -0,0 +1,77 @@ +use base64::{ + alphabet::URL_SAFE, engine::general_purpose::PAD, engine::general_purpose::STANDARD, *, +}; + +fn compare_encode(expected: &str, target: &[u8]) { + assert_eq!(expected, STANDARD.encode(target)); +} + +#[test] +fn encode_all_ascii() { + let ascii: Vec = (0..=127).collect(); + + compare_encode( + "AAECAwQFBgcICQoLDA0ODxAREhMUFRYXGBkaGxwdHh8gISIjJCUmJygpKissLS4vMDEyMzQ1Njc4OTo7P\ + D0+P0BBQkNERUZHSElKS0xNTk9QUVJTVFVWV1hZWltcXV5fYGFiY2RlZmdoaWprbG1ub3BxcnN0dXZ3eHl6e3x9fn8\ + =", + &ascii, + ); +} + +#[test] +fn encode_all_bytes() { + let bytes: Vec = (0..=255).collect(); + + compare_encode( + "AAECAwQFBgcICQoLDA0ODxAREhMUFRYXGBkaGxwdHh8gISIjJCUmJygpKissLS4vMDEyMzQ1Njc4OTo7P\ + D0+P0BBQkNERUZHSElKS0xNTk9QUVJTVFVWV1hZWltcXV5fYGFiY2RlZmdoaWprbG1ub3BxcnN0dXZ3eHl6e3x9fn\ + +AgYKDhIWGh4iJiouMjY6PkJGSk5SVlpeYmZqbnJ2en6ChoqOkpaanqKmqq6ytrq+wsbKztLW2t7i5uru8vb6\ + /wMHCw8TFxsfIycrLzM3Oz9DR0tPU1dbX2Nna29zd3t/g4eLj5OXm5+jp6uvs7e7v8PHy8/T19vf4+fr7/P3+/w==", + &bytes, + ); +} + +#[test] +fn encode_all_bytes_url() { + let bytes: Vec = (0..=255).collect(); + + assert_eq!( + "AAECAwQFBgcICQoLDA0ODxAREhMUFRYXGBkaGxwdHh8gISIjJCUmJygpKissLS4vMDEyMzQ1Njc4OTo7PD0\ + -P0BBQkNERUZHSElKS0xNTk9QUVJTVFVWV1hZWltcXV5fYGFiY2RlZmdoaWprbG1ub3BxcnN0dXZ3eHl6e3x9fn\ + -AgYKDhIWGh4iJiouMjY6PkJGSk5SVlpeYmZqbnJ2en6ChoqOkpaanqKmqq6ytrq\ + -wsbKztLW2t7i5uru8vb6_wMHCw8TFxsfIycrLzM3Oz9DR0tPU1dbX2Nna29zd3t_g4eLj5OXm5-jp6uvs7e7v8PHy\ + 8_T19vf4-fr7_P3-_w==", + &engine::GeneralPurpose::new(&URL_SAFE, PAD).encode(bytes) + ); +} + +#[test] +fn encoded_len_unpadded() { + assert_eq!(0, encoded_len(0, false).unwrap()); + assert_eq!(2, encoded_len(1, false).unwrap()); + assert_eq!(3, encoded_len(2, false).unwrap()); + assert_eq!(4, encoded_len(3, false).unwrap()); + assert_eq!(6, encoded_len(4, false).unwrap()); + assert_eq!(7, encoded_len(5, false).unwrap()); + assert_eq!(8, encoded_len(6, false).unwrap()); + assert_eq!(10, encoded_len(7, false).unwrap()); +} + +#[test] +fn encoded_len_padded() { + assert_eq!(0, encoded_len(0, true).unwrap()); + assert_eq!(4, encoded_len(1, true).unwrap()); + assert_eq!(4, encoded_len(2, true).unwrap()); + assert_eq!(4, encoded_len(3, true).unwrap()); + 
assert_eq!(8, encoded_len(4, true).unwrap()); + assert_eq!(8, encoded_len(5, true).unwrap()); + assert_eq!(8, encoded_len(6, true).unwrap()); + assert_eq!(12, encoded_len(7, true).unwrap()); +} +#[test] +fn encoded_len_overflow() { + let max_size = usize::MAX / 4 * 3 + 2; + assert_eq!(2, max_size % 3); + assert_eq!(Some(usize::MAX), encoded_len(max_size, false)); + assert_eq!(None, encoded_len(max_size + 1, false)); +} diff --git a/vendor/base64/tests/tests.rs b/vendor/base64/tests/tests.rs new file mode 100644 index 00000000000000..eceff40d6a33be --- /dev/null +++ b/vendor/base64/tests/tests.rs @@ -0,0 +1,161 @@ +use rand::{Rng, SeedableRng}; + +use base64::engine::{general_purpose::STANDARD, Engine}; +use base64::*; + +use base64::engine::general_purpose::{GeneralPurpose, NO_PAD}; + +// generate random contents of the specified length and test encode/decode roundtrip +fn roundtrip_random( + byte_buf: &mut Vec, + str_buf: &mut String, + engine: &E, + byte_len: usize, + approx_values_per_byte: u8, + max_rounds: u64, +) { + // let the short ones be short but don't let it get too crazy large + let num_rounds = calculate_number_of_rounds(byte_len, approx_values_per_byte, max_rounds); + let mut r = rand::rngs::SmallRng::from_entropy(); + let mut decode_buf = Vec::new(); + + for _ in 0..num_rounds { + byte_buf.clear(); + str_buf.clear(); + decode_buf.clear(); + while byte_buf.len() < byte_len { + byte_buf.push(r.gen::()); + } + + engine.encode_string(&byte_buf, str_buf); + engine.decode_vec(&str_buf, &mut decode_buf).unwrap(); + + assert_eq!(byte_buf, &decode_buf); + } +} + +fn calculate_number_of_rounds(byte_len: usize, approx_values_per_byte: u8, max: u64) -> u64 { + // don't overflow + let mut prod = approx_values_per_byte as u64; + + for _ in 0..byte_len { + if prod > max { + return max; + } + + prod = prod.saturating_mul(prod); + } + + prod +} + +#[test] +fn roundtrip_random_short_standard() { + let mut byte_buf: Vec = Vec::new(); + let mut str_buf = String::new(); + + for input_len in 0..40 { + roundtrip_random(&mut byte_buf, &mut str_buf, &STANDARD, input_len, 4, 10000); + } +} + +#[test] +fn roundtrip_random_with_fast_loop_standard() { + let mut byte_buf: Vec = Vec::new(); + let mut str_buf = String::new(); + + for input_len in 40..100 { + roundtrip_random(&mut byte_buf, &mut str_buf, &STANDARD, input_len, 4, 1000); + } +} + +#[test] +fn roundtrip_random_short_no_padding() { + let mut byte_buf: Vec = Vec::new(); + let mut str_buf = String::new(); + + let engine = GeneralPurpose::new(&alphabet::STANDARD, NO_PAD); + for input_len in 0..40 { + roundtrip_random(&mut byte_buf, &mut str_buf, &engine, input_len, 4, 10000); + } +} + +#[test] +fn roundtrip_random_no_padding() { + let mut byte_buf: Vec = Vec::new(); + let mut str_buf = String::new(); + + let engine = GeneralPurpose::new(&alphabet::STANDARD, NO_PAD); + + for input_len in 40..100 { + roundtrip_random(&mut byte_buf, &mut str_buf, &engine, input_len, 4, 1000); + } +} + +#[test] +fn roundtrip_decode_trailing_10_bytes() { + // This is a special case because we decode 8 byte blocks of input at a time as much as we can, + // ideally unrolled to 32 bytes at a time, in stages 1 and 2. Since we also write a u64's worth + // of bytes (8) to the output, we always write 2 garbage bytes that then will be overwritten by + // the NEXT block. However, if the next block only contains 2 bytes, it will decode to 1 byte, + // and therefore be too short to cover up the trailing 2 garbage bytes. Thus, we have stage 3 + // to handle that case. 
+ + for num_quads in 0..25 { + let mut s: String = "ABCD".repeat(num_quads); + s.push_str("EFGHIJKLZg"); + + let engine = GeneralPurpose::new(&alphabet::STANDARD, NO_PAD); + let decoded = engine.decode(&s).unwrap(); + assert_eq!(num_quads * 3 + 7, decoded.len()); + + assert_eq!(s, engine.encode(&decoded)); + } +} + +#[test] +fn display_wrapper_matches_normal_encode() { + let mut bytes = Vec::::with_capacity(256); + + for i in 0..255 { + bytes.push(i); + } + bytes.push(255); + + assert_eq!( + STANDARD.encode(&bytes), + format!("{}", display::Base64Display::new(&bytes, &STANDARD)) + ); +} + +#[test] +fn encode_engine_slice_error_when_buffer_too_small() { + for num_triples in 1..100 { + let input = "AAA".repeat(num_triples); + let mut vec = vec![0; (num_triples - 1) * 4]; + assert_eq!( + EncodeSliceError::OutputSliceTooSmall, + STANDARD.encode_slice(&input, &mut vec).unwrap_err() + ); + vec.push(0); + assert_eq!( + EncodeSliceError::OutputSliceTooSmall, + STANDARD.encode_slice(&input, &mut vec).unwrap_err() + ); + vec.push(0); + assert_eq!( + EncodeSliceError::OutputSliceTooSmall, + STANDARD.encode_slice(&input, &mut vec).unwrap_err() + ); + vec.push(0); + assert_eq!( + EncodeSliceError::OutputSliceTooSmall, + STANDARD.encode_slice(&input, &mut vec).unwrap_err() + ); + vec.push(0); + assert_eq!( + num_triples * 4, + STANDARD.encode_slice(&input, &mut vec).unwrap() + ); + } +} diff --git a/vendor/bindgen/.cargo-checksum.json b/vendor/bindgen/.cargo-checksum.json new file mode 100644 index 00000000000000..3fd0aa70ed81ad --- /dev/null +++ b/vendor/bindgen/.cargo-checksum.json @@ -0,0 +1 @@ +{"files":{".cargo_vcs_info.json":"b5c3dc198affdf569150b54e8b50a92f1f8dbc4127f07bcd2728f55570394a15","Cargo.lock":"775138b42c9ceb7d012985ea43cb12cd32c325c9c5af2edd5d9d4913d7a44a07","Cargo.toml":"f72dfce465f8e986f51506cda6090d754057b55318712c6c13ef96230d9b1a42","Cargo.toml.orig":"26d7094dff93b9e475855e6e97ccc03f77844e5198eb933ce873049af9b6bca7","LICENSE":"c23953d9deb0a3312dbeaf6c128a657f3591acee45067612fa68405eaa4525db","README.md":"79b56b76b0e4c133c704f21776329b02228279a0d5b90f5aa401e51fb59b43bb","build.rs":"f7a10af0a21662e104e0058da7e3471a20be328eef6c7c41988525be90fdfe92","callbacks.rs":"de8bbe96753b6c5107984f2d26f13abf8fd2dd914ca688a989bf90c8e63c435f","clang.rs":"e991300ce9b1f0b9fb4a0b4bd32e899b6cfa546f034858f08bf57678b3d7044c","codegen/bitfield_unit.rs":"bcec32a8289eb8643bdc7aae0636aecac9f28e83895ebd330a4b0d1d3468bb4c","codegen/bitfield_unit_raw_ref_macros.rs":"cd9a02db7a0f0d2db79dc6b54c64eec2b438d2124928127266a5e30cf451696e","codegen/bitfield_unit_tests.rs":"9915cb19bf37fc1013fc72753bae153f7c249280daec94b25fb461f9936dffa4","codegen/dyngen.rs":"1b42f7fa9fb65ff2fa898b289a3b934d7b21c5ba6c1b3e37aa97f0faa87769a0","codegen/error.rs":"67680c4d171d63848d9eb4ddd5f100be7463564e8c1c9203fefc0e61a19dfdec","codegen/helpers.rs":"2f2873a8bf98a7583c30d42758f0c229ae7d7a6ee71b89bb33a79ffa73ab4ab2","codegen/impl_debug.rs":"dff9ca17a9397f327841f6321056cc0bdffe1f52106ec08879bcc74c22d4f383","codegen/impl_partialeq.rs":"60623e75c079ccadc4b928acd2cf78449db84591b720e242fc10ee1417678981","codegen/mod.rs":"a865463a58ef01a49079118f648b8e139eb63790b7d511d03588e0ac52bf35ee","codegen/postprocessing/merge_extern_blocks.rs":"3e244fe62abcadcb6dae069c37d21220f5351dc7b8c33c5576a5193731933c4c","codegen/postprocessing/mod.rs":"160a6d6701cabf2514e23570df1bd1b648c909cc27b7c583f21d98fe0c16722e","codegen/postprocessing/sort_semantically.rs":"5099e8fc134a92cb72b585bd95854a52ff81e2f5307c3b83617d83e7408302ee","codegen/serialize.rs":"d57eb31ba0fda825241
f886336279573050b396e52badf3886e0bc76a15110ad","codegen/struct_layout.rs":"78b38cc064491d854516dcf36e268b45e549d2bd3150ea4cb390529f656b2132","deps.rs":"297dcc2be53af1a3ea4f77e16902a641f3e6f0baad09c06a6ea26050a0281c18","diagnostics.rs":"9c80043ac9fa8f683019577f311853a0d5929e41a95b3255736f80105914cdfa","extra_assertions.rs":"1596b7e7f031714dc699ebda135e795f1ecfc971ce9de6861a3c00e77fcef011","features.rs":"ad17b96bf6b97cecb33d2b4710261341b1e3828263531708157d2b206af65c77","ir/analysis/derive.rs":"f0bd1b6362ed9a8e8bc6933bf1af29cceabde84015dc34d76f34d3e4c56a4103","ir/analysis/has_destructor.rs":"64dc142ff8c56db94b464f0f03ecd25317c8c1d6cd2a7304d2d3a0ff0a0db890","ir/analysis/has_float.rs":"9fb88d05c5920e9000e5cb6e87775c0d1042a6b11205577e281ba281e6e6acdb","ir/analysis/has_type_param_in_array.rs":"0975d1ce43bcba97eb303635118e74d494d46ac67cf5ee53faf6f6584a556cef","ir/analysis/has_vtable.rs":"3e1a807feccb55d6148e81512417b56cd1d70a70783508b65a3ff2abde461d2b","ir/analysis/mod.rs":"93edca96d765dfa19ac231198027b0ba48c623502a8be1dbf799a241cd6b304b","ir/analysis/sizedness.rs":"0b78e70737e038ebdee2c3d195194a060c9287000b9059ded0686728a89b4ff2","ir/analysis/template_params.rs":"a8dfd3e02b1745a5b7c6faa16309bd0b8a88d76b7fedca27c322782cc9e77177","ir/annotations.rs":"8397ced62808fe99dfdde35792cd8b2389e7828d752a6c8aa3a70c1e14595e11","ir/comment.rs":"57863204d329ae82872ecc4829cc299969ff07da3a32a4a13d7d84429f55b84d","ir/comp.rs":"6dce8c17967a2219ccf8ac2bf11ca97a046ca28df258f47e1f2cc8c34e2237ed","ir/context.rs":"59b73ef695f98adda0b54828a820739121d0f2c869f06b75a0cbc1a84c3ca887","ir/derive.rs":"09860cffec0ebecce31da0c6c9ea0cf9a0d4784262ff4eb16ea459c0d0782ac9","ir/dot.rs":"8b8f6dd13e662fcb4114949025cb43467b34fa4998a3371c101db5dd82688f44","ir/enum_ty.rs":"5d7ae2e3de172d9812425e8cc6e30d559b0743620b3b09f7d72f3b05a7e1ce98","ir/function.rs":"a8296565624f1be38eaa01cea638e39eb1e2ee9de6859fc63a070f5a190c4c8c","ir/int.rs":"1bf1e4d87eca13ee2fc38ff4d56c266f303f188796f5c0d290f53162798d2d01","ir/item.rs":"d11623c01e1a9128063be4e4bde5c459a0a2f10fd9e323fced3cd4bc8d394b6c","ir/item_kind.rs":"799fab994b5ed35045786a68003c2c12b6601cf3b07e8ccc0b9acd6f921217e0","ir/layout.rs":"5b2958eb3d5e5d96bd85ce02925d936e89d3147c62cd225e3a0ae7f042b74fca","ir/mod.rs":"a3b98b1732111a980a795c72eaf1e09101e842ef2de76b4f2d4a7857f8d4cee4","ir/module.rs":"617867617ebd7e56157a9ba057441ce11a33c25138a1da64646f44ccaae7c762","ir/objc.rs":"092c7f32cec4191aa6235e4554420ab2053e0c7fec5ece016a7ed303763e8547","ir/template.rs":"1114c0924323f8b30bb32dbb3f6730dd7f5bc1f0771ad5099738e8e57111c07d","ir/traversal.rs":"3ebde94ead0fe69d51541ab61d700c7c1f6382574e4c110b8c7fe3e2c6218f19","ir/ty.rs":"e2516217fa439e65ef38dce2acd712a89879045e21e64b6f317a81b0d22927dc","ir/var.rs":"2a94decd3adfdccd3bd0015b460d180838e3c92b122b632eed44032b80cad120","lib.rs":"fe023071fb5e39938173ee7e160a4c7f6b735afc71c8d164c397842d69b055de","log_stubs.rs":"a636d59af2fd3745c2e416e1ab8a1e1a3888ea84cd4657a321ce22f15e0c5a87","options/as_args.rs":"a1a5e7f0dde82590371fc1a9ea5fde7f3e2252530ca74d98bb49a8ce06cc864f","options/cli.rs":"ce154bf2b5dfb6771e90526424b94169f6b0ac3a4ec035772a835827795eaaae","options/helpers.rs":"f4a7681e29b2dcc3be9249478c499d685b9e29d4f4ca4ae8bff7a91668cd8f15","options/mod.rs":"beaaccfdf79a309bc272fc72a9278568d9e0f4dff1edd71fceffc3bf1e9baec2","parse.rs":"fce3616e0464aa7414888e5d00d4df18c83bb3034a1c807d36a07a3c586e475a","regex_set.rs":"d8995adb9e5cecc2d738e662a62d5081150bf967cb67e1206070e22b7265578a","time.rs":"1429af446b2b38c70ceec82c4202d4822c618cad47ba502dce72dbdc4cbb425e"},"package":"993776b50
9cfb49c750f11b8f07a46fa23e0a1386ffc01fb1e7d343efc387895"} \ No newline at end of file diff --git a/vendor/bindgen/.cargo_vcs_info.json b/vendor/bindgen/.cargo_vcs_info.json new file mode 100644 index 00000000000000..786f0e3e412926 --- /dev/null +++ b/vendor/bindgen/.cargo_vcs_info.json @@ -0,0 +1,6 @@ +{ + "git": { + "sha1": "d874de8d646d9b8a3e7ba2db2bcd52f2fba8f1f5" + }, + "path_in_vcs": "bindgen" +} \ No newline at end of file diff --git a/vendor/bindgen/Cargo.lock b/vendor/bindgen/Cargo.lock new file mode 100644 index 00000000000000..0778c7fa950037 --- /dev/null +++ b/vendor/bindgen/Cargo.lock @@ -0,0 +1,485 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. +version = 3 + +[[package]] +name = "aho-corasick" +version = "1.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916" +dependencies = [ + "memchr", +] + +[[package]] +name = "annotate-snippets" +version = "0.11.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "24e35ed54e5ea7997c14ed4c70ba043478db1112e98263b3b035907aa197d991" +dependencies = [ + "anstyle", + "unicode-width", +] + +[[package]] +name = "anstyle" +version = "1.0.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "55cc3b69f167a1ef2e161439aa98aed94e6028e5f9a59be9a6ffb47aef1651f9" + +[[package]] +name = "bindgen" +version = "0.72.1" +dependencies = [ + "annotate-snippets", + "bitflags 2.2.1", + "cexpr", + "clang-sys", + "clap", + "clap_complete", + "itertools", + "log", + "prettyplease", + "proc-macro2", + "quote", + "regex", + "rustc-hash", + "shlex", + "syn 2.0.90", +] + +[[package]] +name = "bitflags" +version = "1.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" + +[[package]] +name = "bitflags" +version = "2.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "24a6904aef64d73cf10ab17ebace7befb918b82164785cb89907993be7f83813" + +[[package]] +name = "cexpr" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6fac387a98bb7c37292057cffc56d62ecb629900026402633ae9160df93a8766" +dependencies = [ + "nom", +] + +[[package]] +name = "cfg-if" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" + +[[package]] +name = "clang-sys" +version = "1.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b023947811758c97c59bf9d1c188fd619ad4718dcaa767947df1cadb14f39f4" +dependencies = [ + "glob", + "libc", + "libloading", +] + +[[package]] +name = "clap" +version = "4.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f13b9c79b5d1dd500d20ef541215a6423c75829ef43117e1b4d17fd8af0b5d76" +dependencies = [ + "bitflags 1.3.2", + "clap_derive", + "clap_lex", + "is-terminal", + "once_cell", + "strsim", + "termcolor", +] + +[[package]] +name = "clap_complete" +version = "4.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "01c22dcfb410883764b29953103d9ef7bb8fe21b3fa1158bc99986c2067294bd" +dependencies = [ + "clap", +] + +[[package]] +name = "clap_derive" +version = "4.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "684a277d672e91966334af371f1a7b5833f9aa00b07c84e92fbce95e00208ce8" 
+dependencies = [ + "heck", + "proc-macro-error", + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "clap_lex" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "783fe232adfca04f90f56201b26d79682d4cd2625e0bc7290b95123afe558ade" +dependencies = [ + "os_str_bytes", +] + +[[package]] +name = "either" +version = "1.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "60b1af1c220855b6ceac025d3f6ecdd2b7c4894bfe9cd9bda4fbb4bc7c0d4cf0" + +[[package]] +name = "glob" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b" + +[[package]] +name = "heck" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" + +[[package]] +name = "hermit-abi" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fbf6a919d6cf397374f7dfeeea91d974c7c0a7221d0d0f4f20d859d329e53fcc" + +[[package]] +name = "is-terminal" +version = "0.4.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "261f68e344040fbd0edea105bef17c66edf46f984ddb1115b775ce31be948f4b" +dependencies = [ + "hermit-abi", + "libc", + "windows-sys", +] + +[[package]] +name = "itertools" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "413ee7dfc52ee1a4949ceeb7dbc8a33f2d6c088194d9f922fb8318faf1f01186" +dependencies = [ + "either", +] + +[[package]] +name = "libc" +version = "0.2.167" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09d6582e104315a817dff97f75133544b2e094ee22447d2acf4a74e189ba06fc" + +[[package]] +name = "libloading" +version = "0.8.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fc2f4eb4bc735547cfed7c0a4922cbd04a4655978c09b54f1f7b228750664c34" +dependencies = [ + "cfg-if", + "windows-targets", +] + +[[package]] +name = "log" +version = "0.4.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a7a70ba024b9dc04c27ea2f0c0548feb474ec5c54bba33a7f72f873a39d07b24" + +[[package]] +name = "memchr" +version = "2.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3" + +[[package]] +name = "minimal-lexical" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" + +[[package]] +name = "nom" +version = "7.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d273983c5a657a70a3e8f2a01329822f3b8c8172b73826411a55751e404a0a4a" +dependencies = [ + "memchr", + "minimal-lexical", +] + +[[package]] +name = "once_cell" +version = "1.20.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1261fe7e33c73b354eab43b1273a57c8f967d0391e80353e51f764ac02cf6775" + +[[package]] +name = "os_str_bytes" +version = "6.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b7820b9daea5457c9f21c69448905d723fbd21136ccf521748f23fd49e723ee" + +[[package]] +name = "prettyplease" +version = "0.2.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "64d1ec885c64d0457d564db4ec299b2dae3f9c02808b8ad9c3a089c591b18033" +dependencies = [ + "proc-macro2", + 
"syn 2.0.90", +] + +[[package]] +name = "proc-macro-error" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" +dependencies = [ + "proc-macro-error-attr", + "proc-macro2", + "quote", + "syn 1.0.109", + "version_check", +] + +[[package]] +name = "proc-macro-error-attr" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" +dependencies = [ + "proc-macro2", + "quote", + "version_check", +] + +[[package]] +name = "proc-macro2" +version = "1.0.92" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "37d3544b3f2748c54e147655edb5025752e2303145b5aefb3c3ea2c78b973bb0" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "quote" +version = "1.0.37" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b5b9d34b8991d19d98081b46eacdd8eb58c6f2b201139f7c5f643cc155a633af" +dependencies = [ + "proc-macro2", +] + +[[package]] +name = "regex" +version = "1.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b544ef1b4eac5dc2db33ea63606ae9ffcfac26c1416a2806ae0bf5f56b201191" +dependencies = [ + "aho-corasick", + "memchr", + "regex-automata", + "regex-syntax", +] + +[[package]] +name = "regex-automata" +version = "0.4.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "809e8dc61f6de73b46c85f4c96486310fe304c434cfa43669d7b40f711150908" +dependencies = [ + "aho-corasick", + "memchr", + "regex-syntax", +] + +[[package]] +name = "regex-syntax" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c" + +[[package]] +name = "rustc-hash" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c7fb8039b3032c191086b10f11f319a6e99e1e82889c5cc6046f515c9db1d497" + +[[package]] +name = "shlex" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" + +[[package]] +name = "strsim" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" + +[[package]] +name = "syn" +version = "1.0.109" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "syn" +version = "2.0.90" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "919d3b74a5dd0ccd15aeb8f93e7006bd9e14c295087c9896a110f490752bcf31" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "termcolor" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "be55cf8942feac5c765c2c993422806843c9a9a45d4d5c407ad6dd2ea95eb9b6" +dependencies = [ + "winapi-util", +] + +[[package]] +name = "unicode-ident" +version = "1.0.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "adb9e6ca4f869e1180728b7950e35922a7fc6397f7b641499e8f3ef06e50dc83" + +[[package]] +name = "unicode-width" +version = "0.1.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"7dd6e30e90baa6f72411720665d41d89b9a3d039dc45b8faea1ddd07f617f6af" + +[[package]] +name = "version_check" +version = "0.9.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" + +[[package]] +name = "winapi" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" +dependencies = [ + "winapi-i686-pc-windows-gnu", + "winapi-x86_64-pc-windows-gnu", +] + +[[package]] +name = "winapi-i686-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" + +[[package]] +name = "winapi-util" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "70ec6ce85bb158151cae5e5c87f95a8e97d2c0c4b001223f33a334e3ce5de178" +dependencies = [ + "winapi", +] + +[[package]] +name = "winapi-x86_64-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" + +[[package]] +name = "windows-sys" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" +dependencies = [ + "windows-targets", +] + +[[package]] +name = "windows-targets" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973" +dependencies = [ + "windows_aarch64_gnullvm", + "windows_aarch64_msvc", + "windows_i686_gnu", + "windows_i686_gnullvm", + "windows_i686_msvc", + "windows_x86_64_gnu", + "windows_x86_64_gnullvm", + "windows_x86_64_msvc", +] + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" + +[[package]] +name = "windows_i686_gnu" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" + +[[package]] +name = "windows_i686_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" + +[[package]] +name = "windows_i686_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" diff --git a/vendor/bindgen/Cargo.toml b/vendor/bindgen/Cargo.toml new file mode 100644 index 00000000000000..b26f28a7cf85cd --- /dev/null +++ b/vendor/bindgen/Cargo.toml @@ -0,0 +1,189 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies. +# +# If you are reading this file be aware that the original Cargo.toml +# will likely look very different (and much more reasonable). +# See Cargo.toml.orig for the original contents. + +[package] +edition = "2021" +rust-version = "1.70.0" +name = "bindgen" +version = "0.72.1" +authors = [ + "Jyun-Yan You ", + "Emilio Cobos Álvarez ", + "Nick Fitzgerald ", + "The Servo project developers", +] +build = "build.rs" +autolib = false +autobins = false +autoexamples = false +autotests = false +autobenches = false +description = "Automatically generates Rust FFI bindings to C and C++ libraries." +homepage = "https://rust-lang.github.io/rust-bindgen/" +documentation = "https://docs.rs/bindgen" +readme = "README.md" +keywords = [ + "bindings", + "ffi", + "code-generation", +] +categories = [ + "external-ffi-bindings", + "development-tools::ffi", +] +license = "BSD-3-Clause" +repository = "https://github.com/rust-lang/rust-bindgen" + +[package.metadata.docs.rs] +features = ["experimental"] + +[package.metadata.release] +pre-release-hook = [ + "../node_modules/doctoc/doctoc.js", + "../CHANGELOG.md", +] +release = true + +[[package.metadata.release.pre-release-replacements]] +file = "../CHANGELOG.md" +replace = """ +# Unreleased +## Added +## Changed +## Removed +## Fixed +## Security + +# {{version}} ({{date}})""" +search = "# Unreleased" + +[features] +__cli = [ + "dep:clap", + "dep:clap_complete", +] +__testing_only_extra_assertions = [] +__testing_only_libclang_16 = [] +__testing_only_libclang_9 = [] +default = [ + "logging", + "prettyplease", + "runtime", +] +experimental = ["dep:annotate-snippets"] +logging = ["dep:log"] +runtime = ["clang-sys/runtime"] +static = ["clang-sys/static"] + +[lib] +name = "bindgen" +path = "lib.rs" + +[dependencies.annotate-snippets] +version = "0.11.4" +optional = true + +[dependencies.bitflags] +version = "2.2.1" + +[dependencies.cexpr] +version = "0.6" + +[dependencies.clang-sys] +version = "1" +features = ["clang_11_0"] + +[dependencies.clap] +version = "4" +features = ["derive"] +optional = true + +[dependencies.clap_complete] +version = "4" +optional = true + +[dependencies.itertools] +version = ">=0.10,<0.14" +default-features = false + +[dependencies.log] +version = "0.4" +optional = true + +[dependencies.prettyplease] +version = "0.2.7" +features = ["verbatim"] +optional = true + +[dependencies.proc-macro2] +version = "1.0.80" + +[dependencies.quote] +version = "1" +default-features = false + +[dependencies.regex] +version = "1.5.3" +features = [ + "std", + "unicode-perl", +] +default-features = false + +[dependencies.rustc-hash] +version = "2.1.0" + +[dependencies.shlex] +version = "1" + +[dependencies.syn] +version = "2.0" +features = [ + "full", + "extra-traits", + "visit-mut", +] + +[lints.clippy] +cast_possible_truncation = "allow" +cast_possible_wrap = "allow" +cast_precision_loss = "allow" +cast_sign_loss = "allow" +default_trait_access = "allow" +enum_glob_use = "allow" +ignored_unit_patterns = "allow" 
+implicit_hasher = "allow" +items_after_statements = "allow" +match_same_arms = "allow" +maybe_infinite_iter = "allow" +missing_errors_doc = "allow" +missing_panics_doc = "allow" +module_name_repetitions = "allow" +must_use_candidate = "allow" +redundant_closure_for_method_calls = "allow" +return_self_not_must_use = "allow" +similar_names = "allow" +struct_excessive_bools = "allow" +struct_field_names = "allow" +too_many_lines = "allow" +trivially_copy_pass_by_ref = "allow" +unnecessary_wraps = "allow" +unreadable_literal = "allow" +unused_self = "allow" +used_underscore_binding = "allow" +wildcard_imports = "allow" + +[lints.clippy.pedantic] +level = "warn" +priority = -1 + +[lints.rust] +unused_qualifications = "warn" diff --git a/vendor/bindgen/LICENSE b/vendor/bindgen/LICENSE new file mode 100644 index 00000000000000..62f55f45a1d1f7 --- /dev/null +++ b/vendor/bindgen/LICENSE @@ -0,0 +1,29 @@ +BSD 3-Clause License + +Copyright (c) 2013, Jyun-Yan You +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/bindgen/README.md b/vendor/bindgen/README.md new file mode 100644 index 00000000000000..b35dee3bef41f7 --- /dev/null +++ b/vendor/bindgen/README.md @@ -0,0 +1,89 @@ +[![crates.io](https://img.shields.io/crates/v/bindgen.svg)](https://crates.io/crates/bindgen) +[![docs.rs](https://docs.rs/bindgen/badge.svg)](https://docs.rs/bindgen/) + +# `bindgen` + +**`bindgen` automatically generates Rust FFI bindings to C (and some C++) libraries.** + +For example, given the C header `doggo.h`: + +```c +typedef struct Doggo { + int many; + char wow; +} Doggo; + +void eleven_out_of_ten_majestic_af(Doggo* pupper); +``` + +`bindgen` produces Rust FFI code allowing you to call into the `doggo` library's +functions and use its types: + +```rust +/* automatically generated by rust-bindgen 0.99.9 */ + +#[repr(C)] +pub struct Doggo { + pub many: ::std::os::raw::c_int, + pub wow: ::std::os::raw::c_char, +} + +extern "C" { + pub fn eleven_out_of_ten_majestic_af(pupper: *mut Doggo); +} +``` + +## Users Guide + +[📚 Read the `bindgen` users guide here! 
📚](https://rust-lang.github.io/rust-bindgen) + +## MSRV + +The `bindgen` minimum supported Rust version is **1.70.0**. + +The `bindgen-cli` minimum supported Rust version is **1.70.0**. + +No MSRV bump policy has been established yet, so MSRV may increase in any release. + +The MSRV is the minimum Rust version that can be used to *compile* each crate. However, `bindgen` and `bindgen-cli` can generate bindings that are compatible with Rust versions below the current MSRV. + +Most of the time, the `bindgen-cli` crate will have a more recent MSRV than `bindgen` as crates such as `clap` require it. + +## API Reference + +[API reference documentation is on docs.rs](https://docs.rs/bindgen) + +## Environment Variables + +In addition to the [library API](https://docs.rs/bindgen) and [executable command-line API][bindgen-cmdline], +`bindgen` can be controlled through environment variables. + +End-users should set these environment variables to modify `bindgen`'s behavior without modifying the source code of direct consumers of `bindgen`. + +- `BINDGEN_EXTRA_CLANG_ARGS`: extra arguments to pass to `clang` + - Arguments are whitespace-separated + - Use shell-style quoting to pass through whitespace + - Examples: + - Specify alternate sysroot: `--sysroot=/path/to/sysroot` + - Add include search path with spaces: `-I"/path/with spaces"` +- `BINDGEN_EXTRA_CLANG_ARGS_`: similar to `BINDGEN_EXTRA_CLANG_ARGS`, + but used to set per-target arguments to pass to clang. Useful to set system include + directories in a target-specific way in cross-compilation environments with multiple targets. + Has precedence over `BINDGEN_EXTRA_CLANG_ARGS`. + +Additionally, `bindgen` uses `libclang` to parse C and C++ header files. +To modify how `bindgen` searches for `libclang`, see the [`clang-sys` documentation][clang-sys-env]. +For more details on how `bindgen` uses `libclang`, see the [`bindgen` users guide][bindgen-book-clang]. + +## Releases + +We don't follow a specific release calendar, but if you need a release please +file an issue requesting that (ping `@emilio` for increased effectiveness). + +## Contributing + +[See `CONTRIBUTING.md` for hacking on `bindgen`!](./CONTRIBUTING.md) + +[bindgen-cmdline]: https://rust-lang.github.io/rust-bindgen/command-line-usage.html +[clang-sys-env]: https://github.com/KyleMayes/clang-sys#environment-variables +[bindgen-book-clang]: https://rust-lang.github.io/rust-bindgen/requirements.html#clang diff --git a/vendor/bindgen/build.rs b/vendor/bindgen/build.rs new file mode 100644 index 00000000000000..4fb2d3075ecdbc --- /dev/null +++ b/vendor/bindgen/build.rs @@ -0,0 +1,29 @@ +use std::env; +use std::fs::File; +use std::io::Write; +use std::path::{Path, PathBuf}; + +fn main() { + let out_dir = PathBuf::from(env::var("OUT_DIR").unwrap()); + + let mut dst = + File::create(Path::new(&out_dir).join("host-target.txt")).unwrap(); + dst.write_all(env::var("TARGET").unwrap().as_bytes()) + .unwrap(); + + // On behalf of clang_sys, rebuild ourselves if important configuration + // variables change, to ensure that bindings get rebuilt if the + // underlying libclang changes. 
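// Illustrative sketch (not part of the vendored bindgen sources or of this patch):
// the README above describes two ways to influence bindgen -- the
// BINDGEN_EXTRA_CLANG_ARGS environment variables, and the library API driven from a
// build script. A minimal sketch of the build-script route follows; the header name,
// include path, and output file name are hypothetical placeholders.
//
//     fn generate_bindings_sketch() {
//         let bindings = bindgen::Builder::default()
//             .header("wrapper.h")                 // hypothetical input header
//             .clang_arg("-I/path/to/includes")    // extra clang arguments
//             .generate()
//             .expect("failed to generate bindings");
//
//         let out_dir = std::path::PathBuf::from(std::env::var("OUT_DIR").unwrap());
//         bindings
//             .write_to_file(out_dir.join("bindings.rs"))
//             .expect("failed to write bindings.rs");
//     }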
+ println!("cargo:rerun-if-env-changed=LLVM_CONFIG_PATH"); + println!("cargo:rerun-if-env-changed=LIBCLANG_PATH"); + println!("cargo:rerun-if-env-changed=LIBCLANG_STATIC_PATH"); + println!("cargo:rerun-if-env-changed=BINDGEN_EXTRA_CLANG_ARGS"); + println!( + "cargo:rerun-if-env-changed=BINDGEN_EXTRA_CLANG_ARGS_{}", + env::var("TARGET").unwrap() + ); + println!( + "cargo:rerun-if-env-changed=BINDGEN_EXTRA_CLANG_ARGS_{}", + env::var("TARGET").unwrap().replace('-', "_") + ); +} diff --git a/vendor/bindgen/callbacks.rs b/vendor/bindgen/callbacks.rs new file mode 100644 index 00000000000000..93005ce8e523ea --- /dev/null +++ b/vendor/bindgen/callbacks.rs @@ -0,0 +1,317 @@ +//! A public API for more fine-grained customization of bindgen behavior. + +pub use crate::ir::analysis::DeriveTrait; +pub use crate::ir::derive::CanDerive as ImplementsTrait; +pub use crate::ir::enum_ty::{EnumVariantCustomBehavior, EnumVariantValue}; +pub use crate::ir::int::IntKind; +pub use cexpr::token::Kind as TokenKind; +pub use cexpr::token::Token; +use std::fmt; + +/// An enum to allow ignoring parsing of macros. +#[derive(Copy, Clone, Debug, PartialEq, Eq, Default)] +pub enum MacroParsingBehavior { + /// Ignore the macro, generating no code for it, or anything that depends on + /// it. + Ignore, + /// The default behavior bindgen would have otherwise. + #[default] + Default, +} + +/// A trait to allow configuring different kinds of types in different +/// situations. +pub trait ParseCallbacks: fmt::Debug { + #[cfg(feature = "__cli")] + #[doc(hidden)] + fn cli_args(&self) -> Vec { + vec![] + } + + /// This function will be run on every macro that is identified. + fn will_parse_macro(&self, _name: &str) -> MacroParsingBehavior { + MacroParsingBehavior::Default + } + + /// This function will run for every extern variable and function. The returned value determines + /// the name visible in the bindings. + fn generated_name_override( + &self, + _item_info: ItemInfo<'_>, + ) -> Option { + None + } + + /// This function will run for every extern variable and function. The returned value determines + /// the link name in the bindings. + fn generated_link_name_override( + &self, + _item_info: ItemInfo<'_>, + ) -> Option { + None + } + + /// Modify the contents of a macro + fn modify_macro(&self, _name: &str, _tokens: &mut Vec) {} + + /// The integer kind an integer macro should have, given a name and the + /// value of that macro, or `None` if you want the default to be chosen. + fn int_macro(&self, _name: &str, _value: i64) -> Option { + None + } + + /// This will be run on every string macro. The callback cannot influence the further + /// treatment of the macro, but may use the value to generate additional code or configuration. + fn str_macro(&self, _name: &str, _value: &[u8]) {} + + /// This will be run on every function-like macro. The callback cannot + /// influence the further treatment of the macro, but may use the value to + /// generate additional code or configuration. + /// + /// The first parameter represents the name and argument list (including the + /// parentheses) of the function-like macro. The second parameter represents + /// the expansion of the macro as a sequence of tokens. + fn func_macro(&self, _name: &str, _value: &[&[u8]]) {} + + /// This function should return whether, given an enum variant + /// name, and value, this enum variant will forcibly be a constant. 
+ fn enum_variant_behavior( + &self, + _enum_name: Option<&str>, + _original_variant_name: &str, + _variant_value: EnumVariantValue, + ) -> Option { + None + } + + /// Allows to rename an enum variant, replacing `_original_variant_name`. + fn enum_variant_name( + &self, + _enum_name: Option<&str>, + _original_variant_name: &str, + _variant_value: EnumVariantValue, + ) -> Option { + None + } + + /// Allows to rename an item, replacing `_item_info.name`. + fn item_name(&self, _item_info: ItemInfo) -> Option { + None + } + + /// This will be called on every header filename passed to (`Builder::header`)[`crate::Builder::header`]. + fn header_file(&self, _filename: &str) {} + + /// This will be called on every file inclusion, with the full path of the included file. + fn include_file(&self, _filename: &str) {} + + /// This will be called every time `bindgen` reads an environment variable whether it has any + /// content or not. + fn read_env_var(&self, _key: &str) {} + + /// This will be called to determine whether a particular blocklisted type + /// implements a trait or not. This will be used to implement traits on + /// other types containing the blocklisted type. + /// + /// * `None`: use the default behavior + /// * `Some(ImplementsTrait::Yes)`: `_name` implements `_derive_trait` + /// * `Some(ImplementsTrait::Manually)`: any type including `_name` can't + /// derive `_derive_trait` but can implemented it manually + /// * `Some(ImplementsTrait::No)`: `_name` doesn't implement `_derive_trait` + fn blocklisted_type_implements_trait( + &self, + _name: &str, + _derive_trait: DeriveTrait, + ) -> Option { + None + } + + /// Provide a list of custom derive attributes. + /// + /// If no additional attributes are wanted, this function should return an + /// empty `Vec`. + fn add_derives(&self, _info: &DeriveInfo<'_>) -> Vec { + vec![] + } + + /// Provide a list of custom attributes. + /// + /// If no additional attributes are wanted, this function should return an + /// empty `Vec`. + fn add_attributes(&self, _info: &AttributeInfo<'_>) -> Vec { + vec![] + } + + /// Process a source code comment. + fn process_comment(&self, _comment: &str) -> Option { + None + } + + /// Potentially override the visibility of a composite type field. + /// + /// Caution: This allows overriding standard C++ visibility inferred by + /// `respect_cxx_access_specs`. + fn field_visibility( + &self, + _info: FieldInfo<'_>, + ) -> Option { + None + } + + /// Process a function name that as exactly one `va_list` argument + /// to be wrapped as a variadic function with the wrapped static function + /// feature. + /// + /// The returned string is new function name. + #[cfg(feature = "experimental")] + fn wrap_as_variadic_fn(&self, _name: &str) -> Option { + None + } + + /// This will get called everytime an item (currently struct, union, and alias) is found with some information about it + fn new_item_found(&self, _id: DiscoveredItemId, _item: DiscoveredItem) {} + + // TODO add callback for ResolvedTypeRef +} + +/// An identifier for a discovered item. 
Used to identify an aliased type (see [`DiscoveredItem::Alias`]) +#[derive(Ord, PartialOrd, PartialEq, Eq, Hash, Debug, Clone, Copy)] +pub struct DiscoveredItemId(usize); + +impl DiscoveredItemId { + /// Constructor + pub fn new(value: usize) -> Self { + Self(value) + } +} + +/// Struct passed to [`ParseCallbacks::new_item_found`] containing information about discovered +/// items (struct, union, and alias) +#[derive(Debug, Hash, Clone, Ord, PartialOrd, Eq, PartialEq)] +pub enum DiscoveredItem { + /// Represents a struct with its original name in C and its generated binding name + Struct { + /// The original name (learnt from C) of the structure + /// Can be None if the union is anonymous. + original_name: Option, + + /// The name of the generated binding + final_name: String, + }, + + /// Represents a union with its original name in C and its generated binding name + Union { + /// The original name (learnt from C) of the structure. + /// Can be None if the union is anonymous. + original_name: Option, + + /// The name of the generated binding + final_name: String, + }, + + /// Represents an alias like a typedef + /// ```c + /// typedef struct MyStruct { + /// ... + /// } StructAlias; + /// ``` + /// Here, the name of the alias is `StructAlias` and it's an alias for `MyStruct` + Alias { + /// The name of the alias in C (`StructAlias`) + alias_name: String, + + /// The identifier of the discovered type + alias_for: DiscoveredItemId, + }, + + /// Represents an enum. + Enum { + /// The final name of the generated binding + final_name: String, + }, + + /// A function or method. + Function { + /// The final name used. + final_name: String, + }, + + /// A method. + Method { + /// The final name used. + final_name: String, + + /// Type to which this method belongs. + parent: DiscoveredItemId, + }, // modules, etc. +} + +/// Relevant information about a type to which new derive attributes will be added using +/// [`ParseCallbacks::add_derives`]. +#[derive(Debug)] +#[non_exhaustive] +pub struct DeriveInfo<'a> { + /// The name of the type. + pub name: &'a str, + /// The kind of the type. + pub kind: TypeKind, +} + +/// Relevant information about a type to which new attributes will be added using +/// [`ParseCallbacks::add_attributes`]. +#[derive(Debug)] +#[non_exhaustive] +pub struct AttributeInfo<'a> { + /// The name of the type. + pub name: &'a str, + /// The kind of the type. + pub kind: TypeKind, +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +/// The kind of the current type. +pub enum TypeKind { + /// The type is a Rust `struct`. + Struct, + /// The type is a Rust `enum`. + Enum, + /// The type is a Rust `union`. + Union, +} + +/// A struct providing information about the item being passed to [`ParseCallbacks::generated_name_override`]. +#[derive(Clone, Copy)] +#[non_exhaustive] +pub struct ItemInfo<'a> { + /// The name of the item + pub name: &'a str, + /// The kind of item + pub kind: ItemKind, +} + +/// An enum indicating the kind of item for an `ItemInfo`. +#[derive(Clone, Copy)] +#[non_exhaustive] +pub enum ItemKind { + /// A module + Module, + /// A type + Type, + /// A Function + Function, + /// A Variable + Var, +} + +/// Relevant information about a field for which visibility can be determined using +/// [`ParseCallbacks::field_visibility`]. +#[derive(Debug)] +#[non_exhaustive] +pub struct FieldInfo<'a> { + /// The name of the type. + pub type_name: &'a str, + /// The name of the field. + pub field_name: &'a str, + /// The name of the type of the field. 
+ pub field_type_name: Option<&'a str>, +} diff --git a/vendor/bindgen/clang.rs b/vendor/bindgen/clang.rs new file mode 100644 index 00000000000000..1e8326ed82082f --- /dev/null +++ b/vendor/bindgen/clang.rs @@ -0,0 +1,2448 @@ +//! A higher level Clang API built on top of the generated bindings in the +//! `clang_sys` module. + +#![allow(non_upper_case_globals, dead_code)] +#![deny(clippy::missing_docs_in_private_items)] + +use crate::ir::context::BindgenContext; +use clang_sys::*; +use std::cmp; + +use std::ffi::{CStr, CString}; +use std::fmt; +use std::fs::OpenOptions; +use std::hash::Hash; +use std::hash::Hasher; +use std::os::raw::{c_char, c_int, c_longlong, c_uint, c_ulong, c_ulonglong}; +use std::sync::OnceLock; +use std::{mem, ptr, slice}; + +/// Type representing a clang attribute. +/// +/// Values of this type can be used to check for different attributes using the `has_attrs` +/// function. +pub(crate) struct Attribute { + name: &'static [u8], + kind: Option, + token_kind: CXTokenKind, +} + +impl Attribute { + /// A `warn_unused_result` attribute. + pub(crate) const MUST_USE: Self = Self { + name: b"warn_unused_result", + // FIXME(emilio): clang-sys doesn't expose `CXCursor_WarnUnusedResultAttr` (from clang 9). + kind: Some(440), + token_kind: CXToken_Identifier, + }; + + /// A `_Noreturn` attribute. + pub(crate) const NO_RETURN: Self = Self { + name: b"_Noreturn", + kind: None, + token_kind: CXToken_Keyword, + }; + + /// A `[[noreturn]]` attribute. + pub(crate) const NO_RETURN_CPP: Self = Self { + name: b"noreturn", + kind: None, + token_kind: CXToken_Identifier, + }; +} + +/// A cursor into the Clang AST, pointing to an AST node. +/// +/// We call the AST node pointed to by the cursor the cursor's "referent". +#[derive(Copy, Clone)] +pub(crate) struct Cursor { + x: CXCursor, +} + +impl fmt::Debug for Cursor { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + write!( + fmt, + "Cursor({} kind: {}, loc: {}, usr: {:?})", + self.spelling(), + kind_to_str(self.kind()), + self.location(), + self.usr() + ) + } +} + +impl Cursor { + /// Get the Unified Symbol Resolution for this cursor's referent, if + /// available. + /// + /// The USR can be used to compare entities across translation units. + pub(crate) fn usr(&self) -> Option { + let s = unsafe { cxstring_into_string(clang_getCursorUSR(self.x)) }; + if s.is_empty() { + None + } else { + Some(s) + } + } + + /// Is this cursor's referent a declaration? + pub(crate) fn is_declaration(&self) -> bool { + unsafe { clang_isDeclaration(self.kind()) != 0 } + } + + /// Is this cursor's referent an anonymous record or so? + pub(crate) fn is_anonymous(&self) -> bool { + unsafe { clang_Cursor_isAnonymous(self.x) != 0 } + } + + /// Get this cursor's referent's spelling. + pub(crate) fn spelling(&self) -> String { + unsafe { cxstring_into_string(clang_getCursorSpelling(self.x)) } + } + + /// Get this cursor's referent's display name. + /// + /// This is not necessarily a valid identifier. It includes extra + /// information, such as parameters for a function, etc. + pub(crate) fn display_name(&self) -> String { + unsafe { cxstring_into_string(clang_getCursorDisplayName(self.x)) } + } + + /// Get the mangled name of this cursor's referent. + pub(crate) fn mangling(&self) -> String { + unsafe { cxstring_into_string(clang_Cursor_getMangling(self.x)) } + } + + /// Gets the C++ manglings for this cursor, or an error if the manglings + /// are not available. 
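// Illustrative sketch (not part of the vendored bindgen sources or of this patch):
// a consumer of the ParseCallbacks trait documented above typically implements only
// the hooks it needs and registers the implementation on the Builder. The "PY_"
// prefix below is a hypothetical example, not something this patch uses.
//
//     #[derive(Debug)]
//     struct StripPrefix;
//
//     impl bindgen::callbacks::ParseCallbacks for StripPrefix {
//         // Rename every discovered item by dropping a hypothetical "PY_" prefix;
//         // returning None keeps the original name.
//         fn item_name(
//             &self,
//             item_info: bindgen::callbacks::ItemInfo,
//         ) -> Option<String> {
//             item_info.name.strip_prefix("PY_").map(str::to_owned)
//         }
//     }
//
//     // Registered with something like:
//     //     bindgen::Builder::default()
//     //         .header("wrapper.h")
//     //         .parse_callbacks(Box::new(StripPrefix))
//     //         .generate();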
+ pub(crate) fn cxx_manglings(&self) -> Result, ()> { + use clang_sys::*; + unsafe { + let manglings = clang_Cursor_getCXXManglings(self.x); + if manglings.is_null() { + return Err(()); + } + let count = (*manglings).Count as usize; + + let mut result = Vec::with_capacity(count); + for i in 0..count { + let string_ptr = (*manglings).Strings.add(i); + result.push(cxstring_to_string_leaky(*string_ptr)); + } + clang_disposeStringSet(manglings); + Ok(result) + } + } + + /// Returns whether the cursor refers to a built-in definition. + pub(crate) fn is_builtin(&self) -> bool { + let (file, _, _, _) = self.location().location(); + file.name().is_none() + } + + /// Get the `Cursor` for this cursor's referent's lexical parent. + /// + /// The lexical parent is the parent of the definition. The semantic parent + /// is the parent of the declaration. Generally, the lexical parent doesn't + /// have any effect on semantics, while the semantic parent does. + /// + /// In the following snippet, the `Foo` class would be the semantic parent + /// of the out-of-line `method` definition, while the lexical parent is the + /// translation unit. + /// + /// ```c++ + /// class Foo { + /// void method(); + /// }; + /// + /// void Foo::method() { /* ... */ } + /// ``` + pub(crate) fn lexical_parent(&self) -> Cursor { + unsafe { + Cursor { + x: clang_getCursorLexicalParent(self.x), + } + } + } + + /// Get the referent's semantic parent, if one is available. + /// + /// See documentation for `lexical_parent` for details on semantic vs + /// lexical parents. + pub(crate) fn fallible_semantic_parent(&self) -> Option { + let sp = unsafe { + Cursor { + x: clang_getCursorSemanticParent(self.x), + } + }; + if sp == *self || !sp.is_valid() { + return None; + } + Some(sp) + } + + /// Get the referent's semantic parent. + /// + /// See documentation for `lexical_parent` for details on semantic vs + /// lexical parents. + pub(crate) fn semantic_parent(&self) -> Cursor { + self.fallible_semantic_parent().unwrap() + } + + /// Return the number of template arguments used by this cursor's referent, + /// if the referent is either a template instantiation. Returns `None` + /// otherwise. + /// + /// NOTE: This may not return `Some` for partial template specializations, + /// see #193 and #194. + pub(crate) fn num_template_args(&self) -> Option { + // XXX: `clang_Type_getNumTemplateArguments` is sort of reliable, while + // `clang_Cursor_getNumTemplateArguments` is totally unreliable. + // Therefore, try former first, and only fallback to the latter if we + // have to. + self.cur_type() + .num_template_args() + .or_else(|| { + let n: c_int = + unsafe { clang_Cursor_getNumTemplateArguments(self.x) }; + + if n >= 0 { + Some(n as u32) + } else { + debug_assert_eq!(n, -1); + None + } + }) + .or_else(|| { + let canonical = self.canonical(); + if canonical == *self { + None + } else { + canonical.num_template_args() + } + }) + } + + /// Get a cursor pointing to this referent's containing translation unit. + /// + /// Note that we shouldn't create a `TranslationUnit` struct here, because + /// bindgen assumes there will only be one of them alive at a time, and + /// disposes it on drop. That can change if this would be required, but I + /// think we can survive fine without it. 
+ pub(crate) fn translation_unit(&self) -> Cursor { + assert!(self.is_valid()); + unsafe { + let tu = clang_Cursor_getTranslationUnit(self.x); + let cursor = Cursor { + x: clang_getTranslationUnitCursor(tu), + }; + assert!(cursor.is_valid()); + cursor + } + } + + /// Is the referent a top level construct? + pub(crate) fn is_toplevel(&self) -> bool { + let mut semantic_parent = self.fallible_semantic_parent(); + + while semantic_parent.is_some() && + (semantic_parent.unwrap().kind() == CXCursor_Namespace || + semantic_parent.unwrap().kind() == + CXCursor_NamespaceAlias || + semantic_parent.unwrap().kind() == CXCursor_NamespaceRef) + { + semantic_parent = + semantic_parent.unwrap().fallible_semantic_parent(); + } + + let tu = self.translation_unit(); + // Yes, this can happen with, e.g., macro definitions. + semantic_parent == tu.fallible_semantic_parent() + } + + /// There are a few kinds of types that we need to treat specially, mainly + /// not tracking the type declaration but the location of the cursor, given + /// clang doesn't expose a proper declaration for these types. + pub(crate) fn is_template_like(&self) -> bool { + matches!( + self.kind(), + CXCursor_ClassTemplate | + CXCursor_ClassTemplatePartialSpecialization | + CXCursor_TypeAliasTemplateDecl + ) + } + + /// Is this Cursor pointing to a function-like macro definition? + pub(crate) fn is_macro_function_like(&self) -> bool { + unsafe { clang_Cursor_isMacroFunctionLike(self.x) != 0 } + } + + /// Get the kind of referent this cursor is pointing to. + pub(crate) fn kind(&self) -> CXCursorKind { + self.x.kind + } + + /// Returns true if the cursor is a definition + pub(crate) fn is_definition(&self) -> bool { + unsafe { clang_isCursorDefinition(self.x) != 0 } + } + + /// Is the referent a template specialization? + pub(crate) fn is_template_specialization(&self) -> bool { + self.specialized().is_some() + } + + /// Is the referent a fully specialized template specialization without any + /// remaining free template arguments? + pub(crate) fn is_fully_specialized_template(&self) -> bool { + self.is_template_specialization() && + self.kind() != CXCursor_ClassTemplatePartialSpecialization && + self.num_template_args().unwrap_or(0) > 0 + } + + /// Is the referent a template specialization that still has remaining free + /// template arguments? + pub(crate) fn is_in_non_fully_specialized_template(&self) -> bool { + if self.is_toplevel() { + return false; + } + + let parent = self.semantic_parent(); + if parent.is_fully_specialized_template() { + return false; + } + + if !parent.is_template_like() { + return parent.is_in_non_fully_specialized_template(); + } + + true + } + + /// Is the referent any kind of template parameter? + pub(crate) fn is_template_parameter(&self) -> bool { + matches!( + self.kind(), + CXCursor_TemplateTemplateParameter | + CXCursor_TemplateTypeParameter | + CXCursor_NonTypeTemplateParameter + ) + } + + /// Does the referent's type or value depend on a template parameter? + pub(crate) fn is_dependent_on_template_parameter(&self) -> bool { + fn visitor( + found_template_parameter: &mut bool, + cur: Cursor, + ) -> CXChildVisitResult { + // If we found a template parameter, it is dependent. + if cur.is_template_parameter() { + *found_template_parameter = true; + return CXChildVisit_Break; + } + + // Get the referent and traverse it as well. 
+ if let Some(referenced) = cur.referenced() { + if referenced.is_template_parameter() { + *found_template_parameter = true; + return CXChildVisit_Break; + } + + referenced + .visit(|next| visitor(found_template_parameter, next)); + if *found_template_parameter { + return CXChildVisit_Break; + } + } + + // Continue traversing the AST at the original cursor. + CXChildVisit_Recurse + } + + if self.is_template_parameter() { + return true; + } + + let mut found_template_parameter = false; + self.visit(|next| visitor(&mut found_template_parameter, next)); + + found_template_parameter + } + + /// Is this cursor pointing a valid referent? + pub(crate) fn is_valid(&self) -> bool { + unsafe { clang_isInvalid(self.kind()) == 0 } + } + + /// Get the source location for the referent. + pub(crate) fn location(&self) -> SourceLocation { + unsafe { + SourceLocation { + x: clang_getCursorLocation(self.x), + } + } + } + + /// Get the source location range for the referent. + pub(crate) fn extent(&self) -> CXSourceRange { + unsafe { clang_getCursorExtent(self.x) } + } + + /// Get the raw declaration comment for this referent, if one exists. + pub(crate) fn raw_comment(&self) -> Option { + let s = unsafe { + cxstring_into_string(clang_Cursor_getRawCommentText(self.x)) + }; + if s.is_empty() { + None + } else { + Some(s) + } + } + + /// Get the referent's parsed comment. + pub(crate) fn comment(&self) -> Comment { + unsafe { + Comment { + x: clang_Cursor_getParsedComment(self.x), + } + } + } + + /// Get the referent's type. + pub(crate) fn cur_type(&self) -> Type { + unsafe { + Type { + x: clang_getCursorType(self.x), + } + } + } + + /// Given that this cursor's referent is a reference to another type, or is + /// a declaration, get the cursor pointing to the referenced type or type of + /// the declared thing. + pub(crate) fn definition(&self) -> Option { + unsafe { + let ret = Cursor { + x: clang_getCursorDefinition(self.x), + }; + + if ret.is_valid() && ret.kind() != CXCursor_NoDeclFound { + Some(ret) + } else { + None + } + } + } + + /// Given that this cursor's referent is reference type, get the cursor + /// pointing to the referenced type. + pub(crate) fn referenced(&self) -> Option { + unsafe { + let ret = Cursor { + x: clang_getCursorReferenced(self.x), + }; + + if ret.is_valid() { + Some(ret) + } else { + None + } + } + } + + /// Get the canonical cursor for this referent. + /// + /// Many types can be declared multiple times before finally being properly + /// defined. This method allows us to get the canonical cursor for the + /// referent type. + pub(crate) fn canonical(&self) -> Cursor { + unsafe { + Cursor { + x: clang_getCanonicalCursor(self.x), + } + } + } + + /// Given that this cursor points to either a template specialization or a + /// template instantiation, get a cursor pointing to the template definition + /// that is being specialized. + pub(crate) fn specialized(&self) -> Option { + unsafe { + let ret = Cursor { + x: clang_getSpecializedCursorTemplate(self.x), + }; + if ret.is_valid() { + Some(ret) + } else { + None + } + } + } + + /// Assuming that this cursor's referent is a template declaration, get the + /// kind of cursor that would be generated for its specializations. + pub(crate) fn template_kind(&self) -> CXCursorKind { + unsafe { clang_getTemplateCursorKind(self.x) } + } + + /// Traverse this cursor's referent and its children. + /// + /// Call the given function on each AST node traversed. 
+ pub(crate) fn visit(&self, mut visitor: Visitor) + where + Visitor: FnMut(Cursor) -> CXChildVisitResult, + { + let data = ptr::addr_of_mut!(visitor); + unsafe { + clang_visitChildren(self.x, visit_children::, data.cast()); + } + } + + /// Traverse all of this cursor's children, sorted by where they appear in source code. + /// + /// Call the given function on each AST node traversed. + pub(crate) fn visit_sorted( + &self, + ctx: &mut BindgenContext, + mut visitor: Visitor, + ) where + Visitor: FnMut(&mut BindgenContext, Cursor), + { + // FIXME(#2556): The current source order stuff doesn't account well for different levels + // of includes, or includes that show up at the same byte offset because they are passed in + // via CLI. + const SOURCE_ORDER_ENABLED: bool = false; + if !SOURCE_ORDER_ENABLED { + return self.visit(|c| { + visitor(ctx, c); + CXChildVisit_Continue + }); + } + + let mut children = self.collect_children(); + for child in &children { + if child.kind() == CXCursor_InclusionDirective { + if let Some(included_file) = child.get_included_file_name() { + let location = child.location(); + let (source_file, _, _, offset) = location.location(); + + if let Some(source_file) = source_file.name() { + ctx.add_include(source_file, included_file, offset); + } + } + } + } + children + .sort_by(|child1, child2| child1.cmp_by_source_order(child2, ctx)); + for child in children { + visitor(ctx, child); + } + } + + /// Compare source order of two cursors, considering `#include` directives. + /// + /// Built-in items provided by the compiler (which don't have a source file), + /// are sorted first. Remaining files are sorted by their position in the source file. + /// If the items' source files differ, they are sorted by the position of the first + /// `#include` for their source file. If no source files are included, `None` is returned. + fn cmp_by_source_order( + &self, + other: &Self, + ctx: &BindgenContext, + ) -> cmp::Ordering { + let (file, _, _, offset) = self.location().location(); + let (other_file, _, _, other_offset) = other.location().location(); + + let (file, other_file) = match (file.name(), other_file.name()) { + (Some(file), Some(other_file)) => (file, other_file), + // Built-in definitions should come first. + (Some(_), None) => return cmp::Ordering::Greater, + (None, Some(_)) => return cmp::Ordering::Less, + (None, None) => return cmp::Ordering::Equal, + }; + + if file == other_file { + // Both items are in the same source file, compare by byte offset. + return offset.cmp(&other_offset); + } + + let include_location = ctx.included_file_location(&file); + let other_include_location = ctx.included_file_location(&other_file); + match (include_location, other_include_location) { + (Some((file2, offset2)), _) if file2 == other_file => { + offset2.cmp(&other_offset) + } + (Some(_), None) => cmp::Ordering::Greater, + (_, Some((other_file2, other_offset2))) if file == other_file2 => { + offset.cmp(&other_offset2) + } + (None, Some(_)) => cmp::Ordering::Less, + (Some((file2, offset2)), Some((other_file2, other_offset2))) => { + if file2 == other_file2 { + offset2.cmp(&other_offset2) + } else { + cmp::Ordering::Equal + } + } + (None, None) => cmp::Ordering::Equal, + } + } + + /// Collect all of this cursor's children into a vec and return them. + pub(crate) fn collect_children(&self) -> Vec { + let mut children = vec![]; + self.visit(|c| { + children.push(c); + CXChildVisit_Continue + }); + children + } + + /// Does this cursor have any children? 
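The `visit` method here threads an arbitrary Rust closure through a C callback by monomorphising a trampoline and smuggling the closure behind a type-erased pointer. A minimal standalone sketch of that mechanism follows, with a plain Rust `walk_nodes` function standing in for `clang_visitChildren`; the stand-in names and the integer "nodes" are illustrative only, not part of the vendored API:

    use std::os::raw::c_void;
    use std::ptr;

    // Stand-in for a C traversal routine such as clang_visitChildren: it calls
    // `callback` once per "node", threading the opaque `data` pointer through,
    // and stops as soon as the callback asks it to.
    fn walk_nodes(
        nodes: &[i32],
        callback: extern "C" fn(i32, *mut c_void) -> bool,
        data: *mut c_void,
    ) {
        for &node in nodes {
            if !callback(node, data) {
                break;
            }
        }
    }

    // Monomorphised trampoline: recovers the concrete closure type from the
    // type-erased pointer and calls it, mirroring `visit_children` below.
    extern "C" fn trampoline<V>(node: i32, data: *mut c_void) -> bool
    where
        V: FnMut(i32) -> bool,
    {
        let visitor: &mut V = unsafe { &mut *data.cast::<V>() };
        visitor(node)
    }

    // Safe wrapper in the style of `Cursor::visit`: erase the closure's type,
    // hand the C side a matching trampoline, and recover the type inside it.
    fn visit<V>(nodes: &[i32], mut visitor: V)
    where
        V: FnMut(i32) -> bool,
    {
        let data = ptr::addr_of_mut!(visitor);
        walk_nodes(nodes, trampoline::<V>, data.cast());
    }

    fn main() {
        let mut sum = 0;
        visit(&[1, 2, 3, 4], |n| {
            sum += n;
            n < 3 // keep going only while the node is below 3
        });
        assert_eq!(sum, 6); // 4 is never visited
        println!("visited sum = {sum}");
    }

The key detail is that the trampoline is instantiated per closure type, so the cast back from the opaque pointer recovers exactly the type that was erased.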
+ pub(crate) fn has_children(&self) -> bool { + let mut has_children = false; + self.visit(|_| { + has_children = true; + CXChildVisit_Break + }); + has_children + } + + /// Does this cursor have at least `n` children? + pub(crate) fn has_at_least_num_children(&self, n: usize) -> bool { + assert!(n > 0); + let mut num_left = n; + self.visit(|_| { + num_left -= 1; + if num_left == 0 { + CXChildVisit_Break + } else { + CXChildVisit_Continue + } + }); + num_left == 0 + } + + /// Returns whether the given location contains a cursor with the given + /// kind in the first level of nesting underneath (doesn't look + /// recursively). + pub(crate) fn contains_cursor(&self, kind: CXCursorKind) -> bool { + let mut found = false; + + self.visit(|c| { + if c.kind() == kind { + found = true; + CXChildVisit_Break + } else { + CXChildVisit_Continue + } + }); + + found + } + + /// Is the referent an inlined function? + pub(crate) fn is_inlined_function(&self) -> bool { + unsafe { clang_Cursor_isFunctionInlined(self.x) != 0 } + } + + /// Is the referent a defaulted function? + pub(crate) fn is_defaulted_function(&self) -> bool { + unsafe { clang_CXXMethod_isDefaulted(self.x) != 0 } + } + + /// Is the referent a deleted function? + pub(crate) fn is_deleted_function(&self) -> bool { + // Unfortunately, libclang doesn't yet have an API for checking if a + // member function is deleted, but the following should be a good + // enough approximation. + // Deleted functions are implicitly inline according to paragraph 4 of + // [dcl.fct.def.delete] in the C++ standard. Normal inline functions + // have a definition in the same translation unit, so if this is an + // inline function without a definition, and it's not a defaulted + // function, we can reasonably safely conclude that it's a deleted + // function. + self.is_inlined_function() && + self.definition().is_none() && + !self.is_defaulted_function() + } + + /// Is the referent a bit field declaration? + pub(crate) fn is_bit_field(&self) -> bool { + unsafe { clang_Cursor_isBitField(self.x) != 0 } + } + + /// Get a cursor to the bit field's width expression, or `None` if it's not + /// a bit field. + pub(crate) fn bit_width_expr(&self) -> Option { + if !self.is_bit_field() { + return None; + } + + let mut result = None; + self.visit(|cur| { + // The first child may or may not be a TypeRef, depending on whether + // the field's type is builtin. Skip it. + if cur.kind() == CXCursor_TypeRef { + return CXChildVisit_Continue; + } + + // The next expression or literal is the bit width. + result = Some(cur); + + CXChildVisit_Break + }); + + result + } + + /// Get the width of this cursor's referent bit field, or `None` if the + /// referent is not a bit field or if the width could not be evaluated. + pub(crate) fn bit_width(&self) -> Option { + // It is not safe to check the bit width without ensuring it doesn't + // depend on a template parameter. See + // https://github.com/rust-lang/rust-bindgen/issues/2239 + if self.bit_width_expr()?.is_dependent_on_template_parameter() { + return None; + } + + unsafe { + let w = clang_getFieldDeclBitWidth(self.x); + if w == -1 { + None + } else { + Some(w as u32) + } + } + } + + /// Get the integer representation type used to hold this cursor's referent + /// enum type. + pub(crate) fn enum_type(&self) -> Option { + unsafe { + let t = Type { + x: clang_getEnumDeclIntegerType(self.x), + }; + if t.is_valid() { + Some(t) + } else { + None + } + } + } + + /// Get the boolean constant value for this cursor's enum variant referent. 
+ /// + /// Returns None if the cursor's referent is not an enum variant. + pub(crate) fn enum_val_boolean(&self) -> Option { + unsafe { + if self.kind() == CXCursor_EnumConstantDecl { + Some(clang_getEnumConstantDeclValue(self.x) != 0) + } else { + None + } + } + } + + /// Get the signed constant value for this cursor's enum variant referent. + /// + /// Returns None if the cursor's referent is not an enum variant. + pub(crate) fn enum_val_signed(&self) -> Option { + unsafe { + if self.kind() == CXCursor_EnumConstantDecl { + #[allow(clippy::unnecessary_cast)] + Some(clang_getEnumConstantDeclValue(self.x) as i64) + } else { + None + } + } + } + + /// Get the unsigned constant value for this cursor's enum variant referent. + /// + /// Returns None if the cursor's referent is not an enum variant. + pub(crate) fn enum_val_unsigned(&self) -> Option { + unsafe { + if self.kind() == CXCursor_EnumConstantDecl { + #[allow(clippy::unnecessary_cast)] + Some(clang_getEnumConstantDeclUnsignedValue(self.x) as u64) + } else { + None + } + } + } + + /// Does this cursor have the given attributes? + pub(crate) fn has_attrs( + &self, + attrs: &[Attribute; N], + ) -> [bool; N] { + let mut found_attrs = [false; N]; + let mut found_count = 0; + + self.visit(|cur| { + let kind = cur.kind(); + for (idx, attr) in attrs.iter().enumerate() { + let found_attr = &mut found_attrs[idx]; + if !*found_attr { + // `attr.name` and` attr.token_kind` are checked against unexposed attributes only. + if attr.kind == Some(kind) || + (kind == CXCursor_UnexposedAttr && + cur.tokens().iter().any(|t| { + t.kind == attr.token_kind && + t.spelling() == attr.name + })) + { + *found_attr = true; + found_count += 1; + + if found_count == N { + return CXChildVisit_Break; + } + } + } + } + + CXChildVisit_Continue + }); + + found_attrs + } + + /// Given that this cursor's referent is a `typedef`, get the `Type` that is + /// being aliased. + pub(crate) fn typedef_type(&self) -> Option { + let inner = Type { + x: unsafe { clang_getTypedefDeclUnderlyingType(self.x) }, + }; + + if inner.is_valid() { + Some(inner) + } else { + None + } + } + + /// Get the linkage kind for this cursor's referent. + /// + /// This only applies to functions and variables. + pub(crate) fn linkage(&self) -> CXLinkageKind { + unsafe { clang_getCursorLinkage(self.x) } + } + + /// Get the visibility of this cursor's referent. + pub(crate) fn visibility(&self) -> CXVisibilityKind { + unsafe { clang_getCursorVisibility(self.x) } + } + + /// Given that this cursor's referent is a function, return cursors to its + /// parameters. + /// + /// Returns None if the cursor's referent is not a function/method call or + /// declaration. + pub(crate) fn args(&self) -> Option> { + // match self.kind() { + // CXCursor_FunctionDecl | + // CXCursor_CXXMethod => { + self.num_args().ok().map(|num| { + (0..num) + .map(|i| Cursor { + x: unsafe { clang_Cursor_getArgument(self.x, i as c_uint) }, + }) + .collect() + }) + } + + /// Given that this cursor's referent is a function/method call or + /// declaration, return the number of arguments it takes. + /// + /// Returns Err if the cursor's referent is not a function/method call or + /// declaration. + pub(crate) fn num_args(&self) -> Result { + unsafe { + let w = clang_Cursor_getNumArguments(self.x); + if w == -1 { + Err(()) + } else { + Ok(w as u32) + } + } + } + + /// Get the access specifier for this cursor's referent. 
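`has_attrs` leans on const generics so that a single AST walk can check any fixed number of attributes and bail out as soon as all of them have been found. Below is a standalone sketch of the same search shape, using a slice of strings in place of the token stream; the names (`find_all`, the sample tokens) are illustrative and not part of the vendored code:

    // One pass over the haystack, one flag per needle, early exit once every
    // needle has been seen -- the same shape as `has_attrs` above.
    fn find_all<const N: usize>(haystack: &[&str], needles: &[&str; N]) -> [bool; N] {
        let mut found = [false; N];
        let mut found_count = 0;

        for item in haystack {
            for (idx, needle) in needles.iter().enumerate() {
                if !found[idx] && item == needle {
                    found[idx] = true;
                    found_count += 1;
                    if found_count == N {
                        return found;
                    }
                }
            }
        }

        found
    }

    fn main() {
        let tokens = ["noreturn", "inline", "warn_unused_result"];
        let [has_noreturn, has_must_use] =
            find_all(&tokens, &["noreturn", "warn_unused_result"]);
        assert!(has_noreturn && has_must_use);
        println!("both attributes found");
    }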
+ pub(crate) fn access_specifier(&self) -> CX_CXXAccessSpecifier { + unsafe { clang_getCXXAccessSpecifier(self.x) } + } + + /// Is the cursor's referent publicly accessible in C++? + /// + /// Returns true if `self.access_specifier()` is `CX_CXXPublic` or + /// `CX_CXXInvalidAccessSpecifier`. + pub(crate) fn public_accessible(&self) -> bool { + let access = self.access_specifier(); + access == CX_CXXPublic || access == CX_CXXInvalidAccessSpecifier + } + + /// Is this cursor's referent a field declaration that is marked as + /// `mutable`? + pub(crate) fn is_mutable_field(&self) -> bool { + unsafe { clang_CXXField_isMutable(self.x) != 0 } + } + + /// Get the offset of the field represented by the Cursor. + pub(crate) fn offset_of_field(&self) -> Result { + let offset = unsafe { clang_Cursor_getOffsetOfField(self.x) }; + + if offset < 0 { + Err(LayoutError::from(offset as i32)) + } else { + Ok(offset as usize) + } + } + + /// Is this cursor's referent a member function that is declared `static`? + pub(crate) fn method_is_static(&self) -> bool { + unsafe { clang_CXXMethod_isStatic(self.x) != 0 } + } + + /// Is this cursor's referent a member function that is declared `const`? + pub(crate) fn method_is_const(&self) -> bool { + unsafe { clang_CXXMethod_isConst(self.x) != 0 } + } + + /// Is this cursor's referent a member function that is virtual? + pub(crate) fn method_is_virtual(&self) -> bool { + unsafe { clang_CXXMethod_isVirtual(self.x) != 0 } + } + + /// Is this cursor's referent a member function that is pure virtual? + pub(crate) fn method_is_pure_virtual(&self) -> bool { + unsafe { clang_CXXMethod_isPureVirtual(self.x) != 0 } + } + + /// Is this cursor's referent a struct or class with virtual members? + pub(crate) fn is_virtual_base(&self) -> bool { + unsafe { clang_isVirtualBase(self.x) != 0 } + } + + /// Try to evaluate this cursor. + pub(crate) fn evaluate(&self) -> Option { + EvalResult::new(*self) + } + + /// Return the result type for this cursor + pub(crate) fn ret_type(&self) -> Option { + let rt = Type { + x: unsafe { clang_getCursorResultType(self.x) }, + }; + if rt.is_valid() { + Some(rt) + } else { + None + } + } + + /// Gets the tokens that correspond to that cursor. + pub(crate) fn tokens(&self) -> RawTokens<'_> { + RawTokens::new(self) + } + + /// Gets the tokens that correspond to that cursor as `cexpr` tokens. + pub(crate) fn cexpr_tokens(self) -> Vec { + self.tokens() + .iter() + .filter_map(|token| token.as_cexpr_token()) + .collect() + } + + /// Obtain the real path name of a cursor of `InclusionDirective` kind. + /// + /// Returns None if the cursor does not include a file, otherwise the file's full name + pub(crate) fn get_included_file_name(&self) -> Option { + let file = unsafe { clang_getIncludedFile(self.x) }; + if file.is_null() { + None + } else { + Some(unsafe { cxstring_into_string(clang_getFileName(file)) }) + } + } + + /// Is this cursor's referent a namespace that is inline? + pub(crate) fn is_inline_namespace(&self) -> bool { + unsafe { clang_Cursor_isInlineNamespace(self.x) != 0 } + } +} + +/// A struct that owns the tokenizer result from a given cursor. 
+pub(crate) struct RawTokens<'a> { + cursor: &'a Cursor, + tu: CXTranslationUnit, + tokens: *mut CXToken, + token_count: c_uint, +} + +impl<'a> RawTokens<'a> { + fn new(cursor: &'a Cursor) -> Self { + let mut tokens = ptr::null_mut(); + let mut token_count = 0; + let range = cursor.extent(); + let tu = unsafe { clang_Cursor_getTranslationUnit(cursor.x) }; + unsafe { clang_tokenize(tu, range, &mut tokens, &mut token_count) }; + Self { + cursor, + tu, + tokens, + token_count, + } + } + + fn as_slice(&self) -> &[CXToken] { + if self.tokens.is_null() { + return &[]; + } + unsafe { slice::from_raw_parts(self.tokens, self.token_count as usize) } + } + + /// Get an iterator over these tokens. + pub(crate) fn iter(&self) -> ClangTokenIterator<'_> { + ClangTokenIterator { + tu: self.tu, + raw: self.as_slice().iter(), + } + } +} + +impl Drop for RawTokens<'_> { + fn drop(&mut self) { + if !self.tokens.is_null() { + unsafe { + clang_disposeTokens( + self.tu, + self.tokens, + self.token_count as c_uint, + ); + } + } + } +} + +/// A raw clang token, that exposes only kind, spelling, and extent. This is a +/// slightly more convenient version of `CXToken` which owns the spelling +/// string and extent. +#[derive(Debug)] +pub(crate) struct ClangToken { + spelling: CXString, + /// The extent of the token. This is the same as the relevant member from + /// `CXToken`. + pub(crate) extent: CXSourceRange, + /// The kind of the token. This is the same as the relevant member from + /// `CXToken`. + pub(crate) kind: CXTokenKind, +} + +impl ClangToken { + /// Get the token spelling, without being converted to utf-8. + pub(crate) fn spelling(&self) -> &[u8] { + let c_str = unsafe { CStr::from_ptr(clang_getCString(self.spelling)) }; + c_str.to_bytes() + } + + /// Converts a `ClangToken` to a `cexpr` token if possible. + pub(crate) fn as_cexpr_token(&self) -> Option { + use cexpr::token; + + let kind = match self.kind { + CXToken_Punctuation => token::Kind::Punctuation, + CXToken_Literal => token::Kind::Literal, + CXToken_Identifier => token::Kind::Identifier, + CXToken_Keyword => token::Kind::Keyword, + // NB: cexpr is not too happy about comments inside + // expressions, so we strip them down here. + CXToken_Comment => return None, + _ => { + warn!("Found unexpected token kind: {self:?}"); + return None; + } + }; + + Some(token::Token { + kind, + raw: self.spelling().to_vec().into_boxed_slice(), + }) + } +} + +impl Drop for ClangToken { + fn drop(&mut self) { + unsafe { clang_disposeString(self.spelling) } + } +} + +/// An iterator over a set of Tokens. +pub(crate) struct ClangTokenIterator<'a> { + tu: CXTranslationUnit, + raw: slice::Iter<'a, CXToken>, +} + +impl Iterator for ClangTokenIterator<'_> { + type Item = ClangToken; + + fn next(&mut self) -> Option { + let raw = self.raw.next()?; + unsafe { + let kind = clang_getTokenKind(*raw); + let spelling = clang_getTokenSpelling(self.tu, *raw); + let extent = clang_getTokenExtent(self.tu, *raw); + Some(ClangToken { + spelling, + extent, + kind, + }) + } + } +} + +/// Checks whether the name looks like an identifier, i.e. is alphanumeric +/// (including '_') and does not start with a digit. 
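`RawTokens` and `ClangToken` follow the same ownership rule: whoever receives a raw libclang allocation stores it together with whatever is needed to free it, and the matching dispose call lives in `Drop` so it runs exactly once on every exit path. A self-contained analogue of that pattern is sketched below; `ffi_make_buffer` and `ffi_dispose_buffer` are illustrative stand-ins for a pair like `clang_tokenize` / `clang_disposeTokens`, not real libclang calls:

    use std::ptr;

    // Stand-ins for an FFI pair: the first hands back a raw heap buffer, the
    // second must be called exactly once to release it.
    fn ffi_make_buffer(len: usize) -> *mut u8 {
        Box::into_raw(vec![7u8; len].into_boxed_slice()) as *mut u8
    }

    unsafe fn ffi_dispose_buffer(buf: *mut u8, len: usize) {
        drop(Box::from_raw(ptr::slice_from_raw_parts_mut(buf, len)));
    }

    // Owns the raw buffer and guarantees disposal in `Drop`, mirroring how
    // `RawTokens` pairs the tokenize call with the dispose call.
    struct Buffer {
        ptr: *mut u8,
        len: usize,
    }

    impl Buffer {
        fn new(len: usize) -> Self {
            Buffer { ptr: ffi_make_buffer(len), len }
        }

        fn as_slice(&self) -> &[u8] {
            if self.ptr.is_null() {
                return &[];
            }
            unsafe { std::slice::from_raw_parts(self.ptr, self.len) }
        }
    }

    impl Drop for Buffer {
        fn drop(&mut self) {
            if !self.ptr.is_null() {
                unsafe { ffi_dispose_buffer(self.ptr, self.len) };
            }
        }
    }

    fn main() {
        let buf = Buffer::new(4);
        assert_eq!(buf.as_slice(), &[7, 7, 7, 7]);
        // Dropping `buf` here runs the dispose call exactly once.
    }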
+pub(crate) fn is_valid_identifier(name: &str) -> bool { + let mut chars = name.chars(); + let first_valid = + chars.next().is_some_and(|c| c.is_alphabetic() || c == '_'); + + first_valid && chars.all(|c| c.is_alphanumeric() || c == '_') +} + +extern "C" fn visit_children( + cur: CXCursor, + _parent: CXCursor, + data: CXClientData, +) -> CXChildVisitResult +where + Visitor: FnMut(Cursor) -> CXChildVisitResult, +{ + let func: &mut Visitor = unsafe { &mut *data.cast::() }; + let child = Cursor { x: cur }; + + (*func)(child) +} + +impl PartialEq for Cursor { + fn eq(&self, other: &Cursor) -> bool { + unsafe { clang_equalCursors(self.x, other.x) == 1 } + } +} + +impl Eq for Cursor {} + +impl Hash for Cursor { + fn hash(&self, state: &mut H) { + unsafe { clang_hashCursor(self.x) }.hash(state); + } +} + +/// The type of a node in clang's AST. +#[derive(Clone, Copy)] +pub(crate) struct Type { + x: CXType, +} + +impl PartialEq for Type { + fn eq(&self, other: &Self) -> bool { + unsafe { clang_equalTypes(self.x, other.x) != 0 } + } +} + +impl Eq for Type {} + +impl fmt::Debug for Type { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + write!( + fmt, + "Type({}, kind: {}, cconv: {}, decl: {:?}, canon: {:?})", + self.spelling(), + type_to_str(self.kind()), + self.call_conv(), + self.declaration(), + self.declaration().canonical() + ) + } +} + +/// An error about the layout of a struct, class, or type. +#[derive(Debug, Copy, Clone, Eq, PartialEq, Hash)] +pub(crate) enum LayoutError { + /// Asked for the layout of an invalid type. + Invalid, + /// Asked for the layout of an incomplete type. + Incomplete, + /// Asked for the layout of a dependent type. + Dependent, + /// Asked for the layout of a type that does not have constant size. + NotConstantSize, + /// Asked for the layout of a field in a type that does not have such a + /// field. + InvalidFieldName, + /// An unknown layout error. + Unknown, +} + +impl ::std::convert::From for LayoutError { + fn from(val: i32) -> Self { + use self::LayoutError::*; + + match val { + CXTypeLayoutError_Invalid => Invalid, + CXTypeLayoutError_Incomplete => Incomplete, + CXTypeLayoutError_Dependent => Dependent, + CXTypeLayoutError_NotConstantSize => NotConstantSize, + CXTypeLayoutError_InvalidFieldName => InvalidFieldName, + _ => Unknown, + } + } +} + +impl Type { + /// Get this type's kind. + pub(crate) fn kind(&self) -> CXTypeKind { + self.x.kind + } + + /// Get a cursor pointing to this type's declaration. + pub(crate) fn declaration(&self) -> Cursor { + let decl = Cursor { + x: unsafe { clang_getTypeDeclaration(self.x) }, + }; + // Prior to clang 22, the declaration pointed to the definition. + decl.definition().unwrap_or(decl) + } + + /// Get the canonical declaration of this type, if it is available. + pub(crate) fn canonical_declaration( + &self, + location: Option<&Cursor>, + ) -> Option { + let mut declaration = self.declaration(); + if !declaration.is_valid() { + if let Some(location) = location { + let mut location = *location; + if let Some(referenced) = location.referenced() { + location = referenced; + } + if location.is_template_like() { + declaration = location; + } + } + } + + let canonical = declaration.canonical(); + if canonical.is_valid() && canonical.kind() != CXCursor_NoDeclFound { + Some(CanonicalTypeDeclaration(*self, canonical)) + } else { + None + } + } + + /// Get a raw display name for this type. 
+ pub(crate) fn spelling(&self) -> String { + let s = unsafe { cxstring_into_string(clang_getTypeSpelling(self.x)) }; + // Clang 5.0 introduced changes in the spelling API so it returned the + // full qualified name. Let's undo that here. + if s.split("::").all(is_valid_identifier) { + if let Some(s) = s.split("::").last() { + return s.to_owned(); + } + } + + s + } + + /// Is this type const qualified? + pub(crate) fn is_const(&self) -> bool { + unsafe { clang_isConstQualifiedType(self.x) != 0 } + } + + #[inline] + fn is_non_deductible_auto_type(&self) -> bool { + debug_assert_eq!(self.kind(), CXType_Auto); + self.canonical_type() == *self + } + + #[inline] + fn clang_size_of(&self, ctx: &BindgenContext) -> c_longlong { + match self.kind() { + // Work-around https://bugs.llvm.org/show_bug.cgi?id=40975 + CXType_RValueReference | CXType_LValueReference => { + ctx.target_pointer_size() as c_longlong + } + // Work-around https://bugs.llvm.org/show_bug.cgi?id=40813 + CXType_Auto if self.is_non_deductible_auto_type() => -6, + _ => unsafe { clang_Type_getSizeOf(self.x) }, + } + } + + #[inline] + fn clang_align_of(&self, ctx: &BindgenContext) -> c_longlong { + match self.kind() { + // Work-around https://bugs.llvm.org/show_bug.cgi?id=40975 + CXType_RValueReference | CXType_LValueReference => { + ctx.target_pointer_size() as c_longlong + } + // Work-around https://bugs.llvm.org/show_bug.cgi?id=40813 + CXType_Auto if self.is_non_deductible_auto_type() => -6, + _ => unsafe { clang_Type_getAlignOf(self.x) }, + } + } + + /// What is the size of this type? Paper over invalid types by returning `0` + /// for them. + pub(crate) fn size(&self, ctx: &BindgenContext) -> usize { + let val = self.clang_size_of(ctx); + if val < 0 { + 0 + } else { + val as usize + } + } + + /// What is the size of this type? + pub(crate) fn fallible_size( + &self, + ctx: &BindgenContext, + ) -> Result { + let val = self.clang_size_of(ctx); + if val < 0 { + Err(LayoutError::from(val as i32)) + } else { + Ok(val as usize) + } + } + + /// What is the alignment of this type? Paper over invalid types by + /// returning `0`. + pub(crate) fn align(&self, ctx: &BindgenContext) -> usize { + let val = self.clang_align_of(ctx); + if val < 0 { + 0 + } else { + val as usize + } + } + + /// What is the alignment of this type? + pub(crate) fn fallible_align( + &self, + ctx: &BindgenContext, + ) -> Result { + let val = self.clang_align_of(ctx); + if val < 0 { + Err(LayoutError::from(val as i32)) + } else { + Ok(val as usize) + } + } + + /// Get the layout for this type, or an error describing why it does not + /// have a valid layout. + pub(crate) fn fallible_layout( + &self, + ctx: &BindgenContext, + ) -> Result { + use crate::ir::layout::Layout; + let size = self.fallible_size(ctx)?; + let align = self.fallible_align(ctx)?; + Ok(Layout::new(size, align)) + } + + /// Get the number of template arguments this type has, or `None` if it is + /// not some kind of template. + pub(crate) fn num_template_args(&self) -> Option { + let n = unsafe { clang_Type_getNumTemplateArguments(self.x) }; + if n >= 0 { + Some(n as u32) + } else { + debug_assert_eq!(n, -1); + None + } + } + + /// If this type is a class template specialization, return its + /// template arguments. Otherwise, return None. + pub(crate) fn template_args(&self) -> Option { + self.num_template_args().map(|n| TypeTemplateArgIterator { + x: self.x, + length: n, + index: 0, + }) + } + + /// Given that this type is a function prototype, return the types of its parameters. 
+ /// + /// Returns None if the type is not a function prototype. + pub(crate) fn args(&self) -> Option> { + self.num_args().ok().map(|num| { + (0..num) + .map(|i| Type { + x: unsafe { clang_getArgType(self.x, i as c_uint) }, + }) + .collect() + }) + } + + /// Given that this type is a function prototype, return the number of arguments it takes. + /// + /// Returns Err if the type is not a function prototype. + pub(crate) fn num_args(&self) -> Result { + unsafe { + let w = clang_getNumArgTypes(self.x); + if w == -1 { + Err(()) + } else { + Ok(w as u32) + } + } + } + + /// Given that this type is a pointer type, return the type that it points + /// to. + pub(crate) fn pointee_type(&self) -> Option { + match self.kind() { + CXType_Pointer | + CXType_RValueReference | + CXType_LValueReference | + CXType_MemberPointer | + CXType_BlockPointer | + CXType_ObjCObjectPointer => { + let ret = Type { + x: unsafe { clang_getPointeeType(self.x) }, + }; + debug_assert!(ret.is_valid()); + Some(ret) + } + _ => None, + } + } + + /// Given that this type is an array, vector, or complex type, return the + /// type of its elements. + pub(crate) fn elem_type(&self) -> Option { + let current_type = Type { + x: unsafe { clang_getElementType(self.x) }, + }; + if current_type.is_valid() { + Some(current_type) + } else { + None + } + } + + /// Given that this type is an array or vector type, return its number of + /// elements. + pub(crate) fn num_elements(&self) -> Option { + let num_elements_returned = unsafe { clang_getNumElements(self.x) }; + if num_elements_returned == -1 { + None + } else { + Some(num_elements_returned as usize) + } + } + + /// Get the canonical version of this type. This sees through `typedef`s and + /// aliases to get the underlying, canonical type. + pub(crate) fn canonical_type(&self) -> Type { + unsafe { + Type { + x: clang_getCanonicalType(self.x), + } + } + } + + /// Is this type a variadic function type? + pub(crate) fn is_variadic(&self) -> bool { + unsafe { clang_isFunctionTypeVariadic(self.x) != 0 } + } + + /// Given that this type is a function type, get the type of its return + /// value. + pub(crate) fn ret_type(&self) -> Option { + let rt = Type { + x: unsafe { clang_getResultType(self.x) }, + }; + if rt.is_valid() { + Some(rt) + } else { + None + } + } + + /// Given that this type is a function type, get its calling convention. If + /// this is not a function type, `CXCallingConv_Invalid` is returned. + pub(crate) fn call_conv(&self) -> CXCallingConv { + unsafe { clang_getFunctionTypeCallingConv(self.x) } + } + + /// For elaborated types (types which use `class`, `struct`, or `union` to + /// disambiguate types from local bindings), get the underlying type. + pub(crate) fn named(&self) -> Type { + unsafe { + Type { + x: clang_Type_getNamedType(self.x), + } + } + } + + /// For atomic types, get the underlying type. + pub(crate) fn atomic_value_type(&self) -> Type { + unsafe { + Type { + x: clang_Type_getValueType(self.x), + } + } + } + + /// Is this a valid type? + pub(crate) fn is_valid(&self) -> bool { + self.kind() != CXType_Invalid + } + + /// Is this a valid and exposed type? + pub(crate) fn is_valid_and_exposed(&self) -> bool { + self.is_valid() && self.kind() != CXType_Unexposed + } + + /// Is this type a fully instantiated template? + pub(crate) fn is_fully_instantiated_template(&self) -> bool { + // Yep, the spelling of this containing type-parameter is extremely + // nasty... But can happen in . 
Unfortunately I couldn't + // reduce it enough :( + self.template_args().is_some_and(|args| args.len() > 0) && + !matches!( + self.declaration().kind(), + CXCursor_ClassTemplatePartialSpecialization | + CXCursor_TypeAliasTemplateDecl | + CXCursor_TemplateTemplateParameter + ) + } + + /// Is this type an associated template type? Eg `T::Associated` in + /// this example: + /// + /// ```c++ + /// template + /// class Foo { + /// typename T::Associated member; + /// }; + /// ``` + pub(crate) fn is_associated_type(&self) -> bool { + // This is terrible :( + fn hacky_parse_associated_type>(spelling: S) -> bool { + static ASSOC_TYPE_RE: OnceLock = OnceLock::new(); + ASSOC_TYPE_RE + .get_or_init(|| { + regex::Regex::new(r"typename type\-parameter\-\d+\-\d+::.+") + .unwrap() + }) + .is_match(spelling.as_ref()) + } + + self.kind() == CXType_Unexposed && + (hacky_parse_associated_type(self.spelling()) || + hacky_parse_associated_type( + self.canonical_type().spelling(), + )) + } +} + +/// The `CanonicalTypeDeclaration` type exists as proof-by-construction that its +/// cursor is the canonical declaration for its type. If you have a +/// `CanonicalTypeDeclaration` instance, you know for sure that the type and +/// cursor match up in a canonical declaration relationship, and it simply +/// cannot be otherwise. +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub(crate) struct CanonicalTypeDeclaration(Type, Cursor); + +impl CanonicalTypeDeclaration { + /// Get the type. + pub(crate) fn ty(&self) -> &Type { + &self.0 + } + + /// Get the type's canonical declaration cursor. + pub(crate) fn cursor(&self) -> &Cursor { + &self.1 + } +} + +/// An iterator for a type's template arguments. +pub(crate) struct TypeTemplateArgIterator { + x: CXType, + length: u32, + index: u32, +} + +impl Iterator for TypeTemplateArgIterator { + type Item = Type; + fn next(&mut self) -> Option { + if self.index < self.length { + let idx = self.index as c_uint; + self.index += 1; + Some(Type { + x: unsafe { clang_Type_getTemplateArgumentAsType(self.x, idx) }, + }) + } else { + None + } + } +} + +impl ExactSizeIterator for TypeTemplateArgIterator { + fn len(&self) -> usize { + assert!(self.index <= self.length); + (self.length - self.index) as usize + } +} + +/// A `SourceLocation` is a file, line, column, and byte offset location for +/// some source text. +pub(crate) struct SourceLocation { + x: CXSourceLocation, +} + +impl SourceLocation { + /// Get the (file, line, column, byte offset) tuple for this source + /// location. + pub(crate) fn location(&self) -> (File, usize, usize, usize) { + unsafe { + let mut file = mem::zeroed(); + let mut line = 0; + let mut col = 0; + let mut off = 0; + clang_getFileLocation( + self.x, &mut file, &mut line, &mut col, &mut off, + ); + (File { x: file }, line as usize, col as usize, off as usize) + } + } +} + +impl fmt::Display for SourceLocation { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let (file, line, col, _) = self.location(); + if let Some(name) = file.name() { + write!(f, "{name}:{line}:{col}") + } else { + "builtin definitions".fmt(f) + } + } +} + +impl fmt::Debug for SourceLocation { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{self}") + } +} + +/// A comment in the source text. +/// +/// Comments are sort of parsed by Clang, and have a tree structure. +pub(crate) struct Comment { + x: CXComment, +} + +impl Comment { + /// What kind of comment is this? 
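The `ASSOC_TYPE_RE` static above uses `OnceLock::get_or_init` so the regex is compiled lazily, once, and then shared by every later call. The same pattern in a dependency-free sketch, with a keyword `HashSet` standing in for the compiled regex (the keyword list and function name are illustrative only):

    use std::collections::HashSet;
    use std::sync::OnceLock;

    // Lazily built lookup table: constructed on the first call, reused after.
    fn is_reserved(word: &str) -> bool {
        static RESERVED: OnceLock<HashSet<&'static str>> = OnceLock::new();
        RESERVED
            .get_or_init(|| ["typename", "template", "class"].into_iter().collect())
            .contains(word)
    }

    fn main() {
        assert!(is_reserved("typename"));
        assert!(!is_reserved("Cursor"));
        println!("lazy-initialised lookup works");
    }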
+ pub(crate) fn kind(&self) -> CXCommentKind { + unsafe { clang_Comment_getKind(self.x) } + } + + /// Get this comment's children comment + pub(crate) fn get_children(&self) -> CommentChildrenIterator { + CommentChildrenIterator { + parent: self.x, + length: unsafe { clang_Comment_getNumChildren(self.x) }, + index: 0, + } + } + + /// Given that this comment is the start or end of an HTML tag, get its tag + /// name. + pub(crate) fn get_tag_name(&self) -> String { + unsafe { cxstring_into_string(clang_HTMLTagComment_getTagName(self.x)) } + } + + /// Given that this comment is an HTML start tag, get its attributes. + pub(crate) fn get_tag_attrs(&self) -> CommentAttributesIterator { + CommentAttributesIterator { + x: self.x, + length: unsafe { clang_HTMLStartTag_getNumAttrs(self.x) }, + index: 0, + } + } +} + +/// An iterator for a comment's children +pub(crate) struct CommentChildrenIterator { + parent: CXComment, + length: c_uint, + index: c_uint, +} + +impl Iterator for CommentChildrenIterator { + type Item = Comment; + fn next(&mut self) -> Option { + if self.index < self.length { + let idx = self.index; + self.index += 1; + Some(Comment { + x: unsafe { clang_Comment_getChild(self.parent, idx) }, + }) + } else { + None + } + } +} + +/// An HTML start tag comment attribute +pub(crate) struct CommentAttribute { + /// HTML start tag attribute name + pub(crate) name: String, + /// HTML start tag attribute value + pub(crate) value: String, +} + +/// An iterator for a comment's attributes +pub(crate) struct CommentAttributesIterator { + x: CXComment, + length: c_uint, + index: c_uint, +} + +impl Iterator for CommentAttributesIterator { + type Item = CommentAttribute; + fn next(&mut self) -> Option { + if self.index < self.length { + let idx = self.index; + self.index += 1; + Some(CommentAttribute { + name: unsafe { + cxstring_into_string(clang_HTMLStartTag_getAttrName( + self.x, idx, + )) + }, + value: unsafe { + cxstring_into_string(clang_HTMLStartTag_getAttrValue( + self.x, idx, + )) + }, + }) + } else { + None + } + } +} + +/// A source file. +pub(crate) struct File { + x: CXFile, +} + +impl File { + /// Get the name of this source file. + pub(crate) fn name(&self) -> Option { + if self.x.is_null() { + return None; + } + Some(unsafe { cxstring_into_string(clang_getFileName(self.x)) }) + } +} + +fn cxstring_to_string_leaky(s: CXString) -> String { + if s.data.is_null() { + return String::new(); + } + let c_str = unsafe { CStr::from_ptr(clang_getCString(s)) }; + c_str.to_string_lossy().into_owned() +} + +fn cxstring_into_string(s: CXString) -> String { + let ret = cxstring_to_string_leaky(s); + unsafe { clang_disposeString(s) }; + ret +} + +/// An `Index` is an environment for a set of translation units that will +/// typically end up linked together in one final binary. +pub(crate) struct Index { + x: CXIndex, +} + +impl Index { + /// Construct a new `Index`. + /// + /// The `pch` parameter controls whether declarations in pre-compiled + /// headers are included when enumerating a translation unit's "locals". + /// + /// The `diag` parameter controls whether debugging diagnostics are enabled. 
+ pub(crate) fn new(pch: bool, diag: bool) -> Index { + unsafe { + Index { + x: clang_createIndex(c_int::from(pch), c_int::from(diag)), + } + } + } +} + +impl fmt::Debug for Index { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + write!(fmt, "Index {{ }}") + } +} + +impl Drop for Index { + fn drop(&mut self) { + unsafe { + clang_disposeIndex(self.x); + } + } +} + +/// A translation unit (or "compilation unit"). +pub(crate) struct TranslationUnit { + x: CXTranslationUnit, +} + +impl fmt::Debug for TranslationUnit { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + write!(fmt, "TranslationUnit {{ }}") + } +} + +impl TranslationUnit { + /// Parse a source file into a translation unit. + pub(crate) fn parse( + ix: &Index, + file: &str, + cmd_args: &[Box], + unsaved: &[UnsavedFile], + opts: CXTranslationUnit_Flags, + ) -> Option { + let fname = CString::new(file).unwrap(); + let _c_args: Vec = cmd_args + .iter() + .map(|s| CString::new(s.as_bytes()).unwrap()) + .collect(); + let c_args: Vec<*const c_char> = + _c_args.iter().map(|s| s.as_ptr()).collect(); + let mut c_unsaved: Vec = + unsaved.iter().map(|f| f.x).collect(); + let tu = unsafe { + clang_parseTranslationUnit( + ix.x, + fname.as_ptr(), + c_args.as_ptr(), + c_args.len() as c_int, + c_unsaved.as_mut_ptr(), + c_unsaved.len() as c_uint, + opts, + ) + }; + if tu.is_null() { + None + } else { + Some(TranslationUnit { x: tu }) + } + } + + /// Get the Clang diagnostic information associated with this translation + /// unit. + pub(crate) fn diags(&self) -> Vec { + unsafe { + let num = clang_getNumDiagnostics(self.x) as usize; + let mut diags = vec![]; + for i in 0..num { + diags.push(Diagnostic { + x: clang_getDiagnostic(self.x, i as c_uint), + }); + } + diags + } + } + + /// Get a cursor pointing to the root of this translation unit's AST. + pub(crate) fn cursor(&self) -> Cursor { + unsafe { + Cursor { + x: clang_getTranslationUnitCursor(self.x), + } + } + } + + /// Save a translation unit to the given file. + pub(crate) fn save(&mut self, file: &str) -> Result<(), CXSaveError> { + let Ok(file) = CString::new(file) else { + return Err(CXSaveError_Unknown); + }; + let ret = unsafe { + clang_saveTranslationUnit( + self.x, + file.as_ptr(), + clang_defaultSaveOptions(self.x), + ) + }; + if ret != 0 { + Err(ret) + } else { + Ok(()) + } + } + + /// Is this the null translation unit? + pub(crate) fn is_null(&self) -> bool { + self.x.is_null() + } +} + +impl Drop for TranslationUnit { + fn drop(&mut self) { + unsafe { + clang_disposeTranslationUnit(self.x); + } + } +} + +/// Translation unit used for macro fallback parsing +pub(crate) struct FallbackTranslationUnit { + file_path: String, + pch_path: String, + idx: Box, + tu: TranslationUnit, +} + +impl fmt::Debug for FallbackTranslationUnit { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + write!(fmt, "FallbackTranslationUnit {{ }}") + } +} + +impl FallbackTranslationUnit { + /// Create a new fallback translation unit + pub(crate) fn new( + file: String, + pch_path: String, + c_args: &[Box], + ) -> Option { + // Create empty file + OpenOptions::new() + .write(true) + .create(true) + .truncate(true) + .open(&file) + .ok()?; + + let f_index = Box::new(Index::new(true, false)); + let f_translation_unit = TranslationUnit::parse( + &f_index, + &file, + c_args, + &[], + CXTranslationUnit_None, + )?; + Some(FallbackTranslationUnit { + file_path: file, + pch_path, + tu: f_translation_unit, + idx: f_index, + }) + } + + /// Get reference to underlying translation unit. 
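`TranslationUnit::parse` keeps two parallel vectors: `_c_args` owns the `CString`s while `c_args` holds the raw pointers handed to libclang, because those pointers are only valid while the owning strings stay alive. A standalone sketch of that argv-building pattern follows; `ffi_consume_args` is an illustrative stand-in for an entry point like `clang_parseTranslationUnit`:

    use std::ffi::CString;
    use std::os::raw::{c_char, c_int};

    // Stand-in for a C entry point that receives an argv-style array;
    // here it just counts the non-null entries it was given.
    unsafe fn ffi_consume_args(argv: *const *const c_char, argc: c_int) -> c_int {
        let mut seen = 0;
        for i in 0..argc as isize {
            if unsafe { !(*argv.offset(i)).is_null() } {
                seen += 1;
            }
        }
        seen
    }

    fn main() {
        let cmd_args = ["-x", "c", "-std=c11"];

        // The owned CStrings must outlive every use of the raw pointers below;
        // this mirrors the `_c_args` / `c_args` pair kept by `parse` above.
        let owned: Vec<CString> =
            cmd_args.iter().map(|s| CString::new(*s).unwrap()).collect();
        let raw: Vec<*const c_char> = owned.iter().map(|s| s.as_ptr()).collect();

        let seen = unsafe { ffi_consume_args(raw.as_ptr(), raw.len() as c_int) };
        assert_eq!(seen, 3);
        println!("passed {seen} argv entries across the FFI boundary");
    }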
+ pub(crate) fn translation_unit(&self) -> &TranslationUnit { + &self.tu + } + + /// Reparse a translation unit. + pub(crate) fn reparse( + &mut self, + unsaved_contents: &str, + ) -> Result<(), CXErrorCode> { + let unsaved = &[UnsavedFile::new(&self.file_path, unsaved_contents)]; + let mut c_unsaved: Vec = + unsaved.iter().map(|f| f.x).collect(); + let ret = unsafe { + clang_reparseTranslationUnit( + self.tu.x, + unsaved.len() as c_uint, + c_unsaved.as_mut_ptr(), + clang_defaultReparseOptions(self.tu.x), + ) + }; + if ret != 0 { + Err(ret) + } else { + Ok(()) + } + } +} + +impl Drop for FallbackTranslationUnit { + fn drop(&mut self) { + let _ = std::fs::remove_file(&self.file_path); + let _ = std::fs::remove_file(&self.pch_path); + } +} + +/// A diagnostic message generated while parsing a translation unit. +pub(crate) struct Diagnostic { + x: CXDiagnostic, +} + +impl Diagnostic { + /// Format this diagnostic message as a string, using the given option bit + /// flags. + pub(crate) fn format(&self) -> String { + unsafe { + let opts = clang_defaultDiagnosticDisplayOptions(); + cxstring_into_string(clang_formatDiagnostic(self.x, opts)) + } + } + + /// What is the severity of this diagnostic message? + pub(crate) fn severity(&self) -> CXDiagnosticSeverity { + unsafe { clang_getDiagnosticSeverity(self.x) } + } +} + +impl Drop for Diagnostic { + /// Destroy this diagnostic message. + fn drop(&mut self) { + unsafe { + clang_disposeDiagnostic(self.x); + } + } +} + +/// A file which has not been saved to disk. +pub(crate) struct UnsavedFile { + x: CXUnsavedFile, + /// The name of the unsaved file. Kept here to avoid leaving dangling pointers in + /// `CXUnsavedFile`. + pub(crate) name: CString, + contents: CString, +} + +impl UnsavedFile { + /// Construct a new unsaved file with the given `name` and `contents`. + pub(crate) fn new(name: &str, contents: &str) -> UnsavedFile { + let name = CString::new(name.as_bytes()).unwrap(); + let contents = CString::new(contents.as_bytes()).unwrap(); + let x = CXUnsavedFile { + Filename: name.as_ptr(), + Contents: contents.as_ptr(), + Length: contents.as_bytes().len() as c_ulong, + }; + UnsavedFile { x, name, contents } + } +} + +impl fmt::Debug for UnsavedFile { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + write!( + fmt, + "UnsavedFile(name: {:?}, contents: {:?})", + self.name, self.contents + ) + } +} + +/// Convert a cursor kind into a static string. +pub(crate) fn kind_to_str(x: CXCursorKind) -> String { + unsafe { cxstring_into_string(clang_getCursorKindSpelling(x)) } +} + +/// Convert a type kind to a static string. +pub(crate) fn type_to_str(x: CXTypeKind) -> String { + unsafe { cxstring_into_string(clang_getTypeKindSpelling(x)) } +} + +/// Dump the Clang AST to stdout for debugging purposes. +pub(crate) fn ast_dump(c: &Cursor, depth: isize) -> CXChildVisitResult { + fn print_indent>(depth: isize, s: S) { + for _ in 0..depth { + print!(" "); + } + println!("{}", s.as_ref()); + } + + fn print_cursor>(depth: isize, prefix: S, c: &Cursor) { + let prefix = prefix.as_ref(); + print_indent( + depth, + format!(" {prefix}kind = {}", kind_to_str(c.kind())), + ); + print_indent( + depth, + format!(" {prefix}spelling = \"{}\"", c.spelling()), + ); + print_indent(depth, format!(" {prefix}location = {}", c.location())); + print_indent( + depth, + format!(" {prefix}is-definition? {}", c.is_definition()), + ); + print_indent( + depth, + format!(" {prefix}is-declaration? 
{}", c.is_declaration()), + ); + print_indent( + depth, + format!( + " {prefix}is-inlined-function? {}", + c.is_inlined_function() + ), + ); + + let templ_kind = c.template_kind(); + if templ_kind != CXCursor_NoDeclFound { + print_indent( + depth, + format!(" {prefix}template-kind = {}", kind_to_str(templ_kind)), + ); + } + if let Some(usr) = c.usr() { + print_indent(depth, format!(" {prefix}usr = \"{usr}\"")); + } + if let Ok(num) = c.num_args() { + print_indent(depth, format!(" {prefix}number-of-args = {num}")); + } + if let Some(num) = c.num_template_args() { + print_indent( + depth, + format!(" {prefix}number-of-template-args = {num}"), + ); + } + + if c.is_bit_field() { + let width = match c.bit_width() { + Some(w) => w.to_string(), + None => "".to_string(), + }; + print_indent(depth, format!(" {prefix}bit-width = {width}")); + } + + if let Some(ty) = c.enum_type() { + print_indent( + depth, + format!(" {prefix}enum-type = {}", type_to_str(ty.kind())), + ); + } + if let Some(val) = c.enum_val_signed() { + print_indent(depth, format!(" {prefix}enum-val = {val}")); + } + if let Some(ty) = c.typedef_type() { + print_indent( + depth, + format!(" {prefix}typedef-type = {}", type_to_str(ty.kind())), + ); + } + if let Some(ty) = c.ret_type() { + print_indent( + depth, + format!(" {prefix}ret-type = {}", type_to_str(ty.kind())), + ); + } + + if let Some(refd) = c.referenced() { + if refd != *c { + println!(); + print_cursor( + depth, + String::from(prefix) + "referenced.", + &refd, + ); + } + } + + let canonical = c.canonical(); + if canonical != *c { + println!(); + print_cursor( + depth, + String::from(prefix) + "canonical.", + &canonical, + ); + } + + if let Some(specialized) = c.specialized() { + if specialized != *c { + println!(); + print_cursor( + depth, + String::from(prefix) + "specialized.", + &specialized, + ); + } + } + + if let Some(parent) = c.fallible_semantic_parent() { + println!(); + print_cursor( + depth, + String::from(prefix) + "semantic-parent.", + &parent, + ); + } + } + + fn print_type>(depth: isize, prefix: S, ty: &Type) { + let prefix = prefix.as_ref(); + + let kind = ty.kind(); + print_indent(depth, format!(" {prefix}kind = {}", type_to_str(kind))); + if kind == CXType_Invalid { + return; + } + + print_indent(depth, format!(" {prefix}cconv = {}", ty.call_conv())); + + print_indent( + depth, + format!(" {prefix}spelling = \"{}\"", ty.spelling()), + ); + let num_template_args = + unsafe { clang_Type_getNumTemplateArguments(ty.x) }; + if num_template_args >= 0 { + print_indent( + depth, + format!( + " {prefix}number-of-template-args = {num_template_args}" + ), + ); + } + if let Some(num) = ty.num_elements() { + print_indent(depth, format!(" {prefix}number-of-elements = {num}")); + } + print_indent( + depth, + format!(" {prefix}is-variadic? 
{}", ty.is_variadic()), + ); + + let canonical = ty.canonical_type(); + if canonical != *ty { + println!(); + print_type(depth, String::from(prefix) + "canonical.", &canonical); + } + + if let Some(pointee) = ty.pointee_type() { + if pointee != *ty { + println!(); + print_type(depth, String::from(prefix) + "pointee.", &pointee); + } + } + + if let Some(elem) = ty.elem_type() { + if elem != *ty { + println!(); + print_type(depth, String::from(prefix) + "elements.", &elem); + } + } + + if let Some(ret) = ty.ret_type() { + if ret != *ty { + println!(); + print_type(depth, String::from(prefix) + "return.", &ret); + } + } + + let named = ty.named(); + if named != *ty && named.is_valid() { + println!(); + print_type(depth, String::from(prefix) + "named.", &named); + } + } + + print_indent(depth, "("); + print_cursor(depth, "", c); + + println!(); + let ty = c.cur_type(); + print_type(depth, "type.", &ty); + + let declaration = ty.declaration(); + if declaration != *c && declaration.kind() != CXCursor_NoDeclFound { + println!(); + print_cursor(depth, "type.declaration.", &declaration); + } + + // Recurse. + let mut found_children = false; + c.visit(|s| { + if !found_children { + println!(); + found_children = true; + } + ast_dump(&s, depth + 1) + }); + + print_indent(depth, ")"); + + CXChildVisit_Continue +} + +/// Try to extract the clang version to a string +pub(crate) fn extract_clang_version() -> String { + unsafe { cxstring_into_string(clang_getClangVersion()) } +} + +/// A wrapper for the result of evaluating an expression. +#[derive(Debug)] +pub(crate) struct EvalResult { + x: CXEvalResult, + ty: Type, +} + +impl EvalResult { + /// Evaluate `cursor` and return the result. + pub(crate) fn new(cursor: Cursor) -> Option { + // Work around https://bugs.llvm.org/show_bug.cgi?id=42532, see: + // * https://github.com/rust-lang/rust-bindgen/issues/283 + // * https://github.com/rust-lang/rust-bindgen/issues/1590 + { + let mut found_cant_eval = false; + cursor.visit(|c| { + if c.kind() == CXCursor_TypeRef && + c.cur_type().canonical_type().kind() == CXType_Unexposed + { + found_cant_eval = true; + return CXChildVisit_Break; + } + + CXChildVisit_Recurse + }); + + if found_cant_eval { + return None; + } + } + Some(EvalResult { + x: unsafe { clang_Cursor_Evaluate(cursor.x) }, + ty: cursor.cur_type().canonical_type(), + }) + } + + fn kind(&self) -> CXEvalResultKind { + unsafe { clang_EvalResult_getKind(self.x) } + } + + /// Try to get back the result as a double. + pub(crate) fn as_double(&self) -> Option { + match self.kind() { + CXEval_Float => { + Some(unsafe { clang_EvalResult_getAsDouble(self.x) }) + } + _ => None, + } + } + + /// Try to get back the result as an integer. + pub(crate) fn as_int(&self) -> Option { + if self.kind() != CXEval_Int { + return None; + } + + if unsafe { clang_EvalResult_isUnsignedInt(self.x) } != 0 { + let value = unsafe { clang_EvalResult_getAsUnsigned(self.x) }; + if value > i64::MAX as c_ulonglong { + return None; + } + + return Some(value as i64); + } + + let value = unsafe { clang_EvalResult_getAsLongLong(self.x) }; + if value > i64::MAX as c_longlong { + return None; + } + if value < i64::MIN as c_longlong { + return None; + } + #[allow(clippy::unnecessary_cast)] + Some(value as i64) + } + + /// Evaluates the expression as a literal string, that may or may not be + /// valid utf-8. 
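`as_int` above refuses to return a value that does not fit in an `i64`, whether libclang reported the evaluation result as signed or unsigned. A small self-contained sketch of those range checks; the `narrow_*` helper names are illustrative, not part of the vendored code:

    use std::os::raw::{c_longlong, c_ulonglong};

    // Unsigned results above i64::MAX are rejected rather than wrapped.
    fn narrow_unsigned(value: c_ulonglong) -> Option<i64> {
        if value > i64::MAX as c_ulonglong {
            None
        } else {
            Some(value as i64)
        }
    }

    // Signed results are bounds-checked the same way (defensively, in case
    // c_longlong is ever wider than i64 on some platform).
    fn narrow_signed(value: c_longlong) -> Option<i64> {
        if value > i64::MAX as c_longlong || value < i64::MIN as c_longlong {
            None
        } else {
            Some(value as i64)
        }
    }

    fn main() {
        assert_eq!(narrow_unsigned(42), Some(42));
        assert_eq!(narrow_unsigned(u64::MAX), None);
        assert_eq!(narrow_signed(-1), Some(-1));
        println!("evaluation results outside i64 are rejected");
    }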
+ pub(crate) fn as_literal_string(&self) -> Option> { + if self.kind() != CXEval_StrLiteral { + return None; + } + + let char_ty = self.ty.pointee_type().or_else(|| self.ty.elem_type())?; + match char_ty.kind() { + CXType_Char_S | CXType_SChar | CXType_Char_U | CXType_UChar => { + let ret = unsafe { + CStr::from_ptr(clang_EvalResult_getAsStr(self.x)) + }; + Some(ret.to_bytes().to_vec()) + } + // FIXME: Support generating these. + CXType_Char16 => None, + CXType_Char32 => None, + CXType_WChar => None, + _ => None, + } + } +} + +impl Drop for EvalResult { + fn drop(&mut self) { + unsafe { clang_EvalResult_dispose(self.x) }; + } +} +/// ABI kinds as defined in +/// +#[derive(Debug, Eq, PartialEq, Copy, Clone)] +pub(crate) enum ABIKind { + /// All the regular targets like Linux, Mac, WASM, etc. implement the Itanium ABI + GenericItanium, + /// The ABI used when compiling for the MSVC target + Microsoft, +} + +/// Target information obtained from libclang. +#[derive(Debug)] +pub(crate) struct TargetInfo { + /// The target triple. + pub(crate) triple: String, + /// The width of the pointer _in bits_. + pub(crate) pointer_width: usize, + /// The ABI of the target + pub(crate) abi: ABIKind, +} + +impl TargetInfo { + /// Tries to obtain target information from libclang. + pub(crate) fn new(tu: &TranslationUnit) -> Self { + let triple; + let pointer_width; + unsafe { + let ti = clang_getTranslationUnitTargetInfo(tu.x); + triple = cxstring_into_string(clang_TargetInfo_getTriple(ti)); + pointer_width = clang_TargetInfo_getPointerWidth(ti); + clang_TargetInfo_dispose(ti); + } + assert!(pointer_width > 0); + assert_eq!(pointer_width % 8, 0); + + let abi = if triple.contains("msvc") { + ABIKind::Microsoft + } else { + ABIKind::GenericItanium + }; + + TargetInfo { + triple, + pointer_width: pointer_width as usize, + abi, + } + } +} diff --git a/vendor/bindgen/codegen/bitfield_unit.rs b/vendor/bindgen/codegen/bitfield_unit.rs new file mode 100644 index 00000000000000..59c66f8cb733b4 --- /dev/null +++ b/vendor/bindgen/codegen/bitfield_unit.rs @@ -0,0 +1,112 @@ +#[repr(C)] +#[derive(Copy, Clone, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)] +pub struct __BindgenBitfieldUnit { + storage: Storage, +} + +impl __BindgenBitfieldUnit { + #[inline] + pub const fn new(storage: Storage) -> Self { + Self { storage } + } +} + +impl __BindgenBitfieldUnit +where + Storage: AsRef<[u8]> + AsMut<[u8]>, +{ + #[inline] + fn extract_bit(byte: u8, index: usize) -> bool { + let bit_index = if cfg!(target_endian = "big") { + 7 - (index % 8) + } else { + index % 8 + }; + + let mask = 1 << bit_index; + + byte & mask == mask + } + + #[inline] + pub fn get_bit(&self, index: usize) -> bool { + debug_assert!(index / 8 < self.storage.as_ref().len()); + + let byte_index = index / 8; + let byte = self.storage.as_ref()[byte_index]; + + Self::extract_bit(byte, index) + } + + #[inline] + fn change_bit(byte: u8, index: usize, val: bool) -> u8 { + let bit_index = if cfg!(target_endian = "big") { + 7 - (index % 8) + } else { + index % 8 + }; + + let mask = 1 << bit_index; + if val { + byte | mask + } else { + byte & !mask + } + } + + #[inline] + pub fn set_bit(&mut self, index: usize, val: bool) { + debug_assert!(index / 8 < self.storage.as_ref().len()); + + let byte_index = index / 8; + let byte = &mut self.storage.as_mut()[byte_index]; + + *byte = Self::change_bit(*byte, index, val); + } + + #[inline] + pub fn get(&self, bit_offset: usize, bit_width: u8) -> u64 { + debug_assert!(bit_width <= 64); + debug_assert!(bit_offset / 8 < 
self.storage.as_ref().len()); + debug_assert!( + (bit_offset + (bit_width as usize)) / 8 <= + self.storage.as_ref().len() + ); + + let mut val = 0; + + for i in 0..(bit_width as usize) { + if self.get_bit(i + bit_offset) { + let index = if cfg!(target_endian = "big") { + bit_width as usize - 1 - i + } else { + i + }; + val |= 1 << index; + } + } + + val + } + + #[inline] + pub fn set(&mut self, bit_offset: usize, bit_width: u8, val: u64) { + debug_assert!(bit_width <= 64); + debug_assert!(bit_offset / 8 < self.storage.as_ref().len()); + debug_assert!( + (bit_offset + (bit_width as usize)) / 8 <= + self.storage.as_ref().len() + ); + + for i in 0..(bit_width as usize) { + let mask = 1 << i; + let val_bit_is_set = val & mask == mask; + let index = if cfg!(target_endian = "big") { + bit_width as usize - 1 - i + } else { + i + }; + self.set_bit(index + bit_offset, val_bit_is_set); + } + } +} diff --git a/vendor/bindgen/codegen/bitfield_unit_raw_ref_macros.rs b/vendor/bindgen/codegen/bitfield_unit_raw_ref_macros.rs new file mode 100644 index 00000000000000..0c864c7369ebe5 --- /dev/null +++ b/vendor/bindgen/codegen/bitfield_unit_raw_ref_macros.rs @@ -0,0 +1,191 @@ +#[repr(C)] +#[derive(Copy, Clone, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)] +pub struct __BindgenBitfieldUnit { + storage: Storage, +} + +impl __BindgenBitfieldUnit { + #[inline] + pub const fn new(storage: Storage) -> Self { + Self { storage } + } +} + +impl __BindgenBitfieldUnit +where + Storage: AsRef<[u8]> + AsMut<[u8]>, +{ + #[inline] + fn extract_bit(byte: u8, index: usize) -> bool { + let bit_index = if cfg!(target_endian = "big") { + 7 - (index % 8) + } else { + index % 8 + }; + + let mask = 1 << bit_index; + + byte & mask == mask + } + + #[inline] + pub fn get_bit(&self, index: usize) -> bool { + debug_assert!(index / 8 < self.storage.as_ref().len()); + + let byte_index = index / 8; + let byte = self.storage.as_ref()[byte_index]; + + Self::extract_bit(byte, index) + } + + #[inline] + pub unsafe fn raw_get_bit(this: *const Self, index: usize) -> bool { + debug_assert!(index / 8 < core::mem::size_of::()); + + let byte_index = index / 8; + let byte = unsafe { *(core::ptr::addr_of!((*this).storage) as *const u8) + .offset(byte_index as isize) }; + + Self::extract_bit(byte, index) + } + + #[inline] + fn change_bit(byte: u8, index: usize, val: bool) -> u8 { + let bit_index = if cfg!(target_endian = "big") { + 7 - (index % 8) + } else { + index % 8 + }; + + let mask = 1 << bit_index; + if val { + byte | mask + } else { + byte & !mask + } + } + + #[inline] + pub fn set_bit(&mut self, index: usize, val: bool) { + debug_assert!(index / 8 < self.storage.as_ref().len()); + + let byte_index = index / 8; + let byte = &mut self.storage.as_mut()[byte_index]; + + *byte = Self::change_bit(*byte, index, val); + } + + #[inline] + pub unsafe fn raw_set_bit(this: *mut Self, index: usize, val: bool) { + debug_assert!(index / 8 < core::mem::size_of::()); + + let byte_index = index / 8; + let byte = unsafe { + (core::ptr::addr_of_mut!((*this).storage) as *mut u8) + .offset(byte_index as isize) + }; + + unsafe { *byte = Self::change_bit(*byte, index, val) }; + } + + #[inline] + pub fn get(&self, bit_offset: usize, bit_width: u8) -> u64 { + debug_assert!(bit_width <= 64); + debug_assert!(bit_offset / 8 < self.storage.as_ref().len()); + debug_assert!( + (bit_offset + (bit_width as usize)) / 8 <= + self.storage.as_ref().len() + ); + + let mut val = 0; + + for i in 0..(bit_width as usize) { + if self.get_bit(i + bit_offset) { + let index = if 
cfg!(target_endian = "big") { + bit_width as usize - 1 - i + } else { + i + }; + val |= 1 << index; + } + } + + val + } + + #[inline] + pub unsafe fn raw_get( + this: *const Self, + bit_offset: usize, + bit_width: u8, + ) -> u64 { + debug_assert!(bit_width <= 64); + debug_assert!(bit_offset / 8 < core::mem::size_of::()); + debug_assert!( + (bit_offset + (bit_width as usize)) / 8 <= + core::mem::size_of::() + ); + + let mut val = 0; + + for i in 0..(bit_width as usize) { + if unsafe { Self::raw_get_bit(this, i + bit_offset) } { + let index = if cfg!(target_endian = "big") { + bit_width as usize - 1 - i + } else { + i + }; + val |= 1 << index; + } + } + + val + } + + #[inline] + pub fn set(&mut self, bit_offset: usize, bit_width: u8, val: u64) { + debug_assert!(bit_width <= 64); + debug_assert!(bit_offset / 8 < self.storage.as_ref().len()); + debug_assert!( + (bit_offset + (bit_width as usize)) / 8 <= + self.storage.as_ref().len() + ); + + for i in 0..(bit_width as usize) { + let mask = 1 << i; + let val_bit_is_set = val & mask == mask; + let index = if cfg!(target_endian = "big") { + bit_width as usize - 1 - i + } else { + i + }; + self.set_bit(index + bit_offset, val_bit_is_set); + } + } + + #[inline] + pub unsafe fn raw_set( + this: *mut Self, + bit_offset: usize, + bit_width: u8, + val: u64, + ) { + debug_assert!(bit_width <= 64); + debug_assert!(bit_offset / 8 < core::mem::size_of::()); + debug_assert!( + (bit_offset + (bit_width as usize)) / 8 <= + core::mem::size_of::() + ); + + for i in 0..(bit_width as usize) { + let mask = 1 << i; + let val_bit_is_set = val & mask == mask; + let index = if cfg!(target_endian = "big") { + bit_width as usize - 1 - i + } else { + i + }; + unsafe { Self::raw_set_bit(this, index + bit_offset, val_bit_is_set) }; + } + } +} diff --git a/vendor/bindgen/codegen/bitfield_unit_tests.rs b/vendor/bindgen/codegen/bitfield_unit_tests.rs new file mode 100644 index 00000000000000..ead0ffec0c2d76 --- /dev/null +++ b/vendor/bindgen/codegen/bitfield_unit_tests.rs @@ -0,0 +1,260 @@ +//! Tests for `__BindgenBitfieldUnit`. +//! +//! Note that bit-fields are allocated right to left (least to most significant +//! bits). +//! +//! From the x86 PS ABI: +//! +//! ```c +//! struct { +//! int j : 5; +//! int k : 6; +//! int m : 7; +//! }; +//! ``` +//! +//! ```ignore +//! +------------------------------------------------------------+ +//! | | | | | +//! | padding | m | k | j | +//! |31 18|17 11|10 5|4 0| +//! +------------------------------------------------------------+ +//! ``` + +use super::bitfield_unit::__BindgenBitfieldUnit; + +#[test] +fn bitfield_unit_get_bit() { + let unit = __BindgenBitfieldUnit::<[u8; 2]>::new([0b10011101, 0b00011101]); + + let mut bits = vec![]; + for i in 0..16 { + bits.push(unit.get_bit(i)); + } + + println!(); + println!("bits = {bits:?}"); + assert_eq!( + bits, + &[ + // 0b10011101 + true, false, true, true, true, false, false, true, + // 0b00011101 + true, false, true, true, true, false, false, false + ] + ); +} + +#[test] +fn bitfield_unit_set_bit() { + let mut unit = + __BindgenBitfieldUnit::<[u8; 2]>::new([0b00000000, 0b00000000]); + + for i in 0..16 { + if i % 3 == 0 { + unit.set_bit(i, true); + } + } + + for i in 0..16 { + assert_eq!(unit.get_bit(i), i % 3 == 0); + } + + let mut unit = + __BindgenBitfieldUnit::<[u8; 2]>::new([0b11111111, 0b11111111]); + + for i in 0..16 { + if i % 3 == 0 { + unit.set_bit(i, false); + } + } + + for i in 0..16 { + assert_eq!(unit.get_bit(i), i % 3 != 0); + } +} + +macro_rules! 
bitfield_unit_get { + ( + $( + With $storage:expr , then get($start:expr, $len:expr) is $expected:expr; + )* + ) => { + #[test] + fn bitfield_unit_get() { + $({ + let expected = $expected; + let unit = __BindgenBitfieldUnit::<_>::new($storage); + let actual = unit.get($start, $len); + + println!(); + println!("expected = {expected:064b}"); + println!("actual = {actual:064b}"); + + assert_eq!(expected, actual); + })* + } + } +} + +bitfield_unit_get! { + // Let's just exhaustively test getting the bits from a single byte, since + // there are few enough combinations... + + With [0b11100010], then get(0, 1) is 0; + With [0b11100010], then get(1, 1) is 1; + With [0b11100010], then get(2, 1) is 0; + With [0b11100010], then get(3, 1) is 0; + With [0b11100010], then get(4, 1) is 0; + With [0b11100010], then get(5, 1) is 1; + With [0b11100010], then get(6, 1) is 1; + With [0b11100010], then get(7, 1) is 1; + + With [0b11100010], then get(0, 2) is 0b10; + With [0b11100010], then get(1, 2) is 0b01; + With [0b11100010], then get(2, 2) is 0b00; + With [0b11100010], then get(3, 2) is 0b00; + With [0b11100010], then get(4, 2) is 0b10; + With [0b11100010], then get(5, 2) is 0b11; + With [0b11100010], then get(6, 2) is 0b11; + + With [0b11100010], then get(0, 3) is 0b010; + With [0b11100010], then get(1, 3) is 0b001; + With [0b11100010], then get(2, 3) is 0b000; + With [0b11100010], then get(3, 3) is 0b100; + With [0b11100010], then get(4, 3) is 0b110; + With [0b11100010], then get(5, 3) is 0b111; + + With [0b11100010], then get(0, 4) is 0b0010; + With [0b11100010], then get(1, 4) is 0b0001; + With [0b11100010], then get(2, 4) is 0b1000; + With [0b11100010], then get(3, 4) is 0b1100; + With [0b11100010], then get(4, 4) is 0b1110; + + With [0b11100010], then get(0, 5) is 0b00010; + With [0b11100010], then get(1, 5) is 0b10001; + With [0b11100010], then get(2, 5) is 0b11000; + With [0b11100010], then get(3, 5) is 0b11100; + + With [0b11100010], then get(0, 6) is 0b100010; + With [0b11100010], then get(1, 6) is 0b110001; + With [0b11100010], then get(2, 6) is 0b111000; + + With [0b11100010], then get(0, 7) is 0b1100010; + With [0b11100010], then get(1, 7) is 0b1110001; + + With [0b11100010], then get(0, 8) is 0b11100010; + + // OK. Now let's test getting bits from across byte boundaries. + + With [0b01010101, 0b11111111, 0b00000000, 0b11111111], + then get(0, 16) is 0b1111111101010101; + + With [0b01010101, 0b11111111, 0b00000000, 0b11111111], + then get(1, 16) is 0b0111111110101010; + + With [0b01010101, 0b11111111, 0b00000000, 0b11111111], + then get(2, 16) is 0b0011111111010101; + + With [0b01010101, 0b11111111, 0b00000000, 0b11111111], + then get(3, 16) is 0b0001111111101010; + + With [0b01010101, 0b11111111, 0b00000000, 0b11111111], + then get(4, 16) is 0b0000111111110101; + + With [0b01010101, 0b11111111, 0b00000000, 0b11111111], + then get(5, 16) is 0b0000011111111010; + + With [0b01010101, 0b11111111, 0b00000000, 0b11111111], + then get(6, 16) is 0b0000001111111101; + + With [0b01010101, 0b11111111, 0b00000000, 0b11111111], + then get(7, 16) is 0b0000000111111110; + + With [0b01010101, 0b11111111, 0b00000000, 0b11111111], + then get(8, 16) is 0b0000000011111111; +} + +macro_rules! 
bitfield_unit_set { + ( + $( + set($start:expr, $len:expr, $val:expr) is $expected:expr; + )* + ) => { + #[test] + fn bitfield_unit_set() { + $( + let mut unit = __BindgenBitfieldUnit::<[u8; 4]>::new([0, 0, 0, 0]); + unit.set($start, $len, $val); + let actual = unit.get(0, 32); + + println!(); + println!("set({}, {}, {:032b}", $start, $len, $val); + println!("expected = {:064b}", $expected); + println!("actual = {actual:064b}"); + + assert_eq!($expected, actual); + )* + } + } +} + +bitfield_unit_set! { + // Once again, let's exhaustively test single byte combinations. + + set(0, 1, 0b11111111) is 0b00000001; + set(1, 1, 0b11111111) is 0b00000010; + set(2, 1, 0b11111111) is 0b00000100; + set(3, 1, 0b11111111) is 0b00001000; + set(4, 1, 0b11111111) is 0b00010000; + set(5, 1, 0b11111111) is 0b00100000; + set(6, 1, 0b11111111) is 0b01000000; + set(7, 1, 0b11111111) is 0b10000000; + + set(0, 2, 0b11111111) is 0b00000011; + set(1, 2, 0b11111111) is 0b00000110; + set(2, 2, 0b11111111) is 0b00001100; + set(3, 2, 0b11111111) is 0b00011000; + set(4, 2, 0b11111111) is 0b00110000; + set(5, 2, 0b11111111) is 0b01100000; + set(6, 2, 0b11111111) is 0b11000000; + + set(0, 3, 0b11111111) is 0b00000111; + set(1, 3, 0b11111111) is 0b00001110; + set(2, 3, 0b11111111) is 0b00011100; + set(3, 3, 0b11111111) is 0b00111000; + set(4, 3, 0b11111111) is 0b01110000; + set(5, 3, 0b11111111) is 0b11100000; + + set(0, 4, 0b11111111) is 0b00001111; + set(1, 4, 0b11111111) is 0b00011110; + set(2, 4, 0b11111111) is 0b00111100; + set(3, 4, 0b11111111) is 0b01111000; + set(4, 4, 0b11111111) is 0b11110000; + + set(0, 5, 0b11111111) is 0b00011111; + set(1, 5, 0b11111111) is 0b00111110; + set(2, 5, 0b11111111) is 0b01111100; + set(3, 5, 0b11111111) is 0b11111000; + + set(0, 6, 0b11111111) is 0b00111111; + set(1, 6, 0b11111111) is 0b01111110; + set(2, 6, 0b11111111) is 0b11111100; + + set(0, 7, 0b11111111) is 0b01111111; + set(1, 7, 0b11111111) is 0b11111110; + + set(0, 8, 0b11111111) is 0b11111111; + + // And, now let's cross byte boundaries. + + set(0, 16, 0b1111111111111111) is 0b00000000000000001111111111111111; + set(1, 16, 0b1111111111111111) is 0b00000000000000011111111111111110; + set(2, 16, 0b1111111111111111) is 0b00000000000000111111111111111100; + set(3, 16, 0b1111111111111111) is 0b00000000000001111111111111111000; + set(4, 16, 0b1111111111111111) is 0b00000000000011111111111111110000; + set(5, 16, 0b1111111111111111) is 0b00000000000111111111111111100000; + set(6, 16, 0b1111111111111111) is 0b00000000001111111111111111000000; + set(7, 16, 0b1111111111111111) is 0b00000000011111111111111110000000; + set(8, 16, 0b1111111111111111) is 0b00000000111111111111111100000000; +} diff --git a/vendor/bindgen/codegen/dyngen.rs b/vendor/bindgen/codegen/dyngen.rs new file mode 100644 index 00000000000000..76f3805795326a --- /dev/null +++ b/vendor/bindgen/codegen/dyngen.rs @@ -0,0 +1,258 @@ +use crate::codegen; +use crate::ir::context::BindgenContext; +use crate::ir::function::ClangAbi; +use proc_macro2::{Ident, TokenStream}; + +/// Used to build the output tokens for dynamic bindings. +#[derive(Default)] +pub(crate) struct DynamicItems { + /// Tracks the tokens that will appears inside the library struct -- e.g.: + /// ```ignore + /// struct Lib { + /// __library: ::libloading::Library, + /// pub x: Result, // <- tracks these + /// ... + /// } + /// ``` + struct_members: Vec, + + /// Tracks the tokens that will appear inside the library struct's implementation, e.g.: + /// + /// ```ignore + /// impl Lib { + /// ... 
+ /// pub unsafe fn foo(&self, ...) { // <- tracks these + /// ... + /// } + /// } + /// ``` + struct_implementation: Vec, + + /// Tracks the initialization of the fields inside the `::new` constructor of the library + /// struct, e.g.: + /// ```ignore + /// impl Lib { + /// + /// pub unsafe fn new
<P>
(path: P) -> Result<Self, ::libloading::Error>
+ /// where
+ /// P: AsRef<::std::ffi::OsStr>,
+ /// {
+ /// ...
+ /// let foo = __library.get(...) ...; // <- tracks these
+ /// ...
+ /// }
+ ///
+ /// ...
+ /// }
+ /// ```
+ constructor_inits: Vec<proc_macro2::TokenStream>,
+
+ /// Tracks the information that is passed to the library struct at the end of the `::new`
+ /// constructor, e.g.:
+ /// ```ignore
+ /// impl LibFoo {
+ /// pub unsafe fn new
<P>
(path: P) -> Result + /// where + /// P: AsRef<::std::ffi::OsStr>, + /// { + /// ... + /// Ok(LibFoo { + /// __library: __library, + /// foo, + /// bar, // <- tracks these + /// ... + /// }) + /// } + /// } + /// ``` + init_fields: Vec, +} + +impl DynamicItems { + pub(crate) fn new() -> Self { + Self::default() + } + + pub(crate) fn get_tokens( + &self, + lib_ident: &Ident, + ctx: &BindgenContext, + ) -> TokenStream { + let struct_members = &self.struct_members; + let constructor_inits = &self.constructor_inits; + let init_fields = &self.init_fields; + let struct_implementation = &self.struct_implementation; + + let library_new = if ctx.options().wrap_unsafe_ops { + quote!(unsafe { ::libloading::Library::new(path) }) + } else { + quote!(::libloading::Library::new(path)) + }; + + let from_library = if ctx.options().wrap_unsafe_ops { + quote!(unsafe { Self::from_library(library) }) + } else { + quote!(Self::from_library(library)) + }; + + quote! { + pub struct #lib_ident { + __library: ::libloading::Library, + #(#struct_members)* + } + + impl #lib_ident { + pub unsafe fn new
<P>
( + path: P + ) -> Result + where P: AsRef<::std::ffi::OsStr> { + let library = #library_new?; + #from_library + } + + pub unsafe fn from_library( + library: L + ) -> Result + where L: Into<::libloading::Library> { + let __library = library.into(); + #( #constructor_inits )* + Ok(#lib_ident { + __library, + #( #init_fields ),* + }) + } + + #( #struct_implementation )* + } + } + } + + #[allow(clippy::too_many_arguments)] + pub(crate) fn push_func( + &mut self, + ident: &Ident, + symbol: &str, + abi: ClangAbi, + is_variadic: bool, + is_required: bool, + args: &[TokenStream], + args_identifiers: &[TokenStream], + ret: &TokenStream, + ret_ty: &TokenStream, + attributes: &[TokenStream], + ctx: &BindgenContext, + ) { + if !is_variadic { + assert_eq!(args.len(), args_identifiers.len()); + } + + let signature = quote! { unsafe extern #abi fn ( #( #args),* ) #ret }; + let member = if is_required { + signature + } else { + quote! { Result<#signature, ::libloading::Error> } + }; + + self.struct_members.push(quote! { + pub #ident: #member, + }); + + // N.B: If the signature was required, it won't be wrapped in a Result<...> + // and we can simply call it directly. + let fn_ = if is_required { + quote! { self.#ident } + } else { + quote! { self.#ident.as_ref().expect("Expected function, got error.") } + }; + let call_body = if ctx.options().wrap_unsafe_ops { + quote!(unsafe { (#fn_)(#( #args_identifiers ),*) }) + } else { + quote!((#fn_)(#( #args_identifiers ),*) ) + }; + + // We can't implement variadic functions from C easily, so we allow to + // access the function pointer so that the user can call it just fine. + if !is_variadic { + self.struct_implementation.push(quote! { + #(#attributes)* + pub unsafe fn #ident ( &self, #( #args ),* ) #ret_ty { + #call_body + } + }); + } + + // N.B: Unwrap the signature upon construction if it is required to be resolved. + let symbol_cstr = + codegen::helpers::ast_ty::cstr_expr(symbol.to_string()); + let library_get = if ctx.options().wrap_unsafe_ops { + quote!(unsafe { __library.get(#symbol_cstr) }) + } else { + quote!(__library.get(#symbol_cstr)) + }; + + self.constructor_inits.push(if is_required { + quote! { + let #ident = #library_get.map(|sym| *sym)?; + } + } else { + quote! { + let #ident = #library_get.map(|sym| *sym); + } + }); + + self.init_fields.push(quote! { + #ident + }); + } + + pub fn push_var( + &mut self, + ident: &Ident, + symbol: &str, + ty: &TokenStream, + is_required: bool, + wrap_unsafe_ops: bool, + ) { + let member = if is_required { + quote! { *mut #ty } + } else { + quote! { Result<*mut #ty, ::libloading::Error> } + }; + + self.struct_members.push(quote! { + pub #ident: #member, + }); + + let deref = if is_required { + quote! { self.#ident } + } else { + quote! { *self.#ident.as_ref().expect("Expected variable, got error.") } + }; + self.struct_implementation.push(quote! { + pub unsafe fn #ident (&self) -> *mut #ty { + #deref + } + }); + + let symbol_cstr = + codegen::helpers::ast_ty::cstr_expr(symbol.to_string()); + + let library_get = if wrap_unsafe_ops { + quote!(unsafe { __library.get::<*mut #ty>(#symbol_cstr) }) + } else { + quote!(__library.get::<*mut #ty>(#symbol_cstr)) + }; + + let qmark = if is_required { quote!(?) } else { quote!() }; + + let var_get = quote! { + let #ident = #library_get.map(|sym| *sym)#qmark; + }; + + self.constructor_inits.push(var_get); + + self.init_fields.push(quote! 
{ + #ident + }); + } +} diff --git a/vendor/bindgen/codegen/error.rs b/vendor/bindgen/codegen/error.rs new file mode 100644 index 00000000000000..b82ba2aef1c5e9 --- /dev/null +++ b/vendor/bindgen/codegen/error.rs @@ -0,0 +1,52 @@ +use std::error; +use std::fmt; + +/// Errors that can occur during code generation. +#[derive(Clone, Debug, PartialEq, Eq)] +pub(crate) enum Error { + /// Tried to generate an opaque blob for a type that did not have a layout. + NoLayoutForOpaqueBlob, + + /// Tried to instantiate an opaque template definition, or a template + /// definition that is too difficult for us to understand (like a partial + /// template specialization). + InstantiationOfOpaqueType, + + /// Function ABI is not supported. + UnsupportedAbi(&'static str), + + /// The pointer type size does not match the target's pointer size. + InvalidPointerSize { + ty_name: String, + ty_size: usize, + ptr_size: usize, + }, +} + +impl fmt::Display for Error { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + Error::NoLayoutForOpaqueBlob => { + "Tried to generate an opaque blob, but had no layout.".fmt(f) + } + Error::InstantiationOfOpaqueType => { + "Instantiation of opaque template type or partial template specialization." + .fmt(f) + } + Error::UnsupportedAbi(abi) => { + write!( + f, + "{abi} ABI is not supported by the configured Rust target." + ) + } + Error::InvalidPointerSize { ty_name, ty_size, ptr_size } => { + write!(f, "The {ty_name} pointer type has size {ty_size} but the current target's pointer size is {ptr_size}.") + } + } + } +} + +impl error::Error for Error {} + +/// A `Result` of `T` or an error of `bindgen::codegen::error::Error`. +pub(crate) type Result = ::std::result::Result; diff --git a/vendor/bindgen/codegen/helpers.rs b/vendor/bindgen/codegen/helpers.rs new file mode 100644 index 00000000000000..82172f34884137 --- /dev/null +++ b/vendor/bindgen/codegen/helpers.rs @@ -0,0 +1,395 @@ +//! Helpers for code generation that don't need macro expansion. + +use proc_macro2::{Ident, Span}; + +use crate::ir::context::BindgenContext; +use crate::ir::layout::Layout; + +pub(crate) mod attributes { + use proc_macro2::{Ident, Span, TokenStream}; + use std::{borrow::Cow, str::FromStr}; + + pub(crate) fn repr(which: &str) -> TokenStream { + let which = Ident::new(which, Span::call_site()); + quote! { + #[repr( #which )] + } + } + + pub(crate) fn repr_list(which_ones: &[&str]) -> TokenStream { + let which_ones = which_ones + .iter() + .map(|one| TokenStream::from_str(one).expect("repr to be valid")); + quote! { + #[repr( #( #which_ones ),* )] + } + } + + pub(crate) fn derives(which_ones: &[&str]) -> TokenStream { + let which_ones = which_ones + .iter() + .map(|one| TokenStream::from_str(one).expect("derive to be valid")); + quote! { + #[derive( #( #which_ones ),* )] + } + } + + pub(crate) fn inline() -> TokenStream { + quote! { + #[inline] + } + } + + pub(crate) fn must_use() -> TokenStream { + quote! { + #[must_use] + } + } + + pub(crate) fn non_exhaustive() -> TokenStream { + quote! { + #[non_exhaustive] + } + } + + pub(crate) fn doc(comment: &str) -> TokenStream { + if comment.is_empty() { + quote!() + } else { + quote!(#[doc = #comment]) + } + } + + pub(crate) fn link_name(name: &str) -> TokenStream { + // LLVM mangles the name by default but it's already mangled. + // Prefixing the name with \u{1} should tell LLVM to not mangle it. + let name: Cow<'_, str> = if MANGLE { + name.into() + } else { + format!("\u{1}{name}").into() + }; + + quote! 
{ + #[link_name = #name] + } + } +} + +/// The `ffi_safe` argument should be true if this is a type that the user might +/// reasonably use, e.g. not struct padding, where the `__BindgenOpaqueArray` is +/// just noise. +/// TODO: Should this be `MaybeUninit`, since padding bytes are effectively +/// uninitialized? +pub(crate) fn blob( + ctx: &BindgenContext, + layout: Layout, + ffi_safe: bool, +) -> syn::Type { + let opaque = layout.opaque(); + + // FIXME(emilio, #412): We fall back to byte alignment, but there are + // some things that legitimately are more than 8-byte aligned. + // + // Eventually we should be able to `unwrap` here, but... + let ty = opaque.known_rust_type_for_array().unwrap_or_else(|| { + warn!("Found unknown alignment on code generation!"); + syn::parse_quote! { u8 } + }); + + let data_len = opaque.array_size().unwrap_or(layout.size); + + if data_len == 1 { + ty + } else if ffi_safe && ctx.options().rust_features().min_const_generics { + ctx.generated_opaque_array(); + if ctx.options().enable_cxx_namespaces { + syn::parse_quote! { root::__BindgenOpaqueArray<#ty, #data_len> } + } else { + syn::parse_quote! { __BindgenOpaqueArray<#ty, #data_len> } + } + } else { + // This is not FFI safe as an argument; the struct above is + // preferable. + syn::parse_quote! { [ #ty ; #data_len ] } + } +} + +/// Integer type of the same size as the given `Layout`. +pub(crate) fn integer_type(layout: Layout) -> Option { + Layout::known_type_for_size(layout.size) +} + +pub(crate) const BITFIELD_UNIT: &str = "__BindgenBitfieldUnit"; + +/// Generates a bitfield allocation unit type for a type with the given `Layout`. +pub(crate) fn bitfield_unit(ctx: &BindgenContext, layout: Layout) -> syn::Type { + let size = layout.size; + let bitfield_unit_name = Ident::new(BITFIELD_UNIT, Span::call_site()); + let ty = syn::parse_quote! { #bitfield_unit_name<[u8; #size]> }; + + if ctx.options().enable_cxx_namespaces { + return syn::parse_quote! { root::#ty }; + } + + ty +} + +pub(crate) mod ast_ty { + use crate::ir::context::BindgenContext; + use crate::ir::function::FunctionSig; + use crate::ir::layout::Layout; + use crate::ir::ty::{FloatKind, IntKind}; + use crate::RustTarget; + use proc_macro2::TokenStream; + use std::str::FromStr; + + pub(crate) fn c_void(ctx: &BindgenContext) -> syn::Type { + // ctypes_prefix takes precedence + match ctx.options().ctypes_prefix { + Some(ref prefix) => { + let prefix = TokenStream::from_str(prefix.as_str()).unwrap(); + syn::parse_quote! { #prefix::c_void } + } + None => { + if ctx.options().use_core { + syn::parse_quote! { ::core::ffi::c_void } + } else { + syn::parse_quote! { ::std::os::raw::c_void } + } + } + } + } + + pub(crate) fn raw_type(ctx: &BindgenContext, name: &str) -> syn::Type { + let ident = ctx.rust_ident_raw(name); + match ctx.options().ctypes_prefix { + Some(ref prefix) => { + let prefix = TokenStream::from_str(prefix.as_str()).unwrap(); + syn::parse_quote! { #prefix::#ident } + } + None => { + if ctx.options().use_core && + ctx.options().rust_features().core_ffi_c + { + syn::parse_quote! { ::core::ffi::#ident } + } else { + syn::parse_quote! { ::std::os::raw::#ident } + } + } + } + } + + pub(crate) fn int_kind_rust_type( + ctx: &BindgenContext, + ik: IntKind, + layout: Option, + ) -> syn::Type { + match ik { + IntKind::Bool => syn::parse_quote! { bool }, + IntKind::Char { .. } => raw_type(ctx, "c_char"), + // The following is used only when an unusual command-line + // argument is used. 
bindgen_cchar16_t is not a real type; + // but this allows downstream postprocessors to distinguish + // this case and do something special for C++ bindings + // containing the C++ type char16_t. + IntKind::Char16 => syn::parse_quote! { bindgen_cchar16_t }, + IntKind::SChar => raw_type(ctx, "c_schar"), + IntKind::UChar => raw_type(ctx, "c_uchar"), + IntKind::Short => raw_type(ctx, "c_short"), + IntKind::UShort => raw_type(ctx, "c_ushort"), + IntKind::Int => raw_type(ctx, "c_int"), + IntKind::UInt => raw_type(ctx, "c_uint"), + IntKind::Long => raw_type(ctx, "c_long"), + IntKind::ULong => raw_type(ctx, "c_ulong"), + IntKind::LongLong => raw_type(ctx, "c_longlong"), + IntKind::ULongLong => raw_type(ctx, "c_ulonglong"), + IntKind::WChar => { + let layout = + layout.expect("Couldn't compute wchar_t's layout?"); + Layout::known_type_for_size(layout.size) + .expect("Non-representable wchar_t?") + } + + IntKind::I8 => syn::parse_quote! { i8 }, + IntKind::U8 => syn::parse_quote! { u8 }, + IntKind::I16 => syn::parse_quote! { i16 }, + IntKind::U16 => syn::parse_quote! { u16 }, + IntKind::I32 => syn::parse_quote! { i32 }, + IntKind::U32 => syn::parse_quote! { u32 }, + IntKind::I64 => syn::parse_quote! { i64 }, + IntKind::U64 => syn::parse_quote! { u64 }, + IntKind::Custom { name, .. } => { + syn::parse_str(name).expect("Invalid integer type.") + } + IntKind::U128 => { + if true { + syn::parse_quote! { u128 } + } else { + // Best effort thing, but wrong alignment + // unfortunately. + syn::parse_quote! { [u64; 2] } + } + } + IntKind::I128 => { + if true { + syn::parse_quote! { i128 } + } else { + syn::parse_quote! { [u64; 2] } + } + } + } + } + + pub(crate) fn float_kind_rust_type( + ctx: &BindgenContext, + fk: FloatKind, + layout: Option, + ) -> syn::Type { + // TODO: we probably should take the type layout into account more + // often? + // + // Also, maybe this one shouldn't be the default? + match (fk, ctx.options().convert_floats) { + (FloatKind::Float16, _) => { + // TODO: do f16 when rust lands it + ctx.generated_bindgen_float16(); + if ctx.options().enable_cxx_namespaces { + syn::parse_quote! { root::__BindgenFloat16 } + } else { + syn::parse_quote! { __BindgenFloat16 } + } + } + (FloatKind::Float, true) => syn::parse_quote! { f32 }, + (FloatKind::Double, true) => syn::parse_quote! { f64 }, + (FloatKind::Float, false) => raw_type(ctx, "c_float"), + (FloatKind::Double, false) => raw_type(ctx, "c_double"), + (FloatKind::LongDouble, _) => { + if let Some(layout) = layout { + match layout.size { + 4 => syn::parse_quote! { f32 }, + 8 => syn::parse_quote! { f64 }, + // TODO(emilio): If rust ever gains f128 we should + // use it here and below. + _ => super::integer_type(layout) + .unwrap_or(syn::parse_quote! { f64 }), + } + } else { + debug_assert!( + false, + "How didn't we know the layout for a primitive type?" + ); + syn::parse_quote! { f64 } + } + } + (FloatKind::Float128, _) => { + if true { + syn::parse_quote! { u128 } + } else { + syn::parse_quote! { [u64; 2] } + } + } + } + } + + pub(crate) fn int_expr(val: i64) -> TokenStream { + // Don't use quote! { #val } because that adds the type suffix. + let val = proc_macro2::Literal::i64_unsuffixed(val); + quote!(#val) + } + + pub(crate) fn uint_expr(val: u64) -> TokenStream { + // Don't use quote! { #val } because that adds the type suffix. 
+ let val = proc_macro2::Literal::u64_unsuffixed(val); + quote!(#val) + } + + pub(crate) fn cstr_expr(mut string: String) -> TokenStream { + string.push('\0'); + let b = proc_macro2::Literal::byte_string(string.as_bytes()); + quote! { + #b + } + } + + pub(crate) fn float_expr( + ctx: &BindgenContext, + f: f64, + ) -> Result { + if f.is_finite() { + let val = proc_macro2::Literal::f64_unsuffixed(f); + + return Ok(quote!(#val)); + } + + let prefix = ctx.trait_prefix(); + let rust_target = ctx.options().rust_target; + + if f.is_nan() { + // FIXME: This should be done behind a `RustFeature` instead + #[allow(deprecated)] + let tokens = if rust_target >= RustTarget::Stable_1_43 { + quote! { + f64::NAN + } + } else { + quote! { + ::#prefix::f64::NAN + } + }; + return Ok(tokens); + } + + if f.is_infinite() { + let tokens = if f.is_sign_positive() { + // FIXME: This should be done behind a `RustFeature` instead + #[allow(deprecated)] + if rust_target >= RustTarget::Stable_1_43 { + quote! { + f64::INFINITY + } + } else { + quote! { + ::#prefix::f64::INFINITY + } + } + } else { + // FIXME: This should be done behind a `RustFeature` instead + #[allow(deprecated)] + // Negative infinity + if rust_target >= RustTarget::Stable_1_43 { + quote! { + f64::NEG_INFINITY + } + } else { + quote! { + ::#prefix::f64::NEG_INFINITY + } + } + }; + return Ok(tokens); + } + + warn!("Unknown non-finite float number: {f:?}"); + Err(()) + } + + pub(crate) fn arguments_from_signature( + signature: &FunctionSig, + ctx: &BindgenContext, + ) -> Vec { + let mut unnamed_arguments = 0; + signature + .argument_types() + .iter() + .map(|&(ref name, _ty)| { + let name = if let Some(ref name) = *name { + ctx.rust_ident(name) + } else { + unnamed_arguments += 1; + ctx.rust_ident(format!("arg{unnamed_arguments}")) + }; + quote! { #name } + }) + .collect() + } +} diff --git a/vendor/bindgen/codegen/impl_debug.rs b/vendor/bindgen/codegen/impl_debug.rs new file mode 100644 index 00000000000000..058a73bd132157 --- /dev/null +++ b/vendor/bindgen/codegen/impl_debug.rs @@ -0,0 +1,243 @@ +use crate::ir::comp::{BitfieldUnit, CompKind, Field, FieldData, FieldMethods}; +use crate::ir::context::BindgenContext; +use crate::ir::item::{HasTypeParamInArray, IsOpaque, Item, ItemCanonicalName}; +use crate::ir::ty::{TypeKind, RUST_DERIVE_IN_ARRAY_LIMIT}; +use std::fmt::Write as _; + +pub(crate) fn gen_debug_impl( + ctx: &BindgenContext, + fields: &[Field], + item: &Item, + kind: CompKind, +) -> proc_macro2::TokenStream { + let struct_name = item.canonical_name(ctx); + let mut format_string = format!("{struct_name} {{{{ "); + let mut tokens = vec![]; + + if item.is_opaque(ctx, &()) { + format_string.push_str("opaque"); + } else { + match kind { + CompKind::Union => { + format_string.push_str("union"); + } + CompKind::Struct => { + let processed_fields = fields.iter().filter_map(|f| match f { + Field::DataMember(ref fd) => fd.impl_debug(ctx, ()), + Field::Bitfields(ref bu) => bu.impl_debug(ctx, ()), + }); + + for (i, (fstring, toks)) in processed_fields.enumerate() { + if i > 0 { + format_string.push_str(", "); + } + tokens.extend(toks); + format_string.push_str(&fstring); + } + } + } + } + + format_string.push_str(" }}"); + tokens.insert(0, quote! { #format_string }); + + let prefix = ctx.trait_prefix(); + + quote! { + fn fmt(&self, f: &mut ::#prefix::fmt::Formatter<'_>) -> ::#prefix ::fmt::Result { + write!(f, #( #tokens ),*) + } + } +} + +/// A trait for the things which we can codegen tokens that contribute towards a +/// generated `impl Debug`. 
+pub(crate) trait ImplDebug<'a> { + /// Any extra parameter required by this a particular `ImplDebug` implementation. + type Extra; + + /// Generate a format string snippet to be included in the larger `impl Debug` + /// format string, and the code to get the format string's interpolation values. + fn impl_debug( + &self, + ctx: &BindgenContext, + extra: Self::Extra, + ) -> Option<(String, Vec)>; +} + +impl ImplDebug<'_> for FieldData { + type Extra = (); + + fn impl_debug( + &self, + ctx: &BindgenContext, + _: Self::Extra, + ) -> Option<(String, Vec)> { + if let Some(name) = self.name() { + ctx.resolve_item(self.ty()).impl_debug(ctx, name) + } else { + None + } + } +} + +impl ImplDebug<'_> for BitfieldUnit { + type Extra = (); + + fn impl_debug( + &self, + ctx: &BindgenContext, + _: Self::Extra, + ) -> Option<(String, Vec)> { + let mut format_string = String::new(); + let mut tokens = vec![]; + for (i, bitfield) in self.bitfields().iter().enumerate() { + if i > 0 { + format_string.push_str(", "); + } + + if let Some(bitfield_name) = bitfield.name() { + let _ = write!(format_string, "{bitfield_name} : {{:?}}"); + let getter_name = bitfield.getter_name(); + let name_ident = ctx.rust_ident_raw(getter_name); + tokens.push(quote! { + self.#name_ident () + }); + } + } + + Some((format_string, tokens)) + } +} + +impl<'a> ImplDebug<'a> for Item { + type Extra = &'a str; + + fn impl_debug( + &self, + ctx: &BindgenContext, + name: &str, + ) -> Option<(String, Vec)> { + let name_ident = ctx.rust_ident(name); + + // We don't know if blocklisted items `impl Debug` or not, so we can't + // add them to the format string we're building up. + if !ctx.allowlisted_items().contains(&self.id()) { + return None; + } + + let ty = self.as_type()?; + + fn debug_print( + name: &str, + name_ident: &proc_macro2::TokenStream, + ) -> Option<(String, Vec)> { + Some(( + format!("{name}: {{:?}}"), + vec![quote! { + self.#name_ident + }], + )) + } + + match *ty.kind() { + // Handle the simple cases. + TypeKind::Void | + TypeKind::NullPtr | + TypeKind::Int(..) | + TypeKind::Float(..) | + TypeKind::Complex(..) | + TypeKind::Function(..) | + TypeKind::Enum(..) | + TypeKind::Reference(..) | + TypeKind::UnresolvedTypeRef(..) | + TypeKind::ObjCInterface(..) | + TypeKind::ObjCId | + TypeKind::Comp(..) | + TypeKind::ObjCSel => debug_print(name, "e! { #name_ident }), + + TypeKind::TemplateInstantiation(ref inst) => { + if inst.is_opaque(ctx, self) { + Some((format!("{name}: opaque"), vec![])) + } else { + debug_print(name, "e! { #name_ident }) + } + } + + // The generic is not required to implement Debug, so we can not debug print that type + TypeKind::TypeParam => { + Some((format!("{name}: Non-debuggable generic"), vec![])) + } + + TypeKind::Array(_, len) => { + // Generics are not required to implement Debug + if self.has_type_param_in_array(ctx) { + Some((format!("{name}: Array with length {len}"), vec![])) + } else if len < RUST_DERIVE_IN_ARRAY_LIMIT || + ctx.options().rust_features().larger_arrays + { + // The simple case + debug_print(name, "e! { #name_ident }) + } else if ctx.options().use_core { + // There is no String in core; reducing field visibility to avoid breaking + // no_std setups. + Some((format!("{name}: [...]"), vec![])) + } else { + // Let's implement our own print function + Some(( + format!("{name}: [{{}}]"), + vec![quote! 
{{ + use std::fmt::Write as _; + let mut output = String::new(); + let mut iter = self.#name_ident.iter(); + if let Some(value) = iter.next() { + let _ = write!(output, "{value:?}"); + for value in iter { + let _ = write!(output, ", {value:?}"); + } + } + output + }}], + )) + } + } + TypeKind::Vector(_, len) => { + if ctx.options().use_core { + // There is no format! in core; reducing field visibility to avoid breaking + // no_std setups. + Some((format!("{name}(...)"), vec![])) + } else { + let self_ids = 0..len; + Some(( + format!("{name}({{}})"), + vec![quote! { + #(format!("{:?}", self.#self_ids)),* + }], + )) + } + } + + TypeKind::ResolvedTypeRef(t) | + TypeKind::TemplateAlias(t, _) | + TypeKind::Alias(t) | + TypeKind::BlockPointer(t) => { + // We follow the aliases + ctx.resolve_item(t).impl_debug(ctx, name) + } + + TypeKind::Pointer(inner) => { + let inner_type = ctx.resolve_type(inner).canonical_type(ctx); + match *inner_type.kind() { + TypeKind::Function(ref sig) + if !sig.function_pointers_can_derive() => + { + Some((format!("{name}: FunctionPointer"), vec![])) + } + _ => debug_print(name, "e! { #name_ident }), + } + } + + TypeKind::Opaque => None, + } + } +} diff --git a/vendor/bindgen/codegen/impl_partialeq.rs b/vendor/bindgen/codegen/impl_partialeq.rs new file mode 100644 index 00000000000000..c2787967d85230 --- /dev/null +++ b/vendor/bindgen/codegen/impl_partialeq.rs @@ -0,0 +1,142 @@ +use crate::ir::comp::{CompInfo, CompKind, Field, FieldMethods}; +use crate::ir::context::BindgenContext; +use crate::ir::item::{IsOpaque, Item}; +use crate::ir::ty::{TypeKind, RUST_DERIVE_IN_ARRAY_LIMIT}; + +/// Generate a manual implementation of `PartialEq` trait for the +/// specified compound type. +pub(crate) fn gen_partialeq_impl( + ctx: &BindgenContext, + comp_info: &CompInfo, + item: &Item, + ty_for_impl: &proc_macro2::TokenStream, +) -> Option { + let mut tokens = vec![]; + + if item.is_opaque(ctx, &()) { + tokens.push(quote! { + &self._bindgen_opaque_blob[..] == &other._bindgen_opaque_blob[..] + }); + } else if comp_info.kind() == CompKind::Union { + assert!(!ctx.options().untagged_union); + tokens.push(quote! { + &self.bindgen_union_field[..] == &other.bindgen_union_field[..] + }); + } else { + for base in comp_info.base_members() { + if !base.requires_storage(ctx) { + continue; + } + + let ty_item = ctx.resolve_item(base.ty); + let field_name = &base.field_name; + + if ty_item.is_opaque(ctx, &()) { + let field_name = ctx.rust_ident(field_name); + tokens.push(quote! { + &self. #field_name [..] == &other. #field_name [..] + }); + } else { + tokens.push(gen_field(ctx, ty_item, field_name)); + } + } + + for field in comp_info.fields() { + match *field { + Field::DataMember(ref fd) => { + let ty_item = ctx.resolve_item(fd.ty()); + let name = fd.name().unwrap(); + tokens.push(gen_field(ctx, ty_item, name)); + } + Field::Bitfields(ref bu) => { + for bitfield in bu.bitfields() { + if bitfield.name().is_some() { + let getter_name = bitfield.getter_name(); + let name_ident = ctx.rust_ident_raw(getter_name); + tokens.push(quote! { + self.#name_ident () == other.#name_ident () + }); + } + } + } + } + } + } + + Some(quote! { + fn eq(&self, other: & #ty_for_impl) -> bool { + #( #tokens )&&* + } + }) +} + +fn gen_field( + ctx: &BindgenContext, + ty_item: &Item, + name: &str, +) -> proc_macro2::TokenStream { + fn quote_equals( + name_ident: &proc_macro2::Ident, + ) -> proc_macro2::TokenStream { + quote! 
{ self.#name_ident == other.#name_ident } + } + + let name_ident = ctx.rust_ident(name); + let ty = ty_item.expect_type(); + + match *ty.kind() { + TypeKind::Void | + TypeKind::NullPtr | + TypeKind::Int(..) | + TypeKind::Complex(..) | + TypeKind::Float(..) | + TypeKind::Enum(..) | + TypeKind::TypeParam | + TypeKind::UnresolvedTypeRef(..) | + TypeKind::Reference(..) | + TypeKind::ObjCInterface(..) | + TypeKind::ObjCId | + TypeKind::ObjCSel | + TypeKind::Comp(..) | + TypeKind::Pointer(_) | + TypeKind::Function(..) | + TypeKind::Opaque => quote_equals(&name_ident), + + TypeKind::TemplateInstantiation(ref inst) => { + if inst.is_opaque(ctx, ty_item) { + quote! { + &self. #name_ident [..] == &other. #name_ident [..] + } + } else { + quote_equals(&name_ident) + } + } + + TypeKind::Array(_, len) => { + if len <= RUST_DERIVE_IN_ARRAY_LIMIT || + ctx.options().rust_features().larger_arrays + { + quote_equals(&name_ident) + } else { + quote! { + &self. #name_ident [..] == &other. #name_ident [..] + } + } + } + TypeKind::Vector(_, len) => { + let self_ids = 0..len; + let other_ids = 0..len; + quote! { + #(self.#self_ids == other.#other_ids &&)* true + } + } + + TypeKind::ResolvedTypeRef(t) | + TypeKind::TemplateAlias(t, _) | + TypeKind::Alias(t) | + TypeKind::BlockPointer(t) => { + let inner_item = ctx.resolve_item(t); + gen_field(ctx, inner_item, name) + } + } +} diff --git a/vendor/bindgen/codegen/mod.rs b/vendor/bindgen/codegen/mod.rs new file mode 100644 index 00000000000000..59f2265c09ed1f --- /dev/null +++ b/vendor/bindgen/codegen/mod.rs @@ -0,0 +1,5991 @@ +mod dyngen; +pub(crate) mod error; + +mod helpers; +mod impl_debug; +mod impl_partialeq; +mod postprocessing; +mod serialize; +pub(crate) mod struct_layout; + +#[cfg(test)] +#[allow(warnings)] +pub(crate) mod bitfield_unit; +#[cfg(all(test, target_endian = "little"))] +mod bitfield_unit_tests; + +use self::dyngen::DynamicItems; +use self::helpers::attributes; +use self::struct_layout::StructLayoutTracker; + +use super::BindgenOptions; + +use crate::callbacks::{ + AttributeInfo, DeriveInfo, DiscoveredItem, DiscoveredItemId, FieldInfo, + TypeKind as DeriveTypeKind, +}; +use crate::codegen::error::Error; +use crate::ir::analysis::{HasVtable, Sizedness}; +use crate::ir::annotations::{ + Annotations, FieldAccessorKind, FieldVisibilityKind, +}; +use crate::ir::comp::{ + Bitfield, BitfieldUnit, CompInfo, CompKind, Field, FieldData, FieldMethods, + Method, MethodKind, +}; +use crate::ir::context::{BindgenContext, ItemId}; +use crate::ir::derive::{ + CanDerive, CanDeriveCopy, CanDeriveDebug, CanDeriveDefault, CanDeriveEq, + CanDeriveHash, CanDeriveOrd, CanDerivePartialEq, CanDerivePartialOrd, +}; +use crate::ir::dot; +use crate::ir::enum_ty::{Enum, EnumVariant, EnumVariantValue}; +use crate::ir::function::{ + ClangAbi, Function, FunctionKind, FunctionSig, Linkage, +}; +use crate::ir::int::IntKind; +use crate::ir::item::{IsOpaque, Item, ItemCanonicalName, ItemCanonicalPath}; +use crate::ir::item_kind::ItemKind; +use crate::ir::layout::Layout; +use crate::ir::module::Module; +use crate::ir::objc::{ObjCInterface, ObjCMethod}; +use crate::ir::template::{ + AsTemplateParam, TemplateInstantiation, TemplateParameters, +}; +use crate::ir::ty::{Type, TypeKind}; +use crate::ir::var::Var; + +use proc_macro2::{Ident, Span}; +use quote::{ToTokens, TokenStreamExt}; + +use crate::{Entry, HashMap, HashSet}; +use std::borrow::Cow; +use std::cell::Cell; +use std::collections::VecDeque; +use std::ffi::CStr; +use std::fmt::{self, Write}; +use std::ops; +use 
std::str::{self, FromStr}; + +#[derive(Debug, Clone, PartialEq, Eq, Hash)] +pub enum CodegenError { + Serialize { msg: String, loc: String }, + Io(String), +} + +impl From for CodegenError { + fn from(err: std::io::Error) -> Self { + Self::Io(err.to_string()) + } +} + +impl fmt::Display for CodegenError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + Self::Serialize { msg, loc } => { + write!(f, "serialization error at {loc}: {msg}") + } + Self::Io(err) => err.fmt(f), + } + } +} + +// Name of type defined in constified enum module +pub(crate) static CONSTIFIED_ENUM_MODULE_REPR_NAME: &str = "Type"; + +fn top_level_path( + ctx: &BindgenContext, + item: &Item, +) -> Vec { + let mut path = vec![quote! { self }]; + + if ctx.options().enable_cxx_namespaces { + for _ in 0..item.codegen_depth(ctx) { + path.push(quote! { super }); + } + } + + path +} + +fn root_import( + ctx: &BindgenContext, + module: &Item, +) -> proc_macro2::TokenStream { + assert!(ctx.options().enable_cxx_namespaces, "Somebody messed it up"); + assert!(module.is_module()); + + let mut path = top_level_path(ctx, module); + + let root = ctx.root_module().canonical_name(ctx); + let root_ident = ctx.rust_ident(root); + path.push(quote! { #root_ident }); + + let mut tokens = quote! {}; + tokens.append_separated(path, quote!(::)); + + quote! { + #[allow(unused_imports)] + use #tokens ; + } +} + +bitflags! { + #[derive(Copy, Clone, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] + struct DerivableTraits: u16 { + const DEBUG = 1 << 0; + const DEFAULT = 1 << 1; + const COPY = 1 << 2; + const CLONE = 1 << 3; + const HASH = 1 << 4; + const PARTIAL_ORD = 1 << 5; + const ORD = 1 << 6; + const PARTIAL_EQ = 1 << 7; + const EQ = 1 << 8; + } +} + +fn derives_of_item( + item: &Item, + ctx: &BindgenContext, + packed: bool, +) -> DerivableTraits { + let mut derivable_traits = DerivableTraits::empty(); + + if item.can_derive_copy(ctx) && !item.annotations().disallow_copy() { + derivable_traits |= DerivableTraits::COPY; + + // FIXME: This requires extra logic if you have a big array in a + // templated struct. The reason for this is that the magic: + // fn clone(&self) -> Self { *self } + // doesn't work for templates. + // + // It's not hard to fix though. + derivable_traits |= DerivableTraits::CLONE; + } else if packed { + // If the struct or union is packed, deriving from Copy is required for + // deriving from any other trait. 
+ return derivable_traits; + } + + if item.can_derive_debug(ctx) && !item.annotations().disallow_debug() { + derivable_traits |= DerivableTraits::DEBUG; + } + + if item.can_derive_default(ctx) && !item.annotations().disallow_default() { + derivable_traits |= DerivableTraits::DEFAULT; + } + + if item.can_derive_hash(ctx) { + derivable_traits |= DerivableTraits::HASH; + } + + if item.can_derive_partialord(ctx) { + derivable_traits |= DerivableTraits::PARTIAL_ORD; + } + + if item.can_derive_ord(ctx) { + derivable_traits |= DerivableTraits::ORD; + } + + if item.can_derive_partialeq(ctx) { + derivable_traits |= DerivableTraits::PARTIAL_EQ; + } + + if item.can_derive_eq(ctx) { + derivable_traits |= DerivableTraits::EQ; + } + + derivable_traits +} + +impl From for Vec<&'static str> { + fn from(derivable_traits: DerivableTraits) -> Vec<&'static str> { + [ + (DerivableTraits::DEBUG, "Debug"), + (DerivableTraits::DEFAULT, "Default"), + (DerivableTraits::COPY, "Copy"), + (DerivableTraits::CLONE, "Clone"), + (DerivableTraits::HASH, "Hash"), + (DerivableTraits::PARTIAL_ORD, "PartialOrd"), + (DerivableTraits::ORD, "Ord"), + (DerivableTraits::PARTIAL_EQ, "PartialEq"), + (DerivableTraits::EQ, "Eq"), + ] + .iter() + .filter_map(|&(flag, derive)| { + Some(derive).filter(|_| derivable_traits.contains(flag)) + }) + .collect() + } +} + +struct WrapAsVariadic { + new_name: String, + idx_of_va_list_arg: usize, +} + +struct CodegenResult<'a> { + items: Vec, + dynamic_items: DynamicItems, + + /// A monotonic counter used to add stable unique ID's to stuff that doesn't + /// need to be referenced by anything. + codegen_id: &'a Cell, + + /// Whether a bindgen union has been generated at least once. + saw_bindgen_union: bool, + + /// Whether an incomplete array has been generated at least once. + saw_incomplete_array: bool, + + /// Whether Objective C types have been seen at least once. + saw_objc: bool, + + /// Whether Apple block types have been seen at least once. + saw_block: bool, + + /// Whether a bitfield allocation unit has been seen at least once. + saw_bitfield_unit: bool, + + items_seen: HashSet, + /// The set of generated function/var names, needed because in C/C++ is + /// legal to do something like: + /// + /// ```c++ + /// extern "C" { + /// void foo(); + /// extern int bar; + /// } + /// + /// extern "C" { + /// void foo(); + /// extern int bar; + /// } + /// ``` + /// + /// Being these two different declarations. + functions_seen: HashSet, + vars_seen: HashSet, + + /// Used for making bindings to overloaded functions. Maps from a canonical + /// function name to the number of overloads we have already codegen'd for + /// that name. This lets us give each overload a unique suffix. + overload_counters: HashMap, + + /// List of items to serialize. With optionally the argument for the wrap as + /// variadic transformation to be applied. 
+ items_to_serialize: Vec<(ItemId, Option)>, +} + +impl<'a> CodegenResult<'a> { + fn new(codegen_id: &'a Cell) -> Self { + CodegenResult { + items: vec![], + dynamic_items: DynamicItems::new(), + saw_bindgen_union: false, + saw_incomplete_array: false, + saw_objc: false, + saw_block: false, + saw_bitfield_unit: false, + codegen_id, + items_seen: Default::default(), + functions_seen: Default::default(), + vars_seen: Default::default(), + overload_counters: Default::default(), + items_to_serialize: Default::default(), + } + } + + fn dynamic_items(&mut self) -> &mut DynamicItems { + &mut self.dynamic_items + } + + fn saw_bindgen_union(&mut self) { + self.saw_bindgen_union = true; + } + + fn saw_incomplete_array(&mut self) { + self.saw_incomplete_array = true; + } + + fn saw_objc(&mut self) { + self.saw_objc = true; + } + + fn saw_block(&mut self) { + self.saw_block = true; + } + + fn saw_bitfield_unit(&mut self) { + self.saw_bitfield_unit = true; + } + + fn seen>(&self, item: Id) -> bool { + self.items_seen.contains(&item.into()) + } + + fn set_seen>(&mut self, item: Id) { + self.items_seen.insert(item.into()); + } + + fn seen_function(&self, name: &str) -> bool { + self.functions_seen.contains(name) + } + + fn saw_function(&mut self, name: &str) { + self.functions_seen.insert(name.into()); + } + + /// Get the overload number for the given function name. Increments the + /// counter internally so the next time we ask for the overload for this + /// name, we get the incremented value, and so on. + fn overload_number(&mut self, name: &str) -> u32 { + let counter = self.overload_counters.entry(name.into()).or_insert(0); + let number = *counter; + *counter += 1; + number + } + + fn seen_var(&self, name: &str) -> bool { + self.vars_seen.contains(name) + } + + fn saw_var(&mut self, name: &str) { + self.vars_seen.insert(name.into()); + } + + fn inner(&mut self, cb: F) -> Vec + where + F: FnOnce(&mut Self), + { + let mut new = Self::new(self.codegen_id); + + cb(&mut new); + + self.saw_incomplete_array |= new.saw_incomplete_array; + self.saw_objc |= new.saw_objc; + self.saw_block |= new.saw_block; + self.saw_bitfield_unit |= new.saw_bitfield_unit; + self.saw_bindgen_union |= new.saw_bindgen_union; + + new.items + } +} + +impl ops::Deref for CodegenResult<'_> { + type Target = Vec; + + fn deref(&self) -> &Self::Target { + &self.items + } +} + +impl ops::DerefMut for CodegenResult<'_> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.items + } +} + +/// A trait to convert a rust type into a pointer, optionally const, to the same +/// type. +trait ToPtr { + fn to_ptr(self, is_const: bool) -> syn::Type; +} + +impl ToPtr for syn::Type { + fn to_ptr(self, is_const: bool) -> syn::Type { + if is_const { + syn::parse_quote! { *const #self } + } else { + syn::parse_quote! { *mut #self } + } + } +} + +/// An extension trait for `syn::Type` that lets us append any implicit +/// template parameters that exist for some type, if necessary. +trait WithImplicitTemplateParams { + fn with_implicit_template_params( + self, + ctx: &BindgenContext, + item: &Item, + ) -> Self; +} + +impl WithImplicitTemplateParams for syn::Type { + fn with_implicit_template_params( + self, + ctx: &BindgenContext, + item: &Item, + ) -> Self { + let item = item.id().into_resolver().through_type_refs().resolve(ctx); + + let params = match *item.expect_type().kind() { + TypeKind::UnresolvedTypeRef(..) => { + unreachable!("already resolved unresolved type refs") + } + TypeKind::ResolvedTypeRef(..) 
=> { + unreachable!("we resolved item through type refs") + } + // None of these types ever have implicit template parameters. + TypeKind::Void | + TypeKind::NullPtr | + TypeKind::Pointer(..) | + TypeKind::Reference(..) | + TypeKind::Int(..) | + TypeKind::Float(..) | + TypeKind::Complex(..) | + TypeKind::Array(..) | + TypeKind::TypeParam | + TypeKind::Opaque | + TypeKind::Function(..) | + TypeKind::Enum(..) | + TypeKind::ObjCId | + TypeKind::ObjCSel | + TypeKind::TemplateInstantiation(..) => None, + _ => { + let params = item.used_template_params(ctx); + if params.is_empty() { + None + } else { + Some(params.into_iter().map(|p| { + p.try_to_rust_ty(ctx, &()).expect( + "template params cannot fail to be a rust type", + ) + })) + } + } + }; + + if let Some(params) = params { + syn::parse_quote! { #self<#(#params),*> } + } else { + self + } + } +} + +trait CodeGenerator { + /// Extra information from the caller. + type Extra; + + /// Extra information returned to the caller. + type Return; + + fn codegen( + &self, + ctx: &BindgenContext, + result: &mut CodegenResult<'_>, + extra: &Self::Extra, + ) -> Self::Return; +} + +impl Item { + fn process_before_codegen( + &self, + ctx: &BindgenContext, + result: &mut CodegenResult, + ) -> bool { + if !self.is_enabled_for_codegen(ctx) { + return false; + } + + if self.is_blocklisted(ctx) || result.seen(self.id()) { + debug!( + "::process_before_codegen: Ignoring hidden or seen: \ + self = {:?}", + self + ); + return false; + } + + if !ctx.codegen_items().contains(&self.id()) { + // TODO(emilio, #453): Figure out what to do when this happens + // legitimately, we could track the opaque stuff and disable the + // assertion there I guess. + warn!("Found non-allowlisted item in code generation: {self:?}"); + } + + result.set_seen(self.id()); + true + } +} + +impl CodeGenerator for Item { + type Extra = (); + type Return = (); + + fn codegen( + &self, + ctx: &BindgenContext, + result: &mut CodegenResult<'_>, + _extra: &(), + ) { + debug!("::codegen: self = {self:?}"); + if !self.process_before_codegen(ctx, result) { + return; + } + + match *self.kind() { + ItemKind::Module(ref module) => { + module.codegen(ctx, result, self); + } + ItemKind::Function(ref fun) => { + fun.codegen(ctx, result, self); + } + ItemKind::Var(ref var) => { + var.codegen(ctx, result, self); + } + ItemKind::Type(ref ty) => { + ty.codegen(ctx, result, self); + } + } + } +} + +impl CodeGenerator for Module { + type Extra = Item; + type Return = (); + + fn codegen( + &self, + ctx: &BindgenContext, + result: &mut CodegenResult<'_>, + item: &Item, + ) { + debug!("::codegen: item = {item:?}"); + + let codegen_self = |result: &mut CodegenResult, + found_any: &mut bool| { + for child in self.children() { + if ctx.codegen_items().contains(child) { + *found_any = true; + ctx.resolve_item(*child).codegen(ctx, result, &()); + } + } + + if item.id() == ctx.root_module() { + if result.saw_block { + utils::prepend_block_header(ctx, &mut *result); + } + if result.saw_bindgen_union { + utils::prepend_union_types(ctx, &mut *result); + } + if result.saw_incomplete_array { + utils::prepend_incomplete_array_types(ctx, &mut *result); + } + if ctx.need_bindgen_float16_type() { + utils::prepend_float16_type(&mut *result); + } + if ctx.need_bindgen_complex_type() { + utils::prepend_complex_type(&mut *result); + } + if ctx.need_opaque_array_type() { + utils::prepend_opaque_array_type(&mut *result); + } + if result.saw_objc { + utils::prepend_objc_header(ctx, &mut *result); + } + if result.saw_bitfield_unit { + 
utils::prepend_bitfield_unit_type(ctx, &mut *result); + } + } + }; + + if !ctx.options().enable_cxx_namespaces || + (self.is_inline() && + !ctx.options().conservative_inline_namespaces) + { + codegen_self(result, &mut false); + return; + } + + let mut found_any = false; + let inner_items = result.inner(|result| { + result.push(root_import(ctx, item)); + + let path = item + .namespace_aware_canonical_path(ctx) + .join("::") + .into_boxed_str(); + if let Some(raw_lines) = ctx.options().module_lines.get(&path) { + for raw_line in raw_lines { + found_any = true; + result.push( + proc_macro2::TokenStream::from_str(raw_line).unwrap(), + ); + } + } + + codegen_self(result, &mut found_any); + }); + + // Don't bother creating an empty module. + if !found_any { + return; + } + + let name = item.canonical_name(ctx); + let ident = ctx.rust_ident(name); + result.push(if item.id() == ctx.root_module() { + quote! { + #[allow(non_snake_case, non_camel_case_types, non_upper_case_globals)] + pub mod #ident { + #( #inner_items )* + } + } + } else { + quote! { + pub mod #ident { + #( #inner_items )* + } + } + }); + } +} + +impl CodeGenerator for Var { + type Extra = Item; + type Return = (); + + fn codegen( + &self, + ctx: &BindgenContext, + result: &mut CodegenResult<'_>, + item: &Item, + ) { + use crate::ir::var::VarType; + debug!("::codegen: item = {item:?}"); + debug_assert!(item.is_enabled_for_codegen(ctx)); + + let canonical_name = item.canonical_name(ctx); + + if result.seen_var(&canonical_name) { + return; + } + result.saw_var(&canonical_name); + + let canonical_ident = ctx.rust_ident(&canonical_name); + + // We can't generate bindings to static variables of templates. The + // number of actual variables for a single declaration are open ended + // and we don't know what instantiations do or don't exist. + if !item.all_template_params(ctx).is_empty() { + return; + } + + let mut attrs = vec![]; + if let Some(comment) = item.comment(ctx) { + attrs.push(attributes::doc(&comment)); + } + + let var_ty = self.ty(); + let ty = var_ty.to_rust_ty_or_opaque(ctx, &()); + + if let Some(val) = self.val() { + match *val { + VarType::Bool(val) => { + result.push(quote! { + #(#attrs)* + pub const #canonical_ident : #ty = #val ; + }); + } + VarType::Int(val) => { + let int_kind = var_ty + .into_resolver() + .through_type_aliases() + .through_type_refs() + .resolve(ctx) + .expect_type() + .as_integer() + .unwrap(); + let val = if int_kind.is_signed() { + helpers::ast_ty::int_expr(val) + } else { + helpers::ast_ty::uint_expr(val as _) + }; + result.push(quote! { + #(#attrs)* + pub const #canonical_ident : #ty = #val ; + }); + } + VarType::String(ref bytes) => { + let prefix = ctx.trait_prefix(); + + let options = ctx.options(); + let rust_features = options.rust_features; + + let mut cstr_bytes = bytes.clone(); + cstr_bytes.push(0); + let len = proc_macro2::Literal::usize_unsuffixed( + cstr_bytes.len(), + ); + let cstr = + if options.generate_cstr && rust_features.const_cstr { + CStr::from_bytes_with_nul(&cstr_bytes).ok() + } else { + None + }; + + if let Some(cstr) = cstr { + let cstr_ty = quote! { ::#prefix::ffi::CStr }; + if rust_features.literal_cstr { + let cstr = proc_macro2::Literal::c_string(cstr); + result.push(quote! { + #(#attrs)* + pub const #canonical_ident: &#cstr_ty = #cstr; + }); + } else { + let bytes = + proc_macro2::Literal::byte_string(&cstr_bytes); + result.push(quote! 
{ + #(#attrs)* + #[allow(unsafe_code)] + pub const #canonical_ident: &#cstr_ty = unsafe { + #cstr_ty::from_bytes_with_nul_unchecked(#bytes) + }; + }); + } + } else { + // TODO: Here we ignore the type we just made up, probably + // we should refactor how the variable type and ty ID work. + let array_ty = quote! { [u8; #len] }; + let bytes = + proc_macro2::Literal::byte_string(&cstr_bytes); + let lifetime = + if true { None } else { Some(quote! { 'static }) } + .into_iter(); + + result.push(quote! { + #(#attrs)* + pub const #canonical_ident: &#(#lifetime )*#array_ty = #bytes ; + }); + } + } + VarType::Float(f) => { + if let Ok(expr) = helpers::ast_ty::float_expr(ctx, f) { + result.push(quote! { + #(#attrs)* + pub const #canonical_ident : #ty = #expr ; + }); + } + } + VarType::Char(c) => { + result.push(quote! { + #(#attrs)* + pub const #canonical_ident : #ty = #c ; + }); + } + } + } else { + let symbol: &str = self.link_name().unwrap_or_else(|| { + let link_name = + self.mangled_name().unwrap_or_else(|| self.name()); + if utils::names_will_be_identical_after_mangling( + &canonical_name, + link_name, + None, + ) { + canonical_name.as_str() + } else { + attrs.push(attributes::link_name::(link_name)); + link_name + } + }); + + let maybe_mut = if self.is_const() { + quote! {} + } else { + quote! { mut } + }; + + let safety = ctx + .options() + .rust_features + .unsafe_extern_blocks + .then(|| quote!(unsafe)); + + let tokens = quote!( + #safety extern "C" { + #(#attrs)* + pub static #maybe_mut #canonical_ident: #ty; + } + ); + + if ctx.options().dynamic_library_name.is_some() { + result.dynamic_items().push_var( + &canonical_ident, + symbol, + &self + .ty() + .to_rust_ty_or_opaque(ctx, &()) + .into_token_stream(), + ctx.options().dynamic_link_require_all, + ctx.options().wrap_unsafe_ops, + ); + } else { + result.push(tokens); + } + } + } +} + +impl CodeGenerator for Type { + type Extra = Item; + type Return = (); + + fn codegen( + &self, + ctx: &BindgenContext, + result: &mut CodegenResult<'_>, + item: &Item, + ) { + debug!("::codegen: item = {item:?}"); + debug_assert!(item.is_enabled_for_codegen(ctx)); + + match *self.kind() { + TypeKind::Void | + TypeKind::NullPtr | + TypeKind::Int(..) | + TypeKind::Float(..) | + TypeKind::Complex(..) | + TypeKind::Array(..) | + TypeKind::Vector(..) | + TypeKind::Pointer(..) | + TypeKind::Reference(..) | + TypeKind::Function(..) | + TypeKind::ResolvedTypeRef(..) | + TypeKind::Opaque | + TypeKind::TypeParam => { + // These items don't need code generation, they only need to be + // converted to rust types in fields, arguments, and such. + // NOTE(emilio): If you add to this list, make sure to also add + // it to BindgenContext::compute_allowlisted_and_codegen_items. + } + TypeKind::TemplateInstantiation(ref inst) => { + inst.codegen(ctx, result, item); + } + TypeKind::BlockPointer(inner) => { + if !ctx.options().generate_block { + return; + } + + let inner_item = + inner.into_resolver().through_type_refs().resolve(ctx); + let name = item.canonical_name(ctx); + + let inner_rust_type = { + if let TypeKind::Function(fnsig) = + inner_item.kind().expect_type().kind() + { + utils::fnsig_block(ctx, fnsig) + } else { + panic!("invalid block typedef: {inner_item:?}") + } + }; + + let rust_name = ctx.rust_ident(name); + + let mut tokens = if let Some(comment) = item.comment(ctx) { + attributes::doc(&comment) + } else { + quote! {} + }; + + tokens.append_all(quote! 
{ + pub type #rust_name = #inner_rust_type ; + }); + + result.push(tokens); + result.saw_block(); + } + TypeKind::Comp(ref ci) => ci.codegen(ctx, result, item), + TypeKind::TemplateAlias(inner, _) | TypeKind::Alias(inner) => { + let inner_item = + inner.into_resolver().through_type_refs().resolve(ctx); + let name = item.canonical_name(ctx); + let path = item.canonical_path(ctx); + + { + let through_type_aliases = inner + .into_resolver() + .through_type_refs() + .through_type_aliases() + .resolve(ctx); + + // Try to catch the common pattern: + // + // typedef struct foo { ... } foo; + // + // here, and also other more complex cases like #946. + if through_type_aliases.canonical_path(ctx) == path { + return; + } + } + + // If this is a known named type, disallow generating anything + // for it too. If size_t -> usize conversions are enabled, we + // need to check that these conversions are permissible, but + // nothing needs to be generated, still. + let spelling = self.name().expect("Unnamed alias?"); + if utils::type_from_named(ctx, spelling).is_some() { + if let "size_t" | "ssize_t" = spelling { + let layout = inner_item + .kind() + .expect_type() + .layout(ctx) + .expect("No layout?"); + assert_eq!( + layout.size, + ctx.target_pointer_size(), + "Target platform requires `--no-size_t-is-usize`. The size of `{spelling}` ({}) does not match the target pointer size ({})", + layout.size, + ctx.target_pointer_size(), + ); + assert_eq!( + layout.align, + ctx.target_pointer_size(), + "Target platform requires `--no-size_t-is-usize`. The alignment of `{spelling}` ({}) does not match the target pointer size ({})", + layout.align, + ctx.target_pointer_size(), + ); + } + return; + } + + let mut outer_params = item.used_template_params(ctx); + + let is_opaque = item.is_opaque(ctx, &()); + let inner_rust_type = if is_opaque { + outer_params = vec![]; + self.to_opaque(ctx, item) + } else { + // Its possible that we have better layout information than + // the inner type does, so fall back to an opaque blob based + // on our layout if converting the inner item fails. + inner_item + .try_to_rust_ty_or_opaque(ctx, &()) + .unwrap_or_else(|_| self.to_opaque(ctx, item)) + .with_implicit_template_params(ctx, inner_item) + }; + + { + // FIXME(emilio): This is a workaround to avoid generating + // incorrect type aliases because of types that we haven't + // been able to resolve (because, eg, they depend on a + // template parameter). + // + // It's kind of a shame not generating them even when they + // could be referenced, but we already do the same for items + // with invalid template parameters, and at least this way + // they can be replaced, instead of generating plain invalid + // code. + let inner_canon_type = + inner_item.expect_type().canonical_type(ctx); + if inner_canon_type.is_invalid_type_param() { + warn!( + "Item contained invalid named type, skipping: \ + {:?}, {:?}", + item, inner_item + ); + return; + } + } + + let rust_name = ctx.rust_ident(&name); + + ctx.options().for_each_callback(|cb| { + cb.new_item_found( + DiscoveredItemId::new(item.id().as_usize()), + DiscoveredItem::Alias { + alias_name: rust_name.to_string(), + alias_for: DiscoveredItemId::new( + inner_item.id().as_usize(), + ), + }, + ); + }); + + let mut tokens = if let Some(comment) = item.comment(ctx) { + attributes::doc(&comment) + } else { + quote! 
{} + }; + + let alias_style = if ctx.options().type_alias.matches(&name) { + AliasVariation::TypeAlias + } else if ctx.options().new_type_alias.matches(&name) { + AliasVariation::NewType + } else if ctx.options().new_type_alias_deref.matches(&name) { + AliasVariation::NewTypeDeref + } else { + ctx.options().default_alias_style + }; + + // We prefer using `pub use` over `pub type` because of: + // https://github.com/rust-lang/rust/issues/26264 + if matches!(inner_rust_type, syn::Type::Path(_)) && + outer_params.is_empty() && + !is_opaque && + alias_style == AliasVariation::TypeAlias && + inner_item.expect_type().canonical_type(ctx).is_enum() + { + tokens.append_all(quote! { + pub use + }); + let path = top_level_path(ctx, item); + tokens.append_separated(path, quote!(::)); + tokens.append_all(quote! { + :: #inner_rust_type as #rust_name ; + }); + result.push(tokens); + return; + } + + tokens.append_all(match alias_style { + AliasVariation::TypeAlias => quote! { + pub type #rust_name + }, + AliasVariation::NewType | AliasVariation::NewTypeDeref => { + let mut attributes = + vec![attributes::repr("transparent")]; + let packed = false; // Types can't be packed in Rust. + let derivable_traits = + derives_of_item(item, ctx, packed); + let mut derives: Vec<_> = derivable_traits.into(); + // The custom derives callback may return a list of derive attributes; + // add them to the end of the list. + let custom_derives = + ctx.options().all_callbacks(|cb| { + cb.add_derives(&DeriveInfo { + name: &name, + kind: DeriveTypeKind::Struct, + }) + }); + // In most cases this will be a no-op, since custom_derives will be empty. + derives + .extend(custom_derives.iter().map(|s| s.as_str())); + attributes.push(attributes::derives(&derives)); + + let custom_attributes = + ctx.options().all_callbacks(|cb| { + cb.add_attributes(&AttributeInfo { + name: &name, + kind: DeriveTypeKind::Struct, + }) + }); + attributes.extend( + custom_attributes + .iter() + .map(|s| s.parse().unwrap()), + ); + + quote! { + #( #attributes )* + pub struct #rust_name + } + } + }); + + let params: Vec<_> = outer_params + .into_iter() + .filter_map(|p| p.as_template_param(ctx, &())) + .collect(); + if params + .iter() + .any(|p| ctx.resolve_type(*p).is_invalid_type_param()) + { + warn!( + "Item contained invalid template \ + parameter: {:?}", + item + ); + return; + } + let params: Vec<_> = params + .iter() + .map(|p| { + p.try_to_rust_ty(ctx, &()).expect( + "type parameters can always convert to rust ty OK", + ) + }) + .collect(); + + if !params.is_empty() { + tokens.append_all(quote! { + < #( #params ),* > + }); + } + + tokens.append_all(match alias_style { + AliasVariation::TypeAlias => quote! { + = #inner_rust_type ; + }, + AliasVariation::NewType | AliasVariation::NewTypeDeref => { + let visibility = ctx + .options() + .last_callback(|cb| { + cb.field_visibility(FieldInfo { + type_name: &item.canonical_name(ctx), + field_name: "0", + field_type_name: inner_item + .expect_type() + .name(), + }) + }) + .unwrap_or(ctx.options().default_visibility); + let access_spec = access_specifier(visibility); + quote! { + (#access_spec #inner_rust_type) ; + } + } + }); + + if alias_style == AliasVariation::NewTypeDeref { + let prefix = ctx.trait_prefix(); + tokens.append_all(quote! 
{ + impl ::#prefix::ops::Deref for #rust_name { + type Target = #inner_rust_type; + #[inline] + fn deref(&self) -> &Self::Target { + &self.0 + } + } + impl ::#prefix::ops::DerefMut for #rust_name { + #[inline] + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } + } + }); + } + + result.push(tokens); + } + TypeKind::Enum(ref ei) => ei.codegen(ctx, result, item), + TypeKind::ObjCId | TypeKind::ObjCSel => { + result.saw_objc(); + } + TypeKind::ObjCInterface(ref interface) => { + interface.codegen(ctx, result, item); + } + ref u @ TypeKind::UnresolvedTypeRef(..) => { + unreachable!("Should have been resolved after parsing {u:?}!") + } + } + } +} + +struct Vtable<'a> { + item_id: ItemId, + /// A reference to the originating compound object. + #[allow(dead_code)] + comp_info: &'a CompInfo, +} + +impl<'a> Vtable<'a> { + fn new(item_id: ItemId, comp_info: &'a CompInfo) -> Self { + Vtable { item_id, comp_info } + } +} + +impl CodeGenerator for Vtable<'_> { + type Extra = Item; + type Return = (); + + fn codegen( + &self, + ctx: &BindgenContext, + result: &mut CodegenResult<'_>, + item: &Item, + ) { + assert_eq!(item.id(), self.item_id); + debug_assert!(item.is_enabled_for_codegen(ctx)); + let name = ctx.rust_ident(self.canonical_name(ctx)); + + // For now, we will only generate vtables for classes that: + // - do not inherit from others (compilers merge VTable from primary parent class). + // - do not contain a virtual destructor (requires ordering; platforms generate different vtables). + if ctx.options().vtable_generation && + self.comp_info.base_members().is_empty() && + self.comp_info.destructor().is_none() + { + let class_ident = ctx.rust_ident(self.item_id.canonical_name(ctx)); + + let methods = self + .comp_info + .methods() + .iter() + .filter_map(|m| { + if !m.is_virtual() { + return None; + } + + let function_item = ctx.resolve_item(m.signature()); + let function = function_item.expect_function(); + let signature_item = ctx.resolve_item(function.signature()); + let TypeKind::Function(ref signature) = signature_item.expect_type().kind() else { panic!("Function signature type mismatch") }; + + // FIXME: Is there a canonical name without the class prepended? + let function_name = function_item.canonical_name(ctx); + + // FIXME: Need to account for overloading with times_seen (separately from regular function path). + let function_name = ctx.rust_ident(function_name); + let mut args = utils::fnsig_arguments(ctx, signature); + let ret = utils::fnsig_return_ty(ctx, signature); + + args[0] = if m.is_const() { + quote! { this: *const #class_ident } + } else { + quote! { this: *mut #class_ident } + }; + + Some(quote! { + pub #function_name : unsafe extern "C" fn( #( #args ),* ) #ret + }) + }) + .collect::>(); + + result.push(quote! { + #[repr(C)] + pub struct #name { + #( #methods ),* + } + }); + } else { + // For the cases we don't support, simply generate an empty struct. + let void = helpers::ast_ty::c_void(ctx); + + result.push(quote! { + #[repr(C)] + pub struct #name ( #void ); + }); + } + } +} + +impl ItemCanonicalName for Vtable<'_> { + fn canonical_name(&self, ctx: &BindgenContext) -> String { + format!("{}__bindgen_vtable", self.item_id.canonical_name(ctx)) + } +} + +impl TryToRustTy for Vtable<'_> { + type Extra = (); + + fn try_to_rust_ty( + &self, + ctx: &BindgenContext, + _: &(), + ) -> error::Result { + let name = ctx.rust_ident(self.canonical_name(ctx)); + Ok(syn::parse_quote! 
{ #name }) + } +} + +impl CodeGenerator for TemplateInstantiation { + type Extra = Item; + type Return = (); + + fn codegen( + &self, + ctx: &BindgenContext, + result: &mut CodegenResult<'_>, + item: &Item, + ) { + debug_assert!(item.is_enabled_for_codegen(ctx)); + + // Although uses of instantiations don't need code generation, and are + // just converted to rust types in fields, vars, etc, we take this + // opportunity to generate tests for their layout here. If the + // instantiation is opaque, then its presumably because we don't + // properly understand it (maybe because of specializations), and so we + // shouldn't emit layout tests either. + if !ctx.options().layout_tests || self.is_opaque(ctx, item) { + return; + } + + // For consistency with other layout tests, gate this on offset_of. + let compile_time = ctx.options().rust_features().offset_of; + + // If there are any unbound type parameters, then we can't generate a + // layout test because we aren't dealing with a concrete type with a + // concrete size and alignment. + if ctx.uses_any_template_parameters(item.id()) { + return; + } + + let layout = item.kind().expect_type().layout(ctx); + + if let Some(layout) = layout { + let size = layout.size; + let align = layout.align; + + let name = item.full_disambiguated_name(ctx); + let fn_name = if compile_time { + None + } else { + let mut fn_name = + format!("__bindgen_test_layout_{name}_instantiation"); + let times_seen = result.overload_number(&fn_name); + if times_seen > 0 { + write!(&mut fn_name, "_{times_seen}").unwrap(); + } + Some(ctx.rust_ident_raw(fn_name)) + }; + + let prefix = ctx.trait_prefix(); + let ident = item.to_rust_ty_or_opaque(ctx, &()); + let size_of_expr = quote! { + ::#prefix::mem::size_of::<#ident>() + }; + let align_of_expr = quote! { + ::#prefix::mem::align_of::<#ident>() + }; + let size_of_err = + format!("Size of template specialization: {name}"); + let align_of_err = + format!("Align of template specialization: {name}"); + + if compile_time { + // In an ideal world this would be assert_eq!, but that is not + // supported in const fn due to the need for string formatting. + // If #size_of_expr > #size, this will index OOB, and if + // #size_of_expr < #size, the subtraction will overflow, both + // of which print enough information to see what has gone wrong. + result.push(quote! { + #[allow(clippy::unnecessary_operation, clippy::identity_op)] + const _: () = { + [#size_of_err][#size_of_expr - #size]; + [#align_of_err][#align_of_expr - #align]; + }; + }); + } else { + result.push(quote! { + #[test] + fn #fn_name() { + assert_eq!(#size_of_expr, #size, #size_of_err); + assert_eq!(#align_of_expr, #align, #align_of_err); + } + }); + } + } + } +} + +/// Trait for implementing the code generation of a struct or union field. 
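+/// Implementations push the generated field declaration(s) into `fields` and
+/// any generated accessor methods into `methods`, updating the running
+/// `StructLayoutTracker` as they go.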
+trait FieldCodegen<'a> { + type Extra; + + #[allow(clippy::too_many_arguments)] + fn codegen( + &self, + ctx: &BindgenContext, + visibility_kind: FieldVisibilityKind, + accessor_kind: FieldAccessorKind, + parent: &CompInfo, + parent_item: &Item, + last_field: bool, + result: &mut CodegenResult, + struct_layout: &mut StructLayoutTracker, + fields: &mut F, + methods: &mut M, + extra: Self::Extra, + ) where + F: Extend, + M: Extend; +} + +impl FieldCodegen<'_> for Field { + type Extra = (); + + fn codegen( + &self, + ctx: &BindgenContext, + visibility_kind: FieldVisibilityKind, + accessor_kind: FieldAccessorKind, + parent: &CompInfo, + parent_item: &Item, + last_field: bool, + result: &mut CodegenResult, + struct_layout: &mut StructLayoutTracker, + fields: &mut F, + methods: &mut M, + _: (), + ) where + F: Extend, + M: Extend, + { + match *self { + Field::DataMember(ref data) => { + data.codegen( + ctx, + visibility_kind, + accessor_kind, + parent, + parent_item, + last_field, + result, + struct_layout, + fields, + methods, + (), + ); + } + Field::Bitfields(ref unit) => { + unit.codegen( + ctx, + visibility_kind, + accessor_kind, + parent, + parent_item, + last_field, + result, + struct_layout, + fields, + methods, + (), + ); + } + } + } +} + +fn wrap_union_field_if_needed( + ctx: &BindgenContext, + struct_layout: &StructLayoutTracker, + ty: syn::Type, + result: &mut CodegenResult, +) -> syn::Type { + if struct_layout.is_rust_union() { + if struct_layout.can_copy_union_fields() { + ty + } else { + let prefix = ctx.trait_prefix(); + syn::parse_quote! { ::#prefix::mem::ManuallyDrop<#ty> } + } + } else { + result.saw_bindgen_union(); + if ctx.options().enable_cxx_namespaces { + syn::parse_quote! { root::__BindgenUnionField<#ty> } + } else { + syn::parse_quote! { __BindgenUnionField<#ty> } + } + } +} + +impl FieldCodegen<'_> for FieldData { + type Extra = (); + + fn codegen( + &self, + ctx: &BindgenContext, + parent_visibility_kind: FieldVisibilityKind, + accessor_kind: FieldAccessorKind, + parent: &CompInfo, + parent_item: &Item, + last_field: bool, + result: &mut CodegenResult, + struct_layout: &mut StructLayoutTracker, + fields: &mut F, + methods: &mut M, + _: (), + ) where + F: Extend, + M: Extend, + { + // Bitfields are handled by `FieldCodegen` implementations for + // `BitfieldUnit` and `Bitfield`. + assert!(self.bitfield_width().is_none()); + + let field_item = + self.ty().into_resolver().through_type_refs().resolve(ctx); + let field_ty = field_item.expect_type(); + let ty = self + .ty() + .to_rust_ty_or_opaque(ctx, &()) + .with_implicit_template_params(ctx, field_item); + + // NB: If supported, we use proper `union` types. + let ty = if parent.is_union() { + wrap_union_field_if_needed(ctx, struct_layout, ty, result) + } else if let Some(item) = field_ty.is_incomplete_array(ctx) { + // Only FAM if its the last field + if ctx.options().flexarray_dst && last_field { + struct_layout.saw_flexible_array(); + syn::parse_quote! { FAM } + } else { + result.saw_incomplete_array(); + + let inner = item.to_rust_ty_or_opaque(ctx, &()); + + if ctx.options().enable_cxx_namespaces { + syn::parse_quote! { root::__IncompleteArrayField<#inner> } + } else { + syn::parse_quote! { __IncompleteArrayField<#inner> } + } + } + } else { + ty + }; + + let mut field = quote! 
{}; + if ctx.options().generate_comments { + if let Some(raw_comment) = self.comment() { + let comment = ctx.options().process_comment(raw_comment); + field = attributes::doc(&comment); + } + } + + let field_name = self + .name() + .map(|name| ctx.rust_mangle(name).into_owned()) + .expect("Each field should have a name in codegen!"); + let field_name = field_name.as_str(); + let field_ident = ctx.rust_ident_raw(field_name); + + if let Some(padding_field) = + struct_layout.saw_field(field_name, field_ty, self.offset()) + { + fields.extend(Some(padding_field)); + } + + let visibility = compute_visibility( + ctx, + self.is_public(), + ctx.options().last_callback(|cb| { + cb.field_visibility(FieldInfo { + type_name: &parent_item.canonical_name(ctx), + field_name, + field_type_name: field_ty.name(), + }) + }), + self.annotations(), + parent_visibility_kind, + ); + let accessor_kind = + self.annotations().accessor_kind().unwrap_or(accessor_kind); + + match visibility { + FieldVisibilityKind::Private => { + field.append_all(quote! { + #field_ident : #ty , + }); + } + FieldVisibilityKind::PublicCrate => { + field.append_all(quote! { + pub(crate) #field_ident : #ty , + }); + } + FieldVisibilityKind::Public => { + field.append_all(quote! { + pub #field_ident : #ty , + }); + } + } + + fields.extend(Some(field)); + + // TODO: Factor the following code out, please! + if accessor_kind == FieldAccessorKind::None { + return; + } + + let getter_name = ctx.rust_ident_raw(format!("get_{field_name}")); + let mutable_getter_name = + ctx.rust_ident_raw(format!("get_{field_name}_mut")); + + methods.extend(Some(match accessor_kind { + FieldAccessorKind::None => unreachable!(), + FieldAccessorKind::Regular => { + quote! { + #[inline] + pub fn #getter_name(&self) -> & #ty { + &self.#field_ident + } + + #[inline] + pub fn #mutable_getter_name(&mut self) -> &mut #ty { + &mut self.#field_ident + } + } + } + FieldAccessorKind::Unsafe => { + quote! { + #[inline] + pub unsafe fn #getter_name(&self) -> & #ty { + &self.#field_ident + } + + #[inline] + pub unsafe fn #mutable_getter_name(&mut self) -> &mut #ty { + &mut self.#field_ident + } + } + } + FieldAccessorKind::Immutable => { + quote! { + #[inline] + pub fn #getter_name(&self) -> & #ty { + &self.#field_ident + } + } + } + })); + } +} + +impl BitfieldUnit { + /// Get the constructor name for this bitfield unit. + fn ctor_name(&self) -> proc_macro2::TokenStream { + let ctor_name = Ident::new( + &format!("new_bitfield_{}", self.nth()), + Span::call_site(), + ); + quote! { + #ctor_name + } + } +} + +impl Bitfield { + /// Extend an under construction bitfield unit constructor with this + /// bitfield. This sets the relevant bits on the `__bindgen_bitfield_unit` + /// variable that's being constructed. + fn extend_ctor_impl( + &self, + ctx: &BindgenContext, + param_name: &proc_macro2::TokenStream, + mut ctor_impl: proc_macro2::TokenStream, + ) -> proc_macro2::TokenStream { + let bitfield_ty = ctx.resolve_type(self.ty()); + let bitfield_ty_layout = bitfield_ty + .layout(ctx) + .expect("Bitfield without layout? Gah!"); + let bitfield_int_ty = helpers::integer_type(bitfield_ty_layout).expect( + "Should already have verified that the bitfield is \ + representable as an int", + ); + + let offset = self.offset_into_unit(); + let width = self.width() as u8; + let prefix = ctx.trait_prefix(); + + ctor_impl.append_all(quote! 
{ + __bindgen_bitfield_unit.set( + #offset, + #width, + { + let #param_name: #bitfield_int_ty = unsafe { + ::#prefix::mem::transmute(#param_name) + }; + #param_name as u64 + } + ); + }); + + ctor_impl + } +} + +fn access_specifier( + visibility: FieldVisibilityKind, +) -> proc_macro2::TokenStream { + match visibility { + FieldVisibilityKind::Private => quote! {}, + FieldVisibilityKind::PublicCrate => quote! { pub(crate) }, + FieldVisibilityKind::Public => quote! { pub }, + } +} + +/// Compute a fields or structs visibility based on multiple conditions. +/// 1. If the element was declared public, and we respect such CXX accesses specs +/// (context option) => By default Public, but this can be overruled by an `annotation`. +/// +/// 2. If the element was declared private, and we respect such CXX accesses specs +/// (context option) => By default Private, but this can be overruled by an `annotation`. +/// +/// 3. If we do not respect visibility modifiers, the result depends on the `annotation`, +/// if any, or the passed `default_kind`. +/// +fn compute_visibility( + ctx: &BindgenContext, + is_declared_public: bool, + callback_override: Option, + annotations: &Annotations, + default_kind: FieldVisibilityKind, +) -> FieldVisibilityKind { + callback_override + .or_else(|| annotations.visibility_kind()) + .unwrap_or_else(|| { + match (is_declared_public, ctx.options().respect_cxx_access_specs) { + (true, true) => { + // declared as public, cxx specs are respected + FieldVisibilityKind::Public + } + (false, true) => { + // declared as private, cxx specs are respected + FieldVisibilityKind::Private + } + (_, false) => { + // cxx specs are not respected, declaration does not matter. + default_kind + } + } + }) +} + +impl FieldCodegen<'_> for BitfieldUnit { + type Extra = (); + + fn codegen( + &self, + ctx: &BindgenContext, + visibility_kind: FieldVisibilityKind, + accessor_kind: FieldAccessorKind, + parent: &CompInfo, + parent_item: &Item, + last_field: bool, + result: &mut CodegenResult, + struct_layout: &mut StructLayoutTracker, + fields: &mut F, + methods: &mut M, + _: (), + ) where + F: Extend, + M: Extend, + { + use crate::ir::ty::RUST_DERIVE_IN_ARRAY_LIMIT; + + result.saw_bitfield_unit(); + + let layout = self.layout(); + let unit_field_ty = helpers::bitfield_unit(ctx, layout); + let field_ty = { + let unit_field_ty = unit_field_ty.clone(); + if parent.is_union() { + wrap_union_field_if_needed( + ctx, + struct_layout, + unit_field_ty, + result, + ) + } else { + unit_field_ty + } + }; + + { + let align_field_name = format!("_bitfield_align_{}", self.nth()); + let align_field_ident = ctx.rust_ident(align_field_name); + let align_ty = match self.layout().align { + n if n >= 8 => quote! { u64 }, + 4 => quote! { u32 }, + 2 => quote! { u16 }, + _ => quote! { u8 }, + }; + let access_spec = access_specifier(visibility_kind); + let align_field = quote! { + #access_spec #align_field_ident: [#align_ty; 0], + }; + fields.extend(Some(align_field)); + } + + let unit_field_name = format!("_bitfield_{}", self.nth()); + let unit_field_ident = ctx.rust_ident(&unit_field_name); + + let ctor_name = self.ctor_name(); + let mut ctor_params = vec![]; + let mut ctor_impl = quote! {}; + + // We cannot generate any constructor if the underlying storage can't + // implement AsRef<[u8]> / AsMut<[u8]> / etc, or can't derive Default. + // + // We don't check `larger_arrays` here because Default does still have + // the 32 items limitation. 
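+ // (`RUST_DERIVE_IN_ARRAY_LIMIT` is that 32-element cap: the standard library
+ // only derives traits such as `Default` for arrays of at most 32 elements.)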
+ let mut generate_ctor = layout.size <= RUST_DERIVE_IN_ARRAY_LIMIT; + + let mut unit_visibility = visibility_kind; + let bfields = self.bitfields(); + for (idx, bf) in bfields.iter().enumerate() { + // Codegen not allowed for anonymous bitfields + if bf.name().is_none() { + continue; + } + + if layout.size > RUST_DERIVE_IN_ARRAY_LIMIT && + !ctx.options().rust_features().larger_arrays + { + continue; + } + + let mut bitfield_representable_as_int = true; + let mut bitfield_visibility = visibility_kind; + bf.codegen( + ctx, + visibility_kind, + accessor_kind, + parent, + parent_item, + last_field && idx == bfields.len() - 1, + result, + struct_layout, + fields, + methods, + ( + &unit_field_name, + &unit_field_ty, + &mut bitfield_representable_as_int, + &mut bitfield_visibility, + ), + ); + if bitfield_visibility < unit_visibility { + unit_visibility = bitfield_visibility; + } + + // Generating a constructor requires the bitfield to be representable as an integer. + if !bitfield_representable_as_int { + generate_ctor = false; + continue; + } + + let param_name = bitfield_getter_name(ctx, bf); + let bitfield_ty_item = ctx.resolve_item(bf.ty()); + let bitfield_ty = bitfield_ty_item.expect_type(); + let bitfield_ty = + bitfield_ty.to_rust_ty_or_opaque(ctx, bitfield_ty_item); + + ctor_params.push(quote! { + #param_name : #bitfield_ty + }); + ctor_impl = bf.extend_ctor_impl(ctx, ¶m_name, ctor_impl); + } + + let access_spec = access_specifier(unit_visibility); + + let field = quote! { + #access_spec #unit_field_ident : #field_ty , + }; + fields.extend(Some(field)); + + if generate_ctor { + methods.extend(Some(quote! { + #[inline] + #access_spec fn #ctor_name ( #( #ctor_params ),* ) -> #unit_field_ty { + let mut __bindgen_bitfield_unit: #unit_field_ty = Default::default(); + #ctor_impl + __bindgen_bitfield_unit + } + })); + } + + struct_layout.saw_bitfield_unit(layout); + } +} + +fn bitfield_getter_name( + ctx: &BindgenContext, + bitfield: &Bitfield, +) -> proc_macro2::TokenStream { + let name = bitfield.getter_name(); + let name = ctx.rust_ident_raw(name); + quote! { #name } +} + +fn bitfield_raw_getter_name( + ctx: &BindgenContext, + bitfield: &Bitfield, +) -> proc_macro2::TokenStream { + let name = bitfield.getter_name(); + let name = ctx.rust_ident_raw(format!("{name}_raw")); + quote! { #name } +} + +fn bitfield_setter_name( + ctx: &BindgenContext, + bitfield: &Bitfield, +) -> proc_macro2::TokenStream { + let setter = bitfield.setter_name(); + let setter = ctx.rust_ident_raw(setter); + quote! { #setter } +} + +fn bitfield_raw_setter_name( + ctx: &BindgenContext, + bitfield: &Bitfield, +) -> proc_macro2::TokenStream { + let setter = bitfield.setter_name(); + let setter = ctx.rust_ident_raw(format!("{setter}_raw")); + quote! 
{ #setter } +} + +impl<'a> FieldCodegen<'a> for Bitfield { + type Extra = ( + &'a str, + &'a syn::Type, + &'a mut bool, + &'a mut FieldVisibilityKind, + ); + + fn codegen( + &self, + ctx: &BindgenContext, + visibility_kind: FieldVisibilityKind, + _accessor_kind: FieldAccessorKind, + parent: &CompInfo, + parent_item: &Item, + _last_field: bool, + _result: &mut CodegenResult, + struct_layout: &mut StructLayoutTracker, + _fields: &mut F, + methods: &mut M, + ( + unit_field_name, + unit_field_ty, + bitfield_representable_as_int, + bitfield_visibility, + ): ( + &'a str, + &'a syn::Type, + &mut bool, + &'a mut FieldVisibilityKind, + ), + ) where + F: Extend, + M: Extend, + { + let prefix = ctx.trait_prefix(); + let getter_name = bitfield_getter_name(ctx, self); + let setter_name = bitfield_setter_name(ctx, self); + let raw_getter_name = bitfield_raw_getter_name(ctx, self); + let raw_setter_name = bitfield_raw_setter_name(ctx, self); + let unit_field_ident = Ident::new(unit_field_name, Span::call_site()); + + let bitfield_ty_item = ctx.resolve_item(self.ty()); + let bitfield_ty = bitfield_ty_item.expect_type(); + let bitfield_ty_ident = bitfield_ty.name(); + + let bitfield_ty_layout = bitfield_ty + .layout(ctx) + .expect("Bitfield without layout? Gah!"); + let bitfield_int_ty = + if let Some(int_ty) = helpers::integer_type(bitfield_ty_layout) { + *bitfield_representable_as_int = true; + int_ty + } else { + *bitfield_representable_as_int = false; + return; + }; + + let bitfield_ty = + bitfield_ty.to_rust_ty_or_opaque(ctx, bitfield_ty_item); + + let offset = self.offset_into_unit(); + let width = self.width() as u8; + + let override_visibility = self.name().and_then(|field_name| { + ctx.options().last_callback(|cb| { + cb.field_visibility(FieldInfo { + type_name: &parent_item.canonical_name(ctx), + field_name, + field_type_name: bitfield_ty_ident, + }) + }) + }); + *bitfield_visibility = compute_visibility( + ctx, + self.is_public(), + override_visibility, + self.annotations(), + visibility_kind, + ); + let access_spec = access_specifier(*bitfield_visibility); + + if parent.is_union() && !struct_layout.is_rust_union() { + methods.extend(Some(quote! { + #[inline] + #access_spec fn #getter_name(&self) -> #bitfield_ty { + unsafe { + ::#prefix::mem::transmute( + self.#unit_field_ident.as_ref().get(#offset, #width) + as #bitfield_int_ty + ) + } + } + + #[inline] + #access_spec fn #setter_name(&mut self, val: #bitfield_ty) { + unsafe { + let val: #bitfield_int_ty = ::#prefix::mem::transmute(val); + self.#unit_field_ident.as_mut().set( + #offset, + #width, + val as u64 + ) + } + } + })); + + if ctx.options().rust_features.raw_ref_macros { + methods.extend(Some(quote! { + #[inline] + #access_spec unsafe fn #raw_getter_name(this: *const Self) -> #bitfield_ty { + unsafe { + ::#prefix::mem::transmute(<#unit_field_ty>::raw_get( + (*::#prefix::ptr::addr_of!((*this).#unit_field_ident)).as_ref() as *const _, + #offset, + #width, + ) as #bitfield_int_ty) + } + } + + #[inline] + #access_spec unsafe fn #raw_setter_name(this: *mut Self, val: #bitfield_ty) { + unsafe { + let val: #bitfield_int_ty = ::#prefix::mem::transmute(val); + <#unit_field_ty>::raw_set( + (*::#prefix::ptr::addr_of_mut!((*this).#unit_field_ident)).as_mut() as *mut _, + #offset, + #width, + val as u64, + ) + } + } + })); + } + } else { + methods.extend(Some(quote! 
{ + #[inline] + #access_spec fn #getter_name(&self) -> #bitfield_ty { + unsafe { + ::#prefix::mem::transmute( + self.#unit_field_ident.get(#offset, #width) + as #bitfield_int_ty + ) + } + } + + #[inline] + #access_spec fn #setter_name(&mut self, val: #bitfield_ty) { + unsafe { + let val: #bitfield_int_ty = ::#prefix::mem::transmute(val); + self.#unit_field_ident.set( + #offset, + #width, + val as u64 + ) + } + } + })); + + if ctx.options().rust_features.raw_ref_macros { + methods.extend(Some(quote! { + #[inline] + #access_spec unsafe fn #raw_getter_name(this: *const Self) -> #bitfield_ty { + unsafe { + ::#prefix::mem::transmute(<#unit_field_ty>::raw_get( + ::#prefix::ptr::addr_of!((*this).#unit_field_ident), + #offset, + #width, + ) as #bitfield_int_ty) + } + } + + #[inline] + #access_spec unsafe fn #raw_setter_name(this: *mut Self, val: #bitfield_ty) { + unsafe { + let val: #bitfield_int_ty = ::#prefix::mem::transmute(val); + <#unit_field_ty>::raw_set( + ::#prefix::ptr::addr_of_mut!((*this).#unit_field_ident), + #offset, + #width, + val as u64, + ) + } + } + })); + } + } + } +} + +impl CodeGenerator for CompInfo { + type Extra = Item; + type Return = (); + + fn codegen( + &self, + ctx: &BindgenContext, + result: &mut CodegenResult<'_>, + item: &Item, + ) { + debug!("::codegen: item = {item:?}"); + debug_assert!(item.is_enabled_for_codegen(ctx)); + + // Don't output classes with template parameters that aren't types, and + // also don't output template specializations, neither total or partial. + if self.has_non_type_template_params() { + return; + } + + let ty = item.expect_type(); + let layout = ty.layout(ctx); + let mut packed = self.is_packed(ctx, layout.as_ref()); + + let canonical_name = item.canonical_name(ctx); + let canonical_ident = ctx.rust_ident(&canonical_name); + + // Generate the vtable from the method list if appropriate. + // + // TODO: I don't know how this could play with virtual methods that are + // not in the list of methods found by us, we'll see. Also, could the + // order of the vtable pointers vary? + // + // FIXME: Once we generate proper vtables, we need to codegen the + // vtable, but *not* generate a field for it in the case that + // HasVtable::has_vtable_ptr is false but HasVtable::has_vtable is true. + // + // Also, we need to generate the vtable in such a way it "inherits" from + // the parent too. + let is_opaque = item.is_opaque(ctx, &()); + let mut fields = vec![]; + let visibility = item + .annotations() + .visibility_kind() + .unwrap_or(ctx.options().default_visibility); + let mut struct_layout = StructLayoutTracker::new( + ctx, + self, + ty, + &canonical_name, + visibility, + packed, + ); + + let mut generic_param_names = vec![]; + + for (idx, ty) in item.used_template_params(ctx).iter().enumerate() { + let param = ctx.resolve_type(*ty); + let name = param.name().unwrap(); + let ident = ctx.rust_ident(name); + generic_param_names.push(ident.clone()); + + let prefix = ctx.trait_prefix(); + let field_name = ctx.rust_ident(format!("_phantom_{idx}")); + fields.push(quote! { + pub #field_name : ::#prefix::marker::PhantomData< + ::#prefix::cell::UnsafeCell<#ident> + > , + }); + } + + if !is_opaque { + if item.has_vtable_ptr(ctx) { + let vtable = Vtable::new(item.id(), self); + vtable.codegen(ctx, result, item); + + let vtable_type = vtable + .try_to_rust_ty(ctx, &()) + .expect("vtable to Rust type conversion is infallible") + .to_ptr(true); + + fields.push(quote! 
{ + pub vtable_: #vtable_type , + }); + + struct_layout.saw_vtable(); + } + + for base in self.base_members() { + if !base.requires_storage(ctx) { + continue; + } + + let inner_item = ctx.resolve_item(base.ty); + let inner = inner_item + .to_rust_ty_or_opaque(ctx, &()) + .with_implicit_template_params(ctx, inner_item); + let field_name = ctx.rust_ident(&base.field_name); + + struct_layout.saw_base(inner_item.expect_type()); + + let visibility = match ( + base.is_public(), + ctx.options().respect_cxx_access_specs, + ) { + (true, true) => FieldVisibilityKind::Public, + (false, true) => FieldVisibilityKind::Private, + _ => ctx.options().default_visibility, + }; + + let access_spec = access_specifier(visibility); + fields.push(quote! { + #access_spec #field_name: #inner, + }); + } + } + + let mut methods = vec![]; + if !is_opaque { + let struct_accessor_kind = item + .annotations() + .accessor_kind() + .unwrap_or(FieldAccessorKind::None); + let field_decls = self.fields(); + for (idx, field) in field_decls.iter().enumerate() { + field.codegen( + ctx, + visibility, + struct_accessor_kind, + self, + item, + idx == field_decls.len() - 1, + result, + &mut struct_layout, + &mut fields, + &mut methods, + (), + ); + } + // Check whether an explicit padding field is needed + // at the end. + if let Some(comp_layout) = layout { + fields.extend( + struct_layout + .add_tail_padding(&canonical_name, comp_layout), + ); + } + } + + if is_opaque { + // Opaque item should not have generated methods, fields. + debug_assert!(fields.is_empty()); + debug_assert!(methods.is_empty()); + } + + let is_union = self.kind() == CompKind::Union; + let layout = item.kind().expect_type().layout(ctx); + let zero_sized = item.is_zero_sized(ctx); + let forward_decl = self.is_forward_declaration(); + + let mut explicit_align = None; + + // C++ requires every struct to be addressable, so what C++ compilers do + // is making the struct 1-byte sized. + // + // This is apparently not the case for C, see: + // https://github.com/rust-lang/rust-bindgen/issues/551 + // + // Just get the layout, and assume C++ if not. + // + // NOTE: This check is conveniently here to avoid the dummy fields we + // may add for unused template parameters. + if !forward_decl && zero_sized { + let has_address = if is_opaque { + // Generate the address field if it's an opaque type and + // couldn't determine the layout of the blob. + layout.is_none() + } else { + layout.map_or(true, |l| l.size != 0) + }; + + if has_address { + let layout = Layout::new(1, 1); + let ty = helpers::blob(ctx, Layout::new(1, 1), false); + struct_layout.saw_field_with_layout( + "_address", + layout, + /* offset = */ Some(0), + ); + fields.push(quote! { + pub _address: #ty, + }); + } + } + + if is_opaque { + match layout { + Some(l) => { + explicit_align = Some(l.align); + + let ty = helpers::blob(ctx, l, false); + fields.push(quote! { + pub _bindgen_opaque_blob: #ty , + }); + } + None => { + if !forward_decl { + warn!("Opaque type without layout! 
Expect dragons!"); + } + } + } + } else if !is_union && !zero_sized { + if let Some(padding_field) = + layout.and_then(|layout| struct_layout.pad_struct(layout)) + { + fields.push(padding_field); + } + + if let Some(layout) = layout { + if struct_layout.requires_explicit_align(layout) { + if layout.align == 1 { + packed = true; + } else { + explicit_align = Some(layout.align); + } + } + } + } else if is_union && !forward_decl { + if let Some(layout) = layout { + // TODO(emilio): It'd be nice to unify this with the struct path above somehow. + if struct_layout.requires_explicit_align(layout) { + explicit_align = Some(layout.align); + } + if !struct_layout.is_rust_union() { + let ty = helpers::blob(ctx, layout, false); + fields.push(quote! { + pub bindgen_union_field: #ty , + }); + } + } + } + + if forward_decl { + fields.push(quote! { + _unused: [u8; 0], + }); + } + + let (flex_array_generic, flex_inner_ty) = if ctx.options().flexarray_dst + { + match self.flex_array_member(ctx) { + Some(ty) => { + let inner = ty.to_rust_ty_or_opaque(ctx, &()); + ( + Some(quote! { FAM: ?Sized = [ #inner; 0 ] }), + Some(quote! { #inner }), + ) + } + None => (None, None), + } + } else { + (None, None) + }; + + // Generics, including the flexible array member. + // + // generics - generic parameters for the struct declaration + // impl_generics_labels - generic parameters for `impl<...>` + // impl_generics_params - generic parameters for `impl structname<...>` + // + // `impl` blocks are for non-FAM related impls like Default, etc + let (generics, impl_generics_labels, impl_generics_params) = + if !generic_param_names.is_empty() || flex_array_generic.is_some() { + let (flex_sized, flex_fam) = match flex_inner_ty.as_ref() { + None => (None, None), + Some(ty) => ( + Some(quote! { [ #ty; 0 ] }), + Some(quote! { FAM: ?Sized = [ #ty; 0 ] }), + ), + }; + + ( + quote! { + < #( #generic_param_names , )* #flex_fam > + }, + quote! { + < #( #generic_param_names , )* > + }, + quote! { + < #( #generic_param_names , )* #flex_sized > + }, + ) + } else { + (quote! {}, quote! {}, quote! {}) + }; + + let mut attributes = vec![]; + let mut needs_clone_impl = false; + let mut needs_default_impl = false; + let mut needs_debug_impl = false; + let mut needs_partialeq_impl = false; + let needs_flexarray_impl = flex_array_generic.is_some(); + if let Some(comment) = item.comment(ctx) { + attributes.push(attributes::doc(&comment)); + } + + // if a type has both a "packed" attribute and an "align(N)" attribute, then check if the + // "packed" attr is redundant, and do not include it if so. + if packed && + !is_opaque && + !(explicit_align.is_some() && + self.already_packed(ctx).unwrap_or(false)) + { + let n = layout.map_or(1, |l| l.align); + assert!(ctx.options().rust_features().repr_packed_n || n == 1); + let packed_repr = if n == 1 { + "packed".to_string() + } else { + format!("packed({n})") + }; + attributes.push(attributes::repr_list(&["C", &packed_repr])); + } else { + attributes.push(attributes::repr("C")); + } + + if true { + if let Some(explicit) = explicit_align { + // Ensure that the struct has the correct alignment even in + // presence of alignas. + let explicit = helpers::ast_ty::int_expr(explicit as i64); + attributes.push(quote! 
{ + #[repr(align(#explicit))] + }); + } + } + + let derivable_traits = derives_of_item(item, ctx, packed); + if !derivable_traits.contains(DerivableTraits::DEBUG) { + needs_debug_impl = ctx.options().derive_debug && + ctx.options().impl_debug && + !ctx.no_debug_by_name(item) && + !item.annotations().disallow_debug(); + } + + if !derivable_traits.contains(DerivableTraits::DEFAULT) { + needs_default_impl = ctx.options().derive_default && + !self.is_forward_declaration() && + !ctx.no_default_by_name(item) && + !item.annotations().disallow_default(); + } + + let all_template_params = item.all_template_params(ctx); + + if derivable_traits.contains(DerivableTraits::COPY) && + !derivable_traits.contains(DerivableTraits::CLONE) + { + needs_clone_impl = true; + } + + if !derivable_traits.contains(DerivableTraits::PARTIAL_EQ) { + needs_partialeq_impl = ctx.options().derive_partialeq && + ctx.options().impl_partialeq && + ctx.lookup_can_derive_partialeq_or_partialord(item.id()) == + CanDerive::Manually; + } + + let mut derives: Vec<_> = derivable_traits.into(); + derives.extend(item.annotations().derives().iter().map(String::as_str)); + + let is_rust_union = is_union && struct_layout.is_rust_union(); + + let discovered_id = DiscoveredItemId::new(item.id().as_usize()); + ctx.options().for_each_callback(|cb| { + let discovered_item = match self.kind() { + CompKind::Struct => DiscoveredItem::Struct { + original_name: item + .kind() + .expect_type() + .name() + .map(String::from), + final_name: canonical_ident.to_string(), + }, + CompKind::Union => DiscoveredItem::Union { + original_name: item + .kind() + .expect_type() + .name() + .map(String::from), + final_name: canonical_ident.to_string(), + }, + }; + + cb.new_item_found(discovered_id, discovered_item); + }); + + // The custom derives callback may return a list of derive attributes; + // add them to the end of the list. + let custom_derives = ctx.options().all_callbacks(|cb| { + cb.add_derives(&DeriveInfo { + name: &canonical_name, + kind: if is_rust_union { + DeriveTypeKind::Union + } else { + DeriveTypeKind::Struct + }, + }) + }); + // In most cases this will be a no-op, since custom_derives will be empty. + derives.extend(custom_derives.iter().map(|s| s.as_str())); + + if !derives.is_empty() { + attributes.push(attributes::derives(&derives)); + } + + attributes.extend( + item.annotations() + .attributes() + .iter() + .map(|s| s.parse().unwrap()), + ); + + let custom_attributes = ctx.options().all_callbacks(|cb| { + cb.add_attributes(&AttributeInfo { + name: &canonical_name, + kind: if is_rust_union { + DeriveTypeKind::Union + } else { + DeriveTypeKind::Struct + }, + }) + }); + attributes.extend(custom_attributes.iter().map(|s| s.parse().unwrap())); + + if item.must_use(ctx) { + attributes.push(attributes::must_use()); + } + + let mut tokens = if is_rust_union { + quote! { + #( #attributes )* + pub union #canonical_ident + } + } else { + quote! { + #( #attributes )* + pub struct #canonical_ident + } + }; + + tokens.append_all(quote! { + #generics { + #( #fields )* + } + }); + result.push(tokens); + + // Generate the inner types and all that stuff. + // + // TODO: In the future we might want to be smart, and use nested + // modules, and whatnot. 
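+ // For now each inner type is emitted as a sibling item alongside this one
+ // rather than inside a nested module.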
+ for ty in self.inner_types() { + let child_item = ctx.resolve_item(*ty); + // assert_eq!(child_item.parent_id(), item.id()); + child_item.codegen(ctx, result, &()); + } + + // NOTE: Some unexposed attributes (like alignment attributes) may + // affect layout, so we're bad and pray to the gods for avoid sending + // all the tests to shit when parsing things like max_align_t. + if self.found_unknown_attr() { + warn!("Type {canonical_ident} has an unknown attribute that may affect layout"); + } + + if all_template_params.is_empty() { + if !is_opaque { + for var in self.inner_vars() { + ctx.resolve_item(*var).codegen(ctx, result, &()); + } + } + + if ctx.options().layout_tests && !self.is_forward_declaration() { + if let Some(layout) = layout { + let compile_time = ctx.options().rust_features().offset_of; + let fn_name = if compile_time { + None + } else { + let fn_name = + format!("bindgen_test_layout_{canonical_ident}"); + Some(ctx.rust_ident_raw(fn_name)) + }; + let prefix = ctx.trait_prefix(); + let size_of_expr = quote! { + ::#prefix::mem::size_of::<#canonical_ident>() + }; + let align_of_expr = quote! { + ::#prefix::mem::align_of::<#canonical_ident>() + }; + let size = layout.size; + let align = layout.align; + let size_of_err = format!("Size of {canonical_ident}"); + let align_of_err = + format!("Alignment of {canonical_ident}"); + + let check_struct_align = if compile_time { + quote! { + [#align_of_err][#align_of_expr - #align]; + } + } else { + quote! { + assert_eq!(#align_of_expr, #align, #align_of_err); + } + }; + + let should_skip_field_offset_checks = is_opaque; + + let check_field_offset = if should_skip_field_offset_checks + { + vec![] + } else { + self.fields() + .iter() + .filter_map(|field| { + let Field::DataMember(field) = field else { return None }; + let name = field.name()?; + field.offset().map(|offset| { + let field_offset = offset / 8; + let field_name = ctx.rust_ident(name); + let offset_of_err = format!("Offset of field: {canonical_ident}::{field_name}"); + if compile_time { + quote! { + [#offset_of_err][ + ::#prefix::mem::offset_of!(#canonical_ident, #field_name) - #field_offset + ]; + } + } else { + quote! { + assert_eq!( + unsafe { + ::#prefix::ptr::addr_of!((*ptr).#field_name) as usize - ptr as usize + }, + #field_offset, + #offset_of_err + ); + } + } + }) + }) + .collect() + }; + + let uninit_decl = if check_field_offset.is_empty() || + compile_time + { + None + } else { + // FIXME: When MSRV >= 1.59.0, we can use + // > const PTR: *const #canonical_ident = ::#prefix::mem::MaybeUninit::uninit().as_ptr(); + Some(quote! { + // Use a shared MaybeUninit so that rustc with + // opt-level=0 doesn't take too much stack space, + // see #2218. + const UNINIT: ::#prefix::mem::MaybeUninit<#canonical_ident> = ::#prefix::mem::MaybeUninit::uninit(); + let ptr = UNINIT.as_ptr(); + }) + }; + + if compile_time { + result.push(quote! { + #[allow(clippy::unnecessary_operation, clippy::identity_op)] + const _: () = { + [#size_of_err][#size_of_expr - #size]; + #check_struct_align + #( #check_field_offset )* + }; + }); + } else { + result.push(quote! 
{ + #[test] + fn #fn_name() { + #uninit_decl + assert_eq!(#size_of_expr, #size, #size_of_err); + #check_struct_align + #( #check_field_offset )* + } + }); + } + } + } + + let mut method_names = Default::default(); + if ctx.options().codegen_config.methods() { + for method in self.methods() { + assert_ne!(method.kind(), MethodKind::Constructor); + method.codegen_method( + ctx, + &mut methods, + &mut method_names, + result, + self, + discovered_id, + ); + } + } + + if ctx.options().codegen_config.constructors() { + for sig in self.constructors() { + Method::new( + MethodKind::Constructor, + *sig, + /* const */ + false, + ) + .codegen_method( + ctx, + &mut methods, + &mut method_names, + result, + self, + discovered_id, + ); + } + } + + if ctx.options().codegen_config.destructors() { + if let Some((kind, destructor)) = self.destructor() { + debug_assert!(kind.is_destructor()); + Method::new(kind, destructor, false).codegen_method( + ctx, + &mut methods, + &mut method_names, + result, + self, + discovered_id, + ); + } + } + } + + // NB: We can't use to_rust_ty here since for opaque types this tries to + // use the specialization knowledge to generate a blob field. + let ty_for_impl = quote! { + #canonical_ident #impl_generics_params + }; + + if needs_clone_impl { + result.push(quote! { + impl #impl_generics_labels Clone for #ty_for_impl { + fn clone(&self) -> Self { *self } + } + }); + } + + if needs_flexarray_impl { + result.push(self.generate_flexarray( + ctx, + &canonical_ident, + flex_inner_ty.as_ref(), + &generic_param_names, + &impl_generics_labels, + )); + } + + if needs_default_impl { + let prefix = ctx.trait_prefix(); + let body = if ctx.options().rust_features().maybe_uninit { + quote! { + let mut s = ::#prefix::mem::MaybeUninit::::uninit(); + unsafe { + ::#prefix::ptr::write_bytes(s.as_mut_ptr(), 0, 1); + s.assume_init() + } + } + } else { + quote! { + unsafe { + let mut s: Self = ::#prefix::mem::uninitialized(); + ::#prefix::ptr::write_bytes(&mut s, 0, 1); + s + } + } + }; + // Note we use `ptr::write_bytes()` instead of `mem::zeroed()` because the latter does + // not necessarily ensure padding bytes are zeroed. Some C libraries are sensitive to + // non-zero padding bytes, especially when forwards/backwards compatibility is + // involved. + result.push(quote! { + impl #impl_generics_labels Default for #ty_for_impl { + fn default() -> Self { + #body + } + } + }); + } + + if needs_debug_impl { + let impl_ = impl_debug::gen_debug_impl( + ctx, + self.fields(), + item, + self.kind(), + ); + + let prefix = ctx.trait_prefix(); + + result.push(quote! { + impl #impl_generics_labels ::#prefix::fmt::Debug for #ty_for_impl { + #impl_ + } + }); + } + + if needs_partialeq_impl { + if let Some(impl_) = impl_partialeq::gen_partialeq_impl( + ctx, + self, + item, + &ty_for_impl, + ) { + let partialeq_bounds = if generic_param_names.is_empty() { + quote! {} + } else { + let bounds = generic_param_names.iter().map(|t| { + quote! { #t: PartialEq } + }); + quote! { where #( #bounds ),* } + }; + + let prefix = ctx.trait_prefix(); + result.push(quote! { + impl #impl_generics_labels ::#prefix::cmp::PartialEq for #ty_for_impl #partialeq_bounds { + #impl_ + } + }); + } + } + + if !methods.is_empty() { + result.push(quote! 
{ + impl #impl_generics_labels #ty_for_impl { + #( #methods )* + } + }); + } + } +} + +impl CompInfo { + fn generate_flexarray( + &self, + ctx: &BindgenContext, + canonical_ident: &Ident, + flex_inner_ty: Option<&proc_macro2::TokenStream>, + generic_param_names: &[Ident], + impl_generics_labels: &proc_macro2::TokenStream, + ) -> proc_macro2::TokenStream { + let prefix = ctx.trait_prefix(); + + let flex_array = flex_inner_ty.as_ref().map(|ty| quote! { [ #ty ] }); + + let dst_ty_for_impl = quote! { + #canonical_ident < #( #generic_param_names , )* #flex_array > + + }; + let sized_ty_for_impl = quote! { + #canonical_ident < #( #generic_param_names , )* [ #flex_inner_ty; 0 ] > + }; + + let layout = if ctx.options().rust_features().layout_for_ptr { + quote! { + pub fn layout(len: usize) -> ::#prefix::alloc::Layout { + // SAFETY: Null pointers are OK if we don't deref them + unsafe { + let p: *const Self = ::#prefix::ptr::from_raw_parts(::#prefix::ptr::null::<()>(), len); + ::#prefix::alloc::Layout::for_value_raw(p) + } + } + } + } else { + quote!() + }; + + let (from_ptr_dst, from_ptr_sized) = if ctx + .options() + .rust_features() + .ptr_metadata + { + let flex_ref_inner = ctx.wrap_unsafe_ops(quote! { + Self::flex_ptr(self, len) + }); + let flex_ref_mut_inner = ctx.wrap_unsafe_ops(quote! { + Self::flex_ptr_mut(self, len).assume_init() + }); + let flex_ptr_inner = ctx.wrap_unsafe_ops(quote! { + &*::#prefix::ptr::from_raw_parts(ptr as *const (), len) + }); + let flex_ptr_mut_inner = ctx.wrap_unsafe_ops(quote! { + // Initialize reference without ever exposing it, as its possibly uninitialized + let mut uninit = ::#prefix::mem::MaybeUninit::<&mut #dst_ty_for_impl>::uninit(); + (uninit.as_mut_ptr() as *mut *mut #dst_ty_for_impl) + .write(::#prefix::ptr::from_raw_parts_mut(ptr as *mut (), len)); + + uninit + }); + + ( + quote! { + #[inline] + pub fn fixed(&self) -> (& #sized_ty_for_impl, usize) { + unsafe { + let (ptr, len) = (self as *const Self).to_raw_parts(); + (&*(ptr as *const #sized_ty_for_impl), len) + } + } + + #[inline] + pub fn fixed_mut(&mut self) -> (&mut #sized_ty_for_impl, usize) { + unsafe { + let (ptr, len) = (self as *mut Self).to_raw_parts(); + (&mut *(ptr as *mut #sized_ty_for_impl), len) + } + } + }, + quote! { + /// Convert a sized prefix to an unsized structure with the given length. + /// + /// SAFETY: Underlying storage is initialized up to at least `len` elements. + pub unsafe fn flex_ref(&self, len: usize) -> &#dst_ty_for_impl { + // SAFETY: Reference is always valid as pointer. Caller is guaranteeing `len`. + #flex_ref_inner + } + + /// Convert a mutable sized prefix to an unsized structure with the given length. + /// + /// SAFETY: Underlying storage is initialized up to at least `len` elements. + #[inline] + pub unsafe fn flex_ref_mut(&mut self, len: usize) -> &mut #dst_ty_for_impl { + // SAFETY: Reference is always valid as pointer. Caller is guaranteeing `len`. + #flex_ref_mut_inner + } + + /// Construct DST variant from a pointer and a size. + /// + /// NOTE: lifetime of returned reference is not tied to any underlying storage. + /// SAFETY: `ptr` is valid. Underlying storage is fully initialized up to at least `len` elements. + #[inline] + pub unsafe fn flex_ptr<'unbounded>(ptr: *const Self, len: usize) -> &'unbounded #dst_ty_for_impl { + #flex_ptr_inner + } + + /// Construct mutable DST variant from a pointer and a + /// size. 
The returned `&mut` reference is initialized + /// pointing to memory referenced by `ptr`, but there's + /// no requirement that that memory be initialized. + /// + /// NOTE: lifetime of returned reference is not tied to any underlying storage. + /// SAFETY: `ptr` is valid. Underlying storage has space for at least `len` elements. + #[inline] + pub unsafe fn flex_ptr_mut<'unbounded>( + ptr: *mut Self, + len: usize, + ) -> ::#prefix::mem::MaybeUninit<&'unbounded mut #dst_ty_for_impl> { + #flex_ptr_mut_inner + } + }, + ) + } else { + (quote!(), quote!()) + }; + + quote! { + impl #impl_generics_labels #dst_ty_for_impl { + #layout + #from_ptr_dst + } + + impl #impl_generics_labels #sized_ty_for_impl { + #from_ptr_sized + } + } + } +} + +impl Method { + fn codegen_method( + &self, + ctx: &BindgenContext, + methods: &mut Vec, + method_names: &mut HashSet, + result: &mut CodegenResult<'_>, + _parent: &CompInfo, + parent_id: DiscoveredItemId, + ) { + assert!({ + let cc = &ctx.options().codegen_config; + match self.kind() { + MethodKind::Constructor => cc.constructors(), + MethodKind::Destructor | + MethodKind::VirtualDestructor { .. } => cc.destructors(), + MethodKind::Static | + MethodKind::Normal | + MethodKind::Virtual { .. } => cc.methods(), + } + }); + + // TODO(emilio): We could generate final stuff at least. + if self.is_virtual() { + return; // FIXME + } + + // First of all, output the actual function. + let function_item = ctx.resolve_item(self.signature()); + let id = DiscoveredItemId::new(function_item.id().as_usize()); + if !function_item.process_before_codegen(ctx, result) { + return; + } + let function = function_item.expect_function(); + let times_seen = function.codegen(ctx, result, function_item); + let Some(times_seen) = times_seen else { return }; + let signature_item = ctx.resolve_item(function.signature()); + let mut name = match self.kind() { + MethodKind::Constructor => "new".into(), + MethodKind::Destructor => "destruct".into(), + _ => function.name().to_owned(), + }; + + let TypeKind::Function(ref signature) = + *signature_item.expect_type().kind() + else { + panic!("How in the world?") + }; + + let supported_abi = signature.abi(ctx, Some(&*name)).is_ok(); + if !supported_abi { + return; + } + + // Do not generate variadic methods, since rust does not allow + // implementing them, and we don't do a good job at it anyway. + if signature.is_variadic() { + return; + } + + if method_names.contains(&name) { + let mut count = 1; + let mut new_name; + + while { + new_name = format!("{name}{count}"); + method_names.contains(&new_name) + } { + count += 1; + } + + name = new_name; + } + + method_names.insert(name.clone()); + + ctx.options().for_each_callback(|cb| { + cb.new_item_found( + id, + DiscoveredItem::Method { + parent: parent_id, + final_name: name.clone(), + }, + ); + }); + + let mut function_name = function_item.canonical_name(ctx); + if times_seen > 0 { + write!(&mut function_name, "{times_seen}").unwrap(); + } + let function_name = ctx.rust_ident(function_name); + let mut args = utils::fnsig_arguments(ctx, signature); + let mut ret = utils::fnsig_return_ty(ctx, signature); + + if !self.is_static() && !self.is_constructor() { + args[0] = if self.is_const() { + quote! { &self } + } else { + quote! { &mut self } + }; + } + + // If it's a constructor, we always return `Self`, and we inject the + // "this" parameter, so there's no need to ask the user for it. + // + // Note that constructors in Clang are represented as functions with + // return-type = void. 
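+ // Roughly, the generated constructor binding looks like (illustrative only;
+ // the real extern symbol name and the `::std`/`::core` prefix vary):
+ //
+ //     pub unsafe fn new(/* ctor args */) -> Self {
+ //         let mut __bindgen_tmp = ::std::mem::MaybeUninit::uninit();
+ //         mangled_ctor_symbol(__bindgen_tmp.as_mut_ptr(), /* ctor args */);
+ //         __bindgen_tmp.assume_init()
+ //     }
+ //
+ // with a `mem::uninitialized()`-based fallback when MaybeUninit is unavailable.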
+ if self.is_constructor() { + args.remove(0); + ret = quote! { -> Self }; + } + + let mut exprs = + helpers::ast_ty::arguments_from_signature(signature, ctx); + + let mut stmts = vec![]; + + // If it's a constructor, we need to insert an extra parameter with a + // variable called `__bindgen_tmp` we're going to create. + if self.is_constructor() { + let prefix = ctx.trait_prefix(); + let tmp_variable_decl = if ctx + .options() + .rust_features() + .maybe_uninit + { + exprs[0] = quote! { + __bindgen_tmp.as_mut_ptr() + }; + quote! { + let mut __bindgen_tmp = ::#prefix::mem::MaybeUninit::uninit() + } + } else { + exprs[0] = quote! { + &mut __bindgen_tmp + }; + quote! { + let mut __bindgen_tmp = ::#prefix::mem::uninitialized() + } + }; + stmts.push(tmp_variable_decl); + } else if !self.is_static() { + assert!(!exprs.is_empty()); + exprs[0] = quote! { + self + }; + } + + let call = quote! { + #function_name (#( #exprs ),* ) + }; + + stmts.push(call); + + if self.is_constructor() { + stmts.push(if ctx.options().rust_features().maybe_uninit { + quote! { + __bindgen_tmp.assume_init() + } + } else { + quote! { + __bindgen_tmp + } + }); + } + + let block = ctx.wrap_unsafe_ops(quote! ( #( #stmts );*)); + + let mut attrs = vec![attributes::inline()]; + + if signature.must_use() { + attrs.push(attributes::must_use()); + } + + let name = ctx.rust_ident(&name); + methods.push(quote! { + #(#attrs)* + pub unsafe fn #name ( #( #args ),* ) #ret { + #block + } + }); + } +} + +/// A helper type that represents different enum variations. +#[derive(Copy, Clone, PartialEq, Eq, Debug, Default)] +pub enum EnumVariation { + /// The code for this enum will use a Rust enum. Note that creating this in unsafe code + /// (including FFI) with an invalid value will invoke undefined behaviour, whether or not + /// its marked as `#[non_exhaustive]`. + Rust { + /// Indicates whether the generated struct should be `#[non_exhaustive]` + non_exhaustive: bool, + }, + /// The code for this enum will use a newtype + NewType { + /// Indicates whether the newtype will have bitwise operators + is_bitfield: bool, + /// Indicates whether the variants will be represented as global constants + is_global: bool, + }, + /// The code for this enum will use consts + #[default] + Consts, + /// The code for this enum will use a module containing consts + ModuleConsts, +} + +impl EnumVariation { + fn is_rust(self) -> bool { + matches!(self, EnumVariation::Rust { .. }) + } + + /// Both the `Const` and `ModuleConsts` variants will cause this to return + /// true. + fn is_const(self) -> bool { + matches!(self, EnumVariation::Consts | EnumVariation::ModuleConsts) + } +} + +impl fmt::Display for EnumVariation { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let s = match self { + Self::Rust { + non_exhaustive: false, + } => "rust", + Self::Rust { + non_exhaustive: true, + } => "rust_non_exhaustive", + Self::NewType { + is_bitfield: true, .. + } => "bitfield", + Self::NewType { + is_bitfield: false, + is_global, + } => { + if *is_global { + "newtype_global" + } else { + "newtype" + } + } + Self::Consts => "consts", + Self::ModuleConsts => "moduleconsts", + }; + s.fmt(f) + } +} + +impl FromStr for EnumVariation { + type Err = std::io::Error; + + /// Create a `EnumVariation` from a string. 
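+    ///
+    /// For illustration only (not a doc-test), the accepted spellings map to
+    /// the variants below, e.g.:
+    ///
+    /// ```ignore
+    /// assert!(matches!(
+    ///     "newtype_global".parse::<EnumVariation>(),
+    ///     Ok(EnumVariation::NewType { is_bitfield: false, is_global: true })
+    /// ));
+    /// ```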
+ fn from_str(s: &str) -> Result { + match s { + "rust" => Ok(EnumVariation::Rust { + non_exhaustive: false, + }), + "rust_non_exhaustive" => Ok(EnumVariation::Rust { + non_exhaustive: true, + }), + "bitfield" => Ok(EnumVariation::NewType { + is_bitfield: true, + is_global: false, + }), + "consts" => Ok(EnumVariation::Consts), + "moduleconsts" => Ok(EnumVariation::ModuleConsts), + "newtype" => Ok(EnumVariation::NewType { + is_bitfield: false, + is_global: false, + }), + "newtype_global" => Ok(EnumVariation::NewType { + is_bitfield: false, + is_global: true, + }), + _ => Err(std::io::Error::new( + std::io::ErrorKind::InvalidInput, + concat!( + "Got an invalid EnumVariation. Accepted values ", + "are 'rust', 'rust_non_exhaustive', 'bitfield', 'consts',", + "'moduleconsts', 'newtype' and 'newtype_global'." + ), + )), + } + } +} + +struct EnumBuilder { + /// Type identifier of the enum. + /// + /// This is the base name, i.e. for `ModuleConst` enums, this does not include the module name. + enum_type: Ident, + /// Attributes applying to the enum type + attrs: Vec, + /// The representation of the enum, e.g. `u32`. + repr: syn::Type, + /// The enum kind we are generating + kind: EnumBuilderKind, + /// A list of all variants this enum has. + enum_variants: Vec, +} + +/// A helper type to construct different enum variations. +enum EnumBuilderKind { + Rust { + non_exhaustive: bool, + }, + NewType { + is_bitfield: bool, + is_global: bool, + /// if the enum is named or not. + is_anonymous: bool, + }, + Consts { + needs_typedef: bool, + }, + ModuleConsts { + module_name: Ident, + }, +} + +impl EnumBuilder { + /// Returns true if the builder is for a rustified enum. + fn is_rust_enum(&self) -> bool { + matches!(self.kind, EnumBuilderKind::Rust { .. }) + } + + /// Create a new enum given an item builder, a canonical name, a name for + /// the representation, and which variation it should be generated as. + fn new( + name: &str, + attrs: Vec, + repr: &syn::Type, + enum_variation: EnumVariation, + has_typedef: bool, + enum_is_anonymous: bool, + ) -> Self { + let ident = Ident::new(name, Span::call_site()); + // For most variants this is the same + let mut enum_ty = ident.clone(); + + let kind = match enum_variation { + EnumVariation::NewType { + is_bitfield, + is_global, + } => EnumBuilderKind::NewType { + is_bitfield, + is_global, + is_anonymous: enum_is_anonymous, + }, + + EnumVariation::Rust { non_exhaustive } => { + EnumBuilderKind::Rust { non_exhaustive } + } + + EnumVariation::Consts => EnumBuilderKind::Consts { + needs_typedef: !has_typedef, + }, + + EnumVariation::ModuleConsts => { + enum_ty = Ident::new( + CONSTIFIED_ENUM_MODULE_REPR_NAME, + Span::call_site(), + ); + + EnumBuilderKind::ModuleConsts { + module_name: ident.clone(), + } + } + }; + EnumBuilder { + enum_type: enum_ty, + attrs, + repr: repr.clone(), + kind, + enum_variants: vec![], + } + } + + /// Add a variant to this enum. 
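+    ///
+    /// Depending on the builder kind the variant is recorded so that `build`
+    /// later emits it as a Rust `enum` variant, an associated constant on the
+    /// newtype wrapper, or a plain `pub const`. A rough sketch of the newtype
+    /// output, with purely illustrative names:
+    ///
+    /// ```ignore
+    /// #[repr(transparent)]
+    /// pub struct Flags(pub u32);
+    /// impl Flags {
+    ///     pub const READ: Flags = Flags(1);
+    /// }
+    /// ```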
+ fn with_variant( + mut self, + ctx: &BindgenContext, + variant: &EnumVariant, + variant_doc: proc_macro2::TokenStream, + mangling_prefix: Option<&str>, + rust_ty: &syn::Type, + is_ty_named: bool, + ) -> Self { + let variant_name = ctx.rust_mangle(variant.name()); + let is_rust_enum = self.is_rust_enum(); + let expr = match variant.val() { + EnumVariantValue::Boolean(v) if is_rust_enum => { + helpers::ast_ty::uint_expr(u64::from(v)) + } + EnumVariantValue::Boolean(v) => quote!(#v), + EnumVariantValue::Signed(v) => helpers::ast_ty::int_expr(v), + EnumVariantValue::Unsigned(v) => helpers::ast_ty::uint_expr(v), + }; + + match self.kind { + EnumBuilderKind::Rust { .. } => { + let name = ctx.rust_ident(variant_name); + self.enum_variants.push(EnumVariantInfo { + variant_name: name, + variant_doc, + value: expr, + }); + self + } + + EnumBuilderKind::NewType { is_global, .. } => { + let variant_ident = if is_ty_named && !is_global { + ctx.rust_ident(variant_name) + } else { + ctx.rust_ident(match mangling_prefix { + Some(prefix) => { + Cow::Owned(format!("{prefix}_{variant_name}")) + } + None => variant_name, + }) + }; + self.enum_variants.push(EnumVariantInfo { + variant_name: variant_ident, + variant_doc, + value: quote! { #rust_ty ( #expr )}, + }); + + self + } + + EnumBuilderKind::Consts { .. } => { + let constant_name = match mangling_prefix { + Some(prefix) => { + Cow::Owned(format!("{prefix}_{variant_name}")) + } + None => variant_name, + }; + + let ident = ctx.rust_ident(constant_name); + self.enum_variants.push(EnumVariantInfo { + variant_name: ident, + variant_doc, + value: quote! { #expr }, + }); + + self + } + EnumBuilderKind::ModuleConsts { .. } => { + let name = ctx.rust_ident(variant_name); + self.enum_variants.push(EnumVariantInfo { + variant_name: name, + variant_doc, + value: quote! { #expr }, + }); + self + } + } + } + + fn newtype_bitfield_impl( + prefix: &Ident, + rust_ty: &syn::Type, + ) -> proc_macro2::TokenStream { + let rust_ty_name = &rust_ty; + quote! { + impl ::#prefix::ops::BitOr<#rust_ty> for #rust_ty { + type Output = Self; + + #[inline] + fn bitor(self, other: Self) -> Self { + #rust_ty_name(self.0 | other.0) + } + } + impl ::#prefix::ops::BitOrAssign for #rust_ty { + #[inline] + fn bitor_assign(&mut self, rhs: #rust_ty) { + self.0 |= rhs.0; + } + } + impl ::#prefix::ops::BitAnd<#rust_ty> for #rust_ty { + type Output = Self; + + #[inline] + fn bitand(self, other: Self) -> Self { + #rust_ty_name(self.0 & other.0) + } + } + impl ::#prefix::ops::BitAndAssign for #rust_ty { + #[inline] + fn bitand_assign(&mut self, rhs: #rust_ty) { + self.0 &= rhs.0; + } + } + } + } + + fn build( + self, + ctx: &BindgenContext, + rust_ty: &syn::Type, + ) -> proc_macro2::TokenStream { + let enum_ident = self.enum_type; + + // 1. Construct a list of the enum variants + let variants = match self.kind { + EnumBuilderKind::Rust { .. } => { + let mut variants = vec![]; + + for v in self.enum_variants { + let variant_doc = &v.variant_doc; + let variant_ident = &v.variant_name; + let variant_value = &v.value; + + variants.push(quote! { + #variant_doc + #variant_ident = #variant_value, + }); + } + + if variants.is_empty() { + variants.push( + quote! {__bindgen_cannot_repr_c_on_empty_enum = 0,}, + ); + } + variants + } + EnumBuilderKind::NewType { .. } => { + let mut variants = vec![]; + + for v in self.enum_variants { + let variant_doc = &v.variant_doc; + let variant_ident = &v.variant_name; + let variant_value = &v.value; + + variants.push(quote! 
{ + #variant_doc + pub const #variant_ident: #enum_ident = #variant_value; + }); + } + variants + } + EnumBuilderKind::Consts { .. } | + EnumBuilderKind::ModuleConsts { .. } => { + let mut variants = vec![]; + + for v in self.enum_variants { + let variant_doc = &v.variant_doc; + let variant_ident = &v.variant_name; + let variant_value = &v.value; + + variants.push(quote! { + #variant_doc + pub const #variant_ident: #enum_ident = #variant_value; + }); + } + variants + } + }; + let attrs = self.attrs; + let enum_repr = &self.repr; + + // 2. Generate the enum representation + match self.kind { + EnumBuilderKind::Rust { non_exhaustive } => { + let non_exhaustive_opt = + non_exhaustive.then(attributes::non_exhaustive); + + quote! { + // Note: repr is on top of attrs to keep the test expectations diff small. + // a future commit could move it further down. + #[repr(#enum_repr)] + #non_exhaustive_opt + #( #attrs )* + pub enum #enum_ident { + #( #variants )* + } + } + } + EnumBuilderKind::NewType { + is_bitfield, + is_global, + is_anonymous, + } => { + // There doesn't seem to be a technical reason why we generate + // anon enum variants as global constants. + // We keep this behavior to avoid breaking changes in the bindings. + let impl_variants = if is_anonymous || is_global { + quote! { + #( #variants )* + } + } else { + quote! { + impl #enum_ident { + #( #variants )* + } + } + }; + + let prefix = ctx.trait_prefix(); + let bitfield_impl_opt = is_bitfield + .then(|| Self::newtype_bitfield_impl(&prefix, rust_ty)); + + quote! { + // Previously variant impls where before the enum definition. + // lets keep this as is for now, to reduce the diff in generated bindings. + #impl_variants + + #bitfield_impl_opt + + #[repr(transparent)] + #( #attrs )* + pub struct #enum_ident (pub #enum_repr); + } + } + EnumBuilderKind::Consts { needs_typedef } => { + let typedef_opt = needs_typedef.then(|| { + quote! { + #( #attrs )* + pub type #enum_ident = #enum_repr; + } + }); + quote! { + #( #variants )* + + #typedef_opt + } + } + EnumBuilderKind::ModuleConsts { module_name, .. } => { + quote! { + // todo: Probably some attributes, e.g. `cfg` should apply to the `mod`. + pub mod #module_name { + #( #attrs )* + pub type #enum_ident = #enum_repr; + + #( #variants )* + } + } + } + } + } +} + +impl CodeGenerator for Enum { + type Extra = Item; + type Return = (); + + fn codegen( + &self, + ctx: &BindgenContext, + result: &mut CodegenResult<'_>, + item: &Item, + ) { + debug!("::codegen: item = {item:?}"); + debug_assert!(item.is_enabled_for_codegen(ctx)); + + let name = item.canonical_name(ctx); + let ident = ctx.rust_ident(&name); + let enum_ty = item.expect_type(); + let layout = enum_ty.layout(ctx); + let variation = self.computed_enum_variation(ctx, item); + + let repr_translated; + let repr = match self.repr().map(|repr| ctx.resolve_type(repr)) { + Some(repr) + if !ctx.options().translate_enum_integer_types && + !variation.is_rust() => + { + repr + } + repr => { + // An enum's integer type is translated to a native Rust + // integer type in 3 cases: + // * the enum is Rustified and we need a translated type for + // the repr attribute + // * the representation couldn't be determined from the C source + // * it was explicitly requested as a bindgen option + + let kind = if let Some(repr) = repr { + match *repr.canonical_type(ctx).kind() { + TypeKind::Int(int_kind) => int_kind, + _ => panic!("Unexpected type as enum repr"), + } + } else { + warn!( + "Guessing type of enum! 
Forward declarations of enums \ + shouldn't be legal!" + ); + IntKind::Int + }; + + let signed = kind.is_signed(); + let size = layout + .map(|l| l.size) + .or_else(|| kind.known_size()) + .unwrap_or(0); + + let translated = match (signed, size) { + (true, 1) => IntKind::I8, + (false, 1) => IntKind::U8, + (true, 2) => IntKind::I16, + (false, 2) => IntKind::U16, + (true, 4) => IntKind::I32, + (false, 4) => IntKind::U32, + (true, 8) => IntKind::I64, + (false, 8) => IntKind::U64, + _ => { + warn!( + "invalid enum decl: signed: {signed}, size: {size}" + ); + IntKind::I32 + } + }; + + repr_translated = + Type::new(None, None, TypeKind::Int(translated), false); + &repr_translated + } + }; + + let mut attrs = vec![]; + + if let Some(comment) = item.comment(ctx) { + attrs.push(attributes::doc(&comment)); + } + + if item.must_use(ctx) { + attrs.push(attributes::must_use()); + } + + if !variation.is_const() { + let packed = false; // Enums can't be packed in Rust. + let mut derives = derives_of_item(item, ctx, packed); + // For backwards compat, enums always derive + // Clone/Eq/PartialEq/Hash, even if we don't generate those by + // default. + derives.insert( + DerivableTraits::CLONE | + DerivableTraits::HASH | + DerivableTraits::PARTIAL_EQ | + DerivableTraits::EQ, + ); + let mut derives: Vec<_> = derives.into(); + for derive in item.annotations().derives() { + if !derives.contains(&derive.as_str()) { + derives.push(derive); + } + } + + // The custom derives callback may return a list of derive attributes; + // add them to the end of the list. + let custom_derives = ctx.options().all_callbacks(|cb| { + cb.add_derives(&DeriveInfo { + name: &name, + kind: DeriveTypeKind::Enum, + }) + }); + // In most cases this will be a no-op, since custom_derives will be empty. + derives.extend(custom_derives.iter().map(|s| s.as_str())); + + attrs.extend( + item.annotations() + .attributes() + .iter() + .map(|s| s.parse().unwrap()), + ); + + // The custom attribute callback may return a list of attributes; + // add them to the end of the list. + let custom_attributes = ctx.options().all_callbacks(|cb| { + cb.add_attributes(&AttributeInfo { + name: &name, + kind: DeriveTypeKind::Enum, + }) + }); + attrs.extend(custom_attributes.iter().map(|s| s.parse().unwrap())); + + attrs.push(attributes::derives(&derives)); + } + + fn add_constant( + ctx: &BindgenContext, + enum_: &Type, + // Only to avoid recomputing every time. + enum_canonical_name: &Ident, + // May be the same as "variant" if it's because the + // enum is unnamed and we still haven't seen the + // value. + variant_name: &Ident, + referenced_name: &Ident, + enum_rust_ty: &syn::Type, + result: &mut CodegenResult<'_>, + ) { + let constant_name = if enum_.name().is_some() { + if ctx.options().prepend_enum_name { + format!("{enum_canonical_name}_{variant_name}") + } else { + format!("{variant_name}") + } + } else { + format!("{variant_name}") + }; + let constant_name = ctx.rust_ident(constant_name); + + result.push(quote! 
{ + pub const #constant_name : #enum_rust_ty = + #enum_canonical_name :: #referenced_name ; + }); + } + + let repr = repr.to_rust_ty_or_opaque(ctx, item); + let has_typedef = ctx.is_enum_typedef_combo(item.id()); + + ctx.options().for_each_callback(|cb| { + cb.new_item_found( + DiscoveredItemId::new(item.id().as_usize()), + DiscoveredItem::Enum { + final_name: name.to_string(), + }, + ); + }); + + let mut builder = EnumBuilder::new( + &name, + attrs, + &repr, + variation, + has_typedef, + enum_ty.name().is_none(), + ); + + // A map where we keep a value -> variant relation. + let mut seen_values = HashMap::<_, Ident>::default(); + let enum_rust_ty = item.to_rust_ty_or_opaque(ctx, &()); + let is_toplevel = item.is_toplevel(ctx); + + // Used to mangle the constants we generate in the unnamed-enum case. + let parent_canonical_name = if is_toplevel { + None + } else { + Some(item.parent_id().canonical_name(ctx)) + }; + + let constant_mangling_prefix = if ctx.options().prepend_enum_name { + if enum_ty.name().is_none() { + parent_canonical_name.as_deref() + } else { + Some(&*name) + } + } else { + None + }; + + // NB: We defer the creation of constified variants, in case we find + // another variant with the same value (which is the common thing to + // do). + let mut constified_variants = VecDeque::new(); + + let mut iter = self.variants().iter().peekable(); + while let Some(variant) = + iter.next().or_else(|| constified_variants.pop_front()) + { + if variant.hidden() { + continue; + } + + if variant.force_constification() && iter.peek().is_some() { + constified_variants.push_back(variant); + continue; + } + + let mut variant_doc = quote! {}; + if ctx.options().generate_comments { + if let Some(raw_comment) = variant.comment() { + let processed_comment = + ctx.options().process_comment(raw_comment); + variant_doc = attributes::doc(&processed_comment); + } + } + + match seen_values.entry(variant.val()) { + Entry::Occupied(ref entry) => { + if variation.is_rust() { + let variant_name = ctx.rust_mangle(variant.name()); + let mangled_name = if is_toplevel || + enum_ty.name().is_some() + { + variant_name + } else { + let parent_name = + parent_canonical_name.as_ref().unwrap(); + + Cow::Owned(format!("{parent_name}_{variant_name}")) + }; + + let existing_variant_name = entry.get(); + // Use associated constants for named enums. + if enum_ty.name().is_some() { + let enum_canonical_name = &ident; + let variant_name = + ctx.rust_ident_raw(&*mangled_name); + result.push(quote! { + impl #enum_rust_ty { + pub const #variant_name : #enum_rust_ty = + #enum_canonical_name :: #existing_variant_name ; + } + }); + } else { + add_constant( + ctx, + enum_ty, + &ident, + &Ident::new(&mangled_name, Span::call_site()), + existing_variant_name, + &enum_rust_ty, + result, + ); + } + } else { + builder = builder.with_variant( + ctx, + variant, + variant_doc, + constant_mangling_prefix, + &enum_rust_ty, + enum_ty.name().is_some(), + ); + } + } + Entry::Vacant(entry) => { + builder = builder.with_variant( + ctx, + variant, + variant_doc, + constant_mangling_prefix, + &enum_rust_ty, + enum_ty.name().is_some(), + ); + + let variant_name = ctx.rust_ident(variant.name()); + + // If it's an unnamed enum, or constification is enforced, + // we also generate a constant so it can be properly + // accessed. 
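+                    //
+                    // E.g. (hypothetical names) a variant `BAR` of an unnamed
+                    // enum nested under a struct `Foo` is re-exported roughly
+                    // as `pub const Foo_BAR: <enum type> = <enum type>::BAR;`,
+                    // so code that referred to the bare C constant still has
+                    // an obvious Rust-side spelling.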
+ if (variation.is_rust() && enum_ty.name().is_none()) || + variant.force_constification() + { + let mangled_name = if is_toplevel { + variant_name.clone() + } else { + let parent_name = + parent_canonical_name.as_ref().unwrap(); + + Ident::new( + &format!("{parent_name}_{variant_name}"), + Span::call_site(), + ) + }; + + add_constant( + ctx, + enum_ty, + &ident, + &mangled_name, + &variant_name, + &enum_rust_ty, + result, + ); + } + + entry.insert(variant_name); + } + } + } + + let item = builder.build(ctx, &enum_rust_ty); + result.push(item); + } +} + +struct EnumVariantInfo { + variant_name: Ident, + variant_doc: proc_macro2::TokenStream, + value: proc_macro2::TokenStream, +} + +/// Enum for the default type of macro constants. +#[derive(Copy, Clone, PartialEq, Eq, Debug, Default)] +pub enum MacroTypeVariation { + /// Use i32 or i64 + Signed, + /// Use u32 or u64 + #[default] + Unsigned, +} + +impl fmt::Display for MacroTypeVariation { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let s = match self { + Self::Signed => "signed", + Self::Unsigned => "unsigned", + }; + s.fmt(f) + } +} + +impl FromStr for MacroTypeVariation { + type Err = std::io::Error; + + /// Create a `MacroTypeVariation` from a string. + fn from_str(s: &str) -> Result { + match s { + "signed" => Ok(MacroTypeVariation::Signed), + "unsigned" => Ok(MacroTypeVariation::Unsigned), + _ => Err(std::io::Error::new( + std::io::ErrorKind::InvalidInput, + concat!( + "Got an invalid MacroTypeVariation. Accepted values ", + "are 'signed' and 'unsigned'" + ), + )), + } + } +} + +/// Enum for how aliases should be translated. +#[derive(Copy, Clone, PartialEq, Eq, Debug, Default)] +pub enum AliasVariation { + /// Convert to regular Rust alias + #[default] + TypeAlias, + /// Create a new type by wrapping the old type in a struct and using #[repr(transparent)] + NewType, + /// Same as `NewType` but also impl Deref to be able to use the methods of the wrapped type + NewTypeDeref, +} + +impl fmt::Display for AliasVariation { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let s = match self { + Self::TypeAlias => "type_alias", + Self::NewType => "new_type", + Self::NewTypeDeref => "new_type_deref", + }; + + s.fmt(f) + } +} + +impl FromStr for AliasVariation { + type Err = std::io::Error; + + /// Create an `AliasVariation` from a string. + fn from_str(s: &str) -> Result { + match s { + "type_alias" => Ok(AliasVariation::TypeAlias), + "new_type" => Ok(AliasVariation::NewType), + "new_type_deref" => Ok(AliasVariation::NewTypeDeref), + _ => Err(std::io::Error::new( + std::io::ErrorKind::InvalidInput, + concat!( + "Got an invalid AliasVariation. Accepted values ", + "are 'type_alias', 'new_type', and 'new_type_deref'" + ), + )), + } + } +} + +/// Enum for how non-`Copy` `union`s should be translated. +#[derive(Copy, Clone, PartialEq, Eq, Debug)] +pub enum NonCopyUnionStyle { + /// Wrap members in a type generated by `bindgen`. + BindgenWrapper, + /// Wrap members in [`::core::mem::ManuallyDrop`]. + /// + /// Note: `ManuallyDrop` was stabilized in Rust 1.20.0, do not use it if your + /// MSRV is lower. 
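+    ///
+    /// With this style a non-`Copy` field `f` of type `T` is emitted roughly
+    /// as `pub f: ::core::mem::ManuallyDrop<T>` rather than being wrapped in
+    /// bindgen's own `__BindgenUnionField<T>` (illustrative sketch, not the
+    /// exact output).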
+    ManuallyDrop,
+}
+
+impl fmt::Display for NonCopyUnionStyle {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        let s = match self {
+            Self::BindgenWrapper => "bindgen_wrapper",
+            Self::ManuallyDrop => "manually_drop",
+        };
+
+        s.fmt(f)
+    }
+}
+
+impl Default for NonCopyUnionStyle {
+    fn default() -> Self {
+        Self::BindgenWrapper
+    }
+}
+
+impl FromStr for NonCopyUnionStyle {
+    type Err = std::io::Error;
+
+    fn from_str(s: &str) -> Result<Self, Self::Err> {
+        match s {
+            "bindgen_wrapper" => Ok(Self::BindgenWrapper),
+            "manually_drop" => Ok(Self::ManuallyDrop),
+            _ => Err(std::io::Error::new(
+                std::io::ErrorKind::InvalidInput,
+                concat!(
+                    "Got an invalid NonCopyUnionStyle. Accepted values ",
+                    "are 'bindgen_wrapper' and 'manually_drop'"
+                ),
+            )),
+        }
+    }
+}
+
+/// Fallible conversion to an opaque blob.
+///
+/// Implementors of this trait should provide the `try_get_layout` method to
+/// fallibly get this thing's layout, which the provided `try_to_opaque` trait
+/// method will use to convert the `Layout` into an opaque blob Rust type.
+pub(crate) trait TryToOpaque {
+    type Extra;
+
+    /// Get the layout for this thing, if one is available.
+    fn try_get_layout(
+        &self,
+        ctx: &BindgenContext,
+        extra: &Self::Extra,
+    ) -> error::Result<Layout>;
+
+    /// Do not override this provided trait method.
+    fn try_to_opaque(
+        &self,
+        ctx: &BindgenContext,
+        extra: &Self::Extra,
+    ) -> error::Result<syn::Type> {
+        self.try_get_layout(ctx, extra)
+            .map(|layout| helpers::blob(ctx, layout, true))
+    }
+}
+
+/// Infallible conversion of an IR thing to an opaque blob.
+///
+/// The resulting layout is best effort, and is unfortunately not guaranteed to
+/// be correct. When all else fails, we fall back to a single byte layout as a
+/// last resort, because C++ does not permit zero-sized types. See the note in
+/// the `ToRustTyOrOpaque` doc comment about fallible versus infallible traits
+/// and when each is appropriate.
+///
+/// Don't implement this directly. Instead implement `TryToOpaque`, and then
+/// leverage the blanket impl for this trait.
+pub(crate) trait ToOpaque: TryToOpaque {
+    fn get_layout(&self, ctx: &BindgenContext, extra: &Self::Extra) -> Layout {
+        self.try_get_layout(ctx, extra)
+            .unwrap_or_else(|_| Layout::for_size(ctx, 1))
+    }
+
+    fn to_opaque(
+        &self,
+        ctx: &BindgenContext,
+        extra: &Self::Extra,
+    ) -> syn::Type {
+        let layout = self.get_layout(ctx, extra);
+        helpers::blob(ctx, layout, true)
+    }
+}
+
+impl<T> ToOpaque for T where T: TryToOpaque {}
+
+/// Fallible conversion from an IR thing to an *equivalent* Rust type.
+///
+/// If the C/C++ construct represented by the IR thing cannot (currently) be
+/// represented in Rust (for example, instantiations of templates with
+/// const-value generic parameters) then the impl should return an `Err`. It
+/// should *not* attempt to return an opaque blob with the correct size and
+/// alignment. That is the responsibility of the `TryToOpaque` trait.
+pub(crate) trait TryToRustTy {
+    type Extra;
+
+    fn try_to_rust_ty(
+        &self,
+        ctx: &BindgenContext,
+        extra: &Self::Extra,
+    ) -> error::Result<syn::Type>;
+}
+
+/// Fallible conversion to a Rust type or an opaque blob with the correct size
+/// and alignment.
+///
+/// Don't implement this directly. Instead implement `TryToRustTy` and
+/// `TryToOpaque`, and then leverage the blanket impl for this trait below.
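+///
+/// Behaviourally the blanket impl amounts to the following sketch:
+///
+/// ```ignore
+/// match thing.try_to_rust_ty(ctx, extra) {
+///     Ok(ty) => Ok(ty),
+///     Err(_) => thing
+///         .try_get_layout(ctx, extra)
+///         .map(|layout| helpers::blob(ctx, layout, true))
+///         .map_err(|_| Error::NoLayoutForOpaqueBlob),
+/// }
+/// ```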
+pub(crate) trait TryToRustTyOrOpaque: TryToRustTy + TryToOpaque { + type Extra; + + fn try_to_rust_ty_or_opaque( + &self, + ctx: &BindgenContext, + extra: &::Extra, + ) -> error::Result; +} + +impl TryToRustTyOrOpaque for T +where + T: TryToRustTy + TryToOpaque, +{ + type Extra = E; + + fn try_to_rust_ty_or_opaque( + &self, + ctx: &BindgenContext, + extra: &E, + ) -> error::Result { + self.try_to_rust_ty(ctx, extra).or_else(|_| { + if let Ok(layout) = self.try_get_layout(ctx, extra) { + Ok(helpers::blob(ctx, layout, true)) + } else { + Err(Error::NoLayoutForOpaqueBlob) + } + }) + } +} + +/// Infallible conversion to a Rust type, or an opaque blob with a best effort +/// of correct size and alignment. +/// +/// Don't implement this directly. Instead implement `TryToRustTy` and +/// `TryToOpaque`, and then leverage the blanket impl for this trait below. +/// +/// ### Fallible vs. Infallible Conversions to Rust Types +/// +/// When should one use this infallible `ToRustTyOrOpaque` trait versus the +/// fallible `TryTo{RustTy, Opaque, RustTyOrOpaque}` traits? All fallible trait +/// implementations that need to convert another thing into a Rust type or +/// opaque blob in a nested manner should also use fallible trait methods and +/// propagate failure up the stack. Only infallible functions and methods like +/// `CodeGenerator` implementations should use the infallible +/// `ToRustTyOrOpaque`. The further out we push error recovery, the more likely +/// we are to get a usable `Layout` even if we can't generate an equivalent Rust +/// type for a C++ construct. +pub(crate) trait ToRustTyOrOpaque: TryToRustTy + ToOpaque { + type Extra; + + fn to_rust_ty_or_opaque( + &self, + ctx: &BindgenContext, + extra: &::Extra, + ) -> syn::Type; +} + +impl ToRustTyOrOpaque for T +where + T: TryToRustTy + ToOpaque, +{ + type Extra = E; + + fn to_rust_ty_or_opaque( + &self, + ctx: &BindgenContext, + extra: &E, + ) -> syn::Type { + self.try_to_rust_ty(ctx, extra) + .unwrap_or_else(|_| self.to_opaque(ctx, extra)) + } +} + +impl TryToOpaque for T +where + T: Copy + Into, +{ + type Extra = (); + + fn try_get_layout( + &self, + ctx: &BindgenContext, + _: &(), + ) -> error::Result { + ctx.resolve_item((*self).into()).try_get_layout(ctx, &()) + } +} + +impl TryToRustTy for T +where + T: Copy + Into, +{ + type Extra = (); + + fn try_to_rust_ty( + &self, + ctx: &BindgenContext, + _: &(), + ) -> error::Result { + ctx.resolve_item((*self).into()).try_to_rust_ty(ctx, &()) + } +} + +impl TryToOpaque for Item { + type Extra = (); + + fn try_get_layout( + &self, + ctx: &BindgenContext, + _: &(), + ) -> error::Result { + self.kind().expect_type().try_get_layout(ctx, self) + } +} + +impl TryToRustTy for Item { + type Extra = (); + + fn try_to_rust_ty( + &self, + ctx: &BindgenContext, + _: &(), + ) -> error::Result { + self.kind().expect_type().try_to_rust_ty(ctx, self) + } +} + +impl TryToOpaque for Type { + type Extra = Item; + + fn try_get_layout( + &self, + ctx: &BindgenContext, + _: &Item, + ) -> error::Result { + self.layout(ctx).ok_or(Error::NoLayoutForOpaqueBlob) + } +} + +impl TryToRustTy for Type { + type Extra = Item; + + fn try_to_rust_ty( + &self, + ctx: &BindgenContext, + item: &Item, + ) -> error::Result { + use self::helpers::ast_ty::*; + + match *self.kind() { + TypeKind::Void => Ok(c_void(ctx)), + // TODO: we should do something smart with nullptr, or maybe *const + // c_void is enough? 
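+            // As generated today this is effectively a `*const c_void`
+            // (with whatever `core`/`std`/custom prefix the bindings are
+            // configured to use).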
+ TypeKind::NullPtr => Ok(c_void(ctx).to_ptr(true)), + TypeKind::Int(ik) => { + Ok(int_kind_rust_type(ctx, ik, self.layout(ctx))) + } + TypeKind::Float(fk) => { + Ok(float_kind_rust_type(ctx, fk, self.layout(ctx))) + } + TypeKind::Complex(fk) => { + let float_path = + float_kind_rust_type(ctx, fk, self.layout(ctx)); + + ctx.generated_bindgen_complex(); + Ok(if ctx.options().enable_cxx_namespaces { + syn::parse_quote! { root::__BindgenComplex<#float_path> } + } else { + syn::parse_quote! { __BindgenComplex<#float_path> } + }) + } + TypeKind::Function(ref signature) => { + // We can't rely on the sizeof(Option>) == + // sizeof(NonZero<_>) optimization with opaque blobs (because + // they aren't NonZero), so don't *ever* use an or_opaque + // variant here. + let ty = signature.try_to_rust_ty(ctx, item)?; + + let prefix = ctx.trait_prefix(); + Ok(syn::parse_quote! { ::#prefix::option::Option<#ty> }) + } + TypeKind::Array(item, len) | TypeKind::Vector(item, len) => { + let ty = item.try_to_rust_ty(ctx, &())?; + Ok(syn::parse_quote! { [ #ty ; #len ] }) + } + TypeKind::Enum(..) => { + let path = item.namespace_aware_canonical_path(ctx); + let path = proc_macro2::TokenStream::from_str(&path.join("::")) + .unwrap(); + Ok(syn::parse_quote!(#path)) + } + TypeKind::TemplateInstantiation(ref inst) => { + inst.try_to_rust_ty(ctx, item) + } + TypeKind::ResolvedTypeRef(inner) => inner.try_to_rust_ty(ctx, &()), + TypeKind::TemplateAlias(..) | + TypeKind::Alias(..) | + TypeKind::BlockPointer(..) => { + if self.is_block_pointer() && !ctx.options().generate_block { + let void = c_void(ctx); + return Ok(void.to_ptr(/* is_const = */ false)); + } + + if item.is_opaque(ctx, &()) && + item.used_template_params(ctx) + .into_iter() + .any(|param| param.is_template_param(ctx, &())) + { + self.try_to_opaque(ctx, item) + } else if let Some(ty) = self + .name() + .and_then(|name| utils::type_from_named(ctx, name)) + { + Ok(ty) + } else { + utils::build_path(item, ctx) + } + } + TypeKind::Comp(ref info) => { + let template_params = item.all_template_params(ctx); + if info.has_non_type_template_params() || + (item.is_opaque(ctx, &()) && !template_params.is_empty()) + { + return self.try_to_opaque(ctx, item); + } + + utils::build_path(item, ctx) + } + TypeKind::Opaque => self.try_to_opaque(ctx, item), + TypeKind::Pointer(inner) | TypeKind::Reference(inner) => { + // Check that this type has the same size as the target's pointer type. + let size = self.get_layout(ctx, item).size; + if size != ctx.target_pointer_size() { + return Err(Error::InvalidPointerSize { + ty_name: self.name().unwrap_or("unknown").into(), + ty_size: size, + ptr_size: ctx.target_pointer_size(), + }); + } + + let is_const = ctx.resolve_type(inner).is_const(); + + let inner = + inner.into_resolver().through_type_refs().resolve(ctx); + let inner_ty = inner.expect_type(); + + let is_objc_pointer = + matches!(inner_ty.kind(), TypeKind::ObjCInterface(..)); + + // Regardless if we can properly represent the inner type, we + // should always generate a proper pointer here, so use + // infallible conversion of the inner type. + let ty = inner + .to_rust_ty_or_opaque(ctx, &()) + .with_implicit_template_params(ctx, inner); + + // Avoid the first function pointer level, since it's already + // represented in Rust. + if inner_ty.canonical_type(ctx).is_function() || is_objc_pointer + { + Ok(ty) + } else { + Ok(ty.to_ptr(is_const)) + } + } + TypeKind::TypeParam => { + let name = item.canonical_name(ctx); + let ident = ctx.rust_ident(name); + Ok(syn::parse_quote! 
{ #ident }) + } + TypeKind::ObjCSel => Ok(syn::parse_quote! { objc::runtime::Sel }), + TypeKind::ObjCId => Ok(syn::parse_quote! { id }), + TypeKind::ObjCInterface(ref interface) => { + let name = ctx.rust_ident(interface.name()); + Ok(syn::parse_quote! { #name }) + } + ref u @ TypeKind::UnresolvedTypeRef(..) => { + unreachable!("Should have been resolved after parsing {u:?}!") + } + } + } +} + +impl TryToOpaque for TemplateInstantiation { + type Extra = Item; + + fn try_get_layout( + &self, + ctx: &BindgenContext, + item: &Item, + ) -> error::Result { + item.expect_type() + .layout(ctx) + .ok_or(Error::NoLayoutForOpaqueBlob) + } +} + +impl TryToRustTy for TemplateInstantiation { + type Extra = Item; + + fn try_to_rust_ty( + &self, + ctx: &BindgenContext, + item: &Item, + ) -> error::Result { + if self.is_opaque(ctx, item) { + return Err(Error::InstantiationOfOpaqueType); + } + + let def = self + .template_definition() + .into_resolver() + .through_type_refs() + .resolve(ctx); + + let mut ty = quote! {}; + let def_path = def.namespace_aware_canonical_path(ctx); + ty.append_separated( + def_path.into_iter().map(|p| ctx.rust_ident(p)), + quote!(::), + ); + + let def_params = def.self_template_params(ctx); + if def_params.is_empty() { + // This can happen if we generated an opaque type for a partial + // template specialization, and we've hit an instantiation of + // that partial specialization. + extra_assert!(def.is_opaque(ctx, &())); + return Err(Error::InstantiationOfOpaqueType); + } + + // TODO: If the definition type is a template class/struct + // definition's member template definition, it could rely on + // generic template parameters from its outer template + // class/struct. When we emit bindings for it, it could require + // *more* type arguments than we have here, and we will need to + // reconstruct them somehow. We don't have any means of doing + // that reconstruction at this time. + + let template_args = self + .template_arguments() + .iter() + .zip(def_params.iter()) + // Only pass type arguments for the type parameters that + // the def uses. + .filter(|&(_, param)| ctx.uses_template_parameter(def.id(), *param)) + .map(|(arg, _)| { + let arg = arg.into_resolver().through_type_refs().resolve(ctx); + let ty = arg + .try_to_rust_ty(ctx, &())? + .with_implicit_template_params(ctx, arg); + Ok(ty) + }) + .collect::>>()?; + + Ok(if template_args.is_empty() { + syn::parse_quote! { #ty } + } else { + syn::parse_quote! { #ty<#(#template_args),*> } + }) + } +} + +impl TryToRustTy for FunctionSig { + type Extra = Item; + + fn try_to_rust_ty( + &self, + ctx: &BindgenContext, + item: &Item, + ) -> error::Result { + // TODO: we might want to consider ignoring the reference return value. + let ret = utils::fnsig_return_ty(ctx, self); + let arguments = utils::fnsig_arguments(ctx, self); + + match self.abi(ctx, None) { + Ok(abi) => Ok( + syn::parse_quote! { unsafe extern #abi fn ( #( #arguments ),* ) #ret }, + ), + Err(err) => { + if matches!(err, Error::UnsupportedAbi(_)) { + unsupported_abi_diagnostic( + self.name(), + self.is_variadic(), + item.location(), + ctx, + &err, + ); + } + + Err(err) + } + } + } +} + +impl CodeGenerator for Function { + type Extra = Item; + + /// If we've actually generated the symbol, the number of times we've seen + /// it. 
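+    ///
+    /// `None` means nothing was emitted for this function (unsupported ABI,
+    /// template, already-seen symbol, ...); the count feeds the overload
+    /// suffixing done further down.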
+ type Return = Option; + + fn codegen( + &self, + ctx: &BindgenContext, + result: &mut CodegenResult<'_>, + item: &Item, + ) -> Self::Return { + debug!("::codegen: item = {item:?}"); + debug_assert!(item.is_enabled_for_codegen(ctx)); + let id = DiscoveredItemId::new(item.id().as_usize()); + + let is_internal = matches!(self.linkage(), Linkage::Internal); + + let signature_item = ctx.resolve_item(self.signature()); + let signature = signature_item.kind().expect_type().canonical_type(ctx); + let TypeKind::Function(ref signature) = *signature.kind() else { + panic!("Signature kind is not a Function: {signature:?}") + }; + + if is_internal { + if !ctx.options().wrap_static_fns { + // We cannot do anything with internal functions if we are not wrapping them so + // just avoid generating anything for them. + return None; + } + + if signature.is_variadic() { + // We cannot generate wrappers for variadic static functions so we avoid + // generating any code for them. + variadic_fn_diagnostic(self.name(), item.location(), ctx); + return None; + } + } + + let is_pure_virtual = match self.kind() { + FunctionKind::Method(ref method_kind) => { + method_kind.is_pure_virtual() + } + FunctionKind::Function => false, + }; + if is_pure_virtual && !ctx.options().generate_pure_virtual_functions { + // Pure virtual methods have no actual symbol, so we can't generate + // something meaningful for them. Downstream code postprocessors + // might want to find out about them. + return None; + } + + let is_dynamic_function = match self.kind() { + FunctionKind::Function => { + ctx.options().dynamic_library_name.is_some() + } + FunctionKind::Method(_) => false, + }; + + // Similar to static member variables in a class template, we can't + // generate bindings to template functions, because the set of + // instantiations is open ended and we have no way of knowing which + // monomorphizations actually exist. + if !item.all_template_params(ctx).is_empty() { + return None; + } + + let name = self.name(); + let mut canonical_name = item.canonical_name(ctx); + let mangled_name = self.mangled_name(); + + { + let seen_symbol_name = mangled_name.unwrap_or(&canonical_name); + + // TODO: Maybe warn here if there's a type/argument mismatch, or + // something? + if result.seen_function(seen_symbol_name) { + return None; + } + result.saw_function(seen_symbol_name); + } + + let mut attributes = vec![]; + + if true { + let must_use = signature.must_use() || { + let ret_ty = signature + .return_type() + .into_resolver() + .through_type_refs() + .resolve(ctx); + ret_ty.must_use(ctx) + }; + + if must_use { + attributes.push(attributes::must_use()); + } + } + + if let Some(comment) = item.comment(ctx) { + attributes.push(attributes::doc(&comment)); + } + + let abi = match signature.abi(ctx, Some(name)) { + Err(err) => { + if matches!(err, Error::UnsupportedAbi(_)) { + unsupported_abi_diagnostic( + name, + signature.is_variadic(), + item.location(), + ctx, + &err, + ); + } + + return None; + } + Ok(ClangAbi::Unknown(unknown_abi)) => { + panic!( + "Invalid or unknown abi {unknown_abi:?} for function {canonical_name:?} ({self:?})" + ); + } + Ok(abi) => abi, + }; + + // Handle overloaded functions by giving each overload its own unique + // suffix. 
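+        // E.g. (hypothetical name) three C++ overloads of `do_thing` are
+        // emitted as `do_thing`, `do_thing1` and `do_thing2` on the Rust side.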
+ let times_seen = result.overload_number(&canonical_name); + if times_seen > 0 { + write!(&mut canonical_name, "{times_seen}").unwrap(); + } + ctx.options().for_each_callback(|cb| { + cb.new_item_found( + id, + DiscoveredItem::Function { + final_name: canonical_name.to_string(), + }, + ); + }); + + let link_name_attr = self.link_name().or_else(|| { + let mangled_name = mangled_name.unwrap_or(name); + (!utils::names_will_be_identical_after_mangling( + &canonical_name, + mangled_name, + Some(abi), + )) + .then_some(mangled_name) + }); + + if let Some(link_name) = link_name_attr { + if !is_dynamic_function { + attributes.push(attributes::link_name::(link_name)); + } + } + + // Unfortunately this can't piggyback on the `attributes` list because + // the #[link(wasm_import_module)] needs to happen before the `extern + // "C"` block. It doesn't get picked up properly otherwise + let wasm_link_attribute = + ctx.options().wasm_import_module_name.as_ref().map(|name| { + quote! { #[link(wasm_import_module = #name)] } + }); + + let should_wrap = is_internal && + ctx.options().wrap_static_fns && + link_name_attr.is_none(); + + if should_wrap { + let name = canonical_name.clone() + ctx.wrap_static_fns_suffix(); + attributes.push(attributes::link_name::(&name)); + } + + let wrap_as_variadic = if should_wrap && !signature.is_variadic() { + utils::wrap_as_variadic_fn(ctx, signature, name) + } else { + None + }; + + let (ident, args) = if let Some(WrapAsVariadic { + idx_of_va_list_arg, + new_name, + }) = &wrap_as_variadic + { + ( + new_name, + utils::fnsig_arguments_iter( + ctx, + // Prune argument at index (idx_of_va_list_arg) + signature.argument_types().iter().enumerate().filter_map( + |(idx, t)| { + if idx == *idx_of_va_list_arg { + None + } else { + Some(t) + } + }, + ), + // and replace it by a `...` (variadic symbol and the end of the signature) + true, + ), + ) + } else { + (&canonical_name, utils::fnsig_arguments(ctx, signature)) + }; + let ret = utils::fnsig_return_ty(ctx, signature); + + let ident = ctx.rust_ident(ident); + + let safety = ctx + .options() + .rust_features + .unsafe_extern_blocks + .then(|| quote!(unsafe)); + + let tokens = quote! { + #wasm_link_attribute + #safety extern #abi { + #(#attributes)* + pub fn #ident ( #( #args ),* ) #ret; + } + }; + + // Add the item to the serialization list if necessary + if should_wrap { + result + .items_to_serialize + .push((item.id(), wrap_as_variadic)); + } + + // If we're doing dynamic binding generation, add to the dynamic items. 
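+        // (Those are later emitted as a single struct with a
+        // `libloading`-based constructor, via `dynamic_items().get_tokens(...)`
+        // near the end of codegen, instead of appearing in an `extern` block.)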
+ if is_dynamic_function { + let ident_str = ident.to_string(); + let symbol = link_name_attr.unwrap_or(&ident_str); + let args_identifiers = + utils::fnsig_argument_identifiers(ctx, signature); + let ret_ty = utils::fnsig_return_ty(ctx, signature); + result.dynamic_items().push_func( + &ident, + symbol, + abi, + signature.is_variadic(), + ctx.options().dynamic_link_require_all, + &args, + &args_identifiers, + &ret, + &ret_ty, + &attributes, + ctx, + ); + } else { + result.push(tokens); + } + Some(times_seen) + } +} + +#[cfg_attr(not(feature = "experimental"), allow(unused_variables))] +fn unsupported_abi_diagnostic( + fn_name: &str, + variadic: bool, + location: Option<&crate::clang::SourceLocation>, + ctx: &BindgenContext, + error: &Error, +) { + warn!( + "Skipping {}function `{fn_name}` because the {error}", + if variadic { "variadic " } else { "" }, + ); + + #[cfg(feature = "experimental")] + if ctx.options().emit_diagnostics { + use crate::diagnostics::{get_line, Diagnostic, Level, Slice}; + + let mut diag = Diagnostic::default(); + diag.with_title( + format!( + "Skipping {}function `{fn_name}` because the {error}", + if variadic { "variadic " } else { "" }, + ), + Level::Warning, + ) + .add_annotation( + "No code will be generated for this function.", + Level::Warning, + ) + .add_annotation( + format!( + "The configured Rust version is {}.", + ctx.options().rust_target + ), + Level::Note, + ); + + if let Some(loc) = location { + let (file, line, col, _) = loc.location(); + + if let Some(filename) = file.name() { + if let Ok(Some(source)) = get_line(&filename, line) { + let mut slice = Slice::default(); + slice + .with_source(source) + .with_location(filename, line, col); + diag.add_slice(slice); + } + } + } + + diag.display(); + } +} + +fn variadic_fn_diagnostic( + fn_name: &str, + _location: Option<&crate::clang::SourceLocation>, + _ctx: &BindgenContext, +) { + warn!( + "Cannot generate wrapper for the static variadic function `{fn_name}`." + ); + + #[cfg(feature = "experimental")] + if _ctx.options().emit_diagnostics { + use crate::diagnostics::{get_line, Diagnostic, Level, Slice}; + + let mut diag = Diagnostic::default(); + + diag.with_title(format!("Cannot generate wrapper for the static function `{fn_name}`."), Level::Warning) + .add_annotation("The `--wrap-static-fns` feature does not support variadic functions.", Level::Note) + .add_annotation("No code will be generated for this function.", Level::Note); + + if let Some(loc) = _location { + let (file, line, col, _) = loc.location(); + + if let Some(filename) = file.name() { + if let Ok(Some(source)) = get_line(&filename, line) { + let mut slice = Slice::default(); + slice + .with_source(source) + .with_location(filename, line, col); + diag.add_slice(slice); + } + } + } + + diag.display(); + } +} + +fn objc_method_codegen( + ctx: &BindgenContext, + method: &ObjCMethod, + methods: &mut Vec, + class_name: Option<&str>, + rust_class_name: &str, + prefix: &str, +) { + // This would ideally resolve the method into an Item, and use + // Item::process_before_codegen; however, ObjC methods are not currently + // made into function items. + let name = format!("{rust_class_name}::{prefix}{}", method.rust_name()); + if ctx.options().blocklisted_items.matches(name) { + return; + } + + let signature = method.signature(); + let fn_args = utils::fnsig_arguments(ctx, signature); + let fn_ret = utils::fnsig_return_ty(ctx, signature); + + let sig = if method.is_class_method() { + quote! 
{ + ( #( #fn_args ),* ) #fn_ret + } + } else { + let self_arr = [quote! { &self }]; + let args = self_arr.iter().chain(fn_args.iter()); + quote! { + ( #( #args ),* ) #fn_ret + } + }; + + let methods_and_args = method.format_method_call(&fn_args); + + let body = { + let body = if method.is_class_method() { + let class_name = ctx.rust_ident( + class_name + .expect("Generating a class method without class name?"), + ); + quote!(msg_send!(class!(#class_name), #methods_and_args)) + } else { + quote!(msg_send!(*self, #methods_and_args)) + }; + + ctx.wrap_unsafe_ops(body) + }; + + let method_name = ctx.rust_ident(format!("{prefix}{}", method.rust_name())); + + methods.push(quote! { + unsafe fn #method_name #sig where ::Target: objc::Message + Sized { + #body + } + }); +} + +impl CodeGenerator for ObjCInterface { + type Extra = Item; + type Return = (); + + fn codegen( + &self, + ctx: &BindgenContext, + result: &mut CodegenResult<'_>, + item: &Item, + ) { + debug_assert!(item.is_enabled_for_codegen(ctx)); + + let mut impl_items = vec![]; + let rust_class_name = item.path_for_allowlisting(ctx)[1..].join("::"); + + for method in self.methods() { + objc_method_codegen( + ctx, + method, + &mut impl_items, + None, + &rust_class_name, + "", + ); + } + + for class_method in self.class_methods() { + let ambiquity = self + .methods() + .iter() + .map(|m| m.rust_name()) + .any(|x| x == class_method.rust_name()); + let prefix = if ambiquity { "class_" } else { "" }; + objc_method_codegen( + ctx, + class_method, + &mut impl_items, + Some(self.name()), + &rust_class_name, + prefix, + ); + } + + let trait_name = ctx.rust_ident(self.rust_name()); + let trait_constraints = quote! { + Sized + std::ops::Deref + }; + let trait_block = if self.is_template() { + let template_names: Vec = self + .template_names + .iter() + .map(|g| ctx.rust_ident(g)) + .collect(); + + quote! { + pub trait #trait_name <#(#template_names:'static),*> : #trait_constraints { + #( #impl_items )* + } + } + } else { + quote! { + pub trait #trait_name : #trait_constraints { + #( #impl_items )* + } + } + }; + + let class_name = ctx.rust_ident(self.name()); + if !self.is_category() && !self.is_protocol() { + let struct_block = quote! { + #[repr(transparent)] + #[derive(Debug, Copy, Clone)] + pub struct #class_name(pub id); + impl std::ops::Deref for #class_name { + type Target = objc::runtime::Object; + fn deref(&self) -> &Self::Target { + unsafe { + &*self.0 + } + } + } + unsafe impl objc::Message for #class_name { } + impl #class_name { + pub fn alloc() -> Self { + Self(unsafe { + msg_send!(class!(#class_name), alloc) + }) + } + } + }; + result.push(struct_block); + let mut protocol_set: HashSet = Default::default(); + for protocol_id in &self.conforms_to { + protocol_set.insert(*protocol_id); + let protocol_name = ctx.rust_ident( + ctx.resolve_type(protocol_id.expect_type_id(ctx)) + .name() + .unwrap(), + ); + let impl_trait = quote! 
{ + impl #protocol_name for #class_name { } + }; + result.push(impl_trait); + } + let mut parent_class = self.parent_class; + while let Some(parent_id) = parent_class { + let parent = parent_id + .expect_type_id(ctx) + .into_resolver() + .through_type_refs() + .resolve(ctx) + .expect_type() + .kind(); + + let TypeKind::ObjCInterface(parent) = parent else { + break; + }; + parent_class = parent.parent_class; + + let parent_name = ctx.rust_ident(parent.rust_name()); + let impl_trait = if parent.is_template() { + let template_names: Vec = parent + .template_names + .iter() + .map(|g| ctx.rust_ident(g)) + .collect(); + quote! { + impl <#(#template_names :'static),*> #parent_name <#(#template_names),*> for #class_name { + } + } + } else { + quote! { + impl #parent_name for #class_name { } + } + }; + result.push(impl_trait); + for protocol_id in &parent.conforms_to { + if protocol_set.insert(*protocol_id) { + let protocol_name = ctx.rust_ident( + ctx.resolve_type(protocol_id.expect_type_id(ctx)) + .name() + .unwrap(), + ); + let impl_trait = quote! { + impl #protocol_name for #class_name { } + }; + result.push(impl_trait); + } + } + if !parent.is_template() { + let parent_struct_name = parent.name(); + let child_struct_name = self.name(); + let parent_struct = ctx.rust_ident(parent_struct_name); + let from_block = quote! { + impl From<#class_name> for #parent_struct { + fn from(child: #class_name) -> #parent_struct { + #parent_struct(child.0) + } + } + }; + result.push(from_block); + + let error_msg = format!( + "This {parent_struct_name} cannot be downcasted to {child_struct_name}" + ); + let try_into_block = quote! { + impl std::convert::TryFrom<#parent_struct> for #class_name { + type Error = &'static str; + fn try_from(parent: #parent_struct) -> Result<#class_name, Self::Error> { + let is_kind_of : bool = unsafe { msg_send!(parent, isKindOfClass:class!(#class_name))}; + if is_kind_of { + Ok(#class_name(parent.0)) + } else { + Err(#error_msg) + } + } + } + }; + result.push(try_into_block); + } + } + } + + if !self.is_protocol() { + let impl_block = if self.is_template() { + let template_names: Vec = self + .template_names + .iter() + .map(|g| ctx.rust_ident(g)) + .collect(); + quote! { + impl <#(#template_names :'static),*> #trait_name <#(#template_names),*> for #class_name { + } + } + } else { + quote! 
{ + impl #trait_name for #class_name { + } + } + }; + result.push(impl_block); + } + + result.push(trait_block); + result.saw_objc(); + } +} + +pub(crate) fn codegen( + context: BindgenContext, +) -> Result<(proc_macro2::TokenStream, BindgenOptions), CodegenError> { + context.gen(|context| { + let _t = context.timer("codegen"); + let counter = Cell::new(0); + let mut result = CodegenResult::new(&counter); + + debug!("codegen: {:?}", context.options()); + + if context.options().emit_ir { + let codegen_items = context.codegen_items(); + for (id, item) in context.items() { + if codegen_items.contains(&id) { + println!("ir: {id:?} = {item:#?}"); + } + } + } + + if let Some(path) = context.options().emit_ir_graphviz.as_ref() { + match dot::write_dot_file(context, path) { + Ok(()) => info!( + "Your dot file was generated successfully into: {path}" + ), + Err(e) => warn!("{e}"), + } + } + + if let Some(spec) = context.options().depfile.as_ref() { + match spec.write(context.deps()) { + Ok(()) => info!( + "Your depfile was generated successfully into: {}", + spec.depfile_path.display() + ), + Err(e) => warn!("{e}"), + } + } + + context.resolve_item(context.root_module()).codegen( + context, + &mut result, + &(), + ); + + if let Some(ref lib_name) = context.options().dynamic_library_name { + let lib_ident = context.rust_ident(lib_name); + let dynamic_items_tokens = + result.dynamic_items().get_tokens(&lib_ident, context); + result.push(dynamic_items_tokens); + } + + utils::serialize_items(&result, context)?; + + Ok(postprocessing::postprocessing( + result.items, + context.options(), + )) + }) +} + +pub(crate) mod utils { + use super::helpers::BITFIELD_UNIT; + use super::serialize::CSerialize; + use super::{error, CodegenError, CodegenResult, ToRustTyOrOpaque}; + use crate::ir::context::BindgenContext; + use crate::ir::context::TypeId; + use crate::ir::function::{Abi, ClangAbi, FunctionSig}; + use crate::ir::item::{Item, ItemCanonicalPath}; + use crate::ir::ty::TypeKind; + use crate::{args_are_cpp, file_is_cpp}; + use std::borrow::Cow; + use std::io::Write; + use std::mem; + use std::path::PathBuf; + use std::str::FromStr; + + pub(super) fn serialize_items( + result: &CodegenResult, + context: &BindgenContext, + ) -> Result<(), CodegenError> { + if result.items_to_serialize.is_empty() { + return Ok(()); + } + + let path = context.options().wrap_static_fns_path.as_ref().map_or_else( + || std::env::temp_dir().join("bindgen").join("extern"), + PathBuf::from, + ); + + let dir = path.parent().unwrap(); + + if !dir.exists() { + std::fs::create_dir_all(dir)?; + } + + let is_cpp = args_are_cpp(&context.options().clang_args) || + context + .options() + .input_headers + .iter() + .any(|h| file_is_cpp(h)); + + let source_path = path.with_extension(if is_cpp { "cpp" } else { "c" }); + + let mut code = Vec::new(); + + if !context.options().input_headers.is_empty() { + for header in &context.options().input_headers { + writeln!(code, "#include \"{header}\"")?; + } + + writeln!(code)?; + } + + if !context.options().input_header_contents.is_empty() { + for (name, contents) in &context.options().input_header_contents { + writeln!(code, "// {name}\n{contents}")?; + } + + writeln!(code)?; + } + + writeln!(code, "// Static wrappers\n")?; + + for (id, wrap_as_variadic) in &result.items_to_serialize { + let item = context.resolve_item(*id); + item.serialize(context, wrap_as_variadic, &mut vec![], &mut code)?; + } + + std::fs::write(source_path, code)?; + + Ok(()) + } + + pub(super) fn wrap_as_variadic_fn( + ctx: 
&BindgenContext, + signature: &FunctionSig, + name: &str, + ) -> Option { + // Fast path, exclude because: + // - with 0 args: no va_list possible, so no point searching for one + // - with 1 args: cannot have a `va_list` and another arg (required by va_start) + if signature.argument_types().len() <= 1 { + return None; + } + + let mut it = signature.argument_types().iter().enumerate().filter_map( + |(idx, (_name, mut type_id))| { + // Hand rolled visitor that checks for the presence of `va_list` + loop { + let ty = ctx.resolve_type(type_id); + if Some("__builtin_va_list") == ty.name() { + return Some(idx); + } + match ty.kind() { + TypeKind::Alias(type_id_alias) => { + type_id = *type_id_alias; + } + TypeKind::ResolvedTypeRef(type_id_typedef) => { + type_id = *type_id_typedef; + } + _ => break, + } + } + None + }, + ); + + // Return THE idx (by checking that there is no idx after) + // This is done since we cannot handle multiple `va_list` + it.next().filter(|_| it.next().is_none()).and_then(|idx| { + // Call the `wrap_as_variadic_fn` callback + #[cfg(feature = "experimental")] + { + ctx.options() + .last_callback(|c| c.wrap_as_variadic_fn(name)) + .map(|new_name| super::WrapAsVariadic { + new_name, + idx_of_va_list_arg: idx, + }) + } + #[cfg(not(feature = "experimental"))] + { + let _ = name; + let _ = idx; + None + } + }) + } + + pub(crate) fn prepend_bitfield_unit_type( + ctx: &BindgenContext, + result: &mut Vec, + ) { + if ctx.options().blocklisted_items.matches(BITFIELD_UNIT) || + ctx.options().blocklisted_types.matches(BITFIELD_UNIT) + { + return; + } + + let bitfield_unit_src = if ctx.options().rust_features().raw_ref_macros + { + include_str!("./bitfield_unit_raw_ref_macros.rs") + } else { + include_str!("./bitfield_unit.rs") + }; + let bitfield_unit_src = if true { + Cow::Borrowed(bitfield_unit_src) + } else { + Cow::Owned(bitfield_unit_src.replace("const fn ", "fn ")) + }; + let bitfield_unit_type = + proc_macro2::TokenStream::from_str(&bitfield_unit_src).unwrap(); + let bitfield_unit_type = quote!(#bitfield_unit_type); + + let items = vec![bitfield_unit_type]; + let old_items = mem::replace(result, items); + result.extend(old_items); + } + + pub(crate) fn prepend_objc_header( + ctx: &BindgenContext, + result: &mut Vec, + ) { + let use_objc = if ctx.options().objc_extern_crate { + quote! { + #[macro_use] + extern crate objc; + } + } else { + quote! { + use objc::{self, msg_send, sel, sel_impl, class}; + } + }; + + let id_type = quote! { + #[allow(non_camel_case_types)] + pub type id = *mut objc::runtime::Object; + }; + + let items = vec![use_objc, id_type]; + let old_items = mem::replace(result, items); + result.extend(old_items); + } + + pub(crate) fn prepend_block_header( + ctx: &BindgenContext, + result: &mut Vec, + ) { + let use_block = if ctx.options().block_extern_crate { + quote! { + extern crate block; + } + } else { + quote! { + use block; + } + }; + + let items = vec![use_block]; + let old_items = mem::replace(result, items); + result.extend(old_items); + } + + pub(crate) fn prepend_union_types( + ctx: &BindgenContext, + result: &mut Vec, + ) { + let prefix = ctx.trait_prefix(); + + // If the target supports `const fn`, declare eligible functions + // as `const fn` else just `fn`. + let const_fn = if true { + quote! { const fn } + } else { + quote! { fn } + }; + + // TODO(emilio): The fmt::Debug impl could be way nicer with + // std::intrinsics::type_name, but... + let union_field_decl = quote! 
{ + #[repr(C)] + pub struct __BindgenUnionField(::#prefix::marker::PhantomData); + }; + + let transmute = + ctx.wrap_unsafe_ops(quote!(::#prefix::mem::transmute(self))); + + let union_field_impl = quote! { + impl __BindgenUnionField { + #[inline] + pub #const_fn new() -> Self { + __BindgenUnionField(::#prefix::marker::PhantomData) + } + + #[inline] + pub unsafe fn as_ref(&self) -> &T { + #transmute + } + + #[inline] + pub unsafe fn as_mut(&mut self) -> &mut T { + #transmute + } + } + }; + + let union_field_default_impl = quote! { + impl ::#prefix::default::Default for __BindgenUnionField { + #[inline] + fn default() -> Self { + Self::new() + } + } + }; + + let union_field_clone_impl = quote! { + impl ::#prefix::clone::Clone for __BindgenUnionField { + #[inline] + fn clone(&self) -> Self { + *self + } + } + }; + + let union_field_copy_impl = quote! { + impl ::#prefix::marker::Copy for __BindgenUnionField {} + }; + + let union_field_debug_impl = quote! { + impl ::#prefix::fmt::Debug for __BindgenUnionField { + fn fmt(&self, fmt: &mut ::#prefix::fmt::Formatter<'_>) + -> ::#prefix::fmt::Result { + fmt.write_str("__BindgenUnionField") + } + } + }; + + // The actual memory of the filed will be hashed, so that's why these + // field doesn't do anything with the hash. + let union_field_hash_impl = quote! { + impl ::#prefix::hash::Hash for __BindgenUnionField { + fn hash(&self, _state: &mut H) { + } + } + }; + + let union_field_partialeq_impl = quote! { + impl ::#prefix::cmp::PartialEq for __BindgenUnionField { + fn eq(&self, _other: &__BindgenUnionField) -> bool { + true + } + } + }; + + let union_field_eq_impl = quote! { + impl ::#prefix::cmp::Eq for __BindgenUnionField { + } + }; + + let items = vec![ + union_field_decl, + union_field_impl, + union_field_default_impl, + union_field_clone_impl, + union_field_copy_impl, + union_field_debug_impl, + union_field_hash_impl, + union_field_partialeq_impl, + union_field_eq_impl, + ]; + + let old_items = mem::replace(result, items); + result.extend(old_items); + } + + pub(crate) fn prepend_incomplete_array_types( + ctx: &BindgenContext, + result: &mut Vec, + ) { + let prefix = ctx.trait_prefix(); + + // If the target supports `const fn`, declare eligible functions + // as `const fn` else just `fn`. + let const_fn = if true { + quote! { const fn } + } else { + quote! { fn } + }; + + let incomplete_array_decl = quote! { + #[repr(C)] + #[derive(Default)] + pub struct __IncompleteArrayField( + ::#prefix::marker::PhantomData, [T; 0]); + }; + + let from_raw_parts = ctx.wrap_unsafe_ops(quote! ( + ::#prefix::slice::from_raw_parts(self.as_ptr(), len) + )); + let from_raw_parts_mut = ctx.wrap_unsafe_ops(quote! ( + ::#prefix::slice::from_raw_parts_mut(self.as_mut_ptr(), len) + )); + + let incomplete_array_impl = quote! { + impl __IncompleteArrayField { + #[inline] + pub #const_fn new() -> Self { + __IncompleteArrayField(::#prefix::marker::PhantomData, []) + } + + #[inline] + pub fn as_ptr(&self) -> *const T { + self as *const _ as *const T + } + + #[inline] + pub fn as_mut_ptr(&mut self) -> *mut T { + self as *mut _ as *mut T + } + + #[inline] + pub unsafe fn as_slice(&self, len: usize) -> &[T] { + #from_raw_parts + } + + #[inline] + pub unsafe fn as_mut_slice(&mut self, len: usize) -> &mut [T] { + #from_raw_parts_mut + } + } + }; + + let incomplete_array_debug_impl = quote! 
{ + impl ::#prefix::fmt::Debug for __IncompleteArrayField { + fn fmt(&self, fmt: &mut ::#prefix::fmt::Formatter<'_>) + -> ::#prefix::fmt::Result { + fmt.write_str("__IncompleteArrayField") + } + } + }; + + let items = vec![ + incomplete_array_decl, + incomplete_array_impl, + incomplete_array_debug_impl, + ]; + + let old_items = mem::replace(result, items); + result.extend(old_items); + } + + pub(crate) fn prepend_float16_type( + result: &mut Vec, + ) { + let float16_type = quote! { + #[derive(PartialEq, Copy, Clone, Hash, Debug, Default)] + #[repr(transparent)] + pub struct __BindgenFloat16(pub u16); + }; + + let items = vec![float16_type]; + let old_items = mem::replace(result, items); + result.extend(old_items); + } + + pub(crate) fn prepend_complex_type( + result: &mut Vec, + ) { + let complex_type = quote! { + #[derive(PartialEq, Copy, Clone, Hash, Debug, Default)] + #[repr(C)] + pub struct __BindgenComplex { + pub re: T, + pub im: T + } + }; + + let items = vec![complex_type]; + let old_items = mem::replace(result, items); + result.extend(old_items); + } + + pub(crate) fn prepend_opaque_array_type( + result: &mut Vec, + ) { + let ty = quote! { + /// If Bindgen could only determine the size and alignment of a + /// type, it is represented like this. + #[derive(PartialEq, Copy, Clone, Debug, Hash)] + #[repr(C)] + pub struct __BindgenOpaqueArray(pub [T; N]); + impl Default for __BindgenOpaqueArray { + fn default() -> Self { + Self([::default(); N]) + } + } + }; + + result.insert(0, ty); + } + + pub(crate) fn build_path( + item: &Item, + ctx: &BindgenContext, + ) -> error::Result { + let path = item.namespace_aware_canonical_path(ctx); + let tokens = + proc_macro2::TokenStream::from_str(&path.join("::")).unwrap(); + + Ok(syn::parse_quote! { #tokens }) + } + + fn primitive_ty(ctx: &BindgenContext, name: &str) -> syn::Type { + let ident = ctx.rust_ident_raw(name); + syn::parse_quote! { #ident } + } + + pub(crate) fn type_from_named( + ctx: &BindgenContext, + name: &str, + ) -> Option { + // FIXME: We could use the inner item to check this is really a + // primitive type but, who the heck overrides these anyway? + Some(match name { + "int8_t" => primitive_ty(ctx, "i8"), + "uint8_t" => primitive_ty(ctx, "u8"), + "int16_t" => primitive_ty(ctx, "i16"), + "uint16_t" => primitive_ty(ctx, "u16"), + "int32_t" => primitive_ty(ctx, "i32"), + "uint32_t" => primitive_ty(ctx, "u32"), + "int64_t" => primitive_ty(ctx, "i64"), + "uint64_t" => primitive_ty(ctx, "u64"), + + "size_t" if ctx.options().size_t_is_usize => { + primitive_ty(ctx, "usize") + } + "uintptr_t" => primitive_ty(ctx, "usize"), + + "ssize_t" if ctx.options().size_t_is_usize => { + primitive_ty(ctx, "isize") + } + "intptr_t" | "ptrdiff_t" => primitive_ty(ctx, "isize"), + _ => return None, + }) + } + + fn fnsig_return_ty_internal( + ctx: &BindgenContext, + sig: &FunctionSig, + ) -> syn::Type { + if sig.is_divergent() { + return syn::parse_quote! { ! }; + } + + let canonical_type_kind = sig + .return_type() + .into_resolver() + .through_type_refs() + .through_type_aliases() + .resolve(ctx) + .kind() + .expect_type() + .kind(); + + match canonical_type_kind { + TypeKind::Void => syn::parse_quote! { () }, + _ => sig.return_type().to_rust_ty_or_opaque(ctx, &()), + } + } + + pub(crate) fn fnsig_return_ty( + ctx: &BindgenContext, + sig: &FunctionSig, + ) -> proc_macro2::TokenStream { + match fnsig_return_ty_internal(ctx, sig) { + syn::Type::Tuple(syn::TypeTuple { elems, .. }) + if elems.is_empty() => + { + quote! {} + } + ty => quote! 
{ -> #ty }, + } + } + + pub(crate) fn fnsig_argument_type( + ctx: &BindgenContext, + ty: TypeId, + ) -> syn::Type { + use super::ToPtr; + + let arg_item = ctx.resolve_item(ty); + let arg_ty = arg_item.kind().expect_type(); + + // From the C90 standard[1]: + // + // A declaration of a parameter as "array of type" shall be + // adjusted to "qualified pointer to type", where the type + // qualifiers (if any) are those specified within the [ and ] of + // the array type derivation. + // + // [1]: http://c0x.coding-guidelines.com/6.7.5.3.html + match *arg_ty.canonical_type(ctx).kind() { + TypeKind::Array(t, _) => { + let stream = if ctx.options().array_pointers_in_arguments { + arg_ty.to_rust_ty_or_opaque(ctx, arg_item) + } else { + t.to_rust_ty_or_opaque(ctx, &()) + }; + stream + .to_ptr(ctx.resolve_type(t).is_const() || arg_ty.is_const()) + } + TypeKind::Pointer(inner) => { + let inner = ctx.resolve_item(inner); + let inner_ty = inner.expect_type(); + if let TypeKind::ObjCInterface(ref interface) = + *inner_ty.canonical_type(ctx).kind() + { + let name = ctx.rust_ident(interface.name()); + syn::parse_quote! { #name } + } else { + arg_item.to_rust_ty_or_opaque(ctx, &()) + } + } + _ => arg_item.to_rust_ty_or_opaque(ctx, &()), + } + } + + pub(crate) fn fnsig_arguments_iter< + 'a, + I: Iterator, TypeId)>, + >( + ctx: &BindgenContext, + args_iter: I, + is_variadic: bool, + ) -> Vec { + let mut unnamed_arguments = 0; + let mut args = args_iter + .map(|(name, ty)| { + let arg_ty = fnsig_argument_type(ctx, *ty); + + let arg_name = if let Some(ref name) = *name { + ctx.rust_mangle(name).into_owned() + } else { + unnamed_arguments += 1; + format!("arg{unnamed_arguments}") + }; + + assert!(!arg_name.is_empty()); + let arg_name = ctx.rust_ident(arg_name); + + quote! { + #arg_name : #arg_ty + } + }) + .collect::>(); + + if is_variadic { + args.push(quote! { ... }); + } + + args + } + + pub(crate) fn fnsig_arguments( + ctx: &BindgenContext, + sig: &FunctionSig, + ) -> Vec { + fnsig_arguments_iter( + ctx, + sig.argument_types().iter(), + sig.is_variadic(), + ) + } + + pub(crate) fn fnsig_argument_identifiers( + ctx: &BindgenContext, + sig: &FunctionSig, + ) -> Vec { + let mut unnamed_arguments = 0; + let args = sig + .argument_types() + .iter() + .map(|&(ref name, _ty)| { + let arg_name = if let Some(ref name) = *name { + ctx.rust_mangle(name).into_owned() + } else { + unnamed_arguments += 1; + format!("arg{unnamed_arguments}") + }; + + assert!(!arg_name.is_empty()); + let arg_name = ctx.rust_ident(arg_name); + + quote! { + #arg_name + } + }) + .collect::>(); + + args + } + + pub(crate) fn fnsig_block( + ctx: &BindgenContext, + sig: &FunctionSig, + ) -> proc_macro2::TokenStream { + let args = sig.argument_types().iter().map(|&(_, ty)| { + let arg_item = ctx.resolve_item(ty); + + arg_item.to_rust_ty_or_opaque(ctx, &()) + }); + + let ret_ty = fnsig_return_ty_internal(ctx, sig); + quote! { + *const ::block::Block<(#(#args,)*), #ret_ty> + } + } + + // Returns true if `canonical_name` will end up as `mangled_name` at the + // machine code level, i.e. after LLVM has applied any target specific + // mangling. + pub(crate) fn names_will_be_identical_after_mangling( + canonical_name: &str, + mangled_name: &str, + call_conv: Option, + ) -> bool { + // If the mangled name and the canonical name are the same then no + // mangling can have happened between the two versions. 
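    // Hedged illustration of the name shapes the rest of this routine is
    // written against (the helper below is an editorial example only, not part
    // of the vendored bindgen source): on 32-bit targets that prepend an
    // underscore, a plain C symbol `foo` becomes `_foo`, stdcall becomes
    // `_foo@8` (the suffix is the byte size of the arguments), and fastcall
    // becomes `@foo@8`.
    #[allow(dead_code)]
    fn _mangling_shape_examples() {
        // cdecl / global variable: only the leading underscore is added.
        assert!(names_will_be_identical_after_mangling("foo", "_foo", None));
        // stdcall: underscore prefix plus an "@<arg bytes>" suffix.
        assert!(names_will_be_identical_after_mangling(
            "foo",
            "_foo@8",
            Some(ClangAbi::Known(Abi::Stdcall)),
        ));
        // A C++-mangled name never matches, so a #[link_name] attribute is needed.
        assert!(!names_will_be_identical_after_mangling(
            "foo",
            "_Z3foov",
            Some(ClangAbi::Known(Abi::C)),
        ));
    }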
+ if canonical_name == mangled_name { + return true; + } + + // Working with &[u8] makes indexing simpler than with &str + let canonical_name = canonical_name.as_bytes(); + let mangled_name = mangled_name.as_bytes(); + + let (mangling_prefix, expect_suffix) = match call_conv { + Some(ClangAbi::Known(Abi::C)) | + // None is the case for global variables + None => { + (b'_', false) + } + Some(ClangAbi::Known(Abi::Stdcall)) => (b'_', true), + Some(ClangAbi::Known(Abi::Fastcall)) => (b'@', true), + + // This is something we don't recognize, stay on the safe side + // by emitting the `#[link_name]` attribute + Some(_) => return false, + }; + + // Check that the mangled name is long enough to at least contain the + // canonical name plus the expected prefix. + if mangled_name.len() < canonical_name.len() + 1 { + return false; + } + + // Return if the mangled name does not start with the prefix expected + // for the given calling convention. + if mangled_name[0] != mangling_prefix { + return false; + } + + // Check that the mangled name contains the canonical name after the + // prefix + if &mangled_name[1..=canonical_name.len()] != canonical_name { + return false; + } + + // If the given calling convention also prescribes a suffix, check that + // it exists too + if expect_suffix { + let suffix = &mangled_name[canonical_name.len() + 1..]; + + // The shortest suffix is "@0" + if suffix.len() < 2 { + return false; + } + + // Check that the suffix starts with '@' and is all ASCII decimals + // after that. + if suffix[0] != b'@' || !suffix[1..].iter().all(u8::is_ascii_digit) + { + return false; + } + } else if mangled_name.len() != canonical_name.len() + 1 { + // If we don't expect a prefix but there is one, we need the + // #[link_name] attribute + return false; + } + + true + } +} diff --git a/vendor/bindgen/codegen/postprocessing/merge_extern_blocks.rs b/vendor/bindgen/codegen/postprocessing/merge_extern_blocks.rs new file mode 100644 index 00000000000000..e0f6a34baa2284 --- /dev/null +++ b/vendor/bindgen/codegen/postprocessing/merge_extern_blocks.rs @@ -0,0 +1,72 @@ +use syn::{ + visit_mut::{visit_file_mut, visit_item_mod_mut, VisitMut}, + File, Item, ItemForeignMod, ItemMod, +}; + +pub(super) fn merge_extern_blocks(file: &mut File) { + Visitor.visit_file_mut(file); +} + +struct Visitor; + +impl VisitMut for Visitor { + fn visit_file_mut(&mut self, file: &mut File) { + visit_items(&mut file.items); + visit_file_mut(self, file); + } + + fn visit_item_mod_mut(&mut self, item_mod: &mut ItemMod) { + if let Some((_, ref mut items)) = item_mod.content { + visit_items(items); + } + visit_item_mod_mut(self, item_mod); + } +} + +fn visit_items(items: &mut Vec) { + // Keep all the extern blocks in a different `Vec` for faster search. + let mut extern_blocks = Vec::::new(); + + for item in std::mem::take(items) { + if let Item::ForeignMod(ItemForeignMod { + attrs, + abi, + brace_token, + unsafety, + items: extern_block_items, + }) = item + { + let mut exists = false; + for extern_block in &mut extern_blocks { + // Check if there is a extern block with the same ABI and + // attributes. + if extern_block.attrs == attrs && extern_block.abi == abi { + // Merge the items of the two blocks. + extern_block.items.extend_from_slice(&extern_block_items); + exists = true; + break; + } + } + // If no existing extern block had the same ABI and attributes, store + // it. 
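            // Before/after sketch of what this pass achieves (illustration
            // only, with hypothetical items): two blocks such as
            //     extern "C" { fn a(); }
            //     extern "C" { fn b(); }
            // share the same ABI and attributes, so they end up merged into
            //     extern "C" { fn a(); fn b(); }
            // which keeps the generated bindings shorter and easier to scan.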
+ if !exists { + extern_blocks.push(ItemForeignMod { + attrs, + abi, + brace_token, + unsafety, + items: extern_block_items, + }); + } + } else { + // If the item is not an extern block, we don't have to do anything and just + // push it back. + items.push(item); + } + } + + // Move all the extern blocks alongside the rest of the items. + for extern_block in extern_blocks { + items.push(Item::ForeignMod(extern_block)); + } +} diff --git a/vendor/bindgen/codegen/postprocessing/mod.rs b/vendor/bindgen/codegen/postprocessing/mod.rs new file mode 100644 index 00000000000000..964169852100a1 --- /dev/null +++ b/vendor/bindgen/codegen/postprocessing/mod.rs @@ -0,0 +1,57 @@ +use proc_macro2::TokenStream; +use quote::ToTokens; +use syn::{parse2, File}; + +use crate::BindgenOptions; + +mod merge_extern_blocks; +mod sort_semantically; + +use merge_extern_blocks::merge_extern_blocks; +use sort_semantically::sort_semantically; + +struct PostProcessingPass { + should_run: fn(&BindgenOptions) -> bool, + run: fn(&mut File), +} + +// TODO: This can be a const fn when mutable references are allowed in const +// context. +macro_rules! pass { + ($pass:ident) => { + PostProcessingPass { + should_run: |options| options.$pass, + run: |file| $pass(file), + } + }; +} + +const PASSES: &[PostProcessingPass] = + &[pass!(merge_extern_blocks), pass!(sort_semantically)]; + +pub(crate) fn postprocessing( + items: Vec, + options: &BindgenOptions, +) -> TokenStream { + let items = items.into_iter().collect(); + let require_syn = PASSES.iter().any(|pass| (pass.should_run)(options)); + + if !require_syn { + return items; + } + + // This syn business is a hack, for now. This means that we are re-parsing already + // generated code using `syn` (as opposed to `quote`) because `syn` provides us more + // control over the elements. + // The `unwrap` here is deliberate because bindgen should generate valid rust items at all + // times. 
+ let mut file = parse2::(items).unwrap(); + + for pass in PASSES { + if (pass.should_run)(options) { + (pass.run)(&mut file); + } + } + + file.into_token_stream() +} diff --git a/vendor/bindgen/codegen/postprocessing/sort_semantically.rs b/vendor/bindgen/codegen/postprocessing/sort_semantically.rs new file mode 100644 index 00000000000000..e9bb5dc308a3e6 --- /dev/null +++ b/vendor/bindgen/codegen/postprocessing/sort_semantically.rs @@ -0,0 +1,46 @@ +use syn::{ + visit_mut::{visit_file_mut, visit_item_mod_mut, VisitMut}, + File, Item, ItemMod, +}; + +pub(super) fn sort_semantically(file: &mut File) { + Visitor.visit_file_mut(file); +} + +struct Visitor; + +impl VisitMut for Visitor { + fn visit_file_mut(&mut self, file: &mut File) { + visit_items(&mut file.items); + visit_file_mut(self, file); + } + + fn visit_item_mod_mut(&mut self, item_mod: &mut ItemMod) { + if let Some((_, ref mut items)) = item_mod.content { + visit_items(items); + } + visit_item_mod_mut(self, item_mod); + } +} + +fn visit_items(items: &mut [Item]) { + items.sort_by_key(|item| match item { + Item::Type(_) => 0, + Item::Struct(_) => 1, + Item::Const(_) => 2, + Item::Fn(_) => 3, + Item::Enum(_) => 4, + Item::Union(_) => 5, + Item::Static(_) => 6, + Item::Trait(_) => 7, + Item::TraitAlias(_) => 8, + Item::Impl(_) => 9, + Item::Mod(_) => 10, + Item::Use(_) => 11, + Item::Verbatim(_) => 12, + Item::ExternCrate(_) => 13, + Item::ForeignMod(_) => 14, + Item::Macro(_) => 15, + _ => 18, + }); +} diff --git a/vendor/bindgen/codegen/serialize.rs b/vendor/bindgen/codegen/serialize.rs new file mode 100644 index 00000000000000..9af48aa8ffed80 --- /dev/null +++ b/vendor/bindgen/codegen/serialize.rs @@ -0,0 +1,443 @@ +use std::io::Write; + +use crate::callbacks::IntKind; + +use crate::ir::comp::CompKind; +use crate::ir::context::{BindgenContext, TypeId}; +use crate::ir::function::{Function, FunctionKind}; +use crate::ir::item::Item; +use crate::ir::item::ItemCanonicalName; +use crate::ir::item_kind::ItemKind; +use crate::ir::ty::{FloatKind, Type, TypeKind}; + +use super::{CodegenError, WrapAsVariadic}; + +fn get_loc(item: &Item) -> String { + item.location() + .map_or_else(|| "unknown".to_owned(), |x| x.to_string()) +} + +pub(super) trait CSerialize<'a> { + type Extra; + + fn serialize( + &self, + ctx: &BindgenContext, + extra: Self::Extra, + stack: &mut Vec, + writer: &mut W, + ) -> Result<(), CodegenError>; +} + +impl<'a> CSerialize<'a> for Item { + type Extra = &'a Option; + + fn serialize( + &self, + ctx: &BindgenContext, + extra: Self::Extra, + stack: &mut Vec, + writer: &mut W, + ) -> Result<(), CodegenError> { + match self.kind() { + ItemKind::Function(func) => { + func.serialize(ctx, (self, extra), stack, writer) + } + kind => Err(CodegenError::Serialize { + msg: format!("Cannot serialize item kind {kind:?}"), + loc: get_loc(self), + }), + } + } +} + +impl<'a> CSerialize<'a> for Function { + type Extra = (&'a Item, &'a Option); + + fn serialize( + &self, + ctx: &BindgenContext, + (item, wrap_as_variadic): Self::Extra, + stack: &mut Vec, + writer: &mut W, + ) -> Result<(), CodegenError> { + if self.kind() != FunctionKind::Function { + return Err(CodegenError::Serialize { + msg: format!( + "Cannot serialize function kind {:?}", + self.kind(), + ), + loc: get_loc(item), + }); + } + + let TypeKind::Function(signature) = + ctx.resolve_type(self.signature()).kind() + else { + unreachable!() + }; + + assert!(!signature.is_variadic()); + + let name = self.name(); + + // Function arguments stored as `(name, type_id)` tuples. 
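        // For orientation, a rough sketch of the C this serializer ultimately
        // emits for a static inline function (hypothetical declaration; the
        // default wrapper suffix is assumed here):
        //
        //     static inline int add(int a, int b);                 /* original header */
        //     int add__extern(int a, int b) { return add(a, b); }  /* generated wrapper */
        //
        // The wrapper has external linkage, so the generated Rust bindings can
        // link against it even though `add` itself has no symbol outside its
        // translation unit.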
+ let args = { + let mut count = 0; + + let idx_to_prune = wrap_as_variadic.as_ref().map( + |WrapAsVariadic { + idx_of_va_list_arg, .. + }| *idx_of_va_list_arg, + ); + + signature + .argument_types() + .iter() + .cloned() + .enumerate() + .filter_map(|(idx, (opt_name, type_id))| { + if Some(idx) == idx_to_prune { + None + } else { + Some(( + opt_name.unwrap_or_else(|| { + let name = format!("arg_{count}"); + count += 1; + name + }), + type_id, + )) + } + }) + .collect::>() + }; + + // The name used for the wrapper self. + let wrap_name = format!("{name}{}", ctx.wrap_static_fns_suffix()); + + // The function's return type + let (ret_item, ret_ty) = { + let type_id = signature.return_type(); + let ret_item = ctx.resolve_item(type_id); + let ret_ty = ret_item.expect_type(); + + // Write `ret_ty`. + ret_ty.serialize(ctx, ret_item, stack, writer)?; + + (ret_item, ret_ty) + }; + + const INDENT: &str = " "; + + // Write `wrap_name(args`. + write!(writer, " {wrap_name}(")?; + serialize_args(&args, ctx, writer)?; + + if wrap_as_variadic.is_none() { + // Write `) { name(` if the function returns void and `) { return name(` if it does not. + if ret_ty.is_void() { + write!(writer, ") {{ {name}(")?; + } else { + write!(writer, ") {{ return {name}(")?; + } + } else { + // Write `, ...) {` + writeln!(writer, ", ...) {{")?; + + // Declare the return type `RET_TY ret;` if their is a need to do so + if !ret_ty.is_void() { + write!(writer, "{INDENT}")?; + ret_ty.serialize(ctx, ret_item, stack, writer)?; + writeln!(writer, " ret;")?; + } + + // Setup va_list + writeln!(writer, "{INDENT}va_list ap;\n")?; + writeln!( + writer, + "{INDENT}va_start(ap, {});", + args.last().unwrap().0 + )?; + + write!(writer, "{INDENT}")?; + // Write `ret = name(` or `name(` depending if the function returns something + if !ret_ty.is_void() { + write!(writer, "ret = ")?; + } + write!(writer, "{name}(")?; + } + + // Get the arguments names and insert at the right place if necessary `ap` + let mut args: Vec<_> = args.into_iter().map(|(name, _)| name).collect(); + if let Some(WrapAsVariadic { + idx_of_va_list_arg, .. + }) = wrap_as_variadic + { + args.insert(*idx_of_va_list_arg, "ap".to_owned()); + } + + // Write `arg_names);`. 
+ serialize_sep(", ", args.iter(), ctx, writer, |name, _, buf| { + write!(buf, "{name}").map_err(From::from) + })?; + #[rustfmt::skip] + write!(writer, ");{}", if wrap_as_variadic.is_none() { " " } else { "\n" })?; + + if wrap_as_variadic.is_some() { + // End va_list and return the result if their is one + writeln!(writer, "{INDENT}va_end(ap);")?; + if !ret_ty.is_void() { + writeln!(writer, "{INDENT}return ret;")?; + } + } + + writeln!(writer, "}}")?; + + Ok(()) + } +} + +impl CSerialize<'_> for TypeId { + type Extra = (); + + fn serialize( + &self, + ctx: &BindgenContext, + (): Self::Extra, + stack: &mut Vec, + writer: &mut W, + ) -> Result<(), CodegenError> { + let item = ctx.resolve_item(*self); + item.expect_type().serialize(ctx, item, stack, writer) + } +} + +impl<'a> CSerialize<'a> for Type { + type Extra = &'a Item; + + fn serialize( + &self, + ctx: &BindgenContext, + item: Self::Extra, + stack: &mut Vec, + writer: &mut W, + ) -> Result<(), CodegenError> { + match self.kind() { + TypeKind::Void => { + if self.is_const() { + write!(writer, "const ")?; + } + write!(writer, "void")?; + } + TypeKind::NullPtr => { + if self.is_const() { + write!(writer, "const ")?; + } + write!(writer, "nullptr_t")?; + } + TypeKind::Int(int_kind) => { + if self.is_const() { + write!(writer, "const ")?; + } + match int_kind { + IntKind::Bool => write!(writer, "bool")?, + IntKind::SChar => write!(writer, "signed char")?, + IntKind::UChar => write!(writer, "unsigned char")?, + IntKind::WChar => write!(writer, "wchar_t")?, + IntKind::Short => write!(writer, "short")?, + IntKind::UShort => write!(writer, "unsigned short")?, + IntKind::Int => write!(writer, "int")?, + IntKind::UInt => write!(writer, "unsigned int")?, + IntKind::Long => write!(writer, "long")?, + IntKind::ULong => write!(writer, "unsigned long")?, + IntKind::LongLong => write!(writer, "long long")?, + IntKind::ULongLong => write!(writer, "unsigned long long")?, + IntKind::Char { .. 
} => write!(writer, "char")?, + int_kind => { + return Err(CodegenError::Serialize { + msg: format!( + "Cannot serialize integer kind {int_kind:?}" + ), + loc: get_loc(item), + }) + } + } + } + TypeKind::Float(float_kind) => { + if self.is_const() { + write!(writer, "const ")?; + } + match float_kind { + FloatKind::Float16 => write!(writer, "_Float16")?, + FloatKind::Float => write!(writer, "float")?, + FloatKind::Double => write!(writer, "double")?, + FloatKind::LongDouble => write!(writer, "long double")?, + FloatKind::Float128 => write!(writer, "__float128")?, + } + } + TypeKind::Complex(float_kind) => { + if self.is_const() { + write!(writer, "const ")?; + } + match float_kind { + FloatKind::Float16 => write!(writer, "_Float16 complex")?, + FloatKind::Float => write!(writer, "float complex")?, + FloatKind::Double => write!(writer, "double complex")?, + FloatKind::LongDouble => { + write!(writer, "long double complex")?; + } + FloatKind::Float128 => write!(writer, "__complex128")?, + } + } + TypeKind::Alias(type_id) => { + if let Some(name) = self.name() { + if self.is_const() { + write!(writer, "const {name}")?; + } else { + write!(writer, "{name}")?; + } + } else { + type_id.serialize(ctx, (), stack, writer)?; + } + } + TypeKind::Array(type_id, length) => { + type_id.serialize(ctx, (), stack, writer)?; + write!(writer, " [{length}]")?; + } + TypeKind::Function(signature) => { + if self.is_const() { + stack.push("const ".to_string()); + } + + signature.return_type().serialize( + ctx, + (), + &mut vec![], + writer, + )?; + + write!(writer, " (")?; + while let Some(item) = stack.pop() { + write!(writer, "{item}")?; + } + write!(writer, ")")?; + + let args = signature.argument_types(); + if args.is_empty() { + write!(writer, " (void)")?; + } else { + write!(writer, " (")?; + serialize_sep( + ", ", + args.iter(), + ctx, + writer, + |(name, type_id), ctx, buf| { + let mut stack = vec![]; + if let Some(name) = name { + stack.push(name.clone()); + } + type_id.serialize(ctx, (), &mut stack, buf) + }, + )?; + write!(writer, ")")?; + } + } + TypeKind::ResolvedTypeRef(type_id) => { + if self.is_const() { + write!(writer, "const ")?; + } + type_id.serialize(ctx, (), stack, writer)?; + } + TypeKind::Pointer(type_id) => { + if self.is_const() { + stack.push("*const ".to_owned()); + } else { + stack.push("*".to_owned()); + } + type_id.serialize(ctx, (), stack, writer)?; + } + TypeKind::Comp(comp_info) => { + if self.is_const() { + write!(writer, "const ")?; + } + + let name = item.canonical_name(ctx); + + match comp_info.kind() { + CompKind::Struct => write!(writer, "struct {name}")?, + CompKind::Union => write!(writer, "union {name}")?, + } + } + TypeKind::Enum(_enum_ty) => { + if self.is_const() { + write!(writer, "const ")?; + } + + let name = item.canonical_name(ctx); + write!(writer, "enum {name}")?; + } + ty => { + return Err(CodegenError::Serialize { + msg: format!("Cannot serialize type kind {ty:?}"), + loc: get_loc(item), + }) + } + } + + if !stack.is_empty() { + write!(writer, " ")?; + while let Some(item) = stack.pop() { + write!(writer, "{item}")?; + } + } + + Ok(()) + } +} + +fn serialize_args( + args: &[(String, TypeId)], + ctx: &BindgenContext, + writer: &mut W, +) -> Result<(), CodegenError> { + if args.is_empty() { + write!(writer, "void")?; + } else { + serialize_sep( + ", ", + args.iter(), + ctx, + writer, + |(name, type_id), ctx, buf| { + type_id.serialize(ctx, (), &mut vec![name.clone()], buf) + }, + )?; + } + + Ok(()) +} + +fn serialize_sep< + W: Write, + F: FnMut(I::Item, 
&BindgenContext, &mut W) -> Result<(), CodegenError>, + I: Iterator, +>( + sep: &str, + mut iter: I, + ctx: &BindgenContext, + buf: &mut W, + mut f: F, +) -> Result<(), CodegenError> { + if let Some(item) = iter.next() { + f(item, ctx, buf)?; + let sep = sep.as_bytes(); + for item in iter { + buf.write_all(sep)?; + f(item, ctx, buf)?; + } + } + + Ok(()) +} diff --git a/vendor/bindgen/codegen/struct_layout.rs b/vendor/bindgen/codegen/struct_layout.rs new file mode 100644 index 00000000000000..0d2e6a05c57ac0 --- /dev/null +++ b/vendor/bindgen/codegen/struct_layout.rs @@ -0,0 +1,458 @@ +//! Helpers for code generation that need struct layout + +use super::helpers; + +use crate::ir::comp::CompInfo; +use crate::ir::context::BindgenContext; +use crate::ir::layout::Layout; +use crate::ir::ty::{Type, TypeKind}; +use crate::FieldVisibilityKind; +use proc_macro2::{Ident, Span}; +use std::cmp; + +const MAX_GUARANTEED_ALIGN: usize = 8; + +/// Trace the layout of struct. +#[derive(Debug)] +pub(crate) struct StructLayoutTracker<'a> { + name: &'a str, + ctx: &'a BindgenContext, + comp: &'a CompInfo, + is_packed: bool, + known_type_layout: Option, + is_rust_union: bool, + can_copy_union_fields: bool, + latest_offset: usize, + padding_count: usize, + latest_field_layout: Option, + max_field_align: usize, + last_field_was_bitfield: bool, + visibility: FieldVisibilityKind, + last_field_was_flexible_array: bool, +} + +/// Returns a size aligned to a given value. +pub(crate) fn align_to(size: usize, align: usize) -> usize { + if align == 0 { + return size; + } + + let rem = size % align; + if rem == 0 { + return size; + } + + size + align - rem +} + +/// Returns the lower power of two byte count that can hold at most n bits. +pub(crate) fn bytes_from_bits_pow2(mut n: usize) -> usize { + if n == 0 { + return 0; + } + + if n <= 8 { + return 1; + } + + if !n.is_power_of_two() { + n = n.next_power_of_two(); + } + + n / 8 +} + +#[test] +fn test_align_to() { + assert_eq!(align_to(1, 1), 1); + assert_eq!(align_to(1, 2), 2); + assert_eq!(align_to(1, 4), 4); + assert_eq!(align_to(5, 1), 5); + assert_eq!(align_to(17, 4), 20); +} + +#[test] +fn test_bytes_from_bits_pow2() { + assert_eq!(bytes_from_bits_pow2(0), 0); + for i in 1..9 { + assert_eq!(bytes_from_bits_pow2(i), 1); + } + for i in 9..17 { + assert_eq!(bytes_from_bits_pow2(i), 2); + } + for i in 17..33 { + assert_eq!(bytes_from_bits_pow2(i), 4); + } +} + +impl<'a> StructLayoutTracker<'a> { + pub(crate) fn new( + ctx: &'a BindgenContext, + comp: &'a CompInfo, + ty: &'a Type, + name: &'a str, + visibility: FieldVisibilityKind, + is_packed: bool, + ) -> Self { + let known_type_layout = ty.layout(ctx); + let (is_rust_union, can_copy_union_fields) = + comp.is_rust_union(ctx, known_type_layout.as_ref(), name); + StructLayoutTracker { + name, + ctx, + comp, + visibility, + is_packed, + known_type_layout, + is_rust_union, + can_copy_union_fields, + latest_offset: 0, + padding_count: 0, + latest_field_layout: None, + max_field_align: 0, + last_field_was_bitfield: false, + last_field_was_flexible_array: false, + } + } + + pub(crate) fn can_copy_union_fields(&self) -> bool { + self.can_copy_union_fields + } + + pub(crate) fn is_rust_union(&self) -> bool { + self.is_rust_union + } + + pub(crate) fn saw_flexible_array(&mut self) { + self.last_field_was_flexible_array = true; + } + + pub(crate) fn saw_vtable(&mut self) { + debug!("saw vtable for {}", self.name); + + let ptr_size = self.ctx.target_pointer_size(); + self.latest_offset += ptr_size; + self.latest_field_layout = 
Some(Layout::new(ptr_size, ptr_size)); + self.max_field_align = ptr_size; + } + + pub(crate) fn saw_base(&mut self, base_ty: &Type) { + debug!("saw base for {}", self.name); + if let Some(layout) = base_ty.layout(self.ctx) { + self.align_to_latest_field(layout); + + self.latest_offset += self.padding_bytes(layout) + layout.size; + self.latest_field_layout = Some(layout); + self.max_field_align = cmp::max(self.max_field_align, layout.align); + } + } + + pub(crate) fn saw_bitfield_unit(&mut self, layout: Layout) { + debug!("saw bitfield unit for {}: {layout:?}", self.name); + + self.align_to_latest_field(layout); + + self.latest_offset += layout.size; + + debug!( + "Offset: : {} -> {}", + self.latest_offset - layout.size, + self.latest_offset + ); + + self.latest_field_layout = Some(layout); + self.last_field_was_bitfield = true; + self.max_field_align = cmp::max(self.max_field_align, layout.align); + } + + /// Returns a padding field if necessary for a given new field _before_ + /// adding that field. + pub(crate) fn saw_field( + &mut self, + field_name: &str, + field_ty: &Type, + field_offset: Option, + ) -> Option { + let mut field_layout = field_ty.layout(self.ctx)?; + + if let TypeKind::Array(inner, len) = + *field_ty.canonical_type(self.ctx).kind() + { + // FIXME(emilio): As an _ultra_ hack, we correct the layout returned + // by arrays of structs that have a bigger alignment than what we + // can support. + // + // This means that the structs in the array are super-unsafe to + // access, since they won't be properly aligned, but there's not too + // much we can do about it. + if let Some(layout) = self.ctx.resolve_type(inner).layout(self.ctx) + { + if layout.align > MAX_GUARANTEED_ALIGN { + field_layout.size = + align_to(layout.size, layout.align) * len; + field_layout.align = MAX_GUARANTEED_ALIGN; + } + } + } + self.saw_field_with_layout(field_name, field_layout, field_offset) + } + + pub(crate) fn saw_field_with_layout( + &mut self, + field_name: &str, + field_layout: Layout, + field_offset: Option, + ) -> Option { + let will_merge_with_bitfield = self.align_to_latest_field(field_layout); + + let is_union = self.comp.is_union(); + let padding_bytes = match field_offset { + Some(offset) if offset / 8 > self.latest_offset => { + offset / 8 - self.latest_offset + } + _ => { + if will_merge_with_bitfield || + field_layout.align == 0 || + is_union + { + 0 + } else if !self.is_packed { + self.padding_bytes(field_layout) + } else if let Some(mut l) = self.known_type_layout { + if field_layout.align < l.align { + l.align = field_layout.align; + } + self.padding_bytes(l) + } else { + 0 + } + } + }; + + self.latest_offset += padding_bytes; + + let padding_layout = if self.is_packed || is_union { + None + } else { + let force_padding = self.ctx.options().force_explicit_padding; + + // Otherwise the padding is useless. 
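                // Worked example (hypothetical struct, not from the upstream
                // tests): for `struct { char c; int x; }` the tracker places
                // `c` at offset 0 and then needs 3 padding bytes so that `x`
                // starts on its 4-byte alignment boundary. Those 3 bytes only
                // become an explicit `__bindgen_padding_0: [u8; 3]` field when
                // force_explicit_padding is set, since 3 < align_of(int).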
+ let need_padding = force_padding || + padding_bytes >= field_layout.align || + field_layout.align > MAX_GUARANTEED_ALIGN; + + debug!( + "Offset: : {} -> {}", + self.latest_offset - padding_bytes, + self.latest_offset + ); + + debug!( + "align field {field_name} to {}/{} with {padding_bytes} padding bytes {field_layout:?}", + self.latest_offset, + field_offset.unwrap_or(0) / 8, + ); + + let padding_align = if force_padding { + 1 + } else { + cmp::min(field_layout.align, MAX_GUARANTEED_ALIGN) + }; + + if need_padding && padding_bytes != 0 { + Some(Layout::new(padding_bytes, padding_align)) + } else { + None + } + }; + + if is_union { + self.latest_offset = + cmp::max(self.latest_offset, field_layout.size); + } else { + self.latest_offset += field_layout.size; + } + self.latest_field_layout = Some(field_layout); + self.max_field_align = + cmp::max(self.max_field_align, field_layout.align); + self.last_field_was_bitfield = false; + + debug!( + "Offset: {field_name}: {} -> {}", + self.latest_offset - field_layout.size, + self.latest_offset + ); + + padding_layout.map(|layout| self.padding_field(layout)) + } + + pub(crate) fn add_tail_padding( + &mut self, + comp_name: &str, + comp_layout: Layout, + ) -> Option { + // Only emit an padding field at the end of a struct if the + // user configures explicit padding. + if !self.ctx.options().force_explicit_padding { + return None; + } + + // Padding doesn't make sense for rust unions. + if self.is_rust_union { + return None; + } + + // Also doesn't make sense for structs with flexible array members + if self.last_field_was_flexible_array { + return None; + } + + if self.latest_offset == comp_layout.size { + // This struct does not contain tail padding. + return None; + } + + trace!( + "need a tail padding field for {comp_name}: offset {} -> size {}", + self.latest_offset, + comp_layout.size + ); + let size = comp_layout.size - self.latest_offset; + Some(self.padding_field(Layout::new(size, 0))) + } + + pub(crate) fn pad_struct( + &mut self, + layout: Layout, + ) -> Option { + debug!("pad_struct:\n\tself = {self:#?}\n\tlayout = {layout:#?}"); + + if layout.size < self.latest_offset { + warn!( + "Calculated wrong layout for {}, too more {} bytes", + self.name, + self.latest_offset - layout.size + ); + return None; + } + + let padding_bytes = layout.size - self.latest_offset; + if padding_bytes == 0 { + return None; + } + + let repr_align = true; + + // We always pad to get to the correct size if the struct is one of + // those we can't align properly. + // + // Note that if the last field we saw was a bitfield, we may need to pad + // regardless, because bitfields don't respect alignment as strictly as + // other fields. + if padding_bytes >= layout.align || + (self.last_field_was_bitfield && + padding_bytes >= self.latest_field_layout.unwrap().align) || + (!repr_align && layout.align > MAX_GUARANTEED_ALIGN) + { + let layout = if self.is_packed { + Layout::new(padding_bytes, 1) + } else if self.last_field_was_bitfield || + layout.align > MAX_GUARANTEED_ALIGN + { + // We've already given up on alignment here. 
+ Layout::for_size(self.ctx, padding_bytes) + } else { + Layout::new(padding_bytes, layout.align) + }; + + debug!("pad bytes to struct {}, {layout:?}", self.name); + + Some(self.padding_field(layout)) + } else { + None + } + } + + pub(crate) fn requires_explicit_align(&self, layout: Layout) -> bool { + let repr_align = true; + + // Always force explicit repr(align) for stuff more than 16-byte aligned + // to work-around https://github.com/rust-lang/rust/issues/54341. + // + // Worst-case this just generates redundant alignment attributes. + if repr_align && self.max_field_align >= 16 { + return true; + } + + if self.max_field_align >= layout.align { + return false; + } + + // We can only generate up-to a 8-bytes of alignment unless we support + // repr(align). + repr_align || layout.align <= MAX_GUARANTEED_ALIGN + } + + fn padding_bytes(&self, layout: Layout) -> usize { + align_to(self.latest_offset, layout.align) - self.latest_offset + } + + fn padding_field(&mut self, layout: Layout) -> proc_macro2::TokenStream { + let ty = helpers::blob(self.ctx, layout, false); + let padding_count = self.padding_count; + + self.padding_count += 1; + + let padding_field_name = Ident::new( + &format!("__bindgen_padding_{padding_count}"), + Span::call_site(), + ); + + self.max_field_align = cmp::max(self.max_field_align, layout.align); + + let vis = super::access_specifier(self.visibility); + + quote! { + #vis #padding_field_name : #ty , + } + } + + /// Returns whether the new field is known to merge with a bitfield. + /// + /// This is just to avoid doing the same check also in `pad_field`. + fn align_to_latest_field(&mut self, new_field_layout: Layout) -> bool { + if self.is_packed { + // Skip to align fields when packed. + return false; + } + + let Some(layout) = self.latest_field_layout else { + return false; + }; + + // If it was, we may or may not need to align, depending on what the + // current field alignment and the bitfield size and alignment are. + debug!( + "align_to_bitfield? {}: {layout:?} {new_field_layout:?}", + self.last_field_was_bitfield, + ); + + // Avoid divide-by-zero errors if align is 0. + let align = cmp::max(1, layout.align); + + if self.last_field_was_bitfield && + new_field_layout.align <= layout.size % align && + new_field_layout.size <= layout.size % align + { + // The new field will be coalesced into some of the remaining bits. + // + // FIXME(emilio): I think this may not catch everything? + debug!("Will merge with bitfield"); + return true; + } + + // Else, just align the obvious way. + self.latest_offset += self.padding_bytes(layout); + false + } +} diff --git a/vendor/bindgen/deps.rs b/vendor/bindgen/deps.rs new file mode 100644 index 00000000000000..3f95ac1e89e5ac --- /dev/null +++ b/vendor/bindgen/deps.rs @@ -0,0 +1,61 @@ +/// Generating build depfiles from parsed bindings. +use std::{collections::BTreeSet, path::PathBuf}; + +#[derive(Clone, Debug)] +pub(crate) struct DepfileSpec { + pub output_module: String, + pub depfile_path: PathBuf, +} + +impl DepfileSpec { + pub fn write(&self, deps: &BTreeSet>) -> std::io::Result<()> { + std::fs::write(&self.depfile_path, self.to_string(deps)) + } + + fn to_string(&self, deps: &BTreeSet>) -> String { + // Transforms a string by escaping spaces and backslashes. 
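        // The finished depfile is a single Makefile-style rule, e.g. with
        // hypothetical paths:
        //     bindings.rs: /usr/include/stdlib.h wrapper\ dir/wrapper.h
        // i.e. the output module, a colon, and every header dependency, with
        // spaces and backslashes escaped so make-compatible tools can re-run
        // the build when a header changes.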
+ let escape = |s: &str| s.replace('\\', "\\\\").replace(' ', "\\ "); + + let mut buf = format!("{}:", escape(&self.output_module)); + for file in deps { + buf = format!("{buf} {}", escape(file)); + } + buf + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn escaping_depfile() { + let spec = DepfileSpec { + output_module: "Mod Name".to_owned(), + depfile_path: PathBuf::new(), + }; + + let deps: BTreeSet<_> = vec![ + r"/absolute/path".into(), + r"C:\win\absolute\path".into(), + r"../relative/path".into(), + r"..\win\relative\path".into(), + r"../path/with spaces/in/it".into(), + r"..\win\path\with spaces\in\it".into(), + r"path\with/mixed\separators".into(), + ] + .into_iter() + .collect(); + assert_eq!( + spec.to_string(&deps), + "Mod\\ Name: \ + ../path/with\\ spaces/in/it \ + ../relative/path \ + ..\\\\win\\\\path\\\\with\\ spaces\\\\in\\\\it \ + ..\\\\win\\\\relative\\\\path \ + /absolute/path \ + C:\\\\win\\\\absolute\\\\path \ + path\\\\with/mixed\\\\separators" + ); + } +} diff --git a/vendor/bindgen/diagnostics.rs b/vendor/bindgen/diagnostics.rs new file mode 100644 index 00000000000000..f22402ac0e541a --- /dev/null +++ b/vendor/bindgen/diagnostics.rs @@ -0,0 +1,146 @@ +//! Types and function used to emit pretty diagnostics for `bindgen`. +//! +//! The entry point of this module is the [`Diagnostic`] type. + +use std::fmt::Write; +use std::io::{self, BufRead, BufReader}; +use std::{borrow::Cow, fs::File}; + +use annotate_snippets::{Renderer, Snippet}; + +pub(crate) use annotate_snippets::Level; + +/// A `bindgen` diagnostic. +#[derive(Default)] +pub(crate) struct Diagnostic<'a> { + title: Option<(Cow<'a, str>, Level)>, + slices: Vec>, + footer: Vec<(Cow<'a, str>, Level)>, +} + +impl<'a> Diagnostic<'a> { + /// Add a title to the diagnostic and set its type. + pub(crate) fn with_title( + &mut self, + title: impl Into>, + level: Level, + ) -> &mut Self { + self.title = Some((title.into(), level)); + self + } + + /// Add a slice of source code to the diagnostic. + pub(crate) fn add_slice(&mut self, slice: Slice<'a>) -> &mut Self { + self.slices.push(slice); + self + } + + /// Add a footer annotation to the diagnostic. This annotation will have its own type. + pub(crate) fn add_annotation( + &mut self, + msg: impl Into>, + level: Level, + ) -> &mut Self { + self.footer.push((msg.into(), level)); + self + } + + /// Print this diagnostic. + /// + /// The diagnostic is printed using `cargo:warning` if `bindgen` is being invoked by a build + /// script or using `eprintln` otherwise. + pub(crate) fn display(&self) { + std::thread_local! 
{ + static INVOKED_BY_BUILD_SCRIPT: bool = std::env::var_os("CARGO_CFG_TARGET_ARCH").is_some(); + } + + let mut footer = vec![]; + let mut slices = vec![]; + let snippet = if let Some((msg, level)) = &self.title { + (*level).title(msg) + } else { + return; + }; + + for (msg, level) in &self.footer { + footer.push((*level).title(msg)); + } + + // add additional info that this is generated by bindgen + // so as to not confuse with rustc warnings + footer.push( + Level::Info.title("This diagnostic was generated by bindgen."), + ); + + for slice in &self.slices { + if let Some(source) = &slice.source { + let mut snippet = Snippet::source(source) + .line_start(slice.line.unwrap_or_default()); + if let Some(origin) = &slice.filename { + snippet = snippet.origin(origin); + } + slices.push(snippet); + } + } + + let renderer = Renderer::styled(); + let dl = renderer.render(snippet.snippets(slices).footers(footer)); + + if INVOKED_BY_BUILD_SCRIPT.with(Clone::clone) { + // This is just a hack which hides the `warning:` added by cargo at the beginning of + // every line. This should be fine as our diagnostics already have a colorful title. + // FIXME (pvdrz): Could it be that this doesn't work in other languages? + let hide_warning = "\r \r"; + let string = dl.to_string(); + for line in string.lines() { + println!("cargo:warning={hide_warning}{line}"); + } + } else { + eprintln!("{dl}\n"); + } + } +} + +/// A slice of source code. +#[derive(Default)] +pub(crate) struct Slice<'a> { + source: Option>, + filename: Option, + line: Option, +} + +impl<'a> Slice<'a> { + /// Set the source code. + pub(crate) fn with_source( + &mut self, + source: impl Into>, + ) -> &mut Self { + self.source = Some(source.into()); + self + } + + /// Set the file, line and column. + pub(crate) fn with_location( + &mut self, + mut name: String, + line: usize, + col: usize, + ) -> &mut Self { + write!(name, ":{line}:{col}").expect("Writing to a string cannot fail"); + self.filename = Some(name); + self.line = Some(line); + self + } +} + +pub(crate) fn get_line( + filename: &str, + line: usize, +) -> io::Result> { + let file = BufReader::new(File::open(filename)?); + if let Some(line) = file.lines().nth(line.wrapping_sub(1)) { + return line.map(Some); + } + + Ok(None) +} diff --git a/vendor/bindgen/extra_assertions.rs b/vendor/bindgen/extra_assertions.rs new file mode 100644 index 00000000000000..8526fd42d2e915 --- /dev/null +++ b/vendor/bindgen/extra_assertions.rs @@ -0,0 +1,17 @@ +//! Macros for defining extra assertions that should only be checked in testing +//! and/or CI when the `__testing_only_extra_assertions` feature is enabled. + +/// Simple macro that forwards to assert! when using +/// `__testing_only_extra_assertions`. +macro_rules! extra_assert { + ( $cond:expr ) => { + if cfg!(feature = "__testing_only_extra_assertions") { + assert!($cond); + } + }; + ( $cond:expr , $( $arg:tt )+ ) => { + if cfg!(feature = "__testing_only_extra_assertions") { + assert!($cond, $( $arg )* ) + } + }; +} diff --git a/vendor/bindgen/features.rs b/vendor/bindgen/features.rs new file mode 100644 index 00000000000000..45ea893947188a --- /dev/null +++ b/vendor/bindgen/features.rs @@ -0,0 +1,570 @@ +//! Contains code for selecting features + +#![deny(unused_extern_crates)] +#![deny(clippy::missing_docs_in_private_items)] +#![allow(deprecated)] + +use std::str::FromStr; +use std::{fmt, io}; + +/// Represents the version of the Rust language to target. 
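// Usage sketch (assumed, not taken from this patch): a build script would
// typically pin the generated bindings to a known compiler instead of relying
// on auto-detection, e.g. `let target: RustTarget = "1.73".parse()?;`, and
// then hand that value to bindgen's builder via its `rust_target(...)` option.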
+#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord)] +#[repr(transparent)] +pub struct RustTarget(Version); + +impl RustTarget { + /// Create a new [`RustTarget`] for a stable release of Rust. + pub fn stable(minor: u64, patch: u64) -> Result { + let target = Self(Version::Stable(minor, patch)); + + if target < EARLIEST_STABLE_RUST { + return Err(InvalidRustTarget::TooEarly); + } + + Ok(target) + } + + const fn minor(&self) -> Option { + match self.0 { + Version::Nightly => None, + Version::Stable(minor, _) => Some(minor), + } + } + + const fn is_compatible(&self, other: &Self) -> bool { + match (self.0, other.0) { + (Version::Stable(minor, _), Version::Stable(other_minor, _)) => { + // We ignore the patch version number as they only include backwards compatible bug + // fixes. + minor >= other_minor + } + // Nightly is compatible with everything + (Version::Nightly, _) => true, + // No stable release is compatible with nightly + (Version::Stable { .. }, Version::Nightly) => false, + } + } +} + +impl Default for RustTarget { + fn default() -> Self { + // Bindgen from build script: default to generating bindings compatible + // with the Rust version currently performing this build. + #[cfg(not(feature = "__cli"))] + { + use std::env; + use std::iter; + use std::process::Command; + use std::sync::OnceLock; + + static CURRENT_RUST: OnceLock> = OnceLock::new(); + + if let Some(current_rust) = *CURRENT_RUST.get_or_init(|| { + let is_build_script = + env::var_os("CARGO_CFG_TARGET_ARCH").is_some(); + if !is_build_script { + return None; + } + + let rustc = env::var_os("RUSTC")?; + let rustc_wrapper = env::var_os("RUSTC_WRAPPER") + .filter(|wrapper| !wrapper.is_empty()); + let wrapped_rustc = + rustc_wrapper.iter().chain(iter::once(&rustc)); + + let mut is_clippy_driver = false; + loop { + let mut wrapped_rustc = wrapped_rustc.clone(); + let mut command = + Command::new(wrapped_rustc.next().unwrap()); + command.args(wrapped_rustc); + if is_clippy_driver { + command.arg("--rustc"); + } + command.arg("--version"); + + let output = command.output().ok()?; + let string = String::from_utf8(output.stdout).ok()?; + + // Version string like "rustc 1.100.0-beta.5 (f0e1d2c3b 2026-10-17)" + let last_line = string.lines().last().unwrap_or(&string); + let (program, rest) = last_line.trim().split_once(' ')?; + if program != "rustc" { + if program.starts_with("clippy") && !is_clippy_driver { + is_clippy_driver = true; + continue; + } + return None; + } + + let number = rest.split([' ', '-', '+']).next()?; + break RustTarget::from_str(number).ok(); + } + }) { + return current_rust; + } + } + + // Bindgen from CLI, or cannot determine compiler version: default to + // generating bindings compatible with the latest stable release of Rust + // that Bindgen knows about. 
+ LATEST_STABLE_RUST + } +} + +impl fmt::Display for RustTarget { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self.0 { + Version::Stable(minor, patch) => write!(f, "1.{minor}.{patch}"), + Version::Nightly => "nightly".fmt(f), + } + } +} + +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord)] +enum Version { + Stable(u64, u64), + Nightly, +} + +#[derive(Debug, PartialEq, Eq, Hash)] +pub enum InvalidRustTarget { + TooEarly, +} + +impl fmt::Display for InvalidRustTarget { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + Self::TooEarly => write!(f, "the earliest Rust version supported by bindgen is {EARLIEST_STABLE_RUST}"), + } + } +} + +/// This macro defines the Rust editions supported by bindgen. +macro_rules! define_rust_editions { + ($($variant:ident($value:literal) => $minor:literal,)*) => { + #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord)] + #[doc = "Represents Rust Edition for the generated bindings"] + pub enum RustEdition { + $( + #[doc = concat!("The ", stringify!($value), " edition of Rust.")] + $variant, + )* + } + + impl FromStr for RustEdition { + type Err = InvalidRustEdition; + + fn from_str(s: &str) -> Result { + match s { + $(stringify!($value) => Ok(Self::$variant),)* + _ => Err(InvalidRustEdition(s.to_owned())), + } + } + } + + impl fmt::Display for RustEdition { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + $(Self::$variant => stringify!($value).fmt(f),)* + } + } + } + + impl RustEdition { + pub(crate) const ALL: [Self; [$($value,)*].len()] = [$(Self::$variant,)*]; + + pub(crate) fn is_available(self, target: RustTarget) -> bool { + let Some(minor) = target.minor() else { + return true; + }; + + match self { + $(Self::$variant => $minor <= minor,)* + } + } + } + } +} + +#[derive(Debug)] +pub struct InvalidRustEdition(String); + +impl fmt::Display for InvalidRustEdition { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "\"{}\" is not a valid Rust edition", self.0) + } +} + +impl std::error::Error for InvalidRustEdition {} + +define_rust_editions! { + Edition2018(2018) => 31, + Edition2021(2021) => 56, + Edition2024(2024) => 85, +} + +impl RustTarget { + /// Returns the latest edition supported by this target. + pub(crate) fn latest_edition(self) -> RustEdition { + RustEdition::ALL + .iter() + .rev() + .find(|edition| edition.is_available(self)) + .copied() + .expect("bindgen should always support at least one edition") + } +} + +impl Default for RustEdition { + fn default() -> Self { + RustTarget::default().latest_edition() + } +} + +/// This macro defines the [`RustTarget`] and [`RustFeatures`] types. +macro_rules! define_rust_targets { + ( + Nightly => {$($nightly_feature:ident $(($nightly_edition:literal))|* $(: #$issue:literal)?),* $(,)?} $(,)? + $( + $variant:ident($minor:literal) => {$($feature:ident $(($edition:literal))|* $(: #$pull:literal)?),* $(,)?}, + )* + $(,)? 
+ ) => { + + impl RustTarget { + /// The nightly version of Rust, which introduces the following features:" + $(#[doc = concat!( + "- [`", stringify!($nightly_feature), "`]", + "(", $("https://github.com/rust-lang/rust/pull/", stringify!($issue),)* ")", + )])* + #[deprecated = "The use of this constant is deprecated, please use `RustTarget::nightly` instead."] + pub const Nightly: Self = Self::nightly(); + + /// The nightly version of Rust, which introduces the following features:" + $(#[doc = concat!( + "- [`", stringify!($nightly_feature), "`]", + "(", $("https://github.com/rust-lang/rust/pull/", stringify!($issue),)* ")", + )])* + pub const fn nightly() -> Self { + Self(Version::Nightly) + } + + $( + #[doc = concat!("Version 1.", stringify!($minor), " of Rust, which introduced the following features:")] + $(#[doc = concat!( + "- [`", stringify!($feature), "`]", + "(", $("https://github.com/rust-lang/rust/pull/", stringify!($pull),)* ")", + )])* + #[deprecated = "The use of this constant is deprecated, please use `RustTarget::stable` instead."] + pub const $variant: Self = Self(Version::Stable($minor, 0)); + )* + + const fn stable_releases() -> [(Self, u64); [$($minor,)*].len()] { + [$((Self::$variant, $minor),)*] + } + } + + #[derive(Debug, Copy, Clone, Eq, PartialEq, Hash)] + pub(crate) struct RustFeatures { + $($(pub(crate) $feature: bool,)*)* + $(pub(crate) $nightly_feature: bool,)* + } + + impl RustFeatures { + /// Compute the features that must be enabled in a specific Rust target with a specific edition. + pub(crate) fn new(target: RustTarget, edition: RustEdition) -> Self { + let mut features = Self { + $($($feature: false,)*)* + $($nightly_feature: false,)* + }; + + if target.is_compatible(&RustTarget::nightly()) { + $( + let editions: &[RustEdition] = &[$(stringify!($nightly_edition).parse::().ok().expect("invalid edition"),)*]; + + if editions.is_empty() || editions.contains(&edition) { + features.$nightly_feature = true; + } + )* + } + + $( + if target.is_compatible(&RustTarget::$variant) { + $( + let editions: &[RustEdition] = &[$(stringify!($edition).parse::().ok().expect("invalid edition"),)*]; + + if editions.is_empty() || editions.contains(&edition) { + features.$feature = true; + } + )* + } + )* + + features + } + } + }; +} + +// NOTE: When adding or removing features here, make sure to add the stabilization PR +// number for the feature if it has been stabilized or the tracking issue number if the feature is +// not stable. +define_rust_targets! 
{ + Nightly => { + vectorcall_abi: #124485, + ptr_metadata: #81513, + layout_for_ptr: #69835, + }, + Stable_1_82(82) => { + unsafe_extern_blocks: #127921, + }, + Stable_1_77(77) => { + offset_of: #106655, + literal_cstr(2021)|(2024): #117472, + }, + Stable_1_73(73) => { thiscall_abi: #42202 }, + Stable_1_71(71) => { c_unwind_abi: #106075 }, + Stable_1_68(68) => { abi_efiapi: #105795 }, + Stable_1_64(64) => { core_ffi_c: #94503 }, + Stable_1_51(51) => { + raw_ref_macros: #80886, + min_const_generics: #74878, + }, + Stable_1_59(59) => { const_cstr: #54745 }, + Stable_1_47(47) => { larger_arrays: #74060 }, + Stable_1_43(43) => { associated_constants: #68952 }, + Stable_1_40(40) => { non_exhaustive: #44109 }, + Stable_1_36(36) => { maybe_uninit: #60445 }, + Stable_1_33(33) => { repr_packed_n: #57049 }, +} + +/// Latest stable release of Rust that is supported by bindgen +pub const LATEST_STABLE_RUST: RustTarget = { + // FIXME: replace all this code by + // ``` + // RustTarget::stable_releases() + // .into_iter() + // .max_by_key(|(_, m)| m) + // .map(|(t, _)| t) + // .unwrap() + // ``` + // once those operations can be used in constants. + let targets = RustTarget::stable_releases(); + + let mut i = 0; + let mut latest_target = None; + let mut latest_minor = 0; + + while i < targets.len() { + let (target, minor) = targets[i]; + + if latest_minor < minor { + latest_minor = minor; + latest_target = Some(target); + } + + i += 1; + } + + match latest_target { + Some(target) => target, + None => unreachable!(), + } +}; + +/// Earliest stable release of Rust that is supported by bindgen +pub const EARLIEST_STABLE_RUST: RustTarget = { + // FIXME: replace all this code by + // ``` + // RustTarget::stable_releases() + // .into_iter() + // .min_by_key(|(_, m)| m) + // .map(|(t, _)| t) + // .unwrap_or(LATEST_STABLE_RUST) + // ``` + // once those operations can be used in constants. + let targets = RustTarget::stable_releases(); + + let mut i = 0; + let mut earliest_target = None; + let Some(mut earliest_minor) = LATEST_STABLE_RUST.minor() else { + unreachable!() + }; + + while i < targets.len() { + let (target, minor) = targets[i]; + + if earliest_minor > minor { + earliest_minor = minor; + earliest_target = Some(target); + } + + i += 1; + } + + match earliest_target { + Some(target) => target, + None => unreachable!(), + } +}; + +fn invalid_input(input: &str, msg: impl fmt::Display) -> io::Error { + io::Error::new( + io::ErrorKind::InvalidInput, + format!("\"{input}\" is not a valid Rust target, {msg}"), + ) +} + +impl FromStr for RustTarget { + type Err = io::Error; + + fn from_str(input: &str) -> Result { + if input == "nightly" { + return Ok(Self::Nightly); + } + + let Some((major_str, tail)) = input.split_once('.') else { + return Err(invalid_input(input, "accepted values are of the form \"1.71\", \"1.71.1\" or \"nightly\"." 
) ); + }; + + if major_str != "1" { + return Err(invalid_input( + input, + "The largest major version of Rust released is \"1\"", + )); + } + + let (minor, patch) = if let Some((minor_str, patch_str)) = + tail.split_once('.') + { + let Ok(minor) = minor_str.parse::() else { + return Err(invalid_input(input, "the minor version number must be an unsigned 64-bit integer")); + }; + let Ok(patch) = patch_str.parse::() else { + return Err(invalid_input(input, "the patch version number must be an unsigned 64-bit integer")); + }; + (minor, patch) + } else { + let Ok(minor) = tail.parse::() else { + return Err(invalid_input(input, "the minor version number must be an unsigned 64-bit integer")); + }; + (minor, 0) + }; + + Self::stable(minor, patch).map_err(|err| invalid_input(input, err)) + } +} + +impl RustFeatures { + /// Compute the features that must be enabled in a specific Rust target with the latest edition + /// available in that target. + pub(crate) fn new_with_latest_edition(target: RustTarget) -> Self { + Self::new(target, target.latest_edition()) + } +} + +impl Default for RustFeatures { + fn default() -> Self { + Self::new_with_latest_edition(RustTarget::default()) + } +} + +#[cfg(test)] +mod test { + use super::*; + + #[test] + fn release_versions_for_editions() { + assert_eq!( + "1.33".parse::().unwrap().latest_edition(), + RustEdition::Edition2018 + ); + assert_eq!( + "1.56".parse::().unwrap().latest_edition(), + RustEdition::Edition2021 + ); + assert_eq!( + "1.85".parse::().unwrap().latest_edition(), + RustEdition::Edition2024 + ); + assert_eq!( + "nightly".parse::().unwrap().latest_edition(), + RustEdition::Edition2024 + ); + } + + #[test] + fn target_features() { + let features = + RustFeatures::new_with_latest_edition(RustTarget::Stable_1_71); + assert!( + features.c_unwind_abi && + features.abi_efiapi && + !features.thiscall_abi + ); + + let features = RustFeatures::new( + RustTarget::Stable_1_77, + RustEdition::Edition2018, + ); + assert!(!features.literal_cstr); + + let features = + RustFeatures::new_with_latest_edition(RustTarget::Stable_1_77); + assert!(features.literal_cstr); + + let f_nightly = + RustFeatures::new_with_latest_edition(RustTarget::Nightly); + assert!( + f_nightly.vectorcall_abi && + f_nightly.ptr_metadata && + f_nightly.layout_for_ptr + ); + } + + fn test_target(input: &str, expected: RustTarget) { + // Two targets are equivalent if they enable the same set of features + let expected = RustFeatures::new_with_latest_edition(expected); + let found = RustFeatures::new_with_latest_edition( + input.parse::().unwrap(), + ); + assert_eq!( + expected, + found, + "target {input} enables features:\n{found:#?}\nand should enable features:\n{expected:#?}" + ); + } + + fn test_invalid_target(input: &str) { + assert!( + input.parse::().is_err(), + "{input} should be an invalid target" + ); + } + + #[test] + fn valid_targets() { + test_target("1.71", RustTarget::Stable_1_71); + test_target("1.71.0", RustTarget::Stable_1_71); + test_target("1.71.1", RustTarget::Stable_1_71); + test_target("1.72", RustTarget::Stable_1_71); + test_target("1.73", RustTarget::Stable_1_73); + test_target("1.18446744073709551615", LATEST_STABLE_RUST); + test_target("nightly", RustTarget::Nightly); + } + + #[test] + fn invalid_targets() { + test_invalid_target("2.0"); + test_invalid_target("1.cat"); + test_invalid_target("1.0.cat"); + test_invalid_target("1.18446744073709551616"); + test_invalid_target("1.0.18446744073709551616"); + test_invalid_target("1.-1.0"); + test_invalid_target("1.0.-1"); + 
test_invalid_target("beta"); + test_invalid_target("1.0.0"); + test_invalid_target("1.32.0"); + } +} diff --git a/vendor/bindgen/ir/analysis/derive.rs b/vendor/bindgen/ir/analysis/derive.rs new file mode 100644 index 00000000000000..eaa20fff463c9d --- /dev/null +++ b/vendor/bindgen/ir/analysis/derive.rs @@ -0,0 +1,726 @@ +//! Determining which types for which we cannot emit `#[derive(Trait)]`. + +use std::fmt; + +use super::{generate_dependencies, ConstrainResult, MonotoneFramework}; +use crate::ir::analysis::has_vtable::HasVtable; +use crate::ir::comp::CompKind; +use crate::ir::context::{BindgenContext, ItemId}; +use crate::ir::derive::CanDerive; +use crate::ir::function::FunctionSig; +use crate::ir::item::{IsOpaque, Item}; +use crate::ir::layout::Layout; +use crate::ir::template::TemplateParameters; +use crate::ir::traversal::{EdgeKind, Trace}; +use crate::ir::ty::RUST_DERIVE_IN_ARRAY_LIMIT; +use crate::ir::ty::{Type, TypeKind}; +use crate::{Entry, HashMap, HashSet}; + +/// Which trait to consider when doing the `CannotDerive` analysis. +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub enum DeriveTrait { + /// The `Copy` trait. + Copy, + /// The `Debug` trait. + Debug, + /// The `Default` trait. + Default, + /// The `Hash` trait. + Hash, + /// The `PartialEq` and `PartialOrd` traits. + PartialEqOrPartialOrd, +} + +/// An analysis that finds for each IR item whether a trait cannot be derived. +/// +/// We use the monotone constraint function `cannot_derive`, defined as follows +/// for type T: +/// +/// * If T is Opaque and the layout of the type is known, get this layout as an +/// opaquetype and check whether it can derive using trivial checks. +/// +/// * If T is Array, a trait cannot be derived if the array is incomplete, +/// if the length of the array is larger than the limit (unless the trait +/// allows it), or the trait cannot be derived for the type of data the array +/// contains. +/// +/// * If T is Vector, a trait cannot be derived if the trait cannot be derived +/// for the type of data the vector contains. +/// +/// * If T is a type alias, a templated alias or an indirection to another type, +/// the trait cannot be derived if the trait cannot be derived for type T +/// refers to. +/// +/// * If T is a compound type, the trait cannot be derived if the trait cannot +/// be derived for any of its base members or fields. +/// +/// * If T is an instantiation of an abstract template definition, the trait +/// cannot be derived if any of the template arguments or template definition +/// cannot derive the trait. +/// +/// * For all other (simple) types, compiler and standard library limitations +/// dictate whether the trait is implemented. +#[derive(Debug, Clone)] +pub(crate) struct CannotDerive<'ctx> { + ctx: &'ctx BindgenContext, + + derive_trait: DeriveTrait, + + // The incremental result of this analysis's computation. + // Contains information whether particular item can derive `derive_trait` + can_derive: HashMap, + + // Dependencies saying that if a key ItemId has been inserted into the + // `cannot_derive_partialeq_or_partialord` set, then each of the ids + // in Vec need to be considered again. + // + // This is a subset of the natural IR graph with reversed edges, where we + // only include the edges from the IR graph that can affect whether a type + // can derive `derive_trait`. 
+ dependencies: HashMap>, +} + +type EdgePredicate = fn(EdgeKind) -> bool; + +fn consider_edge_default(kind: EdgeKind) -> bool { + match kind { + // These are the only edges that can affect whether a type can derive + EdgeKind::BaseMember | + EdgeKind::Field | + EdgeKind::TypeReference | + EdgeKind::VarType | + EdgeKind::TemplateArgument | + EdgeKind::TemplateDeclaration | + EdgeKind::TemplateParameterDefinition => true, + + EdgeKind::Constructor | + EdgeKind::Destructor | + EdgeKind::FunctionReturn | + EdgeKind::FunctionParameter | + EdgeKind::InnerType | + EdgeKind::InnerVar | + EdgeKind::Method | + EdgeKind::Generic => false, + } +} + +impl CannotDerive<'_> { + fn insert>( + &mut self, + id: Id, + can_derive: CanDerive, + ) -> ConstrainResult { + let id = id.into(); + trace!( + "inserting {id:?} can_derive<{}>={can_derive:?}", + self.derive_trait, + ); + + if let CanDerive::Yes = can_derive { + return ConstrainResult::Same; + } + + match self.can_derive.entry(id) { + Entry::Occupied(mut entry) => { + if *entry.get() < can_derive { + entry.insert(can_derive); + ConstrainResult::Changed + } else { + ConstrainResult::Same + } + } + Entry::Vacant(entry) => { + entry.insert(can_derive); + ConstrainResult::Changed + } + } + } + + fn constrain_type(&mut self, item: &Item, ty: &Type) -> CanDerive { + if !self.ctx.allowlisted_items().contains(&item.id()) { + let can_derive = self + .ctx + .blocklisted_type_implements_trait(item, self.derive_trait); + match can_derive { + CanDerive::Yes => trace!( + " blocklisted type explicitly implements {}", + self.derive_trait + ), + CanDerive::Manually => trace!( + " blocklisted type requires manual implementation of {}", + self.derive_trait + ), + CanDerive::No => trace!( + " cannot derive {} for blocklisted type", + self.derive_trait + ), + } + return can_derive; + } + + if self.derive_trait.not_by_name(self.ctx, item) { + trace!( + " cannot derive {} for explicitly excluded type", + self.derive_trait + ); + return CanDerive::No; + } + + trace!("ty: {ty:?}"); + if item.is_opaque(self.ctx, &()) { + if !self.derive_trait.can_derive_union() && + ty.is_union() && + self.ctx.options().untagged_union + { + trace!( + " cannot derive {} for Rust unions", + self.derive_trait + ); + return CanDerive::No; + } + + let layout_can_derive = + ty.layout(self.ctx).map_or(CanDerive::Yes, |l| { + l.opaque().array_size_within_derive_limit() + }); + + match layout_can_derive { + CanDerive::Yes => { + trace!( + " we can trivially derive {} for the layout", + self.derive_trait + ); + } + _ => { + trace!( + " we cannot derive {} for the layout", + self.derive_trait + ); + } + } + return layout_can_derive; + } + + match *ty.kind() { + // Handle the simple cases. These can derive traits without further + // information. + TypeKind::Void | + TypeKind::NullPtr | + TypeKind::Int(..) | + TypeKind::Complex(..) | + TypeKind::Float(..) | + TypeKind::Enum(..) | + TypeKind::TypeParam | + TypeKind::UnresolvedTypeRef(..) | + TypeKind::Reference(..) | + TypeKind::ObjCInterface(..) 
| + TypeKind::ObjCId | + TypeKind::ObjCSel => self.derive_trait.can_derive_simple(ty.kind()), + TypeKind::Pointer(inner) => { + let inner_type = + self.ctx.resolve_type(inner).canonical_type(self.ctx); + if let TypeKind::Function(ref sig) = *inner_type.kind() { + self.derive_trait.can_derive_fnptr(sig) + } else { + self.derive_trait.can_derive_pointer() + } + } + TypeKind::Function(ref sig) => { + self.derive_trait.can_derive_fnptr(sig) + } + + // Complex cases need more information + TypeKind::Array(t, len) => { + let inner_type = + self.can_derive.get(&t.into()).copied().unwrap_or_default(); + if inner_type != CanDerive::Yes { + trace!( + " arrays of T for which we cannot derive {} \ + also cannot derive {}", + self.derive_trait, + self.derive_trait + ); + return CanDerive::No; + } + + if len == 0 && !self.derive_trait.can_derive_incomplete_array() + { + trace!( + " cannot derive {} for incomplete arrays", + self.derive_trait + ); + return CanDerive::No; + } + + if self.derive_trait.can_derive_large_array(self.ctx) { + trace!(" array can derive {}", self.derive_trait); + return CanDerive::Yes; + } + + if len > RUST_DERIVE_IN_ARRAY_LIMIT { + trace!( + " array is too large to derive {}, but it may be implemented", self.derive_trait + ); + return CanDerive::Manually; + } + trace!( + " array is small enough to derive {}", + self.derive_trait + ); + CanDerive::Yes + } + TypeKind::Vector(t, len) => { + let inner_type = + self.can_derive.get(&t.into()).copied().unwrap_or_default(); + if inner_type != CanDerive::Yes { + trace!( + " vectors of T for which we cannot derive {} \ + also cannot derive {}", + self.derive_trait, + self.derive_trait + ); + return CanDerive::No; + } + assert_ne!(len, 0, "vectors cannot have zero length"); + self.derive_trait.can_derive_vector() + } + + TypeKind::Comp(ref info) => { + assert!( + !info.has_non_type_template_params(), + "The early ty.is_opaque check should have handled this case" + ); + + if !self.derive_trait.can_derive_compound_forward_decl() && + info.is_forward_declaration() + { + trace!( + " cannot derive {} for forward decls", + self.derive_trait + ); + return CanDerive::No; + } + + // NOTE: Take into account that while unions in C and C++ are copied by + // default, the may have an explicit destructor in C++, so we can't + // defer this check just for the union case. 
+ if !self.derive_trait.can_derive_compound_with_destructor() && + self.ctx.lookup_has_destructor( + item.id().expect_type_id(self.ctx), + ) + { + trace!( + " comp has destructor which cannot derive {}", + self.derive_trait + ); + return CanDerive::No; + } + + if info.kind() == CompKind::Union { + if self.derive_trait.can_derive_union() { + if self.ctx.options().untagged_union && + // https://github.com/rust-lang/rust/issues/36640 + (!info.self_template_params(self.ctx).is_empty() || + !item.all_template_params(self.ctx).is_empty()) + { + trace!( + " cannot derive {} for Rust union because issue 36640", self.derive_trait + ); + return CanDerive::No; + } + // fall through to be same as non-union handling + } else { + if self.ctx.options().untagged_union { + trace!( + " cannot derive {} for Rust unions", + self.derive_trait + ); + return CanDerive::No; + } + + let layout_can_derive = + ty.layout(self.ctx).map_or(CanDerive::Yes, |l| { + l.opaque().array_size_within_derive_limit() + }); + match layout_can_derive { + CanDerive::Yes => { + trace!( + " union layout can trivially derive {}", + self.derive_trait + ); + } + _ => { + trace!( + " union layout cannot derive {}", + self.derive_trait + ); + } + } + return layout_can_derive; + } + } + + if !self.derive_trait.can_derive_compound_with_vtable() && + item.has_vtable(self.ctx) + { + trace!( + " cannot derive {} for comp with vtable", + self.derive_trait + ); + return CanDerive::No; + } + + // Bitfield units are always represented as arrays of u8, but + // they're not traced as arrays, so we need to check here + // instead. + if !self.derive_trait.can_derive_large_array(self.ctx) && + info.has_too_large_bitfield_unit() && + !item.is_opaque(self.ctx, &()) + { + trace!( + " cannot derive {} for comp with too large bitfield unit", + self.derive_trait + ); + return CanDerive::No; + } + + let pred = self.derive_trait.consider_edge_comp(); + self.constrain_join(item, pred) + } + + TypeKind::ResolvedTypeRef(..) | + TypeKind::TemplateAlias(..) | + TypeKind::Alias(..) | + TypeKind::BlockPointer(..) => { + let pred = self.derive_trait.consider_edge_typeref(); + self.constrain_join(item, pred) + } + + TypeKind::TemplateInstantiation(..) => { + let pred = self.derive_trait.consider_edge_tmpl_inst(); + self.constrain_join(item, pred) + } + + TypeKind::Opaque => unreachable!( + "The early ty.is_opaque check should have handled this case" + ), + } + } + + fn constrain_join( + &mut self, + item: &Item, + consider_edge: EdgePredicate, + ) -> CanDerive { + let mut candidate = None; + + item.trace( + self.ctx, + &mut |sub_id, edge_kind| { + // Ignore ourselves, since union with ourself is a + // no-op. Ignore edges that aren't relevant to the + // analysis. 
+ if sub_id == item.id() || !consider_edge(edge_kind) { + return; + } + + let can_derive = self.can_derive + .get(&sub_id) + .copied() + .unwrap_or_default(); + + match can_derive { + CanDerive::Yes => trace!(" member {sub_id:?} can derive {}", self.derive_trait), + CanDerive::Manually => trace!(" member {sub_id:?} cannot derive {}, but it may be implemented", self.derive_trait), + CanDerive::No => trace!(" member {sub_id:?} cannot derive {}", self.derive_trait), + } + + *candidate.get_or_insert(CanDerive::Yes) |= can_derive; + }, + &(), + ); + + if candidate.is_none() { + trace!( + " can derive {} because there are no members", + self.derive_trait + ); + } + candidate.unwrap_or_default() + } +} + +impl DeriveTrait { + fn not_by_name(self, ctx: &BindgenContext, item: &Item) -> bool { + match self { + DeriveTrait::Copy => ctx.no_copy_by_name(item), + DeriveTrait::Debug => ctx.no_debug_by_name(item), + DeriveTrait::Default => ctx.no_default_by_name(item), + DeriveTrait::Hash => ctx.no_hash_by_name(item), + DeriveTrait::PartialEqOrPartialOrd => { + ctx.no_partialeq_by_name(item) + } + } + } + + fn consider_edge_comp(self) -> EdgePredicate { + match self { + DeriveTrait::PartialEqOrPartialOrd => consider_edge_default, + _ => |kind| matches!(kind, EdgeKind::BaseMember | EdgeKind::Field), + } + } + + fn consider_edge_typeref(self) -> EdgePredicate { + match self { + DeriveTrait::PartialEqOrPartialOrd => consider_edge_default, + _ => |kind| kind == EdgeKind::TypeReference, + } + } + + fn consider_edge_tmpl_inst(self) -> EdgePredicate { + match self { + DeriveTrait::PartialEqOrPartialOrd => consider_edge_default, + _ => |kind| { + matches!( + kind, + EdgeKind::TemplateArgument | EdgeKind::TemplateDeclaration + ) + }, + } + } + + fn can_derive_large_array(self, ctx: &BindgenContext) -> bool { + if ctx.options().rust_features().larger_arrays { + !matches!(self, DeriveTrait::Default) + } else { + matches!(self, DeriveTrait::Copy) + } + } + + fn can_derive_union(self) -> bool { + matches!(self, DeriveTrait::Copy) + } + + fn can_derive_compound_with_destructor(self) -> bool { + !matches!(self, DeriveTrait::Copy) + } + + fn can_derive_compound_with_vtable(self) -> bool { + !matches!(self, DeriveTrait::Default) + } + + fn can_derive_compound_forward_decl(self) -> bool { + matches!(self, DeriveTrait::Copy | DeriveTrait::Debug) + } + + fn can_derive_incomplete_array(self) -> bool { + !matches!( + self, + DeriveTrait::Copy | + DeriveTrait::Hash | + DeriveTrait::PartialEqOrPartialOrd + ) + } + + fn can_derive_fnptr(self, f: &FunctionSig) -> CanDerive { + match (self, f.function_pointers_can_derive()) { + (DeriveTrait::Copy | DeriveTrait::Default, _) | (_, true) => { + trace!(" function pointer can derive {self}"); + CanDerive::Yes + } + (DeriveTrait::Debug, false) => { + trace!(" function pointer cannot derive {self}, but it may be implemented"); + CanDerive::Manually + } + (_, false) => { + trace!(" function pointer cannot derive {self}"); + CanDerive::No + } + } + } + + fn can_derive_vector(self) -> CanDerive { + if self == DeriveTrait::PartialEqOrPartialOrd { + // FIXME: vectors always can derive PartialEq, but they should + // not derive PartialOrd: + // https://github.com/rust-lang-nursery/packed_simd/issues/48 + trace!(" vectors cannot derive PartialOrd"); + CanDerive::No + } else { + trace!(" vector can derive {self}"); + CanDerive::Yes + } + } + + fn can_derive_pointer(self) -> CanDerive { + if self == DeriveTrait::Default { + trace!(" pointer cannot derive Default"); + CanDerive::No + } else { + 
trace!(" pointer can derive {self}"); + CanDerive::Yes + } + } + + fn can_derive_simple(self, kind: &TypeKind) -> CanDerive { + match (self, kind) { + // === Default === + ( + DeriveTrait::Default, + TypeKind::Void | + TypeKind::NullPtr | + TypeKind::Enum(..) | + TypeKind::Reference(..) | + TypeKind::TypeParam | + TypeKind::ObjCInterface(..) | + TypeKind::ObjCId | + TypeKind::ObjCSel, + ) => { + trace!(" types that always cannot derive Default"); + CanDerive::No + } + (DeriveTrait::Default, TypeKind::UnresolvedTypeRef(..)) => { + unreachable!( + "Type with unresolved type ref can't reach derive default" + ) + } + // === Hash === + ( + DeriveTrait::Hash, + TypeKind::Float(..) | TypeKind::Complex(..), + ) => { + trace!(" float cannot derive Hash"); + CanDerive::No + } + // === others === + _ => { + trace!(" simple type that can always derive {self}"); + CanDerive::Yes + } + } + } +} + +impl fmt::Display for DeriveTrait { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let s = match self { + DeriveTrait::Copy => "Copy", + DeriveTrait::Debug => "Debug", + DeriveTrait::Default => "Default", + DeriveTrait::Hash => "Hash", + DeriveTrait::PartialEqOrPartialOrd => "PartialEq/PartialOrd", + }; + s.fmt(f) + } +} + +impl<'ctx> MonotoneFramework for CannotDerive<'ctx> { + type Node = ItemId; + type Extra = (&'ctx BindgenContext, DeriveTrait); + type Output = HashMap; + + fn new( + (ctx, derive_trait): (&'ctx BindgenContext, DeriveTrait), + ) -> CannotDerive<'ctx> { + let can_derive = HashMap::default(); + let dependencies = generate_dependencies(ctx, consider_edge_default); + + CannotDerive { + ctx, + derive_trait, + can_derive, + dependencies, + } + } + + fn initial_worklist(&self) -> Vec { + // The transitive closure of all allowlisted items, including explicitly + // blocklisted items. + self.ctx + .allowlisted_items() + .iter() + .copied() + .flat_map(|i| { + let mut reachable = vec![i]; + i.trace( + self.ctx, + &mut |s, _| { + reachable.push(s); + }, + &(), + ); + reachable + }) + .collect() + } + + fn constrain(&mut self, id: ItemId) -> ConstrainResult { + trace!("constrain: {id:?}"); + + if let Some(CanDerive::No) = self.can_derive.get(&id) { + trace!(" already know it cannot derive {}", self.derive_trait); + return ConstrainResult::Same; + } + + let item = self.ctx.resolve_item(id); + let can_derive = match item.as_type() { + Some(ty) => { + let mut can_derive = self.constrain_type(item, ty); + if let CanDerive::Yes = can_derive { + let is_reached_limit = + |l: Layout| l.align > RUST_DERIVE_IN_ARRAY_LIMIT; + if !self.derive_trait.can_derive_large_array(self.ctx) && + ty.layout(self.ctx).is_some_and(is_reached_limit) + { + // We have to be conservative: the struct *could* have enough + // padding that we emit an array that is longer than + // `RUST_DERIVE_IN_ARRAY_LIMIT`. If we moved padding calculations + // into the IR and computed them before this analysis, then we could + // be precise rather than conservative here. 
+ can_derive = CanDerive::Manually; + } + } + can_derive + } + None => self.constrain_join(item, consider_edge_default), + }; + + self.insert(id, can_derive) + } + + fn each_depending_on(&self, id: ItemId, mut f: F) + where + F: FnMut(ItemId), + { + if let Some(edges) = self.dependencies.get(&id) { + for item in edges { + trace!("enqueue {item:?} into worklist"); + f(*item); + } + } + } +} + +impl<'ctx> From> for HashMap { + fn from(analysis: CannotDerive<'ctx>) -> Self { + extra_assert!(analysis + .can_derive + .values() + .all(|v| *v != CanDerive::Yes)); + + analysis.can_derive + } +} + +/// Convert a `HashMap` into a `HashSet`. +/// +/// Elements that are not `CanDerive::Yes` are kept in the set, so that it +/// represents all items that cannot derive. +pub(crate) fn as_cannot_derive_set( + can_derive: HashMap, +) -> HashSet { + can_derive + .into_iter() + .filter_map(|(k, v)| if v == CanDerive::Yes { None } else { Some(k) }) + .collect() +} diff --git a/vendor/bindgen/ir/analysis/has_destructor.rs b/vendor/bindgen/ir/analysis/has_destructor.rs new file mode 100644 index 00000000000000..4893f8f8075db2 --- /dev/null +++ b/vendor/bindgen/ir/analysis/has_destructor.rs @@ -0,0 +1,175 @@ +//! Determining which types have destructors + +use super::{generate_dependencies, ConstrainResult, MonotoneFramework}; +use crate::ir::comp::{CompKind, Field, FieldMethods}; +use crate::ir::context::{BindgenContext, ItemId}; +use crate::ir::traversal::EdgeKind; +use crate::ir::ty::TypeKind; +use crate::{HashMap, HashSet}; + +/// An analysis that finds for each IR item whether it has a destructor or not +/// +/// We use the monotone function `has destructor`, defined as follows: +/// +/// * If T is a type alias, a templated alias, or an indirection to another type, +/// T has a destructor if the type T refers to has a destructor. +/// * If T is a compound type, T has a destructor if we saw a destructor when parsing it, +/// or if it's a struct, T has a destructor if any of its base members has a destructor, +/// or if any of its fields have a destructor. +/// * If T is an instantiation of an abstract template definition, T has +/// a destructor if its template definition has a destructor, +/// or if any of the template arguments has a destructor. +/// * If T is the type of a field, that field has a destructor if it's not a bitfield, +/// and if T has a destructor. +#[derive(Debug, Clone)] +pub(crate) struct HasDestructorAnalysis<'ctx> { + ctx: &'ctx BindgenContext, + + // The incremental result of this analysis's computation. Everything in this + // set definitely has a destructor. + have_destructor: HashSet, + + // Dependencies saying that if a key ItemId has been inserted into the + // `have_destructor` set, then each of the ids in Vec need to be + // considered again. + // + // This is a subset of the natural IR graph with reversed edges, where we + // only include the edges from the IR graph that can affect whether a type + // has a destructor or not. + dependencies: HashMap>, +} + +impl HasDestructorAnalysis<'_> { + fn consider_edge(kind: EdgeKind) -> bool { + // These are the only edges that can affect whether a type has a + // destructor or not. 
+ matches!( + kind, + EdgeKind::TypeReference | + EdgeKind::BaseMember | + EdgeKind::Field | + EdgeKind::TemplateArgument | + EdgeKind::TemplateDeclaration + ) + } + + fn insert>(&mut self, id: Id) -> ConstrainResult { + let id = id.into(); + let was_not_already_in_set = self.have_destructor.insert(id); + assert!( + was_not_already_in_set, + "We shouldn't try and insert {id:?} twice because if it was \ + already in the set, `constrain` should have exited early." + ); + ConstrainResult::Changed + } +} + +impl<'ctx> MonotoneFramework for HasDestructorAnalysis<'ctx> { + type Node = ItemId; + type Extra = &'ctx BindgenContext; + type Output = HashSet; + + fn new(ctx: &'ctx BindgenContext) -> Self { + let have_destructor = HashSet::default(); + let dependencies = generate_dependencies(ctx, Self::consider_edge); + + HasDestructorAnalysis { + ctx, + have_destructor, + dependencies, + } + } + + fn initial_worklist(&self) -> Vec { + self.ctx.allowlisted_items().iter().copied().collect() + } + + fn constrain(&mut self, id: ItemId) -> ConstrainResult { + if self.have_destructor.contains(&id) { + // We've already computed that this type has a destructor and that can't + // change. + return ConstrainResult::Same; + } + + let item = self.ctx.resolve_item(id); + let ty = match item.as_type() { + None => return ConstrainResult::Same, + Some(ty) => ty, + }; + + match *ty.kind() { + TypeKind::TemplateAlias(t, _) | + TypeKind::Alias(t) | + TypeKind::ResolvedTypeRef(t) => { + if self.have_destructor.contains(&t.into()) { + self.insert(id) + } else { + ConstrainResult::Same + } + } + + TypeKind::Comp(ref info) => { + if info.has_own_destructor() { + return self.insert(id); + } + + match info.kind() { + CompKind::Union => ConstrainResult::Same, + CompKind::Struct => { + let base_or_field_destructor = + info.base_members().iter().any(|base| { + self.have_destructor.contains(&base.ty.into()) + }) || info.fields().iter().any( + |field| match *field { + Field::DataMember(ref data) => self + .have_destructor + .contains(&data.ty().into()), + Field::Bitfields(_) => false, + }, + ); + if base_or_field_destructor { + self.insert(id) + } else { + ConstrainResult::Same + } + } + } + } + + TypeKind::TemplateInstantiation(ref inst) => { + let definition_or_arg_destructor = self + .have_destructor + .contains(&inst.template_definition().into()) || + inst.template_arguments().iter().any(|arg| { + self.have_destructor.contains(&arg.into()) + }); + if definition_or_arg_destructor { + self.insert(id) + } else { + ConstrainResult::Same + } + } + + _ => ConstrainResult::Same, + } + } + + fn each_depending_on(&self, id: ItemId, mut f: F) + where + F: FnMut(ItemId), + { + if let Some(edges) = self.dependencies.get(&id) { + for item in edges { + trace!("enqueue {item:?} into worklist"); + f(*item); + } + } + } +} + +impl<'ctx> From> for HashSet { + fn from(analysis: HasDestructorAnalysis<'ctx>) -> Self { + analysis.have_destructor + } +} diff --git a/vendor/bindgen/ir/analysis/has_float.rs b/vendor/bindgen/ir/analysis/has_float.rs new file mode 100644 index 00000000000000..e2463ccb96e262 --- /dev/null +++ b/vendor/bindgen/ir/analysis/has_float.rs @@ -0,0 +1,248 @@ +//! Determining which types has float. 
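+//!
+//! Illustrative sketch (editorial note, not from upstream bindgen; the type
+//! names are hypothetical): given the C input
+//!
+//! ```c
+//! struct Inner { float f; };
+//! struct Outer { struct Inner inner; };
+//! typedef struct Outer OuterAlias;
+//! ```
+//!
+//! the analysis puts `Inner`, `Outer`, and `OuterAlias` in its output set:
+//! `Inner` contains a float directly, `Outer` picks the property up through
+//! its field, and `OuterAlias` inherits it from the type it refers to.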
+ +use super::{generate_dependencies, ConstrainResult, MonotoneFramework}; +use crate::ir::comp::Field; +use crate::ir::comp::FieldMethods; +use crate::ir::context::{BindgenContext, ItemId}; +use crate::ir::traversal::EdgeKind; +use crate::ir::ty::TypeKind; +use crate::{HashMap, HashSet}; + +/// An analysis that finds for each IR item whether it has float or not. +/// +/// We use the monotone constraint function `has_float`, +/// defined as follows: +/// +/// * If T is float or complex float, T trivially has. +/// * If T is a type alias, a templated alias or an indirection to another type, +/// it has float if the type T refers to has. +/// * If T is a compound type, it has float if any of base memter or field +/// has. +/// * If T is an instantiation of an abstract template definition, T has +/// float if any of the template arguments or template definition +/// has. +#[derive(Debug, Clone)] +pub(crate) struct HasFloat<'ctx> { + ctx: &'ctx BindgenContext, + + // The incremental result of this analysis's computation. Everything in this + // set has float. + has_float: HashSet, + + // Dependencies saying that if a key ItemId has been inserted into the + // `has_float` set, then each of the ids in Vec need to be + // considered again. + // + // This is a subset of the natural IR graph with reversed edges, where we + // only include the edges from the IR graph that can affect whether a type + // has float or not. + dependencies: HashMap>, +} + +impl HasFloat<'_> { + fn consider_edge(kind: EdgeKind) -> bool { + match kind { + EdgeKind::BaseMember | + EdgeKind::Field | + EdgeKind::TypeReference | + EdgeKind::VarType | + EdgeKind::TemplateArgument | + EdgeKind::TemplateDeclaration | + EdgeKind::TemplateParameterDefinition => true, + + EdgeKind::Constructor | + EdgeKind::Destructor | + EdgeKind::FunctionReturn | + EdgeKind::FunctionParameter | + EdgeKind::InnerType | + EdgeKind::InnerVar | + EdgeKind::Method | + EdgeKind::Generic => false, + } + } + + fn insert>(&mut self, id: Id) -> ConstrainResult { + let id = id.into(); + trace!("inserting {id:?} into the has_float set"); + + let was_not_already_in_set = self.has_float.insert(id); + assert!( + was_not_already_in_set, + "We shouldn't try and insert {id:?} twice because if it was \ + already in the set, `constrain` should have exited early." + ); + + ConstrainResult::Changed + } +} + +impl<'ctx> MonotoneFramework for HasFloat<'ctx> { + type Node = ItemId; + type Extra = &'ctx BindgenContext; + type Output = HashSet; + + fn new(ctx: &'ctx BindgenContext) -> HasFloat<'ctx> { + let has_float = HashSet::default(); + let dependencies = generate_dependencies(ctx, Self::consider_edge); + + HasFloat { + ctx, + has_float, + dependencies, + } + } + + fn initial_worklist(&self) -> Vec { + self.ctx.allowlisted_items().iter().copied().collect() + } + + fn constrain(&mut self, id: ItemId) -> ConstrainResult { + trace!("constrain: {id:?}"); + + if self.has_float.contains(&id) { + trace!(" already know it do not have float"); + return ConstrainResult::Same; + } + + let item = self.ctx.resolve_item(id); + let Some(ty) = item.as_type() else { + trace!(" not a type; ignoring"); + return ConstrainResult::Same; + }; + + match *ty.kind() { + TypeKind::Void | + TypeKind::NullPtr | + TypeKind::Int(..) | + TypeKind::Function(..) | + TypeKind::Enum(..) | + TypeKind::Reference(..) | + TypeKind::TypeParam | + TypeKind::Opaque | + TypeKind::Pointer(..) | + TypeKind::UnresolvedTypeRef(..) | + TypeKind::ObjCInterface(..) 
| + TypeKind::ObjCId | + TypeKind::ObjCSel => { + trace!(" simple type that do not have float"); + ConstrainResult::Same + } + + TypeKind::Float(..) | TypeKind::Complex(..) => { + trace!(" float type has float"); + self.insert(id) + } + + TypeKind::Array(t, _) => { + if self.has_float.contains(&t.into()) { + trace!( + " Array with type T that has float also has float" + ); + return self.insert(id); + } + trace!(" Array with type T that do not have float also do not have float"); + ConstrainResult::Same + } + TypeKind::Vector(t, _) => { + if self.has_float.contains(&t.into()) { + trace!( + " Vector with type T that has float also has float" + ); + return self.insert(id); + } + trace!(" Vector with type T that do not have float also do not have float"); + ConstrainResult::Same + } + + TypeKind::ResolvedTypeRef(t) | + TypeKind::TemplateAlias(t, _) | + TypeKind::Alias(t) | + TypeKind::BlockPointer(t) => { + if self.has_float.contains(&t.into()) { + trace!( + " aliases and type refs to T which have float \ + also have float" + ); + self.insert(id) + } else { + trace!(" aliases and type refs to T which do not have float \ + also do not have floaarrayt"); + ConstrainResult::Same + } + } + + TypeKind::Comp(ref info) => { + let bases_have = info + .base_members() + .iter() + .any(|base| self.has_float.contains(&base.ty.into())); + if bases_have { + trace!(" bases have float, so we also have"); + return self.insert(id); + } + let fields_have = info.fields().iter().any(|f| match *f { + Field::DataMember(ref data) => { + self.has_float.contains(&data.ty().into()) + } + Field::Bitfields(ref bfu) => bfu + .bitfields() + .iter() + .any(|b| self.has_float.contains(&b.ty().into())), + }); + if fields_have { + trace!(" fields have float, so we also have"); + return self.insert(id); + } + + trace!(" comp doesn't have float"); + ConstrainResult::Same + } + + TypeKind::TemplateInstantiation(ref template) => { + let args_have = template + .template_arguments() + .iter() + .any(|arg| self.has_float.contains(&arg.into())); + if args_have { + trace!( + " template args have float, so \ + instantiation also has float" + ); + return self.insert(id); + } + + let def_has = self + .has_float + .contains(&template.template_definition().into()); + if def_has { + trace!( + " template definition has float, so \ + instantiation also has" + ); + return self.insert(id); + } + + trace!(" template instantiation do not have float"); + ConstrainResult::Same + } + } + } + + fn each_depending_on(&self, id: ItemId, mut f: F) + where + F: FnMut(ItemId), + { + if let Some(edges) = self.dependencies.get(&id) { + for item in edges { + trace!("enqueue {item:?} into worklist"); + f(*item); + } + } + } +} + +impl<'ctx> From> for HashSet { + fn from(analysis: HasFloat<'ctx>) -> Self { + analysis.has_float + } +} diff --git a/vendor/bindgen/ir/analysis/has_type_param_in_array.rs b/vendor/bindgen/ir/analysis/has_type_param_in_array.rs new file mode 100644 index 00000000000000..687f81560c7783 --- /dev/null +++ b/vendor/bindgen/ir/analysis/has_type_param_in_array.rs @@ -0,0 +1,242 @@ +//! Determining which types has typed parameters in array. + +use super::{generate_dependencies, ConstrainResult, MonotoneFramework}; +use crate::ir::comp::Field; +use crate::ir::comp::FieldMethods; +use crate::ir::context::{BindgenContext, ItemId}; +use crate::ir::traversal::EdgeKind; +use crate::ir::ty::TypeKind; +use crate::{HashMap, HashSet}; + +/// An analysis that finds for each IR item whether it has array or not. 
+/// +/// We use the monotone constraint function `has_type_parameter_in_array`, +/// defined as follows: +/// +/// * If T is Array type with type parameter, T trivially has. +/// * If T is a type alias, a templated alias or an indirection to another type, +/// it has type parameter in array if the type T refers to has. +/// * If T is a compound type, it has array if any of base memter or field +/// has type parameter in array. +/// * If T is an instantiation of an abstract template definition, T has +/// type parameter in array if any of the template arguments or template definition +/// has. +#[derive(Debug, Clone)] +pub(crate) struct HasTypeParameterInArray<'ctx> { + ctx: &'ctx BindgenContext, + + // The incremental result of this analysis's computation. Everything in this + // set has array. + has_type_parameter_in_array: HashSet, + + // Dependencies saying that if a key ItemId has been inserted into the + // `has_type_parameter_in_array` set, then each of the ids in Vec need to be + // considered again. + // + // This is a subset of the natural IR graph with reversed edges, where we + // only include the edges from the IR graph that can affect whether a type + // has array or not. + dependencies: HashMap>, +} + +impl HasTypeParameterInArray<'_> { + fn consider_edge(kind: EdgeKind) -> bool { + match kind { + // These are the only edges that can affect whether a type has type parameter + // in array or not. + EdgeKind::BaseMember | + EdgeKind::Field | + EdgeKind::TypeReference | + EdgeKind::VarType | + EdgeKind::TemplateArgument | + EdgeKind::TemplateDeclaration | + EdgeKind::TemplateParameterDefinition => true, + + EdgeKind::Constructor | + EdgeKind::Destructor | + EdgeKind::FunctionReturn | + EdgeKind::FunctionParameter | + EdgeKind::InnerType | + EdgeKind::InnerVar | + EdgeKind::Method | + EdgeKind::Generic => false, + } + } + + fn insert>(&mut self, id: Id) -> ConstrainResult { + let id = id.into(); + trace!("inserting {id:?} into the has_type_parameter_in_array set"); + + let was_not_already_in_set = + self.has_type_parameter_in_array.insert(id); + assert!( + was_not_already_in_set, + "We shouldn't try and insert {id:?} twice because if it was \ + already in the set, `constrain` should have exited early." + ); + + ConstrainResult::Changed + } +} + +impl<'ctx> MonotoneFramework for HasTypeParameterInArray<'ctx> { + type Node = ItemId; + type Extra = &'ctx BindgenContext; + type Output = HashSet; + + fn new(ctx: &'ctx BindgenContext) -> HasTypeParameterInArray<'ctx> { + let has_type_parameter_in_array = HashSet::default(); + let dependencies = generate_dependencies(ctx, Self::consider_edge); + + HasTypeParameterInArray { + ctx, + has_type_parameter_in_array, + dependencies, + } + } + + fn initial_worklist(&self) -> Vec { + self.ctx.allowlisted_items().iter().copied().collect() + } + + fn constrain(&mut self, id: ItemId) -> ConstrainResult { + trace!("constrain: {id:?}"); + + if self.has_type_parameter_in_array.contains(&id) { + trace!(" already know it do not have array"); + return ConstrainResult::Same; + } + + let item = self.ctx.resolve_item(id); + let Some(ty) = item.as_type() else { + trace!(" not a type; ignoring"); + return ConstrainResult::Same; + }; + + match *ty.kind() { + // Handle the simple cases. These cannot have array in type parameter + // without further information. + TypeKind::Void | + TypeKind::NullPtr | + TypeKind::Int(..) | + TypeKind::Float(..) | + TypeKind::Vector(..) | + TypeKind::Complex(..) | + TypeKind::Function(..) | + TypeKind::Enum(..) 
| + TypeKind::Reference(..) | + TypeKind::TypeParam | + TypeKind::Opaque | + TypeKind::Pointer(..) | + TypeKind::UnresolvedTypeRef(..) | + TypeKind::ObjCInterface(..) | + TypeKind::ObjCId | + TypeKind::ObjCSel => { + trace!(" simple type that do not have array"); + ConstrainResult::Same + } + + TypeKind::Array(t, _) => { + let inner_ty = + self.ctx.resolve_type(t).canonical_type(self.ctx); + if let TypeKind::TypeParam = *inner_ty.kind() { + trace!(" Array with Named type has type parameter"); + self.insert(id) + } else { + trace!( + " Array without Named type does have type parameter" + ); + ConstrainResult::Same + } + } + + TypeKind::ResolvedTypeRef(t) | + TypeKind::TemplateAlias(t, _) | + TypeKind::Alias(t) | + TypeKind::BlockPointer(t) => { + if self.has_type_parameter_in_array.contains(&t.into()) { + trace!( + " aliases and type refs to T which have array \ + also have array" + ); + self.insert(id) + } else { + trace!( + " aliases and type refs to T which do not have array \ + also do not have array" + ); + ConstrainResult::Same + } + } + + TypeKind::Comp(ref info) => { + let bases_have = info.base_members().iter().any(|base| { + self.has_type_parameter_in_array.contains(&base.ty.into()) + }); + if bases_have { + trace!(" bases have array, so we also have"); + return self.insert(id); + } + let fields_have = info.fields().iter().any(|f| match *f { + Field::DataMember(ref data) => self + .has_type_parameter_in_array + .contains(&data.ty().into()), + Field::Bitfields(..) => false, + }); + if fields_have { + trace!(" fields have array, so we also have"); + return self.insert(id); + } + + trace!(" comp doesn't have array"); + ConstrainResult::Same + } + + TypeKind::TemplateInstantiation(ref template) => { + let args_have = + template.template_arguments().iter().any(|arg| { + self.has_type_parameter_in_array.contains(&arg.into()) + }); + if args_have { + trace!( + " template args have array, so \ + instantiation also has array" + ); + return self.insert(id); + } + + let def_has = self + .has_type_parameter_in_array + .contains(&template.template_definition().into()); + if def_has { + trace!( + " template definition has array, so \ + instantiation also has" + ); + return self.insert(id); + } + + trace!(" template instantiation do not have array"); + ConstrainResult::Same + } + } + } + + fn each_depending_on(&self, id: ItemId, mut f: F) + where + F: FnMut(ItemId), + { + if let Some(edges) = self.dependencies.get(&id) { + for item in edges { + trace!("enqueue {item:?} into worklist"); + f(*item); + } + } + } +} + +impl<'ctx> From> for HashSet { + fn from(analysis: HasTypeParameterInArray<'ctx>) -> Self { + analysis.has_type_parameter_in_array + } +} diff --git a/vendor/bindgen/ir/analysis/has_vtable.rs b/vendor/bindgen/ir/analysis/has_vtable.rs new file mode 100644 index 00000000000000..3ff64a6d2b1a49 --- /dev/null +++ b/vendor/bindgen/ir/analysis/has_vtable.rs @@ -0,0 +1,235 @@ +//! Determining which types has vtable + +use super::{generate_dependencies, ConstrainResult, MonotoneFramework}; +use crate::ir::context::{BindgenContext, ItemId}; +use crate::ir::traversal::EdgeKind; +use crate::ir::ty::TypeKind; +use crate::{Entry, HashMap}; +use std::cmp; +use std::ops; + +/// The result of the `HasVtableAnalysis` for an individual item. +#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Default)] +pub(crate) enum HasVtableResult { + /// The item does not have a vtable pointer. + #[default] + No, + + /// The item has a vtable and the actual vtable pointer is within this item. 
+ SelfHasVtable, + + /// The item has a vtable, but the actual vtable pointer is in a base + /// member. + BaseHasVtable, +} + +impl HasVtableResult { + /// Take the least upper bound of `self` and `rhs`. + pub(crate) fn join(self, rhs: Self) -> Self { + cmp::max(self, rhs) + } +} + +impl ops::BitOr for HasVtableResult { + type Output = Self; + + fn bitor(self, rhs: HasVtableResult) -> Self::Output { + self.join(rhs) + } +} + +impl ops::BitOrAssign for HasVtableResult { + fn bitor_assign(&mut self, rhs: HasVtableResult) { + *self = self.join(rhs); + } +} + +/// An analysis that finds for each IR item whether it has vtable or not +/// +/// We use the monotone function `has vtable`, defined as follows: +/// +/// * If T is a type alias, a templated alias, an indirection to another type, +/// or a reference of a type, T has vtable if the type T refers to has vtable. +/// * If T is a compound type, T has vtable if we saw a virtual function when +/// parsing it or any of its base member has vtable. +/// * If T is an instantiation of an abstract template definition, T has +/// vtable if template definition has vtable +#[derive(Debug, Clone)] +pub(crate) struct HasVtableAnalysis<'ctx> { + ctx: &'ctx BindgenContext, + + // The incremental result of this analysis's computation. Everything in this + // set definitely has a vtable. + have_vtable: HashMap, + + // Dependencies saying that if a key ItemId has been inserted into the + // `have_vtable` set, then each of the ids in Vec need to be + // considered again. + // + // This is a subset of the natural IR graph with reversed edges, where we + // only include the edges from the IR graph that can affect whether a type + // has a vtable or not. + dependencies: HashMap>, +} + +impl HasVtableAnalysis<'_> { + fn consider_edge(kind: EdgeKind) -> bool { + // These are the only edges that can affect whether a type has a + // vtable or not. 
+ matches!( + kind, + EdgeKind::TypeReference | + EdgeKind::BaseMember | + EdgeKind::TemplateDeclaration + ) + } + + fn insert>( + &mut self, + id: Id, + result: HasVtableResult, + ) -> ConstrainResult { + if let HasVtableResult::No = result { + return ConstrainResult::Same; + } + + let id = id.into(); + match self.have_vtable.entry(id) { + Entry::Occupied(mut entry) => { + if *entry.get() < result { + entry.insert(result); + ConstrainResult::Changed + } else { + ConstrainResult::Same + } + } + Entry::Vacant(entry) => { + entry.insert(result); + ConstrainResult::Changed + } + } + } + + fn forward(&mut self, from: Id1, to: Id2) -> ConstrainResult + where + Id1: Into, + Id2: Into, + { + let from = from.into(); + let to = to.into(); + + match self.have_vtable.get(&from) { + None => ConstrainResult::Same, + Some(r) => self.insert(to, *r), + } + } +} + +impl<'ctx> MonotoneFramework for HasVtableAnalysis<'ctx> { + type Node = ItemId; + type Extra = &'ctx BindgenContext; + type Output = HashMap; + + fn new(ctx: &'ctx BindgenContext) -> HasVtableAnalysis<'ctx> { + let have_vtable = HashMap::default(); + let dependencies = generate_dependencies(ctx, Self::consider_edge); + + HasVtableAnalysis { + ctx, + have_vtable, + dependencies, + } + } + + fn initial_worklist(&self) -> Vec { + self.ctx.allowlisted_items().iter().copied().collect() + } + + fn constrain(&mut self, id: ItemId) -> ConstrainResult { + trace!("constrain {id:?}"); + + let item = self.ctx.resolve_item(id); + let ty = match item.as_type() { + None => return ConstrainResult::Same, + Some(ty) => ty, + }; + + // TODO #851: figure out a way to handle deriving from template type parameters. + match *ty.kind() { + TypeKind::TemplateAlias(t, _) | + TypeKind::Alias(t) | + TypeKind::ResolvedTypeRef(t) | + TypeKind::Reference(t) => { + trace!( + " aliases and references forward to their inner type" + ); + self.forward(t, id) + } + + TypeKind::Comp(ref info) => { + trace!(" comp considers its own methods and bases"); + let mut result = HasVtableResult::No; + + if info.has_own_virtual_method() { + trace!(" comp has its own virtual method"); + result |= HasVtableResult::SelfHasVtable; + } + + let bases_has_vtable = info.base_members().iter().any(|base| { + trace!(" comp has a base with a vtable: {base:?}"); + self.have_vtable.contains_key(&base.ty.into()) + }); + if bases_has_vtable { + result |= HasVtableResult::BaseHasVtable; + } + + self.insert(id, result) + } + + TypeKind::TemplateInstantiation(ref inst) => { + self.forward(inst.template_definition(), id) + } + + _ => ConstrainResult::Same, + } + } + + fn each_depending_on(&self, id: ItemId, mut f: F) + where + F: FnMut(ItemId), + { + if let Some(edges) = self.dependencies.get(&id) { + for item in edges { + trace!("enqueue {item:?} into worklist"); + f(*item); + } + } + } +} + +impl<'ctx> From> for HashMap { + fn from(analysis: HasVtableAnalysis<'ctx>) -> Self { + // We let the lack of an entry mean "No" to save space. + extra_assert!(analysis + .have_vtable + .values() + .all(|v| { *v != HasVtableResult::No })); + + analysis.have_vtable + } +} + +/// A convenience trait for the things for which we might wonder if they have a +/// vtable during codegen. +/// +/// This is not for _computing_ whether the thing has a vtable, it is for +/// looking up the results of the `HasVtableAnalysis`'s computations for a +/// specific thing. +pub(crate) trait HasVtable { + /// Return `true` if this thing has vtable, `false` otherwise. 
+ fn has_vtable(&self, ctx: &BindgenContext) -> bool; + + /// Return `true` if this thing has an actual vtable pointer in itself, as + /// opposed to transitively in a base member. + fn has_vtable_ptr(&self, ctx: &BindgenContext) -> bool; +} diff --git a/vendor/bindgen/ir/analysis/mod.rs b/vendor/bindgen/ir/analysis/mod.rs new file mode 100644 index 00000000000000..74a305edfb5ac5 --- /dev/null +++ b/vendor/bindgen/ir/analysis/mod.rs @@ -0,0 +1,395 @@ +//! Fix-point analyses on the IR using the "monotone framework". +//! +//! A lattice is a set with a partial ordering between elements, where there is +//! a single least upper bound and a single greatest least bound for every +//! subset. We are dealing with finite lattices, which means that it has a +//! finite number of elements, and it follows that there exists a single top and +//! a single bottom member of the lattice. For example, the power set of a +//! finite set forms a finite lattice where partial ordering is defined by set +//! inclusion, that is `a <= b` if `a` is a subset of `b`. Here is the finite +//! lattice constructed from the set {0,1,2}: +//! +//! ```text +//! .----- Top = {0,1,2} -----. +//! / | \ +//! / | \ +//! / | \ +//! {0,1} -------. {0,2} .--------- {1,2} +//! | \ / \ / | +//! | / \ | +//! | / \ / \ | +//! {0} --------' {1} `---------- {2} +//! \ | / +//! \ | / +//! \ | / +//! `------ Bottom = {} ------' +//! ``` +//! +//! A monotone function `f` is a function where if `x <= y`, then it holds that +//! `f(x) <= f(y)`. It should be clear that running a monotone function to a +//! fix-point on a finite lattice will always terminate: `f` can only "move" +//! along the lattice in a single direction, and therefore can only either find +//! a fix-point in the middle of the lattice or continue to the top or bottom +//! depending if it is ascending or descending the lattice respectively. +//! +//! For a deeper introduction to the general form of this kind of analysis, see +//! [Static Program Analysis by Anders Møller and Michael I. Schwartzbach][spa]. +//! +//! [spa]: https://cs.au.dk/~amoeller/spa/spa.pdf + +// Re-export individual analyses. +mod template_params; +pub(crate) use self::template_params::UsedTemplateParameters; +mod derive; +pub use self::derive::DeriveTrait; +pub(crate) use self::derive::{as_cannot_derive_set, CannotDerive}; +mod has_vtable; +pub(crate) use self::has_vtable::{ + HasVtable, HasVtableAnalysis, HasVtableResult, +}; +mod has_destructor; +pub(crate) use self::has_destructor::HasDestructorAnalysis; +mod has_type_param_in_array; +pub(crate) use self::has_type_param_in_array::HasTypeParameterInArray; +mod has_float; +pub(crate) use self::has_float::HasFloat; +mod sizedness; +pub(crate) use self::sizedness::{ + Sizedness, SizednessAnalysis, SizednessResult, +}; + +use crate::ir::context::{BindgenContext, ItemId}; + +use crate::ir::traversal::{EdgeKind, Trace}; +use crate::HashMap; +use std::fmt; +use std::ops; + +/// An analysis in the monotone framework. +/// +/// Implementors of this trait must maintain the following two invariants: +/// +/// 1. The concrete data must be a member of a finite-height lattice. +/// 2. The concrete `constrain` method must be monotone: that is, +/// if `x <= y`, then `constrain(x) <= constrain(y)`. +/// +/// If these invariants do not hold, iteration to a fix-point might never +/// complete. +/// +/// For a simple example analysis, see the `ReachableFrom` type in the `tests` +/// module below. 
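+///
+/// As a rough sketch (editorial note, not from upstream bindgen), driving an
+/// implementation of this trait to its fix-point is a plain worklist loop,
+/// which is exactly what the `analyze` function below does:
+///
+/// ```ignore
+/// let mut analysis = Analysis::new(extra);
+/// let mut worklist = analysis.initial_worklist();
+/// while let Some(node) = worklist.pop() {
+///     if let ConstrainResult::Changed = analysis.constrain(node) {
+///         // Anything whose answer may depend on `node` must be revisited.
+///         analysis.each_depending_on(node, |dep| worklist.push(dep));
+///     }
+/// }
+/// let output = Analysis::Output::from(analysis);
+/// ```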
+pub(crate) trait MonotoneFramework: Sized + fmt::Debug { + /// The type of node in our dependency graph. + /// + /// This is just generic (and not `ItemId`) so that we can easily unit test + /// without constructing real `Item`s and their `ItemId`s. + type Node: Copy; + + /// Any extra data that is needed during computation. + /// + /// Again, this is just generic (and not `&BindgenContext`) so that we can + /// easily unit test without constructing real `BindgenContext`s full of + /// real `Item`s and real `ItemId`s. + type Extra: Sized; + + /// The final output of this analysis. Once we have reached a fix-point, we + /// convert `self` into this type, and return it as the final result of the + /// analysis. + type Output: From + fmt::Debug; + + /// Construct a new instance of this analysis. + fn new(extra: Self::Extra) -> Self; + + /// Get the initial set of nodes from which to start the analysis. Unless + /// you are sure of some domain-specific knowledge, this should be the + /// complete set of nodes. + fn initial_worklist(&self) -> Vec; + + /// Update the analysis for the given node. + /// + /// If this results in changing our internal state (ie, we discovered that + /// we have not reached a fix-point and iteration should continue), return + /// `ConstrainResult::Changed`. Otherwise, return `ConstrainResult::Same`. + /// When `constrain` returns `ConstrainResult::Same` for all nodes in the + /// set, we have reached a fix-point and the analysis is complete. + fn constrain(&mut self, node: Self::Node) -> ConstrainResult; + + /// For each node `d` that depends on the given `node`'s current answer when + /// running `constrain(d)`, call `f(d)`. This informs us which new nodes to + /// queue up in the worklist when `constrain(node)` reports updated + /// information. + fn each_depending_on(&self, node: Self::Node, f: F) + where + F: FnMut(Self::Node); +} + +/// Whether an analysis's `constrain` function modified the incremental results +/// or not. +#[derive(Debug, Copy, Clone, PartialEq, Eq, Default)] +pub(crate) enum ConstrainResult { + /// The incremental results were updated, and the fix-point computation + /// should continue. + Changed, + + /// The incremental results were not updated. + #[default] + Same, +} + +impl ops::BitOr for ConstrainResult { + type Output = Self; + + fn bitor(self, rhs: ConstrainResult) -> Self::Output { + if self == ConstrainResult::Changed || rhs == ConstrainResult::Changed { + ConstrainResult::Changed + } else { + ConstrainResult::Same + } + } +} + +impl ops::BitOrAssign for ConstrainResult { + fn bitor_assign(&mut self, rhs: ConstrainResult) { + *self = *self | rhs; + } +} + +/// Run an analysis in the monotone framework. +pub(crate) fn analyze(extra: Analysis::Extra) -> Analysis::Output +where + Analysis: MonotoneFramework, +{ + let mut analysis = Analysis::new(extra); + let mut worklist = analysis.initial_worklist(); + + while let Some(node) = worklist.pop() { + if let ConstrainResult::Changed = analysis.constrain(node) { + analysis.each_depending_on(node, |needs_work| { + worklist.push(needs_work); + }); + } + } + + analysis.into() +} + +/// Generate the dependency map for analysis +pub(crate) fn generate_dependencies( + ctx: &BindgenContext, + consider_edge: F, +) -> HashMap> +where + F: Fn(EdgeKind) -> bool, +{ + let mut dependencies = HashMap::default(); + + for &item in ctx.allowlisted_items() { + dependencies.entry(item).or_insert_with(Vec::new); + + { + // We reverse our natural IR graph edges to find dependencies + // between nodes. 
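+            // For example (editorial note; `S` and `T` are hypothetical): if
+            // an allowlisted struct `S` has a field of type `T`, the natural
+            // IR edge from `S` to `T` is recorded here as an entry for `T`
+            // pointing back at `S`, so learning something new about `T`
+            // re-queues `S` on the worklist.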
+ item.trace( + ctx, + &mut |sub_item: ItemId, edge_kind| { + if ctx.allowlisted_items().contains(&sub_item) && + consider_edge(edge_kind) + { + dependencies + .entry(sub_item) + .or_insert_with(Vec::new) + .push(item); + } + }, + &(), + ); + } + } + dependencies +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::HashSet; + + // Here we find the set of nodes that are reachable from any given + // node. This is a lattice mapping nodes to subsets of all nodes. Our join + // function is set union. + // + // This is our test graph: + // + // +---+ +---+ + // | | | | + // | 1 | .----| 2 | + // | | | | | + // +---+ | +---+ + // | | ^ + // | | | + // | +---+ '------' + // '----->| | + // | 3 | + // .------| |------. + // | +---+ | + // | ^ | + // v | v + // +---+ | +---+ +---+ + // | | | | | | | + // | 4 | | | 5 |--->| 6 | + // | | | | | | | + // +---+ | +---+ +---+ + // | | | | + // | | | v + // | +---+ | +---+ + // | | | | | | + // '----->| 7 |<-----' | 8 | + // | | | | + // +---+ +---+ + // + // And here is the mapping from a node to the set of nodes that are + // reachable from it within the test graph: + // + // 1: {3,4,5,6,7,8} + // 2: {2} + // 3: {3,4,5,6,7,8} + // 4: {3,4,5,6,7,8} + // 5: {3,4,5,6,7,8} + // 6: {8} + // 7: {3,4,5,6,7,8} + // 8: {} + + #[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)] + struct Node(usize); + + #[derive(Clone, Debug, Default, PartialEq, Eq)] + struct Graph(HashMap>); + + impl Graph { + fn make_test_graph() -> Graph { + let mut g = Graph::default(); + g.0.insert(Node(1), vec![Node(3)]); + g.0.insert(Node(2), vec![Node(2)]); + g.0.insert(Node(3), vec![Node(4), Node(5)]); + g.0.insert(Node(4), vec![Node(7)]); + g.0.insert(Node(5), vec![Node(6), Node(7)]); + g.0.insert(Node(6), vec![Node(8)]); + g.0.insert(Node(7), vec![Node(3)]); + g.0.insert(Node(8), vec![]); + g + } + + fn reverse(&self) -> Graph { + let mut reversed = Graph::default(); + for (node, edges) in &self.0 { + reversed.0.entry(*node).or_insert_with(Vec::new); + for referent in edges { + reversed + .0 + .entry(*referent) + .or_insert_with(Vec::new) + .push(*node); + } + } + reversed + } + } + + #[derive(Clone, Debug, PartialEq, Eq)] + struct ReachableFrom<'a> { + reachable: HashMap>, + graph: &'a Graph, + reversed: Graph, + } + + impl<'a> MonotoneFramework for ReachableFrom<'a> { + type Node = Node; + type Extra = &'a Graph; + type Output = HashMap>; + + fn new(graph: &'a Graph) -> Self { + let reversed = graph.reverse(); + ReachableFrom { + reachable: Default::default(), + graph, + reversed, + } + } + + fn initial_worklist(&self) -> Vec { + self.graph.0.keys().copied().collect() + } + + fn constrain(&mut self, node: Node) -> ConstrainResult { + // The set of nodes reachable from a node `x` is + // + // reachable(x) = s_0 U s_1 U ... U reachable(s_0) U reachable(s_1) U ... + // + // where there exist edges from `x` to each of `s_0, s_1, ...`. + // + // Yes, what follows is a **terribly** inefficient set union + // implementation. Don't copy this code outside of this test! 
+ + let original_size = self.reachable.entry(node).or_default().len(); + + for sub_node in &self.graph.0[&node] { + self.reachable.get_mut(&node).unwrap().insert(*sub_node); + + let sub_reachable = + self.reachable.entry(*sub_node).or_default().clone(); + + for transitive in sub_reachable { + self.reachable.get_mut(&node).unwrap().insert(transitive); + } + } + + let new_size = self.reachable[&node].len(); + if original_size == new_size { + ConstrainResult::Same + } else { + ConstrainResult::Changed + } + } + + fn each_depending_on(&self, node: Node, mut f: F) + where + F: FnMut(Node), + { + for dep in &self.reversed.0[&node] { + f(*dep); + } + } + } + + impl<'a> From> for HashMap> { + fn from(reachable: ReachableFrom<'a>) -> Self { + reachable.reachable + } + } + + #[test] + fn monotone() { + let g = Graph::make_test_graph(); + let reachable = analyze::(&g); + println!("reachable = {reachable:#?}"); + + fn nodes(nodes: A) -> HashSet + where + A: AsRef<[usize]>, + { + nodes.as_ref().iter().copied().map(Node).collect() + } + + let mut expected = HashMap::default(); + expected.insert(Node(1), nodes([3, 4, 5, 6, 7, 8])); + expected.insert(Node(2), nodes([2])); + expected.insert(Node(3), nodes([3, 4, 5, 6, 7, 8])); + expected.insert(Node(4), nodes([3, 4, 5, 6, 7, 8])); + expected.insert(Node(5), nodes([3, 4, 5, 6, 7, 8])); + expected.insert(Node(6), nodes([8])); + expected.insert(Node(7), nodes([3, 4, 5, 6, 7, 8])); + expected.insert(Node(8), nodes([])); + println!("expected = {expected:#?}"); + + assert_eq!(reachable, expected); + } +} diff --git a/vendor/bindgen/ir/analysis/sizedness.rs b/vendor/bindgen/ir/analysis/sizedness.rs new file mode 100644 index 00000000000000..ce3c2c3da15a47 --- /dev/null +++ b/vendor/bindgen/ir/analysis/sizedness.rs @@ -0,0 +1,353 @@ +//! Determining the sizedness of types (as base classes and otherwise). + +use super::{ + generate_dependencies, ConstrainResult, HasVtable, MonotoneFramework, +}; +use crate::ir::context::{BindgenContext, TypeId}; +use crate::ir::item::IsOpaque; +use crate::ir::traversal::EdgeKind; +use crate::ir::ty::TypeKind; +use crate::{Entry, HashMap}; +use std::{cmp, ops}; + +/// The result of the `Sizedness` analysis for an individual item. +/// +/// This is a chain lattice of the form: +/// +/// ```ignore +/// NonZeroSized +/// | +/// DependsOnTypeParam +/// | +/// ZeroSized +/// ``` +/// +/// We initially assume that all types are `ZeroSized` and then update our +/// understanding as we learn more about each type. +#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Default)] +pub(crate) enum SizednessResult { + /// The type is zero-sized. + /// + /// This means that if it is a C++ type, and is not being used as a base + /// member, then we must add an `_address` byte to enforce the + /// unique-address-per-distinct-object-instance rule. + #[default] + ZeroSized, + + /// Whether this type is zero-sized or not depends on whether a type + /// parameter is zero-sized or not. + /// + /// For example, given these definitions: + /// + /// ```c++ + /// template + /// class Flongo : public T {}; + /// + /// class Empty {}; + /// + /// class NonEmpty { int x; }; + /// ``` + /// + /// Then `Flongo` is zero-sized, and needs an `_address` byte + /// inserted, while `Flongo` is *not* zero-sized, and should *not* + /// have an `_address` byte inserted. + /// + /// We don't properly handle this situation correctly right now: + /// + DependsOnTypeParam, + + /// Has some size that is known to be greater than zero. 
That doesn't mean + /// it has a static size, but it is not zero sized for sure. In other words, + /// it might contain an incomplete array or some other dynamically sized + /// type. + NonZeroSized, +} + +impl SizednessResult { + /// Take the least upper bound of `self` and `rhs`. + pub(crate) fn join(self, rhs: Self) -> Self { + cmp::max(self, rhs) + } +} + +impl ops::BitOr for SizednessResult { + type Output = Self; + + fn bitor(self, rhs: SizednessResult) -> Self::Output { + self.join(rhs) + } +} + +impl ops::BitOrAssign for SizednessResult { + fn bitor_assign(&mut self, rhs: SizednessResult) { + *self = self.join(rhs); + } +} + +/// An analysis that computes the sizedness of all types. +/// +/// * For types with known sizes -- for example pointers, scalars, etc... -- +/// they are assigned `NonZeroSized`. +/// +/// * For compound structure types with one or more fields, they are assigned +/// `NonZeroSized`. +/// +/// * For compound structure types without any fields, the results of the bases +/// are `join`ed. +/// +/// * For type parameters, `DependsOnTypeParam` is assigned. +#[derive(Debug)] +pub(crate) struct SizednessAnalysis<'ctx> { + ctx: &'ctx BindgenContext, + dependencies: HashMap>, + // Incremental results of the analysis. Missing entries are implicitly + // considered `ZeroSized`. + sized: HashMap, +} + +impl SizednessAnalysis<'_> { + fn consider_edge(kind: EdgeKind) -> bool { + // These are the only edges that can affect whether a type is + // zero-sized or not. + matches!( + kind, + EdgeKind::TemplateArgument | + EdgeKind::TemplateParameterDefinition | + EdgeKind::TemplateDeclaration | + EdgeKind::TypeReference | + EdgeKind::BaseMember | + EdgeKind::Field + ) + } + + /// Insert an incremental result, and return whether this updated our + /// knowledge of types and we should continue the analysis. 
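+ ///
+ /// For example (a sketch of the behaviour implemented below): inserting
+ /// `NonZeroSized` over an existing `DependsOnTypeParam` entry upgrades the
+ /// entry and returns `ConstrainResult::Changed`, while inserting
+ /// `ZeroSized` is always a no-op, because a missing entry already means
+ /// `ZeroSized`.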
+ fn insert( + &mut self, + id: TypeId, + result: SizednessResult, + ) -> ConstrainResult { + trace!("inserting {result:?} for {id:?}"); + + if let SizednessResult::ZeroSized = result { + return ConstrainResult::Same; + } + + match self.sized.entry(id) { + Entry::Occupied(mut entry) => { + if *entry.get() < result { + entry.insert(result); + ConstrainResult::Changed + } else { + ConstrainResult::Same + } + } + Entry::Vacant(entry) => { + entry.insert(result); + ConstrainResult::Changed + } + } + } + + fn forward(&mut self, from: TypeId, to: TypeId) -> ConstrainResult { + match self.sized.get(&from) { + None => ConstrainResult::Same, + Some(r) => self.insert(to, *r), + } + } +} + +impl<'ctx> MonotoneFramework for SizednessAnalysis<'ctx> { + type Node = TypeId; + type Extra = &'ctx BindgenContext; + type Output = HashMap; + + fn new(ctx: &'ctx BindgenContext) -> SizednessAnalysis<'ctx> { + let dependencies = generate_dependencies(ctx, Self::consider_edge) + .into_iter() + .filter_map(|(id, sub_ids)| { + id.as_type_id(ctx).map(|id| { + ( + id, + sub_ids + .into_iter() + .filter_map(|s| s.as_type_id(ctx)) + .collect::>(), + ) + }) + }) + .collect(); + + let sized = HashMap::default(); + + SizednessAnalysis { + ctx, + dependencies, + sized, + } + } + + fn initial_worklist(&self) -> Vec { + self.ctx + .allowlisted_items() + .iter() + .filter_map(|id| id.as_type_id(self.ctx)) + .collect() + } + + fn constrain(&mut self, id: TypeId) -> ConstrainResult { + trace!("constrain {id:?}"); + + if let Some(SizednessResult::NonZeroSized) = self.sized.get(&id) { + trace!(" already know it is not zero-sized"); + return ConstrainResult::Same; + } + + if id.has_vtable_ptr(self.ctx) { + trace!(" has an explicit vtable pointer, therefore is not zero-sized"); + return self.insert(id, SizednessResult::NonZeroSized); + } + + let ty = self.ctx.resolve_type(id); + + if id.is_opaque(self.ctx, &()) { + trace!(" type is opaque; checking layout..."); + let result = + ty.layout(self.ctx).map_or(SizednessResult::ZeroSized, |l| { + if l.size == 0 { + trace!(" ...layout has size == 0"); + SizednessResult::ZeroSized + } else { + trace!(" ...layout has size > 0"); + SizednessResult::NonZeroSized + } + }); + return self.insert(id, result); + } + + match *ty.kind() { + TypeKind::Void => { + trace!(" void is zero-sized"); + self.insert(id, SizednessResult::ZeroSized) + } + + TypeKind::TypeParam => { + trace!( + " type params sizedness depends on what they're \ + instantiated as" + ); + self.insert(id, SizednessResult::DependsOnTypeParam) + } + + TypeKind::Int(..) | + TypeKind::Float(..) | + TypeKind::Complex(..) | + TypeKind::Function(..) | + TypeKind::Enum(..) | + TypeKind::Reference(..) | + TypeKind::NullPtr | + TypeKind::ObjCId | + TypeKind::ObjCSel | + TypeKind::Pointer(..) => { + trace!(" {:?} is known not to be zero-sized", ty.kind()); + self.insert(id, SizednessResult::NonZeroSized) + } + + TypeKind::ObjCInterface(..) 
=> { + trace!(" obj-c interfaces always have at least the `isa` pointer"); + self.insert(id, SizednessResult::NonZeroSized) + } + + TypeKind::TemplateAlias(t, _) | + TypeKind::Alias(t) | + TypeKind::BlockPointer(t) | + TypeKind::ResolvedTypeRef(t) => { + trace!(" aliases and type refs forward to their inner type"); + self.forward(t, id) + } + + TypeKind::TemplateInstantiation(ref inst) => { + trace!( + " template instantiations are zero-sized if their \ + definition is zero-sized" + ); + self.forward(inst.template_definition(), id) + } + + TypeKind::Array(_, 0) => { + trace!(" arrays of zero elements are zero-sized"); + self.insert(id, SizednessResult::ZeroSized) + } + TypeKind::Array(..) => { + trace!(" arrays of > 0 elements are not zero-sized"); + self.insert(id, SizednessResult::NonZeroSized) + } + TypeKind::Vector(..) => { + trace!(" vectors are not zero-sized"); + self.insert(id, SizednessResult::NonZeroSized) + } + + TypeKind::Comp(ref info) => { + trace!(" comp considers its own fields and bases"); + + if !info.fields().is_empty() { + return self.insert(id, SizednessResult::NonZeroSized); + } + + let result = info + .base_members() + .iter() + .filter_map(|base| self.sized.get(&base.ty)) + .fold(SizednessResult::ZeroSized, |a, b| a.join(*b)); + + self.insert(id, result) + } + + TypeKind::Opaque => { + unreachable!("covered by the .is_opaque() check above") + } + + TypeKind::UnresolvedTypeRef(..) => { + unreachable!("Should have been resolved after parsing!"); + } + } + } + + fn each_depending_on(&self, id: TypeId, mut f: F) + where + F: FnMut(TypeId), + { + if let Some(edges) = self.dependencies.get(&id) { + for ty in edges { + trace!("enqueue {ty:?} into worklist"); + f(*ty); + } + } + } +} + +impl<'ctx> From> for HashMap { + fn from(analysis: SizednessAnalysis<'ctx>) -> Self { + // We let the lack of an entry mean "ZeroSized" to save space. + extra_assert!(analysis + .sized + .values() + .all(|v| { *v != SizednessResult::ZeroSized })); + + analysis.sized + } +} + +/// A convenience trait for querying whether some type or ID is sized. +/// +/// This is not for _computing_ whether the thing is sized, it is for looking up +/// the results of the `Sizedness` analysis's computations for a specific thing. +pub(crate) trait Sizedness { + /// Get the sizedness of this type. + fn sizedness(&self, ctx: &BindgenContext) -> SizednessResult; + + /// Is the sizedness for this type `SizednessResult::ZeroSized`? + fn is_zero_sized(&self, ctx: &BindgenContext) -> bool { + self.sizedness(ctx) == SizednessResult::ZeroSized + } +} diff --git a/vendor/bindgen/ir/analysis/template_params.rs b/vendor/bindgen/ir/analysis/template_params.rs new file mode 100644 index 00000000000000..df8f861cfe88a4 --- /dev/null +++ b/vendor/bindgen/ir/analysis/template_params.rs @@ -0,0 +1,601 @@ +//! Discover which template type parameters are actually used. +//! +//! ### Why do we care? +//! +//! C++ allows ignoring template parameters, while Rust does not. Usually we can +//! blindly stick a `PhantomData` inside a generic Rust struct to make up for +//! this. That doesn't work for templated type aliases, however: +//! +//! ```C++ +//! template +//! using Fml = int; +//! ``` +//! +//! If we generate the naive Rust code for this alias, we get: +//! +//! ```ignore +//! pub(crate) type Fml = ::std::os::raw::int; +//! ``` +//! +//! And this is rejected by `rustc` due to the unused type parameter. +//! +//! (Aside: in these simple cases, `libclang` will often just give us the +//! 
aliased type directly, and we will never even know we were dealing with +//! aliases, let alone templated aliases. It's the more convoluted scenarios +//! where we get to have some fun...) +//! +//! For such problematic template aliases, we could generate a tuple whose +//! second member is a `PhantomData`. Or, if we wanted to go the extra mile, +//! we could even generate some smarter wrapper that implements `Deref`, +//! `DerefMut`, `From`, `Into`, `AsRef`, and `AsMut` to the actually aliased +//! type. However, this is still lackluster: +//! +//! 1. Even with a billion conversion-trait implementations, using the generated +//! bindings is rather un-ergonomic. +//! 2. With either of these solutions, we need to keep track of which aliases +//! we've transformed like this in order to generate correct uses of the +//! wrapped type. +//! +//! Given that we have to properly track which template parameters ended up used +//! for (2), we might as well leverage that information to make ergonomic +//! bindings that don't contain any unused type parameters at all, and +//! completely avoid the pain of (1). +//! +//! ### How do we determine which template parameters are used? +//! +//! Determining which template parameters are actually used is a trickier +//! problem than it might seem at a glance. On the one hand, trivial uses are +//! easy to detect: +//! +//! ```C++ +//! template +//! class Foo { +//! T trivial_use_of_t; +//! }; +//! ``` +//! +//! It gets harder when determining if one template parameter is used depends on +//! determining if another template parameter is used. In this example, whether +//! `U` is used depends on whether `T` is used. +//! +//! ```C++ +//! template +//! class DoesntUseT { +//! int x; +//! }; +//! +//! template +//! class Fml { +//! DoesntUseT lololol; +//! }; +//! ``` +//! +//! We can express the set of used template parameters as a constraint solving +//! problem (where the set of template parameters used by a given IR item is the +//! union of its sub-item's used template parameters) and iterate to a +//! fixed-point. +//! +//! We use the `ir::analysis::MonotoneFramework` infrastructure for this +//! fix-point analysis, where our lattice is the mapping from each IR item to +//! the powerset of the template parameters that appear in the input C++ header, +//! our join function is set union. The set of template parameters appearing in +//! the program is finite, as is the number of IR items. We start at our +//! lattice's bottom element: every item mapping to an empty set of template +//! parameters. Our analysis only adds members to each item's set of used +//! template parameters, never removes them, so it is monotone. Because our +//! lattice is finite and our constraint function is monotone, iteration to a +//! fix-point will terminate. +//! +//! See `src/ir/analysis.rs` for more. + +use super::{ConstrainResult, MonotoneFramework}; +use crate::ir::context::{BindgenContext, ItemId}; +use crate::ir::item::{Item, ItemSet}; +use crate::ir::template::{TemplateInstantiation, TemplateParameters}; +use crate::ir::traversal::{EdgeKind, Trace}; +use crate::ir::ty::TypeKind; +use crate::{HashMap, HashSet}; + +/// An analysis that finds for each IR item its set of template parameters that +/// it uses. 
+/// +/// We use the monotone constraint function `template_param_usage`, defined as +/// follows: +/// +/// * If `T` is a named template type parameter, it trivially uses itself: +/// +/// ```ignore +/// template_param_usage(T) = { T } +/// ``` +/// +/// * If `inst` is a template instantiation, `inst.args` are the template +/// instantiation's template arguments, `inst.def` is the template definition +/// being instantiated, and `inst.def.params` is the template definition's +/// template parameters, then the instantiation's usage is the union of each +/// of its arguments' usages *if* the corresponding template parameter is in +/// turn used by the template definition: +/// +/// ```ignore +/// template_param_usage(inst) = union( +/// template_param_usage(inst.args[i]) +/// for i in 0..length(inst.args.length) +/// if inst.def.params[i] in template_param_usage(inst.def) +/// ) +/// ``` +/// +/// * Finally, for all other IR item kinds, we use our lattice's `join` +/// operation: set union with each successor of the given item's template +/// parameter usage: +/// +/// ```ignore +/// template_param_usage(v) = +/// union(template_param_usage(w) for w in successors(v)) +/// ``` +/// +/// Note that we ignore certain edges in the graph, such as edges from a +/// template declaration to its template parameters' definitions for this +/// analysis. If we didn't, then we would mistakenly determine that ever +/// template parameter is always used. +/// +/// The final wrinkle is handling of blocklisted types. Normally, we say that +/// the set of allowlisted items is the transitive closure of items explicitly +/// called out for allowlisting, *without* any items explicitly called out as +/// blocklisted. However, for the purposes of this analysis's correctness, we +/// simplify and consider run the analysis on the full transitive closure of +/// allowlisted items. We do, however, treat instantiations of blocklisted items +/// specially; see `constrain_instantiation_of_blocklisted_template` and its +/// documentation for details. +#[derive(Debug, Clone)] +pub(crate) struct UsedTemplateParameters<'ctx> { + ctx: &'ctx BindgenContext, + + // The Option is only there for temporary moves out of the hash map. See the + // comments in `UsedTemplateParameters::constrain` below. + used: HashMap>, + + dependencies: HashMap>, + + // The set of allowlisted items, without any blocklisted items reachable + // from the allowlisted items which would otherwise be considered + // allowlisted as well. + allowlisted_items: HashSet, +} + +impl UsedTemplateParameters<'_> { + fn consider_edge(kind: EdgeKind) -> bool { + match kind { + // For each of these kinds of edges, if the referent uses a template + // parameter, then it should be considered that the origin of the + // edge also uses the template parameter. + EdgeKind::TemplateArgument | + EdgeKind::BaseMember | + EdgeKind::Field | + EdgeKind::Constructor | + EdgeKind::Destructor | + EdgeKind::VarType | + EdgeKind::FunctionReturn | + EdgeKind::FunctionParameter | + EdgeKind::TypeReference => true, + + // An inner var or type using a template parameter is orthogonal + // from whether we use it. See template-param-usage-{6,11}.hpp. + EdgeKind::InnerVar | EdgeKind::InnerType => false, + + // We can't emit machine code for new monomorphizations of class + // templates' methods (and don't detect explicit instantiations) so + // we must ignore template parameters that are only used by + // methods. 
This doesn't apply to a function type's return or + // parameter types, however, because of type aliases of function + // pointers that use template parameters, eg + // tests/headers/struct_with_typedef_template_arg.hpp + EdgeKind::Method => false, + + // If we considered these edges, we would end up mistakenly claiming + // that every template parameter always used. + EdgeKind::TemplateDeclaration | + EdgeKind::TemplateParameterDefinition => false, + + // Since we have to be careful about which edges we consider for + // this analysis to be correct, we ignore generic edges. We also + // avoid a `_` wild card to force authors of new edge kinds to + // determine whether they need to be considered by this analysis. + EdgeKind::Generic => false, + } + } + + fn take_this_id_usage_set>( + &mut self, + this_id: Id, + ) -> ItemSet { + let this_id = this_id.into(); + self.used + .get_mut(&this_id) + .expect( + "Should have a set of used template params for every item \ + id", + ) + .take() + .expect( + "Should maintain the invariant that all used template param \ + sets are `Some` upon entry of `constrain`", + ) + } + + /// We say that blocklisted items use all of their template parameters. The + /// blocklisted type is most likely implemented explicitly by the user, + /// since it won't be in the generated bindings, and we don't know exactly + /// what they'll to with template parameters, but we can push the issue down + /// the line to them. + fn constrain_instantiation_of_blocklisted_template( + &self, + this_id: ItemId, + used_by_this_id: &mut ItemSet, + instantiation: &TemplateInstantiation, + ) { + trace!( + " instantiation of blocklisted template, uses all template \ + arguments" + ); + + let args = instantiation + .template_arguments() + .iter() + .map(|a| { + a.into_resolver() + .through_type_refs() + .through_type_aliases() + .resolve(self.ctx) + .id() + }) + .filter(|a| *a != this_id) + .flat_map(|a| { + self.used + .get(&a) + .expect("Should have a used entry for the template arg") + .as_ref() + .expect( + "Because a != this_id, and all used template \ + param sets other than this_id's are `Some`, \ + a's used template param set should be `Some`", + ) + .iter() + }); + + used_by_this_id.extend(args); + } + + /// A template instantiation's concrete template argument is only used if + /// the template definition uses the corresponding template parameter. 
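+ ///
+ /// For example (illustrative, reusing the module-level docs): for an
+ /// instantiation of `template <typename T> class Foo { T trivial_use_of_t; };`,
+ /// the definition uses `T`, so whatever template parameters appear in the
+ /// corresponding concrete argument are propagated into the instantiation's
+ /// own usage set; an argument bound to a parameter the definition never
+ /// uses contributes nothing.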
+ fn constrain_instantiation( + &self, + this_id: ItemId, + used_by_this_id: &mut ItemSet, + instantiation: &TemplateInstantiation, + ) { + trace!(" template instantiation"); + + let decl = self.ctx.resolve_type(instantiation.template_definition()); + let args = instantiation.template_arguments(); + + let params = decl.self_template_params(self.ctx); + + debug_assert!(this_id != instantiation.template_definition()); + let used_by_def = self.used + .get(&instantiation.template_definition().into()) + .expect("Should have a used entry for instantiation's template definition") + .as_ref() + .expect("And it should be Some because only this_id's set is None, and an \ + instantiation's template definition should never be the \ + instantiation itself"); + + for (arg, param) in args.iter().zip(params.iter()) { + trace!( + " instantiation's argument {arg:?} is used if definition's \ + parameter {param:?} is used", + ); + + if used_by_def.contains(¶m.into()) { + trace!(" param is used by template definition"); + + let arg = arg + .into_resolver() + .through_type_refs() + .through_type_aliases() + .resolve(self.ctx) + .id(); + + if arg == this_id { + continue; + } + + let used_by_arg = self + .used + .get(&arg) + .expect("Should have a used entry for the template arg") + .as_ref() + .expect( + "Because arg != this_id, and all used template \ + param sets other than this_id's are `Some`, \ + arg's used template param set should be \ + `Some`", + ) + .iter(); + used_by_this_id.extend(used_by_arg); + } + } + } + + /// The join operation on our lattice: the set union of all of this ID's + /// successors. + fn constrain_join(&self, used_by_this_id: &mut ItemSet, item: &Item) { + trace!(" other item: join with successors' usage"); + + item.trace( + self.ctx, + &mut |sub_id, edge_kind| { + // Ignore ourselves, since union with ourself is a + // no-op. Ignore edges that aren't relevant to the + // analysis. + if sub_id == item.id() || !Self::consider_edge(edge_kind) { + return; + } + + let used_by_sub_id = self + .used + .get(&sub_id) + .expect("Should have a used set for the sub_id successor") + .as_ref() + .expect( + "Because sub_id != id, and all used template \ + param sets other than id's are `Some`, \ + sub_id's used template param set should be \ + `Some`", + ) + .iter(); + + trace!( + " union with {sub_id:?}'s usage: {:?}", + used_by_sub_id.clone().collect::>() + ); + + used_by_this_id.extend(used_by_sub_id); + }, + &(), + ); + } +} + +impl<'ctx> MonotoneFramework for UsedTemplateParameters<'ctx> { + type Node = ItemId; + type Extra = &'ctx BindgenContext; + type Output = HashMap; + + fn new(ctx: &'ctx BindgenContext) -> UsedTemplateParameters<'ctx> { + let mut used = HashMap::default(); + let mut dependencies = HashMap::default(); + let allowlisted_items: HashSet<_> = + ctx.allowlisted_items().iter().copied().collect(); + + let allowlisted_and_blocklisted_items: ItemSet = allowlisted_items + .iter() + .copied() + .flat_map(|i| { + let mut reachable = vec![i]; + i.trace( + ctx, + &mut |s, _| { + reachable.push(s); + }, + &(), + ); + reachable + }) + .collect(); + + for item in allowlisted_and_blocklisted_items { + dependencies.entry(item).or_insert_with(Vec::new); + used.entry(item).or_insert_with(|| Some(ItemSet::new())); + + { + // We reverse our natural IR graph edges to find dependencies + // between nodes. 
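+                // (This mirrors `generate_dependencies` in
+                // `ir/analysis/mod.rs`: each IR edge item -> sub_item is
+                // recorded in reverse as `dependencies[sub_item].push(item)`.
+                // The extra dependencies that are specific to template
+                // instantiations are added separately just below.)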
+ item.trace( + ctx, + &mut |sub_item: ItemId, _| { + used.entry(sub_item) + .or_insert_with(|| Some(ItemSet::new())); + dependencies + .entry(sub_item) + .or_insert_with(Vec::new) + .push(item); + }, + &(), + ); + } + + // Additionally, whether a template instantiation's template + // arguments are used depends on whether the template declaration's + // generic template parameters are used. + let item_kind = + ctx.resolve_item(item).as_type().map(|ty| ty.kind()); + if let Some(TypeKind::TemplateInstantiation(inst)) = item_kind { + let decl = ctx.resolve_type(inst.template_definition()); + let args = inst.template_arguments(); + + // Although template definitions should always have + // template parameters, there is a single exception: + // opaque templates. Hence the unwrap_or. + let params = decl.self_template_params(ctx); + + for (arg, param) in args.iter().zip(params.iter()) { + let arg = arg + .into_resolver() + .through_type_aliases() + .through_type_refs() + .resolve(ctx) + .id(); + + let param = param + .into_resolver() + .through_type_aliases() + .through_type_refs() + .resolve(ctx) + .id(); + + used.entry(arg).or_insert_with(|| Some(ItemSet::new())); + used.entry(param).or_insert_with(|| Some(ItemSet::new())); + + dependencies + .entry(arg) + .or_insert_with(Vec::new) + .push(param); + } + } + } + + if cfg!(feature = "__testing_only_extra_assertions") { + // Invariant: The `used` map has an entry for every allowlisted + // item, as well as all explicitly blocklisted items that are + // reachable from allowlisted items. + // + // Invariant: the `dependencies` map has an entry for every + // allowlisted item. + // + // (This is so that every item we call `constrain` on is guaranteed + // to have a set of template parameters, and we can allow + // blocklisted templates to use all of their parameters). + for item in &allowlisted_items { + extra_assert!(used.contains_key(item)); + extra_assert!(dependencies.contains_key(item)); + item.trace( + ctx, + &mut |sub_item, _| { + extra_assert!(used.contains_key(&sub_item)); + extra_assert!(dependencies.contains_key(&sub_item)); + }, + &(), + ); + } + } + + UsedTemplateParameters { + ctx, + used, + dependencies, + allowlisted_items, + } + } + + fn initial_worklist(&self) -> Vec { + // The transitive closure of all allowlisted items, including explicitly + // blocklisted items. + self.ctx + .allowlisted_items() + .iter() + .copied() + .flat_map(|i| { + let mut reachable = vec![i]; + i.trace( + self.ctx, + &mut |s, _| { + reachable.push(s); + }, + &(), + ); + reachable + }) + .collect() + } + + fn constrain(&mut self, id: ItemId) -> ConstrainResult { + // Invariant: all hash map entries' values are `Some` upon entering and + // exiting this method. + extra_assert!(self.used.values().all(|v| v.is_some())); + + // Take the set for this ID out of the hash map while we mutate it based + // on other hash map entries. We *must* put it back into the hash map at + // the end of this method. This allows us to side-step HashMap's lack of + // an analog to slice::split_at_mut. + let mut used_by_this_id = self.take_this_id_usage_set(id); + + trace!("constrain {id:?}"); + trace!(" initially, used set is {used_by_this_id:?}"); + + let original_len = used_by_this_id.len(); + + let item = self.ctx.resolve_item(id); + let ty_kind = item.as_type().map(|ty| ty.kind()); + match ty_kind { + // Named template type parameters trivially use themselves. 
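+            // (For instance, the IR item for the `T` in
+            // `template <typename T> class Foo` ends up reporting exactly
+            // `{ T }` as its usage set.)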
+ Some(&TypeKind::TypeParam) => { + trace!(" named type, trivially uses itself"); + used_by_this_id.insert(id); + } + // Template instantiations only use their template arguments if the + // template definition uses the corresponding template parameter. + Some(TypeKind::TemplateInstantiation(inst)) => { + if self + .allowlisted_items + .contains(&inst.template_definition().into()) + { + self.constrain_instantiation( + id, + &mut used_by_this_id, + inst, + ); + } else { + self.constrain_instantiation_of_blocklisted_template( + id, + &mut used_by_this_id, + inst, + ); + } + } + // Otherwise, add the union of each of its referent item's template + // parameter usage. + _ => self.constrain_join(&mut used_by_this_id, item), + } + + trace!(" finally, used set is {used_by_this_id:?}"); + + let new_len = used_by_this_id.len(); + assert!( + new_len >= original_len, + "This is the property that ensures this function is monotone -- \ + if it doesn't hold, the analysis might never terminate!" + ); + + // Put the set back in the hash map and restore our invariant. + debug_assert!(self.used[&id].is_none()); + self.used.insert(id, Some(used_by_this_id)); + extra_assert!(self.used.values().all(|v| v.is_some())); + + if new_len == original_len { + ConstrainResult::Same + } else { + ConstrainResult::Changed + } + } + + fn each_depending_on(&self, item: ItemId, mut f: F) + where + F: FnMut(ItemId), + { + if let Some(edges) = self.dependencies.get(&item) { + for item in edges { + trace!("enqueue {item:?} into worklist"); + f(*item); + } + } + } +} + +impl<'ctx> From> for HashMap { + fn from(used_templ_params: UsedTemplateParameters<'ctx>) -> Self { + used_templ_params + .used + .into_iter() + .map(|(k, v)| (k, v.unwrap())) + .collect() + } +} diff --git a/vendor/bindgen/ir/annotations.rs b/vendor/bindgen/ir/annotations.rs new file mode 100644 index 00000000000000..7f5d74b3ee7549 --- /dev/null +++ b/vendor/bindgen/ir/annotations.rs @@ -0,0 +1,259 @@ +//! Types and functions related to bindgen annotation comments. +//! +//! Users can add annotations in doc comments to types that they would like to +//! replace other types with, mark as opaque, etc. This module deals with all of +//! that stuff. + +use std::str::FromStr; + +use crate::clang; + +/// What kind of visibility modifier should be used for a struct or field? +#[derive(Copy, PartialEq, Eq, PartialOrd, Ord, Clone, Debug, Default)] +pub enum FieldVisibilityKind { + /// Fields are marked as private, i.e., struct Foo {bar: bool} + Private, + /// Fields are marked as crate public, i.e., struct Foo {pub(crate) bar: bool} + PublicCrate, + /// Fields are marked as public, i.e., struct Foo {pub bar: bool} + #[default] + Public, +} + +impl FromStr for FieldVisibilityKind { + type Err = String; + + fn from_str(s: &str) -> Result { + match s { + "private" => Ok(Self::Private), + "crate" => Ok(Self::PublicCrate), + "public" => Ok(Self::Public), + _ => Err(format!("Invalid visibility kind: `{s}`")), + } + } +} + +impl std::fmt::Display for FieldVisibilityKind { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let s = match self { + FieldVisibilityKind::Private => "private", + FieldVisibilityKind::PublicCrate => "crate", + FieldVisibilityKind::Public => "public", + }; + + s.fmt(f) + } +} + +/// What kind of accessor should we provide for a field? +#[derive(Copy, PartialEq, Eq, Clone, Debug)] +pub(crate) enum FieldAccessorKind { + /// No accessor. + None, + /// Plain accessor. + Regular, + /// Unsafe accessor. + Unsafe, + /// Immutable accessor. 
+ Immutable, +} + +/// Annotations for a given item, or a field. +/// +/// You can see the kind of comments that are accepted in the [Doxygen documentation](https://www.doxygen.nl/manual/docblocks.html). +#[derive(Default, Clone, PartialEq, Eq, Debug)] +pub(crate) struct Annotations { + /// Whether this item is marked as opaque. Only applies to types. + opaque: bool, + /// Whether this item should be hidden from the output. Only applies to + /// types, or enum variants. + hide: bool, + /// Whether this type should be replaced by another. The name is a + /// namespace-aware path. + use_instead_of: Option>, + /// Manually disable deriving copy/clone on this type. Only applies to + /// struct or union types. + disallow_copy: bool, + /// Manually disable deriving debug on this type. + disallow_debug: bool, + /// Manually disable deriving/implement default on this type. + disallow_default: bool, + /// Whether to add a `#[must_use]` annotation to this type. + must_use_type: bool, + /// Visibility of struct fields. You can set this on + /// structs (it will apply to all the fields), or individual fields. + visibility_kind: Option, + /// The kind of accessor this field will have. Also can be applied to + /// structs so all the fields inside share it by default. + accessor_kind: Option, + /// Whether this enum variant should be constified. + /// + /// This is controlled by the `constant` attribute, this way: + /// + /// ```cpp + /// enum Foo { + /// Bar = 0, /**<
<div rustbindgen constant></div>
*/ + /// Baz = 0, + /// }; + /// ``` + /// + /// In that case, bindgen will generate a constant for `Bar` instead of + /// `Baz`. + constify_enum_variant: bool, + /// List of explicit derives for this type. + derives: Vec, + /// List of explicit attributes for this type. + attributes: Vec, +} + +fn parse_accessor(s: &str) -> FieldAccessorKind { + match s { + "false" => FieldAccessorKind::None, + "unsafe" => FieldAccessorKind::Unsafe, + "immutable" => FieldAccessorKind::Immutable, + _ => FieldAccessorKind::Regular, + } +} + +impl Annotations { + /// Construct new annotations for the given cursor and its bindgen comments + /// (if any). + pub(crate) fn new(cursor: &clang::Cursor) -> Option { + let mut anno = Annotations::default(); + let mut matched_one = false; + anno.parse(&cursor.comment(), &mut matched_one); + + if matched_one { + Some(anno) + } else { + None + } + } + + /// Should this type be hidden? + pub(crate) fn hide(&self) -> bool { + self.hide + } + + /// Should this type be opaque? + pub(crate) fn opaque(&self) -> bool { + self.opaque + } + + /// For a given type, indicates the type it should replace. + /// + /// For example, in the following code: + /// + /// ```cpp + /// + /// /**
*/ + /// struct Foo { int x; }; + /// + /// struct Bar { char foo; }; + /// ``` + /// + /// the generated code would look something like: + /// + /// ``` + /// /**
*/ + /// struct Bar { + /// x: ::std::os::raw::c_int, + /// }; + /// ``` + /// + /// That is, code for `Foo` is used to generate `Bar`. + pub(crate) fn use_instead_of(&self) -> Option<&[String]> { + self.use_instead_of.as_deref() + } + + /// The list of derives that have been specified in this annotation. + pub(crate) fn derives(&self) -> &[String] { + &self.derives + } + + /// The list of attributes that have been specified in this annotation. + pub(crate) fn attributes(&self) -> &[String] { + &self.attributes + } + + /// Should we avoid implementing the `Copy` trait? + pub(crate) fn disallow_copy(&self) -> bool { + self.disallow_copy + } + + /// Should we avoid implementing the `Debug` trait? + pub(crate) fn disallow_debug(&self) -> bool { + self.disallow_debug + } + + /// Should we avoid implementing the `Default` trait? + pub(crate) fn disallow_default(&self) -> bool { + self.disallow_default + } + + /// Should this type get a `#[must_use]` annotation? + pub(crate) fn must_use_type(&self) -> bool { + self.must_use_type + } + + /// What kind of accessors should we provide for this type's fields? + pub(crate) fn visibility_kind(&self) -> Option { + self.visibility_kind + } + + /// What kind of accessors should we provide for this type's fields? + pub(crate) fn accessor_kind(&self) -> Option { + self.accessor_kind + } + + fn parse(&mut self, comment: &clang::Comment, matched: &mut bool) { + use clang_sys::CXComment_HTMLStartTag; + if comment.kind() == CXComment_HTMLStartTag && + comment.get_tag_name() == "div" && + comment + .get_tag_attrs() + .next() + .is_some_and(|attr| attr.name == "rustbindgen") + { + *matched = true; + for attr in comment.get_tag_attrs() { + match attr.name.as_str() { + "opaque" => self.opaque = true, + "hide" => self.hide = true, + "nocopy" => self.disallow_copy = true, + "nodebug" => self.disallow_debug = true, + "nodefault" => self.disallow_default = true, + "mustusetype" => self.must_use_type = true, + "replaces" => { + self.use_instead_of = Some( + attr.value.split("::").map(Into::into).collect(), + ); + } + "derive" => self.derives.push(attr.value), + "attribute" => self.attributes.push(attr.value), + "private" => { + self.visibility_kind = if attr.value == "false" { + Some(FieldVisibilityKind::Public) + } else { + Some(FieldVisibilityKind::Private) + }; + } + "accessor" => { + self.accessor_kind = Some(parse_accessor(&attr.value)); + } + "constant" => self.constify_enum_variant = true, + _ => {} + } + } + } + + for child in comment.get_children() { + self.parse(&child, matched); + } + } + + /// Returns whether we've parsed a "constant" attribute. + pub(crate) fn constify_enum_variant(&self) -> bool { + self.constify_enum_variant + } +} diff --git a/vendor/bindgen/ir/comment.rs b/vendor/bindgen/ir/comment.rs new file mode 100644 index 00000000000000..a4ba3201867bc8 --- /dev/null +++ b/vendor/bindgen/ir/comment.rs @@ -0,0 +1,100 @@ +//! Utilities for manipulating C/C++ comments. + +/// The type of a comment. +#[derive(Debug, PartialEq, Eq)] +enum Kind { + /// A `///` comment, or something of the like. + /// All lines in a comment should start with the same symbol. + SingleLines, + /// A `/**` comment, where each other line can start with `*` and the + /// entire block ends with `*/`. + MultiLine, +} + +/// Preprocesses a C/C++ comment so that it is a valid Rust comment. 
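+///
+/// For instance (mirroring the unit tests at the bottom of this file):
+///
+/// ```ignore
+/// assert_eq!(preprocess("/// hello"), " hello");
+/// assert_eq!(
+///     preprocess("/** hello \n * world \n * foo \n */"),
+///     " hello\n world\n foo"
+/// );
+/// ```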
+pub(crate) fn preprocess(comment: &str) -> String { + match kind(comment) { + Some(Kind::SingleLines) => preprocess_single_lines(comment), + Some(Kind::MultiLine) => preprocess_multi_line(comment), + None => comment.to_owned(), + } +} + +/// Gets the kind of the doc comment, if it is one. +fn kind(comment: &str) -> Option { + if comment.starts_with("/*") { + Some(Kind::MultiLine) + } else if comment.starts_with("//") { + Some(Kind::SingleLines) + } else { + None + } +} + +/// Preprocesses multiple single line comments. +/// +/// Handles lines starting with both `//` and `///`. +fn preprocess_single_lines(comment: &str) -> String { + debug_assert!(comment.starts_with("//"), "comment is not single line"); + + let lines: Vec<_> = comment + .lines() + .map(|l| l.trim().trim_start_matches('/')) + .collect(); + lines.join("\n") +} + +fn preprocess_multi_line(comment: &str) -> String { + let comment = comment + .trim_start_matches('/') + .trim_end_matches('/') + .trim_end_matches('*'); + + // Strip any potential `*` characters preceding each line. + let mut lines: Vec<_> = comment + .lines() + .map(|line| line.trim().trim_start_matches('*').trim_start_matches('!')) + .skip_while(|line| line.trim().is_empty()) // Skip the first empty lines. + .collect(); + + // Remove the trailing line corresponding to the `*/`. + if lines.last().is_some_and(|l| l.trim().is_empty()) { + lines.pop(); + } + + lines.join("\n") +} + +#[cfg(test)] +mod test { + use super::*; + + #[test] + fn picks_up_single_and_multi_line_doc_comments() { + assert_eq!(kind("/// hello"), Some(Kind::SingleLines)); + assert_eq!(kind("/** world */"), Some(Kind::MultiLine)); + } + + #[test] + fn processes_single_lines_correctly() { + assert_eq!(preprocess("///"), ""); + assert_eq!(preprocess("/// hello"), " hello"); + assert_eq!(preprocess("// hello"), " hello"); + assert_eq!(preprocess("// hello"), " hello"); + } + + #[test] + fn processes_multi_lines_correctly() { + assert_eq!(preprocess("/**/"), ""); + + assert_eq!( + preprocess("/** hello \n * world \n * foo \n */"), + " hello\n world\n foo" + ); + + assert_eq!( + preprocess("/**\nhello\n*world\n*foo\n*/"), + "hello\nworld\nfoo" + ); + } +} diff --git a/vendor/bindgen/ir/comp.rs b/vendor/bindgen/ir/comp.rs new file mode 100644 index 00000000000000..655e0f1fa5d939 --- /dev/null +++ b/vendor/bindgen/ir/comp.rs @@ -0,0 +1,1921 @@ +//! Compound types (unions and structs) in our intermediate representation. + +use itertools::Itertools; + +use super::analysis::Sizedness; +use super::annotations::Annotations; +use super::context::{BindgenContext, FunctionId, ItemId, TypeId, VarId}; +use super::dot::DotAttributes; +use super::item::{IsOpaque, Item}; +use super::layout::Layout; +use super::template::TemplateParameters; +use super::traversal::{EdgeKind, Trace, Tracer}; +use super::ty::RUST_DERIVE_IN_ARRAY_LIMIT; +use crate::clang; +use crate::codegen::struct_layout::{align_to, bytes_from_bits_pow2}; +use crate::ir::derive::CanDeriveCopy; +use crate::parse::ParseError; +use crate::HashMap; +use crate::NonCopyUnionStyle; +use std::cmp; +use std::io; +use std::mem; + +/// The kind of compound type. +#[derive(Debug, Copy, Clone, PartialEq, Eq)] +pub(crate) enum CompKind { + /// A struct. + Struct, + /// A union. + Union, +} + +/// The kind of C++ method. +#[derive(Debug, Copy, Clone, PartialEq, Eq)] +pub(crate) enum MethodKind { + /// A constructor. We represent it as method for convenience, to avoid code + /// duplication. + Constructor, + /// A destructor. + Destructor, + /// A virtual destructor. 
+ VirtualDestructor { + /// Whether it's pure virtual. + pure_virtual: bool, + }, + /// A static method. + Static, + /// A normal method. + Normal, + /// A virtual method. + Virtual { + /// Whether it's pure virtual. + pure_virtual: bool, + }, +} + +impl MethodKind { + /// Is this a destructor method? + pub(crate) fn is_destructor(self) -> bool { + matches!( + self, + MethodKind::Destructor | MethodKind::VirtualDestructor { .. } + ) + } + + /// Is this a pure virtual method? + pub(crate) fn is_pure_virtual(self) -> bool { + match self { + MethodKind::Virtual { pure_virtual } | + MethodKind::VirtualDestructor { pure_virtual } => pure_virtual, + _ => false, + } + } +} + +/// A struct representing a C++ method, either static, normal, or virtual. +#[derive(Debug)] +pub(crate) struct Method { + kind: MethodKind, + /// The signature of the method. Take into account this is not a `Type` + /// item, but a `Function` one. + /// + /// This is tricky and probably this field should be renamed. + signature: FunctionId, + is_const: bool, +} + +impl Method { + /// Construct a new `Method`. + pub(crate) fn new( + kind: MethodKind, + signature: FunctionId, + is_const: bool, + ) -> Self { + Method { + kind, + signature, + is_const, + } + } + + /// What kind of method is this? + pub(crate) fn kind(&self) -> MethodKind { + self.kind + } + + /// Is this a constructor? + pub(crate) fn is_constructor(&self) -> bool { + self.kind == MethodKind::Constructor + } + + /// Is this a virtual method? + pub(crate) fn is_virtual(&self) -> bool { + matches!( + self.kind, + MethodKind::Virtual { .. } | MethodKind::VirtualDestructor { .. } + ) + } + + /// Is this a static method? + pub(crate) fn is_static(&self) -> bool { + self.kind == MethodKind::Static + } + + /// Get the ID for the `Function` signature for this method. + pub(crate) fn signature(&self) -> FunctionId { + self.signature + } + + /// Is this a const qualified method? + pub(crate) fn is_const(&self) -> bool { + self.is_const + } +} + +/// Methods common to the various field types. +pub(crate) trait FieldMethods { + /// Get the name of this field. + fn name(&self) -> Option<&str>; + + /// Get the type of this field. + fn ty(&self) -> TypeId; + + /// Get the comment for this field. + fn comment(&self) -> Option<&str>; + + /// If this is a bitfield, how many bits does it need? + fn bitfield_width(&self) -> Option; + + /// Is this field declared public? + fn is_public(&self) -> bool; + + /// Get the annotations for this field. + fn annotations(&self) -> &Annotations; + + /// The offset of the field (in bits) + fn offset(&self) -> Option; +} + +/// A contiguous set of logical bitfields that live within the same physical +/// allocation unit. See 9.2.4 [class.bit] in the C++ standard and [section +/// 2.4.II.1 in the Itanium C++ +/// ABI](http://itanium-cxx-abi.github.io/cxx-abi/abi.html#class-types). +#[derive(Debug)] +pub(crate) struct BitfieldUnit { + nth: usize, + layout: Layout, + bitfields: Vec, +} + +impl BitfieldUnit { + /// Get the 1-based index of this bitfield unit within its containing + /// struct. Useful for generating a Rust struct's field name for this unit + /// of bitfields. + pub(crate) fn nth(&self) -> usize { + self.nth + } + + /// Get the layout within which these bitfields reside. + pub(crate) fn layout(&self) -> Layout { + self.layout + } + + /// Get the bitfields within this unit. + pub(crate) fn bitfields(&self) -> &[Bitfield] { + &self.bitfields + } +} + +/// A struct representing a C++ field. 
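+///
+/// For example (illustrative): in `struct S { int x; int a : 3; int b : 4; };`,
+/// `x` becomes a `DataMember`, while `a` and `b` are grouped together into a
+/// single `Bitfields` allocation unit.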
+#[derive(Debug)] +pub(crate) enum Field { + /// A normal data member. + DataMember(FieldData), + + /// A physical allocation unit containing many logical bitfields. + Bitfields(BitfieldUnit), +} + +impl Field { + /// Get this field's layout. + pub(crate) fn layout(&self, ctx: &BindgenContext) -> Option { + match *self { + Field::Bitfields(BitfieldUnit { layout, .. }) => Some(layout), + Field::DataMember(ref data) => { + ctx.resolve_type(data.ty).layout(ctx) + } + } + } +} + +impl Trace for Field { + type Extra = (); + + fn trace(&self, _: &BindgenContext, tracer: &mut T, _: &()) + where + T: Tracer, + { + match *self { + Field::DataMember(ref data) => { + tracer.visit_kind(data.ty.into(), EdgeKind::Field); + } + Field::Bitfields(BitfieldUnit { ref bitfields, .. }) => { + for bf in bitfields { + tracer.visit_kind(bf.ty().into(), EdgeKind::Field); + } + } + } + } +} + +impl DotAttributes for Field { + fn dot_attributes( + &self, + ctx: &BindgenContext, + out: &mut W, + ) -> io::Result<()> + where + W: io::Write, + { + match *self { + Field::DataMember(ref data) => data.dot_attributes(ctx, out), + Field::Bitfields(BitfieldUnit { + layout, + ref bitfields, + .. + }) => { + writeln!( + out, + r#" + bitfield unit + + + + + + + + + "#, + layout.size, layout.align + )?; + for bf in bitfields { + bf.dot_attributes(ctx, out)?; + } + writeln!(out, "
unit.size{}
unit.align{}
") + } + } + } +} + +impl DotAttributes for FieldData { + fn dot_attributes( + &self, + _ctx: &BindgenContext, + out: &mut W, + ) -> io::Result<()> + where + W: io::Write, + { + writeln!( + out, + "{}{:?}", + self.name().unwrap_or("(anonymous)"), + self.ty() + ) + } +} + +impl DotAttributes for Bitfield { + fn dot_attributes( + &self, + _ctx: &BindgenContext, + out: &mut W, + ) -> io::Result<()> + where + W: io::Write, + { + writeln!( + out, + "{} : {}{:?}", + self.name().unwrap_or("(anonymous)"), + self.width(), + self.ty() + ) + } +} + +/// A logical bitfield within some physical bitfield allocation unit. +#[derive(Debug)] +pub(crate) struct Bitfield { + /// Index of the bit within this bitfield's allocation unit where this + /// bitfield's bits begin. + offset_into_unit: usize, + + /// The field data for this bitfield. + data: FieldData, + + /// Name of the generated Rust getter for this bitfield. + /// + /// Should be assigned before codegen. + getter_name: Option, + + /// Name of the generated Rust setter for this bitfield. + /// + /// Should be assigned before codegen. + setter_name: Option, +} + +impl Bitfield { + /// Construct a new bitfield. + fn new(offset_into_unit: usize, raw: RawField) -> Bitfield { + assert!(raw.bitfield_width().is_some()); + + Bitfield { + offset_into_unit, + data: raw.0, + getter_name: None, + setter_name: None, + } + } + + /// Get the index of the bit within this bitfield's allocation unit where + /// this bitfield begins. + pub(crate) fn offset_into_unit(&self) -> usize { + self.offset_into_unit + } + + /// Get the bit width of this bitfield. + pub(crate) fn width(&self) -> u32 { + self.data.bitfield_width().unwrap() + } + + /// Name of the generated Rust getter for this bitfield. + /// + /// Panics if called before assigning bitfield accessor names or if + /// this bitfield have no name. + pub(crate) fn getter_name(&self) -> &str { + assert!( + self.name().is_some(), + "`Bitfield::getter_name` called on anonymous field" + ); + self.getter_name.as_ref().expect( + "`Bitfield::getter_name` should only be called after\ + assigning bitfield accessor names", + ) + } + + /// Name of the generated Rust setter for this bitfield. + /// + /// Panics if called before assigning bitfield accessor names or if + /// this bitfield have no name. + pub(crate) fn setter_name(&self) -> &str { + assert!( + self.name().is_some(), + "`Bitfield::setter_name` called on anonymous field" + ); + self.setter_name.as_ref().expect( + "`Bitfield::setter_name` should only be called\ + after assigning bitfield accessor names", + ) + } +} + +impl FieldMethods for Bitfield { + fn name(&self) -> Option<&str> { + self.data.name() + } + + fn ty(&self) -> TypeId { + self.data.ty() + } + + fn comment(&self) -> Option<&str> { + self.data.comment() + } + + fn bitfield_width(&self) -> Option { + self.data.bitfield_width() + } + + fn is_public(&self) -> bool { + self.data.is_public() + } + + fn annotations(&self) -> &Annotations { + self.data.annotations() + } + + fn offset(&self) -> Option { + self.data.offset() + } +} + +/// A raw field might be either of a plain data member or a bitfield within a +/// bitfield allocation unit, but we haven't processed it and determined which +/// yet (which would involve allocating it into a bitfield unit if it is a +/// bitfield). +#[derive(Debug)] +struct RawField(FieldData); + +impl RawField { + /// Construct a new `RawField`. 
+ fn new( + name: Option, + ty: TypeId, + comment: Option, + annotations: Option, + bitfield_width: Option, + public: bool, + offset: Option, + ) -> RawField { + RawField(FieldData { + name, + ty, + comment, + annotations: annotations.unwrap_or_default(), + bitfield_width, + public, + offset, + }) + } +} + +impl FieldMethods for RawField { + fn name(&self) -> Option<&str> { + self.0.name() + } + + fn ty(&self) -> TypeId { + self.0.ty() + } + + fn comment(&self) -> Option<&str> { + self.0.comment() + } + + fn bitfield_width(&self) -> Option { + self.0.bitfield_width() + } + + fn is_public(&self) -> bool { + self.0.is_public() + } + + fn annotations(&self) -> &Annotations { + self.0.annotations() + } + + fn offset(&self) -> Option { + self.0.offset() + } +} + +/// Convert the given ordered set of raw fields into a list of either plain data +/// members, and/or bitfield units containing multiple bitfields. +/// +/// If we do not have the layout for a bitfield's type, then we can't reliably +/// compute its allocation unit. In such cases, we return an error. +fn raw_fields_to_fields_and_bitfield_units( + ctx: &BindgenContext, + raw_fields: I, + packed: bool, +) -> Result<(Vec, bool), ()> +where + I: IntoIterator, +{ + let mut raw_fields = raw_fields.into_iter().fuse().peekable(); + let mut fields = vec![]; + let mut bitfield_unit_count = 0; + + loop { + // While we have plain old data members, just keep adding them to our + // resulting fields. We introduce a scope here so that we can use + // `raw_fields` again after the `by_ref` iterator adaptor is dropped. + { + let non_bitfields = raw_fields + .by_ref() + .peeking_take_while(|f| f.bitfield_width().is_none()) + .map(|f| Field::DataMember(f.0)); + fields.extend(non_bitfields); + } + + // Now gather all the consecutive bitfields. Only consecutive bitfields + // may potentially share a bitfield allocation unit with each other in + // the Itanium C++ ABI. + let mut bitfields = raw_fields + .by_ref() + .peeking_take_while(|f| f.bitfield_width().is_some()) + .peekable(); + + if bitfields.peek().is_none() { + break; + } + + bitfields_to_allocation_units( + ctx, + &mut bitfield_unit_count, + &mut fields, + bitfields, + packed, + )?; + } + + assert!( + raw_fields.next().is_none(), + "The above loop should consume all items in `raw_fields`" + ); + + Ok((fields, bitfield_unit_count != 0)) +} + +/// Given a set of contiguous raw bitfields, group and allocate them into +/// (potentially multiple) bitfield units. +fn bitfields_to_allocation_units( + ctx: &BindgenContext, + bitfield_unit_count: &mut usize, + fields: &mut E, + raw_bitfields: I, + packed: bool, +) -> Result<(), ()> +where + E: Extend, + I: IntoIterator, +{ + assert!(ctx.collected_typerefs()); + + // NOTE: What follows is reverse-engineered from LLVM's + // lib/AST/RecordLayoutBuilder.cpp + // + // FIXME(emilio): There are some differences between Microsoft and the + // Itanium ABI, but we'll ignore those and stick to Itanium for now. + // + // Also, we need to handle packed bitfields and stuff. + // + // TODO(emilio): Take into account C++'s wide bitfields, and + // packing, sigh. 
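+    //
+    // Rough illustration of the grouping below (Itanium-style, non-packed):
+    // for `struct S { unsigned a : 3; unsigned b : 4; };` both bitfields
+    // land in a single allocation unit, `a` at bit offset 0 and `b` at bit
+    // offset 3, and the unit's 7 used bits round up to a one-byte layout.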
+ + fn flush_allocation_unit( + fields: &mut E, + bitfield_unit_count: &mut usize, + unit_size_in_bits: usize, + unit_align_in_bits: usize, + bitfields: Vec, + packed: bool, + ) where + E: Extend, + { + *bitfield_unit_count += 1; + let align = if packed { + 1 + } else { + bytes_from_bits_pow2(unit_align_in_bits) + }; + let size = align_to(unit_size_in_bits, 8) / 8; + let layout = Layout::new(size, align); + fields.extend(Some(Field::Bitfields(BitfieldUnit { + nth: *bitfield_unit_count, + layout, + bitfields, + }))); + } + + let mut max_align = 0; + let mut unfilled_bits_in_unit = 0; + let mut unit_size_in_bits = 0; + let mut unit_align = 0; + let mut bitfields_in_unit = vec![]; + + // TODO(emilio): Determine this from attributes or pragma ms_struct + // directives. Also, perhaps we should check if the target is MSVC? + const is_ms_struct: bool = false; + + for bitfield in raw_bitfields { + let bitfield_width = bitfield.bitfield_width().unwrap() as usize; + let bitfield_layout = + ctx.resolve_type(bitfield.ty()).layout(ctx).ok_or(())?; + let bitfield_size = bitfield_layout.size; + let bitfield_align = bitfield_layout.align; + + let mut offset = unit_size_in_bits; + if !packed { + if is_ms_struct { + if unit_size_in_bits != 0 && + (bitfield_width == 0 || + bitfield_width > unfilled_bits_in_unit) + { + // We've reached the end of this allocation unit, so flush it + // and its bitfields. + unit_size_in_bits = + align_to(unit_size_in_bits, unit_align * 8); + flush_allocation_unit( + fields, + bitfield_unit_count, + unit_size_in_bits, + unit_align, + mem::take(&mut bitfields_in_unit), + packed, + ); + + // Now we're working on a fresh bitfield allocation unit, so reset + // the current unit size and alignment. + offset = 0; + unit_align = 0; + } + } else if offset != 0 && + (bitfield_width == 0 || + (offset & (bitfield_align * 8 - 1)) + bitfield_width > + bitfield_size * 8) + { + offset = align_to(offset, bitfield_align * 8); + } + } + + // According to the x86[-64] ABI spec: "Unnamed bit-fields’ types do not + // affect the alignment of a structure or union". This makes sense: such + // bit-fields are only used for padding, and we can't perform an + // un-aligned read of something we can't read because we can't even name + // it. + if bitfield.name().is_some() { + max_align = cmp::max(max_align, bitfield_align); + + // NB: The `bitfield_width` here is completely, absolutely + // intentional. Alignment of the allocation unit is based on the + // maximum bitfield width, not (directly) on the bitfields' types' + // alignment. + unit_align = cmp::max(unit_align, bitfield_width); + } + + // Always keep all bitfields around. While unnamed bitifields are used + // for padding (and usually not needed hereafter), large unnamed + // bitfields over their types size cause weird allocation size behavior from clang. + // Therefore, all bitfields needed to be kept around in order to check for this + // and make the struct opaque in this case + bitfields_in_unit.push(Bitfield::new(offset, bitfield)); + + unit_size_in_bits = offset + bitfield_width; + + // Compute what the physical unit's final size would be given what we + // have seen so far, and use that to compute how many bits are still + // available in the unit. + let data_size = align_to(unit_size_in_bits, bitfield_align * 8); + unfilled_bits_in_unit = data_size - unit_size_in_bits; + } + + if unit_size_in_bits != 0 { + // Flush the last allocation unit and its bitfields. 
+ flush_allocation_unit( + fields, + bitfield_unit_count, + unit_size_in_bits, + unit_align, + bitfields_in_unit, + packed, + ); + } + + Ok(()) +} + +/// A compound structure's fields are initially raw, and have bitfields that +/// have not been grouped into allocation units. During this time, the fields +/// are mutable and we build them up during parsing. +/// +/// Then, once resolving typerefs is completed, we compute all structs' fields' +/// bitfield allocation units, and they remain frozen and immutable forever +/// after. +#[derive(Debug)] +enum CompFields { + Before(Vec), + After { + fields: Vec, + has_bitfield_units: bool, + }, + Error, +} + +impl Default for CompFields { + fn default() -> CompFields { + CompFields::Before(vec![]) + } +} + +impl CompFields { + fn append_raw_field(&mut self, raw: RawField) { + match *self { + CompFields::Before(ref mut raws) => { + raws.push(raw); + } + _ => { + panic!( + "Must not append new fields after computing bitfield allocation units" + ); + } + } + } + + fn compute_bitfield_units(&mut self, ctx: &BindgenContext, packed: bool) { + let raws = match *self { + CompFields::Before(ref mut raws) => mem::take(raws), + _ => { + panic!("Already computed bitfield units"); + } + }; + + let result = raw_fields_to_fields_and_bitfield_units(ctx, raws, packed); + + match result { + Ok((fields, has_bitfield_units)) => { + *self = CompFields::After { + fields, + has_bitfield_units, + }; + } + Err(()) => { + *self = CompFields::Error; + } + } + } + + fn deanonymize_fields(&mut self, ctx: &BindgenContext, methods: &[Method]) { + let fields = match *self { + CompFields::After { ref mut fields, .. } => fields, + // Nothing to do here. + CompFields::Error => return, + CompFields::Before(_) => { + panic!("Not yet computed bitfield units."); + } + }; + + fn has_method( + methods: &[Method], + ctx: &BindgenContext, + name: &str, + ) -> bool { + methods.iter().any(|method| { + let method_name = ctx.resolve_func(method.signature()).name(); + method_name == name || ctx.rust_mangle(method_name) == name + }) + } + + struct AccessorNamesPair { + getter: String, + setter: String, + } + + let mut accessor_names: HashMap = fields + .iter() + .flat_map(|field| match *field { + Field::Bitfields(ref bu) => &*bu.bitfields, + Field::DataMember(_) => &[], + }) + .filter_map(|bitfield| bitfield.name()) + .map(|bitfield_name| { + let bitfield_name = bitfield_name.to_string(); + let getter = { + let mut getter = + ctx.rust_mangle(&bitfield_name).to_string(); + if has_method(methods, ctx, &getter) { + getter.push_str("_bindgen_bitfield"); + } + getter + }; + let setter = { + let setter = format!("set_{bitfield_name}"); + let mut setter = ctx.rust_mangle(&setter).to_string(); + if has_method(methods, ctx, &setter) { + setter.push_str("_bindgen_bitfield"); + } + setter + }; + (bitfield_name, AccessorNamesPair { getter, setter }) + }) + .collect(); + + let mut anon_field_counter = 0; + for field in fields.iter_mut() { + match *field { + Field::DataMember(FieldData { ref mut name, .. 
}) => { + if name.is_some() { + continue; + } + + anon_field_counter += 1; + *name = Some(format!( + "{}{anon_field_counter}", + ctx.options().anon_fields_prefix, + )); + } + Field::Bitfields(ref mut bu) => { + for bitfield in &mut bu.bitfields { + if bitfield.name().is_none() { + continue; + } + + if let Some(AccessorNamesPair { getter, setter }) = + accessor_names.remove(bitfield.name().unwrap()) + { + bitfield.getter_name = Some(getter); + bitfield.setter_name = Some(setter); + } + } + } + } + } + } + + /// Return the flex array member for the struct/class, if any. + fn flex_array_member(&self, ctx: &BindgenContext) -> Option { + let fields = match self { + CompFields::Before(_) => panic!("raw fields"), + CompFields::After { fields, .. } => fields, + CompFields::Error => return None, // panic? + }; + + match fields.last()? { + Field::Bitfields(..) => None, + Field::DataMember(FieldData { ty, .. }) => ctx + .resolve_type(*ty) + .is_incomplete_array(ctx) + .map(|item| item.expect_type_id(ctx)), + } + } +} + +impl Trace for CompFields { + type Extra = (); + + fn trace(&self, context: &BindgenContext, tracer: &mut T, _: &()) + where + T: Tracer, + { + match *self { + CompFields::Error => {} + CompFields::Before(ref fields) => { + for f in fields { + tracer.visit_kind(f.ty().into(), EdgeKind::Field); + } + } + CompFields::After { ref fields, .. } => { + for f in fields { + f.trace(context, tracer, &()); + } + } + } + } +} + +/// Common data shared across different field types. +#[derive(Clone, Debug)] +pub(crate) struct FieldData { + /// The name of the field, empty if it's an unnamed bitfield width. + name: Option, + + /// The inner type. + ty: TypeId, + + /// The doc comment on the field if any. + comment: Option, + + /// Annotations for this field, or the default. + annotations: Annotations, + + /// If this field is a bitfield, and how many bits does it contain if it is. + bitfield_width: Option, + + /// If the C++ field is declared `public` + public: bool, + + /// The offset of the field (in bits) + offset: Option, +} + +impl FieldMethods for FieldData { + fn name(&self) -> Option<&str> { + self.name.as_deref() + } + + fn ty(&self) -> TypeId { + self.ty + } + + fn comment(&self) -> Option<&str> { + self.comment.as_deref() + } + + fn bitfield_width(&self) -> Option { + self.bitfield_width + } + + fn is_public(&self) -> bool { + self.public + } + + fn annotations(&self) -> &Annotations { + &self.annotations + } + + fn offset(&self) -> Option { + self.offset + } +} + +/// The kind of inheritance a base class is using. +#[derive(Clone, Debug, PartialEq, Eq)] +pub(crate) enum BaseKind { + /// Normal inheritance, like: + /// + /// ```cpp + /// class A : public B {}; + /// ``` + Normal, + /// Virtual inheritance, like: + /// + /// ```cpp + /// class A: public virtual B {}; + /// ``` + Virtual, +} + +/// A base class. +#[derive(Clone, Debug)] +pub(crate) struct Base { + /// The type of this base class. + pub(crate) ty: TypeId, + /// The kind of inheritance we're doing. + pub(crate) kind: BaseKind, + /// Name of the field in which this base should be stored. + pub(crate) field_name: String, + /// Whether this base is inherited from publicly. + pub(crate) is_pub: bool, +} + +impl Base { + /// Whether this base class is inheriting virtually. + pub(crate) fn is_virtual(&self) -> bool { + self.kind == BaseKind::Virtual + } + + /// Whether this base class should have it's own field for storage. 
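+    // Illustrative note, not from the upstream sources: virtual bases are
+    // already covered by the vtable pointer and zero-sized bases are skipped,
+    // so for `class Derived : public Empty { int x; };` no `_base` field is
+    // generated for `Empty`.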
+ pub(crate) fn requires_storage(&self, ctx: &BindgenContext) -> bool { + // Virtual bases are already taken into account by the vtable + // pointer. + // + // FIXME(emilio): Is this always right? + if self.is_virtual() { + return false; + } + + // NB: We won't include zero-sized types in our base chain because they + // would contribute to our size given the dummy field we insert for + // zero-sized types. + if self.ty.is_zero_sized(ctx) { + return false; + } + + true + } + + /// Whether this base is inherited from publicly. + pub(crate) fn is_public(&self) -> bool { + self.is_pub + } +} + +/// A compound type. +/// +/// Either a struct or union, a compound type is built up from the combination +/// of fields which also are associated with their own (potentially compound) +/// type. +#[derive(Debug)] +pub(crate) struct CompInfo { + /// Whether this is a struct or a union. + kind: CompKind, + + /// The members of this struct or union. + fields: CompFields, + + /// The abstract template parameters of this class. Note that these are NOT + /// concrete template arguments, and should always be a + /// `Type(TypeKind::TypeParam(name))`. For concrete template arguments, see + /// `TypeKind::TemplateInstantiation`. + template_params: Vec, + + /// The method declarations inside this class, if in C++ mode. + methods: Vec, + + /// The different constructors this struct or class contains. + constructors: Vec, + + /// The destructor of this type. The bool represents whether this destructor + /// is virtual. + destructor: Option<(MethodKind, FunctionId)>, + + /// Vector of classes this one inherits from. + base_members: Vec, + + /// The inner types that were declared inside this class, in something like: + /// + /// ```c++ + /// class Foo { + /// typedef int FooTy; + /// struct Bar { + /// int baz; + /// }; + /// } + /// + /// static Foo::Bar const = {3}; + /// ``` + inner_types: Vec, + + /// Set of static constants declared inside this class. + inner_vars: Vec, + + /// Whether this type should generate an vtable (TODO: Should be able to + /// look at the virtual methods and ditch this field). + has_own_virtual_method: bool, + + /// Whether this type has destructor. + has_destructor: bool, + + /// Whether this type has a base type with more than one member. + /// + /// TODO: We should be able to compute this. + has_nonempty_base: bool, + + /// If this type has a template parameter which is not a type (e.g.: a + /// `size_t`) + has_non_type_template_params: bool, + + /// Whether this type has a bit field member whose width couldn't be + /// evaluated (e.g. if it depends on a template parameter). We generate an + /// opaque type in this case. + has_unevaluable_bit_field_width: bool, + + /// Whether we saw `__attribute__((packed))` on or within this type. + packed_attr: bool, + + /// Used to know if we've found an opaque attribute that could cause us to + /// generate a type with invalid layout. This is explicitly used to avoid us + /// generating bad alignments when parsing types like `max_align_t`. + /// + /// It's not clear what the behavior should be here, if generating the item + /// and pray, or behave as an opaque type. + found_unknown_attr: bool, + + /// Used to indicate when a struct has been forward declared. Usually used + /// in headers so that APIs can't modify them directly. + is_forward_declaration: bool, +} + +impl CompInfo { + /// Construct a new compound type. 
+ pub(crate) fn new(kind: CompKind) -> Self { + CompInfo { + kind, + fields: CompFields::default(), + template_params: vec![], + methods: vec![], + constructors: vec![], + destructor: None, + base_members: vec![], + inner_types: vec![], + inner_vars: vec![], + has_own_virtual_method: false, + has_destructor: false, + has_nonempty_base: false, + has_non_type_template_params: false, + has_unevaluable_bit_field_width: false, + packed_attr: false, + found_unknown_attr: false, + is_forward_declaration: false, + } + } + + /// Compute the layout of this type. + /// + /// This is called as a fallback under some circumstances where LLVM doesn't + /// give us the correct layout. + /// + /// If we're a union without known layout, we try to compute it from our + /// members. This is not ideal, but clang fails to report the size for these + /// kind of unions, see `test/headers/template_union.hpp` + pub(crate) fn layout(&self, ctx: &BindgenContext) -> Option { + // We can't do better than clang here, sorry. + if self.kind == CompKind::Struct { + return None; + } + + // By definition, we don't have the right layout information here if + // we're a forward declaration. + if self.is_forward_declaration() { + return None; + } + + // empty union case + if !self.has_fields() { + return None; + } + + let mut max_size = 0; + // Don't allow align(0) + let mut max_align = 1; + self.each_known_field_layout(ctx, |layout| { + max_size = cmp::max(max_size, layout.size); + max_align = cmp::max(max_align, layout.align); + }); + + Some(Layout::new(max_size, max_align)) + } + + /// Get this type's set of fields. + pub(crate) fn fields(&self) -> &[Field] { + match self.fields { + CompFields::Error => &[], + CompFields::After { ref fields, .. } => fields, + CompFields::Before(..) => { + panic!("Should always have computed bitfield units first"); + } + } + } + + /// Return the flex array member and its element type if any + pub(crate) fn flex_array_member( + &self, + ctx: &BindgenContext, + ) -> Option { + self.fields.flex_array_member(ctx) + } + + fn has_fields(&self) -> bool { + match self.fields { + CompFields::Error => false, + CompFields::After { ref fields, .. } => !fields.is_empty(), + CompFields::Before(ref raw_fields) => !raw_fields.is_empty(), + } + } + + fn each_known_field_layout( + &self, + ctx: &BindgenContext, + mut callback: impl FnMut(Layout), + ) { + match self.fields { + CompFields::Error => {} + CompFields::After { ref fields, .. } => { + for field in fields { + if let Some(layout) = field.layout(ctx) { + callback(layout); + } + } + } + CompFields::Before(ref raw_fields) => { + for field in raw_fields { + let field_ty = ctx.resolve_type(field.0.ty); + if let Some(layout) = field_ty.layout(ctx) { + callback(layout); + } + } + } + } + } + + fn has_bitfields(&self) -> bool { + match self.fields { + CompFields::Error => false, + CompFields::After { + has_bitfield_units, .. + } => has_bitfield_units, + CompFields::Before(_) => { + panic!("Should always have computed bitfield units first"); + } + } + } + + /// Returns whether we have a too large bitfield unit, in which case we may + /// not be able to derive some of the things we should be able to normally + /// derive. + pub(crate) fn has_too_large_bitfield_unit(&self) -> bool { + if !self.has_bitfields() { + return false; + } + self.fields().iter().any(|field| match *field { + Field::DataMember(..) 
=> false, + Field::Bitfields(ref unit) => { + unit.layout.size > RUST_DERIVE_IN_ARRAY_LIMIT + } + }) + } + + /// Does this type have any template parameters that aren't types + /// (e.g. int)? + pub(crate) fn has_non_type_template_params(&self) -> bool { + self.has_non_type_template_params + } + + /// Do we see a virtual function during parsing? + /// Get the `has_own_virtual_method` boolean. + pub(crate) fn has_own_virtual_method(&self) -> bool { + self.has_own_virtual_method + } + + /// Did we see a destructor when parsing this type? + pub(crate) fn has_own_destructor(&self) -> bool { + self.has_destructor + } + + /// Get this type's set of methods. + pub(crate) fn methods(&self) -> &[Method] { + &self.methods + } + + /// Get this type's set of constructors. + pub(crate) fn constructors(&self) -> &[FunctionId] { + &self.constructors + } + + /// Get this type's destructor. + pub(crate) fn destructor(&self) -> Option<(MethodKind, FunctionId)> { + self.destructor + } + + /// What kind of compound type is this? + pub(crate) fn kind(&self) -> CompKind { + self.kind + } + + /// Is this a union? + pub(crate) fn is_union(&self) -> bool { + self.kind() == CompKind::Union + } + + /// The set of types that this one inherits from. + pub(crate) fn base_members(&self) -> &[Base] { + &self.base_members + } + + /// Construct a new compound type from a Clang type. + pub(crate) fn from_ty( + potential_id: ItemId, + ty: &clang::Type, + location: Option, + ctx: &mut BindgenContext, + ) -> Result { + use clang_sys::*; + assert!( + ty.template_args().is_none(), + "We handle template instantiations elsewhere" + ); + + let mut cursor = ty.declaration(); + let mut kind = Self::kind_from_cursor(&cursor); + if kind.is_err() { + if let Some(location) = location { + kind = Self::kind_from_cursor(&location); + cursor = location; + } + } + + let kind = kind?; + + debug!("CompInfo::from_ty({kind:?}, {cursor:?})"); + + let mut ci = CompInfo::new(kind); + ci.is_forward_declaration = + location.map_or(true, |cur| match cur.kind() { + CXCursor_ParmDecl => true, + CXCursor_StructDecl | CXCursor_UnionDecl | + CXCursor_ClassDecl => !cur.is_definition(), + _ => false, + }); + + let mut maybe_anonymous_struct_field = None; + cursor.visit(|cur| { + if cur.kind() != CXCursor_FieldDecl { + if let Some((ty, clang_ty, public, offset)) = + maybe_anonymous_struct_field.take() + { + if cur.kind() == CXCursor_TypedefDecl && + cur.typedef_type().unwrap().canonical_type() == + clang_ty + { + // Typedefs of anonymous structs appear later in the ast + // than the struct itself, that would otherwise be an + // anonymous field. Detect that case here, and do + // nothing. + } else { + let field = RawField::new( + None, ty, None, None, None, public, offset, + ); + ci.fields.append_raw_field(field); + } + } + } + + match cur.kind() { + CXCursor_FieldDecl => { + if let Some((ty, clang_ty, public, offset)) = + maybe_anonymous_struct_field.take() + { + let mut used = false; + cur.visit(|child| { + if child.cur_type() == clang_ty { + used = true; + } + CXChildVisit_Continue + }); + + if !used { + let field = RawField::new( + None, ty, None, None, None, public, offset, + ); + ci.fields.append_raw_field(field); + } + } + + let bit_width = if cur.is_bit_field() { + let width = cur.bit_width(); + + // Make opaque type if the bit width couldn't be + // evaluated. 
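+                        // (Hypothetical example, not from upstream: a width
+                        // such as `int b : sizeof(T) * 8;` inside a class
+                        // template cannot be evaluated here.)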
+ if width.is_none() { + ci.has_unevaluable_bit_field_width = true; + return CXChildVisit_Break; + } + + width + } else { + None + }; + + let field_type = Item::from_ty_or_ref( + cur.cur_type(), + cur, + Some(potential_id), + ctx, + ); + + let comment = cur.raw_comment(); + let annotations = Annotations::new(&cur); + let name = cur.spelling(); + let is_public = cur.public_accessible(); + let offset = cur.offset_of_field().ok(); + + // Name can be empty if there are bitfields, for example, + // see tests/headers/struct_with_bitfields.h + assert!( + !name.is_empty() || bit_width.is_some(), + "Empty field name?" + ); + + let name = if name.is_empty() { None } else { Some(name) }; + + let field = RawField::new( + name, + field_type, + comment, + annotations, + bit_width, + is_public, + offset, + ); + ci.fields.append_raw_field(field); + + // No we look for things like attributes and stuff. + cur.visit(|cur| { + if cur.kind() == CXCursor_UnexposedAttr { + ci.found_unknown_attr = true; + } + CXChildVisit_Continue + }); + } + CXCursor_UnexposedAttr => { + ci.found_unknown_attr = true; + } + CXCursor_EnumDecl | + CXCursor_TypeAliasDecl | + CXCursor_TypeAliasTemplateDecl | + CXCursor_TypedefDecl | + CXCursor_StructDecl | + CXCursor_UnionDecl | + CXCursor_ClassTemplate | + CXCursor_ClassDecl => { + // We can find non-semantic children here, clang uses a + // StructDecl to note incomplete structs that haven't been + // forward-declared before, see [1]. + // + // Also, clang seems to scope struct definitions inside + // unions, and other named struct definitions inside other + // structs to the whole translation unit. + // + // Let's just assume that if the cursor we've found is a + // definition, it's a valid inner type. + // + // [1]: https://github.com/rust-lang/rust-bindgen/issues/482 + let is_inner_struct = + cur.semantic_parent() == cursor || cur.is_definition(); + if !is_inner_struct { + return CXChildVisit_Continue; + } + + // Even if this is a definition, we may not be the semantic + // parent, see #1281. + let inner = Item::parse(cur, Some(potential_id), ctx) + .expect("Inner ClassDecl"); + + // If we avoided recursion parsing this type (in + // `Item::from_ty_with_id()`), then this might not be a + // valid type ID, so check and gracefully handle this. + if ctx.resolve_item_fallible(inner).is_some() { + let inner = inner.expect_type_id(ctx); + + ci.inner_types.push(inner); + + // A declaration of an union or a struct without name + // could also be an unnamed field, unfortunately. 
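+                            // Illustrative case, not from upstream: in
+                            // `struct Foo { struct { int x; }; };` the inner
+                            // anonymous struct is both an inner type and an
+                            // unnamed field of `Foo`.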
+ if cur.is_anonymous() && cur.kind() != CXCursor_EnumDecl + { + let ty = cur.cur_type(); + let public = cur.public_accessible(); + let offset = cur.offset_of_field().ok(); + + maybe_anonymous_struct_field = + Some((inner, ty, public, offset)); + } + } + } + CXCursor_PackedAttr => { + ci.packed_attr = true; + } + CXCursor_TemplateTypeParameter => { + let param = Item::type_param(None, cur, ctx).expect( + "Item::type_param shouldn't fail when pointing \ + at a TemplateTypeParameter", + ); + ci.template_params.push(param); + } + CXCursor_CXXBaseSpecifier => { + let is_virtual_base = cur.is_virtual_base(); + ci.has_own_virtual_method |= is_virtual_base; + + let kind = if is_virtual_base { + BaseKind::Virtual + } else { + BaseKind::Normal + }; + + let field_name = match ci.base_members.len() { + 0 => "_base".into(), + n => format!("_base_{n}"), + }; + let type_id = + Item::from_ty_or_ref(cur.cur_type(), cur, None, ctx); + ci.base_members.push(Base { + ty: type_id, + kind, + field_name, + is_pub: cur.access_specifier() == CX_CXXPublic, + }); + } + CXCursor_Constructor | CXCursor_Destructor | + CXCursor_CXXMethod => { + let is_virtual = cur.method_is_virtual(); + let is_static = cur.method_is_static(); + debug_assert!(!(is_static && is_virtual), "How?"); + + ci.has_destructor |= cur.kind() == CXCursor_Destructor; + ci.has_own_virtual_method |= is_virtual; + + // This used to not be here, but then I tried generating + // stylo bindings with this (without path filters), and + // cried a lot with a method in gfx/Point.h + // (ToUnknownPoint), that somehow was causing the same type + // to be inserted in the map two times. + // + // I couldn't make a reduced test case, but anyway... + // Methods of template functions not only used to be inlined, + // but also instantiated, and we wouldn't be able to call + // them, so just bail out. + if !ci.template_params.is_empty() { + return CXChildVisit_Continue; + } + + // NB: This gets us an owned `Function`, not a + // `FunctionSig`. 
+ let signature = + match Item::parse(cur, Some(potential_id), ctx) { + Ok(item) + if ctx + .resolve_item(item) + .kind() + .is_function() => + { + item + } + _ => return CXChildVisit_Continue, + }; + + let signature = signature.expect_function_id(ctx); + + match cur.kind() { + CXCursor_Constructor => { + ci.constructors.push(signature); + } + CXCursor_Destructor => { + let kind = if is_virtual { + MethodKind::VirtualDestructor { + pure_virtual: cur.method_is_pure_virtual(), + } + } else { + MethodKind::Destructor + }; + ci.destructor = Some((kind, signature)); + } + CXCursor_CXXMethod => { + let is_const = cur.method_is_const(); + let method_kind = if is_static { + MethodKind::Static + } else if is_virtual { + MethodKind::Virtual { + pure_virtual: cur.method_is_pure_virtual(), + } + } else { + MethodKind::Normal + }; + + let method = + Method::new(method_kind, signature, is_const); + + ci.methods.push(method); + } + _ => unreachable!("How can we see this here?"), + } + } + CXCursor_NonTypeTemplateParameter => { + ci.has_non_type_template_params = true; + } + CXCursor_VarDecl => { + let linkage = cur.linkage(); + if linkage != CXLinkage_External && + linkage != CXLinkage_UniqueExternal + { + return CXChildVisit_Continue; + } + + let visibility = cur.visibility(); + if visibility != CXVisibility_Default { + return CXChildVisit_Continue; + } + + if let Ok(item) = Item::parse(cur, Some(potential_id), ctx) + { + ci.inner_vars.push(item.as_var_id_unchecked()); + } + } + // Intentionally not handled + CXCursor_CXXAccessSpecifier | + CXCursor_CXXFinalAttr | + CXCursor_FunctionTemplate | + CXCursor_ConversionFunction => {} + _ => { + warn!( + "unhandled comp member `{}` (kind {:?}) in `{}` ({})", + cur.spelling(), + clang::kind_to_str(cur.kind()), + cursor.spelling(), + cur.location() + ); + } + } + CXChildVisit_Continue + }); + + if let Some((ty, _, public, offset)) = maybe_anonymous_struct_field { + let field = + RawField::new(None, ty, None, None, None, public, offset); + ci.fields.append_raw_field(field); + } + + Ok(ci) + } + + fn kind_from_cursor( + cursor: &clang::Cursor, + ) -> Result { + use clang_sys::*; + Ok(match cursor.kind() { + CXCursor_UnionDecl => CompKind::Union, + CXCursor_ClassDecl | CXCursor_StructDecl => CompKind::Struct, + CXCursor_CXXBaseSpecifier | + CXCursor_ClassTemplatePartialSpecialization | + CXCursor_ClassTemplate => match cursor.template_kind() { + CXCursor_UnionDecl => CompKind::Union, + _ => CompKind::Struct, + }, + _ => { + warn!("Unknown kind for comp type: {cursor:?}"); + return Err(ParseError::Continue); + } + }) + } + + /// Get the set of types that were declared within this compound type + /// (e.g. nested class definitions). + pub(crate) fn inner_types(&self) -> &[TypeId] { + &self.inner_types + } + + /// Get the set of static variables declared within this compound type. + pub(crate) fn inner_vars(&self) -> &[VarId] { + &self.inner_vars + } + + /// Have we found a field with an opaque type that could potentially mess up + /// the layout of this compound type? + pub(crate) fn found_unknown_attr(&self) -> bool { + self.found_unknown_attr + } + + /// Is this compound type packed? + pub(crate) fn is_packed( + &self, + ctx: &BindgenContext, + layout: Option<&Layout>, + ) -> bool { + if self.packed_attr { + return true; + } + + // Even though `libclang` doesn't expose `#pragma packed(...)`, we can + // detect it through its effects. 
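+        // Hedged example, not from the upstream sources: given
+        //
+        //     #pragma pack(1)
+        //     struct S { int x; };
+        //
+        // the field `x` still reports alignment 4 while the struct's layout
+        // reports alignment 1; that mismatch is what the check below detects.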
+ if let Some(parent_layout) = layout { + let mut packed = false; + self.each_known_field_layout(ctx, |layout| { + packed = packed || layout.align > parent_layout.align; + }); + if packed { + info!("Found a struct that was defined within `#pragma packed(...)`"); + return true; + } + + if self.has_own_virtual_method && parent_layout.align == 1 { + return true; + } + } + + false + } + + /// Return true if a compound type is "naturally packed". This means we can exclude the + /// "packed" attribute without changing the layout. + /// This is useful for types that need an "align(N)" attribute since rustc won't compile + /// structs that have both of those attributes. + pub(crate) fn already_packed(&self, ctx: &BindgenContext) -> Option { + let mut total_size: usize = 0; + + for field in self.fields() { + let layout = field.layout(ctx)?; + + if layout.align != 0 && total_size % layout.align != 0 { + return Some(false); + } + + total_size += layout.size; + } + + Some(true) + } + + /// Returns true if compound type has been forward declared + pub(crate) fn is_forward_declaration(&self) -> bool { + self.is_forward_declaration + } + + /// Compute this compound structure's bitfield allocation units. + pub(crate) fn compute_bitfield_units( + &mut self, + ctx: &BindgenContext, + layout: Option<&Layout>, + ) { + let packed = self.is_packed(ctx, layout); + self.fields.compute_bitfield_units(ctx, packed); + } + + /// Assign for each anonymous field a generated name. + pub(crate) fn deanonymize_fields(&mut self, ctx: &BindgenContext) { + self.fields.deanonymize_fields(ctx, &self.methods); + } + + /// Returns whether the current union can be represented as a Rust `union` + /// + /// Requirements: + /// 1. Current `RustTarget` allows for `untagged_union` + /// 2. Each field can derive `Copy` or we use `ManuallyDrop`. + /// 3. It's not zero-sized. + /// + /// Second boolean returns whether all fields can be copied (and thus + /// `ManuallyDrop` is not needed). 
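+    // Illustrative note, not from the upstream sources: assuming the
+    // `untagged_union` option is enabled, `union U { int i; float f; };` has
+    // only `Copy` fields, so this returns `(true, true)` and no
+    // `ManuallyDrop` wrapper is needed in the generated union.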
+ pub(crate) fn is_rust_union( + &self, + ctx: &BindgenContext, + layout: Option<&Layout>, + name: &str, + ) -> (bool, bool) { + if !self.is_union() { + return (false, false); + } + + if !ctx.options().untagged_union { + return (false, false); + } + + if self.is_forward_declaration() { + return (false, false); + } + + let union_style = if ctx.options().bindgen_wrapper_union.matches(name) { + NonCopyUnionStyle::BindgenWrapper + } else if ctx.options().manually_drop_union.matches(name) { + NonCopyUnionStyle::ManuallyDrop + } else { + ctx.options().default_non_copy_union_style + }; + + let all_can_copy = self.fields().iter().all(|f| match *f { + Field::DataMember(ref field_data) => { + field_data.ty().can_derive_copy(ctx) + } + Field::Bitfields(_) => true, + }); + + if !all_can_copy && union_style == NonCopyUnionStyle::BindgenWrapper { + return (false, false); + } + + if layout.is_some_and(|l| l.size == 0) { + return (false, false); + } + + (true, all_can_copy) + } +} + +impl DotAttributes for CompInfo { + fn dot_attributes( + &self, + ctx: &BindgenContext, + out: &mut W, + ) -> io::Result<()> + where + W: io::Write, + { + writeln!(out, "CompKind{:?}", self.kind)?; + + if self.has_own_virtual_method { + writeln!(out, "has_vtabletrue")?; + } + + if self.has_destructor { + writeln!(out, "has_destructortrue")?; + } + + if self.has_nonempty_base { + writeln!(out, "has_nonempty_basetrue")?; + } + + if self.has_non_type_template_params { + writeln!( + out, + "has_non_type_template_paramstrue" + )?; + } + + if self.packed_attr { + writeln!(out, "packed_attrtrue")?; + } + + if self.is_forward_declaration { + writeln!( + out, + "is_forward_declarationtrue" + )?; + } + + if !self.fields().is_empty() { + writeln!(out, r#"fields"#)?; + for field in self.fields() { + field.dot_attributes(ctx, out)?; + } + writeln!(out, "
")?; + } + + Ok(()) + } +} + +impl IsOpaque for CompInfo { + type Extra = Option; + + fn is_opaque(&self, ctx: &BindgenContext, layout: &Option) -> bool { + if self.has_non_type_template_params || + self.has_unevaluable_bit_field_width + { + return true; + } + + // When we do not have the layout for a bitfield's type (for example, it + // is a type parameter), then we can't compute bitfield units. We are + // left with no choice but to make the whole struct opaque, or else we + // might generate structs with incorrect sizes and alignments. + if let CompFields::Error = self.fields { + return true; + } + + // Bitfields with a width that is larger than their unit's width have + // some strange things going on, and the best we can do is make the + // whole struct opaque. + if self.fields().iter().any(|f| match *f { + Field::DataMember(_) => false, + Field::Bitfields(ref unit) => unit.bitfields().iter().any(|bf| { + let bitfield_layout = ctx + .resolve_type(bf.ty()) + .layout(ctx) + .expect("Bitfield without layout? Gah!"); + bf.width() / 8 > bitfield_layout.size as u32 + }), + }) { + return true; + } + + if !ctx.options().rust_features().repr_packed_n { + // If we don't have `#[repr(packed(N)]`, the best we can + // do is make this struct opaque. + // + // See https://github.com/rust-lang/rust-bindgen/issues/537 and + // https://github.com/rust-lang/rust/issues/33158 + if self.is_packed(ctx, layout.as_ref()) && + layout.is_some_and(|l| l.align > 1) + { + warn!("Found a type that is both packed and aligned to greater than \ + 1; Rust before version 1.33 doesn't have `#[repr(packed(N))]`, so we \ + are treating it as opaque. You may wish to set bindgen's rust target \ + version to 1.33 or later to enable `#[repr(packed(N))]` support."); + return true; + } + } + + false + } +} + +impl TemplateParameters for CompInfo { + fn self_template_params(&self, _ctx: &BindgenContext) -> Vec { + self.template_params.clone() + } +} + +impl Trace for CompInfo { + type Extra = Item; + + fn trace(&self, context: &BindgenContext, tracer: &mut T, item: &Item) + where + T: Tracer, + { + for p in item.all_template_params(context) { + tracer.visit_kind(p.into(), EdgeKind::TemplateParameterDefinition); + } + + for ty in self.inner_types() { + tracer.visit_kind(ty.into(), EdgeKind::InnerType); + } + + for &var in self.inner_vars() { + tracer.visit_kind(var.into(), EdgeKind::InnerVar); + } + + for method in self.methods() { + tracer.visit_kind(method.signature.into(), EdgeKind::Method); + } + + if let Some((_kind, signature)) = self.destructor() { + tracer.visit_kind(signature.into(), EdgeKind::Destructor); + } + + for ctor in self.constructors() { + tracer.visit_kind(ctor.into(), EdgeKind::Constructor); + } + + // Base members and fields are not generated for opaque types (but all + // of the above things are) so stop here. + if item.is_opaque(context, &()) { + return; + } + + for base in self.base_members() { + tracer.visit_kind(base.ty.into(), EdgeKind::BaseMember); + } + + self.fields.trace(context, tracer, &()); + } +} diff --git a/vendor/bindgen/ir/context.rs b/vendor/bindgen/ir/context.rs new file mode 100644 index 00000000000000..c0201a114b7a46 --- /dev/null +++ b/vendor/bindgen/ir/context.rs @@ -0,0 +1,3107 @@ +//! Common context that is passed around during parsing and codegen. 
+ +use super::super::time::Timer; +use super::analysis::{ + analyze, as_cannot_derive_set, CannotDerive, DeriveTrait, + HasDestructorAnalysis, HasFloat, HasTypeParameterInArray, + HasVtableAnalysis, HasVtableResult, SizednessAnalysis, SizednessResult, + UsedTemplateParameters, +}; +use super::derive::{ + CanDerive, CanDeriveCopy, CanDeriveDebug, CanDeriveDefault, CanDeriveEq, + CanDeriveHash, CanDeriveOrd, CanDerivePartialEq, CanDerivePartialOrd, +}; +use super::function::Function; +use super::int::IntKind; +use super::item::{IsOpaque, Item, ItemAncestors, ItemSet}; +use super::item_kind::ItemKind; +use super::module::{Module, ModuleKind}; +use super::template::{TemplateInstantiation, TemplateParameters}; +use super::traversal::{self, Edge, ItemTraversal}; +use super::ty::{FloatKind, Type, TypeKind}; +use crate::clang::{self, ABIKind, Cursor}; +use crate::codegen::CodegenError; +use crate::BindgenOptions; +use crate::{Entry, HashMap, HashSet}; + +use proc_macro2::{Ident, Span, TokenStream}; +use quote::ToTokens; +use std::borrow::Cow; +use std::cell::{Cell, RefCell}; +use std::collections::{BTreeSet, HashMap as StdHashMap}; +use std::mem; +use std::path::Path; + +/// An identifier for some kind of IR item. +#[derive(Debug, Copy, Clone, Eq, PartialOrd, Ord, Hash)] +pub(crate) struct ItemId(usize); + +/// Declare a newtype around `ItemId` with conversion methods. +macro_rules! item_id_newtype { + ( + $( #[$attr:meta] )* + pub(crate) struct $name:ident(ItemId) + where + $( #[$checked_attr:meta] )* + checked = $checked:ident with $check_method:ident, + $( #[$expected_attr:meta] )* + expected = $expected:ident, + $( #[$unchecked_attr:meta] )* + unchecked = $unchecked:ident; + ) => { + $( #[$attr] )* + #[derive(Debug, Copy, Clone, Eq, PartialOrd, Ord, Hash)] + pub(crate) struct $name(ItemId); + + impl $name { + /// Create an `ItemResolver` from this ID. + #[allow(dead_code)] + pub(crate) fn into_resolver(self) -> ItemResolver { + let id: ItemId = self.into(); + id.into() + } + } + + impl ::std::cmp::PartialEq for $name + where + T: Copy + Into + { + fn eq(&self, rhs: &T) -> bool { + let rhs: ItemId = (*rhs).into(); + self.0 == rhs + } + } + + impl From<$name> for ItemId { + fn from(id: $name) -> ItemId { + id.0 + } + } + + impl<'a> From<&'a $name> for ItemId { + fn from(id: &'a $name) -> ItemId { + id.0 + } + } + + #[allow(dead_code)] + impl ItemId { + $( #[$checked_attr] )* + pub(crate) fn $checked(&self, ctx: &BindgenContext) -> Option<$name> { + if ctx.resolve_item(*self).kind().$check_method() { + Some($name(*self)) + } else { + None + } + } + + $( #[$expected_attr] )* + pub(crate) fn $expected(&self, ctx: &BindgenContext) -> $name { + self.$checked(ctx) + .expect(concat!( + stringify!($expected), + " called with ItemId that points to the wrong ItemKind" + )) + } + + $( #[$unchecked_attr] )* + pub(crate) fn $unchecked(&self) -> $name { + $name(*self) + } + } + } +} + +item_id_newtype! { + /// An identifier for an `Item` whose `ItemKind` is known to be + /// `ItemKind::Type`. + pub(crate) struct TypeId(ItemId) + where + /// Convert this `ItemId` into a `TypeId` if its associated item is a type, + /// otherwise return `None`. + checked = as_type_id with is_type, + + /// Convert this `ItemId` into a `TypeId`. + /// + /// If this `ItemId` does not point to a type, then panic. + expected = expect_type_id, + + /// Convert this `ItemId` into a `TypeId` without actually checking whether + /// this ID actually points to a `Type`. + unchecked = as_type_id_unchecked; +} + +item_id_newtype! 
{ + /// An identifier for an `Item` whose `ItemKind` is known to be + /// `ItemKind::Module`. + pub(crate) struct ModuleId(ItemId) + where + /// Convert this `ItemId` into a `ModuleId` if its associated item is a + /// module, otherwise return `None`. + checked = as_module_id with is_module, + + /// Convert this `ItemId` into a `ModuleId`. + /// + /// If this `ItemId` does not point to a module, then panic. + expected = expect_module_id, + + /// Convert this `ItemId` into a `ModuleId` without actually checking + /// whether this ID actually points to a `Module`. + unchecked = as_module_id_unchecked; +} + +item_id_newtype! { + /// An identifier for an `Item` whose `ItemKind` is known to be + /// `ItemKind::Var`. + pub(crate) struct VarId(ItemId) + where + /// Convert this `ItemId` into a `VarId` if its associated item is a var, + /// otherwise return `None`. + checked = as_var_id with is_var, + + /// Convert this `ItemId` into a `VarId`. + /// + /// If this `ItemId` does not point to a var, then panic. + expected = expect_var_id, + + /// Convert this `ItemId` into a `VarId` without actually checking whether + /// this ID actually points to a `Var`. + unchecked = as_var_id_unchecked; +} + +item_id_newtype! { + /// An identifier for an `Item` whose `ItemKind` is known to be + /// `ItemKind::Function`. + pub(crate) struct FunctionId(ItemId) + where + /// Convert this `ItemId` into a `FunctionId` if its associated item is a function, + /// otherwise return `None`. + checked = as_function_id with is_function, + + /// Convert this `ItemId` into a `FunctionId`. + /// + /// If this `ItemId` does not point to a function, then panic. + expected = expect_function_id, + + /// Convert this `ItemId` into a `FunctionId` without actually checking whether + /// this ID actually points to a `Function`. + unchecked = as_function_id_unchecked; +} + +impl From for usize { + fn from(id: ItemId) -> usize { + id.0 + } +} + +impl ItemId { + /// Get a numeric representation of this ID. 
+ pub(crate) fn as_usize(self) -> usize { + self.into() + } +} + +impl ::std::cmp::PartialEq for ItemId +where + T: Copy + Into, +{ + fn eq(&self, rhs: &T) -> bool { + let rhs: ItemId = (*rhs).into(); + self.0 == rhs.0 + } +} + +impl CanDeriveDebug for T +where + T: Copy + Into, +{ + fn can_derive_debug(&self, ctx: &BindgenContext) -> bool { + ctx.options().derive_debug && ctx.lookup_can_derive_debug(*self) + } +} + +impl CanDeriveDefault for T +where + T: Copy + Into, +{ + fn can_derive_default(&self, ctx: &BindgenContext) -> bool { + ctx.options().derive_default && ctx.lookup_can_derive_default(*self) + } +} + +impl CanDeriveCopy for T +where + T: Copy + Into, +{ + fn can_derive_copy(&self, ctx: &BindgenContext) -> bool { + ctx.options().derive_copy && ctx.lookup_can_derive_copy(*self) + } +} + +impl CanDeriveHash for T +where + T: Copy + Into, +{ + fn can_derive_hash(&self, ctx: &BindgenContext) -> bool { + ctx.options().derive_hash && ctx.lookup_can_derive_hash(*self) + } +} + +impl CanDerivePartialOrd for T +where + T: Copy + Into, +{ + fn can_derive_partialord(&self, ctx: &BindgenContext) -> bool { + ctx.options().derive_partialord && + ctx.lookup_can_derive_partialeq_or_partialord(*self) == + CanDerive::Yes + } +} + +impl CanDerivePartialEq for T +where + T: Copy + Into, +{ + fn can_derive_partialeq(&self, ctx: &BindgenContext) -> bool { + ctx.options().derive_partialeq && + ctx.lookup_can_derive_partialeq_or_partialord(*self) == + CanDerive::Yes + } +} + +impl CanDeriveEq for T +where + T: Copy + Into, +{ + fn can_derive_eq(&self, ctx: &BindgenContext) -> bool { + ctx.options().derive_eq && + ctx.lookup_can_derive_partialeq_or_partialord(*self) == + CanDerive::Yes && + !ctx.lookup_has_float(*self) + } +} + +impl CanDeriveOrd for T +where + T: Copy + Into, +{ + fn can_derive_ord(&self, ctx: &BindgenContext) -> bool { + ctx.options().derive_ord && + ctx.lookup_can_derive_partialeq_or_partialord(*self) == + CanDerive::Yes && + !ctx.lookup_has_float(*self) + } +} + +/// A key used to index a resolved type, so we only process it once. +/// +/// This is almost always a USR string (an unique identifier generated by +/// clang), but it can also be the canonical declaration if the type is unnamed, +/// in which case clang may generate the same USR for multiple nested unnamed +/// types. +#[derive(Eq, PartialEq, Hash, Debug)] +enum TypeKey { + Usr(String), + Declaration(Cursor), +} + +/// A context used during parsing and generation of structs. +#[derive(Debug)] +pub(crate) struct BindgenContext { + /// The map of all the items parsed so far, keyed off `ItemId`. + items: Vec>, + + /// Clang USR to type map. This is needed to be able to associate types with + /// item ids during parsing. + types: HashMap, + + /// Maps from a cursor to the item ID of the named template type parameter + /// for that cursor. + type_params: HashMap, + + /// A cursor to module map. Similar reason than above. + modules: HashMap, + + /// The root module, this is guaranteed to be an item of kind Module. + root_module: ModuleId, + + /// Current module being traversed. + current_module: ModuleId, + + /// A `HashMap` keyed on a type definition, and whose value is the parent ID + /// of the declaration. + /// + /// This is used to handle the cases where the semantic and the lexical + /// parents of the cursor differ, like when a nested class is defined + /// outside of the parent class. + semantic_parents: HashMap, + + /// A stack with the current type declarations and types we're parsing. 
This + /// is needed to avoid infinite recursion when parsing a type like: + /// + /// struct c { struct c* next; }; + /// + /// This means effectively, that a type has a potential ID before knowing if + /// it's a correct type. But that's not important in practice. + /// + /// We could also use the `types` `HashMap`, but my intention with it is that + /// only valid types and declarations end up there, and this could + /// potentially break that assumption. + currently_parsed_types: Vec, + + /// A map with all the already parsed macro names. This is done to avoid + /// hard errors while parsing duplicated macros, as well to allow macro + /// expression parsing. + /// + /// This needs to be an `std::HashMap` because the `cexpr` API requires it. + parsed_macros: StdHashMap, cexpr::expr::EvalResult>, + + /// A map with all include locations. + /// + /// This is needed so that items are created in the order they are defined in. + /// + /// The key is the included file, the value is a pair of the source file and + /// the position of the `#include` directive in the source file. + includes: StdHashMap, + + /// A set of all the included filenames. + deps: BTreeSet>, + + /// The active replacements collected from replaces="xxx" annotations. + replacements: HashMap, ItemId>, + + collected_typerefs: bool, + + in_codegen: bool, + + /// The translation unit for parsing. + translation_unit: clang::TranslationUnit, + + /// The translation unit for macro fallback parsing. + fallback_tu: Option, + + /// Target information that can be useful for some stuff. + target_info: clang::TargetInfo, + + /// The options given by the user via cli or other medium. + options: BindgenOptions, + + /// Whether an opaque array was generated + generated_opaque_array: Cell, + + /// Whether a bindgen complex was generated + generated_bindgen_complex: Cell, + + /// Whether a bindgen float16 was generated + generated_bindgen_float16: Cell, + + /// The set of `ItemId`s that are allowlisted. This the very first thing + /// computed after parsing our IR, and before running any of our analyses. + allowlisted: Option, + + /// Cache for calls to `ParseCallbacks::blocklisted_type_implements_trait` + blocklisted_types_implement_traits: + RefCell>>, + + /// The set of `ItemId`s that are allowlisted for code generation _and_ that + /// we should generate accounting for the codegen options. + /// + /// It's computed right after computing the allowlisted items. + codegen_items: Option, + + /// Map from an item's ID to the set of template parameter items that it + /// uses. See `ir::named` for more details. Always `Some` during the codegen + /// phase. + used_template_parameters: Option>, + + /// The set of `TypeKind::Comp` items found during parsing that need their + /// bitfield allocation units computed. Drained in `compute_bitfield_units`. + need_bitfield_allocation: Vec, + + /// The set of enums that are defined by a pair of `enum` and `typedef`, + /// which is legal in C (but not C++). + /// + /// ```c++ + /// // in either order + /// enum Enum { Variants... }; + /// typedef int16_t Enum; + /// ``` + /// + /// The stored `ItemId` is that of the `TypeKind::Enum`, not of the + /// `TypeKind::Alias`. + /// + /// This is populated when we enter codegen by `compute_enum_typedef_combos` + /// and is always `None` before that and `Some` after. + enum_typedef_combos: Option>, + + /// The set of (`ItemId`s of) types that can't derive debug. 
+    ///
+    /// This is populated when we enter codegen by `compute_cannot_derive_debug`
+    /// and is always `None` before that and `Some` after.
+    cannot_derive_debug: Option<HashSet<ItemId>>,
+
+    /// The set of (`ItemId`s of) types that can't derive default.
+    ///
+    /// This is populated when we enter codegen by `compute_cannot_derive_default`
+    /// and is always `None` before that and `Some` after.
+    cannot_derive_default: Option<HashSet<ItemId>>,
+
+    /// The set of (`ItemId`s of) types that can't derive copy.
+    ///
+    /// This is populated when we enter codegen by `compute_cannot_derive_copy`
+    /// and is always `None` before that and `Some` after.
+    cannot_derive_copy: Option<HashSet<ItemId>>,
+
+    /// The set of (`ItemId`s of) types that can't derive hash.
+    ///
+    /// This is populated when we enter codegen by `compute_cannot_derive_hash`
+    /// and is always `None` before that and `Some` after.
+    cannot_derive_hash: Option<HashSet<ItemId>>,
+
+    /// The map of (`ItemId`s of) types that can't derive `PartialEq` or
+    /// `PartialOrd`.
+    ///
+    /// This is populated when we enter codegen by
+    /// `compute_cannot_derive_partialord_partialeq_or_eq` and is always `None`
+    /// before that and `Some` after.
+    cannot_derive_partialeq_or_partialord: Option<HashMap<ItemId, CanDerive>>,
+
+    /// The sizedness of types.
+    ///
+    /// This is populated by `compute_sizedness` and is always `None` before
+    /// that function is invoked and `Some` afterwards.
+    sizedness: Option<HashMap<TypeId, SizednessResult>>,
+
+    /// The set of (`ItemId`s of) types that have a vtable.
+    ///
+    /// Populated when we enter codegen by `compute_has_vtable`; always `None`
+    /// before that and `Some` after.
+    have_vtable: Option<HashMap<ItemId, HasVtableResult>>,
+
+    /// The set of (`ItemId`s of) types that have a destructor.
+    ///
+    /// Populated when we enter codegen by `compute_has_destructor`; always `None`
+    /// before that and `Some` after.
+    have_destructor: Option<HashSet<ItemId>>,
+
+    /// The set of (`ItemId`s of) types that have a type parameter in an array.
+    ///
+    /// Populated when we enter codegen by `compute_has_type_param_in_array`; always `None`
+    /// before that and `Some` after.
+    has_type_param_in_array: Option<HashSet<ItemId>>,
+
+    /// The set of (`ItemId`s of) types that contain a float.
+    ///
+    /// Populated when we enter codegen by `compute_has_float`; always `None`
+    /// before that and `Some` after.
+    has_float: Option<HashSet<ItemId>>,
+}
+
+/// A traversal of allowlisted items.
+struct AllowlistedItemsTraversal<'ctx> {
+    ctx: &'ctx BindgenContext,
+    traversal: ItemTraversal<'ctx, ItemSet, Vec<ItemId>>,
+}
+
+impl Iterator for AllowlistedItemsTraversal<'_> {
+    type Item = ItemId;
+
+    fn next(&mut self) -> Option<ItemId> {
+        loop {
+            let id = self.traversal.next()?;
+
+            if self.ctx.resolve_item(id).is_blocklisted(self.ctx) {
+                continue;
+            }
+
+            return Some(id);
+        }
+    }
+}
+
+impl<'ctx> AllowlistedItemsTraversal<'ctx> {
+    /// Construct a new allowlisted items traversal.
+    pub(crate) fn new<R>(
+        ctx: &'ctx BindgenContext,
+        roots: R,
+        predicate: for<'a> fn(&'a BindgenContext, Edge) -> bool,
+    ) -> Self
+    where
+        R: IntoIterator<Item = ItemId>,
+    {
+        AllowlistedItemsTraversal {
+            ctx,
+            traversal: ItemTraversal::new(ctx, roots, predicate),
+        }
+    }
+}
+
+impl BindgenContext {
+    /// Construct the context for the given `options`.
+    pub(crate) fn new(
+        options: BindgenOptions,
+        input_unsaved_files: &[clang::UnsavedFile],
+    ) -> Self {
+        // TODO(emilio): Use the CXTargetInfo here when available.
+ // + // see: https://reviews.llvm.org/D32389 + let index = clang::Index::new(false, true); + + let parse_options = + clang_sys::CXTranslationUnit_DetailedPreprocessingRecord; + + let translation_unit = { + let _t = + Timer::new("translation_unit").with_output(options.time_phases); + + clang::TranslationUnit::parse( + &index, + "", + &options.clang_args, + input_unsaved_files, + parse_options, + ).expect("libclang error; possible causes include: +- Invalid flag syntax +- Unrecognized flags +- Invalid flag arguments +- File I/O errors +- Host vs. target architecture mismatch +If you encounter an error missing from this list, please file an issue or a PR!") + }; + + let target_info = clang::TargetInfo::new(&translation_unit); + let root_module = Self::build_root_module(ItemId(0)); + let root_module_id = root_module.id().as_module_id_unchecked(); + + // depfiles need to include the explicitly listed headers too + let deps = options.input_headers.iter().cloned().collect(); + + BindgenContext { + items: vec![Some(root_module)], + includes: Default::default(), + deps, + types: Default::default(), + type_params: Default::default(), + modules: Default::default(), + root_module: root_module_id, + current_module: root_module_id, + semantic_parents: Default::default(), + currently_parsed_types: vec![], + parsed_macros: Default::default(), + replacements: Default::default(), + collected_typerefs: false, + in_codegen: false, + translation_unit, + fallback_tu: None, + target_info, + options, + generated_bindgen_complex: Cell::new(false), + generated_bindgen_float16: Cell::new(false), + generated_opaque_array: Cell::new(false), + allowlisted: None, + blocklisted_types_implement_traits: Default::default(), + codegen_items: None, + used_template_parameters: None, + need_bitfield_allocation: Default::default(), + enum_typedef_combos: None, + cannot_derive_debug: None, + cannot_derive_default: None, + cannot_derive_copy: None, + cannot_derive_hash: None, + cannot_derive_partialeq_or_partialord: None, + sizedness: None, + have_vtable: None, + have_destructor: None, + has_type_param_in_array: None, + has_float: None, + } + } + + /// Returns `true` if the target architecture is wasm32 + pub(crate) fn is_target_wasm32(&self) -> bool { + self.target_info.triple.starts_with("wasm32-") + } + + /// Creates a timer for the current bindgen phase. If `time_phases` is `true`, + /// the timer will print to stderr when it is dropped, otherwise it will do + /// nothing. + pub(crate) fn timer<'a>(&self, name: &'a str) -> Timer<'a> { + Timer::new(name).with_output(self.options.time_phases) + } + + /// Returns the pointer width to use for the target for the current + /// translation. + pub(crate) fn target_pointer_size(&self) -> usize { + self.target_info.pointer_width / 8 + } + + /// Returns the ABI, which is mostly useful for determining the mangling kind. + pub(crate) fn abi_kind(&self) -> ABIKind { + self.target_info.abi + } + + /// Get the stack of partially parsed types that we are in the middle of + /// parsing. + pub(crate) fn currently_parsed_types(&self) -> &[PartialType] { + &self.currently_parsed_types[..] + } + + /// Begin parsing the given partial type, and push it onto the + /// `currently_parsed_types` stack so that we won't infinite recurse if we + /// run into a reference to it while parsing it. 
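+    // Sketch of the intended pairing (assumed from the surrounding docs, not
+    // a verbatim upstream snippet):
+    //
+    //     ctx.begin_parsing(partial_ty);
+    //     let ty = Item::parse(cur, Some(potential_id), ctx); // may recurse
+    //     ctx.finish_parsing();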
+ pub(crate) fn begin_parsing(&mut self, partial_ty: PartialType) { + self.currently_parsed_types.push(partial_ty); + } + + /// Finish parsing the current partial type, pop it off the + /// `currently_parsed_types` stack, and return it. + pub(crate) fn finish_parsing(&mut self) -> PartialType { + self.currently_parsed_types.pop().expect( + "should have been parsing a type, if we finished parsing a type", + ) + } + + /// Add the location of the `#include` directive for the `included_file`. + pub(crate) fn add_include( + &mut self, + source_file: String, + included_file: String, + offset: usize, + ) { + self.includes + .entry(included_file) + .or_insert((source_file, offset)); + } + + /// Get the location of the first `#include` directive for the `included_file`. + pub(crate) fn included_file_location( + &self, + included_file: &str, + ) -> Option<(String, usize)> { + self.includes.get(included_file).cloned() + } + + /// Add an included file. + pub(crate) fn add_dep(&mut self, dep: Box) { + self.deps.insert(dep); + } + + /// Get any included files. + pub(crate) fn deps(&self) -> &BTreeSet> { + &self.deps + } + + /// Define a new item. + /// + /// This inserts it into the internal items set, and its type into the + /// internal types set. + pub(crate) fn add_item( + &mut self, + item: Item, + declaration: Option, + location: Option, + ) { + debug!("BindgenContext::add_item({item:?}, declaration: {declaration:?}, loc: {location:?}"); + debug_assert!( + declaration.is_some() || + !item.kind().is_type() || + item.kind().expect_type().is_builtin_or_type_param() || + item.kind().expect_type().is_opaque(self, &item) || + item.kind().expect_type().is_unresolved_ref(), + "Adding a type without declaration?" + ); + + let id = item.id(); + let is_type = item.kind().is_type(); + let is_unnamed = is_type && item.expect_type().name().is_none(); + let is_template_instantiation = + is_type && item.expect_type().is_template_instantiation(); + + if item.id() != self.root_module { + self.add_item_to_module(&item); + } + + if is_type && item.expect_type().is_comp() { + self.need_bitfield_allocation.push(id); + } + + let old_item = mem::replace(&mut self.items[id.0], Some(item)); + assert!( + old_item.is_none(), + "should not have already associated an item with the given id" + ); + + // Unnamed items can have an USR, but they can't be referenced from + // other sites explicitly and the USR can match if the unnamed items are + // nested, so don't bother tracking them. + if !is_type || is_template_instantiation { + return; + } + if let Some(mut declaration) = declaration { + if !declaration.is_valid() { + if let Some(location) = location { + if location.is_template_like() { + declaration = location; + } + } + } + declaration = declaration.canonical(); + if !declaration.is_valid() { + // This could happen, for example, with types like `int*` or + // similar. + // + // Fortunately, we don't care about those types being + // duplicated, so we can just ignore them. 
+ debug!( + "Invalid declaration {declaration:?} found for type {:?}", + self.resolve_item_fallible(id) + .unwrap() + .kind() + .expect_type() + ); + return; + } + + let key = if is_unnamed { + TypeKey::Declaration(declaration) + } else if let Some(usr) = declaration.usr() { + TypeKey::Usr(usr) + } else { + warn!("Valid declaration with no USR: {declaration:?}, {location:?}"); + TypeKey::Declaration(declaration) + }; + + let old = self.types.insert(key, id.as_type_id_unchecked()); + debug_assert_eq!(old, None); + } + } + + /// Ensure that every item (other than the root module) is in a module's + /// children list. This is to make sure that every allowlisted item get's + /// codegen'd, even if its parent is not allowlisted. See issue #769 for + /// details. + fn add_item_to_module(&mut self, item: &Item) { + assert_ne!(item.id(), self.root_module); + assert!(self.resolve_item_fallible(item.id()).is_none()); + + if let Some(ref mut parent) = self.items[item.parent_id().0] { + if let Some(module) = parent.as_module_mut() { + debug!( + "add_item_to_module: adding {:?} as child of parent module {:?}", + item.id(), + item.parent_id() + ); + + module.children_mut().insert(item.id()); + return; + } + } + + debug!( + "add_item_to_module: adding {:?} as child of current module {:?}", + item.id(), + self.current_module + ); + + self.items[self.current_module.0 .0] + .as_mut() + .expect("Should always have an item for self.current_module") + .as_module_mut() + .expect("self.current_module should always be a module") + .children_mut() + .insert(item.id()); + } + + /// Add a new named template type parameter to this context's item set. + pub(crate) fn add_type_param(&mut self, item: Item, definition: Cursor) { + debug!("BindgenContext::add_type_param: item = {item:?}; definition = {definition:?}"); + + assert!( + item.expect_type().is_type_param(), + "Should directly be a named type, not a resolved reference or anything" + ); + assert_eq!( + definition.kind(), + clang_sys::CXCursor_TemplateTypeParameter + ); + + self.add_item_to_module(&item); + + let id = item.id(); + let old_item = mem::replace(&mut self.items[id.0], Some(item)); + assert!( + old_item.is_none(), + "should not have already associated an item with the given id" + ); + + let old_named_ty = self + .type_params + .insert(definition, id.as_type_id_unchecked()); + assert!( + old_named_ty.is_none(), + "should not have already associated a named type with this id" + ); + } + + /// Get the named type defined at the given cursor location, if we've + /// already added one. + pub(crate) fn get_type_param(&self, definition: &Cursor) -> Option { + assert_eq!( + definition.kind(), + clang_sys::CXCursor_TemplateTypeParameter + ); + self.type_params.get(definition).copied() + } + + // TODO: Move all this syntax crap to other part of the code. + + /// Mangles a name so it doesn't conflict with any keyword. 
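+    // Illustrative examples, not from the upstream docs: `"type"` becomes
+    // `"type_"`, and a name containing `@`, `?` or `$` such as `"vec$impl"`
+    // becomes `"vec_impl_"`.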
+ #[rustfmt::skip] + pub(crate) fn rust_mangle<'a>(&self, name: &'a str) -> Cow<'a, str> { + if name.contains('@') || + name.contains('?') || + name.contains('$') || + matches!( + name, + "abstract" | "alignof" | "as" | "async" | "await" | "become" | + "box" | "break" | "const" | "continue" | "crate" | "do" | + "dyn" | "else" | "enum" | "extern" | "false" | "final" | + "fn" | "for" | "gen" | "if" | "impl" | "in" | "let" | "loop" | + "macro" | "match" | "mod" | "move" | "mut" | "offsetof" | + "override" | "priv" | "proc" | "pub" | "pure" | "ref" | + "return" | "Self" | "self" | "sizeof" | "static" | + "struct" | "super" | "trait" | "true" | "try" | "type" | "typeof" | + "unsafe" | "unsized" | "use" | "virtual" | "where" | + "while" | "yield" | "str" | "bool" | "f32" | "f64" | + "usize" | "isize" | "u128" | "i128" | "u64" | "i64" | + "u32" | "i32" | "u16" | "i16" | "u8" | "i8" | "_" + ) + { + let mut s = name.to_owned(); + s = s.replace('@', "_"); + s = s.replace('?', "_"); + s = s.replace('$', "_"); + s.push('_'); + return Cow::Owned(s); + } + Cow::Borrowed(name) + } + + /// Returns a mangled name as a rust identifier. + pub(crate) fn rust_ident(&self, name: S) -> Ident + where + S: AsRef, + { + self.rust_ident_raw(self.rust_mangle(name.as_ref())) + } + + /// Returns a mangled name as a rust identifier. + pub(crate) fn rust_ident_raw(&self, name: T) -> Ident + where + T: AsRef, + { + Ident::new(name.as_ref(), Span::call_site()) + } + + /// Iterate over all items that have been defined. + pub(crate) fn items(&self) -> impl Iterator { + self.items.iter().enumerate().filter_map(|(index, item)| { + let item = item.as_ref()?; + Some((ItemId(index), item)) + }) + } + + /// Have we collected all unresolved type references yet? + pub(crate) fn collected_typerefs(&self) -> bool { + self.collected_typerefs + } + + /// Gather all the unresolved type references. + fn collect_typerefs( + &mut self, + ) -> Vec<(ItemId, clang::Type, Cursor, Option)> { + debug_assert!(!self.collected_typerefs); + self.collected_typerefs = true; + let mut typerefs = vec![]; + + for (id, item) in self.items() { + let kind = item.kind(); + let Some(ty) = kind.as_type() else { continue }; + + if let TypeKind::UnresolvedTypeRef(ref ty, loc, parent_id) = + *ty.kind() + { + typerefs.push((id, *ty, loc, parent_id)); + } + } + typerefs + } + + /// Collect all of our unresolved type references and resolve them. + fn resolve_typerefs(&mut self) { + let _t = self.timer("resolve_typerefs"); + + let typerefs = self.collect_typerefs(); + + for (id, ty, loc, parent_id) in typerefs { + let _resolved = + { + let resolved = Item::from_ty(&ty, loc, parent_id, self) + .unwrap_or_else(|_| { + warn!("Could not resolve type reference, falling back \ + to opaque blob"); + Item::new_opaque_type(self.next_item_id(), &ty, self) + }); + + let item = self.items[id.0].as_mut().unwrap(); + *item.kind_mut().as_type_mut().unwrap().kind_mut() = + TypeKind::ResolvedTypeRef(resolved); + resolved + }; + + // Something in the STL is trolling me. I don't need this assertion + // right now, but worth investigating properly once this lands. + // + // debug_assert!(self.items.get(&resolved).is_some(), "How?"); + // + // if let Some(parent_id) = parent_id { + // assert_eq!(self.items[&resolved].parent_id(), parent_id); + // } + } + } + + /// Temporarily loan `Item` with the given `ItemId`. This provides means to + /// mutably borrow `Item` while having a reference to `BindgenContext`. 
+ /// + /// `Item` with the given `ItemId` is removed from the context, given + /// closure is executed and then `Item` is placed back. + /// + /// # Panics + /// + /// Panics if attempt to resolve given `ItemId` inside the given + /// closure is made. + fn with_loaned_item(&mut self, id: ItemId, f: F) -> T + where + F: (FnOnce(&BindgenContext, &mut Item) -> T), + { + let mut item = self.items[id.0].take().unwrap(); + + let result = f(self, &mut item); + + let existing = mem::replace(&mut self.items[id.0], Some(item)); + assert!(existing.is_none()); + + result + } + + /// Compute the bitfield allocation units for all `TypeKind::Comp` items we + /// parsed. + fn compute_bitfield_units(&mut self) { + let _t = self.timer("compute_bitfield_units"); + + assert!(self.collected_typerefs()); + + let need_bitfield_allocation = + mem::take(&mut self.need_bitfield_allocation); + for id in need_bitfield_allocation { + self.with_loaned_item(id, |ctx, item| { + let ty = item.kind_mut().as_type_mut().unwrap(); + let layout = ty.layout(ctx); + ty.as_comp_mut() + .unwrap() + .compute_bitfield_units(ctx, layout.as_ref()); + }); + } + } + + /// Assign a new generated name for each anonymous field. + fn deanonymize_fields(&mut self) { + let _t = self.timer("deanonymize_fields"); + + let comp_item_ids: Vec = self + .items() + .filter_map(|(id, item)| { + if item.kind().as_type()?.is_comp() { + return Some(id); + } + None + }) + .collect(); + + for id in comp_item_ids { + self.with_loaned_item(id, |ctx, item| { + item.kind_mut() + .as_type_mut() + .unwrap() + .as_comp_mut() + .unwrap() + .deanonymize_fields(ctx); + }); + } + } + + /// Iterate over all items and replace any item that has been named in a + /// `replaces="SomeType"` annotation with the replacement type. + fn process_replacements(&mut self) { + let _t = self.timer("process_replacements"); + if self.replacements.is_empty() { + debug!("No replacements to process"); + return; + } + + // FIXME: This is linear, but the replaces="xxx" annotation was already + // there, and for better or worse it's useful, sigh... + // + // We leverage the ResolvedTypeRef thing, though, which is cool :P. + + let mut replacements = vec![]; + + for (id, item) in self.items() { + if item.annotations().use_instead_of().is_some() { + continue; + } + + // Calls to `canonical_name` are expensive, so eagerly filter out + // items that cannot be replaced. + let Some(ty) = item.kind().as_type() else { + continue; + }; + + match *ty.kind() { + TypeKind::Comp(..) | + TypeKind::TemplateAlias(..) | + TypeKind::Enum(..) | + TypeKind::Alias(..) => {} + _ => continue, + } + + let path = item.path_for_allowlisting(self); + let replacement = self.replacements.get(&path[1..]); + + if let Some(replacement) = replacement { + if *replacement != id { + // We set this just after parsing the annotation. It's + // very unlikely, but this can happen. + if self.resolve_item_fallible(*replacement).is_some() { + replacements.push(( + id.expect_type_id(self), + replacement.expect_type_id(self), + )); + } + } + } + } + + for (id, replacement_id) in replacements { + debug!("Replacing {id:?} with {replacement_id:?}"); + let new_parent = { + let item_id: ItemId = id.into(); + let item = self.items[item_id.0].as_mut().unwrap(); + *item.kind_mut().as_type_mut().unwrap().kind_mut() = + TypeKind::ResolvedTypeRef(replacement_id); + item.parent_id() + }; + + // Relocate the replacement item from where it was declared, to + // where the thing it is replacing was declared. 
+ // + // First, we'll make sure that its parent ID is correct. + + let old_parent = self.resolve_item(replacement_id).parent_id(); + if new_parent == old_parent { + // Same parent and therefore also same containing + // module. Nothing to do here. + continue; + } + + let replacement_item_id: ItemId = replacement_id.into(); + self.items[replacement_item_id.0] + .as_mut() + .unwrap() + .set_parent_for_replacement(new_parent); + + // Second, make sure that it is in the correct module's children + // set. + + let old_module = { + let immut_self = &*self; + old_parent + .ancestors(immut_self) + .chain(Some(immut_self.root_module.into())) + .find(|id| { + let item = immut_self.resolve_item(*id); + item.as_module().is_some_and(|m| { + m.children().contains(&replacement_id.into()) + }) + }) + }; + let old_module = old_module + .expect("Every replacement item should be in a module"); + + let new_module = { + let immut_self = &*self; + new_parent + .ancestors(immut_self) + .find(|id| immut_self.resolve_item(*id).is_module()) + }; + let new_module = + new_module.unwrap_or_else(|| self.root_module.into()); + + if new_module == old_module { + // Already in the correct module. + continue; + } + + self.items[old_module.0] + .as_mut() + .unwrap() + .as_module_mut() + .unwrap() + .children_mut() + .remove(&replacement_id.into()); + + self.items[new_module.0] + .as_mut() + .unwrap() + .as_module_mut() + .unwrap() + .children_mut() + .insert(replacement_id.into()); + } + } + + /// Enter the code generation phase, invoke the given callback `cb`, and + /// leave the code generation phase. + pub(crate) fn gen( + mut self, + cb: F, + ) -> Result<(Out, BindgenOptions), CodegenError> + where + F: FnOnce(&Self) -> Result, + { + self.in_codegen = true; + + self.resolve_typerefs(); + self.compute_bitfield_units(); + self.process_replacements(); + + self.deanonymize_fields(); + + self.assert_no_dangling_references(); + + // Compute the allowlisted set after processing replacements and + // resolving type refs, as those are the final mutations of the IR + // graph, and their completion means that the IR graph is now frozen. + self.compute_allowlisted_and_codegen_items(); + + // Make sure to do this after processing replacements, since that messes + // with the parentage and module children, and we want to assert that it + // messes with them correctly. + self.assert_every_item_in_a_module(); + + self.compute_has_vtable(); + self.compute_sizedness(); + self.compute_has_destructor(); + self.find_used_template_parameters(); + self.compute_enum_typedef_combos(); + self.compute_cannot_derive_debug(); + self.compute_cannot_derive_default(); + self.compute_cannot_derive_copy(); + self.compute_has_type_param_in_array(); + self.compute_has_float(); + self.compute_cannot_derive_hash(); + self.compute_cannot_derive_partialord_partialeq_or_eq(); + + let ret = cb(&self)?; + Ok((ret, self.options)) + } + + /// When the `__testing_only_extra_assertions` feature is enabled, this + /// function walks the IR graph and asserts that we do not have any edges + /// referencing an `ItemId` for which we do not have an associated IR item. + fn assert_no_dangling_references(&self) { + if cfg!(feature = "__testing_only_extra_assertions") { + for _ in self.assert_no_dangling_item_traversal() { + // The iterator's next method does the asserting for us. 
+            }
+        }
+    }
+
+    fn assert_no_dangling_item_traversal(
+        &self,
+    ) -> traversal::AssertNoDanglingItemsTraversal<'_> {
+        assert!(self.in_codegen_phase());
+        assert_eq!(self.current_module, self.root_module);
+
+        let roots = self.items().map(|(id, _)| id);
+        traversal::AssertNoDanglingItemsTraversal::new(
+            self,
+            roots,
+            traversal::all_edges,
+        )
+    }
+
+    /// When the `__testing_only_extra_assertions` feature is enabled, walk over
+    /// every item and ensure that it is in the children set of one of its
+    /// module ancestors.
+    fn assert_every_item_in_a_module(&self) {
+        if cfg!(feature = "__testing_only_extra_assertions") {
+            assert!(self.in_codegen_phase());
+            assert_eq!(self.current_module, self.root_module);
+
+            for (id, _item) in self.items() {
+                if id == self.root_module {
+                    continue;
+                }
+
+                assert!(
+                    {
+                        let id = id
+                            .into_resolver()
+                            .through_type_refs()
+                            .through_type_aliases()
+                            .resolve(self)
+                            .id();
+                        id.ancestors(self)
+                            .chain(Some(self.root_module.into()))
+                            .any(|ancestor| {
+                                debug!("Checking if {id:?} is a child of {ancestor:?}");
+                                self.resolve_item(ancestor)
+                                    .as_module()
+                                    .is_some_and(|m| m.children().contains(&id))
+                            })
+                    },
+                    "{id:?} should be in some ancestor module's children set"
+                );
+            }
+        }
+    }
+
+    /// Compute for every type whether it is sized or not, and whether it is
+    /// sized or not as a base class.
+    fn compute_sizedness(&mut self) {
+        let _t = self.timer("compute_sizedness");
+        assert!(self.sizedness.is_none());
+        self.sizedness = Some(analyze::<SizednessAnalysis>(self));
+    }
+
+    /// Look up whether the type with the given ID is sized or not.
+    pub(crate) fn lookup_sizedness(&self, id: TypeId) -> SizednessResult {
+        assert!(
+            self.in_codegen_phase(),
+            "We only compute sizedness after we've entered codegen"
+        );
+
+        self.sizedness
+            .as_ref()
+            .unwrap()
+            .get(&id)
+            .copied()
+            .unwrap_or(SizednessResult::ZeroSized)
+    }
+
+    /// Compute whether the type has vtable.
+    fn compute_has_vtable(&mut self) {
+        let _t = self.timer("compute_has_vtable");
+        assert!(self.have_vtable.is_none());
+        self.have_vtable = Some(analyze::<HasVtableAnalysis>(self));
+    }
+
+    /// Look up whether the item with `id` has vtable or not.
+    pub(crate) fn lookup_has_vtable(&self, id: TypeId) -> HasVtableResult {
+        assert!(
+            self.in_codegen_phase(),
+            "We only compute vtables when we enter codegen"
+        );
+
+        // Look up the computed value for whether the item with `id` has a
+        // vtable or not.
+        self.have_vtable
+            .as_ref()
+            .unwrap()
+            .get(&id.into())
+            .copied()
+            .unwrap_or(HasVtableResult::No)
+    }
+
+    /// Compute whether the type has a destructor.
+    fn compute_has_destructor(&mut self) {
+        let _t = self.timer("compute_has_destructor");
+        assert!(self.have_destructor.is_none());
+        self.have_destructor = Some(analyze::<HasDestructorAnalysis>(self));
+    }
+
+    /// Look up whether the item with `id` has a destructor.
+    pub(crate) fn lookup_has_destructor(&self, id: TypeId) -> bool {
+        assert!(
+            self.in_codegen_phase(),
+            "We only compute destructors when we enter codegen"
+        );
+
+        self.have_destructor.as_ref().unwrap().contains(&id.into())
+    }
+
+    fn find_used_template_parameters(&mut self) {
+        let _t = self.timer("find_used_template_parameters");
+        if self.options.allowlist_recursively {
+            let used_params = analyze::<UsedTemplateParameters>(self);
+            self.used_template_parameters = Some(used_params);
+        } else {
+            // If you aren't recursively allowlisting, then we can't really make
+            // any sense of template parameter usage, and you're on your own.
+ let mut used_params = HashMap::default(); + for &id in self.allowlisted_items() { + used_params.entry(id).or_insert_with(|| { + id.self_template_params(self) + .into_iter() + .map(|p| p.into()) + .collect() + }); + } + self.used_template_parameters = Some(used_params); + } + } + + /// Return `true` if `item` uses the given `template_param`, `false` + /// otherwise. + /// + /// This method may only be called during the codegen phase, because the + /// template usage information is only computed as we enter the codegen + /// phase. + /// + /// If the item is blocklisted, then we say that it always uses the template + /// parameter. This is a little subtle. The template parameter usage + /// analysis only considers allowlisted items, and if any blocklisted item + /// shows up in the generated bindings, it is the user's responsibility to + /// manually provide a definition for them. To give them the most + /// flexibility when doing that, we assume that they use every template + /// parameter and always pass template arguments through in instantiations. + pub(crate) fn uses_template_parameter( + &self, + item: ItemId, + template_param: TypeId, + ) -> bool { + assert!( + self.in_codegen_phase(), + "We only compute template parameter usage as we enter codegen" + ); + + if self.resolve_item(item).is_blocklisted(self) { + return true; + } + + let template_param = template_param + .into_resolver() + .through_type_refs() + .through_type_aliases() + .resolve(self) + .id(); + + self.used_template_parameters + .as_ref() + .expect("should have found template parameter usage if we're in codegen") + .get(&item).is_some_and(|items_used_params| items_used_params.contains(&template_param)) + } + + /// Return `true` if `item` uses any unbound, generic template parameters, + /// `false` otherwise. + /// + /// Has the same restrictions that `uses_template_parameter` has. + pub(crate) fn uses_any_template_parameters(&self, item: ItemId) -> bool { + assert!( + self.in_codegen_phase(), + "We only compute template parameter usage as we enter codegen" + ); + + self.used_template_parameters + .as_ref() + .expect( + "should have template parameter usage info in codegen phase", + ) + .get(&item) + .is_some_and(|used| !used.is_empty()) + } + + // This deserves a comment. Builtin types don't get a valid declaration, so + // we can't add it to the cursor->type map. + // + // That being said, they're not generated anyway, and are few, so the + // duplication and special-casing is fine. + // + // If at some point we care about the memory here, probably a map TypeKind + // -> builtin type ItemId would be the best to improve that. + fn add_builtin_item(&mut self, item: Item) { + debug!("add_builtin_item: item = {item:?}"); + debug_assert!(item.kind().is_type()); + self.add_item_to_module(&item); + let id = item.id(); + let old_item = mem::replace(&mut self.items[id.0], Some(item)); + assert!(old_item.is_none(), "Inserted type twice?"); + } + + fn build_root_module(id: ItemId) -> Item { + let module = Module::new(Some("root".into()), ModuleKind::Normal); + Item::new(id, None, None, id, ItemKind::Module(module), None) + } + + /// Get the root module. + pub(crate) fn root_module(&self) -> ModuleId { + self.root_module + } + + /// Resolve a type with the given ID. + /// + /// Panics if there is no item for the given `TypeId` or if the resolved + /// item is not a `Type`. 
+    pub(crate) fn resolve_type(&self, type_id: TypeId) -> &Type {
+        self.resolve_item(type_id).kind().expect_type()
+    }
+
+    /// Resolve a function with the given ID.
+    ///
+    /// Panics if there is no item for the given `FunctionId` or if the resolved
+    /// item is not a `Function`.
+    pub(crate) fn resolve_func(&self, func_id: FunctionId) -> &Function {
+        self.resolve_item(func_id).kind().expect_function()
+    }
+
+    /// Resolve the given `ItemId` as a type, or `None` if there is no item with
+    /// the given ID.
+    ///
+    /// Panics if the ID resolves to an item that is not a type.
+    pub(crate) fn safe_resolve_type(&self, type_id: TypeId) -> Option<&Type> {
+        self.resolve_item_fallible(type_id)
+            .map(|t| t.kind().expect_type())
+    }
+
+    /// Resolve the given `ItemId` into an `Item`, or `None` if no such item
+    /// exists.
+    pub(crate) fn resolve_item_fallible<Id: Into<ItemId>>(
+        &self,
+        id: Id,
+    ) -> Option<&Item> {
+        self.items.get(id.into().0)?.as_ref()
+    }
+
+    /// Resolve the given `ItemId` into an `Item`.
+    ///
+    /// Panics if the given ID does not resolve to any item.
+    pub(crate) fn resolve_item<Id: Into<ItemId>>(&self, item_id: Id) -> &Item {
+        let item_id = item_id.into();
+        match self.resolve_item_fallible(item_id) {
+            Some(item) => item,
+            None => panic!("Not an item: {item_id:?}"),
+        }
+    }
+
+    /// Get the current module.
+    pub(crate) fn current_module(&self) -> ModuleId {
+        self.current_module
+    }
+
+    /// Add a semantic parent for a given type definition.
+    ///
+    /// We do this from the type declaration, in order to be able to find the
+    /// correct type definition afterwards.
+    ///
+    /// TODO(emilio): We could consider doing this only when
+    /// `declaration.lexical_parent() != definition.lexical_parent()`, but it's
+    /// not sure it's worth it.
+    pub(crate) fn add_semantic_parent(
+        &mut self,
+        definition: Cursor,
+        parent_id: ItemId,
+    ) {
+        self.semantic_parents.insert(definition, parent_id);
+    }
+
+    /// Returns a known semantic parent for a given definition.
+    pub(crate) fn known_semantic_parent(
+        &self,
+        definition: Cursor,
+    ) -> Option<ItemId> {
+        self.semantic_parents.get(&definition).copied()
+    }
+
+    /// Given a cursor pointing to the location of a template instantiation,
+    /// return a tuple of the form `(declaration_cursor, declaration_id,
+    /// num_expected_template_args)`.
+    ///
+    /// Note that `declaration_id` is not guaranteed to be in the context's item
+    /// set! It is possible that it is a partial type that we are still in the
+    /// middle of parsing.
+    fn get_declaration_info_for_template_instantiation(
+        &self,
+        instantiation: &Cursor,
+    ) -> Option<(Cursor, ItemId, usize)> {
+        instantiation
+            .cur_type()
+            .canonical_declaration(Some(instantiation))
+            .and_then(|canon_decl| {
+                self.get_resolved_type(&canon_decl).and_then(
+                    |template_decl_id| {
+                        let num_params =
+                            template_decl_id.num_self_template_params(self);
+                        if num_params == 0 {
+                            None
+                        } else {
+                            Some((
+                                *canon_decl.cursor(),
+                                template_decl_id.into(),
+                                num_params,
+                            ))
+                        }
+                    },
+                )
+            })
+            .or_else(|| {
+                // If we haven't already parsed the declaration of
+                // the template being instantiated, then it *must*
+                // be on the stack of types we are currently
+                // parsing. If it wasn't then clang would have
+                // already errored out before we started
+                // constructing our IR because you can't instantiate
+                // a template until it is fully defined.
+ instantiation + .referenced() + .and_then(|referenced| { + self.currently_parsed_types() + .iter() + .find(|partial_ty| *partial_ty.decl() == referenced) + }) + .and_then(|template_decl| { + let num_template_params = + template_decl.num_self_template_params(self); + if num_template_params == 0 { + None + } else { + Some(( + *template_decl.decl(), + template_decl.id(), + num_template_params, + )) + } + }) + }) + } + + /// Parse a template instantiation, eg `Foo`. + /// + /// This is surprisingly difficult to do with libclang, due to the fact that + /// it doesn't provide explicit template argument information, except for + /// function template declarations(!?!??!). + /// + /// The only way to do this is manually inspecting the AST and looking for + /// `TypeRefs` and `TemplateRefs` inside. This, unfortunately, doesn't work for + /// more complex cases, see the comment on the assertion below. + /// + /// To add insult to injury, the AST itself has structure that doesn't make + /// sense. Sometimes `Foo>` has an AST with nesting like you might + /// expect: `(Foo (Bar (int)))`. Other times, the AST we get is completely + /// flat: `(Foo Bar int)`. + /// + /// To see an example of what this method handles: + /// + /// ```c++ + /// template + /// class Incomplete { + /// T p; + /// }; + /// + /// template + /// class Foo { + /// Incomplete bar; + /// }; + /// ``` + /// + /// Finally, template instantiations are always children of the current + /// module. They use their template's definition for their name, so the + /// parent is only useful for ensuring that their layout tests get + /// codegen'd. + fn instantiate_template( + &mut self, + with_id: ItemId, + template: TypeId, + ty: &clang::Type, + location: Cursor, + ) -> Option { + let num_expected_args = + self.resolve_type(template).num_self_template_params(self); + if num_expected_args == 0 { + warn!( + "Tried to instantiate a template for which we could not \ + determine any template parameters" + ); + return None; + } + + let mut args = vec![]; + let mut found_const_arg = false; + let mut children = location.collect_children(); + + if children.iter().all(|c| !c.has_children()) { + // This is insanity... If clang isn't giving us a properly nested + // AST for which template arguments belong to which template we are + // instantiating, we'll need to construct it ourselves. However, + // there is an extra `NamespaceRef, NamespaceRef, ..., TemplateRef` + // representing a reference to the outermost template declaration + // that we need to filter out of the children. We need to do this + // filtering because we already know which template declaration is + // being specialized via the `location`'s type, and if we do not + // filter it out, we'll add an extra layer of template instantiation + // on accident. + let idx = children + .iter() + .position(|c| c.kind() == clang_sys::CXCursor_TemplateRef); + if let Some(idx) = idx { + if children + .iter() + .take(idx) + .all(|c| c.kind() == clang_sys::CXCursor_NamespaceRef) + { + children = children.into_iter().skip(idx + 1).collect(); + } + } + } + + for child in children.iter().rev() { + match child.kind() { + clang_sys::CXCursor_TypeRef | + clang_sys::CXCursor_TypedefDecl | + clang_sys::CXCursor_TypeAliasDecl => { + // The `with_id` ID will potentially end up unused if we give up + // on this type (for example, because it has const value + // template args), so if we pass `with_id` as the parent, it is + // potentially a dangling reference. 
Instead, use the canonical + // template declaration as the parent. It is already parsed and + // has a known-resolvable `ItemId`. + let ty = Item::from_ty_or_ref( + child.cur_type(), + *child, + Some(template.into()), + self, + ); + args.push(ty); + } + clang_sys::CXCursor_TemplateRef => { + let ( + template_decl_cursor, + template_decl_id, + num_expected_template_args, + ) = self.get_declaration_info_for_template_instantiation( + child, + )?; + + if num_expected_template_args == 0 || + child.has_at_least_num_children( + num_expected_template_args, + ) + { + // Do a happy little parse. See comment in the TypeRef + // match arm about parent IDs. + let ty = Item::from_ty_or_ref( + child.cur_type(), + *child, + Some(template.into()), + self, + ); + args.push(ty); + } else { + // This is the case mentioned in the doc comment where + // clang gives us a flattened AST and we have to + // reconstruct which template arguments go to which + // instantiation :( + let args_len = args.len(); + if args_len < num_expected_template_args { + warn!( + "Found a template instantiation without \ + enough template arguments" + ); + return None; + } + + let mut sub_args: Vec<_> = args + .drain(args_len - num_expected_template_args..) + .collect(); + sub_args.reverse(); + + let sub_name = Some(template_decl_cursor.spelling()); + let sub_inst = TemplateInstantiation::new( + // This isn't guaranteed to be a type that we've + // already finished parsing yet. + template_decl_id.as_type_id_unchecked(), + sub_args, + ); + let sub_kind = + TypeKind::TemplateInstantiation(sub_inst); + let sub_ty = Type::new( + sub_name, + template_decl_cursor + .cur_type() + .fallible_layout(self) + .ok(), + sub_kind, + false, + ); + let sub_id = self.next_item_id(); + let sub_item = Item::new( + sub_id, + None, + None, + self.current_module.into(), + ItemKind::Type(sub_ty), + Some(child.location()), + ); + + // Bypass all the validations in add_item explicitly. + debug!( + "instantiate_template: inserting nested \ + instantiation item: {:?}", + sub_item + ); + self.add_item_to_module(&sub_item); + debug_assert_eq!(sub_id, sub_item.id()); + self.items[sub_id.0] = Some(sub_item); + args.push(sub_id.as_type_id_unchecked()); + } + } + _ => { + warn!( + "Found template arg cursor we can't handle: {child:?}" + ); + found_const_arg = true; + } + } + } + + if found_const_arg { + // This is a dependently typed template instantiation. That is, an + // instantiation of a template with one or more const values as + // template arguments, rather than only types as template + // arguments. For example, `Foo` versus `Bar`. + // We can't handle these instantiations, so just punt in this + // situation... + warn!( + "Found template instantiated with a const value; \ + bindgen can't handle this kind of template instantiation!" + ); + return None; + } + + if args.len() != num_expected_args { + warn!( + "Found a template with an unexpected number of template \ + arguments" + ); + return None; + } + + args.reverse(); + let type_kind = TypeKind::TemplateInstantiation( + TemplateInstantiation::new(template, args), + ); + let name = ty.spelling(); + let name = if name.is_empty() { None } else { Some(name) }; + let ty = Type::new( + name, + ty.fallible_layout(self).ok(), + type_kind, + ty.is_const(), + ); + let item = Item::new( + with_id, + None, + None, + self.current_module.into(), + ItemKind::Type(ty), + Some(location.location()), + ); + + // Bypass all the validations in add_item explicitly. 
+ debug!("instantiate_template: inserting item: {item:?}"); + self.add_item_to_module(&item); + debug_assert_eq!(with_id, item.id()); + self.items[with_id.0] = Some(item); + Some(with_id.as_type_id_unchecked()) + } + + /// If we have already resolved the type for the given type declaration, + /// return its `ItemId`. Otherwise, return `None`. + pub(crate) fn get_resolved_type( + &self, + decl: &clang::CanonicalTypeDeclaration, + ) -> Option { + self.types + .get(&TypeKey::Declaration(*decl.cursor())) + .or_else(|| { + decl.cursor() + .usr() + .and_then(|usr| self.types.get(&TypeKey::Usr(usr))) + }) + .copied() + } + + /// Looks up for an already resolved type, either because it's builtin, or + /// because we already have it in the map. + pub(crate) fn builtin_or_resolved_ty( + &mut self, + with_id: ItemId, + parent_id: Option, + ty: &clang::Type, + location: Option, + ) -> Option { + use clang_sys::{CXCursor_TypeAliasTemplateDecl, CXCursor_TypeRef}; + debug!("builtin_or_resolved_ty: {ty:?}, {location:?}, {with_id:?}, {parent_id:?}"); + + if let Some(decl) = ty.canonical_declaration(location.as_ref()) { + if let Some(id) = self.get_resolved_type(&decl) { + debug!( + "Already resolved ty {id:?}, {decl:?}, {ty:?} {location:?}" + ); + // If the declaration already exists, then either: + // + // * the declaration is a template declaration of some sort, + // and we are looking at an instantiation or specialization + // of it, or + // * we have already parsed and resolved this type, and + // there's nothing left to do. + if let Some(location) = location { + if decl.cursor().is_template_like() && + *ty != decl.cursor().cur_type() + { + // For specialized type aliases, there's no way to get the + // template parameters as of this writing (for a struct + // specialization we wouldn't be in this branch anyway). + // + // Explicitly return `None` if there aren't any + // unspecialized parameters (contains any `TypeRef`) so we + // resolve the canonical type if there is one and it's + // exposed. + // + // This is _tricky_, I know :( + if decl.cursor().kind() == + CXCursor_TypeAliasTemplateDecl && + !location.contains_cursor(CXCursor_TypeRef) && + ty.canonical_type().is_valid_and_exposed() + { + return None; + } + + return self + .instantiate_template(with_id, id, ty, location) + .or(Some(id)); + } + } + + return Some(self.build_ty_wrapper(with_id, id, parent_id, ty)); + } + } + + debug!("Not resolved, maybe builtin?"); + self.build_builtin_ty(ty) + } + + /// Make a new item that is a resolved type reference to the `wrapped_id`. + /// + /// This is unfortunately a lot of bloat, but is needed to properly track + /// constness et al. + /// + /// We should probably make the constness tracking separate, so it doesn't + /// bloat that much, but hey, we already bloat the heck out of builtin + /// types. + pub(crate) fn build_ty_wrapper( + &mut self, + with_id: ItemId, + wrapped_id: TypeId, + parent_id: Option, + ty: &clang::Type, + ) -> TypeId { + self.build_wrapper(with_id, wrapped_id, parent_id, ty, ty.is_const()) + } + + /// A wrapper over a type that adds a const qualifier explicitly. + /// + /// Needed to handle const methods in C++, wrapping the type . 
+ pub(crate) fn build_const_wrapper( + &mut self, + with_id: ItemId, + wrapped_id: TypeId, + parent_id: Option, + ty: &clang::Type, + ) -> TypeId { + self.build_wrapper( + with_id, wrapped_id, parent_id, ty, /* is_const = */ true, + ) + } + + fn build_wrapper( + &mut self, + with_id: ItemId, + wrapped_id: TypeId, + parent_id: Option, + ty: &clang::Type, + is_const: bool, + ) -> TypeId { + let spelling = ty.spelling(); + let layout = ty.fallible_layout(self).ok(); + let location = ty.declaration().location(); + let type_kind = TypeKind::ResolvedTypeRef(wrapped_id); + let ty = Type::new(Some(spelling), layout, type_kind, is_const); + let item = Item::new( + with_id, + None, + None, + parent_id.unwrap_or_else(|| self.current_module.into()), + ItemKind::Type(ty), + Some(location), + ); + self.add_builtin_item(item); + with_id.as_type_id_unchecked() + } + + /// Returns the next item ID to be used for an item. + pub(crate) fn next_item_id(&mut self) -> ItemId { + let ret = ItemId(self.items.len()); + self.items.push(None); + ret + } + + fn build_builtin_ty(&mut self, ty: &clang::Type) -> Option { + use clang_sys::*; + let type_kind = match ty.kind() { + CXType_NullPtr => TypeKind::NullPtr, + CXType_Void => TypeKind::Void, + CXType_Bool => TypeKind::Int(IntKind::Bool), + CXType_Int => TypeKind::Int(IntKind::Int), + CXType_UInt => TypeKind::Int(IntKind::UInt), + CXType_Char_S => TypeKind::Int(IntKind::Char { is_signed: true }), + CXType_Char_U => TypeKind::Int(IntKind::Char { is_signed: false }), + CXType_SChar => TypeKind::Int(IntKind::SChar), + CXType_UChar => TypeKind::Int(IntKind::UChar), + CXType_Short => TypeKind::Int(IntKind::Short), + CXType_UShort => TypeKind::Int(IntKind::UShort), + CXType_WChar => TypeKind::Int(IntKind::WChar), + CXType_Char16 if self.options().use_distinct_char16_t => { + TypeKind::Int(IntKind::Char16) + } + CXType_Char16 => TypeKind::Int(IntKind::U16), + CXType_Char32 => TypeKind::Int(IntKind::U32), + CXType_Long => TypeKind::Int(IntKind::Long), + CXType_ULong => TypeKind::Int(IntKind::ULong), + CXType_LongLong => TypeKind::Int(IntKind::LongLong), + CXType_ULongLong => TypeKind::Int(IntKind::ULongLong), + CXType_Int128 => TypeKind::Int(IntKind::I128), + CXType_UInt128 => TypeKind::Int(IntKind::U128), + CXType_Float16 | CXType_Half => TypeKind::Float(FloatKind::Float16), + CXType_Float => TypeKind::Float(FloatKind::Float), + CXType_Double => TypeKind::Float(FloatKind::Double), + CXType_LongDouble => TypeKind::Float(FloatKind::LongDouble), + CXType_Float128 => TypeKind::Float(FloatKind::Float128), + CXType_Complex => { + let float_type = + ty.elem_type().expect("Not able to resolve complex type?"); + let float_kind = match float_type.kind() { + CXType_Float16 | CXType_Half => FloatKind::Float16, + CXType_Float => FloatKind::Float, + CXType_Double => FloatKind::Double, + CXType_LongDouble => FloatKind::LongDouble, + CXType_Float128 => FloatKind::Float128, + _ => panic!( + "Non floating-type complex? 
{ty:?}, {float_type:?}", + ), + }; + TypeKind::Complex(float_kind) + } + _ => return None, + }; + + let spelling = ty.spelling(); + let is_const = ty.is_const(); + let layout = ty.fallible_layout(self).ok(); + let location = ty.declaration().location(); + let ty = Type::new(Some(spelling), layout, type_kind, is_const); + let id = self.next_item_id(); + let item = Item::new( + id, + None, + None, + self.root_module.into(), + ItemKind::Type(ty), + Some(location), + ); + self.add_builtin_item(item); + Some(id.as_type_id_unchecked()) + } + + /// Get the current Clang translation unit that is being processed. + pub(crate) fn translation_unit(&self) -> &clang::TranslationUnit { + &self.translation_unit + } + + /// Initialize fallback translation unit if it does not exist and + /// then return a mutable reference to the fallback translation unit. + pub(crate) fn try_ensure_fallback_translation_unit( + &mut self, + ) -> Option<&mut clang::FallbackTranslationUnit> { + if self.fallback_tu.is_none() { + let file = format!( + "{}/.macro_eval.c", + match self.options().clang_macro_fallback_build_dir { + Some(ref path) => path.as_os_str().to_str()?, + None => ".", + } + ); + + let index = clang::Index::new(false, false); + + let mut header_names_to_compile = Vec::new(); + let mut header_paths = Vec::new(); + let mut header_includes = Vec::new(); + let single_header = self.options().input_headers.last().cloned()?; + for input_header in &self.options.input_headers + [..self.options.input_headers.len() - 1] + { + let path = Path::new(input_header.as_ref()); + if let Some(header_path) = path.parent() { + if header_path == Path::new("") { + header_paths.push("."); + } else { + header_paths.push(header_path.as_os_str().to_str()?); + } + } else { + header_paths.push("."); + } + let header_name = path.file_name()?.to_str()?; + header_includes.push(header_name.to_string()); + header_names_to_compile + .push(header_name.split(".h").next()?.to_string()); + } + let pch = format!( + "{}/{}", + match self.options().clang_macro_fallback_build_dir { + Some(ref path) => path.as_os_str().to_str()?, + None => ".", + }, + header_names_to_compile.join("-") + "-precompile.h.pch" + ); + + let mut c_args = self.options.fallback_clang_args.clone(); + c_args.push("-x".to_string().into_boxed_str()); + c_args.push("c-header".to_string().into_boxed_str()); + for header_path in header_paths { + c_args.push(format!("-I{header_path}").into_boxed_str()); + } + for header_include in header_includes { + c_args.push("-include".to_string().into_boxed_str()); + c_args.push(header_include.into_boxed_str()); + } + let mut tu = clang::TranslationUnit::parse( + &index, + &single_header, + &c_args, + &[], + clang_sys::CXTranslationUnit_ForSerialization, + )?; + tu.save(&pch).ok()?; + + let mut c_args = vec![ + "-include-pch".to_string().into_boxed_str(), + pch.clone().into_boxed_str(), + ]; + let mut skip_next = false; + for arg in &self.options.fallback_clang_args { + if arg.as_ref() == "-include" { + skip_next = true; + } else if skip_next { + skip_next = false; + } else { + c_args.push(arg.clone()); + } + } + self.fallback_tu = + Some(clang::FallbackTranslationUnit::new(file, pch, &c_args)?); + } + + self.fallback_tu.as_mut() + } + + /// Have we parsed the macro named `macro_name` already? + pub(crate) fn parsed_macro(&self, macro_name: &[u8]) -> bool { + self.parsed_macros.contains_key(macro_name) + } + + /// Get the currently parsed macros. 
+ pub(crate) fn parsed_macros( + &self, + ) -> &StdHashMap, cexpr::expr::EvalResult> { + debug_assert!(!self.in_codegen_phase()); + &self.parsed_macros + } + + /// Mark the macro named `macro_name` as parsed. + pub(crate) fn note_parsed_macro( + &mut self, + id: Vec, + value: cexpr::expr::EvalResult, + ) { + self.parsed_macros.insert(id, value); + } + + /// Are we in the codegen phase? + pub(crate) fn in_codegen_phase(&self) -> bool { + self.in_codegen + } + + /// Mark the type with the given `name` as replaced by the type with ID + /// `potential_ty`. + /// + /// Replacement types are declared using the `replaces="xxx"` annotation, + /// and implies that the original type is hidden. + pub(crate) fn replace(&mut self, name: &[String], potential_ty: ItemId) { + match self.replacements.entry(name.into()) { + Entry::Vacant(entry) => { + debug!("Defining replacement for {name:?} as {potential_ty:?}"); + entry.insert(potential_ty); + } + Entry::Occupied(occupied) => { + warn!( + "Replacement for {name:?} already defined as {:?}; \ + ignoring duplicate replacement definition as {potential_ty:?}", + occupied.get(), + ); + } + } + } + + /// Has the item with the given `name` and `id` been replaced by another + /// type? + pub(crate) fn is_replaced_type>( + &self, + path: &[String], + id: Id, + ) -> bool { + let id = id.into(); + matches!(self.replacements.get(path), Some(replaced_by) if *replaced_by != id) + } + + /// Is the type with the given `name` marked as opaque? + pub(crate) fn opaque_by_name(&self, path: &[String]) -> bool { + debug_assert!( + self.in_codegen_phase(), + "You're not supposed to call this yet" + ); + self.options.opaque_types.matches(path[1..].join("::")) + } + + /// Get the options used to configure this bindgen context. + pub(crate) fn options(&self) -> &BindgenOptions { + &self.options + } + + /// Tokenizes a namespace cursor in order to get the name and kind of the + /// namespace. + fn tokenize_namespace( + &self, + cursor: &Cursor, + ) -> (Option, ModuleKind) { + assert_eq!( + cursor.kind(), + ::clang_sys::CXCursor_Namespace, + "Be a nice person" + ); + + let mut module_name = None; + let spelling = cursor.spelling(); + if !spelling.is_empty() { + module_name = Some(spelling); + } + + let mut kind = ModuleKind::Normal; + let mut looking_for_name = false; + for token in cursor.tokens().iter() { + match token.spelling() { + b"inline" => { + debug_assert!( + kind != ModuleKind::Inline, + "Multiple inline keywords?" + ); + kind = ModuleKind::Inline; + // When hitting a nested inline namespace we get a spelling + // that looks like ["inline", "foo"]. Deal with it properly. + looking_for_name = true; + } + // The double colon allows us to handle nested namespaces like + // namespace foo::bar { } + // + // libclang still gives us two namespace cursors, which is cool, + // but the tokenization of the second begins with the double + // colon. That's ok, so we only need to handle the weird + // tokenization here. + b"namespace" | b"::" => { + looking_for_name = true; + } + b"{" => { + // This should be an anonymous namespace. + assert!(looking_for_name); + break; + } + name => { + if looking_for_name { + if module_name.is_none() { + module_name = Some( + String::from_utf8_lossy(name).into_owned(), + ); + } + break; + } + // This is _likely_, but not certainly, a macro that's + // been placed just before the namespace keyword. + // Unfortunately, clang tokens don't let us easily see + // through the ifdef tokens, so we don't know what this + // token should really be. 
Instead of panicking though, + // we warn the user that we assumed the token was blank, + // and then move on. + // + // See also https://github.com/rust-lang/rust-bindgen/issues/1676. + warn!("Ignored unknown namespace prefix '{}' at {token:?} in {cursor:?}", String::from_utf8_lossy(name)); + } + } + } + + if cursor.is_inline_namespace() { + kind = ModuleKind::Inline; + } + + (module_name, kind) + } + + /// Given a `CXCursor_Namespace` cursor, return the item ID of the + /// corresponding module, or create one on the fly. + pub(crate) fn module(&mut self, cursor: Cursor) -> ModuleId { + use clang_sys::*; + assert_eq!(cursor.kind(), CXCursor_Namespace, "Be a nice person"); + let cursor = cursor.canonical(); + if let Some(id) = self.modules.get(&cursor) { + return *id; + } + + let (module_name, kind) = self.tokenize_namespace(&cursor); + + let module_id = self.next_item_id(); + let module = Module::new(module_name, kind); + let module = Item::new( + module_id, + None, + None, + self.current_module.into(), + ItemKind::Module(module), + Some(cursor.location()), + ); + + let module_id = module.id().as_module_id_unchecked(); + self.modules.insert(cursor, module_id); + + self.add_item(module, None, None); + + module_id + } + + /// Start traversing the module with the given `module_id`, invoke the + /// callback `cb`, and then return to traversing the original module. + pub(crate) fn with_module(&mut self, module_id: ModuleId, cb: F) + where + F: FnOnce(&mut Self), + { + debug_assert!(self.resolve_item(module_id).kind().is_module(), "Wat"); + + let previous_id = self.current_module; + self.current_module = module_id; + + cb(self); + + self.current_module = previous_id; + } + + /// Iterate over all (explicitly or transitively) allowlisted items. + /// + /// If no items are explicitly allowlisted, then all items are considered + /// allowlisted. + pub(crate) fn allowlisted_items(&self) -> &ItemSet { + assert!(self.in_codegen_phase()); + assert_eq!(self.current_module, self.root_module); + + self.allowlisted.as_ref().unwrap() + } + + /// Check whether a particular blocklisted type implements a trait or not. + /// Results may be cached. + pub(crate) fn blocklisted_type_implements_trait( + &self, + item: &Item, + derive_trait: DeriveTrait, + ) -> CanDerive { + assert!(self.in_codegen_phase()); + assert_eq!(self.current_module, self.root_module); + + *self + .blocklisted_types_implement_traits + .borrow_mut() + .entry(derive_trait) + .or_default() + .entry(item.id()) + .or_insert_with(|| { + item.expect_type() + .name() + .and_then(|name| { + if self.options.parse_callbacks.is_empty() { + // Sized integer types from get mapped to Rust primitive + // types regardless of whether they are blocklisted, so ensure that + // standard traits are considered derivable for them too. + if self.is_stdint_type(name) { + Some(CanDerive::Yes) + } else { + Some(CanDerive::No) + } + } else { + self.options.last_callback(|cb| { + cb.blocklisted_type_implements_trait( + name, + derive_trait, + ) + }) + } + }) + .unwrap_or(CanDerive::No) + }) + } + + /// Is the given type a type from that corresponds to a Rust primitive type? + pub(crate) fn is_stdint_type(&self, name: &str) -> bool { + match name { + "int8_t" | "uint8_t" | "int16_t" | "uint16_t" | "int32_t" | + "uint32_t" | "int64_t" | "uint64_t" | "uintptr_t" | + "intptr_t" | "ptrdiff_t" => true, + "size_t" | "ssize_t" => self.options.size_t_is_usize, + _ => false, + } + } + + /// Get a reference to the set of items we should generate. 
+ pub(crate) fn codegen_items(&self) -> &ItemSet { + assert!(self.in_codegen_phase()); + assert_eq!(self.current_module, self.root_module); + self.codegen_items.as_ref().unwrap() + } + + /// Compute the allowlisted items set and populate `self.allowlisted`. + fn compute_allowlisted_and_codegen_items(&mut self) { + assert!(self.in_codegen_phase()); + assert_eq!(self.current_module, self.root_module); + assert!(self.allowlisted.is_none()); + let _t = self.timer("compute_allowlisted_and_codegen_items"); + + let roots = { + let mut roots = self + .items() + // Only consider roots that are enabled for codegen. + .filter(|&(_, item)| item.is_enabled_for_codegen(self)) + .filter(|&(_, item)| { + // If nothing is explicitly allowlisted, then everything is fair + // game. + if self.options().allowlisted_types.is_empty() && + self.options().allowlisted_functions.is_empty() && + self.options().allowlisted_vars.is_empty() && + self.options().allowlisted_files.is_empty() && + self.options().allowlisted_items.is_empty() + { + return true; + } + + // If this is a type that explicitly replaces another, we assume + // you know what you're doing. + if item.annotations().use_instead_of().is_some() { + return true; + } + + // Items with a source location in an explicitly allowlisted file + // are always included. + if !self.options().allowlisted_files.is_empty() { + if let Some(location) = item.location() { + let (file, _, _, _) = location.location(); + if let Some(filename) = file.name() { + if self + .options() + .allowlisted_files + .matches(filename) + { + return true; + } + } + } + } + + let name = item.path_for_allowlisting(self)[1..].join("::"); + debug!("allowlisted_items: testing {name:?}"); + + if self.options().allowlisted_items.matches(&name) { + return true; + } + + match *item.kind() { + ItemKind::Module(..) => true, + ItemKind::Function(_) => { + self.options().allowlisted_functions.matches(&name) + } + ItemKind::Var(_) => { + self.options().allowlisted_vars.matches(&name) + } + ItemKind::Type(ref ty) => { + if self.options().allowlisted_types.matches(&name) { + return true; + } + + // Auto-allowlist types that don't need code + // generation if not allowlisting recursively, to + // make the #[derive] analysis not be lame. + if !self.options().allowlist_recursively { + match *ty.kind() { + TypeKind::Void | + TypeKind::NullPtr | + TypeKind::Int(..) | + TypeKind::Float(..) | + TypeKind::Complex(..) | + TypeKind::Array(..) | + TypeKind::Vector(..) | + TypeKind::Pointer(..) | + TypeKind::Reference(..) | + TypeKind::Function(..) | + TypeKind::ResolvedTypeRef(..) | + TypeKind::Opaque | + TypeKind::TypeParam => return true, + _ => {} + } + if self.is_stdint_type(&name) { + return true; + } + } + + // Unnamed top-level enums are special and we + // allowlist them via the `allowlisted_vars` filter, + // since they're effectively top-level constants, + // and there's no way for them to be referenced + // consistently. 
+ let parent = self.resolve_item(item.parent_id()); + if !parent.is_module() { + return false; + } + + let TypeKind::Enum(ref enum_) = *ty.kind() else { + return false; + }; + + if ty.name().is_some() { + return false; + } + + let mut prefix_path = + parent.path_for_allowlisting(self).clone(); + enum_.variants().iter().any(|variant| { + prefix_path.push( + variant.name_for_allowlisting().into(), + ); + let name = prefix_path[1..].join("::"); + prefix_path.pop().unwrap(); + self.options().allowlisted_vars.matches(&name) + || self + .options() + .allowlisted_items + .matches(name) + }) + } + } + }) + .map(|(id, _)| id) + .collect::>(); + + // The reversal preserves the expected ordering of traversal, + // resulting in more stable-ish bindgen-generated names for + // anonymous types (like unions). + roots.reverse(); + roots + }; + + let allowlisted_items_predicate = + if self.options().allowlist_recursively { + traversal::all_edges + } else { + // Only follow InnerType edges from the allowlisted roots. + // Such inner types (e.g. anonymous structs/unions) are + // always emitted by codegen, and they need to be allowlisted + // to make sure they are processed by e.g. the derive analysis. + traversal::only_inner_type_edges + }; + + let allowlisted = AllowlistedItemsTraversal::new( + self, + roots.clone(), + allowlisted_items_predicate, + ) + .collect::(); + + let codegen_items = if self.options().allowlist_recursively { + AllowlistedItemsTraversal::new( + self, + roots, + traversal::codegen_edges, + ) + .collect::() + } else { + allowlisted.clone() + }; + + self.allowlisted = Some(allowlisted); + self.codegen_items = Some(codegen_items); + + for item in self.options().allowlisted_functions.unmatched_items() { + unused_regex_diagnostic(item, "--allowlist-function", self); + } + + for item in self.options().allowlisted_vars.unmatched_items() { + unused_regex_diagnostic(item, "--allowlist-var", self); + } + + for item in self.options().allowlisted_types.unmatched_items() { + unused_regex_diagnostic(item, "--allowlist-type", self); + } + + for item in self.options().allowlisted_items.unmatched_items() { + unused_regex_diagnostic(item, "--allowlist-items", self); + } + } + + /// Convenient method for getting the prefix to use for most traits in + /// codegen depending on the `use_core` option. + pub(crate) fn trait_prefix(&self) -> Ident { + if self.options().use_core { + self.rust_ident_raw("core") + } else { + self.rust_ident_raw("std") + } + } + + /// Call if an opaque array is generated + pub(crate) fn generated_opaque_array(&self) { + self.generated_opaque_array.set(true); + } + + /// Whether we need to generate the opaque array type + pub(crate) fn need_opaque_array_type(&self) -> bool { + self.generated_opaque_array.get() + } + + /// Call if a bindgen complex is generated + pub(crate) fn generated_bindgen_complex(&self) { + self.generated_bindgen_complex.set(true); + } + + /// Whether we need to generate the bindgen complex type + pub(crate) fn need_bindgen_complex_type(&self) -> bool { + self.generated_bindgen_complex.get() + } + + /// Call if a bindgen float16 is generated + pub(crate) fn generated_bindgen_float16(&self) { + self.generated_bindgen_float16.set(true); + } + + /// Whether we need to generate the bindgen float16 type + pub(crate) fn need_bindgen_float16_type(&self) -> bool { + self.generated_bindgen_float16.get() + } + + /// Compute which `enum`s have an associated `typedef` definition. 
+ fn compute_enum_typedef_combos(&mut self) { + let _t = self.timer("compute_enum_typedef_combos"); + assert!(self.enum_typedef_combos.is_none()); + + let mut enum_typedef_combos = HashSet::default(); + for item in &self.items { + if let Some(ItemKind::Module(module)) = + item.as_ref().map(Item::kind) + { + // Find typedefs in this module, and build set of their names. + let mut names_of_typedefs = HashSet::default(); + for child_id in module.children() { + if let Some(ItemKind::Type(ty)) = + self.items[child_id.0].as_ref().map(Item::kind) + { + if let (Some(name), TypeKind::Alias(type_id)) = + (ty.name(), ty.kind()) + { + // We disregard aliases that refer to the enum + // itself, such as in `typedef enum { ... } Enum;`. + if type_id + .into_resolver() + .through_type_refs() + .through_type_aliases() + .resolve(self) + .expect_type() + .is_int() + { + names_of_typedefs.insert(name); + } + } + } + } + + // Find enums in this module, and record the ID of each one that + // has a typedef. + for child_id in module.children() { + if let Some(ItemKind::Type(ty)) = + self.items[child_id.0].as_ref().map(Item::kind) + { + if let (Some(name), true) = (ty.name(), ty.is_enum()) { + if names_of_typedefs.contains(name) { + enum_typedef_combos.insert(*child_id); + } + } + } + } + } + } + + self.enum_typedef_combos = Some(enum_typedef_combos); + } + + /// Look up whether `id` refers to an `enum` whose underlying type is + /// defined by a `typedef`. + pub(crate) fn is_enum_typedef_combo(&self, id: ItemId) -> bool { + assert!( + self.in_codegen_phase(), + "We only compute enum_typedef_combos when we enter codegen", + ); + self.enum_typedef_combos.as_ref().unwrap().contains(&id) + } + + /// Compute whether we can derive debug. + fn compute_cannot_derive_debug(&mut self) { + let _t = self.timer("compute_cannot_derive_debug"); + assert!(self.cannot_derive_debug.is_none()); + if self.options.derive_debug { + self.cannot_derive_debug = + Some(as_cannot_derive_set(analyze::(( + self, + DeriveTrait::Debug, + )))); + } + } + + /// Look up whether the item with `id` can + /// derive debug or not. + pub(crate) fn lookup_can_derive_debug>( + &self, + id: Id, + ) -> bool { + let id = id.into(); + assert!( + self.in_codegen_phase(), + "We only compute can_derive_debug when we enter codegen" + ); + + // Look up the computed value for whether the item with `id` can + // derive debug or not. + !self.cannot_derive_debug.as_ref().unwrap().contains(&id) + } + + /// Compute whether we can derive default. + fn compute_cannot_derive_default(&mut self) { + let _t = self.timer("compute_cannot_derive_default"); + assert!(self.cannot_derive_default.is_none()); + if self.options.derive_default { + self.cannot_derive_default = + Some(as_cannot_derive_set(analyze::(( + self, + DeriveTrait::Default, + )))); + } + } + + /// Look up whether the item with `id` can + /// derive default or not. + pub(crate) fn lookup_can_derive_default>( + &self, + id: Id, + ) -> bool { + let id = id.into(); + assert!( + self.in_codegen_phase(), + "We only compute can_derive_default when we enter codegen" + ); + + // Look up the computed value for whether the item with `id` can + // derive default or not. + !self.cannot_derive_default.as_ref().unwrap().contains(&id) + } + + /// Compute whether we can derive copy. 
+    fn compute_cannot_derive_copy(&mut self) {
+        let _t = self.timer("compute_cannot_derive_copy");
+        assert!(self.cannot_derive_copy.is_none());
+        self.cannot_derive_copy =
+            Some(as_cannot_derive_set(analyze::<CannotDerive>((
+                self,
+                DeriveTrait::Copy,
+            ))));
+    }
+
+    /// Compute whether we can derive hash.
+    fn compute_cannot_derive_hash(&mut self) {
+        let _t = self.timer("compute_cannot_derive_hash");
+        assert!(self.cannot_derive_hash.is_none());
+        if self.options.derive_hash {
+            self.cannot_derive_hash =
+                Some(as_cannot_derive_set(analyze::<CannotDerive>((
+                    self,
+                    DeriveTrait::Hash,
+                ))));
+        }
+    }
+
+    /// Look up whether the item with `id` can
+    /// derive hash or not.
+    pub(crate) fn lookup_can_derive_hash<Id: Into<ItemId>>(
+        &self,
+        id: Id,
+    ) -> bool {
+        let id = id.into();
+        assert!(
+            self.in_codegen_phase(),
+            "We only compute can_derive_debug when we enter codegen"
+        );
+
+        // Look up the computed value for whether the item with `id` can
+        // derive hash or not.
+        !self.cannot_derive_hash.as_ref().unwrap().contains(&id)
+    }
+
+    /// Compute whether we can derive `PartialOrd`, `PartialEq` or `Eq`.
+    fn compute_cannot_derive_partialord_partialeq_or_eq(&mut self) {
+        let _t = self.timer("compute_cannot_derive_partialord_partialeq_or_eq");
+        assert!(self.cannot_derive_partialeq_or_partialord.is_none());
+        if self.options.derive_partialord ||
+            self.options.derive_partialeq ||
+            self.options.derive_eq
+        {
+            self.cannot_derive_partialeq_or_partialord =
+                Some(analyze::<CannotDerive>((
+                    self,
+                    DeriveTrait::PartialEqOrPartialOrd,
+                )));
+        }
+    }
+
+    /// Look up whether the item with `id` can derive `Partial{Eq,Ord}`.
+    pub(crate) fn lookup_can_derive_partialeq_or_partialord<
+        Id: Into<ItemId>,
+    >(
+        &self,
+        id: Id,
+    ) -> CanDerive {
+        let id = id.into();
+        assert!(
+            self.in_codegen_phase(),
+            "We only compute can_derive_partialeq_or_partialord when we enter codegen"
+        );
+
+        // Look up the computed value for whether the item with `id` can
+        // derive partialeq or not.
+        self.cannot_derive_partialeq_or_partialord
+            .as_ref()
+            .unwrap()
+            .get(&id)
+            .copied()
+            .unwrap_or(CanDerive::Yes)
+    }
+
+    /// Look up whether the item with `id` can derive `Copy` or not.
+    pub(crate) fn lookup_can_derive_copy<Id: Into<ItemId>>(
+        &self,
+        id: Id,
+    ) -> bool {
+        assert!(
+            self.in_codegen_phase(),
+            "We only compute can_derive_debug when we enter codegen"
+        );
+
+        // Look up the computed value for whether the item with `id` can
+        // derive `Copy` or not.
+        let id = id.into();
+
+        !self.lookup_has_type_param_in_array(id) &&
+            !self.cannot_derive_copy.as_ref().unwrap().contains(&id)
+    }
+
+    /// Compute whether the type has type parameter in array.
+    fn compute_has_type_param_in_array(&mut self) {
+        let _t = self.timer("compute_has_type_param_in_array");
+        assert!(self.has_type_param_in_array.is_none());
+        self.has_type_param_in_array =
+            Some(analyze::<HasTypeParameterInArray>(self));
+    }
+
+    /// Look up whether the item with `id` has type parameter in array or not.
+    pub(crate) fn lookup_has_type_param_in_array<Id: Into<ItemId>>(
+        &self,
+        id: Id,
+    ) -> bool {
+        assert!(
+            self.in_codegen_phase(),
+            "We only compute has array when we enter codegen"
+        );
+
+        // Look up the computed value for whether the item with `id` has
+        // type parameter in array or not.
+        self.has_type_param_in_array
+            .as_ref()
+            .unwrap()
+            .contains(&id.into())
+    }
+
+    /// Compute whether the type has float.
+ fn compute_has_float(&mut self) { + let _t = self.timer("compute_has_float"); + assert!(self.has_float.is_none()); + if self.options.derive_eq || self.options.derive_ord { + self.has_float = Some(analyze::(self)); + } + } + + /// Look up whether the item with `id` has array or not. + pub(crate) fn lookup_has_float>(&self, id: Id) -> bool { + assert!( + self.in_codegen_phase(), + "We only compute has float when we enter codegen" + ); + + // Look up the computed value for whether the item with `id` has + // float or not. + self.has_float.as_ref().unwrap().contains(&id.into()) + } + + /// Check if `--no-partialeq` flag is enabled for this item. + pub(crate) fn no_partialeq_by_name(&self, item: &Item) -> bool { + let name = item.path_for_allowlisting(self)[1..].join("::"); + self.options().no_partialeq_types.matches(name) + } + + /// Check if `--no-copy` flag is enabled for this item. + pub(crate) fn no_copy_by_name(&self, item: &Item) -> bool { + let name = item.path_for_allowlisting(self)[1..].join("::"); + self.options().no_copy_types.matches(name) + } + + /// Check if `--no-debug` flag is enabled for this item. + pub(crate) fn no_debug_by_name(&self, item: &Item) -> bool { + let name = item.path_for_allowlisting(self)[1..].join("::"); + self.options().no_debug_types.matches(name) + } + + /// Check if `--no-default` flag is enabled for this item. + pub(crate) fn no_default_by_name(&self, item: &Item) -> bool { + let name = item.path_for_allowlisting(self)[1..].join("::"); + self.options().no_default_types.matches(name) + } + + /// Check if `--no-hash` flag is enabled for this item. + pub(crate) fn no_hash_by_name(&self, item: &Item) -> bool { + let name = item.path_for_allowlisting(self)[1..].join("::"); + self.options().no_hash_types.matches(name) + } + + /// Check if `--must-use-type` flag is enabled for this item. + pub(crate) fn must_use_type_by_name(&self, item: &Item) -> bool { + let name = item.path_for_allowlisting(self)[1..].join("::"); + self.options().must_use_types.matches(name) + } + + /// Wrap some tokens in an `unsafe` block if the `--wrap-unsafe-ops` option is enabled. + pub(crate) fn wrap_unsafe_ops(&self, tokens: impl ToTokens) -> TokenStream { + if self.options.wrap_unsafe_ops { + quote!(unsafe { #tokens }) + } else { + tokens.into_token_stream() + } + } + + /// Get the suffix to be added to `static` functions if the `--wrap-static-fns` option is + /// enabled. + pub(crate) fn wrap_static_fns_suffix(&self) -> &str { + self.options() + .wrap_static_fns_suffix + .as_deref() + .unwrap_or(crate::DEFAULT_NON_EXTERN_FNS_SUFFIX) + } +} + +/// A builder struct for configuring item resolution options. +#[derive(Debug, Copy, Clone)] +pub(crate) struct ItemResolver { + id: ItemId, + through_type_refs: bool, + through_type_aliases: bool, +} + +impl ItemId { + /// Create an `ItemResolver` from this item ID. + pub(crate) fn into_resolver(self) -> ItemResolver { + self.into() + } +} + +impl From for ItemResolver +where + T: Into, +{ + fn from(id: T) -> ItemResolver { + ItemResolver::new(id) + } +} + +impl ItemResolver { + /// Construct a new `ItemResolver` from the given ID. + pub(crate) fn new>(id: Id) -> ItemResolver { + let id = id.into(); + ItemResolver { + id, + through_type_refs: false, + through_type_aliases: false, + } + } + + /// Keep resolving through `Type::TypeRef` items. + pub(crate) fn through_type_refs(mut self) -> ItemResolver { + self.through_type_refs = true; + self + } + + /// Keep resolving through `Type::Alias` items. 
+ pub(crate) fn through_type_aliases(mut self) -> ItemResolver { + self.through_type_aliases = true; + self + } + + /// Finish configuring and perform the actual item resolution. + pub(crate) fn resolve(self, ctx: &BindgenContext) -> &Item { + assert!(ctx.collected_typerefs()); + + let mut id = self.id; + let mut seen_ids = HashSet::default(); + loop { + let item = ctx.resolve_item(id); + + // Detect cycles and bail out. These can happen in certain cases + // involving incomplete qualified dependent types (#2085). + if !seen_ids.insert(id) { + return item; + } + + let ty_kind = item.as_type().map(|t| t.kind()); + match ty_kind { + Some(&TypeKind::ResolvedTypeRef(next_id)) + if self.through_type_refs => + { + id = next_id.into(); + } + // We intentionally ignore template aliases here, as they are + // more complicated, and don't represent a simple renaming of + // some type. + Some(&TypeKind::Alias(next_id)) + if self.through_type_aliases => + { + id = next_id.into(); + } + _ => return item, + } + } + } +} + +/// A type that we are in the middle of parsing. +#[derive(Clone, Copy, Debug, PartialEq, Eq)] +pub(crate) struct PartialType { + decl: Cursor, + // Just an ItemId, and not a TypeId, because we haven't finished this type + // yet, so there's still time for things to go wrong. + id: ItemId, +} + +impl PartialType { + /// Construct a new `PartialType`. + pub(crate) fn new(decl: Cursor, id: ItemId) -> PartialType { + // assert!(decl == decl.canonical()); + PartialType { decl, id } + } + + /// The cursor pointing to this partial type's declaration location. + pub(crate) fn decl(&self) -> &Cursor { + &self.decl + } + + /// The item ID allocated for this type. This is *NOT* a key for an entry in + /// the context's item set yet! + pub(crate) fn id(&self) -> ItemId { + self.id + } +} + +impl TemplateParameters for PartialType { + fn self_template_params(&self, _ctx: &BindgenContext) -> Vec { + // Maybe at some point we will eagerly parse named types, but for now we + // don't and this information is unavailable. + vec![] + } + + fn num_self_template_params(&self, _ctx: &BindgenContext) -> usize { + // Wouldn't it be nice if libclang would reliably give us this + // information‽ + match self.decl().kind() { + clang_sys::CXCursor_ClassTemplate | + clang_sys::CXCursor_FunctionTemplate | + clang_sys::CXCursor_TypeAliasTemplateDecl => { + let mut num_params = 0; + self.decl().visit(|c| { + match c.kind() { + clang_sys::CXCursor_TemplateTypeParameter | + clang_sys::CXCursor_TemplateTemplateParameter | + clang_sys::CXCursor_NonTypeTemplateParameter => { + num_params += 1; + } + _ => {} + } + clang_sys::CXChildVisit_Continue + }); + num_params + } + _ => 0, + } + } +} + +fn unused_regex_diagnostic(item: &str, name: &str, _ctx: &BindgenContext) { + warn!("unused option: {name} {item}"); + + #[cfg(feature = "experimental")] + if _ctx.options().emit_diagnostics { + use crate::diagnostics::{Diagnostic, Level}; + + Diagnostic::default() + .with_title( + format!("Unused regular expression: `{item}`."), + Level::Warning, + ) + .add_annotation( + format!("This regular expression was passed to `{name}`."), + Level::Note, + ) + .display(); + } +} diff --git a/vendor/bindgen/ir/derive.rs b/vendor/bindgen/ir/derive.rs new file mode 100644 index 00000000000000..3ee6476af9a76d --- /dev/null +++ b/vendor/bindgen/ir/derive.rs @@ -0,0 +1,130 @@ +//! Traits for determining whether we can derive traits for a thing or not. +//! +//! These traits tend to come in pairs: +//! +//! 1. 
A "trivial" version, whose implementations aren't allowed to recursively +//! look at other types or the results of fix point analyses. +//! +//! 2. A "normal" version, whose implementations simply query the results of a +//! fix point analysis. +//! +//! The former is used by the analyses when creating the results queried by the +//! second. + +use super::context::BindgenContext; + +use std::cmp; +use std::ops; + +/// A trait that encapsulates the logic for whether or not we can derive `Debug` +/// for a given thing. +pub(crate) trait CanDeriveDebug { + /// Return `true` if `Debug` can be derived for this thing, `false` + /// otherwise. + fn can_derive_debug(&self, ctx: &BindgenContext) -> bool; +} + +/// A trait that encapsulates the logic for whether or not we can derive `Copy` +/// for a given thing. +pub(crate) trait CanDeriveCopy { + /// Return `true` if `Copy` can be derived for this thing, `false` + /// otherwise. + fn can_derive_copy(&self, ctx: &BindgenContext) -> bool; +} + +/// A trait that encapsulates the logic for whether or not we can derive +/// `Default` for a given thing. +pub(crate) trait CanDeriveDefault { + /// Return `true` if `Default` can be derived for this thing, `false` + /// otherwise. + fn can_derive_default(&self, ctx: &BindgenContext) -> bool; +} + +/// A trait that encapsulates the logic for whether or not we can derive `Hash` +/// for a given thing. +pub(crate) trait CanDeriveHash { + /// Return `true` if `Hash` can be derived for this thing, `false` + /// otherwise. + fn can_derive_hash(&self, ctx: &BindgenContext) -> bool; +} + +/// A trait that encapsulates the logic for whether or not we can derive +/// `PartialEq` for a given thing. +pub(crate) trait CanDerivePartialEq { + /// Return `true` if `PartialEq` can be derived for this thing, `false` + /// otherwise. + fn can_derive_partialeq(&self, ctx: &BindgenContext) -> bool; +} + +/// A trait that encapsulates the logic for whether or not we can derive +/// `PartialOrd` for a given thing. +pub(crate) trait CanDerivePartialOrd { + /// Return `true` if `PartialOrd` can be derived for this thing, `false` + /// otherwise. + fn can_derive_partialord(&self, ctx: &BindgenContext) -> bool; +} + +/// A trait that encapsulates the logic for whether or not we can derive `Eq` +/// for a given thing. +pub(crate) trait CanDeriveEq { + /// Return `true` if `Eq` can be derived for this thing, `false` otherwise. + fn can_derive_eq(&self, ctx: &BindgenContext) -> bool; +} + +/// A trait that encapsulates the logic for whether or not we can derive `Ord` +/// for a given thing. +pub(crate) trait CanDeriveOrd { + /// Return `true` if `Ord` can be derived for this thing, `false` otherwise. + fn can_derive_ord(&self, ctx: &BindgenContext) -> bool; +} + +/// Whether it is possible or not to automatically derive trait for an item. +/// +/// ```ignore +/// No +/// ^ +/// | +/// Manually +/// ^ +/// | +/// Yes +/// ``` +/// +/// Initially we assume that we can derive trait for all types and then +/// update our understanding as we learn more about each type. +#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Default)] +pub enum CanDerive { + /// Yes, we can derive automatically. + #[default] + Yes, + + /// The only thing that stops us from automatically deriving is that + /// array with more than maximum number of elements is used. + /// + /// This means we probably can "manually" implement such trait. + Manually, + + /// No, we cannot. + No, +} + +impl CanDerive { + /// Take the least upper bound of `self` and `rhs`. 
+ pub(crate) fn join(self, rhs: Self) -> Self { + cmp::max(self, rhs) + } +} + +impl ops::BitOr for CanDerive { + type Output = Self; + + fn bitor(self, rhs: Self) -> Self::Output { + self.join(rhs) + } +} + +impl ops::BitOrAssign for CanDerive { + fn bitor_assign(&mut self, rhs: Self) { + *self = self.join(rhs); + } +} diff --git a/vendor/bindgen/ir/dot.rs b/vendor/bindgen/ir/dot.rs new file mode 100644 index 00000000000000..9bfc559f41fa88 --- /dev/null +++ b/vendor/bindgen/ir/dot.rs @@ -0,0 +1,85 @@ +//! Generating Graphviz `dot` files from our IR. + +use super::context::{BindgenContext, ItemId}; +use super::traversal::Trace; +use std::fs::File; +use std::io::{self, Write}; +use std::path::Path; + +/// A trait for anything that can write attributes as `` rows to a dot +/// file. +pub(crate) trait DotAttributes { + /// Write this thing's attributes to the given output. Each attribute must + /// be its own `...`. + fn dot_attributes( + &self, + ctx: &BindgenContext, + out: &mut W, + ) -> io::Result<()> + where + W: Write; +} + +/// Write a graphviz dot file containing our IR. +pub(crate) fn write_dot_file

+<P>(ctx: &BindgenContext, path: P) -> io::Result<()>
+where
+    P: AsRef<Path>,
+{
+    let file = File::create(path)?;
+    let mut dot_file = io::BufWriter::new(file);
+    writeln!(&mut dot_file, "digraph {{")?;
+
+    let mut err: Option<io::Result<()>> = None;
+
+    for (id, item) in ctx.items() {
+        let is_allowlisted = ctx.allowlisted_items().contains(&id);
+
+        writeln!(
+            &mut dot_file,
+            r#"{} [fontname="courier", color={}, label=< <table border="0" align="left">"#,
+            id.as_usize(),
+            if is_allowlisted { "black" } else { "gray" }
+        )?;
+        item.dot_attributes(ctx, &mut dot_file)?;
+        writeln!(&mut dot_file, "</table>
>];")?; + + item.trace( + ctx, + &mut |sub_id: ItemId, edge_kind| { + if err.is_some() { + return; + } + + match writeln!( + &mut dot_file, + "{} -> {} [label={edge_kind:?}, color={}];", + id.as_usize(), + sub_id.as_usize(), + if is_allowlisted { "black" } else { "gray" } + ) { + Ok(_) => {} + Err(e) => err = Some(Err(e)), + } + }, + &(), + ); + + if let Some(err) = err { + return err; + } + + if let Some(module) = item.as_module() { + for child in module.children() { + writeln!( + &mut dot_file, + "{} -> {} [style=dotted, color=gray]", + item.id().as_usize(), + child.as_usize() + )?; + } + } + } + + writeln!(&mut dot_file, "}}")?; + Ok(()) +} diff --git a/vendor/bindgen/ir/enum_ty.rs b/vendor/bindgen/ir/enum_ty.rs new file mode 100644 index 00000000000000..9b08da3bce108e --- /dev/null +++ b/vendor/bindgen/ir/enum_ty.rs @@ -0,0 +1,321 @@ +//! Intermediate representation for C/C++ enumerations. + +use super::super::codegen::EnumVariation; +use super::context::{BindgenContext, TypeId}; +use super::item::Item; +use super::ty::{Type, TypeKind}; +use crate::clang; +use crate::ir::annotations::Annotations; +use crate::parse::ParseError; +use crate::regex_set::RegexSet; + +/// An enum representing custom handling that can be given to a variant. +#[derive(Copy, Clone, Debug, PartialEq, Eq)] +pub enum EnumVariantCustomBehavior { + /// This variant will be a module containing constants. + ModuleConstify, + /// This variant will be constified, that is, forced to generate a constant. + Constify, + /// This variant will be hidden entirely from the resulting enum. + Hide, +} + +/// A C/C++ enumeration. +#[derive(Debug)] +pub(crate) struct Enum { + /// The representation used for this enum; it should be an `IntKind` type or + /// an alias to one. + /// + /// It's `None` if the enum is a forward declaration and isn't defined + /// anywhere else, see `tests/headers/func_ptr_in_struct.h`. + repr: Option, + + /// The different variants, with explicit values. + variants: Vec, +} + +impl Enum { + /// Construct a new `Enum` with the given representation and variants. + pub(crate) fn new( + repr: Option, + variants: Vec, + ) -> Self { + Enum { repr, variants } + } + + /// Get this enumeration's representation. + pub(crate) fn repr(&self) -> Option { + self.repr + } + + /// Get this enumeration's variants. + pub(crate) fn variants(&self) -> &[EnumVariant] { + &self.variants + } + + /// Construct an enumeration from the given Clang type. + pub(crate) fn from_ty( + ty: &clang::Type, + ctx: &mut BindgenContext, + ) -> Result { + use clang_sys::*; + debug!("Enum::from_ty {ty:?}"); + + if ty.kind() != CXType_Enum { + return Err(ParseError::Continue); + } + + let declaration = ty.declaration().canonical(); + let repr = declaration + .enum_type() + .and_then(|et| Item::from_ty(&et, declaration, None, ctx).ok()); + let mut variants = vec![]; + + let variant_ty = + repr.and_then(|r| ctx.resolve_type(r).safe_canonical_type(ctx)); + let is_bool = variant_ty.is_some_and(Type::is_bool); + + // Assume signedness since the default type by the C standard is an int. + let is_signed = variant_ty.map_or(true, |ty| match *ty.kind() { + TypeKind::Int(ref int_kind) => int_kind.is_signed(), + ref other => { + panic!("Since when enums can be non-integers? 
{other:?}") + } + }); + + let type_name = ty.spelling(); + let type_name = if type_name.is_empty() { + None + } else { + Some(type_name) + }; + let type_name = type_name.as_deref(); + + let definition = declaration.definition().unwrap_or(declaration); + definition.visit(|cursor| { + if cursor.kind() == CXCursor_EnumConstantDecl { + let value = if is_bool { + cursor.enum_val_boolean().map(EnumVariantValue::Boolean) + } else if is_signed { + cursor.enum_val_signed().map(EnumVariantValue::Signed) + } else { + cursor.enum_val_unsigned().map(EnumVariantValue::Unsigned) + }; + if let Some(val) = value { + let name = cursor.spelling(); + let annotations = Annotations::new(&cursor); + let custom_behavior = ctx + .options() + .last_callback(|callbacks| { + callbacks + .enum_variant_behavior(type_name, &name, val) + }) + .or_else(|| { + let annotations = annotations.as_ref()?; + if annotations.hide() { + Some(EnumVariantCustomBehavior::Hide) + } else if annotations.constify_enum_variant() { + Some(EnumVariantCustomBehavior::Constify) + } else { + None + } + }); + + let new_name = ctx + .options() + .last_callback(|callbacks| { + callbacks.enum_variant_name(type_name, &name, val) + }) + .or_else(|| { + annotations + .as_ref()? + .use_instead_of()? + .last() + .cloned() + }) + .unwrap_or_else(|| name.clone()); + + let comment = cursor.raw_comment(); + variants.push(EnumVariant::new( + new_name, + name, + comment, + val, + custom_behavior, + )); + } + } + CXChildVisit_Continue + }); + Ok(Enum::new(repr, variants)) + } + + fn is_matching_enum( + &self, + ctx: &BindgenContext, + enums: &RegexSet, + item: &Item, + ) -> bool { + let path = item.path_for_allowlisting(ctx); + let enum_ty = item.expect_type(); + + if enums.matches(path[1..].join("::")) { + return true; + } + + // Test the variants if the enum is anonymous. + if enum_ty.name().is_some() { + return false; + } + + self.variants().iter().any(|v| enums.matches(v.name())) + } + + /// Returns the final representation of the enum. + pub(crate) fn computed_enum_variation( + &self, + ctx: &BindgenContext, + item: &Item, + ) -> EnumVariation { + // ModuleConsts has higher precedence before Rust in order to avoid + // problems with overlapping match patterns. + if self.is_matching_enum( + ctx, + &ctx.options().constified_enum_modules, + item, + ) { + EnumVariation::ModuleConsts + } else if self.is_matching_enum( + ctx, + &ctx.options().bitfield_enums, + item, + ) { + EnumVariation::NewType { + is_bitfield: true, + is_global: false, + } + } else if self.is_matching_enum(ctx, &ctx.options().newtype_enums, item) + { + EnumVariation::NewType { + is_bitfield: false, + is_global: false, + } + } else if self.is_matching_enum( + ctx, + &ctx.options().newtype_global_enums, + item, + ) { + EnumVariation::NewType { + is_bitfield: false, + is_global: true, + } + } else if self.is_matching_enum( + ctx, + &ctx.options().rustified_enums, + item, + ) { + EnumVariation::Rust { + non_exhaustive: false, + } + } else if self.is_matching_enum( + ctx, + &ctx.options().rustified_non_exhaustive_enums, + item, + ) { + EnumVariation::Rust { + non_exhaustive: true, + } + } else if self.is_matching_enum( + ctx, + &ctx.options().constified_enums, + item, + ) { + EnumVariation::Consts + } else { + ctx.options().default_enum_style + } + } +} + +/// A single enum variant, to be contained only in an enum. +#[derive(Debug)] +pub(crate) struct EnumVariant { + /// The name of the variant. 
+ name: String, + + /// The original name of the variant (without user mangling) + name_for_allowlisting: String, + + /// An optional doc comment. + comment: Option, + + /// The integer value of the variant. + val: EnumVariantValue, + + /// The custom behavior this variant may have, if any. + custom_behavior: Option, +} + +/// A constant value assigned to an enumeration variant. +#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] +pub enum EnumVariantValue { + /// A boolean constant. + Boolean(bool), + + /// A signed constant. + Signed(i64), + + /// An unsigned constant. + Unsigned(u64), +} + +impl EnumVariant { + /// Construct a new enumeration variant from the given parts. + pub(crate) fn new( + name: String, + name_for_allowlisting: String, + comment: Option, + val: EnumVariantValue, + custom_behavior: Option, + ) -> Self { + EnumVariant { + name, + name_for_allowlisting, + comment, + val, + custom_behavior, + } + } + + /// Get this variant's name. + pub(crate) fn name(&self) -> &str { + &self.name + } + + /// Get this variant's name. + pub(crate) fn name_for_allowlisting(&self) -> &str { + &self.name_for_allowlisting + } + + /// Get this variant's value. + pub(crate) fn val(&self) -> EnumVariantValue { + self.val + } + + /// Get this variant's documentation. + pub(crate) fn comment(&self) -> Option<&str> { + self.comment.as_deref() + } + + /// Returns whether this variant should be enforced to be a constant by code + /// generation. + pub(crate) fn force_constification(&self) -> bool { + self.custom_behavior == Some(EnumVariantCustomBehavior::Constify) + } + + /// Returns whether the current variant should be hidden completely from the + /// resulting rust enum. + pub(crate) fn hidden(&self) -> bool { + self.custom_behavior == Some(EnumVariantCustomBehavior::Hide) + } +} diff --git a/vendor/bindgen/ir/function.rs b/vendor/bindgen/ir/function.rs new file mode 100644 index 00000000000000..65a12d4bb2dbfb --- /dev/null +++ b/vendor/bindgen/ir/function.rs @@ -0,0 +1,838 @@ +//! Intermediate representation for C/C++ functions and methods. + +use super::comp::MethodKind; +use super::context::{BindgenContext, TypeId}; +use super::dot::DotAttributes; +use super::item::Item; +use super::traversal::{EdgeKind, Trace, Tracer}; +use super::ty::TypeKind; +use crate::callbacks::{ItemInfo, ItemKind}; +use crate::clang::{self, ABIKind, Attribute}; +use crate::parse::{ClangSubItemParser, ParseError, ParseResult}; +use clang_sys::CXCallingConv; + +use quote::TokenStreamExt; +use std::io; +use std::str::FromStr; + +const RUST_DERIVE_FUNPTR_LIMIT: usize = 12; + +/// What kind of function are we looking at? +#[derive(Debug, Copy, Clone, PartialEq, Eq)] +pub(crate) enum FunctionKind { + /// A plain, free function. + Function, + /// A method of some kind. + Method(MethodKind), +} + +impl FunctionKind { + /// Given a clang cursor, return the kind of function it represents, or + /// `None` otherwise. + pub(crate) fn from_cursor(cursor: &clang::Cursor) -> Option { + // FIXME(emilio): Deduplicate logic with `ir::comp`. 
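+        // Sketch of the mapping below: plain `FunctionDecl` cursors become
+        // `FunctionKind::Function`, while constructor, destructor and
+        // `CXXMethod` cursors become the matching `MethodKind`, distinguishing
+        // virtual/pure-virtual and static methods along the way.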
+ Some(match cursor.kind() { + clang_sys::CXCursor_FunctionDecl => FunctionKind::Function, + clang_sys::CXCursor_Constructor => { + FunctionKind::Method(MethodKind::Constructor) + } + clang_sys::CXCursor_Destructor => { + FunctionKind::Method(if cursor.method_is_virtual() { + MethodKind::VirtualDestructor { + pure_virtual: cursor.method_is_pure_virtual(), + } + } else { + MethodKind::Destructor + }) + } + clang_sys::CXCursor_CXXMethod => { + if cursor.method_is_virtual() { + FunctionKind::Method(MethodKind::Virtual { + pure_virtual: cursor.method_is_pure_virtual(), + }) + } else if cursor.method_is_static() { + FunctionKind::Method(MethodKind::Static) + } else { + FunctionKind::Method(MethodKind::Normal) + } + } + _ => return None, + }) + } +} + +/// The style of linkage +#[derive(Debug, Clone, Copy)] +pub(crate) enum Linkage { + /// Externally visible and can be linked against + External, + /// Not exposed externally. 'static inline' functions will have this kind of linkage + Internal, +} + +/// A function declaration, with a signature, arguments, and argument names. +/// +/// The argument names vector must be the same length as the ones in the +/// signature. +#[derive(Debug)] +pub(crate) struct Function { + /// The name of this function. + name: String, + + /// The mangled name, that is, the symbol. + mangled_name: Option, + + /// The link name. If specified, overwrite `mangled_name`. + link_name: Option, + + /// The ID pointing to the current function signature. + signature: TypeId, + + /// The kind of function this is. + kind: FunctionKind, + + /// The linkage of the function. + linkage: Linkage, +} + +impl Function { + /// Construct a new function. + pub(crate) fn new( + name: String, + mangled_name: Option, + link_name: Option, + signature: TypeId, + kind: FunctionKind, + linkage: Linkage, + ) -> Self { + Function { + name, + mangled_name, + link_name, + signature, + kind, + linkage, + } + } + + /// Get this function's name. + pub(crate) fn name(&self) -> &str { + &self.name + } + + /// Get this function's name. + pub(crate) fn mangled_name(&self) -> Option<&str> { + self.mangled_name.as_deref() + } + + /// Get this function's link name. + pub fn link_name(&self) -> Option<&str> { + self.link_name.as_deref() + } + + /// Get this function's signature type. + pub(crate) fn signature(&self) -> TypeId { + self.signature + } + + /// Get this function's kind. + pub(crate) fn kind(&self) -> FunctionKind { + self.kind + } + + /// Get this function's linkage. + pub(crate) fn linkage(&self) -> Linkage { + self.linkage + } +} + +impl DotAttributes for Function { + fn dot_attributes( + &self, + _ctx: &BindgenContext, + out: &mut W, + ) -> io::Result<()> + where + W: io::Write, + { + if let Some(ref mangled) = self.mangled_name { + let mangled: String = + mangled.chars().flat_map(|c| c.escape_default()).collect(); + writeln!(out, "mangled name{mangled}")?; + } + + Ok(()) + } +} + +/// A valid rust ABI. +#[derive(Debug, Copy, Clone, Hash, Eq, PartialEq)] +pub enum Abi { + /// The default C ABI. + C, + /// The "stdcall" ABI. + Stdcall, + /// The "efiapi" ABI. + EfiApi, + /// The "fastcall" ABI. + Fastcall, + /// The "thiscall" ABI. + ThisCall, + /// The "vectorcall" ABI. + Vectorcall, + /// The "aapcs" ABI. + Aapcs, + /// The "win64" ABI. + Win64, + /// The "C-unwind" ABI. + CUnwind, + /// The "system" ABI. 
+ System, +} + +impl FromStr for Abi { + type Err = String; + + fn from_str(s: &str) -> Result { + match s { + "C" => Ok(Self::C), + "stdcall" => Ok(Self::Stdcall), + "efiapi" => Ok(Self::EfiApi), + "fastcall" => Ok(Self::Fastcall), + "thiscall" => Ok(Self::ThisCall), + "vectorcall" => Ok(Self::Vectorcall), + "aapcs" => Ok(Self::Aapcs), + "win64" => Ok(Self::Win64), + "C-unwind" => Ok(Self::CUnwind), + "system" => Ok(Self::System), + _ => Err(format!("Invalid or unknown ABI {s:?}")), + } + } +} + +impl std::fmt::Display for Abi { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let s = match *self { + Self::C => "C", + Self::Stdcall => "stdcall", + Self::EfiApi => "efiapi", + Self::Fastcall => "fastcall", + Self::ThisCall => "thiscall", + Self::Vectorcall => "vectorcall", + Self::Aapcs => "aapcs", + Self::Win64 => "win64", + Self::CUnwind => "C-unwind", + Abi::System => "system", + }; + + s.fmt(f) + } +} + +impl quote::ToTokens for Abi { + fn to_tokens(&self, tokens: &mut proc_macro2::TokenStream) { + let abi = self.to_string(); + tokens.append_all(quote! { #abi }); + } +} + +/// An ABI extracted from a clang cursor. +#[derive(Debug, Copy, Clone)] +pub(crate) enum ClangAbi { + /// An ABI known by Rust. + Known(Abi), + /// An unknown or invalid ABI. + Unknown(CXCallingConv), +} + +impl ClangAbi { + /// Returns whether this Abi is known or not. + fn is_unknown(self) -> bool { + matches!(self, ClangAbi::Unknown(..)) + } +} + +impl quote::ToTokens for ClangAbi { + fn to_tokens(&self, tokens: &mut proc_macro2::TokenStream) { + match *self { + Self::Known(abi) => abi.to_tokens(tokens), + Self::Unknown(cc) => panic!( + "Cannot turn unknown calling convention to tokens: {cc:?}" + ), + } + } +} + +/// A function signature. +#[derive(Debug)] +pub(crate) struct FunctionSig { + /// The name of this function signature. + name: String, + + /// The return type of the function. + return_type: TypeId, + + /// The type of the arguments, optionally with the name of the argument when + /// declared. + argument_types: Vec<(Option, TypeId)>, + + /// Whether this function is variadic. + is_variadic: bool, + is_divergent: bool, + + /// Whether this function's return value must be used. + must_use: bool, + + /// The ABI of this function. + abi: ClangAbi, +} + +fn get_abi(cc: CXCallingConv) -> ClangAbi { + use clang_sys::*; + match cc { + CXCallingConv_Default | CXCallingConv_C => ClangAbi::Known(Abi::C), + CXCallingConv_X86StdCall => ClangAbi::Known(Abi::Stdcall), + CXCallingConv_X86FastCall => ClangAbi::Known(Abi::Fastcall), + CXCallingConv_X86ThisCall => ClangAbi::Known(Abi::ThisCall), + CXCallingConv_X86VectorCall | CXCallingConv_AArch64VectorCall => { + ClangAbi::Known(Abi::Vectorcall) + } + CXCallingConv_AAPCS => ClangAbi::Known(Abi::Aapcs), + CXCallingConv_X86_64Win64 => ClangAbi::Known(Abi::Win64), + other => ClangAbi::Unknown(other), + } +} + +/// Get the mangled name for the cursor's referent. +pub(crate) fn cursor_mangling( + ctx: &BindgenContext, + cursor: &clang::Cursor, +) -> Option { + if !ctx.options().enable_mangling { + return None; + } + + // We early return here because libclang may crash in some case + // if we pass in a variable inside a partial specialized template. + // See rust-lang/rust-bindgen#67, and rust-lang/rust-bindgen#462. 
+ if cursor.is_in_non_fully_specialized_template() { + return None; + } + + let is_itanium_abi = ctx.abi_kind() == ABIKind::GenericItanium; + let is_destructor = cursor.kind() == clang_sys::CXCursor_Destructor; + if let Ok(mut manglings) = cursor.cxx_manglings() { + while let Some(m) = manglings.pop() { + // Only generate the destructor group 1, see below. + if is_itanium_abi && is_destructor && !m.ends_with("D1Ev") { + continue; + } + + return Some(m); + } + } + + let mut mangling = cursor.mangling(); + if mangling.is_empty() { + return None; + } + + if is_itanium_abi && is_destructor { + // With old (3.8-) libclang versions, and the Itanium ABI, clang returns + // the "destructor group 0" symbol, which means that it'll try to free + // memory, which definitely isn't what we want. + // + // Explicitly force the destructor group 1 symbol. + // + // See http://refspecs.linuxbase.org/cxxabi-1.83.html#mangling-special + // for the reference, and http://stackoverflow.com/a/6614369/1091587 for + // a more friendly explanation. + // + // We don't need to do this for constructors since clang seems to always + // have returned the C1 constructor. + // + // FIXME(emilio): Can a legit symbol in other ABIs end with this string? + // I don't think so, but if it can this would become a linker error + // anyway, not an invalid free at runtime. + // + // TODO(emilio, #611): Use cpp_demangle if this becomes nastier with + // time. + if mangling.ends_with("D0Ev") { + let new_len = mangling.len() - 4; + mangling.truncate(new_len); + mangling.push_str("D1Ev"); + } + } + + Some(mangling) +} + +fn args_from_ty_and_cursor( + ty: &clang::Type, + cursor: &clang::Cursor, + ctx: &mut BindgenContext, +) -> Vec<(Option, TypeId)> { + let cursor_args = cursor.args().unwrap_or_default().into_iter(); + let type_args = ty.args().unwrap_or_default().into_iter(); + + // Argument types can be found in either the cursor or the type, but argument names may only be + // found on the cursor. We often have access to both a type and a cursor for each argument, but + // in some cases we may only have one. + // + // Prefer using the type as the source of truth for the argument's type, but fall back to + // inspecting the cursor (this happens for Objective C interfaces). + // + // Prefer using the cursor for the argument's type, but fall back to using the parent's cursor + // (this happens for function pointer return types). + cursor_args + .map(Some) + .chain(std::iter::repeat(None)) + .zip(type_args.map(Some).chain(std::iter::repeat(None))) + .take_while(|(cur, ty)| cur.is_some() || ty.is_some()) + .map(|(arg_cur, arg_ty)| { + let name = arg_cur.map(|a| a.spelling()).and_then(|name| { + if name.is_empty() { + None + } else { + Some(name) + } + }); + + let cursor = arg_cur.unwrap_or(*cursor); + let ty = arg_ty.unwrap_or_else(|| cursor.cur_type()); + (name, Item::from_ty_or_ref(ty, cursor, None, ctx)) + }) + .collect() +} + +impl FunctionSig { + /// Get the function name. + pub(crate) fn name(&self) -> &str { + &self.name + } + + /// Construct a new function signature from the given Clang type. 
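+    ///
+    /// Roughly, this walks the Clang cursor/type pair to collect argument
+    /// names and types, injects an explicit `this` parameter for non-static
+    /// methods, picks up `noreturn`/`must_use` attributes, and maps the
+    /// calling convention to a `ClangAbi`.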
+ pub(crate) fn from_ty( + ty: &clang::Type, + cursor: &clang::Cursor, + ctx: &mut BindgenContext, + ) -> Result { + use clang_sys::*; + debug!("FunctionSig::from_ty {ty:?} {cursor:?}"); + + // Skip function templates + let kind = cursor.kind(); + if kind == CXCursor_FunctionTemplate { + return Err(ParseError::Continue); + } + + let spelling = cursor.spelling(); + + // Don't parse operatorxx functions in C++ + let is_operator = |spelling: &str| { + spelling.starts_with("operator") && + !clang::is_valid_identifier(spelling) + }; + if is_operator(&spelling) && !ctx.options().represent_cxx_operators { + return Err(ParseError::Continue); + } + + // Constructors of non-type template parameter classes for some reason + // include the template parameter in their name. Just skip them, since + // we don't handle well non-type template parameters anyway. + if (kind == CXCursor_Constructor || kind == CXCursor_Destructor) && + spelling.contains('<') + { + return Err(ParseError::Continue); + } + + let cursor = if cursor.is_valid() { + *cursor + } else { + ty.declaration() + }; + + let mut args = match kind { + CXCursor_FunctionDecl | + CXCursor_Constructor | + CXCursor_CXXMethod | + CXCursor_ObjCInstanceMethodDecl | + CXCursor_ObjCClassMethodDecl => { + args_from_ty_and_cursor(ty, &cursor, ctx) + } + _ => { + // For non-CXCursor_FunctionDecl, visiting the cursor's children + // is the only reliable way to get parameter names. + let mut args = vec![]; + cursor.visit(|c| { + if c.kind() == CXCursor_ParmDecl { + let ty = + Item::from_ty_or_ref(c.cur_type(), c, None, ctx); + let name = c.spelling(); + let name = + if name.is_empty() { None } else { Some(name) }; + args.push((name, ty)); + } + CXChildVisit_Continue + }); + + if args.is_empty() { + // FIXME(emilio): Sometimes libclang doesn't expose the + // right AST for functions tagged as stdcall and such... + // + // https://bugs.llvm.org/show_bug.cgi?id=45919 + args_from_ty_and_cursor(ty, &cursor, ctx) + } else { + args + } + } + }; + + let (must_use, mut is_divergent) = + if ctx.options().enable_function_attribute_detection { + let [must_use, no_return, no_return_cpp] = cursor.has_attrs(&[ + Attribute::MUST_USE, + Attribute::NO_RETURN, + Attribute::NO_RETURN_CPP, + ]); + (must_use, no_return || no_return_cpp) + } else { + Default::default() + }; + + // Check if the type contains __attribute__((noreturn)) outside of parentheses. This is + // somewhat fragile, but it seems to be the only way to get at this information as of + // libclang 9. + let ty_spelling = ty.spelling(); + let has_attribute_noreturn = ty_spelling + .match_indices("__attribute__((noreturn))") + .any(|(i, _)| { + let depth = ty_spelling[..i] + .bytes() + .filter_map(|ch| match ch { + b'(' => Some(1), + b')' => Some(-1), + _ => None, + }) + .sum::(); + depth == 0 + }); + is_divergent = is_divergent || has_attribute_noreturn; + + let is_method = kind == CXCursor_CXXMethod; + let is_constructor = kind == CXCursor_Constructor; + let is_destructor = kind == CXCursor_Destructor; + if (is_constructor || is_destructor || is_method) && + cursor.lexical_parent() != cursor.semantic_parent() + { + // Only parse constructors once. 
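+            // (A lexical parent that differs from the semantic parent means
+            // this is an out-of-line definition; the in-class declaration is
+            // the one that produces the signature.)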
+ return Err(ParseError::Continue); + } + + if is_method || is_constructor || is_destructor { + let is_const = is_method && cursor.method_is_const(); + let is_virtual = is_method && cursor.method_is_virtual(); + let is_static = is_method && cursor.method_is_static(); + if !is_static && + (!is_virtual || + ctx.options().use_specific_virtual_function_receiver) + { + let parent = cursor.semantic_parent(); + let class = Item::parse(parent, None, ctx) + .expect("Expected to parse the class"); + // The `class` most likely is not finished parsing yet, so use + // the unchecked variant. + let class = class.as_type_id_unchecked(); + + let class = if is_const { + let const_class_id = ctx.next_item_id(); + ctx.build_const_wrapper( + const_class_id, + class, + None, + &parent.cur_type(), + ) + } else { + class + }; + + let ptr = + Item::builtin_type(TypeKind::Pointer(class), false, ctx); + args.insert(0, (Some("this".into()), ptr)); + } else if is_virtual { + let void = Item::builtin_type(TypeKind::Void, false, ctx); + let ptr = + Item::builtin_type(TypeKind::Pointer(void), false, ctx); + args.insert(0, (Some("this".into()), ptr)); + } + } + + let ty_ret_type = if kind == CXCursor_ObjCInstanceMethodDecl || + kind == CXCursor_ObjCClassMethodDecl + { + ty.ret_type() + .or_else(|| cursor.ret_type()) + .ok_or(ParseError::Continue)? + } else { + ty.ret_type().ok_or(ParseError::Continue)? + }; + + let ret = if is_constructor && ctx.is_target_wasm32() { + // Constructors in Clang wasm32 target return a pointer to the object + // being constructed. + let void = Item::builtin_type(TypeKind::Void, false, ctx); + Item::builtin_type(TypeKind::Pointer(void), false, ctx) + } else { + Item::from_ty_or_ref(ty_ret_type, cursor, None, ctx) + }; + + // Clang plays with us at "find the calling convention", see #549 and + // co. This seems to be a better fix than that commit. + let mut call_conv = ty.call_conv(); + if let Some(ty) = cursor.cur_type().canonical_type().pointee_type() { + let cursor_call_conv = ty.call_conv(); + if cursor_call_conv != CXCallingConv_Invalid { + call_conv = cursor_call_conv; + } + } + + let abi = get_abi(call_conv); + + if abi.is_unknown() { + warn!("Unknown calling convention: {call_conv:?}"); + } + + Ok(Self { + name: spelling, + return_type: ret, + argument_types: args, + is_variadic: ty.is_variadic(), + is_divergent, + must_use, + abi, + }) + } + + /// Get this function signature's return type. + pub(crate) fn return_type(&self) -> TypeId { + self.return_type + } + + /// Get this function signature's argument (name, type) pairs. + pub(crate) fn argument_types(&self) -> &[(Option, TypeId)] { + &self.argument_types + } + + /// Get this function signature's ABI. + pub(crate) fn abi( + &self, + ctx: &BindgenContext, + name: Option<&str>, + ) -> crate::codegen::error::Result { + // FIXME (pvdrz): Try to do this check lazily instead. Maybe store the ABI inside `ctx` + // instead?. 
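+        // Resolution order, as implemented below: a match in the user's
+        // `abi_overrides` (against `name` when given, otherwise against the
+        // signature's own name) wins; otherwise the ABI recorded from Clang is
+        // used, and ABIs the configured Rust version cannot express are
+        // reported as `UnsupportedAbi` errors.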
+ let abi = if let Some(name) = name { + if let Some((abi, _)) = ctx + .options() + .abi_overrides + .iter() + .find(|(_, regex_set)| regex_set.matches(name)) + { + ClangAbi::Known(*abi) + } else { + self.abi + } + } else if let Some((abi, _)) = ctx + .options() + .abi_overrides + .iter() + .find(|(_, regex_set)| regex_set.matches(&self.name)) + { + ClangAbi::Known(*abi) + } else { + self.abi + }; + + match abi { + ClangAbi::Known(Abi::ThisCall) + if !ctx.options().rust_features().thiscall_abi => + { + Err(crate::codegen::error::Error::UnsupportedAbi("thiscall")) + } + ClangAbi::Known(Abi::Vectorcall) + if !ctx.options().rust_features().vectorcall_abi => + { + Err(crate::codegen::error::Error::UnsupportedAbi("vectorcall")) + } + ClangAbi::Known(Abi::CUnwind) + if !ctx.options().rust_features().c_unwind_abi => + { + Err(crate::codegen::error::Error::UnsupportedAbi("C-unwind")) + } + ClangAbi::Known(Abi::EfiApi) + if !ctx.options().rust_features().abi_efiapi => + { + Err(crate::codegen::error::Error::UnsupportedAbi("efiapi")) + } + ClangAbi::Known(Abi::Win64) if self.is_variadic() => { + Err(crate::codegen::error::Error::UnsupportedAbi("Win64")) + } + abi => Ok(abi), + } + } + + /// Is this function signature variadic? + pub(crate) fn is_variadic(&self) -> bool { + // Clang reports some functions as variadic when they *might* be + // variadic. We do the argument check because rust doesn't codegen well + // variadic functions without an initial argument. + self.is_variadic && !self.argument_types.is_empty() + } + + /// Must this function's return value be used? + pub(crate) fn must_use(&self) -> bool { + self.must_use + } + + /// Are function pointers with this signature able to derive Rust traits? + /// Rust only supports deriving traits for function pointers with a limited + /// number of parameters and a couple ABIs. + /// + /// For more details, see: + /// + /// * , + /// * , + /// * and + pub(crate) fn function_pointers_can_derive(&self) -> bool { + if self.argument_types.len() > RUST_DERIVE_FUNPTR_LIMIT { + return false; + } + + matches!(self.abi, ClangAbi::Known(Abi::C) | ClangAbi::Unknown(..)) + } + + /// Whether this function has attributes marking it as divergent. 
+ pub(crate) fn is_divergent(&self) -> bool { + self.is_divergent + } +} + +impl ClangSubItemParser for Function { + fn parse( + cursor: clang::Cursor, + context: &mut BindgenContext, + ) -> Result, ParseError> { + use clang_sys::*; + + let kind = match FunctionKind::from_cursor(&cursor) { + None => return Err(ParseError::Continue), + Some(k) => k, + }; + + debug!("Function::parse({cursor:?}, {:?})", cursor.cur_type()); + let visibility = cursor.visibility(); + if visibility != CXVisibility_Default { + return Err(ParseError::Continue); + } + if cursor.access_specifier() == CX_CXXPrivate && + !context.options().generate_private_functions + { + return Err(ParseError::Continue); + } + + let linkage = cursor.linkage(); + let linkage = match linkage { + CXLinkage_External | CXLinkage_UniqueExternal => Linkage::External, + CXLinkage_Internal => Linkage::Internal, + _ => return Err(ParseError::Continue), + }; + + if cursor.is_inlined_function() || + cursor.definition().is_some_and(|x| x.is_inlined_function()) + { + if !context.options().generate_inline_functions && + !context.options().wrap_static_fns + { + return Err(ParseError::Continue); + } + + if cursor.is_deleted_function() && + !context.options().generate_deleted_functions + { + return Err(ParseError::Continue); + } + + // We cannot handle `inline` functions that are not `static`. + if context.options().wrap_static_fns && + cursor.is_inlined_function() && + matches!(linkage, Linkage::External) + { + return Err(ParseError::Continue); + } + } + + // Grab the signature using Item::from_ty. + let sig = Item::from_ty(&cursor.cur_type(), cursor, None, context)?; + + let mut name = cursor.spelling(); + assert!(!name.is_empty(), "Empty function name?"); + + if cursor.kind() == CXCursor_Destructor { + // Remove the leading `~`. The alternative to this is special-casing + // code-generation for destructor functions, which seems less than + // ideal. + if name.starts_with('~') { + name.remove(0); + } + + // Add a suffix to avoid colliding with constructors. This would be + // technically fine (since we handle duplicated functions/methods), + // but seems easy enough to handle it here. + name.push_str("_destructor"); + } + if let Some(nm) = context.options().last_callback(|callbacks| { + callbacks.generated_name_override(ItemInfo { + name: name.as_str(), + kind: ItemKind::Function, + }) + }) { + name = nm; + } + assert!(!name.is_empty(), "Empty function name."); + + let mangled_name = cursor_mangling(context, &cursor); + + let link_name = context.options().last_callback(|callbacks| { + callbacks.generated_link_name_override(ItemInfo { + name: name.as_str(), + kind: ItemKind::Function, + }) + }); + + let function = Self::new( + name.clone(), + mangled_name, + link_name, + sig, + kind, + linkage, + ); + + Ok(ParseResult::New(function, Some(cursor))) + } +} + +impl Trace for FunctionSig { + type Extra = (); + + fn trace(&self, _: &BindgenContext, tracer: &mut T, _: &()) + where + T: Tracer, + { + tracer.visit_kind(self.return_type().into(), EdgeKind::FunctionReturn); + + for &(_, ty) in self.argument_types() { + tracer.visit_kind(ty.into(), EdgeKind::FunctionParameter); + } + } +} diff --git a/vendor/bindgen/ir/int.rs b/vendor/bindgen/ir/int.rs new file mode 100644 index 00000000000000..ed18a999492ba5 --- /dev/null +++ b/vendor/bindgen/ir/int.rs @@ -0,0 +1,128 @@ +//! Intermediate representation for integral types. + +/// Which integral type are we dealing with? 
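+///
+/// A few illustrative facts, as used by the helpers defined further down:
+///
+/// ```ignore
+/// assert!(IntKind::Char { is_signed: true }.is_signed());
+/// assert_eq!(IntKind::U32.known_size(), Some(4));
+/// assert_eq!(IntKind::Int.known_size(), None); // size is platform-dependent
+/// ```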
+#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash, PartialOrd, Ord)] +pub enum IntKind { + /// A `bool`. + Bool, + + /// A `signed char`. + SChar, + + /// An `unsigned char`. + UChar, + + /// A `wchar_t`. + WChar, + + /// A platform-dependent `char` type, with the signedness support. + Char { + /// Whether the char is signed for the target platform. + is_signed: bool, + }, + + /// A `short`. + Short, + + /// An `unsigned short`. + UShort, + + /// An `int`. + Int, + + /// An `unsigned int`. + UInt, + + /// A `long`. + Long, + + /// An `unsigned long`. + ULong, + + /// A `long long`. + LongLong, + + /// An `unsigned long long`. + ULongLong, + + /// A 8-bit signed integer. + I8, + + /// A 8-bit unsigned integer. + U8, + + /// A 16-bit signed integer. + I16, + + /// A 16-bit integer, used only for enum size representation. + U16, + + /// The C++ type `char16_t`, which is its own type (unlike in C). + Char16, + + /// A 32-bit signed integer. + I32, + + /// A 32-bit unsigned integer. + U32, + + /// A 64-bit signed integer. + I64, + + /// A 64-bit unsigned integer. + U64, + + /// An `int128_t` + I128, + + /// A `uint128_t`. + U128, + + /// A custom integer type, used to allow custom macro types depending on + /// range. + Custom { + /// The name of the type, which would be used without modification. + name: &'static str, + /// Whether the type is signed or not. + is_signed: bool, + }, +} + +impl IntKind { + /// Is this integral type signed? + pub(crate) fn is_signed(&self) -> bool { + use self::IntKind::*; + match *self { + // TODO(emilio): wchar_t can in theory be signed, but we have no way + // to know whether it is or not right now (unlike char, there's no + // WChar_S / WChar_U). + Bool | UChar | UShort | UInt | ULong | ULongLong | U8 | U16 | + Char16 | WChar | U32 | U64 | U128 => false, + + SChar | Short | Int | Long | LongLong | I8 | I16 | I32 | I64 | + I128 => true, + + Char { is_signed } | Custom { is_signed, .. } => is_signed, + } + } + + /// If this type has a known size, return it (in bytes). This is to + /// alleviate libclang sometimes not giving us a layout (like in the case + /// when an enum is defined inside a class with template parameters). + pub(crate) fn known_size(&self) -> Option { + use self::IntKind::*; + Some(match *self { + Bool | UChar | SChar | U8 | I8 | Char { .. } => 1, + U16 | I16 | Char16 => 2, + U32 | I32 => 4, + U64 | I64 => 8, + I128 | U128 => 16, + _ => return None, + }) + } + + /// Whether this type's signedness matches the value. + pub(crate) fn signedness_matches(&self, val: i64) -> bool { + val >= 0 || self.is_signed() + } +} diff --git a/vendor/bindgen/ir/item.rs b/vendor/bindgen/ir/item.rs new file mode 100644 index 00000000000000..d38879f390c9f6 --- /dev/null +++ b/vendor/bindgen/ir/item.rs @@ -0,0 +1,1994 @@ +//! Bindgen's core intermediate representation type. 
+ +use super::super::codegen::{EnumVariation, CONSTIFIED_ENUM_MODULE_REPR_NAME}; +use super::analysis::{HasVtable, HasVtableResult, Sizedness, SizednessResult}; +use super::annotations::Annotations; +use super::comp::{CompKind, MethodKind}; +use super::context::{BindgenContext, ItemId, PartialType, TypeId}; +use super::derive::{ + CanDeriveCopy, CanDeriveDebug, CanDeriveDefault, CanDeriveEq, + CanDeriveHash, CanDeriveOrd, CanDerivePartialEq, CanDerivePartialOrd, +}; +use super::dot::DotAttributes; +use super::function::{Function, FunctionKind}; +use super::item_kind::ItemKind; +use super::layout::Opaque; +use super::module::Module; +use super::template::{AsTemplateParam, TemplateParameters}; +use super::traversal::{EdgeKind, Trace, Tracer}; +use super::ty::{Type, TypeKind}; +use crate::callbacks::ItemInfo; +use crate::clang; +use crate::parse::{ClangSubItemParser, ParseError, ParseResult}; + +use std::cell::{Cell, OnceCell}; +use std::collections::BTreeSet; +use std::fmt::Write; +use std::io; +use std::iter; +use std::sync::OnceLock; + +/// A trait to get the canonical name from an item. +/// +/// This is the trait that will eventually isolate all the logic related to name +/// mangling and that kind of stuff. +/// +/// This assumes no nested paths, at some point I'll have to make it a more +/// complex thing. +/// +/// This name is required to be safe for Rust, that is, is not expected to +/// return any rust keyword from here. +pub(crate) trait ItemCanonicalName { + /// Get the canonical name for this item. + fn canonical_name(&self, ctx: &BindgenContext) -> String; +} + +/// The same, but specifies the path that needs to be followed to reach an item. +/// +/// To contrast with `canonical_name`, here's an example: +/// +/// ```c++ +/// namespace foo { +/// const BAR = 3; +/// } +/// ``` +/// +/// For bar, the canonical path is `vec!["foo", "BAR"]`, while the canonical +/// name is just `"BAR"`. +pub(crate) trait ItemCanonicalPath { + /// Get the namespace-aware canonical path for this item. This means that if + /// namespaces are disabled, you'll get a single item, and otherwise you get + /// the whole path. + fn namespace_aware_canonical_path( + &self, + ctx: &BindgenContext, + ) -> Vec; + + /// Get the canonical path for this item. + fn canonical_path(&self, ctx: &BindgenContext) -> Vec; +} + +/// A trait for determining if some IR thing is opaque or not. +pub(crate) trait IsOpaque { + /// Extra context the IR thing needs to determine if it is opaque or not. + type Extra; + + /// Returns `true` if the thing is opaque, and `false` otherwise. + /// + /// May only be called when `ctx` is in the codegen phase. + fn is_opaque(&self, ctx: &BindgenContext, extra: &Self::Extra) -> bool; +} + +/// A trait for determining if some IR thing has type parameter in array or not. +pub(crate) trait HasTypeParamInArray { + /// Returns `true` if the thing has Array, and `false` otherwise. + fn has_type_param_in_array(&self, ctx: &BindgenContext) -> bool; +} + +/// A trait for iterating over an item and its parents and up its ancestor chain +/// up to (but not including) the implicit root module. +pub(crate) trait ItemAncestors { + /// Get an iterable over this item's ancestors. 
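+    ///
+    /// Sketch of the intended use (identifiers are illustrative):
+    ///
+    /// ```ignore
+    /// for id in item.ancestors(ctx) {
+    ///     // yields `item` itself first, then each parent in turn,
+    ///     // stopping before the implicit root module
+    /// }
+    /// ```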
+ fn ancestors<'a>(&self, ctx: &'a BindgenContext) -> ItemAncestorsIter<'a>; +} + +#[cfg(feature = "__testing_only_extra_assertions")] +type DebugOnlyItemSet = ItemSet; + +#[cfg(not(feature = "__testing_only_extra_assertions"))] +struct DebugOnlyItemSet; + +#[cfg(not(feature = "__testing_only_extra_assertions"))] +impl DebugOnlyItemSet { + fn new() -> Self { + DebugOnlyItemSet + } + + #[allow(clippy::trivially_copy_pass_by_ref)] + fn contains(&self, _id: &ItemId) -> bool { + false + } + + fn insert(&mut self, _id: ItemId) {} +} + +/// An iterator over an item and its ancestors. +pub(crate) struct ItemAncestorsIter<'a> { + item: ItemId, + ctx: &'a BindgenContext, + seen: DebugOnlyItemSet, +} + +impl<'a> ItemAncestorsIter<'a> { + fn new>(ctx: &'a BindgenContext, id: Id) -> Self { + ItemAncestorsIter { + item: id.into(), + ctx, + seen: DebugOnlyItemSet::new(), + } + } +} + +impl Iterator for ItemAncestorsIter<'_> { + type Item = ItemId; + + fn next(&mut self) -> Option { + let item = self.ctx.resolve_item(self.item); + + if item.parent_id() == self.item { + None + } else { + self.item = item.parent_id(); + + extra_assert!(!self.seen.contains(&item.id())); + self.seen.insert(item.id()); + + Some(item.id()) + } + } +} + +impl AsTemplateParam for T +where + T: Copy + Into, +{ + type Extra = (); + + fn as_template_param( + &self, + ctx: &BindgenContext, + _: &(), + ) -> Option { + ctx.resolve_item((*self).into()).as_template_param(ctx, &()) + } +} + +impl AsTemplateParam for Item { + type Extra = (); + + fn as_template_param( + &self, + ctx: &BindgenContext, + _: &(), + ) -> Option { + self.kind.as_template_param(ctx, self) + } +} + +impl AsTemplateParam for ItemKind { + type Extra = Item; + + fn as_template_param( + &self, + ctx: &BindgenContext, + item: &Item, + ) -> Option { + match *self { + ItemKind::Type(ref ty) => ty.as_template_param(ctx, item), + ItemKind::Module(..) | + ItemKind::Function(..) | + ItemKind::Var(..) => None, + } + } +} + +impl ItemCanonicalName for T +where + T: Copy + Into, +{ + fn canonical_name(&self, ctx: &BindgenContext) -> String { + debug_assert!( + ctx.in_codegen_phase(), + "You're not supposed to call this yet" + ); + ctx.resolve_item(*self).canonical_name(ctx) + } +} + +impl ItemCanonicalPath for T +where + T: Copy + Into, +{ + fn namespace_aware_canonical_path( + &self, + ctx: &BindgenContext, + ) -> Vec { + debug_assert!( + ctx.in_codegen_phase(), + "You're not supposed to call this yet" + ); + ctx.resolve_item(*self).namespace_aware_canonical_path(ctx) + } + + fn canonical_path(&self, ctx: &BindgenContext) -> Vec { + debug_assert!( + ctx.in_codegen_phase(), + "You're not supposed to call this yet" + ); + ctx.resolve_item(*self).canonical_path(ctx) + } +} + +impl ItemAncestors for T +where + T: Copy + Into, +{ + fn ancestors<'a>(&self, ctx: &'a BindgenContext) -> ItemAncestorsIter<'a> { + ItemAncestorsIter::new(ctx, *self) + } +} + +impl ItemAncestors for Item { + fn ancestors<'a>(&self, ctx: &'a BindgenContext) -> ItemAncestorsIter<'a> { + self.id().ancestors(ctx) + } +} + +impl Trace for Id +where + Id: Copy + Into, +{ + type Extra = (); + + fn trace(&self, ctx: &BindgenContext, tracer: &mut T, extra: &()) + where + T: Tracer, + { + ctx.resolve_item(*self).trace(ctx, tracer, extra); + } +} + +impl Trace for Item { + type Extra = (); + + fn trace(&self, ctx: &BindgenContext, tracer: &mut T, _extra: &()) + where + T: Tracer, + { + // Even if this item is blocklisted/hidden, we want to trace it. 
It is + // traversal iterators' consumers' responsibility to filter items as + // needed. Generally, this filtering happens in the implementation of + // `Iterator` for `allowlistedItems`. Fully tracing blocklisted items is + // necessary for things like the template parameter usage analysis to + // function correctly. + + match *self.kind() { + ItemKind::Type(ref ty) => { + // There are some types, like resolved type references, where we + // don't want to stop collecting types even though they may be + // opaque. + if ty.should_be_traced_unconditionally() || + !self.is_opaque(ctx, &()) + { + ty.trace(ctx, tracer, self); + } + } + ItemKind::Function(ref fun) => { + // Just the same way, it has not real meaning for a function to + // be opaque, so we trace across it. + tracer.visit(fun.signature().into()); + } + ItemKind::Var(ref var) => { + tracer.visit_kind(var.ty().into(), EdgeKind::VarType); + } + ItemKind::Module(_) => { + // Module -> children edges are "weak", and we do not want to + // trace them. If we did, then allowlisting wouldn't work as + // expected: everything in every module would end up + // allowlisted. + // + // TODO: make a new edge kind for module -> children edges and + // filter them during allowlisting traversals. + } + } + } +} + +impl CanDeriveDebug for Item { + fn can_derive_debug(&self, ctx: &BindgenContext) -> bool { + self.id().can_derive_debug(ctx) + } +} + +impl CanDeriveDefault for Item { + fn can_derive_default(&self, ctx: &BindgenContext) -> bool { + self.id().can_derive_default(ctx) + } +} + +impl CanDeriveCopy for Item { + fn can_derive_copy(&self, ctx: &BindgenContext) -> bool { + self.id().can_derive_copy(ctx) + } +} + +impl CanDeriveHash for Item { + fn can_derive_hash(&self, ctx: &BindgenContext) -> bool { + self.id().can_derive_hash(ctx) + } +} + +impl CanDerivePartialOrd for Item { + fn can_derive_partialord(&self, ctx: &BindgenContext) -> bool { + self.id().can_derive_partialord(ctx) + } +} + +impl CanDerivePartialEq for Item { + fn can_derive_partialeq(&self, ctx: &BindgenContext) -> bool { + self.id().can_derive_partialeq(ctx) + } +} + +impl CanDeriveEq for Item { + fn can_derive_eq(&self, ctx: &BindgenContext) -> bool { + self.id().can_derive_eq(ctx) + } +} + +impl CanDeriveOrd for Item { + fn can_derive_ord(&self, ctx: &BindgenContext) -> bool { + self.id().can_derive_ord(ctx) + } +} + +/// An item is the base of the bindgen representation, it can be either a +/// module, a type, a function, or a variable (see `ItemKind` for more +/// information). +/// +/// Items refer to each other by `ItemId`. Every item has its parent's +/// ID. Depending on the kind of item this is, it may also refer to other items, +/// such as a compound type item referring to other types. Collectively, these +/// references form a graph. +/// +/// The entry-point to this graph is the "root module": a meta-item used to hold +/// all top-level items. +/// +/// An item may have a comment, and annotations (see the `annotations` module). +/// +/// Note that even though we parse all the types of annotations in comments, not +/// all of them apply to every item. Those rules are described in the +/// `annotations` module. +#[derive(Debug)] +pub(crate) struct Item { + /// This item's ID. + id: ItemId, + + /// The item's local ID, unique only amongst its siblings. Only used for + /// anonymous items. + /// + /// Lazily initialized in `local_id()`. + /// + /// Note that only structs, unions, and enums get a local type ID. In any + /// case this is an implementation detail. 
+ local_id: OnceCell, + + /// The next local ID to use for a child or template instantiation. + next_child_local_id: Cell, + + /// A cached copy of the canonical name, as returned by `canonical_name`. + /// + /// This is a fairly used operation during codegen so this makes bindgen + /// considerably faster in those cases. + canonical_name: OnceCell, + + /// The path to use for allowlisting and other name-based checks, as + /// returned by `path_for_allowlisting`, lazily constructed. + path_for_allowlisting: OnceCell>, + + /// A doc comment over the item, if any. + comment: Option, + /// Annotations extracted from the doc comment, or the default ones + /// otherwise. + annotations: Annotations, + /// An item's parent ID. This will most likely be a class where this item + /// was declared, or a module, etc. + /// + /// All the items have a parent, except the root module, in which case the + /// parent ID is its own ID. + parent_id: ItemId, + /// The item kind. + kind: ItemKind, + /// The source location of the item. + location: Option, +} + +impl AsRef for Item { + fn as_ref(&self) -> &ItemId { + &self.id + } +} + +impl Item { + /// Construct a new `Item`. + pub(crate) fn new( + id: ItemId, + comment: Option, + annotations: Option, + parent_id: ItemId, + kind: ItemKind, + location: Option, + ) -> Self { + debug_assert!(id != parent_id || kind.is_module()); + Item { + id, + local_id: OnceCell::new(), + next_child_local_id: Cell::new(1), + canonical_name: OnceCell::new(), + path_for_allowlisting: OnceCell::new(), + parent_id, + comment, + annotations: annotations.unwrap_or_default(), + kind, + location, + } + } + + /// Construct a new opaque item type. + pub(crate) fn new_opaque_type( + with_id: ItemId, + ty: &clang::Type, + ctx: &mut BindgenContext, + ) -> TypeId { + let location = ty.declaration().location(); + let ty = Opaque::from_clang_ty(ty, ctx); + let kind = ItemKind::Type(ty); + let parent = ctx.root_module().into(); + ctx.add_item( + Item::new(with_id, None, None, parent, kind, Some(location)), + None, + None, + ); + with_id.as_type_id_unchecked() + } + + /// Get this `Item`'s identifier. + pub(crate) fn id(&self) -> ItemId { + self.id + } + + /// Get this `Item`'s parent's identifier. + /// + /// For the root module, the parent's ID is its own ID. + pub(crate) fn parent_id(&self) -> ItemId { + self.parent_id + } + + /// Set this item's parent ID. + /// + /// This is only used so replacements get generated in the proper module. + pub(crate) fn set_parent_for_replacement>( + &mut self, + id: Id, + ) { + self.parent_id = id.into(); + } + + /// Returns the depth this item is indented to. + /// + /// FIXME(emilio): This may need fixes for the enums within modules stuff. + pub(crate) fn codegen_depth(&self, ctx: &BindgenContext) -> usize { + if !ctx.options().enable_cxx_namespaces { + return 0; + } + + self.ancestors(ctx) + .filter(|id| { + ctx.resolve_item(*id).as_module().is_some_and(|module| { + !module.is_inline() || + ctx.options().conservative_inline_namespaces + }) + }) + .count() + + 1 + } + + /// Get this `Item`'s comment, if it has any, already preprocessed and with + /// the right indentation. + pub(crate) fn comment(&self, ctx: &BindgenContext) -> Option { + if !ctx.options().generate_comments { + return None; + } + + self.comment + .as_ref() + .map(|comment| ctx.options().process_comment(comment)) + } + + /// What kind of item is this? + pub(crate) fn kind(&self) -> &ItemKind { + &self.kind + } + + /// Get a mutable reference to this item's kind. 
+ pub(crate) fn kind_mut(&mut self) -> &mut ItemKind { + &mut self.kind + } + + /// Where in the source is this item located? + pub(crate) fn location(&self) -> Option<&clang::SourceLocation> { + self.location.as_ref() + } + + /// Get an identifier that differentiates this item from its siblings. + /// + /// This should stay relatively stable in the face of code motion outside or + /// below this item's lexical scope, meaning that this can be useful for + /// generating relatively stable identifiers within a scope. + pub(crate) fn local_id(&self, ctx: &BindgenContext) -> usize { + *self.local_id.get_or_init(|| { + let parent = ctx.resolve_item(self.parent_id); + parent.next_child_local_id() + }) + } + + /// Get an identifier that differentiates a child of this item of other + /// related items. + /// + /// This is currently used for anonymous items, and template instantiation + /// tests, in both cases in order to reduce noise when system headers are at + /// place. + pub(crate) fn next_child_local_id(&self) -> usize { + let local_id = self.next_child_local_id.get(); + self.next_child_local_id.set(local_id + 1); + local_id + } + + /// Returns whether this item is a top-level item, from the point of view of + /// bindgen. + /// + /// This point of view changes depending on whether namespaces are enabled + /// or not. That way, in the following example: + /// + /// ```c++ + /// namespace foo { + /// static int var; + /// } + /// ``` + /// + /// `var` would be a toplevel item if namespaces are disabled, but won't if + /// they aren't. + /// + /// This function is used to determine when the codegen phase should call + /// `codegen` on an item, since any item that is not top-level will be + /// generated by its parent. + pub(crate) fn is_toplevel(&self, ctx: &BindgenContext) -> bool { + // FIXME: Workaround for some types falling behind when parsing weird + // stl classes, for example. + if ctx.options().enable_cxx_namespaces && + self.kind().is_module() && + self.id() != ctx.root_module() + { + return false; + } + + let mut parent = self.parent_id; + loop { + let Some(parent_item) = ctx.resolve_item_fallible(parent) else { + return false; + }; + + if parent_item.id() == ctx.root_module() { + return true; + } else if ctx.options().enable_cxx_namespaces || + !parent_item.kind().is_module() + { + return false; + } + + parent = parent_item.parent_id(); + } + } + + /// Get a reference to this item's underlying `Type`. Panic if this is some + /// other kind of item. + pub(crate) fn expect_type(&self) -> &Type { + self.kind().expect_type() + } + + /// Get a reference to this item's underlying `Type`, or `None` if this is + /// some other kind of item. + pub(crate) fn as_type(&self) -> Option<&Type> { + self.kind().as_type() + } + + /// Get a reference to this item's underlying `Function`. Panic if this is + /// some other kind of item. + pub(crate) fn expect_function(&self) -> &Function { + self.kind().expect_function() + } + + /// Is this item a module? + pub(crate) fn is_module(&self) -> bool { + matches!(self.kind, ItemKind::Module(..)) + } + + /// Get this item's annotations. + pub(crate) fn annotations(&self) -> &Annotations { + &self.annotations + } + + /// Whether this item should be blocklisted. + /// + /// This may be due to either annotations or to other kind of configuration. 
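+    ///
+    /// Concretely (see the body below): an explicit `hide` annotation wins,
+    /// then a match of the item's source file against `blocklisted_files`,
+    /// and finally name-based matches against the blocklisted
+    /// item/type/function/var sets.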
+ pub(crate) fn is_blocklisted(&self, ctx: &BindgenContext) -> bool { + debug_assert!( + ctx.in_codegen_phase(), + "You're not supposed to call this yet" + ); + if self.annotations.hide() { + return true; + } + + if !ctx.options().blocklisted_files.is_empty() { + if let Some(location) = &self.location { + let (file, _, _, _) = location.location(); + if let Some(filename) = file.name() { + if ctx.options().blocklisted_files.matches(filename) { + return true; + } + } + } + } + + let path = self.path_for_allowlisting(ctx); + let name = path[1..].join("::"); + ctx.options().blocklisted_items.matches(&name) || + match self.kind { + ItemKind::Type(..) => { + ctx.options().blocklisted_types.matches(&name) || + ctx.is_replaced_type(path, self.id) + } + ItemKind::Function(..) => { + ctx.options().blocklisted_functions.matches(&name) + } + ItemKind::Var(..) => { + ctx.options().blocklisted_vars.matches(&name) + } + // TODO: Add namespace blocklisting? + ItemKind::Module(..) => false, + } + } + + /// Take out item `NameOptions` + pub(crate) fn name<'a>( + &'a self, + ctx: &'a BindgenContext, + ) -> NameOptions<'a> { + NameOptions::new(self, ctx) + } + + /// Get the target item ID for name generation. + fn name_target(&self, ctx: &BindgenContext) -> ItemId { + let mut targets_seen = DebugOnlyItemSet::new(); + let mut item = self; + + loop { + extra_assert!(!targets_seen.contains(&item.id())); + targets_seen.insert(item.id()); + + if self.annotations().use_instead_of().is_some() { + return self.id(); + } + + match *item.kind() { + ItemKind::Type(ref ty) => match *ty.kind() { + TypeKind::ResolvedTypeRef(inner) => { + item = ctx.resolve_item(inner); + } + TypeKind::TemplateInstantiation(ref inst) => { + item = ctx.resolve_item(inst.template_definition()); + } + _ => return item.id(), + }, + _ => return item.id(), + } + } + } + + /// Create a fully disambiguated name for an item, including template + /// parameters if it is a type + pub(crate) fn full_disambiguated_name( + &self, + ctx: &BindgenContext, + ) -> String { + let mut s = String::new(); + let level = 0; + self.push_disambiguated_name(ctx, &mut s, level); + s + } + + /// Helper function for `full_disambiguated_name` + fn push_disambiguated_name( + &self, + ctx: &BindgenContext, + to: &mut String, + level: u8, + ) { + to.push_str(&self.canonical_name(ctx)); + if let ItemKind::Type(ref ty) = *self.kind() { + if let TypeKind::TemplateInstantiation(ref inst) = *ty.kind() { + let _ = write!(to, "_open{level}_"); + for arg in inst.template_arguments() { + arg.into_resolver() + .through_type_refs() + .resolve(ctx) + .push_disambiguated_name(ctx, to, level + 1); + to.push('_'); + } + let _ = write!(to, "close{level}"); + } + } + } + + /// Get this function item's name, or `None` if this item is not a function. + fn func_name(&self) -> Option<&str> { + match *self.kind() { + ItemKind::Function(ref func) => Some(func.name()), + _ => None, + } + } + + /// Get the overload index for this method. If this is not a method, return + /// `None`. + fn overload_index(&self, ctx: &BindgenContext) -> Option { + self.func_name().and_then(|func_name| { + let parent = ctx.resolve_item(self.parent_id()); + if let ItemKind::Type(ref ty) = *parent.kind() { + if let TypeKind::Comp(ref ci) = *ty.kind() { + // All the constructors have the same name, so no need to + // resolve and check. 
+ return ci + .constructors() + .iter() + .position(|c| *c == self.id()) + .or_else(|| { + ci.methods() + .iter() + .filter(|m| { + let item = ctx.resolve_item(m.signature()); + let func = item.expect_function(); + func.name() == func_name + }) + .position(|m| m.signature() == self.id()) + }); + } + } + + None + }) + } + + /// Get this item's base name (aka non-namespaced name). + fn base_name(&self, ctx: &BindgenContext) -> String { + if let Some(path) = self.annotations().use_instead_of() { + return path.last().unwrap().clone(); + } + + match *self.kind() { + ItemKind::Var(ref var) => var.name().to_owned(), + ItemKind::Module(ref module) => module.name().map_or_else( + || format!("_bindgen_mod_{}", self.exposed_id(ctx)), + ToOwned::to_owned, + ), + ItemKind::Type(ref ty) => ty.sanitized_name(ctx).map_or_else( + || format!("_bindgen_ty_{}", self.exposed_id(ctx)), + Into::into, + ), + ItemKind::Function(ref fun) => { + let mut name = fun.name().to_owned(); + + if let Some(idx) = self.overload_index(ctx) { + if idx > 0 { + write!(&mut name, "{idx}").unwrap(); + } + } + + name + } + } + } + + fn is_anon(&self) -> bool { + match self.kind() { + ItemKind::Module(module) => module.name().is_none(), + ItemKind::Type(ty) => ty.name().is_none(), + ItemKind::Function(_) => false, + ItemKind::Var(_) => false, + } + } + + /// Get the canonical name without taking into account the replaces + /// annotation. + /// + /// This is the base logic used to implement hiding and replacing via + /// annotations, and also to implement proper name mangling. + /// + /// The idea is that each generated type in the same "level" (read: module + /// or namespace) has a unique canonical name. + /// + /// This name should be derived from the immutable state contained in the + /// type and the parent chain, since it should be consistent. + /// + /// If `BindgenOptions::disable_nested_struct_naming` is true then returned + /// name is the inner most non-anonymous name plus all the anonymous base names + /// that follows. + pub(crate) fn real_canonical_name( + &self, + ctx: &BindgenContext, + opt: &NameOptions, + ) -> String { + let target = ctx.resolve_item(self.name_target(ctx)); + + // Short-circuit if the target has an override, and just use that. + if let Some(path) = target.annotations.use_instead_of() { + if ctx.options().enable_cxx_namespaces { + return path.last().unwrap().clone(); + } + return path.join("_"); + } + + let base_name = target.base_name(ctx); + + // Named template type arguments are never namespaced, and never + // mangled. + if target.is_template_param(ctx, &()) { + return base_name; + } + + // Ancestors' ID iter + let mut ids_iter = target + .parent_id() + .ancestors(ctx) + .filter(|id| *id != ctx.root_module()) + .take_while(|id| { + // Stop iterating ancestors once we reach a non-inline namespace + // when opt.within_namespaces is set. + !opt.within_namespaces || !ctx.resolve_item(*id).is_module() + }) + .filter(|id| { + if !ctx.options().conservative_inline_namespaces { + if let ItemKind::Module(ref module) = + *ctx.resolve_item(*id).kind() + { + return !module.is_inline(); + } + } + + true + }); + + let ids: Vec<_> = if ctx.options().disable_nested_struct_naming { + let mut ids = Vec::new(); + + // If target is anonymous we need find its first named ancestor. + if target.is_anon() { + for id in ids_iter.by_ref() { + ids.push(id); + + if !ctx.resolve_item(id).is_anon() { + break; + } + } + } + + ids + } else { + ids_iter.collect() + }; + + // Concatenate this item's ancestors' names together. 
+ let mut names: Vec<_> = ids + .into_iter() + .map(|id| { + let item = ctx.resolve_item(id); + let target = ctx.resolve_item(item.name_target(ctx)); + target.base_name(ctx) + }) + .filter(|name| !name.is_empty()) + .collect(); + + names.reverse(); + + if !base_name.is_empty() { + names.push(base_name); + } + + if ctx.options().c_naming { + if let Some(prefix) = self.c_naming_prefix() { + names.insert(0, prefix.to_string()); + } + } + + let name = names.join("_"); + + let name = if opt.user_mangled == UserMangled::Yes { + let item_info = ItemInfo { + name: &name, + kind: match self.kind() { + ItemKind::Module(..) => crate::callbacks::ItemKind::Module, + ItemKind::Type(..) => crate::callbacks::ItemKind::Type, + ItemKind::Function(..) => { + crate::callbacks::ItemKind::Function + } + ItemKind::Var(..) => crate::callbacks::ItemKind::Var, + }, + }; + ctx.options() + .last_callback(|callbacks| callbacks.item_name(item_info)) + .unwrap_or(name) + } else { + name + }; + + ctx.rust_mangle(&name).into_owned() + } + + /// The exposed ID that represents an unique ID among the siblings of a + /// given item. + pub(crate) fn exposed_id(&self, ctx: &BindgenContext) -> String { + // Only use local ids for enums, classes, structs and union types. All + // other items use their global ID. + let ty_kind = self.kind().as_type().map(|t| t.kind()); + if let Some( + TypeKind::Comp(..) | + TypeKind::TemplateInstantiation(..) | + TypeKind::Enum(..), + ) = ty_kind + { + return self.local_id(ctx).to_string(); + } + + // Note that this `id_` prefix prevents (really unlikely) collisions + // between the global ID and the local ID of an item with the same + // parent. + format!("id_{}", self.id().as_usize()) + } + + /// Get a reference to this item's `Module`, or `None` if this is not a + /// `Module` item. + pub(crate) fn as_module(&self) -> Option<&Module> { + match self.kind { + ItemKind::Module(ref module) => Some(module), + _ => None, + } + } + + /// Get a mutable reference to this item's `Module`, or `None` if this is + /// not a `Module` item. + pub(crate) fn as_module_mut(&mut self) -> Option<&mut Module> { + match self.kind { + ItemKind::Module(ref mut module) => Some(module), + _ => None, + } + } + + /// Returns whether the item is a constified module enum + fn is_constified_enum_module(&self, ctx: &BindgenContext) -> bool { + // Do not jump through aliases, except for aliases that point to a type + // with the same name, since we dont generate coe for them. + let item = self.id.into_resolver().through_type_refs().resolve(ctx); + let ItemKind::Type(ref type_) = *item.kind() else { + return false; + }; + + match *type_.kind() { + TypeKind::Enum(ref enum_) => { + enum_.computed_enum_variation(ctx, self) == + EnumVariation::ModuleConsts + } + TypeKind::Alias(inner_id) => { + // TODO(emilio): Make this "hop through type aliases that aren't + // really generated" an option in `ItemResolver`? + let inner_item = ctx.resolve_item(inner_id); + let name = item.canonical_name(ctx); + + if inner_item.canonical_name(ctx) == name { + inner_item.is_constified_enum_module(ctx) + } else { + false + } + } + _ => false, + } + } + + /// Is this item of a kind that is enabled for code generation? + pub(crate) fn is_enabled_for_codegen(&self, ctx: &BindgenContext) -> bool { + let cc = &ctx.options().codegen_config; + match *self.kind() { + ItemKind::Module(..) 
=> true, + ItemKind::Var(_) => cc.vars(), + ItemKind::Type(_) => cc.types(), + ItemKind::Function(ref f) => match f.kind() { + FunctionKind::Function => cc.functions(), + FunctionKind::Method(MethodKind::Constructor) => { + cc.constructors() + } + FunctionKind::Method( + MethodKind::Destructor | + MethodKind::VirtualDestructor { .. }, + ) => cc.destructors(), + FunctionKind::Method( + MethodKind::Static | + MethodKind::Normal | + MethodKind::Virtual { .. }, + ) => cc.methods(), + }, + } + } + + /// Returns the path we should use for allowlisting / blocklisting, which + /// doesn't include user-mangling. + pub(crate) fn path_for_allowlisting( + &self, + ctx: &BindgenContext, + ) -> &Vec { + self.path_for_allowlisting + .get_or_init(|| self.compute_path(ctx, UserMangled::No)) + } + + fn compute_path( + &self, + ctx: &BindgenContext, + mangled: UserMangled, + ) -> Vec { + if let Some(path) = self.annotations().use_instead_of() { + let mut ret = + vec![ctx.resolve_item(ctx.root_module()).name(ctx).get()]; + ret.extend_from_slice(path); + return ret; + } + + let target = ctx.resolve_item(self.name_target(ctx)); + let mut path: Vec<_> = target + .ancestors(ctx) + .chain(iter::once(ctx.root_module().into())) + .map(|id| ctx.resolve_item(id)) + .filter(|item| { + item.id() == target.id() || + item.as_module().is_some_and(|module| { + !module.is_inline() || + ctx.options().conservative_inline_namespaces + }) + }) + .map(|item| { + ctx.resolve_item(item.name_target(ctx)) + .name(ctx) + .within_namespaces() + .user_mangled(mangled) + .get() + }) + .collect(); + path.reverse(); + path + } + + /// Returns a prefix for the canonical name when C naming is enabled. + fn c_naming_prefix(&self) -> Option<&str> { + let ItemKind::Type(ref ty) = self.kind else { + return None; + }; + + Some(match ty.kind() { + TypeKind::Comp(ref ci) => match ci.kind() { + CompKind::Struct => "struct", + CompKind::Union => "union", + }, + TypeKind::Enum(..) => "enum", + _ => return None, + }) + } + + /// Whether this is a `#[must_use]` type. 
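A simplified model of the name assembly performed by `real_canonical_name` above: ancestor names and the item's base name are joined with underscores, and `c_naming_prefix` contributes a leading `struct`/`union`/`enum` when C-style naming is requested. The mangling callbacks, empty-name filtering, and nested-struct options are deliberately left out of this sketch:

```rust
/// Simplified name assembly: ancestors are given outermost first.
fn canonical_name(ancestors: &[&str], base: &str, c_prefix: Option<&str>) -> String {
    let mut names: Vec<String> = ancestors.iter().map(|s| s.to_string()).collect();
    if !base.is_empty() {
        names.push(base.to_string());
    }
    if let Some(prefix) = c_prefix {
        names.insert(0, prefix.to_string());
    }
    names.join("_")
}

fn main() {
    assert_eq!(canonical_name(&["ns", "Outer"], "Inner", None), "ns_Outer_Inner");
    assert_eq!(canonical_name(&["ns"], "Point", Some("struct")), "struct_ns_Point");
}
```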
+ pub(crate) fn must_use(&self, ctx: &BindgenContext) -> bool { + self.annotations().must_use_type() || ctx.must_use_type_by_name(self) + } +} + +impl IsOpaque for T +where + T: Copy + Into, +{ + type Extra = (); + + fn is_opaque(&self, ctx: &BindgenContext, _: &()) -> bool { + debug_assert!( + ctx.in_codegen_phase(), + "You're not supposed to call this yet" + ); + ctx.resolve_item((*self).into()).is_opaque(ctx, &()) + } +} + +impl IsOpaque for Item { + type Extra = (); + + fn is_opaque(&self, ctx: &BindgenContext, _: &()) -> bool { + debug_assert!( + ctx.in_codegen_phase(), + "You're not supposed to call this yet" + ); + self.annotations.opaque() || + self.as_type().is_some_and(|ty| ty.is_opaque(ctx, self)) || + ctx.opaque_by_name(self.path_for_allowlisting(ctx)) + } +} + +impl HasVtable for T +where + T: Copy + Into, +{ + fn has_vtable(&self, ctx: &BindgenContext) -> bool { + let id: ItemId = (*self).into(); + id.as_type_id(ctx).is_some_and(|id| { + !matches!(ctx.lookup_has_vtable(id), HasVtableResult::No) + }) + } + + fn has_vtable_ptr(&self, ctx: &BindgenContext) -> bool { + let id: ItemId = (*self).into(); + id.as_type_id(ctx).is_some_and(|id| { + matches!(ctx.lookup_has_vtable(id), HasVtableResult::SelfHasVtable) + }) + } +} + +impl HasVtable for Item { + fn has_vtable(&self, ctx: &BindgenContext) -> bool { + self.id().has_vtable(ctx) + } + + fn has_vtable_ptr(&self, ctx: &BindgenContext) -> bool { + self.id().has_vtable_ptr(ctx) + } +} + +impl Sizedness for T +where + T: Copy + Into, +{ + fn sizedness(&self, ctx: &BindgenContext) -> SizednessResult { + let id: ItemId = (*self).into(); + id.as_type_id(ctx) + .map_or(SizednessResult::default(), |id| ctx.lookup_sizedness(id)) + } +} + +impl Sizedness for Item { + fn sizedness(&self, ctx: &BindgenContext) -> SizednessResult { + self.id().sizedness(ctx) + } +} + +impl HasTypeParamInArray for T +where + T: Copy + Into, +{ + fn has_type_param_in_array(&self, ctx: &BindgenContext) -> bool { + debug_assert!( + ctx.in_codegen_phase(), + "You're not supposed to call this yet" + ); + ctx.lookup_has_type_param_in_array(*self) + } +} + +impl HasTypeParamInArray for Item { + fn has_type_param_in_array(&self, ctx: &BindgenContext) -> bool { + debug_assert!( + ctx.in_codegen_phase(), + "You're not supposed to call this yet" + ); + ctx.lookup_has_type_param_in_array(self.id()) + } +} + +/// A set of items. +pub(crate) type ItemSet = BTreeSet; + +impl DotAttributes for Item { + fn dot_attributes( + &self, + ctx: &BindgenContext, + out: &mut W, + ) -> io::Result<()> + where + W: io::Write, + { + writeln!( + out, + "{:?} + name{}", + self.id, + self.name(ctx).get() + )?; + + if self.is_opaque(ctx, &()) { + writeln!(out, "opaquetrue")?; + } + + self.kind.dot_attributes(ctx, out) + } +} + +impl TemplateParameters for T +where + T: Copy + Into, +{ + fn self_template_params(&self, ctx: &BindgenContext) -> Vec { + ctx.resolve_item_fallible(*self) + .map_or(vec![], |item| item.self_template_params(ctx)) + } +} + +impl TemplateParameters for Item { + fn self_template_params(&self, ctx: &BindgenContext) -> Vec { + self.kind.self_template_params(ctx) + } +} + +impl TemplateParameters for ItemKind { + fn self_template_params(&self, ctx: &BindgenContext) -> Vec { + match *self { + ItemKind::Type(ref ty) => ty.self_template_params(ctx), + // If we start emitting bindings to explicitly instantiated + // functions, then we'll need to check ItemKind::Function for + // template params. 
+ ItemKind::Function(_) | ItemKind::Module(_) | ItemKind::Var(_) => { + vec![] + } + } + } +} + +// An utility function to handle recursing inside nested types. +fn visit_child( + cur: clang::Cursor, + id: ItemId, + ty: &clang::Type, + parent_id: Option, + ctx: &mut BindgenContext, + result: &mut Result, +) -> clang_sys::CXChildVisitResult { + use clang_sys::*; + if result.is_ok() { + return CXChildVisit_Break; + } + + *result = Item::from_ty_with_id(id, ty, cur, parent_id, ctx); + + match *result { + Ok(..) => CXChildVisit_Break, + Err(ParseError::Recurse) => { + cur.visit(|c| visit_child(c, id, ty, parent_id, ctx, result)); + CXChildVisit_Continue + } + Err(ParseError::Continue) => CXChildVisit_Continue, + } +} + +impl Item { + /// Create a builtin type. + pub(crate) fn builtin_type( + kind: TypeKind, + is_const: bool, + ctx: &mut BindgenContext, + ) -> TypeId { + // Feel free to add more here, I'm just lazy. + match kind { + TypeKind::Void | + TypeKind::Int(..) | + TypeKind::Pointer(..) | + TypeKind::Float(..) => {} + _ => panic!("Unsupported builtin type"), + } + + let ty = Type::new(None, None, kind, is_const); + let id = ctx.next_item_id(); + let module = ctx.root_module().into(); + ctx.add_item( + Item::new(id, None, None, module, ItemKind::Type(ty), None), + None, + None, + ); + id.as_type_id_unchecked() + } + + /// Parse this item from the given Clang cursor. + pub(crate) fn parse( + cursor: clang::Cursor, + parent_id: Option, + ctx: &mut BindgenContext, + ) -> Result { + use crate::ir::var::Var; + use clang_sys::*; + + if !cursor.is_valid() { + return Err(ParseError::Continue); + } + + let comment = cursor.raw_comment(); + let annotations = Annotations::new(&cursor); + + let current_module = ctx.current_module().into(); + let relevant_parent_id = parent_id.unwrap_or(current_module); + + #[allow(clippy::missing_docs_in_private_items)] + macro_rules! try_parse { + ($what:ident) => { + match $what::parse(cursor, ctx) { + Ok(ParseResult::New(item, declaration)) => { + let id = ctx.next_item_id(); + + ctx.add_item( + Item::new( + id, + comment, + annotations, + relevant_parent_id, + ItemKind::$what(item), + Some(cursor.location()), + ), + declaration, + Some(cursor), + ); + return Ok(id); + } + Ok(ParseResult::AlreadyResolved(id)) => { + return Ok(id); + } + Err(ParseError::Recurse) => return Err(ParseError::Recurse), + Err(ParseError::Continue) => {} + } + }; + } + + try_parse!(Module); + + // NOTE: Is extremely important to parse functions and vars **before** + // types. Otherwise we can parse a function declaration as a type + // (which is legal), and lose functions to generate. + // + // In general, I'm not totally confident this split between + // ItemKind::Function and TypeKind::FunctionSig is totally worth it, but + // I guess we can try. + try_parse!(Function); + try_parse!(Var); + + // Types are sort of special, so to avoid parsing template classes + // twice, handle them separately. 
+ { + let definition = cursor.definition(); + let applicable_cursor = definition.unwrap_or(cursor); + + let relevant_parent_id = match definition { + Some(definition) => { + if definition != cursor { + ctx.add_semantic_parent(definition, relevant_parent_id); + return Ok(Item::from_ty_or_ref( + applicable_cursor.cur_type(), + cursor, + parent_id, + ctx, + ) + .into()); + } + ctx.known_semantic_parent(definition) + .or(parent_id) + .unwrap_or_else(|| ctx.current_module().into()) + } + None => relevant_parent_id, + }; + + match Item::from_ty( + &applicable_cursor.cur_type(), + applicable_cursor, + Some(relevant_parent_id), + ctx, + ) { + Ok(ty) => return Ok(ty.into()), + Err(ParseError::Recurse) => return Err(ParseError::Recurse), + Err(ParseError::Continue) => {} + } + } + + match cursor.kind() { + // On Clang 18+, extern "C" is reported accurately as a LinkageSpec. + // Older LLVM treat it as UnexposedDecl. + CXCursor_LinkageSpec | CXCursor_UnexposedDecl => { + Err(ParseError::Recurse) + } + + // We allowlist cursors here known to be unhandled, to prevent being + // too noisy about this. + CXCursor_MacroDefinition | + CXCursor_MacroExpansion | + CXCursor_UsingDeclaration | + CXCursor_UsingDirective | + CXCursor_StaticAssert | + CXCursor_FunctionTemplate => { + debug!("Unhandled cursor kind {:?}: {cursor:?}", cursor.kind()); + Err(ParseError::Continue) + } + + CXCursor_InclusionDirective => { + let file = cursor.get_included_file_name(); + match file { + None => { + warn!("Inclusion of a nameless file in {cursor:?}"); + } + Some(included_file) => { + for cb in &ctx.options().parse_callbacks { + cb.include_file(&included_file); + } + + ctx.add_dep(included_file.into_boxed_str()); + } + } + Err(ParseError::Continue) + } + + _ => { + // ignore toplevel operator overloads + let spelling = cursor.spelling(); + if !spelling.starts_with("operator") { + warn!( + "Unhandled cursor kind {:?}: {cursor:?}", + cursor.kind(), + ); + } + Err(ParseError::Continue) + } + } + } + + /// Parse this item from the given Clang type, or if we haven't resolved all + /// the other items this one depends on, an unresolved reference. + pub(crate) fn from_ty_or_ref( + ty: clang::Type, + location: clang::Cursor, + parent_id: Option, + ctx: &mut BindgenContext, + ) -> TypeId { + let id = ctx.next_item_id(); + Self::from_ty_or_ref_with_id(id, ty, location, parent_id, ctx) + } + + /// Parse a C++ type. If we find a reference to a type that has not been + /// defined yet, use `UnresolvedTypeRef` as a placeholder. + /// + /// This logic is needed to avoid parsing items with the incorrect parent + /// and it's sort of complex to explain, so I'll just point to + /// `tests/headers/typeref.hpp` to see the kind of constructs that forced + /// this. + /// + /// Typerefs are resolved once parsing is completely done, see + /// `BindgenContext::resolve_typerefs`. 
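The `try_parse!` macro used in `Item::parse` above expands to a first-match-wins chain over the sub-item parsers (`Module`, then `Function`, then `Var`). Its control flow, sketched as a plain function over made-up parser types rather than bindgen's `ClangSubItemParser`:

```rust
/// First success wins, `Recurse` aborts the chain, `Continue` moves on.
enum ParseError {
    Recurse,
    Continue,
}

fn parse_with<T>(
    parsers: &[fn(&str) -> Result<T, ParseError>],
    input: &str,
) -> Result<T, ParseError> {
    for parse in parsers {
        match parse(input) {
            Ok(item) => return Ok(item),
            Err(ParseError::Recurse) => return Err(ParseError::Recurse),
            Err(ParseError::Continue) => {}
        }
    }
    Err(ParseError::Continue)
}

fn main() {
    let parsers: &[fn(&str) -> Result<u32, ParseError>] = &[
        |s| s.parse().map_err(|_| ParseError::Continue), // "integer" parser
        |_| Err(ParseError::Continue),                    // never matches
    ];
    assert!(matches!(parse_with(parsers, "42"), Ok(42)));
    assert!(matches!(parse_with(parsers, "abc"), Err(ParseError::Continue)));
}
```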
+ pub(crate) fn from_ty_or_ref_with_id( + potential_id: ItemId, + ty: clang::Type, + location: clang::Cursor, + parent_id: Option, + ctx: &mut BindgenContext, + ) -> TypeId { + debug!("from_ty_or_ref_with_id: {potential_id:?} {ty:?}, {location:?}, {parent_id:?}"); + + if ctx.collected_typerefs() { + debug!("refs already collected, resolving directly"); + return Item::from_ty_with_id( + potential_id, + &ty, + location, + parent_id, + ctx, + ) + .unwrap_or_else(|_| Item::new_opaque_type(potential_id, &ty, ctx)); + } + + if let Some(ty) = ctx.builtin_or_resolved_ty( + potential_id, + parent_id, + &ty, + Some(location), + ) { + debug!("{ty:?} already resolved: {location:?}"); + return ty; + } + + debug!("New unresolved type reference: {ty:?}, {location:?}"); + + let is_const = ty.is_const(); + let kind = TypeKind::UnresolvedTypeRef(ty, location, parent_id); + let current_module = ctx.current_module(); + + ctx.add_item( + Item::new( + potential_id, + None, + None, + parent_id.unwrap_or_else(|| current_module.into()), + ItemKind::Type(Type::new(None, None, kind, is_const)), + Some(location.location()), + ), + None, + None, + ); + potential_id.as_type_id_unchecked() + } + + /// Parse this item from the given Clang type. See [`Item::from_ty_with_id`]. + pub(crate) fn from_ty( + ty: &clang::Type, + location: clang::Cursor, + parent_id: Option, + ctx: &mut BindgenContext, + ) -> Result { + let id = ctx.next_item_id(); + Item::from_ty_with_id(id, ty, location, parent_id, ctx) + } + + /// This is one of the trickiest methods you'll find (probably along with + /// some of the ones that handle templates in `BindgenContext`). + /// + /// This method parses a type, given the potential ID of that type (if + /// parsing it was correct), an optional location we're scanning, which is + /// critical some times to obtain information, an optional parent item ID, + /// that will, if it's `None`, become the current module ID, and the + /// context. + pub(crate) fn from_ty_with_id( + id: ItemId, + ty: &clang::Type, + location: clang::Cursor, + parent_id: Option, + ctx: &mut BindgenContext, + ) -> Result { + use clang_sys::*; + + debug!( + "Item::from_ty_with_id: {id:?}\n\ + \tty = {ty:?},\n\ + \tlocation = {location:?}", + ); + + if ty.kind() == CXType_Unexposed || + location.cur_type().kind() == CXType_Unexposed + { + if ty.is_associated_type() || + location.cur_type().is_associated_type() + { + return Ok(Item::new_opaque_type(id, ty, ctx)); + } + + if let Some(param_id) = Item::type_param(None, location, ctx) { + return Ok(ctx.build_ty_wrapper(id, param_id, None, ty)); + } + } + + // Treat all types that are declared inside functions as opaque. The Rust binding + // won't be able to do anything with them anyway. + // + // (If we don't do this check here, we can have subtle logic bugs because we generally + // ignore function bodies. See issue #2036.) 
+ if let Some(ref parent) = ty.declaration().fallible_semantic_parent() { + if FunctionKind::from_cursor(parent).is_some() { + debug!("Skipping type declared inside function: {ty:?}"); + return Ok(Item::new_opaque_type(id, ty, ctx)); + } + } + + let decl = { + let canonical_def = ty.canonical_type().declaration().definition(); + canonical_def.unwrap_or_else(|| ty.declaration()) + }; + + let comment = location.raw_comment().or_else(|| decl.raw_comment()); + + let annotations = + Annotations::new(&decl).or_else(|| Annotations::new(&location)); + + if let Some(ref annotations) = annotations { + if let Some(replaced) = annotations.use_instead_of() { + ctx.replace(replaced, id); + } + } + + if let Some(ty) = + ctx.builtin_or_resolved_ty(id, parent_id, ty, Some(location)) + { + return Ok(ty); + } + + // First, check we're not recursing. + let mut valid_decl = decl.kind() != CXCursor_NoDeclFound; + let declaration_to_look_for = if valid_decl { + decl.canonical() + } else if location.kind() == CXCursor_ClassTemplate { + valid_decl = true; + location + } else { + decl + }; + + if valid_decl { + if let Some(partial) = ctx + .currently_parsed_types() + .iter() + .find(|ty| *ty.decl() == declaration_to_look_for) + { + debug!("Avoiding recursion parsing type: {ty:?}"); + // Unchecked because we haven't finished this type yet. + return Ok(partial.id().as_type_id_unchecked()); + } + } + + let current_module = ctx.current_module().into(); + let partial_ty = PartialType::new(declaration_to_look_for, id); + if valid_decl { + ctx.begin_parsing(partial_ty); + } + + let result = Type::from_clang_ty(id, ty, location, parent_id, ctx); + let relevant_parent_id = parent_id.unwrap_or(current_module); + let ret = match result { + Ok(ParseResult::AlreadyResolved(ty)) => { + Ok(ty.as_type_id_unchecked()) + } + Ok(ParseResult::New(item, declaration)) => { + ctx.add_item( + Item::new( + id, + comment, + annotations, + relevant_parent_id, + ItemKind::Type(item), + Some(location.location()), + ), + declaration, + Some(location), + ); + Ok(id.as_type_id_unchecked()) + } + Err(ParseError::Continue) => Err(ParseError::Continue), + Err(ParseError::Recurse) => { + debug!("Item::from_ty recursing in the ast"); + let mut result = Err(ParseError::Recurse); + + // Need to pop here, otherwise we'll get stuck. + // + // TODO: Find a nicer interface, really. Also, the + // declaration_to_look_for suspiciously shares a lot of + // logic with ir::context, so we should refactor that. + if valid_decl { + let finished = ctx.finish_parsing(); + assert_eq!(*finished.decl(), declaration_to_look_for); + } + + location.visit(|cur| { + visit_child(cur, id, ty, parent_id, ctx, &mut result) + }); + + if valid_decl { + let partial_ty = + PartialType::new(declaration_to_look_for, id); + ctx.begin_parsing(partial_ty); + } + + // If we have recursed into the AST all we know, and we still + // haven't found what we've got, let's just try and make a named + // type. + // + // This is what happens with some template members, for example. + if let Err(ParseError::Recurse) = result { + warn!( + "Unknown type, assuming named template type: \ + id = {:?}; spelling = {}", + id, + ty.spelling() + ); + Item::type_param(Some(id), location, ctx) + .ok_or(ParseError::Recurse) + } else { + result + } + } + }; + + if valid_decl { + let partial_ty = ctx.finish_parsing(); + assert_eq!(*partial_ty.decl(), declaration_to_look_for); + } + + ret + } + + /// A named type is a template parameter, e.g., the `T` in `Foo`. 
They're always local so + /// it's the only exception when there's no declaration for a type. + pub(crate) fn type_param( + with_id: Option, + location: clang::Cursor, + ctx: &mut BindgenContext, + ) -> Option { + let ty = location.cur_type(); + + debug!( + "Item::type_param:\n\ + \twith_id = {:?},\n\ + \tty = {} {:?},\n\ + \tlocation: {:?}", + with_id, + ty.spelling(), + ty, + location + ); + + if ty.kind() != clang_sys::CXType_Unexposed { + // If the given cursor's type's kind is not Unexposed, then we + // aren't looking at a template parameter. This check may need to be + // updated in the future if they start properly exposing template + // type parameters. + return None; + } + + let ty_spelling = ty.spelling(); + + // Clang does not expose any information about template type parameters + // via their clang::Type, nor does it give us their canonical cursors + // the straightforward way. However, there are three situations from + // which we can find the definition of the template type parameter, if + // the cursor is indeed looking at some kind of a template type + // parameter or use of one: + // + // 1. The cursor is pointing at the template type parameter's + // definition. This is the trivial case. + // + // (kind = TemplateTypeParameter, ...) + // + // 2. The cursor is pointing at a TypeRef whose referenced() cursor is + // situation (1). + // + // (kind = TypeRef, + // referenced = (kind = TemplateTypeParameter, ...), + // ...) + // + // 3. The cursor is pointing at some use of a template type parameter + // (for example, in a FieldDecl), and this cursor has a child cursor + // whose spelling is the same as the parent's type's spelling, and whose + // kind is a TypeRef of the situation (2) variety. + // + // (kind = FieldDecl, + // type = (kind = Unexposed, + // spelling = "T", + // ...), + // children = + // (kind = TypeRef, + // spelling = "T", + // referenced = (kind = TemplateTypeParameter, + // spelling = "T", + // ...), + // ...) + // ...) + // + // TODO: The alternative to this hacky pattern matching would be to + // maintain proper scopes of template parameters while parsing and use + // de Brujin indices to access template parameters, which clang exposes + // in the cursor's type's canonical type's spelling: + // "type-parameter-x-y". That is probably a better approach long-term, + // but maintaining these scopes properly would require more changes to + // the whole libclang -> IR parsing code. + + fn is_template_with_spelling( + refd: &clang::Cursor, + spelling: &str, + ) -> bool { + static ANON_TYPE_PARAM_RE: OnceLock = OnceLock::new(); + let anon_type_param_re = ANON_TYPE_PARAM_RE.get_or_init(|| { + regex::Regex::new(r"^type\-parameter\-\d+\-\d+$").unwrap() + }); + + if refd.kind() != clang_sys::CXCursor_TemplateTypeParameter { + return false; + } + + let refd_spelling = refd.spelling(); + refd_spelling == spelling || + // Allow for anonymous template parameters. 
+ (refd_spelling.is_empty() && anon_type_param_re.is_match(spelling.as_ref())) + } + + let definition = if is_template_with_spelling(&location, &ty_spelling) { + // Situation (1) + location + } else if location.kind() == clang_sys::CXCursor_TypeRef { + // Situation (2) + match location.referenced() { + Some(refd) + if is_template_with_spelling(&refd, &ty_spelling) => + { + refd + } + _ => return None, + } + } else { + // Situation (3) + let mut definition = None; + + location.visit(|child| { + let child_ty = child.cur_type(); + if child_ty.kind() == clang_sys::CXCursor_TypeRef && + child_ty.spelling() == ty_spelling + { + match child.referenced() { + Some(refd) + if is_template_with_spelling( + &refd, + &ty_spelling, + ) => + { + definition = Some(refd); + return clang_sys::CXChildVisit_Break; + } + _ => {} + } + } + + clang_sys::CXChildVisit_Continue + }); + + definition? + }; + assert!(is_template_with_spelling(&definition, &ty_spelling)); + + // Named types are always parented to the root module. They are never + // referenced with namespace prefixes, and they can't inherit anything + // from their parent either, so it is simplest to just hang them off + // something we know will always exist. + let parent = ctx.root_module().into(); + + if let Some(id) = ctx.get_type_param(&definition) { + return Some(if let Some(with_id) = with_id { + ctx.build_ty_wrapper(with_id, id, Some(parent), &ty) + } else { + id + }); + } + + // See tests/headers/const_tparam.hpp and + // tests/headers/variadic_tname.hpp. + let name = ty_spelling.replace("const ", "").replace('.', ""); + + let id = with_id.unwrap_or_else(|| ctx.next_item_id()); + let item = Item::new( + id, + None, + None, + parent, + ItemKind::Type(Type::named(name)), + Some(location.location()), + ); + ctx.add_type_param(item, definition); + Some(id.as_type_id_unchecked()) + } +} + +impl ItemCanonicalName for Item { + fn canonical_name(&self, ctx: &BindgenContext) -> String { + debug_assert!( + ctx.in_codegen_phase(), + "You're not supposed to call this yet" + ); + self.canonical_name + .get_or_init(|| { + let in_namespace = ctx.options().enable_cxx_namespaces || + ctx.options().disable_name_namespacing; + + if in_namespace { + self.name(ctx).within_namespaces().get() + } else { + self.name(ctx).get() + } + }) + .clone() + } +} + +impl ItemCanonicalPath for Item { + fn namespace_aware_canonical_path( + &self, + ctx: &BindgenContext, + ) -> Vec { + let mut path = self.canonical_path(ctx); + + // ASSUMPTION: (disable_name_namespacing && cxx_namespaces) + // is equivalent to + // disable_name_namespacing + if ctx.options().disable_name_namespacing { + // Only keep the last item in path + let split_idx = path.len() - 1; + path = path.split_off(split_idx); + } else if !ctx.options().enable_cxx_namespaces { + // Ignore first item "root" + path = vec![path[1..].join("_")]; + } + + if self.is_constified_enum_module(ctx) { + path.push(CONSTIFIED_ENUM_MODULE_REPR_NAME.into()); + } + + path + } + + fn canonical_path(&self, ctx: &BindgenContext) -> Vec { + self.compute_path(ctx, UserMangled::Yes) + } +} + +/// Whether to use the user-mangled name (mangled by the `item_name` callback or +/// not. +/// +/// Most of the callers probably want just yes, but the ones dealing with +/// allowlisting and blocklisting don't. +#[derive(Copy, Clone, Debug, PartialEq)] +enum UserMangled { + No, + Yes, +} + +/// Builder struct for naming variations, which hold inside different +/// flags for naming options. 
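The anonymous-template-parameter check above matches clang's `type-parameter-<depth>-<index>` spellings with a lazily built regex. The same check in isolation, assuming the `regex` crate that bindgen already depends on:

```rust
use std::sync::OnceLock;

fn is_anon_template_param_spelling(spelling: &str) -> bool {
    // Built once on first use, then reused, mirroring the OnceLock above.
    static RE: OnceLock<regex::Regex> = OnceLock::new();
    let re = RE.get_or_init(|| {
        regex::Regex::new(r"^type-parameter-\d+-\d+$").unwrap()
    });
    re.is_match(spelling)
}

fn main() {
    assert!(is_anon_template_param_spelling("type-parameter-0-1"));
    assert!(!is_anon_template_param_spelling("T"));
}
```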
+#[derive(Debug)] +pub(crate) struct NameOptions<'a> { + item: &'a Item, + ctx: &'a BindgenContext, + within_namespaces: bool, + user_mangled: UserMangled, +} + +impl<'a> NameOptions<'a> { + /// Construct a new `NameOptions` + pub(crate) fn new(item: &'a Item, ctx: &'a BindgenContext) -> Self { + NameOptions { + item, + ctx, + within_namespaces: false, + user_mangled: UserMangled::Yes, + } + } + + /// Construct the name without the item's containing C++ namespaces mangled + /// into it. In other words, the item's name within the item's namespace. + pub(crate) fn within_namespaces(&mut self) -> &mut Self { + self.within_namespaces = true; + self + } + + fn user_mangled(&mut self, user_mangled: UserMangled) -> &mut Self { + self.user_mangled = user_mangled; + self + } + + /// Construct a name `String` + pub(crate) fn get(&self) -> String { + self.item.real_canonical_name(self.ctx, self) + } +} diff --git a/vendor/bindgen/ir/item_kind.rs b/vendor/bindgen/ir/item_kind.rs new file mode 100644 index 00000000000000..9221b50579b523 --- /dev/null +++ b/vendor/bindgen/ir/item_kind.rs @@ -0,0 +1,135 @@ +//! Different variants of an `Item` in our intermediate representation. + +use super::context::BindgenContext; +use super::dot::DotAttributes; +use super::function::Function; +use super::module::Module; +use super::ty::Type; +use super::var::Var; +use std::io; + +/// A item we parse and translate. +#[derive(Debug)] +pub(crate) enum ItemKind { + /// A module, created implicitly once (the root module), or via C++ + /// namespaces. + Module(Module), + + /// A type declared in any of the multiple ways it can be declared. + Type(Type), + + /// A function or method declaration. + Function(Function), + + /// A variable declaration, most likely a static. + Var(Var), +} + +impl ItemKind { + /// Get a reference to this `ItemKind`'s underlying `Module`, or `None` if it + /// is some other kind. + pub(crate) fn as_module(&self) -> Option<&Module> { + match *self { + ItemKind::Module(ref module) => Some(module), + _ => None, + } + } + + /// Transform our `ItemKind` into a string. + pub(crate) fn kind_name(&self) -> &'static str { + match *self { + ItemKind::Module(..) => "Module", + ItemKind::Type(..) => "Type", + ItemKind::Function(..) => "Function", + ItemKind::Var(..) => "Var", + } + } + + /// Is this a module? + pub(crate) fn is_module(&self) -> bool { + self.as_module().is_some() + } + + /// Get a reference to this `ItemKind`'s underlying `Function`, or `None` if + /// it is some other kind. + pub(crate) fn as_function(&self) -> Option<&Function> { + match *self { + ItemKind::Function(ref func) => Some(func), + _ => None, + } + } + + /// Is this a function? + pub(crate) fn is_function(&self) -> bool { + self.as_function().is_some() + } + + /// Get a reference to this `ItemKind`'s underlying `Function`, or panic if + /// it is some other kind. + pub(crate) fn expect_function(&self) -> &Function { + self.as_function().expect("Not a function") + } + + /// Get a reference to this `ItemKind`'s underlying `Type`, or `None` if + /// it is some other kind. + pub(crate) fn as_type(&self) -> Option<&Type> { + match *self { + ItemKind::Type(ref ty) => Some(ty), + _ => None, + } + } + + /// Get a mutable reference to this `ItemKind`'s underlying `Type`, or `None` + /// if it is some other kind. + pub(crate) fn as_type_mut(&mut self) -> Option<&mut Type> { + match *self { + ItemKind::Type(ref mut ty) => Some(ty), + _ => None, + } + } + + /// Is this a type? 
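`NameOptions` above is a small builder: setters take and return `&mut Self` so flags can be chained before the final `get()`. A stripped-down sketch of that shape with illustrative fields:

```rust
struct NameOptions {
    within_namespaces: bool,
    user_mangled: bool,
}

impl NameOptions {
    fn new() -> Self {
        NameOptions { within_namespaces: false, user_mangled: true }
    }

    /// Chainable flag setter, as in `item.name(ctx).within_namespaces().get()`.
    fn within_namespaces(&mut self) -> &mut Self {
        self.within_namespaces = true;
        self
    }

    fn get(&self) -> String {
        format!(
            "within_namespaces={}, user_mangled={}",
            self.within_namespaces, self.user_mangled
        )
    }
}

fn main() {
    let name = NameOptions::new().within_namespaces().get();
    println!("{name}");
}
```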
+ pub(crate) fn is_type(&self) -> bool { + self.as_type().is_some() + } + + /// Get a reference to this `ItemKind`'s underlying `Type`, or panic if it is + /// some other kind. + pub(crate) fn expect_type(&self) -> &Type { + self.as_type().expect("Not a type") + } + + /// Get a reference to this `ItemKind`'s underlying `Var`, or `None` if it is + /// some other kind. + pub(crate) fn as_var(&self) -> Option<&Var> { + match *self { + ItemKind::Var(ref v) => Some(v), + _ => None, + } + } + + /// Is this a variable? + pub(crate) fn is_var(&self) -> bool { + self.as_var().is_some() + } +} + +impl DotAttributes for ItemKind { + fn dot_attributes( + &self, + ctx: &BindgenContext, + out: &mut W, + ) -> io::Result<()> + where + W: io::Write, + { + writeln!(out, "kind{}", self.kind_name())?; + + match *self { + ItemKind::Module(ref module) => module.dot_attributes(ctx, out), + ItemKind::Type(ref ty) => ty.dot_attributes(ctx, out), + ItemKind::Function(ref func) => func.dot_attributes(ctx, out), + ItemKind::Var(ref var) => var.dot_attributes(ctx, out), + } + } +} diff --git a/vendor/bindgen/ir/layout.rs b/vendor/bindgen/ir/layout.rs new file mode 100644 index 00000000000000..905e47c732a225 --- /dev/null +++ b/vendor/bindgen/ir/layout.rs @@ -0,0 +1,126 @@ +//! Intermediate representation for the physical layout of some type. + +use super::derive::CanDerive; +use super::ty::{Type, TypeKind, RUST_DERIVE_IN_ARRAY_LIMIT}; +use crate::clang; +use crate::ir::context::BindgenContext; +use std::cmp; + +/// A type that represents the struct layout of a type. +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub(crate) struct Layout { + /// The size (in bytes) of this layout. + pub(crate) size: usize, + /// The alignment (in bytes) of this layout. + pub(crate) align: usize, + /// Whether this layout's members are packed or not. + pub(crate) packed: bool, +} + +#[test] +fn test_layout_for_size() { + use std::mem::size_of; + let ptr_size = size_of::<*mut ()>(); + assert_eq!( + Layout::for_size_internal(ptr_size, ptr_size), + Layout::new(ptr_size, ptr_size) + ); + assert_eq!( + Layout::for_size_internal(ptr_size, 3 * ptr_size), + Layout::new(3 * ptr_size, ptr_size) + ); +} + +impl Layout { + /// Gets the integer type name for a given known size. + pub(crate) fn known_type_for_size(size: usize) -> Option { + Some(match size { + 16 => syn::parse_quote! { u128 }, + 8 => syn::parse_quote! { u64 }, + 4 => syn::parse_quote! { u32 }, + 2 => syn::parse_quote! { u16 }, + 1 => syn::parse_quote! { u8 }, + _ => return None, + }) + } + + /// Construct a new `Layout` with the given `size` and `align`. It is not + /// packed. + pub(crate) fn new(size: usize, align: usize) -> Self { + Layout { + size, + align, + packed: false, + } + } + + fn for_size_internal(ptr_size: usize, size: usize) -> Self { + let mut next_align = 2; + while size % next_align == 0 && next_align <= ptr_size { + next_align *= 2; + } + Layout { + size, + align: next_align / 2, + packed: false, + } + } + + /// Creates a non-packed layout for a given size, trying to use the maximum + /// alignment possible. + pub(crate) fn for_size(ctx: &BindgenContext, size: usize) -> Self { + Self::for_size_internal(ctx.target_pointer_size(), size) + } + + /// Get this layout as an opaque type. + pub(crate) fn opaque(&self) -> Opaque { + Opaque(*self) + } +} + +/// When we are treating a type as opaque, it is just a blob with a `Layout`. 
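`Layout::for_size_internal` above guesses an alignment for an opaque blob by taking the largest power of two that divides its size, capped at the target pointer width. The same heuristic as a standalone function with a few worked cases:

```rust
/// Largest power of two that divides `size` and does not exceed `ptr_size`.
fn natural_align(ptr_size: usize, size: usize) -> usize {
    let mut next_align = 2;
    while size % next_align == 0 && next_align <= ptr_size {
        next_align *= 2;
    }
    next_align / 2
}

fn main() {
    assert_eq!(natural_align(8, 24), 8); // 24 = 8 * 3
    assert_eq!(natural_align(8, 6), 2);  // 6 is only divisible by 2
    assert_eq!(natural_align(8, 5), 1);  // odd sizes fall back to align 1
}
```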
+#[derive(Clone, Debug, PartialEq, Eq)] +pub(crate) struct Opaque(pub(crate) Layout); + +impl Opaque { + /// Construct a new opaque type from the given clang type. + pub(crate) fn from_clang_ty( + ty: &clang::Type, + ctx: &BindgenContext, + ) -> Type { + let layout = Layout::new(ty.size(ctx), ty.align(ctx)); + let ty_kind = TypeKind::Opaque; + let is_const = ty.is_const(); + Type::new(None, Some(layout), ty_kind, is_const) + } + + /// Return the known rust type we should use to create a correctly-aligned + /// field with this layout. + pub(crate) fn known_rust_type_for_array(&self) -> Option { + Layout::known_type_for_size(self.0.align) + } + + /// Return the array size that an opaque type for this layout should have if + /// we know the correct type for it, or `None` otherwise. + pub(crate) fn array_size(&self) -> Option { + if self.known_rust_type_for_array().is_some() { + Some(self.0.size / cmp::max(self.0.align, 1)) + } else { + None + } + } + + /// Return `true` if this opaque layout's array size will fit within the + /// maximum number of array elements that Rust allows deriving traits + /// with. Return `false` otherwise. + pub(crate) fn array_size_within_derive_limit(&self) -> CanDerive { + if self + .array_size() + .is_some_and(|size| size <= RUST_DERIVE_IN_ARRAY_LIMIT) + { + CanDerive::Yes + } else { + CanDerive::Manually + } + } +} diff --git a/vendor/bindgen/ir/mod.rs b/vendor/bindgen/ir/mod.rs new file mode 100644 index 00000000000000..acdb4896cda7cc --- /dev/null +++ b/vendor/bindgen/ir/mod.rs @@ -0,0 +1,25 @@ +//! The ir module defines bindgen's intermediate representation. +//! +//! Parsing C/C++ generates the IR, while code generation outputs Rust code from +//! the IR. +#![deny(clippy::missing_docs_in_private_items)] + +pub(crate) mod analysis; +pub(crate) mod annotations; +pub(crate) mod comment; +pub(crate) mod comp; +pub(crate) mod context; +pub(crate) mod derive; +pub(crate) mod dot; +pub(crate) mod enum_ty; +pub(crate) mod function; +pub(crate) mod int; +pub(crate) mod item; +pub(crate) mod item_kind; +pub(crate) mod layout; +pub(crate) mod module; +pub(crate) mod objc; +pub(crate) mod template; +pub(crate) mod traversal; +pub(crate) mod ty; +pub(crate) mod var; diff --git a/vendor/bindgen/ir/module.rs b/vendor/bindgen/ir/module.rs new file mode 100644 index 00000000000000..4788cf4285fc17 --- /dev/null +++ b/vendor/bindgen/ir/module.rs @@ -0,0 +1,96 @@ +//! Intermediate representation for modules (AKA C++ namespaces). + +use super::context::BindgenContext; +use super::dot::DotAttributes; +use super::item::ItemSet; +use crate::clang; +use crate::parse::{ClangSubItemParser, ParseError, ParseResult}; +use crate::parse_one; + +use std::io; + +/// Whether this module is inline or not. +#[derive(Debug, Copy, Clone, PartialEq, Eq)] +pub(crate) enum ModuleKind { + /// This module is not inline. + Normal, + /// This module is inline, as in `inline namespace foo {}`. + Inline, +} + +/// A module, as in, a C++ namespace. +#[derive(Clone, Debug)] +pub(crate) struct Module { + /// The name of the module, or none if it's anonymous. + name: Option, + /// The kind of module this is. + kind: ModuleKind, + /// The children of this module, just here for convenience. + children: ItemSet, +} + +impl Module { + /// Construct a new `Module`. + pub(crate) fn new(name: Option, kind: ModuleKind) -> Self { + Module { + name, + kind, + children: ItemSet::new(), + } + } + + /// Get this module's name. 
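The `Opaque` helpers above turn a layout into a padding array: the element type is the unsigned integer whose size equals the alignment, and the element count is `size / align`. A compact illustration of that mapping, with string type names standing in for the real `syn` types:

```rust
/// (element type name, element count) for an opaque blob, or None when the
/// alignment has no matching integer type.
fn opaque_array(size: usize, align: usize) -> Option<(&'static str, usize)> {
    let elem = match align {
        16 => "u128",
        8 => "u64",
        4 => "u32",
        2 => "u16",
        1 => "u8",
        _ => return None,
    };
    Some((elem, size / align.max(1)))
}

fn main() {
    // A 24-byte, 8-byte-aligned C type is emitted as `[u64; 3]`.
    assert_eq!(opaque_array(24, 8), Some(("u64", 3)));
}
```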
+ pub(crate) fn name(&self) -> Option<&str> { + self.name.as_deref() + } + + /// Get a mutable reference to this module's children. + pub(crate) fn children_mut(&mut self) -> &mut ItemSet { + &mut self.children + } + + /// Get this module's children. + pub(crate) fn children(&self) -> &ItemSet { + &self.children + } + + /// Whether this namespace is inline. + pub(crate) fn is_inline(&self) -> bool { + self.kind == ModuleKind::Inline + } +} + +impl DotAttributes for Module { + fn dot_attributes( + &self, + _ctx: &BindgenContext, + out: &mut W, + ) -> io::Result<()> + where + W: io::Write, + { + writeln!(out, "ModuleKind{:?}", self.kind) + } +} + +impl ClangSubItemParser for Module { + fn parse( + cursor: clang::Cursor, + ctx: &mut BindgenContext, + ) -> Result, ParseError> { + use clang_sys::*; + match cursor.kind() { + CXCursor_Namespace => { + let module_id = ctx.module(cursor); + ctx.with_module(module_id, |ctx| { + cursor.visit_sorted(ctx, |ctx, child| { + parse_one(ctx, child, Some(module_id.into())); + }); + }); + + Ok(ParseResult::AlreadyResolved(module_id.into())) + } + _ => Err(ParseError::Continue), + } + } +} diff --git a/vendor/bindgen/ir/objc.rs b/vendor/bindgen/ir/objc.rs new file mode 100644 index 00000000000000..6cdadb131d45a2 --- /dev/null +++ b/vendor/bindgen/ir/objc.rs @@ -0,0 +1,343 @@ +//! Objective C types + +use super::context::{BindgenContext, ItemId}; +use super::function::FunctionSig; +use super::item::Item; +use super::traversal::{Trace, Tracer}; +use super::ty::TypeKind; +use crate::clang; +use clang_sys::CXChildVisit_Continue; +use clang_sys::CXCursor_ObjCCategoryDecl; +use clang_sys::CXCursor_ObjCClassMethodDecl; +use clang_sys::CXCursor_ObjCClassRef; +use clang_sys::CXCursor_ObjCInstanceMethodDecl; +use clang_sys::CXCursor_ObjCProtocolDecl; +use clang_sys::CXCursor_ObjCProtocolRef; +use clang_sys::CXCursor_ObjCSuperClassRef; +use clang_sys::CXCursor_TemplateTypeParameter; +use proc_macro2::{Ident, Span, TokenStream}; + +/// Objective-C interface as used in `TypeKind` +/// +/// Also, protocols and categories are parsed as this type +#[derive(Debug)] +pub(crate) struct ObjCInterface { + /// The name + /// like, `NSObject` + name: String, + + category: Option, + + is_protocol: bool, + + /// The list of template names almost always, `ObjectType` or `KeyType` + pub(crate) template_names: Vec, + + /// The list of protocols that this interface conforms to. + pub(crate) conforms_to: Vec, + + /// The direct parent for this interface. + pub(crate) parent_class: Option, + + /// List of the methods defined in this interface + methods: Vec, + + class_methods: Vec, +} + +/// The objective c methods +#[derive(Debug)] +pub(crate) struct ObjCMethod { + /// The original method selector name + /// like, dataWithBytes:length: + name: String, + + /// Method name as converted to rust + /// like, `dataWithBytes_length`_ + rust_name: String, + + signature: FunctionSig, + + /// Is class method? 
+ is_class_method: bool, +} + +impl ObjCInterface { + fn new(name: &str) -> ObjCInterface { + ObjCInterface { + name: name.to_owned(), + category: None, + is_protocol: false, + template_names: Vec::new(), + parent_class: None, + conforms_to: Vec::new(), + methods: Vec::new(), + class_methods: Vec::new(), + } + } + + /// The name + /// like, `NSObject` + pub(crate) fn name(&self) -> &str { + self.name.as_ref() + } + + /// Formats the name for rust + /// Can be like `NSObject`, but with categories might be like `NSObject_NSCoderMethods` + /// and protocols are like `PNSObject` + pub(crate) fn rust_name(&self) -> String { + if let Some(ref cat) = self.category { + format!("{}_{cat}", self.name()) + } else if self.is_protocol { + format!("P{}", self.name()) + } else { + format!("I{}", self.name().to_owned()) + } + } + + /// Is this a template interface? + pub(crate) fn is_template(&self) -> bool { + !self.template_names.is_empty() + } + + /// List of the methods defined in this interface + pub(crate) fn methods(&self) -> &Vec { + &self.methods + } + + /// Is this a protocol? + pub(crate) fn is_protocol(&self) -> bool { + self.is_protocol + } + + /// Is this a category? + pub(crate) fn is_category(&self) -> bool { + self.category.is_some() + } + + /// List of the class methods defined in this interface + pub(crate) fn class_methods(&self) -> &Vec { + &self.class_methods + } + + /// Parses the Objective C interface from the cursor + pub(crate) fn from_ty( + cursor: &clang::Cursor, + ctx: &mut BindgenContext, + ) -> Option { + let name = cursor.spelling(); + let mut interface = Self::new(&name); + + if cursor.kind() == CXCursor_ObjCProtocolDecl { + interface.is_protocol = true; + } + + cursor.visit(|c| { + match c.kind() { + CXCursor_ObjCClassRef => { + if cursor.kind() == CXCursor_ObjCCategoryDecl { + // We are actually a category extension, and we found the reference + // to the original interface, so name this interface appropriately + interface.name = c.spelling(); + interface.category = Some(cursor.spelling()); + } + } + CXCursor_ObjCProtocolRef => { + // Gather protocols this interface conforms to + let needle = format!("P{}", c.spelling()); + let items_map = ctx.items(); + debug!( + "Interface {} conforms to {needle}, find the item", + interface.name, + ); + + for (id, item) in items_map { + if let Some(ty) = item.as_type() { + if let TypeKind::ObjCInterface(ref protocol) = + *ty.kind() + { + if protocol.is_protocol { + debug!( + "Checking protocol {}, ty.name {:?}", + protocol.name, + ty.name() + ); + if Some(needle.as_ref()) == ty.name() { + debug!("Found conforming protocol {item:?}"); + interface.conforms_to.push(id); + break; + } + } + } + } + } + } + CXCursor_ObjCInstanceMethodDecl | + CXCursor_ObjCClassMethodDecl => { + let name = c.spelling(); + let signature = + FunctionSig::from_ty(&c.cur_type(), &c, ctx) + .expect("Invalid function sig"); + let is_class_method = + c.kind() == CXCursor_ObjCClassMethodDecl; + let method = + ObjCMethod::new(&name, signature, is_class_method); + interface.add_method(method); + } + CXCursor_TemplateTypeParameter => { + let name = c.spelling(); + interface.template_names.push(name); + } + CXCursor_ObjCSuperClassRef => { + let item = Item::from_ty_or_ref(c.cur_type(), c, None, ctx); + interface.parent_class = Some(item.into()); + } + _ => {} + } + CXChildVisit_Continue + }); + Some(interface) + } + + fn add_method(&mut self, method: ObjCMethod) { + if method.is_class_method { + self.class_methods.push(method); + } else { + self.methods.push(method); + 
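The selector-to-Rust-name conversion documented above (and implemented in `ObjCMethod::new` just below) simply turns every `:` separator in the Objective-C selector into `_`. In isolation:

```rust
/// "dataWithBytes:length:" becomes "dataWithBytes_length_".
fn rust_name_for_selector(selector: &str) -> String {
    selector.split(':').collect::<Vec<_>>().join("_")
}

fn main() {
    assert_eq!(rust_name_for_selector("dataWithBytes:length:"), "dataWithBytes_length_");
    assert_eq!(rust_name_for_selector("description"), "description");
}
```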
} + } +} + +impl ObjCMethod { + fn new( + name: &str, + signature: FunctionSig, + is_class_method: bool, + ) -> ObjCMethod { + let split_name: Vec<&str> = name.split(':').collect(); + + let rust_name = split_name.join("_"); + + ObjCMethod { + name: name.to_owned(), + rust_name, + signature, + is_class_method, + } + } + + /// Method name as converted to rust + /// like, `dataWithBytes_length`_ + pub(crate) fn rust_name(&self) -> &str { + self.rust_name.as_ref() + } + + /// Returns the methods signature as `FunctionSig` + pub(crate) fn signature(&self) -> &FunctionSig { + &self.signature + } + + /// Is this a class method? + pub(crate) fn is_class_method(&self) -> bool { + self.is_class_method + } + + /// Formats the method call + pub(crate) fn format_method_call( + &self, + args: &[TokenStream], + ) -> TokenStream { + let split_name: Vec> = self + .name + .split(':') + .enumerate() + .map(|(idx, name)| { + if name.is_empty() { + None + } else if idx == 0 { + // Try to parse the method name as an identifier. Having a keyword is ok + // unless it is `crate`, `self`, `super` or `Self`, so we try to add the `_` + // suffix to it and parse it. + if ["crate", "self", "super", "Self"].contains(&name) { + Some(Ident::new(&format!("{name}_"), Span::call_site())) + } else { + Some(Ident::new(name, Span::call_site())) + } + } else { + // Try to parse the current joining name as an identifier. This might fail if the name + // is a keyword, so we try to "r#" to it and parse again, this could also fail + // if the name is `crate`, `self`, `super` or `Self`, so we try to add the `_` + // suffix to it and parse again. If this also fails, we panic with the first + // error. + Some( + syn::parse_str::(name) + .or_else(|err| { + syn::parse_str::(&format!("r#{name}")) + .map_err(|_| err) + }) + .or_else(|err| { + syn::parse_str::(&format!("{name}_")) + .map_err(|_| err) + }) + .expect("Invalid identifier"), + ) + } + }) + .collect(); + + // No arguments + if args.is_empty() && split_name.len() == 1 { + let name = &split_name[0]; + return quote! { + #name + }; + } + + // Check right amount of arguments + assert_eq!(args.len(), split_name.len() - 1, "Incorrect method name or arguments for objc method, {args:?} vs {split_name:?}"); + + // Get arguments without type signatures to pass to `msg_send!` + let mut args_without_types = vec![]; + for arg in args { + let arg = arg.to_string(); + let name_and_sig: Vec<&str> = arg.split(' ').collect(); + let name = name_and_sig[0]; + args_without_types.push(Ident::new(name, Span::call_site())); + } + + let args = split_name.into_iter().zip(args_without_types).map( + |(arg, arg_val)| { + if let Some(arg) = arg { + quote! { #arg: #arg_val } + } else { + quote! { #arg_val: #arg_val } + } + }, + ); + + quote! { + #( #args )* + } + } +} + +impl Trace for ObjCInterface { + type Extra = (); + + fn trace(&self, context: &BindgenContext, tracer: &mut T, _: &()) + where + T: Tracer, + { + for method in &self.methods { + method.signature.trace(context, tracer, &()); + } + + for class_method in &self.class_methods { + class_method.signature.trace(context, tracer, &()); + } + + for protocol in &self.conforms_to { + tracer.visit(*protocol); + } + } +} diff --git a/vendor/bindgen/ir/template.rs b/vendor/bindgen/ir/template.rs new file mode 100644 index 00000000000000..7f3667879d98eb --- /dev/null +++ b/vendor/bindgen/ir/template.rs @@ -0,0 +1,335 @@ +//! Template declaration and instantiation related things. +//! +//! 
The nomenclature surrounding templates is often confusing, so here are a few +//! brief definitions: +//! +//! * "Template definition": a class/struct/alias/function definition that takes +//! generic template parameters. For example: +//! +//! ```c++ +//! template +//! class List { +//! // ... +//! }; +//! ``` +//! +//! * "Template instantiation": an instantiation is a use of a template with +//! concrete template arguments. For example, `List`. +//! +//! * "Template specialization": an alternative template definition providing a +//! custom definition for instantiations with the matching template +//! arguments. This C++ feature is unsupported by bindgen. For example: +//! +//! ```c++ +//! template<> +//! class List { +//! // Special layout for int lists... +//! }; +//! ``` + +use super::context::{BindgenContext, ItemId, TypeId}; +use super::item::{IsOpaque, Item, ItemAncestors}; +use super::traversal::{EdgeKind, Trace, Tracer}; +use crate::clang; + +/// Template declaration (and such declaration's template parameters) related +/// methods. +/// +/// This trait's methods distinguish between `None` and `Some([])` for +/// declarations that are not templates and template declarations with zero +/// parameters, in general. +/// +/// Consider this example: +/// +/// ```c++ +/// template +/// class Foo { +/// T use_of_t; +/// U use_of_u; +/// +/// template +/// using Bar = V*; +/// +/// class Inner { +/// T x; +/// U y; +/// Bar z; +/// }; +/// +/// template +/// class Lol { +/// // No use of W, but here's a use of T. +/// T t; +/// }; +/// +/// template +/// class Wtf { +/// // X is not used because W is not used. +/// Lol lololol; +/// }; +/// }; +/// +/// class Qux { +/// int y; +/// }; +/// ``` +/// +/// The following table depicts the results of each trait method when invoked on +/// each of the declarations above: +/// +/// |Decl. | self_template_params | num_self_template_params | all_template_parameters | +/// |------|----------------------|--------------------------|-------------------------| +/// |Foo | T, U | 2 | T, U | +/// |Bar | V | 1 | T, U, V | +/// |Inner | | 0 | T, U | +/// |Lol | W | 1 | T, U, W | +/// |Wtf | X | 1 | T, U, X | +/// |Qux | | 0 | | +/// +/// | Decl. | used_template_params | +/// |-------|----------------------| +/// | Foo | T, U | +/// | Bar | V | +/// | Inner | | +/// | Lol | T | +/// | Wtf | T | +/// | Qux | | +pub(crate) trait TemplateParameters: Sized { + /// Get the set of `ItemId`s that make up this template declaration's free + /// template parameters. + /// + /// Note that these might *not* all be named types: C++ allows + /// constant-value template parameters as well as template-template + /// parameters. Of course, Rust does not allow generic parameters to be + /// anything but types, so we must treat them as opaque, and avoid + /// instantiating them. + fn self_template_params(&self, ctx: &BindgenContext) -> Vec; + + /// Get the number of free template parameters this template declaration + /// has. + fn num_self_template_params(&self, ctx: &BindgenContext) -> usize { + self.self_template_params(ctx).len() + } + + /// Get the complete set of template parameters that can affect this + /// declaration. + /// + /// Note that this item doesn't need to be a template declaration itself for + /// `Some` to be returned here (in contrast to `self_template_params`). If + /// this item is a member of a template declaration, then the parent's + /// template parameters are included here. 
+ /// + /// In the example above, `Inner` depends on both of the `T` and `U` type + /// parameters, even though it is not itself a template declaration and + /// therefore has no type parameters itself. Perhaps it helps to think about + /// how we would fully reference such a member type in C++: + /// `Foo::Inner`. `Foo` *must* be instantiated with template + /// arguments before we can gain access to the `Inner` member type. + fn all_template_params(&self, ctx: &BindgenContext) -> Vec + where + Self: ItemAncestors, + { + let mut ancestors: Vec<_> = self.ancestors(ctx).collect(); + ancestors.reverse(); + ancestors + .into_iter() + .flat_map(|id| id.self_template_params(ctx).into_iter()) + .collect() + } + + /// Get only the set of template parameters that this item uses. This is a + /// subset of `all_template_params` and does not necessarily contain any of + /// `self_template_params`. + fn used_template_params(&self, ctx: &BindgenContext) -> Vec + where + Self: AsRef, + { + assert!( + ctx.in_codegen_phase(), + "template parameter usage is not computed until codegen" + ); + + let id = *self.as_ref(); + ctx.resolve_item(id) + .all_template_params(ctx) + .into_iter() + .filter(|p| ctx.uses_template_parameter(id, *p)) + .collect() + } +} + +/// A trait for things which may or may not be a named template type parameter. +pub(crate) trait AsTemplateParam { + /// Any extra information the implementor might need to make this decision. + type Extra; + + /// Convert this thing to the item ID of a named template type parameter. + fn as_template_param( + &self, + ctx: &BindgenContext, + extra: &Self::Extra, + ) -> Option; + + /// Is this a named template type parameter? + fn is_template_param( + &self, + ctx: &BindgenContext, + extra: &Self::Extra, + ) -> bool { + self.as_template_param(ctx, extra).is_some() + } +} + +/// A concrete instantiation of a generic template. +#[derive(Clone, Debug)] +pub(crate) struct TemplateInstantiation { + /// The template definition which this is instantiating. + definition: TypeId, + /// The concrete template arguments, which will be substituted in the + /// definition for the generic template parameters. + args: Vec, +} + +impl TemplateInstantiation { + /// Construct a new template instantiation from the given parts. + pub(crate) fn new(definition: TypeId, args: I) -> TemplateInstantiation + where + I: IntoIterator, + { + TemplateInstantiation { + definition, + args: args.into_iter().collect(), + } + } + + /// Get the template definition for this instantiation. + pub(crate) fn template_definition(&self) -> TypeId { + self.definition + } + + /// Get the concrete template arguments used in this instantiation. + pub(crate) fn template_arguments(&self) -> &[TypeId] { + &self.args[..] + } + + /// Parse a `TemplateInstantiation` from a clang `Type`. 
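+ ///
+ /// Hedged sketch (not upstream documentation): for a clang type such as
+ /// `List<int>`, the returned instantiation (call it `inst`) can be
+ /// inspected through the accessors above, roughly:
+ ///
+ /// ```ignore
+ /// let definition: TypeId = inst.template_definition(); // the `List` template
+ /// let args: &[TypeId] = inst.template_arguments();     // one entry, for `int`
+ /// ```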
+ pub(crate) fn from_ty( + ty: &clang::Type, + ctx: &mut BindgenContext, + ) -> Option { + use clang_sys::*; + + let template_args = ty.template_args().map_or(vec![], |args| match ty + .canonical_type() + .template_args() + { + Some(canonical_args) => { + let arg_count = args.len(); + args.chain(canonical_args.skip(arg_count)) + .filter(|t| t.kind() != CXType_Invalid) + .map(|t| { + Item::from_ty_or_ref(t, t.declaration(), None, ctx) + }) + .collect() + } + None => args + .filter(|t| t.kind() != CXType_Invalid) + .map(|t| Item::from_ty_or_ref(t, t.declaration(), None, ctx)) + .collect(), + }); + + let declaration = ty.declaration(); + let definition = if declaration.kind() == CXCursor_TypeAliasTemplateDecl + { + Some(declaration) + } else { + declaration.specialized().or_else(|| { + let mut template_ref = None; + ty.declaration().visit(|child| { + if child.kind() == CXCursor_TemplateRef { + template_ref = Some(child); + return CXVisit_Break; + } + + // Instantiations of template aliases might have the + // TemplateRef to the template alias definition arbitrarily + // deep, so we need to recurse here and not only visit + // direct children. + CXChildVisit_Recurse + }); + + template_ref.and_then(|cur| cur.referenced()) + }) + }; + + let Some(definition) = definition else { + if !ty.declaration().is_builtin() { + warn!( + "Could not find template definition for template \ + instantiation" + ); + } + return None; + }; + + let template_definition = + Item::from_ty_or_ref(definition.cur_type(), definition, None, ctx); + + Some(TemplateInstantiation::new( + template_definition, + template_args, + )) + } +} + +impl IsOpaque for TemplateInstantiation { + type Extra = Item; + + /// Is this an opaque template instantiation? + fn is_opaque(&self, ctx: &BindgenContext, item: &Item) -> bool { + if self.template_definition().is_opaque(ctx, &()) { + return true; + } + + // TODO(#774): This doesn't properly handle opaque instantiations where + // an argument is itself an instantiation because `canonical_name` does + // not insert the template arguments into the name, ie it for nested + // template arguments it creates "Foo" instead of "Foo". The fully + // correct fix is to make `canonical_{name,path}` include template + // arguments properly. + + let mut path = item.path_for_allowlisting(ctx).clone(); + let args: Vec<_> = self + .template_arguments() + .iter() + .map(|arg| { + let arg_path = + ctx.resolve_item(*arg).path_for_allowlisting(ctx); + arg_path[1..].join("::") + }) + .collect(); + { + let last = path.last_mut().unwrap(); + last.push('<'); + last.push_str(&args.join(", ")); + last.push('>'); + } + + ctx.opaque_by_name(&path) + } +} + +impl Trace for TemplateInstantiation { + type Extra = (); + + fn trace(&self, _ctx: &BindgenContext, tracer: &mut T, _: &()) + where + T: Tracer, + { + tracer + .visit_kind(self.definition.into(), EdgeKind::TemplateDeclaration); + for arg in self.template_arguments() { + tracer.visit_kind(arg.into(), EdgeKind::TemplateArgument); + } + } +} diff --git a/vendor/bindgen/ir/traversal.rs b/vendor/bindgen/ir/traversal.rs new file mode 100644 index 00000000000000..01f3a8bd507f4a --- /dev/null +++ b/vendor/bindgen/ir/traversal.rs @@ -0,0 +1,478 @@ +//! Traversal of the graph of IR items and types. 
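+//!
+//! Illustrative sketch (not part of the upstream module docs): any
+//! `FnMut(ItemId, EdgeKind)` closure implements `Tracer`, so collecting the
+//! outgoing edges of an item can look roughly like this, assuming `ctx` and
+//! an `ItemId` named `id` are already available:
+//!
+//! ```ignore
+//! let mut edges = Vec::new();
+//! id.trace(&ctx, &mut |to, kind| edges.push(Edge::new(to, kind)), &());
+//! ```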
+ +use super::context::{BindgenContext, ItemId}; +use super::item::ItemSet; +use std::collections::{BTreeMap, VecDeque}; + +/// An outgoing edge in the IR graph is a reference from some item to another +/// item: +/// +/// from --> to +/// +/// The `from` is left implicit: it is the concrete `Trace` implementer which +/// yielded this outgoing edge. +#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] +pub(crate) struct Edge { + to: ItemId, + kind: EdgeKind, +} + +impl Edge { + /// Construct a new edge whose referent is `to` and is of the given `kind`. + pub(crate) fn new(to: ItemId, kind: EdgeKind) -> Edge { + Edge { to, kind } + } +} + +impl From for ItemId { + fn from(val: Edge) -> Self { + val.to + } +} + +/// The kind of edge reference. This is useful when we wish to only consider +/// certain kinds of edges for a particular traversal or analysis. +#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] +pub(crate) enum EdgeKind { + /// A generic, catch-all edge. + Generic, + + /// An edge from a template declaration, to the definition of a named type + /// parameter. For example, the edge from `Foo` to `T` in the following + /// snippet: + /// + /// ```C++ + /// template + /// class Foo { }; + /// ``` + TemplateParameterDefinition, + + /// An edge from a template instantiation to the template declaration that + /// is being instantiated. For example, the edge from `Foo` to + /// to `Foo`: + /// + /// ```C++ + /// template + /// class Foo { }; + /// + /// using Bar = Foo; + /// ``` + TemplateDeclaration, + + /// An edge from a template instantiation to its template argument. For + /// example, `Foo` to `Bar`: + /// + /// ```C++ + /// template + /// class Foo { }; + /// + /// class Bar { }; + /// + /// using FooBar = Foo; + /// ``` + TemplateArgument, + + /// An edge from a compound type to one of its base member types. For + /// example, the edge from `Bar` to `Foo`: + /// + /// ```C++ + /// class Foo { }; + /// + /// class Bar : public Foo { }; + /// ``` + BaseMember, + + /// An edge from a compound type to the types of one of its fields. For + /// example, the edge from `Foo` to `int`: + /// + /// ```C++ + /// class Foo { + /// int x; + /// }; + /// ``` + Field, + + /// An edge from an class or struct type to an inner type member. For + /// example, the edge from `Foo` to `Foo::Bar` here: + /// + /// ```C++ + /// class Foo { + /// struct Bar { }; + /// }; + /// ``` + InnerType, + + /// An edge from an class or struct type to an inner static variable. For + /// example, the edge from `Foo` to `Foo::BAR` here: + /// + /// ```C++ + /// class Foo { + /// static const char* BAR; + /// }; + /// ``` + InnerVar, + + /// An edge from a class or struct type to one of its method functions. For + /// example, the edge from `Foo` to `Foo::bar`: + /// + /// ```C++ + /// class Foo { + /// bool bar(int x, int y); + /// }; + /// ``` + Method, + + /// An edge from a class or struct type to one of its constructor + /// functions. For example, the edge from `Foo` to `Foo::Foo(int x, int y)`: + /// + /// ```C++ + /// class Foo { + /// int my_x; + /// int my_y; + /// + /// public: + /// Foo(int x, int y); + /// }; + /// ``` + Constructor, + + /// An edge from a class or struct type to its destructor function. For + /// example, the edge from `Doggo` to `Doggo::~Doggo()`: + /// + /// ```C++ + /// struct Doggo { + /// char* wow; + /// + /// public: + /// ~Doggo(); + /// }; + /// ``` + Destructor, + + /// An edge from a function declaration to its return type. 
For example, the + /// edge from `foo` to `int`: + /// + /// ```C++ + /// int foo(char* string); + /// ``` + FunctionReturn, + + /// An edge from a function declaration to one of its parameter types. For + /// example, the edge from `foo` to `char*`: + /// + /// ```C++ + /// int foo(char* string); + /// ``` + FunctionParameter, + + /// An edge from a static variable to its type. For example, the edge from + /// `FOO` to `const char*`: + /// + /// ```C++ + /// static const char* FOO; + /// ``` + VarType, + + /// An edge from a non-templated alias or typedef to the referenced type. + TypeReference, +} + +/// A predicate to allow visiting only sub-sets of the whole IR graph by +/// excluding certain edges from being followed by the traversal. +/// +/// The predicate must return true if the traversal should follow this edge +/// and visit everything that is reachable through it. +pub(crate) type TraversalPredicate = + for<'a> fn(&'a BindgenContext, Edge) -> bool; + +/// A `TraversalPredicate` implementation that follows all edges, and therefore +/// traversals using this predicate will see the whole IR graph reachable from +/// the traversal's roots. +pub(crate) fn all_edges(_: &BindgenContext, _: Edge) -> bool { + true +} + +/// A `TraversalPredicate` implementation that only follows +/// `EdgeKind::InnerType` edges, and therefore traversals using this predicate +/// will only visit the traversal's roots and their inner types. This is used +/// in no-recursive-allowlist mode, where inner types such as anonymous +/// structs/unions still need to be processed. +pub(crate) fn only_inner_type_edges(_: &BindgenContext, edge: Edge) -> bool { + edge.kind == EdgeKind::InnerType +} + +/// A `TraversalPredicate` implementation that only follows edges to items that +/// are enabled for code generation. This lets us skip considering items for +/// which are not reachable from code generation. +pub(crate) fn codegen_edges(ctx: &BindgenContext, edge: Edge) -> bool { + let cc = &ctx.options().codegen_config; + match edge.kind { + EdgeKind::Generic => { + ctx.resolve_item(edge.to).is_enabled_for_codegen(ctx) + } + + // We statically know the kind of item that non-generic edges can point + // to, so we don't need to actually resolve the item and check + // `Item::is_enabled_for_codegen`. + EdgeKind::TemplateParameterDefinition | + EdgeKind::TemplateArgument | + EdgeKind::TemplateDeclaration | + EdgeKind::BaseMember | + EdgeKind::Field | + EdgeKind::InnerType | + EdgeKind::FunctionReturn | + EdgeKind::FunctionParameter | + EdgeKind::VarType | + EdgeKind::TypeReference => cc.types(), + EdgeKind::InnerVar => cc.vars(), + EdgeKind::Method => cc.methods(), + EdgeKind::Constructor => cc.constructors(), + EdgeKind::Destructor => cc.destructors(), + } +} + +/// The storage for the set of items that have been seen (although their +/// outgoing edges might not have been fully traversed yet) in an active +/// traversal. +pub(crate) trait TraversalStorage<'ctx> { + /// Construct a new instance of this `TraversalStorage`, for a new traversal. + fn new(ctx: &'ctx BindgenContext) -> Self; + + /// Add the given item to the storage. If the item has never been seen + /// before, return `true`. Otherwise, return `false`. + /// + /// The `from` item is the item from which we discovered this item, or is + /// `None` if this item is a root. 
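+ ///
+ /// Hedged sketch (not upstream documentation), using the plain `ItemSet`
+ /// storage implemented just below:
+ ///
+ /// ```ignore
+ /// let mut seen = ItemSet::new();
+ /// assert!(seen.add(None, id));         // first sighting: newly added
+ /// assert!(!seen.add(Some(root), id));  // later sightings: already present
+ /// ```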
+ fn add(&mut self, from: Option, item: ItemId) -> bool; +} + +impl<'ctx> TraversalStorage<'ctx> for ItemSet { + fn new(_: &'ctx BindgenContext) -> Self { + ItemSet::new() + } + + fn add(&mut self, _: Option, item: ItemId) -> bool { + self.insert(item) + } +} + +/// A `TraversalStorage` implementation that keeps track of how we first reached +/// each item. This is useful for providing debug assertions with meaningful +/// diagnostic messages about dangling items. +#[derive(Debug)] +pub(crate) struct Paths<'ctx>(BTreeMap, &'ctx BindgenContext); + +impl<'ctx> TraversalStorage<'ctx> for Paths<'ctx> { + fn new(ctx: &'ctx BindgenContext) -> Self { + Paths(BTreeMap::new(), ctx) + } + + fn add(&mut self, from: Option, item: ItemId) -> bool { + let newly_discovered = + self.0.insert(item, from.unwrap_or(item)).is_none(); + + if self.1.resolve_item_fallible(item).is_none() { + let mut path = vec![]; + let mut current = item; + loop { + let predecessor = *self.0.get(¤t).expect( + "We know we found this item id, so it must have a \ + predecessor", + ); + if predecessor == current { + break; + } + path.push(predecessor); + current = predecessor; + } + path.reverse(); + panic!( + "Found reference to dangling id = {item:?}\nvia path = {path:?}" + ); + } + + newly_discovered + } +} + +/// The queue of seen-but-not-yet-traversed items. +/// +/// Using a FIFO queue with a traversal will yield a breadth-first traversal, +/// while using a LIFO queue will result in a depth-first traversal of the IR +/// graph. +pub(crate) trait TraversalQueue: Default { + /// Add a newly discovered item to the queue. + fn push(&mut self, item: ItemId); + + /// Pop the next item to traverse, if any. + fn next(&mut self) -> Option; +} + +impl TraversalQueue for Vec { + fn push(&mut self, item: ItemId) { + self.push(item); + } + + fn next(&mut self) -> Option { + self.pop() + } +} + +impl TraversalQueue for VecDeque { + fn push(&mut self, item: ItemId) { + self.push_back(item); + } + + fn next(&mut self) -> Option { + self.pop_front() + } +} + +/// Something that can receive edges from a `Trace` implementation. +pub(crate) trait Tracer { + /// Note an edge between items. Called from within a `Trace` implementation. + fn visit_kind(&mut self, item: ItemId, kind: EdgeKind); + + /// A synonym for `tracer.visit_kind(item, EdgeKind::Generic)`. + fn visit(&mut self, item: ItemId) { + self.visit_kind(item, EdgeKind::Generic); + } +} + +impl Tracer for F +where + F: FnMut(ItemId, EdgeKind), +{ + fn visit_kind(&mut self, item: ItemId, kind: EdgeKind) { + (*self)(item, kind); + } +} + +/// Trace all of the outgoing edges to other items. Implementations should call +/// one of `tracer.visit(edge)` or `tracer.visit_kind(edge, EdgeKind::Whatever)` +/// for each of their outgoing edges. +pub(crate) trait Trace { + /// If a particular type needs extra information beyond what it has in + /// `self` and `context` to find its referenced items, its implementation + /// can define this associated type, forcing callers to pass the needed + /// information through. + type Extra; + + /// Trace all of this item's outgoing edges to other items. + fn trace( + &self, + context: &BindgenContext, + tracer: &mut T, + extra: &Self::Extra, + ) where + T: Tracer; +} + +/// An graph traversal of the transitive closure of references between items. +/// +/// See `BindgenContext::allowlisted_items` for more information. 
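+///
+/// Hedged sketch (not upstream documentation; `ctx` and `roots` are assumed
+/// to exist): the queue type selects the traversal order, so pairing the
+/// plain `ItemSet` storage with a `VecDeque` walks breadth-first, while a
+/// `Vec` queue would walk depth-first:
+///
+/// ```ignore
+/// let traversal: ItemTraversal<ItemSet, VecDeque<ItemId>> =
+///     ItemTraversal::new(&ctx, roots, all_edges);
+/// for id in traversal {
+///     // every item reachable from `roots`, in breadth-first order
+/// }
+/// ```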
+pub(crate) struct ItemTraversal<'ctx, Storage, Queue> +where + Storage: TraversalStorage<'ctx>, + Queue: TraversalQueue, +{ + ctx: &'ctx BindgenContext, + + /// The set of items we have seen thus far in this traversal. + seen: Storage, + + /// The set of items that we have seen, but have yet to traverse. + queue: Queue, + + /// The predicate that determines which edges this traversal will follow. + predicate: TraversalPredicate, + + /// The item we are currently traversing. + currently_traversing: Option, +} + +impl<'ctx, Storage, Queue> ItemTraversal<'ctx, Storage, Queue> +where + Storage: TraversalStorage<'ctx>, + Queue: TraversalQueue, +{ + /// Begin a new traversal, starting from the given roots. + pub(crate) fn new( + ctx: &'ctx BindgenContext, + roots: R, + predicate: TraversalPredicate, + ) -> ItemTraversal<'ctx, Storage, Queue> + where + R: IntoIterator, + { + let mut seen = Storage::new(ctx); + let mut queue = Queue::default(); + + for id in roots { + seen.add(None, id); + queue.push(id); + } + + ItemTraversal { + ctx, + seen, + queue, + predicate, + currently_traversing: None, + } + } +} + +impl<'ctx, Storage, Queue> Tracer for ItemTraversal<'ctx, Storage, Queue> +where + Storage: TraversalStorage<'ctx>, + Queue: TraversalQueue, +{ + fn visit_kind(&mut self, item: ItemId, kind: EdgeKind) { + let edge = Edge::new(item, kind); + if !(self.predicate)(self.ctx, edge) { + return; + } + + let is_newly_discovered = + self.seen.add(self.currently_traversing, item); + if is_newly_discovered { + self.queue.push(item); + } + } +} + +impl<'ctx, Storage, Queue> Iterator for ItemTraversal<'ctx, Storage, Queue> +where + Storage: TraversalStorage<'ctx>, + Queue: TraversalQueue, +{ + type Item = ItemId; + + fn next(&mut self) -> Option { + let id = self.queue.next()?; + + let newly_discovered = self.seen.add(None, id); + debug_assert!( + !newly_discovered, + "should have already seen anything we get out of our queue" + ); + debug_assert!( + self.ctx.resolve_item_fallible(id).is_some(), + "should only get IDs of actual items in our context during traversal" + ); + + self.currently_traversing = Some(id); + id.trace(self.ctx, self, &()); + self.currently_traversing = None; + + Some(id) + } +} + +/// An iterator to find any dangling items. +/// +/// See `BindgenContext::assert_no_dangling_item_traversal` for more +/// information. +pub(crate) type AssertNoDanglingItemsTraversal<'ctx> = + ItemTraversal<'ctx, Paths<'ctx>, VecDeque>; diff --git a/vendor/bindgen/ir/ty.rs b/vendor/bindgen/ir/ty.rs new file mode 100644 index 00000000000000..38a7f6344a9d12 --- /dev/null +++ b/vendor/bindgen/ir/ty.rs @@ -0,0 +1,1256 @@ +//! Everything related to types in our intermediate representation. + +use super::comp::CompInfo; +use super::context::{BindgenContext, ItemId, TypeId}; +use super::dot::DotAttributes; +use super::enum_ty::Enum; +use super::function::FunctionSig; +use super::item::{IsOpaque, Item}; +use super::layout::{Layout, Opaque}; +use super::objc::ObjCInterface; +use super::template::{ + AsTemplateParam, TemplateInstantiation, TemplateParameters, +}; +use super::traversal::{EdgeKind, Trace, Tracer}; +use crate::clang::{self, Cursor}; +use crate::parse::{ParseError, ParseResult}; +use std::borrow::Cow; +use std::io; + +pub use super::int::IntKind; + +/// The base representation of a type in bindgen. 
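+///
+/// Hedged illustration (not upstream documentation): a hand-built,
+/// const-qualified `unsigned long` could look roughly like this, although
+/// in this crate types are normally produced by `from_clang_ty` below:
+///
+/// ```ignore
+/// let ty = Type::new(Some("size_t".into()), None, TypeKind::Int(IntKind::ULong), true);
+/// assert!(ty.is_integer() && ty.is_const());
+/// ```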
+/// +/// A type has an optional name, which if present cannot be empty, a `layout` +/// (size, alignment and packedness) if known, a `Kind`, which determines which +/// kind of type it is, and whether the type is const. +#[derive(Debug)] +pub(crate) struct Type { + /// The name of the type, or None if it was an unnamed struct or union. + name: Option, + /// The layout of the type, if known. + layout: Option, + /// The inner kind of the type + kind: TypeKind, + /// Whether this type is const-qualified. + is_const: bool, +} + +/// The maximum number of items in an array for which Rust implements common +/// traits, and so if we have a type containing an array with more than this +/// many items, we won't be able to derive common traits on that type. +/// +pub(crate) const RUST_DERIVE_IN_ARRAY_LIMIT: usize = 32; + +impl Type { + /// Get the underlying `CompInfo` for this type as a mutable reference, or + /// `None` if this is some other kind of type. + pub(crate) fn as_comp_mut(&mut self) -> Option<&mut CompInfo> { + match self.kind { + TypeKind::Comp(ref mut ci) => Some(ci), + _ => None, + } + } + + /// Construct a new `Type`. + pub(crate) fn new( + name: Option, + layout: Option, + kind: TypeKind, + is_const: bool, + ) -> Self { + Type { + name, + layout, + kind, + is_const, + } + } + + /// Which kind of type is this? + pub(crate) fn kind(&self) -> &TypeKind { + &self.kind + } + + /// Get a mutable reference to this type's kind. + pub(crate) fn kind_mut(&mut self) -> &mut TypeKind { + &mut self.kind + } + + /// Get this type's name. + pub(crate) fn name(&self) -> Option<&str> { + self.name.as_deref() + } + + /// Whether this is a block pointer type. + pub(crate) fn is_block_pointer(&self) -> bool { + matches!(self.kind, TypeKind::BlockPointer(..)) + } + + /// Is this an integer type, including `bool` or `char`? + pub(crate) fn is_int(&self) -> bool { + matches!(self.kind, TypeKind::Int(_)) + } + + /// Is this a compound type? + pub(crate) fn is_comp(&self) -> bool { + matches!(self.kind, TypeKind::Comp(..)) + } + + /// Is this a union? + pub(crate) fn is_union(&self) -> bool { + match self.kind { + TypeKind::Comp(ref comp) => comp.is_union(), + _ => false, + } + } + + /// Is this type of kind `TypeKind::TypeParam`? + pub(crate) fn is_type_param(&self) -> bool { + matches!(self.kind, TypeKind::TypeParam) + } + + /// Is this a template instantiation type? + pub(crate) fn is_template_instantiation(&self) -> bool { + matches!(self.kind, TypeKind::TemplateInstantiation(..)) + } + + /// Is this a function type? + pub(crate) fn is_function(&self) -> bool { + matches!(self.kind, TypeKind::Function(..)) + } + + /// Is this an enum type? + pub(crate) fn is_enum(&self) -> bool { + matches!(self.kind, TypeKind::Enum(..)) + } + + /// Is this void? + pub(crate) fn is_void(&self) -> bool { + matches!(self.kind, TypeKind::Void) + } + /// Is this either a builtin or named type? + pub(crate) fn is_builtin_or_type_param(&self) -> bool { + matches!( + self.kind, + TypeKind::Void | + TypeKind::NullPtr | + TypeKind::Function(..) | + TypeKind::Array(..) | + TypeKind::Reference(..) | + TypeKind::Pointer(..) | + TypeKind::Int(..) | + TypeKind::Float(..) | + TypeKind::TypeParam + ) + } + + /// Creates a new named type, with name `name`. + pub(crate) fn named(name: String) -> Self { + let name = if name.is_empty() { None } else { Some(name) }; + Self::new(name, None, TypeKind::TypeParam, false) + } + + /// Is this a floating point type? 
+ pub(crate) fn is_float(&self) -> bool { + matches!(self.kind, TypeKind::Float(..)) + } + + /// Is this a boolean type? + pub(crate) fn is_bool(&self) -> bool { + matches!(self.kind, TypeKind::Int(IntKind::Bool)) + } + + /// Is this an integer type? + pub(crate) fn is_integer(&self) -> bool { + matches!(self.kind, TypeKind::Int(..)) + } + + /// Cast this type to an integer kind, or `None` if it is not an integer + /// type. + pub(crate) fn as_integer(&self) -> Option { + match self.kind { + TypeKind::Int(int_kind) => Some(int_kind), + _ => None, + } + } + + /// Is this a `const` qualified type? + pub(crate) fn is_const(&self) -> bool { + self.is_const + } + + /// Is this an unresolved reference? + pub(crate) fn is_unresolved_ref(&self) -> bool { + matches!(self.kind, TypeKind::UnresolvedTypeRef(_, _, _)) + } + + /// Is this a incomplete array type? + pub(crate) fn is_incomplete_array( + &self, + ctx: &BindgenContext, + ) -> Option { + match self.kind { + TypeKind::Array(item, len) => { + if len == 0 { + Some(item.into()) + } else { + None + } + } + TypeKind::ResolvedTypeRef(inner) => { + ctx.resolve_type(inner).is_incomplete_array(ctx) + } + _ => None, + } + } + + /// What is the layout of this type? + pub(crate) fn layout(&self, ctx: &BindgenContext) -> Option { + self.layout.or_else(|| { + match self.kind { + TypeKind::Comp(ref ci) => ci.layout(ctx), + TypeKind::Array(inner, 0) => Some(Layout::new( + 0, + ctx.resolve_type(inner).layout(ctx)?.align, + )), + // FIXME(emilio): This is a hack for anonymous union templates. + // Use the actual pointer size! + TypeKind::Pointer(..) => Some(Layout::new( + ctx.target_pointer_size(), + ctx.target_pointer_size(), + )), + TypeKind::ResolvedTypeRef(inner) => { + ctx.resolve_type(inner).layout(ctx) + } + _ => None, + } + }) + } + + /// Whether this named type is an invalid C++ identifier. This is done to + /// avoid generating invalid code with some cases we can't handle, see: + /// + /// tests/headers/381-decltype-alias.hpp + pub(crate) fn is_invalid_type_param(&self) -> bool { + match self.kind { + TypeKind::TypeParam => { + let name = self.name().expect("Unnamed named type?"); + !clang::is_valid_identifier(name) + } + _ => false, + } + } + + /// Takes `name`, and returns a suitable identifier representation for it. + fn sanitize_name(name: &str) -> Cow<'_, str> { + if clang::is_valid_identifier(name) { + return Cow::Borrowed(name); + } + + let name = name.replace([' ', ':', '.'], "_"); + Cow::Owned(name) + } + + /// Get this type's sanitized name. + pub(crate) fn sanitized_name<'a>( + &'a self, + ctx: &BindgenContext, + ) -> Option> { + let name_info = match *self.kind() { + TypeKind::Pointer(inner) => Some((inner, Cow::Borrowed("ptr"))), + TypeKind::Reference(inner) => Some((inner, Cow::Borrowed("ref"))), + TypeKind::Array(inner, length) => { + Some((inner, format!("array{length}").into())) + } + _ => None, + }; + if let Some((inner, prefix)) = name_info { + ctx.resolve_item(inner) + .expect_type() + .sanitized_name(ctx) + .map(|name| format!("{prefix}_{name}").into()) + } else { + self.name().map(Self::sanitize_name) + } + } + + /// See [`Self::safe_canonical_type`]. + pub(crate) fn canonical_type<'tr>( + &'tr self, + ctx: &'tr BindgenContext, + ) -> &'tr Type { + self.safe_canonical_type(ctx) + .expect("Should have been resolved after parsing!") + } + + /// Returns the canonical type of this type, that is, the "inner type". 
+ /// + /// For example, for a `typedef`, the canonical type would be the + /// `typedef`ed type, for a template instantiation, would be the template + /// its specializing, and so on. Return None if the type is unresolved. + pub(crate) fn safe_canonical_type<'tr>( + &'tr self, + ctx: &'tr BindgenContext, + ) -> Option<&'tr Type> { + match self.kind { + TypeKind::TypeParam | + TypeKind::Array(..) | + TypeKind::Vector(..) | + TypeKind::Comp(..) | + TypeKind::Opaque | + TypeKind::Int(..) | + TypeKind::Float(..) | + TypeKind::Complex(..) | + TypeKind::Function(..) | + TypeKind::Enum(..) | + TypeKind::Reference(..) | + TypeKind::Void | + TypeKind::NullPtr | + TypeKind::Pointer(..) | + TypeKind::BlockPointer(..) | + TypeKind::ObjCId | + TypeKind::ObjCSel | + TypeKind::ObjCInterface(..) => Some(self), + + TypeKind::ResolvedTypeRef(inner) | + TypeKind::Alias(inner) | + TypeKind::TemplateAlias(inner, _) => { + ctx.resolve_type(inner).safe_canonical_type(ctx) + } + TypeKind::TemplateInstantiation(ref inst) => ctx + .resolve_type(inst.template_definition()) + .safe_canonical_type(ctx), + + TypeKind::UnresolvedTypeRef(..) => None, + } + } + + /// There are some types we don't want to stop at when finding an opaque + /// item, so we can arrive to the proper item that needs to be generated. + pub(crate) fn should_be_traced_unconditionally(&self) -> bool { + matches!( + self.kind, + TypeKind::Comp(..) | + TypeKind::Function(..) | + TypeKind::Pointer(..) | + TypeKind::Array(..) | + TypeKind::Reference(..) | + TypeKind::TemplateInstantiation(..) | + TypeKind::ResolvedTypeRef(..) + ) + } +} + +impl IsOpaque for Type { + type Extra = Item; + + fn is_opaque(&self, ctx: &BindgenContext, item: &Item) -> bool { + match self.kind { + TypeKind::Opaque => true, + TypeKind::TemplateInstantiation(ref inst) => { + inst.is_opaque(ctx, item) + } + TypeKind::Comp(ref comp) => comp.is_opaque(ctx, &self.layout), + TypeKind::ResolvedTypeRef(to) => to.is_opaque(ctx, &()), + _ => false, + } + } +} + +impl AsTemplateParam for Type { + type Extra = Item; + + fn as_template_param( + &self, + ctx: &BindgenContext, + item: &Item, + ) -> Option { + self.kind.as_template_param(ctx, item) + } +} + +impl AsTemplateParam for TypeKind { + type Extra = Item; + + fn as_template_param( + &self, + ctx: &BindgenContext, + item: &Item, + ) -> Option { + match *self { + TypeKind::TypeParam => Some(item.id().expect_type_id(ctx)), + TypeKind::ResolvedTypeRef(id) => id.as_template_param(ctx, &()), + _ => None, + } + } +} + +impl DotAttributes for Type { + fn dot_attributes( + &self, + ctx: &BindgenContext, + out: &mut W, + ) -> io::Result<()> + where + W: io::Write, + { + if let Some(ref layout) = self.layout { + writeln!( + out, + "size{} + align{}", + layout.size, layout.align + )?; + if layout.packed { + writeln!(out, "packedtrue")?; + } + } + + if self.is_const { + writeln!(out, "consttrue")?; + } + + self.kind.dot_attributes(ctx, out) + } +} + +impl DotAttributes for TypeKind { + fn dot_attributes( + &self, + ctx: &BindgenContext, + out: &mut W, + ) -> io::Result<()> + where + W: io::Write, + { + writeln!( + out, + "type kind{}", + self.kind_name() + )?; + + if let TypeKind::Comp(ref comp) = *self { + comp.dot_attributes(ctx, out)?; + } + + Ok(()) + } +} + +impl TypeKind { + fn kind_name(&self) -> &'static str { + match *self { + TypeKind::Void => "Void", + TypeKind::NullPtr => "NullPtr", + TypeKind::Comp(..) => "Comp", + TypeKind::Opaque => "Opaque", + TypeKind::Int(..) => "Int", + TypeKind::Float(..) => "Float", + TypeKind::Complex(..) 
=> "Complex", + TypeKind::Alias(..) => "Alias", + TypeKind::TemplateAlias(..) => "TemplateAlias", + TypeKind::Array(..) => "Array", + TypeKind::Vector(..) => "Vector", + TypeKind::Function(..) => "Function", + TypeKind::Enum(..) => "Enum", + TypeKind::Pointer(..) => "Pointer", + TypeKind::BlockPointer(..) => "BlockPointer", + TypeKind::Reference(..) => "Reference", + TypeKind::TemplateInstantiation(..) => "TemplateInstantiation", + TypeKind::UnresolvedTypeRef(..) => "UnresolvedTypeRef", + TypeKind::ResolvedTypeRef(..) => "ResolvedTypeRef", + TypeKind::TypeParam => "TypeParam", + TypeKind::ObjCInterface(..) => "ObjCInterface", + TypeKind::ObjCId => "ObjCId", + TypeKind::ObjCSel => "ObjCSel", + } + } +} + +#[test] +fn is_invalid_type_param_valid() { + let ty = Type::new(Some("foo".into()), None, TypeKind::TypeParam, false); + assert!(!ty.is_invalid_type_param()); +} + +#[test] +fn is_invalid_type_param_valid_underscore_and_numbers() { + let ty = Type::new( + Some("_foo123456789_".into()), + None, + TypeKind::TypeParam, + false, + ); + assert!(!ty.is_invalid_type_param()); +} + +#[test] +fn is_invalid_type_param_valid_unnamed_kind() { + let ty = Type::new(Some("foo".into()), None, TypeKind::Void, false); + assert!(!ty.is_invalid_type_param()); +} + +#[test] +fn is_invalid_type_param_invalid_start() { + let ty = Type::new(Some("1foo".into()), None, TypeKind::TypeParam, false); + assert!(ty.is_invalid_type_param()); +} + +#[test] +fn is_invalid_type_param_invalid_remaining() { + let ty = Type::new(Some("foo-".into()), None, TypeKind::TypeParam, false); + assert!(ty.is_invalid_type_param()); +} + +#[test] +#[should_panic(expected = "Unnamed named type")] +fn is_invalid_type_param_unnamed() { + let ty = Type::new(None, None, TypeKind::TypeParam, false); + assert!(ty.is_invalid_type_param()); +} + +#[test] +fn is_invalid_type_param_empty_name() { + let ty = Type::new(Some(String::new()), None, TypeKind::TypeParam, false); + assert!(ty.is_invalid_type_param()); +} + +impl TemplateParameters for Type { + fn self_template_params(&self, ctx: &BindgenContext) -> Vec { + self.kind.self_template_params(ctx) + } +} + +impl TemplateParameters for TypeKind { + fn self_template_params(&self, ctx: &BindgenContext) -> Vec { + match *self { + TypeKind::ResolvedTypeRef(id) => { + ctx.resolve_type(id).self_template_params(ctx) + } + TypeKind::Comp(ref comp) => comp.self_template_params(ctx), + TypeKind::TemplateAlias(_, ref args) => args.clone(), + + TypeKind::Opaque | + TypeKind::TemplateInstantiation(..) | + TypeKind::Void | + TypeKind::NullPtr | + TypeKind::Int(_) | + TypeKind::Float(_) | + TypeKind::Complex(_) | + TypeKind::Array(..) | + TypeKind::Vector(..) | + TypeKind::Function(_) | + TypeKind::Enum(_) | + TypeKind::Pointer(_) | + TypeKind::BlockPointer(_) | + TypeKind::Reference(_) | + TypeKind::UnresolvedTypeRef(..) | + TypeKind::TypeParam | + TypeKind::Alias(_) | + TypeKind::ObjCId | + TypeKind::ObjCSel | + TypeKind::ObjCInterface(_) => vec![], + } + } +} + +/// The kind of float this type represents. +#[derive(Debug, Copy, Clone, PartialEq, Eq)] +pub(crate) enum FloatKind { + /// A half (`_Float16` or `__fp16`) + Float16, + /// A `float`. + Float, + /// A `double`. + Double, + /// A `long double`. + LongDouble, + /// A `__float128`. + Float128, +} + +/// The different kinds of types that we can parse. +#[derive(Debug)] +pub(crate) enum TypeKind { + /// The void type. + Void, + + /// The `nullptr_t` type. + NullPtr, + + /// A compound type, that is, a class, struct, or union. 
+ Comp(CompInfo), + + /// An opaque type that we just don't understand. All usage of this should + /// result in an opaque blob of bytes generated from the containing type's + /// layout. + Opaque, + + /// An integer type, of a given kind. `bool` and `char` are also considered + /// integers. + Int(IntKind), + + /// A floating point type. + Float(FloatKind), + + /// A complex floating point type. + Complex(FloatKind), + + /// A type alias, with a name, that points to another type. + Alias(TypeId), + + /// A templated alias, pointing to an inner type, just as `Alias`, but with + /// template parameters. + TemplateAlias(TypeId, Vec), + + /// A packed vector type: element type, number of elements + Vector(TypeId, usize), + + /// An array of a type and a length. + Array(TypeId, usize), + + /// A function type, with a given signature. + Function(FunctionSig), + + /// An `enum` type. + Enum(Enum), + + /// A pointer to a type. The bool field represents whether it's const or + /// not. + Pointer(TypeId), + + /// A pointer to an Apple block. + BlockPointer(TypeId), + + /// A reference to a type, as in: int& `foo()`. + Reference(TypeId), + + /// An instantiation of an abstract template definition with a set of + /// concrete template arguments. + TemplateInstantiation(TemplateInstantiation), + + /// A reference to a yet-to-resolve type. This stores the clang cursor + /// itself, and postpones its resolution. + /// + /// These are gone in a phase after parsing where these are mapped to + /// already known types, and are converted to `ResolvedTypeRef`. + /// + /// see tests/headers/typeref.hpp to see somewhere where this is a problem. + UnresolvedTypeRef(clang::Type, Cursor, /* parent_id */ Option), + + /// An indirection to another type. + /// + /// These are generated after we resolve a forward declaration, or when we + /// replace one type with another. + ResolvedTypeRef(TypeId), + + /// A named type, that is, a template parameter. + TypeParam, + + /// Objective C interface. Always referenced through a pointer + ObjCInterface(ObjCInterface), + + /// Objective C 'id' type, points to any object + ObjCId, + + /// Objective C selector type + ObjCSel, +} + +impl Type { + /// This is another of the nasty methods. This one is the one that takes + /// care of the core logic of converting a clang type to a `Type`. + /// + /// It's sort of nasty and full of special-casing, but hopefully the + /// comments in every special case justify why they're there. 
+ pub(crate) fn from_clang_ty( + potential_id: ItemId, + ty: &clang::Type, + location: Cursor, + parent_id: Option, + ctx: &mut BindgenContext, + ) -> Result, ParseError> { + use clang_sys::*; + { + let already_resolved = ctx.builtin_or_resolved_ty( + potential_id, + parent_id, + ty, + Some(location), + ); + if let Some(ty) = already_resolved { + debug!("{ty:?} already resolved: {location:?}"); + return Ok(ParseResult::AlreadyResolved(ty.into())); + } + } + + let layout = ty.fallible_layout(ctx).ok(); + let cursor = ty.declaration(); + let is_anonymous = cursor.is_anonymous(); + let mut name = if is_anonymous { + None + } else { + Some(cursor.spelling()).filter(|n| !n.is_empty()) + }; + + debug!( + "from_clang_ty: {potential_id:?}, ty: {ty:?}, loc: {location:?}" + ); + debug!("currently_parsed_types: {:?}", ctx.currently_parsed_types()); + + let canonical_ty = ty.canonical_type(); + + // Parse objc protocols as if they were interfaces + let mut ty_kind = ty.kind(); + match location.kind() { + CXCursor_ObjCProtocolDecl | CXCursor_ObjCCategoryDecl => { + ty_kind = CXType_ObjCInterface; + } + _ => {} + } + + // Objective C template type parameter + // FIXME: This is probably wrong, we are attempting to find the + // objc template params, which seem to manifest as a typedef. + // We are rewriting them as ID to suppress multiple conflicting + // typedefs at root level + if ty_kind == CXType_Typedef { + let is_template_type_param = + ty.declaration().kind() == CXCursor_TemplateTypeParameter; + let is_canonical_objcpointer = + canonical_ty.kind() == CXType_ObjCObjectPointer; + + // We have found a template type for objc interface + if is_canonical_objcpointer && is_template_type_param { + // Objective-C generics are just ids with fancy name. + // To keep it simple, just name them ids + name = Some("id".to_owned()); + } + } + + if location.kind() == CXCursor_ClassTemplatePartialSpecialization { + // Sorry! (Not sorry) + warn!( + "Found a partial template specialization; bindgen does not \ + support partial template specialization! Constructing \ + opaque type instead." + ); + return Ok(ParseResult::New( + Opaque::from_clang_ty(&canonical_ty, ctx), + None, + )); + } + + let kind = if location.kind() == CXCursor_TemplateRef || + (ty.template_args().is_some() && ty_kind != CXType_Typedef) + { + // This is a template instantiation. + match TemplateInstantiation::from_ty(ty, ctx) { + Some(inst) => TypeKind::TemplateInstantiation(inst), + None => TypeKind::Opaque, + } + } else { + match ty_kind { + CXType_Unexposed + if *ty != canonical_ty && + canonical_ty.kind() != CXType_Invalid && + ty.ret_type().is_none() && + // Sometime clang desugars some types more than + // what we need, specially with function + // pointers. + // + // We should also try the solution of inverting + // those checks instead of doing this, that is, + // something like: + // + // CXType_Unexposed if ty.ret_type().is_some() + // => { ... } + // + // etc. + !canonical_ty.spelling().contains("type-parameter") => + { + debug!("Looking for canonical type: {canonical_ty:?}"); + return Self::from_clang_ty( + potential_id, + &canonical_ty, + location, + parent_id, + ctx, + ); + } + CXType_Unexposed | CXType_Invalid => { + // For some reason Clang doesn't give us any hint in some + // situations where we should generate a function pointer (see + // tests/headers/func_ptr_in_struct.h), so we do a guess here + // trying to see if it has a valid return type. 
+ if ty.ret_type().is_some() { + let signature = + FunctionSig::from_ty(ty, &location, ctx)?; + TypeKind::Function(signature) + // Same here, with template specialisations we can safely + // assume this is a Comp(..) + } else if ty.is_fully_instantiated_template() { + debug!("Template specialization: {ty:?}, {location:?} {canonical_ty:?}"); + let complex = CompInfo::from_ty( + potential_id, + ty, + Some(location), + ctx, + ) + .expect("C'mon"); + TypeKind::Comp(complex) + } else { + match location.kind() { + CXCursor_CXXBaseSpecifier | + CXCursor_ClassTemplate => { + if location.kind() == CXCursor_CXXBaseSpecifier + { + // In the case we're parsing a base specifier + // inside an unexposed or invalid type, it means + // that we're parsing one of two things: + // + // * A template parameter. + // * A complex class that isn't exposed. + // + // This means, unfortunately, that there's no + // good way to differentiate between them. + // + // Probably we could try to look at the + // declaration and complicate more this logic, + // but we'll keep it simple... if it's a valid + // C++ identifier, we'll consider it as a + // template parameter. + // + // This is because: + // + // * We expect every other base that is a + // proper identifier (that is, a simple + // struct/union declaration), to be exposed, + // so this path can't be reached in that + // case. + // + // * Quite conveniently, complex base + // specifiers preserve their full names (that + // is: Foo instead of Foo). We can take + // advantage of this. + // + // If we find some edge case where this doesn't + // work (which I guess is unlikely, see the + // different test cases[1][2][3][4]), we'd need + // to find more creative ways of differentiating + // these two cases. + // + // [1]: inherit_named.hpp + // [2]: forward-inherit-struct-with-fields.hpp + // [3]: forward-inherit-struct.hpp + // [4]: inherit-namespaced.hpp + if location.spelling().chars().all(|c| { + c.is_alphanumeric() || c == '_' + }) { + return Err(ParseError::Recurse); + } + } else { + name = Some(location.spelling()); + } + + let complex = CompInfo::from_ty( + potential_id, + ty, + Some(location), + ctx, + ); + if let Ok(complex) = complex { + TypeKind::Comp(complex) + } else { + warn!( + "Could not create complex type \ + from class template or base \ + specifier, using opaque blob" + ); + let opaque = Opaque::from_clang_ty(ty, ctx); + return Ok(ParseResult::New(opaque, None)); + } + } + CXCursor_TypeAliasTemplateDecl => { + debug!("TypeAliasTemplateDecl"); + + // We need to manually unwind this one. 
+ let mut inner = Err(ParseError::Continue); + let mut args = vec![]; + + location.visit(|cur| { + match cur.kind() { + CXCursor_TypeAliasDecl => { + let current = cur.cur_type(); + + debug_assert_eq!( + current.kind(), + CXType_Typedef + ); + + name = Some(location.spelling()); + + let inner_ty = cur + .typedef_type() + .expect("Not valid Type?"); + inner = Ok(Item::from_ty_or_ref( + inner_ty, + cur, + Some(potential_id), + ctx, + )); + } + CXCursor_TemplateTypeParameter => { + let param = Item::type_param( + None, cur, ctx, + ) + .expect( + "Item::type_param shouldn't \ + ever fail if we are looking \ + at a TemplateTypeParameter", + ); + args.push(param); + } + _ => {} + } + CXChildVisit_Continue + }); + + let Ok(inner_type) = inner else { + warn!( + "Failed to parse template alias \ + {:?}", + location + ); + return Err(ParseError::Continue); + }; + + TypeKind::TemplateAlias(inner_type, args) + } + CXCursor_TemplateRef => { + let referenced = location.referenced().unwrap(); + let referenced_ty = referenced.cur_type(); + + debug!("TemplateRef: location = {location:?}; referenced = {referenced:?}; referenced_ty = {referenced_ty:?}"); + + return Self::from_clang_ty( + potential_id, + &referenced_ty, + referenced, + parent_id, + ctx, + ); + } + CXCursor_TypeRef => { + let referenced = location.referenced().unwrap(); + let referenced_ty = referenced.cur_type(); + let declaration = referenced_ty.declaration(); + + debug!("TypeRef: location = {location:?}; referenced = {referenced:?}; referenced_ty = {referenced_ty:?}"); + + let id = Item::from_ty_or_ref_with_id( + potential_id, + referenced_ty, + declaration, + parent_id, + ctx, + ); + return Ok(ParseResult::AlreadyResolved( + id.into(), + )); + } + CXCursor_NamespaceRef => { + return Err(ParseError::Continue); + } + _ => { + if ty.kind() == CXType_Unexposed { + warn!("Unexposed type {ty:?}, recursing inside, loc: {location:?}"); + return Err(ParseError::Recurse); + } + + warn!("invalid type {ty:?}"); + return Err(ParseError::Continue); + } + } + } + } + CXType_Auto => { + if canonical_ty == *ty { + debug!("Couldn't find deduced type: {ty:?}"); + return Err(ParseError::Continue); + } + + return Self::from_clang_ty( + potential_id, + &canonical_ty, + location, + parent_id, + ctx, + ); + } + // NOTE: We don't resolve pointers eagerly because the pointee type + // might not have been parsed, and if it contains templates or + // something else we might get confused, see the comment inside + // TypeRef. + // + // We might need to, though, if the context is already in the + // process of resolving them. + CXType_ObjCObjectPointer | + CXType_MemberPointer | + CXType_Pointer => { + let mut pointee = ty.pointee_type().unwrap(); + if *ty != canonical_ty { + let canonical_pointee = + canonical_ty.pointee_type().unwrap(); + // clang sometimes loses pointee constness here, see + // #2244. + if canonical_pointee.is_const() != pointee.is_const() { + pointee = canonical_pointee; + } + } + let inner = + Item::from_ty_or_ref(pointee, location, None, ctx); + TypeKind::Pointer(inner) + } + CXType_BlockPointer => { + let pointee = ty.pointee_type().expect("Not valid Type?"); + let inner = + Item::from_ty_or_ref(pointee, location, None, ctx); + TypeKind::BlockPointer(inner) + } + // XXX: RValueReference is most likely wrong, but I don't think we + // can even add bindings for that, so huh. 
+ CXType_RValueReference | CXType_LValueReference => { + let inner = Item::from_ty_or_ref( + ty.pointee_type().unwrap(), + location, + None, + ctx, + ); + TypeKind::Reference(inner) + } + // XXX DependentSizedArray is wrong + CXType_VariableArray | CXType_DependentSizedArray => { + let inner = Item::from_ty( + ty.elem_type().as_ref().unwrap(), + location, + None, + ctx, + ) + .expect("Not able to resolve array element?"); + TypeKind::Pointer(inner) + } + CXType_IncompleteArray => { + let inner = Item::from_ty( + ty.elem_type().as_ref().unwrap(), + location, + None, + ctx, + ) + .expect("Not able to resolve array element?"); + TypeKind::Array(inner, 0) + } + CXType_FunctionNoProto | CXType_FunctionProto => { + let signature = FunctionSig::from_ty(ty, &location, ctx)?; + TypeKind::Function(signature) + } + CXType_Typedef => { + let inner = cursor.typedef_type().expect("Not valid Type?"); + let inner_id = + Item::from_ty_or_ref(inner, location, None, ctx); + if inner_id == potential_id { + warn!( + "Generating opaque type instead of self-referential \ + typedef"); + // This can happen if we bail out of recursive situations + // within the clang parsing. + TypeKind::Opaque + } else { + // Check if this type definition is an alias to a pointer of a `struct` / + // `union` / `enum` with the same name and add the `_ptr` suffix to it to + // avoid name collisions. + if let Some(ref mut name) = name { + if inner.kind() == CXType_Pointer && + !ctx.options().c_naming + { + let pointee = inner.pointee_type().unwrap(); + if pointee.kind() == CXType_Elaborated && + pointee.declaration().spelling() == *name + { + *name += "_ptr"; + } + } + } + TypeKind::Alias(inner_id) + } + } + CXType_Enum => { + let enum_ = Enum::from_ty(ty, ctx).expect("Not an enum?"); + + if !is_anonymous { + let pretty_name = ty.spelling(); + if clang::is_valid_identifier(&pretty_name) { + name = Some(pretty_name); + } + } + + TypeKind::Enum(enum_) + } + CXType_Record => { + let complex = CompInfo::from_ty( + potential_id, + ty, + Some(location), + ctx, + ) + .expect("Not a complex type?"); + + if !is_anonymous { + // The pretty-printed name may contain typedefed name, + // but may also be "struct (anonymous at .h:1)" + let pretty_name = ty.spelling(); + if clang::is_valid_identifier(&pretty_name) { + name = Some(pretty_name); + } + } + + TypeKind::Comp(complex) + } + CXType_Vector | CXType_ExtVector => { + let inner = Item::from_ty( + ty.elem_type().as_ref().unwrap(), + location, + None, + ctx, + )?; + TypeKind::Vector(inner, ty.num_elements().unwrap()) + } + CXType_ConstantArray => { + let inner = Item::from_ty( + ty.elem_type().as_ref().unwrap(), + location, + None, + ctx, + ) + .expect("Not able to resolve array element?"); + TypeKind::Array(inner, ty.num_elements().unwrap()) + } + CXType_Atomic => { + // TODO(emilio): Maybe we can preserve the "is atomic" bit somehow and generate + // something more useful... But for now this is better than panicking or + // generating nothing. 
+ return Self::from_clang_ty( + potential_id, + &ty.atomic_value_type(), + location, + parent_id, + ctx, + ); + } + CXType_Elaborated => { + return Self::from_clang_ty( + potential_id, + &ty.named(), + location, + parent_id, + ctx, + ); + } + CXType_ObjCId => TypeKind::ObjCId, + CXType_ObjCSel => TypeKind::ObjCSel, + CXType_ObjCClass | CXType_ObjCInterface => { + let interface = ObjCInterface::from_ty(&location, ctx) + .expect("Not a valid objc interface?"); + if !is_anonymous { + name = Some(interface.rust_name()); + } + TypeKind::ObjCInterface(interface) + } + CXType_Dependent => { + return Err(ParseError::Continue); + } + _ => { + warn!( + "unsupported type: kind = {:?}; ty = {ty:?}; at {location:?}", + ty.kind(), + ); + return Err(ParseError::Continue); + } + } + }; + + name = name.filter(|n| !n.is_empty()); + + let is_const = ty.is_const() || + (ty.kind() == CXType_ConstantArray && + ty.elem_type().is_some_and(|element| element.is_const())); + + let ty = Type::new(name, layout, kind, is_const); + // TODO: maybe declaration.canonical()? + Ok(ParseResult::New(ty, Some(cursor.canonical()))) + } +} + +impl Trace for Type { + type Extra = Item; + + fn trace(&self, context: &BindgenContext, tracer: &mut T, item: &Item) + where + T: Tracer, + { + if self.name().is_some_and(|name| context.is_stdint_type(name)) { + // These types are special-cased in codegen and don't need to be traversed. + return; + } + match *self.kind() { + TypeKind::Pointer(inner) | + TypeKind::Reference(inner) | + TypeKind::Array(inner, _) | + TypeKind::Vector(inner, _) | + TypeKind::BlockPointer(inner) | + TypeKind::Alias(inner) | + TypeKind::ResolvedTypeRef(inner) => { + tracer.visit_kind(inner.into(), EdgeKind::TypeReference); + } + TypeKind::TemplateAlias(inner, ref template_params) => { + tracer.visit_kind(inner.into(), EdgeKind::TypeReference); + for param in template_params { + tracer.visit_kind( + param.into(), + EdgeKind::TemplateParameterDefinition, + ); + } + } + TypeKind::TemplateInstantiation(ref inst) => { + inst.trace(context, tracer, &()); + } + TypeKind::Comp(ref ci) => ci.trace(context, tracer, item), + TypeKind::Function(ref sig) => sig.trace(context, tracer, &()), + TypeKind::Enum(ref en) => { + if let Some(repr) = en.repr() { + tracer.visit(repr.into()); + } + } + TypeKind::UnresolvedTypeRef(_, _, Some(id)) => { + tracer.visit(id); + } + + TypeKind::ObjCInterface(ref interface) => { + interface.trace(context, tracer, &()); + } + + // None of these variants have edges to other items and types. + TypeKind::Opaque | + TypeKind::UnresolvedTypeRef(_, _, None) | + TypeKind::TypeParam | + TypeKind::Void | + TypeKind::NullPtr | + TypeKind::Int(_) | + TypeKind::Float(_) | + TypeKind::Complex(_) | + TypeKind::ObjCId | + TypeKind::ObjCSel => {} + } + } +} diff --git a/vendor/bindgen/ir/var.rs b/vendor/bindgen/ir/var.rs new file mode 100644 index 00000000000000..45f4ba1ba01a33 --- /dev/null +++ b/vendor/bindgen/ir/var.rs @@ -0,0 +1,523 @@ +//! Intermediate representation of variables. + +use super::super::codegen::MacroTypeVariation; +use super::context::{BindgenContext, TypeId}; +use super::dot::DotAttributes; +use super::function::cursor_mangling; +use super::int::IntKind; +use super::item::Item; +use super::ty::{FloatKind, TypeKind}; +use crate::callbacks::{ItemInfo, ItemKind, MacroParsingBehavior}; +use crate::clang; +use crate::clang::ClangToken; +use crate::parse::{ClangSubItemParser, ParseError, ParseResult}; + +use std::io; +use std::num::Wrapping; + +/// The type for a constant variable. 
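+///
+/// Hedged examples (not upstream documentation) of how simple `#define`s
+/// end up mapped onto these variants by the parsing code later in this
+/// file:
+///
+/// ```ignore
+/// // #define ANSWER 42      -> VarType::Int(42)
+/// // #define PI 3.14        -> VarType::Float(3.14)
+/// // #define GREETING "hi"  -> VarType::String(b"hi".to_vec())
+/// ```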
+#[derive(Debug)] +pub(crate) enum VarType { + /// A boolean. + Bool(bool), + /// An integer. + Int(i64), + /// A floating point number. + Float(f64), + /// A character. + Char(u8), + /// A string, not necessarily well-formed utf-8. + String(Vec), +} + +/// A `Var` is our intermediate representation of a variable. +#[derive(Debug)] +pub(crate) struct Var { + /// The name of the variable. + name: String, + /// The mangled name of the variable. + mangled_name: Option, + /// The link name of the variable. + link_name: Option, + /// The type of the variable. + ty: TypeId, + /// The value of the variable, that needs to be suitable for `ty`. + val: Option, + /// Whether this variable is const. + is_const: bool, +} + +impl Var { + /// Construct a new `Var`. + pub(crate) fn new( + name: String, + mangled_name: Option, + link_name: Option, + ty: TypeId, + val: Option, + is_const: bool, + ) -> Var { + assert!(!name.is_empty()); + Var { + name, + mangled_name, + link_name, + ty, + val, + is_const, + } + } + + /// Is this variable `const` qualified? + pub(crate) fn is_const(&self) -> bool { + self.is_const + } + + /// The value of this constant variable, if any. + pub(crate) fn val(&self) -> Option<&VarType> { + self.val.as_ref() + } + + /// Get this variable's type. + pub(crate) fn ty(&self) -> TypeId { + self.ty + } + + /// Get this variable's name. + pub(crate) fn name(&self) -> &str { + &self.name + } + + /// Get this variable's mangled name. + pub(crate) fn mangled_name(&self) -> Option<&str> { + self.mangled_name.as_deref() + } + + /// Get this variable's link name. + pub fn link_name(&self) -> Option<&str> { + self.link_name.as_deref() + } +} + +impl DotAttributes for Var { + fn dot_attributes( + &self, + _ctx: &BindgenContext, + out: &mut W, + ) -> io::Result<()> + where + W: io::Write, + { + if self.is_const { + writeln!(out, "consttrue")?; + } + + if let Some(ref mangled) = self.mangled_name { + writeln!(out, "mangled name{mangled}")?; + } + + Ok(()) + } +} + +fn default_macro_constant_type(ctx: &BindgenContext, value: i64) -> IntKind { + if value < 0 || + ctx.options().default_macro_constant_type == + MacroTypeVariation::Signed + { + if value < i64::from(i32::MIN) || value > i64::from(i32::MAX) { + IntKind::I64 + } else if !ctx.options().fit_macro_constants || + value < i64::from(i16::MIN) || + value > i64::from(i16::MAX) + { + IntKind::I32 + } else if value < i64::from(i8::MIN) || value > i64::from(i8::MAX) { + IntKind::I16 + } else { + IntKind::I8 + } + } else if value > i64::from(u32::MAX) { + IntKind::U64 + } else if !ctx.options().fit_macro_constants || value > i64::from(u16::MAX) + { + IntKind::U32 + } else if value > i64::from(u8::MAX) { + IntKind::U16 + } else { + IntKind::U8 + } +} + +/// Parses tokens from a `CXCursor_MacroDefinition` pointing into a function-like +/// macro, and calls the `func_macro` callback. +fn handle_function_macro( + cursor: &clang::Cursor, + callbacks: &dyn crate::callbacks::ParseCallbacks, +) { + let is_closing_paren = |t: &ClangToken| { + // Test cheap token kind before comparing exact spellings. + t.kind == clang_sys::CXToken_Punctuation && t.spelling() == b")" + }; + let tokens: Vec<_> = cursor.tokens().iter().collect(); + if let Some(boundary) = tokens.iter().position(is_closing_paren) { + let mut spelled = tokens.iter().map(ClangToken::spelling); + // Add 1, to convert index to length. 
+ let left = spelled.by_ref().take(boundary + 1); + let left = left.collect::>().concat(); + if let Ok(left) = String::from_utf8(left) { + let right: Vec<_> = spelled.collect(); + callbacks.func_macro(&left, &right); + } + } +} + +impl ClangSubItemParser for Var { + fn parse( + cursor: clang::Cursor, + ctx: &mut BindgenContext, + ) -> Result, ParseError> { + use cexpr::expr::EvalResult; + use cexpr::literal::CChar; + use clang_sys::*; + match cursor.kind() { + CXCursor_MacroDefinition => { + for callbacks in &ctx.options().parse_callbacks { + match callbacks.will_parse_macro(&cursor.spelling()) { + MacroParsingBehavior::Ignore => { + return Err(ParseError::Continue); + } + MacroParsingBehavior::Default => {} + } + + if cursor.is_macro_function_like() { + handle_function_macro(&cursor, callbacks.as_ref()); + // We handled the macro, skip macro processing below. + return Err(ParseError::Continue); + } + } + + let value = parse_macro(ctx, &cursor); + + let Some((id, value)) = value else { + return Err(ParseError::Continue); + }; + + assert!(!id.is_empty(), "Empty macro name?"); + + let previously_defined = ctx.parsed_macro(&id); + + // NB: It's important to "note" the macro even if the result is + // not an integer, otherwise we might loose other kind of + // derived macros. + ctx.note_parsed_macro(id.clone(), value.clone()); + + if previously_defined { + let name = String::from_utf8(id).unwrap(); + duplicated_macro_diagnostic(&name, cursor.location(), ctx); + return Err(ParseError::Continue); + } + + // NOTE: Unwrapping, here and above, is safe, because the + // identifier of a token comes straight from clang, and we + // enforce utf8 there, so we should have already panicked at + // this point. + let name = String::from_utf8(id).unwrap(); + let (type_kind, val) = match value { + EvalResult::Invalid => return Err(ParseError::Continue), + EvalResult::Float(f) => { + (TypeKind::Float(FloatKind::Double), VarType::Float(f)) + } + EvalResult::Char(c) => { + let c = match c { + CChar::Char(c) => { + assert_eq!(c.len_utf8(), 1); + c as u8 + } + CChar::Raw(c) => u8::try_from(c).unwrap(), + }; + + (TypeKind::Int(IntKind::U8), VarType::Char(c)) + } + EvalResult::Str(val) => { + let char_ty = Item::builtin_type( + TypeKind::Int(IntKind::U8), + true, + ctx, + ); + for callbacks in &ctx.options().parse_callbacks { + callbacks.str_macro(&name, &val); + } + (TypeKind::Pointer(char_ty), VarType::String(val)) + } + EvalResult::Int(Wrapping(value)) => { + let kind = ctx + .options() + .last_callback(|c| c.int_macro(&name, value)) + .unwrap_or_else(|| { + default_macro_constant_type(ctx, value) + }); + + (TypeKind::Int(kind), VarType::Int(value)) + } + }; + + let ty = Item::builtin_type(type_kind, true, ctx); + + Ok(ParseResult::New( + Var::new(name, None, None, ty, Some(val), true), + Some(cursor), + )) + } + CXCursor_VarDecl => { + let mut name = cursor.spelling(); + if cursor.linkage() == CXLinkage_External { + if let Some(nm) = ctx.options().last_callback(|callbacks| { + callbacks.generated_name_override(ItemInfo { + name: name.as_str(), + kind: ItemKind::Var, + }) + }) { + name = nm; + } + } + // No more changes to name + let name = name; + + if name.is_empty() { + warn!("Empty constant name?"); + return Err(ParseError::Continue); + } + + let link_name = ctx.options().last_callback(|callbacks| { + callbacks.generated_link_name_override(ItemInfo { + name: name.as_str(), + kind: ItemKind::Var, + }) + }); + + let ty = cursor.cur_type(); + + // TODO(emilio): do we have to special-case constant arrays in + // 
some other places? + let is_const = ty.is_const() || + ([CXType_ConstantArray, CXType_IncompleteArray] + .contains(&ty.kind()) && + ty.elem_type() + .is_some_and(|element| element.is_const())); + + let ty = match Item::from_ty(&ty, cursor, None, ctx) { + Ok(ty) => ty, + Err(e) => { + assert!( + matches!(ty.kind(), CXType_Auto | CXType_Unexposed), + "Couldn't resolve constant type, and it \ + wasn't an nondeductible auto type or unexposed \ + type: {ty:?}" + ); + return Err(e); + } + }; + + // Note: Ty might not be totally resolved yet, see + // tests/headers/inner_const.hpp + // + // That's fine because in that case we know it's not a literal. + let canonical_ty = ctx + .safe_resolve_type(ty) + .and_then(|t| t.safe_canonical_type(ctx)); + + let is_integer = canonical_ty.is_some_and(|t| t.is_integer()); + let is_float = canonical_ty.is_some_and(|t| t.is_float()); + + // TODO: We could handle `char` more gracefully. + // TODO: Strings, though the lookup is a bit more hard (we need + // to look at the canonical type of the pointee too, and check + // is char, u8, or i8 I guess). + let value = if is_integer { + let TypeKind::Int(kind) = *canonical_ty.unwrap().kind() + else { + unreachable!() + }; + + let mut val = cursor.evaluate().and_then(|v| v.as_int()); + if val.is_none() || !kind.signedness_matches(val.unwrap()) { + val = get_integer_literal_from_cursor(&cursor); + } + + val.map(|val| { + if kind == IntKind::Bool { + VarType::Bool(val != 0) + } else { + VarType::Int(val) + } + }) + } else if is_float { + cursor + .evaluate() + .and_then(|v| v.as_double()) + .map(VarType::Float) + } else { + cursor + .evaluate() + .and_then(|v| v.as_literal_string()) + .map(VarType::String) + }; + + let mangling = cursor_mangling(ctx, &cursor); + let var = + Var::new(name, mangling, link_name, ty, value, is_const); + + Ok(ParseResult::New(var, Some(cursor))) + } + _ => { + /* TODO */ + Err(ParseError::Continue) + } + } + } +} + +/// This function uses a [`FallbackTranslationUnit`][clang::FallbackTranslationUnit] to parse each +/// macro that cannot be parsed by the normal bindgen process for `#define`s. +/// +/// To construct the [`FallbackTranslationUnit`][clang::FallbackTranslationUnit], first precompiled +/// headers are generated for all input headers. An empty temporary `.c` file is generated to pass +/// to the translation unit. On the evaluation of each macro, a [`String`] is generated with the +/// new contents of the empty file and passed in for reparsing. The precompiled headers and +/// preservation of the [`FallbackTranslationUnit`][clang::FallbackTranslationUnit] across macro +/// evaluations are both optimizations that have significantly improved the performance. 
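+///
+/// As a rough sketch of the flow below: for a macro `FOO`, the reparsed source is
+/// `int main() { FOO; }`, and the value is then read back from the `ParenExpr` node
+/// found inside that function body.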
+fn parse_macro_clang_fallback( + ctx: &mut BindgenContext, + cursor: &clang::Cursor, +) -> Option<(Vec, cexpr::expr::EvalResult)> { + if !ctx.options().clang_macro_fallback { + return None; + } + + let ftu = ctx.try_ensure_fallback_translation_unit()?; + let contents = format!("int main() {{ {}; }}", cursor.spelling()); + ftu.reparse(&contents).ok()?; + // Children of root node of AST + let root_children = ftu.translation_unit().cursor().collect_children(); + // Last child in root is function declaration + // Should be FunctionDecl + let main_func = root_children.last()?; + // Children should all be statements in function declaration + let all_stmts = main_func.collect_children(); + // First child in all_stmts should be the statement containing the macro to evaluate + // Should be CompoundStmt + let macro_stmt = all_stmts.first()?; + // Children should all be expressions from the compound statement + let paren_exprs = macro_stmt.collect_children(); + // First child in all_exprs is the expression utilizing the given macro to be evaluated + // Should be ParenExpr + let paren = paren_exprs.first()?; + + Some(( + cursor.spelling().into_bytes(), + cexpr::expr::EvalResult::Int(Wrapping(paren.evaluate()?.as_int()?)), + )) +} + +/// Try and parse a macro using all the macros parsed until now. +fn parse_macro( + ctx: &mut BindgenContext, + cursor: &clang::Cursor, +) -> Option<(Vec, cexpr::expr::EvalResult)> { + use cexpr::expr; + + let mut cexpr_tokens = cursor.cexpr_tokens(); + + for callbacks in &ctx.options().parse_callbacks { + callbacks.modify_macro(&cursor.spelling(), &mut cexpr_tokens); + } + + let parser = expr::IdentifierParser::new(ctx.parsed_macros()); + + match parser.macro_definition(&cexpr_tokens) { + Ok((_, (id, val))) => Some((id.into(), val)), + _ => parse_macro_clang_fallback(ctx, cursor), + } +} + +fn parse_int_literal_tokens(cursor: &clang::Cursor) -> Option { + use cexpr::expr; + use cexpr::expr::EvalResult; + + let cexpr_tokens = cursor.cexpr_tokens(); + + // TODO(emilio): We can try to parse other kinds of literals. + match expr::expr(&cexpr_tokens) { + Ok((_, EvalResult::Int(Wrapping(val)))) => Some(val), + _ => None, + } +} + +fn get_integer_literal_from_cursor(cursor: &clang::Cursor) -> Option { + use clang_sys::*; + let mut value = None; + cursor.visit(|c| { + match c.kind() { + CXCursor_IntegerLiteral | CXCursor_UnaryOperator => { + value = parse_int_literal_tokens(&c); + } + CXCursor_UnexposedExpr => { + value = get_integer_literal_from_cursor(&c); + } + _ => (), + } + if value.is_some() { + CXChildVisit_Break + } else { + CXChildVisit_Continue + } + }); + value +} + +fn duplicated_macro_diagnostic( + macro_name: &str, + _location: clang::SourceLocation, + _ctx: &BindgenContext, +) { + warn!("Duplicated macro definition: {macro_name}"); + + #[cfg(feature = "experimental")] + // FIXME (pvdrz & amanjeev): This diagnostic message shows way too often to be actually + // useful. We have to change the logic where this function is called to be able to emit this + // message only when the duplication is an actual issue. + // + // If I understood correctly, `bindgen` ignores all `#undef` directives. Meaning that this: + // ```c + // #define FOO 1 + // #undef FOO + // #define FOO 2 + // ``` + // + // Will trigger this message even though there's nothing wrong with it. 
+ #[allow(clippy::overly_complex_bool_expr)] + if false && _ctx.options().emit_diagnostics { + use crate::diagnostics::{get_line, Diagnostic, Level, Slice}; + use std::borrow::Cow; + + let mut slice = Slice::default(); + let mut source = Cow::from(macro_name); + + let (file, line, col, _) = _location.location(); + if let Some(filename) = file.name() { + if let Ok(Some(code)) = get_line(&filename, line) { + source = code.into(); + } + slice.with_location(filename, line, col); + } + + slice.with_source(source); + + Diagnostic::default() + .with_title("Duplicated macro definition.", Level::Warning) + .add_slice(slice) + .add_annotation("This macro had a duplicate.", Level::Note) + .display(); + } +} diff --git a/vendor/bindgen/lib.rs b/vendor/bindgen/lib.rs new file mode 100644 index 00000000000000..b2fecc2c3b0e29 --- /dev/null +++ b/vendor/bindgen/lib.rs @@ -0,0 +1,1422 @@ +//! Generate Rust bindings for C and C++ libraries. +//! +//! Provide a C/C++ header file, receive Rust FFI code to call into C/C++ +//! functions and use types defined in the header. +//! +//! See the [`Builder`](./struct.Builder.html) struct for usage. +//! +//! See the [Users Guide](https://rust-lang.github.io/rust-bindgen/) for +//! additional documentation. +#![deny(missing_docs)] +#![deny(unused_extern_crates)] +#![deny(clippy::disallowed_methods)] +// To avoid rather annoying warnings when matching with CXCursor_xxx as a +// constant. +#![allow(non_upper_case_globals)] +// `quote!` nests quite deeply. +#![recursion_limit = "128"] + +#[macro_use] +extern crate bitflags; +#[macro_use] +extern crate quote; + +#[cfg(feature = "logging")] +#[macro_use] +extern crate log; + +#[cfg(not(feature = "logging"))] +#[macro_use] +mod log_stubs; + +#[macro_use] +mod extra_assertions; + +mod codegen; +mod deps; +mod options; +mod time; + +pub mod callbacks; + +mod clang; +#[cfg(feature = "experimental")] +mod diagnostics; +mod features; +mod ir; +mod parse; +mod regex_set; + +pub use codegen::{ + AliasVariation, EnumVariation, MacroTypeVariation, NonCopyUnionStyle, +}; +pub use features::{RustEdition, RustTarget, LATEST_STABLE_RUST}; +pub use ir::annotations::FieldVisibilityKind; +pub use ir::function::Abi; +#[cfg(feature = "__cli")] +pub use options::cli::builder_from_flags; + +use codegen::CodegenError; +use features::RustFeatures; +use ir::comment; +use ir::context::{BindgenContext, ItemId}; +use ir::item::Item; +use options::BindgenOptions; +use parse::ParseError; + +use std::borrow::Cow; +use std::collections::hash_map::Entry; +use std::env; +use std::ffi::OsStr; +use std::fs::{File, OpenOptions}; +use std::io::{self, Write}; +use std::mem::size_of; +use std::path::{Path, PathBuf}; +use std::process::{Command, Stdio}; +use std::rc::Rc; +use std::str::FromStr; + +// Some convenient typedefs for a fast hash map and hash set. +type HashMap = rustc_hash::FxHashMap; +type HashSet = rustc_hash::FxHashSet; + +/// Default prefix for the anon fields. 
+pub const DEFAULT_ANON_FIELDS_PREFIX: &str = "__bindgen_anon_"; + +const DEFAULT_NON_EXTERN_FNS_SUFFIX: &str = "__extern"; + +fn file_is_cpp(name_file: &str) -> bool { + Path::new(name_file).extension().is_some_and(|ext| { + ext.eq_ignore_ascii_case("hpp") || + ext.eq_ignore_ascii_case("hxx") || + ext.eq_ignore_ascii_case("hh") || + ext.eq_ignore_ascii_case("h++") + }) +} + +fn args_are_cpp(clang_args: &[Box]) -> bool { + for w in clang_args.windows(2) { + if w[0].as_ref() == "-xc++" || w[1].as_ref() == "-xc++" { + return true; + } + if w[0].as_ref() == "-x" && w[1].as_ref() == "c++" { + return true; + } + if w[0].as_ref() == "-include" && file_is_cpp(w[1].as_ref()) { + return true; + } + } + false +} + +bitflags! { + /// A type used to indicate which kind of items we have to generate. + #[derive(Copy, Clone, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] + pub struct CodegenConfig: u32 { + /// Whether to generate functions. + const FUNCTIONS = 1 << 0; + /// Whether to generate types. + const TYPES = 1 << 1; + /// Whether to generate constants. + const VARS = 1 << 2; + /// Whether to generate methods. + const METHODS = 1 << 3; + /// Whether to generate constructors + const CONSTRUCTORS = 1 << 4; + /// Whether to generate destructors. + const DESTRUCTORS = 1 << 5; + } +} + +impl CodegenConfig { + /// Returns true if functions should be generated. + pub fn functions(self) -> bool { + self.contains(CodegenConfig::FUNCTIONS) + } + + /// Returns true if types should be generated. + pub fn types(self) -> bool { + self.contains(CodegenConfig::TYPES) + } + + /// Returns true if constants should be generated. + pub fn vars(self) -> bool { + self.contains(CodegenConfig::VARS) + } + + /// Returns true if methods should be generated. + pub fn methods(self) -> bool { + self.contains(CodegenConfig::METHODS) + } + + /// Returns true if constructors should be generated. + pub fn constructors(self) -> bool { + self.contains(CodegenConfig::CONSTRUCTORS) + } + + /// Returns true if destructors should be generated. + pub fn destructors(self) -> bool { + self.contains(CodegenConfig::DESTRUCTORS) + } +} + +impl Default for CodegenConfig { + fn default() -> Self { + CodegenConfig::all() + } +} + +/// Formatting tools that can be used to format the bindings +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +#[non_exhaustive] +pub enum Formatter { + /// Do not format the bindings. + None, + /// Use `rustfmt` to format the bindings. + Rustfmt, + #[cfg(feature = "prettyplease")] + /// Use `prettyplease` to format the bindings. + Prettyplease, +} + +impl Default for Formatter { + fn default() -> Self { + Self::Rustfmt + } +} + +impl FromStr for Formatter { + type Err = String; + + fn from_str(s: &str) -> Result { + match s { + "none" => Ok(Self::None), + "rustfmt" => Ok(Self::Rustfmt), + #[cfg(feature = "prettyplease")] + "prettyplease" => Ok(Self::Prettyplease), + _ => Err(format!("`{s}` is not a valid formatter")), + } + } +} + +impl std::fmt::Display for Formatter { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let s = match self { + Self::None => "none", + Self::Rustfmt => "rustfmt", + #[cfg(feature = "prettyplease")] + Self::Prettyplease => "prettyplease", + }; + + std::fmt::Display::fmt(&s, f) + } +} + +/// Configure and generate Rust bindings for a C/C++ header. +/// +/// This is the main entry point to the library. +/// +/// ```ignore +/// use bindgen::builder; +/// +/// // Configure and generate bindings. 
+/// let bindings = builder().header("path/to/input/header") +/// .allowlist_type("SomeCoolClass") +/// .allowlist_function("do_some_cool_thing") +/// .generate()?; +/// +/// // Write the generated bindings to an output file. +/// bindings.write_to_file("path/to/output.rs")?; +/// ``` +/// +/// # Enums +/// +/// Bindgen can map C/C++ enums into Rust in different ways. The way bindgen maps enums depends on +/// the pattern passed to several methods: +/// +/// 1. [`constified_enum_module()`](#method.constified_enum_module) +/// 2. [`bitfield_enum()`](#method.bitfield_enum) +/// 3. [`newtype_enum()`](#method.newtype_enum) +/// 4. [`rustified_enum()`](#method.rustified_enum) +/// 5. [`rustified_non_exhaustive_enum()`](#method.rustified_non_exhaustive_enum) +/// +/// For each C enum, bindgen tries to match the pattern in the following order: +/// +/// 1. Constified enum module +/// 2. Bitfield enum +/// 3. Newtype enum +/// 4. Rustified enum +/// +/// If none of the above patterns match, then bindgen will generate a set of Rust constants. +/// +/// # Clang arguments +/// +/// Extra arguments can be passed to with clang: +/// 1. [`clang_arg()`](#method.clang_arg): takes a single argument +/// 2. [`clang_args()`](#method.clang_args): takes an iterator of arguments +/// 3. `BINDGEN_EXTRA_CLANG_ARGS` environment variable: whitespace separate +/// environment variable of arguments +/// +/// Clang arguments specific to your crate should be added via the +/// `clang_arg()`/`clang_args()` methods. +/// +/// End-users of the crate may need to set the `BINDGEN_EXTRA_CLANG_ARGS` environment variable to +/// add additional arguments. For example, to build against a different sysroot a user could set +/// `BINDGEN_EXTRA_CLANG_ARGS` to `--sysroot=/path/to/sysroot`. +/// +/// # Regular expression arguments +/// +/// Some [`Builder`] methods, such as `allowlist_*` and `blocklist_*`, allow regular +/// expressions as arguments. These regular expressions will be enclosed in parentheses and +/// anchored with `^` and `$`. So, if the argument passed is ``, the regular expression to be +/// stored will be `^()$`. +/// +/// As a consequence, regular expressions passed to `bindgen` will try to match the whole name of +/// an item instead of a section of it, which means that to match any items with the prefix +/// `prefix`, the `prefix.*` regular expression must be used. +/// +/// Certain methods, like [`Builder::allowlist_function`], use regular expressions over function +/// names. To match C++ methods, prefix the name of the type where they belong, followed by an +/// underscore. So, if the type `Foo` has a method `bar`, it can be matched with the `Foo_bar` +/// regular expression. +/// +/// Additionally, Objective-C interfaces can be matched by prefixing the regular expression with +/// `I`. For example, the `IFoo` regular expression matches the `Foo` interface, and the `IFoo_foo` +/// regular expression matches the `foo` method of the `Foo` interface. +/// +/// Releases of `bindgen` with a version lesser or equal to `0.62.0` used to accept the wildcard +/// pattern `*` as a valid regular expression. This behavior has been deprecated, and the `.*` +/// regular expression must be used instead. +#[derive(Debug, Default, Clone)] +pub struct Builder { + options: BindgenOptions, +} + +/// Construct a new [`Builder`](./struct.Builder.html). 
+pub fn builder() -> Builder { + Default::default() +} + +fn get_extra_clang_args( + parse_callbacks: &[Rc], +) -> Vec { + // Add any extra arguments from the environment to the clang command line. + let extra_clang_args = match get_target_dependent_env_var( + parse_callbacks, + "BINDGEN_EXTRA_CLANG_ARGS", + ) { + None => return vec![], + Some(s) => s, + }; + + // Try to parse it with shell quoting. If we fail, make it one single big argument. + if let Some(strings) = shlex::split(&extra_clang_args) { + return strings; + } + vec![extra_clang_args] +} + +impl Builder { + /// Generate the Rust bindings using the options built up thus far. + pub fn generate(mut self) -> Result { + // Keep rust_features synced with rust_target + self.options.rust_features = match self.options.rust_edition { + Some(edition) => { + if !edition.is_available(self.options.rust_target) { + return Err(BindgenError::UnsupportedEdition( + edition, + self.options.rust_target, + )); + } + RustFeatures::new(self.options.rust_target, edition) + } + None => { + RustFeatures::new_with_latest_edition(self.options.rust_target) + } + }; + + // Add any extra arguments from the environment to the clang command line. + self.options.clang_args.extend( + get_extra_clang_args(&self.options.parse_callbacks) + .into_iter() + .map(String::into_boxed_str), + ); + + for header in &self.options.input_headers { + self.options + .for_each_callback(|cb| cb.header_file(header.as_ref())); + } + + // Transform input headers to arguments on the clang command line. + self.options.fallback_clang_args = self + .options + .clang_args + .iter() + .filter(|arg| { + !arg.starts_with("-MMD") && + !arg.starts_with("-MD") && + !arg.starts_with("--write-user-dependencies") && + !arg.starts_with("--user-dependencies") + }) + .cloned() + .collect::>(); + self.options.clang_args.extend( + self.options.input_headers + [..self.options.input_headers.len().saturating_sub(1)] + .iter() + .flat_map(|header| ["-include".into(), header.clone()]), + ); + + let input_unsaved_files = + std::mem::take(&mut self.options.input_header_contents) + .into_iter() + .map(|(name, contents)| { + clang::UnsavedFile::new(name.as_ref(), contents.as_ref()) + }) + .collect::>(); + + Bindings::generate(self.options, &input_unsaved_files) + } + + /// Preprocess and dump the input header files to disk. + /// + /// This is useful when debugging bindgen, using C-Reduce, or when filing + /// issues. The resulting file will be named something like `__bindgen.i` or + /// `__bindgen.ii` + pub fn dump_preprocessed_input(&self) -> io::Result<()> { + let clang = + clang_sys::support::Clang::find(None, &[]).ok_or_else(|| { + io::Error::new( + io::ErrorKind::Other, + "Cannot find clang executable", + ) + })?; + + // The contents of a wrapper file that includes all the input header + // files. + let mut wrapper_contents = String::new(); + + // Whether we are working with C or C++ inputs. + let mut is_cpp = args_are_cpp(&self.options.clang_args); + + // For each input header, add `#include "$header"`. + for header in &self.options.input_headers { + is_cpp |= file_is_cpp(header); + + wrapper_contents.push_str("#include \""); + wrapper_contents.push_str(header); + wrapper_contents.push_str("\"\n"); + } + + // For each input header content, add a prefix line of `#line 0 "$name"` + // followed by the contents. 
+ for (name, contents) in &self.options.input_header_contents { + is_cpp |= file_is_cpp(name); + + wrapper_contents.push_str("#line 0 \""); + wrapper_contents.push_str(name); + wrapper_contents.push_str("\"\n"); + wrapper_contents.push_str(contents); + } + + let wrapper_path = PathBuf::from(if is_cpp { + "__bindgen.cpp" + } else { + "__bindgen.c" + }); + + { + let mut wrapper_file = File::create(&wrapper_path)?; + wrapper_file.write_all(wrapper_contents.as_bytes())?; + } + + let mut cmd = Command::new(clang.path); + cmd.arg("-save-temps") + .arg("-E") + .arg("-C") + .arg("-c") + .arg(&wrapper_path) + .stdout(Stdio::piped()); + + for a in &self.options.clang_args { + cmd.arg(a.as_ref()); + } + + for a in get_extra_clang_args(&self.options.parse_callbacks) { + cmd.arg(a); + } + + let mut child = cmd.spawn()?; + + let mut preprocessed = child.stdout.take().unwrap(); + let mut file = File::create(if is_cpp { + "__bindgen.ii" + } else { + "__bindgen.i" + })?; + io::copy(&mut preprocessed, &mut file)?; + + if child.wait()?.success() { + Ok(()) + } else { + Err(io::Error::new( + io::ErrorKind::Other, + "clang exited with non-zero status", + )) + } + } +} + +impl BindgenOptions { + fn build(&mut self) { + const REGEX_SETS_LEN: usize = 29; + + let regex_sets: [_; REGEX_SETS_LEN] = [ + &mut self.blocklisted_types, + &mut self.blocklisted_functions, + &mut self.blocklisted_items, + &mut self.blocklisted_files, + &mut self.blocklisted_vars, + &mut self.opaque_types, + &mut self.allowlisted_vars, + &mut self.allowlisted_types, + &mut self.allowlisted_functions, + &mut self.allowlisted_files, + &mut self.allowlisted_items, + &mut self.bitfield_enums, + &mut self.constified_enums, + &mut self.constified_enum_modules, + &mut self.newtype_enums, + &mut self.newtype_global_enums, + &mut self.rustified_enums, + &mut self.rustified_non_exhaustive_enums, + &mut self.type_alias, + &mut self.new_type_alias, + &mut self.new_type_alias_deref, + &mut self.bindgen_wrapper_union, + &mut self.manually_drop_union, + &mut self.no_partialeq_types, + &mut self.no_copy_types, + &mut self.no_debug_types, + &mut self.no_default_types, + &mut self.no_hash_types, + &mut self.must_use_types, + ]; + + let record_matches = self.record_matches; + #[cfg(feature = "experimental")] + { + let sets_len = REGEX_SETS_LEN + self.abi_overrides.len(); + let names = if self.emit_diagnostics { + <[&str; REGEX_SETS_LEN]>::into_iter([ + "--blocklist-type", + "--blocklist-function", + "--blocklist-item", + "--blocklist-file", + "--blocklist-var", + "--opaque-type", + "--allowlist-type", + "--allowlist-function", + "--allowlist-var", + "--allowlist-file", + "--allowlist-item", + "--bitfield-enum", + "--newtype-enum", + "--newtype-global-enum", + "--rustified-enum", + "--rustified-enum-non-exhaustive", + "--constified-enum-module", + "--constified-enum", + "--type-alias", + "--new-type-alias", + "--new-type-alias-deref", + "--bindgen-wrapper-union", + "--manually-drop-union", + "--no-partialeq", + "--no-copy", + "--no-debug", + "--no-default", + "--no-hash", + "--must-use", + ]) + .chain((0..self.abi_overrides.len()).map(|_| "--override-abi")) + .map(Some) + .collect() + } else { + vec![None; sets_len] + }; + + for (regex_set, name) in + self.abi_overrides.values_mut().chain(regex_sets).zip(names) + { + regex_set.build_with_diagnostics(record_matches, name); + } + } + #[cfg(not(feature = "experimental"))] + for regex_set in self.abi_overrides.values_mut().chain(regex_sets) { + regex_set.build(record_matches); + } + } + + /// Update rust target 
version + pub fn set_rust_target(&mut self, rust_target: RustTarget) { + self.rust_target = rust_target; + } + + /// Get features supported by target Rust version + pub fn rust_features(&self) -> RustFeatures { + self.rust_features + } + + fn last_callback( + &self, + f: impl Fn(&dyn callbacks::ParseCallbacks) -> Option, + ) -> Option { + self.parse_callbacks + .iter() + .filter_map(|cb| f(cb.as_ref())) + .next_back() + } + + fn all_callbacks( + &self, + f: impl Fn(&dyn callbacks::ParseCallbacks) -> Vec, + ) -> Vec { + self.parse_callbacks + .iter() + .flat_map(|cb| f(cb.as_ref())) + .collect() + } + + fn for_each_callback(&self, f: impl Fn(&dyn callbacks::ParseCallbacks)) { + self.parse_callbacks.iter().for_each(|cb| f(cb.as_ref())); + } + + fn process_comment(&self, comment: &str) -> String { + let comment = comment::preprocess(comment); + self.last_callback(|cb| cb.process_comment(&comment)) + .unwrap_or(comment) + } +} + +#[cfg(feature = "runtime")] +fn ensure_libclang_is_loaded() { + use std::sync::{Arc, OnceLock}; + + if clang_sys::is_loaded() { + return; + } + + // XXX (issue #350): Ensure that our dynamically loaded `libclang` + // doesn't get dropped prematurely, nor is loaded multiple times + // across different threads. + + static LIBCLANG: OnceLock> = OnceLock::new(); + let libclang = LIBCLANG.get_or_init(|| { + clang_sys::load().expect("Unable to find libclang"); + clang_sys::get_library() + .expect("We just loaded libclang and it had better still be here!") + }); + + clang_sys::set_library(Some(libclang.clone())); +} + +#[cfg(not(feature = "runtime"))] +fn ensure_libclang_is_loaded() {} + +/// Error type for rust-bindgen. +#[derive(Debug, Clone, PartialEq, Eq, Hash)] +#[non_exhaustive] +pub enum BindgenError { + /// The header was a folder. + FolderAsHeader(PathBuf), + /// Permissions to read the header is insufficient. + InsufficientPermissions(PathBuf), + /// The header does not exist. + NotExist(PathBuf), + /// Clang diagnosed an error. + ClangDiagnostic(String), + /// Code generation reported an error. + Codegen(CodegenError), + /// The passed edition is not available on that Rust target. + UnsupportedEdition(RustEdition, RustTarget), +} + +impl std::fmt::Display for BindgenError { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + BindgenError::FolderAsHeader(h) => { + write!(f, "'{}' is a folder", h.display()) + } + BindgenError::InsufficientPermissions(h) => { + write!(f, "insufficient permissions to read '{}'", h.display()) + } + BindgenError::NotExist(h) => { + write!(f, "header '{}' does not exist.", h.display()) + } + BindgenError::ClangDiagnostic(message) => { + write!(f, "clang diagnosed error: {message}") + } + BindgenError::Codegen(err) => { + write!(f, "codegen error: {err}") + } + BindgenError::UnsupportedEdition(edition, target) => { + write!(f, "edition {edition} is not available on Rust {target}") + } + } + } +} + +impl std::error::Error for BindgenError {} + +/// Generated Rust bindings. +#[derive(Debug)] +pub struct Bindings { + options: BindgenOptions, + module: proc_macro2::TokenStream, +} + +pub(crate) const HOST_TARGET: &str = + include_str!(concat!(env!("OUT_DIR"), "/host-target.txt")); + +// Some architecture triplets are different between rust and libclang, see #1211 +// and duplicates. 
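+// For example, `aarch64-apple-ios` maps to `arm64-apple-ios`, and `riscv32imc-esp-espidf`
+// maps to `riscv32-esp-elf`; see the `test_rust_to_clang_target*` tests at the bottom of
+// this file.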
+fn rust_to_clang_target(rust_target: &str) -> Box { + const TRIPLE_HYPHENS_MESSAGE: &str = "Target triple should contain hyphens"; + + let mut triple: Vec<&str> = rust_target.split_terminator('-').collect(); + + assert!(!triple.is_empty(), "{}", TRIPLE_HYPHENS_MESSAGE); + triple.resize(4, ""); + + // RISC-V + if triple[0].starts_with("riscv32") { + triple[0] = "riscv32"; + } else if triple[0].starts_with("riscv64") { + triple[0] = "riscv64"; + } + + // Apple + if triple[1] == "apple" { + if triple[0] == "aarch64" { + triple[0] = "arm64"; + } + if triple[3] == "sim" { + triple[3] = "simulator"; + } + } + + // ESP-IDF + if triple[2] == "espidf" { + triple[2] = "elf"; + } + + triple + .iter() + .skip(1) + .fold(triple[0].to_string(), |triple, part| { + if part.is_empty() { + triple + } else { + triple + "-" + part + } + }) + .into() +} + +/// Returns the effective target, and whether it was explicitly specified on the +/// clang flags. +fn find_effective_target(clang_args: &[Box]) -> (Box, bool) { + let mut args = clang_args.iter(); + while let Some(opt) = args.next() { + if opt.starts_with("--target=") { + let mut split = opt.split('='); + split.next(); + return (split.next().unwrap().into(), true); + } + + if opt.as_ref() == "-target" { + if let Some(target) = args.next() { + return (target.clone(), true); + } + } + } + + // If we're running from a build script, try to find the cargo target. + if let Ok(t) = env::var("TARGET") { + return (rust_to_clang_target(&t), false); + } + + (rust_to_clang_target(HOST_TARGET), false) +} + +impl Bindings { + /// Generate bindings for the given options. + pub(crate) fn generate( + mut options: BindgenOptions, + input_unsaved_files: &[clang::UnsavedFile], + ) -> Result { + ensure_libclang_is_loaded(); + + #[cfg(feature = "runtime")] + match clang_sys::get_library().unwrap().version() { + None => { + warn!("Could not detect a Clang version, make sure you are using libclang 9 or newer"); + } + Some(version) => { + if version < clang_sys::Version::V9_0 { + warn!("Detected Clang version {version:?} which is unsupported and can cause invalid code generation, use libclang 9 or newer"); + } + } + } + + #[cfg(feature = "runtime")] + debug!( + "Generating bindings, libclang at {}", + clang_sys::get_library().unwrap().path().display() + ); + #[cfg(not(feature = "runtime"))] + debug!("Generating bindings, libclang linked"); + + options.build(); + + let (effective_target, explicit_target) = + find_effective_target(&options.clang_args); + + let is_host_build = + rust_to_clang_target(HOST_TARGET) == effective_target; + + // NOTE: The is_host_build check wouldn't be sound normally in some + // cases if we were to call a binary (if you have a 32-bit clang and are + // building on a 64-bit system for example). But since we rely on + // opening libclang.so, it has to be the same architecture and thus the + // check is fine. + if !explicit_target && !is_host_build { + options.clang_args.insert( + 0, + format!("--target={effective_target}").into_boxed_str(), + ); + } + + fn detect_include_paths(options: &mut BindgenOptions) { + if !options.detect_include_paths { + return; + } + + // Filter out include paths and similar stuff, so we don't incorrectly + // promote them to `-isystem`. 
+ let clang_args_for_clang_sys = { + let mut last_was_include_prefix = false; + options + .clang_args + .iter() + .filter(|arg| { + if last_was_include_prefix { + last_was_include_prefix = false; + return false; + } + + let arg = arg.as_ref(); + + // https://clang.llvm.org/docs/ClangCommandLineReference.html + // -isystem and -isystem-after are harmless. + if arg == "-I" || arg == "--include-directory" { + last_was_include_prefix = true; + return false; + } + + if arg.starts_with("-I") || + arg.starts_with("--include-directory=") + { + return false; + } + + true + }) + .map(|arg| arg.clone().into()) + .collect::>() + }; + + debug!( + "Trying to find clang with flags: {clang_args_for_clang_sys:?}" + ); + + let clang = match clang_sys::support::Clang::find( + None, + &clang_args_for_clang_sys, + ) { + None => return, + Some(clang) => clang, + }; + + debug!("Found clang: {clang:?}"); + + // Whether we are working with C or C++ inputs. + let is_cpp = args_are_cpp(&options.clang_args) || + options.input_headers.iter().any(|h| file_is_cpp(h)); + + let search_paths = if is_cpp { + clang.cpp_search_paths + } else { + clang.c_search_paths + }; + + if let Some(search_paths) = search_paths { + for path in search_paths { + if let Ok(path) = path.into_os_string().into_string() { + options.clang_args.push("-isystem".into()); + options.clang_args.push(path.into_boxed_str()); + } + } + } + } + + detect_include_paths(&mut options); + + #[cfg(unix)] + fn can_read(perms: &std::fs::Permissions) -> bool { + use std::os::unix::fs::PermissionsExt; + perms.mode() & 0o444 > 0 + } + + #[cfg(not(unix))] + fn can_read(_: &std::fs::Permissions) -> bool { + true + } + + if let Some(h) = options.input_headers.last() { + let path = Path::new(h.as_ref()); + if let Ok(md) = std::fs::metadata(path) { + if md.is_dir() { + return Err(BindgenError::FolderAsHeader(path.into())); + } + if !can_read(&md.permissions()) { + return Err(BindgenError::InsufficientPermissions( + path.into(), + )); + } + options.clang_args.push(h.clone()); + } else { + return Err(BindgenError::NotExist(path.into())); + } + } + + for (idx, f) in input_unsaved_files.iter().enumerate() { + if idx != 0 || !options.input_headers.is_empty() { + options.clang_args.push("-include".into()); + } + options.clang_args.push(f.name.to_str().unwrap().into()); + } + + debug!("Fixed-up options: {options:?}"); + + let time_phases = options.time_phases; + let mut context = BindgenContext::new(options, input_unsaved_files); + + if is_host_build { + debug_assert_eq!( + context.target_pointer_size(), + size_of::<*mut ()>(), + "{effective_target:?} {HOST_TARGET:?}" + ); + } + + { + let _t = time::Timer::new("parse").with_output(time_phases); + parse(&mut context)?; + } + + let (module, options) = + codegen::codegen(context).map_err(BindgenError::Codegen)?; + + Ok(Bindings { options, module }) + } + + /// Write these bindings as source text to a file. + pub fn write_to_file>(&self, path: P) -> io::Result<()> { + let file = OpenOptions::new() + .write(true) + .truncate(true) + .create(true) + .open(path.as_ref())?; + self.write(Box::new(file))?; + Ok(()) + } + + /// Write these bindings as source text to the given `Write`able. 
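+ ///
+ /// A rough usage sketch (assuming `bindings` came from [`Builder::generate`]):
+ ///
+ /// ```ignore
+ /// let mut out = Vec::new();
+ /// bindings.write(Box::new(&mut out))?;
+ /// ```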
+ pub fn write<'a>(&self, mut writer: Box) -> io::Result<()> { + const NL: &str = if cfg!(windows) { "\r\n" } else { "\n" }; + + if !self.options.disable_header_comment { + let version = + option_env!("CARGO_PKG_VERSION").unwrap_or("(unknown version)"); + write!( + writer, + "/* automatically generated by rust-bindgen {version} */{NL}{NL}", + )?; + } + + for line in &self.options.raw_lines { + writer.write_all(line.as_bytes())?; + writer.write_all(NL.as_bytes())?; + } + + if !self.options.raw_lines.is_empty() { + writer.write_all(NL.as_bytes())?; + } + + match self.format_tokens(&self.module) { + Ok(formatted_bindings) => { + writer.write_all(formatted_bindings.as_bytes())?; + } + Err(err) => { + eprintln!( + "Failed to run rustfmt: {err} (non-fatal, continuing)" + ); + writer.write_all(self.module.to_string().as_bytes())?; + } + } + Ok(()) + } + + /// Gets the rustfmt path to rustfmt the generated bindings. + fn rustfmt_path(&self) -> io::Result> { + debug_assert!(matches!(self.options.formatter, Formatter::Rustfmt)); + if let Some(ref p) = self.options.rustfmt_path { + return Ok(Cow::Borrowed(p)); + } + if let Ok(rustfmt) = env::var("RUSTFMT") { + return Ok(Cow::Owned(rustfmt.into())); + } + // No rustfmt binary was specified, so assume that the binary is called + // "rustfmt" and that it is in the user's PATH. + Ok(Cow::Owned("rustfmt".into())) + } + + /// Formats a token stream with the formatter set up in `BindgenOptions`. + fn format_tokens( + &self, + tokens: &proc_macro2::TokenStream, + ) -> io::Result { + let _t = time::Timer::new("rustfmt_generated_string") + .with_output(self.options.time_phases); + + match self.options.formatter { + Formatter::None => return Ok(tokens.to_string()), + #[cfg(feature = "prettyplease")] + Formatter::Prettyplease => { + return Ok(prettyplease::unparse(&syn::parse_quote!(#tokens))); + } + Formatter::Rustfmt => (), + } + + let rustfmt = self.rustfmt_path()?; + let mut cmd = Command::new(&*rustfmt); + + cmd.stdin(Stdio::piped()).stdout(Stdio::piped()); + + if let Some(path) = self + .options + .rustfmt_configuration_file + .as_ref() + .and_then(|f| f.to_str()) + { + cmd.args(["--config-path", path]); + } + + let edition = self + .options + .rust_edition + .unwrap_or_else(|| self.options.rust_target.latest_edition()); + cmd.args(["--edition", &format!("{edition}")]); + + let mut child = cmd.spawn()?; + let mut child_stdin = child.stdin.take().unwrap(); + let mut child_stdout = child.stdout.take().unwrap(); + + let source = tokens.to_string(); + + // Write to stdin in a new thread, so that we can read from stdout on this + // thread. This keeps the child from blocking on writing to its stdout which + // might block us from writing to its stdin. 
+ let stdin_handle = ::std::thread::spawn(move || { + let _ = child_stdin.write_all(source.as_bytes()); + source + }); + + let mut output = vec![]; + io::copy(&mut child_stdout, &mut output)?; + + let status = child.wait()?; + let source = stdin_handle.join().expect( + "The thread writing to rustfmt's stdin doesn't do \ + anything that could panic", + ); + + match String::from_utf8(output) { + Ok(bindings) => match status.code() { + Some(0) => Ok(bindings), + Some(2) => Err(io::Error::new( + io::ErrorKind::Other, + "Rustfmt parsing errors.".to_string(), + )), + Some(3) => { + rustfmt_non_fatal_error_diagnostic( + "Rustfmt could not format some lines", + &self.options, + ); + Ok(bindings) + } + _ => Err(io::Error::new( + io::ErrorKind::Other, + "Internal rustfmt error".to_string(), + )), + }, + _ => Ok(source), + } + } +} + +fn rustfmt_non_fatal_error_diagnostic(msg: &str, _options: &BindgenOptions) { + warn!("{msg}"); + + #[cfg(feature = "experimental")] + if _options.emit_diagnostics { + use crate::diagnostics::{Diagnostic, Level}; + + Diagnostic::default() + .with_title(msg, Level::Warning) + .add_annotation( + "The bindings will be generated but not formatted.", + Level::Note, + ) + .display(); + } +} + +impl std::fmt::Display for Bindings { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let mut bytes = vec![]; + self.write(Box::new(&mut bytes) as Box) + .expect("writing to a vec cannot fail"); + f.write_str( + std::str::from_utf8(&bytes) + .expect("we should only write bindings that are valid utf-8"), + ) + } +} + +/// Determines whether the given cursor is in any of the files matched by the +/// options. +fn filter_builtins(ctx: &BindgenContext, cursor: &clang::Cursor) -> bool { + ctx.options().builtins || !cursor.is_builtin() +} + +/// Parse one `Item` from the Clang cursor. +fn parse_one( + ctx: &mut BindgenContext, + cursor: clang::Cursor, + parent: Option, +) { + if !filter_builtins(ctx, &cursor) { + return; + } + + match Item::parse(cursor, parent, ctx) { + Ok(..) => {} + Err(ParseError::Continue) => {} + Err(ParseError::Recurse) => { + cursor + .visit_sorted(ctx, |ctx, child| parse_one(ctx, child, parent)); + } + } +} + +/// Parse the Clang AST into our `Item` internal representation. +fn parse(context: &mut BindgenContext) -> Result<(), BindgenError> { + use clang_sys::*; + + let mut error = None; + for d in &context.translation_unit().diags() { + let msg = d.format(); + let is_err = d.severity() >= CXDiagnostic_Error; + if is_err { + let error = error.get_or_insert_with(String::new); + error.push_str(&msg); + error.push('\n'); + } else { + eprintln!("clang diag: {msg}"); + } + } + + if let Some(message) = error { + return Err(BindgenError::ClangDiagnostic(message)); + } + + let cursor = context.translation_unit().cursor(); + + if context.options().emit_ast { + fn dump_if_not_builtin(cur: &clang::Cursor) -> CXChildVisitResult { + if cur.is_builtin() { + CXChildVisit_Continue + } else { + clang::ast_dump(cur, 0) + } + } + cursor.visit(|cur| dump_if_not_builtin(&cur)); + } + + let root = context.root_module(); + context.with_module(root, |ctx| { + cursor.visit_sorted(ctx, |ctx, child| parse_one(ctx, child, None)); + }); + + assert_eq!( + context.current_module(), + context.root_module(), + "How did this happen?" 
+ ); + Ok(()) +} + +/// Extracted Clang version data +#[derive(Debug)] +pub struct ClangVersion { + /// Major and minor semver, if parsing was successful + pub parsed: Option<(u32, u32)>, + /// full version string + pub full: String, +} + +/// Get the major and the minor semver numbers of Clang's version +pub fn clang_version() -> ClangVersion { + ensure_libclang_is_loaded(); + + //Debian clang version 11.0.1-2 + let raw_v: String = clang::extract_clang_version(); + let split_v: Option> = raw_v + .split_whitespace() + .find(|t| t.chars().next().is_some_and(|v| v.is_ascii_digit())) + .map(|v| v.split('.').collect()); + if let Some(v) = split_v { + if v.len() >= 2 { + let maybe_major = v[0].parse::(); + let maybe_minor = v[1].parse::(); + if let (Ok(major), Ok(minor)) = (maybe_major, maybe_minor) { + return ClangVersion { + parsed: Some((major, minor)), + full: raw_v.clone(), + }; + } + } + } + ClangVersion { + parsed: None, + full: raw_v.clone(), + } +} + +fn env_var + AsRef>( + parse_callbacks: &[Rc], + key: K, +) -> Result { + for callback in parse_callbacks { + callback.read_env_var(key.as_ref()); + } + env::var(key) +} + +/// Looks for the env var `var_${TARGET}`, and falls back to just `var` when it is not found. +fn get_target_dependent_env_var( + parse_callbacks: &[Rc], + var: &str, +) -> Option { + if let Ok(target) = env_var(parse_callbacks, "TARGET") { + if let Ok(v) = env_var(parse_callbacks, format!("{var}_{target}")) { + return Some(v); + } + if let Ok(v) = env_var( + parse_callbacks, + format!("{var}_{}", target.replace('-', "_")), + ) { + return Some(v); + } + } + + env_var(parse_callbacks, var).ok() +} + +/// A `ParseCallbacks` implementation that will act on file includes by echoing a rerun-if-changed +/// line and on env variable usage by echoing a rerun-if-env-changed line +/// +/// When running inside a `build.rs` script, this can be used to make cargo invalidate the +/// generated bindings whenever any of the files included from the header change: +/// ``` +/// use bindgen::builder; +/// let bindings = builder() +/// .header("path/to/input/header") +/// .parse_callbacks(Box::new(bindgen::CargoCallbacks::new())) +/// .generate(); +/// ``` +#[derive(Debug)] +pub struct CargoCallbacks { + rerun_on_header_files: bool, +} + +/// Create a new `CargoCallbacks` value with [`CargoCallbacks::rerun_on_header_files`] disabled. +/// +/// This constructor has been deprecated in favor of [`CargoCallbacks::new`] where +/// [`CargoCallbacks::rerun_on_header_files`] is enabled by default. +#[deprecated = "Use `CargoCallbacks::new()` instead. Please, check the documentation for further information."] +pub const CargoCallbacks: CargoCallbacks = CargoCallbacks { + rerun_on_header_files: false, +}; + +impl CargoCallbacks { + /// Create a new `CargoCallbacks` value. + pub fn new() -> Self { + Self { + rerun_on_header_files: true, + } + } + + /// Whether Cargo should re-run the build script if any of the input header files has changed. + /// + /// This option is enabled by default unless the deprecated [`const@CargoCallbacks`] + /// constructor is used. 
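+ ///
+ /// A minimal sketch of opting back out (hypothetical build-script snippet):
+ ///
+ /// ```ignore
+ /// let callbacks = bindgen::CargoCallbacks::new().rerun_on_header_files(false);
+ /// ```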
+ pub fn rerun_on_header_files(mut self, doit: bool) -> Self { + self.rerun_on_header_files = doit; + self + } +} + +impl Default for CargoCallbacks { + fn default() -> Self { + Self::new() + } +} + +impl callbacks::ParseCallbacks for CargoCallbacks { + fn header_file(&self, filename: &str) { + if self.rerun_on_header_files { + println!("cargo:rerun-if-changed={filename}"); + } + } + + fn include_file(&self, filename: &str) { + println!("cargo:rerun-if-changed={filename}"); + } + + fn read_env_var(&self, key: &str) { + println!("cargo:rerun-if-env-changed={key}"); + } +} + +/// Test `command_line_flag` function. +#[test] +fn commandline_flag_unit_test_function() { + //Test 1 + let bindings = builder(); + let command_line_flags = bindings.command_line_flags(); + + let test_cases = [ + "--rust-target", + "--no-derive-default", + "--generate", + "functions,types,vars,methods,constructors,destructors", + ] + .iter() + .map(|&x| x.into()) + .collect::>(); + + assert!(test_cases.iter().all(|x| command_line_flags.contains(x))); + + //Test 2 + let bindings = builder() + .header("input_header") + .allowlist_type("Distinct_Type") + .allowlist_function("safe_function"); + + let command_line_flags = bindings.command_line_flags(); + let test_cases = [ + "--rust-target", + "input_header", + "--no-derive-default", + "--generate", + "functions,types,vars,methods,constructors,destructors", + "--allowlist-type", + "Distinct_Type", + "--allowlist-function", + "safe_function", + ] + .iter() + .map(|&x| x.into()) + .collect::>(); + println!("{command_line_flags:?}"); + + assert!(test_cases.iter().all(|x| command_line_flags.contains(x))); +} + +#[test] +fn test_rust_to_clang_target() { + assert_eq!( + rust_to_clang_target("aarch64-apple-ios").as_ref(), + "arm64-apple-ios" + ); +} + +#[test] +fn test_rust_to_clang_target_riscv() { + assert_eq!( + rust_to_clang_target("riscv64gc-unknown-linux-gnu").as_ref(), + "riscv64-unknown-linux-gnu" + ); + assert_eq!( + rust_to_clang_target("riscv64imac-unknown-none-elf").as_ref(), + "riscv64-unknown-none-elf" + ); + assert_eq!( + rust_to_clang_target("riscv32imc-unknown-none-elf").as_ref(), + "riscv32-unknown-none-elf" + ); + assert_eq!( + rust_to_clang_target("riscv32imac-unknown-none-elf").as_ref(), + "riscv32-unknown-none-elf" + ); + assert_eq!( + rust_to_clang_target("riscv32imafc-unknown-none-elf").as_ref(), + "riscv32-unknown-none-elf" + ); + assert_eq!( + rust_to_clang_target("riscv32i-unknown-none-elf").as_ref(), + "riscv32-unknown-none-elf" + ); +} + +#[test] +fn test_rust_to_clang_target_espidf() { + assert_eq!( + rust_to_clang_target("riscv32imc-esp-espidf").as_ref(), + "riscv32-esp-elf" + ); + assert_eq!( + rust_to_clang_target("xtensa-esp32-espidf").as_ref(), + "xtensa-esp32-elf" + ); +} + +#[test] +fn test_rust_to_clang_target_simulator() { + assert_eq!( + rust_to_clang_target("aarch64-apple-ios-sim").as_ref(), + "arm64-apple-ios-simulator" + ); + assert_eq!( + rust_to_clang_target("aarch64-apple-tvos-sim").as_ref(), + "arm64-apple-tvos-simulator" + ); + assert_eq!( + rust_to_clang_target("aarch64-apple-watchos-sim").as_ref(), + "arm64-apple-watchos-simulator" + ); +} diff --git a/vendor/bindgen/log_stubs.rs b/vendor/bindgen/log_stubs.rs new file mode 100644 index 00000000000000..51d2f81fd1346b --- /dev/null +++ b/vendor/bindgen/log_stubs.rs @@ -0,0 +1,38 @@ +#![allow(unused)] + +#[clippy::format_args] +macro_rules! 
log { + (target: $target:expr, $lvl:expr, $($arg:tt)+) => {{ + let _ = $target; + let _ = log!($lvl, $($arg)+); + }}; + ($lvl:expr, $($arg:tt)+) => {{ + let _ = $lvl; + let _ = format_args!($($arg)+); + }}; +} +#[clippy::format_args] +macro_rules! error { + (target: $target:expr, $($arg:tt)+) => { log!(target: $target, "", $($arg)+) }; + ($($arg:tt)+) => { log!("", $($arg)+) }; +} +#[clippy::format_args] +macro_rules! warn { + (target: $target:expr, $($arg:tt)*) => { log!(target: $target, "", $($arg)*) }; + ($($arg:tt)*) => { log!("", $($arg)*) }; +} +#[clippy::format_args] +macro_rules! info { + (target: $target:expr, $($arg:tt)+) => { log!(target: $target, "", $($arg)+) }; + ($($arg:tt)+) => { log!("", $($arg)+) }; +} +#[clippy::format_args] +macro_rules! debug { + (target: $target:expr, $($arg:tt)+) => { log!(target: $target, "", $($arg)+) }; + ($($arg:tt)+) => { log!("", $($arg)+) }; +} +#[clippy::format_args] +macro_rules! trace { + (target: $target:expr, $($arg:tt)+) => { log!(target: $target, "", $($arg)+) }; + ($($arg:tt)+) => { log!("", $($arg)+) }; +} diff --git a/vendor/bindgen/options/as_args.rs b/vendor/bindgen/options/as_args.rs new file mode 100644 index 00000000000000..83103fdaf48a40 --- /dev/null +++ b/vendor/bindgen/options/as_args.rs @@ -0,0 +1,52 @@ +use std::path::PathBuf; + +use crate::regex_set::RegexSet; + +/// Trait used to turn [`crate::BindgenOptions`] fields into CLI args. +pub(super) trait AsArgs { + fn as_args(&self, args: &mut Vec, flag: &str); +} + +/// If the `bool` is `true`, `flag` is pushed into `args`. +/// +/// be careful about the truth value of the field as some options, like `--no-layout-tests`, are +/// actually negations of the fields. +impl AsArgs for bool { + fn as_args(&self, args: &mut Vec, flag: &str) { + if *self { + args.push(flag.to_string()); + } + } +} + +/// Iterate over all the items of the `RegexSet` and push `flag` followed by the item into `args` +/// for each item. +impl AsArgs for RegexSet { + fn as_args(&self, args: &mut Vec, flag: &str) { + for item in self.get_items() { + args.extend_from_slice(&[flag.to_owned(), item.clone().into()]); + } + } +} + +/// If the `Option` is `Some(value)`, push `flag` followed by `value`. +impl AsArgs for Option { + fn as_args(&self, args: &mut Vec, flag: &str) { + if let Some(string) = self { + args.extend_from_slice(&[flag.to_owned(), string.clone()]); + } + } +} + +/// If the `Option` is `Some(path)`, push `flag` followed by the [`std::path::Path::display`] +/// representation of `path`. 
+impl AsArgs for Option { + fn as_args(&self, args: &mut Vec, flag: &str) { + if let Some(path) = self { + args.extend_from_slice(&[ + flag.to_owned(), + path.display().to_string(), + ]); + } + } +} diff --git a/vendor/bindgen/options/cli.rs b/vendor/bindgen/options/cli.rs new file mode 100644 index 00000000000000..bce7faed35263d --- /dev/null +++ b/vendor/bindgen/options/cli.rs @@ -0,0 +1,1151 @@ +#![allow(unused_qualifications)] // Clap somehow generates a lot of these + +use crate::{ + builder, + callbacks::{ + AttributeInfo, DeriveInfo, ItemInfo, ParseCallbacks, TypeKind, + }, + features::{RustEdition, EARLIEST_STABLE_RUST}, + regex_set::RegexSet, + Abi, AliasVariation, Builder, CodegenConfig, EnumVariation, + FieldVisibilityKind, Formatter, MacroTypeVariation, NonCopyUnionStyle, + RustTarget, +}; +use clap::{ + error::{Error, ErrorKind}, + CommandFactory, Parser, +}; +use proc_macro2::TokenStream; +use std::io; +use std::path::{Path, PathBuf}; +use std::str::FromStr; +use std::{fs::File, process::exit}; + +fn rust_target_help() -> String { + format!( + "Version of the Rust compiler to target. Any Rust version after {EARLIEST_STABLE_RUST} is supported. Defaults to {}.", + RustTarget::default() + ) +} + +fn rust_edition_help() -> String { + format!("Rust edition to target. Defaults to the latest edition supported by the chosen Rust target. Possible values: ({}). ", RustEdition::ALL.map(|e| e.to_string()).join("|")) +} + +fn parse_codegen_config( + what_to_generate: &str, +) -> Result { + let mut config = CodegenConfig::empty(); + for what in what_to_generate.split(',') { + match what { + "functions" => config.insert(CodegenConfig::FUNCTIONS), + "types" => config.insert(CodegenConfig::TYPES), + "vars" => config.insert(CodegenConfig::VARS), + "methods" => config.insert(CodegenConfig::METHODS), + "constructors" => config.insert(CodegenConfig::CONSTRUCTORS), + "destructors" => config.insert(CodegenConfig::DESTRUCTORS), + otherwise => { + return Err(Error::raw( + ErrorKind::InvalidValue, + format!("Unknown codegen item kind: {otherwise}"), + )); + } + } + } + + Ok(config) +} + +fn parse_rustfmt_config_path(path_str: &str) -> Result { + let path = Path::new(path_str); + + if !path.is_absolute() { + return Err(Error::raw( + ErrorKind::InvalidValue, + "--rustfmt-configuration-file needs to be an absolute path!", + )); + } + + if path.to_str().is_none() { + return Err(Error::raw( + ErrorKind::InvalidUtf8, + "--rustfmt-configuration-file contains non-valid UTF8 characters.", + )); + } + + Ok(path.to_path_buf()) +} + +fn parse_abi_override(abi_override: &str) -> Result<(Abi, String), Error> { + let (regex, abi_str) = abi_override + .rsplit_once('=') + .ok_or_else(|| Error::raw(ErrorKind::InvalidValue, "Missing `=`"))?; + + let abi = abi_str + .parse() + .map_err(|err| Error::raw(ErrorKind::InvalidValue, err))?; + + Ok((abi, regex.to_owned())) +} + +fn parse_custom_derive( + custom_derive: &str, +) -> Result<(Vec, String), Error> { + let (regex, derives) = custom_derive + .rsplit_once('=') + .ok_or_else(|| Error::raw(ErrorKind::InvalidValue, "Missing `=`"))?; + + let derives = derives.split(',').map(|s| s.to_owned()).collect(); + + Ok((derives, regex.to_owned())) +} + +fn parse_custom_attribute( + custom_attribute: &str, +) -> Result<(Vec, String), Error> { + let mut brace_level = 0; + let (regex, attributes) = custom_attribute + .rsplit_once(|c| { + match c { + ']' => brace_level += 1, + '[' => brace_level -= 1, + _ => {} + } + c == '=' && brace_level == 0 + }) + .ok_or_else(|| 
Error::raw(ErrorKind::InvalidValue, "Missing `=`"))?; + + let mut brace_level = 0; + let attributes = attributes + .split(|c| { + match c { + ']' => brace_level += 1, + '[' => brace_level -= 1, + _ => {} + } + c == ',' && brace_level == 0 + }) + .map(|s| s.to_owned()) + .collect::>(); + + for attribute in &attributes { + if let Err(err) = TokenStream::from_str(attribute) { + return Err(Error::raw(ErrorKind::InvalidValue, err)); + } + } + + Ok((attributes, regex.to_owned())) +} + +#[derive(Parser, Debug)] +#[clap( + about = "Generates Rust bindings from C/C++ headers.", + override_usage = "bindgen
-- ...", + trailing_var_arg = true +)] +#[allow(clippy::doc_markdown)] +struct BindgenCommand { + /// C or C++ header file. + header: Option, + /// Path to write depfile to. + #[arg(long)] + depfile: Option, + /// The default STYLE of code used to generate enums. + #[arg(long, value_name = "STYLE")] + default_enum_style: Option, + /// Mark any enum whose name matches REGEX as a set of bitfield flags. + #[arg(long, value_name = "REGEX")] + bitfield_enum: Vec, + /// Mark any enum whose name matches REGEX as a newtype. + #[arg(long, value_name = "REGEX")] + newtype_enum: Vec, + /// Mark any enum whose name matches REGEX as a global newtype. + #[arg(long, value_name = "REGEX")] + newtype_global_enum: Vec, + /// Mark any enum whose name matches REGEX as a Rust enum. + #[arg(long, value_name = "REGEX")] + rustified_enum: Vec, + /// Mark any enum whose name matches REGEX as a non-exhaustive Rust enum. + #[arg(long, value_name = "REGEX")] + rustified_non_exhaustive_enum: Vec, + /// Mark any enum whose name matches REGEX as a series of constants. + #[arg(long, value_name = "REGEX")] + constified_enum: Vec, + /// Mark any enum whose name matches REGEX as a module of constants. + #[arg(long, value_name = "REGEX")] + constified_enum_module: Vec, + /// The default signed/unsigned TYPE for C macro constants. + #[arg(long, value_name = "TYPE")] + default_macro_constant_type: Option, + /// The default STYLE of code used to generate typedefs. + #[arg(long, value_name = "STYLE")] + default_alias_style: Option, + /// Mark any typedef alias whose name matches REGEX to use normal type aliasing. + #[arg(long, value_name = "REGEX")] + normal_alias: Vec, + /// Mark any typedef alias whose name matches REGEX to have a new type generated for it. + #[arg(long, value_name = "REGEX")] + new_type_alias: Vec, + /// Mark any typedef alias whose name matches REGEX to have a new type with Deref and DerefMut to the inner type. + #[arg(long, value_name = "REGEX")] + new_type_alias_deref: Vec, + /// The default STYLE of code used to generate unions with non-Copy members. Note that ManuallyDrop was first stabilized in Rust 1.20.0. + #[arg(long, value_name = "STYLE")] + default_non_copy_union_style: Option, + /// Mark any union whose name matches REGEX and who has a non-Copy member to use a bindgen-generated wrapper for fields. + #[arg(long, value_name = "REGEX")] + bindgen_wrapper_union: Vec, + /// Mark any union whose name matches REGEX and who has a non-Copy member to use ManuallyDrop (stabilized in Rust 1.20.0) for fields. + #[arg(long, value_name = "REGEX")] + manually_drop_union: Vec, + /// Mark TYPE as hidden. + #[arg(long, value_name = "TYPE")] + blocklist_type: Vec, + /// Mark FUNCTION as hidden. + #[arg(long, value_name = "FUNCTION")] + blocklist_function: Vec, + /// Mark ITEM as hidden. + #[arg(long, value_name = "ITEM")] + blocklist_item: Vec, + /// Mark FILE as hidden. + #[arg(long, value_name = "FILE")] + blocklist_file: Vec, + /// Mark VAR as hidden. + #[arg(long, value_name = "VAR")] + blocklist_var: Vec, + /// Avoid generating layout tests for any type. + #[arg(long)] + no_layout_tests: bool, + /// Avoid deriving Copy on any type. + #[arg(long)] + no_derive_copy: bool, + /// Avoid deriving Debug on any type. + #[arg(long)] + no_derive_debug: bool, + /// Avoid deriving Default on any type. + #[arg(long, hide = true)] + no_derive_default: bool, + /// Create a Debug implementation if it cannot be derived automatically. 
+ #[arg(long)] + impl_debug: bool, + /// Create a PartialEq implementation if it cannot be derived automatically. + #[arg(long)] + impl_partialeq: bool, + /// Derive Default on any type. + #[arg(long)] + with_derive_default: bool, + /// Derive Hash on any type. + #[arg(long)] + with_derive_hash: bool, + /// Derive PartialEq on any type. + #[arg(long)] + with_derive_partialeq: bool, + /// Derive PartialOrd on any type. + #[arg(long)] + with_derive_partialord: bool, + /// Derive Eq on any type. + #[arg(long)] + with_derive_eq: bool, + /// Derive Ord on any type. + #[arg(long)] + with_derive_ord: bool, + /// Avoid including doc comments in the output, see: + #[arg(long)] + no_doc_comments: bool, + /// Disable allowlisting types recursively. This will cause bindgen to emit Rust code that won't compile! See the `bindgen::Builder::allowlist_recursively` method's documentation for details. + #[arg(long)] + no_recursive_allowlist: bool, + /// Use extern crate instead of use for objc. + #[arg(long)] + objc_extern_crate: bool, + /// Generate block signatures instead of void pointers. + #[arg(long)] + generate_block: bool, + /// Generate string constants as `&CStr` instead of `&[u8]`. + #[arg(long)] + generate_cstr: bool, + /// Use extern crate instead of use for block. + #[arg(long)] + block_extern_crate: bool, + /// Do not trust the libclang-provided mangling + #[arg(long)] + distrust_clang_mangling: bool, + /// Output bindings for builtin definitions, e.g. __builtin_va_list. + #[arg(long)] + builtins: bool, + /// Use the given PREFIX before raw types instead of ::std::os::raw. + #[arg(long, value_name = "PREFIX")] + ctypes_prefix: Option, + /// Use the given PREFIX for anonymous fields. + #[arg(long, value_name = "PREFIX")] + anon_fields_prefix: Option, + /// Time the different bindgen phases and print to stderr + #[arg(long)] + time_phases: bool, + /// Output the Clang AST for debugging purposes. + #[arg(long)] + emit_clang_ast: bool, + /// Output our internal IR for debugging purposes. + #[arg(long)] + emit_ir: bool, + /// Dump a graphviz dot file to PATH. + #[arg(long, value_name = "PATH")] + emit_ir_graphviz: Option, + /// Enable support for C++ namespaces. + #[arg(long)] + enable_cxx_namespaces: bool, + /// Disable namespacing via mangling, causing bindgen to generate names like `Baz` instead of `foo_bar_Baz` for an input name `foo::bar::Baz`. + #[arg(long)] + disable_name_namespacing: bool, + /// Disable nested struct naming, causing bindgen to generate names like `bar` instead of `foo_bar` for a nested definition `struct foo { struct bar { } b; };`. + #[arg(long)] + disable_nested_struct_naming: bool, + /// Disable support for native Rust unions. + #[arg(long)] + disable_untagged_union: bool, + /// Suppress insertion of bindgen's version identifier into generated bindings. + #[arg(long)] + disable_header_comment: bool, + /// Do not generate bindings for functions or methods. This is useful when you only care about struct layouts. + #[arg(long)] + ignore_functions: bool, + /// Generate only given items, split by commas. Valid values are `functions`,`types`, `vars`, `methods`, `constructors` and `destructors`. + #[arg(long, value_parser = parse_codegen_config)] + generate: Option, + /// Do not generate bindings for methods. + #[arg(long)] + ignore_methods: bool, + /// Do not automatically convert floats to f32/f64. + #[arg(long)] + no_convert_floats: bool, + /// Do not prepend the enum name to constant or newtype variants. 
+ #[arg(long)] + no_prepend_enum_name: bool, + /// Do not try to detect default include paths + #[arg(long)] + no_include_path_detection: bool, + /// Try to fit macro constants into types smaller than u32/i32 + #[arg(long)] + fit_macro_constant_types: bool, + /// Mark TYPE as opaque. + #[arg(long, value_name = "TYPE")] + opaque_type: Vec, + /// Write Rust bindings to OUTPUT. + #[arg(long, short, value_name = "OUTPUT")] + output: Option, + /// Add a raw line of Rust code at the beginning of output. + #[arg(long)] + raw_line: Vec, + /// Add a RAW_LINE of Rust code to a given module with name MODULE_NAME. + #[arg(long, number_of_values = 2, value_names = ["MODULE_NAME", "RAW_LINE"])] + module_raw_line: Vec, + #[arg(long, help = rust_target_help())] + rust_target: Option, + #[arg(long, value_name = "EDITION", help = rust_edition_help())] + rust_edition: Option, + /// Use types from Rust core instead of std. + #[arg(long)] + use_core: bool, + /// Conservatively generate inline namespaces to avoid name conflicts. + #[arg(long)] + conservative_inline_namespaces: bool, + /// Allowlist all the free-standing functions matching REGEX. Other non-allowlisted functions will not be generated. + #[arg(long, value_name = "REGEX")] + allowlist_function: Vec, + /// Generate inline functions. + #[arg(long)] + generate_inline_functions: bool, + /// Only generate types matching REGEX. Other non-allowlisted types will not be generated. + #[arg(long, value_name = "REGEX")] + allowlist_type: Vec, + /// Allowlist all the free-standing variables matching REGEX. Other non-allowlisted variables will not be generated. + #[arg(long, value_name = "REGEX")] + allowlist_var: Vec, + /// Allowlist all contents of PATH. + #[arg(long, value_name = "PATH")] + allowlist_file: Vec, + /// Allowlist all items matching REGEX. Other non-allowlisted items will not be generated. + #[arg(long, value_name = "REGEX")] + allowlist_item: Vec, + /// Print verbose error messages. + #[arg(long)] + verbose: bool, + /// Preprocess and dump the input header files to disk. Useful when debugging bindgen, using C-Reduce, or when filing issues. The resulting file will be named something like `__bindgen.i` or `__bindgen.ii`. + #[arg(long)] + dump_preprocessed_input: bool, + /// Do not record matching items in the regex sets. This disables reporting of unused items. + #[arg(long)] + no_record_matches: bool, + /// Do not bind size_t as usize (useful on platforms where those types are incompatible). + #[arg(long = "no-size_t-is-usize")] + no_size_t_is_usize: bool, + /// Do not format the generated bindings with rustfmt. This option is deprecated, please use + /// `--formatter=none` instead. + #[arg(long)] + no_rustfmt_bindings: bool, + /// Which FORMATTER should be used for the bindings + #[arg( + long, + value_name = "FORMATTER", + conflicts_with = "no_rustfmt_bindings" + )] + formatter: Option, + /// The absolute PATH to the rustfmt configuration file. The configuration file will be used for formatting the bindings. This parameter sets `formatter` to `rustfmt`. + #[arg(long, value_name = "PATH", conflicts_with = "no_rustfmt_bindings", value_parser=parse_rustfmt_config_path)] + rustfmt_configuration_file: Option, + /// Avoid deriving PartialEq for types matching REGEX. + #[arg(long, value_name = "REGEX")] + no_partialeq: Vec, + /// Avoid deriving Copy and Clone for types matching REGEX. + #[arg(long, value_name = "REGEX")] + no_copy: Vec, + /// Avoid deriving Debug for types matching REGEX. 
+ #[arg(long, value_name = "REGEX")] + no_debug: Vec, + /// Avoid deriving/implementing Default for types matching REGEX. + #[arg(long, value_name = "REGEX")] + no_default: Vec, + /// Avoid deriving Hash for types matching REGEX. + #[arg(long, value_name = "REGEX")] + no_hash: Vec, + /// Add `#[must_use]` annotation to types matching REGEX. + #[arg(long, value_name = "REGEX")] + must_use_type: Vec, + /// Enables detecting unexposed attributes in functions (slow). Used to generate `#[must_use]` annotations. + #[arg(long)] + enable_function_attribute_detection: bool, + /// Use `*const [T; size]` instead of `*const T` for C arrays + #[arg(long)] + use_array_pointers_in_arguments: bool, + /// The NAME to be used in a #[link(wasm_import_module = ...)] statement + #[arg(long, value_name = "NAME")] + wasm_import_module_name: Option, + /// Use dynamic loading mode with the given library NAME. + #[arg(long, value_name = "NAME")] + dynamic_loading: Option, + /// Require successful linkage to all functions in the library. + #[arg(long)] + dynamic_link_require_all: bool, + /// Prefix the name of exported symbols. + #[arg(long)] + prefix_link_name: Option, + /// Makes generated bindings `pub` only for items if the items are publicly accessible in C++. + #[arg(long)] + respect_cxx_access_specs: bool, + /// Always translate enum integer types to native Rust integer types. + #[arg(long)] + translate_enum_integer_types: bool, + /// Generate types with C style naming. + #[arg(long)] + c_naming: bool, + /// Always output explicit padding fields. + #[arg(long)] + explicit_padding: bool, + /// Always be specific about the 'receiver' of a virtual function. + #[arg(long)] + use_specific_virtual_function_receiver: bool, + /// Use distinct char16_t + #[arg(long)] + use_distinct_char16_t: bool, + /// Output C++ overloaded operators + #[arg(long)] + represent_cxx_operators: bool, + /// Enables generation of vtable functions. + #[arg(long)] + vtable_generation: bool, + /// Enables sorting of code generation in a predefined manner. + #[arg(long)] + sort_semantically: bool, + /// Deduplicates extern blocks. + #[arg(long)] + merge_extern_blocks: bool, + /// Overrides the ABI of functions matching REGEX. The OVERRIDE value must be of the shape REGEX=ABI where ABI can be one of C, stdcall, efiapi, fastcall, thiscall, aapcs, win64 or C-unwind<.> + #[arg(long, value_name = "OVERRIDE", value_parser = parse_abi_override)] + override_abi: Vec<(Abi, String)>, + /// Wrap unsafe operations in unsafe blocks. + #[arg(long)] + wrap_unsafe_ops: bool, + /// Enable fallback for clang macro parsing. + #[arg(long)] + clang_macro_fallback: bool, + /// Set path for temporary files generated by fallback for clang macro parsing. + #[arg(long)] + clang_macro_fallback_build_dir: Option, + /// Use DSTs to represent structures with flexible array members. + #[arg(long)] + flexarray_dst: bool, + /// Derive custom traits on any kind of type. The CUSTOM value must be of the shape REGEX=DERIVE where DERIVE is a coma-separated list of derive macros. + #[arg(long, value_name = "CUSTOM", value_parser = parse_custom_derive)] + with_derive_custom: Vec<(Vec, String)>, + /// Derive custom traits on a `struct`. The CUSTOM value must be of the shape REGEX=DERIVE where DERIVE is a coma-separated list of derive macros. + #[arg(long, value_name = "CUSTOM", value_parser = parse_custom_derive)] + with_derive_custom_struct: Vec<(Vec, String)>, + /// Derive custom traits on an `enum`. 
The CUSTOM value must be of the shape REGEX=DERIVE where DERIVE is a coma-separated list of derive macros. + #[arg(long, value_name = "CUSTOM", value_parser = parse_custom_derive)] + with_derive_custom_enum: Vec<(Vec, String)>, + /// Derive custom traits on a `union`. The CUSTOM value must be of the shape REGEX=DERIVE where DERIVE is a coma-separated list of derive macros. + #[arg(long, value_name = "CUSTOM", value_parser = parse_custom_derive)] + with_derive_custom_union: Vec<(Vec, String)>, + /// Add custom attributes on any kind of type. The CUSTOM value must be of the shape REGEX=ATTRIBUTE where ATTRIBUTE is a coma-separated list of attributes. + #[arg(long, value_name = "CUSTOM", value_parser = parse_custom_attribute)] + with_attribute_custom: Vec<(Vec, String)>, + /// Add custom attributes on a `struct`. The CUSTOM value must be of the shape REGEX=ATTRIBUTE where ATTRIBUTE is a coma-separated list of attributes. + #[arg(long, value_name = "CUSTOM", value_parser = parse_custom_attribute)] + with_attribute_custom_struct: Vec<(Vec, String)>, + /// Add custom attributes on an `enum`. The CUSTOM value must be of the shape REGEX=ATTRIBUTE where ATTRIBUTE is a coma-separated list of attributes. + #[arg(long, value_name = "CUSTOM", value_parser = parse_custom_attribute)] + with_attribute_custom_enum: Vec<(Vec, String)>, + /// Add custom attributes on a `union`. The CUSTOM value must be of the shape REGEX=ATTRIBUTE where ATTRIBUTE is a coma-separated list of attributes. + #[arg(long, value_name = "CUSTOM", value_parser = parse_custom_attribute)] + with_attribute_custom_union: Vec<(Vec, String)>, + /// Generate wrappers for `static` and `static inline` functions. + #[arg(long)] + wrap_static_fns: bool, + /// Sets the PATH for the source file that must be created due to the presence of `static` and + /// `static inline` functions. + #[arg(long, value_name = "PATH")] + wrap_static_fns_path: Option, + /// Sets the SUFFIX added to the extern wrapper functions generated for `static` and `static + /// inline` functions. + #[arg(long, value_name = "SUFFIX")] + wrap_static_fns_suffix: Option, + /// Set the default VISIBILITY of fields, including bitfields and accessor methods for + /// bitfields. This flag is ignored if the `--respect-cxx-access-specs` flag is used. + #[arg(long, value_name = "VISIBILITY")] + default_visibility: Option, + /// Whether to generate C++ functions marked with "=delete" even though they + /// can't be called. + #[arg(long)] + generate_deleted_functions: bool, + /// Whether to generate C++ "pure virtual" functions even though they can't + /// be called. + #[arg(long)] + generate_pure_virtual_functions: bool, + /// Whether to generate C++ private functions even though they can't + /// be called. + #[arg(long)] + generate_private_functions: bool, + /// Whether to emit diagnostics or not. + #[cfg(feature = "experimental")] + #[arg(long, requires = "experimental")] + emit_diagnostics: bool, + /// Generates completions for the specified SHELL, sends them to `stdout` and exits. + #[arg(long, value_name = "SHELL")] + generate_shell_completions: Option, + /// Enables experimental features. + #[arg(long)] + experimental: bool, + /// Prints the version, and exits + #[arg(short = 'V', long)] + version: bool, + /// Arguments to be passed straight through to clang. + clang_args: Vec, +} + +/// Construct a new [`Builder`](./struct.Builder.html) from command line flags. 
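For orientation, here is a minimal sketch of how a front end would typically drive `builder_from_flags` (illustrative only, not part of the vendored file; it assumes the caller forwards `std::env::args()` and uses bindgen's `Builder::generate` and `Bindings::write` APIs):

```rust
use std::io;

fn run_from_cli() -> io::Result<()> {
    // Parse argv into a configured Builder, the output writer chosen via
    // `-o`/stdout, and the verbosity flag.
    let (builder, output, _verbose) = builder_from_flags(std::env::args())?;

    // Generate the bindings and write them to the selected output.
    let bindings = builder
        .generate()
        .map_err(|err| io::Error::new(io::ErrorKind::Other, err.to_string()))?;
    bindings.write(output)
}
```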
+pub fn builder_from_flags( + args: I, +) -> Result<(Builder, Box, bool), io::Error> +where + I: Iterator, +{ + let command = BindgenCommand::parse_from(args); + + let BindgenCommand { + header, + depfile, + default_enum_style, + bitfield_enum, + newtype_enum, + newtype_global_enum, + rustified_enum, + rustified_non_exhaustive_enum, + constified_enum, + constified_enum_module, + default_macro_constant_type, + default_alias_style, + normal_alias, + new_type_alias, + new_type_alias_deref, + default_non_copy_union_style, + bindgen_wrapper_union, + manually_drop_union, + blocklist_type, + blocklist_function, + blocklist_item, + blocklist_file, + blocklist_var, + no_layout_tests, + no_derive_copy, + no_derive_debug, + no_derive_default, + impl_debug, + impl_partialeq, + with_derive_default, + with_derive_hash, + with_derive_partialeq, + with_derive_partialord, + with_derive_eq, + with_derive_ord, + no_doc_comments, + no_recursive_allowlist, + objc_extern_crate, + generate_block, + generate_cstr, + block_extern_crate, + distrust_clang_mangling, + builtins, + ctypes_prefix, + anon_fields_prefix, + time_phases, + emit_clang_ast, + emit_ir, + emit_ir_graphviz, + enable_cxx_namespaces, + disable_name_namespacing, + disable_nested_struct_naming, + disable_untagged_union, + disable_header_comment, + ignore_functions, + generate, + ignore_methods, + no_convert_floats, + no_prepend_enum_name, + no_include_path_detection, + fit_macro_constant_types, + opaque_type, + output, + raw_line, + module_raw_line, + rust_target, + rust_edition, + use_core, + conservative_inline_namespaces, + allowlist_function, + generate_inline_functions, + allowlist_type, + allowlist_var, + allowlist_file, + allowlist_item, + verbose, + dump_preprocessed_input, + no_record_matches, + no_size_t_is_usize, + no_rustfmt_bindings, + formatter, + rustfmt_configuration_file, + no_partialeq, + no_copy, + no_debug, + no_default, + no_hash, + must_use_type, + enable_function_attribute_detection, + use_array_pointers_in_arguments, + wasm_import_module_name, + dynamic_loading, + dynamic_link_require_all, + prefix_link_name, + respect_cxx_access_specs, + translate_enum_integer_types, + c_naming, + explicit_padding, + use_specific_virtual_function_receiver, + use_distinct_char16_t, + represent_cxx_operators, + vtable_generation, + sort_semantically, + merge_extern_blocks, + override_abi, + wrap_unsafe_ops, + clang_macro_fallback, + clang_macro_fallback_build_dir, + flexarray_dst, + with_derive_custom, + with_derive_custom_struct, + with_derive_custom_enum, + with_derive_custom_union, + with_attribute_custom, + with_attribute_custom_struct, + with_attribute_custom_enum, + with_attribute_custom_union, + wrap_static_fns, + wrap_static_fns_path, + wrap_static_fns_suffix, + default_visibility, + generate_deleted_functions, + generate_pure_virtual_functions, + generate_private_functions, + #[cfg(feature = "experimental")] + emit_diagnostics, + generate_shell_completions, + experimental: _, + version, + clang_args, + } = command; + + if let Some(shell) = generate_shell_completions { + clap_complete::generate( + shell, + &mut BindgenCommand::command(), + "bindgen", + &mut io::stdout(), + ); + + exit(0) + } + + if version { + println!( + "bindgen {}", + option_env!("CARGO_PKG_VERSION").unwrap_or("unknown") + ); + if verbose { + println!("Clang: {}", crate::clang_version().full); + } + + exit(0) + } + + if header.is_none() { + return Err(io::Error::new(io::ErrorKind::Other, "Header not found")); + } + + let mut builder = builder(); + + #[derive(Debug)] 
+ struct PrefixLinkNameCallback { + prefix: String, + } + + impl ParseCallbacks for PrefixLinkNameCallback { + fn generated_link_name_override( + &self, + item_info: ItemInfo<'_>, + ) -> Option { + let mut prefix = self.prefix.clone(); + prefix.push_str(item_info.name); + Some(prefix) + } + } + + #[derive(Debug)] + struct CustomDeriveCallback { + derives: Vec, + kind: Option, + regex_set: RegexSet, + } + + impl ParseCallbacks for CustomDeriveCallback { + fn cli_args(&self) -> Vec { + let mut args = vec![]; + + let flag = match &self.kind { + None => "--with-derive-custom", + Some(TypeKind::Struct) => "--with-derive-custom-struct", + Some(TypeKind::Enum) => "--with-derive-custom-enum", + Some(TypeKind::Union) => "--with-derive-custom-union", + }; + + let derives = self.derives.join(","); + + for item in self.regex_set.get_items() { + args.extend_from_slice(&[ + flag.to_owned(), + format!("{item}={derives}"), + ]); + } + + args + } + + fn add_derives(&self, info: &DeriveInfo<'_>) -> Vec { + if self.kind.map_or(true, |kind| kind == info.kind) && + self.regex_set.matches(info.name) + { + return self.derives.clone(); + } + vec![] + } + } + + #[derive(Debug)] + struct CustomAttributeCallback { + attributes: Vec, + kind: Option, + regex_set: RegexSet, + } + + impl ParseCallbacks for CustomAttributeCallback { + fn cli_args(&self) -> Vec { + let mut args = vec![]; + + let flag = match &self.kind { + None => "--with-attribute-custom", + Some(TypeKind::Struct) => "--with-attribute-custom-struct", + Some(TypeKind::Enum) => "--with-attribute-custom-enum", + Some(TypeKind::Union) => "--with-attribute-custom-union", + }; + + let attributes = self.attributes.join(","); + + for item in self.regex_set.get_items() { + args.extend_from_slice(&[ + flag.to_owned(), + format!("{item}={attributes}"), + ]); + } + + args + } + + fn add_attributes(&self, info: &AttributeInfo<'_>) -> Vec { + if self.kind.map_or(true, |kind| kind == info.kind) && + self.regex_set.matches(info.name) + { + return self.attributes.clone(); + } + vec![] + } + } + + /// Macro used to apply CLI arguments to a builder. + /// + /// This is done by passing an identifier for each argument and a function to be applied over + /// the builder. For example: + /// ```rust,ignore + /// fn apply_arg(builder: Builder, arg_value: Value) -> Builder { + /// todo!() + /// } + /// + /// apply_args!( + /// builder { + /// arg => apply_arg, + /// } + /// ); + /// ``` + /// + /// If the identifier of the argument is the same as an already existing builder method then + /// you can omit the second part: + /// ```rust,ignore + /// apply_args!( + /// builder { + /// arg + /// } + /// ); + /// ``` + /// Which expands to the same code as: + /// ```rust,ignore + /// apply_args!( + /// builder { + /// arg => Builder::arg, + /// } + /// ); + /// ``` + macro_rules! 
apply_args { + ($builder:ident {}) => { $builder }; + ($builder:ident {$arg:ident => $function:expr, $($token:tt)*}) => { + { + $builder = CliArg::apply($arg, $builder, $function); + apply_args!($builder {$($token)*}) + } + }; + ($builder:ident {$arg:ident, $($token:tt)*}) => { + { + $builder = CliArg::apply($arg, $builder, Builder::$arg); + apply_args!($builder {$($token)*}) + } + } + } + + builder = apply_args!( + builder { + header, + rust_target, + rust_edition, + default_enum_style, + bitfield_enum, + newtype_enum, + newtype_global_enum, + rustified_enum, + rustified_non_exhaustive_enum, + constified_enum, + constified_enum_module, + default_macro_constant_type, + default_alias_style, + normal_alias => Builder::type_alias, + new_type_alias, + new_type_alias_deref, + default_non_copy_union_style, + bindgen_wrapper_union, + manually_drop_union, + blocklist_type, + blocklist_function, + blocklist_item, + blocklist_file, + blocklist_var, + builtins => |b, _| b.emit_builtins(), + no_layout_tests => |b, _| b.layout_tests(false), + no_derive_copy => |b, _| b.derive_copy(false), + no_derive_debug => |b, _| b.derive_debug(false), + impl_debug, + impl_partialeq, + with_derive_default => Builder::derive_default, + with_derive_hash => Builder::derive_hash, + with_derive_partialeq => Builder::derive_partialeq, + with_derive_partialord => Builder::derive_partialord, + with_derive_eq => Builder::derive_eq, + with_derive_ord => Builder::derive_ord, + no_derive_default => |b, _| b.derive_default(false), + no_prepend_enum_name => |b, _| b.prepend_enum_name(false), + no_include_path_detection => |b, _| b.detect_include_paths(false), + fit_macro_constant_types => Builder::fit_macro_constants, + time_phases, + use_array_pointers_in_arguments => Builder::array_pointers_in_arguments, + wasm_import_module_name, + ctypes_prefix, + anon_fields_prefix, + generate => Builder::with_codegen_config, + emit_clang_ast => |b, _| b.emit_clang_ast(), + emit_ir => |b, _| b.emit_ir(), + emit_ir_graphviz, + enable_cxx_namespaces => |b, _| b.enable_cxx_namespaces(), + enable_function_attribute_detection => |b, _| b.enable_function_attribute_detection(), + disable_name_namespacing => |b, _| b.disable_name_namespacing(), + disable_nested_struct_naming => |b, _| b.disable_nested_struct_naming(), + disable_untagged_union => |b, _| b.disable_untagged_union(), + disable_header_comment => |b, _| b.disable_header_comment(), + ignore_functions => |b, _| b.ignore_functions(), + ignore_methods => |b, _| b.ignore_methods(), + no_convert_floats => |b, _| b.no_convert_floats(), + no_doc_comments => |b, _| b.generate_comments(false), + no_recursive_allowlist => |b, _| b.allowlist_recursively(false), + objc_extern_crate, + generate_block, + generate_cstr, + block_extern_crate, + opaque_type, + raw_line, + use_core => |b, _| b.use_core(), + distrust_clang_mangling => |b, _| b.trust_clang_mangling(false), + conservative_inline_namespaces => |b, _| b.conservative_inline_namespaces(), + generate_inline_functions, + allowlist_function, + allowlist_type, + allowlist_var, + allowlist_file, + allowlist_item, + clang_args => Builder::clang_arg, + no_record_matches => |b, _| b.record_matches(false), + no_size_t_is_usize => |b, _| b.size_t_is_usize(false), + no_rustfmt_bindings => |b, _| b.formatter(Formatter::None), + formatter, + no_partialeq, + no_copy, + no_debug, + no_default, + no_hash, + must_use_type, + dynamic_loading => Builder::dynamic_library_name, + dynamic_link_require_all, + prefix_link_name => |b, prefix| 
b.parse_callbacks(Box::new(PrefixLinkNameCallback { prefix })), + respect_cxx_access_specs, + translate_enum_integer_types, + c_naming, + explicit_padding, + use_specific_virtual_function_receiver, + use_distinct_char16_t, + represent_cxx_operators, + vtable_generation, + sort_semantically, + merge_extern_blocks, + override_abi => |b, (abi, regex)| b.override_abi(abi, regex), + wrap_unsafe_ops, + clang_macro_fallback => |b, _| b.clang_macro_fallback(), + clang_macro_fallback_build_dir, + flexarray_dst, + wrap_static_fns, + wrap_static_fns_path, + wrap_static_fns_suffix, + default_visibility, + generate_deleted_functions, + generate_pure_virtual_functions, + generate_private_functions, + } + ); + + let mut values = module_raw_line.into_iter(); + while let Some(module) = values.next() { + let line = values.next().unwrap(); + builder = builder.module_raw_line(module, line); + } + + let output = if let Some(path) = &output { + let file = File::create(path)?; + if let Some(depfile) = depfile { + builder = builder.depfile(path, depfile); + } + Box::new(io::BufWriter::new(file)) as Box + } else { + if let Some(depfile) = depfile { + builder = builder.depfile("-", depfile); + } + Box::new(io::BufWriter::new(io::stdout())) as Box + }; + + if dump_preprocessed_input { + builder.dump_preprocessed_input()?; + } + + if let Some(path) = rustfmt_configuration_file { + builder = builder.rustfmt_configuration_file(Some(path)); + } + + for (custom_derives, kind, _name) in [ + (with_derive_custom, None, "--with-derive-custom"), + ( + with_derive_custom_struct, + Some(TypeKind::Struct), + "--with-derive-custom-struct", + ), + ( + with_derive_custom_enum, + Some(TypeKind::Enum), + "--with-derive-custom-enum", + ), + ( + with_derive_custom_union, + Some(TypeKind::Union), + "--with-derive-custom-union", + ), + ] { + #[cfg(feature = "experimental")] + let name = emit_diagnostics.then_some(_name); + + for (derives, regex) in custom_derives { + let mut regex_set = RegexSet::default(); + regex_set.insert(regex); + + #[cfg(feature = "experimental")] + regex_set.build_with_diagnostics(false, name); + #[cfg(not(feature = "experimental"))] + regex_set.build(false); + + builder = builder.parse_callbacks(Box::new(CustomDeriveCallback { + derives, + kind, + regex_set, + })); + } + } + + for (custom_attributes, kind, _name) in [ + (with_attribute_custom, None, "--with-attribute-custom"), + ( + with_attribute_custom_struct, + Some(TypeKind::Struct), + "--with-attribute-custom-struct", + ), + ( + with_attribute_custom_enum, + Some(TypeKind::Enum), + "--with-attribute-custom-enum", + ), + ( + with_attribute_custom_union, + Some(TypeKind::Union), + "--with-attribute-custom-union", + ), + ] { + #[cfg(feature = "experimental")] + let name = emit_diagnostics.then_some(_name); + + for (attributes, regex) in custom_attributes { + let mut regex_set = RegexSet::default(); + regex_set.insert(regex); + + #[cfg(feature = "experimental")] + regex_set.build_with_diagnostics(false, name); + #[cfg(not(feature = "experimental"))] + regex_set.build(false); + + builder = + builder.parse_callbacks(Box::new(CustomAttributeCallback { + attributes, + kind, + regex_set, + })); + } + } + + #[cfg(feature = "experimental")] + if emit_diagnostics { + builder = builder.emit_diagnostics(); + } + + Ok((builder, output, verbose)) +} + +/// Trait for CLI arguments that can be applied to a [`Builder`]. +trait CliArg { + /// The value of this argument. + type Value; + + /// Apply the current argument to the passed [`Builder`]. 
+ fn apply( + self, + builder: Builder, + f: impl Fn(Builder, Self::Value) -> Builder, + ) -> Builder; +} + +/// Boolean arguments are applied when they evaluate to `true`. +impl CliArg for bool { + type Value = bool; + + fn apply( + self, + mut builder: Builder, + f: impl Fn(Builder, Self::Value) -> Builder, + ) -> Builder { + if self { + builder = f(builder, self); + } + + builder + } +} + +/// Optional arguments are applied when they are `Some`. +impl CliArg for Option { + type Value = T; + + fn apply( + self, + mut builder: Builder, + f: impl Fn(Builder, Self::Value) -> Builder, + ) -> Builder { + if let Some(value) = self { + builder = f(builder, value); + } + + builder + } +} + +/// Multiple valued arguments are applied once for each value. +impl CliArg for Vec { + type Value = T; + + fn apply( + self, + mut builder: Builder, + f: impl Fn(Builder, Self::Value) -> Builder, + ) -> Builder { + for value in self { + builder = f(builder, value); + } + + builder + } +} diff --git a/vendor/bindgen/options/helpers.rs b/vendor/bindgen/options/helpers.rs new file mode 100644 index 00000000000000..1816c72b572b04 --- /dev/null +++ b/vendor/bindgen/options/helpers.rs @@ -0,0 +1,43 @@ +/// Helper function that appends extra documentation to [`crate::Builder`] methods that support regular +/// expressions in their input. +macro_rules! regex_option { + ($(#[$attrs:meta])* pub fn $($tokens:tt)*) => { + $(#[$attrs])* + /// + /// Regular expressions are supported. Check the [regular expression + /// arguments](./struct.Builder.html#regular-expression-arguments) section and the + /// [regex](https://docs.rs/regex) crate documentation for further information. + pub fn $($tokens)* + }; +} + +/// Helper macro to set the default value of each option. +/// +/// This macro is an internal implementation detail of the `options` macro and should not be used +/// directly. +macro_rules! default { + () => { + Default::default() + }; + ($expr:expr) => { + $expr + }; +} + +/// Helper macro to set the conversion to CLI arguments for each option. +/// +/// This macro is an internal implementation detail of the `options` macro and should not be used +/// directly. +macro_rules! as_args { + ($flag:literal) => { + |field, args| AsArgs::as_args(field, args, $flag) + }; + ($expr:expr) => { + $expr + }; +} + +/// Helper function to ignore an option when converting it into CLI arguments. +/// +/// This function is only used inside `options` and should not be used in other contexts. +pub(super) fn ignore(_: &T, _: &mut Vec) {} diff --git a/vendor/bindgen/options/mod.rs b/vendor/bindgen/options/mod.rs new file mode 100644 index 00000000000000..c9ef7c8b490da7 --- /dev/null +++ b/vendor/bindgen/options/mod.rs @@ -0,0 +1,2286 @@ +//! Declarations and setter methods for `bindgen` options. +//! +//! The main entry point of this module is the `options` macro. 
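As a concrete illustration of the `CliArg` trait defined at the end of cli.rs above: a `bool` flag is applied only when it is `true`, an `Option` only when it is `Some`, and a `Vec` once per element. This is a sketch of what the `apply_args!` expansion boils down to; the prefix and regexes are hypothetical:

```rust
let mut builder = bindgen::builder();

// bool: the builder method runs only if the flag was passed.
builder = CliArg::apply(true, builder, |b, _| b.use_core());

// Option<T>: the builder method runs only if a value is present.
builder = CliArg::apply(Some("py_ffi".to_owned()), builder, Builder::ctypes_prefix);

// Vec<T>: the builder method runs once per collected value.
builder = CliArg::apply(
    vec!["PyObject".to_owned(), "PyTypeObject".to_owned()],
    builder,
    Builder::allowlist_type,
);
```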
+#[macro_use] +mod helpers; +mod as_args; +#[cfg(feature = "__cli")] +pub(crate) mod cli; + +use crate::callbacks::ParseCallbacks; +use crate::codegen::{ + AliasVariation, EnumVariation, MacroTypeVariation, NonCopyUnionStyle, +}; +use crate::deps::DepfileSpec; +use crate::features::{RustEdition, RustFeatures, RustTarget}; +use crate::regex_set::RegexSet; +use crate::Abi; +use crate::Builder; +use crate::CodegenConfig; +use crate::FieldVisibilityKind; +use crate::Formatter; +use crate::HashMap; +use crate::DEFAULT_ANON_FIELDS_PREFIX; + +use std::env; +use std::path::{Path, PathBuf}; +use std::rc::Rc; + +use as_args::AsArgs; +use helpers::ignore; + +/// Macro used to generate the [`BindgenOptions`] type and the [`Builder`] setter methods for each +/// one of the fields of `BindgenOptions`. +/// +/// The input format of this macro resembles a `struct` pattern. Each field of the `BindgenOptions` +/// type is declared by adding the name of the field and its type using the `name: type` syntax and +/// a block of code with the following items: +/// +/// - `default`: The default value for the field. If this item is omitted, `Default::default()` is +/// used instead, meaning that the type of the field must implement `Default`. +/// - `methods`: A block of code containing methods for the `Builder` type. These methods should be +/// related to the field being declared. +/// - `as_args`: This item declares how the field should be converted into a valid CLI argument for +/// `bindgen` and is used in the [`Builder::command_line_flags`] method which is used to do a +/// roundtrip test of the CLI args in the `bindgen-test` crate. This item can take one of the +/// following: +/// - A string literal with the flag if the type of the field implements the [`AsArgs`] trait. +/// - A closure with the signature `|field, args: &mut Vec| -> ()` that pushes arguments +/// into the `args` buffer based on the value of the field. This is used if the field does not +/// implement `AsArgs` or if the implementation of the trait is not logically correct for the +/// option and a custom behavior must be taken into account. +/// - The `ignore` literal, which does not emit any CLI arguments for this field. This is useful +/// if the field cannot be used from the `bindgen` CLI. +/// +/// As an example, this would be the declaration of a `bool` field called `be_fun` whose default +/// value is `false` (the `Default` value for `bool`): +/// ```rust,ignore +/// be_fun: bool { +/// methods: { +/// /// Ask `bindgen` to be fun. This option is disabled by default. +/// fn be_fun(mut self) -> Self { +/// self.options.be_fun = true; +/// self +/// } +/// }, +/// as_args: "--be-fun", +/// } +/// ``` +/// +/// However, we could also set the `be_fun` field to `true` by default and use a `--not-fun` flag +/// instead. This means that we have to add the `default` item and use a closure in the `as_args` +/// item: +/// ```rust,ignore +/// be_fun: bool { +/// default: true, +/// methods: { +/// /// Ask `bindgen` to not be fun. `bindgen` is fun by default. +/// fn not_fun(mut self) -> Self { +/// self.options.be_fun = false; +/// self +/// } +/// }, +/// as_args: |be_fun, args| (!be_fun).as_args(args, "--not-fun"), +/// } +/// ``` +/// More complex examples can be found in the sole invocation of this macro. +macro_rules! options { + ($( + $(#[doc = $docs:literal])+ + $field:ident: $ty:ty { + $(default: $default:expr,)? + methods: {$($methods_tokens:tt)*}$(,)? + as_args: $as_args:expr$(,)? + }$(,)? 
+ )*) => { + #[derive(Debug, Clone)] + pub(crate) struct BindgenOptions { + $($(#[doc = $docs])* pub(crate) $field: $ty,)* + } + + impl Default for BindgenOptions { + fn default() -> Self { + Self { + $($field: default!($($default)*),)* + } + } + } + + impl Builder { + /// Generates the command line flags used to create this [`Builder`]. + pub fn command_line_flags(&self) -> Vec { + let mut args = vec![]; + + let headers = match self.options.input_headers.split_last() { + Some((header, headers)) => { + // The last input header is passed as an argument in the first position. + args.push(header.clone().into()); + headers + }, + None => &[] + }; + + $({ + let func: fn(&$ty, &mut Vec) = as_args!($as_args); + func(&self.options.$field, &mut args); + })* + + // Add the `--experimental` flag if `bindgen` is built with the `experimental` + // feature. + if cfg!(feature = "experimental") { + args.push("--experimental".to_owned()); + } + + // Add all the clang arguments. + args.push("--".to_owned()); + + if !self.options.clang_args.is_empty() { + args.extend(self.options.clang_args.iter().map(|s| s.clone().into())); + } + + // We need to pass all but the last header via the `-include` clang argument. + for header in headers { + args.push("-include".to_owned()); + args.push(header.clone().into()); + } + + args + } + + $($($methods_tokens)*)* + } + }; +} + +options! { + /// Whether to specify the type of a virtual function receiver + use_specific_virtual_function_receiver: bool { + methods: { + /// Normally, virtual functions have void* as their 'this' type. + /// If this flag is enabled, override that behavior to indicate a + /// pointer of the specific type. + /// Disabled by default. + pub fn use_specific_virtual_function_receiver(mut self, doit: bool) -> Builder { + self.options.use_specific_virtual_function_receiver = doit; + self + } + }, + as_args: "--use-specific-virtual-function-receiver", + }, + + /// Whether we should distinguish between C++'s 'char16_t' and 'u16'. + /// The C++ type `char16_t` is its own special type; it's not a typedef + /// of some other integer (this differs from C). + /// As standard, bindgen represents C++ `char16_t` as `u16`. + /// Rust does not have a `std::os::raw::c_char16_t` type, and thus + /// we can't use a built-in Rust type in the generated bindings (and + /// nor would it be appropriate as it's a C++-specific type.) + /// But for some uses of bindgen, especially when downstream + /// post-processing occurs, it's important to distinguish `char16_t` + /// from normal `uint16_t`. When this option is enabled, bindgen + /// generates a fake type called `bindgen_cchar16_t`. Downstream + /// code post-processors should arrange to replace this with a + /// real type. + use_distinct_char16_t: bool { + methods: { + /// If this is true, denote 'char16_t' as a separate type from 'u16' + /// Disabled by default. + pub fn use_distinct_char16_t(mut self, doit: bool) -> Builder { + self.options.use_distinct_char16_t = doit; + self + } + }, + as_args: "--use-distinct-char16-t", + }, + /// Whether we should output C++ overloaded operators. By itself, + /// this option is not sufficient to produce valid output, because + /// such operators will have names that are not acceptable Rust + /// names (for example `operator=`). If you use this option, you'll also + /// have to rename the resulting functions - for example by using + /// [`ParseCallbacks::generated_name_override`]. 
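The generated `command_line_flags` method gives a round trip from a configured `Builder` back to an equivalent CLI invocation, which is what the flag round-trip tests rely on. A rough usage sketch (the header name and regex are hypothetical):

```rust
let builder = bindgen::builder()
    .header("wrapper.h")
    .use_core()
    .allowlist_function("PyLong_.*");

// Reconstructs something like: wrapper.h --allowlist-function 'PyLong_.*' --use-core --
let flags = builder.command_line_flags();
assert!(flags.contains(&"--use-core".to_owned()));
```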
+ represent_cxx_operators: bool { + methods: { + /// If this is true, output existence of C++ overloaded operators. + /// At present, only operator= is noted. + /// Disabled by default. + pub fn represent_cxx_operators(mut self, doit: bool) -> Builder { + self.options.represent_cxx_operators = doit; + self + } + }, + as_args: "--represent-cxx-operators", + }, + + /// Types that have been blocklisted and should not appear anywhere in the generated code. + blocklisted_types: RegexSet { + methods: { + regex_option! { + /// Do not generate any bindings for the given type. + /// + /// This option is not recursive, meaning that it will only block types whose names + /// explicitly match the argument of this method. + pub fn blocklist_type>(mut self, arg: T) -> Builder { + self.options.blocklisted_types.insert(arg); + self + } + } + }, + as_args: "--blocklist-type", + }, + /// Functions that have been blocklisted and should not appear in the generated code. + blocklisted_functions: RegexSet { + methods: { + regex_option! { + /// Do not generate any bindings for the given function. + /// + /// This option is not recursive, meaning that it will only block functions whose + /// names explicitly match the argument of this method. + pub fn blocklist_function>(mut self, arg: T) -> Builder { + self.options.blocklisted_functions.insert(arg); + self + } + } + }, + as_args: "--blocklist-function", + }, + /// Items that have been blocklisted and should not appear in the generated code. + blocklisted_items: RegexSet { + methods: { + regex_option! { + /// Do not generate any bindings for the given item, regardless of whether it is a + /// type, function, module, etc. + /// + /// This option is not recursive, meaning that it will only block items whose names + /// explicitly match the argument of this method. + pub fn blocklist_item>(mut self, arg: T) -> Builder { + self.options.blocklisted_items.insert(arg); + self + } + } + }, + as_args: "--blocklist-item", + }, + /// Files whose contents should be blocklisted and should not appear in the generated code. + blocklisted_files: RegexSet { + methods: { + regex_option! { + /// Do not generate any bindings for the contents of the given file, regardless of + /// whether the contents of the file are types, functions, modules, etc. + /// + /// This option is not recursive, meaning that it will only block files whose names + /// explicitly match the argument of this method. + /// + /// This method will use the argument to match the complete path of the file + /// instead of a section of it. + pub fn blocklist_file>(mut self, arg: T) -> Builder { + self.options.blocklisted_files.insert(arg); + self + } + } + }, + as_args: "--blocklist-file", + }, + /// Variables that have been blocklisted and should not appear in the generated code. + blocklisted_vars: RegexSet { + methods: { + regex_option! { + /// Do not generate any bindings for the given variable. + /// + /// This option is not recursive, meaning that it will only block variables whose + /// names explicitly match the argument of this method. + pub fn blocklist_var>(mut self, arg: T) -> Builder { + self.options.blocklisted_vars.insert(arg); + self + } + } + }, + as_args: "--blocklist-var", + }, + /// Types that should be treated as opaque structures in the generated code. + opaque_types: RegexSet { + methods: { + regex_option! { + /// Treat the given type as opaque in the generated bindings. 
+ /// + /// Opaque in this context means that none of the generated bindings will contain + /// information about the inner representation of the type and the type itself will + /// be represented as a chunk of bytes with the alignment and size of the type. + pub fn opaque_type>(mut self, arg: T) -> Builder { + self.options.opaque_types.insert(arg); + self + } + } + }, + as_args: "--opaque-type", + }, + /// The explicit `rustfmt` path. + rustfmt_path: Option { + methods: { + /// Set an explicit path to the `rustfmt` binary. + /// + /// This option only comes into effect if `rustfmt` is set to be the formatter used by + /// `bindgen`. Check the documentation of the [`Builder::formatter`] method for more + /// information. + pub fn with_rustfmt>(mut self, path: P) -> Self { + self.options.rustfmt_path = Some(path.into()); + self + } + }, + // This option cannot be set from the CLI. + as_args: ignore, + }, + /// The path to which we should write a Makefile-syntax depfile (if any). + depfile: Option { + methods: { + /// Add a depfile output which will be written alongside the generated bindings. + pub fn depfile, D: Into>( + mut self, + output_module: H, + depfile: D, + ) -> Builder { + self.options.depfile = Some(DepfileSpec { + output_module: output_module.into(), + depfile_path: depfile.into(), + }); + self + } + }, + as_args: |depfile, args| { + if let Some(depfile) = depfile { + args.push("--depfile".into()); + args.push(depfile.depfile_path.display().to_string()); + } + }, + }, + /// Types that have been allowlisted and should appear in the generated code. + allowlisted_types: RegexSet { + methods: { + regex_option! { + /// Generate bindings for the given type. + /// + /// This option is transitive by default. Check the documentation of the + /// [`Builder::allowlist_recursively`] method for further information. + pub fn allowlist_type>(mut self, arg: T) -> Builder { + self.options.allowlisted_types.insert(arg); + self + } + } + }, + as_args: "--allowlist-type", + }, + /// Functions that have been allowlisted and should appear in the generated code. + allowlisted_functions: RegexSet { + methods: { + regex_option! { + /// Generate bindings for the given function. + /// + /// This option is transitive by default. Check the documentation of the + /// [`Builder::allowlist_recursively`] method for further information. + pub fn allowlist_function>(mut self, arg: T) -> Builder { + self.options.allowlisted_functions.insert(arg); + self + } + } + }, + as_args: "--allowlist-function", + }, + /// Variables that have been allowlisted and should appear in the generated code. + allowlisted_vars: RegexSet { + methods: { + regex_option! { + /// Generate bindings for the given variable. + /// + /// This option is transitive by default. Check the documentation of the + /// [`Builder::allowlist_recursively`] method for further information. + pub fn allowlist_var>(mut self, arg: T) -> Builder { + self.options.allowlisted_vars.insert(arg); + self + } + } + }, + as_args: "--allowlist-var", + }, + /// Files whose contents have been allowlisted and should appear in the generated code. + allowlisted_files: RegexSet { + methods: { + regex_option! { + /// Generate bindings for the content of the given file. + /// + /// This option is transitive by default. Check the documentation of the + /// [`Builder::allowlist_recursively`] method for further information. + /// + /// This method will use the argument to match the complete path of the file + /// instead of a section of it. 
+ pub fn allowlist_file>(mut self, arg: T) -> Builder { + self.options.allowlisted_files.insert(arg); + self + } + } + }, + as_args: "--allowlist-file", + }, + /// Items that have been allowlisted and should appear in the generated code. + allowlisted_items: RegexSet { + methods: { + regex_option! { + /// Generate bindings for the given item, regardless of whether it is a type, + /// function, module, etc. + /// + /// This option is transitive by default. Check the documentation of the + /// [`Builder::allowlist_recursively`] method for further information. + pub fn allowlist_item>(mut self, arg: T) -> Builder { + self.options.allowlisted_items.insert(arg); + self + } + } + }, + as_args: "--allowlist-item", + }, + /// The default style of for generated `enum`s. + default_enum_style: EnumVariation { + methods: { + /// Set the default style for generated `enum`s. + /// + /// If this method is not called, the [`EnumVariation::Consts`] style will be used by + /// default. + /// + /// To set the style for individual `enum`s, use [`Builder::bitfield_enum`], + /// [`Builder::newtype_enum`], [`Builder::newtype_global_enum`], + /// [`Builder::rustified_enum`], [`Builder::rustified_non_exhaustive_enum`], + /// [`Builder::constified_enum_module`] or [`Builder::constified_enum`]. + pub fn default_enum_style( + mut self, + arg: EnumVariation, + ) -> Builder { + self.options.default_enum_style = arg; + self + } + }, + as_args: |variation, args| { + if *variation != Default::default() { + args.push("--default-enum-style".to_owned()); + args.push(variation.to_string()); + } + }, + }, + /// `enum`s marked as bitfield-like. This is, newtypes with bitwise operations. + bitfield_enums: RegexSet { + methods: { + regex_option! { + /// Mark the given `enum` as being bitfield-like. + /// + /// This is similar to the [`Builder::newtype_enum`] style, but with the bitwise + /// operators implemented. + pub fn bitfield_enum>(mut self, arg: T) -> Builder { + self.options.bitfield_enums.insert(arg); + self + } + } + }, + as_args: "--bitfield-enum", + }, + /// `enum`s marked as newtypes. + newtype_enums: RegexSet { + methods: { + regex_option! { + /// Mark the given `enum` as a newtype. + /// + /// This means that an integer newtype will be declared to represent the `enum` + /// type and its variants will be represented as constants inside of this type's + /// `impl` block. + pub fn newtype_enum>(mut self, arg: T) -> Builder { + self.options.newtype_enums.insert(arg); + self + } + } + }, + as_args: "--newtype-enum", + }, + /// `enum`s marked as global newtypes . + newtype_global_enums: RegexSet { + methods: { + regex_option! { + /// Mark the given `enum` as a global newtype. + /// + /// This is similar to the [`Builder::newtype_enum`] style, but the constants for + /// each variant are free constants instead of being declared inside an `impl` + /// block for the newtype. + pub fn newtype_global_enum>(mut self, arg: T) -> Builder { + self.options.newtype_global_enums.insert(arg); + self + } + } + }, + as_args: "--newtype-global-enum", + }, + /// `enum`s marked as Rust `enum`s. + rustified_enums: RegexSet { + methods: { + regex_option! { + /// Mark the given `enum` as a Rust `enum`. + /// + /// This means that each variant of the `enum` will be represented as a Rust `enum` + /// variant. + /// + /// **Use this with caution**, creating an instance of a Rust `enum` with an + /// invalid value will cause undefined behaviour. To avoid this, use the + /// [`Builder::newtype_enum`] style instead. 
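Putting the enum-style options together, here is a sketch of a builder that keeps a safe newtype default but opts specific enums into other representations (the type-name regexes are hypothetical, and the `EnumVariation::NewType` field names follow the bindgen codegen API):

```rust
use bindgen::EnumVariation;

let builder = bindgen::builder()
    .header("wrapper.h")
    // Newtype is a safe default: an out-of-range value cannot create an
    // invalid Rust `enum`, unlike the rustified style warned about above.
    .default_enum_style(EnumVariation::NewType {
        is_bitfield: false,
        is_global: false,
    })
    // Individual enums can still be overridden by regex.
    .bitfield_enum(".*Flags")
    .constified_enum_module("PyGILState_STATE");
```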
+ pub fn rustified_enum>(mut self, arg: T) -> Builder { + self.options.rustified_enums.insert(arg); + self + } + } + }, + as_args: "--rustified-enum", + }, + /// `enum`s marked as non-exhaustive Rust `enum`s. + rustified_non_exhaustive_enums: RegexSet { + methods: { + regex_option! { + /// Mark the given `enum` as a non-exhaustive Rust `enum`. + /// + /// This is similar to the [`Builder::rustified_enum`] style, but the `enum` is + /// tagged with the `#[non_exhaustive]` attribute. + pub fn rustified_non_exhaustive_enum>(mut self, arg: T) -> Builder { + self.options.rustified_non_exhaustive_enums.insert(arg); + self + } + } + }, + as_args: "--rustified-non-exhaustive-enums", + }, + /// `enum`s marked as modules of constants. + constified_enum_modules: RegexSet { + methods: { + regex_option! { + /// Mark the given `enum` as a module with a set of integer constants. + pub fn constified_enum_module>(mut self, arg: T) -> Builder { + self.options.constified_enum_modules.insert(arg); + self + } + } + }, + as_args: "--constified-enum-module", + }, + /// `enum`s marked as a set of constants. + constified_enums: RegexSet { + methods: { + regex_option! { + /// Mark the given `enum` as a set of integer constants. + /// + /// This is similar to the [`Builder::constified_enum_module`] style, but the + /// constants are generated in the current module instead of in a new module. + pub fn constified_enum>(mut self, arg: T) -> Builder { + self.options.constified_enums.insert(arg); + self + } + } + }, + as_args: "--constified-enum", + }, + /// The default type signedness for C macro constants. + default_macro_constant_type: MacroTypeVariation { + methods: { + /// Set the default type signedness to be used for macro constants. + /// + /// If this method is not called, [`MacroTypeVariation::Unsigned`] is used by default. + /// + /// To set the type for individual macro constants, use the + /// [`ParseCallbacks::int_macro`] method. + pub fn default_macro_constant_type(mut self, arg: MacroTypeVariation) -> Builder { + self.options.default_macro_constant_type = arg; + self + } + + }, + as_args: |variation, args| { + if *variation != Default::default() { + args.push("--default-macro-constant-type".to_owned()); + args.push(variation.to_string()); + } + }, + }, + /// The default style of code generation for `typedef`s. + default_alias_style: AliasVariation { + methods: { + /// Set the default style of code generation for `typedef`s. + /// + /// If this method is not called, the [`AliasVariation::TypeAlias`] style is used by + /// default. + /// + /// To set the style for individual `typedefs`s, use [`Builder::type_alias`], + /// [`Builder::new_type_alias`] or [`Builder::new_type_alias_deref`]. + pub fn default_alias_style( + mut self, + arg: AliasVariation, + ) -> Builder { + self.options.default_alias_style = arg; + self + } + }, + as_args: |variation, args| { + if *variation != Default::default() { + args.push("--default-alias-style".to_owned()); + args.push(variation.to_string()); + } + }, + }, + /// `typedef` patterns that will use regular type aliasing. + type_alias: RegexSet { + methods: { + regex_option! { + /// Mark the given `typedef` as a regular Rust `type` alias. + /// + /// This is the default behavior, meaning that this method only comes into effect + /// if a style different from [`AliasVariation::TypeAlias`] was passed to the + /// [`Builder::default_alias_style`] method. 
+ pub fn type_alias>(mut self, arg: T) -> Builder { + self.options.type_alias.insert(arg); + self + } + } + }, + as_args: "--type-alias", + }, + /// `typedef` patterns that will be aliased by creating a newtype. + new_type_alias: RegexSet { + methods: { + regex_option! { + /// Mark the given `typedef` as a Rust newtype by having the aliased + /// type be wrapped in a `struct` with `#[repr(transparent)]`. + /// + /// This method can be used to enforce stricter type checking. + pub fn new_type_alias>(mut self, arg: T) -> Builder { + self.options.new_type_alias.insert(arg); + self + } + } + }, + as_args: "--new-type-alias", + }, + /// `typedef` patterns that will be wrapped in a newtype implementing `Deref` and `DerefMut`. + new_type_alias_deref: RegexSet { + methods: { + regex_option! { + /// Mark the given `typedef` to be generated as a newtype that can be dereferenced. + /// + /// This is similar to the [`Builder::new_type_alias`] style, but the newtype + /// implements `Deref` and `DerefMut` with the aliased type as a target. + pub fn new_type_alias_deref>(mut self, arg: T) -> Builder { + self.options.new_type_alias_deref.insert(arg); + self + } + } + }, + as_args: "--new-type-alias-deref", + }, + /// The default style of code to generate for `union`s containing non-`Copy` members. + default_non_copy_union_style: NonCopyUnionStyle { + methods: { + /// Set the default style of code to generate for `union`s with non-`Copy` members. + /// + /// If this method is not called, the [`NonCopyUnionStyle::BindgenWrapper`] style is + /// used by default. + /// + /// To set the style for individual `union`s, use [`Builder::bindgen_wrapper_union`] or + /// [`Builder::manually_drop_union`]. + pub fn default_non_copy_union_style(mut self, arg: NonCopyUnionStyle) -> Self { + self.options.default_non_copy_union_style = arg; + self + } + }, + as_args: |style, args| { + if *style != Default::default() { + args.push("--default-non-copy-union-style".to_owned()); + args.push(style.to_string()); + } + }, + }, + /// The patterns marking non-`Copy` `union`s as using the `bindgen` generated wrapper. + bindgen_wrapper_union: RegexSet { + methods: { + regex_option! { + /// Mark the given `union` to use a `bindgen`-generated wrapper for its members if at + /// least one them is not `Copy`. + /// + /// This is the default behavior, meaning that this method only comes into effect + /// if a style different from [`NonCopyUnionStyle::BindgenWrapper`] was passed to + /// the [`Builder::default_non_copy_union_style`] method. + pub fn bindgen_wrapper_union>(mut self, arg: T) -> Self { + self.options.bindgen_wrapper_union.insert(arg); + self + } + } + }, + as_args: "--bindgen-wrapper-union", + }, + /// The patterns marking non-`Copy` `union`s as using the `ManuallyDrop` wrapper. + manually_drop_union: RegexSet { + methods: { + regex_option! { + /// Mark the given `union` to use [`::core::mem::ManuallyDrop`] for its members if + /// at least one of them is not `Copy`. + /// + /// The `ManuallyDrop` type was stabilized in Rust 1.20.0, do not use this option + /// if your target version is lower than this. + pub fn manually_drop_union>(mut self, arg: T) -> Self { + self.options.manually_drop_union.insert(arg); + self + } + } + + }, + as_args: "--manually-drop-union", + }, + + + /// Whether we should generate built-in definitions. + builtins: bool { + methods: { + /// Generate Rust bindings for built-in definitions (for example `__builtin_va_list`). + /// + /// Bindings for built-in definitions are not emitted by default. 
+ pub fn emit_builtins(mut self) -> Builder { + self.options.builtins = true; + self + } + }, + as_args: "--builtins", + }, + /// Whether we should dump the Clang AST for debugging purposes. + emit_ast: bool { + methods: { + /// Emit the Clang AST to `stdout` for debugging purposes. + /// + /// The Clang AST is not emitted by default. + pub fn emit_clang_ast(mut self) -> Builder { + self.options.emit_ast = true; + self + } + }, + as_args: "--emit-clang-ast", + }, + /// Whether we should dump our IR for debugging purposes. + emit_ir: bool { + methods: { + /// Emit the `bindgen` internal representation to `stdout` for debugging purposes. + /// + /// This internal representation is not emitted by default. + pub fn emit_ir(mut self) -> Builder { + self.options.emit_ir = true; + self + } + }, + as_args: "--emit-ir", + }, + /// Output path for the `graphviz` DOT file. + emit_ir_graphviz: Option { + methods: { + /// Set the path for the file where the`bindgen` internal representation will be + /// emitted as a graph using the `graphviz` DOT language. + /// + /// This graph representation is not emitted by default. + pub fn emit_ir_graphviz>(mut self, path: T) -> Builder { + let path = path.into(); + self.options.emit_ir_graphviz = Some(path); + self + } + }, + as_args: "--emit-ir-graphviz", + }, + + /// Whether we should emulate C++ namespaces with Rust modules. + enable_cxx_namespaces: bool { + methods: { + /// Emulate C++ namespaces using Rust modules in the generated bindings. + /// + /// C++ namespaces are not emulated by default. + pub fn enable_cxx_namespaces(mut self) -> Builder { + self.options.enable_cxx_namespaces = true; + self + } + }, + as_args: "--enable-cxx-namespaces", + }, + /// Whether we should try to find unexposed attributes in functions. + enable_function_attribute_detection: bool { + methods: { + /// Enable detecting function attributes on C functions. + /// + /// This enables the following features: + /// - Add `#[must_use]` attributes to Rust items whose C counterparts are marked as so. + /// This feature also requires that the Rust target version supports the attribute. + /// - Set `!` as the return type for Rust functions whose C counterparts are marked as + /// diverging. + /// + /// This option can be quite slow in some cases (check [#1465]), so it is disabled by + /// default. + /// + /// [#1465]: https://github.com/rust-lang/rust-bindgen/issues/1465 + pub fn enable_function_attribute_detection(mut self) -> Self { + self.options.enable_function_attribute_detection = true; + self + } + + }, + as_args: "--enable-function-attribute-detection", + }, + /// Whether we should avoid mangling names with namespaces. + disable_name_namespacing: bool { + methods: { + /// Disable name auto-namespacing. + /// + /// By default, `bindgen` mangles names like `foo::bar::Baz` to look like `foo_bar_Baz` + /// instead of just `Baz`. This method disables that behavior. + /// + /// Note that this does not change the names used for allowlisting and blocklisting, + /// which should still be mangled with the namespaces. Additionally, this option may + /// cause `bindgen` to generate duplicate names. + pub fn disable_name_namespacing(mut self) -> Builder { + self.options.disable_name_namespacing = true; + self + } + }, + as_args: "--disable-name-namespacing", + }, + /// Whether we should avoid generating nested `struct` names. + disable_nested_struct_naming: bool { + methods: { + /// Disable nested `struct` naming. + /// + /// The following `struct`s have different names for C and C++. 
In C, they are visible + /// as `foo` and `bar`. In C++, they are visible as `foo` and `foo::bar`. + /// + /// ```c + /// struct foo { + /// struct bar { + /// } b; + /// }; + /// ``` + /// + /// `bindgen` tries to avoid duplicate names by default, so it follows the C++ naming + /// convention and it generates `foo` and `foo_bar` instead of just `foo` and `bar`. + /// + /// This method disables this behavior and it is indented to be used only for headers + /// that were written in C. + pub fn disable_nested_struct_naming(mut self) -> Builder { + self.options.disable_nested_struct_naming = true; + self + } + }, + as_args: "--disable-nested-struct-naming", + }, + /// Whether we should avoid embedding version identifiers into source code. + disable_header_comment: bool { + methods: { + /// Do not insert the `bindgen` version identifier into the generated bindings. + /// + /// This identifier is inserted by default. + pub fn disable_header_comment(mut self) -> Self { + self.options.disable_header_comment = true; + self + } + + }, + as_args: "--disable-header-comment", + }, + /// Whether we should generate layout tests for generated `struct`s. + layout_tests: bool { + default: true, + methods: { + /// Set whether layout tests should be generated. + /// + /// Layout tests are generated by default. + pub fn layout_tests(mut self, doit: bool) -> Self { + self.options.layout_tests = doit; + self + } + }, + as_args: |value, args| (!value).as_args(args, "--no-layout-tests"), + }, + /// Whether we should implement `Debug` for types that cannot derive it. + impl_debug: bool { + methods: { + /// Set whether `Debug` should be implemented for types that cannot derive it. + /// + /// This option is disabled by default. + pub fn impl_debug(mut self, doit: bool) -> Self { + self.options.impl_debug = doit; + self + } + + }, + as_args: "--impl-debug", + }, + /// Whether we should implement `PartialEq` types that cannot derive it. + impl_partialeq: bool { + methods: { + /// Set whether `PartialEq` should be implemented for types that cannot derive it. + /// + /// This option is disabled by default. + pub fn impl_partialeq(mut self, doit: bool) -> Self { + self.options.impl_partialeq = doit; + self + } + }, + as_args: "--impl-partialeq", + }, + /// Whether we should derive `Copy` when possible. + derive_copy: bool { + default: true, + methods: { + /// Set whether the `Copy` trait should be derived when possible. + /// + /// `Copy` is derived by default. + pub fn derive_copy(mut self, doit: bool) -> Self { + self.options.derive_copy = doit; + self + } + }, + as_args: |value, args| (!value).as_args(args, "--no-derive-copy"), + }, + + /// Whether we should derive `Debug` when possible. + derive_debug: bool { + default: true, + methods: { + /// Set whether the `Debug` trait should be derived when possible. + /// + /// The [`Builder::impl_debug`] method can be used to implement `Debug` for types that + /// cannot derive it. + /// + /// `Debug` is derived by default. + pub fn derive_debug(mut self, doit: bool) -> Self { + self.options.derive_debug = doit; + self + } + }, + as_args: |value, args| (!value).as_args(args, "--no-derive-debug"), + }, + + /// Whether we should derive `Default` when possible. + derive_default: bool { + methods: { + /// Set whether the `Default` trait should be derived when possible. + /// + /// `Default` is not derived by default. 
+ pub fn derive_default(mut self, doit: bool) -> Self { + self.options.derive_default = doit; + self + } + }, + as_args: |&value, args| { + let arg = if value { + "--with-derive-default" + } else { + "--no-derive-default" + }; + + args.push(arg.to_owned()); + }, + }, + /// Whether we should derive `Hash` when possible. + derive_hash: bool { + methods: { + /// Set whether the `Hash` trait should be derived when possible. + /// + /// `Hash` is not derived by default. + pub fn derive_hash(mut self, doit: bool) -> Self { + self.options.derive_hash = doit; + self + } + }, + as_args: "--with-derive-hash", + }, + /// Whether we should derive `PartialOrd` when possible. + derive_partialord: bool { + methods: { + /// Set whether the `PartialOrd` trait should be derived when possible. + /// + /// Take into account that `Ord` cannot be derived for a type that does not implement + /// `PartialOrd`. For this reason, setting this method to `false` also sets + /// automatically [`Builder::derive_ord`] to `false`. + /// + /// `PartialOrd` is not derived by default. + pub fn derive_partialord(mut self, doit: bool) -> Self { + self.options.derive_partialord = doit; + if !doit { + self.options.derive_ord = false; + } + self + } + }, + as_args: "--with-derive-partialord", + }, + /// Whether we should derive `Ord` when possible. + derive_ord: bool { + methods: { + /// Set whether the `Ord` trait should be derived when possible. + /// + /// Take into account that `Ord` cannot be derived for a type that does not implement + /// `PartialOrd`. For this reason, the value set with this method will also be set + /// automatically for [`Builder::derive_partialord`]. + /// + /// `Ord` is not derived by default. + pub fn derive_ord(mut self, doit: bool) -> Self { + self.options.derive_ord = doit; + self.options.derive_partialord = doit; + self + } + }, + as_args: "--with-derive-ord", + }, + /// Whether we should derive `PartialEq` when possible. + derive_partialeq: bool { + methods: { + /// Set whether the `PartialEq` trait should be derived when possible. + /// + /// Take into account that `Eq` cannot be derived for a type that does not implement + /// `PartialEq`. For this reason, setting this method to `false` also sets + /// automatically [`Builder::derive_eq`] to `false`. + /// + /// The [`Builder::impl_partialeq`] method can be used to implement `PartialEq` for + /// types that cannot derive it. + /// + /// `PartialEq` is not derived by default. + pub fn derive_partialeq(mut self, doit: bool) -> Self { + self.options.derive_partialeq = doit; + if !doit { + self.options.derive_eq = false; + } + self + } + }, + as_args: "--with-derive-partialeq", + }, + /// Whether we should derive `Eq` when possible. + derive_eq: bool { + methods: { + /// Set whether the `Eq` trait should be derived when possible. + /// + /// Take into account that `Eq` cannot be derived for a type that does not implement + /// `PartialEq`. For this reason, the value set with this method will also be set + /// automatically for [`Builder::derive_partialeq`]. + /// + /// `Eq` is not derived by default. + pub fn derive_eq(mut self, doit: bool) -> Self { + self.options.derive_eq = doit; + if doit { + self.options.derive_partialeq = doit; + } + self + } + }, + as_args: "--with-derive-eq", + }, + /// Whether we should use `core` instead of `std`. + /// + /// If this option is enabled and the Rust target version is greater than 1.64, the prefix for + /// C platform-specific types will be `::core::ffi` instead of `::core::os::raw`. 
+ use_core: bool { + methods: { + /// Use `core` instead of `std` in the generated bindings. + /// + /// `std` is used by default. + pub fn use_core(mut self) -> Builder { + self.options.use_core = true; + self + } + + }, + as_args: "--use-core", + }, + /// An optional prefix for the C platform-specific types. + ctypes_prefix: Option { + methods: { + /// Use the given prefix for the C platform-specific types instead of `::std::os::raw`. + /// + /// Alternatively, the [`Builder::use_core`] method can be used to set the prefix to + /// `::core::ffi` or `::core::os::raw`. + pub fn ctypes_prefix>(mut self, prefix: T) -> Builder { + self.options.ctypes_prefix = Some(prefix.into()); + self + } + }, + as_args: "--ctypes-prefix", + }, + /// The prefix for anonymous fields. + anon_fields_prefix: String { + default: DEFAULT_ANON_FIELDS_PREFIX.into(), + methods: { + /// Use the given prefix for the anonymous fields. + /// + /// An anonymous field, is a field of a C/C++ type that does not have a name. For + /// example, in the following C code: + /// ```c + /// struct integer { + /// struct { + /// int inner; + /// }; + /// } + /// ``` + /// + /// The only field of the `integer` `struct` is an anonymous field and its Rust + /// representation will be named using this prefix followed by an integer identifier. + /// + /// The default prefix is `__bindgen_anon_`. + pub fn anon_fields_prefix>(mut self, prefix: T) -> Builder { + self.options.anon_fields_prefix = prefix.into(); + self + } + }, + as_args: |prefix, args| { + if prefix != DEFAULT_ANON_FIELDS_PREFIX { + args.push("--anon-fields-prefix".to_owned()); + args.push(prefix.clone()); + } + }, + }, + /// Whether to measure the time for each one of the `bindgen` phases. + time_phases: bool { + methods: { + /// Set whether to measure the elapsed time for each one of the `bindgen` phases. This + /// information is printed to `stderr`. + /// + /// The elapsed time is not measured by default. + pub fn time_phases(mut self, doit: bool) -> Self { + self.options.time_phases = doit; + self + } + }, + as_args: "--time-phases", + }, + /// Whether to convert C float types to `f32` and `f64`. + convert_floats: bool { + default: true, + methods: { + /// Avoid converting C float types to `f32` and `f64`. + pub fn no_convert_floats(mut self) -> Self { + self.options.convert_floats = false; + self + } + }, + as_args: |value, args| (!value).as_args(args, "--no-convert-floats"), + }, + /// The set of raw lines to be prepended to the top-level module of the generated Rust code. + raw_lines: Vec> { + methods: { + /// Add a line of Rust code at the beginning of the generated bindings. The string is + /// passed through without any modification. + pub fn raw_line>(mut self, arg: T) -> Self { + self.options.raw_lines.push(arg.into().into_boxed_str()); + self + } + }, + as_args: |raw_lines, args| { + for line in raw_lines { + args.push("--raw-line".to_owned()); + args.push(line.clone().into()); + } + }, + }, + /// The set of raw lines to prepend to different modules. + module_lines: HashMap, Vec>> { + methods: { + /// Add a given line to the beginning of a given module. + /// + /// This option only comes into effect if the [`Builder::enable_cxx_namespaces`] method + /// is also being called. 
+ pub fn module_raw_line(mut self, module: T, line: U) -> Self + where + T: Into, + U: Into, + { + self.options + .module_lines + .entry(module.into().into_boxed_str()) + .or_default() + .push(line.into().into_boxed_str()); + self + } + }, + as_args: |module_lines, args| { + for (module, lines) in module_lines { + for line in lines { + args.push("--module-raw-line".to_owned()); + args.push(module.clone().into()); + args.push(line.clone().into()); + } + } + }, + }, + /// The input header files. + input_headers: Vec> { + methods: { + /// Add an input C/C++ header to generate bindings for. + /// + /// This can be used to generate bindings for a single header: + /// + /// ```ignore + /// let bindings = bindgen::Builder::default() + /// .header("input.h") + /// .generate() + /// .unwrap(); + /// ``` + /// + /// Or for multiple headers: + /// + /// ```ignore + /// let bindings = bindgen::Builder::default() + /// .header("first.h") + /// .header("second.h") + /// .header("third.h") + /// .generate() + /// .unwrap(); + /// ``` + pub fn header>(mut self, header: T) -> Builder { + self.options.input_headers.push(header.into().into_boxed_str()); + self + } + + /// Add input C/C++ header(s) to generate bindings for. + /// + /// This can be used to generate bindings for a single header: + /// + /// ```ignore + /// let bindings = bindgen::Builder::default() + /// .headers(["input.h"]) + /// .generate() + /// .unwrap(); + /// ``` + /// + /// Or for multiple headers: + /// + /// ```ignore + /// let bindings = bindgen::Builder::default() + /// .headers(["first.h", "second.h", "third.h"]) + /// .generate() + /// .unwrap(); + /// ``` + pub fn headers(mut self, headers: I) -> Builder + where + I::Item: Into, + { + self.options + .input_headers + .extend(headers.into_iter().map(Into::into).map(Into::into)); + self + } + }, + // This field is handled specially inside the macro. + as_args: ignore, + }, + /// The set of arguments to be passed straight through to Clang. + clang_args: Vec> { + methods: { + /// Add an argument to be passed straight through to Clang. + pub fn clang_arg>(self, arg: T) -> Builder { + self.clang_args([arg.into().into_boxed_str()]) + } + + /// Add several arguments to be passed straight through to Clang. + pub fn clang_args(mut self, args: I) -> Builder + where + I::Item: AsRef, + { + for arg in args { + self.options.clang_args.push(arg.as_ref().to_owned().into_boxed_str()); + } + self + } + }, + // This field is handled specially inside the macro. + as_args: ignore, + }, + /// The set of arguments to be passed straight through to Clang for the macro fallback code. + fallback_clang_args: Vec> { + methods: {}, + as_args: ignore, + }, + /// Tuples of unsaved file contents of the form (name, contents). + input_header_contents: Vec<(Box, Box)> { + methods: { + /// Add `contents` as an input C/C++ header named `name`. + /// + /// This can be used to inject additional C/C++ code as an input without having to + /// create additional header files. + pub fn header_contents(mut self, name: &str, contents: &str) -> Builder { + // Apparently clang relies on having virtual FS correspondent to + // the real one, so we need absolute paths here + let absolute_path = env::current_dir() + .expect("Cannot retrieve current directory") + .join(name) + .to_str() + .expect("Cannot convert current directory name to string") + .into(); + self.options + .input_header_contents + .push((absolute_path, contents.into())); + self + } + }, + // Header contents cannot be added from the CLI. 
+ as_args: ignore, + }, + /// A user-provided visitor to allow customizing different kinds of situations. + parse_callbacks: Vec> { + methods: { + /// Add a new [`ParseCallbacks`] instance to configure types in different situations. + /// + /// This can also be used with [`CargoCallbacks`](struct@crate::CargoCallbacks) to emit + /// `cargo:rerun-if-changed=...` for all `#include`d header files. + pub fn parse_callbacks(mut self, cb: Box) -> Self { + self.options.parse_callbacks.push(Rc::from(cb)); + self + } + }, + as_args: |_callbacks, _args| { + #[cfg(feature = "__cli")] + for cb in _callbacks { + _args.extend(cb.cli_args()); + } + }, + }, + /// Which kind of items should we generate. We generate all of them by default. + codegen_config: CodegenConfig { + default: CodegenConfig::all(), + methods: { + /// Do not generate any functions. + /// + /// Functions are generated by default. + pub fn ignore_functions(mut self) -> Builder { + self.options.codegen_config.remove(CodegenConfig::FUNCTIONS); + self + } + + /// Do not generate any methods. + /// + /// Methods are generated by default. + pub fn ignore_methods(mut self) -> Builder { + self.options.codegen_config.remove(CodegenConfig::METHODS); + self + } + + /// Choose what to generate using a [`CodegenConfig`]. + /// + /// This option overlaps with [`Builder::ignore_functions`] and + /// [`Builder::ignore_methods`]. + /// + /// All the items in `CodegenConfig` are generated by default. + pub fn with_codegen_config(mut self, config: CodegenConfig) -> Self { + self.options.codegen_config = config; + self + } + }, + as_args: |codegen_config, args| { + if !codegen_config.functions() { + args.push("--ignore-functions".to_owned()); + } + + args.push("--generate".to_owned()); + + //Temporary placeholder for the 4 options below. + let mut options: Vec = Vec::new(); + if codegen_config.functions() { + options.push("functions".to_owned()); + } + + if codegen_config.types() { + options.push("types".to_owned()); + } + + if codegen_config.vars() { + options.push("vars".to_owned()); + } + + if codegen_config.methods() { + options.push("methods".to_owned()); + } + + if codegen_config.constructors() { + options.push("constructors".to_owned()); + } + + if codegen_config.destructors() { + options.push("destructors".to_owned()); + } + + args.push(options.join(",")); + + if !codegen_config.methods() { + args.push("--ignore-methods".to_owned()); + } + }, + }, + /// Whether to treat inline namespaces conservatively. + conservative_inline_namespaces: bool { + methods: { + /// Treat inline namespaces conservatively. + /// + /// This is tricky, because in C++ is technically legal to override an item + /// defined in an inline namespace: + /// + /// ```cpp + /// inline namespace foo { + /// using Bar = int; + /// } + /// using Bar = long; + /// ``` + /// + /// Even though referencing `Bar` is a compiler error. + /// + /// We want to support this (arguably esoteric) use case, but we do not want to make + /// the rest of `bindgen` users pay an usability penalty for that. + /// + /// To support this, we need to keep all the inline namespaces around, but then using + /// `bindgen` becomes a bit more difficult, because you cannot reference paths like + /// `std::string` (you'd need to use the proper inline namespace). + /// + /// We could complicate a lot of the logic to detect name collisions and, in the + /// absence of collisions, generate a `pub use inline_ns::*` or something like that. 
+ /// + /// That is probably something we can do to improve the usability of this option if we + /// realize it is needed way more often. Our guess is that this extra logic is not + /// going to be very useful. + /// + /// This option is disabled by default. + pub fn conservative_inline_namespaces(mut self) -> Builder { + self.options.conservative_inline_namespaces = true; + self + } + }, + as_args: "--conservative-inline-namespaces", + }, + /// Whether to keep documentation comments in the generated output. + generate_comments: bool { + default: true, + methods: { + /// Set whether the generated bindings should contain documentation comments. + /// + /// Documentation comments are included by default. + /// + /// Note that clang excludes comments from system headers by default, pass + /// `"-fretain-comments-from-system-headers"` to the [`Builder::clang_arg`] method to + /// include them. + /// + /// It is also possible to process all comments and not just documentation using the + /// `"-fparse-all-comments"` flag. Check [these slides on clang comment parsing]( + /// https://llvm.org/devmtg/2012-11/Gribenko_CommentParsing.pdf) for more information + /// and examples. + pub fn generate_comments(mut self, doit: bool) -> Self { + self.options.generate_comments = doit; + self + } + }, + as_args: |value, args| (!value).as_args(args, "--no-doc-comments"), + }, + /// Whether to generate inline functions. + generate_inline_functions: bool { + methods: { + /// Set whether to generate inline functions. + /// + /// This option is disabled by default. + /// + /// Note that they will usually not work. However you can use `-fkeep-inline-functions` + /// or `-fno-inline-functions` if you are responsible of compiling the library to make + /// them callable. + /// + /// Check the [`Builder::wrap_static_fns`] method for an alternative. + pub fn generate_inline_functions(mut self, doit: bool) -> Self { + self.options.generate_inline_functions = doit; + self + } + }, + as_args: "--generate-inline-functions", + }, + /// Whether to allowlist types recursively. + allowlist_recursively: bool { + default: true, + methods: { + /// Set whether to recursively allowlist items. + /// + /// Items are allowlisted recursively by default. + /// + /// Given that we have explicitly allowlisted the `initiate_dance_party` function in + /// this C header: + /// + /// ```c + /// typedef struct MoonBoots { + /// int bouncy_level; + /// } MoonBoots; + /// + /// void initiate_dance_party(MoonBoots* boots); + /// ``` + /// + /// We would normally generate bindings to both the `initiate_dance_party` function and + /// the `MoonBoots` type that it transitively references. If `false` is passed to this + /// method, `bindgen` will not emit bindings for anything except the explicitly + /// allowlisted items, meaning that the definition for `MoonBoots` would not be + /// generated. However, the `initiate_dance_party` function would still reference + /// `MoonBoots`! + /// + /// **Disabling this feature will almost certainly cause `bindgen` to emit bindings + /// that will not compile!** If you disable this feature, then it is *your* + /// responsibility to provide definitions for every type that is referenced from an + /// explicitly allowlisted item. One way to provide the missing definitions is by using + /// the [`Builder::raw_line`] method, another would be to define them in Rust and then + /// `include!(...)` the bindings immediately afterwards. 
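A minimal sketch of the non-recursive-allowlist workflow just described (header, type, and module names are placeholders; `allowlist_function` is defined earlier in this builder, outside the excerpt above):

```rust
fn main() {
    let _bindings = bindgen::Builder::default()
        .header("moon_boots.h")
        .allowlist_function("initiate_dance_party")
        // Emit only the explicitly allowlisted items...
        .allowlist_recursively(false)
        // ...so a definition for `MoonBoots` must be supplied by hand,
        // for example by pulling one in with `raw_line`.
        .raw_line("use crate::ffi::MoonBoots;")
        .generate()
        .expect("bindgen failed to generate bindings");
}
```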
+ pub fn allowlist_recursively(mut self, doit: bool) -> Self { + self.options.allowlist_recursively = doit; + self + } + }, + as_args: |value, args| (!value).as_args(args, "--no-recursive-allowlist"), + }, + /// Whether to emit `#[macro_use] extern crate objc;` instead of `use objc;` in the prologue of + /// the files generated from objective-c files. + objc_extern_crate: bool { + methods: { + /// Emit `#[macro_use] extern crate objc;` instead of `use objc;` in the prologue of + /// the files generated from objective-c files. + /// + /// `use objc;` is emitted by default. + pub fn objc_extern_crate(mut self, doit: bool) -> Self { + self.options.objc_extern_crate = doit; + self + } + }, + as_args: "--objc-extern-crate", + }, + /// Whether to generate proper block signatures instead of `void` pointers. + generate_block: bool { + methods: { + /// Generate proper block signatures instead of `void` pointers. + /// + /// `void` pointers are used by default. + pub fn generate_block(mut self, doit: bool) -> Self { + self.options.generate_block = doit; + self + } + }, + as_args: "--generate-block", + }, + /// Whether to generate strings as `CStr`. + generate_cstr: bool { + methods: { + /// Set whether string constants should be generated as `&CStr` instead of `&[u8]`. + /// + /// A minimum Rust target of 1.59 is required for this to have any effect as support + /// for `CStr::from_bytes_with_nul_unchecked` in `const` contexts is needed. + /// + /// This option is disabled by default but will become enabled by default in a future + /// release, so enabling this is recommended. + pub fn generate_cstr(mut self, doit: bool) -> Self { + self.options.generate_cstr = doit; + self + } + }, + as_args: "--generate-cstr", + }, + /// Whether to emit `#[macro_use] extern crate block;` instead of `use block;` in the prologue + /// of the files generated from apple block files. + block_extern_crate: bool { + methods: { + /// Emit `#[macro_use] extern crate block;` instead of `use block;` in the prologue of + /// the files generated from apple block files. + /// + /// `use block;` is emitted by default. + pub fn block_extern_crate(mut self, doit: bool) -> Self { + self.options.block_extern_crate = doit; + self + } + }, + as_args: "--block-extern-crate", + }, + /// Whether to use the clang-provided name mangling. + enable_mangling: bool { + default: true, + methods: { + /// Set whether to use the clang-provided name mangling. This is probably needed for + /// C++ features. + /// + /// The mangling provided by clang is used by default. + /// + /// We allow disabling this option because some old `libclang` versions seem to return + /// incorrect results in some cases for non-mangled functions, check [#528] for more + /// information. + /// + /// [#528]: https://github.com/rust-lang/rust-bindgen/issues/528 + pub fn trust_clang_mangling(mut self, doit: bool) -> Self { + self.options.enable_mangling = doit; + self + } + + }, + as_args: |value, args| (!value).as_args(args, "--distrust-clang-mangling"), + }, + /// Whether to detect include paths using `clang_sys`. + detect_include_paths: bool { + default: true, + methods: { + /// Set whether to detect include paths using `clang_sys`. + /// + /// `clang_sys` is used to detect include paths by default. 
+ pub fn detect_include_paths(mut self, doit: bool) -> Self { + self.options.detect_include_paths = doit; + self + } + }, + as_args: |value, args| (!value).as_args(args, "--no-include-path-detection"), + }, + /// Whether we should try to fit macro constants into types smaller than `u32` and `i32`. + fit_macro_constants: bool { + methods: { + /// Set whether `bindgen` should try to fit macro constants into types smaller than `u32` + /// and `i32`. + /// + /// This option is disabled by default. + pub fn fit_macro_constants(mut self, doit: bool) -> Self { + self.options.fit_macro_constants = doit; + self + } + }, + as_args: "--fit-macro-constant-types", + }, + /// Whether to prepend the `enum` name to constant or newtype variants. + prepend_enum_name: bool { + default: true, + methods: { + /// Set whether to prepend the `enum` name to constant or newtype variants. + /// + /// The `enum` name is prepended by default. + pub fn prepend_enum_name(mut self, doit: bool) -> Self { + self.options.prepend_enum_name = doit; + self + } + }, + as_args: |value, args| (!value).as_args(args, "--no-prepend-enum-name"), + }, + /// Version of the Rust compiler to target. + rust_target: RustTarget { + methods: { + /// Specify the Rust target version. + /// + /// The default target is the latest stable Rust version. + pub fn rust_target(mut self, rust_target: RustTarget) -> Self { + self.options.set_rust_target(rust_target); + self + } + }, + as_args: |rust_target, args| { + args.push("--rust-target".to_owned()); + args.push(rust_target.to_string()); + }, + }, + /// The Rust edition to use for code generation. + rust_edition: Option { + methods: { + /// Specify the Rust target edition. + /// + /// The default edition is the latest edition supported by the chosen Rust target. + pub fn rust_edition(mut self, rust_edition: RustEdition) -> Self { + self.options.rust_edition = Some(rust_edition); + self + } + } + as_args: |edition, args| { + if let Some(edition) = edition { + args.push("--rust-edition".to_owned()); + args.push(edition.to_string()); + } + }, + }, + /// Features to be enabled. They are derived from `rust_target`. + rust_features: RustFeatures { + methods: {}, + // This field cannot be set from the CLI, + as_args: ignore, + }, + /// Enable support for native Rust unions if they are supported. + untagged_union: bool { + default: true, + methods: { + /// Disable support for native Rust unions, if supported. + /// + /// The default value of this option is set based on the value passed to + /// [`Builder::rust_target`]. + pub fn disable_untagged_union(mut self) -> Self { + self.options.untagged_union = false; + self + } + } + as_args: |value, args| (!value).as_args(args, "--disable-untagged-union"), + }, + /// Whether we should record which items in the regex sets did match any C items. + record_matches: bool { + default: true, + methods: { + /// Set whether we should record which items in our regex sets did match any C items. + /// + /// Matches are recorded by default. + pub fn record_matches(mut self, doit: bool) -> Self { + self.options.record_matches = doit; + self + } + + }, + as_args: |value, args| (!value).as_args(args, "--no-record-matches"), + }, + /// Whether `size_t` should be translated to `usize` automatically. + size_t_is_usize: bool { + default: true, + methods: { + /// Set whether `size_t` should be translated to `usize`. + /// + /// If `size_t` is translated to `usize`, type definitions for `size_t` will not be + /// emitted. + /// + /// `size_t` is translated to `usize` by default. 
+ pub fn size_t_is_usize(mut self, is: bool) -> Self { + self.options.size_t_is_usize = is; + self + } + }, + as_args: |value, args| (!value).as_args(args, "--no-size_t-is-usize"), + }, + /// The tool that should be used to format the generated bindings. + formatter: Formatter { + methods: { + /// Set whether `rustfmt` should be used to format the generated bindings. + /// + /// `rustfmt` is used by default. + /// + /// This method overlaps in functionality with the more general [`Builder::formatter`]. + /// Thus, the latter should be preferred. + #[deprecated] + pub fn rustfmt_bindings(mut self, doit: bool) -> Self { + self.options.formatter = if doit { + Formatter::Rustfmt + } else { + Formatter::None + }; + self + } + + /// Set which tool should be used to format the generated bindings. + /// + /// The default formatter is [`Formatter::Rustfmt`]. + /// + /// To be able to use `prettyplease` as a formatter, the `"prettyplease"` feature for + /// `bindgen` must be enabled in the Cargo manifest. + pub fn formatter(mut self, formatter: Formatter) -> Self { + self.options.formatter = formatter; + self + } + }, + as_args: |formatter, args| { + if *formatter != Default::default() { + args.push("--formatter".to_owned()); + args.push(formatter.to_string()); + } + }, + }, + /// The absolute path to the `rustfmt` configuration file. + rustfmt_configuration_file: Option { + methods: { + /// Set the absolute path to the `rustfmt` configuration file. + /// + /// The default `rustfmt` options are used if `None` is passed to this method or if + /// this method is not called at all. + /// + /// Calling this method will set the [`Builder::rustfmt_bindings`] option to `true` + /// and the [`Builder::formatter`] option to [`Formatter::Rustfmt`]. + pub fn rustfmt_configuration_file(mut self, path: Option) -> Self { + self = self.formatter(Formatter::Rustfmt); + self.options.rustfmt_configuration_file = path; + self + } + }, + as_args: "--rustfmt-configuration-file", + }, + /// Types that should not derive `PartialEq`. + no_partialeq_types: RegexSet { + methods: { + regex_option! { + /// Do not derive `PartialEq` for a given type. + pub fn no_partialeq>(mut self, arg: T) -> Builder { + self.options.no_partialeq_types.insert(arg.into()); + self + } + } + }, + as_args: "--no-partialeq", + }, + /// Types that should not derive `Copy`. + no_copy_types: RegexSet { + methods: { + regex_option! { + /// Do not derive `Copy` and `Clone` for a given type. + pub fn no_copy>(mut self, arg: T) -> Self { + self.options.no_copy_types.insert(arg.into()); + self + } + } + }, + as_args: "--no-copy", + }, + /// Types that should not derive `Debug`. + no_debug_types: RegexSet { + methods: { + regex_option! { + /// Do not derive `Debug` for a given type. + pub fn no_debug>(mut self, arg: T) -> Self { + self.options.no_debug_types.insert(arg.into()); + self + } + } + }, + as_args: "--no-debug", + }, + /// Types that should not derive or implement `Default`. + no_default_types: RegexSet { + methods: { + regex_option! { + /// Do not derive or implement `Default` for a given type. + pub fn no_default>(mut self, arg: T) -> Self { + self.options.no_default_types.insert(arg.into()); + self + } + } + }, + as_args: "--no-default", + }, + /// Types that should not derive `Hash`. + no_hash_types: RegexSet { + methods: { + regex_option! { + /// Do not derive `Hash` for a given type. 
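For illustration, the regex-based derive opt-outs and the formatter option can be combined as below; the type-name pattern and header are placeholders, and `Formatter` being re-exported at the crate root is an assumption based on current bindgen releases, not something this excerpt shows:

```rust
fn main() {
    let _bindings = bindgen::Builder::default()
        .header("example.h")
        // Opt a hypothetical handle type out of derives it cannot support.
        .no_copy("ExampleHandle.*")
        .no_debug("ExampleHandle.*")
        .no_default("ExampleHandle.*")
        // Rustfmt is already the default formatter; stated here explicitly.
        .formatter(bindgen::Formatter::Rustfmt)
        .generate()
        .expect("bindgen failed to generate bindings");
}
```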
+ pub fn no_hash>(mut self, arg: T) -> Builder { + self.options.no_hash_types.insert(arg.into()); + self + } + } + }, + as_args: "--no-hash", + }, + /// Types that should be annotated with `#[must_use]`. + must_use_types: RegexSet { + methods: { + regex_option! { + /// Annotate the given type with the `#[must_use]` attribute. + pub fn must_use_type>(mut self, arg: T) -> Builder { + self.options.must_use_types.insert(arg.into()); + self + } + } + }, + as_args: "--must-use-type", + }, + /// Whether C arrays should be regular pointers in rust or array pointers + array_pointers_in_arguments: bool { + methods: { + /// Translate arrays `T arr[size]` into array pointers `*mut [T; size]` instead of + /// translating them as `*mut T` which is the default. + /// + /// The same is done for `*const` pointers. + pub fn array_pointers_in_arguments(mut self, doit: bool) -> Self { + self.options.array_pointers_in_arguments = doit; + self + } + + }, + as_args: "--use-array-pointers-in-arguments", + }, + /// The name of the `wasm_import_module`. + wasm_import_module_name: Option { + methods: { + /// Adds the `#[link(wasm_import_module = import_name)]` attribute to all the `extern` + /// blocks generated by `bindgen`. + /// + /// This attribute is not added by default. + pub fn wasm_import_module_name>( + mut self, + import_name: T, + ) -> Self { + self.options.wasm_import_module_name = Some(import_name.into()); + self + } + }, + as_args: "--wasm-import-module-name", + }, + /// The name of the dynamic library (if we are generating bindings for a shared library). + dynamic_library_name: Option { + methods: { + /// Generate bindings for a shared library with the given name. + /// + /// This option is disabled by default. + pub fn dynamic_library_name>( + mut self, + dynamic_library_name: T, + ) -> Self { + self.options.dynamic_library_name = Some(dynamic_library_name.into()); + self + } + }, + as_args: "--dynamic-loading", + }, + /// Whether to require successful linkage for all routines in a shared library. + dynamic_link_require_all: bool { + methods: { + /// Set whether to require successful linkage for all routines in a shared library. + /// This allows us to optimize function calls by being able to safely assume function + /// pointers are valid. + /// + /// This option only comes into effect if the [`Builder::dynamic_library_name`] option + /// is set. + /// + /// This option is disabled by default. + pub fn dynamic_link_require_all(mut self, req: bool) -> Self { + self.options.dynamic_link_require_all = req; + self + } + }, + as_args: "--dynamic-link-require-all", + }, + /// Whether to only make generated bindings `pub` if the items would be publicly accessible by + /// C++. + respect_cxx_access_specs: bool { + methods: { + /// Set whether to respect the C++ access specifications. + /// + /// Passing `true` to this method will set the visibility of the generated Rust items + /// as `pub` only if the corresponding C++ items are publicly accessible instead of + /// marking all the items as public, which is the default. + pub fn respect_cxx_access_specs(mut self, doit: bool) -> Self { + self.options.respect_cxx_access_specs = doit; + self + } + + }, + as_args: "--respect-cxx-access-specs", + }, + /// Whether to translate `enum` integer types to native Rust integer types. + translate_enum_integer_types: bool { + methods: { + /// Set whether to always translate `enum` integer types to native Rust integer types. 
+ /// + /// Passing `true` to this method will result in `enum`s having types such as `u32` and + /// `i16` instead of `c_uint` and `c_short` which is the default. The `#[repr]` types + /// of Rust `enum`s are always translated to Rust integer types. + pub fn translate_enum_integer_types(mut self, doit: bool) -> Self { + self.options.translate_enum_integer_types = doit; + self + } + }, + as_args: "--translate-enum-integer-types", + }, + /// Whether to generate types with C style naming. + c_naming: bool { + methods: { + /// Set whether to generate types with C style naming. + /// + /// Passing `true` to this method will add prefixes to the generated type names. For + /// example, instead of a `struct` with name `A` we will generate a `struct` with + /// `struct_A`. Currently applies to `struct`s, `union`s, and `enum`s. + pub fn c_naming(mut self, doit: bool) -> Self { + self.options.c_naming = doit; + self + } + }, + as_args: "--c-naming", + }, + /// Whether to always emit explicit padding fields. + force_explicit_padding: bool { + methods: { + /// Set whether to always emit explicit padding fields. + /// + /// This option should be enabled if a `struct` needs to be serialized in its native + /// format (padding bytes and all). This could be required if such `struct` will be + /// written to a file or sent over the network, as anything reading the padding bytes + /// of a struct may cause undefined behavior. + /// + /// Padding fields are not emitted by default. + pub fn explicit_padding(mut self, doit: bool) -> Self { + self.options.force_explicit_padding = doit; + self + } + }, + as_args: "--explicit-padding", + }, + /// Whether to emit vtable functions. + vtable_generation: bool { + methods: { + /// Set whether to enable experimental support to generate virtual table functions. + /// + /// This option should mostly work, though some edge cases are likely to be broken. + /// + /// Virtual table generation is disabled by default. + pub fn vtable_generation(mut self, doit: bool) -> Self { + self.options.vtable_generation = doit; + self + } + }, + as_args: "--vtable-generation", + }, + /// Whether to sort the generated Rust items. + sort_semantically: bool { + methods: { + /// Set whether to sort the generated Rust items in a predefined manner. + /// + /// Items are not ordered by default. + pub fn sort_semantically(mut self, doit: bool) -> Self { + self.options.sort_semantically = doit; + self + } + }, + as_args: "--sort-semantically", + }, + /// Whether to deduplicate `extern` blocks. + merge_extern_blocks: bool { + methods: { + /// Merge all extern blocks under the same module into a single one. + /// + /// Extern blocks are not merged by default. + pub fn merge_extern_blocks(mut self, doit: bool) -> Self { + self.options.merge_extern_blocks = doit; + self + } + }, + as_args: "--merge-extern-blocks", + }, + /// Whether to wrap unsafe operations in unsafe blocks. + wrap_unsafe_ops: bool { + methods: { + /// Wrap all unsafe operations in unsafe blocks. + /// + /// Unsafe operations are not wrapped by default. + pub fn wrap_unsafe_ops(mut self, doit: bool) -> Self { + self.options.wrap_unsafe_ops = doit; + self + } + }, + as_args: "--wrap-unsafe-ops", + }, + /// Use DSTs to represent structures with flexible array members. + flexarray_dst: bool { + methods: { + /// Use DSTs to represent structures with flexible array members. + /// + /// This option is disabled by default. 
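The output-shaping options above (explicit padding, extern-block merging, unsafe wrapping) affect the shape of the generated file rather than individual items; a hedged sketch of enabling several of them at once, with a placeholder header name:

```rust
fn main() {
    let _bindings = bindgen::Builder::default()
        .header("example.h")
        // Spell out padding bytes as explicit struct fields.
        .explicit_padding(true)
        // Fold the per-header `extern "C"` blocks into one per module.
        .merge_extern_blocks(true)
        // Wrap each unsafe operation in an `unsafe { ... }` block.
        .wrap_unsafe_ops(true)
        .generate()
        .expect("bindgen failed to generate bindings");
}
```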
+ pub fn flexarray_dst(mut self, doit: bool) -> Self { + self.options.flexarray_dst = doit; + self + } + }, + as_args: "--flexarray-dst", + }, + /// Patterns for functions whose ABI should be overridden. + abi_overrides: HashMap { + methods: { + regex_option! { + /// Override the ABI of a given function. + pub fn override_abi>(mut self, abi: Abi, arg: T) -> Self { + self.options + .abi_overrides + .entry(abi) + .or_default() + .insert(arg.into()); + self + } + } + }, + as_args: |overrides, args| { + for (abi, set) in overrides { + for item in set.get_items() { + args.push("--override-abi".to_owned()); + args.push(format!("{item}={abi}")); + } + } + }, + }, + /// Whether to generate wrappers for `static` functions. + wrap_static_fns: bool { + methods: { + /// Set whether to generate wrappers for `static`` functions. + /// + /// Passing `true` to this method will generate a C source file with non-`static` + /// functions that call the `static` functions found in the input headers and can be + /// called from Rust once the source file is compiled. + /// + /// The path of this source file can be set using the [`Builder::wrap_static_fns_path`] + /// method. + pub fn wrap_static_fns(mut self, doit: bool) -> Self { + self.options.wrap_static_fns = doit; + self + } + }, + as_args: "--wrap-static-fns", + }, + /// The suffix to be added to the function wrappers for `static` functions. + wrap_static_fns_suffix: Option { + methods: { + /// Set the suffix added to the wrappers for `static` functions. + /// + /// This option only comes into effect if `true` is passed to the + /// [`Builder::wrap_static_fns`] method. + /// + /// The default suffix is `__extern`. + pub fn wrap_static_fns_suffix>(mut self, suffix: T) -> Self { + self.options.wrap_static_fns_suffix = Some(suffix.as_ref().to_owned()); + self + } + }, + as_args: "--wrap-static-fns-suffix", + }, + /// The path of the file where the wrappers for `static` functions will be emitted. + wrap_static_fns_path: Option { + methods: { + /// Set the path for the source code file that would be created if any wrapper + /// functions must be generated due to the presence of `static` functions. + /// + /// `bindgen` will automatically add the right extension to the header and source code + /// files. + /// + /// This option only comes into effect if `true` is passed to the + /// [`Builder::wrap_static_fns`] method. + /// + /// The default path is `temp_dir/bindgen/extern`, where `temp_dir` is the path + /// returned by [`std::env::temp_dir`] . + pub fn wrap_static_fns_path>(mut self, path: T) -> Self { + self.options.wrap_static_fns_path = Some(path.as_ref().to_owned()); + self + } + }, + as_args: "--wrap-static-fns-path", + }, + /// Default visibility of fields. + default_visibility: FieldVisibilityKind { + methods: { + /// Set the default visibility of fields, including bitfields and accessor methods for + /// bitfields. + /// + /// This option only comes into effect if the [`Builder::respect_cxx_access_specs`] + /// option is disabled. + pub fn default_visibility( + mut self, + visibility: FieldVisibilityKind, + ) -> Self { + self.options.default_visibility = visibility; + self + } + }, + as_args: |visibility, args| { + if *visibility != Default::default() { + args.push("--default-visibility".to_owned()); + args.push(visibility.to_string()); + } + }, + }, + /// Whether to emit diagnostics or not. + emit_diagnostics: bool { + methods: { + #[cfg(feature = "experimental")] + /// Emit diagnostics. 
+ /// + /// These diagnostics are emitted to `stderr` if you are using `bindgen-cli` or printed + /// using `cargo:warning=` if you are using `bindgen` as a `build-dependency`. + /// + /// Diagnostics are not emitted by default. + /// + /// The layout and contents of these diagnostic messages are not covered by versioning + /// and can change without notice. + pub fn emit_diagnostics(mut self) -> Self { + self.options.emit_diagnostics = true; + self + } + }, + as_args: "--emit-diagnostics", + }, + /// Whether to use Clang evaluation on temporary files as a fallback for macros that fail to + /// parse. + clang_macro_fallback: bool { + methods: { + /// Use Clang as a fallback for macros that fail to parse using `CExpr`. + /// + /// This uses a workaround to evaluate each macro in a temporary file. Because this + /// results in slower compilation, this option is opt-in. + pub fn clang_macro_fallback(mut self) -> Self { + self.options.clang_macro_fallback = true; + self + } + }, + as_args: "--clang-macro-fallback", + } + /// Path to use for temporary files created by clang macro fallback code like precompiled + /// headers. + clang_macro_fallback_build_dir: Option { + methods: { + /// Set a path to a directory to which `.c` and `.h.pch` files should be written for the + /// purpose of using clang to evaluate macros that can't be easily parsed. + /// + /// The default location for `.h.pch` files is the directory that the corresponding + /// `.h` file is located in. The default for the temporary `.c` file used for clang + /// parsing is the current working directory. Both of these defaults are overridden + /// by this option. + pub fn clang_macro_fallback_build_dir>(mut self, path: P) -> Self { + self.options.clang_macro_fallback_build_dir = Some(path.as_ref().to_owned()); + self + } + }, + as_args: "--clang-macro-fallback-build-dir", + } + /// Whether to always report C++ "deleted" functions. + generate_deleted_functions: bool { + methods: { + /// Set whether to generate C++ functions even marked "=deleted" + /// + /// Although not useful to call these functions, downstream code + /// generators may need to know whether they've been deleted in + /// order to determine the relocatability of a C++ type + /// (specifically by virtue of which constructors exist.) + pub fn generate_deleted_functions(mut self, doit: bool) -> Self { + self.options.generate_deleted_functions = doit; + self + } + + }, + as_args: "--generate-deleted-functions", + }, + /// Whether to always report C++ "pure virtual" functions. + generate_pure_virtual_functions: bool { + methods: { + /// Set whether to generate C++ functions that are pure virtual. + /// + /// These functions can't be called, so the only reason + /// to generate them is if downstream postprocessors + /// need to know of their existence. This is necessary, + /// for instance, to determine whether a type itself is + /// pure virtual and thus can't be allocated. + /// Downstream code generators may choose to make code to + /// allow types to be allocated but need to avoid doing so + /// if the type contains pure virtual functions. + pub fn generate_pure_virtual_functions(mut self, doit: bool) -> Self { + self.options.generate_pure_virtual_functions = doit; + self + } + + }, + as_args: "--generate-pure-virtual-functions", + }, + /// Whether to always report C++ "private" functions. + generate_private_functions: bool { + methods: { + /// Set whether to generate C++ functions that are private. 
+ /// + /// These functions can't be called, so the only reason + /// to generate them is if downstream postprocessors + /// need to know of their existence. + pub fn generate_private_functions(mut self, doit: bool) -> Self { + self.options.generate_private_functions = doit; + self + } + + }, + as_args: "--generate-private-functions", + }, +} diff --git a/vendor/bindgen/parse.rs b/vendor/bindgen/parse.rs new file mode 100644 index 00000000000000..d29b090fcb6a84 --- /dev/null +++ b/vendor/bindgen/parse.rs @@ -0,0 +1,41 @@ +//! Common traits and types related to parsing our IR from Clang cursors. +#![deny(clippy::missing_docs_in_private_items)] + +use crate::clang; +use crate::ir::context::{BindgenContext, ItemId}; + +/// Not so much an error in the traditional sense, but a control flow message +/// when walking over Clang's AST with a cursor. +#[derive(Debug)] +pub(crate) enum ParseError { + /// Recurse down the current AST node's children. + Recurse, + /// Continue on to the next sibling AST node, or back up to the parent's + /// siblings if we've exhausted all of this node's siblings (and so on). + Continue, +} + +/// The result of parsing a Clang AST node. +#[derive(Debug)] +pub(crate) enum ParseResult { + /// We've already resolved this item before, here is the extant `ItemId` for + /// it. + AlreadyResolved(ItemId), + + /// This is a newly parsed item. If the cursor is `Some`, it points to the + /// AST node where the new `T` was declared. + New(T, Option), +} + +/// An intermediate representation "sub-item" (i.e. one of the types contained +/// inside an `ItemKind` variant) that can be parsed from a Clang cursor. +pub(crate) trait ClangSubItemParser: Sized { + /// Attempt to parse this type from the given cursor. + /// + /// The fact that is a reference guarantees it's held by the context, and + /// allow returning already existing types. + fn parse( + cursor: clang::Cursor, + context: &mut BindgenContext, + ) -> Result, ParseError>; +} diff --git a/vendor/bindgen/regex_set.rs b/vendor/bindgen/regex_set.rs new file mode 100644 index 00000000000000..32279557b535a0 --- /dev/null +++ b/vendor/bindgen/regex_set.rs @@ -0,0 +1,199 @@ +//! A type that represents the union of a set of regular expressions. +#![deny(clippy::missing_docs_in_private_items)] + +use regex::RegexSet as RxSet; +use std::cell::Cell; + +/// A dynamic set of regular expressions. +#[derive(Clone, Debug, Default)] +pub(crate) struct RegexSet { + items: Vec>, + /// Whether any of the items in the set was ever matched. The length of this + /// vector is exactly the length of `items`. + matched: Vec>, + set: Option, + /// Whether we should record matching items in the `matched` vector or not. + record_matches: bool, +} + +impl RegexSet { + /// Is this set empty? + pub(crate) fn is_empty(&self) -> bool { + self.items.is_empty() + } + + /// Insert a new regex into this set. + pub(crate) fn insert(&mut self, string: S) + where + S: AsRef, + { + self.items.push(string.as_ref().to_owned().into_boxed_str()); + self.matched.push(Cell::new(false)); + self.set = None; + } + + /// Returns slice of String from its field 'items' + pub(crate) fn get_items(&self) -> &[Box] { + &self.items + } + + /// Returns an iterator over regexes in the set which didn't match any + /// strings yet. 
+ pub(crate) fn unmatched_items(&self) -> impl Iterator { + self.items.iter().enumerate().filter_map(move |(i, item)| { + if !self.record_matches || self.matched[i].get() { + return None; + } + + Some(item.as_ref()) + }) + } + + /// Construct a `RegexSet` from the set of entries we've accumulated. + /// + /// Must be called before calling `matches()`, or it will always return + /// false. + #[inline] + #[allow(unused)] + pub(crate) fn build(&mut self, record_matches: bool) { + self.build_inner(record_matches, None); + } + + #[cfg(all(feature = "__cli", feature = "experimental"))] + /// Construct a `RegexSet` from the set of entries we've accumulated and emit diagnostics if the + /// name of the regex set is passed to it. + /// + /// Must be called before calling `matches()`, or it will always return + /// false. + #[inline] + pub(crate) fn build_with_diagnostics( + &mut self, + record_matches: bool, + name: Option<&'static str>, + ) { + self.build_inner(record_matches, name); + } + + #[cfg(all(not(feature = "__cli"), feature = "experimental"))] + /// Construct a RegexSet from the set of entries we've accumulated and emit diagnostics if the + /// name of the regex set is passed to it. + /// + /// Must be called before calling `matches()`, or it will always return + /// false. + #[inline] + pub(crate) fn build_with_diagnostics( + &mut self, + record_matches: bool, + name: Option<&'static str>, + ) { + self.build_inner(record_matches, name); + } + + fn build_inner( + &mut self, + record_matches: bool, + _name: Option<&'static str>, + ) { + let items = self.items.iter().map(|item| format!("^({item})$")); + self.record_matches = record_matches; + self.set = match RxSet::new(items) { + Ok(x) => Some(x), + Err(e) => { + warn!("Invalid regex in {:?}: {e:?}", self.items); + #[cfg(feature = "experimental")] + if let Some(name) = _name { + invalid_regex_warning(self, e, name); + } + None + } + } + } + + /// Does the given `string` match any of the regexes in this set? 
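Because `RegexSet` is crate-private, its lifecycle only shows up inside bindgen itself; a small crate-internal sketch of the insert/build/matches sequence described above (the helper function is hypothetical, not part of the vendored file):

```rust
fn regex_set_demo() {
    let mut set = RegexSet::default();
    set.insert("foo.*");
    set.insert("bar");
    // `build` compiles each pattern anchored as `^(...)$`; without calling it,
    // `matches` always returns false.
    set.build(/* record_matches = */ true);
    assert!(set.matches("foobar"));
    assert!(!set.matches("baz"));
    // With match recording enabled, never-matched patterns can be reported.
    for unused in set.unmatched_items() {
        eprintln!("unused pattern: {unused}");
    }
}
```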
+ pub(crate) fn matches(&self, string: S) -> bool + where + S: AsRef, + { + let s = string.as_ref(); + let Some(ref set) = self.set else { + return false; + }; + + if !self.record_matches { + return set.is_match(s); + } + + let matches = set.matches(s); + if !matches.matched_any() { + return false; + } + for i in &matches { + self.matched[i].set(true); + } + + true + } +} + +#[cfg(feature = "experimental")] +fn invalid_regex_warning( + set: &RegexSet, + err: regex::Error, + name: &'static str, +) { + use crate::diagnostics::{Diagnostic, Level, Slice}; + + let mut diagnostic = Diagnostic::default(); + + match err { + regex::Error::Syntax(string) => { + if string.starts_with("regex parse error:\n") { + let mut source = String::new(); + + let mut parsing_source = true; + + for line in string.lines().skip(1) { + if parsing_source { + if line.starts_with(' ') { + source.push_str(line); + source.push('\n'); + continue; + } + parsing_source = false; + } + let error = "error: "; + if line.starts_with(error) { + let (_, msg) = line.split_at(error.len()); + diagnostic.add_annotation(msg.to_owned(), Level::Error); + } else { + diagnostic.add_annotation(line.to_owned(), Level::Info); + } + } + let mut slice = Slice::default(); + slice.with_source(source); + diagnostic.add_slice(slice); + + diagnostic.with_title( + "Error while parsing a regular expression.", + Level::Warning, + ); + } else { + diagnostic.with_title(string, Level::Warning); + } + } + err => { + let err = err.to_string(); + diagnostic.with_title(err, Level::Warning); + } + } + + diagnostic.add_annotation( + format!("This regular expression was passed via `{name}`."), + Level::Note, + ); + + if set.items.iter().any(|item| item.as_ref() == "*") { + diagnostic.add_annotation("Wildcard patterns \"*\" are no longer considered valid. Use \".*\" instead.", Level::Help); + } + diagnostic.display(); +} diff --git a/vendor/bindgen/time.rs b/vendor/bindgen/time.rs new file mode 100644 index 00000000000000..2952e36f760c2a --- /dev/null +++ b/vendor/bindgen/time.rs @@ -0,0 +1,52 @@ +use std::io::{self, Write}; +use std::time::{Duration, Instant}; + +/// RAII timer to measure how long phases take. +#[derive(Debug)] +pub struct Timer<'a> { + output: bool, + name: &'a str, + start: Instant, +} + +impl<'a> Timer<'a> { + /// Creates a Timer with the given name, and starts it. By default, + /// will print to stderr when it is `drop`'d + pub fn new(name: &'a str) -> Self { + Timer { + output: true, + name, + start: Instant::now(), + } + } + + /// Sets whether or not the Timer will print a message + /// when it is dropped. + pub fn with_output(mut self, output: bool) -> Self { + self.output = output; + self + } + + /// Returns the time elapsed since the timer's creation + pub fn elapsed(&self) -> Duration { + self.start.elapsed() + } + + fn print_elapsed(&mut self) { + if self.output { + let elapsed = self.elapsed(); + let time = (elapsed.as_secs() as f64) * 1e3 + + f64::from(elapsed.subsec_nanos()) / 1e6; + let stderr = io::stderr(); + // Arbitrary output format, subject to change. 
+ writeln!(stderr.lock(), " time: {time:>9.3} ms.\t{}", self.name) + .expect("timer write should not fail"); + } + } +} + +impl Drop for Timer<'_> { + fn drop(&mut self) { + self.print_elapsed(); + } +} diff --git a/vendor/bitflags/.cargo-checksum.json b/vendor/bitflags/.cargo-checksum.json new file mode 100644 index 00000000000000..734a5dc6fed4a9 --- /dev/null +++ b/vendor/bitflags/.cargo-checksum.json @@ -0,0 +1 @@ +{"files":{".cargo_vcs_info.json":"886ab20f7366d7decd3716d19aff27f3ddde9684f5c71acca1684d867e692235","CHANGELOG.md":"648bc400e8387d19c7170890bb6e45207d63cb3149f5591936b317ac7952bbb9","CODE_OF_CONDUCT.md":"42634d0f6d922f49857175af991802822f7f920487aefa2ee250a50d12251a66","CONTRIBUTING.md":"6c9f96eacb20af877ae2d16f024904f3038b93448a8488e9dbcac0df7f6439a5","Cargo.lock":"eb3583e00fadd27f10c93df9fb63695ca1889436cebb4c8b58414243cdda9d59","Cargo.toml":"1d496ea35bdd5b8e3ee00cfa6fd515d89842c793c3e86f450f8c963b5b3a84eb","Cargo.toml.orig":"25266ca314ead26f44356315628a4136adfefdcaf7bd86e15c3d55903bda7c6d","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"6485b8ed310d3f0340bf1ad1f47645069ce4069dcc6bb46c7d5c6faf41de1fdb","README.md":"b3d42f34936fd897dc891094f0dbbfb5f41f7255f48a3fe2470b1b420b6235d6","SECURITY.md":"68704c8128fa2e776ed7cbda741fbf61ad52f998a96350ee7ee4dbf64c6573bc","benches/parse.rs":"f1390d62322c6880d65bd931e183d49b313f287879a6bfaa36b1cb1921090b51","examples/custom_bits_type.rs":"e53b32051adc5d97860e0b48c8f3a301a041d73b4939c0d7caa5f0cfcc0b9739","examples/custom_derive.rs":"730589695eb68dda21d0d9f69e90cbdbf9823b13d6f16c5f22b0083c00981813","examples/fmt.rs":"87ba37a1fb8528570c74ea26d8e8948e1179c3d867b928bea1080880258e0a99","examples/macro_free.rs":"69e7f284b53b5214d51228a686e87f127b52a3b74711e45537ebfa5583a180e5","examples/serde.rs":"dfc7cd50232c6763f7cd05b4089ef9408db9368ee42c3fd5c116ff424810a2b0","spec.md":"f0657642c7cf470e6d6e55362aaab224b3df0f22cb7796b109bb41687acea8b1","src/example_generated.rs":"d018caf059f6ffc4c2403b771a6d76679fa5af03c329a91bd9252957df695e7f","src/external.rs":"59e962382560a5362953dd4396a09ba78e3f4446ef46c6a153a9cadb1c329506","src/external/arbitrary.rs":"43908bb4fe0a076078dcb3fa70c654aaed8c7b38aa66574414165a82037def83","src/external/bytemuck.rs":"3afcef382122867040fddd5e4153d633d1ed5596fe5d7dfac66a8e61c2513df5","src/external/serde.rs":"0f7339036f41cd93f29b21b954bc1f0fd747762f7a4f20d4ebfc848b20584dc8","src/internal.rs":"645b13af0c7302258df61239073a4b8203d09f27b6c17f8a6f1f8c3e427f5334","src/iter.rs":"18db983a501b02c71fda1301a9c020322bd684fe043ccccc8221a706a53c1f31","src/lib.rs":"bf13327fb5ee4fd149acb95c0f007dc1222b8e028217800ffbdf1dd70e176e13","src/parser.rs":"4e788b29f5d0542c409a8b43c703bcb4a6c2a57c181cadd17f565f0abb39681e","src/public.rs":"2a695651626cf7442cc83e52c410e01ceeb50902345e2a85988ad27c951287ac","src/tests.rs":"8e480dc78bd29bbb62cfaf62c3c8f779b39f96edc1e83f230a353296bfb4ffff","src/tests/all.rs":"e99a865cd4271a524c2fe95503e96d851b35990570aed6fb2e9dac7a14da31b6","src/tests/bitflags_match.rs":"601ad186930908b681f24312132000518fc927ba569d394e5c4440462f037aec","src/tests/bits.rs":"3840c34b2ea5d1802404b9ce5bcc1d3fa6ccd8dfba2e29e6d07c605f817d90df","src/tests/clear.rs":"6976fcda2f3367c8219485d33bd5d754da6769770cf164c12baace010ad7686d","src/tests/complement.rs":"d0e6d4c3daf49e0a7438c9f1c1ac91fad1b37f258c03593f6cd6a695ee626f5e","src/tests/contains.rs":"58bb3cb8c86550e775d11134da1d4aca85c83f943ea454e3a5f222772c674a24","src/tests/difference.rs":"d0d2b96bb52658b8ac019210da74ca75a53e76622f668855142ea6e97c28cb0e",
"src/tests/empty.rs":"817d6e93ced7cb7576ff0e334aa1a44703f3f96871ff2c6bdcb8f207e6551f67","src/tests/eq.rs":"b816767680a029e9c163e37af074dd4e604c4a3e4936f829f0ca3774fd5f0e37","src/tests/extend.rs":"5fabb9fd0254c64da019149c24063fceff72da3eb4ad73b57c1cc4c04b008364","src/tests/flags.rs":"2f48d3a25db1cf66fe98c9959abc70875deb9f7b38b2c278dc70c46e0d4ec277","src/tests/fmt.rs":"a2d4148491f3202f030f63633eee941b741e3be29a68cf376f008dbe5cb11e5c","src/tests/from_bits.rs":"d94c65b88bf89961d0cfc1b3152a7f1acc285bae160a1628438effda11b8e2c1","src/tests/from_bits_retain.rs":"980591dfaf91e940f42d9a1ce890f237514dd59d458fc264abcf9ceabbc40677","src/tests/from_bits_truncate.rs":"d3406b5e107ebb6449b98a59eee6cc5d84f947d4aaee1ee7e80dc7202de179f0","src/tests/from_name.rs":"f4a055d1f3c86decef70ef8f3020cef5c4e229718c20b3d59d5a3abc3a8b1298","src/tests/insert.rs":"3fab5da800a6fc0654dfb5f859f95da65a507eb9fda8695083c2712266dff0b9","src/tests/intersection.rs":"baf1454c9e4eba552264870a556ee0032d9f2bb8cac361833d571235e0b52221","src/tests/intersects.rs":"c55e36179fd8bc636f04ea9bbce346dcaafe57915d13f1df28c5b83117dbd08e","src/tests/is_all.rs":"b2f11faa7c954bd85c8fb39999e0c37d983cf7895152bc13c7ddde106aa33b6d","src/tests/is_empty.rs":"11f21323cdca7ff92dd89e09de667dba69e8dce88e2d3e27ea68ace91d15d070","src/tests/iter.rs":"db96736e94686f4c66c012b20e4059fc3e61205feda8b4f1ad7aa16615071c18","src/tests/parser.rs":"fa2fb8dedcf16601af609a5e21d9c5840c7f96a1e3a587f7f2ea3dc8387f7628","src/tests/remove.rs":"6e75f8508d2dc1a2cba89ef691f4387a665a4fd13853bb1dd0fd80c783b89947","src/tests/symmetric_difference.rs":"0a89f084f9de1dd5b1932fe72c3b10a3c93cbaa16832b3a31b6a85e3bbd3ba6e","src/tests/truncate.rs":"683430af4a0e47ec73c737a6908fac5d851bed7c41d47c73a642e96d966aa5ae","src/tests/union.rs":"88f398ee4600bb1e59bf6d02d1f6ff33f5f853eab5a6c700bd8a683c6ee4651a","src/tests/unknown.rs":"fa9e8ee461f176c0d892cde487fef0fe66df2aa5906aaef21b093102e590f5f5","src/traits.rs":"c8757d4f5aa26ac2c2c154bd293f647d0722ac65e977fb9d19f41c83798cae40"},"package":"812e12b5285cc515a9c72a5c1d3b6d46a19dac5acfef5265968c166106e31dd3"} \ No newline at end of file diff --git a/vendor/bitflags/.cargo_vcs_info.json b/vendor/bitflags/.cargo_vcs_info.json new file mode 100644 index 00000000000000..c4d1f9e9636e47 --- /dev/null +++ b/vendor/bitflags/.cargo_vcs_info.json @@ -0,0 +1,6 @@ +{ + "git": { + "sha1": "7cc8595e93d04d180d39e2f25242dca85dd71228" + }, + "path_in_vcs": "" +} \ No newline at end of file diff --git a/vendor/bitflags/CHANGELOG.md b/vendor/bitflags/CHANGELOG.md new file mode 100644 index 00000000000000..b03810ae3a79a3 --- /dev/null +++ b/vendor/bitflags/CHANGELOG.md @@ -0,0 +1,636 @@ +# 2.10.0 + +## What's Changed +* Implement iterator for all named flags by @ssrlive in https://github.com/bitflags/bitflags/pull/465 +* Depend on serde_core instead of serde by @KodrAus in https://github.com/bitflags/bitflags/pull/467 + +## New Contributors +* @ssrlive made their first contribution in https://github.com/bitflags/bitflags/pull/465 + +**Full Changelog**: https://github.com/bitflags/bitflags/compare/2.9.4...2.10.0 + +# 2.9.4 + +## What's Changed +* Add Cargo features to readme by @KodrAus in https://github.com/bitflags/bitflags/pull/460 + +**Full Changelog**: https://github.com/bitflags/bitflags/compare/2.9.3...2.9.4 + +# 2.9.3 + +## What's Changed +* Streamline generated code by @nnethercote in https://github.com/bitflags/bitflags/pull/458 + +## New Contributors +* @nnethercote made their first contribution in https://github.com/bitflags/bitflags/pull/458 + +**Full Changelog**: 
https://github.com/bitflags/bitflags/compare/2.9.2...2.9.3 + +# 2.9.2 + +## What's Changed +* Fix difference in the spec by @KodrAus in https://github.com/bitflags/bitflags/pull/446 +* Fix up inaccurate docs on bitflags_match by @KodrAus in https://github.com/bitflags/bitflags/pull/453 +* Remove rustc internal crate feature by @KodrAus in https://github.com/bitflags/bitflags/pull/454 + + +**Full Changelog**: https://github.com/bitflags/bitflags/compare/2.9.1...2.9.2 + +# 2.9.1 + +## What's Changed +* Document Cargo features by @KodrAus in https://github.com/bitflags/bitflags/pull/444 + + +**Full Changelog**: https://github.com/bitflags/bitflags/compare/2.9.0...2.9.1 + +# 2.9.0 + +## What's Changed +* `Flags` trait: add `clear(&mut self)` method by @wysiwys in https://github.com/bitflags/bitflags/pull/437 +* Fix up UI tests by @KodrAus in https://github.com/bitflags/bitflags/pull/438 + + +**Full Changelog**: https://github.com/bitflags/bitflags/compare/2.8.0...2.9.0 + +# 2.8.0 + +## What's Changed +* feat(core): Add bitflags_match macro for bitflag matching by @YuniqueUnic in https://github.com/bitflags/bitflags/pull/423 +* Finalize bitflags_match by @KodrAus in https://github.com/bitflags/bitflags/pull/431 + +## New Contributors +* @YuniqueUnic made their first contribution in https://github.com/bitflags/bitflags/pull/423 + +**Full Changelog**: https://github.com/bitflags/bitflags/compare/2.7.0...2.8.0 + +# 2.7.0 + +## What's Changed +* Fix `clippy::doc_lazy_continuation` lints by @waywardmonkeys in https://github.com/bitflags/bitflags/pull/414 +* Run clippy on extra features in CI. by @waywardmonkeys in https://github.com/bitflags/bitflags/pull/415 +* Fix CI: trybuild refresh, allow some clippy restrictions. by @waywardmonkeys in https://github.com/bitflags/bitflags/pull/417 +* Update zerocopy version in example by @KodrAus in https://github.com/bitflags/bitflags/pull/422 +* Add method to check if unknown bits are set by @wysiwys in https://github.com/bitflags/bitflags/pull/426 +* Update error messages by @KodrAus in https://github.com/bitflags/bitflags/pull/427 +* Add `truncate(&mut self)` method to unset unknown bits by @wysiwys in https://github.com/bitflags/bitflags/pull/428 +* Update error messages by @KodrAus in https://github.com/bitflags/bitflags/pull/429 + +## New Contributors +* @wysiwys made their first contribution in https://github.com/bitflags/bitflags/pull/426 + +**Full Changelog**: https://github.com/bitflags/bitflags/compare/2.6.0...2.7.0 + +# 2.6.0 + +## What's Changed +* Sync CHANGELOG.md with github release notes by @dextero in https://github.com/bitflags/bitflags/pull/402 +* Update error messages and zerocopy by @KodrAus in https://github.com/bitflags/bitflags/pull/403 +* Bump minimum declared versions of dependencies by @dextero in https://github.com/bitflags/bitflags/pull/404 +* chore(deps): bump serde_derive and bytemuck versions by @joshka in https://github.com/bitflags/bitflags/pull/405 +* add OSFF Scorecard workflow by @KodrAus in https://github.com/bitflags/bitflags/pull/396 +* Update stderr messages by @KodrAus in https://github.com/bitflags/bitflags/pull/408 +* Fix typo by @waywardmonkeys in https://github.com/bitflags/bitflags/pull/410 +* Allow specifying outer attributes in impl mode by @KodrAus in https://github.com/bitflags/bitflags/pull/411 + +## New Contributors +* @dextero made their first contribution in https://github.com/bitflags/bitflags/pull/402 +* @joshka made their first contribution in https://github.com/bitflags/bitflags/pull/405 +* 
@waywardmonkeys made their first contribution in https://github.com/bitflags/bitflags/pull/410 + +**Full Changelog**: https://github.com/bitflags/bitflags/compare/2.5.0...2.6.0 + +# 2.5.0 + +## What's Changed +* Derive `Debug` for `Flag` by @tgross35 in https://github.com/bitflags/bitflags/pull/398 +* Support truncating or strict-named variants of parsing and formatting by @KodrAus in https://github.com/bitflags/bitflags/pull/400 + +## New Contributors +* @tgross35 made their first contribution in https://github.com/bitflags/bitflags/pull/398 + +**Full Changelog**: https://github.com/bitflags/bitflags/compare/2.4.2...2.5.0 + +# 2.4.2 + +## What's Changed +* Cargo.toml: Anchor excludes to root of the package by @jamessan in https://github.com/bitflags/bitflags/pull/387 +* Update error messages by @KodrAus in https://github.com/bitflags/bitflags/pull/390 +* Add support for impl mode structs to be repr(packed) by @GnomedDev in https://github.com/bitflags/bitflags/pull/388 +* Remove old `unused_tuple_struct_fields` lint by @dtolnay in https://github.com/bitflags/bitflags/pull/393 +* Delete use of `local_inner_macros` by @dtolnay in https://github.com/bitflags/bitflags/pull/392 + +## New Contributors +* @jamessan made their first contribution in https://github.com/bitflags/bitflags/pull/387 +* @GnomedDev made their first contribution in https://github.com/bitflags/bitflags/pull/388 + +**Full Changelog**: https://github.com/bitflags/bitflags/compare/2.4.1...2.4.2 + +# 2.4.1 + +## What's Changed +* Allow some new pedantic clippy lints by @KodrAus in https://github.com/bitflags/bitflags/pull/380 + +**Full Changelog**: https://github.com/bitflags/bitflags/compare/2.4.0...2.4.1 + +# 2.4.0 + +## What's Changed +* Remove html_root_url by @eldruin in https://github.com/bitflags/bitflags/pull/368 +* Support unnamed flags by @KodrAus in https://github.com/bitflags/bitflags/pull/371 +* Update smoke test to verify all Clippy and rustc lints by @MitMaro in https://github.com/bitflags/bitflags/pull/374 +* Specify the behavior of bitflags by @KodrAus in https://github.com/bitflags/bitflags/pull/369 + +## New Contributors +* @eldruin made their first contribution in https://github.com/bitflags/bitflags/pull/368 +* @MitMaro made their first contribution in https://github.com/bitflags/bitflags/pull/374 + +**Full Changelog**: https://github.com/bitflags/bitflags/compare/2.3.3...2.4.0 + +# 2.3.3 + +## Changes to `-=` + +The `-=` operator was incorrectly changed to truncate bits that didn't correspond to valid flags in `2.3.0`. This has +been fixed up so it once again behaves the same as `-` and `difference`. + +## Changes to `!` + +The `!` operator previously called `Self::from_bits_truncate`, which would truncate any bits that only partially +overlapped with a valid flag. It will now use `bits & Self::all().bits()`, so any bits that overlap any bits +specified by any flag will be respected. This is unlikely to have any practical implications, but enables defining +a flag like `const ALL = !0` as a way to signal that any bit pattern is a known set of flags. + +## Changes to formatting + +Zero-valued flags will never be printed. You'll either get `0x0` for empty flags using debug formatting, or the +set of flags with zero-valued flags omitted for others. + +Composite flags will no longer be redundantly printed if there are extra bits to print at the end that don't correspond +to a valid flag. 
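
For illustration, the catch-all pattern described above could be written like this (a minimal sketch; the `Access` type and its flags are hypothetical, not taken from the crate):

```rust
use bitflags::bitflags;

bitflags! {
    #[derive(Debug, Clone, Copy, PartialEq, Eq)]
    struct Access: u8 {
        const READ  = 0b0000_0001;
        const WRITE = 0b0000_0010;
        // `!0` sets every bit; with the 2.3.3 behavior of `!`, bits that
        // overlap any defined flag are respected, so this acts as an
        // "accept any bit pattern" flag.
        const ALL   = !0;
    }
}

fn main() {
    // `!` complements against `Self::all().bits()`, so the result carries
    // no bits outside the defined flags.
    let not_read = !Access::READ;
    assert!(not_read.contains(Access::WRITE));
    assert!(!not_read.contains(Access::READ));
}
```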
+ +## What's Changed +* Fix up incorrect sub assign behavior and other cleanups by @KodrAus in https://github.com/bitflags/bitflags/pull/366 + +**Full Changelog**: https://github.com/bitflags/bitflags/compare/2.3.2...2.3.3 + +# 2.3.2 + +## What's Changed +* [doc] [src/lib.rs] delete redundant path prefix by @OccupyMars2025 in https://github.com/bitflags/bitflags/pull/361 + +## New Contributors +* @OccupyMars2025 made their first contribution in https://github.com/bitflags/bitflags/pull/361 + +**Full Changelog**: https://github.com/bitflags/bitflags/compare/2.3.1...2.3.2 + +# 2.3.1 + +## What's Changed +* Fix Self in flags value expressions by @KodrAus in https://github.com/bitflags/bitflags/pull/355 + +**Full Changelog**: https://github.com/bitflags/bitflags/compare/2.3.0...2.3.1 + +# 2.3.0 + +## Major changes + +### `BitFlags` trait deprecated in favor of `Flags` trait + +This release introduces the `Flags` trait and deprecates the `BitFlags` trait. These two traits are semver compatible so if you have public API code depending on `BitFlags` you can move to `Flags` without breaking end-users. This is possible because the `BitFlags` trait was never publicly implementable, so it now carries `Flags` as a supertrait. All implementations of `Flags` additionally implement `BitFlags`. + +The `Flags` trait is a publicly implementable version of the old `BitFlags` trait. The original `BitFlags` trait carried some macro baggage that made it difficult to implement, so a new `Flags` trait has been introduced as the _One True Trait_ for interacting with flags types generically. See the the `macro_free` and `custom_derive` examples for more details. + +### `Bits` trait publicly exposed + +The `Bits` trait for the underlying storage of flags values is also now publicly implementable. This lets you define your own exotic backing storage for flags. See the `custom_bits_type` example for more details. 
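
As a sketch of what consuming the trait generically can look like, a function can now be written against `Flags` itself and accept any implementor, whether macro-generated or hand-written (the `Perms` type and `describe` helper are hypothetical):

```rust
use bitflags::{bitflags, Flags};

bitflags! {
    #[derive(Debug, Clone, Copy)]
    struct Perms: u32 {
        const READ = 1;
        const WRITE = 2;
    }
}

// Generic over any `Flags` implementor.
fn describe<F: Flags>(value: &F) -> String {
    value
        .iter_names()                 // (name, flag) for each named flag that is set
        .map(|(name, _)| name)
        .collect::<Vec<_>>()
        .join(" | ")
}

fn main() {
    // Prints the set flags by name, e.g. "READ | WRITE".
    println!("{}", describe(&(Perms::READ | Perms::WRITE)));
}
```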
+ +## What's Changed +* Use explicit hashes for actions steps by @KodrAus in https://github.com/bitflags/bitflags/pull/350 +* Support ejecting flags types from the bitflags macro by @KodrAus in https://github.com/bitflags/bitflags/pull/351 + +**Full Changelog**: https://github.com/bitflags/bitflags/compare/2.2.1...2.3.0 + +# 2.2.1 + +## What's Changed +* Refactor attribute filtering to apply per-flag by @KodrAus in https://github.com/bitflags/bitflags/pull/345 + +**Full Changelog**: https://github.com/bitflags/bitflags/compare/2.2.0...2.2.1 + +# 2.2.0 + +## What's Changed +* Create SECURITY.md by @KodrAus in https://github.com/bitflags/bitflags/pull/338 +* add docs to describe the behavior of multi-bit flags by @nicholasbishop in https://github.com/bitflags/bitflags/pull/340 +* Add support for bytemuck by @KodrAus in https://github.com/bitflags/bitflags/pull/336 +* Add a top-level macro for filtering attributes by @KodrAus in https://github.com/bitflags/bitflags/pull/341 + +## New Contributors +* @nicholasbishop made their first contribution in https://github.com/bitflags/bitflags/pull/340 + +**Full Changelog**: https://github.com/bitflags/bitflags/compare/2.1.0...2.2.0 + +# 2.1.0 + +## What's Changed +* Add docs for the internal Field0 and examples of formatting/parsing by @KodrAus in https://github.com/bitflags/bitflags/pull/328 +* Add support for arbitrary by @KodrAus in https://github.com/bitflags/bitflags/pull/324 +* Fix up missing docs for consts within consts by @KodrAus in https://github.com/bitflags/bitflags/pull/330 +* Ignore clippy lint in generated code by @Jake-Shadle in https://github.com/bitflags/bitflags/pull/331 + +## New Contributors +* @Jake-Shadle made their first contribution in https://github.com/bitflags/bitflags/pull/331 + +**Full Changelog**: https://github.com/bitflags/bitflags/compare/2.0.2...2.1.0 + +# 2.0.2 + +## What's Changed +* Fix up missing isize and usize Bits impls by @KodrAus in https://github.com/bitflags/bitflags/pull/321 + +**Full Changelog**: https://github.com/bitflags/bitflags/compare/2.0.1...2.0.2 + +# 2.0.1 + +## What's Changed +* Fix up some docs issues by @KodrAus in https://github.com/bitflags/bitflags/pull/309 +* Make empty_flag() const. by @tormeh in https://github.com/bitflags/bitflags/pull/313 +* Fix formatting of multi-bit flags with partial overlap by @KodrAus in https://github.com/bitflags/bitflags/pull/316 + +## New Contributors +* @tormeh made their first contribution in https://github.com/bitflags/bitflags/pull/313 + +**Full Changelog**: https://github.com/bitflags/bitflags/compare/2.0.0...2.0.1 + +# 2.0.0 + +## Major changes + +This release includes some major changes over `1.x`. If you use `bitflags!` types in your public API then upgrading this library may cause breakage in your downstream users. + +### ⚠️ Serialization + +You'll need to add the `serde` Cargo feature in order to `#[derive(Serialize, Deserialize)]` on your generated flags types: + +```rust +bitflags! { + #[derive(Serialize, Deserialize)] + #[serde(transparent)] + pub struct Flags: T { + .. + } +} +``` + +where `T` is the underlying bits type you're using, such as `u32`. + +The default serialization format with `serde` **has changed** if you `#[derive(Serialize, Deserialize)]` on your generated flags types. It will now use a formatted string for human-readable formats and the underlying bits type for compact formats. + +To keep the old format, see the https://github.com/KodrAus/bitflags-serde-legacy library. 
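
A minimal round-trip sketch of the new behavior, assuming the `serde` Cargo feature is enabled and `serde`/`serde_json` are available as dependencies (the `Flags` type here is illustrative):

```rust
use bitflags::bitflags;
use serde::{Deserialize, Serialize};

bitflags! {
    #[derive(Serialize, Deserialize, Debug, Clone, Copy, PartialEq, Eq)]
    #[serde(transparent)]
    pub struct Flags: u32 {
        const A = 1;
        const B = 2;
    }
}

fn main() {
    let flags = Flags::A | Flags::B;

    // Human-readable formats such as JSON now serialize the flags as a
    // formatted string (e.g. "A | B") rather than the raw bits.
    let json = serde_json::to_string(&flags).unwrap();
    let parsed: Flags = serde_json::from_str(&json).unwrap();

    assert_eq!(flags, parsed);
}
```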
+ +### ⚠️ Traits + +Generated flags types now derive fewer traits. If you need to maintain backwards compatibility, you can derive the following yourself: + +```rust +#[derive(PartialEq, Eq, PartialOrd, Ord, Hash, Debug, Clone, Copy)] +``` + +### ⚠️ Methods + +The unsafe `from_bits_unchecked` method is now a safe `from_bits_retain` method. + +You can add the following method to your generated types to keep them compatible: + +```rust +#[deprecated = "use the safe `from_bits_retain` method instead"] +pub unsafe fn from_bits_unchecked(bits: T) -> Self { + Self::from_bits_retain(bits) +} +``` + +where `T` is the underlying bits type you're using, such as `u32`. + +### ⚠️ `.bits` field + +You can now use the `.bits()` method instead of the old `.bits`. + +The representation of generated flags types has changed from a struct with the single field `bits` to a newtype. + +## What's Changed +* Fix a typo and call out MSRV bump by @KodrAus in https://github.com/bitflags/bitflags/pull/259 +* BitFlags trait by @arturoc in https://github.com/bitflags/bitflags/pull/220 +* Add a hidden trait to discourage manual impls of BitFlags by @KodrAus in https://github.com/bitflags/bitflags/pull/261 +* Sanitize `Ok` by @konsumlamm in https://github.com/bitflags/bitflags/pull/266 +* Fix bug in `Debug` implementation by @konsumlamm in https://github.com/bitflags/bitflags/pull/268 +* Fix a typo in the generated documentation by @wackbyte in https://github.com/bitflags/bitflags/pull/271 +* Use SPDX license format by @atouchet in https://github.com/bitflags/bitflags/pull/272 +* serde tests fail in CI by @arturoc in https://github.com/bitflags/bitflags/pull/277 +* Fix beta test output by @KodrAus in https://github.com/bitflags/bitflags/pull/279 +* Add example to the README.md file by @tiaanl in https://github.com/bitflags/bitflags/pull/270 +* Iterator over all the enabled options by @arturoc in https://github.com/bitflags/bitflags/pull/278 +* from_bits_(truncate) fail with composite flags by @arturoc in https://github.com/bitflags/bitflags/pull/276 +* Add more platform coverage to CI by @KodrAus in https://github.com/bitflags/bitflags/pull/280 +* rework the way cfgs are handled by @KodrAus in https://github.com/bitflags/bitflags/pull/281 +* Split generated code into two types by @KodrAus in https://github.com/bitflags/bitflags/pull/282 +* expose bitflags iters using nameable types by @KodrAus in https://github.com/bitflags/bitflags/pull/286 +* Support creating flags from their names by @KodrAus in https://github.com/bitflags/bitflags/pull/287 +* Update README.md by @KodrAus in https://github.com/bitflags/bitflags/pull/288 +* Prepare for 2.0.0-rc.1 release by @KodrAus in https://github.com/bitflags/bitflags/pull/289 +* Add missing "if" to contains doc-comment in traits.rs by @rusty-snake in https://github.com/bitflags/bitflags/pull/291 +* Forbid unsafe_code by @fintelia in https://github.com/bitflags/bitflags/pull/294 +* serde: enable no-std support by @nim65s in https://github.com/bitflags/bitflags/pull/296 +* Add a parser for flags formatted as bar-separated-values by @KodrAus in https://github.com/bitflags/bitflags/pull/297 +* Prepare for 2.0.0-rc.2 release by @KodrAus in https://github.com/bitflags/bitflags/pull/299 +* Use strip_prefix instead of starts_with + slice by @QuinnPainter in https://github.com/bitflags/bitflags/pull/301 +* Fix up some clippy lints by @KodrAus in https://github.com/bitflags/bitflags/pull/302 +* Prepare for 2.0.0-rc.3 release by @KodrAus in https://github.com/bitflags/bitflags/pull/303 +* 
feat: Add minimum permissions to rust.yml workflow by @gabibguti in https://github.com/bitflags/bitflags/pull/305 + +## New Contributors +* @wackbyte made their first contribution in https://github.com/bitflags/bitflags/pull/271 +* @atouchet made their first contribution in https://github.com/bitflags/bitflags/pull/272 +* @tiaanl made their first contribution in https://github.com/bitflags/bitflags/pull/270 +* @rusty-snake made their first contribution in https://github.com/bitflags/bitflags/pull/291 +* @fintelia made their first contribution in https://github.com/bitflags/bitflags/pull/294 +* @nim65s made their first contribution in https://github.com/bitflags/bitflags/pull/296 +* @QuinnPainter made their first contribution in https://github.com/bitflags/bitflags/pull/301 +* @gabibguti made their first contribution in https://github.com/bitflags/bitflags/pull/305 + +**Full Changelog**: https://github.com/bitflags/bitflags/compare/1.3.2...2.0.0 + +# 2.0.0-rc.3 + +## What's Changed +* Use strip_prefix instead of starts_with + slice by @QuinnPainter in https://github.com/bitflags/bitflags/pull/301 +* Fix up some clippy lints by @KodrAus in https://github.com/bitflags/bitflags/pull/302 + +## New Contributors +* @QuinnPainter made their first contribution in https://github.com/bitflags/bitflags/pull/301 + +**Full Changelog**: https://github.com/bitflags/bitflags/compare/2.0.0-rc.2...2.0.0-rc.3 + +# 2.0.0-rc.2 + +## Changes to `serde` serialization + +**⚠️ NOTE ⚠️** This release changes the default serialization you'll get if you `#[derive(Serialize, Deserialize)]` +on your generated flags types. It will now use a formatted string for human-readable formats and the underlying bits +type for compact formats. + +To keep the old behavior, see the [`bitflags-serde-legacy`](https://github.com/KodrAus/bitflags-serde-legacy) library. + +## What's Changed + +* Add missing "if" to contains doc-comment in traits.rs by @rusty-snake in https://github.com/bitflags/bitflags/pull/291 +* Forbid unsafe_code by @fintelia in https://github.com/bitflags/bitflags/pull/294 +* serde: enable no-std support by @nim65s in https://github.com/bitflags/bitflags/pull/296 +* Add a parser for flags formatted as bar-separated-values by @KodrAus in https://github.com/bitflags/bitflags/pull/297 + +## New Contributors +* @rusty-snake made their first contribution in https://github.com/bitflags/bitflags/pull/291 +* @fintelia made their first contribution in https://github.com/bitflags/bitflags/pull/294 +* @nim65s made their first contribution in https://github.com/bitflags/bitflags/pull/296 + +**Full Changelog**: https://github.com/bitflags/bitflags/compare/2.0.0-rc.1...2.0.0-rc.2 + +# 2.0.0-rc.1 + +This is a big release including a few years worth of work on a new `BitFlags` trait, iteration, and better macro organization for future extensibility. 
+ +## What's Changed +* Fix a typo and call out MSRV bump by @KodrAus in https://github.com/bitflags/bitflags/pull/259 +* BitFlags trait by @arturoc in https://github.com/bitflags/bitflags/pull/220 +* Add a hidden trait to discourage manual impls of BitFlags by @KodrAus in https://github.com/bitflags/bitflags/pull/261 +* Sanitize `Ok` by @konsumlamm in https://github.com/bitflags/bitflags/pull/266 +* Fix bug in `Debug` implementation by @konsumlamm in https://github.com/bitflags/bitflags/pull/268 +* Fix a typo in the generated documentation by @wackbyte in https://github.com/bitflags/bitflags/pull/271 +* Use SPDX license format by @atouchet in https://github.com/bitflags/bitflags/pull/272 +* serde tests fail in CI by @arturoc in https://github.com/bitflags/bitflags/pull/277 +* Fix beta test output by @KodrAus in https://github.com/bitflags/bitflags/pull/279 +* Add example to the README.md file by @tiaanl in https://github.com/bitflags/bitflags/pull/270 +* Iterator over all the enabled options by @arturoc in https://github.com/bitflags/bitflags/pull/278 +* from_bits_(truncate) fail with composite flags by @arturoc in https://github.com/bitflags/bitflags/pull/276 +* Add more platform coverage to CI by @KodrAus in https://github.com/bitflags/bitflags/pull/280 +* rework the way cfgs are handled by @KodrAus in https://github.com/bitflags/bitflags/pull/281 +* Split generated code into two types by @KodrAus in https://github.com/bitflags/bitflags/pull/282 +* expose bitflags iters using nameable types by @KodrAus in https://github.com/bitflags/bitflags/pull/286 +* Support creating flags from their names by @KodrAus in https://github.com/bitflags/bitflags/pull/287 +* Update README.md by @KodrAus in https://github.com/bitflags/bitflags/pull/288 + +## New Contributors +* @wackbyte made their first contribution in https://github.com/bitflags/bitflags/pull/271 +* @atouchet made their first contribution in https://github.com/bitflags/bitflags/pull/272 +* @tiaanl made their first contribution in https://github.com/bitflags/bitflags/pull/270 + +**Full Changelog**: https://github.com/bitflags/bitflags/compare/1.3.2...2.0.0-rc.1 + +# 1.3.2 + +- Allow `non_snake_case` in generated flags types ([#256]) + +[#256]: https://github.com/bitflags/bitflags/pull/256 + +# 1.3.1 + +- Revert unconditional `#[repr(transparent)]` ([#252]) + +[#252]: https://github.com/bitflags/bitflags/pull/252 + +# 1.3.0 (yanked) + +**This release bumps the Minimum Supported Rust Version to `1.46.0`** + +- Add `#[repr(transparent)]` ([#187]) + +- End `empty` doc comment with full stop ([#202]) + +- Fix typo in crate root docs ([#206]) + +- Document from_bits_unchecked unsafety ([#207]) + +- Let `is_all` ignore extra bits ([#211]) + +- Allows empty flag definition ([#225]) + +- Making crate accessible from std ([#227]) + +- Make `from_bits` a const fn ([#229]) + +- Allow multiple bitflags structs in one macro invocation ([#235]) + +- Add named functions to perform set operations ([#244]) + +- Fix typos in method docs ([#245]) + +- Modernization of the `bitflags` macro to take advantage of newer features and 2018 idioms ([#246]) + +- Fix regression (in an unreleased feature) and simplify tests ([#247]) + +- Use `Self` and fix bug when overriding `stringify!` ([#249]) + +[#187]: https://github.com/bitflags/bitflags/pull/187 +[#202]: https://github.com/bitflags/bitflags/pull/202 +[#206]: https://github.com/bitflags/bitflags/pull/206 +[#207]: https://github.com/bitflags/bitflags/pull/207 +[#211]: https://github.com/bitflags/bitflags/pull/211 
+[#225]: https://github.com/bitflags/bitflags/pull/225 +[#227]: https://github.com/bitflags/bitflags/pull/227 +[#229]: https://github.com/bitflags/bitflags/pull/229 +[#235]: https://github.com/bitflags/bitflags/pull/235 +[#244]: https://github.com/bitflags/bitflags/pull/244 +[#245]: https://github.com/bitflags/bitflags/pull/245 +[#246]: https://github.com/bitflags/bitflags/pull/246 +[#247]: https://github.com/bitflags/bitflags/pull/247 +[#249]: https://github.com/bitflags/bitflags/pull/249 + +# 1.2.1 + +- Remove extraneous `#[inline]` attributes ([#194]) + +[#194]: https://github.com/bitflags/bitflags/pull/194 + +# 1.2.0 + +- Fix typo: {Lower, Upper}Exp - {Lower, Upper}Hex ([#183]) + +- Add support for "unknown" bits ([#188]) + +[#183]: https://github.com/rust-lang-nursery/bitflags/pull/183 +[#188]: https://github.com/rust-lang-nursery/bitflags/pull/188 + +# 1.1.0 + +This is a re-release of `1.0.5`, which was yanked due to a bug in the RLS. + +# 1.0.5 + +- Use compiletest_rs flags supported by stable toolchain ([#171]) + +- Put the user provided attributes first ([#173]) + +- Make bitflags methods `const` on newer compilers ([#175]) + +[#171]: https://github.com/rust-lang-nursery/bitflags/pull/171 +[#173]: https://github.com/rust-lang-nursery/bitflags/pull/173 +[#175]: https://github.com/rust-lang-nursery/bitflags/pull/175 + +# 1.0.4 + +- Support Rust 2018 style macro imports ([#165]) + + ```rust + use bitflags::bitflags; + ``` + +[#165]: https://github.com/rust-lang-nursery/bitflags/pull/165 + +# 1.0.3 + +- Improve zero value flag handling and documentation ([#157]) + +[#157]: https://github.com/rust-lang-nursery/bitflags/pull/157 + +# 1.0.2 + +- 30% improvement in compile time of bitflags crate ([#156]) + +- Documentation improvements ([#153]) + +- Implementation cleanup ([#149]) + +[#156]: https://github.com/rust-lang-nursery/bitflags/pull/156 +[#153]: https://github.com/rust-lang-nursery/bitflags/pull/153 +[#149]: https://github.com/rust-lang-nursery/bitflags/pull/149 + +# 1.0.1 +- Add support for `pub(restricted)` specifier on the bitflags struct ([#135]) +- Optimize performance of `all()` when called from a separate crate ([#136]) + +[#135]: https://github.com/rust-lang-nursery/bitflags/pull/135 +[#136]: https://github.com/rust-lang-nursery/bitflags/pull/136 + +# 1.0.0 +- **[breaking change]** Macro now generates [associated constants](https://doc.rust-lang.org/reference/items.html#associated-constants) ([#24]) + +- **[breaking change]** Minimum supported version is Rust **1.20**, due to usage of associated constants + +- After being broken in 0.9, the `#[deprecated]` attribute is now supported again ([#112]) + +- Other improvements to unit tests and documentation ([#106] and [#115]) + +[#24]: https://github.com/rust-lang-nursery/bitflags/pull/24 +[#106]: https://github.com/rust-lang-nursery/bitflags/pull/106 +[#112]: https://github.com/rust-lang-nursery/bitflags/pull/112 +[#115]: https://github.com/rust-lang-nursery/bitflags/pull/115 + +## How to update your code to use associated constants +Assuming the following structure definition: +```rust +bitflags! 
{ + struct Something: u8 { + const FOO = 0b01, + const BAR = 0b10 + } +} +``` +In 0.9 and older you could do: +```rust +let x = FOO.bits | BAR.bits; +``` +Now you must use: +```rust +let x = Something::FOO.bits | Something::BAR.bits; +``` + +# 0.9.1 +- Fix the implementation of `Formatting` traits when other formatting traits were present in scope ([#105]) + +[#105]: https://github.com/rust-lang-nursery/bitflags/pull/105 + +# 0.9.0 +- **[breaking change]** Use struct keyword instead of flags to define bitflag types ([#84]) + +- **[breaking change]** Terminate const items with semicolons instead of commas ([#87]) + +- Implement the `Hex`, `Octal`, and `Binary` formatting traits ([#86]) + +- Printing an empty flag value with the `Debug` trait now prints "(empty)" instead of nothing ([#85]) + +- The `bitflags!` macro can now be used inside of a fn body, to define a type local to that function ([#74]) + +[#74]: https://github.com/rust-lang-nursery/bitflags/pull/74 +[#84]: https://github.com/rust-lang-nursery/bitflags/pull/84 +[#85]: https://github.com/rust-lang-nursery/bitflags/pull/85 +[#86]: https://github.com/rust-lang-nursery/bitflags/pull/86 +[#87]: https://github.com/rust-lang-nursery/bitflags/pull/87 + +# 0.8.2 +- Update feature flag used when building bitflags as a dependency of the Rust toolchain + +# 0.8.1 +- Allow bitflags to be used as a dependency of the Rust toolchain + +# 0.8.0 +- Add support for the experimental `i128` and `u128` integer types ([#57]) +- Add set method: `flags.set(SOME_FLAG, true)` or `flags.set(SOME_FLAG, false)` ([#55]) + This may break code that defines its own set method + +[#55]: https://github.com/rust-lang-nursery/bitflags/pull/55 +[#57]: https://github.com/rust-lang-nursery/bitflags/pull/57 + +# 0.7.1 +*(yanked)* + +# 0.7.0 +- Implement the Extend trait ([#49]) +- Allow definitions inside the `bitflags!` macro to refer to items imported from other modules ([#51]) + +[#49]: https://github.com/rust-lang-nursery/bitflags/pull/49 +[#51]: https://github.com/rust-lang-nursery/bitflags/pull/51 + +# 0.6.0 +- The `no_std` feature was removed as it is now the default +- The `assignment_operators` feature was remove as it is now enabled by default +- Some clippy suggestions have been applied diff --git a/vendor/bitflags/CODE_OF_CONDUCT.md b/vendor/bitflags/CODE_OF_CONDUCT.md new file mode 100644 index 00000000000000..f7add90ae35556 --- /dev/null +++ b/vendor/bitflags/CODE_OF_CONDUCT.md @@ -0,0 +1,73 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to making participation in our project and +our community a harassment-free experience for everyone, regardless of age, body +size, disability, ethnicity, gender identity and expression, level of experience, +education, socio-economic status, nationality, personal appearance, race, +religion, or sexual identity and orientation. 
+ +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or + advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic + address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account, or acting as an appointed +representative at an online or offline event. Representation of a project may be +further defined and clarified by project maintainers. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting the project team at coc@senaite.org. All +complaints will be reviewed and investigated and will result in a response that +is deemed necessary and appropriate to the circumstances. The project team is +obligated to maintain confidentiality with regard to the reporter of an incident. +Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good +faith may face temporary or permanent repercussions as determined by other +members of the project's leadership. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, +available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html + +[homepage]: https://www.contributor-covenant.org \ No newline at end of file diff --git a/vendor/bitflags/CONTRIBUTING.md b/vendor/bitflags/CONTRIBUTING.md new file mode 100644 index 00000000000000..588336398290c4 --- /dev/null +++ b/vendor/bitflags/CONTRIBUTING.md @@ -0,0 +1,9 @@ +# Updating compile-fail test outputs + +`bitflags` uses the `trybuild` crate to integration test its macros. Since Rust error messages change frequently enough that `nightly` builds produce spurious failures, we only check the compiler output in `beta` builds. If you run: + +``` +TRYBUILD=overwrite cargo +beta test --all +``` + +it will run the tests and update the `trybuild` output files. 
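
For context, a `trybuild` harness is an ordinary integration test along these lines (an illustrative sketch under assumed paths, not the crate's actual test file):

```rust
// tests/compile_fail.rs (hypothetical path)

#[test]
fn compile_fail() {
    let t = trybuild::TestCases::new();
    // Every file matching the glob is expected to fail to compile; its
    // compiler output is compared against a neighbouring `.stderr` file,
    // which running with `TRYBUILD=overwrite` regenerates.
    t.compile_fail("tests/compile-fail/*.rs");
}
```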
diff --git a/vendor/bitflags/Cargo.lock b/vendor/bitflags/Cargo.lock new file mode 100644 index 00000000000000..23ecff1378e608 --- /dev/null +++ b/vendor/bitflags/Cargo.lock @@ -0,0 +1,325 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. +version = 3 + +[[package]] +name = "arbitrary" +version = "1.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c3d036a3c4ab069c7b410a2ce876bd74808d2d0888a82667669f8e783a898bf1" +dependencies = [ + "derive_arbitrary", +] + +[[package]] +name = "bitflags" +version = "2.10.0" +dependencies = [ + "arbitrary", + "bytemuck", + "rustversion", + "serde", + "serde_core", + "serde_json", + "serde_test", + "trybuild", + "zerocopy", +] + +[[package]] +name = "bytemuck" +version = "1.24.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1fbdf580320f38b612e485521afda1ee26d10cc9884efaaa750d383e13e3c5f4" +dependencies = [ + "bytemuck_derive", +] + +[[package]] +name = "bytemuck_derive" +version = "1.10.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f9abbd1bc6865053c427f7198e6af43bfdedc55ab791faed4fbd361d789575ff" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "derive_arbitrary" +version = "1.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e567bd82dcff979e4b03460c307b3cdc9e96fde3d73bed1496d2bc75d9dd62a" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "equivalent" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "877a4ace8713b0bcf2a4e7eec82529c029f1d0619886d18145fea96c3ffe5c0f" + +[[package]] +name = "glob" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0cc23270f6e1808e30a928bdc84dea0b9b4136a8bc82338574f23baf47bbd280" + +[[package]] +name = "hashbrown" +version = "0.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5419bdc4f6a9207fbeba6d11b604d481addf78ecd10c11ad51e76c2f6482748d" + +[[package]] +name = "indexmap" +version = "2.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6717a8d2a5a929a1a2eb43a12812498ed141a0bcfb7e8f7844fbdbe4303bba9f" +dependencies = [ + "equivalent", + "hashbrown", +] + +[[package]] +name = "itoa" +version = "1.0.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c" + +[[package]] +name = "memchr" +version = "2.7.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f52b00d39961fc5b2736ea853c9cc86238e165017a493d1d5c8eac6bdc4cc273" + +[[package]] +name = "proc-macro2" +version = "1.0.101" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "89ae43fd86e4158d6db51ad8e2b80f313af9cc74f5c0e03ccb87de09998732de" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "quote" +version = "1.0.41" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ce25767e7b499d1b604768e7cde645d14cc8584231ea6b295e9c9eb22c02e1d1" +dependencies = [ + "proc-macro2", +] + +[[package]] +name = "rustversion" +version = "1.0.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b39cdef0fa800fc44525c84ccb54a029961a8215f9619753635a9c0d2538d46d" + +[[package]] +name = "ryu" +version = "1.0.20" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "28d3b2b1366ec20994f1fd18c3c594f05c5dd4bc44d8bb0c1c632c8d6829481f" + +[[package]] +name = "serde" +version = "1.0.228" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a8e94ea7f378bd32cbbd37198a4a91436180c5bb472411e48b5ec2e2124ae9e" +dependencies = [ + "serde_core", + "serde_derive", +] + +[[package]] +name = "serde_core" +version = "1.0.228" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41d385c7d4ca58e59fc732af25c3983b67ac852c1a25000afe1175de458b67ad" +dependencies = [ + "serde_derive", +] + +[[package]] +name = "serde_derive" +version = "1.0.228" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d540f220d3187173da220f885ab66608367b6574e925011a9353e4badda91d79" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "serde_json" +version = "1.0.145" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "402a6f66d8c709116cf22f558eab210f5a50187f702eb4d7e5ef38d9a7f1c79c" +dependencies = [ + "itoa", + "memchr", + "ryu", + "serde", + "serde_core", +] + +[[package]] +name = "serde_spanned" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e24345aa0fe688594e73770a5f6d1b216508b4f93484c0026d521acd30134392" +dependencies = [ + "serde_core", +] + +[[package]] +name = "serde_test" +version = "1.0.177" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f901ee573cab6b3060453d2d5f0bae4e6d628c23c0a962ff9b5f1d7c8d4f1ed" +dependencies = [ + "serde", +] + +[[package]] +name = "syn" +version = "2.0.107" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2a26dbd934e5451d21ef060c018dae56fc073894c5a7896f882928a76e6d081b" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "target-triple" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1ac9aa371f599d22256307c24a9d748c041e548cbf599f35d890f9d365361790" + +[[package]] +name = "termcolor" +version = "1.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "06794f8f6c5c898b3275aebefa6b8a1cb24cd2c6c79397ab15774837a0bc5755" +dependencies = [ + "winapi-util", +] + +[[package]] +name = "toml" +version = "0.9.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0dc8b1fb61449e27716ec0e1bdf0f6b8f3e8f6b05391e8497b8b6d7804ea6d8" +dependencies = [ + "indexmap", + "serde_core", + "serde_spanned", + "toml_datetime", + "toml_parser", + "toml_writer", + "winnow", +] + +[[package]] +name = "toml_datetime" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f2cdb639ebbc97961c51720f858597f7f24c4fc295327923af55b74c3c724533" +dependencies = [ + "serde_core", +] + +[[package]] +name = "toml_parser" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c0cbe268d35bdb4bb5a56a2de88d0ad0eb70af5384a99d648cd4b3d04039800e" +dependencies = [ + "winnow", +] + +[[package]] +name = "toml_writer" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "df8b2b54733674ad286d16267dcfc7a71ed5c776e4ac7aa3c3e2561f7c637bf2" + +[[package]] +name = "trybuild" +version = "1.0.112" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4d66678374d835fe847e0dc8348fde2ceb5be4a7ec204437d8367f0d8df266a5" +dependencies = [ + "glob", + "serde", + "serde_derive", + 
"serde_json", + "target-triple", + "termcolor", + "toml", +] + +[[package]] +name = "unicode-ident" +version = "1.0.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f63a545481291138910575129486daeaf8ac54aee4387fe7906919f7830c7d9d" + +[[package]] +name = "winapi-util" +version = "0.1.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c2a7b1c03c876122aa43f3020e6c3c3ee5c05081c9a00739faf7503aeba10d22" +dependencies = [ + "windows-sys", +] + +[[package]] +name = "windows-link" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0805222e57f7521d6a62e36fa9163bc891acd422f971defe97d64e70d0a4fe5" + +[[package]] +name = "windows-sys" +version = "0.61.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ae137229bcbd6cdf0f7b80a31df61766145077ddf49416a728b02cb3921ff3fc" +dependencies = [ + "windows-link", +] + +[[package]] +name = "winnow" +version = "0.7.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "21a0236b59786fed61e2a80582dd500fe61f18b5dca67a4a067d0bc9039339cf" + +[[package]] +name = "zerocopy" +version = "0.8.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0894878a5fa3edfd6da3f88c4805f4c8558e2b996227a3d864f47fe11e38282c" +dependencies = [ + "zerocopy-derive", +] + +[[package]] +name = "zerocopy-derive" +version = "0.8.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "88d2b8d9c68ad2b9e4340d7832716a4d21a22a1154777ad56ea55c51a9cf3831" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] diff --git a/vendor/bitflags/Cargo.toml b/vendor/bitflags/Cargo.toml new file mode 100644 index 00000000000000..f950e7e3d7c4cb --- /dev/null +++ b/vendor/bitflags/Cargo.toml @@ -0,0 +1,120 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies. +# +# If you are reading this file be aware that the original Cargo.toml +# will likely look very different (and much more reasonable). +# See Cargo.toml.orig for the original contents. + +[package] +edition = "2021" +rust-version = "1.56.0" +name = "bitflags" +version = "2.10.0" +authors = ["The Rust Project Developers"] +build = false +exclude = [ + "/tests", + "/.github", +] +autolib = false +autobins = false +autoexamples = false +autotests = false +autobenches = false +description = """ +A macro to generate structures which behave like bitflags. 
+""" +homepage = "https://github.com/bitflags/bitflags" +documentation = "https://docs.rs/bitflags" +readme = "README.md" +keywords = [ + "bit", + "bitmask", + "bitflags", + "flags", +] +categories = ["no-std"] +license = "MIT OR Apache-2.0" +repository = "https://github.com/bitflags/bitflags" + +[package.metadata.docs.rs] +features = ["example_generated"] + +[features] +example_generated = [] +serde = ["serde_core"] +std = [] + +[lib] +name = "bitflags" +path = "src/lib.rs" + +[[example]] +name = "custom_bits_type" +path = "examples/custom_bits_type.rs" + +[[example]] +name = "custom_derive" +path = "examples/custom_derive.rs" + +[[example]] +name = "fmt" +path = "examples/fmt.rs" + +[[example]] +name = "macro_free" +path = "examples/macro_free.rs" + +[[example]] +name = "serde" +path = "examples/serde.rs" + +[[bench]] +name = "parse" +path = "benches/parse.rs" + +[dependencies.arbitrary] +version = "1.0" +optional = true + +[dependencies.bytemuck] +version = "1.12" +optional = true + +[dependencies.serde_core] +version = "1.0.228" +optional = true +default-features = false + +[dev-dependencies.arbitrary] +version = "1.0" +features = ["derive"] + +[dev-dependencies.bytemuck] +version = "1.12.2" +features = ["derive"] + +[dev-dependencies.rustversion] +version = "1.0" + +[dev-dependencies.serde_json] +version = "1.0" + +[dev-dependencies.serde_lib] +version = "1.0.103" +features = ["derive"] +package = "serde" + +[dev-dependencies.serde_test] +version = "1.0.19" + +[dev-dependencies.trybuild] +version = "1.0.18" + +[dev-dependencies.zerocopy] +version = "0.8" +features = ["derive"] diff --git a/vendor/bitflags/LICENSE-APACHE b/vendor/bitflags/LICENSE-APACHE new file mode 100644 index 00000000000000..16fe87b06e802f --- /dev/null +++ b/vendor/bitflags/LICENSE-APACHE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/vendor/bitflags/LICENSE-MIT b/vendor/bitflags/LICENSE-MIT new file mode 100644 index 00000000000000..39d4bdb5acd313 --- /dev/null +++ b/vendor/bitflags/LICENSE-MIT @@ -0,0 +1,25 @@ +Copyright (c) 2014 The Rust Project Developers + +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. diff --git a/vendor/bitflags/README.md b/vendor/bitflags/README.md new file mode 100644 index 00000000000000..5f8a1be35804ff --- /dev/null +++ b/vendor/bitflags/README.md @@ -0,0 +1,88 @@ +bitflags +======== + +[![Rust](https://github.com/bitflags/bitflags/workflows/Rust/badge.svg)](https://github.com/bitflags/bitflags/actions) +[![Latest version](https://img.shields.io/crates/v/bitflags.svg)](https://crates.io/crates/bitflags) +[![Documentation](https://docs.rs/bitflags/badge.svg)](https://docs.rs/bitflags) +![License](https://img.shields.io/crates/l/bitflags.svg) + +`bitflags` generates flags enums with well-defined semantics and ergonomic end-user APIs. + +You can use `bitflags` to: + +- provide more user-friendly bindings to C APIs where flags may or may not be fully known in advance. +- generate efficient options types with string parsing and formatting support. + +You can't use `bitflags` to: + +- guarantee only bits corresponding to defined flags will ever be set. `bitflags` allows access to the underlying bits type so arbitrary bits may be set. +- define bitfields. `bitflags` only generates types where set bits denote the presence of some combination of flags. + +- [Documentation](https://docs.rs/bitflags) +- [Specification](https://github.com/bitflags/bitflags/blob/main/spec.md) +- [Release notes](https://github.com/bitflags/bitflags/releases) + +## Usage + +Add this to your `Cargo.toml`: + +```toml +[dependencies] +bitflags = "2.10.0" +``` + +and this to your source code: + +```rust +use bitflags::bitflags; +``` + +## Example + +Generate a flags structure: + +```rust +use bitflags::bitflags; + +// The `bitflags!` macro generates `struct`s that manage a set of flags. +bitflags! { + /// Represents a set of flags. + #[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] + struct Flags: u32 { + /// The value `A`, at bit position `0`. + const A = 0b00000001; + /// The value `B`, at bit position `1`. + const B = 0b00000010; + /// The value `C`, at bit position `2`. + const C = 0b00000100; + + /// The combination of `A`, `B`, and `C`. + const ABC = Self::A.bits() | Self::B.bits() | Self::C.bits(); + } +} + +fn main() { + let e1 = Flags::A | Flags::C; + let e2 = Flags::B | Flags::C; + assert_eq!((e1 | e2), Flags::ABC); // union + assert_eq!((e1 & e2), Flags::C); // intersection + assert_eq!((e1 - e2), Flags::A); // set difference + assert_eq!(!e2, Flags::A); // set complement +} +``` + +## Cargo features + +The `bitflags` library defines a few Cargo features that you can opt-in to: + +- `std`: Implement the `Error` trait on error types used by `bitflags`. +- `serde`: Support deriving `serde` traits on generated flags types. +- `arbitrary`: Support deriving `arbitrary` traits on generated flags types. +- `bytemuck`: Support deriving `bytemuck` traits on generated flags types. + +Also see [`bitflags_derive`](https://github.com/bitflags/bitflags-derive) for other flags-aware traits. + +## Rust Version Support + +The minimum supported Rust version is documented in the `Cargo.toml` file. +This may be bumped in minor releases as necessary. 
diff --git a/vendor/bitflags/SECURITY.md b/vendor/bitflags/SECURITY.md new file mode 100644 index 00000000000000..790ac5b59debde --- /dev/null +++ b/vendor/bitflags/SECURITY.md @@ -0,0 +1,13 @@ +# Security Policy + +## Supported Versions + +Security updates are applied only to the latest release. + +## Reporting a Vulnerability + +If you have discovered a security vulnerability in this project, please report it privately. **Do not disclose it as a public issue.** This gives us time to work with you to fix the issue before public exposure, reducing the chance that the exploit will be used before a patch is released. + +Please disclose it at [security advisory](https://github.com/bitflags/bitflags/security/advisories/new). + +This project is maintained by a team of volunteers on a reasonable-effort basis. As such, please give us at least 90 days to work on a fix before public exposure. diff --git a/vendor/bitflags/benches/parse.rs b/vendor/bitflags/benches/parse.rs new file mode 100644 index 00000000000000..caa9203451a12e --- /dev/null +++ b/vendor/bitflags/benches/parse.rs @@ -0,0 +1,96 @@ +#![feature(test)] + +extern crate test; + +use std::{ + fmt::{self, Display}, + str::FromStr, +}; + +bitflags::bitflags! { + struct Flags10: u32 { + const A = 0b0000_0000_0000_0001; + const B = 0b0000_0000_0000_0010; + const C = 0b0000_0000_0000_0100; + const D = 0b0000_0000_0000_1000; + const E = 0b0000_0000_0001_0000; + const F = 0b0000_0000_0010_0000; + const G = 0b0000_0000_0100_0000; + const H = 0b0000_0000_1000_0000; + const I = 0b0000_0001_0000_0000; + const J = 0b0000_0010_0000_0000; + } +} + +impl FromStr for Flags10 { + type Err = bitflags::parser::ParseError; + + fn from_str(flags: &str) -> Result { + Ok(Flags10(flags.parse()?)) + } +} + +impl Display for Flags10 { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + Display::fmt(&self.0, f) + } +} + +#[bench] +fn format_flags_1_present(b: &mut test::Bencher) { + b.iter(|| Flags10::J.to_string()) +} + +#[bench] +fn format_flags_5_present(b: &mut test::Bencher) { + b.iter(|| (Flags10::F | Flags10::G | Flags10::H | Flags10::I | Flags10::J).to_string()) +} + +#[bench] +fn format_flags_10_present(b: &mut test::Bencher) { + b.iter(|| { + (Flags10::A + | Flags10::B + | Flags10::C + | Flags10::D + | Flags10::E + | Flags10::F + | Flags10::G + | Flags10::H + | Flags10::I + | Flags10::J) + .to_string() + }) +} + +#[bench] +fn parse_flags_1_10(b: &mut test::Bencher) { + b.iter(|| { + let flags: Flags10 = "J".parse().unwrap(); + flags + }) +} + +#[bench] +fn parse_flags_5_10(b: &mut test::Bencher) { + b.iter(|| { + let flags: Flags10 = "F | G | H | I | J".parse().unwrap(); + flags + }) +} + +#[bench] +fn parse_flags_10_10(b: &mut test::Bencher) { + b.iter(|| { + let flags: Flags10 = "A | B | C | D | E | F | G | H | I | J".parse().unwrap(); + flags + }) +} + +#[bench] +fn parse_flags_1_10_hex(b: &mut test::Bencher) { + b.iter(|| { + let flags: Flags10 = "0xFF".parse().unwrap(); + flags + }) +} diff --git a/vendor/bitflags/examples/custom_bits_type.rs b/vendor/bitflags/examples/custom_bits_type.rs new file mode 100644 index 00000000000000..8924bfdf31a6e4 --- /dev/null +++ b/vendor/bitflags/examples/custom_bits_type.rs @@ -0,0 +1,97 @@ +use std::ops::{BitAnd, BitOr, BitXor, Not}; + +use bitflags::{Bits, Flag, Flags}; + +// Define a custom container that can be used in flags types +// Note custom bits types can't be used in `bitflags!` +// without making the trait impls `const`. 
This is currently
+// unstable
+#[derive(Clone, Copy, Debug)]
+pub struct CustomBits([bool; 3]);
+
+impl Bits for CustomBits {
+    const EMPTY: Self = CustomBits([false; 3]);
+
+    const ALL: Self = CustomBits([true; 3]);
+}
+
+impl PartialEq for CustomBits {
+    fn eq(&self, other: &Self) -> bool {
+        self.0 == other.0
+    }
+}
+
+impl BitAnd for CustomBits {
+    type Output = Self;
+
+    fn bitand(self, other: Self) -> Self {
+        CustomBits([
+            self.0[0] & other.0[0],
+            self.0[1] & other.0[1],
+            self.0[2] & other.0[2],
+        ])
+    }
+}
+
+impl BitOr for CustomBits {
+    type Output = Self;
+
+    fn bitor(self, other: Self) -> Self {
+        CustomBits([
+            self.0[0] | other.0[0],
+            self.0[1] | other.0[1],
+            self.0[2] | other.0[2],
+        ])
+    }
+}
+
+impl BitXor for CustomBits {
+    type Output = Self;
+
+    fn bitxor(self, other: Self) -> Self {
+        CustomBits([
+            self.0[0] ^ other.0[0],
+            self.0[1] ^ other.0[1],
+            self.0[2] ^ other.0[2],
+        ])
+    }
+}
+
+impl Not for CustomBits {
+    type Output = Self;
+
+    fn not(self) -> Self {
+        CustomBits([!self.0[0], !self.0[1], !self.0[2]])
+    }
+}
+
+#[derive(Clone, Copy, Debug)]
+pub struct CustomFlags(CustomBits);
+
+impl CustomFlags {
+    pub const A: Self = CustomFlags(CustomBits([true, false, false]));
+    pub const B: Self = CustomFlags(CustomBits([false, true, false]));
+    pub const C: Self = CustomFlags(CustomBits([false, false, true]));
+}
+
+impl Flags for CustomFlags {
+    const FLAGS: &'static [Flag<Self>] = &[
+        Flag::new("A", Self::A),
+        Flag::new("B", Self::B),
+        Flag::new("C", Self::C),
+    ];
+
+    type Bits = CustomBits;
+
+    fn bits(&self) -> Self::Bits {
+        self.0
+    }
+
+    fn from_bits_retain(bits: Self::Bits) -> Self {
+        CustomFlags(bits)
+    }
+}
+
+fn main() {
+    println!("{:?}", CustomFlags::A.union(CustomFlags::C));
+}
diff --git a/vendor/bitflags/examples/custom_derive.rs b/vendor/bitflags/examples/custom_derive.rs
new file mode 100644
index 00000000000000..ba26723f0c4c41
--- /dev/null
+++ b/vendor/bitflags/examples/custom_derive.rs
@@ -0,0 +1,23 @@
+//! An example of implementing the `BitFlags` trait manually for a flags type.
+
+use std::str;
+
+use bitflags::bitflags;
+
+// Define a flags type outside of the `bitflags` macro as a newtype
+// It can accept custom derives for libraries `bitflags` doesn't support natively
+#[derive(zerocopy::IntoBytes, zerocopy::FromBytes, zerocopy::KnownLayout, zerocopy::Immutable)]
+#[repr(transparent)]
+pub struct ManualFlags(u32);
+
+// Next: use `impl Flags` instead of `struct Flags`
+bitflags! {
+    impl ManualFlags: u32 {
+        const A = 0b00000001;
+        const B = 0b00000010;
+        const C = 0b00000100;
+        const ABC = Self::A.bits() | Self::B.bits() | Self::C.bits();
+    }
+}
+
+fn main() {}
diff --git a/vendor/bitflags/examples/fmt.rs b/vendor/bitflags/examples/fmt.rs
new file mode 100644
index 00000000000000..724b2074cf0c85
--- /dev/null
+++ b/vendor/bitflags/examples/fmt.rs
@@ -0,0 +1,49 @@
+//! An example of implementing Rust's standard formatting and parsing traits for flags types.
+
+use core::{fmt, str};
+
+bitflags::bitflags! {
+    // You can `#[derive]` the `Debug` trait, but implementing it manually
+    // can produce output like `A | B` instead of `Flags(A | B)`.
+    // #[derive(Debug)]
+    #[derive(PartialEq, Eq)]
+    pub struct Flags: u32 {
+        const A = 1;
+        const B = 2;
+        const C = 4;
+        const D = 8;
+    }
+}
+
+impl fmt::Debug for Flags {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        bitflags::parser::to_writer(self, f)
+    }
+}
+
+impl fmt::Display for Flags {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        bitflags::parser::to_writer(self, f)
+    }
+}
+
+impl str::FromStr for Flags {
+    type Err = bitflags::parser::ParseError;
+
+    fn from_str(flags: &str) -> Result<Self, Self::Err> {
+        bitflags::parser::from_str(flags)
+    }
+}
+
+fn main() -> Result<(), bitflags::parser::ParseError> {
+    let flags = Flags::A | Flags::B;
+
+    println!("{}", flags);
+
+    let formatted = flags.to_string();
+    let parsed: Flags = formatted.parse()?;
+
+    assert_eq!(flags, parsed);
+
+    Ok(())
+}
diff --git a/vendor/bitflags/examples/macro_free.rs b/vendor/bitflags/examples/macro_free.rs
new file mode 100644
index 00000000000000..7563379005c813
--- /dev/null
+++ b/vendor/bitflags/examples/macro_free.rs
@@ -0,0 +1,61 @@
+//! An example of implementing the `BitFlags` trait manually for a flags type.
+//!
+//! This example doesn't use any macros.
+
+use std::{fmt, str};
+
+use bitflags::{Flag, Flags};
+
+// First: Define your flags type. It just needs to be `Sized + 'static`.
+pub struct ManualFlags(u32);
+
+// Not required: Define some constants for valid flags
+impl ManualFlags {
+    pub const A: ManualFlags = ManualFlags(0b00000001);
+    pub const B: ManualFlags = ManualFlags(0b00000010);
+    pub const C: ManualFlags = ManualFlags(0b00000100);
+    pub const ABC: ManualFlags = ManualFlags(0b00000111);
+}
+
+// Next: Implement the `BitFlags` trait, specifying your set of valid flags
+// and iterators
+impl Flags for ManualFlags {
+    const FLAGS: &'static [Flag<Self>] = &[
+        Flag::new("A", Self::A),
+        Flag::new("B", Self::B),
+        Flag::new("C", Self::C),
+    ];
+
+    type Bits = u32;
+
+    fn bits(&self) -> u32 {
+        self.0
+    }
+
+    fn from_bits_retain(bits: u32) -> Self {
+        Self(bits)
+    }
+}
+
+// Not required: Add parsing support
+impl str::FromStr for ManualFlags {
+    type Err = bitflags::parser::ParseError;
+
+    fn from_str(input: &str) -> Result<Self, Self::Err> {
+        bitflags::parser::from_str(input)
+    }
+}
+
+// Not required: Add formatting support
+impl fmt::Display for ManualFlags {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        bitflags::parser::to_writer(self, f)
+    }
+}
+
+fn main() {
+    println!(
+        "{}",
+        ManualFlags::A.union(ManualFlags::B).union(ManualFlags::C)
+    );
+}
diff --git a/vendor/bitflags/examples/serde.rs b/vendor/bitflags/examples/serde.rs
new file mode 100644
index 00000000000000..3b72e1a81ef422
--- /dev/null
+++ b/vendor/bitflags/examples/serde.rs
@@ -0,0 +1,39 @@
+//! An example of implementing `serde::Serialize` and `serde::Deserialize`.
+//! The `#[serde(transparent)]` attribute is recommended to serialize directly
+//! to the underlying bits type without wrapping it in a `serde` newtype.
+
+#[cfg(feature = "serde")]
+fn main() {
+    use serde_lib::*;
+
+    bitflags::bitflags!
{ + #[derive(Serialize, Deserialize, Debug, PartialEq, Eq)] + #[serde(transparent)] + // NOTE: We alias the `serde` crate as `serde_lib` in this repository, + // but you don't need to do this + #[serde(crate = "serde_lib")] + pub struct Flags: u32 { + const A = 1; + const B = 2; + const C = 4; + const D = 8; + } + } + + let flags = Flags::A | Flags::B; + + let serialized = serde_json::to_string(&flags).unwrap(); + + println!("{:?} -> {}", flags, serialized); + + assert_eq!(serialized, r#""A | B""#); + + let deserialized: Flags = serde_json::from_str(&serialized).unwrap(); + + println!("{} -> {:?}", serialized, flags); + + assert_eq!(deserialized, flags); +} + +#[cfg(not(feature = "serde"))] +fn main() {} diff --git a/vendor/bitflags/spec.md b/vendor/bitflags/spec.md new file mode 100644 index 00000000000000..10a55db8dd0d3e --- /dev/null +++ b/vendor/bitflags/spec.md @@ -0,0 +1,556 @@ +# Bitflags + +`bitflags` generates flags enums with well-defined semantics and ergonomic end-user APIs. + +You can use `bitflags` to: + +- provide more user-friendly bindings to C APIs where flags may or may not be fully known in advance. +- generate efficient options types with string parsing and formatting support. + +You can't use `bitflags` to: + +- guarantee only bits corresponding to defined flags will ever be set. `bitflags` allows access to the underlying bits type so arbitrary bits may be set. +- define bitfields. `bitflags` only generates types where set bits denote the presence of some combination of flags. + +## Definitions + +This section formally defines the terminology and semantics of `bitflags`. It's organized so more fundamental concepts are introduced before those that build on them. It may be helpful to start from the bottom of the section and refer back up to concepts defined earlier. + +Examples use `bitflags` syntax with `u8` as the bits type. + +### Bits type + +A type that defines a fixed number of bits at specific locations. + +---- + +Bits types are typically fixed-width unsigned integers. For example, `u8` is a bits type that defines 8 bits; bit-0 through bit-7. + +### Bits value + +An instance of a bits type where each bit may be set (`1`) or unset (`0`). + +---- + +Some examples of bits values for the bits type `u8` are: + +```rust +0b0000_0000 +0b1111_1111 +0b1010_0101 +``` + +#### Equality + +Two bits values are equal if their bits are in the same configuration; set bits in one are set in the other, and unset bits in one are unset in the other. + +#### Operations + +Bits values define the bitwise operators and (`&`), or (`|`), exclusive-or (`^`), and negation (`!`) that apply to each of their bits. + +### Flag + +A set of bits in a bits type that may have a unique name. + +---- + +Bits are not required to be exclusive to a flag. Bits are not required to be contiguous. + +The following is a flag for `u8` with the name `A` that includes bit-0: + +```rust +const A = 0b0000_0001; +``` + +The following is a flag for `u8` with the name `B` that includes bit-0, and bit-5: + +```rust +const B = 0b0010_0001; +``` + +#### Named flag + +A flag with a name. + +---- + +The following is a named flag, where the name is `A`: + +```rust +const A = 0b0000_0001; +``` + +#### Unnamed flag + +A flag without a name. + +---- + +The following is an unnamed flag: + +```rust +const _ = 0b0000_0001; +``` + +#### Zero-bit flag + +A flag with a set of zero bits. + +---- + +The following is a zero-bit flag: + +```rust +const ZERO = 0b0000_0000; +``` + +#### Single-bit flag + +A flag with a set of one bit. 
+ +---- + +The following are single-bit flags: + +```rust +const A = 0b0000_0001; +const B = 0b0000_0010; +``` + +#### Multi-bit flag + +A flag with a set of more than one bit. + +---- + +The following are multi-bit flags: + +```rust +const A = 0b0000_0011; +const B = 0b1111_1111; +``` + +### Flags type + +A set of defined flags over a specific bits type. + +#### Known bit + +A bit in any defined flag. + +---- + +In the following flags type: + +```rust +struct Flags { + const A = 0b0000_0001; + const B = 0b0000_0010; + const C = 0b0000_0100; +} +``` + +the known bits are: + +```rust +0b0000_0111 +``` + +#### Unknown bit + +A bit not in any defined flag. + +---- + +In the following flags type: + +```rust +struct Flags { + const A = 0b0000_0001; + const B = 0b0000_0010; + const C = 0b0000_0100; +} +``` + +the unknown bits are: + +```rust +0b1111_1000 +``` + +### Flags value + +An instance of a flags type using its specific bits value for storage. + +The flags value of a flag is one where each of its bits is set, and all others are unset. + +#### Contains + +Whether all set bits in a source flags value are also set in a target flags value. + +---- + +Given the flags value: + +```rust +0b0000_0011 +``` + +the following flags values are contained: + +```rust +0b0000_0000 +0b0000_0010 +0b0000_0001 +0b0000_0011 +``` + +but the following flags values are not contained: + +```rust +0b0000_1000 +0b0000_0110 +``` + +#### Intersects + +Whether any set bits in a source flags value are also set in a target flags value. + +---- + +Given the flags value: + +```rust +0b0000_0011 +``` + +the following flags intersect: + +```rust +0b0000_0010 +0b0000_0001 +0b1111_1111 +``` + +but the following flags values do not intersect: + +```rust +0b0000_0000 +0b1111_0000 +``` + +#### Empty + +Whether all bits in a flags value are unset. + +---- + +The following flags value is empty: + +```rust +0b0000_0000 +``` + +The following flags values are not empty: + +```rust +0b0000_0001 +0b0110_0000 +``` + +#### All + +Whether all defined flags are contained in a flags value. + +---- + +Given a flags type: + +```rust +struct Flags { + const A = 0b0000_0001; + const B = 0b0000_0010; +} +``` + +the following flags values all satisfy all: + +```rust +0b0000_0011 +0b1000_0011 +0b1111_1111 +``` + +### Operations + +Examples in this section all use the given flags type: + +```rust +struct Flags { + const A = 0b0000_0001; + const B = 0b0000_0010; + const C = 0b0000_1100; +} +``` + +#### Truncate + +Unset all unknown bits in a flags value. + +---- + +Given the flags value: + +```rust +0b1111_1111 +``` + +the result of truncation will be: + +```rust +0b0000_1111 +``` + +---- + +Truncating doesn't guarantee that a non-empty result will contain any defined flags. Given the following flags type: + +```rust +struct Flags { + const A = 0b0000_0101; +} +``` + +and the following flags value: + +```rust +0b0000_1110; +``` + +The result of truncation will be: + +```rust +0b0000_0100; +``` + +which intersects the flag `A`, but doesn't contain it. + +This behavior is possible even when only operating with flags values containing defined flags. Given the following flags type: + +```rust +struct Flags { + const A = 0b0000_0101; + const B = 0b0000_0001; +} +``` + +The result of `A ^ B` is `0b0000_0100`, which also doesn't contain any defined flag. + +---- + +If all known bits are in the set of at least one defined single-bit flag, then all operations that produce non-empty results will always contain defined flags. 
+ +#### Union + +The bitwise or (`|`) of the bits in two flags values. + +---- + +The following are examples of the result of unioning flags values: + +```rust +0b0000_0001 | 0b0000_0010 = 0b0000_0011 +0b0000_0000 | 0b1111_1111 = 0b1111_1111 +``` + +#### Intersection + +The bitwise and (`&`) of the bits in two flags values. + +---- + +The following are examples of the result of intersecting flags values: + +```rust +0b0000_0001 & 0b0000_0010 = 0b0000_0000 +0b1111_1100 & 0b1111_0111 = 0b1111_0100 +0b1111_1111 & 0b1111_1111 = 0b1111_1111 +``` + +#### Symmetric difference + +The bitwise exclusive-or (`^`) of the bits in two flags values. + +---- + +The following are examples of the symmetric difference between two flags values: + +```rust +0b0000_0001 ^ 0b0000_0010 = 0b0000_0011 +0b0000_1111 ^ 0b0000_0011 = 0b0000_1100 +0b1100_0000 ^ 0b0011_0000 = 0b1111_0000 +``` + +#### Complement + +The bitwise negation (`!`) of the bits in a flags value, truncating the result. + +---- + +The complement is the only operation that explicitly truncates its result, because it doesn't accept a second flags value as input and so is likely to set unknown bits. + +---- + +The following are examples of the complement of a flags value: + +```rust +!0b0000_0000 = 0b0000_1111 +!0b0000_1111 = 0b0000_0000 +!0b1111_1000 = 0b0000_0111 +``` + +#### Difference + +The bitwise intersection (`&`) of the bits in one flags value and the bitwise negation (`!`) of the bits in another. + +---- + +This operation is not equivalent to the intersection of one flags value with the complement of another (`&!`). +The former will truncate the result in the complement, where difference will not. + +---- + +The following are examples of the difference between two flags values: + +```rust +0b0000_0001 & !0b0000_0010 = 0b0000_0001 +0b0000_1101 & !0b0000_0011 = 0b0000_1100 +0b1111_1111 & !0b0000_0001 = 0b1111_1110 +``` + +### Iteration + +Yield the bits of a source flags value in a set of contained flags values. + +---- + +To be most useful, each yielded flags value should set exactly the bits of a defined flag contained in the source. Any known bits that aren't in the set of any contained flag should be yielded together as a final flags value. + +---- + +Given the following flags type: + +```rust +struct Flags { + const A = 0b0000_0001; + const B = 0b0000_0010; + const AB = 0b0000_0011; +} +``` + +and the following flags value: + +```rust +0b0000_1111 +``` + +When iterated it may yield a flags value for `A` and `B`, then a final flag with the unknown bits: + +```rust +0b0000_0001 +0b0000_0010 +0b0000_1100 +``` + +It may also yield a flags value for `AB`, then a final flag with the unknown bits: + +```rust +0b0000_0011 +0b0000_1100 +``` + +---- + +Given the following flags type: + +```rust +struct Flags { + const A = 0b0000_0011; +} +``` + +and the following flags value: + +```rust +0b0000_0001 +``` + +When iterated it will still yield a flags value for the known bit `0b0000_0001` even though it doesn't contain a flag. + +### Formatting + +Format and parse a flags value as text using the following grammar: + +- _Flags:_ (_Whitespace_ _Flag_ _Whitespace_)`|`* +- _Flag:_ _Name_ | _Hex Number_ +- _Name:_ The name of any defined flag +- _Hex Number_: `0x`([0-9a-fA-F])* +- _Whitespace_: (\s)* + +Flags values can be formatted as _Flags_ by iterating over them, formatting each yielded flags value as a _Flag_. Any yielded flags value that sets exactly the bits of a defined flag with a name should be formatted as a _Name_. 
Otherwise it must be formatted as a _Hex Number_. + +Formatting and parsing supports three modes: + +- **Retain**: Formatting and parsing roundtrips exactly the bits of the source flags value. This is the default behavior. +- **Truncate**: Flags values are truncated before formatting, and truncated after parsing. +- **Strict**: A _Flag_ may only be formatted and parsed as a _Name_. _Hex numbers_ are not allowed. A consequence of this is that unknown bits and any bits that aren't in a contained named flag will be ignored. This is recommended for flags values serialized across API boundaries, like web services. + +Text that is empty or whitespace is an empty flags value. + +---- + +Given the following flags type: + +```rust +struct Flags { + const A = 0b0000_0001; + const B = 0b0000_0010; + const AB = 0b0000_0011; + const C = 0b0000_1100; +} +``` + +The following are examples of how flags values can be formatted using any mode: + +```rust +0b0000_0000 = "" +0b0000_0001 = "A" +0b0000_0010 = "B" +0b0000_0011 = "A | B" +0b0000_0011 = "AB" +0b0000_1111 = "A | B | C" +``` + +Truncate mode will unset any unknown bits: + +```rust +0b1000_0000 = "" +0b1111_1111 = "A | B | C" +0b0000_1000 = "0x8" +``` + +Retain mode will include any unknown bits as a final _Flag_: + +```rust +0b1000_0000 = "0x80" +0b1111_1111 = "A | B | C | 0xf0" +0b0000_1000 = "0x8" +``` + +Strict mode will unset any unknown bits, as well as bits not contained in any defined named flags: + +```rust +0b1000_0000 = "" +0b1111_1111 = "A | B | C" +0b0000_1000 = "" +``` diff --git a/vendor/bitflags/src/example_generated.rs b/vendor/bitflags/src/example_generated.rs new file mode 100644 index 00000000000000..abb1118fa14a41 --- /dev/null +++ b/vendor/bitflags/src/example_generated.rs @@ -0,0 +1,65 @@ +//! This module shows an example of code generated by the macro. **IT MUST NOT BE USED OUTSIDE THIS +//! CRATE**. +//! +//! Usually, when you call the `bitflags!` macro, only the `Flags` type would be visible. In this +//! example, the `Field0`, `Iter`, and `IterRaw` types are also exposed so that you can explore +//! their APIs. The `Field0` type can be accessed as `self.0` on an instance of `Flags`. + +__declare_public_bitflags! { + /// This is the same `Flags` struct defined in the [crate level example](../index.html#example). + /// Note that this struct is just for documentation purposes only, it must not be used outside + /// this crate. + pub struct Flags +} + +__declare_internal_bitflags! { + pub struct Field0: u32 +} + +__impl_internal_bitflags! { + Field0: u32, Flags { + // Field `A`. + /// + /// This flag has the value `0b00000001`. + const A = 0b00000001; + /// Field `B`. + /// + /// This flag has the value `0b00000010`. + const B = 0b00000010; + /// Field `C`. + /// + /// This flag has the value `0b00000100`. + const C = 0b00000100; + const ABC = Self::A.bits() | Self::B.bits() | Self::C.bits(); + } +} + +__impl_public_bitflags_forward! { + Flags: u32, Field0 +} + +__impl_public_bitflags_ops! { + Flags +} + +__impl_public_bitflags_iter! { + Flags: u32, Flags +} + +__impl_public_bitflags_consts! { + Flags: u32 { + /// Field `A`. + /// + /// This flag has the value `0b00000001`. + const A = 0b00000001; + /// Field `B`. + /// + /// This flag has the value `0b00000010`. + const B = 0b00000010; + /// Field `C`. + /// + /// This flag has the value `0b00000100`. 
+ const C = 0b00000100; + const ABC = Self::A.bits() | Self::B.bits() | Self::C.bits(); + } +} diff --git a/vendor/bitflags/src/external.rs b/vendor/bitflags/src/external.rs new file mode 100644 index 00000000000000..a60abec4c08df8 --- /dev/null +++ b/vendor/bitflags/src/external.rs @@ -0,0 +1,262 @@ +//! Conditional trait implementations for external libraries. + +/* +How do I support a new external library? + +Let's say we want to add support for `my_library`. + +First, we create a module under `external`, like `serde` with any specialized code. +Ideally, any utilities in here should just work off the `Flags` trait and maybe a +few other assumed bounds. + +Next, re-export the library from the `__private` module here. + +Next, define a macro like so: + +```rust +#[macro_export] +#[doc(hidden)] +#[cfg(feature = "serde")] +macro_rules! __impl_external_bitflags_my_library { + ( + $InternalBitFlags:ident: $T:ty, $PublicBitFlags:ident { + $( + $(#[$inner:ident $($args:tt)*])* + const $Flag:tt; + )* + } + ) => { + // Implementation goes here + }; +} + +#[macro_export] +#[doc(hidden)] +#[cfg(not(feature = "my_library"))] +macro_rules! __impl_external_bitflags_my_library { + ( + $InternalBitFlags:ident: $T:ty, $PublicBitFlags:ident { + $( + $(#[$inner:ident $($args:tt)*])* + const $Flag:tt; + )* + } + ) => {}; +} +``` + +Note that the macro is actually defined twice; once for when the `my_library` feature +is available, and once for when it's not. This is because the `__impl_external_bitflags_my_library` +macro is called in an end-user's library, not in `bitflags`. In an end-user's library we don't +know whether or not a particular feature of `bitflags` is enabled, so we unconditionally call +the macro, where the body of that macro depends on the feature flag. + +Now, we add our macro call to the `__impl_external_bitflags` macro body: + +```rust +__impl_external_bitflags_my_library! { + $InternalBitFlags: $T, $PublicBitFlags { + $( + $(#[$inner $($args)*])* + const $Flag; + )* + } +} +``` +*/ + +pub(crate) mod __private { + #[cfg(feature = "serde")] + pub use serde_core as serde; + + #[cfg(feature = "arbitrary")] + pub use arbitrary; + + #[cfg(feature = "bytemuck")] + pub use bytemuck; +} + +/// Implements traits from external libraries for the internal bitflags type. +#[macro_export] +#[doc(hidden)] +macro_rules! __impl_external_bitflags { + ( + $InternalBitFlags:ident: $T:ty, $PublicBitFlags:ident { + $( + $(#[$inner:ident $($args:tt)*])* + const $Flag:tt; + )* + } + ) => { + // Any new library traits impls should be added here + // Use `serde` as an example: generate code when the feature is available, + // and a no-op when it isn't + + $crate::__impl_external_bitflags_serde! { + $InternalBitFlags: $T, $PublicBitFlags { + $( + $(#[$inner $($args)*])* + const $Flag; + )* + } + } + + $crate::__impl_external_bitflags_arbitrary! { + $InternalBitFlags: $T, $PublicBitFlags { + $( + $(#[$inner $($args)*])* + const $Flag; + )* + } + } + + $crate::__impl_external_bitflags_bytemuck! { + $InternalBitFlags: $T, $PublicBitFlags { + $( + $(#[$inner $($args)*])* + const $Flag; + )* + } + } + }; +} + +#[cfg(feature = "serde")] +pub mod serde; + +/// Implement `Serialize` and `Deserialize` for the internal bitflags type. +#[macro_export] +#[doc(hidden)] +#[cfg(feature = "serde")] +macro_rules! 
__impl_external_bitflags_serde { + ( + $InternalBitFlags:ident: $T:ty, $PublicBitFlags:ident { + $( + $(#[$inner:ident $($args:tt)*])* + const $Flag:tt; + )* + } + ) => { + impl $crate::__private::serde::Serialize for $InternalBitFlags { + fn serialize( + &self, + serializer: S, + ) -> $crate::__private::core::result::Result { + $crate::serde::serialize( + &$PublicBitFlags::from_bits_retain(self.bits()), + serializer, + ) + } + } + + impl<'de> $crate::__private::serde::Deserialize<'de> for $InternalBitFlags { + fn deserialize>( + deserializer: D, + ) -> $crate::__private::core::result::Result { + let flags: $PublicBitFlags = $crate::serde::deserialize(deserializer)?; + + Ok(flags.0) + } + } + }; +} + +#[macro_export] +#[doc(hidden)] +#[cfg(not(feature = "serde"))] +macro_rules! __impl_external_bitflags_serde { + ( + $InternalBitFlags:ident: $T:ty, $PublicBitFlags:ident { + $( + $(#[$inner:ident $($args:tt)*])* + const $Flag:tt; + )* + } + ) => {}; +} + +#[cfg(feature = "arbitrary")] +pub mod arbitrary; + +#[cfg(feature = "bytemuck")] +mod bytemuck; + +/// Implement `Arbitrary` for the internal bitflags type. +#[macro_export] +#[doc(hidden)] +#[cfg(feature = "arbitrary")] +macro_rules! __impl_external_bitflags_arbitrary { + ( + $InternalBitFlags:ident: $T:ty, $PublicBitFlags:ident { + $( + $(#[$inner:ident $($args:tt)*])* + const $Flag:tt; + )* + } + ) => { + impl<'a> $crate::__private::arbitrary::Arbitrary<'a> for $InternalBitFlags { + fn arbitrary( + u: &mut $crate::__private::arbitrary::Unstructured<'a>, + ) -> $crate::__private::arbitrary::Result { + $crate::arbitrary::arbitrary::<$PublicBitFlags>(u).map(|flags| flags.0) + } + } + }; +} + +#[macro_export] +#[doc(hidden)] +#[cfg(not(feature = "arbitrary"))] +macro_rules! __impl_external_bitflags_arbitrary { + ( + $InternalBitFlags:ident: $T:ty, $PublicBitFlags:ident { + $( + $(#[$inner:ident $($args:tt)*])* + const $Flag:tt; + )* + } + ) => {}; +} + +/// Implement `Pod` and `Zeroable` for the internal bitflags type. +#[macro_export] +#[doc(hidden)] +#[cfg(feature = "bytemuck")] +macro_rules! __impl_external_bitflags_bytemuck { + ( + $InternalBitFlags:ident: $T:ty, $PublicBitFlags:ident { + $( + $(#[$inner:ident $($args:tt)*])* + const $Flag:tt; + )* + } + ) => { + // SAFETY: $InternalBitFlags is guaranteed to have the same ABI as $T, + // and $T implements Pod + unsafe impl $crate::__private::bytemuck::Pod for $InternalBitFlags where + $T: $crate::__private::bytemuck::Pod + { + } + + // SAFETY: $InternalBitFlags is guaranteed to have the same ABI as $T, + // and $T implements Zeroable + unsafe impl $crate::__private::bytemuck::Zeroable for $InternalBitFlags where + $T: $crate::__private::bytemuck::Zeroable + { + } + }; +} + +#[macro_export] +#[doc(hidden)] +#[cfg(not(feature = "bytemuck"))] +macro_rules! __impl_external_bitflags_bytemuck { + ( + $InternalBitFlags:ident: $T:ty, $PublicBitFlags:ident { + $( + $(#[$inner:ident $($args:tt)*])* + const $Flag:tt; + )* + } + ) => {}; +} diff --git a/vendor/bitflags/src/external/arbitrary.rs b/vendor/bitflags/src/external/arbitrary.rs new file mode 100644 index 00000000000000..edde9b5ec4e0d8 --- /dev/null +++ b/vendor/bitflags/src/external/arbitrary.rs @@ -0,0 +1,33 @@ +//! Specialized fuzzing for flags types using `arbitrary`. + +use crate::Flags; + +/** +Generate some arbitrary flags value with only known bits set. 
+*/
+pub fn arbitrary<'a, B: Flags>(u: &mut arbitrary::Unstructured<'a>) -> arbitrary::Result<B>
+where
+    B::Bits: arbitrary::Arbitrary<'a>,
+{
+    B::from_bits(u.arbitrary()?).ok_or(arbitrary::Error::IncorrectFormat)
+}
+
+#[cfg(test)]
+mod tests {
+    use arbitrary::Arbitrary;
+
+    bitflags! {
+        #[derive(Arbitrary)]
+        struct Color: u32 {
+            const RED = 0x1;
+            const GREEN = 0x2;
+            const BLUE = 0x4;
+        }
+    }
+
+    #[test]
+    fn test_arbitrary() {
+        let mut unstructured = arbitrary::Unstructured::new(&[0_u8; 256]);
+        let _color = Color::arbitrary(&mut unstructured);
+    }
+}
diff --git a/vendor/bitflags/src/external/bytemuck.rs b/vendor/bitflags/src/external/bytemuck.rs
new file mode 100644
index 00000000000000..a0cd68c9d7e736
--- /dev/null
+++ b/vendor/bitflags/src/external/bytemuck.rs
@@ -0,0 +1,19 @@
+#[cfg(test)]
+mod tests {
+    use bytemuck::{Pod, Zeroable};
+
+    bitflags! {
+        #[derive(Pod, Zeroable, Clone, Copy)]
+        #[repr(transparent)]
+        struct Color: u32 {
+            const RED = 0x1;
+            const GREEN = 0x2;
+            const BLUE = 0x4;
+        }
+    }
+
+    #[test]
+    fn test_bytemuck() {
+        assert_eq!(0x1, bytemuck::cast::<Color, u32>(Color::RED));
+    }
+}
diff --git a/vendor/bitflags/src/external/serde.rs b/vendor/bitflags/src/external/serde.rs
new file mode 100644
index 00000000000000..ff327b4b32093c
--- /dev/null
+++ b/vendor/bitflags/src/external/serde.rs
@@ -0,0 +1,94 @@
+//! Specialized serialization for flags types using `serde`.
+
+use crate::{
+    parser::{self, ParseHex, WriteHex},
+    Flags,
+};
+use core::{fmt, str};
+use serde_core::{
+    de::{Error, Visitor},
+    Deserialize, Deserializer, Serialize, Serializer,
+};
+
+/**
+Serialize a set of flags as a human-readable string or their underlying bits.
+
+Any unknown bits will be retained.
+*/
+pub fn serialize<B: Flags, S: Serializer>(flags: &B, serializer: S) -> Result<S::Ok, S::Error>
+where
+    B::Bits: WriteHex + Serialize,
+{
+    // Serialize human-readable flags as a string like `"A | B"`
+    if serializer.is_human_readable() {
+        serializer.collect_str(&parser::AsDisplay(flags))
+    }
+    // Serialize non-human-readable flags directly as the underlying bits
+    else {
+        flags.bits().serialize(serializer)
+    }
+}
+
+/**
+Deserialize a set of flags from a human-readable string or their underlying bits.
+
+Any unknown bits will be retained.
+*/
+pub fn deserialize<'de, B: Flags, D: Deserializer<'de>>(deserializer: D) -> Result<B, D::Error>
+where
+    B::Bits: ParseHex + Deserialize<'de>,
+{
+    if deserializer.is_human_readable() {
+        // Deserialize human-readable flags by parsing them from strings like `"A | B"`
+        struct FlagsVisitor<B>(core::marker::PhantomData<B>);
+
+        impl<'de, B: Flags> Visitor<'de> for FlagsVisitor<B>
+        where
+            B::Bits: ParseHex,
+        {
+            type Value = B;
+
+            fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
+                formatter.write_str("a string value of `|` separated flags")
+            }
+
+            fn visit_str<E: Error>(self, flags: &str) -> Result<Self::Value, E> {
+                parser::from_str(flags).map_err(|e| E::custom(e))
+            }
+        }
+
+        deserializer.deserialize_str(FlagsVisitor(Default::default()))
+    } else {
+        // Deserialize non-human-readable flags directly from the underlying bits
+        let bits = B::Bits::deserialize(deserializer)?;
+
+        Ok(B::from_bits_retain(bits))
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use serde_test::{assert_tokens, Configure, Token::*};
+
+    bitflags!
{ + #[derive(serde_lib::Serialize, serde_lib::Deserialize, Debug, PartialEq, Eq)] + #[serde(crate = "serde_lib", transparent)] + struct SerdeFlags: u32 { + const A = 1; + const B = 2; + const C = 4; + const D = 8; + } + } + + #[test] + fn test_serde_bitflags_default() { + assert_tokens(&SerdeFlags::empty().readable(), &[Str("")]); + + assert_tokens(&SerdeFlags::empty().compact(), &[U32(0)]); + + assert_tokens(&(SerdeFlags::A | SerdeFlags::B).readable(), &[Str("A | B")]); + + assert_tokens(&(SerdeFlags::A | SerdeFlags::B).compact(), &[U32(1 | 2)]); + } +} diff --git a/vendor/bitflags/src/internal.rs b/vendor/bitflags/src/internal.rs new file mode 100644 index 00000000000000..87d01cc0cb5f55 --- /dev/null +++ b/vendor/bitflags/src/internal.rs @@ -0,0 +1,125 @@ +//! Generate the internal `bitflags`-facing flags type. +//! +//! The code generated here is owned by `bitflags`, but still part of its public API. +//! Changes to the types generated here need to be considered like any other public API change. + +/// Declare the `bitflags`-facing bitflags struct. +/// +/// This type is part of the `bitflags` crate's public API, but not part of the user's. +#[macro_export] +#[doc(hidden)] +macro_rules! __declare_internal_bitflags { + ( + $vis:vis struct $InternalBitFlags:ident: $T:ty + ) => { + // NOTE: The ABI of this type is _guaranteed_ to be the same as `T` + // This is relied on by some external libraries like `bytemuck` to make + // its `unsafe` trait impls sound. + #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] + #[repr(transparent)] + $vis struct $InternalBitFlags($T); + }; +} + +/// Implement functions on the private (bitflags-facing) bitflags type. +/// +/// Methods and trait implementations can be freely added here without breaking end-users. +/// If we want to expose new functionality to `#[derive]`, this is the place to do it. +#[macro_export] +#[doc(hidden)] +macro_rules! __impl_internal_bitflags { + ( + $InternalBitFlags:ident: $T:ty, $PublicBitFlags:ident { + $( + $(#[$inner:ident $($args:tt)*])* + const $Flag:tt = $value:expr; + )* + } + ) => { + // NOTE: This impl is also used to prevent using bits types from non-primitive types + // in the `bitflags` macro. If this approach is changed, this guard will need to be + // retained somehow + impl $crate::__private::PublicFlags for $PublicBitFlags { + type Primitive = $T; + type Internal = $InternalBitFlags; + } + + impl $crate::__private::core::default::Default for $InternalBitFlags { + #[inline] + fn default() -> Self { + $InternalBitFlags::empty() + } + } + + impl $crate::__private::core::fmt::Debug for $InternalBitFlags { + fn fmt(&self, f: &mut $crate::__private::core::fmt::Formatter<'_>) -> $crate::__private::core::fmt::Result { + if self.is_empty() { + // If no flags are set then write an empty hex flag to avoid + // writing an empty string. In some contexts, like serialization, + // an empty string is preferable, but it may be unexpected in + // others for a format not to produce any output. + // + // We can remove this `0x0` and remain compatible with `FromStr`, + // because an empty string will still parse to an empty set of flags, + // just like `0x0` does. 
+ $crate::__private::core::write!(f, "{:#x}", <$T as $crate::Bits>::EMPTY) + } else { + $crate::__private::core::fmt::Display::fmt(self, f) + } + } + } + + impl $crate::__private::core::fmt::Display for $InternalBitFlags { + fn fmt(&self, f: &mut $crate::__private::core::fmt::Formatter<'_>) -> $crate::__private::core::fmt::Result { + $crate::parser::to_writer(&$PublicBitFlags(*self), f) + } + } + + impl $crate::__private::core::str::FromStr for $InternalBitFlags { + type Err = $crate::parser::ParseError; + + fn from_str(s: &str) -> $crate::__private::core::result::Result { + $crate::parser::from_str::<$PublicBitFlags>(s).map(|flags| flags.0) + } + } + + impl $crate::__private::core::convert::AsRef<$T> for $InternalBitFlags { + fn as_ref(&self) -> &$T { + &self.0 + } + } + + impl $crate::__private::core::convert::From<$T> for $InternalBitFlags { + fn from(bits: $T) -> Self { + Self::from_bits_retain(bits) + } + } + + // The internal flags type offers a similar API to the public one + + $crate::__impl_public_bitflags! { + $InternalBitFlags: $T, $PublicBitFlags { + $( + $(#[$inner $($args)*])* + const $Flag = $value; + )* + } + } + + $crate::__impl_public_bitflags_ops! { + $InternalBitFlags + } + + $crate::__impl_public_bitflags_iter! { + $InternalBitFlags: $T, $PublicBitFlags + } + + impl $InternalBitFlags { + /// Returns a mutable reference to the raw value of the flags currently stored. + #[inline] + pub fn bits_mut(&mut self) -> &mut $T { + &mut self.0 + } + } + }; +} diff --git a/vendor/bitflags/src/iter.rs b/vendor/bitflags/src/iter.rs new file mode 100644 index 00000000000000..ae0efc930917fa --- /dev/null +++ b/vendor/bitflags/src/iter.rs @@ -0,0 +1,182 @@ +/*! +Yield the bits of a source flags value in a set of contained flags values. +*/ + +use crate::{Flag, Flags}; + +/** +An iterator over flags values. + +This iterator will yield flags values for contained, defined flags first, with any remaining bits yielded +as a final flags value. +*/ +pub struct Iter { + inner: IterNames, + done: bool, +} + +impl Iter { + pub(crate) fn new(flags: &B) -> Self { + Iter { + inner: IterNames::new(flags), + done: false, + } + } +} + +impl Iter { + // Used by the `bitflags` macro + #[doc(hidden)] + pub const fn __private_const_new(flags: &'static [Flag], source: B, remaining: B) -> Self { + Iter { + inner: IterNames::__private_const_new(flags, source, remaining), + done: false, + } + } +} + +impl Iterator for Iter { + type Item = B; + + fn next(&mut self) -> Option { + match self.inner.next() { + Some((_, flag)) => Some(flag), + None if !self.done => { + self.done = true; + + // After iterating through valid names, if there are any bits left over + // then return one final value that includes them. This makes `into_iter` + // and `from_iter` roundtrip + if !self.inner.remaining().is_empty() { + Some(B::from_bits_retain(self.inner.remaining.bits())) + } else { + None + } + } + None => None, + } + } +} + +/** +An iterator over flags values. + +This iterator only yields flags values for contained, defined, named flags. Any remaining bits +won't be yielded, but can be found with the [`IterNames::remaining`] method. 
+*/ +pub struct IterNames { + flags: &'static [Flag], + idx: usize, + source: B, + remaining: B, +} + +impl IterNames { + pub(crate) fn new(flags: &B) -> Self { + IterNames { + flags: B::FLAGS, + idx: 0, + remaining: B::from_bits_retain(flags.bits()), + source: B::from_bits_retain(flags.bits()), + } + } +} + +impl IterNames { + // Used by the bitflags macro + #[doc(hidden)] + pub const fn __private_const_new(flags: &'static [Flag], source: B, remaining: B) -> Self { + IterNames { + flags, + idx: 0, + remaining, + source, + } + } + + /// Get a flags value of any remaining bits that haven't been yielded yet. + /// + /// Once the iterator has finished, this method can be used to + /// check whether or not there are any bits that didn't correspond + /// to a contained, defined, named flag remaining. + pub fn remaining(&self) -> &B { + &self.remaining + } +} + +impl Iterator for IterNames { + type Item = (&'static str, B); + + fn next(&mut self) -> Option { + while let Some(flag) = self.flags.get(self.idx) { + // Short-circuit if our state is empty + if self.remaining.is_empty() { + return None; + } + + self.idx += 1; + + // Skip unnamed flags + if flag.name().is_empty() { + continue; + } + + let bits = flag.value().bits(); + + // If the flag is set in the original source _and_ it has bits that haven't + // been covered by a previous flag yet then yield it. These conditions cover + // two cases for multi-bit flags: + // + // 1. When flags partially overlap, such as `0b00000001` and `0b00000101`, we'll + // yield both flags. + // 2. When flags fully overlap, such as in convenience flags that are a shorthand for others, + // we won't yield both flags. + if self.source.contains(B::from_bits_retain(bits)) + && self.remaining.intersects(B::from_bits_retain(bits)) + { + self.remaining.remove(B::from_bits_retain(bits)); + + return Some((flag.name(), B::from_bits_retain(bits))); + } + } + + None + } +} + +/** +An iterator over all defined named flags. + +This iterator will yield flags values for all defined named flags, regardless of +whether they are contained in a particular flags value. +*/ +pub struct IterDefinedNames { + flags: &'static [Flag], + idx: usize, +} + +impl IterDefinedNames { + pub(crate) fn new() -> Self { + IterDefinedNames { + flags: B::FLAGS, + idx: 0, + } + } +} + +impl Iterator for IterDefinedNames { + type Item = (&'static str, B); + + fn next(&mut self) -> Option { + while let Some(flag) = self.flags.get(self.idx) { + self.idx += 1; + + // Only yield named flags + if flag.is_named() { + return Some((flag.name(), B::from_bits_retain(flag.value().bits()))); + } + } + + None + } +} diff --git a/vendor/bitflags/src/lib.rs b/vendor/bitflags/src/lib.rs new file mode 100644 index 00000000000000..b672ec066b8feb --- /dev/null +++ b/vendor/bitflags/src/lib.rs @@ -0,0 +1,997 @@ +// Copyright 2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +/*! +Generate types for C-style flags with ergonomic APIs. + +# Getting started + +Add `bitflags` to your `Cargo.toml`: + +```toml +[dependencies.bitflags] +version = "2.10.0" +``` + +## Crate features + +The `bitflags` library defines a few Cargo features that you can opt-in to: + +- `std`: Implement the `Error` trait on error types used by `bitflags`. 
+- `serde`: Support deriving `serde` traits on generated flags types. +- `arbitrary`: Support deriving `arbitrary` traits on generated flags types. +- `bytemuck`: Support deriving `bytemuck` traits on generated flags types. + +## Generating flags types + +Use the [`bitflags`] macro to generate flags types: + +```rust +use bitflags::bitflags; + +bitflags! { + pub struct Flags: u32 { + const A = 0b00000001; + const B = 0b00000010; + const C = 0b00000100; + } +} +``` + +See the docs for the `bitflags` macro for the full syntax. + +Also see the [`example_generated`](./example_generated/index.html) module for an example of what the `bitflags` macro generates for a flags type. + +### Externally defined flags + +If you're generating flags types for an external source, such as a C API, you can define +an extra unnamed flag as a mask of all bits the external source may ever set. Usually this would be all bits (`!0`): + +```rust +# use bitflags::bitflags; +bitflags! { + pub struct Flags: u32 { + const A = 0b00000001; + const B = 0b00000010; + const C = 0b00000100; + + // The source may set any bits + const _ = !0; + } +} +``` + +Why should you do this? Generated methods like `all` and truncating operators like `!` only consider +bits in defined flags. Adding an unnamed flag makes those methods consider additional bits, +without generating additional constants for them. It helps compatibility when the external source +may start setting additional bits at any time. The [known and unknown bits](#known-and-unknown-bits) +section has more details on this behavior. + +### Custom derives + +You can derive some traits on generated flags types if you enable Cargo features. The following +libraries are currently supported: + +- `serde`: Support `#[derive(Serialize, Deserialize)]`, using text for human-readable formats, + and a raw number for binary formats. +- `arbitrary`: Support `#[derive(Arbitrary)]`, only generating flags values with known bits. +- `bytemuck`: Support `#[derive(Pod, Zeroable)]`, for casting between flags values and their + underlying bits values. + +You can also define your own flags type outside of the [`bitflags`] macro and then use it to generate methods. +This can be useful if you need a custom `#[derive]` attribute for a library that `bitflags` doesn't +natively support: + +```rust +# use std::fmt::Debug as SomeTrait; +# use bitflags::bitflags; +#[derive(SomeTrait)] +pub struct Flags(u32); + +bitflags! { + impl Flags: u32 { + const A = 0b00000001; + const B = 0b00000010; + const C = 0b00000100; + } +} +``` + +### Adding custom methods + +The [`bitflags`] macro supports attributes on generated flags types within the macro itself, while +`impl` blocks can be added outside of it: + +```rust +# use bitflags::bitflags; +bitflags! { + // Attributes can be applied to flags types + #[repr(transparent)] + #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] + pub struct Flags: u32 { + const A = 0b00000001; + const B = 0b00000010; + const C = 0b00000100; + } +} + +// Impl blocks can be added to flags types +impl Flags { + pub fn as_u64(&self) -> u64 { + self.bits() as u64 + } +} +``` + +## Working with flags values + +Use generated constants and standard bitwise operators to interact with flags values: + +```rust +# use bitflags::bitflags; +# bitflags! 
{ +# #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)] +# pub struct Flags: u32 { +# const A = 0b00000001; +# const B = 0b00000010; +# const C = 0b00000100; +# } +# } +// union +let ab = Flags::A | Flags::B; + +// intersection +let a = ab & Flags::A; + +// difference +let b = ab - Flags::A; + +// complement +let c = !ab; +``` + +See the docs for the [`Flags`] trait for more details on operators and how they behave. + +# Formatting and parsing + +`bitflags` defines a text format that can be used to convert any flags value to and from strings. + +See the [`parser`] module for more details. + +# Specification + +The terminology and behavior of generated flags types is +[specified in the source repository](https://github.com/bitflags/bitflags/blob/main/spec.md). +Details are repeated in these docs where appropriate, but is exhaustively listed in the spec. Some +things are worth calling out explicitly here. + +## Flags types, flags values, flags + +The spec and these docs use consistent terminology to refer to things in the bitflags domain: + +- **Bits type**: A type that defines a fixed number of bits at specific locations. +- **Flag**: A set of bits in a bits type that may have a unique name. +- **Flags type**: A set of defined flags over a specific bits type. +- **Flags value**: An instance of a flags type using its specific bits value for storage. + +``` +# use bitflags::bitflags; +bitflags! { + struct FlagsType: u8 { +// -- Bits type +// --------- Flags type + const A = 1; +// ----- Flag + } +} + +let flag = FlagsType::A; +// ---- Flags value +``` + +## Known and unknown bits + +Any bits in a flag you define are called _known bits_. Any other bits are _unknown bits_. +In the following flags type: + +``` +# use bitflags::bitflags; +bitflags! { + struct Flags: u8 { + const A = 1; + const B = 1 << 1; + const C = 1 << 2; + } +} +``` + +The known bits are `0b0000_0111` and the unknown bits are `0b1111_1000`. + +`bitflags` doesn't guarantee that a flags value will only ever have known bits set, but some operators +will unset any unknown bits they encounter. In a future version of `bitflags`, all operators will +unset unknown bits. + +If you're using `bitflags` for flags types defined externally, such as from C, you probably want all +bits to be considered known, in case that external source changes. You can do this using an unnamed +flag, as described in [externally defined flags](#externally-defined-flags). + +## Zero-bit flags + +Flags with no bits set should be avoided because they interact strangely with [`Flags::contains`] +and [`Flags::intersects`]. A zero-bit flag is always contained, but is never intersected. The +names of zero-bit flags can be parsed, but are never formatted. + +## Multi-bit flags + +Flags that set multiple bits should be avoided unless each bit is also in a single-bit flag. +Take the following flags type as an example: + +``` +# use bitflags::bitflags; +bitflags! { + struct Flags: u8 { + const A = 1; + const B = 1 | 1 << 1; + } +} +``` + +The result of `Flags::A ^ Flags::B` is `0b0000_0010`, which doesn't correspond to either +`Flags::A` or `Flags::B` even though it's still a known bit. 
+*/ + +#![cfg_attr(not(any(feature = "std", test)), no_std)] +#![cfg_attr(not(test), forbid(unsafe_code))] +#![cfg_attr(test, allow(mixed_script_confusables))] + +#[doc(inline)] +pub use traits::{Bits, Flag, Flags}; + +pub mod iter; +pub mod parser; + +mod traits; + +#[doc(hidden)] +pub mod __private { + #[allow(unused_imports)] + // Easier than conditionally checking any optional external dependencies + pub use crate::{external::__private::*, traits::__private::*}; + + pub use core; +} + +#[allow(unused_imports)] +pub use external::*; + +#[allow(deprecated)] +pub use traits::BitFlags; + +/* +How does the bitflags crate work? + +This library generates a `struct` in the end-user's crate with a bunch of constants on it that represent flags. +The difference between `bitflags` and a lot of other libraries is that we don't actually control the generated `struct` in the end. +It's part of the end-user's crate, so it belongs to them. That makes it difficult to extend `bitflags` with new functionality +because we could end up breaking valid code that was already written. + +Our solution is to split the type we generate into two: the public struct owned by the end-user, and an internal struct owned by `bitflags` (us). +To give you an example, let's say we had a crate that called `bitflags!`: + +```rust +bitflags! { + pub struct MyFlags: u32 { + const A = 1; + const B = 2; + } +} +``` + +What they'd end up with looks something like this: + +```rust +pub struct MyFlags(::InternalBitFlags); + +const _: () = { + #[repr(transparent)] + #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] + pub struct MyInternalBitFlags { + bits: u32, + } + + impl PublicFlags for MyFlags { + type Internal = InternalBitFlags; + } +}; +``` + +If we want to expose something like a new trait impl for generated flags types, we add it to our generated `MyInternalBitFlags`, +and let `#[derive]` on `MyFlags` pick up that implementation, if an end-user chooses to add one. + +The public API is generated in the `__impl_public_flags!` macro, and the internal API is generated in +the `__impl_internal_flags!` macro. + +The macros are split into 3 modules: + +- `public`: where the user-facing flags types are generated. +- `internal`: where the `bitflags`-facing flags types are generated. +- `external`: where external library traits are implemented conditionally. +*/ + +/** +Generate a flags type. + +# `struct` mode + +A declaration that begins with `$vis struct` will generate a `struct` for a flags type, along with +methods and trait implementations for it. The body of the declaration defines flags as constants, +where each constant is a flags value of the generated flags type. + +## Examples + +Generate a flags type using `u8` as the bits type: + +``` +# use bitflags::bitflags; +bitflags! { + struct Flags: u8 { + const A = 1; + const B = 1 << 1; + const C = 0b0000_0100; + } +} +``` + +Flags types are private by default and accept standard visibility modifiers. Flags themselves +are always public: + +``` +# use bitflags::bitflags; +bitflags! { + pub struct Flags: u8 { + // Constants are always `pub` + const A = 1; + } +} +``` + +Flags may refer to other flags using their [`Flags::bits`] value: + +``` +# use bitflags::bitflags; +bitflags! { + struct Flags: u8 { + const A = 1; + const B = 1 << 1; + const AB = Flags::A.bits() | Flags::B.bits(); + } +} +``` + +A single `bitflags` invocation may include zero or more flags type declarations: + +``` +# use bitflags::bitflags; +bitflags! {} + +bitflags! 
{ + struct Flags1: u8 { + const A = 1; + } + + struct Flags2: u8 { + const A = 1; + } +} +``` + +# `impl` mode + +A declaration that begins with `impl` will only generate methods and trait implementations for the +`struct` defined outside of the `bitflags` macro. + +The struct itself must be a newtype using the bits type as its field. + +The syntax for `impl` mode is identical to `struct` mode besides the starting token. + +## Examples + +Implement flags methods and traits for a custom flags type using `u8` as its underlying bits type: + +``` +# use bitflags::bitflags; +struct Flags(u8); + +bitflags! { + impl Flags: u8 { + const A = 1; + const B = 1 << 1; + const C = 0b0000_0100; + } +} +``` + +# Named and unnamed flags + +Constants in the body of a declaration are flags. The identifier of the constant is the name of +the flag. If the identifier is `_`, then the flag is unnamed. Unnamed flags don't appear in the +generated API, but affect how bits are truncated. + +## Examples + +Adding an unnamed flag that makes all bits known: + +``` +# use bitflags::bitflags; +bitflags! { + struct Flags: u8 { + const A = 1; + const B = 1 << 1; + + const _ = !0; + } +} +``` + +Flags types may define multiple unnamed flags: + +``` +# use bitflags::bitflags; +bitflags! { + struct Flags: u8 { + const _ = 1; + const _ = 1 << 1; + } +} +``` +*/ +#[macro_export] +macro_rules! bitflags { + ( + $(#[$outer:meta])* + $vis:vis struct $BitFlags:ident: $T:ty { + $( + $(#[$inner:ident $($args:tt)*])* + const $Flag:tt = $value:expr; + )* + } + + $($t:tt)* + ) => { + // Declared in the scope of the `bitflags!` call + // This type appears in the end-user's API + $crate::__declare_public_bitflags! { + $(#[$outer])* + $vis struct $BitFlags + } + + // Workaround for: https://github.com/bitflags/bitflags/issues/320 + $crate::__impl_public_bitflags_consts! { + $BitFlags: $T { + $( + $(#[$inner $($args)*])* + const $Flag = $value; + )* + } + } + + #[allow( + dead_code, + deprecated, + unused_doc_comments, + unused_attributes, + unused_mut, + unused_imports, + non_upper_case_globals, + clippy::assign_op_pattern, + clippy::indexing_slicing, + clippy::same_name_method, + clippy::iter_without_into_iter, + )] + const _: () = { + // Declared in a "hidden" scope that can't be reached directly + // These types don't appear in the end-user's API + $crate::__declare_internal_bitflags! { + $vis struct InternalBitFlags: $T + } + + $crate::__impl_internal_bitflags! { + InternalBitFlags: $T, $BitFlags { + $( + $(#[$inner $($args)*])* + const $Flag = $value; + )* + } + } + + // This is where new library trait implementations can be added + $crate::__impl_external_bitflags! { + InternalBitFlags: $T, $BitFlags { + $( + $(#[$inner $($args)*])* + const $Flag; + )* + } + } + + $crate::__impl_public_bitflags_forward! { + $BitFlags: $T, InternalBitFlags + } + + $crate::__impl_public_bitflags_ops! { + $BitFlags + } + + $crate::__impl_public_bitflags_iter! { + $BitFlags: $T, $BitFlags + } + }; + + $crate::bitflags! { + $($t)* + } + }; + ( + $(#[$outer:meta])* + impl $BitFlags:ident: $T:ty { + $( + $(#[$inner:ident $($args:tt)*])* + const $Flag:tt = $value:expr; + )* + } + + $($t:tt)* + ) => { + $crate::__impl_public_bitflags_consts! 
{ + $BitFlags: $T { + $( + $(#[$inner $($args)*])* + const $Flag = $value; + )* + } + } + + #[allow( + dead_code, + deprecated, + unused_doc_comments, + unused_attributes, + unused_mut, + unused_imports, + non_upper_case_globals, + clippy::assign_op_pattern, + clippy::iter_without_into_iter, + )] + const _: () = { + $crate::__impl_public_bitflags! { + $(#[$outer])* + $BitFlags: $T, $BitFlags { + $( + $(#[$inner $($args)*])* + const $Flag = $value; + )* + } + } + + $crate::__impl_public_bitflags_ops! { + $BitFlags + } + + $crate::__impl_public_bitflags_iter! { + $BitFlags: $T, $BitFlags + } + }; + + $crate::bitflags! { + $($t)* + } + }; + () => {}; +} + +/// Implement functions on bitflags types. +/// +/// We need to be careful about adding new methods and trait implementations here because they +/// could conflict with items added by the end-user. +#[macro_export] +#[doc(hidden)] +macro_rules! __impl_bitflags { + ( + // These param names must be passed in to make the macro work. + // Just use `params: self, bits, name, other, value;`. + params: $self:ident, $bits:ident, $name:ident, $other:ident, $value:ident; + $(#[$outer:meta])* + $PublicBitFlags:ident: $T:ty { + fn empty() $empty_body:block + fn all() $all_body:block + fn bits(&self) $bits_body:block + fn from_bits(bits) $from_bits_body:block + fn from_bits_truncate(bits) $from_bits_truncate_body:block + fn from_bits_retain(bits) $from_bits_retain_body:block + fn from_name(name) $from_name_body:block + fn is_empty(&self) $is_empty_body:block + fn is_all(&self) $is_all_body:block + fn intersects(&self, other) $intersects_body:block + fn contains(&self, other) $contains_body:block + fn insert(&mut self, other) $insert_body:block + fn remove(&mut self, other) $remove_body:block + fn toggle(&mut self, other) $toggle_body:block + fn set(&mut self, other, value) $set_body:block + fn intersection(self, other) $intersection_body:block + fn union(self, other) $union_body:block + fn difference(self, other) $difference_body:block + fn symmetric_difference(self, other) $symmetric_difference_body:block + fn complement(self) $complement_body:block + } + ) => { + #[allow(dead_code, deprecated, unused_attributes)] + $(#[$outer])* + impl $PublicBitFlags { + /// Get a flags value with all bits unset. + #[inline] + pub const fn empty() -> Self + $empty_body + + /// Get a flags value with all known bits set. + #[inline] + pub const fn all() -> Self + $all_body + + /// Get the underlying bits value. + /// + /// The returned value is exactly the bits set in this flags value. + #[inline] + pub const fn bits(&$self) -> $T + $bits_body + + /// Convert from a bits value. + /// + /// This method will return `None` if any unknown bits are set. + #[inline] + pub const fn from_bits($bits: $T) -> $crate::__private::core::option::Option + $from_bits_body + + /// Convert from a bits value, unsetting any unknown bits. + #[inline] + pub const fn from_bits_truncate($bits: $T) -> Self + $from_bits_truncate_body + + /// Convert from a bits value exactly. + #[inline] + pub const fn from_bits_retain($bits: $T) -> Self + $from_bits_retain_body + + /// Get a flags value with the bits of a flag with the given name set. + /// + /// This method will return `None` if `name` is empty or doesn't + /// correspond to any named flag. + #[inline] + pub fn from_name($name: &str) -> $crate::__private::core::option::Option + $from_name_body + + /// Whether all bits in this flags value are unset. 
+ #[inline] + pub const fn is_empty(&$self) -> bool + $is_empty_body + + /// Whether all known bits in this flags value are set. + #[inline] + pub const fn is_all(&$self) -> bool + $is_all_body + + /// Whether any set bits in a source flags value are also set in a target flags value. + #[inline] + pub const fn intersects(&$self, $other: Self) -> bool + $intersects_body + + /// Whether all set bits in a source flags value are also set in a target flags value. + #[inline] + pub const fn contains(&$self, $other: Self) -> bool + $contains_body + + /// The bitwise or (`|`) of the bits in two flags values. + #[inline] + pub fn insert(&mut $self, $other: Self) + $insert_body + + /// The intersection of a source flags value with the complement of a target flags + /// value (`&!`). + /// + /// This method is not equivalent to `self & !other` when `other` has unknown bits set. + /// `remove` won't truncate `other`, but the `!` operator will. + #[inline] + pub fn remove(&mut $self, $other: Self) + $remove_body + + /// The bitwise exclusive-or (`^`) of the bits in two flags values. + #[inline] + pub fn toggle(&mut $self, $other: Self) + $toggle_body + + /// Call `insert` when `value` is `true` or `remove` when `value` is `false`. + #[inline] + pub fn set(&mut $self, $other: Self, $value: bool) + $set_body + + /// The bitwise and (`&`) of the bits in two flags values. + #[inline] + #[must_use] + pub const fn intersection($self, $other: Self) -> Self + $intersection_body + + /// The bitwise or (`|`) of the bits in two flags values. + #[inline] + #[must_use] + pub const fn union($self, $other: Self) -> Self + $union_body + + /// The intersection of a source flags value with the complement of a target flags + /// value (`&!`). + /// + /// This method is not equivalent to `self & !other` when `other` has unknown bits set. + /// `difference` won't truncate `other`, but the `!` operator will. + #[inline] + #[must_use] + pub const fn difference($self, $other: Self) -> Self + $difference_body + + /// The bitwise exclusive-or (`^`) of the bits in two flags values. + #[inline] + #[must_use] + pub const fn symmetric_difference($self, $other: Self) -> Self + $symmetric_difference_body + + /// The bitwise negation (`!`) of the bits in a flags value, truncating the result. + #[inline] + #[must_use] + pub const fn complement($self) -> Self + $complement_body + } + }; +} + +/// A macro that matches flags values, similar to Rust's `match` statement. +/// +/// In a regular `match` statement, the syntax `Flag::A | Flag::B` is interpreted as an or-pattern, +/// instead of the bitwise-or of `Flag::A` and `Flag::B`. This can be surprising when combined with flags types +/// because `Flag::A | Flag::B` won't match the pattern `Flag::A | Flag::B`. This macro is an alternative to +/// `match` for flags values that doesn't have this issue. +/// +/// # Syntax +/// +/// ```ignore +/// bitflags_match!(expression, { +/// pattern1 => result1, +/// pattern2 => result2, +/// .. +/// _ => default_result, +/// }) +/// ``` +/// +/// The final `_ => default_result` arm is required, otherwise the macro will fail to compile. +/// +/// # Examples +/// +/// ```rust +/// use bitflags::{bitflags, bitflags_match}; +/// +/// bitflags! 
{ +/// #[derive(PartialEq)] +/// struct Flags: u8 { +/// const A = 1 << 0; +/// const B = 1 << 1; +/// const C = 1 << 2; +/// } +/// } +/// +/// let flags = Flags::A | Flags::B; +/// +/// // Prints `the value is A and B` +/// bitflags_match!(flags, { +/// Flags::A | Flags::B => println!("the value is A and B"), +/// _ => println!("the value is not A and B"), +/// }); +/// +/// // Prints `the value is not A` +/// bitflags_match!(flags, { +/// Flags::A => println!("the value is A"), +/// _ => println!("the value is not A"), +/// }); +/// ``` +/// +/// # How it works +/// +/// The macro expands to a series of `if` statements, **checking equality** between the input expression +/// and each pattern. This allows for correct matching of bitflag combinations, which is not possible +/// with a regular match expression due to the way bitflags are implemented. +/// +/// Patterns are evaluated in the order they appear in the macro. +#[macro_export] +macro_rules! bitflags_match { + ($operation:expr, { + $($t:tt)* + }) => { + // Expand to a closure so we can use `return` + // This makes it possible to apply attributes to the "match arms" + (|| { + $crate::__bitflags_match!($operation, { $($t)* }) + })() + }; +} + +/// Expand the `bitflags_match` macro +#[macro_export] +#[doc(hidden)] +macro_rules! __bitflags_match { + // Eat an optional `,` following a block match arm + ($operation:expr, { $pattern:expr => { $($body:tt)* } , $($t:tt)+ }) => { + $crate::__bitflags_match!($operation, { $pattern => { $($body)* } $($t)+ }) + }; + // Expand a block match arm `A => { .. }` + ($operation:expr, { $pattern:expr => { $($body:tt)* } $($t:tt)+ }) => { + { + if $operation == $pattern { + return { + $($body)* + }; + } + + $crate::__bitflags_match!($operation, { $($t)+ }) + } + }; + // Expand an expression match arm `A => x,` + ($operation:expr, { $pattern:expr => $body:expr , $($t:tt)+ }) => { + { + if $operation == $pattern { + return $body; + } + + $crate::__bitflags_match!($operation, { $($t)+ }) + } + }; + // Expand the default case + ($operation:expr, { _ => $default:expr $(,)? }) => { + $default + } +} + +/// A macro that processed the input to `bitflags!` and shuffles attributes around +/// based on whether or not they're "expression-safe". +/// +/// This macro is a token-tree muncher that works on 2 levels: +/// +/// For each attribute, we explicitly match on its identifier, like `cfg` to determine +/// whether or not it should be considered expression-safe. +/// +/// If you find yourself with an attribute that should be considered expression-safe +/// and isn't, it can be added here. +#[macro_export] +#[doc(hidden)] +macro_rules! __bitflags_expr_safe_attrs { + // Entrypoint: Move all flags and all attributes into `unprocessed` lists + // where they'll be munched one-at-a-time + ( + $(#[$inner:ident $($args:tt)*])* + { $e:expr } + ) => { + $crate::__bitflags_expr_safe_attrs! { + expr: { $e }, + attrs: { + // All attributes start here + unprocessed: [$(#[$inner $($args)*])*], + // Attributes that are safe on expressions go here + processed: [], + }, + } + }; + // Process the next attribute on the current flag + // `cfg`: The next flag should be propagated to expressions + // NOTE: You can copy this rules block and replace `cfg` with + // your attribute name that should be considered expression-safe + ( + expr: { $e:expr }, + attrs: { + unprocessed: [ + // cfg matched here + #[cfg $($args:tt)*] + $($attrs_rest:tt)* + ], + processed: [$($expr:tt)*], + }, + ) => { + $crate::__bitflags_expr_safe_attrs! 
{ + expr: { $e }, + attrs: { + unprocessed: [ + $($attrs_rest)* + ], + processed: [ + $($expr)* + // cfg added here + #[cfg $($args)*] + ], + }, + } + }; + // Process the next attribute on the current flag + // `$other`: The next flag should not be propagated to expressions + ( + expr: { $e:expr }, + attrs: { + unprocessed: [ + // $other matched here + #[$other:ident $($args:tt)*] + $($attrs_rest:tt)* + ], + processed: [$($expr:tt)*], + }, + ) => { + $crate::__bitflags_expr_safe_attrs! { + expr: { $e }, + attrs: { + unprocessed: [ + $($attrs_rest)* + ], + processed: [ + // $other not added here + $($expr)* + ], + }, + } + }; + // Once all attributes on all flags are processed, generate the actual code + ( + expr: { $e:expr }, + attrs: { + unprocessed: [], + processed: [$(#[$expr:ident $($exprargs:tt)*])*], + }, + ) => { + $(#[$expr $($exprargs)*])* + { $e } + } +} + +/// Implement a flag, which may be a wildcard `_`. +#[macro_export] +#[doc(hidden)] +macro_rules! __bitflags_flag { + ( + { + name: _, + named: { $($named:tt)* }, + unnamed: { $($unnamed:tt)* }, + } + ) => { + $($unnamed)* + }; + ( + { + name: $Flag:ident, + named: { $($named:tt)* }, + unnamed: { $($unnamed:tt)* }, + } + ) => { + $($named)* + }; +} + +#[macro_use] +mod public; +#[macro_use] +mod internal; +#[macro_use] +mod external; + +#[cfg(feature = "example_generated")] +pub mod example_generated; + +#[cfg(test)] +mod tests; diff --git a/vendor/bitflags/src/parser.rs b/vendor/bitflags/src/parser.rs new file mode 100644 index 00000000000000..34b432da39b8fd --- /dev/null +++ b/vendor/bitflags/src/parser.rs @@ -0,0 +1,332 @@ +/*! +Parsing flags from text. + +Format and parse a flags value as text using the following grammar: + +- _Flags:_ (_Whitespace_ _Flag_ _Whitespace_)`|`* +- _Flag:_ _Name_ | _Hex Number_ +- _Name:_ The name of any defined flag +- _Hex Number_: `0x`([0-9a-fA-F])* +- _Whitespace_: (\s)* + +As an example, this is how `Flags::A | Flags::B | 0x0c` can be represented as text: + +```text +A | B | 0x0c +``` + +Alternatively, it could be represented without whitespace: + +```text +A|B|0x0C +``` + +Note that identifiers are *case-sensitive*, so the following is *not equivalent*: + +```text +a|b|0x0C +``` +*/ + +#![allow(clippy::let_unit_value)] + +use core::fmt::{self, Write}; + +use crate::{Bits, Flags}; + +/** +Write a flags value as text. + +Any bits that aren't part of a contained flag will be formatted as a hex number. +*/ +pub fn to_writer(flags: &B, mut writer: impl Write) -> Result<(), fmt::Error> +where + B::Bits: WriteHex, +{ + // A formatter for bitflags that produces text output like: + // + // A | B | 0xf6 + // + // The names of set flags are written in a bar-separated-format, + // followed by a hex number of any remaining bits that are set + // but don't correspond to any flags. 
+ + // Iterate over known flag values + let mut first = true; + let mut iter = flags.iter_names(); + for (name, _) in &mut iter { + if !first { + writer.write_str(" | ")?; + } + + first = false; + writer.write_str(name)?; + } + + // Append any extra bits that correspond to flags to the end of the format + let remaining = iter.remaining().bits(); + if remaining != B::Bits::EMPTY { + if !first { + writer.write_str(" | ")?; + } + + writer.write_str("0x")?; + remaining.write_hex(writer)?; + } + + fmt::Result::Ok(()) +} + +#[cfg(feature = "serde")] +pub(crate) struct AsDisplay<'a, B>(pub(crate) &'a B); + +#[cfg(feature = "serde")] +impl<'a, B: Flags> fmt::Display for AsDisplay<'a, B> +where + B::Bits: WriteHex, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + to_writer(self.0, f) + } +} + +/** +Parse a flags value from text. + +This function will fail on any names that don't correspond to defined flags. +Unknown bits will be retained. +*/ +pub fn from_str(input: &str) -> Result +where + B::Bits: ParseHex, +{ + let mut parsed_flags = B::empty(); + + // If the input is empty then return an empty set of flags + if input.trim().is_empty() { + return Ok(parsed_flags); + } + + for flag in input.split('|') { + let flag = flag.trim(); + + // If the flag is empty then we've got missing input + if flag.is_empty() { + return Err(ParseError::empty_flag()); + } + + // If the flag starts with `0x` then it's a hex number + // Parse it directly to the underlying bits type + let parsed_flag = if let Some(flag) = flag.strip_prefix("0x") { + let bits = + ::parse_hex(flag).map_err(|_| ParseError::invalid_hex_flag(flag))?; + + B::from_bits_retain(bits) + } + // Otherwise the flag is a name + // The generated flags type will determine whether + // or not it's a valid identifier + else { + B::from_name(flag).ok_or_else(|| ParseError::invalid_named_flag(flag))? + }; + + parsed_flags.insert(parsed_flag); + } + + Ok(parsed_flags) +} + +/** +Write a flags value as text, ignoring any unknown bits. +*/ +pub fn to_writer_truncate(flags: &B, writer: impl Write) -> Result<(), fmt::Error> +where + B::Bits: WriteHex, +{ + to_writer(&B::from_bits_truncate(flags.bits()), writer) +} + +/** +Parse a flags value from text. + +This function will fail on any names that don't correspond to defined flags. +Unknown bits will be ignored. +*/ +pub fn from_str_truncate(input: &str) -> Result +where + B::Bits: ParseHex, +{ + Ok(B::from_bits_truncate(from_str::(input)?.bits())) +} + +/** +Write only the contained, defined, named flags in a flags value as text. +*/ +pub fn to_writer_strict(flags: &B, mut writer: impl Write) -> Result<(), fmt::Error> { + // This is a simplified version of `to_writer` that ignores + // any bits not corresponding to a named flag + + let mut first = true; + let mut iter = flags.iter_names(); + for (name, _) in &mut iter { + if !first { + writer.write_str(" | ")?; + } + + first = false; + writer.write_str(name)?; + } + + fmt::Result::Ok(()) +} + +/** +Parse a flags value from text. + +This function will fail on any names that don't correspond to defined flags. +This function will fail to parse hex values. 
+*/ +pub fn from_str_strict(input: &str) -> Result { + // This is a simplified version of `from_str` that ignores + // any bits not corresponding to a named flag + + let mut parsed_flags = B::empty(); + + // If the input is empty then return an empty set of flags + if input.trim().is_empty() { + return Ok(parsed_flags); + } + + for flag in input.split('|') { + let flag = flag.trim(); + + // If the flag is empty then we've got missing input + if flag.is_empty() { + return Err(ParseError::empty_flag()); + } + + // If the flag starts with `0x` then it's a hex number + // These aren't supported in the strict parser + if flag.starts_with("0x") { + return Err(ParseError::invalid_hex_flag("unsupported hex flag value")); + } + + let parsed_flag = B::from_name(flag).ok_or_else(|| ParseError::invalid_named_flag(flag))?; + + parsed_flags.insert(parsed_flag); + } + + Ok(parsed_flags) +} + +/** +Encode a value as a hex string. + +Implementors of this trait should not write the `0x` prefix. +*/ +pub trait WriteHex { + /// Write the value as hex. + fn write_hex(&self, writer: W) -> fmt::Result; +} + +/** +Parse a value from a hex string. +*/ +pub trait ParseHex { + /// Parse the value from hex. + fn parse_hex(input: &str) -> Result + where + Self: Sized; +} + +/// An error encountered while parsing flags from text. +#[derive(Debug)] +pub struct ParseError(ParseErrorKind); + +#[derive(Debug)] +#[allow(clippy::enum_variant_names)] +enum ParseErrorKind { + EmptyFlag, + InvalidNamedFlag { + #[cfg(not(feature = "std"))] + got: (), + #[cfg(feature = "std")] + got: String, + }, + InvalidHexFlag { + #[cfg(not(feature = "std"))] + got: (), + #[cfg(feature = "std")] + got: String, + }, +} + +impl ParseError { + /// An invalid hex flag was encountered. + pub fn invalid_hex_flag(flag: impl fmt::Display) -> Self { + let _flag = flag; + + let got = { + #[cfg(feature = "std")] + { + _flag.to_string() + } + }; + + ParseError(ParseErrorKind::InvalidHexFlag { got }) + } + + /// A named flag that doesn't correspond to any on the flags type was encountered. + pub fn invalid_named_flag(flag: impl fmt::Display) -> Self { + let _flag = flag; + + let got = { + #[cfg(feature = "std")] + { + _flag.to_string() + } + }; + + ParseError(ParseErrorKind::InvalidNamedFlag { got }) + } + + /// A hex or named flag wasn't found between separators. + pub const fn empty_flag() -> Self { + ParseError(ParseErrorKind::EmptyFlag) + } +} + +impl fmt::Display for ParseError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match &self.0 { + ParseErrorKind::InvalidNamedFlag { got } => { + let _got = got; + + write!(f, "unrecognized named flag")?; + + #[cfg(feature = "std")] + { + write!(f, " `{}`", _got)?; + } + } + ParseErrorKind::InvalidHexFlag { got } => { + let _got = got; + + write!(f, "invalid hex flag")?; + + #[cfg(feature = "std")] + { + write!(f, " `{}`", _got)?; + } + } + ParseErrorKind::EmptyFlag => { + write!(f, "encountered empty flag")?; + } + } + + Ok(()) + } +} + +#[cfg(feature = "std")] +impl std::error::Error for ParseError {} diff --git a/vendor/bitflags/src/public.rs b/vendor/bitflags/src/public.rs new file mode 100644 index 00000000000000..1326a572a53205 --- /dev/null +++ b/vendor/bitflags/src/public.rs @@ -0,0 +1,580 @@ +//! Generate the user-facing flags type. +//! +//! The code here belongs to the end-user, so new trait implementations and methods can't be +//! added without potentially breaking users. + +/// Declare the user-facing bitflags struct. 
+/// +/// This type is guaranteed to be a newtype with a `bitflags`-facing type as its single field. +#[macro_export] +#[doc(hidden)] +macro_rules! __declare_public_bitflags { + ( + $(#[$outer:meta])* + $vis:vis struct $PublicBitFlags:ident + ) => { + $(#[$outer])* + $vis struct $PublicBitFlags(<$PublicBitFlags as $crate::__private::PublicFlags>::Internal); + }; +} + +/// Implement functions on the public (user-facing) bitflags type. +/// +/// We need to be careful about adding new methods and trait implementations here because they +/// could conflict with items added by the end-user. +#[macro_export] +#[doc(hidden)] +macro_rules! __impl_public_bitflags_forward { + ( + $(#[$outer:meta])* + $PublicBitFlags:ident: $T:ty, $InternalBitFlags:ident + ) => { + $crate::__impl_bitflags! { + params: self, bits, name, other, value; + $(#[$outer])* + $PublicBitFlags: $T { + fn empty() { + Self($InternalBitFlags::empty()) + } + + fn all() { + Self($InternalBitFlags::all()) + } + + fn bits(&self) { + self.0.bits() + } + + fn from_bits(bits) { + match $InternalBitFlags::from_bits(bits) { + $crate::__private::core::option::Option::Some(bits) => $crate::__private::core::option::Option::Some(Self(bits)), + $crate::__private::core::option::Option::None => $crate::__private::core::option::Option::None, + } + } + + fn from_bits_truncate(bits) { + Self($InternalBitFlags::from_bits_truncate(bits)) + } + + fn from_bits_retain(bits) { + Self($InternalBitFlags::from_bits_retain(bits)) + } + + fn from_name(name) { + match $InternalBitFlags::from_name(name) { + $crate::__private::core::option::Option::Some(bits) => $crate::__private::core::option::Option::Some(Self(bits)), + $crate::__private::core::option::Option::None => $crate::__private::core::option::Option::None, + } + } + + fn is_empty(&self) { + self.0.is_empty() + } + + fn is_all(&self) { + self.0.is_all() + } + + fn intersects(&self, other) { + self.0.intersects(other.0) + } + + fn contains(&self, other) { + self.0.contains(other.0) + } + + fn insert(&mut self, other) { + self.0.insert(other.0) + } + + fn remove(&mut self, other) { + self.0.remove(other.0) + } + + fn toggle(&mut self, other) { + self.0.toggle(other.0) + } + + fn set(&mut self, other, value) { + self.0.set(other.0, value) + } + + fn intersection(self, other) { + Self(self.0.intersection(other.0)) + } + + fn union(self, other) { + Self(self.0.union(other.0)) + } + + fn difference(self, other) { + Self(self.0.difference(other.0)) + } + + fn symmetric_difference(self, other) { + Self(self.0.symmetric_difference(other.0)) + } + + fn complement(self) { + Self(self.0.complement()) + } + } + } + }; +} + +/// Implement functions on the public (user-facing) bitflags type. +/// +/// We need to be careful about adding new methods and trait implementations here because they +/// could conflict with items added by the end-user. +#[macro_export] +#[doc(hidden)] +macro_rules! __impl_public_bitflags { + ( + $(#[$outer:meta])* + $BitFlags:ident: $T:ty, $PublicBitFlags:ident { + $( + $(#[$inner:ident $($args:tt)*])* + const $Flag:tt = $value:expr; + )* + } + ) => { + $crate::__impl_bitflags! 
{ + params: self, bits, name, other, value; + $(#[$outer])* + $BitFlags: $T { + fn empty() { + Self(<$T as $crate::Bits>::EMPTY) + } + + fn all() { + let mut truncated = <$T as $crate::Bits>::EMPTY; + let mut i = 0; + + $( + $crate::__bitflags_expr_safe_attrs!( + $(#[$inner $($args)*])* + {{ + let flag = <$PublicBitFlags as $crate::Flags>::FLAGS[i].value().bits(); + + truncated = truncated | flag; + i += 1; + }} + ); + )* + + let _ = i; + Self(truncated) + } + + fn bits(&self) { + self.0 + } + + fn from_bits(bits) { + let truncated = Self::from_bits_truncate(bits).0; + + if truncated == bits { + $crate::__private::core::option::Option::Some(Self(bits)) + } else { + $crate::__private::core::option::Option::None + } + } + + fn from_bits_truncate(bits) { + Self(bits & Self::all().0) + } + + fn from_bits_retain(bits) { + Self(bits) + } + + fn from_name(name) { + $( + $crate::__bitflags_flag!({ + name: $Flag, + named: { + $crate::__bitflags_expr_safe_attrs!( + $(#[$inner $($args)*])* + { + if name == $crate::__private::core::stringify!($Flag) { + return $crate::__private::core::option::Option::Some(Self($PublicBitFlags::$Flag.bits())); + } + } + ); + }, + unnamed: {}, + }); + )* + + let _ = name; + $crate::__private::core::option::Option::None + } + + fn is_empty(&self) { + self.0 == <$T as $crate::Bits>::EMPTY + } + + fn is_all(&self) { + // NOTE: We check against `Self::all` here, not `Self::Bits::ALL` + // because the set of all flags may not use all bits + Self::all().0 | self.0 == self.0 + } + + fn intersects(&self, other) { + self.0 & other.0 != <$T as $crate::Bits>::EMPTY + } + + fn contains(&self, other) { + self.0 & other.0 == other.0 + } + + fn insert(&mut self, other) { + *self = Self(self.0).union(other); + } + + fn remove(&mut self, other) { + *self = Self(self.0).difference(other); + } + + fn toggle(&mut self, other) { + *self = Self(self.0).symmetric_difference(other); + } + + fn set(&mut self, other, value) { + if value { + self.insert(other); + } else { + self.remove(other); + } + } + + fn intersection(self, other) { + Self(self.0 & other.0) + } + + fn union(self, other) { + Self(self.0 | other.0) + } + + fn difference(self, other) { + Self(self.0 & !other.0) + } + + fn symmetric_difference(self, other) { + Self(self.0 ^ other.0) + } + + fn complement(self) { + Self::from_bits_truncate(!self.0) + } + } + } + }; +} + +/// Implement iterators on the public (user-facing) bitflags type. +#[macro_export] +#[doc(hidden)] +macro_rules! __impl_public_bitflags_iter { + ( + $(#[$outer:meta])* + $BitFlags:ident: $T:ty, $PublicBitFlags:ident + ) => { + $(#[$outer])* + impl $BitFlags { + /// Yield a set of contained flags values. + /// + /// Each yielded flags value will correspond to a defined named flag. Any unknown bits + /// will be yielded together as a final flags value. + #[inline] + pub const fn iter(&self) -> $crate::iter::Iter<$PublicBitFlags> { + $crate::iter::Iter::__private_const_new( + <$PublicBitFlags as $crate::Flags>::FLAGS, + $PublicBitFlags::from_bits_retain(self.bits()), + $PublicBitFlags::from_bits_retain(self.bits()), + ) + } + + /// Yield a set of contained named flags values. + /// + /// This method is like [`iter`](#method.iter), except only yields bits in contained named flags. + /// Any unknown bits, or bits not corresponding to a contained flag will not be yielded. 
+ #[inline]
+ pub const fn iter_names(&self) -> $crate::iter::IterNames<$PublicBitFlags> {
+ $crate::iter::IterNames::__private_const_new(
+ <$PublicBitFlags as $crate::Flags>::FLAGS,
+ $PublicBitFlags::from_bits_retain(self.bits()),
+ $PublicBitFlags::from_bits_retain(self.bits()),
+ )
+ }
+ }
+
+ $(#[$outer])*
+ impl $crate::__private::core::iter::IntoIterator for $BitFlags {
+ type Item = $PublicBitFlags;
+ type IntoIter = $crate::iter::Iter<$PublicBitFlags>;
+
+ fn into_iter(self) -> Self::IntoIter {
+ self.iter()
+ }
+ }
+ };
+}
+
+/// Implement traits on the public (user-facing) bitflags type.
+#[macro_export]
+#[doc(hidden)]
+macro_rules! __impl_public_bitflags_ops {
+ (
+ $(#[$outer:meta])*
+ $PublicBitFlags:ident
+ ) => {
+
+ $(#[$outer])*
+ impl $crate::__private::core::fmt::Binary for $PublicBitFlags {
+ fn fmt(
+ &self,
+ f: &mut $crate::__private::core::fmt::Formatter,
+ ) -> $crate::__private::core::fmt::Result {
+ let inner = self.0;
+ $crate::__private::core::fmt::Binary::fmt(&inner, f)
+ }
+ }
+
+ $(#[$outer])*
+ impl $crate::__private::core::fmt::Octal for $PublicBitFlags {
+ fn fmt(
+ &self,
+ f: &mut $crate::__private::core::fmt::Formatter,
+ ) -> $crate::__private::core::fmt::Result {
+ let inner = self.0;
+ $crate::__private::core::fmt::Octal::fmt(&inner, f)
+ }
+ }
+
+ $(#[$outer])*
+ impl $crate::__private::core::fmt::LowerHex for $PublicBitFlags {
+ fn fmt(
+ &self,
+ f: &mut $crate::__private::core::fmt::Formatter,
+ ) -> $crate::__private::core::fmt::Result {
+ let inner = self.0;
+ $crate::__private::core::fmt::LowerHex::fmt(&inner, f)
+ }
+ }
+
+ $(#[$outer])*
+ impl $crate::__private::core::fmt::UpperHex for $PublicBitFlags {
+ fn fmt(
+ &self,
+ f: &mut $crate::__private::core::fmt::Formatter,
+ ) -> $crate::__private::core::fmt::Result {
+ let inner = self.0;
+ $crate::__private::core::fmt::UpperHex::fmt(&inner, f)
+ }
+ }
+
+ $(#[$outer])*
+ impl $crate::__private::core::ops::BitOr for $PublicBitFlags {
+ type Output = Self;
+
+ /// The bitwise or (`|`) of the bits in two flags values.
+ #[inline]
+ fn bitor(self, other: $PublicBitFlags) -> Self {
+ self.union(other)
+ }
+ }
+
+ $(#[$outer])*
+ impl $crate::__private::core::ops::BitOrAssign for $PublicBitFlags {
+ /// The bitwise or (`|`) of the bits in two flags values.
+ #[inline]
+ fn bitor_assign(&mut self, other: Self) {
+ self.insert(other);
+ }
+ }
+
+ $(#[$outer])*
+ impl $crate::__private::core::ops::BitXor for $PublicBitFlags {
+ type Output = Self;
+
+ /// The bitwise exclusive-or (`^`) of the bits in two flags values.
+ #[inline]
+ fn bitxor(self, other: Self) -> Self {
+ self.symmetric_difference(other)
+ }
+ }
+
+ $(#[$outer])*
+ impl $crate::__private::core::ops::BitXorAssign for $PublicBitFlags {
+ /// The bitwise exclusive-or (`^`) of the bits in two flags values.
+ #[inline]
+ fn bitxor_assign(&mut self, other: Self) {
+ self.toggle(other);
+ }
+ }
+
+ $(#[$outer])*
+ impl $crate::__private::core::ops::BitAnd for $PublicBitFlags {
+ type Output = Self;
+
+ /// The bitwise and (`&`) of the bits in two flags values.
+ #[inline]
+ fn bitand(self, other: Self) -> Self {
+ self.intersection(other)
+ }
+ }
+
+ $(#[$outer])*
+ impl $crate::__private::core::ops::BitAndAssign for $PublicBitFlags {
+ /// The bitwise and (`&`) of the bits in two flags values.
+ #[inline] + fn bitand_assign(&mut self, other: Self) { + *self = Self::from_bits_retain(self.bits()).intersection(other); + } + } + + $(#[$outer])* + impl $crate::__private::core::ops::Sub for $PublicBitFlags { + type Output = Self; + + /// The intersection of a source flags value with the complement of a target flags value (`&!`). + /// + /// This method is not equivalent to `self & !other` when `other` has unknown bits set. + /// `difference` won't truncate `other`, but the `!` operator will. + #[inline] + fn sub(self, other: Self) -> Self { + self.difference(other) + } + } + + $(#[$outer])* + impl $crate::__private::core::ops::SubAssign for $PublicBitFlags { + /// The intersection of a source flags value with the complement of a target flags value (`&!`). + /// + /// This method is not equivalent to `self & !other` when `other` has unknown bits set. + /// `difference` won't truncate `other`, but the `!` operator will. + #[inline] + fn sub_assign(&mut self, other: Self) { + self.remove(other); + } + } + + $(#[$outer])* + impl $crate::__private::core::ops::Not for $PublicBitFlags { + type Output = Self; + + /// The bitwise negation (`!`) of the bits in a flags value, truncating the result. + #[inline] + fn not(self) -> Self { + self.complement() + } + } + + $(#[$outer])* + impl $crate::__private::core::iter::Extend<$PublicBitFlags> for $PublicBitFlags { + /// The bitwise or (`|`) of the bits in each flags value. + fn extend>( + &mut self, + iterator: T, + ) { + for item in iterator { + self.insert(item) + } + } + } + + $(#[$outer])* + impl $crate::__private::core::iter::FromIterator<$PublicBitFlags> for $PublicBitFlags { + /// The bitwise or (`|`) of the bits in each flags value. + fn from_iter>( + iterator: T, + ) -> Self { + use $crate::__private::core::iter::Extend; + + let mut result = Self::empty(); + result.extend(iterator); + result + } + } + }; +} + +/// Implement constants on the public (user-facing) bitflags type. +#[macro_export] +#[doc(hidden)] +macro_rules! 
__impl_public_bitflags_consts { + ( + $(#[$outer:meta])* + $PublicBitFlags:ident: $T:ty { + $( + $(#[$inner:ident $($args:tt)*])* + const $Flag:tt = $value:expr; + )* + } + ) => { + $(#[$outer])* + impl $PublicBitFlags { + $( + $crate::__bitflags_flag!({ + name: $Flag, + named: { + $(#[$inner $($args)*])* + #[allow( + deprecated, + non_upper_case_globals, + )] + pub const $Flag: Self = Self::from_bits_retain($value); + }, + unnamed: {}, + }); + )* + } + + $(#[$outer])* + impl $crate::Flags for $PublicBitFlags { + const FLAGS: &'static [$crate::Flag<$PublicBitFlags>] = &[ + $( + $crate::__bitflags_flag!({ + name: $Flag, + named: { + $crate::__bitflags_expr_safe_attrs!( + $(#[$inner $($args)*])* + { + #[allow( + deprecated, + non_upper_case_globals, + )] + $crate::Flag::new($crate::__private::core::stringify!($Flag), $PublicBitFlags::$Flag) + } + ) + }, + unnamed: { + $crate::__bitflags_expr_safe_attrs!( + $(#[$inner $($args)*])* + { + #[allow( + deprecated, + non_upper_case_globals, + )] + $crate::Flag::new("", $PublicBitFlags::from_bits_retain($value)) + } + ) + }, + }), + )* + ]; + + type Bits = $T; + + fn bits(&self) -> $T { + $PublicBitFlags::bits(self) + } + + fn from_bits_retain(bits: $T) -> $PublicBitFlags { + $PublicBitFlags::from_bits_retain(bits) + } + } + }; +} diff --git a/vendor/bitflags/src/tests.rs b/vendor/bitflags/src/tests.rs new file mode 100644 index 00000000000000..0770e1b3f93487 --- /dev/null +++ b/vendor/bitflags/src/tests.rs @@ -0,0 +1,135 @@ +mod all; +mod bitflags_match; +mod bits; +mod clear; +mod complement; +mod contains; +mod difference; +mod empty; +mod eq; +mod extend; +mod flags; +mod fmt; +mod from_bits; +mod from_bits_retain; +mod from_bits_truncate; +mod from_name; +mod insert; +mod intersection; +mod intersects; +mod is_all; +mod is_empty; +mod iter; +mod parser; +mod remove; +mod symmetric_difference; +mod truncate; +mod union; +mod unknown; + +bitflags! 
{ + #[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Clone, Copy)] + pub struct TestFlags: u8 { + /// 1 + const A = 1; + + /// 1 << 1 + const B = 1 << 1; + + /// 1 << 2 + const C = 1 << 2; + + /// 1 | (1 << 1) | (1 << 2) + const ABC = Self::A.bits() | Self::B.bits() | Self::C.bits(); + } + + #[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Clone, Copy)] + pub struct TestFlagsInvert: u8 { + /// 1 | (1 << 1) | (1 << 2) + const ABC = Self::A.bits() | Self::B.bits() | Self::C.bits(); + + /// 1 + const A = 1; + + /// 1 << 1 + const B = 1 << 1; + + /// 1 << 2 + const C = 1 << 2; + } + + #[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Clone, Copy)] + pub struct TestZero: u8 { + /// 0 + const ZERO = 0; + } + + #[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Clone, Copy)] + pub struct TestZeroOne: u8 { + /// 0 + const ZERO = 0; + + /// 1 + const ONE = 1; + } + + #[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Clone, Copy)] + pub struct TestUnicode: u8 { + /// 1 + const 一 = 1; + + /// 2 + const 二 = 1 << 1; + } + + #[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Clone, Copy)] + pub struct TestEmpty: u8 {} + + #[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Clone, Copy)] + pub struct TestOverlapping: u8 { + /// 1 | (1 << 1) + const AB = 1 | (1 << 1); + + /// (1 << 1) | (1 << 2) + const BC = (1 << 1) | (1 << 2); + } + + #[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Clone, Copy)] + pub struct TestOverlappingFull: u8 { + /// 1 + const A = 1; + + /// 1 + const B = 1; + + /// 1 + const C = 1; + + /// 2 + const D = 1 << 1; + } + + #[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Clone, Copy)] + pub struct TestExternal: u8 { + /// 1 + const A = 1; + + /// 1 << 1 + const B = 1 << 1; + + /// 1 << 2 + const C = 1 << 2; + + /// 1 | (1 << 1) | (1 << 2) + const ABC = Self::A.bits() | Self::B.bits() | Self::C.bits(); + + /// External + const _ = !0; + } + + #[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Clone, Copy)] + pub struct TestExternalFull: u8 { + /// External + const _ = !0; + } +} diff --git a/vendor/bitflags/src/tests/all.rs b/vendor/bitflags/src/tests/all.rs new file mode 100644 index 00000000000000..cceb93a4691b07 --- /dev/null +++ b/vendor/bitflags/src/tests/all.rs @@ -0,0 +1,23 @@ +use super::*; + +use crate::Flags; + +#[test] +fn cases() { + case(1 | 1 << 1 | 1 << 2, TestFlags::all); + + case(0, TestZero::all); + + case(0, TestEmpty::all); + + case(!0, TestExternal::all); +} + +#[track_caller] +fn case(expected: T::Bits, inherent: impl FnOnce() -> T) +where + ::Bits: std::fmt::Debug + PartialEq, +{ + assert_eq!(expected, inherent().bits(), "T::all()"); + assert_eq!(expected, T::all().bits(), "Flags::all()"); +} diff --git a/vendor/bitflags/src/tests/bitflags_match.rs b/vendor/bitflags/src/tests/bitflags_match.rs new file mode 100644 index 00000000000000..93190f8bb4ef64 --- /dev/null +++ b/vendor/bitflags/src/tests/bitflags_match.rs @@ -0,0 +1,93 @@ +bitflags! 
{ + #[derive(PartialEq)] + struct Flags: u8 { + const A = 1 << 0; + const B = 1 << 1; + const C = 1 << 2; + const D = 1 << 3; + } +} + +fn flag_to_string(flag: Flags) -> String { + bitflags_match!(flag, { + Flags::A => "A".to_string(), + Flags::B => { "B".to_string() } + Flags::C => "C".to_string(), + Flags::D => "D".to_string(), + Flags::A | Flags::B => "A or B".to_string(), + Flags::A & Flags::B => { "A and B | empty".to_string() }, + Flags::A ^ Flags::B => "A xor B".to_string(), + Flags::A | Flags::B | Flags::C => "A or B or C".to_string(), + Flags::A & Flags::B & Flags::C => "A and B and C".to_string(), + Flags::A ^ Flags::B ^ Flags::C => "A xor B xor C".to_string(), + Flags::A | Flags::B | Flags::C | Flags::D => "All flags".to_string(), + _ => "Unknown combination".to_string() + }) +} + +#[test] +fn test_single_flags() { + assert_eq!(flag_to_string(Flags::A), "A"); + assert_eq!(flag_to_string(Flags::B), "B"); + assert_eq!(flag_to_string(Flags::C), "C"); + assert_eq!(flag_to_string(Flags::D), "D"); +} + +#[test] +fn test_or_operations() { + assert_eq!(flag_to_string(Flags::A | Flags::B), "A or B"); + assert_eq!( + flag_to_string(Flags::A | Flags::B | Flags::C), + "A or B or C" + ); + assert_eq!( + flag_to_string(Flags::A | Flags::B | Flags::C | Flags::D), + "All flags" + ); +} + +#[test] +fn test_and_operations() { + assert_eq!(flag_to_string(Flags::A & Flags::A), "A"); + assert_eq!(flag_to_string(Flags::A & Flags::B), "A and B | empty"); + assert_eq!( + flag_to_string(Flags::A & Flags::B & Flags::C), + "A and B | empty" + ); // Since A, B, and C are mutually exclusive, the result of A & B & C is 0 ==> A & B & C = 0000 (i.e., empty). + // However, in the bitflags_match! statement (actually is if {..} else if {..} .. else {..}), + // the "A & B = 0000" condition is listed first, so 0000 will match "A & B" first, + // resulting in the output of the "A and B | empty" branch. 
+ assert_eq!( + flag_to_string(Flags::A & Flags::B & Flags::C & Flags::D), + "A and B | empty" + ); +} + +#[test] +fn test_xor_operations() { + assert_eq!(flag_to_string(Flags::A ^ Flags::B), "A or B"); // A | B = A ^ B == 0011 + assert_eq!(flag_to_string(Flags::A ^ Flags::A), "A and B | empty"); + assert_eq!( + flag_to_string(Flags::A ^ Flags::B ^ Flags::C), + "A or B or C" + ); +} + +#[test] +fn test_complex_operations() { + assert_eq!(flag_to_string(Flags::A | (Flags::B & Flags::C)), "A"); + assert_eq!( + flag_to_string((Flags::A | Flags::B) & (Flags::B | Flags::C)), + "B" + ); + assert_eq!( + flag_to_string(Flags::A ^ (Flags::B | Flags::C)), + "A or B or C" + ); +} + +#[test] +fn test_empty_and_full_flags() { + assert_eq!(flag_to_string(Flags::empty()), "A and B | empty"); + assert_eq!(flag_to_string(Flags::all()), "All flags"); +} diff --git a/vendor/bitflags/src/tests/bits.rs b/vendor/bitflags/src/tests/bits.rs new file mode 100644 index 00000000000000..678f153e36b15a --- /dev/null +++ b/vendor/bitflags/src/tests/bits.rs @@ -0,0 +1,36 @@ +use super::*; + +use crate::Flags; + +#[test] +fn cases() { + case(0, TestFlags::empty(), TestFlags::bits); + + case(1, TestFlags::A, TestFlags::bits); + case(1 | 1 << 1 | 1 << 2, TestFlags::ABC, TestFlags::bits); + + case(!0, TestFlags::from_bits_retain(u8::MAX), TestFlags::bits); + case(1 << 3, TestFlags::from_bits_retain(1 << 3), TestFlags::bits); + + case(1 << 3, TestZero::from_bits_retain(1 << 3), TestZero::bits); + + case(1 << 3, TestEmpty::from_bits_retain(1 << 3), TestEmpty::bits); + + case( + 1 << 4 | 1 << 6, + TestExternal::from_bits_retain(1 << 4 | 1 << 6), + TestExternal::bits, + ); +} + +#[track_caller] +fn case( + expected: T::Bits, + value: T, + inherent: impl FnOnce(&T) -> T::Bits, +) where + T::Bits: std::fmt::Debug + PartialEq, +{ + assert_eq!(expected, inherent(&value), "{:?}.bits()", value); + assert_eq!(expected, Flags::bits(&value), "Flags::bits({:?})", value); +} diff --git a/vendor/bitflags/src/tests/clear.rs b/vendor/bitflags/src/tests/clear.rs new file mode 100644 index 00000000000000..2d42cce4e3f98b --- /dev/null +++ b/vendor/bitflags/src/tests/clear.rs @@ -0,0 +1,27 @@ +use super::*; + +use crate::Flags; + +#[test] +fn cases() { + case(TestFlags::from_bits_retain(0)); + + case(TestFlags::from_bits_retain(1 << 3)); + + case(TestFlags::ABC | TestFlags::from_bits_retain(1 << 3)); + + case(TestZero::empty()); + + case(TestZero::all()); + + case(TestFlags::from_bits_retain(1 << 3) | TestFlags::all()); +} + +#[track_caller] +fn case(mut flags: T) +where + T: std::fmt::Debug + PartialEq + Copy, +{ + flags.clear(); + assert_eq!(flags, T::empty(), "{:?}.clear()", flags); +} diff --git a/vendor/bitflags/src/tests/complement.rs b/vendor/bitflags/src/tests/complement.rs new file mode 100644 index 00000000000000..ac7a421af0beb6 --- /dev/null +++ b/vendor/bitflags/src/tests/complement.rs @@ -0,0 +1,53 @@ +use super::*; + +use crate::Flags; + +#[test] +fn cases() { + case(0, TestFlags::all(), TestFlags::complement); + case(0, TestFlags::from_bits_retain(!0), TestFlags::complement); + + case(1 | 1 << 1, TestFlags::C, TestFlags::complement); + case( + 1 | 1 << 1, + TestFlags::C | TestFlags::from_bits_retain(1 << 3), + TestFlags::complement, + ); + + case( + 1 | 1 << 1 | 1 << 2, + TestFlags::empty(), + TestFlags::complement, + ); + case( + 1 | 1 << 1 | 1 << 2, + TestFlags::from_bits_retain(1 << 3), + TestFlags::complement, + ); + + case(0, TestZero::empty(), TestZero::complement); + + case(0, TestEmpty::empty(), TestEmpty::complement); + + 
case(1 << 2, TestOverlapping::AB, TestOverlapping::complement); + + case(!0, TestExternal::empty(), TestExternal::complement); +} + +#[track_caller] +fn case + Copy>( + expected: T::Bits, + value: T, + inherent: impl FnOnce(T) -> T, +) where + T::Bits: std::fmt::Debug + PartialEq, +{ + assert_eq!(expected, inherent(value).bits(), "{:?}.complement()", value); + assert_eq!( + expected, + Flags::complement(value).bits(), + "Flags::complement({:?})", + value + ); + assert_eq!(expected, (!value).bits(), "!{:?}", value); +} diff --git a/vendor/bitflags/src/tests/contains.rs b/vendor/bitflags/src/tests/contains.rs new file mode 100644 index 00000000000000..12428ddcb09c76 --- /dev/null +++ b/vendor/bitflags/src/tests/contains.rs @@ -0,0 +1,108 @@ +use super::*; + +use crate::Flags; + +#[test] +fn cases() { + case( + TestFlags::empty(), + &[ + (TestFlags::empty(), true), + (TestFlags::A, false), + (TestFlags::B, false), + (TestFlags::C, false), + (TestFlags::from_bits_retain(1 << 3), false), + ], + TestFlags::contains, + ); + + case( + TestFlags::A, + &[ + (TestFlags::empty(), true), + (TestFlags::A, true), + (TestFlags::B, false), + (TestFlags::C, false), + (TestFlags::ABC, false), + (TestFlags::from_bits_retain(1 << 3), false), + (TestFlags::from_bits_retain(1 | (1 << 3)), false), + ], + TestFlags::contains, + ); + + case( + TestFlags::ABC, + &[ + (TestFlags::empty(), true), + (TestFlags::A, true), + (TestFlags::B, true), + (TestFlags::C, true), + (TestFlags::ABC, true), + (TestFlags::from_bits_retain(1 << 3), false), + ], + TestFlags::contains, + ); + + case( + TestFlags::from_bits_retain(1 << 3), + &[ + (TestFlags::empty(), true), + (TestFlags::A, false), + (TestFlags::B, false), + (TestFlags::C, false), + (TestFlags::from_bits_retain(1 << 3), true), + ], + TestFlags::contains, + ); + + case( + TestZero::ZERO, + &[(TestZero::ZERO, true)], + TestZero::contains, + ); + + case( + TestOverlapping::AB, + &[ + (TestOverlapping::AB, true), + (TestOverlapping::BC, false), + (TestOverlapping::from_bits_retain(1 << 1), true), + ], + TestOverlapping::contains, + ); + + case( + TestExternal::all(), + &[ + (TestExternal::A, true), + (TestExternal::B, true), + (TestExternal::C, true), + (TestExternal::from_bits_retain(1 << 5 | 1 << 7), true), + ], + TestExternal::contains, + ); +} + +#[track_caller] +fn case( + value: T, + inputs: &[(T, bool)], + mut inherent: impl FnMut(&T, T) -> bool, +) { + for (input, expected) in inputs { + assert_eq!( + *expected, + inherent(&value, *input), + "{:?}.contains({:?})", + value, + input + ); + assert_eq!( + *expected, + Flags::contains(&value, *input), + "Flags::contains({:?}, {:?})", + value, + input + ); + } +} diff --git a/vendor/bitflags/src/tests/difference.rs b/vendor/bitflags/src/tests/difference.rs new file mode 100644 index 00000000000000..6ce9c0bf1981a3 --- /dev/null +++ b/vendor/bitflags/src/tests/difference.rs @@ -0,0 +1,92 @@ +use super::*; + +use crate::Flags; + +#[test] +fn cases() { + case( + TestFlags::A | TestFlags::B, + &[ + (TestFlags::A, 1 << 1), + (TestFlags::B, 1), + (TestFlags::from_bits_retain(1 << 3), 1 | 1 << 1), + ], + TestFlags::difference, + ); + + case( + TestFlags::from_bits_retain(1 | 1 << 3), + &[ + (TestFlags::A, 1 << 3), + (TestFlags::from_bits_retain(1 << 3), 1), + ], + TestFlags::difference, + ); + + case( + TestExternal::from_bits_retain(!0), + &[(TestExternal::A, 0b1111_1110)], + TestExternal::difference, + ); + + assert_eq!( + 0b1111_1110, + (TestExternal::from_bits_retain(!0) & !TestExternal::A).bits() + ); + + assert_eq!( + 
0b1111_1110, + (TestFlags::from_bits_retain(!0).difference(TestFlags::A)).bits() + ); + + // The `!` operator unsets bits that don't correspond to known flags + assert_eq!( + 1 << 1 | 1 << 2, + (TestFlags::from_bits_retain(!0) & !TestFlags::A).bits() + ); +} + +#[track_caller] +fn case + std::ops::SubAssign + Copy>( + value: T, + inputs: &[(T, T::Bits)], + mut inherent: impl FnMut(T, T) -> T, +) where + T::Bits: std::fmt::Debug + PartialEq + Copy, +{ + for (input, expected) in inputs { + assert_eq!( + *expected, + inherent(value, *input).bits(), + "{:?}.difference({:?})", + value, + input + ); + assert_eq!( + *expected, + Flags::difference(value, *input).bits(), + "Flags::difference({:?}, {:?})", + value, + input + ); + assert_eq!( + *expected, + (value - *input).bits(), + "{:?} - {:?}", + value, + input + ); + assert_eq!( + *expected, + { + let mut value = value; + value -= *input; + value + } + .bits(), + "{:?} -= {:?}", + value, + input, + ); + } +} diff --git a/vendor/bitflags/src/tests/empty.rs b/vendor/bitflags/src/tests/empty.rs new file mode 100644 index 00000000000000..57fb1c7cf18789 --- /dev/null +++ b/vendor/bitflags/src/tests/empty.rs @@ -0,0 +1,23 @@ +use super::*; + +use crate::Flags; + +#[test] +fn cases() { + case(0, TestFlags::empty); + + case(0, TestZero::empty); + + case(0, TestEmpty::empty); + + case(0, TestExternal::empty); +} + +#[track_caller] +fn case(expected: T::Bits, inherent: impl FnOnce() -> T) +where + ::Bits: std::fmt::Debug + PartialEq, +{ + assert_eq!(expected, inherent().bits(), "T::empty()"); + assert_eq!(expected, T::empty().bits(), "Flags::empty()"); +} diff --git a/vendor/bitflags/src/tests/eq.rs b/vendor/bitflags/src/tests/eq.rs new file mode 100644 index 00000000000000..9779af7629a988 --- /dev/null +++ b/vendor/bitflags/src/tests/eq.rs @@ -0,0 +1,10 @@ +use super::*; + +#[test] +fn cases() { + assert_eq!(TestFlags::empty(), TestFlags::empty()); + assert_eq!(TestFlags::all(), TestFlags::all()); + + assert!(TestFlags::from_bits_retain(1) < TestFlags::from_bits_retain(2)); + assert!(TestFlags::from_bits_retain(2) > TestFlags::from_bits_retain(1)); +} diff --git a/vendor/bitflags/src/tests/extend.rs b/vendor/bitflags/src/tests/extend.rs new file mode 100644 index 00000000000000..869dc17fc81b61 --- /dev/null +++ b/vendor/bitflags/src/tests/extend.rs @@ -0,0 +1,42 @@ +use super::*; + +#[test] +fn cases() { + let mut flags = TestFlags::empty(); + + flags.extend(TestFlags::A); + + assert_eq!(TestFlags::A, flags); + + flags.extend(TestFlags::A | TestFlags::B | TestFlags::C); + + assert_eq!(TestFlags::ABC, flags); + + flags.extend(TestFlags::from_bits_retain(1 << 5)); + + assert_eq!(TestFlags::ABC | TestFlags::from_bits_retain(1 << 5), flags); +} + +mod external { + use super::*; + + #[test] + fn cases() { + let mut flags = TestExternal::empty(); + + flags.extend(TestExternal::A); + + assert_eq!(TestExternal::A, flags); + + flags.extend(TestExternal::A | TestExternal::B | TestExternal::C); + + assert_eq!(TestExternal::ABC, flags); + + flags.extend(TestExternal::from_bits_retain(1 << 5)); + + assert_eq!( + TestExternal::ABC | TestExternal::from_bits_retain(1 << 5), + flags + ); + } +} diff --git a/vendor/bitflags/src/tests/flags.rs b/vendor/bitflags/src/tests/flags.rs new file mode 100644 index 00000000000000..7a625b312c1ef0 --- /dev/null +++ b/vendor/bitflags/src/tests/flags.rs @@ -0,0 +1,46 @@ +use super::*; + +use crate::Flags; + +#[test] +fn cases() { + let flags = TestFlags::FLAGS + .iter() + .map(|flag| (flag.name(), flag.value().bits())) + .collect::>(); 
+ + assert_eq!( + vec![ + ("A", 1u8), + ("B", 1 << 1), + ("C", 1 << 2), + ("ABC", 1 | 1 << 1 | 1 << 2), + ], + flags, + ); + + assert_eq!(0, TestEmpty::FLAGS.iter().count()); +} + +mod external { + use super::*; + + #[test] + fn cases() { + let flags = TestExternal::FLAGS + .iter() + .map(|flag| (flag.name(), flag.value().bits())) + .collect::>(); + + assert_eq!( + vec![ + ("A", 1u8), + ("B", 1 << 1), + ("C", 1 << 2), + ("ABC", 1 | 1 << 1 | 1 << 2), + ("", !0), + ], + flags, + ); + } +} diff --git a/vendor/bitflags/src/tests/fmt.rs b/vendor/bitflags/src/tests/fmt.rs new file mode 100644 index 00000000000000..ed4571877dc44f --- /dev/null +++ b/vendor/bitflags/src/tests/fmt.rs @@ -0,0 +1,97 @@ +use super::*; + +#[test] +fn cases() { + case(TestFlags::empty(), "TestFlags(0x0)", "0", "0", "0", "0"); + case(TestFlags::A, "TestFlags(A)", "1", "1", "1", "1"); + case( + TestFlags::all(), + "TestFlags(A | B | C)", + "7", + "7", + "7", + "111", + ); + case( + TestFlags::from_bits_retain(1 << 3), + "TestFlags(0x8)", + "8", + "8", + "10", + "1000", + ); + case( + TestFlags::A | TestFlags::from_bits_retain(1 << 3), + "TestFlags(A | 0x8)", + "9", + "9", + "11", + "1001", + ); + + case(TestZero::ZERO, "TestZero(0x0)", "0", "0", "0", "0"); + case( + TestZero::ZERO | TestZero::from_bits_retain(1), + "TestZero(0x1)", + "1", + "1", + "1", + "1", + ); + + case(TestZeroOne::ONE, "TestZeroOne(ONE)", "1", "1", "1", "1"); + + case( + TestOverlapping::from_bits_retain(1 << 1), + "TestOverlapping(0x2)", + "2", + "2", + "2", + "10", + ); + + case( + TestExternal::from_bits_retain(1 | 1 << 1 | 1 << 3), + "TestExternal(A | B | 0x8)", + "B", + "b", + "13", + "1011", + ); + + case( + TestExternal::all(), + "TestExternal(A | B | C | 0xf8)", + "FF", + "ff", + "377", + "11111111", + ); + + case( + TestExternalFull::all(), + "TestExternalFull(0xff)", + "FF", + "ff", + "377", + "11111111", + ); +} + +#[track_caller] +fn case< + T: std::fmt::Debug + std::fmt::UpperHex + std::fmt::LowerHex + std::fmt::Octal + std::fmt::Binary, +>( + value: T, + debug: &str, + uhex: &str, + lhex: &str, + oct: &str, + bin: &str, +) { + assert_eq!(debug, format!("{:?}", value)); + assert_eq!(uhex, format!("{:X}", value)); + assert_eq!(lhex, format!("{:x}", value)); + assert_eq!(oct, format!("{:o}", value)); + assert_eq!(bin, format!("{:b}", value)); +} diff --git a/vendor/bitflags/src/tests/from_bits.rs b/vendor/bitflags/src/tests/from_bits.rs new file mode 100644 index 00000000000000..dada9aff82326c --- /dev/null +++ b/vendor/bitflags/src/tests/from_bits.rs @@ -0,0 +1,45 @@ +use super::*; + +use crate::Flags; + +#[test] +fn cases() { + case(Some(0), 0, TestFlags::from_bits); + case(Some(1), 1, TestFlags::from_bits); + case( + Some(1 | 1 << 1 | 1 << 2), + 1 | 1 << 1 | 1 << 2, + TestFlags::from_bits, + ); + + case(None, 1 << 3, TestFlags::from_bits); + case(None, 1 | 1 << 3, TestFlags::from_bits); + + case(Some(1 | 1 << 1), 1 | 1 << 1, TestOverlapping::from_bits); + + case(Some(1 << 1), 1 << 1, TestOverlapping::from_bits); + + case(Some(1 << 5), 1 << 5, TestExternal::from_bits); +} + +#[track_caller] +fn case( + expected: Option, + input: T::Bits, + inherent: impl FnOnce(T::Bits) -> Option, +) where + ::Bits: std::fmt::Debug + PartialEq, +{ + assert_eq!( + expected, + inherent(input).map(|f| f.bits()), + "T::from_bits({:?})", + input + ); + assert_eq!( + expected, + T::from_bits(input).map(|f| f.bits()), + "Flags::from_bits({:?})", + input + ); +} diff --git a/vendor/bitflags/src/tests/from_bits_retain.rs 
b/vendor/bitflags/src/tests/from_bits_retain.rs new file mode 100644 index 00000000000000..1ae28a663fd62d --- /dev/null +++ b/vendor/bitflags/src/tests/from_bits_retain.rs @@ -0,0 +1,38 @@ +use super::*; + +use crate::Flags; + +#[test] +fn cases() { + case(0, TestFlags::from_bits_retain); + case(1, TestFlags::from_bits_retain); + case(1 | 1 << 1 | 1 << 2, TestFlags::from_bits_retain); + + case(1 << 3, TestFlags::from_bits_retain); + case(1 | 1 << 3, TestFlags::from_bits_retain); + + case(1 | 1 << 1, TestOverlapping::from_bits_retain); + + case(1 << 1, TestOverlapping::from_bits_retain); + + case(1 << 5, TestExternal::from_bits_retain); +} + +#[track_caller] +fn case(input: T::Bits, inherent: impl FnOnce(T::Bits) -> T) +where + ::Bits: std::fmt::Debug + PartialEq, +{ + assert_eq!( + input, + inherent(input).bits(), + "T::from_bits_retain({:?})", + input + ); + assert_eq!( + input, + T::from_bits_retain(input).bits(), + "Flags::from_bits_retain({:?})", + input + ); +} diff --git a/vendor/bitflags/src/tests/from_bits_truncate.rs b/vendor/bitflags/src/tests/from_bits_truncate.rs new file mode 100644 index 00000000000000..e4f3e537c4a3f1 --- /dev/null +++ b/vendor/bitflags/src/tests/from_bits_truncate.rs @@ -0,0 +1,42 @@ +use super::*; + +use crate::Flags; + +#[test] +fn cases() { + case(0, 0, TestFlags::from_bits_truncate); + case(1, 1, TestFlags::from_bits_truncate); + case( + 1 | 1 << 1 | 1 << 2, + 1 | 1 << 1 | 1 << 2, + TestFlags::from_bits_truncate, + ); + + case(0, 1 << 3, TestFlags::from_bits_truncate); + case(1, 1 | 1 << 3, TestFlags::from_bits_truncate); + + case(1 | 1 << 1, 1 | 1 << 1, TestOverlapping::from_bits_truncate); + + case(1 << 1, 1 << 1, TestOverlapping::from_bits_truncate); + + case(1 << 5, 1 << 5, TestExternal::from_bits_truncate); +} + +#[track_caller] +fn case(expected: T::Bits, input: T::Bits, inherent: impl FnOnce(T::Bits) -> T) +where + ::Bits: std::fmt::Debug + PartialEq, +{ + assert_eq!( + expected, + inherent(input).bits(), + "T::from_bits_truncate({:?})", + input + ); + assert_eq!( + expected, + T::from_bits_truncate(input).bits(), + "Flags::from_bits_truncate({:?})", + input + ); +} diff --git a/vendor/bitflags/src/tests/from_name.rs b/vendor/bitflags/src/tests/from_name.rs new file mode 100644 index 00000000000000..1d9a4e48b650b3 --- /dev/null +++ b/vendor/bitflags/src/tests/from_name.rs @@ -0,0 +1,42 @@ +use super::*; + +use crate::Flags; + +#[test] +fn cases() { + case(Some(1), "A", TestFlags::from_name); + case(Some(1 << 1), "B", TestFlags::from_name); + case(Some(1 | 1 << 1 | 1 << 2), "ABC", TestFlags::from_name); + + case(None, "", TestFlags::from_name); + case(None, "a", TestFlags::from_name); + case(None, "0x1", TestFlags::from_name); + case(None, "A | B", TestFlags::from_name); + + case(Some(0), "ZERO", TestZero::from_name); + + case(Some(2), "二", TestUnicode::from_name); + + case(None, "_", TestExternal::from_name); + + case(None, "", TestExternal::from_name); +} + +#[track_caller] +fn case(expected: Option, input: &str, inherent: impl FnOnce(&str) -> Option) +where + ::Bits: std::fmt::Debug + PartialEq, +{ + assert_eq!( + expected, + inherent(input).map(|f| f.bits()), + "T::from_name({:?})", + input + ); + assert_eq!( + expected, + T::from_name(input).map(|f| f.bits()), + "Flags::from_name({:?})", + input + ); +} diff --git a/vendor/bitflags/src/tests/insert.rs b/vendor/bitflags/src/tests/insert.rs new file mode 100644 index 00000000000000..b18cd17235288c --- /dev/null +++ b/vendor/bitflags/src/tests/insert.rs @@ -0,0 +1,91 @@ +use super::*; + +use 
crate::Flags; + +#[test] +fn cases() { + case( + TestFlags::empty(), + &[ + (TestFlags::A, 1), + (TestFlags::A | TestFlags::B, 1 | 1 << 1), + (TestFlags::empty(), 0), + (TestFlags::from_bits_retain(1 << 3), 1 << 3), + ], + TestFlags::insert, + TestFlags::set, + ); + + case( + TestFlags::A, + &[ + (TestFlags::A, 1), + (TestFlags::empty(), 1), + (TestFlags::B, 1 | 1 << 1), + ], + TestFlags::insert, + TestFlags::set, + ); +} + +#[track_caller] +fn case( + value: T, + inputs: &[(T, T::Bits)], + mut inherent_insert: impl FnMut(&mut T, T), + mut inherent_set: impl FnMut(&mut T, T, bool), +) where + T::Bits: std::fmt::Debug + PartialEq + Copy, +{ + for (input, expected) in inputs { + assert_eq!( + *expected, + { + let mut value = value; + inherent_insert(&mut value, *input); + value + } + .bits(), + "{:?}.insert({:?})", + value, + input + ); + assert_eq!( + *expected, + { + let mut value = value; + Flags::insert(&mut value, *input); + value + } + .bits(), + "Flags::insert({:?}, {:?})", + value, + input + ); + + assert_eq!( + *expected, + { + let mut value = value; + inherent_set(&mut value, *input, true); + value + } + .bits(), + "{:?}.set({:?}, true)", + value, + input + ); + assert_eq!( + *expected, + { + let mut value = value; + Flags::set(&mut value, *input, true); + value + } + .bits(), + "Flags::set({:?}, {:?}, true)", + value, + input + ); + } +} diff --git a/vendor/bitflags/src/tests/intersection.rs b/vendor/bitflags/src/tests/intersection.rs new file mode 100644 index 00000000000000..10a8ae9fb6b6a4 --- /dev/null +++ b/vendor/bitflags/src/tests/intersection.rs @@ -0,0 +1,79 @@ +use super::*; + +use crate::Flags; + +#[test] +fn cases() { + case( + TestFlags::empty(), + &[(TestFlags::empty(), 0), (TestFlags::all(), 0)], + TestFlags::intersection, + ); + + case( + TestFlags::all(), + &[ + (TestFlags::all(), 1 | 1 << 1 | 1 << 2), + (TestFlags::A, 1), + (TestFlags::from_bits_retain(1 << 3), 0), + ], + TestFlags::intersection, + ); + + case( + TestFlags::from_bits_retain(1 << 3), + &[(TestFlags::from_bits_retain(1 << 3), 1 << 3)], + TestFlags::intersection, + ); + + case( + TestOverlapping::AB, + &[(TestOverlapping::BC, 1 << 1)], + TestOverlapping::intersection, + ); +} + +#[track_caller] +fn case + std::ops::BitAndAssign + Copy>( + value: T, + inputs: &[(T, T::Bits)], + mut inherent: impl FnMut(T, T) -> T, +) where + T::Bits: std::fmt::Debug + PartialEq + Copy, +{ + for (input, expected) in inputs { + assert_eq!( + *expected, + inherent(value, *input).bits(), + "{:?}.intersection({:?})", + value, + input + ); + assert_eq!( + *expected, + Flags::intersection(value, *input).bits(), + "Flags::intersection({:?}, {:?})", + value, + input + ); + assert_eq!( + *expected, + (value & *input).bits(), + "{:?} & {:?}", + value, + input + ); + assert_eq!( + *expected, + { + let mut value = value; + value &= *input; + value + } + .bits(), + "{:?} &= {:?}", + value, + input, + ); + } +} diff --git a/vendor/bitflags/src/tests/intersects.rs b/vendor/bitflags/src/tests/intersects.rs new file mode 100644 index 00000000000000..fe907981a2ad66 --- /dev/null +++ b/vendor/bitflags/src/tests/intersects.rs @@ -0,0 +1,91 @@ +use super::*; + +use crate::Flags; + +#[test] +fn cases() { + case( + TestFlags::empty(), + &[ + (TestFlags::empty(), false), + (TestFlags::A, false), + (TestFlags::B, false), + (TestFlags::C, false), + (TestFlags::from_bits_retain(1 << 3), false), + ], + TestFlags::intersects, + ); + + case( + TestFlags::A, + &[ + (TestFlags::empty(), false), + (TestFlags::A, true), + (TestFlags::B, false), + 
(TestFlags::C, false), + (TestFlags::ABC, true), + (TestFlags::from_bits_retain(1 << 3), false), + (TestFlags::from_bits_retain(1 | (1 << 3)), true), + ], + TestFlags::intersects, + ); + + case( + TestFlags::ABC, + &[ + (TestFlags::empty(), false), + (TestFlags::A, true), + (TestFlags::B, true), + (TestFlags::C, true), + (TestFlags::ABC, true), + (TestFlags::from_bits_retain(1 << 3), false), + ], + TestFlags::intersects, + ); + + case( + TestFlags::from_bits_retain(1 << 3), + &[ + (TestFlags::empty(), false), + (TestFlags::A, false), + (TestFlags::B, false), + (TestFlags::C, false), + (TestFlags::from_bits_retain(1 << 3), true), + ], + TestFlags::intersects, + ); + + case( + TestOverlapping::AB, + &[ + (TestOverlapping::AB, true), + (TestOverlapping::BC, true), + (TestOverlapping::from_bits_retain(1 << 1), true), + ], + TestOverlapping::intersects, + ); +} + +#[track_caller] +fn case( + value: T, + inputs: &[(T, bool)], + mut inherent: impl FnMut(&T, T) -> bool, +) { + for (input, expected) in inputs { + assert_eq!( + *expected, + inherent(&value, *input), + "{:?}.intersects({:?})", + value, + input + ); + assert_eq!( + *expected, + Flags::intersects(&value, *input), + "Flags::intersects({:?}, {:?})", + value, + input + ); + } +} diff --git a/vendor/bitflags/src/tests/is_all.rs b/vendor/bitflags/src/tests/is_all.rs new file mode 100644 index 00000000000000..382a458f610b0f --- /dev/null +++ b/vendor/bitflags/src/tests/is_all.rs @@ -0,0 +1,32 @@ +use super::*; + +use crate::Flags; + +#[test] +fn cases() { + case(false, TestFlags::empty(), TestFlags::is_all); + case(false, TestFlags::A, TestFlags::is_all); + + case(true, TestFlags::ABC, TestFlags::is_all); + + case( + true, + TestFlags::ABC | TestFlags::from_bits_retain(1 << 3), + TestFlags::is_all, + ); + + case(true, TestZero::empty(), TestZero::is_all); + + case(true, TestEmpty::empty(), TestEmpty::is_all); +} + +#[track_caller] +fn case(expected: bool, value: T, inherent: impl FnOnce(&T) -> bool) { + assert_eq!(expected, inherent(&value), "{:?}.is_all()", value); + assert_eq!( + expected, + Flags::is_all(&value), + "Flags::is_all({:?})", + value + ); +} diff --git a/vendor/bitflags/src/tests/is_empty.rs b/vendor/bitflags/src/tests/is_empty.rs new file mode 100644 index 00000000000000..92165f18e36bf0 --- /dev/null +++ b/vendor/bitflags/src/tests/is_empty.rs @@ -0,0 +1,31 @@ +use super::*; + +use crate::Flags; + +#[test] +fn cases() { + case(true, TestFlags::empty(), TestFlags::is_empty); + + case(false, TestFlags::A, TestFlags::is_empty); + case(false, TestFlags::ABC, TestFlags::is_empty); + case( + false, + TestFlags::from_bits_retain(1 << 3), + TestFlags::is_empty, + ); + + case(true, TestZero::empty(), TestZero::is_empty); + + case(true, TestEmpty::empty(), TestEmpty::is_empty); +} + +#[track_caller] +fn case(expected: bool, value: T, inherent: impl FnOnce(&T) -> bool) { + assert_eq!(expected, inherent(&value), "{:?}.is_empty()", value); + assert_eq!( + expected, + Flags::is_empty(&value), + "Flags::is_empty({:?})", + value + ); +} diff --git a/vendor/bitflags/src/tests/iter.rs b/vendor/bitflags/src/tests/iter.rs new file mode 100644 index 00000000000000..d4b2ea068915bc --- /dev/null +++ b/vendor/bitflags/src/tests/iter.rs @@ -0,0 +1,299 @@ +use super::*; + +use crate::Flags; + +#[test] +#[cfg(not(miri))] // Very slow in miri +fn roundtrip() { + for a in 0u8..=255 { + for b in 0u8..=255 { + let f = TestFlags::from_bits_retain(a | b); + + assert_eq!(f, f.iter().collect::()); + assert_eq!( + TestFlags::from_bits_truncate(f.bits()), + 
f.iter_names().map(|(_, f)| f).collect::() + ); + + let f = TestExternal::from_bits_retain(a | b); + + assert_eq!(f, f.iter().collect::()); + } + } +} + +mod collect { + use super::*; + + #[test] + fn cases() { + assert_eq!(0, [].into_iter().collect::().bits()); + + assert_eq!(1, [TestFlags::A,].into_iter().collect::().bits()); + + assert_eq!( + 1 | 1 << 1 | 1 << 2, + [TestFlags::A, TestFlags::B | TestFlags::C,] + .into_iter() + .collect::() + .bits() + ); + + assert_eq!( + 1 | 1 << 3, + [ + TestFlags::from_bits_retain(1 << 3), + TestFlags::empty(), + TestFlags::A, + ] + .into_iter() + .collect::() + .bits() + ); + + assert_eq!( + 1 << 5 | 1 << 7, + [ + TestExternal::empty(), + TestExternal::from_bits_retain(1 << 5), + TestExternal::from_bits_retain(1 << 7), + ] + .into_iter() + .collect::() + .bits() + ); + } +} + +mod iter { + use super::*; + + #[test] + fn cases() { + case(&[], TestFlags::empty(), TestFlags::iter); + + case(&[1], TestFlags::A, TestFlags::iter); + case(&[1, 1 << 1], TestFlags::A | TestFlags::B, TestFlags::iter); + case( + &[1, 1 << 1, 1 << 3], + TestFlags::A | TestFlags::B | TestFlags::from_bits_retain(1 << 3), + TestFlags::iter, + ); + + case(&[1, 1 << 1, 1 << 2], TestFlags::ABC, TestFlags::iter); + case( + &[1, 1 << 1, 1 << 2, 1 << 3], + TestFlags::ABC | TestFlags::from_bits_retain(1 << 3), + TestFlags::iter, + ); + + case( + &[1 | 1 << 1 | 1 << 2], + TestFlagsInvert::ABC, + TestFlagsInvert::iter, + ); + + case(&[], TestZero::ZERO, TestZero::iter); + + case( + &[1, 1 << 1, 1 << 2, 0b1111_1000], + TestExternal::all(), + TestExternal::iter, + ); + } + + #[track_caller] + fn case + Copy>( + expected: &[T::Bits], + value: T, + inherent: impl FnOnce(&T) -> crate::iter::Iter, + ) where + T::Bits: std::fmt::Debug + PartialEq, + { + assert_eq!( + expected, + inherent(&value).map(|f| f.bits()).collect::>(), + "{:?}.iter()", + value + ); + assert_eq!( + expected, + Flags::iter(&value).map(|f| f.bits()).collect::>(), + "Flags::iter({:?})", + value + ); + assert_eq!( + expected, + value.into_iter().map(|f| f.bits()).collect::>(), + "{:?}.into_iter()", + value + ); + } +} + +mod iter_names { + use super::*; + + #[test] + fn cases() { + case(&[], TestFlags::empty(), TestFlags::iter_names); + + case(&[("A", 1)], TestFlags::A, TestFlags::iter_names); + case( + &[("A", 1), ("B", 1 << 1)], + TestFlags::A | TestFlags::B, + TestFlags::iter_names, + ); + case( + &[("A", 1), ("B", 1 << 1)], + TestFlags::A | TestFlags::B | TestFlags::from_bits_retain(1 << 3), + TestFlags::iter_names, + ); + + case( + &[("A", 1), ("B", 1 << 1), ("C", 1 << 2)], + TestFlags::ABC, + TestFlags::iter_names, + ); + case( + &[("A", 1), ("B", 1 << 1), ("C", 1 << 2)], + TestFlags::ABC | TestFlags::from_bits_retain(1 << 3), + TestFlags::iter_names, + ); + + case( + &[("ABC", 1 | 1 << 1 | 1 << 2)], + TestFlagsInvert::ABC, + TestFlagsInvert::iter_names, + ); + + case(&[], TestZero::ZERO, TestZero::iter_names); + + case( + &[("A", 1)], + TestOverlappingFull::A, + TestOverlappingFull::iter_names, + ); + case( + &[("A", 1), ("D", 1 << 1)], + TestOverlappingFull::A | TestOverlappingFull::D, + TestOverlappingFull::iter_names, + ); + } + + #[track_caller] + fn case( + expected: &[(&'static str, T::Bits)], + value: T, + inherent: impl FnOnce(&T) -> crate::iter::IterNames, + ) where + T::Bits: std::fmt::Debug + PartialEq, + { + assert_eq!( + expected, + inherent(&value) + .map(|(n, f)| (n, f.bits())) + .collect::>(), + "{:?}.iter_names()", + value + ); + assert_eq!( + expected, + Flags::iter_names(&value) + .map(|(n, f)| (n, 
f.bits())) + .collect::>(), + "Flags::iter_names({:?})", + value + ); + } +} + +mod iter_defined_names { + use crate::Flags; + + #[test] + fn test_defined_names() { + bitflags! { + #[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] + struct TestFlags: u32 { + const A = 0b00000001; + const ZERO = 0; + const B = 0b00000010; + const C = 0b00000100; + const CC = Self::C.bits(); + const D = 0b10000100; + const ABC = Self::A.bits() | Self::B.bits() | Self::C.bits(); + const AB = Self::A.bits() | Self::B.bits(); + const AC = Self::A.bits() | Self::C.bits(); + const CB = Self::B.bits() | Self::C.bits(); + } + } + + // Test all named flags produced by the iterator + let all_named: Vec<(&'static str, TestFlags)> = TestFlags::iter_defined_names().collect(); + + // Verify all named flags are included + let expected_flags = vec![ + ("A", TestFlags::A), + ("ZERO", TestFlags::ZERO), + ("B", TestFlags::B), + ("C", TestFlags::C), + // Note: CC and C have the same bit value, but both are named flags + ("CC", TestFlags::CC), + ("D", TestFlags::D), + ("ABC", TestFlags::ABC), + ("AB", TestFlags::AB), + ("AC", TestFlags::AC), + ("CB", TestFlags::CB), + ]; + + assert_eq!( + all_named.len(), + expected_flags.len(), + "Should have 10 named flags" + ); + + // Verify each expected flag is in the result + for expected_flag in &expected_flags { + assert!( + all_named.contains(expected_flag), + "Missing flag: {:?}", + expected_flag + ); + } + + // Test if iterator order is consistent with definition order + let flags_in_order: Vec<(&'static str, TestFlags)> = + TestFlags::iter_defined_names().collect(); + assert_eq!( + flags_in_order, expected_flags, + "Flag order should match definition order" + ); + + // Test that iterator can be used multiple times + let first_iteration: Vec<(&'static str, TestFlags)> = + TestFlags::iter_defined_names().collect(); + let second_iteration: Vec<(&'static str, TestFlags)> = + TestFlags::iter_defined_names().collect(); + assert_eq!( + first_iteration, second_iteration, + "Multiple iterations should produce the same result" + ); + + // Test consistency with FLAGS constant + let flags_from_iter: std::collections::HashSet = TestFlags::iter_defined_names() + .map(|(_, f)| f.bits()) + .collect(); + + let flags_from_const: std::collections::HashSet = TestFlags::FLAGS + .iter() + .filter(|f| f.is_named()) + .map(|f| f.value().bits()) + .collect(); + + assert_eq!( + flags_from_iter, flags_from_const, + "iter_defined_names() should be consistent with named flags in FLAGS" + ); + } +} diff --git a/vendor/bitflags/src/tests/parser.rs b/vendor/bitflags/src/tests/parser.rs new file mode 100644 index 00000000000000..fb27225ecef604 --- /dev/null +++ b/vendor/bitflags/src/tests/parser.rs @@ -0,0 +1,332 @@ +use super::*; + +use crate::{parser::*, Flags}; + +#[test] +#[cfg(not(miri))] // Very slow in miri +fn roundtrip() { + let mut s = String::new(); + + for a in 0u8..=255 { + for b in 0u8..=255 { + let f = TestFlags::from_bits_retain(a | b); + + s.clear(); + to_writer(&f, &mut s).unwrap(); + + assert_eq!(f, from_str::(&s).unwrap()); + } + } +} + +#[test] +#[cfg(not(miri))] // Very slow in miri +fn roundtrip_truncate() { + let mut s = String::new(); + + for a in 0u8..=255 { + for b in 0u8..=255 { + let f = TestFlags::from_bits_retain(a | b); + + s.clear(); + to_writer_truncate(&f, &mut s).unwrap(); + + assert_eq!( + TestFlags::from_bits_truncate(f.bits()), + from_str_truncate::(&s).unwrap() + ); + } + } +} + +#[test] +#[cfg(not(miri))] // Very slow in miri +fn roundtrip_strict() { + let 
mut s = String::new(); + + for a in 0u8..=255 { + for b in 0u8..=255 { + let f = TestFlags::from_bits_retain(a | b); + + s.clear(); + to_writer_strict(&f, &mut s).unwrap(); + + let mut strict = TestFlags::empty(); + for (_, flag) in f.iter_names() { + strict |= flag; + } + let f = strict; + + if let Ok(s) = from_str_strict::(&s) { + assert_eq!(f, s); + } + } + } +} + +mod from_str { + use super::*; + + #[test] + fn valid() { + assert_eq!(0, from_str::("").unwrap().bits()); + + assert_eq!(1, from_str::("A").unwrap().bits()); + assert_eq!(1, from_str::(" A ").unwrap().bits()); + assert_eq!( + 1 | 1 << 1 | 1 << 2, + from_str::("A | B | C").unwrap().bits() + ); + assert_eq!( + 1 | 1 << 1 | 1 << 2, + from_str::("A\n|\tB\r\n| C ").unwrap().bits() + ); + assert_eq!( + 1 | 1 << 1 | 1 << 2, + from_str::("A|B|C").unwrap().bits() + ); + + assert_eq!(1 << 3, from_str::("0x8").unwrap().bits()); + assert_eq!(1 | 1 << 3, from_str::("A | 0x8").unwrap().bits()); + assert_eq!( + 1 | 1 << 1 | 1 << 3, + from_str::("0x1 | 0x8 | B").unwrap().bits() + ); + + assert_eq!( + 1 | 1 << 1, + from_str::("一 | 二").unwrap().bits() + ); + } + + #[test] + fn invalid() { + assert!(from_str::("a") + .unwrap_err() + .to_string() + .starts_with("unrecognized named flag")); + assert!(from_str::("A & B") + .unwrap_err() + .to_string() + .starts_with("unrecognized named flag")); + + assert!(from_str::("0xg") + .unwrap_err() + .to_string() + .starts_with("invalid hex flag")); + assert!(from_str::("0xffffffffffff") + .unwrap_err() + .to_string() + .starts_with("invalid hex flag")); + } +} + +mod to_writer { + use super::*; + + #[test] + fn cases() { + assert_eq!("", write(TestFlags::empty())); + assert_eq!("A", write(TestFlags::A)); + assert_eq!("A | B | C", write(TestFlags::all())); + assert_eq!("0x8", write(TestFlags::from_bits_retain(1 << 3))); + assert_eq!( + "A | 0x8", + write(TestFlags::A | TestFlags::from_bits_retain(1 << 3)) + ); + + assert_eq!("", write(TestZero::ZERO)); + + assert_eq!("ABC", write(TestFlagsInvert::all())); + + assert_eq!("0x1", write(TestOverlapping::from_bits_retain(1))); + + assert_eq!("A", write(TestOverlappingFull::C)); + assert_eq!( + "A | D", + write(TestOverlappingFull::C | TestOverlappingFull::D) + ); + } + + fn write(value: F) -> String + where + F::Bits: crate::parser::WriteHex, + { + let mut s = String::new(); + + to_writer(&value, &mut s).unwrap(); + s + } +} + +mod from_str_truncate { + use super::*; + + #[test] + fn valid() { + assert_eq!(0, from_str_truncate::("").unwrap().bits()); + + assert_eq!(1, from_str_truncate::("A").unwrap().bits()); + assert_eq!(1, from_str_truncate::(" A ").unwrap().bits()); + assert_eq!( + 1 | 1 << 1 | 1 << 2, + from_str_truncate::("A | B | C").unwrap().bits() + ); + assert_eq!( + 1 | 1 << 1 | 1 << 2, + from_str_truncate::("A\n|\tB\r\n| C ") + .unwrap() + .bits() + ); + assert_eq!( + 1 | 1 << 1 | 1 << 2, + from_str_truncate::("A|B|C").unwrap().bits() + ); + + assert_eq!(0, from_str_truncate::("0x8").unwrap().bits()); + assert_eq!(1, from_str_truncate::("A | 0x8").unwrap().bits()); + assert_eq!( + 1 | 1 << 1, + from_str_truncate::("0x1 | 0x8 | B") + .unwrap() + .bits() + ); + + assert_eq!( + 1 | 1 << 1, + from_str_truncate::("一 | 二").unwrap().bits() + ); + } +} + +mod to_writer_truncate { + use super::*; + + #[test] + fn cases() { + assert_eq!("", write(TestFlags::empty())); + assert_eq!("A", write(TestFlags::A)); + assert_eq!("A | B | C", write(TestFlags::all())); + assert_eq!("", write(TestFlags::from_bits_retain(1 << 3))); + assert_eq!( + "A", + 
write(TestFlags::A | TestFlags::from_bits_retain(1 << 3)) + ); + + assert_eq!("", write(TestZero::ZERO)); + + assert_eq!("ABC", write(TestFlagsInvert::all())); + + assert_eq!("0x1", write(TestOverlapping::from_bits_retain(1))); + + assert_eq!("A", write(TestOverlappingFull::C)); + assert_eq!( + "A | D", + write(TestOverlappingFull::C | TestOverlappingFull::D) + ); + } + + fn write(value: F) -> String + where + F::Bits: crate::parser::WriteHex, + { + let mut s = String::new(); + + to_writer_truncate(&value, &mut s).unwrap(); + s + } +} + +mod from_str_strict { + use super::*; + + #[test] + fn valid() { + assert_eq!(0, from_str_strict::("").unwrap().bits()); + + assert_eq!(1, from_str_strict::("A").unwrap().bits()); + assert_eq!(1, from_str_strict::(" A ").unwrap().bits()); + assert_eq!( + 1 | 1 << 1 | 1 << 2, + from_str_strict::("A | B | C").unwrap().bits() + ); + assert_eq!( + 1 | 1 << 1 | 1 << 2, + from_str_strict::("A\n|\tB\r\n| C ") + .unwrap() + .bits() + ); + assert_eq!( + 1 | 1 << 1 | 1 << 2, + from_str_strict::("A|B|C").unwrap().bits() + ); + + assert_eq!( + 1 | 1 << 1, + from_str_strict::("一 | 二").unwrap().bits() + ); + } + + #[test] + fn invalid() { + assert!(from_str_strict::("a") + .unwrap_err() + .to_string() + .starts_with("unrecognized named flag")); + assert!(from_str_strict::("A & B") + .unwrap_err() + .to_string() + .starts_with("unrecognized named flag")); + + assert!(from_str_strict::("0x1") + .unwrap_err() + .to_string() + .starts_with("invalid hex flag")); + assert!(from_str_strict::("0xg") + .unwrap_err() + .to_string() + .starts_with("invalid hex flag")); + assert!(from_str_strict::("0xffffffffffff") + .unwrap_err() + .to_string() + .starts_with("invalid hex flag")); + } +} + +mod to_writer_strict { + use super::*; + + #[test] + fn cases() { + assert_eq!("", write(TestFlags::empty())); + assert_eq!("A", write(TestFlags::A)); + assert_eq!("A | B | C", write(TestFlags::all())); + assert_eq!("", write(TestFlags::from_bits_retain(1 << 3))); + assert_eq!( + "A", + write(TestFlags::A | TestFlags::from_bits_retain(1 << 3)) + ); + + assert_eq!("", write(TestZero::ZERO)); + + assert_eq!("ABC", write(TestFlagsInvert::all())); + + assert_eq!("", write(TestOverlapping::from_bits_retain(1))); + + assert_eq!("A", write(TestOverlappingFull::C)); + assert_eq!( + "A | D", + write(TestOverlappingFull::C | TestOverlappingFull::D) + ); + } + + fn write(value: F) -> String + where + F::Bits: crate::parser::WriteHex, + { + let mut s = String::new(); + + to_writer_strict(&value, &mut s).unwrap(); + s + } +} diff --git a/vendor/bitflags/src/tests/remove.rs b/vendor/bitflags/src/tests/remove.rs new file mode 100644 index 00000000000000..574b1edbf228f2 --- /dev/null +++ b/vendor/bitflags/src/tests/remove.rs @@ -0,0 +1,100 @@ +use super::*; + +use crate::Flags; + +#[test] +fn cases() { + case( + TestFlags::empty(), + &[ + (TestFlags::A, 0), + (TestFlags::empty(), 0), + (TestFlags::from_bits_retain(1 << 3), 0), + ], + TestFlags::remove, + TestFlags::set, + ); + + case( + TestFlags::A, + &[ + (TestFlags::A, 0), + (TestFlags::empty(), 1), + (TestFlags::B, 1), + ], + TestFlags::remove, + TestFlags::set, + ); + + case( + TestFlags::ABC, + &[ + (TestFlags::A, 1 << 1 | 1 << 2), + (TestFlags::A | TestFlags::C, 1 << 1), + ], + TestFlags::remove, + TestFlags::set, + ); +} + +#[track_caller] +fn case( + value: T, + inputs: &[(T, T::Bits)], + mut inherent_remove: impl FnMut(&mut T, T), + mut inherent_set: impl FnMut(&mut T, T, bool), +) where + T::Bits: std::fmt::Debug + PartialEq + Copy, +{ + for (input, 
expected) in inputs { + assert_eq!( + *expected, + { + let mut value = value; + inherent_remove(&mut value, *input); + value + } + .bits(), + "{:?}.remove({:?})", + value, + input + ); + assert_eq!( + *expected, + { + let mut value = value; + Flags::remove(&mut value, *input); + value + } + .bits(), + "Flags::remove({:?}, {:?})", + value, + input + ); + + assert_eq!( + *expected, + { + let mut value = value; + inherent_set(&mut value, *input, false); + value + } + .bits(), + "{:?}.set({:?}, false)", + value, + input + ); + assert_eq!( + *expected, + { + let mut value = value; + Flags::set(&mut value, *input, false); + value + } + .bits(), + "Flags::set({:?}, {:?}, false)", + value, + input + ); + } +} diff --git a/vendor/bitflags/src/tests/symmetric_difference.rs b/vendor/bitflags/src/tests/symmetric_difference.rs new file mode 100644 index 00000000000000..75e9123ac5da42 --- /dev/null +++ b/vendor/bitflags/src/tests/symmetric_difference.rs @@ -0,0 +1,110 @@ +use super::*; + +use crate::Flags; + +#[test] +fn cases() { + case( + TestFlags::empty(), + &[ + (TestFlags::empty(), 0), + (TestFlags::all(), 1 | 1 << 1 | 1 << 2), + (TestFlags::from_bits_retain(1 << 3), 1 << 3), + ], + TestFlags::symmetric_difference, + TestFlags::toggle, + ); + + case( + TestFlags::A, + &[ + (TestFlags::empty(), 1), + (TestFlags::A, 0), + (TestFlags::all(), 1 << 1 | 1 << 2), + ], + TestFlags::symmetric_difference, + TestFlags::toggle, + ); + + case( + TestFlags::A | TestFlags::B | TestFlags::from_bits_retain(1 << 3), + &[ + (TestFlags::ABC, 1 << 2 | 1 << 3), + (TestFlags::from_bits_retain(1 << 3), 1 | 1 << 1), + ], + TestFlags::symmetric_difference, + TestFlags::toggle, + ); +} + +#[track_caller] +fn case + std::ops::BitXorAssign + Copy>( + value: T, + inputs: &[(T, T::Bits)], + mut inherent_sym_diff: impl FnMut(T, T) -> T, + mut inherent_toggle: impl FnMut(&mut T, T), +) where + T::Bits: std::fmt::Debug + PartialEq + Copy, +{ + for (input, expected) in inputs { + assert_eq!( + *expected, + inherent_sym_diff(value, *input).bits(), + "{:?}.symmetric_difference({:?})", + value, + input + ); + assert_eq!( + *expected, + Flags::symmetric_difference(value, *input).bits(), + "Flags::symmetric_difference({:?}, {:?})", + value, + input + ); + assert_eq!( + *expected, + (value ^ *input).bits(), + "{:?} ^ {:?}", + value, + input + ); + assert_eq!( + *expected, + { + let mut value = value; + value ^= *input; + value + } + .bits(), + "{:?} ^= {:?}", + value, + input, + ); + + assert_eq!( + *expected, + { + let mut value = value; + inherent_toggle(&mut value, *input); + value + } + .bits(), + "{:?}.toggle({:?})", + value, + input, + ); + + assert_eq!( + *expected, + { + let mut value = value; + Flags::toggle(&mut value, *input); + value + } + .bits(), + "{:?}.toggle({:?})", + value, + input, + ); + } +} diff --git a/vendor/bitflags/src/tests/truncate.rs b/vendor/bitflags/src/tests/truncate.rs new file mode 100644 index 00000000000000..e38df48dc59870 --- /dev/null +++ b/vendor/bitflags/src/tests/truncate.rs @@ -0,0 +1,29 @@ +use super::*; + +use crate::Flags; + +#[test] +fn cases() { + case( + TestFlags::ABC | TestFlags::from_bits_retain(1 << 3), + TestFlags::ABC, + ); + + case(TestZero::empty(), TestZero::empty()); + + case(TestZero::all(), TestZero::all()); + + case( + TestFlags::from_bits_retain(1 << 3) | TestFlags::all(), + TestFlags::all(), + ); +} + +#[track_caller] +fn case(mut before: T, after: T) +where + T: std::fmt::Debug + PartialEq + Copy, +{ + before.truncate(); + assert_eq!(before, after, "{:?}.truncate()", 
before);
+}
diff --git a/vendor/bitflags/src/tests/union.rs b/vendor/bitflags/src/tests/union.rs
new file mode 100644
index 00000000000000..6190681931cac1
--- /dev/null
+++ b/vendor/bitflags/src/tests/union.rs
@@ -0,0 +1,71 @@
+use super::*;
+
+use crate::Flags;
+
+#[test]
+fn cases() {
+    case(
+        TestFlags::empty(),
+        &[
+            (TestFlags::A, 1),
+            (TestFlags::all(), 1 | 1 << 1 | 1 << 2),
+            (TestFlags::empty(), 0),
+            (TestFlags::from_bits_retain(1 << 3), 1 << 3),
+        ],
+        TestFlags::union,
+    );
+
+    case(
+        TestFlags::A | TestFlags::C,
+        &[
+            (TestFlags::A | TestFlags::B, 1 | 1 << 1 | 1 << 2),
+            (TestFlags::A, 1 | 1 << 2),
+        ],
+        TestFlags::union,
+    );
+}
+
+#[track_caller]
+fn case<T: Flags + std::fmt::Debug + std::ops::BitOr<Output = T> + std::ops::BitOrAssign + Copy>(
+    value: T,
+    inputs: &[(T, T::Bits)],
+    mut inherent: impl FnMut(T, T) -> T,
+) where
+    T::Bits: std::fmt::Debug + PartialEq + Copy,
+{
+    for (input, expected) in inputs {
+        assert_eq!(
+            *expected,
+            inherent(value, *input).bits(),
+            "{:?}.union({:?})",
+            value,
+            input
+        );
+        assert_eq!(
+            *expected,
+            Flags::union(value, *input).bits(),
+            "Flags::union({:?}, {:?})",
+            value,
+            input
+        );
+        assert_eq!(
+            *expected,
+            (value | *input).bits(),
+            "{:?} | {:?}",
+            value,
+            input
+        );
+        assert_eq!(
+            *expected,
+            {
+                let mut value = value;
+                value |= *input;
+                value
+            }
+            .bits(),
+            "{:?} |= {:?}",
+            value,
+            input,
+        );
+    }
+}
diff --git a/vendor/bitflags/src/tests/unknown.rs b/vendor/bitflags/src/tests/unknown.rs
new file mode 100644
index 00000000000000..020f7e927e242d
--- /dev/null
+++ b/vendor/bitflags/src/tests/unknown.rs
@@ -0,0 +1,40 @@
+use super::*;
+
+use crate::Flags;
+
+#[test]
+fn cases() {
+    case(false, TestFlags::empty(), TestFlags::contains_unknown_bits);
+    case(false, TestFlags::A, TestFlags::contains_unknown_bits);
+
+    case(
+        true,
+        TestFlags::ABC | TestFlags::from_bits_retain(1 << 3),
+        TestFlags::contains_unknown_bits,
+    );
+
+    case(
+        true,
+        TestFlags::empty() | TestFlags::from_bits_retain(1 << 3),
+        TestFlags::contains_unknown_bits,
+    );
+
+    case(false, TestFlags::all(), TestFlags::contains_unknown_bits);
+
+    case(false, TestZero::empty(), TestZero::contains_unknown_bits);
+}
+#[track_caller]
+fn case<T: Flags + std::fmt::Debug>(expected: bool, value: T, inherent: impl FnOnce(&T) -> bool) {
+    assert_eq!(
+        expected,
+        inherent(&value),
+        "{:?}.contains_unknown_bits()",
+        value
+    );
+    assert_eq!(
+        expected,
+        Flags::contains_unknown_bits(&value),
+        "Flags::contains_unknown_bits({:?})",
+        value
+    );
+}
diff --git a/vendor/bitflags/src/traits.rs b/vendor/bitflags/src/traits.rs
new file mode 100644
index 00000000000000..efb438739fb602
--- /dev/null
+++ b/vendor/bitflags/src/traits.rs
@@ -0,0 +1,457 @@
+use core::{
+    fmt,
+    ops::{BitAnd, BitOr, BitXor, Not},
+};
+
+use crate::{
+    iter,
+    parser::{ParseError, ParseHex, WriteHex},
+};
+
+/**
+A defined flags value that may be named or unnamed.
+*/
+#[derive(Debug)]
+pub struct Flag<B> {
+    name: &'static str,
+    value: B,
+}
+
+impl<B> Flag<B> {
+    /**
+    Define a flag.
+
+    If `name` is non-empty then the flag is named, otherwise it's unnamed.
+    */
+    pub const fn new(name: &'static str, value: B) -> Self {
+        Flag { name, value }
+    }
+
+    /**
+    Get the name of this flag.
+
+    If the flag is unnamed then the returned string will be empty.
+    */
+    pub const fn name(&self) -> &'static str {
+        self.name
+    }
+
+    /**
+    Get the flags value of this flag.
+    */
+    pub const fn value(&self) -> &B {
+        &self.value
+    }
+
+    /**
+    Whether the flag is named.
+
+    If [`Flag::name`] returns a non-empty string then this method will return `true`.
+ */ + pub const fn is_named(&self) -> bool { + !self.name.is_empty() + } + + /** + Whether the flag is unnamed. + + If [`Flag::name`] returns a non-empty string then this method will return `false`. + */ + pub const fn is_unnamed(&self) -> bool { + self.name.is_empty() + } +} + +/** +A set of defined flags using a bits type as storage. + +## Implementing `Flags` + +This trait is implemented by the [`bitflags`](macro.bitflags.html) macro: + +``` +use bitflags::bitflags; + +bitflags! { + struct MyFlags: u8 { + const A = 1; + const B = 1 << 1; + } +} +``` + +It can also be implemented manually: + +``` +use bitflags::{Flag, Flags}; + +struct MyFlags(u8); + +impl Flags for MyFlags { + const FLAGS: &'static [Flag] = &[ + Flag::new("A", MyFlags(1)), + Flag::new("B", MyFlags(1 << 1)), + ]; + + type Bits = u8; + + fn from_bits_retain(bits: Self::Bits) -> Self { + MyFlags(bits) + } + + fn bits(&self) -> Self::Bits { + self.0 + } +} +``` + +## Using `Flags` + +The `Flags` trait can be used generically to work with any flags types. In this example, +we can count the number of defined named flags: + +``` +# use bitflags::{bitflags, Flags}; +fn defined_flags() -> usize { + F::FLAGS.iter().filter(|f| f.is_named()).count() +} + +bitflags! { + struct MyFlags: u8 { + const A = 1; + const B = 1 << 1; + const C = 1 << 2; + + const _ = !0; + } +} + +assert_eq!(3, defined_flags::()); +``` +*/ +pub trait Flags: Sized + 'static { + /// The set of defined flags. + const FLAGS: &'static [Flag]; + + /// The underlying bits type. + type Bits: Bits; + + /// Get a flags value with all bits unset. + fn empty() -> Self { + Self::from_bits_retain(Self::Bits::EMPTY) + } + + /// Get a flags value with all known bits set. + fn all() -> Self { + let mut truncated = Self::Bits::EMPTY; + + for flag in Self::FLAGS.iter() { + truncated = truncated | flag.value().bits(); + } + + Self::from_bits_retain(truncated) + } + + /// This method will return `true` if any unknown bits are set. + fn contains_unknown_bits(&self) -> bool { + Self::all().bits() & self.bits() != self.bits() + } + + /// Get the underlying bits value. + /// + /// The returned value is exactly the bits set in this flags value. + fn bits(&self) -> Self::Bits; + + /// Convert from a bits value. + /// + /// This method will return `None` if any unknown bits are set. + fn from_bits(bits: Self::Bits) -> Option { + let truncated = Self::from_bits_truncate(bits); + + if truncated.bits() == bits { + Some(truncated) + } else { + None + } + } + + /// Convert from a bits value, unsetting any unknown bits. + fn from_bits_truncate(bits: Self::Bits) -> Self { + Self::from_bits_retain(bits & Self::all().bits()) + } + + /// Convert from a bits value exactly. + fn from_bits_retain(bits: Self::Bits) -> Self; + + /// Get a flags value with the bits of a flag with the given name set. + /// + /// This method will return `None` if `name` is empty or doesn't + /// correspond to any named flag. + fn from_name(name: &str) -> Option { + // Don't parse empty names as empty flags + if name.is_empty() { + return None; + } + + for flag in Self::FLAGS { + if flag.name() == name { + return Some(Self::from_bits_retain(flag.value().bits())); + } + } + + None + } + + /// Yield a set of contained flags values. + /// + /// Each yielded flags value will correspond to a defined named flag. Any unknown bits + /// will be yielded together as a final flags value. + fn iter(&self) -> iter::Iter { + iter::Iter::new(self) + } + + /// Yield a set of contained named flags values. 
+ /// + /// This method is like [`Flags::iter`], except only yields bits in contained named flags. + /// Any unknown bits, or bits not corresponding to a contained flag will not be yielded. + fn iter_names(&self) -> iter::IterNames { + iter::IterNames::new(self) + } + + /// Yield a set of all named flags defined by [`Self::FLAGS`]. + fn iter_defined_names() -> iter::IterDefinedNames { + iter::IterDefinedNames::new() + } + + /// Whether all bits in this flags value are unset. + fn is_empty(&self) -> bool { + self.bits() == Self::Bits::EMPTY + } + + /// Whether all known bits in this flags value are set. + fn is_all(&self) -> bool { + // NOTE: We check against `Self::all` here, not `Self::Bits::ALL` + // because the set of all flags may not use all bits + Self::all().bits() | self.bits() == self.bits() + } + + /// Whether any set bits in a source flags value are also set in a target flags value. + fn intersects(&self, other: Self) -> bool + where + Self: Sized, + { + self.bits() & other.bits() != Self::Bits::EMPTY + } + + /// Whether all set bits in a source flags value are also set in a target flags value. + fn contains(&self, other: Self) -> bool + where + Self: Sized, + { + self.bits() & other.bits() == other.bits() + } + + /// Remove any unknown bits from the flags. + fn truncate(&mut self) + where + Self: Sized, + { + *self = Self::from_bits_truncate(self.bits()); + } + + /// The bitwise or (`|`) of the bits in two flags values. + fn insert(&mut self, other: Self) + where + Self: Sized, + { + *self = Self::from_bits_retain(self.bits()).union(other); + } + + /// The intersection of a source flags value with the complement of a target flags value (`&!`). + /// + /// This method is not equivalent to `self & !other` when `other` has unknown bits set. + /// `remove` won't truncate `other`, but the `!` operator will. + fn remove(&mut self, other: Self) + where + Self: Sized, + { + *self = Self::from_bits_retain(self.bits()).difference(other); + } + + /// The bitwise exclusive-or (`^`) of the bits in two flags values. + fn toggle(&mut self, other: Self) + where + Self: Sized, + { + *self = Self::from_bits_retain(self.bits()).symmetric_difference(other); + } + + /// Call [`Flags::insert`] when `value` is `true` or [`Flags::remove`] when `value` is `false`. + fn set(&mut self, other: Self, value: bool) + where + Self: Sized, + { + if value { + self.insert(other); + } else { + self.remove(other); + } + } + + /// Unsets all bits in the flags. + fn clear(&mut self) + where + Self: Sized, + { + *self = Self::empty(); + } + + /// The bitwise and (`&`) of the bits in two flags values. + #[must_use] + fn intersection(self, other: Self) -> Self { + Self::from_bits_retain(self.bits() & other.bits()) + } + + /// The bitwise or (`|`) of the bits in two flags values. + #[must_use] + fn union(self, other: Self) -> Self { + Self::from_bits_retain(self.bits() | other.bits()) + } + + /// The intersection of a source flags value with the complement of a target flags value (`&!`). + /// + /// This method is not equivalent to `self & !other` when `other` has unknown bits set. + /// `difference` won't truncate `other`, but the `!` operator will. + #[must_use] + fn difference(self, other: Self) -> Self { + Self::from_bits_retain(self.bits() & !other.bits()) + } + + /// The bitwise exclusive-or (`^`) of the bits in two flags values. 
+    #[must_use]
+    fn symmetric_difference(self, other: Self) -> Self {
+        Self::from_bits_retain(self.bits() ^ other.bits())
+    }
+
+    /// The bitwise negation (`!`) of the bits in a flags value, truncating the result.
+    #[must_use]
+    fn complement(self) -> Self {
+        Self::from_bits_truncate(!self.bits())
+    }
+}
+
+/**
+A bits type that can be used as storage for a flags type.
+*/
+pub trait Bits:
+    Clone
+    + Copy
+    + PartialEq
+    + BitAnd<Output = Self>
+    + BitOr<Output = Self>
+    + BitXor<Output = Self>
+    + Not<Output = Self>
+    + Sized
+    + 'static
+{
+    /// A value with all bits unset.
+    const EMPTY: Self;
+
+    /// A value with all bits set.
+    const ALL: Self;
+}
+
+// Not re-exported: prevent custom `Bits` impls being used in the `bitflags!` macro,
+// or they may fail to compile based on crate features
+pub trait Primitive {}
+
+macro_rules! impl_bits {
+    ($($u:ty, $i:ty,)*) => {
+        $(
+            impl Bits for $u {
+                const EMPTY: $u = 0;
+                const ALL: $u = <$u>::MAX;
+            }
+
+            impl Bits for $i {
+                const EMPTY: $i = 0;
+                const ALL: $i = <$u>::MAX as $i;
+            }
+
+            impl ParseHex for $u {
+                fn parse_hex(input: &str) -> Result<Self, ParseError> {
+                    <$u>::from_str_radix(input, 16).map_err(|_| ParseError::invalid_hex_flag(input))
+                }
+            }
+
+            impl ParseHex for $i {
+                fn parse_hex(input: &str) -> Result<Self, ParseError> {
+                    <$i>::from_str_radix(input, 16).map_err(|_| ParseError::invalid_hex_flag(input))
+                }
+            }
+
+            impl WriteHex for $u {
+                fn write_hex<W: fmt::Write>(&self, mut writer: W) -> fmt::Result {
+                    write!(writer, "{:x}", self)
+                }
+            }
+
+            impl WriteHex for $i {
+                fn write_hex<W: fmt::Write>(&self, mut writer: W) -> fmt::Result {
+                    write!(writer, "{:x}", self)
+                }
+            }
+
+            impl Primitive for $i {}
+            impl Primitive for $u {}
+        )*
+    }
+}
+
+impl_bits! {
+    u8, i8,
+    u16, i16,
+    u32, i32,
+    u64, i64,
+    u128, i128,
+    usize, isize,
+}
+
+/// A trait for referencing the `bitflags`-owned internal type
+/// without exposing it publicly.
+pub trait PublicFlags {
+    /// The type of the underlying storage.
+    type Primitive: Primitive;
+
+    /// The type of the internal field on the generated flags type.
+    type Internal;
+}
+
+#[doc(hidden)]
+#[deprecated(note = "use the `Flags` trait instead")]
+pub trait BitFlags: ImplementedByBitFlagsMacro + Flags {
+    /// An iterator over enabled flags in an instance of the type.
+    type Iter: Iterator<Item = Self>;
+
+    /// An iterator over the raw names and bits for enabled flags in an instance of the type.
+    type IterNames: Iterator<Item = (&'static str, Self)>;
+}
+
+#[allow(deprecated)]
+impl<B: Flags> BitFlags for B {
+    type Iter = iter::Iter<B>;
+    type IterNames = iter::IterNames<B>;
+}
+
+impl<B: Flags> ImplementedByBitFlagsMacro for B {}
+
+/// A marker trait that signals that an implementation of `BitFlags` came from the `bitflags!` macro.
+///
+/// There's nothing stopping an end-user from implementing this trait, but we don't guarantee their
+/// manual implementations won't break between non-breaking releases.
+#[doc(hidden)] +pub trait ImplementedByBitFlagsMacro {} + +pub(crate) mod __private { + pub use super::{ImplementedByBitFlagsMacro, PublicFlags}; +} diff --git a/vendor/cexpr/.cargo-checksum.json b/vendor/cexpr/.cargo-checksum.json new file mode 100644 index 00000000000000..2c634bf62c7229 --- /dev/null +++ b/vendor/cexpr/.cargo-checksum.json @@ -0,0 +1 @@ +{"files":{".cargo_vcs_info.json":"f5fa050aba66bc280e9163f1c2e309f87dfb3a0dc4a511b1379d767dc42bf4d1",".github/workflows/ci.yml":"ed1d33f83f25e920c5ecaec59f51fd9209fcf1da912cbff4e5a6d7da6b737922","Cargo.toml":"3300e6f2f5fc184c613a78251df3d1333530c9b54193e137b75c88f6db5a6fa6","Cargo.toml.orig":"cb6c93b8f4c1b681296427608ca2e7c6772ba3a6b824df6d968ed027afb1d851","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"d9771b8c6cf4426d3846de54c1febe20907f1eeadf7adfb5ade89a83bd9ea77f","bors.toml":"1c81ede536a37edd30fe4e622ff0531b25372403ac9475a5d6c50f14156565a2","rustfmt.toml":"d8e7f616455a670ba75e3e94bf6f88f4c168c481664d12501820c7dfff5c3cc2","src/expr.rs":"dad9327dac3af9d2f5818937aac91000e02d835dc600da6185c06e9e12047b1e","src/lib.rs":"ff218b9b734ab2eaef813a6ee3a907cb5cd71d483dfaa28d44926824a5b6d804","src/literal.rs":"6fdefc0357b8a14444df21b05c90556861dc0466e63a57669786f3ef3a3dc1c3","src/token.rs":"cd1ba6315b0137de9a0711670dd1840ac76c41f3b88dcd1a93ad77e1800c703f","tests/clang.rs":"5bb9807f35f760065d15cb9dfb7d8b79c2f734aef7ba5fe3737154155ed8ee73","tests/input/chars.h":"69c8141870872b795b5174bad125b748732c2b01d0e98ffcfc37b19f3f791f69","tests/input/fail.h":"b0b6cffd2dd17410b5eb02ee79ab75754820480b960db8a9866cc9983bd36b65","tests/input/floats.h":"28ec664e793c494e1a31f3bc5b790014e9921fc741bf475a86319b9a9eee5915","tests/input/int_signed.h":"934199eded85dd7820ca08c0beb1381ee6d9339970d2720a69c23025571707ce","tests/input/int_unsigned.h":"7b8023ba468ec76b184912692bc40e8fbcdd92ad86ec5a7c0dbcb02f2b8d961d","tests/input/strings.h":"2dd11bc066f34e8cb1916a28353e9e9a3a21cd406651b2f94fc47e89c95d9cba","tests/input/test_llvm_bug_9069.h":"8d9ae1d1eadc8f6d5c14296f984547fe894d0f2ce5cd6d7aa8caad40a56bc5e1"},"package":"6fac387a98bb7c37292057cffc56d62ecb629900026402633ae9160df93a8766"} \ No newline at end of file diff --git a/vendor/cexpr/.cargo_vcs_info.json b/vendor/cexpr/.cargo_vcs_info.json new file mode 100644 index 00000000000000..72d29e1e7693d1 --- /dev/null +++ b/vendor/cexpr/.cargo_vcs_info.json @@ -0,0 +1,5 @@ +{ + "git": { + "sha1": "c7ccdfbc37b508cfda1171ab4f89afaeb72e82f3" + } +} diff --git a/vendor/cexpr/.github/workflows/ci.yml b/vendor/cexpr/.github/workflows/ci.yml new file mode 100644 index 00000000000000..8af3b706469e7c --- /dev/null +++ b/vendor/cexpr/.github/workflows/ci.yml @@ -0,0 +1,31 @@ +name: CI +on: + push: + branches: + - master + pull_request: + branches: + - master + + +jobs: + build_and_test: + name: Build and Test + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + + - name: Install LLVM and Clang + uses: KyleMayes/install-llvm-action@v1 + with: + version: "11.0" + directory: ${{ runner.temp }}/llvm-11.0 + + - uses: actions-rs/toolchain@v1 + with: + toolchain: stable + + - uses: actions-rs/cargo@v1 + with: + command: test + args: --verbose --all diff --git a/vendor/cexpr/Cargo.toml b/vendor/cexpr/Cargo.toml new file mode 100644 index 00000000000000..4956001cdda7af --- /dev/null +++ b/vendor/cexpr/Cargo.toml @@ -0,0 +1,29 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal 
compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies. +# +# If you are reading this file be aware that the original Cargo.toml +# will likely look very different (and much more reasonable). +# See Cargo.toml.orig for the original contents. + +[package] +edition = "2018" +name = "cexpr" +version = "0.6.0" +authors = ["Jethro Beekman "] +description = "A C expression parser and evaluator" +documentation = "https://docs.rs/cexpr/" +keywords = ["C", "expression", "parser"] +license = "Apache-2.0/MIT" +repository = "https://github.com/jethrogb/rust-cexpr" +[dependencies.nom] +version = "7" +features = ["std"] +default-features = false +[dev-dependencies.clang-sys] +version = ">= 0.13.0, < 0.29.0" +[badges.travis-ci] +repository = "jethrogb/rust-cexpr" diff --git a/vendor/cexpr/LICENSE-APACHE b/vendor/cexpr/LICENSE-APACHE new file mode 100644 index 00000000000000..16fe87b06e802f --- /dev/null +++ b/vendor/cexpr/LICENSE-APACHE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/vendor/cexpr/LICENSE-MIT b/vendor/cexpr/LICENSE-MIT new file mode 100644 index 00000000000000..ed958e7ade0fc4 --- /dev/null +++ b/vendor/cexpr/LICENSE-MIT @@ -0,0 +1,25 @@ +(C) Copyright 2016 Jethro G. Beekman + +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. diff --git a/vendor/cexpr/bors.toml b/vendor/cexpr/bors.toml new file mode 100644 index 00000000000000..ca08e818bf3e37 --- /dev/null +++ b/vendor/cexpr/bors.toml @@ -0,0 +1,3 @@ +status = [ + "continuous-integration/travis-ci/push", +] diff --git a/vendor/cexpr/rustfmt.toml b/vendor/cexpr/rustfmt.toml new file mode 100644 index 00000000000000..32a9786fa1c4a9 --- /dev/null +++ b/vendor/cexpr/rustfmt.toml @@ -0,0 +1 @@ +edition = "2018" diff --git a/vendor/cexpr/src/expr.rs b/vendor/cexpr/src/expr.rs new file mode 100644 index 00000000000000..7f7e458bd4639b --- /dev/null +++ b/vendor/cexpr/src/expr.rs @@ -0,0 +1,610 @@ +// (C) Copyright 2016 Jethro G. Beekman +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. +//! Evaluating C expressions from tokens. +//! +//! Numerical operators are supported. All numerical values are treated as +//! `i64` or `f64`. Type casting is not supported. `i64` are converted to +//! `f64` when used in conjunction with a `f64`. 
Right shifts are always +//! arithmetic shifts. +//! +//! The `sizeof` operator is not supported. +//! +//! String concatenation is supported, but width prefixes are ignored; all +//! strings are treated as narrow strings. +//! +//! Use the `IdentifierParser` to substitute identifiers found in expressions. + +use std::collections::HashMap; +use std::num::Wrapping; +use std::ops::{ + AddAssign, BitAndAssign, BitOrAssign, BitXorAssign, DivAssign, MulAssign, RemAssign, ShlAssign, + ShrAssign, SubAssign, +}; + +use crate::literal::{self, CChar}; +use crate::token::{Kind as TokenKind, Token}; +use crate::ToCexprResult; +use nom::branch::alt; +use nom::combinator::{complete, map, map_opt}; +use nom::multi::{fold_many0, many0, separated_list0}; +use nom::sequence::{delimited, pair, preceded}; +use nom::*; + +/// Expression parser/evaluator that supports identifiers. +#[derive(Debug)] +pub struct IdentifierParser<'ident> { + identifiers: &'ident HashMap, EvalResult>, +} +#[derive(Copy, Clone)] +struct PRef<'a>(&'a IdentifierParser<'a>); + +/// A shorthand for the type of cexpr expression evaluation results. +pub type CResult<'a, R> = IResult<&'a [Token], R, crate::Error<&'a [Token]>>; + +/// The result of parsing a literal or evaluating an expression. +#[derive(Debug, Clone, PartialEq)] +#[allow(missing_docs)] +pub enum EvalResult { + Int(Wrapping), + Float(f64), + Char(CChar), + Str(Vec), + Invalid, +} + +macro_rules! result_opt ( + (fn $n:ident: $e:ident -> $t:ty) => ( + #[allow(dead_code)] + #[allow(clippy::wrong_self_convention)] + fn $n(self) -> Option<$t> { + if let EvalResult::$e(v) = self { + Some(v) + } else { + None + } + } + ); +); + +impl EvalResult { + result_opt!(fn as_int: Int -> Wrapping); + result_opt!(fn as_float: Float -> f64); + result_opt!(fn as_char: Char -> CChar); + result_opt!(fn as_str: Str -> Vec); + + #[allow(clippy::wrong_self_convention)] + fn as_numeric(self) -> Option { + match self { + EvalResult::Int(_) | EvalResult::Float(_) => Some(self), + _ => None, + } + } +} + +impl From> for EvalResult { + fn from(s: Vec) -> EvalResult { + EvalResult::Str(s) + } +} + +// =========================================== +// ============= Clang tokens ================ +// =========================================== + +macro_rules! 
exact_token ( + ($k:ident, $c:expr) => ({ + move |input: &[Token]| { + if input.is_empty() { + let res: CResult<'_, &[u8]> = Err(crate::nom::Err::Incomplete(Needed::new($c.len()))); + res + } else { + if input[0].kind==TokenKind::$k && &input[0].raw[..]==$c { + Ok((&input[1..], &input[0].raw[..])) + } else { + Err(crate::nom::Err::Error((input, crate::ErrorKind::ExactToken(TokenKind::$k,$c)).into())) + } + } + } + }); +); + +fn identifier_token(input: &[Token]) -> CResult<'_, &[u8]> { + if input.is_empty() { + let res: CResult<'_, &[u8]> = Err(nom::Err::Incomplete(Needed::new(1))); + res + } else { + if input[0].kind == TokenKind::Identifier { + Ok((&input[1..], &input[0].raw[..])) + } else { + Err(crate::nom::Err::Error((input, crate::ErrorKind::TypedToken(TokenKind::Identifier)).into())) + } + } +} + +fn p(c: &'static str) -> impl Fn(&[Token]) -> CResult<'_, &[u8]> { + exact_token!(Punctuation, c.as_bytes()) +} + +fn one_of_punctuation(c: &'static [&'static str]) -> impl Fn(&[Token]) -> CResult<'_, &[u8]> { + move |input| { + if input.is_empty() { + let min = c + .iter() + .map(|opt| opt.len()) + .min() + .expect("at least one option"); + Err(crate::nom::Err::Incomplete(Needed::new(min))) + } else if input[0].kind == TokenKind::Punctuation + && c.iter().any(|opt| opt.as_bytes() == &input[0].raw[..]) + { + Ok((&input[1..], &input[0].raw[..])) + } else { + Err(crate::nom::Err::Error( + ( + input, + crate::ErrorKind::ExactTokens(TokenKind::Punctuation, c), + ) + .into(), + )) + } + } +} + +// ================================================== +// ============= Numeric expressions ================ +// ================================================== + +impl<'a> AddAssign<&'a EvalResult> for EvalResult { + fn add_assign(&mut self, rhs: &'a EvalResult) { + use self::EvalResult::*; + *self = match (&*self, rhs) { + (&Int(a), &Int(b)) => Int(a + b), + (&Float(a), &Int(b)) => Float(a + (b.0 as f64)), + (&Int(a), &Float(b)) => Float(a.0 as f64 + b), + (&Float(a), &Float(b)) => Float(a + b), + _ => Invalid, + }; + } +} +impl<'a> BitAndAssign<&'a EvalResult> for EvalResult { + fn bitand_assign(&mut self, rhs: &'a EvalResult) { + use self::EvalResult::*; + *self = match (&*self, rhs) { + (&Int(a), &Int(b)) => Int(a & b), + _ => Invalid, + }; + } +} +impl<'a> BitOrAssign<&'a EvalResult> for EvalResult { + fn bitor_assign(&mut self, rhs: &'a EvalResult) { + use self::EvalResult::*; + *self = match (&*self, rhs) { + (&Int(a), &Int(b)) => Int(a | b), + _ => Invalid, + }; + } +} +impl<'a> BitXorAssign<&'a EvalResult> for EvalResult { + fn bitxor_assign(&mut self, rhs: &'a EvalResult) { + use self::EvalResult::*; + *self = match (&*self, rhs) { + (&Int(a), &Int(b)) => Int(a ^ b), + _ => Invalid, + }; + } +} +impl<'a> DivAssign<&'a EvalResult> for EvalResult { + fn div_assign(&mut self, rhs: &'a EvalResult) { + use self::EvalResult::*; + *self = match (&*self, rhs) { + (&Int(a), &Int(b)) => Int(a / b), + (&Float(a), &Int(b)) => Float(a / (b.0 as f64)), + (&Int(a), &Float(b)) => Float(a.0 as f64 / b), + (&Float(a), &Float(b)) => Float(a / b), + _ => Invalid, + }; + } +} +impl<'a> MulAssign<&'a EvalResult> for EvalResult { + fn mul_assign(&mut self, rhs: &'a EvalResult) { + use self::EvalResult::*; + *self = match (&*self, rhs) { + (&Int(a), &Int(b)) => Int(a * b), + (&Float(a), &Int(b)) => Float(a * (b.0 as f64)), + (&Int(a), &Float(b)) => Float(a.0 as f64 * b), + (&Float(a), &Float(b)) => Float(a * b), + _ => Invalid, + }; + } +} +impl<'a> RemAssign<&'a EvalResult> for EvalResult { + fn rem_assign(&mut 
self, rhs: &'a EvalResult) { + use self::EvalResult::*; + *self = match (&*self, rhs) { + (&Int(a), &Int(b)) => Int(a % b), + (&Float(a), &Int(b)) => Float(a % (b.0 as f64)), + (&Int(a), &Float(b)) => Float(a.0 as f64 % b), + (&Float(a), &Float(b)) => Float(a % b), + _ => Invalid, + }; + } +} +impl<'a> ShlAssign<&'a EvalResult> for EvalResult { + fn shl_assign(&mut self, rhs: &'a EvalResult) { + use self::EvalResult::*; + *self = match (&*self, rhs) { + (&Int(a), &Int(b)) => Int(a << (b.0 as usize)), + _ => Invalid, + }; + } +} +impl<'a> ShrAssign<&'a EvalResult> for EvalResult { + fn shr_assign(&mut self, rhs: &'a EvalResult) { + use self::EvalResult::*; + *self = match (&*self, rhs) { + (&Int(a), &Int(b)) => Int(a >> (b.0 as usize)), + _ => Invalid, + }; + } +} +impl<'a> SubAssign<&'a EvalResult> for EvalResult { + fn sub_assign(&mut self, rhs: &'a EvalResult) { + use self::EvalResult::*; + *self = match (&*self, rhs) { + (&Int(a), &Int(b)) => Int(a - b), + (&Float(a), &Int(b)) => Float(a - (b.0 as f64)), + (&Int(a), &Float(b)) => Float(a.0 as f64 - b), + (&Float(a), &Float(b)) => Float(a - b), + _ => Invalid, + }; + } +} + +fn unary_op(input: (&[u8], EvalResult)) -> Option { + use self::EvalResult::*; + assert_eq!(input.0.len(), 1); + match (input.0[0], input.1) { + (b'+', i) => Some(i), + (b'-', Int(i)) => Some(Int(Wrapping(i.0.wrapping_neg()))), // impl Neg for Wrapping not until rust 1.10... + (b'-', Float(i)) => Some(Float(-i)), + (b'-', _) => unreachable!("non-numeric unary op"), + (b'~', Int(i)) => Some(Int(!i)), + (b'~', Float(_)) => None, + (b'~', _) => unreachable!("non-numeric unary op"), + _ => unreachable!("invalid unary op"), + } +} + +fn numeric, F>( + f: F, +) -> impl FnMut(I) -> nom::IResult +where + F: FnMut(I) -> nom::IResult, +{ + nom::combinator::map_opt(f, EvalResult::as_numeric) +} + +impl<'a> PRef<'a> { + fn unary(self, input: &'_ [Token]) -> CResult<'_, EvalResult> { + alt(( + delimited(p("("), |i| self.numeric_expr(i), p(")")), + numeric(|i| self.literal(i)), + numeric(|i| self.identifier(i)), + map_opt( + pair(one_of_punctuation(&["+", "-", "~"][..]), |i| self.unary(i)), + unary_op, + ), + ))(input) + } + + fn mul_div_rem(self, input: &'_ [Token]) -> CResult<'_, EvalResult> { + let (input, acc) = self.unary(input)?; + fold_many0( + pair(complete(one_of_punctuation(&["*", "/", "%"][..])), |i| { + self.unary(i) + }), + move || acc.clone(), + |mut acc, (op, val): (&[u8], EvalResult)| { + match op[0] as char { + '*' => acc *= &val, + '/' => acc /= &val, + '%' => acc %= &val, + _ => unreachable!(), + }; + acc + }, + )(input) + } + + fn add_sub(self, input: &'_ [Token]) -> CResult<'_, EvalResult> { + let (input, acc) = self.mul_div_rem(input)?; + fold_many0( + pair(complete(one_of_punctuation(&["+", "-"][..])), |i| { + self.mul_div_rem(i) + }), + move || acc.clone(), + |mut acc, (op, val): (&[u8], EvalResult)| { + match op[0] as char { + '+' => acc += &val, + '-' => acc -= &val, + _ => unreachable!(), + }; + acc + }, + )(input) + } + + fn shl_shr(self, input: &'_ [Token]) -> CResult<'_, EvalResult> { + let (input, acc) = self.add_sub(input)?; + numeric(fold_many0( + pair(complete(one_of_punctuation(&["<<", ">>"][..])), |i| { + self.add_sub(i) + }), + move || acc.clone(), + |mut acc, (op, val): (&[u8], EvalResult)| { + match op { + b"<<" => acc <<= &val, + b">>" => acc >>= &val, + _ => unreachable!(), + }; + acc + }, + ))(input) + } + + fn and(self, input: &'_ [Token]) -> CResult<'_, EvalResult> { + let (input, acc) = self.shl_shr(input)?; + numeric(fold_many0( + 
preceded(complete(p("&")), |i| self.shl_shr(i)), + move || acc.clone(), + |mut acc, val: EvalResult| { + acc &= &val; + acc + }, + ))(input) + } + + fn xor(self, input: &'_ [Token]) -> CResult<'_, EvalResult> { + let (input, acc) = self.and(input)?; + numeric(fold_many0( + preceded(complete(p("^")), |i| self.and(i)), + move || acc.clone(), + |mut acc, val: EvalResult| { + acc ^= &val; + acc + }, + ))(input) + } + + fn or(self, input: &'_ [Token]) -> CResult<'_, EvalResult> { + let (input, acc) = self.xor(input)?; + numeric(fold_many0( + preceded(complete(p("|")), |i| self.xor(i)), + move || acc.clone(), + |mut acc, val: EvalResult| { + acc |= &val; + acc + }, + ))(input) + } + + #[inline(always)] + fn numeric_expr(self, input: &'_ [Token]) -> CResult<'_, EvalResult> { + self.or(input) + } +} + +// ======================================================= +// ============= Literals and identifiers ================ +// ======================================================= + +impl<'a> PRef<'a> { + fn identifier(self, input: &'_ [Token]) -> CResult<'_, EvalResult> { + match input.split_first() { + None => Err(Err::Incomplete(Needed::new(1))), + Some(( + &Token { + kind: TokenKind::Identifier, + ref raw, + }, + rest, + )) => { + if let Some(r) = self.identifiers.get(&raw[..]) { + Ok((rest, r.clone())) + } else { + Err(Err::Error( + (input, crate::ErrorKind::UnknownIdentifier).into(), + )) + } + } + Some(_) => Err(Err::Error( + (input, crate::ErrorKind::TypedToken(TokenKind::Identifier)).into(), + )), + } + } + + fn literal(self, input: &'_ [Token]) -> CResult<'_, EvalResult> { + match input.split_first() { + None => Err(Err::Incomplete(Needed::new(1))), + Some(( + &Token { + kind: TokenKind::Literal, + ref raw, + }, + rest, + )) => match literal::parse(raw) { + Ok((_, result)) => Ok((rest, result)), + _ => Err(Err::Error((input, crate::ErrorKind::InvalidLiteral).into())), + }, + Some(_) => Err(Err::Error( + (input, crate::ErrorKind::TypedToken(TokenKind::Literal)).into(), + )), + } + } + + fn string(self, input: &'_ [Token]) -> CResult<'_, Vec> { + alt(( + map_opt(|i| self.literal(i), EvalResult::as_str), + map_opt(|i| self.identifier(i), EvalResult::as_str), + ))(input) + .to_cexpr_result() + } + + // "string1" "string2" etc... + fn concat_str(self, input: &'_ [Token]) -> CResult<'_, EvalResult> { + map( + pair(|i| self.string(i), many0(complete(|i| self.string(i)))), + |(first, v)| { + Vec::into_iter(v) + .fold(first, |mut s, elem| { + Vec::extend_from_slice(&mut s, Vec::::as_slice(&elem)); + s + }) + .into() + }, + )(input) + .to_cexpr_result() + } + + fn expr(self, input: &'_ [Token]) -> CResult<'_, EvalResult> { + alt(( + |i| self.numeric_expr(i), + delimited(p("("), |i| self.expr(i), p(")")), + |i| self.concat_str(i), + |i| self.literal(i), + |i| self.identifier(i), + ))(input) + .to_cexpr_result() + } + + fn macro_definition(self, input: &'_ [Token]) -> CResult<'_, (&'_ [u8], EvalResult)> { + pair(identifier_token, |i| self.expr(i))(input) + } +} + +impl<'a> ::std::ops::Deref for PRef<'a> { + type Target = IdentifierParser<'a>; + fn deref(&self) -> &IdentifierParser<'a> { + self.0 + } +} + +impl<'ident> IdentifierParser<'ident> { + fn as_ref(&self) -> PRef<'_> { + PRef(self) + } + + /// Create a new `IdentifierParser` with a set of known identifiers. When + /// a known identifier is encountered during parsing, it is substituted + /// for the value specified. 
+    pub fn new(identifiers: &HashMap<Vec<u8>, EvalResult>) -> IdentifierParser<'_> {
+        IdentifierParser { identifiers }
+    }
+
+    /// Parse and evaluate an expression of a list of tokens.
+    ///
+    /// Returns an error if the input is not a valid expression or if the token
+    /// stream contains comments, keywords or unknown identifiers.
+    pub fn expr<'a>(&self, input: &'a [Token]) -> CResult<'a, EvalResult> {
+        self.as_ref().expr(input)
+    }
+
+    /// Parse and evaluate a macro definition from a list of tokens.
+    ///
+    /// Returns the identifier for the macro and its replacement evaluated as an
+    /// expression. The input should not include `#define`.
+    ///
+    /// Returns an error if the replacement is not a valid expression, if called
+    /// on most function-like macros, or if the token stream contains comments,
+    /// keywords or unknown identifiers.
+    ///
+    /// N.B. This is intended to fail on function-like macros, but if it the
+    /// macro takes a single argument, the argument name is defined as an
+    /// identifier, and the macro otherwise parses as an expression, it will
+    /// return a result even on function-like macros.
+    ///
+    /// ```c
+    /// // will evaluate into IDENTIFIER
+    /// #define DELETE(IDENTIFIER)
+    /// // will evaluate into IDENTIFIER-3
+    /// #define NEGATIVE_THREE(IDENTIFIER) -3
+    /// ```
+    pub fn macro_definition<'a>(&self, input: &'a [Token]) -> CResult<'a, (&'a [u8], EvalResult)> {
+        crate::assert_full_parse(self.as_ref().macro_definition(input))
+    }
+}
+
+/// Parse and evaluate an expression of a list of tokens.
+///
+/// Returns an error if the input is not a valid expression or if the token
+/// stream contains comments, keywords or identifiers.
+pub fn expr(input: &[Token]) -> CResult<'_, EvalResult> {
+    IdentifierParser::new(&HashMap::new()).expr(input)
+}
+
+/// Parse and evaluate a macro definition from a list of tokens.
+///
+/// Returns the identifier for the macro and its replacement evaluated as an
+/// expression. The input should not include `#define`.
+///
+/// Returns an error if the replacement is not a valid expression, if called
+/// on a function-like macro, or if the token stream contains comments,
+/// keywords or identifiers.
+pub fn macro_definition(input: &[Token]) -> CResult<'_, (&'_ [u8], EvalResult)> {
+    IdentifierParser::new(&HashMap::new()).macro_definition(input)
+}
+
+/// Parse a functional macro declaration from a list of tokens.
+///
+/// Returns the identifier for the macro and the argument list (in order). The
+/// input should not include `#define`. The actual definition is not parsed and
+/// may be obtained from the unparsed data returned.
+///
+/// Returns an error if the input is not a functional macro or if the token
+/// stream contains comments.
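For orientation, here is a minimal sketch of driving the evaluator defined above. It is not part of the vendored sources or of this patch; it assumes the cexpr API exactly as vendored here, and the helper name and token values are invented for illustration (a macro body such as `THREE + 1`, with `THREE` supplied as a known identifier).

use std::collections::HashMap;
use std::num::Wrapping;

use cexpr::assert_full_parse;
use cexpr::expr::{EvalResult, IdentifierParser};
use cexpr::token::{Kind, Token};

fn evaluate_three_plus_one() {
    // Tokens as a libclang-style tokenizer would emit them for `THREE + 1`.
    let tokens: Vec<Token> = vec![
        (Kind::Identifier, &b"THREE"[..]).into(),
        (Kind::Punctuation, &b"+"[..]).into(),
        (Kind::Literal, &b"1"[..]).into(),
    ];

    // Known identifiers are substituted during parsing, so define THREE first.
    let mut idents: HashMap<Vec<u8>, EvalResult> = HashMap::new();
    idents.insert(b"THREE".to_vec(), EvalResult::Int(Wrapping(3)));

    // assert_full_parse rejects trailing tokens; the result is Int(4).
    let (_, value) =
        assert_full_parse(IdentifierParser::new(&idents).expr(&tokens)).unwrap();
    assert_eq!(value, EvalResult::Int(Wrapping(4)));
}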
+/// +/// # Example +/// ``` +/// use cexpr::expr::{IdentifierParser, EvalResult, fn_macro_declaration}; +/// use cexpr::assert_full_parse; +/// use cexpr::token::Kind::*; +/// use cexpr::token::Token; +/// +/// // #define SUFFIX(arg) arg "suffix" +/// let tokens = vec![ +/// (Identifier, &b"SUFFIX"[..]).into(), +/// (Punctuation, &b"("[..]).into(), +/// (Identifier, &b"arg"[..]).into(), +/// (Punctuation, &b")"[..]).into(), +/// (Identifier, &b"arg"[..]).into(), +/// (Literal, &br#""suffix""#[..]).into(), +/// ]; +/// +/// // Try to parse the functional part +/// let (expr, (ident, args)) = fn_macro_declaration(&tokens).unwrap(); +/// assert_eq!(ident, b"SUFFIX"); +/// +/// // Create dummy arguments +/// let idents = args.into_iter().map(|arg| +/// (arg.to_owned(), EvalResult::Str(b"test".to_vec())) +/// ).collect(); +/// +/// // Evaluate the macro +/// let (_, evaluated) = assert_full_parse(IdentifierParser::new(&idents).expr(expr)).unwrap(); +/// assert_eq!(evaluated, EvalResult::Str(b"testsuffix".to_vec())); +/// ``` +pub fn fn_macro_declaration(input: &[Token]) -> CResult<'_, (&[u8], Vec<&[u8]>)> { + pair( + identifier_token, + delimited( + p("("), + separated_list0(p(","), identifier_token), + p(")"), + ), + )(input) +} diff --git a/vendor/cexpr/src/lib.rs b/vendor/cexpr/src/lib.rs new file mode 100644 index 00000000000000..5170f97d135c6c --- /dev/null +++ b/vendor/cexpr/src/lib.rs @@ -0,0 +1,149 @@ +// (C) Copyright 2016 Jethro G. Beekman +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. +//! A C expression parser and evaluator. +//! +//! This crate provides methods for parsing and evaluating simple C expressions. In general, the +//! crate can handle most arithmetic expressions that would appear in macros or the definition of +//! constants, as well as string and character constants. +//! +//! The main entry point for is [`token::parse`], which parses a byte string and returns its +//! evaluated value. +#![warn(rust_2018_idioms)] +#![warn(missing_docs)] +#![allow(deprecated)] + +pub mod nom { + //! nom's result types, re-exported. + pub use nom::{error::ErrorKind, error::Error, Err, IResult, Needed}; +} +pub mod expr; +pub mod literal; +pub mod token; + +/// Parsing errors specific to C parsing +#[derive(Debug)] +pub enum ErrorKind { + /// Expected the specified token + ExactToken(token::Kind, &'static [u8]), + /// Expected one of the specified tokens + ExactTokens(token::Kind, &'static [&'static str]), + /// Expected a token of the specified kind + TypedToken(token::Kind), + /// An unknown identifier was encountered + UnknownIdentifier, + /// An invalid literal was encountered. + /// + /// When encountered, this generally means a bug exists in the data that + /// was passed in or the parsing logic. + InvalidLiteral, + /// A full parse was requested, but data was left over after parsing finished. + Partial, + /// An error occurred in an underlying nom parser. + Parser(nom::ErrorKind), +} + +impl From for ErrorKind { + fn from(k: nom::ErrorKind) -> Self { + ErrorKind::Parser(k) + } +} + +impl From for ErrorKind { + fn from(_: u32) -> Self { + ErrorKind::InvalidLiteral + } +} + +/// Parsing errors specific to C parsing. +/// +/// This is a superset of `(I, nom::ErrorKind)` that includes the additional errors specified by +/// [`ErrorKind`]. +#[derive(Debug)] +pub struct Error { + /// The remainder of the input stream at the time of the error. 
+    pub input: I,
+    /// The error that occurred.
+    pub error: ErrorKind,
+}
+
+impl<I> From<(I, nom::ErrorKind)> for Error<I> {
+    fn from(e: (I, nom::ErrorKind)) -> Self {
+        Self::from((e.0, ErrorKind::from(e.1)))
+    }
+}
+
+impl<I> From<(I, ErrorKind)> for Error<I> {
+    fn from(e: (I, ErrorKind)) -> Self {
+        Self {
+            input: e.0,
+            error: e.1,
+        }
+    }
+}
+
+impl<I> From<::nom::error::Error<I>> for Error<I> {
+    fn from(e: ::nom::error::Error<I>) -> Self {
+        Self {
+            input: e.input,
+            error: e.code.into(),
+        }
+    }
+}
+
+impl<I> ::nom::error::ParseError<I> for Error<I> {
+    fn from_error_kind(input: I, kind: nom::ErrorKind) -> Self {
+        Self {
+            input,
+            error: kind.into(),
+        }
+    }
+
+    fn append(_: I, _: nom::ErrorKind, other: Self) -> Self {
+        other
+    }
+}
+
+// in lieu of https://github.com/Geal/nom/issues/1010
+trait ToCexprResult<I, O> {
+    fn to_cexpr_result(self) -> nom::IResult<I, O, Error<I>>;
+}
+impl<I, O, E> ToCexprResult<I, O> for nom::IResult<I, O, E>
+where
+    Error<I>: From<E>,
+{
+    fn to_cexpr_result(self) -> nom::IResult<I, O, Error<I>> {
+        match self {
+            Ok(v) => Ok(v),
+            Err(nom::Err::Incomplete(n)) => Err(nom::Err::Incomplete(n)),
+            Err(nom::Err::Error(e)) => Err(nom::Err::Error(e.into())),
+            Err(nom::Err::Failure(e)) => Err(nom::Err::Failure(e.into())),
+        }
+    }
+}
+
+/// If the input result indicates a succesful parse, but there is data left,
+/// return an `Error::Partial` instead.
+pub fn assert_full_parse<'i, I: 'i, O, E>(
+    result: nom::IResult<&'i [I], O, E>,
+) -> nom::IResult<&'i [I], O, Error<&'i [I]>>
+where
+    Error<&'i [I]>: From<E>,
+{
+    match result.to_cexpr_result() {
+        Ok((rem, output)) => {
+            if rem.is_empty() {
+                Ok((rem, output))
+            } else {
+                Err(nom::Err::Error((rem, ErrorKind::Partial).into()))
+            }
+        }
+        Err(nom::Err::Incomplete(n)) => Err(nom::Err::Incomplete(n)),
+        Err(nom::Err::Failure(e)) => Err(nom::Err::Failure(e)),
+        Err(nom::Err::Error(e)) => Err(nom::Err::Error(e)),
+    }
+}
diff --git a/vendor/cexpr/src/literal.rs b/vendor/cexpr/src/literal.rs
new file mode 100644
index 00000000000000..68e85c7dadbd0d
--- /dev/null
+++ b/vendor/cexpr/src/literal.rs
@@ -0,0 +1,361 @@
+// (C) Copyright 2016 Jethro G. Beekman
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+//! Parsing C literals from byte slices.
+//!
+//! This will parse a representation of a C literal into a Rust type.
+//!
+//! # characters
+//! Character literals are stored into the `CChar` type, which can hold values
+//! that are not valid Unicode code points. ASCII characters are represented as
+//! `char`, literal bytes with the high byte set are converted into the raw
+//! representation. Escape sequences are supported. If hex and octal escapes
+//! map to an ASCII character, that is used, otherwise, the raw encoding is
+//! used, including for values over 255. Unicode escapes are checked for
+//! validity and mapped to `char`. Character sequences are not supported. Width
+//! prefixes are ignored.
+//!
+//! # strings
+//! Strings are interpreted as byte vectors. Escape sequences are supported. If
+//! hex and octal escapes map onto multi-byte characters, they are truncated to
+//! one 8-bit character. Unicode escapes are converted into their UTF-8
+//! encoding. Width prefixes are ignored.
+//!
+//! # integers
+//! Integers are read into `i64`. Binary, octal, decimal and hexadecimal are
+//! all supported. If the literal value is between `i64::MAX` and `u64::MAX`,
+//! it is bit-cast to `i64`. Values over `u64::MAX` cannot be parsed. Width and
sign suffixes are ignored. Sign prefixes are not supported. +//! +//! # real numbers +//! Reals are read into `f64`. Width suffixes are ignored. Sign prefixes are +//! not supported in the significand. Hexadecimal floating points are not +//! supported. + +use std::char; +use std::str::{self, FromStr}; + +use nom::branch::alt; +use nom::bytes::complete::is_not; +use nom::bytes::complete::tag; +use nom::character::complete::{char, one_of}; +use nom::combinator::{complete, map, map_opt, opt, recognize}; +use nom::multi::{fold_many0, many0, many1, many_m_n}; +use nom::sequence::{delimited, pair, preceded, terminated, tuple}; +use nom::*; + +use crate::expr::EvalResult; +use crate::ToCexprResult; + +#[derive(Debug, Copy, Clone, PartialEq, Eq)] +/// Representation of a C character +pub enum CChar { + /// A character that can be represented as a `char` + Char(char), + /// Any other character (8-bit characters, unicode surrogates, etc.) + Raw(u64), +} + +impl From for CChar { + fn from(i: u8) -> CChar { + match i { + 0..=0x7f => CChar::Char(i as u8 as char), + _ => CChar::Raw(i as u64), + } + } +} + +// A non-allocating version of this would be nice... +impl std::convert::Into> for CChar { + fn into(self) -> Vec { + match self { + CChar::Char(c) => { + let mut s = String::with_capacity(4); + s.extend(&[c]); + s.into_bytes() + } + CChar::Raw(i) => { + let mut v = Vec::with_capacity(1); + v.push(i as u8); + v + } + } + } +} + +/// ensures the child parser consumes the whole input +pub fn full( + f: F, +) -> impl Fn(I) -> nom::IResult +where + I: nom::InputLength, + F: Fn(I) -> nom::IResult, +{ + move |input| { + let res = f(input); + match res { + Ok((i, o)) => { + if i.input_len() == 0 { + Ok((i, o)) + } else { + Err(nom::Err::Error(nom::error::Error::new(i, nom::error::ErrorKind::Complete))) + } + } + r => r, + } + } +} + +// ================================= +// ======== matching digits ======== +// ================================= + +macro_rules! 
byte { + ($($p: pat)|* ) => {{ + fn parser(i: &[u8]) -> crate::nom::IResult<&[u8], u8> { + match i.split_first() { + $(Some((&c @ $p,rest)))|* => Ok((rest,c)), + Some(_) => Err(nom::Err::Error(nom::error::Error::new(i, nom::error::ErrorKind::OneOf))), + None => Err(nom::Err::Incomplete(Needed::new(1))), + } + } + + parser + }} +} + +fn binary(i: &[u8]) -> nom::IResult<&[u8], u8> { + byte!(b'0'..=b'1')(i) +} + +fn octal(i: &[u8]) -> nom::IResult<&[u8], u8> { + byte!(b'0'..=b'7')(i) +} + +fn decimal(i: &[u8]) -> nom::IResult<&[u8], u8> { + byte!(b'0'..=b'9')(i) +} + +fn hexadecimal(i: &[u8]) -> nom::IResult<&[u8], u8> { + byte!(b'0' ..= b'9' | b'a' ..= b'f' | b'A' ..= b'F')(i) +} + +// ======================================== +// ======== characters and strings ======== +// ======================================== + +fn escape2char(c: char) -> CChar { + CChar::Char(match c { + 'a' => '\x07', + 'b' => '\x08', + 'f' => '\x0c', + 'n' => '\n', + 'r' => '\r', + 't' => '\t', + 'v' => '\x0b', + _ => unreachable!("invalid escape {}", c), + }) +} + +fn c_raw_escape(n: Vec, radix: u32) -> Option { + str::from_utf8(&n) + .ok() + .and_then(|i| u64::from_str_radix(i, radix).ok()) + .map(|i| match i { + 0..=0x7f => CChar::Char(i as u8 as char), + _ => CChar::Raw(i), + }) +} + +fn c_unicode_escape(n: Vec) -> Option { + str::from_utf8(&n) + .ok() + .and_then(|i| u32::from_str_radix(i, 16).ok()) + .and_then(char::from_u32) + .map(CChar::Char) +} + +fn escaped_char(i: &[u8]) -> nom::IResult<&[u8], CChar> { + preceded( + char('\\'), + alt(( + map(one_of(r#"'"?\"#), CChar::Char), + map(one_of("abfnrtv"), escape2char), + map_opt(many_m_n(1, 3, octal), |v| c_raw_escape(v, 8)), + map_opt(preceded(char('x'), many1(hexadecimal)), |v| { + c_raw_escape(v, 16) + }), + map_opt( + preceded(char('u'), many_m_n(4, 4, hexadecimal)), + c_unicode_escape, + ), + map_opt( + preceded(char('U'), many_m_n(8, 8, hexadecimal)), + c_unicode_escape, + ), + )), + )(i) +} + +fn c_width_prefix(i: &[u8]) -> nom::IResult<&[u8], &[u8]> { + alt((tag("u8"), tag("u"), tag("U"), tag("L")))(i) +} + +fn c_char(i: &[u8]) -> nom::IResult<&[u8], CChar> { + delimited( + terminated(opt(c_width_prefix), char('\'')), + alt(( + escaped_char, + map(byte!(0 ..= 91 /* \=92 */ | 93 ..= 255), CChar::from), + )), + char('\''), + )(i) +} + +fn c_string(i: &[u8]) -> nom::IResult<&[u8], Vec> { + delimited( + alt((preceded(c_width_prefix, char('"')), char('"'))), + fold_many0( + alt(( + map(escaped_char, |c: CChar| c.into()), + map(is_not([b'\\', b'"']), |c: &[u8]| c.into()), + )), + Vec::new, + |mut v: Vec, res: Vec| { + v.extend_from_slice(&res); + v + }, + ), + char('"'), + )(i) +} + +// ================================ +// ======== parse integers ======== +// ================================ + +fn c_int_radix(n: Vec, radix: u32) -> Option { + str::from_utf8(&n) + .ok() + .and_then(|i| u64::from_str_radix(i, radix).ok()) +} + +fn take_ul(input: &[u8]) -> IResult<&[u8], &[u8]> { + let r = input.split_at_position(|c| c != b'u' && c != b'U' && c != b'l' && c != b'L'); + match r { + Err(Err::Incomplete(_)) => Ok((&input[input.len()..], input)), + res => res, + } +} + +fn c_int(i: &[u8]) -> nom::IResult<&[u8], i64> { + map( + terminated( + alt(( + map_opt(preceded(tag("0x"), many1(complete(hexadecimal))), |v| { + c_int_radix(v, 16) + }), + map_opt(preceded(tag("0X"), many1(complete(hexadecimal))), |v| { + c_int_radix(v, 16) + }), + map_opt(preceded(tag("0b"), many1(complete(binary))), |v| { + c_int_radix(v, 2) + }), + map_opt(preceded(tag("0B"), 
many1(complete(binary))), |v| { + c_int_radix(v, 2) + }), + map_opt(preceded(char('0'), many1(complete(octal))), |v| { + c_int_radix(v, 8) + }), + map_opt(many1(complete(decimal)), |v| c_int_radix(v, 10)), + |input| Err(crate::nom::Err::Error(nom::error::Error::new(input, crate::nom::ErrorKind::Fix))), + )), + opt(take_ul), + ), + |i| i as i64, + )(i) +} + +// ============================== +// ======== parse floats ======== +// ============================== + +fn float_width(i: &[u8]) -> nom::IResult<&[u8], u8> { + nom::combinator::complete(byte!(b'f' | b'l' | b'F' | b'L'))(i) +} + +fn float_exp(i: &[u8]) -> nom::IResult<&[u8], (Option, Vec)> { + preceded( + byte!(b'e' | b'E'), + pair(opt(byte!(b'-' | b'+')), many1(complete(decimal))), + )(i) +} + +fn c_float(i: &[u8]) -> nom::IResult<&[u8], f64> { + map_opt( + alt(( + terminated( + recognize(tuple(( + many1(complete(decimal)), + byte!(b'.'), + many0(complete(decimal)), + ))), + opt(float_width), + ), + terminated( + recognize(tuple(( + many0(complete(decimal)), + byte!(b'.'), + many1(complete(decimal)), + ))), + opt(float_width), + ), + terminated( + recognize(tuple(( + many0(complete(decimal)), + opt(byte!(b'.')), + many1(complete(decimal)), + float_exp, + ))), + opt(float_width), + ), + terminated( + recognize(tuple(( + many1(complete(decimal)), + opt(byte!(b'.')), + many0(complete(decimal)), + float_exp, + ))), + opt(float_width), + ), + terminated(recognize(many1(complete(decimal))), float_width), + )), + |v| str::from_utf8(v).ok().and_then(|i| f64::from_str(i).ok()), + )(i) +} + +// ================================ +// ======== main interface ======== +// ================================ + +fn one_literal(input: &[u8]) -> nom::IResult<&[u8], EvalResult, crate::Error<&[u8]>> { + alt(( + map(full(c_char), EvalResult::Char), + map(full(c_int), |i| EvalResult::Int(::std::num::Wrapping(i))), + map(full(c_float), EvalResult::Float), + map(full(c_string), EvalResult::Str), + ))(input) + .to_cexpr_result() +} + +/// Parse a C literal. +/// +/// The input must contain exactly the representation of a single literal +/// token, and in particular no whitespace or sign prefixes. +pub fn parse(input: &[u8]) -> IResult<&[u8], EvalResult, crate::Error<&[u8]>> { + crate::assert_full_parse(one_literal(input)) +} diff --git a/vendor/cexpr/src/token.rs b/vendor/cexpr/src/token.rs new file mode 100644 index 00000000000000..dbc5949cd4fcb2 --- /dev/null +++ b/vendor/cexpr/src/token.rs @@ -0,0 +1,44 @@ +// (C) Copyright 2016 Jethro G. Beekman +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. +//! Representation of a C token +//! +//! This is designed to map onto a libclang CXToken. + +#[derive(Debug, Copy, Clone, PartialEq, Eq)] +#[allow(missing_docs)] +pub enum Kind { + Punctuation, + Keyword, + Identifier, + Literal, + Comment, +} + +/// A single token in a C expression. +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct Token { + /// The type of this token. + pub kind: Kind, + /// The bytes that make up the token. 
+ pub raw: Box<[u8]>, +} + +impl<'a> From<(Kind, &'a [u8])> for Token { + fn from((kind, value): (Kind, &'a [u8])) -> Token { + Token { + kind, + raw: value.to_owned().into_boxed_slice(), + } + } +} + +/// Remove all comment tokens from a vector of tokens +pub fn remove_comments(v: &mut Vec) -> &mut Vec { + v.retain(|t| t.kind != Kind::Comment); + v +} diff --git a/vendor/cexpr/tests/clang.rs b/vendor/cexpr/tests/clang.rs new file mode 100644 index 00000000000000..b2484f0778288b --- /dev/null +++ b/vendor/cexpr/tests/clang.rs @@ -0,0 +1,339 @@ +// (C) Copyright 2016 Jethro G. Beekman +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. +extern crate cexpr; +extern crate clang_sys; + +use std::collections::HashMap; +use std::io::Write; +use std::str::{self, FromStr}; +use std::{char, ffi, mem, ptr, slice}; + +use cexpr::assert_full_parse; +use cexpr::expr::{fn_macro_declaration, EvalResult, IdentifierParser}; +use cexpr::literal::CChar; +use cexpr::token::Token; +use clang_sys::*; + +// main testing routine +fn test_definition( + ident: Vec, + tokens: &[Token], + idents: &mut HashMap, EvalResult>, +) -> bool { + fn bytes_to_int(value: &[u8]) -> Option { + str::from_utf8(value) + .ok() + .map(|s| s.replace("n", "-")) + .map(|s| s.replace("_", "")) + .and_then(|v| i64::from_str(&v).ok()) + .map(::std::num::Wrapping) + .map(Int) + } + + use cexpr::expr::EvalResult::*; + + let display_name = String::from_utf8_lossy(&ident).into_owned(); + + let functional; + let test = { + // Split name such as Str_test_string into (Str,test_string) + let pos = ident + .iter() + .position(|c| *c == b'_') + .expect(&format!("Invalid definition in testcase: {}", display_name)); + let mut expected = &ident[..pos]; + let mut value = &ident[(pos + 1)..]; + + functional = expected == b"Fn"; + + if functional { + let ident = value; + let pos = ident + .iter() + .position(|c| *c == b'_') + .expect(&format!("Invalid definition in testcase: {}", display_name)); + expected = &ident[..pos]; + value = &ident[(pos + 1)..]; + } + + if expected == b"Str" { + let mut splits = value.split(|c| *c == b'U'); + let mut s = Vec::with_capacity(value.len()); + s.extend_from_slice(splits.next().unwrap()); + for split in splits { + let (chr, rest) = split.split_at(6); + let chr = u32::from_str_radix(str::from_utf8(chr).unwrap(), 16).unwrap(); + write!(s, "{}", char::from_u32(chr).unwrap()).unwrap(); + s.extend_from_slice(rest); + } + Some(Str(s)) + } else if expected == b"Int" { + bytes_to_int(value) + } else if expected == b"Float" { + str::from_utf8(value) + .ok() + .map(|s| s.replace("n", "-").replace("p", ".")) + .and_then(|v| f64::from_str(&v).ok()) + .map(Float) + } else if expected == b"CharRaw" { + str::from_utf8(value) + .ok() + .and_then(|v| u64::from_str(v).ok()) + .map(CChar::Raw) + .map(Char) + } else if expected == b"CharChar" { + str::from_utf8(value) + .ok() + .and_then(|v| u32::from_str(v).ok()) + .and_then(char::from_u32) + .map(CChar::Char) + .map(Char) + } else { + Some(Invalid) + } + .expect(&format!("Invalid definition in testcase: {}", display_name)) + }; + + let result = if functional { + let mut fnidents; + let expr_tokens; + match fn_macro_declaration(&tokens) { + Ok((rest, (_, args))) => { + fnidents = idents.clone(); + expr_tokens = rest; + for arg in args { + let val = match test { + Int(_) => bytes_to_int(&arg), + Str(_) => Some(Str(arg.to_owned())), + _ => 
unimplemented!(), + } + .expect(&format!( + "Invalid argument in functional macro testcase: {}", + display_name + )); + fnidents.insert(arg.to_owned(), val); + } + } + e => { + println!( + "Failed test for {}, unable to parse functional macro declaration: {:?}", + display_name, e + ); + return false; + } + } + assert_full_parse(IdentifierParser::new(&fnidents).expr(&expr_tokens)) + } else { + IdentifierParser::new(idents) + .macro_definition(&tokens) + .map(|(i, (_, val))| (i, val)) + }; + + match result { + Ok((_, val)) => { + if val == test { + if let Some(_) = idents.insert(ident, val) { + panic!("Duplicate definition for testcase: {}", display_name); + } + true + } else { + println!( + "Failed test for {}, expected {:?}, got {:?}", + display_name, test, val + ); + false + } + } + e => { + if test == Invalid { + true + } else { + println!( + "Failed test for {}, expected {:?}, got {:?}", + display_name, test, e + ); + false + } + } + } +} + +// support code for the clang lexer +unsafe fn clang_str_to_vec(s: CXString) -> Vec { + let vec = ffi::CStr::from_ptr(clang_getCString(s)) + .to_bytes() + .to_owned(); + clang_disposeString(s); + vec +} + +#[allow(non_upper_case_globals)] +unsafe fn token_clang_to_cexpr(tu: CXTranslationUnit, orig: &CXToken) -> Token { + Token { + kind: match clang_getTokenKind(*orig) { + CXToken_Comment => cexpr::token::Kind::Comment, + CXToken_Identifier => cexpr::token::Kind::Identifier, + CXToken_Keyword => cexpr::token::Kind::Keyword, + CXToken_Literal => cexpr::token::Kind::Literal, + CXToken_Punctuation => cexpr::token::Kind::Punctuation, + _ => panic!("invalid token kind: {:?}", *orig), + }, + raw: clang_str_to_vec(clang_getTokenSpelling(tu, *orig)).into_boxed_slice(), + } +} + +extern "C" fn visit_children_thunk( + cur: CXCursor, + parent: CXCursor, + closure: CXClientData, +) -> CXChildVisitResult +where + F: FnMut(CXCursor, CXCursor) -> CXChildVisitResult, +{ + unsafe { (&mut *(closure as *mut F))(cur, parent) } +} + +unsafe fn visit_children(cursor: CXCursor, mut f: F) +where + F: FnMut(CXCursor, CXCursor) -> CXChildVisitResult, +{ + clang_visitChildren( + cursor, + visit_children_thunk:: as _, + &mut f as *mut F as CXClientData, + ); +} + +unsafe fn location_in_scope(r: CXSourceRange) -> bool { + let start = clang_getRangeStart(r); + let mut file = ptr::null_mut(); + clang_getSpellingLocation( + start, + &mut file, + ptr::null_mut(), + ptr::null_mut(), + ptr::null_mut(), + ); + clang_Location_isFromMainFile(start) != 0 + && clang_Location_isInSystemHeader(start) == 0 + && file != ptr::null_mut() +} + +/// tokenize_range_adjust can be used to work around LLVM bug 9069 +/// https://bugs.llvm.org//show_bug.cgi?id=9069 +fn file_visit_macros, Vec)>( + file: &str, + tokenize_range_adjust: bool, + mut visitor: F, +) { + unsafe { + let tu = { + let index = clang_createIndex(true as _, false as _); + let cfile = ffi::CString::new(file).unwrap(); + let mut tu = mem::MaybeUninit::uninit(); + assert!( + clang_parseTranslationUnit2( + index, + cfile.as_ptr(), + [b"-std=c11\0".as_ptr() as *const ::std::os::raw::c_char].as_ptr(), + 1, + ptr::null_mut(), + 0, + CXTranslationUnit_DetailedPreprocessingRecord, + &mut *tu.as_mut_ptr() + ) == CXError_Success, + "Failure reading test case {}", + file + ); + tu.assume_init() + }; + visit_children(clang_getTranslationUnitCursor(tu), |cur, _parent| { + if cur.kind == CXCursor_MacroDefinition { + let mut range = clang_getCursorExtent(cur); + if !location_in_scope(range) { + return CXChildVisit_Continue; + } + range.end_int_data 
-= if tokenize_range_adjust { 1 } else { 0 }; + let mut token_ptr = ptr::null_mut(); + let mut num = 0; + clang_tokenize(tu, range, &mut token_ptr, &mut num); + if token_ptr != ptr::null_mut() { + let tokens = slice::from_raw_parts(token_ptr, num as usize); + let tokens: Vec<_> = tokens + .iter() + .filter_map(|t| { + if clang_getTokenKind(*t) != CXToken_Comment { + Some(token_clang_to_cexpr(tu, t)) + } else { + None + } + }) + .collect(); + clang_disposeTokens(tu, token_ptr, num); + visitor(clang_str_to_vec(clang_getCursorSpelling(cur)), tokens) + } + } + CXChildVisit_Continue + }); + clang_disposeTranslationUnit(tu); + }; +} + +fn test_file(file: &str) -> bool { + let mut idents = HashMap::new(); + let mut all_succeeded = true; + file_visit_macros(file, fix_bug_9069(), |ident, tokens| { + all_succeeded &= test_definition(ident, &tokens, &mut idents) + }); + all_succeeded +} + +fn fix_bug_9069() -> bool { + fn check_bug_9069() -> bool { + let mut token_sets = vec![]; + file_visit_macros( + "tests/input/test_llvm_bug_9069.h", + false, + |ident, tokens| { + assert_eq!(&ident, b"A"); + token_sets.push(tokens); + }, + ); + assert_eq!(token_sets.len(), 2); + token_sets[0] != token_sets[1] + } + + use std::sync::atomic::{AtomicBool, Ordering}; + use std::sync::Once; + + static CHECK_FIX: Once = Once::new(); + static FIX: AtomicBool = AtomicBool::new(false); + + CHECK_FIX.call_once(|| FIX.store(check_bug_9069(), Ordering::SeqCst)); + + FIX.load(Ordering::SeqCst) +} + +macro_rules! test_file { + ($f:ident) => { + #[test] + fn $f() { + assert!( + test_file(concat!("tests/input/", stringify!($f), ".h")), + "test_file" + ) + } + }; +} + +test_file!(floats); +test_file!(chars); +test_file!(strings); +test_file!(int_signed); +test_file!(int_unsigned); +test_file!(fail); diff --git a/vendor/cexpr/tests/input/chars.h b/vendor/cexpr/tests/input/chars.h new file mode 100644 index 00000000000000..45351d3259bd37 --- /dev/null +++ b/vendor/cexpr/tests/input/chars.h @@ -0,0 +1,3 @@ +#define CharChar_65 'A' +#define CharChar_127849 '\U0001f369' // 🍩 +#define CharRaw_255 U'\xff' diff --git a/vendor/cexpr/tests/input/fail.h b/vendor/cexpr/tests/input/fail.h new file mode 100644 index 00000000000000..fd416bc7cb0f4d --- /dev/null +++ b/vendor/cexpr/tests/input/fail.h @@ -0,0 +1,9 @@ +#define FAIL_function_like(x) 3 +#define FAIL_empty +#define FAIL_invalid_for_radix 0b2 +#define FAIL_shift_by_float 3<<1f +#define FAIL_unknown_identifier UNKNOWN +#define Int_0 0 +#define Str_str "str" +#define FAIL_concat_integer "test" Str_str Int_0 +#define FAIL_too_large_int 18446744073709551616 diff --git a/vendor/cexpr/tests/input/floats.h b/vendor/cexpr/tests/input/floats.h new file mode 100644 index 00000000000000..61942cf41fe3e5 --- /dev/null +++ b/vendor/cexpr/tests/input/floats.h @@ -0,0 +1,8 @@ +#define Float_0 0. 
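As a rough illustration of what the vendored literal parser (vendor/cexpr/src/literal.rs above) yields for constants like those exercised by these test headers: the sketch below is not part of the vendored sources or the test suite, and the helper name is invented for illustration.

use std::num::Wrapping;

use cexpr::expr::EvalResult;
use cexpr::literal::{parse, CChar};

fn literal_spot_checks() {
    // Radix prefixes and width/sign suffixes are handled: 0x10uL parses as 16.
    assert_eq!(parse(b"0x10uL").unwrap().1, EvalResult::Int(Wrapping(16)));
    // '\xff' is not ASCII, so it is kept as a raw value (cf. CharRaw_255 above).
    assert_eq!(parse(b"'\\xff'").unwrap().1, EvalResult::Char(CChar::Raw(255)));
    // Exponent notation is read into f64 (cf. Float_1000 below).
    assert_eq!(parse(b"1e3").unwrap().1, EvalResult::Float(1000.0));
}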
+#define Float_1 1f +#define Float_p1 .1 +#define Float_2 2.0 +#define Float_1000 1e3 +#define Float_2000 2e+3 +#define Float_p001 1e-3 +#define Float_80 10.0*(1<<3) diff --git a/vendor/cexpr/tests/input/int_signed.h b/vendor/cexpr/tests/input/int_signed.h new file mode 100644 index 00000000000000..65854a63e30787 --- /dev/null +++ b/vendor/cexpr/tests/input/int_signed.h @@ -0,0 +1,3 @@ +#define Int_n3 -(-(-3)) +#define Int_n5 -3-2 +#define Int_n9223372036854775808 -9223372036854775808 diff --git a/vendor/cexpr/tests/input/int_unsigned.h b/vendor/cexpr/tests/input/int_unsigned.h new file mode 100644 index 00000000000000..6663dda3d6e5ac --- /dev/null +++ b/vendor/cexpr/tests/input/int_unsigned.h @@ -0,0 +1,29 @@ +#define Int_456 456 +#define Int_0 0 +#define Int_1 0b1 +#define Int_2 0x2 +#define Int_3 3L +#define Int_4 0X4 +#define Int_5 0B101 +#define Int_63 077 +#define Int_123 123 +#define Int_124 124u +#define Int_125 125uL +#define Int_126 126LuL +#define Int_16 (((1)<<4ULL))/*comment*/ +#define Int_13 1|8^6&2<<1 + +#define Int_47 32|15 +#define Int_38 (32|15)^9 +#define Int_6 ((32|15)^9)&7 +#define Int_12 (((32|15)^9)&7)<<1 +#define Int_17 ((((32|15)^9)&7)<<1)+5 +#define Int_15 (((((32|15)^9)&7)<<1)+5)-2 +#define Int_60 ((((((32|15)^9)&7)<<1)+5)-2)*4 +#define Int_30 (((((((32|15)^9)&7)<<1)+5)-2)*4)/2 +#define Int_39 32|15^9&7<<1+5-2*4/2 + +#define Int_n1 18446744073709551615 /*2^64-1*/ +#define Int_n9223372036854775808 9223372036854775808 + +#define Fn_Int_9(_3) _3*3 diff --git a/vendor/cexpr/tests/input/strings.h b/vendor/cexpr/tests/input/strings.h new file mode 100644 index 00000000000000..d01d409cbfc485 --- /dev/null +++ b/vendor/cexpr/tests/input/strings.h @@ -0,0 +1,17 @@ +#define Str_ "" +#define Str_str "str" +#define Str_unicode u"unicode" +#define Str_long L"long" +#define Str_concat u"con" L"cat" +#define Str_concat_parens ("concat" U"_parens") +#define Str_concat_identifier (Str_concat L"_identifier") +#define Str_hex_escape_all "\x68\x65\x78\x5f\x65\x73\x63\x61\x70\x65\x5f\x61\x6c\x6c" +#define Str_hex_escape_hex "h\x65x_\x65s\x63\x61p\x65_h\x65x" +#define Str_quote_U000022_escape "quote_\"_escape" +#define Str_Fly_away_in_my_space_U01F680_You_no_need_put_U01F4B5_in_my_pocket \ + u8"Fly_away_in_my_space_🚀_You_no_need_put_💵_in_my_pocket" +#define Fn_Str_no_args() "no_args" +#define Fn_Str_no_args_concat() "no_args_" Str_concat +#define Fn_Str_prepend_arg(arg) "prepend_" arg +#define Fn_Str_two_args(two, args) two "_" args +#define Fn_Str_three_args(three, _, args) three _ args diff --git a/vendor/cexpr/tests/input/test_llvm_bug_9069.h b/vendor/cexpr/tests/input/test_llvm_bug_9069.h new file mode 100644 index 00000000000000..a92374efee136d --- /dev/null +++ b/vendor/cexpr/tests/input/test_llvm_bug_9069.h @@ -0,0 +1,4 @@ +// The following two definitions should yield the same list of tokens. +// If https://bugs.llvm.org//show_bug.cgi?id=9069 is not fixed, they don't. 
+#define A 1 +#define A 1 diff --git a/vendor/cfg-if/.cargo-checksum.json b/vendor/cfg-if/.cargo-checksum.json new file mode 100644 index 00000000000000..78d3f3fbcf3746 --- /dev/null +++ b/vendor/cfg-if/.cargo-checksum.json @@ -0,0 +1 @@ +{"files":{".cargo_vcs_info.json":"65840ba044457862e25b2d7d53f4a2de232adf933bd31aba8f2bd6a1f51a6881",".github/dependabot.yml":"828e3ecefdc555a5210a5bdffd5621ef3625ceb35c7fc91a0b4faef6f9921b75",".github/workflows/main.yaml":"6612a51b1f1479eabac7d3bd14aa609811d4afd2df2d454b9a1f6d6f3748f5b2",".github/workflows/publish.yaml":"1417805078704eecbaeea8611c5a44df575bfe1908d6969d909224a6e5e26ca8","CHANGELOG.md":"08ba7340057565b338afaa29b36bd2a1c46f5495b043bc49d12230a6a82d5f76","Cargo.lock":"26922b9384045e5a3d496f21ec7c355da585d0caa1d13b887b634527d36fc450","Cargo.toml":"281d508beb1fe3927cf03d3f2f8c9a5117b1e4fe97ae21b9026cf318e8c35273","Cargo.toml.orig":"5a17ee17da78f5179373b8324a1180e71efe2bcf3e3c9ca18c1bdb1e3faa9792","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"378f5840b258e2779c39418f3f2d7b2ba96f1c7917dd6be0713f88305dbda397","README.md":"cd565d563a2c70d197bb6fee1678e122214e22af7bdb046b80f52c1d953cd72f","src/lib.rs":"c09723e0890d15810374009e96b20bf0eb2f65f383006516f34db36240835c85","tests/xcrate.rs":"bcec148e69db81b1a618bdd6f96a25d9a0442e6ecc692fe28f1206d9bffc006a"},"package":"9330f8b2ff13f34540b44e946ef35111825727b38d33286ef986142615121801"} \ No newline at end of file diff --git a/vendor/cfg-if/.cargo_vcs_info.json b/vendor/cfg-if/.cargo_vcs_info.json new file mode 100644 index 00000000000000..d4bec315ac6e9d --- /dev/null +++ b/vendor/cfg-if/.cargo_vcs_info.json @@ -0,0 +1,6 @@ +{ + "git": { + "sha1": "3510ca6abea34cbbc702509a4e50ea9709925eda" + }, + "path_in_vcs": "" +} \ No newline at end of file diff --git a/vendor/cfg-if/.github/dependabot.yml b/vendor/cfg-if/.github/dependabot.yml new file mode 100644 index 00000000000000..36e4ff06363a32 --- /dev/null +++ b/vendor/cfg-if/.github/dependabot.yml @@ -0,0 +1,14 @@ +version: 2 +updates: + - package-ecosystem: cargo + directory: "/" + schedule: + interval: daily + time: "08:00" + open-pull-requests-limit: 10 + + - package-ecosystem: github-actions + directory: "/" + schedule: + interval: weekly + open-pull-requests-limit: 3 diff --git a/vendor/cfg-if/.github/workflows/main.yaml b/vendor/cfg-if/.github/workflows/main.yaml new file mode 100644 index 00000000000000..7288a62d253538 --- /dev/null +++ b/vendor/cfg-if/.github/workflows/main.yaml @@ -0,0 +1,48 @@ +name: CI +on: [push, pull_request] + +permissions: + contents: read + +env: + RUSTDOCFLAGS: -Dwarnings + RUSTFLAGS: -Dwarnings + +jobs: + test: + name: Test + runs-on: ubuntu-latest + strategy: + matrix: + rust: + - "1.32" # msrv + - stable + - beta + - nightly + steps: + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - name: Install Rust ${{ matrix.rust }} + run: | + rustup self update + rustup update ${{ matrix.rust }} + rustup default ${{ matrix.rust }} + rustc -vV + - name: Run tests + run: | + set -eux + # Remove `-Dwarnings` at the MSRV since lints may be different + [ "${{ matrix.rust }}" = "1.32" ] && export RUSTFLAGS="--cfg msrv_test" + cargo test + + rustfmt: + name: Rustfmt + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - name: Install Rust Stable + run: | + rustup update stable + rustup default stable + rustup component add rustfmt + - name: Run rustfmt + run: cargo fmt -- --check diff --git 
a/vendor/cfg-if/.github/workflows/publish.yaml b/vendor/cfg-if/.github/workflows/publish.yaml new file mode 100644 index 00000000000000..248e3ccdd9ad6e --- /dev/null +++ b/vendor/cfg-if/.github/workflows/publish.yaml @@ -0,0 +1,25 @@ +name: Release-plz + +permissions: + pull-requests: write + contents: write + +on: + push: { branches: [main] } + +jobs: + release-plz: + name: Release-plz + runs-on: ubuntu-24.04 + steps: + - name: Checkout repository + uses: actions/checkout@v5 + with: + fetch-depth: 0 + - name: Install Rust (rustup) + run: rustup update nightly --no-self-update && rustup default nightly + - name: Run release-plz + uses: MarcoIeni/release-plz-action@v0.5 + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + CARGO_REGISTRY_TOKEN: ${{ secrets.CARGO_REGISTRY_TOKEN }} diff --git a/vendor/cfg-if/CHANGELOG.md b/vendor/cfg-if/CHANGELOG.md new file mode 100644 index 00000000000000..55b54ece74c2e9 --- /dev/null +++ b/vendor/cfg-if/CHANGELOG.md @@ -0,0 +1,29 @@ +# Changelog + +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), +and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). + +## [Unreleased] + +## [1.0.4](https://github.com/rust-lang/cfg-if/compare/v1.0.3...v1.0.4) - 2025-10-15 + +- Support `cfg(true)` and `cfg(false)` ([#99](https://github.com/rust-lang/cfg-if/pull/99)) +- Set and test a MSRV of 1.32 +- Have a single top-level rule + +## [1.0.3](https://github.com/rust-lang/cfg-if/compare/v1.0.2...v1.0.3) - 2025-08-19 + +- Revert "Remove `@__identity` rule." + +## [1.0.2](https://github.com/rust-lang/cfg-if/compare/v1.0.1...v1.0.2) - 2025-08-19 + +- Remove `@__identity` rule. + +## [1.0.1](https://github.com/rust-lang/cfg-if/compare/v1.0.0...v1.0.1) - 2025-06-09 + +- Remove `compiler-builtins` from `rustc-dep-of-std` dependencies +- Remove redundant configuration from Cargo.toml +- More readable formatting and identifier names. ([#39](https://github.com/rust-lang/cfg-if/pull/39)) +- Add expanded example to readme ([#38](https://github.com/rust-lang/cfg-if/pull/38)) diff --git a/vendor/cfg-if/Cargo.lock b/vendor/cfg-if/Cargo.lock new file mode 100644 index 00000000000000..57166796745a7e --- /dev/null +++ b/vendor/cfg-if/Cargo.lock @@ -0,0 +1,16 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. +[[package]] +name = "cfg-if" +version = "1.0.4" +dependencies = [ + "rustc-std-workspace-core 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "rustc-std-workspace-core" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[metadata] +"checksum rustc-std-workspace-core 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "aa9c45b374136f52f2d6311062c7146bff20fec063c3f5d46a410bd937746955" diff --git a/vendor/cfg-if/Cargo.toml b/vendor/cfg-if/Cargo.toml new file mode 100644 index 00000000000000..450f7a2df1e06c --- /dev/null +++ b/vendor/cfg-if/Cargo.toml @@ -0,0 +1,47 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies. +# +# If you are reading this file be aware that the original Cargo.toml +# will likely look very different (and much more reasonable). 
+# See Cargo.toml.orig for the original contents. + +[package] +edition = "2018" +rust-version = "1.32" +name = "cfg-if" +version = "1.0.4" +authors = ["Alex Crichton "] +build = false +autolib = false +autobins = false +autoexamples = false +autotests = false +autobenches = false +description = """ +A macro to ergonomically define an item depending on a large number of #[cfg] +parameters. Structured like an if-else chain, the first matching branch is the +item that gets emitted. +""" +readme = "README.md" +license = "MIT OR Apache-2.0" +repository = "https://github.com/rust-lang/cfg-if" + +[features] +rustc-dep-of-std = ["core"] + +[lib] +name = "cfg_if" +path = "src/lib.rs" + +[[test]] +name = "xcrate" +path = "tests/xcrate.rs" + +[dependencies.core] +version = "1.0.0" +optional = true +package = "rustc-std-workspace-core" diff --git a/vendor/cfg-if/LICENSE-APACHE b/vendor/cfg-if/LICENSE-APACHE new file mode 100644 index 00000000000000..16fe87b06e802f --- /dev/null +++ b/vendor/cfg-if/LICENSE-APACHE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/vendor/cfg-if/LICENSE-MIT b/vendor/cfg-if/LICENSE-MIT new file mode 100644 index 00000000000000..39e0ed6602151f --- /dev/null +++ b/vendor/cfg-if/LICENSE-MIT @@ -0,0 +1,25 @@ +Copyright (c) 2014 Alex Crichton + +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. diff --git a/vendor/cfg-if/README.md b/vendor/cfg-if/README.md new file mode 100644 index 00000000000000..d174b6eda69c5d --- /dev/null +++ b/vendor/cfg-if/README.md @@ -0,0 +1,56 @@ +# cfg-if + +[Documentation](https://docs.rs/cfg-if) + +A macro to ergonomically define an item depending on a large number of #[cfg] +parameters. Structured like an if-else chain, the first matching branch is the +item that gets emitted. + +```toml +[dependencies] +cfg-if = "1.0" +``` + +## Example + +```rust +cfg_if::cfg_if! 
{ + if #[cfg(unix)] { + fn foo() { /* unix specific functionality */ } + } else if #[cfg(target_pointer_width = "32")] { + fn foo() { /* non-unix, 32-bit functionality */ } + } else { + fn foo() { /* fallback implementation */ } + } +} + +fn main() { + foo(); +} +``` +The `cfg_if!` block above is expanded to: +```rust +#[cfg(unix)] +fn foo() { /* unix specific functionality */ } +#[cfg(all(target_pointer_width = "32", not(unix)))] +fn foo() { /* non-unix, 32-bit functionality */ } +#[cfg(not(any(unix, target_pointer_width = "32")))] +fn foo() { /* fallback implementation */ } +``` + +# License + +This project is licensed under either of + + * Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or + https://www.apache.org/licenses/LICENSE-2.0) + * MIT license ([LICENSE-MIT](LICENSE-MIT) or + https://opensource.org/licenses/MIT) + +at your option. + +### Contribution + +Unless you explicitly state otherwise, any contribution intentionally submitted +for inclusion in `cfg-if` by you, as defined in the Apache-2.0 license, shall be +dual licensed as above, without any additional terms or conditions. diff --git a/vendor/cfg-if/src/lib.rs b/vendor/cfg-if/src/lib.rs new file mode 100644 index 00000000000000..2c7414eb81c1ea --- /dev/null +++ b/vendor/cfg-if/src/lib.rs @@ -0,0 +1,212 @@ +//! A macro for defining `#[cfg]` if-else statements. +//! +//! The macro provided by this crate, `cfg_if`, is similar to the `if/elif` C +//! preprocessor macro by allowing definition of a cascade of `#[cfg]` cases, +//! emitting the implementation which matches first. +//! +//! This allows you to conveniently provide a long list `#[cfg]`'d blocks of code +//! without having to rewrite each clause multiple times. +//! +//! # Example +//! +//! ``` +//! cfg_if::cfg_if! { +//! if #[cfg(unix)] { +//! fn foo() { /* unix specific functionality */ } +//! } else if #[cfg(target_pointer_width = "32")] { +//! fn foo() { /* non-unix, 32-bit functionality */ } +//! } else { +//! fn foo() { /* fallback implementation */ } +//! } +//! } +//! +//! # fn main() {} +//! ``` + +#![no_std] +#![doc(html_root_url = "https://docs.rs/cfg-if")] +#![deny(missing_docs)] +#![cfg_attr(test, allow(unexpected_cfgs))] // we test with features that do not exist + +/// The main macro provided by this crate. See crate documentation for more +/// information. +#[macro_export] +macro_rules! cfg_if { + ( + if #[cfg( $($i_meta:tt)+ )] { $( $i_tokens:tt )* } + $( + else if #[cfg( $($ei_meta:tt)+ )] { $( $ei_tokens:tt )* } + )* + $( + else { $( $e_tokens:tt )* } + )? + ) => { + $crate::cfg_if! { + @__items () ; + (( $($i_meta)+ ) ( $( $i_tokens )* )), + $( + (( $($ei_meta)+ ) ( $( $ei_tokens )* )), + )* + $( + (() ( $( $e_tokens )* )), + )? + } + }; + + // Internal and recursive macro to emit all the items + // + // Collects all the previous cfgs in a list at the beginning, so they can be + // negated. After the semicolon are all the remaining items. + (@__items ( $( ($($_:tt)*) , )* ) ; ) => {}; + ( + @__items ( $( ($($no:tt)+) , )* ) ; + (( $( $($yes:tt)+ )? ) ( $( $tokens:tt )* )), + $( $rest:tt , )* + ) => { + // Emit all items within one block, applying an appropriate #[cfg]. The + // #[cfg] will require all `$yes` matchers specified and must also negate + // all previous matchers. + #[cfg(all( + $( $($yes)+ , )? + not(any( $( $($no)+ ),* )) + ))] + // Subtle: You might think we could put `$( $tokens )*` here. But if + // that contains multiple items then the `#[cfg(all(..))]` above would + // only apply to the first one. 
By wrapping `$( $tokens )*` in this + // macro call, we temporarily group the items into a single thing (the + // macro call) that will be included/excluded by the `#[cfg(all(..))]` + // as appropriate. If the `#[cfg(all(..))]` succeeds, the macro call + // will be included, and then evaluated, producing `$( $tokens )*`. See + // also the "issue #90" test below. + $crate::cfg_if! { @__temp_group $( $tokens )* } + + // Recurse to emit all other items in `$rest`, and when we do so add all + // our `$yes` matchers to the list of `$no` matchers as future emissions + // will have to negate everything we just matched as well. + $crate::cfg_if! { + @__items ( $( ($($no)+) , )* $( ($($yes)+) , )? ) ; + $( $rest , )* + } + }; + + // See the "Subtle" comment above. + (@__temp_group $( $tokens:tt )* ) => { + $( $tokens )* + }; +} + +#[cfg(test)] +mod tests { + cfg_if! { + if #[cfg(test)] { + use core::option::Option as Option2; + fn works1() -> Option2 { Some(1) } + } else { + fn works1() -> Option { None } + } + } + + cfg_if! { + if #[cfg(foo)] { + fn works2() -> bool { false } + } else if #[cfg(test)] { + fn works2() -> bool { true } + } else { + fn works2() -> bool { false } + } + } + + cfg_if! { + if #[cfg(foo)] { + fn works3() -> bool { false } + } else { + fn works3() -> bool { true } + } + } + + cfg_if! { + if #[cfg(test)] { + use core::option::Option as Option3; + fn works4() -> Option3 { Some(1) } + } + } + + cfg_if! { + if #[cfg(foo)] { + fn works5() -> bool { false } + } else if #[cfg(test)] { + fn works5() -> bool { true } + } + } + + // In issue #90 there was a bug that caused only the first item within a + // block to be annotated with the produced `#[cfg(...)]`. In this example, + // it meant that the first `type _B` wasn't being omitted as it should have + // been, which meant we had two `type _B`s, which caused an error. See also + // the "Subtle" comment above. + cfg_if!( + if #[cfg(target_os = "no-such-operating-system-good-sir!")] { + type _A = usize; + type _B = usize; + } else { + type _A = i32; + type _B = i32; + } + ); + + #[cfg(not(msrv_test))] + cfg_if! { + if #[cfg(false)] { + fn works6() -> bool { false } + } else if #[cfg(true)] { + fn works6() -> bool { true } + } else if #[cfg(false)] { + fn works6() -> bool { false } + } + } + + #[test] + fn it_works() { + assert!(works1().is_some()); + assert!(works2()); + assert!(works3()); + assert!(works4().is_some()); + assert!(works5()); + #[cfg(not(msrv_test))] + assert!(works6()); + } + + #[test] + #[allow(clippy::assertions_on_constants)] + fn test_usage_within_a_function() { + cfg_if! { + if #[cfg(debug_assertions)] { + // we want to put more than one thing here to make sure that they + // all get configured properly. + assert!(cfg!(debug_assertions)); + assert_eq!(4, 2 + 2); + } else { + assert!(works1().is_some()); + assert_eq!(10, 5 + 5); + } + } + } + + #[allow(dead_code)] + trait Trait { + fn blah(&self); + } + + #[allow(dead_code)] + struct Struct; + + impl Trait for Struct { + cfg_if! { + if #[cfg(feature = "blah")] { + fn blah(&self) { unimplemented!(); } + } else { + fn blah(&self) { unimplemented!(); } + } + } + } +} diff --git a/vendor/cfg-if/tests/xcrate.rs b/vendor/cfg-if/tests/xcrate.rs new file mode 100644 index 00000000000000..454e90f0dc891a --- /dev/null +++ b/vendor/cfg-if/tests/xcrate.rs @@ -0,0 +1,16 @@ +#![allow(unexpected_cfgs)] // `foo` doesn't exist + +cfg_if::cfg_if! 
{ + if #[cfg(foo)] { + fn works() -> bool { false } + } else if #[cfg(test)] { + fn works() -> bool { true } + } else { + fn works() -> bool { false } + } +} + +#[test] +fn smoke() { + assert!(works()); +} diff --git a/vendor/clang-sys/.cargo-checksum.json b/vendor/clang-sys/.cargo-checksum.json new file mode 100644 index 00000000000000..623f070c77d279 --- /dev/null +++ b/vendor/clang-sys/.cargo-checksum.json @@ -0,0 +1 @@ +{"files":{".cargo_vcs_info.json":"a2221882ba4c52abe2344a74a26f535df2641864f5a49435975f23ae2fcd5b3a",".github/workflows/ci.yml":"b5dc986d9f7ed68b8f3022a7f9e71739d7f297b4b6719c2913c1c77e3b9d93c5",".github/workflows/ssh.yml":"d1b12ff03ea5cd5814d5d2c0563d5291e9e847de13be7bedaf411c7f97f20953","CHANGELOG.md":"9db56336c2fd1dddbacc861f42b697f218a9dccb663aaa1ad042cfe940a0c232","Cargo.toml":"db6730e270afa1f936b6f14264be0b0aaa506b88d91ab4805cf270595f3b568b","Cargo.toml.orig":"c6241039bc28f47561154b404d3fe28fe4b582977c8e6ca9288305171a7968f8","LICENSE.txt":"3ddf9be5c28fe27dad143a5dc76eea25222ad1dd68934a047064e56ed2fa40c5","README.md":"ca106237bdacd8aee43af3bc2ad94771b1c1fe029a7d6f622989c00d5a74f4eb","build.rs":"321ac62c88932a3831be9c96f526a21f65ea22df01639946bd0033d1bcf8900e","build/common.rs":"c827ffc2761c4b96952334e35ff443198adfc86fbe2822c309dfe5ea1bcc8cc0","build/dynamic.rs":"c28adab4ea893d12f47d8b229c38a134a6553c654a1d243f37f7f03ed82e5723","build/macros.rs":"41eef7020d4c28ce70c71036009be4be5844572b26e32b840f671b924174475e","build/static.rs":"51316c6274c15f939fff637499163a7312c97d95cea6959825f1ca52af35a726","clippy.toml":"fcf54943ba571514b244cc098ce08671b4117167733e8107e799d533a12a2195","src/lib.rs":"dc1707cf08d65b2bf8e0b9f836f5c2e74af399ea10476a36238056ad1dcc926b","src/link.rs":"d12eda4e3f76f00168615b4cba67b0b1fff8e6dbb06df80302561baa9472eec3","src/support.rs":"4f5f2e76f9352b6b02a1519857de773b6ab064c7bdfab15bf63d4f712f0c7b61","tests/build.rs":"b9bc3b4af58ab815e9ef56538b58448f19ede42591078ef02c6ff9f946456315","tests/header.h":"1b15a686d1c06561960045a26c25a34d840f26c8246f2f5e630f993b69c7492c","tests/lib.rs":"7ddd85162a682328b4eea499526a14f4a841c10ac673a5871f02050b428231d4"},"package":"0b023947811758c97c59bf9d1c188fd619ad4718dcaa767947df1cadb14f39f4"} \ No newline at end of file diff --git a/vendor/clang-sys/.cargo_vcs_info.json b/vendor/clang-sys/.cargo_vcs_info.json new file mode 100644 index 00000000000000..deb83a612063b5 --- /dev/null +++ b/vendor/clang-sys/.cargo_vcs_info.json @@ -0,0 +1,6 @@ +{ + "git": { + "sha1": "cf3874b2480b9ca12f367a54a4835dd2920847de" + }, + "path_in_vcs": "" +} \ No newline at end of file diff --git a/vendor/clang-sys/.github/workflows/ci.yml b/vendor/clang-sys/.github/workflows/ci.yml new file mode 100644 index 00000000000000..08c2cba46f2a76 --- /dev/null +++ b/vendor/clang-sys/.github/workflows/ci.yml @@ -0,0 +1,56 @@ +name: CI + +on: + push: + branches: + - master + pull_request: + branches: + - master + +jobs: + test: + name: Test + runs-on: ${{ matrix.os }} + strategy: + matrix: + os: [macos-latest, ubuntu-latest, windows-latest] + clang: [["14.0", "clang_14_0"]] + rust: ["1.60.0"] + steps: + - name: Checkout Repository + uses: actions/checkout@v4 + # LLVM and Clang + - name: Install LLVM and Clang + uses: KyleMayes/install-llvm-action@v2.0.3 + with: + version: ${{ matrix.clang[0] }} + directory: ${{ runner.temp }}/llvm-${{ matrix.clang[0] }} + # Rust + - name: Install Rust + uses: dtolnay/rust-toolchain@stable + with: + toolchain: ${{ matrix.rust }} + # Test + - name: Cargo Test (Dynamic) + run: cargo test --verbose --features ${{ matrix.clang[1] 
}} -- --nocapture + - name: Cargo Test (Runtime) + run: cargo test --verbose --features "${{ matrix.clang[1] }} runtime" -- --nocapture + test-bindgen: + name: Test (bindgen) + runs-on: ubuntu-latest + steps: + - name: Checkout Repository + uses: actions/checkout@v4 + # LLVM and Clang + - name: Install LLVM and Clang + uses: KyleMayes/install-llvm-action@v2.0.3 + with: + version: 14 + directory: ${{ runner.temp }}/llvm + # Rust + - name: Install Rust + uses: dtolnay/rust-toolchain@stable + # Test + - name: Cargo Run (bindgen-test) + run: cargo run --manifest-path bindgen-test/Cargo.toml diff --git a/vendor/clang-sys/.github/workflows/ssh.yml b/vendor/clang-sys/.github/workflows/ssh.yml new file mode 100644 index 00000000000000..188fa3d349fed4 --- /dev/null +++ b/vendor/clang-sys/.github/workflows/ssh.yml @@ -0,0 +1,40 @@ +name: SSH + +on: + workflow_dispatch: + inputs: + os: + description: "Operating System" + required: true + default: "ubuntu-latest" + +jobs: + ssh: + name: SSH + runs-on: ${{ matrix.os }} + strategy: + matrix: + os: [macos-latest, ubuntu-latest, windows-latest] + clang: [["13.0", "clang_13_0"]] + rust: ["1.60.0"] + steps: + - name: Checkout Repository + uses: actions/checkout@v2 + if: github.event.inputs.os == matrix.os + # LLVM and Clang + - name: Install LLVM and Clang + uses: KyleMayes/install-llvm-action@v2.0.3 + if: github.event.inputs.os == matrix.os + with: + version: ${{ matrix.clang[0] }} + directory: ${{ runner.temp }}/llvm-${{ matrix.clang[0] }} + # Rust + - name: Install Rust + uses: dtolnay/rust-toolchain@stable + if: github.event.inputs.os == matrix.os + with: + toolchain: ${{ matrix.rust }} + # SSH + - name: Enable SSH + uses: mxschmitt/action-tmate@v3 + if: github.event.inputs.os == matrix.os diff --git a/vendor/clang-sys/CHANGELOG.md b/vendor/clang-sys/CHANGELOG.md new file mode 100644 index 00000000000000..dc6c75e02dfd30 --- /dev/null +++ b/vendor/clang-sys/CHANGELOG.md @@ -0,0 +1,552 @@ +## [1.8.1] - 2024-05-28 + +### Added +- Added support for `clang` 18.0.x + +### Fixed +- Improve DLL search on Windows to take target architecture into account (e.g., ARM64 vs x86-64) +- Improved detection of `libclang` installed with Visual Studio on Windows + +## [1.8.0] - 2024-05-26 + +### Changed +- Bumped minimum supported Rust version (MSRV) to 1.60.0 +- Added error logging when `CLANG_PATH` set but it isn't a full path to an executable +- Removed reference to `libclang` 3.5 in error message for attempting to call an unsupported function + +### Added +- Added `libcpp` Cargo feature which enables linking to `libc++` instead of `libstdc++` when linking to `libclang` statically on Linux or Haiku + +### Fixed +- Fixed handling of paths that contain characters that have special meaning in +glob patterns (e.g., `[` or `]`) +- Fixed `Clang::find` to support both the `-target` and `--target` arguments +when using target-prefixed `clang` binaries + +## [1.7.0] - 2023-12-31 + +### Added +- Added support for `clang` 17.0.x + +## [1.6.1] - 2023-03-29 + +### Fixed +- Improved error message when calling a `libclang` function that is not supported by the loaded `libclang` instance (https://github.com/rust-lang/rust-bindgen/issues/2446) + +## [1.6.0] - 2023-02-18 + +### Changed +- MinGW directories are not searched for `libclang` instances on Windows when +compiling for an MSVC target +- Bumped minimum supported Rust version (MSRV) to 1.51.0 +- Changed Windows search directory preferences (`libclang` instances from +Visual Studio installs are now the lowest priority rather than 
the second +highest) + +## ~~[1.5.1] - 2023-02-05~~ (YANKED) + +### Changed +- MinGW directories are not searched for `libclang` instances on Windows when +compiling for an MSVC target + +## ~~[1.5.0] - 2023-02-05~~ (YANKED) + +### Changed +- Bumped minimum supported Rust version (MSRV) to 1.51.0 +- Changed Windows search directory preferences (`libclang` instances from +Visual Studio installs are now the lowest priority rather than the second +highest) + +### Added +- Added additional support for `clang` 16.0.x + +## [1.4.0] - 2022-09-22 + +### Changed +- The value of an `EntityKind` enum variant +(`EntityKind::CXCursor_TranslationUnit`) has been updated for Clang 15.0 and +later to match the +[breaking change made in `libclang`](https://github.com/llvm/llvm-project/commit/bb83f8e70bd1d56152f02307adacd718cd67e312#diff-674613a0e47f4e66cc19061e28e3296d39be2d124dceefb68237b30b8e241e7c) + +### Added +- Added support for `clang` 16.0.x +- Added support for `clang` 15.0.x +- Added support for `clang` 14.0.x + +## [1.3.3] - 2022-05-28 + +### Fixed +- Fixed `Clang::find` to check that `CLANG_PATH` is an executable file before +selecting it + +## [1.3.2] - 2022-05-18 + +### Added +- Added support for illumos and derivatives + +## [1.3.1] - 2022-02-03 + +### Added +- Added missing `clang_getToken` function + +## [1.3.0] - 2021-10-31 + +### Added +- Added support for `clang` 13.0.x +- Added support for `clang` 12.0.x +- Added support for the Haiku operating system + +## [1.2.2] - 2021-09-02 + +### Fixed +- Fixed handling of paths that contain characters that have special meaning in +glob patterns (e.g., `[` or `]`) + +## [1.2.1] - 2021-08-24 + +### Changed +- Updated build script to check the install location used by the +[Scoop](https://scoop.sh/) command-line installer on Windows + +### Fixed +- Updated build script to support environments where the `PATH` environment +variable is not set + +## [1.2.0] - 2021-04-08 + +### Changed +- Changed `Clang::find` to prefer target-prefixed binaries when a `-target` +argument is provided (e.g., if the arguments `-target` and +`x86_64-unknown-linux-gnu` are provided, a target-prefixed Clang executable +such as `x86_64-unknown-linux-gnu-clang` will be preferred over a non-target +prefixed Clang executable) + +### Fixed +- Fixed build script to split paths in environment variables (e.g., +`LD_LIBRARY_PATH`) using the appropriate separator for the platform (previously +`:` was used as the separator but some platforms such as Windows use `;`) + +## [1.1.1] - 2021-02-19 + +### Changed +- Bumped `libloading` version to `0.7` + +## [1.1.0] - 2021-02-09 + +### Changed +- Added Visual Studio LLVM component directory to search paths on Windows +([#121](https://github.com/KyleMayes/clang-sys/issues/121)) + +### Added +- Added support for `clang` 11.0.x + +## [1.0.3] - 2020-11-19 + +### Fixed +- Fixed `Clang::find` panicking when `llvm-config` or `xcode-build` don't output anything to `stdout` + +## [1.0.2] - 2020-11-17 + +### Fixed +- Fixed `Clang::find` to properly search directories returned by the +`llvm-config --bindir` and `xcodebuild -find clang` commands +- Improved version selection algorithm in the case where there are multiple +instances of `libclang` with the highest version found; previously the lowest +priority instance would be selected instead of the highest priority instance +(e.g., the versions found by searching the fallback directories were preferred +over the versions found by searching the `llvm-config --prefix` directory) + +## [1.0.1] - 2020-10-01 
+ +### Changed +- Improved panic error message when calling an unloaded function + +## [1.0.0] - 2020-07-14 + +### Changed +- Bumped `libloading` version to `0.6.0` +- Updated build script to not print warnings about failures to execute +`llvm-config` and `xcode-select` unless an instance of `libclang` is not found + +### Added +- Added support for `clang` 10.0.x + +### Removed +- Removed `gte_clang_*` Cargo features (these were an implementation detail) + +## [0.29.3] - 2020-03-31 + +### Added +- Added ability to determine version of runtime-linked instance of `libclang` + +## [0.29.2] - 2020-03-09 + +### Added +- Revert unnecessary increase of minimum version of `libc` and `libloading` + +## [0.29.2] - 2020-03-09 + +### Added +- Revert unnecessary increase of minimum version of `libc` and `libloading` + +## [0.29.1] - 2020-03-06 + +### Added +- Added support for finding instances of `libclang` matching `libclang-*.so.*` + +## [0.29.0] - 2020-02-17 + +### Changed +- Wrapped function pointer fields in `Option` in the `CXCursorAndRangeVisitor` +and `IndexerCallbacks` structs (to permit nullability and to avoid undefined +behavior caused by `Default` implementations for these structs which returns a +zeroed value) + +### Added +- Added support for `clang` 9.0.x +- Added missing `CXCallingConv_AArch64VectorCall` variant to `CXCallingConv` enum +- Added missing `clang_CompileCommand_getNumMappedSources` function + +## [0.28.1] - 2019-07-28 + +### Changed +- Bumped `glob` version to `0.3.0` +- Improved error message when an invocation of an executable is not successful +- Allowed `LIBCLANG_PATH` to refer to a specific `libclang` instance (e.g., + `/usr/local/lib/libclang.so.10`) + +### Fixed +- Fixed + [`libclang-cpp`](https://github.com/llvm-mirror/clang/commit/90d6722bdcbc2af52306f7e948c556ad6185ac48) + being linked instead of `libclang` + +## [0.28.0] - 2019-02-17 + +### Changed +- Changed `llvm-config` to be first search candidate on macOS + +### Added +- Added support for `clang` 8.0.x + +### Removed +- Removed `assert-minimum` feature +- Removed version detection for libraries without versions embedded in the filename + +## [0.27.0] - 2019-01-10 + +### Changed +- Added version detection for libraries without versions embedded in the filename + +### Added +- Added `assert-minimum` feature (see `README.md` for details) + +## [0.26.4] - 2018-12-29 + +### Changed +- Added shared library path to `SharedLibrary` struct + +## [0.26.3] - 2018-11-14 + +### Changed +- Disable default features of `libc` dependency + +## [0.26.2] - 2018-11-03 + +### Fixed +- Fixed dynamic linking on macOS + +## [0.26.1] - 2018-10-10 + +### Fixed +- Fixed support for finding libraries in `bin` directories on Windows + +## [0.26.0] - 2018-10-07 + +### Changed +- Added support for finding libraries with version suffixes on Linux when using runtime linking (e.g., `libclang.so.1`) + +## [0.25.0] - 2018-10-06 + +### Changed +- Added support for versioned libraries on BSDs + +## [0.24.0] - 2018-09-15 + +### Changed +- Reworked finding of libraries (see `README.md` for details) + +### Added +- Added support for `clang` 7.0.x + +## [0.23.0] - 2018-06-16 + +### Changed +- Changed `Clang::find` to skip dynamic libraries for an incorrect architecture on Windows + +## [0.22.0] - 2018-03-11 + +### Added +- Added support for `clang` 6.0.x +- Bumped `libc` version to `0.2.39` +- Bumped `libloading` version to `0.5.0` + +## [0.21.2] - 2018-02-17 + +### Changed +- Added original errors to error messages +- Added support for searching 
for libraries in `LD_LIBRARY_PATH` directories + +## [0.21.1] - 2017-11-24 + +### Changed +- Improved finding of versioned libraries (e.g., `libclang-3.9.so`) + +### Fixed +* Fixed compilation failures on the beta and nightly channels caused by a [compiler bug](https://github.com/KyleMayes/clang-sys/pull/69) + +## [0.21.0] - 2017-10-11 + +### Changed +* Replaced `bitflags` usage with constants which avoids crashes on 32-bit Linux platforms + +## [0.20.1] - 2017-09-16 + +### Fixed +- Fixed static linking + +## [0.20.0] - 2017-09-14 + +### Added +- Added support for `clang` 5.0.x +- Added `clang` as a link target of this package +- Added dummy implementations of `is_loaded` for builds with the `static` Cargo feature enabled + +## [0.19.0] - 2017-07-02 + +### Changed +- Bumped `bitflags` version to `0.9.1` +- Added `args` parameter to `Clang::new` function which passes arguments to the Clang executable + +## [0.18.0] - 2017-05-16 + +### Changed +- Improved finding of versioned libraries (e.g., `libclang.so.3.9`) + +## [0.17.0] - 2017-05-08 + +### Changed +- Changed storage type of include search paths from `Vec` to `Option>` + +## [0.16.0] - 2017-05-02 + +### Changed +- Bumped `libloading` version to `0.4.0` + +## [0.15.2] - 2017-04-28 + +### Fixed +- Fixed finding of `libclang.so.1` on Linux + +## [0.15.1] - 2017-03-29 + +### Fixed +- Fixed static linking when libraries are in [different directories](https://github.com/KyleMayes/clang-sys/issues/50) + +## [0.15.0] - 2017-03-13 + +### Added +- Added support for `clang` 4.0.x + +### Changed +- Changed functions in the `Functions` struct to be `unsafe` (`runtime` feature only) +- Changed `Clang::find` method to ignore directories and non-executable files +- Changed `Clang::find` to skip dynamic libraries for an incorrect architecture on FreeBSD and Linux +- Bumped `bitflags` version to `0.7.0` + +## [0.14.0] - 2017-01-30 + +### Changed +- Changed all enum types from tuple structs to raw integers to avoid + [segmentation faults](https://github.com/rust-lang/rust/issues/39394) on some platforms + +## [0.13.0] - 2017-01-29 + +### Changed +- Changed all opaque pointers types from tuple structs to raw pointers to avoid + [segmentation faults](https://github.com/rust-lang/rust/issues/39394) on some platforms + +## [0.12.0] - 2016-12-13 + +### Changed +- Altered the runtime linking API to allow for testing the presence of functions + +## [0.11.1] - 2016-12-07 + +### Added +- Added support for linking to Clang on Windows from unofficial LLVM sources such as MSYS and MinGW + +## [0.11.0] - 2016-10-07 + +### Changed +- Changed all enums from Rust enums to typed constants to avoid + [undefined behavior](https://github.com/KyleMayes/clang-sys/issues/42) + +## [0.10.1] - 2016-08-21 + +### Changed +- Changed static linking on FreeBSD and macOS to link against `libc++` instead of `libstd++` + +## [0.10.0] - 2016-08-01 + +### Changed +- Added `runtime` Cargo feature that links to `libclang` shared library at runtime +- Added `from_raw` method to `CXTypeLayoutError` enum +- Added implementations of `Deref` for opaque FFI structs +- Changed `Default` implementations for structs to zero out the struct + +## [0.9.0] - 2016-07-21 + +### Added +- Added documentation bindings + +## [0.8.1] - 2016-07-20 + +### Changed +- Added `CLANG_PATH` environment variable for providing a path to `clang` executable +- Added usage of `llvm-config` to search for `clang` +- Added usage of `xcodebuild` to search for `clang` on macOS + +## [0.8.0] - 2016-07-18 + +### Added +- Added 
support for `clang` 3.9.x + +### Changed +- Bumped `libc` version to `0.2.14` + +### Fixed +- Fixed `LIBCLANG_PATH` usage on Windows to search both the `bin` and `lib` directories +- Fixed search path parsing on macOS +- Fixed search path parsing on Windows +- Fixed default search path ordering on macOS + +## [0.7.2] - 2016-06-17 + +### Fixed +- Fixed finding of `clang` executables when system has executables matching `clang-*` + (e.g., `clang-format`) + +## [0.7.1] - 2016-06-10 + +### Changed +- Bumped `libc` version to `0.2.12` + +### Fixed +- Fixed finding of `clang` executables suffixed by their version (e.g., `clang-3.5`) + +## [0.7.0] - 2016-05-31 + +### Changed +- Changed `Clang` struct `version` field type to `Option` + +## [0.6.0] - 2016-05-26 + +### Added +- Added `support` module + +### Fixed +- Fixed `libclang` linking on FreeBSD +- Fixed `libclang` linking on Windows with the MSVC toolchain +- Improved `libclang` static linking + +## [0.5.4] - 20160-5-19 + +### Changed +- Added implementations of `Default` for FFI structs + +## [0.5.3] - 2016-05-17 + +### Changed +- Bumped `bitflags` version to `0.7.0` + +## [0.5.2] - 2016-05-12 + +### Fixed +- Fixed `libclang` static linking + +## [0.5.1] - 2016-05-10 + +### Fixed +- Fixed `libclang` linking on macOS +- Fixed `libclang` linking on Windows + +## [0.5.0] - 2016-05-10 + +### Removed +- Removed `rustc_version` dependency +- Removed support for `LIBCLANG_STATIC` environment variable + +### Changed +- Bumped `bitflags` version to `0.6.0` +- Bumped `libc` version to `0.2.11` +- Improved `libclang` search path +- Improved `libclang` static linking + +## [0.4.2] - 2016-04-20 + +### Changed +- Bumped `libc` version to `0.2.10` + +## [0.4.1] - 2016-04-02 + +### Changed +- Bumped `libc` version to `0.2.9` +- Bumped `rustc_version` version to `0.1.7` + +## [0.4.0] - 2016-03-28 + +### Removed +- Removed support for `clang` 3.4.x + +## [0.3.1] - 2016-03-21 + +### Added +- Added support for finding `libclang` + +## [0.3.0] - 2016-03-16 + +### Removed +- Removed build system types and functions + +### Added +- Added support for `clang` 3.4.x + +### Changed +- Bumped `bitflags` version to `0.5.0` +- Bumped `libc` version to `0.2.8` + +## [0.2.1] - 2016-02-13 + +### Changed +- Simplified internal usage of conditional compilation +- Bumped `bitflags` version to `0.4.0` +- Bumped `libc` version to `0.2.7` +- Bumped `rustc_version` version to `0.1.6` + +## [0.2.0] - 2016-02-12 + +### Added +- Added support for `clang` 3.8.x + +## [0.1.2] - 2015-12-29 + +### Added +- Added derivations of `Debug` for FFI structs + +## [0.1.1] - 2015-12-26 + +### Added +- Added derivations of `PartialOrd` and `Ord` for FFI enums + +## [0.1.0] - 2015-12-22 +- Initial release diff --git a/vendor/clang-sys/Cargo.toml b/vendor/clang-sys/Cargo.toml new file mode 100644 index 00000000000000..ae9a8042b9cd0b --- /dev/null +++ b/vendor/clang-sys/Cargo.toml @@ -0,0 +1,77 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies. +# +# If you are reading this file be aware that the original Cargo.toml +# will likely look very different (and much more reasonable). +# See Cargo.toml.orig for the original contents. 
+ +[package] +edition = "2021" +name = "clang-sys" +version = "1.8.1" +authors = ["Kyle Mayes "] +build = "build.rs" +links = "clang" +description = "Rust bindings for libclang." +documentation = "https://docs.rs/clang-sys" +readme = "README.md" +license = "Apache-2.0" +repository = "https://github.com/KyleMayes/clang-sys" + +[package.metadata.docs.rs] +features = [ + "clang_18_0", + "runtime", +] + +[dependencies.glob] +version = "0.3" + +[dependencies.libc] +version = "0.2.39" +default-features = false + +[dependencies.libloading] +version = "0.8" +optional = true + +[dev-dependencies.glob] +version = "0.3" + +[dev-dependencies.lazy_static] +version = "1" + +[dev-dependencies.tempfile] +version = ">=3.0.0, <3.7.0" + +[build-dependencies.glob] +version = "0.3" + +[features] +clang_10_0 = ["clang_9_0"] +clang_11_0 = ["clang_10_0"] +clang_12_0 = ["clang_11_0"] +clang_13_0 = ["clang_12_0"] +clang_14_0 = ["clang_13_0"] +clang_15_0 = ["clang_14_0"] +clang_16_0 = ["clang_15_0"] +clang_17_0 = ["clang_16_0"] +clang_18_0 = ["clang_17_0"] +clang_3_5 = [] +clang_3_6 = ["clang_3_5"] +clang_3_7 = ["clang_3_6"] +clang_3_8 = ["clang_3_7"] +clang_3_9 = ["clang_3_8"] +clang_4_0 = ["clang_3_9"] +clang_5_0 = ["clang_4_0"] +clang_6_0 = ["clang_5_0"] +clang_7_0 = ["clang_6_0"] +clang_8_0 = ["clang_7_0"] +clang_9_0 = ["clang_8_0"] +libcpp = [] +runtime = ["libloading"] +static = [] diff --git a/vendor/clang-sys/LICENSE.txt b/vendor/clang-sys/LICENSE.txt new file mode 100644 index 00000000000000..75b52484ea471f --- /dev/null +++ b/vendor/clang-sys/LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/clang-sys/README.md b/vendor/clang-sys/README.md new file mode 100644 index 00000000000000..be86f940073764 --- /dev/null +++ b/vendor/clang-sys/README.md @@ -0,0 +1,116 @@ +# clang-sys + +[![Crate](https://img.shields.io/crates/v/clang-sys.svg)](https://crates.io/crates/clang-sys) +[![Documentation](https://docs.rs/clang-sys/badge.svg)](https://docs.rs/clang-sys) +[![CI](https://img.shields.io/github/actions/workflow/status/KyleMayes/clang-sys/ci.yml?branch=master)](https://github.com/KyleMayes/clang-sys/actions?query=workflow%3ACI) +![MSRV](https://img.shields.io/badge/MSRV-1.60.0-blue) + +Rust bindings for `libclang`. + +If you are interested in a somewhat idiomatic Rust wrapper for these bindings, see [`clang-rs`](https://github.com/KyleMayes/clang-rs). + +Released under the Apache License 2.0. 
+ +## [Documentation](https://docs.rs/clang-sys) + +Note that the documentation on https://docs.rs for this crate assumes usage of the `runtime` Cargo feature as well as the Cargo feature for the latest supported version of `libclang` (e.g., `clang_16_0`), neither of which are enabled by default. + +Due to the usage of the `runtime` Cargo feature, this documentation will contain some additional types and functions to manage a dynamically loaded `libclang` instance at runtime. + +Due to the usage of the Cargo feature for the latest supported version of `libclang`, this documentation will contain constants and functions that are not available in the oldest supported version of `libclang` (3.5). All of these types and functions have a documentation comment which specifies the minimum `libclang` version required to use the item. + +## Supported Versions + +To target a version of `libclang`, enable a Cargo features such as one of the following: + +* `clang_3_5` - requires `libclang` 3.5 or later +* `clang_3_6` - requires `libclang` 3.6 or later +* etc... +* `clang_17_0` - requires `libclang` 17.0 or later +* `clang_18_0` - requires `libclang` 18.0 or later + +If you do not enable one of these features, the API provided by `libclang` 3.5 will be available by default. + +**Note:** If you are using Clang 15.0 or later, you should enable the `clang_15_0` feature or a more recent version feature. Clang 15.0 introduced [a breaking change to the `EntityKind` enum](https://github.com/llvm/llvm-project/commit/bb83f8e70bd1d56152f02307adacd718cd67e312#diff-674613a0e47f4e66cc19061e28e3296d39be2d124dceefb68237b30b8e241e7c) which resulted in a mismatch between the values returned by `libclang` and the values for `EntityKind` defined by this crate in previous versions. + +## Dependencies + +By default, this crate will attempt to link to `libclang` dynamically. In this case, this crate depends on the `libclang` shared library (`libclang.so` on Linux, `libclang.dylib` on macOS, `libclang.dll` on Windows). If you want to link to `libclang` statically instead, enable the `static` Cargo feature. In this case, this crate depends on the LLVM and Clang static libraries. If you don't want to link to `libclang` at compiletime but instead want to load it at runtime, enable the `runtime` Cargo feature. + +These libraries can be either be installed as a part of Clang or downloaded [here](http://llvm.org/releases/download.html). + +**Note:** The downloads for LLVM and Clang 3.8 and later do not include the `libclang.a` static library. This means you cannot link to any of these versions of `libclang` statically unless you build it from source. + +### Versioned Dependencies + +This crate supports finding versioned instances of `libclang.so` (e.g.,`libclang-3.9.so`). In the case where there are multiple instances to choose from, this crate will prefer instances with higher versions. For example, the following instances of `libclang.so` are listed in descending order of preference: + +1. `libclang-4.0.so` +2. `libclang-4.so` +3. `libclang-3.9.so` +4. `libclang-3.so` +5. `libclang.so` + +**Note:** On BSD distributions, versioned instances of `libclang.so` matching the pattern `libclang.so.*` (e.g., `libclang.so.7.0`) are also included. + +**Note:** On Linux distributions when the `runtime` features is enabled, versioned instances of `libclang.so` matching the pattern `libclang.so.*` (e.g., `libclang.so.1`) are also included. 
+ +## Environment Variables + +The following environment variables, if set, are used by this crate to find the required libraries and executables: + +* `LLVM_CONFIG_PATH` **(compiletime)** - provides a full path to an `llvm-config` executable (including the executable itself [i.e., `/usr/local/bin/llvm-config-8.0`]) +* `LIBCLANG_PATH` **(compiletime)** - provides a path to a directory containing a `libclang` shared library or a full path to a specific `libclang` shared library +* `LIBCLANG_STATIC_PATH` **(compiletime)** - provides a path to a directory containing LLVM and Clang static libraries +* `CLANG_PATH` **(runtime)** - provides a path to a `clang` executable + +## Linking + +### Dynamic + +`libclang` shared libraries will be searched for in the following directories: + +* the directory provided by the `LIBCLANG_PATH` environment variable +* the `bin` and `lib` directories in the directory provided by `llvm-config --libdir` +* the directories provided by `LD_LIBRARY_PATH` environment variable +* a list of likely directories for the target platform (e.g., `/usr/local/lib` on Linux) +* **macOS only:** the toolchain directory in the directory provided by `xcode-select --print-path` + +On Linux, running an executable that has been dynamically linked to `libclang` may require you to add a path to `libclang.so` to the `LD_LIBRARY_PATH` environment variable. The same is true on OS X, except the `DYLD_LIBRARY_PATH` environment variable is used instead. + +On Windows, running an executable that has been dynamically linked to `libclang` requires that `libclang.dll` can be found by the executable at runtime. See [here](https://msdn.microsoft.com/en-us/library/7d83bc18.aspx) for more information. + +### Static + +The availability of `llvm-config` is not optional for static linking. Ensure that an instance of this executable can be found on your system's path or set the `LLVM_CONFIG_PATH` environment variable. The required LLVM and Clang static libraries will be searched for in the same way as shared libraries are searched for, except the `LIBCLANG_STATIC_PATH` environment variable is used in place of the `LIBCLANG_PATH` environment variable. + +**Note:** The `libcpp` Cargo feature can be used to enable linking to `libc++` instead of `libstd++` when linking to `libclang` statically on Linux or Haiku. + +#### Static Library Availability + +Linking to `libclang` statically on *nix systems requires that the `libclang.a` static library be available. +This library is usually *not* included in most distributions of LLVM and Clang (e.g., `libclang-dev` on Debian-based systems). +If you need to link to `libclang` statically then most likely the only consistent way to get your hands on `libclang.a` is to build it yourself. + +Here's an example of building the required static libraries and using them with `clang-sys`: + +```text +git clone git@github.com:llvm/llvm-project.git +cd llvm-project + +cmake -S llvm -B build -G Ninja -DLLVM_ENABLE_PROJECTS=clang -DLIBCLANG_BUILD_STATIC=ON +ninja -C build + +cd .. +git clone git@github.com:KyleMayes/clang-sys.git +cd clang-sys + +LLVM_CONFIG_PATH=../llvm-project/build/bin/llvm-config cargo test --features static +``` + +Linking to `libclang` statically requires linking a large number of big static libraries. +Using [`rust-lld` as a linker](https://blog.rust-lang.org/2024/05/17/enabling-rust-lld-on-linux.html) can greatly reduce linking times. 
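+
+As an illustration only (this helper is hypothetical and not provided by `clang-sys`), a consumer's `build.rs` could sanity-check the environment variables mentioned above before building with the `static` feature:
+
+```rust
+// Hypothetical helper, not part of clang-sys: fail early with a clear message
+// when the static-linking prerequisites described above are missing.
+use std::env;
+use std::path::Path;
+
+fn main() {
+    // `llvm-config` must be reachable, either on PATH or via LLVM_CONFIG_PATH.
+    if env::var_os("LLVM_CONFIG_PATH").is_none() {
+        println!(
+            "cargo:warning=LLVM_CONFIG_PATH is not set; \
+             `llvm-config` must be on PATH for static linking"
+        );
+    }
+
+    // If LIBCLANG_STATIC_PATH is set, it should point at a directory
+    // containing `libclang.a`.
+    if let Some(dir) = env::var_os("LIBCLANG_STATIC_PATH") {
+        let lib = Path::new(&dir).join("libclang.a");
+        assert!(lib.exists(), "LIBCLANG_STATIC_PATH does not contain libclang.a");
+    }
+}
+```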
+ +### Runtime + +The `clang_sys::load` function is used to load a `libclang` shared library for use in the thread in which it is called. The `clang_sys::unload` function will unload the `libclang` shared library. `clang_sys::load` searches for a `libclang` shared library in the same way one is searched for when linking to `libclang` dynamically at compiletime. diff --git a/vendor/clang-sys/build.rs b/vendor/clang-sys/build.rs new file mode 100644 index 00000000000000..4155b9781ec94f --- /dev/null +++ b/vendor/clang-sys/build.rs @@ -0,0 +1,77 @@ +// SPDX-License-Identifier: Apache-2.0 + +//! Finds `libclang` static or shared libraries and links to them. +//! +//! # Environment Variables +//! +//! This build script can make use of several environment variables to help it +//! find the required static or shared libraries. +//! +//! * `LLVM_CONFIG_PATH` - provides a path to an `llvm-config` executable +//! * `LIBCLANG_PATH` - provides a path to a directory containing a `libclang` +//! shared library or a path to a specific `libclang` shared library +//! * `LIBCLANG_STATIC_PATH` - provides a path to a directory containing LLVM +//! and Clang static libraries + +#![allow(unused_attributes)] + +use std::path::Path; + +#[macro_use] +#[path = "build/macros.rs"] +pub mod macros; + +#[path = "build/common.rs"] +pub mod common; +#[path = "build/dynamic.rs"] +pub mod dynamic; +#[path = "build/static.rs"] +pub mod r#static; + +/// Copies a file. +#[cfg(feature = "runtime")] +fn copy(source: &str, destination: &Path) { + use std::fs::File; + use std::io::{Read, Write}; + + let mut string = String::new(); + File::open(source) + .unwrap() + .read_to_string(&mut string) + .unwrap(); + File::create(destination) + .unwrap() + .write_all(string.as_bytes()) + .unwrap(); +} + +/// Copies the code used to find and link to `libclang` shared libraries into +/// the build output directory so that it may be used when linking at runtime. +#[cfg(feature = "runtime")] +fn main() { + use std::env; + + if cfg!(feature = "static") { + panic!("`runtime` and `static` features can't be combined"); + } + + let out = env::var("OUT_DIR").unwrap(); + copy("build/macros.rs", &Path::new(&out).join("macros.rs")); + copy("build/common.rs", &Path::new(&out).join("common.rs")); + copy("build/dynamic.rs", &Path::new(&out).join("dynamic.rs")); +} + +/// Finds and links to the required libraries dynamically or statically. +#[cfg(not(feature = "runtime"))] +fn main() { + if cfg!(feature = "static") { + r#static::link(); + } else { + dynamic::link(); + } + + if let Some(output) = common::run_llvm_config(&["--includedir"]) { + let directory = Path::new(output.trim_end()); + println!("cargo:include={}", directory.display()); + } +} diff --git a/vendor/clang-sys/build/common.rs b/vendor/clang-sys/build/common.rs new file mode 100644 index 00000000000000..4d144cb2a9a938 --- /dev/null +++ b/vendor/clang-sys/build/common.rs @@ -0,0 +1,355 @@ +// SPDX-License-Identifier: Apache-2.0 + +use std::cell::RefCell; +use std::collections::HashMap; +use std::env; +use std::path::{Path, PathBuf}; +use std::process::Command; + +use glob::{MatchOptions, Pattern}; + +//================================================ +// Commands +//================================================ + +thread_local! { + /// The errors encountered by the build script while executing commands. + static COMMAND_ERRORS: RefCell>> = RefCell::default(); +} + +/// Adds an error encountered by the build script while executing a command. 
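+/// Errors are grouped by command name so `CommandErrorPrinter` can report
+/// them together if no usable `libclang` instance ends up being found.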
+fn add_command_error(name: &str, path: &str, arguments: &[&str], message: String) { + COMMAND_ERRORS.with(|e| { + e.borrow_mut() + .entry(name.into()) + .or_default() + .push(format!( + "couldn't execute `{} {}` (path={}) ({})", + name, + arguments.join(" "), + path, + message, + )) + }); +} + +/// A struct that prints the errors encountered by the build script while +/// executing commands when dropped (unless explictly discarded). +/// +/// This is handy because we only want to print these errors when the build +/// script fails to link to an instance of `libclang`. For example, if +/// `llvm-config` couldn't be executed but an instance of `libclang` was found +/// anyway we don't want to pollute the build output with irrelevant errors. +#[derive(Default)] +pub struct CommandErrorPrinter { + discard: bool, +} + +impl CommandErrorPrinter { + pub fn discard(mut self) { + self.discard = true; + } +} + +impl Drop for CommandErrorPrinter { + fn drop(&mut self) { + if self.discard { + return; + } + + let errors = COMMAND_ERRORS.with(|e| e.borrow().clone()); + + if let Some(errors) = errors.get("llvm-config") { + println!( + "cargo:warning=could not execute `llvm-config` one or more \ + times, if the LLVM_CONFIG_PATH environment variable is set to \ + a full path to valid `llvm-config` executable it will be used \ + to try to find an instance of `libclang` on your system: {}", + errors + .iter() + .map(|e| format!("\"{}\"", e)) + .collect::>() + .join("\n "), + ) + } + + if let Some(errors) = errors.get("xcode-select") { + println!( + "cargo:warning=could not execute `xcode-select` one or more \ + times, if a valid instance of this executable is on your PATH \ + it will be used to try to find an instance of `libclang` on \ + your system: {}", + errors + .iter() + .map(|e| format!("\"{}\"", e)) + .collect::>() + .join("\n "), + ) + } + } +} + +#[cfg(test)] +lazy_static::lazy_static! { + pub static ref RUN_COMMAND_MOCK: std::sync::Mutex< + Option Option + Send + Sync + 'static>>, + > = std::sync::Mutex::new(None); +} + +/// Executes a command and returns the `stdout` output if the command was +/// successfully executed (errors are added to `COMMAND_ERRORS`). +fn run_command(name: &str, path: &str, arguments: &[&str]) -> Option { + #[cfg(test)] + if let Some(command) = &*RUN_COMMAND_MOCK.lock().unwrap() { + return command(name, path, arguments); + } + + let output = match Command::new(path).args(arguments).output() { + Ok(output) => output, + Err(error) => { + let message = format!("error: {}", error); + add_command_error(name, path, arguments, message); + return None; + } + }; + + if output.status.success() { + Some(String::from_utf8_lossy(&output.stdout).into_owned()) + } else { + let message = format!("exit code: {}", output.status); + add_command_error(name, path, arguments, message); + None + } +} + +/// Executes the `llvm-config` command and returns the `stdout` output if the +/// command was successfully executed (errors are added to `COMMAND_ERRORS`). +pub fn run_llvm_config(arguments: &[&str]) -> Option { + let path = env::var("LLVM_CONFIG_PATH").unwrap_or_else(|_| "llvm-config".into()); + run_command("llvm-config", &path, arguments) +} + +/// Executes the `xcode-select` command and returns the `stdout` output if the +/// command was successfully executed (errors are added to `COMMAND_ERRORS`). 
+pub fn run_xcode_select(arguments: &[&str]) -> Option { + run_command("xcode-select", "xcode-select", arguments) +} + +//================================================ +// Search Directories +//================================================ +// These search directories are listed in order of +// preference, so if multiple `libclang` instances +// are found when searching matching directories, +// the `libclang` instances from earlier +// directories will be preferred (though version +// takes precedence over location). +//================================================ + +/// `libclang` directory patterns for Haiku. +const DIRECTORIES_HAIKU: &[&str] = &[ + "/boot/home/config/non-packaged/develop/lib", + "/boot/home/config/non-packaged/lib", + "/boot/system/non-packaged/develop/lib", + "/boot/system/non-packaged/lib", + "/boot/system/develop/lib", + "/boot/system/lib", +]; + +/// `libclang` directory patterns for Linux (and FreeBSD). +const DIRECTORIES_LINUX: &[&str] = &[ + "/usr/local/llvm*/lib*", + "/usr/local/lib*/*/*", + "/usr/local/lib*/*", + "/usr/local/lib*", + "/usr/lib*/*/*", + "/usr/lib*/*", + "/usr/lib*", +]; + +/// `libclang` directory patterns for macOS. +const DIRECTORIES_MACOS: &[&str] = &[ + "/usr/local/opt/llvm*/lib/llvm*/lib", + "/Library/Developer/CommandLineTools/usr/lib", + "/Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/lib", + "/usr/local/opt/llvm*/lib", +]; + +/// `libclang` directory patterns for Windows. +/// +/// The boolean indicates whether the directory pattern should be used when +/// compiling for an MSVC target environment. +const DIRECTORIES_WINDOWS: &[(&str, bool)] = &[ + // LLVM + Clang can be installed using Scoop (https://scoop.sh). + // Other Windows package managers install LLVM + Clang to other listed + // system-wide directories. + ("C:\\Users\\*\\scoop\\apps\\llvm\\current\\lib", true), + ("C:\\MSYS*\\MinGW*\\lib", false), + ("C:\\Program Files*\\LLVM\\lib", true), + ("C:\\LLVM\\lib", true), + // LLVM + Clang can be installed as a component of Visual Studio. + // https://github.com/KyleMayes/clang-sys/issues/121 + ("C:\\Program Files*\\Microsoft Visual Studio\\*\\VC\\Tools\\Llvm\\**\\lib", true), +]; + +/// `libclang` directory patterns for illumos +const DIRECTORIES_ILLUMOS: &[&str] = &[ + "/opt/ooce/llvm-*/lib", + "/opt/ooce/clang-*/lib", +]; + +//================================================ +// Searching +//================================================ + +/// Finds the files in a directory that match one or more filename glob patterns +/// and returns the paths to and filenames of those files. +fn search_directory(directory: &Path, filenames: &[String]) -> Vec<(PathBuf, String)> { + // Escape the specified directory in case it contains characters that have + // special meaning in glob patterns (e.g., `[` or `]`). + let directory = Pattern::escape(directory.to_str().unwrap()); + let directory = Path::new(&directory); + + // Join the escaped directory to the filename glob patterns to obtain + // complete glob patterns for the files being searched for. + let paths = filenames + .iter() + .map(|f| directory.join(f).to_str().unwrap().to_owned()); + + // Prevent wildcards from matching path separators to ensure that the search + // is limited to the specified directory. 
+ let mut options = MatchOptions::new(); + options.require_literal_separator = true; + + paths + .map(|p| glob::glob_with(&p, options)) + .filter_map(Result::ok) + .flatten() + .filter_map(|p| { + let path = p.ok()?; + let filename = path.file_name()?.to_str().unwrap(); + + // The `libclang_shared` library has been renamed to `libclang-cpp` + // in Clang 10. This can cause instances of this library (e.g., + // `libclang-cpp.so.10`) to be matched by patterns looking for + // instances of `libclang`. + if filename.contains("-cpp.") { + return None; + } + + Some((path.parent().unwrap().to_owned(), filename.into())) + }) + .collect::>() +} + +/// Finds the files in a directory (and any relevant sibling directories) that +/// match one or more filename glob patterns and returns the paths to and +/// filenames of those files. +fn search_directories(directory: &Path, filenames: &[String]) -> Vec<(PathBuf, String)> { + let mut results = search_directory(directory, filenames); + + // On Windows, `libclang.dll` is usually found in the LLVM `bin` directory + // while `libclang.lib` is usually found in the LLVM `lib` directory. To + // keep things consistent with other platforms, only LLVM `lib` directories + // are included in the backup search directory globs so we need to search + // the LLVM `bin` directory here. + if target_os!("windows") && directory.ends_with("lib") { + let sibling = directory.parent().unwrap().join("bin"); + results.extend(search_directory(&sibling, filenames)); + } + + results +} + +/// Finds the `libclang` static or dynamic libraries matching one or more +/// filename glob patterns and returns the paths to and filenames of those files. +pub fn search_libclang_directories(filenames: &[String], variable: &str) -> Vec<(PathBuf, String)> { + // Search only the path indicated by the relevant environment variable + // (e.g., `LIBCLANG_PATH`) if it is set. + if let Ok(path) = env::var(variable).map(|d| Path::new(&d).to_path_buf()) { + // Check if the path is a matching file. + if let Some(parent) = path.parent() { + let filename = path.file_name().unwrap().to_str().unwrap(); + let libraries = search_directories(parent, filenames); + if libraries.iter().any(|(_, f)| f == filename) { + return vec![(parent.into(), filename.into())]; + } + } + + // Check if the path is directory containing a matching file. + return search_directories(&path, filenames); + } + + let mut found = vec![]; + + // Search the `bin` and `lib` directories in the directory returned by + // `llvm-config --prefix`. + if let Some(output) = run_llvm_config(&["--prefix"]) { + let directory = Path::new(output.lines().next().unwrap()).to_path_buf(); + found.extend(search_directories(&directory.join("bin"), filenames)); + found.extend(search_directories(&directory.join("lib"), filenames)); + found.extend(search_directories(&directory.join("lib64"), filenames)); + } + + // Search the toolchain directory in the directory returned by + // `xcode-select --print-path`. + if target_os!("macos") { + if let Some(output) = run_xcode_select(&["--print-path"]) { + let directory = Path::new(output.lines().next().unwrap()).to_path_buf(); + let directory = directory.join("Toolchains/XcodeDefault.xctoolchain/usr/lib"); + found.extend(search_directories(&directory, filenames)); + } + } + + // Search the directories in the `LD_LIBRARY_PATH` environment variable. 
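+    // (`env::split_paths` handles the platform-specific separator: `:` on
+    // Unix-like systems and `;` on Windows.)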
+ if let Ok(path) = env::var("LD_LIBRARY_PATH") { + for directory in env::split_paths(&path) { + found.extend(search_directories(&directory, filenames)); + } + } + + // Determine the `libclang` directory patterns. + let directories: Vec<&str> = if target_os!("haiku") { + DIRECTORIES_HAIKU.into() + } else if target_os!("linux") || target_os!("freebsd") { + DIRECTORIES_LINUX.into() + } else if target_os!("macos") { + DIRECTORIES_MACOS.into() + } else if target_os!("windows") { + let msvc = target_env!("msvc"); + DIRECTORIES_WINDOWS + .iter() + .filter(|d| d.1 || !msvc) + .map(|d| d.0) + .collect() + } else if target_os!("illumos") { + DIRECTORIES_ILLUMOS.into() + } else { + vec![] + }; + + // We use temporary directories when testing the build script so we'll + // remove the prefixes that make the directories absolute. + let directories = if test!() { + directories + .iter() + .map(|d| d.strip_prefix('/').or_else(|| d.strip_prefix("C:\\")).unwrap_or(d)) + .collect::>() + } else { + directories + }; + + // Search the directories provided by the `libclang` directory patterns. + let mut options = MatchOptions::new(); + options.case_sensitive = false; + options.require_literal_separator = true; + for directory in directories.iter() { + if let Ok(directories) = glob::glob_with(directory, options) { + for directory in directories.filter_map(Result::ok).filter(|p| p.is_dir()) { + found.extend(search_directories(&directory, filenames)); + } + } + } + + found +} diff --git a/vendor/clang-sys/build/dynamic.rs b/vendor/clang-sys/build/dynamic.rs new file mode 100644 index 00000000000000..f3d5a626837b52 --- /dev/null +++ b/vendor/clang-sys/build/dynamic.rs @@ -0,0 +1,276 @@ +// SPDX-License-Identifier: Apache-2.0 + +use std::env; +use std::fs::File; +use std::io::{self, Error, ErrorKind, Read, Seek, SeekFrom}; +use std::path::{Path, PathBuf}; + +use super::common; + +//================================================ +// Validation +//================================================ + +/// Extracts the ELF class from the ELF header in a shared library. +fn parse_elf_header(path: &Path) -> io::Result { + let mut file = File::open(path)?; + let mut buffer = [0; 5]; + file.read_exact(&mut buffer)?; + if buffer[..4] == [127, 69, 76, 70] { + Ok(buffer[4]) + } else { + Err(Error::new(ErrorKind::InvalidData, "invalid ELF header")) + } +} + +/// Extracts the magic number and machine type from the PE header in a shared library. +fn parse_pe_header(path: &Path) -> io::Result<(u16, u16)> { + let mut file = File::open(path)?; + + // Extract the header offset. + let mut buffer = [0; 4]; + let start = SeekFrom::Start(0x3C); + file.seek(start)?; + file.read_exact(&mut buffer)?; + let offset = i32::from_le_bytes(buffer); + + // Check the validity of the header. + file.seek(SeekFrom::Start(offset as u64))?; + file.read_exact(&mut buffer)?; + if buffer != [80, 69, 0, 0] { + return Err(Error::new(ErrorKind::InvalidData, "invalid PE header")); + } + + // Extract the magic number. + let mut buffer = [0; 2]; + file.seek(SeekFrom::Current(20))?; + file.read_exact(&mut buffer)?; + let magic_number = u16::from_le_bytes(buffer); + + // Extract the machine type. + let mut buffer = [0; 2]; + file.seek(SeekFrom::Current(-22))?; + file.read_exact(&mut buffer)?; + let machine_type = u16::from_le_bytes(buffer); + + return Ok((magic_number, machine_type)); +} + +/// Checks that a `libclang` shared library matches the target platform. 
+fn validate_library(path: &Path) -> Result<(), String> {
+    if target_os!("linux") || target_os!("freebsd") {
+        let class = parse_elf_header(path).map_err(|e| e.to_string())?;
+
+        if target_pointer_width!("32") && class != 1 {
+            return Err("invalid ELF class (64-bit)".into());
+        }
+
+        if target_pointer_width!("64") && class != 2 {
+            return Err("invalid ELF class (32-bit)".into());
+        }
+
+        Ok(())
+    } else if target_os!("windows") {
+        let (magic, machine_type) = parse_pe_header(path).map_err(|e| e.to_string())?;
+
+        if target_pointer_width!("32") && magic != 267 {
+            return Err("invalid DLL (64-bit)".into());
+        }
+
+        if target_pointer_width!("64") && magic != 523 {
+            return Err("invalid DLL (32-bit)".into());
+        }
+
+        let arch_mismatch = match machine_type {
+            0x014C if !target_arch!("x86") => Some("x86"),
+            0x8664 if !target_arch!("x86_64") => Some("x86-64"),
+            0xAA64 if !target_arch!("aarch64") => Some("ARM64"),
+            _ => None,
+        };
+
+        if let Some(arch) = arch_mismatch {
+            Err(format!("invalid DLL ({arch})"))
+        } else {
+            Ok(())
+        }
+    } else {
+        Ok(())
+    }
+}
+
+//================================================
+// Searching
+//================================================
+
+/// Extracts the version components in a `libclang` shared library filename.
+fn parse_version(filename: &str) -> Vec<u32> {
+    let version = if let Some(version) = filename.strip_prefix("libclang.so.") {
+        version
+    } else if filename.starts_with("libclang-") {
+        &filename[9..filename.len() - 3]
+    } else {
+        return vec![];
+    };
+
+    version.split('.').map(|s| s.parse().unwrap_or(0)).collect()
+}
+
+/// Finds `libclang` shared libraries and returns the paths to, filenames of,
+/// and versions of those shared libraries.
+fn search_libclang_directories(runtime: bool) -> Result<Vec<(PathBuf, String, Vec<u32>)>, String> {
+    let mut files = vec![format!(
+        "{}clang{}",
+        env::consts::DLL_PREFIX,
+        env::consts::DLL_SUFFIX
+    )];
+
+    if target_os!("linux") {
+        // Some Linux distributions don't create a `libclang.so` symlink, so we
+        // need to look for versioned files (e.g., `libclang-3.9.so`).
+        files.push("libclang-*.so".into());
+
+        // Some Linux distributions don't create a `libclang.so` symlink and
+        // don't have versioned files as described above, so we need to look for
+        // suffix versioned files (e.g., `libclang.so.1`). However, `ld` cannot
+        // link to these files, so this will only be included when linking at
+        // runtime.
+        if runtime {
+            files.push("libclang.so.*".into());
+            files.push("libclang-*.so.*".into());
+        }
+    }
+
+    if target_os!("freebsd") || target_os!("haiku") || target_os!("netbsd") || target_os!("openbsd") {
+        // Some BSD distributions don't create a `libclang.so` symlink either,
+        // but use a different naming scheme for versioned files (e.g.,
+        // `libclang.so.7.0`).
+        files.push("libclang.so.*".into());
+    }
+
+    if target_os!("windows") {
+        // The official LLVM build uses `libclang.dll` on Windows instead of
+        // `clang.dll`. However, unofficial builds such as MinGW use `clang.dll`.
+        files.push("libclang.dll".into());
+    }
+
+    // Find and validate `libclang` shared libraries and collect the versions.
+    let mut valid = vec![];
+    let mut invalid = vec![];
+    for (directory, filename) in common::search_libclang_directories(&files, "LIBCLANG_PATH") {
+        let path = directory.join(&filename);
+        match validate_library(&path) {
+            Ok(()) => {
+                let version = parse_version(&filename);
+                valid.push((directory, filename, version))
+            }
+            Err(message) => invalid.push(format!("({}: {})", path.display(), message)),
+        }
+    }
+
+    if !valid.is_empty() {
+        return Ok(valid);
+    }
+
+    let message = format!(
+        "couldn't find any valid shared libraries matching: [{}], set the \
+         `LIBCLANG_PATH` environment variable to a path where one of these files \
+         can be found (invalid: [{}])",
+        files
+            .iter()
+            .map(|f| format!("'{}'", f))
+            .collect::<Vec<_>>()
+            .join(", "),
+        invalid.join(", "),
+    );
+
+    Err(message)
+}
+
+/// Finds the "best" `libclang` shared library and returns the directory and
+/// filename of that library.
+pub fn find(runtime: bool) -> Result<(PathBuf, String), String> {
+    search_libclang_directories(runtime)?
+        .iter()
+        // We want to find the `libclang` shared library with the highest
+        // version number, hence `max_by_key` below.
+        //
+        // However, in the case where there are multiple such `libclang` shared
+        // libraries, we want to use the order in which they appeared in the
+        // list returned by `search_libclang_directories` as a tiebreaker since
+        // that function returns `libclang` shared libraries in descending order
+        // of preference by how they were found.
+        //
+        // `max_by_key`, perhaps surprisingly, returns the *last* item with the
+        // maximum key rather than the first which results in the opposite of
+        // the tiebreaking behavior we want. This is easily fixed by reversing
+        // the list first.
+        .rev()
+        .max_by_key(|f| &f.2)
+        .cloned()
+        .map(|(path, filename, _)| (path, filename))
+        .ok_or_else(|| "unreachable".into())
+}
+
+//================================================
+// Linking
+//================================================
+
+/// Finds and links to a `libclang` shared library.
+#[cfg(not(feature = "runtime"))]
+pub fn link() {
+    let cep = common::CommandErrorPrinter::default();
+
+    use std::fs;
+
+    let (directory, filename) = find(false).unwrap();
+    println!("cargo:rustc-link-search={}", directory.display());
+
+    if cfg!(all(target_os = "windows", target_env = "msvc")) {
+        // Find the `libclang` stub static library required for the MSVC
+        // toolchain.
+        let lib = if !directory.ends_with("bin") {
+            directory
+        } else {
+            directory.parent().unwrap().join("lib")
+        };
+
+        if lib.join("libclang.lib").exists() {
+            println!("cargo:rustc-link-search={}", lib.display());
+        } else if lib.join("libclang.dll.a").exists() {
+            // MSYS and MinGW use `libclang.dll.a` instead of `libclang.lib`.
+            // It is linkable with the MSVC linker, but Rust doesn't recognize
+            // the `.a` suffix, so we need to copy it with a different name.
+            //
+            // FIXME: Maybe we can just hardlink or symlink it?
+            let out = env::var("OUT_DIR").unwrap();
+            fs::copy(
+                lib.join("libclang.dll.a"),
+                Path::new(&out).join("libclang.lib"),
+            )
+            .unwrap();
+            println!("cargo:rustc-link-search=native={}", out);
+        } else {
+            panic!(
+                "using '{}', so 'libclang.lib' or 'libclang.dll.a' must be \
+                 available in {}",
+                filename,
+                lib.display(),
+            );
+        }
+
+        println!("cargo:rustc-link-lib=dylib=libclang");
+    } else {
+        let name = filename.trim_start_matches("lib");
+
+        // Strip extensions and trailing version numbers (e.g., the `.so.7.0` in
+        // `libclang.so.7.0`).
+        let name = match name.find(".dylib").or_else(|| name.find(".so")) {
+            Some(index) => &name[0..index],
+            None => name,
+        };
+
+        println!("cargo:rustc-link-lib=dylib={}", name);
+    }
+
+    cep.discard();
+}
diff --git a/vendor/clang-sys/build/macros.rs b/vendor/clang-sys/build/macros.rs
new file mode 100644
index 00000000000000..a766a6e27c4427
--- /dev/null
+++ b/vendor/clang-sys/build/macros.rs
@@ -0,0 +1,49 @@
+// SPDX-License-Identifier: Apache-2.0
+
+macro_rules! test {
+    () => (cfg!(test) && ::std::env::var("_CLANG_SYS_TEST").is_ok());
+}
+
+macro_rules! target_os {
+    ($os:expr) => {
+        if cfg!(test) && ::std::env::var("_CLANG_SYS_TEST").is_ok() {
+            let var = ::std::env::var("_CLANG_SYS_TEST_OS");
+            var.map_or(false, |v| v == $os)
+        } else {
+            cfg!(target_os = $os)
+        }
+    };
+}
+
+macro_rules! target_arch {
+    ($arch:expr) => {
+        if cfg!(test) && ::std::env::var("_CLANG_SYS_TEST").is_ok() {
+            let var = ::std::env::var("_CLANG_SYS_TEST_ARCH");
+            var.map_or(false, |v| v == $arch)
+        } else {
+            cfg!(target_arch = $arch)
+        }
+    };
+}
+
+macro_rules! target_pointer_width {
+    ($pointer_width:expr) => {
+        if cfg!(test) && ::std::env::var("_CLANG_SYS_TEST").is_ok() {
+            let var = ::std::env::var("_CLANG_SYS_TEST_POINTER_WIDTH");
+            var.map_or(false, |v| v == $pointer_width)
+        } else {
+            cfg!(target_pointer_width = $pointer_width)
+        }
+    };
+}
+
+macro_rules! target_env {
+    ($env:expr) => {
+        if cfg!(test) && ::std::env::var("_CLANG_SYS_TEST").is_ok() {
+            let var = ::std::env::var("_CLANG_SYS_TEST_ENV");
+            var.map_or(false, |v| v == $env)
+        } else {
+            cfg!(target_env = $env)
+        }
+    };
+}
diff --git a/vendor/clang-sys/build/static.rs b/vendor/clang-sys/build/static.rs
new file mode 100644
index 00000000000000..c1b70eb08b2654
--- /dev/null
+++ b/vendor/clang-sys/build/static.rs
@@ -0,0 +1,146 @@
+// SPDX-License-Identifier: Apache-2.0
+
+use std::path::{Path, PathBuf};
+
+use glob::Pattern;
+
+use super::common;
+
+//================================================
+// Searching
+//================================================
+
+/// Clang static libraries required to link to `libclang` 3.5 and later.
+const CLANG_LIBRARIES: &[&str] = &[
+    "clang",
+    "clangAST",
+    "clangAnalysis",
+    "clangBasic",
+    "clangDriver",
+    "clangEdit",
+    "clangFrontend",
+    "clangIndex",
+    "clangLex",
+    "clangParse",
+    "clangRewrite",
+    "clangSema",
+    "clangSerialization",
+];
+
+/// Gets the name of an LLVM or Clang static library from a path.
+fn get_library_name(path: &Path) -> Option<String> {
+    path.file_stem().map(|p| {
+        let string = p.to_string_lossy();
+        if let Some(name) = string.strip_prefix("lib") {
+            name.to_owned()
+        } else {
+            string.to_string()
+        }
+    })
+}
+
+/// Gets the LLVM static libraries required to link to `libclang`.
+fn get_llvm_libraries() -> Vec<String> {
+    common::run_llvm_config(&["--libs"])
+        .unwrap()
+        .split_whitespace()
+        .filter_map(|p| {
+            // Depending on the version of `llvm-config` in use, listed
+            // libraries may be in one of two forms, a full path to the library
+            // or simply prefixed with `-l`.
+            if let Some(path) = p.strip_prefix("-l") {
+                Some(path.into())
+            } else {
+                get_library_name(Path::new(p))
+            }
+        })
+        .collect()
+}
+
+/// Gets the Clang static libraries required to link to `libclang`.
+fn get_clang_libraries<P: AsRef<Path>>(directory: P) -> Vec<String> {
+    // Escape the directory in case it contains characters that have special
+    // meaning in glob patterns (e.g., `[` or `]`).
+ let directory = Pattern::escape(directory.as_ref().to_str().unwrap()); + let directory = Path::new(&directory); + + let pattern = directory.join("libclang*.a").to_str().unwrap().to_owned(); + if let Ok(libraries) = glob::glob(&pattern) { + libraries + .filter_map(|l| l.ok().and_then(|l| get_library_name(&l))) + .collect() + } else { + CLANG_LIBRARIES.iter().map(|l| (*l).to_string()).collect() + } +} + +/// Finds a directory containing LLVM and Clang static libraries and returns the +/// path to that directory. +fn find() -> PathBuf { + let name = if target_os!("windows") { + "libclang.lib" + } else { + "libclang.a" + }; + + let files = common::search_libclang_directories(&[name.into()], "LIBCLANG_STATIC_PATH"); + if let Some((directory, _)) = files.into_iter().next() { + directory + } else { + panic!( + "could not find the required `{name}` static library, see the \ + README for more information on how to link to `libclang` statically: \ + https://github.com/KyleMayes/clang-sys?tab=readme-ov-file#static" + ); + } +} + +//================================================ +// Linking +//================================================ + +/// Finds and links to `libclang` static libraries. +pub fn link() { + let cep = common::CommandErrorPrinter::default(); + + let directory = find(); + + // Specify required Clang static libraries. + println!("cargo:rustc-link-search=native={}", directory.display()); + for library in get_clang_libraries(directory) { + println!("cargo:rustc-link-lib=static={}", library); + } + + // Determine the shared mode used by LLVM. + let mode = common::run_llvm_config(&["--shared-mode"]).map(|m| m.trim().to_owned()); + let prefix = if mode.map_or(false, |m| m == "static") { + "static=" + } else { + "" + }; + + // Specify required LLVM static libraries. + println!( + "cargo:rustc-link-search=native={}", + common::run_llvm_config(&["--libdir"]).unwrap().trim_end() + ); + for library in get_llvm_libraries() { + println!("cargo:rustc-link-lib={}{}", prefix, library); + } + + // Specify required system libraries. + // MSVC doesn't need this, as it tracks dependencies inside `.lib` files. + if cfg!(target_os = "freebsd") { + println!("cargo:rustc-flags=-l ffi -l ncursesw -l c++ -l z"); + } else if cfg!(any(target_os = "haiku", target_os = "linux")) { + if cfg!(feature = "libcpp") { + println!("cargo:rustc-flags=-l c++"); + } else { + println!("cargo:rustc-flags=-l ffi -l ncursesw -l stdc++ -l z"); + } + } else if cfg!(target_os = "macos") { + println!("cargo:rustc-flags=-l ffi -l ncurses -l c++ -l z"); + } + + cep.discard(); +} diff --git a/vendor/clang-sys/clippy.toml b/vendor/clang-sys/clippy.toml new file mode 100644 index 00000000000000..6f41284e10733b --- /dev/null +++ b/vendor/clang-sys/clippy.toml @@ -0,0 +1 @@ +doc-valid-idents = ["FreeBSD"] diff --git a/vendor/clang-sys/src/lib.rs b/vendor/clang-sys/src/lib.rs new file mode 100644 index 00000000000000..5f5383b9fcc6ac --- /dev/null +++ b/vendor/clang-sys/src/lib.rs @@ -0,0 +1,2433 @@ +// SPDX-License-Identifier: Apache-2.0 + +//! Rust bindings for `libclang`. +//! +//! ## [Documentation](https://docs.rs/clang-sys) +//! +//! Note that the documentation on https://docs.rs for this crate assumes usage +//! of the `runtime` Cargo feature as well as the Cargo feature for the latest +//! supported version of `libclang` (e.g., `clang_11_0`), neither of which are +//! enabled by default. +//! +//! Due to the usage of the `runtime` Cargo feature, this documentation will +//! 
contain some additional types and functions to manage a dynamically loaded +//! `libclang` instance at runtime. +//! +//! Due to the usage of the Cargo feature for the latest supported version of +//! `libclang`, this documentation will contain constants and functions that are +//! not available in the oldest supported version of `libclang` (3.5). All of +//! these types and functions have a documentation comment which specifies the +//! minimum `libclang` version required to use the item. + +#![allow(non_camel_case_types, non_snake_case, non_upper_case_globals)] +#![cfg_attr(feature = "cargo-clippy", allow(clippy::unreadable_literal))] + +pub mod support; + +#[macro_use] +mod link; + +use std::mem; + +use libc::*; + +pub type CXClientData = *mut c_void; +pub type CXCursorVisitor = extern "C" fn(CXCursor, CXCursor, CXClientData) -> CXChildVisitResult; +#[cfg(feature = "clang_3_7")] +pub type CXFieldVisitor = extern "C" fn(CXCursor, CXClientData) -> CXVisitorResult; +pub type CXInclusionVisitor = extern "C" fn(CXFile, *mut CXSourceLocation, c_uint, CXClientData); + +//================================================ +// Macros +//================================================ + +/// Defines a C enum as a series of constants. +macro_rules! cenum { + (#[repr($ty:ty)] $(#[$meta:meta])* enum $name:ident { + $($(#[$vmeta:meta])* const $variant:ident = $value:expr), +, + }) => ( + pub type $name = $ty; + + $($(#[$vmeta])* pub const $variant: $name = $value;)+ + ); + (#[repr($ty:ty)] $(#[$meta:meta])* enum $name:ident { + $($(#[$vmeta:meta])* const $variant:ident = $value:expr); +; + }) => ( + pub type $name = $ty; + + $($(#[$vmeta])* pub const $variant: $name = $value;)+ + ); + ($(#[$meta:meta])* enum $name:ident { + $($(#[$vmeta:meta])* const $variant:ident = $value:expr), +, + }) => ( + pub type $name = c_int; + + $($(#[$vmeta])* pub const $variant: $name = $value;)+ + ); + ($(#[$meta:meta])* enum $name:ident { + $($(#[$vmeta:meta])* const $variant:ident = $value:expr); +; + }) => ( + pub type $name = c_int; + + $($(#[$vmeta])* pub const $variant: $name = $value;)+ + ); +} + +/// Implements a zeroing implementation of `Default` for the supplied type. +macro_rules! default { + (#[$meta:meta] $ty:ty) => { + #[$meta] + impl Default for $ty { + fn default() -> $ty { + unsafe { mem::zeroed() } + } + } + }; + + ($ty:ty) => { + impl Default for $ty { + fn default() -> $ty { + unsafe { mem::zeroed() } + } + } + }; +} + +//================================================ +// Enums +//================================================ + +cenum! { + enum CXAvailabilityKind { + const CXAvailability_Available = 0, + const CXAvailability_Deprecated = 1, + const CXAvailability_NotAvailable = 2, + const CXAvailability_NotAccessible = 3, + } +} + +cenum! { + /// Only available on `libclang` 17.0 and later. 
+ #[cfg(feature = "clang_17_0")] + enum CXBinaryOperatorKind { + const CXBinaryOperator_Invalid = 0, + const CXBinaryOperator_PtrMemD = 1, + const CXBinaryOperator_PtrMemI = 2, + const CXBinaryOperator_Mul = 3, + const CXBinaryOperator_Div = 4, + const CXBinaryOperator_Rem = 5, + const CXBinaryOperator_Add = 6, + const CXBinaryOperator_Sub = 7, + const CXBinaryOperator_Shl = 8, + const CXBinaryOperator_Shr = 9, + const CXBinaryOperator_Cmp = 10, + const CXBinaryOperator_LT = 11, + const CXBinaryOperator_GT = 12, + const CXBinaryOperator_LE = 13, + const CXBinaryOperator_GE = 14, + const CXBinaryOperator_EQ = 15, + const CXBinaryOperator_NE = 16, + const CXBinaryOperator_And = 17, + const CXBinaryOperator_Xor = 18, + const CXBinaryOperator_Or = 19, + const CXBinaryOperator_LAnd = 20, + const CXBinaryOperator_LOr = 21, + const CXBinaryOperator_Assign = 22, + const CXBinaryOperator_MulAssign = 23, + const CXBinaryOperator_DivAssign = 24, + const CXBinaryOperator_RemAssign = 25, + const CXBinaryOperator_AddAssign = 26, + const CXBinaryOperator_SubAssign = 27, + const CXBinaryOperator_ShlAssign = 28, + const CXBinaryOperator_ShrAssign = 29, + const CXBinaryOperator_AndAssign = 30, + const CXBinaryOperator_XorAssign = 31, + const CXBinaryOperator_OrAssign = 32, + const CXBinaryOperator_Comma = 33, + } +} + +cenum! { + enum CXCallingConv { + const CXCallingConv_Default = 0, + const CXCallingConv_C = 1, + const CXCallingConv_X86StdCall = 2, + const CXCallingConv_X86FastCall = 3, + const CXCallingConv_X86ThisCall = 4, + const CXCallingConv_X86Pascal = 5, + const CXCallingConv_AAPCS = 6, + const CXCallingConv_AAPCS_VFP = 7, + /// Only produced by `libclang` 4.0 and later. + const CXCallingConv_X86RegCall = 8, + const CXCallingConv_IntelOclBicc = 9, + const CXCallingConv_Win64 = 10, + const CXCallingConv_X86_64Win64 = 10, + const CXCallingConv_X86_64SysV = 11, + /// Only produced by `libclang` 3.6 and later. + const CXCallingConv_X86VectorCall = 12, + /// Only produced by `libclang` 3.9 and later. + const CXCallingConv_Swift = 13, + /// Only produced by `libclang` 3.9 and later. + const CXCallingConv_PreserveMost = 14, + /// Only produced by `libclang` 3.9 and later. + const CXCallingConv_PreserveAll = 15, + /// Only produced by `libclang` 8.0 and later. + const CXCallingConv_AArch64VectorCall = 16, + const CXCallingConv_Invalid = 100, + const CXCallingConv_Unexposed = 200, + /// Only produced by `libclang` 13.0 and later. + const CXCallingConv_SwiftAsync = 17, + /// Only produced by `libclang` 15.0 and later. + const CXCallingConv_AArch64SVEPCS = 18, + /// Only produced by `libclang` 18.0 and later. + const CXCallingConv_M68kRTD = 19, + } +} + +cenum! { + enum CXChildVisitResult { + const CXChildVisit_Break = 0, + const CXChildVisit_Continue = 1, + const CXChildVisit_Recurse = 2, + } +} + +cenum! { + #[repr(c_uchar)] + /// Only available on `libclang` 17.0 and later. + #[cfg(feature = "clang_17_0")] + enum CXChoice { + const CXChoice_Default = 0, + const CXChoice_Enabled = 1, + const CXChoice_Disabled = 2, + } +} + +cenum! { + enum CXCommentInlineCommandRenderKind { + const CXCommentInlineCommandRenderKind_Normal = 0, + const CXCommentInlineCommandRenderKind_Bold = 1, + const CXCommentInlineCommandRenderKind_Monospaced = 2, + const CXCommentInlineCommandRenderKind_Emphasized = 3, + } +} + +cenum! 
{ + enum CXCommentKind { + const CXComment_Null = 0, + const CXComment_Text = 1, + const CXComment_InlineCommand = 2, + const CXComment_HTMLStartTag = 3, + const CXComment_HTMLEndTag = 4, + const CXComment_Paragraph = 5, + const CXComment_BlockCommand = 6, + const CXComment_ParamCommand = 7, + const CXComment_TParamCommand = 8, + const CXComment_VerbatimBlockCommand = 9, + const CXComment_VerbatimBlockLine = 10, + const CXComment_VerbatimLine = 11, + const CXComment_FullComment = 12, + } +} + +cenum! { + enum CXCommentParamPassDirection { + const CXCommentParamPassDirection_In = 0, + const CXCommentParamPassDirection_Out = 1, + const CXCommentParamPassDirection_InOut = 2, + } +} + +cenum! { + enum CXCompilationDatabase_Error { + const CXCompilationDatabase_NoError = 0, + const CXCompilationDatabase_CanNotLoadDatabase = 1, + } +} + +cenum! { + enum CXCompletionChunkKind { + const CXCompletionChunk_Optional = 0, + const CXCompletionChunk_TypedText = 1, + const CXCompletionChunk_Text = 2, + const CXCompletionChunk_Placeholder = 3, + const CXCompletionChunk_Informative = 4, + const CXCompletionChunk_CurrentParameter = 5, + const CXCompletionChunk_LeftParen = 6, + const CXCompletionChunk_RightParen = 7, + const CXCompletionChunk_LeftBracket = 8, + const CXCompletionChunk_RightBracket = 9, + const CXCompletionChunk_LeftBrace = 10, + const CXCompletionChunk_RightBrace = 11, + const CXCompletionChunk_LeftAngle = 12, + const CXCompletionChunk_RightAngle = 13, + const CXCompletionChunk_Comma = 14, + const CXCompletionChunk_ResultType = 15, + const CXCompletionChunk_Colon = 16, + const CXCompletionChunk_SemiColon = 17, + const CXCompletionChunk_Equal = 18, + const CXCompletionChunk_HorizontalSpace = 19, + const CXCompletionChunk_VerticalSpace = 20, + } +} + +cenum! 
{ + enum CXCursorKind { + const CXCursor_UnexposedDecl = 1, + const CXCursor_StructDecl = 2, + const CXCursor_UnionDecl = 3, + const CXCursor_ClassDecl = 4, + const CXCursor_EnumDecl = 5, + const CXCursor_FieldDecl = 6, + const CXCursor_EnumConstantDecl = 7, + const CXCursor_FunctionDecl = 8, + const CXCursor_VarDecl = 9, + const CXCursor_ParmDecl = 10, + const CXCursor_ObjCInterfaceDecl = 11, + const CXCursor_ObjCCategoryDecl = 12, + const CXCursor_ObjCProtocolDecl = 13, + const CXCursor_ObjCPropertyDecl = 14, + const CXCursor_ObjCIvarDecl = 15, + const CXCursor_ObjCInstanceMethodDecl = 16, + const CXCursor_ObjCClassMethodDecl = 17, + const CXCursor_ObjCImplementationDecl = 18, + const CXCursor_ObjCCategoryImplDecl = 19, + const CXCursor_TypedefDecl = 20, + const CXCursor_CXXMethod = 21, + const CXCursor_Namespace = 22, + const CXCursor_LinkageSpec = 23, + const CXCursor_Constructor = 24, + const CXCursor_Destructor = 25, + const CXCursor_ConversionFunction = 26, + const CXCursor_TemplateTypeParameter = 27, + const CXCursor_NonTypeTemplateParameter = 28, + const CXCursor_TemplateTemplateParameter = 29, + const CXCursor_FunctionTemplate = 30, + const CXCursor_ClassTemplate = 31, + const CXCursor_ClassTemplatePartialSpecialization = 32, + const CXCursor_NamespaceAlias = 33, + const CXCursor_UsingDirective = 34, + const CXCursor_UsingDeclaration = 35, + const CXCursor_TypeAliasDecl = 36, + const CXCursor_ObjCSynthesizeDecl = 37, + const CXCursor_ObjCDynamicDecl = 38, + const CXCursor_CXXAccessSpecifier = 39, + const CXCursor_ObjCSuperClassRef = 40, + const CXCursor_ObjCProtocolRef = 41, + const CXCursor_ObjCClassRef = 42, + const CXCursor_TypeRef = 43, + const CXCursor_CXXBaseSpecifier = 44, + const CXCursor_TemplateRef = 45, + const CXCursor_NamespaceRef = 46, + const CXCursor_MemberRef = 47, + const CXCursor_LabelRef = 48, + const CXCursor_OverloadedDeclRef = 49, + const CXCursor_VariableRef = 50, + const CXCursor_InvalidFile = 70, + const CXCursor_NoDeclFound = 71, + const CXCursor_NotImplemented = 72, + const CXCursor_InvalidCode = 73, + const CXCursor_UnexposedExpr = 100, + const CXCursor_DeclRefExpr = 101, + const CXCursor_MemberRefExpr = 102, + const CXCursor_CallExpr = 103, + const CXCursor_ObjCMessageExpr = 104, + const CXCursor_BlockExpr = 105, + const CXCursor_IntegerLiteral = 106, + const CXCursor_FloatingLiteral = 107, + const CXCursor_ImaginaryLiteral = 108, + const CXCursor_StringLiteral = 109, + const CXCursor_CharacterLiteral = 110, + const CXCursor_ParenExpr = 111, + const CXCursor_UnaryOperator = 112, + const CXCursor_ArraySubscriptExpr = 113, + const CXCursor_BinaryOperator = 114, + const CXCursor_CompoundAssignOperator = 115, + const CXCursor_ConditionalOperator = 116, + const CXCursor_CStyleCastExpr = 117, + const CXCursor_CompoundLiteralExpr = 118, + const CXCursor_InitListExpr = 119, + const CXCursor_AddrLabelExpr = 120, + const CXCursor_StmtExpr = 121, + const CXCursor_GenericSelectionExpr = 122, + const CXCursor_GNUNullExpr = 123, + const CXCursor_CXXStaticCastExpr = 124, + const CXCursor_CXXDynamicCastExpr = 125, + const CXCursor_CXXReinterpretCastExpr = 126, + const CXCursor_CXXConstCastExpr = 127, + const CXCursor_CXXFunctionalCastExpr = 128, + const CXCursor_CXXTypeidExpr = 129, + const CXCursor_CXXBoolLiteralExpr = 130, + const CXCursor_CXXNullPtrLiteralExpr = 131, + const CXCursor_CXXThisExpr = 132, + const CXCursor_CXXThrowExpr = 133, + const CXCursor_CXXNewExpr = 134, + const CXCursor_CXXDeleteExpr = 135, + const CXCursor_UnaryExpr = 136, + const 
CXCursor_ObjCStringLiteral = 137, + const CXCursor_ObjCEncodeExpr = 138, + const CXCursor_ObjCSelectorExpr = 139, + const CXCursor_ObjCProtocolExpr = 140, + const CXCursor_ObjCBridgedCastExpr = 141, + const CXCursor_PackExpansionExpr = 142, + const CXCursor_SizeOfPackExpr = 143, + const CXCursor_LambdaExpr = 144, + const CXCursor_ObjCBoolLiteralExpr = 145, + const CXCursor_ObjCSelfExpr = 146, + /// Only produced by `libclang` 3.8 and later. + const CXCursor_OMPArraySectionExpr = 147, + /// Only produced by `libclang` 3.9 and later. + const CXCursor_ObjCAvailabilityCheckExpr = 148, + /// Only produced by `libclang` 7.0 and later. + const CXCursor_FixedPointLiteral = 149, + /// Only produced by `libclang` 12.0 and later. + const CXCursor_OMPArrayShapingExpr = 150, + /// Only produced by `libclang` 12.0 and later. + const CXCursor_OMPIteratorExpr = 151, + /// Only produced by `libclang` 12.0 and later. + const CXCursor_CXXAddrspaceCastExpr = 152, + /// Only produced by `libclang` 15.0 and later. + const CXCursor_ConceptSpecializationExpr = 153, + /// Only produced by `libclang` 15.0 and later. + const CXCursor_RequiresExpr = 154, + /// Only produced by `libclang` 16.0 and later. + const CXCursor_CXXParenListInitExpr = 155, + const CXCursor_UnexposedStmt = 200, + const CXCursor_LabelStmt = 201, + const CXCursor_CompoundStmt = 202, + const CXCursor_CaseStmt = 203, + const CXCursor_DefaultStmt = 204, + const CXCursor_IfStmt = 205, + const CXCursor_SwitchStmt = 206, + const CXCursor_WhileStmt = 207, + const CXCursor_DoStmt = 208, + const CXCursor_ForStmt = 209, + const CXCursor_GotoStmt = 210, + const CXCursor_IndirectGotoStmt = 211, + const CXCursor_ContinueStmt = 212, + const CXCursor_BreakStmt = 213, + const CXCursor_ReturnStmt = 214, + /// Duplicate of `CXCursor_GccAsmStmt`. + const CXCursor_AsmStmt = 215, + const CXCursor_ObjCAtTryStmt = 216, + const CXCursor_ObjCAtCatchStmt = 217, + const CXCursor_ObjCAtFinallyStmt = 218, + const CXCursor_ObjCAtThrowStmt = 219, + const CXCursor_ObjCAtSynchronizedStmt = 220, + const CXCursor_ObjCAutoreleasePoolStmt = 221, + const CXCursor_ObjCForCollectionStmt = 222, + const CXCursor_CXXCatchStmt = 223, + const CXCursor_CXXTryStmt = 224, + const CXCursor_CXXForRangeStmt = 225, + const CXCursor_SEHTryStmt = 226, + const CXCursor_SEHExceptStmt = 227, + const CXCursor_SEHFinallyStmt = 228, + const CXCursor_MSAsmStmt = 229, + const CXCursor_NullStmt = 230, + const CXCursor_DeclStmt = 231, + const CXCursor_OMPParallelDirective = 232, + const CXCursor_OMPSimdDirective = 233, + const CXCursor_OMPForDirective = 234, + const CXCursor_OMPSectionsDirective = 235, + const CXCursor_OMPSectionDirective = 236, + const CXCursor_OMPSingleDirective = 237, + const CXCursor_OMPParallelForDirective = 238, + const CXCursor_OMPParallelSectionsDirective = 239, + const CXCursor_OMPTaskDirective = 240, + const CXCursor_OMPMasterDirective = 241, + const CXCursor_OMPCriticalDirective = 242, + const CXCursor_OMPTaskyieldDirective = 243, + const CXCursor_OMPBarrierDirective = 244, + const CXCursor_OMPTaskwaitDirective = 245, + const CXCursor_OMPFlushDirective = 246, + const CXCursor_SEHLeaveStmt = 247, + /// Only produced by `libclang` 3.6 and later. + const CXCursor_OMPOrderedDirective = 248, + /// Only produced by `libclang` 3.6 and later. + const CXCursor_OMPAtomicDirective = 249, + /// Only produced by `libclang` 3.6 and later. + const CXCursor_OMPForSimdDirective = 250, + /// Only produced by `libclang` 3.6 and later. 
+ const CXCursor_OMPParallelForSimdDirective = 251, + /// Only produced by `libclang` 3.6 and later. + const CXCursor_OMPTargetDirective = 252, + /// Only produced by `libclang` 3.6 and later. + const CXCursor_OMPTeamsDirective = 253, + /// Only produced by `libclang` 3.7 and later. + const CXCursor_OMPTaskgroupDirective = 254, + /// Only produced by `libclang` 3.7 and later. + const CXCursor_OMPCancellationPointDirective = 255, + /// Only produced by `libclang` 3.7 and later. + const CXCursor_OMPCancelDirective = 256, + /// Only produced by `libclang` 3.8 and later. + const CXCursor_OMPTargetDataDirective = 257, + /// Only produced by `libclang` 3.8 and later. + const CXCursor_OMPTaskLoopDirective = 258, + /// Only produced by `libclang` 3.8 and later. + const CXCursor_OMPTaskLoopSimdDirective = 259, + /// Only produced by `libclang` 3.8 and later. + const CXCursor_OMPDistributeDirective = 260, + /// Only produced by `libclang` 3.9 and later. + const CXCursor_OMPTargetEnterDataDirective = 261, + /// Only produced by `libclang` 3.9 and later. + const CXCursor_OMPTargetExitDataDirective = 262, + /// Only produced by `libclang` 3.9 and later. + const CXCursor_OMPTargetParallelDirective = 263, + /// Only produced by `libclang` 3.9 and later. + const CXCursor_OMPTargetParallelForDirective = 264, + /// Only produced by `libclang` 3.9 and later. + const CXCursor_OMPTargetUpdateDirective = 265, + /// Only produced by `libclang` 3.9 and later. + const CXCursor_OMPDistributeParallelForDirective = 266, + /// Only produced by `libclang` 3.9 and later. + const CXCursor_OMPDistributeParallelForSimdDirective = 267, + /// Only produced by `libclang` 3.9 and later. + const CXCursor_OMPDistributeSimdDirective = 268, + /// Only produced by `libclang` 3.9 and later. + const CXCursor_OMPTargetParallelForSimdDirective = 269, + /// Only produced by `libclang` 4.0 and later. + const CXCursor_OMPTargetSimdDirective = 270, + /// Only produced by `libclang` 4.0 and later. + const CXCursor_OMPTeamsDistributeDirective = 271, + /// Only produced by `libclang` 4.0 and later. + const CXCursor_OMPTeamsDistributeSimdDirective = 272, + /// Only produced by `libclang` 4.0 and later. + const CXCursor_OMPTeamsDistributeParallelForSimdDirective = 273, + /// Only produced by `libclang` 4.0 and later. + const CXCursor_OMPTeamsDistributeParallelForDirective = 274, + /// Only produced by `libclang` 4.0 and later. + const CXCursor_OMPTargetTeamsDirective = 275, + /// Only produced by `libclang` 4.0 and later. + const CXCursor_OMPTargetTeamsDistributeDirective = 276, + /// Only produced by `libclang` 4.0 and later. + const CXCursor_OMPTargetTeamsDistributeParallelForDirective = 277, + /// Only produced by `libclang` 4.0 and later. + const CXCursor_OMPTargetTeamsDistributeParallelForSimdDirective = 278, + /// Only producer by `libclang` 4.0 and later. + const CXCursor_OMPTargetTeamsDistributeSimdDirective = 279, + /// Only produced by 'libclang' 9.0 and later. + const CXCursor_BuiltinBitCastExpr = 280, + /// Only produced by `libclang` 10.0 and later. + const CXCursor_OMPMasterTaskLoopDirective = 281, + /// Only produced by `libclang` 10.0 and later. + const CXCursor_OMPParallelMasterTaskLoopDirective = 282, + /// Only produced by `libclang` 10.0 and later. + const CXCursor_OMPMasterTaskLoopSimdDirective = 283, + /// Only produced by `libclang` 10.0 and later. + const CXCursor_OMPParallelMasterTaskLoopSimdDirective = 284, + /// Only produced by `libclang` 10.0 and later. 
+ const CXCursor_OMPParallelMasterDirective = 285, + /// Only produced by `libclang` 11.0 and later. + const CXCursor_OMPDepobjDirective = 286, + /// Only produced by `libclang` 11.0 and later. + const CXCursor_OMPScanDirective = 287, + /// Only produced by `libclang` 13.0 and later. + const CXCursor_OMPTileDirective = 288, + /// Only produced by `libclang` 13.0 and later. + const CXCursor_OMPCanonicalLoop = 289, + /// Only produced by `libclang` 13.0 and later. + const CXCursor_OMPInteropDirective = 290, + /// Only produced by `libclang` 13.0 and later. + const CXCursor_OMPDispatchDirective = 291, + /// Only produced by `libclang` 13.0 and later. + const CXCursor_OMPMaskedDirective = 292, + /// Only produced by `libclang` 13.0 and later. + const CXCursor_OMPUnrollDirective = 293, + /// Only produced by `libclang` 14.0 and later. + const CXCursor_OMPMetaDirective = 294, + /// Only produced by `libclang` 14.0 and later. + const CXCursor_OMPGenericLoopDirective = 295, + /// Only produced by `libclang` 15.0 and later. + const CXCursor_OMPTeamsGenericLoopDirective = 296, + /// Only produced by `libclang` 15.0 and later. + const CXCursor_OMPTargetTeamsGenericLoopDirective = 297, + /// Only produced by `libclang` 15.0 and later. + const CXCursor_OMPParallelGenericLoopDirective = 298, + /// Only produced by `libclang` 15.0 and later. + const CXCursor_OMPTargetParallelGenericLoopDirective = 299, + /// Only produced by `libclang` 15.0 and later. + const CXCursor_OMPParallelMaskedDirective = 300, + /// Only produced by `libclang` 15.0 and later. + const CXCursor_OMPMaskedTaskLoopDirective = 301, + /// Only produced by `libclang` 15.0 and later. + const CXCursor_OMPMaskedTaskLoopSimdDirective = 302, + /// Only produced by `libclang` 15.0 and later. + const CXCursor_OMPParallelMaskedTaskLoopDirective = 303, + /// Only produced by `libclang` 15.0 and later. + const CXCursor_OMPParallelMaskedTaskLoopSimdDirective = 304, + /// Only produced by `libclang` 16.0 and later. + const CXCursor_OMPErrorDirective = 305, + /// Only produced by `libclang` 18.0 and later. + const CXCursor_OMPScopeDirective = 306, + #[cfg(not(feature="clang_15_0"))] + const CXCursor_TranslationUnit = 300, + #[cfg(feature="clang_15_0")] + const CXCursor_TranslationUnit = 350, + const CXCursor_UnexposedAttr = 400, + const CXCursor_IBActionAttr = 401, + const CXCursor_IBOutletAttr = 402, + const CXCursor_IBOutletCollectionAttr = 403, + const CXCursor_CXXFinalAttr = 404, + const CXCursor_CXXOverrideAttr = 405, + const CXCursor_AnnotateAttr = 406, + const CXCursor_AsmLabelAttr = 407, + const CXCursor_PackedAttr = 408, + const CXCursor_PureAttr = 409, + const CXCursor_ConstAttr = 410, + const CXCursor_NoDuplicateAttr = 411, + const CXCursor_CUDAConstantAttr = 412, + const CXCursor_CUDADeviceAttr = 413, + const CXCursor_CUDAGlobalAttr = 414, + const CXCursor_CUDAHostAttr = 415, + /// Only produced by `libclang` 3.6 and later. + const CXCursor_CUDASharedAttr = 416, + /// Only produced by `libclang` 3.8 and later. + const CXCursor_VisibilityAttr = 417, + /// Only produced by `libclang` 3.8 and later. + const CXCursor_DLLExport = 418, + /// Only produced by `libclang` 3.8 and later. + const CXCursor_DLLImport = 419, + /// Only produced by `libclang` 8.0 and later. + const CXCursor_NSReturnsRetained = 420, + /// Only produced by `libclang` 8.0 and later. + const CXCursor_NSReturnsNotRetained = 421, + /// Only produced by `libclang` 8.0 and later. + const CXCursor_NSReturnsAutoreleased = 422, + /// Only produced by `libclang` 8.0 and later. 
+ const CXCursor_NSConsumesSelf = 423, + /// Only produced by `libclang` 8.0 and later. + const CXCursor_NSConsumed = 424, + /// Only produced by `libclang` 8.0 and later. + const CXCursor_ObjCException = 425, + /// Only produced by `libclang` 8.0 and later. + const CXCursor_ObjCNSObject = 426, + /// Only produced by `libclang` 8.0 and later. + const CXCursor_ObjCIndependentClass = 427, + /// Only produced by `libclang` 8.0 and later. + const CXCursor_ObjCPreciseLifetime = 428, + /// Only produced by `libclang` 8.0 and later. + const CXCursor_ObjCReturnsInnerPointer = 429, + /// Only produced by `libclang` 8.0 and later. + const CXCursor_ObjCRequiresSuper = 430, + /// Only produced by `libclang` 8.0 and later. + const CXCursor_ObjCRootClass = 431, + /// Only produced by `libclang` 8.0 and later. + const CXCursor_ObjCSubclassingRestricted = 432, + /// Only produced by `libclang` 8.0 and later. + const CXCursor_ObjCExplicitProtocolImpl = 433, + /// Only produced by `libclang` 8.0 and later. + const CXCursor_ObjCDesignatedInitializer = 434, + /// Only produced by `libclang` 8.0 and later. + const CXCursor_ObjCRuntimeVisible = 435, + /// Only produced by `libclang` 8.0 and later. + const CXCursor_ObjCBoxable = 436, + /// Only produced by `libclang` 8.0 and later. + const CXCursor_FlagEnum = 437, + /// Only produced by `libclang` 9.0 and later. + const CXCursor_ConvergentAttr = 438, + /// Only produced by `libclang` 9.0 and later. + const CXCursor_WarnUnusedAttr = 439, + /// Only produced by `libclang` 9.0 and later. + const CXCursor_WarnUnusedResultAttr = 440, + /// Only produced by `libclang` 9.0 and later. + const CXCursor_AlignedAttr = 441, + const CXCursor_PreprocessingDirective = 500, + const CXCursor_MacroDefinition = 501, + /// Duplicate of `CXCursor_MacroInstantiation`. + const CXCursor_MacroExpansion = 502, + const CXCursor_InclusionDirective = 503, + const CXCursor_ModuleImportDecl = 600, + /// Only produced by `libclang` 3.8 and later. + const CXCursor_TypeAliasTemplateDecl = 601, + /// Only produced by `libclang` 3.9 and later. + const CXCursor_StaticAssert = 602, + /// Only produced by `libclang` 4.0 and later. + const CXCursor_FriendDecl = 603, + /// Only produced by `libclang` 15.0 and later. + const CXCursor_ConceptDecl = 604, + /// Only produced by `libclang` 3.7 and later. + const CXCursor_OverloadCandidate = 700, + } +} + +cenum! { + /// Only available on `libclang` 5.0 and later. + #[cfg(feature = "clang_5_0")] + enum CXCursor_ExceptionSpecificationKind { + const CXCursor_ExceptionSpecificationKind_None = 0, + const CXCursor_ExceptionSpecificationKind_DynamicNone = 1, + const CXCursor_ExceptionSpecificationKind_Dynamic = 2, + const CXCursor_ExceptionSpecificationKind_MSAny = 3, + const CXCursor_ExceptionSpecificationKind_BasicNoexcept = 4, + const CXCursor_ExceptionSpecificationKind_ComputedNoexcept = 5, + const CXCursor_ExceptionSpecificationKind_Unevaluated = 6, + const CXCursor_ExceptionSpecificationKind_Uninstantiated = 7, + const CXCursor_ExceptionSpecificationKind_Unparsed = 8, + /// Only available on `libclang` 9.0 and later. + #[cfg(feature = "clang_9_0")] + const CXCursor_ExceptionSpecificationKind_NoThrow = 9, + } +} + +cenum! { + enum CXDiagnosticSeverity { + const CXDiagnostic_Ignored = 0, + const CXDiagnostic_Note = 1, + const CXDiagnostic_Warning = 2, + const CXDiagnostic_Error = 3, + const CXDiagnostic_Fatal = 4, + } +} + +cenum! 
{ + enum CXErrorCode { + const CXError_Success = 0, + const CXError_Failure = 1, + const CXError_Crashed = 2, + const CXError_InvalidArguments = 3, + const CXError_ASTReadError = 4, + } +} + +cenum! { + enum CXEvalResultKind { + const CXEval_UnExposed = 0, + const CXEval_Int = 1 , + const CXEval_Float = 2, + const CXEval_ObjCStrLiteral = 3, + const CXEval_StrLiteral = 4, + const CXEval_CFStr = 5, + const CXEval_Other = 6, + } +} + +cenum! { + enum CXIdxAttrKind { + const CXIdxAttr_Unexposed = 0, + const CXIdxAttr_IBAction = 1, + const CXIdxAttr_IBOutlet = 2, + const CXIdxAttr_IBOutletCollection = 3, + } +} + +cenum! { + enum CXIdxEntityCXXTemplateKind { + const CXIdxEntity_NonTemplate = 0, + const CXIdxEntity_Template = 1, + const CXIdxEntity_TemplatePartialSpecialization = 2, + const CXIdxEntity_TemplateSpecialization = 3, + } +} + +cenum! { + enum CXIdxEntityKind { + const CXIdxEntity_Unexposed = 0, + const CXIdxEntity_Typedef = 1, + const CXIdxEntity_Function = 2, + const CXIdxEntity_Variable = 3, + const CXIdxEntity_Field = 4, + const CXIdxEntity_EnumConstant = 5, + const CXIdxEntity_ObjCClass = 6, + const CXIdxEntity_ObjCProtocol = 7, + const CXIdxEntity_ObjCCategory = 8, + const CXIdxEntity_ObjCInstanceMethod = 9, + const CXIdxEntity_ObjCClassMethod = 10, + const CXIdxEntity_ObjCProperty = 11, + const CXIdxEntity_ObjCIvar = 12, + const CXIdxEntity_Enum = 13, + const CXIdxEntity_Struct = 14, + const CXIdxEntity_Union = 15, + const CXIdxEntity_CXXClass = 16, + const CXIdxEntity_CXXNamespace = 17, + const CXIdxEntity_CXXNamespaceAlias = 18, + const CXIdxEntity_CXXStaticVariable = 19, + const CXIdxEntity_CXXStaticMethod = 20, + const CXIdxEntity_CXXInstanceMethod = 21, + const CXIdxEntity_CXXConstructor = 22, + const CXIdxEntity_CXXDestructor = 23, + const CXIdxEntity_CXXConversionFunction = 24, + const CXIdxEntity_CXXTypeAlias = 25, + const CXIdxEntity_CXXInterface = 26, + /// Only produced by `libclang` 15.0 and later. + const CXIdxEntity_CXXConcept = 27, + } +} + +cenum! { + enum CXIdxEntityLanguage { + const CXIdxEntityLang_None = 0, + const CXIdxEntityLang_C = 1, + const CXIdxEntityLang_ObjC = 2, + const CXIdxEntityLang_CXX = 3, + /// Only produced by `libclang` 5.0 and later. + const CXIdxEntityLang_Swift = 4, + } +} + +cenum! { + enum CXIdxEntityRefKind { + const CXIdxEntityRef_Direct = 1, + const CXIdxEntityRef_Implicit = 2, + } +} + +cenum! { + enum CXIdxObjCContainerKind { + const CXIdxObjCContainer_ForwardRef = 0, + const CXIdxObjCContainer_Interface = 1, + const CXIdxObjCContainer_Implementation = 2, + } +} + +cenum! { + enum CXLanguageKind { + const CXLanguage_Invalid = 0, + const CXLanguage_C = 1, + const CXLanguage_ObjC = 2, + const CXLanguage_CPlusPlus = 3, + } +} + +cenum! { + enum CXLinkageKind { + const CXLinkage_Invalid = 0, + const CXLinkage_NoLinkage = 1, + const CXLinkage_Internal = 2, + const CXLinkage_UniqueExternal = 3, + const CXLinkage_External = 4, + } +} + +cenum! { + enum CXLoadDiag_Error { + const CXLoadDiag_None = 0, + const CXLoadDiag_Unknown = 1, + const CXLoadDiag_CannotLoad = 2, + const CXLoadDiag_InvalidFile = 3, + } +} + +cenum! { + /// Only available on `libclang` 7.0 and later. 
+ #[cfg(feature = "clang_7_0")] + enum CXPrintingPolicyProperty { + const CXPrintingPolicy_Indentation = 0, + const CXPrintingPolicy_SuppressSpecifiers = 1, + const CXPrintingPolicy_SuppressTagKeyword = 2, + const CXPrintingPolicy_IncludeTagDefinition = 3, + const CXPrintingPolicy_SuppressScope = 4, + const CXPrintingPolicy_SuppressUnwrittenScope = 5, + const CXPrintingPolicy_SuppressInitializers = 6, + const CXPrintingPolicy_ConstantArraySizeAsWritten = 7, + const CXPrintingPolicy_AnonymousTagLocations = 8, + const CXPrintingPolicy_SuppressStrongLifetime = 9, + const CXPrintingPolicy_SuppressLifetimeQualifiers = 10, + const CXPrintingPolicy_SuppressTemplateArgsInCXXConstructors = 11, + const CXPrintingPolicy_Bool = 12, + const CXPrintingPolicy_Restrict = 13, + const CXPrintingPolicy_Alignof = 14, + const CXPrintingPolicy_UnderscoreAlignof = 15, + const CXPrintingPolicy_UseVoidForZeroParams = 16, + const CXPrintingPolicy_TerseOutput = 17, + const CXPrintingPolicy_PolishForDeclaration = 18, + const CXPrintingPolicy_Half = 19, + const CXPrintingPolicy_MSWChar = 20, + const CXPrintingPolicy_IncludeNewlines = 21, + const CXPrintingPolicy_MSVCFormatting = 22, + const CXPrintingPolicy_ConstantsAsWritten = 23, + const CXPrintingPolicy_SuppressImplicitBase = 24, + const CXPrintingPolicy_FullyQualifiedName = 25, + } +} + +cenum! { + enum CXRefQualifierKind { + const CXRefQualifier_None = 0, + const CXRefQualifier_LValue = 1, + const CXRefQualifier_RValue = 2, + } +} + +cenum! { + enum CXResult { + const CXResult_Success = 0, + const CXResult_Invalid = 1, + const CXResult_VisitBreak = 2, + } +} + +cenum! { + enum CXSaveError { + const CXSaveError_None = 0, + const CXSaveError_Unknown = 1, + const CXSaveError_TranslationErrors = 2, + const CXSaveError_InvalidTU = 3, + } +} + +cenum! { + /// Only available on `libclang` 6.0 and later. + #[cfg(feature = "clang_6_0")] + enum CXTLSKind { + const CXTLS_None = 0, + const CXTLS_Dynamic = 1, + const CXTLS_Static = 2, + } +} + +cenum! { + enum CXTUResourceUsageKind { + const CXTUResourceUsage_AST = 1, + const CXTUResourceUsage_Identifiers = 2, + const CXTUResourceUsage_Selectors = 3, + const CXTUResourceUsage_GlobalCompletionResults = 4, + const CXTUResourceUsage_SourceManagerContentCache = 5, + const CXTUResourceUsage_AST_SideTables = 6, + const CXTUResourceUsage_SourceManager_Membuffer_Malloc = 7, + const CXTUResourceUsage_SourceManager_Membuffer_MMap = 8, + const CXTUResourceUsage_ExternalASTSource_Membuffer_Malloc = 9, + const CXTUResourceUsage_ExternalASTSource_Membuffer_MMap = 10, + const CXTUResourceUsage_Preprocessor = 11, + const CXTUResourceUsage_PreprocessingRecord = 12, + const CXTUResourceUsage_SourceManager_DataStructures = 13, + const CXTUResourceUsage_Preprocessor_HeaderSearch = 14, + } +} + +cenum! { + /// Only available on `libclang` 3.6 and later. + #[cfg(feature = "clang_3_6")] + enum CXTemplateArgumentKind { + const CXTemplateArgumentKind_Null = 0, + const CXTemplateArgumentKind_Type = 1, + const CXTemplateArgumentKind_Declaration = 2, + const CXTemplateArgumentKind_NullPtr = 3, + const CXTemplateArgumentKind_Integral = 4, + const CXTemplateArgumentKind_Template = 5, + const CXTemplateArgumentKind_TemplateExpansion = 6, + const CXTemplateArgumentKind_Expression = 7, + const CXTemplateArgumentKind_Pack = 8, + const CXTemplateArgumentKind_Invalid = 9, + } +} + +cenum! 
{ + enum CXTokenKind { + const CXToken_Punctuation = 0, + const CXToken_Keyword = 1, + const CXToken_Identifier = 2, + const CXToken_Literal = 3, + const CXToken_Comment = 4, + } +} + +cenum! { + enum CXTypeKind { + const CXType_Invalid = 0, + const CXType_Unexposed = 1, + const CXType_Void = 2, + const CXType_Bool = 3, + const CXType_Char_U = 4, + const CXType_UChar = 5, + const CXType_Char16 = 6, + const CXType_Char32 = 7, + const CXType_UShort = 8, + const CXType_UInt = 9, + const CXType_ULong = 10, + const CXType_ULongLong = 11, + const CXType_UInt128 = 12, + const CXType_Char_S = 13, + const CXType_SChar = 14, + const CXType_WChar = 15, + const CXType_Short = 16, + const CXType_Int = 17, + const CXType_Long = 18, + const CXType_LongLong = 19, + const CXType_Int128 = 20, + const CXType_Float = 21, + const CXType_Double = 22, + const CXType_LongDouble = 23, + const CXType_NullPtr = 24, + const CXType_Overload = 25, + const CXType_Dependent = 26, + const CXType_ObjCId = 27, + const CXType_ObjCClass = 28, + const CXType_ObjCSel = 29, + /// Only produced by `libclang` 3.9 and later. + const CXType_Float128 = 30, + /// Only produced by `libclang` 5.0 and later. + const CXType_Half = 31, + /// Only produced by `libclang` 6.0 and later. + const CXType_Float16 = 32, + /// Only produced by `libclang` 7.0 and later. + const CXType_ShortAccum = 33, + /// Only produced by `libclang` 7.0 and later. + const CXType_Accum = 34, + /// Only produced by `libclang` 7.0 and later. + const CXType_LongAccum = 35, + /// Only produced by `libclang` 7.0 and later. + const CXType_UShortAccum = 36, + /// Only produced by `libclang` 7.0 and later. + const CXType_UAccum = 37, + /// Only produced by `libclang` 7.0 and later. + const CXType_ULongAccum = 38, + /// Only produced by `libclang` 11.0 and later. + const CXType_BFloat16 = 39, + /// Only produced by `libclang` 14.0 and later. + const CXType_Ibm128 = 40, + const CXType_Complex = 100, + const CXType_Pointer = 101, + const CXType_BlockPointer = 102, + const CXType_LValueReference = 103, + const CXType_RValueReference = 104, + const CXType_Record = 105, + const CXType_Enum = 106, + const CXType_Typedef = 107, + const CXType_ObjCInterface = 108, + const CXType_ObjCObjectPointer = 109, + const CXType_FunctionNoProto = 110, + const CXType_FunctionProto = 111, + const CXType_ConstantArray = 112, + const CXType_Vector = 113, + const CXType_IncompleteArray = 114, + const CXType_VariableArray = 115, + const CXType_DependentSizedArray = 116, + const CXType_MemberPointer = 117, + /// Only produced by `libclang` 3.8 and later. + const CXType_Auto = 118, + /// Only produced by `libclang` 3.9 and later. + const CXType_Elaborated = 119, + /// Only produced by `libclang` 5.0 and later. + const CXType_Pipe = 120, + /// Only produced by `libclang` 5.0 and later. + const CXType_OCLImage1dRO = 121, + /// Only produced by `libclang` 5.0 and later. + const CXType_OCLImage1dArrayRO = 122, + /// Only produced by `libclang` 5.0 and later. + const CXType_OCLImage1dBufferRO = 123, + /// Only produced by `libclang` 5.0 and later. + const CXType_OCLImage2dRO = 124, + /// Only produced by `libclang` 5.0 and later. + const CXType_OCLImage2dArrayRO = 125, + /// Only produced by `libclang` 5.0 and later. + const CXType_OCLImage2dDepthRO = 126, + /// Only produced by `libclang` 5.0 and later. + const CXType_OCLImage2dArrayDepthRO = 127, + /// Only produced by `libclang` 5.0 and later. + const CXType_OCLImage2dMSAARO = 128, + /// Only produced by `libclang` 5.0 and later. 
+ const CXType_OCLImage2dArrayMSAARO = 129, + /// Only produced by `libclang` 5.0 and later. + const CXType_OCLImage2dMSAADepthRO = 130, + /// Only produced by `libclang` 5.0 and later. + const CXType_OCLImage2dArrayMSAADepthRO = 131, + /// Only produced by `libclang` 5.0 and later. + const CXType_OCLImage3dRO = 132, + /// Only produced by `libclang` 5.0 and later. + const CXType_OCLImage1dWO = 133, + /// Only produced by `libclang` 5.0 and later. + const CXType_OCLImage1dArrayWO = 134, + /// Only produced by `libclang` 5.0 and later. + const CXType_OCLImage1dBufferWO = 135, + /// Only produced by `libclang` 5.0 and later. + const CXType_OCLImage2dWO = 136, + /// Only produced by `libclang` 5.0 and later. + const CXType_OCLImage2dArrayWO = 137, + /// Only produced by `libclang` 5.0 and later. + const CXType_OCLImage2dDepthWO = 138, + /// Only produced by `libclang` 5.0 and later. + const CXType_OCLImage2dArrayDepthWO = 139, + /// Only produced by `libclang` 5.0 and later. + const CXType_OCLImage2dMSAAWO = 140, + /// Only produced by `libclang` 5.0 and later. + const CXType_OCLImage2dArrayMSAAWO = 141, + /// Only produced by `libclang` 5.0 and later. + const CXType_OCLImage2dMSAADepthWO = 142, + /// Only produced by `libclang` 5.0 and later. + const CXType_OCLImage2dArrayMSAADepthWO = 143, + /// Only produced by `libclang` 5.0 and later. + const CXType_OCLImage3dWO = 144, + /// Only produced by `libclang` 5.0 and later. + const CXType_OCLImage1dRW = 145, + /// Only produced by `libclang` 5.0 and later. + const CXType_OCLImage1dArrayRW = 146, + /// Only produced by `libclang` 5.0 and later. + const CXType_OCLImage1dBufferRW = 147, + /// Only produced by `libclang` 5.0 and later. + const CXType_OCLImage2dRW = 148, + /// Only produced by `libclang` 5.0 and later. + const CXType_OCLImage2dArrayRW = 149, + /// Only produced by `libclang` 5.0 and later. + const CXType_OCLImage2dDepthRW = 150, + /// Only produced by `libclang` 5.0 and later. + const CXType_OCLImage2dArrayDepthRW = 151, + /// Only produced by `libclang` 5.0 and later. + const CXType_OCLImage2dMSAARW = 152, + /// Only produced by `libclang` 5.0 and later. + const CXType_OCLImage2dArrayMSAARW = 153, + /// Only produced by `libclang` 5.0 and later. + const CXType_OCLImage2dMSAADepthRW = 154, + /// Only produced by `libclang` 5.0 and later. + const CXType_OCLImage2dArrayMSAADepthRW = 155, + /// Only produced by `libclang` 5.0 and later. + const CXType_OCLImage3dRW = 156, + /// Only produced by `libclang` 5.0 and later. + const CXType_OCLSampler = 157, + /// Only produced by `libclang` 5.0 and later. + const CXType_OCLEvent = 158, + /// Only produced by `libclang` 5.0 and later. + const CXType_OCLQueue = 159, + /// Only produced by `libclang` 5.0 and later. + const CXType_OCLReserveID = 160, + /// Only produced by `libclang` 8.0 and later. + const CXType_ObjCObject = 161, + /// Only produced by `libclang` 8.0 and later. + const CXType_ObjCTypeParam = 162, + /// Only produced by `libclang` 8.0 and later. + const CXType_Attributed = 163, + /// Only produced by `libclang` 8.0 and later. + const CXType_OCLIntelSubgroupAVCMcePayload = 164, + /// Only produced by `libclang` 8.0 and later. + const CXType_OCLIntelSubgroupAVCImePayload = 165, + /// Only produced by `libclang` 8.0 and later. + const CXType_OCLIntelSubgroupAVCRefPayload = 166, + /// Only produced by `libclang` 8.0 and later. + const CXType_OCLIntelSubgroupAVCSicPayload = 167, + /// Only produced by `libclang` 8.0 and later. 
+ const CXType_OCLIntelSubgroupAVCMceResult = 168, + /// Only produced by `libclang` 8.0 and later. + const CXType_OCLIntelSubgroupAVCImeResult = 169, + /// Only produced by `libclang` 8.0 and later. + const CXType_OCLIntelSubgroupAVCRefResult = 170, + /// Only produced by `libclang` 8.0 and later. + const CXType_OCLIntelSubgroupAVCSicResult = 171, + /// Only produced by `libclang` 8.0 and later. + const CXType_OCLIntelSubgroupAVCImeResultSingleRefStreamout = 172, + /// Only produced by `libclang` 8.0 and later. + const CXType_OCLIntelSubgroupAVCImeResultDualRefStreamout = 173, + /// Only produced by `libclang` 8.0 and later. + const CXType_OCLIntelSubgroupAVCImeSingleRefStreamin = 174, + /// Only produced by `libclang` 8.0 and later. + const CXType_OCLIntelSubgroupAVCImeDualRefStreamin = 175, + /// Only produced by `libclang` 9.0 and later. + const CXType_ExtVector = 176, + /// Only produced by `libclang` 11.0 and later. + const CXType_Atomic = 177, + /// Only produced by `libclang` 15.0 and later. + const CXType_BTFTagAttributed = 178, + } +} + +cenum! { + enum CXTypeLayoutError { + const CXTypeLayoutError_Invalid = -1, + const CXTypeLayoutError_Incomplete = -2, + const CXTypeLayoutError_Dependent = -3, + const CXTypeLayoutError_NotConstantSize = -4, + const CXTypeLayoutError_InvalidFieldName = -5, + /// Only produced by `libclang` 9.0 and later. + const CXTypeLayoutError_Undeduced = -6, + } +} + +cenum! { + /// Only available on `libclang` 3.8 and later. + #[cfg(feature = "clang_3_8")] + enum CXVisibilityKind { + const CXVisibility_Invalid = 0, + const CXVisibility_Hidden = 1, + const CXVisibility_Protected = 2, + const CXVisibility_Default = 3, + } +} + +cenum! { + /// Only available on `libclang` 8.0 and later. + #[cfg(feature = "clang_8_0")] + enum CXTypeNullabilityKind { + const CXTypeNullability_NonNull = 0, + const CXTypeNullability_Nullable = 1, + const CXTypeNullability_Unspecified = 2, + const CXTypeNullability_Invalid = 3, + /// Only produced by `libclang` 12.0 and later. + const CXTypeNullability_NullableResult = 4, + } +} + +cenum! { + /// Only available on `libclang` 17.0 and later. + #[cfg(feature = "clang_17_0")] + enum CXUnaryOperatorKind { + const CXUnaryOperator_Invalid = 0, + const CXUnaryOperator_PostInc = 1, + const CXUnaryOperator_PostDec = 2, + const CXUnaryOperator_PreInc = 3, + const CXUnaryOperator_PreDec = 4, + const CXUnaryOperator_AddrOf = 5, + const CXUnaryOperator_Deref = 6, + const CXUnaryOperator_Plus = 7, + const CXUnaryOperator_Minus = 8, + const CXUnaryOperator_Not = 9, + const CXUnaryOperator_LNot = 10, + const CXUnaryOperator_Real = 11, + const CXUnaryOperator_Imag = 12, + const CXUnaryOperator_Extension = 13, + const CXUnaryOperator_Coawait = 14, + } +} + +cenum! { + enum CXVisitorResult { + const CXVisit_Break = 0, + const CXVisit_Continue = 1, + } +} + +cenum! { + enum CX_CXXAccessSpecifier { + const CX_CXXInvalidAccessSpecifier = 0, + const CX_CXXPublic = 1, + const CX_CXXProtected = 2, + const CX_CXXPrivate = 3, + } +} + +cenum! { + /// Only available on `libclang` 3.6 and later. + #[cfg(feature = "clang_3_6")] + enum CX_StorageClass { + const CX_SC_Invalid = 0, + const CX_SC_None = 1, + const CX_SC_Extern = 2, + const CX_SC_Static = 3, + const CX_SC_PrivateExtern = 4, + const CX_SC_OpenCLWorkGroupLocal = 5, + const CX_SC_Auto = 6, + const CX_SC_Register = 7, + } +} + +//================================================ +// Flags +//================================================ + +cenum! 
{ + enum CXCodeComplete_Flags { + const CXCodeComplete_IncludeMacros = 1; + const CXCodeComplete_IncludeCodePatterns = 2; + const CXCodeComplete_IncludeBriefComments = 4; + const CXCodeComplete_SkipPreamble = 8; + const CXCodeComplete_IncludeCompletionsWithFixIts = 16; + } +} + +cenum! { + enum CXCompletionContext { + const CXCompletionContext_Unexposed = 0; + const CXCompletionContext_AnyType = 1; + const CXCompletionContext_AnyValue = 2; + const CXCompletionContext_ObjCObjectValue = 4; + const CXCompletionContext_ObjCSelectorValue = 8; + const CXCompletionContext_CXXClassTypeValue = 16; + const CXCompletionContext_DotMemberAccess = 32; + const CXCompletionContext_ArrowMemberAccess = 64; + const CXCompletionContext_ObjCPropertyAccess = 128; + const CXCompletionContext_EnumTag = 256; + const CXCompletionContext_UnionTag = 512; + const CXCompletionContext_StructTag = 1024; + const CXCompletionContext_ClassTag = 2048; + const CXCompletionContext_Namespace = 4096; + const CXCompletionContext_NestedNameSpecifier = 8192; + const CXCompletionContext_ObjCInterface = 16384; + const CXCompletionContext_ObjCProtocol = 32768; + const CXCompletionContext_ObjCCategory = 65536; + const CXCompletionContext_ObjCInstanceMessage = 131072; + const CXCompletionContext_ObjCClassMessage = 262144; + const CXCompletionContext_ObjCSelectorName = 524288; + const CXCompletionContext_MacroName = 1048576; + const CXCompletionContext_NaturalLanguage = 2097152; + const CXCompletionContext_IncludedFile = 4194304; + const CXCompletionContext_Unknown = 8388607; + } +} + +cenum! { + enum CXDiagnosticDisplayOptions { + const CXDiagnostic_DisplaySourceLocation = 1; + const CXDiagnostic_DisplayColumn = 2; + const CXDiagnostic_DisplaySourceRanges = 4; + const CXDiagnostic_DisplayOption = 8; + const CXDiagnostic_DisplayCategoryId = 16; + const CXDiagnostic_DisplayCategoryName = 32; + } +} + +cenum! { + enum CXGlobalOptFlags { + const CXGlobalOpt_None = 0; + const CXGlobalOpt_ThreadBackgroundPriorityForIndexing = 1; + const CXGlobalOpt_ThreadBackgroundPriorityForEditing = 2; + const CXGlobalOpt_ThreadBackgroundPriorityForAll = 3; + } +} + +cenum! { + enum CXIdxDeclInfoFlags { + const CXIdxDeclFlag_Skipped = 1; + } +} + +cenum! { + enum CXIndexOptFlags { + const CXIndexOptNone = 0; + const CXIndexOptSuppressRedundantRefs = 1; + const CXIndexOptIndexFunctionLocalSymbols = 2; + const CXIndexOptIndexImplicitTemplateInstantiations = 4; + const CXIndexOptSuppressWarnings = 8; + const CXIndexOptSkipParsedBodiesInSession = 16; + } +} + +/// Only available on `libclang` 17.0 and later. +#[cfg(feature = "clang_17_0")] +#[cfg(not(target_os = "windows"))] +pub type CXIndexOptions_Flags = c_ushort; + +/// Only available on `libclang` 17.0 and later. +#[cfg(feature = "clang_17_0")] +#[cfg(target_os = "windows")] +pub type CXIndexOptions_Flags = c_uint; + +/// Only available on `libclang` 17.0 and later. +#[cfg(feature = "clang_17_0")] +pub const CXIndexOptions_ExcludeDeclarationsFromPCH: CXIndexOptions_Flags = 1; + +/// Only available on `libclang` 17.0 and later. +#[cfg(feature = "clang_17_0")] +pub const CXIndexOptions_DisplayDiagnostics: CXIndexOptions_Flags = 2; + +/// Only available on `libclang` 17.0 and later. +#[cfg(feature = "clang_17_0")] +pub const CXIndexOptions_StorePreamblesInMemory: CXIndexOptions_Flags = 4; + +cenum! { + enum CXNameRefFlags { + const CXNameRange_WantQualifier = 1; + const CXNameRange_WantTemplateArgs = 2; + const CXNameRange_WantSinglePiece = 4; + } +} + +cenum! 
{ + enum CXObjCDeclQualifierKind { + const CXObjCDeclQualifier_None = 0; + const CXObjCDeclQualifier_In = 1; + const CXObjCDeclQualifier_Inout = 2; + const CXObjCDeclQualifier_Out = 4; + const CXObjCDeclQualifier_Bycopy = 8; + const CXObjCDeclQualifier_Byref = 16; + const CXObjCDeclQualifier_Oneway = 32; + } +} + +cenum! { + enum CXObjCPropertyAttrKind { + const CXObjCPropertyAttr_noattr = 0; + const CXObjCPropertyAttr_readonly = 1; + const CXObjCPropertyAttr_getter = 2; + const CXObjCPropertyAttr_assign = 4; + const CXObjCPropertyAttr_readwrite = 8; + const CXObjCPropertyAttr_retain = 16; + const CXObjCPropertyAttr_copy = 32; + const CXObjCPropertyAttr_nonatomic = 64; + const CXObjCPropertyAttr_setter = 128; + const CXObjCPropertyAttr_atomic = 256; + const CXObjCPropertyAttr_weak = 512; + const CXObjCPropertyAttr_strong = 1024; + const CXObjCPropertyAttr_unsafe_unretained = 2048; + /// Only available on `libclang` 3.9 and later. + #[cfg(feature = "clang_3_9")] + const CXObjCPropertyAttr_class = 4096; + } +} + +cenum! { + enum CXReparse_Flags { + const CXReparse_None = 0; + } +} + +cenum! { + enum CXSaveTranslationUnit_Flags { + const CXSaveTranslationUnit_None = 0; + } +} + +cenum! { + /// Only available on `libclang` 7.0 and later. + #[cfg(feature = "clang_7_0")] + enum CXSymbolRole { + const CXSymbolRole_None = 0; + const CXSymbolRole_Declaration = 1; + const CXSymbolRole_Definition = 2; + const CXSymbolRole_Reference = 4; + const CXSymbolRole_Read = 8; + const CXSymbolRole_Write = 16; + const CXSymbolRole_Call = 32; + const CXSymbolRole_Dynamic = 64; + const CXSymbolRole_AddressOf = 128; + const CXSymbolRole_Implicit = 256; + } +} + +cenum! { + enum CXTranslationUnit_Flags { + const CXTranslationUnit_None = 0; + const CXTranslationUnit_DetailedPreprocessingRecord = 1; + const CXTranslationUnit_Incomplete = 2; + const CXTranslationUnit_PrecompiledPreamble = 4; + const CXTranslationUnit_CacheCompletionResults = 8; + const CXTranslationUnit_ForSerialization = 16; + const CXTranslationUnit_CXXChainedPCH = 32; + const CXTranslationUnit_SkipFunctionBodies = 64; + const CXTranslationUnit_IncludeBriefCommentsInCodeCompletion = 128; + /// Only available on `libclang` 3.8 and later. + #[cfg(feature = "clang_3_8")] + const CXTranslationUnit_CreatePreambleOnFirstParse = 256; + /// Only available on `libclang` 3.9 and later. + #[cfg(feature = "clang_3_9")] + const CXTranslationUnit_KeepGoing = 512; + /// Only available on `libclang` 5.0 and later. + #[cfg(feature = "clang_5_0")] + const CXTranslationUnit_SingleFileParse = 1024; + /// Only available on `libclang` 7.0 and later. + #[cfg(feature = "clang_7_0")] + const CXTranslationUnit_LimitSkipFunctionBodiesToPreamble = 2048; + /// Only available on `libclang` 8.0 and later. + #[cfg(feature = "clang_8_0")] + const CXTranslationUnit_IncludeAttributedTypes = 4096; + /// Only available on `libclang` 8.0 and later. + #[cfg(feature = "clang_8_0")] + const CXTranslationUnit_VisitImplicitAttributes = 8192; + /// Only available on `libclang` 9.0 and later. + #[cfg(feature = "clang_9_0")] + const CXTranslationUnit_IgnoreNonErrorsFromIncludedFiles = 16384; + /// Only available on `libclang` 10.0 and later. + #[cfg(feature = "clang_10_0")] + const CXTranslationUnit_RetainExcludedConditionalBlocks = 32768; + } +} + +//================================================ +// Structs +//================================================ + +// Opaque ________________________________________ + +macro_rules! 
opaque { + ($name:ident) => { + pub type $name = *mut c_void; + }; +} + +opaque!(CXCompilationDatabase); +opaque!(CXCompileCommand); +opaque!(CXCompileCommands); +opaque!(CXCompletionString); +opaque!(CXCursorSet); +opaque!(CXDiagnostic); +opaque!(CXDiagnosticSet); +#[cfg(feature = "clang_3_9")] +opaque!(CXEvalResult); +opaque!(CXFile); +opaque!(CXIdxClientASTFile); +opaque!(CXIdxClientContainer); +opaque!(CXIdxClientEntity); +opaque!(CXIdxClientFile); +opaque!(CXIndex); +opaque!(CXIndexAction); +opaque!(CXModule); +#[cfg(feature = "clang_7_0")] +opaque!(CXPrintingPolicy); +opaque!(CXRemapping); +#[cfg(feature = "clang_5_0")] +opaque!(CXTargetInfo); +opaque!(CXTranslationUnit); + +// Transparent ___________________________________ + +#[derive(Copy, Clone, Debug)] +#[repr(C)] +pub struct CXCodeCompleteResults { + pub Results: *mut CXCompletionResult, + pub NumResults: c_uint, +} + +default!(CXCodeCompleteResults); + +#[derive(Copy, Clone, Debug)] +#[repr(C)] +pub struct CXComment { + pub ASTNode: *const c_void, + pub TranslationUnit: CXTranslationUnit, +} + +default!(CXComment); + +#[derive(Copy, Clone, Debug)] +#[repr(C)] +pub struct CXCompletionResult { + pub CursorKind: CXCursorKind, + pub CompletionString: CXCompletionString, +} + +default!(CXCompletionResult); + +#[derive(Copy, Clone, Debug)] +#[repr(C)] +pub struct CXCursor { + pub kind: CXCursorKind, + pub xdata: c_int, + pub data: [*const c_void; 3], +} + +default!(CXCursor); + +#[derive(Copy, Clone, Debug)] +#[repr(C)] +pub struct CXCursorAndRangeVisitor { + pub context: *mut c_void, + pub visit: Option CXVisitorResult>, +} + +default!(CXCursorAndRangeVisitor); + +#[derive(Copy, Clone, Debug)] +#[repr(C)] +pub struct CXFileUniqueID { + pub data: [c_ulonglong; 3], +} + +default!(CXFileUniqueID); + +#[derive(Copy, Clone, Debug)] +#[repr(C)] +pub struct CXIdxAttrInfo { + pub kind: CXIdxAttrKind, + pub cursor: CXCursor, + pub loc: CXIdxLoc, +} + +default!(CXIdxAttrInfo); + +#[derive(Copy, Clone, Debug)] +#[repr(C)] +pub struct CXIdxBaseClassInfo { + pub base: *const CXIdxEntityInfo, + pub cursor: CXCursor, + pub loc: CXIdxLoc, +} + +default!(CXIdxBaseClassInfo); + +#[derive(Copy, Clone, Debug)] +#[repr(C)] +pub struct CXIdxCXXClassDeclInfo { + pub declInfo: *const CXIdxDeclInfo, + pub bases: *const *const CXIdxBaseClassInfo, + pub numBases: c_uint, +} + +default!(CXIdxCXXClassDeclInfo); + +#[derive(Copy, Clone, Debug)] +#[repr(C)] +pub struct CXIdxContainerInfo { + pub cursor: CXCursor, +} + +default!(CXIdxContainerInfo); + +#[derive(Copy, Clone, Debug)] +#[repr(C)] +pub struct CXIdxDeclInfo { + pub entityInfo: *const CXIdxEntityInfo, + pub cursor: CXCursor, + pub loc: CXIdxLoc, + pub semanticContainer: *const CXIdxContainerInfo, + pub lexicalContainer: *const CXIdxContainerInfo, + pub isRedeclaration: c_int, + pub isDefinition: c_int, + pub isContainer: c_int, + pub declAsContainer: *const CXIdxContainerInfo, + pub isImplicit: c_int, + pub attributes: *const *const CXIdxAttrInfo, + pub numAttributes: c_uint, + pub flags: c_uint, +} + +default!(CXIdxDeclInfo); + +#[derive(Copy, Clone, Debug)] +#[repr(C)] +pub struct CXIdxEntityInfo { + pub kind: CXIdxEntityKind, + pub templateKind: CXIdxEntityCXXTemplateKind, + pub lang: CXIdxEntityLanguage, + pub name: *const c_char, + pub USR: *const c_char, + pub cursor: CXCursor, + pub attributes: *const *const CXIdxAttrInfo, + pub numAttributes: c_uint, +} + +default!(CXIdxEntityInfo); + +#[derive(Copy, Clone, Debug)] +#[repr(C)] +pub struct CXIdxEntityRefInfo { + pub kind: CXIdxEntityRefKind, + 
pub cursor: CXCursor, + pub loc: CXIdxLoc, + pub referencedEntity: *const CXIdxEntityInfo, + pub parentEntity: *const CXIdxEntityInfo, + pub container: *const CXIdxContainerInfo, + /// Only available on `libclang` 7.0 and later. + #[cfg(feature = "clang_7_0")] + pub role: CXSymbolRole, +} + +default!(CXIdxEntityRefInfo); + +#[derive(Copy, Clone, Debug)] +#[repr(C)] +pub struct CXIdxIBOutletCollectionAttrInfo { + pub attrInfo: *const CXIdxAttrInfo, + pub objcClass: *const CXIdxEntityInfo, + pub classCursor: CXCursor, + pub classLoc: CXIdxLoc, +} + +default!(CXIdxIBOutletCollectionAttrInfo); + +#[derive(Copy, Clone, Debug)] +#[repr(C)] +pub struct CXIdxImportedASTFileInfo { + pub file: CXFile, + pub module: CXModule, + pub loc: CXIdxLoc, + pub isImplicit: c_int, +} + +default!(CXIdxImportedASTFileInfo); + +#[derive(Copy, Clone, Debug)] +#[repr(C)] +pub struct CXIdxIncludedFileInfo { + pub hashLoc: CXIdxLoc, + pub filename: *const c_char, + pub file: CXFile, + pub isImport: c_int, + pub isAngled: c_int, + pub isModuleImport: c_int, +} + +default!(CXIdxIncludedFileInfo); + +#[derive(Copy, Clone, Debug)] +#[repr(C)] +pub struct CXIdxLoc { + pub ptr_data: [*mut c_void; 2], + pub int_data: c_uint, +} + +default!(CXIdxLoc); + +#[derive(Copy, Clone, Debug)] +#[repr(C)] +pub struct CXIdxObjCCategoryDeclInfo { + pub containerInfo: *const CXIdxObjCContainerDeclInfo, + pub objcClass: *const CXIdxEntityInfo, + pub classCursor: CXCursor, + pub classLoc: CXIdxLoc, + pub protocols: *const CXIdxObjCProtocolRefListInfo, +} + +default!(CXIdxObjCCategoryDeclInfo); + +#[derive(Copy, Clone, Debug)] +#[repr(C)] +pub struct CXIdxObjCContainerDeclInfo { + pub declInfo: *const CXIdxDeclInfo, + pub kind: CXIdxObjCContainerKind, +} + +default!(CXIdxObjCContainerDeclInfo); + +#[derive(Copy, Clone, Debug)] +#[repr(C)] +pub struct CXIdxObjCInterfaceDeclInfo { + pub containerInfo: *const CXIdxObjCContainerDeclInfo, + pub superInfo: *const CXIdxBaseClassInfo, + pub protocols: *const CXIdxObjCProtocolRefListInfo, +} + +default!(CXIdxObjCInterfaceDeclInfo); + +#[derive(Copy, Clone, Debug)] +#[repr(C)] +pub struct CXIdxObjCPropertyDeclInfo { + pub declInfo: *const CXIdxDeclInfo, + pub getter: *const CXIdxEntityInfo, + pub setter: *const CXIdxEntityInfo, +} + +default!(CXIdxObjCPropertyDeclInfo); + +#[derive(Copy, Clone, Debug)] +#[repr(C)] +pub struct CXIdxObjCProtocolRefInfo { + pub protocol: *const CXIdxEntityInfo, + pub cursor: CXCursor, + pub loc: CXIdxLoc, +} + +default!(CXIdxObjCProtocolRefInfo); + +#[derive(Copy, Clone, Debug)] +#[repr(C)] +pub struct CXIdxObjCProtocolRefListInfo { + pub protocols: *const *const CXIdxObjCProtocolRefInfo, + pub numProtocols: c_uint, +} + +default!(CXIdxObjCProtocolRefListInfo); + +#[cfg(feature = "clang_17_0")] +#[derive(Copy, Clone, Debug)] +#[repr(C)] +pub struct CXIndexOptions { + pub Size: c_uint, + pub ThreadBackgroundPriorityForIndexing: CXChoice, + pub ThreadBackgroundPriorityForEditing: CXChoice, + pub flags: CXIndexOptions_Flags, + pub PreambleStoragePath: *const c_char, + pub InvocationEmissionPath: *const c_char, +} + +#[cfg(feature = "clang_17_0")] +default!(CXIndexOptions); + +#[derive(Copy, Clone, Debug)] +#[repr(C)] +pub struct CXPlatformAvailability { + pub Platform: CXString, + pub Introduced: CXVersion, + pub Deprecated: CXVersion, + pub Obsoleted: CXVersion, + pub Unavailable: c_int, + pub Message: CXString, +} + +default!(CXPlatformAvailability); + +#[derive(Copy, Clone, Debug)] +#[repr(C)] +pub struct CXSourceLocation { + pub ptr_data: [*const c_void; 2], + pub 
int_data: c_uint, +} + +default!(CXSourceLocation); + +#[derive(Copy, Clone, Debug)] +#[repr(C)] +pub struct CXSourceRange { + pub ptr_data: [*const c_void; 2], + pub begin_int_data: c_uint, + pub end_int_data: c_uint, +} + +default!(CXSourceRange); + +#[derive(Copy, Clone, Debug)] +#[repr(C)] +pub struct CXSourceRangeList { + pub count: c_uint, + pub ranges: *mut CXSourceRange, +} + +default!(CXSourceRangeList); + +#[derive(Copy, Clone, Debug)] +#[repr(C)] +pub struct CXString { + pub data: *const c_void, + pub private_flags: c_uint, +} + +default!(CXString); + +#[cfg(feature = "clang_3_8")] +#[derive(Copy, Clone, Debug)] +#[repr(C)] +pub struct CXStringSet { + pub Strings: *mut CXString, + pub Count: c_uint, +} + +#[cfg(feature = "clang_3_8")] +default!(CXStringSet); + +#[derive(Copy, Clone, Debug)] +#[repr(C)] +pub struct CXTUResourceUsage { + pub data: *mut c_void, + pub numEntries: c_uint, + pub entries: *mut CXTUResourceUsageEntry, +} + +default!(CXTUResourceUsage); + +#[derive(Copy, Clone, Debug)] +#[repr(C)] +pub struct CXTUResourceUsageEntry { + pub kind: CXTUResourceUsageKind, + pub amount: c_ulong, +} + +default!(CXTUResourceUsageEntry); + +#[derive(Copy, Clone, Debug)] +#[repr(C)] +pub struct CXToken { + pub int_data: [c_uint; 4], + pub ptr_data: *mut c_void, +} + +default!(CXToken); + +#[derive(Copy, Clone, Debug)] +#[repr(C)] +pub struct CXType { + pub kind: CXTypeKind, + pub data: [*mut c_void; 2], +} + +default!(CXType); + +#[derive(Copy, Clone, Debug)] +#[repr(C)] +pub struct CXUnsavedFile { + pub Filename: *const c_char, + pub Contents: *const c_char, + pub Length: c_ulong, +} + +default!(CXUnsavedFile); + +#[derive(Copy, Clone, Debug)] +#[repr(C)] +pub struct CXVersion { + pub Major: c_int, + pub Minor: c_int, + pub Subminor: c_int, +} + +default!(CXVersion); + +#[derive(Copy, Clone, Debug)] +#[repr(C)] +#[rustfmt::skip] +pub struct IndexerCallbacks { + pub abortQuery: Option c_int>, + pub diagnostic: Option, + pub enteredMainFile: Option CXIdxClientFile>, + pub ppIncludedFile: Option CXIdxClientFile>, + pub importedASTFile: Option CXIdxClientASTFile>, + pub startedTranslationUnit: Option CXIdxClientContainer>, + pub indexDeclaration: Option, + pub indexEntityReference: Option, +} + +default!(IndexerCallbacks); + +//================================================ +// Functions +//================================================ + +link! { + pub fn clang_CXCursorSet_contains(set: CXCursorSet, cursor: CXCursor) -> c_uint; + pub fn clang_CXCursorSet_insert(set: CXCursorSet, cursor: CXCursor) -> c_uint; + pub fn clang_CXIndex_getGlobalOptions(index: CXIndex) -> CXGlobalOptFlags; + pub fn clang_CXIndex_setGlobalOptions(index: CXIndex, flags: CXGlobalOptFlags); + /// Only available on `libclang` 6.0 and later. + #[cfg(feature = "clang_6_0")] + pub fn clang_CXIndex_setInvocationEmissionPathOption(index: CXIndex, path: *const c_char); + /// Only available on `libclang` 3.9 and later. + #[cfg(feature = "clang_3_9")] + pub fn clang_CXXConstructor_isConvertingConstructor(cursor: CXCursor) -> c_uint; + /// Only available on `libclang` 3.9 and later. + #[cfg(feature = "clang_3_9")] + pub fn clang_CXXConstructor_isCopyConstructor(cursor: CXCursor) -> c_uint; + /// Only available on `libclang` 3.9 and later. + #[cfg(feature = "clang_3_9")] + pub fn clang_CXXConstructor_isDefaultConstructor(cursor: CXCursor) -> c_uint; + /// Only available on `libclang` 3.9 and later. 
+ #[cfg(feature = "clang_3_9")] + pub fn clang_CXXConstructor_isMoveConstructor(cursor: CXCursor) -> c_uint; + /// Only available on `libclang` 3.8 and later. + #[cfg(feature = "clang_3_8")] + pub fn clang_CXXField_isMutable(cursor: CXCursor) -> c_uint; + pub fn clang_CXXMethod_isConst(cursor: CXCursor) -> c_uint; + /// Only available on `libclang` 16.0 and later. + #[cfg(feature = "clang_16_0")] + pub fn clang_CXXMethod_isCopyAssignmentOperator(cursor: CXCursor) -> c_uint; + /// Only available on `libclang` 3.9 and later. + #[cfg(feature = "clang_3_9")] + pub fn clang_CXXMethod_isDefaulted(cursor: CXCursor) -> c_uint; + /// Only available on `libclang` 16.0 and later. + #[cfg(feature = "clang_16_0")] + pub fn clang_CXXMethod_isDeleted(cursor: CXCursor) -> c_uint; + /// Only available on `libclang` 16.0 and later. + #[cfg(feature = "clang_16_0")] + pub fn clang_CXXMethod_isMoveAssignmentOperator(cursor: CXCursor) -> c_uint; + pub fn clang_CXXMethod_isPureVirtual(cursor: CXCursor) -> c_uint; + pub fn clang_CXXMethod_isStatic(cursor: CXCursor) -> c_uint; + pub fn clang_CXXMethod_isVirtual(cursor: CXCursor) -> c_uint; + /// Only available on `libclang` 17.0 and later. + #[cfg(feature = "clang_17_0")] + pub fn clang_CXXMethod_isExplicit(cursor: CXCursor) -> c_uint; + /// Only available on `libclang` 6.0 and later. + #[cfg(feature = "clang_6_0")] + pub fn clang_CXXRecord_isAbstract(cursor: CXCursor) -> c_uint; + pub fn clang_CompilationDatabase_dispose(database: CXCompilationDatabase); + pub fn clang_CompilationDatabase_fromDirectory(directory: *const c_char, error: *mut CXCompilationDatabase_Error) -> CXCompilationDatabase; + pub fn clang_CompilationDatabase_getAllCompileCommands(database: CXCompilationDatabase) -> CXCompileCommands; + pub fn clang_CompilationDatabase_getCompileCommands(database: CXCompilationDatabase, filename: *const c_char) -> CXCompileCommands; + pub fn clang_CompileCommand_getArg(command: CXCompileCommand, index: c_uint) -> CXString; + pub fn clang_CompileCommand_getDirectory(command: CXCompileCommand) -> CXString; + /// Only available on `libclang` 3.8 and later. + #[cfg(feature = "clang_3_8")] + pub fn clang_CompileCommand_getFilename(command: CXCompileCommand) -> CXString; + /// Only available on `libclang` 3.8 and later. + #[cfg(feature = "clang_3_8")] + pub fn clang_CompileCommand_getMappedSourceContent(command: CXCompileCommand, index: c_uint) -> CXString; + /// Only available on `libclang` 3.8 and later. + #[cfg(feature = "clang_3_8")] + pub fn clang_CompileCommand_getMappedSourcePath(command: CXCompileCommand, index: c_uint) -> CXString; + pub fn clang_CompileCommand_getNumArgs(command: CXCompileCommand) -> c_uint; + pub fn clang_CompileCommand_getNumMappedSources(command: CXCompileCommand) -> c_uint; + pub fn clang_CompileCommands_dispose(command: CXCompileCommands); + pub fn clang_CompileCommands_getCommand(command: CXCompileCommands, index: c_uint) -> CXCompileCommand; + pub fn clang_CompileCommands_getSize(command: CXCompileCommands) -> c_uint; + /// Only available on `libclang` 3.9 and later. + #[cfg(feature = "clang_3_9")] + pub fn clang_Cursor_Evaluate(cursor: CXCursor) -> CXEvalResult; + pub fn clang_Cursor_getArgument(cursor: CXCursor, index: c_uint) -> CXCursor; + pub fn clang_Cursor_getBriefCommentText(cursor: CXCursor) -> CXString; + /// Only available on `libclang` 3.8 and later. 
+ #[cfg(feature = "clang_3_8")] + pub fn clang_Cursor_getCXXManglings(cursor: CXCursor) -> *mut CXStringSet; + pub fn clang_Cursor_getCommentRange(cursor: CXCursor) -> CXSourceRange; + /// Only available on `libclang` 3.6 and later. + #[cfg(feature = "clang_3_6")] + pub fn clang_Cursor_getMangling(cursor: CXCursor) -> CXString; + pub fn clang_Cursor_getModule(cursor: CXCursor) -> CXModule; + pub fn clang_Cursor_getNumArguments(cursor: CXCursor) -> c_int; + /// Only available on `libclang` 3.6 and later. + #[cfg(feature = "clang_3_6")] + pub fn clang_Cursor_getNumTemplateArguments(cursor: CXCursor) -> c_int; + pub fn clang_Cursor_getObjCDeclQualifiers(cursor: CXCursor) -> CXObjCDeclQualifierKind; + /// Only available on `libclang` 6.0 and later. + #[cfg(feature = "clang_6_0")] + pub fn clang_Cursor_getObjCManglings(cursor: CXCursor) -> *mut CXStringSet; + pub fn clang_Cursor_getObjCPropertyAttributes(cursor: CXCursor, reserved: c_uint) -> CXObjCPropertyAttrKind; + /// Only available on `libclang` 8.0 and later. + #[cfg(feature = "clang_8_0")] + pub fn clang_Cursor_getObjCPropertyGetterName(cursor: CXCursor) -> CXString; + /// Only available on `libclang` 8.0 and later. + #[cfg(feature = "clang_8_0")] + pub fn clang_Cursor_getObjCPropertySetterName(cursor: CXCursor) -> CXString; + pub fn clang_Cursor_getObjCSelectorIndex(cursor: CXCursor) -> c_int; + /// Only available on `libclang` 3.7 and later. + #[cfg(feature = "clang_3_7")] + pub fn clang_Cursor_getOffsetOfField(cursor: CXCursor) -> c_longlong; + pub fn clang_Cursor_getRawCommentText(cursor: CXCursor) -> CXString; + pub fn clang_Cursor_getReceiverType(cursor: CXCursor) -> CXType; + pub fn clang_Cursor_getSpellingNameRange(cursor: CXCursor, index: c_uint, reserved: c_uint) -> CXSourceRange; + /// Only available on `libclang` 3.6 and later. + #[cfg(feature = "clang_3_6")] + pub fn clang_Cursor_getStorageClass(cursor: CXCursor) -> CX_StorageClass; + /// Only available on `libclang` 3.6 and later. + #[cfg(feature = "clang_3_6")] + pub fn clang_Cursor_getTemplateArgumentKind(cursor: CXCursor, index: c_uint) -> CXTemplateArgumentKind; + /// Only available on `libclang` 3.6 and later. + #[cfg(feature = "clang_3_6")] + pub fn clang_Cursor_getTemplateArgumentType(cursor: CXCursor, index: c_uint) -> CXType; + /// Only available on `libclang` 3.6 and later. + #[cfg(feature = "clang_3_6")] + pub fn clang_Cursor_getTemplateArgumentUnsignedValue(cursor: CXCursor, index: c_uint) -> c_ulonglong; + /// Only available on `libclang` 3.6 and later. + #[cfg(feature = "clang_3_6")] + pub fn clang_Cursor_getTemplateArgumentValue(cursor: CXCursor, index: c_uint) -> c_longlong; + pub fn clang_Cursor_getTranslationUnit(cursor: CXCursor) -> CXTranslationUnit; + /// Only available on `libclang` 12.0 and later. + #[cfg(feature = "clang_12_0")] + pub fn clang_Cursor_getVarDeclInitializer(cursor: CXCursor) -> CXCursor; + /// Only available on `libclang` 3.9 and later. + #[cfg(feature = "clang_3_9")] + pub fn clang_Cursor_hasAttrs(cursor: CXCursor) -> c_uint; + /// Only available on `libclang` 12.0 and later. + #[cfg(feature = "clang_12_0")] + pub fn clang_Cursor_hasVarDeclGlobalStorage(cursor: CXCursor) -> c_uint; + /// Only available on `libclang` 12.0 and later. + #[cfg(feature = "clang_12_0")] + pub fn clang_Cursor_hasVarDeclExternalStorage(cursor: CXCursor) -> c_uint; + /// Only available on `libclang` 3.7 and later. + #[cfg(feature = "clang_3_7")] + pub fn clang_Cursor_isAnonymous(cursor: CXCursor) -> c_uint; + /// Only available on `libclang` 9.0 and later. 
+ #[cfg(feature = "clang_9_0")] + pub fn clang_Cursor_isAnonymousRecordDecl(cursor: CXCursor) -> c_uint; + pub fn clang_Cursor_isBitField(cursor: CXCursor) -> c_uint; + pub fn clang_Cursor_isDynamicCall(cursor: CXCursor) -> c_int; + /// Only available on `libclang` 5.0 and later. + #[cfg(feature = "clang_5_0")] + pub fn clang_Cursor_isExternalSymbol(cursor: CXCursor, language: *mut CXString, from: *mut CXString, generated: *mut c_uint) -> c_uint; + /// Only available on `libclang` 3.9 and later. + #[cfg(feature = "clang_3_9")] + pub fn clang_Cursor_isFunctionInlined(cursor: CXCursor) -> c_uint; + /// Only available on `libclang` 9.0 and later. + #[cfg(feature = "clang_9_0")] + pub fn clang_Cursor_isInlineNamespace(cursor: CXCursor) -> c_uint; + /// Only available on `libclang` 3.9 and later. + #[cfg(feature = "clang_3_9")] + pub fn clang_Cursor_isMacroBuiltin(cursor: CXCursor) -> c_uint; + /// Only available on `libclang` 3.9 and later. + #[cfg(feature = "clang_3_9")] + pub fn clang_Cursor_isMacroFunctionLike(cursor: CXCursor) -> c_uint; + pub fn clang_Cursor_isNull(cursor: CXCursor) -> c_int; + pub fn clang_Cursor_isObjCOptional(cursor: CXCursor) -> c_uint; + pub fn clang_Cursor_isVariadic(cursor: CXCursor) -> c_uint; + /// Only available on `libclang` 5.0 and later. + #[cfg(feature = "clang_5_0")] + pub fn clang_EnumDecl_isScoped(cursor: CXCursor) -> c_uint; + /// Only available on `libclang` 3.9 and later. + #[cfg(feature = "clang_3_9")] + pub fn clang_EvalResult_dispose(result: CXEvalResult); + /// Only available on `libclang` 3.9 and later. + #[cfg(feature = "clang_3_9")] + pub fn clang_EvalResult_getAsDouble(result: CXEvalResult) -> libc::c_double; + /// Only available on `libclang` 3.9 and later. + #[cfg(feature = "clang_3_9")] + pub fn clang_EvalResult_getAsInt(result: CXEvalResult) -> c_int; + /// Only available on `libclang` 4.0 and later. + #[cfg(feature = "clang_4_0")] + pub fn clang_EvalResult_getAsLongLong(result: CXEvalResult) -> c_longlong; + /// Only available on `libclang` 3.9 and later. + #[cfg(feature = "clang_3_9")] + pub fn clang_EvalResult_getAsStr(result: CXEvalResult) -> *const c_char; + /// Only available on `libclang` 4.0 and later. + #[cfg(feature = "clang_4_0")] + pub fn clang_EvalResult_getAsUnsigned(result: CXEvalResult) -> c_ulonglong; + /// Only available on `libclang` 3.9 and later. + #[cfg(feature = "clang_3_9")] + pub fn clang_EvalResult_getKind(result: CXEvalResult) -> CXEvalResultKind; + /// Only available on `libclang` 4.0 and later. + #[cfg(feature = "clang_4_0")] + pub fn clang_EvalResult_isUnsignedInt(result: CXEvalResult) -> c_uint; + /// Only available on `libclang` 3.6 and later. + #[cfg(feature = "clang_3_6")] + pub fn clang_File_isEqual(left: CXFile, right: CXFile) -> c_int; + /// Only available on `libclang` 7.0 and later. 
+ #[cfg(feature = "clang_7_0")] + pub fn clang_File_tryGetRealPathName(file: CXFile) -> CXString; + pub fn clang_IndexAction_create(index: CXIndex) -> CXIndexAction; + pub fn clang_IndexAction_dispose(index: CXIndexAction); + pub fn clang_Location_isFromMainFile(location: CXSourceLocation) -> c_int; + pub fn clang_Location_isInSystemHeader(location: CXSourceLocation) -> c_int; + pub fn clang_Module_getASTFile(module: CXModule) -> CXFile; + pub fn clang_Module_getFullName(module: CXModule) -> CXString; + pub fn clang_Module_getName(module: CXModule) -> CXString; + pub fn clang_Module_getNumTopLevelHeaders(tu: CXTranslationUnit, module: CXModule) -> c_uint; + pub fn clang_Module_getParent(module: CXModule) -> CXModule; + pub fn clang_Module_getTopLevelHeader(tu: CXTranslationUnit, module: CXModule, index: c_uint) -> CXFile; + pub fn clang_Module_isSystem(module: CXModule) -> c_int; + /// Only available on `libclang` 7.0 and later. + #[cfg(feature = "clang_7_0")] + pub fn clang_PrintingPolicy_dispose(policy: CXPrintingPolicy); + /// Only available on `libclang` 7.0 and later. + #[cfg(feature = "clang_7_0")] + pub fn clang_PrintingPolicy_getProperty(policy: CXPrintingPolicy, property: CXPrintingPolicyProperty) -> c_uint; + /// Only available on `libclang` 7.0 and later. + #[cfg(feature = "clang_7_0")] + pub fn clang_PrintingPolicy_setProperty(policy: CXPrintingPolicy, property: CXPrintingPolicyProperty, value: c_uint); + pub fn clang_Range_isNull(range: CXSourceRange) -> c_int; + /// Only available on `libclang` 5.0 and later. + #[cfg(feature = "clang_5_0")] + pub fn clang_TargetInfo_dispose(info: CXTargetInfo); + /// Only available on `libclang` 5.0 and later. + #[cfg(feature = "clang_5_0")] + pub fn clang_TargetInfo_getPointerWidth(info: CXTargetInfo) -> c_int; + /// Only available on `libclang` 5.0 and later. + #[cfg(feature = "clang_5_0")] + pub fn clang_TargetInfo_getTriple(info: CXTargetInfo) -> CXString; + pub fn clang_Type_getAlignOf(type_: CXType) -> c_longlong; + pub fn clang_Type_getCXXRefQualifier(type_: CXType) -> CXRefQualifierKind; + pub fn clang_Type_getClassType(type_: CXType) -> CXType; + /// Only available on `libclang` 8.0 and later. + #[cfg(feature = "clang_8_0")] + pub fn clang_Type_getModifiedType(type_: CXType) -> CXType; + /// Only available on `libclang` 3.9 and later. + #[cfg(feature = "clang_3_9")] + pub fn clang_Type_getNamedType(type_: CXType) -> CXType; + /// Only available on `libclang` 8.0 and later. + #[cfg(feature = "clang_8_0")] + pub fn clang_Type_getNullability(type_: CXType) -> CXTypeNullabilityKind; + /// Only available on `libclang` 8.0 and later. + #[cfg(feature = "clang_8_0")] + pub fn clang_Type_getNumObjCProtocolRefs(type_: CXType) -> c_uint; + /// Only available on `libclang` 8.0 and later. + #[cfg(feature = "clang_8_0")] + pub fn clang_Type_getNumObjCTypeArgs(type_: CXType) -> c_uint; + pub fn clang_Type_getNumTemplateArguments(type_: CXType) -> c_int; + /// Only available on `libclang` 3.9 and later. + #[cfg(feature = "clang_3_9")] + pub fn clang_Type_getObjCEncoding(type_: CXType) -> CXString; + /// Only available on `libclang` 8.0 and later. + #[cfg(feature = "clang_8_0")] + pub fn clang_Type_getObjCObjectBaseType(type_: CXType) -> CXType; + /// Only available on `libclang` 8.0 and later. + #[cfg(feature = "clang_8_0")] + pub fn clang_Type_getObjCProtocolDecl(type_: CXType, index: c_uint) -> CXCursor; + /// Only available on `libclang` 8.0 and later. 
+ #[cfg(feature = "clang_8_0")] + pub fn clang_Type_getObjCTypeArg(type_: CXType, index: c_uint) -> CXType; + pub fn clang_Type_getOffsetOf(type_: CXType, field: *const c_char) -> c_longlong; + pub fn clang_Type_getSizeOf(type_: CXType) -> c_longlong; + pub fn clang_Type_getTemplateArgumentAsType(type_: CXType, index: c_uint) -> CXType; + /// Only available on `libclang` 11.0 and later. + #[cfg(feature = "clang_11_0")] + pub fn clang_Type_getValueType(type_: CXType) -> CXType; + /// Only available on `libclang` 5.0 and later. + #[cfg(feature = "clang_5_0")] + pub fn clang_Type_isTransparentTagTypedef(type_: CXType) -> c_uint; + /// Only available on `libclang` 3.7 and later. + #[cfg(feature = "clang_3_7")] + pub fn clang_Type_visitFields(type_: CXType, visitor: CXFieldVisitor, data: CXClientData) -> CXVisitorResult; + pub fn clang_annotateTokens(tu: CXTranslationUnit, tokens: *mut CXToken, n_tokens: c_uint, cursors: *mut CXCursor); + pub fn clang_codeCompleteAt(tu: CXTranslationUnit, file: *const c_char, line: c_uint, column: c_uint, unsaved: *mut CXUnsavedFile, n_unsaved: c_uint, flags: CXCodeComplete_Flags) -> *mut CXCodeCompleteResults; + pub fn clang_codeCompleteGetContainerKind(results: *mut CXCodeCompleteResults, incomplete: *mut c_uint) -> CXCursorKind; + pub fn clang_codeCompleteGetContainerUSR(results: *mut CXCodeCompleteResults) -> CXString; + pub fn clang_codeCompleteGetContexts(results: *mut CXCodeCompleteResults) -> c_ulonglong; + pub fn clang_codeCompleteGetDiagnostic(results: *mut CXCodeCompleteResults, index: c_uint) -> CXDiagnostic; + pub fn clang_codeCompleteGetNumDiagnostics(results: *mut CXCodeCompleteResults) -> c_uint; + pub fn clang_codeCompleteGetObjCSelector(results: *mut CXCodeCompleteResults) -> CXString; + pub fn clang_constructUSR_ObjCCategory(class: *const c_char, category: *const c_char) -> CXString; + pub fn clang_constructUSR_ObjCClass(class: *const c_char) -> CXString; + pub fn clang_constructUSR_ObjCIvar(name: *const c_char, usr: CXString) -> CXString; + pub fn clang_constructUSR_ObjCMethod(name: *const c_char, instance: c_uint, usr: CXString) -> CXString; + pub fn clang_constructUSR_ObjCProperty(property: *const c_char, usr: CXString) -> CXString; + pub fn clang_constructUSR_ObjCProtocol(protocol: *const c_char) -> CXString; + pub fn clang_createCXCursorSet() -> CXCursorSet; + pub fn clang_createIndex(exclude: c_int, display: c_int) -> CXIndex; + /// Only available on `libclang` 17.0 and later. 
+ #[cfg(feature = "clang_17_0")] + pub fn clang_createIndexWithOptions(options: CXIndexOptions) -> CXIndex; + pub fn clang_createTranslationUnit(index: CXIndex, file: *const c_char) -> CXTranslationUnit; + pub fn clang_createTranslationUnit2(index: CXIndex, file: *const c_char, tu: *mut CXTranslationUnit) -> CXErrorCode; + pub fn clang_createTranslationUnitFromSourceFile(index: CXIndex, file: *const c_char, n_arguments: c_int, arguments: *const *const c_char, n_unsaved: c_uint, unsaved: *mut CXUnsavedFile) -> CXTranslationUnit; + pub fn clang_defaultCodeCompleteOptions() -> CXCodeComplete_Flags; + pub fn clang_defaultDiagnosticDisplayOptions() -> CXDiagnosticDisplayOptions; + pub fn clang_defaultEditingTranslationUnitOptions() -> CXTranslationUnit_Flags; + pub fn clang_defaultReparseOptions(tu: CXTranslationUnit) -> CXReparse_Flags; + pub fn clang_defaultSaveOptions(tu: CXTranslationUnit) -> CXSaveTranslationUnit_Flags; + pub fn clang_disposeCXCursorSet(set: CXCursorSet); + pub fn clang_disposeCXPlatformAvailability(availability: *mut CXPlatformAvailability); + pub fn clang_disposeCXTUResourceUsage(usage: CXTUResourceUsage); + pub fn clang_disposeCodeCompleteResults(results: *mut CXCodeCompleteResults); + pub fn clang_disposeDiagnostic(diagnostic: CXDiagnostic); + pub fn clang_disposeDiagnosticSet(diagnostic: CXDiagnosticSet); + pub fn clang_disposeIndex(index: CXIndex); + pub fn clang_disposeOverriddenCursors(cursors: *mut CXCursor); + pub fn clang_disposeSourceRangeList(list: *mut CXSourceRangeList); + pub fn clang_disposeString(string: CXString); + /// Only available on `libclang` 3.8 and later. + #[cfg(feature = "clang_3_8")] + pub fn clang_disposeStringSet(set: *mut CXStringSet); + pub fn clang_disposeTokens(tu: CXTranslationUnit, tokens: *mut CXToken, n_tokens: c_uint); + pub fn clang_disposeTranslationUnit(tu: CXTranslationUnit); + pub fn clang_enableStackTraces(); + pub fn clang_equalCursors(left: CXCursor, right: CXCursor) -> c_uint; + pub fn clang_equalLocations(left: CXSourceLocation, right: CXSourceLocation) -> c_uint; + pub fn clang_equalRanges(left: CXSourceRange, right: CXSourceRange) -> c_uint; + pub fn clang_equalTypes(left: CXType, right: CXType) -> c_uint; + pub fn clang_executeOnThread(function: extern fn(*mut c_void), data: *mut c_void, stack: c_uint); + pub fn clang_findIncludesInFile(tu: CXTranslationUnit, file: CXFile, cursor: CXCursorAndRangeVisitor) -> CXResult; + pub fn clang_findReferencesInFile(cursor: CXCursor, file: CXFile, visitor: CXCursorAndRangeVisitor) -> CXResult; + pub fn clang_formatDiagnostic(diagnostic: CXDiagnostic, flags: CXDiagnosticDisplayOptions) -> CXString; + /// Only available on `libclang` 3.7 and later. + #[cfg(feature = "clang_3_7")] + pub fn clang_free(buffer: *mut c_void); + /// Only available on `libclang` 5.0 and later. + #[cfg(feature = "clang_5_0")] + pub fn clang_getAddressSpace(type_: CXType) -> c_uint; + /// Only available on `libclang` 4.0 and later. + #[cfg(feature = "clang_4_0")] + pub fn clang_getAllSkippedRanges(tu: CXTranslationUnit) -> *mut CXSourceRangeList; + pub fn clang_getArgType(type_: CXType, index: c_uint) -> CXType; + pub fn clang_getArrayElementType(type_: CXType) -> CXType; + pub fn clang_getArraySize(type_: CXType) -> c_longlong; + /// Only available on `libclang` 17.0 and later. 
+ #[cfg(feature = "clang_17_0")] + pub fn clang_getBinaryOperatorKindSpelling(kind: CXBinaryOperatorKind) -> CXString; + pub fn clang_getCString(string: CXString) -> *const c_char; + pub fn clang_getCXTUResourceUsage(tu: CXTranslationUnit) -> CXTUResourceUsage; + pub fn clang_getCXXAccessSpecifier(cursor: CXCursor) -> CX_CXXAccessSpecifier; + pub fn clang_getCanonicalCursor(cursor: CXCursor) -> CXCursor; + pub fn clang_getCanonicalType(type_: CXType) -> CXType; + pub fn clang_getChildDiagnostics(diagnostic: CXDiagnostic) -> CXDiagnosticSet; + pub fn clang_getClangVersion() -> CXString; + pub fn clang_getCompletionAnnotation(string: CXCompletionString, index: c_uint) -> CXString; + pub fn clang_getCompletionAvailability(string: CXCompletionString) -> CXAvailabilityKind; + pub fn clang_getCompletionBriefComment(string: CXCompletionString) -> CXString; + pub fn clang_getCompletionChunkCompletionString(string: CXCompletionString, index: c_uint) -> CXCompletionString; + pub fn clang_getCompletionChunkKind(string: CXCompletionString, index: c_uint) -> CXCompletionChunkKind; + pub fn clang_getCompletionChunkText(string: CXCompletionString, index: c_uint) -> CXString; + /// Only available on `libclang` 7.0 and later. + #[cfg(feature = "clang_7_0")] + pub fn clang_getCompletionFixIt(results: *mut CXCodeCompleteResults, completion_index: c_uint, fixit_index: c_uint, range: *mut CXSourceRange) -> CXString; + pub fn clang_getCompletionNumAnnotations(string: CXCompletionString) -> c_uint; + /// Only available on `libclang` 7.0 and later. + #[cfg(feature = "clang_7_0")] + pub fn clang_getCompletionNumFixIts(results: *mut CXCodeCompleteResults, completion_index: c_uint) -> c_uint; + pub fn clang_getCompletionParent(string: CXCompletionString, kind: *mut CXCursorKind) -> CXString; + pub fn clang_getCompletionPriority(string: CXCompletionString) -> c_uint; + pub fn clang_getCursor(tu: CXTranslationUnit, location: CXSourceLocation) -> CXCursor; + pub fn clang_getCursorAvailability(cursor: CXCursor) -> CXAvailabilityKind; + /// Only available on `libclang` 17.0 and later. + #[cfg(feature = "clang_17_0")] + pub fn clang_getCursorBinaryOperatorKind(cursor: CXCursor) -> CXBinaryOperatorKind; + pub fn clang_getCursorCompletionString(cursor: CXCursor) -> CXCompletionString; + pub fn clang_getCursorDefinition(cursor: CXCursor) -> CXCursor; + pub fn clang_getCursorDisplayName(cursor: CXCursor) -> CXString; + /// Only available on `libclang` 5.0 and later. + #[cfg(feature = "clang_5_0")] + pub fn clang_getCursorExceptionSpecificationType(cursor: CXCursor) -> CXCursor_ExceptionSpecificationKind; + pub fn clang_getCursorExtent(cursor: CXCursor) -> CXSourceRange; + pub fn clang_getCursorKind(cursor: CXCursor) -> CXCursorKind; + pub fn clang_getCursorKindSpelling(kind: CXCursorKind) -> CXString; + pub fn clang_getCursorLanguage(cursor: CXCursor) -> CXLanguageKind; + pub fn clang_getCursorLexicalParent(cursor: CXCursor) -> CXCursor; + pub fn clang_getCursorLinkage(cursor: CXCursor) -> CXLinkageKind; + pub fn clang_getCursorLocation(cursor: CXCursor) -> CXSourceLocation; + pub fn clang_getCursorPlatformAvailability(cursor: CXCursor, deprecated: *mut c_int, deprecated_message: *mut CXString, unavailable: *mut c_int, unavailable_message: *mut CXString, availability: *mut CXPlatformAvailability, n_availability: c_int) -> c_int; + /// Only available on `libclang` 7.0 and later. 
+ #[cfg(feature = "clang_7_0")] + pub fn clang_getCursorPrettyPrinted(cursor: CXCursor, policy: CXPrintingPolicy) -> CXString; + /// Only available on `libclang` 7.0 and later. + #[cfg(feature = "clang_7_0")] + pub fn clang_getCursorPrintingPolicy(cursor: CXCursor) -> CXPrintingPolicy; + pub fn clang_getCursorReferenceNameRange(cursor: CXCursor, flags: CXNameRefFlags, index: c_uint) -> CXSourceRange; + pub fn clang_getCursorReferenced(cursor: CXCursor) -> CXCursor; + pub fn clang_getCursorResultType(cursor: CXCursor) -> CXType; + pub fn clang_getCursorSemanticParent(cursor: CXCursor) -> CXCursor; + pub fn clang_getCursorSpelling(cursor: CXCursor) -> CXString; + /// Only available on `libclang` 6.0 and later. + #[cfg(feature = "clang_6_0")] + pub fn clang_getCursorTLSKind(cursor: CXCursor) -> CXTLSKind; + pub fn clang_getCursorType(cursor: CXCursor) -> CXType; + /// Only available on `libclang` 17.0 and later. + #[cfg(feature = "clang_17_0")] + pub fn clang_getCursorUnaryOperatorKind(cursor: CXCursor) -> CXUnaryOperatorKind; + pub fn clang_getCursorUSR(cursor: CXCursor) -> CXString; + /// Only available on `libclang` 3.8 and later. + #[cfg(feature = "clang_3_8")] + pub fn clang_getCursorVisibility(cursor: CXCursor) -> CXVisibilityKind; + pub fn clang_getDeclObjCTypeEncoding(cursor: CXCursor) -> CXString; + pub fn clang_getDefinitionSpellingAndExtent(cursor: CXCursor, start: *mut *const c_char, end: *mut *const c_char, start_line: *mut c_uint, start_column: *mut c_uint, end_line: *mut c_uint, end_column: *mut c_uint); + pub fn clang_getDiagnostic(tu: CXTranslationUnit, index: c_uint) -> CXDiagnostic; + pub fn clang_getDiagnosticCategory(diagnostic: CXDiagnostic) -> c_uint; + pub fn clang_getDiagnosticCategoryName(category: c_uint) -> CXString; + pub fn clang_getDiagnosticCategoryText(diagnostic: CXDiagnostic) -> CXString; + pub fn clang_getDiagnosticFixIt(diagnostic: CXDiagnostic, index: c_uint, range: *mut CXSourceRange) -> CXString; + pub fn clang_getDiagnosticInSet(diagnostic: CXDiagnosticSet, index: c_uint) -> CXDiagnostic; + pub fn clang_getDiagnosticLocation(diagnostic: CXDiagnostic) -> CXSourceLocation; + pub fn clang_getDiagnosticNumFixIts(diagnostic: CXDiagnostic) -> c_uint; + pub fn clang_getDiagnosticNumRanges(diagnostic: CXDiagnostic) -> c_uint; + pub fn clang_getDiagnosticOption(diagnostic: CXDiagnostic, option: *mut CXString) -> CXString; + pub fn clang_getDiagnosticRange(diagnostic: CXDiagnostic, index: c_uint) -> CXSourceRange; + pub fn clang_getDiagnosticSetFromTU(tu: CXTranslationUnit) -> CXDiagnosticSet; + pub fn clang_getDiagnosticSeverity(diagnostic: CXDiagnostic) -> CXDiagnosticSeverity; + pub fn clang_getDiagnosticSpelling(diagnostic: CXDiagnostic) -> CXString; + pub fn clang_getElementType(type_: CXType) -> CXType; + pub fn clang_getEnumConstantDeclUnsignedValue(cursor: CXCursor) -> c_ulonglong; + pub fn clang_getEnumConstantDeclValue(cursor: CXCursor) -> c_longlong; + pub fn clang_getEnumDeclIntegerType(cursor: CXCursor) -> CXType; + /// Only available on `libclang` 5.0 and later. + #[cfg(feature = "clang_5_0")] + pub fn clang_getExceptionSpecificationType(type_: CXType) -> CXCursor_ExceptionSpecificationKind; + pub fn clang_getExpansionLocation(location: CXSourceLocation, file: *mut CXFile, line: *mut c_uint, column: *mut c_uint, offset: *mut c_uint); + pub fn clang_getFieldDeclBitWidth(cursor: CXCursor) -> c_int; + pub fn clang_getFile(tu: CXTranslationUnit, file: *const c_char) -> CXFile; + /// Only available on `libclang` 6.0 and later. 
+ #[cfg(feature = "clang_6_0")] + pub fn clang_getFileContents(tu: CXTranslationUnit, file: CXFile, size: *mut size_t) -> *const c_char; + pub fn clang_getFileLocation(location: CXSourceLocation, file: *mut CXFile, line: *mut c_uint, column: *mut c_uint, offset: *mut c_uint); + pub fn clang_getFileName(file: CXFile) -> CXString; + pub fn clang_getFileTime(file: CXFile) -> time_t; + pub fn clang_getFileUniqueID(file: CXFile, id: *mut CXFileUniqueID) -> c_int; + pub fn clang_getFunctionTypeCallingConv(type_: CXType) -> CXCallingConv; + pub fn clang_getIBOutletCollectionType(cursor: CXCursor) -> CXType; + pub fn clang_getIncludedFile(cursor: CXCursor) -> CXFile; + pub fn clang_getInclusions(tu: CXTranslationUnit, visitor: CXInclusionVisitor, data: CXClientData); + pub fn clang_getInstantiationLocation(location: CXSourceLocation, file: *mut CXFile, line: *mut c_uint, column: *mut c_uint, offset: *mut c_uint); + pub fn clang_getLocation(tu: CXTranslationUnit, file: CXFile, line: c_uint, column: c_uint) -> CXSourceLocation; + pub fn clang_getLocationForOffset(tu: CXTranslationUnit, file: CXFile, offset: c_uint) -> CXSourceLocation; + pub fn clang_getModuleForFile(tu: CXTranslationUnit, file: CXFile) -> CXModule; + /// Only available on `libclang` 16.0 and later. + #[cfg(feature = "clang_16_0")] + pub fn clang_getNonReferenceType(type_: CXType) -> CXType; + pub fn clang_getNullCursor() -> CXCursor; + pub fn clang_getNullLocation() -> CXSourceLocation; + pub fn clang_getNullRange() -> CXSourceRange; + pub fn clang_getNumArgTypes(type_: CXType) -> c_int; + pub fn clang_getNumCompletionChunks(string: CXCompletionString) -> c_uint; + pub fn clang_getNumDiagnostics(tu: CXTranslationUnit) -> c_uint; + pub fn clang_getNumDiagnosticsInSet(diagnostic: CXDiagnosticSet) -> c_uint; + pub fn clang_getNumElements(type_: CXType) -> c_longlong; + pub fn clang_getNumOverloadedDecls(cursor: CXCursor) -> c_uint; + pub fn clang_getOverloadedDecl(cursor: CXCursor, index: c_uint) -> CXCursor; + pub fn clang_getOverriddenCursors(cursor: CXCursor, cursors: *mut *mut CXCursor, n_cursors: *mut c_uint); + pub fn clang_getPointeeType(type_: CXType) -> CXType; + pub fn clang_getPresumedLocation(location: CXSourceLocation, file: *mut CXString, line: *mut c_uint, column: *mut c_uint); + pub fn clang_getRange(start: CXSourceLocation, end: CXSourceLocation) -> CXSourceRange; + pub fn clang_getRangeEnd(range: CXSourceRange) -> CXSourceLocation; + pub fn clang_getRangeStart(range: CXSourceRange) -> CXSourceLocation; + pub fn clang_getRemappings(file: *const c_char) -> CXRemapping; + pub fn clang_getRemappingsFromFileList(files: *mut *const c_char, n_files: c_uint) -> CXRemapping; + pub fn clang_getResultType(type_: CXType) -> CXType; + pub fn clang_getSkippedRanges(tu: CXTranslationUnit, file: CXFile) -> *mut CXSourceRangeList; + pub fn clang_getSpecializedCursorTemplate(cursor: CXCursor) -> CXCursor; + pub fn clang_getSpellingLocation(location: CXSourceLocation, file: *mut CXFile, line: *mut c_uint, column: *mut c_uint, offset: *mut c_uint); + pub fn clang_getTUResourceUsageName(kind: CXTUResourceUsageKind) -> *const c_char; + pub fn clang_getTemplateCursorKind(cursor: CXCursor) -> CXCursorKind; + pub fn clang_getToken(tu: CXTranslationUnit, location: CXSourceLocation) -> *mut CXToken; + pub fn clang_getTokenExtent(tu: CXTranslationUnit, token: CXToken) -> CXSourceRange; + pub fn clang_getTokenKind(token: CXToken) -> CXTokenKind; + pub fn clang_getTokenLocation(tu: CXTranslationUnit, token: CXToken) -> CXSourceLocation; + pub fn 
clang_getTokenSpelling(tu: CXTranslationUnit, token: CXToken) -> CXString; + pub fn clang_getTranslationUnitCursor(tu: CXTranslationUnit) -> CXCursor; + pub fn clang_getTranslationUnitSpelling(tu: CXTranslationUnit) -> CXString; + /// Only available on `libclang` 5.0 and later. + #[cfg(feature = "clang_5_0")] + pub fn clang_getTranslationUnitTargetInfo(tu: CXTranslationUnit) -> CXTargetInfo; + /// Only available on `libclang` 17.0 and later. + #[cfg(feature = "clang_17_0")] + pub fn clang_getUnaryOperatorKindSpelling(kind: CXUnaryOperatorKind) -> CXString; + /// Only available on `libclang` 16.0 and later. + #[cfg(feature = "clang_16_0")] + pub fn clang_getUnqualifiedType(type_: CXType) -> CXType; + pub fn clang_getTypeDeclaration(type_: CXType) -> CXCursor; + pub fn clang_getTypeKindSpelling(type_: CXTypeKind) -> CXString; + pub fn clang_getTypeSpelling(type_: CXType) -> CXString; + pub fn clang_getTypedefDeclUnderlyingType(cursor: CXCursor) -> CXType; + /// Only available on `libclang` 5.0 and later. + #[cfg(feature = "clang_5_0")] + pub fn clang_getTypedefName(type_: CXType) -> CXString; + pub fn clang_hashCursor(cursor: CXCursor) -> c_uint; + pub fn clang_indexLoc_getCXSourceLocation(location: CXIdxLoc) -> CXSourceLocation; + pub fn clang_indexLoc_getFileLocation(location: CXIdxLoc, index_file: *mut CXIdxClientFile, file: *mut CXFile, line: *mut c_uint, column: *mut c_uint, offset: *mut c_uint); + pub fn clang_indexSourceFile(index: CXIndexAction, data: CXClientData, callbacks: *mut IndexerCallbacks, n_callbacks: c_uint, index_flags: CXIndexOptFlags, file: *const c_char, arguments: *const *const c_char, n_arguments: c_int, unsaved: *mut CXUnsavedFile, n_unsaved: c_uint, tu: *mut CXTranslationUnit, tu_flags: CXTranslationUnit_Flags) -> CXErrorCode; + /// Only available on `libclang` 3.8 and later. 
+ #[cfg(feature = "clang_3_8")] + pub fn clang_indexSourceFileFullArgv(index: CXIndexAction, data: CXClientData, callbacks: *mut IndexerCallbacks, n_callbacks: c_uint, index_flags: CXIndexOptFlags, file: *const c_char, arguments: *const *const c_char, n_arguments: c_int, unsaved: *mut CXUnsavedFile, n_unsaved: c_uint, tu: *mut CXTranslationUnit, tu_flags: CXTranslationUnit_Flags) -> CXErrorCode; + pub fn clang_indexTranslationUnit(index: CXIndexAction, data: CXClientData, callbacks: *mut IndexerCallbacks, n_callbacks: c_uint, flags: CXIndexOptFlags, tu: CXTranslationUnit) -> c_int; + pub fn clang_index_getCXXClassDeclInfo(info: *const CXIdxDeclInfo) -> *const CXIdxCXXClassDeclInfo; + pub fn clang_index_getClientContainer(info: *const CXIdxContainerInfo) -> CXIdxClientContainer; + pub fn clang_index_getClientEntity(info: *const CXIdxEntityInfo) -> CXIdxClientEntity; + pub fn clang_index_getIBOutletCollectionAttrInfo(info: *const CXIdxAttrInfo) -> *const CXIdxIBOutletCollectionAttrInfo; + pub fn clang_index_getObjCCategoryDeclInfo(info: *const CXIdxDeclInfo) -> *const CXIdxObjCCategoryDeclInfo; + pub fn clang_index_getObjCContainerDeclInfo(info: *const CXIdxDeclInfo) -> *const CXIdxObjCContainerDeclInfo; + pub fn clang_index_getObjCInterfaceDeclInfo(info: *const CXIdxDeclInfo) -> *const CXIdxObjCInterfaceDeclInfo; + pub fn clang_index_getObjCPropertyDeclInfo(info: *const CXIdxDeclInfo) -> *const CXIdxObjCPropertyDeclInfo; + pub fn clang_index_getObjCProtocolRefListInfo(info: *const CXIdxDeclInfo) -> *const CXIdxObjCProtocolRefListInfo; + pub fn clang_index_isEntityObjCContainerKind(info: CXIdxEntityKind) -> c_int; + pub fn clang_index_setClientContainer(info: *const CXIdxContainerInfo, container: CXIdxClientContainer); + pub fn clang_index_setClientEntity(info: *const CXIdxEntityInfo, entity: CXIdxClientEntity); + pub fn clang_isAttribute(kind: CXCursorKind) -> c_uint; + pub fn clang_isConstQualifiedType(type_: CXType) -> c_uint; + pub fn clang_isCursorDefinition(cursor: CXCursor) -> c_uint; + pub fn clang_isDeclaration(kind: CXCursorKind) -> c_uint; + pub fn clang_isExpression(kind: CXCursorKind) -> c_uint; + pub fn clang_isFileMultipleIncludeGuarded(tu: CXTranslationUnit, file: CXFile) -> c_uint; + pub fn clang_isFunctionTypeVariadic(type_: CXType) -> c_uint; + pub fn clang_isInvalid(kind: CXCursorKind) -> c_uint; + /// Only available on `libclang` 7.0 and later. 
+ #[cfg(feature = "clang_7_0")] + pub fn clang_isInvalidDeclaration(cursor: CXCursor) -> c_uint; + pub fn clang_isPODType(type_: CXType) -> c_uint; + pub fn clang_isPreprocessing(kind: CXCursorKind) -> c_uint; + pub fn clang_isReference(kind: CXCursorKind) -> c_uint; + pub fn clang_isRestrictQualifiedType(type_: CXType) -> c_uint; + pub fn clang_isStatement(kind: CXCursorKind) -> c_uint; + pub fn clang_isTranslationUnit(kind: CXCursorKind) -> c_uint; + pub fn clang_isUnexposed(kind: CXCursorKind) -> c_uint; + pub fn clang_isVirtualBase(cursor: CXCursor) -> c_uint; + pub fn clang_isVolatileQualifiedType(type_: CXType) -> c_uint; + pub fn clang_loadDiagnostics(file: *const c_char, error: *mut CXLoadDiag_Error, message: *mut CXString) -> CXDiagnosticSet; + pub fn clang_parseTranslationUnit(index: CXIndex, file: *const c_char, arguments: *const *const c_char, n_arguments: c_int, unsaved: *mut CXUnsavedFile, n_unsaved: c_uint, flags: CXTranslationUnit_Flags) -> CXTranslationUnit; + pub fn clang_parseTranslationUnit2(index: CXIndex, file: *const c_char, arguments: *const *const c_char, n_arguments: c_int, unsaved: *mut CXUnsavedFile, n_unsaved: c_uint, flags: CXTranslationUnit_Flags, tu: *mut CXTranslationUnit) -> CXErrorCode; + /// Only available on `libclang` 3.8 and later. + #[cfg(feature = "clang_3_8")] + pub fn clang_parseTranslationUnit2FullArgv(index: CXIndex, file: *const c_char, arguments: *const *const c_char, n_arguments: c_int, unsaved: *mut CXUnsavedFile, n_unsaved: c_uint, flags: CXTranslationUnit_Flags, tu: *mut CXTranslationUnit) -> CXErrorCode; + pub fn clang_remap_dispose(remapping: CXRemapping); + pub fn clang_remap_getFilenames(remapping: CXRemapping, index: c_uint, original: *mut CXString, transformed: *mut CXString); + pub fn clang_remap_getNumFiles(remapping: CXRemapping) -> c_uint; + pub fn clang_reparseTranslationUnit(tu: CXTranslationUnit, n_unsaved: c_uint, unsaved: *mut CXUnsavedFile, flags: CXReparse_Flags) -> CXErrorCode; + pub fn clang_saveTranslationUnit(tu: CXTranslationUnit, file: *const c_char, options: CXSaveTranslationUnit_Flags) -> CXSaveError; + pub fn clang_sortCodeCompletionResults(results: *mut CXCompletionResult, n_results: c_uint); + /// Only available on `libclang` 5.0 and later. 
+ #[cfg(feature = "clang_5_0")] + pub fn clang_suspendTranslationUnit(tu: CXTranslationUnit) -> c_uint; + pub fn clang_toggleCrashRecovery(recovery: c_uint); + pub fn clang_tokenize(tu: CXTranslationUnit, range: CXSourceRange, tokens: *mut *mut CXToken, n_tokens: *mut c_uint); + pub fn clang_visitChildren(cursor: CXCursor, visitor: CXCursorVisitor, data: CXClientData) -> c_uint; + + // Documentation + pub fn clang_BlockCommandComment_getArgText(comment: CXComment, index: c_uint) -> CXString; + pub fn clang_BlockCommandComment_getCommandName(comment: CXComment) -> CXString; + pub fn clang_BlockCommandComment_getNumArgs(comment: CXComment) -> c_uint; + pub fn clang_BlockCommandComment_getParagraph(comment: CXComment) -> CXComment; + pub fn clang_Comment_getChild(comment: CXComment, index: c_uint) -> CXComment; + pub fn clang_Comment_getKind(comment: CXComment) -> CXCommentKind; + pub fn clang_Comment_getNumChildren(comment: CXComment) -> c_uint; + pub fn clang_Comment_isWhitespace(comment: CXComment) -> c_uint; + pub fn clang_Cursor_getParsedComment(C: CXCursor) -> CXComment; + pub fn clang_FullComment_getAsHTML(comment: CXComment) -> CXString; + pub fn clang_FullComment_getAsXML(comment: CXComment) -> CXString; + pub fn clang_HTMLStartTag_getAttrName(comment: CXComment, index: c_uint) -> CXString; + pub fn clang_HTMLStartTag_getAttrValue(comment: CXComment, index: c_uint) -> CXString; + pub fn clang_HTMLStartTag_getNumAttrs(comment: CXComment) -> c_uint; + pub fn clang_HTMLStartTagComment_isSelfClosing(comment: CXComment) -> c_uint; + pub fn clang_HTMLTagComment_getAsString(comment: CXComment) -> CXString; + pub fn clang_HTMLTagComment_getTagName(comment: CXComment) -> CXString; + pub fn clang_InlineCommandComment_getArgText(comment: CXComment, index: c_uint) -> CXString; + pub fn clang_InlineCommandComment_getCommandName(comment: CXComment) -> CXString; + pub fn clang_InlineCommandComment_getNumArgs(comment: CXComment) -> c_uint; + pub fn clang_InlineCommandComment_getRenderKind(comment: CXComment) -> CXCommentInlineCommandRenderKind; + pub fn clang_InlineContentComment_hasTrailingNewline(comment: CXComment) -> c_uint; + pub fn clang_ParamCommandComment_getDirection(comment: CXComment) -> CXCommentParamPassDirection; + pub fn clang_ParamCommandComment_getParamIndex(comment: CXComment) -> c_uint; + pub fn clang_ParamCommandComment_getParamName(comment: CXComment) -> CXString; + pub fn clang_ParamCommandComment_isDirectionExplicit(comment: CXComment) -> c_uint; + pub fn clang_ParamCommandComment_isParamIndexValid(comment: CXComment) -> c_uint; + pub fn clang_TextComment_getText(comment: CXComment) -> CXString; + pub fn clang_TParamCommandComment_getDepth(comment: CXComment) -> c_uint; + pub fn clang_TParamCommandComment_getIndex(comment: CXComment, depth: c_uint) -> c_uint; + pub fn clang_TParamCommandComment_getParamName(comment: CXComment) -> CXString; + pub fn clang_TParamCommandComment_isParamPositionValid(comment: CXComment) -> c_uint; + pub fn clang_VerbatimBlockLineComment_getText(comment: CXComment) -> CXString; + pub fn clang_VerbatimLineComment_getText(comment: CXComment) -> CXString; +} diff --git a/vendor/clang-sys/src/link.rs b/vendor/clang-sys/src/link.rs new file mode 100644 index 00000000000000..1adb0957fd89b0 --- /dev/null +++ b/vendor/clang-sys/src/link.rs @@ -0,0 +1,322 @@ +// SPDX-License-Identifier: Apache-2.0 + +//================================================ +// Macros +//================================================ + +#[cfg(feature = "runtime")] +macro_rules! 
link { + ( + @LOAD: + $(#[doc=$doc:expr])* + #[cfg($cfg:meta)] + fn $name:ident($($pname:ident: $pty:ty), *) $(-> $ret:ty)* + ) => ( + $(#[doc=$doc])* + #[cfg($cfg)] + pub fn $name(library: &mut super::SharedLibrary) { + let symbol = unsafe { library.library.get(stringify!($name).as_bytes()) }.ok(); + library.functions.$name = match symbol { + Some(s) => *s, + None => None, + }; + } + + #[cfg(not($cfg))] + pub fn $name(_: &mut super::SharedLibrary) {} + ); + + ( + @LOAD: + fn $name:ident($($pname:ident: $pty:ty), *) $(-> $ret:ty)* + ) => ( + link!(@LOAD: #[cfg(feature = "runtime")] fn $name($($pname: $pty), *) $(-> $ret)*); + ); + + ( + $( + $(#[doc=$doc:expr] #[cfg($cfg:meta)])* + pub fn $name:ident($($pname:ident: $pty:ty), *) $(-> $ret:ty)*; + )+ + ) => ( + use std::cell::{RefCell}; + use std::fmt; + use std::sync::{Arc}; + use std::path::{Path, PathBuf}; + + /// The (minimum) version of a `libclang` shared library. + #[allow(missing_docs)] + #[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] + pub enum Version { + V3_5 = 35, + V3_6 = 36, + V3_7 = 37, + V3_8 = 38, + V3_9 = 39, + V4_0 = 40, + V5_0 = 50, + V6_0 = 60, + V7_0 = 70, + V8_0 = 80, + V9_0 = 90, + V11_0 = 110, + V12_0 = 120, + V16_0 = 160, + V17_0 = 170, + } + + impl fmt::Display for Version { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + use Version::*; + match self { + V3_5 => write!(f, "3.5.x"), + V3_6 => write!(f, "3.6.x"), + V3_7 => write!(f, "3.7.x"), + V3_8 => write!(f, "3.8.x"), + V3_9 => write!(f, "3.9.x"), + V4_0 => write!(f, "4.0.x"), + V5_0 => write!(f, "5.0.x"), + V6_0 => write!(f, "6.0.x"), + V7_0 => write!(f, "7.0.x"), + V8_0 => write!(f, "8.0.x"), + V9_0 => write!(f, "9.0.x - 10.0.x"), + V11_0 => write!(f, "11.0.x"), + V12_0 => write!(f, "12.0.x - 15.0.x"), + V16_0 => write!(f, "16.0.x"), + V17_0 => write!(f, "17.0.x or later"), + } + } + } + + /// The set of functions loaded dynamically. + #[derive(Debug, Default)] + pub struct Functions { + $( + $(#[doc=$doc] #[cfg($cfg)])* + pub $name: Option $ret)*>, + )+ + } + + /// A dynamically loaded instance of the `libclang` library. + #[derive(Debug)] + pub struct SharedLibrary { + library: libloading::Library, + path: PathBuf, + pub functions: Functions, + } + + impl SharedLibrary { + fn new(library: libloading::Library, path: PathBuf) -> Self { + Self { library, path, functions: Functions::default() } + } + + /// Returns the path to this `libclang` shared library. + pub fn path(&self) -> &Path { + &self.path + } + + /// Returns the (minimum) version of this `libclang` shared library. + /// + /// If this returns `None`, it indicates that the version is too old + /// to be supported by this crate (i.e., `3.4` or earlier). If the + /// version of this shared library is more recent than that fully + /// supported by this crate, the most recent fully supported version + /// will be returned. + pub fn version(&self) -> Option { + macro_rules! 
check { + ($fn:expr, $version:ident) => { + if self.library.get::($fn).is_ok() { + return Some(Version::$version); + } + }; + } + + unsafe { + check!(b"clang_CXXMethod_isExplicit", V17_0); + check!(b"clang_CXXMethod_isCopyAssignmentOperator", V16_0); + check!(b"clang_Cursor_getVarDeclInitializer", V12_0); + check!(b"clang_Type_getValueType", V11_0); + check!(b"clang_Cursor_isAnonymousRecordDecl", V9_0); + check!(b"clang_Cursor_getObjCPropertyGetterName", V8_0); + check!(b"clang_File_tryGetRealPathName", V7_0); + check!(b"clang_CXIndex_setInvocationEmissionPathOption", V6_0); + check!(b"clang_Cursor_isExternalSymbol", V5_0); + check!(b"clang_EvalResult_getAsLongLong", V4_0); + check!(b"clang_CXXConstructor_isConvertingConstructor", V3_9); + check!(b"clang_CXXField_isMutable", V3_8); + check!(b"clang_Cursor_getOffsetOfField", V3_7); + check!(b"clang_Cursor_getStorageClass", V3_6); + check!(b"clang_Type_getNumTemplateArguments", V3_5); + } + + None + } + } + + thread_local!(static LIBRARY: RefCell>> = RefCell::new(None)); + + /// Returns whether a `libclang` shared library is loaded on this thread. + pub fn is_loaded() -> bool { + LIBRARY.with(|l| l.borrow().is_some()) + } + + fn with_library(f: F) -> Option where F: FnOnce(&SharedLibrary) -> T { + LIBRARY.with(|l| { + match l.borrow().as_ref() { + Some(library) => Some(f(&library)), + _ => None, + } + }) + } + + $( + #[cfg_attr(feature="cargo-clippy", allow(clippy::missing_safety_doc))] + #[cfg_attr(feature="cargo-clippy", allow(clippy::too_many_arguments))] + $(#[doc=$doc] #[cfg($cfg)])* + pub unsafe fn $name($($pname: $pty), *) $(-> $ret)* { + let f = with_library(|library| { + if let Some(function) = library.functions.$name { + function + } else { + panic!( + r#" +A `libclang` function was called that is not supported by the loaded `libclang` instance. + + called function = `{0}` + loaded `libclang` instance = {1} + +The minimum `libclang` requirement for this particular function can be found here: +https://docs.rs/clang-sys/latest/clang_sys/{0}/index.html + +Instructions for installing `libclang` can be found here: +https://rust-lang.github.io/rust-bindgen/requirements.html +"#, + stringify!($name), + library + .version() + .map(|v| format!("{}", v)) + .unwrap_or_else(|| "unsupported version".into()), + ); + } + }).expect("a `libclang` shared library is not loaded on this thread"); + f($($pname), *) + } + + $(#[doc=$doc] #[cfg($cfg)])* + pub mod $name { + pub fn is_loaded() -> bool { + super::with_library(|l| l.functions.$name.is_some()).unwrap_or(false) + } + } + )+ + + mod load { + $(link!(@LOAD: $(#[cfg($cfg)])* fn $name($($pname: $pty), *) $(-> $ret)*);)+ + } + + /// Loads a `libclang` shared library and returns the library instance. + /// + /// This function does not attempt to load any functions from the shared library. The caller + /// is responsible for loading the functions they require. 
+ /// + /// # Failures + /// + /// * a `libclang` shared library could not be found + /// * the `libclang` shared library could not be opened + pub fn load_manually() -> Result { + #[allow(dead_code)] + mod build { + include!(concat!(env!("OUT_DIR"), "/macros.rs")); + pub mod common { include!(concat!(env!("OUT_DIR"), "/common.rs")); } + pub mod dynamic { include!(concat!(env!("OUT_DIR"), "/dynamic.rs")); } + } + + let (directory, filename) = build::dynamic::find(true)?; + let path = directory.join(filename); + + unsafe { + let library = libloading::Library::new(&path).map_err(|e| { + format!( + "the `libclang` shared library at {} could not be opened: {}", + path.display(), + e, + ) + }); + + let mut library = SharedLibrary::new(library?, path); + $(load::$name(&mut library);)+ + Ok(library) + } + } + + /// Loads a `libclang` shared library for use in the current thread. + /// + /// This functions attempts to load all the functions in the shared library. Whether a + /// function has been loaded can be tested by calling the `is_loaded` function on the + /// module with the same name as the function (e.g., `clang_createIndex::is_loaded()` for + /// the `clang_createIndex` function). + /// + /// # Failures + /// + /// * a `libclang` shared library could not be found + /// * the `libclang` shared library could not be opened + #[allow(dead_code)] + pub fn load() -> Result<(), String> { + let library = Arc::new(load_manually()?); + LIBRARY.with(|l| *l.borrow_mut() = Some(library)); + Ok(()) + } + + /// Unloads the `libclang` shared library in use in the current thread. + /// + /// # Failures + /// + /// * a `libclang` shared library is not in use in the current thread + pub fn unload() -> Result<(), String> { + let library = set_library(None); + if library.is_some() { + Ok(()) + } else { + Err("a `libclang` shared library is not in use in the current thread".into()) + } + } + + /// Returns the library instance stored in TLS. + /// + /// This functions allows for sharing library instances between threads. + pub fn get_library() -> Option> { + LIBRARY.with(|l| l.borrow_mut().clone()) + } + + /// Sets the library instance stored in TLS and returns the previous library. + /// + /// This functions allows for sharing library instances between threads. + pub fn set_library(library: Option>) -> Option> { + LIBRARY.with(|l| mem::replace(&mut *l.borrow_mut(), library)) + } + ) +} + +#[cfg(not(feature = "runtime"))] +macro_rules! link { + ( + $( + $(#[doc=$doc:expr] #[cfg($cfg:meta)])* + pub fn $name:ident($($pname:ident: $pty:ty), *) $(-> $ret:ty)*; + )+ + ) => ( + extern { + $( + $(#[doc=$doc] #[cfg($cfg)])* + pub fn $name($($pname: $pty), *) $(-> $ret)*; + )+ + } + + $( + $(#[doc=$doc] #[cfg($cfg)])* + pub mod $name { + pub fn is_loaded() -> bool { true } + } + )+ + ) +} diff --git a/vendor/clang-sys/src/support.rs b/vendor/clang-sys/src/support.rs new file mode 100644 index 00000000000000..bd20da6fe83a99 --- /dev/null +++ b/vendor/clang-sys/src/support.rs @@ -0,0 +1,238 @@ +// SPDX-License-Identifier: Apache-2.0 + +//! Provides helper functionality. + +use std::path::{Path, PathBuf}; +use std::process::Command; +use std::{env, io}; + +use glob::{self, Pattern}; + +use libc::c_int; + +use super::CXVersion; + +//================================================ +// Structs +//================================================ + +/// A `clang` executable. +#[derive(Clone, Debug)] +pub struct Clang { + /// The path to this `clang` executable. 
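
Taken together, `load`, `get_library`, `set_library`, and `unload` above let
one thread resolve `libclang` and hand the same instance to other threads
instead of loading it again. A rough sketch of that hand-off (runtime feature
assumed; error handling kept minimal):

    use std::thread;

    fn share_libclang_between_threads() {
        // Load on the current thread; this fills the thread-local slot.
        clang_sys::load().expect("could not load libclang");

        // Clone the Arc out of TLS so it can be moved to another thread.
        let library = clang_sys::get_library().expect("no library loaded");

        thread::spawn(move || {
            // Install the already-loaded instance instead of loading again.
            clang_sys::set_library(Some(library));
            assert!(clang_sys::is_loaded());
        })
        .join()
        .unwrap();

        clang_sys::unload().expect("libclang was not loaded on this thread");
    }
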
+ pub path: PathBuf, + /// The version of this `clang` executable if it could be parsed. + pub version: Option, + /// The directories searched by this `clang` executable for C headers if + /// they could be parsed. + pub c_search_paths: Option>, + /// The directories searched by this `clang` executable for C++ headers if + /// they could be parsed. + pub cpp_search_paths: Option>, +} + +impl Clang { + fn new(path: impl AsRef, args: &[String]) -> Self { + Self { + path: path.as_ref().into(), + version: parse_version(path.as_ref()), + c_search_paths: parse_search_paths(path.as_ref(), "c", args), + cpp_search_paths: parse_search_paths(path.as_ref(), "c++", args), + } + } + + /// Returns a `clang` executable if one can be found. + /// + /// If the `CLANG_PATH` environment variable is set, that is the instance of + /// `clang` used. Otherwise, a series of directories are searched. First, if + /// a path is supplied, that is the first directory searched. Then, the + /// directory returned by `llvm-config --bindir` is searched. On macOS + /// systems, `xcodebuild -find clang` will next be queried. Last, the + /// directories in the system's `PATH` are searched. + /// + /// ## Cross-compilation + /// + /// If target arguments are provided (e.g., `--target` followed by a target + /// like `x86_64-unknown-linux-gnu`) then this method will prefer a + /// target-prefixed instance of `clang` (e.g., + /// `x86_64-unknown-linux-gnu-clang` for the above example). + pub fn find(path: Option<&Path>, args: &[String]) -> Option { + if let Ok(path) = env::var("CLANG_PATH") { + let p = Path::new(&path); + if p.is_file() && is_executable(p).unwrap_or(false) { + return Some(Clang::new(p, args)); + } else { + eprintln!("`CLANG_PATH` env var set but is not a full path to an executable"); + } + } + + // Determine the cross-compilation target, if any. + + let mut target = None; + for i in 0..args.len() { + if (args[i] == "-target" || args[i] == "-target") && i + 1 < args.len() { + target = Some(&args[i + 1]); + } + } + + // Collect the paths to search for a `clang` executable in. + + let mut paths = vec![]; + + if let Some(path) = path { + paths.push(path.into()); + } + + if let Ok(path) = run_llvm_config(&["--bindir"]) { + if let Some(line) = path.lines().next() { + paths.push(line.into()); + } + } + + if cfg!(target_os = "macos") { + if let Ok((path, _)) = run("xcodebuild", &["-find", "clang"]) { + if let Some(line) = path.lines().next() { + paths.push(line.into()); + } + } + } + + if let Ok(path) = env::var("PATH") { + paths.extend(env::split_paths(&path)); + } + + // First, look for a target-prefixed `clang` executable. + + if let Some(target) = target { + let default = format!("{}-clang{}", target, env::consts::EXE_SUFFIX); + let versioned = format!("{}-clang-[0-9]*{}", target, env::consts::EXE_SUFFIX); + let patterns = &[&default[..], &versioned[..]]; + for path in &paths { + if let Some(path) = find(path, patterns) { + return Some(Clang::new(path, args)); + } + } + } + + // Otherwise, look for any other `clang` executable. 
+ + let default = format!("clang{}", env::consts::EXE_SUFFIX); + let versioned = format!("clang-[0-9]*{}", env::consts::EXE_SUFFIX); + let patterns = &[&default[..], &versioned[..]]; + for path in paths { + if let Some(path) = find(&path, patterns) { + return Some(Clang::new(path, args)); + } + } + + None + } +} + +//================================================ +// Functions +//================================================ + +/// Returns the first match to the supplied glob patterns in the supplied +/// directory if there are any matches. +fn find(directory: &Path, patterns: &[&str]) -> Option { + // Escape the directory in case it contains characters that have special + // meaning in glob patterns (e.g., `[` or `]`). + let directory = if let Some(directory) = directory.to_str() { + Path::new(&Pattern::escape(directory)).to_owned() + } else { + return None; + }; + + for pattern in patterns { + let pattern = directory.join(pattern).to_string_lossy().into_owned(); + if let Some(path) = glob::glob(&pattern).ok()?.filter_map(|p| p.ok()).next() { + if path.is_file() && is_executable(&path).unwrap_or(false) { + return Some(path); + } + } + } + + None +} + +#[cfg(unix)] +fn is_executable(path: &Path) -> io::Result { + use std::ffi::CString; + use std::os::unix::ffi::OsStrExt; + + let path = CString::new(path.as_os_str().as_bytes())?; + unsafe { Ok(libc::access(path.as_ptr(), libc::X_OK) == 0) } +} + +#[cfg(not(unix))] +fn is_executable(_: &Path) -> io::Result { + Ok(true) +} + +/// Attempts to run an executable, returning the `stdout` and `stderr` output if +/// successful. +fn run(executable: &str, arguments: &[&str]) -> Result<(String, String), String> { + Command::new(executable) + .args(arguments) + .output() + .map(|o| { + let stdout = String::from_utf8_lossy(&o.stdout).into_owned(); + let stderr = String::from_utf8_lossy(&o.stderr).into_owned(); + (stdout, stderr) + }) + .map_err(|e| format!("could not run executable `{}`: {}", executable, e)) +} + +/// Runs `clang`, returning the `stdout` and `stderr` output. +fn run_clang(path: &Path, arguments: &[&str]) -> (String, String) { + run(&path.to_string_lossy(), arguments).unwrap() +} + +/// Runs `llvm-config`, returning the `stdout` output if successful. +fn run_llvm_config(arguments: &[&str]) -> Result { + let config = env::var("LLVM_CONFIG_PATH").unwrap_or_else(|_| "llvm-config".to_string()); + run(&config, arguments).map(|(o, _)| o) +} + +/// Parses a version number if possible, ignoring trailing non-digit characters. +fn parse_version_number(number: &str) -> Option { + number + .chars() + .take_while(|c| c.is_ascii_digit()) + .collect::() + .parse() + .ok() +} + +/// Parses the version from the output of a `clang` executable if possible. +fn parse_version(path: &Path) -> Option { + let output = run_clang(path, &["--version"]).0; + let start = output.find("version ")? + 8; + let mut numbers = output[start..].split_whitespace().next()?.split('.'); + let major = numbers.next().and_then(parse_version_number)?; + let minor = numbers.next().and_then(parse_version_number)?; + let subminor = numbers.next().and_then(parse_version_number).unwrap_or(0); + Some(CXVersion { + Major: major, + Minor: minor, + Subminor: subminor, + }) +} + +/// Parses the search paths from the output of a `clang` executable if possible. 
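
The search order implemented in `Clang::find` above means a cross build only
has to pass the target triple in `args` for a target-prefixed executable to be
preferred. A small illustrative call (the triple is just an example value):

    use clang_sys::support::Clang;

    fn find_cross_clang() -> Option<Clang> {
        let args = vec![
            "--target".to_string(),
            "x86_64-unknown-linux-gnu".to_string(),
        ];
        // Looks for `x86_64-unknown-linux-gnu-clang` on the search path first
        // and only falls back to a plain `clang` executable if none is found.
        let clang = Clang::find(None, &args)?;
        println!("using {} ({:?})", clang.path.display(), clang.version);
        Some(clang)
    }
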
+fn parse_search_paths(path: &Path, language: &str, args: &[String]) -> Option> { + let mut clang_args = vec!["-E", "-x", language, "-", "-v"]; + clang_args.extend(args.iter().map(|s| &**s)); + let output = run_clang(path, &clang_args).1; + let start = output.find("#include <...> search starts here:")? + 34; + let end = output.find("End of search list.")?; + let paths = output[start..end].replace("(framework directory)", ""); + Some( + paths + .lines() + .filter(|l| !l.is_empty()) + .map(|l| Path::new(l.trim()).into()) + .collect(), + ) +} diff --git a/vendor/clang-sys/tests/build.rs b/vendor/clang-sys/tests/build.rs new file mode 100644 index 00000000000000..1ac4e617046a48 --- /dev/null +++ b/vendor/clang-sys/tests/build.rs @@ -0,0 +1,356 @@ +#![allow(dead_code)] + +use core::fmt; +use std::collections::HashMap; +use std::env; +use std::fs; +use std::path::PathBuf; +use std::sync::Arc; +use std::sync::Mutex; + +use tempfile::TempDir; + +#[macro_use] +#[path = "../build/macros.rs"] +mod macros; + +#[path = "../build/common.rs"] +mod common; +#[path = "../build/dynamic.rs"] +mod dynamic; +#[path = "../build/static.rs"] +mod r#static; + +#[derive(Debug, Default)] +struct RunCommandMock { + invocations: Vec<(String, String, Vec)>, + responses: HashMap, String>, +} + + +#[derive(Copy, Clone, Debug)] +enum Arch { + ARM64, + X86, + X86_64, +} + +impl Arch { + fn pe_machine_type(self) -> u16 { + match self { + Arch::ARM64 => 0xAA64, + Arch::X86 => 0x014C, + Arch::X86_64 => 0x8664, + } + } +} + +impl fmt::Display for Arch { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + Arch::ARM64 => write!(f, "aarch64"), + Arch::X86 => write!(f, "x86"), + Arch::X86_64 => write!(f, "x86_64"), + } + } +} + +#[derive(Debug)] +struct Env { + os: String, + arch: Arch, + pointer_width: String, + env: Option, + vars: HashMap, Option)>, + cwd: PathBuf, + tmp: TempDir, + files: Vec, + commands: Arc>, +} + +impl Env { + fn new(os: &str, arch: Arch, pointer_width: &str) -> Self { + Env { + os: os.into(), + arch, + pointer_width: pointer_width.into(), + env: None, + vars: HashMap::new(), + cwd: env::current_dir().unwrap(), + tmp: tempfile::Builder::new().prefix("clang_sys_test").tempdir().unwrap(), + files: vec![], + commands: Default::default(), + } + .var("CLANG_PATH", None) + .var("LD_LIBRARY_PATH", None) + .var("LIBCLANG_PATH", None) + .var("LIBCLANG_STATIC_PATH", None) + .var("LLVM_CONFIG_PATH", None) + .var("PATH", None) + } + + fn env(mut self, env: &str) -> Self { + self.env = Some(env.into()); + self + } + + fn var(mut self, name: &str, value: Option<&str>) -> Self { + let previous = env::var(name).ok(); + self.vars.insert(name.into(), (value.map(|v| v.into()), previous)); + self + } + + fn dir(mut self, path: &str) -> Self { + self.files.push(path.into()); + let path = self.tmp.path().join(path); + fs::create_dir_all(path).unwrap(); + self + } + + fn file(mut self, path: &str, contents: &[u8]) -> Self { + self.files.push(path.into()); + let path = self.tmp.path().join(path); + fs::create_dir_all(path.parent().unwrap()).unwrap(); + fs::write(self.tmp.path().join(path), contents).unwrap(); + self + } + + fn dll(self, path: &str, arch: Arch, pointer_width: &str) -> Self { + // PE header. 
+ let mut contents = [0; 64]; + contents[0x3C..0x3C + 4].copy_from_slice(&i32::to_le_bytes(10)); + contents[10..14].copy_from_slice(&[b'P', b'E', 0, 0]); + contents[14..16].copy_from_slice(&u16::to_le_bytes(arch.pe_machine_type())); + let magic = if pointer_width == "64" { 523 } else { 267 }; + contents[34..36].copy_from_slice(&u16::to_le_bytes(magic)); + + self.file(path, &contents) + } + + fn so(self, path: &str, pointer_width: &str) -> Self { + // ELF header. + let class = if pointer_width == "64" { 2 } else { 1 }; + let contents = [127, 69, 76, 70, class]; + + self.file(path, &contents) + } + + fn command(self, command: &str, args: &[&str], response: &str) -> Self { + let command = command.to_string(); + let args = args.iter().map(|a| a.to_string()).collect::>(); + + let mut key = vec![command]; + key.extend(args); + self.commands.lock().unwrap().responses.insert(key, response.into()); + + self + } + + fn enable(self) -> Self { + env::set_var("_CLANG_SYS_TEST", "yep"); + env::set_var("_CLANG_SYS_TEST_OS", &self.os); + env::set_var("_CLANG_SYS_TEST_ARCH", &format!("{}", self.arch)); + env::set_var("_CLANG_SYS_TEST_POINTER_WIDTH", &self.pointer_width); + if let Some(env) = &self.env { + env::set_var("_CLANG_SYS_TEST_ENV", env); + } + + for (name, (value, _)) in &self.vars { + if let Some(value) = value { + env::set_var(name, value); + } else { + env::remove_var(name); + } + } + + env::set_current_dir(&self.tmp).unwrap(); + + let commands = self.commands.clone(); + let mock = &mut *common::RUN_COMMAND_MOCK.lock().unwrap(); + *mock = Some(Box::new(move |command, path, args| { + let command = command.to_string(); + let path = path.to_string(); + let args = args.iter().map(|a| a.to_string()).collect::>(); + + let mut commands = commands.lock().unwrap(); + commands.invocations.push((command.clone(), path, args.clone())); + + let mut key = vec![command]; + key.extend(args); + commands.responses.get(&key).cloned() + })); + + self + } +} + +impl Drop for Env { + fn drop(&mut self) { + env::remove_var("_CLANG_SYS_TEST"); + env::remove_var("_CLANG_SYS_TEST_OS"); + env::remove_var("_CLANG_SYS_TEST_ARCH"); + env::remove_var("_CLANG_SYS_TEST_POINTER_WIDTH"); + env::remove_var("_CLANG_SYS_TEST_ENV"); + + for (name, (_, previous)) in &self.vars { + if let Some(previous) = previous { + env::set_var(name, previous); + } else { + env::remove_var(name); + } + } + + if let Err(error) = env::set_current_dir(&self.cwd) { + println!("Failed to reset working directory: {:?}", error); + } + } +} + +#[test] +fn test_all() { + // Run tests serially since they alter the environment. + + test_linux_directory_preference(); + test_linux_version_preference(); + test_linux_directory_and_version_preference(); + + #[cfg(target_os = "windows")] + { + test_windows_bin_sibling(); + test_windows_mingw_gnu(); + test_windows_mingw_msvc(); + test_windows_arm64_on_x86_64(); + test_windows_x86_64_on_arm64(); + } +} + +macro_rules! assert_error { + ($result:expr, $contents:expr $(,)?) 
=> { + if let Err(error) = $result { + if !error.contains($contents) { + panic!("expected error to contain {:?}, received: {error:?}", $contents); + } + } else { + panic!("expected error, received: {:?}", $result); + } + }; +} + +//================================================ +// Dynamic +//================================================ + +// Linux ----------------------------------------- + +fn test_linux_directory_preference() { + let _env = Env::new("linux", Arch::X86_64, "64") + .so("usr/lib/libclang.so.1", "64") + .so("usr/local/lib/libclang.so.1", "64") + .enable(); + + assert_eq!( + dynamic::find(true), + Ok(("usr/local/lib".into(), "libclang.so.1".into())), + ); +} + +fn test_linux_version_preference() { + let _env = Env::new("linux", Arch::X86_64, "64") + .so("usr/lib/libclang-3.so", "64") + .so("usr/lib/libclang-3.5.so", "64") + .so("usr/lib/libclang-3.5.0.so", "64") + .enable(); + + assert_eq!( + dynamic::find(true), + Ok(("usr/lib".into(), "libclang-3.5.0.so".into())), + ); +} + +fn test_linux_directory_and_version_preference() { + let _env = Env::new("linux", Arch::X86_64, "64") + .so("usr/local/llvm/lib/libclang-3.so", "64") + .so("usr/local/lib/libclang-3.5.so", "64") + .so("usr/lib/libclang-3.5.0.so", "64") + .enable(); + + assert_eq!( + dynamic::find(true), + Ok(("usr/lib".into(), "libclang-3.5.0.so".into())), + ); +} + +// Windows --------------------------------------- + +#[cfg(target_os = "windows")] +fn test_windows_bin_sibling() { + let _env = Env::new("windows", Arch::X86_64, "64") + .dir("Program Files\\LLVM\\lib") + .dll("Program Files\\LLVM\\bin\\libclang.dll", Arch::X86_64, "64") + .enable(); + + assert_eq!( + dynamic::find(true), + Ok(("Program Files\\LLVM\\bin".into(), "libclang.dll".into())), + ); +} + +#[cfg(target_os = "windows")] +fn test_windows_mingw_gnu() { + let _env = Env::new("windows", Arch::X86_64, "64") + .env("gnu") + .dir("MSYS\\MinGW\\lib") + .dll("MSYS\\MinGW\\bin\\clang.dll", Arch::X86_64, "64") + .dir("Program Files\\LLVM\\lib") + .dll("Program Files\\LLVM\\bin\\libclang.dll", Arch::X86_64, "64") + .enable(); + + assert_eq!( + dynamic::find(true), + Ok(("MSYS\\MinGW\\bin".into(), "clang.dll".into())), + ); +} + +#[cfg(target_os = "windows")] +fn test_windows_mingw_msvc() { + let _env = Env::new("windows", Arch::X86_64, "64") + .env("msvc") + .dir("MSYS\\MinGW\\lib") + .dll("MSYS\\MinGW\\bin\\clang.dll", Arch::X86_64, "64") + .dir("Program Files\\LLVM\\lib") + .dll("Program Files\\LLVM\\bin\\libclang.dll", Arch::X86_64, "64") + .enable(); + + assert_eq!( + dynamic::find(true), + Ok(("Program Files\\LLVM\\bin".into(), "libclang.dll".into())), + ); +} + +#[cfg(target_os = "windows")] +fn test_windows_arm64_on_x86_64() { + let _env = Env::new("windows", Arch::X86_64, "64") + .env("msvc") + .dir("Program Files\\LLVM\\lib") + .dll("Program Files\\LLVM\\bin\\libclang.dll", Arch::ARM64, "64") + .enable(); + + assert_error!( + dynamic::find(true), + "invalid: [(Program Files\\LLVM\\bin\\libclang.dll: invalid DLL (ARM64)", + ); +} + +#[cfg(target_os = "windows")] +fn test_windows_x86_64_on_arm64() { + let _env = Env::new("windows", Arch::ARM64, "64") + .env("msvc") + .dir("Program Files\\LLVM\\lib") + .dll("Program Files\\LLVM\\bin\\libclang.dll", Arch::X86_64, "64") + .enable(); + + assert_error!( + dynamic::find(true), + "invalid: [(Program Files\\LLVM\\bin\\libclang.dll: invalid DLL (x86-64)", + ); +} diff --git a/vendor/clang-sys/tests/header.h b/vendor/clang-sys/tests/header.h new file mode 100644 index 00000000000000..5c392d31455a87 --- 
/dev/null +++ b/vendor/clang-sys/tests/header.h @@ -0,0 +1,6 @@ +#ifndef HEADER_H_ +#define HEADER_H_ + +int add(int a, int b); + +#endif diff --git a/vendor/clang-sys/tests/lib.rs b/vendor/clang-sys/tests/lib.rs new file mode 100644 index 00000000000000..1f152f7883aabd --- /dev/null +++ b/vendor/clang-sys/tests/lib.rs @@ -0,0 +1,52 @@ +use std::ptr; + +use clang_sys::*; + +use libc::c_char; + +fn parse() { + unsafe { + let index = clang_createIndex(0, 0); + assert!(!index.is_null()); + + let tu = clang_parseTranslationUnit( + index, + "tests/header.h\0".as_ptr() as *const c_char, + ptr::null_mut(), + 0, + ptr::null_mut(), + 0, + 0, + ); + assert!(!tu.is_null()); + } +} + +#[cfg(feature = "runtime")] +#[test] +fn test() { + load().unwrap(); + let library = get_library().unwrap(); + println!("{:?} ({:?})", library.version(), library.path()); + parse(); + unload().unwrap(); +} + +#[cfg(not(feature = "runtime"))] +#[test] +fn test() { + parse(); +} + +#[test] +fn test_support() { + let clang = support::Clang::find(None, &[]).unwrap(); + println!("{:?}", clang); +} + +#[test] +fn test_support_target() { + let args = &["--target".into(), "x86_64-unknown-linux-gnu".into()]; + let clang = support::Clang::find(None, args).unwrap(); + println!("{:?}", clang); +} diff --git a/vendor/either/.cargo-checksum.json b/vendor/either/.cargo-checksum.json new file mode 100644 index 00000000000000..0b95517a79697b --- /dev/null +++ b/vendor/either/.cargo-checksum.json @@ -0,0 +1 @@ +{"files":{".cargo_vcs_info.json":"a476e926b135f5b5862629aa62aee044b0ea4b5328e4de5f46f43d3858d3bbfe",".github/workflows/ci.yml":"1980de2333ca92700b4cacc285e75dc8a3ee4f561ee8c962989469da6be1980d","Cargo.lock":"fa51302ea4d0f21da8621d0376bc26e2ffc28754fef88a80fa8d48897cbef662","Cargo.toml":"e88e6acce3b0cbf6734ab7c41cbc01a6b368843eb29d57f06e3b917ec9af03e0","Cargo.toml.orig":"e38380a7d61979d78ece13c1e45386b8f81b50375aad7887671c0ab026056687","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"7576269ea71f767b99297934c0b2367532690f8c4badc695edf8e04ab6a1e545","README-crates.io.md":"b775991a01ab4a0a8de6169f597775319d9ce8178f5c74ccdc634f13a286b20c","README.rst":"fb08fabe5268b1f350bf8772c240c93190f7b88ae856dd09e77248e65881eebf","src/into_either.rs":"0477f226bbba78ef017de08b87d421d3cd99fbc95b90ba4e6e3e803e3d15254e","src/iterator.rs":"eef042c8fa7d2d2cb002ed81dedf8c124ec36252ae8bd0368039c788f686edd8","src/lib.rs":"430b3125aa77ab51bca768e6be8d4a2cbdb9932338f9e5d90803a0a2fe99b371","src/serde_untagged.rs":"e826ee0ab31616e49c3e3f3711c8441001ee424b3e7a8c4c466cfcc4f8a7701a","src/serde_untagged_optional.rs":"86265f09d0795428bb2ce013b070d1badf1e2210217844a9ff3f04b2795868ab"},"package":"48c757948c5ede0e46177b7add2e67155f70e33c07fea8284df6576da70b3719"} \ No newline at end of file diff --git a/vendor/either/.cargo_vcs_info.json b/vendor/either/.cargo_vcs_info.json new file mode 100644 index 00000000000000..1cca00e9368565 --- /dev/null +++ b/vendor/either/.cargo_vcs_info.json @@ -0,0 +1,6 @@ +{ + "git": { + "sha1": "59ae1fce0cec62c886fcd486e06b7e219bc7ce48" + }, + "path_in_vcs": "" +} \ No newline at end of file diff --git a/vendor/either/.github/workflows/ci.yml b/vendor/either/.github/workflows/ci.yml new file mode 100644 index 00000000000000..2f3843b7a2ec6e --- /dev/null +++ b/vendor/either/.github/workflows/ci.yml @@ -0,0 +1,83 @@ +on: + push: + branches: [ main ] + pull_request: + merge_group: + +name: CI + +jobs: + ci: + runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + rust: + - 1.63.0 # 
MSRV + - stable + - beta + - nightly + features: + - "" + - "serde" + + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Cache the registry + uses: actions/cache@v4 + if: startsWith(matrix.rust, '1') + with: + path: ~/.cargo/registry/index + key: cargo-${{ matrix.rust }}-git-index + + - name: Set up Rust + uses: dtolnay/rust-toolchain@master + with: + toolchain: ${{ matrix.rust }} + + - name: Build (no_std) + run: cargo build --no-default-features --features "${{ matrix.features }}" + + - name: Build + run: cargo build --features "${{ matrix.features }}" + + - name: Test + run: cargo test --features "${{ matrix.features }}" + + - name: Doc + run: cargo doc --features "${{ matrix.features }}" + + clippy: + name: Rustfmt and Clippy + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Set up nightly Rust + uses: dtolnay/rust-toolchain@nightly + with: + components: rustfmt, clippy + + - name: Rustfmt + run: cargo fmt --all -- --check + + - name: Clippy + run: cargo clippy # -- -D warnings + + # One job that "summarizes" the success state of this pipeline. This can then be added to branch + # protection, rather than having to add each job separately. + success: + name: Success + runs-on: ubuntu-latest + needs: [ci, clippy] + # Github branch protection is exceedingly silly and treats "jobs skipped because a dependency + # failed" as success. So we have to do some contortions to ensure the job fails if any of its + # dependencies fails. + if: always() # make sure this is never "skipped" + steps: + # Manually check the status of all dependencies. `if: failure()` does not work. + - name: check if any dependency failed + run: jq --exit-status 'all(.result == "success")' <<< '${{ toJson(needs) }}' diff --git a/vendor/either/Cargo.lock b/vendor/either/Cargo.lock new file mode 100644 index 00000000000000..3c336a9a934e58 --- /dev/null +++ b/vendor/either/Cargo.lock @@ -0,0 +1,96 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. 
+version = 3 + +[[package]] +name = "either" +version = "1.15.0" +dependencies = [ + "serde", + "serde_json", +] + +[[package]] +name = "itoa" +version = "1.0.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c" + +[[package]] +name = "memchr" +version = "2.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3" + +[[package]] +name = "proc-macro2" +version = "1.0.94" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a31971752e70b8b2686d7e46ec17fb38dad4051d94024c88df49b667caea9c84" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "quote" +version = "1.0.39" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c1f1914ce909e1658d9907913b4b91947430c7d9be598b15a1912935b8c04801" +dependencies = [ + "proc-macro2", +] + +[[package]] +name = "ryu" +version = "1.0.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "28d3b2b1366ec20994f1fd18c3c594f05c5dd4bc44d8bb0c1c632c8d6829481f" + +[[package]] +name = "serde" +version = "1.0.218" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e8dfc9d19bdbf6d17e22319da49161d5d0108e4188e8b680aef6299eed22df60" +dependencies = [ + "serde_derive", +] + +[[package]] +name = "serde_derive" +version = "1.0.218" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f09503e191f4e797cb8aac08e9a4a4695c5edf6a2e70e376d961ddd5c969f82b" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "serde_json" +version = "1.0.140" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "20068b6e96dc6c9bd23e01df8827e6c7e1f2fddd43c21810382803c136b99373" +dependencies = [ + "itoa", + "memchr", + "ryu", + "serde", +] + +[[package]] +name = "syn" +version = "2.0.99" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e02e925281e18ffd9d640e234264753c43edc62d64b2d4cf898f1bc5e75f3fc2" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "unicode-ident" +version = "1.0.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a5f39404a5da50712a4c1eecf25e90dd62b613502b7e925fd4e4d19b5c96512" diff --git a/vendor/either/Cargo.toml b/vendor/either/Cargo.toml new file mode 100644 index 00000000000000..68b38fd8097e4f --- /dev/null +++ b/vendor/either/Cargo.toml @@ -0,0 +1,70 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies. +# +# If you are reading this file be aware that the original Cargo.toml +# will likely look very different (and much more reasonable). +# See Cargo.toml.orig for the original contents. + +[package] +edition = "2021" +rust-version = "1.63.0" +name = "either" +version = "1.15.0" +authors = ["bluss"] +build = false +autolib = false +autobins = false +autoexamples = false +autotests = false +autobenches = false +description = """ +The enum `Either` with variants `Left` and `Right` is a general purpose sum type with two cases. 
+""" +documentation = "https://docs.rs/either/1/" +readme = "README-crates.io.md" +keywords = [ + "data-structure", + "no_std", +] +categories = [ + "data-structures", + "no-std", +] +license = "MIT OR Apache-2.0" +repository = "https://github.com/rayon-rs/either" + +[package.metadata.docs.rs] +features = ["serde"] + +[package.metadata.playground] +features = ["serde"] + +[package.metadata.release] +allow-branch = ["main"] +sign-tag = true +tag-name = "{{version}}" + +[features] +default = ["std"] +std = [] +use_std = ["std"] + +[lib] +name = "either" +path = "src/lib.rs" + +[dependencies.serde] +version = "1.0.95" +features = [ + "alloc", + "derive", +] +optional = true +default-features = false + +[dev-dependencies.serde_json] +version = "1.0.0" diff --git a/vendor/either/LICENSE-APACHE b/vendor/either/LICENSE-APACHE new file mode 100644 index 00000000000000..16fe87b06e802f --- /dev/null +++ b/vendor/either/LICENSE-APACHE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/vendor/either/LICENSE-MIT b/vendor/either/LICENSE-MIT new file mode 100644 index 00000000000000..9203baa055d41d --- /dev/null +++ b/vendor/either/LICENSE-MIT @@ -0,0 +1,25 @@ +Copyright (c) 2015 + +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. diff --git a/vendor/either/README-crates.io.md b/vendor/either/README-crates.io.md new file mode 100644 index 00000000000000..d36890278b328e --- /dev/null +++ b/vendor/either/README-crates.io.md @@ -0,0 +1,10 @@ +The enum `Either` with variants `Left` and `Right` is a general purpose +sum type with two cases. + +Either has methods that are similar to Option and Result, and it also implements +traits like `Iterator`. + +Includes macros `try_left!()` and `try_right!()` to use for +short-circuiting logic, similar to how the `?` operator is used with `Result`. +Note that `Either` is general purpose. For describing success or error, use the +regular `Result`. diff --git a/vendor/either/README.rst b/vendor/either/README.rst new file mode 100644 index 00000000000000..7665f1ff0dd71a --- /dev/null +++ b/vendor/either/README.rst @@ -0,0 +1,204 @@ + +Either +====== + +The enum ``Either`` with variants ``Left`` and ``Right`` and trait +implementations including Iterator, Read, Write. + +Either has methods that are similar to Option and Result. 
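
For example, because ``Either`` implements ``Iterator`` whenever both of its
sides do, two iterators with different concrete types can be returned behind a
single type and consumed uniformly (a minimal sketch, not taken from the
crate's own docs)::

    use either::Either;

    fn main() {
        // Two iterators with different concrete types, behind one Either.
        let left: Either<_, std::ops::Range<u32>> =
            Either::Left([1u32, 2, 3].into_iter());
        let right: Either<std::array::IntoIter<u32, 3>, _> =
            Either::Right(0u32..3);

        // Either<L, R> forwards Iterator methods to whichever side is present.
        assert_eq!(left.sum::<u32>(), 6);
        assert_eq!(right.count(), 3);
    }
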
+ +Includes convenience macros ``try_left!()`` and ``try_right!()`` to use for +short-circuiting logic. + +Please read the `API documentation here`__ + +__ https://docs.rs/either/ + +|build_status|_ |crates|_ + +.. |build_status| image:: https://github.com/rayon-rs/either/workflows/CI/badge.svg?branch=main +.. _build_status: https://github.com/rayon-rs/either/actions + +.. |crates| image:: https://img.shields.io/crates/v/either.svg +.. _crates: https://crates.io/crates/either + +How to use with cargo:: + + [dependencies] + either = "1" + + +Recent Changes +-------------- + +- 1.15.0 + + - Fix ``serde`` support when building without ``std``, by @klkvr (#119) + + - Use a more common ``std`` feature for default enablement, deprecating + the ``use_std`` feature as a mere alias of the new name. + +- 1.14.0 + + - **MSRV**: ``either`` now requires Rust 1.63 or later. + + - Implement ``fmt::Write`` for ``Either``, by @yotamofek (#113) + + - Replace ``Into for Either`` with ``From for Result``, by @cuviper (#118) + +- 1.13.0 + + - Add new methods ``.cloned()`` and ``.copied()``, by @ColonelThirtyTwo (#107) + +- 1.12.0 + + - **MSRV**: ``either`` now requires Rust 1.37 or later. + + - Specialize ``nth_back`` for ``Either`` and ``IterEither``, by @cuviper (#106) + +- 1.11.0 + + - Add new trait ``IntoEither`` that is useful to convert to ``Either`` in method chains, + by @SFM61319 (#101) + +- 1.10.0 + + - Add new methods ``.factor_iter()``, ``.factor_iter_mut()``, and ``.factor_into_iter()`` + that return ``Either`` items, plus ``.iter()`` and ``.iter_mut()`` to convert to direct + reference iterators; by @aj-bagwell and @cuviper (#91) + +- 1.9.0 + + - Add new methods ``.map_either()`` and ``.map_either_with()``, by @nasadorian (#82) + +- 1.8.1 + + - Clarified that the multiple licenses are combined with OR. + +- 1.8.0 + + - **MSRV**: ``either`` now requires Rust 1.36 or later. + + - Add new methods ``.as_pin_ref()`` and ``.as_pin_mut()`` to project a + pinned ``Either`` as inner ``Pin`` variants, by @cuviper (#77) + + - Implement the ``Future`` trait, by @cuviper (#77) + + - Specialize more methods of the ``io`` traits, by @Kixunil and @cuviper (#75) + +- 1.7.0 + + - **MSRV**: ``either`` now requires Rust 1.31 or later. + + - Export the macro ``for_both!``, by @thomaseizinger (#58) + + - Implement the ``io::Seek`` trait, by @Kerollmops (#60) + + - Add new method ``.either_into()`` for ``Into`` conversion, by @TonalidadeHidrica (#63) + + - Add new methods ``.factor_ok()``, ``.factor_err()``, and ``.factor_none()``, + by @zachs18 (#67) + + - Specialize ``source`` in the ``Error`` implementation, by @thomaseizinger (#69) + + - Specialize more iterator methods and implement the ``FusedIterator`` trait, + by @Ten0 (#66) and @cuviper (#71) + + - Specialize ``Clone::clone_from``, by @cuviper (#72) + +- 1.6.1 + + - Add new methods ``.expect_left()``, ``.unwrap_left()``, + and equivalents on the right, by @spenserblack (#51) + +- 1.6.0 + + - Add new modules ``serde_untagged`` and ``serde_untagged_optional`` to customize + how ``Either`` fields are serialized in other types, by @MikailBag (#49) + +- 1.5.3 + + - Add new method ``.map()`` for ``Either`` by @nvzqz (#40). 
+ +- 1.5.2 + + - Add new methods ``.left_or()``, ``.left_or_default()``, ``.left_or_else()``, + and equivalents on the right, by @DCjanus (#36) + +- 1.5.1 + + - Add ``AsRef`` and ``AsMut`` implementations for common unsized types: + ``str``, ``[T]``, ``CStr``, ``OsStr``, and ``Path``, by @mexus (#29) + +- 1.5.0 + + - Add new methods ``.factor_first()``, ``.factor_second()`` and ``.into_inner()`` + by @mathstuf (#19) + +- 1.4.0 + + - Add inherent method ``.into_iter()`` by @cuviper (#12) + +- 1.3.0 + + - Add opt-in serde support by @hcpl + +- 1.2.0 + + - Add method ``.either_with()`` by @Twey (#13) + +- 1.1.0 + + - Add methods ``left_and_then``, ``right_and_then`` by @rampantmonkey + - Include license files in the repository and released crate + +- 1.0.3 + + - Add crate categories + +- 1.0.2 + + - Forward more ``Iterator`` methods + - Implement ``Extend`` for ``Either`` if ``L, R`` do. + +- 1.0.1 + + - Fix ``Iterator`` impl for ``Either`` to forward ``.fold()``. + +- 1.0.0 + + - Add default crate feature ``use_std`` so that you can opt out of linking to + std. + +- 0.1.7 + + - Add methods ``.map_left()``, ``.map_right()`` and ``.either()``. + - Add more documentation + +- 0.1.3 + + - Implement Display, Error + +- 0.1.2 + + - Add macros ``try_left!`` and ``try_right!``. + +- 0.1.1 + + - Implement Deref, DerefMut + +- 0.1.0 + + - Initial release + - Support Iterator, Read, Write + +License +------- + +Dual-licensed to be compatible with the Rust project. + +Licensed under the Apache License, Version 2.0 +https://www.apache.org/licenses/LICENSE-2.0 or the MIT license +https://opensource.org/licenses/MIT, at your +option. This file may not be copied, modified, or distributed +except according to those terms. diff --git a/vendor/either/src/into_either.rs b/vendor/either/src/into_either.rs new file mode 100644 index 00000000000000..73746c80f11f15 --- /dev/null +++ b/vendor/either/src/into_either.rs @@ -0,0 +1,64 @@ +//! The trait [`IntoEither`] provides methods for converting a type `Self`, whose +//! size is constant and known at compile-time, into an [`Either`] variant. + +use super::{Either, Left, Right}; + +/// Provides methods for converting a type `Self` into either a [`Left`] or [`Right`] +/// variant of [`Either`](Either). +/// +/// The [`into_either`](IntoEither::into_either) method takes a [`bool`] to determine +/// whether to convert to [`Left`] or [`Right`]. +/// +/// The [`into_either_with`](IntoEither::into_either_with) method takes a +/// [predicate function](FnOnce) to determine whether to convert to [`Left`] or [`Right`]. +pub trait IntoEither: Sized { + /// Converts `self` into a [`Left`] variant of [`Either`](Either) + /// if `into_left` is `true`. + /// Converts `self` into a [`Right`] variant of [`Either`](Either) + /// otherwise. + /// + /// # Examples + /// + /// ``` + /// use either::{IntoEither, Left, Right}; + /// + /// let x = 0; + /// assert_eq!(x.into_either(true), Left(x)); + /// assert_eq!(x.into_either(false), Right(x)); + /// ``` + fn into_either(self, into_left: bool) -> Either { + if into_left { + Left(self) + } else { + Right(self) + } + } + + /// Converts `self` into a [`Left`] variant of [`Either`](Either) + /// if `into_left(&self)` returns `true`. + /// Converts `self` into a [`Right`] variant of [`Either`](Either) + /// otherwise. 
+ /// + /// # Examples + /// + /// ``` + /// use either::{IntoEither, Left, Right}; + /// + /// fn is_even(x: &u8) -> bool { + /// x % 2 == 0 + /// } + /// + /// let x = 0; + /// assert_eq!(x.into_either_with(is_even), Left(x)); + /// assert_eq!(x.into_either_with(|x| !is_even(x)), Right(x)); + /// ``` + fn into_either_with(self, into_left: F) -> Either + where + F: FnOnce(&Self) -> bool, + { + let into_left = into_left(&self); + self.into_either(into_left) + } +} + +impl IntoEither for T {} diff --git a/vendor/either/src/iterator.rs b/vendor/either/src/iterator.rs new file mode 100644 index 00000000000000..d54fab793d969a --- /dev/null +++ b/vendor/either/src/iterator.rs @@ -0,0 +1,315 @@ +use super::{for_both, Either, Left, Right}; +use core::iter; + +macro_rules! wrap_either { + ($value:expr => $( $tail:tt )*) => { + match $value { + Left(inner) => inner.map(Left) $($tail)*, + Right(inner) => inner.map(Right) $($tail)*, + } + }; +} + +/// Iterator that maps left or right iterators to corresponding `Either`-wrapped items. +/// +/// This struct is created by the [`Either::factor_into_iter`], +/// [`factor_iter`][Either::factor_iter], +/// and [`factor_iter_mut`][Either::factor_iter_mut] methods. +#[derive(Clone, Debug)] +pub struct IterEither { + inner: Either, +} + +impl IterEither { + pub(crate) fn new(inner: Either) -> Self { + IterEither { inner } + } +} + +impl Extend for Either +where + L: Extend, + R: Extend, +{ + fn extend(&mut self, iter: T) + where + T: IntoIterator, + { + for_both!(self, inner => inner.extend(iter)) + } +} + +/// `Either` is an iterator if both `L` and `R` are iterators. +impl Iterator for Either +where + L: Iterator, + R: Iterator, +{ + type Item = L::Item; + + fn next(&mut self) -> Option { + for_both!(self, inner => inner.next()) + } + + fn size_hint(&self) -> (usize, Option) { + for_both!(self, inner => inner.size_hint()) + } + + fn fold(self, init: Acc, f: G) -> Acc + where + G: FnMut(Acc, Self::Item) -> Acc, + { + for_both!(self, inner => inner.fold(init, f)) + } + + fn for_each(self, f: F) + where + F: FnMut(Self::Item), + { + for_both!(self, inner => inner.for_each(f)) + } + + fn count(self) -> usize { + for_both!(self, inner => inner.count()) + } + + fn last(self) -> Option { + for_both!(self, inner => inner.last()) + } + + fn nth(&mut self, n: usize) -> Option { + for_both!(self, inner => inner.nth(n)) + } + + fn collect(self) -> B + where + B: iter::FromIterator, + { + for_both!(self, inner => inner.collect()) + } + + fn partition(self, f: F) -> (B, B) + where + B: Default + Extend, + F: FnMut(&Self::Item) -> bool, + { + for_both!(self, inner => inner.partition(f)) + } + + fn all(&mut self, f: F) -> bool + where + F: FnMut(Self::Item) -> bool, + { + for_both!(self, inner => inner.all(f)) + } + + fn any(&mut self, f: F) -> bool + where + F: FnMut(Self::Item) -> bool, + { + for_both!(self, inner => inner.any(f)) + } + + fn find
<P>
(&mut self, predicate: P) -> Option + where + P: FnMut(&Self::Item) -> bool, + { + for_both!(self, inner => inner.find(predicate)) + } + + fn find_map(&mut self, f: F) -> Option + where + F: FnMut(Self::Item) -> Option, + { + for_both!(self, inner => inner.find_map(f)) + } + + fn position
<P>
(&mut self, predicate: P) -> Option + where + P: FnMut(Self::Item) -> bool, + { + for_both!(self, inner => inner.position(predicate)) + } +} + +impl DoubleEndedIterator for Either +where + L: DoubleEndedIterator, + R: DoubleEndedIterator, +{ + fn next_back(&mut self) -> Option { + for_both!(self, inner => inner.next_back()) + } + + fn nth_back(&mut self, n: usize) -> Option { + for_both!(self, inner => inner.nth_back(n)) + } + + fn rfold(self, init: Acc, f: G) -> Acc + where + G: FnMut(Acc, Self::Item) -> Acc, + { + for_both!(self, inner => inner.rfold(init, f)) + } + + fn rfind
<P>
(&mut self, predicate: P) -> Option + where + P: FnMut(&Self::Item) -> bool, + { + for_both!(self, inner => inner.rfind(predicate)) + } +} + +impl ExactSizeIterator for Either +where + L: ExactSizeIterator, + R: ExactSizeIterator, +{ + fn len(&self) -> usize { + for_both!(self, inner => inner.len()) + } +} + +impl iter::FusedIterator for Either +where + L: iter::FusedIterator, + R: iter::FusedIterator, +{ +} + +impl Iterator for IterEither +where + L: Iterator, + R: Iterator, +{ + type Item = Either; + + fn next(&mut self) -> Option { + Some(map_either!(self.inner, ref mut inner => inner.next()?)) + } + + fn size_hint(&self) -> (usize, Option) { + for_both!(self.inner, ref inner => inner.size_hint()) + } + + fn fold(self, init: Acc, f: G) -> Acc + where + G: FnMut(Acc, Self::Item) -> Acc, + { + wrap_either!(self.inner => .fold(init, f)) + } + + fn for_each(self, f: F) + where + F: FnMut(Self::Item), + { + wrap_either!(self.inner => .for_each(f)) + } + + fn count(self) -> usize { + for_both!(self.inner, inner => inner.count()) + } + + fn last(self) -> Option { + Some(map_either!(self.inner, inner => inner.last()?)) + } + + fn nth(&mut self, n: usize) -> Option { + Some(map_either!(self.inner, ref mut inner => inner.nth(n)?)) + } + + fn collect(self) -> B + where + B: iter::FromIterator, + { + wrap_either!(self.inner => .collect()) + } + + fn partition(self, f: F) -> (B, B) + where + B: Default + Extend, + F: FnMut(&Self::Item) -> bool, + { + wrap_either!(self.inner => .partition(f)) + } + + fn all(&mut self, f: F) -> bool + where + F: FnMut(Self::Item) -> bool, + { + wrap_either!(&mut self.inner => .all(f)) + } + + fn any(&mut self, f: F) -> bool + where + F: FnMut(Self::Item) -> bool, + { + wrap_either!(&mut self.inner => .any(f)) + } + + fn find
<P>
(&mut self, predicate: P) -> Option + where + P: FnMut(&Self::Item) -> bool, + { + wrap_either!(&mut self.inner => .find(predicate)) + } + + fn find_map(&mut self, f: F) -> Option + where + F: FnMut(Self::Item) -> Option, + { + wrap_either!(&mut self.inner => .find_map(f)) + } + + fn position
<P>
(&mut self, predicate: P) -> Option + where + P: FnMut(Self::Item) -> bool, + { + wrap_either!(&mut self.inner => .position(predicate)) + } +} + +impl DoubleEndedIterator for IterEither +where + L: DoubleEndedIterator, + R: DoubleEndedIterator, +{ + fn next_back(&mut self) -> Option { + Some(map_either!(self.inner, ref mut inner => inner.next_back()?)) + } + + fn nth_back(&mut self, n: usize) -> Option { + Some(map_either!(self.inner, ref mut inner => inner.nth_back(n)?)) + } + + fn rfold(self, init: Acc, f: G) -> Acc + where + G: FnMut(Acc, Self::Item) -> Acc, + { + wrap_either!(self.inner => .rfold(init, f)) + } + + fn rfind
<P>
(&mut self, predicate: P) -> Option + where + P: FnMut(&Self::Item) -> bool, + { + wrap_either!(&mut self.inner => .rfind(predicate)) + } +} + +impl ExactSizeIterator for IterEither +where + L: ExactSizeIterator, + R: ExactSizeIterator, +{ + fn len(&self) -> usize { + for_both!(self.inner, ref inner => inner.len()) + } +} + +impl iter::FusedIterator for IterEither +where + L: iter::FusedIterator, + R: iter::FusedIterator, +{ +} diff --git a/vendor/either/src/lib.rs b/vendor/either/src/lib.rs new file mode 100644 index 00000000000000..e2265eb7104057 --- /dev/null +++ b/vendor/either/src/lib.rs @@ -0,0 +1,1561 @@ +//! The enum [`Either`] with variants `Left` and `Right` is a general purpose +//! sum type with two cases. +//! +//! [`Either`]: enum.Either.html +//! +//! **Crate features:** +//! +//! * `"std"` +//! Enabled by default. Disable to make the library `#![no_std]`. +//! +//! * `"serde"` +//! Disabled by default. Enable to `#[derive(Serialize, Deserialize)]` for `Either` +//! + +#![doc(html_root_url = "https://docs.rs/either/1/")] +#![no_std] + +#[cfg(any(test, feature = "std"))] +extern crate std; + +#[cfg(feature = "serde")] +pub mod serde_untagged; + +#[cfg(feature = "serde")] +pub mod serde_untagged_optional; + +use core::convert::{AsMut, AsRef}; +use core::fmt; +use core::future::Future; +use core::ops::Deref; +use core::ops::DerefMut; +use core::pin::Pin; + +#[cfg(any(test, feature = "std"))] +use std::error::Error; +#[cfg(any(test, feature = "std"))] +use std::io::{self, BufRead, Read, Seek, SeekFrom, Write}; + +pub use crate::Either::{Left, Right}; + +/// The enum `Either` with variants `Left` and `Right` is a general purpose +/// sum type with two cases. +/// +/// The `Either` type is symmetric and treats its variants the same way, without +/// preference. +/// (For representing success or error, use the regular `Result` enum instead.) +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] +#[derive(Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)] +pub enum Either { + /// A value of type `L`. + Left(L), + /// A value of type `R`. + Right(R), +} + +/// Evaluate the provided expression for both [`Either::Left`] and [`Either::Right`]. +/// +/// This macro is useful in cases where both sides of [`Either`] can be interacted with +/// in the same way even though the don't share the same type. +/// +/// Syntax: `either::for_both!(` *expression* `,` *pattern* `=>` *expression* `)` +/// +/// # Example +/// +/// ``` +/// use either::Either; +/// +/// fn length(owned_or_borrowed: Either) -> usize { +/// either::for_both!(owned_or_borrowed, s => s.len()) +/// } +/// +/// fn main() { +/// let borrowed = Either::Right("Hello world!"); +/// let owned = Either::Left("Hello world!".to_owned()); +/// +/// assert_eq!(length(borrowed), 12); +/// assert_eq!(length(owned), 12); +/// } +/// ``` +#[macro_export] +macro_rules! for_both { + ($value:expr, $pattern:pat => $result:expr) => { + match $value { + $crate::Either::Left($pattern) => $result, + $crate::Either::Right($pattern) => $result, + } + }; +} + +/// Macro for unwrapping the left side of an [`Either`], which fails early +/// with the opposite side. Can only be used in functions that return +/// `Either` because of the early return of `Right` that it provides. +/// +/// See also [`try_right!`] for its dual, which applies the same just to the +/// right side. 
+/// +/// # Example +/// +/// ``` +/// use either::{Either, Left, Right}; +/// +/// fn twice(wrapper: Either) -> Either { +/// let value = either::try_left!(wrapper); +/// Left(value * 2) +/// } +/// +/// fn main() { +/// assert_eq!(twice(Left(2)), Left(4)); +/// assert_eq!(twice(Right("ups")), Right("ups")); +/// } +/// ``` +#[macro_export] +macro_rules! try_left { + ($expr:expr) => { + match $expr { + $crate::Left(val) => val, + $crate::Right(err) => return $crate::Right(::core::convert::From::from(err)), + } + }; +} + +/// Dual to [`try_left!`], see its documentation for more information. +#[macro_export] +macro_rules! try_right { + ($expr:expr) => { + match $expr { + $crate::Left(err) => return $crate::Left(::core::convert::From::from(err)), + $crate::Right(val) => val, + } + }; +} + +macro_rules! map_either { + ($value:expr, $pattern:pat => $result:expr) => { + match $value { + Left($pattern) => Left($result), + Right($pattern) => Right($result), + } + }; +} + +mod iterator; +pub use self::iterator::IterEither; + +mod into_either; +pub use self::into_either::IntoEither; + +impl Clone for Either { + fn clone(&self) -> Self { + match self { + Left(inner) => Left(inner.clone()), + Right(inner) => Right(inner.clone()), + } + } + + fn clone_from(&mut self, source: &Self) { + match (self, source) { + (Left(dest), Left(source)) => dest.clone_from(source), + (Right(dest), Right(source)) => dest.clone_from(source), + (dest, source) => *dest = source.clone(), + } + } +} + +impl Either { + /// Return true if the value is the `Left` variant. + /// + /// ``` + /// use either::*; + /// + /// let values = [Left(1), Right("the right value")]; + /// assert_eq!(values[0].is_left(), true); + /// assert_eq!(values[1].is_left(), false); + /// ``` + pub fn is_left(&self) -> bool { + match self { + Left(_) => true, + Right(_) => false, + } + } + + /// Return true if the value is the `Right` variant. + /// + /// ``` + /// use either::*; + /// + /// let values = [Left(1), Right("the right value")]; + /// assert_eq!(values[0].is_right(), false); + /// assert_eq!(values[1].is_right(), true); + /// ``` + pub fn is_right(&self) -> bool { + !self.is_left() + } + + /// Convert the left side of `Either` to an `Option`. + /// + /// ``` + /// use either::*; + /// + /// let left: Either<_, ()> = Left("some value"); + /// assert_eq!(left.left(), Some("some value")); + /// + /// let right: Either<(), _> = Right(321); + /// assert_eq!(right.left(), None); + /// ``` + pub fn left(self) -> Option { + match self { + Left(l) => Some(l), + Right(_) => None, + } + } + + /// Convert the right side of `Either` to an `Option`. + /// + /// ``` + /// use either::*; + /// + /// let left: Either<_, ()> = Left("some value"); + /// assert_eq!(left.right(), None); + /// + /// let right: Either<(), _> = Right(321); + /// assert_eq!(right.right(), Some(321)); + /// ``` + pub fn right(self) -> Option { + match self { + Left(_) => None, + Right(r) => Some(r), + } + } + + /// Convert `&Either` to `Either<&L, &R>`. + /// + /// ``` + /// use either::*; + /// + /// let left: Either<_, ()> = Left("some value"); + /// assert_eq!(left.as_ref(), Left(&"some value")); + /// + /// let right: Either<(), _> = Right("some value"); + /// assert_eq!(right.as_ref(), Right(&"some value")); + /// ``` + pub fn as_ref(&self) -> Either<&L, &R> { + map_either!(self, inner => inner) + } + + /// Convert `&mut Either` to `Either<&mut L, &mut R>`. 
+ /// + /// ``` + /// use either::*; + /// + /// fn mutate_left(value: &mut Either) { + /// if let Some(l) = value.as_mut().left() { + /// *l = 999; + /// } + /// } + /// + /// let mut left = Left(123); + /// let mut right = Right(123); + /// mutate_left(&mut left); + /// mutate_left(&mut right); + /// assert_eq!(left, Left(999)); + /// assert_eq!(right, Right(123)); + /// ``` + pub fn as_mut(&mut self) -> Either<&mut L, &mut R> { + map_either!(self, inner => inner) + } + + /// Convert `Pin<&Either>` to `Either, Pin<&R>>`, + /// pinned projections of the inner variants. + pub fn as_pin_ref(self: Pin<&Self>) -> Either, Pin<&R>> { + // SAFETY: We can use `new_unchecked` because the `inner` parts are + // guaranteed to be pinned, as they come from `self` which is pinned. + unsafe { map_either!(Pin::get_ref(self), inner => Pin::new_unchecked(inner)) } + } + + /// Convert `Pin<&mut Either>` to `Either, Pin<&mut R>>`, + /// pinned projections of the inner variants. + pub fn as_pin_mut(self: Pin<&mut Self>) -> Either, Pin<&mut R>> { + // SAFETY: `get_unchecked_mut` is fine because we don't move anything. + // We can use `new_unchecked` because the `inner` parts are guaranteed + // to be pinned, as they come from `self` which is pinned, and we never + // offer an unpinned `&mut L` or `&mut R` through `Pin<&mut Self>`. We + // also don't have an implementation of `Drop`, nor manual `Unpin`. + unsafe { map_either!(Pin::get_unchecked_mut(self), inner => Pin::new_unchecked(inner)) } + } + + /// Convert `Either` to `Either`. + /// + /// ``` + /// use either::*; + /// + /// let left: Either<_, ()> = Left(123); + /// assert_eq!(left.flip(), Right(123)); + /// + /// let right: Either<(), _> = Right("some value"); + /// assert_eq!(right.flip(), Left("some value")); + /// ``` + pub fn flip(self) -> Either { + match self { + Left(l) => Right(l), + Right(r) => Left(r), + } + } + + /// Apply the function `f` on the value in the `Left` variant if it is present rewrapping the + /// result in `Left`. + /// + /// ``` + /// use either::*; + /// + /// let left: Either<_, u32> = Left(123); + /// assert_eq!(left.map_left(|x| x * 2), Left(246)); + /// + /// let right: Either = Right(123); + /// assert_eq!(right.map_left(|x| x * 2), Right(123)); + /// ``` + pub fn map_left(self, f: F) -> Either + where + F: FnOnce(L) -> M, + { + match self { + Left(l) => Left(f(l)), + Right(r) => Right(r), + } + } + + /// Apply the function `f` on the value in the `Right` variant if it is present rewrapping the + /// result in `Right`. + /// + /// ``` + /// use either::*; + /// + /// let left: Either<_, u32> = Left(123); + /// assert_eq!(left.map_right(|x| x * 2), Left(123)); + /// + /// let right: Either = Right(123); + /// assert_eq!(right.map_right(|x| x * 2), Right(246)); + /// ``` + pub fn map_right(self, f: F) -> Either + where + F: FnOnce(R) -> S, + { + match self { + Left(l) => Left(l), + Right(r) => Right(f(r)), + } + } + + /// Apply the functions `f` and `g` to the `Left` and `Right` variants + /// respectively. This is equivalent to + /// [bimap](https://hackage.haskell.org/package/bifunctors-5/docs/Data-Bifunctor.html) + /// in functional programming. 
+ /// + /// ``` + /// use either::*; + /// + /// let f = |s: String| s.len(); + /// let g = |u: u8| u.to_string(); + /// + /// let left: Either = Left("loopy".into()); + /// assert_eq!(left.map_either(f, g), Left(5)); + /// + /// let right: Either = Right(42); + /// assert_eq!(right.map_either(f, g), Right("42".into())); + /// ``` + pub fn map_either(self, f: F, g: G) -> Either + where + F: FnOnce(L) -> M, + G: FnOnce(R) -> S, + { + match self { + Left(l) => Left(f(l)), + Right(r) => Right(g(r)), + } + } + + /// Similar to [`map_either`][Self::map_either], with an added context `ctx` accessible to + /// both functions. + /// + /// ``` + /// use either::*; + /// + /// let mut sum = 0; + /// + /// // Both closures want to update the same value, so pass it as context. + /// let mut f = |sum: &mut usize, s: String| { *sum += s.len(); s.to_uppercase() }; + /// let mut g = |sum: &mut usize, u: usize| { *sum += u; u.to_string() }; + /// + /// let left: Either = Left("loopy".into()); + /// assert_eq!(left.map_either_with(&mut sum, &mut f, &mut g), Left("LOOPY".into())); + /// + /// let right: Either = Right(42); + /// assert_eq!(right.map_either_with(&mut sum, &mut f, &mut g), Right("42".into())); + /// + /// assert_eq!(sum, 47); + /// ``` + pub fn map_either_with(self, ctx: Ctx, f: F, g: G) -> Either + where + F: FnOnce(Ctx, L) -> M, + G: FnOnce(Ctx, R) -> S, + { + match self { + Left(l) => Left(f(ctx, l)), + Right(r) => Right(g(ctx, r)), + } + } + + /// Apply one of two functions depending on contents, unifying their result. If the value is + /// `Left(L)` then the first function `f` is applied; if it is `Right(R)` then the second + /// function `g` is applied. + /// + /// ``` + /// use either::*; + /// + /// fn square(n: u32) -> i32 { (n * n) as i32 } + /// fn negate(n: i32) -> i32 { -n } + /// + /// let left: Either = Left(4); + /// assert_eq!(left.either(square, negate), 16); + /// + /// let right: Either = Right(-4); + /// assert_eq!(right.either(square, negate), 4); + /// ``` + pub fn either(self, f: F, g: G) -> T + where + F: FnOnce(L) -> T, + G: FnOnce(R) -> T, + { + match self { + Left(l) => f(l), + Right(r) => g(r), + } + } + + /// Like [`either`][Self::either], but provide some context to whichever of the + /// functions ends up being called. + /// + /// ``` + /// // In this example, the context is a mutable reference + /// use either::*; + /// + /// let mut result = Vec::new(); + /// + /// let values = vec![Left(2), Right(2.7)]; + /// + /// for value in values { + /// value.either_with(&mut result, + /// |ctx, integer| ctx.push(integer), + /// |ctx, real| ctx.push(f64::round(real) as i32)); + /// } + /// + /// assert_eq!(result, vec![2, 3]); + /// ``` + pub fn either_with(self, ctx: Ctx, f: F, g: G) -> T + where + F: FnOnce(Ctx, L) -> T, + G: FnOnce(Ctx, R) -> T, + { + match self { + Left(l) => f(ctx, l), + Right(r) => g(ctx, r), + } + } + + /// Apply the function `f` on the value in the `Left` variant if it is present. + /// + /// ``` + /// use either::*; + /// + /// let left: Either<_, u32> = Left(123); + /// assert_eq!(left.left_and_then::<_,()>(|x| Right(x * 2)), Right(246)); + /// + /// let right: Either = Right(123); + /// assert_eq!(right.left_and_then(|x| Right::<(), _>(x * 2)), Right(123)); + /// ``` + pub fn left_and_then(self, f: F) -> Either + where + F: FnOnce(L) -> Either, + { + match self { + Left(l) => f(l), + Right(r) => Right(r), + } + } + + /// Apply the function `f` on the value in the `Right` variant if it is present. 
+ /// + /// ``` + /// use either::*; + /// + /// let left: Either<_, u32> = Left(123); + /// assert_eq!(left.right_and_then(|x| Right(x * 2)), Left(123)); + /// + /// let right: Either = Right(123); + /// assert_eq!(right.right_and_then(|x| Right(x * 2)), Right(246)); + /// ``` + pub fn right_and_then(self, f: F) -> Either + where + F: FnOnce(R) -> Either, + { + match self { + Left(l) => Left(l), + Right(r) => f(r), + } + } + + /// Convert the inner value to an iterator. + /// + /// This requires the `Left` and `Right` iterators to have the same item type. + /// See [`factor_into_iter`][Either::factor_into_iter] to iterate different types. + /// + /// ``` + /// use either::*; + /// + /// let left: Either<_, Vec> = Left(vec![1, 2, 3, 4, 5]); + /// let mut right: Either, _> = Right(vec![]); + /// right.extend(left.into_iter()); + /// assert_eq!(right, Right(vec![1, 2, 3, 4, 5])); + /// ``` + #[allow(clippy::should_implement_trait)] + pub fn into_iter(self) -> Either + where + L: IntoIterator, + R: IntoIterator, + { + map_either!(self, inner => inner.into_iter()) + } + + /// Borrow the inner value as an iterator. + /// + /// This requires the `Left` and `Right` iterators to have the same item type. + /// See [`factor_iter`][Either::factor_iter] to iterate different types. + /// + /// ``` + /// use either::*; + /// + /// let left: Either<_, &[u32]> = Left(vec![2, 3]); + /// let mut right: Either, _> = Right(&[4, 5][..]); + /// let mut all = vec![1]; + /// all.extend(left.iter()); + /// all.extend(right.iter()); + /// assert_eq!(all, vec![1, 2, 3, 4, 5]); + /// ``` + pub fn iter(&self) -> Either<<&L as IntoIterator>::IntoIter, <&R as IntoIterator>::IntoIter> + where + for<'a> &'a L: IntoIterator, + for<'a> &'a R: IntoIterator::Item>, + { + map_either!(self, inner => inner.into_iter()) + } + + /// Mutably borrow the inner value as an iterator. + /// + /// This requires the `Left` and `Right` iterators to have the same item type. + /// See [`factor_iter_mut`][Either::factor_iter_mut] to iterate different types. + /// + /// ``` + /// use either::*; + /// + /// let mut left: Either<_, &mut [u32]> = Left(vec![2, 3]); + /// for l in left.iter_mut() { + /// *l *= *l + /// } + /// assert_eq!(left, Left(vec![4, 9])); + /// + /// let mut inner = [4, 5]; + /// let mut right: Either, _> = Right(&mut inner[..]); + /// for r in right.iter_mut() { + /// *r *= *r + /// } + /// assert_eq!(inner, [16, 25]); + /// ``` + pub fn iter_mut( + &mut self, + ) -> Either<<&mut L as IntoIterator>::IntoIter, <&mut R as IntoIterator>::IntoIter> + where + for<'a> &'a mut L: IntoIterator, + for<'a> &'a mut R: IntoIterator::Item>, + { + map_either!(self, inner => inner.into_iter()) + } + + /// Converts an `Either` of `Iterator`s to be an `Iterator` of `Either`s + /// + /// Unlike [`into_iter`][Either::into_iter], this does not require the + /// `Left` and `Right` iterators to have the same item type. 
+ /// + /// ``` + /// use either::*; + /// let left: Either<_, Vec> = Left(&["hello"]); + /// assert_eq!(left.factor_into_iter().next(), Some(Left(&"hello"))); + /// + /// let right: Either<&[&str], _> = Right(vec![0, 1]); + /// assert_eq!(right.factor_into_iter().collect::>(), vec![Right(0), Right(1)]); + /// + /// ``` + // TODO(MSRV): doc(alias) was stabilized in Rust 1.48 + // #[doc(alias = "transpose")] + pub fn factor_into_iter(self) -> IterEither + where + L: IntoIterator, + R: IntoIterator, + { + IterEither::new(map_either!(self, inner => inner.into_iter())) + } + + /// Borrows an `Either` of `Iterator`s to be an `Iterator` of `Either`s + /// + /// Unlike [`iter`][Either::iter], this does not require the + /// `Left` and `Right` iterators to have the same item type. + /// + /// ``` + /// use either::*; + /// let left: Either<_, Vec> = Left(["hello"]); + /// assert_eq!(left.factor_iter().next(), Some(Left(&"hello"))); + /// + /// let right: Either<[&str; 2], _> = Right(vec![0, 1]); + /// assert_eq!(right.factor_iter().collect::>(), vec![Right(&0), Right(&1)]); + /// + /// ``` + pub fn factor_iter( + &self, + ) -> IterEither<<&L as IntoIterator>::IntoIter, <&R as IntoIterator>::IntoIter> + where + for<'a> &'a L: IntoIterator, + for<'a> &'a R: IntoIterator, + { + IterEither::new(map_either!(self, inner => inner.into_iter())) + } + + /// Mutably borrows an `Either` of `Iterator`s to be an `Iterator` of `Either`s + /// + /// Unlike [`iter_mut`][Either::iter_mut], this does not require the + /// `Left` and `Right` iterators to have the same item type. + /// + /// ``` + /// use either::*; + /// let mut left: Either<_, Vec> = Left(["hello"]); + /// left.factor_iter_mut().for_each(|x| *x.unwrap_left() = "goodbye"); + /// assert_eq!(left, Left(["goodbye"])); + /// + /// let mut right: Either<[&str; 2], _> = Right(vec![0, 1, 2]); + /// right.factor_iter_mut().for_each(|x| if let Right(r) = x { *r = -*r; }); + /// assert_eq!(right, Right(vec![0, -1, -2])); + /// + /// ``` + pub fn factor_iter_mut( + &mut self, + ) -> IterEither<<&mut L as IntoIterator>::IntoIter, <&mut R as IntoIterator>::IntoIter> + where + for<'a> &'a mut L: IntoIterator, + for<'a> &'a mut R: IntoIterator, + { + IterEither::new(map_either!(self, inner => inner.into_iter())) + } + + /// Return left value or given value + /// + /// Arguments passed to `left_or` are eagerly evaluated; if you are passing + /// the result of a function call, it is recommended to use + /// [`left_or_else`][Self::left_or_else], which is lazily evaluated. 
+ /// + /// # Examples + /// + /// ``` + /// # use either::*; + /// let left: Either<&str, &str> = Left("left"); + /// assert_eq!(left.left_or("foo"), "left"); + /// + /// let right: Either<&str, &str> = Right("right"); + /// assert_eq!(right.left_or("left"), "left"); + /// ``` + pub fn left_or(self, other: L) -> L { + match self { + Either::Left(l) => l, + Either::Right(_) => other, + } + } + + /// Return left or a default + /// + /// # Examples + /// + /// ``` + /// # use either::*; + /// let left: Either = Left("left".to_string()); + /// assert_eq!(left.left_or_default(), "left"); + /// + /// let right: Either = Right(42); + /// assert_eq!(right.left_or_default(), String::default()); + /// ``` + pub fn left_or_default(self) -> L + where + L: Default, + { + match self { + Either::Left(l) => l, + Either::Right(_) => L::default(), + } + } + + /// Returns left value or computes it from a closure + /// + /// # Examples + /// + /// ``` + /// # use either::*; + /// let left: Either = Left("3".to_string()); + /// assert_eq!(left.left_or_else(|_| unreachable!()), "3"); + /// + /// let right: Either = Right(3); + /// assert_eq!(right.left_or_else(|x| x.to_string()), "3"); + /// ``` + pub fn left_or_else(self, f: F) -> L + where + F: FnOnce(R) -> L, + { + match self { + Either::Left(l) => l, + Either::Right(r) => f(r), + } + } + + /// Return right value or given value + /// + /// Arguments passed to `right_or` are eagerly evaluated; if you are passing + /// the result of a function call, it is recommended to use + /// [`right_or_else`][Self::right_or_else], which is lazily evaluated. + /// + /// # Examples + /// + /// ``` + /// # use either::*; + /// let right: Either<&str, &str> = Right("right"); + /// assert_eq!(right.right_or("foo"), "right"); + /// + /// let left: Either<&str, &str> = Left("left"); + /// assert_eq!(left.right_or("right"), "right"); + /// ``` + pub fn right_or(self, other: R) -> R { + match self { + Either::Left(_) => other, + Either::Right(r) => r, + } + } + + /// Return right or a default + /// + /// # Examples + /// + /// ``` + /// # use either::*; + /// let left: Either = Left("left".to_string()); + /// assert_eq!(left.right_or_default(), u32::default()); + /// + /// let right: Either = Right(42); + /// assert_eq!(right.right_or_default(), 42); + /// ``` + pub fn right_or_default(self) -> R + where + R: Default, + { + match self { + Either::Left(_) => R::default(), + Either::Right(r) => r, + } + } + + /// Returns right value or computes it from a closure + /// + /// # Examples + /// + /// ``` + /// # use either::*; + /// let left: Either = Left("3".to_string()); + /// assert_eq!(left.right_or_else(|x| x.parse().unwrap()), 3); + /// + /// let right: Either = Right(3); + /// assert_eq!(right.right_or_else(|_| unreachable!()), 3); + /// ``` + pub fn right_or_else(self, f: F) -> R + where + F: FnOnce(L) -> R, + { + match self { + Either::Left(l) => f(l), + Either::Right(r) => r, + } + } + + /// Returns the left value + /// + /// # Examples + /// + /// ``` + /// # use either::*; + /// let left: Either<_, ()> = Left(3); + /// assert_eq!(left.unwrap_left(), 3); + /// ``` + /// + /// # Panics + /// + /// When `Either` is a `Right` value + /// + /// ```should_panic + /// # use either::*; + /// let right: Either<(), _> = Right(3); + /// right.unwrap_left(); + /// ``` + pub fn unwrap_left(self) -> L + where + R: core::fmt::Debug, + { + match self { + Either::Left(l) => l, + Either::Right(r) => { + panic!("called `Either::unwrap_left()` on a `Right` value: {:?}", r) + } + } + } + + /// 
Returns the right value + /// + /// # Examples + /// + /// ``` + /// # use either::*; + /// let right: Either<(), _> = Right(3); + /// assert_eq!(right.unwrap_right(), 3); + /// ``` + /// + /// # Panics + /// + /// When `Either` is a `Left` value + /// + /// ```should_panic + /// # use either::*; + /// let left: Either<_, ()> = Left(3); + /// left.unwrap_right(); + /// ``` + pub fn unwrap_right(self) -> R + where + L: core::fmt::Debug, + { + match self { + Either::Right(r) => r, + Either::Left(l) => panic!("called `Either::unwrap_right()` on a `Left` value: {:?}", l), + } + } + + /// Returns the left value + /// + /// # Examples + /// + /// ``` + /// # use either::*; + /// let left: Either<_, ()> = Left(3); + /// assert_eq!(left.expect_left("value was Right"), 3); + /// ``` + /// + /// # Panics + /// + /// When `Either` is a `Right` value + /// + /// ```should_panic + /// # use either::*; + /// let right: Either<(), _> = Right(3); + /// right.expect_left("value was Right"); + /// ``` + pub fn expect_left(self, msg: &str) -> L + where + R: core::fmt::Debug, + { + match self { + Either::Left(l) => l, + Either::Right(r) => panic!("{}: {:?}", msg, r), + } + } + + /// Returns the right value + /// + /// # Examples + /// + /// ``` + /// # use either::*; + /// let right: Either<(), _> = Right(3); + /// assert_eq!(right.expect_right("value was Left"), 3); + /// ``` + /// + /// # Panics + /// + /// When `Either` is a `Left` value + /// + /// ```should_panic + /// # use either::*; + /// let left: Either<_, ()> = Left(3); + /// left.expect_right("value was Right"); + /// ``` + pub fn expect_right(self, msg: &str) -> R + where + L: core::fmt::Debug, + { + match self { + Either::Right(r) => r, + Either::Left(l) => panic!("{}: {:?}", msg, l), + } + } + + /// Convert the contained value into `T` + /// + /// # Examples + /// + /// ``` + /// # use either::*; + /// // Both u16 and u32 can be converted to u64. + /// let left: Either = Left(3u16); + /// assert_eq!(left.either_into::(), 3u64); + /// let right: Either = Right(7u32); + /// assert_eq!(right.either_into::(), 7u64); + /// ``` + pub fn either_into(self) -> T + where + L: Into, + R: Into, + { + for_both!(self, inner => inner.into()) + } +} + +impl Either, Option> { + /// Factors out `None` from an `Either` of [`Option`]. + /// + /// ``` + /// use either::*; + /// let left: Either<_, Option> = Left(Some(vec![0])); + /// assert_eq!(left.factor_none(), Some(Left(vec![0]))); + /// + /// let right: Either>, _> = Right(Some(String::new())); + /// assert_eq!(right.factor_none(), Some(Right(String::new()))); + /// ``` + // TODO(MSRV): doc(alias) was stabilized in Rust 1.48 + // #[doc(alias = "transpose")] + pub fn factor_none(self) -> Option> { + match self { + Left(l) => l.map(Either::Left), + Right(r) => r.map(Either::Right), + } + } +} + +impl Either, Result> { + /// Factors out a homogenous type from an `Either` of [`Result`]. + /// + /// Here, the homogeneous type is the `Err` type of the [`Result`]. 
+ /// + /// ``` + /// use either::*; + /// let left: Either<_, Result> = Left(Ok(vec![0])); + /// assert_eq!(left.factor_err(), Ok(Left(vec![0]))); + /// + /// let right: Either, u32>, _> = Right(Ok(String::new())); + /// assert_eq!(right.factor_err(), Ok(Right(String::new()))); + /// ``` + // TODO(MSRV): doc(alias) was stabilized in Rust 1.48 + // #[doc(alias = "transpose")] + pub fn factor_err(self) -> Result, E> { + match self { + Left(l) => l.map(Either::Left), + Right(r) => r.map(Either::Right), + } + } +} + +impl Either, Result> { + /// Factors out a homogenous type from an `Either` of [`Result`]. + /// + /// Here, the homogeneous type is the `Ok` type of the [`Result`]. + /// + /// ``` + /// use either::*; + /// let left: Either<_, Result> = Left(Err(vec![0])); + /// assert_eq!(left.factor_ok(), Err(Left(vec![0]))); + /// + /// let right: Either>, _> = Right(Err(String::new())); + /// assert_eq!(right.factor_ok(), Err(Right(String::new()))); + /// ``` + // TODO(MSRV): doc(alias) was stabilized in Rust 1.48 + // #[doc(alias = "transpose")] + pub fn factor_ok(self) -> Result> { + match self { + Left(l) => l.map_err(Either::Left), + Right(r) => r.map_err(Either::Right), + } + } +} + +impl Either<(T, L), (T, R)> { + /// Factor out a homogeneous type from an either of pairs. + /// + /// Here, the homogeneous type is the first element of the pairs. + /// + /// ``` + /// use either::*; + /// let left: Either<_, (u32, String)> = Left((123, vec![0])); + /// assert_eq!(left.factor_first().0, 123); + /// + /// let right: Either<(u32, Vec), _> = Right((123, String::new())); + /// assert_eq!(right.factor_first().0, 123); + /// ``` + pub fn factor_first(self) -> (T, Either) { + match self { + Left((t, l)) => (t, Left(l)), + Right((t, r)) => (t, Right(r)), + } + } +} + +impl Either<(L, T), (R, T)> { + /// Factor out a homogeneous type from an either of pairs. + /// + /// Here, the homogeneous type is the second element of the pairs. + /// + /// ``` + /// use either::*; + /// let left: Either<_, (String, u32)> = Left((vec![0], 123)); + /// assert_eq!(left.factor_second().1, 123); + /// + /// let right: Either<(Vec, u32), _> = Right((String::new(), 123)); + /// assert_eq!(right.factor_second().1, 123); + /// ``` + pub fn factor_second(self) -> (Either, T) { + match self { + Left((l, t)) => (Left(l), t), + Right((r, t)) => (Right(r), t), + } + } +} + +impl Either { + /// Extract the value of an either over two equivalent types. + /// + /// ``` + /// use either::*; + /// + /// let left: Either<_, u32> = Left(123); + /// assert_eq!(left.into_inner(), 123); + /// + /// let right: Either = Right(123); + /// assert_eq!(right.into_inner(), 123); + /// ``` + pub fn into_inner(self) -> T { + for_both!(self, inner => inner) + } + + /// Map `f` over the contained value and return the result in the + /// corresponding variant. + /// + /// ``` + /// use either::*; + /// + /// let value: Either<_, i32> = Right(42); + /// + /// let other = value.map(|x| x * 2); + /// assert_eq!(other, Right(84)); + /// ``` + pub fn map(self, f: F) -> Either + where + F: FnOnce(T) -> M, + { + match self { + Left(l) => Left(f(l)), + Right(r) => Right(f(r)), + } + } +} + +impl Either<&L, &R> { + /// Maps an `Either<&L, &R>` to an `Either` by cloning the contents of + /// either branch. + pub fn cloned(self) -> Either + where + L: Clone, + R: Clone, + { + map_either!(self, inner => inner.clone()) + } + + /// Maps an `Either<&L, &R>` to an `Either` by copying the contents of + /// either branch. 
+ pub fn copied(self) -> Either + where + L: Copy, + R: Copy, + { + map_either!(self, inner => *inner) + } +} + +impl Either<&mut L, &mut R> { + /// Maps an `Either<&mut L, &mut R>` to an `Either` by cloning the contents of + /// either branch. + pub fn cloned(self) -> Either + where + L: Clone, + R: Clone, + { + map_either!(self, inner => inner.clone()) + } + + /// Maps an `Either<&mut L, &mut R>` to an `Either` by copying the contents of + /// either branch. + pub fn copied(self) -> Either + where + L: Copy, + R: Copy, + { + map_either!(self, inner => *inner) + } +} + +/// Convert from `Result` to `Either` with `Ok => Right` and `Err => Left`. +impl From> for Either { + fn from(r: Result) -> Self { + match r { + Err(e) => Left(e), + Ok(o) => Right(o), + } + } +} + +/// Convert from `Either` to `Result` with `Right => Ok` and `Left => Err`. +impl From> for Result { + fn from(val: Either) -> Self { + match val { + Left(l) => Err(l), + Right(r) => Ok(r), + } + } +} + +/// `Either` is a future if both `L` and `R` are futures. +impl Future for Either +where + L: Future, + R: Future, +{ + type Output = L::Output; + + fn poll( + self: Pin<&mut Self>, + cx: &mut core::task::Context<'_>, + ) -> core::task::Poll { + for_both!(self.as_pin_mut(), inner => inner.poll(cx)) + } +} + +#[cfg(any(test, feature = "std"))] +/// `Either` implements `Read` if both `L` and `R` do. +/// +/// Requires crate feature `"std"` +impl Read for Either +where + L: Read, + R: Read, +{ + fn read(&mut self, buf: &mut [u8]) -> io::Result { + for_both!(self, inner => inner.read(buf)) + } + + fn read_exact(&mut self, buf: &mut [u8]) -> io::Result<()> { + for_both!(self, inner => inner.read_exact(buf)) + } + + fn read_to_end(&mut self, buf: &mut std::vec::Vec) -> io::Result { + for_both!(self, inner => inner.read_to_end(buf)) + } + + fn read_to_string(&mut self, buf: &mut std::string::String) -> io::Result { + for_both!(self, inner => inner.read_to_string(buf)) + } +} + +#[cfg(any(test, feature = "std"))] +/// `Either` implements `Seek` if both `L` and `R` do. +/// +/// Requires crate feature `"std"` +impl Seek for Either +where + L: Seek, + R: Seek, +{ + fn seek(&mut self, pos: SeekFrom) -> io::Result { + for_both!(self, inner => inner.seek(pos)) + } +} + +#[cfg(any(test, feature = "std"))] +/// Requires crate feature `"std"` +impl BufRead for Either +where + L: BufRead, + R: BufRead, +{ + fn fill_buf(&mut self) -> io::Result<&[u8]> { + for_both!(self, inner => inner.fill_buf()) + } + + fn consume(&mut self, amt: usize) { + for_both!(self, inner => inner.consume(amt)) + } + + fn read_until(&mut self, byte: u8, buf: &mut std::vec::Vec) -> io::Result { + for_both!(self, inner => inner.read_until(byte, buf)) + } + + fn read_line(&mut self, buf: &mut std::string::String) -> io::Result { + for_both!(self, inner => inner.read_line(buf)) + } +} + +#[cfg(any(test, feature = "std"))] +/// `Either` implements `Write` if both `L` and `R` do. 
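The `std::io` forwarding impls in this stretch (`Read`, `Seek`, `BufRead`, and the `Write` impl introduced just above) ship without doc examples of their own. The following is a minimal usage sketch, not part of the vendored source: the `use_buffer` flag and the `Vec<u8>`/`Stderr` pairing are illustrative assumptions, chosen only to show a writer selected at runtime and driven through the single shared `Write` impl.

```
use std::io::{self, Write};
use either::{Either, Left, Right};

fn main() -> io::Result<()> {
    // Illustrative flag: pick an in-memory buffer or stderr at runtime.
    let use_buffer = true;
    let mut writer: Either<Vec<u8>, io::Stderr> = if use_buffer {
        Left(Vec::new())
    } else {
        Right(io::stderr())
    };

    // Both variants are written through the one `impl Write for Either<L, R>`.
    writer.write_all(b"hello")?;
    writer.flush()?;

    if let Left(buf) = writer {
        assert_eq!(buf, b"hello".to_vec());
    }
    Ok(())
}
```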
+/// +/// Requires crate feature `"std"` +impl Write for Either +where + L: Write, + R: Write, +{ + fn write(&mut self, buf: &[u8]) -> io::Result { + for_both!(self, inner => inner.write(buf)) + } + + fn write_all(&mut self, buf: &[u8]) -> io::Result<()> { + for_both!(self, inner => inner.write_all(buf)) + } + + fn write_fmt(&mut self, fmt: fmt::Arguments<'_>) -> io::Result<()> { + for_both!(self, inner => inner.write_fmt(fmt)) + } + + fn flush(&mut self) -> io::Result<()> { + for_both!(self, inner => inner.flush()) + } +} + +impl AsRef for Either +where + L: AsRef, + R: AsRef, +{ + fn as_ref(&self) -> &Target { + for_both!(self, inner => inner.as_ref()) + } +} + +macro_rules! impl_specific_ref_and_mut { + ($t:ty, $($attr:meta),* ) => { + $(#[$attr])* + impl AsRef<$t> for Either + where L: AsRef<$t>, R: AsRef<$t> + { + fn as_ref(&self) -> &$t { + for_both!(self, inner => inner.as_ref()) + } + } + + $(#[$attr])* + impl AsMut<$t> for Either + where L: AsMut<$t>, R: AsMut<$t> + { + fn as_mut(&mut self) -> &mut $t { + for_both!(self, inner => inner.as_mut()) + } + } + }; +} + +impl_specific_ref_and_mut!(str,); +impl_specific_ref_and_mut!( + ::std::path::Path, + cfg(feature = "std"), + doc = "Requires crate feature `std`." +); +impl_specific_ref_and_mut!( + ::std::ffi::OsStr, + cfg(feature = "std"), + doc = "Requires crate feature `std`." +); +impl_specific_ref_and_mut!( + ::std::ffi::CStr, + cfg(feature = "std"), + doc = "Requires crate feature `std`." +); + +impl AsRef<[Target]> for Either +where + L: AsRef<[Target]>, + R: AsRef<[Target]>, +{ + fn as_ref(&self) -> &[Target] { + for_both!(self, inner => inner.as_ref()) + } +} + +impl AsMut for Either +where + L: AsMut, + R: AsMut, +{ + fn as_mut(&mut self) -> &mut Target { + for_both!(self, inner => inner.as_mut()) + } +} + +impl AsMut<[Target]> for Either +where + L: AsMut<[Target]>, + R: AsMut<[Target]>, +{ + fn as_mut(&mut self) -> &mut [Target] { + for_both!(self, inner => inner.as_mut()) + } +} + +impl Deref for Either +where + L: Deref, + R: Deref, +{ + type Target = L::Target; + + fn deref(&self) -> &Self::Target { + for_both!(self, inner => &**inner) + } +} + +impl DerefMut for Either +where + L: DerefMut, + R: DerefMut, +{ + fn deref_mut(&mut self) -> &mut Self::Target { + for_both!(self, inner => &mut *inner) + } +} + +#[cfg(any(test, feature = "std"))] +/// `Either` implements `Error` if *both* `L` and `R` implement it. 
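The `Error` impl that follows also has no doc example. As a hedged illustration only (again not part of the vendored crate), it lets a function surface one of two concrete error types without boxing; the `parse` helper below and its `Utf8Error`/`ParseIntError` pairing are assumptions made for the sketch.

```
use std::error::Error;
use std::num::ParseIntError;
use std::str::Utf8Error;
use either::{Either, Left, Right};

// Hypothetical helper: decode UTF-8 bytes and parse them as an integer,
// reporting whichever step failed through a single `Either` error type.
fn parse(bytes: &[u8]) -> Result<i64, Either<Utf8Error, ParseIntError>> {
    let text = std::str::from_utf8(bytes).map_err(Left)?;
    text.trim().parse::<i64>().map_err(Right)
}

fn main() {
    match parse(b"42") {
        Ok(n) => println!("parsed {n}"),
        // `Either<Utf8Error, ParseIntError>` implements `Display` and `Error`,
        // so it can be reported like any other error value.
        Err(e) => println!("failed: {e} (source: {:?})", e.source()),
    }
}
```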
+/// +/// Requires crate feature `"std"` +impl Error for Either +where + L: Error, + R: Error, +{ + fn source(&self) -> Option<&(dyn Error + 'static)> { + for_both!(self, inner => inner.source()) + } + + #[allow(deprecated)] + fn description(&self) -> &str { + for_both!(self, inner => inner.description()) + } + + #[allow(deprecated)] + fn cause(&self) -> Option<&dyn Error> { + for_both!(self, inner => inner.cause()) + } +} + +impl fmt::Display for Either +where + L: fmt::Display, + R: fmt::Display, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + for_both!(self, inner => inner.fmt(f)) + } +} + +impl fmt::Write for Either +where + L: fmt::Write, + R: fmt::Write, +{ + fn write_str(&mut self, s: &str) -> fmt::Result { + for_both!(self, inner => inner.write_str(s)) + } + + fn write_char(&mut self, c: char) -> fmt::Result { + for_both!(self, inner => inner.write_char(c)) + } + + fn write_fmt(&mut self, args: fmt::Arguments<'_>) -> fmt::Result { + for_both!(self, inner => inner.write_fmt(args)) + } +} + +#[test] +fn basic() { + let mut e = Left(2); + let r = Right(2); + assert_eq!(e, Left(2)); + e = r; + assert_eq!(e, Right(2)); + assert_eq!(e.left(), None); + assert_eq!(e.right(), Some(2)); + assert_eq!(e.as_ref().right(), Some(&2)); + assert_eq!(e.as_mut().right(), Some(&mut 2)); +} + +#[test] +fn macros() { + use std::string::String; + + fn a() -> Either { + let x: u32 = try_left!(Right(1337u32)); + Left(x * 2) + } + assert_eq!(a(), Right(1337)); + + fn b() -> Either { + Right(try_right!(Left("foo bar"))) + } + assert_eq!(b(), Left(String::from("foo bar"))); +} + +#[test] +fn deref() { + use std::string::String; + + fn is_str(_: &str) {} + let value: Either = Left(String::from("test")); + is_str(&value); +} + +#[test] +fn iter() { + let x = 3; + let mut iter = match x { + 3 => Left(0..10), + _ => Right(17..), + }; + + assert_eq!(iter.next(), Some(0)); + assert_eq!(iter.count(), 9); +} + +#[test] +fn seek() { + use std::io; + + let use_empty = false; + let mut mockdata = [0x00; 256]; + for (i, data) in mockdata.iter_mut().enumerate() { + *data = i as u8; + } + + let mut reader = if use_empty { + // Empty didn't impl Seek until Rust 1.51 + Left(io::Cursor::new([])) + } else { + Right(io::Cursor::new(&mockdata[..])) + }; + + let mut buf = [0u8; 16]; + assert_eq!(reader.read(&mut buf).unwrap(), buf.len()); + assert_eq!(buf, mockdata[..buf.len()]); + + // the first read should advance the cursor and return the next 16 bytes thus the `ne` + assert_eq!(reader.read(&mut buf).unwrap(), buf.len()); + assert_ne!(buf, mockdata[..buf.len()]); + + // if the seek operation fails it should read 16..31 instead of 0..15 + reader.seek(io::SeekFrom::Start(0)).unwrap(); + assert_eq!(reader.read(&mut buf).unwrap(), buf.len()); + assert_eq!(buf, mockdata[..buf.len()]); +} + +#[test] +fn read_write() { + use std::io; + + let use_stdio = false; + let mockdata = [0xff; 256]; + + let mut reader = if use_stdio { + Left(io::stdin()) + } else { + Right(&mockdata[..]) + }; + + let mut buf = [0u8; 16]; + assert_eq!(reader.read(&mut buf).unwrap(), buf.len()); + assert_eq!(&buf, &mockdata[..buf.len()]); + + let mut mockbuf = [0u8; 256]; + let mut writer = if use_stdio { + Left(io::stdout()) + } else { + Right(&mut mockbuf[..]) + }; + + let buf = [1u8; 16]; + assert_eq!(writer.write(&buf).unwrap(), buf.len()); +} + +#[test] +fn error() { + let invalid_utf8 = b"\xff"; + #[allow(invalid_from_utf8)] + let res = if let Err(error) = ::std::str::from_utf8(invalid_utf8) { + Err(Left(error)) + } else if let Err(error) 
= "x".parse::() { + Err(Right(error)) + } else { + Ok(()) + }; + assert!(res.is_err()); + #[allow(deprecated)] + res.unwrap_err().description(); // make sure this can be called +} + +/// A helper macro to check if AsRef and AsMut are implemented for a given type. +macro_rules! check_t { + ($t:ty) => {{ + fn check_ref>() {} + fn propagate_ref, T2: AsRef<$t>>() { + check_ref::>() + } + fn check_mut>() {} + fn propagate_mut, T2: AsMut<$t>>() { + check_mut::>() + } + }}; +} + +// This "unused" method is here to ensure that compilation doesn't fail on given types. +fn _unsized_ref_propagation() { + check_t!(str); + + fn check_array_ref, Item>() {} + fn check_array_mut, Item>() {} + + fn propagate_array_ref, T2: AsRef<[Item]>, Item>() { + check_array_ref::, _>() + } + + fn propagate_array_mut, T2: AsMut<[Item]>, Item>() { + check_array_mut::, _>() + } +} + +// This "unused" method is here to ensure that compilation doesn't fail on given types. +#[cfg(feature = "std")] +fn _unsized_std_propagation() { + check_t!(::std::path::Path); + check_t!(::std::ffi::OsStr); + check_t!(::std::ffi::CStr); +} diff --git a/vendor/either/src/serde_untagged.rs b/vendor/either/src/serde_untagged.rs new file mode 100644 index 00000000000000..72078c3ec8e88e --- /dev/null +++ b/vendor/either/src/serde_untagged.rs @@ -0,0 +1,69 @@ +//! Untagged serialization/deserialization support for Either. +//! +//! `Either` uses default, externally-tagged representation. +//! However, sometimes it is useful to support several alternative types. +//! For example, we may have a field which is generally Map +//! but in typical cases Vec would suffice, too. +//! +//! ```rust +//! # fn main() -> Result<(), Box> { +//! use either::Either; +//! use std::collections::HashMap; +//! +//! #[derive(serde::Serialize, serde::Deserialize, Debug)] +//! #[serde(transparent)] +//! struct IntOrString { +//! #[serde(with = "either::serde_untagged")] +//! inner: Either, HashMap> +//! }; +//! +//! // serialization +//! let data = IntOrString { +//! inner: Either::Left(vec!["Hello".to_string()]) +//! }; +//! // notice: no tags are emitted. +//! assert_eq!(serde_json::to_string(&data)?, r#"["Hello"]"#); +//! +//! // deserialization +//! let data: IntOrString = serde_json::from_str( +//! r#"{"a": 0, "b": 14}"# +//! )?; +//! println!("found {:?}", data); +//! # Ok(()) +//! # } +//! ``` + +use serde::{Deserialize, Deserializer, Serialize, Serializer}; + +#[derive(serde::Serialize, serde::Deserialize)] +#[serde(untagged)] +enum Either { + Left(L), + Right(R), +} + +pub fn serialize(this: &super::Either, serializer: S) -> Result +where + S: Serializer, + L: Serialize, + R: Serialize, +{ + let untagged = match this { + super::Either::Left(left) => Either::Left(left), + super::Either::Right(right) => Either::Right(right), + }; + untagged.serialize(serializer) +} + +pub fn deserialize<'de, L, R, D>(deserializer: D) -> Result, D::Error> +where + D: Deserializer<'de>, + L: Deserialize<'de>, + R: Deserialize<'de>, +{ + match Either::deserialize(deserializer) { + Ok(Either::Left(left)) => Ok(super::Either::Left(left)), + Ok(Either::Right(right)) => Ok(super::Either::Right(right)), + Err(error) => Err(error), + } +} diff --git a/vendor/either/src/serde_untagged_optional.rs b/vendor/either/src/serde_untagged_optional.rs new file mode 100644 index 00000000000000..fb3239ace1d5e4 --- /dev/null +++ b/vendor/either/src/serde_untagged_optional.rs @@ -0,0 +1,74 @@ +//! Untagged serialization/deserialization support for Option>. +//! +//! 
`Either` uses default, externally-tagged representation. +//! However, sometimes it is useful to support several alternative types. +//! For example, we may have a field which is generally Map +//! but in typical cases Vec would suffice, too. +//! +//! ```rust +//! # fn main() -> Result<(), Box> { +//! use either::Either; +//! use std::collections::HashMap; +//! +//! #[derive(serde::Serialize, serde::Deserialize, Debug)] +//! #[serde(transparent)] +//! struct IntOrString { +//! #[serde(with = "either::serde_untagged_optional")] +//! inner: Option, HashMap>> +//! }; +//! +//! // serialization +//! let data = IntOrString { +//! inner: Some(Either::Left(vec!["Hello".to_string()])) +//! }; +//! // notice: no tags are emitted. +//! assert_eq!(serde_json::to_string(&data)?, r#"["Hello"]"#); +//! +//! // deserialization +//! let data: IntOrString = serde_json::from_str( +//! r#"{"a": 0, "b": 14}"# +//! )?; +//! println!("found {:?}", data); +//! # Ok(()) +//! # } +//! ``` + +use serde::{Deserialize, Deserializer, Serialize, Serializer}; + +#[derive(Serialize, Deserialize)] +#[serde(untagged)] +enum Either { + Left(L), + Right(R), +} + +pub fn serialize( + this: &Option>, + serializer: S, +) -> Result +where + S: Serializer, + L: Serialize, + R: Serialize, +{ + let untagged = match this { + Some(super::Either::Left(left)) => Some(Either::Left(left)), + Some(super::Either::Right(right)) => Some(Either::Right(right)), + None => None, + }; + untagged.serialize(serializer) +} + +pub fn deserialize<'de, L, R, D>(deserializer: D) -> Result>, D::Error> +where + D: Deserializer<'de>, + L: Deserialize<'de>, + R: Deserialize<'de>, +{ + match Option::deserialize(deserializer) { + Ok(Some(Either::Left(left))) => Ok(Some(super::Either::Left(left))), + Ok(Some(Either::Right(right))) => Ok(Some(super::Either::Right(right))), + Ok(None) => Ok(None), + Err(error) => Err(error), + } +} diff --git a/vendor/glob/.cargo-checksum.json b/vendor/glob/.cargo-checksum.json new file mode 100644 index 00000000000000..b5a6521b19a1ae --- /dev/null +++ b/vendor/glob/.cargo-checksum.json @@ -0,0 +1 @@ +{"files":{".cargo_vcs_info.json":"861149153b886c52f0f5b13f9401c61e7cb7581d8a1e0874be0c57983d232c9e",".github/dependabot.yml":"8e93631a765d23b8eeabcf3c5da80c850f2cab429c4e5c2c8d81f562f522bb3c",".github/workflows/publish.yml":"1bfb8b9fb856e6dfeaf481d7a440071e3dc8248f32b5c63ef03cb285d7f10b6e",".github/workflows/rust.yml":"b8738c208278b79af3e540339461065596907b9508208974c3c5b68f1a9e13b9","CHANGELOG.md":"1cf3525be59a348ffcda444cac1f16eba48b5a9177587ecd8d55af5b5a097a73","Cargo.lock":"745d71fb944e4c1ff5fe99d4cc61c12be4d602509692ca3b662d8cf1d0131c48","Cargo.toml":"1962525cc2a684e334a07ad2996eb587b5dbf678e58eb65733471570b49c0b6c","Cargo.toml.orig":"c5dde6f8a5a9bfe170a059cd67fa3cde5897a91da1b56bb036d405475cb3dadb","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"6485b8ed310d3f0340bf1ad1f47645069ce4069dcc6bb46c7d5c6faf41de1fdb","README.md":"0ceda5714ffd02ecc88084521bcd258f90ce4b01eca51d0d1cb602aaf5c47288","src/lib.rs":"2d714448a69d329a6dc51da264555321d2d20e6e84842a22036e63b4509e87ec","tests/glob-std.rs":"720727be7dde4d11d581c00abc1ac48fff864aac6cfedc13858d4f13bb38ff79","triagebot.toml":"a135e10c777cd13459559bdf74fb704c1379af7c9b0f70bc49fa6f5a837daa81"},"package":"0cc23270f6e1808e30a928bdc84dea0b9b4136a8bc82338574f23baf47bbd280"} \ No newline at end of file diff --git a/vendor/glob/.cargo_vcs_info.json b/vendor/glob/.cargo_vcs_info.json new file mode 100644 index 00000000000000..195cd9f9fe2262 
--- /dev/null +++ b/vendor/glob/.cargo_vcs_info.json @@ -0,0 +1,6 @@ +{ + "git": { + "sha1": "952da299a3a98893805133ec852ab29877e64e98" + }, + "path_in_vcs": "" +} \ No newline at end of file diff --git a/vendor/glob/.github/dependabot.yml b/vendor/glob/.github/dependabot.yml new file mode 100644 index 00000000000000..de9707038a5771 --- /dev/null +++ b/vendor/glob/.github/dependabot.yml @@ -0,0 +1,13 @@ +version: 2 +updates: + - package-ecosystem: "cargo" + directory: "/" + schedule: + interval: "monthly" + open-pull-requests-limit: 10 + ignore: + - dependency-name: "tempdir" + - package-ecosystem: "github-actions" + directory: "/" + schedule: + interval: "monthly" diff --git a/vendor/glob/.github/workflows/publish.yml b/vendor/glob/.github/workflows/publish.yml new file mode 100644 index 00000000000000..e715c61871fdda --- /dev/null +++ b/vendor/glob/.github/workflows/publish.yml @@ -0,0 +1,27 @@ +name: Release-plz + +permissions: + pull-requests: write + contents: write + +on: + push: + branches: + - master + +jobs: + release-plz: + name: Release-plz + runs-on: ubuntu-latest + steps: + - name: Checkout repository + uses: actions/checkout@v4 + with: + fetch-depth: 0 + - name: Install Rust (rustup) + run: rustup update nightly --no-self-update && rustup default nightly + - name: Run release-plz + uses: MarcoIeni/release-plz-action@v0.5 + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + CARGO_REGISTRY_TOKEN: ${{ secrets.CARGO_REGISTRY_TOKEN }} diff --git a/vendor/glob/.github/workflows/rust.yml b/vendor/glob/.github/workflows/rust.yml new file mode 100644 index 00000000000000..e16d2a9a066f1b --- /dev/null +++ b/vendor/glob/.github/workflows/rust.yml @@ -0,0 +1,99 @@ +name: CI + +env: + CARGO_TERM_VERBOSE: true + RUSTDOCFLAGS: -Dwarnings + RUSTFLAGS: -Dwarnings + +on: + pull_request: + push: + branches: + - master + +jobs: + test: + name: Tests + runs-on: ${{ matrix.os }} + strategy: + matrix: + channel: + - stable + - nightly + - 1.63.0 # MSRV of test dependencies + os: + - macos-13 # x86 MacOS + - macos-15 # Arm MacOS + - windows-2025 + - ubuntu-24.04 + include: + - channel: beta + os: ubuntu-24.04 + + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Update rust + run: | + rustup default ${{ matrix.channel }} + rustup update --no-self-update + + - run: cargo test --all + + clippy: + name: Clippy + runs-on: ubuntu-24.04 + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Update rust + run: | + # use beta since it gives us near-latest fixes but isn't as volatile as nightly + rustup default beta + rustup component add clippy + rustup update --no-self-update + - run: cargo clippy --all -- -Aclippy::while_let_loop + + msrv: + name: Check building with the MSRV + runs-on: ubuntu-24.04 + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Update rust + run: | + rustup default 1.63.0 + rustup update --no-self-update + + - run: cargo build + + rustfmt: + name: Rustfmt + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@master + - name: Install Rust + run: | + rustup default nightly + rustup update --no-self-update + rustup component add rustfmt + - run: cargo fmt -- --check + + success: + needs: + - test + - clippy + - msrv + - rustfmt + runs-on: ubuntu-latest + # GitHub branch protection is exceedingly silly and treats "jobs skipped because a dependency + # failed" as success. So we have to do some contortions to ensure the job fails if any of its + # dependencies fails. 
+ if: always() # make sure this is never "skipped" + steps: + # Manually check the status of all dependencies. `if: failure()` does not work. + - name: check if any dependency failed + run: jq --exit-status 'all(.result == "success")' <<< '${{ toJson(needs) }}' diff --git a/vendor/glob/CHANGELOG.md b/vendor/glob/CHANGELOG.md new file mode 100644 index 00000000000000..52d7c25af7fdeb --- /dev/null +++ b/vendor/glob/CHANGELOG.md @@ -0,0 +1,44 @@ +# Changelog + +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), +and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). + +## [Unreleased] + +## [0.3.3](https://github.com/rust-lang/glob/compare/v0.3.2...v0.3.3) - 2025-08-11 + +- Optimize memory allocations ([#147](https://github.com/rust-lang/glob/pull/147)) +- Bump the MSRV to 1.63 ([#172](https://github.com/rust-lang/glob/pull/172)) +- Fix spelling in pattern documentation ([#164](https://github.com/rust-lang/glob/pull/164)) +- Fix version numbers and some formatting ([#157](https://github.com/rust-lang/glob/pull/157)) +- Style fixes ([#137](https://github.com/rust-lang/glob/pull/137)) + +## [0.3.2](https://github.com/rust-lang/glob/compare/v0.3.1...v0.3.2) - 2024-12-28 + +## What's Changed +* Add fs::symlink_metadata to detect broken symlinks by @kyoheiu in https://github.com/rust-lang/glob/pull/105 +* Add support for windows verbatim disk paths by @nico-abram in https://github.com/rust-lang/glob/pull/112 +* Respect `require_literal_leading_dot` option in `glob_with` method for path components by @JohnTitor in https://github.com/rust-lang/glob/pull/128 +* Harden tests for symlink by @JohnTitor in https://github.com/rust-lang/glob/pull/127 +* Remove "extern crate" directions from README by @zmitchell in https://github.com/rust-lang/glob/pull/131 +* Add FIXME for tempdir by @JohnTitor in https://github.com/rust-lang/glob/pull/126 +* Cache information about file type by @Kobzol in https://github.com/rust-lang/glob/pull/135 +* Document the behaviour of ** with files by @Wilfred in https://github.com/rust-lang/glob/pull/138 +* Add dependabot by @oriontvv in https://github.com/rust-lang/glob/pull/139 +* Bump actions/checkout from 3 to 4 by @dependabot in https://github.com/rust-lang/glob/pull/140 +* Check only (no longer test) at the MSRV by @tgross35 in https://github.com/rust-lang/glob/pull/151 +* Add release-plz for automated releases by @tgross35 in https://github.com/rust-lang/glob/pull/150 + +## New Contributors +* @kyoheiu made their first contribution in https://github.com/rust-lang/glob/pull/105 +* @nico-abram made their first contribution in https://github.com/rust-lang/glob/pull/112 +* @zmitchell made their first contribution in https://github.com/rust-lang/glob/pull/131 +* @Kobzol made their first contribution in https://github.com/rust-lang/glob/pull/135 +* @Wilfred made their first contribution in https://github.com/rust-lang/glob/pull/138 +* @oriontvv made their first contribution in https://github.com/rust-lang/glob/pull/139 +* @dependabot made their first contribution in https://github.com/rust-lang/glob/pull/140 +* @tgross35 made their first contribution in https://github.com/rust-lang/glob/pull/151 + +**Full Changelog**: https://github.com/rust-lang/glob/compare/0.3.1...0.3.2 diff --git a/vendor/glob/Cargo.lock b/vendor/glob/Cargo.lock new file mode 100644 index 00000000000000..d1da04baabfc16 --- /dev/null +++ b/vendor/glob/Cargo.lock @@ -0,0 +1,107 
@@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. +version = 3 + +[[package]] +name = "doc-comment" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fea41bba32d969b513997752735605054bc0dfa92b4c56bf1189f2e174be7a10" + +[[package]] +name = "fuchsia-cprng" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a06f77d526c1a601b7c4cdd98f54b5eaabffc14d5f2f0296febdc7f357c6d3ba" + +[[package]] +name = "glob" +version = "0.3.3" +dependencies = [ + "doc-comment", + "tempdir", +] + +[[package]] +name = "libc" +version = "0.2.175" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6a82ae493e598baaea5209805c49bbf2ea7de956d50d7da0da1164f9c6d28543" + +[[package]] +name = "rand" +version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "552840b97013b1a26992c11eac34bdd778e464601a4c2054b5f0bff7c6761293" +dependencies = [ + "fuchsia-cprng", + "libc", + "rand_core 0.3.1", + "rdrand", + "winapi", +] + +[[package]] +name = "rand_core" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a6fdeb83b075e8266dcc8762c22776f6877a63111121f5f8c7411e5be7eed4b" +dependencies = [ + "rand_core 0.4.2", +] + +[[package]] +name = "rand_core" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c33a3c44ca05fa6f1807d8e6743f3824e8509beca625669633be0acbdf509dc" + +[[package]] +name = "rdrand" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "678054eb77286b51581ba43620cc911abf02758c91f93f479767aed0f90458b2" +dependencies = [ + "rand_core 0.3.1", +] + +[[package]] +name = "remove_dir_all" +version = "0.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3acd125665422973a33ac9d3dd2df85edad0f4ae9b00dafb1a05e43a9f5ef8e7" +dependencies = [ + "winapi", +] + +[[package]] +name = "tempdir" +version = "0.3.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "15f2b5fb00ccdf689e0149d1b1b3c03fead81c2b37735d812fa8bddbbf41b6d8" +dependencies = [ + "rand", + "remove_dir_all", +] + +[[package]] +name = "winapi" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" +dependencies = [ + "winapi-i686-pc-windows-gnu", + "winapi-x86_64-pc-windows-gnu", +] + +[[package]] +name = "winapi-i686-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" + +[[package]] +name = "winapi-x86_64-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" diff --git a/vendor/glob/Cargo.toml b/vendor/glob/Cargo.toml new file mode 100644 index 00000000000000..c72d5c564c7b11 --- /dev/null +++ b/vendor/glob/Cargo.toml @@ -0,0 +1,45 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies. 
+# +# If you are reading this file be aware that the original Cargo.toml +# will likely look very different (and much more reasonable). +# See Cargo.toml.orig for the original contents. + +[package] +rust-version = "1.63.0" +name = "glob" +version = "0.3.3" +authors = ["The Rust Project Developers"] +build = false +autolib = false +autobins = false +autoexamples = false +autotests = false +autobenches = false +description = """ +Support for matching file paths against Unix shell style patterns. +""" +homepage = "https://github.com/rust-lang/glob" +documentation = "https://docs.rs/glob" +readme = "README.md" +categories = ["filesystem"] +license = "MIT OR Apache-2.0" +repository = "https://github.com/rust-lang/glob" + +[lib] +name = "glob" +path = "src/lib.rs" + +[[test]] +name = "glob-std" +path = "tests/glob-std.rs" + +[dev-dependencies.doc-comment] +version = "0.3" + +[dev-dependencies.tempdir] +version = "0.3" diff --git a/vendor/glob/LICENSE-APACHE b/vendor/glob/LICENSE-APACHE new file mode 100644 index 00000000000000..16fe87b06e802f --- /dev/null +++ b/vendor/glob/LICENSE-APACHE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/vendor/glob/LICENSE-MIT b/vendor/glob/LICENSE-MIT new file mode 100644 index 00000000000000..39d4bdb5acd313 --- /dev/null +++ b/vendor/glob/LICENSE-MIT @@ -0,0 +1,25 @@ +Copyright (c) 2014 The Rust Project Developers + +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. diff --git a/vendor/glob/README.md b/vendor/glob/README.md new file mode 100644 index 00000000000000..3ad9ff8b41fff2 --- /dev/null +++ b/vendor/glob/README.md @@ -0,0 +1,38 @@ +glob +==== + +Support for matching file paths against Unix shell style patterns. + +[![Continuous integration](https://github.com/rust-lang/glob/actions/workflows/rust.yml/badge.svg)](https://github.com/rust-lang/glob/actions/workflows/rust.yml) + +[Documentation](https://docs.rs/glob) + +## Usage + +To use `glob`, add this to your `Cargo.toml`: + +```toml +[dependencies] +glob = "0.3.2" +``` + +If you're using Rust 1.30 or earlier, or edition 2015, add this to your crate root: + +```rust +extern crate glob; +``` + +## Examples + +Print all jpg files in /media/ and all of its subdirectories. + +```rust +use glob::glob; + +for entry in glob("/media/**/*.jpg").expect("Failed to read glob pattern") { + match entry { + Ok(path) => println!("{:?}", path.display()), + Err(e) => println!("{:?}", e), + } +} +``` diff --git a/vendor/glob/src/lib.rs b/vendor/glob/src/lib.rs new file mode 100644 index 00000000000000..133a17a3343811 --- /dev/null +++ b/vendor/glob/src/lib.rs @@ -0,0 +1,1511 @@ +// Copyright 2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Support for matching file paths against Unix shell style patterns. +//! +//! The `glob` and `glob_with` functions allow querying the filesystem for all +//! files that match a particular pattern (similar to the libc `glob` function). +//! The methods on the `Pattern` type provide functionality for checking if +//! individual paths match a particular pattern (similar to the libc `fnmatch` +//! function). +//! +//! For consistency across platforms, and for Windows support, this module +//! is implemented entirely in Rust rather than deferring to the libc +//! `glob`/`fnmatch` functions. +//! +//! # Examples +//! +//! To print all jpg files in `/media/` and all of its subdirectories. +//! +//! ```rust,no_run +//! use glob::glob; +//! +//! for entry in glob("/media/**/*.jpg").expect("Failed to read glob pattern") { +//! match entry { +//! Ok(path) => println!("{:?}", path.display()), +//! Err(e) => println!("{:?}", e), +//! } +//! } +//! ``` +//! +//! To print all files containing the letter "a", case insensitive, in a `local` +//! directory relative to the current working directory. This ignores errors +//! instead of printing them. +//! +//! ```rust,no_run +//! use glob::glob_with; +//! use glob::MatchOptions; +//! +//! let options = MatchOptions { +//! case_sensitive: false, +//! require_literal_separator: false, +//! require_literal_leading_dot: false, +//! }; +//! for entry in glob_with("local/*a*", options).unwrap() { +//! if let Ok(path) = entry { +//! println!("{:?}", path.display()) +//! } +//! } +//! 
``` + +#![doc( + html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png", + html_favicon_url = "https://www.rust-lang.org/favicon.ico", + html_root_url = "https://docs.rs/glob/0.3.1" +)] +#![deny(missing_docs)] +#![allow(clippy::while_let_loop)] + +#[cfg(test)] +#[macro_use] +extern crate doc_comment; + +#[cfg(test)] +doctest!("../README.md"); + +use std::cmp; +use std::cmp::Ordering; +use std::error::Error; +use std::fmt; +use std::fs; +use std::fs::DirEntry; +use std::io; +use std::ops::Deref; +use std::path::{self, Component, Path, PathBuf}; +use std::str::FromStr; + +use CharSpecifier::{CharRange, SingleChar}; +use MatchResult::{EntirePatternDoesntMatch, Match, SubPatternDoesntMatch}; +use PatternToken::AnyExcept; +use PatternToken::{AnyChar, AnyRecursiveSequence, AnySequence, AnyWithin, Char}; + +/// An iterator that yields `Path`s from the filesystem that match a particular +/// pattern. +/// +/// Note that it yields `GlobResult` in order to report any `IoErrors` that may +/// arise during iteration. If a directory matches but is unreadable, +/// thereby preventing its contents from being checked for matches, a +/// `GlobError` is returned to express this. +/// +/// See the `glob` function for more details. +#[derive(Debug)] +pub struct Paths { + dir_patterns: Vec, + require_dir: bool, + options: MatchOptions, + todo: Vec>, + scope: Option, +} + +/// Return an iterator that produces all the `Path`s that match the given +/// pattern using default match options, which may be absolute or relative to +/// the current working directory. +/// +/// This may return an error if the pattern is invalid. +/// +/// This method uses the default match options and is equivalent to calling +/// `glob_with(pattern, MatchOptions::new())`. Use `glob_with` directly if you +/// want to use non-default match options. +/// +/// When iterating, each result is a `GlobResult` which expresses the +/// possibility that there was an `IoError` when attempting to read the contents +/// of the matched path. In other words, each item returned by the iterator +/// will either be an `Ok(Path)` if the path matched, or an `Err(GlobError)` if +/// the path (partially) matched _but_ its contents could not be read in order +/// to determine if its contents matched. +/// +/// See the `Paths` documentation for more information. +/// +/// # Examples +/// +/// Consider a directory `/media/pictures` containing only the files +/// `kittens.jpg`, `puppies.jpg` and `hamsters.gif`: +/// +/// ```rust,no_run +/// use glob::glob; +/// +/// for entry in glob("/media/pictures/*.jpg").unwrap() { +/// match entry { +/// Ok(path) => println!("{:?}", path.display()), +/// +/// // if the path matched but was unreadable, +/// // thereby preventing its contents from matching +/// Err(e) => println!("{:?}", e), +/// } +/// } +/// ``` +/// +/// The above code will print: +/// +/// ```ignore +/// /media/pictures/kittens.jpg +/// /media/pictures/puppies.jpg +/// ``` +/// +/// If you want to ignore unreadable paths, you can use something like +/// `filter_map`: +/// +/// ```rust +/// use glob::glob; +/// use std::result::Result; +/// +/// for path in glob("/media/pictures/*.jpg").unwrap().filter_map(Result::ok) { +/// println!("{}", path.display()); +/// } +/// ``` +/// Paths are yielded in alphabetical order. 
+pub fn glob(pattern: &str) -> Result { + glob_with(pattern, MatchOptions::new()) +} + +/// Return an iterator that produces all the `Path`s that match the given +/// pattern using the specified match options, which may be absolute or relative +/// to the current working directory. +/// +/// This may return an error if the pattern is invalid. +/// +/// This function accepts Unix shell style patterns as described by +/// `Pattern::new(..)`. The options given are passed through unchanged to +/// `Pattern::matches_with(..)` with the exception that +/// `require_literal_separator` is always set to `true` regardless of the value +/// passed to this function. +/// +/// Paths are yielded in alphabetical order. +pub fn glob_with(pattern: &str, options: MatchOptions) -> Result { + #[cfg(windows)] + fn check_windows_verbatim(p: &Path) -> bool { + match p.components().next() { + Some(Component::Prefix(ref p)) => { + // Allow VerbatimDisk paths. std canonicalize() generates them, and they work fine + p.kind().is_verbatim() + && if let std::path::Prefix::VerbatimDisk(_) = p.kind() { + false + } else { + true + } + } + _ => false, + } + } + #[cfg(not(windows))] + fn check_windows_verbatim(_: &Path) -> bool { + false + } + + #[cfg(windows)] + fn to_scope(p: &Path) -> PathBuf { + // FIXME handle volume relative paths here + p.to_path_buf() + } + #[cfg(not(windows))] + fn to_scope(p: &Path) -> PathBuf { + p.to_path_buf() + } + + // make sure that the pattern is valid first, else early return with error + let _ = Pattern::new(pattern)?; + + let mut components = Path::new(pattern).components().peekable(); + loop { + match components.peek() { + Some(&Component::Prefix(..)) | Some(&Component::RootDir) => { + components.next(); + } + _ => break, + } + } + let rest = components.map(|s| s.as_os_str()).collect::(); + let normalized_pattern = Path::new(pattern).iter().collect::(); + let root_len = normalized_pattern.to_str().unwrap().len() - rest.to_str().unwrap().len(); + let root = if root_len > 0 { + Some(Path::new(&pattern[..root_len])) + } else { + None + }; + + if root_len > 0 && check_windows_verbatim(root.unwrap()) { + // FIXME: How do we want to handle verbatim paths? I'm inclined to + // return nothing, since we can't very well find all UNC shares with a + // 1-letter server name. + return Ok(Paths { + dir_patterns: Vec::new(), + require_dir: false, + options, + todo: Vec::new(), + scope: None, + }); + } + + let scope = root.map_or_else(|| PathBuf::from("."), to_scope); + let scope = PathWrapper::from_path(scope); + + let mut dir_patterns = Vec::new(); + let components = + pattern[cmp::min(root_len, pattern.len())..].split_terminator(path::is_separator); + + for component in components { + dir_patterns.push(Pattern::new(component)?); + } + + if root_len == pattern.len() { + dir_patterns.push(Pattern { + original: "".to_string(), + tokens: Vec::new(), + is_recursive: false, + has_metachars: false, + }); + } + + let last_is_separator = pattern.chars().next_back().map(path::is_separator); + let require_dir = last_is_separator == Some(true); + let todo = Vec::new(); + + Ok(Paths { + dir_patterns, + require_dir, + options, + todo, + scope: Some(scope), + }) +} + +/// A glob iteration error. +/// +/// This is typically returned when a particular path cannot be read +/// to determine if its contents match the glob pattern. This is possible +/// if the program lacks the appropriate permissions, for example. 
+#[derive(Debug)] +pub struct GlobError { + path: PathBuf, + error: io::Error, +} + +impl GlobError { + /// The Path that the error corresponds to. + pub fn path(&self) -> &Path { + &self.path + } + + /// The error in question. + pub fn error(&self) -> &io::Error { + &self.error + } + + /// Consumes self, returning the _raw_ underlying `io::Error` + pub fn into_error(self) -> io::Error { + self.error + } +} + +impl Error for GlobError { + #[allow(deprecated)] + fn description(&self) -> &str { + self.error.description() + } + + #[allow(unknown_lints, bare_trait_objects)] + fn cause(&self) -> Option<&Error> { + Some(&self.error) + } +} + +impl fmt::Display for GlobError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!( + f, + "attempting to read `{}` resulted in an error: {}", + self.path.display(), + self.error + ) + } +} + +#[derive(Debug)] +struct PathWrapper { + path: PathBuf, + is_directory: bool, +} + +impl PathWrapper { + fn from_dir_entry(path: PathBuf, e: DirEntry) -> Self { + let is_directory = e + .file_type() + .ok() + .and_then(|file_type| { + // We need to use fs::metadata to resolve the actual path + // if it's a symlink. + if file_type.is_symlink() { + None + } else { + Some(file_type.is_dir()) + } + }) + .or_else(|| fs::metadata(&path).map(|m| m.is_dir()).ok()) + .unwrap_or(false); + Self { path, is_directory } + } + fn from_path(path: PathBuf) -> Self { + let is_directory = fs::metadata(&path).map(|m| m.is_dir()).unwrap_or(false); + Self { path, is_directory } + } + + fn into_path(self) -> PathBuf { + self.path + } +} + +impl Deref for PathWrapper { + type Target = Path; + + fn deref(&self) -> &Self::Target { + self.path.deref() + } +} + +impl AsRef for PathWrapper { + fn as_ref(&self) -> &Path { + self.path.as_ref() + } +} + +/// An alias for a glob iteration result. +/// +/// This represents either a matched path or a glob iteration error, +/// such as failing to read a particular directory's contents. +pub type GlobResult = Result; + +impl Iterator for Paths { + type Item = GlobResult; + + fn next(&mut self) -> Option { + // the todo buffer hasn't been initialized yet, so it's done at this + // point rather than in glob() so that the errors are unified that is, + // failing to fill the buffer is an iteration error construction of the + // iterator (i.e. glob()) only fails if it fails to compile the Pattern + if let Some(scope) = self.scope.take() { + if !self.dir_patterns.is_empty() { + // Shouldn't happen, but we're using -1 as a special index. + assert!(self.dir_patterns.len() < usize::MAX); + + fill_todo(&mut self.todo, &self.dir_patterns, 0, &scope, self.options); + } + } + + loop { + if self.dir_patterns.is_empty() || self.todo.is_empty() { + return None; + } + + let (path, mut idx) = match self.todo.pop().unwrap() { + Ok(pair) => pair, + Err(e) => return Some(Err(e)), + }; + + // idx -1: was already checked by fill_todo, maybe path was '.' or + // '..' that we can't match here because of normalization. 
+ if idx == usize::MAX { + if self.require_dir && !path.is_directory { + continue; + } + return Some(Ok(path.into_path())); + } + + if self.dir_patterns[idx].is_recursive { + let mut next = idx; + + // collapse consecutive recursive patterns + while (next + 1) < self.dir_patterns.len() + && self.dir_patterns[next + 1].is_recursive + { + next += 1; + } + + if path.is_directory { + // the path is a directory, so it's a match + + // push this directory's contents + fill_todo( + &mut self.todo, + &self.dir_patterns, + next, + &path, + self.options, + ); + + if next == self.dir_patterns.len() - 1 { + // pattern ends in recursive pattern, so return this + // directory as a result + return Some(Ok(path.into_path())); + } else { + // advanced to the next pattern for this path + idx = next + 1; + } + } else if next == self.dir_patterns.len() - 1 { + // not a directory and it's the last pattern, meaning no + // match + continue; + } else { + // advanced to the next pattern for this path + idx = next + 1; + } + } + + // not recursive, so match normally + if self.dir_patterns[idx].matches_with( + { + match path.file_name().and_then(|s| s.to_str()) { + // FIXME (#9639): How do we handle non-utf8 filenames? + // Ignore them for now; ideally we'd still match them + // against a * + None => continue, + Some(x) => x, + } + }, + self.options, + ) { + if idx == self.dir_patterns.len() - 1 { + // it is not possible for a pattern to match a directory + // *AND* its children so we don't need to check the + // children + + if !self.require_dir || path.is_directory { + return Some(Ok(path.into_path())); + } + } else { + fill_todo( + &mut self.todo, + &self.dir_patterns, + idx + 1, + &path, + self.options, + ); + } + } + } + } +} + +/// A pattern parsing error. +#[derive(Debug)] +#[allow(missing_copy_implementations)] +pub struct PatternError { + /// The approximate character index of where the error occurred. + pub pos: usize, + + /// A message describing the error. + pub msg: &'static str, +} + +impl Error for PatternError { + fn description(&self) -> &str { + self.msg + } +} + +impl fmt::Display for PatternError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!( + f, + "Pattern syntax error near position {}: {}", + self.pos, self.msg + ) + } +} + +/// A compiled Unix shell style pattern. +/// +/// - `?` matches any single character. +/// +/// - `*` matches any (possibly empty) sequence of characters. +/// +/// - `**` matches the current directory and arbitrary +/// subdirectories. To match files in arbitrary subdirectories, use +/// `**/*`. +/// +/// This sequence **must** form a single path component, so both +/// `**a` and `b**` are invalid and will result in an error. A +/// sequence of more than two consecutive `*` characters is also +/// invalid. +/// +/// - `[...]` matches any character inside the brackets. Character sequences +/// can also specify ranges of characters, as ordered by Unicode, so e.g. +/// `[0-9]` specifies any character between 0 and 9 inclusive. An unclosed +/// bracket is invalid. +/// +/// - `[!...]` is the negation of `[...]`, i.e. it matches any characters +/// **not** in the brackets. +/// +/// - The metacharacters `?`, `*`, `[`, `]` can be matched by using brackets +/// (e.g. `[?]`). When a `]` occurs immediately following `[` or `[!` then it +/// is interpreted as being part of, rather then ending, the character set, so +/// `]` and NOT `]` can be matched by `[]]` and `[!]]` respectively. 
The `-` +/// character can be specified inside a character sequence pattern by placing +/// it at the start or the end, e.g. `[abc-]`. +#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Default, Debug)] +pub struct Pattern { + original: String, + tokens: Vec, + is_recursive: bool, + /// A bool value that indicates whether the pattern contains any metacharacters. + /// We use this information for some fast path optimizations. + has_metachars: bool, +} + +/// Show the original glob pattern. +impl fmt::Display for Pattern { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + self.original.fmt(f) + } +} + +impl FromStr for Pattern { + type Err = PatternError; + + fn from_str(s: &str) -> Result { + Self::new(s) + } +} + +#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)] +enum PatternToken { + Char(char), + AnyChar, + AnySequence, + AnyRecursiveSequence, + AnyWithin(Vec), + AnyExcept(Vec), +} + +#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)] +enum CharSpecifier { + SingleChar(char), + CharRange(char, char), +} + +#[derive(Copy, Clone, PartialEq)] +enum MatchResult { + Match, + SubPatternDoesntMatch, + EntirePatternDoesntMatch, +} + +const ERROR_WILDCARDS: &str = "wildcards are either regular `*` or recursive `**`"; +const ERROR_RECURSIVE_WILDCARDS: &str = "recursive wildcards must form a single path \ + component"; +const ERROR_INVALID_RANGE: &str = "invalid range pattern"; + +impl Pattern { + /// This function compiles Unix shell style patterns. + /// + /// An invalid glob pattern will yield a `PatternError`. + pub fn new(pattern: &str) -> Result { + let chars = pattern.chars().collect::>(); + let mut tokens = Vec::new(); + let mut is_recursive = false; + let mut has_metachars = false; + let mut i = 0; + + while i < chars.len() { + match chars[i] { + '?' => { + has_metachars = true; + tokens.push(AnyChar); + i += 1; + } + '*' => { + has_metachars = true; + + let old = i; + + while i < chars.len() && chars[i] == '*' { + i += 1; + } + + let count = i - old; + + match count.cmp(&2) { + Ordering::Greater => { + return Err(PatternError { + pos: old + 2, + msg: ERROR_WILDCARDS, + }) + } + Ordering::Equal => { + // ** can only be an entire path component + // i.e. a/**/b is valid, but a**/b or a/**b is not + // invalid matches are treated literally + let is_valid = if i == 2 || path::is_separator(chars[i - count - 1]) { + // it ends in a '/' + if i < chars.len() && path::is_separator(chars[i]) { + i += 1; + true + // or the pattern ends here + // this enables the existing globbing mechanism + } else if i == chars.len() { + true + // `**` ends in non-separator + } else { + return Err(PatternError { + pos: i, + msg: ERROR_RECURSIVE_WILDCARDS, + }); + } + // `**` begins with non-separator + } else { + return Err(PatternError { + pos: old - 1, + msg: ERROR_RECURSIVE_WILDCARDS, + }); + }; + + if is_valid { + // collapse consecutive AnyRecursiveSequence to a + // single one + + let tokens_len = tokens.len(); + + if !(tokens_len > 1 + && tokens[tokens_len - 1] == AnyRecursiveSequence) + { + is_recursive = true; + tokens.push(AnyRecursiveSequence); + } + } + } + Ordering::Less => tokens.push(AnySequence), + } + } + '[' => { + has_metachars = true; + + if i + 4 <= chars.len() && chars[i + 1] == '!' 
{ + match chars[i + 3..].iter().position(|x| *x == ']') { + None => (), + Some(j) => { + let chars = &chars[i + 2..i + 3 + j]; + let cs = parse_char_specifiers(chars); + tokens.push(AnyExcept(cs)); + i += j + 4; + continue; + } + } + } else if i + 3 <= chars.len() && chars[i + 1] != '!' { + match chars[i + 2..].iter().position(|x| *x == ']') { + None => (), + Some(j) => { + let cs = parse_char_specifiers(&chars[i + 1..i + 2 + j]); + tokens.push(AnyWithin(cs)); + i += j + 3; + continue; + } + } + } + + // if we get here then this is not a valid range pattern + return Err(PatternError { + pos: i, + msg: ERROR_INVALID_RANGE, + }); + } + c => { + tokens.push(Char(c)); + i += 1; + } + } + } + + Ok(Self { + tokens, + original: pattern.to_string(), + is_recursive, + has_metachars, + }) + } + + /// Escape metacharacters within the given string by surrounding them in + /// brackets. The resulting string will, when compiled into a `Pattern`, + /// match the input string and nothing else. + pub fn escape(s: &str) -> String { + let mut escaped = String::new(); + for c in s.chars() { + match c { + // note that ! does not need escaping because it is only special + // inside brackets + '?' | '*' | '[' | ']' => { + escaped.push('['); + escaped.push(c); + escaped.push(']'); + } + c => { + escaped.push(c); + } + } + } + escaped + } + + /// Return if the given `str` matches this `Pattern` using the default + /// match options (i.e. `MatchOptions::new()`). + /// + /// # Examples + /// + /// ```rust + /// use glob::Pattern; + /// + /// assert!(Pattern::new("c?t").unwrap().matches("cat")); + /// assert!(Pattern::new("k[!e]tteh").unwrap().matches("kitteh")); + /// assert!(Pattern::new("d*g").unwrap().matches("doog")); + /// ``` + pub fn matches(&self, str: &str) -> bool { + self.matches_with(str, MatchOptions::new()) + } + + /// Return if the given `Path`, when converted to a `str`, matches this + /// `Pattern` using the default match options (i.e. `MatchOptions::new()`). + pub fn matches_path(&self, path: &Path) -> bool { + // FIXME (#9639): This needs to handle non-utf8 paths + path.to_str().map_or(false, |s| self.matches(s)) + } + + /// Return if the given `str` matches this `Pattern` using the specified + /// match options. + pub fn matches_with(&self, str: &str, options: MatchOptions) -> bool { + self.matches_from(true, str.chars(), 0, options) == Match + } + + /// Return if the given `Path`, when converted to a `str`, matches this + /// `Pattern` using the specified match options. + pub fn matches_path_with(&self, path: &Path, options: MatchOptions) -> bool { + // FIXME (#9639): This needs to handle non-utf8 paths + path.to_str() + .map_or(false, |s| self.matches_with(s, options)) + } + + /// Access the original glob pattern. + pub fn as_str(&self) -> &str { + &self.original + } + + fn matches_from( + &self, + mut follows_separator: bool, + mut file: std::str::Chars, + i: usize, + options: MatchOptions, + ) -> MatchResult { + for (ti, token) in self.tokens[i..].iter().enumerate() { + match *token { + AnySequence | AnyRecursiveSequence => { + // ** must be at the start. + debug_assert!(match *token { + AnyRecursiveSequence => follows_separator, + _ => true, + }); + + // Empty match + match self.matches_from(follows_separator, file.clone(), i + ti + 1, options) { + SubPatternDoesntMatch => (), // keep trying + m => return m, + }; + + while let Some(c) = file.next() { + if follows_separator && options.require_literal_leading_dot && c == '.' 
{ + return SubPatternDoesntMatch; + } + follows_separator = path::is_separator(c); + match *token { + AnyRecursiveSequence if !follows_separator => continue, + AnySequence + if options.require_literal_separator && follows_separator => + { + return SubPatternDoesntMatch + } + _ => (), + } + match self.matches_from( + follows_separator, + file.clone(), + i + ti + 1, + options, + ) { + SubPatternDoesntMatch => (), // keep trying + m => return m, + } + } + } + _ => { + let c = match file.next() { + Some(c) => c, + None => return EntirePatternDoesntMatch, + }; + + let is_sep = path::is_separator(c); + + if !match *token { + AnyChar | AnyWithin(..) | AnyExcept(..) + if (options.require_literal_separator && is_sep) + || (follows_separator + && options.require_literal_leading_dot + && c == '.') => + { + false + } + AnyChar => true, + AnyWithin(ref specifiers) => in_char_specifiers(specifiers, c, options), + AnyExcept(ref specifiers) => !in_char_specifiers(specifiers, c, options), + Char(c2) => chars_eq(c, c2, options.case_sensitive), + AnySequence | AnyRecursiveSequence => unreachable!(), + } { + return SubPatternDoesntMatch; + } + follows_separator = is_sep; + } + } + } + + // Iter is fused. + if file.next().is_none() { + Match + } else { + SubPatternDoesntMatch + } + } +} + +// Fills `todo` with paths under `path` to be matched by `patterns[idx]`, +// special-casing patterns to match `.` and `..`, and avoiding `readdir()` +// calls when there are no metacharacters in the pattern. +fn fill_todo( + todo: &mut Vec>, + patterns: &[Pattern], + idx: usize, + path: &PathWrapper, + options: MatchOptions, +) { + let add = |todo: &mut Vec<_>, next_path: PathWrapper| { + if idx + 1 == patterns.len() { + // We know it's good, so don't make the iterator match this path + // against the pattern again. In particular, it can't match + // . or .. globs since these never show up as path components. + todo.push(Ok((next_path, usize::MAX))); + } else { + fill_todo(todo, patterns, idx + 1, &next_path, options); + } + }; + + let pattern = &patterns[idx]; + let is_dir = path.is_directory; + let curdir = path.as_ref() == Path::new("."); + match (pattern.has_metachars, is_dir) { + (false, _) => { + debug_assert!( + pattern + .tokens + .iter() + .all(|tok| matches!(tok, PatternToken::Char(_))), + "broken invariant: pattern has metachars but shouldn't" + ); + let s = pattern.as_str(); + + // This pattern component doesn't have any metacharacters, so we + // don't need to read the current directory to know where to + // continue. So instead of passing control back to the iterator, + // we can just check for that one entry and potentially recurse + // right away. + let special = "." == s || ".." 
== s; + let next_path = if curdir { + PathBuf::from(s) + } else { + path.join(s) + }; + let next_path = PathWrapper::from_path(next_path); + if (special && is_dir) + || (!special + && (fs::metadata(&next_path).is_ok() + || fs::symlink_metadata(&next_path).is_ok())) + { + add(todo, next_path); + } + } + (true, true) => { + let dirs = fs::read_dir(path).and_then(|d| { + d.map(|e| { + e.map(|e| { + let path = if curdir { + PathBuf::from(e.path().file_name().unwrap()) + } else { + e.path() + }; + PathWrapper::from_dir_entry(path, e) + }) + }) + .collect::, _>>() + }); + match dirs { + Ok(mut children) => { + if options.require_literal_leading_dot { + children + .retain(|x| !x.file_name().unwrap().to_str().unwrap().starts_with('.')); + } + children.sort_by(|p1, p2| p2.file_name().cmp(&p1.file_name())); + todo.extend(children.into_iter().map(|x| Ok((x, idx)))); + + // Matching the special directory entries . and .. that + // refer to the current and parent directory respectively + // requires that the pattern has a leading dot, even if the + // `MatchOptions` field `require_literal_leading_dot` is not + // set. + if !pattern.tokens.is_empty() && pattern.tokens[0] == Char('.') { + for &special in &[".", ".."] { + if pattern.matches_with(special, options) { + add(todo, PathWrapper::from_path(path.join(special))); + } + } + } + } + Err(e) => { + todo.push(Err(GlobError { + path: path.to_path_buf(), + error: e, + })); + } + } + } + (true, false) => { + // not a directory, nothing more to find + } + } +} + +fn parse_char_specifiers(s: &[char]) -> Vec { + let mut cs = Vec::new(); + let mut i = 0; + while i < s.len() { + if i + 3 <= s.len() && s[i + 1] == '-' { + cs.push(CharRange(s[i], s[i + 2])); + i += 3; + } else { + cs.push(SingleChar(s[i])); + i += 1; + } + } + cs +} + +fn in_char_specifiers(specifiers: &[CharSpecifier], c: char, options: MatchOptions) -> bool { + for &specifier in specifiers.iter() { + match specifier { + SingleChar(sc) => { + if chars_eq(c, sc, options.case_sensitive) { + return true; + } + } + CharRange(start, end) => { + // FIXME: work with non-ascii chars properly (issue #1347) + if !options.case_sensitive && c.is_ascii() && start.is_ascii() && end.is_ascii() { + let start = start.to_ascii_lowercase(); + let end = end.to_ascii_lowercase(); + + let start_up = start.to_uppercase().next().unwrap(); + let end_up = end.to_uppercase().next().unwrap(); + + // only allow case insensitive matching when + // both start and end are within a-z or A-Z + if start != start_up && end != end_up { + let c = c.to_ascii_lowercase(); + if c >= start && c <= end { + return true; + } + } + } + + if c >= start && c <= end { + return true; + } + } + } + } + + false +} + +/// A helper function to determine if two chars are (possibly case-insensitively) equal. +fn chars_eq(a: char, b: char, case_sensitive: bool) -> bool { + if cfg!(windows) && path::is_separator(a) && path::is_separator(b) { + true + } else if !case_sensitive && a.is_ascii() && b.is_ascii() { + // FIXME: work with non-ascii chars properly (issue #9084) + a.eq_ignore_ascii_case(&b) + } else { + a == b + } +} + +/// Configuration options to modify the behaviour of `Pattern::matches_with(..)`. +#[allow(missing_copy_implementations)] +#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] +pub struct MatchOptions { + /// Whether or not patterns should be matched in a case-sensitive manner. 
+ /// This currently only considers upper/lower case relationships between + /// ASCII characters, but in future this might be extended to work with + /// Unicode. + pub case_sensitive: bool, + + /// Whether or not path-component separator characters (e.g. `/` on + /// Posix) must be matched by a literal `/`, rather than by `*` or `?` or + /// `[...]`. + pub require_literal_separator: bool, + + /// Whether or not paths that contain components that start with a `.` + /// will require that `.` appears literally in the pattern; `*`, `?`, `**`, + /// or `[...]` will not match. This is useful because such files are + /// conventionally considered hidden on Unix systems and it might be + /// desirable to skip them when listing files. + pub require_literal_leading_dot: bool, +} + +impl MatchOptions { + /// Constructs a new `MatchOptions` with default field values. This is used + /// when calling functions that do not take an explicit `MatchOptions` + /// parameter. + /// + /// This function always returns this value: + /// + /// ```rust,ignore + /// MatchOptions { + /// case_sensitive: true, + /// require_literal_separator: false, + /// require_literal_leading_dot: false + /// } + /// ``` + /// + /// # Note + /// The behavior of this method doesn't match `default()`'s. This returns + /// `case_sensitive` as `true` while `default()` does it as `false`. + // FIXME: Consider unity the behavior with `default()` in a next major release. + pub fn new() -> Self { + Self { + case_sensitive: true, + require_literal_separator: false, + require_literal_leading_dot: false, + } + } +} + +#[cfg(test)] +mod test { + use super::{glob, MatchOptions, Pattern}; + use std::path::Path; + + #[test] + fn test_pattern_from_str() { + assert!("a*b".parse::().unwrap().matches("a_b")); + assert!("a/**b".parse::().unwrap_err().pos == 4); + } + + #[test] + fn test_wildcard_errors() { + assert!(Pattern::new("a/**b").unwrap_err().pos == 4); + assert!(Pattern::new("a/bc**").unwrap_err().pos == 3); + assert!(Pattern::new("a/*****").unwrap_err().pos == 4); + assert!(Pattern::new("a/b**c**d").unwrap_err().pos == 2); + assert!(Pattern::new("a**b").unwrap_err().pos == 0); + } + + #[test] + fn test_unclosed_bracket_errors() { + assert!(Pattern::new("abc[def").unwrap_err().pos == 3); + assert!(Pattern::new("abc[!def").unwrap_err().pos == 3); + assert!(Pattern::new("abc[").unwrap_err().pos == 3); + assert!(Pattern::new("abc[!").unwrap_err().pos == 3); + assert!(Pattern::new("abc[d").unwrap_err().pos == 3); + assert!(Pattern::new("abc[!d").unwrap_err().pos == 3); + assert!(Pattern::new("abc[]").unwrap_err().pos == 3); + assert!(Pattern::new("abc[!]").unwrap_err().pos == 3); + } + + #[test] + fn test_glob_errors() { + assert!(glob("a/**b").err().unwrap().pos == 4); + assert!(glob("abc[def").err().unwrap().pos == 3); + } + + // this test assumes that there is a /root directory and that + // the user running this test is not root or otherwise doesn't + // have permission to read its contents + #[cfg(all(unix, not(target_os = "macos")))] + #[test] + fn test_iteration_errors() { + use std::io; + let mut iter = glob("/root/*").unwrap(); + + // GlobErrors shouldn't halt iteration + let next = iter.next(); + assert!(next.is_some()); + + let err = next.unwrap(); + assert!(err.is_err()); + + let err = err.err().unwrap(); + assert!(err.path() == Path::new("/root")); + assert!(err.error().kind() == io::ErrorKind::PermissionDenied); + } + + #[test] + fn test_absolute_pattern() { + assert!(glob("/").unwrap().next().is_some()); + 
assert!(glob("//").unwrap().next().is_some()); + + // assume that the filesystem is not empty! + assert!(glob("/*").unwrap().next().is_some()); + + #[cfg(not(windows))] + fn win() {} + + #[cfg(windows)] + fn win() { + use std::env::current_dir; + use std::path::Component; + + // check windows absolute paths with host/device components + let root_with_device = current_dir() + .ok() + .and_then(|p| match p.components().next().unwrap() { + Component::Prefix(prefix_component) => { + let path = Path::new(prefix_component.as_os_str()).join("*"); + Some(path.to_path_buf()) + } + _ => panic!("no prefix in this path"), + }) + .unwrap(); + // FIXME (#9639): This needs to handle non-utf8 paths + assert!(glob(root_with_device.as_os_str().to_str().unwrap()) + .unwrap() + .next() + .is_some()); + } + win() + } + + #[test] + fn test_wildcards() { + assert!(Pattern::new("a*b").unwrap().matches("a_b")); + assert!(Pattern::new("a*b*c").unwrap().matches("abc")); + assert!(!Pattern::new("a*b*c").unwrap().matches("abcd")); + assert!(Pattern::new("a*b*c").unwrap().matches("a_b_c")); + assert!(Pattern::new("a*b*c").unwrap().matches("a___b___c")); + assert!(Pattern::new("abc*abc*abc") + .unwrap() + .matches("abcabcabcabcabcabcabc")); + assert!(!Pattern::new("abc*abc*abc") + .unwrap() + .matches("abcabcabcabcabcabcabca")); + assert!(Pattern::new("a*a*a*a*a*a*a*a*a") + .unwrap() + .matches("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa")); + assert!(Pattern::new("a*b[xyz]c*d").unwrap().matches("abxcdbxcddd")); + } + + #[test] + fn test_recursive_wildcards() { + let pat = Pattern::new("some/**/needle.txt").unwrap(); + assert!(pat.matches("some/needle.txt")); + assert!(pat.matches("some/one/needle.txt")); + assert!(pat.matches("some/one/two/needle.txt")); + assert!(pat.matches("some/other/needle.txt")); + assert!(!pat.matches("some/other/notthis.txt")); + + // a single ** should be valid, for globs + // Should accept anything + let pat = Pattern::new("**").unwrap(); + assert!(pat.is_recursive); + assert!(pat.matches("abcde")); + assert!(pat.matches("")); + assert!(pat.matches(".asdf")); + assert!(pat.matches("/x/.asdf")); + + // collapse consecutive wildcards + let pat = Pattern::new("some/**/**/needle.txt").unwrap(); + assert!(pat.matches("some/needle.txt")); + assert!(pat.matches("some/one/needle.txt")); + assert!(pat.matches("some/one/two/needle.txt")); + assert!(pat.matches("some/other/needle.txt")); + assert!(!pat.matches("some/other/notthis.txt")); + + // ** can begin the pattern + let pat = Pattern::new("**/test").unwrap(); + assert!(pat.matches("one/two/test")); + assert!(pat.matches("one/test")); + assert!(pat.matches("test")); + + // /** can begin the pattern + let pat = Pattern::new("/**/test").unwrap(); + assert!(pat.matches("/one/two/test")); + assert!(pat.matches("/one/test")); + assert!(pat.matches("/test")); + assert!(!pat.matches("/one/notthis")); + assert!(!pat.matches("/notthis")); + + // Only start sub-patterns on start of path segment. 
+ let pat = Pattern::new("**/.*").unwrap(); + assert!(pat.matches(".abc")); + assert!(pat.matches("abc/.abc")); + assert!(!pat.matches("ab.c")); + assert!(!pat.matches("abc/ab.c")); + } + + #[test] + fn test_lots_of_files() { + // this is a good test because it touches lots of differently named files + glob("/*/*/*/*").unwrap().skip(10000).next(); + } + + #[test] + fn test_range_pattern() { + let pat = Pattern::new("a[0-9]b").unwrap(); + for i in 0..10 { + assert!(pat.matches(&format!("a{}b", i))); + } + assert!(!pat.matches("a_b")); + + let pat = Pattern::new("a[!0-9]b").unwrap(); + for i in 0..10 { + assert!(!pat.matches(&format!("a{}b", i))); + } + assert!(pat.matches("a_b")); + + let pats = ["[a-z123]", "[1a-z23]", "[123a-z]"]; + for &p in pats.iter() { + let pat = Pattern::new(p).unwrap(); + for c in "abcdefghijklmnopqrstuvwxyz".chars() { + assert!(pat.matches(&c.to_string())); + } + for c in "ABCDEFGHIJKLMNOPQRSTUVWXYZ".chars() { + let options = MatchOptions { + case_sensitive: false, + ..MatchOptions::new() + }; + assert!(pat.matches_with(&c.to_string(), options)); + } + assert!(pat.matches("1")); + assert!(pat.matches("2")); + assert!(pat.matches("3")); + } + + let pats = ["[abc-]", "[-abc]", "[a-c-]"]; + for &p in pats.iter() { + let pat = Pattern::new(p).unwrap(); + assert!(pat.matches("a")); + assert!(pat.matches("b")); + assert!(pat.matches("c")); + assert!(pat.matches("-")); + assert!(!pat.matches("d")); + } + + let pat = Pattern::new("[2-1]").unwrap(); + assert!(!pat.matches("1")); + assert!(!pat.matches("2")); + + assert!(Pattern::new("[-]").unwrap().matches("-")); + assert!(!Pattern::new("[!-]").unwrap().matches("-")); + } + + #[test] + fn test_pattern_matches() { + let txt_pat = Pattern::new("*hello.txt").unwrap(); + assert!(txt_pat.matches("hello.txt")); + assert!(txt_pat.matches("gareth_says_hello.txt")); + assert!(txt_pat.matches("some/path/to/hello.txt")); + assert!(txt_pat.matches("some\\path\\to\\hello.txt")); + assert!(txt_pat.matches("/an/absolute/path/to/hello.txt")); + assert!(!txt_pat.matches("hello.txt-and-then-some")); + assert!(!txt_pat.matches("goodbye.txt")); + + let dir_pat = Pattern::new("*some/path/to/hello.txt").unwrap(); + assert!(dir_pat.matches("some/path/to/hello.txt")); + assert!(dir_pat.matches("a/bigger/some/path/to/hello.txt")); + assert!(!dir_pat.matches("some/path/to/hello.txt-and-then-some")); + assert!(!dir_pat.matches("some/other/path/to/hello.txt")); + } + + #[test] + fn test_pattern_escape() { + let s = "_[_]_?_*_!_"; + assert_eq!(Pattern::escape(s), "_[[]_[]]_[?]_[*]_!_".to_string()); + assert!(Pattern::new(&Pattern::escape(s)).unwrap().matches(s)); + } + + #[test] + fn test_pattern_matches_case_insensitive() { + let pat = Pattern::new("aBcDeFg").unwrap(); + let options = MatchOptions { + case_sensitive: false, + require_literal_separator: false, + require_literal_leading_dot: false, + }; + + assert!(pat.matches_with("aBcDeFg", options)); + assert!(pat.matches_with("abcdefg", options)); + assert!(pat.matches_with("ABCDEFG", options)); + assert!(pat.matches_with("AbCdEfG", options)); + } + + #[test] + fn test_pattern_matches_case_insensitive_range() { + let pat_within = Pattern::new("[a]").unwrap(); + let pat_except = Pattern::new("[!a]").unwrap(); + + let options_case_insensitive = MatchOptions { + case_sensitive: false, + require_literal_separator: false, + require_literal_leading_dot: false, + }; + let options_case_sensitive = MatchOptions { + case_sensitive: true, + require_literal_separator: false, + require_literal_leading_dot: 
false, + }; + + assert!(pat_within.matches_with("a", options_case_insensitive)); + assert!(pat_within.matches_with("A", options_case_insensitive)); + assert!(!pat_within.matches_with("A", options_case_sensitive)); + + assert!(!pat_except.matches_with("a", options_case_insensitive)); + assert!(!pat_except.matches_with("A", options_case_insensitive)); + assert!(pat_except.matches_with("A", options_case_sensitive)); + } + + #[test] + fn test_pattern_matches_require_literal_separator() { + let options_require_literal = MatchOptions { + case_sensitive: true, + require_literal_separator: true, + require_literal_leading_dot: false, + }; + let options_not_require_literal = MatchOptions { + case_sensitive: true, + require_literal_separator: false, + require_literal_leading_dot: false, + }; + + assert!(Pattern::new("abc/def") + .unwrap() + .matches_with("abc/def", options_require_literal)); + assert!(!Pattern::new("abc?def") + .unwrap() + .matches_with("abc/def", options_require_literal)); + assert!(!Pattern::new("abc*def") + .unwrap() + .matches_with("abc/def", options_require_literal)); + assert!(!Pattern::new("abc[/]def") + .unwrap() + .matches_with("abc/def", options_require_literal)); + + assert!(Pattern::new("abc/def") + .unwrap() + .matches_with("abc/def", options_not_require_literal)); + assert!(Pattern::new("abc?def") + .unwrap() + .matches_with("abc/def", options_not_require_literal)); + assert!(Pattern::new("abc*def") + .unwrap() + .matches_with("abc/def", options_not_require_literal)); + assert!(Pattern::new("abc[/]def") + .unwrap() + .matches_with("abc/def", options_not_require_literal)); + } + + #[test] + fn test_pattern_matches_require_literal_leading_dot() { + let options_require_literal_leading_dot = MatchOptions { + case_sensitive: true, + require_literal_separator: false, + require_literal_leading_dot: true, + }; + let options_not_require_literal_leading_dot = MatchOptions { + case_sensitive: true, + require_literal_separator: false, + require_literal_leading_dot: false, + }; + + let f = |options| { + Pattern::new("*.txt") + .unwrap() + .matches_with(".hello.txt", options) + }; + assert!(f(options_not_require_literal_leading_dot)); + assert!(!f(options_require_literal_leading_dot)); + + let f = |options| { + Pattern::new(".*.*") + .unwrap() + .matches_with(".hello.txt", options) + }; + assert!(f(options_not_require_literal_leading_dot)); + assert!(f(options_require_literal_leading_dot)); + + let f = |options| { + Pattern::new("aaa/bbb/*") + .unwrap() + .matches_with("aaa/bbb/.ccc", options) + }; + assert!(f(options_not_require_literal_leading_dot)); + assert!(!f(options_require_literal_leading_dot)); + + let f = |options| { + Pattern::new("aaa/bbb/*") + .unwrap() + .matches_with("aaa/bbb/c.c.c.", options) + }; + assert!(f(options_not_require_literal_leading_dot)); + assert!(f(options_require_literal_leading_dot)); + + let f = |options| { + Pattern::new("aaa/bbb/.*") + .unwrap() + .matches_with("aaa/bbb/.ccc", options) + }; + assert!(f(options_not_require_literal_leading_dot)); + assert!(f(options_require_literal_leading_dot)); + + let f = |options| { + Pattern::new("aaa/?bbb") + .unwrap() + .matches_with("aaa/.bbb", options) + }; + assert!(f(options_not_require_literal_leading_dot)); + assert!(!f(options_require_literal_leading_dot)); + + let f = |options| { + Pattern::new("aaa/[.]bbb") + .unwrap() + .matches_with("aaa/.bbb", options) + }; + assert!(f(options_not_require_literal_leading_dot)); + assert!(!f(options_require_literal_leading_dot)); + + let f = |options| 
Pattern::new("**/*").unwrap().matches_with(".bbb", options); + assert!(f(options_not_require_literal_leading_dot)); + assert!(!f(options_require_literal_leading_dot)); + } + + #[test] + fn test_matches_path() { + // on windows, (Path::new("a/b").as_str().unwrap() == "a\\b"), so this + // tests that / and \ are considered equivalent on windows + assert!(Pattern::new("a/b").unwrap().matches_path(Path::new("a/b"))); + } + + #[test] + fn test_path_join() { + let pattern = Path::new("one").join(Path::new("**/*.rs")); + assert!(Pattern::new(pattern.to_str().unwrap()).is_ok()); + } +} diff --git a/vendor/glob/tests/glob-std.rs b/vendor/glob/tests/glob-std.rs new file mode 100644 index 00000000000000..ba12701e36f9f6 --- /dev/null +++ b/vendor/glob/tests/glob-std.rs @@ -0,0 +1,477 @@ +// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +// ignore-windows TempDir may cause IoError on windows: #10462 + +#![cfg_attr(test, deny(warnings))] + +extern crate glob; +extern crate tempdir; + +use glob::{glob, glob_with}; +use std::env; +use std::fs; +use std::path::PathBuf; +use tempdir::TempDir; + +#[test] +fn main() { + fn mk_file(path: &str, directory: bool) { + if directory { + fs::create_dir(path).unwrap(); + } else { + fs::File::create(path).unwrap(); + } + } + + fn mk_symlink_file(original: &str, link: &str) { + #[cfg(unix)] + { + use std::os::unix::fs::symlink; + symlink(original, link).unwrap(); + } + #[cfg(windows)] + { + use std::os::windows::fs::symlink_file; + symlink_file(original, link).unwrap(); + } + } + + fn mk_symlink_dir(original: &str, link: &str) { + #[cfg(unix)] + { + use std::os::unix::fs::symlink; + symlink(original, link).unwrap(); + } + #[cfg(windows)] + { + use std::os::windows::fs::symlink_dir; + symlink_dir(original, link).unwrap(); + } + } + + fn glob_vec(pattern: &str) -> Vec { + glob(pattern).unwrap().map(|r| r.unwrap()).collect() + } + + fn glob_with_vec(pattern: &str, options: glob::MatchOptions) -> Vec { + glob_with(pattern, options) + .unwrap() + .map(|r| r.unwrap()) + .collect() + } + + let root = TempDir::new("glob-tests"); + let root = root.ok().expect("Should have created a temp directory"); + assert!(env::set_current_dir(root.path()).is_ok()); + + mk_file("aaa", true); + mk_file("aaa/apple", true); + mk_file("aaa/orange", true); + mk_file("aaa/tomato", true); + mk_file("aaa/tomato/tomato.txt", false); + mk_file("aaa/tomato/tomoto.txt", false); + mk_file("bbb", true); + mk_file("bbb/specials", true); + mk_file("bbb/specials/!", false); + // a valid symlink + mk_symlink_file("aaa/apple", "aaa/green_apple"); + // a broken symlink + mk_symlink_file("aaa/setsuna", "aaa/kazusa"); + + // windows does not allow `*` or `?` characters to exist in filenames + if env::consts::FAMILY != "windows" { + mk_file("bbb/specials/*", false); + mk_file("bbb/specials/?", false); + } + + mk_file("bbb/specials/[", false); + mk_file("bbb/specials/]", false); + mk_file("ccc", true); + mk_file("xyz", true); + mk_file("xyz/x", false); + mk_file("xyz/y", false); + mk_file("xyz/z", false); + + mk_file("r", true); + mk_file("r/current_dir.md", false); + mk_file("r/one", true); + mk_file("r/one/a.md", false); + mk_file("r/one/another", true); + mk_file("r/one/another/a.md", false); + 
mk_file("r/one/another/deep", true); + mk_file("r/one/another/deep/spelunking.md", false); + mk_file("r/another", true); + mk_file("r/another/a.md", false); + mk_file("r/two", true); + mk_file("r/two/b.md", false); + mk_file("r/three", true); + mk_file("r/three/c.md", false); + + mk_file("dirsym", true); + mk_symlink_dir(root.path().join("r").to_str().unwrap(), "dirsym/link"); + + assert_eq!( + glob_vec("dirsym/**/*.md"), + vec!( + PathBuf::from("dirsym/link/another/a.md"), + PathBuf::from("dirsym/link/current_dir.md"), + PathBuf::from("dirsym/link/one/a.md"), + PathBuf::from("dirsym/link/one/another/a.md"), + PathBuf::from("dirsym/link/one/another/deep/spelunking.md"), + PathBuf::from("dirsym/link/three/c.md"), + PathBuf::from("dirsym/link/two/b.md") + ) + ); + + // all recursive entities + assert_eq!( + glob_vec("r/**"), + vec!( + PathBuf::from("r/another"), + PathBuf::from("r/one"), + PathBuf::from("r/one/another"), + PathBuf::from("r/one/another/deep"), + PathBuf::from("r/three"), + PathBuf::from("r/two") + ) + ); + + // std-canonicalized windows verbatim disk paths should work + if env::consts::FAMILY == "windows" { + let r_verbatim = PathBuf::from("r").canonicalize().unwrap(); + assert_eq!( + glob_vec(&format!("{}\\**", r_verbatim.display().to_string())) + .into_iter() + .map(|p| p.strip_prefix(&r_verbatim).unwrap().to_owned()) + .collect::>(), + vec!( + PathBuf::from("another"), + PathBuf::from("one"), + PathBuf::from("one\\another"), + PathBuf::from("one\\another\\deep"), + PathBuf::from("three"), + PathBuf::from("two") + ) + ); + } + + // collapse consecutive recursive patterns + assert_eq!( + glob_vec("r/**/**"), + vec!( + PathBuf::from("r/another"), + PathBuf::from("r/one"), + PathBuf::from("r/one/another"), + PathBuf::from("r/one/another/deep"), + PathBuf::from("r/three"), + PathBuf::from("r/two") + ) + ); + + assert_eq!( + glob_vec("r/**/*"), + vec!( + PathBuf::from("r/another"), + PathBuf::from("r/another/a.md"), + PathBuf::from("r/current_dir.md"), + PathBuf::from("r/one"), + PathBuf::from("r/one/a.md"), + PathBuf::from("r/one/another"), + PathBuf::from("r/one/another/a.md"), + PathBuf::from("r/one/another/deep"), + PathBuf::from("r/one/another/deep/spelunking.md"), + PathBuf::from("r/three"), + PathBuf::from("r/three/c.md"), + PathBuf::from("r/two"), + PathBuf::from("r/two/b.md") + ) + ); + + // followed by a wildcard + assert_eq!( + glob_vec("r/**/*.md"), + vec!( + PathBuf::from("r/another/a.md"), + PathBuf::from("r/current_dir.md"), + PathBuf::from("r/one/a.md"), + PathBuf::from("r/one/another/a.md"), + PathBuf::from("r/one/another/deep/spelunking.md"), + PathBuf::from("r/three/c.md"), + PathBuf::from("r/two/b.md") + ) + ); + + // followed by a precise pattern + assert_eq!( + glob_vec("r/one/**/a.md"), + vec!( + PathBuf::from("r/one/a.md"), + PathBuf::from("r/one/another/a.md") + ) + ); + + // followed by another recursive pattern + // collapses consecutive recursives into one + assert_eq!( + glob_vec("r/one/**/**/a.md"), + vec!( + PathBuf::from("r/one/a.md"), + PathBuf::from("r/one/another/a.md") + ) + ); + + // followed by two precise patterns + assert_eq!( + glob_vec("r/**/another/a.md"), + vec!( + PathBuf::from("r/another/a.md"), + PathBuf::from("r/one/another/a.md") + ) + ); + + assert_eq!(glob_vec(""), Vec::::new()); + assert_eq!(glob_vec("."), vec!(PathBuf::from("."))); + assert_eq!(glob_vec(".."), vec!(PathBuf::from(".."))); + + assert_eq!(glob_vec("aaa"), vec!(PathBuf::from("aaa"))); + assert_eq!(glob_vec("aaa/"), vec!(PathBuf::from("aaa"))); + 
assert_eq!(glob_vec("a"), Vec::::new()); + assert_eq!(glob_vec("aa"), Vec::::new()); + assert_eq!(glob_vec("aaaa"), Vec::::new()); + + assert_eq!(glob_vec("aaa/apple"), vec!(PathBuf::from("aaa/apple"))); + assert_eq!(glob_vec("aaa/apple/nope"), Vec::::new()); + + // windows should support both / and \ as directory separators + if env::consts::FAMILY == "windows" { + assert_eq!(glob_vec("aaa\\apple"), vec!(PathBuf::from("aaa/apple"))); + } + + assert_eq!( + glob_vec("???/"), + vec!( + PathBuf::from("aaa"), + PathBuf::from("bbb"), + PathBuf::from("ccc"), + PathBuf::from("xyz") + ) + ); + + assert_eq!( + glob_vec("aaa/tomato/tom?to.txt"), + vec!( + PathBuf::from("aaa/tomato/tomato.txt"), + PathBuf::from("aaa/tomato/tomoto.txt") + ) + ); + + assert_eq!( + glob_vec("xyz/?"), + vec!( + PathBuf::from("xyz/x"), + PathBuf::from("xyz/y"), + PathBuf::from("xyz/z") + ) + ); + + assert_eq!(glob_vec("a*"), vec!(PathBuf::from("aaa"))); + assert_eq!(glob_vec("*a*"), vec!(PathBuf::from("aaa"))); + assert_eq!(glob_vec("a*a"), vec!(PathBuf::from("aaa"))); + assert_eq!(glob_vec("aaa*"), vec!(PathBuf::from("aaa"))); + assert_eq!(glob_vec("*aaa"), vec!(PathBuf::from("aaa"))); + assert_eq!(glob_vec("*aaa*"), vec!(PathBuf::from("aaa"))); + assert_eq!(glob_vec("*a*a*a*"), vec!(PathBuf::from("aaa"))); + assert_eq!(glob_vec("aaa*/"), vec!(PathBuf::from("aaa"))); + + assert_eq!( + glob_vec("aaa/*"), + vec!( + PathBuf::from("aaa/apple"), + PathBuf::from("aaa/green_apple"), + PathBuf::from("aaa/kazusa"), + PathBuf::from("aaa/orange"), + PathBuf::from("aaa/tomato"), + ) + ); + + assert_eq!( + glob_vec("aaa/*a*"), + vec!( + PathBuf::from("aaa/apple"), + PathBuf::from("aaa/green_apple"), + PathBuf::from("aaa/kazusa"), + PathBuf::from("aaa/orange"), + PathBuf::from("aaa/tomato") + ) + ); + + assert_eq!( + glob_vec("*/*/*.txt"), + vec!( + PathBuf::from("aaa/tomato/tomato.txt"), + PathBuf::from("aaa/tomato/tomoto.txt") + ) + ); + + assert_eq!( + glob_vec("*/*/t[aob]m?to[.]t[!y]t"), + vec!( + PathBuf::from("aaa/tomato/tomato.txt"), + PathBuf::from("aaa/tomato/tomoto.txt") + ) + ); + + assert_eq!(glob_vec("./aaa"), vec!(PathBuf::from("aaa"))); + assert_eq!(glob_vec("./*"), glob_vec("*")); + assert_eq!(glob_vec("*/..").pop().unwrap(), PathBuf::from("xyz/..")); + assert_eq!(glob_vec("aaa/../bbb"), vec!(PathBuf::from("aaa/../bbb"))); + assert_eq!(glob_vec("nonexistent/../bbb"), Vec::::new()); + assert_eq!(glob_vec("aaa/tomato/tomato.txt/.."), Vec::::new()); + + assert_eq!(glob_vec("aaa/tomato/tomato.txt/"), Vec::::new()); + + // Ensure to find a broken symlink. 
+ assert_eq!(glob_vec("aaa/kazusa"), vec!(PathBuf::from("aaa/kazusa"))); + + assert_eq!(glob_vec("aa[a]"), vec!(PathBuf::from("aaa"))); + assert_eq!(glob_vec("aa[abc]"), vec!(PathBuf::from("aaa"))); + assert_eq!(glob_vec("a[bca]a"), vec!(PathBuf::from("aaa"))); + assert_eq!(glob_vec("aa[b]"), Vec::::new()); + assert_eq!(glob_vec("aa[xyz]"), Vec::::new()); + assert_eq!(glob_vec("aa[]]"), Vec::::new()); + + assert_eq!(glob_vec("aa[!b]"), vec!(PathBuf::from("aaa"))); + assert_eq!(glob_vec("aa[!bcd]"), vec!(PathBuf::from("aaa"))); + assert_eq!(glob_vec("a[!bcd]a"), vec!(PathBuf::from("aaa"))); + assert_eq!(glob_vec("aa[!a]"), Vec::::new()); + assert_eq!(glob_vec("aa[!abc]"), Vec::::new()); + + assert_eq!( + glob_vec("bbb/specials/[[]"), + vec!(PathBuf::from("bbb/specials/[")) + ); + assert_eq!( + glob_vec("bbb/specials/!"), + vec!(PathBuf::from("bbb/specials/!")) + ); + assert_eq!( + glob_vec("bbb/specials/[]]"), + vec!(PathBuf::from("bbb/specials/]")) + ); + + mk_file("i", true); + mk_file("i/qwe", true); + mk_file("i/qwe/.aaa", false); + mk_file("i/qwe/.bbb", true); + mk_file("i/qwe/.bbb/ccc", false); + mk_file("i/qwe/.bbb/.ddd", false); + mk_file("i/qwe/eee", false); + + let options = glob::MatchOptions { + case_sensitive: false, + require_literal_separator: true, + require_literal_leading_dot: true, + }; + assert_eq!(glob_with_vec("i/**/*a*", options), Vec::::new()); + assert_eq!(glob_with_vec("i/**/*c*", options), Vec::::new()); + assert_eq!(glob_with_vec("i/**/*d*", options), Vec::::new()); + assert_eq!( + glob_with_vec("i/**/*e*", options), + vec!(PathBuf::from("i/qwe"), PathBuf::from("i/qwe/eee")) + ); + + if env::consts::FAMILY != "windows" { + assert_eq!( + glob_vec("bbb/specials/[*]"), + vec!(PathBuf::from("bbb/specials/*")) + ); + assert_eq!( + glob_vec("bbb/specials/[?]"), + vec!(PathBuf::from("bbb/specials/?")) + ); + } + + if env::consts::FAMILY == "windows" { + assert_eq!( + glob_vec("bbb/specials/[![]"), + vec!( + PathBuf::from("bbb/specials/!"), + PathBuf::from("bbb/specials/]") + ) + ); + + assert_eq!( + glob_vec("bbb/specials/[!]]"), + vec!( + PathBuf::from("bbb/specials/!"), + PathBuf::from("bbb/specials/[") + ) + ); + + assert_eq!( + glob_vec("bbb/specials/[!!]"), + vec!( + PathBuf::from("bbb/specials/["), + PathBuf::from("bbb/specials/]") + ) + ); + } else { + assert_eq!( + glob_vec("bbb/specials/[![]"), + vec!( + PathBuf::from("bbb/specials/!"), + PathBuf::from("bbb/specials/*"), + PathBuf::from("bbb/specials/?"), + PathBuf::from("bbb/specials/]") + ) + ); + + assert_eq!( + glob_vec("bbb/specials/[!]]"), + vec!( + PathBuf::from("bbb/specials/!"), + PathBuf::from("bbb/specials/*"), + PathBuf::from("bbb/specials/?"), + PathBuf::from("bbb/specials/[") + ) + ); + + assert_eq!( + glob_vec("bbb/specials/[!!]"), + vec!( + PathBuf::from("bbb/specials/*"), + PathBuf::from("bbb/specials/?"), + PathBuf::from("bbb/specials/["), + PathBuf::from("bbb/specials/]") + ) + ); + + assert_eq!( + glob_vec("bbb/specials/[!*]"), + vec!( + PathBuf::from("bbb/specials/!"), + PathBuf::from("bbb/specials/?"), + PathBuf::from("bbb/specials/["), + PathBuf::from("bbb/specials/]") + ) + ); + + assert_eq!( + glob_vec("bbb/specials/[!?]"), + vec!( + PathBuf::from("bbb/specials/!"), + PathBuf::from("bbb/specials/*"), + PathBuf::from("bbb/specials/["), + PathBuf::from("bbb/specials/]") + ) + ); + } +} diff --git a/vendor/glob/triagebot.toml b/vendor/glob/triagebot.toml new file mode 100644 index 00000000000000..fa0824ac53c0a9 --- /dev/null +++ b/vendor/glob/triagebot.toml @@ -0,0 +1 @@ +[assign] diff 
--git a/vendor/itertools/.cargo-checksum.json b/vendor/itertools/.cargo-checksum.json new file mode 100644 index 00000000000000..2fc9929fb19a8e --- /dev/null +++ b/vendor/itertools/.cargo-checksum.json @@ -0,0 +1 @@ +{"files":{".cargo_vcs_info.json":"a3852e6977ae2992b84eaa301ca1402cafaa705d968d18b4df1e7c1324961be7",".codecov.yml":"27b445dc39fefbcb3c232623d6ce77ec15133d3fad6bde481b8d140614993b2a",".github/dependabot.yml":"7ae793ed2cfbb3d571f46e4c6ed9cfd374af472c44d38d7e9be82e91fccafcd4",".github/workflows/ci.yml":"f7335e53804a94dbfb31d3215d5035461ac3de73b932b116db2c8102c56bc396",".github/workflows/coverage.yml":"6dfc476a71ffa247ff4a79dfb2e51afc927ea644f5c781afaf5c3cd03b552537","CHANGELOG.md":"ceee4376468a3f7647f3bf4649e195a86873dd3091f23e3f992d248bd143fba2","CONTRIBUTING.md":"d5787d0fd4df15481e2e09a37234ac5dec22c007c890826991f633d890efa29e","Cargo.lock":"fd2c9ca8e299f51d7ed2a0f3760c393f03c544c817743ab7341c1f22b8c1d869","Cargo.toml":"49abb2101a0dd9cb137df206454b6620d04929a4975921fab6682ba834435620","Cargo.toml.orig":"30713cac3a7479b71408e83c0247aef8c7fd716c8fd4ab490d55c36cea0bc0e2","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"7576269ea71f767b99297934c0b2367532690f8c4badc695edf8e04ab6a1e545","README.md":"fc812ab0d5756b62c2ae34f38365899204b53332d5e6a87a695b0fe15a466957","benches/bench1.rs":"d632c8b839d7b318d1cb7b81b9c62570c77dcdf0696b8ce3d52067c79c930f78","benches/combinations.rs":"5b3bd243336d6b6bdc111d66218f3f0a4ecdb10fb72e90db79959e3d8bb2cf6f","benches/combinations_with_replacement.rs":"11f29160652a2d90ce7ca4b1c339c4457888ab6867e2456ce1c62e3adf9be737","benches/fold_specialization.rs":"66ab13fd8576a662afb59ef72c5565f5c3d27f7f30a976450ee5a14958654fa2","benches/powerset.rs":"dc1fd729584147e5d8e4d19c6ca6f8706087d41c3c5beb7293d9ea43b4beab14","benches/specializations.rs":"d8320071a692147c1239881725079003be2f924f6124c3aa3bdf6a4596d66a66","benches/tree_reduce.rs":"fa4f22f042b76df89094ddf6e925ba42c4c3992f8195e719ed035f2e7cfa05bd","benches/tuple_combinations.rs":"16366158743307a0289fc1df423a3cec45009807d410a9fe9922d5b6f8b7d002","benches/tuples.rs":"5ab542aca40df4390de0ebf3819665df402d924a7dd6f4280e6ffc942bbd25c4","examples/iris.data":"596ffd580471ca4d4880f8e439c7281f3b50d8249a5960353cb200b1490f63a0","examples/iris.rs":"42c1b2fc148df52a050b013a57b577ad19911f1fe85b9525863df501979b5cd1","src/adaptors/coalesce.rs":"b57157c205ae077dd398740b61c7f49023aa80868abd8a071a6fe89ae6ecc9ad","src/adaptors/map.rs":"4952ee770cb54e98b2f649efd9c98f18951689358eb9b6bee10f139d056353ae","src/adaptors/mod.rs":"7064a1043baec815c02803d5043bd950e6a515f3a0247e44028ee080004dc225","src/adaptors/multi_product.rs":"ad501e8ae4e5089b9d2f2be1f9a4713da6a2103b14daa759e09918409f88e321","src/combinations.rs":"6c1cd55051eb59c595780b055ccabb07db72add134120dd8b2f5aa60c0f5fa6e","src/combinations_with_replacement.rs":"cad1885ca51e52a1dc324a0b06bd0d1d911f1dd58cf5d76bd9a9c78a09853b86","src/concat_impl.rs":"6094463eb57f77e115f6a3fe7f469992eef81c0c4caa9585b99a426d87f794fb","src/cons_tuples_impl.rs":"3ceee1ff0dbd4c3b43195a490b8f38b05de3a46e0fb691ba11fbbe1e7e3ad746","src/diff.rs":"046b3ac4a22036b9ec8741aba4e8f6729ae44bf14346b61c23192b88d9fc7c88","src/duplicates_impl.rs":"1be37249b4566edc8da611ed9766ec851a526e7513bd13d80fe97482dcfcf7f3","src/either_or_both.rs":"cac278666b5d3c1fd103d97d15ce4c40960ea459441aeae83c6502087fd2ad8d","src/exactly_one_err.rs":"90b6204551161d27394af72107765dbfe3b51a77f4770c2e506fa4938985a184","src/extrema_set.rs":"7e0d92ca1aafc1221e08d0297087b35373463d03228a0e65628cfd1734273e
90","src/flatten_ok.rs":"62c18e5221a27949a00de49414306d6dfd601515817c1c8ae6189e3275756dd3","src/format.rs":"94675a6ac4500ec52bbf8463b2241b870fea8b5dd6b113accb8a00b2c1174871","src/free.rs":"6f3597a5ccf8a9b0606da7df6803f7368152ebcf7b7bcfd31b17fcff3a286139","src/group_map.rs":"c9da201137c6bb479b9308bfc38398b76950e39905f4ce8bc435c5318371522c","src/groupbylazy.rs":"5862629719258703aad47977ba1060f20fff15e962e18e6142758ebf6cd4a61c","src/grouping_map.rs":"8dac807a6cbf1893fdc147b4160000c452bfb5e533e1c774ed6bd3af91cf46da","src/impl_macros.rs":"97fc5f39574805e0c220aa462cf1ae7dcac5c1082d6ee5500e7d71c120db5f88","src/intersperse.rs":"55031819e985c3184275e254c9600ecbe01e9fb49f198039c5da82a87ea5b90e","src/iter_index.rs":"1b0ff8376a4ad855d44db8c662450c777db84e0f4997b53ca575c65b107bb83b","src/k_smallest.rs":"6a665742f6665e350a54ae3ff821252e7c599b57aee3239a03fa56a9d1930467","src/kmerge_impl.rs":"2e425d4189898566c5146e8f5bd258045c246f6babbe3ac5fef10ca08ae2efd2","src/lazy_buffer.rs":"a065f73c228f156bdf901824977ea9375f912823af4f9b05378e3f633d3b20e4","src/lib.rs":"75903dcd21573a8a77a205cfb8d335c60c2939771481c6431c29a0918d8dbfb0","src/merge_join.rs":"bb1fccddcc647fe21da1895a8808c06596d49900f5cf60a69a9c9141fc12af11","src/minmax.rs":"0ec34b172ca8efc4aacb96f3e5771bdc5e8ac882876ee0f59d698c3924717c48","src/multipeek_impl.rs":"79eef0be49ad66f15d41808e72c03976c4f7cff5838b69d17975d3ece266f3f8","src/pad_tail.rs":"e6bb5b086478600b0dbb8726cae8364bf83ab36d989ef467e1264eea43933b50","src/peek_nth.rs":"093f1a157b1c917f041af5244a5a46311affa2922126e36dc0ee2c501c79b58c","src/peeking_take_while.rs":"6967ba212f045145da7683a192471b2dcfcedf90d23922d70a5b7e2a1b36622e","src/permutations.rs":"b316084ee14e9e138d22f177367b3bfa24cb3e5e90ab20b9b00a9a23d653496f","src/powerset.rs":"7ab24fefc914b339dd92a6c8e639d0cad34479e09293b3346078856d6bc02d34","src/process_results_impl.rs":"a6f91aec53c56b042e15ecb8f8ca489c81e3ee92347dc9fa8352a5baac44a247","src/put_back_n_impl.rs":"5a58d7a31c03029f0726e4d42de3be869580cf76b73c6d1ef70dd40c240b03a0","src/rciter_impl.rs":"9a50cdc0106587be8ee49c2af5fcf84436b74d353c2846b401eb638c23b4733c","src/repeatn.rs":"dd9a5bf5a63ef9cc6ec5c8a6137c7ffba80f13568b6d001e189daaa29ffbaf39","src/size_hint.rs":"6022c2327ddc6df7e7b939eb60a93ee66ea9aa4d3aab49b9952e663ff4bff10b","src/sources.rs":"ef942af209ca1effcd28a95abedad8c45b659ae2a15b66c2158cb604f6e325f8","src/take_while_inclusive.rs":"1973a9f5322b3dae3b5ccded5912a08a8e2e975b9a5eac666192b118b230d305","src/tee.rs":"dad50ca162627cf0a67786f0993ef27d06cdefc14d412463e58c07824ef409d8","src/tuple_impl.rs":"0213261109e7c65746ccc22425d19141907bf7ea1e3dd4c40e9f278e6148e272","src/unique_impl.rs":"1efc280226f13ddd7dd5f7eedeec0093b704596652c942f3a0b2f8c90fa2e2f7","src/unziptuple.rs":"f3f6a2ee2658fa07db7592f2c344c2e3b1263a21fc75e1325f2be32c9dc1e750","src/with_position.rs":"9ca1eb195d04690b0c3a62a6c0eea349b8042e11c4ca4b80744f54103e1c7355","src/zip_eq_impl.rs":"4e0d38266c26982ea8b8d055994cb1298e93b7749caadbd7f25d2b6e0c8ce0d7","src/zip_longest.rs":"5572699564dd5717cc074b7733333ed238c2e9f3e6819d45e33e3a2dbda74478","src/ziptuple.rs":"d3a12221d39c8a5514574adb3ad2ccd1803d514b1cb09fbcd9253e3ddd628310","tests/adaptors_no_collect.rs":"7e6240878b1fc13b6384fdde0317d5d7ccca3e417b10a201ba61eb5255400fda","tests/flatten_ok.rs":"b7894874132918b8229c7150b2637511d8e3e14197d8eeb9382d46b2a514efa2","tests/laziness.rs":"89e6caec10da3d7aeadf9e30d5caf03cda36d07cee8415ff134b5b8e2a2cf144","tests/macros_hygiene.rs":"c9e9f0546a8c12ea52311c0eadd77d75c782d4e10ae9e74d410ea2861d526c66","tests/merge_join.rs":"5fb506b989f4a331d46c
dec5775ea594656985134196099eaf8d3905bdddcdd5","tests/peeking_take_while.rs":"f834361c5520dda15eb9e9ebe87507c905462201412b21859d9f83dab91d0e0b","tests/quick.rs":"60b1ca6d820aa505545f20d6082fd08c1e0470b5326b711567ec1c93d07f9ced","tests/specializations.rs":"7c6a461850a2b4f783801ef23b2303ad985c58f2295c569001369b3c9d4c6e33","tests/test_core.rs":"482e077e0c5fe78ba0a8a126d8c0821162d820a21936855fadede713b1d4e70a","tests/test_std.rs":"f788573adc9ae19eb4bd2886c3967b273dd881982af407f6f5b6276434df0f00","tests/tuples.rs":"014e4da776174bfe923270e2a359cd9c95b372fce4b952b8138909d6e2c52762","tests/zip.rs":"2f68d531170fa2f106efafaf38ae854281d93305bf1b2b8d4bea833072518ecd"},"package":"413ee7dfc52ee1a4949ceeb7dbc8a33f2d6c088194d9f922fb8318faf1f01186"} \ No newline at end of file diff --git a/vendor/itertools/.cargo_vcs_info.json b/vendor/itertools/.cargo_vcs_info.json new file mode 100644 index 00000000000000..848cbe437ec807 --- /dev/null +++ b/vendor/itertools/.cargo_vcs_info.json @@ -0,0 +1,6 @@ +{ + "git": { + "sha1": "d5084d15e959b85d89a49e5cd33ad6267bc541a3" + }, + "path_in_vcs": "" +} \ No newline at end of file diff --git a/vendor/itertools/.codecov.yml b/vendor/itertools/.codecov.yml new file mode 100644 index 00000000000000..d06394ae04a138 --- /dev/null +++ b/vendor/itertools/.codecov.yml @@ -0,0 +1,7 @@ +coverage: + status: + project: + default: + target: auto + # Allow a tiny drop of overall project coverage in PR to reduce spurious failures. + threshold: 0.25% diff --git a/vendor/itertools/.github/dependabot.yml b/vendor/itertools/.github/dependabot.yml new file mode 100644 index 00000000000000..71607d0c3c26d5 --- /dev/null +++ b/vendor/itertools/.github/dependabot.yml @@ -0,0 +1,6 @@ +version: 2 +updates: +- package-ecosystem: github-actions + directory: "/" + schedule: + interval: daily diff --git a/vendor/itertools/.github/workflows/ci.yml b/vendor/itertools/.github/workflows/ci.yml new file mode 100644 index 00000000000000..239ce2405f3989 --- /dev/null +++ b/vendor/itertools/.github/workflows/ci.yml @@ -0,0 +1,85 @@ +name: CI + +on: + pull_request: + paths-ignore: + - "**.md" + merge_group: + paths-ignore: + - "**.md" + +jobs: + check: + runs-on: ubuntu-latest + strategy: + matrix: + features: + [ + "", + "--no-default-features", + "--no-default-features --features use_alloc", + "--all-targets --all-features", + ] + steps: + - uses: actions/checkout@v4 + - uses: dtolnay/rust-toolchain@stable + with: + components: clippy + - run: RUSTFLAGS="--deny warnings" cargo clippy ${{ matrix.features }} + + doc: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: dtolnay/rust-toolchain@stable + - run: RUSTDOCFLAGS="-Dwarnings" cargo doc --all-features + + msrv: + runs-on: ubuntu-latest + env: + CARGO_NET_GIT_FETCH_WITH_CLI: true + steps: + - uses: actions/checkout@v4 + - uses: taiki-e/install-action@cargo-no-dev-deps + - uses: dtolnay/rust-toolchain@master + with: + # Here, it does not trigger a PR from dependabot. 
+ toolchain: 1.43.1 + - run: cargo no-dev-deps check + + test: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: dtolnay/rust-toolchain@stable + - run: cargo test --all-features + + check-format: + name: check format + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: dtolnay/rust-toolchain@master + with: + toolchain: stable + components: rustfmt + - run: cargo fmt --check + + semver-checks: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: obi1kenobi/cargo-semver-checks-action@v2.4 + with: + rust-toolchain: stable + feature-group: all-features + + # Used to signal to branch protections that all other jobs have succeeded. + all-jobs-succeed: + name: All checks succeeded + if: success() + runs-on: ubuntu-latest + needs: [check, msrv, test, check-format, doc] + steps: + - name: Mark the job as successful + run: exit 0 diff --git a/vendor/itertools/.github/workflows/coverage.yml b/vendor/itertools/.github/workflows/coverage.yml new file mode 100644 index 00000000000000..5c08456590ebd9 --- /dev/null +++ b/vendor/itertools/.github/workflows/coverage.yml @@ -0,0 +1,34 @@ +on: + push: + branches: [master] + paths-ignore: + - "**.md" + pull_request: + paths-ignore: + - "**.md" + +name: Code Coverage + +jobs: + coverage: + name: coverage + runs-on: ubuntu-latest + steps: + - name: checkout source + uses: actions/checkout@v4 + + - name: Install nightly toolchain + uses: dtolnay/rust-toolchain@nightly + with: + components: llvm-tools-preview + + - name: Install cargo-llvm-cov + uses: taiki-e/install-action@cargo-llvm-cov + + - name: Run llvm-cov + run: cargo llvm-cov --all-features --doctests --workspace --lcov --output-path lcov.info + + - name: Upload coverage to Codecov + uses: codecov/codecov-action@v4 + with: + files: lcov.info diff --git a/vendor/itertools/CHANGELOG.md b/vendor/itertools/CHANGELOG.md new file mode 100644 index 00000000000000..de9564c6a229f7 --- /dev/null +++ b/vendor/itertools/CHANGELOG.md @@ -0,0 +1,539 @@ +# Changelog + +## 0.13.0 + +### Breaking +- Removed implementation of `DoubleEndedIterator` for `ConsTuples` (#853) +- Made `MultiProduct` fused and fixed on an empty iterator (#835, #834) +- Changed `iproduct!` to return tuples for maxi one iterator too (#870) +- Changed `PutBack::put_back` to return the old value (#880) +- Removed deprecated `repeat_call, Itertools::{foreach, step, map_results, fold_results}` (#878) +- Removed `TakeWhileInclusive::new` (#912) + +### Added +- Added `Itertools::{smallest_by, smallest_by_key, largest, largest_by, largest_by_key}` (#654, #885) +- Added `Itertools::tail` (#899) +- Implemented `DoubleEndedIterator` for `ProcessResults` (#910) +- Implemented `Debug` for `FormatWith` (#931) +- Added `Itertools::get` (#891) + +### Changed +- Deprecated `Itertools::group_by` (renamed `chunk_by`) (#866, #879) +- Deprecated `unfold` (use `std::iter::from_fn` instead) (#871) +- Optimized `GroupingMapBy` (#873, #876) +- Relaxed `Fn` bounds to `FnMut` in `diff_with, Itertools::into_group_map_by` (#886) +- Relaxed `Debug/Clone` bounds for `MapInto` (#889) +- Documented the `use_alloc` feature (#887) +- Optimized `Itertools::set_from` (#888) +- Removed badges in `README.md` (#890) +- Added "no-std" categories in `Cargo.toml` (#894) +- Fixed `Itertools::k_smallest` on short unfused iterators (#900) +- Deprecated `Itertools::tree_fold1` (renamed `tree_reduce`) (#895) +- Deprecated `GroupingMap::fold_first` (renamed `reduce`) (#902) +- Fixed `Itertools::k_smallest(0)` to consume the 
iterator, optimized `Itertools::k_smallest(1)` (#909) +- Specialized `Combinations::nth` (#914) +- Specialized `MergeBy::fold` (#920) +- Specialized `CombinationsWithReplacement::nth` (#923) +- Specialized `FlattenOk::{fold, rfold}` (#927) +- Specialized `Powerset::nth` (#924) +- Documentation fixes (#882, #936) +- Fixed `assert_equal` for iterators longer than `i32::MAX` (#932) +- Updated the `must_use` message of non-lazy `KMergeBy` and `TupleCombinations` (#939) + +### Notable Internal Changes +- Tested iterator laziness (#792) +- Created `CONTRIBUTING.md` (#767) + +## 0.12.1 + +### Added +- Documented iteration order guarantee for `Itertools::[tuple_]combinations` (#822) +- Documented possible panic in `iterate` (#842) +- Implemented `Clone` and `Debug` for `Diff` (#845) +- Implemented `Debug` for `WithPosition` (#859) +- Implemented `Eq` for `MinMaxResult` (#838) +- Implemented `From>` for `Option>` (#843) +- Implemented `PeekingNext` for `RepeatN` (#855) + +### Changed +- Made `CoalesceBy` lazy (#801) +- Optimized `Filter[Map]Ok::next`, `Itertools::partition`, `Unique[By]::next[_back]` (#818) +- Optimized `Itertools::find_position` (#837) +- Optimized `Positions::next[_back]` (#816) +- Optimized `ZipLongest::fold` (#854) +- Relaxed `Debug` bounds for `GroupingMapBy` (#860) +- Specialized `ExactlyOneError::fold` (#826) +- Specialized `Interleave[Shortest]::fold` (#849) +- Specialized `MultiPeek::fold` (#820) +- Specialized `PadUsing::[r]fold` (#825) +- Specialized `PeekNth::fold` (#824) +- Specialized `Positions::[r]fold` (#813) +- Specialized `PutBackN::fold` (#823) +- Specialized `RepeatN::[r]fold` (#821) +- Specialized `TakeWhileInclusive::fold` (#851) +- Specialized `ZipLongest::rfold` (#848) + +### Notable Internal Changes +- Added test coverage in CI (#847, #856) +- Added semver check in CI (#784) +- Enforced `clippy` in CI (#740) +- Enforced `rustdoc` in CI (#840) +- Improved specialization tests (#807) +- More specialization benchmarks (#806) + +## 0.12.0 + +### Breaking +- Made `take_while_inclusive` consume iterator by value (#709) +- Added `Clone` bound to `Unique` (#777) + +### Added +- Added `Itertools::try_len` (#723) +- Added free function `sort_unstable` (#796) +- Added `GroupMap::fold_with` (#778, #785) +- Added `PeekNth::{peek_mut, peek_nth_mut}` (#716) +- Added `PeekNth::{next_if, next_if_eq}` (#734) +- Added conversion into `(Option,Option)` to `EitherOrBoth` (#713) +- Added conversion from `Either` to `EitherOrBoth` (#715) +- Implemented `ExactSizeIterator` for `Tuples` (#761) +- Implemented `ExactSizeIterator` for `(Circular)TupleWindows` (#752) +- Made `EitherOrBoth` a shorthand for `EitherOrBoth` (#719) + +### Changed +- Added missing `#[must_use]` annotations on iterator adaptors (#794) +- Made `Combinations` lazy (#795) +- Made `Intersperse(With)` lazy (#797) +- Made `Permutations` lazy (#793) +- Made `Product` lazy (#800) +- Made `TupleWindows` lazy (#602) +- Specialized `Combinations::{count, size_hint}` (#729) +- Specialized `CombinationsWithReplacement::{count, size_hint}` (#737) +- Specialized `Powerset::fold` (#765) +- Specialized `Powerset::count` (#735) +- Specialized `TupleCombinations::{count, size_hint}` (#763) +- Specialized `TupleCombinations::fold` (#775) +- Specialized `WhileSome::fold` (#780) +- Specialized `WithPosition::fold` (#772) +- Specialized `ZipLongest::fold` (#774) +- Changed `{min, max}_set*` operations require `alloc` feature, instead of `std` (#760) +- Improved documentation of `tree_fold1` (#787) +- Improved documentation of 
`permutations` (#724) +- Fixed typo in documentation of `multiunzip` (#770) + +### Notable Internal Changes +- Improved specialization tests (#799, #786, #782) +- Simplified implementation of `Permutations` (#739, #748, #790) +- Combined `Merge`/`MergeBy`/`MergeJoinBy` implementations (#736) +- Simplified `Permutations::size_hint` (#739) +- Fix wrapping arithmetic in benchmarks (#770) +- Enforced `rustfmt` in CI (#751) +- Disallowed compile warnings in CI (#720) +- Used `cargo hack` to check MSRV (#754) + +## 0.11.0 + +### Breaking +- Make `Itertools::merge_join_by` also accept functions returning bool (#704) +- Implement `PeekingNext` transitively over mutable references (#643) +- Change `with_position` to yield `(Position, Item)` instead of `Position` (#699) + +### Added +- Add `Itertools::take_while_inclusive` (#616) +- Implement `PeekingNext` for `PeekingTakeWhile` (#644) +- Add `EitherOrBoth::{just_left, just_right, into_left, into_right, as_deref, as_deref_mut, left_or_insert, right_or_insert, left_or_insert_with, right_or_insert_with, insert_left, insert_right, insert_both}` (#629) +- Implement `Clone` for `CircularTupleWindows` (#686) +- Implement `Clone` for `Chunks` (#683) +- Add `Itertools::process_results` (#680) + +### Changed +- Use `Cell` instead of `RefCell` in `Format` and `FormatWith` (#608) +- CI tweaks (#674, #675) +- Document and test the difference between stable and unstable sorts (#653) +- Fix documentation error on `Itertools::max_set_by_key` (#692) +- Move MSRV metadata to `Cargo.toml` (#672) +- Implement `equal` with `Iterator::eq` (#591) + +## 0.10.5 + - Maintenance + +## 0.10.4 + - Add `EitherOrBoth::or` and `EitherOrBoth::or_else` (#593) + - Add `min_set`, `max_set` et al. (#613, #323) + - Use `either/use_std` (#628) + - Documentation fixes (#612, #625, #632, #633, #634, #638) + - Code maintenance (#623, #624, #627, #630) + +## 0.10.3 + - Maintenance + +## 0.10.2 + - Add `Itertools::multiunzip` (#362, #565) + - Add `intersperse` and `intersperse_with` free functions (#555) + - Add `Itertools::sorted_by_cached_key` (#424, #575) + - Specialize `ProcessResults::fold` (#563) + - Fix subtraction overflow in `DuplicatesBy::size_hint` (#552) + - Fix specialization tests (#574) + - More `Debug` impls (#573) + - Deprecate `fold1` (use `reduce` instead) (#580) + - Documentation fixes (`HomogenousTuple`, `into_group_map`, `into_group_map_by`, `MultiPeek::peek`) (#543 et al.) 
+ +## 0.10.1 + - Add `Itertools::contains` (#514) + - Add `Itertools::counts_by` (#515) + - Add `Itertools::partition_result` (#511) + - Add `Itertools::all_unique` (#241) + - Add `Itertools::duplicates` and `Itertools::duplicates_by` (#502) + - Add `chain!` (#525) + - Add `Itertools::at_most_one` (#523) + - Add `Itertools::flatten_ok` (#527) + - Add `EitherOrBoth::or_default` (#583) + - Add `Itertools::find_or_last` and `Itertools::find_or_first` (#535) + - Implement `FusedIterator` for `FilterOk`, `FilterMapOk`, `InterleaveShortest`, `KMergeBy`, `MergeBy`, `PadUsing`, `Positions`, `Product` , `RcIter`, `TupleWindows`, `Unique`, `UniqueBy`, `Update`, `WhileSome`, `Combinations`, `CombinationsWithReplacement`, `Powerset`, `RepeatN`, and `WithPosition` (#550) + - Implement `FusedIterator` for `Interleave`, `IntersperseWith`, and `ZipLongest` (#548) + +## 0.10.0 + - **Increase minimum supported Rust version to 1.32.0** + - Improve macro hygiene (#507) + - Add `Itertools::powerset` (#335) + - Add `Itertools::sorted_unstable`, `Itertools::sorted_unstable_by`, and `Itertools::sorted_unstable_by_key` (#494) + - Implement `Error` for `ExactlyOneError` (#484) + - Undeprecate `Itertools::fold_while` (#476) + - Tuple-related adapters work for tuples of arity up to 12 (#475) + - `use_alloc` feature for users who have `alloc`, but not `std` (#474) + - Add `Itertools::k_smallest` (#473) + - Add `Itertools::into_grouping_map` and `GroupingMap` (#465) + - Add `Itertools::into_grouping_map_by` and `GroupingMapBy` (#465) + - Add `Itertools::counts` (#468) + - Add implementation of `DoubleEndedIterator` for `Unique` (#442) + - Add implementation of `DoubleEndedIterator` for `UniqueBy` (#442) + - Add implementation of `DoubleEndedIterator` for `Zip` (#346) + - Add `Itertools::multipeek` (#435) + - Add `Itertools::dedup_with_count` and `DedupWithCount` (#423) + - Add `Itertools::dedup_by_with_count` and `DedupByWithCount` (#423) + - Add `Itertools::intersperse_with` and `IntersperseWith` (#381) + - Add `Itertools::filter_ok` and `FilterOk` (#377) + - Add `Itertools::filter_map_ok` and `FilterMapOk` (#377) + - Deprecate `Itertools::fold_results`, use `Itertools::fold_ok` instead (#377) + - Deprecate `Itertools::map_results`, use `Itertools::map_ok` instead (#377) + - Deprecate `FoldResults`, use `FoldOk` instead (#377) + - Deprecate `MapResults`, use `MapOk` instead (#377) + - Add `Itertools::circular_tuple_windows` and `CircularTupleWindows` (#350) + - Add `peek_nth` and `PeekNth` (#303) + +## 0.9.0 + - Fix potential overflow in `MergeJoinBy::size_hint` (#385) + - Add `derive(Clone)` where possible (#382) + - Add `try_collect` method (#394) + - Add `HomogeneousTuple` trait (#389) + - Fix `combinations(0)` and `combinations_with_replacement(0)` (#383) + - Don't require `ParitalEq` to the `Item` of `DedupBy` (#397) + - Implement missing specializations on the `PutBack` adaptor and on the `MergeJoinBy` iterator (#372) + - Add `position_*` methods (#412) + - Derive `Hash` for `EitherOrBoth` (#417) + - Increase minimum supported Rust version to 1.32.0 + +## 0.8.2 + - Use `slice::iter` instead of `into_iter` to avoid future breakage (#378, by @LukasKalbertodt) +## 0.8.1 + - Added a [`.exactly_one()`](https://docs.rs/itertools/0.8.1/itertools/trait.Itertools.html#method.exactly_one) iterator method that, on success, extracts the single value of an iterator ; by @Xaeroxe + - Added combinatory iterator adaptors: + - 
[`.permutations(k)`](https://docs.rs/itertools/0.8.1/itertools/trait.Itertools.html#method.permutations): + + `[0, 1, 2].iter().permutations(2)` yields + + ```rust + [ + vec![0, 1], + vec![0, 2], + vec![1, 0], + vec![1, 2], + vec![2, 0], + vec![2, 1], + ] + ``` + + ; by @tobz1000 + + - [`.combinations_with_replacement(k)`](https://docs.rs/itertools/0.8.1/itertools/trait.Itertools.html#method.combinations_with_replacement): + + `[0, 1, 2].iter().combinations_with_replacement(2)` yields + + ```rust + [ + vec![0, 0], + vec![0, 1], + vec![0, 2], + vec![1, 1], + vec![1, 2], + vec![2, 2], + ] + ``` + + ; by @tommilligan + + - For reference, these methods join the already existing [`.combinations(k)`](https://docs.rs/itertools/0.8.1/itertools/trait.Itertools.html#method.combinations): + + `[0, 1, 2].iter().combinations(2)` yields + + ```rust + [ + vec![0, 1], + vec![0, 2], + vec![1, 2], + ] + ``` + + - Improved the performance of [`.fold()`](https://docs.rs/itertools/0.8.1/itertools/trait.Itertools.html#method.fold)-based internal iteration for the [`.intersperse()`](https://docs.rs/itertools/0.8.1/itertools/trait.Itertools.html#method.intersperse) iterator ; by @jswrenn + - Added [`.dedup_by()`](https://docs.rs/itertools/0.8.1/itertools/trait.Itertools.html#method.dedup_by), [`.merge_by()`](https://docs.rs/itertools/0.8.1/itertools/trait.Itertools.html#method.merge_by) and [`.kmerge_by()`](https://docs.rs/itertools/0.8.1/itertools/trait.Itertools.html#method.kmerge_by) adaptors that work like [`.dedup()`](https://docs.rs/itertools/0.8.1/itertools/trait.Itertools.html#method.dedup), [`.merge()`](https://docs.rs/itertools/0.8.1/itertools/trait.Itertools.html#method.merge) and [`.kmerge()`](https://docs.rs/itertools/0.8.1/itertools/trait.Itertools.html#method.kmerge), but taking an additional custom comparison closure parameter. ; by @phimuemue + - Improved the performance of [`.all_equal()`](https://docs.rs/itertools/0.8.1/itertools/trait.Itertools.html#method.all_equal) ; by @fyrchik + - Loosened the bounds on [`.partition_map()`](https://docs.rs/itertools/0.8.1/itertools/trait.Itertools.html#method.partition_map) to take just a `FnMut` closure rather than a `Fn` closure, and made its implementation use internal iteration for better performance ; by @danielhenrymantilla + - Added convenience methods to [`EitherOrBoth`](https://docs.rs/itertools/0.8.1/itertools/enum.EitherOrBoth.html) elements yielded from the [`.zip_longest()`](https://docs.rs/itertools/0.8.1/itertools/trait.Itertools.html#method.zip_longest) iterator adaptor ; by @Avi-D-coder + - Added [`.sum1()`](https://docs.rs/itertools/0.8.1/itertools/trait.Itertools.html#method.sum1) and [`.product1()`](https://docs.rs/itertools/0.8.1/itertools/trait.Itertools.html#method.product1) iterator methods that respectively try to return the sum and the product of the elements of an iterator **when it is not empty**, otherwise they return `None` ; by @Emerentius +## 0.8.0 + - Added new adaptor `.map_into()` for conversions using `Into` by @vorner + - Improved `Itertools` docs by @JohnHeitmann + - The return type of `.sorted_by_by_key()` is now an iterator, not a Vec. + - The return type of the `izip!(x, y)` macro with exactly two arguments is now the usual `Iterator::zip`. 
+ - Remove `.flatten()` in favour of std's `.flatten()` + - Deprecate `.foreach()` in favour of std's `.for_each()` + - Deprecate `.step()` in favour of std's `.step_by()` + - Deprecate `repeat_call` in favour of std's `repeat_with` + - Deprecate `.fold_while()` in favour of std's `.try_fold()` + - Require Rust 1.24 as minimal version. +## 0.7.11 + - Add convenience methods to `EitherOrBoth`, making it more similar to `Option` and `Either` by @jethrogb +## 0.7.10 + - No changes. +## 0.7.9 + - New inclusion policy: See the readme about suggesting features for std before accepting them in itertools. + - The `FoldWhile` type now implements `Eq` and `PartialEq` by @jturner314 +## 0.7.8 + - Add new iterator method `.tree_fold1()` which is like `.fold1()` except items are combined in a tree structure (see its docs). By @scottmcm + - Add more `Debug` impls by @phimuemue: KMerge, KMergeBy, MergeJoinBy, ConsTuples, Intersperse, ProcessResults, RcIter, Tee, TupleWindows, Tee, ZipLongest, ZipEq, Zip. +## 0.7.7 + - Add new iterator method `.into_group_map() -> HashMap>` which turns an iterator of `(K, V)` elements into such a hash table, where values are grouped by key. By @tobz1000 + - Add new free function `flatten` for the `.flatten()` adaptor. **NOTE:** recent Rust nightlies have `Iterator::flatten` and thus a clash with our flatten adaptor. One workaround is to use the itertools `flatten` free function. +## 0.7.6 + - Add new adaptor `.multi_cartesian_product()` which is an n-ary product iterator by @tobz1000 + - Add new method `.sorted_by_key()` by @Xion + - Provide simpler and faster `.count()` for `.unique()` and `.unique_by()` +## 0.7.5 + - `.multipeek()` now implements `PeekingNext`, by @nicopap. +## 0.7.4 + - Add new adaptor `.update()` by @lucasem; this adaptor is used to modify an element before passing it on in an iterator chain. +## 0.7.3 + - Add new method `.collect_tuple()` by @matklad; it makes a tuple out of the iterator's elements if the number of them matches **exactly**. + - Implement `fold` and `collect` for `.map_results()` which means it reuses the code of the standard `.map()` for these methods. +## 0.7.2 + - Add new adaptor `.merge_join_by` by @srijs; a heterogeneous merge join for two ordered sequences. +## 0.7.1 + - Iterator adaptors and iterators in itertools now use the same `must_use` reminder that the standard library adaptors do, by @matematikaedit and @bluss *“iterator adaptors are lazy and do nothing unless consumed”*. +## 0.7.0 + - Faster `izip!()` by @krdln + - `izip!()` is now a wrapper for repeated regular `.zip()` and a single `.map()`. This means it optimizes as well as the standard library `.zip()` it uses. **Note:** `multizip` and `izip!()` are now different! The former has a named type but the latter optimizes better. + - Faster `.unique()` + - `no_std` support, which is opt-in! + - Many lovable features are still there without std, like `izip!()` or `.format()` or `.merge()`, but not those that use collections. + - Trait bounds were required up front instead of just on the type: `group_by`'s `PartialEq` by @Phlosioneer and `repeat_call`'s `FnMut`. + - Removed deprecated constructor `Zip::new` — use `izip!()` or `multizip()` +## 0.6.5 + - Fix bug in `.cartesian_product()`'s fold (which only was visible for unfused iterators). +## 0.6.4 + - Add specific `fold` implementations for `.cartesian_product()` and `cons_tuples()`, which improves their performance in fold, foreach, and iterator consumers derived from them. 
+## 0.6.3 + - Add iterator adaptor `.positions(predicate)` by @tmccombs +## 0.6.2 + - Add function `process_results` which can “lift” a function of the regular values of an iterator so that it can process the `Ok` values from an iterator of `Results` instead, by @shepmaster + - Add iterator method `.concat()` which combines all iterator elements into a single collection using the `Extend` trait, by @srijs +## 0.6.1 + - Better size hint testing and subsequent size hint bugfixes by @rkarp. Fixes bugs in product, `interleave_shortest` size hints. + - New iterator method `.all_equal()` by @phimuemue +## 0.6.0 + - Deprecated names were removed in favour of their replacements + - `.flatten()` does not implement double ended iteration anymore + - `.fold_while()` uses `&mut self` and returns `FoldWhile`, for composability #168 + - `.foreach()` and `.fold1()` use `self`, like `.fold()` does. + - `.combinations(0)` now produces a single empty vector. #174 +## 0.5.10 + - Add itertools method `.kmerge_by()` (and corresponding free function) + - Relaxed trait requirement of `.kmerge()` and `.minmax()` to PartialOrd. +## 0.5.9 + - Add multipeek method `.reset_peek()` + - Add categories +## 0.5.8 + - Add iterator adaptor `.peeking_take_while()` and its trait `PeekingNext`. +## 0.5.7 + - Add iterator adaptor `.with_position()` + - Fix multipeek's performance for long peeks by using `VecDeque`. +## 0.5.6 + - Add `.map_results()` +## 0.5.5 + - Many more adaptors now implement `Debug` + - Add free function constructor `repeat_n`. `RepeatN::new` is now deprecated. +## 0.5.4 + - Add infinite generator function `iterate`, that takes a seed and a closure. +## 0.5.3 + - Special-cased `.fold()` for flatten and put back. `.foreach()` now uses fold on the iterator, to pick up any iterator specific loop implementation. + - `.combinations(n)` asserts up front that `n != 0`, instead of running into an error on the second iterator element. +## 0.5.2 + - Add `.tuples::()` that iterates by two, three or four elements at a time (where `T` is a tuple type). + - Add `.tuple_windows::()` that iterates using a window of the two, three or four most recent elements. + - Add `.next_tuple::()` method, that picks the next two, three or four elements in one go. + - `.interleave()` now has an accurate size hint. +## 0.5.1 + - Workaround module/function name clash that made racer crash on completing itertools. Only internal changes needed. 
+## 0.5.0 + - [Release announcement](https://bluss.github.io/rust/2016/09/26/itertools-0.5.0/) + - Renamed: + - `combinations` is now `tuple_combinations` + - `combinations_n` to `combinations` + - `group_by_lazy`, `chunks_lazy` to `group_by`, `chunks` + - `Unfold::new` to `unfold()` + - `RepeatCall::new` to `repeat_call()` + - `Zip::new` to `multizip` + - `PutBack::new`, `PutBackN::new` to `put_back`, `put_back_n` + - `PutBack::with_value` is now a builder setter, not a constructor + - `MultiPeek::new`, `.multipeek()` to `multipeek()` + - `format` to `format_with` and `format_default` to `format` + - `.into_rc()` to `rciter` + - `Partition` enum is now `Either` + - Module reorganization: + - All iterator structs are under `itertools::structs` but also reexported to the top level, for backwards compatibility + - All free functions are reexported at the root, `itertools::free` will be removed in the next version + - Removed: + - `ZipSlices`, use `.zip()` instead + - `.enumerate_from()`, `ZipTrusted`, due to being unstable + - `.mend_slices()`, moved to crate `odds` + - Stride, StrideMut, moved to crate `odds` + - `linspace()`, moved to crate `itertools-num` + - `.sort_by()`, use `.sorted_by()` + - `.is_empty_hint()`, use `.size_hint()` + - `.dropn()`, use `.dropping()` + - `.map_fn()`, use `.map()` + - `.slice()`, use `.take()` / `.skip()` + - helper traits in `misc` + - `new` constructors on iterator structs, use `Itertools` trait or free functions instead + - `itertools::size_hint` is now private + - Behaviour changes: + - `format` and `format_with` helpers now panic if you try to format them more than once. + - `repeat_call` is not double ended anymore + - New features: + - tuple flattening iterator is constructible with `cons_tuples` + - itertools reexports `Either` from the `either` crate. `Either` is an iterator when `L, R` are. + - `MinMaxResult` now implements `Copy` and `Clone` + - `tuple_combinations` supports 1-4 tuples of combinations (previously just 2) +## 0.4.19 + - Add `.minmax_by()` + - Add `itertools::free::cloned` + - Add `itertools::free::rciter` + - Improve `.step(n)` slightly to take advantage of specialized Fuse better. +## 0.4.18 + - Only changes related to the "unstable" crate feature. This feature is more or less deprecated. + - Use deprecated warnings when unstable is enabled. `.enumerate_from()` will be removed imminently since it's using a deprecated libstd trait. +## 0.4.17 + - Fix bug in `.kmerge()` that caused it to often produce the wrong order #134 +## 0.4.16 + - Improve precision of the `interleave_shortest` adaptor's size hint (it is now computed exactly when possible). +## 0.4.15 + - Fixup on top of the workaround in 0.4.14. A function in `itertools::free` was removed by mistake and now it is added back again. +## 0.4.14 + - Workaround an upstream regression in a Rust nightly build that broke compilation of of `itertools::free::{interleave, merge}` +## 0.4.13 + - Add `.minmax()` and `.minmax_by_key()`, iterator methods for finding both minimum and maximum in one scan. + - Add `.format_default()`, a simpler version of `.format()` (lazy formatting for iterators). +## 0.4.12 + - Add `.zip_eq()`, an adaptor like `.zip()` except it ensures iterators of inequal length don't pass silently (instead it panics). + - Add `.fold_while()`, an iterator method that is a fold that can short-circuit. + - Add `.partition_map()`, an iterator method that can separate elements into two collections. 
+## 0.4.11 + - Add `.get()` for `Stride{,Mut}` and `.get_mut()` for `StrideMut` +## 0.4.10 + - Improve performance of `.kmerge()` +## 0.4.9 + - Add k-ary merge adaptor `.kmerge()` + - Fix a bug in `.islice()` with ranges `a..b` where a `> b`. +## 0.4.8 + - Implement `Clone`, `Debug` for `Linspace` +## 0.4.7 + - Add function `diff_with()` that compares two iterators + - Add `.combinations_n()`, an n-ary combinations iterator + - Add methods `PutBack::with_value` and `PutBack::into_parts`. +## 0.4.6 + - Add method `.sorted()` + - Add module `itertools::free` with free function variants of common iterator adaptors and methods. For example `enumerate(iterable)`, `rev(iterable)`, and so on. +## 0.4.5 + - Add `.flatten()` +## 0.4.4 + - Allow composing `ZipSlices` with itself +## 0.4.3 + - Write `iproduct!()` as a single expression; this allows temporary values in its arguments. +## 0.4.2 + - Add `.fold_options()` + - Require Rust 1.1 or later +## 0.4.1 + - Update `.dropping()` to take advantage of `.nth()` +## 0.4.0 + - `.merge()`, `.unique()` and `.dedup()` now perform better due to not using function pointers + - Add free functions `enumerate()` and `rev()` + - Breaking changes: + - Return types of `.merge()` and `.merge_by()` renamed and changed + - Method `Merge::new` removed + - `.merge_by()` now takes a closure that returns bool. + - Return type of `.dedup()` changed + - Return type of `.mend_slices()` changed + - Return type of `.unique()` changed + - Removed function `times()`, struct `Times`: use a range instead + - Removed deprecated macro `icompr!()` + - Removed deprecated `FnMap` and method `.fn_map()`: use `.map_fn()` + - `.interleave_shortest()` is no longer guaranteed to act like fused +## 0.3.25 + - Rename `.sort_by()` to `.sorted_by()`. Old name is deprecated. + - Fix well-formedness warnings from RFC 1214, no user visible impact +## 0.3.24 + - Improve performance of `.merge()`'s ordering function slightly +## 0.3.23 + - Added `.chunks()`, similar to (and based on) `.group_by_lazy()`. + - Tweak linspace to match numpy.linspace and make it double ended. +## 0.3.22 + - Added `ZipSlices`, a fast zip for slices +## 0.3.21 + - Remove `Debug` impl for `Format`, it will have different use later +## 0.3.20 + - Optimize `.group_by_lazy()` +## 0.3.19 + - Added `.group_by_lazy()`, a possibly nonallocating group by + - Added `.format()`, a nonallocating formatting helper for iterators + - Remove uses of `RandomAccessIterator` since it has been deprecated in Rust. +## 0.3.17 + - Added (adopted) `Unfold` from Rust +## 0.3.16 + - Added adaptors `.unique()`, `.unique_by()` +## 0.3.15 + - Added method `.sort_by()` +## 0.3.14 + - Added adaptor `.while_some()` +## 0.3.13 + - Added adaptor `.interleave_shortest()` + - Added adaptor `.pad_using()` +## 0.3.11 + - Added `assert_equal` function +## 0.3.10 + - Bugfix `.combinations()` `size_hint`. +## 0.3.8 + - Added source `RepeatCall` +## 0.3.7 + - Added adaptor `PutBackN` + - Added adaptor `.combinations()` +## 0.3.6 + - Added `itertools::partition`, partition a sequence in place based on a predicate. + - Deprecate `icompr!()` with no replacement. +## 0.3.5 + - `.map_fn()` replaces deprecated `.fn_map()`. 
+## 0.3.4 + - `.take_while_ref()` *by-ref adaptor* + - `.coalesce()` *adaptor* + - `.mend_slices()` *adaptor* +## 0.3.3 + - `.dropping_back()` *method* + - `.fold1()` *method* + - `.is_empty_hint()` *method* diff --git a/vendor/itertools/CONTRIBUTING.md b/vendor/itertools/CONTRIBUTING.md new file mode 100644 index 00000000000000..1dbf6f59dd546b --- /dev/null +++ b/vendor/itertools/CONTRIBUTING.md @@ -0,0 +1,189 @@ +# Contributing to itertools + +We use stable Rust only. +Please check the minimum version of Rust we use in `Cargo.toml`. + +_If you are proposing a major change to CI or a new iterator adaptor for this crate, +then **please first file an issue** describing your proposal._ +[Usual concerns about new methods](https://github.com/rust-itertools/itertools/issues/413#issuecomment-657670781). + +To pass CI tests successfully, your code must be free of "compiler warnings" and "clippy warnings" and be "rustfmt" formatted. + +Note that small PRs are easier to review and therefore are more easily merged. + +## Write a new method/adaptor for `Itertools` trait +In general, the code logic should be tested with [quickcheck](https://crates.io/crates/quickcheck) tests in `tests/quick.rs` +which allow us to test properties about the code with randomly generated inputs. + +### Behind `use_std`/`use_alloc` feature? +If it needs the "std" (such as using hashes) then it should be behind the `use_std` feature, +or if it requires heap allocation (such as using vectors) then it should be behind the `use_alloc` feature. +Otherwise it should be able to run in `no_std` context. + +This mostly applies to your new module, each import from it, and to your new `Itertools` method. + +### Pick the right receiver +`self`, `&mut self` or `&self`? From [#710](https://github.com/rust-itertools/itertools/pull/710): + +- Take by value when: + - It transfers ownership to another iterator type, such as `filter`, `map`... + - It consumes the iterator completely, such as `count`, `last`, `max`... +- Mutably borrow when it consumes only part of the iterator, such as `find`, `all`, `try_collect`... +- Immutably borrow when there is no change, such as `size_hint`. + +### Laziness +Iterators are [lazy](https://doc.rust-lang.org/std/iter/index.html#laziness): + +- structs of iterator adaptors should have `#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]` ; +- structs of iterators should have `#[must_use = "iterators are lazy and do nothing unless consumed"]`. + +Those behaviors are **tested** in `tests/laziness.rs`. + +## Specialize `Iterator` methods +It might be more performant to specialize some methods. +However, each specialization should be thoroughly tested. + +Correctly specializing methods can be difficult, and _we do not require that you do it on your initial PR_. + +Most of the time, we want specializations of: + +- [`size_hint`](https://doc.rust-lang.org/std/iter/trait.Iterator.html#method.size_hint): + It mostly allows allocation optimizations. + When always exact, it also enables to implement `ExactSizeIterator`. + See our private module `src/size_hint.rs` for helpers. +- [`fold`](https://doc.rust-lang.org/std/iter/trait.Iterator.html#method.fold) + might make iteration faster than calling `next` repeatedly. 
+- [`count`](https://doc.rust-lang.org/std/iter/trait.Iterator.html#method.count), + [`last`](https://doc.rust-lang.org/std/iter/trait.Iterator.html#method.last), + [`nth`](https://doc.rust-lang.org/std/iter/trait.Iterator.html#method.nth) + as we might be able to avoid iterating on every item with `next`. + +Additionally, + +- `for_each`, `reduce`, `max/min[_by[_key]]` and `partition` all rely on `fold` so you should specialize it instead. +- `all`, `any`, `find`, `find_map`, `cmp`, `partial_cmp`, `eq`, `ne`, `lt`, `le`, `gt`, `ge` and `position` all rely (by default) on `try_fold` + which we can not specialize on stable rust, so you might want to wait it stabilizes + or specialize each of them. +- `DoubleEndedIterator::{nth_back, rfold, rfind}`: similar reasoning. + +An adaptor might use the inner iterator specializations for its own specializations. + +They are **tested** in `tests/specializations.rs` and **benchmarked** in `benches/specializations.rs` +(build those benchmarks is slow so you might want to temporarily remove the ones you do not want to measure). + +## Additional implementations +### The [`Debug`](https://doc.rust-lang.org/std/fmt/trait.Debug.html) implementation +All our iterators should implement `Debug`. + +When one of the field is not debuggable (such as _functions_), you must not derive `Debug`. +Instead, manually implement it and _ignore this field_ in our helper macro `debug_fmt_fields`. + +

+4 examples (click to expand)
+
+```rust
+use std::fmt;
+
+/* ===== Simple derive. ===== */
+#[derive(Debug)]
+struct Name1<I> {
+    iter: I,
+}
+
+/* ===== With an unclonable field. ===== */
+struct Name2<I, F> {
+    iter: I,
+    func: F,
+}
+
+// No `F: Debug` bound and the field `func` is ignored.
+impl<I: fmt::Debug, F> fmt::Debug for Name2<I, F> {
+    // it defines the `fmt` function from a struct name and the fields you want to debug.
+    debug_fmt_fields!(Name2, iter);
+}
+
+/* ===== With an unclonable field, but another bound to add. ===== */
+struct Name3<I: Iterator, F> {
+    iter: I,
+    item: Option<I::Item>,
+    func: F,
+}
+
+// Same about `F` and `func`, similar about `I` but we must add the `I::Item: Debug` bound.
+impl<I: Iterator + fmt::Debug, F> fmt::Debug for Name3<I, F>
+where
+    I::Item: fmt::Debug,
+{
+    debug_fmt_fields!(Name3, iter, item);
+}
+
+/* ===== With an unclonable field for which we can provide some information. ===== */
+struct Name4<I, F> {
+    iter: I,
+    func: Option<F>,
+}
+
+// If ignoring a field is not good enough, implement Debug fully manually.
+impl<I: fmt::Debug, F> fmt::Debug for Name4<I, F> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        let func = if self.func.is_some() { "Some(_)" } else { "None" };
+        f.debug_struct("Name4")
+            .field("iter", &self.iter)
+            .field("func", &func)
+            .finish()
+    }
+}
+```
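As an illustrative sketch of the guidance above, a minimal adaptor can combine the `#[must_use]` laziness attribute, a derived `Debug`/`Clone`, and an exact `size_hint` specialization. The adaptor name `EveryOther` and its behaviour are hypothetical and serve only as an example; they are not part of itertools or of this vendored file.

```rust
// Hypothetical adaptor: yields every other element of the inner iterator.
#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
#[derive(Debug, Clone)]
pub struct EveryOther<I> {
    iter: I,
}

impl<I: Iterator> Iterator for EveryOther<I> {
    type Item = I::Item;

    fn next(&mut self) -> Option<I::Item> {
        let item = self.iter.next();
        // Drop the element that follows, so only every other item is yielded.
        self.iter.next();
        item
    }

    fn size_hint(&self) -> (usize, Option<usize>) {
        // Half of the inner hint, rounded up. If the inner hint is exact,
        // this one is exact too, which would also justify ExactSizeIterator.
        let (lo, hi) = self.iter.size_hint();
        (lo / 2 + lo % 2, hi.map(|h| h / 2 + h % 2))
    }
}

fn main() {
    let collected: Vec<_> = EveryOther { iter: 1..=6 }.collect();
    assert_eq!(collected, [1, 3, 5]);
}
```

Deriving `Debug` and `Clone` is enough here because `I` is the only field; an adaptor holding a closure would instead follow the manual-implementation patterns shown in the examples above.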
+ +### When/How to implement [`Clone`](https://doc.rust-lang.org/std/clone/trait.Clone.html) +All our iterators should implement `Clone` when possible. + +Note that a mutable reference is never clonable so `struct Name<'a, I: 'a> { iter: &'a mut I }` can not implement `Clone`. + +Derive `Clone` on a generic struct adds the bound `Clone` on each generic parameter. +It might be an issue in which case you should manually implement it with our helper macro `clone_fields` (it defines the `clone` function calling `clone` on each field) and be careful about the bounds. + +### When to implement [`std::iter::FusedIterator`](https://doc.rust-lang.org/std/iter/trait.FusedIterator.html) +This trait should be implemented _by all iterators that always return `None` after returning `None` once_, because it allows to optimize `Iterator::fuse()`. + +The conditions on which it should be implemented are usually the ones from the `Iterator` implementation, eventually refined to ensure it behaves in a fused way. + +### When to implement [`ExactSizeIterator`](https://doc.rust-lang.org/std/iter/trait.ExactSizeIterator.html) +_When we are always able to return an exact non-overflowing length._ + +Therefore, we do not implement it on adaptors that makes the iterator longer as the resulting length could overflow. + +One should not override `ExactSizeIterator::len` method but rely on an exact `Iterator::size_hint` implementation, meaning it returns `(length, Some(length))` (unless you could make `len` more performant than the default). + +The conditions on which it should be implemented are usually the ones from the `Iterator` implementation, probably refined to ensure the size hint is exact. + +### When to implement [`DoubleEndedIterator`](https://doc.rust-lang.org/std/iter/trait.DoubleEndedIterator.html) +When the iterator structure allows to handle _iterating on both fronts simultaneously_. +The iteration might stop in the middle when both fronts meet. + +The conditions on which it should be implemented are usually the ones from the `Iterator` implementation, probably refined to ensure we can iterate on both fronts simultaneously. + +### When to implement [`itertools::PeekingNext`](https://docs.rs/itertools/latest/itertools/trait.PeekingNext.html) +TODO + +This is currently **tested** in `tests/test_std.rs`. + +## About lending iterators +TODO + + +## Other notes +No guideline about using `#[inline]` yet. + +### `.fold` / `.for_each` / `.try_fold` / `.try_for_each` +In the Rust standard library, it's quite common for `fold` to be implemented in terms of `try_fold`. But it's not something we do yet because we can not specialize `try_fold` methods yet (it uses the unstable `Try`). + +From [#781](https://github.com/rust-itertools/itertools/pull/781), the general rule to follow is something like this: + +- If you need to completely consume an iterator: + - Use `fold` if you need an _owned_ access to an accumulator. + - Use `for_each` otherwise. +- If you need to partly consume an iterator, the same applies with `try_` versions: + - Use `try_fold` if you need an _owned_ access to an accumulator. + - Use `try_for_each` otherwise. diff --git a/vendor/itertools/Cargo.lock b/vendor/itertools/Cargo.lock new file mode 100644 index 00000000000000..d2183c2a4bc1cb --- /dev/null +++ b/vendor/itertools/Cargo.lock @@ -0,0 +1,740 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. 
+version = 3 + +[[package]] +name = "aho-corasick" +version = "1.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916" +dependencies = [ + "memchr", +] + +[[package]] +name = "anes" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299" + +[[package]] +name = "atty" +version = "0.2.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8" +dependencies = [ + "hermit-abi", + "libc", + "winapi", +] + +[[package]] +name = "autocfg" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0c4b4d0bd25bd0b74681c0ad21497610ce1b7c91b1022cd21c80c6fbdd9476b0" + +[[package]] +name = "bitflags" +version = "1.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" + +[[package]] +name = "bumpalo" +version = "3.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "79296716171880943b8470b5f8d03aa55eb2e645a4874bdbb28adb49162e012c" + +[[package]] +name = "cast" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" + +[[package]] +name = "cfg-if" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" + +[[package]] +name = "ciborium" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42e69ffd6f0917f5c029256a24d0161db17cea3997d185db0d35926308770f0e" +dependencies = [ + "ciborium-io", + "ciborium-ll", + "serde", +] + +[[package]] +name = "ciborium-io" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05afea1e0a06c9be33d539b876f1ce3692f4afea2cb41f740e7743225ed1c757" + +[[package]] +name = "ciborium-ll" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "57663b653d948a338bfb3eeba9bb2fd5fcfaecb9e199e87e1eda4d9e8b240fd9" +dependencies = [ + "ciborium-io", + "half", +] + +[[package]] +name = "clap" +version = "3.2.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4ea181bf566f71cb9a5d17a59e1871af638180a18fb0035c92ae62b705207123" +dependencies = [ + "bitflags", + "clap_lex", + "indexmap", + "textwrap", +] + +[[package]] +name = "clap_lex" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2850f2f5a82cbf437dd5af4d49848fbdfc27c157c3d010345776f952765261c5" +dependencies = [ + "os_str_bytes", +] + +[[package]] +name = "criterion" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e7c76e09c1aae2bc52b3d2f29e13c6572553b30c4aa1b8a49fd70de6412654cb" +dependencies = [ + "anes", + "atty", + "cast", + "ciborium", + "clap", + "criterion-plot", + "itertools 0.10.5", + "lazy_static", + "num-traits", + "oorandom", + "plotters", + "rayon", + "regex", + "serde", + "serde_derive", + "serde_json", + "tinytemplate", + "walkdir", +] + +[[package]] +name = "criterion-plot" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"6b50826342786a51a89e2da3a28f1c32b06e387201bc2d19791f622c673706b1" +dependencies = [ + "cast", + "itertools 0.10.5", +] + +[[package]] +name = "crossbeam-deque" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "613f8cc01fe9cf1a3eb3d7f488fd2fa8388403e97039e2f73692932e291a770d" +dependencies = [ + "crossbeam-epoch", + "crossbeam-utils", +] + +[[package]] +name = "crossbeam-epoch" +version = "0.9.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b82ac4a3c2ca9c3460964f020e1402edd5753411d7737aa39c3714ad1b5420e" +dependencies = [ + "crossbeam-utils", +] + +[[package]] +name = "crossbeam-utils" +version = "0.8.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "248e3bacc7dc6baa3b21e405ee045c3047101a49145e7e9eca583ab4c2ca5345" + +[[package]] +name = "crunchy" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7" + +[[package]] +name = "either" +version = "1.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a47c1c47d2f5964e29c61246e81db715514cd532db6b5116a25ea3c03d6780a2" + +[[package]] +name = "getrandom" +version = "0.1.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8fc3cb4d91f53b50155bdcfd23f6a4c39ae1969c2ae85982b135750cccaf5fce" +dependencies = [ + "cfg-if", + "libc", + "wasi", +] + +[[package]] +name = "half" +version = "2.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6dd08c532ae367adf81c312a4580bc67f1d0fe8bc9c460520283f4c0ff277888" +dependencies = [ + "cfg-if", + "crunchy", +] + +[[package]] +name = "hashbrown" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" + +[[package]] +name = "hermit-abi" +version = "0.1.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33" +dependencies = [ + "libc", +] + +[[package]] +name = "indexmap" +version = "1.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99" +dependencies = [ + "autocfg", + "hashbrown", +] + +[[package]] +name = "itertools" +version = "0.10.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473" +dependencies = [ + "either", +] + +[[package]] +name = "itertools" +version = "0.13.0" +dependencies = [ + "criterion", + "either", + "paste", + "permutohedron", + "quickcheck", + "rand", +] + +[[package]] +name = "itoa" +version = "1.0.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "49f1f14873335454500d59611f1cf4a4b0f786f9ac11f4312a78e4cf2566695b" + +[[package]] +name = "js-sys" +version = "0.3.69" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "29c15563dc2726973df627357ce0c9ddddbea194836909d655df6a75d2cf296d" +dependencies = [ + "wasm-bindgen", +] + +[[package]] +name = "lazy_static" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" + +[[package]] +name = "libc" +version = "0.2.154" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "ae743338b92ff9146ce83992f766a31066a91a8c84a45e0e9f21e7cf6de6d346" + +[[package]] +name = "log" +version = "0.4.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "90ed8c1e510134f979dbc4f070f87d4313098b704861a105fe34231c70a3901c" + +[[package]] +name = "memchr" +version = "2.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c8640c5d730cb13ebd907d8d04b52f55ac9a2eec55b440c8892f40d56c76c1d" + +[[package]] +name = "num-traits" +version = "0.2.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841" +dependencies = [ + "autocfg", +] + +[[package]] +name = "once_cell" +version = "1.19.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92" + +[[package]] +name = "oorandom" +version = "11.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ab1bc2a289d34bd04a330323ac98a1b4bc82c9d9fcb1e66b63caa84da26b575" + +[[package]] +name = "os_str_bytes" +version = "6.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e2355d85b9a3786f481747ced0e0ff2ba35213a1f9bd406ed906554d7af805a1" + +[[package]] +name = "paste" +version = "1.0.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "57c0d7b74b563b49d38dae00a0c37d4d6de9b432382b2892f0574ddcae73fd0a" + +[[package]] +name = "permutohedron" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b687ff7b5da449d39e418ad391e5e08da53ec334903ddbb921db208908fc372c" + +[[package]] +name = "plotters" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d2c224ba00d7cadd4d5c660deaf2098e5e80e07846537c51f9cfa4be50c1fd45" +dependencies = [ + "num-traits", + "plotters-backend", + "plotters-svg", + "wasm-bindgen", + "web-sys", +] + +[[package]] +name = "plotters-backend" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9e76628b4d3a7581389a35d5b6e2139607ad7c75b17aed325f210aa91f4a9609" + +[[package]] +name = "plotters-svg" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "38f6d39893cca0701371e3c27294f09797214b86f1fb951b89ade8ec04e2abab" +dependencies = [ + "plotters-backend", +] + +[[package]] +name = "ppv-lite86" +version = "0.2.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" + +[[package]] +name = "proc-macro2" +version = "1.0.82" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ad3d49ab951a01fbaafe34f2ec74122942fe18a3f9814c3268f1bb72042131b" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "quickcheck" +version = "0.9.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a44883e74aa97ad63db83c4bf8ca490f02b2fc02f92575e720c8551e843c945f" +dependencies = [ + "rand", + "rand_core", +] + +[[package]] +name = "quote" +version = "1.0.36" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fa76aaf39101c457836aec0ce2316dbdc3ab723cdda1c6bd4e6ad4208acaca7" +dependencies = [ + "proc-macro2", +] + +[[package]] +name = "rand" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03" +dependencies = [ + "getrandom", + "libc", + "rand_chacha", + "rand_core", + "rand_hc", +] + +[[package]] +name = "rand_chacha" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f4c8ed856279c9737206bf725bf36935d8666ead7aa69b52be55af369d193402" +dependencies = [ + "ppv-lite86", + "rand_core", +] + +[[package]] +name = "rand_core" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19" +dependencies = [ + "getrandom", +] + +[[package]] +name = "rand_hc" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ca3129af7b92a17112d59ad498c6f81eaf463253766b90396d39ea7a39d6613c" +dependencies = [ + "rand_core", +] + +[[package]] +name = "rayon" +version = "1.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b418a60154510ca1a002a752ca9714984e21e4241e804d32555251faf8b78ffa" +dependencies = [ + "either", + "rayon-core", +] + +[[package]] +name = "rayon-core" +version = "1.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1465873a3dfdaa8ae7cb14b4383657caab0b3e8a0aa9ae8e04b044854c8dfce2" +dependencies = [ + "crossbeam-deque", + "crossbeam-utils", +] + +[[package]] +name = "regex" +version = "1.10.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c117dbdfde9c8308975b6a18d71f3f385c89461f7b3fb054288ecf2a2058ba4c" +dependencies = [ + "aho-corasick", + "memchr", + "regex-automata", + "regex-syntax", +] + +[[package]] +name = "regex-automata" +version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "86b83b8b9847f9bf95ef68afb0b8e6cdb80f498442f5179a29fad448fcc1eaea" +dependencies = [ + "aho-corasick", + "memchr", + "regex-syntax", +] + +[[package]] +name = "regex-syntax" +version = "0.8.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "adad44e29e4c806119491a7f06f03de4d1af22c3a680dd47f1e6e179439d1f56" + +[[package]] +name = "ryu" +version = "1.0.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f3cb5ba0dc43242ce17de99c180e96db90b235b8a9fdc9543c96d2209116bd9f" + +[[package]] +name = "same-file" +version = "1.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502" +dependencies = [ + "winapi-util", +] + +[[package]] +name = "serde" +version = "1.0.202" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "226b61a0d411b2ba5ff6d7f73a476ac4f8bb900373459cd00fab8512828ba395" +dependencies = [ + "serde_derive", +] + +[[package]] +name = "serde_derive" +version = "1.0.202" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6048858004bcff69094cd972ed40a32500f153bd3be9f716b2eed2e8217c4838" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "serde_json" +version = "1.0.117" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "455182ea6142b14f93f4bc5320a2b31c1f266b66a4a5c858b013302a5d8cbfc3" +dependencies = [ + "itoa", + "ryu", + "serde", +] + +[[package]] +name = "syn" +version = "2.0.63" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bf5be731623ca1a1fb7d8be6f261a3be6d3e2337b8a1f97be944d020c8fcb704" +dependencies = [ + 
"proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "textwrap" +version = "0.16.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "23d434d3f8967a09480fb04132ebe0a3e088c173e6d0ee7897abbdf4eab0f8b9" + +[[package]] +name = "tinytemplate" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "be4d6b5f19ff7664e8c98d03e2139cb510db9b0a60b55f8e8709b689d939b6bc" +dependencies = [ + "serde", + "serde_json", +] + +[[package]] +name = "unicode-ident" +version = "1.0.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b" + +[[package]] +name = "walkdir" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "29790946404f91d9c5d06f9874efddea1dc06c5efe94541a7d6863108e3a5e4b" +dependencies = [ + "same-file", + "winapi-util", +] + +[[package]] +name = "wasi" +version = "0.9.0+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" + +[[package]] +name = "wasm-bindgen" +version = "0.2.92" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4be2531df63900aeb2bca0daaaddec08491ee64ceecbee5076636a3b026795a8" +dependencies = [ + "cfg-if", + "wasm-bindgen-macro", +] + +[[package]] +name = "wasm-bindgen-backend" +version = "0.2.92" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "614d787b966d3989fa7bb98a654e369c762374fd3213d212cfc0251257e747da" +dependencies = [ + "bumpalo", + "log", + "once_cell", + "proc-macro2", + "quote", + "syn", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-macro" +version = "0.2.92" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1f8823de937b71b9460c0c34e25f3da88250760bec0ebac694b49997550d726" +dependencies = [ + "quote", + "wasm-bindgen-macro-support", +] + +[[package]] +name = "wasm-bindgen-macro-support" +version = "0.2.92" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e94f17b526d0a461a191c78ea52bbce64071ed5c04c9ffe424dcb38f74171bb7" +dependencies = [ + "proc-macro2", + "quote", + "syn", + "wasm-bindgen-backend", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-shared" +version = "0.2.92" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "af190c94f2773fdb3729c55b007a722abb5384da03bc0986df4c289bf5567e96" + +[[package]] +name = "web-sys" +version = "0.3.69" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77afa9a11836342370f4817622a2f0f418b134426d91a82dfb48f532d2ec13ef" +dependencies = [ + "js-sys", + "wasm-bindgen", +] + +[[package]] +name = "winapi" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" +dependencies = [ + "winapi-i686-pc-windows-gnu", + "winapi-x86_64-pc-windows-gnu", +] + +[[package]] +name = "winapi-i686-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" + +[[package]] +name = "winapi-util" +version = "0.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4d4cc384e1e73b93bafa6fb4f1df8c41695c8a91cf9c4c64358067d15a7b6c6b" +dependencies = [ + "windows-sys", +] 
+ +[[package]] +name = "winapi-x86_64-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" + +[[package]] +name = "windows-sys" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" +dependencies = [ + "windows-targets", +] + +[[package]] +name = "windows-targets" +version = "0.52.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6f0713a46559409d202e70e28227288446bf7841d3211583a4b53e3f6d96e7eb" +dependencies = [ + "windows_aarch64_gnullvm", + "windows_aarch64_msvc", + "windows_i686_gnu", + "windows_i686_gnullvm", + "windows_i686_msvc", + "windows_x86_64_gnu", + "windows_x86_64_gnullvm", + "windows_x86_64_msvc", +] + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.52.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7088eed71e8b8dda258ecc8bac5fb1153c5cffaf2578fc8ff5d61e23578d3263" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.52.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9985fd1504e250c615ca5f281c3f7a6da76213ebd5ccc9561496568a2752afb6" + +[[package]] +name = "windows_i686_gnu" +version = "0.52.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "88ba073cf16d5372720ec942a8ccbf61626074c6d4dd2e745299726ce8b89670" + +[[package]] +name = "windows_i686_gnullvm" +version = "0.52.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87f4261229030a858f36b459e748ae97545d6f1ec60e5e0d6a3d32e0dc232ee9" + +[[package]] +name = "windows_i686_msvc" +version = "0.52.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "db3c2bf3d13d5b658be73463284eaf12830ac9a26a90c717b7f771dfe97487bf" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.52.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4e4246f76bdeff09eb48875a0fd3e2af6aada79d409d33011886d3e1581517d9" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.52.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "852298e482cd67c356ddd9570386e2862b5673c85bd5f88df9ab6802b334c596" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.52.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bec47e5bfd1bff0eeaf6d8b485cc1074891a197ab4225d504cb7a1ab88b02bf0" diff --git a/vendor/itertools/Cargo.toml b/vendor/itertools/Cargo.toml new file mode 100644 index 00000000000000..21896fed739fef --- /dev/null +++ b/vendor/itertools/Cargo.toml @@ -0,0 +1,105 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies. +# +# If you are reading this file be aware that the original Cargo.toml +# will likely look very different (and much more reasonable). +# See Cargo.toml.orig for the original contents. + +[package] +edition = "2018" +rust-version = "1.43.1" +name = "itertools" +version = "0.13.0" +authors = ["bluss"] +description = "Extra iterator adaptors, iterator methods, free functions, and macros." 
+documentation = "https://docs.rs/itertools/" +readme = "README.md" +keywords = [ + "iterator", + "data-structure", + "zip", + "product", +] +categories = [ + "algorithms", + "rust-patterns", + "no-std", + "no-std::no-alloc", +] +license = "MIT OR Apache-2.0" +repository = "https://github.com/rust-itertools/itertools" + +[profile.bench] +debug = 2 + +[lib] +test = false +bench = false + +[[bench]] +name = "tuple_combinations" +harness = false + +[[bench]] +name = "tuples" +harness = false + +[[bench]] +name = "fold_specialization" +harness = false + +[[bench]] +name = "combinations_with_replacement" +harness = false + +[[bench]] +name = "tree_reduce" +harness = false + +[[bench]] +name = "bench1" +harness = false + +[[bench]] +name = "combinations" +harness = false + +[[bench]] +name = "powerset" +harness = false + +[[bench]] +name = "specializations" +harness = false + +[dependencies.either] +version = "1.0" +default-features = false + +[dev-dependencies.criterion] +version = "0.4.0" + +[dev-dependencies.paste] +version = "1.0.0" + +[dev-dependencies.permutohedron] +version = "0.2" + +[dev-dependencies.quickcheck] +version = "0.9" +default_features = false + +[dev-dependencies.rand] +version = "0.7" + +[features] +default = ["use_std"] +use_alloc = [] +use_std = [ + "use_alloc", + "either/use_std", +] diff --git a/vendor/itertools/LICENSE-APACHE b/vendor/itertools/LICENSE-APACHE new file mode 100644 index 00000000000000..16fe87b06e802f --- /dev/null +++ b/vendor/itertools/LICENSE-APACHE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/vendor/itertools/LICENSE-MIT b/vendor/itertools/LICENSE-MIT new file mode 100644 index 00000000000000..9203baa055d41d --- /dev/null +++ b/vendor/itertools/LICENSE-MIT @@ -0,0 +1,25 @@ +Copyright (c) 2015 + +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. diff --git a/vendor/itertools/README.md b/vendor/itertools/README.md new file mode 100644 index 00000000000000..982ef5dbe6b7e3 --- /dev/null +++ b/vendor/itertools/README.md @@ -0,0 +1,33 @@ +# Itertools + +Extra iterator adaptors, functions and macros. + +Please read the [API documentation here](https://docs.rs/itertools/). + +How to use with Cargo: + +```toml +[dependencies] +itertools = "0.13.0" +``` + +How to use in your crate: + +```rust +use itertools::Itertools; +``` + +## How to contribute +If you're not sure what to work on, try checking the [help wanted](https://github.com/rust-itertools/itertools/issues?q=is%3Aopen+is%3Aissue+label%3A%22help+wanted%22) label. + +See our [CONTRIBUTING.md](https://github.com/rust-itertools/itertools/blob/master/CONTRIBUTING.md) for a detailed guide. + +## License + +Dual-licensed to be compatible with the Rust project. + +Licensed under the Apache License, Version 2.0 +https://www.apache.org/licenses/LICENSE-2.0 or the MIT license +https://opensource.org/licenses/MIT, at your +option. This file may not be copied, modified, or distributed +except according to those terms. diff --git a/vendor/itertools/benches/bench1.rs b/vendor/itertools/benches/bench1.rs new file mode 100644 index 00000000000000..53e77b0da46a00 --- /dev/null +++ b/vendor/itertools/benches/bench1.rs @@ -0,0 +1,767 @@ +use criterion::{black_box, criterion_group, criterion_main, BatchSize, Criterion}; +use itertools::free::cloned; +use itertools::iproduct; +use itertools::Itertools; + +use std::cmp; +use std::iter::repeat; +use std::ops::{Add, Range}; + +fn slice_iter(c: &mut Criterion) { + let xs: Vec<_> = repeat(1i32).take(20).collect(); + + c.bench_function("slice iter", move |b| { + b.iter(|| { + for elt in xs.iter() { + black_box(elt); + } + }) + }); +} + +fn slice_iter_rev(c: &mut Criterion) { + let xs: Vec<_> = repeat(1i32).take(20).collect(); + + c.bench_function("slice iter rev", move |b| { + b.iter(|| { + for elt in xs.iter().rev() { + black_box(elt); + } + }) + }); +} + +fn zip_default_zip(c: &mut Criterion) { + let xs = vec![0; 1024]; + let ys = vec![0; 768]; + let xs = black_box(xs); + let ys = black_box(ys); + + c.bench_function("zip default zip", move |b| { + b.iter(|| { + for (&x, &y) in xs.iter().zip(&ys) { + black_box(x); + black_box(y); + } + }) + }); +} + +fn zipdot_i32_default_zip(c: &mut Criterion) { + let xs = vec![2; 1024]; + let ys = vec![2; 768]; + let xs = black_box(xs); + let ys = black_box(ys); + + c.bench_function("zipdot i32 default zip", move |b| { + b.iter(|| { + let mut s = 0; + for (&x, &y) in xs.iter().zip(&ys) { + s += x * y; + } + s + }) + }); +} + +fn zipdot_f32_default_zip(c: &mut Criterion) { + let xs = vec![2f32; 1024]; + let ys = vec![2f32; 768]; + let xs = black_box(xs); + let ys = black_box(ys); + + c.bench_function("zipdot f32 default zip", move |b| { + b.iter(|| { + let mut s = 0.; + for (&x, &y) in xs.iter().zip(&ys) { + s += x * y; + } + s + }) + }); +} + +fn zip_default_zip3(c: &mut Criterion) { + let xs = vec![0; 1024]; + let ys = vec![0; 768]; + let zs = vec![0; 766]; + let xs = black_box(xs); + let ys = black_box(ys); + let zs = black_box(zs); + + c.bench_function("zip default zip3", move |b| { + b.iter(|| { + for ((&x, &y), &z) in xs.iter().zip(&ys).zip(&zs) { + 
black_box(x); + black_box(y); + black_box(z); + } + }) + }); +} + +fn zip_slices_ziptuple(c: &mut Criterion) { + let xs = vec![0; 1024]; + let ys = vec![0; 768]; + + c.bench_function("zip slices ziptuple", move |b| { + b.iter(|| { + let xs = black_box(&xs); + let ys = black_box(&ys); + for (&x, &y) in itertools::multizip((xs, ys)) { + black_box(x); + black_box(y); + } + }) + }); +} + +fn zip_checked_counted_loop(c: &mut Criterion) { + let xs = vec![0; 1024]; + let ys = vec![0; 768]; + let xs = black_box(xs); + let ys = black_box(ys); + + c.bench_function("zip checked counted loop", move |b| { + b.iter(|| { + // Must slice to equal lengths, and then bounds checks are eliminated! + let len = cmp::min(xs.len(), ys.len()); + let xs = &xs[..len]; + let ys = &ys[..len]; + + for i in 0..len { + let x = xs[i]; + let y = ys[i]; + black_box(x); + black_box(y); + } + }) + }); +} + +fn zipdot_i32_checked_counted_loop(c: &mut Criterion) { + let xs = vec![2; 1024]; + let ys = vec![2; 768]; + let xs = black_box(xs); + let ys = black_box(ys); + + c.bench_function("zipdot i32 checked counted loop", move |b| { + b.iter(|| { + // Must slice to equal lengths, and then bounds checks are eliminated! + let len = cmp::min(xs.len(), ys.len()); + let xs = &xs[..len]; + let ys = &ys[..len]; + + let mut s = 0i32; + + for i in 0..len { + s += xs[i] * ys[i]; + } + s + }) + }); +} + +fn zipdot_f32_checked_counted_loop(c: &mut Criterion) { + let xs = vec![2f32; 1024]; + let ys = vec![2f32; 768]; + let xs = black_box(xs); + let ys = black_box(ys); + + c.bench_function("zipdot f32 checked counted loop", move |b| { + b.iter(|| { + // Must slice to equal lengths, and then bounds checks are eliminated! + let len = cmp::min(xs.len(), ys.len()); + let xs = &xs[..len]; + let ys = &ys[..len]; + + let mut s = 0.; + + for i in 0..len { + s += xs[i] * ys[i]; + } + s + }) + }); +} + +fn zipdot_f32_checked_counted_unrolled_loop(c: &mut Criterion) { + let xs = vec![2f32; 1024]; + let ys = vec![2f32; 768]; + let xs = black_box(xs); + let ys = black_box(ys); + + c.bench_function("zipdot f32 checked counted unrolled loop", move |b| { + b.iter(|| { + // Must slice to equal lengths, and then bounds checks are eliminated! 
+ let len = cmp::min(xs.len(), ys.len()); + let mut xs = &xs[..len]; + let mut ys = &ys[..len]; + + let mut s = 0.; + let (mut p0, mut p1, mut p2, mut p3, mut p4, mut p5, mut p6, mut p7) = + (0., 0., 0., 0., 0., 0., 0., 0.); + + // how to unroll and have bounds checks eliminated (by cristicbz) + // split sum into eight parts to enable vectorization (by bluss) + while xs.len() >= 8 { + p0 += xs[0] * ys[0]; + p1 += xs[1] * ys[1]; + p2 += xs[2] * ys[2]; + p3 += xs[3] * ys[3]; + p4 += xs[4] * ys[4]; + p5 += xs[5] * ys[5]; + p6 += xs[6] * ys[6]; + p7 += xs[7] * ys[7]; + + xs = &xs[8..]; + ys = &ys[8..]; + } + s += p0 + p4; + s += p1 + p5; + s += p2 + p6; + s += p3 + p7; + + for i in 0..xs.len() { + s += xs[i] * ys[i]; + } + s + }) + }); +} + +fn zip_unchecked_counted_loop(c: &mut Criterion) { + let xs = vec![0; 1024]; + let ys = vec![0; 768]; + let xs = black_box(xs); + let ys = black_box(ys); + + c.bench_function("zip unchecked counted loop", move |b| { + b.iter(|| { + let len = cmp::min(xs.len(), ys.len()); + for i in 0..len { + unsafe { + let x = *xs.get_unchecked(i); + let y = *ys.get_unchecked(i); + black_box(x); + black_box(y); + } + } + }) + }); +} + +fn zipdot_i32_unchecked_counted_loop(c: &mut Criterion) { + let xs = vec![2; 1024]; + let ys = vec![2; 768]; + let xs = black_box(xs); + let ys = black_box(ys); + + c.bench_function("zipdot i32 unchecked counted loop", move |b| { + b.iter(|| { + let len = cmp::min(xs.len(), ys.len()); + let mut s = 0i32; + for i in 0..len { + unsafe { + let x = *xs.get_unchecked(i); + let y = *ys.get_unchecked(i); + s += x * y; + } + } + s + }) + }); +} + +fn zipdot_f32_unchecked_counted_loop(c: &mut Criterion) { + let xs = vec![2.; 1024]; + let ys = vec![2.; 768]; + let xs = black_box(xs); + let ys = black_box(ys); + + c.bench_function("zipdot f32 unchecked counted loop", move |b| { + b.iter(|| { + let len = cmp::min(xs.len(), ys.len()); + let mut s = 0f32; + for i in 0..len { + unsafe { + let x = *xs.get_unchecked(i); + let y = *ys.get_unchecked(i); + s += x * y; + } + } + s + }) + }); +} + +fn zip_unchecked_counted_loop3(c: &mut Criterion) { + let xs = vec![0; 1024]; + let ys = vec![0; 768]; + let zs = vec![0; 766]; + let xs = black_box(xs); + let ys = black_box(ys); + let zs = black_box(zs); + + c.bench_function("zip unchecked counted loop3", move |b| { + b.iter(|| { + let len = cmp::min(xs.len(), cmp::min(ys.len(), zs.len())); + for i in 0..len { + unsafe { + let x = *xs.get_unchecked(i); + let y = *ys.get_unchecked(i); + let z = *zs.get_unchecked(i); + black_box(x); + black_box(y); + black_box(z); + } + } + }) + }); +} + +fn chunk_by_lazy_1(c: &mut Criterion) { + let mut data = vec![0; 1024]; + for (index, elt) in data.iter_mut().enumerate() { + *elt = index / 10; + } + + let data = black_box(data); + + c.bench_function("chunk by lazy 1", move |b| { + b.iter(|| { + for (_key, chunk) in &data.iter().chunk_by(|elt| **elt) { + for elt in chunk { + black_box(elt); + } + } + }) + }); +} + +fn chunk_by_lazy_2(c: &mut Criterion) { + let mut data = vec![0; 1024]; + for (index, elt) in data.iter_mut().enumerate() { + *elt = index / 2; + } + + let data = black_box(data); + + c.bench_function("chunk by lazy 2", move |b| { + b.iter(|| { + for (_key, chunk) in &data.iter().chunk_by(|elt| **elt) { + for elt in chunk { + black_box(elt); + } + } + }) + }); +} + +fn slice_chunks(c: &mut Criterion) { + let data = vec![0; 1024]; + + let data = black_box(data); + let sz = black_box(10); + + c.bench_function("slice chunks", move |b| { + b.iter(|| { + for chunk in 
data.chunks(sz) { + for elt in chunk { + black_box(elt); + } + } + }) + }); +} + +fn chunks_lazy_1(c: &mut Criterion) { + let data = vec![0; 1024]; + + let data = black_box(data); + let sz = black_box(10); + + c.bench_function("chunks lazy 1", move |b| { + b.iter(|| { + for chunk in &data.iter().chunks(sz) { + for elt in chunk { + black_box(elt); + } + } + }) + }); +} + +fn equal(c: &mut Criterion) { + let data = vec![7; 1024]; + let l = data.len(); + let alpha = black_box(&data[1..]); + let beta = black_box(&data[..l - 1]); + + c.bench_function("equal", move |b| b.iter(|| itertools::equal(alpha, beta))); +} + +fn merge_default(c: &mut Criterion) { + let mut data1 = vec![0; 1024]; + let mut data2 = vec![0; 800]; + let mut x = 0; + + #[allow(clippy::explicit_counter_loop, clippy::unused_enumerate_index)] + for (_, elt) in data1.iter_mut().enumerate() { + *elt = x; + x += 1; + } + + let mut y = 0; + for (i, elt) in data2.iter_mut().enumerate() { + *elt += y; + if i % 3 == 0 { + y += 3; + } else { + y += 0; + } + } + let data1 = black_box(data1); + let data2 = black_box(data2); + + c.bench_function("merge default", move |b| { + b.iter(|| data1.iter().merge(&data2).count()) + }); +} + +fn merge_by_cmp(c: &mut Criterion) { + let mut data1 = vec![0; 1024]; + let mut data2 = vec![0; 800]; + let mut x = 0; + + #[allow(clippy::explicit_counter_loop, clippy::unused_enumerate_index)] + for (_, elt) in data1.iter_mut().enumerate() { + *elt = x; + x += 1; + } + + let mut y = 0; + for (i, elt) in data2.iter_mut().enumerate() { + *elt += y; + if i % 3 == 0 { + y += 3; + } else { + y += 0; + } + } + let data1 = black_box(data1); + let data2 = black_box(data2); + + c.bench_function("merge by cmp", move |b| { + b.iter(|| data1.iter().merge_by(&data2, PartialOrd::le).count()) + }); +} + +fn merge_by_lt(c: &mut Criterion) { + let mut data1 = vec![0; 1024]; + let mut data2 = vec![0; 800]; + let mut x = 0; + + #[allow(clippy::explicit_counter_loop, clippy::unused_enumerate_index)] + for (_, elt) in data1.iter_mut().enumerate() { + *elt = x; + x += 1; + } + + let mut y = 0; + for (i, elt) in data2.iter_mut().enumerate() { + *elt += y; + if i % 3 == 0 { + y += 3; + } else { + y += 0; + } + } + let data1 = black_box(data1); + let data2 = black_box(data2); + + c.bench_function("merge by lt", move |b| { + b.iter(|| data1.iter().merge_by(&data2, |a, b| a <= b).count()) + }); +} + +fn kmerge_default(c: &mut Criterion) { + let mut data1 = vec![0; 1024]; + let mut data2 = vec![0; 800]; + let mut x = 0; + + #[allow(clippy::explicit_counter_loop, clippy::unused_enumerate_index)] + for (_, elt) in data1.iter_mut().enumerate() { + *elt = x; + x += 1; + } + + let mut y = 0; + for (i, elt) in data2.iter_mut().enumerate() { + *elt += y; + if i % 3 == 0 { + y += 3; + } else { + y += 0; + } + } + let data1 = black_box(data1); + let data2 = black_box(data2); + let its = &[data1.iter(), data2.iter()]; + + c.bench_function("kmerge default", move |b| { + b.iter(|| its.iter().cloned().kmerge().count()) + }); +} + +fn kmerge_tenway(c: &mut Criterion) { + let mut data = vec![0; 10240]; + + let mut state = 1729u16; + fn rng(state: &mut u16) -> u16 { + let new = state.wrapping_mul(31421).wrapping_add(6927); + *state = new; + new + } + + for elt in &mut data { + *elt = rng(&mut state); + } + + let mut chunks = Vec::new(); + let mut rest = &mut data[..]; + while !rest.is_empty() { + let chunk_len = 1 + rng(&mut state) % 512; + let chunk_len = cmp::min(rest.len(), chunk_len as usize); + let (fst, tail) = { rest }.split_at_mut(chunk_len); + 
fst.sort(); + chunks.push(fst.iter().cloned()); + rest = tail; + } + + // println!("Chunk lengths: {}", chunks.iter().format_with(", ", |elt, f| f(&elt.len()))); + + c.bench_function("kmerge tenway", move |b| { + b.iter(|| chunks.iter().cloned().kmerge().count()) + }); +} + +fn fast_integer_sum(iter: I) -> I::Item +where + I: IntoIterator, + I::Item: Default + Add, +{ + iter.into_iter().fold(<_>::default(), |x, y| x + y) +} + +fn step_vec_2(c: &mut Criterion) { + let v = vec![0; 1024]; + + c.bench_function("step vec 2", move |b| { + b.iter(|| fast_integer_sum(cloned(v.iter().step_by(2)))) + }); +} + +fn step_vec_10(c: &mut Criterion) { + let v = vec![0; 1024]; + + c.bench_function("step vec 10", move |b| { + b.iter(|| fast_integer_sum(cloned(v.iter().step_by(10)))) + }); +} + +fn step_range_2(c: &mut Criterion) { + let v = black_box(0..1024); + + c.bench_function("step range 2", move |b| { + b.iter(|| fast_integer_sum(v.clone().step_by(2))) + }); +} + +fn step_range_10(c: &mut Criterion) { + let v = black_box(0..1024); + + c.bench_function("step range 10", move |b| { + b.iter(|| fast_integer_sum(v.clone().step_by(10))) + }); +} + +fn vec_iter_mut_partition(c: &mut Criterion) { + let data = std::iter::repeat(-1024i32..1024) + .take(256) + .flatten() + .collect_vec(); + c.bench_function("vec iter mut partition", move |b| { + b.iter_batched( + || data.clone(), + |mut data| { + black_box(itertools::partition(black_box(&mut data), |n| *n >= 0)); + }, + BatchSize::LargeInput, + ) + }); +} + +fn cartesian_product_iterator(c: &mut Criterion) { + let xs = vec![0; 16]; + + c.bench_function("cartesian product iterator", move |b| { + b.iter(|| { + let mut sum = 0; + for (&x, &y, &z) in iproduct!(&xs, &xs, &xs) { + sum += x; + sum += y; + sum += z; + } + sum + }) + }); +} + +fn multi_cartesian_product_iterator(c: &mut Criterion) { + let xs = [vec![0; 16], vec![0; 16], vec![0; 16]]; + + c.bench_function("multi cartesian product iterator", move |b| { + b.iter(|| { + let mut sum = 0; + for x in xs.iter().multi_cartesian_product() { + sum += x[0]; + sum += x[1]; + sum += x[2]; + } + sum + }) + }); +} + +fn cartesian_product_nested_for(c: &mut Criterion) { + let xs = vec![0; 16]; + + c.bench_function("cartesian product nested for", move |b| { + b.iter(|| { + let mut sum = 0; + for &x in &xs { + for &y in &xs { + for &z in &xs { + sum += x; + sum += y; + sum += z; + } + } + } + sum + }) + }); +} + +fn all_equal(c: &mut Criterion) { + let mut xs = vec![0; 5_000_000]; + xs.extend(vec![1; 5_000_000]); + + c.bench_function("all equal", move |b| b.iter(|| xs.iter().all_equal())); +} + +fn all_equal_for(c: &mut Criterion) { + let mut xs = vec![0; 5_000_000]; + xs.extend(vec![1; 5_000_000]); + + c.bench_function("all equal for", move |b| { + b.iter(|| { + for &x in &xs { + if x != xs[0] { + return false; + } + } + true + }) + }); +} + +fn all_equal_default(c: &mut Criterion) { + let mut xs = vec![0; 5_000_000]; + xs.extend(vec![1; 5_000_000]); + + c.bench_function("all equal default", move |b| { + b.iter(|| xs.iter().dedup().nth(1).is_none()) + }); +} + +const PERM_COUNT: usize = 6; + +fn permutations_iter(c: &mut Criterion) { + struct NewIterator(Range); + + impl Iterator for NewIterator { + type Item = usize; + + fn next(&mut self) -> Option { + self.0.next() + } + } + + c.bench_function("permutations iter", move |b| { + b.iter( + || { + for _ in NewIterator(0..PERM_COUNT).permutations(PERM_COUNT) {} + }, + ) + }); +} + +fn permutations_range(c: &mut Criterion) { + c.bench_function("permutations range", move |b| 
{ + b.iter(|| for _ in (0..PERM_COUNT).permutations(PERM_COUNT) {}) + }); +} + +fn permutations_slice(c: &mut Criterion) { + let v = (0..PERM_COUNT).collect_vec(); + + c.bench_function("permutations slice", move |b| { + b.iter(|| for _ in v.as_slice().iter().permutations(PERM_COUNT) {}) + }); +} + +criterion_group!( + benches, + slice_iter, + slice_iter_rev, + zip_default_zip, + zipdot_i32_default_zip, + zipdot_f32_default_zip, + zip_default_zip3, + zip_slices_ziptuple, + zip_checked_counted_loop, + zipdot_i32_checked_counted_loop, + zipdot_f32_checked_counted_loop, + zipdot_f32_checked_counted_unrolled_loop, + zip_unchecked_counted_loop, + zipdot_i32_unchecked_counted_loop, + zipdot_f32_unchecked_counted_loop, + zip_unchecked_counted_loop3, + chunk_by_lazy_1, + chunk_by_lazy_2, + slice_chunks, + chunks_lazy_1, + equal, + merge_default, + merge_by_cmp, + merge_by_lt, + kmerge_default, + kmerge_tenway, + step_vec_2, + step_vec_10, + step_range_2, + step_range_10, + vec_iter_mut_partition, + cartesian_product_iterator, + multi_cartesian_product_iterator, + cartesian_product_nested_for, + all_equal, + all_equal_for, + all_equal_default, + permutations_iter, + permutations_range, + permutations_slice, +); +criterion_main!(benches); diff --git a/vendor/itertools/benches/combinations.rs b/vendor/itertools/benches/combinations.rs new file mode 100644 index 00000000000000..42a452111ea8d8 --- /dev/null +++ b/vendor/itertools/benches/combinations.rs @@ -0,0 +1,117 @@ +use criterion::{black_box, criterion_group, criterion_main, Criterion}; +use itertools::Itertools; + +// approximate 100_000 iterations for each combination +const N1: usize = 100_000; +const N2: usize = 448; +const N3: usize = 86; +const N4: usize = 41; +const N14: usize = 21; + +fn comb_for1(c: &mut Criterion) { + c.bench_function("comb for1", move |b| { + b.iter(|| { + for i in 0..N1 { + black_box(vec![i]); + } + }) + }); +} + +fn comb_for2(c: &mut Criterion) { + c.bench_function("comb for2", move |b| { + b.iter(|| { + for i in 0..N2 { + for j in (i + 1)..N2 { + black_box(vec![i, j]); + } + } + }) + }); +} + +fn comb_for3(c: &mut Criterion) { + c.bench_function("comb for3", move |b| { + b.iter(|| { + for i in 0..N3 { + for j in (i + 1)..N3 { + for k in (j + 1)..N3 { + black_box(vec![i, j, k]); + } + } + } + }) + }); +} + +fn comb_for4(c: &mut Criterion) { + c.bench_function("comb for4", move |b| { + b.iter(|| { + for i in 0..N4 { + for j in (i + 1)..N4 { + for k in (j + 1)..N4 { + for l in (k + 1)..N4 { + black_box(vec![i, j, k, l]); + } + } + } + } + }) + }); +} + +fn comb_c1(c: &mut Criterion) { + c.bench_function("comb c1", move |b| { + b.iter(|| { + for combo in (0..N1).combinations(1) { + black_box(combo); + } + }) + }); +} + +fn comb_c2(c: &mut Criterion) { + c.bench_function("comb c2", move |b| { + b.iter(|| { + for combo in (0..N2).combinations(2) { + black_box(combo); + } + }) + }); +} + +fn comb_c3(c: &mut Criterion) { + c.bench_function("comb c3", move |b| { + b.iter(|| { + for combo in (0..N3).combinations(3) { + black_box(combo); + } + }) + }); +} + +fn comb_c4(c: &mut Criterion) { + c.bench_function("comb c4", move |b| { + b.iter(|| { + for combo in (0..N4).combinations(4) { + black_box(combo); + } + }) + }); +} + +fn comb_c14(c: &mut Criterion) { + c.bench_function("comb c14", move |b| { + b.iter(|| { + for combo in (0..N14).combinations(14) { + black_box(combo); + } + }) + }); +} + +criterion_group!( + benches, comb_for1, comb_for2, comb_for3, comb_for4, comb_c1, comb_c2, comb_c3, comb_c4, + comb_c14, +); 
+criterion_main!(benches); diff --git a/vendor/itertools/benches/combinations_with_replacement.rs b/vendor/itertools/benches/combinations_with_replacement.rs new file mode 100644 index 00000000000000..8e4fa3dc3b1bce --- /dev/null +++ b/vendor/itertools/benches/combinations_with_replacement.rs @@ -0,0 +1,40 @@ +use criterion::{black_box, criterion_group, criterion_main, Criterion}; +use itertools::Itertools; + +fn comb_replacement_n10_k5(c: &mut Criterion) { + c.bench_function("comb replacement n10k5", move |b| { + b.iter(|| { + for i in (0..10).combinations_with_replacement(5) { + black_box(i); + } + }) + }); +} + +fn comb_replacement_n5_k10(c: &mut Criterion) { + c.bench_function("comb replacement n5 k10", move |b| { + b.iter(|| { + for i in (0..5).combinations_with_replacement(10) { + black_box(i); + } + }) + }); +} + +fn comb_replacement_n10_k10(c: &mut Criterion) { + c.bench_function("comb replacement n10 k10", move |b| { + b.iter(|| { + for i in (0..10).combinations_with_replacement(10) { + black_box(i); + } + }) + }); +} + +criterion_group!( + benches, + comb_replacement_n10_k5, + comb_replacement_n5_k10, + comb_replacement_n10_k10, +); +criterion_main!(benches); diff --git a/vendor/itertools/benches/fold_specialization.rs b/vendor/itertools/benches/fold_specialization.rs new file mode 100644 index 00000000000000..b44f3472146307 --- /dev/null +++ b/vendor/itertools/benches/fold_specialization.rs @@ -0,0 +1,75 @@ +#![allow(unstable_name_collisions)] + +use criterion::{criterion_group, criterion_main, Criterion}; +use itertools::Itertools; + +struct Unspecialized(I); + +impl Iterator for Unspecialized +where + I: Iterator, +{ + type Item = I::Item; + + #[inline(always)] + fn next(&mut self) -> Option { + self.0.next() + } + + #[inline(always)] + fn size_hint(&self) -> (usize, Option) { + self.0.size_hint() + } +} + +mod specialization { + use super::*; + + pub mod intersperse { + use super::*; + + pub fn external(c: &mut Criterion) { + let arr = [1; 1024]; + + c.bench_function("external", move |b| { + b.iter(|| { + let mut sum = 0; + for &x in arr.iter().intersperse(&0) { + sum += x; + } + sum + }) + }); + } + + pub fn internal_specialized(c: &mut Criterion) { + let arr = [1; 1024]; + + c.bench_function("internal specialized", move |b| { + b.iter(|| { + #[allow(clippy::unnecessary_fold)] + arr.iter().intersperse(&0).fold(0, |acc, x| acc + x) + }) + }); + } + + pub fn internal_unspecialized(c: &mut Criterion) { + let arr = [1; 1024]; + + c.bench_function("internal unspecialized", move |b| { + b.iter(|| { + #[allow(clippy::unnecessary_fold)] + Unspecialized(arr.iter().intersperse(&0)).fold(0, |acc, x| acc + x) + }) + }); + } + } +} + +criterion_group!( + benches, + specialization::intersperse::external, + specialization::intersperse::internal_specialized, + specialization::intersperse::internal_unspecialized, +); +criterion_main!(benches); diff --git a/vendor/itertools/benches/powerset.rs b/vendor/itertools/benches/powerset.rs new file mode 100644 index 00000000000000..018333d316c1e6 --- /dev/null +++ b/vendor/itertools/benches/powerset.rs @@ -0,0 +1,97 @@ +use criterion::{black_box, criterion_group, criterion_main, Criterion}; +use itertools::Itertools; + +// Keep aggregate generated elements the same, regardless of powerset length. 
+const TOTAL_ELEMENTS: usize = 1 << 12; +const fn calc_iters(n: usize) -> usize { + TOTAL_ELEMENTS / (1 << n) +} + +fn powerset_n(c: &mut Criterion, n: usize) { + let id = format!("powerset {}", n); + c.bench_function(id.as_str(), move |b| { + b.iter(|| { + for _ in 0..calc_iters(n) { + for elt in (0..n).powerset() { + black_box(elt); + } + } + }) + }); +} + +fn powerset_n_fold(c: &mut Criterion, n: usize) { + let id = format!("powerset {} fold", n); + c.bench_function(id.as_str(), move |b| { + b.iter(|| { + for _ in 0..calc_iters(n) { + (0..n).powerset().fold(0, |s, elt| s + black_box(elt).len()); + } + }) + }); +} + +fn powerset_0(c: &mut Criterion) { + powerset_n(c, 0); +} + +fn powerset_1(c: &mut Criterion) { + powerset_n(c, 1); +} + +fn powerset_2(c: &mut Criterion) { + powerset_n(c, 2); +} + +fn powerset_4(c: &mut Criterion) { + powerset_n(c, 4); +} + +fn powerset_8(c: &mut Criterion) { + powerset_n(c, 8); +} + +fn powerset_12(c: &mut Criterion) { + powerset_n(c, 12); +} + +fn powerset_0_fold(c: &mut Criterion) { + powerset_n_fold(c, 0); +} + +fn powerset_1_fold(c: &mut Criterion) { + powerset_n_fold(c, 1); +} + +fn powerset_2_fold(c: &mut Criterion) { + powerset_n_fold(c, 2); +} + +fn powerset_4_fold(c: &mut Criterion) { + powerset_n_fold(c, 4); +} + +fn powerset_8_fold(c: &mut Criterion) { + powerset_n_fold(c, 8); +} + +fn powerset_12_fold(c: &mut Criterion) { + powerset_n_fold(c, 12); +} + +criterion_group!( + benches, + powerset_0, + powerset_1, + powerset_2, + powerset_4, + powerset_8, + powerset_12, + powerset_0_fold, + powerset_1_fold, + powerset_2_fold, + powerset_4_fold, + powerset_8_fold, + powerset_12_fold, +); +criterion_main!(benches); diff --git a/vendor/itertools/benches/specializations.rs b/vendor/itertools/benches/specializations.rs new file mode 100644 index 00000000000000..18039fc4edef13 --- /dev/null +++ b/vendor/itertools/benches/specializations.rs @@ -0,0 +1,667 @@ +#![allow(unstable_name_collisions, clippy::incompatible_msrv)] + +use criterion::black_box; +use criterion::BenchmarkId; +use itertools::Itertools; + +const NTH_INPUTS: &[usize] = &[0, 1, 2, 4, 8]; + +/// Create multiple functions each defining a benchmark group about iterator methods. +/// +/// Each created group has functions with the following ids: +/// +/// - `next`, `size_hint`, `count`, `last`, `nth`, `collect`, `fold` +/// - and when marked as `DoubleEndedIterator`: `next_back`, `nth_back`, `rfold` +/// - and when marked as `ExactSizeIterator`: `len` +/// +/// Note that this macro can be called only once. +macro_rules! 
bench_specializations { + ( + $( + $name:ident { + $($extra:ident)* + {$( + $init:stmt; + )*} + $iterator:expr + } + )* + ) => { + $( + #[allow(unused_must_use)] + fn $name(c: &mut ::criterion::Criterion) { + let mut bench_group = c.benchmark_group(stringify!($name)); + $( + $init + )* + let bench_first_its = { + let mut bench_idx = 0; + [0; 1000].map(|_| { + let mut it = $iterator; + if bench_idx != 0 { + it.nth(bench_idx - 1); + } + bench_idx += 1; + it + }) + }; + bench_specializations!(@Iterator bench_group bench_first_its: $iterator); + $( + bench_specializations!(@$extra bench_group bench_first_its: $iterator); + )* + bench_group.finish(); + } + )* + + ::criterion::criterion_group!(benches, $($name, )*); + ::criterion::criterion_main!(benches); + }; + + (@Iterator $group:ident $first_its:ident: $iterator:expr) => { + $group.bench_function("next", |bencher| bencher.iter(|| { + let mut it = $iterator; + while let Some(x) = it.next() { + black_box(x); + } + })); + $group.bench_function("size_hint", |bencher| bencher.iter(|| { + $first_its.iter().for_each(|it| { + black_box(it.size_hint()); + }) + })); + $group.bench_function("count", |bencher| bencher.iter(|| { + $iterator.count() + })); + $group.bench_function("last", |bencher| bencher.iter(|| { + $iterator.last() + })); + for n in NTH_INPUTS { + $group.bench_with_input(BenchmarkId::new("nth", n), n, |bencher, n| bencher.iter(|| { + for start in 0_usize..10 { + let mut it = $iterator; + if let Some(s) = start.checked_sub(1) { + black_box(it.nth(s)); + } + while let Some(x) = it.nth(*n) { + black_box(x); + } + } + })); + } + $group.bench_function("collect", |bencher| bencher.iter(|| { + $iterator.collect::>() + })); + $group.bench_function("fold", |bencher| bencher.iter(|| { + $iterator.fold((), |(), x| { + black_box(x); + }) + })); + }; + + (@DoubleEndedIterator $group:ident $_first_its:ident: $iterator:expr) => { + $group.bench_function("next_back", |bencher| bencher.iter(|| { + let mut it = $iterator; + while let Some(x) = it.next_back() { + black_box(x); + } + })); + for n in NTH_INPUTS { + $group.bench_with_input(BenchmarkId::new("nth_back", n), n, |bencher, n| bencher.iter(|| { + for start in 0_usize..10 { + let mut it = $iterator; + if let Some(s) = start.checked_sub(1) { + black_box(it.nth_back(s)); + } + while let Some(x) = it.nth_back(*n) { + black_box(x); + } + } + })); + } + $group.bench_function("rfold", |bencher| bencher.iter(|| { + $iterator.rfold((), |(), x| { + black_box(x); + }) + })); + }; + + (@ExactSizeIterator $group:ident $first_its:ident: $_iterator:expr) => { + $group.bench_function("len", |bencher| bencher.iter(|| { + $first_its.iter().for_each(|it| { + black_box(it.len()); + }) + })); + }; +} + +// Usage examples: +// - For `ZipLongest::fold` only: +// cargo bench --bench specializations zip_longest/fold +// - For `.combinations(k).nth(8)`: +// cargo bench --bench specializations combinations./nth/8 +bench_specializations! 
{ + interleave { + { + let v1 = black_box(vec![0; 1024]); + let v2 = black_box(vec![0; 768]); + } + v1.iter().interleave(&v2) + } + interleave_shortest { + { + let v1 = black_box(vec![0; 1024]); + let v2 = black_box(vec![0; 768]); + } + v1.iter().interleave_shortest(&v2) + } + batching { + { + let v = black_box(vec![0; 1024]); + } + v.iter().batching(Iterator::next) + } + tuple_windows1 { + ExactSizeIterator + { + let v = black_box(vec![0; 1024]); + } + v.iter().tuple_windows::<(_,)>() + } + tuple_windows2 { + ExactSizeIterator + { + let v = black_box(vec![0; 1024]); + } + v.iter().tuple_windows::<(_, _)>() + } + tuple_windows3 { + ExactSizeIterator + { + let v = black_box(vec![0; 1024]); + } + v.iter().tuple_windows::<(_, _, _)>() + } + tuple_windows4 { + ExactSizeIterator + { + let v = black_box(vec![0; 1024]); + } + v.iter().tuple_windows::<(_, _, _, _)>() + } + circular_tuple_windows1 { + ExactSizeIterator + { + let v = black_box(vec![0; 1024]); + } + v.iter().circular_tuple_windows::<(_,)>() + } + circular_tuple_windows2 { + ExactSizeIterator + { + let v = black_box(vec![0; 1024]); + } + v.iter().circular_tuple_windows::<(_, _)>() + } + circular_tuple_windows3 { + ExactSizeIterator + { + let v = black_box(vec![0; 1024]); + } + v.iter().circular_tuple_windows::<(_, _, _)>() + } + circular_tuple_windows4 { + ExactSizeIterator + { + let v = black_box(vec![0; 1024]); + } + v.iter().circular_tuple_windows::<(_, _, _, _)>() + } + tuples1 { + ExactSizeIterator + { + let v = black_box(vec![0; 1024]); + } + v.iter().tuples::<(_,)>() + } + tuples2 { + ExactSizeIterator + { + let v = black_box(vec![0; 1024]); + } + v.iter().tuples::<(_, _)>() + } + tuples3 { + ExactSizeIterator + { + let v = black_box(vec![0; 1024]); + } + v.iter().tuples::<(_, _, _)>() + } + tuples4 { + ExactSizeIterator + { + let v = black_box(vec![0; 1024]); + } + v.iter().tuples::<(_, _, _, _)>() + } + tuple_buffer { + ExactSizeIterator + { + let v = black_box(vec![0; 11]); + // Short but the buffer can't have 12 or more elements. + } + { + let mut it = v.iter().tuples::<(_, _, _, _, _, _, _, _, _, _, _, _)>(); + it.next(); // No element but it fills the buffer. 
+ it.into_buffer() + } + } + cartesian_product { + { + let v = black_box(vec![0; 16]); + } + itertools::iproduct!(&v, &v, &v) + } + multi_cartesian_product { + { + let vs = black_box([0; 3].map(|_| vec![0; 16])); + } + vs.iter().multi_cartesian_product() + } + coalesce { + { + let v = black_box(vec![0; 1024]); + } + v.iter().coalesce(|x, y| if x == y { Ok(x) } else { Err((x, y)) }) + } + dedup { + { + let v = black_box((0..32).flat_map(|x| [x; 32]).collect_vec()); + } + v.iter().dedup() + } + dedup_by { + { + let v = black_box((0..32).flat_map(|x| [x; 32]).collect_vec()); + } + v.iter().dedup_by(PartialOrd::ge) + } + dedup_with_count { + { + let v = black_box((0..32).flat_map(|x| [x; 32]).collect_vec()); + } + v.iter().dedup_with_count() + } + dedup_by_with_count { + { + let v = black_box((0..32).flat_map(|x| [x; 32]).collect_vec()); + } + v.iter().dedup_by_with_count(PartialOrd::ge) + } + duplicates { + DoubleEndedIterator + { + let v = black_box((0..32).cycle().take(1024).collect_vec()); + } + v.iter().duplicates() + } + duplicates_by { + DoubleEndedIterator + { + let v = black_box((0..1024).collect_vec()); + } + v.iter().duplicates_by(|x| *x % 10) + } + unique { + DoubleEndedIterator + { + let v = black_box((0..32).cycle().take(1024).collect_vec()); + } + v.iter().unique() + } + unique_by { + DoubleEndedIterator + { + let v = black_box((0..1024).collect_vec()); + } + v.iter().unique_by(|x| *x % 50) + } + take_while_inclusive { + { + let v = black_box((0..1024).collect_vec()); + } + v.iter().take_while_inclusive(|x| **x < 1000) + } + pad_using { + DoubleEndedIterator + ExactSizeIterator + { + let v = black_box((0..1024).collect_vec()); + } + v.iter().copied().pad_using(2048, |i| 5 * i) + } + positions { + DoubleEndedIterator + { + let v = black_box((0..1024).collect_vec()); + } + v.iter().positions(|x| x % 5 == 0) + } + update { + DoubleEndedIterator + ExactSizeIterator + { + let v = black_box((0_i32..1024).collect_vec()); + } + v.iter().copied().update(|x| *x *= 7) + } + tuple_combinations1 { + { + let v = black_box(vec![0; 1024]); + } + v.iter().tuple_combinations::<(_,)>() + } + tuple_combinations2 { + { + let v = black_box(vec![0; 64]); + } + v.iter().tuple_combinations::<(_, _)>() + } + tuple_combinations3 { + { + let v = black_box(vec![0; 64]); + } + v.iter().tuple_combinations::<(_, _, _)>() + } + tuple_combinations4 { + { + let v = black_box(vec![0; 64]); + } + v.iter().tuple_combinations::<(_, _, _, _)>() + } + intersperse { + { + let v = black_box(vec![0; 1024]); + let n = black_box(0); + } + v.iter().intersperse(&n) + } + intersperse_with { + { + let v = black_box(vec![0; 1024]); + let n = black_box(0); + } + v.iter().intersperse_with(|| &n) + } + combinations1 { + { + let v = black_box(vec![0; 1792]); + } + v.iter().combinations(1) + } + combinations2 { + { + let v = black_box(vec![0; 60]); + } + v.iter().combinations(2) + } + combinations3 { + { + let v = black_box(vec![0; 23]); + } + v.iter().combinations(3) + } + combinations4 { + { + let v = black_box(vec![0; 16]); + } + v.iter().combinations(4) + } + combinations_with_replacement1 { + { + let v = black_box(vec![0; 4096]); + } + v.iter().combinations_with_replacement(1) + } + combinations_with_replacement2 { + { + let v = black_box(vec![0; 90]); + } + v.iter().combinations_with_replacement(2) + } + combinations_with_replacement3 { + { + let v = black_box(vec![0; 28]); + } + v.iter().combinations_with_replacement(3) + } + combinations_with_replacement4 { + { + let v = black_box(vec![0; 16]); + } + 
v.iter().combinations_with_replacement(4) + } + permutations1 { + { + let v = black_box(vec![0; 1024]); + } + v.iter().permutations(1) + } + permutations2 { + { + let v = black_box(vec![0; 36]); + } + v.iter().permutations(2) + } + permutations3 { + { + let v = black_box(vec![0; 12]); + } + v.iter().permutations(3) + } + permutations4 { + { + let v = black_box(vec![0; 8]); + } + v.iter().permutations(4) + } + powerset { + { + let v = black_box(vec![0; 10]); + } + v.iter().powerset() + } + while_some { + {} + (0..) + .map(black_box) + .map(|i| char::from_digit(i, 16)) + .while_some() + } + with_position { + ExactSizeIterator + { + let v = black_box((0..10240).collect_vec()); + } + v.iter().with_position() + } + zip_longest { + DoubleEndedIterator + ExactSizeIterator + { + let xs = black_box(vec![0; 1024]); + let ys = black_box(vec![0; 768]); + } + xs.iter().zip_longest(ys.iter()) + } + zip_eq { + ExactSizeIterator + { + let v = black_box(vec![0; 1024]); + } + v.iter().zip_eq(v.iter().rev()) + } + multizip { + DoubleEndedIterator + ExactSizeIterator + { + let v1 = black_box(vec![0; 1024]); + let v2 = black_box(vec![0; 768]); + let v3 = black_box(vec![0; 2048]); + } + itertools::multizip((&v1, &v2, &v3)) + } + izip { + DoubleEndedIterator + ExactSizeIterator + { + let v1 = black_box(vec![0; 1024]); + let v2 = black_box(vec![0; 768]); + let v3 = black_box(vec![0; 2048]); + } + itertools::izip!(&v1, &v2, &v3) + } + put_back { + { + let v = black_box(vec![0; 1024]); + } + itertools::put_back(&v).with_value(black_box(&0)) + } + put_back_n { + { + let v1 = black_box(vec![0; 1024]); + let v2 = black_box(vec![0; 16]); + } + { + let mut it = itertools::put_back_n(&v1); + for n in &v2 { + it.put_back(n); + } + it + } + } + exactly_one_error { + ExactSizeIterator + { + let v = black_box(vec![0; 1024]); + } + // Use `at_most_one` would be similar. 
+ v.iter().exactly_one().unwrap_err() + } + multipeek { + ExactSizeIterator + { + let v = black_box(vec![0; 1024]); + let n = black_box(16); + } + { + let mut it = v.iter().multipeek(); + for _ in 0..n { + it.peek(); + } + it + } + } + peek_nth { + ExactSizeIterator + { + let v = black_box(vec![0; 1024]); + let n = black_box(16); + } + { + let mut it = itertools::peek_nth(&v); + it.peek_nth(n); + it + } + } + repeat_n { + DoubleEndedIterator + ExactSizeIterator + {} + itertools::repeat_n(black_box(0), black_box(1024)) + } + merge { + { + let v1 = black_box((0..1024).collect_vec()); + let v2 = black_box((0..768).collect_vec()); + } + v1.iter().merge(&v2) + } + merge_by { + { + let v1 = black_box((0..1024).collect_vec()); + let v2 = black_box((0..768).collect_vec()); + } + v1.iter().merge_by(&v2, PartialOrd::ge) + } + merge_join_by_ordering { + { + let v1 = black_box((0..1024).collect_vec()); + let v2 = black_box((0..768).collect_vec()); + } + v1.iter().merge_join_by(&v2, Ord::cmp) + } + merge_join_by_bool { + { + let v1 = black_box((0..1024).collect_vec()); + let v2 = black_box((0..768).collect_vec()); + } + v1.iter().merge_join_by(&v2, PartialOrd::ge) + } + kmerge { + { + let vs = black_box(vec![vec![0; 1024], vec![0; 256], vec![0; 768]]); + } + vs.iter().kmerge() + } + kmerge_by { + { + let vs = black_box(vec![vec![0; 1024], vec![0; 256], vec![0; 768]]); + } + vs.iter().kmerge_by(PartialOrd::ge) + } + map_into { + DoubleEndedIterator + ExactSizeIterator + { + let v = black_box(vec![0_u8; 1024]); + } + v.iter().copied().map_into::() + } + map_ok { + DoubleEndedIterator + ExactSizeIterator + { + let v = black_box((0_u32..1024) + .map(|x| if x % 2 == 1 { Err(x) } else { Ok(x) }) + .collect_vec()); + } + v.iter().copied().map_ok(|x| x + 1) + } + filter_ok { + { + let v = black_box((0_u32..1024) + .map(|x| if x % 2 == 1 { Err(x) } else { Ok(x) }) + .collect_vec()); + } + v.iter().copied().filter_ok(|x| x % 3 == 0) + } + filter_map_ok { + { + let v = black_box((0_u32..1024) + .map(|x| if x % 2 == 1 { Err(x) } else { Ok(x) }) + .collect_vec()); + } + v.iter().copied().filter_map_ok(|x| if x % 3 == 0 { Some(x + 1) } else { None }) + } + flatten_ok { + DoubleEndedIterator + { + let d = black_box(vec![0; 8]); + let v = black_box((0..512) + .map(|x| if x % 2 == 0 { Ok(&d) } else { Err(x) }) + .collect_vec()); + } + v.iter().copied().flatten_ok() + } +} diff --git a/vendor/itertools/benches/tree_reduce.rs b/vendor/itertools/benches/tree_reduce.rs new file mode 100644 index 00000000000000..051b148834815e --- /dev/null +++ b/vendor/itertools/benches/tree_reduce.rs @@ -0,0 +1,150 @@ +#![allow(deprecated)] + +use criterion::{criterion_group, criterion_main, Criterion}; +use itertools::{cloned, Itertools}; + +trait IterEx: Iterator { + // Another efficient implementation against which to compare, + // but needs `std` so is less desirable. + fn tree_reduce_vec(self, mut f: F) -> Option + where + F: FnMut(Self::Item, Self::Item) -> Self::Item, + Self: Sized, + { + let hint = self.size_hint().0; + let cap = std::mem::size_of::() * 8 - hint.leading_zeros() as usize; + let mut stack = Vec::with_capacity(cap); + self.enumerate().for_each(|(mut i, mut x)| { + while (i & 1) != 0 { + x = f(stack.pop().unwrap(), x); + i >>= 1; + } + stack.push(x); + }); + stack.into_iter().fold1(f) + } +} +impl IterEx for T {} + +macro_rules! 
def_benchs { + ($N:expr, + $FUN:ident, + $BENCH_NAME:ident, + ) => { + mod $BENCH_NAME { + use super::*; + + pub fn sum(c: &mut Criterion) { + let v: Vec = (0..$N).collect(); + + c.bench_function( + &(stringify!($BENCH_NAME).replace('_', " ") + " sum"), + move |b| b.iter(|| cloned(&v).$FUN(|x, y| x + y)), + ); + } + + pub fn complex_iter(c: &mut Criterion) { + let u = (3..).take($N / 2); + let v = (5..).take($N / 2); + let it = u.chain(v); + + c.bench_function( + &(stringify!($BENCH_NAME).replace('_', " ") + " complex iter"), + move |b| b.iter(|| it.clone().map(|x| x as f32).$FUN(f32::atan2)), + ); + } + + pub fn string_format(c: &mut Criterion) { + // This goes quadratic with linear `fold1`, so use a smaller + // size to not waste too much time in travis. The allocations + // in here are so expensive anyway that it'll still take + // way longer per iteration than the other two benchmarks. + let v: Vec = (0..($N / 4)).collect(); + + c.bench_function( + &(stringify!($BENCH_NAME).replace('_', " ") + " string format"), + move |b| { + b.iter(|| { + cloned(&v) + .map(|x| x.to_string()) + .$FUN(|x, y| format!("{} + {}", x, y)) + }) + }, + ); + } + } + + criterion_group!( + $BENCH_NAME, + $BENCH_NAME::sum, + $BENCH_NAME::complex_iter, + $BENCH_NAME::string_format, + ); + }; +} + +def_benchs! { + 10_000, + fold1, + fold1_10k, +} + +def_benchs! { + 10_000, + tree_reduce, + tree_reduce_stack_10k, +} + +def_benchs! { + 10_000, + tree_reduce_vec, + tree_reduce_vec_10k, +} + +def_benchs! { + 100, + fold1, + fold1_100, +} + +def_benchs! { + 100, + tree_reduce, + tree_reduce_stack_100, +} + +def_benchs! { + 100, + tree_reduce_vec, + tree_reduce_vec_100, +} + +def_benchs! { + 8, + fold1, + fold1_08, +} + +def_benchs! { + 8, + tree_reduce, + tree_reduce_stack_08, +} + +def_benchs! 
{ + 8, + tree_reduce_vec, + tree_reduce_vec_08, +} + +criterion_main!( + fold1_10k, + tree_reduce_stack_10k, + tree_reduce_vec_10k, + fold1_100, + tree_reduce_stack_100, + tree_reduce_vec_100, + fold1_08, + tree_reduce_stack_08, + tree_reduce_vec_08, +); diff --git a/vendor/itertools/benches/tuple_combinations.rs b/vendor/itertools/benches/tuple_combinations.rs new file mode 100644 index 00000000000000..4e26b282e853f7 --- /dev/null +++ b/vendor/itertools/benches/tuple_combinations.rs @@ -0,0 +1,113 @@ +use criterion::{black_box, criterion_group, criterion_main, Criterion}; +use itertools::Itertools; + +// approximate 100_000 iterations for each combination +const N1: usize = 100_000; +const N2: usize = 448; +const N3: usize = 86; +const N4: usize = 41; + +fn tuple_comb_for1(c: &mut Criterion) { + c.bench_function("tuple comb for1", move |b| { + b.iter(|| { + for i in 0..N1 { + black_box(i); + } + }) + }); +} + +fn tuple_comb_for2(c: &mut Criterion) { + c.bench_function("tuple comb for2", move |b| { + b.iter(|| { + for i in 0..N2 { + for j in (i + 1)..N2 { + black_box(i + j); + } + } + }) + }); +} + +fn tuple_comb_for3(c: &mut Criterion) { + c.bench_function("tuple comb for3", move |b| { + b.iter(|| { + for i in 0..N3 { + for j in (i + 1)..N3 { + for k in (j + 1)..N3 { + black_box(i + j + k); + } + } + } + }) + }); +} + +fn tuple_comb_for4(c: &mut Criterion) { + c.bench_function("tuple comb for4", move |b| { + b.iter(|| { + for i in 0..N4 { + for j in (i + 1)..N4 { + for k in (j + 1)..N4 { + for l in (k + 1)..N4 { + black_box(i + j + k + l); + } + } + } + } + }) + }); +} + +fn tuple_comb_c1(c: &mut Criterion) { + c.bench_function("tuple comb c1", move |b| { + b.iter(|| { + for (i,) in (0..N1).tuple_combinations() { + black_box(i); + } + }) + }); +} + +fn tuple_comb_c2(c: &mut Criterion) { + c.bench_function("tuple comb c2", move |b| { + b.iter(|| { + for (i, j) in (0..N2).tuple_combinations() { + black_box(i + j); + } + }) + }); +} + +fn tuple_comb_c3(c: &mut Criterion) { + c.bench_function("tuple comb c3", move |b| { + b.iter(|| { + for (i, j, k) in (0..N3).tuple_combinations() { + black_box(i + j + k); + } + }) + }); +} + +fn tuple_comb_c4(c: &mut Criterion) { + c.bench_function("tuple comb c4", move |b| { + b.iter(|| { + for (i, j, k, l) in (0..N4).tuple_combinations() { + black_box(i + j + k + l); + } + }) + }); +} + +criterion_group!( + benches, + tuple_comb_for1, + tuple_comb_for2, + tuple_comb_for3, + tuple_comb_for4, + tuple_comb_c1, + tuple_comb_c2, + tuple_comb_c3, + tuple_comb_c4, +); +criterion_main!(benches); diff --git a/vendor/itertools/benches/tuples.rs b/vendor/itertools/benches/tuples.rs new file mode 100644 index 00000000000000..2eca34712ad56e --- /dev/null +++ b/vendor/itertools/benches/tuples.rs @@ -0,0 +1,208 @@ +use criterion::{criterion_group, criterion_main, Criterion}; +use itertools::Itertools; + +fn s1(a: u32) -> u32 { + a +} + +fn s2(a: u32, b: u32) -> u32 { + a + b +} + +fn s3(a: u32, b: u32, c: u32) -> u32 { + a + b + c +} + +fn s4(a: u32, b: u32, c: u32, d: u32) -> u32 { + a + b + c + d +} + +fn sum_s1(s: &[u32]) -> u32 { + s1(s[0]) +} + +fn sum_s2(s: &[u32]) -> u32 { + s2(s[0], s[1]) +} + +fn sum_s3(s: &[u32]) -> u32 { + s3(s[0], s[1], s[2]) +} + +fn sum_s4(s: &[u32]) -> u32 { + s4(s[0], s[1], s[2], s[3]) +} + +fn sum_t1(s: &(&u32,)) -> u32 { + s1(*s.0) +} + +fn sum_t2(s: &(&u32, &u32)) -> u32 { + s2(*s.0, *s.1) +} + +fn sum_t3(s: &(&u32, &u32, &u32)) -> u32 { + s3(*s.0, *s.1, *s.2) +} + +fn sum_t4(s: &(&u32, &u32, &u32, &u32)) -> u32 { + s4(*s.0, *s.1, 
*s.2, *s.3) +} + +macro_rules! def_benchs { + ($N:expr; + $BENCH_GROUP:ident, + $TUPLE_FUN:ident, + $TUPLES:ident, + $TUPLE_WINDOWS:ident; + $SLICE_FUN:ident, + $CHUNKS:ident, + $WINDOWS:ident; + $FOR_CHUNKS:ident, + $FOR_WINDOWS:ident + ) => { + fn $FOR_CHUNKS(c: &mut Criterion) { + let v: Vec = (0..$N * 1_000).collect(); + let mut s = 0; + c.bench_function(&stringify!($FOR_CHUNKS).replace('_', " "), move |b| { + b.iter(|| { + let mut j = 0; + for _ in 0..1_000 { + s += $SLICE_FUN(&v[j..(j + $N)]); + j += $N; + } + s + }) + }); + } + + fn $FOR_WINDOWS(c: &mut Criterion) { + let v: Vec = (0..1_000).collect(); + let mut s = 0; + c.bench_function(&stringify!($FOR_WINDOWS).replace('_', " "), move |b| { + b.iter(|| { + for i in 0..(1_000 - $N) { + s += $SLICE_FUN(&v[i..(i + $N)]); + } + s + }) + }); + } + + fn $TUPLES(c: &mut Criterion) { + let v: Vec = (0..$N * 1_000).collect(); + let mut s = 0; + c.bench_function(&stringify!($TUPLES).replace('_', " "), move |b| { + b.iter(|| { + for x in v.iter().tuples() { + s += $TUPLE_FUN(&x); + } + s + }) + }); + } + + fn $CHUNKS(c: &mut Criterion) { + let v: Vec = (0..$N * 1_000).collect(); + let mut s = 0; + c.bench_function(&stringify!($CHUNKS).replace('_', " "), move |b| { + b.iter(|| { + for x in v.chunks($N) { + s += $SLICE_FUN(x); + } + s + }) + }); + } + + fn $TUPLE_WINDOWS(c: &mut Criterion) { + let v: Vec = (0..1_000).collect(); + let mut s = 0; + c.bench_function(&stringify!($TUPLE_WINDOWS).replace('_', " "), move |b| { + b.iter(|| { + for x in v.iter().tuple_windows() { + s += $TUPLE_FUN(&x); + } + s + }) + }); + } + + fn $WINDOWS(c: &mut Criterion) { + let v: Vec = (0..1_000).collect(); + let mut s = 0; + c.bench_function(&stringify!($WINDOWS).replace('_', " "), move |b| { + b.iter(|| { + for x in v.windows($N) { + s += $SLICE_FUN(x); + } + s + }) + }); + } + + criterion_group!( + $BENCH_GROUP, + $FOR_CHUNKS, + $FOR_WINDOWS, + $TUPLES, + $CHUNKS, + $TUPLE_WINDOWS, + $WINDOWS, + ); + }; +} + +def_benchs! { + 1; + benches_1, + sum_t1, + tuple_chunks_1, + tuple_windows_1; + sum_s1, + slice_chunks_1, + slice_windows_1; + for_chunks_1, + for_windows_1 +} + +def_benchs! { + 2; + benches_2, + sum_t2, + tuple_chunks_2, + tuple_windows_2; + sum_s2, + slice_chunks_2, + slice_windows_2; + for_chunks_2, + for_windows_2 +} + +def_benchs! { + 3; + benches_3, + sum_t3, + tuple_chunks_3, + tuple_windows_3; + sum_s3, + slice_chunks_3, + slice_windows_3; + for_chunks_3, + for_windows_3 +} + +def_benchs! 
{ + 4; + benches_4, + sum_t4, + tuple_chunks_4, + tuple_windows_4; + sum_s4, + slice_chunks_4, + slice_windows_4; + for_chunks_4, + for_windows_4 +} + +criterion_main!(benches_1, benches_2, benches_3, benches_4,); diff --git a/vendor/itertools/examples/iris.data b/vendor/itertools/examples/iris.data new file mode 100644 index 00000000000000..a3490e0e07dc9d --- /dev/null +++ b/vendor/itertools/examples/iris.data @@ -0,0 +1,150 @@ +5.1,3.5,1.4,0.2,Iris-setosa +4.9,3.0,1.4,0.2,Iris-setosa +4.7,3.2,1.3,0.2,Iris-setosa +4.6,3.1,1.5,0.2,Iris-setosa +5.0,3.6,1.4,0.2,Iris-setosa +5.4,3.9,1.7,0.4,Iris-setosa +4.6,3.4,1.4,0.3,Iris-setosa +5.0,3.4,1.5,0.2,Iris-setosa +4.4,2.9,1.4,0.2,Iris-setosa +4.9,3.1,1.5,0.1,Iris-setosa +5.4,3.7,1.5,0.2,Iris-setosa +4.8,3.4,1.6,0.2,Iris-setosa +4.8,3.0,1.4,0.1,Iris-setosa +4.3,3.0,1.1,0.1,Iris-setosa +5.8,4.0,1.2,0.2,Iris-setosa +5.7,4.4,1.5,0.4,Iris-setosa +5.4,3.9,1.3,0.4,Iris-setosa +5.1,3.5,1.4,0.3,Iris-setosa +5.7,3.8,1.7,0.3,Iris-setosa +5.1,3.8,1.5,0.3,Iris-setosa +5.4,3.4,1.7,0.2,Iris-setosa +5.1,3.7,1.5,0.4,Iris-setosa +4.6,3.6,1.0,0.2,Iris-setosa +5.1,3.3,1.7,0.5,Iris-setosa +4.8,3.4,1.9,0.2,Iris-setosa +5.0,3.0,1.6,0.2,Iris-setosa +5.0,3.4,1.6,0.4,Iris-setosa +5.2,3.5,1.5,0.2,Iris-setosa +5.2,3.4,1.4,0.2,Iris-setosa +4.7,3.2,1.6,0.2,Iris-setosa +4.8,3.1,1.6,0.2,Iris-setosa +5.4,3.4,1.5,0.4,Iris-setosa +5.2,4.1,1.5,0.1,Iris-setosa +5.5,4.2,1.4,0.2,Iris-setosa +4.9,3.1,1.5,0.1,Iris-setosa +5.0,3.2,1.2,0.2,Iris-setosa +5.5,3.5,1.3,0.2,Iris-setosa +4.9,3.1,1.5,0.1,Iris-setosa +4.4,3.0,1.3,0.2,Iris-setosa +5.1,3.4,1.5,0.2,Iris-setosa +5.0,3.5,1.3,0.3,Iris-setosa +4.5,2.3,1.3,0.3,Iris-setosa +4.4,3.2,1.3,0.2,Iris-setosa +5.0,3.5,1.6,0.6,Iris-setosa +5.1,3.8,1.9,0.4,Iris-setosa +4.8,3.0,1.4,0.3,Iris-setosa +5.1,3.8,1.6,0.2,Iris-setosa +4.6,3.2,1.4,0.2,Iris-setosa +5.3,3.7,1.5,0.2,Iris-setosa +5.0,3.3,1.4,0.2,Iris-setosa +7.0,3.2,4.7,1.4,Iris-versicolor +6.4,3.2,4.5,1.5,Iris-versicolor +6.9,3.1,4.9,1.5,Iris-versicolor +5.5,2.3,4.0,1.3,Iris-versicolor +6.5,2.8,4.6,1.5,Iris-versicolor +5.7,2.8,4.5,1.3,Iris-versicolor +6.3,3.3,4.7,1.6,Iris-versicolor +4.9,2.4,3.3,1.0,Iris-versicolor +6.6,2.9,4.6,1.3,Iris-versicolor +5.2,2.7,3.9,1.4,Iris-versicolor +5.0,2.0,3.5,1.0,Iris-versicolor +5.9,3.0,4.2,1.5,Iris-versicolor +6.0,2.2,4.0,1.0,Iris-versicolor +6.1,2.9,4.7,1.4,Iris-versicolor +5.6,2.9,3.6,1.3,Iris-versicolor +6.7,3.1,4.4,1.4,Iris-versicolor +5.6,3.0,4.5,1.5,Iris-versicolor +5.8,2.7,4.1,1.0,Iris-versicolor +6.2,2.2,4.5,1.5,Iris-versicolor +5.6,2.5,3.9,1.1,Iris-versicolor +5.9,3.2,4.8,1.8,Iris-versicolor +6.1,2.8,4.0,1.3,Iris-versicolor +6.3,2.5,4.9,1.5,Iris-versicolor +6.1,2.8,4.7,1.2,Iris-versicolor +6.4,2.9,4.3,1.3,Iris-versicolor +6.6,3.0,4.4,1.4,Iris-versicolor +6.8,2.8,4.8,1.4,Iris-versicolor +6.7,3.0,5.0,1.7,Iris-versicolor +6.0,2.9,4.5,1.5,Iris-versicolor +5.7,2.6,3.5,1.0,Iris-versicolor +5.5,2.4,3.8,1.1,Iris-versicolor +5.5,2.4,3.7,1.0,Iris-versicolor +5.8,2.7,3.9,1.2,Iris-versicolor +6.0,2.7,5.1,1.6,Iris-versicolor +5.4,3.0,4.5,1.5,Iris-versicolor +6.0,3.4,4.5,1.6,Iris-versicolor +6.7,3.1,4.7,1.5,Iris-versicolor +6.3,2.3,4.4,1.3,Iris-versicolor +5.6,3.0,4.1,1.3,Iris-versicolor +5.5,2.5,4.0,1.3,Iris-versicolor +5.5,2.6,4.4,1.2,Iris-versicolor +6.1,3.0,4.6,1.4,Iris-versicolor +5.8,2.6,4.0,1.2,Iris-versicolor +5.0,2.3,3.3,1.0,Iris-versicolor +5.6,2.7,4.2,1.3,Iris-versicolor +5.7,3.0,4.2,1.2,Iris-versicolor +5.7,2.9,4.2,1.3,Iris-versicolor +6.2,2.9,4.3,1.3,Iris-versicolor +5.1,2.5,3.0,1.1,Iris-versicolor +5.7,2.8,4.1,1.3,Iris-versicolor 
+6.3,3.3,6.0,2.5,Iris-virginica +5.8,2.7,5.1,1.9,Iris-virginica +7.1,3.0,5.9,2.1,Iris-virginica +6.3,2.9,5.6,1.8,Iris-virginica +6.5,3.0,5.8,2.2,Iris-virginica +7.6,3.0,6.6,2.1,Iris-virginica +4.9,2.5,4.5,1.7,Iris-virginica +7.3,2.9,6.3,1.8,Iris-virginica +6.7,2.5,5.8,1.8,Iris-virginica +7.2,3.6,6.1,2.5,Iris-virginica +6.5,3.2,5.1,2.0,Iris-virginica +6.4,2.7,5.3,1.9,Iris-virginica +6.8,3.0,5.5,2.1,Iris-virginica +5.7,2.5,5.0,2.0,Iris-virginica +5.8,2.8,5.1,2.4,Iris-virginica +6.4,3.2,5.3,2.3,Iris-virginica +6.5,3.0,5.5,1.8,Iris-virginica +7.7,3.8,6.7,2.2,Iris-virginica +7.7,2.6,6.9,2.3,Iris-virginica +6.0,2.2,5.0,1.5,Iris-virginica +6.9,3.2,5.7,2.3,Iris-virginica +5.6,2.8,4.9,2.0,Iris-virginica +7.7,2.8,6.7,2.0,Iris-virginica +6.3,2.7,4.9,1.8,Iris-virginica +6.7,3.3,5.7,2.1,Iris-virginica +7.2,3.2,6.0,1.8,Iris-virginica +6.2,2.8,4.8,1.8,Iris-virginica +6.1,3.0,4.9,1.8,Iris-virginica +6.4,2.8,5.6,2.1,Iris-virginica +7.2,3.0,5.8,1.6,Iris-virginica +7.4,2.8,6.1,1.9,Iris-virginica +7.9,3.8,6.4,2.0,Iris-virginica +6.4,2.8,5.6,2.2,Iris-virginica +6.3,2.8,5.1,1.5,Iris-virginica +6.1,2.6,5.6,1.4,Iris-virginica +7.7,3.0,6.1,2.3,Iris-virginica +6.3,3.4,5.6,2.4,Iris-virginica +6.4,3.1,5.5,1.8,Iris-virginica +6.0,3.0,4.8,1.8,Iris-virginica +6.9,3.1,5.4,2.1,Iris-virginica +6.7,3.1,5.6,2.4,Iris-virginica +6.9,3.1,5.1,2.3,Iris-virginica +5.8,2.7,5.1,1.9,Iris-virginica +6.8,3.2,5.9,2.3,Iris-virginica +6.7,3.3,5.7,2.5,Iris-virginica +6.7,3.0,5.2,2.3,Iris-virginica +6.3,2.5,5.0,1.9,Iris-virginica +6.5,3.0,5.2,2.0,Iris-virginica +6.2,3.4,5.4,2.3,Iris-virginica +5.9,3.0,5.1,1.8,Iris-virginica diff --git a/vendor/itertools/examples/iris.rs b/vendor/itertools/examples/iris.rs new file mode 100644 index 00000000000000..63f9c48326041d --- /dev/null +++ b/vendor/itertools/examples/iris.rs @@ -0,0 +1,140 @@ +/// +/// This example parses, sorts and groups the iris dataset +/// and does some simple manipulations. +/// +/// Iterators and itertools functionality are used throughout. 
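+// Data layout assumed by the parser below: every iris.data record is four
+// comma-separated measurements followed by the species name, for example
+// "5.1,3.5,1.4,0.2,Iris-setosa"; the `FromStr` impl reads the four floats into
+// `data` and the trailing field into `name`.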
+use itertools::Itertools; +use std::collections::HashMap; +use std::iter::repeat; +use std::num::ParseFloatError; +use std::str::FromStr; + +static DATA: &str = include_str!("iris.data"); + +#[derive(Clone, Debug)] +struct Iris { + name: String, + data: [f32; 4], +} + +#[allow(dead_code)] // fields are currently ignored +#[derive(Clone, Debug)] +enum ParseError { + Numeric(ParseFloatError), + Other(&'static str), +} + +impl From for ParseError { + fn from(err: ParseFloatError) -> Self { + Self::Numeric(err) + } +} + +/// Parse an Iris from a comma-separated line +impl FromStr for Iris { + type Err = ParseError; + + fn from_str(s: &str) -> Result { + let mut iris = Self { + name: "".into(), + data: [0.; 4], + }; + let mut parts = s.split(',').map(str::trim); + + // using Iterator::by_ref() + for (index, part) in parts.by_ref().take(4).enumerate() { + iris.data[index] = part.parse::()?; + } + if let Some(name) = parts.next() { + iris.name = name.into(); + } else { + return Err(ParseError::Other("Missing name")); + } + Ok(iris) + } +} + +fn main() { + // using Itertools::fold_results to create the result of parsing + let irises = DATA + .lines() + .map(str::parse) + .fold_ok(Vec::new(), |mut v, iris: Iris| { + v.push(iris); + v + }); + let mut irises = match irises { + Err(e) => { + println!("Error parsing: {:?}", e); + std::process::exit(1); + } + Ok(data) => data, + }; + + // Sort them and group them + irises.sort_by(|a, b| Ord::cmp(&a.name, &b.name)); + + // using Iterator::cycle() + let mut plot_symbols = "+ox".chars().cycle(); + let mut symbolmap = HashMap::new(); + + // using Itertools::chunk_by + for (species, species_chunk) in &irises.iter().chunk_by(|iris| &iris.name) { + // assign a plot symbol + symbolmap + .entry(species) + .or_insert_with(|| plot_symbols.next().unwrap()); + println!("{} (symbol={})", species, symbolmap[species]); + + for iris in species_chunk { + // using Itertools::format for lazy formatting + println!("{:>3.1}", iris.data.iter().format(", ")); + } + } + + // Look at all combinations of the four columns + // + // See https://en.wikipedia.org/wiki/Iris_flower_data_set + // + let n = 30; // plot size + let mut plot = vec![' '; n * n]; + + // using Itertools::tuple_combinations + for (a, b) in (0..4).tuple_combinations() { + println!("Column {} vs {}:", a, b); + + // Clear plot + // + // using std::iter::repeat; + // using Itertools::set_from + plot.iter_mut().set_from(repeat(' ')); + + // using Itertools::minmax + let min_max = |data: &[Iris], col| { + data.iter() + .map(|iris| iris.data[col]) + .minmax() + .into_option() + .expect("Can't find min/max of empty iterator") + }; + let (min_x, max_x) = min_max(&irises, a); + let (min_y, max_y) = min_max(&irises, b); + + // Plot the data points + let round_to_grid = |x, min, max| ((x - min) / (max - min) * ((n - 1) as f32)) as usize; + let flip = |ix| n - 1 - ix; // reverse axis direction + + for iris in &irises { + let ix = round_to_grid(iris.data[a], min_x, max_x); + let iy = flip(round_to_grid(iris.data[b], min_y, max_y)); + plot[n * iy + ix] = symbolmap[&iris.name]; + } + + // render plot + // + // using Itertools::join + for line in plot.chunks(n) { + println!("{}", line.iter().join(" ")) + } + } +} diff --git a/vendor/itertools/src/adaptors/coalesce.rs b/vendor/itertools/src/adaptors/coalesce.rs new file mode 100644 index 00000000000000..ab1ab5255dd62e --- /dev/null +++ b/vendor/itertools/src/adaptors/coalesce.rs @@ -0,0 +1,286 @@ +use std::fmt; +use std::iter::FusedIterator; + +use crate::size_hint; + 
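+// Informal sketch of the coalescing contract (the concrete values here are
+// illustrative, not taken from the upstream documentation): `last` caches the
+// element currently being merged, and the predicate decides whether to fuse it
+// with the next one. With a closure such as
+//     |a, b| if a == b { Ok(a) } else { Err((a, b)) }
+// an input of 1, 1, 2, 3, 3 coalesces to 1, 2, 3: every Ok(..) keeps merging,
+// while Err((a, b)) emits `a` and restarts the accumulation from `b`.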
+#[must_use = "iterator adaptors are lazy and do nothing unless consumed"] +pub struct CoalesceBy +where + I: Iterator, + C: CountItem, +{ + iter: I, + /// `last` is `None` while no item have been taken out of `iter` (at definition). + /// Then `last` will be `Some(Some(item))` until `iter` is exhausted, + /// in which case `last` will be `Some(None)`. + last: Option>, + f: F, +} + +impl Clone for CoalesceBy +where + I: Clone + Iterator, + F: Clone, + C: CountItem, + C::CItem: Clone, +{ + clone_fields!(last, iter, f); +} + +impl fmt::Debug for CoalesceBy +where + I: Iterator + fmt::Debug, + C: CountItem, + C::CItem: fmt::Debug, +{ + debug_fmt_fields!(CoalesceBy, iter, last); +} + +pub trait CoalescePredicate { + fn coalesce_pair(&mut self, t: T, item: Item) -> Result; +} + +impl Iterator for CoalesceBy +where + I: Iterator, + F: CoalescePredicate, + C: CountItem, +{ + type Item = C::CItem; + + fn next(&mut self) -> Option { + let Self { iter, last, f } = self; + // this fuses the iterator + let init = match last { + Some(elt) => elt.take(), + None => { + *last = Some(None); + iter.next().map(C::new) + } + }?; + + Some( + iter.try_fold(init, |accum, next| match f.coalesce_pair(accum, next) { + Ok(joined) => Ok(joined), + Err((last_, next_)) => { + *last = Some(Some(next_)); + Err(last_) + } + }) + .unwrap_or_else(|x| x), + ) + } + + fn size_hint(&self) -> (usize, Option) { + let (low, hi) = size_hint::add_scalar( + self.iter.size_hint(), + matches!(self.last, Some(Some(_))) as usize, + ); + ((low > 0) as usize, hi) + } + + fn fold(self, acc: Acc, mut fn_acc: FnAcc) -> Acc + where + FnAcc: FnMut(Acc, Self::Item) -> Acc, + { + let Self { + mut iter, + last, + mut f, + } = self; + if let Some(last) = last.unwrap_or_else(|| iter.next().map(C::new)) { + let (last, acc) = iter.fold((last, acc), |(last, acc), elt| { + match f.coalesce_pair(last, elt) { + Ok(joined) => (joined, acc), + Err((last_, next_)) => (next_, fn_acc(acc, last_)), + } + }); + fn_acc(acc, last) + } else { + acc + } + } +} + +impl FusedIterator for CoalesceBy +where + I: Iterator, + F: CoalescePredicate, + C: CountItem, +{ +} + +pub struct NoCount; + +pub struct WithCount; + +pub trait CountItem { + type CItem; + fn new(t: T) -> Self::CItem; +} + +impl CountItem for NoCount { + type CItem = T; + #[inline(always)] + fn new(t: T) -> T { + t + } +} + +impl CountItem for WithCount { + type CItem = (usize, T); + #[inline(always)] + fn new(t: T) -> (usize, T) { + (1, t) + } +} + +/// An iterator adaptor that may join together adjacent elements. +/// +/// See [`.coalesce()`](crate::Itertools::coalesce) for more information. +pub type Coalesce = CoalesceBy; + +impl CoalescePredicate for F +where + F: FnMut(T, Item) -> Result, +{ + fn coalesce_pair(&mut self, t: T, item: Item) -> Result { + self(t, item) + } +} + +/// Create a new `Coalesce`. +pub fn coalesce(iter: I, f: F) -> Coalesce +where + I: Iterator, +{ + Coalesce { + last: None, + iter, + f, + } +} + +/// An iterator adaptor that removes repeated duplicates, determining equality using a comparison function. +/// +/// See [`.dedup_by()`](crate::Itertools::dedup_by) or [`.dedup()`](crate::Itertools::dedup) for more information. 
+pub type DedupBy<I, Pred> = CoalesceBy<I, DedupPred2CoalescePred<Pred>, NoCount>;
+
+#[derive(Clone)]
+pub struct DedupPred2CoalescePred<DP>(DP);
+
+impl<DP> fmt::Debug for DedupPred2CoalescePred<DP> {
+    debug_fmt_fields!(DedupPred2CoalescePred,);
+}
+
+pub trait DedupPredicate<T> {
+    // TODO replace by Fn(&T, &T)->bool once Rust supports it
+    fn dedup_pair(&mut self, a: &T, b: &T) -> bool;
+}
+
+impl<DP, T> CoalescePredicate<T, T> for DedupPred2CoalescePred<DP>
+where
+    DP: DedupPredicate<T>,
+{
+    fn coalesce_pair(&mut self, t: T, item: T) -> Result<T, (T, T)> {
+        if self.0.dedup_pair(&t, &item) {
+            Ok(t)
+        } else {
+            Err((t, item))
+        }
+    }
+}
+
+#[derive(Clone, Debug)]
+pub struct DedupEq;
+
+impl<T: PartialEq> DedupPredicate<T> for DedupEq {
+    fn dedup_pair(&mut self, a: &T, b: &T) -> bool {
+        a == b
+    }
+}
+
+impl<T, F: FnMut(&T, &T) -> bool> DedupPredicate<T> for F {
+    fn dedup_pair(&mut self, a: &T, b: &T) -> bool {
+        self(a, b)
+    }
+}
+
+/// Create a new `DedupBy`.
+pub fn dedup_by<I, Pred>(iter: I, dedup_pred: Pred) -> DedupBy<I, Pred>
+where
+    I: Iterator,
+{
+    DedupBy {
+        last: None,
+        iter,
+        f: DedupPred2CoalescePred(dedup_pred),
+    }
+}
+
+/// An iterator adaptor that removes repeated duplicates.
+///
+/// See [`.dedup()`](crate::Itertools::dedup) for more information.
+pub type Dedup<I> = DedupBy<I, DedupEq>;
+
+/// Create a new `Dedup`.
+pub fn dedup<I>(iter: I) -> Dedup<I>
+where
+    I: Iterator,
+{
+    dedup_by(iter, DedupEq)
+}
+
+/// An iterator adaptor that removes repeated duplicates, while keeping a count of how many
+/// repeated elements were present. This will determine equality using a comparison function.
+///
+/// See [`.dedup_by_with_count()`](crate::Itertools::dedup_by_with_count) or
+/// [`.dedup_with_count()`](crate::Itertools::dedup_with_count) for more information.
+pub type DedupByWithCount<I, Pred> =
+    CoalesceBy<I, DedupPredWithCount2CoalescePred<Pred>, WithCount>;
+
+#[derive(Clone, Debug)]
+pub struct DedupPredWithCount2CoalescePred<DP>(DP);
+
+impl<DP, T> CoalescePredicate<T, (usize, T)> for DedupPredWithCount2CoalescePred<DP>
+where
+    DP: DedupPredicate<T>,
+{
+    fn coalesce_pair(
+        &mut self,
+        (c, t): (usize, T),
+        item: T,
+    ) -> Result<(usize, T), ((usize, T), (usize, T))> {
+        if self.0.dedup_pair(&t, &item) {
+            Ok((c + 1, t))
+        } else {
+            Err(((c, t), (1, item)))
+        }
+    }
+}
+
+/// An iterator adaptor that removes repeated duplicates, while keeping a count of how many
+/// repeated elements were present.
+///
+/// See [`.dedup_with_count()`](crate::Itertools::dedup_with_count) for more information.
+pub type DedupWithCount<I> = DedupByWithCount<I, DedupEq>;
+
+/// Create a new `DedupByWithCount`.
+pub fn dedup_by_with_count<I, Pred>(iter: I, dedup_pred: Pred) -> DedupByWithCount<I, Pred>
+where
+    I: Iterator,
+{
+    DedupByWithCount {
+        last: None,
+        iter,
+        f: DedupPredWithCount2CoalescePred(dedup_pred),
+    }
+}
+
+/// Create a new `DedupWithCount`.
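+// Worked example (values illustrative): over 1, 1, 2, 3, 3, 3 the adaptor built
+// below yields (2, 1), (1, 2), (3, 3); `WithCount` seeds each run with a count
+// of 1 and the coalescing predicate bumps it for every equal neighbour.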
+pub fn dedup_with_count(iter: I) -> DedupWithCount +where + I: Iterator, +{ + dedup_by_with_count(iter, DedupEq) +} diff --git a/vendor/itertools/src/adaptors/map.rs b/vendor/itertools/src/adaptors/map.rs new file mode 100644 index 00000000000000..c78b9be698035e --- /dev/null +++ b/vendor/itertools/src/adaptors/map.rs @@ -0,0 +1,130 @@ +use std::iter::FromIterator; +use std::marker::PhantomData; + +#[derive(Clone, Debug)] +#[must_use = "iterator adaptors are lazy and do nothing unless consumed"] +pub struct MapSpecialCase { + pub(crate) iter: I, + pub(crate) f: F, +} + +pub trait MapSpecialCaseFn { + type Out; + fn call(&mut self, t: T) -> Self::Out; +} + +impl Iterator for MapSpecialCase +where + I: Iterator, + R: MapSpecialCaseFn, +{ + type Item = R::Out; + + fn next(&mut self) -> Option { + self.iter.next().map(|i| self.f.call(i)) + } + + fn size_hint(&self) -> (usize, Option) { + self.iter.size_hint() + } + + fn fold(self, init: Acc, mut fold_f: Fold) -> Acc + where + Fold: FnMut(Acc, Self::Item) -> Acc, + { + let mut f = self.f; + self.iter.fold(init, move |acc, v| fold_f(acc, f.call(v))) + } + + fn collect(self) -> C + where + C: FromIterator, + { + let mut f = self.f; + self.iter.map(move |v| f.call(v)).collect() + } +} + +impl DoubleEndedIterator for MapSpecialCase +where + I: DoubleEndedIterator, + R: MapSpecialCaseFn, +{ + fn next_back(&mut self) -> Option { + self.iter.next_back().map(|i| self.f.call(i)) + } +} + +impl ExactSizeIterator for MapSpecialCase +where + I: ExactSizeIterator, + R: MapSpecialCaseFn, +{ +} + +/// An iterator adapter to apply a transformation within a nested `Result::Ok`. +/// +/// See [`.map_ok()`](crate::Itertools::map_ok) for more information. +pub type MapOk = MapSpecialCase>; + +impl MapSpecialCaseFn> for MapSpecialCaseFnOk +where + F: FnMut(T) -> U, +{ + type Out = Result; + fn call(&mut self, t: Result) -> Self::Out { + t.map(|v| self.0(v)) + } +} + +#[derive(Clone)] +pub struct MapSpecialCaseFnOk(F); + +impl std::fmt::Debug for MapSpecialCaseFnOk { + debug_fmt_fields!(MapSpecialCaseFnOk,); +} + +/// Create a new `MapOk` iterator. +pub fn map_ok(iter: I, f: F) -> MapOk +where + I: Iterator>, + F: FnMut(T) -> U, +{ + MapSpecialCase { + iter, + f: MapSpecialCaseFnOk(f), + } +} + +/// An iterator adapter to apply `Into` conversion to each element. +/// +/// See [`.map_into()`](crate::Itertools::map_into) for more information. +pub type MapInto = MapSpecialCase>; + +impl, U> MapSpecialCaseFn for MapSpecialCaseFnInto { + type Out = U; + fn call(&mut self, t: T) -> Self::Out { + t.into() + } +} + +pub struct MapSpecialCaseFnInto(PhantomData); + +impl std::fmt::Debug for MapSpecialCaseFnInto { + debug_fmt_fields!(MapSpecialCaseFnInto, 0); +} + +impl Clone for MapSpecialCaseFnInto { + #[inline] + fn clone(&self) -> Self { + Self(PhantomData) + } +} + +/// Create a new [`MapInto`] iterator. +pub fn map_into(iter: I) -> MapInto { + MapSpecialCase { + iter, + f: MapSpecialCaseFnInto(PhantomData), + } +} diff --git a/vendor/itertools/src/adaptors/mod.rs b/vendor/itertools/src/adaptors/mod.rs new file mode 100644 index 00000000000000..52e36c48be4c32 --- /dev/null +++ b/vendor/itertools/src/adaptors/mod.rs @@ -0,0 +1,1208 @@ +//! Licensed under the Apache License, Version 2.0 +//! or the MIT license +//! , at your +//! option. This file may not be copied, modified, or distributed +//! except according to those terms. 
+ +mod coalesce; +pub(crate) mod map; +mod multi_product; +pub use self::coalesce::*; +pub use self::map::{map_into, map_ok, MapInto, MapOk}; +#[cfg(feature = "use_alloc")] +pub use self::multi_product::*; + +use crate::size_hint::{self, SizeHint}; +use std::fmt; +use std::iter::{Enumerate, FromIterator, Fuse, FusedIterator}; +use std::marker::PhantomData; + +/// An iterator adaptor that alternates elements from two iterators until both +/// run out. +/// +/// This iterator is *fused*. +/// +/// See [`.interleave()`](crate::Itertools::interleave) for more information. +#[derive(Clone, Debug)] +#[must_use = "iterator adaptors are lazy and do nothing unless consumed"] +pub struct Interleave { + i: Fuse, + j: Fuse, + next_coming_from_j: bool, +} + +/// Create an iterator that interleaves elements in `i` and `j`. +/// +/// [`IntoIterator`] enabled version of [`Itertools::interleave`](crate::Itertools::interleave). +pub fn interleave( + i: I, + j: J, +) -> Interleave<::IntoIter, ::IntoIter> +where + I: IntoIterator, + J: IntoIterator, +{ + Interleave { + i: i.into_iter().fuse(), + j: j.into_iter().fuse(), + next_coming_from_j: false, + } +} + +impl Iterator for Interleave +where + I: Iterator, + J: Iterator, +{ + type Item = I::Item; + #[inline] + fn next(&mut self) -> Option { + self.next_coming_from_j = !self.next_coming_from_j; + if self.next_coming_from_j { + match self.i.next() { + None => self.j.next(), + r => r, + } + } else { + match self.j.next() { + None => self.i.next(), + r => r, + } + } + } + + fn size_hint(&self) -> (usize, Option) { + size_hint::add(self.i.size_hint(), self.j.size_hint()) + } + + fn fold(self, mut init: B, mut f: F) -> B + where + F: FnMut(B, Self::Item) -> B, + { + let Self { + mut i, + mut j, + next_coming_from_j, + } = self; + if next_coming_from_j { + match j.next() { + Some(y) => init = f(init, y), + None => return i.fold(init, f), + } + } + let res = i.try_fold(init, |mut acc, x| { + acc = f(acc, x); + match j.next() { + Some(y) => Ok(f(acc, y)), + None => Err(acc), + } + }); + match res { + Ok(acc) => j.fold(acc, f), + Err(acc) => i.fold(acc, f), + } + } +} + +impl FusedIterator for Interleave +where + I: Iterator, + J: Iterator, +{ +} + +/// An iterator adaptor that alternates elements from the two iterators until +/// one of them runs out. +/// +/// This iterator is *fused*. +/// +/// See [`.interleave_shortest()`](crate::Itertools::interleave_shortest) +/// for more information. +#[derive(Clone, Debug)] +#[must_use = "iterator adaptors are lazy and do nothing unless consumed"] +pub struct InterleaveShortest +where + I: Iterator, + J: Iterator, +{ + i: I, + j: J, + next_coming_from_j: bool, +} + +/// Create a new `InterleaveShortest` iterator. 
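+// Rough behavioural sketch (inputs are illustrative): given 1, 2 and 10, 20, 30,
+// `interleave` yields 1, 10, 2, 20, 30 because it keeps draining whichever side
+// is left, whereas the `interleave_shortest` constructed below stops at
+// 1, 10, 2, 20, since the next turn belongs to the exhausted side.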
+pub fn interleave_shortest(i: I, j: J) -> InterleaveShortest +where + I: Iterator, + J: Iterator, +{ + InterleaveShortest { + i, + j, + next_coming_from_j: false, + } +} + +impl Iterator for InterleaveShortest +where + I: Iterator, + J: Iterator, +{ + type Item = I::Item; + + #[inline] + fn next(&mut self) -> Option { + let e = if self.next_coming_from_j { + self.j.next() + } else { + self.i.next() + }; + if e.is_some() { + self.next_coming_from_j = !self.next_coming_from_j; + } + e + } + + #[inline] + fn size_hint(&self) -> (usize, Option) { + let (curr_hint, next_hint) = { + let i_hint = self.i.size_hint(); + let j_hint = self.j.size_hint(); + if self.next_coming_from_j { + (j_hint, i_hint) + } else { + (i_hint, j_hint) + } + }; + let (curr_lower, curr_upper) = curr_hint; + let (next_lower, next_upper) = next_hint; + let (combined_lower, combined_upper) = + size_hint::mul_scalar(size_hint::min(curr_hint, next_hint), 2); + let lower = if curr_lower > next_lower { + combined_lower + 1 + } else { + combined_lower + }; + let upper = { + let extra_elem = match (curr_upper, next_upper) { + (_, None) => false, + (None, Some(_)) => true, + (Some(curr_max), Some(next_max)) => curr_max > next_max, + }; + if extra_elem { + combined_upper.and_then(|x| x.checked_add(1)) + } else { + combined_upper + } + }; + (lower, upper) + } + + fn fold(self, mut init: B, mut f: F) -> B + where + F: FnMut(B, Self::Item) -> B, + { + let Self { + mut i, + mut j, + next_coming_from_j, + } = self; + if next_coming_from_j { + match j.next() { + Some(y) => init = f(init, y), + None => return init, + } + } + let res = i.try_fold(init, |mut acc, x| { + acc = f(acc, x); + match j.next() { + Some(y) => Ok(f(acc, y)), + None => Err(acc), + } + }); + match res { + Ok(val) => val, + Err(val) => val, + } + } +} + +impl FusedIterator for InterleaveShortest +where + I: FusedIterator, + J: FusedIterator, +{ +} + +#[derive(Clone, Debug)] +/// An iterator adaptor that allows putting back a single +/// item to the front of the iterator. +/// +/// Iterator element type is `I::Item`. +#[must_use = "iterator adaptors are lazy and do nothing unless consumed"] +pub struct PutBack +where + I: Iterator, +{ + top: Option, + iter: I, +} + +/// Create an iterator where you can put back a single item +pub fn put_back(iterable: I) -> PutBack +where + I: IntoIterator, +{ + PutBack { + top: None, + iter: iterable.into_iter(), + } +} + +impl PutBack +where + I: Iterator, +{ + /// put back value `value` (builder method) + pub fn with_value(mut self, value: I::Item) -> Self { + self.put_back(value); + self + } + + /// Split the `PutBack` into its parts. + #[inline] + pub fn into_parts(self) -> (Option, I) { + let Self { top, iter } = self; + (top, iter) + } + + /// Put back a single value to the front of the iterator. + /// + /// If a value is already in the put back slot, it is returned. 
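+    // Usage sketch (values illustrative): after `let mut it = put_back(1..4);
+    // it.next();`, calling `it.put_back(7)` stows 7 in the single-item slot, so
+    // the remaining items are 7, 2, 3; putting back again before consuming would
+    // hand the displaced 7 back through the returned Option.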
+ #[inline] + pub fn put_back(&mut self, x: I::Item) -> Option { + self.top.replace(x) + } +} + +impl Iterator for PutBack +where + I: Iterator, +{ + type Item = I::Item; + #[inline] + fn next(&mut self) -> Option { + match self.top { + None => self.iter.next(), + ref mut some => some.take(), + } + } + #[inline] + fn size_hint(&self) -> (usize, Option) { + // Not ExactSizeIterator because size may be larger than usize + size_hint::add_scalar(self.iter.size_hint(), self.top.is_some() as usize) + } + + fn count(self) -> usize { + self.iter.count() + (self.top.is_some() as usize) + } + + fn last(self) -> Option { + self.iter.last().or(self.top) + } + + fn nth(&mut self, n: usize) -> Option { + match self.top { + None => self.iter.nth(n), + ref mut some => { + if n == 0 { + some.take() + } else { + *some = None; + self.iter.nth(n - 1) + } + } + } + } + + fn all(&mut self, mut f: G) -> bool + where + G: FnMut(Self::Item) -> bool, + { + if let Some(elt) = self.top.take() { + if !f(elt) { + return false; + } + } + self.iter.all(f) + } + + fn fold(mut self, init: Acc, mut f: G) -> Acc + where + G: FnMut(Acc, Self::Item) -> Acc, + { + let mut accum = init; + if let Some(elt) = self.top.take() { + accum = f(accum, elt); + } + self.iter.fold(accum, f) + } +} + +#[derive(Debug, Clone)] +/// An iterator adaptor that iterates over the cartesian product of +/// the element sets of two iterators `I` and `J`. +/// +/// Iterator element type is `(I::Item, J::Item)`. +/// +/// See [`.cartesian_product()`](crate::Itertools::cartesian_product) for more information. +#[must_use = "iterator adaptors are lazy and do nothing unless consumed"] +pub struct Product +where + I: Iterator, +{ + a: I, + /// `a_cur` is `None` while no item have been taken out of `a` (at definition). + /// Then `a_cur` will be `Some(Some(item))` until `a` is exhausted, + /// in which case `a_cur` will be `Some(None)`. + a_cur: Option>, + b: J, + b_orig: J, +} + +/// Create a new cartesian product iterator +/// +/// Iterator element type is `(I::Item, J::Item)`. +pub fn cartesian_product(i: I, j: J) -> Product +where + I: Iterator, + J: Clone + Iterator, + I::Item: Clone, +{ + Product { + a_cur: None, + a: i, + b: j.clone(), + b_orig: j, + } +} + +impl Iterator for Product +where + I: Iterator, + J: Clone + Iterator, + I::Item: Clone, +{ + type Item = (I::Item, J::Item); + + fn next(&mut self) -> Option { + let Self { + a, + a_cur, + b, + b_orig, + } = self; + let elt_b = match b.next() { + None => { + *b = b_orig.clone(); + match b.next() { + None => return None, + Some(x) => { + *a_cur = Some(a.next()); + x + } + } + } + Some(x) => x, + }; + a_cur + .get_or_insert_with(|| a.next()) + .as_ref() + .map(|a| (a.clone(), elt_b)) + } + + fn size_hint(&self) -> (usize, Option) { + // Not ExactSizeIterator because size may be larger than usize + // Compute a * b_orig + b for both lower and upper bound + let mut sh = size_hint::mul(self.a.size_hint(), self.b_orig.size_hint()); + if matches!(self.a_cur, Some(Some(_))) { + sh = size_hint::add(sh, self.b.size_hint()); + } + sh + } + + fn fold(self, mut accum: Acc, mut f: G) -> Acc + where + G: FnMut(Acc, Self::Item) -> Acc, + { + // use a split loop to handle the loose a_cur as well as avoiding to + // clone b_orig at the end. 
+ let Self { + mut a, + a_cur, + mut b, + b_orig, + } = self; + if let Some(mut elt_a) = a_cur.unwrap_or_else(|| a.next()) { + loop { + accum = b.fold(accum, |acc, elt| f(acc, (elt_a.clone(), elt))); + + // we can only continue iterating a if we had a first element; + if let Some(next_elt_a) = a.next() { + b = b_orig.clone(); + elt_a = next_elt_a; + } else { + break; + } + } + } + accum + } +} + +impl FusedIterator for Product +where + I: FusedIterator, + J: Clone + FusedIterator, + I::Item: Clone, +{ +} + +/// A “meta iterator adaptor”. Its closure receives a reference to the iterator +/// and may pick off as many elements as it likes, to produce the next iterator element. +/// +/// Iterator element type is `X` if the return type of `F` is `Option`. +/// +/// See [`.batching()`](crate::Itertools::batching) for more information. +#[derive(Clone)] +#[must_use = "iterator adaptors are lazy and do nothing unless consumed"] +pub struct Batching { + f: F, + iter: I, +} + +impl fmt::Debug for Batching +where + I: fmt::Debug, +{ + debug_fmt_fields!(Batching, iter); +} + +/// Create a new Batching iterator. +pub fn batching(iter: I, f: F) -> Batching { + Batching { f, iter } +} + +impl Iterator for Batching +where + I: Iterator, + F: FnMut(&mut I) -> Option, +{ + type Item = B; + #[inline] + fn next(&mut self) -> Option { + (self.f)(&mut self.iter) + } +} + +/// An iterator adaptor that borrows from a `Clone`-able iterator +/// to only pick off elements while the predicate returns `true`. +/// +/// See [`.take_while_ref()`](crate::Itertools::take_while_ref) for more information. +#[must_use = "iterator adaptors are lazy and do nothing unless consumed"] +pub struct TakeWhileRef<'a, I: 'a, F> { + iter: &'a mut I, + f: F, +} + +impl<'a, I, F> fmt::Debug for TakeWhileRef<'a, I, F> +where + I: Iterator + fmt::Debug, +{ + debug_fmt_fields!(TakeWhileRef, iter); +} + +/// Create a new `TakeWhileRef` from a reference to clonable iterator. +pub fn take_while_ref(iter: &mut I, f: F) -> TakeWhileRef +where + I: Iterator + Clone, +{ + TakeWhileRef { iter, f } +} + +impl<'a, I, F> Iterator for TakeWhileRef<'a, I, F> +where + I: Iterator + Clone, + F: FnMut(&I::Item) -> bool, +{ + type Item = I::Item; + + fn next(&mut self) -> Option { + let old = self.iter.clone(); + match self.iter.next() { + None => None, + Some(elt) => { + if (self.f)(&elt) { + Some(elt) + } else { + *self.iter = old; + None + } + } + } + } + + fn size_hint(&self) -> (usize, Option) { + (0, self.iter.size_hint().1) + } +} + +/// An iterator adaptor that filters `Option` iterator elements +/// and produces `A`. Stops on the first `None` encountered. +/// +/// See [`.while_some()`](crate::Itertools::while_some) for more information. +#[derive(Clone, Debug)] +#[must_use = "iterator adaptors are lazy and do nothing unless consumed"] +pub struct WhileSome { + iter: I, +} + +/// Create a new `WhileSome`. 
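+// Behavioural sketch (values illustrative): over Some(1), Some(2), None, Some(3)
+// the adaptor yields 1, 2 and stops at the first None, which is why the
+// specializations benchmark in this patch can drive it from an unbounded (0..)
+// counter via `char::from_digit`.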
+pub fn while_some(iter: I) -> WhileSome { + WhileSome { iter } +} + +impl Iterator for WhileSome +where + I: Iterator>, +{ + type Item = A; + + fn next(&mut self) -> Option { + match self.iter.next() { + None | Some(None) => None, + Some(elt) => elt, + } + } + + fn size_hint(&self) -> (usize, Option) { + (0, self.iter.size_hint().1) + } + + fn fold(mut self, acc: B, mut f: F) -> B + where + Self: Sized, + F: FnMut(B, Self::Item) -> B, + { + let res = self.iter.try_fold(acc, |acc, item| match item { + Some(item) => Ok(f(acc, item)), + None => Err(acc), + }); + + match res { + Ok(val) => val, + Err(val) => val, + } + } +} + +/// An iterator to iterate through all combinations in a `Clone`-able iterator that produces tuples +/// of a specific size. +/// +/// See [`.tuple_combinations()`](crate::Itertools::tuple_combinations) for more +/// information. +#[derive(Clone, Debug)] +#[must_use = "this iterator adaptor is not lazy but does nearly nothing unless consumed"] +pub struct TupleCombinations +where + I: Iterator, + T: HasCombination, +{ + iter: T::Combination, + _mi: PhantomData, +} + +pub trait HasCombination: Sized { + type Combination: From + Iterator; +} + +/// Create a new `TupleCombinations` from a clonable iterator. +pub fn tuple_combinations(iter: I) -> TupleCombinations +where + I: Iterator + Clone, + I::Item: Clone, + T: HasCombination, +{ + TupleCombinations { + iter: T::Combination::from(iter), + _mi: PhantomData, + } +} + +impl Iterator for TupleCombinations +where + I: Iterator, + T: HasCombination, +{ + type Item = T; + + fn next(&mut self) -> Option { + self.iter.next() + } + + fn size_hint(&self) -> SizeHint { + self.iter.size_hint() + } + + fn count(self) -> usize { + self.iter.count() + } + + fn fold(self, init: B, f: F) -> B + where + F: FnMut(B, Self::Item) -> B, + { + self.iter.fold(init, f) + } +} + +impl FusedIterator for TupleCombinations +where + I: FusedIterator, + T: HasCombination, +{ +} + +#[derive(Clone, Debug)] +pub struct Tuple1Combination { + iter: I, +} + +impl From for Tuple1Combination { + fn from(iter: I) -> Self { + Self { iter } + } +} + +impl Iterator for Tuple1Combination { + type Item = (I::Item,); + + fn next(&mut self) -> Option { + self.iter.next().map(|x| (x,)) + } + + fn size_hint(&self) -> SizeHint { + self.iter.size_hint() + } + + fn count(self) -> usize { + self.iter.count() + } + + fn fold(self, init: B, f: F) -> B + where + F: FnMut(B, Self::Item) -> B, + { + self.iter.map(|x| (x,)).fold(init, f) + } +} + +impl HasCombination for (I::Item,) { + type Combination = Tuple1Combination; +} + +macro_rules! 
impl_tuple_combination { + ($C:ident $P:ident ; $($X:ident)*) => ( + #[derive(Clone, Debug)] + pub struct $C { + item: Option, + iter: I, + c: $P, + } + + impl From for $C { + fn from(mut iter: I) -> Self { + Self { + item: iter.next(), + iter: iter.clone(), + c: iter.into(), + } + } + } + + impl From for $C> { + fn from(iter: I) -> Self { + Self::from(iter.fuse()) + } + } + + impl Iterator for $C + where I: Iterator + Clone, + A: Clone, + { + type Item = (A, $(ignore_ident!($X, A)),*); + + fn next(&mut self) -> Option { + if let Some(($($X,)*)) = self.c.next() { + let z = self.item.clone().unwrap(); + Some((z, $($X),*)) + } else { + self.item = self.iter.next(); + self.item.clone().and_then(|z| { + self.c = self.iter.clone().into(); + self.c.next().map(|($($X,)*)| (z, $($X),*)) + }) + } + } + + fn size_hint(&self) -> SizeHint { + const K: usize = 1 + count_ident!($($X)*); + let (mut n_min, mut n_max) = self.iter.size_hint(); + n_min = checked_binomial(n_min, K).unwrap_or(usize::MAX); + n_max = n_max.and_then(|n| checked_binomial(n, K)); + size_hint::add(self.c.size_hint(), (n_min, n_max)) + } + + fn count(self) -> usize { + const K: usize = 1 + count_ident!($($X)*); + let n = self.iter.count(); + checked_binomial(n, K).unwrap() + self.c.count() + } + + fn fold(self, mut init: B, mut f: F) -> B + where + F: FnMut(B, Self::Item) -> B, + { + let Self { c, item, mut iter } = self; + if let Some(z) = item.as_ref() { + init = c + .map(|($($X,)*)| (z.clone(), $($X),*)) + .fold(init, &mut f); + } + while let Some(z) = iter.next() { + let c: $P = iter.clone().into(); + init = c + .map(|($($X,)*)| (z.clone(), $($X),*)) + .fold(init, &mut f); + } + init + } + } + + impl HasCombination for (A, $(ignore_ident!($X, A)),*) + where I: Iterator + Clone, + I::Item: Clone + { + type Combination = $C>; + } + ) +} + +// This snippet generates the twelve `impl_tuple_combination!` invocations: +// use core::iter; +// use itertools::Itertools; +// +// for i in 2..=12 { +// println!("impl_tuple_combination!(Tuple{arity}Combination Tuple{prev}Combination; {idents});", +// arity = i, +// prev = i - 1, +// idents = ('a'..'z').take(i - 1).join(" "), +// ); +// } +// It could probably be replaced by a bit more macro cleverness. +impl_tuple_combination!(Tuple2Combination Tuple1Combination; a); +impl_tuple_combination!(Tuple3Combination Tuple2Combination; a b); +impl_tuple_combination!(Tuple4Combination Tuple3Combination; a b c); +impl_tuple_combination!(Tuple5Combination Tuple4Combination; a b c d); +impl_tuple_combination!(Tuple6Combination Tuple5Combination; a b c d e); +impl_tuple_combination!(Tuple7Combination Tuple6Combination; a b c d e f); +impl_tuple_combination!(Tuple8Combination Tuple7Combination; a b c d e f g); +impl_tuple_combination!(Tuple9Combination Tuple8Combination; a b c d e f g h); +impl_tuple_combination!(Tuple10Combination Tuple9Combination; a b c d e f g h i); +impl_tuple_combination!(Tuple11Combination Tuple10Combination; a b c d e f g h i j); +impl_tuple_combination!(Tuple12Combination Tuple11Combination; a b c d e f g h i j k); + +// https://en.wikipedia.org/wiki/Binomial_coefficient#In_programming_languages +pub(crate) fn checked_binomial(mut n: usize, mut k: usize) -> Option { + if n < k { + return Some(0); + } + // `factorial(n) / factorial(n - k) / factorial(k)` but trying to avoid it overflows: + k = (n - k).min(k); // symmetry + let mut c = 1; + for i in 1..=k { + c = (c / i) + .checked_mul(n)? + .checked_add((c % i).checked_mul(n)? 
/ i)?; + n -= 1; + } + Some(c) +} + +#[test] +fn test_checked_binomial() { + // With the first row: [1, 0, 0, ...] and the first column full of 1s, we check + // row by row the recurrence relation of binomials (which is an equivalent definition). + // For n >= 1 and k >= 1 we have: + // binomial(n, k) == binomial(n - 1, k - 1) + binomial(n - 1, k) + const LIMIT: usize = 500; + let mut row = vec![Some(0); LIMIT + 1]; + row[0] = Some(1); + for n in 0..=LIMIT { + for k in 0..=LIMIT { + assert_eq!(row[k], checked_binomial(n, k)); + } + row = std::iter::once(Some(1)) + .chain((1..=LIMIT).map(|k| row[k - 1]?.checked_add(row[k]?))) + .collect(); + } +} + +/// An iterator adapter to filter values within a nested `Result::Ok`. +/// +/// See [`.filter_ok()`](crate::Itertools::filter_ok) for more information. +#[derive(Clone)] +#[must_use = "iterator adaptors are lazy and do nothing unless consumed"] +pub struct FilterOk { + iter: I, + f: F, +} + +impl fmt::Debug for FilterOk +where + I: fmt::Debug, +{ + debug_fmt_fields!(FilterOk, iter); +} + +/// Create a new `FilterOk` iterator. +pub fn filter_ok(iter: I, f: F) -> FilterOk +where + I: Iterator>, + F: FnMut(&T) -> bool, +{ + FilterOk { iter, f } +} + +impl Iterator for FilterOk +where + I: Iterator>, + F: FnMut(&T) -> bool, +{ + type Item = Result; + + fn next(&mut self) -> Option { + let f = &mut self.f; + self.iter.find(|res| match res { + Ok(t) => f(t), + _ => true, + }) + } + + fn size_hint(&self) -> (usize, Option) { + (0, self.iter.size_hint().1) + } + + fn fold(self, init: Acc, fold_f: Fold) -> Acc + where + Fold: FnMut(Acc, Self::Item) -> Acc, + { + let mut f = self.f; + self.iter + .filter(|v| v.as_ref().map(&mut f).unwrap_or(true)) + .fold(init, fold_f) + } + + fn collect(self) -> C + where + C: FromIterator, + { + let mut f = self.f; + self.iter + .filter(|v| v.as_ref().map(&mut f).unwrap_or(true)) + .collect() + } +} + +impl FusedIterator for FilterOk +where + I: FusedIterator>, + F: FnMut(&T) -> bool, +{ +} + +/// An iterator adapter to filter and apply a transformation on values within a nested `Result::Ok`. +/// +/// See [`.filter_map_ok()`](crate::Itertools::filter_map_ok) for more information. +#[must_use = "iterator adaptors are lazy and do nothing unless consumed"] +#[derive(Clone)] +pub struct FilterMapOk { + iter: I, + f: F, +} + +impl fmt::Debug for FilterMapOk +where + I: fmt::Debug, +{ + debug_fmt_fields!(FilterMapOk, iter); +} + +fn transpose_result(result: Result, E>) -> Option> { + match result { + Ok(Some(v)) => Some(Ok(v)), + Ok(None) => None, + Err(e) => Some(Err(e)), + } +} + +/// Create a new `FilterOk` iterator. 
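+// Behavioural sketch (values illustrative): the constructor below produces a
+// `FilterMapOk`, so Ok values are passed through the Option-returning closure
+// (None discards them) while Err values are forwarded untouched; with
+// |x| if x % 2 == 1 { Some(x * 10) } else { None }, the input Ok(1), Err(2),
+// Ok(3) becomes Ok(10), Err(2), Ok(30).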
+pub fn filter_map_ok(iter: I, f: F) -> FilterMapOk +where + I: Iterator>, + F: FnMut(T) -> Option, +{ + FilterMapOk { iter, f } +} + +impl Iterator for FilterMapOk +where + I: Iterator>, + F: FnMut(T) -> Option, +{ + type Item = Result; + + fn next(&mut self) -> Option { + let f = &mut self.f; + self.iter.find_map(|res| match res { + Ok(t) => f(t).map(Ok), + Err(e) => Some(Err(e)), + }) + } + + fn size_hint(&self) -> (usize, Option) { + (0, self.iter.size_hint().1) + } + + fn fold(self, init: Acc, fold_f: Fold) -> Acc + where + Fold: FnMut(Acc, Self::Item) -> Acc, + { + let mut f = self.f; + self.iter + .filter_map(|v| transpose_result(v.map(&mut f))) + .fold(init, fold_f) + } + + fn collect(self) -> C + where + C: FromIterator, + { + let mut f = self.f; + self.iter + .filter_map(|v| transpose_result(v.map(&mut f))) + .collect() + } +} + +impl FusedIterator for FilterMapOk +where + I: FusedIterator>, + F: FnMut(T) -> Option, +{ +} + +/// An iterator adapter to get the positions of each element that matches a predicate. +/// +/// See [`.positions()`](crate::Itertools::positions) for more information. +#[derive(Clone)] +#[must_use = "iterator adaptors are lazy and do nothing unless consumed"] +pub struct Positions { + iter: Enumerate, + f: F, +} + +impl fmt::Debug for Positions +where + I: fmt::Debug, +{ + debug_fmt_fields!(Positions, iter); +} + +/// Create a new `Positions` iterator. +pub fn positions(iter: I, f: F) -> Positions +where + I: Iterator, + F: FnMut(I::Item) -> bool, +{ + let iter = iter.enumerate(); + Positions { iter, f } +} + +impl Iterator for Positions +where + I: Iterator, + F: FnMut(I::Item) -> bool, +{ + type Item = usize; + + fn next(&mut self) -> Option { + let f = &mut self.f; + // TODO: once MSRV >= 1.62, use `then_some`. + self.iter + .find_map(|(count, val)| if f(val) { Some(count) } else { None }) + } + + fn size_hint(&self) -> (usize, Option) { + (0, self.iter.size_hint().1) + } + + fn fold(self, init: B, mut func: G) -> B + where + G: FnMut(B, Self::Item) -> B, + { + let mut f = self.f; + self.iter.fold(init, |mut acc, (count, val)| { + if f(val) { + acc = func(acc, count); + } + acc + }) + } +} + +impl DoubleEndedIterator for Positions +where + I: DoubleEndedIterator + ExactSizeIterator, + F: FnMut(I::Item) -> bool, +{ + fn next_back(&mut self) -> Option { + let f = &mut self.f; + // TODO: once MSRV >= 1.62, use `then_some`. + self.iter + .by_ref() + .rev() + .find_map(|(count, val)| if f(val) { Some(count) } else { None }) + } + + fn rfold(self, init: B, mut func: G) -> B + where + G: FnMut(B, Self::Item) -> B, + { + let mut f = self.f; + self.iter.rfold(init, |mut acc, (count, val)| { + if f(val) { + acc = func(acc, count); + } + acc + }) + } +} + +impl FusedIterator for Positions +where + I: FusedIterator, + F: FnMut(I::Item) -> bool, +{ +} + +/// An iterator adapter to apply a mutating function to each element before yielding it. +/// +/// See [`.update()`](crate::Itertools::update) for more information. +#[derive(Clone)] +#[must_use = "iterator adaptors are lazy and do nothing unless consumed"] +pub struct Update { + iter: I, + f: F, +} + +impl fmt::Debug for Update +where + I: fmt::Debug, +{ + debug_fmt_fields!(Update, iter); +} + +/// Create a new `Update` iterator. 
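+///
+/// For example (illustrative): applying `update` with `|x| *x *= 2` to `1, 2, 3`
+/// yields `2, 4, 6`; each item is mutated in place by `f` just before it is yielded.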
+pub fn update(iter: I, f: F) -> Update +where + I: Iterator, + F: FnMut(&mut I::Item), +{ + Update { iter, f } +} + +impl Iterator for Update +where + I: Iterator, + F: FnMut(&mut I::Item), +{ + type Item = I::Item; + + fn next(&mut self) -> Option { + if let Some(mut v) = self.iter.next() { + (self.f)(&mut v); + Some(v) + } else { + None + } + } + + fn size_hint(&self) -> (usize, Option) { + self.iter.size_hint() + } + + fn fold(self, init: Acc, mut g: G) -> Acc + where + G: FnMut(Acc, Self::Item) -> Acc, + { + let mut f = self.f; + self.iter.fold(init, move |acc, mut v| { + f(&mut v); + g(acc, v) + }) + } + + // if possible, re-use inner iterator specializations in collect + fn collect(self) -> C + where + C: FromIterator, + { + let mut f = self.f; + self.iter + .map(move |mut v| { + f(&mut v); + v + }) + .collect() + } +} + +impl ExactSizeIterator for Update +where + I: ExactSizeIterator, + F: FnMut(&mut I::Item), +{ +} + +impl DoubleEndedIterator for Update +where + I: DoubleEndedIterator, + F: FnMut(&mut I::Item), +{ + fn next_back(&mut self) -> Option { + if let Some(mut v) = self.iter.next_back() { + (self.f)(&mut v); + Some(v) + } else { + None + } + } +} + +impl FusedIterator for Update +where + I: FusedIterator, + F: FnMut(&mut I::Item), +{ +} diff --git a/vendor/itertools/src/adaptors/multi_product.rs b/vendor/itertools/src/adaptors/multi_product.rs new file mode 100644 index 00000000000000..314d4a46ef9272 --- /dev/null +++ b/vendor/itertools/src/adaptors/multi_product.rs @@ -0,0 +1,231 @@ +#![cfg(feature = "use_alloc")] +use Option::{self as State, None as ProductEnded, Some as ProductInProgress}; +use Option::{self as CurrentItems, None as NotYetPopulated, Some as Populated}; + +use alloc::vec::Vec; + +use crate::size_hint; + +#[derive(Clone)] +/// An iterator adaptor that iterates over the cartesian product of +/// multiple iterators of type `I`. +/// +/// An iterator element type is `Vec`. +/// +/// See [`.multi_cartesian_product()`](crate::Itertools::multi_cartesian_product) +/// for more information. +#[must_use = "iterator adaptors are lazy and do nothing unless consumed"] +pub struct MultiProduct(State>) +where + I: Iterator + Clone, + I::Item: Clone; + +#[derive(Clone)] +/// Internals for `MultiProduct`. +struct MultiProductInner +where + I: Iterator + Clone, + I::Item: Clone, +{ + /// Holds the iterators. + iters: Vec>, + /// Not populated at the beginning then it holds the current item of each iterator. + cur: CurrentItems>, +} + +impl std::fmt::Debug for MultiProduct +where + I: Iterator + Clone + std::fmt::Debug, + I::Item: Clone + std::fmt::Debug, +{ + debug_fmt_fields!(MultiProduct, 0); +} + +impl std::fmt::Debug for MultiProductInner +where + I: Iterator + Clone + std::fmt::Debug, + I::Item: Clone + std::fmt::Debug, +{ + debug_fmt_fields!(MultiProductInner, iters, cur); +} + +/// Create a new cartesian product iterator over an arbitrary number +/// of iterators of the same type. +/// +/// Iterator element is of type `Vec`. +pub fn multi_cartesian_product(iters: H) -> MultiProduct<::IntoIter> +where + H: Iterator, + H::Item: IntoIterator, + ::IntoIter: Clone, + ::Item: Clone, +{ + let inner = MultiProductInner { + iters: iters + .map(|i| MultiProductIter::new(i.into_iter())) + .collect(), + cur: NotYetPopulated, + }; + MultiProduct(ProductInProgress(inner)) +} + +#[derive(Clone, Debug)] +/// Holds the state of a single iterator within a `MultiProduct`. 
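+/// `iter` is the clone currently being advanced, while `iter_orig` keeps a pristine
+/// copy so the position can be reset whenever an earlier (more significant) iterator
+/// of the product advances.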
+struct MultiProductIter +where + I: Iterator + Clone, + I::Item: Clone, +{ + iter: I, + iter_orig: I, +} + +impl MultiProductIter +where + I: Iterator + Clone, + I::Item: Clone, +{ + fn new(iter: I) -> Self { + Self { + iter: iter.clone(), + iter_orig: iter, + } + } +} + +impl Iterator for MultiProduct +where + I: Iterator + Clone, + I::Item: Clone, +{ + type Item = Vec; + + fn next(&mut self) -> Option { + // This fuses the iterator. + let inner = self.0.as_mut()?; + match &mut inner.cur { + Populated(values) => { + debug_assert!(!inner.iters.is_empty()); + // Find (from the right) a non-finished iterator and + // reset the finished ones encountered. + for (iter, item) in inner.iters.iter_mut().zip(values.iter_mut()).rev() { + if let Some(new) = iter.iter.next() { + *item = new; + return Some(values.clone()); + } else { + iter.iter = iter.iter_orig.clone(); + // `cur` is populated so the untouched `iter_orig` can not be empty. + *item = iter.iter.next().unwrap(); + } + } + self.0 = ProductEnded; + None + } + // Only the first time. + NotYetPopulated => { + let next: Option> = inner.iters.iter_mut().map(|i| i.iter.next()).collect(); + if next.is_none() || inner.iters.is_empty() { + // This cartesian product had at most one item to generate and now ends. + self.0 = ProductEnded; + } else { + inner.cur.clone_from(&next); + } + next + } + } + } + + fn count(self) -> usize { + match self.0 { + ProductEnded => 0, + // The iterator is fresh so the count is the product of the length of each iterator: + // - If one of them is empty, stop counting. + // - Less `count()` calls than the general case. + ProductInProgress(MultiProductInner { + iters, + cur: NotYetPopulated, + }) => iters + .into_iter() + .map(|iter| iter.iter_orig.count()) + .try_fold(1, |product, count| { + if count == 0 { + None + } else { + Some(product * count) + } + }) + .unwrap_or_default(), + // The general case. + ProductInProgress(MultiProductInner { + iters, + cur: Populated(_), + }) => iters.into_iter().fold(0, |mut acc, iter| { + if acc != 0 { + acc *= iter.iter_orig.count(); + } + acc + iter.iter.count() + }), + } + } + + fn size_hint(&self) -> (usize, Option) { + match &self.0 { + ProductEnded => (0, Some(0)), + ProductInProgress(MultiProductInner { + iters, + cur: NotYetPopulated, + }) => iters + .iter() + .map(|iter| iter.iter_orig.size_hint()) + .fold((1, Some(1)), size_hint::mul), + ProductInProgress(MultiProductInner { + iters, + cur: Populated(_), + }) => { + if let [first, tail @ ..] = &iters[..] { + tail.iter().fold(first.iter.size_hint(), |mut sh, iter| { + sh = size_hint::mul(sh, iter.iter_orig.size_hint()); + size_hint::add(sh, iter.iter.size_hint()) + }) + } else { + // Since it is populated, this cartesian product has started so `iters` is not empty. + unreachable!() + } + } + } + } + + fn last(self) -> Option { + let MultiProductInner { iters, cur } = self.0?; + // Collect the last item of each iterator of the product. + if let Populated(values) = cur { + let mut count = iters.len(); + let last = iters + .into_iter() + .zip(values) + .map(|(i, value)| { + i.iter.last().unwrap_or_else(|| { + // The iterator is empty, use its current `value`. + count -= 1; + value + }) + }) + .collect(); + if count == 0 { + // `values` was the last item. 
+ None + } else { + Some(last) + } + } else { + iters.into_iter().map(|i| i.iter.last()).collect() + } + } +} + +impl std::iter::FusedIterator for MultiProduct +where + I: Iterator + Clone, + I::Item: Clone, +{ +} diff --git a/vendor/itertools/src/combinations.rs b/vendor/itertools/src/combinations.rs new file mode 100644 index 00000000000000..6bb2f3ec66911c --- /dev/null +++ b/vendor/itertools/src/combinations.rs @@ -0,0 +1,243 @@ +use std::fmt; +use std::iter::FusedIterator; + +use super::lazy_buffer::LazyBuffer; +use alloc::vec::Vec; + +use crate::adaptors::checked_binomial; + +/// An iterator to iterate through all the `k`-length combinations in an iterator. +/// +/// See [`.combinations()`](crate::Itertools::combinations) for more information. +#[must_use = "iterator adaptors are lazy and do nothing unless consumed"] +pub struct Combinations { + indices: Vec, + pool: LazyBuffer, + first: bool, +} + +impl Clone for Combinations +where + I: Clone + Iterator, + I::Item: Clone, +{ + clone_fields!(indices, pool, first); +} + +impl fmt::Debug for Combinations +where + I: Iterator + fmt::Debug, + I::Item: fmt::Debug, +{ + debug_fmt_fields!(Combinations, indices, pool, first); +} + +/// Create a new `Combinations` from a clonable iterator. +pub fn combinations(iter: I, k: usize) -> Combinations +where + I: Iterator, +{ + Combinations { + indices: (0..k).collect(), + pool: LazyBuffer::new(iter), + first: true, + } +} + +impl Combinations { + /// Returns the length of a combination produced by this iterator. + #[inline] + pub fn k(&self) -> usize { + self.indices.len() + } + + /// Returns the (current) length of the pool from which combination elements are + /// selected. This value can change between invocations of [`next`](Combinations::next). + #[inline] + pub fn n(&self) -> usize { + self.pool.len() + } + + /// Returns a reference to the source pool. + #[inline] + pub(crate) fn src(&self) -> &LazyBuffer { + &self.pool + } + + /// Resets this `Combinations` back to an initial state for combinations of length + /// `k` over the same pool data source. If `k` is larger than the current length + /// of the data pool an attempt is made to prefill the pool so that it holds `k` + /// elements. + pub(crate) fn reset(&mut self, k: usize) { + self.first = true; + + if k < self.indices.len() { + self.indices.truncate(k); + for i in 0..k { + self.indices[i] = i; + } + } else { + for i in 0..self.indices.len() { + self.indices[i] = i; + } + self.indices.extend(self.indices.len()..k); + self.pool.prefill(k); + } + } + + pub(crate) fn n_and_count(self) -> (usize, usize) { + let Self { + indices, + pool, + first, + } = self; + let n = pool.count(); + (n, remaining_for(n, first, &indices).unwrap()) + } + + /// Initialises the iterator by filling a buffer with elements from the + /// iterator. Returns true if there are no combinations, false otherwise. + fn init(&mut self) -> bool { + self.pool.prefill(self.k()); + let done = self.k() > self.n(); + if !done { + self.first = false; + } + + done + } + + /// Increments indices representing the combination to advance to the next + /// (in lexicographic order by increasing sequence) combination. For example + /// if we have n=4 & k=2 then `[0, 1] -> [0, 2] -> [0, 3] -> [1, 2] -> ...` + /// + /// Returns true if we've run out of combinations, false otherwise. 
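+    /// The pool is filled lazily: when the rightmost index already points at the last
+    /// buffered element, one more item is pulled from the underlying iterator before
+    /// the indices are scanned, so the effective `n` can grow between calls.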
+ fn increment_indices(&mut self) -> bool { + if self.indices.is_empty() { + return true; // Done + } + + // Scan from the end, looking for an index to increment + let mut i: usize = self.indices.len() - 1; + + // Check if we need to consume more from the iterator + if self.indices[i] == self.pool.len() - 1 { + self.pool.get_next(); // may change pool size + } + + while self.indices[i] == i + self.pool.len() - self.indices.len() { + if i > 0 { + i -= 1; + } else { + // Reached the last combination + return true; + } + } + + // Increment index, and reset the ones to its right + self.indices[i] += 1; + for j in i + 1..self.indices.len() { + self.indices[j] = self.indices[j - 1] + 1; + } + + // If we've made it this far, we haven't run out of combos + false + } + + /// Returns the n-th item or the number of successful steps. + pub(crate) fn try_nth(&mut self, n: usize) -> Result<::Item, usize> + where + I::Item: Clone, + { + let done = if self.first { + self.init() + } else { + self.increment_indices() + }; + if done { + return Err(0); + } + for i in 0..n { + if self.increment_indices() { + return Err(i + 1); + } + } + Ok(self.pool.get_at(&self.indices)) + } +} + +impl Iterator for Combinations +where + I: Iterator, + I::Item: Clone, +{ + type Item = Vec; + fn next(&mut self) -> Option { + let done = if self.first { + self.init() + } else { + self.increment_indices() + }; + + if done { + return None; + } + + Some(self.pool.get_at(&self.indices)) + } + + fn nth(&mut self, n: usize) -> Option { + self.try_nth(n).ok() + } + + fn size_hint(&self) -> (usize, Option) { + let (mut low, mut upp) = self.pool.size_hint(); + low = remaining_for(low, self.first, &self.indices).unwrap_or(usize::MAX); + upp = upp.and_then(|upp| remaining_for(upp, self.first, &self.indices)); + (low, upp) + } + + #[inline] + fn count(self) -> usize { + self.n_and_count().1 + } +} + +impl FusedIterator for Combinations +where + I: Iterator, + I::Item: Clone, +{ +} + +/// For a given size `n`, return the count of remaining combinations or None if it would overflow. +fn remaining_for(n: usize, first: bool, indices: &[usize]) -> Option { + let k = indices.len(); + if n < k { + Some(0) + } else if first { + checked_binomial(n, k) + } else { + // https://en.wikipedia.org/wiki/Combinatorial_number_system + // http://www.site.uottawa.ca/~lucia/courses/5165-09/GenCombObj.pdf + + // The combinations generated after the current one can be counted by counting as follows: + // - The subsequent combinations that differ in indices[0]: + // If subsequent combinations differ in indices[0], then their value for indices[0] + // must be at least 1 greater than the current indices[0]. + // As indices is strictly monotonically sorted, this means we can effectively choose k values + // from (n - 1 - indices[0]), leading to binomial(n - 1 - indices[0], k) possibilities. + // - The subsequent combinations with same indices[0], but differing indices[1]: + // Here we can choose k - 1 values from (n - 1 - indices[1]) values, + // leading to binomial(n - 1 - indices[1], k - 1) possibilities. + // - (...) + // - The subsequent combinations with same indices[0..=i], but differing indices[i]: + // Here we can choose k - i values from (n - 1 - indices[i]) values: binomial(n - 1 - indices[i], k - i). + // Since subsequent combinations can in any index, we must sum up the aforementioned binomial coefficients. + + // Below, `n0` resembles indices[i]. 
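+        // Worked example (illustrative): for n = 5, k = 3 and current indices [0, 2, 3],
+        // the remaining count is binomial(4, 3) + binomial(2, 2) + binomial(1, 1) = 6,
+        // i.e. the six combinations that follow [0, 2, 3] in lexicographic order.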
+ indices.iter().enumerate().try_fold(0usize, |sum, (i, n0)| { + sum.checked_add(checked_binomial(n - 1 - *n0, k - i)?) + }) + } +} diff --git a/vendor/itertools/src/combinations_with_replacement.rs b/vendor/itertools/src/combinations_with_replacement.rs new file mode 100644 index 00000000000000..f363f9ba26b1ee --- /dev/null +++ b/vendor/itertools/src/combinations_with_replacement.rs @@ -0,0 +1,192 @@ +use alloc::boxed::Box; +use alloc::vec::Vec; +use std::fmt; +use std::iter::FusedIterator; + +use super::lazy_buffer::LazyBuffer; +use crate::adaptors::checked_binomial; + +/// An iterator to iterate through all the `n`-length combinations in an iterator, with replacement. +/// +/// See [`.combinations_with_replacement()`](crate::Itertools::combinations_with_replacement) +/// for more information. +#[derive(Clone)] +#[must_use = "iterator adaptors are lazy and do nothing unless consumed"] +pub struct CombinationsWithReplacement +where + I: Iterator, + I::Item: Clone, +{ + indices: Box<[usize]>, + pool: LazyBuffer, + first: bool, +} + +impl fmt::Debug for CombinationsWithReplacement +where + I: Iterator + fmt::Debug, + I::Item: fmt::Debug + Clone, +{ + debug_fmt_fields!(CombinationsWithReplacement, indices, pool, first); +} + +/// Create a new `CombinationsWithReplacement` from a clonable iterator. +pub fn combinations_with_replacement(iter: I, k: usize) -> CombinationsWithReplacement +where + I: Iterator, + I::Item: Clone, +{ + let indices = alloc::vec![0; k].into_boxed_slice(); + let pool: LazyBuffer = LazyBuffer::new(iter); + + CombinationsWithReplacement { + indices, + pool, + first: true, + } +} + +impl CombinationsWithReplacement +where + I: Iterator, + I::Item: Clone, +{ + /// Increments indices representing the combination to advance to the next + /// (in lexicographic order by increasing sequence) combination. + /// + /// Returns true if we've run out of combinations, false otherwise. + fn increment_indices(&mut self) -> bool { + // Check if we need to consume more from the iterator + // This will run while we increment our first index digit + self.pool.get_next(); + + // Work out where we need to update our indices + let mut increment = None; + for (i, indices_int) in self.indices.iter().enumerate().rev() { + if *indices_int < self.pool.len() - 1 { + increment = Some((i, indices_int + 1)); + break; + } + } + match increment { + // If we can update the indices further + Some((increment_from, increment_value)) => { + // We need to update the rightmost non-max value + // and all those to the right + for i in &mut self.indices[increment_from..] 
{ + *i = increment_value; + } + // TODO: once MSRV >= 1.50, use `fill` instead: + // self.indices[increment_from..].fill(increment_value); + false + } + // Otherwise, we're done + None => true, + } + } +} + +impl Iterator for CombinationsWithReplacement +where + I: Iterator, + I::Item: Clone, +{ + type Item = Vec; + + fn next(&mut self) -> Option { + if self.first { + // In empty edge cases, stop iterating immediately + if !(self.indices.is_empty() || self.pool.get_next()) { + return None; + } + self.first = false; + } else if self.increment_indices() { + return None; + } + Some(self.pool.get_at(&self.indices)) + } + + fn nth(&mut self, n: usize) -> Option { + if self.first { + // In empty edge cases, stop iterating immediately + if !(self.indices.is_empty() || self.pool.get_next()) { + return None; + } + self.first = false; + } else if self.increment_indices() { + return None; + } + for _ in 0..n { + if self.increment_indices() { + return None; + } + } + Some(self.pool.get_at(&self.indices)) + } + + fn size_hint(&self) -> (usize, Option) { + let (mut low, mut upp) = self.pool.size_hint(); + low = remaining_for(low, self.first, &self.indices).unwrap_or(usize::MAX); + upp = upp.and_then(|upp| remaining_for(upp, self.first, &self.indices)); + (low, upp) + } + + fn count(self) -> usize { + let Self { + indices, + pool, + first, + } = self; + let n = pool.count(); + remaining_for(n, first, &indices).unwrap() + } +} + +impl FusedIterator for CombinationsWithReplacement +where + I: Iterator, + I::Item: Clone, +{ +} + +/// For a given size `n`, return the count of remaining combinations with replacement or None if it would overflow. +fn remaining_for(n: usize, first: bool, indices: &[usize]) -> Option { + // With a "stars and bars" representation, choose k values with replacement from n values is + // like choosing k out of k + n − 1 positions (hence binomial(k + n - 1, k) possibilities) + // to place k stars and therefore n - 1 bars. + // Example (n=4, k=6): ***|*||** represents [0,0,0,1,3,3]. + let count = |n: usize, k: usize| { + let positions = if n == 0 { + k.saturating_sub(1) + } else { + (n - 1).checked_add(k)? + }; + checked_binomial(positions, k) + }; + let k = indices.len(); + if first { + count(n, k) + } else { + // The algorithm is similar to the one for combinations *without replacement*, + // except we choose values *with replacement* and indices are *non-strictly* monotonically sorted. + + // The combinations generated after the current one can be counted by counting as follows: + // - The subsequent combinations that differ in indices[0]: + // If subsequent combinations differ in indices[0], then their value for indices[0] + // must be at least 1 greater than the current indices[0]. + // As indices is monotonically sorted, this means we can effectively choose k values with + // replacement from (n - 1 - indices[0]), leading to count(n - 1 - indices[0], k) possibilities. + // - The subsequent combinations with same indices[0], but differing indices[1]: + // Here we can choose k - 1 values with replacement from (n - 1 - indices[1]) values, + // leading to count(n - 1 - indices[1], k - 1) possibilities. + // - (...) + // - The subsequent combinations with same indices[0..=i], but differing indices[i]: + // Here we can choose k - i values with replacement from (n - 1 - indices[i]) values: count(n - 1 - indices[i], k - i). + // Since subsequent combinations can in any index, we must sum up the aforementioned binomial coefficients. + + // Below, `n0` resembles indices[i]. 
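+        // Worked example (illustrative): for n = 3, k = 2 and current indices [0, 2],
+        // the remaining count is count(2, 2) + count(0, 1) = binomial(3, 2) + 0 = 3,
+        // i.e. [1, 1], [1, 2] and [2, 2] still follow in lexicographic order.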
+ indices.iter().enumerate().try_fold(0usize, |sum, (i, n0)| { + sum.checked_add(count(n - 1 - *n0, k - i)?) + }) + } +} diff --git a/vendor/itertools/src/concat_impl.rs b/vendor/itertools/src/concat_impl.rs new file mode 100644 index 00000000000000..ec7b91c605e639 --- /dev/null +++ b/vendor/itertools/src/concat_impl.rs @@ -0,0 +1,30 @@ +use crate::Itertools; + +/// Combine all an iterator's elements into one element by using [`Extend`]. +/// +/// [`IntoIterator`]-enabled version of [`Itertools::concat`]. +/// +/// This combinator will extend the first item with each of the rest of the +/// items of the iterator. If the iterator is empty, the default value of +/// `I::Item` is returned. +/// +/// ```rust +/// use itertools::concat; +/// +/// let input = vec![vec![1], vec![2, 3], vec![4, 5, 6]]; +/// assert_eq!(concat(input), vec![1, 2, 3, 4, 5, 6]); +/// ``` +pub fn concat(iterable: I) -> I::Item +where + I: IntoIterator, + I::Item: Extend<<::Item as IntoIterator>::Item> + IntoIterator + Default, +{ + #[allow(deprecated)] //TODO: once msrv hits 1.51. replace `fold1` with `reduce` + iterable + .into_iter() + .fold1(|mut a, b| { + a.extend(b); + a + }) + .unwrap_or_default() +} diff --git a/vendor/itertools/src/cons_tuples_impl.rs b/vendor/itertools/src/cons_tuples_impl.rs new file mode 100644 index 00000000000000..9ab309478875d1 --- /dev/null +++ b/vendor/itertools/src/cons_tuples_impl.rs @@ -0,0 +1,58 @@ +macro_rules! impl_cons_iter( + ($_A:ident, $_B:ident, ) => (); // stop + + ($A:ident, $($B:ident,)*) => ( + impl_cons_iter!($($B,)*); + #[allow(non_snake_case)] + impl Iterator for ConsTuples + where Iter: Iterator, + { + type Item = ($($B,)* X, ); + fn next(&mut self) -> Option { + self.iter.next().map(|(($($B,)*), x)| ($($B,)* x, )) + } + + fn size_hint(&self) -> (usize, Option) { + self.iter.size_hint() + } + fn fold(self, accum: Acc, mut f: Fold) -> Acc + where Fold: FnMut(Acc, Self::Item) -> Acc, + { + self.iter.fold(accum, move |acc, (($($B,)*), x)| f(acc, ($($B,)* x, ))) + } + } + ); +); + +impl_cons_iter!(A, B, C, D, E, F, G, H, I, J, K, L,); + +/// An iterator that maps an iterator of tuples like +/// `((A, B), C)` to an iterator of `(A, B, C)`. +/// +/// Used by the `iproduct!()` macro. +#[must_use = "iterator adaptors are lazy and do nothing unless consumed"] +#[derive(Debug)] +pub struct ConsTuples +where + I: Iterator, +{ + iter: I, +} + +impl Clone for ConsTuples +where + I: Clone + Iterator, +{ + clone_fields!(iter); +} + +/// Create an iterator that maps for example iterators of +/// `((A, B), C)` to `(A, B, C)`. +pub fn cons_tuples(iterable: I) -> ConsTuples +where + I: IntoIterator, +{ + ConsTuples { + iter: iterable.into_iter(), + } +} diff --git a/vendor/itertools/src/diff.rs b/vendor/itertools/src/diff.rs new file mode 100644 index 00000000000000..c6d99657efd347 --- /dev/null +++ b/vendor/itertools/src/diff.rs @@ -0,0 +1,104 @@ +//! "Diff"ing iterators for caching elements to sequential collections without requiring the new +//! elements' iterator to be `Clone`. +//! +//! - [`Diff`] (produced by the [`diff_with`] function) +//! describes the difference between two non-`Clone` iterators `I` and `J` after breaking ASAP from +//! a lock-step comparison. + +use std::fmt; + +use crate::free::put_back; +use crate::structs::PutBack; + +/// A type returned by the [`diff_with`] function. +/// +/// `Diff` represents the way in which the elements yielded by the iterator `I` differ to some +/// iterator `J`. 
+pub enum Diff +where + I: Iterator, + J: Iterator, +{ + /// The index of the first non-matching element along with both iterator's remaining elements + /// starting with the first mis-match. + FirstMismatch(usize, PutBack, PutBack), + /// The total number of elements that were in `J` along with the remaining elements of `I`. + Shorter(usize, PutBack), + /// The total number of elements that were in `I` along with the remaining elements of `J`. + Longer(usize, PutBack), +} + +impl fmt::Debug for Diff +where + I: Iterator, + J: Iterator, + PutBack: fmt::Debug, + PutBack: fmt::Debug, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + Self::FirstMismatch(idx, i, j) => f + .debug_tuple("FirstMismatch") + .field(idx) + .field(i) + .field(j) + .finish(), + Self::Shorter(idx, i) => f.debug_tuple("Shorter").field(idx).field(i).finish(), + Self::Longer(idx, j) => f.debug_tuple("Longer").field(idx).field(j).finish(), + } + } +} + +impl Clone for Diff +where + I: Iterator, + J: Iterator, + PutBack: Clone, + PutBack: Clone, +{ + fn clone(&self) -> Self { + match self { + Self::FirstMismatch(idx, i, j) => Self::FirstMismatch(*idx, i.clone(), j.clone()), + Self::Shorter(idx, i) => Self::Shorter(*idx, i.clone()), + Self::Longer(idx, j) => Self::Longer(*idx, j.clone()), + } + } +} + +/// Compares every element yielded by both `i` and `j` with the given function in lock-step and +/// returns a [`Diff`] which describes how `j` differs from `i`. +/// +/// If the number of elements yielded by `j` is less than the number of elements yielded by `i`, +/// the number of `j` elements yielded will be returned along with `i`'s remaining elements as +/// `Diff::Shorter`. +/// +/// If the two elements of a step differ, the index of those elements along with the remaining +/// elements of both `i` and `j` are returned as `Diff::FirstMismatch`. +/// +/// If `i` becomes exhausted before `j` becomes exhausted, the number of elements in `i` along with +/// the remaining `j` elements will be returned as `Diff::Longer`. 
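+///
+/// For example (illustrative): comparing `[1, 2, 3]` against `[1, 2, 3, 4]` with `==`
+/// returns `Diff::Longer(3, rest)`, where `rest` yields the single remaining element `4`.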
+pub fn diff_with(i: I, j: J, mut is_equal: F) -> Option> +where + I: IntoIterator, + J: IntoIterator, + F: FnMut(&I::Item, &J::Item) -> bool, +{ + let mut i = i.into_iter(); + let mut j = j.into_iter(); + let mut idx = 0; + while let Some(i_elem) = i.next() { + match j.next() { + None => return Some(Diff::Shorter(idx, put_back(i).with_value(i_elem))), + Some(j_elem) => { + if !is_equal(&i_elem, &j_elem) { + let remaining_i = put_back(i).with_value(i_elem); + let remaining_j = put_back(j).with_value(j_elem); + return Some(Diff::FirstMismatch(idx, remaining_i, remaining_j)); + } + } + } + idx += 1; + } + j.next() + .map(|j_elem| Diff::Longer(idx, put_back(j).with_value(j_elem))) +} diff --git a/vendor/itertools/src/duplicates_impl.rs b/vendor/itertools/src/duplicates_impl.rs new file mode 100644 index 00000000000000..a0db15432d854f --- /dev/null +++ b/vendor/itertools/src/duplicates_impl.rs @@ -0,0 +1,216 @@ +use std::hash::Hash; + +mod private { + use std::collections::HashMap; + use std::fmt; + use std::hash::Hash; + + #[derive(Clone)] + #[must_use = "iterator adaptors are lazy and do nothing unless consumed"] + pub struct DuplicatesBy { + pub(crate) iter: I, + pub(crate) meta: Meta, + } + + impl fmt::Debug for DuplicatesBy + where + I: Iterator + fmt::Debug, + V: fmt::Debug + Hash + Eq, + { + debug_fmt_fields!(DuplicatesBy, iter, meta.used); + } + + impl DuplicatesBy { + pub(crate) fn new(iter: I, key_method: F) -> Self { + Self { + iter, + meta: Meta { + used: HashMap::new(), + pending: 0, + key_method, + }, + } + } + } + + #[derive(Clone)] + pub struct Meta { + used: HashMap, + pending: usize, + key_method: F, + } + + impl Meta + where + Key: Eq + Hash, + { + /// Takes an item and returns it back to the caller if it's the second time we see it. + /// Otherwise the item is consumed and None is returned + #[inline(always)] + fn filter(&mut self, item: I) -> Option + where + F: KeyMethod, + { + let kv = self.key_method.make(item); + match self.used.get_mut(kv.key_ref()) { + None => { + self.used.insert(kv.key(), false); + self.pending += 1; + None + } + Some(true) => None, + Some(produced) => { + *produced = true; + self.pending -= 1; + Some(kv.value()) + } + } + } + } + + impl Iterator for DuplicatesBy + where + I: Iterator, + Key: Eq + Hash, + F: KeyMethod, + { + type Item = I::Item; + + fn next(&mut self) -> Option { + let Self { iter, meta } = self; + iter.find_map(|v| meta.filter(v)) + } + + #[inline] + fn size_hint(&self) -> (usize, Option) { + let (_, hi) = self.iter.size_hint(); + let hi = hi.map(|hi| { + if hi <= self.meta.pending { + // fewer or equally many iter-remaining elements than pending elements + // => at most, each iter-remaining element is matched + hi + } else { + // fewer pending elements than iter-remaining elements + // => at most: + // * each pending element is matched + // * the other iter-remaining elements come in pairs + self.meta.pending + (hi - self.meta.pending) / 2 + } + }); + // The lower bound is always 0 since we might only get unique items from now on + (0, hi) + } + } + + impl DoubleEndedIterator for DuplicatesBy + where + I: DoubleEndedIterator, + Key: Eq + Hash, + F: KeyMethod, + { + fn next_back(&mut self) -> Option { + let Self { iter, meta } = self; + iter.rev().find_map(|v| meta.filter(v)) + } + } + + /// A keying method for use with `DuplicatesBy` + pub trait KeyMethod { + type Container: KeyXorValue; + + fn make(&mut self, value: V) -> Self::Container; + } + + /// Apply the identity function to elements before checking them for equality. 
+ #[derive(Debug, Clone)] + pub struct ById; + impl KeyMethod for ById { + type Container = JustValue; + + fn make(&mut self, v: V) -> Self::Container { + JustValue(v) + } + } + + /// Apply a user-supplied function to elements before checking them for equality. + #[derive(Clone)] + pub struct ByFn(pub(crate) F); + impl fmt::Debug for ByFn { + debug_fmt_fields!(ByFn,); + } + impl KeyMethod for ByFn + where + F: FnMut(&V) -> K, + { + type Container = KeyValue; + + fn make(&mut self, v: V) -> Self::Container { + KeyValue((self.0)(&v), v) + } + } + + // Implementors of this trait can hold onto a key and a value but only give access to one of them + // at a time. This allows the key and the value to be the same value internally + pub trait KeyXorValue { + fn key_ref(&self) -> &K; + fn key(self) -> K; + fn value(self) -> V; + } + + #[derive(Debug)] + pub struct KeyValue(K, V); + impl KeyXorValue for KeyValue { + fn key_ref(&self) -> &K { + &self.0 + } + fn key(self) -> K { + self.0 + } + fn value(self) -> V { + self.1 + } + } + + #[derive(Debug)] + pub struct JustValue(V); + impl KeyXorValue for JustValue { + fn key_ref(&self) -> &V { + &self.0 + } + fn key(self) -> V { + self.0 + } + fn value(self) -> V { + self.0 + } + } +} + +/// An iterator adapter to filter for duplicate elements. +/// +/// See [`.duplicates_by()`](crate::Itertools::duplicates_by) for more information. +pub type DuplicatesBy = private::DuplicatesBy>; + +/// Create a new `DuplicatesBy` iterator. +pub fn duplicates_by(iter: I, f: F) -> DuplicatesBy +where + Key: Eq + Hash, + F: FnMut(&I::Item) -> Key, + I: Iterator, +{ + DuplicatesBy::new(iter, private::ByFn(f)) +} + +/// An iterator adapter to filter out duplicate elements. +/// +/// See [`.duplicates()`](crate::Itertools::duplicates) for more information. +pub type Duplicates = private::DuplicatesBy::Item, private::ById>; + +/// Create a new `Duplicates` iterator. +pub fn duplicates(iter: I) -> Duplicates +where + I: Iterator, + I::Item: Eq + Hash, +{ + Duplicates::new(iter, private::ById) +} diff --git a/vendor/itertools/src/either_or_both.rs b/vendor/itertools/src/either_or_both.rs new file mode 100644 index 00000000000000..b7a7fc14115b40 --- /dev/null +++ b/vendor/itertools/src/either_or_both.rs @@ -0,0 +1,514 @@ +use core::ops::{Deref, DerefMut}; + +use crate::EitherOrBoth::*; + +use either::Either; + +/// Value that either holds a single A or B, or both. +#[derive(Clone, PartialEq, Eq, Hash, Debug)] +pub enum EitherOrBoth { + /// Both values are present. + Both(A, B), + /// Only the left value of type `A` is present. + Left(A), + /// Only the right value of type `B` is present. + Right(B), +} + +impl EitherOrBoth { + /// If `Left`, or `Both`, return true. Otherwise, return false. + pub fn has_left(&self) -> bool { + self.as_ref().left().is_some() + } + + /// If `Right`, or `Both`, return true, otherwise, return false. + pub fn has_right(&self) -> bool { + self.as_ref().right().is_some() + } + + /// If `Left`, return true. Otherwise, return false. + /// Exclusive version of [`has_left`](EitherOrBoth::has_left). + pub fn is_left(&self) -> bool { + matches!(self, Left(_)) + } + + /// If `Right`, return true. Otherwise, return false. + /// Exclusive version of [`has_right`](EitherOrBoth::has_right). + pub fn is_right(&self) -> bool { + matches!(self, Right(_)) + } + + /// If `Both`, return true. Otherwise, return false. + pub fn is_both(&self) -> bool { + self.as_ref().both().is_some() + } + + /// If `Left`, or `Both`, return `Some` with the left value. 
Otherwise, return `None`. + pub fn left(self) -> Option { + match self { + Left(left) | Both(left, _) => Some(left), + _ => None, + } + } + + /// If `Right`, or `Both`, return `Some` with the right value. Otherwise, return `None`. + pub fn right(self) -> Option { + match self { + Right(right) | Both(_, right) => Some(right), + _ => None, + } + } + + /// Return tuple of options corresponding to the left and right value respectively + /// + /// If `Left` return `(Some(..), None)`, if `Right` return `(None,Some(..))`, else return + /// `(Some(..),Some(..))` + pub fn left_and_right(self) -> (Option, Option) { + self.map_any(Some, Some).or_default() + } + + /// If `Left`, return `Some` with the left value. If `Right` or `Both`, return `None`. + /// + /// # Examples + /// + /// ``` + /// // On the `Left` variant. + /// # use itertools::{EitherOrBoth, EitherOrBoth::{Left, Right, Both}}; + /// let x: EitherOrBoth<_, ()> = Left("bonjour"); + /// assert_eq!(x.just_left(), Some("bonjour")); + /// + /// // On the `Right` variant. + /// let x: EitherOrBoth<(), _> = Right("hola"); + /// assert_eq!(x.just_left(), None); + /// + /// // On the `Both` variant. + /// let x = Both("bonjour", "hola"); + /// assert_eq!(x.just_left(), None); + /// ``` + pub fn just_left(self) -> Option { + match self { + Left(left) => Some(left), + _ => None, + } + } + + /// If `Right`, return `Some` with the right value. If `Left` or `Both`, return `None`. + /// + /// # Examples + /// + /// ``` + /// // On the `Left` variant. + /// # use itertools::{EitherOrBoth::{Left, Right, Both}, EitherOrBoth}; + /// let x: EitherOrBoth<_, ()> = Left("auf wiedersehen"); + /// assert_eq!(x.just_left(), Some("auf wiedersehen")); + /// + /// // On the `Right` variant. + /// let x: EitherOrBoth<(), _> = Right("adios"); + /// assert_eq!(x.just_left(), None); + /// + /// // On the `Both` variant. + /// let x = Both("auf wiedersehen", "adios"); + /// assert_eq!(x.just_left(), None); + /// ``` + pub fn just_right(self) -> Option { + match self { + Right(right) => Some(right), + _ => None, + } + } + + /// If `Both`, return `Some` containing the left and right values. Otherwise, return `None`. + pub fn both(self) -> Option<(A, B)> { + match self { + Both(a, b) => Some((a, b)), + _ => None, + } + } + + /// If `Left` or `Both`, return the left value. Otherwise, convert the right value and return it. + pub fn into_left(self) -> A + where + B: Into, + { + match self { + Left(a) | Both(a, _) => a, + Right(b) => b.into(), + } + } + + /// If `Right` or `Both`, return the right value. Otherwise, convert the left value and return it. + pub fn into_right(self) -> B + where + A: Into, + { + match self { + Right(b) | Both(_, b) => b, + Left(a) => a.into(), + } + } + + /// Converts from `&EitherOrBoth` to `EitherOrBoth<&A, &B>`. + pub fn as_ref(&self) -> EitherOrBoth<&A, &B> { + match *self { + Left(ref left) => Left(left), + Right(ref right) => Right(right), + Both(ref left, ref right) => Both(left, right), + } + } + + /// Converts from `&mut EitherOrBoth` to `EitherOrBoth<&mut A, &mut B>`. + pub fn as_mut(&mut self) -> EitherOrBoth<&mut A, &mut B> { + match *self { + Left(ref mut left) => Left(left), + Right(ref mut right) => Right(right), + Both(ref mut left, ref mut right) => Both(left, right), + } + } + + /// Converts from `&EitherOrBoth` to `EitherOrBoth<&_, &_>` using the [`Deref`] trait. 
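+    /// For example (illustrative), this lets an `EitherOrBoth<String, Vec<u8>>`
+    /// be viewed as an `EitherOrBoth<&str, &[u8]>`.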
+ pub fn as_deref(&self) -> EitherOrBoth<&A::Target, &B::Target> + where + A: Deref, + B: Deref, + { + match *self { + Left(ref left) => Left(left), + Right(ref right) => Right(right), + Both(ref left, ref right) => Both(left, right), + } + } + + /// Converts from `&mut EitherOrBoth` to `EitherOrBoth<&mut _, &mut _>` using the [`DerefMut`] trait. + pub fn as_deref_mut(&mut self) -> EitherOrBoth<&mut A::Target, &mut B::Target> + where + A: DerefMut, + B: DerefMut, + { + match *self { + Left(ref mut left) => Left(left), + Right(ref mut right) => Right(right), + Both(ref mut left, ref mut right) => Both(left, right), + } + } + + /// Convert `EitherOrBoth` to `EitherOrBoth`. + pub fn flip(self) -> EitherOrBoth { + match self { + Left(a) => Right(a), + Right(b) => Left(b), + Both(a, b) => Both(b, a), + } + } + + /// Apply the function `f` on the value `a` in `Left(a)` or `Both(a, b)` variants. If it is + /// present rewrapping the result in `self`'s original variant. + pub fn map_left(self, f: F) -> EitherOrBoth + where + F: FnOnce(A) -> M, + { + match self { + Both(a, b) => Both(f(a), b), + Left(a) => Left(f(a)), + Right(b) => Right(b), + } + } + + /// Apply the function `f` on the value `b` in `Right(b)` or `Both(a, b)` variants. + /// If it is present rewrapping the result in `self`'s original variant. + pub fn map_right(self, f: F) -> EitherOrBoth + where + F: FnOnce(B) -> M, + { + match self { + Left(a) => Left(a), + Right(b) => Right(f(b)), + Both(a, b) => Both(a, f(b)), + } + } + + /// Apply the functions `f` and `g` on the value `a` and `b` respectively; + /// found in `Left(a)`, `Right(b)`, or `Both(a, b)` variants. + /// The Result is rewrapped `self`'s original variant. + pub fn map_any(self, f: F, g: G) -> EitherOrBoth + where + F: FnOnce(A) -> L, + G: FnOnce(B) -> R, + { + match self { + Left(a) => Left(f(a)), + Right(b) => Right(g(b)), + Both(a, b) => Both(f(a), g(b)), + } + } + + /// Apply the function `f` on the value `a` in `Left(a)` or `Both(a, _)` variants if it is + /// present. + pub fn left_and_then(self, f: F) -> EitherOrBoth + where + F: FnOnce(A) -> EitherOrBoth, + { + match self { + Left(a) | Both(a, _) => f(a), + Right(b) => Right(b), + } + } + + /// Apply the function `f` on the value `b` + /// in `Right(b)` or `Both(_, b)` variants if it is present. + pub fn right_and_then(self, f: F) -> EitherOrBoth + where + F: FnOnce(B) -> EitherOrBoth, + { + match self { + Left(a) => Left(a), + Right(b) | Both(_, b) => f(b), + } + } + + /// Returns a tuple consisting of the `l` and `r` in `Both(l, r)`, if present. + /// Otherwise, returns the wrapped value for the present element, and the supplied + /// value for the other. The first (`l`) argument is used for a missing `Left` + /// value. The second (`r`) argument is used for a missing `Right` value. + /// + /// Arguments passed to `or` are eagerly evaluated; if you are passing + /// the result of a function call, it is recommended to use [`or_else`], + /// which is lazily evaluated. 
+ /// + /// [`or_else`]: EitherOrBoth::or_else + /// + /// # Examples + /// + /// ``` + /// # use itertools::EitherOrBoth; + /// assert_eq!(EitherOrBoth::Both("tree", 1).or("stone", 5), ("tree", 1)); + /// assert_eq!(EitherOrBoth::Left("tree").or("stone", 5), ("tree", 5)); + /// assert_eq!(EitherOrBoth::Right(1).or("stone", 5), ("stone", 1)); + /// ``` + pub fn or(self, l: A, r: B) -> (A, B) { + match self { + Left(inner_l) => (inner_l, r), + Right(inner_r) => (l, inner_r), + Both(inner_l, inner_r) => (inner_l, inner_r), + } + } + + /// Returns a tuple consisting of the `l` and `r` in `Both(l, r)`, if present. + /// Otherwise, returns the wrapped value for the present element, and the [`default`](Default::default) + /// for the other. + pub fn or_default(self) -> (A, B) + where + A: Default, + B: Default, + { + match self { + Left(l) => (l, B::default()), + Right(r) => (A::default(), r), + Both(l, r) => (l, r), + } + } + + /// Returns a tuple consisting of the `l` and `r` in `Both(l, r)`, if present. + /// Otherwise, returns the wrapped value for the present element, and computes the + /// missing value with the supplied closure. The first argument (`l`) is used for a + /// missing `Left` value. The second argument (`r`) is used for a missing `Right` value. + /// + /// # Examples + /// + /// ``` + /// # use itertools::EitherOrBoth; + /// let k = 10; + /// assert_eq!(EitherOrBoth::Both("tree", 1).or_else(|| "stone", || 2 * k), ("tree", 1)); + /// assert_eq!(EitherOrBoth::Left("tree").or_else(|| "stone", || 2 * k), ("tree", 20)); + /// assert_eq!(EitherOrBoth::Right(1).or_else(|| "stone", || 2 * k), ("stone", 1)); + /// ``` + pub fn or_else A, R: FnOnce() -> B>(self, l: L, r: R) -> (A, B) { + match self { + Left(inner_l) => (inner_l, r()), + Right(inner_r) => (l(), inner_r), + Both(inner_l, inner_r) => (inner_l, inner_r), + } + } + + /// Returns a mutable reference to the left value. If the left value is not present, + /// it is replaced with `val`. + pub fn left_or_insert(&mut self, val: A) -> &mut A { + self.left_or_insert_with(|| val) + } + + /// Returns a mutable reference to the right value. If the right value is not present, + /// it is replaced with `val`. + pub fn right_or_insert(&mut self, val: B) -> &mut B { + self.right_or_insert_with(|| val) + } + + /// If the left value is not present, replace it the value computed by the closure `f`. + /// Returns a mutable reference to the now-present left value. + pub fn left_or_insert_with(&mut self, f: F) -> &mut A + where + F: FnOnce() -> A, + { + match self { + Left(left) | Both(left, _) => left, + Right(_) => self.insert_left(f()), + } + } + + /// If the right value is not present, replace it the value computed by the closure `f`. + /// Returns a mutable reference to the now-present right value. + pub fn right_or_insert_with(&mut self, f: F) -> &mut B + where + F: FnOnce() -> B, + { + match self { + Right(right) | Both(_, right) => right, + Left(_) => self.insert_right(f()), + } + } + + /// Sets the `left` value of this instance, and returns a mutable reference to it. + /// Does not affect the `right` value. + /// + /// # Examples + /// ``` + /// # use itertools::{EitherOrBoth, EitherOrBoth::{Left, Right, Both}}; + /// + /// // Overwriting a pre-existing value. + /// let mut either: EitherOrBoth<_, ()> = Left(0_u32); + /// assert_eq!(*either.insert_left(69), 69); + /// + /// // Inserting a second value. 
+ /// let mut either = Right("no"); + /// assert_eq!(*either.insert_left("yes"), "yes"); + /// assert_eq!(either, Both("yes", "no")); + /// ``` + pub fn insert_left(&mut self, val: A) -> &mut A { + match self { + Left(left) | Both(left, _) => { + *left = val; + left + } + Right(right) => { + // This is like a map in place operation. We move out of the reference, + // change the value, and then move back into the reference. + unsafe { + // SAFETY: We know this pointer is valid for reading since we got it from a reference. + let right = std::ptr::read(right as *mut _); + // SAFETY: Again, we know the pointer is valid since we got it from a reference. + std::ptr::write(self as *mut _, Both(val, right)); + } + + if let Both(left, _) = self { + left + } else { + // SAFETY: The above pattern will always match, since we just + // set `self` equal to `Both`. + unsafe { std::hint::unreachable_unchecked() } + } + } + } + } + + /// Sets the `right` value of this instance, and returns a mutable reference to it. + /// Does not affect the `left` value. + /// + /// # Examples + /// ``` + /// # use itertools::{EitherOrBoth, EitherOrBoth::{Left, Both}}; + /// // Overwriting a pre-existing value. + /// let mut either: EitherOrBoth<_, ()> = Left(0_u32); + /// assert_eq!(*either.insert_left(69), 69); + /// + /// // Inserting a second value. + /// let mut either = Left("what's"); + /// assert_eq!(*either.insert_right(9 + 10), 21 - 2); + /// assert_eq!(either, Both("what's", 9+10)); + /// ``` + pub fn insert_right(&mut self, val: B) -> &mut B { + match self { + Right(right) | Both(_, right) => { + *right = val; + right + } + Left(left) => { + // This is like a map in place operation. We move out of the reference, + // change the value, and then move back into the reference. + unsafe { + // SAFETY: We know this pointer is valid for reading since we got it from a reference. + let left = std::ptr::read(left as *mut _); + // SAFETY: Again, we know the pointer is valid since we got it from a reference. + std::ptr::write(self as *mut _, Both(left, val)); + } + if let Both(_, right) = self { + right + } else { + // SAFETY: The above pattern will always match, since we just + // set `self` equal to `Both`. + unsafe { std::hint::unreachable_unchecked() } + } + } + } + } + + /// Set `self` to `Both(..)`, containing the specified left and right values, + /// and returns a mutable reference to those values. + pub fn insert_both(&mut self, left: A, right: B) -> (&mut A, &mut B) { + *self = Both(left, right); + if let Both(left, right) = self { + (left, right) + } else { + // SAFETY: The above pattern will always match, since we just + // set `self` equal to `Both`. + unsafe { std::hint::unreachable_unchecked() } + } + } +} + +impl EitherOrBoth { + /// Return either value of left, right, or apply a function `f` to both values if both are present. + /// The input function has to return the same type as both Right and Left carry. + /// + /// This function can be used to preferrably extract the left resp. right value, + /// but fall back to the other (i.e. right resp. left) if the preferred one is not present. + /// + /// # Examples + /// ``` + /// # use itertools::EitherOrBoth; + /// assert_eq!(EitherOrBoth::Both(3, 7).reduce(u32::max), 7); + /// assert_eq!(EitherOrBoth::Left(3).reduce(u32::max), 3); + /// assert_eq!(EitherOrBoth::Right(7).reduce(u32::max), 7); + /// + /// // Extract the left value if present, fall back to the right otherwise. 
+ /// assert_eq!(EitherOrBoth::Left("left").reduce(|l, _r| l), "left"); + /// assert_eq!(EitherOrBoth::Right("right").reduce(|l, _r| l), "right"); + /// assert_eq!(EitherOrBoth::Both("left", "right").reduce(|l, _r| l), "left"); + /// ``` + pub fn reduce(self, f: F) -> T + where + F: FnOnce(T, T) -> T, + { + match self { + Left(a) => a, + Right(b) => b, + Both(a, b) => f(a, b), + } + } +} + +impl From> for Option> { + fn from(value: EitherOrBoth) -> Self { + match value { + Left(l) => Some(Either::Left(l)), + Right(r) => Some(Either::Right(r)), + Both(..) => None, + } + } +} + +impl From> for EitherOrBoth { + fn from(either: Either) -> Self { + match either { + Either::Left(l) => Left(l), + Either::Right(l) => Right(l), + } + } +} diff --git a/vendor/itertools/src/exactly_one_err.rs b/vendor/itertools/src/exactly_one_err.rs new file mode 100644 index 00000000000000..19b9e19189a5f1 --- /dev/null +++ b/vendor/itertools/src/exactly_one_err.rs @@ -0,0 +1,125 @@ +#[cfg(feature = "use_std")] +use std::error::Error; +use std::fmt::{Debug, Display, Formatter, Result as FmtResult}; + +use std::iter::ExactSizeIterator; + +use either::Either; + +use crate::size_hint; + +/// Iterator returned for the error case of `Itertools::exactly_one()` +/// This iterator yields exactly the same elements as the input iterator. +/// +/// During the execution of `exactly_one` the iterator must be mutated. This wrapper +/// effectively "restores" the state of the input iterator when it's handed back. +/// +/// This is very similar to `PutBackN` except this iterator only supports 0-2 elements and does not +/// use a `Vec`. +#[derive(Clone)] +pub struct ExactlyOneError +where + I: Iterator, +{ + first_two: Option>, + inner: I, +} + +impl ExactlyOneError +where + I: Iterator, +{ + /// Creates a new `ExactlyOneErr` iterator. 
+ pub(crate) fn new(first_two: Option>, inner: I) -> Self { + Self { first_two, inner } + } + + fn additional_len(&self) -> usize { + match self.first_two { + Some(Either::Left(_)) => 2, + Some(Either::Right(_)) => 1, + None => 0, + } + } +} + +impl Iterator for ExactlyOneError +where + I: Iterator, +{ + type Item = I::Item; + + fn next(&mut self) -> Option { + match self.first_two.take() { + Some(Either::Left([first, second])) => { + self.first_two = Some(Either::Right(second)); + Some(first) + } + Some(Either::Right(second)) => Some(second), + None => self.inner.next(), + } + } + + fn size_hint(&self) -> (usize, Option) { + size_hint::add_scalar(self.inner.size_hint(), self.additional_len()) + } + + fn fold(self, mut init: B, mut f: F) -> B + where + F: FnMut(B, Self::Item) -> B, + { + match self.first_two { + Some(Either::Left([first, second])) => { + init = f(init, first); + init = f(init, second); + } + Some(Either::Right(second)) => init = f(init, second), + None => {} + } + self.inner.fold(init, f) + } +} + +impl ExactSizeIterator for ExactlyOneError where I: ExactSizeIterator {} + +impl Display for ExactlyOneError +where + I: Iterator, +{ + fn fmt(&self, f: &mut Formatter) -> FmtResult { + let additional = self.additional_len(); + if additional > 0 { + write!(f, "got at least 2 elements when exactly one was expected") + } else { + write!(f, "got zero elements when exactly one was expected") + } + } +} + +impl Debug for ExactlyOneError +where + I: Iterator + Debug, + I::Item: Debug, +{ + fn fmt(&self, f: &mut Formatter) -> FmtResult { + let mut dbg = f.debug_struct("ExactlyOneError"); + match &self.first_two { + Some(Either::Left([first, second])) => { + dbg.field("first", first).field("second", second); + } + Some(Either::Right(second)) => { + dbg.field("second", second); + } + None => {} + } + dbg.field("inner", &self.inner).finish() + } +} + +#[cfg(feature = "use_std")] +impl Error for ExactlyOneError +where + I: Iterator + Debug, + I::Item: Debug, +{ +} diff --git a/vendor/itertools/src/extrema_set.rs b/vendor/itertools/src/extrema_set.rs new file mode 100644 index 00000000000000..d24114c6d9ab1f --- /dev/null +++ b/vendor/itertools/src/extrema_set.rs @@ -0,0 +1,50 @@ +#![cfg(feature = "use_alloc")] +use alloc::{vec, vec::Vec}; +use std::cmp::Ordering; + +/// Implementation guts for `min_set`, `min_set_by`, and `min_set_by_key`. +pub fn min_set_impl( + mut it: I, + mut key_for: F, + mut compare: Compare, +) -> Vec +where + I: Iterator, + F: FnMut(&I::Item) -> K, + Compare: FnMut(&I::Item, &I::Item, &K, &K) -> Ordering, +{ + match it.next() { + None => Vec::new(), + Some(element) => { + let mut current_key = key_for(&element); + let mut result = vec![element]; + it.for_each(|element| { + let key = key_for(&element); + match compare(&element, &result[0], &key, ¤t_key) { + Ordering::Less => { + result.clear(); + result.push(element); + current_key = key; + } + Ordering::Equal => { + result.push(element); + } + Ordering::Greater => {} + } + }); + result + } + } +} + +/// Implementation guts for `ax_set`, `max_set_by`, and `max_set_by_key`. 
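+/// It reuses `min_set_impl` with the two elements (and their keys) swapped in the
+/// comparison, so the maximum set is computed as the minimum set under the reversed ordering.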
+pub fn max_set_impl(it: I, key_for: F, mut compare: Compare) -> Vec +where + I: Iterator, + F: FnMut(&I::Item) -> K, + Compare: FnMut(&I::Item, &I::Item, &K, &K) -> Ordering, +{ + min_set_impl(it, key_for, |it1, it2, key1, key2| { + compare(it2, it1, key2, key1) + }) +} diff --git a/vendor/itertools/src/flatten_ok.rs b/vendor/itertools/src/flatten_ok.rs new file mode 100644 index 00000000000000..48f1e90a647965 --- /dev/null +++ b/vendor/itertools/src/flatten_ok.rs @@ -0,0 +1,205 @@ +use crate::size_hint; +use std::{ + fmt, + iter::{DoubleEndedIterator, FusedIterator}, +}; + +pub fn flatten_ok(iter: I) -> FlattenOk +where + I: Iterator>, + T: IntoIterator, +{ + FlattenOk { + iter, + inner_front: None, + inner_back: None, + } +} + +/// An iterator adaptor that flattens `Result::Ok` values and +/// allows `Result::Err` values through unchanged. +/// +/// See [`.flatten_ok()`](crate::Itertools::flatten_ok) for more information. +#[must_use = "iterator adaptors are lazy and do nothing unless consumed"] +pub struct FlattenOk +where + I: Iterator>, + T: IntoIterator, +{ + iter: I, + inner_front: Option, + inner_back: Option, +} + +impl Iterator for FlattenOk +where + I: Iterator>, + T: IntoIterator, +{ + type Item = Result; + + fn next(&mut self) -> Option { + loop { + // Handle the front inner iterator. + if let Some(inner) = &mut self.inner_front { + if let Some(item) = inner.next() { + return Some(Ok(item)); + } + + // This is necessary for the iterator to implement `FusedIterator` + // with only the original iterator being fused. + self.inner_front = None; + } + + match self.iter.next() { + Some(Ok(ok)) => self.inner_front = Some(ok.into_iter()), + Some(Err(e)) => return Some(Err(e)), + None => { + // Handle the back inner iterator. + if let Some(inner) = &mut self.inner_back { + if let Some(item) = inner.next() { + return Some(Ok(item)); + } + + // This is necessary for the iterator to implement `FusedIterator` + // with only the original iterator being fused. + self.inner_back = None; + } else { + return None; + } + } + } + } + } + + fn fold(self, init: B, mut f: F) -> B + where + Self: Sized, + F: FnMut(B, Self::Item) -> B, + { + // Front + let mut acc = match self.inner_front { + Some(x) => x.fold(init, |a, o| f(a, Ok(o))), + None => init, + }; + + acc = self.iter.fold(acc, |acc, x| match x { + Ok(it) => it.into_iter().fold(acc, |a, o| f(a, Ok(o))), + Err(e) => f(acc, Err(e)), + }); + + // Back + match self.inner_back { + Some(x) => x.fold(acc, |a, o| f(a, Ok(o))), + None => acc, + } + } + + fn size_hint(&self) -> (usize, Option) { + let inner_hint = |inner: &Option| { + inner + .as_ref() + .map(Iterator::size_hint) + .unwrap_or((0, Some(0))) + }; + let inner_front = inner_hint(&self.inner_front); + let inner_back = inner_hint(&self.inner_back); + // The outer iterator `Ok` case could be (0, None) as we don't know its size_hint yet. + let outer = match self.iter.size_hint() { + (0, Some(0)) => (0, Some(0)), + _ => (0, None), + }; + + size_hint::add(size_hint::add(inner_front, inner_back), outer) + } +} + +impl DoubleEndedIterator for FlattenOk +where + I: DoubleEndedIterator>, + T: IntoIterator, + T::IntoIter: DoubleEndedIterator, +{ + fn next_back(&mut self) -> Option { + loop { + // Handle the back inner iterator. + if let Some(inner) = &mut self.inner_back { + if let Some(item) = inner.next_back() { + return Some(Ok(item)); + } + + // This is necessary for the iterator to implement `FusedIterator` + // with only the original iterator being fused. 
+ self.inner_back = None; + } + + match self.iter.next_back() { + Some(Ok(ok)) => self.inner_back = Some(ok.into_iter()), + Some(Err(e)) => return Some(Err(e)), + None => { + // Handle the front inner iterator. + if let Some(inner) = &mut self.inner_front { + if let Some(item) = inner.next_back() { + return Some(Ok(item)); + } + + // This is necessary for the iterator to implement `FusedIterator` + // with only the original iterator being fused. + self.inner_front = None; + } else { + return None; + } + } + } + } + } + + fn rfold(self, init: B, mut f: F) -> B + where + Self: Sized, + F: FnMut(B, Self::Item) -> B, + { + // Back + let mut acc = match self.inner_back { + Some(x) => x.rfold(init, |a, o| f(a, Ok(o))), + None => init, + }; + + acc = self.iter.rfold(acc, |acc, x| match x { + Ok(it) => it.into_iter().rfold(acc, |a, o| f(a, Ok(o))), + Err(e) => f(acc, Err(e)), + }); + + // Front + match self.inner_front { + Some(x) => x.rfold(acc, |a, o| f(a, Ok(o))), + None => acc, + } + } +} + +impl Clone for FlattenOk +where + I: Iterator> + Clone, + T: IntoIterator, + T::IntoIter: Clone, +{ + clone_fields!(iter, inner_front, inner_back); +} + +impl fmt::Debug for FlattenOk +where + I: Iterator> + fmt::Debug, + T: IntoIterator, + T::IntoIter: fmt::Debug, +{ + debug_fmt_fields!(FlattenOk, iter, inner_front, inner_back); +} + +/// Only the iterator being flattened needs to implement [`FusedIterator`]. +impl FusedIterator for FlattenOk +where + I: FusedIterator>, + T: IntoIterator, +{ +} diff --git a/vendor/itertools/src/format.rs b/vendor/itertools/src/format.rs new file mode 100644 index 00000000000000..15cee34d6aad9c --- /dev/null +++ b/vendor/itertools/src/format.rs @@ -0,0 +1,178 @@ +use std::cell::Cell; +use std::fmt; + +/// Format all iterator elements lazily, separated by `sep`. +/// +/// The format value can only be formatted once, after that the iterator is +/// exhausted. +/// +/// See [`.format_with()`](crate::Itertools::format_with) for more information. +pub struct FormatWith<'a, I, F> { + sep: &'a str, + /// `FormatWith` uses interior mutability because `Display::fmt` takes `&self`. + inner: Cell>, +} + +/// Format all iterator elements lazily, separated by `sep`. +/// +/// The format value can only be formatted once, after that the iterator is +/// exhausted. +/// +/// See [`.format()`](crate::Itertools::format) +/// for more information. +pub struct Format<'a, I> { + sep: &'a str, + /// `Format` uses interior mutability because `Display::fmt` takes `&self`. 
+ inner: Cell>, +} + +pub fn new_format(iter: I, separator: &str, f: F) -> FormatWith<'_, I, F> +where + I: Iterator, + F: FnMut(I::Item, &mut dyn FnMut(&dyn fmt::Display) -> fmt::Result) -> fmt::Result, +{ + FormatWith { + sep: separator, + inner: Cell::new(Some((iter, f))), + } +} + +pub fn new_format_default(iter: I, separator: &str) -> Format<'_, I> +where + I: Iterator, +{ + Format { + sep: separator, + inner: Cell::new(Some(iter)), + } +} + +impl<'a, I, F> fmt::Display for FormatWith<'a, I, F> +where + I: Iterator, + F: FnMut(I::Item, &mut dyn FnMut(&dyn fmt::Display) -> fmt::Result) -> fmt::Result, +{ + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let (mut iter, mut format) = match self.inner.take() { + Some(t) => t, + None => panic!("FormatWith: was already formatted once"), + }; + + if let Some(fst) = iter.next() { + format(fst, &mut |disp: &dyn fmt::Display| disp.fmt(f))?; + iter.try_for_each(|elt| { + if !self.sep.is_empty() { + f.write_str(self.sep)?; + } + format(elt, &mut |disp: &dyn fmt::Display| disp.fmt(f)) + })?; + } + Ok(()) + } +} + +impl<'a, I, F> fmt::Debug for FormatWith<'a, I, F> +where + I: Iterator, + F: FnMut(I::Item, &mut dyn FnMut(&dyn fmt::Display) -> fmt::Result) -> fmt::Result, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt::Display::fmt(self, f) + } +} + +impl<'a, I> Format<'a, I> +where + I: Iterator, +{ + fn format( + &self, + f: &mut fmt::Formatter, + cb: fn(&I::Item, &mut fmt::Formatter) -> fmt::Result, + ) -> fmt::Result { + let mut iter = match self.inner.take() { + Some(t) => t, + None => panic!("Format: was already formatted once"), + }; + + if let Some(fst) = iter.next() { + cb(&fst, f)?; + iter.try_for_each(|elt| { + if !self.sep.is_empty() { + f.write_str(self.sep)?; + } + cb(&elt, f) + })?; + } + Ok(()) + } +} + +macro_rules! impl_format { + ($($fmt_trait:ident)*) => { + $( + impl<'a, I> fmt::$fmt_trait for Format<'a, I> + where I: Iterator, + I::Item: fmt::$fmt_trait, + { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + self.format(f, fmt::$fmt_trait::fmt) + } + } + )* + } +} + +impl_format! {Display Debug UpperExp LowerExp UpperHex LowerHex Octal Binary Pointer} + +impl<'a, I, F> Clone for FormatWith<'a, I, F> +where + (I, F): Clone, +{ + fn clone(&self) -> Self { + struct PutBackOnDrop<'r, 'a, I, F> { + into: &'r FormatWith<'a, I, F>, + inner: Option<(I, F)>, + } + // This ensures we preserve the state of the original `FormatWith` if `Clone` panics + impl<'r, 'a, I, F> Drop for PutBackOnDrop<'r, 'a, I, F> { + fn drop(&mut self) { + self.into.inner.set(self.inner.take()) + } + } + let pbod = PutBackOnDrop { + inner: self.inner.take(), + into: self, + }; + Self { + inner: Cell::new(pbod.inner.clone()), + sep: self.sep, + } + } +} + +impl<'a, I> Clone for Format<'a, I> +where + I: Clone, +{ + fn clone(&self) -> Self { + struct PutBackOnDrop<'r, 'a, I> { + into: &'r Format<'a, I>, + inner: Option, + } + // This ensures we preserve the state of the original `FormatWith` if `Clone` panics + impl<'r, 'a, I> Drop for PutBackOnDrop<'r, 'a, I> { + fn drop(&mut self) { + self.into.inner.set(self.inner.take()) + } + } + let pbod = PutBackOnDrop { + inner: self.inner.take(), + into: self, + }; + Self { + inner: Cell::new(pbod.inner.clone()), + sep: self.sep, + } + } +} diff --git a/vendor/itertools/src/free.rs b/vendor/itertools/src/free.rs new file mode 100644 index 00000000000000..8d0bcf3ea966e0 --- /dev/null +++ b/vendor/itertools/src/free.rs @@ -0,0 +1,317 @@ +//! 
Free functions that create iterator adaptors or call iterator methods. +//! +//! The benefit of free functions is that they accept any [`IntoIterator`] as +//! argument, so the resulting code may be easier to read. + +#[cfg(feature = "use_alloc")] +use std::fmt::Display; +use std::iter::{self, Zip}; +#[cfg(feature = "use_alloc")] +type VecIntoIter = alloc::vec::IntoIter; + +#[cfg(feature = "use_alloc")] +use alloc::string::String; + +use crate::intersperse::{Intersperse, IntersperseWith}; +use crate::Itertools; + +pub use crate::adaptors::{interleave, put_back}; +#[cfg(feature = "use_alloc")] +pub use crate::kmerge_impl::kmerge; +pub use crate::merge_join::{merge, merge_join_by}; +#[cfg(feature = "use_alloc")] +pub use crate::multipeek_impl::multipeek; +#[cfg(feature = "use_alloc")] +pub use crate::peek_nth::peek_nth; +#[cfg(feature = "use_alloc")] +pub use crate::put_back_n_impl::put_back_n; +#[cfg(feature = "use_alloc")] +pub use crate::rciter_impl::rciter; +pub use crate::zip_eq_impl::zip_eq; + +/// Iterate `iterable` with a particular value inserted between each element. +/// +/// [`IntoIterator`] enabled version of [`Iterator::intersperse`]. +/// +/// ``` +/// use itertools::intersperse; +/// +/// itertools::assert_equal(intersperse((0..3), 8), vec![0, 8, 1, 8, 2]); +/// ``` +pub fn intersperse(iterable: I, element: I::Item) -> Intersperse +where + I: IntoIterator, + ::Item: Clone, +{ + Itertools::intersperse(iterable.into_iter(), element) +} + +/// Iterate `iterable` with a particular value created by a function inserted +/// between each element. +/// +/// [`IntoIterator`] enabled version of [`Iterator::intersperse_with`]. +/// +/// ``` +/// use itertools::intersperse_with; +/// +/// let mut i = 10; +/// itertools::assert_equal(intersperse_with((0..3), || { i -= 1; i }), vec![0, 9, 1, 8, 2]); +/// assert_eq!(i, 8); +/// ``` +pub fn intersperse_with(iterable: I, element: F) -> IntersperseWith +where + I: IntoIterator, + F: FnMut() -> I::Item, +{ + Itertools::intersperse_with(iterable.into_iter(), element) +} + +/// Iterate `iterable` with a running index. +/// +/// [`IntoIterator`] enabled version of [`Iterator::enumerate`]. +/// +/// ``` +/// use itertools::enumerate; +/// +/// for (i, elt) in enumerate(&[1, 2, 3]) { +/// /* loop body */ +/// } +/// ``` +pub fn enumerate(iterable: I) -> iter::Enumerate +where + I: IntoIterator, +{ + iterable.into_iter().enumerate() +} + +/// Iterate `iterable` in reverse. +/// +/// [`IntoIterator`] enabled version of [`Iterator::rev`]. +/// +/// ``` +/// use itertools::rev; +/// +/// for elt in rev(&[1, 2, 3]) { +/// /* loop body */ +/// } +/// ``` +pub fn rev(iterable: I) -> iter::Rev +where + I: IntoIterator, + I::IntoIter: DoubleEndedIterator, +{ + iterable.into_iter().rev() +} + +/// Converts the arguments to iterators and zips them. +/// +/// [`IntoIterator`] enabled version of [`Iterator::zip`]. +/// +/// ## Example +/// +/// ``` +/// use itertools::zip; +/// +/// let mut result: Vec<(i32, char)> = Vec::new(); +/// +/// for (a, b) in zip(&[1, 2, 3, 4, 5], &['a', 'b', 'c']) { +/// result.push((*a, *b)); +/// } +/// assert_eq!(result, vec![(1, 'a'),(2, 'b'),(3, 'c')]); +/// ``` +#[deprecated( + note = "Use [std::iter::zip](https://doc.rust-lang.org/std/iter/fn.zip.html) instead", + since = "0.10.4" +)] +pub fn zip(i: I, j: J) -> Zip +where + I: IntoIterator, + J: IntoIterator, +{ + i.into_iter().zip(j) +} + +/// Takes two iterables and creates a new iterator over both in sequence. 
+/// +/// [`IntoIterator`] enabled version of [`Iterator::chain`]. +/// +/// ## Example +/// ``` +/// use itertools::chain; +/// +/// let mut result:Vec = Vec::new(); +/// +/// for element in chain(&[1, 2, 3], &[4]) { +/// result.push(*element); +/// } +/// assert_eq!(result, vec![1, 2, 3, 4]); +/// ``` +pub fn chain( + i: I, + j: J, +) -> iter::Chain<::IntoIter, ::IntoIter> +where + I: IntoIterator, + J: IntoIterator, +{ + i.into_iter().chain(j) +} + +/// Create an iterator that clones each element from `&T` to `T`. +/// +/// [`IntoIterator`] enabled version of [`Iterator::cloned`]. +/// +/// ``` +/// use itertools::cloned; +/// +/// assert_eq!(cloned(b"abc").next(), Some(b'a')); +/// ``` +pub fn cloned<'a, I, T>(iterable: I) -> iter::Cloned +where + I: IntoIterator, + T: Clone + 'a, +{ + iterable.into_iter().cloned() +} + +/// Perform a fold operation over the iterable. +/// +/// [`IntoIterator`] enabled version of [`Iterator::fold`]. +/// +/// ``` +/// use itertools::fold; +/// +/// assert_eq!(fold(&[1., 2., 3.], 0., |a, &b| f32::max(a, b)), 3.); +/// ``` +pub fn fold(iterable: I, init: B, f: F) -> B +where + I: IntoIterator, + F: FnMut(B, I::Item) -> B, +{ + iterable.into_iter().fold(init, f) +} + +/// Test whether the predicate holds for all elements in the iterable. +/// +/// [`IntoIterator`] enabled version of [`Iterator::all`]. +/// +/// ``` +/// use itertools::all; +/// +/// assert!(all(&[1, 2, 3], |elt| *elt > 0)); +/// ``` +pub fn all(iterable: I, f: F) -> bool +where + I: IntoIterator, + F: FnMut(I::Item) -> bool, +{ + iterable.into_iter().all(f) +} + +/// Test whether the predicate holds for any elements in the iterable. +/// +/// [`IntoIterator`] enabled version of [`Iterator::any`]. +/// +/// ``` +/// use itertools::any; +/// +/// assert!(any(&[0, -1, 2], |elt| *elt > 0)); +/// ``` +pub fn any(iterable: I, f: F) -> bool +where + I: IntoIterator, + F: FnMut(I::Item) -> bool, +{ + iterable.into_iter().any(f) +} + +/// Return the maximum value of the iterable. +/// +/// [`IntoIterator`] enabled version of [`Iterator::max`]. +/// +/// ``` +/// use itertools::max; +/// +/// assert_eq!(max(0..10), Some(9)); +/// ``` +pub fn max(iterable: I) -> Option +where + I: IntoIterator, + I::Item: Ord, +{ + iterable.into_iter().max() +} + +/// Return the minimum value of the iterable. +/// +/// [`IntoIterator`] enabled version of [`Iterator::min`]. +/// +/// ``` +/// use itertools::min; +/// +/// assert_eq!(min(0..10), Some(0)); +/// ``` +pub fn min(iterable: I) -> Option +where + I: IntoIterator, + I::Item: Ord, +{ + iterable.into_iter().min() +} + +/// Combine all iterator elements into one `String`, separated by `sep`. +/// +/// [`IntoIterator`] enabled version of [`Itertools::join`]. +/// +/// ``` +/// use itertools::join; +/// +/// assert_eq!(join(&[1, 2, 3], ", "), "1, 2, 3"); +/// ``` +#[cfg(feature = "use_alloc")] +pub fn join(iterable: I, sep: &str) -> String +where + I: IntoIterator, + I::Item: Display, +{ + iterable.into_iter().join(sep) +} + +/// Sort all iterator elements into a new iterator in ascending order. +/// +/// [`IntoIterator`] enabled version of [`Itertools::sorted`]. +/// +/// ``` +/// use itertools::sorted; +/// use itertools::assert_equal; +/// +/// assert_equal(sorted("rust".chars()), "rstu".chars()); +/// ``` +#[cfg(feature = "use_alloc")] +pub fn sorted(iterable: I) -> VecIntoIter +where + I: IntoIterator, + I::Item: Ord, +{ + iterable.into_iter().sorted() +} + +/// Sort all iterator elements into a new iterator in ascending order. 
+/// This sort is unstable (i.e., may reorder equal elements). +/// +/// [`IntoIterator`] enabled version of [`Itertools::sorted_unstable`]. +/// +/// ``` +/// use itertools::sorted_unstable; +/// use itertools::assert_equal; +/// +/// assert_equal(sorted_unstable("rust".chars()), "rstu".chars()); +/// ``` +#[cfg(feature = "use_alloc")] +pub fn sorted_unstable(iterable: I) -> VecIntoIter +where + I: IntoIterator, + I::Item: Ord, +{ + iterable.into_iter().sorted_unstable() +} diff --git a/vendor/itertools/src/group_map.rs b/vendor/itertools/src/group_map.rs new file mode 100644 index 00000000000000..3dcee83afd00b3 --- /dev/null +++ b/vendor/itertools/src/group_map.rs @@ -0,0 +1,32 @@ +#![cfg(feature = "use_std")] + +use std::collections::HashMap; +use std::hash::Hash; +use std::iter::Iterator; + +/// Return a `HashMap` of keys mapped to a list of their corresponding values. +/// +/// See [`.into_group_map()`](crate::Itertools::into_group_map) +/// for more information. +pub fn into_group_map(iter: I) -> HashMap> +where + I: Iterator, + K: Hash + Eq, +{ + let mut lookup = HashMap::new(); + + iter.for_each(|(key, val)| { + lookup.entry(key).or_insert_with(Vec::new).push(val); + }); + + lookup +} + +pub fn into_group_map_by(iter: I, mut f: F) -> HashMap> +where + I: Iterator, + K: Hash + Eq, + F: FnMut(&V) -> K, +{ + into_group_map(iter.map(|v| (f(&v), v))) +} diff --git a/vendor/itertools/src/groupbylazy.rs b/vendor/itertools/src/groupbylazy.rs new file mode 100644 index 00000000000000..5847c8f7d1f770 --- /dev/null +++ b/vendor/itertools/src/groupbylazy.rs @@ -0,0 +1,613 @@ +use alloc::vec::{self, Vec}; +use std::cell::{Cell, RefCell}; + +/// A trait to unify `FnMut` for `ChunkBy` with the chunk key in `IntoChunks` +trait KeyFunction { + type Key; + fn call_mut(&mut self, arg: A) -> Self::Key; +} + +impl KeyFunction for F +where + F: FnMut(A) -> K + ?Sized, +{ + type Key = K; + #[inline] + fn call_mut(&mut self, arg: A) -> Self::Key { + (*self)(arg) + } +} + +/// `ChunkIndex` acts like the grouping key function for `IntoChunks` +#[derive(Debug, Clone)] +struct ChunkIndex { + size: usize, + index: usize, + key: usize, +} + +impl ChunkIndex { + #[inline(always)] + fn new(size: usize) -> Self { + Self { + size, + index: 0, + key: 0, + } + } +} + +impl KeyFunction for ChunkIndex { + type Key = usize; + #[inline(always)] + fn call_mut(&mut self, _arg: A) -> Self::Key { + if self.index == self.size { + self.key += 1; + self.index = 0; + } + self.index += 1; + self.key + } +} + +#[derive(Clone)] +struct GroupInner +where + I: Iterator, +{ + key: F, + iter: I, + current_key: Option, + current_elt: Option, + /// flag set if iterator is exhausted + done: bool, + /// Index of group we are currently buffering or visiting + top_group: usize, + /// Least index for which we still have elements buffered + oldest_buffered_group: usize, + /// Group index for `buffer[0]` -- the slots + /// `bottom_group..oldest_buffered_group` are unused and will be erased when + /// that range is large enough. + bottom_group: usize, + /// Buffered groups, from `bottom_group` (index 0) to `top_group`. 
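// Invariant sketch: `buffer[i]` holds the still-unvisited elements of group
// `bottom_group + i`; the drained prefix below `oldest_buffered_group` is reclaimed
// lazily in `lookup_buffer` once it spans at least half of the buffer.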
+ buffer: Vec>, + /// index of last group iter that was dropped, + /// `usize::MAX` initially when no group was dropped + dropped_group: usize, +} + +impl GroupInner +where + I: Iterator, + F: for<'a> KeyFunction<&'a I::Item, Key = K>, + K: PartialEq, +{ + /// `client`: Index of group that requests next element + #[inline(always)] + fn step(&mut self, client: usize) -> Option { + /* + println!("client={}, bottom_group={}, oldest_buffered_group={}, top_group={}, buffers=[{}]", + client, self.bottom_group, self.oldest_buffered_group, + self.top_group, + self.buffer.iter().map(|elt| elt.len()).format(", ")); + */ + if client < self.oldest_buffered_group { + None + } else if client < self.top_group + || (client == self.top_group && self.buffer.len() > self.top_group - self.bottom_group) + { + self.lookup_buffer(client) + } else if self.done { + None + } else if self.top_group == client { + self.step_current() + } else { + self.step_buffering(client) + } + } + + #[inline(never)] + fn lookup_buffer(&mut self, client: usize) -> Option { + // if `bufidx` doesn't exist in self.buffer, it might be empty + let bufidx = client - self.bottom_group; + if client < self.oldest_buffered_group { + return None; + } + let elt = self.buffer.get_mut(bufidx).and_then(|queue| queue.next()); + if elt.is_none() && client == self.oldest_buffered_group { + // FIXME: VecDeque is unfortunately not zero allocation when empty, + // so we do this job manually. + // `bottom_group..oldest_buffered_group` is unused, and if it's large enough, erase it. + self.oldest_buffered_group += 1; + // skip forward further empty queues too + while self + .buffer + .get(self.oldest_buffered_group - self.bottom_group) + .map_or(false, |buf| buf.len() == 0) + { + self.oldest_buffered_group += 1; + } + + let nclear = self.oldest_buffered_group - self.bottom_group; + if nclear > 0 && nclear >= self.buffer.len() / 2 { + let mut i = 0; + self.buffer.retain(|buf| { + i += 1; + debug_assert!(buf.len() == 0 || i > nclear); + i > nclear + }); + self.bottom_group = self.oldest_buffered_group; + } + } + elt + } + + /// Take the next element from the iterator, and set the done + /// flag if exhausted. Must not be called after done. + #[inline(always)] + fn next_element(&mut self) -> Option { + debug_assert!(!self.done); + match self.iter.next() { + None => { + self.done = true; + None + } + otherwise => otherwise, + } + } + + #[inline(never)] + fn step_buffering(&mut self, client: usize) -> Option { + // requested a later group -- walk through the current group up to + // the requested group index, and buffer the elements (unless + // the group is marked as dropped). + // Because the `Groups` iterator is always the first to request + // each group index, client is the next index efter top_group. 
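// The caller is always asking for group `top_group + 1` here: the rest of the current
// top group is drained into `group` (unless that group's iterator was dropped), and the
// first element seen under a new key becomes the first element of the requested group.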
+ debug_assert!(self.top_group + 1 == client); + let mut group = Vec::new(); + + if let Some(elt) = self.current_elt.take() { + if self.top_group != self.dropped_group { + group.push(elt); + } + } + let mut first_elt = None; // first element of the next group + + while let Some(elt) = self.next_element() { + let key = self.key.call_mut(&elt); + match self.current_key.take() { + None => {} + Some(old_key) => { + if old_key != key { + self.current_key = Some(key); + first_elt = Some(elt); + break; + } + } + } + self.current_key = Some(key); + if self.top_group != self.dropped_group { + group.push(elt); + } + } + + if self.top_group != self.dropped_group { + self.push_next_group(group); + } + if first_elt.is_some() { + self.top_group += 1; + debug_assert!(self.top_group == client); + } + first_elt + } + + fn push_next_group(&mut self, group: Vec) { + // When we add a new buffered group, fill up slots between oldest_buffered_group and top_group + while self.top_group - self.bottom_group > self.buffer.len() { + if self.buffer.is_empty() { + self.bottom_group += 1; + self.oldest_buffered_group += 1; + } else { + self.buffer.push(Vec::new().into_iter()); + } + } + self.buffer.push(group.into_iter()); + debug_assert!(self.top_group + 1 - self.bottom_group == self.buffer.len()); + } + + /// This is the immediate case, where we use no buffering + #[inline] + fn step_current(&mut self) -> Option { + debug_assert!(!self.done); + if let elt @ Some(..) = self.current_elt.take() { + return elt; + } + match self.next_element() { + None => None, + Some(elt) => { + let key = self.key.call_mut(&elt); + match self.current_key.take() { + None => {} + Some(old_key) => { + if old_key != key { + self.current_key = Some(key); + self.current_elt = Some(elt); + self.top_group += 1; + return None; + } + } + } + self.current_key = Some(key); + Some(elt) + } + } + } + + /// Request the just started groups' key. + /// + /// `client`: Index of group + /// + /// **Panics** if no group key is available. + fn group_key(&mut self, client: usize) -> K { + // This can only be called after we have just returned the first + // element of a group. + // Perform this by simply buffering one more element, grabbing the + // next key. + debug_assert!(!self.done); + debug_assert!(client == self.top_group); + debug_assert!(self.current_key.is_some()); + debug_assert!(self.current_elt.is_none()); + let old_key = self.current_key.take().unwrap(); + if let Some(elt) = self.next_element() { + let key = self.key.call_mut(&elt); + if old_key != key { + self.top_group += 1; + } + self.current_key = Some(key); + self.current_elt = Some(elt); + } + old_key + } +} + +impl GroupInner +where + I: Iterator, +{ + /// Called when a group is dropped + fn drop_group(&mut self, client: usize) { + // It's only useful to track the maximal index + if self.dropped_group == !0 || client > self.dropped_group { + self.dropped_group = client; + } + } +} + +#[deprecated(note = "Use `ChunkBy` instead", since = "0.13.0")] +/// See [`ChunkBy`](crate::structs::ChunkBy). +pub type GroupBy = ChunkBy; + +/// `ChunkBy` is the storage for the lazy grouping operation. +/// +/// If the groups are consumed in their original order, or if each +/// group is dropped without keeping it around, then `ChunkBy` uses +/// no allocations. It needs allocations only if several group iterators +/// are alive at the same time. +/// +/// This type implements [`IntoIterator`] (it is **not** an iterator +/// itself), because the group iterators need to borrow from this +/// value. 
It should be stored in a local variable or temporary and +/// iterated. +/// +/// See [`.chunk_by()`](crate::Itertools::chunk_by) for more information. +#[must_use = "iterator adaptors are lazy and do nothing unless consumed"] +pub struct ChunkBy +where + I: Iterator, +{ + inner: RefCell>, + // the group iterator's current index. Keep this in the main value + // so that simultaneous iterators all use the same state. + index: Cell, +} + +/// Create a new +pub fn new(iter: J, f: F) -> ChunkBy +where + J: IntoIterator, + F: FnMut(&J::Item) -> K, +{ + ChunkBy { + inner: RefCell::new(GroupInner { + key: f, + iter: iter.into_iter(), + current_key: None, + current_elt: None, + done: false, + top_group: 0, + oldest_buffered_group: 0, + bottom_group: 0, + buffer: Vec::new(), + dropped_group: !0, + }), + index: Cell::new(0), + } +} + +impl ChunkBy +where + I: Iterator, +{ + /// `client`: Index of group that requests next element + fn step(&self, client: usize) -> Option + where + F: FnMut(&I::Item) -> K, + K: PartialEq, + { + self.inner.borrow_mut().step(client) + } + + /// `client`: Index of group + fn drop_group(&self, client: usize) { + self.inner.borrow_mut().drop_group(client); + } +} + +impl<'a, K, I, F> IntoIterator for &'a ChunkBy +where + I: Iterator, + I::Item: 'a, + F: FnMut(&I::Item) -> K, + K: PartialEq, +{ + type Item = (K, Group<'a, K, I, F>); + type IntoIter = Groups<'a, K, I, F>; + + fn into_iter(self) -> Self::IntoIter { + Groups { parent: self } + } +} + +/// An iterator that yields the Group iterators. +/// +/// Iterator element type is `(K, Group)`: +/// the group's key `K` and the group's iterator. +/// +/// See [`.chunk_by()`](crate::Itertools::chunk_by) for more information. +#[must_use = "iterator adaptors are lazy and do nothing unless consumed"] +pub struct Groups<'a, K, I, F> +where + I: Iterator + 'a, + I::Item: 'a, + K: 'a, + F: 'a, +{ + parent: &'a ChunkBy, +} + +impl<'a, K, I, F> Iterator for Groups<'a, K, I, F> +where + I: Iterator, + I::Item: 'a, + F: FnMut(&I::Item) -> K, + K: PartialEq, +{ + type Item = (K, Group<'a, K, I, F>); + + #[inline] + fn next(&mut self) -> Option { + let index = self.parent.index.get(); + self.parent.index.set(index + 1); + let inner = &mut *self.parent.inner.borrow_mut(); + inner.step(index).map(|elt| { + let key = inner.group_key(index); + ( + key, + Group { + parent: self.parent, + index, + first: Some(elt), + }, + ) + }) + } +} + +/// An iterator for the elements in a single group. +/// +/// Iterator element type is `I::Item`. +pub struct Group<'a, K, I, F> +where + I: Iterator + 'a, + I::Item: 'a, + K: 'a, + F: 'a, +{ + parent: &'a ChunkBy, + index: usize, + first: Option, +} + +impl<'a, K, I, F> Drop for Group<'a, K, I, F> +where + I: Iterator, + I::Item: 'a, +{ + fn drop(&mut self) { + self.parent.drop_group(self.index); + } +} + +impl<'a, K, I, F> Iterator for Group<'a, K, I, F> +where + I: Iterator, + I::Item: 'a, + F: FnMut(&I::Item) -> K, + K: PartialEq, +{ + type Item = I::Item; + #[inline] + fn next(&mut self) -> Option { + if let elt @ Some(..) 
= self.first.take() { + return elt; + } + self.parent.step(self.index) + } +} + +///// IntoChunks ///// + +/// Create a new +pub fn new_chunks(iter: J, size: usize) -> IntoChunks +where + J: IntoIterator, +{ + IntoChunks { + inner: RefCell::new(GroupInner { + key: ChunkIndex::new(size), + iter: iter.into_iter(), + current_key: None, + current_elt: None, + done: false, + top_group: 0, + oldest_buffered_group: 0, + bottom_group: 0, + buffer: Vec::new(), + dropped_group: !0, + }), + index: Cell::new(0), + } +} + +/// `ChunkLazy` is the storage for a lazy chunking operation. +/// +/// `IntoChunks` behaves just like `ChunkBy`: it is iterable, and +/// it only buffers if several chunk iterators are alive at the same time. +/// +/// This type implements [`IntoIterator`] (it is **not** an iterator +/// itself), because the chunk iterators need to borrow from this +/// value. It should be stored in a local variable or temporary and +/// iterated. +/// +/// Iterator element type is `Chunk`, each chunk's iterator. +/// +/// See [`.chunks()`](crate::Itertools::chunks) for more information. +#[must_use = "iterator adaptors are lazy and do nothing unless consumed"] +pub struct IntoChunks +where + I: Iterator, +{ + inner: RefCell>, + // the chunk iterator's current index. Keep this in the main value + // so that simultaneous iterators all use the same state. + index: Cell, +} + +impl Clone for IntoChunks +where + I: Clone + Iterator, + I::Item: Clone, +{ + clone_fields!(inner, index); +} + +impl IntoChunks +where + I: Iterator, +{ + /// `client`: Index of chunk that requests next element + fn step(&self, client: usize) -> Option { + self.inner.borrow_mut().step(client) + } + + /// `client`: Index of chunk + fn drop_group(&self, client: usize) { + self.inner.borrow_mut().drop_group(client); + } +} + +impl<'a, I> IntoIterator for &'a IntoChunks +where + I: Iterator, + I::Item: 'a, +{ + type Item = Chunk<'a, I>; + type IntoIter = Chunks<'a, I>; + + fn into_iter(self) -> Self::IntoIter { + Chunks { parent: self } + } +} + +/// An iterator that yields the Chunk iterators. +/// +/// Iterator element type is `Chunk`. +/// +/// See [`.chunks()`](crate::Itertools::chunks) for more information. +#[must_use = "iterator adaptors are lazy and do nothing unless consumed"] +#[derive(Clone)] +pub struct Chunks<'a, I> +where + I: Iterator + 'a, + I::Item: 'a, +{ + parent: &'a IntoChunks, +} + +impl<'a, I> Iterator for Chunks<'a, I> +where + I: Iterator, + I::Item: 'a, +{ + type Item = Chunk<'a, I>; + + #[inline] + fn next(&mut self) -> Option { + let index = self.parent.index.get(); + self.parent.index.set(index + 1); + let inner = &mut *self.parent.inner.borrow_mut(); + inner.step(index).map(|elt| Chunk { + parent: self.parent, + index, + first: Some(elt), + }) + } +} + +/// An iterator for the elements in a single chunk. +/// +/// Iterator element type is `I::Item`. +pub struct Chunk<'a, I> +where + I: Iterator + 'a, + I::Item: 'a, +{ + parent: &'a IntoChunks, + index: usize, + first: Option, +} + +impl<'a, I> Drop for Chunk<'a, I> +where + I: Iterator, + I::Item: 'a, +{ + fn drop(&mut self) { + self.parent.drop_group(self.index); + } +} + +impl<'a, I> Iterator for Chunk<'a, I> +where + I: Iterator, + I::Item: 'a, +{ + type Item = I::Item; + #[inline] + fn next(&mut self) -> Option { + if let elt @ Some(..) 
= self.first.take() { + return elt; + } + self.parent.step(self.index) + } +} diff --git a/vendor/itertools/src/grouping_map.rs b/vendor/itertools/src/grouping_map.rs new file mode 100644 index 00000000000000..b4aae9ecf1ba32 --- /dev/null +++ b/vendor/itertools/src/grouping_map.rs @@ -0,0 +1,614 @@ +#![cfg(feature = "use_std")] + +use crate::{ + adaptors::map::{MapSpecialCase, MapSpecialCaseFn}, + MinMaxResult, +}; +use std::cmp::Ordering; +use std::collections::HashMap; +use std::hash::Hash; +use std::iter::Iterator; +use std::ops::{Add, Mul}; + +/// A wrapper to allow for an easy [`into_grouping_map_by`](crate::Itertools::into_grouping_map_by) +pub type MapForGrouping = MapSpecialCase>; + +#[derive(Clone)] +pub struct GroupingMapFn(F); + +impl std::fmt::Debug for GroupingMapFn { + debug_fmt_fields!(GroupingMapFn,); +} + +impl K> MapSpecialCaseFn for GroupingMapFn { + type Out = (K, V); + fn call(&mut self, v: V) -> Self::Out { + ((self.0)(&v), v) + } +} + +pub(crate) fn new_map_for_grouping K>( + iter: I, + key_mapper: F, +) -> MapForGrouping { + MapSpecialCase { + iter, + f: GroupingMapFn(key_mapper), + } +} + +/// Creates a new `GroupingMap` from `iter` +pub fn new(iter: I) -> GroupingMap +where + I: Iterator, + K: Hash + Eq, +{ + GroupingMap { iter } +} + +/// `GroupingMapBy` is an intermediate struct for efficient group-and-fold operations. +/// +/// See [`GroupingMap`] for more informations. +pub type GroupingMapBy = GroupingMap>; + +/// `GroupingMap` is an intermediate struct for efficient group-and-fold operations. +/// It groups elements by their key and at the same time fold each group +/// using some aggregating operation. +/// +/// No method on this struct performs temporary allocations. +#[derive(Clone, Debug)] +#[must_use = "GroupingMap is lazy and do nothing unless consumed"] +pub struct GroupingMap { + iter: I, +} + +impl GroupingMap +where + I: Iterator, + K: Hash + Eq, +{ + /// This is the generic way to perform any operation on a `GroupingMap`. + /// It's suggested to use this method only to implement custom operations + /// when the already provided ones are not enough. + /// + /// Groups elements from the `GroupingMap` source by key and applies `operation` to the elements + /// of each group sequentially, passing the previously accumulated value, a reference to the key + /// and the current element as arguments, and stores the results in an `HashMap`. + /// + /// The `operation` function is invoked on each element with the following parameters: + /// - the current value of the accumulator of the group if there is currently one; + /// - a reference to the key of the group this element belongs to; + /// - the element from the source being aggregated; + /// + /// If `operation` returns `Some(element)` then the accumulator is updated with `element`, + /// otherwise the previous accumulation is discarded. + /// + /// Return a `HashMap` associating the key of each group with the result of aggregation of + /// that group's elements. If the aggregation of the last element of a group discards the + /// accumulator then there won't be an entry associated to that group's key. 
+ /// + /// ``` + /// use itertools::Itertools; + /// + /// let data = vec![2, 8, 5, 7, 9, 0, 4, 10]; + /// let lookup = data.into_iter() + /// .into_grouping_map_by(|&n| n % 4) + /// .aggregate(|acc, _key, val| { + /// if val == 0 || val == 10 { + /// None + /// } else { + /// Some(acc.unwrap_or(0) + val) + /// } + /// }); + /// + /// assert_eq!(lookup[&0], 4); // 0 resets the accumulator so only 4 is summed + /// assert_eq!(lookup[&1], 5 + 9); + /// assert_eq!(lookup.get(&2), None); // 10 resets the accumulator and nothing is summed afterward + /// assert_eq!(lookup[&3], 7); + /// assert_eq!(lookup.len(), 3); // The final keys are only 0, 1 and 2 + /// ``` + pub fn aggregate(self, mut operation: FO) -> HashMap + where + FO: FnMut(Option, &K, V) -> Option, + { + let mut destination_map = HashMap::new(); + + self.iter.for_each(|(key, val)| { + let acc = destination_map.remove(&key); + if let Some(op_res) = operation(acc, &key, val) { + destination_map.insert(key, op_res); + } + }); + + destination_map + } + + /// Groups elements from the `GroupingMap` source by key and applies `operation` to the elements + /// of each group sequentially, passing the previously accumulated value, a reference to the key + /// and the current element as arguments, and stores the results in a new map. + /// + /// `init` is called to obtain the initial value of each accumulator. + /// + /// `operation` is a function that is invoked on each element with the following parameters: + /// - the current value of the accumulator of the group; + /// - a reference to the key of the group this element belongs to; + /// - the element from the source being accumulated. + /// + /// Return a `HashMap` associating the key of each group with the result of folding that group's elements. + /// + /// ``` + /// use itertools::Itertools; + /// + /// #[derive(Debug, Default)] + /// struct Accumulator { + /// acc: usize, + /// } + /// + /// let lookup = (1..=7) + /// .into_grouping_map_by(|&n| n % 3) + /// .fold_with(|_key, _val| Default::default(), |Accumulator { acc }, _key, val| { + /// let acc = acc + val; + /// Accumulator { acc } + /// }); + /// + /// assert_eq!(lookup[&0].acc, 3 + 6); + /// assert_eq!(lookup[&1].acc, 1 + 4 + 7); + /// assert_eq!(lookup[&2].acc, 2 + 5); + /// assert_eq!(lookup.len(), 3); + /// ``` + pub fn fold_with(self, mut init: FI, mut operation: FO) -> HashMap + where + FI: FnMut(&K, &V) -> R, + FO: FnMut(R, &K, V) -> R, + { + self.aggregate(|acc, key, val| { + let acc = acc.unwrap_or_else(|| init(key, &val)); + Some(operation(acc, key, val)) + }) + } + + /// Groups elements from the `GroupingMap` source by key and applies `operation` to the elements + /// of each group sequentially, passing the previously accumulated value, a reference to the key + /// and the current element as arguments, and stores the results in a new map. + /// + /// `init` is the value from which will be cloned the initial value of each accumulator. + /// + /// `operation` is a function that is invoked on each element with the following parameters: + /// - the current value of the accumulator of the group; + /// - a reference to the key of the group this element belongs to; + /// - the element from the source being accumulated. + /// + /// Return a `HashMap` associating the key of each group with the result of folding that group's elements. 
+ /// + /// ``` + /// use itertools::Itertools; + /// + /// let lookup = (1..=7) + /// .into_grouping_map_by(|&n| n % 3) + /// .fold(0, |acc, _key, val| acc + val); + /// + /// assert_eq!(lookup[&0], 3 + 6); + /// assert_eq!(lookup[&1], 1 + 4 + 7); + /// assert_eq!(lookup[&2], 2 + 5); + /// assert_eq!(lookup.len(), 3); + /// ``` + pub fn fold(self, init: R, operation: FO) -> HashMap + where + R: Clone, + FO: FnMut(R, &K, V) -> R, + { + self.fold_with(|_, _| init.clone(), operation) + } + + /// Groups elements from the `GroupingMap` source by key and applies `operation` to the elements + /// of each group sequentially, passing the previously accumulated value, a reference to the key + /// and the current element as arguments, and stores the results in a new map. + /// + /// This is similar to [`fold`] but the initial value of the accumulator is the first element of the group. + /// + /// `operation` is a function that is invoked on each element with the following parameters: + /// - the current value of the accumulator of the group; + /// - a reference to the key of the group this element belongs to; + /// - the element from the source being accumulated. + /// + /// Return a `HashMap` associating the key of each group with the result of folding that group's elements. + /// + /// [`fold`]: GroupingMap::fold + /// + /// ``` + /// use itertools::Itertools; + /// + /// let lookup = (1..=7) + /// .into_grouping_map_by(|&n| n % 3) + /// .reduce(|acc, _key, val| acc + val); + /// + /// assert_eq!(lookup[&0], 3 + 6); + /// assert_eq!(lookup[&1], 1 + 4 + 7); + /// assert_eq!(lookup[&2], 2 + 5); + /// assert_eq!(lookup.len(), 3); + /// ``` + pub fn reduce(self, mut operation: FO) -> HashMap + where + FO: FnMut(V, &K, V) -> V, + { + self.aggregate(|acc, key, val| { + Some(match acc { + Some(acc) => operation(acc, key, val), + None => val, + }) + }) + } + + /// See [`.reduce()`](GroupingMap::reduce). + #[deprecated(note = "Use .reduce() instead", since = "0.13.0")] + pub fn fold_first(self, operation: FO) -> HashMap + where + FO: FnMut(V, &K, V) -> V, + { + self.reduce(operation) + } + + /// Groups elements from the `GroupingMap` source by key and collects the elements of each group in + /// an instance of `C`. The iteration order is preserved when inserting elements. + /// + /// Return a `HashMap` associating the key of each group with the collection containing that group's elements. + /// + /// ``` + /// use itertools::Itertools; + /// use std::collections::HashSet; + /// + /// let lookup = vec![0, 1, 2, 3, 4, 5, 6, 2, 3, 6].into_iter() + /// .into_grouping_map_by(|&n| n % 3) + /// .collect::>(); + /// + /// assert_eq!(lookup[&0], vec![0, 3, 6].into_iter().collect::>()); + /// assert_eq!(lookup[&1], vec![1, 4].into_iter().collect::>()); + /// assert_eq!(lookup[&2], vec![2, 5].into_iter().collect::>()); + /// assert_eq!(lookup.len(), 3); + /// ``` + pub fn collect(self) -> HashMap + where + C: Default + Extend, + { + let mut destination_map = HashMap::new(); + + self.iter.for_each(|(key, val)| { + destination_map + .entry(key) + .or_insert_with(C::default) + .extend(Some(val)); + }); + + destination_map + } + + /// Groups elements from the `GroupingMap` source by key and finds the maximum of each group. + /// + /// If several elements are equally maximum, the last element is picked. + /// + /// Returns a `HashMap` associating the key of each group with the maximum of that group's elements. 
+ /// + /// ``` + /// use itertools::Itertools; + /// + /// let lookup = vec![1, 3, 4, 5, 7, 8, 9, 12].into_iter() + /// .into_grouping_map_by(|&n| n % 3) + /// .max(); + /// + /// assert_eq!(lookup[&0], 12); + /// assert_eq!(lookup[&1], 7); + /// assert_eq!(lookup[&2], 8); + /// assert_eq!(lookup.len(), 3); + /// ``` + pub fn max(self) -> HashMap + where + V: Ord, + { + self.max_by(|_, v1, v2| V::cmp(v1, v2)) + } + + /// Groups elements from the `GroupingMap` source by key and finds the maximum of each group + /// with respect to the specified comparison function. + /// + /// If several elements are equally maximum, the last element is picked. + /// + /// Returns a `HashMap` associating the key of each group with the maximum of that group's elements. + /// + /// ``` + /// use itertools::Itertools; + /// + /// let lookup = vec![1, 3, 4, 5, 7, 8, 9, 12].into_iter() + /// .into_grouping_map_by(|&n| n % 3) + /// .max_by(|_key, x, y| y.cmp(x)); + /// + /// assert_eq!(lookup[&0], 3); + /// assert_eq!(lookup[&1], 1); + /// assert_eq!(lookup[&2], 5); + /// assert_eq!(lookup.len(), 3); + /// ``` + pub fn max_by(self, mut compare: F) -> HashMap + where + F: FnMut(&K, &V, &V) -> Ordering, + { + self.reduce(|acc, key, val| match compare(key, &acc, &val) { + Ordering::Less | Ordering::Equal => val, + Ordering::Greater => acc, + }) + } + + /// Groups elements from the `GroupingMap` source by key and finds the element of each group + /// that gives the maximum from the specified function. + /// + /// If several elements are equally maximum, the last element is picked. + /// + /// Returns a `HashMap` associating the key of each group with the maximum of that group's elements. + /// + /// ``` + /// use itertools::Itertools; + /// + /// let lookup = vec![1, 3, 4, 5, 7, 8, 9, 12].into_iter() + /// .into_grouping_map_by(|&n| n % 3) + /// .max_by_key(|_key, &val| val % 4); + /// + /// assert_eq!(lookup[&0], 3); + /// assert_eq!(lookup[&1], 7); + /// assert_eq!(lookup[&2], 5); + /// assert_eq!(lookup.len(), 3); + /// ``` + pub fn max_by_key(self, mut f: F) -> HashMap + where + F: FnMut(&K, &V) -> CK, + CK: Ord, + { + self.max_by(|key, v1, v2| f(key, v1).cmp(&f(key, v2))) + } + + /// Groups elements from the `GroupingMap` source by key and finds the minimum of each group. + /// + /// If several elements are equally minimum, the first element is picked. + /// + /// Returns a `HashMap` associating the key of each group with the minimum of that group's elements. + /// + /// ``` + /// use itertools::Itertools; + /// + /// let lookup = vec![1, 3, 4, 5, 7, 8, 9, 12].into_iter() + /// .into_grouping_map_by(|&n| n % 3) + /// .min(); + /// + /// assert_eq!(lookup[&0], 3); + /// assert_eq!(lookup[&1], 1); + /// assert_eq!(lookup[&2], 5); + /// assert_eq!(lookup.len(), 3); + /// ``` + pub fn min(self) -> HashMap + where + V: Ord, + { + self.min_by(|_, v1, v2| V::cmp(v1, v2)) + } + + /// Groups elements from the `GroupingMap` source by key and finds the minimum of each group + /// with respect to the specified comparison function. + /// + /// If several elements are equally minimum, the first element is picked. + /// + /// Returns a `HashMap` associating the key of each group with the minimum of that group's elements. 
+ /// + /// ``` + /// use itertools::Itertools; + /// + /// let lookup = vec![1, 3, 4, 5, 7, 8, 9, 12].into_iter() + /// .into_grouping_map_by(|&n| n % 3) + /// .min_by(|_key, x, y| y.cmp(x)); + /// + /// assert_eq!(lookup[&0], 12); + /// assert_eq!(lookup[&1], 7); + /// assert_eq!(lookup[&2], 8); + /// assert_eq!(lookup.len(), 3); + /// ``` + pub fn min_by(self, mut compare: F) -> HashMap + where + F: FnMut(&K, &V, &V) -> Ordering, + { + self.reduce(|acc, key, val| match compare(key, &acc, &val) { + Ordering::Less | Ordering::Equal => acc, + Ordering::Greater => val, + }) + } + + /// Groups elements from the `GroupingMap` source by key and finds the element of each group + /// that gives the minimum from the specified function. + /// + /// If several elements are equally minimum, the first element is picked. + /// + /// Returns a `HashMap` associating the key of each group with the minimum of that group's elements. + /// + /// ``` + /// use itertools::Itertools; + /// + /// let lookup = vec![1, 3, 4, 5, 7, 8, 9, 12].into_iter() + /// .into_grouping_map_by(|&n| n % 3) + /// .min_by_key(|_key, &val| val % 4); + /// + /// assert_eq!(lookup[&0], 12); + /// assert_eq!(lookup[&1], 4); + /// assert_eq!(lookup[&2], 8); + /// assert_eq!(lookup.len(), 3); + /// ``` + pub fn min_by_key(self, mut f: F) -> HashMap + where + F: FnMut(&K, &V) -> CK, + CK: Ord, + { + self.min_by(|key, v1, v2| f(key, v1).cmp(&f(key, v2))) + } + + /// Groups elements from the `GroupingMap` source by key and find the maximum and minimum of + /// each group. + /// + /// If several elements are equally maximum, the last element is picked. + /// If several elements are equally minimum, the first element is picked. + /// + /// See [`Itertools::minmax`](crate::Itertools::minmax) for the non-grouping version. + /// + /// Differences from the non grouping version: + /// - It never produces a `MinMaxResult::NoElements` + /// - It doesn't have any speedup + /// + /// Returns a `HashMap` associating the key of each group with the minimum and maximum of that group's elements. + /// + /// ``` + /// use itertools::Itertools; + /// use itertools::MinMaxResult::{OneElement, MinMax}; + /// + /// let lookup = vec![1, 3, 4, 5, 7, 9, 12].into_iter() + /// .into_grouping_map_by(|&n| n % 3) + /// .minmax(); + /// + /// assert_eq!(lookup[&0], MinMax(3, 12)); + /// assert_eq!(lookup[&1], MinMax(1, 7)); + /// assert_eq!(lookup[&2], OneElement(5)); + /// assert_eq!(lookup.len(), 3); + /// ``` + pub fn minmax(self) -> HashMap> + where + V: Ord, + { + self.minmax_by(|_, v1, v2| V::cmp(v1, v2)) + } + + /// Groups elements from the `GroupingMap` source by key and find the maximum and minimum of + /// each group with respect to the specified comparison function. + /// + /// If several elements are equally maximum, the last element is picked. + /// If several elements are equally minimum, the first element is picked. + /// + /// It has the same differences from the non-grouping version as `minmax`. + /// + /// Returns a `HashMap` associating the key of each group with the minimum and maximum of that group's elements. 
+ /// + /// ``` + /// use itertools::Itertools; + /// use itertools::MinMaxResult::{OneElement, MinMax}; + /// + /// let lookup = vec![1, 3, 4, 5, 7, 9, 12].into_iter() + /// .into_grouping_map_by(|&n| n % 3) + /// .minmax_by(|_key, x, y| y.cmp(x)); + /// + /// assert_eq!(lookup[&0], MinMax(12, 3)); + /// assert_eq!(lookup[&1], MinMax(7, 1)); + /// assert_eq!(lookup[&2], OneElement(5)); + /// assert_eq!(lookup.len(), 3); + /// ``` + pub fn minmax_by(self, mut compare: F) -> HashMap> + where + F: FnMut(&K, &V, &V) -> Ordering, + { + self.aggregate(|acc, key, val| { + Some(match acc { + Some(MinMaxResult::OneElement(e)) => { + if compare(key, &val, &e) == Ordering::Less { + MinMaxResult::MinMax(val, e) + } else { + MinMaxResult::MinMax(e, val) + } + } + Some(MinMaxResult::MinMax(min, max)) => { + if compare(key, &val, &min) == Ordering::Less { + MinMaxResult::MinMax(val, max) + } else if compare(key, &val, &max) != Ordering::Less { + MinMaxResult::MinMax(min, val) + } else { + MinMaxResult::MinMax(min, max) + } + } + None => MinMaxResult::OneElement(val), + Some(MinMaxResult::NoElements) => unreachable!(), + }) + }) + } + + /// Groups elements from the `GroupingMap` source by key and find the elements of each group + /// that gives the minimum and maximum from the specified function. + /// + /// If several elements are equally maximum, the last element is picked. + /// If several elements are equally minimum, the first element is picked. + /// + /// It has the same differences from the non-grouping version as `minmax`. + /// + /// Returns a `HashMap` associating the key of each group with the minimum and maximum of that group's elements. + /// + /// ``` + /// use itertools::Itertools; + /// use itertools::MinMaxResult::{OneElement, MinMax}; + /// + /// let lookup = vec![1, 3, 4, 5, 7, 9, 12].into_iter() + /// .into_grouping_map_by(|&n| n % 3) + /// .minmax_by_key(|_key, &val| val % 4); + /// + /// assert_eq!(lookup[&0], MinMax(12, 3)); + /// assert_eq!(lookup[&1], MinMax(4, 7)); + /// assert_eq!(lookup[&2], OneElement(5)); + /// assert_eq!(lookup.len(), 3); + /// ``` + pub fn minmax_by_key(self, mut f: F) -> HashMap> + where + F: FnMut(&K, &V) -> CK, + CK: Ord, + { + self.minmax_by(|key, v1, v2| f(key, v1).cmp(&f(key, v2))) + } + + /// Groups elements from the `GroupingMap` source by key and sums them. + /// + /// This is just a shorthand for `self.reduce(|acc, _, val| acc + val)`. + /// It is more limited than `Iterator::sum` since it doesn't use the `Sum` trait. + /// + /// Returns a `HashMap` associating the key of each group with the sum of that group's elements. + /// + /// ``` + /// use itertools::Itertools; + /// + /// let lookup = vec![1, 3, 4, 5, 7, 8, 9, 12].into_iter() + /// .into_grouping_map_by(|&n| n % 3) + /// .sum(); + /// + /// assert_eq!(lookup[&0], 3 + 9 + 12); + /// assert_eq!(lookup[&1], 1 + 4 + 7); + /// assert_eq!(lookup[&2], 5 + 8); + /// assert_eq!(lookup.len(), 3); + /// ``` + pub fn sum(self) -> HashMap + where + V: Add, + { + self.reduce(|acc, _, val| acc + val) + } + + /// Groups elements from the `GroupingMap` source by key and multiply them. + /// + /// This is just a shorthand for `self.reduce(|acc, _, val| acc * val)`. + /// It is more limited than `Iterator::product` since it doesn't use the `Product` trait. + /// + /// Returns a `HashMap` associating the key of each group with the product of that group's elements. 
+ /// + /// ``` + /// use itertools::Itertools; + /// + /// let lookup = vec![1, 3, 4, 5, 7, 8, 9, 12].into_iter() + /// .into_grouping_map_by(|&n| n % 3) + /// .product(); + /// + /// assert_eq!(lookup[&0], 3 * 9 * 12); + /// assert_eq!(lookup[&1], 1 * 4 * 7); + /// assert_eq!(lookup[&2], 5 * 8); + /// assert_eq!(lookup.len(), 3); + /// ``` + pub fn product(self) -> HashMap + where + V: Mul, + { + self.reduce(|acc, _, val| acc * val) + } +} diff --git a/vendor/itertools/src/impl_macros.rs b/vendor/itertools/src/impl_macros.rs new file mode 100644 index 00000000000000..3db5ba021967c8 --- /dev/null +++ b/vendor/itertools/src/impl_macros.rs @@ -0,0 +1,34 @@ +//! +//! Implementation's internal macros + +macro_rules! debug_fmt_fields { + ($tyname:ident, $($($field:tt/*TODO ideally we would accept ident or tuple element here*/).+),*) => { + fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result { + f.debug_struct(stringify!($tyname)) + $( + .field(stringify!($($field).+), &self.$($field).+) + )* + .finish() + } + } +} + +macro_rules! clone_fields { + ($($field:ident),*) => { + #[inline] // TODO is this sensible? + fn clone(&self) -> Self { + Self { + $($field: self.$field.clone(),)* + } + } + } +} + +macro_rules! ignore_ident{ + ($id:ident, $($t:tt)*) => {$($t)*}; +} + +macro_rules! count_ident { + () => {0}; + ($i0:ident $($i:ident)*) => {1 + count_ident!($($i)*)}; +} diff --git a/vendor/itertools/src/intersperse.rs b/vendor/itertools/src/intersperse.rs new file mode 100644 index 00000000000000..5f4f7938ad052e --- /dev/null +++ b/vendor/itertools/src/intersperse.rs @@ -0,0 +1,142 @@ +use super::size_hint; +use std::iter::{Fuse, FusedIterator}; + +pub trait IntersperseElement { + fn generate(&mut self) -> Item; +} + +#[derive(Debug, Clone)] +pub struct IntersperseElementSimple(Item); + +impl IntersperseElement for IntersperseElementSimple { + fn generate(&mut self) -> Item { + self.0.clone() + } +} + +/// An iterator adaptor to insert a particular value +/// between each element of the adapted iterator. +/// +/// Iterator element type is `I::Item` +/// +/// This iterator is *fused*. +/// +/// See [`.intersperse()`](crate::Itertools::intersperse) for more information. +pub type Intersperse = IntersperseWith::Item>>; + +/// Create a new Intersperse iterator +pub fn intersperse(iter: I, elt: I::Item) -> Intersperse +where + I: Iterator, +{ + intersperse_with(iter, IntersperseElementSimple(elt)) +} + +impl Item> IntersperseElement for F { + fn generate(&mut self) -> Item { + self() + } +} + +/// An iterator adaptor to insert a particular value created by a function +/// between each element of the adapted iterator. +/// +/// Iterator element type is `I::Item` +/// +/// This iterator is *fused*. +/// +/// See [`.intersperse_with()`](crate::Itertools::intersperse_with) for more information. +#[must_use = "iterator adaptors are lazy and do nothing unless consumed"] +#[derive(Clone, Debug)] +pub struct IntersperseWith +where + I: Iterator, +{ + element: ElemF, + iter: Fuse, + /// `peek` is None while no item have been taken out of `iter` (at definition). + /// Then `peek` will alternatively be `Some(None)` and `Some(Some(item))`, + /// where `None` indicates it's time to generate from `element` (unless `iter` is empty). 
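// Rough trace for an underlying iterator yielding `a, b` with separator element `s`:
// peek: None -> yield `a` (peek = Some(None)) -> yield `s` (peek = Some(Some(b)))
// -> yield `b` (peek = Some(None)) -> exhausted.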
+ peek: Option>, +} + +/// Create a new `IntersperseWith` iterator +pub fn intersperse_with(iter: I, elt: ElemF) -> IntersperseWith +where + I: Iterator, +{ + IntersperseWith { + peek: None, + iter: iter.fuse(), + element: elt, + } +} + +impl Iterator for IntersperseWith +where + I: Iterator, + ElemF: IntersperseElement, +{ + type Item = I::Item; + #[inline] + fn next(&mut self) -> Option { + let Self { + element, + iter, + peek, + } = self; + match peek { + Some(item @ Some(_)) => item.take(), + Some(None) => match iter.next() { + new @ Some(_) => { + *peek = Some(new); + Some(element.generate()) + } + None => None, + }, + None => { + *peek = Some(None); + iter.next() + } + } + } + + fn size_hint(&self) -> (usize, Option) { + let mut sh = self.iter.size_hint(); + sh = size_hint::add(sh, sh); + match self.peek { + Some(Some(_)) => size_hint::add_scalar(sh, 1), + Some(None) => sh, + None => size_hint::sub_scalar(sh, 1), + } + } + + fn fold(self, init: B, mut f: F) -> B + where + Self: Sized, + F: FnMut(B, Self::Item) -> B, + { + let Self { + mut element, + mut iter, + peek, + } = self; + let mut accum = init; + + if let Some(x) = peek.unwrap_or_else(|| iter.next()) { + accum = f(accum, x); + } + + iter.fold(accum, |accum, x| { + let accum = f(accum, element.generate()); + f(accum, x) + }) + } +} + +impl FusedIterator for IntersperseWith +where + I: Iterator, + ElemF: IntersperseElement, +{ +} diff --git a/vendor/itertools/src/iter_index.rs b/vendor/itertools/src/iter_index.rs new file mode 100644 index 00000000000000..aadaa72a766912 --- /dev/null +++ b/vendor/itertools/src/iter_index.rs @@ -0,0 +1,116 @@ +use core::iter::{Skip, Take}; +use core::ops::{Range, RangeFrom, RangeFull, RangeInclusive, RangeTo, RangeToInclusive}; + +#[cfg(doc)] +use crate::Itertools; + +mod private_iter_index { + use core::ops; + + pub trait Sealed {} + + impl Sealed for ops::Range {} + impl Sealed for ops::RangeInclusive {} + impl Sealed for ops::RangeTo {} + impl Sealed for ops::RangeToInclusive {} + impl Sealed for ops::RangeFrom {} + impl Sealed for ops::RangeFull {} +} + +/// Used by [`Itertools::get`] to know which iterator +/// to turn different ranges into. +pub trait IteratorIndex: private_iter_index::Sealed +where + I: Iterator, +{ + /// The type returned for this type of index. + type Output: Iterator; + + /// Returns an adapted iterator for the current index. + /// + /// Prefer calling [`Itertools::get`] instead + /// of calling this directly. 
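// An illustrative sketch of the public entry point, assuming the `Itertools::get`
// wrapper that forwards to this trait:
//
//     use itertools::Itertools;
//     itertools::assert_equal((0..10).get(2..5), 2..5);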
+ fn index(self, from: I) -> Self::Output; +} + +impl IteratorIndex for Range +where + I: Iterator, +{ + type Output = Skip>; + + fn index(self, iter: I) -> Self::Output { + iter.take(self.end).skip(self.start) + } +} + +impl IteratorIndex for RangeInclusive +where + I: Iterator, +{ + type Output = Take>; + + fn index(self, iter: I) -> Self::Output { + // end - start + 1 without overflowing if possible + let length = if *self.end() == usize::MAX { + assert_ne!(*self.start(), 0); + self.end() - self.start() + 1 + } else { + (self.end() + 1).saturating_sub(*self.start()) + }; + iter.skip(*self.start()).take(length) + } +} + +impl IteratorIndex for RangeTo +where + I: Iterator, +{ + type Output = Take; + + fn index(self, iter: I) -> Self::Output { + iter.take(self.end) + } +} + +impl IteratorIndex for RangeToInclusive +where + I: Iterator, +{ + type Output = Take; + + fn index(self, iter: I) -> Self::Output { + assert_ne!(self.end, usize::MAX); + iter.take(self.end + 1) + } +} + +impl IteratorIndex for RangeFrom +where + I: Iterator, +{ + type Output = Skip; + + fn index(self, iter: I) -> Self::Output { + iter.skip(self.start) + } +} + +impl IteratorIndex for RangeFull +where + I: Iterator, +{ + type Output = I; + + fn index(self, iter: I) -> Self::Output { + iter + } +} + +pub fn get(iter: I, index: R) -> R::Output +where + I: IntoIterator, + R: IteratorIndex, +{ + index.index(iter.into_iter()) +} diff --git a/vendor/itertools/src/k_smallest.rs b/vendor/itertools/src/k_smallest.rs new file mode 100644 index 00000000000000..7b2f62ea124bf8 --- /dev/null +++ b/vendor/itertools/src/k_smallest.rs @@ -0,0 +1,98 @@ +use alloc::vec::Vec; +use core::cmp::Ordering; + +/// Consumes a given iterator, returning the minimum elements in **ascending** order. +pub(crate) fn k_smallest_general(iter: I, k: usize, mut comparator: F) -> Vec +where + I: Iterator, + F: FnMut(&I::Item, &I::Item) -> Ordering, +{ + /// Sift the element currently at `origin` away from the root until it is properly ordered. + /// + /// This will leave **larger** elements closer to the root of the heap. + fn sift_down(heap: &mut [T], is_less_than: &mut F, mut origin: usize) + where + F: FnMut(&T, &T) -> bool, + { + #[inline] + fn children_of(n: usize) -> (usize, usize) { + (2 * n + 1, 2 * n + 2) + } + + while origin < heap.len() { + let (left_idx, right_idx) = children_of(origin); + if left_idx >= heap.len() { + return; + } + + let replacement_idx = + if right_idx < heap.len() && is_less_than(&heap[left_idx], &heap[right_idx]) { + right_idx + } else { + left_idx + }; + + if is_less_than(&heap[origin], &heap[replacement_idx]) { + heap.swap(origin, replacement_idx); + origin = replacement_idx; + } else { + return; + } + } + } + + if k == 0 { + iter.last(); + return Vec::new(); + } + if k == 1 { + return iter.min_by(comparator).into_iter().collect(); + } + let mut iter = iter.fuse(); + let mut storage: Vec = iter.by_ref().take(k).collect(); + + let mut is_less_than = move |a: &_, b: &_| comparator(a, b) == Ordering::Less; + + // Rearrange the storage into a valid heap by reordering from the second-bottom-most layer up to the root. + // Slightly faster than ordering on each insert, but only by a factor of lg(k). + // The resulting heap has the **largest** item on top. 
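// Net effect: a bounded max-heap of the k smallest elements seen so far, giving
// O(n log k) time and O(k) auxiliary memory instead of sorting all n elements.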
+ for i in (0..=(storage.len() / 2)).rev() { + sift_down(&mut storage, &mut is_less_than, i); + } + + iter.for_each(|val| { + debug_assert_eq!(storage.len(), k); + if is_less_than(&val, &storage[0]) { + // Treating this as an push-and-pop saves having to write a sift-up implementation. + // https://en.wikipedia.org/wiki/Binary_heap#Insert_then_extract + storage[0] = val; + // We retain the smallest items we've seen so far, but ordered largest first so we can drop the largest efficiently. + sift_down(&mut storage, &mut is_less_than, 0); + } + }); + + // Ultimately the items need to be in least-first, strict order, but the heap is currently largest-first. + // To achieve this, repeatedly, + // 1) "pop" the largest item off the heap into the tail slot of the underlying storage, + // 2) shrink the logical size of the heap by 1, + // 3) restore the heap property over the remaining items. + let mut heap = &mut storage[..]; + while heap.len() > 1 { + let last_idx = heap.len() - 1; + heap.swap(0, last_idx); + // Sifting over a truncated slice means that the sifting will not disturb already popped elements. + heap = &mut heap[..last_idx]; + sift_down(heap, &mut is_less_than, 0); + } + + storage +} + +#[inline] +pub(crate) fn key_to_cmp(mut key: F) -> impl FnMut(&T, &T) -> Ordering +where + F: FnMut(&T) -> K, + K: Ord, +{ + move |a, b| key(a).cmp(&key(b)) +} diff --git a/vendor/itertools/src/kmerge_impl.rs b/vendor/itertools/src/kmerge_impl.rs new file mode 100644 index 00000000000000..0be3840a1b6686 --- /dev/null +++ b/vendor/itertools/src/kmerge_impl.rs @@ -0,0 +1,240 @@ +use crate::size_hint; +use crate::Itertools; + +use alloc::vec::Vec; +use std::fmt; +use std::iter::FusedIterator; +use std::mem::replace; + +/// Head element and Tail iterator pair +/// +/// `PartialEq`, `Eq`, `PartialOrd` and `Ord` are implemented by comparing sequences based on +/// first items (which are guaranteed to exist). +/// +/// The meanings of `PartialOrd` and `Ord` are reversed so as to turn the heap used in +/// `KMerge` into a min-heap. +#[derive(Debug)] +struct HeadTail +where + I: Iterator, +{ + head: I::Item, + tail: I, +} + +impl HeadTail +where + I: Iterator, +{ + /// Constructs a `HeadTail` from an `Iterator`. Returns `None` if the `Iterator` is empty. + fn new(mut it: I) -> Option { + let head = it.next(); + head.map(|h| Self { head: h, tail: it }) + } + + /// Get the next element and update `head`, returning the old head in `Some`. + /// + /// Returns `None` when the tail is exhausted (only `head` then remains). + fn next(&mut self) -> Option { + if let Some(next) = self.tail.next() { + Some(replace(&mut self.head, next)) + } else { + None + } + } + + /// Hints at the size of the sequence, same as the `Iterator` method. + fn size_hint(&self) -> (usize, Option) { + size_hint::add_scalar(self.tail.size_hint(), 1) + } +} + +impl Clone for HeadTail +where + I: Iterator + Clone, + I::Item: Clone, +{ + clone_fields!(head, tail); +} + +/// Make `data` a heap (min-heap w.r.t the sorting). 
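For orientation, a small sketch of how the heap-select routine completed above is reached from user code. It assumes the public `Itertools::k_smallest` adaptor (available with the `use_alloc` feature) is backed by this kind of bounded max-heap; the result comes back smallest-first.

use itertools::Itertools;

// Keep only the three smallest values, returned in ascending order.
let nums = vec![5, 1, 4, 1, 5, 9, 2, 6];
let three_smallest: Vec<_> = nums.into_iter().k_smallest(3).collect();
assert_eq!(three_smallest, vec![1, 1, 2]);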
+fn heapify(data: &mut [T], mut less_than: S) +where + S: FnMut(&T, &T) -> bool, +{ + for i in (0..data.len() / 2).rev() { + sift_down(data, i, &mut less_than); + } +} + +/// Sift down element at `index` (`heap` is a min-heap wrt the ordering) +fn sift_down(heap: &mut [T], index: usize, mut less_than: S) +where + S: FnMut(&T, &T) -> bool, +{ + debug_assert!(index <= heap.len()); + let mut pos = index; + let mut child = 2 * pos + 1; + // Require the right child to be present + // This allows to find the index of the smallest child without a branch + // that wouldn't be predicted if present + while child + 1 < heap.len() { + // pick the smaller of the two children + // use arithmetic to avoid an unpredictable branch + child += less_than(&heap[child + 1], &heap[child]) as usize; + + // sift down is done if we are already in order + if !less_than(&heap[child], &heap[pos]) { + return; + } + heap.swap(pos, child); + pos = child; + child = 2 * pos + 1; + } + // Check if the last (left) child was an only child + // if it is then it has to be compared with the parent + if child + 1 == heap.len() && less_than(&heap[child], &heap[pos]) { + heap.swap(pos, child); + } +} + +/// An iterator adaptor that merges an abitrary number of base iterators in ascending order. +/// If all base iterators are sorted (ascending), the result is sorted. +/// +/// Iterator element type is `I::Item`. +/// +/// See [`.kmerge()`](crate::Itertools::kmerge) for more information. +pub type KMerge = KMergeBy; + +pub trait KMergePredicate { + fn kmerge_pred(&mut self, a: &T, b: &T) -> bool; +} + +#[derive(Clone, Debug)] +pub struct KMergeByLt; + +impl KMergePredicate for KMergeByLt { + fn kmerge_pred(&mut self, a: &T, b: &T) -> bool { + a < b + } +} + +impl bool> KMergePredicate for F { + fn kmerge_pred(&mut self, a: &T, b: &T) -> bool { + self(a, b) + } +} + +/// Create an iterator that merges elements of the contained iterators using +/// the ordering function. +/// +/// [`IntoIterator`] enabled version of [`Itertools::kmerge`]. +/// +/// ``` +/// use itertools::kmerge; +/// +/// for elt in kmerge(vec![vec![0, 2, 4], vec![1, 3, 5], vec![6, 7]]) { +/// /* loop body */ +/// } +/// ``` +pub fn kmerge(iterable: I) -> KMerge<::IntoIter> +where + I: IntoIterator, + I::Item: IntoIterator, + <::Item as IntoIterator>::Item: PartialOrd, +{ + kmerge_by(iterable, KMergeByLt) +} + +/// An iterator adaptor that merges an abitrary number of base iterators +/// according to an ordering function. +/// +/// Iterator element type is `I::Item`. +/// +/// See [`.kmerge_by()`](crate::Itertools::kmerge_by) for more +/// information. +#[must_use = "this iterator adaptor is not lazy but does nearly nothing unless consumed"] +pub struct KMergeBy +where + I: Iterator, +{ + heap: Vec>, + less_than: F, +} + +impl fmt::Debug for KMergeBy +where + I: Iterator + fmt::Debug, + I::Item: fmt::Debug, +{ + debug_fmt_fields!(KMergeBy, heap); +} + +/// Create an iterator that merges elements of the contained iterators. +/// +/// [`IntoIterator`] enabled version of [`Itertools::kmerge_by`]. 
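An illustrative check (not part of the vendored file) of the k-way merge built on the `HeadTail` min-heap above: when every input iterator is already sorted ascending, the merged output is sorted as well.

use itertools::Itertools;

// Each inner vector is sorted, so the k-way merge yields 0..9 in order.
let streams = vec![vec![0, 3, 6], vec![1, 4, 7], vec![2, 5, 8]];
let merged: Vec<_> = streams.into_iter().kmerge().collect();
assert_eq!(merged, (0..9).collect::<Vec<_>>());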
+pub fn kmerge_by( + iterable: I, + mut less_than: F, +) -> KMergeBy<::IntoIter, F> +where + I: IntoIterator, + I::Item: IntoIterator, + F: KMergePredicate<<::Item as IntoIterator>::Item>, +{ + let iter = iterable.into_iter(); + let (lower, _) = iter.size_hint(); + let mut heap: Vec<_> = Vec::with_capacity(lower); + heap.extend(iter.filter_map(|it| HeadTail::new(it.into_iter()))); + heapify(&mut heap, |a, b| less_than.kmerge_pred(&a.head, &b.head)); + KMergeBy { heap, less_than } +} + +impl Clone for KMergeBy +where + I: Iterator + Clone, + I::Item: Clone, + F: Clone, +{ + clone_fields!(heap, less_than); +} + +impl Iterator for KMergeBy +where + I: Iterator, + F: KMergePredicate, +{ + type Item = I::Item; + + fn next(&mut self) -> Option { + if self.heap.is_empty() { + return None; + } + let result = if let Some(next) = self.heap[0].next() { + next + } else { + self.heap.swap_remove(0).head + }; + let less_than = &mut self.less_than; + sift_down(&mut self.heap, 0, |a, b| { + less_than.kmerge_pred(&a.head, &b.head) + }); + Some(result) + } + + fn size_hint(&self) -> (usize, Option) { + #[allow(deprecated)] //TODO: once msrv hits 1.51. replace `fold1` with `reduce` + self.heap + .iter() + .map(|i| i.size_hint()) + .fold1(size_hint::add) + .unwrap_or((0, Some(0))) + } +} + +impl FusedIterator for KMergeBy +where + I: Iterator, + F: KMergePredicate, +{ +} diff --git a/vendor/itertools/src/lazy_buffer.rs b/vendor/itertools/src/lazy_buffer.rs new file mode 100644 index 00000000000000..fefcff8f5c64de --- /dev/null +++ b/vendor/itertools/src/lazy_buffer.rs @@ -0,0 +1,75 @@ +use alloc::vec::Vec; +use std::iter::Fuse; +use std::ops::Index; + +use crate::size_hint::{self, SizeHint}; + +#[derive(Debug, Clone)] +pub struct LazyBuffer { + it: Fuse, + buffer: Vec, +} + +impl LazyBuffer +where + I: Iterator, +{ + pub fn new(it: I) -> Self { + Self { + it: it.fuse(), + buffer: Vec::new(), + } + } + + pub fn len(&self) -> usize { + self.buffer.len() + } + + pub fn size_hint(&self) -> SizeHint { + size_hint::add_scalar(self.it.size_hint(), self.len()) + } + + pub fn count(self) -> usize { + self.len() + self.it.count() + } + + pub fn get_next(&mut self) -> bool { + if let Some(x) = self.it.next() { + self.buffer.push(x); + true + } else { + false + } + } + + pub fn prefill(&mut self, len: usize) { + let buffer_len = self.buffer.len(); + if len > buffer_len { + let delta = len - buffer_len; + self.buffer.extend(self.it.by_ref().take(delta)); + } + } +} + +impl LazyBuffer +where + I: Iterator, + I::Item: Clone, +{ + pub fn get_at(&self, indices: &[usize]) -> Vec { + indices.iter().map(|i| self.buffer[*i].clone()).collect() + } +} + +impl Index for LazyBuffer +where + I: Iterator, + I::Item: Sized, + Vec: Index, +{ + type Output = as Index>::Output; + + fn index(&self, index: J) -> &Self::Output { + self.buffer.index(index) + } +} diff --git a/vendor/itertools/src/lib.rs b/vendor/itertools/src/lib.rs new file mode 100644 index 00000000000000..f4de79c5043a8c --- /dev/null +++ b/vendor/itertools/src/lib.rs @@ -0,0 +1,4365 @@ +#![warn(missing_docs, clippy::default_numeric_fallback)] +#![crate_name = "itertools"] +#![cfg_attr(not(feature = "use_std"), no_std)] + +//! Extra iterator adaptors, functions and macros. +//! +//! To extend [`Iterator`] with methods in this crate, import +//! the [`Itertools`] trait: +//! +//! ``` +//! use itertools::Itertools; +//! ``` +//! +//! Now, new methods like [`interleave`](Itertools::interleave) +//! are available on all iterators: +//! +//! ``` +//! 
use itertools::Itertools; +//! +//! let it = (1..3).interleave(vec![-1, -2]); +//! itertools::assert_equal(it, vec![1, -1, 2, -2]); +//! ``` +//! +//! Most iterator methods are also provided as functions (with the benefit +//! that they convert parameters using [`IntoIterator`]): +//! +//! ``` +//! use itertools::interleave; +//! +//! for elt in interleave(&[1, 2, 3], &[2, 3, 4]) { +//! /* loop body */ +//! } +//! ``` +//! +//! ## Crate Features +//! +//! - `use_std` +//! - Enabled by default. +//! - Disable to compile itertools using `#![no_std]`. This disables +//! any item that depend on allocations (see the `use_alloc` feature) +//! and hash maps (like `unique`, `counts`, `into_grouping_map` and more). +//! - `use_alloc` +//! - Enabled by default. +//! - Enables any item that depend on allocations (like `chunk_by`, +//! `kmerge`, `join` and many more). +//! +//! ## Rust Version +//! +//! This version of itertools requires Rust 1.43.1 or later. + +#[cfg(not(feature = "use_std"))] +extern crate core as std; + +#[cfg(feature = "use_alloc")] +extern crate alloc; + +#[cfg(feature = "use_alloc")] +use alloc::{collections::VecDeque, string::String, vec::Vec}; + +pub use either::Either; + +use core::borrow::Borrow; +use std::cmp::Ordering; +#[cfg(feature = "use_std")] +use std::collections::HashMap; +#[cfg(feature = "use_std")] +use std::collections::HashSet; +use std::fmt; +#[cfg(feature = "use_alloc")] +use std::fmt::Write; +#[cfg(feature = "use_std")] +use std::hash::Hash; +use std::iter::{once, IntoIterator}; +#[cfg(feature = "use_alloc")] +type VecDequeIntoIter = alloc::collections::vec_deque::IntoIter; +#[cfg(feature = "use_alloc")] +type VecIntoIter = alloc::vec::IntoIter; +use std::iter::FromIterator; + +#[macro_use] +mod impl_macros; + +// for compatibility with no std and macros +#[doc(hidden)] +pub use std::iter as __std_iter; + +/// The concrete iterator types. 
+pub mod structs { + #[cfg(feature = "use_alloc")] + pub use crate::adaptors::MultiProduct; + pub use crate::adaptors::{ + Batching, Coalesce, Dedup, DedupBy, DedupByWithCount, DedupWithCount, FilterMapOk, + FilterOk, Interleave, InterleaveShortest, MapInto, MapOk, Positions, Product, PutBack, + TakeWhileRef, TupleCombinations, Update, WhileSome, + }; + #[cfg(feature = "use_alloc")] + pub use crate::combinations::Combinations; + #[cfg(feature = "use_alloc")] + pub use crate::combinations_with_replacement::CombinationsWithReplacement; + pub use crate::cons_tuples_impl::ConsTuples; + #[cfg(feature = "use_std")] + pub use crate::duplicates_impl::{Duplicates, DuplicatesBy}; + pub use crate::exactly_one_err::ExactlyOneError; + pub use crate::flatten_ok::FlattenOk; + pub use crate::format::{Format, FormatWith}; + #[allow(deprecated)] + #[cfg(feature = "use_alloc")] + pub use crate::groupbylazy::GroupBy; + #[cfg(feature = "use_alloc")] + pub use crate::groupbylazy::{Chunk, ChunkBy, Chunks, Group, Groups, IntoChunks}; + #[cfg(feature = "use_std")] + pub use crate::grouping_map::{GroupingMap, GroupingMapBy}; + pub use crate::intersperse::{Intersperse, IntersperseWith}; + #[cfg(feature = "use_alloc")] + pub use crate::kmerge_impl::{KMerge, KMergeBy}; + pub use crate::merge_join::{Merge, MergeBy, MergeJoinBy}; + #[cfg(feature = "use_alloc")] + pub use crate::multipeek_impl::MultiPeek; + pub use crate::pad_tail::PadUsing; + #[cfg(feature = "use_alloc")] + pub use crate::peek_nth::PeekNth; + pub use crate::peeking_take_while::PeekingTakeWhile; + #[cfg(feature = "use_alloc")] + pub use crate::permutations::Permutations; + #[cfg(feature = "use_alloc")] + pub use crate::powerset::Powerset; + pub use crate::process_results_impl::ProcessResults; + #[cfg(feature = "use_alloc")] + pub use crate::put_back_n_impl::PutBackN; + #[cfg(feature = "use_alloc")] + pub use crate::rciter_impl::RcIter; + pub use crate::repeatn::RepeatN; + #[allow(deprecated)] + pub use crate::sources::{Iterate, Unfold}; + pub use crate::take_while_inclusive::TakeWhileInclusive; + #[cfg(feature = "use_alloc")] + pub use crate::tee::Tee; + pub use crate::tuple_impl::{CircularTupleWindows, TupleBuffer, TupleWindows, Tuples}; + #[cfg(feature = "use_std")] + pub use crate::unique_impl::{Unique, UniqueBy}; + pub use crate::with_position::WithPosition; + pub use crate::zip_eq_impl::ZipEq; + pub use crate::zip_longest::ZipLongest; + pub use crate::ziptuple::Zip; +} + +/// Traits helpful for using certain `Itertools` methods in generic contexts. 
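A brief sketch of why the concrete adaptor types are re-exported in `structs` above: they let downstream code spell out an adaptor's type when it has to be stored in a struct field or returned from a function (example is illustrative only).

use itertools::structs::Intersperse;
use itertools::Itertools;

// Naming the concrete adaptor type lets it live in a struct field.
struct Fields {
    inner: Intersperse<std::vec::IntoIter<char>>,
}

let f = Fields { inner: vec!['a', 'b'].into_iter().intersperse(',') };
assert_eq!(f.inner.collect::<String>(), "a,b");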
+pub mod traits { + pub use crate::iter_index::IteratorIndex; + pub use crate::tuple_impl::HomogeneousTuple; +} + +pub use crate::concat_impl::concat; +pub use crate::cons_tuples_impl::cons_tuples; +pub use crate::diff::diff_with; +pub use crate::diff::Diff; +#[cfg(feature = "use_alloc")] +pub use crate::kmerge_impl::kmerge_by; +pub use crate::minmax::MinMaxResult; +pub use crate::peeking_take_while::PeekingNext; +pub use crate::process_results_impl::process_results; +pub use crate::repeatn::repeat_n; +#[allow(deprecated)] +pub use crate::sources::{iterate, unfold}; +#[allow(deprecated)] +pub use crate::structs::*; +pub use crate::unziptuple::{multiunzip, MultiUnzip}; +pub use crate::with_position::Position; +pub use crate::ziptuple::multizip; +mod adaptors; +mod either_or_both; +pub use crate::either_or_both::EitherOrBoth; +#[doc(hidden)] +pub mod free; +#[doc(inline)] +pub use crate::free::*; +#[cfg(feature = "use_alloc")] +mod combinations; +#[cfg(feature = "use_alloc")] +mod combinations_with_replacement; +mod concat_impl; +mod cons_tuples_impl; +mod diff; +#[cfg(feature = "use_std")] +mod duplicates_impl; +mod exactly_one_err; +#[cfg(feature = "use_alloc")] +mod extrema_set; +mod flatten_ok; +mod format; +#[cfg(feature = "use_alloc")] +mod group_map; +#[cfg(feature = "use_alloc")] +mod groupbylazy; +#[cfg(feature = "use_std")] +mod grouping_map; +mod intersperse; +mod iter_index; +#[cfg(feature = "use_alloc")] +mod k_smallest; +#[cfg(feature = "use_alloc")] +mod kmerge_impl; +#[cfg(feature = "use_alloc")] +mod lazy_buffer; +mod merge_join; +mod minmax; +#[cfg(feature = "use_alloc")] +mod multipeek_impl; +mod pad_tail; +#[cfg(feature = "use_alloc")] +mod peek_nth; +mod peeking_take_while; +#[cfg(feature = "use_alloc")] +mod permutations; +#[cfg(feature = "use_alloc")] +mod powerset; +mod process_results_impl; +#[cfg(feature = "use_alloc")] +mod put_back_n_impl; +#[cfg(feature = "use_alloc")] +mod rciter_impl; +mod repeatn; +mod size_hint; +mod sources; +mod take_while_inclusive; +#[cfg(feature = "use_alloc")] +mod tee; +mod tuple_impl; +#[cfg(feature = "use_std")] +mod unique_impl; +mod unziptuple; +mod with_position; +mod zip_eq_impl; +mod zip_longest; +mod ziptuple; + +#[macro_export] +/// Create an iterator over the “cartesian product” of iterators. +/// +/// Iterator element type is like `(A, B, ..., E)` if formed +/// from iterators `(I, J, ..., M)` with element types `I::Item = A`, `J::Item = B`, etc. +/// +/// ``` +/// # use itertools::iproduct; +/// # +/// # fn main() { +/// // Iterate over the coordinates of a 4 x 4 x 4 grid +/// // from (0, 0, 0), (0, 0, 1), .., (0, 1, 0), (0, 1, 1), .. etc until (3, 3, 3) +/// for (i, j, k) in iproduct!(0..4, 0..4, 0..4) { +/// // .. +/// } +/// # } +/// ``` +macro_rules! iproduct { + (@flatten $I:expr,) => ( + $I + ); + (@flatten $I:expr, $J:expr, $($K:expr,)*) => ( + $crate::iproduct!(@flatten $crate::cons_tuples($crate::iproduct!($I, $J)), $($K,)*) + ); + () => ( + $crate::__std_iter::once(()) + ); + ($I:expr $(,)?) => ( + $crate::__std_iter::IntoIterator::into_iter($I).map(|elt| (elt,)) + ); + ($I:expr, $J:expr $(,)?) => ( + $crate::Itertools::cartesian_product( + $crate::__std_iter::IntoIterator::into_iter($I), + $crate::__std_iter::IntoIterator::into_iter($J), + ) + ); + ($I:expr, $J:expr, $($K:expr),+ $(,)?) => ( + $crate::iproduct!(@flatten $crate::iproduct!($I, $J), $($K,)+) + ); +} + +#[macro_export] +/// Create an iterator running multiple iterators in lockstep. 
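Before moving on to `izip!`, an illustrative check of the tuple flattening performed by the `@flatten` rules of `iproduct!` above: three inputs come out as a single `(A, B, C)` tuple rather than `((A, B), C)`, with the rightmost input varying fastest (sketch only, not part of the vendored file).

use itertools::iproduct;

// Three inputs flatten to one (A, B, C) tuple; the rightmost varies fastest.
let cells: Vec<_> = iproduct!(0..2, "ab".chars(), vec![true, false]).collect();
assert_eq!(cells.len(), 8);
assert_eq!(cells[0], (0, 'a', true));
assert_eq!(cells[1], (0, 'a', false));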
+/// +/// The `izip!` iterator yields elements until any subiterator +/// returns `None`. +/// +/// This is a version of the standard ``.zip()`` that's supporting more than +/// two iterators. The iterator element type is a tuple with one element +/// from each of the input iterators. Just like ``.zip()``, the iteration stops +/// when the shortest of the inputs reaches its end. +/// +/// **Note:** The result of this macro is in the general case an iterator +/// composed of repeated `.zip()` and a `.map()`; it has an anonymous type. +/// The special cases of one and two arguments produce the equivalent of +/// `$a.into_iter()` and `$a.into_iter().zip($b)` respectively. +/// +/// Prefer this macro `izip!()` over [`multizip`] for the performance benefits +/// of using the standard library `.zip()`. +/// +/// ``` +/// # use itertools::izip; +/// # +/// # fn main() { +/// +/// // iterate over three sequences side-by-side +/// let mut results = [0, 0, 0, 0]; +/// let inputs = [3, 7, 9, 6]; +/// +/// for (r, index, input) in izip!(&mut results, 0..10, &inputs) { +/// *r = index * 10 + input; +/// } +/// +/// assert_eq!(results, [0 + 3, 10 + 7, 29, 36]); +/// # } +/// ``` +macro_rules! izip { + // @closure creates a tuple-flattening closure for .map() call. usage: + // @closure partial_pattern => partial_tuple , rest , of , iterators + // eg. izip!( @closure ((a, b), c) => (a, b, c) , dd , ee ) + ( @closure $p:pat => $tup:expr ) => { + |$p| $tup + }; + + // The "b" identifier is a different identifier on each recursion level thanks to hygiene. + ( @closure $p:pat => ( $($tup:tt)* ) , $_iter:expr $( , $tail:expr )* ) => { + $crate::izip!(@closure ($p, b) => ( $($tup)*, b ) $( , $tail )*) + }; + + // unary + ($first:expr $(,)*) => { + $crate::__std_iter::IntoIterator::into_iter($first) + }; + + // binary + ($first:expr, $second:expr $(,)*) => { + $crate::izip!($first) + .zip($second) + }; + + // n-ary where n > 2 + ( $first:expr $( , $rest:expr )* $(,)* ) => { + $crate::izip!($first) + $( + .zip($rest) + )* + .map( + $crate::izip!(@closure a => (a) $( , $rest )*) + ) + }; +} + +#[macro_export] +/// [Chain][`chain`] zero or more iterators together into one sequence. +/// +/// The comma-separated arguments must implement [`IntoIterator`]. +/// The final argument may be followed by a trailing comma. +/// +/// [`chain`]: Iterator::chain +/// +/// # Examples +/// +/// Empty invocations of `chain!` expand to an invocation of [`std::iter::empty`]: +/// ``` +/// use std::iter; +/// use itertools::chain; +/// +/// let _: iter::Empty<()> = chain!(); +/// let _: iter::Empty = chain!(); +/// ``` +/// +/// Invocations of `chain!` with one argument expand to [`arg.into_iter()`](IntoIterator): +/// ``` +/// use std::{ops::Range, slice}; +/// use itertools::chain; +/// let _: as IntoIterator>::IntoIter = chain!((2..6),); // trailing comma optional! 
+/// let _: <&[_] as IntoIterator>::IntoIter = chain!(&[2, 3, 4]); +/// ``` +/// +/// Invocations of `chain!` with multiple arguments [`.into_iter()`](IntoIterator) each +/// argument, and then [`chain`] them together: +/// ``` +/// use std::{iter::*, ops::Range, slice}; +/// use itertools::{assert_equal, chain}; +/// +/// // e.g., this: +/// let with_macro: Chain, Take>>, slice::Iter<_>> = +/// chain![once(&0), repeat(&1).take(2), &[2, 3, 5],]; +/// +/// // ...is equivalent to this: +/// let with_method: Chain, Take>>, slice::Iter<_>> = +/// once(&0) +/// .chain(repeat(&1).take(2)) +/// .chain(&[2, 3, 5]); +/// +/// assert_equal(with_macro, with_method); +/// ``` +macro_rules! chain { + () => { + core::iter::empty() + }; + ($first:expr $(, $rest:expr )* $(,)?) => { + { + let iter = core::iter::IntoIterator::into_iter($first); + $( + let iter = + core::iter::Iterator::chain( + iter, + core::iter::IntoIterator::into_iter($rest)); + )* + iter + } + }; +} + +/// An [`Iterator`] blanket implementation that provides extra adaptors and +/// methods. +/// +/// This trait defines a number of methods. They are divided into two groups: +/// +/// * *Adaptors* take an iterator and parameter as input, and return +/// a new iterator value. These are listed first in the trait. An example +/// of an adaptor is [`.interleave()`](Itertools::interleave) +/// +/// * *Regular methods* are those that don't return iterators and instead +/// return a regular value of some other kind. +/// [`.next_tuple()`](Itertools::next_tuple) is an example and the first regular +/// method in the list. +pub trait Itertools: Iterator { + // adaptors + + /// Alternate elements from two iterators until both have run out. + /// + /// Iterator element type is `Self::Item`. + /// + /// This iterator is *fused*. + /// + /// ``` + /// use itertools::Itertools; + /// + /// let it = (1..7).interleave(vec![-1, -2]); + /// itertools::assert_equal(it, vec![1, -1, 2, -2, 3, 4, 5, 6]); + /// ``` + fn interleave(self, other: J) -> Interleave + where + J: IntoIterator, + Self: Sized, + { + interleave(self, other) + } + + /// Alternate elements from two iterators until at least one of them has run + /// out. + /// + /// Iterator element type is `Self::Item`. + /// + /// ``` + /// use itertools::Itertools; + /// + /// let it = (1..7).interleave_shortest(vec![-1, -2]); + /// itertools::assert_equal(it, vec![1, -1, 2, -2, 3]); + /// ``` + fn interleave_shortest(self, other: J) -> InterleaveShortest + where + J: IntoIterator, + Self: Sized, + { + adaptors::interleave_shortest(self, other.into_iter()) + } + + /// An iterator adaptor to insert a particular value + /// between each element of the adapted iterator. + /// + /// Iterator element type is `Self::Item`. + /// + /// This iterator is *fused*. + /// + /// ``` + /// use itertools::Itertools; + /// + /// itertools::assert_equal((0..3).intersperse(8), vec![0, 8, 1, 8, 2]); + /// ``` + fn intersperse(self, element: Self::Item) -> Intersperse + where + Self: Sized, + Self::Item: Clone, + { + intersperse::intersperse(self, element) + } + + /// An iterator adaptor to insert a particular value created by a function + /// between each element of the adapted iterator. + /// + /// Iterator element type is `Self::Item`. + /// + /// This iterator is *fused*. 
+ /// + /// ``` + /// use itertools::Itertools; + /// + /// let mut i = 10; + /// itertools::assert_equal((0..3).intersperse_with(|| { i -= 1; i }), vec![0, 9, 1, 8, 2]); + /// assert_eq!(i, 8); + /// ``` + fn intersperse_with(self, element: F) -> IntersperseWith + where + Self: Sized, + F: FnMut() -> Self::Item, + { + intersperse::intersperse_with(self, element) + } + + /// Returns an iterator over a subsection of the iterator. + /// + /// Works similarly to [`slice::get`](https://doc.rust-lang.org/std/primitive.slice.html#method.get). + /// + /// **Panics** for ranges `..=usize::MAX` and `0..=usize::MAX`. + /// + /// It's a generalisation of [`Iterator::take`] and [`Iterator::skip`], + /// and uses these under the hood. + /// Therefore, the resulting iterator is: + /// - [`ExactSizeIterator`] if the adapted iterator is [`ExactSizeIterator`]. + /// - [`DoubleEndedIterator`] if the adapted iterator is [`DoubleEndedIterator`] and [`ExactSizeIterator`]. + /// + /// # Unspecified Behavior + /// The result of indexing with an exhausted [`core::ops::RangeInclusive`] is unspecified. + /// + /// # Examples + /// + /// ``` + /// use itertools::Itertools; + /// + /// let vec = vec![3, 1, 4, 1, 5]; + /// + /// let mut range: Vec<_> = + /// vec.iter().get(1..=3).copied().collect(); + /// assert_eq!(&range, &[1, 4, 1]); + /// + /// // It works with other types of ranges, too + /// range = vec.iter().get(..2).copied().collect(); + /// assert_eq!(&range, &[3, 1]); + /// + /// range = vec.iter().get(0..1).copied().collect(); + /// assert_eq!(&range, &[3]); + /// + /// range = vec.iter().get(2..).copied().collect(); + /// assert_eq!(&range, &[4, 1, 5]); + /// + /// range = vec.iter().get(..=2).copied().collect(); + /// assert_eq!(&range, &[3, 1, 4]); + /// + /// range = vec.iter().get(..).copied().collect(); + /// assert_eq!(range, vec); + /// ``` + fn get(self, index: R) -> R::Output + where + Self: Sized, + R: traits::IteratorIndex, + { + iter_index::get(self, index) + } + + /// Create an iterator which iterates over both this and the specified + /// iterator simultaneously, yielding pairs of two optional elements. + /// + /// This iterator is *fused*. + /// + /// As long as neither input iterator is exhausted yet, it yields two values + /// via `EitherOrBoth::Both`. + /// + /// When the parameter iterator is exhausted, it only yields a value from the + /// `self` iterator via `EitherOrBoth::Left`. + /// + /// When the `self` iterator is exhausted, it only yields a value from the + /// parameter iterator via `EitherOrBoth::Right`. + /// + /// When both iterators return `None`, all further invocations of `.next()` + /// will return `None`. + /// + /// Iterator element type is + /// [`EitherOrBoth`](EitherOrBoth). + /// + /// ```rust + /// use itertools::EitherOrBoth::{Both, Right}; + /// use itertools::Itertools; + /// let it = (0..1).zip_longest(1..3); + /// itertools::assert_equal(it, vec![Both(0, 1), Right(2)]); + /// ``` + #[inline] + fn zip_longest(self, other: J) -> ZipLongest + where + J: IntoIterator, + Self: Sized, + { + zip_longest::zip_longest(self, other.into_iter()) + } + + /// Create an iterator which iterates over both this and the specified + /// iterator simultaneously, yielding pairs of elements. + /// + /// **Panics** if the iterators reach an end and they are not of equal + /// lengths. + #[inline] + fn zip_eq(self, other: J) -> ZipEq + where + J: IntoIterator, + Self: Sized, + { + zip_eq(self, other) + } + + /// A “meta iterator adaptor”. 
Its closure receives a reference to the + /// iterator and may pick off as many elements as it likes, to produce the + /// next iterator element. + /// + /// Iterator element type is `B`. + /// + /// ``` + /// use itertools::Itertools; + /// + /// // An adaptor that gathers elements in pairs + /// let pit = (0..4).batching(|it| { + /// match it.next() { + /// None => None, + /// Some(x) => match it.next() { + /// None => None, + /// Some(y) => Some((x, y)), + /// } + /// } + /// }); + /// + /// itertools::assert_equal(pit, vec![(0, 1), (2, 3)]); + /// ``` + /// + fn batching(self, f: F) -> Batching + where + F: FnMut(&mut Self) -> Option, + Self: Sized, + { + adaptors::batching(self, f) + } + + /// Return an *iterable* that can group iterator elements. + /// Consecutive elements that map to the same key (“runs”), are assigned + /// to the same group. + /// + /// `ChunkBy` is the storage for the lazy grouping operation. + /// + /// If the groups are consumed in order, or if each group's iterator is + /// dropped without keeping it around, then `ChunkBy` uses no + /// allocations. It needs allocations only if several group iterators + /// are alive at the same time. + /// + /// This type implements [`IntoIterator`] (it is **not** an iterator + /// itself), because the group iterators need to borrow from this + /// value. It should be stored in a local variable or temporary and + /// iterated. + /// + /// Iterator element type is `(K, Group)`: the group's key and the + /// group iterator. + /// + /// ``` + /// use itertools::Itertools; + /// + /// // chunk data into runs of larger than zero or not. + /// let data = vec![1, 3, -2, -2, 1, 0, 1, 2]; + /// // chunks: |---->|------>|--------->| + /// + /// // Note: The `&` is significant here, `ChunkBy` is iterable + /// // only by reference. You can also call `.into_iter()` explicitly. + /// let mut data_grouped = Vec::new(); + /// for (key, chunk) in &data.into_iter().chunk_by(|elt| *elt >= 0) { + /// data_grouped.push((key, chunk.collect())); + /// } + /// assert_eq!(data_grouped, vec![(true, vec![1, 3]), (false, vec![-2, -2]), (true, vec![1, 0, 1, 2])]); + /// ``` + #[cfg(feature = "use_alloc")] + fn chunk_by(self, key: F) -> ChunkBy + where + Self: Sized, + F: FnMut(&Self::Item) -> K, + K: PartialEq, + { + groupbylazy::new(self, key) + } + + /// See [`.chunk_by()`](Itertools::chunk_by). + #[deprecated(note = "Use .chunk_by() instead", since = "0.13.0")] + #[cfg(feature = "use_alloc")] + fn group_by(self, key: F) -> ChunkBy + where + Self: Sized, + F: FnMut(&Self::Item) -> K, + K: PartialEq, + { + self.chunk_by(key) + } + + /// Return an *iterable* that can chunk the iterator. + /// + /// Yield subiterators (chunks) that each yield a fixed number elements, + /// determined by `size`. The last chunk will be shorter if there aren't + /// enough elements. + /// + /// `IntoChunks` is based on `ChunkBy`: it is iterable (implements + /// `IntoIterator`, **not** `Iterator`), and it only buffers if several + /// chunk iterators are alive at the same time. + /// + /// Iterator element type is `Chunk`, each chunk's iterator. + /// + /// **Panics** if `size` is 0. + /// + /// ``` + /// use itertools::Itertools; + /// + /// let data = vec![1, 1, 2, -2, 6, 0, 3, 1]; + /// //chunk size=3 |------->|-------->|--->| + /// + /// // Note: The `&` is significant here, `IntoChunks` is iterable + /// // only by reference. You can also call `.into_iter()` explicitly. + /// for chunk in &data.into_iter().chunks(3) { + /// // Check that the sum of each chunk is 4. 
+ /// assert_eq!(4, chunk.sum()); + /// } + /// ``` + #[cfg(feature = "use_alloc")] + fn chunks(self, size: usize) -> IntoChunks + where + Self: Sized, + { + assert!(size != 0); + groupbylazy::new_chunks(self, size) + } + + /// Return an iterator over all contiguous windows producing tuples of + /// a specific size (up to 12). + /// + /// `tuple_windows` clones the iterator elements so that they can be + /// part of successive windows, this makes it most suited for iterators + /// of references and other values that are cheap to copy. + /// + /// ``` + /// use itertools::Itertools; + /// let mut v = Vec::new(); + /// + /// // pairwise iteration + /// for (a, b) in (1..5).tuple_windows() { + /// v.push((a, b)); + /// } + /// assert_eq!(v, vec![(1, 2), (2, 3), (3, 4)]); + /// + /// let mut it = (1..5).tuple_windows(); + /// assert_eq!(Some((1, 2, 3)), it.next()); + /// assert_eq!(Some((2, 3, 4)), it.next()); + /// assert_eq!(None, it.next()); + /// + /// // this requires a type hint + /// let it = (1..5).tuple_windows::<(_, _, _)>(); + /// itertools::assert_equal(it, vec![(1, 2, 3), (2, 3, 4)]); + /// + /// // you can also specify the complete type + /// use itertools::TupleWindows; + /// use std::ops::Range; + /// + /// let it: TupleWindows, (u32, u32, u32)> = (1..5).tuple_windows(); + /// itertools::assert_equal(it, vec![(1, 2, 3), (2, 3, 4)]); + /// ``` + fn tuple_windows(self) -> TupleWindows + where + Self: Sized + Iterator, + T: traits::HomogeneousTuple, + T::Item: Clone, + { + tuple_impl::tuple_windows(self) + } + + /// Return an iterator over all windows, wrapping back to the first + /// elements when the window would otherwise exceed the length of the + /// iterator, producing tuples of a specific size (up to 12). + /// + /// `circular_tuple_windows` clones the iterator elements so that they can be + /// part of successive windows, this makes it most suited for iterators + /// of references and other values that are cheap to copy. + /// + /// ``` + /// use itertools::Itertools; + /// let mut v = Vec::new(); + /// for (a, b) in (1..5).circular_tuple_windows() { + /// v.push((a, b)); + /// } + /// assert_eq!(v, vec![(1, 2), (2, 3), (3, 4), (4, 1)]); + /// + /// let mut it = (1..5).circular_tuple_windows(); + /// assert_eq!(Some((1, 2, 3)), it.next()); + /// assert_eq!(Some((2, 3, 4)), it.next()); + /// assert_eq!(Some((3, 4, 1)), it.next()); + /// assert_eq!(Some((4, 1, 2)), it.next()); + /// assert_eq!(None, it.next()); + /// + /// // this requires a type hint + /// let it = (1..5).circular_tuple_windows::<(_, _, _)>(); + /// itertools::assert_equal(it, vec![(1, 2, 3), (2, 3, 4), (3, 4, 1), (4, 1, 2)]); + /// ``` + fn circular_tuple_windows(self) -> CircularTupleWindows + where + Self: Sized + Clone + Iterator + ExactSizeIterator, + T: tuple_impl::TupleCollect + Clone, + T::Item: Clone, + { + tuple_impl::circular_tuple_windows(self) + } + /// Return an iterator that groups the items in tuples of a specific size + /// (up to 12). + /// + /// See also the method [`.next_tuple()`](Itertools::next_tuple). 
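Since `.next_tuple()` is referenced just above but its own example sits elsewhere, a small illustrative sketch (not part of the vendored file): it pulls the next N items into a tuple in one call, or returns `None` when too few remain.

use itertools::Itertools;

// next_tuple takes the next N items as a tuple, or None if the iterator is too short.
let mut it = 1..5;
let (a, b) = it.next_tuple().unwrap();
assert_eq!((a, b), (1, 2));
assert_eq!(it.next_tuple::<(_, _, _)>(), None); // only 3 and 4 were left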
+ /// + /// ``` + /// use itertools::Itertools; + /// let mut v = Vec::new(); + /// for (a, b) in (1..5).tuples() { + /// v.push((a, b)); + /// } + /// assert_eq!(v, vec![(1, 2), (3, 4)]); + /// + /// let mut it = (1..7).tuples(); + /// assert_eq!(Some((1, 2, 3)), it.next()); + /// assert_eq!(Some((4, 5, 6)), it.next()); + /// assert_eq!(None, it.next()); + /// + /// // this requires a type hint + /// let it = (1..7).tuples::<(_, _, _)>(); + /// itertools::assert_equal(it, vec![(1, 2, 3), (4, 5, 6)]); + /// + /// // you can also specify the complete type + /// use itertools::Tuples; + /// use std::ops::Range; + /// + /// let it: Tuples, (u32, u32, u32)> = (1..7).tuples(); + /// itertools::assert_equal(it, vec![(1, 2, 3), (4, 5, 6)]); + /// ``` + /// + /// See also [`Tuples::into_buffer`]. + fn tuples(self) -> Tuples + where + Self: Sized + Iterator, + T: traits::HomogeneousTuple, + { + tuple_impl::tuples(self) + } + + /// Split into an iterator pair that both yield all elements from + /// the original iterator. + /// + /// **Note:** If the iterator is clonable, prefer using that instead + /// of using this method. Cloning is likely to be more efficient. + /// + /// Iterator element type is `Self::Item`. + /// + /// ``` + /// use itertools::Itertools; + /// let xs = vec![0, 1, 2, 3]; + /// + /// let (mut t1, t2) = xs.into_iter().tee(); + /// itertools::assert_equal(t1.next(), Some(0)); + /// itertools::assert_equal(t2, 0..4); + /// itertools::assert_equal(t1, 1..4); + /// ``` + #[cfg(feature = "use_alloc")] + fn tee(self) -> (Tee, Tee) + where + Self: Sized, + Self::Item: Clone, + { + tee::new(self) + } + + /// Convert each item of the iterator using the [`Into`] trait. + /// + /// ```rust + /// use itertools::Itertools; + /// + /// (1i32..42i32).map_into::().collect_vec(); + /// ``` + fn map_into(self) -> MapInto + where + Self: Sized, + Self::Item: Into, + { + adaptors::map_into(self) + } + + /// Return an iterator adaptor that applies the provided closure + /// to every `Result::Ok` value. `Result::Err` values are + /// unchanged. + /// + /// ``` + /// use itertools::Itertools; + /// + /// let input = vec![Ok(41), Err(false), Ok(11)]; + /// let it = input.into_iter().map_ok(|i| i + 1); + /// itertools::assert_equal(it, vec![Ok(42), Err(false), Ok(12)]); + /// ``` + fn map_ok(self, f: F) -> MapOk + where + Self: Iterator> + Sized, + F: FnMut(T) -> U, + { + adaptors::map_ok(self, f) + } + + /// Return an iterator adaptor that filters every `Result::Ok` + /// value with the provided closure. `Result::Err` values are + /// unchanged. + /// + /// ``` + /// use itertools::Itertools; + /// + /// let input = vec![Ok(22), Err(false), Ok(11)]; + /// let it = input.into_iter().filter_ok(|&i| i > 20); + /// itertools::assert_equal(it, vec![Ok(22), Err(false)]); + /// ``` + fn filter_ok(self, f: F) -> FilterOk + where + Self: Iterator> + Sized, + F: FnMut(&T) -> bool, + { + adaptors::filter_ok(self, f) + } + + /// Return an iterator adaptor that filters and transforms every + /// `Result::Ok` value with the provided closure. `Result::Err` + /// values are unchanged. 
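One detail worth spelling out for the `*_ok` adaptors documented above (illustrative sketch, using nothing beyond the adaptors shown here): `Err` values pass through unchanged, so the usual `collect::<Result<_, _>>()` short-circuiting still applies after the transformation.

use itertools::Itertools;

// The Err value survives map_ok, so collecting into Result short-circuits on it.
let input = vec![Ok(1), Ok(2), Err("bad"), Ok(4)];
let doubled: Result<Vec<i32>, &str> = input.into_iter().map_ok(|x| x * 2).collect();
assert_eq!(doubled, Err("bad"));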
+ /// + /// ``` + /// use itertools::Itertools; + /// + /// let input = vec![Ok(22), Err(false), Ok(11)]; + /// let it = input.into_iter().filter_map_ok(|i| if i > 20 { Some(i * 2) } else { None }); + /// itertools::assert_equal(it, vec![Ok(44), Err(false)]); + /// ``` + fn filter_map_ok(self, f: F) -> FilterMapOk + where + Self: Iterator> + Sized, + F: FnMut(T) -> Option, + { + adaptors::filter_map_ok(self, f) + } + + /// Return an iterator adaptor that flattens every `Result::Ok` value into + /// a series of `Result::Ok` values. `Result::Err` values are unchanged. + /// + /// This is useful when you have some common error type for your crate and + /// need to propagate it upwards, but the `Result::Ok` case needs to be flattened. + /// + /// ``` + /// use itertools::Itertools; + /// + /// let input = vec![Ok(0..2), Err(false), Ok(2..4)]; + /// let it = input.iter().cloned().flatten_ok(); + /// itertools::assert_equal(it.clone(), vec![Ok(0), Ok(1), Err(false), Ok(2), Ok(3)]); + /// + /// // This can also be used to propagate errors when collecting. + /// let output_result: Result, bool> = it.collect(); + /// assert_eq!(output_result, Err(false)); + /// ``` + fn flatten_ok(self) -> FlattenOk + where + Self: Iterator> + Sized, + T: IntoIterator, + { + flatten_ok::flatten_ok(self) + } + + /// “Lift” a function of the values of the current iterator so as to process + /// an iterator of `Result` values instead. + /// + /// `processor` is a closure that receives an adapted version of the iterator + /// as the only argument — the adapted iterator produces elements of type `T`, + /// as long as the original iterator produces `Ok` values. + /// + /// If the original iterable produces an error at any point, the adapted + /// iterator ends and it will return the error iself. + /// + /// Otherwise, the return value from the closure is returned wrapped + /// inside `Ok`. + /// + /// # Example + /// + /// ``` + /// use itertools::Itertools; + /// + /// type Item = Result; + /// + /// let first_values: Vec = vec![Ok(1), Ok(0), Ok(3)]; + /// let second_values: Vec = vec![Ok(2), Ok(1), Err("overflow")]; + /// + /// // “Lift” the iterator .max() method to work on the Ok-values. + /// let first_max = first_values.into_iter().process_results(|iter| iter.max().unwrap_or(0)); + /// let second_max = second_values.into_iter().process_results(|iter| iter.max().unwrap_or(0)); + /// + /// assert_eq!(first_max, Ok(3)); + /// assert!(second_max.is_err()); + /// ``` + fn process_results(self, processor: F) -> Result + where + Self: Iterator> + Sized, + F: FnOnce(ProcessResults) -> R, + { + process_results(self, processor) + } + + /// Return an iterator adaptor that merges the two base iterators in + /// ascending order. If both base iterators are sorted (ascending), the + /// result is sorted. + /// + /// Iterator element type is `Self::Item`. + /// + /// ``` + /// use itertools::Itertools; + /// + /// let a = (0..11).step_by(3); + /// let b = (0..11).step_by(5); + /// let it = a.merge(b); + /// itertools::assert_equal(it, vec![0, 0, 3, 5, 6, 9, 10]); + /// ``` + fn merge(self, other: J) -> Merge + where + Self: Sized, + Self::Item: PartialOrd, + J: IntoIterator, + { + merge(self, other) + } + + /// Return an iterator adaptor that merges the two base iterators in order. + /// This is much like [`.merge()`](Itertools::merge) but allows for a custom ordering. + /// + /// This can be especially useful for sequences of tuples. + /// + /// Iterator element type is `Self::Item`. 
+ /// + /// ``` + /// use itertools::Itertools; + /// + /// let a = (0..).zip("bc".chars()); + /// let b = (0..).zip("ad".chars()); + /// let it = a.merge_by(b, |x, y| x.1 <= y.1); + /// itertools::assert_equal(it, vec![(0, 'a'), (0, 'b'), (1, 'c'), (1, 'd')]); + /// ``` + + fn merge_by(self, other: J, is_first: F) -> MergeBy + where + Self: Sized, + J: IntoIterator, + F: FnMut(&Self::Item, &Self::Item) -> bool, + { + merge_join::merge_by_new(self, other, is_first) + } + + /// Create an iterator that merges items from both this and the specified + /// iterator in ascending order. + /// + /// The function can either return an `Ordering` variant or a boolean. + /// + /// If `cmp_fn` returns `Ordering`, + /// it chooses whether to pair elements based on the `Ordering` returned by the + /// specified compare function. At any point, inspecting the tip of the + /// iterators `I` and `J` as items `i` of type `I::Item` and `j` of type + /// `J::Item` respectively, the resulting iterator will: + /// + /// - Emit `EitherOrBoth::Left(i)` when `i < j`, + /// and remove `i` from its source iterator + /// - Emit `EitherOrBoth::Right(j)` when `i > j`, + /// and remove `j` from its source iterator + /// - Emit `EitherOrBoth::Both(i, j)` when `i == j`, + /// and remove both `i` and `j` from their respective source iterators + /// + /// ``` + /// use itertools::Itertools; + /// use itertools::EitherOrBoth::{Left, Right, Both}; + /// + /// let a = vec![0, 2, 4, 6, 1].into_iter(); + /// let b = (0..10).step_by(3); + /// + /// itertools::assert_equal( + /// a.merge_join_by(b, |i, j| i.cmp(j)), + /// vec![Both(0, 0), Left(2), Right(3), Left(4), Both(6, 6), Left(1), Right(9)] + /// ); + /// ``` + /// + /// If `cmp_fn` returns `bool`, + /// it chooses whether to pair elements based on the boolean returned by the + /// specified function. At any point, inspecting the tip of the + /// iterators `I` and `J` as items `i` of type `I::Item` and `j` of type + /// `J::Item` respectively, the resulting iterator will: + /// + /// - Emit `Either::Left(i)` when `true`, + /// and remove `i` from its source iterator + /// - Emit `Either::Right(j)` when `false`, + /// and remove `j` from its source iterator + /// + /// It is similar to the `Ordering` case if the first argument is considered + /// "less" than the second argument. + /// + /// ``` + /// use itertools::Itertools; + /// use itertools::Either::{Left, Right}; + /// + /// let a = vec![0, 2, 4, 6, 1].into_iter(); + /// let b = (0..10).step_by(3); + /// + /// itertools::assert_equal( + /// a.merge_join_by(b, |i, j| i <= j), + /// vec![Left(0), Right(0), Left(2), Right(3), Left(4), Left(6), Left(1), Right(6), Right(9)] + /// ); + /// ``` + #[inline] + fn merge_join_by(self, other: J, cmp_fn: F) -> MergeJoinBy + where + J: IntoIterator, + F: FnMut(&Self::Item, &J::Item) -> T, + Self: Sized, + { + merge_join_by(self, other, cmp_fn) + } + + /// Return an iterator adaptor that flattens an iterator of iterators by + /// merging them in ascending order. + /// + /// If all base iterators are sorted (ascending), the result is sorted. + /// + /// Iterator element type is `Self::Item`. 
+ /// + /// ``` + /// use itertools::Itertools; + /// + /// let a = (0..6).step_by(3); + /// let b = (1..6).step_by(3); + /// let c = (2..6).step_by(3); + /// let it = vec![a, b, c].into_iter().kmerge(); + /// itertools::assert_equal(it, vec![0, 1, 2, 3, 4, 5]); + /// ``` + #[cfg(feature = "use_alloc")] + fn kmerge(self) -> KMerge<::IntoIter> + where + Self: Sized, + Self::Item: IntoIterator, + ::Item: PartialOrd, + { + kmerge(self) + } + + /// Return an iterator adaptor that flattens an iterator of iterators by + /// merging them according to the given closure. + /// + /// The closure `first` is called with two elements *a*, *b* and should + /// return `true` if *a* is ordered before *b*. + /// + /// If all base iterators are sorted according to `first`, the result is + /// sorted. + /// + /// Iterator element type is `Self::Item`. + /// + /// ``` + /// use itertools::Itertools; + /// + /// let a = vec![-1f64, 2., 3., -5., 6., -7.]; + /// let b = vec![0., 2., -4.]; + /// let mut it = vec![a, b].into_iter().kmerge_by(|a, b| a.abs() < b.abs()); + /// assert_eq!(it.next(), Some(0.)); + /// assert_eq!(it.last(), Some(-7.)); + /// ``` + #[cfg(feature = "use_alloc")] + fn kmerge_by(self, first: F) -> KMergeBy<::IntoIter, F> + where + Self: Sized, + Self::Item: IntoIterator, + F: FnMut(&::Item, &::Item) -> bool, + { + kmerge_by(self, first) + } + + /// Return an iterator adaptor that iterates over the cartesian product of + /// the element sets of two iterators `self` and `J`. + /// + /// Iterator element type is `(Self::Item, J::Item)`. + /// + /// ``` + /// use itertools::Itertools; + /// + /// let it = (0..2).cartesian_product("αβ".chars()); + /// itertools::assert_equal(it, vec![(0, 'α'), (0, 'β'), (1, 'α'), (1, 'β')]); + /// ``` + fn cartesian_product(self, other: J) -> Product + where + Self: Sized, + Self::Item: Clone, + J: IntoIterator, + J::IntoIter: Clone, + { + adaptors::cartesian_product(self, other.into_iter()) + } + + /// Return an iterator adaptor that iterates over the cartesian product of + /// all subiterators returned by meta-iterator `self`. + /// + /// All provided iterators must yield the same `Item` type. To generate + /// the product of iterators yielding multiple types, use the + /// [`iproduct`] macro instead. + /// + /// The iterator element type is `Vec`, where `T` is the iterator element + /// of the subiterators. + /// + /// Note that the iterator is fused. + /// + /// ``` + /// use itertools::Itertools; + /// let mut multi_prod = (0..3).map(|i| (i * 2)..(i * 2 + 2)) + /// .multi_cartesian_product(); + /// assert_eq!(multi_prod.next(), Some(vec![0, 2, 4])); + /// assert_eq!(multi_prod.next(), Some(vec![0, 2, 5])); + /// assert_eq!(multi_prod.next(), Some(vec![0, 3, 4])); + /// assert_eq!(multi_prod.next(), Some(vec![0, 3, 5])); + /// assert_eq!(multi_prod.next(), Some(vec![1, 2, 4])); + /// assert_eq!(multi_prod.next(), Some(vec![1, 2, 5])); + /// assert_eq!(multi_prod.next(), Some(vec![1, 3, 4])); + /// assert_eq!(multi_prod.next(), Some(vec![1, 3, 5])); + /// assert_eq!(multi_prod.next(), None); + /// ``` + /// + /// If the adapted iterator is empty, the result is an iterator yielding a single empty vector. + /// This is known as the [nullary cartesian product](https://en.wikipedia.org/wiki/Empty_product#Nullary_Cartesian_product). 
+ /// + /// ``` + /// use itertools::Itertools; + /// let mut nullary_cartesian_product = (0..0).map(|i| (i * 2)..(i * 2 + 2)).multi_cartesian_product(); + /// assert_eq!(nullary_cartesian_product.next(), Some(vec![])); + /// assert_eq!(nullary_cartesian_product.next(), None); + /// ``` + #[cfg(feature = "use_alloc")] + fn multi_cartesian_product(self) -> MultiProduct<::IntoIter> + where + Self: Sized, + Self::Item: IntoIterator, + ::IntoIter: Clone, + ::Item: Clone, + { + adaptors::multi_cartesian_product(self) + } + + /// Return an iterator adaptor that uses the passed-in closure to + /// optionally merge together consecutive elements. + /// + /// The closure `f` is passed two elements, `previous` and `current` and may + /// return either (1) `Ok(combined)` to merge the two values or + /// (2) `Err((previous', current'))` to indicate they can't be merged. + /// In (2), the value `previous'` is emitted by the iterator. + /// Either (1) `combined` or (2) `current'` becomes the previous value + /// when coalesce continues with the next pair of elements to merge. The + /// value that remains at the end is also emitted by the iterator. + /// + /// Iterator element type is `Self::Item`. + /// + /// This iterator is *fused*. + /// + /// ``` + /// use itertools::Itertools; + /// + /// // sum same-sign runs together + /// let data = vec![-1., -2., -3., 3., 1., 0., -1.]; + /// itertools::assert_equal(data.into_iter().coalesce(|x, y| + /// if (x >= 0.) == (y >= 0.) { + /// Ok(x + y) + /// } else { + /// Err((x, y)) + /// }), + /// vec![-6., 4., -1.]); + /// ``` + fn coalesce(self, f: F) -> Coalesce + where + Self: Sized, + F: FnMut(Self::Item, Self::Item) -> Result, + { + adaptors::coalesce(self, f) + } + + /// Remove duplicates from sections of consecutive identical elements. + /// If the iterator is sorted, all elements will be unique. + /// + /// Iterator element type is `Self::Item`. + /// + /// This iterator is *fused*. + /// + /// ``` + /// use itertools::Itertools; + /// + /// let data = vec![1., 1., 2., 3., 3., 2., 2.]; + /// itertools::assert_equal(data.into_iter().dedup(), + /// vec![1., 2., 3., 2.]); + /// ``` + fn dedup(self) -> Dedup + where + Self: Sized, + Self::Item: PartialEq, + { + adaptors::dedup(self) + } + + /// Remove duplicates from sections of consecutive identical elements, + /// determining equality using a comparison function. + /// If the iterator is sorted, all elements will be unique. + /// + /// Iterator element type is `Self::Item`. + /// + /// This iterator is *fused*. + /// + /// ``` + /// use itertools::Itertools; + /// + /// let data = vec![(0, 1.), (1, 1.), (0, 2.), (0, 3.), (1, 3.), (1, 2.), (2, 2.)]; + /// itertools::assert_equal(data.into_iter().dedup_by(|x, y| x.1 == y.1), + /// vec![(0, 1.), (0, 2.), (0, 3.), (1, 2.)]); + /// ``` + fn dedup_by(self, cmp: Cmp) -> DedupBy + where + Self: Sized, + Cmp: FnMut(&Self::Item, &Self::Item) -> bool, + { + adaptors::dedup_by(self, cmp) + } + + /// Remove duplicates from sections of consecutive identical elements, while keeping a count of + /// how many repeated elements were present. + /// If the iterator is sorted, all elements will be unique. + /// + /// Iterator element type is `(usize, Self::Item)`. + /// + /// This iterator is *fused*. 
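A point that is easy to miss when reading the `dedup` family above (sketch for illustration; `unique` appears further below and needs the `use_std` feature): `dedup` only collapses consecutive repeats, whereas `unique` removes repeats anywhere in the stream.

use itertools::Itertools;

// dedup removes only adjacent duplicates; unique removes them globally.
let data = vec![1, 1, 2, 1, 3, 3];
let consecutive: Vec<_> = data.iter().copied().dedup().collect();
let global: Vec<_> = data.iter().copied().unique().collect();
assert_eq!(consecutive, vec![1, 2, 1, 3]);
assert_eq!(global, vec![1, 2, 3]);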
+ /// + /// ``` + /// use itertools::Itertools; + /// + /// let data = vec!['a', 'a', 'b', 'c', 'c', 'b', 'b']; + /// itertools::assert_equal(data.into_iter().dedup_with_count(), + /// vec![(2, 'a'), (1, 'b'), (2, 'c'), (2, 'b')]); + /// ``` + fn dedup_with_count(self) -> DedupWithCount + where + Self: Sized, + { + adaptors::dedup_with_count(self) + } + + /// Remove duplicates from sections of consecutive identical elements, while keeping a count of + /// how many repeated elements were present. + /// This will determine equality using a comparison function. + /// If the iterator is sorted, all elements will be unique. + /// + /// Iterator element type is `(usize, Self::Item)`. + /// + /// This iterator is *fused*. + /// + /// ``` + /// use itertools::Itertools; + /// + /// let data = vec![(0, 'a'), (1, 'a'), (0, 'b'), (0, 'c'), (1, 'c'), (1, 'b'), (2, 'b')]; + /// itertools::assert_equal(data.into_iter().dedup_by_with_count(|x, y| x.1 == y.1), + /// vec![(2, (0, 'a')), (1, (0, 'b')), (2, (0, 'c')), (2, (1, 'b'))]); + /// ``` + fn dedup_by_with_count(self, cmp: Cmp) -> DedupByWithCount + where + Self: Sized, + Cmp: FnMut(&Self::Item, &Self::Item) -> bool, + { + adaptors::dedup_by_with_count(self, cmp) + } + + /// Return an iterator adaptor that produces elements that appear more than once during the + /// iteration. Duplicates are detected using hash and equality. + /// + /// The iterator is stable, returning the duplicate items in the order in which they occur in + /// the adapted iterator. Each duplicate item is returned exactly once. If an item appears more + /// than twice, the second item is the item retained and the rest are discarded. + /// + /// ``` + /// use itertools::Itertools; + /// + /// let data = vec![10, 20, 30, 20, 40, 10, 50]; + /// itertools::assert_equal(data.into_iter().duplicates(), + /// vec![20, 10]); + /// ``` + #[cfg(feature = "use_std")] + fn duplicates(self) -> Duplicates + where + Self: Sized, + Self::Item: Eq + Hash, + { + duplicates_impl::duplicates(self) + } + + /// Return an iterator adaptor that produces elements that appear more than once during the + /// iteration. Duplicates are detected using hash and equality. + /// + /// Duplicates are detected by comparing the key they map to with the keying function `f` by + /// hash and equality. The keys are stored in a hash map in the iterator. + /// + /// The iterator is stable, returning the duplicate items in the order in which they occur in + /// the adapted iterator. Each duplicate item is returned exactly once. If an item appears more + /// than twice, the second item is the item retained and the rest are discarded. + /// + /// ``` + /// use itertools::Itertools; + /// + /// let data = vec!["a", "bb", "aa", "c", "ccc"]; + /// itertools::assert_equal(data.into_iter().duplicates_by(|s| s.len()), + /// vec!["aa", "c"]); + /// ``` + #[cfg(feature = "use_std")] + fn duplicates_by(self, f: F) -> DuplicatesBy + where + Self: Sized, + V: Eq + Hash, + F: FnMut(&Self::Item) -> V, + { + duplicates_impl::duplicates_by(self, f) + } + + /// Return an iterator adaptor that filters out elements that have + /// already been produced once during the iteration. Duplicates + /// are detected using hash and equality. + /// + /// Clones of visited elements are stored in a hash set in the + /// iterator. + /// + /// The iterator is stable, returning the non-duplicate items in the order + /// in which they occur in the adapted iterator. In a set of duplicate + /// items, the first item encountered is the item retained. 
+ /// + /// ``` + /// use itertools::Itertools; + /// + /// let data = vec![10, 20, 30, 20, 40, 10, 50]; + /// itertools::assert_equal(data.into_iter().unique(), + /// vec![10, 20, 30, 40, 50]); + /// ``` + #[cfg(feature = "use_std")] + fn unique(self) -> Unique + where + Self: Sized, + Self::Item: Clone + Eq + Hash, + { + unique_impl::unique(self) + } + + /// Return an iterator adaptor that filters out elements that have + /// already been produced once during the iteration. + /// + /// Duplicates are detected by comparing the key they map to + /// with the keying function `f` by hash and equality. + /// The keys are stored in a hash set in the iterator. + /// + /// The iterator is stable, returning the non-duplicate items in the order + /// in which they occur in the adapted iterator. In a set of duplicate + /// items, the first item encountered is the item retained. + /// + /// ``` + /// use itertools::Itertools; + /// + /// let data = vec!["a", "bb", "aa", "c", "ccc"]; + /// itertools::assert_equal(data.into_iter().unique_by(|s| s.len()), + /// vec!["a", "bb", "ccc"]); + /// ``` + #[cfg(feature = "use_std")] + fn unique_by(self, f: F) -> UniqueBy + where + Self: Sized, + V: Eq + Hash, + F: FnMut(&Self::Item) -> V, + { + unique_impl::unique_by(self, f) + } + + /// Return an iterator adaptor that borrows from this iterator and + /// takes items while the closure `accept` returns `true`. + /// + /// This adaptor can only be used on iterators that implement `PeekingNext` + /// like `.peekable()`, `put_back` and a few other collection iterators. + /// + /// The last and rejected element (first `false`) is still available when + /// `peeking_take_while` is done. + /// + /// + /// See also [`.take_while_ref()`](Itertools::take_while_ref) + /// which is a similar adaptor. + fn peeking_take_while(&mut self, accept: F) -> PeekingTakeWhile + where + Self: Sized + PeekingNext, + F: FnMut(&Self::Item) -> bool, + { + peeking_take_while::peeking_take_while(self, accept) + } + + /// Return an iterator adaptor that borrows from a `Clone`-able iterator + /// to only pick off elements while the predicate `accept` returns `true`. + /// + /// It uses the `Clone` trait to restore the original iterator so that the + /// last and rejected element (first `false`) is still available when + /// `take_while_ref` is done. + /// + /// ``` + /// use itertools::Itertools; + /// + /// let mut hexadecimals = "0123456789abcdef".chars(); + /// + /// let decimals = hexadecimals.take_while_ref(|c| c.is_numeric()) + /// .collect::(); + /// assert_eq!(decimals, "0123456789"); + /// assert_eq!(hexadecimals.next(), Some('a')); + /// + /// ``` + fn take_while_ref(&mut self, accept: F) -> TakeWhileRef + where + Self: Clone, + F: FnMut(&Self::Item) -> bool, + { + adaptors::take_while_ref(self, accept) + } + + /// Returns an iterator adaptor that consumes elements while the given + /// predicate is `true`, *including* the element for which the predicate + /// first returned `false`. + /// + /// The [`.take_while()`][std::iter::Iterator::take_while] adaptor is useful + /// when you want items satisfying a predicate, but to know when to stop + /// taking elements, we have to consume that first element that doesn't + /// satisfy the predicate. This adaptor includes that element where + /// [`.take_while()`][std::iter::Iterator::take_while] would drop it. 
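`peeking_take_while` is the one adaptor in this stretch without a doctest, so a small illustrative sketch (assuming the `PeekingNext` impl for `std::iter::Peekable`): unlike plain `take_while`, the first rejected element is not lost.

use itertools::Itertools;

// The element that fails the predicate stays available on the source iterator.
let mut it = (0..10).peekable();
let small: Vec<_> = it.peeking_take_while(|&x| x < 3).collect();
assert_eq!(small, vec![0, 1, 2]);
assert_eq!(it.next(), Some(3)); // 3 was peeked, not consumed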
+ /// + /// The [`.take_while_ref()`][crate::Itertools::take_while_ref] adaptor + /// serves a similar purpose, but this adaptor doesn't require [`Clone`]ing + /// the underlying elements. + /// + /// ```rust + /// # use itertools::Itertools; + /// let items = vec![1, 2, 3, 4, 5]; + /// let filtered: Vec<_> = items + /// .into_iter() + /// .take_while_inclusive(|&n| n % 3 != 0) + /// .collect(); + /// + /// assert_eq!(filtered, vec![1, 2, 3]); + /// ``` + /// + /// ```rust + /// # use itertools::Itertools; + /// let items = vec![1, 2, 3, 4, 5]; + /// + /// let take_while_inclusive_result: Vec<_> = items + /// .iter() + /// .copied() + /// .take_while_inclusive(|&n| n % 3 != 0) + /// .collect(); + /// let take_while_result: Vec<_> = items + /// .into_iter() + /// .take_while(|&n| n % 3 != 0) + /// .collect(); + /// + /// assert_eq!(take_while_inclusive_result, vec![1, 2, 3]); + /// assert_eq!(take_while_result, vec![1, 2]); + /// // both iterators have the same items remaining at this point---the 3 + /// // is lost from the `take_while` vec + /// ``` + /// + /// ```rust + /// # use itertools::Itertools; + /// #[derive(Debug, PartialEq)] + /// struct NoCloneImpl(i32); + /// + /// let non_clonable_items: Vec<_> = vec![1, 2, 3, 4, 5] + /// .into_iter() + /// .map(NoCloneImpl) + /// .collect(); + /// let filtered: Vec<_> = non_clonable_items + /// .into_iter() + /// .take_while_inclusive(|n| n.0 % 3 != 0) + /// .collect(); + /// let expected: Vec<_> = vec![1, 2, 3].into_iter().map(NoCloneImpl).collect(); + /// assert_eq!(filtered, expected); + fn take_while_inclusive(self, accept: F) -> TakeWhileInclusive + where + Self: Sized, + F: FnMut(&Self::Item) -> bool, + { + take_while_inclusive::TakeWhileInclusive::new(self, accept) + } + + /// Return an iterator adaptor that filters `Option` iterator elements + /// and produces `A`. Stops on the first `None` encountered. + /// + /// Iterator element type is `A`, the unwrapped element. + /// + /// ``` + /// use itertools::Itertools; + /// + /// // List all hexadecimal digits + /// itertools::assert_equal( + /// (0..).map(|i| std::char::from_digit(i, 16)).while_some(), + /// "0123456789abcdef".chars()); + /// + /// ``` + fn while_some(self) -> WhileSome + where + Self: Sized + Iterator>, + { + adaptors::while_some(self) + } + + /// Return an iterator adaptor that iterates over the combinations of the + /// elements from an iterator. + /// + /// Iterator element can be any homogeneous tuple of type `Self::Item` with + /// size up to 12. + /// + /// # Guarantees + /// + /// If the adapted iterator is deterministic, + /// this iterator adapter yields items in a reliable order. 
+ /// + /// ``` + /// use itertools::Itertools; + /// + /// let mut v = Vec::new(); + /// for (a, b) in (1..5).tuple_combinations() { + /// v.push((a, b)); + /// } + /// assert_eq!(v, vec![(1, 2), (1, 3), (1, 4), (2, 3), (2, 4), (3, 4)]); + /// + /// let mut it = (1..5).tuple_combinations(); + /// assert_eq!(Some((1, 2, 3)), it.next()); + /// assert_eq!(Some((1, 2, 4)), it.next()); + /// assert_eq!(Some((1, 3, 4)), it.next()); + /// assert_eq!(Some((2, 3, 4)), it.next()); + /// assert_eq!(None, it.next()); + /// + /// // this requires a type hint + /// let it = (1..5).tuple_combinations::<(_, _, _)>(); + /// itertools::assert_equal(it, vec![(1, 2, 3), (1, 2, 4), (1, 3, 4), (2, 3, 4)]); + /// + /// // you can also specify the complete type + /// use itertools::TupleCombinations; + /// use std::ops::Range; + /// + /// let it: TupleCombinations, (u32, u32, u32)> = (1..5).tuple_combinations(); + /// itertools::assert_equal(it, vec![(1, 2, 3), (1, 2, 4), (1, 3, 4), (2, 3, 4)]); + /// ``` + fn tuple_combinations(self) -> TupleCombinations + where + Self: Sized + Clone, + Self::Item: Clone, + T: adaptors::HasCombination, + { + adaptors::tuple_combinations(self) + } + + /// Return an iterator adaptor that iterates over the `k`-length combinations of + /// the elements from an iterator. + /// + /// Iterator element type is `Vec`. The iterator produces a new `Vec` per iteration, + /// and clones the iterator elements. + /// + /// # Guarantees + /// + /// If the adapted iterator is deterministic, + /// this iterator adapter yields items in a reliable order. + /// + /// ``` + /// use itertools::Itertools; + /// + /// let it = (1..5).combinations(3); + /// itertools::assert_equal(it, vec![ + /// vec![1, 2, 3], + /// vec![1, 2, 4], + /// vec![1, 3, 4], + /// vec![2, 3, 4], + /// ]); + /// ``` + /// + /// Note: Combinations does not take into account the equality of the iterated values. + /// ``` + /// use itertools::Itertools; + /// + /// let it = vec![1, 2, 2].into_iter().combinations(2); + /// itertools::assert_equal(it, vec![ + /// vec![1, 2], // Note: these are the same + /// vec![1, 2], // Note: these are the same + /// vec![2, 2], + /// ]); + /// ``` + #[cfg(feature = "use_alloc")] + fn combinations(self, k: usize) -> Combinations + where + Self: Sized, + Self::Item: Clone, + { + combinations::combinations(self, k) + } + + /// Return an iterator that iterates over the `k`-length combinations of + /// the elements from an iterator, with replacement. + /// + /// Iterator element type is `Vec`. The iterator produces a new `Vec` per iteration, + /// and clones the iterator elements. + /// + /// ``` + /// use itertools::Itertools; + /// + /// let it = (1..4).combinations_with_replacement(2); + /// itertools::assert_equal(it, vec![ + /// vec![1, 1], + /// vec![1, 2], + /// vec![1, 3], + /// vec![2, 2], + /// vec![2, 3], + /// vec![3, 3], + /// ]); + /// ``` + #[cfg(feature = "use_alloc")] + fn combinations_with_replacement(self, k: usize) -> CombinationsWithReplacement + where + Self: Sized, + Self::Item: Clone, + { + combinations_with_replacement::combinations_with_replacement(self, k) + } + + /// Return an iterator adaptor that iterates over all k-permutations of the + /// elements from an iterator. + /// + /// Iterator element type is `Vec` with length `k`. The iterator + /// produces a new `Vec` per iteration, and clones the iterator elements. + /// + /// If `k` is greater than the length of the input iterator, the resultant + /// iterator adaptor will be empty. 
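+ ///
+ /// A small illustrative sketch of that edge case (values chosen only for the example):
+ ///
+ /// ```
+ /// use itertools::Itertools;
+ ///
+ /// // only three elements, so there are no 4-permutations
+ /// let mut perms = (0..3).permutations(4);
+ /// assert_eq!(perms.next(), None);
+ /// ```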
+ /// + /// If you are looking for permutations with replacements, + /// use `repeat_n(iter, k).multi_cartesian_product()` instead. + /// + /// ``` + /// use itertools::Itertools; + /// + /// let perms = (5..8).permutations(2); + /// itertools::assert_equal(perms, vec![ + /// vec![5, 6], + /// vec![5, 7], + /// vec![6, 5], + /// vec![6, 7], + /// vec![7, 5], + /// vec![7, 6], + /// ]); + /// ``` + /// + /// Note: Permutations does not take into account the equality of the iterated values. + /// + /// ``` + /// use itertools::Itertools; + /// + /// let it = vec![2, 2].into_iter().permutations(2); + /// itertools::assert_equal(it, vec![ + /// vec![2, 2], // Note: these are the same + /// vec![2, 2], // Note: these are the same + /// ]); + /// ``` + /// + /// Note: The source iterator is collected lazily, and will not be + /// re-iterated if the permutations adaptor is completed and re-iterated. + #[cfg(feature = "use_alloc")] + fn permutations(self, k: usize) -> Permutations + where + Self: Sized, + Self::Item: Clone, + { + permutations::permutations(self, k) + } + + /// Return an iterator that iterates through the powerset of the elements from an + /// iterator. + /// + /// Iterator element type is `Vec`. The iterator produces a new `Vec` + /// per iteration, and clones the iterator elements. + /// + /// The powerset of a set contains all subsets including the empty set and the full + /// input set. A powerset has length _2^n_ where _n_ is the length of the input + /// set. + /// + /// Each `Vec` produced by this iterator represents a subset of the elements + /// produced by the source iterator. + /// + /// ``` + /// use itertools::Itertools; + /// + /// let sets = (1..4).powerset().collect::>(); + /// itertools::assert_equal(sets, vec![ + /// vec![], + /// vec![1], + /// vec![2], + /// vec![3], + /// vec![1, 2], + /// vec![1, 3], + /// vec![2, 3], + /// vec![1, 2, 3], + /// ]); + /// ``` + #[cfg(feature = "use_alloc")] + fn powerset(self) -> Powerset + where + Self: Sized, + Self::Item: Clone, + { + powerset::powerset(self) + } + + /// Return an iterator adaptor that pads the sequence to a minimum length of + /// `min` by filling missing elements using a closure `f`. + /// + /// Iterator element type is `Self::Item`. + /// + /// ``` + /// use itertools::Itertools; + /// + /// let it = (0..5).pad_using(10, |i| 2*i); + /// itertools::assert_equal(it, vec![0, 1, 2, 3, 4, 10, 12, 14, 16, 18]); + /// + /// let it = (0..10).pad_using(5, |i| 2*i); + /// itertools::assert_equal(it, vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 9]); + /// + /// let it = (0..5).pad_using(10, |i| 2*i).rev(); + /// itertools::assert_equal(it, vec![18, 16, 14, 12, 10, 4, 3, 2, 1, 0]); + /// ``` + fn pad_using(self, min: usize, f: F) -> PadUsing + where + Self: Sized, + F: FnMut(usize) -> Self::Item, + { + pad_tail::pad_using(self, min, f) + } + + /// Return an iterator adaptor that combines each element with a `Position` to + /// ease special-case handling of the first or last elements. 
+ /// + /// Iterator element type is + /// [`(Position, Self::Item)`](Position) + /// + /// ``` + /// use itertools::{Itertools, Position}; + /// + /// let it = (0..4).with_position(); + /// itertools::assert_equal(it, + /// vec![(Position::First, 0), + /// (Position::Middle, 1), + /// (Position::Middle, 2), + /// (Position::Last, 3)]); + /// + /// let it = (0..1).with_position(); + /// itertools::assert_equal(it, vec![(Position::Only, 0)]); + /// ``` + fn with_position(self) -> WithPosition + where + Self: Sized, + { + with_position::with_position(self) + } + + /// Return an iterator adaptor that yields the indices of all elements + /// satisfying a predicate, counted from the start of the iterator. + /// + /// Equivalent to `iter.enumerate().filter(|(_, v)| predicate(*v)).map(|(i, _)| i)`. + /// + /// ``` + /// use itertools::Itertools; + /// + /// let data = vec![1, 2, 3, 3, 4, 6, 7, 9]; + /// itertools::assert_equal(data.iter().positions(|v| v % 2 == 0), vec![1, 4, 5]); + /// + /// itertools::assert_equal(data.iter().positions(|v| v % 2 == 1).rev(), vec![7, 6, 3, 2, 0]); + /// ``` + fn positions
<P>
(self, predicate: P) -> Positions + where + Self: Sized, + P: FnMut(Self::Item) -> bool, + { + adaptors::positions(self, predicate) + } + + /// Return an iterator adaptor that applies a mutating function + /// to each element before yielding it. + /// + /// ``` + /// use itertools::Itertools; + /// + /// let input = vec![vec![1], vec![3, 2, 1]]; + /// let it = input.into_iter().update(|mut v| v.push(0)); + /// itertools::assert_equal(it, vec![vec![1, 0], vec![3, 2, 1, 0]]); + /// ``` + fn update(self, updater: F) -> Update + where + Self: Sized, + F: FnMut(&mut Self::Item), + { + adaptors::update(self, updater) + } + + // non-adaptor methods + /// Advances the iterator and returns the next items grouped in a tuple of + /// a specific size (up to 12). + /// + /// If there are enough elements to be grouped in a tuple, then the tuple is + /// returned inside `Some`, otherwise `None` is returned. + /// + /// ``` + /// use itertools::Itertools; + /// + /// let mut iter = 1..5; + /// + /// assert_eq!(Some((1, 2)), iter.next_tuple()); + /// ``` + fn next_tuple(&mut self) -> Option + where + Self: Sized + Iterator, + T: traits::HomogeneousTuple, + { + T::collect_from_iter_no_buf(self) + } + + /// Collects all items from the iterator into a tuple of a specific size + /// (up to 12). + /// + /// If the number of elements inside the iterator is **exactly** equal to + /// the tuple size, then the tuple is returned inside `Some`, otherwise + /// `None` is returned. + /// + /// ``` + /// use itertools::Itertools; + /// + /// let iter = 1..3; + /// + /// if let Some((x, y)) = iter.collect_tuple() { + /// assert_eq!((x, y), (1, 2)) + /// } else { + /// panic!("Expected two elements") + /// } + /// ``` + fn collect_tuple(mut self) -> Option + where + Self: Sized + Iterator, + T: traits::HomogeneousTuple, + { + match self.next_tuple() { + elt @ Some(_) => match self.next() { + Some(_) => None, + None => elt, + }, + _ => None, + } + } + + /// Find the position and value of the first element satisfying a predicate. + /// + /// The iterator is not advanced past the first element found. + /// + /// ``` + /// use itertools::Itertools; + /// + /// let text = "Hα"; + /// assert_eq!(text.chars().find_position(|ch| ch.is_lowercase()), Some((1, 'α'))); + /// ``` + fn find_position
<P>
(&mut self, mut pred: P) -> Option<(usize, Self::Item)> + where + P: FnMut(&Self::Item) -> bool, + { + self.enumerate().find(|(_, elt)| pred(elt)) + } + /// Find the value of the first element satisfying a predicate or return the last element, if any. + /// + /// The iterator is not advanced past the first element found. + /// + /// ``` + /// use itertools::Itertools; + /// + /// let numbers = [1, 2, 3, 4]; + /// assert_eq!(numbers.iter().find_or_last(|&&x| x > 5), Some(&4)); + /// assert_eq!(numbers.iter().find_or_last(|&&x| x > 2), Some(&3)); + /// assert_eq!(std::iter::empty::().find_or_last(|&x| x > 5), None); + /// ``` + fn find_or_last
<P>
(mut self, mut predicate: P) -> Option + where + Self: Sized, + P: FnMut(&Self::Item) -> bool, + { + let mut prev = None; + self.find_map(|x| { + if predicate(&x) { + Some(x) + } else { + prev = Some(x); + None + } + }) + .or(prev) + } + /// Find the value of the first element satisfying a predicate or return the first element, if any. + /// + /// The iterator is not advanced past the first element found. + /// + /// ``` + /// use itertools::Itertools; + /// + /// let numbers = [1, 2, 3, 4]; + /// assert_eq!(numbers.iter().find_or_first(|&&x| x > 5), Some(&1)); + /// assert_eq!(numbers.iter().find_or_first(|&&x| x > 2), Some(&3)); + /// assert_eq!(std::iter::empty::().find_or_first(|&x| x > 5), None); + /// ``` + fn find_or_first
<P>
(mut self, mut predicate: P) -> Option + where + Self: Sized, + P: FnMut(&Self::Item) -> bool, + { + let first = self.next()?; + Some(if predicate(&first) { + first + } else { + self.find(|x| predicate(x)).unwrap_or(first) + }) + } + /// Returns `true` if the given item is present in this iterator. + /// + /// This method is short-circuiting. If the given item is present in this + /// iterator, this method will consume the iterator up-to-and-including + /// the item. If the given item is not present in this iterator, the + /// iterator will be exhausted. + /// + /// ``` + /// use itertools::Itertools; + /// + /// #[derive(PartialEq, Debug)] + /// enum Enum { A, B, C, D, E, } + /// + /// let mut iter = vec![Enum::A, Enum::B, Enum::C, Enum::D].into_iter(); + /// + /// // search `iter` for `B` + /// assert_eq!(iter.contains(&Enum::B), true); + /// // `B` was found, so the iterator now rests at the item after `B` (i.e, `C`). + /// assert_eq!(iter.next(), Some(Enum::C)); + /// + /// // search `iter` for `E` + /// assert_eq!(iter.contains(&Enum::E), false); + /// // `E` wasn't found, so `iter` is now exhausted + /// assert_eq!(iter.next(), None); + /// ``` + fn contains(&mut self, query: &Q) -> bool + where + Self: Sized, + Self::Item: Borrow, + Q: PartialEq, + { + self.any(|x| x.borrow() == query) + } + + /// Check whether all elements compare equal. + /// + /// Empty iterators are considered to have equal elements: + /// + /// ``` + /// use itertools::Itertools; + /// + /// let data = vec![1, 1, 1, 2, 2, 3, 3, 3, 4, 5, 5]; + /// assert!(!data.iter().all_equal()); + /// assert!(data[0..3].iter().all_equal()); + /// assert!(data[3..5].iter().all_equal()); + /// assert!(data[5..8].iter().all_equal()); + /// + /// let data : Option = None; + /// assert!(data.into_iter().all_equal()); + /// ``` + fn all_equal(&mut self) -> bool + where + Self: Sized, + Self::Item: PartialEq, + { + match self.next() { + None => true, + Some(a) => self.all(|x| a == x), + } + } + + /// If there are elements and they are all equal, return a single copy of that element. + /// If there are no elements, return an Error containing None. + /// If there are elements and they are not all equal, return a tuple containing the first + /// two non-equal elements found. + /// + /// ``` + /// use itertools::Itertools; + /// + /// let data = vec![1, 1, 1, 2, 2, 3, 3, 3, 4, 5, 5]; + /// assert_eq!(data.iter().all_equal_value(), Err(Some((&1, &2)))); + /// assert_eq!(data[0..3].iter().all_equal_value(), Ok(&1)); + /// assert_eq!(data[3..5].iter().all_equal_value(), Ok(&2)); + /// assert_eq!(data[5..8].iter().all_equal_value(), Ok(&3)); + /// + /// let data : Option = None; + /// assert_eq!(data.into_iter().all_equal_value(), Err(None)); + /// ``` + #[allow(clippy::type_complexity)] + fn all_equal_value(&mut self) -> Result> + where + Self: Sized, + Self::Item: PartialEq, + { + let first = self.next().ok_or(None)?; + let other = self.find(|x| x != &first); + if let Some(other) = other { + Err(Some((first, other))) + } else { + Ok(first) + } + } + + /// Check whether all elements are unique (non equal). 
+ /// + /// Empty iterators are considered to have unique elements: + /// + /// ``` + /// use itertools::Itertools; + /// + /// let data = vec![1, 2, 3, 4, 1, 5]; + /// assert!(!data.iter().all_unique()); + /// assert!(data[0..4].iter().all_unique()); + /// assert!(data[1..6].iter().all_unique()); + /// + /// let data : Option = None; + /// assert!(data.into_iter().all_unique()); + /// ``` + #[cfg(feature = "use_std")] + fn all_unique(&mut self) -> bool + where + Self: Sized, + Self::Item: Eq + Hash, + { + let mut used = HashSet::new(); + self.all(move |elt| used.insert(elt)) + } + + /// Consume the first `n` elements from the iterator eagerly, + /// and return the same iterator again. + /// + /// It works similarly to `.skip(n)` except it is eager and + /// preserves the iterator type. + /// + /// ``` + /// use itertools::Itertools; + /// + /// let mut iter = "αβγ".chars().dropping(2); + /// itertools::assert_equal(iter, "γ".chars()); + /// ``` + /// + /// *Fusing notes: if the iterator is exhausted by dropping, + /// the result of calling `.next()` again depends on the iterator implementation.* + fn dropping(mut self, n: usize) -> Self + where + Self: Sized, + { + if n > 0 { + self.nth(n - 1); + } + self + } + + /// Consume the last `n` elements from the iterator eagerly, + /// and return the same iterator again. + /// + /// This is only possible on double ended iterators. `n` may be + /// larger than the number of elements. + /// + /// Note: This method is eager, dropping the back elements immediately and + /// preserves the iterator type. + /// + /// ``` + /// use itertools::Itertools; + /// + /// let init = vec![0, 3, 6, 9].into_iter().dropping_back(1); + /// itertools::assert_equal(init, vec![0, 3, 6]); + /// ``` + fn dropping_back(mut self, n: usize) -> Self + where + Self: Sized + DoubleEndedIterator, + { + if n > 0 { + (&mut self).rev().nth(n - 1); + } + self + } + + /// Combine all an iterator's elements into one element by using [`Extend`]. + /// + /// This combinator will extend the first item with each of the rest of the + /// items of the iterator. If the iterator is empty, the default value of + /// `I::Item` is returned. + /// + /// ```rust + /// use itertools::Itertools; + /// + /// let input = vec![vec![1], vec![2, 3], vec![4, 5, 6]]; + /// assert_eq!(input.into_iter().concat(), + /// vec![1, 2, 3, 4, 5, 6]); + /// ``` + fn concat(self) -> Self::Item + where + Self: Sized, + Self::Item: + Extend<<::Item as IntoIterator>::Item> + IntoIterator + Default, + { + concat(self) + } + + /// `.collect_vec()` is simply a type specialization of [`Iterator::collect`], + /// for convenience. + #[cfg(feature = "use_alloc")] + fn collect_vec(self) -> Vec + where + Self: Sized, + { + self.collect() + } + + /// `.try_collect()` is more convenient way of writing + /// `.collect::>()` + /// + /// # Example + /// + /// ``` + /// use std::{fs, io}; + /// use itertools::Itertools; + /// + /// fn process_dir_entries(entries: &[fs::DirEntry]) { + /// // ... + /// } + /// + /// fn do_stuff() -> std::io::Result<()> { + /// let entries: Vec<_> = fs::read_dir(".")?.try_collect()?; + /// process_dir_entries(&entries); + /// + /// Ok(()) + /// } + /// ``` + fn try_collect(self) -> Result + where + Self: Sized + Iterator>, + Result: FromIterator>, + { + self.collect() + } + + /// Assign to each reference in `self` from the `from` iterator, + /// stopping at the shortest of the two iterators. 
+ /// + /// The `from` iterator is queried for its next element before the `self` + /// iterator, and if either is exhausted the method is done. + /// + /// Return the number of elements written. + /// + /// ``` + /// use itertools::Itertools; + /// + /// let mut xs = [0; 4]; + /// xs.iter_mut().set_from(1..); + /// assert_eq!(xs, [1, 2, 3, 4]); + /// ``` + #[inline] + fn set_from<'a, A: 'a, J>(&mut self, from: J) -> usize + where + Self: Iterator, + J: IntoIterator, + { + from.into_iter() + .zip(self) + .map(|(new, old)| *old = new) + .count() + } + + /// Combine all iterator elements into one `String`, separated by `sep`. + /// + /// Use the `Display` implementation of each element. + /// + /// ``` + /// use itertools::Itertools; + /// + /// assert_eq!(["a", "b", "c"].iter().join(", "), "a, b, c"); + /// assert_eq!([1, 2, 3].iter().join(", "), "1, 2, 3"); + /// ``` + #[cfg(feature = "use_alloc")] + fn join(&mut self, sep: &str) -> String + where + Self::Item: std::fmt::Display, + { + match self.next() { + None => String::new(), + Some(first_elt) => { + // estimate lower bound of capacity needed + let (lower, _) = self.size_hint(); + let mut result = String::with_capacity(sep.len() * lower); + write!(&mut result, "{}", first_elt).unwrap(); + self.for_each(|elt| { + result.push_str(sep); + write!(&mut result, "{}", elt).unwrap(); + }); + result + } + } + } + + /// Format all iterator elements, separated by `sep`. + /// + /// All elements are formatted (any formatting trait) + /// with `sep` inserted between each element. + /// + /// **Panics** if the formatter helper is formatted more than once. + /// + /// ``` + /// use itertools::Itertools; + /// + /// let data = [1.1, 2.71828, -3.]; + /// assert_eq!( + /// format!("{:.2}", data.iter().format(", ")), + /// "1.10, 2.72, -3.00"); + /// ``` + fn format(self, sep: &str) -> Format + where + Self: Sized, + { + format::new_format_default(self, sep) + } + + /// Format all iterator elements, separated by `sep`. + /// + /// This is a customizable version of [`.format()`](Itertools::format). + /// + /// The supplied closure `format` is called once per iterator element, + /// with two arguments: the element and a callback that takes a + /// `&Display` value, i.e. any reference to type that implements `Display`. + /// + /// Using `&format_args!(...)` is the most versatile way to apply custom + /// element formatting. The callback can be called multiple times if needed. + /// + /// **Panics** if the formatter helper is formatted more than once. + /// + /// ``` + /// use itertools::Itertools; + /// + /// let data = [1.1, 2.71828, -3.]; + /// let data_formatter = data.iter().format_with(", ", |elt, f| f(&format_args!("{:.2}", elt))); + /// assert_eq!(format!("{}", data_formatter), + /// "1.10, 2.72, -3.00"); + /// + /// // .format_with() is recursively composable + /// let matrix = [[1., 2., 3.], + /// [4., 5., 6.]]; + /// let matrix_formatter = matrix.iter().format_with("\n", |row, f| { + /// f(&row.iter().format_with(", ", |elt, g| g(&elt))) + /// }); + /// assert_eq!(format!("{}", matrix_formatter), + /// "1, 2, 3\n4, 5, 6"); + /// + /// + /// ``` + fn format_with(self, sep: &str, format: F) -> FormatWith + where + Self: Sized, + F: FnMut(Self::Item, &mut dyn FnMut(&dyn fmt::Display) -> fmt::Result) -> fmt::Result, + { + format::new_format(self, sep, format) + } + + /// Fold `Result` values from an iterator. + /// + /// Only `Ok` values are folded. If no error is encountered, the folded + /// value is returned inside `Ok`. 
Otherwise, the operation terminates + /// and returns the first `Err` value it encounters. No iterator elements are + /// consumed after the first error. + /// + /// The first accumulator value is the `start` parameter. + /// Each iteration passes the accumulator value and the next value inside `Ok` + /// to the fold function `f` and its return value becomes the new accumulator value. + /// + /// For example the sequence *Ok(1), Ok(2), Ok(3)* will result in a + /// computation like this: + /// + /// ```no_run + /// # let start = 0; + /// # let f = |x, y| x + y; + /// let mut accum = start; + /// accum = f(accum, 1); + /// accum = f(accum, 2); + /// accum = f(accum, 3); + /// ``` + /// + /// With a `start` value of 0 and an addition as folding function, + /// this effectively results in *((0 + 1) + 2) + 3* + /// + /// ``` + /// use std::ops::Add; + /// use itertools::Itertools; + /// + /// let values = [1, 2, -2, -1, 2, 1]; + /// assert_eq!( + /// values.iter() + /// .map(Ok::<_, ()>) + /// .fold_ok(0, Add::add), + /// Ok(3) + /// ); + /// assert!( + /// values.iter() + /// .map(|&x| if x >= 0 { Ok(x) } else { Err("Negative number") }) + /// .fold_ok(0, Add::add) + /// .is_err() + /// ); + /// ``` + fn fold_ok(&mut self, mut start: B, mut f: F) -> Result + where + Self: Iterator>, + F: FnMut(B, A) -> B, + { + for elt in self { + match elt { + Ok(v) => start = f(start, v), + Err(u) => return Err(u), + } + } + Ok(start) + } + + /// Fold `Option` values from an iterator. + /// + /// Only `Some` values are folded. If no `None` is encountered, the folded + /// value is returned inside `Some`. Otherwise, the operation terminates + /// and returns `None`. No iterator elements are consumed after the `None`. + /// + /// This is the `Option` equivalent to [`fold_ok`](Itertools::fold_ok). + /// + /// ``` + /// use std::ops::Add; + /// use itertools::Itertools; + /// + /// let mut values = vec![Some(1), Some(2), Some(-2)].into_iter(); + /// assert_eq!(values.fold_options(5, Add::add), Some(5 + 1 + 2 - 2)); + /// + /// let mut more_values = vec![Some(2), None, Some(0)].into_iter(); + /// assert!(more_values.fold_options(0, Add::add).is_none()); + /// assert_eq!(more_values.next().unwrap(), Some(0)); + /// ``` + fn fold_options(&mut self, mut start: B, mut f: F) -> Option + where + Self: Iterator>, + F: FnMut(B, A) -> B, + { + for elt in self { + match elt { + Some(v) => start = f(start, v), + None => return None, + } + } + Some(start) + } + + /// Accumulator of the elements in the iterator. + /// + /// Like `.fold()`, without a base case. If the iterator is + /// empty, return `None`. With just one element, return it. + /// Otherwise elements are accumulated in sequence using the closure `f`. + /// + /// ``` + /// use itertools::Itertools; + /// + /// assert_eq!((0..10).fold1(|x, y| x + y).unwrap_or(0), 45); + /// assert_eq!((0..0).fold1(|x, y| x * y), None); + /// ``` + #[deprecated( + note = "Use [`Iterator::reduce`](https://doc.rust-lang.org/std/iter/trait.Iterator.html#method.reduce) instead", + since = "0.10.2" + )] + fn fold1(mut self, f: F) -> Option + where + F: FnMut(Self::Item, Self::Item) -> Self::Item, + Self: Sized, + { + self.next().map(move |x| self.fold(x, f)) + } + + /// Accumulate the elements in the iterator in a tree-like manner. + /// + /// You can think of it as, while there's more than one item, repeatedly + /// combining adjacent items. It does so in bottom-up-merge-sort order, + /// however, so that it needs only logarithmic stack space. 
+ /// + /// This produces a call tree like the following (where the calls under + /// an item are done after reading that item): + /// + /// ```text + /// 1 2 3 4 5 6 7 + /// │ │ │ │ │ │ │ + /// └─f └─f └─f │ + /// │ │ │ │ + /// └───f └─f + /// │ │ + /// └─────f + /// ``` + /// + /// Which, for non-associative functions, will typically produce a different + /// result than the linear call tree used by [`Iterator::reduce`]: + /// + /// ```text + /// 1 2 3 4 5 6 7 + /// │ │ │ │ │ │ │ + /// └─f─f─f─f─f─f + /// ``` + /// + /// If `f` is associative you should also decide carefully: + /// + /// - if `f` is a trivial operation like `u32::wrapping_add`, prefer the normal + /// [`Iterator::reduce`] instead since it will most likely result in the generation of simpler + /// code because the compiler is able to optimize it + /// - otherwise if `f` is non-trivial like `format!`, you should use `tree_reduce` since it + /// reduces the number of operations from `O(n)` to `O(ln(n))` + /// + /// Here "non-trivial" means: + /// + /// - any allocating operation + /// - any function that is a composition of many operations + /// + /// ``` + /// use itertools::Itertools; + /// + /// // The same tree as above + /// let num_strings = (1..8).map(|x| x.to_string()); + /// assert_eq!(num_strings.tree_reduce(|x, y| format!("f({}, {})", x, y)), + /// Some(String::from("f(f(f(1, 2), f(3, 4)), f(f(5, 6), 7))"))); + /// + /// // Like fold1, an empty iterator produces None + /// assert_eq!((0..0).tree_reduce(|x, y| x * y), None); + /// + /// // tree_reduce matches fold1 for associative operations... + /// assert_eq!((0..10).tree_reduce(|x, y| x + y), + /// (0..10).fold1(|x, y| x + y)); + /// // ...but not for non-associative ones + /// assert_ne!((0..10).tree_reduce(|x, y| x - y), + /// (0..10).fold1(|x, y| x - y)); + /// ``` + fn tree_reduce(mut self, mut f: F) -> Option + where + F: FnMut(Self::Item, Self::Item) -> Self::Item, + Self: Sized, + { + type State = Result>; + + fn inner0(it: &mut II, f: &mut FF) -> State + where + II: Iterator, + FF: FnMut(T, T) -> T, + { + // This function could be replaced with `it.next().ok_or(None)`, + // but half the useful tree_reduce work is combining adjacent items, + // so put that in a form that LLVM is more likely to optimize well. + + let a = if let Some(v) = it.next() { + v + } else { + return Err(None); + }; + let b = if let Some(v) = it.next() { + v + } else { + return Err(Some(a)); + }; + Ok(f(a, b)) + } + + fn inner(stop: usize, it: &mut II, f: &mut FF) -> State + where + II: Iterator, + FF: FnMut(T, T) -> T, + { + let mut x = inner0(it, f)?; + for height in 0..stop { + // Try to get another tree the same size with which to combine it, + // creating a new tree that's twice as big for next time around. + let next = if height == 0 { + inner0(it, f) + } else { + inner(height, it, f) + }; + match next { + Ok(y) => x = f(x, y), + + // If we ran out of items, combine whatever we did manage + // to get. It's better combined with the current value + // than something in a parent frame, because the tree in + // the parent is always as least as big as this one. + Err(None) => return Err(Some(x)), + Err(Some(y)) => return Err(Some(f(x, y))), + } + } + Ok(x) + } + + match inner(usize::MAX, &mut self, &mut f) { + Err(x) => x, + _ => unreachable!(), + } + } + + /// See [`.tree_reduce()`](Itertools::tree_reduce). 
+ #[deprecated(note = "Use .tree_reduce() instead", since = "0.13.0")] + fn tree_fold1(self, f: F) -> Option + where + F: FnMut(Self::Item, Self::Item) -> Self::Item, + Self: Sized, + { + self.tree_reduce(f) + } + + /// An iterator method that applies a function, producing a single, final value. + /// + /// `fold_while()` is basically equivalent to [`Iterator::fold`] but with additional support for + /// early exit via short-circuiting. + /// + /// ``` + /// use itertools::Itertools; + /// use itertools::FoldWhile::{Continue, Done}; + /// + /// let numbers = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]; + /// + /// let mut result = 0; + /// + /// // for loop: + /// for i in &numbers { + /// if *i > 5 { + /// break; + /// } + /// result = result + i; + /// } + /// + /// // fold: + /// let result2 = numbers.iter().fold(0, |acc, x| { + /// if *x > 5 { acc } else { acc + x } + /// }); + /// + /// // fold_while: + /// let result3 = numbers.iter().fold_while(0, |acc, x| { + /// if *x > 5 { Done(acc) } else { Continue(acc + x) } + /// }).into_inner(); + /// + /// // they're the same + /// assert_eq!(result, result2); + /// assert_eq!(result2, result3); + /// ``` + /// + /// The big difference between the computations of `result2` and `result3` is that while + /// `fold()` called the provided closure for every item of the callee iterator, + /// `fold_while()` actually stopped iterating as soon as it encountered `Fold::Done(_)`. + fn fold_while(&mut self, init: B, mut f: F) -> FoldWhile + where + Self: Sized, + F: FnMut(B, Self::Item) -> FoldWhile, + { + use Result::{Err as Break, Ok as Continue}; + + let result = self.try_fold( + init, + #[inline(always)] + |acc, v| match f(acc, v) { + FoldWhile::Continue(acc) => Continue(acc), + FoldWhile::Done(acc) => Break(acc), + }, + ); + + match result { + Continue(acc) => FoldWhile::Continue(acc), + Break(acc) => FoldWhile::Done(acc), + } + } + + /// Iterate over the entire iterator and add all the elements. + /// + /// An empty iterator returns `None`, otherwise `Some(sum)`. + /// + /// # Panics + /// + /// When calling `sum1()` and a primitive integer type is being returned, this + /// method will panic if the computation overflows and debug assertions are + /// enabled. + /// + /// # Examples + /// + /// ``` + /// use itertools::Itertools; + /// + /// let empty_sum = (1..1).sum1::(); + /// assert_eq!(empty_sum, None); + /// + /// let nonempty_sum = (1..11).sum1::(); + /// assert_eq!(nonempty_sum, Some(55)); + /// ``` + fn sum1(mut self) -> Option + where + Self: Sized, + S: std::iter::Sum, + { + self.next().map(|first| once(first).chain(self).sum()) + } + + /// Iterate over the entire iterator and multiply all the elements. + /// + /// An empty iterator returns `None`, otherwise `Some(product)`. + /// + /// # Panics + /// + /// When calling `product1()` and a primitive integer type is being returned, + /// method will panic if the computation overflows and debug assertions are + /// enabled. + /// + /// # Examples + /// ``` + /// use itertools::Itertools; + /// + /// let empty_product = (1..1).product1::(); + /// assert_eq!(empty_product, None); + /// + /// let nonempty_product = (1..11).product1::(); + /// assert_eq!(nonempty_product, Some(3628800)); + /// ``` + fn product1
<P>(mut self) -> Option<P>
+ where + Self: Sized, + P: std::iter::Product, + { + self.next().map(|first| once(first).chain(self).product()) + } + + /// Sort all iterator elements into a new iterator in ascending order. + /// + /// **Note:** This consumes the entire iterator, uses the + /// [`slice::sort_unstable`] method and returns the result as a new + /// iterator that owns its elements. + /// + /// This sort is unstable (i.e., may reorder equal elements). + /// + /// The sorted iterator, if directly collected to a `Vec`, is converted + /// without any extra copying or allocation cost. + /// + /// ``` + /// use itertools::Itertools; + /// + /// // sort the letters of the text in ascending order + /// let text = "bdacfe"; + /// itertools::assert_equal(text.chars().sorted_unstable(), + /// "abcdef".chars()); + /// ``` + #[cfg(feature = "use_alloc")] + fn sorted_unstable(self) -> VecIntoIter + where + Self: Sized, + Self::Item: Ord, + { + // Use .sort_unstable() directly since it is not quite identical with + // .sort_by(Ord::cmp) + let mut v = Vec::from_iter(self); + v.sort_unstable(); + v.into_iter() + } + + /// Sort all iterator elements into a new iterator in ascending order. + /// + /// **Note:** This consumes the entire iterator, uses the + /// [`slice::sort_unstable_by`] method and returns the result as a new + /// iterator that owns its elements. + /// + /// This sort is unstable (i.e., may reorder equal elements). + /// + /// The sorted iterator, if directly collected to a `Vec`, is converted + /// without any extra copying or allocation cost. + /// + /// ``` + /// use itertools::Itertools; + /// + /// // sort people in descending order by age + /// let people = vec![("Jane", 20), ("John", 18), ("Jill", 30), ("Jack", 27)]; + /// + /// let oldest_people_first = people + /// .into_iter() + /// .sorted_unstable_by(|a, b| Ord::cmp(&b.1, &a.1)) + /// .map(|(person, _age)| person); + /// + /// itertools::assert_equal(oldest_people_first, + /// vec!["Jill", "Jack", "Jane", "John"]); + /// ``` + #[cfg(feature = "use_alloc")] + fn sorted_unstable_by(self, cmp: F) -> VecIntoIter + where + Self: Sized, + F: FnMut(&Self::Item, &Self::Item) -> Ordering, + { + let mut v = Vec::from_iter(self); + v.sort_unstable_by(cmp); + v.into_iter() + } + + /// Sort all iterator elements into a new iterator in ascending order. + /// + /// **Note:** This consumes the entire iterator, uses the + /// [`slice::sort_unstable_by_key`] method and returns the result as a new + /// iterator that owns its elements. + /// + /// This sort is unstable (i.e., may reorder equal elements). + /// + /// The sorted iterator, if directly collected to a `Vec`, is converted + /// without any extra copying or allocation cost. + /// + /// ``` + /// use itertools::Itertools; + /// + /// // sort people in descending order by age + /// let people = vec![("Jane", 20), ("John", 18), ("Jill", 30), ("Jack", 27)]; + /// + /// let oldest_people_first = people + /// .into_iter() + /// .sorted_unstable_by_key(|x| -x.1) + /// .map(|(person, _age)| person); + /// + /// itertools::assert_equal(oldest_people_first, + /// vec!["Jill", "Jack", "Jane", "John"]); + /// ``` + #[cfg(feature = "use_alloc")] + fn sorted_unstable_by_key(self, f: F) -> VecIntoIter + where + Self: Sized, + K: Ord, + F: FnMut(&Self::Item) -> K, + { + let mut v = Vec::from_iter(self); + v.sort_unstable_by_key(f); + v.into_iter() + } + + /// Sort all iterator elements into a new iterator in ascending order. 
+ /// + /// **Note:** This consumes the entire iterator, uses the + /// [`slice::sort`] method and returns the result as a new + /// iterator that owns its elements. + /// + /// This sort is stable (i.e., does not reorder equal elements). + /// + /// The sorted iterator, if directly collected to a `Vec`, is converted + /// without any extra copying or allocation cost. + /// + /// ``` + /// use itertools::Itertools; + /// + /// // sort the letters of the text in ascending order + /// let text = "bdacfe"; + /// itertools::assert_equal(text.chars().sorted(), + /// "abcdef".chars()); + /// ``` + #[cfg(feature = "use_alloc")] + fn sorted(self) -> VecIntoIter + where + Self: Sized, + Self::Item: Ord, + { + // Use .sort() directly since it is not quite identical with + // .sort_by(Ord::cmp) + let mut v = Vec::from_iter(self); + v.sort(); + v.into_iter() + } + + /// Sort all iterator elements into a new iterator in ascending order. + /// + /// **Note:** This consumes the entire iterator, uses the + /// [`slice::sort_by`] method and returns the result as a new + /// iterator that owns its elements. + /// + /// This sort is stable (i.e., does not reorder equal elements). + /// + /// The sorted iterator, if directly collected to a `Vec`, is converted + /// without any extra copying or allocation cost. + /// + /// ``` + /// use itertools::Itertools; + /// + /// // sort people in descending order by age + /// let people = vec![("Jane", 20), ("John", 18), ("Jill", 30), ("Jack", 30)]; + /// + /// let oldest_people_first = people + /// .into_iter() + /// .sorted_by(|a, b| Ord::cmp(&b.1, &a.1)) + /// .map(|(person, _age)| person); + /// + /// itertools::assert_equal(oldest_people_first, + /// vec!["Jill", "Jack", "Jane", "John"]); + /// ``` + #[cfg(feature = "use_alloc")] + fn sorted_by(self, cmp: F) -> VecIntoIter + where + Self: Sized, + F: FnMut(&Self::Item, &Self::Item) -> Ordering, + { + let mut v = Vec::from_iter(self); + v.sort_by(cmp); + v.into_iter() + } + + /// Sort all iterator elements into a new iterator in ascending order. + /// + /// **Note:** This consumes the entire iterator, uses the + /// [`slice::sort_by_key`] method and returns the result as a new + /// iterator that owns its elements. + /// + /// This sort is stable (i.e., does not reorder equal elements). + /// + /// The sorted iterator, if directly collected to a `Vec`, is converted + /// without any extra copying or allocation cost. + /// + /// ``` + /// use itertools::Itertools; + /// + /// // sort people in descending order by age + /// let people = vec![("Jane", 20), ("John", 18), ("Jill", 30), ("Jack", 30)]; + /// + /// let oldest_people_first = people + /// .into_iter() + /// .sorted_by_key(|x| -x.1) + /// .map(|(person, _age)| person); + /// + /// itertools::assert_equal(oldest_people_first, + /// vec!["Jill", "Jack", "Jane", "John"]); + /// ``` + #[cfg(feature = "use_alloc")] + fn sorted_by_key(self, f: F) -> VecIntoIter + where + Self: Sized, + K: Ord, + F: FnMut(&Self::Item) -> K, + { + let mut v = Vec::from_iter(self); + v.sort_by_key(f); + v.into_iter() + } + + /// Sort all iterator elements into a new iterator in ascending order. The key function is + /// called exactly once per key. + /// + /// **Note:** This consumes the entire iterator, uses the + /// [`slice::sort_by_cached_key`] method and returns the result as a new + /// iterator that owns its elements. + /// + /// This sort is stable (i.e., does not reorder equal elements). 
+ /// + /// The sorted iterator, if directly collected to a `Vec`, is converted + /// without any extra copying or allocation cost. + /// + /// ``` + /// use itertools::Itertools; + /// + /// // sort people in descending order by age + /// let people = vec![("Jane", 20), ("John", 18), ("Jill", 30), ("Jack", 30)]; + /// + /// let oldest_people_first = people + /// .into_iter() + /// .sorted_by_cached_key(|x| -x.1) + /// .map(|(person, _age)| person); + /// + /// itertools::assert_equal(oldest_people_first, + /// vec!["Jill", "Jack", "Jane", "John"]); + /// ``` + #[cfg(feature = "use_alloc")] + fn sorted_by_cached_key(self, f: F) -> VecIntoIter + where + Self: Sized, + K: Ord, + F: FnMut(&Self::Item) -> K, + { + let mut v = Vec::from_iter(self); + v.sort_by_cached_key(f); + v.into_iter() + } + + /// Sort the k smallest elements into a new iterator, in ascending order. + /// + /// **Note:** This consumes the entire iterator, and returns the result + /// as a new iterator that owns its elements. If the input contains + /// less than k elements, the result is equivalent to `self.sorted()`. + /// + /// This is guaranteed to use `k * sizeof(Self::Item) + O(1)` memory + /// and `O(n log k)` time, with `n` the number of elements in the input. + /// + /// The sorted iterator, if directly collected to a `Vec`, is converted + /// without any extra copying or allocation cost. + /// + /// **Note:** This is functionally-equivalent to `self.sorted().take(k)` + /// but much more efficient. + /// + /// ``` + /// use itertools::Itertools; + /// + /// // A random permutation of 0..15 + /// let numbers = vec![6, 9, 1, 14, 0, 4, 8, 7, 11, 2, 10, 3, 13, 12, 5]; + /// + /// let five_smallest = numbers + /// .into_iter() + /// .k_smallest(5); + /// + /// itertools::assert_equal(five_smallest, 0..5); + /// ``` + #[cfg(feature = "use_alloc")] + fn k_smallest(self, k: usize) -> VecIntoIter + where + Self: Sized, + Self::Item: Ord, + { + // The stdlib heap has optimised handling of "holes", which is not included in our heap implementation in k_smallest_general. + // While the difference is unlikely to have practical impact unless `Self::Item` is very large, this method uses the stdlib structure + // to maintain performance compared to previous versions of the crate. + use alloc::collections::BinaryHeap; + + if k == 0 { + self.last(); + return Vec::new().into_iter(); + } + if k == 1 { + return self.min().into_iter().collect_vec().into_iter(); + } + + let mut iter = self.fuse(); + let mut heap: BinaryHeap<_> = iter.by_ref().take(k).collect(); + + iter.for_each(|i| { + debug_assert_eq!(heap.len(), k); + // Equivalent to heap.push(min(i, heap.pop())) but more efficient. + // This should be done with a single `.peek_mut().unwrap()` but + // `PeekMut` sifts-down unconditionally on Rust 1.46.0 and prior. + if *heap.peek().unwrap() > i { + *heap.peek_mut().unwrap() = i; + } + }); + + heap.into_sorted_vec().into_iter() + } + + /// Sort the k smallest elements into a new iterator using the provided comparison. + /// + /// The sorted iterator, if directly collected to a `Vec`, is converted + /// without any extra copying or allocation cost. + /// + /// This corresponds to `self.sorted_by(cmp).take(k)` in the same way that + /// [`k_smallest`](Itertools::k_smallest) corresponds to `self.sorted().take(k)`, + /// in both semantics and complexity. + /// + /// Particularly, a custom heap implementation ensures the comparison is not cloned. 
+ /// + /// ``` + /// use itertools::Itertools; + /// + /// // A random permutation of 0..15 + /// let numbers = vec![6, 9, 1, 14, 0, 4, 8, 7, 11, 2, 10, 3, 13, 12, 5]; + /// + /// let five_smallest = numbers + /// .into_iter() + /// .k_smallest_by(5, |a, b| (a % 7).cmp(&(b % 7)).then(a.cmp(b))); + /// + /// itertools::assert_equal(five_smallest, vec![0, 7, 14, 1, 8]); + /// ``` + #[cfg(feature = "use_alloc")] + fn k_smallest_by(self, k: usize, cmp: F) -> VecIntoIter + where + Self: Sized, + F: FnMut(&Self::Item, &Self::Item) -> Ordering, + { + k_smallest::k_smallest_general(self, k, cmp).into_iter() + } + + /// Return the elements producing the k smallest outputs of the provided function. + /// + /// The sorted iterator, if directly collected to a `Vec`, is converted + /// without any extra copying or allocation cost. + /// + /// This corresponds to `self.sorted_by_key(key).take(k)` in the same way that + /// [`k_smallest`](Itertools::k_smallest) corresponds to `self.sorted().take(k)`, + /// in both semantics and complexity. + /// + /// Particularly, a custom heap implementation ensures the comparison is not cloned. + /// + /// ``` + /// use itertools::Itertools; + /// + /// // A random permutation of 0..15 + /// let numbers = vec![6, 9, 1, 14, 0, 4, 8, 7, 11, 2, 10, 3, 13, 12, 5]; + /// + /// let five_smallest = numbers + /// .into_iter() + /// .k_smallest_by_key(5, |n| (n % 7, *n)); + /// + /// itertools::assert_equal(five_smallest, vec![0, 7, 14, 1, 8]); + /// ``` + #[cfg(feature = "use_alloc")] + fn k_smallest_by_key(self, k: usize, key: F) -> VecIntoIter + where + Self: Sized, + F: FnMut(&Self::Item) -> K, + K: Ord, + { + self.k_smallest_by(k, k_smallest::key_to_cmp(key)) + } + + /// Sort the k largest elements into a new iterator, in descending order. + /// + /// The sorted iterator, if directly collected to a `Vec`, is converted + /// without any extra copying or allocation cost. + /// + /// It is semantically equivalent to [`k_smallest`](Itertools::k_smallest) + /// with a reversed `Ord`. + /// However, this is implemented with a custom binary heap which does not + /// have the same performance characteristics for very large `Self::Item`. + /// + /// ``` + /// use itertools::Itertools; + /// + /// // A random permutation of 0..15 + /// let numbers = vec![6, 9, 1, 14, 0, 4, 8, 7, 11, 2, 10, 3, 13, 12, 5]; + /// + /// let five_largest = numbers + /// .into_iter() + /// .k_largest(5); + /// + /// itertools::assert_equal(five_largest, vec![14, 13, 12, 11, 10]); + /// ``` + #[cfg(feature = "use_alloc")] + fn k_largest(self, k: usize) -> VecIntoIter + where + Self: Sized, + Self::Item: Ord, + { + self.k_largest_by(k, Self::Item::cmp) + } + + /// Sort the k largest elements into a new iterator using the provided comparison. + /// + /// The sorted iterator, if directly collected to a `Vec`, is converted + /// without any extra copying or allocation cost. + /// + /// Functionally equivalent to [`k_smallest_by`](Itertools::k_smallest_by) + /// with a reversed `Ord`. 
+ /// + /// ``` + /// use itertools::Itertools; + /// + /// // A random permutation of 0..15 + /// let numbers = vec![6, 9, 1, 14, 0, 4, 8, 7, 11, 2, 10, 3, 13, 12, 5]; + /// + /// let five_largest = numbers + /// .into_iter() + /// .k_largest_by(5, |a, b| (a % 7).cmp(&(b % 7)).then(a.cmp(b))); + /// + /// itertools::assert_equal(five_largest, vec![13, 6, 12, 5, 11]); + /// ``` + #[cfg(feature = "use_alloc")] + fn k_largest_by(self, k: usize, mut cmp: F) -> VecIntoIter + where + Self: Sized, + F: FnMut(&Self::Item, &Self::Item) -> Ordering, + { + self.k_smallest_by(k, move |a, b| cmp(b, a)) + } + + /// Return the elements producing the k largest outputs of the provided function. + /// + /// The sorted iterator, if directly collected to a `Vec`, is converted + /// without any extra copying or allocation cost. + /// + /// Functionally equivalent to [`k_smallest_by_key`](Itertools::k_smallest_by_key) + /// with a reversed `Ord`. + /// + /// ``` + /// use itertools::Itertools; + /// + /// // A random permutation of 0..15 + /// let numbers = vec![6, 9, 1, 14, 0, 4, 8, 7, 11, 2, 10, 3, 13, 12, 5]; + /// + /// let five_largest = numbers + /// .into_iter() + /// .k_largest_by_key(5, |n| (n % 7, *n)); + /// + /// itertools::assert_equal(five_largest, vec![13, 6, 12, 5, 11]); + /// ``` + #[cfg(feature = "use_alloc")] + fn k_largest_by_key(self, k: usize, key: F) -> VecIntoIter + where + Self: Sized, + F: FnMut(&Self::Item) -> K, + K: Ord, + { + self.k_largest_by(k, k_smallest::key_to_cmp(key)) + } + + /// Consumes the iterator and return an iterator of the last `n` elements. + /// + /// The iterator, if directly collected to a `VecDeque`, is converted + /// without any extra copying or allocation cost. + /// If directly collected to a `Vec`, it may need some data movement + /// but no re-allocation. + /// + /// ``` + /// use itertools::{assert_equal, Itertools}; + /// + /// let v = vec![5, 9, 8, 4, 2, 12, 0]; + /// assert_equal(v.iter().tail(3), &[2, 12, 0]); + /// assert_equal(v.iter().tail(10), &v); + /// + /// assert_equal(v.iter().tail(1), v.iter().last()); + /// + /// assert_equal((0..100).tail(10), 90..100); + /// + /// assert_equal((0..100).filter(|x| x % 3 == 0).tail(10), (72..100).step_by(3)); + /// ``` + /// + /// For double ended iterators without side-effects, you might prefer + /// `.rev().take(n).rev()` to have a similar result (lazy and non-allocating) + /// without consuming the entire iterator. + #[cfg(feature = "use_alloc")] + fn tail(self, n: usize) -> VecDequeIntoIter + where + Self: Sized, + { + match n { + 0 => { + self.last(); + VecDeque::new() + } + 1 => self.last().into_iter().collect(), + _ => { + // Skip the starting part of the iterator if possible. + let (low, _) = self.size_hint(); + let mut iter = self.fuse().skip(low.saturating_sub(n)); + // TODO: If VecDeque has a more efficient method than + // `.pop_front();.push_back(val)` in the future then maybe revisit this. + let mut data: Vec<_> = iter.by_ref().take(n).collect(); + // Update `data` cyclically. + let idx = iter.fold(0, |i, val| { + debug_assert_eq!(data.len(), n); + data[i] = val; + if i + 1 == n { + 0 + } else { + i + 1 + } + }); + // Respect the insertion order, efficiently. + let mut data = VecDeque::from(data); + data.rotate_left(idx); + data + } + } + .into_iter() + } + + /// Collect all iterator elements into one of two + /// partitions. Unlike [`Iterator::partition`], each partition may + /// have a distinct type. 
+ /// + /// ``` + /// use itertools::{Itertools, Either}; + /// + /// let successes_and_failures = vec![Ok(1), Err(false), Err(true), Ok(2)]; + /// + /// let (successes, failures): (Vec<_>, Vec<_>) = successes_and_failures + /// .into_iter() + /// .partition_map(|r| { + /// match r { + /// Ok(v) => Either::Left(v), + /// Err(v) => Either::Right(v), + /// } + /// }); + /// + /// assert_eq!(successes, [1, 2]); + /// assert_eq!(failures, [false, true]); + /// ``` + fn partition_map(self, mut predicate: F) -> (A, B) + where + Self: Sized, + F: FnMut(Self::Item) -> Either, + A: Default + Extend, + B: Default + Extend, + { + let mut left = A::default(); + let mut right = B::default(); + + self.for_each(|val| match predicate(val) { + Either::Left(v) => left.extend(Some(v)), + Either::Right(v) => right.extend(Some(v)), + }); + + (left, right) + } + + /// Partition a sequence of `Result`s into one list of all the `Ok` elements + /// and another list of all the `Err` elements. + /// + /// ``` + /// use itertools::Itertools; + /// + /// let successes_and_failures = vec![Ok(1), Err(false), Err(true), Ok(2)]; + /// + /// let (successes, failures): (Vec<_>, Vec<_>) = successes_and_failures + /// .into_iter() + /// .partition_result(); + /// + /// assert_eq!(successes, [1, 2]); + /// assert_eq!(failures, [false, true]); + /// ``` + fn partition_result(self) -> (A, B) + where + Self: Iterator> + Sized, + A: Default + Extend, + B: Default + Extend, + { + self.partition_map(|r| match r { + Ok(v) => Either::Left(v), + Err(v) => Either::Right(v), + }) + } + + /// Return a `HashMap` of keys mapped to `Vec`s of values. Keys and values + /// are taken from `(Key, Value)` tuple pairs yielded by the input iterator. + /// + /// Essentially a shorthand for `.into_grouping_map().collect::>()`. + /// + /// ``` + /// use itertools::Itertools; + /// + /// let data = vec![(0, 10), (2, 12), (3, 13), (0, 20), (3, 33), (2, 42)]; + /// let lookup = data.into_iter().into_group_map(); + /// + /// assert_eq!(lookup[&0], vec![10, 20]); + /// assert_eq!(lookup.get(&1), None); + /// assert_eq!(lookup[&2], vec![12, 42]); + /// assert_eq!(lookup[&3], vec![13, 33]); + /// ``` + #[cfg(feature = "use_std")] + fn into_group_map(self) -> HashMap> + where + Self: Iterator + Sized, + K: Hash + Eq, + { + group_map::into_group_map(self) + } + + /// Return an `Iterator` on a `HashMap`. Keys mapped to `Vec`s of values. The key is specified + /// in the closure. + /// + /// Essentially a shorthand for `.into_grouping_map_by(f).collect::>()`. + /// + /// ``` + /// use itertools::Itertools; + /// use std::collections::HashMap; + /// + /// let data = vec![(0, 10), (2, 12), (3, 13), (0, 20), (3, 33), (2, 42)]; + /// let lookup: HashMap> = + /// data.clone().into_iter().into_group_map_by(|a| a.0); + /// + /// assert_eq!(lookup[&0], vec![(0,10),(0,20)]); + /// assert_eq!(lookup.get(&1), None); + /// assert_eq!(lookup[&2], vec![(2,12), (2,42)]); + /// assert_eq!(lookup[&3], vec![(3,13), (3,33)]); + /// + /// assert_eq!( + /// data.into_iter() + /// .into_group_map_by(|x| x.0) + /// .into_iter() + /// .map(|(key, values)| (key, values.into_iter().fold(0,|acc, (_,v)| acc + v ))) + /// .collect::>()[&0], + /// 30, + /// ); + /// ``` + #[cfg(feature = "use_std")] + fn into_group_map_by(self, f: F) -> HashMap> + where + Self: Iterator + Sized, + K: Hash + Eq, + F: FnMut(&V) -> K, + { + group_map::into_group_map_by(self, f) + } + + /// Constructs a `GroupingMap` to be used later with one of the efficient + /// group-and-fold operations it allows to perform. 
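+ ///
+ /// As a minimal sketch (this assumes the `use_std` feature and the `sum`
+ /// operation of [`GroupingMap`]):
+ ///
+ /// ```
+ /// use itertools::Itertools;
+ ///
+ /// let data = vec![(0, 10), (1, 5), (0, 20), (1, 7)];
+ /// let sums = data.into_iter().into_grouping_map().sum();
+ /// assert_eq!(sums[&0], 10 + 20);
+ /// assert_eq!(sums[&1], 5 + 7);
+ /// ```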
+ /// + /// The input iterator must yield item in the form of `(K, V)` where the + /// value of type `K` will be used as key to identify the groups and the + /// value of type `V` as value for the folding operation. + /// + /// See [`GroupingMap`] for more informations + /// on what operations are available. + #[cfg(feature = "use_std")] + fn into_grouping_map(self) -> GroupingMap + where + Self: Iterator + Sized, + K: Hash + Eq, + { + grouping_map::new(self) + } + + /// Constructs a `GroupingMap` to be used later with one of the efficient + /// group-and-fold operations it allows to perform. + /// + /// The values from this iterator will be used as values for the folding operation + /// while the keys will be obtained from the values by calling `key_mapper`. + /// + /// See [`GroupingMap`] for more informations + /// on what operations are available. + #[cfg(feature = "use_std")] + fn into_grouping_map_by(self, key_mapper: F) -> GroupingMapBy + where + Self: Iterator + Sized, + K: Hash + Eq, + F: FnMut(&V) -> K, + { + grouping_map::new(grouping_map::new_map_for_grouping(self, key_mapper)) + } + + /// Return all minimum elements of an iterator. + /// + /// # Examples + /// + /// ``` + /// use itertools::Itertools; + /// + /// let a: [i32; 0] = []; + /// assert_eq!(a.iter().min_set(), Vec::<&i32>::new()); + /// + /// let a = [1]; + /// assert_eq!(a.iter().min_set(), vec![&1]); + /// + /// let a = [1, 2, 3, 4, 5]; + /// assert_eq!(a.iter().min_set(), vec![&1]); + /// + /// let a = [1, 1, 1, 1]; + /// assert_eq!(a.iter().min_set(), vec![&1, &1, &1, &1]); + /// ``` + /// + /// The elements can be floats but no particular result is guaranteed + /// if an element is NaN. + #[cfg(feature = "use_alloc")] + fn min_set(self) -> Vec + where + Self: Sized, + Self::Item: Ord, + { + extrema_set::min_set_impl(self, |_| (), |x, y, _, _| x.cmp(y)) + } + + /// Return all minimum elements of an iterator, as determined by + /// the specified function. + /// + /// # Examples + /// + /// ``` + /// # use std::cmp::Ordering; + /// use itertools::Itertools; + /// + /// let a: [(i32, i32); 0] = []; + /// assert_eq!(a.iter().min_set_by(|_, _| Ordering::Equal), Vec::<&(i32, i32)>::new()); + /// + /// let a = [(1, 2)]; + /// assert_eq!(a.iter().min_set_by(|&&(k1,_), &&(k2, _)| k1.cmp(&k2)), vec![&(1, 2)]); + /// + /// let a = [(1, 2), (2, 2), (3, 9), (4, 8), (5, 9)]; + /// assert_eq!(a.iter().min_set_by(|&&(_,k1), &&(_,k2)| k1.cmp(&k2)), vec![&(1, 2), &(2, 2)]); + /// + /// let a = [(1, 2), (1, 3), (1, 4), (1, 5)]; + /// assert_eq!(a.iter().min_set_by(|&&(k1,_), &&(k2, _)| k1.cmp(&k2)), vec![&(1, 2), &(1, 3), &(1, 4), &(1, 5)]); + /// ``` + /// + /// The elements can be floats but no particular result is guaranteed + /// if an element is NaN. + #[cfg(feature = "use_alloc")] + fn min_set_by(self, mut compare: F) -> Vec + where + Self: Sized, + F: FnMut(&Self::Item, &Self::Item) -> Ordering, + { + extrema_set::min_set_impl(self, |_| (), |x, y, _, _| compare(x, y)) + } + + /// Return all minimum elements of an iterator, as determined by + /// the specified function. 
+ /// + /// # Examples + /// + /// ``` + /// use itertools::Itertools; + /// + /// let a: [(i32, i32); 0] = []; + /// assert_eq!(a.iter().min_set_by_key(|_| ()), Vec::<&(i32, i32)>::new()); + /// + /// let a = [(1, 2)]; + /// assert_eq!(a.iter().min_set_by_key(|&&(k,_)| k), vec![&(1, 2)]); + /// + /// let a = [(1, 2), (2, 2), (3, 9), (4, 8), (5, 9)]; + /// assert_eq!(a.iter().min_set_by_key(|&&(_, k)| k), vec![&(1, 2), &(2, 2)]); + /// + /// let a = [(1, 2), (1, 3), (1, 4), (1, 5)]; + /// assert_eq!(a.iter().min_set_by_key(|&&(k, _)| k), vec![&(1, 2), &(1, 3), &(1, 4), &(1, 5)]); + /// ``` + /// + /// The elements can be floats but no particular result is guaranteed + /// if an element is NaN. + #[cfg(feature = "use_alloc")] + fn min_set_by_key(self, key: F) -> Vec + where + Self: Sized, + K: Ord, + F: FnMut(&Self::Item) -> K, + { + extrema_set::min_set_impl(self, key, |_, _, kx, ky| kx.cmp(ky)) + } + + /// Return all maximum elements of an iterator. + /// + /// # Examples + /// + /// ``` + /// use itertools::Itertools; + /// + /// let a: [i32; 0] = []; + /// assert_eq!(a.iter().max_set(), Vec::<&i32>::new()); + /// + /// let a = [1]; + /// assert_eq!(a.iter().max_set(), vec![&1]); + /// + /// let a = [1, 2, 3, 4, 5]; + /// assert_eq!(a.iter().max_set(), vec![&5]); + /// + /// let a = [1, 1, 1, 1]; + /// assert_eq!(a.iter().max_set(), vec![&1, &1, &1, &1]); + /// ``` + /// + /// The elements can be floats but no particular result is guaranteed + /// if an element is NaN. + #[cfg(feature = "use_alloc")] + fn max_set(self) -> Vec + where + Self: Sized, + Self::Item: Ord, + { + extrema_set::max_set_impl(self, |_| (), |x, y, _, _| x.cmp(y)) + } + + /// Return all maximum elements of an iterator, as determined by + /// the specified function. + /// + /// # Examples + /// + /// ``` + /// # use std::cmp::Ordering; + /// use itertools::Itertools; + /// + /// let a: [(i32, i32); 0] = []; + /// assert_eq!(a.iter().max_set_by(|_, _| Ordering::Equal), Vec::<&(i32, i32)>::new()); + /// + /// let a = [(1, 2)]; + /// assert_eq!(a.iter().max_set_by(|&&(k1,_), &&(k2, _)| k1.cmp(&k2)), vec![&(1, 2)]); + /// + /// let a = [(1, 2), (2, 2), (3, 9), (4, 8), (5, 9)]; + /// assert_eq!(a.iter().max_set_by(|&&(_,k1), &&(_,k2)| k1.cmp(&k2)), vec![&(3, 9), &(5, 9)]); + /// + /// let a = [(1, 2), (1, 3), (1, 4), (1, 5)]; + /// assert_eq!(a.iter().max_set_by(|&&(k1,_), &&(k2, _)| k1.cmp(&k2)), vec![&(1, 2), &(1, 3), &(1, 4), &(1, 5)]); + /// ``` + /// + /// The elements can be floats but no particular result is guaranteed + /// if an element is NaN. + #[cfg(feature = "use_alloc")] + fn max_set_by(self, mut compare: F) -> Vec + where + Self: Sized, + F: FnMut(&Self::Item, &Self::Item) -> Ordering, + { + extrema_set::max_set_impl(self, |_| (), |x, y, _, _| compare(x, y)) + } + + /// Return all maximum elements of an iterator, as determined by + /// the specified function. 
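The `*_set_by_key` variants follow the same tie-keeping rule; a short sketch with made-up data:

```rust
use itertools::Itertools;

fn main() {
    // Every word tied for the greatest length, in encounter order.
    let words = ["pear", "fig", "kiwi", "plum"];
    let longest = words.iter().max_set_by_key(|w| w.len());
    assert_eq!(longest, vec![&"pear", &"kiwi", &"plum"]);
}
```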
+ /// + /// # Examples + /// + /// ``` + /// use itertools::Itertools; + /// + /// let a: [(i32, i32); 0] = []; + /// assert_eq!(a.iter().max_set_by_key(|_| ()), Vec::<&(i32, i32)>::new()); + /// + /// let a = [(1, 2)]; + /// assert_eq!(a.iter().max_set_by_key(|&&(k,_)| k), vec![&(1, 2)]); + /// + /// let a = [(1, 2), (2, 2), (3, 9), (4, 8), (5, 9)]; + /// assert_eq!(a.iter().max_set_by_key(|&&(_, k)| k), vec![&(3, 9), &(5, 9)]); + /// + /// let a = [(1, 2), (1, 3), (1, 4), (1, 5)]; + /// assert_eq!(a.iter().max_set_by_key(|&&(k, _)| k), vec![&(1, 2), &(1, 3), &(1, 4), &(1, 5)]); + /// ``` + /// + /// The elements can be floats but no particular result is guaranteed + /// if an element is NaN. + #[cfg(feature = "use_alloc")] + fn max_set_by_key(self, key: F) -> Vec + where + Self: Sized, + K: Ord, + F: FnMut(&Self::Item) -> K, + { + extrema_set::max_set_impl(self, key, |_, _, kx, ky| kx.cmp(ky)) + } + + /// Return the minimum and maximum elements in the iterator. + /// + /// The return type `MinMaxResult` is an enum of three variants: + /// + /// - `NoElements` if the iterator is empty. + /// - `OneElement(x)` if the iterator has exactly one element. + /// - `MinMax(x, y)` is returned otherwise, where `x <= y`. Two + /// values are equal if and only if there is more than one + /// element in the iterator and all elements are equal. + /// + /// On an iterator of length `n`, `minmax` does `1.5 * n` comparisons, + /// and so is faster than calling `min` and `max` separately which does + /// `2 * n` comparisons. + /// + /// # Examples + /// + /// ``` + /// use itertools::Itertools; + /// use itertools::MinMaxResult::{NoElements, OneElement, MinMax}; + /// + /// let a: [i32; 0] = []; + /// assert_eq!(a.iter().minmax(), NoElements); + /// + /// let a = [1]; + /// assert_eq!(a.iter().minmax(), OneElement(&1)); + /// + /// let a = [1, 2, 3, 4, 5]; + /// assert_eq!(a.iter().minmax(), MinMax(&1, &5)); + /// + /// let a = [1, 1, 1, 1]; + /// assert_eq!(a.iter().minmax(), MinMax(&1, &1)); + /// ``` + /// + /// The elements can be floats but no particular result is guaranteed + /// if an element is NaN. + fn minmax(self) -> MinMaxResult + where + Self: Sized, + Self::Item: PartialOrd, + { + minmax::minmax_impl(self, |_| (), |x, y, _, _| x < y) + } + + /// Return the minimum and maximum element of an iterator, as determined by + /// the specified function. + /// + /// The return value is a variant of [`MinMaxResult`] like for [`.minmax()`](Itertools::minmax). + /// + /// For the minimum, the first minimal element is returned. For the maximum, + /// the last maximal element wins. This matches the behavior of the standard + /// [`Iterator::min`] and [`Iterator::max`] methods. + /// + /// The keys can be floats but no particular result is guaranteed + /// if a key is NaN. + fn minmax_by_key(self, key: F) -> MinMaxResult + where + Self: Sized, + K: PartialOrd, + F: FnMut(&Self::Item) -> K, + { + minmax::minmax_impl(self, key, |_, _, xk, yk| xk < yk) + } + + /// Return the minimum and maximum element of an iterator, as determined by + /// the specified comparison function. + /// + /// The return value is a variant of [`MinMaxResult`] like for [`.minmax()`](Itertools::minmax). + /// + /// For the minimum, the first minimal element is returned. For the maximum, + /// the last maximal element wins. This matches the behavior of the standard + /// [`Iterator::min`] and [`Iterator::max`] methods. 
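A small sketch of `minmax` and `minmax_by_key` in caller code; `into_option` (defined in the vendored `minmax.rs` further below) is the usual way to unpack the three-variant result:

```rust
use itertools::Itertools;

fn main() {
    // One pass, roughly 1.5 comparisons per element.
    let data = [3, 7, 1, 9, 4];
    assert_eq!(data.iter().minmax().into_option(), Some((&1, &9)));

    // Extremes by absolute value.
    let signed = [-8_i32, 2, 5, -1];
    assert_eq!(
        signed.iter().minmax_by_key(|x| x.abs()).into_option(),
        Some((&-1, &-8))
    );
}
```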
+ fn minmax_by(self, mut compare: F) -> MinMaxResult + where + Self: Sized, + F: FnMut(&Self::Item, &Self::Item) -> Ordering, + { + minmax::minmax_impl(self, |_| (), |x, y, _, _| Ordering::Less == compare(x, y)) + } + + /// Return the position of the maximum element in the iterator. + /// + /// If several elements are equally maximum, the position of the + /// last of them is returned. + /// + /// # Examples + /// + /// ``` + /// use itertools::Itertools; + /// + /// let a: [i32; 0] = []; + /// assert_eq!(a.iter().position_max(), None); + /// + /// let a = [-3, 0, 1, 5, -10]; + /// assert_eq!(a.iter().position_max(), Some(3)); + /// + /// let a = [1, 1, -1, -1]; + /// assert_eq!(a.iter().position_max(), Some(1)); + /// ``` + fn position_max(self) -> Option + where + Self: Sized, + Self::Item: Ord, + { + self.enumerate() + .max_by(|x, y| Ord::cmp(&x.1, &y.1)) + .map(|x| x.0) + } + + /// Return the position of the maximum element in the iterator, as + /// determined by the specified function. + /// + /// If several elements are equally maximum, the position of the + /// last of them is returned. + /// + /// # Examples + /// + /// ``` + /// use itertools::Itertools; + /// + /// let a: [i32; 0] = []; + /// assert_eq!(a.iter().position_max_by_key(|x| x.abs()), None); + /// + /// let a = [-3_i32, 0, 1, 5, -10]; + /// assert_eq!(a.iter().position_max_by_key(|x| x.abs()), Some(4)); + /// + /// let a = [1_i32, 1, -1, -1]; + /// assert_eq!(a.iter().position_max_by_key(|x| x.abs()), Some(3)); + /// ``` + fn position_max_by_key(self, mut key: F) -> Option + where + Self: Sized, + K: Ord, + F: FnMut(&Self::Item) -> K, + { + self.enumerate() + .max_by(|x, y| Ord::cmp(&key(&x.1), &key(&y.1))) + .map(|x| x.0) + } + + /// Return the position of the maximum element in the iterator, as + /// determined by the specified comparison function. + /// + /// If several elements are equally maximum, the position of the + /// last of them is returned. + /// + /// # Examples + /// + /// ``` + /// use itertools::Itertools; + /// + /// let a: [i32; 0] = []; + /// assert_eq!(a.iter().position_max_by(|x, y| x.cmp(y)), None); + /// + /// let a = [-3_i32, 0, 1, 5, -10]; + /// assert_eq!(a.iter().position_max_by(|x, y| x.cmp(y)), Some(3)); + /// + /// let a = [1_i32, 1, -1, -1]; + /// assert_eq!(a.iter().position_max_by(|x, y| x.cmp(y)), Some(1)); + /// ``` + fn position_max_by(self, mut compare: F) -> Option + where + Self: Sized, + F: FnMut(&Self::Item, &Self::Item) -> Ordering, + { + self.enumerate() + .max_by(|x, y| compare(&x.1, &y.1)) + .map(|x| x.0) + } + + /// Return the position of the minimum element in the iterator. + /// + /// If several elements are equally minimum, the position of the + /// first of them is returned. + /// + /// # Examples + /// + /// ``` + /// use itertools::Itertools; + /// + /// let a: [i32; 0] = []; + /// assert_eq!(a.iter().position_min(), None); + /// + /// let a = [-3, 0, 1, 5, -10]; + /// assert_eq!(a.iter().position_min(), Some(4)); + /// + /// let a = [1, 1, -1, -1]; + /// assert_eq!(a.iter().position_min(), Some(2)); + /// ``` + fn position_min(self) -> Option + where + Self: Sized, + Self::Item: Ord, + { + self.enumerate() + .min_by(|x, y| Ord::cmp(&x.1, &y.1)) + .map(|x| x.0) + } + + /// Return the position of the minimum element in the iterator, as + /// determined by the specified function. + /// + /// If several elements are equally minimum, the position of the + /// first of them is returned. 
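`position_max`/`position_min` are the crate's argmax/argmin helpers; for floats, which are not `Ord`, the `_by` variants with an explicit comparator are the usual route (illustrative data):

```rust
use itertools::Itertools;

fn main() {
    let scores = [0.3_f64, 0.9, 0.1, 0.9];

    // Ties resolve to the last maximum, per the documentation above.
    let best = scores.iter().position_max_by(|a, b| a.partial_cmp(b).unwrap());
    assert_eq!(best, Some(3));

    // The minimum rule picks the first minimal element.
    let worst = scores.iter().position_min_by(|a, b| a.partial_cmp(b).unwrap());
    assert_eq!(worst, Some(2));
}
```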
+ /// + /// # Examples + /// + /// ``` + /// use itertools::Itertools; + /// + /// let a: [i32; 0] = []; + /// assert_eq!(a.iter().position_min_by_key(|x| x.abs()), None); + /// + /// let a = [-3_i32, 0, 1, 5, -10]; + /// assert_eq!(a.iter().position_min_by_key(|x| x.abs()), Some(1)); + /// + /// let a = [1_i32, 1, -1, -1]; + /// assert_eq!(a.iter().position_min_by_key(|x| x.abs()), Some(0)); + /// ``` + fn position_min_by_key(self, mut key: F) -> Option + where + Self: Sized, + K: Ord, + F: FnMut(&Self::Item) -> K, + { + self.enumerate() + .min_by(|x, y| Ord::cmp(&key(&x.1), &key(&y.1))) + .map(|x| x.0) + } + + /// Return the position of the minimum element in the iterator, as + /// determined by the specified comparison function. + /// + /// If several elements are equally minimum, the position of the + /// first of them is returned. + /// + /// # Examples + /// + /// ``` + /// use itertools::Itertools; + /// + /// let a: [i32; 0] = []; + /// assert_eq!(a.iter().position_min_by(|x, y| x.cmp(y)), None); + /// + /// let a = [-3_i32, 0, 1, 5, -10]; + /// assert_eq!(a.iter().position_min_by(|x, y| x.cmp(y)), Some(4)); + /// + /// let a = [1_i32, 1, -1, -1]; + /// assert_eq!(a.iter().position_min_by(|x, y| x.cmp(y)), Some(2)); + /// ``` + fn position_min_by(self, mut compare: F) -> Option + where + Self: Sized, + F: FnMut(&Self::Item, &Self::Item) -> Ordering, + { + self.enumerate() + .min_by(|x, y| compare(&x.1, &y.1)) + .map(|x| x.0) + } + + /// Return the positions of the minimum and maximum elements in + /// the iterator. + /// + /// The return type [`MinMaxResult`] is an enum of three variants: + /// + /// - `NoElements` if the iterator is empty. + /// - `OneElement(xpos)` if the iterator has exactly one element. + /// - `MinMax(xpos, ypos)` is returned otherwise, where the + /// element at `xpos` ≤ the element at `ypos`. While the + /// referenced elements themselves may be equal, `xpos` cannot + /// be equal to `ypos`. + /// + /// On an iterator of length `n`, `position_minmax` does `1.5 * n` + /// comparisons, and so is faster than calling `position_min` and + /// `position_max` separately which does `2 * n` comparisons. + /// + /// For the minimum, if several elements are equally minimum, the + /// position of the first of them is returned. For the maximum, if + /// several elements are equally maximum, the position of the last + /// of them is returned. + /// + /// The elements can be floats but no particular result is + /// guaranteed if an element is NaN. + /// + /// # Examples + /// + /// ``` + /// use itertools::Itertools; + /// use itertools::MinMaxResult::{NoElements, OneElement, MinMax}; + /// + /// let a: [i32; 0] = []; + /// assert_eq!(a.iter().position_minmax(), NoElements); + /// + /// let a = [10]; + /// assert_eq!(a.iter().position_minmax(), OneElement(0)); + /// + /// let a = [-3, 0, 1, 5, -10]; + /// assert_eq!(a.iter().position_minmax(), MinMax(4, 3)); + /// + /// let a = [1, 1, -1, -1]; + /// assert_eq!(a.iter().position_minmax(), MinMax(2, 1)); + /// ``` + fn position_minmax(self) -> MinMaxResult + where + Self: Sized, + Self::Item: PartialOrd, + { + use crate::MinMaxResult::{MinMax, NoElements, OneElement}; + match minmax::minmax_impl(self.enumerate(), |_| (), |x, y, _, _| x.1 < y.1) { + NoElements => NoElements, + OneElement(x) => OneElement(x.0), + MinMax(x, y) => MinMax(x.0, y.0), + } + } + + /// Return the postions of the minimum and maximum elements of an + /// iterator, as determined by the specified function. 
+ /// + /// The return value is a variant of [`MinMaxResult`] like for + /// [`position_minmax`]. + /// + /// For the minimum, if several elements are equally minimum, the + /// position of the first of them is returned. For the maximum, if + /// several elements are equally maximum, the position of the last + /// of them is returned. + /// + /// The keys can be floats but no particular result is guaranteed + /// if a key is NaN. + /// + /// # Examples + /// + /// ``` + /// use itertools::Itertools; + /// use itertools::MinMaxResult::{NoElements, OneElement, MinMax}; + /// + /// let a: [i32; 0] = []; + /// assert_eq!(a.iter().position_minmax_by_key(|x| x.abs()), NoElements); + /// + /// let a = [10_i32]; + /// assert_eq!(a.iter().position_minmax_by_key(|x| x.abs()), OneElement(0)); + /// + /// let a = [-3_i32, 0, 1, 5, -10]; + /// assert_eq!(a.iter().position_minmax_by_key(|x| x.abs()), MinMax(1, 4)); + /// + /// let a = [1_i32, 1, -1, -1]; + /// assert_eq!(a.iter().position_minmax_by_key(|x| x.abs()), MinMax(0, 3)); + /// ``` + /// + /// [`position_minmax`]: Self::position_minmax + fn position_minmax_by_key(self, mut key: F) -> MinMaxResult + where + Self: Sized, + K: PartialOrd, + F: FnMut(&Self::Item) -> K, + { + use crate::MinMaxResult::{MinMax, NoElements, OneElement}; + match self.enumerate().minmax_by_key(|e| key(&e.1)) { + NoElements => NoElements, + OneElement(x) => OneElement(x.0), + MinMax(x, y) => MinMax(x.0, y.0), + } + } + + /// Return the postions of the minimum and maximum elements of an + /// iterator, as determined by the specified comparison function. + /// + /// The return value is a variant of [`MinMaxResult`] like for + /// [`position_minmax`]. + /// + /// For the minimum, if several elements are equally minimum, the + /// position of the first of them is returned. For the maximum, if + /// several elements are equally maximum, the position of the last + /// of them is returned. + /// + /// # Examples + /// + /// ``` + /// use itertools::Itertools; + /// use itertools::MinMaxResult::{NoElements, OneElement, MinMax}; + /// + /// let a: [i32; 0] = []; + /// assert_eq!(a.iter().position_minmax_by(|x, y| x.cmp(y)), NoElements); + /// + /// let a = [10_i32]; + /// assert_eq!(a.iter().position_minmax_by(|x, y| x.cmp(y)), OneElement(0)); + /// + /// let a = [-3_i32, 0, 1, 5, -10]; + /// assert_eq!(a.iter().position_minmax_by(|x, y| x.cmp(y)), MinMax(4, 3)); + /// + /// let a = [1_i32, 1, -1, -1]; + /// assert_eq!(a.iter().position_minmax_by(|x, y| x.cmp(y)), MinMax(2, 1)); + /// ``` + /// + /// [`position_minmax`]: Self::position_minmax + fn position_minmax_by(self, mut compare: F) -> MinMaxResult + where + Self: Sized, + F: FnMut(&Self::Item, &Self::Item) -> Ordering, + { + use crate::MinMaxResult::{MinMax, NoElements, OneElement}; + match self.enumerate().minmax_by(|x, y| compare(&x.1, &y.1)) { + NoElements => NoElements, + OneElement(x) => OneElement(x.0), + MinMax(x, y) => MinMax(x.0, y.0), + } + } + + /// If the iterator yields exactly one element, that element will be returned, otherwise + /// an error will be returned containing an iterator that has the same output as the input + /// iterator. + /// + /// This provides an additional layer of validation over just calling `Iterator::next()`. + /// If your assumption that there should only be one element yielded is false this provides + /// the opportunity to detect and handle that, preventing errors at a distance. 
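A compact sketch of `position_minmax_by` used as a combined argmin/argmax over floats, comparing through `total_cmp` (sample prices invented):

```rust
use itertools::Itertools;
use itertools::MinMaxResult::MinMax;

fn main() {
    // Indices of the cheapest and priciest entries in a single pass.
    let prices = [9.5_f64, 3.25, 7.0, 12.75];
    match prices.iter().position_minmax_by(|a, b| a.total_cmp(b)) {
        MinMax(lo, hi) => assert_eq!((lo, hi), (1, 3)),
        _ => unreachable!("the slice has more than one element"),
    }
}
```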
+ /// + /// # Examples + /// ``` + /// use itertools::Itertools; + /// + /// assert_eq!((0..10).filter(|&x| x == 2).exactly_one().unwrap(), 2); + /// assert!((0..10).filter(|&x| x > 1 && x < 4).exactly_one().unwrap_err().eq(2..4)); + /// assert!((0..10).filter(|&x| x > 1 && x < 5).exactly_one().unwrap_err().eq(2..5)); + /// assert!((0..10).filter(|&_| false).exactly_one().unwrap_err().eq(0..0)); + /// ``` + fn exactly_one(mut self) -> Result> + where + Self: Sized, + { + match self.next() { + Some(first) => match self.next() { + Some(second) => Err(ExactlyOneError::new( + Some(Either::Left([first, second])), + self, + )), + None => Ok(first), + }, + None => Err(ExactlyOneError::new(None, self)), + } + } + + /// If the iterator yields no elements, `Ok(None)` will be returned. If the iterator yields + /// exactly one element, that element will be returned, otherwise an error will be returned + /// containing an iterator that has the same output as the input iterator. + /// + /// This provides an additional layer of validation over just calling `Iterator::next()`. + /// If your assumption that there should be at most one element yielded is false this provides + /// the opportunity to detect and handle that, preventing errors at a distance. + /// + /// # Examples + /// ``` + /// use itertools::Itertools; + /// + /// assert_eq!((0..10).filter(|&x| x == 2).at_most_one().unwrap(), Some(2)); + /// assert!((0..10).filter(|&x| x > 1 && x < 4).at_most_one().unwrap_err().eq(2..4)); + /// assert!((0..10).filter(|&x| x > 1 && x < 5).at_most_one().unwrap_err().eq(2..5)); + /// assert_eq!((0..10).filter(|&_| false).at_most_one().unwrap(), None); + /// ``` + fn at_most_one(mut self) -> Result, ExactlyOneError> + where + Self: Sized, + { + match self.next() { + Some(first) => match self.next() { + Some(second) => Err(ExactlyOneError::new( + Some(Either::Left([first, second])), + self, + )), + None => Ok(Some(first)), + }, + None => Ok(None), + } + } + + /// An iterator adaptor that allows the user to peek at multiple `.next()` + /// values without advancing the base iterator. + /// + /// # Examples + /// ``` + /// use itertools::Itertools; + /// + /// let mut iter = (0..10).multipeek(); + /// assert_eq!(iter.peek(), Some(&0)); + /// assert_eq!(iter.peek(), Some(&1)); + /// assert_eq!(iter.peek(), Some(&2)); + /// assert_eq!(iter.next(), Some(0)); + /// assert_eq!(iter.peek(), Some(&1)); + /// ``` + #[cfg(feature = "use_alloc")] + fn multipeek(self) -> MultiPeek + where + Self: Sized, + { + multipeek_impl::multipeek(self) + } + + /// Collect the items in this iterator and return a `HashMap` which + /// contains each item that appears in the iterator and the number + /// of times it appears. + /// + /// # Examples + /// ``` + /// # use itertools::Itertools; + /// let counts = [1, 1, 1, 3, 3, 5].into_iter().counts(); + /// assert_eq!(counts[&1], 3); + /// assert_eq!(counts[&3], 2); + /// assert_eq!(counts[&5], 1); + /// assert_eq!(counts.get(&0), None); + /// ``` + #[cfg(feature = "use_std")] + fn counts(self) -> HashMap + where + Self: Sized, + Self::Item: Eq + Hash, + { + let mut counts = HashMap::new(); + self.for_each(|item| *counts.entry(item).or_default() += 1); + counts + } + + /// Collect the items in this iterator and return a `HashMap` which + /// contains each item that appears in the iterator and the number + /// of times it appears, + /// determining identity using a keying function. 
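A standalone sketch pairing `exactly_one` as a validation step with `counts` as a one-pass frequency table (strings invented; `counts` assumes the default `use_std` feature):

```rust
use itertools::Itertools;

fn main() {
    // exactly_one: demand a single match and surface the rest on failure.
    let users = ["alice", "bob", "carol"];
    let found = users.iter().filter(|u| u.starts_with('b')).exactly_one();
    assert_eq!(found.ok(), Some(&"bob"));

    // counts: frequency table in one pass.
    let tally = "abracadabra".chars().counts();
    assert_eq!(tally[&'a'], 5);
    assert_eq!(tally[&'b'], 2);
}
```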
+ /// + /// ``` + /// # use itertools::Itertools; + /// struct Character { + /// first_name: &'static str, + /// last_name: &'static str, + /// } + /// + /// let characters = + /// vec![ + /// Character { first_name: "Amy", last_name: "Pond" }, + /// Character { first_name: "Amy", last_name: "Wong" }, + /// Character { first_name: "Amy", last_name: "Santiago" }, + /// Character { first_name: "James", last_name: "Bond" }, + /// Character { first_name: "James", last_name: "Sullivan" }, + /// Character { first_name: "James", last_name: "Norington" }, + /// Character { first_name: "James", last_name: "Kirk" }, + /// ]; + /// + /// let first_name_frequency = + /// characters + /// .into_iter() + /// .counts_by(|c| c.first_name); + /// + /// assert_eq!(first_name_frequency["Amy"], 3); + /// assert_eq!(first_name_frequency["James"], 4); + /// assert_eq!(first_name_frequency.contains_key("Asha"), false); + /// ``` + #[cfg(feature = "use_std")] + fn counts_by(self, f: F) -> HashMap + where + Self: Sized, + K: Eq + Hash, + F: FnMut(Self::Item) -> K, + { + self.map(f).counts() + } + + /// Converts an iterator of tuples into a tuple of containers. + /// + /// It consumes an entire iterator of n-ary tuples, producing `n` collections, one for each + /// column. + /// + /// This function is, in some sense, the opposite of [`multizip`]. + /// + /// ``` + /// use itertools::Itertools; + /// + /// let inputs = vec![(1, 2, 3), (4, 5, 6), (7, 8, 9)]; + /// + /// let (a, b, c): (Vec<_>, Vec<_>, Vec<_>) = inputs + /// .into_iter() + /// .multiunzip(); + /// + /// assert_eq!(a, vec![1, 4, 7]); + /// assert_eq!(b, vec![2, 5, 8]); + /// assert_eq!(c, vec![3, 6, 9]); + /// ``` + fn multiunzip(self) -> FromI + where + Self: Sized + MultiUnzip, + { + MultiUnzip::multiunzip(self) + } + + /// Returns the length of the iterator if one exists. + /// Otherwise return `self.size_hint()`. + /// + /// Fallible [`ExactSizeIterator::len`]. + /// + /// Inherits guarantees and restrictions from [`Iterator::size_hint`]. + /// + /// ``` + /// use itertools::Itertools; + /// + /// assert_eq!([0; 10].iter().try_len(), Ok(10)); + /// assert_eq!((10..15).try_len(), Ok(5)); + /// assert_eq!((15..10).try_len(), Ok(0)); + /// assert_eq!((10..).try_len(), Err((usize::MAX, None))); + /// assert_eq!((10..15).filter(|x| x % 2 == 0).try_len(), Err((0, Some(5)))); + /// ``` + fn try_len(&self) -> Result { + let sh = self.size_hint(); + match sh { + (lo, Some(hi)) if lo == hi => Ok(lo), + _ => Err(sh), + } + } +} + +impl Itertools for T where T: Iterator + ?Sized {} + +/// Return `true` if both iterables produce equal sequences +/// (elements pairwise equal and sequences of the same length), +/// `false` otherwise. +/// +/// [`IntoIterator`] enabled version of [`Iterator::eq`]. +/// +/// ``` +/// assert!(itertools::equal(vec![1, 2, 3], 1..4)); +/// assert!(!itertools::equal(&[0, 0], &[0, 0, 0])); +/// ``` +pub fn equal(a: I, b: J) -> bool +where + I: IntoIterator, + J: IntoIterator, + I::Item: PartialEq, +{ + a.into_iter().eq(b) +} + +/// Assert that two iterables produce equal sequences, with the same +/// semantics as [`equal(a, b)`](equal). +/// +/// **Panics** on assertion failure with a message that shows the +/// two different elements and the iteration index. +/// +/// ```should_panic +/// # use itertools::assert_equal; +/// assert_equal("exceed".split('c'), "excess".split('c')); +/// // ^PANIC: panicked at 'Failed assertion Some("eed") == Some("ess") for iteration 1'. 
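A short sketch of `try_len` and `multiunzip` with invented rows, showing the exact-size check and the column split:

```rust
use itertools::Itertools;

fn main() {
    // try_len succeeds only when the size hint is exact.
    assert_eq!((0..4).try_len(), Ok(4));
    assert!((0..4).filter(|x| x % 2 == 0).try_len().is_err());

    // multiunzip splits rows of tuples into per-column collections.
    let rows = vec![("a", 1, true), ("b", 2, false)];
    let (names, nums, flags): (Vec<_>, Vec<_>, Vec<_>) = rows.into_iter().multiunzip();
    assert_eq!(names, ["a", "b"]);
    assert_eq!(nums, [1, 2]);
    assert_eq!(flags, [true, false]);
}
```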
+/// ``` +pub fn assert_equal(a: I, b: J) +where + I: IntoIterator, + J: IntoIterator, + I::Item: fmt::Debug + PartialEq, + J::Item: fmt::Debug, +{ + let mut ia = a.into_iter(); + let mut ib = b.into_iter(); + let mut i: usize = 0; + loop { + match (ia.next(), ib.next()) { + (None, None) => return, + (a, b) => { + let equal = match (&a, &b) { + (Some(a), Some(b)) => a == b, + _ => false, + }; + assert!( + equal, + "Failed assertion {a:?} == {b:?} for iteration {i}", + i = i, + a = a, + b = b + ); + i += 1; + } + } + } +} + +/// Partition a sequence using predicate `pred` so that elements +/// that map to `true` are placed before elements which map to `false`. +/// +/// The order within the partitions is arbitrary. +/// +/// Return the index of the split point. +/// +/// ``` +/// use itertools::partition; +/// +/// # // use repeated numbers to not promise any ordering +/// let mut data = [7, 1, 1, 7, 1, 1, 7]; +/// let split_index = partition(&mut data, |elt| *elt >= 3); +/// +/// assert_eq!(data, [7, 7, 7, 1, 1, 1, 1]); +/// assert_eq!(split_index, 3); +/// ``` +pub fn partition<'a, A: 'a, I, F>(iter: I, mut pred: F) -> usize +where + I: IntoIterator, + I::IntoIter: DoubleEndedIterator, + F: FnMut(&A) -> bool, +{ + let mut split_index = 0; + let mut iter = iter.into_iter(); + while let Some(front) = iter.next() { + if !pred(front) { + match iter.rfind(|back| pred(back)) { + Some(back) => std::mem::swap(front, back), + None => break, + } + } + split_index += 1; + } + split_index +} + +/// An enum used for controlling the execution of `fold_while`. +/// +/// See [`.fold_while()`](Itertools::fold_while) for more information. +#[derive(Copy, Clone, Debug, Eq, PartialEq)] +pub enum FoldWhile { + /// Continue folding with this value + Continue(T), + /// Fold is complete and will return this value + Done(T), +} + +impl FoldWhile { + /// Return the value in the continue or done. + pub fn into_inner(self) -> T { + match self { + Self::Continue(x) | Self::Done(x) => x, + } + } + + /// Return true if `self` is `Done`, false if it is `Continue`. + pub fn is_done(&self) -> bool { + match *self { + Self::Continue(_) => false, + Self::Done(_) => true, + } + } +} diff --git a/vendor/itertools/src/merge_join.rs b/vendor/itertools/src/merge_join.rs new file mode 100644 index 00000000000000..c0de35f90e2481 --- /dev/null +++ b/vendor/itertools/src/merge_join.rs @@ -0,0 +1,347 @@ +use std::cmp::Ordering; +use std::fmt; +use std::iter::{Fuse, FusedIterator}; +use std::marker::PhantomData; + +use either::Either; + +use super::adaptors::{put_back, PutBack}; +use crate::either_or_both::EitherOrBoth; +use crate::size_hint::{self, SizeHint}; +#[cfg(doc)] +use crate::Itertools; + +#[derive(Clone, Debug)] +pub struct MergeLte; + +/// An iterator adaptor that merges the two base iterators in ascending order. +/// If both base iterators are sorted (ascending), the result is sorted. +/// +/// Iterator element type is `I::Item`. +/// +/// See [`.merge()`](crate::Itertools::merge_by) for more information. +pub type Merge = MergeBy; + +/// Create an iterator that merges elements in `i` and `j`. +/// +/// [`IntoIterator`] enabled version of [`Itertools::merge`](crate::Itertools::merge). 
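The `FoldWhile` enum above drives `fold_while`, and the free `partition` function reorders a mutable slice in place; a small sketch of both with invented numbers:

```rust
use itertools::{FoldWhile, Itertools};

fn main() {
    // fold_while: stop folding as soon as the running sum reaches 50.
    let sum = (1..=100)
        .fold_while(0, |acc, x| {
            let acc = acc + x;
            if acc >= 50 { FoldWhile::Done(acc) } else { FoldWhile::Continue(acc) }
        })
        .into_inner();
    assert_eq!(sum, 55); // 1 + 2 + ... + 10

    // partition: elements satisfying the predicate end up before the rest.
    let mut data = [4, 1, 7, 2, 9];
    let split = itertools::partition(&mut data, |&x| x >= 4);
    assert_eq!(split, 3);
    assert!(data[..split].iter().all(|&x| x >= 4));
    assert!(data[split..].iter().all(|&x| x < 4));
}
```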
+/// +/// ``` +/// use itertools::merge; +/// +/// for elt in merge(&[1, 2, 3], &[2, 3, 4]) { +/// /* loop body */ +/// } +/// ``` +pub fn merge( + i: I, + j: J, +) -> Merge<::IntoIter, ::IntoIter> +where + I: IntoIterator, + J: IntoIterator, + I::Item: PartialOrd, +{ + merge_by_new(i, j, MergeLte) +} + +/// An iterator adaptor that merges the two base iterators in ascending order. +/// If both base iterators are sorted (ascending), the result is sorted. +/// +/// Iterator element type is `I::Item`. +/// +/// See [`.merge_by()`](crate::Itertools::merge_by) for more information. +#[must_use = "iterator adaptors are lazy and do nothing unless consumed"] +pub struct MergeBy { + left: PutBack>, + right: PutBack>, + cmp_fn: F, +} + +/// Create a `MergeBy` iterator. +pub fn merge_by_new(a: I, b: J, cmp: F) -> MergeBy +where + I: IntoIterator, + J: IntoIterator, +{ + MergeBy { + left: put_back(a.into_iter().fuse()), + right: put_back(b.into_iter().fuse()), + cmp_fn: cmp, + } +} + +/// Return an iterator adaptor that merge-joins items from the two base iterators in ascending order. +/// +/// [`IntoIterator`] enabled version of [`Itertools::merge_join_by`]. +pub fn merge_join_by( + left: I, + right: J, + cmp_fn: F, +) -> MergeJoinBy +where + I: IntoIterator, + J: IntoIterator, + F: FnMut(&I::Item, &J::Item) -> T, +{ + MergeBy { + left: put_back(left.into_iter().fuse()), + right: put_back(right.into_iter().fuse()), + cmp_fn: MergeFuncLR(cmp_fn, PhantomData), + } +} + +/// An iterator adaptor that merge-joins items from the two base iterators in ascending order. +/// +/// See [`.merge_join_by()`](crate::Itertools::merge_join_by) for more information. +pub type MergeJoinBy = + MergeBy::Item, ::Item>>::T>>; + +#[derive(Clone, Debug)] +pub struct MergeFuncLR(F, PhantomData); + +pub trait FuncLR { + type T; +} + +impl T> FuncLR for F { + type T = T; +} + +pub trait OrderingOrBool { + type MergeResult; + fn left(left: L) -> Self::MergeResult; + fn right(right: R) -> Self::MergeResult; + // "merge" never returns (Some(...), Some(...), ...) so Option> + // is appealing but it is always followed by two put_backs, so we think the compiler is + // smart enough to optimize it. Or we could move put_backs into "merge". 
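A sketch of the two entry points built on `MergeBy`: plain `merge` for two sorted sequences, and `merge_join_by` with an `Ordering` closure, which behaves like a full outer join and yields `EitherOrBoth` (inputs invented):

```rust
use itertools::{EitherOrBoth, Itertools};

fn main() {
    // merge interleaves two ascending sequences into one sorted sequence.
    let merged: Vec<_> = [1, 3, 5].iter().merge(&[2, 3, 4]).copied().collect();
    assert_eq!(merged, [1, 2, 3, 3, 4, 5]);

    // merge_join_by pairs up equal keys and keeps unmatched items on either side.
    let joined: Vec<_> = [1, 3, 5]
        .iter()
        .merge_join_by([3, 4].iter(), |a, b| a.cmp(b))
        .collect();
    assert_eq!(
        joined,
        [
            EitherOrBoth::Left(&1),
            EitherOrBoth::Both(&3, &3),
            EitherOrBoth::Right(&4),
            EitherOrBoth::Left(&5),
        ]
    );
}
```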
+ fn merge(&mut self, left: L, right: R) -> (Option>, Self::MergeResult); + fn size_hint(left: SizeHint, right: SizeHint) -> SizeHint; +} + +impl Ordering> OrderingOrBool for MergeFuncLR { + type MergeResult = EitherOrBoth; + fn left(left: L) -> Self::MergeResult { + EitherOrBoth::Left(left) + } + fn right(right: R) -> Self::MergeResult { + EitherOrBoth::Right(right) + } + fn merge(&mut self, left: L, right: R) -> (Option>, Self::MergeResult) { + match self.0(&left, &right) { + Ordering::Equal => (None, EitherOrBoth::Both(left, right)), + Ordering::Less => (Some(Either::Right(right)), EitherOrBoth::Left(left)), + Ordering::Greater => (Some(Either::Left(left)), EitherOrBoth::Right(right)), + } + } + fn size_hint(left: SizeHint, right: SizeHint) -> SizeHint { + let (a_lower, a_upper) = left; + let (b_lower, b_upper) = right; + let lower = ::std::cmp::max(a_lower, b_lower); + let upper = match (a_upper, b_upper) { + (Some(x), Some(y)) => x.checked_add(y), + _ => None, + }; + (lower, upper) + } +} + +impl bool> OrderingOrBool for MergeFuncLR { + type MergeResult = Either; + fn left(left: L) -> Self::MergeResult { + Either::Left(left) + } + fn right(right: R) -> Self::MergeResult { + Either::Right(right) + } + fn merge(&mut self, left: L, right: R) -> (Option>, Self::MergeResult) { + if self.0(&left, &right) { + (Some(Either::Right(right)), Either::Left(left)) + } else { + (Some(Either::Left(left)), Either::Right(right)) + } + } + fn size_hint(left: SizeHint, right: SizeHint) -> SizeHint { + // Not ExactSizeIterator because size may be larger than usize + size_hint::add(left, right) + } +} + +impl bool> OrderingOrBool for F { + type MergeResult = T; + fn left(left: T) -> Self::MergeResult { + left + } + fn right(right: T) -> Self::MergeResult { + right + } + fn merge(&mut self, left: T, right: T) -> (Option>, Self::MergeResult) { + if self(&left, &right) { + (Some(Either::Right(right)), left) + } else { + (Some(Either::Left(left)), right) + } + } + fn size_hint(left: SizeHint, right: SizeHint) -> SizeHint { + // Not ExactSizeIterator because size may be larger than usize + size_hint::add(left, right) + } +} + +impl OrderingOrBool for MergeLte { + type MergeResult = T; + fn left(left: T) -> Self::MergeResult { + left + } + fn right(right: T) -> Self::MergeResult { + right + } + fn merge(&mut self, left: T, right: T) -> (Option>, Self::MergeResult) { + if left <= right { + (Some(Either::Right(right)), left) + } else { + (Some(Either::Left(left)), right) + } + } + fn size_hint(left: SizeHint, right: SizeHint) -> SizeHint { + // Not ExactSizeIterator because size may be larger than usize + size_hint::add(left, right) + } +} + +impl Clone for MergeBy +where + I: Iterator, + J: Iterator, + PutBack>: Clone, + PutBack>: Clone, + F: Clone, +{ + clone_fields!(left, right, cmp_fn); +} + +impl fmt::Debug for MergeBy +where + I: Iterator + fmt::Debug, + I::Item: fmt::Debug, + J: Iterator + fmt::Debug, + J::Item: fmt::Debug, +{ + debug_fmt_fields!(MergeBy, left, right); +} + +impl Iterator for MergeBy +where + I: Iterator, + J: Iterator, + F: OrderingOrBool, +{ + type Item = F::MergeResult; + + fn next(&mut self) -> Option { + match (self.left.next(), self.right.next()) { + (None, None) => None, + (Some(left), None) => Some(F::left(left)), + (None, Some(right)) => Some(F::right(right)), + (Some(left), Some(right)) => { + let (not_next, next) = self.cmp_fn.merge(left, right); + match not_next { + Some(Either::Left(l)) => { + self.left.put_back(l); + } + Some(Either::Right(r)) => { + self.right.put_back(r); + 
} + None => (), + } + + Some(next) + } + } + } + + fn fold(mut self, init: B, mut f: G) -> B + where + Self: Sized, + G: FnMut(B, Self::Item) -> B, + { + let mut acc = init; + let mut left = self.left.next(); + let mut right = self.right.next(); + + loop { + match (left, right) { + (Some(l), Some(r)) => match self.cmp_fn.merge(l, r) { + (Some(Either::Right(r)), x) => { + acc = f(acc, x); + left = self.left.next(); + right = Some(r); + } + (Some(Either::Left(l)), x) => { + acc = f(acc, x); + left = Some(l); + right = self.right.next(); + } + (None, x) => { + acc = f(acc, x); + left = self.left.next(); + right = self.right.next(); + } + }, + (Some(l), None) => { + self.left.put_back(l); + acc = self.left.fold(acc, |acc, x| f(acc, F::left(x))); + break; + } + (None, Some(r)) => { + self.right.put_back(r); + acc = self.right.fold(acc, |acc, x| f(acc, F::right(x))); + break; + } + (None, None) => { + break; + } + } + } + + acc + } + + fn size_hint(&self) -> SizeHint { + F::size_hint(self.left.size_hint(), self.right.size_hint()) + } + + fn nth(&mut self, mut n: usize) -> Option { + loop { + if n == 0 { + break self.next(); + } + n -= 1; + match (self.left.next(), self.right.next()) { + (None, None) => break None, + (Some(_left), None) => break self.left.nth(n).map(F::left), + (None, Some(_right)) => break self.right.nth(n).map(F::right), + (Some(left), Some(right)) => { + let (not_next, _) = self.cmp_fn.merge(left, right); + match not_next { + Some(Either::Left(l)) => { + self.left.put_back(l); + } + Some(Either::Right(r)) => { + self.right.put_back(r); + } + None => (), + } + } + } + } + } +} + +impl FusedIterator for MergeBy +where + I: Iterator, + J: Iterator, + F: OrderingOrBool, +{ +} diff --git a/vendor/itertools/src/minmax.rs b/vendor/itertools/src/minmax.rs new file mode 100644 index 00000000000000..5c9674e01124ef --- /dev/null +++ b/vendor/itertools/src/minmax.rs @@ -0,0 +1,116 @@ +/// `MinMaxResult` is an enum returned by `minmax`. +/// +/// See [`.minmax()`](crate::Itertools::minmax) for more detail. +#[derive(Copy, Clone, PartialEq, Eq, Debug)] +pub enum MinMaxResult { + /// Empty iterator + NoElements, + + /// Iterator with one element, so the minimum and maximum are the same + OneElement(T), + + /// More than one element in the iterator, the first element is not larger + /// than the second + MinMax(T, T), +} + +impl MinMaxResult { + /// `into_option` creates an `Option` of type `(T, T)`. The returned `Option` + /// has variant `None` if and only if the `MinMaxResult` has variant + /// `NoElements`. Otherwise `Some((x, y))` is returned where `x <= y`. + /// If the `MinMaxResult` has variant `OneElement(x)`, performing this + /// operation will make one clone of `x`. + /// + /// # Examples + /// + /// ``` + /// use itertools::MinMaxResult::{self, NoElements, OneElement, MinMax}; + /// + /// let r: MinMaxResult = NoElements; + /// assert_eq!(r.into_option(), None); + /// + /// let r = OneElement(1); + /// assert_eq!(r.into_option(), Some((1, 1))); + /// + /// let r = MinMax(1, 2); + /// assert_eq!(r.into_option(), Some((1, 2))); + /// ``` + pub fn into_option(self) -> Option<(T, T)> { + match self { + Self::NoElements => None, + Self::OneElement(x) => Some((x.clone(), x)), + Self::MinMax(x, y) => Some((x, y)), + } + } +} + +/// Implementation guts for `minmax` and `minmax_by_key`. 
+pub fn minmax_impl(mut it: I, mut key_for: F, mut lt: L) -> MinMaxResult +where + I: Iterator, + F: FnMut(&I::Item) -> K, + L: FnMut(&I::Item, &I::Item, &K, &K) -> bool, +{ + let (mut min, mut max, mut min_key, mut max_key) = match it.next() { + None => return MinMaxResult::NoElements, + Some(x) => match it.next() { + None => return MinMaxResult::OneElement(x), + Some(y) => { + let xk = key_for(&x); + let yk = key_for(&y); + if !lt(&y, &x, &yk, &xk) { + (x, y, xk, yk) + } else { + (y, x, yk, xk) + } + } + }, + }; + + loop { + // `first` and `second` are the two next elements we want to look + // at. We first compare `first` and `second` (#1). The smaller one + // is then compared to current minimum (#2). The larger one is + // compared to current maximum (#3). This way we do 3 comparisons + // for 2 elements. + let first = match it.next() { + None => break, + Some(x) => x, + }; + let second = match it.next() { + None => { + let first_key = key_for(&first); + if lt(&first, &min, &first_key, &min_key) { + min = first; + } else if !lt(&first, &max, &first_key, &max_key) { + max = first; + } + break; + } + Some(x) => x, + }; + let first_key = key_for(&first); + let second_key = key_for(&second); + if !lt(&second, &first, &second_key, &first_key) { + if lt(&first, &min, &first_key, &min_key) { + min = first; + min_key = first_key; + } + if !lt(&second, &max, &second_key, &max_key) { + max = second; + max_key = second_key; + } + } else { + if lt(&second, &min, &second_key, &min_key) { + min = second; + min_key = second_key; + } + if !lt(&first, &max, &first_key, &max_key) { + max = first; + max_key = first_key; + } + } + } + + MinMaxResult::MinMax(min, max) +} diff --git a/vendor/itertools/src/multipeek_impl.rs b/vendor/itertools/src/multipeek_impl.rs new file mode 100644 index 00000000000000..6f800b6fb6c907 --- /dev/null +++ b/vendor/itertools/src/multipeek_impl.rs @@ -0,0 +1,116 @@ +use crate::size_hint; +#[cfg(doc)] +use crate::Itertools; +use crate::PeekingNext; +use alloc::collections::VecDeque; +use std::iter::Fuse; + +/// See [`multipeek()`] for more information. +#[derive(Clone, Debug)] +#[must_use = "iterator adaptors are lazy and do nothing unless consumed"] +pub struct MultiPeek +where + I: Iterator, +{ + iter: Fuse, + buf: VecDeque, + index: usize, +} + +/// An iterator adaptor that allows the user to peek at multiple `.next()` +/// values without advancing the base iterator. +/// +/// [`IntoIterator`] enabled version of [`Itertools::multipeek`]. +pub fn multipeek(iterable: I) -> MultiPeek +where + I: IntoIterator, +{ + MultiPeek { + iter: iterable.into_iter().fuse(), + buf: VecDeque::new(), + index: 0, + } +} + +impl MultiPeek +where + I: Iterator, +{ + /// Reset the peeking “cursor” + pub fn reset_peek(&mut self) { + self.index = 0; + } +} + +impl MultiPeek { + /// Works exactly like `.next()` with the only difference that it doesn't + /// advance itself. `.peek()` can be called multiple times, to peek + /// further ahead. + /// When `.next()` is called, reset the peeking “cursor”. 
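The pairing trick above is what yields the advertised `1.5 * n` comparison count; a rough standalone check of that bound through `minmax_by`, counting comparator calls (the exact count is an implementation detail, so only the upper bound is asserted):

```rust
use itertools::Itertools;
use std::cell::Cell;

fn main() {
    let comparisons = Cell::new(0_usize);
    let data: Vec<u32> = (0..1000).rev().collect();

    let result = data
        .iter()
        .minmax_by(|a, b| {
            comparisons.set(comparisons.get() + 1);
            a.cmp(b)
        })
        .into_option();

    assert_eq!(result, Some((&0, &999)));
    // Roughly three comparisons per pair of elements consumed.
    assert!(comparisons.get() <= 3 * data.len() / 2);
}
```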
+ pub fn peek(&mut self) -> Option<&I::Item> { + let ret = if self.index < self.buf.len() { + Some(&self.buf[self.index]) + } else { + match self.iter.next() { + Some(x) => { + self.buf.push_back(x); + Some(&self.buf[self.index]) + } + None => return None, + } + }; + + self.index += 1; + ret + } +} + +impl PeekingNext for MultiPeek +where + I: Iterator, +{ + fn peeking_next(&mut self, accept: F) -> Option + where + F: FnOnce(&Self::Item) -> bool, + { + if self.buf.is_empty() { + if let Some(r) = self.peek() { + if !accept(r) { + return None; + } + } + } else if let Some(r) = self.buf.front() { + if !accept(r) { + return None; + } + } + self.next() + } +} + +impl Iterator for MultiPeek +where + I: Iterator, +{ + type Item = I::Item; + + fn next(&mut self) -> Option { + self.index = 0; + self.buf.pop_front().or_else(|| self.iter.next()) + } + + fn size_hint(&self) -> (usize, Option) { + size_hint::add_scalar(self.iter.size_hint(), self.buf.len()) + } + + fn fold(self, mut init: B, mut f: F) -> B + where + F: FnMut(B, Self::Item) -> B, + { + init = self.buf.into_iter().fold(init, &mut f); + self.iter.fold(init, f) + } +} + +// Same size +impl ExactSizeIterator for MultiPeek where I: ExactSizeIterator {} diff --git a/vendor/itertools/src/pad_tail.rs b/vendor/itertools/src/pad_tail.rs new file mode 100644 index 00000000000000..5595b42bacf21d --- /dev/null +++ b/vendor/itertools/src/pad_tail.rs @@ -0,0 +1,124 @@ +use crate::size_hint; +use std::iter::{Fuse, FusedIterator}; + +/// An iterator adaptor that pads a sequence to a minimum length by filling +/// missing elements using a closure. +/// +/// Iterator element type is `I::Item`. +/// +/// See [`.pad_using()`](crate::Itertools::pad_using) for more information. +#[derive(Clone)] +#[must_use = "iterator adaptors are lazy and do nothing unless consumed"] +pub struct PadUsing { + iter: Fuse, + min: usize, + pos: usize, + filler: F, +} + +impl std::fmt::Debug for PadUsing +where + I: std::fmt::Debug, +{ + debug_fmt_fields!(PadUsing, iter, min, pos); +} + +/// Create a new `PadUsing` iterator. 
+pub fn pad_using(iter: I, min: usize, filler: F) -> PadUsing +where + I: Iterator, + F: FnMut(usize) -> I::Item, +{ + PadUsing { + iter: iter.fuse(), + min, + pos: 0, + filler, + } +} + +impl Iterator for PadUsing +where + I: Iterator, + F: FnMut(usize) -> I::Item, +{ + type Item = I::Item; + + #[inline] + fn next(&mut self) -> Option { + match self.iter.next() { + None => { + if self.pos < self.min { + let e = Some((self.filler)(self.pos)); + self.pos += 1; + e + } else { + None + } + } + e => { + self.pos += 1; + e + } + } + } + + fn size_hint(&self) -> (usize, Option) { + let tail = self.min.saturating_sub(self.pos); + size_hint::max(self.iter.size_hint(), (tail, Some(tail))) + } + + fn fold(self, mut init: B, mut f: G) -> B + where + G: FnMut(B, Self::Item) -> B, + { + let mut pos = self.pos; + init = self.iter.fold(init, |acc, item| { + pos += 1; + f(acc, item) + }); + (pos..self.min).map(self.filler).fold(init, f) + } +} + +impl DoubleEndedIterator for PadUsing +where + I: DoubleEndedIterator + ExactSizeIterator, + F: FnMut(usize) -> I::Item, +{ + fn next_back(&mut self) -> Option { + if self.min == 0 { + self.iter.next_back() + } else if self.iter.len() >= self.min { + self.min -= 1; + self.iter.next_back() + } else { + self.min -= 1; + Some((self.filler)(self.min)) + } + } + + fn rfold(self, mut init: B, mut f: G) -> B + where + G: FnMut(B, Self::Item) -> B, + { + init = (self.iter.len()..self.min) + .map(self.filler) + .rfold(init, &mut f); + self.iter.rfold(init, f) + } +} + +impl ExactSizeIterator for PadUsing +where + I: ExactSizeIterator, + F: FnMut(usize) -> I::Item, +{ +} + +impl FusedIterator for PadUsing +where + I: FusedIterator, + F: FnMut(usize) -> I::Item, +{ +} diff --git a/vendor/itertools/src/peek_nth.rs b/vendor/itertools/src/peek_nth.rs new file mode 100644 index 00000000000000..b03a3ef5f2a776 --- /dev/null +++ b/vendor/itertools/src/peek_nth.rs @@ -0,0 +1,178 @@ +use crate::size_hint; +use crate::PeekingNext; +use alloc::collections::VecDeque; +use std::iter::Fuse; + +/// See [`peek_nth()`] for more information. +#[derive(Clone, Debug)] +#[must_use = "iterator adaptors are lazy and do nothing unless consumed"] +pub struct PeekNth +where + I: Iterator, +{ + iter: Fuse, + buf: VecDeque, +} + +/// A drop-in replacement for [`std::iter::Peekable`] which adds a `peek_nth` +/// method allowing the user to `peek` at a value several iterations forward +/// without advancing the base iterator. +/// +/// This differs from `multipeek` in that subsequent calls to `peek` or +/// `peek_nth` will always return the same value until `next` is called +/// (making `reset_peek` unnecessary). +pub fn peek_nth(iterable: I) -> PeekNth +where + I: IntoIterator, +{ + PeekNth { + iter: iterable.into_iter().fuse(), + buf: VecDeque::new(), + } +} + +impl PeekNth +where + I: Iterator, +{ + /// Works exactly like the `peek` method in [`std::iter::Peekable`]. + pub fn peek(&mut self) -> Option<&I::Item> { + self.peek_nth(0) + } + + /// Works exactly like the `peek_mut` method in [`std::iter::Peekable`]. + pub fn peek_mut(&mut self) -> Option<&mut I::Item> { + self.peek_nth_mut(0) + } + + /// Returns a reference to the `nth` value without advancing the iterator. 
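A small sketch of `pad_using` from the caller's side, with an invented filler closure computed from the position:

```rust
use itertools::Itertools;

fn main() {
    // Right-pad a short sequence up to a minimum length of 5.
    let padded: Vec<i32> = vec![10, 20]
        .into_iter()
        .pad_using(5, |pos| -(pos as i32))
        .collect();
    assert_eq!(padded, [10, 20, -2, -3, -4]);

    // Sequences already long enough are left untouched.
    let untouched: Vec<i32> = (0..4).pad_using(3, |_| 99).collect();
    assert_eq!(untouched, [0, 1, 2, 3]);
}
```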
+ /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// use itertools::peek_nth; + /// + /// let xs = vec![1, 2, 3]; + /// let mut iter = peek_nth(xs.into_iter()); + /// + /// assert_eq!(iter.peek_nth(0), Some(&1)); + /// assert_eq!(iter.next(), Some(1)); + /// + /// // The iterator does not advance even if we call `peek_nth` multiple times + /// assert_eq!(iter.peek_nth(0), Some(&2)); + /// assert_eq!(iter.peek_nth(1), Some(&3)); + /// assert_eq!(iter.next(), Some(2)); + /// + /// // Calling `peek_nth` past the end of the iterator will return `None` + /// assert_eq!(iter.peek_nth(1), None); + /// ``` + pub fn peek_nth(&mut self, n: usize) -> Option<&I::Item> { + let unbuffered_items = (n + 1).saturating_sub(self.buf.len()); + + self.buf.extend(self.iter.by_ref().take(unbuffered_items)); + + self.buf.get(n) + } + + /// Returns a mutable reference to the `nth` value without advancing the iterator. + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// use itertools::peek_nth; + /// + /// let xs = vec![1, 2, 3, 4, 5]; + /// let mut iter = peek_nth(xs.into_iter()); + /// + /// assert_eq!(iter.peek_nth_mut(0), Some(&mut 1)); + /// assert_eq!(iter.next(), Some(1)); + /// + /// // The iterator does not advance even if we call `peek_nth_mut` multiple times + /// assert_eq!(iter.peek_nth_mut(0), Some(&mut 2)); + /// assert_eq!(iter.peek_nth_mut(1), Some(&mut 3)); + /// assert_eq!(iter.next(), Some(2)); + /// + /// // Peek into the iterator and set the value behind the mutable reference. + /// if let Some(p) = iter.peek_nth_mut(1) { + /// assert_eq!(*p, 4); + /// *p = 9; + /// } + /// + /// // The value we put in reappears as the iterator continues. + /// assert_eq!(iter.next(), Some(3)); + /// assert_eq!(iter.next(), Some(9)); + /// + /// // Calling `peek_nth_mut` past the end of the iterator will return `None` + /// assert_eq!(iter.peek_nth_mut(1), None); + /// ``` + pub fn peek_nth_mut(&mut self, n: usize) -> Option<&mut I::Item> { + let unbuffered_items = (n + 1).saturating_sub(self.buf.len()); + + self.buf.extend(self.iter.by_ref().take(unbuffered_items)); + + self.buf.get_mut(n) + } + + /// Works exactly like the `next_if` method in [`std::iter::Peekable`]. + pub fn next_if(&mut self, func: impl FnOnce(&I::Item) -> bool) -> Option { + match self.next() { + Some(item) if func(&item) => Some(item), + Some(item) => { + self.buf.push_front(item); + None + } + _ => None, + } + } + + /// Works exactly like the `next_if_eq` method in [`std::iter::Peekable`]. 
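A sketch of `next_if` driving a tiny lookahead scanner; the first non-matching element stays available, which is the property the method exists for (input string invented):

```rust
use itertools::peek_nth;

fn main() {
    let mut it = peek_nth("123abc".chars());

    // Consume the leading digits only.
    let mut number = String::new();
    while let Some(d) = it.next_if(|c| c.is_ascii_digit()) {
        number.push(d);
    }
    assert_eq!(number, "123");

    // The first non-digit was inspected but not consumed.
    assert_eq!(it.next(), Some('a'));
}
```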
+ pub fn next_if_eq(&mut self, expected: &T) -> Option + where + T: ?Sized, + I::Item: PartialEq, + { + self.next_if(|next| next == expected) + } +} + +impl Iterator for PeekNth +where + I: Iterator, +{ + type Item = I::Item; + + fn next(&mut self) -> Option { + self.buf.pop_front().or_else(|| self.iter.next()) + } + + fn size_hint(&self) -> (usize, Option) { + size_hint::add_scalar(self.iter.size_hint(), self.buf.len()) + } + + fn fold(self, mut init: B, mut f: F) -> B + where + F: FnMut(B, Self::Item) -> B, + { + init = self.buf.into_iter().fold(init, &mut f); + self.iter.fold(init, f) + } +} + +impl ExactSizeIterator for PeekNth where I: ExactSizeIterator {} + +impl PeekingNext for PeekNth +where + I: Iterator, +{ + fn peeking_next(&mut self, accept: F) -> Option + where + F: FnOnce(&Self::Item) -> bool, + { + self.peek().filter(|item| accept(item))?; + self.next() + } +} diff --git a/vendor/itertools/src/peeking_take_while.rs b/vendor/itertools/src/peeking_take_while.rs new file mode 100644 index 00000000000000..19872a964fddcd --- /dev/null +++ b/vendor/itertools/src/peeking_take_while.rs @@ -0,0 +1,201 @@ +use crate::PutBack; +#[cfg(feature = "use_alloc")] +use crate::PutBackN; +use crate::RepeatN; +use std::iter::Peekable; + +/// An iterator that allows peeking at an element before deciding to accept it. +/// +/// See [`.peeking_take_while()`](crate::Itertools::peeking_take_while) +/// for more information. +/// +/// This is implemented by peeking adaptors like peekable and put back, +/// but also by a few iterators that can be peeked natively, like the slice’s +/// by reference iterator ([`std::slice::Iter`]). +pub trait PeekingNext: Iterator { + /// Pass a reference to the next iterator element to the closure `accept`; + /// if `accept` returns `true`, return it as the next element, + /// else `None`. + fn peeking_next(&mut self, accept: F) -> Option + where + Self: Sized, + F: FnOnce(&Self::Item) -> bool; +} + +impl<'a, I> PeekingNext for &'a mut I +where + I: PeekingNext, +{ + fn peeking_next(&mut self, accept: F) -> Option + where + F: FnOnce(&Self::Item) -> bool, + { + (*self).peeking_next(accept) + } +} + +impl PeekingNext for Peekable +where + I: Iterator, +{ + fn peeking_next(&mut self, accept: F) -> Option + where + F: FnOnce(&Self::Item) -> bool, + { + if let Some(r) = self.peek() { + if !accept(r) { + return None; + } + } + self.next() + } +} + +impl PeekingNext for PutBack +where + I: Iterator, +{ + fn peeking_next(&mut self, accept: F) -> Option + where + F: FnOnce(&Self::Item) -> bool, + { + if let Some(r) = self.next() { + if !accept(&r) { + self.put_back(r); + return None; + } + Some(r) + } else { + None + } + } +} + +#[cfg(feature = "use_alloc")] +impl PeekingNext for PutBackN +where + I: Iterator, +{ + fn peeking_next(&mut self, accept: F) -> Option + where + F: FnOnce(&Self::Item) -> bool, + { + if let Some(r) = self.next() { + if !accept(&r) { + self.put_back(r); + return None; + } + Some(r) + } else { + None + } + } +} + +impl PeekingNext for RepeatN { + fn peeking_next(&mut self, accept: F) -> Option + where + F: FnOnce(&Self::Item) -> bool, + { + let r = self.elt.as_ref()?; + if !accept(r) { + return None; + } + self.next() + } +} + +/// An iterator adaptor that takes items while a closure returns `true`. +/// +/// See [`.peeking_take_while()`](crate::Itertools::peeking_take_while) +/// for more information. 
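A sketch contrasting `peeking_take_while` with the standard `take_while`: the first rejected element is not lost, because only `peeking_next` looked at it (sample data invented):

```rust
use itertools::Itertools;

fn main() {
    let mut it = vec![1, 2, 3, 10, 4].into_iter().peekable();

    let small: Vec<_> = it.peeking_take_while(|&x| x < 5).collect();
    assert_eq!(small, [1, 2, 3]);

    // 10 is still in the underlying iterator.
    assert_eq!(it.next(), Some(10));
}
```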
+#[must_use = "iterator adaptors are lazy and do nothing unless consumed"] +pub struct PeekingTakeWhile<'a, I, F> +where + I: Iterator + 'a, +{ + iter: &'a mut I, + f: F, +} + +impl<'a, I, F> std::fmt::Debug for PeekingTakeWhile<'a, I, F> +where + I: Iterator + std::fmt::Debug + 'a, +{ + debug_fmt_fields!(PeekingTakeWhile, iter); +} + +/// Create a `PeekingTakeWhile` +pub fn peeking_take_while(iter: &mut I, f: F) -> PeekingTakeWhile +where + I: Iterator, +{ + PeekingTakeWhile { iter, f } +} + +impl<'a, I, F> Iterator for PeekingTakeWhile<'a, I, F> +where + I: PeekingNext, + F: FnMut(&I::Item) -> bool, +{ + type Item = I::Item; + fn next(&mut self) -> Option { + self.iter.peeking_next(&mut self.f) + } + + fn size_hint(&self) -> (usize, Option) { + (0, self.iter.size_hint().1) + } +} + +impl<'a, I, F> PeekingNext for PeekingTakeWhile<'a, I, F> +where + I: PeekingNext, + F: FnMut(&I::Item) -> bool, +{ + fn peeking_next(&mut self, g: G) -> Option + where + G: FnOnce(&Self::Item) -> bool, + { + let f = &mut self.f; + self.iter.peeking_next(|r| f(r) && g(r)) + } +} + +// Some iterators are so lightweight we can simply clone them to save their +// state and use that for peeking. +macro_rules! peeking_next_by_clone { + ([$($typarm:tt)*] $type_:ty) => { + impl<$($typarm)*> PeekingNext for $type_ { + fn peeking_next(&mut self, accept: F) -> Option + where F: FnOnce(&Self::Item) -> bool + { + let saved_state = self.clone(); + if let Some(r) = self.next() { + if !accept(&r) { + *self = saved_state; + } else { + return Some(r) + } + } + None + } + } + } +} + +peeking_next_by_clone! { ['a, T] ::std::slice::Iter<'a, T> } +peeking_next_by_clone! { ['a] ::std::str::Chars<'a> } +peeking_next_by_clone! { ['a] ::std::str::CharIndices<'a> } +peeking_next_by_clone! { ['a] ::std::str::Bytes<'a> } +peeking_next_by_clone! { ['a, T] ::std::option::Iter<'a, T> } +peeking_next_by_clone! { ['a, T] ::std::result::Iter<'a, T> } +peeking_next_by_clone! { [T] ::std::iter::Empty } +#[cfg(feature = "use_alloc")] +peeking_next_by_clone! { ['a, T] alloc::collections::linked_list::Iter<'a, T> } +#[cfg(feature = "use_alloc")] +peeking_next_by_clone! { ['a, T] alloc::collections::vec_deque::Iter<'a, T> } + +// cloning a Rev has no extra overhead; peekable and put backs are never DEI. +peeking_next_by_clone! { [I: Clone + PeekingNext + DoubleEndedIterator] +::std::iter::Rev } diff --git a/vendor/itertools/src/permutations.rs b/vendor/itertools/src/permutations.rs new file mode 100644 index 00000000000000..91389a73a7528f --- /dev/null +++ b/vendor/itertools/src/permutations.rs @@ -0,0 +1,186 @@ +use alloc::boxed::Box; +use alloc::vec::Vec; +use std::fmt; +use std::iter::once; +use std::iter::FusedIterator; + +use super::lazy_buffer::LazyBuffer; +use crate::size_hint::{self, SizeHint}; + +/// An iterator adaptor that iterates through all the `k`-permutations of the +/// elements from an iterator. +/// +/// See [`.permutations()`](crate::Itertools::permutations) for +/// more information. +#[must_use = "iterator adaptors are lazy and do nothing unless consumed"] +pub struct Permutations { + vals: LazyBuffer, + state: PermutationState, +} + +impl Clone for Permutations +where + I: Clone + Iterator, + I::Item: Clone, +{ + clone_fields!(vals, state); +} + +#[derive(Clone, Debug)] +enum PermutationState { + /// No permutation generated yet. + Start { k: usize }, + /// Values from the iterator are not fully loaded yet so `n` is still unknown. 
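A short usage sketch of the permutations adaptor defined here, showing the documented emission order and the `n! / (n - k)!` count (input invented):

```rust
use itertools::Itertools;

fn main() {
    // All ordered arrangements of two items drawn from three.
    let perms: Vec<Vec<char>> = "abc".chars().permutations(2).collect();
    assert_eq!(
        perms,
        [
            vec!['a', 'b'], vec!['a', 'c'],
            vec!['b', 'a'], vec!['b', 'c'],
            vec!['c', 'a'], vec!['c', 'b'],
        ]
    );

    // 3! / (3 - 2)! = 6 permutations in total.
    assert_eq!("abc".chars().permutations(2).count(), 6);
}
```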
+ Buffered { k: usize, min_n: usize }, + /// All values from the iterator are known so `n` is known. + Loaded { + indices: Box<[usize]>, + cycles: Box<[usize]>, + }, + /// No permutation left to generate. + End, +} + +impl fmt::Debug for Permutations +where + I: Iterator + fmt::Debug, + I::Item: fmt::Debug, +{ + debug_fmt_fields!(Permutations, vals, state); +} + +pub fn permutations(iter: I, k: usize) -> Permutations { + Permutations { + vals: LazyBuffer::new(iter), + state: PermutationState::Start { k }, + } +} + +impl Iterator for Permutations +where + I: Iterator, + I::Item: Clone, +{ + type Item = Vec; + + fn next(&mut self) -> Option { + let Self { vals, state } = self; + match state { + PermutationState::Start { k: 0 } => { + *state = PermutationState::End; + Some(Vec::new()) + } + &mut PermutationState::Start { k } => { + vals.prefill(k); + if vals.len() != k { + *state = PermutationState::End; + return None; + } + *state = PermutationState::Buffered { k, min_n: k }; + Some(vals[0..k].to_vec()) + } + PermutationState::Buffered { ref k, min_n } => { + if vals.get_next() { + let item = (0..*k - 1) + .chain(once(*min_n)) + .map(|i| vals[i].clone()) + .collect(); + *min_n += 1; + Some(item) + } else { + let n = *min_n; + let prev_iteration_count = n - *k + 1; + let mut indices: Box<[_]> = (0..n).collect(); + let mut cycles: Box<[_]> = (n - k..n).rev().collect(); + // Advance the state to the correct point. + for _ in 0..prev_iteration_count { + if advance(&mut indices, &mut cycles) { + *state = PermutationState::End; + return None; + } + } + let item = vals.get_at(&indices[0..*k]); + *state = PermutationState::Loaded { indices, cycles }; + Some(item) + } + } + PermutationState::Loaded { indices, cycles } => { + if advance(indices, cycles) { + *state = PermutationState::End; + return None; + } + let k = cycles.len(); + Some(vals.get_at(&indices[0..k])) + } + PermutationState::End => None, + } + } + + fn count(self) -> usize { + let Self { vals, state } = self; + let n = vals.count(); + state.size_hint_for(n).1.unwrap() + } + + fn size_hint(&self) -> SizeHint { + let (mut low, mut upp) = self.vals.size_hint(); + low = self.state.size_hint_for(low).0; + upp = upp.and_then(|n| self.state.size_hint_for(n).1); + (low, upp) + } +} + +impl FusedIterator for Permutations +where + I: Iterator, + I::Item: Clone, +{ +} + +fn advance(indices: &mut [usize], cycles: &mut [usize]) -> bool { + let n = indices.len(); + let k = cycles.len(); + // NOTE: if `cycles` are only zeros, then we reached the last permutation. + for i in (0..k).rev() { + if cycles[i] == 0 { + cycles[i] = n - i - 1; + indices[i..].rotate_left(1); + } else { + let swap_index = n - cycles[i]; + indices.swap(i, swap_index); + cycles[i] -= 1; + return false; + } + } + true +} + +impl PermutationState { + fn size_hint_for(&self, n: usize) -> SizeHint { + // At the beginning, there are `n!/(n-k)!` items to come. + let at_start = |n, k| { + debug_assert!(n >= k); + let total = (n - k + 1..=n).try_fold(1usize, |acc, i| acc.checked_mul(i)); + (total.unwrap_or(usize::MAX), total) + }; + match *self { + Self::Start { k } if n < k => (0, Some(0)), + Self::Start { k } => at_start(n, k), + Self::Buffered { k, min_n } => { + // Same as `Start` minus the previously generated items. 
+ size_hint::sub_scalar(at_start(n, k), min_n - k + 1) + } + Self::Loaded { + ref indices, + ref cycles, + } => { + let count = cycles.iter().enumerate().try_fold(0usize, |acc, (i, &c)| { + acc.checked_mul(indices.len() - i) + .and_then(|count| count.checked_add(c)) + }); + (count.unwrap_or(usize::MAX), count) + } + Self::End => (0, Some(0)), + } + } +} diff --git a/vendor/itertools/src/powerset.rs b/vendor/itertools/src/powerset.rs new file mode 100644 index 00000000000000..734eaf6149ac66 --- /dev/null +++ b/vendor/itertools/src/powerset.rs @@ -0,0 +1,131 @@ +use alloc::vec::Vec; +use std::fmt; +use std::iter::FusedIterator; + +use super::combinations::{combinations, Combinations}; +use crate::adaptors::checked_binomial; +use crate::size_hint::{self, SizeHint}; + +/// An iterator to iterate through the powerset of the elements from an iterator. +/// +/// See [`.powerset()`](crate::Itertools::powerset) for more +/// information. +#[must_use = "iterator adaptors are lazy and do nothing unless consumed"] +pub struct Powerset { + combs: Combinations, +} + +impl Clone for Powerset +where + I: Clone + Iterator, + I::Item: Clone, +{ + clone_fields!(combs); +} + +impl fmt::Debug for Powerset +where + I: Iterator + fmt::Debug, + I::Item: fmt::Debug, +{ + debug_fmt_fields!(Powerset, combs); +} + +/// Create a new `Powerset` from a clonable iterator. +pub fn powerset(src: I) -> Powerset +where + I: Iterator, + I::Item: Clone, +{ + Powerset { + combs: combinations(src, 0), + } +} + +impl Powerset { + /// Returns true if `k` has been incremented, false otherwise. + fn increment_k(&mut self) -> bool { + if self.combs.k() < self.combs.n() || self.combs.k() == 0 { + self.combs.reset(self.combs.k() + 1); + true + } else { + false + } + } +} + +impl Iterator for Powerset +where + I: Iterator, + I::Item: Clone, +{ + type Item = Vec; + + fn next(&mut self) -> Option { + if let Some(elt) = self.combs.next() { + Some(elt) + } else if self.increment_k() { + self.combs.next() + } else { + None + } + } + + fn nth(&mut self, mut n: usize) -> Option { + loop { + match self.combs.try_nth(n) { + Ok(item) => return Some(item), + Err(steps) => { + if !self.increment_k() { + return None; + } + n -= steps; + } + } + } + } + + fn size_hint(&self) -> SizeHint { + let k = self.combs.k(); + // Total bounds for source iterator. + let (n_min, n_max) = self.combs.src().size_hint(); + let low = remaining_for(n_min, k).unwrap_or(usize::MAX); + let upp = n_max.and_then(|n| remaining_for(n, k)); + size_hint::add(self.combs.size_hint(), (low, upp)) + } + + fn count(self) -> usize { + let k = self.combs.k(); + let (n, combs_count) = self.combs.n_and_count(); + combs_count + remaining_for(n, k).unwrap() + } + + fn fold(self, mut init: B, mut f: F) -> B + where + F: FnMut(B, Self::Item) -> B, + { + let mut it = self.combs; + if it.k() == 0 { + init = it.by_ref().fold(init, &mut f); + it.reset(1); + } + init = it.by_ref().fold(init, &mut f); + // n is now known for sure because k >= 1 and all k-combinations have been generated. 
+ for k in it.k() + 1..=it.n() { + it.reset(k); + init = it.by_ref().fold(init, &mut f); + } + init + } +} + +impl FusedIterator for Powerset +where + I: Iterator, + I::Item: Clone, +{ +} + +fn remaining_for(n: usize, k: usize) -> Option { + (k + 1..=n).try_fold(0usize, |sum, i| sum.checked_add(checked_binomial(n, i)?)) +} diff --git a/vendor/itertools/src/process_results_impl.rs b/vendor/itertools/src/process_results_impl.rs new file mode 100644 index 00000000000000..ad6c60d3cfb33a --- /dev/null +++ b/vendor/itertools/src/process_results_impl.rs @@ -0,0 +1,108 @@ +#[cfg(doc)] +use crate::Itertools; + +/// An iterator that produces only the `T` values as long as the +/// inner iterator produces `Ok(T)`. +/// +/// Used by [`process_results`](crate::process_results), see its docs +/// for more information. +#[must_use = "iterator adaptors are lazy and do nothing unless consumed"] +#[derive(Debug)] +pub struct ProcessResults<'a, I, E: 'a> { + error: &'a mut Result<(), E>, + iter: I, +} + +impl<'a, I, E> ProcessResults<'a, I, E> { + #[inline(always)] + fn next_body(&mut self, item: Option>) -> Option { + match item { + Some(Ok(x)) => Some(x), + Some(Err(e)) => { + *self.error = Err(e); + None + } + None => None, + } + } +} + +impl<'a, I, T, E> Iterator for ProcessResults<'a, I, E> +where + I: Iterator>, +{ + type Item = T; + + fn next(&mut self) -> Option { + let item = self.iter.next(); + self.next_body(item) + } + + fn size_hint(&self) -> (usize, Option) { + (0, self.iter.size_hint().1) + } + + fn fold(mut self, init: B, mut f: F) -> B + where + Self: Sized, + F: FnMut(B, Self::Item) -> B, + { + let error = self.error; + self.iter + .try_fold(init, |acc, opt| match opt { + Ok(x) => Ok(f(acc, x)), + Err(e) => { + *error = Err(e); + Err(acc) + } + }) + .unwrap_or_else(|e| e) + } +} + +impl<'a, I, T, E> DoubleEndedIterator for ProcessResults<'a, I, E> +where + I: Iterator>, + I: DoubleEndedIterator, +{ + fn next_back(&mut self) -> Option { + let item = self.iter.next_back(); + self.next_body(item) + } + + fn rfold(mut self, init: B, mut f: F) -> B + where + F: FnMut(B, Self::Item) -> B, + { + let error = self.error; + self.iter + .try_rfold(init, |acc, opt| match opt { + Ok(x) => Ok(f(acc, x)), + Err(e) => { + *error = Err(e); + Err(acc) + } + }) + .unwrap_or_else(|e| e) + } +} + +/// “Lift” a function of the values of an iterator so that it can process +/// an iterator of `Result` values instead. +/// +/// [`IntoIterator`] enabled version of [`Itertools::process_results`]. +pub fn process_results(iterable: I, processor: F) -> Result +where + I: IntoIterator>, + F: FnOnce(ProcessResults) -> R, +{ + let iter = iterable.into_iter(); + let mut error = Ok(()); + + let result = processor(ProcessResults { + error: &mut error, + iter, + }); + + error.map(|_| result) +} diff --git a/vendor/itertools/src/put_back_n_impl.rs b/vendor/itertools/src/put_back_n_impl.rs new file mode 100644 index 00000000000000..a9eb4179c49a05 --- /dev/null +++ b/vendor/itertools/src/put_back_n_impl.rs @@ -0,0 +1,71 @@ +use alloc::vec::Vec; + +use crate::size_hint; + +/// An iterator adaptor that allows putting multiple +/// items in front of the iterator. +/// +/// Iterator element type is `I::Item`. +#[derive(Debug, Clone)] +#[must_use = "iterator adaptors are lazy and do nothing unless consumed"] +pub struct PutBackN { + top: Vec, + iter: I, +} + +/// Create an iterator where you can put back multiple values to the front +/// of the iteration. +/// +/// Iterator element type is `I::Item`. 
+pub fn put_back_n(iterable: I) -> PutBackN +where + I: IntoIterator, +{ + PutBackN { + top: Vec::new(), + iter: iterable.into_iter(), + } +} + +impl PutBackN { + /// Puts `x` in front of the iterator. + /// + /// The values are yielded in order of the most recently put back + /// values first. + /// + /// ```rust + /// use itertools::put_back_n; + /// + /// let mut it = put_back_n(1..5); + /// it.next(); + /// it.put_back(1); + /// it.put_back(0); + /// + /// assert!(itertools::equal(it, 0..5)); + /// ``` + #[inline] + pub fn put_back(&mut self, x: I::Item) { + self.top.push(x); + } +} + +impl Iterator for PutBackN { + type Item = I::Item; + #[inline] + fn next(&mut self) -> Option { + self.top.pop().or_else(|| self.iter.next()) + } + + #[inline] + fn size_hint(&self) -> (usize, Option) { + size_hint::add_scalar(self.iter.size_hint(), self.top.len()) + } + + fn fold(self, mut init: B, mut f: F) -> B + where + F: FnMut(B, Self::Item) -> B, + { + init = self.top.into_iter().rfold(init, &mut f); + self.iter.fold(init, f) + } +} diff --git a/vendor/itertools/src/rciter_impl.rs b/vendor/itertools/src/rciter_impl.rs new file mode 100644 index 00000000000000..e3b7532069730d --- /dev/null +++ b/vendor/itertools/src/rciter_impl.rs @@ -0,0 +1,102 @@ +use alloc::rc::Rc; +use std::cell::RefCell; +use std::iter::{FusedIterator, IntoIterator}; + +/// A wrapper for `Rc>`, that implements the `Iterator` trait. +#[derive(Debug)] +#[must_use = "iterator adaptors are lazy and do nothing unless consumed"] +pub struct RcIter { + /// The boxed iterator. + pub rciter: Rc>, +} + +/// Return an iterator inside a `Rc>` wrapper. +/// +/// The returned `RcIter` can be cloned, and each clone will refer back to the +/// same original iterator. +/// +/// `RcIter` allows doing interesting things like using `.zip()` on an iterator with +/// itself, at the cost of runtime borrow checking which may have a performance +/// penalty. +/// +/// Iterator element type is `Self::Item`. +/// +/// ``` +/// use itertools::rciter; +/// use itertools::zip; +/// +/// // In this example a range iterator is created and we iterate it using +/// // three separate handles (two of them given to zip). +/// // We also use the IntoIterator implementation for `&RcIter`. +/// +/// let mut iter = rciter(0..9); +/// let mut z = zip(&iter, &iter); +/// +/// assert_eq!(z.next(), Some((0, 1))); +/// assert_eq!(z.next(), Some((2, 3))); +/// assert_eq!(z.next(), Some((4, 5))); +/// assert_eq!(iter.next(), Some(6)); +/// assert_eq!(z.next(), Some((7, 8))); +/// assert_eq!(z.next(), None); +/// ``` +/// +/// **Panics** in iterator methods if a borrow error is encountered in the +/// iterator methods. It can only happen if the `RcIter` is reentered in +/// `.next()`, i.e. if it somehow participates in an “iterator knot” +/// where it is an adaptor of itself. +pub fn rciter(iterable: I) -> RcIter +where + I: IntoIterator, +{ + RcIter { + rciter: Rc::new(RefCell::new(iterable.into_iter())), + } +} + +impl Clone for RcIter { + clone_fields!(rciter); +} + +impl Iterator for RcIter +where + I: Iterator, +{ + type Item = A; + #[inline] + fn next(&mut self) -> Option { + self.rciter.borrow_mut().next() + } + + #[inline] + fn size_hint(&self) -> (usize, Option) { + // To work sanely with other API that assume they own an iterator, + // so it can't change in other places, we can't guarantee as much + // in our size_hint. Other clones may drain values under our feet. 
+ (0, self.rciter.borrow().size_hint().1) + } +} + +impl DoubleEndedIterator for RcIter +where + I: DoubleEndedIterator, +{ + #[inline] + fn next_back(&mut self) -> Option { + self.rciter.borrow_mut().next_back() + } +} + +/// Return an iterator from `&RcIter` (by simply cloning it). +impl<'a, I> IntoIterator for &'a RcIter +where + I: Iterator, +{ + type Item = I::Item; + type IntoIter = RcIter; + + fn into_iter(self) -> RcIter { + self.clone() + } +} + +impl FusedIterator for RcIter where I: FusedIterator {} diff --git a/vendor/itertools/src/repeatn.rs b/vendor/itertools/src/repeatn.rs new file mode 100644 index 00000000000000..d86ad9facd3324 --- /dev/null +++ b/vendor/itertools/src/repeatn.rs @@ -0,0 +1,83 @@ +use std::iter::FusedIterator; + +/// An iterator that produces *n* repetitions of an element. +/// +/// See [`repeat_n()`](crate::repeat_n) for more information. +#[must_use = "iterators are lazy and do nothing unless consumed"] +#[derive(Clone, Debug)] +pub struct RepeatN { + pub(crate) elt: Option, + n: usize, +} + +/// Create an iterator that produces `n` repetitions of `element`. +pub fn repeat_n(element: A, n: usize) -> RepeatN +where + A: Clone, +{ + if n == 0 { + RepeatN { elt: None, n } + } else { + RepeatN { + elt: Some(element), + n, + } + } +} + +impl Iterator for RepeatN +where + A: Clone, +{ + type Item = A; + + fn next(&mut self) -> Option { + if self.n > 1 { + self.n -= 1; + self.elt.as_ref().cloned() + } else { + self.n = 0; + self.elt.take() + } + } + + fn size_hint(&self) -> (usize, Option) { + (self.n, Some(self.n)) + } + + fn fold(self, mut init: B, mut f: F) -> B + where + F: FnMut(B, Self::Item) -> B, + { + match self { + Self { elt: Some(elt), n } => { + debug_assert!(n > 0); + init = (1..n).map(|_| elt.clone()).fold(init, &mut f); + f(init, elt) + } + _ => init, + } + } +} + +impl DoubleEndedIterator for RepeatN +where + A: Clone, +{ + #[inline] + fn next_back(&mut self) -> Option { + self.next() + } + + #[inline] + fn rfold(self, init: B, f: F) -> B + where + F: FnMut(B, Self::Item) -> B, + { + self.fold(init, f) + } +} + +impl ExactSizeIterator for RepeatN where A: Clone {} + +impl FusedIterator for RepeatN where A: Clone {} diff --git a/vendor/itertools/src/size_hint.rs b/vendor/itertools/src/size_hint.rs new file mode 100644 index 00000000000000..6cfead7f2b42d4 --- /dev/null +++ b/vendor/itertools/src/size_hint.rs @@ -0,0 +1,94 @@ +//! Arithmetic on `Iterator.size_hint()` values. +//! + +use std::cmp; + +/// `SizeHint` is the return type of `Iterator::size_hint()`. +pub type SizeHint = (usize, Option); + +/// Add `SizeHint` correctly. +#[inline] +pub fn add(a: SizeHint, b: SizeHint) -> SizeHint { + let min = a.0.saturating_add(b.0); + let max = match (a.1, b.1) { + (Some(x), Some(y)) => x.checked_add(y), + _ => None, + }; + + (min, max) +} + +/// Add `x` correctly to a `SizeHint`. +#[inline] +pub fn add_scalar(sh: SizeHint, x: usize) -> SizeHint { + let (mut low, mut hi) = sh; + low = low.saturating_add(x); + hi = hi.and_then(|elt| elt.checked_add(x)); + (low, hi) +} + +/// Subtract `x` correctly from a `SizeHint`. 
+#[inline] +pub fn sub_scalar(sh: SizeHint, x: usize) -> SizeHint { + let (mut low, mut hi) = sh; + low = low.saturating_sub(x); + hi = hi.map(|elt| elt.saturating_sub(x)); + (low, hi) +} + +/// Multiply `SizeHint` correctly +#[inline] +pub fn mul(a: SizeHint, b: SizeHint) -> SizeHint { + let low = a.0.saturating_mul(b.0); + let hi = match (a.1, b.1) { + (Some(x), Some(y)) => x.checked_mul(y), + (Some(0), None) | (None, Some(0)) => Some(0), + _ => None, + }; + (low, hi) +} + +/// Multiply `x` correctly with a `SizeHint`. +#[inline] +pub fn mul_scalar(sh: SizeHint, x: usize) -> SizeHint { + let (mut low, mut hi) = sh; + low = low.saturating_mul(x); + hi = hi.and_then(|elt| elt.checked_mul(x)); + (low, hi) +} + +/// Return the maximum +#[inline] +pub fn max(a: SizeHint, b: SizeHint) -> SizeHint { + let (a_lower, a_upper) = a; + let (b_lower, b_upper) = b; + + let lower = cmp::max(a_lower, b_lower); + + let upper = match (a_upper, b_upper) { + (Some(x), Some(y)) => Some(cmp::max(x, y)), + _ => None, + }; + + (lower, upper) +} + +/// Return the minimum +#[inline] +pub fn min(a: SizeHint, b: SizeHint) -> SizeHint { + let (a_lower, a_upper) = a; + let (b_lower, b_upper) = b; + let lower = cmp::min(a_lower, b_lower); + let upper = match (a_upper, b_upper) { + (Some(u1), Some(u2)) => Some(cmp::min(u1, u2)), + _ => a_upper.or(b_upper), + }; + (lower, upper) +} + +#[test] +fn mul_size_hints() { + assert_eq!(mul((3, Some(4)), (3, Some(4))), (9, Some(16))); + assert_eq!(mul((3, Some(4)), (usize::MAX, None)), (usize::MAX, None)); + assert_eq!(mul((3, None), (0, Some(0))), (0, Some(0))); +} diff --git a/vendor/itertools/src/sources.rs b/vendor/itertools/src/sources.rs new file mode 100644 index 00000000000000..c405ffdc7196e9 --- /dev/null +++ b/vendor/itertools/src/sources.rs @@ -0,0 +1,153 @@ +//! Iterators that are sources (produce elements from parameters, +//! not from another iterator). +#![allow(deprecated)] + +use std::fmt; +use std::mem; + +/// Creates a new unfold source with the specified closure as the "iterator +/// function" and an initial state to eventually pass to the closure +/// +/// `unfold` is a general iterator builder: it has a mutable state value, +/// and a closure with access to the state that produces the next value. +/// +/// This more or less equivalent to a regular struct with an [`Iterator`] +/// implementation, and is useful for one-off iterators. +/// +/// ``` +/// // an iterator that yields sequential Fibonacci numbers, +/// // and stops at the maximum representable value. 
+/// +/// use itertools::unfold; +/// +/// let mut fibonacci = unfold((1u32, 1u32), |(x1, x2)| { +/// // Attempt to get the next Fibonacci number +/// let next = x1.saturating_add(*x2); +/// +/// // Shift left: ret <- x1 <- x2 <- next +/// let ret = *x1; +/// *x1 = *x2; +/// *x2 = next; +/// +/// // If addition has saturated at the maximum, we are finished +/// if ret == *x1 && ret > 1 { +/// None +/// } else { +/// Some(ret) +/// } +/// }); +/// +/// itertools::assert_equal(fibonacci.by_ref().take(8), +/// vec![1, 1, 2, 3, 5, 8, 13, 21]); +/// assert_eq!(fibonacci.last(), Some(2_971_215_073)) +/// ``` +#[deprecated( + note = "Use [std::iter::from_fn](https://doc.rust-lang.org/std/iter/fn.from_fn.html) instead", + since = "0.13.0" +)] +pub fn unfold(initial_state: St, f: F) -> Unfold +where + F: FnMut(&mut St) -> Option, +{ + Unfold { + f, + state: initial_state, + } +} + +impl fmt::Debug for Unfold +where + St: fmt::Debug, +{ + debug_fmt_fields!(Unfold, state); +} + +/// See [`unfold`](crate::unfold) for more information. +#[derive(Clone)] +#[must_use = "iterators are lazy and do nothing unless consumed"] +#[deprecated( + note = "Use [std::iter::FromFn](https://doc.rust-lang.org/std/iter/struct.FromFn.html) instead", + since = "0.13.0" +)] +pub struct Unfold { + f: F, + /// Internal state that will be passed to the closure on the next iteration + pub state: St, +} + +impl Iterator for Unfold +where + F: FnMut(&mut St) -> Option, +{ + type Item = A; + + #[inline] + fn next(&mut self) -> Option { + (self.f)(&mut self.state) + } +} + +/// An iterator that infinitely applies function to value and yields results. +/// +/// This `struct` is created by the [`iterate()`](crate::iterate) function. +/// See its documentation for more. +#[derive(Clone)] +#[must_use = "iterators are lazy and do nothing unless consumed"] +pub struct Iterate { + state: St, + f: F, +} + +impl fmt::Debug for Iterate +where + St: fmt::Debug, +{ + debug_fmt_fields!(Iterate, state); +} + +impl Iterator for Iterate +where + F: FnMut(&St) -> St, +{ + type Item = St; + + #[inline] + fn next(&mut self) -> Option { + let next_state = (self.f)(&self.state); + Some(mem::replace(&mut self.state, next_state)) + } + + #[inline] + fn size_hint(&self) -> (usize, Option) { + (usize::MAX, None) + } +} + +/// Creates a new iterator that infinitely applies function to value and yields results. +/// +/// ``` +/// use itertools::iterate; +/// +/// itertools::assert_equal(iterate(1, |i| i % 3 + 1).take(5), vec![1, 2, 3, 1, 2]); +/// ``` +/// +/// **Panics** if compute the next value does. +/// +/// ```should_panic +/// # use itertools::iterate; +/// let mut it = iterate(25u32, |x| x - 10).take_while(|&x| x > 10); +/// assert_eq!(it.next(), Some(25)); // `Iterate` holds 15. +/// assert_eq!(it.next(), Some(15)); // `Iterate` holds 5. +/// it.next(); // `5 - 10` overflows. +/// ``` +/// +/// You can alternatively use [`core::iter::successors`] as it better describes a finite iterator. 
+pub fn iterate(initial_value: St, f: F) -> Iterate +where + F: FnMut(&St) -> St, +{ + Iterate { + state: initial_value, + f, + } +} diff --git a/vendor/itertools/src/take_while_inclusive.rs b/vendor/itertools/src/take_while_inclusive.rs new file mode 100644 index 00000000000000..420da9847af977 --- /dev/null +++ b/vendor/itertools/src/take_while_inclusive.rs @@ -0,0 +1,96 @@ +use core::iter::FusedIterator; +use std::fmt; + +/// An iterator adaptor that consumes elements while the given predicate is +/// `true`, including the element for which the predicate first returned +/// `false`. +/// +/// See [`.take_while_inclusive()`](crate::Itertools::take_while_inclusive) +/// for more information. +#[must_use = "iterator adaptors are lazy and do nothing unless consumed"] +#[derive(Clone)] +pub struct TakeWhileInclusive { + iter: I, + predicate: F, + done: bool, +} + +impl TakeWhileInclusive +where + I: Iterator, + F: FnMut(&I::Item) -> bool, +{ + /// Create a new [`TakeWhileInclusive`] from an iterator and a predicate. + pub(crate) fn new(iter: I, predicate: F) -> Self { + Self { + iter, + predicate, + done: false, + } + } +} + +impl fmt::Debug for TakeWhileInclusive +where + I: Iterator + fmt::Debug, +{ + debug_fmt_fields!(TakeWhileInclusive, iter, done); +} + +impl Iterator for TakeWhileInclusive +where + I: Iterator, + F: FnMut(&I::Item) -> bool, +{ + type Item = I::Item; + + fn next(&mut self) -> Option { + if self.done { + None + } else { + self.iter.next().map(|item| { + if !(self.predicate)(&item) { + self.done = true; + } + item + }) + } + } + + fn size_hint(&self) -> (usize, Option) { + if self.done { + (0, Some(0)) + } else { + (0, self.iter.size_hint().1) + } + } + + fn fold(mut self, init: B, mut f: Fold) -> B + where + Fold: FnMut(B, Self::Item) -> B, + { + if self.done { + init + } else { + let predicate = &mut self.predicate; + self.iter + .try_fold(init, |mut acc, item| { + let is_ok = predicate(&item); + acc = f(acc, item); + if is_ok { + Ok(acc) + } else { + Err(acc) + } + }) + .unwrap_or_else(|err| err) + } + } +} + +impl FusedIterator for TakeWhileInclusive +where + I: Iterator, + F: FnMut(&I::Item) -> bool, +{ +} diff --git a/vendor/itertools/src/tee.rs b/vendor/itertools/src/tee.rs new file mode 100644 index 00000000000000..0984c5de963971 --- /dev/null +++ b/vendor/itertools/src/tee.rs @@ -0,0 +1,93 @@ +use super::size_hint; + +use alloc::collections::VecDeque; +use alloc::rc::Rc; +use std::cell::RefCell; + +/// Common buffer object for the two tee halves +#[derive(Debug)] +struct TeeBuffer { + backlog: VecDeque, + iter: I, + /// The owner field indicates which id should read from the backlog + owner: bool, +} + +/// One half of an iterator pair where both return the same elements. +/// +/// See [`.tee()`](crate::Itertools::tee) for more information. +#[must_use = "iterator adaptors are lazy and do nothing unless consumed"] +#[derive(Debug)] +pub struct Tee +where + I: Iterator, +{ + rcbuffer: Rc>>, + id: bool, +} + +pub fn new(iter: I) -> (Tee, Tee) +where + I: Iterator, +{ + let buffer = TeeBuffer { + backlog: VecDeque::new(), + iter, + owner: false, + }; + let t1 = Tee { + rcbuffer: Rc::new(RefCell::new(buffer)), + id: true, + }; + let t2 = Tee { + rcbuffer: t1.rcbuffer.clone(), + id: false, + }; + (t1, t2) +} + +impl Iterator for Tee +where + I: Iterator, + I::Item: Clone, +{ + type Item = I::Item; + fn next(&mut self) -> Option { + // .borrow_mut may fail here -- but only if the user has tied some kind of weird + // knot where the iterator refers back to itself. 
+ let mut buffer = self.rcbuffer.borrow_mut(); + if buffer.owner == self.id { + match buffer.backlog.pop_front() { + None => {} + some_elt => return some_elt, + } + } + match buffer.iter.next() { + None => None, + Some(elt) => { + buffer.backlog.push_back(elt.clone()); + buffer.owner = !self.id; + Some(elt) + } + } + } + + fn size_hint(&self) -> (usize, Option) { + let buffer = self.rcbuffer.borrow(); + let sh = buffer.iter.size_hint(); + + if buffer.owner == self.id { + let log_len = buffer.backlog.len(); + size_hint::add_scalar(sh, log_len) + } else { + sh + } + } +} + +impl ExactSizeIterator for Tee +where + I: ExactSizeIterator, + I::Item: Clone, +{ +} diff --git a/vendor/itertools/src/tuple_impl.rs b/vendor/itertools/src/tuple_impl.rs new file mode 100644 index 00000000000000..c0d556fc95b1f6 --- /dev/null +++ b/vendor/itertools/src/tuple_impl.rs @@ -0,0 +1,401 @@ +//! Some iterator that produces tuples + +use std::iter::Cycle; +use std::iter::Fuse; +use std::iter::FusedIterator; + +use crate::size_hint; + +// `HomogeneousTuple` is a public facade for `TupleCollect`, allowing +// tuple-related methods to be used by clients in generic contexts, while +// hiding the implementation details of `TupleCollect`. +// See https://github.com/rust-itertools/itertools/issues/387 + +/// Implemented for homogeneous tuples of size up to 12. +pub trait HomogeneousTuple: TupleCollect {} + +impl HomogeneousTuple for T {} + +/// An iterator over a incomplete tuple. +/// +/// See [`.tuples()`](crate::Itertools::tuples) and +/// [`Tuples::into_buffer()`]. +#[derive(Clone, Debug)] +pub struct TupleBuffer +where + T: HomogeneousTuple, +{ + cur: usize, + buf: T::Buffer, +} + +impl TupleBuffer +where + T: HomogeneousTuple, +{ + fn new(buf: T::Buffer) -> Self { + Self { cur: 0, buf } + } +} + +impl Iterator for TupleBuffer +where + T: HomogeneousTuple, +{ + type Item = T::Item; + + fn next(&mut self) -> Option { + let s = self.buf.as_mut(); + if let Some(ref mut item) = s.get_mut(self.cur) { + self.cur += 1; + item.take() + } else { + None + } + } + + fn size_hint(&self) -> (usize, Option) { + let buffer = &self.buf.as_ref()[self.cur..]; + let len = if buffer.is_empty() { + 0 + } else { + buffer + .iter() + .position(|x| x.is_none()) + .unwrap_or(buffer.len()) + }; + (len, Some(len)) + } +} + +impl ExactSizeIterator for TupleBuffer where T: HomogeneousTuple {} + +/// An iterator that groups the items in tuples of a specific size. +/// +/// See [`.tuples()`](crate::Itertools::tuples) for more information. +#[derive(Clone, Debug)] +#[must_use = "iterator adaptors are lazy and do nothing unless consumed"] +pub struct Tuples +where + I: Iterator, + T: HomogeneousTuple, +{ + iter: Fuse, + buf: T::Buffer, +} + +/// Create a new tuples iterator. +pub fn tuples(iter: I) -> Tuples +where + I: Iterator, + T: HomogeneousTuple, +{ + Tuples { + iter: iter.fuse(), + buf: Default::default(), + } +} + +impl Iterator for Tuples +where + I: Iterator, + T: HomogeneousTuple, +{ + type Item = T; + + fn next(&mut self) -> Option { + T::collect_from_iter(&mut self.iter, &mut self.buf) + } + + fn size_hint(&self) -> (usize, Option) { + // The number of elts we've drawn from the underlying iterator, but have + // not yet produced as a tuple. + let buffered = T::buffer_len(&self.buf); + // To that, we must add the size estimates of the underlying iterator. 
+ let (unbuffered_lo, unbuffered_hi) = self.iter.size_hint(); + // The total low estimate is the sum of the already-buffered elements, + // plus the low estimate of remaining unbuffered elements, divided by + // the tuple size. + let total_lo = add_then_div(unbuffered_lo, buffered, T::num_items()).unwrap_or(usize::MAX); + // And likewise for the total high estimate, but using the high estimate + // of the remaining unbuffered elements. + let total_hi = unbuffered_hi.and_then(|hi| add_then_div(hi, buffered, T::num_items())); + (total_lo, total_hi) + } +} + +/// `(n + a) / d` avoiding overflow when possible, returns `None` if it overflows. +fn add_then_div(n: usize, a: usize, d: usize) -> Option { + debug_assert_ne!(d, 0); + (n / d).checked_add(a / d)?.checked_add((n % d + a % d) / d) +} + +impl ExactSizeIterator for Tuples +where + I: ExactSizeIterator, + T: HomogeneousTuple, +{ +} + +impl Tuples +where + I: Iterator, + T: HomogeneousTuple, +{ + /// Return a buffer with the produced items that was not enough to be grouped in a tuple. + /// + /// ``` + /// use itertools::Itertools; + /// + /// let mut iter = (0..5).tuples(); + /// assert_eq!(Some((0, 1, 2)), iter.next()); + /// assert_eq!(None, iter.next()); + /// itertools::assert_equal(vec![3, 4], iter.into_buffer()); + /// ``` + pub fn into_buffer(self) -> TupleBuffer { + TupleBuffer::new(self.buf) + } +} + +/// An iterator over all contiguous windows that produces tuples of a specific size. +/// +/// See [`.tuple_windows()`](crate::Itertools::tuple_windows) for more +/// information. +#[must_use = "iterator adaptors are lazy and do nothing unless consumed"] +#[derive(Clone, Debug)] +pub struct TupleWindows +where + I: Iterator, + T: HomogeneousTuple, +{ + iter: I, + last: Option, +} + +/// Create a new tuple windows iterator. +pub fn tuple_windows(iter: I) -> TupleWindows +where + I: Iterator, + T: HomogeneousTuple, + T::Item: Clone, +{ + TupleWindows { last: None, iter } +} + +impl Iterator for TupleWindows +where + I: Iterator, + T: HomogeneousTuple + Clone, + T::Item: Clone, +{ + type Item = T; + + fn next(&mut self) -> Option { + if T::num_items() == 1 { + return T::collect_from_iter_no_buf(&mut self.iter); + } + if let Some(new) = self.iter.next() { + if let Some(ref mut last) = self.last { + last.left_shift_push(new); + Some(last.clone()) + } else { + use std::iter::once; + let iter = once(new).chain(&mut self.iter); + self.last = T::collect_from_iter_no_buf(iter); + self.last.clone() + } + } else { + None + } + } + + fn size_hint(&self) -> (usize, Option) { + let mut sh = self.iter.size_hint(); + // Adjust the size hint at the beginning + // OR when `num_items == 1` (but it does not change the size hint). + if self.last.is_none() { + sh = size_hint::sub_scalar(sh, T::num_items() - 1); + } + sh + } +} + +impl ExactSizeIterator for TupleWindows +where + I: ExactSizeIterator, + T: HomogeneousTuple + Clone, + T::Item: Clone, +{ +} + +impl FusedIterator for TupleWindows +where + I: FusedIterator, + T: HomogeneousTuple + Clone, + T::Item: Clone, +{ +} + +/// An iterator over all windows, wrapping back to the first elements when the +/// window would otherwise exceed the length of the iterator, producing tuples +/// of a specific size. +/// +/// See [`.circular_tuple_windows()`](crate::Itertools::circular_tuple_windows) for more +/// information. 
+#[must_use = "iterator adaptors are lazy and do nothing unless consumed"] +#[derive(Debug, Clone)] +pub struct CircularTupleWindows +where + I: Iterator + Clone, + T: TupleCollect + Clone, +{ + iter: TupleWindows, T>, + len: usize, +} + +pub fn circular_tuple_windows(iter: I) -> CircularTupleWindows +where + I: Iterator + Clone + ExactSizeIterator, + T: TupleCollect + Clone, + T::Item: Clone, +{ + let len = iter.len(); + let iter = tuple_windows(iter.cycle()); + + CircularTupleWindows { iter, len } +} + +impl Iterator for CircularTupleWindows +where + I: Iterator + Clone, + T: TupleCollect + Clone, + T::Item: Clone, +{ + type Item = T; + + fn next(&mut self) -> Option { + if self.len != 0 { + self.len -= 1; + self.iter.next() + } else { + None + } + } + + fn size_hint(&self) -> (usize, Option) { + (self.len, Some(self.len)) + } +} + +impl ExactSizeIterator for CircularTupleWindows +where + I: Iterator + Clone, + T: TupleCollect + Clone, + T::Item: Clone, +{ +} + +impl FusedIterator for CircularTupleWindows +where + I: Iterator + Clone, + T: TupleCollect + Clone, + T::Item: Clone, +{ +} + +pub trait TupleCollect: Sized { + type Item; + type Buffer: Default + AsRef<[Option]> + AsMut<[Option]>; + + fn buffer_len(buf: &Self::Buffer) -> usize { + let s = buf.as_ref(); + s.iter().position(Option::is_none).unwrap_or(s.len()) + } + + fn collect_from_iter(iter: I, buf: &mut Self::Buffer) -> Option + where + I: IntoIterator; + + fn collect_from_iter_no_buf(iter: I) -> Option + where + I: IntoIterator; + + fn num_items() -> usize; + + fn left_shift_push(&mut self, item: Self::Item); +} + +macro_rules! rev_for_each_ident{ + ($m:ident, ) => {}; + ($m:ident, $i0:ident, $($i:ident,)*) => { + rev_for_each_ident!($m, $($i,)*); + $m!($i0); + }; +} + +macro_rules! impl_tuple_collect { + ($dummy:ident,) => {}; // stop + ($dummy:ident, $($Y:ident,)*) => ( + impl_tuple_collect!($($Y,)*); + impl TupleCollect for ($(ignore_ident!($Y, A),)*) { + type Item = A; + type Buffer = [Option; count_ident!($($Y)*) - 1]; + + #[allow(unused_assignments, unused_mut)] + fn collect_from_iter(iter: I, buf: &mut Self::Buffer) -> Option + where I: IntoIterator + { + let mut iter = iter.into_iter(); + $( + let mut $Y = None; + )* + + loop { + $( + $Y = iter.next(); + if $Y.is_none() { + break + } + )* + return Some(($($Y.unwrap()),*,)) + } + + let mut i = 0; + let mut s = buf.as_mut(); + $( + if i < s.len() { + s[i] = $Y; + i += 1; + } + )* + return None; + } + + fn collect_from_iter_no_buf(iter: I) -> Option + where I: IntoIterator + { + let mut iter = iter.into_iter(); + + Some(($( + { let $Y = iter.next()?; $Y }, + )*)) + } + + fn num_items() -> usize { + count_ident!($($Y)*) + } + + fn left_shift_push(&mut self, mut item: A) { + use std::mem::replace; + + let &mut ($(ref mut $Y),*,) = self; + macro_rules! replace_item{($i:ident) => { + item = replace($i, item); + }} + rev_for_each_ident!(replace_item, $($Y,)*); + drop(item); + } + } + ) +} +impl_tuple_collect!(dummy, a, b, c, d, e, f, g, h, i, j, k, l,); diff --git a/vendor/itertools/src/unique_impl.rs b/vendor/itertools/src/unique_impl.rs new file mode 100644 index 00000000000000..0f6397e48fb9de --- /dev/null +++ b/vendor/itertools/src/unique_impl.rs @@ -0,0 +1,188 @@ +use std::collections::hash_map::Entry; +use std::collections::HashMap; +use std::fmt; +use std::hash::Hash; +use std::iter::FusedIterator; + +/// An iterator adapter to filter out duplicate elements. +/// +/// See [`.unique_by()`](crate::Itertools::unique) for more information. 
+#[derive(Clone)] +#[must_use = "iterator adaptors are lazy and do nothing unless consumed"] +pub struct UniqueBy { + iter: I, + // Use a Hashmap for the Entry API in order to prevent hashing twice. + // This can maybe be replaced with a HashSet once `get_or_insert_with` + // or a proper Entry API for Hashset is stable and meets this msrv + used: HashMap, + f: F, +} + +impl fmt::Debug for UniqueBy +where + I: Iterator + fmt::Debug, + V: fmt::Debug + Hash + Eq, +{ + debug_fmt_fields!(UniqueBy, iter, used); +} + +/// Create a new `UniqueBy` iterator. +pub fn unique_by(iter: I, f: F) -> UniqueBy +where + V: Eq + Hash, + F: FnMut(&I::Item) -> V, + I: Iterator, +{ + UniqueBy { + iter, + used: HashMap::new(), + f, + } +} + +// count the number of new unique keys in iterable (`used` is the set already seen) +fn count_new_keys(mut used: HashMap, iterable: I) -> usize +where + I: IntoIterator, + K: Hash + Eq, +{ + let iter = iterable.into_iter(); + let current_used = used.len(); + used.extend(iter.map(|key| (key, ()))); + used.len() - current_used +} + +impl Iterator for UniqueBy +where + I: Iterator, + V: Eq + Hash, + F: FnMut(&I::Item) -> V, +{ + type Item = I::Item; + + fn next(&mut self) -> Option { + let Self { iter, used, f } = self; + iter.find(|v| used.insert(f(v), ()).is_none()) + } + + #[inline] + fn size_hint(&self) -> (usize, Option) { + let (low, hi) = self.iter.size_hint(); + ((low > 0 && self.used.is_empty()) as usize, hi) + } + + fn count(self) -> usize { + let mut key_f = self.f; + count_new_keys(self.used, self.iter.map(move |elt| key_f(&elt))) + } +} + +impl DoubleEndedIterator for UniqueBy +where + I: DoubleEndedIterator, + V: Eq + Hash, + F: FnMut(&I::Item) -> V, +{ + fn next_back(&mut self) -> Option { + let Self { iter, used, f } = self; + iter.rfind(|v| used.insert(f(v), ()).is_none()) + } +} + +impl FusedIterator for UniqueBy +where + I: FusedIterator, + V: Eq + Hash, + F: FnMut(&I::Item) -> V, +{ +} + +impl Iterator for Unique +where + I: Iterator, + I::Item: Eq + Hash + Clone, +{ + type Item = I::Item; + + fn next(&mut self) -> Option { + let UniqueBy { iter, used, .. } = &mut self.iter; + iter.find_map(|v| { + if let Entry::Vacant(entry) = used.entry(v) { + let elt = entry.key().clone(); + entry.insert(()); + return Some(elt); + } + None + }) + } + + #[inline] + fn size_hint(&self) -> (usize, Option) { + let (low, hi) = self.iter.iter.size_hint(); + ((low > 0 && self.iter.used.is_empty()) as usize, hi) + } + + fn count(self) -> usize { + count_new_keys(self.iter.used, self.iter.iter) + } +} + +impl DoubleEndedIterator for Unique +where + I: DoubleEndedIterator, + I::Item: Eq + Hash + Clone, +{ + fn next_back(&mut self) -> Option { + let UniqueBy { iter, used, .. } = &mut self.iter; + iter.rev().find_map(|v| { + if let Entry::Vacant(entry) = used.entry(v) { + let elt = entry.key().clone(); + entry.insert(()); + return Some(elt); + } + None + }) + } +} + +impl FusedIterator for Unique +where + I: FusedIterator, + I::Item: Eq + Hash + Clone, +{ +} + +/// An iterator adapter to filter out duplicate elements. +/// +/// See [`.unique()`](crate::Itertools::unique) for more information. 
+#[derive(Clone)] +#[must_use = "iterator adaptors are lazy and do nothing unless consumed"] +pub struct Unique +where + I: Iterator, + I::Item: Eq + Hash + Clone, +{ + iter: UniqueBy, +} + +impl fmt::Debug for Unique +where + I: Iterator + fmt::Debug, + I::Item: Hash + Eq + fmt::Debug + Clone, +{ + debug_fmt_fields!(Unique, iter); +} + +pub fn unique(iter: I) -> Unique +where + I: Iterator, + I::Item: Eq + Hash + Clone, +{ + Unique { + iter: UniqueBy { + iter, + used: HashMap::new(), + f: (), + }, + } +} diff --git a/vendor/itertools/src/unziptuple.rs b/vendor/itertools/src/unziptuple.rs new file mode 100644 index 00000000000000..2c79c2d842cbc1 --- /dev/null +++ b/vendor/itertools/src/unziptuple.rs @@ -0,0 +1,80 @@ +/// Converts an iterator of tuples into a tuple of containers. +/// +/// `multiunzip()` consumes an entire iterator of n-ary tuples, producing `n` collections, one for each +/// column. +/// +/// This function is, in some sense, the opposite of [`multizip`]. +/// +/// ``` +/// use itertools::multiunzip; +/// +/// let inputs = vec![(1, 2, 3), (4, 5, 6), (7, 8, 9)]; +/// +/// let (a, b, c): (Vec<_>, Vec<_>, Vec<_>) = multiunzip(inputs); +/// +/// assert_eq!(a, vec![1, 4, 7]); +/// assert_eq!(b, vec![2, 5, 8]); +/// assert_eq!(c, vec![3, 6, 9]); +/// ``` +/// +/// [`multizip`]: crate::multizip +pub fn multiunzip(i: I) -> FromI +where + I: IntoIterator, + I::IntoIter: MultiUnzip, +{ + i.into_iter().multiunzip() +} + +/// An iterator that can be unzipped into multiple collections. +/// +/// See [`.multiunzip()`](crate::Itertools::multiunzip) for more information. +pub trait MultiUnzip: Iterator { + /// Unzip this iterator into multiple collections. + fn multiunzip(self) -> FromI; +} + +macro_rules! impl_unzip_iter { + ($($T:ident => $FromT:ident),*) => ( + #[allow(non_snake_case)] + impl, $($T, $FromT: Default + Extend<$T>),* > MultiUnzip<($($FromT,)*)> for IT { + fn multiunzip(self) -> ($($FromT,)*) { + // This implementation mirrors the logic of Iterator::unzip resp. Extend for (A, B) as close as possible. + // Unfortunately a lot of the used api there is still unstable (https://github.com/rust-lang/rust/issues/72631). 
+ // + // Iterator::unzip: https://doc.rust-lang.org/src/core/iter/traits/iterator.rs.html#2825-2865 + // Extend for (A, B): https://doc.rust-lang.org/src/core/iter/traits/collect.rs.html#370-411 + + let mut res = ($($FromT::default(),)*); + let ($($FromT,)*) = &mut res; + + // Still unstable #72631 + // let (lower_bound, _) = self.size_hint(); + // if lower_bound > 0 { + // $($FromT.extend_reserve(lower_bound);)* + // } + + self.fold((), |(), ($($T,)*)| { + // Still unstable #72631 + // $( $FromT.extend_one($T); )* + $( $FromT.extend(std::iter::once($T)); )* + }); + res + } + } + ); +} + +impl_unzip_iter!(); +impl_unzip_iter!(A => FromA); +impl_unzip_iter!(A => FromA, B => FromB); +impl_unzip_iter!(A => FromA, B => FromB, C => FromC); +impl_unzip_iter!(A => FromA, B => FromB, C => FromC, D => FromD); +impl_unzip_iter!(A => FromA, B => FromB, C => FromC, D => FromD, E => FromE); +impl_unzip_iter!(A => FromA, B => FromB, C => FromC, D => FromD, E => FromE, F => FromF); +impl_unzip_iter!(A => FromA, B => FromB, C => FromC, D => FromD, E => FromE, F => FromF, G => FromG); +impl_unzip_iter!(A => FromA, B => FromB, C => FromC, D => FromD, E => FromE, F => FromF, G => FromG, H => FromH); +impl_unzip_iter!(A => FromA, B => FromB, C => FromC, D => FromD, E => FromE, F => FromF, G => FromG, H => FromH, I => FromI); +impl_unzip_iter!(A => FromA, B => FromB, C => FromC, D => FromD, E => FromE, F => FromF, G => FromG, H => FromH, I => FromI, J => FromJ); +impl_unzip_iter!(A => FromA, B => FromB, C => FromC, D => FromD, E => FromE, F => FromF, G => FromG, H => FromH, I => FromI, J => FromJ, K => FromK); +impl_unzip_iter!(A => FromA, B => FromB, C => FromC, D => FromD, E => FromE, F => FromF, G => FromG, H => FromH, I => FromI, J => FromJ, K => FromK, L => FromL); diff --git a/vendor/itertools/src/with_position.rs b/vendor/itertools/src/with_position.rs new file mode 100644 index 00000000000000..2d56bb9b224710 --- /dev/null +++ b/vendor/itertools/src/with_position.rs @@ -0,0 +1,124 @@ +use std::fmt; +use std::iter::{Fuse, FusedIterator, Peekable}; + +/// An iterator adaptor that wraps each element in an [`Position`]. +/// +/// Iterator element type is `(Position, I::Item)`. +/// +/// See [`.with_position()`](crate::Itertools::with_position) for more information. +#[must_use = "iterator adaptors are lazy and do nothing unless consumed"] +pub struct WithPosition +where + I: Iterator, +{ + handled_first: bool, + peekable: Peekable>, +} + +impl fmt::Debug for WithPosition +where + I: Iterator, + Peekable>: fmt::Debug, +{ + debug_fmt_fields!(WithPosition, handled_first, peekable); +} + +impl Clone for WithPosition +where + I: Clone + Iterator, + I::Item: Clone, +{ + clone_fields!(handled_first, peekable); +} + +/// Create a new `WithPosition` iterator. +pub fn with_position(iter: I) -> WithPosition +where + I: Iterator, +{ + WithPosition { + handled_first: false, + peekable: iter.fuse().peekable(), + } +} + +/// The first component of the value yielded by `WithPosition`. +/// Indicates the position of this element in the iterator results. +/// +/// See [`.with_position()`](crate::Itertools::with_position) for more information. +#[derive(Copy, Clone, Debug, PartialEq, Eq)] +pub enum Position { + /// This is the first element. + First, + /// This is neither the first nor the last element. + Middle, + /// This is the last element. + Last, + /// This is the only element. 
+ Only, +} + +impl Iterator for WithPosition { + type Item = (Position, I::Item); + + fn next(&mut self) -> Option { + match self.peekable.next() { + Some(item) => { + if !self.handled_first { + // Haven't seen the first item yet, and there is one to give. + self.handled_first = true; + // Peek to see if this is also the last item, + // in which case tag it as `Only`. + match self.peekable.peek() { + Some(_) => Some((Position::First, item)), + None => Some((Position::Only, item)), + } + } else { + // Have seen the first item, and there's something left. + // Peek to see if this is the last item. + match self.peekable.peek() { + Some(_) => Some((Position::Middle, item)), + None => Some((Position::Last, item)), + } + } + } + // Iterator is finished. + None => None, + } + } + + fn size_hint(&self) -> (usize, Option) { + self.peekable.size_hint() + } + + fn fold(mut self, mut init: B, mut f: F) -> B + where + F: FnMut(B, Self::Item) -> B, + { + if let Some(mut head) = self.peekable.next() { + if !self.handled_first { + // The current head is `First` or `Only`, + // it depends if there is another item or not. + match self.peekable.next() { + Some(second) => { + let first = std::mem::replace(&mut head, second); + init = f(init, (Position::First, first)); + } + None => return f(init, (Position::Only, head)), + } + } + // Have seen the first item, and there's something left. + init = self.peekable.fold(init, |acc, mut item| { + std::mem::swap(&mut head, &mut item); + f(acc, (Position::Middle, item)) + }); + // The "head" is now the last item. + init = f(init, (Position::Last, head)); + } + init + } +} + +impl ExactSizeIterator for WithPosition where I: ExactSizeIterator {} + +impl FusedIterator for WithPosition {} diff --git a/vendor/itertools/src/zip_eq_impl.rs b/vendor/itertools/src/zip_eq_impl.rs new file mode 100644 index 00000000000000..6d3b68296656ee --- /dev/null +++ b/vendor/itertools/src/zip_eq_impl.rs @@ -0,0 +1,64 @@ +use super::size_hint; + +/// An iterator which iterates two other iterators simultaneously +/// and panic if they have different lengths. +/// +/// See [`.zip_eq()`](crate::Itertools::zip_eq) for more information. +#[derive(Clone, Debug)] +#[must_use = "iterator adaptors are lazy and do nothing unless consumed"] +pub struct ZipEq { + a: I, + b: J, +} + +/// Zips two iterators but **panics** if they are not of the same length. +/// +/// [`IntoIterator`] enabled version of [`Itertools::zip_eq`](crate::Itertools::zip_eq). 
+///
+/// ```
+/// use itertools::zip_eq;
+///
+/// let data = [1, 2, 3, 4, 5];
+/// for (a, b) in zip_eq(&data[..data.len() - 1], &data[1..]) {
+///     /* loop body */
+/// }
+/// ```
+pub fn zip_eq<I, J>(i: I, j: J) -> ZipEq<I::IntoIter, J::IntoIter>
+where
+    I: IntoIterator,
+    J: IntoIterator,
+{
+    ZipEq {
+        a: i.into_iter(),
+        b: j.into_iter(),
+    }
+}
+
+impl<I, J> Iterator for ZipEq<I, J>
+where
+    I: Iterator,
+    J: Iterator,
+{
+    type Item = (I::Item, J::Item);
+
+    fn next(&mut self) -> Option<Self::Item> {
+        match (self.a.next(), self.b.next()) {
+            (None, None) => None,
+            (Some(a), Some(b)) => Some((a, b)),
+            (None, Some(_)) | (Some(_), None) => {
+                panic!("itertools: .zip_eq() reached end of one iterator before the other")
+            }
+        }
+    }
+
+    fn size_hint(&self) -> (usize, Option<usize>) {
+        size_hint::min(self.a.size_hint(), self.b.size_hint())
+    }
+}
+
+impl<I, J> ExactSizeIterator for ZipEq<I, J>
+where
+    I: ExactSizeIterator,
+    J: ExactSizeIterator,
+{
+}
diff --git a/vendor/itertools/src/zip_longest.rs b/vendor/itertools/src/zip_longest.rs
new file mode 100644
index 00000000000000..d4eb9a882e3a31
--- /dev/null
+++ b/vendor/itertools/src/zip_longest.rs
@@ -0,0 +1,139 @@
+use super::size_hint;
+use std::cmp::Ordering::{Equal, Greater, Less};
+use std::iter::{Fuse, FusedIterator};
+
+use crate::either_or_both::EitherOrBoth;
+
+// ZipLongest originally written by SimonSapin,
+// and dedicated to itertools https://github.com/rust-lang/rust/pull/19283
+
+/// An iterator which iterates two other iterators simultaneously
+/// and wraps the elements in [`EitherOrBoth`].
+///
+/// This iterator is *fused*.
+///
+/// See [`.zip_longest()`](crate::Itertools::zip_longest) for more information.
+#[derive(Clone, Debug)]
+#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
+pub struct ZipLongest<T, U> {
+    a: Fuse<T>,
+    b: Fuse<U>,
+}
+
+/// Create a new `ZipLongest` iterator.
+pub fn zip_longest(a: T, b: U) -> ZipLongest +where + T: Iterator, + U: Iterator, +{ + ZipLongest { + a: a.fuse(), + b: b.fuse(), + } +} + +impl Iterator for ZipLongest +where + T: Iterator, + U: Iterator, +{ + type Item = EitherOrBoth; + + #[inline] + fn next(&mut self) -> Option { + match (self.a.next(), self.b.next()) { + (None, None) => None, + (Some(a), None) => Some(EitherOrBoth::Left(a)), + (None, Some(b)) => Some(EitherOrBoth::Right(b)), + (Some(a), Some(b)) => Some(EitherOrBoth::Both(a, b)), + } + } + + #[inline] + fn size_hint(&self) -> (usize, Option) { + size_hint::max(self.a.size_hint(), self.b.size_hint()) + } + + #[inline] + fn fold(self, init: B, mut f: F) -> B + where + Self: Sized, + F: FnMut(B, Self::Item) -> B, + { + let Self { mut a, mut b } = self; + let res = a.try_fold(init, |init, a| match b.next() { + Some(b) => Ok(f(init, EitherOrBoth::Both(a, b))), + None => Err(f(init, EitherOrBoth::Left(a))), + }); + match res { + Ok(acc) => b.map(EitherOrBoth::Right).fold(acc, f), + Err(acc) => a.map(EitherOrBoth::Left).fold(acc, f), + } + } +} + +impl DoubleEndedIterator for ZipLongest +where + T: DoubleEndedIterator + ExactSizeIterator, + U: DoubleEndedIterator + ExactSizeIterator, +{ + #[inline] + fn next_back(&mut self) -> Option { + match self.a.len().cmp(&self.b.len()) { + Equal => match (self.a.next_back(), self.b.next_back()) { + (None, None) => None, + (Some(a), Some(b)) => Some(EitherOrBoth::Both(a, b)), + // These can only happen if .len() is inconsistent with .next_back() + (Some(a), None) => Some(EitherOrBoth::Left(a)), + (None, Some(b)) => Some(EitherOrBoth::Right(b)), + }, + Greater => self.a.next_back().map(EitherOrBoth::Left), + Less => self.b.next_back().map(EitherOrBoth::Right), + } + } + + fn rfold(self, mut init: B, mut f: F) -> B + where + F: FnMut(B, Self::Item) -> B, + { + let Self { mut a, mut b } = self; + let a_len = a.len(); + let b_len = b.len(); + match a_len.cmp(&b_len) { + Equal => {} + Greater => { + init = a + .by_ref() + .rev() + .take(a_len - b_len) + .map(EitherOrBoth::Left) + .fold(init, &mut f) + } + Less => { + init = b + .by_ref() + .rev() + .take(b_len - a_len) + .map(EitherOrBoth::Right) + .fold(init, &mut f) + } + } + a.rfold(init, |acc, item_a| { + f(acc, EitherOrBoth::Both(item_a, b.next_back().unwrap())) + }) + } +} + +impl ExactSizeIterator for ZipLongest +where + T: ExactSizeIterator, + U: ExactSizeIterator, +{ +} + +impl FusedIterator for ZipLongest +where + T: Iterator, + U: Iterator, +{ +} diff --git a/vendor/itertools/src/ziptuple.rs b/vendor/itertools/src/ziptuple.rs new file mode 100644 index 00000000000000..3ada0296caac16 --- /dev/null +++ b/vendor/itertools/src/ziptuple.rs @@ -0,0 +1,137 @@ +use super::size_hint; + +/// See [`multizip`] for more information. +#[derive(Clone, Debug)] +#[must_use = "iterator adaptors are lazy and do nothing unless consumed"] +pub struct Zip { + t: T, +} + +/// An iterator that generalizes `.zip()` and allows running multiple iterators in lockstep. +/// +/// The iterator `Zip<(I, J, ..., M)>` is formed from a tuple of iterators (or values that +/// implement [`IntoIterator`]) and yields elements +/// until any of the subiterators yields `None`. +/// +/// The iterator element type is a tuple like like `(A, B, ..., E)` where `A` to `E` are the +/// element types of the subiterator. +/// +/// **Note:** The result of this function is a value of a named type (`Zip<(I, J, +/// ..)>` of each component iterator `I, J, ...`) if each component iterator is +/// nameable. 
+/// +/// Prefer [`izip!()`](crate::izip) over `multizip` for the performance benefits of using the +/// standard library `.zip()`. Prefer `multizip` if a nameable type is needed. +/// +/// ``` +/// use itertools::multizip; +/// +/// // iterate over three sequences side-by-side +/// let mut results = [0, 0, 0, 0]; +/// let inputs = [3, 7, 9, 6]; +/// +/// for (r, index, input) in multizip((&mut results, 0..10, &inputs)) { +/// *r = index * 10 + input; +/// } +/// +/// assert_eq!(results, [0 + 3, 10 + 7, 29, 36]); +/// ``` +pub fn multizip(t: U) -> Zip +where + Zip: From + Iterator, +{ + Zip::from(t) +} + +macro_rules! impl_zip_iter { + ($($B:ident),*) => ( + #[allow(non_snake_case)] + impl<$($B: IntoIterator),*> From<($($B,)*)> for Zip<($($B::IntoIter,)*)> { + fn from(t: ($($B,)*)) -> Self { + let ($($B,)*) = t; + Zip { t: ($($B.into_iter(),)*) } + } + } + + #[allow(non_snake_case)] + #[allow(unused_assignments)] + impl<$($B),*> Iterator for Zip<($($B,)*)> + where + $( + $B: Iterator, + )* + { + type Item = ($($B::Item,)*); + + fn next(&mut self) -> Option + { + let ($(ref mut $B,)*) = self.t; + + // NOTE: Just like iter::Zip, we check the iterators + // for None in order. We may finish unevenly (some + // iterators gave n + 1 elements, some only n). + $( + let $B = match $B.next() { + None => return None, + Some(elt) => elt + }; + )* + Some(($($B,)*)) + } + + fn size_hint(&self) -> (usize, Option) + { + let sh = (usize::MAX, None); + let ($(ref $B,)*) = self.t; + $( + let sh = size_hint::min($B.size_hint(), sh); + )* + sh + } + } + + #[allow(non_snake_case)] + impl<$($B),*> ExactSizeIterator for Zip<($($B,)*)> where + $( + $B: ExactSizeIterator, + )* + { } + + #[allow(non_snake_case)] + impl<$($B),*> DoubleEndedIterator for Zip<($($B,)*)> where + $( + $B: DoubleEndedIterator + ExactSizeIterator, + )* + { + #[inline] + fn next_back(&mut self) -> Option { + let ($(ref mut $B,)*) = self.t; + let size = *[$( $B.len(), )*].iter().min().unwrap(); + + $( + if $B.len() != size { + for _ in 0..$B.len() - size { $B.next_back(); } + } + )* + + match ($($B.next_back(),)*) { + ($(Some($B),)*) => Some(($($B,)*)), + _ => None, + } + } + } + ); +} + +impl_zip_iter!(A); +impl_zip_iter!(A, B); +impl_zip_iter!(A, B, C); +impl_zip_iter!(A, B, C, D); +impl_zip_iter!(A, B, C, D, E); +impl_zip_iter!(A, B, C, D, E, F); +impl_zip_iter!(A, B, C, D, E, F, G); +impl_zip_iter!(A, B, C, D, E, F, G, H); +impl_zip_iter!(A, B, C, D, E, F, G, H, I); +impl_zip_iter!(A, B, C, D, E, F, G, H, I, J); +impl_zip_iter!(A, B, C, D, E, F, G, H, I, J, K); +impl_zip_iter!(A, B, C, D, E, F, G, H, I, J, K, L); diff --git a/vendor/itertools/tests/adaptors_no_collect.rs b/vendor/itertools/tests/adaptors_no_collect.rs new file mode 100644 index 00000000000000..977224af29f70a --- /dev/null +++ b/vendor/itertools/tests/adaptors_no_collect.rs @@ -0,0 +1,51 @@ +use itertools::Itertools; + +struct PanickingCounter { + curr: usize, + max: usize, +} + +impl Iterator for PanickingCounter { + type Item = (); + + fn next(&mut self) -> Option { + self.curr += 1; + + assert_ne!( + self.curr, self.max, + "Input iterator reached maximum of {} suggesting collection by adaptor", + self.max + ); + + Some(()) + } +} + +fn no_collect_test(to_adaptor: T) +where + A: Iterator, + T: Fn(PanickingCounter) -> A, +{ + let counter = PanickingCounter { + curr: 0, + max: 10_000, + }; + let adaptor = to_adaptor(counter); + + for _ in adaptor.take(5) {} +} + +#[test] +fn permutations_no_collect() { + no_collect_test(|iter| iter.permutations(5)) +} + +#[test] +fn 
combinations_no_collect() { + no_collect_test(|iter| iter.combinations(5)) +} + +#[test] +fn combinations_with_replacement_no_collect() { + no_collect_test(|iter| iter.combinations_with_replacement(5)) +} diff --git a/vendor/itertools/tests/flatten_ok.rs b/vendor/itertools/tests/flatten_ok.rs new file mode 100644 index 00000000000000..bf835b5d70a173 --- /dev/null +++ b/vendor/itertools/tests/flatten_ok.rs @@ -0,0 +1,76 @@ +use itertools::{assert_equal, Itertools}; +use std::{ops::Range, vec::IntoIter}; + +fn mix_data() -> IntoIter, bool>> { + vec![Ok(0..2), Err(false), Ok(2..4), Err(true), Ok(4..6)].into_iter() +} + +fn ok_data() -> IntoIter, bool>> { + vec![Ok(0..2), Ok(2..4), Ok(4..6)].into_iter() +} + +#[test] +fn flatten_ok_mixed_expected_forward() { + assert_equal( + mix_data().flatten_ok(), + vec![ + Ok(0), + Ok(1), + Err(false), + Ok(2), + Ok(3), + Err(true), + Ok(4), + Ok(5), + ], + ); +} + +#[test] +fn flatten_ok_mixed_expected_reverse() { + assert_equal( + mix_data().flatten_ok().rev(), + vec![ + Ok(5), + Ok(4), + Err(true), + Ok(3), + Ok(2), + Err(false), + Ok(1), + Ok(0), + ], + ); +} + +#[test] +fn flatten_ok_collect_mixed_forward() { + assert_eq!( + mix_data().flatten_ok().collect::, _>>(), + Err(false) + ); +} + +#[test] +fn flatten_ok_collect_mixed_reverse() { + assert_eq!( + mix_data().flatten_ok().rev().collect::, _>>(), + Err(true) + ); +} + +#[test] +fn flatten_ok_collect_ok_forward() { + assert_eq!( + ok_data().flatten_ok().collect::, _>>(), + Ok((0..6).collect()) + ); +} + +#[test] +fn flatten_ok_collect_ok_reverse() { + assert_eq!( + ok_data().flatten_ok().rev().collect::, _>>(), + Ok((0..6).rev().collect()) + ); +} diff --git a/vendor/itertools/tests/laziness.rs b/vendor/itertools/tests/laziness.rs new file mode 100644 index 00000000000000..c559d33adc5dfe --- /dev/null +++ b/vendor/itertools/tests/laziness.rs @@ -0,0 +1,283 @@ +#![allow(unstable_name_collisions)] + +use itertools::Itertools; + +#[derive(Debug, Clone)] +#[must_use = "iterators are lazy and do nothing unless consumed"] +struct Panicking; + +impl Iterator for Panicking { + type Item = u8; + + fn next(&mut self) -> Option { + panic!("iterator adaptor is not lazy") + } + + fn size_hint(&self) -> (usize, Option) { + (0, Some(0)) + } +} + +impl ExactSizeIterator for Panicking {} + +/// ## Usage example +/// ```compile_fail +/// must_use_tests! { +/// name { +/// Panicking.name(); // Add `let _ =` only if required (encountered error). +/// } +/// // ... +/// } +/// ``` +/// +/// **TODO:** test missing `must_use` attributes better, maybe with a new lint. +macro_rules! must_use_tests { + ($($(#[$attr:meta])* $name:ident $body:block)*) => { + $( + /// `#[deny(unused_must_use)]` should force us to ignore the resulting iterators + /// by adding `let _ = ...;` on every iterator. + /// If it does not, then a `must_use` attribute is missing on the associated struct. + /// + /// However, it's only helpful if we don't add `let _ =` before seeing if there is an error or not. + /// And it does not protect us against removed `must_use` attributes. + /// There is no simple way to test this yet. + #[deny(unused_must_use)] + #[test] + $(#[$attr])* + fn $name() $body + )* + }; +} + +must_use_tests! 
{ + // Itertools trait: + interleave { + let _ = Panicking.interleave(Panicking); + } + interleave_shortest { + let _ = Panicking.interleave_shortest(Panicking); + } + intersperse { + let _ = Panicking.intersperse(0); + } + intersperse_with { + let _ = Panicking.intersperse_with(|| 0); + } + get { + let _ = Panicking.get(1..4); + let _ = Panicking.get(1..=4); + let _ = Panicking.get(1..); + let _ = Panicking.get(..4); + let _ = Panicking.get(..=4); + let _ = Panicking.get(..); + } + zip_longest { + let _ = Panicking.zip_longest(Panicking); + } + zip_eq { + let _ = Panicking.zip_eq(Panicking); + } + batching { + let _ = Panicking.batching(Iterator::next); + } + chunk_by { + // ChunkBy + let _ = Panicking.chunk_by(|x| *x); + // Groups + let _ = Panicking.chunk_by(|x| *x).into_iter(); + } + chunks { + // IntoChunks + let _ = Panicking.chunks(1); + let _ = Panicking.chunks(2); + // Chunks + let _ = Panicking.chunks(1).into_iter(); + let _ = Panicking.chunks(2).into_iter(); + } + tuple_windows { + let _ = Panicking.tuple_windows::<(_,)>(); + let _ = Panicking.tuple_windows::<(_, _)>(); + let _ = Panicking.tuple_windows::<(_, _, _)>(); + } + circular_tuple_windows { + let _ = Panicking.circular_tuple_windows::<(_,)>(); + let _ = Panicking.circular_tuple_windows::<(_, _)>(); + let _ = Panicking.circular_tuple_windows::<(_, _, _)>(); + } + tuples { + let _ = Panicking.tuples::<(_,)>(); + let _ = Panicking.tuples::<(_, _)>(); + let _ = Panicking.tuples::<(_, _, _)>(); + } + tee { + let _ = Panicking.tee(); + } + map_into { + let _ = Panicking.map_into::(); + } + map_ok { + let _ = Panicking.map(Ok::).map_ok(|x| x + 1); + } + filter_ok { + let _ = Panicking.map(Ok::).filter_ok(|x| x % 2 == 0); + } + filter_map_ok { + let _ = Panicking.map(Ok::).filter_map_ok(|x| { + if x % 2 == 0 { + Some(x + 1) + } else { + None + } + }); + } + flatten_ok { + let _ = Panicking.map(|x| Ok::<_, ()>([x])).flatten_ok(); + } + merge { + let _ = Panicking.merge(Panicking); + } + merge_by { + let _ = Panicking.merge_by(Panicking, |_, _| true); + } + merge_join_by { + let _ = Panicking.merge_join_by(Panicking, |_, _| true); + let _ = Panicking.merge_join_by(Panicking, Ord::cmp); + } + #[should_panic] + kmerge { + let _ = Panicking.map(|_| Panicking).kmerge(); + } + #[should_panic] + kmerge_by { + let _ = Panicking.map(|_| Panicking).kmerge_by(|_, _| true); + } + cartesian_product { + let _ = Panicking.cartesian_product(Panicking); + } + multi_cartesian_product { + let _ = vec![Panicking, Panicking, Panicking].into_iter().multi_cartesian_product(); + } + coalesce { + let _ = Panicking.coalesce(|x, y| if x == y { Ok(x) } else { Err((x, y)) }); + } + dedup { + let _ = Panicking.dedup(); + } + dedup_by { + let _ = Panicking.dedup_by(|_, _| true); + } + dedup_with_count { + let _ = Panicking.dedup_with_count(); + } + dedup_by_with_count { + let _ = Panicking.dedup_by_with_count(|_, _| true); + } + duplicates { + let _ = Panicking.duplicates(); + } + duplicates_by { + let _ = Panicking.duplicates_by(|x| *x); + } + unique { + let _ = Panicking.unique(); + } + unique_by { + let _ = Panicking.unique_by(|x| *x); + } + peeking_take_while { + let _ = Panicking.peekable().peeking_take_while(|x| x % 2 == 0); + } + take_while_ref { + let _ = Panicking.take_while_ref(|x| x % 2 == 0); + } + take_while_inclusive { + let _ = Panicking.take_while_inclusive(|x| x % 2 == 0); + } + while_some { + let _ = Panicking.map(Some).while_some(); + } + tuple_combinations1 { + let _ = Panicking.tuple_combinations::<(_,)>(); + } + #[should_panic] + 
tuple_combinations2 { + let _ = Panicking.tuple_combinations::<(_, _)>(); + } + #[should_panic] + tuple_combinations3 { + let _ = Panicking.tuple_combinations::<(_, _, _)>(); + } + combinations { + let _ = Panicking.combinations(0); + let _ = Panicking.combinations(1); + let _ = Panicking.combinations(2); + } + combinations_with_replacement { + let _ = Panicking.combinations_with_replacement(0); + let _ = Panicking.combinations_with_replacement(1); + let _ = Panicking.combinations_with_replacement(2); + } + permutations { + let _ = Panicking.permutations(0); + let _ = Panicking.permutations(1); + let _ = Panicking.permutations(2); + } + powerset { + let _ = Panicking.powerset(); + } + pad_using { + let _ = Panicking.pad_using(25, |_| 10); + } + with_position { + let _ = Panicking.with_position(); + } + positions { + let _ = Panicking.positions(|v| v % 2 == 0); + } + update { + let _ = Panicking.update(|n| *n += 1); + } + multipeek { + let _ = Panicking.multipeek(); + } + // Not iterator themselves but still lazy. + into_grouping_map { + let _ = Panicking.map(|x| (x, x + 1)).into_grouping_map(); + } + into_grouping_map_by { + let _ = Panicking.into_grouping_map_by(|x| *x); + } + // Macros: + iproduct { + let _ = itertools::iproduct!(Panicking); + let _ = itertools::iproduct!(Panicking, Panicking); + let _ = itertools::iproduct!(Panicking, Panicking, Panicking); + } + izip { + let _ = itertools::izip!(Panicking); + let _ = itertools::izip!(Panicking, Panicking); + let _ = itertools::izip!(Panicking, Panicking, Panicking); + } + chain { + let _ = itertools::chain!(Panicking); + let _ = itertools::chain!(Panicking, Panicking); + let _ = itertools::chain!(Panicking, Panicking, Panicking); + } + // Free functions: + multizip { + let _ = itertools::multizip((Panicking, Panicking)); + } + put_back { + let _ = itertools::put_back(Panicking); + let _ = itertools::put_back(Panicking).with_value(15); + } + peek_nth { + let _ = itertools::peek_nth(Panicking); + } + put_back_n { + let _ = itertools::put_back_n(Panicking); + } + rciter { + let _ = itertools::rciter(Panicking); + } +} diff --git a/vendor/itertools/tests/macros_hygiene.rs b/vendor/itertools/tests/macros_hygiene.rs new file mode 100644 index 00000000000000..20b59fba87371d --- /dev/null +++ b/vendor/itertools/tests/macros_hygiene.rs @@ -0,0 +1,14 @@ +#[test] +fn iproduct_hygiene() { + let _ = itertools::iproduct!(); + let _ = itertools::iproduct!(0..6); + let _ = itertools::iproduct!(0..6, 0..9); + let _ = itertools::iproduct!(0..6, 0..9, 0..12); +} + +#[test] +fn izip_hygiene() { + let _ = itertools::izip!(0..6); + let _ = itertools::izip!(0..6, 0..9); + let _ = itertools::izip!(0..6, 0..9, 0..12); +} diff --git a/vendor/itertools/tests/merge_join.rs b/vendor/itertools/tests/merge_join.rs new file mode 100644 index 00000000000000..776252fc58d179 --- /dev/null +++ b/vendor/itertools/tests/merge_join.rs @@ -0,0 +1,101 @@ +use itertools::free::merge_join_by; +use itertools::EitherOrBoth; + +#[test] +fn empty() { + let left: Vec = vec![]; + let right: Vec = vec![]; + let expected_result: Vec> = vec![]; + let actual_result = merge_join_by(left, right, |l, r| l.cmp(r)).collect::>(); + assert_eq!(expected_result, actual_result); +} + +#[test] +fn left_only() { + let left: Vec = vec![1, 2, 3]; + let right: Vec = vec![]; + let expected_result: Vec> = vec![ + EitherOrBoth::Left(1), + EitherOrBoth::Left(2), + EitherOrBoth::Left(3), + ]; + let actual_result = merge_join_by(left, right, |l, r| l.cmp(r)).collect::>(); + assert_eq!(expected_result, 
actual_result); +} + +#[test] +fn right_only() { + let left: Vec = vec![]; + let right: Vec = vec![1, 2, 3]; + let expected_result: Vec> = vec![ + EitherOrBoth::Right(1), + EitherOrBoth::Right(2), + EitherOrBoth::Right(3), + ]; + let actual_result = merge_join_by(left, right, |l, r| l.cmp(r)).collect::>(); + assert_eq!(expected_result, actual_result); +} + +#[test] +fn first_left_then_right() { + let left: Vec = vec![1, 2, 3]; + let right: Vec = vec![4, 5, 6]; + let expected_result: Vec> = vec![ + EitherOrBoth::Left(1), + EitherOrBoth::Left(2), + EitherOrBoth::Left(3), + EitherOrBoth::Right(4), + EitherOrBoth::Right(5), + EitherOrBoth::Right(6), + ]; + let actual_result = merge_join_by(left, right, |l, r| l.cmp(r)).collect::>(); + assert_eq!(expected_result, actual_result); +} + +#[test] +fn first_right_then_left() { + let left: Vec = vec![4, 5, 6]; + let right: Vec = vec![1, 2, 3]; + let expected_result: Vec> = vec![ + EitherOrBoth::Right(1), + EitherOrBoth::Right(2), + EitherOrBoth::Right(3), + EitherOrBoth::Left(4), + EitherOrBoth::Left(5), + EitherOrBoth::Left(6), + ]; + let actual_result = merge_join_by(left, right, |l, r| l.cmp(r)).collect::>(); + assert_eq!(expected_result, actual_result); +} + +#[test] +fn interspersed_left_and_right() { + let left: Vec = vec![1, 3, 5]; + let right: Vec = vec![2, 4, 6]; + let expected_result: Vec> = vec![ + EitherOrBoth::Left(1), + EitherOrBoth::Right(2), + EitherOrBoth::Left(3), + EitherOrBoth::Right(4), + EitherOrBoth::Left(5), + EitherOrBoth::Right(6), + ]; + let actual_result = merge_join_by(left, right, |l, r| l.cmp(r)).collect::>(); + assert_eq!(expected_result, actual_result); +} + +#[test] +fn overlapping_left_and_right() { + let left: Vec = vec![1, 3, 4, 6]; + let right: Vec = vec![2, 3, 4, 5]; + let expected_result: Vec> = vec![ + EitherOrBoth::Left(1), + EitherOrBoth::Right(2), + EitherOrBoth::Both(3, 3), + EitherOrBoth::Both(4, 4), + EitherOrBoth::Right(5), + EitherOrBoth::Left(6), + ]; + let actual_result = merge_join_by(left, right, |l, r| l.cmp(r)).collect::>(); + assert_eq!(expected_result, actual_result); +} diff --git a/vendor/itertools/tests/peeking_take_while.rs b/vendor/itertools/tests/peeking_take_while.rs new file mode 100644 index 00000000000000..5be97271dd80ef --- /dev/null +++ b/vendor/itertools/tests/peeking_take_while.rs @@ -0,0 +1,69 @@ +use itertools::Itertools; +use itertools::{put_back, put_back_n}; + +#[test] +fn peeking_take_while_peekable() { + let mut r = (0..10).peekable(); + r.peeking_take_while(|x| *x <= 3).count(); + assert_eq!(r.next(), Some(4)); +} + +#[test] +fn peeking_take_while_put_back() { + let mut r = put_back(0..10); + r.peeking_take_while(|x| *x <= 3).count(); + assert_eq!(r.next(), Some(4)); + r.peeking_take_while(|_| true).count(); + assert_eq!(r.next(), None); +} + +#[test] +fn peeking_take_while_put_back_n() { + let mut r = put_back_n(6..10); + for elt in (0..6).rev() { + r.put_back(elt); + } + r.peeking_take_while(|x| *x <= 3).count(); + assert_eq!(r.next(), Some(4)); + r.peeking_take_while(|_| true).count(); + assert_eq!(r.next(), None); +} + +#[test] +fn peeking_take_while_slice_iter() { + let v = [1, 2, 3, 4, 5, 6]; + let mut r = v.iter(); + r.peeking_take_while(|x| **x <= 3).count(); + assert_eq!(r.next(), Some(&4)); + r.peeking_take_while(|_| true).count(); + assert_eq!(r.next(), None); +} + +#[test] +fn peeking_take_while_slice_iter_rev() { + let v = [1, 2, 3, 4, 5, 6]; + let mut r = v.iter().rev(); + r.peeking_take_while(|x| **x >= 3).count(); + assert_eq!(r.next(), Some(&2)); + 
r.peeking_take_while(|_| true).count(); + assert_eq!(r.next(), None); +} + +#[test] +fn peeking_take_while_nested() { + let mut xs = (0..10).peekable(); + let ys: Vec<_> = xs + .peeking_take_while(|x| *x < 6) + .peeking_take_while(|x| *x != 3) + .collect(); + assert_eq!(ys, vec![0, 1, 2]); + assert_eq!(xs.next(), Some(3)); + + let mut xs = (4..10).peekable(); + let ys: Vec<_> = xs + .peeking_take_while(|x| *x != 3) + .peeking_take_while(|x| *x < 6) + .collect(); + assert_eq!(ys, vec![4, 5]); + assert_eq!(xs.next(), Some(6)); +} diff --git a/vendor/itertools/tests/quick.rs b/vendor/itertools/tests/quick.rs new file mode 100644 index 00000000000000..5b8fd6a2105229 --- /dev/null +++ b/vendor/itertools/tests/quick.rs @@ -0,0 +1,1967 @@ +//! The purpose of these tests is to cover corner cases of iterators +//! and adaptors. +//! +//! In particular we test the tedious size_hint and exact size correctness. + +#![allow(deprecated, unstable_name_collisions)] + +use itertools::free::{ + cloned, enumerate, multipeek, peek_nth, put_back, put_back_n, rciter, zip, zip_eq, +}; +use itertools::Itertools; +use itertools::{iproduct, izip, multizip, EitherOrBoth}; +use quickcheck as qc; +use std::cmp::{max, min, Ordering}; +use std::collections::{HashMap, HashSet}; +use std::default::Default; +use std::num::Wrapping; +use std::ops::Range; + +use quickcheck::TestResult; +use rand::seq::SliceRandom; +use rand::Rng; + +/// Trait for size hint modifier types +trait HintKind: Copy + Send + qc::Arbitrary { + fn loosen_bounds(&self, org_hint: (usize, Option)) -> (usize, Option); +} + +/// Exact size hint variant that leaves hints unchanged +#[derive(Clone, Copy, Debug)] +struct Exact {} + +impl HintKind for Exact { + fn loosen_bounds(&self, org_hint: (usize, Option)) -> (usize, Option) { + org_hint + } +} + +impl qc::Arbitrary for Exact { + fn arbitrary(_: &mut G) -> Self { + Self {} + } +} + +/// Inexact size hint variant to simulate imprecise (but valid) size hints +/// +/// Will always decrease the lower bound and increase the upper bound +/// of the size hint by set amounts. +#[derive(Clone, Copy, Debug)] +struct Inexact { + underestimate: usize, + overestimate: usize, +} + +impl HintKind for Inexact { + fn loosen_bounds(&self, org_hint: (usize, Option)) -> (usize, Option) { + let (org_lower, org_upper) = org_hint; + ( + org_lower.saturating_sub(self.underestimate), + org_upper.and_then(move |x| x.checked_add(self.overestimate)), + ) + } +} + +impl qc::Arbitrary for Inexact { + fn arbitrary(g: &mut G) -> Self { + let ue_value = usize::arbitrary(g); + let oe_value = usize::arbitrary(g); + // Compensate for quickcheck using extreme values too rarely + let ue_choices = &[0, ue_value, usize::MAX]; + let oe_choices = &[0, oe_value, usize::MAX]; + Self { + underestimate: *ue_choices.choose(g).unwrap(), + overestimate: *oe_choices.choose(g).unwrap(), + } + } + + fn shrink(&self) -> Box> { + let underestimate_value = self.underestimate; + let overestimate_value = self.overestimate; + Box::new(underestimate_value.shrink().flat_map(move |ue_value| { + overestimate_value.shrink().map(move |oe_value| Self { + underestimate: ue_value, + overestimate: oe_value, + }) + })) + } +} + +/// Our base iterator that we can impl Arbitrary for +/// +/// By default we'll return inexact bounds estimates for size_hint +/// to make tests harder to pass. +/// +/// NOTE: Iter is tricky and is not fused, to help catch bugs. +/// At the end it will return None once, then return Some(0), +/// then return None again. 
+#[derive(Clone, Debug)] +struct Iter { + iterator: Range, + // fuse/done flag + fuse_flag: i32, + hint_kind: SK, +} + +impl Iter +where + HK: HintKind, +{ + fn new(it: Range, hint_kind: HK) -> Self { + Self { + iterator: it, + fuse_flag: 0, + hint_kind, + } + } +} + +impl Iterator for Iter +where + Range: Iterator, + as Iterator>::Item: Default, + HK: HintKind, +{ + type Item = as Iterator>::Item; + + fn next(&mut self) -> Option { + let elt = self.iterator.next(); + if elt.is_none() { + self.fuse_flag += 1; + // check fuse flag + if self.fuse_flag == 2 { + return Some(Default::default()); + } + } + elt + } + + fn size_hint(&self) -> (usize, Option) { + let org_hint = self.iterator.size_hint(); + self.hint_kind.loosen_bounds(org_hint) + } +} + +impl DoubleEndedIterator for Iter +where + Range: DoubleEndedIterator, + as Iterator>::Item: Default, + HK: HintKind, +{ + fn next_back(&mut self) -> Option { + self.iterator.next_back() + } +} + +impl ExactSizeIterator for Iter +where + Range: ExactSizeIterator, + as Iterator>::Item: Default, +{ +} + +impl qc::Arbitrary for Iter +where + T: qc::Arbitrary, + HK: HintKind, +{ + fn arbitrary(g: &mut G) -> Self { + Self::new(T::arbitrary(g)..T::arbitrary(g), HK::arbitrary(g)) + } + + fn shrink(&self) -> Box> { + let r = self.iterator.clone(); + let hint_kind = self.hint_kind; + Box::new(r.start.shrink().flat_map(move |a| { + r.end + .shrink() + .map(move |b| Self::new(a.clone()..b, hint_kind)) + })) + } +} + +/// A meta-iterator which yields `Iter`s whose start/endpoints are +/// increased or decreased linearly on each iteration. +#[derive(Clone, Debug)] +struct ShiftRange { + range_start: i32, + range_end: i32, + start_step: i32, + end_step: i32, + iter_count: u32, + hint_kind: HK, +} + +impl Iterator for ShiftRange +where + HK: HintKind, +{ + type Item = Iter; + + fn next(&mut self) -> Option { + if self.iter_count == 0 { + return None; + } + + let iter = Iter::new(self.range_start..self.range_end, self.hint_kind); + + self.range_start += self.start_step; + self.range_end += self.end_step; + self.iter_count -= 1; + + Some(iter) + } +} + +impl ExactSizeIterator for ShiftRange {} + +impl qc::Arbitrary for ShiftRange +where + HK: HintKind, +{ + fn arbitrary(g: &mut G) -> Self { + const MAX_STARTING_RANGE_DIFF: i32 = 32; + const MAX_STEP_MODULO: i32 = 8; + const MAX_ITER_COUNT: u32 = 3; + + let range_start = qc::Arbitrary::arbitrary(g); + let range_end = range_start + g.gen_range(0, MAX_STARTING_RANGE_DIFF + 1); + let start_step = g.gen_range(-MAX_STEP_MODULO, MAX_STEP_MODULO + 1); + let end_step = g.gen_range(-MAX_STEP_MODULO, MAX_STEP_MODULO + 1); + let iter_count = g.gen_range(0, MAX_ITER_COUNT + 1); + let hint_kind = qc::Arbitrary::arbitrary(g); + + Self { + range_start, + range_end, + start_step, + end_step, + iter_count, + hint_kind, + } + } +} + +fn correct_count(get_it: F) -> bool +where + I: Iterator, + F: Fn() -> I, +{ + let mut counts = vec![get_it().count()]; + + 'outer: loop { + let mut it = get_it(); + + for _ in 0..(counts.len() - 1) { + #[allow(clippy::manual_assert)] + if it.next().is_none() { + panic!("Iterator shouldn't be finished, may not be deterministic"); + } + } + + if it.next().is_none() { + break 'outer; + } + + counts.push(it.count()); + } + + let total_actual_count = counts.len() - 1; + + for (i, returned_count) in counts.into_iter().enumerate() { + let actual_count = total_actual_count - i; + if actual_count != returned_count { + println!( + "Total iterations: {} True count: {} returned count: {}", + i, actual_count, 
returned_count + ); + + return false; + } + } + + true +} + +fn correct_size_hint(mut it: I) -> bool { + // record size hint at each iteration + let initial_hint = it.size_hint(); + let mut hints = Vec::with_capacity(initial_hint.0 + 1); + hints.push(initial_hint); + while let Some(_) = it.next() { + hints.push(it.size_hint()) + } + + let mut true_count = hints.len(); // start off +1 too much + + // check all the size hints + for &(low, hi) in &hints { + true_count -= 1; + if low > true_count || (hi.is_some() && hi.unwrap() < true_count) { + println!("True size: {:?}, size hint: {:?}", true_count, (low, hi)); + //println!("All hints: {:?}", hints); + return false; + } + } + true +} + +fn exact_size(mut it: I) -> bool { + // check every iteration + let (mut low, mut hi) = it.size_hint(); + if Some(low) != hi { + return false; + } + while let Some(_) = it.next() { + let (xlow, xhi) = it.size_hint(); + if low != xlow + 1 { + return false; + } + low = xlow; + hi = xhi; + if Some(low) != hi { + return false; + } + } + let (low, hi) = it.size_hint(); + low == 0 && hi == Some(0) +} + +// Exact size for this case, without ExactSizeIterator +fn exact_size_for_this(mut it: I) -> bool { + // check every iteration + let (mut low, mut hi) = it.size_hint(); + if Some(low) != hi { + return false; + } + while let Some(_) = it.next() { + let (xlow, xhi) = it.size_hint(); + if low != xlow + 1 { + return false; + } + low = xlow; + hi = xhi; + if Some(low) != hi { + return false; + } + } + let (low, hi) = it.size_hint(); + low == 0 && hi == Some(0) +} + +/* + * NOTE: Range is broken! + * (all signed ranges are) +#[quickcheck] +fn size_range_i8(a: Iter) -> bool { + exact_size(a) +} + +#[quickcheck] +fn size_range_i16(a: Iter) -> bool { + exact_size(a) +} + +#[quickcheck] +fn size_range_u8(a: Iter) -> bool { + exact_size(a) +} + */ + +macro_rules! quickcheck { + // accept several property function definitions + // The property functions can use pattern matching and `mut` as usual + // in the function arguments, but the functions can not be generic. + {$($(#$attr:tt)* fn $fn_name:ident($($arg:tt)*) -> $ret:ty { $($code:tt)* })*} => ( + $( + #[test] + $(#$attr)* + fn $fn_name() { + fn prop($($arg)*) -> $ret { + $($code)* + } + ::quickcheck::quickcheck(quickcheck!(@fn prop [] $($arg)*)); + } + )* + ); + // parse argument list (with patterns allowed) into prop as fn(_, _) -> _ + (@fn $f:ident [$($t:tt)*]) => { + $f as fn($($t),*) -> _ + }; + (@fn $f:ident [$($p:tt)*] : $($tail:tt)*) => { + quickcheck!(@fn $f [$($p)* _] $($tail)*) + }; + (@fn $f:ident [$($p:tt)*] $t:tt $($tail:tt)*) => { + quickcheck!(@fn $f [$($p)*] $($tail)*) + }; +} + +quickcheck! { + + fn size_product(a: Iter, b: Iter) -> bool { + correct_size_hint(a.cartesian_product(b)) + } + fn size_product3(a: Iter, b: Iter, c: Iter) -> bool { + correct_size_hint(iproduct!(a, b, c)) + } + + fn correct_cartesian_product3(a: Iter, b: Iter, c: Iter, + take_manual: usize) -> () + { + // test correctness of iproduct through regular iteration (take) + // and through fold. 
+ let ac = a.clone(); + let br = &b.clone(); + let cr = &c.clone(); + let answer: Vec<_> = ac.flat_map(move |ea| br.clone().flat_map(move |eb| cr.clone().map(move |ec| (ea, eb, ec)))).collect(); + let mut product_iter = iproduct!(a, b, c); + let mut actual = Vec::new(); + + actual.extend((&mut product_iter).take(take_manual)); + if actual.len() == take_manual { + product_iter.fold((), |(), elt| actual.push(elt)); + } + assert_eq!(answer, actual); + } + + fn size_multi_product(a: ShiftRange) -> bool { + correct_size_hint(a.multi_cartesian_product()) + } + fn correct_multi_product3(a: ShiftRange, take_manual: usize) -> () { + // Fix no. of iterators at 3 + let a = ShiftRange { iter_count: 3, ..a }; + + // test correctness of MultiProduct through regular iteration (take) + // and through fold. + let mut iters = a.clone(); + let i0 = iters.next().unwrap(); + let i1r = &iters.next().unwrap(); + let i2r = &iters.next().unwrap(); + let answer: Vec<_> = i0.flat_map(move |ei0| i1r.clone().flat_map(move |ei1| i2r.clone().map(move |ei2| vec![ei0, ei1, ei2]))).collect(); + let mut multi_product = a.clone().multi_cartesian_product(); + let mut actual = Vec::new(); + + actual.extend((&mut multi_product).take(take_manual)); + if actual.len() == take_manual { + multi_product.fold((), |(), elt| actual.push(elt)); + } + assert_eq!(answer, actual); + + assert_eq!(answer.into_iter().last(), a.multi_cartesian_product().last()); + } + + fn correct_empty_multi_product() -> () { + let empty = Vec::>::new().into_iter().multi_cartesian_product(); + assert!(correct_size_hint(empty.clone())); + itertools::assert_equal(empty, std::iter::once(Vec::new())) + } + + fn size_multipeek(a: Iter, s: u8) -> bool { + let mut it = multipeek(a); + // peek a few times + for _ in 0..s { + it.peek(); + } + exact_size(it) + } + + fn size_peek_nth(a: Iter, s: u8) -> bool { + let mut it = peek_nth(a); + // peek a few times + for n in 0..s { + it.peek_nth(n as usize); + } + exact_size(it) + } + + fn equal_merge(mut a: Vec, mut b: Vec) -> bool { + a.sort(); + b.sort(); + let mut merged = a.clone(); + merged.extend(b.iter().cloned()); + merged.sort(); + itertools::equal(&merged, a.iter().merge(&b)) + } + fn size_merge(a: Iter, b: Iter) -> bool { + correct_size_hint(a.merge(b)) + } + fn size_zip(a: Iter, b: Iter, c: Iter) -> bool { + let filt = a.clone().dedup(); + correct_size_hint(multizip((filt, b.clone(), c.clone()))) && + exact_size(multizip((a, b, c))) + } + fn size_zip_rc(a: Iter, b: Iter) -> bool { + let rc = rciter(a); + correct_size_hint(multizip((&rc, &rc, b))) + } + + fn size_zip_macro(a: Iter, b: Iter, c: Iter) -> bool { + let filt = a.clone().dedup(); + correct_size_hint(izip!(filt, b.clone(), c.clone())) && + exact_size(izip!(a, b, c)) + } + fn equal_kmerge(mut a: Vec, mut b: Vec, mut c: Vec) -> bool { + use itertools::free::kmerge; + a.sort(); + b.sort(); + c.sort(); + let mut merged = a.clone(); + merged.extend(b.iter().cloned()); + merged.extend(c.iter().cloned()); + merged.sort(); + itertools::equal(merged.into_iter(), kmerge(vec![a, b, c])) + } + + // Any number of input iterators + fn equal_kmerge_2(mut inputs: Vec>) -> bool { + use itertools::free::kmerge; + // sort the inputs + for input in &mut inputs { + input.sort(); + } + let mut merged = inputs.concat(); + merged.sort(); + itertools::equal(merged.into_iter(), kmerge(inputs)) + } + + // Any number of input iterators + fn equal_kmerge_by_ge(mut inputs: Vec>) -> bool { + // sort the inputs + for input in &mut inputs { + input.sort(); + input.reverse(); + } + let 
mut merged = inputs.concat(); + merged.sort(); + merged.reverse(); + itertools::equal(merged.into_iter(), + inputs.into_iter().kmerge_by(|x, y| x >= y)) + } + + // Any number of input iterators + fn equal_kmerge_by_lt(mut inputs: Vec>) -> bool { + // sort the inputs + for input in &mut inputs { + input.sort(); + } + let mut merged = inputs.concat(); + merged.sort(); + itertools::equal(merged.into_iter(), + inputs.into_iter().kmerge_by(|x, y| x < y)) + } + + // Any number of input iterators + fn equal_kmerge_by_le(mut inputs: Vec>) -> bool { + // sort the inputs + for input in &mut inputs { + input.sort(); + } + let mut merged = inputs.concat(); + merged.sort(); + itertools::equal(merged.into_iter(), + inputs.into_iter().kmerge_by(|x, y| x <= y)) + } + fn size_kmerge(a: Iter, b: Iter, c: Iter) -> bool { + use itertools::free::kmerge; + correct_size_hint(kmerge(vec![a, b, c])) + } + fn equal_zip_eq(a: Vec, b: Vec) -> bool { + let len = std::cmp::min(a.len(), b.len()); + let a = &a[..len]; + let b = &b[..len]; + itertools::equal(zip_eq(a, b), zip(a, b)) + } + + #[should_panic] + fn zip_eq_panics(a: Vec, b: Vec) -> TestResult { + if a.len() == b.len() { return TestResult::discard(); } + zip_eq(a.iter(), b.iter()).for_each(|_| {}); + TestResult::passed() // won't come here + } + + fn equal_positions(a: Vec) -> bool { + let with_pos = a.iter().positions(|v| v % 2 == 0); + let without = a.iter().enumerate().filter(|(_, v)| *v % 2 == 0).map(|(i, _)| i); + itertools::equal(with_pos.clone(), without.clone()) + && itertools::equal(with_pos.rev(), without.rev()) + } + fn size_zip_longest(a: Iter, b: Iter) -> bool { + let filt = a.clone().dedup(); + let filt2 = b.clone().dedup(); + correct_size_hint(filt.zip_longest(b.clone())) && + correct_size_hint(a.clone().zip_longest(filt2)) && + exact_size(a.zip_longest(b)) + } + fn size_2_zip_longest(a: Iter, b: Iter) -> bool { + let it = a.clone().zip_longest(b.clone()); + let jt = a.clone().zip_longest(b.clone()); + itertools::equal(a, + it.filter_map(|elt| match elt { + EitherOrBoth::Both(x, _) => Some(x), + EitherOrBoth::Left(x) => Some(x), + _ => None, + } + )) + && + itertools::equal(b, + jt.filter_map(|elt| match elt { + EitherOrBoth::Both(_, y) => Some(y), + EitherOrBoth::Right(y) => Some(y), + _ => None, + } + )) + } + fn size_interleave(a: Iter, b: Iter) -> bool { + correct_size_hint(a.interleave(b)) + } + fn exact_interleave(a: Iter, b: Iter) -> bool { + exact_size_for_this(a.interleave(b)) + } + fn size_interleave_shortest(a: Iter, b: Iter) -> bool { + correct_size_hint(a.interleave_shortest(b)) + } + fn exact_interleave_shortest(a: Vec<()>, b: Vec<()>) -> bool { + exact_size_for_this(a.iter().interleave_shortest(&b)) + } + fn size_intersperse(a: Iter, x: i16) -> bool { + correct_size_hint(a.intersperse(x)) + } + fn equal_intersperse(a: Vec, x: i32) -> bool { + let mut inter = false; + let mut i = 0; + for elt in a.iter().cloned().intersperse(x) { + if inter { + if elt != x { return false } + } else { + if elt != a[i] { return false } + i += 1; + } + inter = !inter; + } + true + } + + fn equal_combinations_2(a: Vec) -> bool { + let mut v = Vec::new(); + for (i, x) in enumerate(&a) { + for y in &a[i + 1..] 
{ + v.push((x, y)); + } + } + itertools::equal(a.iter().tuple_combinations::<(_, _)>(), v) + } + + fn collect_tuple_matches_size(a: Iter) -> bool { + let size = a.clone().count(); + a.collect_tuple::<(_, _, _)>().is_some() == (size == 3) + } + + fn correct_permutations(vals: HashSet, k: usize) -> () { + // Test permutations only on iterators of distinct integers, to prevent + // false positives. + + const MAX_N: usize = 5; + + let n = min(vals.len(), MAX_N); + let vals: HashSet = vals.into_iter().take(n).collect(); + + let perms = vals.iter().permutations(k); + + let mut actual = HashSet::new(); + + for perm in perms { + assert_eq!(perm.len(), k); + + let all_items_valid = perm.iter().all(|p| vals.contains(p)); + assert!(all_items_valid, "perm contains value not from input: {:?}", perm); + + // Check that all perm items are distinct + let distinct_len = { + let perm_set: HashSet<_> = perm.iter().collect(); + perm_set.len() + }; + assert_eq!(perm.len(), distinct_len); + + // Check that the perm is new + assert!(actual.insert(perm.clone()), "perm already encountered: {:?}", perm); + } + } + + fn permutations_lexic_order(a: usize, b: usize) -> () { + let a = a % 6; + let b = b % 6; + + let n = max(a, b); + let k = min (a, b); + + let expected_first: Vec = (0..k).collect(); + let expected_last: Vec = ((n - k)..n).rev().collect(); + + let mut perms = (0..n).permutations(k); + + let mut curr_perm = match perms.next() { + Some(p) => p, + None => { return; } + }; + + assert_eq!(expected_first, curr_perm); + + for next_perm in perms { + assert!( + next_perm > curr_perm, + "next perm isn't greater-than current; next_perm={:?} curr_perm={:?} n={}", + next_perm, curr_perm, n + ); + + curr_perm = next_perm; + } + + assert_eq!(expected_last, curr_perm); + + } + + fn permutations_count(n: usize, k: usize) -> bool { + let n = n % 6; + + correct_count(|| (0..n).permutations(k)) + } + + fn permutations_size(a: Iter, k: usize) -> bool { + correct_size_hint(a.take(5).permutations(k)) + } + + fn permutations_k0_yields_once(n: usize) -> () { + let k = 0; + let expected: Vec> = vec![vec![]]; + let actual = (0..n).permutations(k).collect_vec(); + + assert_eq!(expected, actual); + } +} + +quickcheck! { + fn correct_peek_nth(mut a: Vec) -> () { + let mut it = peek_nth(a.clone()); + for start_pos in 0..a.len() + 2 { + for real_idx in start_pos..a.len() + 2 { + let peek_idx = real_idx - start_pos; + assert_eq!(it.peek_nth(peek_idx), a.get(real_idx)); + assert_eq!(it.peek_nth_mut(peek_idx), a.get_mut(real_idx)); + } + assert_eq!(it.next(), a.get(start_pos).copied()); + } + } + + fn peek_nth_mut_replace(a: Vec, b: Vec) -> () { + let mut it = peek_nth(a.iter()); + for (i, m) in b.iter().enumerate().take(a.len().min(b.len())) { + *it.peek_nth_mut(i).unwrap() = m; + } + for (i, m) in a.iter().enumerate() { + assert_eq!(it.next().unwrap(), b.get(i).unwrap_or(m)); + } + assert_eq!(it.next(), None); + assert_eq!(it.next(), None); + } + + fn peek_nth_next_if(a: Vec) -> () { + let mut it = peek_nth(a.clone()); + for (idx, mut value) in a.iter().copied().enumerate() { + let should_be_none = it.next_if(|x| x != &value); + assert_eq!(should_be_none, None); + if value % 5 == 0 { + // Sometimes, peek up to 3 further. + let n = value as usize % 3; + let nth = it.peek_nth(n); + assert_eq!(nth, a.get(idx + n)); + } else if value % 5 == 1 { + // Sometimes, peek next element mutably. 
+ if let Some(v) = it.peek_mut() { + *v = v.wrapping_sub(1); + let should_be_none = it.next_if_eq(&value); + assert_eq!(should_be_none, None); + value = value.wrapping_sub(1); + } + } + let eq = it.next_if_eq(&value); + assert_eq!(eq, Some(value)); + } + } +} + +quickcheck! { + fn dedup_via_coalesce(a: Vec) -> bool { + let mut b = a.clone(); + b.dedup(); + itertools::equal( + &b, + a + .iter() + .coalesce(|x, y| { + if x==y { + Ok(x) + } else { + Err((x, y)) + } + }) + .fold(vec![], |mut v, n| { + v.push(n); + v + }) + ) + } +} + +quickcheck! { + fn equal_dedup(a: Vec) -> bool { + let mut b = a.clone(); + b.dedup(); + itertools::equal(&b, a.iter().dedup()) + } +} + +quickcheck! { + fn equal_dedup_by(a: Vec<(i32, i32)>) -> bool { + let mut b = a.clone(); + b.dedup_by(|x, y| x.0==y.0); + itertools::equal(&b, a.iter().dedup_by(|x, y| x.0==y.0)) + } +} + +quickcheck! { + fn size_dedup(a: Vec) -> bool { + correct_size_hint(a.iter().dedup()) + } +} + +quickcheck! { + fn size_dedup_by(a: Vec<(i32, i32)>) -> bool { + correct_size_hint(a.iter().dedup_by(|x, y| x.0==y.0)) + } +} + +quickcheck! { + fn exact_repeatn((n, x): (usize, i32)) -> bool { + let it = itertools::repeat_n(x, n); + exact_size(it) + } +} + +quickcheck! { + fn size_put_back(a: Vec, x: Option) -> bool { + let mut it = put_back(a.into_iter()); + if let Some(t) = x { + it.put_back(t); + } + correct_size_hint(it) + } +} + +quickcheck! { + fn size_put_backn(a: Vec, b: Vec) -> bool { + let mut it = put_back_n(a.into_iter()); + for elt in b { + it.put_back(elt) + } + correct_size_hint(it) + } +} + +quickcheck! { + fn merge_join_by_ordering_vs_bool(a: Vec, b: Vec) -> bool { + use either::Either; + use itertools::free::merge_join_by; + let mut has_equal = false; + let it_ord = merge_join_by(a.clone(), b.clone(), Ord::cmp).flat_map(|v| match v { + EitherOrBoth::Both(l, r) => { + has_equal = true; + vec![Either::Left(l), Either::Right(r)] + } + EitherOrBoth::Left(l) => vec![Either::Left(l)], + EitherOrBoth::Right(r) => vec![Either::Right(r)], + }); + let it_bool = merge_join_by(a, b, PartialOrd::le); + itertools::equal(it_ord, it_bool) || has_equal + } + fn merge_join_by_bool_unwrapped_is_merge_by(a: Vec, b: Vec) -> bool { + use either::Either; + use itertools::free::merge_join_by; + let it = a.clone().into_iter().merge_by(b.clone(), PartialOrd::ge); + let it_join = merge_join_by(a, b, PartialOrd::ge).map(Either::into_inner); + itertools::equal(it, it_join) + } +} + +quickcheck! { + fn size_tee(a: Vec) -> bool { + let (mut t1, mut t2) = a.iter().tee(); + t1.next(); + t1.next(); + t2.next(); + exact_size(t1) && exact_size(t2) + } +} + +quickcheck! { + fn size_tee_2(a: Vec) -> bool { + let (mut t1, mut t2) = a.iter().dedup().tee(); + t1.next(); + t1.next(); + t2.next(); + correct_size_hint(t1) && correct_size_hint(t2) + } +} + +quickcheck! { + fn size_take_while_ref(a: Vec, stop: u8) -> bool { + correct_size_hint(a.iter().take_while_ref(|x| **x != stop)) + } +} + +quickcheck! { + fn equal_partition(a: Vec) -> bool { + let mut a = a; + let mut ap = a.clone(); + let split_index = itertools::partition(&mut ap, |x| *x >= 0); + let parted = (0..split_index).all(|i| ap[i] >= 0) && + (split_index..a.len()).all(|i| ap[i] < 0); + + a.sort(); + ap.sort(); + parted && (a == ap) + } +} + +quickcheck! 
{ + fn size_combinations(a: Iter) -> bool { + let it = a.clone().tuple_combinations::<(_, _)>(); + correct_size_hint(it.clone()) && it.count() == binomial(a.count(), 2) + } + + fn exact_size_combinations_1(a: Vec) -> bool { + let it = a.iter().tuple_combinations::<(_,)>(); + exact_size_for_this(it.clone()) && it.count() == binomial(a.len(), 1) + } + fn exact_size_combinations_2(a: Vec) -> bool { + let it = a.iter().tuple_combinations::<(_, _)>(); + exact_size_for_this(it.clone()) && it.count() == binomial(a.len(), 2) + } + fn exact_size_combinations_3(mut a: Vec) -> bool { + a.truncate(15); + let it = a.iter().tuple_combinations::<(_, _, _)>(); + exact_size_for_this(it.clone()) && it.count() == binomial(a.len(), 3) + } +} + +fn binomial(n: usize, k: usize) -> usize { + if k > n { + 0 + } else { + (n - k + 1..=n).product::() / (1..=k).product::() + } +} + +quickcheck! { + fn equal_combinations(it: Iter) -> bool { + let values = it.clone().collect_vec(); + let mut cmb = it.tuple_combinations(); + for i in 0..values.len() { + for j in i+1..values.len() { + let pair = (values[i], values[j]); + if pair != cmb.next().unwrap() { + return false; + } + } + } + cmb.next().is_none() + } +} + +quickcheck! { + fn size_pad_tail(it: Iter, pad: u8) -> bool { + correct_size_hint(it.clone().pad_using(pad as usize, |_| 0)) && + correct_size_hint(it.dropping(1).rev().pad_using(pad as usize, |_| 0)) + } +} + +quickcheck! { + fn size_pad_tail2(it: Iter, pad: u8) -> bool { + exact_size(it.pad_using(pad as usize, |_| 0)) + } +} + +quickcheck! { + fn size_powerset(it: Iter) -> bool { + // Powerset cardinality gets large very quickly, limit input to keep test fast. + correct_size_hint(it.take(12).powerset()) + } +} + +quickcheck! { + fn size_duplicates(it: Iter) -> bool { + correct_size_hint(it.duplicates()) + } +} + +quickcheck! { + fn size_unique(it: Iter) -> bool { + correct_size_hint(it.unique()) + } + + fn count_unique(it: Vec, take_first: u8) -> () { + let answer = { + let mut v = it.clone(); + v.sort(); v.dedup(); + v.len() + }; + let mut iter = cloned(&it).unique(); + let first_count = (&mut iter).take(take_first as usize).count(); + let rest_count = iter.count(); + assert_eq!(answer, first_count + rest_count); + } +} + +quickcheck! { + fn fuzz_chunk_by_lazy_1(it: Iter) -> bool { + let jt = it.clone(); + let chunks = it.chunk_by(|k| *k); + itertools::equal(jt, chunks.into_iter().flat_map(|(_, x)| x)) + } +} + +quickcheck! { + fn fuzz_chunk_by_lazy_2(data: Vec) -> bool { + let chunks = data.iter().chunk_by(|k| *k / 10); + let res = itertools::equal(data.iter(), chunks.into_iter().flat_map(|(_, x)| x)); + res + } +} + +quickcheck! { + fn fuzz_chunk_by_lazy_3(data: Vec) -> bool { + let grouper = data.iter().chunk_by(|k| *k / 10); + let chunks = grouper.into_iter().collect_vec(); + let res = itertools::equal(data.iter(), chunks.into_iter().flat_map(|(_, x)| x)); + res + } +} + +quickcheck! { + fn fuzz_chunk_by_lazy_duo(data: Vec, order: Vec<(bool, bool)>) -> bool { + let grouper = data.iter().chunk_by(|k| *k / 3); + let mut chunks1 = grouper.into_iter(); + let mut chunks2 = grouper.into_iter(); + let mut elts = Vec::<&u8>::new(); + let mut old_chunks = Vec::new(); + + let tup1 = |(_, b)| b; + for &(ord, consume_now) in &order { + let iter = &mut [&mut chunks1, &mut chunks2][ord as usize]; + match iter.next() { + Some((_, gr)) => if consume_now { + for og in old_chunks.drain(..) 
{ + elts.extend(og); + } + elts.extend(gr); + } else { + old_chunks.push(gr); + }, + None => break, + } + } + for og in old_chunks.drain(..) { + elts.extend(og); + } + for gr in chunks1.map(&tup1) { elts.extend(gr); } + for gr in chunks2.map(&tup1) { elts.extend(gr); } + itertools::assert_equal(&data, elts); + true + } +} + +quickcheck! { + fn chunk_clone_equal(a: Vec, size: u8) -> () { + let mut size = size; + if size == 0 { + size += 1; + } + let it = a.chunks(size as usize); + itertools::assert_equal(it.clone(), it); + } +} + +quickcheck! { + fn equal_chunks_lazy(a: Vec, size: u8) -> bool { + let mut size = size; + if size == 0 { + size += 1; + } + let chunks = a.iter().chunks(size as usize); + let it = a.chunks(size as usize); + for (a, b) in chunks.into_iter().zip(it) { + if !itertools::equal(a, b) { + return false; + } + } + true + } +} + +// tuple iterators +quickcheck! { + fn equal_circular_tuple_windows_1(a: Vec) -> bool { + let x = a.iter().map(|e| (e,) ); + let y = a.iter().circular_tuple_windows::<(_,)>(); + itertools::assert_equal(x,y); + true + } + + fn equal_circular_tuple_windows_2(a: Vec) -> bool { + let x = (0..a.len()).map(|start_idx| ( + &a[start_idx], + &a[(start_idx + 1) % a.len()], + )); + let y = a.iter().circular_tuple_windows::<(_, _)>(); + itertools::assert_equal(x,y); + true + } + + fn equal_circular_tuple_windows_3(a: Vec) -> bool { + let x = (0..a.len()).map(|start_idx| ( + &a[start_idx], + &a[(start_idx + 1) % a.len()], + &a[(start_idx + 2) % a.len()], + )); + let y = a.iter().circular_tuple_windows::<(_, _, _)>(); + itertools::assert_equal(x,y); + true + } + + fn equal_circular_tuple_windows_4(a: Vec) -> bool { + let x = (0..a.len()).map(|start_idx| ( + &a[start_idx], + &a[(start_idx + 1) % a.len()], + &a[(start_idx + 2) % a.len()], + &a[(start_idx + 3) % a.len()], + )); + let y = a.iter().circular_tuple_windows::<(_, _, _, _)>(); + itertools::assert_equal(x,y); + true + } + + fn equal_cloned_circular_tuple_windows(a: Vec) -> bool { + let x = a.iter().circular_tuple_windows::<(_, _, _, _)>(); + let y = x.clone(); + itertools::assert_equal(x,y); + true + } + + fn equal_cloned_circular_tuple_windows_noninitial(a: Vec) -> bool { + let mut x = a.iter().circular_tuple_windows::<(_, _, _, _)>(); + let _ = x.next(); + let y = x.clone(); + itertools::assert_equal(x,y); + true + } + + fn equal_cloned_circular_tuple_windows_complete(a: Vec) -> bool { + let mut x = a.iter().circular_tuple_windows::<(_, _, _, _)>(); + for _ in x.by_ref() {} + let y = x.clone(); + itertools::assert_equal(x,y); + true + } + + fn circular_tuple_windows_exact_size(a: Vec) -> bool { + exact_size(a.iter().circular_tuple_windows::<(_, _, _, _)>()) + } + + fn equal_tuple_windows_1(a: Vec) -> bool { + let x = a.windows(1).map(|s| (&s[0], )); + let y = a.iter().tuple_windows::<(_,)>(); + itertools::equal(x, y) + } + + fn equal_tuple_windows_2(a: Vec) -> bool { + let x = a.windows(2).map(|s| (&s[0], &s[1])); + let y = a.iter().tuple_windows::<(_, _)>(); + itertools::equal(x, y) + } + + fn equal_tuple_windows_3(a: Vec) -> bool { + let x = a.windows(3).map(|s| (&s[0], &s[1], &s[2])); + let y = a.iter().tuple_windows::<(_, _, _)>(); + itertools::equal(x, y) + } + + fn equal_tuple_windows_4(a: Vec) -> bool { + let x = a.windows(4).map(|s| (&s[0], &s[1], &s[2], &s[3])); + let y = a.iter().tuple_windows::<(_, _, _, _)>(); + itertools::equal(x, y) + } + + fn tuple_windows_exact_size_1(a: Vec) -> bool { + exact_size(a.iter().tuple_windows::<(_,)>()) + } + + fn tuple_windows_exact_size_4(a: Vec) -> bool { 
+ exact_size(a.iter().tuple_windows::<(_, _, _, _)>()) + } + + fn equal_tuples_1(a: Vec) -> bool { + let x = a.chunks(1).map(|s| (&s[0], )); + let y = a.iter().tuples::<(_,)>(); + itertools::equal(x, y) + } + + fn equal_tuples_2(a: Vec) -> bool { + let x = a.chunks(2).filter(|s| s.len() == 2).map(|s| (&s[0], &s[1])); + let y = a.iter().tuples::<(_, _)>(); + itertools::equal(x, y) + } + + fn equal_tuples_3(a: Vec) -> bool { + let x = a.chunks(3).filter(|s| s.len() == 3).map(|s| (&s[0], &s[1], &s[2])); + let y = a.iter().tuples::<(_, _, _)>(); + itertools::equal(x, y) + } + + fn equal_tuples_4(a: Vec) -> bool { + let x = a.chunks(4).filter(|s| s.len() == 4).map(|s| (&s[0], &s[1], &s[2], &s[3])); + let y = a.iter().tuples::<(_, _, _, _)>(); + itertools::equal(x, y) + } + + fn exact_tuple_buffer(a: Vec) -> bool { + let mut iter = a.iter().tuples::<(_, _, _, _)>(); + (&mut iter).last(); + let buffer = iter.into_buffer(); + assert_eq!(buffer.len(), a.len() % 4); + exact_size(buffer) + } + + fn tuples_size_hint_inexact(a: Iter) -> bool { + correct_size_hint(a.clone().tuples::<(_,)>()) + && correct_size_hint(a.clone().tuples::<(_, _)>()) + && correct_size_hint(a.tuples::<(_, _, _, _)>()) + } + + fn tuples_size_hint_exact(a: Iter) -> bool { + exact_size(a.clone().tuples::<(_,)>()) + && exact_size(a.clone().tuples::<(_, _)>()) + && exact_size(a.tuples::<(_, _, _, _)>()) + } +} + +// with_position +quickcheck! { + fn with_position_exact_size_1(a: Vec) -> bool { + exact_size_for_this(a.iter().with_position()) + } + fn with_position_exact_size_2(a: Iter) -> bool { + exact_size_for_this(a.with_position()) + } +} + +quickcheck! { + fn correct_group_map_modulo_key(a: Vec, modulo: u8) -> () { + let modulo = if modulo == 0 { 1 } else { modulo }; // Avoid `% 0` + let count = a.len(); + let lookup = a.into_iter().map(|i| (i % modulo, i)).into_group_map(); + + assert_eq!(lookup.values().flat_map(|vals| vals.iter()).count(), count); + + for (&key, vals) in lookup.iter() { + assert!(vals.iter().all(|&val| val % modulo == key)); + } + } +} + +/// A peculiar type: Equality compares both tuple items, but ordering only the +/// first item. This is so we can check the stability property easily. +#[derive(Clone, Debug, PartialEq, Eq)] +struct Val(u32, u32); + +impl PartialOrd for Val { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + +impl Ord for Val { + fn cmp(&self, other: &Self) -> Ordering { + self.0.cmp(&other.0) + } +} + +impl qc::Arbitrary for Val { + fn arbitrary(g: &mut G) -> Self { + let (x, y) = <(u32, u32)>::arbitrary(g); + Self(x, y) + } + fn shrink(&self) -> Box> { + Box::new((self.0, self.1).shrink().map(|(x, y)| Self(x, y))) + } +} + +quickcheck! { + fn minmax(a: Vec) -> bool { + use itertools::MinMaxResult; + + + let minmax = a.iter().minmax(); + let expected = match a.len() { + 0 => MinMaxResult::NoElements, + 1 => MinMaxResult::OneElement(&a[0]), + _ => MinMaxResult::MinMax(a.iter().min().unwrap(), + a.iter().max().unwrap()), + }; + minmax == expected + } +} + +quickcheck! 
{ + fn minmax_f64(a: Vec) -> TestResult { + use itertools::MinMaxResult; + + if a.iter().any(|x| x.is_nan()) { + return TestResult::discard(); + } + + let min = cloned(&a).fold1(f64::min); + let max = cloned(&a).fold1(f64::max); + + let minmax = cloned(&a).minmax(); + let expected = match a.len() { + 0 => MinMaxResult::NoElements, + 1 => MinMaxResult::OneElement(min.unwrap()), + _ => MinMaxResult::MinMax(min.unwrap(), max.unwrap()), + }; + TestResult::from_bool(minmax == expected) + } +} + +quickcheck! { + fn tree_reduce_f64(mut a: Vec) -> TestResult { + fn collapse_adjacent(x: Vec, mut f: F) -> Vec + where F: FnMut(f64, f64) -> f64 + { + let mut out = Vec::new(); + for i in (0..x.len()).step_by(2) { + if i == x.len()-1 { + out.push(x[i]) + } else { + out.push(f(x[i], x[i+1])); + } + } + out + } + + if a.iter().any(|x| x.is_nan()) { + return TestResult::discard(); + } + + let actual = a.iter().cloned().tree_reduce(f64::atan2); + + while a.len() > 1 { + a = collapse_adjacent(a, f64::atan2); + } + let expected = a.pop(); + + TestResult::from_bool(actual == expected) + } +} + +quickcheck! { + fn exactly_one_i32(a: Vec) -> TestResult { + let ret = a.iter().cloned().exactly_one(); + match a.len() { + 1 => TestResult::from_bool(ret.unwrap() == a[0]), + _ => TestResult::from_bool(ret.unwrap_err().eq(a.iter().cloned())), + } + } +} + +quickcheck! { + fn at_most_one_i32(a: Vec) -> TestResult { + let ret = a.iter().cloned().at_most_one(); + match a.len() { + 0 => TestResult::from_bool(ret.unwrap().is_none()), + 1 => TestResult::from_bool(ret.unwrap() == Some(a[0])), + _ => TestResult::from_bool(ret.unwrap_err().eq(a.iter().cloned())), + } + } +} + +quickcheck! { + fn consistent_grouping_map_with_by(a: Vec, modulo: u8) -> () { + let modulo = if modulo == 0 { 1 } else { modulo }; // Avoid `% 0` + + let lookup_grouping_map = a.iter().copied().map(|i| (i % modulo, i)).into_grouping_map().collect::>(); + let lookup_grouping_map_by = a.iter().copied().into_grouping_map_by(|i| i % modulo).collect::>(); + + assert_eq!(lookup_grouping_map, lookup_grouping_map_by); + } + + fn correct_grouping_map_by_aggregate_modulo_key(a: Vec, modulo: u8) -> () { + let modulo = if modulo < 2 { 2 } else { modulo } as u64; // Avoid `% 0` + let lookup = a.iter() + .map(|&b| b as u64) // Avoid overflows + .into_grouping_map_by(|i| i % modulo) + .aggregate(|acc, &key, val| { + assert!(val % modulo == key); + if val % (modulo - 1) == 0 { + None + } else { + Some(acc.unwrap_or(0) + val) + } + }); + + let group_map_lookup = a.iter() + .map(|&b| b as u64) + .map(|i| (i % modulo, i)) + .into_group_map() + .into_iter() + .filter_map(|(key, vals)| { + vals.into_iter().fold(None, |acc, val| { + if val % (modulo - 1) == 0 { + None + } else { + Some(acc.unwrap_or(0) + val) + } + }).map(|new_val| (key, new_val)) + }) + .collect::>(); + assert_eq!(lookup, group_map_lookup); + + for m in 0..modulo { + assert_eq!( + lookup.get(&m).copied(), + a.iter() + .map(|&b| b as u64) + .filter(|&val| val % modulo == m) + .fold(None, |acc, val| { + if val % (modulo - 1) == 0 { + None + } else { + Some(acc.unwrap_or(0) + val) + } + }) + ); + } + } + + fn correct_grouping_map_by_fold_with_modulo_key(a: Vec, modulo: u8) -> () { + #[derive(Debug, Default, PartialEq)] + struct Accumulator { + acc: u64, + } + + let modulo = if modulo == 0 { 1 } else { modulo } as u64; // Avoid `% 0` + let lookup = a.iter().map(|&b| b as u64) // Avoid overflows + .into_grouping_map_by(|i| i % modulo) + .fold_with(|_key, _val| Default::default(), |Accumulator { acc }, &key, val| 
{ + assert!(val % modulo == key); + let acc = acc + val; + Accumulator { acc } + }); + + let group_map_lookup = a.iter() + .map(|&b| b as u64) + .map(|i| (i % modulo, i)) + .into_group_map() + .into_iter() + .map(|(key, vals)| (key, vals.into_iter().sum())).map(|(key, acc)| (key,Accumulator { acc })) + .collect::>(); + assert_eq!(lookup, group_map_lookup); + + for (&key, &Accumulator { acc: sum }) in lookup.iter() { + assert_eq!(sum, a.iter().map(|&b| b as u64).filter(|&val| val % modulo == key).sum::()); + } + } + + fn correct_grouping_map_by_fold_modulo_key(a: Vec, modulo: u8) -> () { + let modulo = if modulo == 0 { 1 } else { modulo } as u64; // Avoid `% 0` + let lookup = a.iter().map(|&b| b as u64) // Avoid overflows + .into_grouping_map_by(|i| i % modulo) + .fold(0u64, |acc, &key, val| { + assert!(val % modulo == key); + acc + val + }); + + let group_map_lookup = a.iter() + .map(|&b| b as u64) + .map(|i| (i % modulo, i)) + .into_group_map() + .into_iter() + .map(|(key, vals)| (key, vals.into_iter().sum())) + .collect::>(); + assert_eq!(lookup, group_map_lookup); + + for (&key, &sum) in lookup.iter() { + assert_eq!(sum, a.iter().map(|&b| b as u64).filter(|&val| val % modulo == key).sum::()); + } + } + + fn correct_grouping_map_by_reduce_modulo_key(a: Vec, modulo: u8) -> () { + let modulo = if modulo == 0 { 1 } else { modulo } as u64; // Avoid `% 0` + let lookup = a.iter().map(|&b| b as u64) // Avoid overflows + .into_grouping_map_by(|i| i % modulo) + .reduce(|acc, &key, val| { + assert!(val % modulo == key); + acc + val + }); + + // TODO: Swap `fold1` with stdlib's `reduce` when it's stabilized + let group_map_lookup = a.iter() + .map(|&b| b as u64) + .map(|i| (i % modulo, i)) + .into_group_map() + .into_iter() + .map(|(key, vals)| (key, vals.into_iter().fold1(|acc, val| acc + val).unwrap())) + .collect::>(); + assert_eq!(lookup, group_map_lookup); + + for (&key, &sum) in lookup.iter() { + assert_eq!(sum, a.iter().map(|&b| b as u64).filter(|&val| val % modulo == key).sum::()); + } + } + + fn correct_grouping_map_by_collect_modulo_key(a: Vec, modulo: u8) -> () { + let modulo = if modulo == 0 { 1 } else { modulo }; // Avoid `% 0` + let lookup_grouping_map = a.iter().copied().into_grouping_map_by(|i| i % modulo).collect::>(); + let lookup_group_map = a.iter().copied().map(|i| (i % modulo, i)).into_group_map(); + + assert_eq!(lookup_grouping_map, lookup_group_map); + } + + fn correct_grouping_map_by_max_modulo_key(a: Vec, modulo: u8) -> () { + let modulo = if modulo == 0 { 1 } else { modulo }; // Avoid `% 0` + let lookup = a.iter().copied().into_grouping_map_by(|i| i % modulo).max(); + + let group_map_lookup = a.iter().copied() + .map(|i| (i % modulo, i)) + .into_group_map() + .into_iter() + .map(|(key, vals)| (key, vals.into_iter().max().unwrap())) + .collect::>(); + assert_eq!(lookup, group_map_lookup); + + for (&key, &max) in lookup.iter() { + assert_eq!(Some(max), a.iter().copied().filter(|&val| val % modulo == key).max()); + } + } + + fn correct_grouping_map_by_max_by_modulo_key(a: Vec, modulo: u8) -> () { + let modulo = if modulo == 0 { 1 } else { modulo }; // Avoid `% 0` + let lookup = a.iter().copied().into_grouping_map_by(|i| i % modulo).max_by(|_, v1, v2| v1.cmp(v2)); + + let group_map_lookup = a.iter().copied() + .map(|i| (i % modulo, i)) + .into_group_map() + .into_iter() + .map(|(key, vals)| (key, vals.into_iter().max_by(|v1, v2| v1.cmp(v2)).unwrap())) + .collect::>(); + assert_eq!(lookup, group_map_lookup); + + for (&key, &max) in lookup.iter() { + assert_eq!(Some(max), 
a.iter().copied().filter(|&val| val % modulo == key).max_by(|v1, v2| v1.cmp(v2))); + } + } + + fn correct_grouping_map_by_max_by_key_modulo_key(a: Vec, modulo: u8) -> () { + let modulo = if modulo == 0 { 1 } else { modulo }; // Avoid `% 0` + let lookup = a.iter().copied().into_grouping_map_by(|i| i % modulo).max_by_key(|_, &val| val); + + let group_map_lookup = a.iter().copied() + .map(|i| (i % modulo, i)) + .into_group_map() + .into_iter() + .map(|(key, vals)| (key, vals.into_iter().max_by_key(|&val| val).unwrap())) + .collect::>(); + assert_eq!(lookup, group_map_lookup); + + for (&key, &max) in lookup.iter() { + assert_eq!(Some(max), a.iter().copied().filter(|&val| val % modulo == key).max_by_key(|&val| val)); + } + } + + fn correct_grouping_map_by_min_modulo_key(a: Vec, modulo: u8) -> () { + let modulo = if modulo == 0 { 1 } else { modulo }; // Avoid `% 0` + let lookup = a.iter().copied().into_grouping_map_by(|i| i % modulo).min(); + + let group_map_lookup = a.iter().copied() + .map(|i| (i % modulo, i)) + .into_group_map() + .into_iter() + .map(|(key, vals)| (key, vals.into_iter().min().unwrap())) + .collect::>(); + assert_eq!(lookup, group_map_lookup); + + for (&key, &min) in lookup.iter() { + assert_eq!(Some(min), a.iter().copied().filter(|&val| val % modulo == key).min()); + } + } + + fn correct_grouping_map_by_min_by_modulo_key(a: Vec, modulo: u8) -> () { + let modulo = if modulo == 0 { 1 } else { modulo }; // Avoid `% 0` + let lookup = a.iter().copied().into_grouping_map_by(|i| i % modulo).min_by(|_, v1, v2| v1.cmp(v2)); + + let group_map_lookup = a.iter().copied() + .map(|i| (i % modulo, i)) + .into_group_map() + .into_iter() + .map(|(key, vals)| (key, vals.into_iter().min_by(|v1, v2| v1.cmp(v2)).unwrap())) + .collect::>(); + assert_eq!(lookup, group_map_lookup); + + for (&key, &min) in lookup.iter() { + assert_eq!(Some(min), a.iter().copied().filter(|&val| val % modulo == key).min_by(|v1, v2| v1.cmp(v2))); + } + } + + fn correct_grouping_map_by_min_by_key_modulo_key(a: Vec, modulo: u8) -> () { + let modulo = if modulo == 0 { 1 } else { modulo }; // Avoid `% 0` + let lookup = a.iter().copied().into_grouping_map_by(|i| i % modulo).min_by_key(|_, &val| val); + + let group_map_lookup = a.iter().copied() + .map(|i| (i % modulo, i)) + .into_group_map() + .into_iter() + .map(|(key, vals)| (key, vals.into_iter().min_by_key(|&val| val).unwrap())) + .collect::>(); + assert_eq!(lookup, group_map_lookup); + + for (&key, &min) in lookup.iter() { + assert_eq!(Some(min), a.iter().copied().filter(|&val| val % modulo == key).min_by_key(|&val| val)); + } + } + + fn correct_grouping_map_by_minmax_modulo_key(a: Vec, modulo: u8) -> () { + let modulo = if modulo == 0 { 1 } else { modulo }; // Avoid `% 0` + let lookup = a.iter().copied().into_grouping_map_by(|i| i % modulo).minmax(); + + let group_map_lookup = a.iter().copied() + .map(|i| (i % modulo, i)) + .into_group_map() + .into_iter() + .map(|(key, vals)| (key, vals.into_iter().minmax())) + .collect::>(); + assert_eq!(lookup, group_map_lookup); + + for (&key, &minmax) in lookup.iter() { + assert_eq!(minmax, a.iter().copied().filter(|&val| val % modulo == key).minmax()); + } + } + + fn correct_grouping_map_by_minmax_by_modulo_key(a: Vec, modulo: u8) -> () { + let modulo = if modulo == 0 { 1 } else { modulo }; // Avoid `% 0` + let lookup = a.iter().copied().into_grouping_map_by(|i| i % modulo).minmax_by(|_, v1, v2| v1.cmp(v2)); + + let group_map_lookup = a.iter().copied() + .map(|i| (i % modulo, i)) + .into_group_map() + .into_iter() + .map(|(key, 
vals)| (key, vals.into_iter().minmax_by(|v1, v2| v1.cmp(v2)))) + .collect::>(); + assert_eq!(lookup, group_map_lookup); + + for (&key, &minmax) in lookup.iter() { + assert_eq!(minmax, a.iter().copied().filter(|&val| val % modulo == key).minmax_by(|v1, v2| v1.cmp(v2))); + } + } + + fn correct_grouping_map_by_minmax_by_key_modulo_key(a: Vec, modulo: u8) -> () { + let modulo = if modulo == 0 { 1 } else { modulo }; // Avoid `% 0` + let lookup = a.iter().copied().into_grouping_map_by(|i| i % modulo).minmax_by_key(|_, &val| val); + + let group_map_lookup = a.iter().copied() + .map(|i| (i % modulo, i)) + .into_group_map() + .into_iter() + .map(|(key, vals)| (key, vals.into_iter().minmax_by_key(|&val| val))) + .collect::>(); + assert_eq!(lookup, group_map_lookup); + + for (&key, &minmax) in lookup.iter() { + assert_eq!(minmax, a.iter().copied().filter(|&val| val % modulo == key).minmax_by_key(|&val| val)); + } + } + + fn correct_grouping_map_by_sum_modulo_key(a: Vec, modulo: u8) -> () { + let modulo = if modulo == 0 { 1 } else { modulo } as u64; // Avoid `% 0` + let lookup = a.iter().map(|&b| b as u64) // Avoid overflows + .into_grouping_map_by(|i| i % modulo) + .sum(); + + let group_map_lookup = a.iter().map(|&b| b as u64) + .map(|i| (i % modulo, i)) + .into_group_map() + .into_iter() + .map(|(key, vals)| (key, vals.into_iter().sum())) + .collect::>(); + assert_eq!(lookup, group_map_lookup); + + for (&key, &sum) in lookup.iter() { + assert_eq!(sum, a.iter().map(|&b| b as u64).filter(|&val| val % modulo == key).sum::()); + } + } + + fn correct_grouping_map_by_product_modulo_key(a: Vec, modulo: u8) -> () { + let modulo = Wrapping(if modulo == 0 { 1 } else { modulo } as u64); // Avoid `% 0` + let lookup = a.iter().map(|&b| Wrapping(b as u64)) // Avoid overflows + .into_grouping_map_by(|i| i % modulo) + .product(); + + let group_map_lookup = a.iter().map(|&b| Wrapping(b as u64)) + .map(|i| (i % modulo, i)) + .into_group_map() + .into_iter() + .map(|(key, vals)| (key, vals.into_iter().product::>())) + .collect::>(); + assert_eq!(lookup, group_map_lookup); + + for (&key, &prod) in lookup.iter() { + assert_eq!( + prod, + a.iter() + .map(|&b| Wrapping(b as u64)) + .filter(|&val| val % modulo == key) + .product::>() + ); + } + } + + // This should check that if multiple elements are equally minimum or maximum + // then `max`, `min` and `minmax` pick the first minimum and the last maximum. + // This is to be consistent with `std::iter::max` and `std::iter::min`. + fn correct_grouping_map_by_min_max_minmax_order_modulo_key() -> () { + use itertools::MinMaxResult; + + let lookup = (0..=10) + .into_grouping_map_by(|_| 0) + .max_by(|_, _, _| Ordering::Equal); + + assert_eq!(lookup[&0], 10); + + let lookup = (0..=10) + .into_grouping_map_by(|_| 0) + .min_by(|_, _, _| Ordering::Equal); + + assert_eq!(lookup[&0], 0); + + let lookup = (0..=10) + .into_grouping_map_by(|_| 0) + .minmax_by(|_, _, _| Ordering::Equal); + + assert_eq!(lookup[&0], MinMaxResult::MinMax(0, 10)); + } +} + +quickcheck! { + fn counts(nums: Vec) -> TestResult { + let counts = nums.iter().counts(); + for (&item, &count) in counts.iter() { + #[allow(clippy::absurd_extreme_comparisons)] + if count <= 0 { + return TestResult::failed(); + } + if count != nums.iter().filter(|&x| x == item).count() { + return TestResult::failed(); + } + } + for item in nums.iter() { + if !counts.contains_key(item) { + return TestResult::failed(); + } + } + TestResult::passed() + } +} + +quickcheck! 
{ + fn test_double_ended_zip_2(a: Vec, b: Vec) -> TestResult { + let mut x = + multizip((a.clone().into_iter(), b.clone().into_iter())) + .collect_vec(); + x.reverse(); + + let y = + multizip((a.into_iter(), b.into_iter())) + .rfold(Vec::new(), |mut vec, e| { vec.push(e); vec }); + + TestResult::from_bool(itertools::equal(x, y)) + } + + fn test_double_ended_zip_3(a: Vec, b: Vec, c: Vec) -> TestResult { + let mut x = + multizip((a.clone().into_iter(), b.clone().into_iter(), c.clone().into_iter())) + .collect_vec(); + x.reverse(); + + let y = + multizip((a.into_iter(), b.into_iter(), c.into_iter())) + .rfold(Vec::new(), |mut vec, e| { vec.push(e); vec }); + + TestResult::from_bool(itertools::equal(x, y)) + } +} + +fn is_fused(mut it: I) -> bool { + for _ in it.by_ref() {} + for _ in 0..10 { + if it.next().is_some() { + return false; + } + } + true +} + +quickcheck! { + fn fused_combination(a: Iter) -> bool + { + is_fused(a.clone().combinations(1)) && + is_fused(a.combinations(3)) + } + + fn fused_combination_with_replacement(a: Iter) -> bool + { + is_fused(a.clone().combinations_with_replacement(1)) && + is_fused(a.combinations_with_replacement(3)) + } + + fn fused_tuple_combination(a: Iter) -> bool + { + is_fused(a.clone().fuse().tuple_combinations::<(_,)>()) && + is_fused(a.fuse().tuple_combinations::<(_,_,_)>()) + } + + fn fused_unique(a: Iter) -> bool + { + is_fused(a.fuse().unique()) + } + + fn fused_unique_by(a: Iter) -> bool + { + is_fused(a.fuse().unique_by(|x| x % 100)) + } + + fn fused_interleave_shortest(a: Iter, b: Iter) -> bool + { + !is_fused(a.clone().interleave_shortest(b.clone())) && + is_fused(a.fuse().interleave_shortest(b.fuse())) + } + + fn fused_product(a: Iter, b: Iter) -> bool + { + is_fused(a.fuse().cartesian_product(b.fuse())) + } + + fn fused_merge(a: Iter, b: Iter) -> bool + { + is_fused(a.fuse().merge(b.fuse())) + } + + fn fused_filter_ok(a: Iter) -> bool + { + is_fused(a.map(|x| if x % 2 == 0 {Ok(x)} else {Err(x)} ) + .filter_ok(|x| x % 3 == 0) + .fuse()) + } + + fn fused_filter_map_ok(a: Iter) -> bool + { + is_fused(a.map(|x| if x % 2 == 0 {Ok(x)} else {Err(x)} ) + .filter_map_ok(|x| if x % 3 == 0 {Some(x / 3)} else {None}) + .fuse()) + } + + fn fused_positions(a: Iter) -> bool + { + !is_fused(a.clone().positions(|x|x%2==0)) && + is_fused(a.fuse().positions(|x|x%2==0)) + } + + fn fused_update(a: Iter) -> bool + { + !is_fused(a.clone().update(|x|*x+=1)) && + is_fused(a.fuse().update(|x|*x+=1)) + } + + fn fused_tuple_windows(a: Iter) -> bool + { + is_fused(a.fuse().tuple_windows::<(_,_)>()) + } + + fn fused_pad_using(a: Iter) -> bool + { + is_fused(a.fuse().pad_using(100,|_|0)) + } +} + +quickcheck! 
{
+    fn min_set_contains_min(a: Vec<(usize, char)>) -> bool {
+        let result_set = a.iter().min_set();
+        if let Some(result_element) = a.iter().min() {
+            result_set.contains(&result_element)
+        } else {
+            result_set.is_empty()
+        }
+    }
+
+    fn min_set_by_contains_min(a: Vec<(usize, char)>) -> bool {
+        let compare = |x: &&(usize, char), y: &&(usize, char)| x.1.cmp(&y.1);
+        let result_set = a.iter().min_set_by(compare);
+        if let Some(result_element) = a.iter().min_by(compare) {
+            result_set.contains(&result_element)
+        } else {
+            result_set.is_empty()
+        }
+    }
+
+    fn min_set_by_key_contains_min(a: Vec<(usize, char)>) -> bool {
+        let key = |x: &&(usize, char)| x.1;
+        let result_set = a.iter().min_set_by_key(&key);
+        if let Some(result_element) = a.iter().min_by_key(&key) {
+            result_set.contains(&result_element)
+        } else {
+            result_set.is_empty()
+        }
+    }
+
+    fn max_set_contains_max(a: Vec<(usize, char)>) -> bool {
+        let result_set = a.iter().max_set();
+        if let Some(result_element) = a.iter().max() {
+            result_set.contains(&result_element)
+        } else {
+            result_set.is_empty()
+        }
+    }
+
+    fn max_set_by_contains_max(a: Vec<(usize, char)>) -> bool {
+        let compare = |x: &&(usize, char), y: &&(usize, char)| x.1.cmp(&y.1);
+        let result_set = a.iter().max_set_by(compare);
+        if let Some(result_element) = a.iter().max_by(compare) {
+            result_set.contains(&result_element)
+        } else {
+            result_set.is_empty()
+        }
+    }
+
+    fn max_set_by_key_contains_max(a: Vec<(usize, char)>) -> bool {
+        let key = |x: &&(usize, char)| x.1;
+        let result_set = a.iter().max_set_by_key(&key);
+        if let Some(result_element) = a.iter().max_by_key(&key) {
+            result_set.contains(&result_element)
+        } else {
+            result_set.is_empty()
+        }
+    }
+
+    fn tail(v: Vec<i32>, n: u8) -> bool {
+        let n = n as usize;
+        let result = &v[v.len().saturating_sub(n)..];
+        itertools::equal(v.iter().tail(n), result)
+            && itertools::equal(v.iter().filter(|_| true).tail(n), result)
+    }
+}
diff --git a/vendor/itertools/tests/specializations.rs b/vendor/itertools/tests/specializations.rs
new file mode 100644
index 00000000000000..71231147226beb
--- /dev/null
+++ b/vendor/itertools/tests/specializations.rs
@@ -0,0 +1,582 @@
+#![allow(unstable_name_collisions)]
+
+use itertools::Itertools;
+use quickcheck::Arbitrary;
+use quickcheck::{quickcheck, TestResult};
+use rand::Rng;
+use std::fmt::Debug;
+
+struct Unspecialized<I>(I);
+
+impl<I> Iterator for Unspecialized<I>
+where
+    I: Iterator,
+{
+    type Item = I::Item;
+
+    #[inline(always)]
+    fn next(&mut self) -> Option<I::Item> {
+        self.0.next()
+    }
+}
+
+impl<I> DoubleEndedIterator for Unspecialized<I>
+where
+    I: DoubleEndedIterator,
+{
+    #[inline(always)]
+    fn next_back(&mut self) -> Option<I::Item> {
+        self.0.next_back()
+    }
+}
+
+fn test_specializations<I>(it: &I)
+where
+    I::Item: Eq + Debug + Clone,
+    I: Iterator + Clone,
+{
+    macro_rules! check_specialized {
+        ($src:expr, |$it:pat| $closure:expr) => {
+            // Many iterators special-case the first elements, so we test specializations for iterators that have already been advanced.
+            let mut src = $src.clone();
+            for _ in 0..5 {
+                let $it = src.clone();
+                let v1 = $closure;
+                let $it = Unspecialized(src.clone());
+                let v2 = $closure;
+                assert_eq!(v1, v2);
+                src.next();
+            }
+        }
+    }
+    check_specialized!(it, |i| i.count());
+    check_specialized!(it, |i| i.last());
+    check_specialized!(it, |i| i.collect::<Vec<_>>());
+    check_specialized!(it, |i| {
+        let mut parameters_from_fold = vec![];
+        let fold_result = i.fold(vec![], |mut acc, v: I::Item| {
+            parameters_from_fold.push((acc.clone(), v.clone()));
+            acc.push(v);
+            acc
+        });
+        (parameters_from_fold, fold_result)
+    });
+    check_specialized!(it, |mut i| {
+        let mut parameters_from_all = vec![];
+        let first = i.next();
+        let all_result = i.all(|x| {
+            parameters_from_all.push(x.clone());
+            Some(x) == first
+        });
+        (parameters_from_all, all_result)
+    });
+    let size = it.clone().count();
+    for n in 0..size + 2 {
+        check_specialized!(it, |mut i| i.nth(n));
+    }
+    // size_hint is a bit harder to check
+    let mut it_sh = it.clone();
+    for n in 0..size + 2 {
+        let len = it_sh.clone().count();
+        let (min, max) = it_sh.size_hint();
+        assert_eq!(size - n.min(size), len);
+        assert!(min <= len);
+        if let Some(max) = max {
+            assert!(len <= max);
+        }
+        it_sh.next();
+    }
+}
+
+fn test_double_ended_specializations<I>(it: &I)
+where
+    I::Item: Eq + Debug + Clone,
+    I: DoubleEndedIterator + Clone,
+{
+    macro_rules! check_specialized {
+        ($src:expr, |$it:pat| $closure:expr) => {
+            // Many iterators special-case the first elements, so we test specializations for iterators that have already been advanced.
+            let mut src = $src.clone();
+            for step in 0..8 {
+                let $it = src.clone();
+                let v1 = $closure;
+                let $it = Unspecialized(src.clone());
+                let v2 = $closure;
+                assert_eq!(v1, v2);
+                if step % 2 == 0 {
+                    src.next();
+                } else {
+                    src.next_back();
+                }
+            }
+        }
+    }
+    check_specialized!(it, |i| {
+        let mut parameters_from_rfold = vec![];
+        let rfold_result = i.rfold(vec![], |mut acc, v: I::Item| {
+            parameters_from_rfold.push((acc.clone(), v.clone()));
+            acc.push(v);
+            acc
+        });
+        (parameters_from_rfold, rfold_result)
+    });
+    let size = it.clone().count();
+    for n in 0..size + 2 {
+        check_specialized!(it, |mut i| i.nth_back(n));
+    }
+}
+
+quickcheck!
{ + fn interleave(v: Vec, w: Vec) -> () { + test_specializations(&v.iter().interleave(w.iter())); + } + + fn interleave_shortest(v: Vec, w: Vec) -> () { + test_specializations(&v.iter().interleave_shortest(w.iter())); + } + + fn batching(v: Vec) -> () { + test_specializations(&v.iter().batching(Iterator::next)); + } + + fn tuple_windows(v: Vec) -> () { + test_specializations(&v.iter().tuple_windows::<(_,)>()); + test_specializations(&v.iter().tuple_windows::<(_, _)>()); + test_specializations(&v.iter().tuple_windows::<(_, _, _)>()); + } + + fn circular_tuple_windows(v: Vec) -> () { + test_specializations(&v.iter().circular_tuple_windows::<(_,)>()); + test_specializations(&v.iter().circular_tuple_windows::<(_, _)>()); + test_specializations(&v.iter().circular_tuple_windows::<(_, _, _)>()); + } + + fn tuples(v: Vec) -> () { + test_specializations(&v.iter().tuples::<(_,)>()); + test_specializations(&v.iter().tuples::<(_, _)>()); + test_specializations(&v.iter().tuples::<(_, _, _)>()); + } + + fn cartesian_product(a: Vec, b: Vec) -> TestResult { + if a.len() * b.len() > 100 { + return TestResult::discard(); + } + test_specializations(&a.iter().cartesian_product(&b)); + TestResult::passed() + } + + fn multi_cartesian_product(a: Vec, b: Vec, c: Vec) -> TestResult { + if a.len() * b.len() * c.len() > 100 { + return TestResult::discard(); + } + test_specializations(&vec![a, b, c].into_iter().multi_cartesian_product()); + TestResult::passed() + } + + fn coalesce(v: Vec) -> () { + test_specializations(&v.iter().coalesce(|x, y| if x == y { Ok(x) } else { Err((x, y)) })) + } + + fn dedup(v: Vec) -> () { + test_specializations(&v.iter().dedup()) + } + + fn dedup_by(v: Vec) -> () { + test_specializations(&v.iter().dedup_by(PartialOrd::ge)) + } + + fn dedup_with_count(v: Vec) -> () { + test_specializations(&v.iter().dedup_with_count()) + } + + fn dedup_by_with_count(v: Vec) -> () { + test_specializations(&v.iter().dedup_by_with_count(PartialOrd::ge)) + } + + fn duplicates(v: Vec) -> () { + let it = v.iter().duplicates(); + test_specializations(&it); + test_double_ended_specializations(&it); + } + + fn duplicates_by(v: Vec) -> () { + let it = v.iter().duplicates_by(|x| *x % 10); + test_specializations(&it); + test_double_ended_specializations(&it); + } + + fn unique(v: Vec) -> () { + let it = v.iter().unique(); + test_specializations(&it); + test_double_ended_specializations(&it); + } + + fn unique_by(v: Vec) -> () { + let it = v.iter().unique_by(|x| *x % 50); + test_specializations(&it); + test_double_ended_specializations(&it); + } + + fn take_while_inclusive(v: Vec) -> () { + test_specializations(&v.iter().copied().take_while_inclusive(|&x| x < 100)); + } + + fn while_some(v: Vec) -> () { + test_specializations(&v.iter().map(|&x| if x < 100 { Some(2 * x) } else { None }).while_some()); + } + + fn pad_using(v: Vec) -> () { + use std::convert::TryFrom; + let it = v.iter().copied().pad_using(10, |i| u8::try_from(5 * i).unwrap_or(u8::MAX)); + test_specializations(&it); + test_double_ended_specializations(&it); + } + + fn with_position(v: Vec) -> () { + test_specializations(&v.iter().with_position()); + } + + fn positions(v: Vec) -> () { + let it = v.iter().positions(|x| x % 5 == 0); + test_specializations(&it); + test_double_ended_specializations(&it); + } + + fn update(v: Vec) -> () { + let it = v.iter().copied().update(|x| *x = x.wrapping_mul(7)); + test_specializations(&it); + test_double_ended_specializations(&it); + } + + fn tuple_combinations(v: Vec) -> TestResult { + if v.len() > 10 { + return 
TestResult::discard(); + } + test_specializations(&v.iter().tuple_combinations::<(_,)>()); + test_specializations(&v.iter().tuple_combinations::<(_, _)>()); + test_specializations(&v.iter().tuple_combinations::<(_, _, _)>()); + TestResult::passed() + } + + fn intersperse(v: Vec) -> () { + test_specializations(&v.into_iter().intersperse(0)); + } + + fn intersperse_with(v: Vec) -> () { + test_specializations(&v.into_iter().intersperse_with(|| 0)); + } + + fn combinations(a: Vec, n: u8) -> TestResult { + if n > 3 || a.len() > 8 { + return TestResult::discard(); + } + test_specializations(&a.iter().combinations(n as usize)); + TestResult::passed() + } + + fn combinations_with_replacement(a: Vec, n: u8) -> TestResult { + if n > 3 || a.len() > 7 { + return TestResult::discard(); + } + test_specializations(&a.iter().combinations_with_replacement(n as usize)); + TestResult::passed() + } + + fn permutations(a: Vec, n: u8) -> TestResult { + if n > 3 || a.len() > 8 { + return TestResult::discard(); + } + test_specializations(&a.iter().permutations(n as usize)); + TestResult::passed() + } + + fn powerset(a: Vec) -> TestResult { + if a.len() > 6 { + return TestResult::discard(); + } + test_specializations(&a.iter().powerset()); + TestResult::passed() + } + + fn zip_longest(a: Vec, b: Vec) -> () { + let it = a.into_iter().zip_longest(b); + test_specializations(&it); + test_double_ended_specializations(&it); + } + + fn zip_eq(a: Vec) -> () { + test_specializations(&a.iter().zip_eq(a.iter().rev())) + } + + fn multizip(a: Vec) -> () { + let it = itertools::multizip((a.iter(), a.iter().rev(), a.iter().take(50))); + test_specializations(&it); + test_double_ended_specializations(&it); + } + + fn izip(a: Vec, b: Vec) -> () { + test_specializations(&itertools::izip!(b.iter(), a, b.iter().rev())); + } + + fn iproduct(a: Vec, b: Vec, c: Vec) -> TestResult { + if a.len() * b.len() * c.len() > 200 { + return TestResult::discard(); + } + test_specializations(&itertools::iproduct!(a, b.iter(), c)); + TestResult::passed() + } + + fn repeat_n(element: i8, n: u8) -> () { + let it = itertools::repeat_n(element, n as usize); + test_specializations(&it); + test_double_ended_specializations(&it); + } + + fn exactly_one_error(v: Vec) -> TestResult { + // Use `at_most_one` would be similar. + match v.iter().exactly_one() { + Ok(_) => TestResult::discard(), + Err(it) => { + test_specializations(&it); + TestResult::passed() + } + } + } +} + +quickcheck! 
{ + fn put_back_qc(test_vec: Vec) -> () { + test_specializations(&itertools::put_back(test_vec.iter())); + let mut pb = itertools::put_back(test_vec.into_iter()); + pb.put_back(1); + test_specializations(&pb); + } + + fn put_back_n(v: Vec, n: u8) -> () { + let mut it = itertools::put_back_n(v); + for k in 0..n { + it.put_back(k); + } + test_specializations(&it); + } + + fn multipeek(v: Vec, n: u8) -> () { + let mut it = v.into_iter().multipeek(); + for _ in 0..n { + it.peek(); + } + test_specializations(&it); + } + + fn peek_nth_with_peek(v: Vec, n: u8) -> () { + let mut it = itertools::peek_nth(v); + for _ in 0..n { + it.peek(); + } + test_specializations(&it); + } + + fn peek_nth_with_peek_nth(v: Vec, n: u8) -> () { + let mut it = itertools::peek_nth(v); + it.peek_nth(n as usize); + test_specializations(&it); + } + + fn peek_nth_with_peek_mut(v: Vec, n: u8) -> () { + let mut it = itertools::peek_nth(v); + for _ in 0..n { + if let Some(x) = it.peek_mut() { + *x = x.wrapping_add(50); + } + } + test_specializations(&it); + } + + fn peek_nth_with_peek_nth_mut(v: Vec, n: u8) -> () { + let mut it = itertools::peek_nth(v); + if let Some(x) = it.peek_nth_mut(n as usize) { + *x = x.wrapping_add(50); + } + test_specializations(&it); + } +} + +quickcheck! { + fn merge(a: Vec, b: Vec) -> () { + test_specializations(&a.into_iter().merge(b)) + } + + fn merge_by(a: Vec, b: Vec) -> () { + test_specializations(&a.into_iter().merge_by(b, PartialOrd::ge)) + } + + fn merge_join_by_ordering(i1: Vec, i2: Vec) -> () { + test_specializations(&i1.into_iter().merge_join_by(i2, Ord::cmp)); + } + + fn merge_join_by_bool(i1: Vec, i2: Vec) -> () { + test_specializations(&i1.into_iter().merge_join_by(i2, PartialOrd::ge)); + } + + fn kmerge(a: Vec, b: Vec, c: Vec) -> () { + test_specializations(&vec![a, b, c] + .into_iter() + .map(|v| v.into_iter().sorted()) + .kmerge()); + } + + fn kmerge_by(a: Vec, b: Vec, c: Vec) -> () { + test_specializations(&vec![a, b, c] + .into_iter() + .map(|v| v.into_iter().sorted_by_key(|a| a.abs())) + .kmerge_by(|a, b| a.abs() < b.abs())); + } +} + +quickcheck! { + fn map_into(v: Vec) -> () { + let it = v.into_iter().map_into::(); + test_specializations(&it); + test_double_ended_specializations(&it); + } + + fn map_ok(v: Vec>) -> () { + let it = v.into_iter().map_ok(|u| u.checked_add(1)); + test_specializations(&it); + test_double_ended_specializations(&it); + } + + fn filter_ok(v: Vec>) -> () { + test_specializations(&v.into_iter().filter_ok(|&i| i < 20)); + } + + fn filter_map_ok(v: Vec>) -> () { + test_specializations(&v.into_iter().filter_map_ok(|i| if i < 20 { Some(i * 2) } else { None })); + } + + // `SmallIter2` because `Vec` is too slow and we get bad coverage from a singleton like Option + fn flatten_ok(v: Vec, char>>) -> () { + let it = v.into_iter().flatten_ok(); + test_specializations(&it); + test_double_ended_specializations(&it); + } +} + +quickcheck! { + // TODO Replace this function by a normal call to test_specializations + fn process_results(v: Vec>) -> () { + helper(v.iter().copied()); + helper(v.iter().copied().filter(Result::is_ok)); + + fn helper(it: impl DoubleEndedIterator> + Clone) { + macro_rules! 
check_results_specialized { + ($src:expr, |$it:pat| $closure:expr) => { + assert_eq!( + itertools::process_results($src.clone(), |$it| $closure), + itertools::process_results($src.clone(), |i| { + let $it = Unspecialized(i); + $closure + }), + ) + } + } + + check_results_specialized!(it, |i| i.count()); + check_results_specialized!(it, |i| i.last()); + check_results_specialized!(it, |i| i.collect::>()); + check_results_specialized!(it, |i| i.rev().collect::>()); + check_results_specialized!(it, |i| { + let mut parameters_from_fold = vec![]; + let fold_result = i.fold(vec![], |mut acc, v| { + parameters_from_fold.push((acc.clone(), v)); + acc.push(v); + acc + }); + (parameters_from_fold, fold_result) + }); + check_results_specialized!(it, |i| { + let mut parameters_from_rfold = vec![]; + let rfold_result = i.rfold(vec![], |mut acc, v| { + parameters_from_rfold.push((acc.clone(), v)); + acc.push(v); + acc + }); + (parameters_from_rfold, rfold_result) + }); + check_results_specialized!(it, |mut i| { + let mut parameters_from_all = vec![]; + let first = i.next(); + let all_result = i.all(|x| { + parameters_from_all.push(x); + Some(x)==first + }); + (parameters_from_all, all_result) + }); + let size = it.clone().count(); + for n in 0..size + 2 { + check_results_specialized!(it, |mut i| i.nth(n)); + } + for n in 0..size + 2 { + check_results_specialized!(it, |mut i| i.nth_back(n)); + } + } + } +} + +/// Like `VecIntoIter` with maximum 2 elements. +#[derive(Debug, Clone, Default)] +enum SmallIter2 { + #[default] + Zero, + One(T), + Two(T, T), +} + +impl Arbitrary for SmallIter2 { + fn arbitrary(g: &mut G) -> Self { + match g.gen_range(0u8, 3) { + 0 => Self::Zero, + 1 => Self::One(T::arbitrary(g)), + 2 => Self::Two(T::arbitrary(g), T::arbitrary(g)), + _ => unreachable!(), + } + } + // maybe implement shrink too, maybe not +} + +impl Iterator for SmallIter2 { + type Item = T; + + fn next(&mut self) -> Option { + match std::mem::take(self) { + Self::Zero => None, + Self::One(val) => Some(val), + Self::Two(val, second) => { + *self = Self::One(second); + Some(val) + } + } + } + + fn size_hint(&self) -> (usize, Option) { + let len = match self { + Self::Zero => 0, + Self::One(_) => 1, + Self::Two(_, _) => 2, + }; + (len, Some(len)) + } +} + +impl DoubleEndedIterator for SmallIter2 { + fn next_back(&mut self) -> Option { + match std::mem::take(self) { + Self::Zero => None, + Self::One(val) => Some(val), + Self::Two(first, val) => { + *self = Self::One(first); + Some(val) + } + } + } +} diff --git a/vendor/itertools/tests/test_core.rs b/vendor/itertools/tests/test_core.rs new file mode 100644 index 00000000000000..32af246c017b6a --- /dev/null +++ b/vendor/itertools/tests/test_core.rs @@ -0,0 +1,374 @@ +//! Licensed under the Apache License, Version 2.0 +//! https://www.apache.org/licenses/LICENSE-2.0 or the MIT license +//! https://opensource.org/licenses/MIT, at your +//! option. This file may not be copied, modified, or distributed +//! except according to those terms. 
+#![no_std] +#![allow(deprecated)] + +use crate::it::chain; +use crate::it::free::put_back; +use crate::it::interleave; +use crate::it::intersperse; +use crate::it::intersperse_with; +use crate::it::iproduct; +use crate::it::izip; +use crate::it::multizip; +use crate::it::Itertools; +use core::iter; +use itertools as it; + +#[allow(dead_code)] +fn get_esi_then_esi(it: I) { + fn is_esi(_: impl ExactSizeIterator) {} + is_esi(it.clone().get(1..4)); + is_esi(it.clone().get(1..=4)); + is_esi(it.clone().get(1..)); + is_esi(it.clone().get(..4)); + is_esi(it.clone().get(..=4)); + is_esi(it.get(..)); +} + +#[allow(dead_code)] +fn get_dei_esi_then_dei_esi(it: I) { + fn is_dei_esi(_: impl DoubleEndedIterator + ExactSizeIterator) {} + is_dei_esi(it.clone().get(1..4)); + is_dei_esi(it.clone().get(1..=4)); + is_dei_esi(it.clone().get(1..)); + is_dei_esi(it.clone().get(..4)); + is_dei_esi(it.clone().get(..=4)); + is_dei_esi(it.get(..)); +} + +#[test] +fn get_1_max() { + let mut it = (0..5).get(1..=usize::MAX); + assert_eq!(it.next(), Some(1)); + assert_eq!(it.next_back(), Some(4)); +} + +#[test] +#[should_panic] +fn get_full_range_inclusive() { + let _it = (0..5).get(0..=usize::MAX); +} + +#[test] +fn product0() { + let mut prod = iproduct!(); + assert_eq!(prod.next(), Some(())); + assert!(prod.next().is_none()); +} + +#[test] +fn iproduct1() { + let s = "αβ"; + + let mut prod = iproduct!(s.chars()); + assert_eq!(prod.next(), Some(('α',))); + assert_eq!(prod.next(), Some(('β',))); + assert!(prod.next().is_none()); +} + +#[test] +fn product2() { + let s = "αβ"; + + let mut prod = iproduct!(s.chars(), 0..2); + assert!(prod.next() == Some(('α', 0))); + assert!(prod.next() == Some(('α', 1))); + assert!(prod.next() == Some(('β', 0))); + assert!(prod.next() == Some(('β', 1))); + assert!(prod.next().is_none()); +} + +#[test] +fn product_temporary() { + for (_x, _y, _z) in iproduct!( + [0, 1, 2].iter().cloned(), + [0, 1, 2].iter().cloned(), + [0, 1, 2].iter().cloned() + ) { + // ok + } +} + +#[test] +fn izip_macro() { + let mut zip = izip!(2..3); + assert!(zip.next() == Some(2)); + assert!(zip.next().is_none()); + + let mut zip = izip!(0..3, 0..2, 0..2i8); + for i in 0..2 { + assert!((i as usize, i, i as i8) == zip.next().unwrap()); + } + assert!(zip.next().is_none()); + + let xs: [isize; 0] = []; + let mut zip = izip!(0..3, 0..2, 0..2i8, &xs); + assert!(zip.next().is_none()); +} + +#[test] +fn izip2() { + let _zip1: iter::Zip<_, _> = izip!(1.., 2..); + let _zip2: iter::Zip<_, _> = izip!(1.., 2..,); +} + +#[test] +fn izip3() { + let mut zip: iter::Map, _> = izip!(0..3, 0..2, 0..2i8); + for i in 0..2 { + assert!((i as usize, i, i as i8) == zip.next().unwrap()); + } + assert!(zip.next().is_none()); +} + +#[test] +fn multizip3() { + let mut zip = multizip((0..3, 0..2, 0..2i8)); + for i in 0..2 { + assert!((i as usize, i, i as i8) == zip.next().unwrap()); + } + assert!(zip.next().is_none()); + + let xs: [isize; 0] = []; + let mut zip = multizip((0..3, 0..2, 0..2i8, xs.iter())); + assert!(zip.next().is_none()); + + for (_, _, _, _, _) in multizip((0..3, 0..2, xs.iter(), &xs, xs.to_vec())) { + /* test compiles */ + } +} + +#[test] +fn chain_macro() { + let mut chain = chain!(2..3); + assert!(chain.next() == Some(2)); + assert!(chain.next().is_none()); + + let mut chain = chain!(0..2, 2..3, 3..5i8); + for i in 0..5i8 { + assert_eq!(Some(i), chain.next()); + } + assert!(chain.next().is_none()); + + let mut chain = chain!(); + assert_eq!(chain.next(), Option::<()>::None); +} + +#[test] +fn chain2() { + let _ = 
chain!(1.., 2..); + let _ = chain!(1.., 2..,); +} + +#[test] +fn write_to() { + let xs = [7, 9, 8]; + let mut ys = [0; 5]; + let cnt = ys.iter_mut().set_from(xs.iter().copied()); + assert!(cnt == xs.len()); + assert!(ys == [7, 9, 8, 0, 0]); + + let cnt = ys.iter_mut().set_from(0..10); + assert!(cnt == ys.len()); + assert!(ys == [0, 1, 2, 3, 4]); +} + +#[test] +fn test_interleave() { + let xs: [u8; 0] = []; + let ys = [7u8, 9, 8, 10]; + let zs = [2u8, 77]; + let it = interleave(xs.iter(), ys.iter()); + it::assert_equal(it, ys.iter()); + + let rs = [7u8, 2, 9, 77, 8, 10]; + let it = interleave(ys.iter(), zs.iter()); + it::assert_equal(it, rs.iter()); +} + +#[test] +fn test_intersperse() { + let xs = [1u8, 2, 3]; + let ys = [1u8, 0, 2, 0, 3]; + let it = intersperse(&xs, &0); + it::assert_equal(it, ys.iter()); +} + +#[test] +fn test_intersperse_with() { + let xs = [1u8, 2, 3]; + let ys = [1u8, 10, 2, 10, 3]; + let i = 10; + let it = intersperse_with(&xs, || &i); + it::assert_equal(it, ys.iter()); +} + +#[test] +fn dropping() { + let xs = [1, 2, 3]; + let mut it = xs.iter().dropping(2); + assert_eq!(it.next(), Some(&3)); + assert!(it.next().is_none()); + let mut it = xs.iter().dropping(5); + assert!(it.next().is_none()); +} + +#[test] +fn batching() { + let xs = [0, 1, 2, 1, 3]; + let ys = [(0, 1), (2, 1)]; + + // An iterator that gathers elements up in pairs + let pit = xs + .iter() + .cloned() + .batching(|it| it.next().and_then(|x| it.next().map(|y| (x, y)))); + it::assert_equal(pit, ys.iter().cloned()); +} + +#[test] +fn test_put_back() { + let xs = [0, 1, 1, 1, 2, 1, 3, 3]; + let mut pb = put_back(xs.iter().cloned()); + pb.next(); + pb.put_back(1); + pb.put_back(0); + it::assert_equal(pb, xs.iter().cloned()); +} + +#[test] +fn merge() { + it::assert_equal((0..10).step_by(2).merge((1..10).step_by(2)), 0..10); +} + +#[test] +fn repeatn() { + let s = "α"; + let mut it = it::repeat_n(s, 3); + assert_eq!(it.len(), 3); + assert_eq!(it.next(), Some(s)); + assert_eq!(it.next(), Some(s)); + assert_eq!(it.next(), Some(s)); + assert_eq!(it.next(), None); + assert_eq!(it.next(), None); +} + +#[test] +fn count_clones() { + // Check that RepeatN only clones N - 1 times. 
+ + use core::cell::Cell; + #[derive(PartialEq, Debug)] + struct Foo { + n: Cell, + } + + impl Clone for Foo { + fn clone(&self) -> Self { + let n = self.n.get(); + self.n.set(n + 1); + Self { + n: Cell::new(n + 1), + } + } + } + + for n in 0..10 { + let f = Foo { n: Cell::new(0) }; + let it = it::repeat_n(f, n); + // drain it + let last = it.last(); + if n == 0 { + assert_eq!(last, None); + } else { + assert_eq!( + last, + Some(Foo { + n: Cell::new(n - 1) + }) + ); + } + } +} + +#[test] +fn part() { + let mut data = [7, 1, 1, 9, 1, 1, 3]; + let i = it::partition(&mut data, |elt| *elt >= 3); + assert_eq!(i, 3); + assert_eq!(data, [7, 3, 9, 1, 1, 1, 1]); + + let i = it::partition(&mut data, |elt| *elt == 1); + assert_eq!(i, 4); + assert_eq!(data, [1, 1, 1, 1, 9, 3, 7]); + + let mut data = [1, 2, 3, 4, 5, 6, 7, 8, 9]; + let i = it::partition(&mut data, |elt| *elt % 3 == 0); + assert_eq!(i, 3); + assert_eq!(data, [9, 6, 3, 4, 5, 2, 7, 8, 1]); +} + +#[test] +fn tree_reduce() { + for i in 0..100 { + assert_eq!((0..i).tree_reduce(|x, y| x + y), (0..i).fold1(|x, y| x + y)); + } +} + +#[test] +fn exactly_one() { + assert_eq!((0..10).filter(|&x| x == 2).exactly_one().unwrap(), 2); + assert!((0..10) + .filter(|&x| x > 1 && x < 4) + .exactly_one() + .unwrap_err() + .eq(2..4)); + assert!((0..10) + .filter(|&x| x > 1 && x < 5) + .exactly_one() + .unwrap_err() + .eq(2..5)); + assert!((0..10) + .filter(|&_| false) + .exactly_one() + .unwrap_err() + .eq(0..0)); +} + +#[test] +fn at_most_one() { + assert_eq!((0..10).filter(|&x| x == 2).at_most_one().unwrap(), Some(2)); + assert!((0..10) + .filter(|&x| x > 1 && x < 4) + .at_most_one() + .unwrap_err() + .eq(2..4)); + assert!((0..10) + .filter(|&x| x > 1 && x < 5) + .at_most_one() + .unwrap_err() + .eq(2..5)); + assert_eq!((0..10).filter(|&_| false).at_most_one().unwrap(), None); +} + +#[test] +fn sum1() { + let v: &[i32] = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]; + assert_eq!(v[..0].iter().cloned().sum1::(), None); + assert_eq!(v[1..2].iter().cloned().sum1::(), Some(1)); + assert_eq!(v[1..3].iter().cloned().sum1::(), Some(3)); + assert_eq!(v.iter().cloned().sum1::(), Some(55)); +} + +#[test] +fn product1() { + let v: &[i32] = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]; + assert_eq!(v[..0].iter().cloned().product1::(), None); + assert_eq!(v[..1].iter().cloned().product1::(), Some(0)); + assert_eq!(v[1..3].iter().cloned().product1::(), Some(2)); + assert_eq!(v[1..5].iter().cloned().product1::(), Some(24)); +} diff --git a/vendor/itertools/tests/test_std.rs b/vendor/itertools/tests/test_std.rs new file mode 100644 index 00000000000000..00246d506dcfbb --- /dev/null +++ b/vendor/itertools/tests/test_std.rs @@ -0,0 +1,1523 @@ +#![allow(unstable_name_collisions)] + +use crate::it::cloned; +use crate::it::free::put_back_n; +use crate::it::free::rciter; +use crate::it::iproduct; +use crate::it::izip; +use crate::it::multipeek; +use crate::it::multizip; +use crate::it::peek_nth; +use crate::it::repeat_n; +use crate::it::ExactlyOneError; +use crate::it::FoldWhile; +use crate::it::Itertools; +use itertools as it; +use quickcheck as qc; +use rand::{ + distributions::{Distribution, Standard}, + rngs::StdRng, + Rng, SeedableRng, +}; +use rand::{seq::SliceRandom, thread_rng}; +use std::{cmp::min, fmt::Debug, marker::PhantomData}; + +#[test] +fn product3() { + let prod = iproduct!(0..3, 0..2, 0..2); + assert_eq!(prod.size_hint(), (12, Some(12))); + let v = prod.collect_vec(); + for i in 0..3 { + for j in 0..2 { + for k in 0..2 { + assert!((i, j, k) == v[(i * 2 * 2 + j * 2 + k) as 
usize]); + } + } + } + for (_, _, _, _) in iproduct!(0..3, 0..2, 0..2, 0..3) { /* test compiles */ } +} + +#[test] +fn interleave_shortest() { + let v0: Vec = vec![0, 2, 4]; + let v1: Vec = vec![1, 3, 5, 7]; + let it = v0.into_iter().interleave_shortest(v1); + assert_eq!(it.size_hint(), (6, Some(6))); + assert_eq!(it.collect_vec(), vec![0, 1, 2, 3, 4, 5]); + + let v0: Vec = vec![0, 2, 4, 6, 8]; + let v1: Vec = vec![1, 3, 5]; + let it = v0.into_iter().interleave_shortest(v1); + assert_eq!(it.size_hint(), (7, Some(7))); + assert_eq!(it.collect_vec(), vec![0, 1, 2, 3, 4, 5, 6]); + + let i0 = ::std::iter::repeat(0); + let v1: Vec<_> = vec![1, 3, 5]; + let it = i0.interleave_shortest(v1); + assert_eq!(it.size_hint(), (7, Some(7))); + + let v0: Vec<_> = vec![0, 2, 4]; + let i1 = ::std::iter::repeat(1); + let it = v0.into_iter().interleave_shortest(i1); + assert_eq!(it.size_hint(), (6, Some(6))); +} + +#[test] +fn duplicates_by() { + let xs = ["aaa", "bbbbb", "aa", "ccc", "bbbb", "aaaaa", "cccc"]; + let ys = ["aa", "bbbb", "cccc"]; + it::assert_equal(ys.iter(), xs.iter().duplicates_by(|x| x[..2].to_string())); + it::assert_equal( + ys.iter(), + xs.iter().rev().duplicates_by(|x| x[..2].to_string()).rev(), + ); + let ys_rev = ["ccc", "aa", "bbbbb"]; + it::assert_equal( + ys_rev.iter(), + xs.iter().duplicates_by(|x| x[..2].to_string()).rev(), + ); +} + +#[test] +fn duplicates() { + let xs = [0, 1, 2, 3, 2, 1, 3]; + let ys = [2, 1, 3]; + it::assert_equal(ys.iter(), xs.iter().duplicates()); + it::assert_equal(ys.iter(), xs.iter().rev().duplicates().rev()); + let ys_rev = [3, 2, 1]; + it::assert_equal(ys_rev.iter(), xs.iter().duplicates().rev()); + + let xs = [0, 1, 0, 1]; + let ys = [0, 1]; + it::assert_equal(ys.iter(), xs.iter().duplicates()); + it::assert_equal(ys.iter(), xs.iter().rev().duplicates().rev()); + let ys_rev = [1, 0]; + it::assert_equal(ys_rev.iter(), xs.iter().duplicates().rev()); + + let xs = [0, 1, 2, 1, 2]; + let ys = vec![1, 2]; + assert_eq!(ys, xs.iter().duplicates().cloned().collect_vec()); + assert_eq!( + ys, + xs.iter().rev().duplicates().rev().cloned().collect_vec() + ); + let ys_rev = vec![2, 1]; + assert_eq!(ys_rev, xs.iter().duplicates().rev().cloned().collect_vec()); +} + +#[test] +fn unique_by() { + let xs = ["aaa", "bbbbb", "aa", "ccc", "bbbb", "aaaaa", "cccc"]; + let ys = ["aaa", "bbbbb", "ccc"]; + it::assert_equal(ys.iter(), xs.iter().unique_by(|x| x[..2].to_string())); + it::assert_equal( + ys.iter(), + xs.iter().rev().unique_by(|x| x[..2].to_string()).rev(), + ); + let ys_rev = ["cccc", "aaaaa", "bbbb"]; + it::assert_equal( + ys_rev.iter(), + xs.iter().unique_by(|x| x[..2].to_string()).rev(), + ); +} + +#[test] +fn unique() { + let xs = [0, 1, 2, 3, 2, 1, 3]; + let ys = [0, 1, 2, 3]; + it::assert_equal(ys.iter(), xs.iter().unique()); + it::assert_equal(ys.iter(), xs.iter().rev().unique().rev()); + let ys_rev = [3, 1, 2, 0]; + it::assert_equal(ys_rev.iter(), xs.iter().unique().rev()); + + let xs = [0, 1]; + let ys = [0, 1]; + it::assert_equal(ys.iter(), xs.iter().unique()); + it::assert_equal(ys.iter(), xs.iter().rev().unique().rev()); + let ys_rev = [1, 0]; + it::assert_equal(ys_rev.iter(), xs.iter().unique().rev()); +} + +#[test] +fn intersperse() { + let xs = ["a", "", "b", "c"]; + let v: Vec<&str> = xs.iter().cloned().intersperse(", ").collect(); + let text: String = v.concat(); + assert_eq!(text, "a, , b, c".to_string()); + + let ys = [0, 1, 2, 3]; + let mut it = ys[..0].iter().copied().intersperse(1); + assert!(it.next().is_none()); +} + +#[test] +fn dedup() { + 
let xs = [0, 1, 1, 1, 2, 1, 3, 3]; + let ys = [0, 1, 2, 1, 3]; + it::assert_equal(ys.iter(), xs.iter().dedup()); + let xs = [0, 0, 0, 0, 0]; + let ys = [0]; + it::assert_equal(ys.iter(), xs.iter().dedup()); + + let xs = [0, 1, 1, 1, 2, 1, 3, 3]; + let ys = [0, 1, 2, 1, 3]; + let mut xs_d = Vec::new(); + xs.iter().dedup().fold((), |(), &elt| xs_d.push(elt)); + assert_eq!(&xs_d, &ys); +} + +#[test] +fn coalesce() { + let data = [-1., -2., -3., 3., 1., 0., -1.]; + let it = data.iter().cloned().coalesce(|x, y| { + if (x >= 0.) == (y >= 0.) { + Ok(x + y) + } else { + Err((x, y)) + } + }); + itertools::assert_equal(it.clone(), vec![-6., 4., -1.]); + assert_eq!( + it.fold(vec![], |mut v, n| { + v.push(n); + v + }), + vec![-6., 4., -1.] + ); +} + +#[test] +fn dedup_by() { + let xs = [ + (0, 0), + (0, 1), + (1, 1), + (2, 1), + (0, 2), + (3, 1), + (0, 3), + (1, 3), + ]; + let ys = [(0, 0), (0, 1), (0, 2), (3, 1), (0, 3)]; + it::assert_equal(ys.iter(), xs.iter().dedup_by(|x, y| x.1 == y.1)); + let xs = [(0, 1), (0, 2), (0, 3), (0, 4), (0, 5)]; + let ys = [(0, 1)]; + it::assert_equal(ys.iter(), xs.iter().dedup_by(|x, y| x.0 == y.0)); + + let xs = [ + (0, 0), + (0, 1), + (1, 1), + (2, 1), + (0, 2), + (3, 1), + (0, 3), + (1, 3), + ]; + let ys = [(0, 0), (0, 1), (0, 2), (3, 1), (0, 3)]; + let mut xs_d = Vec::new(); + xs.iter() + .dedup_by(|x, y| x.1 == y.1) + .fold((), |(), &elt| xs_d.push(elt)); + assert_eq!(&xs_d, &ys); +} + +#[test] +fn dedup_with_count() { + let xs: [i32; 8] = [0, 1, 1, 1, 2, 1, 3, 3]; + let ys: [(usize, &i32); 5] = [(1, &0), (3, &1), (1, &2), (1, &1), (2, &3)]; + + it::assert_equal(ys.iter().cloned(), xs.iter().dedup_with_count()); + + let xs: [i32; 5] = [0, 0, 0, 0, 0]; + let ys: [(usize, &i32); 1] = [(5, &0)]; + + it::assert_equal(ys.iter().cloned(), xs.iter().dedup_with_count()); +} + +#[test] +fn dedup_by_with_count() { + let xs = [ + (0, 0), + (0, 1), + (1, 1), + (2, 1), + (0, 2), + (3, 1), + (0, 3), + (1, 3), + ]; + let ys = [ + (1, &(0, 0)), + (3, &(0, 1)), + (1, &(0, 2)), + (1, &(3, 1)), + (2, &(0, 3)), + ]; + + it::assert_equal( + ys.iter().cloned(), + xs.iter().dedup_by_with_count(|x, y| x.1 == y.1), + ); + + let xs = [(0, 1), (0, 2), (0, 3), (0, 4), (0, 5)]; + let ys = [(5, &(0, 1))]; + + it::assert_equal( + ys.iter().cloned(), + xs.iter().dedup_by_with_count(|x, y| x.0 == y.0), + ); +} + +#[test] +fn all_equal() { + assert!("".chars().all_equal()); + assert!("A".chars().all_equal()); + assert!(!"AABBCCC".chars().all_equal()); + assert!("AAAAAAA".chars().all_equal()); + for (_key, mut sub) in &"AABBCCC".chars().chunk_by(|&x| x) { + assert!(sub.all_equal()); + } +} + +#[test] +fn all_equal_value() { + assert_eq!("".chars().all_equal_value(), Err(None)); + assert_eq!("A".chars().all_equal_value(), Ok('A')); + assert_eq!("AABBCCC".chars().all_equal_value(), Err(Some(('A', 'B')))); + assert_eq!("AAAAAAA".chars().all_equal_value(), Ok('A')); + { + let mut it = [1, 2, 3].iter().copied(); + let result = it.all_equal_value(); + assert_eq!(result, Err(Some((1, 2)))); + let remaining = it.next(); + assert_eq!(remaining, Some(3)); + assert!(it.next().is_none()); + } +} + +#[test] +fn all_unique() { + assert!("ABCDEFGH".chars().all_unique()); + assert!(!"ABCDEFGA".chars().all_unique()); + assert!(::std::iter::empty::().all_unique()); +} + +#[test] +fn test_put_back_n() { + let xs = [0, 1, 1, 1, 2, 1, 3, 3]; + let mut pb = put_back_n(xs.iter().cloned()); + pb.next(); + pb.next(); + pb.put_back(1); + pb.put_back(0); + it::assert_equal(pb, xs.iter().cloned()); +} + +#[test] +fn tee() { 
+ let xs = [0, 1, 2, 3]; + let (mut t1, mut t2) = xs.iter().cloned().tee(); + assert_eq!(t1.next(), Some(0)); + assert_eq!(t2.next(), Some(0)); + assert_eq!(t1.next(), Some(1)); + assert_eq!(t1.next(), Some(2)); + assert_eq!(t1.next(), Some(3)); + assert_eq!(t1.next(), None); + assert_eq!(t2.next(), Some(1)); + assert_eq!(t2.next(), Some(2)); + assert_eq!(t1.next(), None); + assert_eq!(t2.next(), Some(3)); + assert_eq!(t2.next(), None); + assert_eq!(t1.next(), None); + assert_eq!(t2.next(), None); + + let (t1, t2) = xs.iter().cloned().tee(); + it::assert_equal(t1, xs.iter().cloned()); + it::assert_equal(t2, xs.iter().cloned()); + + let (t1, t2) = xs.iter().cloned().tee(); + it::assert_equal(t1.zip(t2), xs.iter().cloned().zip(xs.iter().cloned())); +} + +#[test] +fn test_rciter() { + let xs = [0, 1, 1, 1, 2, 1, 3, 5, 6]; + + let mut r1 = rciter(xs.iter().cloned()); + let mut r2 = r1.clone(); + assert_eq!(r1.next(), Some(0)); + assert_eq!(r2.next(), Some(1)); + let mut z = r1.zip(r2); + assert_eq!(z.next(), Some((1, 1))); + assert_eq!(z.next(), Some((2, 1))); + assert_eq!(z.next(), Some((3, 5))); + assert_eq!(z.next(), None); + + // test intoiterator + let r1 = rciter(0..5); + let mut z = izip!(&r1, r1); + assert_eq!(z.next(), Some((0, 1))); +} + +#[test] +fn trait_pointers() { + struct ByRef<'r, I: ?Sized>(&'r mut I); + + impl<'r, X, I> Iterator for ByRef<'r, I> + where + I: ?Sized + 'r + Iterator, + { + type Item = X; + fn next(&mut self) -> Option { + self.0.next() + } + } + + let mut it = Box::new(0..10) as Box>; + assert_eq!(it.next(), Some(0)); + + { + let jt: &mut dyn Iterator = &mut *it; + assert_eq!(jt.next(), Some(1)); + + { + let mut r = ByRef(jt); + assert_eq!(r.next(), Some(2)); + } + + assert_eq!(jt.find_position(|x| *x == 4), Some((1, 4))); + jt.for_each(|_| ()); + } +} + +#[test] +fn merge_by() { + let odd: Vec<(u32, &str)> = vec![(1, "hello"), (3, "world"), (5, "!")]; + let even = [(2, "foo"), (4, "bar"), (6, "baz")]; + let expected = [ + (1, "hello"), + (2, "foo"), + (3, "world"), + (4, "bar"), + (5, "!"), + (6, "baz"), + ]; + let results = odd.iter().merge_by(even.iter(), |a, b| a.0 <= b.0); + it::assert_equal(results, expected.iter()); +} + +#[test] +fn merge_by_btree() { + use std::collections::BTreeMap; + let mut bt1 = BTreeMap::new(); + bt1.insert("hello", 1); + bt1.insert("world", 3); + let mut bt2 = BTreeMap::new(); + bt2.insert("foo", 2); + bt2.insert("bar", 4); + let results = bt1.into_iter().merge_by(bt2, |a, b| a.0 <= b.0); + let expected = vec![("bar", 4), ("foo", 2), ("hello", 1), ("world", 3)]; + it::assert_equal(results, expected); +} + +#[test] +fn kmerge() { + let its = (0..4).map(|s| (s..10).step_by(4)); + + it::assert_equal(its.kmerge(), 0..10); +} + +#[test] +fn kmerge_2() { + let its = vec![3, 2, 1, 0].into_iter().map(|s| (s..10).step_by(4)); + + it::assert_equal(its.kmerge(), 0..10); +} + +#[test] +fn kmerge_empty() { + let its = (0..4).map(|_| 0..0); + assert_eq!(its.kmerge().next(), None); +} + +#[test] +fn kmerge_size_hint() { + let its = (0..5).map(|_| (0..10)); + assert_eq!(its.kmerge().size_hint(), (50, Some(50))); +} + +#[test] +fn kmerge_empty_size_hint() { + let its = (0..5).map(|_| (0..0)); + assert_eq!(its.kmerge().size_hint(), (0, Some(0))); +} + +#[test] +fn join() { + let many = [1, 2, 3]; + let one = [1]; + let none: Vec = vec![]; + + assert_eq!(many.iter().join(", "), "1, 2, 3"); + assert_eq!(one.iter().join(", "), "1"); + assert_eq!(none.iter().join(", "), ""); +} + +#[test] +fn sorted_unstable_by() { + let sc = [3, 4, 1, 
2].iter().cloned().sorted_by(|&a, &b| a.cmp(&b)); + it::assert_equal(sc, vec![1, 2, 3, 4]); + + let v = (0..5).sorted_unstable_by(|&a, &b| a.cmp(&b).reverse()); + it::assert_equal(v, vec![4, 3, 2, 1, 0]); +} + +#[test] +fn sorted_unstable_by_key() { + let sc = [3, 4, 1, 2].iter().cloned().sorted_unstable_by_key(|&x| x); + it::assert_equal(sc, vec![1, 2, 3, 4]); + + let v = (0..5).sorted_unstable_by_key(|&x| -x); + it::assert_equal(v, vec![4, 3, 2, 1, 0]); +} + +#[test] +fn sorted_by() { + let sc = [3, 4, 1, 2].iter().cloned().sorted_by(|&a, &b| a.cmp(&b)); + it::assert_equal(sc, vec![1, 2, 3, 4]); + + let v = (0..5).sorted_by(|&a, &b| a.cmp(&b).reverse()); + it::assert_equal(v, vec![4, 3, 2, 1, 0]); +} + +qc::quickcheck! { + fn k_smallest_range(n: i64, m: u16, k: u16) -> () { + // u16 is used to constrain k and m to 0..2¹⁶, + // otherwise the test could use too much memory. + let (k, m) = (k as usize, m as u64); + + let mut v: Vec<_> = (n..n.saturating_add(m as _)).collect(); + // Generate a random permutation of n..n+m + v.shuffle(&mut thread_rng()); + + // Construct the right answers for the top and bottom elements + let mut sorted = v.clone(); + sorted.sort(); + // how many elements are we checking + let num_elements = min(k, m as _); + + // Compute the top and bottom k in various combinations + let sorted_smallest = sorted[..num_elements].iter().cloned(); + let smallest = v.iter().cloned().k_smallest(k); + let smallest_by = v.iter().cloned().k_smallest_by(k, Ord::cmp); + let smallest_by_key = v.iter().cloned().k_smallest_by_key(k, |&x| x); + + let sorted_largest = sorted[sorted.len() - num_elements..].iter().rev().cloned(); + let largest = v.iter().cloned().k_largest(k); + let largest_by = v.iter().cloned().k_largest_by(k, Ord::cmp); + let largest_by_key = v.iter().cloned().k_largest_by_key(k, |&x| x); + + // Check the variations produce the same answers and that they're right + it::assert_equal(smallest, sorted_smallest.clone()); + it::assert_equal(smallest_by, sorted_smallest.clone()); + it::assert_equal(smallest_by_key, sorted_smallest); + + it::assert_equal(largest, sorted_largest.clone()); + it::assert_equal(largest_by, sorted_largest.clone()); + it::assert_equal(largest_by_key, sorted_largest); + } +} + +#[derive(Clone, Debug)] +struct RandIter { + idx: usize, + len: usize, + rng: R, + _t: PhantomData, +} + +impl Iterator for RandIter +where + Standard: Distribution, +{ + type Item = T; + fn next(&mut self) -> Option { + if self.idx == self.len { + None + } else { + self.idx += 1; + Some(self.rng.gen()) + } + } +} + +impl qc::Arbitrary for RandIter { + fn arbitrary(g: &mut G) -> Self { + Self { + idx: 0, + len: g.size(), + rng: R::seed_from_u64(g.next_u64()), + _t: PhantomData {}, + } + } +} + +// Check that taking the k smallest is the same as +// sorting then taking the k first elements +fn k_smallest_sort(i: I, k: u16) +where + I: Iterator + Clone, + I::Item: Ord + Debug, +{ + let j = i.clone(); + let k = k as usize; + it::assert_equal(i.k_smallest(k), j.sorted().take(k)) +} + +// Similar to `k_smallest_sort` but for our custom heap implementation. +fn k_smallest_by_sort(i: I, k: u16) +where + I: Iterator + Clone, + I::Item: Ord + Debug, +{ + let j = i.clone(); + let k = k as usize; + it::assert_equal(i.k_smallest_by(k, Ord::cmp), j.sorted().take(k)) +} + +macro_rules! generic_test { + ($f:ident, $($t:ty),+) => { + $(paste::item! { + qc::quickcheck! 
{ + fn [< $f _ $t >](i: RandIter<$t>, k: u16) -> () { + $f(i, k) + } + } + })+ + }; +} + +generic_test!(k_smallest_sort, u8, u16, u32, u64, i8, i16, i32, i64); +generic_test!(k_smallest_by_sort, u8, u16, u32, u64, i8, i16, i32, i64); + +#[test] +fn sorted_by_key() { + let sc = [3, 4, 1, 2].iter().cloned().sorted_by_key(|&x| x); + it::assert_equal(sc, vec![1, 2, 3, 4]); + + let v = (0..5).sorted_by_key(|&x| -x); + it::assert_equal(v, vec![4, 3, 2, 1, 0]); +} + +#[test] +fn sorted_by_cached_key() { + // Track calls to key function + let mut ncalls = 0; + + let sorted = [3, 4, 1, 2].iter().cloned().sorted_by_cached_key(|&x| { + ncalls += 1; + x.to_string() + }); + it::assert_equal(sorted, vec![1, 2, 3, 4]); + // Check key function called once per element + assert_eq!(ncalls, 4); + + let mut ncalls = 0; + + let sorted = (0..5).sorted_by_cached_key(|&x| { + ncalls += 1; + -x + }); + it::assert_equal(sorted, vec![4, 3, 2, 1, 0]); + // Check key function called once per element + assert_eq!(ncalls, 5); +} + +#[test] +fn test_multipeek() { + let nums = vec![1u8, 2, 3, 4, 5]; + + let mp = multipeek(nums.iter().copied()); + assert_eq!(nums, mp.collect::>()); + + let mut mp = multipeek(nums.iter().copied()); + assert_eq!(mp.peek(), Some(&1)); + assert_eq!(mp.next(), Some(1)); + assert_eq!(mp.peek(), Some(&2)); + assert_eq!(mp.peek(), Some(&3)); + assert_eq!(mp.next(), Some(2)); + assert_eq!(mp.peek(), Some(&3)); + assert_eq!(mp.peek(), Some(&4)); + assert_eq!(mp.peek(), Some(&5)); + assert_eq!(mp.peek(), None); + assert_eq!(mp.next(), Some(3)); + assert_eq!(mp.next(), Some(4)); + assert_eq!(mp.peek(), Some(&5)); + assert_eq!(mp.peek(), None); + assert_eq!(mp.next(), Some(5)); + assert_eq!(mp.next(), None); + assert_eq!(mp.peek(), None); +} + +#[test] +fn test_multipeek_reset() { + let data = [1, 2, 3, 4]; + + let mut mp = multipeek(cloned(&data)); + assert_eq!(mp.peek(), Some(&1)); + assert_eq!(mp.next(), Some(1)); + assert_eq!(mp.peek(), Some(&2)); + assert_eq!(mp.peek(), Some(&3)); + mp.reset_peek(); + assert_eq!(mp.peek(), Some(&2)); + assert_eq!(mp.next(), Some(2)); +} + +#[test] +fn test_multipeek_peeking_next() { + use crate::it::PeekingNext; + let nums = [1u8, 2, 3, 4, 5, 6, 7]; + + let mut mp = multipeek(nums.iter().copied()); + assert_eq!(mp.peeking_next(|&x| x != 0), Some(1)); + assert_eq!(mp.next(), Some(2)); + assert_eq!(mp.peek(), Some(&3)); + assert_eq!(mp.peek(), Some(&4)); + assert_eq!(mp.peeking_next(|&x| x == 3), Some(3)); + assert_eq!(mp.peek(), Some(&4)); + assert_eq!(mp.peeking_next(|&x| x != 4), None); + assert_eq!(mp.peeking_next(|&x| x == 4), Some(4)); + assert_eq!(mp.peek(), Some(&5)); + assert_eq!(mp.peek(), Some(&6)); + assert_eq!(mp.peeking_next(|&x| x != 5), None); + assert_eq!(mp.peek(), Some(&7)); + assert_eq!(mp.peeking_next(|&x| x == 5), Some(5)); + assert_eq!(mp.peeking_next(|&x| x == 6), Some(6)); + assert_eq!(mp.peek(), Some(&7)); + assert_eq!(mp.peek(), None); + assert_eq!(mp.next(), Some(7)); + assert_eq!(mp.peek(), None); +} + +#[test] +fn test_repeat_n_peeking_next() { + use crate::it::PeekingNext; + let mut rn = repeat_n(0, 5); + assert_eq!(rn.peeking_next(|&x| x != 0), None); + assert_eq!(rn.peeking_next(|&x| x <= 0), Some(0)); + assert_eq!(rn.next(), Some(0)); + assert_eq!(rn.peeking_next(|&x| x <= 0), Some(0)); + assert_eq!(rn.peeking_next(|&x| x != 0), None); + assert_eq!(rn.peeking_next(|&x| x >= 0), Some(0)); + assert_eq!(rn.next(), Some(0)); + assert_eq!(rn.peeking_next(|&x| x <= 0), None); + assert_eq!(rn.next(), None); +} + +#[test] +fn 
test_peek_nth() { + let nums = vec![1u8, 2, 3, 4, 5]; + + let iter = peek_nth(nums.iter().copied()); + assert_eq!(nums, iter.collect::>()); + + let mut iter = peek_nth(nums.iter().copied()); + + assert_eq!(iter.peek_nth(0), Some(&1)); + assert_eq!(iter.peek_nth(0), Some(&1)); + assert_eq!(iter.next(), Some(1)); + + assert_eq!(iter.peek_nth(0), Some(&2)); + assert_eq!(iter.peek_nth(1), Some(&3)); + assert_eq!(iter.next(), Some(2)); + + assert_eq!(iter.peek_nth(0), Some(&3)); + assert_eq!(iter.peek_nth(1), Some(&4)); + assert_eq!(iter.peek_nth(2), Some(&5)); + assert_eq!(iter.peek_nth(3), None); + + assert_eq!(iter.next(), Some(3)); + assert_eq!(iter.next(), Some(4)); + + assert_eq!(iter.peek_nth(0), Some(&5)); + assert_eq!(iter.peek_nth(1), None); + assert_eq!(iter.next(), Some(5)); + assert_eq!(iter.next(), None); + + assert_eq!(iter.peek_nth(0), None); + assert_eq!(iter.peek_nth(1), None); +} + +#[test] +fn test_peek_nth_peeking_next() { + use it::PeekingNext; + let nums = [1u8, 2, 3, 4, 5, 6, 7]; + let mut iter = peek_nth(nums.iter().copied()); + + assert_eq!(iter.peeking_next(|&x| x != 0), Some(1)); + assert_eq!(iter.next(), Some(2)); + + assert_eq!(iter.peek_nth(0), Some(&3)); + assert_eq!(iter.peek_nth(1), Some(&4)); + assert_eq!(iter.peeking_next(|&x| x == 3), Some(3)); + assert_eq!(iter.peek(), Some(&4)); + + assert_eq!(iter.peeking_next(|&x| x != 4), None); + assert_eq!(iter.peeking_next(|&x| x == 4), Some(4)); + assert_eq!(iter.peek_nth(0), Some(&5)); + assert_eq!(iter.peek_nth(1), Some(&6)); + + assert_eq!(iter.peeking_next(|&x| x != 5), None); + assert_eq!(iter.peek(), Some(&5)); + + assert_eq!(iter.peeking_next(|&x| x == 5), Some(5)); + assert_eq!(iter.peeking_next(|&x| x == 6), Some(6)); + assert_eq!(iter.peek_nth(0), Some(&7)); + assert_eq!(iter.peek_nth(1), None); + assert_eq!(iter.next(), Some(7)); + assert_eq!(iter.peek(), None); +} + +#[test] +fn test_peek_nth_next_if() { + let nums = [1u8, 2, 3, 4, 5, 6, 7]; + let mut iter = peek_nth(nums.iter().copied()); + + assert_eq!(iter.next_if(|&x| x != 0), Some(1)); + assert_eq!(iter.next(), Some(2)); + + assert_eq!(iter.peek_nth(0), Some(&3)); + assert_eq!(iter.peek_nth(1), Some(&4)); + assert_eq!(iter.next_if_eq(&3), Some(3)); + assert_eq!(iter.peek(), Some(&4)); + + assert_eq!(iter.next_if(|&x| x != 4), None); + assert_eq!(iter.next_if_eq(&4), Some(4)); + assert_eq!(iter.peek_nth(0), Some(&5)); + assert_eq!(iter.peek_nth(1), Some(&6)); + + assert_eq!(iter.next_if(|&x| x != 5), None); + assert_eq!(iter.peek(), Some(&5)); + + assert_eq!(iter.next_if(|&x| x % 2 == 1), Some(5)); + assert_eq!(iter.next_if_eq(&6), Some(6)); + assert_eq!(iter.peek_nth(0), Some(&7)); + assert_eq!(iter.peek_nth(1), None); + assert_eq!(iter.next(), Some(7)); + assert_eq!(iter.peek(), None); +} + +#[test] +fn pad_using() { + it::assert_equal((0..0).pad_using(1, |_| 1), 1..2); + + let v: Vec = vec![0, 1, 2]; + let r = v.into_iter().pad_using(5, |n| n); + it::assert_equal(r, vec![0, 1, 2, 3, 4]); + + let v: Vec = vec![0, 1, 2]; + let r = v.into_iter().pad_using(1, |_| panic!()); + it::assert_equal(r, vec![0, 1, 2]); +} + +#[test] +fn chunk_by() { + for (ch1, sub) in &"AABBCCC".chars().chunk_by(|&x| x) { + for ch2 in sub { + assert_eq!(ch1, ch2); + } + } + + for (ch1, sub) in &"AAABBBCCCCDDDD".chars().chunk_by(|&x| x) { + for ch2 in sub { + assert_eq!(ch1, ch2); + if ch1 == 'C' { + break; + } + } + } + + let toupper = |ch: &char| ch.to_uppercase().next().unwrap(); + + // try all possible orderings + for indices in permutohedron::Heap::new(&mut [0, 1, 2, 3]) 
{ + let chunks = "AaaBbbccCcDDDD".chars().chunk_by(&toupper); + let mut subs = chunks.into_iter().collect_vec(); + + for &idx in &indices[..] { + let (key, text) = match idx { + 0 => ('A', "Aaa".chars()), + 1 => ('B', "Bbb".chars()), + 2 => ('C', "ccCc".chars()), + 3 => ('D', "DDDD".chars()), + _ => unreachable!(), + }; + assert_eq!(key, subs[idx].0); + it::assert_equal(&mut subs[idx].1, text); + } + } + + let chunks = "AAABBBCCCCDDDD".chars().chunk_by(|&x| x); + let mut subs = chunks.into_iter().map(|(_, g)| g).collect_vec(); + + let sd = subs.pop().unwrap(); + let sc = subs.pop().unwrap(); + let sb = subs.pop().unwrap(); + let sa = subs.pop().unwrap(); + for (a, b, c, d) in multizip((sa, sb, sc, sd)) { + assert_eq!(a, 'A'); + assert_eq!(b, 'B'); + assert_eq!(c, 'C'); + assert_eq!(d, 'D'); + } + + // check that the key closure is called exactly n times + { + let mut ntimes = 0; + let text = "AABCCC"; + for (_, sub) in &text.chars().chunk_by(|&x| { + ntimes += 1; + x + }) { + for _ in sub {} + } + assert_eq!(ntimes, text.len()); + } + + { + let mut ntimes = 0; + let text = "AABCCC"; + for _ in &text.chars().chunk_by(|&x| { + ntimes += 1; + x + }) {} + assert_eq!(ntimes, text.len()); + } + + { + let text = "ABCCCDEEFGHIJJKK"; + let gr = text.chars().chunk_by(|&x| x); + it::assert_equal(gr.into_iter().flat_map(|(_, sub)| sub), text.chars()); + } +} + +#[test] +fn chunk_by_lazy_2() { + let data = [0, 1]; + let chunks = data.iter().chunk_by(|k| *k); + let gs = chunks.into_iter().collect_vec(); + it::assert_equal(data.iter(), gs.into_iter().flat_map(|(_k, g)| g)); + + let data = [0, 1, 1, 0, 0]; + let chunks = data.iter().chunk_by(|k| *k); + let mut gs = chunks.into_iter().collect_vec(); + gs[1..].reverse(); + it::assert_equal(&[0, 0, 0, 1, 1], gs.into_iter().flat_map(|(_, g)| g)); + + let grouper = data.iter().chunk_by(|k| *k); + let mut chunks = Vec::new(); + for (k, chunk) in &grouper { + if *k == 1 { + chunks.push(chunk); + } + } + it::assert_equal(&mut chunks[0], &[1, 1]); + + let data = [0, 0, 0, 1, 1, 0, 0, 2, 2, 3, 3]; + let grouper = data.iter().chunk_by(|k| *k); + let mut chunks = Vec::new(); + for (i, (_, chunk)) in grouper.into_iter().enumerate() { + if i < 2 { + chunks.push(chunk); + } else if i < 4 { + for _ in chunk {} + } else { + chunks.push(chunk); + } + } + it::assert_equal(&mut chunks[0], &[0, 0, 0]); + it::assert_equal(&mut chunks[1], &[1, 1]); + it::assert_equal(&mut chunks[2], &[3, 3]); + + let data = [0, 0, 0, 1, 1, 0, 0, 2, 2, 3, 3]; + let mut i = 0; + let grouper = data.iter().chunk_by(move |_| { + let k = i / 3; + i += 1; + k + }); + for (i, chunk) in &grouper { + match i { + 0 => it::assert_equal(chunk, &[0, 0, 0]), + 1 => it::assert_equal(chunk, &[1, 1, 0]), + 2 => it::assert_equal(chunk, &[0, 2, 2]), + 3 => it::assert_equal(chunk, &[3, 3]), + _ => unreachable!(), + } + } +} + +#[test] +fn chunk_by_lazy_3() { + // test consuming each chunk on the lap after it was produced + let data = [0, 0, 0, 1, 1, 0, 0, 1, 1, 2, 2]; + let grouper = data.iter().chunk_by(|elt| *elt); + let mut last = None; + for (key, chunk) in &grouper { + if let Some(gr) = last.take() { + for elt in gr { + assert!(elt != key && i32::abs(elt - key) == 1); + } + } + last = Some(chunk); + } +} + +#[test] +fn chunks() { + let data = [0, 0, 0, 1, 1, 0, 0, 2, 2, 3, 3]; + let grouper = data.iter().chunks(3); + for (i, chunk) in grouper.into_iter().enumerate() { + match i { + 0 => it::assert_equal(chunk, &[0, 0, 0]), + 1 => it::assert_equal(chunk, &[1, 1, 0]), + 2 => it::assert_equal(chunk, &[0, 2, 2]), 
+ 3 => it::assert_equal(chunk, &[3, 3]),
+ _ => unreachable!(),
+ }
+ }
+}
+
+#[test]
+fn concat_empty() {
+ let data: Vec<Vec<()>> = Vec::new();
+ assert_eq!(data.into_iter().concat(), Vec::new())
+}
+
+#[test]
+fn concat_non_empty() {
+ let data = vec![vec![1, 2, 3], vec![4, 5, 6], vec![7, 8, 9]];
+ assert_eq!(data.into_iter().concat(), vec![1, 2, 3, 4, 5, 6, 7, 8, 9])
+}
+
+#[test]
+fn combinations() {
+ assert!((1..3).combinations(5).next().is_none());
+
+ let it = (1..3).combinations(2);
+ it::assert_equal(it, vec![vec![1, 2]]);
+
+ let it = (1..5).combinations(2);
+ it::assert_equal(
+ it,
+ vec![
+ vec![1, 2],
+ vec![1, 3],
+ vec![1, 4],
+ vec![2, 3],
+ vec![2, 4],
+ vec![3, 4],
+ ],
+ );
+
+ it::assert_equal((0..0).tuple_combinations::<(_, _)>(), <Vec<_>>::new());
+ it::assert_equal((0..1).tuple_combinations::<(_, _)>(), <Vec<_>>::new());
+ it::assert_equal((0..2).tuple_combinations::<(_, _)>(), vec![(0, 1)]);
+
+ it::assert_equal((0..0).combinations(2), <Vec<Vec<i32>>>::new());
+ it::assert_equal((0..1).combinations(1), vec![vec![0]]);
+ it::assert_equal((0..2).combinations(1), vec![vec![0], vec![1]]);
+ it::assert_equal((0..2).combinations(2), vec![vec![0, 1]]);
+}
+
+#[test]
+fn combinations_of_too_short() {
+ for i in 1..10 {
+ assert!((0..0).combinations(i).next().is_none());
+ assert!((0..i - 1).combinations(i).next().is_none());
+ }
+}
+
+#[test]
+fn combinations_zero() {
+ it::assert_equal((1..3).combinations(0), vec![vec![]]);
+ it::assert_equal((0..0).combinations(0), vec![vec![]]);
+}
+
+fn binomial(n: usize, k: usize) -> usize {
+ if k > n {
+ 0
+ } else {
+ (n - k + 1..=n).product::<usize>() / (1..=k).product::<usize>()
+ }
+}
+
+#[test]
+fn combinations_range_count() {
+ for n in 0..=10 {
+ for k in 0..=10 {
+ let len = binomial(n, k);
+ let mut it = (0..n).combinations(k);
+ assert_eq!(len, it.clone().count());
+ assert_eq!(len, it.size_hint().0);
+ assert_eq!(Some(len), it.size_hint().1);
+ for count in (0..len).rev() {
+ let elem = it.next();
+ assert!(elem.is_some());
+ assert_eq!(count, it.clone().count());
+ assert_eq!(count, it.size_hint().0);
+ assert_eq!(Some(count), it.size_hint().1);
+ }
+ let should_be_none = it.next();
+ assert!(should_be_none.is_none());
+ }
+ }
+}
+
+#[test]
+fn combinations_inexact_size_hints() {
+ for k in 0..=10 {
+ let mut numbers = (0..18).filter(|i| i % 2 == 0); // 9 elements
+ let mut it = numbers.clone().combinations(k);
+ let real_n = numbers.clone().count();
+ let len = binomial(real_n, k);
+ assert_eq!(len, it.clone().count());
+
+ let mut nb_loaded = 0;
+ let sh = numbers.size_hint();
+ assert_eq!(binomial(sh.0 + nb_loaded, k), it.size_hint().0);
+ assert_eq!(sh.1.map(|n| binomial(n + nb_loaded, k)), it.size_hint().1);
+
+ for next_count in 1..=len {
+ let elem = it.next();
+ assert!(elem.is_some());
+ assert_eq!(len - next_count, it.clone().count());
+ if next_count == 1 {
+ // The very first time, the lazy buffer is prefilled.
+ nb_loaded = numbers.by_ref().take(k).count();
+ } else {
+ // Then it loads one item each time until exhausted.
+ let nb = numbers.next();
+ if nb.is_some() {
+ nb_loaded += 1;
+ }
+ }
+ let sh = numbers.size_hint();
+ if next_count > real_n - k + 1 {
+ assert_eq!(0, sh.0);
+ assert_eq!(Some(0), sh.1);
+ assert_eq!(real_n, nb_loaded);
+ // Once it's fully loaded, size hints of `it` are exacts.
+ }
+ assert_eq!(binomial(sh.0 + nb_loaded, k) - next_count, it.size_hint().0);
+ assert_eq!(
+ sh.1.map(|n| binomial(n + nb_loaded, k) - next_count),
+ it.size_hint().1
+ );
+ }
+ let should_be_none = it.next();
+ assert!(should_be_none.is_none());
+ }
+}
+
+#[test]
+fn permutations_zero() {
+ it::assert_equal((1..3).permutations(0), vec![vec![]]);
+ it::assert_equal((0..0).permutations(0), vec![vec![]]);
+}
+
+#[test]
+fn permutations_range_count() {
+ for n in 0..=7 {
+ for k in 0..=7 {
+ let len = if k <= n { (n - k + 1..=n).product() } else { 0 };
+ let mut it = (0..n).permutations(k);
+ assert_eq!(len, it.clone().count());
+ assert_eq!(len, it.size_hint().0);
+ assert_eq!(Some(len), it.size_hint().1);
+ for count in (0..len).rev() {
+ let elem = it.next();
+ assert!(elem.is_some());
+ assert_eq!(count, it.clone().count());
+ assert_eq!(count, it.size_hint().0);
+ assert_eq!(Some(count), it.size_hint().1);
+ }
+ let should_be_none = it.next();
+ assert!(should_be_none.is_none());
+ }
+ }
+}
+
+#[test]
+fn permutations_overflowed_size_hints() {
+ let mut it = std::iter::repeat(()).permutations(2);
+ assert_eq!(it.size_hint().0, usize::MAX);
+ assert_eq!(it.size_hint().1, None);
+ for nb_generated in 1..=1000 {
+ it.next();
+ assert!(it.size_hint().0 >= usize::MAX - nb_generated);
+ assert_eq!(it.size_hint().1, None);
+ }
+}
+
+#[test]
+fn combinations_with_replacement() {
+ // Pool smaller than n
+ it::assert_equal((0..1).combinations_with_replacement(2), vec![vec![0, 0]]);
+ // Pool larger than n
+ it::assert_equal(
+ (0..3).combinations_with_replacement(2),
+ vec![
+ vec![0, 0],
+ vec![0, 1],
+ vec![0, 2],
+ vec![1, 1],
+ vec![1, 2],
+ vec![2, 2],
+ ],
+ );
+ // Zero size
+ it::assert_equal((0..3).combinations_with_replacement(0), vec![vec![]]);
+ // Zero size on empty pool
+ it::assert_equal((0..0).combinations_with_replacement(0), vec![vec![]]);
+ // Empty pool
+ it::assert_equal(
+ (0..0).combinations_with_replacement(2),
+ <Vec<Vec<_>>>::new(),
+ );
+}
+
+#[test]
+fn combinations_with_replacement_range_count() {
+ for n in 0..=7 {
+ for k in 0..=7 {
+ let len = binomial(usize::saturating_sub(n + k, 1), k);
+ let mut it = (0..n).combinations_with_replacement(k);
+ assert_eq!(len, it.clone().count());
+ assert_eq!(len, it.size_hint().0);
+ assert_eq!(Some(len), it.size_hint().1);
+ for count in (0..len).rev() {
+ let elem = it.next();
+ assert!(elem.is_some());
+ assert_eq!(count, it.clone().count());
+ assert_eq!(count, it.size_hint().0);
+ assert_eq!(Some(count), it.size_hint().1);
+ }
+ let should_be_none = it.next();
+ assert!(should_be_none.is_none());
+ }
+ }
+}
+
+#[test]
+fn powerset() {
+ it::assert_equal((0..0).powerset(), vec![vec![]]);
+ it::assert_equal((0..1).powerset(), vec![vec![], vec![0]]);
+ it::assert_equal(
+ (0..2).powerset(),
+ vec![vec![], vec![0], vec![1], vec![0, 1]],
+ );
+ it::assert_equal(
+ (0..3).powerset(),
+ vec![
+ vec![],
+ vec![0],
+ vec![1],
+ vec![2],
+ vec![0, 1],
+ vec![0, 2],
+ vec![1, 2],
+ vec![0, 1, 2],
+ ],
+ );
+
+ assert_eq!((0..4).powerset().count(), 1 << 4);
+ assert_eq!((0..8).powerset().count(), 1 << 8);
+ assert_eq!((0..16).powerset().count(), 1 << 16);
+
+ for n in 0..=10 {
+ let mut it = (0..n).powerset();
+ let len = 2_usize.pow(n);
+ assert_eq!(len, it.clone().count());
+ assert_eq!(len, it.size_hint().0);
+ assert_eq!(Some(len), it.size_hint().1);
+ for count in (0..len).rev() {
+ let elem = it.next();
+ assert!(elem.is_some());
+ assert_eq!(count, it.clone().count());
+ assert_eq!(count, it.size_hint().0);
+ assert_eq!(Some(count), it.size_hint().1);
+ }
+ let should_be_none = it.next();
+ assert!(should_be_none.is_none());
+ }
+}
+
+#[test]
+fn diff_mismatch() {
+ let a = [1, 2, 3, 4];
+ let b = vec![1.0, 5.0, 3.0, 4.0];
+ let b_map = b.into_iter().map(|f| f as i32);
+ let diff = it::diff_with(a.iter(), b_map, |a, b| *a == b);
+
+ assert!(match diff {
+ Some(it::Diff::FirstMismatch(1, _, from_diff)) =>
+ from_diff.collect::<Vec<_>>() == vec![5, 3, 4],
+ _ => false,
+ });
+}
+
+#[test]
+fn diff_longer() {
+ let a = [1, 2, 3, 4];
+ let b = vec![1.0, 2.0, 3.0, 4.0, 5.0, 6.0];
+ let b_map = b.into_iter().map(|f| f as i32);
+ let diff = it::diff_with(a.iter(), b_map, |a, b| *a == b);
+
+ assert!(match diff {
+ Some(it::Diff::Longer(_, remaining)) => remaining.collect::<Vec<_>>() == vec![5, 6],
+ _ => false,
+ });
+}
+
+#[test]
+fn diff_shorter() {
+ let a = [1, 2, 3, 4];
+ let b = vec![1.0, 2.0];
+ let b_map = b.into_iter().map(|f| f as i32);
+ let diff = it::diff_with(a.iter(), b_map, |a, b| *a == b);
+
+ assert!(match diff {
+ Some(it::Diff::Shorter(len, _)) => len == 2,
+ _ => false,
+ });
+}
+
+#[test]
+fn extrema_set() {
+ use std::cmp::Ordering;
+
+ // A peculiar type: Equality compares both tuple items, but ordering only the
+ // first item. Used to distinguish equal elements.
+ #[derive(Clone, Debug, PartialEq, Eq)]
+ struct Val(u32, u32);
+
+ impl PartialOrd for Val {
+ fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
+ Some(self.cmp(other))
+ }
+ }
+
+ impl Ord for Val {
+ fn cmp(&self, other: &Self) -> Ordering {
+ self.0.cmp(&other.0)
+ }
+ }
+
+ assert_eq!(None::<u32>.iter().min_set(), Vec::<&u32>::new());
+ assert_eq!(None::<u32>.iter().max_set(), Vec::<&u32>::new());
+
+ assert_eq!(Some(1u32).iter().min_set(), vec![&1]);
+ assert_eq!(Some(1u32).iter().max_set(), vec![&1]);
+
+ let data = [Val(0, 1), Val(2, 0), Val(0, 2), Val(1, 0), Val(2, 1)];
+
+ let min_set = data.iter().min_set();
+ assert_eq!(min_set, vec![&Val(0, 1), &Val(0, 2)]);
+
+ let min_set_by_key = data.iter().min_set_by_key(|v| v.1);
+ assert_eq!(min_set_by_key, vec![&Val(2, 0), &Val(1, 0)]);
+
+ let min_set_by = data.iter().min_set_by(|x, y| x.1.cmp(&y.1));
+ assert_eq!(min_set_by, vec![&Val(2, 0), &Val(1, 0)]);
+
+ let max_set = data.iter().max_set();
+ assert_eq!(max_set, vec![&Val(2, 0), &Val(2, 1)]);
+
+ let max_set_by_key = data.iter().max_set_by_key(|v| v.1);
+ assert_eq!(max_set_by_key, vec![&Val(0, 2)]);
+
+ let max_set_by = data.iter().max_set_by(|x, y| x.1.cmp(&y.1));
+ assert_eq!(max_set_by, vec![&Val(0, 2)]);
+}
+
+#[test]
+fn minmax() {
+ use crate::it::MinMaxResult;
+ use std::cmp::Ordering;
+
+ // A peculiar type: Equality compares both tuple items, but ordering only the
+ // first item. This is so we can check the stability property easily.
+ #[derive(Clone, Debug, PartialEq, Eq)]
+ struct Val(u32, u32);
+
+ impl PartialOrd for Val {
+ fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
+ Some(self.cmp(other))
+ }
+ }
+
+ impl Ord for Val {
+ fn cmp(&self, other: &Self) -> Ordering {
+ self.0.cmp(&other.0)
+ }
+ }
+
+ assert_eq!(
+ None::<Option<u32>>.iter().minmax(),
+ MinMaxResult::NoElements
+ );
+
+ assert_eq!(Some(1u32).iter().minmax(), MinMaxResult::OneElement(&1));
+
+ let data = [Val(0, 1), Val(2, 0), Val(0, 2), Val(1, 0), Val(2, 1)];
+
+ let minmax = data.iter().minmax();
+ assert_eq!(minmax, MinMaxResult::MinMax(&Val(0, 1), &Val(2, 1)));
+
+ let (min, max) = data.iter().minmax_by_key(|v| v.1).into_option().unwrap();
+ assert_eq!(min, &Val(2, 0));
+ assert_eq!(max, &Val(0, 2));
+
+ let (min, max) = data
+ .iter()
+ .minmax_by(|x, y| x.1.cmp(&y.1))
+ .into_option()
+ .unwrap();
+ assert_eq!(min, &Val(2, 0));
+ assert_eq!(max, &Val(0, 2));
+}
+
+#[test]
+fn format() {
+ let data = [0, 1, 2, 3];
+ let ans1 = "0, 1, 2, 3";
+ let ans2 = "0--1--2--3";
+
+ let t1 = format!("{}", data.iter().format(", "));
+ assert_eq!(t1, ans1);
+ let t2 = format!("{:?}", data.iter().format("--"));
+ assert_eq!(t2, ans2);
+
+ let dataf = [1.1, 5.71828, -22.];
+ let t3 = format!("{:.2e}", dataf.iter().format(", "));
+ assert_eq!(t3, "1.10e0, 5.72e0, -2.20e1");
+}
+
+#[test]
+fn while_some() {
+ let ns = (1..10)
+ .map(|x| if x % 5 != 0 { Some(x) } else { None })
+ .while_some();
+ it::assert_equal(ns, vec![1, 2, 3, 4]);
+}
+
+#[test]
+fn fold_while() {
+ let mut iterations = 0;
+ let vec = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
+ let sum = vec
+ .into_iter()
+ .fold_while(0, |acc, item| {
+ iterations += 1;
+ let new_sum = acc + item;
+ if new_sum <= 20 {
+ FoldWhile::Continue(new_sum)
+ } else {
+ FoldWhile::Done(acc)
+ }
+ })
+ .into_inner();
+ assert_eq!(iterations, 6);
+ assert_eq!(sum, 15);
+}
+
+#[test]
+fn tree_reduce() {
+ let x = [
+ "",
+ "0",
+ "0 1 x",
+ "0 1 x 2 x",
+ "0 1 x 2 3 x x",
+ "0 1 x 2 3 x x 4 x",
+ "0 1 x 2 3 x x 4 5 x x",
+ "0 1 x 2 3 x x 4 5 x 6 x x",
+ "0 1 x 2 3 x x 4 5 x 6 7 x x x",
+ "0 1 x 2 3 x x 4 5 x 6 7 x x x 8 x",
+ "0 1 x 2 3 x x 4 5 x 6 7 x x x 8 9 x x",
+ "0 1 x 2 3 x x 4 5 x 6 7 x x x 8 9 x 10 x x",
+ "0 1 x 2 3 x x 4 5 x 6 7 x x x 8 9 x 10 11 x x x",
+ "0 1 x 2 3 x x 4 5 x 6 7 x x x 8 9 x 10 11 x x 12 x x",
+ "0 1 x 2 3 x x 4 5 x 6 7 x x x 8 9 x 10 11 x x 12 13 x x x",
+ "0 1 x 2 3 x x 4 5 x 6 7 x x x 8 9 x 10 11 x x 12 13 x 14 x x x",
+ "0 1 x 2 3 x x 4 5 x 6 7 x x x 8 9 x 10 11 x x 12 13 x 14 15 x x x x",
+ ];
+ for (i, &s) in x.iter().enumerate() {
+ let expected = if s.is_empty() {
+ None
+ } else {
+ Some(s.to_string())
+ };
+ let num_strings = (0..i).map(|x| x.to_string());
+ let actual = num_strings.tree_reduce(|a, b| format!("{} {} x", a, b));
+ assert_eq!(actual, expected);
+ }
+}
+
+#[test]
+fn exactly_one_question_mark_syntax_works() {
+ exactly_one_question_mark_return().unwrap_err();
+}
+
+fn exactly_one_question_mark_return() -> Result<(), ExactlyOneError<std::slice::Iter<'static, ()>>>
+{
+ [].iter().exactly_one()?;
+ Ok(())
+}
+
+#[test]
+fn multiunzip() {
+ let (a, b, c): (Vec<_>, Vec<_>, Vec<_>) = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
+ .iter()
+ .cloned()
+ .multiunzip();
+ assert_eq!((a, b, c), (vec![0, 3, 6], vec![1, 4, 7], vec![2, 5, 8]));
+ let (): () = [(), (), ()].iter().cloned().multiunzip();
+ #[allow(clippy::type_complexity)]
+ let t: (
+ Vec<_>,
+ Vec<_>,
+ Vec<_>,
+ Vec<_>,
+ Vec<_>,
+ Vec<_>,
+ Vec<_>,
+ Vec<_>,
+ Vec<_>,
+ Vec<_>,
+ Vec<_>,
+ Vec<_>,
+ ) = [(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11)]
+ .iter()
+ .cloned()
+
.multiunzip(); + assert_eq!( + t, + ( + vec![0], + vec![1], + vec![2], + vec![3], + vec![4], + vec![5], + vec![6], + vec![7], + vec![8], + vec![9], + vec![10], + vec![11] + ) + ); +} diff --git a/vendor/itertools/tests/tuples.rs b/vendor/itertools/tests/tuples.rs new file mode 100644 index 00000000000000..9fc8b3cc78a566 --- /dev/null +++ b/vendor/itertools/tests/tuples.rs @@ -0,0 +1,86 @@ +use itertools::Itertools; + +#[test] +fn tuples() { + let v = [1, 2, 3, 4, 5]; + let mut iter = v.iter().cloned().tuples(); + assert_eq!(Some((1,)), iter.next()); + assert_eq!(Some((2,)), iter.next()); + assert_eq!(Some((3,)), iter.next()); + assert_eq!(Some((4,)), iter.next()); + assert_eq!(Some((5,)), iter.next()); + assert_eq!(None, iter.next()); + assert_eq!(None, iter.into_buffer().next()); + + let mut iter = v.iter().cloned().tuples(); + assert_eq!(Some((1, 2)), iter.next()); + assert_eq!(Some((3, 4)), iter.next()); + assert_eq!(None, iter.next()); + itertools::assert_equal(vec![5], iter.into_buffer()); + + let mut iter = v.iter().cloned().tuples(); + assert_eq!(Some((1, 2, 3)), iter.next()); + assert_eq!(None, iter.next()); + itertools::assert_equal(vec![4, 5], iter.into_buffer()); + + let mut iter = v.iter().cloned().tuples(); + assert_eq!(Some((1, 2, 3, 4)), iter.next()); + assert_eq!(None, iter.next()); + itertools::assert_equal(vec![5], iter.into_buffer()); +} + +#[test] +fn tuple_windows() { + let v = [1, 2, 3, 4, 5]; + + let mut iter = v.iter().cloned().tuple_windows(); + assert_eq!(Some((1,)), iter.next()); + assert_eq!(Some((2,)), iter.next()); + assert_eq!(Some((3,)), iter.next()); + + let mut iter = v.iter().cloned().tuple_windows(); + assert_eq!(Some((1, 2)), iter.next()); + assert_eq!(Some((2, 3)), iter.next()); + assert_eq!(Some((3, 4)), iter.next()); + assert_eq!(Some((4, 5)), iter.next()); + assert_eq!(None, iter.next()); + + let mut iter = v.iter().cloned().tuple_windows(); + assert_eq!(Some((1, 2, 3)), iter.next()); + assert_eq!(Some((2, 3, 4)), iter.next()); + assert_eq!(Some((3, 4, 5)), iter.next()); + assert_eq!(None, iter.next()); + + let mut iter = v.iter().cloned().tuple_windows(); + assert_eq!(Some((1, 2, 3, 4)), iter.next()); + assert_eq!(Some((2, 3, 4, 5)), iter.next()); + assert_eq!(None, iter.next()); + + let v = [1, 2, 3]; + let mut iter = v.iter().cloned().tuple_windows::<(_, _, _, _)>(); + assert_eq!(None, iter.next()); +} + +#[test] +fn next_tuple() { + let v = [1, 2, 3, 4, 5]; + let mut iter = v.iter(); + assert_eq!(iter.next_tuple().map(|(&x, &y)| (x, y)), Some((1, 2))); + assert_eq!(iter.next_tuple().map(|(&x, &y)| (x, y)), Some((3, 4))); + assert_eq!(iter.next_tuple::<(_, _)>(), None); +} + +#[test] +fn collect_tuple() { + let v = [1, 2]; + let iter = v.iter().cloned(); + assert_eq!(iter.collect_tuple(), Some((1, 2))); + + let v = [1]; + let iter = v.iter().cloned(); + assert_eq!(iter.collect_tuple::<(_, _)>(), None); + + let v = [1, 2, 3]; + let iter = v.iter().cloned(); + assert_eq!(iter.collect_tuple::<(_, _)>(), None); +} diff --git a/vendor/itertools/tests/zip.rs b/vendor/itertools/tests/zip.rs new file mode 100644 index 00000000000000..716ac20b31dda4 --- /dev/null +++ b/vendor/itertools/tests/zip.rs @@ -0,0 +1,56 @@ +use itertools::multizip; +use itertools::EitherOrBoth::{Both, Left, Right}; +use itertools::Itertools; + +#[test] +fn zip_longest_fused() { + let a = [Some(1), None, Some(3), Some(4)]; + let b = [1, 2, 3]; + + let unfused = a + .iter() + .batching(|it| *it.next().unwrap()) + .zip_longest(b.iter().cloned()); + itertools::assert_equal(unfused, 
vec![Both(1, 1), Right(2), Right(3)]); +} + +#[test] +fn test_zip_longest_size_hint() { + let c = (1..10).cycle(); + let v: &[_] = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; + let v2 = &[10, 11, 12]; + + assert_eq!(c.zip_longest(v.iter()).size_hint(), (std::usize::MAX, None)); + + assert_eq!(v.iter().zip_longest(v2.iter()).size_hint(), (10, Some(10))); +} + +#[test] +fn test_double_ended_zip_longest() { + let xs = [1, 2, 3, 4, 5, 6]; + let ys = [1, 2, 3, 7]; + let a = xs.iter().copied(); + let b = ys.iter().copied(); + let mut it = a.zip_longest(b); + assert_eq!(it.next(), Some(Both(1, 1))); + assert_eq!(it.next(), Some(Both(2, 2))); + assert_eq!(it.next_back(), Some(Left(6))); + assert_eq!(it.next_back(), Some(Left(5))); + assert_eq!(it.next_back(), Some(Both(4, 7))); + assert_eq!(it.next(), Some(Both(3, 3))); + assert_eq!(it.next(), None); +} + +#[test] +fn test_double_ended_zip() { + let xs = [1, 2, 3, 4, 5, 6]; + let ys = [1, 2, 3, 7]; + let a = xs.iter().copied(); + let b = ys.iter().copied(); + let mut it = multizip((a, b)); + assert_eq!(it.next_back(), Some((4, 7))); + assert_eq!(it.next_back(), Some((3, 3))); + assert_eq!(it.next_back(), Some((2, 2))); + assert_eq!(it.next_back(), Some((1, 1))); + assert_eq!(it.next_back(), None); +} diff --git a/vendor/libc/.cargo-checksum.json b/vendor/libc/.cargo-checksum.json new file mode 100644 index 00000000000000..0edc0b8ceb9f4d --- /dev/null +++ b/vendor/libc/.cargo-checksum.json @@ -0,0 +1 @@ +{"files":{".cargo_vcs_info.json":"98fdce84ef32aa54b41de15fc9dbebbfe6fcdc8d3c03c17fed6f36f6bfc0843e",".editorconfig":"e57fecd6b82cd69640ca1bc4e44f0c7acfe5fc12f641f14af9536e323b4159db",".git-blame-ignore-revs":"761aa385c661241fa77c15b502c361398cf500bbb9f8c3a4579b412c4c6249d7",".release-plz.toml":"fcf2d382c4a2abd96caf9cc391b63e0c94d5832f5c48e9ab9eb4b2c847c0887c","CHANGELOG.md":"5dc77b4161d173b54837a0df9a25cc5f6dfbd7319918d2a3767527fe9920b210","CONTRIBUTING.md":"1cac4c47d46f83d06eeabfb7bf3a70b1a5405a913db1afa31c0e6387eb5bc189","Cargo.lock":"65aaca88ee856ff95e3cc6f25d79a6e8533e973665b4a67cc5d90bde9123cac7","Cargo.toml":"cbae2079ed7be2e12c340f4284e8987e7f532beb59aa6fcee2b7755b99c23fc7","Cargo.toml.orig":"1b4281b7d5468656703919278b880ed3a9f025a27fbadb7d9bd7d14940702b46","LICENSE-APACHE":"62c7a1e35f56406896d7aa7ca52d0cc0d272ac022b5d2796e7d6905db8a3636a","LICENSE-MIT":"a8d47ff51ca256f56a8932dba07660672dbfe3004257ca8de708aac1415937a1","README.md":"a198be444453fe0b24d4fb6a8b732eb9e6dc77ebcfd119fca95b97b45c77c77a","build.rs":"f01c77e53ffb343d5c8885e097183589bb91d1fbab78b14bc0a658694616f95c","cherry-pick-stable.sh":"c7d95e3cf2624115219edc224e3ee56484d3f05f8e157f92d5a0e3a735e51640","rustfmt.toml":"e9321ff995242e8cb0a4984254f0748ef11a89ca4618cab8f047ee59a88768e7","src/fuchsia/aarch64.rs":"9cd032304a54321a8926cf3157194c5d79a2491b1b974a05fe71729fa43b5738","src/fuchsia/mod.rs":"e0ed316a30662f2bd1cdf1c8013440c6f2688c48083326f58533565a588e00dd","src/fuchsia/riscv64.rs":"f2aba92cb88480bd72a9eb7a41aafc63c5db293b93113fa973fe5ab1fd819e29","src/fuchsia/x86_64.rs":"a6de0a3c9a45e7af2f0bac96d73da6a7cfb8c003335183016b08a25e2acab65b","src/hermit.rs":"f150c2882a4d8e510259697ff7685899e74bfabf84e5d37103c54b4387093775","src/lib.rs":"86c46729a494060c40a63f3732c5a4c1f3d757ba08d398c1ec3de2030c91a27b","src/macros.rs":"ba63c9018fe21b20dba1ceea97fb293860148d8bd8fbc7c0ab038ce2afcff1b2","src/new/bionic/mod.rs":"752e47b8a3c8cd3090e1af970f4c3e33e5fe5bb3f388bab8cfa66d99562dbfab","src/new/bionic/sys/mod.rs":"0fc3d4ff1c37a21f47c127e5b3818d6c115690e39082be9c3796b8ac2cc99478","src/new/bionic/sys/socket.rs":"
c11b51f13897a7fff420f28e93bef74ee084fc08cc0a3ab15d5b286656a0043c","src/new/linux_uapi/linux/can.rs":"5684eccda3fe635c94927bfcc3cc33e357b18a16b19b72ee6b27e0f6df72079f","src/new/linux_uapi/linux/can/j1939.rs":"ab2a330c3d3cca3ac6a693b381661772036e10a6dc0004db3809a4f42cf724ba","src/new/linux_uapi/linux/can/raw.rs":"cc39efa823b9f4d13bc0978ce51186e91d484c2bcfce4c6b0cbfa28a3a256488","src/new/linux_uapi/linux/mod.rs":"e9135b549d9427b99f5978ee4096e5b7d613104cf1a91433488536ce0af3f74f","src/new/linux_uapi/mod.rs":"7497197e880a36e9276110947611d1911bc2375316cd5969a18c83ac6f9be78d","src/new/mod.rs":"143ce9cb8b1f50d08594ec8d305ad1a9d663b3dac139a1a4b758ac5c25e8a14c","src/primitives.rs":"ea7e28520f5f3991ab444f9262cb8497a8794ced2cf5e1754795fe82ebed5fb7","src/psp.rs":"081cf4e5127ba10ebb62a9a5f8f849dd92742693db443db429d446ee472b5d41","src/sgx.rs":"964d6af358f5c85f948275090e5a7854e0169c43b5c338070b6a4cd156ebc9e6","src/solid/aarch64.rs":"4d4236500f98858fc249f3b6858af5009851c8c582031926b8195b2646f7da5e","src/solid/arm.rs":"4d4236500f98858fc249f3b6858af5009851c8c582031926b8195b2646f7da5e","src/solid/mod.rs":"0a89235e63d28a0e1a938243de862fe60bd3a3e9373c09c8c5cd42399b1c712e","src/switch.rs":"bfdcaf0268d79652ee52c1a2837959b8253e6a4124fd94dda82727ecc442a758","src/teeos/mod.rs":"d03cf399183ac40c6f74ce09787089007648d00b112c9fa8254723f0c2135c94","src/trusty.rs":"c5012aeefc4307c11374f062ad1d530e2ec556e7069e375de326a49c77f65e22","src/types.rs":"0d11841d8045deabf7bcded237a347978bd41e8e2fd227acc98400c383e221c6","src/unix/aix/mod.rs":"fec4d43917078c55debffe104e42d14dc66e039c7685566043a13ca42ebee072","src/unix/aix/powerpc64.rs":"45614bea9cf2732fca2a6d7f1bdc7d62eb2dcf2146993e1d726f677f6f4d3a47","src/unix/bsd/apple/b32/mod.rs":"56e90d43e36bcf0a4012072f92dc905dd40af386014b94c978f30b3bcbed8abf","src/unix/bsd/apple/b64/aarch64/mod.rs":"897be1845603876b2849a1fddf53ccd8a97b1156907f4833f7dfb0778e840d0f","src/unix/bsd/apple/b64/mod.rs":"75a313514fd3b9f21391ddb77f965386c36e99bbf4a4c952445e4e8d50bb16b5","src/unix/bsd/apple/b64/x86_64/mod.rs":"889efaf7baeca8ba2857fba1cba19c09dd1d27f3661b6687295f82caba716d9a","src/unix/bsd/apple/mod.rs":"be8da0a8b9c788f7f3302ad5578eae7cdeab88ea198918c3cd0545aae157b919","src/unix/bsd/freebsdlike/dragonfly/errno.rs":"07b19390b9ae8f541ac35fd4e14685d639b95152d6d7a33814bb749b8b927298","src/unix/bsd/freebsdlike/dragonfly/mod.rs":"dc6daf4a8e04ad504e2aea0d489457299d87c05a6e966283b7a40e343887a2b8","src/unix/bsd/freebsdlike/freebsd/aarch64.rs":"246e20e9a143d4ac81d37d940e020ede283f8df5aecd5d149b297664c4293a84","src/unix/bsd/freebsdlike/freebsd/arm.rs":"6e938534090f85040f7228b781ba57020c412d4f8f99d65ca5ce2a0a0baf93a1","src/unix/bsd/freebsdlike/freebsd/freebsd11/b32.rs":"951c9297ed31a13509716068bc04c202ace3cbca3cc485e3a7f6b2fefa06e396","src/unix/bsd/freebsdlike/freebsd/freebsd11/b64.rs":"62ae6e372b644a4270c0bb325edaac4a9e553ad83ffdcf2f4d8608a2ec2bae9c","src/unix/bsd/freebsdlike/freebsd/freebsd11/mod.rs":"16595db5aaf422e425ac9ba1e693aa0f176f743ca194a767f265e1b3cb1b3f22","src/unix/bsd/freebsdlike/freebsd/freebsd12/mod.rs":"8467832d4b8a73e473371f49a2d7b56f632b8f44262c43b5f409c4094cbad26a","src/unix/bsd/freebsdlike/freebsd/freebsd12/x86_64.rs":"64c4bd82eaf30a2681417f982fce28de7d1b0743bfaa14004692a56cee44be21","src/unix/bsd/freebsdlike/freebsd/freebsd13/mod.rs":"2027bae85dac0ca1cfc97f4304023c06174231af3547f33c3f4fed0dcf8c5732","src/unix/bsd/freebsdlike/freebsd/freebsd13/x86_64.rs":"64c4bd82eaf30a2681417f982fce28de7d1b0743bfaa14004692a56cee44be21","src/unix/bsd/freebsdlike/freebsd/freebsd14/mod.rs":"d536725067b3a85fc57929d95
a85dfdbe593c9fbe72c246f4ab855f602074fe5","src/unix/bsd/freebsdlike/freebsd/freebsd14/x86_64.rs":"d6e66809e109dc779efe5584b79d34fcd6fdba91973a415d64ae66f481f45940","src/unix/bsd/freebsdlike/freebsd/freebsd15/mod.rs":"9835c3374c2daea20de73ab7896044bc1de14f6cd0711df9d47d4e3a001c4ded","src/unix/bsd/freebsdlike/freebsd/freebsd15/x86_64.rs":"d6e66809e109dc779efe5584b79d34fcd6fdba91973a415d64ae66f481f45940","src/unix/bsd/freebsdlike/freebsd/mod.rs":"69818c7db4f93d1a2e61f414d433fb9c1007dcde11260a69f808c8b7b92834be","src/unix/bsd/freebsdlike/freebsd/powerpc.rs":"809148c48a16cef7de40378c9322a5795b40fec8c7eeaccb20df44a3c1c77c1d","src/unix/bsd/freebsdlike/freebsd/powerpc64.rs":"8ec51f2eb1eae1504b743fc22b0c498c1a1c11bbf17f8199d9d3a6af2ab108ca","src/unix/bsd/freebsdlike/freebsd/riscv64.rs":"4e8e313c3a3736fbb663c26bc445684e89c91092870dc896848df13f8ef89cfe","src/unix/bsd/freebsdlike/freebsd/x86.rs":"a3a17037050ba9f2314574a2cec8ff1962d08561e60616a3f170ba256371ff0b","src/unix/bsd/freebsdlike/freebsd/x86_64/mod.rs":"b79601d4f5f297f2aafb17aa4ed69f128d1668eda5c59866c3eae498034403aa","src/unix/bsd/freebsdlike/mod.rs":"4acf3a50e2944b0e7a35b5a6a4cae40e61283f62461b598d9efb252045aeaa8d","src/unix/bsd/mod.rs":"e05f88aabb6ca6c3f388d314a4235894734f8a0a3c0ea87f748ea7de4132a70b","src/unix/bsd/netbsdlike/mod.rs":"fa5c797fb3f57b637284ed725c10f63827f0de008edd326341dc56c1638610c7","src/unix/bsd/netbsdlike/netbsd/aarch64.rs":"ba2425edbf025f13ab3c3534d1671d310392df3a7d237539817d9cfa675971c8","src/unix/bsd/netbsdlike/netbsd/arm.rs":"f498ac1c11d0ebf6ee2c23cddb573c2358dcb5191924bd96e1bbc86870a86407","src/unix/bsd/netbsdlike/netbsd/mips.rs":"20cdd8d1427c986ecc3fcf7960d337917a13cfd8386dd2d54f8693a23d60892f","src/unix/bsd/netbsdlike/netbsd/mod.rs":"461feb5dc8dddea353ab6d32519045bc9f16525a48449c93dc7148469d94c9c3","src/unix/bsd/netbsdlike/netbsd/powerpc.rs":"c19c4edbc73b5a97b51e3e2ad39b9fee02ad15e80c70ceb3a1abfe977e5c0ead","src/unix/bsd/netbsdlike/netbsd/riscv64.rs":"efa1a156cff1ab2450439adbb3ab2113bed6b7de2205c99e9cba875aa2b1c153","src/unix/bsd/netbsdlike/netbsd/sparc64.rs":"d50816e830225779ac9e9a55a7e3e097882153d72987061d76a96ee736c8af9c","src/unix/bsd/netbsdlike/netbsd/x86.rs":"3006b6a086c0241f5383ca101e7b9357368d713f9c38400633491656d110798e","src/unix/bsd/netbsdlike/netbsd/x86_64.rs":"cb864e23a32eff1bf37563218cf6ce7dac8d028584c385107c84562cf1d87866","src/unix/bsd/netbsdlike/openbsd/aarch64.rs":"3960096fb915d2f75015e1706720d4cd0044938bcfe6727b097751b4c47df6a5","src/unix/bsd/netbsdlike/openbsd/arm.rs":"f064d935f416ca9f7e5e767b9b46da2250c997d667c0c7f4b4c7dfe02d0258c3","src/unix/bsd/netbsdlike/openbsd/mips64.rs":"bee7664d88f8451ae22552fc0721b6b6a6dee2493cc42bcb9829c1e47e4b05f5","src/unix/bsd/netbsdlike/openbsd/mod.rs":"154badb82f62c726fa7a0c320c5934bf459752262dcd101d43c2b3d3afb58cc4","src/unix/bsd/netbsdlike/openbsd/powerpc.rs":"f064d935f416ca9f7e5e767b9b46da2250c997d667c0c7f4b4c7dfe02d0258c3","src/unix/bsd/netbsdlike/openbsd/powerpc64.rs":"1f62a42e2970c42de9e3492fbf3cd5b45410889f033743579266342d1a9e2a00","src/unix/bsd/netbsdlike/openbsd/riscv64.rs":"c93baaf8e3afa8c79a1acb03234b0bb85b148a706481de909528513f45afa136","src/unix/bsd/netbsdlike/openbsd/sparc64.rs":"8d4c5a4cae63e09e1c156164ddc82e0fc77926841d4d4e419dd2e7a7b7145f58","src/unix/bsd/netbsdlike/openbsd/x86.rs":"e6da2fdff7706fd3eac147d3aaf16afdd8542f231f502660d1d89c79b5eca21b","src/unix/bsd/netbsdlike/openbsd/x86_64.rs":"89be4988c6acca7ce411aa2907401b9fed1ffce6ad606cc150683f1e136cba94","src/unix/cygwin/mod.rs":"c98bb7c1118c249f2e7533c68b0d77bd778a1050ab52aab603a03151061e084f","sr
c/unix/haiku/b32.rs":"c3f8678ceee65a3094d8133b0d1a94470860e0b1867977f0569c52c5a20e039f","src/unix/haiku/b64.rs":"f97ce9225f4710893dab03ab3e13bc62152cc84f90c597ec88f6dc1f4c27d242","src/unix/haiku/bsd.rs":"4d9af31fdac2561ee5f942dca97dd2f48139ca74660d40b854b307fa5679d1c8","src/unix/haiku/mod.rs":"a1e1ab46a354da23a8348331d014b184a3f3b9d7fec8ced4c6efede9f5c38a45","src/unix/haiku/native.rs":"8248c0491d62ed96b5c2707a998f8d13cf2a49f2d06fa724848863860cb40e69","src/unix/haiku/x86_64.rs":"09f2384474b2fcb7d0febb0e9e802610a627cadca29dc0e60eb4cfe15552f978","src/unix/hurd/b32.rs":"501f426f7aeb60acf4119064100dd94bbdfebff2cec785428708059d853dc123","src/unix/hurd/b64.rs":"b9b2082e721a5ec89ba55fd5a16bbffcc8a05ca7cef6dbfbd78aff0806cb931f","src/unix/hurd/mod.rs":"b09754b468b78b64463cbf6d5d50ffba76e27efe66bb1e5020054627d751d98a","src/unix/linux_like/android/b32/arm.rs":"e68f6a15870a22e0383770ed1a5bd443d4c2ed237d16fea338c5da1ab9bf1fe3","src/unix/linux_like/android/b32/mod.rs":"5b10ebe56435d868846ae720bb9081cf814486722b5c13520fd4ef50a7ecfb58","src/unix/linux_like/android/b32/x86/mod.rs":"52f402bc27e3ddc519cd2699205bc0f31ba9737f89772d26c07e9c28a7f35762","src/unix/linux_like/android/b64/aarch64/mod.rs":"6d4fcf287ee09d65cfd8d8d9e2b551185f1cf9d90072922b9f703d2871deb036","src/unix/linux_like/android/b64/mod.rs":"04346a4a75b7cf20992eed6a2cfb986d38daf539208c462c10a7ccf3cd516068","src/unix/linux_like/android/b64/riscv64/mod.rs":"10705a5608bc761718ed54ce6dcc2a83c8aa9300337c4f9a67152637dc8d3b11","src/unix/linux_like/android/b64/x86_64/mod.rs":"7243327f35f4f4e59642c9015ee65a13fbc61618fb8ca615580f7f84a3b72e45","src/unix/linux_like/android/mod.rs":"a75260c2c9951ab305559d249a9586167d609a473ecf1d568c876a428d866da1","src/unix/linux_like/emscripten/lfs64.rs":"3a1d1779bcf16525a578a68237f9854090eae4c9819e18ffb5a288f39be4afbe","src/unix/linux_like/emscripten/mod.rs":"fab8e539c9681b444e96cc46eed01fd2640347ff0499b82f238ccec796f15175","src/unix/linux_like/linux/arch/generic/mod.rs":"c8f4d88ba7ffe044c47fc84ca1e21751bfd2446806ccabbe8729958cbb5d1ccc","src/unix/linux_like/linux/arch/mips/mod.rs":"058ebf07f8b10358af9a7f66bd96ba14df1cc6a942203da9abe8d1abab00fcbb","src/unix/linux_like/linux/arch/mod.rs":"8bc5898b03760a95dd4f124ac76ad92e5ae36b2d0644203d752ef2b37e487c3a","src/unix/linux_like/linux/arch/powerpc/mod.rs":"0e20b7e63fe39a200cb4813eeb934bc25d91a2427cd1b1d81bc2cfa4e2368ed5","src/unix/linux_like/linux/arch/sparc/mod.rs":"96ed29a86c629657c521a5e12dece22e570ef7ceee9e8f4a58c2e0782d74e01d","src/unix/linux_like/linux/gnu/b32/arm/mod.rs":"0ab9b524f4e4182eb92ac40c7b5101ce73509aa2d483eab58eef212005c21497","src/unix/linux_like/linux/gnu/b32/csky/mod.rs":"8fdab3a121a111b9856f0de29fe75262d8aa5f1a3d75b273cc72c8c809e87c48","src/unix/linux_like/linux/gnu/b32/m68k/mod.rs":"4c79cca606495e3d98f386a0f8d447f3f281df5ade019380b9018f05999bf849","src/unix/linux_like/linux/gnu/b32/mips/mod.rs":"981838f4092d4e3e343087903246ad7cfd7dc1a9fb1fc854419b744ed395d742","src/unix/linux_like/linux/gnu/b32/mod.rs":"050fa9856b151b3e33214b570cf7527eca11eaa9145d40a1524824b5232b2500","src/unix/linux_like/linux/gnu/b32/powerpc.rs":"7c3b9aad8856408517e056bdcfde877ca9d4529b9c39ffada70b56cdf244c403","src/unix/linux_like/linux/gnu/b32/riscv32/mod.rs":"4bdfd096759a489c0fcbfb1f38ff5c364bbe3fa3f6f3844a43486f5fb2e1eb24","src/unix/linux_like/linux/gnu/b32/sparc/mod.rs":"d8fc8800d01891bb9fd652008817eba58bf9fa823a0cac658bc252ac53885222","src/unix/linux_like/linux/gnu/b32/x86/mod.rs":"ce42dc6c6b620f898d924fdb26895b7b832a4f38e838059f99f663bda2cb3e69","src/unix/linux_like/linux/gnu/b64/aarch
64/ilp32.rs":"638b2d717dcf5b59a0058c3dabab43edd84de9d1d7da6e16ad0c58e863000417","src/unix/linux_like/linux/gnu/b64/aarch64/lp64.rs":"28c11e70467b2f00735d3a04c369e96e04fd44d0587ee33359f38328b0677ee6","src/unix/linux_like/linux/gnu/b64/aarch64/mod.rs":"9bfab4e70363d559f76600cbb38659f8e204bea14b165e0054118d34ad3f94ff","src/unix/linux_like/linux/gnu/b64/loongarch64/mod.rs":"f2d8b176c64d791a5a34b7753cab2f50e34cd450e6819bf0b180bd3c6a9d9771","src/unix/linux_like/linux/gnu/b64/mips64/mod.rs":"3fac2105995a594d66e39912f4613ed67b1046a0fc11de97487306337729dccb","src/unix/linux_like/linux/gnu/b64/mod.rs":"30d1286c6b53a8c1cc090921a4192d5c05c7dbe0a7ff1aa4577774f9db934515","src/unix/linux_like/linux/gnu/b64/powerpc64/mod.rs":"285c465bd0cb1e66c2907306d560316b561732bca8ed2008eba370f48a0f957f","src/unix/linux_like/linux/gnu/b64/riscv64/mod.rs":"cd5b8088ddb38bbee6cd8f293f4dd8e0970a5c04d25b4bb4f4c8a5da61a09029","src/unix/linux_like/linux/gnu/b64/s390x.rs":"64fba1b75736ef6e22c11751c9daa3abd61af47b27da641d655e9ebb04b0f507","src/unix/linux_like/linux/gnu/b64/sparc64/mod.rs":"ef87054c07622d4f53401e72a9a937331d573802c7bfa707761097af8c47a968","src/unix/linux_like/linux/gnu/b64/x86_64/mod.rs":"deb7d1bb4639e0adaba2ad63636bc30df7d5318d934e514c0273fa08051eb5b2","src/unix/linux_like/linux/gnu/b64/x86_64/not_x32.rs":"07240332eaf44996d86e37b12d71b519b499c9c9b57898441c58ac2e8c0cb6f7","src/unix/linux_like/linux/gnu/b64/x86_64/x32.rs":"914898b781dfe6b2f755730d6000223d1beea177731e180ccbfdd84a0b8b3bd9","src/unix/linux_like/linux/gnu/mod.rs":"606d323e8aa2c14b22a54f9fc8720ae237df583ae1ba9e5bfe2249663ad02c5c","src/unix/linux_like/linux/mod.rs":"4ab0a27762842a59a5e59f9029d8a49527de4226c5d6e24156f316d7f52a4284","src/unix/linux_like/linux/musl/b32/arm/mod.rs":"8df7c7015240f62151363a0a545fb3be96e5d816b62ef673d84294e87e9bb9ea","src/unix/linux_like/linux/musl/b32/hexagon.rs":"1b0c68839dc46d00010d99e946f356d50dc4ad1c7468f99a8afe839f9542ebd0","src/unix/linux_like/linux/musl/b32/mips/mod.rs":"12c57cfa8eae992b3764114ba6868cd729995d71786029aa0f775086e935065b","src/unix/linux_like/linux/musl/b32/mod.rs":"e0f53df7ca1dbe9b0b25ccecf1adf664227995e58d67a222a8d046d2a879dfc8","src/unix/linux_like/linux/musl/b32/powerpc.rs":"92089167ddbe1fde8663373699ee16c7b01c451c442c27ab1a392582c992dc32","src/unix/linux_like/linux/musl/b32/riscv32/mod.rs":"f13543de5c3b4f8c23d9c9f4f3ad90f514be122d2707f7f781c696261cb11f91","src/unix/linux_like/linux/musl/b32/x86/mod.rs":"264aafdd2d3dbbf57764a671797ca0ef53baee4737f7acf09db4f14a5d13831d","src/unix/linux_like/linux/musl/b64/aarch64/mod.rs":"5ba43a3198d9dff45bc411925353674f1e4cef29eace6b2b1cb6cad070680308","src/unix/linux_like/linux/musl/b64/loongarch64/mod.rs":"294414bcb24a5b59335e49d00f6285b926bd352df20dcff4f935ae094d21495e","src/unix/linux_like/linux/musl/b64/mips64.rs":"d448cf011098728d32eab0b212696063c3365ef5e94fd5e26f8861a3efdbb097","src/unix/linux_like/linux/musl/b64/mod.rs":"e3055a6690ed1dc63b865957be649bfa165852c693e2a387c2c627939157a773","src/unix/linux_like/linux/musl/b64/powerpc64.rs":"03552edded40fccc52c8259af289cbeb074482c1337ef0c32c3cfff81bd3d537","src/unix/linux_like/linux/musl/b64/riscv64/mod.rs":"e13c6430f950035f94989771122c733866651194e7218c7d0b243ae04ef7c864","src/unix/linux_like/linux/musl/b64/s390x.rs":"30f9ac1527e49a57f6d829e54a82ca48e7a1b74507904e6c89a13f933da30ff5","src/unix/linux_like/linux/musl/b64/wasm32/mod.rs":"2d2a01fd01b372ebf1ff63d23072ae548f8a392f443f41a455e0bfb6a8705b70","src/unix/linux_like/linux/musl/b64/wasm32/wali.rs":"69e0d06289f1c86898ef3ab505e397af2acce146accb62efff654fe458b6af02"
,"src/unix/linux_like/linux/musl/b64/x86_64/mod.rs":"95b8adc3443988aa4908e8c1d8c8c0a1619a7636e8ea286edd95ec815d6cd5d2","src/unix/linux_like/linux/musl/lfs64.rs":"308c5b5c9b2f4b1ad17e5c2d02946f84ae83e8f5cb0e789d8d3e76c1923a5d31","src/unix/linux_like/linux/musl/mod.rs":"d8afb6167cab328d65ce2a755a5a4559de30173dd4d09bb90001734a15437730","src/unix/linux_like/linux/uclibc/arm/mod.rs":"a90c7811623714e168b676aa50b162931e66ce86f8c59b0acac131afde474b2c","src/unix/linux_like/linux/uclibc/mips/mips32/mod.rs":"59493f1ab84ddbcf9dc5881c9cfc26e28d4fb5322d63f60eb7de5f9e8e329580","src/unix/linux_like/linux/uclibc/mips/mips64/mod.rs":"a35532d5ae376f403873aa566f37bff99c6c323d334f3201667e5f7200b04643","src/unix/linux_like/linux/uclibc/mips/mod.rs":"e552f579a8dc9e690891113aa6360607ad962bd84cbb60c370b5c5f7c7d1d8c0","src/unix/linux_like/linux/uclibc/mod.rs":"128d586702c6aa6f1d1c56342f855d62550063ea7df97c6c16abdb01fd6bf94e","src/unix/linux_like/linux/uclibc/x86_64/l4re.rs":"f29e4a969f0bf7359b984e17335cc957f980a70b49912c032fd762dd05743081","src/unix/linux_like/linux/uclibc/x86_64/mod.rs":"be62714a2ff04387093145a3d765585eaaac71e4fb53d1983546a57940fa2ce4","src/unix/linux_like/linux/uclibc/x86_64/other.rs":"12f8d4049862fc0c4d94b770f2d0341c1c7bf3da0619436169c12cadc4093def","src/unix/linux_like/mod.rs":"537ab6b4af3685a71487e31491d82cc2a08e8d4ce4a9dc88c6eeb29ae1daf5b7","src/unix/mod.rs":"4f6c804705ede5fa221cce2a9b23a69daaeccdbf390647a4d9f4e94aa462082d","src/unix/newlib/aarch64/mod.rs":"ec594c54dc0e7784668d02ef737fd194dcc3f1e6ee23328d810fd2453bcb6f20","src/unix/newlib/arm/mod.rs":"a1fb6caa077c2ed69adf12da07c814ffab4c1311579f23bae2b26a40cf180665","src/unix/newlib/espidf/mod.rs":"77e8ad5b7db027b8b0b5aa5126f15bc0e35b6f3deb2339acf403c961f13df26f","src/unix/newlib/generic.rs":"182e584f14e0984934130425dd2be0781ca2c131449b3ae1734a07c72c3d43cd","src/unix/newlib/horizon/mod.rs":"9ea04f90566fc14fcfd4ec5bd7c1ef467d6b26ce80bda73f4eec2fe7b652e816","src/unix/newlib/mod.rs":"18def44ab6d32cc50cb89242a1ef9edfa0ffe8010cafb27aaef8ebb970696dea","src/unix/newlib/powerpc/mod.rs":"4e5f804a13e907e17ebb66dcbf3b0fe6e1a611f91876aad8d8a0a69c7df0a7e8","src/unix/newlib/rtems/mod.rs":"6e26c8d4ce78557b3d0eef33f0057e46545c655612c7d86c38bb759f5e032788","src/unix/newlib/vita/mod.rs":"20fd016df6c8aa9097ab3410c5efd187a2f2a202b5e7c0e0ee67714036108329","src/unix/nto/aarch64.rs":"73ad54ebca13454a75c7b0815e853175070a8ac2eefa338012a03e8b59f01e0c","src/unix/nto/mod.rs":"a5219667280d9664a382b91dde8374f9959252a402d1b85dd3577957f4bf88b9","src/unix/nto/neutrino.rs":"2cef6af9943eec590b2b0af96a63bc3169e9d2af5c7713e3360eb09a807f248a","src/unix/nto/x86_64.rs":"8da99138e210516a95d49c8c0265eada4c5f7b93d59be86224844410f5e7929b","src/unix/nuttx/mod.rs":"137c69eca97ba9e0ca61baf6b9dafc11d68f07a1f5de527f9ff3fdc30e3f1ca9","src/unix/redox/mod.rs":"7a5b62cdb08d8eae9c871d9d3158cedf8a603c50716263228bf0a1568daf32c3","src/unix/solarish/compat.rs":"4346fbe9f8640868ac20b63bf3b52f883a37587e1df15ffe54fa0393a48a5588","src/unix/solarish/illumos.rs":"c6305f2555bc542dd63ac0edbc8e517f65a7a870ef9c406d0809d25c6c32276c","src/unix/solarish/mod.rs":"fd370036f3b0a198369104d426692ea4d1d4b9905ad3c15d61caec38e908dd02","src/unix/solarish/solaris.rs":"4045113ee68a9e29f6e2211dbfabe7fd423b21e7b882a982a589719d2c437657","src/unix/solarish/x86.rs":"44261c1f1b300dac9fa0dab93ec85d0c3b3c48b15bc4515b9820c9421cff7427","src/unix/solarish/x86_64.rs":"d888cd12da647f543df8cce7ae04e4a67f8647f71fd14cf7b4f968dbafcd4f5e","src/unix/solarish/x86_common.rs":"4ae02d88622f7f080f5e8cd328f13187edbc5e124fb3e05e4cf212597f6cce48","sr
c/vxworks/aarch64.rs":"4d4236500f98858fc249f3b6858af5009851c8c582031926b8195b2646f7da5e","src/vxworks/arm.rs":"4d4236500f98858fc249f3b6858af5009851c8c582031926b8195b2646f7da5e","src/vxworks/mod.rs":"29474e4025c3bcccc1aa63407928d58623ea7337aa74c325ca8fb01248d52256","src/vxworks/powerpc.rs":"4d4236500f98858fc249f3b6858af5009851c8c582031926b8195b2646f7da5e","src/vxworks/powerpc64.rs":"4d4236500f98858fc249f3b6858af5009851c8c582031926b8195b2646f7da5e","src/vxworks/riscv32.rs":"b1f933205800f0da00f975d53b18fe0035e075cc4613acf110a09a277dc3302a","src/vxworks/riscv64.rs":"b1f933205800f0da00f975d53b18fe0035e075cc4613acf110a09a277dc3302a","src/vxworks/x86.rs":"b1f933205800f0da00f975d53b18fe0035e075cc4613acf110a09a277dc3302a","src/vxworks/x86_64.rs":"b1f933205800f0da00f975d53b18fe0035e075cc4613acf110a09a277dc3302a","src/wasi/mod.rs":"2d15648f99fe90cff9076f1ad93c9dffe04051a4f350e89ca4e513a20e97933c","src/wasi/p2.rs":"feecc0485eabd2c32bc5d800df6ad1b9b4d282741342fb08792f2635204e1e08","src/windows/gnu/mod.rs":"f8c154637cd4b9b5b35b197373d67742d0678abb5f674905897a00029785c455","src/windows/mod.rs":"455795a86354420b1151f35d0ec6ab75e165adf4abbd9111352e9d44edb20634","src/windows/msvc/mod.rs":"7bc0f1e7e73815296cd6b63b2700e12624e9f47b5c4113a1a87fae8e64549c00","src/xous.rs":"1a83621c40248ad4d0c08e1fd4c1107d5efcbc2f4f0169538b7b4a885abedbfa","tests/const_fn.rs":"8ac3171d7bced3576a4e93f48570b3e00c553d7510ab85a7473ae3b716a812dc"},"package":"2874a2af47a2325c2001a6e6fad9b16a53b802102b528163885171cf92b15976"} \ No newline at end of file diff --git a/vendor/libc/.cargo_vcs_info.json b/vendor/libc/.cargo_vcs_info.json new file mode 100644 index 00000000000000..322f793c32fdc9 --- /dev/null +++ b/vendor/libc/.cargo_vcs_info.json @@ -0,0 +1,6 @@ +{ + "git": { + "sha1": "9f598d245e18ecb243118cfde095f24598ec9d5b" + }, + "path_in_vcs": "" +} \ No newline at end of file diff --git a/vendor/libc/.editorconfig b/vendor/libc/.editorconfig new file mode 100644 index 00000000000000..155c9905f91e13 --- /dev/null +++ b/vendor/libc/.editorconfig @@ -0,0 +1,7 @@ +[*.sh] +# See https://github.com/mvdan/sh/blob/master/cmd/shfmt/shfmt.1.scd#examples +indent_style = space +indent_size = 4 + +switch_case_indent = true +space_redirects = true diff --git a/vendor/libc/.git-blame-ignore-revs b/vendor/libc/.git-blame-ignore-revs new file mode 100644 index 00000000000000..d358a2cd3d3498 --- /dev/null +++ b/vendor/libc/.git-blame-ignore-revs @@ -0,0 +1,6 @@ +# Format macro bodies +50f26e08e146b7e9c7d1af9614486eba327d1e31 + +# Automated changes related to the 2021 edition upgrade +643182f7da26cedb09349b8bb3735c2e58ba24e6 +108310db03e7db35ef48a902d9ce9a88ab8f9b77 diff --git a/vendor/libc/.release-plz.toml b/vendor/libc/.release-plz.toml new file mode 100644 index 00000000000000..6442af58ad98e9 --- /dev/null +++ b/vendor/libc/.release-plz.toml @@ -0,0 +1,49 @@ +[workspace] +git_release_name = "{{ version }}" +git_tag_name = "{{ version }}" + +[changelog] +body = """ +## [{{ version | trim_start_matches(pat="v") }}]\ + {%- if release_link -%}\ + ({{ release_link }})\ + {% endif %} \ + - {{ timestamp | date(format="%Y-%m-%d") }} +{% for group, commits in commits | group_by(attribute="group") %} +### {{ group | upper_first }} + {% for commit in commits %} + - {% if commit.scope -%}{{ commit.scope | upper_first }}: {% endif %} + {%- if commit.breaking %}[**breaking**] {% endif %} + {{- commit.message }} + {%- if commit.links %} ([{{ commit.links.1.text }}]({{ commit.links.1.href }})){% endif -%} + {% endfor %} +{% endfor %} +{%- if github -%} +{% if 
github.contributors | filter(attribute="is_first_time", value=true) | length != 0 %} + ## New Contributors ❤️ +{% endif %}\ +{% for contributor in github.contributors | filter(attribute="is_first_time", value=true) %} + * @{{ contributor.username }} made their first contribution + {%- if contributor.pr_number %} in \ + [#{{ contributor.pr_number }}]({{ self::remote_url() }}/pull/{{ contributor.pr_number }}) \ + {%- endif %} +{%- endfor -%} +{%- endif %} +""" + +commit_parsers = [ + { message = '(?i)^(\w+: )?feat', group = "added" }, + { message = '(?i)^(\w+: )?add', group = "added" }, + { message = '(?i)^(\w+: )?change', group = "changed" }, + { message = '(?i)^(\w+: )?cleanup', group = "cleanup" }, + { message = '(?i)^(\w+: )?deprecate', group = "deprecated" }, + { message = '(?i)^(\w+: )?remove', group = "removed" }, + { message = '(?i)^(\w+: )?fix', group = "fixed" }, + { message = '(?i)^(\w+: )?fix', group = "fixed" }, + { message = '^.*', group = "other" }, +] + +link_parsers = [ + # Extract backport patterns + { pattern = '\(backport <.*/(\d+)>\)', text = "#$1", href = "https://github.com/rust-lang/libc/pull/$1"} +] diff --git a/vendor/libc/CHANGELOG.md b/vendor/libc/CHANGELOG.md new file mode 100644 index 00000000000000..e9b726cf197904 --- /dev/null +++ b/vendor/libc/CHANGELOG.md @@ -0,0 +1,747 @@ +# Changelog + +## [0.2.177](https://github.com/rust-lang/libc/compare/0.2.176...0.2.177) - 2025-10-09 + +### Added + +- Apple: Add `TIOCGETA`, `TIOCSETA`, `TIOCSETAW`, `TIOCSETAF` constants ([#4736](https://github.com/rust-lang/libc/pull/4736)) +- Apple: Add `pthread_cond_timedwait_relative_np` ([#4719](https://github.com/rust-lang/libc/pull/4719)) +- BSDs: Add `_CS_PATH` constant ([#4738](https://github.com/rust-lang/libc/pull/4738)) +- Linux-like: Add `SIGEMT` for mips* and sparc* architectures ([#4730](https://github.com/rust-lang/libc/pull/4730)) +- OpenBSD: Add `elf_aux_info` ([#4729](https://github.com/rust-lang/libc/pull/4729)) +- Redox: Add more sysconf constants ([#4728](https://github.com/rust-lang/libc/pull/4728)) +- Windows: Add `wcsnlen` ([#4721](https://github.com/rust-lang/libc/pull/4721)) + +### Changed + +- WASIP2: Invert conditional to include p2 APIs ([#4733](https://github.com/rust-lang/libc/pull/4733)) + +## [0.2.176](https://github.com/rust-lang/libc/compare/0.2.175...0.2.176) - 2025-09-23 + +### Support + +- The default FreeBSD version has been raised from 11 to 12. This matches `rustc` since 1.78. ([#2406](https://github.com/rust-lang/libc/pull/2406)) +- `Debug` is now always implemented, rather than being gated behind the `extra_traits` feature. ([#4624](https://github.com/rust-lang/libc/pull/4624)) + +### Added + +- AIX: Restore some non-POSIX functions guarded by the `_KERNEL` macro. 
([#4607](https://github.com/rust-lang/libc/pull/4607)) +- FreeBSD 14: Add `st_fileref` to `struct stat` ([#4642](https://github.com/rust-lang/libc/pull/4642)) +- Haiku: Add the `accept4` POSIX call ([#4586](https://github.com/rust-lang/libc/pull/4586)) +- Introduce a wrapper for representing padding ([#4632](https://github.com/rust-lang/libc/pull/4632)) +- Linux: Add `EM_RISCV` ([#4659](https://github.com/rust-lang/libc/pull/4659)) +- Linux: Add `MS_NOSYMFOLLOW` ([#4389](https://github.com/rust-lang/libc/pull/4389)) +- Linux: Add `backtrace_symbols(_fd)` ([#4668](https://github.com/rust-lang/libc/pull/4668)) +- Linux: Add missing `SOL_PACKET` optnames ([#4669](https://github.com/rust-lang/libc/pull/4669)) +- Musl s390x: Add `SYS_mseal` ([#4549](https://github.com/rust-lang/libc/pull/4549)) +- NuttX: Add `__errno` ([#4687](https://github.com/rust-lang/libc/pull/4687)) +- Redox: Add `dirfd`, `VDISABLE`, and resource consts ([#4660](https://github.com/rust-lang/libc/pull/4660)) +- Redox: Add more `resource.h`, `fcntl.h` constants ([#4666](https://github.com/rust-lang/libc/pull/4666)) +- Redox: Enable `strftime` and `mkostemp[s]` ([#4629](https://github.com/rust-lang/libc/pull/4629)) +- Unix, Windows: Add `qsort_r` (Unix), and `qsort(_s)` (Windows) ([#4677](https://github.com/rust-lang/libc/pull/4677)) +- Unix: Add `dlvsym` for Linux-gnu, FreeBSD, and NetBSD ([#4671](https://github.com/rust-lang/libc/pull/4671)) +- Unix: Add `sigqueue` ([#4620](https://github.com/rust-lang/libc/pull/4620)) + +### Changed + +- FreeBSD 15: Mark `kinfo_proc` as non-exhaustive ([#4553](https://github.com/rust-lang/libc/pull/4553)) +- FreeBSD: Set the ELF symbol version for `readdir_r` ([#4694](https://github.com/rust-lang/libc/pull/4694)) +- Linux: Correct the config for whether or not `epoll_event` is packed ([#4639](https://github.com/rust-lang/libc/pull/4639)) +- Tests: Replace the old `ctest` with the much more reliable new implementation ([#4655](https://github.com/rust-lang/libc/pull/4655) and many related PRs) + +### Fixed + +- AIX: Fix the type of the 4th arguement of `getgrnam_r` ([#4656](https://github.com/rust-lang/libc/pull/4656 +- FreeBSD: Limit `P_IDLEPROC` to FreeBSD 15 ([#4640](https://github.com/rust-lang/libc/pull/4640)) +- FreeBSD: Limit `mcontext_t::mc_tlsbase` to FreeBSD 15 ([#4640](https://github.com/rust-lang/libc/pull/464)) +- FreeBSD: Update gating of `mcontext_t.mc_tlsbase` ([#4703](https://github.com/rust-lang/libc/pull/4703)) +- Musl s390x: Correct the definition of `statfs[64]` ([#4549](https://github.com/rust-lang/libc/pull/4549)) +- Musl s390x: Make `fpreg_t` a union ([#4549](https://github.com/rust-lang/libc/pull/4549)) +- Redox: Fix the types of `gid_t` and `uid_t` ([#4689](https://github.com/rust-lang/libc/pull/4689)) +- Redox: Fix the value of `MAP_FIXED` ([#4684](https://github.com/rust-lang/libc/pull/4684)) + +### Deprecated + +- Apple: Correct the `deprecated` attribute for `iconv` ([`a97a0b53`](https://github.com/rust-lang/libc/commit/a97a0b53fb7faf5f99cd720ab12b1b8a5bf9f950)) +- FreeBSD: Deprecate `TIOCMGDTRWAIT` and `TIOCMSDTRWAIT` ([#4685](https://github.com/rust-lang/libc/pull/4685)) + +### Removed + +- FreeBSD: Remove `JAIL_{GET,SET}_MASK`, `_MC_FLAG_MASK` ([#4691](https://github.com/rust-lang/libc/pull/4691)) + +## [0.2.175](https://github.com/rust-lang/libc/compare/0.2.174...0.2.175) - 2025-08-10 + +### Added + +- AIX: Add `getpeereid` ([#4524](https://github.com/rust-lang/libc/pull/4524)) +- AIX: Add `struct ld_info` and friends 
([#4578](https://github.com/rust-lang/libc/pull/4578)) +- AIX: Retore `struct winsize` ([#4577](https://github.com/rust-lang/libc/pull/4577)) +- Android: Add UDP socket option constants ([#4619](https://github.com/rust-lang/libc/pull/4619)) +- Android: Add `CLONE_CLEAR_SIGHAND` and `CLONE_INTO_CGROUP` ([#4502](https://github.com/rust-lang/libc/pull/4502)) +- Android: Add more `prctl` constants ([#4531](https://github.com/rust-lang/libc/pull/4531)) +- FreeBSD Add further TCP stack-related constants ([#4196](https://github.com/rust-lang/libc/pull/4196)) +- FreeBSD x86-64: Add `mcontext_t.mc_tlsbase ` ([#4503](https://github.com/rust-lang/libc/pull/4503)) +- FreeBSD15: Add `kinfo_proc.ki_uerrmsg` ([#4552](https://github.com/rust-lang/libc/pull/4552)) +- FreeBSD: Add `in_conninfo` ([#4482](https://github.com/rust-lang/libc/pull/4482)) +- FreeBSD: Add `xinpgen` and related types ([#4482](https://github.com/rust-lang/libc/pull/4482)) +- FreeBSD: Add `xktls_session` ([#4482](https://github.com/rust-lang/libc/pull/4482)) +- Haiku: Add functionality from `libbsd` ([#4221](https://github.com/rust-lang/libc/pull/4221)) +- Linux: Add `SECBIT_*` ([#4480](https://github.com/rust-lang/libc/pull/4480)) +- NetBSD, OpenBSD: Export `ioctl` request generator macros ([#4460](https://github.com/rust-lang/libc/pull/4460)) +- NetBSD: Add `ptsname_r` ([#4608](https://github.com/rust-lang/libc/pull/4608)) +- RISCV32: Add time-related syscalls ([#4612](https://github.com/rust-lang/libc/pull/4612)) +- Solarish: Add `strftime*` ([#4453](https://github.com/rust-lang/libc/pull/4453)) +- linux: Add `EXEC_RESTRICT_*` and `EXEC_DENY_*` ([#4545](https://github.com/rust-lang/libc/pull/4545)) + +### Changed + +- AIX: Add `const` to signatures to be consistent with other platforms ([#4563](https://github.com/rust-lang/libc/pull/4563)) + +### Fixed + +- AIX: Fix the type of `struct statvfs.f_fsid` ([#4576](https://github.com/rust-lang/libc/pull/4576)) +- AIX: Fix the type of constants for the `ioctl` `request` argument ([#4582](https://github.com/rust-lang/libc/pull/4582)) +- AIX: Fix the types of `stat{,64}.st_*tim` ([#4597](https://github.com/rust-lang/libc/pull/4597)) +- AIX: Use unique `errno` values ([#4507](https://github.com/rust-lang/libc/pull/4507)) +- Build: Fix an incorrect `target_os` -> `target_arch` check ([#4550](https://github.com/rust-lang/libc/pull/4550)) +- FreeBSD: Fix the type of `xktls_session_onedir.ifnet` ([#4552](https://github.com/rust-lang/libc/pull/4552)) +- Mips64 musl: Fix the type of `nlink_t` ([#4509](https://github.com/rust-lang/libc/pull/4509)) +- Mips64 musl: Use a special MIPS definition of `stack_t` ([#4528](https://github.com/rust-lang/libc/pull/4528)) +- Mips64: Fix `SI_TIMER`, `SI_MESGQ` and `SI_ASYNCIO` definitions ([#4529](https://github.com/rust-lang/libc/pull/4529)) +- Musl Mips64: Swap the order of `si_errno` and `si_code` in `siginfo_t` ([#4530](https://github.com/rust-lang/libc/pull/4530)) +- Musl Mips64: Use a special MIPS definition of `statfs` ([#4527](https://github.com/rust-lang/libc/pull/4527)) +- Musl: Fix the definition of `fanotify_event_metadata` ([#4510](https://github.com/rust-lang/libc/pull/4510)) +- NetBSD: Correct `enum fae_action` to be `#[repr(C)]` ([#60a8cfd5](https://github.com/rust-lang/libc/commit/60a8cfd564f83164d45b9533ff7a0d7371878f2a)) +- PSP: Correct `char` -> `c_char` ([eaab4fc3](https://github.com/rust-lang/libc/commit/eaab4fc3f05dc646a953d4fd5ba46dfa1f8bd6f6)) +- PowerPC musl: Fix `termios` definitions 
([#4518](https://github.com/rust-lang/libc/pull/4518)) +- PowerPC musl: Fix the definition of `EDEADLK` ([#4517](https://github.com/rust-lang/libc/pull/4517)) +- PowerPC musl: Fix the definition of `NCCS` ([#4513](https://github.com/rust-lang/libc/pull/4513)) +- PowerPC musl: Fix the definitions of `MAP_LOCKED` and `MAP_NORESERVE` ([#4516](https://github.com/rust-lang/libc/pull/4516)) +- PowerPC64 musl: Fix the definition of `shmid_ds` ([#4519](https://github.com/rust-lang/libc/pull/4519)) + +### Deprecated + +- Linux: `MAP_32BIT` is only defined on x86 on non-x86 architectures ([#4511](https://github.com/rust-lang/libc/pull/4511)) + +### Removed + +- AIX: Remove duplicate constant definitions `FIND` and `ENTER` ([#4588](https://github.com/rust-lang/libc/pull/4588)) +- s390x musl: Remove `O_FSYNC` ([#4515](https://github.com/rust-lang/libc/pull/4515)) +- s390x musl: Remove `RTLD_DEEPBIND` ([#4515](https://github.com/rust-lang/libc/pull/4515)) + + +## [0.2.174](https://github.com/rust-lang/libc/compare/0.2.173...0.2.174) - 2025-06-17 + +### Added + +- Linux: Make `pidfd_info` fields pub ([#4487](https://github.com/rust-lang/libc/pull/4487)) + +### Fixed + +- Gnu x32: Add missing `timespec.tv_nsec` ([#4497](https://github.com/rust-lang/libc/pull/4497)) +- NuttX: Use `nlink_t` type for `st_nlink` in `struct stat` definition ([#4483](https://github.com/rust-lang/libc/pull/4483)) + +### Other + +- Allow new `unpredictable_function_pointer_comparisons` lints ([#4489](https://github.com/rust-lang/libc/pull/4489)) +- OpenBSD: Fix some clippy warnings to use `pointer::cast`. ([#4490](https://github.com/rust-lang/libc/pull/4490)) +- Remove unessecary semicolons from definitions of `CMSG_NXTHDR`. ([#4492](https://github.com/rust-lang/libc/pull/4492)) + + +## [0.2.173](https://github.com/rust-lang/libc/compare/0.2.172...0.2.173) - 2025-06-09 + +### Added + +- AIX: Add an AIX triple to Cargo.toml for doc ([#4475](https://github.com/rust-lang/libc/pull/4475)) +- FreeBSD: Add the `SO_SPLICE` socket option support for FreeBSD >= 14.2 ([#4451](https://github.com/rust-lang/libc/pull/4451)) +- Linux GNU: Prepare for supporting `_TIME_BITS=64` ([#4433](https://github.com/rust-lang/libc/pull/4433)) +- Linux: Add constant PACKET_IGNORE_OUTGOING ([#4319](https://github.com/rust-lang/libc/pull/4319)) +- Linux: Add constants and types for `nsfs` ioctls ([#4436](https://github.com/rust-lang/libc/pull/4436)) +- Linux: Add constants for Memory-Deny-Write-Execute `prctls` ([#4400](https://github.com/rust-lang/libc/pull/4400)) +- Linux: Add constants from `linux/cn_proc.h` and `linux/connector.h` ([#4434](https://github.com/rust-lang/libc/pull/4434)) +- Linux: Add new flags for `pwritev2` and `preadv2` ([#4452](https://github.com/rust-lang/libc/pull/4452)) +- Linux: Add pid_type enum values ([#4403](https://github.com/rust-lang/libc/pull/4403)) +- Linux: Update pidfd constants and types (Linux 6.9-6.15) ([#4402](https://github.com/rust-lang/libc/pull/4402)) +- Loongarch64 musl: Define the `MADV_SOFT_OFFLINE` constant ([#4448](https://github.com/rust-lang/libc/pull/4448)) +- Musl: Add new fields since 1.2.0/1.2.2 to `struct tcp_info` ([#4443](https://github.com/rust-lang/libc/pull/4443)) +- Musl: Prepare for supporting v1.2.3 ([#4443](https://github.com/rust-lang/libc/pull/4443)) +- NuttX: Add `arc4random` and `arc4random_buf` ([#4464](https://github.com/rust-lang/libc/pull/4464)) +- RISC-V Musl: Add `MADV_SOFT_OFFLINE` definition ([#4447](https://github.com/rust-lang/libc/pull/4447)) +- Redox: Define SCM_RIGHTS 
([#4440](https://github.com/rust-lang/libc/pull/4440)) +- VxWorks: Add missing UTIME defines and TASK_RENAME_LENGTH ([#4407](https://github.com/rust-lang/libc/pull/4407)) +- Windows: Add more `time.h` functions ([#4427](https://github.com/rust-lang/libc/pull/4427)) + +### Changed + +- Redox: Update `SA_` constants. ([#4426](https://github.com/rust-lang/libc/pull/4426)) +- Redox: make `CMSG_ALIGN`, `CMSG_LEN`, and `CMSG_SPACE` const functions ([#4441](https://github.com/rust-lang/libc/pull/4441)) + +### Fixed + +- AIX: Enable libc-test and fix definitions/declarations. ([#4450](https://github.com/rust-lang/libc/pull/4450)) +- Emscripten: Fix querying emcc on windows (use emcc.bat) ([#4248](https://github.com/rust-lang/libc/pull/4248)) +- Hurd: Fix build from missing `fpos_t` ([#4472](https://github.com/rust-lang/libc/pull/4472)) +- Loongarch64 Musl: Fix the `struct ipc_perm` bindings ([#4384](https://github.com/rust-lang/libc/pull/4384)) +- Musl: Fix the `O_LARGEFILE` constant value. ([#4443](https://github.com/rust-lang/libc/pull/4443)) + +## [0.2.172](https://github.com/rust-lang/libc/compare/0.2.171...0.2.172) - 2025-04-14 + +### Added + +- Android: Add `getauxval` for 32-bit targets ([#4338](https://github.com/rust-lang/libc/pull/4338)) +- Android: Add `if_tun.h` ioctls ([#4379](https://github.com/rust-lang/libc/pull/4379)) +- Android: Define `SO_BINDTOIFINDEX` ([#4391](https://github.com/rust-lang/libc/pull/4391)) +- Cygwin: Add `posix_spawn_file_actions_add[f]chdir[_np]` ([#4387](https://github.com/rust-lang/libc/pull/4387)) +- Cygwin: Add new socket options ([#4350](https://github.com/rust-lang/libc/pull/4350)) +- Cygwin: Add statfs & fcntl ([#4321](https://github.com/rust-lang/libc/pull/4321)) +- FreeBSD: Add `filedesc` and `fdescenttbl` ([#4327](https://github.com/rust-lang/libc/pull/4327)) +- Glibc: Add unstable support for _FILE_OFFSET_BITS=64 ([#4345](https://github.com/rust-lang/libc/pull/4345)) +- Hermit: Add `AF_UNSPEC` ([#4344](https://github.com/rust-lang/libc/pull/4344)) +- Hermit: Add `AF_VSOCK` ([#4344](https://github.com/rust-lang/libc/pull/4344)) +- Illumos, NetBSD: Add `timerfd` APIs ([#4333](https://github.com/rust-lang/libc/pull/4333)) +- Linux: Add `_IO`, `_IOW`, `_IOR`, `_IOWR` to the exported API ([#4325](https://github.com/rust-lang/libc/pull/4325)) +- Linux: Add `tcp_info` to uClibc bindings ([#4347](https://github.com/rust-lang/libc/pull/4347)) +- Linux: Add further BPF program flags ([#4356](https://github.com/rust-lang/libc/pull/4356)) +- Linux: Add missing INPUT_PROP_XXX flags from `input-event-codes.h` ([#4326](https://github.com/rust-lang/libc/pull/4326)) +- Linux: Add missing TLS bindings ([#4296](https://github.com/rust-lang/libc/pull/4296)) +- Linux: Add more constants from `seccomp.h` ([#4330](https://github.com/rust-lang/libc/pull/4330)) +- Linux: Add more glibc `ptrace_sud_config` and related `PTRACE_*ET_SYSCALL_USER_DISPATCH_CONFIG`. 
([#4386](https://github.com/rust-lang/libc/pull/4386)) +- Linux: Add new netlink flags ([#4288](https://github.com/rust-lang/libc/pull/4288)) +- Linux: Define ioctl codes on more architectures ([#4382](https://github.com/rust-lang/libc/pull/4382)) +- Linux: Add missing `pthread_attr_setstack` ([#4349](https://github.com/rust-lang/libc/pull/4349)) +- Musl: Add missing `utmpx` API ([#4332](https://github.com/rust-lang/libc/pull/4332)) +- Musl: Enable `getrandom` on all platforms ([#4346](https://github.com/rust-lang/libc/pull/4346)) +- NuttX: Add more signal constants ([#4353](https://github.com/rust-lang/libc/pull/4353)) +- QNX: Add QNX 7.1-iosock and 8.0 to list of additional cfgs ([#4169](https://github.com/rust-lang/libc/pull/4169)) +- QNX: Add support for alternative Neutrino network stack `io-sock` ([#4169](https://github.com/rust-lang/libc/pull/4169)) +- Redox: Add more `sys/socket.h` and `sys/uio.h` definitions ([#4388](https://github.com/rust-lang/libc/pull/4388)) +- Solaris: Temporarily define `O_DIRECT` and `SIGINFO` ([#4348](https://github.com/rust-lang/libc/pull/4348)) +- Solarish: Add `secure_getenv` ([#4342](https://github.com/rust-lang/libc/pull/4342)) +- VxWorks: Add missing `d_type` member to `dirent` ([#4352](https://github.com/rust-lang/libc/pull/4352)) +- VxWorks: Add missing signal-related constsants ([#4352](https://github.com/rust-lang/libc/pull/4352)) +- VxWorks: Add more error codes ([#4337](https://github.com/rust-lang/libc/pull/4337)) + +### Deprecated + +- FreeBSD: Deprecate `TCP_PCAP_OUT` and `TCP_PCAP_IN` ([#4381](https://github.com/rust-lang/libc/pull/4381)) + +### Fixed + +- Cygwin: Fix member types of `statfs` ([#4324](https://github.com/rust-lang/libc/pull/4324)) +- Cygwin: Fix tests ([#4357](https://github.com/rust-lang/libc/pull/4357)) +- Hermit: Make `AF_INET = 3` ([#4344](https://github.com/rust-lang/libc/pull/4344)) +- Musl: Fix the syscall table on RISC-V-32 ([#4335](https://github.com/rust-lang/libc/pull/4335)) +- Musl: Fix the value of `SA_ONSTACK` on RISC-V-32 ([#4335](https://github.com/rust-lang/libc/pull/4335)) +- VxWorks: Fix a typo in the `waitpid` parameter name ([#4334](https://github.com/rust-lang/libc/pull/4334)) + +### Removed + +- Musl: Remove `O_FSYNC` on RISC-V-32 (use `O_SYNC` instead) ([#4335](https://github.com/rust-lang/libc/pull/4335)) +- Musl: Remove `RTLD_DEEPBIND` on RISC-V-32 ([#4335](https://github.com/rust-lang/libc/pull/4335)) + +### Other + +- CI: Add matrix env variables to the environment ([#4345](https://github.com/rust-lang/libc/pull/4345)) +- CI: Always deny warnings ([#4363](https://github.com/rust-lang/libc/pull/4363)) +- CI: Always upload successfully created artifacts ([#4345](https://github.com/rust-lang/libc/pull/4345)) +- CI: Install musl from source for loongarch64 ([#4320](https://github.com/rust-lang/libc/pull/4320)) +- CI: Revert "Also skip `MFD_EXEC` and `MFD_NOEXEC_SEAL` on sparc64" ([#]()) +- CI: Use `$PWD` instead of `$(pwd)` in run-docker ([#4345](https://github.com/rust-lang/libc/pull/4345)) +- Solarish: Restrict `openpty` and `forkpty` polyfills to Illumos, replace Solaris implementation with bindings ([#4329](https://github.com/rust-lang/libc/pull/4329)) +- Testing: Ensure the makedev test does not emit unused errors ([#4363](https://github.com/rust-lang/libc/pull/4363)) + +## [0.2.171](https://github.com/rust-lang/libc/compare/0.2.170...0.2.171) - 2025-03-11 + +### Added + +- Android: Add `if_nameindex`/`if_freenameindex` support ([#4247](https://github.com/rust-lang/libc/pull/4247)) +- Apple: Add 
missing proc types and constants ([#4310](https://github.com/rust-lang/libc/pull/4310)) +- BSD: Add `devname` ([#4285](https://github.com/rust-lang/libc/pull/4285)) +- Cygwin: Add PTY and group API ([#4309](https://github.com/rust-lang/libc/pull/4309)) +- Cygwin: Add support ([#4279](https://github.com/rust-lang/libc/pull/4279)) +- FreeBSD: Make `spawn.h` interfaces available on all FreeBSD-like systems ([#4294](https://github.com/rust-lang/libc/pull/4294)) +- Linux: Add `AF_XDP` structs for all Linux environments ([#4163](https://github.com/rust-lang/libc/pull/4163)) +- Linux: Add SysV semaphore constants ([#4286](https://github.com/rust-lang/libc/pull/4286)) +- Linux: Add `F_SEAL_EXEC` ([#4316](https://github.com/rust-lang/libc/pull/4316)) +- Linux: Add `SO_PREFER_BUSY_POLL` and `SO_BUSY_POLL_BUDGET` ([#3917](https://github.com/rust-lang/libc/pull/3917)) +- Linux: Add `devmem` structs ([#4299](https://github.com/rust-lang/libc/pull/4299)) +- Linux: Add socket constants up to `SO_DEVMEM_DONTNEED` ([#4299](https://github.com/rust-lang/libc/pull/4299)) +- NetBSD, OpenBSD, DragonflyBSD: Add `closefrom` ([#4290](https://github.com/rust-lang/libc/pull/4290)) +- NuttX: Add `pw_passwd` field to `passwd` ([#4222](https://github.com/rust-lang/libc/pull/4222)) +- Solarish: define `IP_BOUND_IF` and `IPV6_BOUND_IF` ([#4287](https://github.com/rust-lang/libc/pull/4287)) +- Wali: Add bindings for `wasm32-wali-linux-musl` target ([#4244](https://github.com/rust-lang/libc/pull/4244)) + +### Changed + +- AIX: Use `sa_sigaction` instead of a union ([#4250](https://github.com/rust-lang/libc/pull/4250)) +- Make `msqid_ds.__msg_cbytes` public ([#4301](https://github.com/rust-lang/libc/pull/4301)) +- Unix: Make all `major`, `minor`, `makedev` into `const fn` ([#4208](https://github.com/rust-lang/libc/pull/4208)) + +### Deprecated + +- Linux: Deprecate obsolete packet filter interfaces ([#4267](https://github.com/rust-lang/libc/pull/4267)) + +### Fixed + +- Cygwin: Fix strerror_r ([#4308](https://github.com/rust-lang/libc/pull/4308)) +- Cygwin: Fix usage of f! 
([#4308](https://github.com/rust-lang/libc/pull/4308)) +- Hermit: Make `stat::st_size` signed ([#4298](https://github.com/rust-lang/libc/pull/4298)) +- Linux: Correct values for `SI_TIMER`, `SI_MESGQ`, `SI_ASYNCIO` ([#4292](https://github.com/rust-lang/libc/pull/4292)) +- NuttX: Update `tm_zone` and `d_name` fields to use `c_char` type ([#4222](https://github.com/rust-lang/libc/pull/4222)) +- Xous: Include the prelude to define `c_int` ([#4304](https://github.com/rust-lang/libc/pull/4304)) + +### Other + +- Add labels to FIXMEs ([#4231](https://github.com/rust-lang/libc/pull/4231), [#4232](https://github.com/rust-lang/libc/pull/4232), [#4234](https://github.com/rust-lang/libc/pull/4234), [#4235](https://github.com/rust-lang/libc/pull/4235), [#4236](https://github.com/rust-lang/libc/pull/4236)) +- CI: Fix "cannot find libc" error on Sparc64 ([#4317](https://github.com/rust-lang/libc/pull/4317)) +- CI: Fix "cannot find libc" error on s390x ([#4317](https://github.com/rust-lang/libc/pull/4317)) +- CI: Pass `--no-self-update` to `rustup update` ([#4306](https://github.com/rust-lang/libc/pull/4306)) +- CI: Remove tests for the `i586-pc-windows-msvc` target ([#4311](https://github.com/rust-lang/libc/pull/4311)) +- CI: Remove the `check_cfg` job ([#4322](https://github.com/rust-lang/libc/pull/4312)) +- Change the range syntax that is giving `ctest` problems ([#4311](https://github.com/rust-lang/libc/pull/4311)) +- Linux: Split out the stat struct for gnu/b32/mips ([#4276](https://github.com/rust-lang/libc/pull/4276)) + +### Removed + +- NuttX: Remove `pthread_set_name_np` ([#4251](https://github.com/rust-lang/libc/pull/4251)) + +## [0.2.170](https://github.com/rust-lang/libc/compare/0.2.169...0.2.170) - 2025-02-23 + +### Added + +- Android: Declare `setdomainname` and `getdomainname` +- FreeBSD: Add `evdev` structures +- FreeBSD: Add the new `st_filerev` field to `stat32` ([#4254](https://github.com/rust-lang/libc/pull/4254)) +- Linux: Add `SI_*`` and `TRAP_*`` signal codes +- Linux: Add experimental configuration to enable 64-bit time in kernel APIs, set by `RUST_LIBC_UNSTABLE_LINUX_TIME_BITS64`. +- Linux: Add recent socket timestamping flags +- Linux: Added new CANFD_FDF flag for the flags field of canfd_frame +- Musl: add CLONE_NEWTIME +- Solarish: add the posix_spawn family of functions + +### Deprecated + +- Linux: deprecate kernel modules syscalls + +### Changed + +- Emscripten: Assume version is at least 3.1.42 + +### Fixed + +- BSD: Correct the definition of `WEXITSTATUS` +- Hurd: Fix CMSG_DATA on 64bit systems ([#4240](https://github.com/rust-lang/libc/pull/424)) +- NetBSD: fix `getmntinfo` ([#4265](https://github.com/rust-lang/libc/pull/4265) +- VxWorks: Fix the size of `time_t` + +### Other + +- Add labels to FIXMEs , , +- CI: Bump FreeBSD CI to 13.4 and 14.2 +- Copy definitions from core::ffi and centralize them +- Define c_char at top-level and remove per-target c_char definitions +- Port style.rs to syn and add tests for the style checker + +## [0.2.169](https://github.com/rust-lang/libc/compare/0.2.168...0.2.169) - 2024-12-18 + +### Added + +- FreeBSD: add more socket TCP stack constants +- Fuchsia: add a `sockaddr_vm` definition + +### Fixed + +**Breaking**: [rust-lang/rust#132975](https://github.com/rust-lang/rust/pull/132975) corrected the signedness of `core::ffi::c_char` on various Tier 2 and Tier 3 platforms (mostly Arm and RISC-V) to match Clang. 
This release contains the corresponding changes to `libc`, including the following specific pull requests: + +- ESP-IDF: Replace arch-conditional `c_char` with a reexport +- Fix `c_char` on various targets +- Mirror `c_char` configuration from `rust-lang/rust` + +### Cleanup + +- Do not re-export `c_void` in target-specific code + +## [0.2.168](https://github.com/rust-lang/libc/compare/0.2.167...0.2.168) - 2024-12-09 + +### Added + +- Linux: Add new process flags ([#4174](https://github.com/rust-lang/libc/pull/4174)) +- Linux: Make `IFA_*` constants available on all Linux targets +- Linux: add `MAP_DROPPABLE` +- Solaris, Illumos: add `SIGRTMIN` and `SIGRTMAX` +- Unix, Linux: adding POSIX `memccpy` and `mempcpy` GNU extension +- CI: Upload artifacts created by libc-test +- CI: Use workflow commands to group output by target +- CI: add caching + +## [0.2.167](https://github.com/rust-lang/libc/compare/0.2.166...0.2.167) - 2024-11-28 + +### Added + +- Solarish: add `st_fstype` to `stat` +- Trusty: Add `intptr_t` and `uintptr_t` ([#4161](https://github.com/rust-lang/libc/pull/4161)) + +### Fixed + +- Fix the build with `rustc-dep-of-std` +- Wasi: Add back unsafe block for `clockid_t` static variables ([#4157](https://github.com/rust-lang/libc/pull/4157)) + +### Cleanup + +- Create an internal prelude +- Fix `unused_qualifications` + +### Other + +- CI: Check various FreeBSD versions ([#4159](https://github.com/rust-lang/libc/pull/4159)) +- CI: add a timeout for all jobs +- CI: verify MSRV for `wasm32-wasi` +- Migrate to the 2021 edition + +### Removed + +- Remove one unused import after the edition 2021 bump + +## [0.2.166](https://github.com/rust-lang/libc/compare/0.2.165...0.2.166) - 2024-11-26 + +### Fixed + +This release resolves two cases of unintentional breakage from the previous release: + +- Revert removal of array size hacks [#4150](https://github.com/rust-lang/libc/pull/4150) +- Ensure `const extern` functions are always enabled [#4151](https://github.com/rust-lang/libc/pull/4151) + +## [0.2.165](https://github.com/rust-lang/libc/compare/0.2.164...0.2.165) - 2024-11-25 + +### Added + +- Android: add `mkostemp`, `mkostemps` +- Android: add a few API 30 calls +- Android: add missing syscall constants +- Apple: add `in6_ifreq` +- Apple: add missing `sysctl` net types (before release: remove `if_family_id` ([#4137](https://github.com/rust-lang/libc/pulls/4137))) +- Freebsd: add `kcmp` call support +- Hurd: add `MAP_32BIT` and `MAP_EXCL` +- Hurd: add `domainname` field to `utsname` ([#4089](https://github.com/rust-lang/libc/pulls/4089)) +- Linux GNU: add `f_flags` to struct `statfs` for arm, mips, powerpc and x86 +- Linux GNU: add `malloc_stats` +- Linux: add ELF relocation-related structs +- Linux: add `ptp_*` structs +- Linux: add `ptp_clock_caps` +- Linux: add `ptp_pin_function` and most `PTP_` constants +- Linux: add missing AF_XDP structs & constants +- Linux: add missing netfilter consts ([#3734](https://github.com/rust-lang/libc/pulls/3734)) +- Linux: add struct and constants for the `mount_setattr` syscall +- Linux: add wireless API +- Linux: expose the `len8_dlc` field of `can_frame` +- Musl: add `utmpx` API +- Musl: add missing syscall constants +- NetBSD: add `mcontext`-related data for RISCV64 +- Redox: add new `netinet` constants ) +- Solarish: add `_POSIX_VDISABLE` ([#4103](https://github.com/rust-lang/libc/pulls/4103)) +- Tests: Add a test that the `const extern fn` macro works +- Tests: Add test of primitive types against `std` +- Unix: Add `htonl`, `htons`, `ntohl`, 
`ntohs` +- Unix: add `aligned_alloc` +- Windows: add `aligned_realloc` + +### Fixed + +- **breaking** Hurd: fix `MAP_HASSEMAPHORE` name ([#4127](https://github.com/rust-lang/libc/pulls/4127)) +- **breaking** ulibc Mips: fix `SA_*` mismatched types ([#3211](https://github.com/rust-lang/libc/pulls/3211)) +- Aix: fix an enum FFI safety warning +- Haiku: fix some typos ([#3664](https://github.com/rust-lang/libc/pulls/3664)) +- Tests: fix `Elf{32,64}_Relr`-related tests +- Tests: fix libc-tests for `loongarch64-linux-musl` +- Tests: fix some clippy warnings +- Tests: fix tests on `riscv64gc-unknown-freebsd` + +### Deprecated + +- Apple: deprecate `iconv_open` +- Apple: deprecate `mach_task_self` +- Apple: update `mach` deprecation notices for things that were removed in `main` + +### Cleanup + +- Adjust the `f!` macro to be more flexible +- Aix: remove duplicate constants +- CI: make scripts more uniform +- Drop the `libc_align` conditional +- Drop the `libc_cfg_target_vendor` conditional +- Drop the `libc_const_size_of` conditional +- Drop the `libc_core_cvoid` conditional +- Drop the `libc_int128` conditional +- Drop the `libc_non_exhaustive` conditional +- Drop the `libc_packedN` conditional +- Drop the `libc_priv_mod_use` conditional +- Drop the `libc_union` conditional +- Drop the `long_array` conditional +- Drop the `ptr_addr_of` conditional +- Drop warnings about deprecated cargo features +- Eliminate uses of `struct_formatter` +- Fix a few other array size hacks +- Glibc: remove redundant definitions ([#3261](https://github.com/rust-lang/libc/pulls/3261)) +- Musl: remove redundant definitions ([#3261](https://github.com/rust-lang/libc/pulls/3261)) +- Musl: unify definitions of `siginfo_t` ([#3261](https://github.com/rust-lang/libc/pulls/3261)) +- Musl: unify definitions of statfs and statfs64 ([#3261](https://github.com/rust-lang/libc/pulls/3261)) +- Musl: unify definitions of statvfs and statvfs64 ([#3261](https://github.com/rust-lang/libc/pulls/3261)) +- Musl: unify statx definitions ([#3978](https://github.com/rust-lang/libc/pulls/3978)) +- Remove array size hacks for Rust < 1.47 +- Remove repetitive words +- Use #[derive] for Copy/Clone in s! and friends +- Use some tricks to format macro bodies + +### Other + +- Apply formatting to macro bodies +- Bump libc-test to Rust 2021 Edition +- CI: Add a check that semver files don't contain duplicate entries +- CI: Add `fanotify_event_info_fid` to FAM-exempt types +- CI: Allow rustfmt to organize imports ([#4136](https://github.com/rust-lang/libc/pulls/4136)) +- CI: Always run rustfmt +- CI: Change 32-bit Docker images to use EOL repos +- CI: Change 64-bit Docker images to ubuntu:24.10 +- CI: Disable the check for >1 s! 
invocation +- CI: Ensure build channels get run even if FILTER is unset +- CI: Ensure there is a fallback for no_std +- CI: Fix cases where unset variables cause errors +- CI: Naming adjustments and cleanup +- CI: Only invoke rustup if running in CI +- CI: Remove the logic to handle old rust versions +- CI: Set -u (error on unset) in all script files +- CI: add support for `loongarch64-unknown-linux-musl` +- CI: make `aarch64-apple-darwin` not a nightly-only target +- CI: run shellcheck on all scripts +- CI: update musl headers to Linux 6.6 +- CI: use qemu-sparc64 to run sparc64 tests +- Drop the `libc_const_extern_fn` conditional +- Drop the `libc_underscore_const_names` conditional +- Explicitly set the edition to 2015 +- Introduce a `git-blame-ignore-revs` file +- Tests: Ignore fields as required on Ubuntu 24.10 +- Tests: skip `ATF_*` constants for OpenBSD +- Triagebot: Add an autolabel for CI + +## [0.2.164](https://github.com/rust-lang/libc/compare/0.2.163...0.2.164) - 2024-11-16 + +### MSRV + +This release increases the MSRV of `libc` to 1.63. + +### Other + +- CI: remove tests with rust < 1.63 +- MSRV: document the MSRV of the stable channel to be 1.63 +- MacOS: move ifconf to s_no_extra_traits + +## [0.2.163](https://github.com/rust-lang/libc/compare/0.2.162...0.2.163) - 2024-11-16 + +### Added + +- Aix: add more `dlopen` flags +- Android: add group calls +- FreeBSD: add `TCP_FUNCTION_BLK` and `TCP_FUNCTION_ALIAS` +- Linux: add `confstr` +- Solarish: add `aio` +- Solarish: add `arc4random*` + +### Changed + +- Emscripten: upgrade emsdk to 3.1.68 +- Hurd: use more standard types +- Hurd: use the standard `ssize_t = isize` +- Solaris: fix `confstr` and `ucontext_t` + +### Other + +- CI: add Solaris +- CI: add `i686-unknown-freebsd` +- CI: ensure that calls to `sort` do not depend on locale +- Specify `rust-version` in `Cargo.toml` + +## [0.2.162](https://github.com/rust-lang/libc/compare/0.2.161...0.2.162) - 2024-11-07 + +### Added + +- Android: fix the alignment of `uc_mcontext` on arm64 +- Apple: add `host_cpu_load_info` +- ESP-IDF: add a time flag +- FreeBSD: add the `CLOSE_RANGE_CLOEXEC` flag +- FreeBSD: fix test errors regarding `__gregset_t` +- FreeBSD: fix tests on x86 FreeBSD 15 +- FreeBSD: make `ucontext_t` and `mcontext_t` available on all architectures +- Haiku: add `getentropy` +- Illumos: add `syncfs` +- Illumos: add some recently-added constants +- Linux: add `ioctl` flags +- Linux: add epoll busy polling parameters +- NuttX: add `pthread_[get/set]name_np` +- RTEMS: add `arc4random_buf` +- Trusty OS: add initial support +- WASIp2: expand socket support + +### Fixed + +- Emscripten: don't pass `-lc` +- Hurd: change `st_fsid` field to `st_dev` +- Hurd: fix the definition of `utsname` +- Illumos/Solaris: fix `FNM_CASEFOLD` definition +- Solaris: fix all tests + +### Other + +- CI: Add loongarch64 +- CI: Check that semver files are sorted +- CI: Re-enable the FreeBSD 15 job +- Clean up imports and `extern crate` usage +- Convert `mode_t` constants to octal +- Remove the `wasm32-wasi` target that has been deleted upstream + +## [0.2.161](https://github.com/rust-lang/libc/compare/0.2.160...0.2.161) - 2024-10-17 + +### Fixed + +- OpenBSD: fix `FNM_PATHNAME` and `FNM_NOESCAPE` values + +## [0.2.160](https://github.com/rust-lang/libc/compare/0.2.159...0.2.160) - 2024-10-17 + +### Added + +- Android: add `PR_GET_NAME` and `PR_SET_NAME` +- Apple: add `F_TRANSFEREXTENTS` +- Apple: add `mach_error_string` +- Apple: add additional `pthread` APIs +- Apple: add the `LOCAL_PEERTOKEN` 
socket option +- BSD: add `RTF_*`, `RTA_*`, `RTAX_*`, and `RTM_*` definitions +- Emscripten: add `AT_EACCESS` +- Emscripten: add `getgrgid`, `getgrnam`, `getgrnam_r` and `getgrgid_r` +- Emscripten: add `getpwnam_r` and `getpwuid_r` +- FreeBSD: add `POLLRDHUP` +- Haiku: add `arc4random` +- Illumos: add `ptsname_r` +- Linux: add `fanotify` interfaces +- Linux: add `tcp_info` +- Linux: add additional AF_PACKET options +- Linux: make Elf constants always available +- Musl x86: add `iopl` and `ioperm` +- Musl: add `posix_spawn` chdir functions +- Musl: add `utmpx.h` constants +- NetBSD: add `sysctlnametomib`, `CLOCK_THREAD_CPUTIME_ID` and `CLOCK_PROCESS_CPUTIME_ID` +- Nuttx: initial support +- RTEMS: add `getentropy` +- RTEMS: initial support +- Solarish: add `POLLRDHUP`, `POSIX_FADV_*`, `O_RSYNC`, and `posix_fallocate` +- Unix: add `fnmatch.h` +- VxWorks: add riscv64 support +- VxWorks: update constants related to the scheduler + +### Changed + +- Redox: change `ino_t` to be `c_ulonglong` + +### Fixed + +- ESP-IDF: fix mismatched constants and structs +- FreeBSD: fix `struct stat` on FreeBSD 12+ + +### Other + +- CI: Fix CI for FreeBSD 15 +- Docs: link to `windows-sys` + +## [0.2.159](https://github.com/rust-lang/libc/compare/0.2.158...0.2.159) - 2024-09-24 + +### Added + +- Android: add more `AT_*` constants in +- Apple: add missing `NOTE_*` constants in +- Hermit: add missing error numbers in +- Hurd: add `__timeval` for 64-bit support in +- Linux: add `epoll_pwait2` in +- Linux: add `mq_notify` in +- Linux: add missing `NFT_CT_*` constants in +- Linux: add the `fchmodat2` syscall in +- Linux: add the `mseal` syscall in +- OpenBSD: add `sendmmsg` and `recvmmsg` in +- Unix: add `IN6ADDR_ANY_INIT` and `IN6ADDR_LOOPBACK_INIT` in +- VxWorks: add `S_ISVTX` in +- VxWorks: add `vxCpuLib` and `taskLib` functions +- WASIp2: add definitions for `std::net` support in + +### Fixed + +- Correctly handle version checks when `clippy-driver` is used + +### Changed + +- EspIdf: change signal constants to c_int in +- HorizonOS: update network definitions in +- Linux: combine `ioctl` APIs in +- WASI: enable CI testing in +- WASIp2: enable CI testing in + +## [0.2.158](https://github.com/rust-lang/libc/compare/0.2.157...0.2.158) - 2024-08-19 + +### Other +- WASI: fix missing `Iterator` with `rustc-dep-of-std` in + +## [0.2.157](https://github.com/rust-lang/libc/compare/0.2.156...0.2.157) - 2024-08-17 + +### Added + +- Apple: add `_NSGetArgv`, `_NSGetArgc` and `_NSGetProgname` in +- Build: add `RUSTC_WRAPPER` support in +- FreeBSD: add `execvpe` support from 14.1 release in +- Fuchsia: add `SO_BINDTOIFINDEX` +- Linux: add `klogctl` in +- MacOS: add `fcntl` OFD commands in +- NetBSD: add `_lwp_park` in +- Solaris: add missing networking support in +- Unix: add `pthread_equal` in +- WASI: add `select`, `FD_SET`, `FD_ZERO`, `FD_ISSET ` in + +### Fixed +- TEEOS: fix octal notation for `O_*` constants in + +### Changed +- FreeBSD: always use freebsd12 when `rustc_dep_of_std` is set in + +## [0.2.156](https://github.com/rust-lang/libc/compare/v0.2.155...v0.2.156) - 2024-08-15 + +### Added +- Apple: add `F_ALLOCATEPERSIST` in +- Apple: add `os_sync_wait_on_address` and related definitions in +- BSD: generalise `IPV6_DONTFRAG` to all BSD targets in +- FreeBSD/DragonFly: add `IP_RECVTTL`/`IPV6_RECVHOPLIMIT` in +- Hurd: add `XATTR_CREATE`, `XATTR_REPLACE` in +- Linux GNU: `confstr` API and `_CS_*` in +- Linux musl: add `preadv2` and `pwritev2` (1.2.5 min.) 
in +- VxWorks: add the constant `SOMAXCONN` in +- VxWorks: add a few errnoLib related constants in + +### Fixed +- Solaris/illumos: Change `ifa_flags` type to u64 in +- QNX 7.0: Disable `libregex` in + +### Changed +- QNX NTO: update platform support in +- `addr_of!(EXTERN_STATIC)` is now considered safe in + +### Removed +- Apple: remove `rmx_state` in + +### Other +- Update or remove CI tests that have been failing diff --git a/vendor/libc/CONTRIBUTING.md b/vendor/libc/CONTRIBUTING.md new file mode 100644 index 00000000000000..0cdfaeadf90593 --- /dev/null +++ b/vendor/libc/CONTRIBUTING.md @@ -0,0 +1,126 @@ +# Contributing to `libc` + +Welcome! If you are reading this document, it means you are interested in +contributing to the `libc` crate. + +## v1.0 Roadmap + +`libc` has two active branches: `main` and `libc-0.2`. `main` is for active +development of the upcoming v1.0 release, and should be the target of all pull +requests. `libc-0.2` is for updates to the currently released version. + +If a pull request to `main` is a good candidate for inclusion in an `0.2.x` +release, include `@rustbot label stable-nominated` in a comment to propose this. +Good candidates will usually meet the following: + +1. The included changes are non-breaking. +2. The change applies cleanly to both branches. +3. There is a usecase that justifies inclusion in a stable release (all + additions should always have a usecase, hopefully). + +Once a `stable-nominated` PR targeting `main` has merged, it can be cherry +picked to the `libc-0.2` branch. A maintainer will likely do these cherry picks +in a batch. + +Alternatively, you can start this process yourself by creating a new branch +based on `libc-0.2` and running `git cherry-pick -xe commit-sha-on-main` +(`git +cherry-pick -xe start-sha^..end-sha` if a range of commits is needed). +`git` will automatically add the "cherry picked from commit" note, but try to +add a backport note so the original PR gets crosslinked: + +``` +# ... original commit message ... + +(backport ) # add manually +(cherry picked from commit 104b6a4ae31c726814c36318dc718470cc96e167) # added by git +``` + +Once the cherry-pick is complete, open a PR targeting `libc-0.2`. + +See the [tracking issue](https://github.com/rust-lang/libc/issues/3248) for +details. + +## Adding an API + +Want to use an API which currently isn't bound in `libc`? It's quite easy to add +one! + +The internal structure of this crate is designed to minimize the number of +`#[cfg]` attributes in order to easily be able to add new items which apply to +all platforms in the future. As a result, the crate is organized hierarchically +based on platform. Each module has a number of `#[cfg]`'d children, but only one +is ever actually compiled. Each module then reexports all the contents of its +children. + +This means that for each platform that libc supports, the path from a leaf +module to the root will contain all bindings for the platform in question. +Consequently, this indicates where an API should be added! Adding an API at a +particular level in the hierarchy means that it is supported on all the child +platforms of that level. For example, when adding a Unix API it should be added +to `src/unix/mod.rs`, but when adding a Linux-only API it should be added to +`src/unix/linux_like/linux/mod.rs`. + +If you're not 100% sure at what level of the hierarchy an API should be added +at, fear not! 
This crate has CI support which tests any binding against all +platforms supported, so you'll see failures if an API is added at the wrong +level or has different signatures across platforms. + +New symbol(s) (i.e. functions, constants etc.) should also be added to the +symbols list(s) found in the `libc-test/semver` directory. These lists keep +track of what symbols are public in the libc crate and ensures they remain +available between changes to the crate. If the new symbol(s) are available on +all supported Unixes it should be added to `unix.txt` list1, +otherwise they should be added to the OS specific list(s). + +With that in mind, the steps for adding a new API are: + +1. Determine where in the module hierarchy your API should be added. +2. Add the API, including adding new symbol(s) to the semver lists. +3. Send a PR to this repo. +4. Wait for CI to pass, fixing errors. +5. Wait for a merge! + +1: Note that this list has nothing to do with any Unix or Posix +standard, it's just a list shared among all OSs that declare `#[cfg(unix)]`. + +## Test before you commit + +We have two automated tests running on +[GitHub Actions](https://github.com/rust-lang/libc/actions): + +1. `libc-test` + - `cd libc-test && cargo test` + - Use the `skip_*()` functions in `build.rs` if you really need a workaround. +2. Style checker + - [`./ci/style.sh`](https://github.com/rust-lang/libc/blob/main/ci/style.sh) + +## Breaking change policy + +Sometimes an upstream adds a breaking change to their API e.g. removing outdated +items, changing the type signature, etc. And we probably should follow that +change to build the `libc` crate successfully. It's annoying to do the +equivalent of semver-major versioning for each such change. Instead, we mark the +item as deprecated and do the actual change after a certain period. The steps +are: + +1. Add `#[deprecated(since = "", note="")]` attribute to the item. + - The `since` field should have a next version of `libc` (e.g., if the current + version is `0.2.1`, it should be `0.2.2`). + - The `note` field should have a reason to deprecate and a tracking issue to + call for comments (e.g., "We consider removing this as the upstream removed + it. If you're using it, please comment on #XXX"). +2. If we don't see any concerns for a while, do the change actually. + +## Supported target policy + +When Rust removes a support for a target, the libc crate also may remove the +support at any time. + +## Releasing your change to crates.io + +This repository uses [release-plz] to handle releases. Once your pull request +has been merged, a maintainer just needs to verify the generated changelog, then +merge the bot's release PR. This will automatically publish to crates.io! + +[release-plz]: https://github.com/MarcoIeni/release-plz diff --git a/vendor/libc/Cargo.lock b/vendor/libc/Cargo.lock new file mode 100644 index 00000000000000..5b7b58c2cd6076 --- /dev/null +++ b/vendor/libc/Cargo.lock @@ -0,0 +1,16 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. 
+version = 3 + +[[package]] +name = "libc" +version = "0.2.177" +dependencies = [ + "rustc-std-workspace-core", +] + +[[package]] +name = "rustc-std-workspace-core" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aa9c45b374136f52f2d6311062c7146bff20fec063c3f5d46a410bd937746955" diff --git a/vendor/libc/Cargo.toml b/vendor/libc/Cargo.toml new file mode 100644 index 00000000000000..d6c80a49e03bc8 --- /dev/null +++ b/vendor/libc/Cargo.toml @@ -0,0 +1,201 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies. +# +# If you are reading this file be aware that the original Cargo.toml +# will likely look very different (and much more reasonable). +# See Cargo.toml.orig for the original contents. + +[package] +edition = "2021" +rust-version = "1.63" +name = "libc" +version = "0.2.177" +authors = ["The Rust Project Developers"] +build = "build.rs" +exclude = [ + "/ci/*", + "/.github/*", + "/.cirrus.yml", + "/triagebot.toml", +] +autolib = false +autobins = false +autoexamples = false +autotests = false +autobenches = false +description = "Raw FFI bindings to platform libraries like libc." +readme = "README.md" +keywords = [ + "libc", + "ffi", + "bindings", + "operating", + "system", +] +categories = [ + "external-ffi-bindings", + "no-std", + "os", +] +license = "MIT OR Apache-2.0" +repository = "https://github.com/rust-lang/libc" + +[package.metadata.docs.rs] +features = ["extra_traits"] +default-target = "x86_64-unknown-linux-gnu" +targets = [ + "aarch64-apple-darwin", + "aarch64-apple-ios", + "aarch64-linux-android", + "aarch64-pc-windows-msvc", + "aarch64-unknown-freebsd", + "aarch64-unknown-fuchsia", + "aarch64-unknown-hermit", + "aarch64-unknown-linux-gnu", + "aarch64-unknown-linux-musl", + "aarch64-unknown-netbsd", + "aarch64-unknown-openbsd", + "aarch64-wrs-vxworks", + "arm-linux-androideabi", + "arm-unknown-linux-gnueabi", + "arm-unknown-linux-gnueabihf", + "arm-unknown-linux-musleabi", + "arm-unknown-linux-musleabihf", + "armebv7r-none-eabi", + "armebv7r-none-eabihf", + "armv5te-unknown-linux-gnueabi", + "armv5te-unknown-linux-musleabi", + "armv7-linux-androideabi", + "armv7-unknown-linux-gnueabihf", + "armv7-unknown-linux-musleabihf", + "armv7-wrs-vxworks-eabihf", + "armv7r-none-eabi", + "armv7r-none-eabihf", + "i586-unknown-linux-gnu", + "i586-unknown-linux-musl", + "i686-linux-android", + "i686-pc-windows-gnu", + "i686-pc-windows-msvc", + "i686-pc-windows-msvc", + "i686-unknown-freebsd", + "i686-unknown-haiku", + "i686-unknown-linux-gnu", + "i686-unknown-linux-musl", + "i686-unknown-netbsd", + "i686-unknown-openbsd", + "i686-wrs-vxworks", + "mips-unknown-linux-gnu", + "mips-unknown-linux-musl", + "mips64-unknown-linux-gnuabi64", + "mips64-unknown-linux-muslabi64", + "mips64el-unknown-linux-gnuabi64", + "mips64el-unknown-linux-muslabi64", + "mipsel-sony-psp", + "mipsel-unknown-linux-gnu", + "mipsel-unknown-linux-musl", + "nvptx64-nvidia-cuda", + "powerpc-unknown-linux-gnu", + "powerpc-unknown-linux-gnuspe", + "powerpc-unknown-netbsd", + "powerpc-wrs-vxworks", + "powerpc-wrs-vxworks-spe", + "powerpc64-ibm-aix", + "powerpc64-unknown-freebsd", + "powerpc64-unknown-linux-gnu", + "powerpc64-wrs-vxworks", + "powerpc64le-unknown-linux-gnu", + "powerpc64le-unknown-linux-musl", + "riscv32gc-unknown-linux-gnu", + 
"riscv32i-unknown-none-elf", + "riscv32imac-unknown-none-elf", + "riscv32imc-unknown-none-elf", + "riscv32-wrs-vxworks", + "riscv64gc-unknown-freebsd", + "riscv64gc-unknown-hermit", + "riscv64gc-unknown-linux-gnu", + "riscv64gc-unknown-linux-musl", + "riscv64gc-unknown-none-elf", + "riscv64imac-unknown-none-elf", + "riscv64-wrs-vxworks", + "s390x-unknown-linux-gnu", + "s390x-unknown-linux-musl", + "sparc-unknown-linux-gnu", + "sparc64-unknown-linux-gnu", + "sparc64-unknown-netbsd", + "sparcv9-sun-solaris", + "thumbv6m-none-eabi", + "thumbv7em-none-eabi", + "thumbv7em-none-eabihf", + "thumbv7m-none-eabi", + "thumbv7neon-linux-androideabi", + "thumbv7neon-unknown-linux-gnueabihf", + "wasm32-unknown-emscripten", + "wasm32-unknown-unknown", + "x86_64-apple-darwin", + "x86_64-apple-ios", + "x86_64-fortanix-unknown-sgx", + "x86_64-linux-android", + "x86_64-pc-solaris", + "x86_64-pc-windows-gnu", + "x86_64-pc-windows-msvc", + "x86_64-unknown-dragonfly", + "x86_64-unknown-freebsd", + "x86_64-unknown-fuchsia", + "x86_64-unknown-haiku", + "x86_64-unknown-hermit", + "x86_64-unknown-illumos", + "x86_64-unknown-l4re-uclibc", + "x86_64-unknown-linux-gnu", + "x86_64-unknown-linux-gnux32", + "x86_64-unknown-linux-musl", + "x86_64-unknown-netbsd", + "x86_64-unknown-openbsd", + "x86_64-unknown-redox", + "x86_64-wrs-vxworks", +] +cargo-args = ["-Zbuild-std=core"] + +[features] +align = [] +const-extern-fn = [] +default = ["std"] +extra_traits = [] +rustc-dep-of-std = [ + "align", + "rustc-std-workspace-core", +] +std = [] +use_std = ["std"] + +[lib] +name = "libc" +path = "src/lib.rs" + +[[test]] +name = "const_fn" +path = "tests/const_fn.rs" + +[dependencies.rustc-std-workspace-core] +version = "1.0.1" +optional = true + +[lints.clippy] +expl_impl_clone_on_copy = "allow" +explicit_iter_loop = "warn" +identity_op = "allow" +manual_assert = "warn" +map_unwrap_or = "warn" +missing_safety_doc = "allow" +non_minimal_cfg = "allow" +ptr_as_ptr = "warn" +uninlined_format_args = "allow" +unnecessary_cast = "allow" +unnecessary_semicolon = "warn" +used_underscore_binding = "allow" + +[lints.rust] +unused_qualifications = "allow" diff --git a/vendor/libc/LICENSE-APACHE b/vendor/libc/LICENSE-APACHE new file mode 100644 index 00000000000000..1b5ec8b78e237b --- /dev/null +++ b/vendor/libc/LICENSE-APACHE @@ -0,0 +1,176 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS diff --git a/vendor/libc/LICENSE-MIT b/vendor/libc/LICENSE-MIT new file mode 100644 index 00000000000000..78061811c33c81 --- /dev/null +++ b/vendor/libc/LICENSE-MIT @@ -0,0 +1,25 @@ +Copyright (c) 2014-2020 The Rust Project Developers + +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. diff --git a/vendor/libc/README.md b/vendor/libc/README.md new file mode 100644 index 00000000000000..c616d8b29f52b2 --- /dev/null +++ b/vendor/libc/README.md @@ -0,0 +1,117 @@ +# libc - Raw FFI bindings to platforms' system libraries + +[![GHA Status]][GitHub Actions] [![Cirrus CI Status]][Cirrus CI] [![Latest Version]][crates.io] [![Documentation]][docs.rs] ![License] + +`libc` provides all of the definitions necessary to easily interoperate with C +code (or "C-like" code) on each of the platforms that Rust supports. This +includes type definitions (e.g. `c_int`), constants (e.g. `EINVAL`) as well as +function headers (e.g. `malloc`). + +This crate exports all underlying platform types, functions, and constants under +the crate root, so all items are accessible as `libc::foo`. 
The types and values +of all the exported APIs match the platform that libc is compiled for. + +Windows API bindings are not included in this crate. If you are looking for +WinAPI bindings, consider using crates like [windows-sys]. + +More detailed information about the design of this library can be found in its +[associated RFC][rfc]. + +[rfc]: https://github.com/rust-lang/rfcs/blob/HEAD/text/1291-promote-libc.md +[windows-sys]: https://docs.rs/windows-sys + +## v1.0 Roadmap + +Currently, `libc` has two active branches: `main` for the upcoming v1.0 release, +and `libc-0.2` for the currently published version. By default all pull requests +should target `main`; once reviewed, they can be cherry picked to the `libc-0.2` +branch if needed. + +We will stop making new v0.2 releases once v1.0 is released. + +See the section in [CONTRIBUTING.md](CONTRIBUTING.md#v10-roadmap) for more +details. + +## Usage + +Add the following to your `Cargo.toml`: + +```toml +[dependencies] +libc = "0.2" +``` + +## Features + +* `std`: by default `libc` links to the standard library. Disable this feature + to remove this dependency and be able to use `libc` in `#![no_std]` crates. + +* `extra_traits`: all `struct`s implemented in `libc` are `Copy` and `Clone`. + This feature derives `Debug`, `Eq`, `Hash`, and `PartialEq`. + +The following features are deprecated: + +* `use_std`: this is equivalent to `std` +* `const-extern-fn`: this is now enabled by default +* `align`: this is now enabled by default + +## Rust version support + +The minimum supported Rust toolchain version is currently **Rust 1.63**. + +Increases to the MSRV are allowed to change without a major (i.e. semver- +breaking) release in order to avoid a ripple effect in the ecosystem. A policy +for when this may change is a work in progress. + +`libc` may continue to compile with Rust versions older than the current MSRV +but this is not guaranteed. + +## Platform support + +You can see the platform(target)-specific docs on [docs.rs], select a platform +you want to see. + +See [`ci/verify-build.sh`](https://github.com/rust-lang/libc/blob/HEAD/ci/verify-build.sh) for +the platforms on which `libc` is guaranteed to build for each Rust toolchain. +The test-matrix at [GitHub Actions] and [Cirrus CI] show the platforms in which +`libc` tests are run. + +
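+A minimal sketch of the usage described above: with `libc` declared as a
+dependency, the raw bindings are called through `unsafe`, for example `getpid`
+on Unix targets:
+
+```rust
+fn main() {
+    // Assumes a Unix target, where libc binds `getpid`.
+    // SAFETY: `getpid` takes no arguments and has no failure modes.
+    let pid = unsafe { libc::getpid() };
+    println!("current process id: {pid}");
+}
+```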

+ +## License + +This project is licensed under either of + +* [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0) + ([LICENSE-APACHE](https://github.com/rust-lang/libc/blob/HEAD/LICENSE-APACHE)) + +* [MIT License](https://opensource.org/licenses/MIT) + ([LICENSE-MIT](https://github.com/rust-lang/libc/blob/HEAD/LICENSE-MIT)) + +at your option. + +## Contributing + +We welcome all people who want to contribute. Please see the +[contributing instructions] for more information. + +[contributing instructions]: https://github.com/rust-lang/libc/blob/HEAD/CONTRIBUTING.md + +Contributions in any form (issues, pull requests, etc.) to this project must +adhere to Rust's [Code of Conduct]. + +[Code of Conduct]: https://www.rust-lang.org/policies/code-of-conduct + +Unless you explicitly state otherwise, any contribution intentionally submitted +for inclusion in `libc` by you, as defined in the Apache-2.0 license, shall be +dual licensed as above, without any additional terms or conditions. + +[GitHub Actions]: https://github.com/rust-lang/libc/actions +[GHA Status]: https://github.com/rust-lang/libc/workflows/CI/badge.svg +[Cirrus CI]: https://cirrus-ci.com/github/rust-lang/libc +[Cirrus CI Status]: https://api.cirrus-ci.com/github/rust-lang/libc.svg +[crates.io]: https://crates.io/crates/libc +[Latest Version]: https://img.shields.io/crates/v/libc.svg +[Documentation]: https://docs.rs/libc/badge.svg +[docs.rs]: https://docs.rs/libc +[License]: https://img.shields.io/crates/l/libc.svg diff --git a/vendor/libc/build.rs b/vendor/libc/build.rs new file mode 100644 index 00000000000000..802ea7a37def04 --- /dev/null +++ b/vendor/libc/build.rs @@ -0,0 +1,298 @@ +use std::process::{Command, Output}; +use std::{env, str}; + +// List of cfgs this build script is allowed to set. The list is needed to support check-cfg, as we +// need to know all the possible cfgs that this script will set. If you need to set another cfg +// make sure to add it to this list as well. +const ALLOWED_CFGS: &[&str] = &[ + "emscripten_old_stat_abi", + "espidf_time32", + "freebsd10", + "freebsd11", + "freebsd12", + "freebsd13", + "freebsd14", + "freebsd15", + // Corresponds to `_FILE_OFFSET_BITS=64` in glibc + "gnu_file_offset_bits64", + // Corresponds to `_TIME_BITS=64` in glibc + "gnu_time_bits64", + "libc_deny_warnings", + "libc_thread_local", + // Corresponds to `__USE_TIME_BITS64` in UAPI + "linux_time_bits64", + "musl_v1_2_3", +]; + +// Extra values to allow for check-cfg. +const CHECK_CFG_EXTRA: &[(&str, &[&str])] = &[ + ( + "target_os", + &[ + "switch", "aix", "ohos", "hurd", "rtems", "visionos", "nuttx", "cygwin", + ], + ), + ( + "target_env", + &["illumos", "wasi", "aix", "ohos", "nto71_iosock", "nto80"], + ), + ( + "target_arch", + &["loongarch64", "mips32r6", "mips64r6", "csky"], + ), +]; + +fn main() { + // Avoid unnecessary re-building. + println!("cargo:rerun-if-changed=build.rs"); + + let (rustc_minor_ver, _is_nightly) = rustc_minor_nightly(); + let rustc_dep_of_std = env::var("CARGO_FEATURE_RUSTC_DEP_OF_STD").is_ok(); + let libc_ci = env::var("LIBC_CI").is_ok(); + let target_env = env::var("CARGO_CFG_TARGET_ENV").unwrap_or_default(); + let target_os = env::var("CARGO_CFG_TARGET_OS").unwrap_or_default(); + let target_ptr_width = env::var("CARGO_CFG_TARGET_POINTER_WIDTH").unwrap_or_default(); + let target_arch = env::var("CARGO_CFG_TARGET_ARCH").unwrap_or_default(); + + // The ABI of libc used by std is backward compatible with FreeBSD 12. 
+ // The ABI of libc from crates.io is backward compatible with FreeBSD 12. + // + // On CI, we detect the actual FreeBSD version and match its ABI exactly, + // running tests to ensure that the ABI is correct. + println!("cargo:rerun-if-env-changed=RUST_LIBC_UNSTABLE_FREEBSD_VERSION"); + // Allow overriding the default version for testing + let which_freebsd = if let Ok(version) = env::var("RUST_LIBC_UNSTABLE_FREEBSD_VERSION") { + let vers = version.parse().unwrap(); + println!("cargo:warning=setting FreeBSD version to {vers}"); + vers + } else if libc_ci { + which_freebsd().unwrap_or(12) + } else { + 12 + }; + + match which_freebsd { + x if x < 10 => panic!("FreeBSD older than 10 is not supported"), + 10 => set_cfg("freebsd10"), + 11 => set_cfg("freebsd11"), + 12 => set_cfg("freebsd12"), + 13 => set_cfg("freebsd13"), + 14 => set_cfg("freebsd14"), + _ => set_cfg("freebsd15"), + } + + match emcc_version_code() { + Some(v) if (v < 30142) => set_cfg("emscripten_old_stat_abi"), + // Non-Emscripten or version >= 3.1.42. + _ => (), + } + + let musl_v1_2_3 = env::var("RUST_LIBC_UNSTABLE_MUSL_V1_2_3").is_ok(); + println!("cargo:rerun-if-env-changed=RUST_LIBC_UNSTABLE_MUSL_V1_2_3"); + // loongarch64 and ohos have already updated + if musl_v1_2_3 || target_arch == "loongarch64" || target_env == "ohos" { + // FIXME(musl): enable time64 api as well + set_cfg("musl_v1_2_3"); + } + let linux_time_bits64 = env::var("RUST_LIBC_UNSTABLE_LINUX_TIME_BITS64").is_ok(); + println!("cargo:rerun-if-env-changed=RUST_LIBC_UNSTABLE_LINUX_TIME_BITS64"); + if linux_time_bits64 { + set_cfg("linux_time_bits64"); + } + println!("cargo:rerun-if-env-changed=RUST_LIBC_UNSTABLE_GNU_FILE_OFFSET_BITS"); + println!("cargo:rerun-if-env-changed=RUST_LIBC_UNSTABLE_GNU_TIME_BITS"); + if target_env == "gnu" + && target_os == "linux" + && target_ptr_width == "32" + && target_arch != "riscv32" + && target_arch != "x86_64" + { + let defaultbits = "32".to_string(); + let (timebits, filebits) = match ( + env::var("RUST_LIBC_UNSTABLE_GNU_TIME_BITS"), + env::var("RUST_LIBC_UNSTABLE_GNU_FILE_OFFSET_BITS"), + ) { + (Ok(_), Ok(_)) => panic!("Do not set both RUST_LIBC_UNSTABLE_GNU_TIME_BITS and RUST_LIBC_UNSTABLE_GNU_FILE_OFFSET_BITS"), + (Err(_), Err(_)) => (defaultbits.clone(), defaultbits.clone()), + (Ok(tb), Err(_)) if tb == "64" => (tb.clone(), tb.clone()), + (Ok(tb), Err(_)) if tb == "32" => (tb, defaultbits.clone()), + (Ok(_), Err(_)) => panic!("Invalid value for RUST_LIBC_UNSTABLE_GNU_TIME_BITS, must be 32 or 64"), + (Err(_), Ok(fb)) if fb == "32" || fb == "64" => (defaultbits.clone(), fb), + (Err(_), Ok(_)) => panic!("Invalid value for RUST_LIBC_UNSTABLE_GNU_FILE_OFFSET_BITS, must be 32 or 64"), + }; + let valid_bits = ["32", "64"]; + assert!( + valid_bits.contains(&filebits.as_str()) && valid_bits.contains(&timebits.as_str()), + "Invalid value for RUST_LIBC_UNSTABLE_GNU_TIME_BITS or RUST_LIBC_UNSTABLE_GNU_FILE_OFFSET_BITS, must be 32, 64 or unset" + ); + assert!( + !(filebits == "32" && timebits == "64"), + "RUST_LIBC_UNSTABLE_GNU_FILE_OFFSET_BITS must be 64 or unset if RUST_LIBC_UNSTABLE_GNU_TIME_BITS is 64" + ); + if timebits == "64" { + set_cfg("linux_time_bits64"); + set_cfg("gnu_time_bits64"); + } + if filebits == "64" { + set_cfg("gnu_file_offset_bits64"); + } + } + + // On CI: deny all warnings + if libc_ci { + set_cfg("libc_deny_warnings"); + } + + // #[thread_local] is currently unstable + if rustc_dep_of_std { + set_cfg("libc_thread_local"); + } + + // Since Rust 1.80, configuration that isn't recognized by default needs 
to be provided to + // avoid warnings. + if rustc_minor_ver >= 80 { + for cfg in ALLOWED_CFGS { + if rustc_minor_ver >= 75 { + println!("cargo:rustc-check-cfg=cfg({cfg})"); + } else { + println!("cargo:rustc-check-cfg=values({cfg})"); + } + } + for &(name, values) in CHECK_CFG_EXTRA { + let values = values.join("\",\""); + if rustc_minor_ver >= 75 { + println!("cargo:rustc-check-cfg=cfg({name},values(\"{values}\"))"); + } else { + println!("cargo:rustc-check-cfg=values({name},\"{values}\")"); + } + } + } +} + +/// Run `rustc --version` and capture the output, adjusting arguments as needed if `clippy-driver` +/// is used instead. +fn rustc_version_cmd(is_clippy_driver: bool) -> Output { + let rustc = env::var_os("RUSTC").expect("Failed to get rustc version: missing RUSTC env"); + + let mut cmd = match env::var_os("RUSTC_WRAPPER") { + Some(ref wrapper) if wrapper.is_empty() => Command::new(rustc), + Some(wrapper) => { + let mut cmd = Command::new(wrapper); + cmd.arg(rustc); + if is_clippy_driver { + cmd.arg("--rustc"); + } + + cmd + } + None => Command::new(rustc), + }; + + cmd.arg("--version"); + + let output = cmd.output().expect("Failed to get rustc version"); + + assert!( + output.status.success(), + "failed to run rustc: {}", + String::from_utf8_lossy(output.stderr.as_slice()) + ); + + output +} + +/// Return the minor version of `rustc`, as well as a bool indicating whether or not the version +/// is a nightly. +fn rustc_minor_nightly() -> (u32, bool) { + macro_rules! otry { + ($e:expr) => { + match $e { + Some(e) => e, + None => panic!("Failed to get rustc version"), + } + }; + } + + let mut output = rustc_version_cmd(false); + + if otry!(str::from_utf8(&output.stdout).ok()).starts_with("clippy") { + output = rustc_version_cmd(true); + } + + let version = otry!(str::from_utf8(&output.stdout).ok()); + + let mut pieces = version.split('.'); + + assert_eq!( + pieces.next(), + Some("rustc 1"), + "Failed to get rustc version" + ); + + let minor = pieces.next(); + + // If `rustc` was built from a tarball, its version string + // will have neither a git hash nor a commit date + // (e.g. "rustc 1.39.0"). Treat this case as non-nightly, + // since a nightly build should either come from CI + // or a git checkout + let nightly_raw = otry!(pieces.next()).split('-').nth(1); + let nightly = nightly_raw.map_or(false, |raw| { + raw.starts_with("dev") || raw.starts_with("nightly") + }); + let minor = otry!(otry!(minor).parse().ok()); + + (minor, nightly) +} + +fn which_freebsd() -> Option { + let output = Command::new("freebsd-version").output().ok()?; + if !output.status.success() { + return None; + } + + let stdout = String::from_utf8(output.stdout).ok()?; + + match &stdout { + s if s.starts_with("10") => Some(10), + s if s.starts_with("11") => Some(11), + s if s.starts_with("12") => Some(12), + s if s.starts_with("13") => Some(13), + s if s.starts_with("14") => Some(14), + s if s.starts_with("15") => Some(15), + _ => None, + } +} + +fn emcc_version_code() -> Option { + let emcc = if cfg!(target_os = "windows") { + "emcc.bat" + } else { + "emcc" + }; + + let output = Command::new(emcc).arg("-dumpversion").output().ok()?; + if !output.status.success() { + return None; + } + + let version = String::from_utf8(output.stdout).ok()?; + + // Some Emscripten versions come with `-git` attached, so split the + // version string also on the `-` char. 
+ let mut pieces = version.trim().split(['.', '-']); + + let major = pieces.next().and_then(|x| x.parse().ok()).unwrap_or(0); + let minor = pieces.next().and_then(|x| x.parse().ok()).unwrap_or(0); + let patch = pieces.next().and_then(|x| x.parse().ok()).unwrap_or(0); + + Some(major * 10000 + minor * 100 + patch) +} + +fn set_cfg(cfg: &str) { + assert!( + ALLOWED_CFGS.contains(&cfg), + "trying to set cfg {cfg}, but it is not in ALLOWED_CFGS", + ); + println!("cargo:rustc-cfg={cfg}"); +} diff --git a/vendor/libc/cherry-pick-stable.sh b/vendor/libc/cherry-pick-stable.sh new file mode 100755 index 00000000000000..c338be4f2ab222 --- /dev/null +++ b/vendor/libc/cherry-pick-stable.sh @@ -0,0 +1,150 @@ +#!/bin/bash + +set -e + +# Parse arguments +DRY_RUN=false +while [[ $# -gt 0 ]]; do + case $1 in + --dry-run|-d) + DRY_RUN=true + shift + ;; + --help|-h) + echo "Usage: $0 [OPTIONS]" + echo "" + echo "Cherry-pick commits from PRs labeled 'stable-nominated' to current branch" + echo "" + echo "Options:" + echo " -d, --dry-run Show what would be done without making changes" + echo " -h, --help Show this help message" + exit 0 + ;; + *) + echo "Unknown option: $1" + echo "Use --help for usage information" + exit 1 + ;; + esac +done + +if [ "$DRY_RUN" = true ]; then + echo "[DRY RUN MODE - No changes will be made]" + echo "" +fi + +current_branch=$(git branch --show-current) +echo "Current branch: $current_branch" +echo "Fetching PRs with 'stable-nominated' label..." +echo "" + +# Get PRs with stable-nominated label that are merged +# Sort by merge date (oldest first) to preserve merge order and avoid conflicts +# Format: PR number, title, merge commit SHA +prs=$(gh pr list --state merged --label stable-nominated --json number,title,mergeCommit,mergedAt --jq 'sort_by(.mergedAt) | .[] | "\(.number)|\(.title)|\(.mergeCommit.oid)"') + +if [ -z "$prs" ]; then + echo "No PRs found with 'stable-nominated' label." 
+ exit 0 +fi + +# Arrays to track results +declare -a successful +declare -a failed +declare -a skipped + +echo "Found PRs to cherry-pick:" +echo "" + +# Process each PR +while IFS='|' read -r pr_number title commit_sha; do + echo "----------------------------------------" + echo "PR #${pr_number}: ${title}" + echo "Commit: ${commit_sha}" + + # Check if commit already exists in current branch + if git branch --contains "$commit_sha" 2>/dev/null | grep -q "^\*"; then + echo "⏭ Already cherry-picked, skipping" + skipped+=("PR #${pr_number}: ${title}") + echo "" + continue + fi + + # Cherry-pick with -xe flags as specified + if [ "$DRY_RUN" = true ]; then + echo "Would cherry-pick with: git cherry-pick -xe $commit_sha" + echo "Would add backport note: (backport https://github.com/rust-lang/libc/pull/$pr_number)" + successful+=("PR #${pr_number}: ${title} (${commit_sha:0:8})") + else + if git cherry-pick -xe "$commit_sha" 2>&1; then + # Add backport note before the cherry-pick note as per CONTRIBUTING.md + current_msg=$(git log -1 --format=%B) + backport_line="(backport https://github.com/rust-lang/libc/pull/$pr_number)" + + # Insert backport line before "(cherry picked from commit" line + new_msg=$(echo "$current_msg" | sed "/^(cherry picked from commit/i\\ +$backport_line\\ +") + + # Amend the commit with the new message + git commit --amend -m "$new_msg" + + echo "✓ Successfully cherry-picked with backport note" + successful+=("PR #${pr_number}: ${title} (${commit_sha:0:8})") + else + echo "✗ Failed to cherry-pick" + failed+=("PR #${pr_number}: ${title} (${commit_sha:0:8})") + # Abort the failed cherry-pick + git cherry-pick --abort 2>/dev/null || true + fi + fi + echo "" +done <<< "$prs" + +# Print summary +echo "========================================" +if [ "$DRY_RUN" = true ]; then + echo "SUMMARY (DRY RUN)" +else + echo "SUMMARY" +fi +echo "========================================" +echo "" + +if [ ${#successful[@]} -gt 0 ]; then + if [ "$DRY_RUN" = true ]; then + echo "Would cherry-pick (${#successful[@]}):" + else + echo "Successfully cherry-picked (${#successful[@]}):" + fi + for item in "${successful[@]}"; do + echo " ✓ $item" + done + echo "" +fi + +if [ ${#skipped[@]} -gt 0 ]; then + echo "Skipped (${#skipped[@]}):" + for item in "${skipped[@]}"; do + echo " ⏭ $item" + done + echo "" +fi + +if [ ${#failed[@]} -gt 0 ]; then + echo "Failed (${#failed[@]}):" + for item in "${failed[@]}"; do + echo " ✗ $item" + done + echo "" + if [ "$DRY_RUN" = false ]; then + echo "Please resolve conflicts manually and re-run if needed." + fi + exit 1 +fi + +if [ "$DRY_RUN" = true ]; then + echo "Dry run complete! Run without --dry-run to apply changes." +else + echo "All done!" +fi diff --git a/vendor/libc/rustfmt.toml b/vendor/libc/rustfmt.toml new file mode 100644 index 00000000000000..de0fc5ecc0166e --- /dev/null +++ b/vendor/libc/rustfmt.toml @@ -0,0 +1,4 @@ +edition = "2021" +error_on_line_overflow = true +group_imports = "StdExternalCrate" +imports_granularity = "Module" diff --git a/vendor/libc/src/fuchsia/aarch64.rs b/vendor/libc/src/fuchsia/aarch64.rs new file mode 100644 index 00000000000000..577f0d99cf24d6 --- /dev/null +++ b/vendor/libc/src/fuchsia/aarch64.rs @@ -0,0 +1,69 @@ +use crate::off_t; +use crate::prelude::*; + +pub type __u64 = c_ulonglong; +pub type wchar_t = u32; +pub type nlink_t = c_ulong; +pub type blksize_t = c_long; + +s! 
{ + pub struct stat { + pub st_dev: crate::dev_t, + pub st_ino: crate::ino_t, + pub st_mode: crate::mode_t, + pub st_nlink: crate::nlink_t, + pub st_uid: crate::uid_t, + pub st_gid: crate::gid_t, + pub st_rdev: crate::dev_t, + __pad0: c_ulong, + pub st_size: off_t, + pub st_blksize: crate::blksize_t, + __pad1: c_int, + pub st_blocks: crate::blkcnt_t, + pub st_atime: crate::time_t, + pub st_atime_nsec: c_long, + pub st_mtime: crate::time_t, + pub st_mtime_nsec: c_long, + pub st_ctime: crate::time_t, + pub st_ctime_nsec: c_long, + __unused: [c_uint; 2], + } + + pub struct stat64 { + pub st_dev: crate::dev_t, + pub st_ino: crate::ino_t, + pub st_mode: crate::mode_t, + pub st_nlink: crate::nlink_t, + pub st_uid: crate::uid_t, + pub st_gid: crate::gid_t, + pub st_rdev: crate::dev_t, + __pad0: c_ulong, + pub st_size: off_t, + pub st_blksize: crate::blksize_t, + __pad1: c_int, + pub st_blocks: crate::blkcnt_t, + pub st_atime: crate::time_t, + pub st_atime_nsec: c_long, + pub st_mtime: crate::time_t, + pub st_mtime_nsec: c_long, + pub st_ctime: crate::time_t, + pub st_ctime_nsec: c_long, + __unused: [c_uint; 2], + } + + pub struct ipc_perm { + pub __ipc_perm_key: crate::key_t, + pub uid: crate::uid_t, + pub gid: crate::gid_t, + pub cuid: crate::uid_t, + pub cgid: crate::gid_t, + pub mode: crate::mode_t, + pub __seq: c_ushort, + __unused1: c_ulong, + __unused2: c_ulong, + } +} + +// From https://cs.opensource.google/fuchsia/fuchsia/+/main:zircon/third_party/ulib/musl/include/bits/signal.h;l=20-21;drc=0827b18ab9540c46f8037f407d17ea15a79e9ba7 +pub const MINSIGSTKSZ: size_t = 6144; +pub const SIGSTKSZ: size_t = 12288; diff --git a/vendor/libc/src/fuchsia/mod.rs b/vendor/libc/src/fuchsia/mod.rs new file mode 100644 index 00000000000000..31f13b16832d20 --- /dev/null +++ b/vendor/libc/src/fuchsia/mod.rs @@ -0,0 +1,4322 @@ +//! Definitions found commonly among almost all Unix derivatives +//! +//! More functions and definitions can be found in the more specific modules +//! according to the platform in question. 
+ +use crate::prelude::*; + +// PUB_TYPE + +pub type intmax_t = i64; +pub type uintmax_t = u64; + +pub type locale_t = *mut c_void; + +pub type size_t = usize; +pub type ptrdiff_t = isize; +pub type intptr_t = isize; +pub type uintptr_t = usize; +pub type ssize_t = isize; + +pub type pid_t = i32; +pub type uid_t = u32; +pub type gid_t = u32; +pub type in_addr_t = u32; +pub type in_port_t = u16; +pub type sighandler_t = size_t; +pub type cc_t = c_uchar; +pub type sa_family_t = u16; +pub type pthread_key_t = c_uint; +pub type speed_t = c_uint; +pub type tcflag_t = c_uint; +pub type clockid_t = c_int; +pub type key_t = c_int; +pub type id_t = c_uint; +pub type useconds_t = u32; +pub type dev_t = u64; +pub type socklen_t = u32; +pub type pthread_t = c_ulong; +pub type mode_t = u32; +pub type ino64_t = u64; +pub type off64_t = i64; +pub type blkcnt64_t = i64; +pub type rlim64_t = u64; +pub type mqd_t = c_int; +pub type nfds_t = c_ulong; +pub type nl_item = c_int; +pub type idtype_t = c_uint; +pub type loff_t = c_longlong; + +pub type __u8 = c_uchar; +pub type __u16 = c_ushort; +pub type __s16 = c_short; +pub type __u32 = c_uint; +pub type __s32 = c_int; + +pub type Elf32_Half = u16; +pub type Elf32_Word = u32; +pub type Elf32_Off = u32; +pub type Elf32_Addr = u32; + +pub type Elf64_Half = u16; +pub type Elf64_Word = u32; +pub type Elf64_Off = u64; +pub type Elf64_Addr = u64; +pub type Elf64_Xword = u64; + +pub type clock_t = c_long; +pub type time_t = c_long; +pub type suseconds_t = c_long; +pub type ino_t = u64; +pub type off_t = i64; +pub type blkcnt_t = i64; + +pub type shmatt_t = c_ulong; +pub type msgqnum_t = c_ulong; +pub type msglen_t = c_ulong; +pub type fsblkcnt_t = c_ulonglong; +pub type fsfilcnt_t = c_ulonglong; +pub type rlim_t = c_ulonglong; + +// FIXME(fuchsia): why are these uninhabited types? that seems... wrong? +// Presumably these should be `()` or an `extern type` (when that stabilizes). +#[derive(Debug)] +pub enum timezone {} +impl Copy for timezone {} +impl Clone for timezone { + fn clone(&self) -> timezone { + *self + } +} +#[derive(Debug)] +pub enum DIR {} +impl Copy for DIR {} +impl Clone for DIR { + fn clone(&self) -> DIR { + *self + } +} + +#[derive(Debug)] +pub enum fpos64_t {} // FIXME(fuchsia): fill this out with a struct +impl Copy for fpos64_t {} +impl Clone for fpos64_t { + fn clone(&self) -> fpos64_t { + *self + } +} + +// PUB_STRUCT + +s! { + pub struct group { + pub gr_name: *mut c_char, + pub gr_passwd: *mut c_char, + pub gr_gid: crate::gid_t, + pub gr_mem: *mut *mut c_char, + } + + pub struct utimbuf { + pub actime: time_t, + pub modtime: time_t, + } + + pub struct timeval { + pub tv_sec: time_t, + pub tv_usec: suseconds_t, + } + + pub struct timespec { + pub tv_sec: time_t, + pub tv_nsec: c_long, + } + + // FIXME(fuchsia): the rlimit and rusage related functions and types don't exist + // within zircon. Are there reasons for keeping them around? 
+ pub struct rlimit { + pub rlim_cur: rlim_t, + pub rlim_max: rlim_t, + } + + pub struct rusage { + pub ru_utime: timeval, + pub ru_stime: timeval, + pub ru_maxrss: c_long, + #[cfg(all(target_arch = "x86_64", target_pointer_width = "32"))] + __pad1: u32, + pub ru_ixrss: c_long, + #[cfg(all(target_arch = "x86_64", target_pointer_width = "32"))] + __pad2: u32, + pub ru_idrss: c_long, + #[cfg(all(target_arch = "x86_64", target_pointer_width = "32"))] + __pad3: u32, + pub ru_isrss: c_long, + #[cfg(all(target_arch = "x86_64", target_pointer_width = "32"))] + __pad4: u32, + pub ru_minflt: c_long, + #[cfg(all(target_arch = "x86_64", target_pointer_width = "32"))] + __pad5: u32, + pub ru_majflt: c_long, + #[cfg(all(target_arch = "x86_64", target_pointer_width = "32"))] + __pad6: u32, + pub ru_nswap: c_long, + #[cfg(all(target_arch = "x86_64", target_pointer_width = "32"))] + __pad7: u32, + pub ru_inblock: c_long, + #[cfg(all(target_arch = "x86_64", target_pointer_width = "32"))] + __pad8: u32, + pub ru_oublock: c_long, + #[cfg(all(target_arch = "x86_64", target_pointer_width = "32"))] + __pad9: u32, + pub ru_msgsnd: c_long, + #[cfg(all(target_arch = "x86_64", target_pointer_width = "32"))] + __pad10: u32, + pub ru_msgrcv: c_long, + #[cfg(all(target_arch = "x86_64", target_pointer_width = "32"))] + __pad11: u32, + pub ru_nsignals: c_long, + #[cfg(all(target_arch = "x86_64", target_pointer_width = "32"))] + __pad12: u32, + pub ru_nvcsw: c_long, + #[cfg(all(target_arch = "x86_64", target_pointer_width = "32"))] + __pad13: u32, + pub ru_nivcsw: c_long, + #[cfg(all(target_arch = "x86_64", target_pointer_width = "32"))] + __pad14: u32, + } + + pub struct in_addr { + pub s_addr: in_addr_t, + } + + pub struct in6_addr { + pub s6_addr: [u8; 16], + } + + pub struct ip_mreq { + pub imr_multiaddr: in_addr, + pub imr_interface: in_addr, + } + + pub struct ip_mreqn { + pub imr_multiaddr: in_addr, + pub imr_address: in_addr, + pub imr_ifindex: c_int, + } + + pub struct ipv6_mreq { + pub ipv6mr_multiaddr: in6_addr, + pub ipv6mr_interface: c_uint, + } + + pub struct hostent { + pub h_name: *mut c_char, + pub h_aliases: *mut *mut c_char, + pub h_addrtype: c_int, + pub h_length: c_int, + pub h_addr_list: *mut *mut c_char, + } + + pub struct iovec { + pub iov_base: *mut c_void, + pub iov_len: size_t, + } + + pub struct pollfd { + pub fd: c_int, + pub events: c_short, + pub revents: c_short, + } + + pub struct winsize { + pub ws_row: c_ushort, + pub ws_col: c_ushort, + pub ws_xpixel: c_ushort, + pub ws_ypixel: c_ushort, + } + + pub struct linger { + pub l_onoff: c_int, + pub l_linger: c_int, + } + + pub struct sigval { + // Actually a union of an int and a void* + pub sival_ptr: *mut c_void, + } + + // + pub struct itimerval { + pub it_interval: crate::timeval, + pub it_value: crate::timeval, + } + + // + pub struct tms { + pub tms_utime: crate::clock_t, + pub tms_stime: crate::clock_t, + pub tms_cutime: crate::clock_t, + pub tms_cstime: crate::clock_t, + } + + pub struct servent { + pub s_name: *mut c_char, + pub s_aliases: *mut *mut c_char, + pub s_port: c_int, + pub s_proto: *mut c_char, + } + + pub struct protoent { + pub p_name: *mut c_char, + pub p_aliases: *mut *mut c_char, + pub p_proto: c_int, + } + + pub struct aiocb { + pub aio_fildes: c_int, + pub aio_lio_opcode: c_int, + pub aio_reqprio: c_int, + pub aio_buf: *mut c_void, + pub aio_nbytes: size_t, + pub aio_sigevent: crate::sigevent, + __td: *mut c_void, + __lock: [c_int; 2], + __err: c_int, + __ret: ssize_t, + pub aio_offset: off_t, + __next: *mut 
c_void,
+        __prev: *mut c_void,
+        #[cfg(target_pointer_width = "32")]
+        __dummy4: [c_char; 24],
+        #[cfg(target_pointer_width = "64")]
+        __dummy4: [c_char; 16],
+    }
+
+    // FIXME(1.0): This should not implement `PartialEq`
+    #[allow(unpredictable_function_pointer_comparisons)]
+    pub struct sigaction {
+        pub sa_sigaction: crate::sighandler_t,
+        pub sa_mask: crate::sigset_t,
+        pub sa_flags: c_int,
+        pub sa_restorer: Option<extern "C" fn()>,
+    }
+
+    pub struct termios {
+        pub c_iflag: crate::tcflag_t,
+        pub c_oflag: crate::tcflag_t,
+        pub c_cflag: crate::tcflag_t,
+        pub c_lflag: crate::tcflag_t,
+        pub c_line: crate::cc_t,
+        pub c_cc: [crate::cc_t; crate::NCCS],
+        pub __c_ispeed: crate::speed_t,
+        pub __c_ospeed: crate::speed_t,
+    }
+
+    pub struct flock {
+        pub l_type: c_short,
+        pub l_whence: c_short,
+        pub l_start: off_t,
+        pub l_len: off_t,
+        pub l_pid: crate::pid_t,
+    }
+
+    pub struct ucred {
+        pub pid: crate::pid_t,
+        pub uid: crate::uid_t,
+        pub gid: crate::gid_t,
+    }
+
+    pub struct sockaddr {
+        pub sa_family: sa_family_t,
+        pub sa_data: [c_char; 14],
+    }
+
+    pub struct sockaddr_in {
+        pub sin_family: sa_family_t,
+        pub sin_port: crate::in_port_t,
+        pub sin_addr: crate::in_addr,
+        pub sin_zero: [u8; 8],
+    }
+
+    pub struct sockaddr_in6 {
+        pub sin6_family: sa_family_t,
+        pub sin6_port: crate::in_port_t,
+        pub sin6_flowinfo: u32,
+        pub sin6_addr: crate::in6_addr,
+        pub sin6_scope_id: u32,
+    }
+
+    pub struct sockaddr_vm {
+        pub svm_family: sa_family_t,
+        pub svm_reserved1: c_ushort,
+        pub svm_port: crate::in_port_t,
+        pub svm_cid: c_uint,
+        pub svm_zero: [u8; 4],
+    }
+
+    pub struct addrinfo {
+        pub ai_flags: c_int,
+        pub ai_family: c_int,
+        pub ai_socktype: c_int,
+        pub ai_protocol: c_int,
+        pub ai_addrlen: socklen_t,
+
+        pub ai_addr: *mut crate::sockaddr,
+
+        pub ai_canonname: *mut c_char,
+
+        pub ai_next: *mut addrinfo,
+    }
+
+    pub struct sockaddr_ll {
+        pub sll_family: c_ushort,
+        pub sll_protocol: c_ushort,
+        pub sll_ifindex: c_int,
+        pub sll_hatype: c_ushort,
+        pub sll_pkttype: c_uchar,
+        pub sll_halen: c_uchar,
+        pub sll_addr: [c_uchar; 8],
+    }
+
+    pub struct fd_set {
+        fds_bits: [c_ulong; FD_SETSIZE as usize / ULONG_SIZE],
+    }
+
+    pub struct tm {
+        pub tm_sec: c_int,
+        pub tm_min: c_int,
+        pub tm_hour: c_int,
+        pub tm_mday: c_int,
+        pub tm_mon: c_int,
+        pub tm_year: c_int,
+        pub tm_wday: c_int,
+        pub tm_yday: c_int,
+        pub tm_isdst: c_int,
+        pub tm_gmtoff: c_long,
+        pub tm_zone: *const c_char,
+    }
+
+    pub struct sched_param {
+        pub sched_priority: c_int,
+        pub sched_ss_low_priority: c_int,
+        pub sched_ss_repl_period: crate::timespec,
+        pub sched_ss_init_budget: crate::timespec,
+        pub sched_ss_max_repl: c_int,
+    }
+
+    pub struct Dl_info {
+        pub dli_fname: *const c_char,
+        pub dli_fbase: *mut c_void,
+        pub dli_sname: *const c_char,
+        pub dli_saddr: *mut c_void,
+    }
+
+    pub struct epoll_event {
+        pub events: u32,
+        pub u64: u64,
+    }
+
+    pub struct lconv {
+        pub decimal_point: *mut c_char,
+        pub thousands_sep: *mut c_char,
+        pub grouping: *mut c_char,
+        pub int_curr_symbol: *mut c_char,
+        pub currency_symbol: *mut c_char,
+        pub mon_decimal_point: *mut c_char,
+        pub mon_thousands_sep: *mut c_char,
+        pub mon_grouping: *mut c_char,
+        pub positive_sign: *mut c_char,
+        pub negative_sign: *mut c_char,
+        pub int_frac_digits: c_char,
+        pub frac_digits: c_char,
+        pub p_cs_precedes: c_char,
+        pub p_sep_by_space: c_char,
+        pub n_cs_precedes: c_char,
+        pub n_sep_by_space: c_char,
+        pub p_sign_posn: c_char,
+        pub n_sign_posn: c_char,
+        pub int_p_cs_precedes: c_char,
+        pub int_p_sep_by_space: c_char,
+        pub 
int_n_cs_precedes: c_char, + pub int_n_sep_by_space: c_char, + pub int_p_sign_posn: c_char, + pub int_n_sign_posn: c_char, + } + + pub struct rlimit64 { + pub rlim_cur: rlim64_t, + pub rlim_max: rlim64_t, + } + + pub struct glob_t { + pub gl_pathc: size_t, + pub gl_pathv: *mut *mut c_char, + pub gl_offs: size_t, + pub gl_flags: c_int, + + __unused1: *mut c_void, + __unused2: *mut c_void, + __unused3: *mut c_void, + __unused4: *mut c_void, + __unused5: *mut c_void, + } + + pub struct ifaddrs { + pub ifa_next: *mut ifaddrs, + pub ifa_name: *mut c_char, + pub ifa_flags: c_uint, + pub ifa_addr: *mut crate::sockaddr, + pub ifa_netmask: *mut crate::sockaddr, + pub ifa_ifu: *mut crate::sockaddr, // FIXME(union) This should be a union + pub ifa_data: *mut c_void, + } + + pub struct passwd { + pub pw_name: *mut c_char, + pub pw_passwd: *mut c_char, + pub pw_uid: crate::uid_t, + pub pw_gid: crate::gid_t, + pub pw_gecos: *mut c_char, + pub pw_dir: *mut c_char, + pub pw_shell: *mut c_char, + } + + pub struct spwd { + pub sp_namp: *mut c_char, + pub sp_pwdp: *mut c_char, + pub sp_lstchg: c_long, + pub sp_min: c_long, + pub sp_max: c_long, + pub sp_warn: c_long, + pub sp_inact: c_long, + pub sp_expire: c_long, + pub sp_flag: c_ulong, + } + + pub struct statvfs { + pub f_bsize: c_ulong, + pub f_frsize: c_ulong, + pub f_blocks: crate::fsblkcnt_t, + pub f_bfree: crate::fsblkcnt_t, + pub f_bavail: crate::fsblkcnt_t, + pub f_files: crate::fsfilcnt_t, + pub f_ffree: crate::fsfilcnt_t, + pub f_favail: crate::fsfilcnt_t, + #[cfg(target_endian = "little")] + pub f_fsid: c_ulong, + #[cfg(all(target_pointer_width = "32", not(target_arch = "x86_64")))] + __f_unused: c_int, + #[cfg(target_endian = "big")] + pub f_fsid: c_ulong, + pub f_flag: c_ulong, + pub f_namemax: c_ulong, + __f_spare: [c_int; 6], + } + + pub struct dqblk { + pub dqb_bhardlimit: u64, + pub dqb_bsoftlimit: u64, + pub dqb_curspace: u64, + pub dqb_ihardlimit: u64, + pub dqb_isoftlimit: u64, + pub dqb_curinodes: u64, + pub dqb_btime: u64, + pub dqb_itime: u64, + pub dqb_valid: u32, + } + + pub struct signalfd_siginfo { + pub ssi_signo: u32, + pub ssi_errno: i32, + pub ssi_code: i32, + pub ssi_pid: u32, + pub ssi_uid: u32, + pub ssi_fd: i32, + pub ssi_tid: u32, + pub ssi_band: u32, + pub ssi_overrun: u32, + pub ssi_trapno: u32, + pub ssi_status: i32, + pub ssi_int: i32, + pub ssi_ptr: u64, + pub ssi_utime: u64, + pub ssi_stime: u64, + pub ssi_addr: u64, + pub ssi_addr_lsb: u16, + _pad2: u16, + pub ssi_syscall: i32, + pub ssi_call_addr: u64, + pub ssi_arch: u32, + _pad: [u8; 28], + } + + pub struct itimerspec { + pub it_interval: crate::timespec, + pub it_value: crate::timespec, + } + + pub struct fsid_t { + __val: [c_int; 2], + } + + pub struct cpu_set_t { + #[cfg(all(target_pointer_width = "32", not(target_arch = "x86_64")))] + bits: [u32; 32], + #[cfg(not(all(target_pointer_width = "32", not(target_arch = "x86_64"))))] + bits: [u64; 16], + } + + pub struct if_nameindex { + pub if_index: c_uint, + pub if_name: *mut c_char, + } + + // System V IPC + pub struct msginfo { + pub msgpool: c_int, + pub msgmap: c_int, + pub msgmax: c_int, + pub msgmnb: c_int, + pub msgmni: c_int, + pub msgssz: c_int, + pub msgtql: c_int, + pub msgseg: c_ushort, + } + + pub struct mmsghdr { + pub msg_hdr: crate::msghdr, + pub msg_len: c_uint, + } + + pub struct sembuf { + pub sem_num: c_ushort, + pub sem_op: c_short, + pub sem_flg: c_short, + } + + pub struct input_event { + pub time: crate::timeval, + pub type_: crate::__u16, + pub code: crate::__u16, + pub value: 
crate::__s32, + } + + pub struct input_id { + pub bustype: crate::__u16, + pub vendor: crate::__u16, + pub product: crate::__u16, + pub version: crate::__u16, + } + + pub struct input_absinfo { + pub value: crate::__s32, + pub minimum: crate::__s32, + pub maximum: crate::__s32, + pub fuzz: crate::__s32, + pub flat: crate::__s32, + pub resolution: crate::__s32, + } + + pub struct input_keymap_entry { + pub flags: crate::__u8, + pub len: crate::__u8, + pub index: crate::__u16, + pub keycode: crate::__u32, + pub scancode: [crate::__u8; 32], + } + + pub struct input_mask { + pub type_: crate::__u32, + pub codes_size: crate::__u32, + pub codes_ptr: crate::__u64, + } + + pub struct ff_replay { + pub length: crate::__u16, + pub delay: crate::__u16, + } + + pub struct ff_trigger { + pub button: crate::__u16, + pub interval: crate::__u16, + } + + pub struct ff_envelope { + pub attack_length: crate::__u16, + pub attack_level: crate::__u16, + pub fade_length: crate::__u16, + pub fade_level: crate::__u16, + } + + pub struct ff_constant_effect { + pub level: crate::__s16, + pub envelope: ff_envelope, + } + + pub struct ff_ramp_effect { + pub start_level: crate::__s16, + pub end_level: crate::__s16, + pub envelope: ff_envelope, + } + + pub struct ff_condition_effect { + pub right_saturation: crate::__u16, + pub left_saturation: crate::__u16, + + pub right_coeff: crate::__s16, + pub left_coeff: crate::__s16, + + pub deadband: crate::__u16, + pub center: crate::__s16, + } + + pub struct ff_periodic_effect { + pub waveform: crate::__u16, + pub period: crate::__u16, + pub magnitude: crate::__s16, + pub offset: crate::__s16, + pub phase: crate::__u16, + + pub envelope: ff_envelope, + + pub custom_len: crate::__u32, + pub custom_data: *mut crate::__s16, + } + + pub struct ff_rumble_effect { + pub strong_magnitude: crate::__u16, + pub weak_magnitude: crate::__u16, + } + + pub struct ff_effect { + pub type_: crate::__u16, + pub id: crate::__s16, + pub direction: crate::__u16, + pub trigger: ff_trigger, + pub replay: ff_replay, + // FIXME(1.0): this is actually a union + #[cfg(target_pointer_width = "64")] + pub u: [u64; 4], + #[cfg(target_pointer_width = "32")] + pub u: [u32; 7], + } + + pub struct dl_phdr_info { + #[cfg(target_pointer_width = "64")] + pub dlpi_addr: Elf64_Addr, + #[cfg(target_pointer_width = "32")] + pub dlpi_addr: Elf32_Addr, + + pub dlpi_name: *const c_char, + + #[cfg(target_pointer_width = "64")] + pub dlpi_phdr: *const Elf64_Phdr, + #[cfg(target_pointer_width = "32")] + pub dlpi_phdr: *const Elf32_Phdr, + + #[cfg(target_pointer_width = "64")] + pub dlpi_phnum: Elf64_Half, + #[cfg(target_pointer_width = "32")] + pub dlpi_phnum: Elf32_Half, + + pub dlpi_adds: c_ulonglong, + pub dlpi_subs: c_ulonglong, + pub dlpi_tls_modid: size_t, + pub dlpi_tls_data: *mut c_void, + } + + pub struct Elf32_Phdr { + pub p_type: Elf32_Word, + pub p_offset: Elf32_Off, + pub p_vaddr: Elf32_Addr, + pub p_paddr: Elf32_Addr, + pub p_filesz: Elf32_Word, + pub p_memsz: Elf32_Word, + pub p_flags: Elf32_Word, + pub p_align: Elf32_Word, + } + + pub struct Elf64_Phdr { + pub p_type: Elf64_Word, + pub p_flags: Elf64_Word, + pub p_offset: Elf64_Off, + pub p_vaddr: Elf64_Addr, + pub p_paddr: Elf64_Addr, + pub p_filesz: Elf64_Xword, + pub p_memsz: Elf64_Xword, + pub p_align: Elf64_Xword, + } + + pub struct statfs64 { + pub f_type: c_ulong, + pub f_bsize: c_ulong, + pub f_blocks: crate::fsblkcnt_t, + pub f_bfree: crate::fsblkcnt_t, + pub f_bavail: crate::fsblkcnt_t, + pub f_files: crate::fsfilcnt_t, + pub f_ffree: 
crate::fsfilcnt_t, + pub f_fsid: crate::fsid_t, + pub f_namelen: c_ulong, + pub f_frsize: c_ulong, + pub f_flags: c_ulong, + pub f_spare: [c_ulong; 4], + } + + pub struct statvfs64 { + pub f_bsize: c_ulong, + pub f_frsize: c_ulong, + pub f_blocks: u64, + pub f_bfree: u64, + pub f_bavail: u64, + pub f_files: u64, + pub f_ffree: u64, + pub f_favail: u64, + pub f_fsid: c_ulong, + pub f_flag: c_ulong, + pub f_namemax: c_ulong, + __f_spare: [c_int; 6], + } + + pub struct stack_t { + pub ss_sp: *mut c_void, + pub ss_flags: c_int, + pub ss_size: size_t, + } + + pub struct pthread_attr_t { + __size: [u64; 7], + } + + pub struct sigset_t { + __val: [c_ulong; 16], + } + + pub struct shmid_ds { + pub shm_perm: crate::ipc_perm, + pub shm_segsz: size_t, + pub shm_atime: crate::time_t, + pub shm_dtime: crate::time_t, + pub shm_ctime: crate::time_t, + pub shm_cpid: crate::pid_t, + pub shm_lpid: crate::pid_t, + pub shm_nattch: c_ulong, + __pad1: c_ulong, + __pad2: c_ulong, + } + + pub struct msqid_ds { + pub msg_perm: crate::ipc_perm, + pub msg_stime: crate::time_t, + pub msg_rtime: crate::time_t, + pub msg_ctime: crate::time_t, + pub __msg_cbytes: c_ulong, + pub msg_qnum: crate::msgqnum_t, + pub msg_qbytes: crate::msglen_t, + pub msg_lspid: crate::pid_t, + pub msg_lrpid: crate::pid_t, + __pad1: c_ulong, + __pad2: c_ulong, + } + + pub struct statfs { + pub f_type: c_ulong, + pub f_bsize: c_ulong, + pub f_blocks: crate::fsblkcnt_t, + pub f_bfree: crate::fsblkcnt_t, + pub f_bavail: crate::fsblkcnt_t, + pub f_files: crate::fsfilcnt_t, + pub f_ffree: crate::fsfilcnt_t, + pub f_fsid: crate::fsid_t, + pub f_namelen: c_ulong, + pub f_frsize: c_ulong, + pub f_flags: c_ulong, + pub f_spare: [c_ulong; 4], + } + + pub struct msghdr { + pub msg_name: *mut c_void, + pub msg_namelen: crate::socklen_t, + pub msg_iov: *mut crate::iovec, + pub msg_iovlen: c_int, + __pad1: c_int, + pub msg_control: *mut c_void, + pub msg_controllen: crate::socklen_t, + __pad2: crate::socklen_t, + pub msg_flags: c_int, + } + + pub struct cmsghdr { + pub cmsg_len: crate::socklen_t, + pub __pad1: c_int, + pub cmsg_level: c_int, + pub cmsg_type: c_int, + } + + pub struct sem_t { + __val: [c_int; 8], + } + + pub struct siginfo_t { + pub si_signo: c_int, + pub si_errno: c_int, + pub si_code: c_int, + pub _pad: [c_int; 29], + _align: [usize; 0], + } + + pub struct termios2 { + pub c_iflag: crate::tcflag_t, + pub c_oflag: crate::tcflag_t, + pub c_cflag: crate::tcflag_t, + pub c_lflag: crate::tcflag_t, + pub c_line: crate::cc_t, + pub c_cc: [crate::cc_t; 19], + pub c_ispeed: crate::speed_t, + pub c_ospeed: crate::speed_t, + } + + pub struct in6_pktinfo { + pub ipi6_addr: crate::in6_addr, + pub ipi6_ifindex: c_uint, + } + + #[cfg_attr( + any(target_pointer_width = "32", target_arch = "x86_64"), + repr(align(4)) + )] + #[cfg_attr( + not(any(target_pointer_width = "32", target_arch = "x86_64")), + repr(align(8)) + )] + pub struct pthread_mutexattr_t { + size: [u8; crate::__SIZEOF_PTHREAD_MUTEXATTR_T], + } + + #[cfg_attr(target_pointer_width = "32", repr(align(4)))] + #[cfg_attr(target_pointer_width = "64", repr(align(8)))] + pub struct pthread_rwlockattr_t { + size: [u8; crate::__SIZEOF_PTHREAD_RWLOCKATTR_T], + } + + #[repr(align(4))] + pub struct pthread_condattr_t { + size: [u8; crate::__SIZEOF_PTHREAD_CONDATTR_T], + } +} + +s_no_extra_traits! 
{ + pub struct sysinfo { + pub uptime: c_ulong, + pub loads: [c_ulong; 3], + pub totalram: c_ulong, + pub freeram: c_ulong, + pub sharedram: c_ulong, + pub bufferram: c_ulong, + pub totalswap: c_ulong, + pub freeswap: c_ulong, + pub procs: c_ushort, + pub pad: c_ushort, + pub totalhigh: c_ulong, + pub freehigh: c_ulong, + pub mem_unit: c_uint, + pub __reserved: [c_char; 256], + } + + pub struct sockaddr_un { + pub sun_family: sa_family_t, + pub sun_path: [c_char; 108], + } + + pub struct sockaddr_storage { + pub ss_family: sa_family_t, + __ss_pad2: [u8; 128 - 2 - 8], + __ss_align: size_t, + } + + pub struct utsname { + pub sysname: [c_char; 65], + pub nodename: [c_char; 65], + pub release: [c_char; 65], + pub version: [c_char; 65], + pub machine: [c_char; 65], + pub domainname: [c_char; 65], + } + + pub struct dirent { + pub d_ino: crate::ino_t, + pub d_off: off_t, + pub d_reclen: c_ushort, + pub d_type: c_uchar, + pub d_name: [c_char; 256], + } + + pub struct dirent64 { + pub d_ino: crate::ino64_t, + pub d_off: off64_t, + pub d_reclen: c_ushort, + pub d_type: c_uchar, + pub d_name: [c_char; 256], + } + + // x32 compatibility + // See https://sourceware.org/bugzilla/show_bug.cgi?id=21279 + pub struct mq_attr { + #[cfg(all(target_arch = "x86_64", target_pointer_width = "32"))] + pub mq_flags: i64, + #[cfg(all(target_arch = "x86_64", target_pointer_width = "32"))] + pub mq_maxmsg: i64, + #[cfg(all(target_arch = "x86_64", target_pointer_width = "32"))] + pub mq_msgsize: i64, + #[cfg(all(target_arch = "x86_64", target_pointer_width = "32"))] + pub mq_curmsgs: i64, + #[cfg(all(target_arch = "x86_64", target_pointer_width = "32"))] + pad: [i64; 4], + + #[cfg(not(all(target_arch = "x86_64", target_pointer_width = "32")))] + pub mq_flags: c_long, + #[cfg(not(all(target_arch = "x86_64", target_pointer_width = "32")))] + pub mq_maxmsg: c_long, + #[cfg(not(all(target_arch = "x86_64", target_pointer_width = "32")))] + pub mq_msgsize: c_long, + #[cfg(not(all(target_arch = "x86_64", target_pointer_width = "32")))] + pub mq_curmsgs: c_long, + #[cfg(not(all(target_arch = "x86_64", target_pointer_width = "32")))] + pad: [c_long; 4], + } + + pub struct sockaddr_nl { + pub nl_family: crate::sa_family_t, + nl_pad: c_ushort, + pub nl_pid: u32, + pub nl_groups: u32, + } + + pub struct sigevent { + pub sigev_value: crate::sigval, + pub sigev_signo: c_int, + pub sigev_notify: c_int, + pub sigev_notify_function: fn(crate::sigval), + pub sigev_notify_attributes: *mut pthread_attr_t, + pub __pad: [c_char; 56 - 3 * 8], + } + + #[cfg_attr( + all( + target_pointer_width = "32", + any(target_arch = "arm", target_arch = "x86_64") + ), + repr(align(4)) + )] + #[cfg_attr( + any( + target_pointer_width = "64", + not(any(target_arch = "arm", target_arch = "x86_64")) + ), + repr(align(8)) + )] + pub struct pthread_mutex_t { + size: [u8; crate::__SIZEOF_PTHREAD_MUTEX_T], + } + + #[cfg_attr( + all( + target_pointer_width = "32", + any(target_arch = "arm", target_arch = "x86_64") + ), + repr(align(4)) + )] + #[cfg_attr( + any( + target_pointer_width = "64", + not(any(target_arch = "arm", target_arch = "x86_64")) + ), + repr(align(8)) + )] + pub struct pthread_rwlock_t { + size: [u8; crate::__SIZEOF_PTHREAD_RWLOCK_T], + } + + #[cfg_attr(target_pointer_width = "32", repr(align(4)))] + #[cfg_attr(target_pointer_width = "64", repr(align(8)))] + #[cfg_attr(target_arch = "x86", repr(align(4)))] + #[cfg_attr(not(target_arch = "x86"), repr(align(8)))] + pub struct pthread_cond_t { + size: [u8; crate::__SIZEOF_PTHREAD_COND_T], + } +} + 
+cfg_if! {
+    if #[cfg(feature = "extra_traits")] {
+        impl PartialEq for sysinfo {
+            fn eq(&self, other: &sysinfo) -> bool {
+                self.uptime == other.uptime
+                    && self.loads == other.loads
+                    && self.totalram == other.totalram
+                    && self.freeram == other.freeram
+                    && self.sharedram == other.sharedram
+                    && self.bufferram == other.bufferram
+                    && self.totalswap == other.totalswap
+                    && self.freeswap == other.freeswap
+                    && self.procs == other.procs
+                    && self.pad == other.pad
+                    && self.totalhigh == other.totalhigh
+                    && self.freehigh == other.freehigh
+                    && self.mem_unit == other.mem_unit
+                    && self
+                        .__reserved
+                        .iter()
+                        .zip(other.__reserved.iter())
+                        .all(|(a, b)| a == b)
+            }
+        }
+        impl Eq for sysinfo {}
+        impl hash::Hash for sysinfo {
+            fn hash<H: hash::Hasher>(&self, state: &mut H) {
+                self.uptime.hash(state);
+                self.loads.hash(state);
+                self.totalram.hash(state);
+                self.freeram.hash(state);
+                self.sharedram.hash(state);
+                self.bufferram.hash(state);
+                self.totalswap.hash(state);
+                self.freeswap.hash(state);
+                self.procs.hash(state);
+                self.pad.hash(state);
+                self.totalhigh.hash(state);
+                self.freehigh.hash(state);
+                self.mem_unit.hash(state);
+                self.__reserved.hash(state);
+            }
+        }
+
+        impl PartialEq for sockaddr_un {
+            fn eq(&self, other: &sockaddr_un) -> bool {
+                self.sun_family == other.sun_family
+                    && self
+                        .sun_path
+                        .iter()
+                        .zip(other.sun_path.iter())
+                        .all(|(a, b)| a == b)
+            }
+        }
+        impl Eq for sockaddr_un {}
+        impl hash::Hash for sockaddr_un {
+            fn hash<H: hash::Hasher>(&self, state: &mut H) {
+                self.sun_family.hash(state);
+                self.sun_path.hash(state);
+            }
+        }
+
+        impl PartialEq for sockaddr_storage {
+            fn eq(&self, other: &sockaddr_storage) -> bool {
+                self.ss_family == other.ss_family
+                    && self.__ss_align == other.__ss_align
+                    && self
+                        .__ss_pad2
+                        .iter()
+                        .zip(other.__ss_pad2.iter())
+                        .all(|(a, b)| a == b)
+            }
+        }
+        impl Eq for sockaddr_storage {}
+        impl hash::Hash for sockaddr_storage {
+            fn hash<H: hash::Hasher>(&self, state: &mut H) {
+                self.ss_family.hash(state);
+                self.__ss_align.hash(state);
+                self.__ss_pad2.hash(state);
+            }
+        }
+
+        impl PartialEq for utsname {
+            fn eq(&self, other: &utsname) -> bool {
+                self.sysname
+                    .iter()
+                    .zip(other.sysname.iter())
+                    .all(|(a, b)| a == b)
+                    && self
+                        .nodename
+                        .iter()
+                        .zip(other.nodename.iter())
+                        .all(|(a, b)| a == b)
+                    && self
+                        .release
+                        .iter()
+                        .zip(other.release.iter())
+                        .all(|(a, b)| a == b)
+                    && self
+                        .version
+                        .iter()
+                        .zip(other.version.iter())
+                        .all(|(a, b)| a == b)
+                    && self
+                        .machine
+                        .iter()
+                        .zip(other.machine.iter())
+                        .all(|(a, b)| a == b)
+            }
+        }
+        impl Eq for utsname {}
+        impl hash::Hash for utsname {
+            fn hash<H: hash::Hasher>(&self, state: &mut H) {
+                self.sysname.hash(state);
+                self.nodename.hash(state);
+                self.release.hash(state);
+                self.version.hash(state);
+                self.machine.hash(state);
+            }
+        }
+
+        impl PartialEq for dirent {
+            fn eq(&self, other: &dirent) -> bool {
+                self.d_ino == other.d_ino
+                    && self.d_off == other.d_off
+                    && self.d_reclen == other.d_reclen
+                    && self.d_type == other.d_type
+                    && self
+                        .d_name
+                        .iter()
+                        .zip(other.d_name.iter())
+                        .all(|(a, b)| a == b)
+            }
+        }
+        impl Eq for dirent {}
+        impl hash::Hash for dirent {
+            fn hash<H: hash::Hasher>(&self, state: &mut H) {
+                self.d_ino.hash(state);
+                self.d_off.hash(state);
+                self.d_reclen.hash(state);
+                self.d_type.hash(state);
+                self.d_name.hash(state);
+            }
+        }
+
+        impl PartialEq for dirent64 {
+            fn eq(&self, other: &dirent64) -> bool {
+                self.d_ino == other.d_ino
+                    && self.d_off == other.d_off
+                    && self.d_reclen == other.d_reclen
+                    && self.d_type == other.d_type
+                    && self
+                        .d_name
+                        .iter()
+                        .zip(other.d_name.iter())
+                        .all(|(a, b)| a == b)
+            }
+        }
+        impl Eq for dirent64 {}
+        impl hash::Hash for dirent64 {
+            fn hash<H: hash::Hasher>(&self, state: &mut H) {
+                self.d_ino.hash(state);
+                self.d_off.hash(state);
+                self.d_reclen.hash(state);
+                self.d_type.hash(state);
+                self.d_name.hash(state);
+            }
+        }
+
+        impl PartialEq for mq_attr {
+            fn eq(&self, other: &mq_attr) -> bool {
+                self.mq_flags == other.mq_flags
+                    && self.mq_maxmsg == other.mq_maxmsg
+                    && self.mq_msgsize == other.mq_msgsize
+                    && self.mq_curmsgs == other.mq_curmsgs
+            }
+        }
+        impl Eq for mq_attr {}
+        impl hash::Hash for mq_attr {
+            fn hash<H: hash::Hasher>(&self, state: &mut H) {
+                self.mq_flags.hash(state);
+                self.mq_maxmsg.hash(state);
+                self.mq_msgsize.hash(state);
+                self.mq_curmsgs.hash(state);
+            }
+        }
+
+        impl PartialEq for sockaddr_nl {
+            fn eq(&self, other: &sockaddr_nl) -> bool {
+                self.nl_family == other.nl_family
+                    && self.nl_pid == other.nl_pid
+                    && self.nl_groups == other.nl_groups
+            }
+        }
+        impl Eq for sockaddr_nl {}
+        impl hash::Hash for sockaddr_nl {
+            fn hash<H: hash::Hasher>(&self, state: &mut H) {
+                self.nl_family.hash(state);
+                self.nl_pid.hash(state);
+                self.nl_groups.hash(state);
+            }
+        }
+
+        // FIXME(msrv): suggested method was added in 1.85
+        #[allow(unpredictable_function_pointer_comparisons)]
+        impl PartialEq for sigevent {
+            fn eq(&self, other: &sigevent) -> bool {
+                self.sigev_value == other.sigev_value
+                    && self.sigev_signo == other.sigev_signo
+                    && self.sigev_notify == other.sigev_notify
+                    && self.sigev_notify_function == other.sigev_notify_function
+                    && self.sigev_notify_attributes == other.sigev_notify_attributes
+            }
+        }
+        impl Eq for sigevent {}
+        impl hash::Hash for sigevent {
+            fn hash<H: hash::Hasher>(&self, state: &mut H) {
+                self.sigev_value.hash(state);
+                self.sigev_signo.hash(state);
+                self.sigev_notify.hash(state);
+                self.sigev_notify_function.hash(state);
+                self.sigev_notify_attributes.hash(state);
+            }
+        }
+
+        impl PartialEq for pthread_cond_t {
+            fn eq(&self, other: &pthread_cond_t) -> bool {
+                self.size.iter().zip(other.size.iter()).all(|(a, b)| a == b)
+            }
+        }
+        impl Eq for pthread_cond_t {}
+        impl hash::Hash for pthread_cond_t {
+            fn hash<H: hash::Hasher>(&self, state: &mut H) {
+                self.size.hash(state);
+            }
+        }
+
+        impl PartialEq for pthread_mutex_t {
+            fn eq(&self, other: &pthread_mutex_t) -> bool {
+                self.size.iter().zip(other.size.iter()).all(|(a, b)| a == b)
+            }
+        }
+        impl Eq for pthread_mutex_t {}
+        impl hash::Hash for pthread_mutex_t {
+            fn hash<H: hash::Hasher>(&self, state: &mut H) {
+                self.size.hash(state);
+            }
+        }
+
+        impl PartialEq for pthread_rwlock_t {
+            fn eq(&self, other: &pthread_rwlock_t) -> bool {
+                self.size.iter().zip(other.size.iter()).all(|(a, b)| a == b)
+            }
+        }
+        impl Eq for pthread_rwlock_t {}
+        impl hash::Hash for pthread_rwlock_t {
+            fn hash<H: hash::Hasher>(&self, state: &mut H) {
+                self.size.hash(state);
+            }
+        }
+    }
+}
+
+// PUB_CONST
+
+pub const INT_MIN: c_int = -2147483648;
+pub const INT_MAX: c_int = 2147483647;
+
+pub const SIG_DFL: sighandler_t = 0 as sighandler_t;
+pub const SIG_IGN: sighandler_t = 1 as sighandler_t;
+pub const SIG_ERR: sighandler_t = !0 as sighandler_t;
+
+pub const DT_UNKNOWN: u8 = 0;
+pub const DT_FIFO: u8 = 1;
+pub const DT_CHR: u8 = 2;
+pub const DT_DIR: u8 = 4;
+pub const DT_BLK: u8 = 6;
+pub const DT_REG: u8 = 8;
+pub const DT_LNK: u8 = 10;
+pub const DT_SOCK: u8 = 12;
+
+pub const FD_CLOEXEC: c_int = 0x1;
+
+pub const USRQUOTA: c_int = 0;
+pub const GRPQUOTA: c_int = 1;
+
+pub const SIGIOT: c_int = 6;
+
+pub const S_ISUID: mode_t = 0o4000;
+pub const S_ISGID: mode_t = 0o2000;
+pub const S_ISVTX: mode_t = 0o1000;
+
+pub const 
IF_NAMESIZE: size_t = 16; +pub const IFNAMSIZ: size_t = IF_NAMESIZE; + +pub const LOG_EMERG: c_int = 0; +pub const LOG_ALERT: c_int = 1; +pub const LOG_CRIT: c_int = 2; +pub const LOG_ERR: c_int = 3; +pub const LOG_WARNING: c_int = 4; +pub const LOG_NOTICE: c_int = 5; +pub const LOG_INFO: c_int = 6; +pub const LOG_DEBUG: c_int = 7; + +pub const LOG_KERN: c_int = 0; +pub const LOG_USER: c_int = 1 << 3; +pub const LOG_MAIL: c_int = 2 << 3; +pub const LOG_DAEMON: c_int = 3 << 3; +pub const LOG_AUTH: c_int = 4 << 3; +pub const LOG_SYSLOG: c_int = 5 << 3; +pub const LOG_LPR: c_int = 6 << 3; +pub const LOG_NEWS: c_int = 7 << 3; +pub const LOG_UUCP: c_int = 8 << 3; +pub const LOG_LOCAL0: c_int = 16 << 3; +pub const LOG_LOCAL1: c_int = 17 << 3; +pub const LOG_LOCAL2: c_int = 18 << 3; +pub const LOG_LOCAL3: c_int = 19 << 3; +pub const LOG_LOCAL4: c_int = 20 << 3; +pub const LOG_LOCAL5: c_int = 21 << 3; +pub const LOG_LOCAL6: c_int = 22 << 3; +pub const LOG_LOCAL7: c_int = 23 << 3; + +pub const LOG_PID: c_int = 0x01; +pub const LOG_CONS: c_int = 0x02; +pub const LOG_ODELAY: c_int = 0x04; +pub const LOG_NDELAY: c_int = 0x08; +pub const LOG_NOWAIT: c_int = 0x10; + +pub const LOG_PRIMASK: c_int = 7; +pub const LOG_FACMASK: c_int = 0x3f8; + +pub const PRIO_PROCESS: c_int = 0; +pub const PRIO_PGRP: c_int = 1; +pub const PRIO_USER: c_int = 2; + +pub const PRIO_MIN: c_int = -20; +pub const PRIO_MAX: c_int = 20; + +pub const IPPROTO_ICMP: c_int = 1; +pub const IPPROTO_ICMPV6: c_int = 58; +pub const IPPROTO_TCP: c_int = 6; +pub const IPPROTO_UDP: c_int = 17; +pub const IPPROTO_IP: c_int = 0; +pub const IPPROTO_IPV6: c_int = 41; + +pub const INADDR_LOOPBACK: in_addr_t = 2130706433; +pub const INADDR_ANY: in_addr_t = 0; +pub const INADDR_BROADCAST: in_addr_t = 4294967295; +pub const INADDR_NONE: in_addr_t = 4294967295; + +pub const EXIT_FAILURE: c_int = 1; +pub const EXIT_SUCCESS: c_int = 0; +pub const RAND_MAX: c_int = 2147483647; +pub const EOF: c_int = -1; +pub const SEEK_SET: c_int = 0; +pub const SEEK_CUR: c_int = 1; +pub const SEEK_END: c_int = 2; +pub const _IOFBF: c_int = 0; +pub const _IONBF: c_int = 2; +pub const _IOLBF: c_int = 1; + +pub const F_DUPFD: c_int = 0; +pub const F_GETFD: c_int = 1; +pub const F_SETFD: c_int = 2; +pub const F_GETFL: c_int = 3; +pub const F_SETFL: c_int = 4; + +// Linux-specific fcntls +pub const F_SETLEASE: c_int = 1024; +pub const F_GETLEASE: c_int = 1025; +pub const F_NOTIFY: c_int = 1026; +pub const F_CANCELLK: c_int = 1029; +pub const F_DUPFD_CLOEXEC: c_int = 1030; +pub const F_SETPIPE_SZ: c_int = 1031; +pub const F_GETPIPE_SZ: c_int = 1032; +pub const F_ADD_SEALS: c_int = 1033; +pub const F_GET_SEALS: c_int = 1034; + +pub const F_SEAL_SEAL: c_int = 0x0001; +pub const F_SEAL_SHRINK: c_int = 0x0002; +pub const F_SEAL_GROW: c_int = 0x0004; +pub const F_SEAL_WRITE: c_int = 0x0008; + +// FIXME(#235): Include file sealing fcntls once we have a way to verify them. 
+ +pub const SIGTRAP: c_int = 5; + +pub const PTHREAD_CREATE_JOINABLE: c_int = 0; +pub const PTHREAD_CREATE_DETACHED: c_int = 1; + +pub const CLOCK_REALTIME: crate::clockid_t = 0; +pub const CLOCK_MONOTONIC: crate::clockid_t = 1; +pub const CLOCK_PROCESS_CPUTIME_ID: crate::clockid_t = 2; +pub const CLOCK_THREAD_CPUTIME_ID: crate::clockid_t = 3; +pub const CLOCK_MONOTONIC_RAW: crate::clockid_t = 4; +pub const CLOCK_REALTIME_COARSE: crate::clockid_t = 5; +pub const CLOCK_MONOTONIC_COARSE: crate::clockid_t = 6; +pub const CLOCK_BOOTTIME: crate::clockid_t = 7; +pub const CLOCK_REALTIME_ALARM: crate::clockid_t = 8; +pub const CLOCK_BOOTTIME_ALARM: crate::clockid_t = 9; +pub const CLOCK_SGI_CYCLE: crate::clockid_t = 10; +pub const CLOCK_TAI: crate::clockid_t = 11; +pub const TIMER_ABSTIME: c_int = 1; + +pub const RLIMIT_CPU: c_int = 0; +pub const RLIMIT_FSIZE: c_int = 1; +pub const RLIMIT_DATA: c_int = 2; +pub const RLIMIT_STACK: c_int = 3; +pub const RLIMIT_CORE: c_int = 4; +pub const RLIMIT_LOCKS: c_int = 10; +pub const RLIMIT_SIGPENDING: c_int = 11; +pub const RLIMIT_MSGQUEUE: c_int = 12; +pub const RLIMIT_NICE: c_int = 13; +pub const RLIMIT_RTPRIO: c_int = 14; + +pub const RUSAGE_SELF: c_int = 0; + +pub const O_RDONLY: c_int = 0; +pub const O_WRONLY: c_int = 1; +pub const O_RDWR: c_int = 2; + +pub const S_IFIFO: mode_t = 0o1_0000; +pub const S_IFCHR: mode_t = 0o2_0000; +pub const S_IFBLK: mode_t = 0o6_0000; +pub const S_IFDIR: mode_t = 0o4_0000; +pub const S_IFREG: mode_t = 0o10_0000; +pub const S_IFLNK: mode_t = 0o12_0000; +pub const S_IFSOCK: mode_t = 0o14_0000; +pub const S_IFMT: mode_t = 0o17_0000; +pub const S_IRWXU: mode_t = 0o0700; +pub const S_IXUSR: mode_t = 0o0100; +pub const S_IWUSR: mode_t = 0o0200; +pub const S_IRUSR: mode_t = 0o0400; +pub const S_IRWXG: mode_t = 0o0070; +pub const S_IXGRP: mode_t = 0o0010; +pub const S_IWGRP: mode_t = 0o0020; +pub const S_IRGRP: mode_t = 0o0040; +pub const S_IRWXO: mode_t = 0o0007; +pub const S_IXOTH: mode_t = 0o0001; +pub const S_IWOTH: mode_t = 0o0002; +pub const S_IROTH: mode_t = 0o0004; +pub const F_OK: c_int = 0; +pub const R_OK: c_int = 4; +pub const W_OK: c_int = 2; +pub const X_OK: c_int = 1; +pub const STDIN_FILENO: c_int = 0; +pub const STDOUT_FILENO: c_int = 1; +pub const STDERR_FILENO: c_int = 2; +pub const SIGHUP: c_int = 1; +pub const SIGINT: c_int = 2; +pub const SIGQUIT: c_int = 3; +pub const SIGILL: c_int = 4; +pub const SIGABRT: c_int = 6; +pub const SIGFPE: c_int = 8; +pub const SIGKILL: c_int = 9; +pub const SIGSEGV: c_int = 11; +pub const SIGPIPE: c_int = 13; +pub const SIGALRM: c_int = 14; +pub const SIGTERM: c_int = 15; + +pub const PROT_NONE: c_int = 0; +pub const PROT_READ: c_int = 1; +pub const PROT_WRITE: c_int = 2; +pub const PROT_EXEC: c_int = 4; + +pub const LC_CTYPE: c_int = 0; +pub const LC_NUMERIC: c_int = 1; +pub const LC_TIME: c_int = 2; +pub const LC_COLLATE: c_int = 3; +pub const LC_MONETARY: c_int = 4; +pub const LC_MESSAGES: c_int = 5; +pub const LC_ALL: c_int = 6; +pub const LC_CTYPE_MASK: c_int = 1 << LC_CTYPE; +pub const LC_NUMERIC_MASK: c_int = 1 << LC_NUMERIC; +pub const LC_TIME_MASK: c_int = 1 << LC_TIME; +pub const LC_COLLATE_MASK: c_int = 1 << LC_COLLATE; +pub const LC_MONETARY_MASK: c_int = 1 << LC_MONETARY; +pub const LC_MESSAGES_MASK: c_int = 1 << LC_MESSAGES; +// LC_ALL_MASK defined per platform + +pub const MAP_FILE: c_int = 0x0000; +pub const MAP_SHARED: c_int = 0x0001; +pub const MAP_PRIVATE: c_int = 0x0002; +pub const MAP_FIXED: c_int = 0x0010; + +pub const MAP_FAILED: *mut c_void = !0 as 
*mut c_void; + +// MS_ flags for msync(2) +pub const MS_ASYNC: c_int = 0x0001; +pub const MS_INVALIDATE: c_int = 0x0002; +pub const MS_SYNC: c_int = 0x0004; + +// MS_ flags for mount(2) +pub const MS_RDONLY: c_ulong = 0x01; +pub const MS_NOSUID: c_ulong = 0x02; +pub const MS_NODEV: c_ulong = 0x04; +pub const MS_NOEXEC: c_ulong = 0x08; +pub const MS_SYNCHRONOUS: c_ulong = 0x10; +pub const MS_REMOUNT: c_ulong = 0x20; +pub const MS_MANDLOCK: c_ulong = 0x40; +pub const MS_DIRSYNC: c_ulong = 0x80; +pub const MS_NOATIME: c_ulong = 0x0400; +pub const MS_NODIRATIME: c_ulong = 0x0800; +pub const MS_BIND: c_ulong = 0x1000; +pub const MS_MOVE: c_ulong = 0x2000; +pub const MS_REC: c_ulong = 0x4000; +pub const MS_SILENT: c_ulong = 0x8000; +pub const MS_POSIXACL: c_ulong = 0x010000; +pub const MS_UNBINDABLE: c_ulong = 0x020000; +pub const MS_PRIVATE: c_ulong = 0x040000; +pub const MS_SLAVE: c_ulong = 0x080000; +pub const MS_SHARED: c_ulong = 0x100000; +pub const MS_RELATIME: c_ulong = 0x200000; +pub const MS_KERNMOUNT: c_ulong = 0x400000; +pub const MS_I_VERSION: c_ulong = 0x800000; +pub const MS_STRICTATIME: c_ulong = 0x1000000; +pub const MS_ACTIVE: c_ulong = 0x40000000; +pub const MS_NOUSER: c_ulong = 0x80000000; +pub const MS_MGC_VAL: c_ulong = 0xc0ed0000; +pub const MS_MGC_MSK: c_ulong = 0xffff0000; +pub const MS_RMT_MASK: c_ulong = 0x800051; + +pub const EPERM: c_int = 1; +pub const ENOENT: c_int = 2; +pub const ESRCH: c_int = 3; +pub const EINTR: c_int = 4; +pub const EIO: c_int = 5; +pub const ENXIO: c_int = 6; +pub const E2BIG: c_int = 7; +pub const ENOEXEC: c_int = 8; +pub const EBADF: c_int = 9; +pub const ECHILD: c_int = 10; +pub const EAGAIN: c_int = 11; +pub const ENOMEM: c_int = 12; +pub const EACCES: c_int = 13; +pub const EFAULT: c_int = 14; +pub const ENOTBLK: c_int = 15; +pub const EBUSY: c_int = 16; +pub const EEXIST: c_int = 17; +pub const EXDEV: c_int = 18; +pub const ENODEV: c_int = 19; +pub const ENOTDIR: c_int = 20; +pub const EISDIR: c_int = 21; +pub const EINVAL: c_int = 22; +pub const ENFILE: c_int = 23; +pub const EMFILE: c_int = 24; +pub const ENOTTY: c_int = 25; +pub const ETXTBSY: c_int = 26; +pub const EFBIG: c_int = 27; +pub const ENOSPC: c_int = 28; +pub const ESPIPE: c_int = 29; +pub const EROFS: c_int = 30; +pub const EMLINK: c_int = 31; +pub const EPIPE: c_int = 32; +pub const EDOM: c_int = 33; +pub const ERANGE: c_int = 34; +pub const EWOULDBLOCK: c_int = EAGAIN; + +pub const SCM_RIGHTS: c_int = 0x01; +pub const SCM_CREDENTIALS: c_int = 0x02; + +pub const PROT_GROWSDOWN: c_int = 0x1000000; +pub const PROT_GROWSUP: c_int = 0x2000000; + +pub const MAP_TYPE: c_int = 0x000f; + +pub const MADV_NORMAL: c_int = 0; +pub const MADV_RANDOM: c_int = 1; +pub const MADV_SEQUENTIAL: c_int = 2; +pub const MADV_WILLNEED: c_int = 3; +pub const MADV_DONTNEED: c_int = 4; +pub const MADV_FREE: c_int = 8; +pub const MADV_REMOVE: c_int = 9; +pub const MADV_DONTFORK: c_int = 10; +pub const MADV_DOFORK: c_int = 11; +pub const MADV_MERGEABLE: c_int = 12; +pub const MADV_UNMERGEABLE: c_int = 13; +pub const MADV_HUGEPAGE: c_int = 14; +pub const MADV_NOHUGEPAGE: c_int = 15; +pub const MADV_DONTDUMP: c_int = 16; +pub const MADV_DODUMP: c_int = 17; +pub const MADV_HWPOISON: c_int = 100; +pub const MADV_SOFT_OFFLINE: c_int = 101; + +pub const IFF_UP: c_int = 0x1; +pub const IFF_BROADCAST: c_int = 0x2; +pub const IFF_DEBUG: c_int = 0x4; +pub const IFF_LOOPBACK: c_int = 0x8; +pub const IFF_POINTOPOINT: c_int = 0x10; +pub const IFF_NOTRAILERS: c_int = 0x20; +pub const IFF_RUNNING: c_int = 0x40; 
+pub const IFF_NOARP: c_int = 0x80; +pub const IFF_PROMISC: c_int = 0x100; +pub const IFF_ALLMULTI: c_int = 0x200; +pub const IFF_MASTER: c_int = 0x400; +pub const IFF_SLAVE: c_int = 0x800; +pub const IFF_MULTICAST: c_int = 0x1000; +pub const IFF_PORTSEL: c_int = 0x2000; +pub const IFF_AUTOMEDIA: c_int = 0x4000; +pub const IFF_DYNAMIC: c_int = 0x8000; +pub const IFF_TUN: c_int = 0x0001; +pub const IFF_TAP: c_int = 0x0002; +pub const IFF_NO_PI: c_int = 0x1000; + +pub const SOL_IP: c_int = 0; +pub const SOL_TCP: c_int = 6; +pub const SOL_UDP: c_int = 17; +pub const SOL_IPV6: c_int = 41; +pub const SOL_ICMPV6: c_int = 58; +pub const SOL_RAW: c_int = 255; +pub const SOL_DECNET: c_int = 261; +pub const SOL_X25: c_int = 262; +pub const SOL_PACKET: c_int = 263; +pub const SOL_ATM: c_int = 264; +pub const SOL_AAL: c_int = 265; +pub const SOL_IRDA: c_int = 266; +pub const SOL_NETBEUI: c_int = 267; +pub const SOL_LLC: c_int = 268; +pub const SOL_DCCP: c_int = 269; +pub const SOL_NETLINK: c_int = 270; +pub const SOL_TIPC: c_int = 271; + +pub const AF_UNSPEC: c_int = 0; +pub const AF_UNIX: c_int = 1; +pub const AF_LOCAL: c_int = 1; +pub const AF_INET: c_int = 2; +pub const AF_AX25: c_int = 3; +pub const AF_IPX: c_int = 4; +pub const AF_APPLETALK: c_int = 5; +pub const AF_NETROM: c_int = 6; +pub const AF_BRIDGE: c_int = 7; +pub const AF_ATMPVC: c_int = 8; +pub const AF_X25: c_int = 9; +pub const AF_INET6: c_int = 10; +pub const AF_ROSE: c_int = 11; +pub const AF_DECnet: c_int = 12; +pub const AF_NETBEUI: c_int = 13; +pub const AF_SECURITY: c_int = 14; +pub const AF_KEY: c_int = 15; +pub const AF_NETLINK: c_int = 16; +pub const AF_ROUTE: c_int = AF_NETLINK; +pub const AF_PACKET: c_int = 17; +pub const AF_ASH: c_int = 18; +pub const AF_ECONET: c_int = 19; +pub const AF_ATMSVC: c_int = 20; +pub const AF_RDS: c_int = 21; +pub const AF_SNA: c_int = 22; +pub const AF_IRDA: c_int = 23; +pub const AF_PPPOX: c_int = 24; +pub const AF_WANPIPE: c_int = 25; +pub const AF_LLC: c_int = 26; +pub const AF_CAN: c_int = 29; +pub const AF_TIPC: c_int = 30; +pub const AF_BLUETOOTH: c_int = 31; +pub const AF_IUCV: c_int = 32; +pub const AF_RXRPC: c_int = 33; +pub const AF_ISDN: c_int = 34; +pub const AF_PHONET: c_int = 35; +pub const AF_IEEE802154: c_int = 36; +pub const AF_CAIF: c_int = 37; +pub const AF_ALG: c_int = 38; + +pub const PF_UNSPEC: c_int = AF_UNSPEC; +pub const PF_UNIX: c_int = AF_UNIX; +pub const PF_LOCAL: c_int = AF_LOCAL; +pub const PF_INET: c_int = AF_INET; +pub const PF_AX25: c_int = AF_AX25; +pub const PF_IPX: c_int = AF_IPX; +pub const PF_APPLETALK: c_int = AF_APPLETALK; +pub const PF_NETROM: c_int = AF_NETROM; +pub const PF_BRIDGE: c_int = AF_BRIDGE; +pub const PF_ATMPVC: c_int = AF_ATMPVC; +pub const PF_X25: c_int = AF_X25; +pub const PF_INET6: c_int = AF_INET6; +pub const PF_ROSE: c_int = AF_ROSE; +pub const PF_DECnet: c_int = AF_DECnet; +pub const PF_NETBEUI: c_int = AF_NETBEUI; +pub const PF_SECURITY: c_int = AF_SECURITY; +pub const PF_KEY: c_int = AF_KEY; +pub const PF_NETLINK: c_int = AF_NETLINK; +pub const PF_ROUTE: c_int = AF_ROUTE; +pub const PF_PACKET: c_int = AF_PACKET; +pub const PF_ASH: c_int = AF_ASH; +pub const PF_ECONET: c_int = AF_ECONET; +pub const PF_ATMSVC: c_int = AF_ATMSVC; +pub const PF_RDS: c_int = AF_RDS; +pub const PF_SNA: c_int = AF_SNA; +pub const PF_IRDA: c_int = AF_IRDA; +pub const PF_PPPOX: c_int = AF_PPPOX; +pub const PF_WANPIPE: c_int = AF_WANPIPE; +pub const PF_LLC: c_int = AF_LLC; +pub const PF_CAN: c_int = AF_CAN; +pub const PF_TIPC: c_int = AF_TIPC; +pub const 
PF_BLUETOOTH: c_int = AF_BLUETOOTH; +pub const PF_IUCV: c_int = AF_IUCV; +pub const PF_RXRPC: c_int = AF_RXRPC; +pub const PF_ISDN: c_int = AF_ISDN; +pub const PF_PHONET: c_int = AF_PHONET; +pub const PF_IEEE802154: c_int = AF_IEEE802154; +pub const PF_CAIF: c_int = AF_CAIF; +pub const PF_ALG: c_int = AF_ALG; + +pub const SOMAXCONN: c_int = 128; + +pub const MSG_OOB: c_int = 1; +pub const MSG_PEEK: c_int = 2; +pub const MSG_DONTROUTE: c_int = 4; +pub const MSG_CTRUNC: c_int = 8; +pub const MSG_TRUNC: c_int = 0x20; +pub const MSG_DONTWAIT: c_int = 0x40; +pub const MSG_EOR: c_int = 0x80; +pub const MSG_WAITALL: c_int = 0x100; +pub const MSG_FIN: c_int = 0x200; +pub const MSG_SYN: c_int = 0x400; +pub const MSG_CONFIRM: c_int = 0x800; +pub const MSG_RST: c_int = 0x1000; +pub const MSG_ERRQUEUE: c_int = 0x2000; +pub const MSG_NOSIGNAL: c_int = 0x4000; +pub const MSG_MORE: c_int = 0x8000; +pub const MSG_WAITFORONE: c_int = 0x10000; +pub const MSG_FASTOPEN: c_int = 0x20000000; +pub const MSG_CMSG_CLOEXEC: c_int = 0x40000000; + +pub const SCM_TIMESTAMP: c_int = SO_TIMESTAMP; + +pub const SOCK_RAW: c_int = 3; +pub const SOCK_RDM: c_int = 4; + +pub const IP_TOS: c_int = 1; +pub const IP_TTL: c_int = 2; +pub const IP_HDRINCL: c_int = 3; +pub const IP_RECVTOS: c_int = 13; +pub const IP_FREEBIND: c_int = 15; +pub const IP_TRANSPARENT: c_int = 19; +pub const IP_MULTICAST_IF: c_int = 32; +pub const IP_MULTICAST_TTL: c_int = 33; +pub const IP_MULTICAST_LOOP: c_int = 34; +pub const IP_ADD_MEMBERSHIP: c_int = 35; +pub const IP_DROP_MEMBERSHIP: c_int = 36; + +pub const IPV6_UNICAST_HOPS: c_int = 16; +pub const IPV6_MULTICAST_IF: c_int = 17; +pub const IPV6_MULTICAST_HOPS: c_int = 18; +pub const IPV6_MULTICAST_LOOP: c_int = 19; +pub const IPV6_ADD_MEMBERSHIP: c_int = 20; +pub const IPV6_DROP_MEMBERSHIP: c_int = 21; +pub const IPV6_V6ONLY: c_int = 26; +pub const IPV6_RECVPKTINFO: c_int = 49; +pub const IPV6_RECVTCLASS: c_int = 66; +pub const IPV6_TCLASS: c_int = 67; + +pub const TCP_NODELAY: c_int = 1; +pub const TCP_MAXSEG: c_int = 2; +pub const TCP_CORK: c_int = 3; +pub const TCP_KEEPIDLE: c_int = 4; +pub const TCP_KEEPINTVL: c_int = 5; +pub const TCP_KEEPCNT: c_int = 6; +pub const TCP_SYNCNT: c_int = 7; +pub const TCP_LINGER2: c_int = 8; +pub const TCP_DEFER_ACCEPT: c_int = 9; +pub const TCP_WINDOW_CLAMP: c_int = 10; +pub const TCP_INFO: c_int = 11; +pub const TCP_QUICKACK: c_int = 12; +pub const TCP_CONGESTION: c_int = 13; + +pub const SO_DEBUG: c_int = 1; + +pub const SHUT_RD: c_int = 0; +pub const SHUT_WR: c_int = 1; +pub const SHUT_RDWR: c_int = 2; + +pub const LOCK_SH: c_int = 1; +pub const LOCK_EX: c_int = 2; +pub const LOCK_NB: c_int = 4; +pub const LOCK_UN: c_int = 8; + +pub const SS_ONSTACK: c_int = 1; +pub const SS_DISABLE: c_int = 2; + +pub const PATH_MAX: c_int = 4096; + +pub const FD_SETSIZE: usize = 1024; + +pub const EPOLLIN: c_int = 0x1; +pub const EPOLLPRI: c_int = 0x2; +pub const EPOLLOUT: c_int = 0x4; +pub const EPOLLRDNORM: c_int = 0x40; +pub const EPOLLRDBAND: c_int = 0x80; +pub const EPOLLWRNORM: c_int = 0x100; +pub const EPOLLWRBAND: c_int = 0x200; +pub const EPOLLMSG: c_int = 0x400; +pub const EPOLLERR: c_int = 0x8; +pub const EPOLLHUP: c_int = 0x10; +pub const EPOLLET: c_int = 0x80000000; + +pub const EPOLL_CTL_ADD: c_int = 1; +pub const EPOLL_CTL_MOD: c_int = 3; +pub const EPOLL_CTL_DEL: c_int = 2; + +pub const MNT_DETACH: c_int = 0x2; +pub const MNT_EXPIRE: c_int = 0x4; + +pub const Q_GETFMT: c_int = 0x800004; +pub const Q_GETINFO: c_int = 0x800005; +pub const Q_SETINFO: c_int = 
0x800006; +pub const QIF_BLIMITS: u32 = 1; +pub const QIF_SPACE: u32 = 2; +pub const QIF_ILIMITS: u32 = 4; +pub const QIF_INODES: u32 = 8; +pub const QIF_BTIME: u32 = 16; +pub const QIF_ITIME: u32 = 32; +pub const QIF_LIMITS: u32 = 5; +pub const QIF_USAGE: u32 = 10; +pub const QIF_TIMES: u32 = 48; +pub const QIF_ALL: u32 = 63; + +pub const MNT_FORCE: c_int = 0x1; + +pub const Q_SYNC: c_int = 0x800001; +pub const Q_QUOTAON: c_int = 0x800002; +pub const Q_QUOTAOFF: c_int = 0x800003; +pub const Q_GETQUOTA: c_int = 0x800007; +pub const Q_SETQUOTA: c_int = 0x800008; + +pub const TCIOFF: c_int = 2; +pub const TCION: c_int = 3; +pub const TCOOFF: c_int = 0; +pub const TCOON: c_int = 1; +pub const TCIFLUSH: c_int = 0; +pub const TCOFLUSH: c_int = 1; +pub const TCIOFLUSH: c_int = 2; +pub const NL0: c_int = 0x00000000; +pub const NL1: c_int = 0x00000100; +pub const TAB0: c_int = 0x00000000; +pub const CR0: c_int = 0x00000000; +pub const FF0: c_int = 0x00000000; +pub const BS0: c_int = 0x00000000; +pub const VT0: c_int = 0x00000000; +pub const VERASE: usize = 2; +pub const VKILL: usize = 3; +pub const VINTR: usize = 0; +pub const VQUIT: usize = 1; +pub const VLNEXT: usize = 15; +pub const IGNBRK: crate::tcflag_t = 0x00000001; +pub const BRKINT: crate::tcflag_t = 0x00000002; +pub const IGNPAR: crate::tcflag_t = 0x00000004; +pub const PARMRK: crate::tcflag_t = 0x00000008; +pub const INPCK: crate::tcflag_t = 0x00000010; +pub const ISTRIP: crate::tcflag_t = 0x00000020; +pub const INLCR: crate::tcflag_t = 0x00000040; +pub const IGNCR: crate::tcflag_t = 0x00000080; +pub const ICRNL: crate::tcflag_t = 0x00000100; +pub const IXANY: crate::tcflag_t = 0x00000800; +pub const IMAXBEL: crate::tcflag_t = 0x00002000; +pub const OPOST: crate::tcflag_t = 0x1; +pub const CS5: crate::tcflag_t = 0x00000000; +pub const CRTSCTS: crate::tcflag_t = 0x80000000; +pub const ECHO: crate::tcflag_t = 0x00000008; +pub const OCRNL: crate::tcflag_t = 0o000010; +pub const ONOCR: crate::tcflag_t = 0o000020; +pub const ONLRET: crate::tcflag_t = 0o000040; +pub const OFILL: crate::tcflag_t = 0o000100; +pub const OFDEL: crate::tcflag_t = 0o000200; + +pub const CLONE_VM: c_int = 0x100; +pub const CLONE_FS: c_int = 0x200; +pub const CLONE_FILES: c_int = 0x400; +pub const CLONE_SIGHAND: c_int = 0x800; +pub const CLONE_PTRACE: c_int = 0x2000; +pub const CLONE_VFORK: c_int = 0x4000; +pub const CLONE_PARENT: c_int = 0x8000; +pub const CLONE_THREAD: c_int = 0x10000; +pub const CLONE_NEWNS: c_int = 0x20000; +pub const CLONE_SYSVSEM: c_int = 0x40000; +pub const CLONE_SETTLS: c_int = 0x80000; +pub const CLONE_PARENT_SETTID: c_int = 0x100000; +pub const CLONE_CHILD_CLEARTID: c_int = 0x200000; +pub const CLONE_DETACHED: c_int = 0x400000; +pub const CLONE_UNTRACED: c_int = 0x800000; +pub const CLONE_CHILD_SETTID: c_int = 0x01000000; +pub const CLONE_NEWUTS: c_int = 0x04000000; +pub const CLONE_NEWIPC: c_int = 0x08000000; +pub const CLONE_NEWUSER: c_int = 0x10000000; +pub const CLONE_NEWPID: c_int = 0x20000000; +pub const CLONE_NEWNET: c_int = 0x40000000; +pub const CLONE_IO: c_int = 0x80000000; +pub const CLONE_NEWCGROUP: c_int = 0x02000000; + +pub const WNOHANG: c_int = 0x00000001; +pub const WUNTRACED: c_int = 0x00000002; +pub const WSTOPPED: c_int = WUNTRACED; +pub const WEXITED: c_int = 0x00000004; +pub const WCONTINUED: c_int = 0x00000008; +pub const WNOWAIT: c_int = 0x01000000; + +// Options set using PTRACE_SETOPTIONS. 
+pub const PTRACE_O_TRACESYSGOOD: c_int = 0x00000001; +pub const PTRACE_O_TRACEFORK: c_int = 0x00000002; +pub const PTRACE_O_TRACEVFORK: c_int = 0x00000004; +pub const PTRACE_O_TRACECLONE: c_int = 0x00000008; +pub const PTRACE_O_TRACEEXEC: c_int = 0x00000010; +pub const PTRACE_O_TRACEVFORKDONE: c_int = 0x00000020; +pub const PTRACE_O_TRACEEXIT: c_int = 0x00000040; +pub const PTRACE_O_TRACESECCOMP: c_int = 0x00000080; +pub const PTRACE_O_EXITKILL: c_int = 0x00100000; +pub const PTRACE_O_SUSPEND_SECCOMP: c_int = 0x00200000; +pub const PTRACE_O_MASK: c_int = 0x003000ff; + +// Wait extended result codes for the above trace options. +pub const PTRACE_EVENT_FORK: c_int = 1; +pub const PTRACE_EVENT_VFORK: c_int = 2; +pub const PTRACE_EVENT_CLONE: c_int = 3; +pub const PTRACE_EVENT_EXEC: c_int = 4; +pub const PTRACE_EVENT_VFORK_DONE: c_int = 5; +pub const PTRACE_EVENT_EXIT: c_int = 6; +pub const PTRACE_EVENT_SECCOMP: c_int = 7; +// PTRACE_EVENT_STOP was added to glibc in 2.26 +// pub const PTRACE_EVENT_STOP: c_int = 128; + +pub const __WNOTHREAD: c_int = 0x20000000; +pub const __WALL: c_int = 0x40000000; +pub const __WCLONE: c_int = 0x80000000; + +pub const SPLICE_F_MOVE: c_uint = 0x01; +pub const SPLICE_F_NONBLOCK: c_uint = 0x02; +pub const SPLICE_F_MORE: c_uint = 0x04; +pub const SPLICE_F_GIFT: c_uint = 0x08; + +pub const RTLD_LOCAL: c_int = 0; +pub const RTLD_LAZY: c_int = 1; + +pub const POSIX_FADV_NORMAL: c_int = 0; +pub const POSIX_FADV_RANDOM: c_int = 1; +pub const POSIX_FADV_SEQUENTIAL: c_int = 2; +pub const POSIX_FADV_WILLNEED: c_int = 3; + +pub const AT_FDCWD: c_int = -100; +pub const AT_SYMLINK_NOFOLLOW: c_int = 0x100; +pub const AT_REMOVEDIR: c_int = 0x200; +pub const AT_EACCESS: c_int = 0x200; +pub const AT_SYMLINK_FOLLOW: c_int = 0x400; +pub const AT_NO_AUTOMOUNT: c_int = 0x800; +pub const AT_EMPTY_PATH: c_int = 0x1000; + +pub const LOG_CRON: c_int = 9 << 3; +pub const LOG_AUTHPRIV: c_int = 10 << 3; +pub const LOG_FTP: c_int = 11 << 3; +pub const LOG_PERROR: c_int = 0x20; + +pub const PIPE_BUF: usize = 4096; + +pub const SI_LOAD_SHIFT: c_uint = 16; + +pub const CLD_EXITED: c_int = 1; +pub const CLD_KILLED: c_int = 2; +pub const CLD_DUMPED: c_int = 3; +pub const CLD_TRAPPED: c_int = 4; +pub const CLD_STOPPED: c_int = 5; +pub const CLD_CONTINUED: c_int = 6; + +pub const SIGEV_SIGNAL: c_int = 0; +pub const SIGEV_NONE: c_int = 1; +pub const SIGEV_THREAD: c_int = 2; + +pub const P_ALL: idtype_t = 0; +pub const P_PID: idtype_t = 1; +pub const P_PGID: idtype_t = 2; + +pub const UTIME_OMIT: c_long = 1073741822; +pub const UTIME_NOW: c_long = 1073741823; + +pub const POLLIN: c_short = 0x1; +pub const POLLPRI: c_short = 0x2; +pub const POLLOUT: c_short = 0x4; +pub const POLLERR: c_short = 0x8; +pub const POLLHUP: c_short = 0x10; +pub const POLLNVAL: c_short = 0x20; +pub const POLLRDNORM: c_short = 0x040; +pub const POLLRDBAND: c_short = 0x080; + +pub const ABDAY_1: crate::nl_item = 0x20000; +pub const ABDAY_2: crate::nl_item = 0x20001; +pub const ABDAY_3: crate::nl_item = 0x20002; +pub const ABDAY_4: crate::nl_item = 0x20003; +pub const ABDAY_5: crate::nl_item = 0x20004; +pub const ABDAY_6: crate::nl_item = 0x20005; +pub const ABDAY_7: crate::nl_item = 0x20006; + +pub const DAY_1: crate::nl_item = 0x20007; +pub const DAY_2: crate::nl_item = 0x20008; +pub const DAY_3: crate::nl_item = 0x20009; +pub const DAY_4: crate::nl_item = 0x2000A; +pub const DAY_5: crate::nl_item = 0x2000B; +pub const DAY_6: crate::nl_item = 0x2000C; +pub const DAY_7: crate::nl_item = 0x2000D; + +pub const ABMON_1: 
crate::nl_item = 0x2000E; +pub const ABMON_2: crate::nl_item = 0x2000F; +pub const ABMON_3: crate::nl_item = 0x20010; +pub const ABMON_4: crate::nl_item = 0x20011; +pub const ABMON_5: crate::nl_item = 0x20012; +pub const ABMON_6: crate::nl_item = 0x20013; +pub const ABMON_7: crate::nl_item = 0x20014; +pub const ABMON_8: crate::nl_item = 0x20015; +pub const ABMON_9: crate::nl_item = 0x20016; +pub const ABMON_10: crate::nl_item = 0x20017; +pub const ABMON_11: crate::nl_item = 0x20018; +pub const ABMON_12: crate::nl_item = 0x20019; + +pub const MON_1: crate::nl_item = 0x2001A; +pub const MON_2: crate::nl_item = 0x2001B; +pub const MON_3: crate::nl_item = 0x2001C; +pub const MON_4: crate::nl_item = 0x2001D; +pub const MON_5: crate::nl_item = 0x2001E; +pub const MON_6: crate::nl_item = 0x2001F; +pub const MON_7: crate::nl_item = 0x20020; +pub const MON_8: crate::nl_item = 0x20021; +pub const MON_9: crate::nl_item = 0x20022; +pub const MON_10: crate::nl_item = 0x20023; +pub const MON_11: crate::nl_item = 0x20024; +pub const MON_12: crate::nl_item = 0x20025; + +pub const AM_STR: crate::nl_item = 0x20026; +pub const PM_STR: crate::nl_item = 0x20027; + +pub const D_T_FMT: crate::nl_item = 0x20028; +pub const D_FMT: crate::nl_item = 0x20029; +pub const T_FMT: crate::nl_item = 0x2002A; +pub const T_FMT_AMPM: crate::nl_item = 0x2002B; + +pub const ERA: crate::nl_item = 0x2002C; +pub const ERA_D_FMT: crate::nl_item = 0x2002E; +pub const ALT_DIGITS: crate::nl_item = 0x2002F; +pub const ERA_D_T_FMT: crate::nl_item = 0x20030; +pub const ERA_T_FMT: crate::nl_item = 0x20031; + +pub const CODESET: crate::nl_item = 14; + +pub const CRNCYSTR: crate::nl_item = 0x4000F; + +pub const RUSAGE_THREAD: c_int = 1; +pub const RUSAGE_CHILDREN: c_int = -1; + +pub const RADIXCHAR: crate::nl_item = 0x10000; +pub const THOUSEP: crate::nl_item = 0x10001; + +pub const YESEXPR: crate::nl_item = 0x50000; +pub const NOEXPR: crate::nl_item = 0x50001; +pub const YESSTR: crate::nl_item = 0x50002; +pub const NOSTR: crate::nl_item = 0x50003; + +pub const FILENAME_MAX: c_uint = 4096; +pub const L_tmpnam: c_uint = 20; +pub const _PC_LINK_MAX: c_int = 0; +pub const _PC_MAX_CANON: c_int = 1; +pub const _PC_MAX_INPUT: c_int = 2; +pub const _PC_NAME_MAX: c_int = 3; +pub const _PC_PATH_MAX: c_int = 4; +pub const _PC_PIPE_BUF: c_int = 5; +pub const _PC_CHOWN_RESTRICTED: c_int = 6; +pub const _PC_NO_TRUNC: c_int = 7; +pub const _PC_VDISABLE: c_int = 8; +pub const _PC_SYNC_IO: c_int = 9; +pub const _PC_ASYNC_IO: c_int = 10; +pub const _PC_PRIO_IO: c_int = 11; +pub const _PC_SOCK_MAXBUF: c_int = 12; +pub const _PC_FILESIZEBITS: c_int = 13; +pub const _PC_REC_INCR_XFER_SIZE: c_int = 14; +pub const _PC_REC_MAX_XFER_SIZE: c_int = 15; +pub const _PC_REC_MIN_XFER_SIZE: c_int = 16; +pub const _PC_REC_XFER_ALIGN: c_int = 17; +pub const _PC_ALLOC_SIZE_MIN: c_int = 18; +pub const _PC_SYMLINK_MAX: c_int = 19; +pub const _PC_2_SYMLINKS: c_int = 20; + +pub const _SC_ARG_MAX: c_int = 0; +pub const _SC_CHILD_MAX: c_int = 1; +pub const _SC_CLK_TCK: c_int = 2; +pub const _SC_NGROUPS_MAX: c_int = 3; +pub const _SC_OPEN_MAX: c_int = 4; +pub const _SC_STREAM_MAX: c_int = 5; +pub const _SC_TZNAME_MAX: c_int = 6; +pub const _SC_JOB_CONTROL: c_int = 7; +pub const _SC_SAVED_IDS: c_int = 8; +pub const _SC_REALTIME_SIGNALS: c_int = 9; +pub const _SC_PRIORITY_SCHEDULING: c_int = 10; +pub const _SC_TIMERS: c_int = 11; +pub const _SC_ASYNCHRONOUS_IO: c_int = 12; +pub const _SC_PRIORITIZED_IO: c_int = 13; +pub const _SC_SYNCHRONIZED_IO: c_int = 14; +pub const _SC_FSYNC: 
c_int = 15; +pub const _SC_MAPPED_FILES: c_int = 16; +pub const _SC_MEMLOCK: c_int = 17; +pub const _SC_MEMLOCK_RANGE: c_int = 18; +pub const _SC_MEMORY_PROTECTION: c_int = 19; +pub const _SC_MESSAGE_PASSING: c_int = 20; +pub const _SC_SEMAPHORES: c_int = 21; +pub const _SC_SHARED_MEMORY_OBJECTS: c_int = 22; +pub const _SC_AIO_LISTIO_MAX: c_int = 23; +pub const _SC_AIO_MAX: c_int = 24; +pub const _SC_AIO_PRIO_DELTA_MAX: c_int = 25; +pub const _SC_DELAYTIMER_MAX: c_int = 26; +pub const _SC_MQ_OPEN_MAX: c_int = 27; +pub const _SC_MQ_PRIO_MAX: c_int = 28; +pub const _SC_VERSION: c_int = 29; +pub const _SC_PAGESIZE: c_int = 30; +pub const _SC_PAGE_SIZE: c_int = _SC_PAGESIZE; +pub const _SC_RTSIG_MAX: c_int = 31; +pub const _SC_SEM_NSEMS_MAX: c_int = 32; +pub const _SC_SEM_VALUE_MAX: c_int = 33; +pub const _SC_SIGQUEUE_MAX: c_int = 34; +pub const _SC_TIMER_MAX: c_int = 35; +pub const _SC_BC_BASE_MAX: c_int = 36; +pub const _SC_BC_DIM_MAX: c_int = 37; +pub const _SC_BC_SCALE_MAX: c_int = 38; +pub const _SC_BC_STRING_MAX: c_int = 39; +pub const _SC_COLL_WEIGHTS_MAX: c_int = 40; +pub const _SC_EXPR_NEST_MAX: c_int = 42; +pub const _SC_LINE_MAX: c_int = 43; +pub const _SC_RE_DUP_MAX: c_int = 44; +pub const _SC_2_VERSION: c_int = 46; +pub const _SC_2_C_BIND: c_int = 47; +pub const _SC_2_C_DEV: c_int = 48; +pub const _SC_2_FORT_DEV: c_int = 49; +pub const _SC_2_FORT_RUN: c_int = 50; +pub const _SC_2_SW_DEV: c_int = 51; +pub const _SC_2_LOCALEDEF: c_int = 52; +pub const _SC_UIO_MAXIOV: c_int = 60; +pub const _SC_IOV_MAX: c_int = 60; +pub const _SC_THREADS: c_int = 67; +pub const _SC_THREAD_SAFE_FUNCTIONS: c_int = 68; +pub const _SC_GETGR_R_SIZE_MAX: c_int = 69; +pub const _SC_GETPW_R_SIZE_MAX: c_int = 70; +pub const _SC_LOGIN_NAME_MAX: c_int = 71; +pub const _SC_TTY_NAME_MAX: c_int = 72; +pub const _SC_THREAD_DESTRUCTOR_ITERATIONS: c_int = 73; +pub const _SC_THREAD_KEYS_MAX: c_int = 74; +pub const _SC_THREAD_STACK_MIN: c_int = 75; +pub const _SC_THREAD_THREADS_MAX: c_int = 76; +pub const _SC_THREAD_ATTR_STACKADDR: c_int = 77; +pub const _SC_THREAD_ATTR_STACKSIZE: c_int = 78; +pub const _SC_THREAD_PRIORITY_SCHEDULING: c_int = 79; +pub const _SC_THREAD_PRIO_INHERIT: c_int = 80; +pub const _SC_THREAD_PRIO_PROTECT: c_int = 81; +pub const _SC_THREAD_PROCESS_SHARED: c_int = 82; +pub const _SC_NPROCESSORS_CONF: c_int = 83; +pub const _SC_NPROCESSORS_ONLN: c_int = 84; +pub const _SC_PHYS_PAGES: c_int = 85; +pub const _SC_AVPHYS_PAGES: c_int = 86; +pub const _SC_ATEXIT_MAX: c_int = 87; +pub const _SC_PASS_MAX: c_int = 88; +pub const _SC_XOPEN_VERSION: c_int = 89; +pub const _SC_XOPEN_XCU_VERSION: c_int = 90; +pub const _SC_XOPEN_UNIX: c_int = 91; +pub const _SC_XOPEN_CRYPT: c_int = 92; +pub const _SC_XOPEN_ENH_I18N: c_int = 93; +pub const _SC_XOPEN_SHM: c_int = 94; +pub const _SC_2_CHAR_TERM: c_int = 95; +pub const _SC_2_UPE: c_int = 97; +pub const _SC_XOPEN_XPG2: c_int = 98; +pub const _SC_XOPEN_XPG3: c_int = 99; +pub const _SC_XOPEN_XPG4: c_int = 100; +pub const _SC_NZERO: c_int = 109; +pub const _SC_XBS5_ILP32_OFF32: c_int = 125; +pub const _SC_XBS5_ILP32_OFFBIG: c_int = 126; +pub const _SC_XBS5_LP64_OFF64: c_int = 127; +pub const _SC_XBS5_LPBIG_OFFBIG: c_int = 128; +pub const _SC_XOPEN_LEGACY: c_int = 129; +pub const _SC_XOPEN_REALTIME: c_int = 130; +pub const _SC_XOPEN_REALTIME_THREADS: c_int = 131; +pub const _SC_ADVISORY_INFO: c_int = 132; +pub const _SC_BARRIERS: c_int = 133; +pub const _SC_CLOCK_SELECTION: c_int = 137; +pub const _SC_CPUTIME: c_int = 138; +pub const _SC_THREAD_CPUTIME: c_int = 139; 
+pub const _SC_MONOTONIC_CLOCK: c_int = 149; +pub const _SC_READER_WRITER_LOCKS: c_int = 153; +pub const _SC_SPIN_LOCKS: c_int = 154; +pub const _SC_REGEXP: c_int = 155; +pub const _SC_SHELL: c_int = 157; +pub const _SC_SPAWN: c_int = 159; +pub const _SC_SPORADIC_SERVER: c_int = 160; +pub const _SC_THREAD_SPORADIC_SERVER: c_int = 161; +pub const _SC_TIMEOUTS: c_int = 164; +pub const _SC_TYPED_MEMORY_OBJECTS: c_int = 165; +pub const _SC_2_PBS: c_int = 168; +pub const _SC_2_PBS_ACCOUNTING: c_int = 169; +pub const _SC_2_PBS_LOCATE: c_int = 170; +pub const _SC_2_PBS_MESSAGE: c_int = 171; +pub const _SC_2_PBS_TRACK: c_int = 172; +pub const _SC_SYMLOOP_MAX: c_int = 173; +pub const _SC_STREAMS: c_int = 174; +pub const _SC_2_PBS_CHECKPOINT: c_int = 175; +pub const _SC_V6_ILP32_OFF32: c_int = 176; +pub const _SC_V6_ILP32_OFFBIG: c_int = 177; +pub const _SC_V6_LP64_OFF64: c_int = 178; +pub const _SC_V6_LPBIG_OFFBIG: c_int = 179; +pub const _SC_HOST_NAME_MAX: c_int = 180; +pub const _SC_TRACE: c_int = 181; +pub const _SC_TRACE_EVENT_FILTER: c_int = 182; +pub const _SC_TRACE_INHERIT: c_int = 183; +pub const _SC_TRACE_LOG: c_int = 184; +pub const _SC_IPV6: c_int = 235; +pub const _SC_RAW_SOCKETS: c_int = 236; +pub const _SC_V7_ILP32_OFF32: c_int = 237; +pub const _SC_V7_ILP32_OFFBIG: c_int = 238; +pub const _SC_V7_LP64_OFF64: c_int = 239; +pub const _SC_V7_LPBIG_OFFBIG: c_int = 240; +pub const _SC_SS_REPL_MAX: c_int = 241; +pub const _SC_TRACE_EVENT_NAME_MAX: c_int = 242; +pub const _SC_TRACE_NAME_MAX: c_int = 243; +pub const _SC_TRACE_SYS_MAX: c_int = 244; +pub const _SC_TRACE_USER_EVENT_MAX: c_int = 245; +pub const _SC_XOPEN_STREAMS: c_int = 246; +pub const _SC_THREAD_ROBUST_PRIO_INHERIT: c_int = 247; +pub const _SC_THREAD_ROBUST_PRIO_PROTECT: c_int = 248; + +pub const RLIM_SAVED_MAX: crate::rlim_t = RLIM_INFINITY; +pub const RLIM_SAVED_CUR: crate::rlim_t = RLIM_INFINITY; + +pub const GLOB_ERR: c_int = 1 << 0; +pub const GLOB_MARK: c_int = 1 << 1; +pub const GLOB_NOSORT: c_int = 1 << 2; +pub const GLOB_DOOFFS: c_int = 1 << 3; +pub const GLOB_NOCHECK: c_int = 1 << 4; +pub const GLOB_APPEND: c_int = 1 << 5; +pub const GLOB_NOESCAPE: c_int = 1 << 6; + +pub const GLOB_NOSPACE: c_int = 1; +pub const GLOB_ABORTED: c_int = 2; +pub const GLOB_NOMATCH: c_int = 3; + +pub const POSIX_MADV_NORMAL: c_int = 0; +pub const POSIX_MADV_RANDOM: c_int = 1; +pub const POSIX_MADV_SEQUENTIAL: c_int = 2; +pub const POSIX_MADV_WILLNEED: c_int = 3; + +pub const S_IEXEC: mode_t = 0o0100; +pub const S_IWRITE: mode_t = 0o0200; +pub const S_IREAD: mode_t = 0o0400; + +pub const F_LOCK: c_int = 1; +pub const F_TEST: c_int = 3; +pub const F_TLOCK: c_int = 2; +pub const F_ULOCK: c_int = 0; + +pub const IFF_LOWER_UP: c_int = 0x10000; +pub const IFF_DORMANT: c_int = 0x20000; +pub const IFF_ECHO: c_int = 0x40000; + +pub const ST_RDONLY: c_ulong = 1; +pub const ST_NOSUID: c_ulong = 2; +pub const ST_NODEV: c_ulong = 4; +pub const ST_NOEXEC: c_ulong = 8; +pub const ST_SYNCHRONOUS: c_ulong = 16; +pub const ST_MANDLOCK: c_ulong = 64; +pub const ST_WRITE: c_ulong = 128; +pub const ST_APPEND: c_ulong = 256; +pub const ST_IMMUTABLE: c_ulong = 512; +pub const ST_NOATIME: c_ulong = 1024; +pub const ST_NODIRATIME: c_ulong = 2048; + +pub const RTLD_NEXT: *mut c_void = -1i64 as *mut c_void; +pub const RTLD_DEFAULT: *mut c_void = ptr::null_mut(); +pub const RTLD_NODELETE: c_int = 0x1000; +pub const RTLD_NOW: c_int = 0x2; + +pub const TCP_MD5SIG: c_int = 14; + +pub const PTHREAD_MUTEX_INITIALIZER: pthread_mutex_t = pthread_mutex_t { + size: [0; 
__SIZEOF_PTHREAD_MUTEX_T], +}; +pub const PTHREAD_COND_INITIALIZER: pthread_cond_t = pthread_cond_t { + size: [0; __SIZEOF_PTHREAD_COND_T], +}; +pub const PTHREAD_RWLOCK_INITIALIZER: pthread_rwlock_t = pthread_rwlock_t { + size: [0; __SIZEOF_PTHREAD_RWLOCK_T], +}; +pub const PTHREAD_MUTEX_NORMAL: c_int = 0; +pub const PTHREAD_MUTEX_RECURSIVE: c_int = 1; +pub const PTHREAD_MUTEX_ERRORCHECK: c_int = 2; +pub const PTHREAD_MUTEX_DEFAULT: c_int = PTHREAD_MUTEX_NORMAL; +pub const PTHREAD_PROCESS_PRIVATE: c_int = 0; +pub const PTHREAD_PROCESS_SHARED: c_int = 1; +pub const __SIZEOF_PTHREAD_COND_T: usize = 48; + +pub const RENAME_NOREPLACE: c_int = 1; +pub const RENAME_EXCHANGE: c_int = 2; +pub const RENAME_WHITEOUT: c_int = 4; + +pub const SCHED_OTHER: c_int = 0; +pub const SCHED_FIFO: c_int = 1; +pub const SCHED_RR: c_int = 2; +pub const SCHED_BATCH: c_int = 3; +pub const SCHED_IDLE: c_int = 5; + +// netinet/in.h +// NOTE: These are in addition to the constants defined in src/unix/mod.rs + +// IPPROTO_IP defined in src/unix/mod.rs +/// Hop-by-hop option header +pub const IPPROTO_HOPOPTS: c_int = 0; +// IPPROTO_ICMP defined in src/unix/mod.rs +/// group mgmt protocol +pub const IPPROTO_IGMP: c_int = 2; +/// for compatibility +pub const IPPROTO_IPIP: c_int = 4; +// IPPROTO_TCP defined in src/unix/mod.rs +/// exterior gateway protocol +pub const IPPROTO_EGP: c_int = 8; +/// pup +pub const IPPROTO_PUP: c_int = 12; +// IPPROTO_UDP defined in src/unix/mod.rs +/// xns idp +pub const IPPROTO_IDP: c_int = 22; +/// tp-4 w/ class negotiation +pub const IPPROTO_TP: c_int = 29; +/// DCCP +pub const IPPROTO_DCCP: c_int = 33; +// IPPROTO_IPV6 defined in src/unix/mod.rs +/// IP6 routing header +pub const IPPROTO_ROUTING: c_int = 43; +/// IP6 fragmentation header +pub const IPPROTO_FRAGMENT: c_int = 44; +/// resource reservation +pub const IPPROTO_RSVP: c_int = 46; +/// General Routing Encap. +pub const IPPROTO_GRE: c_int = 47; +/// IP6 Encap Sec. Payload +pub const IPPROTO_ESP: c_int = 50; +/// IP6 Auth Header +pub const IPPROTO_AH: c_int = 51; +// IPPROTO_ICMPV6 defined in src/unix/mod.rs +/// IP6 no next header +pub const IPPROTO_NONE: c_int = 59; +/// IP6 destination option +pub const IPPROTO_DSTOPTS: c_int = 60; +pub const IPPROTO_MTP: c_int = 92; +pub const IPPROTO_BEETPH: c_int = 94; +/// encapsulation header +pub const IPPROTO_ENCAP: c_int = 98; +/// Protocol indep. multicast +pub const IPPROTO_PIM: c_int = 103; +/// IP Payload Comp. 
Protocol +pub const IPPROTO_COMP: c_int = 108; +/// SCTP +pub const IPPROTO_SCTP: c_int = 132; +pub const IPPROTO_MH: c_int = 135; +pub const IPPROTO_UDPLITE: c_int = 136; +pub const IPPROTO_MPLS: c_int = 137; +/// raw IP packet +pub const IPPROTO_RAW: c_int = 255; +pub const IPPROTO_MAX: c_int = 256; + +pub const AF_IB: c_int = 27; +pub const AF_MPLS: c_int = 28; +pub const AF_NFC: c_int = 39; +pub const AF_VSOCK: c_int = 40; +pub const PF_IB: c_int = AF_IB; +pub const PF_MPLS: c_int = AF_MPLS; +pub const PF_NFC: c_int = AF_NFC; +pub const PF_VSOCK: c_int = AF_VSOCK; + +// System V IPC +pub const IPC_PRIVATE: crate::key_t = 0; + +pub const IPC_CREAT: c_int = 0o1000; +pub const IPC_EXCL: c_int = 0o2000; +pub const IPC_NOWAIT: c_int = 0o4000; + +pub const IPC_RMID: c_int = 0; +pub const IPC_SET: c_int = 1; +pub const IPC_STAT: c_int = 2; +pub const IPC_INFO: c_int = 3; +pub const MSG_STAT: c_int = 11; +pub const MSG_INFO: c_int = 12; + +pub const MSG_NOERROR: c_int = 0o10000; +pub const MSG_EXCEPT: c_int = 0o20000; +pub const MSG_COPY: c_int = 0o40000; + +pub const SHM_R: c_int = 0o400; +pub const SHM_W: c_int = 0o200; + +pub const SHM_RDONLY: c_int = 0o10000; +pub const SHM_RND: c_int = 0o20000; +pub const SHM_REMAP: c_int = 0o40000; +pub const SHM_EXEC: c_int = 0o100000; + +pub const SHM_LOCK: c_int = 11; +pub const SHM_UNLOCK: c_int = 12; + +pub const SHM_HUGETLB: c_int = 0o4000; +pub const SHM_NORESERVE: c_int = 0o10000; + +pub const EPOLLRDHUP: c_int = 0x2000; +pub const EPOLLEXCLUSIVE: c_int = 0x10000000; +pub const EPOLLONESHOT: c_int = 0x40000000; + +pub const QFMT_VFS_OLD: c_int = 1; +pub const QFMT_VFS_V0: c_int = 2; +pub const QFMT_VFS_V1: c_int = 4; + +pub const EFD_SEMAPHORE: c_int = 0x1; + +pub const LOG_NFACILITIES: c_int = 24; + +pub const SEM_FAILED: *mut crate::sem_t = ptr::null_mut(); + +pub const RB_AUTOBOOT: c_int = 0x01234567u32 as i32; +pub const RB_HALT_SYSTEM: c_int = 0xcdef0123u32 as i32; +pub const RB_ENABLE_CAD: c_int = 0x89abcdefu32 as i32; +pub const RB_DISABLE_CAD: c_int = 0x00000000u32 as i32; +pub const RB_POWER_OFF: c_int = 0x4321fedcu32 as i32; +pub const RB_SW_SUSPEND: c_int = 0xd000fce2u32 as i32; +pub const RB_KEXEC: c_int = 0x45584543u32 as i32; + +pub const AI_PASSIVE: c_int = 0x0001; +pub const AI_CANONNAME: c_int = 0x0002; +pub const AI_NUMERICHOST: c_int = 0x0004; +pub const AI_V4MAPPED: c_int = 0x0008; +pub const AI_ALL: c_int = 0x0010; +pub const AI_ADDRCONFIG: c_int = 0x0020; + +pub const AI_NUMERICSERV: c_int = 0x0400; + +pub const EAI_BADFLAGS: c_int = -1; +pub const EAI_NONAME: c_int = -2; +pub const EAI_AGAIN: c_int = -3; +pub const EAI_FAIL: c_int = -4; +pub const EAI_FAMILY: c_int = -6; +pub const EAI_SOCKTYPE: c_int = -7; +pub const EAI_SERVICE: c_int = -8; +pub const EAI_MEMORY: c_int = -10; +pub const EAI_OVERFLOW: c_int = -12; + +pub const NI_NUMERICHOST: c_int = 1; +pub const NI_NUMERICSERV: c_int = 2; +pub const NI_NOFQDN: c_int = 4; +pub const NI_NAMEREQD: c_int = 8; +pub const NI_DGRAM: c_int = 16; + +pub const SYNC_FILE_RANGE_WAIT_BEFORE: c_uint = 1; +pub const SYNC_FILE_RANGE_WRITE: c_uint = 2; +pub const SYNC_FILE_RANGE_WAIT_AFTER: c_uint = 4; + +pub const EAI_SYSTEM: c_int = -11; + +pub const AIO_CANCELED: c_int = 0; +pub const AIO_NOTCANCELED: c_int = 1; +pub const AIO_ALLDONE: c_int = 2; +pub const LIO_READ: c_int = 0; +pub const LIO_WRITE: c_int = 1; +pub const LIO_NOP: c_int = 2; +pub const LIO_WAIT: c_int = 0; +pub const LIO_NOWAIT: c_int = 1; + +pub const MREMAP_MAYMOVE: c_int = 1; +pub const MREMAP_FIXED: c_int = 2; + 
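The AI_*, EAI_* and NI_* values above mirror the <netdb.h> flags consumed by getaddrinfo(); the matching getaddrinfo, gai_strerror and freeaddrinfo declarations appear further down in this file. As a rough usage sketch only (not part of the patch; it assumes these bindings are re-exported under the usual libc:: paths and that the addrinfo type is defined elsewhere in the module):

    use std::ffi::{CStr, CString};
    use std::{mem, ptr};

    // Illustrative only: `libc` stands in for whatever crate re-exports the
    // bindings shown in this file; `addrinfo` is assumed to be defined alongside them.
    fn lookup(host: &str) -> Result<(), String> {
        let node = CString::new(host).map_err(|e| e.to_string())?;
        let mut hints: libc::addrinfo = unsafe { mem::zeroed() };
        hints.ai_flags = libc::AI_ADDRCONFIG; // only address families configured locally
        hints.ai_socktype = libc::SOCK_STREAM;
        let mut res: *mut libc::addrinfo = ptr::null_mut();
        let rc = unsafe { libc::getaddrinfo(node.as_ptr(), ptr::null(), &hints, &mut res) };
        if rc != 0 {
            // rc is one of the EAI_* codes above; gai_strerror() turns it into text.
            return Err(unsafe { CStr::from_ptr(libc::gai_strerror(rc)) }
                .to_string_lossy()
                .into_owned());
        }
        // ... walk the ai_next chain here ...
        unsafe { libc::freeaddrinfo(res) };
        Ok(())
    }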
+pub const PR_SET_PDEATHSIG: c_int = 1; +pub const PR_GET_PDEATHSIG: c_int = 2; + +pub const PR_GET_DUMPABLE: c_int = 3; +pub const PR_SET_DUMPABLE: c_int = 4; + +pub const PR_GET_UNALIGN: c_int = 5; +pub const PR_SET_UNALIGN: c_int = 6; +pub const PR_UNALIGN_NOPRINT: c_int = 1; +pub const PR_UNALIGN_SIGBUS: c_int = 2; + +pub const PR_GET_KEEPCAPS: c_int = 7; +pub const PR_SET_KEEPCAPS: c_int = 8; + +pub const PR_GET_FPEMU: c_int = 9; +pub const PR_SET_FPEMU: c_int = 10; +pub const PR_FPEMU_NOPRINT: c_int = 1; +pub const PR_FPEMU_SIGFPE: c_int = 2; + +pub const PR_GET_FPEXC: c_int = 11; +pub const PR_SET_FPEXC: c_int = 12; +pub const PR_FP_EXC_SW_ENABLE: c_int = 0x80; +pub const PR_FP_EXC_DIV: c_int = 0x010000; +pub const PR_FP_EXC_OVF: c_int = 0x020000; +pub const PR_FP_EXC_UND: c_int = 0x040000; +pub const PR_FP_EXC_RES: c_int = 0x080000; +pub const PR_FP_EXC_INV: c_int = 0x100000; +pub const PR_FP_EXC_DISABLED: c_int = 0; +pub const PR_FP_EXC_NONRECOV: c_int = 1; +pub const PR_FP_EXC_ASYNC: c_int = 2; +pub const PR_FP_EXC_PRECISE: c_int = 3; + +pub const PR_GET_TIMING: c_int = 13; +pub const PR_SET_TIMING: c_int = 14; +pub const PR_TIMING_STATISTICAL: c_int = 0; +pub const PR_TIMING_TIMESTAMP: c_int = 1; + +pub const PR_SET_NAME: c_int = 15; +pub const PR_GET_NAME: c_int = 16; + +pub const PR_GET_ENDIAN: c_int = 19; +pub const PR_SET_ENDIAN: c_int = 20; +pub const PR_ENDIAN_BIG: c_int = 0; +pub const PR_ENDIAN_LITTLE: c_int = 1; +pub const PR_ENDIAN_PPC_LITTLE: c_int = 2; + +pub const PR_GET_SECCOMP: c_int = 21; +pub const PR_SET_SECCOMP: c_int = 22; + +pub const PR_CAPBSET_READ: c_int = 23; +pub const PR_CAPBSET_DROP: c_int = 24; + +pub const PR_GET_TSC: c_int = 25; +pub const PR_SET_TSC: c_int = 26; +pub const PR_TSC_ENABLE: c_int = 1; +pub const PR_TSC_SIGSEGV: c_int = 2; + +pub const PR_GET_SECUREBITS: c_int = 27; +pub const PR_SET_SECUREBITS: c_int = 28; + +pub const PR_SET_TIMERSLACK: c_int = 29; +pub const PR_GET_TIMERSLACK: c_int = 30; + +pub const PR_TASK_PERF_EVENTS_DISABLE: c_int = 31; +pub const PR_TASK_PERF_EVENTS_ENABLE: c_int = 32; + +pub const PR_MCE_KILL: c_int = 33; +pub const PR_MCE_KILL_CLEAR: c_int = 0; +pub const PR_MCE_KILL_SET: c_int = 1; + +pub const PR_MCE_KILL_LATE: c_int = 0; +pub const PR_MCE_KILL_EARLY: c_int = 1; +pub const PR_MCE_KILL_DEFAULT: c_int = 2; + +pub const PR_MCE_KILL_GET: c_int = 34; + +pub const PR_SET_MM: c_int = 35; +pub const PR_SET_MM_START_CODE: c_int = 1; +pub const PR_SET_MM_END_CODE: c_int = 2; +pub const PR_SET_MM_START_DATA: c_int = 3; +pub const PR_SET_MM_END_DATA: c_int = 4; +pub const PR_SET_MM_START_STACK: c_int = 5; +pub const PR_SET_MM_START_BRK: c_int = 6; +pub const PR_SET_MM_BRK: c_int = 7; +pub const PR_SET_MM_ARG_START: c_int = 8; +pub const PR_SET_MM_ARG_END: c_int = 9; +pub const PR_SET_MM_ENV_START: c_int = 10; +pub const PR_SET_MM_ENV_END: c_int = 11; +pub const PR_SET_MM_AUXV: c_int = 12; +pub const PR_SET_MM_EXE_FILE: c_int = 13; +pub const PR_SET_MM_MAP: c_int = 14; +pub const PR_SET_MM_MAP_SIZE: c_int = 15; + +pub const PR_SET_PTRACER: c_int = 0x59616d61; +pub const PR_SET_PTRACER_ANY: c_ulong = 0xffffffffffffffff; + +pub const PR_SET_CHILD_SUBREAPER: c_int = 36; +pub const PR_GET_CHILD_SUBREAPER: c_int = 37; + +pub const PR_SET_NO_NEW_PRIVS: c_int = 38; +pub const PR_GET_NO_NEW_PRIVS: c_int = 39; + +pub const PR_GET_TID_ADDRESS: c_int = 40; + +pub const PR_SET_THP_DISABLE: c_int = 41; +pub const PR_GET_THP_DISABLE: c_int = 42; + +pub const PR_MPX_ENABLE_MANAGEMENT: c_int = 43; +pub const 
PR_MPX_DISABLE_MANAGEMENT: c_int = 44; + +pub const PR_SET_FP_MODE: c_int = 45; +pub const PR_GET_FP_MODE: c_int = 46; +pub const PR_FP_MODE_FR: c_int = 1 << 0; +pub const PR_FP_MODE_FRE: c_int = 1 << 1; + +pub const PR_CAP_AMBIENT: c_int = 47; +pub const PR_CAP_AMBIENT_IS_SET: c_int = 1; +pub const PR_CAP_AMBIENT_RAISE: c_int = 2; +pub const PR_CAP_AMBIENT_LOWER: c_int = 3; +pub const PR_CAP_AMBIENT_CLEAR_ALL: c_int = 4; + +pub const ITIMER_REAL: c_int = 0; +pub const ITIMER_VIRTUAL: c_int = 1; +pub const ITIMER_PROF: c_int = 2; + +pub const TFD_CLOEXEC: c_int = O_CLOEXEC; +pub const TFD_NONBLOCK: c_int = O_NONBLOCK; +pub const TFD_TIMER_ABSTIME: c_int = 1; + +pub const XATTR_CREATE: c_int = 0x1; +pub const XATTR_REPLACE: c_int = 0x2; + +pub const _POSIX_VDISABLE: crate::cc_t = 0; + +pub const FALLOC_FL_KEEP_SIZE: c_int = 0x01; +pub const FALLOC_FL_PUNCH_HOLE: c_int = 0x02; +pub const FALLOC_FL_COLLAPSE_RANGE: c_int = 0x08; +pub const FALLOC_FL_ZERO_RANGE: c_int = 0x10; +pub const FALLOC_FL_INSERT_RANGE: c_int = 0x20; +pub const FALLOC_FL_UNSHARE_RANGE: c_int = 0x40; + +// On Linux, libc doesn't define this constant, libattr does instead. +// We still define it for Linux as it's defined by libc on other platforms, +// and it's mentioned in the man pages for getxattr and setxattr. +pub const ENOATTR: c_int = crate::ENODATA; + +pub const SO_ORIGINAL_DST: c_int = 80; +pub const IUTF8: crate::tcflag_t = 0x00004000; +pub const CMSPAR: crate::tcflag_t = 0o10000000000; + +pub const MFD_CLOEXEC: c_uint = 0x0001; +pub const MFD_ALLOW_SEALING: c_uint = 0x0002; + +// these are used in the p_type field of Elf32_Phdr and Elf64_Phdr, which has +// the type Elf32Word and Elf64Word respectively. Luckily, both of those are u32 +// so we can use that type here to avoid having to cast. +pub const PT_NULL: u32 = 0; +pub const PT_LOAD: u32 = 1; +pub const PT_DYNAMIC: u32 = 2; +pub const PT_INTERP: u32 = 3; +pub const PT_NOTE: u32 = 4; +pub const PT_SHLIB: u32 = 5; +pub const PT_PHDR: u32 = 6; +pub const PT_TLS: u32 = 7; +pub const PT_NUM: u32 = 8; +pub const PT_LOOS: u32 = 0x60000000; +pub const PT_GNU_EH_FRAME: u32 = 0x6474e550; +pub const PT_GNU_STACK: u32 = 0x6474e551; +pub const PT_GNU_RELRO: u32 = 0x6474e552; + +// Ethernet protocol IDs. 
+pub const ETH_P_LOOP: c_int = 0x0060; +pub const ETH_P_PUP: c_int = 0x0200; +pub const ETH_P_PUPAT: c_int = 0x0201; +pub const ETH_P_IP: c_int = 0x0800; +pub const ETH_P_X25: c_int = 0x0805; +pub const ETH_P_ARP: c_int = 0x0806; +pub const ETH_P_BPQ: c_int = 0x08FF; +pub const ETH_P_IEEEPUP: c_int = 0x0a00; +pub const ETH_P_IEEEPUPAT: c_int = 0x0a01; +pub const ETH_P_BATMAN: c_int = 0x4305; +pub const ETH_P_DEC: c_int = 0x6000; +pub const ETH_P_DNA_DL: c_int = 0x6001; +pub const ETH_P_DNA_RC: c_int = 0x6002; +pub const ETH_P_DNA_RT: c_int = 0x6003; +pub const ETH_P_LAT: c_int = 0x6004; +pub const ETH_P_DIAG: c_int = 0x6005; +pub const ETH_P_CUST: c_int = 0x6006; +pub const ETH_P_SCA: c_int = 0x6007; +pub const ETH_P_TEB: c_int = 0x6558; +pub const ETH_P_RARP: c_int = 0x8035; +pub const ETH_P_ATALK: c_int = 0x809B; +pub const ETH_P_AARP: c_int = 0x80F3; +pub const ETH_P_8021Q: c_int = 0x8100; +pub const ETH_P_IPX: c_int = 0x8137; +pub const ETH_P_IPV6: c_int = 0x86DD; +pub const ETH_P_PAUSE: c_int = 0x8808; +pub const ETH_P_SLOW: c_int = 0x8809; +pub const ETH_P_WCCP: c_int = 0x883E; +pub const ETH_P_MPLS_UC: c_int = 0x8847; +pub const ETH_P_MPLS_MC: c_int = 0x8848; +pub const ETH_P_ATMMPOA: c_int = 0x884c; +pub const ETH_P_PPP_DISC: c_int = 0x8863; +pub const ETH_P_PPP_SES: c_int = 0x8864; +pub const ETH_P_LINK_CTL: c_int = 0x886c; +pub const ETH_P_ATMFATE: c_int = 0x8884; +pub const ETH_P_PAE: c_int = 0x888E; +pub const ETH_P_AOE: c_int = 0x88A2; +pub const ETH_P_8021AD: c_int = 0x88A8; +pub const ETH_P_802_EX1: c_int = 0x88B5; +pub const ETH_P_TIPC: c_int = 0x88CA; +pub const ETH_P_8021AH: c_int = 0x88E7; +pub const ETH_P_MVRP: c_int = 0x88F5; +pub const ETH_P_1588: c_int = 0x88F7; +pub const ETH_P_PRP: c_int = 0x88FB; +pub const ETH_P_FCOE: c_int = 0x8906; +pub const ETH_P_TDLS: c_int = 0x890D; +pub const ETH_P_FIP: c_int = 0x8914; +pub const ETH_P_80221: c_int = 0x8917; +pub const ETH_P_LOOPBACK: c_int = 0x9000; +pub const ETH_P_QINQ1: c_int = 0x9100; +pub const ETH_P_QINQ2: c_int = 0x9200; +pub const ETH_P_QINQ3: c_int = 0x9300; +pub const ETH_P_EDSA: c_int = 0xDADA; +pub const ETH_P_AF_IUCV: c_int = 0xFBFB; + +pub const ETH_P_802_3_MIN: c_int = 0x0600; + +pub const ETH_P_802_3: c_int = 0x0001; +pub const ETH_P_AX25: c_int = 0x0002; +pub const ETH_P_ALL: c_int = 0x0003; +pub const ETH_P_802_2: c_int = 0x0004; +pub const ETH_P_SNAP: c_int = 0x0005; +pub const ETH_P_DDCMP: c_int = 0x0006; +pub const ETH_P_WAN_PPP: c_int = 0x0007; +pub const ETH_P_PPP_MP: c_int = 0x0008; +pub const ETH_P_LOCALTALK: c_int = 0x0009; +pub const ETH_P_CAN: c_int = 0x000C; +pub const ETH_P_CANFD: c_int = 0x000D; +pub const ETH_P_PPPTALK: c_int = 0x0010; +pub const ETH_P_TR_802_2: c_int = 0x0011; +pub const ETH_P_MOBITEX: c_int = 0x0015; +pub const ETH_P_CONTROL: c_int = 0x0016; +pub const ETH_P_IRDA: c_int = 0x0017; +pub const ETH_P_ECONET: c_int = 0x0018; +pub const ETH_P_HDLC: c_int = 0x0019; +pub const ETH_P_ARCNET: c_int = 0x001A; +pub const ETH_P_DSA: c_int = 0x001B; +pub const ETH_P_TRAILER: c_int = 0x001C; +pub const ETH_P_PHONET: c_int = 0x00F5; +pub const ETH_P_IEEE802154: c_int = 0x00F6; +pub const ETH_P_CAIF: c_int = 0x00F7; + +pub const SFD_CLOEXEC: c_int = 0x080000; + +pub const NCCS: usize = 32; + +pub const O_TRUNC: c_int = 0x00040000; +pub const O_NOATIME: c_int = 0x00002000; +pub const O_CLOEXEC: c_int = 0x00000100; +pub const O_TMPFILE: c_int = 0x00004000; + +pub const EBFONT: c_int = 59; +pub const ENOSTR: c_int = 60; +pub const ENODATA: c_int = 61; +pub const ETIME: c_int = 62; +pub const 
ENOSR: c_int = 63; +pub const ENONET: c_int = 64; +pub const ENOPKG: c_int = 65; +pub const EREMOTE: c_int = 66; +pub const ENOLINK: c_int = 67; +pub const EADV: c_int = 68; +pub const ESRMNT: c_int = 69; +pub const ECOMM: c_int = 70; +pub const EPROTO: c_int = 71; +pub const EDOTDOT: c_int = 73; + +pub const SA_NODEFER: c_int = 0x40000000; +pub const SA_RESETHAND: c_int = 0x80000000; +pub const SA_RESTART: c_int = 0x10000000; +pub const SA_NOCLDSTOP: c_int = 0x00000001; + +pub const EPOLL_CLOEXEC: c_int = 0x80000; + +pub const EFD_CLOEXEC: c_int = 0x80000; + +pub const BUFSIZ: c_uint = 1024; +pub const TMP_MAX: c_uint = 10000; +pub const FOPEN_MAX: c_uint = 1000; +pub const O_PATH: c_int = 0x00400000; +pub const O_EXEC: c_int = O_PATH; +pub const O_SEARCH: c_int = O_PATH; +pub const O_ACCMODE: c_int = 03 | O_SEARCH; +pub const O_NDELAY: c_int = O_NONBLOCK; +pub const NI_MAXHOST: crate::socklen_t = 255; +pub const PTHREAD_STACK_MIN: size_t = 2048; +pub const POSIX_FADV_DONTNEED: c_int = 4; +pub const POSIX_FADV_NOREUSE: c_int = 5; + +pub const POSIX_MADV_DONTNEED: c_int = 4; + +pub const RLIM_INFINITY: crate::rlim_t = !0; +pub const RLIMIT_RTTIME: c_int = 15; +#[deprecated(since = "0.2.64", note = "Not stable across OS versions")] +pub const RLIMIT_NLIMITS: c_int = 16; +#[allow(deprecated)] +#[deprecated(since = "0.2.64", note = "Not stable across OS versions")] +pub const RLIM_NLIMITS: c_int = RLIMIT_NLIMITS; + +pub const MAP_ANONYMOUS: c_int = MAP_ANON; + +pub const SOCK_DCCP: c_int = 6; +pub const SOCK_PACKET: c_int = 10; + +pub const TCP_COOKIE_TRANSACTIONS: c_int = 15; +pub const TCP_THIN_LINEAR_TIMEOUTS: c_int = 16; +pub const TCP_THIN_DUPACK: c_int = 17; +pub const TCP_USER_TIMEOUT: c_int = 18; +pub const TCP_REPAIR: c_int = 19; +pub const TCP_REPAIR_QUEUE: c_int = 20; +pub const TCP_QUEUE_SEQ: c_int = 21; +pub const TCP_REPAIR_OPTIONS: c_int = 22; +pub const TCP_FASTOPEN: c_int = 23; +pub const TCP_TIMESTAMP: c_int = 24; + +pub const SIGUNUSED: c_int = crate::SIGSYS; + +pub const __SIZEOF_PTHREAD_CONDATTR_T: usize = 4; +pub const __SIZEOF_PTHREAD_MUTEXATTR_T: usize = 4; +pub const __SIZEOF_PTHREAD_RWLOCKATTR_T: usize = 8; + +pub const CPU_SETSIZE: c_int = 128; + +pub const PTRACE_TRACEME: c_int = 0; +pub const PTRACE_PEEKTEXT: c_int = 1; +pub const PTRACE_PEEKDATA: c_int = 2; +pub const PTRACE_PEEKUSER: c_int = 3; +pub const PTRACE_POKETEXT: c_int = 4; +pub const PTRACE_POKEDATA: c_int = 5; +pub const PTRACE_POKEUSER: c_int = 6; +pub const PTRACE_CONT: c_int = 7; +pub const PTRACE_KILL: c_int = 8; +pub const PTRACE_SINGLESTEP: c_int = 9; +pub const PTRACE_GETREGS: c_int = 12; +pub const PTRACE_SETREGS: c_int = 13; +pub const PTRACE_GETFPREGS: c_int = 14; +pub const PTRACE_SETFPREGS: c_int = 15; +pub const PTRACE_ATTACH: c_int = 16; +pub const PTRACE_DETACH: c_int = 17; +pub const PTRACE_GETFPXREGS: c_int = 18; +pub const PTRACE_SETFPXREGS: c_int = 19; +pub const PTRACE_SYSCALL: c_int = 24; +pub const PTRACE_SETOPTIONS: c_int = 0x4200; +pub const PTRACE_GETEVENTMSG: c_int = 0x4201; +pub const PTRACE_GETSIGINFO: c_int = 0x4202; +pub const PTRACE_SETSIGINFO: c_int = 0x4203; +pub const PTRACE_GETREGSET: c_int = 0x4204; +pub const PTRACE_SETREGSET: c_int = 0x4205; +pub const PTRACE_SEIZE: c_int = 0x4206; +pub const PTRACE_INTERRUPT: c_int = 0x4207; +pub const PTRACE_LISTEN: c_int = 0x4208; +pub const PTRACE_PEEKSIGINFO: c_int = 0x4209; + +pub const EPOLLWAKEUP: c_int = 0x20000000; + +pub const EFD_NONBLOCK: c_int = crate::O_NONBLOCK; + +pub const SFD_NONBLOCK: c_int = crate::O_NONBLOCK; 
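CPU_SETSIZE above bounds the cpu_set_t affinity mask that the CPU_ZERO/CPU_SET/CPU_ISSET helpers defined later in this file (via the f! block) operate on. A minimal sketch of how those generated helpers are typically driven (illustrative only; it assumes the usual libc:: paths and that cpu_set_t is defined elsewhere in the module):

    use std::mem;

    // Illustrative only: builds a mask selecting the first `n` CPUs using the
    // helpers generated further down in this file.
    fn first_cpus(n: usize) -> libc::cpu_set_t {
        let mut set: libc::cpu_set_t = unsafe { mem::zeroed() };
        unsafe {
            libc::CPU_ZERO(&mut set);
            for cpu in 0..n.min(libc::CPU_SETSIZE as usize) {
                libc::CPU_SET(cpu, &mut set);
            }
            debug_assert!(n == 0 || libc::CPU_ISSET(0, &set));
        }
        set
    }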
+ +pub const TCSANOW: c_int = 0; +pub const TCSADRAIN: c_int = 1; +pub const TCSAFLUSH: c_int = 2; + +pub const TIOCINQ: c_int = crate::FIONREAD; + +pub const RTLD_GLOBAL: c_int = 0x100; +pub const RTLD_NOLOAD: c_int = 0x4; + +pub const MCL_CURRENT: c_int = 0x0001; +pub const MCL_FUTURE: c_int = 0x0002; + +pub const CBAUD: crate::tcflag_t = 0o0010017; +pub const TAB1: c_int = 0x00000800; +pub const TAB2: c_int = 0x00001000; +pub const TAB3: c_int = 0x00001800; +pub const CR1: c_int = 0x00000200; +pub const CR2: c_int = 0x00000400; +pub const CR3: c_int = 0x00000600; +pub const FF1: c_int = 0x00008000; +pub const BS1: c_int = 0x00002000; +pub const VT1: c_int = 0x00004000; +pub const VWERASE: usize = 14; +pub const VREPRINT: usize = 12; +pub const VSUSP: usize = 10; +pub const VSTART: usize = 8; +pub const VSTOP: usize = 9; +pub const VDISCARD: usize = 13; +pub const VTIME: usize = 5; +pub const IXON: crate::tcflag_t = 0x00000400; +pub const IXOFF: crate::tcflag_t = 0x00001000; +pub const ONLCR: crate::tcflag_t = 0x4; +pub const CSIZE: crate::tcflag_t = 0x00000030; +pub const CS6: crate::tcflag_t = 0x00000010; +pub const CS7: crate::tcflag_t = 0x00000020; +pub const CS8: crate::tcflag_t = 0x00000030; +pub const CSTOPB: crate::tcflag_t = 0x00000040; +pub const CREAD: crate::tcflag_t = 0x00000080; +pub const PARENB: crate::tcflag_t = 0x00000100; +pub const PARODD: crate::tcflag_t = 0x00000200; +pub const HUPCL: crate::tcflag_t = 0x00000400; +pub const CLOCAL: crate::tcflag_t = 0x00000800; +pub const ECHOKE: crate::tcflag_t = 0x00000800; +pub const ECHOE: crate::tcflag_t = 0x00000010; +pub const ECHOK: crate::tcflag_t = 0x00000020; +pub const ECHONL: crate::tcflag_t = 0x00000040; +pub const ECHOPRT: crate::tcflag_t = 0x00000400; +pub const ECHOCTL: crate::tcflag_t = 0x00000200; +pub const ISIG: crate::tcflag_t = 0x00000001; +pub const ICANON: crate::tcflag_t = 0x00000002; +pub const PENDIN: crate::tcflag_t = 0x00004000; +pub const NOFLSH: crate::tcflag_t = 0x00000080; +pub const CIBAUD: crate::tcflag_t = 0o02003600000; +pub const CBAUDEX: crate::tcflag_t = 0o010000; +pub const VSWTC: usize = 7; +pub const OLCUC: crate::tcflag_t = 0o000002; +pub const NLDLY: crate::tcflag_t = 0o000400; +pub const CRDLY: crate::tcflag_t = 0o003000; +pub const TABDLY: crate::tcflag_t = 0o014000; +pub const BSDLY: crate::tcflag_t = 0o020000; +pub const FFDLY: crate::tcflag_t = 0o100000; +pub const VTDLY: crate::tcflag_t = 0o040000; +pub const XTABS: crate::tcflag_t = 0o014000; + +pub const B0: crate::speed_t = 0o000000; +pub const B50: crate::speed_t = 0o000001; +pub const B75: crate::speed_t = 0o000002; +pub const B110: crate::speed_t = 0o000003; +pub const B134: crate::speed_t = 0o000004; +pub const B150: crate::speed_t = 0o000005; +pub const B200: crate::speed_t = 0o000006; +pub const B300: crate::speed_t = 0o000007; +pub const B600: crate::speed_t = 0o000010; +pub const B1200: crate::speed_t = 0o000011; +pub const B1800: crate::speed_t = 0o000012; +pub const B2400: crate::speed_t = 0o000013; +pub const B4800: crate::speed_t = 0o000014; +pub const B9600: crate::speed_t = 0o000015; +pub const B19200: crate::speed_t = 0o000016; +pub const B38400: crate::speed_t = 0o000017; +pub const EXTA: crate::speed_t = B19200; +pub const EXTB: crate::speed_t = B38400; +pub const B57600: crate::speed_t = 0o010001; +pub const B115200: crate::speed_t = 0o010002; +pub const B230400: crate::speed_t = 0o010003; +pub const B460800: crate::speed_t = 0o010004; +pub const B500000: crate::speed_t = 0o010005; +pub const B576000: 
crate::speed_t = 0o010006; +pub const B921600: crate::speed_t = 0o010007; +pub const B1000000: crate::speed_t = 0o010010; +pub const B1152000: crate::speed_t = 0o010011; +pub const B1500000: crate::speed_t = 0o010012; +pub const B2000000: crate::speed_t = 0o010013; +pub const B2500000: crate::speed_t = 0o010014; +pub const B3000000: crate::speed_t = 0o010015; +pub const B3500000: crate::speed_t = 0o010016; +pub const B4000000: crate::speed_t = 0o010017; + +pub const SO_BINDTODEVICE: c_int = 25; +pub const SO_TIMESTAMP: c_int = 29; +pub const SO_MARK: c_int = 36; +pub const SO_RXQ_OVFL: c_int = 40; +pub const SO_PEEK_OFF: c_int = 42; +pub const SO_BUSY_POLL: c_int = 46; +pub const SO_BINDTOIFINDEX: c_int = 62; + +pub const __SIZEOF_PTHREAD_RWLOCK_T: usize = 56; +pub const __SIZEOF_PTHREAD_MUTEX_T: usize = 40; + +pub const O_ASYNC: c_int = 0x00000400; + +pub const FIOCLEX: c_int = 0x5451; +pub const FIONBIO: c_int = 0x5421; + +pub const RLIMIT_RSS: c_int = 5; +pub const RLIMIT_NOFILE: c_int = 7; +pub const RLIMIT_AS: c_int = 9; +pub const RLIMIT_NPROC: c_int = 6; +pub const RLIMIT_MEMLOCK: c_int = 8; + +pub const O_APPEND: c_int = 0x00100000; +pub const O_CREAT: c_int = 0x00010000; +pub const O_EXCL: c_int = 0x00020000; +pub const O_NOCTTY: c_int = 0x00000200; +pub const O_NONBLOCK: c_int = 0x00000010; +pub const O_SYNC: c_int = 0x00000040 | O_DSYNC; +pub const O_RSYNC: c_int = O_SYNC; +pub const O_DSYNC: c_int = 0x00000020; + +pub const SOCK_CLOEXEC: c_int = 0o2000000; +pub const SOCK_NONBLOCK: c_int = 0o4000; + +pub const MAP_ANON: c_int = 0x0020; +pub const MAP_GROWSDOWN: c_int = 0x0100; +pub const MAP_DENYWRITE: c_int = 0x0800; +pub const MAP_EXECUTABLE: c_int = 0x01000; +pub const MAP_LOCKED: c_int = 0x02000; +pub const MAP_NORESERVE: c_int = 0x04000; +pub const MAP_POPULATE: c_int = 0x08000; +pub const MAP_NONBLOCK: c_int = 0x010000; +pub const MAP_STACK: c_int = 0x020000; + +pub const SOCK_STREAM: c_int = 1; +pub const SOCK_DGRAM: c_int = 2; +pub const SOCK_SEQPACKET: c_int = 5; + +pub const SOL_SOCKET: c_int = 1; + +pub const EDEADLK: c_int = 35; +pub const ENAMETOOLONG: c_int = 36; +pub const ENOLCK: c_int = 37; +pub const ENOSYS: c_int = 38; +pub const ENOTEMPTY: c_int = 39; +pub const ELOOP: c_int = 40; +pub const ENOMSG: c_int = 42; +pub const EIDRM: c_int = 43; +pub const ECHRNG: c_int = 44; +pub const EL2NSYNC: c_int = 45; +pub const EL3HLT: c_int = 46; +pub const EL3RST: c_int = 47; +pub const ELNRNG: c_int = 48; +pub const EUNATCH: c_int = 49; +pub const ENOCSI: c_int = 50; +pub const EL2HLT: c_int = 51; +pub const EBADE: c_int = 52; +pub const EBADR: c_int = 53; +pub const EXFULL: c_int = 54; +pub const ENOANO: c_int = 55; +pub const EBADRQC: c_int = 56; +pub const EBADSLT: c_int = 57; +pub const EDEADLOCK: c_int = EDEADLK; +pub const EMULTIHOP: c_int = 72; +pub const EBADMSG: c_int = 74; +pub const EOVERFLOW: c_int = 75; +pub const ENOTUNIQ: c_int = 76; +pub const EBADFD: c_int = 77; +pub const EREMCHG: c_int = 78; +pub const ELIBACC: c_int = 79; +pub const ELIBBAD: c_int = 80; +pub const ELIBSCN: c_int = 81; +pub const ELIBMAX: c_int = 82; +pub const ELIBEXEC: c_int = 83; +pub const EILSEQ: c_int = 84; +pub const ERESTART: c_int = 85; +pub const ESTRPIPE: c_int = 86; +pub const EUSERS: c_int = 87; +pub const ENOTSOCK: c_int = 88; +pub const EDESTADDRREQ: c_int = 89; +pub const EMSGSIZE: c_int = 90; +pub const EPROTOTYPE: c_int = 91; +pub const ENOPROTOOPT: c_int = 92; +pub const EPROTONOSUPPORT: c_int = 93; +pub const ESOCKTNOSUPPORT: c_int = 94; +pub const EOPNOTSUPP: 
c_int = 95; +pub const ENOTSUP: c_int = EOPNOTSUPP; +pub const EPFNOSUPPORT: c_int = 96; +pub const EAFNOSUPPORT: c_int = 97; +pub const EADDRINUSE: c_int = 98; +pub const EADDRNOTAVAIL: c_int = 99; +pub const ENETDOWN: c_int = 100; +pub const ENETUNREACH: c_int = 101; +pub const ENETRESET: c_int = 102; +pub const ECONNABORTED: c_int = 103; +pub const ECONNRESET: c_int = 104; +pub const ENOBUFS: c_int = 105; +pub const EISCONN: c_int = 106; +pub const ENOTCONN: c_int = 107; +pub const ESHUTDOWN: c_int = 108; +pub const ETOOMANYREFS: c_int = 109; +pub const ETIMEDOUT: c_int = 110; +pub const ECONNREFUSED: c_int = 111; +pub const EHOSTDOWN: c_int = 112; +pub const EHOSTUNREACH: c_int = 113; +pub const EALREADY: c_int = 114; +pub const EINPROGRESS: c_int = 115; +pub const ESTALE: c_int = 116; +pub const EUCLEAN: c_int = 117; +pub const ENOTNAM: c_int = 118; +pub const ENAVAIL: c_int = 119; +pub const EISNAM: c_int = 120; +pub const EREMOTEIO: c_int = 121; +pub const EDQUOT: c_int = 122; +pub const ENOMEDIUM: c_int = 123; +pub const EMEDIUMTYPE: c_int = 124; +pub const ECANCELED: c_int = 125; +pub const ENOKEY: c_int = 126; +pub const EKEYEXPIRED: c_int = 127; +pub const EKEYREVOKED: c_int = 128; +pub const EKEYREJECTED: c_int = 129; +pub const EOWNERDEAD: c_int = 130; +pub const ENOTRECOVERABLE: c_int = 131; +pub const ERFKILL: c_int = 132; +pub const EHWPOISON: c_int = 133; + +pub const SO_REUSEADDR: c_int = 2; +pub const SO_TYPE: c_int = 3; +pub const SO_ERROR: c_int = 4; +pub const SO_DONTROUTE: c_int = 5; +pub const SO_BROADCAST: c_int = 6; +pub const SO_SNDBUF: c_int = 7; +pub const SO_RCVBUF: c_int = 8; +pub const SO_KEEPALIVE: c_int = 9; +pub const SO_OOBINLINE: c_int = 10; +pub const SO_NO_CHECK: c_int = 11; +pub const SO_PRIORITY: c_int = 12; +pub const SO_LINGER: c_int = 13; +pub const SO_BSDCOMPAT: c_int = 14; +pub const SO_REUSEPORT: c_int = 15; +pub const SO_PASSCRED: c_int = 16; +pub const SO_PEERCRED: c_int = 17; +pub const SO_RCVLOWAT: c_int = 18; +pub const SO_SNDLOWAT: c_int = 19; +pub const SO_RCVTIMEO: c_int = 20; +pub const SO_SNDTIMEO: c_int = 21; +pub const SO_ACCEPTCONN: c_int = 30; +pub const SO_SNDBUFFORCE: c_int = 32; +pub const SO_RCVBUFFORCE: c_int = 33; +pub const SO_PROTOCOL: c_int = 38; +pub const SO_DOMAIN: c_int = 39; + +pub const SA_ONSTACK: c_int = 0x08000000; +pub const SA_SIGINFO: c_int = 0x00000004; +pub const SA_NOCLDWAIT: c_int = 0x00000002; + +pub const SIGCHLD: c_int = 17; +pub const SIGBUS: c_int = 7; +pub const SIGTTIN: c_int = 21; +pub const SIGTTOU: c_int = 22; +pub const SIGXCPU: c_int = 24; +pub const SIGXFSZ: c_int = 25; +pub const SIGVTALRM: c_int = 26; +pub const SIGPROF: c_int = 27; +pub const SIGWINCH: c_int = 28; +pub const SIGUSR1: c_int = 10; +pub const SIGUSR2: c_int = 12; +pub const SIGCONT: c_int = 18; +pub const SIGSTOP: c_int = 19; +pub const SIGTSTP: c_int = 20; +pub const SIGURG: c_int = 23; +pub const SIGIO: c_int = 29; +pub const SIGSYS: c_int = 31; +pub const SIGSTKFLT: c_int = 16; +pub const SIGPOLL: c_int = 29; +pub const SIGPWR: c_int = 30; +pub const SIG_SETMASK: c_int = 2; +pub const SIG_BLOCK: c_int = 0x000000; +pub const SIG_UNBLOCK: c_int = 0x01; + +pub const EXTPROC: crate::tcflag_t = 0x00010000; + +pub const MAP_HUGETLB: c_int = 0x040000; + +pub const F_GETLK: c_int = 5; +pub const F_GETOWN: c_int = 9; +pub const F_SETLK: c_int = 6; +pub const F_SETLKW: c_int = 7; +pub const F_SETOWN: c_int = 8; + +pub const VEOF: usize = 4; +pub const VEOL: usize = 11; +pub const VEOL2: usize = 16; +pub const VMIN: usize = 6; +pub 
const IEXTEN: crate::tcflag_t = 0x00008000; +pub const TOSTOP: crate::tcflag_t = 0x00000100; +pub const FLUSHO: crate::tcflag_t = 0x00001000; + +pub const TCGETS: c_int = 0x5401; +pub const TCSETS: c_int = 0x5402; +pub const TCSETSW: c_int = 0x5403; +pub const TCSETSF: c_int = 0x5404; +pub const TCGETA: c_int = 0x5405; +pub const TCSETA: c_int = 0x5406; +pub const TCSETAW: c_int = 0x5407; +pub const TCSETAF: c_int = 0x5408; +pub const TCSBRK: c_int = 0x5409; +pub const TCXONC: c_int = 0x540A; +pub const TCFLSH: c_int = 0x540B; +pub const TIOCGSOFTCAR: c_int = 0x5419; +pub const TIOCSSOFTCAR: c_int = 0x541A; +pub const TIOCLINUX: c_int = 0x541C; +pub const TIOCGSERIAL: c_int = 0x541E; +pub const TIOCEXCL: c_int = 0x540C; +pub const TIOCNXCL: c_int = 0x540D; +pub const TIOCSCTTY: c_int = 0x540E; +pub const TIOCGPGRP: c_int = 0x540F; +pub const TIOCSPGRP: c_int = 0x5410; +pub const TIOCOUTQ: c_int = 0x5411; +pub const TIOCSTI: c_int = 0x5412; +pub const TIOCGWINSZ: c_int = 0x5413; +pub const TIOCSWINSZ: c_int = 0x5414; +pub const TIOCMGET: c_int = 0x5415; +pub const TIOCMBIS: c_int = 0x5416; +pub const TIOCMBIC: c_int = 0x5417; +pub const TIOCMSET: c_int = 0x5418; +pub const FIONREAD: c_int = 0x541B; +pub const TIOCCONS: c_int = 0x541D; + +pub const POLLWRNORM: c_short = 0x100; +pub const POLLWRBAND: c_short = 0x200; + +pub const TIOCM_LE: c_int = 0x001; +pub const TIOCM_DTR: c_int = 0x002; +pub const TIOCM_RTS: c_int = 0x004; +pub const TIOCM_ST: c_int = 0x008; +pub const TIOCM_SR: c_int = 0x010; +pub const TIOCM_CTS: c_int = 0x020; +pub const TIOCM_CAR: c_int = 0x040; +pub const TIOCM_RNG: c_int = 0x080; +pub const TIOCM_DSR: c_int = 0x100; +pub const TIOCM_CD: c_int = TIOCM_CAR; +pub const TIOCM_RI: c_int = TIOCM_RNG; + +pub const O_DIRECTORY: c_int = 0x00080000; +pub const O_DIRECT: c_int = 0x00000800; +pub const O_LARGEFILE: c_int = 0x00001000; +pub const O_NOFOLLOW: c_int = 0x00000080; + +pub const HUGETLB_FLAG_ENCODE_SHIFT: u32 = 26; +pub const MAP_HUGE_SHIFT: u32 = 26; + +// intentionally not public, only used for fd_set +cfg_if! { + if #[cfg(target_pointer_width = "32")] { + const ULONG_SIZE: usize = 32; + } else if #[cfg(target_pointer_width = "64")] { + const ULONG_SIZE: usize = 64; + } else { + // Unknown target_pointer_width + } +} + +// END_PUB_CONST + +f! 
{
+    pub fn FD_CLR(fd: c_int, set: *mut fd_set) -> () {
+        let fd = fd as usize;
+        let size = size_of_val(&(*set).fds_bits[0]) * 8;
+        (*set).fds_bits[fd / size] &= !(1 << (fd % size));
+        return;
+    }
+
+    pub fn FD_ISSET(fd: c_int, set: *const fd_set) -> bool {
+        let fd = fd as usize;
+        let size = size_of_val(&(*set).fds_bits[0]) * 8;
+        return ((*set).fds_bits[fd / size] & (1 << (fd % size))) != 0;
+    }
+
+    pub fn FD_SET(fd: c_int, set: *mut fd_set) -> () {
+        let fd = fd as usize;
+        let size = size_of_val(&(*set).fds_bits[0]) * 8;
+        (*set).fds_bits[fd / size] |= 1 << (fd % size);
+        return;
+    }
+
+    pub fn FD_ZERO(set: *mut fd_set) -> () {
+        for slot in (*set).fds_bits.iter_mut() {
+            *slot = 0;
+        }
+    }
+
+    pub fn CPU_ZERO(cpuset: &mut cpu_set_t) -> () {
+        for slot in cpuset.bits.iter_mut() {
+            *slot = 0;
+        }
+    }
+
+    pub fn CPU_SET(cpu: usize, cpuset: &mut cpu_set_t) -> () {
+        let size_in_bits = 8 * size_of_val(&cpuset.bits[0]); // 32, 64 etc
+        let (idx, offset) = (cpu / size_in_bits, cpu % size_in_bits);
+        cpuset.bits[idx] |= 1 << offset;
+        ()
+    }
+
+    pub fn CPU_CLR(cpu: usize, cpuset: &mut cpu_set_t) -> () {
+        let size_in_bits = 8 * size_of_val(&cpuset.bits[0]); // 32, 64 etc
+        let (idx, offset) = (cpu / size_in_bits, cpu % size_in_bits);
+        cpuset.bits[idx] &= !(1 << offset);
+        ()
+    }
+
+    pub fn CPU_ISSET(cpu: usize, cpuset: &cpu_set_t) -> bool {
+        let size_in_bits = 8 * size_of_val(&cpuset.bits[0]);
+        let (idx, offset) = (cpu / size_in_bits, cpu % size_in_bits);
+        0 != (cpuset.bits[idx] & (1 << offset))
+    }
+
+    pub fn CPU_EQUAL(set1: &cpu_set_t, set2: &cpu_set_t) -> bool {
+        set1.bits == set2.bits
+    }
+
+    pub fn CMSG_DATA(cmsg: *const cmsghdr) -> *mut c_uchar {
+        cmsg.offset(1) as *mut c_uchar
+    }
+
+    pub fn CMSG_NXTHDR(mhdr: *const msghdr, cmsg: *const cmsghdr) -> *mut cmsghdr {
+        if ((*cmsg).cmsg_len as size_t) < size_of::<cmsghdr>() {
+            core::ptr::null_mut::<cmsghdr>()
+        } else if __CMSG_NEXT(cmsg).add(size_of::<cmsghdr>()) >= __MHDR_END(mhdr) {
+            core::ptr::null_mut::<cmsghdr>()
+        } else {
+            __CMSG_NEXT(cmsg).cast()
+        }
+    }
+
+    pub fn CMSG_FIRSTHDR(mhdr: *const msghdr) -> *mut cmsghdr {
+        if (*mhdr).msg_controllen as size_t >= size_of::<cmsghdr>() {
+            (*mhdr).msg_control.cast()
+        } else {
+            core::ptr::null_mut::<cmsghdr>()
+        }
+    }
+
+    pub const fn CMSG_ALIGN(len: size_t) -> size_t {
+        (len + size_of::<usize>() - 1) & !(size_of::<usize>() - 1)
+    }
+
+    pub const fn CMSG_SPACE(len: c_uint) -> c_uint {
+        (CMSG_ALIGN(len as size_t) + CMSG_ALIGN(size_of::<cmsghdr>())) as c_uint
+    }
+
+    pub const fn CMSG_LEN(len: c_uint) -> c_uint {
+        (CMSG_ALIGN(size_of::<cmsghdr>()) + len as size_t) as c_uint
+    }
+}
+
+safe_f!
{
+    pub const fn WIFSTOPPED(status: c_int) -> bool {
+        (status & 0xff) == 0x7f
+    }
+
+    pub const fn WSTOPSIG(status: c_int) -> c_int {
+        (status >> 8) & 0xff
+    }
+
+    pub const fn WIFCONTINUED(status: c_int) -> bool {
+        status == 0xffff
+    }
+
+    pub const fn WIFSIGNALED(status: c_int) -> bool {
+        ((status & 0x7f) + 1) as i8 >= 2
+    }
+
+    pub const fn WTERMSIG(status: c_int) -> c_int {
+        status & 0x7f
+    }
+
+    pub const fn WIFEXITED(status: c_int) -> bool {
+        (status & 0x7f) == 0
+    }
+
+    pub const fn WEXITSTATUS(status: c_int) -> c_int {
+        (status >> 8) & 0xff
+    }
+
+    pub const fn WCOREDUMP(status: c_int) -> bool {
+        (status & 0x80) != 0
+    }
+
+    pub const fn QCMD(cmd: c_int, type_: c_int) -> c_int {
+        (cmd << 8) | (type_ & 0x00ff)
+    }
+
+    pub const fn makedev(major: c_uint, minor: c_uint) -> crate::dev_t {
+        let major = major as crate::dev_t;
+        let minor = minor as crate::dev_t;
+        let mut dev = 0;
+        dev |= (major & 0x00000fff) << 8;
+        dev |= (major & 0xfffff000) << 32;
+        dev |= (minor & 0x000000ff) << 0;
+        dev |= (minor & 0xffffff00) << 12;
+        dev
+    }
+
+    pub const fn major(dev: crate::dev_t) -> c_uint {
+        let mut major = 0;
+        major |= (dev & 0x00000000000fff00) >> 8;
+        major |= (dev & 0xfffff00000000000) >> 32;
+        major as c_uint
+    }
+
+    pub const fn minor(dev: crate::dev_t) -> c_uint {
+        let mut minor = 0;
+        minor |= (dev & 0x00000000000000ff) >> 0;
+        minor |= (dev & 0x00000ffffff00000) >> 12;
+        minor as c_uint
+    }
+}
+
+fn __CMSG_LEN(cmsg: *const cmsghdr) -> ssize_t {
+    ((unsafe { (*cmsg).cmsg_len as size_t } + size_of::<c_long>() - 1) & !(size_of::<c_long>() - 1))
+        as ssize_t
+}
+
+fn __CMSG_NEXT(cmsg: *const cmsghdr) -> *mut c_uchar {
+    (unsafe { cmsg.offset(__CMSG_LEN(cmsg)) }) as *mut c_uchar
+}
+
+fn __MHDR_END(mhdr: *const msghdr) -> *mut c_uchar {
+    unsafe { (*mhdr).msg_control.offset((*mhdr).msg_controllen as isize) }.cast()
+}
+
+// EXTERN_FN
+
+#[link(name = "c")]
+#[link(name = "fdio")]
+extern "C" {}
+
+#[derive(Debug)]
+pub enum FILE {}
+impl Copy for FILE {}
+impl Clone for FILE {
+    fn clone(&self) -> FILE {
+        *self
+    }
+}
+#[derive(Debug)]
+pub enum fpos_t {} // FIXME(fuchsia): fill this out with a struct
+impl Copy for fpos_t {}
+impl Clone for fpos_t {
+    fn clone(&self) -> fpos_t {
+        *self
+    }
+}
+
+extern "C" {
+    pub fn isalnum(c: c_int) -> c_int;
+    pub fn isalpha(c: c_int) -> c_int;
+    pub fn iscntrl(c: c_int) -> c_int;
+    pub fn isdigit(c: c_int) -> c_int;
+    pub fn isgraph(c: c_int) -> c_int;
+    pub fn islower(c: c_int) -> c_int;
+    pub fn isprint(c: c_int) -> c_int;
+    pub fn ispunct(c: c_int) -> c_int;
+    pub fn isspace(c: c_int) -> c_int;
+    pub fn isupper(c: c_int) -> c_int;
+    pub fn isxdigit(c: c_int) -> c_int;
+    pub fn isblank(c: c_int) -> c_int;
+    pub fn tolower(c: c_int) -> c_int;
+    pub fn toupper(c: c_int) -> c_int;
+    pub fn fopen(filename: *const c_char, mode: *const c_char) -> *mut FILE;
+    pub fn freopen(filename: *const c_char, mode: *const c_char, file: *mut FILE) -> *mut FILE;
+    pub fn fflush(file: *mut FILE) -> c_int;
+    pub fn fclose(file: *mut FILE) -> c_int;
+    pub fn remove(filename: *const c_char) -> c_int;
+    pub fn rename(oldname: *const c_char, newname: *const c_char) -> c_int;
+    pub fn tmpfile() -> *mut FILE;
+    pub fn setvbuf(stream: *mut FILE, buffer: *mut c_char, mode: c_int, size: size_t) -> c_int;
+    pub fn setbuf(stream: *mut FILE, buf: *mut c_char);
+    pub fn getchar() -> c_int;
+    pub fn putchar(c: c_int) -> c_int;
+    pub fn fgetc(stream: *mut FILE) -> c_int;
+    pub fn fgets(buf: *mut c_char, n: c_int, stream: *mut FILE) -> *mut c_char;
+    pub fn fputc(c: c_int, stream:
*mut FILE) -> c_int; + pub fn fputs(s: *const c_char, stream: *mut FILE) -> c_int; + pub fn puts(s: *const c_char) -> c_int; + pub fn ungetc(c: c_int, stream: *mut FILE) -> c_int; + pub fn fread(ptr: *mut c_void, size: size_t, nobj: size_t, stream: *mut FILE) -> size_t; + pub fn fwrite(ptr: *const c_void, size: size_t, nobj: size_t, stream: *mut FILE) -> size_t; + pub fn fseek(stream: *mut FILE, offset: c_long, whence: c_int) -> c_int; + pub fn ftell(stream: *mut FILE) -> c_long; + pub fn rewind(stream: *mut FILE); + pub fn fgetpos(stream: *mut FILE, ptr: *mut fpos_t) -> c_int; + pub fn fsetpos(stream: *mut FILE, ptr: *const fpos_t) -> c_int; + pub fn feof(stream: *mut FILE) -> c_int; + pub fn ferror(stream: *mut FILE) -> c_int; + pub fn perror(s: *const c_char); + pub fn atof(s: *const c_char) -> c_double; + pub fn atoi(s: *const c_char) -> c_int; + pub fn atol(s: *const c_char) -> c_long; + pub fn atoll(s: *const c_char) -> c_longlong; + pub fn strtod(s: *const c_char, endp: *mut *mut c_char) -> c_double; + pub fn strtof(s: *const c_char, endp: *mut *mut c_char) -> c_float; + pub fn strtol(s: *const c_char, endp: *mut *mut c_char, base: c_int) -> c_long; + pub fn strtoll(s: *const c_char, endp: *mut *mut c_char, base: c_int) -> c_longlong; + pub fn strtoul(s: *const c_char, endp: *mut *mut c_char, base: c_int) -> c_ulong; + pub fn strtoull(s: *const c_char, endp: *mut *mut c_char, base: c_int) -> c_ulonglong; + pub fn calloc(nobj: size_t, size: size_t) -> *mut c_void; + pub fn malloc(size: size_t) -> *mut c_void; + pub fn realloc(p: *mut c_void, size: size_t) -> *mut c_void; + pub fn free(p: *mut c_void); + pub fn abort() -> !; + pub fn exit(status: c_int) -> !; + pub fn _exit(status: c_int) -> !; + pub fn atexit(cb: extern "C" fn()) -> c_int; + pub fn system(s: *const c_char) -> c_int; + pub fn getenv(s: *const c_char) -> *mut c_char; + + pub fn strcpy(dst: *mut c_char, src: *const c_char) -> *mut c_char; + pub fn strncpy(dst: *mut c_char, src: *const c_char, n: size_t) -> *mut c_char; + pub fn strcat(s: *mut c_char, ct: *const c_char) -> *mut c_char; + pub fn strncat(s: *mut c_char, ct: *const c_char, n: size_t) -> *mut c_char; + pub fn strcmp(cs: *const c_char, ct: *const c_char) -> c_int; + pub fn strncmp(cs: *const c_char, ct: *const c_char, n: size_t) -> c_int; + pub fn strcoll(cs: *const c_char, ct: *const c_char) -> c_int; + pub fn strchr(cs: *const c_char, c: c_int) -> *mut c_char; + pub fn strrchr(cs: *const c_char, c: c_int) -> *mut c_char; + pub fn strspn(cs: *const c_char, ct: *const c_char) -> size_t; + pub fn strcspn(cs: *const c_char, ct: *const c_char) -> size_t; + pub fn strdup(cs: *const c_char) -> *mut c_char; + pub fn strpbrk(cs: *const c_char, ct: *const c_char) -> *mut c_char; + pub fn strstr(cs: *const c_char, ct: *const c_char) -> *mut c_char; + pub fn strlen(cs: *const c_char) -> size_t; + pub fn strnlen(cs: *const c_char, maxlen: size_t) -> size_t; + pub fn strerror(n: c_int) -> *mut c_char; + pub fn strtok(s: *mut c_char, t: *const c_char) -> *mut c_char; + pub fn strxfrm(s: *mut c_char, ct: *const c_char, n: size_t) -> size_t; + pub fn wcslen(buf: *const wchar_t) -> size_t; + pub fn wcstombs(dest: *mut c_char, src: *const wchar_t, n: size_t) -> size_t; + + pub fn memchr(cx: *const c_void, c: c_int, n: size_t) -> *mut c_void; + pub fn wmemchr(cx: *const wchar_t, c: wchar_t, n: size_t) -> *mut wchar_t; + pub fn memcmp(cx: *const c_void, ct: *const c_void, n: size_t) -> c_int; + pub fn memcpy(dest: *mut c_void, src: *const c_void, n: size_t) -> *mut c_void; + 
pub fn memmove(dest: *mut c_void, src: *const c_void, n: size_t) -> *mut c_void; + pub fn memset(dest: *mut c_void, c: c_int, n: size_t) -> *mut c_void; + + pub fn abs(i: c_int) -> c_int; + pub fn labs(i: c_long) -> c_long; + pub fn rand() -> c_int; + pub fn srand(seed: c_uint); + + pub fn getpwnam(name: *const c_char) -> *mut passwd; + pub fn getpwuid(uid: crate::uid_t) -> *mut passwd; + + pub fn fprintf(stream: *mut crate::FILE, format: *const c_char, ...) -> c_int; + pub fn printf(format: *const c_char, ...) -> c_int; + pub fn snprintf(s: *mut c_char, n: size_t, format: *const c_char, ...) -> c_int; + pub fn sprintf(s: *mut c_char, format: *const c_char, ...) -> c_int; + pub fn fscanf(stream: *mut crate::FILE, format: *const c_char, ...) -> c_int; + pub fn scanf(format: *const c_char, ...) -> c_int; + pub fn sscanf(s: *const c_char, format: *const c_char, ...) -> c_int; + pub fn getchar_unlocked() -> c_int; + pub fn putchar_unlocked(c: c_int) -> c_int; + + pub fn socket(domain: c_int, ty: c_int, protocol: c_int) -> c_int; + pub fn connect(socket: c_int, address: *const sockaddr, len: socklen_t) -> c_int; + pub fn listen(socket: c_int, backlog: c_int) -> c_int; + pub fn accept(socket: c_int, address: *mut sockaddr, address_len: *mut socklen_t) -> c_int; + pub fn getpeername(socket: c_int, address: *mut sockaddr, address_len: *mut socklen_t) + -> c_int; + pub fn getsockname(socket: c_int, address: *mut sockaddr, address_len: *mut socklen_t) + -> c_int; + pub fn setsockopt( + socket: c_int, + level: c_int, + name: c_int, + value: *const c_void, + option_len: socklen_t, + ) -> c_int; + pub fn socketpair( + domain: c_int, + type_: c_int, + protocol: c_int, + socket_vector: *mut c_int, + ) -> c_int; + pub fn sendto( + socket: c_int, + buf: *const c_void, + len: size_t, + flags: c_int, + addr: *const sockaddr, + addrlen: socklen_t, + ) -> ssize_t; + pub fn shutdown(socket: c_int, how: c_int) -> c_int; + + pub fn chmod(path: *const c_char, mode: mode_t) -> c_int; + pub fn fchmod(fd: c_int, mode: mode_t) -> c_int; + + pub fn fstat(fildes: c_int, buf: *mut stat) -> c_int; + + pub fn mkdir(path: *const c_char, mode: mode_t) -> c_int; + + pub fn stat(path: *const c_char, buf: *mut stat) -> c_int; + + pub fn pclose(stream: *mut crate::FILE) -> c_int; + pub fn fdopen(fd: c_int, mode: *const c_char) -> *mut crate::FILE; + pub fn fileno(stream: *mut crate::FILE) -> c_int; + + pub fn open(path: *const c_char, oflag: c_int, ...) -> c_int; + pub fn creat(path: *const c_char, mode: mode_t) -> c_int; + pub fn fcntl(fd: c_int, cmd: c_int, ...) -> c_int; + + pub fn opendir(dirname: *const c_char) -> *mut crate::DIR; + pub fn readdir(dirp: *mut crate::DIR) -> *mut crate::dirent; + pub fn readdir_r( + dirp: *mut crate::DIR, + entry: *mut crate::dirent, + result: *mut *mut crate::dirent, + ) -> c_int; + pub fn closedir(dirp: *mut crate::DIR) -> c_int; + pub fn rewinddir(dirp: *mut crate::DIR); + + pub fn openat(dirfd: c_int, pathname: *const c_char, flags: c_int, ...) 
-> c_int; + pub fn fchmodat(dirfd: c_int, pathname: *const c_char, mode: mode_t, flags: c_int) -> c_int; + pub fn fchown(fd: c_int, owner: crate::uid_t, group: crate::gid_t) -> c_int; + pub fn fchownat( + dirfd: c_int, + pathname: *const c_char, + owner: crate::uid_t, + group: crate::gid_t, + flags: c_int, + ) -> c_int; + pub fn fstatat(dirfd: c_int, pathname: *const c_char, buf: *mut stat, flags: c_int) -> c_int; + pub fn linkat( + olddirfd: c_int, + oldpath: *const c_char, + newdirfd: c_int, + newpath: *const c_char, + flags: c_int, + ) -> c_int; + pub fn mkdirat(dirfd: c_int, pathname: *const c_char, mode: mode_t) -> c_int; + pub fn readlinkat( + dirfd: c_int, + pathname: *const c_char, + buf: *mut c_char, + bufsiz: size_t, + ) -> ssize_t; + pub fn renameat( + olddirfd: c_int, + oldpath: *const c_char, + newdirfd: c_int, + newpath: *const c_char, + ) -> c_int; + pub fn symlinkat(target: *const c_char, newdirfd: c_int, linkpath: *const c_char) -> c_int; + pub fn unlinkat(dirfd: c_int, pathname: *const c_char, flags: c_int) -> c_int; + + pub fn access(path: *const c_char, amode: c_int) -> c_int; + pub fn alarm(seconds: c_uint) -> c_uint; + pub fn chdir(dir: *const c_char) -> c_int; + pub fn chown(path: *const c_char, uid: uid_t, gid: gid_t) -> c_int; + pub fn lchown(path: *const c_char, uid: uid_t, gid: gid_t) -> c_int; + pub fn close(fd: c_int) -> c_int; + pub fn dup(fd: c_int) -> c_int; + pub fn dup2(src: c_int, dst: c_int) -> c_int; + + pub fn execl(path: *const c_char, arg0: *const c_char, ...) -> c_int; + pub fn execle(path: *const c_char, arg0: *const c_char, ...) -> c_int; + pub fn execlp(file: *const c_char, arg0: *const c_char, ...) -> c_int; + + // DIFF(main): changed to `*const *mut` in e77f551de9 + pub fn execv(prog: *const c_char, argv: *const *const c_char) -> c_int; + pub fn execve( + prog: *const c_char, + argv: *const *const c_char, + envp: *const *const c_char, + ) -> c_int; + pub fn execvp(c: *const c_char, argv: *const *const c_char) -> c_int; + + pub fn fork() -> pid_t; + pub fn fpathconf(filedes: c_int, name: c_int) -> c_long; + pub fn getcwd(buf: *mut c_char, size: size_t) -> *mut c_char; + pub fn getegid() -> gid_t; + pub fn geteuid() -> uid_t; + pub fn getgid() -> gid_t; + pub fn getgroups(ngroups_max: c_int, groups: *mut gid_t) -> c_int; + pub fn getlogin() -> *mut c_char; + pub fn getopt(argc: c_int, argv: *const *mut c_char, optstr: *const c_char) -> c_int; + pub fn getpgid(pid: pid_t) -> pid_t; + pub fn getpgrp() -> pid_t; + pub fn getpid() -> pid_t; + pub fn getppid() -> pid_t; + pub fn getuid() -> uid_t; + pub fn isatty(fd: c_int) -> c_int; + pub fn link(src: *const c_char, dst: *const c_char) -> c_int; + pub fn lseek(fd: c_int, offset: off_t, whence: c_int) -> off_t; + pub fn pathconf(path: *const c_char, name: c_int) -> c_long; + pub fn pause() -> c_int; + pub fn pipe(fds: *mut c_int) -> c_int; + pub fn posix_memalign(memptr: *mut *mut c_void, align: size_t, size: size_t) -> c_int; + pub fn read(fd: c_int, buf: *mut c_void, count: size_t) -> ssize_t; + pub fn rmdir(path: *const c_char) -> c_int; + pub fn seteuid(uid: uid_t) -> c_int; + pub fn setegid(gid: gid_t) -> c_int; + pub fn setgid(gid: gid_t) -> c_int; + pub fn setpgid(pid: pid_t, pgid: pid_t) -> c_int; + pub fn setsid() -> pid_t; + pub fn setuid(uid: uid_t) -> c_int; + pub fn sleep(secs: c_uint) -> c_uint; + pub fn nanosleep(rqtp: *const timespec, rmtp: *mut timespec) -> c_int; + pub fn tcgetpgrp(fd: c_int) -> pid_t; + pub fn tcsetpgrp(fd: c_int, pgrp: crate::pid_t) -> c_int; + pub fn ttyname(fd: 
c_int) -> *mut c_char;
+    pub fn unlink(c: *const c_char) -> c_int;
+    pub fn wait(status: *mut c_int) -> pid_t;
+    pub fn waitpid(pid: pid_t, status: *mut c_int, options: c_int) -> pid_t;
+    pub fn write(fd: c_int, buf: *const c_void, count: size_t) -> ssize_t;
+    pub fn pread(fd: c_int, buf: *mut c_void, count: size_t, offset: off_t) -> ssize_t;
+    pub fn pwrite(fd: c_int, buf: *const c_void, count: size_t, offset: off_t) -> ssize_t;
+    pub fn umask(mask: mode_t) -> mode_t;
+
+    pub fn utime(file: *const c_char, buf: *const utimbuf) -> c_int;
+
+    pub fn kill(pid: pid_t, sig: c_int) -> c_int;
+
+    pub fn mlock(addr: *const c_void, len: size_t) -> c_int;
+    pub fn munlock(addr: *const c_void, len: size_t) -> c_int;
+    pub fn mlockall(flags: c_int) -> c_int;
+    pub fn munlockall() -> c_int;
+
+    pub fn mmap(
+        addr: *mut c_void,
+        len: size_t,
+        prot: c_int,
+        flags: c_int,
+        fd: c_int,
+        offset: off_t,
+    ) -> *mut c_void;
+    pub fn munmap(addr: *mut c_void, len: size_t) -> c_int;
+
+    pub fn if_nametoindex(ifname: *const c_char) -> c_uint;
+    pub fn if_indextoname(ifindex: c_uint, ifname: *mut c_char) -> *mut c_char;
+
+    pub fn lstat(path: *const c_char, buf: *mut stat) -> c_int;
+
+    pub fn fsync(fd: c_int) -> c_int;
+
+    pub fn setenv(name: *const c_char, val: *const c_char, overwrite: c_int) -> c_int;
+    pub fn unsetenv(name: *const c_char) -> c_int;
+
+    pub fn symlink(path1: *const c_char, path2: *const c_char) -> c_int;
+
+    pub fn ftruncate(fd: c_int, length: off_t) -> c_int;
+
+    pub fn signal(signum: c_int, handler: sighandler_t) -> sighandler_t;
+
+    pub fn realpath(pathname: *const c_char, resolved: *mut c_char) -> *mut c_char;
+
+    pub fn flock(fd: c_int, operation: c_int) -> c_int;
+
+    pub fn gettimeofday(tp: *mut crate::timeval, tz: *mut c_void) -> c_int;
+    pub fn times(buf: *mut crate::tms) -> crate::clock_t;
+
+    pub fn pthread_self() -> crate::pthread_t;
+    pub fn pthread_join(native: crate::pthread_t, value: *mut *mut c_void) -> c_int;
+    pub fn pthread_exit(value: *mut c_void) -> !;
+    pub fn pthread_attr_init(attr: *mut crate::pthread_attr_t) -> c_int;
+    pub fn pthread_attr_destroy(attr: *mut crate::pthread_attr_t) -> c_int;
+    pub fn pthread_attr_getstacksize(
+        attr: *const crate::pthread_attr_t,
+        stacksize: *mut size_t,
+    ) -> c_int;
+    pub fn pthread_attr_setstacksize(attr: *mut crate::pthread_attr_t, stack_size: size_t)
+        -> c_int;
+    pub fn pthread_attr_setdetachstate(attr: *mut crate::pthread_attr_t, state: c_int) -> c_int;
+    pub fn pthread_detach(thread: crate::pthread_t) -> c_int;
+    pub fn sched_yield() -> c_int;
+    pub fn pthread_key_create(
+        key: *mut pthread_key_t,
+        dtor: Option<unsafe extern "C" fn(*mut c_void)>,
+    ) -> c_int;
+    pub fn pthread_key_delete(key: pthread_key_t) -> c_int;
+    pub fn pthread_getspecific(key: pthread_key_t) -> *mut c_void;
+    pub fn pthread_setspecific(key: pthread_key_t, value: *const c_void) -> c_int;
+    pub fn pthread_mutex_init(
+        lock: *mut pthread_mutex_t,
+        attr: *const pthread_mutexattr_t,
+    ) -> c_int;
+    pub fn pthread_mutex_destroy(lock: *mut pthread_mutex_t) -> c_int;
+    pub fn pthread_mutex_lock(lock: *mut pthread_mutex_t) -> c_int;
+    pub fn pthread_mutex_trylock(lock: *mut pthread_mutex_t) -> c_int;
+    pub fn pthread_mutex_unlock(lock: *mut pthread_mutex_t) -> c_int;
+
+    pub fn pthread_mutexattr_init(attr: *mut pthread_mutexattr_t) -> c_int;
+    pub fn pthread_mutexattr_destroy(attr: *mut pthread_mutexattr_t) -> c_int;
+    pub fn pthread_mutexattr_settype(attr: *mut pthread_mutexattr_t, _type: c_int) -> c_int;
+
+    pub fn pthread_cond_init(cond: *mut pthread_cond_t, attr: *const
pthread_condattr_t) -> c_int; + pub fn pthread_cond_wait(cond: *mut pthread_cond_t, lock: *mut pthread_mutex_t) -> c_int; + pub fn pthread_cond_timedwait( + cond: *mut pthread_cond_t, + lock: *mut pthread_mutex_t, + abstime: *const crate::timespec, + ) -> c_int; + pub fn pthread_cond_signal(cond: *mut pthread_cond_t) -> c_int; + pub fn pthread_cond_broadcast(cond: *mut pthread_cond_t) -> c_int; + pub fn pthread_cond_destroy(cond: *mut pthread_cond_t) -> c_int; + pub fn pthread_condattr_init(attr: *mut pthread_condattr_t) -> c_int; + pub fn pthread_condattr_destroy(attr: *mut pthread_condattr_t) -> c_int; + pub fn pthread_rwlock_init( + lock: *mut pthread_rwlock_t, + attr: *const pthread_rwlockattr_t, + ) -> c_int; + pub fn pthread_rwlock_destroy(lock: *mut pthread_rwlock_t) -> c_int; + pub fn pthread_rwlock_rdlock(lock: *mut pthread_rwlock_t) -> c_int; + pub fn pthread_rwlock_tryrdlock(lock: *mut pthread_rwlock_t) -> c_int; + pub fn pthread_rwlock_wrlock(lock: *mut pthread_rwlock_t) -> c_int; + pub fn pthread_rwlock_trywrlock(lock: *mut pthread_rwlock_t) -> c_int; + pub fn pthread_rwlock_unlock(lock: *mut pthread_rwlock_t) -> c_int; + pub fn pthread_rwlockattr_init(attr: *mut pthread_rwlockattr_t) -> c_int; + pub fn pthread_rwlockattr_destroy(attr: *mut pthread_rwlockattr_t) -> c_int; + pub fn pthread_getname_np(thread: crate::pthread_t, name: *mut c_char, len: size_t) -> c_int; + pub fn pthread_setname_np(thread: crate::pthread_t, name: *const c_char) -> c_int; + pub fn strerror_r(errnum: c_int, buf: *mut c_char, buflen: size_t) -> c_int; + + pub fn getsockopt( + sockfd: c_int, + level: c_int, + optname: c_int, + optval: *mut c_void, + optlen: *mut crate::socklen_t, + ) -> c_int; + pub fn raise(signum: c_int) -> c_int; + pub fn sigaction(signum: c_int, act: *const sigaction, oldact: *mut sigaction) -> c_int; + + pub fn utimes(filename: *const c_char, times: *const crate::timeval) -> c_int; + pub fn dlopen(filename: *const c_char, flag: c_int) -> *mut c_void; + pub fn dlerror() -> *mut c_char; + pub fn dlsym(handle: *mut c_void, symbol: *const c_char) -> *mut c_void; + pub fn dlclose(handle: *mut c_void) -> c_int; + pub fn dladdr(addr: *const c_void, info: *mut Dl_info) -> c_int; + + pub fn getaddrinfo( + node: *const c_char, + service: *const c_char, + hints: *const addrinfo, + res: *mut *mut addrinfo, + ) -> c_int; + pub fn freeaddrinfo(res: *mut addrinfo); + pub fn gai_strerror(errcode: c_int) -> *const c_char; + pub fn res_init() -> c_int; + + pub fn gmtime_r(time_p: *const time_t, result: *mut tm) -> *mut tm; + pub fn localtime_r(time_p: *const time_t, result: *mut tm) -> *mut tm; + pub fn mktime(tm: *mut tm) -> time_t; + pub fn time(time: *mut time_t) -> time_t; + pub fn gmtime(time_p: *const time_t) -> *mut tm; + pub fn localtime(time_p: *const time_t) -> *mut tm; + + pub fn mknod(pathname: *const c_char, mode: mode_t, dev: crate::dev_t) -> c_int; + pub fn uname(buf: *mut crate::utsname) -> c_int; + pub fn gethostname(name: *mut c_char, len: size_t) -> c_int; + pub fn getservbyname(name: *const c_char, proto: *const c_char) -> *mut servent; + pub fn getprotobyname(name: *const c_char) -> *mut protoent; + pub fn getprotobynumber(proto: c_int) -> *mut protoent; + pub fn usleep(secs: c_uint) -> c_int; + pub fn send(socket: c_int, buf: *const c_void, len: size_t, flags: c_int) -> ssize_t; + pub fn recv(socket: c_int, buf: *mut c_void, len: size_t, flags: c_int) -> ssize_t; + pub fn putenv(string: *mut c_char) -> c_int; + pub fn poll(fds: *mut pollfd, nfds: nfds_t, timeout: c_int) -> 
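// [Editorial sketch, not part of the vendored file] The getaddrinfo/freeaddrinfo/
// gai_strerror bindings declared above follow the usual C ownership rule: every
// successful lookup must be released with freeaddrinfo. A minimal, hedged example of
// driving them from Rust (the std::ffi/std::ptr plumbing is the caller's, not libc API):
fn count_addresses(host: &str) -> Result<usize, String> {
    use std::ffi::{CStr, CString};
    let c_host = CString::new(host).map_err(|e| e.to_string())?;
    let mut hints: libc::addrinfo = unsafe { std::mem::zeroed() };
    hints.ai_socktype = libc::SOCK_STREAM;
    let mut res: *mut libc::addrinfo = std::ptr::null_mut();
    let rc = unsafe { libc::getaddrinfo(c_host.as_ptr(), std::ptr::null(), &hints, &mut res) };
    if rc != 0 {
        // gai_strerror returns a static, NUL-terminated description of the error code.
        let msg = unsafe { CStr::from_ptr(libc::gai_strerror(rc)) };
        return Err(msg.to_string_lossy().into_owned());
    }
    // Walk the linked list of results, then hand it back to the library.
    let mut n = 0;
    let mut cur = res;
    while !cur.is_null() {
        n += 1;
        cur = unsafe { (*cur).ai_next };
    }
    unsafe { libc::freeaddrinfo(res) };
    Ok(n)
}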
c_int; + pub fn select( + nfds: c_int, + readfds: *mut fd_set, + writefds: *mut fd_set, + errorfds: *mut fd_set, + timeout: *mut timeval, + ) -> c_int; + pub fn setlocale(category: c_int, locale: *const c_char) -> *mut c_char; + pub fn localeconv() -> *mut lconv; + + pub fn sem_destroy(sem: *mut sem_t) -> c_int; + pub fn sem_wait(sem: *mut sem_t) -> c_int; + pub fn sem_trywait(sem: *mut sem_t) -> c_int; + pub fn sem_post(sem: *mut sem_t) -> c_int; + pub fn sem_init(sem: *mut sem_t, pshared: c_int, value: c_uint) -> c_int; + pub fn statvfs(path: *const c_char, buf: *mut statvfs) -> c_int; + pub fn fstatvfs(fd: c_int, buf: *mut statvfs) -> c_int; + + pub fn readlink(path: *const c_char, buf: *mut c_char, bufsz: size_t) -> ssize_t; + + pub fn sigemptyset(set: *mut sigset_t) -> c_int; + pub fn sigaddset(set: *mut sigset_t, signum: c_int) -> c_int; + pub fn sigfillset(set: *mut sigset_t) -> c_int; + pub fn sigdelset(set: *mut sigset_t, signum: c_int) -> c_int; + pub fn sigismember(set: *const sigset_t, signum: c_int) -> c_int; + + pub fn sigprocmask(how: c_int, set: *const sigset_t, oldset: *mut sigset_t) -> c_int; + pub fn sigpending(set: *mut sigset_t) -> c_int; + + pub fn timegm(tm: *mut crate::tm) -> time_t; + + pub fn getsid(pid: pid_t) -> pid_t; + + pub fn sysconf(name: c_int) -> c_long; + + pub fn mkfifo(path: *const c_char, mode: mode_t) -> c_int; + + pub fn pselect( + nfds: c_int, + readfds: *mut fd_set, + writefds: *mut fd_set, + errorfds: *mut fd_set, + timeout: *const timespec, + sigmask: *const sigset_t, + ) -> c_int; + pub fn fseeko(stream: *mut crate::FILE, offset: off_t, whence: c_int) -> c_int; + pub fn ftello(stream: *mut crate::FILE) -> off_t; + pub fn tcdrain(fd: c_int) -> c_int; + pub fn cfgetispeed(termios: *const crate::termios) -> crate::speed_t; + pub fn cfgetospeed(termios: *const crate::termios) -> crate::speed_t; + pub fn cfmakeraw(termios: *mut crate::termios); + pub fn cfsetispeed(termios: *mut crate::termios, speed: crate::speed_t) -> c_int; + pub fn cfsetospeed(termios: *mut crate::termios, speed: crate::speed_t) -> c_int; + pub fn cfsetspeed(termios: *mut crate::termios, speed: crate::speed_t) -> c_int; + pub fn tcgetattr(fd: c_int, termios: *mut crate::termios) -> c_int; + pub fn tcsetattr(fd: c_int, optional_actions: c_int, termios: *const crate::termios) -> c_int; + pub fn tcflow(fd: c_int, action: c_int) -> c_int; + pub fn tcflush(fd: c_int, action: c_int) -> c_int; + pub fn tcgetsid(fd: c_int) -> crate::pid_t; + pub fn tcsendbreak(fd: c_int, duration: c_int) -> c_int; + pub fn mkstemp(template: *mut c_char) -> c_int; + pub fn mkdtemp(template: *mut c_char) -> *mut c_char; + + pub fn tmpnam(ptr: *mut c_char) -> *mut c_char; + + pub fn openlog(ident: *const c_char, logopt: c_int, facility: c_int); + pub fn closelog(); + pub fn setlogmask(maskpri: c_int) -> c_int; + pub fn syslog(priority: c_int, message: *const c_char, ...); + + pub fn grantpt(fd: c_int) -> c_int; + pub fn posix_openpt(flags: c_int) -> c_int; + pub fn ptsname(fd: c_int) -> *mut c_char; + pub fn unlockpt(fd: c_int) -> c_int; + + pub fn fdatasync(fd: c_int) -> c_int; + pub fn clock_getres(clk_id: crate::clockid_t, tp: *mut crate::timespec) -> c_int; + pub fn clock_gettime(clk_id: crate::clockid_t, tp: *mut crate::timespec) -> c_int; + pub fn clock_settime(clk_id: crate::clockid_t, tp: *const crate::timespec) -> c_int; + pub fn dirfd(dirp: *mut crate::DIR) -> c_int; + + pub fn pthread_getattr_np(native: crate::pthread_t, attr: *mut crate::pthread_attr_t) -> c_int; + pub fn pthread_attr_getstack( 
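// [Editorial sketch, not part of the vendored file] clock_gettime, declared above,
// fills a caller-provided timespec and reports success with 0. A minimal monotonic
// read, assuming the Linux-style two-field timespec layout:
fn monotonic_now() -> Option<libc::timespec> {
    let mut ts = libc::timespec { tv_sec: 0, tv_nsec: 0 };
    // 0 on success, -1 on failure with errno set, matching the C API.
    let rc = unsafe { libc::clock_gettime(libc::CLOCK_MONOTONIC, &mut ts) };
    if rc == 0 { Some(ts) } else { None }
}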
+ attr: *const crate::pthread_attr_t, + stackaddr: *mut *mut c_void, + stacksize: *mut size_t, + ) -> c_int; + pub fn memalign(align: size_t, size: size_t) -> *mut c_void; + pub fn setgroups(ngroups: size_t, ptr: *const crate::gid_t) -> c_int; + pub fn pipe2(fds: *mut c_int, flags: c_int) -> c_int; + pub fn statfs(path: *const c_char, buf: *mut statfs) -> c_int; + pub fn fstatfs(fd: c_int, buf: *mut statfs) -> c_int; + pub fn memrchr(cx: *const c_void, c: c_int, n: size_t) -> *mut c_void; + + pub fn posix_fadvise(fd: c_int, offset: off_t, len: off_t, advise: c_int) -> c_int; + pub fn futimens(fd: c_int, times: *const crate::timespec) -> c_int; + pub fn utimensat( + dirfd: c_int, + path: *const c_char, + times: *const crate::timespec, + flag: c_int, + ) -> c_int; + pub fn duplocale(base: crate::locale_t) -> crate::locale_t; + pub fn freelocale(loc: crate::locale_t); + pub fn newlocale(mask: c_int, locale: *const c_char, base: crate::locale_t) -> crate::locale_t; + pub fn uselocale(loc: crate::locale_t) -> crate::locale_t; + + pub fn fdopendir(fd: c_int) -> *mut crate::DIR; + + pub fn mknodat(dirfd: c_int, pathname: *const c_char, mode: mode_t, dev: dev_t) -> c_int; + pub fn pthread_condattr_getclock( + attr: *const pthread_condattr_t, + clock_id: *mut clockid_t, + ) -> c_int; + pub fn pthread_condattr_setclock( + attr: *mut pthread_condattr_t, + clock_id: crate::clockid_t, + ) -> c_int; + pub fn accept4( + fd: c_int, + addr: *mut crate::sockaddr, + len: *mut crate::socklen_t, + flg: c_int, + ) -> c_int; + pub fn ptsname_r(fd: c_int, buf: *mut c_char, buflen: size_t) -> c_int; + pub fn clearenv() -> c_int; + pub fn waitid( + idtype: idtype_t, + id: id_t, + infop: *mut crate::siginfo_t, + options: c_int, + ) -> c_int; + pub fn setreuid(ruid: crate::uid_t, euid: crate::uid_t) -> c_int; + pub fn setregid(rgid: crate::gid_t, egid: crate::gid_t) -> c_int; + pub fn getresuid( + ruid: *mut crate::uid_t, + euid: *mut crate::uid_t, + suid: *mut crate::uid_t, + ) -> c_int; + pub fn getresgid( + rgid: *mut crate::gid_t, + egid: *mut crate::gid_t, + sgid: *mut crate::gid_t, + ) -> c_int; + pub fn acct(filename: *const c_char) -> c_int; + pub fn brk(addr: *mut c_void) -> c_int; + pub fn setresgid(rgid: crate::gid_t, egid: crate::gid_t, sgid: crate::gid_t) -> c_int; + pub fn setresuid(ruid: crate::uid_t, euid: crate::uid_t, suid: crate::uid_t) -> c_int; + pub fn openpty( + amaster: *mut c_int, + aslave: *mut c_int, + name: *mut c_char, + termp: *const termios, + winp: *const crate::winsize, + ) -> c_int; + + // DIFF(main): changed to `*const *mut` in e77f551de9 + pub fn execvpe( + file: *const c_char, + argv: *const *const c_char, + envp: *const *const c_char, + ) -> c_int; + pub fn fexecve(fd: c_int, argv: *const *const c_char, envp: *const *const c_char) -> c_int; + + pub fn ioctl(fd: c_int, request: c_int, ...) 
-> c_int; + + pub fn lutimes(file: *const c_char, times: *const crate::timeval) -> c_int; + + pub fn setpwent(); + pub fn endpwent(); + pub fn getpwent() -> *mut passwd; + + pub fn shm_open(name: *const c_char, oflag: c_int, mode: mode_t) -> c_int; + + // System V IPC + pub fn shmget(key: crate::key_t, size: size_t, shmflg: c_int) -> c_int; + pub fn shmat(shmid: c_int, shmaddr: *const c_void, shmflg: c_int) -> *mut c_void; + pub fn shmdt(shmaddr: *const c_void) -> c_int; + pub fn shmctl(shmid: c_int, cmd: c_int, buf: *mut crate::shmid_ds) -> c_int; + pub fn ftok(pathname: *const c_char, proj_id: c_int) -> crate::key_t; + pub fn semget(key: crate::key_t, nsems: c_int, semflag: c_int) -> c_int; + pub fn semop(semid: c_int, sops: *mut crate::sembuf, nsops: size_t) -> c_int; + pub fn semctl(semid: c_int, semnum: c_int, cmd: c_int, ...) -> c_int; + pub fn msgctl(msqid: c_int, cmd: c_int, buf: *mut msqid_ds) -> c_int; + pub fn msgget(key: crate::key_t, msgflg: c_int) -> c_int; + pub fn msgrcv( + msqid: c_int, + msgp: *mut c_void, + msgsz: size_t, + msgtyp: c_long, + msgflg: c_int, + ) -> ssize_t; + pub fn msgsnd(msqid: c_int, msgp: *const c_void, msgsz: size_t, msgflg: c_int) -> c_int; + + pub fn mprotect(addr: *mut c_void, len: size_t, prot: c_int) -> c_int; + pub fn __errno_location() -> *mut c_int; + + pub fn fallocate(fd: c_int, mode: c_int, offset: off_t, len: off_t) -> c_int; + pub fn posix_fallocate(fd: c_int, offset: off_t, len: off_t) -> c_int; + pub fn readahead(fd: c_int, offset: off64_t, count: size_t) -> ssize_t; + pub fn signalfd(fd: c_int, mask: *const crate::sigset_t, flags: c_int) -> c_int; + pub fn timerfd_create(clockid: c_int, flags: c_int) -> c_int; + pub fn timerfd_gettime(fd: c_int, curr_value: *mut itimerspec) -> c_int; + pub fn timerfd_settime( + fd: c_int, + flags: c_int, + new_value: *const itimerspec, + old_value: *mut itimerspec, + ) -> c_int; + pub fn pwritev(fd: c_int, iov: *const crate::iovec, iovcnt: c_int, offset: off_t) -> ssize_t; + pub fn preadv(fd: c_int, iov: *const crate::iovec, iovcnt: c_int, offset: off_t) -> ssize_t; + pub fn quotactl(cmd: c_int, special: *const c_char, id: c_int, data: *mut c_char) -> c_int; + pub fn dup3(oldfd: c_int, newfd: c_int, flags: c_int) -> c_int; + pub fn mkostemp(template: *mut c_char, flags: c_int) -> c_int; + pub fn mkostemps(template: *mut c_char, suffixlen: c_int, flags: c_int) -> c_int; + pub fn sigtimedwait( + set: *const sigset_t, + info: *mut siginfo_t, + timeout: *const crate::timespec, + ) -> c_int; + pub fn sigwaitinfo(set: *const sigset_t, info: *mut siginfo_t) -> c_int; + pub fn nl_langinfo_l(item: crate::nl_item, locale: crate::locale_t) -> *mut c_char; + pub fn getnameinfo( + sa: *const crate::sockaddr, + salen: crate::socklen_t, + host: *mut c_char, + hostlen: crate::socklen_t, + serv: *mut c_char, + servlen: crate::socklen_t, + flags: c_int, + ) -> c_int; + pub fn reboot(how_to: c_int) -> c_int; + pub fn setfsgid(gid: crate::gid_t) -> c_int; + pub fn setfsuid(uid: crate::uid_t) -> c_int; + + // Not available now on Android + pub fn mkfifoat(dirfd: c_int, pathname: *const c_char, mode: mode_t) -> c_int; + pub fn if_nameindex() -> *mut if_nameindex; + pub fn if_freenameindex(ptr: *mut if_nameindex); + pub fn sync_file_range(fd: c_int, offset: off64_t, nbytes: off64_t, flags: c_uint) -> c_int; + pub fn getifaddrs(ifap: *mut *mut crate::ifaddrs) -> c_int; + pub fn freeifaddrs(ifa: *mut crate::ifaddrs); + + pub fn glob( + pattern: *const c_char, + flags: c_int, + errfunc: Option c_int>, + pglob: *mut 
crate::glob_t, + ) -> c_int; + pub fn globfree(pglob: *mut crate::glob_t); + + pub fn posix_madvise(addr: *mut c_void, len: size_t, advice: c_int) -> c_int; + + pub fn shm_unlink(name: *const c_char) -> c_int; + + pub fn seekdir(dirp: *mut crate::DIR, loc: c_long); + + pub fn telldir(dirp: *mut crate::DIR) -> c_long; + pub fn madvise(addr: *mut c_void, len: size_t, advice: c_int) -> c_int; + + pub fn msync(addr: *mut c_void, len: size_t, flags: c_int) -> c_int; + + pub fn recvfrom( + socket: c_int, + buf: *mut c_void, + len: size_t, + flags: c_int, + addr: *mut crate::sockaddr, + addrlen: *mut crate::socklen_t, + ) -> ssize_t; + pub fn mkstemps(template: *mut c_char, suffixlen: c_int) -> c_int; + pub fn futimes(fd: c_int, times: *const crate::timeval) -> c_int; + pub fn nl_langinfo(item: crate::nl_item) -> *mut c_char; + + pub fn bind( + socket: c_int, + address: *const crate::sockaddr, + address_len: crate::socklen_t, + ) -> c_int; + + pub fn writev(fd: c_int, iov: *const crate::iovec, iovcnt: c_int) -> ssize_t; + pub fn readv(fd: c_int, iov: *const crate::iovec, iovcnt: c_int) -> ssize_t; + + pub fn sendmsg(fd: c_int, msg: *const crate::msghdr, flags: c_int) -> ssize_t; + pub fn recvmsg(fd: c_int, msg: *mut crate::msghdr, flags: c_int) -> ssize_t; + pub fn getdomainname(name: *mut c_char, len: size_t) -> c_int; + pub fn setdomainname(name: *const c_char, len: size_t) -> c_int; + pub fn vhangup() -> c_int; + pub fn sendmmsg(sockfd: c_int, msgvec: *mut mmsghdr, vlen: c_uint, flags: c_int) -> c_int; + pub fn recvmmsg( + sockfd: c_int, + msgvec: *mut mmsghdr, + vlen: c_uint, + flags: c_int, + timeout: *mut crate::timespec, + ) -> c_int; + pub fn sync(); + pub fn syscall(num: c_long, ...) -> c_long; + pub fn sched_getaffinity( + pid: crate::pid_t, + cpusetsize: size_t, + cpuset: *mut cpu_set_t, + ) -> c_int; + pub fn sched_setaffinity( + pid: crate::pid_t, + cpusetsize: size_t, + cpuset: *const cpu_set_t, + ) -> c_int; + pub fn umount(target: *const c_char) -> c_int; + pub fn sched_get_priority_max(policy: c_int) -> c_int; + pub fn tee(fd_in: c_int, fd_out: c_int, len: size_t, flags: c_uint) -> ssize_t; + pub fn settimeofday(tv: *const crate::timeval, tz: *const crate::timezone) -> c_int; + pub fn splice( + fd_in: c_int, + off_in: *mut crate::loff_t, + fd_out: c_int, + off_out: *mut crate::loff_t, + len: size_t, + flags: c_uint, + ) -> ssize_t; + pub fn eventfd(init: c_uint, flags: c_int) -> c_int; + pub fn sched_rr_get_interval(pid: crate::pid_t, tp: *mut crate::timespec) -> c_int; + pub fn sem_timedwait(sem: *mut sem_t, abstime: *const crate::timespec) -> c_int; + pub fn sem_getvalue(sem: *mut sem_t, sval: *mut c_int) -> c_int; + pub fn sched_setparam(pid: crate::pid_t, param: *const crate::sched_param) -> c_int; + pub fn swapoff(puath: *const c_char) -> c_int; + pub fn vmsplice(fd: c_int, iov: *const crate::iovec, nr_segs: size_t, flags: c_uint) + -> ssize_t; + pub fn mount( + src: *const c_char, + target: *const c_char, + fstype: *const c_char, + flags: c_ulong, + data: *const c_void, + ) -> c_int; + pub fn personality(persona: c_ulong) -> c_int; + pub fn sched_getparam(pid: crate::pid_t, param: *mut crate::sched_param) -> c_int; + pub fn ppoll( + fds: *mut crate::pollfd, + nfds: nfds_t, + timeout: *const crate::timespec, + sigmask: *const sigset_t, + ) -> c_int; + pub fn pthread_mutex_timedlock( + lock: *mut pthread_mutex_t, + abstime: *const crate::timespec, + ) -> c_int; + pub fn clone( + cb: extern "C" fn(*mut c_void) -> c_int, + child_stack: *mut c_void, + flags: c_int, + arg: *mut 
c_void, + ... + ) -> c_int; + pub fn sched_getscheduler(pid: crate::pid_t) -> c_int; + pub fn clock_nanosleep( + clk_id: crate::clockid_t, + flags: c_int, + rqtp: *const crate::timespec, + rmtp: *mut crate::timespec, + ) -> c_int; + pub fn pthread_attr_getguardsize( + attr: *const crate::pthread_attr_t, + guardsize: *mut size_t, + ) -> c_int; + pub fn pthread_attr_setguardsize(attr: *mut crate::pthread_attr_t, guardsize: size_t) -> c_int; + pub fn sethostname(name: *const c_char, len: size_t) -> c_int; + pub fn sched_get_priority_min(policy: c_int) -> c_int; + pub fn umount2(target: *const c_char, flags: c_int) -> c_int; + pub fn swapon(path: *const c_char, swapflags: c_int) -> c_int; + pub fn sched_setscheduler( + pid: crate::pid_t, + policy: c_int, + param: *const crate::sched_param, + ) -> c_int; + pub fn sigsuspend(mask: *const crate::sigset_t) -> c_int; + pub fn getgrgid_r( + gid: crate::gid_t, + grp: *mut crate::group, + buf: *mut c_char, + buflen: size_t, + result: *mut *mut crate::group, + ) -> c_int; + pub fn sigaltstack(ss: *const stack_t, oss: *mut stack_t) -> c_int; + pub fn sem_close(sem: *mut sem_t) -> c_int; + pub fn getdtablesize() -> c_int; + pub fn getgrnam_r( + name: *const c_char, + grp: *mut crate::group, + buf: *mut c_char, + buflen: size_t, + result: *mut *mut crate::group, + ) -> c_int; + pub fn initgroups(user: *const c_char, group: crate::gid_t) -> c_int; + pub fn pthread_sigmask(how: c_int, set: *const sigset_t, oldset: *mut sigset_t) -> c_int; + pub fn sem_open(name: *const c_char, oflag: c_int, ...) -> *mut sem_t; + pub fn getgrnam(name: *const c_char) -> *mut crate::group; + pub fn pthread_cancel(thread: crate::pthread_t) -> c_int; + pub fn pthread_kill(thread: crate::pthread_t, sig: c_int) -> c_int; + pub fn sem_unlink(name: *const c_char) -> c_int; + pub fn daemon(nochdir: c_int, noclose: c_int) -> c_int; + pub fn getpwnam_r( + name: *const c_char, + pwd: *mut passwd, + buf: *mut c_char, + buflen: size_t, + result: *mut *mut passwd, + ) -> c_int; + pub fn getpwuid_r( + uid: crate::uid_t, + pwd: *mut passwd, + buf: *mut c_char, + buflen: size_t, + result: *mut *mut passwd, + ) -> c_int; + pub fn sigwait(set: *const sigset_t, sig: *mut c_int) -> c_int; + pub fn pthread_atfork( + prepare: Option, + parent: Option, + child: Option, + ) -> c_int; + pub fn getgrgid(gid: crate::gid_t) -> *mut crate::group; + + pub fn setgrent(); + pub fn endgrent(); + pub fn getgrent() -> *mut crate::group; + + pub fn getgrouplist( + user: *const c_char, + group: crate::gid_t, + groups: *mut crate::gid_t, + ngroups: *mut c_int, + ) -> c_int; + pub fn popen(command: *const c_char, mode: *const c_char) -> *mut crate::FILE; + pub fn faccessat(dirfd: c_int, pathname: *const c_char, mode: c_int, flags: c_int) -> c_int; + pub fn pthread_create( + native: *mut crate::pthread_t, + attr: *const crate::pthread_attr_t, + f: extern "C" fn(*mut c_void) -> *mut c_void, + value: *mut c_void, + ) -> c_int; + pub fn dl_iterate_phdr( + callback: Option< + unsafe extern "C" fn( + info: *mut crate::dl_phdr_info, + size: size_t, + data: *mut c_void, + ) -> c_int, + >, + data: *mut c_void, + ) -> c_int; +} + +cfg_if! 
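// [Editorial sketch, not part of the vendored file] The declarations in the extern
// block that closes above are raw and unchecked; callers conventionally test the
// return value and then consult errno. A hedged wrapper around pipe(), using the
// standard library's OwnedFd so the descriptors are closed on drop:
fn make_pipe() -> std::io::Result<(std::os::fd::OwnedFd, std::os::fd::OwnedFd)> {
    use std::os::fd::{FromRawFd, OwnedFd};
    let mut fds: [libc::c_int; 2] = [-1; 2];
    // pipe() returns 0 on success and -1 on failure with errno set.
    if unsafe { libc::pipe(fds.as_mut_ptr()) } == -1 {
        return Err(std::io::Error::last_os_error());
    }
    Ok(unsafe { (OwnedFd::from_raw_fd(fds[0]), OwnedFd::from_raw_fd(fds[1])) })
}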
{ + if #[cfg(target_arch = "aarch64")] { + mod aarch64; + pub use self::aarch64::*; + } else if #[cfg(any(target_arch = "x86_64"))] { + mod x86_64; + pub use self::x86_64::*; + } else if #[cfg(any(target_arch = "riscv64"))] { + mod riscv64; + pub use self::riscv64::*; + } else { + // Unknown target_arch + } +} diff --git a/vendor/libc/src/fuchsia/riscv64.rs b/vendor/libc/src/fuchsia/riscv64.rs new file mode 100644 index 00000000000000..c57d52aad13867 --- /dev/null +++ b/vendor/libc/src/fuchsia/riscv64.rs @@ -0,0 +1,46 @@ +use crate::off_t; +use crate::prelude::*; + +// From psABI Calling Convention for RV64 +pub type __u64 = c_ulonglong; +pub type wchar_t = i32; + +pub type nlink_t = c_ulong; +pub type blksize_t = c_long; + +pub type stat64 = stat; +s! { + pub struct stat { + pub st_dev: crate::dev_t, + pub st_ino: crate::ino_t, + pub st_nlink: crate::nlink_t, + pub st_mode: crate::mode_t, + pub st_uid: crate::uid_t, + pub st_gid: crate::gid_t, + __pad0: c_int, + pub st_rdev: crate::dev_t, + pub st_size: off_t, + pub st_blksize: crate::blksize_t, + pub st_blocks: crate::blkcnt_t, + pub st_atime: crate::time_t, + pub st_atime_nsec: c_long, + pub st_mtime: crate::time_t, + pub st_mtime_nsec: c_long, + pub st_ctime: crate::time_t, + pub st_ctime_nsec: c_long, + __unused: [c_long; 3], + } + + // Not actually used, IPC calls just return ENOSYS + pub struct ipc_perm { + pub __ipc_perm_key: crate::key_t, + pub uid: crate::uid_t, + pub gid: crate::gid_t, + pub cuid: crate::uid_t, + pub cgid: crate::gid_t, + pub mode: crate::mode_t, + pub __seq: c_ushort, + __unused1: c_ulong, + __unused2: c_ulong, + } +} diff --git a/vendor/libc/src/fuchsia/x86_64.rs b/vendor/libc/src/fuchsia/x86_64.rs new file mode 100644 index 00000000000000..add60a45640204 --- /dev/null +++ b/vendor/libc/src/fuchsia/x86_64.rs @@ -0,0 +1,142 @@ +use crate::off_t; +use crate::prelude::*; + +pub type wchar_t = i32; +pub type nlink_t = u64; +pub type blksize_t = c_long; +pub type __u64 = c_ulonglong; + +s! { + pub struct stat { + pub st_dev: crate::dev_t, + pub st_ino: crate::ino_t, + pub st_nlink: crate::nlink_t, + pub st_mode: crate::mode_t, + pub st_uid: crate::uid_t, + pub st_gid: crate::gid_t, + __pad0: c_int, + pub st_rdev: crate::dev_t, + pub st_size: off_t, + pub st_blksize: crate::blksize_t, + pub st_blocks: crate::blkcnt_t, + pub st_atime: crate::time_t, + pub st_atime_nsec: c_long, + pub st_mtime: crate::time_t, + pub st_mtime_nsec: c_long, + pub st_ctime: crate::time_t, + pub st_ctime_nsec: c_long, + __unused: [c_long; 3], + } + + pub struct stat64 { + pub st_dev: crate::dev_t, + pub st_ino: crate::ino64_t, + pub st_nlink: crate::nlink_t, + pub st_mode: crate::mode_t, + pub st_uid: crate::uid_t, + pub st_gid: crate::gid_t, + __pad0: c_int, + pub st_rdev: crate::dev_t, + pub st_size: off_t, + pub st_blksize: crate::blksize_t, + pub st_blocks: crate::blkcnt64_t, + pub st_atime: crate::time_t, + pub st_atime_nsec: c_long, + pub st_mtime: crate::time_t, + pub st_mtime_nsec: c_long, + pub st_ctime: crate::time_t, + pub st_ctime_nsec: c_long, + __reserved: [c_long; 3], + } + + pub struct mcontext_t { + __private: [u64; 32], + } + + pub struct ipc_perm { + pub __ipc_perm_key: crate::key_t, + pub uid: crate::uid_t, + pub gid: crate::gid_t, + pub cuid: crate::uid_t, + pub cgid: crate::gid_t, + pub mode: crate::mode_t, + pub __seq: c_int, + __unused1: c_long, + __unused2: c_long, + } +} + +s_no_extra_traits! 
{
+    pub struct ucontext_t {
+        pub uc_flags: c_ulong,
+        pub uc_link: *mut ucontext_t,
+        pub uc_stack: crate::stack_t,
+        pub uc_mcontext: mcontext_t,
+        pub uc_sigmask: crate::sigset_t,
+        __private: [u8; 512],
+    }
+}
+
+cfg_if! {
+    if #[cfg(feature = "extra_traits")] {
+        impl PartialEq for ucontext_t {
+            fn eq(&self, other: &ucontext_t) -> bool {
+                self.uc_flags == other.uc_flags
+                    && self.uc_link == other.uc_link
+                    && self.uc_stack == other.uc_stack
+                    && self.uc_mcontext == other.uc_mcontext
+                    && self.uc_sigmask == other.uc_sigmask
+                    && self
+                        .__private
+                        .iter()
+                        .zip(other.__private.iter())
+                        .all(|(a, b)| a == b)
+            }
+        }
+        impl Eq for ucontext_t {}
+        impl hash::Hash for ucontext_t {
+            fn hash<H: hash::Hasher>(&self, state: &mut H) {
+                self.uc_flags.hash(state);
+                self.uc_link.hash(state);
+                self.uc_stack.hash(state);
+                self.uc_mcontext.hash(state);
+                self.uc_sigmask.hash(state);
+                self.__private.hash(state);
+            }
+        }
+    }
+}
+
+// offsets in user_regs_structs, from sys/reg.h
+pub const R15: c_int = 0;
+pub const R14: c_int = 1;
+pub const R13: c_int = 2;
+pub const R12: c_int = 3;
+pub const RBP: c_int = 4;
+pub const RBX: c_int = 5;
+pub const R11: c_int = 6;
+pub const R10: c_int = 7;
+pub const R9: c_int = 8;
+pub const R8: c_int = 9;
+pub const RAX: c_int = 10;
+pub const RCX: c_int = 11;
+pub const RDX: c_int = 12;
+pub const RSI: c_int = 13;
+pub const RDI: c_int = 14;
+pub const ORIG_RAX: c_int = 15;
+pub const RIP: c_int = 16;
+pub const CS: c_int = 17;
+pub const EFLAGS: c_int = 18;
+pub const RSP: c_int = 19;
+pub const SS: c_int = 20;
+pub const FS_BASE: c_int = 21;
+pub const GS_BASE: c_int = 22;
+pub const DS: c_int = 23;
+pub const ES: c_int = 24;
+pub const FS: c_int = 25;
+pub const GS: c_int = 26;
+
+pub const MAP_32BIT: c_int = 0x0040;
+
+pub const SIGSTKSZ: size_t = 8192;
+pub const MINSIGSTKSZ: size_t = 2048;
diff --git a/vendor/libc/src/hermit.rs b/vendor/libc/src/hermit.rs
new file mode 100644
index 00000000000000..b96be6b0e2a2fb
--- /dev/null
+++ b/vendor/libc/src/hermit.rs
@@ -0,0 +1,561 @@
+//! Hermit C type definitions
+
+use crate::prelude::*;
+
+pub type intmax_t = i64;
+pub type uintmax_t = u64;
+pub type intptr_t = isize;
+pub type uintptr_t = usize;
+
+pub type size_t = usize;
+pub type ssize_t = isize;
+pub type ptrdiff_t = isize;
+
+pub type clockid_t = i32;
+pub type in_addr_t = u32;
+pub type in_port_t = u16;
+pub type mode_t = u32;
+pub type nfds_t = usize;
+pub type pid_t = i32;
+pub type sa_family_t = u8;
+pub type socklen_t = u32;
+pub type time_t = i64;
+
+s!
{ + pub struct addrinfo { + pub ai_flags: i32, + pub ai_family: i32, + pub ai_socktype: i32, + pub ai_protocol: i32, + pub ai_addrlen: socklen_t, + pub ai_canonname: *mut c_char, + pub ai_addr: *mut sockaddr, + pub ai_next: *mut addrinfo, + } + + pub struct dirent64 { + pub d_ino: u64, + pub d_off: i64, + pub d_reclen: u16, + pub d_type: u8, + pub d_name: [c_char; 256], + } + + #[repr(align(4))] + pub struct in6_addr { + pub s6_addr: [u8; 16], + } + + pub struct in_addr { + pub s_addr: in_addr_t, + } + + pub struct iovec { + iov_base: *mut c_void, + iov_len: usize, + } + + pub struct pollfd { + pub fd: i32, + pub events: i16, + pub revents: i16, + } + + pub struct sockaddr { + pub sa_len: u8, + pub sa_family: sa_family_t, + pub sa_data: [c_char; 14], + } + + pub struct sockaddr_in { + pub sin_len: u8, + pub sin_family: sa_family_t, + pub sin_port: in_port_t, + pub sin_addr: in_addr, + pub sin_zero: [c_char; 8], + } + + pub struct sockaddr_in6 { + pub sin6_len: u8, + pub sin6_family: sa_family_t, + pub sin6_port: in_port_t, + pub sin6_flowinfo: u32, + pub sin6_addr: in6_addr, + pub sin6_scope_id: u32, + } + + pub struct sockaddr_storage { + pub ss_len: u8, + pub ss_family: sa_family_t, + __ss_pad1: [u8; 6], + __ss_align: i64, + __ss_pad2: [u8; 112], + } + + pub struct stat { + pub st_dev: u64, + pub st_ino: u64, + pub st_nlink: u64, + pub st_mode: mode_t, + pub st_uid: u32, + pub st_gid: u32, + pub st_rdev: u64, + pub st_size: i64, + pub st_blksize: i64, + pub st_blocks: i64, + pub st_atim: timespec, + pub st_mtim: timespec, + pub st_ctim: timespec, + } + + pub struct timespec { + pub tv_sec: time_t, + pub tv_nsec: i32, + } +} + +pub const AF_UNSPEC: i32 = 0; +pub const AF_INET: i32 = 3; +pub const AF_INET6: i32 = 1; +pub const AF_VSOCK: i32 = 2; + +pub const CLOCK_REALTIME: clockid_t = 1; +pub const CLOCK_MONOTONIC: clockid_t = 4; + +pub const DT_UNKNOWN: u8 = 0; +pub const DT_FIFO: u8 = 1; +pub const DT_CHR: u8 = 2; +pub const DT_DIR: u8 = 4; +pub const DT_BLK: u8 = 6; +pub const DT_REG: u8 = 8; +pub const DT_LNK: u8 = 10; +pub const DT_SOCK: u8 = 12; +pub const DT_WHT: u8 = 14; + +pub const EAI_AGAIN: i32 = 2; +pub const EAI_BADFLAGS: i32 = 3; +pub const EAI_FAIL: i32 = 4; +pub const EAI_FAMILY: i32 = 5; +pub const EAI_MEMORY: i32 = 6; +pub const EAI_NODATA: i32 = 7; +pub const EAI_NONAME: i32 = 8; +pub const EAI_SERVICE: i32 = 9; +pub const EAI_SOCKTYPE: i32 = 10; +pub const EAI_SYSTEM: i32 = 11; +pub const EAI_OVERFLOW: i32 = 14; + +pub const EFD_SEMAPHORE: i16 = 0o1; +pub const EFD_NONBLOCK: i16 = 0o4000; +pub const EFD_CLOEXEC: i16 = 0o40000; + +pub const F_DUPFD: i32 = 0; +pub const F_GETFD: i32 = 1; +pub const F_SETFD: i32 = 2; +pub const F_GETFL: i32 = 3; +pub const F_SETFL: i32 = 4; + +pub const FD_CLOEXEC: i32 = 1; + +pub const FIONBIO: i32 = 0x8008667e; + +pub const FUTEX_RELATIVE_TIMEOUT: u32 = 1; + +pub const IP_TOS: i32 = 1; +pub const IP_TTL: i32 = 2; +pub const IP_ADD_MEMBERSHIP: i32 = 3; +pub const IP_DROP_MEMBERSHIP: i32 = 4; +pub const IP_MULTICAST_TTL: i32 = 5; +pub const IP_MULTICAST_LOOP: i32 = 7; + +pub const IPPROTO_IP: i32 = 0; +pub const IPPROTO_TCP: i32 = 6; +pub const IPPROTO_UDP: i32 = 17; +pub const IPPROTO_IPV6: i32 = 41; + +pub const IPV6_ADD_MEMBERSHIP: i32 = 12; +pub const IPV6_DROP_MEMBERSHIP: i32 = 13; +pub const IPV6_MULTICAST_LOOP: i32 = 19; +pub const IPV6_V6ONLY: i32 = 27; + +pub const MSG_PEEK: i32 = 1; + +pub const O_RDONLY: i32 = 0o0; +pub const O_WRONLY: i32 = 0o1; +pub const O_RDWR: i32 = 0o2; +pub const O_CREAT: i32 = 0o100; +pub const O_EXCL: 
i32 = 0o200; +pub const O_TRUNC: i32 = 0o1000; +pub const O_APPEND: i32 = 0o2000; +pub const O_NONBLOCK: i32 = 0o4000; +pub const O_DIRECTORY: i32 = 0o200000; + +pub const POLLIN: i16 = 0x1; +pub const POLLPRI: i16 = 0x2; +pub const POLLOUT: i16 = 0x4; +pub const POLLERR: i16 = 0x8; +pub const POLLHUP: i16 = 0x10; +pub const POLLNVAL: i16 = 0x20; +pub const POLLRDNORM: i16 = 0x040; +pub const POLLRDBAND: i16 = 0x080; +pub const POLLWRNORM: i16 = 0x0100; +pub const POLLWRBAND: i16 = 0x0200; +pub const POLLRDHUP: i16 = 0x2000; + +pub const S_IRWXU: mode_t = 0o0700; +pub const S_IRUSR: mode_t = 0o0400; +pub const S_IWUSR: mode_t = 0o0200; +pub const S_IXUSR: mode_t = 0o0100; +pub const S_IRWXG: mode_t = 0o0070; +pub const S_IRGRP: mode_t = 0o0040; +pub const S_IWGRP: mode_t = 0o0020; +pub const S_IXGRP: mode_t = 0o0010; +pub const S_IRWXO: mode_t = 0o0007; +pub const S_IROTH: mode_t = 0o0004; +pub const S_IWOTH: mode_t = 0o0002; +pub const S_IXOTH: mode_t = 0o0001; + +pub const S_IFMT: mode_t = 0o17_0000; +pub const S_IFSOCK: mode_t = 0o14_0000; +pub const S_IFLNK: mode_t = 0o12_0000; +pub const S_IFREG: mode_t = 0o10_0000; +pub const S_IFBLK: mode_t = 0o6_0000; +pub const S_IFDIR: mode_t = 0o4_0000; +pub const S_IFCHR: mode_t = 0o2_0000; +pub const S_IFIFO: mode_t = 0o1_0000; + +pub const SHUT_RD: i32 = 0; +pub const SHUT_WR: i32 = 1; +pub const SHUT_RDWR: i32 = 2; + +pub const SO_REUSEADDR: i32 = 0x0004; +pub const SO_KEEPALIVE: i32 = 0x0008; +pub const SO_BROADCAST: i32 = 0x0020; +pub const SO_LINGER: i32 = 0x0080; +pub const SO_SNDBUF: i32 = 0x1001; +pub const SO_RCVBUF: i32 = 0x1002; +pub const SO_SNDTIMEO: i32 = 0x1005; +pub const SO_RCVTIMEO: i32 = 0x1006; +pub const SO_ERROR: i32 = 0x1007; + +pub const SOCK_STREAM: i32 = 1; +pub const SOCK_DGRAM: i32 = 2; +pub const SOCK_NONBLOCK: i32 = 0o4000; +pub const SOCK_CLOEXEC: i32 = 0o40000; + +pub const SOL_SOCKET: i32 = 4095; + +pub const STDIN_FILENO: c_int = 0; +pub const STDOUT_FILENO: c_int = 1; +pub const STDERR_FILENO: c_int = 2; + +pub const TCP_NODELAY: i32 = 1; + +pub const EPERM: i32 = 1; +pub const ENOENT: i32 = 2; +pub const ESRCH: i32 = 3; +pub const EINTR: i32 = 4; +pub const EIO: i32 = 5; +pub const ENXIO: i32 = 6; +pub const E2BIG: i32 = 7; +pub const ENOEXEC: i32 = 8; +pub const EBADF: i32 = 9; +pub const ECHILD: i32 = 10; +pub const EAGAIN: i32 = 11; +pub const ENOMEM: i32 = 12; +pub const EACCES: i32 = 13; +pub const EFAULT: i32 = 14; +pub const ENOTBLK: i32 = 15; +pub const EBUSY: i32 = 16; +pub const EEXIST: i32 = 17; +pub const EXDEV: i32 = 18; +pub const ENODEV: i32 = 19; +pub const ENOTDIR: i32 = 20; +pub const EISDIR: i32 = 21; +pub const EINVAL: i32 = 22; +pub const ENFILE: i32 = 23; +pub const EMFILE: i32 = 24; +pub const ENOTTY: i32 = 25; +pub const ETXTBSY: i32 = 26; +pub const EFBIG: i32 = 27; +pub const ENOSPC: i32 = 28; +pub const ESPIPE: i32 = 29; +pub const EROFS: i32 = 30; +pub const EMLINK: i32 = 31; +pub const EPIPE: i32 = 32; +pub const EDOM: i32 = 33; +pub const ERANGE: i32 = 34; +pub const EDEADLK: i32 = 35; +pub const ENAMETOOLONG: i32 = 36; +pub const ENOLCK: i32 = 37; +pub const ENOSYS: i32 = 38; +pub const ENOTEMPTY: i32 = 39; +pub const ELOOP: i32 = 40; +pub const EWOULDBLOCK: i32 = EAGAIN; +pub const ENOMSG: i32 = 42; +pub const EIDRM: i32 = 43; +pub const ECHRNG: i32 = 44; +pub const EL2NSYNC: i32 = 45; +pub const EL3HLT: i32 = 46; +pub const EL3RST: i32 = 47; +pub const ELNRNG: i32 = 48; +pub const EUNATCH: i32 = 49; +pub const ENOCSI: i32 = 50; +pub const EL2HLT: i32 = 51; +pub const EBADE: 
i32 = 52; +pub const EBADR: i32 = 53; +pub const EXFULL: i32 = 54; +pub const ENOANO: i32 = 55; +pub const EBADRQC: i32 = 56; +pub const EBADSLT: i32 = 57; +pub const EDEADLOCK: i32 = EDEADLK; +pub const EBFONT: i32 = 59; +pub const ENOSTR: i32 = 60; +pub const ENODATA: i32 = 61; +pub const ETIME: i32 = 62; +pub const ENOSR: i32 = 63; +pub const ENONET: i32 = 64; +pub const ENOPKG: i32 = 65; +pub const EREMOTE: i32 = 66; +pub const ENOLINK: i32 = 67; +pub const EADV: i32 = 68; +pub const ESRMNT: i32 = 69; +pub const ECOMM: i32 = 70; +pub const EPROTO: i32 = 71; +pub const EMULTIHOP: i32 = 72; +pub const EDOTDOT: i32 = 73; +pub const EBADMSG: i32 = 74; +pub const EOVERFLOW: i32 = 75; +pub const ENOTUNIQ: i32 = 76; +pub const EBADFD: i32 = 77; +pub const EREMCHG: i32 = 78; +pub const ELIBACC: i32 = 79; +pub const ELIBBAD: i32 = 80; +pub const ELIBSCN: i32 = 81; +pub const ELIBMAX: i32 = 82; +pub const ELIBEXEC: i32 = 83; +pub const EILSEQ: i32 = 84; +pub const ERESTART: i32 = 85; +pub const ESTRPIPE: i32 = 86; +pub const EUSERS: i32 = 87; +pub const ENOTSOCK: i32 = 88; +pub const EDESTADDRREQ: i32 = 89; +pub const EMSGSIZE: i32 = 90; +pub const EPROTOTYPE: i32 = 91; +pub const ENOPROTOOPT: i32 = 92; +pub const EPROTONOSUPPORT: i32 = 93; +pub const ESOCKTNOSUPPORT: i32 = 94; +pub const EOPNOTSUPP: i32 = 95; +pub const EPFNOSUPPORT: i32 = 96; +pub const EAFNOSUPPORT: i32 = 97; +pub const EADDRINUSE: i32 = 98; +pub const EADDRNOTAVAIL: i32 = 99; +pub const ENETDOWN: i32 = 100; +pub const ENETUNREACH: i32 = 101; +pub const ENETRESET: i32 = 102; +pub const ECONNABORTED: i32 = 103; +pub const ECONNRESET: i32 = 104; +pub const ENOBUFS: i32 = 105; +pub const EISCONN: i32 = 106; +pub const ENOTCONN: i32 = 107; +pub const ESHUTDOWN: i32 = 108; +pub const ETOOMANYREFS: i32 = 109; +pub const ETIMEDOUT: i32 = 110; +pub const ECONNREFUSED: i32 = 111; +pub const EHOSTDOWN: i32 = 112; +pub const EHOSTUNREACH: i32 = 113; +pub const EALREADY: i32 = 114; +pub const EINPROGRESS: i32 = 115; +pub const ESTALE: i32 = 116; +pub const EUCLEAN: i32 = 117; +pub const ENOTNAM: i32 = 118; +pub const ENAVAIL: i32 = 119; +pub const EISNAM: i32 = 120; +pub const EREMOTEIO: i32 = 121; +pub const EDQUOT: i32 = 122; +pub const ENOMEDIUM: i32 = 123; +pub const EMEDIUMTYPE: i32 = 124; +pub const ECANCELED: i32 = 125; +pub const ENOKEY: i32 = 126; +pub const EKEYEXPIRED: i32 = 127; +pub const EKEYREVOKED: i32 = 128; +pub const EKEYREJECTED: i32 = 129; +pub const EOWNERDEAD: i32 = 130; +pub const ENOTRECOVERABLE: i32 = 131; +pub const ERFKILL: i32 = 132; +pub const EHWPOISON: i32 = 133; + +extern "C" { + #[link_name = "sys_alloc"] + pub fn alloc(size: usize, align: usize) -> *mut u8; + + #[link_name = "sys_alloc_zeroed"] + pub fn alloc_zeroed(size: usize, align: usize) -> *mut u8; + + #[link_name = "sys_realloc"] + pub fn realloc(ptr: *mut u8, size: usize, align: usize, new_size: usize) -> *mut u8; + + #[link_name = "sys_dealloc"] + pub fn dealloc(ptr: *mut u8, size: usize, align: usize); + + #[link_name = "sys_exit"] + pub fn exit(status: i32) -> !; + + #[link_name = "sys_abort"] + pub fn abort() -> !; + + #[link_name = "sys_errno"] + pub fn errno() -> i32; + + #[link_name = "sys_clock_gettime"] + pub fn clock_gettime(clockid: clockid_t, tp: *mut timespec) -> i32; + + #[link_name = "sys_nanosleep"] + pub fn nanosleep(req: *const timespec) -> i32; + + #[link_name = "sys_available_parallelism"] + pub fn available_parallelism() -> usize; + + #[link_name = "sys_futex_wait"] + pub fn futex_wait( + address: *mut u32, + expected: u32, 
+ timeout: *const timespec, + flags: u32, + ) -> i32; + + #[link_name = "sys_futex_wake"] + pub fn futex_wake(address: *mut u32, count: i32) -> i32; + + #[link_name = "sys_stat"] + pub fn stat(path: *const c_char, stat: *mut stat) -> i32; + + #[link_name = "sys_fstat"] + pub fn fstat(fd: i32, stat: *mut stat) -> i32; + + #[link_name = "sys_lstat"] + pub fn lstat(path: *const c_char, stat: *mut stat) -> i32; + + #[link_name = "sys_open"] + pub fn open(path: *const c_char, flags: i32, mode: mode_t) -> i32; + + #[link_name = "sys_unlink"] + pub fn unlink(path: *const c_char) -> i32; + + #[link_name = "sys_mkdir"] + pub fn mkdir(path: *const c_char, mode: mode_t) -> i32; + + #[link_name = "sys_rmdir"] + pub fn rmdir(path: *const c_char) -> i32; + + #[link_name = "sys_read"] + pub fn read(fd: i32, buf: *mut u8, len: usize) -> isize; + + #[link_name = "sys_write"] + pub fn write(fd: i32, buf: *const u8, len: usize) -> isize; + + #[link_name = "sys_readv"] + pub fn readv(fd: i32, iov: *const iovec, iovcnt: usize) -> isize; + + #[link_name = "sys_writev"] + pub fn writev(fd: i32, iov: *const iovec, iovcnt: usize) -> isize; + + #[link_name = "sys_close"] + pub fn close(fd: i32) -> i32; + + #[link_name = "sys_dup"] + pub fn dup(fd: i32) -> i32; + + #[link_name = "sys_fcntl"] + pub fn fcntl(fd: i32, cmd: i32, arg: i32) -> i32; + + #[link_name = "sys_getdents64"] + pub fn getdents64(fd: i32, dirp: *mut dirent64, count: usize) -> isize; + + #[link_name = "sys_getaddrinfo"] + pub fn getaddrinfo( + nodename: *const c_char, + servname: *const c_char, + hints: *const addrinfo, + res: *mut *mut addrinfo, + ) -> i32; + + #[link_name = "sys_freeaddrinfo"] + pub fn freeaddrinfo(ai: *mut addrinfo); + + #[link_name = "sys_socket"] + pub fn socket(domain: i32, ty: i32, protocol: i32) -> i32; + + #[link_name = "sys_bind"] + pub fn bind(sockfd: i32, addr: *const sockaddr, addrlen: socklen_t) -> i32; + + #[link_name = "sys_listen"] + pub fn listen(sockfd: i32, backlog: i32) -> i32; + + #[link_name = "sys_accept"] + pub fn accept(sockfd: i32, addr: *mut sockaddr, addrlen: *mut socklen_t) -> i32; + + #[link_name = "sys_connect"] + pub fn connect(sockfd: i32, addr: *const sockaddr, addrlen: socklen_t) -> i32; + + #[link_name = "sys_recv"] + pub fn recv(sockfd: i32, buf: *mut u8, len: usize, flags: i32) -> isize; + + #[link_name = "sys_recvfrom"] + pub fn recvfrom( + sockfd: i32, + buf: *mut c_void, + len: usize, + flags: i32, + addr: *mut sockaddr, + addrlen: *mut socklen_t, + ) -> isize; + + #[link_name = "sys_send"] + pub fn send(sockfd: i32, buf: *const c_void, len: usize, flags: i32) -> isize; + + #[link_name = "sys_sendto"] + pub fn sendto( + sockfd: i32, + buf: *const c_void, + len: usize, + flags: i32, + to: *const sockaddr, + tolen: socklen_t, + ) -> isize; + + #[link_name = "sys_getpeername"] + pub fn getpeername(sockfd: i32, addr: *mut sockaddr, addrlen: *mut socklen_t) -> i32; + + #[link_name = "sys_getsockname"] + pub fn getsockname(sockfd: i32, addr: *mut sockaddr, addrlen: *mut socklen_t) -> i32; + + #[link_name = "sys_getsockopt"] + pub fn getsockopt( + sockfd: i32, + level: i32, + optname: i32, + optval: *mut c_void, + optlen: *mut socklen_t, + ) -> i32; + + #[link_name = "sys_setsockopt"] + pub fn setsockopt( + sockfd: i32, + level: i32, + optname: i32, + optval: *const c_void, + optlen: socklen_t, + ) -> i32; + + #[link_name = "sys_ioctl"] + pub fn ioctl(sockfd: i32, cmd: i32, argp: *mut c_void) -> i32; + + #[link_name = "sys_shutdown"] + pub fn shutdown(sockfd: i32, how: i32) -> i32; + + #[link_name 
= "sys_eventfd"] + pub fn eventfd(initval: u64, flags: i16) -> i32; + + #[link_name = "sys_poll"] + pub fn poll(fds: *mut pollfd, nfds: nfds_t, timeout: i32) -> i32; +} diff --git a/vendor/libc/src/lib.rs b/vendor/libc/src/lib.rs new file mode 100644 index 00000000000000..aa919b5ca038e3 --- /dev/null +++ b/vendor/libc/src/lib.rs @@ -0,0 +1,159 @@ +//! libc - Raw FFI bindings to platforms' system libraries +#![crate_name = "libc"] +#![crate_type = "rlib"] +#![allow( + renamed_and_removed_lints, // Keep this order. + unknown_lints, // Keep this order. + nonstandard_style, + overflowing_literals, + unused_macros, + unused_macro_rules, +)] +#![warn( + missing_copy_implementations, + missing_debug_implementations, + safe_packed_borrows +)] +// Prepare for a future upgrade +#![warn(rust_2024_compatibility)] +// Things missing for 2024 that are blocked on MSRV or breakage +#![allow( + missing_unsafe_on_extern, + edition_2024_expr_fragment_specifier, + // Allowed globally, the warning is enabled in individual modules as we work through them + unsafe_op_in_unsafe_fn +)] +#![cfg_attr(libc_deny_warnings, deny(warnings))] +// Attributes needed when building as part of the standard library +#![cfg_attr(feature = "rustc-dep-of-std", feature(link_cfg, no_core))] +#![cfg_attr(libc_thread_local, feature(thread_local))] +#![cfg_attr(feature = "rustc-dep-of-std", allow(internal_features))] +// DIFF(1.0): The thread local references that raise this lint were removed in 1.0 +#![cfg_attr(feature = "rustc-dep-of-std", allow(static_mut_refs))] +#![cfg_attr(not(feature = "rustc-dep-of-std"), no_std)] +#![cfg_attr(feature = "rustc-dep-of-std", no_core)] + +#[macro_use] +mod macros; +mod new; + +cfg_if! { + if #[cfg(feature = "rustc-dep-of-std")] { + extern crate rustc_std_workspace_core as core; + } +} + +pub use core::ffi::c_void; + +#[allow(unused_imports)] // needed while the module is empty on some platforms +pub use new::*; + +cfg_if! 
{ + if #[cfg(windows)] { + mod primitives; + pub use crate::primitives::*; + + mod windows; + pub use crate::windows::*; + + prelude!(); + } else if #[cfg(target_os = "fuchsia")] { + mod primitives; + pub use crate::primitives::*; + + mod fuchsia; + pub use crate::fuchsia::*; + + prelude!(); + } else if #[cfg(target_os = "switch")] { + mod primitives; + pub use primitives::*; + + mod switch; + pub use switch::*; + + prelude!(); + } else if #[cfg(target_os = "psp")] { + mod primitives; + pub use primitives::*; + + mod psp; + pub use crate::psp::*; + + prelude!(); + } else if #[cfg(target_os = "vxworks")] { + mod primitives; + pub use crate::primitives::*; + + mod vxworks; + pub use crate::vxworks::*; + + prelude!(); + } else if #[cfg(target_os = "solid_asp3")] { + mod primitives; + pub use crate::primitives::*; + + mod solid; + pub use crate::solid::*; + + prelude!(); + } else if #[cfg(unix)] { + mod primitives; + pub use crate::primitives::*; + + mod unix; + pub use crate::unix::*; + + prelude!(); + } else if #[cfg(target_os = "hermit")] { + mod primitives; + pub use crate::primitives::*; + + mod hermit; + pub use crate::hermit::*; + + prelude!(); + } else if #[cfg(target_os = "teeos")] { + mod primitives; + pub use primitives::*; + + mod teeos; + pub use teeos::*; + + prelude!(); + } else if #[cfg(target_os = "trusty")] { + mod primitives; + pub use crate::primitives::*; + + mod trusty; + pub use crate::trusty::*; + + prelude!(); + } else if #[cfg(all(target_env = "sgx", target_vendor = "fortanix"))] { + mod primitives; + pub use crate::primitives::*; + + mod sgx; + pub use crate::sgx::*; + + prelude!(); + } else if #[cfg(any(target_env = "wasi", target_os = "wasi"))] { + mod primitives; + pub use crate::primitives::*; + + mod wasi; + pub use crate::wasi::*; + + prelude!(); + } else if #[cfg(target_os = "xous")] { + mod primitives; + pub use crate::primitives::*; + + mod xous; + pub use crate::xous::*; + + prelude!(); + } else { + // non-supported targets: empty... + } +} diff --git a/vendor/libc/src/macros.rs b/vendor/libc/src/macros.rs new file mode 100644 index 00000000000000..6906da6bd70da6 --- /dev/null +++ b/vendor/libc/src/macros.rs @@ -0,0 +1,446 @@ +/// A macro for defining #[cfg] if-else statements. +/// +/// This is similar to the `if/elif` C preprocessor macro by allowing definition +/// of a cascade of `#[cfg]` cases, emitting the implementation which matches +/// first. +/// +/// This allows you to conveniently provide a long list #[cfg]'d blocks of code +/// without having to rewrite each clause multiple times. +macro_rules! cfg_if { + // match if/else chains with a final `else` + ($( + if #[cfg($($meta:meta),*)] { $($it:item)* } + ) else * else { + $($it2:item)* + }) => { + cfg_if! { + @__items + () ; + $( ( ($($meta),*) ($($it)*) ), )* + ( () ($($it2)*) ), + } + }; + + // match if/else chains lacking a final `else` + ( + if #[cfg($($i_met:meta),*)] { $($i_it:item)* } + $( + else if #[cfg($($e_met:meta),*)] { $($e_it:item)* } + )* + ) => { + cfg_if! { + @__items + () ; + ( ($($i_met),*) ($($i_it)*) ), + $( ( ($($e_met),*) ($($e_it)*) ), )* + ( () () ), + } + }; + + // Internal and recursive macro to emit all the items + // + // Collects all the negated `cfg`s in a list at the beginning and after the + // semicolon is all the remaining items + (@__items ($($not:meta,)*) ; ) => {}; + (@__items ($($not:meta,)*) ; ( ($($m:meta),*) ($($it:item)*) ), + $($rest:tt)*) => { + // Emit all items within one block, applying an appropriate #[cfg]. 
The + // #[cfg] will require all `$m` matchers specified and must also negate + // all previous matchers. + cfg_if! { @__apply cfg(all($($m,)* not(any($($not),*)))), $($it)* } + + // Recurse to emit all other items in `$rest`, and when we do so add all + // our `$m` matchers to the list of `$not` matchers as future emissions + // will have to negate everything we just matched as well. + cfg_if! { @__items ($($not,)* $($m,)*) ; $($rest)* } + }; + + // Internal macro to Apply a cfg attribute to a list of items + (@__apply $m:meta, $($it:item)*) => { + $(#[$m] $it)* + }; +} + +/// Create an internal crate prelude with `core` reexports and common types. +macro_rules! prelude { + () => { + mod types; + + /// Frequently-used types that are available on all platforms + /// + /// We need to reexport the core types so this works with `rust-dep-of-std`. + mod prelude { + // Exports from `core` + #[allow(unused_imports)] + pub(crate) use core::clone::Clone; + #[allow(unused_imports)] + pub(crate) use core::default::Default; + #[allow(unused_imports)] + pub(crate) use core::marker::{Copy, Send, Sync}; + #[allow(unused_imports)] + pub(crate) use core::option::Option; + #[allow(unused_imports)] + pub(crate) use core::prelude::v1::derive; + #[allow(unused_imports)] + pub(crate) use core::{fmt, hash, iter, mem, ptr}; + + #[allow(unused_imports)] + pub(crate) use fmt::Debug; + #[allow(unused_imports)] + pub(crate) use mem::{align_of, align_of_val, size_of, size_of_val}; + + #[allow(unused_imports)] + pub(crate) use crate::types::{CEnumRepr, Padding}; + // Commonly used types defined in this crate + #[allow(unused_imports)] + pub(crate) use crate::{ + c_char, c_double, c_float, c_int, c_long, c_longlong, c_short, c_uchar, c_uint, + c_ulong, c_ulonglong, c_ushort, c_void, intptr_t, size_t, ssize_t, uintptr_t, + }; + } + }; +} + +/// Implement `Clone` and `Copy` for a struct, as well as `Debug`, `Eq`, `Hash`, and +/// `PartialEq` if the `extra_traits` feature is enabled. +/// +/// Use [`s_no_extra_traits`] for structs where the `extra_traits` feature does not +/// make sense, and for unions. +macro_rules! s { + ($( + $(#[$attr:meta])* + pub $t:ident $i:ident { $($field:tt)* } + )*) => ($( + s!(it: $(#[$attr])* pub $t $i { $($field)* }); + )*); + + (it: $(#[$attr:meta])* pub union $i:ident { $($field:tt)* }) => ( + compile_error!("unions cannot derive extra traits, use s_no_extra_traits instead"); + ); + + (it: $(#[$attr:meta])* pub struct $i:ident { $($field:tt)* }) => ( + __item! { + #[repr(C)] + #[cfg_attr( + feature = "extra_traits", + ::core::prelude::v1::derive(Eq, Hash, PartialEq) + )] + #[::core::prelude::v1::derive( + ::core::clone::Clone, + ::core::marker::Copy, + ::core::fmt::Debug, + )] + #[allow(deprecated)] + $(#[$attr])* + pub struct $i { $($field)* } + } + ); +} + +/// Implement `Clone` and `Copy` for a tuple struct, as well as `Debug`, `Eq`, `Hash`, +/// and `PartialEq` if the `extra_traits` feature is enabled. +/// +/// This is the same as [`s`] but works for tuple structs. +macro_rules! s_paren { + ($( + $(#[$attr:meta])* + pub struct $i:ident ( $($field:tt)* ); + )*) => ($( + __item! { + #[cfg_attr( + feature = "extra_traits", + ::core::prelude::v1::derive(Eq, Hash, PartialEq) + )] + #[::core::prelude::v1::derive( + ::core::clone::Clone, + ::core::marker::Copy, + ::core::fmt::Debug, + )] + $(#[$attr])* + pub struct $i ( $($field)* ); + } + )*); +} + +/// Implement `Clone`, `Copy`, and `Debug` since those can be derived, but exclude `PartialEq`, +/// `Eq`, and `Hash`. 
+/// +/// Most items will prefer to use [`s`]. +macro_rules! s_no_extra_traits { + ($( + $(#[$attr:meta])* + pub $t:ident $i:ident { $($field:tt)* } + )*) => ($( + s_no_extra_traits!(it: $(#[$attr])* pub $t $i { $($field)* }); + )*); + + (it: $(#[$attr:meta])* pub union $i:ident { $($field:tt)* }) => ( + __item! { + #[repr(C)] + #[::core::prelude::v1::derive(::core::clone::Clone, ::core::marker::Copy)] + $(#[$attr])* + pub union $i { $($field)* } + } + + impl ::core::fmt::Debug for $i { + fn fmt(&self, f: &mut ::core::fmt::Formatter<'_>) -> ::core::fmt::Result { + f.debug_struct(::core::stringify!($i)).finish_non_exhaustive() + } + } + ); + + (it: $(#[$attr:meta])* pub struct $i:ident { $($field:tt)* }) => ( + __item! { + #[repr(C)] + #[::core::prelude::v1::derive( + ::core::clone::Clone, + ::core::marker::Copy, + ::core::fmt::Debug, + )] + $(#[$attr])* + pub struct $i { $($field)* } + } + ); +} + +/// Specify that an enum should have no traits that aren't specified in the macro +/// invocation, i.e. no `Clone` or `Copy`. +macro_rules! missing { + ($( + $(#[$attr:meta])* + pub enum $i:ident {} + )*) => ($( + $(#[$attr])* + #[allow(missing_copy_implementations)] + pub enum $i { } + )*); +} + +/// Implement `Clone` and `Copy` for an enum, as well as `Debug`, `Eq`, `Hash`, and +/// `PartialEq` if the `extra_traits` feature is enabled. +// FIXME(#4419): Replace all uses of `e!` with `c_enum!` +macro_rules! e { + ($( + $(#[$attr:meta])* + pub enum $i:ident { $($field:tt)* } + )*) => ($( + __item! { + #[cfg_attr( + feature = "extra_traits", + ::core::prelude::v1::derive(Eq, Hash, PartialEq) + )] + #[::core::prelude::v1::derive( + ::core::clone::Clone, + ::core::marker::Copy, + ::core::fmt::Debug, + )] + $(#[$attr])* + pub enum $i { $($field)* } + } + )*); +} + +/// Represent a C enum as Rust constants and a type. +/// +/// C enums can't soundly be mapped to Rust enums since C enums are allowed to have duplicates or +/// unlisted values, but this is UB in Rust. This enum doesn't implement any traits, its main +/// purpose is to calculate the correct enum values. +/// +/// See for more. +macro_rules! c_enum { + ($( + $(#[repr($repr:ty)])? + pub enum $ty_name:ident { + $($variant:ident $(= $value:expr)?,)+ + } + )+) => { + $(c_enum!(@expand; + $(#[repr($repr)])? + pub enum $ty_name { + $($variant $(= $value)?,)+ + } + );)+ + }; + + (@expand; + $(#[repr($repr:ty)])? + pub enum $ty_name:ident { + $($variant:ident $(= $value:expr)?,)+ + } + ) => { + pub type $ty_name = c_enum!(@ty $($repr)?); + c_enum!(@one; $ty_name; 0; $($variant $(= $value)?,)+); + }; + + // Matcher for a single variant + (@one; $_ty_name:ident; $_idx:expr;) => {}; + ( + @one; $ty_name:ident; $default_val:expr; + $variant:ident $(= $value:expr)?, + $($tail:tt)* + ) => { + pub const $variant: $ty_name = { + #[allow(unused_variables)] + let r = $default_val; + $(let r = $value;)? + r + }; + + // The next value is always one more than the previous value, unless + // set explicitly. + c_enum!(@one; $ty_name; $variant + 1; $($tail)*); + }; + + // Use a specific type if provided, otherwise default to `CEnumRepr` + (@ty $repr:ty) => { $repr }; + (@ty) => { $crate::prelude::CEnumRepr }; +} + +/// Define a `unsafe` function. +macro_rules! f { + ($( + $(#[$attr:meta])* + // Less than ideal hack to match either `fn` or `const fn`. + pub $(fn $i:ident)? $(const fn $const_i:ident)? + ($($arg:ident: $argty:ty),* $(,)*) -> $ret:ty + $body:block + )+) => {$( + #[inline] + $(#[$attr])* + pub $(unsafe extern "C" fn $i)? 
$(const unsafe extern "C" fn $const_i)? + ($($arg: $argty),*) -> $ret + $body + )+}; +} + +/// Define a safe function. +macro_rules! safe_f { + ($( + $(#[$attr:meta])* + // Less than ideal hack to match either `fn` or `const fn`. + pub $(fn $i:ident)? $(const fn $const_i:ident)? + ($($arg:ident: $argty:ty),* $(,)*) -> $ret:ty + $body:block + )+) => {$( + #[inline] + $(#[$attr])* + pub $(extern "C" fn $i)? $(const extern "C" fn $const_i)? + ($($arg: $argty),*) -> $ret + $body + )+}; +} + +macro_rules! __item { + ($i:item) => { + $i + }; +} + +// This macro is used to deprecate items that should be accessed via the mach2 crate +macro_rules! deprecated_mach { + (pub const $id:ident: $ty:ty = $expr:expr;) => { + #[deprecated( + since = "0.2.55", + note = "Use the `mach2` crate instead", + )] + #[allow(deprecated)] + pub const $id: $ty = $expr; + }; + ($(pub const $id:ident: $ty:ty = $expr:expr;)*) => { + $( + deprecated_mach!( + pub const $id: $ty = $expr; + ); + )* + }; + (pub type $id:ident = $ty:ty;) => { + #[deprecated( + since = "0.2.55", + note = "Use the `mach2` crate instead", + )] + #[allow(deprecated)] + pub type $id = $ty; + }; + ($(pub type $id:ident = $ty:ty;)*) => { + $( + deprecated_mach!( + pub type $id = $ty; + ); + )* + } +} + +#[cfg(test)] +mod tests { + use crate::types::CEnumRepr; + + #[test] + fn c_enumbasic() { + // By default, variants get sequential values. + c_enum! { + pub enum e { + VAR0, + VAR1, + VAR2, + } + } + + assert_eq!(VAR0, 0 as CEnumRepr); + assert_eq!(VAR1, 1 as CEnumRepr); + assert_eq!(VAR2, 2 as CEnumRepr); + } + + #[test] + fn c_enumrepr() { + // By default, variants get sequential values. + c_enum! { + #[repr(u16)] + pub enum e { + VAR0, + } + } + + assert_eq!(VAR0, 0_u16); + } + + #[test] + fn c_enumset_value() { + // Setting an explicit value resets the count. + c_enum! { + pub enum e { + VAR2 = 2, + VAR3, + VAR4, + } + } + + assert_eq!(VAR2, 2 as CEnumRepr); + assert_eq!(VAR3, 3 as CEnumRepr); + assert_eq!(VAR4, 4 as CEnumRepr); + } + + #[test] + fn c_enummultiple_set_value() { + // C enums always take one more than the previous value, unless set to a specific + // value. Duplicates are allowed. + c_enum! { + pub enum e { + VAR0, + VAR2_0 = 2, + VAR3_0, + VAR4_0, + VAR2_1 = 2, + VAR3_1, + VAR4_1, + } + } + + assert_eq!(VAR0, 0 as CEnumRepr); + assert_eq!(VAR2_0, 2 as CEnumRepr); + assert_eq!(VAR3_0, 3 as CEnumRepr); + assert_eq!(VAR4_0, 4 as CEnumRepr); + assert_eq!(VAR2_1, 2 as CEnumRepr); + assert_eq!(VAR3_1, 3 as CEnumRepr); + assert_eq!(VAR4_1, 4 as CEnumRepr); + } +} diff --git a/vendor/libc/src/new/bionic/mod.rs b/vendor/libc/src/new/bionic/mod.rs new file mode 100644 index 00000000000000..644a4ab96d90fc --- /dev/null +++ b/vendor/libc/src/new/bionic/mod.rs @@ -0,0 +1,2 @@ +mod sys; +pub use sys::*; diff --git a/vendor/libc/src/new/bionic/sys/mod.rs b/vendor/libc/src/new/bionic/sys/mod.rs new file mode 100644 index 00000000000000..fd96d0821ac88c --- /dev/null +++ b/vendor/libc/src/new/bionic/sys/mod.rs @@ -0,0 +1,2 @@ +mod socket; +pub use socket::*; diff --git a/vendor/libc/src/new/bionic/sys/socket.rs b/vendor/libc/src/new/bionic/sys/socket.rs new file mode 100644 index 00000000000000..49af36fe93356c --- /dev/null +++ b/vendor/libc/src/new/bionic/sys/socket.rs @@ -0,0 +1,51 @@ +//! Header: `bionic/libc/include/sys/socket.h` + +use crate::prelude::*; + +s! 
{ + pub struct msghdr { + pub msg_name: *mut c_void, + pub msg_namelen: crate::socklen_t, + pub msg_iov: *mut crate::iovec, + pub msg_iovlen: size_t, + pub msg_control: *mut c_void, + pub msg_controllen: size_t, + pub msg_flags: c_int, + } + + pub struct cmsghdr { + pub cmsg_len: size_t, + pub cmsg_level: c_int, + pub cmsg_type: c_int, + } + + pub struct ucred { + pub pid: crate::pid_t, + pub uid: crate::uid_t, + pub gid: crate::gid_t, + } +} + +extern "C" { + pub fn recvmmsg( + sockfd: c_int, + msgvec: *mut crate::mmsghdr, + vlen: c_uint, + flags: c_int, + timeout: *const crate::timespec, + ) -> c_int; + pub fn sendmmsg( + sockfd: c_int, + msgvec: *const crate::mmsghdr, + vlen: c_uint, + flags: c_int, + ) -> c_int; + pub fn recvfrom( + socket: c_int, + buf: *mut c_void, + len: size_t, + flags: c_int, + addr: *mut crate::sockaddr, + addrlen: *mut crate::socklen_t, + ) -> ssize_t; +} diff --git a/vendor/libc/src/new/linux_uapi/linux/can.rs b/vendor/libc/src/new/linux_uapi/linux/can.rs new file mode 100644 index 00000000000000..b9479a63bacea9 --- /dev/null +++ b/vendor/libc/src/new/linux_uapi/linux/can.rs @@ -0,0 +1,136 @@ +//! Header: `uapi/linux/can.h` + +pub(crate) mod j1939; +pub(crate) mod raw; + +pub use j1939::*; +pub use raw::*; + +use crate::prelude::*; + +pub const CAN_EFF_FLAG: canid_t = 0x80000000; +pub const CAN_RTR_FLAG: canid_t = 0x40000000; +pub const CAN_ERR_FLAG: canid_t = 0x20000000; + +pub const CAN_SFF_MASK: canid_t = 0x000007FF; +pub const CAN_EFF_MASK: canid_t = 0x1FFFFFFF; +pub const CAN_ERR_MASK: canid_t = 0x1FFFFFFF; +pub const CANXL_PRIO_MASK: crate::canid_t = CAN_SFF_MASK; + +pub type canid_t = u32; + +pub const CAN_SFF_ID_BITS: c_int = 11; +pub const CAN_EFF_ID_BITS: c_int = 29; +pub const CANXL_PRIO_BITS: c_int = CAN_SFF_ID_BITS; + +pub type can_err_mask_t = u32; + +pub const CAN_MAX_DLC: c_int = 8; +pub const CAN_MAX_DLEN: usize = 8; + +pub const CANFD_MAX_DLC: c_int = 15; +pub const CANFD_MAX_DLEN: usize = 64; + +pub const CANXL_MIN_DLC: c_int = 0; +pub const CANXL_MAX_DLC: c_int = 2047; +pub const CANXL_MAX_DLC_MASK: c_int = 0x07FF; +pub const CANXL_MIN_DLEN: usize = 1; +pub const CANXL_MAX_DLEN: usize = 2048; + +s! { + #[repr(align(8))] + pub struct can_frame { + pub can_id: canid_t, + // FIXME(1.0): this field was renamed to `len` in Linux 5.11 + pub can_dlc: u8, + __pad: u8, + __res0: u8, + pub len8_dlc: u8, + pub data: [u8; CAN_MAX_DLEN], + } +} + +pub const CANFD_BRS: c_int = 0x01; +pub const CANFD_ESI: c_int = 0x02; +pub const CANFD_FDF: c_int = 0x04; + +s! { + #[repr(align(8))] + pub struct canfd_frame { + pub can_id: canid_t, + pub len: u8, + pub flags: u8, + __res0: u8, + __res1: u8, + pub data: [u8; CANFD_MAX_DLEN], + } +} + +pub const CANXL_XLF: c_int = 0x80; +pub const CANXL_SEC: c_int = 0x01; + +s! 
{ + #[repr(align(8))] + pub struct canxl_frame { + pub prio: canid_t, + pub flags: u8, + pub sdt: u8, + pub len: u16, + pub af: u32, + pub data: [u8; CANXL_MAX_DLEN], + } +} + +pub const CAN_MTU: usize = size_of::<can_frame>(); +pub const CANFD_MTU: usize = size_of::<canfd_frame>(); +pub const CANXL_MTU: usize = size_of::<canxl_frame>(); +// FIXME(offset_of): use `core::mem::offset_of!` once that is available +// https://github.com/rust-lang/rfcs/pull/3308 +// pub const CANXL_HDR_SIZE: usize = core::mem::offset_of!(canxl_frame, data); +pub const CANXL_HDR_SIZE: usize = 12; +pub const CANXL_MIN_MTU: usize = CANXL_HDR_SIZE + 64; +pub const CANXL_MAX_MTU: usize = CANXL_MTU; + +pub const CAN_RAW: c_int = 1; +pub const CAN_BCM: c_int = 2; +pub const CAN_TP16: c_int = 3; +pub const CAN_TP20: c_int = 4; +pub const CAN_MCNET: c_int = 5; +pub const CAN_ISOTP: c_int = 6; +pub const CAN_J1939: c_int = 7; +pub const CAN_NPROTO: c_int = 8; + +pub const SOL_CAN_BASE: c_int = 100; + +s_no_extra_traits! { + pub struct sockaddr_can { + pub can_family: crate::sa_family_t, + pub can_ifindex: c_int, + pub can_addr: __c_anonymous_sockaddr_can_can_addr, + } + + pub union __c_anonymous_sockaddr_can_can_addr { + pub tp: __c_anonymous_sockaddr_can_tp, + pub j1939: __c_anonymous_sockaddr_can_j1939, + } +} + +s! { + pub struct __c_anonymous_sockaddr_can_tp { + pub rx_id: canid_t, + pub tx_id: canid_t, + } + + pub struct __c_anonymous_sockaddr_can_j1939 { + pub name: u64, + pub pgn: u32, + pub addr: u8, + } + + pub struct can_filter { + pub can_id: canid_t, + pub can_mask: canid_t, + } +} + +pub const CAN_INV_FILTER: canid_t = 0x20000000; diff --git a/vendor/libc/src/new/linux_uapi/linux/can/j1939.rs b/vendor/libc/src/new/linux_uapi/linux/can/j1939.rs new file mode 100644 index 00000000000000..fdf425ce6c0c1b --- /dev/null +++ b/vendor/libc/src/new/linux_uapi/linux/can/j1939.rs @@ -0,0 +1,60 @@ +//! `linux/can/j1939.h` + +pub use crate::linux::can::*; + +pub const J1939_MAX_UNICAST_ADDR: c_uchar = 0xfd; +pub const J1939_IDLE_ADDR: c_uchar = 0xfe; +pub const J1939_NO_ADDR: c_uchar = 0xff; +pub const J1939_NO_NAME: c_ulong = 0; +pub const J1939_PGN_REQUEST: c_uint = 0x0ea00; +pub const J1939_PGN_ADDRESS_CLAIMED: c_uint = 0x0ee00; +pub const J1939_PGN_ADDRESS_COMMANDED: c_uint = 0x0fed8; +pub const J1939_PGN_PDU1_MAX: c_uint = 0x3ff00; +pub const J1939_PGN_MAX: c_uint = 0x3ffff; +pub const J1939_NO_PGN: c_uint = 0x40000; + +pub type pgn_t = u32; +pub type priority_t = u8; +pub type name_t = u64; + +pub const SOL_CAN_J1939: c_int = SOL_CAN_BASE + CAN_J1939; + +// FIXME(cleanup): these could use c_enum if it can accept anonymous enums. + +pub const SO_J1939_FILTER: c_int = 1; +pub const SO_J1939_PROMISC: c_int = 2; +pub const SO_J1939_SEND_PRIO: c_int = 3; +pub const SO_J1939_ERRQUEUE: c_int = 4; + +pub const SCM_J1939_DEST_ADDR: c_int = 1; +pub const SCM_J1939_DEST_NAME: c_int = 2; +pub const SCM_J1939_PRIO: c_int = 3; +pub const SCM_J1939_ERRQUEUE: c_int = 4; + +pub const J1939_NLA_PAD: c_int = 0; +pub const J1939_NLA_BYTES_ACKED: c_int = 1; +pub const J1939_NLA_TOTAL_SIZE: c_int = 2; +pub const J1939_NLA_PGN: c_int = 3; +pub const J1939_NLA_SRC_NAME: c_int = 4; +pub const J1939_NLA_DEST_NAME: c_int = 5; +pub const J1939_NLA_SRC_ADDR: c_int = 6; +pub const J1939_NLA_DEST_ADDR: c_int = 7; + +pub const J1939_EE_INFO_NONE: c_int = 0; +pub const J1939_EE_INFO_TX_ABORT: c_int = 1; +pub const J1939_EE_INFO_RX_RTS: c_int = 2; +pub const J1939_EE_INFO_RX_DPO: c_int = 3; +pub const J1939_EE_INFO_RX_ABORT: c_int = 4; + +s! 
{ + pub struct j1939_filter { + pub name: name_t, + pub name_mask: name_t, + pub pgn: pgn_t, + pub pgn_mask: pgn_t, + pub addr: u8, + pub addr_mask: u8, + } +} + +pub const J1939_FILTER_MAX: c_int = 512; diff --git a/vendor/libc/src/new/linux_uapi/linux/can/raw.rs b/vendor/libc/src/new/linux_uapi/linux/can/raw.rs new file mode 100644 index 00000000000000..1f92a13edbba69 --- /dev/null +++ b/vendor/libc/src/new/linux_uapi/linux/can/raw.rs @@ -0,0 +1,15 @@ +//! `linux/can/raw.h` + +pub use crate::linux::can::*; + +pub const SOL_CAN_RAW: c_int = SOL_CAN_BASE + CAN_RAW; +pub const CAN_RAW_FILTER_MAX: c_int = 512; + +// FIXME(cleanup): use `c_enum!`, which needs to be adapted to allow omitting a type. +pub const CAN_RAW_FILTER: c_int = 1; +pub const CAN_RAW_ERR_FILTER: c_int = 2; +pub const CAN_RAW_LOOPBACK: c_int = 3; +pub const CAN_RAW_RECV_OWN_MSGS: c_int = 4; +pub const CAN_RAW_FD_FRAMES: c_int = 5; +pub const CAN_RAW_JOIN_FILTERS: c_int = 6; +pub const CAN_RAW_XL_FRAMES: c_int = 7; diff --git a/vendor/libc/src/new/linux_uapi/linux/mod.rs b/vendor/libc/src/new/linux_uapi/linux/mod.rs new file mode 100644 index 00000000000000..4a9c04d6396b1a --- /dev/null +++ b/vendor/libc/src/new/linux_uapi/linux/mod.rs @@ -0,0 +1,4 @@ +//! The `linux` directory within `include/uapi` in the Linux source tree. + +pub(crate) mod can; +pub use can::*; diff --git a/vendor/libc/src/new/linux_uapi/mod.rs b/vendor/libc/src/new/linux_uapi/mod.rs new file mode 100644 index 00000000000000..e0d4e094c435f1 --- /dev/null +++ b/vendor/libc/src/new/linux_uapi/mod.rs @@ -0,0 +1,4 @@ +//! This directory maps to `include/uapi` in the Linux source tree. + +pub(crate) mod linux; +pub use linux::*; diff --git a/vendor/libc/src/new/mod.rs b/vendor/libc/src/new/mod.rs new file mode 100644 index 00000000000000..0a2a55b0f469bb --- /dev/null +++ b/vendor/libc/src/new/mod.rs @@ -0,0 +1,15 @@ +//! This module contains the future directory structure. If possible, new definitions should +//! get added here. +//! +//! Eventually everything should be moved over, and we will move this directory to the top +//! level in `src`. + +cfg_if! { + if #[cfg(target_os = "linux")] { + mod linux_uapi; + pub use linux_uapi::*; + } else if #[cfg(target_os = "android")] { + mod bionic; + pub use bionic::*; + } +} diff --git a/vendor/libc/src/primitives.rs b/vendor/libc/src/primitives.rs new file mode 100644 index 00000000000000..80a10af4c85462 --- /dev/null +++ b/vendor/libc/src/primitives.rs @@ -0,0 +1,95 @@ +//! This module contains type aliases for C's platform-specific types +//! and fixed-width integer types. +//! +//! The platform-specific types definitions were taken from rust-lang/rust in +//! library/core/src/ffi/primitives.rs +//! +//! The fixed-width integer aliases are deprecated: use the Rust types instead. + +pub type c_schar = i8; +pub type c_uchar = u8; +pub type c_short = i16; +pub type c_ushort = u16; + +pub type c_longlong = i64; +pub type c_ulonglong = u64; + +pub type c_float = f32; +pub type c_double = f64; + +cfg_if! { + if #[cfg(all( + not(windows), + not(target_vendor = "apple"), + not(target_os = "vita"), + any( + target_arch = "aarch64", + target_arch = "arm", + target_arch = "csky", + target_arch = "hexagon", + target_arch = "msp430", + target_arch = "powerpc", + target_arch = "powerpc64", + target_arch = "riscv32", + target_arch = "riscv64", + target_arch = "s390x", + target_arch = "xtensa", + ) + ))] { + pub type c_char = u8; + } else { + // On every other target, c_char is signed. + pub type c_char = i8; + } +} + +cfg_if! 
{ + if #[cfg(any(target_arch = "avr", target_arch = "msp430"))] { + pub type c_int = i16; + pub type c_uint = u16; + } else { + pub type c_int = i32; + pub type c_uint = u32; + } +} + +cfg_if! { + if #[cfg(all(target_pointer_width = "64", not(windows)))] { + pub type c_long = i64; + pub type c_ulong = u64; + } else { + // The minimal size of `long` in the C standard is 32 bits + pub type c_long = i32; + pub type c_ulong = u32; + } +} + +#[deprecated(since = "0.2.55", note = "Use i8 instead.")] +pub type int8_t = i8; +#[deprecated(since = "0.2.55", note = "Use i16 instead.")] +pub type int16_t = i16; +#[deprecated(since = "0.2.55", note = "Use i32 instead.")] +pub type int32_t = i32; +#[deprecated(since = "0.2.55", note = "Use i64 instead.")] +pub type int64_t = i64; +#[deprecated(since = "0.2.55", note = "Use u8 instead.")] +pub type uint8_t = u8; +#[deprecated(since = "0.2.55", note = "Use u16 instead.")] +pub type uint16_t = u16; +#[deprecated(since = "0.2.55", note = "Use u32 instead.")] +pub type uint32_t = u32; +#[deprecated(since = "0.2.55", note = "Use u64 instead.")] +pub type uint64_t = u64; + +cfg_if! { + if #[cfg(all(target_arch = "aarch64", not(target_os = "windows")))] { + /// C `__int128` (a GCC extension that's part of many ABIs) + pub type __int128 = i128; + /// C `unsigned __int128` (a GCC extension that's part of many ABIs) + pub type __uint128 = u128; + /// C __int128_t (alternate name for [__int128][]) + pub type __int128_t = i128; + /// C __uint128_t (alternate name for [__uint128][]) + pub type __uint128_t = u128; + } +} diff --git a/vendor/libc/src/psp.rs b/vendor/libc/src/psp.rs new file mode 100644 index 00000000000000..823567127c4019 --- /dev/null +++ b/vendor/libc/src/psp.rs @@ -0,0 +1,4131 @@ +//! PSP C type definitions +//! +//! These type declarations are not enough, as they must be ultimately resolved +//! by the linker. Crates that use these definitions must, somewhere in the +//! crate graph, include a stub provider crate such as the `psp` crate. 
+ +use crate::prelude::*; + +pub type intmax_t = i64; +pub type uintmax_t = u64; + +pub type size_t = usize; +pub type ptrdiff_t = isize; +pub type intptr_t = isize; +pub type uintptr_t = usize; +pub type ssize_t = isize; + +pub type SceKernelVTimerHandler = unsafe extern "C" fn( + uid: SceUid, + arg1: *mut SceKernelSysClock, + arg2: *mut SceKernelSysClock, + arg3: *mut c_void, +) -> u32; + +pub type SceKernelVTimerHandlerWide = + unsafe extern "C" fn(uid: SceUid, arg1: i64, arg2: i64, arg3: *mut c_void) -> u32; + +pub type SceKernelThreadEventHandler = + unsafe extern "C" fn(mask: i32, thid: SceUid, common: *mut c_void) -> i32; + +pub type SceKernelAlarmHandler = unsafe extern "C" fn(common: *mut c_void) -> u32; + +pub type SceKernelCallbackFunction = + unsafe extern "C" fn(arg1: i32, arg2: i32, arg: *mut c_void) -> i32; + +pub type SceKernelThreadEntry = unsafe extern "C" fn(args: usize, argp: *mut c_void) -> i32; + +pub type PowerCallback = extern "C" fn(unknown: i32, power_info: i32); + +pub type IoPermissions = i32; + +pub type UmdCallback = fn(unknown: i32, event: i32) -> i32; + +pub type SceMpegRingbufferCb = + Option i32>; + +pub type GuCallback = Option; +pub type GuSwapBuffersCallback = + Option; + +pub type SceNetAdhocctlHandler = + Option; + +pub type AdhocMatchingCallback = Option< + unsafe extern "C" fn( + matching_id: i32, + event: i32, + mac: *mut u8, + opt_len: i32, + opt_data: *mut c_void, + ), +>; + +pub type SceNetApctlHandler = Option< + unsafe extern "C" fn(oldState: i32, newState: i32, event: i32, error: i32, pArg: *mut c_void), +>; + +pub type HttpMallocFunction = Option *mut c_void>; +pub type HttpReallocFunction = + Option *mut c_void>; +pub type HttpFreeFunction = Option; +pub type HttpPasswordCB = Option< + unsafe extern "C" fn( + request: i32, + auth_type: HttpAuthType, + realm: *const u8, + username: *mut u8, + password: *mut u8, + need_entity: i32, + entity_body: *mut *mut u8, + entity_size: *mut usize, + save: *mut i32, + ) -> i32, +>; + +pub type socklen_t = u32; + +e! 
{ + #[repr(u32)] + pub enum AudioFormat { + Stereo = 0, + Mono = 0x10, + } + + #[repr(u32)] + pub enum DisplayMode { + Lcd = 0, + } + + #[repr(u32)] + pub enum DisplayPixelFormat { + Psm5650 = 0, + Psm5551 = 1, + Psm4444 = 2, + Psm8888 = 3, + } + + #[repr(u32)] + pub enum DisplaySetBufSync { + Immediate = 0, + NextFrame = 1, + } + + #[repr(i32)] + pub enum AudioOutputFrequency { + Khz48 = 48000, + Khz44_1 = 44100, + Khz32 = 32000, + Khz24 = 24000, + Khz22_05 = 22050, + Khz16 = 16000, + Khz12 = 12000, + Khz11_025 = 11025, + Khz8 = 8000, + } + + #[repr(i32)] + pub enum AudioInputFrequency { + Khz44_1 = 44100, + Khz22_05 = 22050, + Khz11_025 = 11025, + } + + #[repr(u32)] + pub enum CtrlMode { + Digital = 0, + Analog, + } + + #[repr(i32)] + pub enum GeMatrixType { + Bone0 = 0, + Bone1, + Bone2, + Bone3, + Bone4, + Bone5, + Bone6, + Bone7, + World, + View, + Projection, + TexGen, + } + + #[repr(i32)] + pub enum GeListState { + Done = 0, + Queued, + DrawingDone, + StallReached, + CancelDone, + } + + #[repr(u8)] + pub enum GeCommand { + Nop = 0, + Vaddr = 0x1, + Iaddr = 0x2, + Prim = 0x4, + Bezier = 0x5, + Spline = 0x6, + BoundingBox = 0x7, + Jump = 0x8, + BJump = 0x9, + Call = 0xa, + Ret = 0xb, + End = 0xc, + Signal = 0xe, + Finish = 0xf, + Base = 0x10, + VertexType = 0x12, + OffsetAddr = 0x13, + Origin = 0x14, + Region1 = 0x15, + Region2 = 0x16, + LightingEnable = 0x17, + LightEnable0 = 0x18, + LightEnable1 = 0x19, + LightEnable2 = 0x1a, + LightEnable3 = 0x1b, + DepthClampEnable = 0x1c, + CullFaceEnable = 0x1d, + TextureMapEnable = 0x1e, + FogEnable = 0x1f, + DitherEnable = 0x20, + AlphaBlendEnable = 0x21, + AlphaTestEnable = 0x22, + ZTestEnable = 0x23, + StencilTestEnable = 0x24, + AntiAliasEnable = 0x25, + PatchCullEnable = 0x26, + ColorTestEnable = 0x27, + LogicOpEnable = 0x28, + BoneMatrixNumber = 0x2a, + BoneMatrixData = 0x2b, + MorphWeight0 = 0x2c, + MorphWeight1 = 0x2d, + MorphWeight2 = 0x2e, + MorphWeight3 = 0x2f, + MorphWeight4 = 0x30, + MorphWeight5 = 0x31, + MorphWeight6 = 0x32, + MorphWeight7 = 0x33, + PatchDivision = 0x36, + PatchPrimitive = 0x37, + PatchFacing = 0x38, + WorldMatrixNumber = 0x3a, + WorldMatrixData = 0x3b, + ViewMatrixNumber = 0x3c, + ViewMatrixData = 0x3d, + ProjMatrixNumber = 0x3e, + ProjMatrixData = 0x3f, + TGenMatrixNumber = 0x40, + TGenMatrixData = 0x41, + ViewportXScale = 0x42, + ViewportYScale = 0x43, + ViewportZScale = 0x44, + ViewportXCenter = 0x45, + ViewportYCenter = 0x46, + ViewportZCenter = 0x47, + TexScaleU = 0x48, + TexScaleV = 0x49, + TexOffsetU = 0x4a, + TexOffsetV = 0x4b, + OffsetX = 0x4c, + OffsetY = 0x4d, + ShadeMode = 0x50, + ReverseNormal = 0x51, + MaterialUpdate = 0x53, + MaterialEmissive = 0x54, + MaterialAmbient = 0x55, + MaterialDiffuse = 0x56, + MaterialSpecular = 0x57, + MaterialAlpha = 0x58, + MaterialSpecularCoef = 0x5b, + AmbientColor = 0x5c, + AmbientAlpha = 0x5d, + LightMode = 0x5e, + LightType0 = 0x5f, + LightType1 = 0x60, + LightType2 = 0x61, + LightType3 = 0x62, + Light0X = 0x63, + Light0Y, + Light0Z, + Light1X, + Light1Y, + Light1Z, + Light2X, + Light2Y, + Light2Z, + Light3X, + Light3Y, + Light3Z, + Light0DirectionX = 0x6f, + Light0DirectionY, + Light0DirectionZ, + Light1DirectionX, + Light1DirectionY, + Light1DirectionZ, + Light2DirectionX, + Light2DirectionY, + Light2DirectionZ, + Light3DirectionX, + Light3DirectionY, + Light3DirectionZ, + Light0ConstantAtten = 0x7b, + Light0LinearAtten, + Light0QuadtraticAtten, + Light1ConstantAtten, + Light1LinearAtten, + Light1QuadtraticAtten, + Light2ConstantAtten, + Light2LinearAtten, + 
Light2QuadtraticAtten, + Light3ConstantAtten, + Light3LinearAtten, + Light3QuadtraticAtten, + Light0ExponentAtten = 0x87, + Light1ExponentAtten, + Light2ExponentAtten, + Light3ExponentAtten, + Light0CutoffAtten = 0x8b, + Light1CutoffAtten, + Light2CutoffAtten, + Light3CutoffAtten, + Light0Ambient = 0x8f, + Light0Diffuse, + Light0Specular, + Light1Ambient, + Light1Diffuse, + Light1Specular, + Light2Ambient, + Light2Diffuse, + Light2Specular, + Light3Ambient, + Light3Diffuse, + Light3Specular, + Cull = 0x9b, + FrameBufPtr = 0x9c, + FrameBufWidth = 0x9d, + ZBufPtr = 0x9e, + ZBufWidth = 0x9f, + TexAddr0 = 0xa0, + TexAddr1, + TexAddr2, + TexAddr3, + TexAddr4, + TexAddr5, + TexAddr6, + TexAddr7, + TexBufWidth0 = 0xa8, + TexBufWidth1, + TexBufWidth2, + TexBufWidth3, + TexBufWidth4, + TexBufWidth5, + TexBufWidth6, + TexBufWidth7, + ClutAddr = 0xb0, + ClutAddrUpper = 0xb1, + TransferSrc, + TransferSrcW, + TransferDst, + TransferDstW, + TexSize0 = 0xb8, + TexSize1, + TexSize2, + TexSize3, + TexSize4, + TexSize5, + TexSize6, + TexSize7, + TexMapMode = 0xc0, + TexShadeLs = 0xc1, + TexMode = 0xc2, + TexFormat = 0xc3, + LoadClut = 0xc4, + ClutFormat = 0xc5, + TexFilter = 0xc6, + TexWrap = 0xc7, + TexLevel = 0xc8, + TexFunc = 0xc9, + TexEnvColor = 0xca, + TexFlush = 0xcb, + TexSync = 0xcc, + Fog1 = 0xcd, + Fog2 = 0xce, + FogColor = 0xcf, + TexLodSlope = 0xd0, + FramebufPixFormat = 0xd2, + ClearMode = 0xd3, + Scissor1 = 0xd4, + Scissor2 = 0xd5, + MinZ = 0xd6, + MaxZ = 0xd7, + ColorTest = 0xd8, + ColorRef = 0xd9, + ColorTestmask = 0xda, + AlphaTest = 0xdb, + StencilTest = 0xdc, + StencilOp = 0xdd, + ZTest = 0xde, + BlendMode = 0xdf, + BlendFixedA = 0xe0, + BlendFixedB = 0xe1, + Dith0 = 0xe2, + Dith1, + Dith2, + Dith3, + LogicOp = 0xe6, + ZWriteDisable = 0xe7, + MaskRgb = 0xe8, + MaskAlpha = 0xe9, + TransferStart = 0xea, + TransferSrcPos = 0xeb, + TransferDstPos = 0xec, + TransferSize = 0xee, + Vscx = 0xf0, + Vscy = 0xf1, + Vscz = 0xf2, + Vtcs = 0xf3, + Vtct = 0xf4, + Vtcq = 0xf5, + Vcv = 0xf6, + Vap = 0xf7, + Vfc = 0xf8, + Vscv = 0xf9, + + Unknown03 = 0x03, + Unknown0D = 0x0d, + Unknown11 = 0x11, + Unknown29 = 0x29, + Unknown34 = 0x34, + Unknown35 = 0x35, + Unknown39 = 0x39, + Unknown4E = 0x4e, + Unknown4F = 0x4f, + Unknown52 = 0x52, + Unknown59 = 0x59, + Unknown5A = 0x5a, + UnknownB6 = 0xb6, + UnknownB7 = 0xb7, + UnknownD1 = 0xd1, + UnknownED = 0xed, + UnknownEF = 0xef, + UnknownFA = 0xfa, + UnknownFB = 0xfb, + UnknownFC = 0xfc, + UnknownFD = 0xfd, + UnknownFE = 0xfe, + NopFF = 0xff, + } + + #[repr(i32)] + pub enum SceSysMemPartitionId { + SceKernelUnknownPartition = 0, + SceKernelPrimaryKernelPartition = 1, + SceKernelPrimaryUserPartition = 2, + SceKernelOtherKernelPartition1 = 3, + SceKernelOtherKernelPartition2 = 4, + SceKernelVshellPARTITION = 5, + SceKernelScUserPartition = 6, + SceKernelMeUserPartition = 7, + SceKernelExtendedScKernelPartition = 8, + SceKernelExtendedSc2KernelPartition = 9, + SceKernelExtendedMeKernelPartition = 10, + SceKernelVshellKernelPartition = 11, + SceKernelExtendedKernelPartition = 12, + } + + #[repr(i32)] + pub enum SceSysMemBlockTypes { + Low = 0, + High, + Addr, + } + + #[repr(u32)] + pub enum Interrupt { + Gpio = 4, + Ata = 5, + Umd = 6, + Mscm0 = 7, + Wlan = 8, + Audio = 10, + I2c = 12, + Sircs = 14, + Systimer0 = 15, + Systimer1 = 16, + Systimer2 = 17, + Systimer3 = 18, + Thread0 = 19, + Nand = 20, + Dmacplus = 21, + Dma0 = 22, + Dma1 = 23, + Memlmd = 24, + Ge = 25, + Vblank = 30, + Mecodec = 31, + Hpremote = 36, + Mscm1 = 60, + Mscm2 = 61, + Thread1 = 65, + Interrupt 
= 66, + } + + #[repr(u32)] + pub enum SubInterrupt { + Gpio = Interrupt::Gpio as u32, + Ata = Interrupt::Ata as u32, + Umd = Interrupt::Umd as u32, + Dmacplus = Interrupt::Dmacplus as u32, + Ge = Interrupt::Ge as u32, + Display = Interrupt::Vblank as u32, + } + + #[repr(u32)] + pub enum SceKernelIdListType { + Thread = 1, + Semaphore = 2, + EventFlag = 3, + Mbox = 4, + Vpl = 5, + Fpl = 6, + Mpipe = 7, + Callback = 8, + ThreadEventHandler = 9, + Alarm = 10, + VTimer = 11, + SleepThread = 64, + DelayThread = 65, + SuspendThread = 66, + DormantThread = 67, + } + + #[repr(i32)] + pub enum UsbCamResolution { + Px160_120 = 0, + Px176_144 = 1, + Px320_240 = 2, + Px352_288 = 3, + Px640_480 = 4, + Px1024_768 = 5, + Px1280_960 = 6, + Px480_272 = 7, + Px360_272 = 8, + } + + #[repr(i32)] + pub enum UsbCamResolutionEx { + Px160_120 = 0, + Px176_144 = 1, + Px320_240 = 2, + Px352_288 = 3, + Px360_272 = 4, + Px480_272 = 5, + Px640_480 = 6, + Px1024_768 = 7, + Px1280_960 = 8, + } + + #[repr(i32)] + pub enum UsbCamDelay { + NoDelay = 0, + Delay10Sec = 1, + Delay20Sec = 2, + Delay30Sec = 3, + } + + #[repr(i32)] + pub enum UsbCamFrameRate { + Fps3_75 = 0, + Fps5 = 1, + Fps7_5 = 2, + Fps10 = 3, + Fps15 = 4, + Fps20 = 5, + Fps30 = 6, + Fps60 = 7, + } + + #[repr(i32)] + pub enum UsbCamWb { + Auto = 0, + Daylight = 1, + Fluorescent = 2, + Incadescent = 3, + } + + #[repr(i32)] + pub enum UsbCamEffectMode { + Normal = 0, + Negative = 1, + Blackwhite = 2, + Sepia = 3, + Blue = 4, + Red = 5, + Green = 6, + } + + #[repr(i32)] + pub enum UsbCamEvLevel { + Pos2_0 = 0, + Pos1_7 = 1, + Pos1_5 = 2, + Pos1_3 = 3, + Pos1_0 = 4, + Pos0_7 = 5, + Pos0_5 = 6, + Pos0_3 = 7, + Zero = 8, + Neg0_3, + Neg0_5, + Neg0_7, + Neg1_0, + Neg1_3, + Neg1_5, + Neg1_7, + Neg2_0, + } + + #[repr(i32)] + pub enum RtcCheckValidError { + InvalidYear = -1, + InvalidMonth = -2, + InvalidDay = -3, + InvalidHour = -4, + InvalidMinutes = -5, + InvalidSeconds = -6, + InvalidMicroseconds = -7, + } + + #[repr(u32)] + pub enum PowerTick { + All = 0, + Suspend = 1, + Display = 6, + } + + #[repr(u32)] + pub enum IoAssignPerms { + RdWr = 0, + RdOnly = 1, + } + + #[repr(u32)] + pub enum IoWhence { + Set = 0, + Cur = 1, + End = 2, + } + + #[repr(u32)] + pub enum UmdType { + Game = 0x10, + Video = 0x20, + Audio = 0x40, + } + + #[repr(u32)] + pub enum GuPrimitive { + Points = 0, + Lines = 1, + LineStrip = 2, + Triangles = 3, + TriangleStrip = 4, + TriangleFan = 5, + Sprites = 6, + } + + #[repr(u32)] + pub enum PatchPrimitive { + Points = 0, + LineStrip = 2, + TriangleStrip = 4, + } + + #[repr(u32)] + pub enum GuState { + AlphaTest = 0, + DepthTest = 1, + ScissorTest = 2, + StencilTest = 3, + Blend = 4, + CullFace = 5, + Dither = 6, + Fog = 7, + ClipPlanes = 8, + Texture2D = 9, + Lighting = 10, + Light0 = 11, + Light1 = 12, + Light2 = 13, + Light3 = 14, + LineSmooth = 15, + PatchCullFace = 16, + ColorTest = 17, + ColorLogicOp = 18, + FaceNormalReverse = 19, + PatchFace = 20, + Fragment2X = 21, + } + + #[repr(u32)] + pub enum MatrixMode { + Projection = 0, + View = 1, + Model = 2, + Texture = 3, + } + + #[repr(u32)] + pub enum TexturePixelFormat { + Psm5650 = 0, + Psm5551 = 1, + Psm4444 = 2, + Psm8888 = 3, + PsmT4 = 4, + PsmT8 = 5, + PsmT16 = 6, + PsmT32 = 7, + PsmDxt1 = 8, + PsmDxt3 = 9, + PsmDxt5 = 10, + } + + #[repr(u32)] + pub enum SplineMode { + FillFill = 0, + OpenFill = 1, + FillOpen = 2, + OpenOpen = 3, + } + + #[repr(u32)] + pub enum ShadingModel { + Flat = 0, + Smooth = 1, + } + + #[repr(u32)] + pub enum LogicalOperation { + Clear = 0, + And = 1, + 
AndReverse = 2, + Copy = 3, + AndInverted = 4, + Noop = 5, + Xor = 6, + Or = 7, + Nor = 8, + Equiv = 9, + Inverted = 10, + OrReverse = 11, + CopyInverted = 12, + OrInverted = 13, + Nand = 14, + Set = 15, + } + + #[repr(u32)] + pub enum TextureFilter { + Nearest = 0, + Linear = 1, + NearestMipmapNearest = 4, + LinearMipmapNearest = 5, + NearestMipmapLinear = 6, + LinearMipmapLinear = 7, + } + + #[repr(u32)] + pub enum TextureMapMode { + TextureCoords = 0, + TextureMatrix = 1, + EnvironmentMap = 2, + } + + #[repr(u32)] + pub enum TextureLevelMode { + Auto = 0, + Const = 1, + Slope = 2, + } + + #[repr(u32)] + pub enum TextureProjectionMapMode { + Position = 0, + Uv = 1, + NormalizedNormal = 2, + Normal = 3, + } + + #[repr(u32)] + pub enum GuTexWrapMode { + Repeat = 0, + Clamp = 1, + } + + #[repr(u32)] + pub enum FrontFaceDirection { + Clockwise = 0, + CounterClockwise = 1, + } + + #[repr(u32)] + pub enum AlphaFunc { + Never = 0, + Always, + Equal, + NotEqual, + Less, + LessOrEqual, + Greater, + GreaterOrEqual, + } + + #[repr(u32)] + pub enum StencilFunc { + Never = 0, + Always, + Equal, + NotEqual, + Less, + LessOrEqual, + Greater, + GreaterOrEqual, + } + + #[repr(u32)] + pub enum ColorFunc { + Never = 0, + Always, + Equal, + NotEqual, + } + + #[repr(u32)] + pub enum DepthFunc { + Never = 0, + Always, + Equal, + NotEqual, + Less, + LessOrEqual, + Greater, + GreaterOrEqual, + } + + #[repr(u32)] + pub enum TextureEffect { + Modulate = 0, + Decal = 1, + Blend = 2, + Replace = 3, + Add = 4, + } + + #[repr(u32)] + pub enum TextureColorComponent { + Rgb = 0, + Rgba = 1, + } + + #[repr(u32)] + pub enum MipmapLevel { + None = 0, + Level1, + Level2, + Level3, + Level4, + Level5, + Level6, + Level7, + } + + #[repr(u32)] + pub enum BlendOp { + Add = 0, + Subtract = 1, + ReverseSubtract = 2, + Min = 3, + Max = 4, + Abs = 5, + } + + #[repr(u32)] + pub enum BlendSrc { + SrcColor = 0, + OneMinusSrcColor = 1, + SrcAlpha = 2, + OneMinusSrcAlpha = 3, + Fix = 10, + } + + #[repr(u32)] + pub enum BlendDst { + DstColor = 0, + OneMinusDstColor = 1, + DstAlpha = 4, + OneMinusDstAlpha = 5, + Fix = 10, + } + + #[repr(u32)] + pub enum StencilOperation { + Keep = 0, + Zero = 1, + Replace = 2, + Invert = 3, + Incr = 4, + Decr = 5, + } + + #[repr(u32)] + pub enum LightMode { + SingleColor = 0, + SeparateSpecularColor = 1, + } + + #[repr(u32)] + pub enum LightType { + Directional = 0, + Pointlight = 1, + Spotlight = 2, + } + + #[repr(u32)] + pub enum GuContextType { + Direct = 0, + Call = 1, + Send = 2, + } + + #[repr(u32)] + pub enum GuQueueMode { + Tail = 0, + Head = 1, + } + + #[repr(u32)] + pub enum GuSyncMode { + Finish = 0, + Signal = 1, + Done = 2, + List = 3, + Send = 4, + } + + #[repr(u32)] + pub enum GuSyncBehavior { + Wait = 0, + NoWait = 1, + } + + #[repr(u32)] + pub enum GuCallbackId { + Signal = 1, + Finish = 4, + } + + #[repr(u32)] + pub enum SignalBehavior { + Suspend = 1, + Continue = 2, + } + + #[repr(u32)] + pub enum ClutPixelFormat { + Psm5650 = 0, + Psm5551 = 1, + Psm4444 = 2, + Psm8888 = 3, + } + + #[repr(C)] + pub enum KeyType { + Directory = 1, + Integer = 2, + String = 3, + Bytes = 4, + } + + #[repr(u32)] + pub enum UtilityMsgDialogMode { + Error, + Text, + } + + #[repr(u32)] + pub enum UtilityMsgDialogPressed { + Unknown1, + Yes, + No, + Back, + } + + #[repr(u32)] + pub enum UtilityDialogButtonAccept { + Circle, + Cross, + } + + #[repr(u32)] + pub enum SceUtilityOskInputLanguage { + Default, + Japanese, + English, + French, + Spanish, + German, + Italian, + Dutch, + Portugese, + Russian, + Korean, 
+ } + + #[repr(u32)] + pub enum SceUtilityOskInputType { + All, + LatinDigit, + LatinSymbol, + LatinLowercase = 4, + LatinUppercase = 8, + JapaneseDigit = 0x100, + JapaneseSymbol = 0x200, + JapaneseLowercase = 0x400, + JapaneseUppercase = 0x800, + JapaneseHiragana = 0x1000, + JapaneseHalfWidthKatakana = 0x2000, + JapaneseKatakana = 0x4000, + JapaneseKanji = 0x8000, + RussianLowercase = 0x10000, + RussianUppercase = 0x20000, + Korean = 0x40000, + Url = 0x80000, + } + + #[repr(u32)] + pub enum SceUtilityOskState { + None, + Initializing, + Initialized, + Visible, + Quit, + Finished, + } + + #[repr(u32)] + pub enum SceUtilityOskResult { + Unchanged, + Cancelled, + Changed, + } + + #[repr(u32)] + pub enum SystemParamLanguage { + Japanese, + English, + French, + Spanish, + German, + Italian, + Dutch, + Portugese, + Russian, + Korean, + ChineseTraditional, + ChineseSimplified, + } + + #[repr(u32)] + pub enum SystemParamId { + StringNickname = 1, + AdhocChannel, + WlanPowerSave, + DateFormat, + TimeFormat, + Timezone, + DaylightSavings, + Language, + Unknown, + } + + #[repr(u32)] + pub enum SystemParamAdhocChannel { + ChannelAutomatic = 0, + Channel1 = 1, + Channel6 = 6, + Channel11 = 11, + } + + #[repr(u32)] + pub enum SystemParamWlanPowerSaveState { + Off, + On, + } + + #[repr(u32)] + pub enum SystemParamDateFormat { + YYYYMMDD, + MMDDYYYY, + DDMMYYYY, + } + + #[repr(u32)] + pub enum SystemParamTimeFormat { + Hour24, + Hour12, + } + + #[repr(u32)] + pub enum SystemParamDaylightSavings { + Std, + Dst, + } + + #[repr(u32)] + pub enum AvModule { + AvCodec, + SasCore, + Atrac3Plus, + MpegBase, + Mp3, + Vaudio, + Aac, + G729, + } + + #[repr(u32)] + pub enum Module { + NetCommon = 0x100, + NetAdhoc, + NetInet, + NetParseUri, + NetHttp, + NetSsl, + + UsbPspCm = 0x200, + UsbMic, + UsbCam, + UsbGps, + + AvCodec = 0x300, + AvSascore, + AvAtrac3Plus, + AvMpegBase, + AvMp3, + AvVaudio, + AvAac, + AvG729, + + NpCommon = 0x400, + NpService, + NpMatching2, + NpDrm = 0x500, + + Irda = 0x600, + } + + #[repr(u32)] + pub enum NetModule { + NetCommon = 1, + NetAdhoc, + NetInet, + NetParseUri, + NetHttp, + NetSsl, + } + + #[repr(u32)] + pub enum UsbModule { + UsbPspCm = 1, + UsbAcc, + UsbMic, + UsbCam, + UsbGps, + } + + #[repr(u32)] + pub enum NetParam { + Name, + Ssid, + Secure, + WepKey, + IsStaticIp, + Ip, + NetMask, + Route, + ManualDns, + PrimaryDns, + SecondaryDns, + ProxyUser, + ProxyPass, + UseProxy, + ProxyServer, + ProxyPort, + Unknown1, + Unknown2, + } + + #[repr(u32)] + pub enum UtilityNetconfAction { + ConnectAP, + DisplayStatus, + ConnectAdhoc, + } + + #[repr(u32)] + pub enum UtilitySavedataMode { + AutoLoad, + AutoSave, + Load, + Save, + ListLoad, + ListSave, + ListDelete, + Delete, + } + + #[repr(u32)] + pub enum UtilitySavedataFocus { + Unknown1, + FirstList, + LastList, + Latest, + Oldest, + Unknown2, + Unknown3, + FirstEmpty, + LastEmpty, + } + + #[repr(u32)] + pub enum UtilityGameSharingMode { + Single = 1, + Multiple, + } + + #[repr(u32)] + pub enum UtilityGameSharingDataType { + File = 1, + Memory, + } + + #[repr(u32)] + pub enum UtilityHtmlViewerInterfaceMode { + Full, + Limited, + None, + } + + #[repr(u32)] + pub enum UtilityHtmlViewerCookieMode { + Disabled = 0, + Enabled, + Confirm, + Default, + } + + #[repr(u32)] + pub enum UtilityHtmlViewerTextSize { + Large, + Normal, + Small, + } + + #[repr(u32)] + pub enum UtilityHtmlViewerDisplayMode { + Normal, + Fit, + SmartFit, + } + + #[repr(u32)] + pub enum UtilityHtmlViewerConnectMode { + Last, + ManualOnce, + ManualAll, + } + + #[repr(u32)] + 
pub enum UtilityHtmlViewerDisconnectMode { + Enable, + Disable, + Confirm, + } + + #[repr(u32)] + pub enum ScePspnetAdhocPtpState { + Closed, + Listen, + SynSent, + SynReceived, + Established, + } + + #[repr(u32)] + pub enum AdhocMatchingMode { + Host = 1, + Client, + Ptp, + } + + #[repr(u32)] + pub enum ApctlState { + Disconnected, + Scanning, + Joining, + GettingIp, + GotIp, + EapAuth, + KeyExchange, + } + + #[repr(u32)] + pub enum ApctlEvent { + ConnectRequest, + ScanRequest, + ScanComplete, + Established, + GetIp, + DisconnectRequest, + Error, + Info, + EapAuth, + KeyExchange, + Reconnect, + } + + #[repr(u32)] + pub enum ApctlInfo { + ProfileName, + Bssid, + Ssid, + SsidLength, + SecurityType, + Strength, + Channel, + PowerSave, + Ip, + SubnetMask, + Gateway, + PrimaryDns, + SecondaryDns, + UseProxy, + ProxyUrl, + ProxyPort, + EapType, + StartBrowser, + Wifisp, + } + + #[repr(u32)] + pub enum ApctlInfoSecurityType { + None, + Wep, + Wpa, + } + + #[repr(u32)] + pub enum HttpMethod { + Get, + Post, + Head, + } + + #[repr(u32)] + pub enum HttpAuthType { + Basic, + Digest, + } +} + +s_paren! { + #[repr(transparent)] + pub struct SceUid(pub i32); + + #[repr(transparent)] + #[allow(dead_code)] + pub struct SceMpeg(*mut *mut c_void); + + #[repr(transparent)] + #[allow(dead_code)] + pub struct SceMpegStream(*mut c_void); + + #[repr(transparent)] + pub struct Mp3Handle(pub i32); + + #[repr(transparent)] + #[allow(dead_code)] + pub struct RegHandle(u32); +} + +s! { + pub struct sockaddr { + pub sa_len: u8, + pub sa_family: u8, + pub sa_data: [u8; 14], + } + + pub struct in_addr { + pub s_addr: u32, + } + + pub struct AudioInputParams { + pub unknown1: i32, + pub gain: i32, + pub unknown2: i32, + pub unknown3: i32, + pub unknown4: i32, + pub unknown5: i32, + } + + pub struct Atrac3BufferInfo { + pub puc_write_position_first_buf: *mut u8, + pub ui_writable_byte_first_buf: u32, + pub ui_min_write_byte_first_buf: u32, + pub ui_read_position_first_buf: u32, + pub puc_write_position_second_buf: *mut u8, + pub ui_writable_byte_second_buf: u32, + pub ui_min_write_byte_second_buf: u32, + pub ui_read_position_second_buf: u32, + } + + pub struct SceCtrlData { + pub timestamp: u32, + pub buttons: i32, + pub lx: u8, + pub ly: u8, + pub rsrv: [u8; 6], + } + + pub struct SceCtrlLatch { + pub ui_make: u32, + pub ui_break: u32, + pub ui_press: u32, + pub ui_release: u32, + } + + pub struct GeStack { + pub stack: [u32; 8], + } + + // FIXME(1.0): This should not implement `PartialEq` + #[allow(unpredictable_function_pointer_comparisons)] + pub struct GeCallbackData { + pub signal_func: Option, + pub signal_arg: *mut c_void, + pub finish_func: Option, + pub finish_arg: *mut c_void, + } + + pub struct GeListArgs { + pub size: u32, + pub context: *mut GeContext, + pub num_stacks: u32, + pub stacks: *mut GeStack, + } + + pub struct GeBreakParam { + pub buf: [u32; 4], + } + + pub struct SceKernelLoadExecParam { + pub size: usize, + pub args: usize, + pub argp: *mut c_void, + pub key: *const u8, + } + + pub struct timeval { + pub tv_sec: i32, + pub tv_usec: i32, + } + + pub struct timezone { + pub tz_minutes_west: i32, + pub tz_dst_time: i32, + } + + pub struct IntrHandlerOptionParam { + size: i32, + entry: u32, + common: u32, + gp: u32, + intr_code: u16, + sub_count: u16, + intr_level: u16, + enabled: u16, + calls: u32, + field_1c: u32, + total_clock_lo: u32, + total_clock_hi: u32, + min_clock_lo: u32, + min_clock_hi: u32, + max_clock_lo: u32, + max_clock_hi: u32, + } + + pub struct SceKernelLMOption { + pub size: 
usize, + pub m_pid_text: SceUid, + pub m_pid_data: SceUid, + pub flags: u32, + pub position: u8, + pub access: u8, + pub c_reserved: [u8; 2usize], + } + + pub struct SceKernelSMOption { + pub size: usize, + pub m_pid_stack: SceUid, + pub stack_size: usize, + pub priority: i32, + pub attribute: u32, + } + + pub struct SceKernelModuleInfo { + pub size: usize, + pub n_segment: u8, + pub reserved: [u8; 3usize], + pub segment_addr: [i32; 4usize], + pub segment_size: [i32; 4usize], + pub entry_addr: u32, + pub gp_value: u32, + pub text_addr: u32, + pub text_size: u32, + pub data_size: u32, + pub bss_size: u32, + pub attribute: u16, + pub version: [u8; 2usize], + pub name: [u8; 28usize], + } + + pub struct DebugProfilerRegs { + pub enable: u32, + pub systemck: u32, + pub cpuck: u32, + pub internal: u32, + pub memory: u32, + pub copz: u32, + pub vfpu: u32, + pub sleep: u32, + pub bus_access: u32, + pub uncached_load: u32, + pub uncached_store: u32, + pub cached_load: u32, + pub cached_store: u32, + pub i_miss: u32, + pub d_miss: u32, + pub d_writeback: u32, + pub cop0_inst: u32, + pub fpu_inst: u32, + pub vfpu_inst: u32, + pub local_bus: u32, + } + + pub struct SceKernelSysClock { + pub low: u32, + pub hi: u32, + } + + pub struct SceKernelThreadOptParam { + pub size: usize, + pub stack_mpid: SceUid, + } + + // FIXME(1.0): This should not implement `PartialEq` + #[allow(unpredictable_function_pointer_comparisons)] + pub struct SceKernelThreadInfo { + pub size: usize, + pub name: [u8; 32], + pub attr: u32, + pub status: i32, + pub entry: SceKernelThreadEntry, + pub stack: *mut c_void, + pub stack_size: i32, + pub gp_reg: *mut c_void, + pub init_priority: i32, + pub current_priority: i32, + pub wait_type: i32, + pub wait_id: SceUid, + pub wakeup_count: i32, + pub exit_status: i32, + pub run_clocks: SceKernelSysClock, + pub intr_preempt_count: u32, + pub thread_preempt_count: u32, + pub release_count: u32, + } + + pub struct SceKernelThreadRunStatus { + pub size: usize, + pub status: i32, + pub current_priority: i32, + pub wait_type: i32, + pub wait_id: i32, + pub wakeup_count: i32, + pub run_clocks: SceKernelSysClock, + pub intr_preempt_count: u32, + pub thread_preempt_count: u32, + pub release_count: u32, + } + + pub struct SceKernelSemaOptParam { + pub size: usize, + } + + pub struct SceKernelSemaInfo { + pub size: usize, + pub name: [u8; 32], + pub attr: u32, + pub init_count: i32, + pub current_count: i32, + pub max_count: i32, + pub num_wait_threads: i32, + } + + pub struct SceKernelEventFlagInfo { + pub size: usize, + pub name: [u8; 32], + pub attr: u32, + pub init_pattern: u32, + pub current_pattern: u32, + pub num_wait_threads: i32, + } + + pub struct SceKernelEventFlagOptParam { + pub size: usize, + } + + pub struct SceKernelMbxOptParam { + pub size: usize, + } + + pub struct SceKernelMbxInfo { + pub size: usize, + pub name: [u8; 32usize], + pub attr: u32, + pub num_wait_threads: i32, + pub num_messages: i32, + pub first_message: *mut c_void, + } + + // FIXME(1.0): This should not implement `PartialEq` + #[allow(unpredictable_function_pointer_comparisons)] + pub struct SceKernelVTimerInfo { + pub size: usize, + pub name: [u8; 32], + pub active: i32, + pub base: SceKernelSysClock, + pub current: SceKernelSysClock, + pub schedule: SceKernelSysClock, + pub handler: SceKernelVTimerHandler, + pub common: *mut c_void, + } + + // FIXME(1.0): This should not implement `PartialEq` + #[allow(unpredictable_function_pointer_comparisons)] + pub struct SceKernelThreadEventHandlerInfo { + pub size: usize, 
+ pub name: [u8; 32], + pub thread_id: SceUid, + pub mask: i32, + pub handler: SceKernelThreadEventHandler, + pub common: *mut c_void, + } + + // FIXME(1.0): This should not implement `PartialEq` + #[allow(unpredictable_function_pointer_comparisons)] + pub struct SceKernelAlarmInfo { + pub size: usize, + pub schedule: SceKernelSysClock, + pub handler: SceKernelAlarmHandler, + pub common: *mut c_void, + } + + pub struct SceKernelSystemStatus { + pub size: usize, + pub status: u32, + pub idle_clocks: SceKernelSysClock, + pub comes_out_of_idle_count: u32, + pub thread_switch_count: u32, + pub vfpu_switch_count: u32, + } + + pub struct SceKernelMppInfo { + pub size: usize, + pub name: [u8; 32], + pub attr: u32, + pub buf_size: i32, + pub free_size: i32, + pub num_send_wait_threads: i32, + pub num_receive_wait_threads: i32, + } + + pub struct SceKernelVplOptParam { + pub size: usize, + } + + pub struct SceKernelVplInfo { + pub size: usize, + pub name: [u8; 32], + pub attr: u32, + pub pool_size: i32, + pub free_size: i32, + pub num_wait_threads: i32, + } + + pub struct SceKernelFplOptParam { + pub size: usize, + } + + pub struct SceKernelFplInfo { + pub size: usize, + pub name: [u8; 32usize], + pub attr: u32, + pub block_size: i32, + pub num_blocks: i32, + pub free_blocks: i32, + pub num_wait_threads: i32, + } + + pub struct SceKernelVTimerOptParam { + pub size: usize, + } + + // FIXME(1.0): This should not implement `PartialEq` + #[allow(unpredictable_function_pointer_comparisons)] + pub struct SceKernelCallbackInfo { + pub size: usize, + pub name: [u8; 32usize], + pub thread_id: SceUid, + pub callback: SceKernelCallbackFunction, + pub common: *mut c_void, + pub notify_count: i32, + pub notify_arg: i32, + } + + pub struct UsbCamSetupStillParam { + pub size: i32, + pub resolution: UsbCamResolution, + pub jpeg_size: i32, + pub reverse_flags: i32, + pub delay: UsbCamDelay, + pub comp_level: i32, + } + + pub struct UsbCamSetupStillExParam { + pub size: i32, + pub unk: u32, + pub resolution: UsbCamResolutionEx, + pub jpeg_size: i32, + pub comp_level: i32, + pub unk2: u32, + pub unk3: u32, + pub flip: i32, + pub mirror: i32, + pub delay: UsbCamDelay, + pub unk4: [u32; 5usize], + } + + pub struct UsbCamSetupVideoParam { + pub size: i32, + pub resolution: UsbCamResolution, + pub framerate: UsbCamFrameRate, + pub white_balance: UsbCamWb, + pub saturation: i32, + pub brightness: i32, + pub contrast: i32, + pub sharpness: i32, + pub effect_mode: UsbCamEffectMode, + pub frame_size: i32, + pub unk: u32, + pub evl_evel: UsbCamEvLevel, + } + + pub struct UsbCamSetupVideoExParam { + pub size: i32, + pub unk: u32, + pub resolution: UsbCamResolutionEx, + pub framerate: UsbCamFrameRate, + pub unk2: u32, + pub unk3: u32, + pub white_balance: UsbCamWb, + pub saturation: i32, + pub brightness: i32, + pub contrast: i32, + pub sharpness: i32, + pub unk4: u32, + pub unk5: u32, + pub unk6: [u32; 3usize], + pub effect_mode: UsbCamEffectMode, + pub unk7: u32, + pub unk8: u32, + pub unk9: u32, + pub unk10: u32, + pub unk11: u32, + pub frame_size: i32, + pub unk12: u32, + pub ev_level: UsbCamEvLevel, + } + + pub struct ScePspDateTime { + pub year: u16, + pub month: u16, + pub day: u16, + pub hour: u16, + pub minutes: u16, + pub seconds: u16, + pub microseconds: u32, + } + + pub struct SceIoStat { + pub st_mode: i32, + pub st_attr: i32, + pub st_size: i64, + pub st_ctime: ScePspDateTime, + pub st_atime: ScePspDateTime, + pub st_mtime: ScePspDateTime, + pub st_private: [u32; 6usize], + } + + pub struct UmdInfo { + pub size: 
u32, + pub type_: UmdType, + } + + // FIXME(1.0): This should not implement `PartialEq` + #[allow(unpredictable_function_pointer_comparisons)] + pub struct SceMpegRingbuffer { + pub packets: i32, + pub unk0: u32, + pub unk1: u32, + pub unk2: u32, + pub unk3: u32, + pub data: *mut c_void, + pub callback: SceMpegRingbufferCb, + pub cb_param: *mut c_void, + pub unk4: u32, + pub unk5: u32, + pub sce_mpeg: *mut c_void, + } + + pub struct SceMpegAu { + pub pts_msb: u32, + pub pts: u32, + pub dts_msb: u32, + pub dts: u32, + pub es_buffer: u32, + pub au_size: u32, + } + + pub struct SceMpegAvcMode { + pub unk0: i32, + pub pixel_format: super::DisplayPixelFormat, + } + + #[repr(align(64))] + pub struct SceMpegLLI { + pub src: *mut c_void, + pub dst: *mut c_void, + pub next: *mut c_void, + pub size: i32, + } + + #[repr(align(64))] + pub struct SceMpegYCrCbBuffer { + pub frame_buffer_height16: i32, + pub frame_buffer_width16: i32, + pub unknown: i32, + pub unknown2: i32, + pub y_buffer: *mut c_void, + pub y_buffer2: *mut c_void, + pub cr_buffer: *mut c_void, + pub cb_buffer: *mut c_void, + pub cr_buffer2: *mut c_void, + pub cb_buffer2: *mut c_void, + + pub frame_height: i32, + pub frame_width: i32, + pub frame_buffer_width: i32, + pub unknown3: [i32; 11usize], + } + + pub struct ScePspSRect { + pub x: i16, + pub y: i16, + pub w: i16, + pub h: i16, + } + + pub struct ScePspIRect { + pub x: i32, + pub y: i32, + pub w: i32, + pub h: i32, + } + + pub struct ScePspL64Rect { + pub x: u64, + pub y: u64, + pub w: u64, + pub h: u64, + } + + pub struct ScePspSVector2 { + pub x: i16, + pub y: i16, + } + + pub struct ScePspIVector2 { + pub x: i32, + pub y: i32, + } + + pub struct ScePspL64Vector2 { + pub x: u64, + pub y: u64, + } + + pub struct ScePspSVector3 { + pub x: i16, + pub y: i16, + pub z: i16, + } + + pub struct ScePspIVector3 { + pub x: i32, + pub y: i32, + pub z: i32, + } + + pub struct ScePspL64Vector3 { + pub x: u64, + pub y: u64, + pub z: u64, + } + + pub struct ScePspSVector4 { + pub x: i16, + pub y: i16, + pub z: i16, + pub w: i16, + } + + pub struct ScePspIVector4 { + pub x: i32, + pub y: i32, + pub z: i32, + pub w: i32, + } + + pub struct ScePspL64Vector4 { + pub x: u64, + pub y: u64, + pub z: u64, + pub w: u64, + } + + pub struct ScePspIMatrix2 { + pub x: ScePspIVector2, + pub y: ScePspIVector2, + } + + pub struct ScePspIMatrix3 { + pub x: ScePspIVector3, + pub y: ScePspIVector3, + pub z: ScePspIVector3, + } + + #[repr(align(16))] + pub struct ScePspIMatrix4 { + pub x: ScePspIVector4, + pub y: ScePspIVector4, + pub z: ScePspIVector4, + pub w: ScePspIVector4, + } + + pub struct ScePspIMatrix4Unaligned { + pub x: ScePspIVector4, + pub y: ScePspIVector4, + pub z: ScePspIVector4, + pub w: ScePspIVector4, + } + + pub struct SceMp3InitArg { + pub mp3_stream_start: u32, + pub unk1: u32, + pub mp3_stream_end: u32, + pub unk2: u32, + pub mp3_buf: *mut c_void, + pub mp3_buf_size: i32, + pub pcm_buf: *mut c_void, + pub pcm_buf_size: i32, + } + + pub struct OpenPSID { + pub data: [u8; 16usize], + } + + pub struct UtilityDialogCommon { + pub size: u32, + pub language: SystemParamLanguage, + pub button_accept: UtilityDialogButtonAccept, + pub graphics_thread: i32, + pub access_thread: i32, + pub font_thread: i32, + pub sound_thread: i32, + pub result: i32, + pub reserved: [i32; 4usize], + } + + pub struct UtilityNetconfAdhoc { + pub name: [u8; 8usize], + pub timeout: u32, + } + + pub struct UtilityNetconfData { + pub base: UtilityDialogCommon, + pub action: UtilityNetconfAction, + pub adhocparam: *mut 
UtilityNetconfAdhoc, + pub hotspot: i32, + pub hotspot_connected: i32, + pub wifisp: i32, + } + + pub struct UtilitySavedataFileData { + pub buf: *mut c_void, + pub buf_size: usize, + pub size: usize, + pub unknown: i32, + } + + pub struct UtilitySavedataListSaveNewData { + pub icon0: UtilitySavedataFileData, + pub title: *mut u8, + } + + pub struct UtilityGameSharingParams { + pub base: UtilityDialogCommon, + pub unknown1: i32, + pub unknown2: i32, + pub name: [u8; 8usize], + pub unknown3: i32, + pub unknown4: i32, + pub unknown5: i32, + pub result: i32, + pub filepath: *mut u8, + pub mode: UtilityGameSharingMode, + pub datatype: UtilityGameSharingDataType, + pub data: *mut c_void, + pub datasize: u32, + } + + pub struct UtilityHtmlViewerParam { + pub base: UtilityDialogCommon, + pub memaddr: *mut c_void, + pub memsize: u32, + pub unknown1: i32, + pub unknown2: i32, + pub initialurl: *mut u8, + pub numtabs: u32, + pub interfacemode: UtilityHtmlViewerInterfaceMode, + pub options: i32, + pub dldirname: *mut u8, + pub dlfilename: *mut u8, + pub uldirname: *mut u8, + pub ulfilename: *mut u8, + pub cookiemode: UtilityHtmlViewerCookieMode, + pub unknown3: u32, + pub homeurl: *mut u8, + pub textsize: UtilityHtmlViewerTextSize, + pub displaymode: UtilityHtmlViewerDisplayMode, + pub connectmode: UtilityHtmlViewerConnectMode, + pub disconnectmode: UtilityHtmlViewerDisconnectMode, + pub memused: u32, + pub unknown4: [i32; 10usize], + } + + pub struct SceUtilityOskData { + pub unk_00: i32, + pub unk_04: i32, + pub language: SceUtilityOskInputLanguage, + pub unk_12: i32, + pub inputtype: SceUtilityOskInputType, + pub lines: i32, + pub unk_24: i32, + pub desc: *mut u16, + pub intext: *mut u16, + pub outtextlength: i32, + pub outtext: *mut u16, + pub result: SceUtilityOskResult, + pub outtextlimit: i32, + } + + pub struct SceUtilityOskParams { + pub base: UtilityDialogCommon, + pub datacount: i32, + pub data: *mut SceUtilityOskData, + pub state: SceUtilityOskState, + pub unk_60: i32, + } + + pub struct SceNetMallocStat { + pub pool: i32, + pub maximum: i32, + pub free: i32, + } + + pub struct SceNetAdhocctlAdhocId { + pub unknown: i32, + pub adhoc_id: [u8; 9usize], + pub unk: [u8; 3usize], + } + + pub struct SceNetAdhocctlScanInfo { + pub next: *mut SceNetAdhocctlScanInfo, + pub channel: i32, + pub name: [u8; 8usize], + pub bssid: [u8; 6usize], + pub unknown: [u8; 2usize], + pub unknown2: i32, + } + + pub struct SceNetAdhocctlGameModeInfo { + pub count: i32, + pub macs: [[u8; 6usize]; 16usize], + } + + pub struct SceNetAdhocPtpStat { + pub next: *mut SceNetAdhocPtpStat, + pub ptp_id: i32, + pub mac: [u8; 6usize], + pub peermac: [u8; 6usize], + pub port: u16, + pub peerport: u16, + pub sent_data: u32, + pub rcvd_data: u32, + pub state: ScePspnetAdhocPtpState, + } + + pub struct SceNetAdhocPdpStat { + pub next: *mut SceNetAdhocPdpStat, + pub pdp_id: i32, + pub mac: [u8; 6usize], + pub port: u16, + pub rcvd_data: u32, + } + + pub struct AdhocPoolStat { + pub size: i32, + pub maxsize: i32, + pub freesize: i32, + } +} + +s_no_extra_traits! 
{ + pub struct GeContext { + pub context: [u32; 512], + } + + pub struct SceKernelUtilsSha1Context { + pub h: [u32; 5usize], + pub us_remains: u16, + pub us_computed: u16, + pub ull_total_len: u64, + pub buf: [u8; 64usize], + } + + pub struct SceKernelUtilsMt19937Context { + pub count: u32, + pub state: [u32; 624usize], + } + + pub struct SceKernelUtilsMd5Context { + pub h: [u32; 4usize], + pub pad: u32, + pub us_remains: u16, + pub us_computed: u16, + pub ull_total_len: u64, + pub buf: [u8; 64usize], + } + + pub struct SceIoDirent { + pub d_stat: SceIoStat, + pub d_name: [u8; 256usize], + pub d_private: *mut c_void, + pub dummy: i32, + } + + pub struct ScePspFRect { + pub x: f32, + pub y: f32, + pub w: f32, + pub h: f32, + } + + #[repr(align(16))] + pub struct ScePspFVector3 { + pub x: f32, + pub y: f32, + pub z: f32, + } + + #[repr(align(16))] + pub struct ScePspFVector4 { + pub x: f32, + pub y: f32, + pub z: f32, + pub w: f32, + } + + pub struct ScePspFVector4Unaligned { + pub x: f32, + pub y: f32, + pub z: f32, + pub w: f32, + } + + pub struct ScePspFVector2 { + pub x: f32, + pub y: f32, + } + + pub struct ScePspFMatrix2 { + pub x: ScePspFVector2, + pub y: ScePspFVector2, + } + + pub struct ScePspFMatrix3 { + pub x: ScePspFVector3, + pub y: ScePspFVector3, + pub z: ScePspFVector3, + } + + #[repr(align(16))] + pub struct ScePspFMatrix4 { + pub x: ScePspFVector4, + pub y: ScePspFVector4, + pub z: ScePspFVector4, + pub w: ScePspFVector4, + } + + pub struct ScePspFMatrix4Unaligned { + pub x: ScePspFVector4, + pub y: ScePspFVector4, + pub z: ScePspFVector4, + pub w: ScePspFVector4, + } + + pub union ScePspVector3 { + pub fv: ScePspFVector3, + pub iv: ScePspIVector3, + pub f: [f32; 3usize], + pub i: [i32; 3usize], + } + + pub union ScePspVector4 { + pub fv: ScePspFVector4, + pub iv: ScePspIVector4, + pub qw: u128, + pub f: [f32; 4usize], + pub i: [i32; 4usize], + } + + pub union ScePspMatrix2 { + pub fm: ScePspFMatrix2, + pub im: ScePspIMatrix2, + pub fv: [ScePspFVector2; 2usize], + pub iv: [ScePspIVector2; 2usize], + pub v: [ScePspVector2; 2usize], + pub f: [[f32; 2usize]; 2usize], + pub i: [[i32; 2usize]; 2usize], + } + + pub union ScePspMatrix3 { + pub fm: ScePspFMatrix3, + pub im: ScePspIMatrix3, + pub fv: [ScePspFVector3; 3usize], + pub iv: [ScePspIVector3; 3usize], + pub v: [ScePspVector3; 3usize], + pub f: [[f32; 3usize]; 3usize], + pub i: [[i32; 3usize]; 3usize], + } + + pub union ScePspVector2 { + pub fv: ScePspFVector2, + pub iv: ScePspIVector2, + pub f: [f32; 2usize], + pub i: [i32; 2usize], + } + + pub union ScePspMatrix4 { + pub fm: ScePspFMatrix4, + pub im: ScePspIMatrix4, + pub fv: [ScePspFVector4; 4usize], + pub iv: [ScePspIVector4; 4usize], + pub v: [ScePspVector4; 4usize], + pub f: [[f32; 4usize]; 4usize], + pub i: [[i32; 4usize]; 4usize], + } + + pub struct Key { + pub key_type: KeyType, + pub name: [u8; 256usize], + pub name_len: u32, + pub unk2: u32, + pub unk3: u32, + } + + pub struct UtilityMsgDialogParams { + pub base: UtilityDialogCommon, + pub unknown: i32, + pub mode: UtilityMsgDialogMode, + pub error_value: u32, + pub message: [u8; 512usize], + pub options: i32, + pub button_pressed: UtilityMsgDialogPressed, + } + + pub union UtilityNetData { + pub as_uint: u32, + pub as_string: [u8; 128usize], + } + + pub struct UtilitySavedataSFOParam { + pub title: [u8; 128usize], + pub savedata_title: [u8; 128usize], + pub detail: [u8; 1024usize], + pub parental_level: u8, + pub unknown: [u8; 3usize], + } + + pub struct SceUtilitySavedataParam { + pub base: 
UtilityDialogCommon, + pub mode: UtilitySavedataMode, + pub unknown1: i32, + pub overwrite: i32, + pub game_name: [u8; 13usize], + pub reserved: [u8; 3usize], + pub save_name: [u8; 20usize], + pub save_name_list: *mut [u8; 20usize], + pub file_name: [u8; 13usize], + pub reserved1: [u8; 3usize], + pub data_buf: *mut c_void, + pub data_buf_size: usize, + pub data_size: usize, + pub sfo_param: UtilitySavedataSFOParam, + pub icon0_file_data: UtilitySavedataFileData, + pub icon1_file_data: UtilitySavedataFileData, + pub pic1_file_data: UtilitySavedataFileData, + pub snd0_file_data: UtilitySavedataFileData, + pub new_data: *mut UtilitySavedataListSaveNewData, + pub focus: UtilitySavedataFocus, + pub unknown2: [i32; 4usize], + pub key: [u8; 16], + pub unknown3: [u8; 20], + } + + pub struct SceNetAdhocctlPeerInfo { + pub next: *mut SceNetAdhocctlPeerInfo, + pub nickname: [u8; 128usize], + pub mac: [u8; 6usize], + pub unknown: [u8; 6usize], + pub timestamp: u32, + } + + pub struct SceNetAdhocctlParams { + pub channel: i32, + pub name: [u8; 8usize], + pub bssid: [u8; 6usize], + pub nickname: [u8; 128usize], + } + + pub union SceNetApctlInfo { + pub name: [u8; 64usize], + pub bssid: [u8; 6usize], + pub ssid: [u8; 32usize], + pub ssid_length: u32, + pub security_type: u32, + pub strength: u8, + pub channel: u8, + pub power_save: u8, + pub ip: [u8; 16usize], + pub sub_net_mask: [u8; 16usize], + pub gateway: [u8; 16usize], + pub primary_dns: [u8; 16usize], + pub secondary_dns: [u8; 16usize], + pub use_proxy: u32, + pub proxy_url: [u8; 128usize], + pub proxy_port: u16, + pub eap_type: u32, + pub start_browser: u32, + pub wifisp: u32, + } +} + +pub const INT_MIN: c_int = -2147483648; +pub const INT_MAX: c_int = 2147483647; + +pub const AUDIO_VOLUME_MAX: u32 = 0x8000; +pub const AUDIO_CHANNEL_MAX: u32 = 8; +pub const AUDIO_NEXT_CHANNEL: i32 = -1; +pub const AUDIO_SAMPLE_MIN: u32 = 64; +pub const AUDIO_SAMPLE_MAX: u32 = 65472; + +pub const PSP_CTRL_SELECT: i32 = 0x000001; +pub const PSP_CTRL_START: i32 = 0x000008; +pub const PSP_CTRL_UP: i32 = 0x000010; +pub const PSP_CTRL_RIGHT: i32 = 0x000020; +pub const PSP_CTRL_DOWN: i32 = 0x000040; +pub const PSP_CTRL_LEFT: i32 = 0x000080; +pub const PSP_CTRL_LTRIGGER: i32 = 0x000100; +pub const PSP_CTRL_RTRIGGER: i32 = 0x000200; +pub const PSP_CTRL_TRIANGLE: i32 = 0x001000; +pub const PSP_CTRL_CIRCLE: i32 = 0x002000; +pub const PSP_CTRL_CROSS: i32 = 0x004000; +pub const PSP_CTRL_SQUARE: i32 = 0x008000; +pub const PSP_CTRL_HOME: i32 = 0x010000; +pub const PSP_CTRL_HOLD: i32 = 0x020000; +pub const PSP_CTRL_NOTE: i32 = 0x800000; +pub const PSP_CTRL_SCREEN: i32 = 0x400000; +pub const PSP_CTRL_VOLUP: i32 = 0x100000; +pub const PSP_CTRL_VOLDOWN: i32 = 0x200000; +pub const PSP_CTRL_WLAN_UP: i32 = 0x040000; +pub const PSP_CTRL_REMOTE: i32 = 0x080000; +pub const PSP_CTRL_DISC: i32 = 0x1000000; +pub const PSP_CTRL_MS: i32 = 0x2000000; + +pub const USB_CAM_PID: i32 = 0x282; +pub const USB_BUS_DRIVER_NAME: &str = "USBBusDriver"; +pub const USB_CAM_DRIVER_NAME: &str = "USBCamDriver"; +pub const USB_CAM_MIC_DRIVER_NAME: &str = "USBCamMicDriver"; +pub const USB_STOR_DRIVER_NAME: &str = "USBStor_Driver"; + +pub const ACTIVATED: i32 = 0x200; +pub const CONNECTED: i32 = 0x020; +pub const ESTABLISHED: i32 = 0x002; + +pub const USB_CAM_FLIP: i32 = 1; +pub const USB_CAM_MIRROR: i32 = 0x100; + +pub const THREAD_ATTR_VFPU: i32 = 0x00004000; +pub const THREAD_ATTR_USER: i32 = 0x80000000; +pub const THREAD_ATTR_USBWLAN: i32 = 0xa0000000; +pub const THREAD_ATTR_VSH: i32 = 0xc0000000; +pub 
const THREAD_ATTR_SCRATCH_SRAM: i32 = 0x00008000; +pub const THREAD_ATTR_NO_FILLSTACK: i32 = 0x00100000; +pub const THREAD_ATTR_CLEAR_STACK: i32 = 0x00200000; + +pub const EVENT_WAIT_MULTIPLE: i32 = 0x200; + +pub const EVENT_WAIT_AND: i32 = 0; +pub const EVENT_WAIT_OR: i32 = 1; +pub const EVENT_WAIT_CLEAR: i32 = 0x20; + +pub const POWER_INFO_POWER_SWITCH: i32 = 0x80000000; +pub const POWER_INFO_HOLD_SWITCH: i32 = 0x40000000; +pub const POWER_INFO_STANDBY: i32 = 0x00080000; +pub const POWER_INFO_RESUME_COMPLETE: i32 = 0x00040000; +pub const POWER_INFO_RESUMING: i32 = 0x00020000; +pub const POWER_INFO_SUSPENDING: i32 = 0x00010000; +pub const POWER_INFO_AC_POWER: i32 = 0x00001000; +pub const POWER_INFO_BATTERY_LOW: i32 = 0x00000100; +pub const POWER_INFO_BATTERY_EXIST: i32 = 0x00000080; +pub const POWER_INFO_BATTERY_POWER: i32 = 0x0000007; + +pub const FIO_S_IFLNK: i32 = 0x4000; +pub const FIO_S_IFDIR: i32 = 0x1000; +pub const FIO_S_IFREG: i32 = 0x2000; +pub const FIO_S_ISUID: i32 = 0x0800; +pub const FIO_S_ISGID: i32 = 0x0400; +pub const FIO_S_ISVTX: i32 = 0x0200; +pub const FIO_S_IRUSR: i32 = 0x0100; +pub const FIO_S_IWUSR: i32 = 0x0080; +pub const FIO_S_IXUSR: i32 = 0x0040; +pub const FIO_S_IRGRP: i32 = 0x0020; +pub const FIO_S_IWGRP: i32 = 0x0010; +pub const FIO_S_IXGRP: i32 = 0x0008; +pub const FIO_S_IROTH: i32 = 0x0004; +pub const FIO_S_IWOTH: i32 = 0x0002; +pub const FIO_S_IXOTH: i32 = 0x0001; + +pub const FIO_SO_IFLNK: i32 = 0x0008; +pub const FIO_SO_IFDIR: i32 = 0x0010; +pub const FIO_SO_IFREG: i32 = 0x0020; +pub const FIO_SO_IROTH: i32 = 0x0004; +pub const FIO_SO_IWOTH: i32 = 0x0002; +pub const FIO_SO_IXOTH: i32 = 0x0001; + +pub const PSP_O_RD_ONLY: i32 = 0x0001; +pub const PSP_O_WR_ONLY: i32 = 0x0002; +pub const PSP_O_RD_WR: i32 = 0x0003; +pub const PSP_O_NBLOCK: i32 = 0x0004; +pub const PSP_O_DIR: i32 = 0x0008; +pub const PSP_O_APPEND: i32 = 0x0100; +pub const PSP_O_CREAT: i32 = 0x0200; +pub const PSP_O_TRUNC: i32 = 0x0400; +pub const PSP_O_EXCL: i32 = 0x0800; +pub const PSP_O_NO_WAIT: i32 = 0x8000; + +pub const UMD_NOT_PRESENT: i32 = 0x01; +pub const UMD_PRESENT: i32 = 0x02; +pub const UMD_CHANGED: i32 = 0x04; +pub const UMD_INITING: i32 = 0x08; +pub const UMD_INITED: i32 = 0x10; +pub const UMD_READY: i32 = 0x20; + +pub const PLAY_PAUSE: i32 = 0x1; +pub const FORWARD: i32 = 0x4; +pub const BACK: i32 = 0x8; +pub const VOL_UP: i32 = 0x10; +pub const VOL_DOWN: i32 = 0x20; +pub const HOLD: i32 = 0x80; + +pub const GU_PI: f32 = 3.141593; + +pub const GU_TEXTURE_8BIT: i32 = 1; +pub const GU_TEXTURE_16BIT: i32 = 2; +pub const GU_TEXTURE_32BITF: i32 = 3; +pub const GU_COLOR_5650: i32 = 4 << 2; +pub const GU_COLOR_5551: i32 = 5 << 2; +pub const GU_COLOR_4444: i32 = 6 << 2; +pub const GU_COLOR_8888: i32 = 7 << 2; +pub const GU_NORMAL_8BIT: i32 = 1 << 5; +pub const GU_NORMAL_16BIT: i32 = 2 << 5; +pub const GU_NORMAL_32BITF: i32 = 3 << 5; +pub const GU_VERTEX_8BIT: i32 = 1 << 7; +pub const GU_VERTEX_16BIT: i32 = 2 << 7; +pub const GU_VERTEX_32BITF: i32 = 3 << 7; +pub const GU_WEIGHT_8BIT: i32 = 1 << 9; +pub const GU_WEIGHT_16BIT: i32 = 2 << 9; +pub const GU_WEIGHT_32BITF: i32 = 3 << 9; +pub const GU_INDEX_8BIT: i32 = 1 << 11; +pub const GU_INDEX_16BIT: i32 = 2 << 11; +pub const GU_WEIGHTS1: i32 = (((1 - 1) & 7) << 14) as i32; +pub const GU_WEIGHTS2: i32 = (((2 - 1) & 7) << 14) as i32; +pub const GU_WEIGHTS3: i32 = (((3 - 1) & 7) << 14) as i32; +pub const GU_WEIGHTS4: i32 = (((4 - 1) & 7) << 14) as i32; +pub const GU_WEIGHTS5: i32 = (((5 - 1) & 7) << 14) as i32; +pub const GU_WEIGHTS6: i32 = 
(((6 - 1) & 7) << 14) as i32; +pub const GU_WEIGHTS7: i32 = (((7 - 1) & 7) << 14) as i32; +pub const GU_WEIGHTS8: i32 = (((8 - 1) & 7) << 14) as i32; +pub const GU_VERTICES1: i32 = (((1 - 1) & 7) << 18) as i32; +pub const GU_VERTICES2: i32 = (((2 - 1) & 7) << 18) as i32; +pub const GU_VERTICES3: i32 = (((3 - 1) & 7) << 18) as i32; +pub const GU_VERTICES4: i32 = (((4 - 1) & 7) << 18) as i32; +pub const GU_VERTICES5: i32 = (((5 - 1) & 7) << 18) as i32; +pub const GU_VERTICES6: i32 = (((6 - 1) & 7) << 18) as i32; +pub const GU_VERTICES7: i32 = (((7 - 1) & 7) << 18) as i32; +pub const GU_VERTICES8: i32 = (((8 - 1) & 7) << 18) as i32; +pub const GU_TRANSFORM_2D: i32 = 1 << 23; +pub const GU_TRANSFORM_3D: i32 = 0; + +pub const GU_COLOR_BUFFER_BIT: i32 = 1; +pub const GU_STENCIL_BUFFER_BIT: i32 = 2; +pub const GU_DEPTH_BUFFER_BIT: i32 = 4; +pub const GU_FAST_CLEAR_BIT: i32 = 16; + +pub const GU_AMBIENT: i32 = 1; +pub const GU_DIFFUSE: i32 = 2; +pub const GU_SPECULAR: i32 = 4; +pub const GU_UNKNOWN_LIGHT_COMPONENT: i32 = 8; + +pub const SYSTEM_REGISTRY: [u8; 7] = *b"/system"; +pub const REG_KEYNAME_SIZE: u32 = 27; + +pub const UTILITY_MSGDIALOG_ERROR: i32 = 0; +pub const UTILITY_MSGDIALOG_TEXT: i32 = 1; +pub const UTILITY_MSGDIALOG_YES_NO_BUTTONS: i32 = 0x10; +pub const UTILITY_MSGDIALOG_DEFAULT_NO: i32 = 0x100; + +pub const UTILITY_HTMLVIEWER_OPEN_SCE_START_PAGE: i32 = 0x000001; +pub const UTILITY_HTMLVIEWER_DISABLE_STARTUP_LIMITS: i32 = 0x000002; +pub const UTILITY_HTMLVIEWER_DISABLE_EXIT_DIALOG: i32 = 0x000004; +pub const UTILITY_HTMLVIEWER_DISABLE_CURSOR: i32 = 0x000008; +pub const UTILITY_HTMLVIEWER_DISABLE_DOWNLOAD_COMPLETE_DIALOG: i32 = 0x000010; +pub const UTILITY_HTMLVIEWER_DISABLE_DOWNLOAD_START_DIALOG: i32 = 0x000020; +pub const UTILITY_HTMLVIEWER_DISABLE_DOWNLOAD_DESTINATION_DIALOG: i32 = 0x000040; +pub const UTILITY_HTMLVIEWER_LOCK_DOWNLOAD_DESTINATION_DIALOG: i32 = 0x000080; +pub const UTILITY_HTMLVIEWER_DISABLE_TAB_DISPLAY: i32 = 0x000100; +pub const UTILITY_HTMLVIEWER_ENABLE_ANALOG_HOLD: i32 = 0x000200; +pub const UTILITY_HTMLVIEWER_ENABLE_FLASH: i32 = 0x000400; +pub const UTILITY_HTMLVIEWER_DISABLE_LRTRIGGER: i32 = 0x000800; + +extern "C" { + pub fn sceAudioChReserve(channel: i32, sample_count: i32, format: AudioFormat) -> i32; + pub fn sceAudioChRelease(channel: i32) -> i32; + pub fn sceAudioOutput(channel: i32, vol: i32, buf: *mut c_void) -> i32; + pub fn sceAudioOutputBlocking(channel: i32, vol: i32, buf: *mut c_void) -> i32; + pub fn sceAudioOutputPanned( + channel: i32, + left_vol: i32, + right_vol: i32, + buf: *mut c_void, + ) -> i32; + pub fn sceAudioOutputPannedBlocking( + channel: i32, + left_vol: i32, + right_vol: i32, + buf: *mut c_void, + ) -> i32; + pub fn sceAudioGetChannelRestLen(channel: i32) -> i32; + pub fn sceAudioGetChannelRestLength(channel: i32) -> i32; + pub fn sceAudioSetChannelDataLen(channel: i32, sample_count: i32) -> i32; + pub fn sceAudioChangeChannelConfig(channel: i32, format: AudioFormat) -> i32; + pub fn sceAudioChangeChannelVolume(channel: i32, left_vol: i32, right_vol: i32) -> i32; + pub fn sceAudioOutput2Reserve(sample_count: i32) -> i32; + pub fn sceAudioOutput2Release() -> i32; + pub fn sceAudioOutput2ChangeLength(sample_count: i32) -> i32; + pub fn sceAudioOutput2OutputBlocking(vol: i32, buf: *mut c_void) -> i32; + pub fn sceAudioOutput2GetRestSample() -> i32; + pub fn sceAudioSRCChReserve( + sample_count: i32, + freq: AudioOutputFrequency, + channels: i32, + ) -> i32; + pub fn sceAudioSRCChRelease() -> i32; + pub fn 
sceAudioSRCOutputBlocking(vol: i32, buf: *mut c_void) -> i32; + pub fn sceAudioInputInit(unknown1: i32, gain: i32, unknown2: i32) -> i32; + pub fn sceAudioInputInitEx(params: *mut AudioInputParams) -> i32; + pub fn sceAudioInputBlocking(sample_count: i32, freq: AudioInputFrequency, buf: *mut c_void); + pub fn sceAudioInput(sample_count: i32, freq: AudioInputFrequency, buf: *mut c_void); + pub fn sceAudioGetInputLength() -> i32; + pub fn sceAudioWaitInputEnd() -> i32; + pub fn sceAudioPollInputEnd() -> i32; + + pub fn sceAtracGetAtracID(ui_codec_type: u32) -> i32; + pub fn sceAtracSetDataAndGetID(buf: *mut c_void, bufsize: usize) -> i32; + pub fn sceAtracDecodeData( + atrac_id: i32, + out_samples: *mut u16, + out_n: *mut i32, + out_end: *mut i32, + out_remain_frame: *mut i32, + ) -> i32; + pub fn sceAtracGetRemainFrame(atrac_id: i32, out_remain_frame: *mut i32) -> i32; + pub fn sceAtracGetStreamDataInfo( + atrac_id: i32, + write_pointer: *mut *mut u8, + available_bytes: *mut u32, + read_offset: *mut u32, + ) -> i32; + pub fn sceAtracAddStreamData(atrac_id: i32, bytes_to_add: u32) -> i32; + pub fn sceAtracGetBitrate(atrac_id: i32, out_bitrate: *mut i32) -> i32; + pub fn sceAtracSetLoopNum(atrac_id: i32, nloops: i32) -> i32; + pub fn sceAtracReleaseAtracID(atrac_id: i32) -> i32; + pub fn sceAtracGetNextSample(atrac_id: i32, out_n: *mut i32) -> i32; + pub fn sceAtracGetMaxSample(atrac_id: i32, out_max: *mut i32) -> i32; + pub fn sceAtracGetBufferInfoForReseting( + atrac_id: i32, + ui_sample: u32, + pbuffer_info: *mut Atrac3BufferInfo, + ) -> i32; + pub fn sceAtracGetChannel(atrac_id: i32, pui_channel: *mut u32) -> i32; + pub fn sceAtracGetInternalErrorInfo(atrac_id: i32, pi_result: *mut i32) -> i32; + pub fn sceAtracGetLoopStatus( + atrac_id: i32, + pi_loop_num: *mut i32, + pui_loop_status: *mut u32, + ) -> i32; + pub fn sceAtracGetNextDecodePosition(atrac_id: i32, pui_sample_position: *mut u32) -> i32; + pub fn sceAtracGetSecondBufferInfo( + atrac_id: i32, + pui_position: *mut u32, + pui_data_byte: *mut u32, + ) -> i32; + pub fn sceAtracGetSoundSample( + atrac_id: i32, + pi_end_sample: *mut i32, + pi_loop_start_sample: *mut i32, + pi_loop_end_sample: *mut i32, + ) -> i32; + pub fn sceAtracResetPlayPosition( + atrac_id: i32, + ui_sample: u32, + ui_write_byte_first_buf: u32, + ui_write_byte_second_buf: u32, + ) -> i32; + pub fn sceAtracSetData(atrac_id: i32, puc_buffer_addr: *mut u8, ui_buffer_byte: u32) -> i32; + pub fn sceAtracSetHalfwayBuffer( + atrac_id: i32, + puc_buffer_addr: *mut u8, + ui_read_byte: u32, + ui_buffer_byte: u32, + ) -> i32; + pub fn sceAtracSetHalfwayBufferAndGetID( + puc_buffer_addr: *mut u8, + ui_read_byte: u32, + ui_buffer_byte: u32, + ) -> i32; + pub fn sceAtracSetSecondBuffer( + atrac_id: i32, + puc_second_buffer_addr: *mut u8, + ui_second_buffer_byte: u32, + ) -> i32; + + pub fn sceCtrlSetSamplingCycle(cycle: i32) -> i32; + pub fn sceCtrlGetSamplingCycle(pcycle: *mut i32) -> i32; + pub fn sceCtrlSetSamplingMode(mode: CtrlMode) -> i32; + pub fn sceCtrlGetSamplingMode(pmode: *mut i32) -> i32; + pub fn sceCtrlPeekBufferPositive(pad_data: *mut SceCtrlData, count: i32) -> i32; + pub fn sceCtrlPeekBufferNegative(pad_data: *mut SceCtrlData, count: i32) -> i32; + pub fn sceCtrlReadBufferPositive(pad_data: *mut SceCtrlData, count: i32) -> i32; + pub fn sceCtrlReadBufferNegative(pad_data: *mut SceCtrlData, count: i32) -> i32; + pub fn sceCtrlPeekLatch(latch_data: *mut SceCtrlLatch) -> i32; + pub fn sceCtrlReadLatch(latch_data: *mut SceCtrlLatch) -> i32; + pub fn 
sceCtrlSetIdleCancelThreshold(idlereset: i32, idleback: i32) -> i32; + pub fn sceCtrlGetIdleCancelThreshold(idlereset: *mut i32, idleback: *mut i32) -> i32; + + pub fn sceDisplaySetMode(mode: DisplayMode, width: usize, height: usize) -> u32; + pub fn sceDisplayGetMode(pmode: *mut i32, pwidth: *mut i32, pheight: *mut i32) -> i32; + pub fn sceDisplaySetFrameBuf( + top_addr: *const u8, + buffer_width: usize, + pixel_format: DisplayPixelFormat, + sync: DisplaySetBufSync, + ) -> u32; + pub fn sceDisplayGetFrameBuf( + top_addr: *mut *mut c_void, + buffer_width: *mut usize, + pixel_format: *mut DisplayPixelFormat, + sync: DisplaySetBufSync, + ) -> i32; + pub fn sceDisplayGetVcount() -> u32; + pub fn sceDisplayWaitVblank() -> i32; + pub fn sceDisplayWaitVblankCB() -> i32; + pub fn sceDisplayWaitVblankStart() -> i32; + pub fn sceDisplayWaitVblankStartCB() -> i32; + pub fn sceDisplayGetAccumulatedHcount() -> i32; + pub fn sceDisplayGetCurrentHcount() -> i32; + pub fn sceDisplayGetFramePerSec() -> f32; + pub fn sceDisplayIsForeground() -> i32; + pub fn sceDisplayIsVblank() -> i32; + + pub fn sceGeEdramGetSize() -> u32; + pub fn sceGeEdramGetAddr() -> *mut u8; + pub fn sceGeEdramSetAddrTranslation(width: i32) -> i32; + pub fn sceGeGetCmd(cmd: i32) -> u32; + pub fn sceGeGetMtx(type_: GeMatrixType, matrix: *mut c_void) -> i32; + pub fn sceGeGetStack(stack_id: i32, stack: *mut GeStack) -> i32; + pub fn sceGeSaveContext(context: *mut GeContext) -> i32; + pub fn sceGeRestoreContext(context: *const GeContext) -> i32; + pub fn sceGeListEnQueue( + list: *const c_void, + stall: *mut c_void, + cbid: i32, + arg: *mut GeListArgs, + ) -> i32; + pub fn sceGeListEnQueueHead( + list: *const c_void, + stall: *mut c_void, + cbid: i32, + arg: *mut GeListArgs, + ) -> i32; + pub fn sceGeListDeQueue(qid: i32) -> i32; + pub fn sceGeListUpdateStallAddr(qid: i32, stall: *mut c_void) -> i32; + pub fn sceGeListSync(qid: i32, sync_type: i32) -> GeListState; + pub fn sceGeDrawSync(sync_type: i32) -> GeListState; + pub fn sceGeBreak(mode: i32, p_param: *mut GeBreakParam) -> i32; + pub fn sceGeContinue() -> i32; + pub fn sceGeSetCallback(cb: *mut GeCallbackData) -> i32; + pub fn sceGeUnsetCallback(cbid: i32) -> i32; + + pub fn sceKernelExitGame(); + pub fn sceKernelRegisterExitCallback(id: SceUid) -> i32; + pub fn sceKernelLoadExec(file: *const u8, param: *mut SceKernelLoadExecParam) -> i32; + + pub fn sceKernelAllocPartitionMemory( + partition: SceSysMemPartitionId, + name: *const u8, + type_: SceSysMemBlockTypes, + size: u32, + addr: *mut c_void, + ) -> SceUid; + pub fn sceKernelGetBlockHeadAddr(blockid: SceUid) -> *mut c_void; + pub fn sceKernelFreePartitionMemory(blockid: SceUid) -> i32; + pub fn sceKernelTotalFreeMemSize() -> usize; + pub fn sceKernelMaxFreeMemSize() -> usize; + pub fn sceKernelDevkitVersion() -> u32; + pub fn sceKernelSetCompiledSdkVersion(version: u32) -> i32; + pub fn sceKernelGetCompiledSdkVersion() -> u32; + + pub fn sceKernelLibcTime(t: *mut i32) -> i32; + pub fn sceKernelLibcClock() -> u32; + pub fn sceKernelLibcGettimeofday(tp: *mut timeval, tzp: *mut timezone) -> i32; + pub fn sceKernelDcacheWritebackAll(); + pub fn sceKernelDcacheWritebackInvalidateAll(); + pub fn sceKernelDcacheWritebackRange(p: *const c_void, size: u32); + pub fn sceKernelDcacheWritebackInvalidateRange(p: *const c_void, size: u32); + pub fn sceKernelDcacheInvalidateRange(p: *const c_void, size: u32); + pub fn sceKernelIcacheInvalidateAll(); + pub fn sceKernelIcacheInvalidateRange(p: *const c_void, size: u32); + pub fn 
sceKernelUtilsMt19937Init(ctx: *mut SceKernelUtilsMt19937Context, seed: u32) -> i32; + pub fn sceKernelUtilsMt19937UInt(ctx: *mut SceKernelUtilsMt19937Context) -> u32; + pub fn sceKernelUtilsMd5Digest(data: *mut u8, size: u32, digest: *mut u8) -> i32; + pub fn sceKernelUtilsMd5BlockInit(ctx: *mut SceKernelUtilsMd5Context) -> i32; + pub fn sceKernelUtilsMd5BlockUpdate( + ctx: *mut SceKernelUtilsMd5Context, + data: *mut u8, + size: u32, + ) -> i32; + pub fn sceKernelUtilsMd5BlockResult(ctx: *mut SceKernelUtilsMd5Context, digest: *mut u8) + -> i32; + pub fn sceKernelUtilsSha1Digest(data: *mut u8, size: u32, digest: *mut u8) -> i32; + pub fn sceKernelUtilsSha1BlockInit(ctx: *mut SceKernelUtilsSha1Context) -> i32; + pub fn sceKernelUtilsSha1BlockUpdate( + ctx: *mut SceKernelUtilsSha1Context, + data: *mut u8, + size: u32, + ) -> i32; + pub fn sceKernelUtilsSha1BlockResult( + ctx: *mut SceKernelUtilsSha1Context, + digest: *mut u8, + ) -> i32; + + pub fn sceKernelRegisterSubIntrHandler( + int_no: i32, + no: i32, + handler: *mut c_void, + arg: *mut c_void, + ) -> i32; + pub fn sceKernelReleaseSubIntrHandler(int_no: i32, no: i32) -> i32; + pub fn sceKernelEnableSubIntr(int_no: i32, no: i32) -> i32; + pub fn sceKernelDisableSubIntr(int_no: i32, no: i32) -> i32; + pub fn QueryIntrHandlerInfo( + intr_code: SceUid, + sub_intr_code: SceUid, + data: *mut IntrHandlerOptionParam, + ) -> i32; + + pub fn sceKernelCpuSuspendIntr() -> u32; + pub fn sceKernelCpuResumeIntr(flags: u32); + pub fn sceKernelCpuResumeIntrWithSync(flags: u32); + pub fn sceKernelIsCpuIntrSuspended(flags: u32) -> i32; + pub fn sceKernelIsCpuIntrEnable() -> i32; + + pub fn sceKernelLoadModule( + path: *const u8, + flags: i32, + option: *mut SceKernelLMOption, + ) -> SceUid; + pub fn sceKernelLoadModuleMs( + path: *const u8, + flags: i32, + option: *mut SceKernelLMOption, + ) -> SceUid; + pub fn sceKernelLoadModuleByID( + fid: SceUid, + flags: i32, + option: *mut SceKernelLMOption, + ) -> SceUid; + pub fn sceKernelLoadModuleBufferUsbWlan( + buf_size: usize, + buf: *mut c_void, + flags: i32, + option: *mut SceKernelLMOption, + ) -> SceUid; + pub fn sceKernelStartModule( + mod_id: SceUid, + arg_size: usize, + argp: *mut c_void, + status: *mut i32, + option: *mut SceKernelSMOption, + ) -> i32; + pub fn sceKernelStopModule( + mod_id: SceUid, + arg_size: usize, + argp: *mut c_void, + status: *mut i32, + option: *mut SceKernelSMOption, + ) -> i32; + pub fn sceKernelUnloadModule(mod_id: SceUid) -> i32; + pub fn sceKernelSelfStopUnloadModule(unknown: i32, arg_size: usize, argp: *mut c_void) -> i32; + pub fn sceKernelStopUnloadSelfModule( + arg_size: usize, + argp: *mut c_void, + status: *mut i32, + option: *mut SceKernelSMOption, + ) -> i32; + pub fn sceKernelQueryModuleInfo(mod_id: SceUid, info: *mut SceKernelModuleInfo) -> i32; + pub fn sceKernelGetModuleIdList( + read_buf: *mut SceUid, + read_buf_size: i32, + id_count: *mut i32, + ) -> i32; + + pub fn sceKernelVolatileMemLock(unk: i32, ptr: *mut *mut c_void, size: *mut i32) -> i32; + pub fn sceKernelVolatileMemTryLock(unk: i32, ptr: *mut *mut c_void, size: *mut i32) -> i32; + pub fn sceKernelVolatileMemUnlock(unk: i32) -> i32; + + pub fn sceKernelStdin() -> SceUid; + pub fn sceKernelStdout() -> SceUid; + pub fn sceKernelStderr() -> SceUid; + + pub fn sceKernelGetThreadmanIdType(uid: SceUid) -> SceKernelIdListType; + pub fn sceKernelCreateThread( + name: *const u8, + entry: SceKernelThreadEntry, + init_priority: i32, + stack_size: i32, + attr: i32, + option: *mut SceKernelThreadOptParam, + ) -> 
SceUid; + pub fn sceKernelDeleteThread(thid: SceUid) -> i32; + pub fn sceKernelStartThread(id: SceUid, arg_len: usize, arg_p: *mut c_void) -> i32; + pub fn sceKernelExitThread(status: i32) -> i32; + pub fn sceKernelExitDeleteThread(status: i32) -> i32; + pub fn sceKernelTerminateThread(thid: SceUid) -> i32; + pub fn sceKernelTerminateDeleteThread(thid: SceUid) -> i32; + pub fn sceKernelSuspendDispatchThread() -> i32; + pub fn sceKernelResumeDispatchThread(state: i32) -> i32; + pub fn sceKernelSleepThread() -> i32; + pub fn sceKernelSleepThreadCB() -> i32; + pub fn sceKernelWakeupThread(thid: SceUid) -> i32; + pub fn sceKernelCancelWakeupThread(thid: SceUid) -> i32; + pub fn sceKernelSuspendThread(thid: SceUid) -> i32; + pub fn sceKernelResumeThread(thid: SceUid) -> i32; + pub fn sceKernelWaitThreadEnd(thid: SceUid, timeout: *mut u32) -> i32; + pub fn sceKernelWaitThreadEndCB(thid: SceUid, timeout: *mut u32) -> i32; + pub fn sceKernelDelayThread(delay: u32) -> i32; + pub fn sceKernelDelayThreadCB(delay: u32) -> i32; + pub fn sceKernelDelaySysClockThread(delay: *mut SceKernelSysClock) -> i32; + pub fn sceKernelDelaySysClockThreadCB(delay: *mut SceKernelSysClock) -> i32; + pub fn sceKernelChangeCurrentThreadAttr(unknown: i32, attr: i32) -> i32; + pub fn sceKernelChangeThreadPriority(thid: SceUid, priority: i32) -> i32; + pub fn sceKernelRotateThreadReadyQueue(priority: i32) -> i32; + pub fn sceKernelReleaseWaitThread(thid: SceUid) -> i32; + pub fn sceKernelGetThreadId() -> i32; + pub fn sceKernelGetThreadCurrentPriority() -> i32; + pub fn sceKernelGetThreadExitStatus(thid: SceUid) -> i32; + pub fn sceKernelCheckThreadStack() -> i32; + pub fn sceKernelGetThreadStackFreeSize(thid: SceUid) -> i32; + pub fn sceKernelReferThreadStatus(thid: SceUid, info: *mut SceKernelThreadInfo) -> i32; + pub fn sceKernelReferThreadRunStatus( + thid: SceUid, + status: *mut SceKernelThreadRunStatus, + ) -> i32; + pub fn sceKernelCreateSema( + name: *const u8, + attr: u32, + init_val: i32, + max_val: i32, + option: *mut SceKernelSemaOptParam, + ) -> SceUid; + pub fn sceKernelDeleteSema(sema_id: SceUid) -> i32; + pub fn sceKernelSignalSema(sema_id: SceUid, signal: i32) -> i32; + pub fn sceKernelWaitSema(sema_id: SceUid, signal: i32, timeout: *mut u32) -> i32; + pub fn sceKernelWaitSemaCB(sema_id: SceUid, signal: i32, timeout: *mut u32) -> i32; + pub fn sceKernelPollSema(sema_id: SceUid, signal: i32) -> i32; + pub fn sceKernelReferSemaStatus(sema_id: SceUid, info: *mut SceKernelSemaInfo) -> i32; + pub fn sceKernelCreateEventFlag( + name: *const u8, + attr: i32, + bits: i32, + opt: *mut SceKernelEventFlagOptParam, + ) -> SceUid; + pub fn sceKernelSetEventFlag(ev_id: SceUid, bits: u32) -> i32; + pub fn sceKernelClearEventFlag(ev_id: SceUid, bits: u32) -> i32; + pub fn sceKernelPollEventFlag(ev_id: SceUid, bits: u32, wait: i32, out_bits: *mut u32) -> i32; + pub fn sceKernelWaitEventFlag( + ev_id: SceUid, + bits: u32, + wait: i32, + out_bits: *mut u32, + timeout: *mut u32, + ) -> i32; + pub fn sceKernelWaitEventFlagCB( + ev_id: SceUid, + bits: u32, + wait: i32, + out_bits: *mut u32, + timeout: *mut u32, + ) -> i32; + pub fn sceKernelDeleteEventFlag(ev_id: SceUid) -> i32; + pub fn sceKernelReferEventFlagStatus(event: SceUid, status: *mut SceKernelEventFlagInfo) + -> i32; + pub fn sceKernelCreateMbx( + name: *const u8, + attr: u32, + option: *mut SceKernelMbxOptParam, + ) -> SceUid; + pub fn sceKernelDeleteMbx(mbx_id: SceUid) -> i32; + pub fn sceKernelSendMbx(mbx_id: SceUid, message: *mut c_void) -> i32; + pub fn 
sceKernelReceiveMbx(mbx_id: SceUid, message: *mut *mut c_void, timeout: *mut u32) + -> i32; + pub fn sceKernelReceiveMbxCB( + mbx_id: SceUid, + message: *mut *mut c_void, + timeout: *mut u32, + ) -> i32; + pub fn sceKernelPollMbx(mbx_id: SceUid, pmessage: *mut *mut c_void) -> i32; + pub fn sceKernelCancelReceiveMbx(mbx_id: SceUid, num: *mut i32) -> i32; + pub fn sceKernelReferMbxStatus(mbx_id: SceUid, info: *mut SceKernelMbxInfo) -> i32; + pub fn sceKernelSetAlarm( + clock: u32, + handler: SceKernelAlarmHandler, + common: *mut c_void, + ) -> SceUid; + pub fn sceKernelSetSysClockAlarm( + clock: *mut SceKernelSysClock, + handler: *mut SceKernelAlarmHandler, + common: *mut c_void, + ) -> SceUid; + pub fn sceKernelCancelAlarm(alarm_id: SceUid) -> i32; + pub fn sceKernelReferAlarmStatus(alarm_id: SceUid, info: *mut SceKernelAlarmInfo) -> i32; + pub fn sceKernelCreateCallback( + name: *const u8, + func: SceKernelCallbackFunction, + arg: *mut c_void, + ) -> SceUid; + pub fn sceKernelReferCallbackStatus(cb: SceUid, status: *mut SceKernelCallbackInfo) -> i32; + pub fn sceKernelDeleteCallback(cb: SceUid) -> i32; + pub fn sceKernelNotifyCallback(cb: SceUid, arg2: i32) -> i32; + pub fn sceKernelCancelCallback(cb: SceUid) -> i32; + pub fn sceKernelGetCallbackCount(cb: SceUid) -> i32; + pub fn sceKernelCheckCallback() -> i32; + pub fn sceKernelGetThreadmanIdList( + type_: SceKernelIdListType, + read_buf: *mut SceUid, + read_buf_size: i32, + id_count: *mut i32, + ) -> i32; + pub fn sceKernelReferSystemStatus(status: *mut SceKernelSystemStatus) -> i32; + pub fn sceKernelCreateMsgPipe( + name: *const u8, + part: i32, + attr: i32, + unk1: *mut c_void, + opt: *mut c_void, + ) -> SceUid; + pub fn sceKernelDeleteMsgPipe(uid: SceUid) -> i32; + pub fn sceKernelSendMsgPipe( + uid: SceUid, + message: *mut c_void, + size: u32, + unk1: i32, + unk2: *mut c_void, + timeout: *mut u32, + ) -> i32; + pub fn sceKernelSendMsgPipeCB( + uid: SceUid, + message: *mut c_void, + size: u32, + unk1: i32, + unk2: *mut c_void, + timeout: *mut u32, + ) -> i32; + pub fn sceKernelTrySendMsgPipe( + uid: SceUid, + message: *mut c_void, + size: u32, + unk1: i32, + unk2: *mut c_void, + ) -> i32; + pub fn sceKernelReceiveMsgPipe( + uid: SceUid, + message: *mut c_void, + size: u32, + unk1: i32, + unk2: *mut c_void, + timeout: *mut u32, + ) -> i32; + pub fn sceKernelReceiveMsgPipeCB( + uid: SceUid, + message: *mut c_void, + size: u32, + unk1: i32, + unk2: *mut c_void, + timeout: *mut u32, + ) -> i32; + pub fn sceKernelTryReceiveMsgPipe( + uid: SceUid, + message: *mut c_void, + size: u32, + unk1: i32, + unk2: *mut c_void, + ) -> i32; + pub fn sceKernelCancelMsgPipe(uid: SceUid, send: *mut i32, recv: *mut i32) -> i32; + pub fn sceKernelReferMsgPipeStatus(uid: SceUid, info: *mut SceKernelMppInfo) -> i32; + pub fn sceKernelCreateVpl( + name: *const u8, + part: i32, + attr: i32, + size: u32, + opt: *mut SceKernelVplOptParam, + ) -> SceUid; + pub fn sceKernelDeleteVpl(uid: SceUid) -> i32; + pub fn sceKernelAllocateVpl( + uid: SceUid, + size: u32, + data: *mut *mut c_void, + timeout: *mut u32, + ) -> i32; + pub fn sceKernelAllocateVplCB( + uid: SceUid, + size: u32, + data: *mut *mut c_void, + timeout: *mut u32, + ) -> i32; + pub fn sceKernelTryAllocateVpl(uid: SceUid, size: u32, data: *mut *mut c_void) -> i32; + pub fn sceKernelFreeVpl(uid: SceUid, data: *mut c_void) -> i32; + pub fn sceKernelCancelVpl(uid: SceUid, num: *mut i32) -> i32; + pub fn sceKernelReferVplStatus(uid: SceUid, info: *mut SceKernelVplInfo) -> i32; + pub fn sceKernelCreateFpl( + 
name: *const u8, + part: i32, + attr: i32, + size: u32, + blocks: u32, + opt: *mut SceKernelFplOptParam, + ) -> i32; + pub fn sceKernelDeleteFpl(uid: SceUid) -> i32; + pub fn sceKernelAllocateFpl(uid: SceUid, data: *mut *mut c_void, timeout: *mut u32) -> i32; + pub fn sceKernelAllocateFplCB(uid: SceUid, data: *mut *mut c_void, timeout: *mut u32) -> i32; + pub fn sceKernelTryAllocateFpl(uid: SceUid, data: *mut *mut c_void) -> i32; + pub fn sceKernelFreeFpl(uid: SceUid, data: *mut c_void) -> i32; + pub fn sceKernelCancelFpl(uid: SceUid, pnum: *mut i32) -> i32; + pub fn sceKernelReferFplStatus(uid: SceUid, info: *mut SceKernelFplInfo) -> i32; + pub fn sceKernelUSec2SysClock(usec: u32, clock: *mut SceKernelSysClock) -> i32; + pub fn sceKernelUSec2SysClockWide(usec: u32) -> i64; + pub fn sceKernelSysClock2USec( + clock: *mut SceKernelSysClock, + low: *mut u32, + high: *mut u32, + ) -> i32; + pub fn sceKernelSysClock2USecWide(clock: i64, low: *mut u32, high: *mut u32) -> i32; + pub fn sceKernelGetSystemTime(time: *mut SceKernelSysClock) -> i32; + pub fn sceKernelGetSystemTimeWide() -> i64; + pub fn sceKernelGetSystemTimeLow() -> u32; + pub fn sceKernelCreateVTimer(name: *const u8, opt: *mut SceKernelVTimerOptParam) -> SceUid; + pub fn sceKernelDeleteVTimer(uid: SceUid) -> i32; + pub fn sceKernelGetVTimerBase(uid: SceUid, base: *mut SceKernelSysClock) -> i32; + pub fn sceKernelGetVTimerBaseWide(uid: SceUid) -> i64; + pub fn sceKernelGetVTimerTime(uid: SceUid, time: *mut SceKernelSysClock) -> i32; + pub fn sceKernelGetVTimerTimeWide(uid: SceUid) -> i64; + pub fn sceKernelSetVTimerTime(uid: SceUid, time: *mut SceKernelSysClock) -> i32; + pub fn sceKernelSetVTimerTimeWide(uid: SceUid, time: i64) -> i64; + pub fn sceKernelStartVTimer(uid: SceUid) -> i32; + pub fn sceKernelStopVTimer(uid: SceUid) -> i32; + pub fn sceKernelSetVTimerHandler( + uid: SceUid, + time: *mut SceKernelSysClock, + handler: SceKernelVTimerHandler, + common: *mut c_void, + ) -> i32; + pub fn sceKernelSetVTimerHandlerWide( + uid: SceUid, + time: i64, + handler: SceKernelVTimerHandlerWide, + common: *mut c_void, + ) -> i32; + pub fn sceKernelCancelVTimerHandler(uid: SceUid) -> i32; + pub fn sceKernelReferVTimerStatus(uid: SceUid, info: *mut SceKernelVTimerInfo) -> i32; + pub fn sceKernelRegisterThreadEventHandler( + name: *const u8, + thread_id: SceUid, + mask: i32, + handler: SceKernelThreadEventHandler, + common: *mut c_void, + ) -> SceUid; + pub fn sceKernelReleaseThreadEventHandler(uid: SceUid) -> i32; + pub fn sceKernelReferThreadEventHandlerStatus( + uid: SceUid, + info: *mut SceKernelThreadEventHandlerInfo, + ) -> i32; + pub fn sceKernelReferThreadProfiler() -> *mut DebugProfilerRegs; + pub fn sceKernelReferGlobalProfiler() -> *mut DebugProfilerRegs; + + pub fn sceUsbStart(driver_name: *const u8, size: i32, args: *mut c_void) -> i32; + pub fn sceUsbStop(driver_name: *const u8, size: i32, args: *mut c_void) -> i32; + pub fn sceUsbActivate(pid: u32) -> i32; + pub fn sceUsbDeactivate(pid: u32) -> i32; + pub fn sceUsbGetState() -> i32; + pub fn sceUsbGetDrvState(driver_name: *const u8) -> i32; +} + +extern "C" { + pub fn sceUsbCamSetupStill(param: *mut UsbCamSetupStillParam) -> i32; + pub fn sceUsbCamSetupStillEx(param: *mut UsbCamSetupStillExParam) -> i32; + pub fn sceUsbCamStillInputBlocking(buf: *mut u8, size: usize) -> i32; + pub fn sceUsbCamStillInput(buf: *mut u8, size: usize) -> i32; + pub fn sceUsbCamStillWaitInputEnd() -> i32; + pub fn sceUsbCamStillPollInputEnd() -> i32; + pub fn sceUsbCamStillCancelInput() -> i32; + 
pub fn sceUsbCamStillGetInputLength() -> i32; + pub fn sceUsbCamSetupVideo( + param: *mut UsbCamSetupVideoParam, + work_area: *mut c_void, + work_area_size: i32, + ) -> i32; + pub fn sceUsbCamSetupVideoEx( + param: *mut UsbCamSetupVideoExParam, + work_area: *mut c_void, + work_area_size: i32, + ) -> i32; + pub fn sceUsbCamStartVideo() -> i32; + pub fn sceUsbCamStopVideo() -> i32; + pub fn sceUsbCamReadVideoFrameBlocking(buf: *mut u8, size: usize) -> i32; + pub fn sceUsbCamReadVideoFrame(buf: *mut u8, size: usize) -> i32; + pub fn sceUsbCamWaitReadVideoFrameEnd() -> i32; + pub fn sceUsbCamPollReadVideoFrameEnd() -> i32; + pub fn sceUsbCamGetReadVideoFrameSize() -> i32; + pub fn sceUsbCamSetSaturation(saturation: i32) -> i32; + pub fn sceUsbCamSetBrightness(brightness: i32) -> i32; + pub fn sceUsbCamSetContrast(contrast: i32) -> i32; + pub fn sceUsbCamSetSharpness(sharpness: i32) -> i32; + pub fn sceUsbCamSetImageEffectMode(effect_mode: UsbCamEffectMode) -> i32; + pub fn sceUsbCamSetEvLevel(exposure_level: UsbCamEvLevel) -> i32; + pub fn sceUsbCamSetReverseMode(reverse_flags: i32) -> i32; + pub fn sceUsbCamSetZoom(zoom: i32) -> i32; + pub fn sceUsbCamGetSaturation(saturation: *mut i32) -> i32; + pub fn sceUsbCamGetBrightness(brightness: *mut i32) -> i32; + pub fn sceUsbCamGetContrast(contrast: *mut i32) -> i32; + pub fn sceUsbCamGetSharpness(sharpness: *mut i32) -> i32; + pub fn sceUsbCamGetImageEffectMode(effect_mode: *mut UsbCamEffectMode) -> i32; + pub fn sceUsbCamGetEvLevel(exposure_level: *mut UsbCamEvLevel) -> i32; + pub fn sceUsbCamGetReverseMode(reverse_flags: *mut i32) -> i32; + pub fn sceUsbCamGetZoom(zoom: *mut i32) -> i32; + pub fn sceUsbCamAutoImageReverseSW(on: i32) -> i32; + pub fn sceUsbCamGetAutoImageReverseState() -> i32; + pub fn sceUsbCamGetLensDirection() -> i32; + + pub fn sceUsbstorBootRegisterNotify(event_flag: SceUid) -> i32; + pub fn sceUsbstorBootUnregisterNotify(event_flag: u32) -> i32; + pub fn sceUsbstorBootSetCapacity(size: u32) -> i32; + + pub fn scePowerRegisterCallback(slot: i32, cbid: SceUid) -> i32; + pub fn scePowerUnregisterCallback(slot: i32) -> i32; + pub fn scePowerIsPowerOnline() -> i32; + pub fn scePowerIsBatteryExist() -> i32; + pub fn scePowerIsBatteryCharging() -> i32; + pub fn scePowerGetBatteryChargingStatus() -> i32; + pub fn scePowerIsLowBattery() -> i32; + pub fn scePowerGetBatteryLifePercent() -> i32; + pub fn scePowerGetBatteryLifeTime() -> i32; + pub fn scePowerGetBatteryTemp() -> i32; + pub fn scePowerGetBatteryElec() -> i32; + pub fn scePowerGetBatteryVolt() -> i32; + pub fn scePowerSetCpuClockFrequency(cpufreq: i32) -> i32; + pub fn scePowerSetBusClockFrequency(busfreq: i32) -> i32; + pub fn scePowerGetCpuClockFrequency() -> i32; + pub fn scePowerGetCpuClockFrequencyInt() -> i32; + pub fn scePowerGetCpuClockFrequencyFloat() -> f32; + pub fn scePowerGetBusClockFrequency() -> i32; + pub fn scePowerGetBusClockFrequencyInt() -> i32; + pub fn scePowerGetBusClockFrequencyFloat() -> f32; + pub fn scePowerSetClockFrequency(pllfreq: i32, cpufreq: i32, busfreq: i32) -> i32; + pub fn scePowerLock(unknown: i32) -> i32; + pub fn scePowerUnlock(unknown: i32) -> i32; + pub fn scePowerTick(t: PowerTick) -> i32; + pub fn scePowerGetIdleTimer() -> i32; + pub fn scePowerIdleTimerEnable(unknown: i32) -> i32; + pub fn scePowerIdleTimerDisable(unknown: i32) -> i32; + pub fn scePowerRequestStandby() -> i32; + pub fn scePowerRequestSuspend() -> i32; + + pub fn sceWlanDevIsPowerOn() -> i32; + pub fn sceWlanGetSwitchState() -> i32; + pub fn 
sceWlanGetEtherAddr(ether_addr: *mut u8) -> i32; + + pub fn sceWlanDevAttach() -> i32; + pub fn sceWlanDevDetach() -> i32; + + pub fn sceRtcGetTickResolution() -> u32; + pub fn sceRtcGetCurrentTick(tick: *mut u64) -> i32; + pub fn sceRtcGetCurrentClock(tm: *mut ScePspDateTime, tz: i32) -> i32; + pub fn sceRtcGetCurrentClockLocalTime(tm: *mut ScePspDateTime) -> i32; + pub fn sceRtcConvertUtcToLocalTime(tick_utc: *const u64, tick_local: *mut u64) -> i32; + pub fn sceRtcConvertLocalTimeToUTC(tick_local: *const u64, tick_utc: *mut u64) -> i32; + pub fn sceRtcIsLeapYear(year: i32) -> i32; + pub fn sceRtcGetDaysInMonth(year: i32, month: i32) -> i32; + pub fn sceRtcGetDayOfWeek(year: i32, month: i32, day: i32) -> i32; + pub fn sceRtcCheckValid(date: *const ScePspDateTime) -> i32; + pub fn sceRtcSetTick(date: *mut ScePspDateTime, tick: *const u64) -> i32; + pub fn sceRtcGetTick(date: *const ScePspDateTime, tick: *mut u64) -> i32; + pub fn sceRtcCompareTick(tick1: *const u64, tick2: *const u64) -> i32; + pub fn sceRtcTickAddTicks(dest_tick: *mut u64, src_tick: *const u64, num_ticks: u64) -> i32; + pub fn sceRtcTickAddMicroseconds(dest_tick: *mut u64, src_tick: *const u64, num_ms: u64) + -> i32; + pub fn sceRtcTickAddSeconds(dest_tick: *mut u64, src_tick: *const u64, num_seconds: u64) + -> i32; + pub fn sceRtcTickAddMinutes(dest_tick: *mut u64, src_tick: *const u64, num_minutes: u64) + -> i32; + pub fn sceRtcTickAddHours(dest_tick: *mut u64, src_tick: *const u64, num_hours: u64) -> i32; + pub fn sceRtcTickAddDays(dest_tick: *mut u64, src_tick: *const u64, num_days: u64) -> i32; + pub fn sceRtcTickAddWeeks(dest_tick: *mut u64, src_tick: *const u64, num_weeks: u64) -> i32; + pub fn sceRtcTickAddMonths(dest_tick: *mut u64, src_tick: *const u64, num_months: u64) -> i32; + pub fn sceRtcTickAddYears(dest_tick: *mut u64, src_tick: *const u64, num_years: u64) -> i32; + pub fn sceRtcSetTime_t(date: *mut ScePspDateTime, time: u32) -> i32; + pub fn sceRtcGetTime_t(date: *const ScePspDateTime, time: *mut u32) -> i32; + pub fn sceRtcSetTime64_t(date: *mut ScePspDateTime, time: u64) -> i32; + pub fn sceRtcGetTime64_t(date: *const ScePspDateTime, time: *mut u64) -> i32; + pub fn sceRtcSetDosTime(date: *mut ScePspDateTime, dos_time: u32) -> i32; + pub fn sceRtcGetDosTime(date: *mut ScePspDateTime, dos_time: u32) -> i32; + pub fn sceRtcSetWin32FileTime(date: *mut ScePspDateTime, time: *mut u64) -> i32; + pub fn sceRtcGetWin32FileTime(date: *mut ScePspDateTime, time: *mut u64) -> i32; + pub fn sceRtcParseDateTime(dest_tick: *mut u64, date_string: *const u8) -> i32; + pub fn sceRtcFormatRFC3339( + psz_date_time: *mut c_char, + p_utc: *const u64, + time_zone_minutes: i32, + ) -> i32; + pub fn sceRtcFormatRFC3339LocalTime(psz_date_time: *mut c_char, p_utc: *const u64) -> i32; + pub fn sceRtcParseRFC3339(p_utc: *mut u64, psz_date_time: *const u8) -> i32; + pub fn sceRtcFormatRFC2822( + psz_date_time: *mut c_char, + p_utc: *const u64, + time_zone_minutes: i32, + ) -> i32; + pub fn sceRtcFormatRFC2822LocalTime(psz_date_time: *mut c_char, p_utc: *const u64) -> i32; + + pub fn sceIoOpen(file: *const u8, flags: i32, permissions: IoPermissions) -> SceUid; + pub fn sceIoOpenAsync(file: *const u8, flags: i32, permissions: IoPermissions) -> SceUid; + pub fn sceIoClose(fd: SceUid) -> i32; + pub fn sceIoCloseAsync(fd: SceUid) -> i32; + pub fn sceIoRead(fd: SceUid, data: *mut c_void, size: u32) -> i32; + pub fn sceIoReadAsync(fd: SceUid, data: *mut c_void, size: u32) -> i32; + pub fn sceIoWrite(fd: SceUid, data: *const c_void, size: 
usize) -> i32; + pub fn sceIoWriteAsync(fd: SceUid, data: *const c_void, size: u32) -> i32; + pub fn sceIoLseek(fd: SceUid, offset: i64, whence: IoWhence) -> i64; + pub fn sceIoLseekAsync(fd: SceUid, offset: i64, whence: IoWhence) -> i32; + pub fn sceIoLseek32(fd: SceUid, offset: i32, whence: IoWhence) -> i32; + pub fn sceIoLseek32Async(fd: SceUid, offset: i32, whence: IoWhence) -> i32; + pub fn sceIoRemove(file: *const u8) -> i32; + pub fn sceIoMkdir(dir: *const u8, mode: IoPermissions) -> i32; + pub fn sceIoRmdir(path: *const u8) -> i32; + pub fn sceIoChdir(path: *const u8) -> i32; + pub fn sceIoRename(oldname: *const u8, newname: *const u8) -> i32; + pub fn sceIoDopen(dirname: *const u8) -> SceUid; + pub fn sceIoDread(fd: SceUid, dir: *mut SceIoDirent) -> i32; + pub fn sceIoDclose(fd: SceUid) -> i32; + pub fn sceIoDevctl( + dev: *const u8, + cmd: u32, + indata: *mut c_void, + inlen: i32, + outdata: *mut c_void, + outlen: i32, + ) -> i32; + pub fn sceIoAssign( + dev1: *const u8, + dev2: *const u8, + dev3: *const u8, + mode: IoAssignPerms, + unk1: *mut c_void, + unk2: i32, + ) -> i32; + pub fn sceIoUnassign(dev: *const u8) -> i32; + pub fn sceIoGetstat(file: *const u8, stat: *mut SceIoStat) -> i32; + pub fn sceIoChstat(file: *const u8, stat: *mut SceIoStat, bits: i32) -> i32; + pub fn sceIoIoctl( + fd: SceUid, + cmd: u32, + indata: *mut c_void, + inlen: i32, + outdata: *mut c_void, + outlen: i32, + ) -> i32; + pub fn sceIoIoctlAsync( + fd: SceUid, + cmd: u32, + indata: *mut c_void, + inlen: i32, + outdata: *mut c_void, + outlen: i32, + ) -> i32; + pub fn sceIoSync(device: *const u8, unk: u32) -> i32; + pub fn sceIoWaitAsync(fd: SceUid, res: *mut i64) -> i32; + pub fn sceIoWaitAsyncCB(fd: SceUid, res: *mut i64) -> i32; + pub fn sceIoPollAsync(fd: SceUid, res: *mut i64) -> i32; + pub fn sceIoGetAsyncStat(fd: SceUid, poll: i32, res: *mut i64) -> i32; + pub fn sceIoCancel(fd: SceUid) -> i32; + pub fn sceIoGetDevType(fd: SceUid) -> i32; + pub fn sceIoChangeAsyncPriority(fd: SceUid, pri: i32) -> i32; + pub fn sceIoSetAsyncCallback(fd: SceUid, cb: SceUid, argp: *mut c_void) -> i32; + + pub fn sceJpegInitMJpeg() -> i32; + pub fn sceJpegFinishMJpeg() -> i32; + pub fn sceJpegCreateMJpeg(width: i32, height: i32) -> i32; + pub fn sceJpegDeleteMJpeg() -> i32; + pub fn sceJpegDecodeMJpeg(jpeg_buf: *mut u8, size: usize, rgba: *mut c_void, unk: u32) -> i32; + + pub fn sceUmdCheckMedium() -> i32; + pub fn sceUmdGetDiscInfo(info: *mut UmdInfo) -> i32; + pub fn sceUmdActivate(unit: i32, drive: *const u8) -> i32; + pub fn sceUmdDeactivate(unit: i32, drive: *const u8) -> i32; + pub fn sceUmdWaitDriveStat(state: i32) -> i32; + pub fn sceUmdWaitDriveStatWithTimer(state: i32, timeout: u32) -> i32; + pub fn sceUmdWaitDriveStatCB(state: i32, timeout: u32) -> i32; + pub fn sceUmdCancelWaitDriveStat() -> i32; + pub fn sceUmdGetDriveStat() -> i32; + pub fn sceUmdGetErrorStat() -> i32; + pub fn sceUmdRegisterUMDCallBack(cbid: i32) -> i32; + pub fn sceUmdUnRegisterUMDCallBack(cbid: i32) -> i32; + pub fn sceUmdReplacePermit() -> i32; + pub fn sceUmdReplaceProhibit() -> i32; + + pub fn sceMpegInit() -> i32; + pub fn sceMpegFinish(); + pub fn sceMpegRingbufferQueryMemSize(packets: i32) -> i32; + pub fn sceMpegRingbufferConstruct( + ringbuffer: *mut SceMpegRingbuffer, + packets: i32, + data: *mut c_void, + size: i32, + callback: SceMpegRingbufferCb, + cb_param: *mut c_void, + ) -> i32; + pub fn sceMpegRingbufferDestruct(ringbuffer: *mut SceMpegRingbuffer); + pub fn sceMpegRingbufferAvailableSize(ringbuffer: *mut 
SceMpegRingbuffer) -> i32; + pub fn sceMpegRingbufferPut( + ringbuffer: *mut SceMpegRingbuffer, + num_packets: i32, + available: i32, + ) -> i32; + pub fn sceMpegQueryMemSize(unk: i32) -> i32; + pub fn sceMpegCreate( + handle: SceMpeg, + data: *mut c_void, + size: i32, + ringbuffer: *mut SceMpegRingbuffer, + frame_width: i32, + unk1: i32, + unk2: i32, + ) -> i32; + pub fn sceMpegDelete(handle: SceMpeg); + pub fn sceMpegQueryStreamOffset(handle: SceMpeg, buffer: *mut c_void, offset: *mut i32) -> i32; + pub fn sceMpegQueryStreamSize(buffer: *mut c_void, size: *mut i32) -> i32; + pub fn sceMpegRegistStream(handle: SceMpeg, stream_id: i32, unk: i32) -> SceMpegStream; + pub fn sceMpegUnRegistStream(handle: SceMpeg, stream: SceMpegStream); + pub fn sceMpegFlushAllStream(handle: SceMpeg) -> i32; + pub fn sceMpegMallocAvcEsBuf(handle: SceMpeg) -> *mut c_void; + pub fn sceMpegFreeAvcEsBuf(handle: SceMpeg, buf: *mut c_void); + pub fn sceMpegQueryAtracEsSize(handle: SceMpeg, es_size: *mut i32, out_size: *mut i32) -> i32; + pub fn sceMpegInitAu(handle: SceMpeg, es_buffer: *mut c_void, au: *mut SceMpegAu) -> i32; + pub fn sceMpegGetAvcAu( + handle: SceMpeg, + stream: SceMpegStream, + au: *mut SceMpegAu, + unk: *mut i32, + ) -> i32; + pub fn sceMpegAvcDecodeMode(handle: SceMpeg, mode: *mut SceMpegAvcMode) -> i32; + pub fn sceMpegAvcDecode( + handle: SceMpeg, + au: *mut SceMpegAu, + iframe_width: i32, + buffer: *mut c_void, + init: *mut i32, + ) -> i32; + pub fn sceMpegAvcDecodeStop( + handle: SceMpeg, + frame_width: i32, + buffer: *mut c_void, + status: *mut i32, + ) -> i32; + pub fn sceMpegGetAtracAu( + handle: SceMpeg, + stream: SceMpegStream, + au: *mut SceMpegAu, + unk: *mut c_void, + ) -> i32; + pub fn sceMpegAtracDecode( + handle: SceMpeg, + au: *mut SceMpegAu, + buffer: *mut c_void, + init: i32, + ) -> i32; + + pub fn sceMpegBaseYCrCbCopyVme(yuv_buffer: *mut c_void, buffer: *mut i32, type_: i32) -> i32; + pub fn sceMpegBaseCscInit(width: i32) -> i32; + pub fn sceMpegBaseCscVme( + rgb_buffer: *mut c_void, + rgb_buffer2: *mut c_void, + width: i32, + y_cr_cb_buffer: *mut SceMpegYCrCbBuffer, + ) -> i32; + pub fn sceMpegbase_BEA18F91(lli: *mut SceMpegLLI) -> i32; + + pub fn sceHprmPeekCurrentKey(key: *mut i32) -> i32; + pub fn sceHprmPeekLatch(latch: *mut [u32; 4]) -> i32; + pub fn sceHprmReadLatch(latch: *mut [u32; 4]) -> i32; + pub fn sceHprmIsHeadphoneExist() -> i32; + pub fn sceHprmIsRemoteExist() -> i32; + pub fn sceHprmIsMicrophoneExist() -> i32; + + pub fn sceGuDepthBuffer(zbp: *mut c_void, zbw: i32); + pub fn sceGuDispBuffer(width: i32, height: i32, dispbp: *mut c_void, dispbw: i32); + pub fn sceGuDrawBuffer(psm: DisplayPixelFormat, fbp: *mut c_void, fbw: i32); + pub fn sceGuDrawBufferList(psm: DisplayPixelFormat, fbp: *mut c_void, fbw: i32); + pub fn sceGuDisplay(state: bool) -> bool; + pub fn sceGuDepthFunc(function: DepthFunc); + pub fn sceGuDepthMask(mask: i32); + pub fn sceGuDepthOffset(offset: i32); + pub fn sceGuDepthRange(near: i32, far: i32); + pub fn sceGuFog(near: f32, far: f32, color: u32); + pub fn sceGuInit(); + pub fn sceGuTerm(); + pub fn sceGuBreak(mode: i32); + pub fn sceGuContinue(); + pub fn sceGuSetCallback(signal: GuCallbackId, callback: GuCallback) -> GuCallback; + pub fn sceGuSignal(behavior: SignalBehavior, signal: i32); + pub fn sceGuSendCommandf(cmd: GeCommand, argument: f32); + pub fn sceGuSendCommandi(cmd: GeCommand, argument: i32); + pub fn sceGuGetMemory(size: i32) -> *mut c_void; + pub fn sceGuStart(context_type: GuContextType, list: *mut c_void); + pub fn 
sceGuFinish() -> i32; + pub fn sceGuFinishId(id: u32) -> i32; + pub fn sceGuCallList(list: *const c_void); + pub fn sceGuCallMode(mode: i32); + pub fn sceGuCheckList() -> i32; + pub fn sceGuSendList(mode: GuQueueMode, list: *const c_void, context: *mut GeContext); + pub fn sceGuSwapBuffers() -> *mut c_void; + pub fn sceGuSync(mode: GuSyncMode, behavior: GuSyncBehavior) -> GeListState; + pub fn sceGuDrawArray( + prim: GuPrimitive, + vtype: i32, + count: i32, + indices: *const c_void, + vertices: *const c_void, + ); + pub fn sceGuBeginObject( + vtype: i32, + count: i32, + indices: *const c_void, + vertices: *const c_void, + ); + pub fn sceGuEndObject(); + pub fn sceGuSetStatus(state: GuState, status: i32); + pub fn sceGuGetStatus(state: GuState) -> bool; + pub fn sceGuSetAllStatus(status: i32); + pub fn sceGuGetAllStatus() -> i32; + pub fn sceGuEnable(state: GuState); + pub fn sceGuDisable(state: GuState); + pub fn sceGuLight(light: i32, type_: LightType, components: i32, position: &ScePspFVector3); + pub fn sceGuLightAtt(light: i32, atten0: f32, atten1: f32, atten2: f32); + pub fn sceGuLightColor(light: i32, component: i32, color: u32); + pub fn sceGuLightMode(mode: LightMode); + pub fn sceGuLightSpot(light: i32, direction: &ScePspFVector3, exponent: f32, cutoff: f32); + pub fn sceGuClear(flags: i32); + pub fn sceGuClearColor(color: u32); + pub fn sceGuClearDepth(depth: u32); + pub fn sceGuClearStencil(stencil: u32); + pub fn sceGuPixelMask(mask: u32); + pub fn sceGuColor(color: u32); + pub fn sceGuColorFunc(func: ColorFunc, color: u32, mask: u32); + pub fn sceGuColorMaterial(components: i32); + pub fn sceGuAlphaFunc(func: AlphaFunc, value: i32, mask: i32); + pub fn sceGuAmbient(color: u32); + pub fn sceGuAmbientColor(color: u32); + pub fn sceGuBlendFunc(op: BlendOp, src: BlendSrc, dest: BlendDst, src_fix: u32, dest_fix: u32); + pub fn sceGuMaterial(components: i32, color: u32); + pub fn sceGuModelColor(emissive: u32, ambient: u32, diffuse: u32, specular: u32); + pub fn sceGuStencilFunc(func: StencilFunc, ref_: i32, mask: i32); + pub fn sceGuStencilOp(fail: StencilOperation, zfail: StencilOperation, zpass: StencilOperation); + pub fn sceGuSpecular(power: f32); + pub fn sceGuFrontFace(order: FrontFaceDirection); + pub fn sceGuLogicalOp(op: LogicalOperation); + pub fn sceGuSetDither(matrix: &ScePspIMatrix4); + pub fn sceGuShadeModel(mode: ShadingModel); + pub fn sceGuCopyImage( + psm: DisplayPixelFormat, + sx: i32, + sy: i32, + width: i32, + height: i32, + srcw: i32, + src: *mut c_void, + dx: i32, + dy: i32, + destw: i32, + dest: *mut c_void, + ); + pub fn sceGuTexEnvColor(color: u32); + pub fn sceGuTexFilter(min: TextureFilter, mag: TextureFilter); + pub fn sceGuTexFlush(); + pub fn sceGuTexFunc(tfx: TextureEffect, tcc: TextureColorComponent); + pub fn sceGuTexImage( + mipmap: MipmapLevel, + width: i32, + height: i32, + tbw: i32, + tbp: *const c_void, + ); + pub fn sceGuTexLevelMode(mode: TextureLevelMode, bias: f32); + pub fn sceGuTexMapMode(mode: TextureMapMode, a1: u32, a2: u32); + pub fn sceGuTexMode(tpsm: TexturePixelFormat, maxmips: i32, a2: i32, swizzle: i32); + pub fn sceGuTexOffset(u: f32, v: f32); + pub fn sceGuTexProjMapMode(mode: TextureProjectionMapMode); + pub fn sceGuTexScale(u: f32, v: f32); + pub fn sceGuTexSlope(slope: f32); + pub fn sceGuTexSync(); + pub fn sceGuTexWrap(u: GuTexWrapMode, v: GuTexWrapMode); + pub fn sceGuClutLoad(num_blocks: i32, cbp: *const c_void); + pub fn sceGuClutMode(cpsm: ClutPixelFormat, shift: u32, mask: u32, a3: u32); + pub fn sceGuOffset(x: u32, 
y: u32); + pub fn sceGuScissor(x: i32, y: i32, w: i32, h: i32); + pub fn sceGuViewport(cx: i32, cy: i32, width: i32, height: i32); + pub fn sceGuDrawBezier( + v_type: i32, + u_count: i32, + v_count: i32, + indices: *const c_void, + vertices: *const c_void, + ); + pub fn sceGuPatchDivide(ulevel: u32, vlevel: u32); + pub fn sceGuPatchFrontFace(a0: u32); + pub fn sceGuPatchPrim(prim: PatchPrimitive); + pub fn sceGuDrawSpline( + v_type: i32, + u_count: i32, + v_count: i32, + u_edge: i32, + v_edge: i32, + indices: *const c_void, + vertices: *const c_void, + ); + pub fn sceGuSetMatrix(type_: MatrixMode, matrix: &ScePspFMatrix4); + pub fn sceGuBoneMatrix(index: u32, matrix: &ScePspFMatrix4); + pub fn sceGuMorphWeight(index: i32, weight: f32); + pub fn sceGuDrawArrayN( + primitive_type: GuPrimitive, + v_type: i32, + count: i32, + a3: i32, + indices: *const c_void, + vertices: *const c_void, + ); + + pub fn sceGumDrawArray( + prim: GuPrimitive, + v_type: i32, + count: i32, + indices: *const c_void, + vertices: *const c_void, + ); + pub fn sceGumDrawArrayN( + prim: GuPrimitive, + v_type: i32, + count: i32, + a3: i32, + indices: *const c_void, + vertices: *const c_void, + ); + pub fn sceGumDrawBezier( + v_type: i32, + u_count: i32, + v_count: i32, + indices: *const c_void, + vertices: *const c_void, + ); + pub fn sceGumDrawSpline( + v_type: i32, + u_count: i32, + v_count: i32, + u_edge: i32, + v_edge: i32, + indices: *const c_void, + vertices: *const c_void, + ); + pub fn sceGumFastInverse(); + pub fn sceGumFullInverse(); + pub fn sceGumLoadIdentity(); + pub fn sceGumLoadMatrix(m: &ScePspFMatrix4); + pub fn sceGumLookAt(eye: &ScePspFVector3, center: &ScePspFVector3, up: &ScePspFVector3); + pub fn sceGumMatrixMode(mode: MatrixMode); + pub fn sceGumMultMatrix(m: &ScePspFMatrix4); + pub fn sceGumOrtho(left: f32, right: f32, bottom: f32, top: f32, near: f32, far: f32); + pub fn sceGumPerspective(fovy: f32, aspect: f32, near: f32, far: f32); + pub fn sceGumPopMatrix(); + pub fn sceGumPushMatrix(); + pub fn sceGumRotateX(angle: f32); + pub fn sceGumRotateY(angle: f32); + pub fn sceGumRotateZ(angle: f32); + pub fn sceGumRotateXYZ(v: &ScePspFVector3); + pub fn sceGumRotateZYX(v: &ScePspFVector3); + pub fn sceGumScale(v: &ScePspFVector3); + pub fn sceGumStoreMatrix(m: &mut ScePspFMatrix4); + pub fn sceGumTranslate(v: &ScePspFVector3); + pub fn sceGumUpdateMatrix(); + + pub fn sceMp3ReserveMp3Handle(args: *mut SceMp3InitArg) -> i32; + pub fn sceMp3ReleaseMp3Handle(handle: Mp3Handle) -> i32; + pub fn sceMp3InitResource() -> i32; + pub fn sceMp3TermResource() -> i32; + pub fn sceMp3Init(handle: Mp3Handle) -> i32; + pub fn sceMp3Decode(handle: Mp3Handle, dst: *mut *mut i16) -> i32; + pub fn sceMp3GetInfoToAddStreamData( + handle: Mp3Handle, + dst: *mut *mut u8, + to_write: *mut i32, + src_pos: *mut i32, + ) -> i32; + pub fn sceMp3NotifyAddStreamData(handle: Mp3Handle, size: i32) -> i32; + pub fn sceMp3CheckStreamDataNeeded(handle: Mp3Handle) -> i32; + pub fn sceMp3SetLoopNum(handle: Mp3Handle, loop_: i32) -> i32; + pub fn sceMp3GetLoopNum(handle: Mp3Handle) -> i32; + pub fn sceMp3GetSumDecodedSample(handle: Mp3Handle) -> i32; + pub fn sceMp3GetMaxOutputSample(handle: Mp3Handle) -> i32; + pub fn sceMp3GetSamplingRate(handle: Mp3Handle) -> i32; + pub fn sceMp3GetBitRate(handle: Mp3Handle) -> i32; + pub fn sceMp3GetMp3ChannelNum(handle: Mp3Handle) -> i32; + pub fn sceMp3ResetPlayPosition(handle: Mp3Handle) -> i32; + + pub fn sceRegOpenRegistry(reg: *mut Key, mode: i32, handle: *mut RegHandle) -> i32; + pub fn 
sceRegFlushRegistry(handle: RegHandle) -> i32; + pub fn sceRegCloseRegistry(handle: RegHandle) -> i32; + pub fn sceRegOpenCategory( + handle: RegHandle, + name: *const u8, + mode: i32, + dir_handle: *mut RegHandle, + ) -> i32; + pub fn sceRegRemoveCategory(handle: RegHandle, name: *const u8) -> i32; + pub fn sceRegCloseCategory(dir_handle: RegHandle) -> i32; + pub fn sceRegFlushCategory(dir_handle: RegHandle) -> i32; + pub fn sceRegGetKeyInfo( + dir_handle: RegHandle, + name: *const u8, + key_handle: *mut RegHandle, + type_: *mut KeyType, + size: *mut usize, + ) -> i32; + pub fn sceRegGetKeyInfoByName( + dir_handle: RegHandle, + name: *const u8, + type_: *mut KeyType, + size: *mut usize, + ) -> i32; + pub fn sceRegGetKeyValue( + dir_handle: RegHandle, + key_handle: RegHandle, + buf: *mut c_void, + size: usize, + ) -> i32; + pub fn sceRegGetKeyValueByName( + dir_handle: RegHandle, + name: *const u8, + buf: *mut c_void, + size: usize, + ) -> i32; + pub fn sceRegSetKeyValue( + dir_handle: RegHandle, + name: *const u8, + buf: *const c_void, + size: usize, + ) -> i32; + pub fn sceRegGetKeysNum(dir_handle: RegHandle, num: *mut i32) -> i32; + pub fn sceRegGetKeys(dir_handle: RegHandle, buf: *mut u8, num: i32) -> i32; + pub fn sceRegCreateKey(dir_handle: RegHandle, name: *const u8, type_: i32, size: usize) -> i32; + pub fn sceRegRemoveRegistry(key: *mut Key) -> i32; + + pub fn sceOpenPSIDGetOpenPSID(openpsid: *mut OpenPSID) -> i32; + + pub fn sceUtilityMsgDialogInitStart(params: *mut UtilityMsgDialogParams) -> i32; + pub fn sceUtilityMsgDialogShutdownStart(); + pub fn sceUtilityMsgDialogGetStatus() -> i32; + pub fn sceUtilityMsgDialogUpdate(n: i32); + pub fn sceUtilityMsgDialogAbort() -> i32; + pub fn sceUtilityNetconfInitStart(data: *mut UtilityNetconfData) -> i32; + pub fn sceUtilityNetconfShutdownStart() -> i32; + pub fn sceUtilityNetconfUpdate(unknown: i32) -> i32; + pub fn sceUtilityNetconfGetStatus() -> i32; + pub fn sceUtilityCheckNetParam(id: i32) -> i32; + pub fn sceUtilityGetNetParam(conf: i32, param: NetParam, data: *mut UtilityNetData) -> i32; + pub fn sceUtilitySavedataInitStart(params: *mut SceUtilitySavedataParam) -> i32; + pub fn sceUtilitySavedataGetStatus() -> i32; + pub fn sceUtilitySavedataShutdownStart() -> i32; + pub fn sceUtilitySavedataUpdate(unknown: i32); + pub fn sceUtilityGameSharingInitStart(params: *mut UtilityGameSharingParams) -> i32; + pub fn sceUtilityGameSharingShutdownStart(); + pub fn sceUtilityGameSharingGetStatus() -> i32; + pub fn sceUtilityGameSharingUpdate(n: i32); + pub fn sceUtilityHtmlViewerInitStart(params: *mut UtilityHtmlViewerParam) -> i32; + pub fn sceUtilityHtmlViewerShutdownStart() -> i32; + pub fn sceUtilityHtmlViewerUpdate(n: i32) -> i32; + pub fn sceUtilityHtmlViewerGetStatus() -> i32; + pub fn sceUtilitySetSystemParamInt(id: SystemParamId, value: i32) -> i32; + pub fn sceUtilitySetSystemParamString(id: SystemParamId, str: *const u8) -> i32; + pub fn sceUtilityGetSystemParamInt(id: SystemParamId, value: *mut i32) -> i32; + pub fn sceUtilityGetSystemParamString(id: SystemParamId, str: *mut u8, len: i32) -> i32; + pub fn sceUtilityOskInitStart(params: *mut SceUtilityOskParams) -> i32; + pub fn sceUtilityOskShutdownStart() -> i32; + pub fn sceUtilityOskUpdate(n: i32) -> i32; + pub fn sceUtilityOskGetStatus() -> i32; + pub fn sceUtilityLoadNetModule(module: NetModule) -> i32; + pub fn sceUtilityUnloadNetModule(module: NetModule) -> i32; + pub fn sceUtilityLoadAvModule(module: AvModule) -> i32; + pub fn sceUtilityUnloadAvModule(module: AvModule) -> 
i32; + pub fn sceUtilityLoadUsbModule(module: UsbModule) -> i32; + pub fn sceUtilityUnloadUsbModule(module: UsbModule) -> i32; + pub fn sceUtilityLoadModule(module: Module) -> i32; + pub fn sceUtilityUnloadModule(module: Module) -> i32; + pub fn sceUtilityCreateNetParam(conf: i32) -> i32; + pub fn sceUtilitySetNetParam(param: NetParam, val: *const c_void) -> i32; + pub fn sceUtilityCopyNetParam(src: i32, dest: i32) -> i32; + pub fn sceUtilityDeleteNetParam(conf: i32) -> i32; + + pub fn sceNetInit( + poolsize: i32, + calloutprio: i32, + calloutstack: i32, + netintrprio: i32, + netintrstack: i32, + ) -> i32; + pub fn sceNetTerm() -> i32; + pub fn sceNetFreeThreadinfo(thid: i32) -> i32; + pub fn sceNetThreadAbort(thid: i32) -> i32; + pub fn sceNetEtherStrton(name: *mut u8, mac: *mut u8); + pub fn sceNetEtherNtostr(mac: *mut u8, name: *mut u8); + pub fn sceNetGetLocalEtherAddr(mac: *mut u8) -> i32; + pub fn sceNetGetMallocStat(stat: *mut SceNetMallocStat) -> i32; + + pub fn sceNetAdhocctlInit( + stacksize: i32, + priority: i32, + adhoc_id: *mut SceNetAdhocctlAdhocId, + ) -> i32; + pub fn sceNetAdhocctlTerm() -> i32; + pub fn sceNetAdhocctlConnect(name: *const u8) -> i32; + pub fn sceNetAdhocctlDisconnect() -> i32; + pub fn sceNetAdhocctlGetState(event: *mut i32) -> i32; + pub fn sceNetAdhocctlCreate(name: *const u8) -> i32; + pub fn sceNetAdhocctlJoin(scaninfo: *mut SceNetAdhocctlScanInfo) -> i32; + pub fn sceNetAdhocctlGetAdhocId(id: *mut SceNetAdhocctlAdhocId) -> i32; + pub fn sceNetAdhocctlCreateEnterGameMode( + name: *const u8, + unknown: i32, + num: i32, + macs: *mut u8, + timeout: u32, + unknown2: i32, + ) -> i32; + pub fn sceNetAdhocctlJoinEnterGameMode( + name: *const u8, + hostmac: *mut u8, + timeout: u32, + unknown: i32, + ) -> i32; + pub fn sceNetAdhocctlGetGameModeInfo(gamemodeinfo: *mut SceNetAdhocctlGameModeInfo) -> i32; + pub fn sceNetAdhocctlExitGameMode() -> i32; + pub fn sceNetAdhocctlGetPeerList(length: *mut i32, buf: *mut c_void) -> i32; + pub fn sceNetAdhocctlGetPeerInfo( + mac: *mut u8, + size: i32, + peerinfo: *mut SceNetAdhocctlPeerInfo, + ) -> i32; + pub fn sceNetAdhocctlScan() -> i32; + pub fn sceNetAdhocctlGetScanInfo(length: *mut i32, buf: *mut c_void) -> i32; + pub fn sceNetAdhocctlAddHandler(handler: SceNetAdhocctlHandler, unknown: *mut c_void) -> i32; + pub fn sceNetAdhocctlDelHandler(id: i32) -> i32; + pub fn sceNetAdhocctlGetNameByAddr(mac: *mut u8, nickname: *mut u8) -> i32; + pub fn sceNetAdhocctlGetAddrByName( + nickname: *mut u8, + length: *mut i32, + buf: *mut c_void, + ) -> i32; + pub fn sceNetAdhocctlGetParameter(params: *mut SceNetAdhocctlParams) -> i32; + + pub fn sceNetAdhocInit() -> i32; + pub fn sceNetAdhocTerm() -> i32; + pub fn sceNetAdhocPdpCreate(mac: *mut u8, port: u16, buf_size: u32, unk1: i32) -> i32; + pub fn sceNetAdhocPdpDelete(id: i32, unk1: i32) -> i32; + pub fn sceNetAdhocPdpSend( + id: i32, + dest_mac_addr: *mut u8, + port: u16, + data: *mut c_void, + len: u32, + timeout: u32, + nonblock: i32, + ) -> i32; + pub fn sceNetAdhocPdpRecv( + id: i32, + src_mac_addr: *mut u8, + port: *mut u16, + data: *mut c_void, + data_length: *mut c_void, + timeout: u32, + nonblock: i32, + ) -> i32; + pub fn sceNetAdhocGetPdpStat(size: *mut i32, stat: *mut SceNetAdhocPdpStat) -> i32; + pub fn sceNetAdhocGameModeCreateMaster(data: *mut c_void, size: i32) -> i32; + pub fn sceNetAdhocGameModeCreateReplica(mac: *mut u8, data: *mut c_void, size: i32) -> i32; + pub fn sceNetAdhocGameModeUpdateMaster() -> i32; + pub fn sceNetAdhocGameModeUpdateReplica(id: i32, 
unk1: i32) -> i32; + pub fn sceNetAdhocGameModeDeleteMaster() -> i32; + pub fn sceNetAdhocGameModeDeleteReplica(id: i32) -> i32; + pub fn sceNetAdhocPtpOpen( + srcmac: *mut u8, + srcport: u16, + destmac: *mut u8, + destport: u16, + buf_size: u32, + delay: u32, + count: i32, + unk1: i32, + ) -> i32; + pub fn sceNetAdhocPtpConnect(id: i32, timeout: u32, nonblock: i32) -> i32; + pub fn sceNetAdhocPtpListen( + srcmac: *mut u8, + srcport: u16, + buf_size: u32, + delay: u32, + count: i32, + queue: i32, + unk1: i32, + ) -> i32; + pub fn sceNetAdhocPtpAccept( + id: i32, + mac: *mut u8, + port: *mut u16, + timeout: u32, + nonblock: i32, + ) -> i32; + pub fn sceNetAdhocPtpSend( + id: i32, + data: *mut c_void, + data_size: *mut i32, + timeout: u32, + nonblock: i32, + ) -> i32; + pub fn sceNetAdhocPtpRecv( + id: i32, + data: *mut c_void, + data_size: *mut i32, + timeout: u32, + nonblock: i32, + ) -> i32; + pub fn sceNetAdhocPtpFlush(id: i32, timeout: u32, nonblock: i32) -> i32; + pub fn sceNetAdhocPtpClose(id: i32, unk1: i32) -> i32; + pub fn sceNetAdhocGetPtpStat(size: *mut i32, stat: *mut SceNetAdhocPtpStat) -> i32; +} + +extern "C" { + pub fn sceNetAdhocMatchingInit(memsize: i32) -> i32; + pub fn sceNetAdhocMatchingTerm() -> i32; + pub fn sceNetAdhocMatchingCreate( + mode: AdhocMatchingMode, + max_peers: i32, + port: u16, + buf_size: i32, + hello_delay: u32, + ping_delay: u32, + init_count: i32, + msg_delay: u32, + callback: AdhocMatchingCallback, + ) -> i32; + pub fn sceNetAdhocMatchingDelete(matching_id: i32) -> i32; + pub fn sceNetAdhocMatchingStart( + matching_id: i32, + evth_pri: i32, + evth_stack: i32, + inth_pri: i32, + inth_stack: i32, + opt_len: i32, + opt_data: *mut c_void, + ) -> i32; + pub fn sceNetAdhocMatchingStop(matching_id: i32) -> i32; + pub fn sceNetAdhocMatchingSelectTarget( + matching_id: i32, + mac: *mut u8, + opt_len: i32, + opt_data: *mut c_void, + ) -> i32; + pub fn sceNetAdhocMatchingCancelTarget(matching_id: i32, mac: *mut u8) -> i32; + pub fn sceNetAdhocMatchingCancelTargetWithOpt( + matching_id: i32, + mac: *mut u8, + opt_len: i32, + opt_data: *mut c_void, + ) -> i32; + pub fn sceNetAdhocMatchingSendData( + matching_id: i32, + mac: *mut u8, + data_len: i32, + data: *mut c_void, + ) -> i32; + pub fn sceNetAdhocMatchingAbortSendData(matching_id: i32, mac: *mut u8) -> i32; + pub fn sceNetAdhocMatchingSetHelloOpt( + matching_id: i32, + opt_len: i32, + opt_data: *mut c_void, + ) -> i32; + pub fn sceNetAdhocMatchingGetHelloOpt( + matching_id: i32, + opt_len: *mut i32, + opt_data: *mut c_void, + ) -> i32; + pub fn sceNetAdhocMatchingGetMembers( + matching_id: i32, + length: *mut i32, + buf: *mut c_void, + ) -> i32; + pub fn sceNetAdhocMatchingGetPoolMaxAlloc() -> i32; + pub fn sceNetAdhocMatchingGetPoolStat(poolstat: *mut AdhocPoolStat) -> i32; +} + +extern "C" { + pub fn sceNetApctlInit(stack_size: i32, init_priority: i32) -> i32; + pub fn sceNetApctlTerm() -> i32; + pub fn sceNetApctlGetInfo(code: ApctlInfo, pinfo: *mut SceNetApctlInfo) -> i32; + pub fn sceNetApctlAddHandler(handler: SceNetApctlHandler, parg: *mut c_void) -> i32; + pub fn sceNetApctlDelHandler(handler_id: i32) -> i32; + pub fn sceNetApctlConnect(conn_index: i32) -> i32; + pub fn sceNetApctlDisconnect() -> i32; + pub fn sceNetApctlGetState(pstate: *mut ApctlState) -> i32; + + pub fn sceNetInetInit() -> i32; + pub fn sceNetInetTerm() -> i32; + pub fn sceNetInetAccept(s: i32, addr: *mut sockaddr, addr_len: *mut socklen_t) -> i32; + pub fn sceNetInetBind(s: i32, my_addr: *const sockaddr, addr_len: socklen_t) -> 
i32; + pub fn sceNetInetConnect(s: i32, serv_addr: *const sockaddr, addr_len: socklen_t) -> i32; + pub fn sceNetInetGetsockopt( + s: i32, + level: i32, + opt_name: i32, + opt_val: *mut c_void, + optl_en: *mut socklen_t, + ) -> i32; + pub fn sceNetInetListen(s: i32, backlog: i32) -> i32; + pub fn sceNetInetRecv(s: i32, buf: *mut c_void, len: usize, flags: i32) -> usize; + pub fn sceNetInetRecvfrom( + s: i32, + buf: *mut c_void, + flags: usize, + arg1: i32, + from: *mut sockaddr, + from_len: *mut socklen_t, + ) -> usize; + pub fn sceNetInetSend(s: i32, buf: *const c_void, len: usize, flags: i32) -> usize; + pub fn sceNetInetSendto( + s: i32, + buf: *const c_void, + len: usize, + flags: i32, + to: *const sockaddr, + to_len: socklen_t, + ) -> usize; + pub fn sceNetInetSetsockopt( + s: i32, + level: i32, + opt_name: i32, + opt_val: *const c_void, + opt_len: socklen_t, + ) -> i32; + pub fn sceNetInetShutdown(s: i32, how: i32) -> i32; + pub fn sceNetInetSocket(domain: i32, type_: i32, protocol: i32) -> i32; + pub fn sceNetInetClose(s: i32) -> i32; + pub fn sceNetInetGetErrno() -> i32; + + pub fn sceSslInit(unknown1: i32) -> i32; + pub fn sceSslEnd() -> i32; + pub fn sceSslGetUsedMemoryMax(memory: *mut u32) -> i32; + pub fn sceSslGetUsedMemoryCurrent(memory: *mut u32) -> i32; + + pub fn sceHttpInit(unknown1: u32) -> i32; + pub fn sceHttpEnd() -> i32; + pub fn sceHttpCreateTemplate(agent: *mut u8, unknown1: i32, unknown2: i32) -> i32; + pub fn sceHttpDeleteTemplate(templateid: i32) -> i32; + pub fn sceHttpCreateConnection( + templateid: i32, + host: *mut u8, + unknown1: *mut u8, + port: u16, + unknown2: i32, + ) -> i32; + pub fn sceHttpCreateConnectionWithURL(templateid: i32, url: *const u8, unknown1: i32) -> i32; + pub fn sceHttpDeleteConnection(connection_id: i32) -> i32; + pub fn sceHttpCreateRequest( + connection_id: i32, + method: HttpMethod, + path: *mut u8, + content_length: u64, + ) -> i32; + pub fn sceHttpCreateRequestWithURL( + connection_id: i32, + method: HttpMethod, + url: *mut u8, + content_length: u64, + ) -> i32; + pub fn sceHttpDeleteRequest(request_id: i32) -> i32; + pub fn sceHttpSendRequest(request_id: i32, data: *mut c_void, data_size: u32) -> i32; + pub fn sceHttpAbortRequest(request_id: i32) -> i32; + pub fn sceHttpReadData(request_id: i32, data: *mut c_void, data_size: u32) -> i32; + pub fn sceHttpGetContentLength(request_id: i32, content_length: *mut u64) -> i32; + pub fn sceHttpGetStatusCode(request_id: i32, status_code: *mut i32) -> i32; + pub fn sceHttpSetResolveTimeOut(id: i32, timeout: u32) -> i32; + pub fn sceHttpSetResolveRetry(id: i32, count: i32) -> i32; + pub fn sceHttpSetConnectTimeOut(id: i32, timeout: u32) -> i32; + pub fn sceHttpSetSendTimeOut(id: i32, timeout: u32) -> i32; + pub fn sceHttpSetRecvTimeOut(id: i32, timeout: u32) -> i32; + pub fn sceHttpEnableKeepAlive(id: i32) -> i32; + pub fn sceHttpDisableKeepAlive(id: i32) -> i32; + pub fn sceHttpEnableRedirect(id: i32) -> i32; + pub fn sceHttpDisableRedirect(id: i32) -> i32; + pub fn sceHttpEnableCookie(id: i32) -> i32; + pub fn sceHttpDisableCookie(id: i32) -> i32; + pub fn sceHttpSaveSystemCookie() -> i32; + pub fn sceHttpLoadSystemCookie() -> i32; + pub fn sceHttpAddExtraHeader(id: i32, name: *mut u8, value: *mut u8, unknown1: i32) -> i32; + pub fn sceHttpDeleteHeader(id: i32, name: *const u8) -> i32; + pub fn sceHttpsInit(unknown1: i32, unknown2: i32, unknown3: i32, unknown4: i32) -> i32; + pub fn sceHttpsEnd() -> i32; + pub fn sceHttpsLoadDefaultCert(unknown1: i32, unknown2: i32) -> i32; + pub fn 
sceHttpDisableAuth(id: i32) -> i32; + pub fn sceHttpDisableCache(id: i32) -> i32; + pub fn sceHttpEnableAuth(id: i32) -> i32; + pub fn sceHttpEnableCache(id: i32) -> i32; + pub fn sceHttpEndCache() -> i32; + pub fn sceHttpGetAllHeader(request: i32, header: *mut *mut u8, header_size: *mut u32) -> i32; + pub fn sceHttpGetNetworkErrno(request: i32, err_num: *mut i32) -> i32; + pub fn sceHttpGetProxy( + id: i32, + activate_flag: *mut i32, + mode: *mut i32, + proxy_host: *mut u8, + len: usize, + proxy_port: *mut u16, + ) -> i32; + pub fn sceHttpInitCache(max_size: usize) -> i32; + pub fn sceHttpSetAuthInfoCB(id: i32, cbfunc: HttpPasswordCB) -> i32; + pub fn sceHttpSetProxy( + id: i32, + activate_flag: i32, + mode: i32, + new_proxy_host: *const u8, + new_proxy_port: u16, + ) -> i32; + pub fn sceHttpSetResHeaderMaxSize(id: i32, header_size: u32) -> i32; + pub fn sceHttpSetMallocFunction( + malloc_func: HttpMallocFunction, + free_func: HttpFreeFunction, + realloc_func: HttpReallocFunction, + ) -> i32; + + pub fn sceNetResolverInit() -> i32; + pub fn sceNetResolverCreate(rid: *mut i32, buf: *mut c_void, buf_length: u32) -> i32; + pub fn sceNetResolverDelete(rid: i32) -> i32; + pub fn sceNetResolverStartNtoA( + rid: i32, + hostname: *const u8, + addr: *mut in_addr, + timeout: u32, + retry: i32, + ) -> i32; + pub fn sceNetResolverStartAtoN( + rid: i32, + addr: *const in_addr, + hostname: *mut u8, + hostname_len: u32, + timeout: u32, + retry: i32, + ) -> i32; + pub fn sceNetResolverStop(rid: i32) -> i32; + pub fn sceNetResolverTerm() -> i32; +} diff --git a/vendor/libc/src/sgx.rs b/vendor/libc/src/sgx.rs new file mode 100644 index 00000000000000..9cf9c6d3b41b8d --- /dev/null +++ b/vendor/libc/src/sgx.rs @@ -0,0 +1,15 @@ +//! SGX C types definition + +use crate::prelude::*; + +pub type intmax_t = i64; +pub type uintmax_t = u64; + +pub type size_t = usize; +pub type ptrdiff_t = isize; +pub type intptr_t = isize; +pub type uintptr_t = usize; +pub type ssize_t = isize; + +pub const INT_MIN: c_int = -2147483648; +pub const INT_MAX: c_int = 2147483647; diff --git a/vendor/libc/src/solid/aarch64.rs b/vendor/libc/src/solid/aarch64.rs new file mode 100644 index 00000000000000..376783c8234baf --- /dev/null +++ b/vendor/libc/src/solid/aarch64.rs @@ -0,0 +1 @@ +pub type wchar_t = u32; diff --git a/vendor/libc/src/solid/arm.rs b/vendor/libc/src/solid/arm.rs new file mode 100644 index 00000000000000..376783c8234baf --- /dev/null +++ b/vendor/libc/src/solid/arm.rs @@ -0,0 +1 @@ +pub type wchar_t = u32; diff --git a/vendor/libc/src/solid/mod.rs b/vendor/libc/src/solid/mod.rs new file mode 100644 index 00000000000000..40d6a9d3485868 --- /dev/null +++ b/vendor/libc/src/solid/mod.rs @@ -0,0 +1,876 @@ +//! Interface to the [SOLID] C library +//! +//! 
[SOLID]: https://solid.kmckk.com/ + +use crate::prelude::*; + +pub type intmax_t = i64; +pub type uintmax_t = u64; + +pub type uintptr_t = usize; +pub type intptr_t = isize; +pub type ptrdiff_t = isize; +pub type size_t = crate::uintptr_t; +pub type ssize_t = intptr_t; + +pub type clock_t = c_uint; +pub type time_t = i64; +pub type clockid_t = c_int; +pub type timer_t = c_int; +pub type suseconds_t = c_int; +pub type useconds_t = c_uint; + +pub type sighandler_t = size_t; + +// sys/ansi.h +pub type __caddr_t = *mut c_char; +pub type __gid_t = u32; +pub type __in_addr_t = u32; +pub type __in_port_t = u16; +pub type __mode_t = u32; +pub type __off_t = i64; +pub type __pid_t = i32; +pub type __sa_family_t = u8; +pub type __socklen_t = c_uint; +pub type __uid_t = u32; +pub type __fsblkcnt_t = u64; +pub type __fsfilcnt_t = u64; + +// locale.h +pub type locale_t = usize; + +// nl_types.h +pub type nl_item = c_long; + +// sys/types.h +pub type __va_list = *mut c_char; +pub type u_int8_t = u8; +pub type u_int16_t = u16; +pub type u_int32_t = u32; +pub type u_int64_t = u64; +pub type u_char = c_uchar; +pub type u_short = c_ushort; +pub type u_int = c_uint; +pub type u_long = c_ulong; +pub type unchar = c_uchar; +pub type ushort = c_ushort; +pub type uint = c_uint; +pub type ulong = c_ulong; +pub type u_quad_t = u64; +pub type quad_t = i64; +pub type qaddr_t = *mut quad_t; +pub type longlong_t = i64; +pub type u_longlong_t = u64; +pub type blkcnt_t = i64; +pub type blksize_t = i32; +pub type fsblkcnt_t = __fsblkcnt_t; +pub type fsfilcnt_t = __fsfilcnt_t; +pub type caddr_t = __caddr_t; +pub type daddr_t = i64; +pub type dev_t = u64; +pub type fixpt_t = u32; +pub type gid_t = __gid_t; +pub type idtype_t = c_int; +pub type id_t = u32; +pub type ino_t = u64; +pub type key_t = c_long; +pub type mode_t = __mode_t; +pub type nlink_t = u32; +pub type off_t = __off_t; +pub type pid_t = __pid_t; +pub type lwpid_t = i32; +pub type rlim_t = u64; +pub type segsz_t = i32; +pub type swblk_t = i32; +pub type mqd_t = c_int; +pub type cpuid_t = c_ulong; +pub type psetid_t = c_int; + +s! 
{ + // stat.h + pub struct stat { + pub st_dev: dev_t, + pub st_ino: ino_t, + pub st_mode: c_short, + pub st_nlink: c_short, + pub st_uid: c_short, + pub st_gid: c_short, + pub st_rdev: dev_t, + pub st_size: off_t, + pub st_atime: time_t, + pub st_mtime: time_t, + pub st_ctime: time_t, + pub st_blksize: blksize_t, + } + + // time.h + pub struct tm { + pub tm_sec: c_int, + pub tm_min: c_int, + pub tm_hour: c_int, + pub tm_mday: c_int, + pub tm_mon: c_int, + pub tm_year: c_int, + pub tm_wday: c_int, + pub tm_yday: c_int, + pub tm_isdst: c_int, + pub tm_gmtoff: c_long, + pub tm_zone: *mut c_char, + } + + // stdlib.h + pub struct qdiv_t { + pub quot: quad_t, + pub rem: quad_t, + } + pub struct lldiv_t { + pub quot: c_longlong, + pub rem: c_longlong, + } + pub struct div_t { + pub quot: c_int, + pub rem: c_int, + } + pub struct ldiv_t { + pub quot: c_long, + pub rem: c_long, + } + + // locale.h + pub struct lconv { + pub decimal_point: *mut c_char, + pub thousands_sep: *mut c_char, + pub grouping: *mut c_char, + pub int_curr_symbol: *mut c_char, + pub currency_symbol: *mut c_char, + pub mon_decimal_point: *mut c_char, + pub mon_thousands_sep: *mut c_char, + pub mon_grouping: *mut c_char, + pub positive_sign: *mut c_char, + pub negative_sign: *mut c_char, + pub int_frac_digits: c_char, + pub frac_digits: c_char, + pub p_cs_precedes: c_char, + pub p_sep_by_space: c_char, + pub n_cs_precedes: c_char, + pub n_sep_by_space: c_char, + pub p_sign_posn: c_char, + pub n_sign_posn: c_char, + pub int_p_cs_precedes: c_char, + pub int_n_cs_precedes: c_char, + pub int_p_sep_by_space: c_char, + pub int_n_sep_by_space: c_char, + pub int_p_sign_posn: c_char, + pub int_n_sign_posn: c_char, + } + + pub struct iovec { + pub iov_base: *mut c_void, + pub iov_len: size_t, + } + + pub struct timeval { + pub tv_sec: c_long, + pub tv_usec: c_long, + } +} + +pub const INT_MIN: c_int = -2147483648; +pub const INT_MAX: c_int = 2147483647; + +pub const EXIT_FAILURE: c_int = 1; +pub const EXIT_SUCCESS: c_int = 0; +pub const RAND_MAX: c_int = 0x7fffffff; +pub const EOF: c_int = -1; +pub const SEEK_SET: c_int = 0; +pub const SEEK_CUR: c_int = 1; +pub const SEEK_END: c_int = 2; +pub const _IOFBF: c_int = 0; +pub const _IONBF: c_int = 2; +pub const _IOLBF: c_int = 1; +pub const BUFSIZ: c_uint = 1024; +pub const FOPEN_MAX: c_uint = 20; +pub const FILENAME_MAX: c_uint = 1024; + +pub const O_RDONLY: c_int = 1; +pub const O_WRONLY: c_int = 2; +pub const O_RDWR: c_int = 4; +pub const O_APPEND: c_int = 8; +pub const O_CREAT: c_int = 0x10; +pub const O_EXCL: c_int = 0x400; +pub const O_TEXT: c_int = 0x100; +pub const O_BINARY: c_int = 0x200; +pub const O_TRUNC: c_int = 0x20; +pub const S_IEXEC: c_short = 0o0100; +pub const S_IWRITE: c_short = 0o0200; +pub const S_IREAD: c_short = 0o0400; +pub const S_IFCHR: c_short = 0o2_0000; +pub const S_IFDIR: c_short = 0o4_0000; +pub const S_IFMT: c_short = 0o16_0000; +pub const S_IFIFO: c_short = 0o1_0000; +pub const S_IFBLK: c_short = 0o6_0000; +pub const S_IFREG: c_short = 0o10_0000; + +pub const LC_ALL: c_int = 0; +pub const LC_COLLATE: c_int = 1; +pub const LC_CTYPE: c_int = 2; +pub const LC_MONETARY: c_int = 3; +pub const LC_NUMERIC: c_int = 4; +pub const LC_TIME: c_int = 5; +pub const LC_MESSAGES: c_int = 6; +pub const _LC_LAST: c_int = 7; + +pub const EPERM: c_int = 1; +pub const ENOENT: c_int = 2; +pub const ESRCH: c_int = 3; +pub const EINTR: c_int = 4; +pub const EIO: c_int = 5; +pub const ENXIO: c_int = 6; +pub const E2BIG: c_int = 7; +pub const ENOEXEC: c_int = 8; +pub const EBADF: 
c_int = 9; +pub const ECHILD: c_int = 10; +pub const EAGAIN: c_int = 11; +pub const ENOMEM: c_int = 12; +pub const EACCES: c_int = 13; +pub const EFAULT: c_int = 14; +pub const ENOTBLK: c_int = 15; +pub const EBUSY: c_int = 16; +pub const EEXIST: c_int = 17; +pub const EXDEV: c_int = 18; +pub const ENODEV: c_int = 19; +pub const ENOTDIR: c_int = 20; +pub const EISDIR: c_int = 21; +pub const EINVAL: c_int = 22; +pub const ENFILE: c_int = 23; +pub const EMFILE: c_int = 24; +pub const ENOTTY: c_int = 25; +pub const ETXTBSY: c_int = 26; +pub const EFBIG: c_int = 27; +pub const ENOSPC: c_int = 28; +pub const ESPIPE: c_int = 29; +pub const EROFS: c_int = 30; +pub const EMLINK: c_int = 31; +pub const EPIPE: c_int = 32; +pub const EDOM: c_int = 33; +pub const ERANGE: c_int = 34; + +pub const EDEADLK: c_int = 35; +pub const ENAMETOOLONG: c_int = 36; +pub const ENOLCK: c_int = 37; +pub const ENOSYS: c_int = 38; +pub const ENOTEMPTY: c_int = 39; +pub const ELOOP: c_int = 40; +pub const EWOULDBLOCK: c_int = EAGAIN; +pub const ENOMSG: c_int = 42; +pub const EIDRM: c_int = 43; +pub const ECHRNG: c_int = 44; +pub const EL2NSYNC: c_int = 45; +pub const EL3HLT: c_int = 46; +pub const EL3RST: c_int = 47; +pub const ELNRNG: c_int = 48; +pub const EUNATCH: c_int = 49; +pub const ENOCSI: c_int = 50; +pub const EL2HLT: c_int = 51; +pub const EBADE: c_int = 52; +pub const EBADR: c_int = 53; +pub const EXFULL: c_int = 54; +pub const ENOANO: c_int = 55; +pub const EBADRQC: c_int = 56; +pub const EBADSLT: c_int = 57; + +pub const EDEADLOCK: c_int = EDEADLK; + +pub const EBFONT: c_int = 59; +pub const ENOSTR: c_int = 60; +pub const ENODATA: c_int = 61; +pub const ETIME: c_int = 62; +pub const ENOSR: c_int = 63; +pub const ENONET: c_int = 64; +pub const ENOPKG: c_int = 65; +pub const EREMOTE: c_int = 66; +pub const ENOLINK: c_int = 67; +pub const EADV: c_int = 68; +pub const ESRMNT: c_int = 69; +pub const ECOMM: c_int = 70; +pub const EPROTO: c_int = 71; +pub const EMULTIHOP: c_int = 72; +pub const EDOTDOT: c_int = 73; +pub const EBADMSG: c_int = 74; +pub const EOVERFLOW: c_int = 75; +pub const ENOTUNIQ: c_int = 76; +pub const EBADFD: c_int = 77; +pub const EREMCHG: c_int = 78; +pub const ELIBACC: c_int = 79; +pub const ELIBBAD: c_int = 80; +pub const ELIBSCN: c_int = 81; +pub const ELIBMAX: c_int = 82; +pub const ELIBEXEC: c_int = 83; +pub const EILSEQ: c_int = 84; +pub const ERESTART: c_int = 85; +pub const ESTRPIPE: c_int = 86; +pub const EUSERS: c_int = 87; +pub const ENOTSOCK: c_int = 88; +pub const EDESTADDRREQ: c_int = 89; +pub const EMSGSIZE: c_int = 90; +pub const EPROTOTYPE: c_int = 91; +pub const ENOPROTOOPT: c_int = 92; +pub const EPROTONOSUPPORT: c_int = 93; +pub const ESOCKTNOSUPPORT: c_int = 94; +pub const EOPNOTSUPP: c_int = 95; +pub const EPFNOSUPPORT: c_int = 96; +pub const EAFNOSUPPORT: c_int = 97; +pub const EADDRINUSE: c_int = 98; +pub const EADDRNOTAVAIL: c_int = 99; +pub const ENETDOWN: c_int = 100; +pub const ENETUNREACH: c_int = 101; +pub const ENETRESET: c_int = 102; +pub const ECONNABORTED: c_int = 103; +pub const ECONNRESET: c_int = 104; +pub const ENOBUFS: c_int = 105; +pub const EISCONN: c_int = 106; +pub const ENOTCONN: c_int = 107; +pub const ESHUTDOWN: c_int = 108; +pub const ETOOMANYREFS: c_int = 109; +pub const ETIMEDOUT: c_int = 110; +pub const ECONNREFUSED: c_int = 111; +pub const EHOSTDOWN: c_int = 112; +pub const EHOSTUNREACH: c_int = 113; +pub const EALREADY: c_int = 114; +pub const EINPROGRESS: c_int = 115; +pub const ESTALE: c_int = 116; +pub const EUCLEAN: c_int = 117; +pub 
const ENOTNAM: c_int = 118; +pub const ENAVAIL: c_int = 119; +pub const EISNAM: c_int = 120; +pub const EREMOTEIO: c_int = 121; +pub const EDQUOT: c_int = 122; + +pub const ENOMEDIUM: c_int = 123; +pub const EMEDIUMTYPE: c_int = 124; +pub const ECANCELED: c_int = 125; +pub const ENOKEY: c_int = 126; +pub const EKEYEXPIRED: c_int = 127; +pub const EKEYREVOKED: c_int = 128; +pub const EKEYREJECTED: c_int = 129; + +pub const EOWNERDEAD: c_int = 130; +pub const ENOTRECOVERABLE: c_int = 131; + +pub const ENOTSUP: c_int = 132; +pub const EFTYPE: c_int = 133; + +// signal codes +pub const SIGHUP: c_int = 1; +pub const SIGINT: c_int = 2; +pub const SIGQUIT: c_int = 3; +pub const SIGILL: c_int = 4; +pub const SIGTRAP: c_int = 5; +pub const SIGABRT: c_int = 6; +pub const SIGIOT: c_int = SIGABRT; +pub const SIGEMT: c_int = 7; +pub const SIGFPE: c_int = 8; +pub const SIGKILL: c_int = 9; +pub const SIGBUS: c_int = 10; +pub const SIGSEGV: c_int = 11; +pub const SIGSYS: c_int = 12; +pub const SIGPIPE: c_int = 13; +pub const SIGALRM: c_int = 14; +pub const SIGTERM: c_int = 15; +pub const SIGURG: c_int = 16; +pub const SIGSTOP: c_int = 17; +pub const SIGTSTP: c_int = 18; +pub const SIGCONT: c_int = 19; +pub const SIGCHLD: c_int = 20; +pub const SIGTTIN: c_int = 21; +pub const SIGTTOU: c_int = 22; +pub const SIGIO: c_int = 23; +pub const SIGXCPU: c_int = 24; +pub const SIGXFSZ: c_int = 25; +pub const SIGVTALRM: c_int = 26; +pub const SIGPROF: c_int = 27; +pub const SIGWINCH: c_int = 28; +pub const SIGINFO: c_int = 29; +pub const SIGUSR1: c_int = 30; +pub const SIGUSR2: c_int = 31; +pub const SIGPWR: c_int = 32; + +#[derive(Debug)] +pub enum FILE {} +impl Copy for FILE {} +impl Clone for FILE { + fn clone(&self) -> FILE { + *self + } +} +#[derive(Debug)] +pub enum fpos_t {} +impl Copy for fpos_t {} +impl Clone for fpos_t { + fn clone(&self) -> fpos_t { + *self + } +} + +extern "C" { + // ctype.h + pub fn isalnum(c: c_int) -> c_int; + pub fn isalpha(c: c_int) -> c_int; + pub fn iscntrl(c: c_int) -> c_int; + pub fn isdigit(c: c_int) -> c_int; + pub fn isgraph(c: c_int) -> c_int; + pub fn islower(c: c_int) -> c_int; + pub fn isprint(c: c_int) -> c_int; + pub fn ispunct(c: c_int) -> c_int; + pub fn isspace(c: c_int) -> c_int; + pub fn isupper(c: c_int) -> c_int; + pub fn isxdigit(c: c_int) -> c_int; + pub fn isblank(c: c_int) -> c_int; + pub fn tolower(c: c_int) -> c_int; + pub fn toupper(c: c_int) -> c_int; + + // stdio.h + pub fn __get_stdio_file(fileno: c_int) -> *mut FILE; + pub fn clearerr(arg1: *mut FILE); + pub fn fclose(arg1: *mut FILE) -> c_int; + pub fn feof(arg1: *mut FILE) -> c_int; + pub fn ferror(arg1: *mut FILE) -> c_int; + pub fn fflush(arg1: *mut FILE) -> c_int; + pub fn fgetc(arg1: *mut FILE) -> c_int; + pub fn fgets(arg1: *mut c_char, arg2: c_int, arg3: *mut FILE) -> *mut c_char; + pub fn fopen(arg1: *const c_char, arg2: *const c_char) -> *mut FILE; + pub fn fprintf(arg1: *mut FILE, arg2: *const c_char, ...) -> c_int; + pub fn fputc(arg1: c_int, arg2: *mut FILE) -> c_int; + pub fn fputs(arg1: *const c_char, arg2: *mut FILE) -> c_int; + pub fn fread(arg1: *mut c_void, arg2: size_t, arg3: size_t, arg4: *mut FILE) -> size_t; + pub fn freopen(arg1: *const c_char, arg2: *const c_char, arg3: *mut FILE) -> *mut FILE; + pub fn fscanf(arg1: *mut FILE, arg2: *const c_char, ...) 
-> c_int; + pub fn fseek(arg1: *mut FILE, arg2: c_long, arg3: c_int) -> c_int; + pub fn ftell(arg1: *mut FILE) -> c_long; + pub fn fwrite(arg1: *const c_void, arg2: size_t, arg3: size_t, arg4: *mut FILE) -> size_t; + pub fn getc(arg1: *mut FILE) -> c_int; + pub fn getchar() -> c_int; + pub fn perror(arg1: *const c_char); + pub fn printf(arg1: *const c_char, ...) -> c_int; + pub fn putc(arg1: c_int, arg2: *mut FILE) -> c_int; + pub fn putchar(arg1: c_int) -> c_int; + pub fn puts(arg1: *const c_char) -> c_int; + pub fn remove(arg1: *const c_char) -> c_int; + pub fn rewind(arg1: *mut FILE); + pub fn scanf(arg1: *const c_char, ...) -> c_int; + pub fn setbuf(arg1: *mut FILE, arg2: *mut c_char); + pub fn setvbuf(arg1: *mut FILE, arg2: *mut c_char, arg3: c_int, arg4: size_t) -> c_int; + pub fn sscanf(arg1: *const c_char, arg2: *const c_char, ...) -> c_int; + pub fn tmpfile() -> *mut FILE; + pub fn ungetc(arg1: c_int, arg2: *mut FILE) -> c_int; + pub fn vfprintf(arg1: *mut FILE, arg2: *const c_char, arg3: __va_list) -> c_int; + pub fn vprintf(arg1: *const c_char, arg2: __va_list) -> c_int; + pub fn gets(arg1: *mut c_char) -> *mut c_char; + pub fn sprintf(arg1: *mut c_char, arg2: *const c_char, ...) -> c_int; + pub fn tmpnam(arg1: *const c_char) -> *mut c_char; + pub fn vsprintf(arg1: *mut c_char, arg2: *const c_char, arg3: __va_list) -> c_int; + pub fn rename(arg1: *const c_char, arg2: *const c_char) -> c_int; + pub fn asiprintf(arg1: *mut *mut c_char, arg2: *const c_char, ...) -> c_int; + pub fn fiprintf(arg1: *mut FILE, arg2: *const c_char, ...) -> c_int; + pub fn fiscanf(arg1: *mut FILE, arg2: *const c_char, ...) -> c_int; + pub fn iprintf(arg1: *const c_char, ...) -> c_int; + pub fn iscanf(arg1: *const c_char, ...) -> c_int; + pub fn siprintf(arg1: *mut c_char, arg2: *const c_char, ...) -> c_int; + pub fn siscanf(arg1: *mut c_char, arg2: *const c_char, ...) -> c_int; + pub fn sniprintf(arg1: *mut c_char, arg2: size_t, arg3: *const c_char, ...) -> c_int; + pub fn vasiprintf(arg1: *mut *mut c_char, arg2: *const c_char, arg3: __va_list) -> c_int; + pub fn vfiprintf(arg1: *mut FILE, arg2: *const c_char, arg3: __va_list) -> c_int; + pub fn vfiscanf(arg1: *mut FILE, arg2: *const c_char, arg3: __va_list) -> c_int; + pub fn viprintf(arg1: *const c_char, arg2: __va_list) -> c_int; + pub fn viscanf(arg1: *const c_char, arg2: __va_list) -> c_int; + pub fn vsiprintf(arg1: *mut c_char, arg2: *const c_char, arg3: __va_list) -> c_int; + pub fn vsiscanf(arg1: *const c_char, arg2: *const c_char, arg3: __va_list) -> c_int; + pub fn vsniprintf( + arg1: *mut c_char, + arg2: size_t, + arg3: *const c_char, + arg4: __va_list, + ) -> c_int; + pub fn vdiprintf(arg1: c_int, arg2: *const c_char, arg3: __va_list) -> c_int; + pub fn diprintf(arg1: c_int, arg2: *const c_char, ...) -> c_int; + pub fn fgetpos(arg1: *mut FILE, arg2: *mut fpos_t) -> c_int; + pub fn fsetpos(arg1: *mut FILE, arg2: *const fpos_t) -> c_int; + pub fn fdopen(arg1: c_int, arg2: *const c_char) -> *mut FILE; + pub fn fileno(arg1: *mut FILE) -> c_int; + pub fn flockfile(arg1: *mut FILE); + pub fn ftrylockfile(arg1: *mut FILE) -> c_int; + pub fn funlockfile(arg1: *mut FILE); + pub fn getc_unlocked(arg1: *mut FILE) -> c_int; + pub fn getchar_unlocked() -> c_int; + pub fn putc_unlocked(arg1: c_int, arg2: *mut FILE) -> c_int; + pub fn putchar_unlocked(arg1: c_int) -> c_int; + pub fn snprintf(arg1: *mut c_char, arg2: size_t, arg3: *const c_char, ...) 
-> c_int; + pub fn vsnprintf( + arg1: *mut c_char, + arg2: size_t, + arg3: *const c_char, + arg4: __va_list, + ) -> c_int; + pub fn getw(arg1: *mut FILE) -> c_int; + pub fn putw(arg1: c_int, arg2: *mut FILE) -> c_int; + pub fn tempnam(arg1: *const c_char, arg2: *const c_char) -> *mut c_char; + pub fn fseeko(stream: *mut FILE, offset: off_t, whence: c_int) -> c_int; + pub fn ftello(stream: *mut FILE) -> off_t; + + // stdlib.h + pub fn atof(arg1: *const c_char) -> f64; + pub fn strtod(arg1: *const c_char, arg2: *mut *mut c_char) -> f64; + pub fn drand48() -> f64; + pub fn erand48(arg1: *mut c_ushort) -> f64; + pub fn strtof(arg1: *const c_char, arg2: *mut *mut c_char) -> f32; + pub fn strtold(arg1: *const c_char, arg2: *mut *mut c_char) -> f64; + pub fn strtod_l(arg1: *const c_char, arg2: *mut *mut c_char, arg3: locale_t) -> f64; + pub fn strtof_l(arg1: *const c_char, arg2: *mut *mut c_char, arg3: locale_t) -> f32; + pub fn strtold_l(arg1: *const c_char, arg2: *mut *mut c_char, arg3: locale_t) -> f64; + pub fn _Exit(arg1: c_int) -> !; + pub fn abort() -> !; + pub fn abs(arg1: c_int) -> c_int; + pub fn atexit(arg1: Option) -> c_int; + pub fn atoi(arg1: *const c_char) -> c_int; + pub fn atol(arg1: *const c_char) -> c_long; + pub fn itoa(arg1: c_int, arg2: *mut c_char, arg3: c_int) -> *mut c_char; + pub fn ltoa(arg1: c_long, arg2: *mut c_char, arg3: c_int) -> *mut c_char; + pub fn ultoa(arg1: c_ulong, arg2: *mut c_char, arg3: c_int) -> *mut c_char; + pub fn bsearch( + arg1: *const c_void, + arg2: *const c_void, + arg3: size_t, + arg4: size_t, + arg5: Option c_int>, + ) -> *mut c_void; + pub fn calloc(arg1: size_t, arg2: size_t) -> *mut c_void; + pub fn div(arg1: c_int, arg2: c_int) -> div_t; + pub fn exit(arg1: c_int) -> !; + pub fn free(arg1: *mut c_void); + pub fn getenv(arg1: *const c_char) -> *mut c_char; + pub fn labs(arg1: c_long) -> c_long; + pub fn ldiv(arg1: c_long, arg2: c_long) -> ldiv_t; + pub fn malloc(arg1: size_t) -> *mut c_void; + pub fn qsort( + arg1: *mut c_void, + arg2: size_t, + arg3: size_t, + arg4: Option c_int>, + ); + pub fn rand() -> c_int; + pub fn realloc(arg1: *mut c_void, arg2: size_t) -> *mut c_void; + pub fn srand(arg1: c_uint); + pub fn strtol(arg1: *const c_char, arg2: *mut *mut c_char, arg3: c_int) -> c_long; + pub fn strtoul(arg1: *const c_char, arg2: *mut *mut c_char, arg3: c_int) -> c_ulong; + pub fn mblen(arg1: *const c_char, arg2: size_t) -> c_int; + pub fn mbstowcs(arg1: *mut wchar_t, arg2: *const c_char, arg3: size_t) -> size_t; + pub fn wctomb(arg1: *mut c_char, arg2: wchar_t) -> c_int; + pub fn mbtowc(arg1: *mut wchar_t, arg2: *const c_char, arg3: size_t) -> c_int; + pub fn wcstombs(arg1: *mut c_char, arg2: *const wchar_t, arg3: size_t) -> size_t; + pub fn rand_r(arg1: *mut c_uint) -> c_int; + pub fn jrand48(arg1: *mut c_ushort) -> c_long; + pub fn lcong48(arg1: *mut c_ushort); + pub fn lrand48() -> c_long; + pub fn mrand48() -> c_long; + pub fn nrand48(arg1: *mut c_ushort) -> c_long; + pub fn seed48(arg1: *mut c_ushort) -> *mut c_ushort; + pub fn srand48(arg1: c_long); + pub fn putenv(arg1: *mut c_char) -> c_int; + pub fn a64l(arg1: *const c_char) -> c_long; + pub fn l64a(arg1: c_long) -> *mut c_char; + pub fn random() -> c_long; + pub fn setstate(arg1: *mut c_char) -> *mut c_char; + pub fn initstate(arg1: c_uint, arg2: *mut c_char, arg3: size_t) -> *mut c_char; + pub fn srandom(arg1: c_uint); + pub fn mkostemp(arg1: *mut c_char, arg2: c_int) -> c_int; + pub fn mkostemps(arg1: *mut c_char, arg2: c_int, arg3: c_int) -> c_int; + pub fn mkdtemp(arg1: 
*mut c_char) -> *mut c_char; + pub fn mkstemp(arg1: *mut c_char) -> c_int; + pub fn mktemp(arg1: *mut c_char) -> *mut c_char; + pub fn atoll(arg1: *const c_char) -> c_longlong; + pub fn llabs(arg1: c_longlong) -> c_longlong; + pub fn lldiv(arg1: c_longlong, arg2: c_longlong) -> lldiv_t; + pub fn strtoll(arg1: *const c_char, arg2: *mut *mut c_char, arg3: c_int) -> c_longlong; + pub fn strtoull(arg1: *const c_char, arg2: *mut *mut c_char, arg3: c_int) -> c_ulonglong; + pub fn aligned_alloc(arg1: size_t, arg2: size_t) -> *mut c_void; + pub fn at_quick_exit(arg1: Option) -> c_int; + pub fn quick_exit(arg1: c_int); + pub fn setenv(arg1: *const c_char, arg2: *const c_char, arg3: c_int) -> c_int; + pub fn unsetenv(arg1: *const c_char) -> c_int; + pub fn humanize_number( + arg1: *mut c_char, + arg2: size_t, + arg3: i64, + arg4: *const c_char, + arg5: c_int, + arg6: c_int, + ) -> c_int; + pub fn dehumanize_number(arg1: *const c_char, arg2: *mut i64) -> c_int; + pub fn getenv_r(arg1: *const c_char, arg2: *mut c_char, arg3: size_t) -> c_int; + pub fn heapsort( + arg1: *mut c_void, + arg2: size_t, + arg3: size_t, + arg4: Option c_int>, + ) -> c_int; + pub fn mergesort( + arg1: *mut c_void, + arg2: size_t, + arg3: size_t, + arg4: Option c_int>, + ) -> c_int; + pub fn radixsort( + arg1: *mut *const c_uchar, + arg2: c_int, + arg3: *const c_uchar, + arg4: c_uint, + ) -> c_int; + pub fn sradixsort( + arg1: *mut *const c_uchar, + arg2: c_int, + arg3: *const c_uchar, + arg4: c_uint, + ) -> c_int; + pub fn getprogname() -> *const c_char; + pub fn setprogname(arg1: *const c_char); + pub fn qabs(arg1: quad_t) -> quad_t; + pub fn strtoq(arg1: *const c_char, arg2: *mut *mut c_char, arg3: c_int) -> quad_t; + pub fn strtouq(arg1: *const c_char, arg2: *mut *mut c_char, arg3: c_int) -> u_quad_t; + pub fn strsuftoll( + arg1: *const c_char, + arg2: *const c_char, + arg3: c_longlong, + arg4: c_longlong, + ) -> c_longlong; + pub fn strsuftollx( + arg1: *const c_char, + arg2: *const c_char, + arg3: c_longlong, + arg4: c_longlong, + arg5: *mut c_char, + arg6: size_t, + ) -> c_longlong; + pub fn l64a_r(arg1: c_long, arg2: *mut c_char, arg3: c_int) -> c_int; + pub fn qdiv(arg1: quad_t, arg2: quad_t) -> qdiv_t; + pub fn strtol_l( + arg1: *const c_char, + arg2: *mut *mut c_char, + arg3: c_int, + arg4: locale_t, + ) -> c_long; + pub fn strtoul_l( + arg1: *const c_char, + arg2: *mut *mut c_char, + arg3: c_int, + arg4: locale_t, + ) -> c_ulong; + pub fn strtoll_l( + arg1: *const c_char, + arg2: *mut *mut c_char, + arg3: c_int, + arg4: locale_t, + ) -> c_longlong; + pub fn strtoull_l( + arg1: *const c_char, + arg2: *mut *mut c_char, + arg3: c_int, + arg4: locale_t, + ) -> c_ulonglong; + pub fn strtoq_l( + arg1: *const c_char, + arg2: *mut *mut c_char, + arg3: c_int, + arg4: locale_t, + ) -> quad_t; + pub fn strtouq_l( + arg1: *const c_char, + arg2: *mut *mut c_char, + arg3: c_int, + arg4: locale_t, + ) -> u_quad_t; + pub fn _mb_cur_max_l(arg1: locale_t) -> size_t; + pub fn mblen_l(arg1: *const c_char, arg2: size_t, arg3: locale_t) -> c_int; + pub fn mbstowcs_l( + arg1: *mut wchar_t, + arg2: *const c_char, + arg3: size_t, + arg4: locale_t, + ) -> size_t; + pub fn wctomb_l(arg1: *mut c_char, arg2: wchar_t, arg3: locale_t) -> c_int; + pub fn mbtowc_l(arg1: *mut wchar_t, arg2: *const c_char, arg3: size_t, arg4: locale_t) + -> c_int; + pub fn wcstombs_l( + arg1: *mut c_char, + arg2: *const wchar_t, + arg3: size_t, + arg4: locale_t, + ) -> size_t; + + // string.h + pub fn memchr(arg1: *const c_void, arg2: c_int, arg3: size_t) -> *mut 
c_void; + pub fn memcmp(arg1: *const c_void, arg2: *const c_void, arg3: size_t) -> c_int; + pub fn memcpy(arg1: *mut c_void, arg2: *const c_void, arg3: size_t) -> *mut c_void; + pub fn memmove(arg1: *mut c_void, arg2: *const c_void, arg3: size_t) -> *mut c_void; + pub fn memset(arg1: *mut c_void, arg2: c_int, arg3: size_t) -> *mut c_void; + pub fn strcat(arg1: *mut c_char, arg2: *const c_char) -> *mut c_char; + pub fn strchr(arg1: *const c_char, arg2: c_int) -> *mut c_char; + pub fn strcmp(arg1: *const c_char, arg2: *const c_char) -> c_int; + pub fn strcoll(arg1: *const c_char, arg2: *const c_char) -> c_int; + pub fn strcpy(arg1: *mut c_char, arg2: *const c_char) -> *mut c_char; + pub fn strcspn(arg1: *const c_char, arg2: *const c_char) -> size_t; + pub fn strerror(arg1: c_int) -> *mut c_char; + pub fn strlen(arg1: *const c_char) -> size_t; + pub fn strncat(arg1: *mut c_char, arg2: *const c_char, arg3: size_t) -> *mut c_char; + pub fn strncmp(arg1: *const c_char, arg2: *const c_char, arg3: size_t) -> c_int; + pub fn strncpy(arg1: *mut c_char, arg2: *const c_char, arg3: size_t) -> *mut c_char; + pub fn strpbrk(arg1: *const c_char, arg2: *const c_char) -> *mut c_char; + pub fn strrchr(arg1: *const c_char, arg2: c_int) -> *mut c_char; + pub fn strspn(arg1: *const c_char, arg2: *const c_char) -> size_t; + pub fn strstr(arg1: *const c_char, arg2: *const c_char) -> *mut c_char; + pub fn strtok(arg1: *mut c_char, arg2: *const c_char) -> *mut c_char; + pub fn strtok_r(arg1: *mut c_char, arg2: *const c_char, arg3: *mut *mut c_char) -> *mut c_char; + pub fn strerror_r(arg1: c_int, arg2: *mut c_char, arg3: size_t) -> c_int; + pub fn strxfrm(arg1: *mut c_char, arg2: *const c_char, arg3: size_t) -> size_t; + pub fn memccpy( + arg1: *mut c_void, + arg2: *const c_void, + arg3: c_int, + arg4: size_t, + ) -> *mut c_void; + pub fn strdup(arg1: *const c_char) -> *mut c_char; + pub fn stpcpy(arg1: *mut c_char, arg2: *const c_char) -> *mut c_char; + pub fn stpncpy(arg1: *mut c_char, arg2: *const c_char, arg3: size_t) -> *mut c_char; + pub fn strnlen(arg1: *const c_char, arg2: size_t) -> size_t; + pub fn memmem( + arg1: *const c_void, + arg2: size_t, + arg3: *const c_void, + arg4: size_t, + ) -> *mut c_void; + pub fn strcasestr(arg1: *const c_char, arg2: *const c_char) -> *mut c_char; + pub fn strlcat(arg1: *mut c_char, arg2: *const c_char, arg3: size_t) -> size_t; + pub fn strlcpy(arg1: *mut c_char, arg2: *const c_char, arg3: size_t) -> size_t; + pub fn strsep(arg1: *mut *mut c_char, arg2: *const c_char) -> *mut c_char; + pub fn stresep(arg1: *mut *mut c_char, arg2: *const c_char, arg3: c_int) -> *mut c_char; + pub fn strndup(arg1: *const c_char, arg2: size_t) -> *mut c_char; + pub fn memrchr(arg1: *const c_void, arg2: c_int, arg3: size_t) -> *mut c_void; + pub fn explicit_memset(arg1: *mut c_void, arg2: c_int, arg3: size_t) -> *mut c_void; + pub fn consttime_memequal(arg1: *const c_void, arg2: *const c_void, arg3: size_t) -> c_int; + pub fn strcoll_l(arg1: *const c_char, arg2: *const c_char, arg3: locale_t) -> c_int; + pub fn strxfrm_l( + arg1: *mut c_char, + arg2: *const c_char, + arg3: size_t, + arg4: locale_t, + ) -> size_t; + pub fn strerror_l(arg1: c_int, arg2: locale_t) -> *mut c_char; + + // strings.h + pub fn bcmp(arg1: *const c_void, arg2: *const c_void, arg3: size_t) -> c_int; + pub fn bcopy(arg1: *const c_void, arg2: *mut c_void, arg3: size_t); + pub fn bzero(arg1: *mut c_void, arg2: size_t); + pub fn ffs(arg1: c_int) -> c_int; + pub fn popcount(arg1: c_uint) -> c_uint; + pub fn popcountl(arg1: 
c_ulong) -> c_uint; + pub fn popcountll(arg1: c_ulonglong) -> c_uint; + pub fn popcount32(arg1: u32) -> c_uint; + pub fn popcount64(arg1: u64) -> c_uint; + pub fn rindex(arg1: *const c_char, arg2: c_int) -> *mut c_char; + pub fn strcasecmp(arg1: *const c_char, arg2: *const c_char) -> c_int; + pub fn strncasecmp(arg1: *const c_char, arg2: *const c_char, arg3: size_t) -> c_int; + + // signal.h + pub fn signal(arg1: c_int, arg2: sighandler_t) -> sighandler_t; + pub fn raise(arg1: c_int) -> c_int; + + // time.h + pub fn asctime(arg1: *const tm) -> *mut c_char; + pub fn clock() -> clock_t; + pub fn ctime(arg1: *const time_t) -> *mut c_char; + pub fn difftime(arg1: time_t, arg2: time_t) -> f64; + pub fn gmtime(arg1: *const time_t) -> *mut tm; + pub fn localtime(arg1: *const time_t) -> *mut tm; + pub fn time(arg1: *mut time_t) -> time_t; + pub fn mktime(arg1: *mut tm) -> time_t; + pub fn strftime( + arg1: *mut c_char, + arg2: size_t, + arg3: *const c_char, + arg4: *const tm, + ) -> size_t; + pub fn utime(arg1: *const c_char, arg2: *mut time_t) -> c_int; + pub fn asctime_r(arg1: *const tm, arg2: *mut c_char) -> *mut c_char; + pub fn ctime_r(arg1: *const time_t, arg2: *mut c_char) -> *mut c_char; + pub fn gmtime_r(arg1: *const time_t, arg2: *mut tm) -> *mut tm; + pub fn localtime_r(arg1: *const time_t, arg2: *mut tm) -> *mut tm; + + // sys/stat.h + pub fn stat(arg1: *const c_char, arg2: *mut stat) -> c_int; + pub fn lstat(arg1: *const c_char, arg2: *mut stat) -> c_int; + pub fn fstat(arg1: c_int, arg2: *mut stat) -> c_int; + pub fn chmod(arg1: *const c_char, arg2: __mode_t) -> c_int; + pub fn mkdir(arg1: *const c_char, arg2: __mode_t) -> c_int; + + // fcntl.h + pub fn open(arg1: *const c_char, arg2: c_int, ...) -> c_int; + pub fn creat(arg1: *const c_char, arg2: c_int) -> c_int; + pub fn close(arg1: c_int) -> c_int; + pub fn read(arg1: c_int, arg2: *mut c_void, arg3: c_int) -> c_int; + pub fn write(arg1: c_int, arg2: *const c_void, arg3: c_int) -> c_int; + pub fn unlink(arg1: *const c_char) -> c_int; + pub fn tell(arg1: c_int) -> c_long; + pub fn dup(arg1: c_int) -> c_int; + pub fn dup2(arg1: c_int, arg2: c_int) -> c_int; + pub fn access(arg1: *const c_char, arg2: c_int) -> c_int; + pub fn rmdir(arg1: *const c_char) -> c_int; + pub fn chdir(arg1: *const c_char) -> c_int; + pub fn _exit(arg1: c_int); + pub fn getwd(arg1: *mut c_char) -> *mut c_char; + pub fn getcwd(arg1: *mut c_char, arg2: size_t) -> *mut c_char; + pub static mut optarg: *mut c_char; + pub static mut opterr: c_int; + pub static mut optind: c_int; + pub static mut optopt: c_int; + pub static mut optreset: c_int; + pub fn getopt(arg1: c_int, arg2: *mut *mut c_char, arg3: *const c_char) -> c_int; + pub static mut suboptarg: *mut c_char; + pub fn getsubopt( + arg1: *mut *mut c_char, + arg2: *const *mut c_char, + arg3: *mut *mut c_char, + ) -> c_int; + pub fn fcntl(arg1: c_int, arg2: c_int, ...) 
-> c_int; + pub fn getpid() -> pid_t; + pub fn sleep(arg1: c_uint) -> c_uint; + pub fn usleep(arg1: useconds_t) -> c_int; + + // locale.h + pub fn localeconv() -> *mut lconv; + pub fn setlocale(arg1: c_int, arg2: *const c_char) -> *mut c_char; + pub fn duplocale(arg1: locale_t) -> locale_t; + pub fn freelocale(arg1: locale_t); + pub fn localeconv_l(arg1: locale_t) -> *mut lconv; + pub fn newlocale(arg1: c_int, arg2: *const c_char, arg3: locale_t) -> locale_t; + + // langinfo.h + pub fn nl_langinfo(item: crate::nl_item) -> *mut c_char; + pub fn nl_langinfo_l(item: crate::nl_item, locale: locale_t) -> *mut c_char; + + // malloc.h + pub fn memalign(align: size_t, size: size_t) -> *mut c_void; + + // sys/types.h + pub fn lseek(arg1: c_int, arg2: __off_t, arg3: c_int) -> __off_t; +} + +cfg_if! { + if #[cfg(target_arch = "aarch64")] { + mod aarch64; + pub use self::aarch64::*; + } else if #[cfg(any(target_arch = "arm"))] { + mod arm; + pub use self::arm::*; + } else { + // Unknown target_arch + } +} diff --git a/vendor/libc/src/switch.rs b/vendor/libc/src/switch.rs new file mode 100644 index 00000000000000..d965ff7005fb24 --- /dev/null +++ b/vendor/libc/src/switch.rs @@ -0,0 +1,16 @@ +//! Switch C type definitions + +pub type intmax_t = i64; +pub type uintmax_t = u64; + +pub type size_t = usize; +pub type ptrdiff_t = isize; +pub type intptr_t = isize; +pub type uintptr_t = usize; +pub type ssize_t = isize; + +pub type off_t = i64; +pub type wchar_t = u32; + +pub const INT_MIN: c_int = -2147483648; +pub const INT_MAX: c_int = 2147483647; diff --git a/vendor/libc/src/teeos/mod.rs b/vendor/libc/src/teeos/mod.rs new file mode 100644 index 00000000000000..fd9c0b168aba49 --- /dev/null +++ b/vendor/libc/src/teeos/mod.rs @@ -0,0 +1,1355 @@ +//! Libc bindings for teeos +//! +//! Apparently the loader just dynamically links it anyway, but fails +//! when linking is explicitly requested. +#![allow(non_camel_case_types)] +#![allow(non_snake_case)] + +use crate::prelude::*; + +pub type c_bool = i32; + +pub type intmax_t = i64; + +pub type uintmax_t = u64; + +pub type size_t = usize; + +pub type ptrdiff_t = isize; + +pub type intptr_t = isize; + +pub type uintptr_t = usize; + +pub type ssize_t = isize; + +pub type pid_t = c_int; + +pub type wchar_t = u32; + +// long double in C means A float point value, which has 128bit length. +// but some bit maybe not used, so the real length of long double could be 80(x86) or 128(power pc/IEEE) +// this is different from f128(not stable and not included default) in Rust, so we use u128 for FFI(Rust to C). +// this is unstable and will cause to memfault/data abort. 
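The comment above is the whole FFI contract for `long double` on teeos: the value travels as an opaque, 16-byte-aligned 128-bit blob (the `_CLongDouble` wrapper defined just below) and Rust never interprets the bits as a float. A minimal standalone sketch of that layout idea, using a locally defined mirror type rather than the vendored `_CLongDouble` (the extra `repr(C)` and the names here are illustrative only):

    use core::mem::{align_of, size_of};

    // Illustrative mirror of the opaque carrier: all 128 bits are stored,
    // 16-byte aligned, and the payload is never used as a float in Rust.
    #[repr(C, align(16))]
    #[derive(Clone, Copy)]
    struct CLongDouble(u128);

    fn main() {
        // Only the layout matters; values are produced and consumed by C.
        assert_eq!(size_of::<CLongDouble>(), 16);
        assert_eq!(align_of::<CLongDouble>(), 16);
        let opaque = CLongDouble(0);
        let _handed_back_to_c_unchanged = opaque;
    }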
+pub type c_longdouble = _CLongDouble; + +pub type pthread_t = c_ulong; + +pub type pthread_key_t = c_uint; + +pub type pthread_spinlock_t = c_int; + +pub type off_t = i64; + +pub type time_t = c_long; + +pub type clock_t = c_long; + +pub type clockid_t = c_int; + +pub type suseconds_t = c_long; + +pub type once_fn = extern "C" fn() -> c_void; + +pub type pthread_once_t = c_int; + +pub type va_list = *mut c_char; + +pub type wint_t = c_uint; + +pub type wctype_t = c_ulong; + +pub type cmpfunc = extern "C" fn(x: *const c_void, y: *const c_void) -> c_int; + +#[repr(align(16))] +pub struct _CLongDouble(pub u128); + +#[repr(align(8))] +#[repr(C)] +pub struct pthread_cond_t { + #[doc(hidden)] + size: [u8; __SIZEOF_PTHREAD_COND_T], +} + +#[repr(align(8))] +#[repr(C)] +pub struct pthread_mutex_t { + #[doc(hidden)] + size: [u8; __SIZEOF_PTHREAD_MUTEX_T], +} + +#[repr(align(4))] +#[repr(C)] +pub struct pthread_mutexattr_t { + #[doc(hidden)] + size: [u8; __SIZEOF_PTHREAD_MUTEXATTR_T], +} + +#[repr(align(4))] +#[repr(C)] +pub struct pthread_condattr_t { + #[doc(hidden)] + size: [u8; __SIZEOF_PTHREAD_CONDATTR_T], +} + +#[repr(C)] +pub struct pthread_attr_t { + __size: [u64; 7], +} + +#[repr(C)] +pub struct cpu_set_t { + bits: [c_ulong; 128 / size_of::()], +} + +#[repr(C)] +pub struct timespec { + pub tv_sec: time_t, + pub tv_nsec: c_long, +} + +#[repr(C)] +pub struct timeval { + pub tv_sec: time_t, + pub tv_usec: suseconds_t, +} + +#[repr(C)] +pub struct tm { + pub tm_sec: c_int, + pub tm_min: c_int, + pub tm_hour: c_int, + pub tm_mday: c_int, + pub tm_mon: c_int, + pub tm_year: c_int, + pub tm_wday: c_int, + pub tm_yday: c_int, + pub tm_isdst: c_int, + pub __tm_gmtoff: c_long, + pub __tm_zone: *const c_char, +} + +#[repr(C)] +pub struct mbstate_t { + pub __opaque1: c_uint, + pub __opaque2: c_uint, +} + +#[repr(C)] +pub struct sem_t { + pub __val: [c_int; 4 * size_of::() / size_of::()], +} + +#[repr(C)] +pub struct div_t { + pub quot: c_int, + pub rem: c_int, +} + +// fcntl +pub const O_CREAT: u32 = 0o100; + +pub const O_EXCL: u32 = 0o200; + +pub const O_NOCTTY: u32 = 0o400; + +pub const O_TRUNC: u32 = 0o1000; + +pub const O_APPEND: u32 = 0o2000; + +pub const O_NONBLOCK: u32 = 0o4000; + +pub const O_DSYNC: u32 = 0o10000; + +pub const O_SYNC: u32 = 0o4010000; + +pub const O_RSYNC: u32 = 0o4010000; + +pub const O_DIRECTORY: u32 = 0o200000; + +pub const O_NOFOLLOW: u32 = 0o400000; + +pub const O_CLOEXEC: u32 = 0o2000000; + +pub const O_ASYNC: u32 = 0o20000; + +pub const O_DIRECT: u32 = 0o40000; + +pub const O_LARGEFILE: u32 = 0o100000; + +pub const O_NOATIME: u32 = 0o1000000; + +pub const O_PATH: u32 = 0o10000000; + +pub const O_TMPFILE: u32 = 0o20200000; + +pub const O_NDELAY: u32 = O_NONBLOCK; + +pub const F_DUPFD: u32 = 0; + +pub const F_GETFD: u32 = 1; + +pub const F_SETFD: u32 = 2; + +pub const F_GETFL: u32 = 3; + +pub const F_SETFL: u32 = 4; + +pub const F_SETOWN: u32 = 8; + +pub const F_GETOWN: u32 = 9; + +pub const F_SETSIG: u32 = 10; + +pub const F_GETSIG: u32 = 11; + +pub const F_GETLK: u32 = 12; + +pub const F_SETLK: u32 = 13; + +pub const F_SETLKW: u32 = 14; + +pub const F_SETOWN_EX: u32 = 15; + +pub const F_GETOWN_EX: u32 = 16; + +pub const F_GETOWNER_UIDS: u32 = 17; + +// mman +pub const MAP_FAILED: u64 = 0xffffffffffffffff; + +pub const MAP_FIXED_NOREPLACE: u32 = 0x100000; + +pub const MAP_SHARED_VALIDATE: u32 = 0x03; + +pub const MAP_SHARED: u32 = 0x01; + +pub const MAP_PRIVATE: u32 = 0x02; + +pub const MAP_TYPE: u32 = 0x0f; + +pub const MAP_FIXED: u32 = 0x10; + +pub const MAP_ANON: u32 = 
0x20; + +pub const MAP_ANONYMOUS: u32 = MAP_ANON; + +pub const MAP_NORESERVE: u32 = 0x4000; + +pub const MAP_GROWSDOWN: u32 = 0x0100; + +pub const MAP_DENYWRITE: u32 = 0x0800; + +pub const MAP_EXECUTABLE: u32 = 0x1000; + +pub const MAP_LOCKED: u32 = 0x2000; + +pub const MAP_POPULATE: u32 = 0x8000; + +pub const MAP_NONBLOCK: u32 = 0x10000; + +pub const MAP_STACK: u32 = 0x20000; + +pub const MAP_HUGETLB: u32 = 0x40000; + +pub const MAP_SYNC: u32 = 0x80000; + +pub const MAP_FILE: u32 = 0; + +pub const MAP_HUGE_SHIFT: u32 = 26; + +pub const MAP_HUGE_MASK: u32 = 0x3f; + +pub const MAP_HUGE_16KB: u32 = 14 << 26; + +pub const MAP_HUGE_64KB: u32 = 16 << 26; + +pub const MAP_HUGE_512KB: u32 = 19 << 26; + +pub const MAP_HUGE_1MB: u32 = 20 << 26; + +pub const MAP_HUGE_2MB: u32 = 21 << 26; + +pub const MAP_HUGE_8MB: u32 = 23 << 26; + +pub const MAP_HUGE_16MB: u32 = 24 << 26; + +pub const MAP_HUGE_32MB: u32 = 25 << 26; + +pub const MAP_HUGE_256MB: u32 = 28 << 26; + +pub const MAP_HUGE_512MB: u32 = 29 << 26; + +pub const MAP_HUGE_1GB: u32 = 30 << 26; + +pub const MAP_HUGE_2GB: u32 = 31 << 26; + +pub const MAP_HUGE_16GB: u32 = 34u32 << 26; + +pub const PROT_NONE: u32 = 0; + +pub const PROT_READ: u32 = 1; + +pub const PROT_WRITE: u32 = 2; + +pub const PROT_EXEC: u32 = 4; + +pub const PROT_GROWSDOWN: u32 = 0x01000000; + +pub const PROT_GROWSUP: u32 = 0x02000000; + +pub const MS_ASYNC: u32 = 1; + +pub const MS_INVALIDATE: u32 = 2; + +pub const MS_SYNC: u32 = 4; + +pub const MCL_CURRENT: u32 = 1; + +pub const MCL_FUTURE: u32 = 2; + +pub const MCL_ONFAULT: u32 = 4; + +pub const POSIX_MADV_NORMAL: u32 = 0; + +pub const POSIX_MADV_RANDOM: u32 = 1; + +pub const POSIX_MADV_SEQUENTIAL: u32 = 2; + +pub const POSIX_MADV_WILLNEED: u32 = 3; + +pub const POSIX_MADV_DONTNEED: u32 = 4; + +// wctype +pub const WCTYPE_ALNUM: u64 = 1; + +pub const WCTYPE_ALPHA: u64 = 2; + +pub const WCTYPE_BLANK: u64 = 3; + +pub const WCTYPE_CNTRL: u64 = 4; + +pub const WCTYPE_DIGIT: u64 = 5; + +pub const WCTYPE_GRAPH: u64 = 6; + +pub const WCTYPE_LOWER: u64 = 7; + +pub const WCTYPE_PRINT: u64 = 8; + +pub const WCTYPE_PUNCT: u64 = 9; + +pub const WCTYPE_SPACE: u64 = 10; + +pub const WCTYPE_UPPER: u64 = 11; + +pub const WCTYPE_XDIGIT: u64 = 12; + +// locale +pub const LC_CTYPE: i32 = 0; + +pub const LC_NUMERIC: i32 = 1; + +pub const LC_TIME: i32 = 2; + +pub const LC_COLLATE: i32 = 3; + +pub const LC_MONETARY: i32 = 4; + +pub const LC_MESSAGES: i32 = 5; + +pub const LC_ALL: i32 = 6; + +// pthread +pub const __SIZEOF_PTHREAD_COND_T: usize = 48; + +pub const __SIZEOF_PTHREAD_MUTEX_T: usize = 40; + +pub const __SIZEOF_PTHREAD_MUTEXATTR_T: usize = 4; + +pub const __SIZEOF_PTHREAD_CONDATTR_T: usize = 4; + +// errno.h +pub const EPERM: c_int = 1; + +pub const ENOENT: c_int = 2; + +pub const ESRCH: c_int = 3; + +pub const EINTR: c_int = 4; + +pub const EIO: c_int = 5; + +pub const ENXIO: c_int = 6; + +pub const E2BIG: c_int = 7; + +pub const ENOEXEC: c_int = 8; + +pub const EBADF: c_int = 9; + +pub const ECHILD: c_int = 10; + +pub const EAGAIN: c_int = 11; + +pub const ENOMEM: c_int = 12; + +pub const EACCES: c_int = 13; + +pub const EFAULT: c_int = 14; + +pub const ENOTBLK: c_int = 15; + +pub const EBUSY: c_int = 16; + +pub const EEXIST: c_int = 17; + +pub const EXDEV: c_int = 18; + +pub const ENODEV: c_int = 19; + +pub const ENOTDIR: c_int = 20; + +pub const EISDIR: c_int = 21; + +pub const EINVAL: c_int = 22; + +pub const ENFILE: c_int = 23; + +pub const EMFILE: c_int = 24; + +pub const ENOTTY: c_int = 25; + +pub const ETXTBSY: c_int = 26; + +pub 
const EFBIG: c_int = 27; + +pub const ENOSPC: c_int = 28; + +pub const ESPIPE: c_int = 29; + +pub const EROFS: c_int = 30; + +pub const EMLINK: c_int = 31; + +pub const EPIPE: c_int = 32; + +pub const EDOM: c_int = 33; + +pub const ERANGE: c_int = 34; + +pub const EDEADLK: c_int = 35; + +pub const ENAMETOOLONG: c_int = 36; + +pub const ENOLCK: c_int = 37; + +pub const ENOSYS: c_int = 38; + +pub const ENOTEMPTY: c_int = 39; + +pub const ELOOP: c_int = 40; + +pub const EWOULDBLOCK: c_int = EAGAIN; + +pub const ENOMSG: c_int = 42; + +pub const EIDRM: c_int = 43; + +pub const ECHRNG: c_int = 44; + +pub const EL2NSYNC: c_int = 45; + +pub const EL3HLT: c_int = 46; + +pub const EL3RST: c_int = 47; + +pub const ELNRNG: c_int = 48; + +pub const EUNATCH: c_int = 49; + +pub const ENOCSI: c_int = 50; + +pub const EL2HLT: c_int = 51; + +pub const EBADE: c_int = 52; + +pub const EBADR: c_int = 53; + +pub const EXFULL: c_int = 54; + +pub const ENOANO: c_int = 55; + +pub const EBADRQC: c_int = 56; + +pub const EBADSLT: c_int = 57; + +pub const EDEADLOCK: c_int = EDEADLK; + +pub const EBFONT: c_int = 59; + +pub const ENOSTR: c_int = 60; + +pub const ENODATA: c_int = 61; + +pub const ETIME: c_int = 62; + +pub const ENOSR: c_int = 63; + +pub const ENONET: c_int = 64; + +pub const ENOPKG: c_int = 65; + +pub const EREMOTE: c_int = 66; + +pub const ENOLINK: c_int = 67; + +pub const EADV: c_int = 68; + +pub const ESRMNT: c_int = 69; + +pub const ECOMM: c_int = 70; + +pub const EPROTO: c_int = 71; + +pub const EMULTIHOP: c_int = 72; + +pub const EDOTDOT: c_int = 73; + +pub const EBADMSG: c_int = 74; + +pub const EOVERFLOW: c_int = 75; + +pub const ENOTUNIQ: c_int = 76; + +pub const EBADFD: c_int = 77; + +pub const EREMCHG: c_int = 78; + +pub const ELIBACC: c_int = 79; + +pub const ELIBBAD: c_int = 80; + +pub const ELIBSCN: c_int = 81; + +pub const ELIBMAX: c_int = 82; + +pub const ELIBEXEC: c_int = 83; + +pub const EILSEQ: c_int = 84; + +pub const ERESTART: c_int = 85; + +pub const ESTRPIPE: c_int = 86; + +pub const EUSERS: c_int = 87; + +pub const ENOTSOCK: c_int = 88; + +pub const EDESTADDRREQ: c_int = 89; + +pub const EMSGSIZE: c_int = 90; + +pub const EPROTOTYPE: c_int = 91; + +pub const ENOPROTOOPT: c_int = 92; + +pub const EPROTONOSUPPOR: c_int = 93; + +pub const ESOCKTNOSUPPOR: c_int = 94; + +pub const EOPNOTSUPP: c_int = 95; + +pub const ENOTSUP: c_int = EOPNOTSUPP; + +pub const EPFNOSUPPORT: c_int = 96; + +pub const EAFNOSUPPORT: c_int = 97; + +pub const EADDRINUSE: c_int = 98; + +pub const EADDRNOTAVAIL: c_int = 99; + +pub const ENETDOWN: c_int = 100; + +pub const ENETUNREACH: c_int = 101; + +pub const ENETRESET: c_int = 102; + +pub const ECONNABORTED: c_int = 103; + +pub const ECONNRESET: c_int = 104; + +pub const ENOBUFS: c_int = 105; + +pub const EISCONN: c_int = 106; + +pub const ENOTCONN: c_int = 107; + +pub const ESHUTDOWN: c_int = 108; + +pub const ETOOMANYREFS: c_int = 109; + +pub const ETIMEDOUT: c_int = 110; + +pub const ECONNREFUSED: c_int = 111; + +pub const EHOSTDOWN: c_int = 112; + +pub const EHOSTUNREACH: c_int = 113; + +pub const EALREADY: c_int = 114; + +pub const EINPROGRESS: c_int = 115; + +pub const ESTALE: c_int = 116; + +pub const EUCLEAN: c_int = 117; + +pub const ENOTNAM: c_int = 118; + +pub const ENAVAIL: c_int = 119; + +pub const EISNAM: c_int = 120; + +pub const EREMOTEIO: c_int = 121; + +pub const EDQUOT: c_int = 122; + +pub const ENOMEDIUM: c_int = 123; + +pub const EMEDIUMTYPE: c_int = 124; + +pub const ECANCELED: c_int = 125; + +pub const ENOKEY: c_int = 126; + +pub const 
EKEYEXPIRED: c_int = 127; + +pub const EKEYREVOKED: c_int = 128; + +pub const EKEYREJECTED: c_int = 129; + +pub const EOWNERDEAD: c_int = 130; + +pub const ENOTRECOVERABLE: c_int = 131; + +pub const ERFKILL: c_int = 132; + +pub const EHWPOISON: c_int = 133; + +// pthread_attr.h +pub const TEESMP_THREAD_ATTR_CA_WILDCARD: c_int = 0; + +pub const TEESMP_THREAD_ATTR_CA_INHERIT: c_int = -1; + +pub const TEESMP_THREAD_ATTR_TASK_ID_INHERIT: c_int = -1; + +pub const TEESMP_THREAD_ATTR_HAS_SHADOW: c_int = 0x1; + +pub const TEESMP_THREAD_ATTR_NO_SHADOW: c_int = 0x0; + +// unistd.h +pub const _SC_ARG_MAX: c_int = 0; + +pub const _SC_CHILD_MAX: c_int = 1; + +pub const _SC_CLK_TCK: c_int = 2; + +pub const _SC_NGROUPS_MAX: c_int = 3; + +pub const _SC_OPEN_MAX: c_int = 4; + +pub const _SC_STREAM_MAX: c_int = 5; + +pub const _SC_TZNAME_MAX: c_int = 6; + +pub const _SC_JOB_CONTROL: c_int = 7; + +pub const _SC_SAVED_IDS: c_int = 8; + +pub const _SC_REALTIME_SIGNALS: c_int = 9; + +pub const _SC_PRIORITY_SCHEDULING: c_int = 10; + +pub const _SC_TIMERS: c_int = 11; + +pub const _SC_ASYNCHRONOUS_IO: c_int = 12; + +pub const _SC_PRIORITIZED_IO: c_int = 13; + +pub const _SC_SYNCHRONIZED_IO: c_int = 14; + +pub const _SC_FSYNC: c_int = 15; + +pub const _SC_MAPPED_FILES: c_int = 16; + +pub const _SC_MEMLOCK: c_int = 17; + +pub const _SC_MEMLOCK_RANGE: c_int = 18; + +pub const _SC_MEMORY_PROTECTION: c_int = 19; + +pub const _SC_MESSAGE_PASSING: c_int = 20; + +pub const _SC_SEMAPHORES: c_int = 21; + +pub const _SC_SHARED_MEMORY_OBJECTS: c_int = 22; + +pub const _SC_AIO_LISTIO_MAX: c_int = 23; + +pub const _SC_AIO_MAX: c_int = 24; + +pub const _SC_AIO_PRIO_DELTA_MAX: c_int = 25; + +pub const _SC_DELAYTIMER_MAX: c_int = 26; + +pub const _SC_MQ_OPEN_MAX: c_int = 27; + +pub const _SC_MQ_PRIO_MAX: c_int = 28; + +pub const _SC_VERSION: c_int = 29; + +pub const _SC_PAGE_SIZE: c_int = 30; + +pub const _SC_PAGESIZE: c_int = 30; /* !! */ + +pub const _SC_RTSIG_MAX: c_int = 31; + +pub const _SC_SEM_NSEMS_MAX: c_int = 32; + +pub const _SC_SEM_VALUE_MAX: c_int = 33; + +pub const _SC_SIGQUEUE_MAX: c_int = 34; + +pub const _SC_TIMER_MAX: c_int = 35; + +pub const _SC_BC_BASE_MAX: c_int = 36; + +pub const _SC_BC_DIM_MAX: c_int = 37; + +pub const _SC_BC_SCALE_MAX: c_int = 38; + +pub const _SC_BC_STRING_MAX: c_int = 39; + +pub const _SC_COLL_WEIGHTS_MAX: c_int = 40; + +pub const _SC_EXPR_NEST_MAX: c_int = 42; + +pub const _SC_LINE_MAX: c_int = 43; + +pub const _SC_RE_DUP_MAX: c_int = 44; + +pub const _SC_2_VERSION: c_int = 46; + +pub const _SC_2_C_BIND: c_int = 47; + +pub const _SC_2_C_DEV: c_int = 48; + +pub const _SC_2_FORT_DEV: c_int = 49; + +pub const _SC_2_FORT_RUN: c_int = 50; + +pub const _SC_2_SW_DEV: c_int = 51; + +pub const _SC_2_LOCALEDEF: c_int = 52; + +pub const _SC_UIO_MAXIOV: c_int = 60; /* !! 
*/ + +pub const _SC_IOV_MAX: c_int = 60; + +pub const _SC_THREADS: c_int = 67; + +pub const _SC_THREAD_SAFE_FUNCTIONS: c_int = 68; + +pub const _SC_GETGR_R_SIZE_MAX: c_int = 69; + +pub const _SC_GETPW_R_SIZE_MAX: c_int = 70; + +pub const _SC_LOGIN_NAME_MAX: c_int = 71; + +pub const _SC_TTY_NAME_MAX: c_int = 72; + +pub const _SC_THREAD_DESTRUCTOR_ITERATIONS: c_int = 73; + +pub const _SC_THREAD_KEYS_MAX: c_int = 74; + +pub const _SC_THREAD_STACK_MIN: c_int = 75; + +pub const _SC_THREAD_THREADS_MAX: c_int = 76; + +pub const _SC_THREAD_ATTR_STACKADDR: c_int = 77; + +pub const _SC_THREAD_ATTR_STACKSIZE: c_int = 78; + +pub const _SC_THREAD_PRIORITY_SCHEDULING: c_int = 79; + +pub const _SC_THREAD_PRIO_INHERIT: c_int = 80; + +pub const _SC_THREAD_PRIO_PROTECT: c_int = 81; + +pub const _SC_THREAD_PROCESS_SHARED: c_int = 82; + +pub const _SC_NPROCESSORS_CONF: c_int = 83; + +pub const _SC_NPROCESSORS_ONLN: c_int = 84; + +pub const _SC_PHYS_PAGES: c_int = 85; + +pub const _SC_AVPHYS_PAGES: c_int = 86; + +pub const _SC_ATEXIT_MAX: c_int = 87; + +pub const _SC_PASS_MAX: c_int = 88; + +pub const _SC_XOPEN_VERSION: c_int = 89; + +pub const _SC_XOPEN_XCU_VERSION: c_int = 90; + +pub const _SC_XOPEN_UNIX: c_int = 91; + +pub const _SC_XOPEN_CRYPT: c_int = 92; + +pub const _SC_XOPEN_ENH_I18N: c_int = 93; + +pub const _SC_XOPEN_SHM: c_int = 94; + +pub const _SC_2_CHAR_TERM: c_int = 95; + +pub const _SC_2_UPE: c_int = 97; + +pub const _SC_XOPEN_XPG2: c_int = 98; + +pub const _SC_XOPEN_XPG3: c_int = 99; + +pub const _SC_XOPEN_XPG4: c_int = 100; + +pub const _SC_NZERO: c_int = 109; + +pub const _SC_XBS5_ILP32_OFF32: c_int = 125; + +pub const _SC_XBS5_ILP32_OFFBIG: c_int = 126; + +pub const _SC_XBS5_LP64_OFF64: c_int = 127; + +pub const _SC_XBS5_LPBIG_OFFBIG: c_int = 128; + +pub const _SC_XOPEN_LEGACY: c_int = 129; + +pub const _SC_XOPEN_REALTIME: c_int = 130; + +pub const _SC_XOPEN_REALTIME_THREADS: c_int = 131; + +pub const _SC_ADVISORY_INFO: c_int = 132; + +pub const _SC_BARRIERS: c_int = 133; + +pub const _SC_CLOCK_SELECTION: c_int = 137; + +pub const _SC_CPUTIME: c_int = 138; + +pub const _SC_THREAD_CPUTIME: c_int = 139; + +pub const _SC_MONOTONIC_CLOCK: c_int = 149; + +pub const _SC_READER_WRITER_LOCKS: c_int = 153; + +pub const _SC_SPIN_LOCKS: c_int = 154; + +pub const _SC_REGEXP: c_int = 155; + +pub const _SC_SHELL: c_int = 157; + +pub const _SC_SPAWN: c_int = 159; + +pub const _SC_SPORADIC_SERVER: c_int = 160; + +pub const _SC_THREAD_SPORADIC_SERVER: c_int = 161; + +pub const _SC_TIMEOUTS: c_int = 164; + +pub const _SC_TYPED_MEMORY_OBJECTS: c_int = 165; + +pub const _SC_2_PBS: c_int = 168; + +pub const _SC_2_PBS_ACCOUNTING: c_int = 169; + +pub const _SC_2_PBS_LOCATE: c_int = 170; + +pub const _SC_2_PBS_MESSAGE: c_int = 171; + +pub const _SC_2_PBS_TRACK: c_int = 172; + +pub const _SC_SYMLOOP_MAX: c_int = 173; + +pub const _SC_STREAMS: c_int = 174; + +pub const _SC_2_PBS_CHECKPOINT: c_int = 175; + +pub const _SC_V6_ILP32_OFF32: c_int = 176; + +pub const _SC_V6_ILP32_OFFBIG: c_int = 177; + +pub const _SC_V6_LP64_OFF64: c_int = 178; + +pub const _SC_V6_LPBIG_OFFBIG: c_int = 179; + +pub const _SC_HOST_NAME_MAX: c_int = 180; + +pub const _SC_TRACE: c_int = 181; + +pub const _SC_TRACE_EVENT_FILTER: c_int = 182; + +pub const _SC_TRACE_INHERIT: c_int = 183; + +pub const _SC_TRACE_LOG: c_int = 184; + +pub const _SC_IPV6: c_int = 235; + +pub const _SC_RAW_SOCKETS: c_int = 236; + +pub const _SC_V7_ILP32_OFF32: c_int = 237; + +pub const _SC_V7_ILP32_OFFBIG: c_int = 238; + +pub const _SC_V7_LP64_OFF64: c_int = 239; + 
+pub const _SC_V7_LPBIG_OFFBIG: c_int = 240; + +pub const _SC_SS_REPL_MAX: c_int = 241; + +pub const _SC_TRACE_EVENT_NAME_MAX: c_int = 242; + +pub const _SC_TRACE_NAME_MAX: c_int = 243; + +pub const _SC_TRACE_SYS_MAX: c_int = 244; + +pub const _SC_TRACE_USER_EVENT_MAX: c_int = 245; + +pub const _SC_XOPEN_STREAMS: c_int = 246; + +pub const _SC_THREAD_ROBUST_PRIO_INHERIT: c_int = 247; + +pub const _SC_THREAD_ROBUST_PRIO_PROTECT: c_int = 248; + +// limits.h +pub const PTHREAD_KEYS_MAX: c_int = 128; + +pub const PTHREAD_STACK_MIN: c_int = 2048; + +pub const PTHREAD_DESTRUCTOR_ITERATIONS: c_int = 4; + +pub const SEM_VALUE_MAX: c_int = 0x7fffffff; + +pub const SEM_NSEMS_MAX: c_int = 256; + +pub const DELAYTIMER_MAX: c_int = 0x7fffffff; + +pub const MQ_PRIO_MAX: c_int = 32768; + +pub const LOGIN_NAME_MAX: c_int = 256; + +// time.h +pub const CLOCK_REALTIME: clockid_t = 0; + +pub const CLOCK_MONOTONIC: clockid_t = 1; + +pub const PTHREAD_MUTEX_INITIALIZER: pthread_mutex_t = pthread_mutex_t { + size: [0; __SIZEOF_PTHREAD_MUTEX_T], +}; + +pub const PTHREAD_COND_INITIALIZER: pthread_cond_t = pthread_cond_t { + size: [0; __SIZEOF_PTHREAD_COND_T], +}; + +pub const PTHREAD_MUTEX_NORMAL: c_int = 0; + +pub const PTHREAD_MUTEX_RECURSIVE: c_int = 1; + +pub const PTHREAD_MUTEX_ERRORCHECK: c_int = 2; + +pub const PTHREAD_MUTEX_DEFAULT: c_int = PTHREAD_MUTEX_NORMAL; + +pub const PTHREAD_MUTEX_STALLED: c_int = 0; + +pub const PTHREAD_MUTEX_ROBUST: c_int = 1; + +extern "C" { + // ---- ALLOC ----------------------------------------------------------------------------- + pub fn calloc(nobj: size_t, size: size_t) -> *mut c_void; + + pub fn malloc(size: size_t) -> *mut c_void; + + pub fn realloc(p: *mut c_void, size: size_t) -> *mut c_void; + + pub fn aligned_alloc(align: size_t, len: size_t) -> *mut c_void; + + pub fn free(p: *mut c_void); + + pub fn posix_memalign(memptr: *mut *mut c_void, align: size_t, size: size_t) -> c_int; + + pub fn memchr(cx: *const c_void, c: c_int, n: size_t) -> *mut c_void; + + pub fn wmemchr(cx: *const wchar_t, c: wchar_t, n: size_t) -> *mut wchar_t; + + pub fn memcmp(cx: *const c_void, ct: *const c_void, n: size_t) -> c_int; + + pub fn memcpy(dest: *mut c_void, src: *const c_void, n: size_t) -> *mut c_void; + + pub fn memmove(dest: *mut c_void, src: *const c_void, n: size_t) -> *mut c_void; + + pub fn memset(dest: *mut c_void, c: c_int, n: size_t) -> *mut c_void; + + // ----- PTHREAD --------------------------------------------------------------------------- + pub fn pthread_self() -> pthread_t; + + pub fn pthread_join(native: pthread_t, value: *mut *mut c_void) -> c_int; + + // detach or pthread_attr_setdetachstate must not be called! + //pub fn pthread_detach(thread: pthread_t) -> c_int; + + pub fn pthread_exit(value: *mut c_void) -> !; + + pub fn pthread_attr_init(attr: *mut pthread_attr_t) -> c_int; + + pub fn pthread_attr_destroy(attr: *mut pthread_attr_t) -> c_int; + + pub fn pthread_attr_getstack( + attr: *const pthread_attr_t, + stackaddr: *mut *mut c_void, + stacksize: *mut size_t, + ) -> c_int; + + pub fn pthread_attr_setstacksize(attr: *mut pthread_attr_t, stack_size: size_t) -> c_int; + + pub fn pthread_attr_getstacksize(attr: *const pthread_attr_t, size: *mut size_t) -> c_int; + + pub fn pthread_attr_settee( + attr: *mut pthread_attr_t, + ca: c_int, + task_id: c_int, + shadow: c_int, + ) -> c_int; + + // C-TA API do not include this interface, but TA can use. 
+ pub fn sched_yield() -> c_int; + + pub fn pthread_key_create( + key: *mut pthread_key_t, + dtor: Option, + ) -> c_int; + + pub fn pthread_key_delete(key: pthread_key_t) -> c_int; + + pub fn pthread_getspecific(key: pthread_key_t) -> *mut c_void; + + pub fn pthread_setspecific(key: pthread_key_t, value: *const c_void) -> c_int; + + pub fn pthread_mutex_destroy(lock: *mut pthread_mutex_t) -> c_int; + + pub fn pthread_mutex_init( + lock: *mut pthread_mutex_t, + attr: *const pthread_mutexattr_t, + ) -> c_int; + + pub fn pthread_mutex_lock(lock: *mut pthread_mutex_t) -> c_int; + + pub fn pthread_mutex_trylock(lock: *mut pthread_mutex_t) -> c_int; + + pub fn pthread_mutex_unlock(lock: *mut pthread_mutex_t) -> c_int; + + pub fn pthread_mutexattr_destroy(attr: *mut pthread_mutexattr_t) -> c_int; + + pub fn pthread_mutexattr_init(attr: *mut pthread_mutexattr_t) -> c_int; + + pub fn pthread_mutexattr_settype(attr: *mut pthread_mutexattr_t, _type: c_int) -> c_int; + + pub fn pthread_mutexattr_setpshared(attr: *mut pthread_mutexattr_t, pshared: c_int) -> c_int; + + pub fn pthread_cond_broadcast(cond: *mut pthread_cond_t) -> c_int; + + pub fn pthread_cond_destroy(cond: *mut pthread_cond_t) -> c_int; + + pub fn pthread_cond_init(cond: *mut pthread_cond_t, attr: *const pthread_condattr_t) -> c_int; + + pub fn pthread_cond_signal(cond: *mut pthread_cond_t) -> c_int; + + pub fn pthread_cond_wait(cond: *mut pthread_cond_t, lock: *mut pthread_mutex_t) -> c_int; + + pub fn pthread_cond_timedwait( + cond: *mut pthread_cond_t, + lock: *mut pthread_mutex_t, + abstime: *const timespec, + ) -> c_int; + + pub fn pthread_mutexattr_setrobust(attr: *mut pthread_mutexattr_t, robustness: c_int) -> c_int; + + pub fn pthread_create( + native: *mut pthread_t, + attr: *const pthread_attr_t, + f: extern "C" fn(*mut c_void) -> *mut c_void, + value: *mut c_void, + ) -> c_int; + + pub fn pthread_spin_init(lock: *mut pthread_spinlock_t, pshared: c_int) -> c_int; + + pub fn pthread_spin_destroy(lock: *mut pthread_spinlock_t) -> c_int; + + pub fn pthread_spin_lock(lock: *mut pthread_spinlock_t) -> c_int; + + pub fn pthread_spin_trylock(lock: *mut pthread_spinlock_t) -> c_int; + + pub fn pthread_spin_unlock(lock: *mut pthread_spinlock_t) -> c_int; + + pub fn pthread_setschedprio(native: pthread_t, priority: c_int) -> c_int; + + pub fn pthread_once(pot: *mut pthread_once_t, f: Option) -> c_int; + + pub fn pthread_equal(p1: pthread_t, p2: pthread_t) -> c_int; + + pub fn pthread_mutexattr_setprotocol(a: *mut pthread_mutexattr_t, protocol: c_int) -> c_int; + + pub fn pthread_attr_setstack( + attr: *mut pthread_attr_t, + stack: *mut c_void, + size: size_t, + ) -> c_int; + + pub fn pthread_setaffinity_np(td: pthread_t, size: size_t, set: *const cpu_set_t) -> c_int; + + pub fn pthread_getaffinity_np(td: pthread_t, size: size_t, set: *mut cpu_set_t) -> c_int; + + // stdio.h + pub fn printf(fmt: *const c_char, ...) -> c_int; + + pub fn scanf(fmt: *const c_char, ...) -> c_int; + + pub fn snprintf(s: *mut c_char, n: size_t, fmt: *const c_char, ...) -> c_int; + + pub fn sprintf(s: *mut c_char, fmt: *const c_char, ...) -> c_int; + + pub fn vsnprintf(s: *mut c_char, n: size_t, fmt: *const c_char, ap: va_list) -> c_int; + + pub fn vsprintf(s: *mut c_char, fmt: *const c_char, ap: va_list) -> c_int; + + // Not available. + //pub fn pthread_setname_np(thread: pthread_t, name: *const c_char) -> c_int; + + pub fn abort() -> !; + + // Not available. + //pub fn prctl(op: c_int, ...) 
-> c_int; + + pub fn sched_getaffinity(pid: pid_t, cpusetsize: size_t, cpuset: *mut cpu_set_t) -> c_int; + + pub fn sched_setaffinity(pid: pid_t, cpusetsize: size_t, cpuset: *const cpu_set_t) -> c_int; + + // sysconf is currently only implemented as a stub. + pub fn sysconf(name: c_int) -> c_long; + + // mman.h + pub fn mmap( + addr: *mut c_void, + len: size_t, + prot: c_int, + flags: c_int, + fd: c_int, + offset: off_t, + ) -> *mut c_void; + pub fn munmap(addr: *mut c_void, len: size_t) -> c_int; + + // errno.h + pub fn __errno_location() -> *mut c_int; + + pub fn strerror(e: c_int) -> *mut c_char; + + // time.h + pub fn clock_gettime(clock_id: clockid_t, tp: *mut timespec) -> c_int; + + // unistd + pub fn getpid() -> pid_t; + + // time + pub fn gettimeofday(tv: *mut timeval, tz: *mut c_void) -> c_int; + + pub fn strftime( + restrict: *mut c_char, + sz: size_t, + _restrict: *const c_char, + __restrict: *const tm, + ) -> size_t; + + pub fn time(t: *mut time_t) -> time_t; + + // sem + pub fn sem_close(sem: *mut sem_t) -> c_int; + + pub fn sem_destroy(sem: *mut sem_t) -> c_int; + + pub fn sem_getvalue(sem: *mut sem_t, valp: *mut c_int) -> c_int; + + pub fn sem_init(sem: *mut sem_t, pshared: c_int, value: c_uint) -> c_int; + + pub fn sem_open(name: *const c_char, flags: c_int, ...) -> *mut sem_t; + + pub fn sem_post(sem: *mut sem_t) -> c_int; + + pub fn sem_unlink(name: *const c_char) -> c_int; + + pub fn sem_wait(sem: *mut sem_t) -> c_int; + + // locale + pub fn setlocale(cat: c_int, name: *const c_char) -> *mut c_char; + + pub fn strcoll(l: *const c_char, r: *const c_char) -> c_int; + + pub fn strxfrm(dest: *mut c_char, src: *const c_char, n: size_t) -> size_t; + + pub fn strtod(s: *const c_char, p: *mut *mut c_char) -> c_double; + + // multibyte + pub fn mbrtowc(wc: *mut wchar_t, src: *const c_char, n: size_t, st: *mut mbstate_t) -> size_t; + + pub fn wcrtomb(s: *mut c_char, wc: wchar_t, st: *mut mbstate_t) -> size_t; + + pub fn wctob(c: wint_t) -> c_int; + + // prng + pub fn srandom(seed: c_uint); + + pub fn initstate(seed: c_uint, state: *mut c_char, size: size_t) -> *mut c_char; + + pub fn setstate(state: *mut c_char) -> *mut c_char; + + pub fn random() -> c_long; + + // string + pub fn strchr(s: *const c_char, c: c_int) -> *mut c_char; + + pub fn strlen(cs: *const c_char) -> size_t; + + pub fn strcmp(l: *const c_char, r: *const c_char) -> c_int; + + pub fn strcpy(dest: *mut c_char, src: *const c_char) -> *mut c_char; + + pub fn strncmp(_l: *const c_char, r: *const c_char, n: size_t) -> c_int; + + pub fn strncpy(dest: *mut c_char, src: *const c_char, n: size_t) -> *mut c_char; + + pub fn strnlen(cs: *const c_char, n: size_t) -> size_t; + + pub fn strrchr(s: *const c_char, c: c_int) -> *mut c_char; + + pub fn strstr(h: *const c_char, n: *const c_char) -> *mut c_char; + + pub fn wcschr(s: *const wchar_t, c: wchar_t) -> *mut wchar_t; + + pub fn wcslen(s: *const wchar_t) -> size_t; + + // ctype + pub fn isalpha(c: c_int) -> c_int; + + pub fn isascii(c: c_int) -> c_int; + + pub fn isdigit(c: c_int) -> c_int; + + pub fn islower(c: c_int) -> c_int; + + pub fn isprint(c: c_int) -> c_int; + + pub fn isspace(c: c_int) -> c_int; + + pub fn iswctype(wc: wint_t, ttype: wctype_t) -> c_int; + + pub fn iswdigit(wc: wint_t) -> c_int; + + pub fn iswlower(wc: wint_t) -> c_int; + + pub fn iswspace(wc: wint_t) -> c_int; + + pub fn iswupper(wc: wint_t) -> c_int; + + pub fn towupper(wc: wint_t) -> wint_t; + + pub fn towlower(wc: wint_t) -> wint_t; + + // cmath + pub fn atan(x: c_double) -> c_double; + + pub 
fn ceil(x: c_double) -> c_double; + + pub fn ceilf(x: c_float) -> c_float; + + pub fn exp(x: c_double) -> c_double; + + pub fn fabs(x: c_double) -> c_double; + + pub fn floor(x: c_double) -> c_double; + + pub fn frexp(x: c_double, e: *mut c_int) -> c_double; + + pub fn log(x: c_double) -> c_double; + + pub fn log2(x: c_double) -> c_double; + + pub fn pow(x: c_double, y: c_double) -> c_double; + + pub fn roundf(x: c_float) -> c_float; + + pub fn scalbn(x: c_double, n: c_int) -> c_double; + + pub fn sqrt(x: c_double) -> c_double; + + // stdlib + pub fn abs(x: c_int) -> c_int; + + pub fn atof(s: *const c_char) -> c_double; + + pub fn atoi(s: *const c_char) -> c_int; + + pub fn atol(s: *const c_char) -> c_long; + + pub fn atoll(s: *const c_char) -> c_longlong; + + pub fn bsearch( + key: *const c_void, + base: *const c_void, + nel: size_t, + width: size_t, + cmp: cmpfunc, + ) -> *mut c_void; + + pub fn div(num: c_int, den: c_int) -> div_t; + + pub fn ecvt(x: c_double, n: c_int, dp: *mut c_int, sign: *mut c_int) -> *mut c_char; + + pub fn imaxabs(a: intmax_t) -> intmax_t; + + pub fn llabs(a: c_longlong) -> c_longlong; + + pub fn qsort(base: *mut c_void, nel: size_t, width: size_t, cmp: cmpfunc); + + pub fn strtoul(s: *const c_char, p: *mut *mut c_char, base: c_int) -> c_ulong; + + pub fn strtol(s: *const c_char, p: *mut *mut c_char, base: c_int) -> c_long; + + pub fn wcstod(s: *const wchar_t, p: *mut *mut wchar_t) -> c_double; +} + +pub fn errno() -> c_int { + unsafe { *__errno_location() } +} + +pub fn CPU_COUNT_S(size: usize, cpuset: &cpu_set_t) -> c_int { + let mut s: u32 = 0; + let size_of_mask = size_of_val(&cpuset.bits[0]); + + for i in cpuset.bits[..(size / size_of_mask)].iter() { + s += i.count_ones(); + } + s as c_int +} + +pub fn CPU_COUNT(cpuset: &cpu_set_t) -> c_int { + CPU_COUNT_S(size_of::<cpu_set_t>(), cpuset) +} diff --git a/vendor/libc/src/trusty.rs b/vendor/libc/src/trusty.rs new file mode 100644 index 00000000000000..7441aade0631eb --- /dev/null +++ b/vendor/libc/src/trusty.rs @@ -0,0 +1,72 @@ +use crate::prelude::*; +pub type size_t = usize; +pub type ssize_t = isize; + +pub type off_t = i64; + +pub type c_uint8_t = u8; +pub type c_uint16_t = u16; +pub type c_uint32_t = u32; +pub type c_uint64_t = u64; + +pub type c_int8_t = i8; +pub type c_int16_t = i16; +pub type c_int32_t = i32; +pub type c_int64_t = i64; + +pub type intptr_t = isize; +pub type uintptr_t = usize; + +pub type time_t = c_long; + +pub type clockid_t = c_int; + +s! { + pub struct iovec { + pub iov_base: *mut c_void, + pub iov_len: size_t, + } + + pub struct timespec { + pub tv_sec: time_t, + pub tv_nsec: c_long, + } +} + +pub const PROT_READ: i32 = 1; +pub const PROT_WRITE: i32 = 2; + +// Trusty only supports `CLOCK_BOOTTIME`.
+pub const CLOCK_BOOTTIME: clockid_t = 7; + +pub const STDOUT_FILENO: c_int = 1; +pub const STDERR_FILENO: c_int = 2; + +pub const AT_PAGESZ: c_ulong = 6; + +pub const MAP_FAILED: *mut c_void = !0 as *mut c_void; + +extern "C" { + pub fn calloc(nobj: size_t, size: size_t) -> *mut c_void; + pub fn malloc(size: size_t) -> *mut c_void; + pub fn realloc(p: *mut c_void, size: size_t) -> *mut c_void; + pub fn free(p: *mut c_void); + pub fn memalign(align: size_t, size: size_t) -> *mut c_void; + pub fn posix_memalign(memptr: *mut *mut c_void, align: size_t, size: size_t) -> c_int; + pub fn write(fd: c_int, buf: *const c_void, count: size_t) -> ssize_t; + pub fn writev(fd: c_int, iov: *const crate::iovec, iovcnt: c_int) -> ssize_t; + pub fn close(fd: c_int) -> c_int; + pub fn strlen(cs: *const c_char) -> size_t; + pub fn getauxval(type_: c_ulong) -> c_ulong; + pub fn mmap( + addr: *mut c_void, + len: size_t, + prot: c_int, + flags: c_int, + fd: c_int, + offset: off_t, + ) -> *mut c_void; + pub fn munmap(addr: *mut c_void, len: size_t) -> c_int; + pub fn clock_gettime(clk_id: crate::clockid_t, tp: *mut crate::timespec) -> c_int; + pub fn nanosleep(rqtp: *const crate::timespec, rmtp: *mut crate::timespec) -> c_int; +} diff --git a/vendor/libc/src/types.rs b/vendor/libc/src/types.rs new file mode 100644 index 00000000000000..7d49a425d59ead --- /dev/null +++ b/vendor/libc/src/types.rs @@ -0,0 +1,39 @@ +//! Platform-agnostic support types. + +use core::mem::MaybeUninit; + +use crate::prelude::*; + +/// A transparent wrapper over `MaybeUninit<T>` to represent uninitialized padding +/// while providing `Default`. +// This is restricted to `Copy` types since that's a loose indicator that zeros is actually +// a valid bitpattern. There is no technical reason this is required, though, so it could be +// lifted in the future if it becomes a problem. +#[allow(unused)] +#[repr(transparent)] +#[derive(Clone, Copy)] +pub(crate) struct Padding<T: Copy>(MaybeUninit<T>); + +impl<T: Copy> Default for Padding<T> { + fn default() -> Self { + Self(MaybeUninit::zeroed()) + } +} + +impl<T: Copy> fmt::Debug for Padding<T> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + // Taken from `MaybeUninit`'s debug implementation + // NB: there is no `.pad_fmt` so we can't use a simpler `format_args!("Padding<{..}>")`. + let full_name = core::any::type_name::<Self>(); + let prefix_len = full_name.find("Padding").unwrap(); + f.pad(&full_name[prefix_len..]) + } +} + +/// The default repr type used for C style enums in Rust.
+#[cfg(target_env = "msvc")] +#[allow(unused)] +pub(crate) type CEnumRepr = c_int; +#[cfg(not(target_env = "msvc"))] +#[allow(unused)] +pub(crate) type CEnumRepr = c_uint; diff --git a/vendor/libc/src/unix/aix/mod.rs b/vendor/libc/src/unix/aix/mod.rs new file mode 100644 index 00000000000000..b6d1af52d133cb --- /dev/null +++ b/vendor/libc/src/unix/aix/mod.rs @@ -0,0 +1,3382 @@ +use crate::prelude::*; +use crate::{in_addr_t, in_port_t}; + +pub type caddr_t = *mut c_char; +pub type clockid_t = c_longlong; +pub type blkcnt_t = c_long; +pub type clock_t = c_int; +pub type daddr_t = c_long; +pub type dev_t = c_ulong; +pub type fpos64_t = c_longlong; +pub type fsblkcnt_t = c_ulong; +pub type fsfilcnt_t = c_ulong; +pub type ino_t = c_ulong; +pub type key_t = c_int; +pub type mode_t = c_uint; +pub type nlink_t = c_short; +pub type rlim_t = c_ulong; +pub type speed_t = c_uint; +pub type tcflag_t = c_uint; +pub type time_t = c_long; +pub type time64_t = i64; +pub type timer_t = c_long; +pub type wchar_t = c_uint; +pub type nfds_t = c_uint; +pub type projid_t = c_int; +pub type id_t = c_uint; +pub type blksize64_t = c_ulonglong; +pub type blkcnt64_t = c_ulonglong; +pub type suseconds_t = c_int; +pub type useconds_t = c_uint; +pub type off_t = c_long; +pub type offset_t = c_longlong; +pub type off64_t = c_longlong; +pub type idtype_t = c_uint; + +pub type socklen_t = c_uint; +pub type sa_family_t = c_uchar; + +pub type signal_t = c_int; +pub type pthread_t = c_uint; +pub type pthread_key_t = c_uint; +pub type thread_t = pthread_t; +pub type blksize_t = c_long; +pub type nl_item = c_int; +pub type mqd_t = c_int; +pub type shmatt_t = c_ulong; +pub type regoff_t = c_long; +pub type rlim64_t = c_ulonglong; + +pub type sem_t = c_int; +pub type pollset_t = c_int; +pub type sctp_assoc_t = c_uint; + +pub type pthread_rwlockattr_t = *mut c_void; +pub type pthread_condattr_t = *mut c_void; +pub type pthread_mutexattr_t = *mut c_void; +pub type pthread_attr_t = *mut c_void; +pub type pthread_barrierattr_t = *mut c_void; +pub type posix_spawn_file_actions_t = *mut c_char; +pub type iconv_t = *mut c_void; + +e! { + #[repr(u32)] + pub enum uio_rw { + UIO_READ = 0, + UIO_WRITE, + UIO_READ_NO_MOVE, + UIO_WRITE_NO_MOVE, + UIO_PWRITE, + } + #[repr(u32)] + pub enum ACTION { + FIND = 0, + ENTER, + } +} + +s! 
{ + pub struct fsid_t { + pub val: [c_uint; 2], + } + + pub struct fsid64_t { + pub val: [crate::uint64_t; 2], + } + + pub struct timezone { + pub tz_minuteswest: c_int, + pub tz_dsttime: c_int, + } + + pub struct ip_mreq { + pub imr_multiaddr: in_addr, + pub imr_interface: in_addr, + } + + pub struct dirent { + pub d_offset: c_ulong, + pub d_ino: crate::ino_t, + pub d_reclen: c_ushort, + pub d_namlen: c_ushort, + pub d_name: [c_char; 256], + } + + pub struct termios { + pub c_iflag: crate::tcflag_t, + pub c_oflag: crate::tcflag_t, + pub c_cflag: crate::tcflag_t, + pub c_lflag: crate::tcflag_t, + pub c_cc: [crate::cc_t; crate::NCCS], + } + + pub struct flock64 { + pub l_type: c_short, + pub l_whence: c_short, + pub l_sysid: c_uint, + pub l_pid: crate::pid_t, + pub l_vfs: c_int, + pub l_start: off64_t, + pub l_len: off64_t, + } + + pub struct msghdr { + pub msg_name: *mut c_void, + pub msg_namelen: crate::socklen_t, + pub msg_iov: *mut crate::iovec, + pub msg_iovlen: c_int, + pub msg_control: *mut c_void, + pub msg_controllen: socklen_t, + pub msg_flags: c_int, + } + + pub struct statvfs64 { + pub f_bsize: crate::blksize64_t, + pub f_frsize: crate::blksize64_t, + pub f_blocks: crate::blkcnt64_t, + pub f_bfree: crate::blkcnt64_t, + pub f_bavail: crate::blkcnt64_t, + pub f_files: crate::blkcnt64_t, + pub f_ffree: crate::blkcnt64_t, + pub f_favail: crate::blkcnt64_t, + pub f_fsid: fsid64_t, + pub f_basetype: [c_char; 16], + pub f_flag: c_ulong, + pub f_namemax: c_ulong, + pub f_fstr: [c_char; 32], + pub f_filler: [c_ulong; 16], + } + + pub struct lconv { + pub decimal_point: *mut c_char, + pub thousands_sep: *mut c_char, + pub grouping: *mut c_char, + pub int_curr_symbol: *mut c_char, + pub currency_symbol: *mut c_char, + pub mon_decimal_point: *mut c_char, + pub mon_thousands_sep: *mut c_char, + pub mon_grouping: *mut c_char, + pub positive_sign: *mut c_char, + pub negative_sign: *mut c_char, + pub int_frac_digits: c_char, + pub frac_digits: c_char, + pub p_cs_precedes: c_char, + pub p_sep_by_space: c_char, + pub n_cs_precedes: c_char, + pub n_sep_by_space: c_char, + pub p_sign_posn: c_char, + pub n_sign_posn: c_char, + pub left_parenthesis: *mut c_char, + pub right_parenthesis: *mut c_char, + pub int_p_cs_precedes: c_char, + pub int_p_sep_by_space: c_char, + pub int_n_cs_precedes: c_char, + pub int_n_sep_by_space: c_char, + pub int_p_sign_posn: c_char, + pub int_n_sign_posn: c_char, + } + + pub struct tm { + pub tm_sec: c_int, + pub tm_min: c_int, + pub tm_hour: c_int, + pub tm_mday: c_int, + pub tm_mon: c_int, + pub tm_year: c_int, + pub tm_wday: c_int, + pub tm_yday: c_int, + pub tm_isdst: c_int, + } + + pub struct addrinfo { + pub ai_flags: c_int, + pub ai_family: c_int, + pub ai_socktype: c_int, + pub ai_protocol: c_int, + pub ai_addrlen: c_ulong, + pub ai_canonname: *mut c_char, + pub ai_addr: *mut crate::sockaddr, + pub ai_next: *mut addrinfo, + pub ai_eflags: c_int, + } + + pub struct in_addr { + pub s_addr: in_addr_t, + } + + pub struct ip_mreq_source { + pub imr_multiaddr: in_addr, + pub imr_sourceaddr: in_addr, + pub imr_interface: in_addr, + } + + pub struct sockaddr { + pub sa_len: c_uchar, + pub sa_family: sa_family_t, + pub sa_data: [c_char; 14], + } + + pub struct sockaddr_dl { + pub sdl_len: c_uchar, + pub sdl_family: c_uchar, + pub sdl_index: c_ushort, + pub sdl_type: c_uchar, + pub sdl_nlen: c_uchar, + pub sdl_alen: c_uchar, + pub sdl_slen: c_uchar, + pub sdl_data: [c_char; 120], + } + + pub struct sockaddr_in { + pub sin_len: c_uchar, + pub sin_family: sa_family_t, + pub 
sin_port: in_port_t, + pub sin_addr: in_addr, + pub sin_zero: [c_uchar; 8], + } + + pub struct sockaddr_in6 { + pub sin6_len: c_uchar, + pub sin6_family: c_uchar, + pub sin6_port: crate::uint16_t, + pub sin6_flowinfo: crate::uint32_t, + pub sin6_addr: crate::in6_addr, + pub sin6_scope_id: crate::uint32_t, + } + + pub struct sockaddr_storage { + pub __ss_len: c_uchar, + pub ss_family: sa_family_t, + __ss_pad1: [c_char; 6], + __ss_align: crate::int64_t, + __ss_pad2: [c_char; 1265], + } + + pub struct sockaddr_un { + pub sun_len: c_uchar, + pub sun_family: sa_family_t, + pub sun_path: [c_char; 1023], + } + + pub struct st_timespec { + pub tv_sec: crate::time_t, + pub tv_nsec: c_int, + } + + pub struct statfs64 { + pub f_version: c_int, + pub f_type: c_int, + pub f_bsize: blksize64_t, + pub f_blocks: blkcnt64_t, + pub f_bfree: blkcnt64_t, + pub f_bavail: blkcnt64_t, + pub f_files: crate::uint64_t, + pub f_ffree: crate::uint64_t, + pub f_fsid: fsid64_t, + pub f_vfstype: c_int, + pub f_fsize: blksize64_t, + pub f_vfsnumber: c_int, + pub f_vfsoff: c_int, + pub f_vfslen: c_int, + pub f_vfsvers: c_int, + pub f_fname: [c_char; 32], + pub f_fpack: [c_char; 32], + pub f_name_max: c_int, + } + + pub struct passwd { + pub pw_name: *mut c_char, + pub pw_passwd: *mut c_char, + pub pw_uid: crate::uid_t, + pub pw_gid: crate::gid_t, + pub pw_gecos: *mut c_char, + pub pw_dir: *mut c_char, + pub pw_shell: *mut c_char, + } + + pub struct utsname { + pub sysname: [c_char; 32], + pub nodename: [c_char; 32], + pub release: [c_char; 32], + pub version: [c_char; 32], + pub machine: [c_char; 32], + } + + pub struct xutsname { + pub nid: c_uint, + pub reserved: c_int, + pub longnid: c_ulonglong, + } + + pub struct cmsghdr { + pub cmsg_len: crate::socklen_t, + pub cmsg_level: c_int, + pub cmsg_type: c_int, + } + + pub struct sigevent { + pub sigev_value: crate::sigval, + pub sigev_signo: c_int, + pub sigev_notify: c_int, + pub sigev_notify_function: extern "C" fn(val: crate::sigval), + pub sigev_notify_attributes: *mut pthread_attr_t, + } + + pub struct osigevent { + pub sevt_value: *mut c_void, + pub sevt_signo: signal_t, + } + + pub struct poll_ctl { + pub cmd: c_short, + pub events: c_short, + pub fd: c_int, + } + + pub struct sf_parms { + pub header_data: *mut c_void, + pub header_length: c_uint, + pub file_descriptor: c_int, + pub file_size: crate::uint64_t, + pub file_offset: crate::uint64_t, + pub file_bytes: crate::int64_t, + pub trailer_data: *mut c_void, + pub trailer_length: c_uint, + pub bytes_sent: crate::uint64_t, + } + + pub struct mmsghdr { + pub msg_hdr: crate::msghdr, + pub msg_len: c_uint, + } + + pub struct sched_param { + pub sched_priority: c_int, + pub sched_policy: c_int, + pub sched_reserved: [c_int; 6], + } + + pub struct stack_t { + pub ss_sp: *mut c_void, + pub ss_size: size_t, + pub ss_flags: c_int, + pub __pad: [c_int; 4], + } + + pub struct posix_spawnattr_t { + pub posix_attr_flags: c_short, + pub posix_attr_pgroup: crate::pid_t, + pub posix_attr_sigmask: crate::sigset_t, + pub posix_attr_sigdefault: crate::sigset_t, + pub posix_attr_schedpolicy: c_int, + pub posix_attr_schedparam: sched_param, + } + + pub struct glob_t { + pub gl_pathc: size_t, + pub gl_pathv: *mut *mut c_char, + pub gl_offs: size_t, + pub gl_padr: *mut c_void, + pub gl_ptx: *mut c_void, + } + + pub struct mallinfo { + pub arena: c_ulong, + pub ordblks: c_int, + pub smblks: c_int, + pub hblks: c_int, + pub hblkhd: c_int, + pub usmblks: c_ulong, + pub fsmblks: c_ulong, + pub uordblks: c_ulong, + pub fordblks: c_ulong, + 
pub keepcost: c_int, + } + + pub struct exit_status { + pub e_termination: c_short, + pub e_exit: c_short, + } + + pub struct utmp { + pub ut_user: [c_char; 256], + pub ut_id: [c_char; 14], + pub ut_line: [c_char; 64], + pub ut_pid: crate::pid_t, + pub ut_type: c_short, + pub ut_time: time64_t, + pub ut_exit: exit_status, + pub ut_host: [c_char; 256], + pub __dbl_word_pad: c_int, + pub __reservedA: [c_int; 2], + pub __reservedV: [c_int; 6], + } + + pub struct regmatch_t { + pub rm_so: regoff_t, + pub rm_eo: regoff_t, + } + + pub struct regex_t { + pub re_nsub: size_t, + pub re_comp: *mut c_void, + pub re_cflags: c_int, + pub re_erroff: size_t, + pub re_len: size_t, + pub re_ucoll: [crate::wchar_t; 2], + pub re_lsub: [*mut c_void; 24], + pub re_esub: [*mut c_void; 24], + pub re_map: *mut c_uchar, + pub __maxsub: c_int, + pub __unused: [*mut c_void; 34], + } + + pub struct rlimit64 { + pub rlim_cur: rlim64_t, + pub rlim_max: rlim64_t, + } + + pub struct shmid_ds { + pub shm_perm: ipc_perm, + pub shm_segsz: size_t, + pub shm_lpid: crate::pid_t, + pub shm_cpid: crate::pid_t, + pub shm_nattch: shmatt_t, + pub shm_cnattch: shmatt_t, + pub shm_atime: time_t, + pub shm_dtime: time_t, + pub shm_ctime: time_t, + pub shm_handle: crate::uint32_t, + pub shm_extshm: c_int, + pub shm_pagesize: crate::int64_t, + pub shm_lba: crate::uint64_t, + pub shm_reserved0: crate::int64_t, + pub shm_reserved1: crate::int64_t, + } + + pub struct stat64 { + pub st_dev: dev_t, + pub st_ino: ino_t, + pub st_mode: mode_t, + pub st_nlink: nlink_t, + pub st_flag: c_ushort, + pub st_uid: crate::uid_t, + pub st_gid: crate::gid_t, + pub st_rdev: dev_t, + pub st_ssize: c_int, + pub st_atim: crate::timespec, + pub st_mtim: crate::timespec, + pub st_ctim: crate::timespec, + pub st_blksize: blksize_t, + pub st_blocks: blkcnt_t, + pub st_vfstype: c_int, + pub st_vfs: c_uint, + pub st_type: c_uint, + pub st_gen: c_uint, + pub st_reserved: [c_uint; 10], + pub st_size: off64_t, + } + + pub struct mntent { + pub mnt_fsname: *mut c_char, + pub mnt_dir: *mut c_char, + pub mnt_type: *mut c_char, + pub mnt_opts: *mut c_char, + pub mnt_freq: c_int, + pub mnt_passno: c_int, + } + + pub struct ipc_perm { + pub uid: crate::uid_t, + pub gid: crate::gid_t, + pub cuid: crate::uid_t, + pub cgid: crate::gid_t, + pub mode: mode_t, + pub seq: c_ushort, + pub __reserved: c_ushort, + pub key: key_t, + } + + pub struct entry { + pub key: *mut c_char, + pub data: *mut c_void, + } + + pub struct mq_attr { + pub mq_flags: c_long, + pub mq_maxmsg: c_long, + pub mq_msgsize: c_long, + pub mq_curmsgs: c_long, + } + + pub struct sembuf { + pub sem_num: c_ushort, + pub sem_op: c_short, + pub sem_flg: c_short, + } + + pub struct if_nameindex { + pub if_index: c_uint, + pub if_name: *mut c_char, + } + + pub struct itimerspec { + pub it_interval: crate::timespec, + pub it_value: crate::timespec, + } + + pub struct sigaction { + pub sa_sigaction: crate::sighandler_t, // FIXME(union): this field is actually a union + pub sa_mask: sigset_t, + pub sa_flags: c_int, + } +} + +s_no_extra_traits! { + pub union __poll_ctl_ext_u { + pub addr: *mut c_void, + pub data32: u32, + pub data: u64, + } + + pub struct poll_ctl_ext { + pub version: u8, + pub command: u8, + pub events: c_short, + pub fd: c_int, + pub u: __poll_ctl_ext_u, + pub reserved64: [u64; 6], + } +} + +cfg_if! 
{ + if #[cfg(feature = "extra_traits")] { + impl PartialEq for __poll_ctl_ext_u { + fn eq(&self, other: &__poll_ctl_ext_u) -> bool { + unsafe { + self.addr == other.addr + && self.data32 == other.data32 + && self.data == other.data + } + } + } + impl Eq for __poll_ctl_ext_u {} + impl hash::Hash for __poll_ctl_ext_u { + fn hash<H: hash::Hasher>(&self, state: &mut H) { + unsafe { + self.addr.hash(state); + self.data32.hash(state); + self.data.hash(state); + } + } + } + + impl PartialEq for poll_ctl_ext { + fn eq(&self, other: &poll_ctl_ext) -> bool { + self.version == other.version + && self.command == other.command + && self.events == other.events + && self.fd == other.fd + && self.reserved64 == other.reserved64 + && self.u == other.u + } + } + impl Eq for poll_ctl_ext {} + impl hash::Hash for poll_ctl_ext { + fn hash<H: hash::Hasher>(&self, state: &mut H) { + self.version.hash(state); + self.command.hash(state); + self.events.hash(state); + self.fd.hash(state); + self.u.hash(state); + self.reserved64.hash(state); + } + } + } +} + +// dlfcn.h +pub const RTLD_LAZY: c_int = 0x4; +pub const RTLD_NOW: c_int = 0x2; +pub const RTLD_GLOBAL: c_int = 0x10000; +pub const RTLD_LOCAL: c_int = 0x80000; +pub const RTLD_MEMBER: c_int = 0x40000; +pub const RTLD_NOAUTODEFER: c_int = 0x20000; +pub const RTLD_DEFAULT: *mut c_void = -1isize as *mut c_void; +pub const RTLD_MYSELF: *mut c_void = -2isize as *mut c_void; +pub const RTLD_NEXT: *mut c_void = -3isize as *mut c_void; + +// fcntl.h +pub const O_RDONLY: c_int = 0x0; +pub const O_WRONLY: c_int = 0x1; +pub const O_RDWR: c_int = 0x2; +pub const O_NDELAY: c_int = 0x8000; +pub const O_APPEND: c_int = 0x8; +pub const O_DSYNC: c_int = 0x400000; +pub const O_CREAT: c_int = 0x100; +pub const O_EXCL: c_int = 0x400; +pub const O_NOCTTY: c_int = 0x800; +pub const O_TRUNC: c_int = 0x200; +pub const O_NOFOLLOW: c_int = 0x1000000; +pub const O_DIRECTORY: c_int = 0x80000; +pub const O_SEARCH: c_int = 0x20; +pub const O_EXEC: c_int = 0x20; +pub const O_CLOEXEC: c_int = 0x800000; +pub const O_ACCMODE: c_int = O_RDONLY | O_WRONLY | O_RDWR | O_EXEC | O_SEARCH; +pub const O_DIRECT: c_int = 0x8000000; +pub const O_TTY_INIT: c_int = 0; +pub const O_RSYNC: c_int = 0x200000; +pub const O_LARGEFILE: c_int = 0x4000000; +pub const F_DUPFD: c_int = 0; +pub const F_DUPFD_CLOEXEC: c_int = 16; +pub const F_GETFD: c_int = 1; +pub const F_SETFD: c_int = 2; +pub const F_GETFL: c_int = 3; +pub const F_SETFL: c_int = 4; +pub const F_GETLK: c_int = F_GETLK64; +pub const F_SETLK: c_int = F_SETLK64; +pub const F_SETLKW: c_int = F_SETLKW64; +pub const F_GETOWN: c_int = 8; +pub const F_SETOWN: c_int = 9; +pub const F_CLOSEM: c_int = 10; +pub const F_GETLK64: c_int = 11; +pub const F_SETLK64: c_int = 12; +pub const F_SETLKW64: c_int = 13; +pub const F_DUP2FD: c_int = 14; +pub const F_TSTLK: c_int = 15; +pub const AT_FDCWD: c_int = -2; +pub const AT_SYMLINK_NOFOLLOW: c_int = 1; +pub const AT_SYMLINK_FOLLOW: c_int = 2; +pub const AT_REMOVEDIR: c_int = 1; +pub const AT_EACCESS: c_int = 1; +pub const O_SYNC: c_int = 16; +pub const O_NONBLOCK: c_int = 4; +pub const FASYNC: c_int = 0x20000; +pub const POSIX_FADV_NORMAL: c_int = 1; +pub const POSIX_FADV_SEQUENTIAL: c_int = 2; +pub const POSIX_FADV_RANDOM: c_int = 3; +pub const POSIX_FADV_WILLNEED: c_int = 4; +pub const POSIX_FADV_DONTNEED: c_int = 5; +pub const POSIX_FADV_NOREUSE: c_int = 6; + +// glob.h +pub const GLOB_APPEND: c_int = 0x1; +pub const GLOB_DOOFFS: c_int = 0x2; +pub const GLOB_ERR: c_int = 0x4; +pub const GLOB_MARK: c_int = 0x8; +pub const GLOB_NOCHECK: c_int = 0x10;
+pub const GLOB_NOSORT: c_int = 0x20; +pub const GLOB_NOESCAPE: c_int = 0x80; +pub const GLOB_NOSPACE: c_int = 0x2000; +pub const GLOB_ABORTED: c_int = 0x1000; +pub const GLOB_NOMATCH: c_int = 0x4000; +pub const GLOB_NOSYS: c_int = 0x8000; + +// langinfo.h +pub const DAY_1: crate::nl_item = 13; +pub const DAY_2: crate::nl_item = 14; +pub const DAY_3: crate::nl_item = 15; +pub const DAY_4: crate::nl_item = 16; +pub const DAY_5: crate::nl_item = 17; +pub const DAY_6: crate::nl_item = 18; +pub const DAY_7: crate::nl_item = 19; +pub const ABDAY_1: crate::nl_item = 6; +pub const ABDAY_2: crate::nl_item = 7; +pub const ABDAY_3: crate::nl_item = 8; +pub const ABDAY_4: crate::nl_item = 9; +pub const ABDAY_5: crate::nl_item = 10; +pub const ABDAY_6: crate::nl_item = 11; +pub const ABDAY_7: crate::nl_item = 12; +pub const MON_1: crate::nl_item = 32; +pub const MON_2: crate::nl_item = 33; +pub const MON_3: crate::nl_item = 34; +pub const MON_4: crate::nl_item = 35; +pub const MON_5: crate::nl_item = 36; +pub const MON_6: crate::nl_item = 37; +pub const MON_7: crate::nl_item = 38; +pub const MON_8: crate::nl_item = 39; +pub const MON_9: crate::nl_item = 40; +pub const MON_10: crate::nl_item = 41; +pub const MON_11: crate::nl_item = 42; +pub const MON_12: crate::nl_item = 43; +pub const ABMON_1: crate::nl_item = 20; +pub const ABMON_2: crate::nl_item = 21; +pub const ABMON_3: crate::nl_item = 22; +pub const ABMON_4: crate::nl_item = 23; +pub const ABMON_5: crate::nl_item = 24; +pub const ABMON_6: crate::nl_item = 25; +pub const ABMON_7: crate::nl_item = 26; +pub const ABMON_8: crate::nl_item = 27; +pub const ABMON_9: crate::nl_item = 28; +pub const ABMON_10: crate::nl_item = 29; +pub const ABMON_11: crate::nl_item = 30; +pub const ABMON_12: crate::nl_item = 31; +pub const RADIXCHAR: crate::nl_item = 44; +pub const THOUSEP: crate::nl_item = 45; +pub const YESSTR: crate::nl_item = 46; +pub const NOSTR: crate::nl_item = 47; +pub const CRNCYSTR: crate::nl_item = 48; +pub const D_T_FMT: crate::nl_item = 1; +pub const D_FMT: crate::nl_item = 2; +pub const T_FMT: crate::nl_item = 3; +pub const AM_STR: crate::nl_item = 4; +pub const PM_STR: crate::nl_item = 5; +pub const CODESET: crate::nl_item = 49; +pub const T_FMT_AMPM: crate::nl_item = 55; +pub const ERA: crate::nl_item = 56; +pub const ERA_D_FMT: crate::nl_item = 57; +pub const ERA_D_T_FMT: crate::nl_item = 58; +pub const ERA_T_FMT: crate::nl_item = 59; +pub const ALT_DIGITS: crate::nl_item = 60; +pub const YESEXPR: crate::nl_item = 61; +pub const NOEXPR: crate::nl_item = 62; + +// locale.h +pub const LC_GLOBAL_LOCALE: crate::locale_t = -1isize as crate::locale_t; +pub const LC_COLLATE: c_int = 0; +pub const LC_CTYPE: c_int = 1; +pub const LC_MONETARY: c_int = 2; +pub const LC_NUMERIC: c_int = 3; +pub const LC_TIME: c_int = 4; +pub const LC_MESSAGES: c_int = 5; +pub const LC_ALL: c_int = -1; +pub const LC_COLLATE_MASK: c_int = 1; +pub const LC_CTYPE_MASK: c_int = 2; +pub const LC_MESSAGES_MASK: c_int = 4; +pub const LC_MONETARY_MASK: c_int = 8; +pub const LC_NUMERIC_MASK: c_int = 16; +pub const LC_TIME_MASK: c_int = 32; +pub const LC_ALL_MASK: c_int = LC_COLLATE_MASK + | LC_CTYPE_MASK + | LC_MESSAGES_MASK + | LC_MONETARY_MASK + | LC_NUMERIC_MASK + | LC_TIME_MASK; + +// netdb.h +pub const NI_MAXHOST: crate::socklen_t = 1025; +pub const NI_MAXSERV: crate::socklen_t = 32; +pub const NI_NOFQDN: crate::socklen_t = 0x1; +pub const NI_NUMERICHOST: crate::socklen_t = 0x2; +pub const NI_NAMEREQD: crate::socklen_t = 0x4; +pub const NI_NUMERICSERV: crate::socklen_t 
= 0x8; +pub const NI_DGRAM: crate::socklen_t = 0x10; +pub const NI_NUMERICSCOPE: crate::socklen_t = 0x40; +pub const EAI_AGAIN: c_int = 2; +pub const EAI_BADFLAGS: c_int = 3; +pub const EAI_FAIL: c_int = 4; +pub const EAI_FAMILY: c_int = 5; +pub const EAI_MEMORY: c_int = 6; +pub const EAI_NODATA: c_int = 7; +pub const EAI_NONAME: c_int = 8; +pub const EAI_SERVICE: c_int = 9; +pub const EAI_SOCKTYPE: c_int = 10; +pub const EAI_SYSTEM: c_int = 11; +pub const EAI_OVERFLOW: c_int = 13; +pub const AI_CANONNAME: c_int = 0x01; +pub const AI_PASSIVE: c_int = 0x02; +pub const AI_NUMERICHOST: c_int = 0x04; +pub const AI_ADDRCONFIG: c_int = 0x08; +pub const AI_V4MAPPED: c_int = 0x10; +pub const AI_ALL: c_int = 0x20; +pub const AI_NUMERICSERV: c_int = 0x40; +pub const AI_EXTFLAGS: c_int = 0x80; +pub const AI_DEFAULT: c_int = AI_V4MAPPED | AI_ADDRCONFIG; +pub const IPV6_ADDRFORM: c_int = 22; +pub const IPV6_ADDR_PREFERENCES: c_int = 74; +pub const IPV6_CHECKSUM: c_int = 39; +pub const IPV6_DONTFRAG: c_int = 45; +pub const IPV6_DSTOPTS: c_int = 54; +pub const IPV6_FLOWINFO_FLOWLABEL: c_int = 0x00ffffff; +pub const IPV6_FLOWINFO_PRIORITY: c_int = 0x0f000000; +pub const IPV6_FLOWINFO_PRIFLOW: c_int = 0x0fffffff; +pub const IPV6_FLOWINFO_SRFLAG: c_int = 0x10000000; +pub const IPV6_FLOWINFO_VERSION: c_int = 0xf0000000; +pub const IPV6_HOPLIMIT: c_int = 40; +pub const IPV6_HOPOPTS: c_int = 52; +pub const IPV6_NEXTHOP: c_int = 48; +pub const IPV6_PATHMTU: c_int = 46; +pub const IPV6_PKTINFO: c_int = 33; +pub const IPV6_PREFER_SRC_CGA: c_int = 16; +pub const IPV6_PREFER_SRC_COA: c_int = 2; +pub const IPV6_PREFER_SRC_HOME: c_int = 1; +pub const IPV6_PREFER_SRC_NONCGA: c_int = 32; +pub const IPV6_PREFER_SRC_PUBLIC: c_int = 4; +pub const IPV6_PREFER_SRC_TMP: c_int = 8; +pub const IPV6_RECVDSTOPTS: c_int = 56; +pub const IPV6_RECVHOPLIMIT: c_int = 41; +pub const IPV6_RECVHOPOPTS: c_int = 53; +pub const IPV6_RECVPATHMTU: c_int = 47; +pub const IPV6_RECVRTHDR: c_int = 51; +pub const IPV6_RECVTCLASS: c_int = 42; +pub const IPV6_RTHDR: c_int = 50; +pub const IPV6_RTHDRDSTOPTS: c_int = 55; +pub const IPV6_TCLASS: c_int = 43; + +// net/bpf.h +pub const DLT_NULL: c_int = 0x18; +pub const DLT_EN10MB: c_int = 0x6; +pub const DLT_EN3MB: c_int = 0x1a; +pub const DLT_AX25: c_int = 0x5; +pub const DLT_PRONET: c_int = 0xd; +pub const DLT_IEEE802: c_int = 0x7; +pub const DLT_ARCNET: c_int = 0x23; +pub const DLT_SLIP: c_int = 0x1c; +pub const DLT_PPP: c_int = 0x17; +pub const DLT_FDDI: c_int = 0xf; +pub const DLT_ATM: c_int = 0x25; +pub const DLT_IPOIB: c_int = 0xc7; +pub const BIOCSETF: c_int = 0x80104267; +pub const BIOCGRTIMEOUT: c_int = 0x4010426e; +pub const BIOCGBLEN: c_int = 0x40044266; +pub const BIOCSBLEN: c_int = 0xc0044266; +pub const BIOCFLUSH: c_int = 0x20004268; +pub const BIOCPROMISC: c_int = 0x20004269; +pub const BIOCGDLT: c_int = 0x4004426a; +pub const BIOCSRTIMEOUT: c_int = 0x8010426d; +pub const BIOCGSTATS: c_int = 0x4008426f; +pub const BIOCIMMEDIATE: c_int = 0x80044270; +pub const BIOCVERSION: c_int = 0x40044271; +pub const BIOCSDEVNO: c_int = 0x20004272; +pub const BIOCGETIF: c_int = 0x4020426b; +pub const BIOCSETIF: c_int = 0x8020426c; +pub const BPF_ABS: c_int = 32; +pub const BPF_ADD: c_int = 0; +pub const BPF_ALIGNMENT: c_ulong = 4; +pub const BPF_ALU: c_int = 4; +pub const BPF_AND: c_int = 80; +pub const BPF_B: c_int = 16; +pub const BPF_DIV: c_int = 48; +pub const BPF_H: c_int = 8; +pub const BPF_IMM: c_int = 0; +pub const BPF_IND: c_int = 64; +pub const BPF_JA: c_int = 0; +pub const BPF_JEQ: c_int = 
16; +pub const BPF_JGE: c_int = 48; +pub const BPF_JGT: c_int = 32; +pub const BPF_JMP: c_int = 5; +pub const BPF_JSET: c_int = 64; +pub const BPF_K: c_int = 0; +pub const BPF_LD: c_int = 0; +pub const BPF_LDX: c_int = 1; +pub const BPF_LEN: c_int = 128; +pub const BPF_LSH: c_int = 96; +pub const BPF_MAXINSNS: c_int = 512; +pub const BPF_MEM: c_int = 96; +pub const BPF_MEMWORDS: c_int = 16; +pub const BPF_MISC: c_int = 7; +pub const BPF_MSH: c_int = 160; +pub const BPF_MUL: c_int = 32; +pub const BPF_NEG: c_int = 128; +pub const BPF_OR: c_int = 64; +pub const BPF_RET: c_int = 6; +pub const BPF_RSH: c_int = 112; +pub const BPF_ST: c_int = 2; +pub const BPF_STX: c_int = 3; +pub const BPF_SUB: c_int = 16; +pub const BPF_W: c_int = 0; +pub const BPF_X: c_int = 8; + +// net/if.h +pub const IFNET_SLOWHZ: c_int = 1; +pub const IFQ_MAXLEN: c_int = 50; +pub const IFF_UP: c_int = 0x1; +pub const IFF_BROADCAST: c_int = 0x2; +pub const IFF_DEBUG: c_int = 0x4; +pub const IFF_LOOPBACK: c_int = 0x8; +pub const IFF_POINTOPOINT: c_int = 0x10; +pub const IFF_NOTRAILERS: c_int = 0x20; +pub const IFF_RUNNING: c_int = 0x40; +pub const IFF_NOARP: c_int = 0x80; +pub const IFF_PROMISC: c_int = 0x100; +pub const IFF_ALLMULTI: c_int = 0x200; +pub const IFF_MULTICAST: c_int = 0x80000; +pub const IFF_LINK0: c_int = 0x100000; +pub const IFF_LINK1: c_int = 0x200000; +pub const IFF_LINK2: c_int = 0x400000; +pub const IFF_OACTIVE: c_int = 0x400; +pub const IFF_SIMPLEX: c_int = 0x800; + +// net/if_arp.h +pub const ARPHRD_ETHER: c_int = 1; +pub const ARPHRD_802_5: c_int = 6; +pub const ARPHRD_802_3: c_int = 6; +pub const ARPHRD_FDDI: c_int = 1; + +// net/route.h +pub const RTM_ADD: c_int = 0x1; +pub const RTM_DELETE: c_int = 0x2; +pub const RTM_CHANGE: c_int = 0x3; +pub const RTM_GET: c_int = 0x4; +pub const RTM_LOSING: c_int = 0x5; +pub const RTM_REDIRECT: c_int = 0x6; +pub const RTM_MISS: c_int = 0x7; +pub const RTM_LOCK: c_int = 0x8; +pub const RTM_OLDADD: c_int = 0x9; +pub const RTM_OLDDEL: c_int = 0xa; +pub const RTM_RESOLVE: c_int = 0xb; +pub const RTM_NEWADDR: c_int = 0xc; +pub const RTM_DELADDR: c_int = 0xd; +pub const RTM_IFINFO: c_int = 0xe; +pub const RTM_EXPIRE: c_int = 0xf; +pub const RTM_RTLOST: c_int = 0x10; +pub const RTM_GETNEXT: c_int = 0x11; +pub const RTM_SAMEADDR: c_int = 0x12; +pub const RTM_SET: c_int = 0x13; +pub const RTV_MTU: c_int = 0x1; +pub const RTV_HOPCOUNT: c_int = 0x2; +pub const RTV_EXPIRE: c_int = 0x4; +pub const RTV_RPIPE: c_int = 0x8; +pub const RTV_SPIPE: c_int = 0x10; +pub const RTV_SSTHRESH: c_int = 0x20; +pub const RTV_RTT: c_int = 0x40; +pub const RTV_RTTVAR: c_int = 0x80; +pub const RTA_DST: c_int = 0x1; +pub const RTA_GATEWAY: c_int = 0x2; +pub const RTA_NETMASK: c_int = 0x4; +pub const RTA_GENMASK: c_int = 0x8; +pub const RTA_IFP: c_int = 0x10; +pub const RTA_IFA: c_int = 0x20; +pub const RTA_AUTHOR: c_int = 0x40; +pub const RTA_BRD: c_int = 0x80; +pub const RTA_DOWNSTREAM: c_int = 0x100; +pub const RTAX_DST: c_int = 0; +pub const RTAX_GATEWAY: c_int = 1; +pub const RTAX_NETMASK: c_int = 2; +pub const RTAX_GENMASK: c_int = 3; +pub const RTAX_IFP: c_int = 4; +pub const RTAX_IFA: c_int = 5; +pub const RTAX_AUTHOR: c_int = 6; +pub const RTAX_BRD: c_int = 7; +pub const RTAX_MAX: c_int = 8; +pub const RTF_UP: c_int = 0x1; +pub const RTF_GATEWAY: c_int = 0x2; +pub const RTF_HOST: c_int = 0x4; +pub const RTF_REJECT: c_int = 0x8; +pub const RTF_DYNAMIC: c_int = 0x10; +pub const RTF_MODIFIED: c_int = 0x20; +pub const RTF_DONE: c_int = 0x40; +pub const RTF_MASK: c_int = 0x80; +pub const 
RTF_CLONING: c_int = 0x100; +pub const RTF_XRESOLVE: c_int = 0x200; +pub const RTF_LLINFO: c_int = 0x400; +pub const RTF_STATIC: c_int = 0x800; +pub const RTF_BLACKHOLE: c_int = 0x1000; +pub const RTF_BUL: c_int = 0x2000; +pub const RTF_PROTO2: c_int = 0x4000; +pub const RTF_PROTO1: c_int = 0x8000; +pub const RTF_CLONE: c_int = 0x10000; +pub const RTF_CLONED: c_int = 0x20000; +pub const RTF_PROTO3: c_int = 0x40000; +pub const RTF_BCE: c_int = 0x80000; +pub const RTF_PINNED: c_int = 0x100000; +pub const RTF_LOCAL: c_int = 0x200000; +pub const RTF_BROADCAST: c_int = 0x400000; +pub const RTF_MULTICAST: c_int = 0x800000; +pub const RTF_ACTIVE_DGD: c_int = 0x1000000; +pub const RTF_STOPSRCH: c_int = 0x2000000; +pub const RTF_FREE_IN_PROG: c_int = 0x4000000; +pub const RTF_PERMANENT6: c_int = 0x8000000; +pub const RTF_UNREACHABLE: c_int = 0x10000000; +pub const RTF_CACHED: c_int = 0x20000000; +pub const RTF_SMALLMTU: c_int = 0x40000; + +// netinet/in.h +pub const IPPROTO_HOPOPTS: c_int = 0; +pub const IPPROTO_IGMP: c_int = 2; +pub const IPPROTO_GGP: c_int = 3; +pub const IPPROTO_IPIP: c_int = 4; +pub const IPPROTO_EGP: c_int = 8; +pub const IPPROTO_PUP: c_int = 12; +pub const IPPROTO_IDP: c_int = 22; +pub const IPPROTO_TP: c_int = 29; +pub const IPPROTO_ROUTING: c_int = 43; +pub const IPPROTO_FRAGMENT: c_int = 44; +pub const IPPROTO_QOS: c_int = 45; +pub const IPPROTO_RSVP: c_int = 46; +pub const IPPROTO_GRE: c_int = 47; +pub const IPPROTO_ESP: c_int = 50; +pub const IPPROTO_AH: c_int = 51; +pub const IPPROTO_NONE: c_int = 59; +pub const IPPROTO_DSTOPTS: c_int = 60; +pub const IPPROTO_LOCAL: c_int = 63; +pub const IPPROTO_EON: c_int = 80; +pub const IPPROTO_BIP: c_int = 0x53; +pub const IPPROTO_SCTP: c_int = 132; +pub const IPPROTO_MH: c_int = 135; +pub const IPPROTO_GIF: c_int = 140; +pub const IPPROTO_RAW: c_int = 255; +pub const IP_OPTIONS: c_int = 1; +pub const IP_HDRINCL: c_int = 2; +pub const IP_TOS: c_int = 3; +pub const IP_TTL: c_int = 4; +pub const IP_UNICAST_HOPS: c_int = 4; +pub const IP_RECVOPTS: c_int = 5; +pub const IP_RECVRETOPTS: c_int = 6; +pub const IP_RECVDSTADDR: c_int = 7; +pub const IP_RETOPTS: c_int = 8; +pub const IP_MULTICAST_IF: c_int = 9; +pub const IP_MULTICAST_TTL: c_int = 10; +pub const IP_MULTICAST_HOPS: c_int = 10; +pub const IP_MULTICAST_LOOP: c_int = 11; +pub const IP_ADD_MEMBERSHIP: c_int = 12; +pub const IP_DROP_MEMBERSHIP: c_int = 13; +pub const IP_RECVMACHDR: c_int = 14; +pub const IP_RECVIFINFO: c_int = 15; +pub const IP_BROADCAST_IF: c_int = 16; +pub const IP_DHCPMODE: c_int = 17; +pub const IP_RECVIF: c_int = 20; +pub const IP_ADDRFORM: c_int = 22; +pub const IP_DONTFRAG: c_int = 25; +pub const IP_FINDPMTU: c_int = 26; +pub const IP_PMTUAGE: c_int = 27; +pub const IP_RECVINTERFACE: c_int = 32; +pub const IP_RECVTTL: c_int = 34; +pub const IP_BLOCK_SOURCE: c_int = 58; +pub const IP_UNBLOCK_SOURCE: c_int = 59; +pub const IP_ADD_SOURCE_MEMBERSHIP: c_int = 60; +pub const IP_DROP_SOURCE_MEMBERSHIP: c_int = 61; +pub const IP_DEFAULT_MULTICAST_TTL: c_int = 1; +pub const IP_DEFAULT_MULTICAST_LOOP: c_int = 1; +pub const IP_INC_MEMBERSHIPS: c_int = 20; +pub const IP_INIT_MEMBERSHIP: c_int = 20; +pub const IPV6_UNICAST_HOPS: c_int = IP_TTL; +pub const IPV6_MULTICAST_IF: c_int = IP_MULTICAST_IF; +pub const IPV6_MULTICAST_HOPS: c_int = IP_MULTICAST_TTL; +pub const IPV6_MULTICAST_LOOP: c_int = IP_MULTICAST_LOOP; +pub const IPV6_RECVPKTINFO: c_int = 35; +pub const IPV6_V6ONLY: c_int = 37; +pub const IPV6_ADD_MEMBERSHIP: c_int = IP_ADD_MEMBERSHIP; +pub const 
IPV6_DROP_MEMBERSHIP: c_int = IP_DROP_MEMBERSHIP; +pub const IPV6_JOIN_GROUP: c_int = IP_ADD_MEMBERSHIP; +pub const IPV6_LEAVE_GROUP: c_int = IP_DROP_MEMBERSHIP; +pub const MCAST_BLOCK_SOURCE: c_int = 64; +pub const MCAST_EXCLUDE: c_int = 2; +pub const MCAST_INCLUDE: c_int = 1; +pub const MCAST_JOIN_GROUP: c_int = 62; +pub const MCAST_JOIN_SOURCE_GROUP: c_int = 66; +pub const MCAST_LEAVE_GROUP: c_int = 63; +pub const MCAST_LEAVE_SOURCE_GROUP: c_int = 67; +pub const MCAST_UNBLOCK_SOURCE: c_int = 65; + +// netinet/ip.h +pub const MAXTTL: c_int = 255; +pub const IPDEFTTL: c_int = 64; +pub const IPOPT_CONTROL: c_int = 0; +pub const IPOPT_EOL: c_int = 0; +pub const IPOPT_LSRR: c_int = 131; +pub const IPOPT_MINOFF: c_int = 4; +pub const IPOPT_NOP: c_int = 1; +pub const IPOPT_OFFSET: c_int = 2; +pub const IPOPT_OLEN: c_int = 1; +pub const IPOPT_OPTVAL: c_int = 0; +pub const IPOPT_RESERVED1: c_int = 0x20; +pub const IPOPT_RESERVED2: c_int = 0x60; +pub const IPOPT_RR: c_int = 7; +pub const IPOPT_SSRR: c_int = 137; +pub const IPOPT_TS: c_int = 68; +pub const IPOPT_TS_PRESPEC: c_int = 3; +pub const IPOPT_TS_TSANDADDR: c_int = 1; +pub const IPOPT_TS_TSONLY: c_int = 0; +pub const IPTOS_LOWDELAY: c_int = 16; +pub const IPTOS_PREC_CRITIC_ECP: c_int = 160; +pub const IPTOS_PREC_FLASH: c_int = 96; +pub const IPTOS_PREC_FLASHOVERRIDE: c_int = 128; +pub const IPTOS_PREC_IMMEDIATE: c_int = 64; +pub const IPTOS_PREC_INTERNETCONTROL: c_int = 192; +pub const IPTOS_PREC_NETCONTROL: c_int = 224; +pub const IPTOS_PREC_PRIORITY: c_int = 32; +pub const IPTOS_PREC_ROUTINE: c_int = 16; +pub const IPTOS_RELIABILITY: c_int = 4; +pub const IPTOS_THROUGHPUT: c_int = 8; +pub const IPVERSION: c_int = 4; + +// netinet/tcp.h +pub const TCP_NODELAY: c_int = 0x1; +pub const TCP_MAXSEG: c_int = 0x2; +pub const TCP_RFC1323: c_int = 0x4; +pub const TCP_KEEPALIVE: c_int = 0x8; +pub const TCP_KEEPIDLE: c_int = 0x11; +pub const TCP_KEEPINTVL: c_int = 0x12; +pub const TCP_KEEPCNT: c_int = 0x13; +pub const TCP_NODELAYACK: c_int = 0x14; + +// pthread.h +pub const PTHREAD_BARRIER_SERIAL_THREAD: c_int = 2; +pub const PTHREAD_CREATE_JOINABLE: c_int = 0; +pub const PTHREAD_CREATE_DETACHED: c_int = 1; +pub const PTHREAD_PROCESS_SHARED: c_int = 0; +pub const PTHREAD_PROCESS_PRIVATE: c_ushort = 1; +pub const PTHREAD_STACK_MIN: size_t = PAGESIZE as size_t * 4; +pub const PTHREAD_MUTEX_NORMAL: c_int = 5; +pub const PTHREAD_MUTEX_ERRORCHECK: c_int = 3; +pub const PTHREAD_MUTEX_RECURSIVE: c_int = 4; +pub const PTHREAD_MUTEX_DEFAULT: c_int = PTHREAD_MUTEX_NORMAL; +pub const PTHREAD_MUTEX_ROBUST: c_int = 1; +pub const PTHREAD_MUTEX_STALLED: c_int = 0; +pub const PTHREAD_PRIO_INHERIT: c_int = 3; +pub const PTHREAD_PRIO_NONE: c_int = 1; +pub const PTHREAD_PRIO_PROTECT: c_int = 2; + +// regex.h +pub const REG_EXTENDED: c_int = 1; +pub const REG_ICASE: c_int = 2; +pub const REG_NEWLINE: c_int = 4; +pub const REG_NOSUB: c_int = 8; +pub const REG_NOTBOL: c_int = 0x100; +pub const REG_NOTEOL: c_int = 0x200; +pub const REG_NOMATCH: c_int = 1; +pub const REG_BADPAT: c_int = 2; +pub const REG_ECOLLATE: c_int = 3; +pub const REG_ECTYPE: c_int = 4; +pub const REG_EESCAPE: c_int = 5; +pub const REG_ESUBREG: c_int = 6; +pub const REG_EBRACK: c_int = 7; +pub const REG_EPAREN: c_int = 8; +pub const REG_EBRACE: c_int = 9; +pub const REG_BADBR: c_int = 10; +pub const REG_ERANGE: c_int = 11; +pub const REG_ESPACE: c_int = 12; +pub const REG_BADRPT: c_int = 13; +pub const REG_ECHAR: c_int = 14; +pub const REG_EBOL: c_int = 15; +pub const REG_EEOL: c_int = 16; +pub const 
REG_ENOSYS: c_int = 17; + +// rpcsvc/mount.h +pub const NFSMNT_SOFT: c_int = 0x001; +pub const NFSMNT_WSIZE: c_int = 0x002; +pub const NFSMNT_RSIZE: c_int = 0x004; +pub const NFSMNT_TIMEO: c_int = 0x008; +pub const NFSMNT_RETRANS: c_int = 0x010; +pub const NFSMNT_HOSTNAME: c_int = 0x020; +pub const NFSMNT_INT: c_int = 0x040; +pub const NFSMNT_NOAC: c_int = 0x080; +pub const NFSMNT_ACREGMIN: c_int = 0x0100; +pub const NFSMNT_ACREGMAX: c_int = 0x0200; +pub const NFSMNT_ACDIRMIN: c_int = 0x0400; +pub const NFSMNT_ACDIRMAX: c_int = 0x0800; + +// rpcsvc/rstat.h +pub const CPUSTATES: c_int = 4; + +// semaphore.h +pub const SEM_FAILED: *mut sem_t = -1isize as *mut crate::sem_t; + +// spawn.h +// DIFF(main): changed to `c_short` in f62eb023ab +pub const POSIX_SPAWN_SETPGROUP: c_int = 0x1; +pub const POSIX_SPAWN_SETSIGMASK: c_int = 0x2; +pub const POSIX_SPAWN_SETSIGDEF: c_int = 0x4; +pub const POSIX_SPAWN_SETSCHEDULER: c_int = 0x8; +pub const POSIX_SPAWN_SETSCHEDPARAM: c_int = 0x10; +pub const POSIX_SPAWN_RESETIDS: c_int = 0x20; +pub const POSIX_SPAWN_FORK_HANDLERS: c_int = 0x1000; + +// stdio.h +pub const EOF: c_int = -1; +pub const SEEK_SET: c_int = 0; +pub const SEEK_CUR: c_int = 1; +pub const SEEK_END: c_int = 2; +pub const _IOFBF: c_int = 0o000; +pub const _IONBF: c_int = 0o004; +pub const _IOLBF: c_int = 0o100; +pub const BUFSIZ: c_uint = 4096; +pub const FOPEN_MAX: c_uint = 32767; +pub const FILENAME_MAX: c_uint = 255; +pub const L_tmpnam: c_uint = 21; +pub const TMP_MAX: c_uint = 16384; + +// stdlib.h +pub const EXIT_FAILURE: c_int = 1; +pub const EXIT_SUCCESS: c_int = 0; +pub const RAND_MAX: c_int = 32767; + +// sys/access.h +pub const F_OK: c_int = 0; +pub const R_OK: c_int = 4; +pub const W_OK: c_int = 2; +pub const X_OK: c_int = 1; + +// sys/aio.h +pub const LIO_NOP: c_int = 0; +pub const LIO_READ: c_int = 1; +pub const LIO_WRITE: c_int = 2; +pub const LIO_NOWAIT: c_int = 0; +pub const LIO_WAIT: c_int = 1; +pub const AIO_ALLDONE: c_int = 2; +pub const AIO_CANCELED: c_int = 0; +pub const AIO_NOTCANCELED: c_int = 1; + +// sys/errno.h +pub const EPERM: c_int = 1; +pub const ENOENT: c_int = 2; +pub const ESRCH: c_int = 3; +pub const EINTR: c_int = 4; +pub const EIO: c_int = 5; +pub const ENXIO: c_int = 6; +pub const E2BIG: c_int = 7; +pub const ENOEXEC: c_int = 8; +pub const EBADF: c_int = 9; +pub const ECHILD: c_int = 10; +pub const EAGAIN: c_int = 11; +pub const ENOMEM: c_int = 12; +pub const EACCES: c_int = 13; +pub const EFAULT: c_int = 14; +pub const ENOTBLK: c_int = 15; +pub const EBUSY: c_int = 16; +pub const EEXIST: c_int = 17; +pub const EXDEV: c_int = 18; +pub const ENODEV: c_int = 19; +pub const ENOTDIR: c_int = 20; +pub const EISDIR: c_int = 21; +pub const EINVAL: c_int = 22; +pub const ENFILE: c_int = 23; +pub const EMFILE: c_int = 24; +pub const ENOTTY: c_int = 25; +pub const ETXTBSY: c_int = 26; +pub const EFBIG: c_int = 27; +pub const ENOSPC: c_int = 28; +pub const ESPIPE: c_int = 29; +pub const EROFS: c_int = 30; +pub const EMLINK: c_int = 31; +pub const EPIPE: c_int = 32; +pub const EDOM: c_int = 33; +pub const ERANGE: c_int = 34; +pub const ENOMSG: c_int = 35; +pub const EIDRM: c_int = 36; +pub const ECHRNG: c_int = 37; +pub const EL2NSYNC: c_int = 38; +pub const EL3HLT: c_int = 39; +pub const EL3RST: c_int = 40; +pub const ELNRNG: c_int = 41; +pub const EUNATCH: c_int = 42; +pub const ENOCSI: c_int = 43; +pub const EL2HLT: c_int = 44; +pub const EDEADLK: c_int = 45; +pub const ENOTREADY: c_int = 46; +pub const EWRPROTECT: c_int = 47; +pub const EFORMAT: c_int = 48; +pub 
const ENOLCK: c_int = 49; +pub const ENOCONNECT: c_int = 50; +pub const ESTALE: c_int = 52; +pub const EDIST: c_int = 53; +pub const EWOULDBLOCK: c_int = 54; +pub const EINPROGRESS: c_int = 55; +pub const EALREADY: c_int = 56; +pub const ENOTSOCK: c_int = 57; +pub const EDESTADDRREQ: c_int = 58; +pub const EMSGSIZE: c_int = 59; +pub const EPROTOTYPE: c_int = 60; +pub const ENOPROTOOPT: c_int = 61; +pub const EPROTONOSUPPORT: c_int = 62; +pub const ESOCKTNOSUPPORT: c_int = 63; +pub const EOPNOTSUPP: c_int = 64; +pub const EPFNOSUPPORT: c_int = 65; +pub const EAFNOSUPPORT: c_int = 66; +pub const EADDRINUSE: c_int = 67; +pub const EADDRNOTAVAIL: c_int = 68; +pub const ENETDOWN: c_int = 69; +pub const ENETUNREACH: c_int = 70; +pub const ENETRESET: c_int = 71; +pub const ECONNABORTED: c_int = 72; +pub const ECONNRESET: c_int = 73; +pub const ENOBUFS: c_int = 74; +pub const EISCONN: c_int = 75; +pub const ENOTCONN: c_int = 76; +pub const ESHUTDOWN: c_int = 77; +pub const ETIMEDOUT: c_int = 78; +pub const ECONNREFUSED: c_int = 79; +pub const EHOSTDOWN: c_int = 80; +pub const EHOSTUNREACH: c_int = 81; +pub const ERESTART: c_int = 82; +pub const EPROCLIM: c_int = 83; +pub const EUSERS: c_int = 84; +pub const ELOOP: c_int = 85; +pub const ENAMETOOLONG: c_int = 86; +pub const ENOTEMPTY: c_int = 87; +pub const EDQUOT: c_int = 88; +pub const ECORRUPT: c_int = 89; +pub const ESYSERROR: c_int = 90; +pub const EREMOTE: c_int = 93; +pub const ENOTRECOVERABLE: c_int = 94; +pub const EOWNERDEAD: c_int = 95; +// errnos 96-108 reserved for future use compatible with AIX PS/2 +pub const ENOSYS: c_int = 109; +pub const EMEDIA: c_int = 110; +pub const ESOFT: c_int = 111; +pub const ENOATTR: c_int = 112; +pub const ESAD: c_int = 113; +pub const ENOTRUST: c_int = 114; +pub const ETOOMANYREFS: c_int = 115; +pub const EILSEQ: c_int = 116; +pub const ECANCELED: c_int = 117; +pub const ENOSR: c_int = 118; +pub const ETIME: c_int = 119; +pub const EBADMSG: c_int = 120; +pub const EPROTO: c_int = 121; +pub const ENODATA: c_int = 122; +pub const ENOSTR: c_int = 123; +pub const ENOTSUP: c_int = 124; +pub const EMULTIHOP: c_int = 125; +pub const ENOLINK: c_int = 126; +pub const EOVERFLOW: c_int = 127; + +// sys/dr.h +pub const LPAR_INFO_FORMAT1: c_int = 1; +pub const LPAR_INFO_FORMAT2: c_int = 2; +pub const WPAR_INFO_FORMAT: c_int = 3; +pub const PROC_MODULE_INFO: c_int = 4; +pub const NUM_PROC_MODULE_TYPES: c_int = 5; +pub const LPAR_INFO_VRME_NUM_POOLS: c_int = 6; +pub const LPAR_INFO_VRME_POOLS: c_int = 7; +pub const LPAR_INFO_VRME_LPAR: c_int = 8; +pub const LPAR_INFO_VRME_RESET_HWMARKS: c_int = 9; +pub const LPAR_INFO_VRME_ALLOW_DESIRED: c_int = 10; +pub const EMTP_INFO_FORMAT: c_int = 11; +pub const LPAR_INFO_LPM_CAPABILITY: c_int = 12; +pub const ENERGYSCALE_INFO: c_int = 13; + +// sys/file.h +pub const LOCK_SH: c_int = 1; +pub const LOCK_EX: c_int = 2; +pub const LOCK_NB: c_int = 4; +pub const LOCK_UN: c_int = 8; + +// sys/flock.h +pub const F_RDLCK: c_short = 0o01; +pub const F_WRLCK: c_short = 0o02; +pub const F_UNLCK: c_short = 0o03; + +// sys/fs/quota_common.h +pub const Q_QUOTAON: c_int = 0x100; +pub const Q_QUOTAOFF: c_int = 0x200; +pub const Q_SETUSE: c_int = 0x500; +pub const Q_SYNC: c_int = 0x600; +pub const Q_GETQUOTA: c_int = 0x300; +pub const Q_SETQLIM: c_int = 0x400; +pub const Q_SETQUOTA: c_int = 0x400; + +// sys/ioctl.h +pub const IOCPARM_MASK: c_int = 0x7f; +pub const IOC_VOID: c_int = 0x20000000; +pub const IOC_OUT: c_int = 0x40000000; +pub const IOC_IN: c_int = 0x40000000 << 1; +pub const 
IOC_INOUT: c_int = IOC_IN | IOC_OUT; +pub const FIOCLEX: c_int = 0x20006601; +pub const FIONCLEX: c_int = 0x20006602; +pub const FIONREAD: c_int = 0x4004667f; +pub const FIONBIO: c_int = 0x8004667e; +pub const FIOASYNC: c_int = 0x8004667d; +pub const FIOSETOWN: c_int = 0x8004667c; +pub const FIOGETOWN: c_int = 0x4004667b; +pub const TIOCGETD: c_int = 0x40047400; +pub const TIOCSETD: c_int = 0x80047401; +pub const TIOCHPCL: c_int = 0x20007402; +pub const TIOCMODG: c_int = 0x40047403; +pub const TIOCMODS: c_int = 0x80047404; +pub const TIOCM_LE: c_int = 0x1; +pub const TIOCM_DTR: c_int = 0x2; +pub const TIOCM_RTS: c_int = 0x4; +pub const TIOCM_ST: c_int = 0x8; +pub const TIOCM_SR: c_int = 0x10; +pub const TIOCM_CTS: c_int = 0x20; +pub const TIOCM_CAR: c_int = 0x40; +pub const TIOCM_CD: c_int = 0x40; +pub const TIOCM_RNG: c_int = 0x80; +pub const TIOCM_RI: c_int = 0x80; +pub const TIOCM_DSR: c_int = 0x100; +pub const TIOCGETP: c_int = 0x40067408; +pub const TIOCSETP: c_int = 0x80067409; +pub const TIOCSETN: c_int = 0x8006740a; +pub const TIOCEXCL: c_int = 0x2000740d; +pub const TIOCNXCL: c_int = 0x2000740e; +pub const TIOCFLUSH: c_int = 0x80047410; +pub const TIOCSETC: c_int = 0x80067411; +pub const TIOCGETC: c_int = 0x40067412; +pub const TANDEM: c_int = 0x1; +pub const CBREAK: c_int = 0x2; +pub const LCASE: c_int = 0x4; +pub const MDMBUF: c_int = 0x800000; +pub const XTABS: c_int = 0xc00; +pub const SIOCADDMULTI: c_int = 0x80206931; +pub const SIOCADDRT: c_int = 0x8038720a; +pub const SIOCDARP: c_int = 0x804c6920; +pub const SIOCDELMULTI: c_int = 0x80206932; +pub const SIOCDELRT: c_int = 0x8038720b; +pub const SIOCDIFADDR: c_int = 0x80286919; +pub const SIOCGARP: c_int = 0xc04c6926; +pub const SIOCGIFADDR: c_int = 0xc0286921; +pub const SIOCGIFBRDADDR: c_int = 0xc0286923; +pub const SIOCGIFCONF: c_int = 0xc0106945; +pub const SIOCGIFDSTADDR: c_int = 0xc0286922; +pub const SIOCGIFFLAGS: c_int = 0xc0286911; +pub const SIOCGIFHWADDR: c_int = 0xc0546995; +pub const SIOCGIFMETRIC: c_int = 0xc0286917; +pub const SIOCGIFMTU: c_int = 0xc0286956; +pub const SIOCGIFNETMASK: c_int = 0xc0286925; +pub const SIOCSARP: c_int = 0x804c691e; +pub const SIOCSIFADDR: c_int = 0x8028690c; +pub const SIOCSIFBRDADDR: c_int = 0x80286913; +pub const SIOCSIFDSTADDR: c_int = 0x8028690e; +pub const SIOCSIFFLAGS: c_int = 0x80286910; +pub const SIOCSIFMETRIC: c_int = 0x80286918; +pub const SIOCSIFMTU: c_int = 0x80286958; +pub const SIOCSIFNETMASK: c_int = 0x80286916; +pub const TIOCUCNTL: c_int = 0x80047466; +pub const TIOCCONS: c_int = 0x80047462; +pub const TIOCPKT: c_int = 0x80047470; +pub const TIOCPKT_DATA: c_int = 0; +pub const TIOCPKT_FLUSHREAD: c_int = 1; +pub const TIOCPKT_FLUSHWRITE: c_int = 2; +pub const TIOCPKT_NOSTOP: c_int = 0x10; +pub const TIOCPKT_DOSTOP: c_int = 0x20; +pub const TIOCPKT_START: c_int = 8; +pub const TIOCPKT_STOP: c_int = 4; + +// sys/ipc.h +pub const IPC_ALLOC: c_int = 0o100000; +pub const IPC_CREAT: c_int = 0o020000; +pub const IPC_EXCL: c_int = 0o002000; +pub const IPC_NOWAIT: c_int = 0o004000; +pub const IPC_RMID: c_int = 0; +pub const IPC_SET: c_int = 101; +pub const IPC_R: c_int = 0o0400; +pub const IPC_W: c_int = 0o0200; +pub const IPC_O: c_int = 0o1000; +pub const IPC_NOERROR: c_int = 0o10000; +pub const IPC_STAT: c_int = 102; +pub const IPC_PRIVATE: crate::key_t = -1; +pub const SHM_LOCK: c_int = 201; +pub const SHM_UNLOCK: c_int = 202; + +// sys/ldr.h +pub const L_GETMESSAGES: c_int = 1; +pub const L_GETINFO: c_int = 2; +pub const L_GETLIBPATH: c_int = 3; +pub const 
L_GETKERNINFO: c_int = 4; +pub const L_GETLIB32INFO: c_int = 5; +pub const L_GETLIB64INFO: c_int = 6; +pub const L_GETPROCINFO: c_int = 7; +pub const L_GETXINFO: c_int = 8; + +// sys/limits.h +pub const PATH_MAX: c_int = 1023; +pub const PAGESIZE: c_int = 4096; +pub const IOV_MAX: c_int = 16; +pub const AIO_LISTIO_MAX: c_int = 4096; +pub const PIPE_BUF: usize = 32768; +pub const OPEN_MAX: c_int = 65534; +pub const MAX_INPUT: c_int = 512; +pub const MAX_CANON: c_int = 256; +pub const ARG_MAX: c_int = 1048576; +pub const BC_BASE_MAX: c_int = 99; +pub const BC_DIM_MAX: c_int = 0x800; +pub const BC_SCALE_MAX: c_int = 99; +pub const BC_STRING_MAX: c_int = 0x800; +pub const CHARCLASS_NAME_MAX: c_int = 14; +pub const CHILD_MAX: c_int = 128; +pub const COLL_WEIGHTS_MAX: c_int = 4; +pub const EXPR_NEST_MAX: c_int = 32; +pub const NZERO: c_int = 20; + +// sys/lockf.h +pub const F_LOCK: c_int = 1; +pub const F_TEST: c_int = 3; +pub const F_TLOCK: c_int = 2; +pub const F_ULOCK: c_int = 0; + +// sys/machine.h +pub const BIG_ENDIAN: c_int = 4321; +pub const LITTLE_ENDIAN: c_int = 1234; +pub const PDP_ENDIAN: c_int = 3412; + +// sys/mman.h +pub const PROT_NONE: c_int = 0; +pub const PROT_READ: c_int = 1; +pub const PROT_WRITE: c_int = 2; +pub const PROT_EXEC: c_int = 4; +pub const MAP_FILE: c_int = 0; +pub const MAP_SHARED: c_int = 1; +pub const MAP_PRIVATE: c_int = 2; +pub const MAP_FIXED: c_int = 0x100; +pub const MAP_ANON: c_int = 0x10; +pub const MAP_ANONYMOUS: c_int = 0x10; +pub const MAP_FAILED: *mut c_void = !0 as *mut c_void; +pub const MAP_TYPE: c_int = 0xf0; +pub const MCL_CURRENT: c_int = 0x100; +pub const MCL_FUTURE: c_int = 0x200; +pub const MS_SYNC: c_int = 0x20; +pub const MS_ASYNC: c_int = 0x10; +pub const MS_INVALIDATE: c_int = 0x40; +pub const POSIX_MADV_NORMAL: c_int = 1; +pub const POSIX_MADV_RANDOM: c_int = 3; +pub const POSIX_MADV_SEQUENTIAL: c_int = 2; +pub const POSIX_MADV_WILLNEED: c_int = 4; +pub const POSIX_MADV_DONTNEED: c_int = 5; +pub const MADV_NORMAL: c_int = 0; +pub const MADV_RANDOM: c_int = 1; +pub const MADV_SEQUENTIAL: c_int = 2; +pub const MADV_WILLNEED: c_int = 3; +pub const MADV_DONTNEED: c_int = 4; + +// sys/mode.h +pub const S_IFMT: mode_t = 0o17_0000; +pub const S_IFREG: mode_t = 0o10_0000; +pub const S_IFDIR: mode_t = 0o4_0000; +pub const S_IFBLK: mode_t = 0o6_0000; +pub const S_IFCHR: mode_t = 0o2_0000; +pub const S_IFIFO: mode_t = 0o1_0000; +pub const S_IRWXU: mode_t = 0o0700; +pub const S_IRUSR: mode_t = 0o0400; +pub const S_IWUSR: mode_t = 0o0200; +pub const S_IXUSR: mode_t = 0o0100; +pub const S_IRWXG: mode_t = 0o0070; +pub const S_IRGRP: mode_t = 0o0040; +pub const S_IWGRP: mode_t = 0o0020; +pub const S_IXGRP: mode_t = 0o0010; +pub const S_IRWXO: mode_t = 0o0007; +pub const S_IROTH: mode_t = 0o0004; +pub const S_IWOTH: mode_t = 0o0002; +pub const S_IXOTH: mode_t = 0o0001; +pub const S_IFLNK: mode_t = 0o12_0000; +pub const S_IFSOCK: mode_t = 0o14_0000; +pub const S_IEXEC: mode_t = 0o0100; +pub const S_IWRITE: mode_t = 0o0200; +pub const S_IREAD: mode_t = 0o0400; + +// sys/msg.h +pub const MSG_NOERROR: c_int = 0o10000; + +// sys/m_signal.h +pub const SIGSTKSZ: size_t = 4096; +pub const MINSIGSTKSZ: size_t = 1200; + +// sys/params.h +pub const MAXPATHLEN: c_int = PATH_MAX + 1; +pub const MAXSYMLINKS: c_int = 20; +pub const MAXHOSTNAMELEN: c_int = 256; +pub const MAXUPRC: c_int = 128; +pub const NGROUPS_MAX: c_ulong = 2048; +pub const NGROUPS: c_ulong = NGROUPS_MAX; +pub const NOFILE: c_int = OPEN_MAX; + +// sys/poll.h +pub const POLLIN: c_short = 0x0001; 
+pub const POLLPRI: c_short = 0x0004; +pub const POLLOUT: c_short = 0x0002; +pub const POLLERR: c_short = 0x4000; +pub const POLLHUP: c_short = 0x2000; +pub const POLLMSG: c_short = 0x0080; +pub const POLLSYNC: c_short = 0x8000; +pub const POLLNVAL: c_short = POLLSYNC; +pub const POLLNORM: c_short = POLLIN; +pub const POLLRDNORM: c_short = 0x0010; +pub const POLLWRNORM: c_short = POLLOUT; +pub const POLLRDBAND: c_short = 0x0020; +pub const POLLWRBAND: c_short = 0x0040; + +// sys/pollset.h +pub const PS_ADD: c_uchar = 0; +pub const PS_MOD: c_uchar = 1; +pub const PS_DELETE: c_uchar = 2; +pub const PS_REPLACE: c_uchar = 3; + +// sys/ptrace.h +pub const PT_TRACE_ME: c_int = 0; +pub const PT_READ_I: c_int = 1; +pub const PT_READ_D: c_int = 2; +pub const PT_WRITE_I: c_int = 4; +pub const PT_WRITE_D: c_int = 5; +pub const PT_CONTINUE: c_int = 7; +pub const PT_KILL: c_int = 8; +pub const PT_STEP: c_int = 9; +pub const PT_READ_GPR: c_int = 11; +pub const PT_READ_FPR: c_int = 12; +pub const PT_WRITE_GPR: c_int = 14; +pub const PT_WRITE_FPR: c_int = 15; +pub const PT_READ_BLOCK: c_int = 17; +pub const PT_WRITE_BLOCK: c_int = 19; +pub const PT_ATTACH: c_int = 30; +pub const PT_DETACH: c_int = 31; +pub const PT_REGSET: c_int = 32; +pub const PT_REATT: c_int = 33; +pub const PT_LDINFO: c_int = 34; +pub const PT_MULTI: c_int = 35; +pub const PT_NEXT: c_int = 36; +pub const PT_SET: c_int = 37; +pub const PT_CLEAR: c_int = 38; +pub const PT_LDXINFO: c_int = 39; +pub const PT_QUERY: c_int = 40; +pub const PT_WATCH: c_int = 41; +pub const PTT_CONTINUE: c_int = 50; +pub const PTT_STEP: c_int = 51; +pub const PTT_READ_SPRS: c_int = 52; +pub const PTT_WRITE_SPRS: c_int = 53; +pub const PTT_READ_GPRS: c_int = 54; +pub const PTT_WRITE_GPRS: c_int = 55; +pub const PTT_READ_FPRS: c_int = 56; +pub const PTT_WRITE_FPRS: c_int = 57; +pub const PTT_READ_VEC: c_int = 58; +pub const PTT_WRITE_VEC: c_int = 59; +pub const PTT_WATCH: c_int = 60; +pub const PTT_SET_TRAP: c_int = 61; +pub const PTT_CLEAR_TRAP: c_int = 62; +pub const PTT_READ_UKEYSET: c_int = 63; +pub const PT_GET_UKEY: c_int = 64; +pub const PTT_READ_FPSCR_HI: c_int = 65; +pub const PTT_WRITE_FPSCR_HI: c_int = 66; +pub const PTT_READ_VSX: c_int = 67; +pub const PTT_WRITE_VSX: c_int = 68; +pub const PTT_READ_TM: c_int = 69; +pub const PTRACE_ATTACH: c_int = 14; +pub const PTRACE_CONT: c_int = 7; +pub const PTRACE_DETACH: c_int = 15; +pub const PTRACE_GETFPREGS: c_int = 12; +pub const PTRACE_GETREGS: c_int = 10; +pub const PTRACE_KILL: c_int = 8; +pub const PTRACE_PEEKDATA: c_int = 2; +pub const PTRACE_PEEKTEXT: c_int = 1; +pub const PTRACE_PEEKUSER: c_int = 3; +pub const PTRACE_POKEDATA: c_int = 5; +pub const PTRACE_POKETEXT: c_int = 4; +pub const PTRACE_POKEUSER: c_int = 6; +pub const PTRACE_SETFPREGS: c_int = 13; +pub const PTRACE_SETREGS: c_int = 11; +pub const PTRACE_SINGLESTEP: c_int = 9; +pub const PTRACE_SYSCALL: c_int = 16; +pub const PTRACE_TRACEME: c_int = 0; + +// sys/resource.h +pub const RLIMIT_CPU: c_int = 0; +pub const RLIMIT_FSIZE: c_int = 1; +pub const RLIMIT_DATA: c_int = 2; +pub const RLIMIT_STACK: c_int = 3; +pub const RLIMIT_CORE: c_int = 4; +pub const RLIMIT_RSS: c_int = 5; +pub const RLIMIT_AS: c_int = 6; +pub const RLIMIT_NOFILE: c_int = 7; +pub const RLIMIT_THREADS: c_int = 8; +pub const RLIMIT_NPROC: c_int = 9; +pub const RUSAGE_SELF: c_int = 0; +pub const RUSAGE_CHILDREN: c_int = -1; +pub const PRIO_PROCESS: c_int = 0; +pub const PRIO_PGRP: c_int = 1; +pub const PRIO_USER: c_int = 2; +pub const RUSAGE_THREAD: c_int = 1; +pub const 
RLIM_SAVED_MAX: c_ulong = RLIM_INFINITY - 1; +pub const RLIM_SAVED_CUR: c_ulong = RLIM_INFINITY - 2; +#[deprecated(since = "0.2.64", note = "Not stable across OS versions")] +pub const RLIM_NLIMITS: c_int = 10; + +// sys/sched.h +pub const SCHED_OTHER: c_int = 0; +pub const SCHED_FIFO: c_int = 1; +pub const SCHED_RR: c_int = 2; +pub const SCHED_LOCAL: c_int = 3; +pub const SCHED_GLOBAL: c_int = 4; +pub const SCHED_FIFO2: c_int = 5; +pub const SCHED_FIFO3: c_int = 6; +pub const SCHED_FIFO4: c_int = 7; + +// sys/sem.h +pub const SEM_UNDO: c_int = 0o10000; +pub const GETNCNT: c_int = 3; +pub const GETPID: c_int = 4; +pub const GETVAL: c_int = 5; +pub const GETALL: c_int = 6; +pub const GETZCNT: c_int = 7; +pub const SETVAL: c_int = 8; +pub const SETALL: c_int = 9; + +// sys/shm.h +pub const SHMLBA: c_int = 0x10000000; +pub const SHMLBA_EXTSHM: c_int = 0x1000; +pub const SHM_SHMAT: c_int = 0x80000000; +pub const SHM_RDONLY: c_int = 0o10000; +pub const SHM_RND: c_int = 0o20000; +pub const SHM_PIN: c_int = 0o4000; +pub const SHM_LGPAGE: c_int = 0o20000000000; +pub const SHM_MAP: c_int = 0o4000; +pub const SHM_FMAP: c_int = 0o2000; +pub const SHM_COPY: c_int = 0o40000; +pub const SHM_CLEAR: c_int = 0; +pub const SHM_HGSEG: c_int = 0o10000000000; +pub const SHM_R: c_int = IPC_R; +pub const SHM_W: c_int = IPC_W; +pub const SHM_DEST: c_int = 0o2000; + +// sys/signal.h +pub const SA_ONSTACK: c_int = 0x00000001; +pub const SA_RESETHAND: c_int = 0x00000002; +pub const SA_RESTART: c_int = 0x00000008; +pub const SA_SIGINFO: c_int = 0x00000100; +pub const SA_NODEFER: c_int = 0x00000200; +pub const SA_NOCLDWAIT: c_int = 0x00000400; +pub const SA_NOCLDSTOP: c_int = 0x00000004; +pub const SS_ONSTACK: c_int = 0x00000001; +pub const SS_DISABLE: c_int = 0x00000002; +pub const SIGCHLD: c_int = 20; +pub const SIGBUS: c_int = 10; +pub const SIG_BLOCK: c_int = 0; +pub const SIG_UNBLOCK: c_int = 1; +pub const SIG_SETMASK: c_int = 2; +pub const SIGEV_NONE: c_int = 1; +pub const SIGEV_SIGNAL: c_int = 2; +pub const SIGEV_THREAD: c_int = 3; +pub const SIGHUP: c_int = 1; +pub const SIGINT: c_int = 2; +pub const SIGQUIT: c_int = 3; +pub const SIGILL: c_int = 4; +pub const SIGABRT: c_int = 6; +pub const SIGEMT: c_int = 7; +pub const SIGFPE: c_int = 8; +pub const SIGKILL: c_int = 9; +pub const SIGSEGV: c_int = 11; +pub const SIGSYS: c_int = 12; +pub const SIGPIPE: c_int = 13; +pub const SIGALRM: c_int = 14; +pub const SIGTERM: c_int = 15; +pub const SIGUSR1: c_int = 30; +pub const SIGUSR2: c_int = 31; +pub const SIGPWR: c_int = 29; +pub const SIGWINCH: c_int = 28; +pub const SIGURG: c_int = 16; +pub const SIGPOLL: c_int = SIGIO; +pub const SIGIO: c_int = 23; +pub const SIGSTOP: c_int = 17; +pub const SIGTSTP: c_int = 18; +pub const SIGCONT: c_int = 19; +pub const SIGTTIN: c_int = 21; +pub const SIGTTOU: c_int = 22; +pub const SIGVTALRM: c_int = 34; +pub const SIGPROF: c_int = 32; +pub const SIGXCPU: c_int = 24; +pub const SIGXFSZ: c_int = 25; +pub const SIGTRAP: c_int = 5; +pub const SIGCLD: c_int = 20; +pub const SIGRTMAX: c_int = 57; +pub const SIGRTMIN: c_int = 50; +pub const SI_USER: c_int = 0; +pub const SI_UNDEFINED: c_int = 8; +pub const SI_EMPTY: c_int = 9; +pub const BUS_ADRALN: c_int = 1; +pub const BUS_ADRERR: c_int = 2; +pub const BUS_OBJERR: c_int = 3; +pub const BUS_UEGARD: c_int = 4; +pub const CLD_EXITED: c_int = 10; +pub const CLD_KILLED: c_int = 11; +pub const CLD_DUMPED: c_int = 12; +pub const CLD_TRAPPED: c_int = 13; +pub const CLD_STOPPED: c_int = 14; +pub const CLD_CONTINUED: c_int = 15; +pub const 
FPE_INTDIV: c_int = 20; +pub const FPE_INTOVF: c_int = 21; +pub const FPE_FLTDIV: c_int = 22; +pub const FPE_FLTOVF: c_int = 23; +pub const FPE_FLTUND: c_int = 24; +pub const FPE_FLTRES: c_int = 25; +pub const FPE_FLTINV: c_int = 26; +pub const FPE_FLTSUB: c_int = 27; +pub const ILL_ILLOPC: c_int = 30; +pub const ILL_ILLOPN: c_int = 31; +pub const ILL_ILLADR: c_int = 32; +pub const ILL_ILLTRP: c_int = 33; +pub const ILL_PRVOPC: c_int = 34; +pub const ILL_PRVREG: c_int = 35; +pub const ILL_COPROC: c_int = 36; +pub const ILL_BADSTK: c_int = 37; +pub const ILL_TMBADTHING: c_int = 38; +pub const POLL_IN: c_int = 40; +pub const POLL_OUT: c_int = 41; +pub const POLL_MSG: c_int = -3; +pub const POLL_ERR: c_int = 43; +pub const POLL_PRI: c_int = 44; +pub const POLL_HUP: c_int = 45; +pub const SEGV_MAPERR: c_int = 50; +pub const SEGV_ACCERR: c_int = 51; +pub const SEGV_KEYERR: c_int = 52; +pub const TRAP_BRKPT: c_int = 60; +pub const TRAP_TRACE: c_int = 61; +pub const SI_QUEUE: c_int = 71; +pub const SI_TIMER: c_int = 72; +pub const SI_ASYNCIO: c_int = 73; +pub const SI_MESGQ: c_int = 74; + +// sys/socket.h +pub const AF_UNSPEC: c_int = 0; +pub const AF_UNIX: c_int = 1; +pub const AF_INET: c_int = 2; +pub const AF_IMPLINK: c_int = 3; +pub const AF_PUP: c_int = 4; +pub const AF_CHAOS: c_int = 5; +pub const AF_NS: c_int = 6; +pub const AF_ECMA: c_int = 8; +pub const AF_DATAKIT: c_int = 9; +pub const AF_CCITT: c_int = 10; +pub const AF_SNA: c_int = 11; +pub const AF_DECnet: c_int = 12; +pub const AF_DLI: c_int = 13; +pub const AF_LAT: c_int = 14; +pub const SO_TIMESTAMPNS: c_int = 0x100a; +pub const SOMAXCONN: c_int = 1024; +pub const AF_LOCAL: c_int = AF_UNIX; +pub const UIO_MAXIOV: c_int = 1024; +pub const pseudo_AF_XTP: c_int = 19; +pub const AF_HYLINK: c_int = 15; +pub const AF_APPLETALK: c_int = 16; +pub const AF_ISO: c_int = 7; +pub const AF_OSI: c_int = AF_ISO; +pub const AF_ROUTE: c_int = 17; +pub const AF_LINK: c_int = 18; +pub const AF_INET6: c_int = 24; +pub const AF_INTF: c_int = 20; +pub const AF_RIF: c_int = 21; +pub const AF_NDD: c_int = 23; +pub const AF_MAX: c_int = 30; +pub const PF_UNSPEC: c_int = AF_UNSPEC; +pub const PF_UNIX: c_int = AF_UNIX; +pub const PF_INET: c_int = AF_INET; +pub const PF_IMPLINK: c_int = AF_IMPLINK; +pub const PF_PUP: c_int = AF_PUP; +pub const PF_CHAOS: c_int = AF_CHAOS; +pub const PF_NS: c_int = AF_NS; +pub const PF_ISO: c_int = AF_ISO; +pub const PF_OSI: c_int = AF_ISO; +pub const PF_ECMA: c_int = AF_ECMA; +pub const PF_DATAKIT: c_int = AF_DATAKIT; +pub const PF_CCITT: c_int = AF_CCITT; +pub const PF_SNA: c_int = AF_SNA; +pub const PF_DECnet: c_int = AF_DECnet; +pub const PF_DLI: c_int = AF_DLI; +pub const PF_LAT: c_int = AF_LAT; +pub const PF_HYLINK: c_int = AF_HYLINK; +pub const PF_APPLETALK: c_int = AF_APPLETALK; +pub const PF_ROUTE: c_int = AF_ROUTE; +pub const PF_LINK: c_int = AF_LINK; +pub const PF_XTP: c_int = 19; +pub const PF_RIF: c_int = AF_RIF; +pub const PF_INTF: c_int = AF_INTF; +pub const PF_NDD: c_int = AF_NDD; +pub const PF_INET6: c_int = AF_INET6; +pub const PF_MAX: c_int = AF_MAX; +pub const SF_CLOSE: c_int = 1; +pub const SF_REUSE: c_int = 2; +pub const SF_DONT_CACHE: c_int = 4; +pub const SF_SYNC_CACHE: c_int = 8; +pub const SOCK_DGRAM: c_int = 2; +pub const SOCK_STREAM: c_int = 1; +pub const SOCK_RAW: c_int = 3; +pub const SOCK_RDM: c_int = 4; +pub const SOCK_SEQPACKET: c_int = 5; +pub const SOL_SOCKET: c_int = 0xffff; +pub const SO_DEBUG: c_int = 0x0001; +pub const SO_ACCEPTCONN: c_int = 0x0002; +pub const SO_REUSEADDR: c_int = 
0x0004; +pub const SO_KEEPALIVE: c_int = 0x0008; +pub const SO_DONTROUTE: c_int = 0x0010; +pub const SO_BROADCAST: c_int = 0x0020; +pub const SO_USELOOPBACK: c_int = 0x0040; +pub const SO_LINGER: c_int = 0x0080; +pub const SO_OOBINLINE: c_int = 0x0100; +pub const SO_REUSEPORT: c_int = 0x0200; +pub const SO_USE_IFBUFS: c_int = 0x0400; +pub const SO_CKSUMRECV: c_int = 0x0800; +pub const SO_NOREUSEADDR: c_int = 0x1000; +pub const SO_KERNACCEPT: c_int = 0x2000; +pub const SO_NOMULTIPATH: c_int = 0x4000; +pub const SO_AUDIT: c_int = 0x8000; +pub const SO_SNDBUF: c_int = 0x1001; +pub const SO_RCVBUF: c_int = 0x1002; +pub const SO_SNDLOWAT: c_int = 0x1003; +pub const SO_RCVLOWAT: c_int = 0x1004; +pub const SO_SNDTIMEO: c_int = 0x1005; +pub const SO_RCVTIMEO: c_int = 0x1006; +pub const SO_ERROR: c_int = 0x1007; +pub const SO_TYPE: c_int = 0x1008; +pub const SCM_RIGHTS: c_int = 0x01; +pub const MSG_OOB: c_int = 0x1; +pub const MSG_PEEK: c_int = 0x2; +pub const MSG_DONTROUTE: c_int = 0x4; +pub const MSG_EOR: c_int = 0x8; +pub const MSG_TRUNC: c_int = 0x10; +pub const MSG_CTRUNC: c_int = 0x20; +pub const MSG_WAITALL: c_int = 0x40; +pub const MSG_MPEG2: c_int = 0x80; +pub const MSG_NOSIGNAL: c_int = 0x100; +pub const MSG_WAITFORONE: c_int = 0x200; +pub const MSG_ARGEXT: c_int = 0x400; +pub const MSG_NONBLOCK: c_int = 0x4000; +pub const MSG_COMPAT: c_int = 0x8000; +pub const MSG_MAXIOVLEN: c_int = 16; +pub const SHUT_RD: c_int = 0; +pub const SHUT_WR: c_int = 1; +pub const SHUT_RDWR: c_int = 2; + +// sys/stat.h +pub const UTIME_NOW: c_int = -2; +pub const UTIME_OMIT: c_int = -3; + +// sys/statvfs.h +pub const ST_RDONLY: c_ulong = 0x0001; +pub const ST_NOSUID: c_ulong = 0x0040; +pub const ST_NODEV: c_ulong = 0x0080; + +// sys/stropts.h +pub const I_NREAD: c_int = 0x20005301; +pub const I_PUSH: c_int = 0x20005302; +pub const I_POP: c_int = 0x20005303; +pub const I_LOOK: c_int = 0x20005304; +pub const I_FLUSH: c_int = 0x20005305; +pub const I_SRDOPT: c_int = 0x20005306; +pub const I_GRDOPT: c_int = 0x20005307; +pub const I_STR: c_int = 0x20005308; +pub const I_SETSIG: c_int = 0x20005309; +pub const I_GETSIG: c_int = 0x2000530a; +pub const I_FIND: c_int = 0x2000530b; +pub const I_LINK: c_int = 0x2000530c; +pub const I_UNLINK: c_int = 0x2000530d; +pub const I_PEEK: c_int = 0x2000530f; +pub const I_FDINSERT: c_int = 0x20005310; +pub const I_SENDFD: c_int = 0x20005311; +pub const I_RECVFD: c_int = 0x20005312; +pub const I_SWROPT: c_int = 0x20005314; +pub const I_GWROPT: c_int = 0x20005315; +pub const I_LIST: c_int = 0x20005316; +pub const I_PLINK: c_int = 0x2000531d; +pub const I_PUNLINK: c_int = 0x2000531e; +pub const I_FLUSHBAND: c_int = 0x20005313; +pub const I_CKBAND: c_int = 0x20005318; +pub const I_GETBAND: c_int = 0x20005319; +pub const I_ATMARK: c_int = 0x20005317; +pub const I_SETCLTIME: c_int = 0x2000531b; +pub const I_GETCLTIME: c_int = 0x2000531c; +pub const I_CANPUT: c_int = 0x2000531a; + +// sys/syslog.h +pub const LOG_CRON: c_int = 9 << 3; +pub const LOG_AUTHPRIV: c_int = 10 << 3; +pub const LOG_NFACILITIES: c_int = 24; +pub const LOG_PERROR: c_int = 0x20; + +// sys/systemcfg.h +pub const SC_ARCH: c_int = 1; +pub const SC_IMPL: c_int = 2; +pub const SC_VERS: c_int = 3; +pub const SC_WIDTH: c_int = 4; +pub const SC_NCPUS: c_int = 5; +pub const SC_L1C_ATTR: c_int = 6; +pub const SC_L1C_ISZ: c_int = 7; +pub const SC_L1C_DSZ: c_int = 8; +pub const SC_L1C_ICA: c_int = 9; +pub const SC_L1C_DCA: c_int = 10; +pub const SC_L1C_IBS: c_int = 11; +pub const SC_L1C_DBS: c_int = 12; +pub const SC_L1C_ILS: 
c_int = 13; +pub const SC_L1C_DLS: c_int = 14; +pub const SC_L2C_SZ: c_int = 15; +pub const SC_L2C_AS: c_int = 16; +pub const SC_TLB_ATTR: c_int = 17; +pub const SC_ITLB_SZ: c_int = 18; +pub const SC_DTLB_SZ: c_int = 19; +pub const SC_ITLB_ATT: c_int = 20; +pub const SC_DTLB_ATT: c_int = 21; +pub const SC_RESRV_SZ: c_int = 22; +pub const SC_PRI_LC: c_int = 23; +pub const SC_PRO_LC: c_int = 24; +pub const SC_RTC_TYPE: c_int = 25; +pub const SC_VIRT_AL: c_int = 26; +pub const SC_CAC_CONG: c_int = 27; +pub const SC_MOD_ARCH: c_int = 28; +pub const SC_MOD_IMPL: c_int = 29; +pub const SC_XINT: c_int = 30; +pub const SC_XFRAC: c_int = 31; +pub const SC_KRN_ATTR: c_int = 32; +pub const SC_PHYSMEM: c_int = 33; +pub const SC_SLB_ATTR: c_int = 34; +pub const SC_SLB_SZ: c_int = 35; +pub const SC_MAX_NCPUS: c_int = 37; +pub const SC_MAX_REALADDR: c_int = 38; +pub const SC_ORIG_ENT_CAP: c_int = 39; +pub const SC_ENT_CAP: c_int = 40; +pub const SC_DISP_WHE: c_int = 41; +pub const SC_CAPINC: c_int = 42; +pub const SC_VCAPW: c_int = 43; +pub const SC_SPLP_STAT: c_int = 44; +pub const SC_SMT_STAT: c_int = 45; +pub const SC_SMT_TC: c_int = 46; +pub const SC_VMX_VER: c_int = 47; +pub const SC_LMB_SZ: c_int = 48; +pub const SC_MAX_XCPU: c_int = 49; +pub const SC_EC_LVL: c_int = 50; +pub const SC_AME_STAT: c_int = 51; +pub const SC_ECO_STAT: c_int = 52; +pub const SC_DFP_VER: c_int = 53; +pub const SC_VRM_STAT: c_int = 54; +pub const SC_PHYS_IMP: c_int = 55; +pub const SC_PHYS_VER: c_int = 56; +pub const SC_SPCM_STATUS: c_int = 57; +pub const SC_SPCM_MAX: c_int = 58; +pub const SC_TM_VER: c_int = 59; +pub const SC_NX_CAP: c_int = 60; +pub const SC_PKS_STATE: c_int = 61; +pub const SC_MMA_VER: c_int = 62; +pub const POWER_RS: c_int = 1; +pub const POWER_PC: c_int = 2; +pub const IA64: c_int = 3; +pub const POWER_RS1: c_int = 0x1; +pub const POWER_RSC: c_int = 0x2; +pub const POWER_RS2: c_int = 0x4; +pub const POWER_601: c_int = 0x8; +pub const POWER_604: c_int = 0x10; +pub const POWER_603: c_int = 0x20; +pub const POWER_620: c_int = 0x40; +pub const POWER_630: c_int = 0x80; +pub const POWER_A35: c_int = 0x100; +pub const POWER_RS64II: c_int = 0x200; +pub const POWER_RS64III: c_int = 0x400; +pub const POWER_4: c_int = 0x800; +pub const POWER_RS64IV: c_int = POWER_4; +pub const POWER_MPC7450: c_int = 0x1000; +pub const POWER_5: c_int = 0x2000; +pub const POWER_6: c_int = 0x4000; +pub const POWER_7: c_int = 0x8000; +pub const POWER_8: c_int = 0x10000; +pub const POWER_9: c_int = 0x20000; + +// sys/time.h +pub const FD_SETSIZE: usize = 65534; +pub const TIMEOFDAY: c_int = 9; +pub const CLOCK_REALTIME: crate::clockid_t = TIMEOFDAY as clockid_t; +pub const CLOCK_MONOTONIC: crate::clockid_t = 10; +pub const TIMER_ABSTIME: c_int = 999; +pub const ITIMER_REAL: c_int = 0; +pub const ITIMER_VIRTUAL: c_int = 1; +pub const ITIMER_PROF: c_int = 2; +pub const ITIMER_VIRT: c_int = 3; +pub const ITIMER_REAL1: c_int = 20; +pub const ITIMER_REAL_TH: c_int = ITIMER_REAL1; +pub const DST_AUST: c_int = 2; +pub const DST_CAN: c_int = 6; +pub const DST_EET: c_int = 5; +pub const DST_MET: c_int = 4; +pub const DST_NONE: c_int = 0; +pub const DST_USA: c_int = 1; +pub const DST_WET: c_int = 3; + +// sys/termio.h +pub const CSTART: crate::tcflag_t = 0o21; +pub const CSTOP: crate::tcflag_t = 0o23; +pub const TCGETA: c_int = TIOC | 5; +pub const TCSETA: c_int = TIOC | 6; +pub const TCSETAW: c_int = TIOC | 7; +pub const TCSETAF: c_int = TIOC | 8; +pub const TCSBRK: c_int = TIOC | 9; +pub const TCXONC: c_int = TIOC | 11; +pub const TCFLSH: 
c_int = TIOC | 12; +pub const TCGETS: c_int = TIOC | 1; +pub const TCSETS: c_int = TIOC | 2; +pub const TCSANOW: c_int = 0; +pub const TCSETSW: c_int = TIOC | 3; +pub const TCSADRAIN: c_int = 1; +pub const TCSETSF: c_int = TIOC | 4; +pub const TCSAFLUSH: c_int = 2; +pub const TCIFLUSH: c_int = 0; +pub const TCOFLUSH: c_int = 1; +pub const TCIOFLUSH: c_int = 2; +pub const TCOOFF: c_int = 0; +pub const TCOON: c_int = 1; +pub const TCIOFF: c_int = 2; +pub const TCION: c_int = 3; +pub const TIOC: c_int = 0x5400; +pub const TIOCGWINSZ: c_int = 0x40087468; +pub const TIOCSWINSZ: c_int = 0x80087467; +pub const TIOCLBIS: c_int = 0x8004747f; +pub const TIOCLBIC: c_int = 0x8004747e; +pub const TIOCLSET: c_int = 0x8004747d; +pub const TIOCLGET: c_int = 0x4004747c; +pub const TIOCSBRK: c_int = 0x2000747b; +pub const TIOCCBRK: c_int = 0x2000747a; +pub const TIOCSDTR: c_int = 0x20007479; +pub const TIOCCDTR: c_int = 0x20007478; +pub const TIOCSLTC: c_int = 0x80067475; +pub const TIOCGLTC: c_int = 0x40067474; +pub const TIOCOUTQ: c_int = 0x40047473; +pub const TIOCNOTTY: c_int = 0x20007471; +pub const TIOCSTOP: c_int = 0x2000746f; +pub const TIOCSTART: c_int = 0x2000746e; +pub const TIOCGPGRP: c_int = 0x40047477; +pub const TIOCSPGRP: c_int = 0x80047476; +pub const TIOCGSID: c_int = 0x40047448; +pub const TIOCSTI: c_int = 0x80017472; +pub const TIOCMSET: c_int = 0x8004746d; +pub const TIOCMBIS: c_int = 0x8004746c; +pub const TIOCMBIC: c_int = 0x8004746b; +pub const TIOCMGET: c_int = 0x4004746a; +pub const TIOCREMOTE: c_int = 0x80047469; + +// sys/user.h +pub const MAXCOMLEN: c_int = 32; +pub const UF_SYSTEM: c_int = 0x1000; + +// sys/vattr.h +pub const AT_FLAGS: c_int = 0x80; +pub const AT_GID: c_int = 8; +pub const AT_UID: c_int = 4; + +// sys/wait.h +pub const P_ALL: idtype_t = 0; +pub const P_PID: idtype_t = 1; +pub const P_PGID: idtype_t = 2; +pub const WNOHANG: c_int = 0x1; +pub const WUNTRACED: c_int = 0x2; +pub const WEXITED: c_int = 0x04; +pub const WCONTINUED: c_int = 0x01000000; +pub const WNOWAIT: c_int = 0x10; +pub const WSTOPPED: c_int = _W_STOPPED; +pub const _W_STOPPED: c_int = 0x00000040; +pub const _W_SLWTED: c_int = 0x0000007c; +pub const _W_SEWTED: c_int = 0x0000007d; +pub const _W_SFWTED: c_int = 0x0000007e; +pub const _W_STRC: c_int = 0x0000007f; + +// termios.h +pub const NCCS: usize = 16; +pub const OLCUC: crate::tcflag_t = 2; +pub const CSIZE: crate::tcflag_t = 0x00000030; +pub const CS5: crate::tcflag_t = 0x00000000; +pub const CS6: crate::tcflag_t = 0x00000010; +pub const CS7: crate::tcflag_t = 0x00000020; +pub const CS8: crate::tcflag_t = 0x00000030; +pub const CSTOPB: crate::tcflag_t = 0x00000040; +pub const ECHO: crate::tcflag_t = 0x00000008; +pub const ECHOE: crate::tcflag_t = 0x00000010; +pub const ECHOK: crate::tcflag_t = 0x00000020; +pub const ECHONL: crate::tcflag_t = 0x00000040; +pub const ECHOCTL: crate::tcflag_t = 0x00020000; +pub const ECHOPRT: crate::tcflag_t = 0x00040000; +pub const ECHOKE: crate::tcflag_t = 0x00080000; +pub const IGNBRK: crate::tcflag_t = 0x00000001; +pub const BRKINT: crate::tcflag_t = 0x00000002; +pub const IGNPAR: crate::tcflag_t = 0x00000004; +pub const PARMRK: crate::tcflag_t = 0x00000008; +pub const INPCK: crate::tcflag_t = 0x00000010; +pub const ISTRIP: crate::tcflag_t = 0x00000020; +pub const INLCR: crate::tcflag_t = 0x00000040; +pub const IGNCR: crate::tcflag_t = 0x00000080; +pub const ICRNL: crate::tcflag_t = 0x00000100; +pub const IXON: crate::tcflag_t = 0x00000200; +pub const IXOFF: crate::tcflag_t = 0x00000400; +pub const IXANY: 
crate::tcflag_t = 0x00001000; +pub const IMAXBEL: crate::tcflag_t = 0x00010000; +pub const OPOST: crate::tcflag_t = 0x00000001; +pub const ONLCR: crate::tcflag_t = 0x00000004; +pub const OCRNL: crate::tcflag_t = 0x00000008; +pub const ONOCR: crate::tcflag_t = 0x00000010; +pub const ONLRET: crate::tcflag_t = 0x00000020; +pub const CREAD: crate::tcflag_t = 0x00000080; +pub const IEXTEN: crate::tcflag_t = 0x00200000; +pub const TOSTOP: crate::tcflag_t = 0x00010000; +pub const FLUSHO: crate::tcflag_t = 0x00100000; +pub const PENDIN: crate::tcflag_t = 0x20000000; +pub const NOFLSH: crate::tcflag_t = 0x00000080; +pub const VINTR: usize = 0; +pub const VQUIT: usize = 1; +pub const VERASE: usize = 2; +pub const VKILL: usize = 3; +pub const VEOF: usize = 4; +pub const VEOL: usize = 5; +pub const VSTART: usize = 7; +pub const VSTOP: usize = 8; +pub const VSUSP: usize = 9; +pub const VMIN: usize = 4; +pub const VTIME: usize = 5; +pub const VEOL2: usize = 6; +pub const VDSUSP: usize = 10; +pub const VREPRINT: usize = 11; +pub const VDISCRD: usize = 12; +pub const VWERSE: usize = 13; +pub const VLNEXT: usize = 14; +pub const B0: crate::speed_t = 0x0; +pub const B50: crate::speed_t = 0x1; +pub const B75: crate::speed_t = 0x2; +pub const B110: crate::speed_t = 0x3; +pub const B134: crate::speed_t = 0x4; +pub const B150: crate::speed_t = 0x5; +pub const B200: crate::speed_t = 0x6; +pub const B300: crate::speed_t = 0x7; +pub const B600: crate::speed_t = 0x8; +pub const B1200: crate::speed_t = 0x9; +pub const B1800: crate::speed_t = 0xa; +pub const B2400: crate::speed_t = 0xb; +pub const B4800: crate::speed_t = 0xc; +pub const B9600: crate::speed_t = 0xd; +pub const B19200: crate::speed_t = 0xe; +pub const B38400: crate::speed_t = 0xf; +pub const EXTA: crate::speed_t = B19200; +pub const EXTB: crate::speed_t = B38400; +pub const IUCLC: crate::tcflag_t = 0x00000800; +pub const OFILL: crate::tcflag_t = 0x00000040; +pub const OFDEL: crate::tcflag_t = 0x00000080; +pub const CRDLY: crate::tcflag_t = 0x00000300; +pub const CR0: crate::tcflag_t = 0x00000000; +pub const CR1: crate::tcflag_t = 0x00000100; +pub const CR2: crate::tcflag_t = 0x00000200; +pub const CR3: crate::tcflag_t = 0x00000300; +pub const TABDLY: crate::tcflag_t = 0x00000c00; +pub const TAB0: crate::tcflag_t = 0x00000000; +pub const TAB1: crate::tcflag_t = 0x00000400; +pub const TAB2: crate::tcflag_t = 0x00000800; +pub const TAB3: crate::tcflag_t = 0x00000c00; +pub const BSDLY: crate::tcflag_t = 0x00001000; +pub const BS0: crate::tcflag_t = 0x00000000; +pub const BS1: crate::tcflag_t = 0x00001000; +pub const FFDLY: crate::tcflag_t = 0x00002000; +pub const FF0: crate::tcflag_t = 0x00000000; +pub const FF1: crate::tcflag_t = 0x00002000; +pub const NLDLY: crate::tcflag_t = 0x00004000; +pub const NL0: crate::tcflag_t = 0x00000000; +pub const NL1: crate::tcflag_t = 0x00004000; +pub const VTDLY: crate::tcflag_t = 0x00008000; +pub const VT0: crate::tcflag_t = 0x00000000; +pub const VT1: crate::tcflag_t = 0x00008000; +pub const OXTABS: crate::tcflag_t = 0x00040000; +pub const ONOEOT: crate::tcflag_t = 0x00080000; +pub const CBAUD: crate::tcflag_t = 0x0000000f; +pub const PARENB: crate::tcflag_t = 0x00000100; +pub const PARODD: crate::tcflag_t = 0x00000200; +pub const HUPCL: crate::tcflag_t = 0x00000400; +pub const CLOCAL: crate::tcflag_t = 0x00000800; +pub const CIBAUD: crate::tcflag_t = 0x000f0000; +pub const IBSHIFT: crate::tcflag_t = 16; +pub const PAREXT: crate::tcflag_t = 0x00100000; +pub const ISIG: crate::tcflag_t = 0x00000001; +pub const ICANON: 
crate::tcflag_t = 0x00000002; +pub const XCASE: crate::tcflag_t = 0x00000004; +pub const ALTWERASE: crate::tcflag_t = 0x00400000; + +// time.h +pub const CLOCK_PROCESS_CPUTIME_ID: crate::clockid_t = 11; +pub const CLOCK_THREAD_CPUTIME_ID: crate::clockid_t = 12; + +// unistd.h +pub const STDIN_FILENO: c_int = 0; +pub const STDOUT_FILENO: c_int = 1; +pub const STDERR_FILENO: c_int = 2; +pub const _POSIX_VDISABLE: c_int = 0xff; +pub const _PC_LINK_MAX: c_int = 11; +pub const _PC_MAX_CANON: c_int = 12; +pub const _PC_MAX_INPUT: c_int = 13; +pub const _PC_NAME_MAX: c_int = 14; +pub const _PC_PATH_MAX: c_int = 16; +pub const _PC_PIPE_BUF: c_int = 17; +pub const _PC_NO_TRUNC: c_int = 15; +pub const _PC_VDISABLE: c_int = 18; +pub const _PC_CHOWN_RESTRICTED: c_int = 10; +pub const _PC_ASYNC_IO: c_int = 19; +pub const _PC_PRIO_IO: c_int = 21; +pub const _PC_SYNC_IO: c_int = 20; +pub const _PC_ALLOC_SIZE_MIN: c_int = 26; +pub const _PC_REC_INCR_XFER_SIZE: c_int = 27; +pub const _PC_REC_MAX_XFER_SIZE: c_int = 28; +pub const _PC_REC_MIN_XFER_SIZE: c_int = 29; +pub const _PC_REC_XFER_ALIGN: c_int = 30; +pub const _PC_SYMLINK_MAX: c_int = 25; +pub const _PC_2_SYMLINKS: c_int = 31; +pub const _PC_TIMESTAMP_RESOLUTION: c_int = 32; +pub const _PC_FILESIZEBITS: c_int = 22; +pub const _SC_ARG_MAX: c_int = 0; +pub const _SC_CHILD_MAX: c_int = 1; +pub const _SC_CLK_TCK: c_int = 2; +pub const _SC_NGROUPS_MAX: c_int = 3; +pub const _SC_OPEN_MAX: c_int = 4; +pub const _SC_JOB_CONTROL: c_int = 7; +pub const _SC_SAVED_IDS: c_int = 8; +pub const _SC_VERSION: c_int = 9; +pub const _SC_PASS_MAX: c_int = 45; +pub const _SC_PAGESIZE: c_int = _SC_PAGE_SIZE; +pub const _SC_PAGE_SIZE: c_int = 48; +pub const _SC_XOPEN_VERSION: c_int = 46; +pub const _SC_NPROCESSORS_CONF: c_int = 71; +pub const _SC_NPROCESSORS_ONLN: c_int = 72; +pub const _SC_STREAM_MAX: c_int = 5; +pub const _SC_TZNAME_MAX: c_int = 6; +pub const _SC_AIO_LISTIO_MAX: c_int = 75; +pub const _SC_AIO_MAX: c_int = 76; +pub const _SC_AIO_PRIO_DELTA_MAX: c_int = 77; +pub const _SC_ASYNCHRONOUS_IO: c_int = 78; +pub const _SC_DELAYTIMER_MAX: c_int = 79; +pub const _SC_FSYNC: c_int = 80; +pub const _SC_MAPPED_FILES: c_int = 84; +pub const _SC_MEMLOCK: c_int = 85; +pub const _SC_MEMLOCK_RANGE: c_int = 86; +pub const _SC_MEMORY_PROTECTION: c_int = 87; +pub const _SC_MESSAGE_PASSING: c_int = 88; +pub const _SC_MQ_OPEN_MAX: c_int = 89; +pub const _SC_MQ_PRIO_MAX: c_int = 90; +pub const _SC_PRIORITIZED_IO: c_int = 91; +pub const _SC_PRIORITY_SCHEDULING: c_int = 92; +pub const _SC_REALTIME_SIGNALS: c_int = 93; +pub const _SC_RTSIG_MAX: c_int = 94; +pub const _SC_SEMAPHORES: c_int = 95; +pub const _SC_SEM_NSEMS_MAX: c_int = 96; +pub const _SC_SEM_VALUE_MAX: c_int = 97; +pub const _SC_SHARED_MEMORY_OBJECTS: c_int = 98; +pub const _SC_SIGQUEUE_MAX: c_int = 99; +pub const _SC_SYNCHRONIZED_IO: c_int = 100; +pub const _SC_TIMERS: c_int = 102; +pub const _SC_TIMER_MAX: c_int = 103; +pub const _SC_2_C_BIND: c_int = 51; +pub const _SC_2_C_DEV: c_int = 32; +pub const _SC_2_C_VERSION: c_int = 52; +pub const _SC_2_FORT_DEV: c_int = 33; +pub const _SC_2_FORT_RUN: c_int = 34; +pub const _SC_2_LOCALEDEF: c_int = 35; +pub const _SC_2_SW_DEV: c_int = 36; +pub const _SC_2_UPE: c_int = 53; +pub const _SC_2_VERSION: c_int = 31; +pub const _SC_BC_BASE_MAX: c_int = 23; +pub const _SC_BC_DIM_MAX: c_int = 24; +pub const _SC_BC_SCALE_MAX: c_int = 25; +pub const _SC_BC_STRING_MAX: c_int = 26; +pub const _SC_COLL_WEIGHTS_MAX: c_int = 50; +pub const _SC_EXPR_NEST_MAX: c_int = 28; +pub const 
_SC_LINE_MAX: c_int = 29; +pub const _SC_RE_DUP_MAX: c_int = 30; +pub const _SC_XOPEN_CRYPT: c_int = 56; +pub const _SC_XOPEN_ENH_I18N: c_int = 57; +pub const _SC_XOPEN_SHM: c_int = 55; +pub const _SC_2_CHAR_TERM: c_int = 54; +pub const _SC_XOPEN_XCU_VERSION: c_int = 109; +pub const _SC_ATEXIT_MAX: c_int = 47; +pub const _SC_IOV_MAX: c_int = 58; +pub const _SC_XOPEN_UNIX: c_int = 73; +pub const _SC_T_IOV_MAX: c_int = 0; +pub const _SC_PHYS_PAGES: c_int = 113; +pub const _SC_AVPHYS_PAGES: c_int = 114; +pub const _SC_THREAD_DESTRUCTOR_ITERATIONS: c_int = 101; +pub const _SC_GETGR_R_SIZE_MAX: c_int = 81; +pub const _SC_GETPW_R_SIZE_MAX: c_int = 82; +pub const _SC_LOGIN_NAME_MAX: c_int = 83; +pub const _SC_THREAD_KEYS_MAX: c_int = 68; +pub const _SC_THREAD_STACK_MIN: c_int = 69; +pub const _SC_THREAD_THREADS_MAX: c_int = 70; +pub const _SC_TTY_NAME_MAX: c_int = 104; +pub const _SC_THREADS: c_int = 60; +pub const _SC_THREAD_ATTR_STACKADDR: c_int = 61; +pub const _SC_THREAD_ATTR_STACKSIZE: c_int = 62; +pub const _SC_THREAD_PRIORITY_SCHEDULING: c_int = 64; +pub const _SC_THREAD_PRIO_INHERIT: c_int = 65; +pub const _SC_THREAD_PRIO_PROTECT: c_int = 66; +pub const _SC_THREAD_PROCESS_SHARED: c_int = 67; +pub const _SC_THREAD_SAFE_FUNCTIONS: c_int = 59; +pub const _SC_XOPEN_LEGACY: c_int = 112; +pub const _SC_XOPEN_REALTIME: c_int = 110; +pub const _SC_XOPEN_REALTIME_THREADS: c_int = 111; +pub const _SC_XBS5_ILP32_OFF32: c_int = 105; +pub const _SC_XBS5_ILP32_OFFBIG: c_int = 106; +pub const _SC_XBS5_LP64_OFF64: c_int = 107; +pub const _SC_XBS5_LPBIG_OFFBIG: c_int = 108; +pub const _SC_2_PBS: c_int = 132; +pub const _SC_2_PBS_ACCOUNTING: c_int = 133; +pub const _SC_2_PBS_CHECKPOINT: c_int = 134; +pub const _SC_2_PBS_LOCATE: c_int = 135; +pub const _SC_2_PBS_MESSAGE: c_int = 136; +pub const _SC_2_PBS_TRACK: c_int = 137; +pub const _SC_ADVISORY_INFO: c_int = 130; +pub const _SC_BARRIERS: c_int = 138; +pub const _SC_CLOCK_SELECTION: c_int = 139; +pub const _SC_CPUTIME: c_int = 140; +pub const _SC_HOST_NAME_MAX: c_int = 126; +pub const _SC_MONOTONIC_CLOCK: c_int = 141; +pub const _SC_READER_WRITER_LOCKS: c_int = 142; +pub const _SC_REGEXP: c_int = 127; +pub const _SC_SHELL: c_int = 128; +pub const _SC_SPAWN: c_int = 143; +pub const _SC_SPIN_LOCKS: c_int = 144; +pub const _SC_SPORADIC_SERVER: c_int = 145; +pub const _SC_SS_REPL_MAX: c_int = 156; +pub const _SC_SYMLOOP_MAX: c_int = 129; +pub const _SC_THREAD_CPUTIME: c_int = 146; +pub const _SC_THREAD_SPORADIC_SERVER: c_int = 147; +pub const _SC_TIMEOUTS: c_int = 148; +pub const _SC_TRACE: c_int = 149; +pub const _SC_TRACE_EVENT_FILTER: c_int = 150; +pub const _SC_TRACE_EVENT_NAME_MAX: c_int = 157; +pub const _SC_TRACE_INHERIT: c_int = 151; +pub const _SC_TRACE_LOG: c_int = 152; +pub const _SC_TRACE_NAME_MAX: c_int = 158; +pub const _SC_TRACE_SYS_MAX: c_int = 159; +pub const _SC_TRACE_USER_EVENT_MAX: c_int = 160; +pub const _SC_TYPED_MEMORY_OBJECTS: c_int = 153; +pub const _SC_V6_ILP32_OFF32: c_int = 121; +pub const _SC_V6_ILP32_OFFBIG: c_int = 122; +pub const _SC_V6_LP64_OFF64: c_int = 123; +pub const _SC_V6_LPBIG_OFFBIG: c_int = 124; +pub const _SC_XOPEN_STREAMS: c_int = 125; +pub const _SC_IPV6: c_int = 154; +pub const _SC_RAW_SOCKETS: c_int = 155; + +// utmp.h +pub const EMPTY: c_short = 0; +pub const RUN_LVL: c_short = 1; +pub const BOOT_TIME: c_short = 2; +pub const OLD_TIME: c_short = 3; +pub const NEW_TIME: c_short = 4; +pub const INIT_PROCESS: c_short = 5; +pub const LOGIN_PROCESS: c_short = 6; +pub const USER_PROCESS: c_short = 7; +pub const 
DEAD_PROCESS: c_short = 8;
+pub const ACCOUNTING: c_short = 9;
+
+f! {
+    pub fn CMSG_FIRSTHDR(mhdr: *const msghdr) -> *mut cmsghdr {
+        if (*mhdr).msg_controllen as usize >= size_of::<cmsghdr>() {
+            (*mhdr).msg_control as *mut cmsghdr
+        } else {
+            core::ptr::null_mut::<cmsghdr>()
+        }
+    }
+
+    pub fn CMSG_NXTHDR(mhdr: *const msghdr, cmsg: *const cmsghdr) -> *mut cmsghdr {
+        if cmsg.is_null() {
+            CMSG_FIRSTHDR(mhdr)
+        } else {
+            if (cmsg as usize + (*cmsg).cmsg_len as usize + size_of::<cmsghdr>())
+                > ((*mhdr).msg_control as usize + (*mhdr).msg_controllen as usize)
+            {
+                core::ptr::null_mut::<cmsghdr>()
+            } else {
+                // AIX does not have any alignment/padding for ancillary data, so we don't need _CMSG_ALIGN here.
+                (cmsg as usize + (*cmsg).cmsg_len as usize) as *mut cmsghdr
+            }
+        }
+    }
+
+    pub fn CMSG_DATA(cmsg: *const cmsghdr) -> *mut c_uchar {
+        (cmsg as *mut c_uchar).offset(size_of::<cmsghdr>() as isize)
+    }
+
+    pub const fn CMSG_LEN(length: c_uint) -> c_uint {
+        size_of::<cmsghdr>() as c_uint + length
+    }
+
+    pub const fn CMSG_SPACE(length: c_uint) -> c_uint {
+        size_of::<cmsghdr>() as c_uint + length
+    }
+
+    pub fn FD_ZERO(set: *mut fd_set) -> () {
+        for slot in (*set).fds_bits.iter_mut() {
+            *slot = 0;
+        }
+    }
+
+    pub fn FD_SET(fd: c_int, set: *mut fd_set) -> () {
+        let bits = size_of::<c_long>() * 8;
+        let fd = fd as usize;
+        (*set).fds_bits[fd / bits] |= 1 << (fd % bits);
+        return;
+    }
+
+    pub fn FD_CLR(fd: c_int, set: *mut fd_set) -> () {
+        let bits = size_of::<c_long>() * 8;
+        let fd = fd as usize;
+        (*set).fds_bits[fd / bits] &= !(1 << (fd % bits));
+        return;
+    }
+
+    pub fn FD_ISSET(fd: c_int, set: *const fd_set) -> bool {
+        let bits = size_of::<c_long>() * 8;
+        let fd = fd as usize;
+        return ((*set).fds_bits[fd / bits] & (1 << (fd % bits))) != 0;
+    }
+}
+
+safe_f! {
+    pub const fn WIFSTOPPED(status: c_int) -> bool {
+        (status & _W_STOPPED) != 0
+    }
+
+    pub const fn WSTOPSIG(status: c_int) -> c_int {
+        if WIFSTOPPED(status) {
+            (((status as c_uint) >> 8) & 0xff) as c_int
+        } else {
+            -1
+        }
+    }
+
+    pub const fn WIFEXITED(status: c_int) -> bool {
+        (status & 0xFF) == 0
+    }
+
+    pub const fn WEXITSTATUS(status: c_int) -> c_int {
+        if WIFEXITED(status) {
+            (((status as c_uint) >> 8) & 0xff) as c_int
+        } else {
+            -1
+        }
+    }
+
+    pub const fn WIFSIGNALED(status: c_int) -> bool {
+        !WIFEXITED(status) && !WIFSTOPPED(status)
+    }
+
+    pub const fn WTERMSIG(status: c_int) -> c_int {
+        if WIFSIGNALED(status) {
+            (((status as c_uint) >> 16) & 0xff) as c_int
+        } else {
+            -1
+        }
+    }
+
+    pub const fn WIFCONTINUED(status: c_int) -> bool {
+        (status & WCONTINUED) != 0
+    }
+
+    // AIX doesn't have native WCOREDUMP.
+    pub const fn WCOREDUMP(_status: c_int) -> bool {
+        false
+    }
+
+    pub const fn major(dev: crate::dev_t) -> c_uint {
+        let x = dev >> 16;
+        x as c_uint
+    }
+
+    pub const fn minor(dev: crate::dev_t) -> c_uint {
+        let y = dev & 0xFFFF;
+        y as c_uint
+    }
+
+    pub const fn makedev(major: c_uint, minor: c_uint) -> crate::dev_t {
+        let major = major as crate::dev_t;
+        let minor = minor as crate::dev_t;
+        let mut dev = 0;
+        dev |= major << 16;
+        dev |= minor;
+        dev
+    }
+}
+
+#[link(name = "thread")]
+extern "C" {
+    pub fn thr_kill(id: thread_t, sig: c_int) -> c_int;
+    pub fn thr_self() -> thread_t;
+}
+
+#[link(name = "pthread")]
+extern "C" {
+    pub fn pthread_atfork(
+        prepare: Option<unsafe extern "C" fn()>,
+        parent: Option<unsafe extern "C" fn()>,
+        child: Option<unsafe extern "C" fn()>,
+    ) -> c_int;
+
+    pub fn pthread_attr_getdetachstate(
+        attr: *const crate::pthread_attr_t,
+        detachstate: *mut c_int,
+    ) -> c_int;
+
+    pub fn pthread_attr_getguardsize(
+        attr: *const crate::pthread_attr_t,
+        guardsize: *mut size_t,
+    ) -> c_int;
+
+    pub fn pthread_attr_getinheritsched(
+        attr: *const crate::pthread_attr_t,
+        inheritsched: *mut c_int,
+    ) -> c_int;
+
+    pub fn pthread_attr_getschedparam(
+        attr: *const crate::pthread_attr_t,
+        param: *mut sched_param,
+    ) -> c_int;
+
+    pub fn pthread_attr_getstackaddr(
+        attr: *const crate::pthread_attr_t,
+        stackaddr: *mut *mut c_void,
+    ) -> c_int;
+
+    pub fn pthread_attr_getschedpolicy(
+        attr: *const crate::pthread_attr_t,
+        policy: *mut c_int,
+    ) -> c_int;
+
+    pub fn pthread_attr_getscope(
+        attr: *const crate::pthread_attr_t,
+        contentionscope: *mut c_int,
+    ) -> c_int;
+
+    pub fn pthread_attr_getstack(
+        attr: *const crate::pthread_attr_t,
+        stackaddr: *mut *mut c_void,
+        stacksize: *mut size_t,
+    ) -> c_int;
+
+    pub fn pthread_attr_setguardsize(attr: *mut crate::pthread_attr_t, guardsize: size_t) -> c_int;
+
+    pub fn pthread_attr_setinheritsched(
+        attr: *mut crate::pthread_attr_t,
+        inheritsched: c_int,
+    ) -> c_int;
+
+    pub fn pthread_attr_setschedparam(
+        attr: *mut crate::pthread_attr_t,
+        param: *const sched_param,
+    ) -> c_int;
+
+    pub fn pthread_attr_setschedpolicy(attr: *mut crate::pthread_attr_t, policy: c_int) -> c_int;
+
+    pub fn pthread_attr_setscope(attr: *mut crate::pthread_attr_t, contentionscope: c_int)
+        -> c_int;
+
+    pub fn pthread_attr_setstack(
+        attr: *mut crate::pthread_attr_t,
+        stackaddr: *mut c_void,
+        stacksize: size_t,
+    ) -> c_int;
+
+    pub fn pthread_attr_setstackaddr(
+        attr: *mut crate::pthread_attr_t,
+        stackaddr: *mut c_void,
+    ) -> c_int;
+
+    pub fn pthread_barrierattr_destroy(attr: *mut crate::pthread_barrierattr_t) -> c_int;
+
+    pub fn pthread_barrierattr_getpshared(
+        attr: *const crate::pthread_barrierattr_t,
+        pshared: *mut c_int,
+    ) -> c_int;
+
+    pub fn pthread_barrierattr_init(attr: *mut crate::pthread_barrierattr_t) -> c_int;
+
+    pub fn pthread_barrierattr_setpshared(
+        attr: *mut crate::pthread_barrierattr_t,
+        pshared: c_int,
+    ) -> c_int;
+
+    pub fn pthread_barrier_destroy(barrier: *mut pthread_barrier_t) -> c_int;
+
+    pub fn pthread_barrier_init(
+        barrier: *mut pthread_barrier_t,
+        attr: *const crate::pthread_barrierattr_t,
+        count: c_uint,
+    ) -> c_int;
+
+    pub fn pthread_barrier_wait(barrier: *mut pthread_barrier_t) -> c_int;
+
+    pub fn pthread_cancel(thread: crate::pthread_t) -> c_int;
+
+    pub fn pthread_cleanup_pop(execute: c_int) -> c_void;
+
+    pub fn pthread_cleanup_push(
+        routine: Option<unsafe extern "C" fn(*mut c_void)>,
+        arg: *mut c_void,
+    ) -> c_void;
+
+    pub fn pthread_condattr_getclock(
+        attr: *const pthread_condattr_t,
+        clock_id: *mut clockid_t,
+    ) -> c_int;
+
+    pub fn
pthread_condattr_getpshared( + attr: *const pthread_condattr_t, + pshared: *mut c_int, + ) -> c_int; + + pub fn pthread_condattr_setclock( + attr: *mut pthread_condattr_t, + clock_id: crate::clockid_t, + ) -> c_int; + + pub fn pthread_condattr_setpshared(attr: *mut pthread_condattr_t, pshared: c_int) -> c_int; + + pub fn pthread_create( + thread: *mut crate::pthread_t, + attr: *const crate::pthread_attr_t, + start_routine: extern "C" fn(*mut c_void) -> *mut c_void, + arg: *mut c_void, + ) -> c_int; + + pub fn pthread_getconcurrency() -> c_int; + + pub fn pthread_getcpuclockid( + thread_id: crate::pthread_t, + clock_id: *mut crate::clockid_t, + ) -> c_int; + + pub fn pthread_getschedparam( + thread: crate::pthread_t, + policy: *mut c_int, + param: *mut sched_param, + ) -> c_int; + + pub fn pthread_kill(thread: crate::pthread_t, sig: c_int) -> c_int; + + pub fn pthread_mutexattr_getprioceiling( + attr: *const crate::pthread_mutexattr_t, + prioceiling: *mut c_int, + ) -> c_int; + + pub fn pthread_mutexattr_getprotocol( + attr: *const pthread_mutexattr_t, + protocol: *mut c_int, + ) -> c_int; + + pub fn pthread_mutexattr_getpshared( + attr: *const pthread_mutexattr_t, + pshared: *mut c_int, + ) -> c_int; + + pub fn pthread_mutexattr_getrobust( + attr: *const crate::pthread_mutexattr_t, + robust: *mut c_int, + ) -> c_int; + + pub fn pthread_mutexattr_gettype( + attr: *const crate::pthread_mutexattr_t, + _type: *mut c_int, + ) -> c_int; + + pub fn pthread_mutexattr_setprioceiling( + attr: *mut crate::pthread_mutexattr_t, + prioceiling: c_int, + ) -> c_int; + + pub fn pthread_mutexattr_setprotocol(attr: *mut pthread_mutexattr_t, protocol: c_int) -> c_int; + + pub fn pthread_mutexattr_setpshared(attr: *mut pthread_mutexattr_t, pshared: c_int) -> c_int; + + pub fn pthread_mutexattr_setrobust( + attr: *mut crate::pthread_mutexattr_t, + robust: c_int, + ) -> c_int; + + pub fn pthread_mutex_consistent(mutex: *mut crate::pthread_mutex_t) -> c_int; + + pub fn pthread_mutex_getprioceiling( + mutex: *const crate::pthread_mutex_t, + prioceiling: *mut c_int, + ) -> c_int; + + pub fn pthread_mutex_setprioceiling( + mutex: *mut crate::pthread_mutex_t, + prioceiling: c_int, + old_ceiling: *mut c_int, + ) -> c_int; + + pub fn pthread_mutex_timedlock( + mutex: *mut pthread_mutex_t, + abstime: *const crate::timespec, + ) -> c_int; + + pub fn pthread_once( + once_control: *mut crate::pthread_once_t, + init_routine: Option, + ) -> c_int; + + pub fn pthread_rwlockattr_getpshared( + attr: *const pthread_rwlockattr_t, + pshared: *mut c_int, + ) -> c_int; + + pub fn pthread_rwlockattr_setpshared(attr: *mut pthread_rwlockattr_t, pshared: c_int) -> c_int; + + pub fn pthread_rwlock_timedrdlock( + rwlock: *mut crate::pthread_rwlock_t, + abstime: *const crate::timespec, + ) -> c_int; + + pub fn pthread_rwlock_timedwrlock( + rwlock: *mut crate::pthread_rwlock_t, + abstime: *const crate::timespec, + ) -> c_int; + + pub fn pthread_setcancelstate(state: c_int, oldstate: *mut c_int) -> c_int; + pub fn pthread_setcanceltype(_type: c_int, oldtype: *mut c_int) -> c_int; + + pub fn pthread_setconcurrency(new_level: c_int) -> c_int; + + pub fn pthread_setschedparam( + thread: crate::pthread_t, + policy: c_int, + param: *const sched_param, + ) -> c_int; + + pub fn pthread_setschedprio(thread: crate::pthread_t, prio: c_int) -> c_int; + + pub fn pthread_sigmask(how: c_int, set: *const sigset_t, oset: *mut sigset_t) -> c_int; + + pub fn pthread_spin_destroy(lock: *mut pthread_spinlock_t) -> c_int; + pub fn pthread_spin_init(lock: *mut 
pthread_spinlock_t, pshared: c_int) -> c_int; + pub fn pthread_spin_lock(lock: *mut pthread_spinlock_t) -> c_int; + pub fn pthread_spin_trylock(lock: *mut pthread_spinlock_t) -> c_int; + pub fn pthread_spin_unlock(lock: *mut pthread_spinlock_t) -> c_int; + + pub fn pthread_testcancel() -> c_void; +} + +#[link(name = "iconv")] +extern "C" { + pub fn iconv( + cd: iconv_t, + inbuf: *mut *mut c_char, + inbytesleft: *mut size_t, + outbuf: *mut *mut c_char, + outbytesleft: *mut size_t, + ) -> size_t; + pub fn iconv_close(cd: iconv_t) -> c_int; + pub fn iconv_open(tocode: *const c_char, fromcode: *const c_char) -> iconv_t; +} + +extern "C" { + pub fn acct(filename: *mut c_char) -> c_int; + #[link_name = "_posix_aio_cancel"] + pub fn aio_cancel(fildes: c_int, aiocbp: *mut crate::aiocb) -> c_int; + #[link_name = "_posix_aio_error"] + pub fn aio_error(aiocbp: *const crate::aiocb) -> c_int; + #[link_name = "_posix_aio_fsync"] + pub fn aio_fsync(op: c_int, aiocbp: *mut crate::aiocb) -> c_int; + #[link_name = "_posix_aio_read"] + pub fn aio_read(aiocbp: *mut crate::aiocb) -> c_int; + #[link_name = "_posix_aio_return"] + pub fn aio_return(aiocbp: *mut crate::aiocb) -> ssize_t; + #[link_name = "_posix_aio_suspend"] + pub fn aio_suspend( + list: *const *const crate::aiocb, + nent: c_int, + timeout: *const crate::timespec, + ) -> c_int; + #[link_name = "_posix_aio_write"] + pub fn aio_write(aiocbp: *mut crate::aiocb) -> c_int; + pub fn basename(path: *mut c_char) -> *mut c_char; + pub fn bind( + socket: c_int, + address: *const crate::sockaddr, + address_len: crate::socklen_t, + ) -> c_int; + pub fn brk(addr: *mut c_void) -> c_int; + pub fn clearenv() -> c_int; + pub fn clock_getcpuclockid(pid: crate::pid_t, clk_id: *mut crate::clockid_t) -> c_int; + pub fn clock_getres(clk_id: crate::clockid_t, tp: *mut crate::timespec) -> c_int; + pub fn clock_gettime(clk_id: crate::clockid_t, tp: *mut crate::timespec) -> c_int; + pub fn clock_nanosleep( + clk_id: crate::clockid_t, + flags: c_int, + rqtp: *const crate::timespec, + rmtp: *mut crate::timespec, + ) -> c_int; + pub fn clock_settime(clock_id: crate::clockid_t, tp: *const crate::timespec) -> c_int; + pub fn creat64(path: *const c_char, mode: mode_t) -> c_int; + pub fn ctermid(s: *mut c_char) -> *mut c_char; + pub fn dirfd(dirp: *mut crate::DIR) -> c_int; + pub fn dirname(path: *mut c_char) -> *mut c_char; + pub fn drand48() -> c_double; + pub fn duplocale(arg1: crate::locale_t) -> crate::locale_t; + pub fn endgrent(); + pub fn endmntent(streamp: *mut crate::FILE) -> c_int; + pub fn endpwent(); + pub fn endutent(); + pub fn endutxent(); + pub fn erand48(xseed: *mut c_ushort) -> c_double; + pub fn faccessat(dirfd: c_int, pathname: *const c_char, mode: c_int, flags: c_int) -> c_int; + pub fn fattach(fildes: c_int, path: *const c_char) -> c_int; + pub fn fdatasync(fd: c_int) -> c_int; + + // DIFF(main): changed to `*const *mut` in e77f551de9 + pub fn fexecve(fd: c_int, argv: *const *const c_char, envp: *const *const c_char) -> c_int; + + pub fn ffs(value: c_int) -> c_int; + pub fn ffsl(value: c_long) -> c_int; + pub fn ffsll(value: c_longlong) -> c_int; + pub fn fgetgrent(file: *mut crate::FILE) -> *mut crate::group; + pub fn fgetpos64(stream: *mut crate::FILE, ptr: *mut fpos64_t) -> c_int; + pub fn fgetpwent(file: *mut crate::FILE) -> *mut crate::passwd; + pub fn fopen64(filename: *const c_char, mode: *const c_char) -> *mut crate::FILE; + pub fn freelocale(loc: crate::locale_t); + pub fn freopen64( + filename: *const c_char, + mode: *const c_char, + file: *mut 
crate::FILE, + ) -> *mut crate::FILE; + pub fn fseeko64(stream: *mut crate::FILE, offset: off64_t, whence: c_int) -> c_int; + pub fn fsetpos64(stream: *mut crate::FILE, ptr: *const fpos64_t) -> c_int; + pub fn fstat64(fildes: c_int, buf: *mut stat64) -> c_int; + pub fn fstatfs(fd: c_int, buf: *mut statfs) -> c_int; + pub fn fstatfs64(fd: c_int, buf: *mut statfs64) -> c_int; + pub fn fstatvfs64(fd: c_int, buf: *mut statvfs64) -> c_int; + pub fn ftello64(stream: *mut crate::FILE) -> off64_t; + pub fn ftok(path: *const c_char, id: c_int) -> crate::key_t; + pub fn ftruncate64(fd: c_int, length: off64_t) -> c_int; + pub fn futimens(fd: c_int, times: *const crate::timespec) -> c_int; + pub fn getcontext(ucp: *mut ucontext_t) -> c_int; + pub fn getdomainname(name: *mut c_char, len: c_int) -> c_int; + pub fn getdtablesize() -> c_int; + pub fn getgrent() -> *mut crate::group; + pub fn getgrgid(gid: crate::gid_t) -> *mut crate::group; + #[link_name = "_posix_getgrgid_r"] + pub fn getgrgid_r( + gid: crate::gid_t, + grp: *mut crate::group, + buf: *mut c_char, + buflen: size_t, + result: *mut *mut crate::group, + ) -> c_int; + pub fn getgrnam(name: *const c_char) -> *mut crate::group; + #[link_name = "_posix_getgrnam_r"] + pub fn getgrnam_r( + name: *const c_char, + grp: *mut crate::group, + buf: *mut c_char, + buflen: size_t, + result: *mut *mut crate::group, + ) -> c_int; + pub fn getgrset(user: *const c_char) -> *mut c_char; + pub fn gethostid() -> c_long; + pub fn getmntent(stream: *mut crate::FILE) -> *mut crate::mntent; + pub fn getnameinfo( + sa: *const crate::sockaddr, + salen: size_t, + host: *mut c_char, + hostlen: size_t, + serv: *mut c_char, + servlen: size_t, + flags: c_int, + ) -> c_int; + pub fn getpagesize() -> c_int; + pub fn getpeereid(socket: c_int, euid: *mut crate::uid_t, egid: *mut crate::gid_t) -> c_int; + pub fn getpriority(which: c_int, who: crate::id_t) -> c_int; + pub fn getpwent() -> *mut crate::passwd; + #[link_name = "_posix_getpwnam_r"] + pub fn getpwnam_r( + name: *const c_char, + pwd: *mut passwd, + buf: *mut c_char, + buflen: size_t, + result: *mut *mut passwd, + ) -> c_int; + #[link_name = "_posix_getpwuid_r"] + pub fn getpwuid_r( + uid: crate::uid_t, + pwd: *mut passwd, + buf: *mut c_char, + buflen: size_t, + result: *mut *mut passwd, + ) -> c_int; + pub fn getrlimit(resource: c_int, rlim: *mut crate::rlimit) -> c_int; + pub fn getrlimit64(resource: c_int, rlim: *mut rlimit64) -> c_int; + pub fn gettimeofday(tp: *mut crate::timeval, tz: *mut c_void) -> c_int; + pub fn getitimer(which: c_int, curr_value: *mut crate::itimerval) -> c_int; + pub fn getutent() -> *mut utmp; + pub fn getutid(u: *const utmp) -> *mut utmp; + pub fn getutline(u: *const utmp) -> *mut utmp; + pub fn getutxent() -> *mut utmpx; + pub fn getutxid(ut: *const utmpx) -> *mut utmpx; + pub fn getutxline(ut: *const utmpx) -> *mut utmpx; + pub fn glob( + pattern: *const c_char, + flags: c_int, + errfunc: Option c_int>, + pglob: *mut crate::glob_t, + ) -> c_int; + pub fn globfree(pglob: *mut crate::glob_t); + pub fn hasmntopt(mnt: *const crate::mntent, opt: *const c_char) -> *mut c_char; + pub fn hcreate(nelt: size_t) -> c_int; + pub fn hdestroy(); + pub fn hsearch(entry: entry, action: ACTION) -> *mut entry; + pub fn if_freenameindex(ptr: *mut if_nameindex); + pub fn if_nameindex() -> *mut if_nameindex; + pub fn initgroups(name: *const c_char, basegid: crate::gid_t) -> c_int; + pub fn ioctl(fildes: c_int, request: c_int, ...) 
-> c_int;
+    pub fn jrand48(xseed: *mut c_ushort) -> c_long;
+    pub fn lcong48(p: *mut c_ushort);
+    pub fn lfind(
+        key: *const c_void,
+        base: *const c_void,
+        nelp: *mut size_t,
+        width: size_t,
+        compar: Option<unsafe extern "C" fn(*const c_void, *const c_void) -> c_int>,
+    ) -> *mut c_void;
+    #[link_name = "_posix_lio_listio"]
+    pub fn lio_listio(
+        mode: c_int,
+        aiocb_list: *const *mut aiocb,
+        nent: c_int,
+        sevp: *mut sigevent,
+    ) -> c_int;
+    pub fn loadquery(flags: c_int, buf: *mut c_void, buflen: c_uint, ...) -> c_int;
+    pub fn lpar_get_info(command: c_int, buf: *mut c_void, bufsize: size_t) -> c_int;
+    pub fn lpar_set_resources(id: c_int, resource: *mut c_void) -> c_int;
+    pub fn lrand48() -> c_long;
+    pub fn lsearch(
+        key: *const c_void,
+        base: *mut c_void,
+        nelp: *mut size_t,
+        width: size_t,
+        compar: Option<unsafe extern "C" fn(*const c_void, *const c_void) -> c_int>,
+    ) -> *mut c_void;
+    pub fn lseek64(fd: c_int, offset: off64_t, whence: c_int) -> off64_t;
+    pub fn lstat64(path: *const c_char, buf: *mut stat64) -> c_int;
+    pub fn madvise(addr: caddr_t, len: size_t, advice: c_int) -> c_int;
+    pub fn makecontext(ucp: *mut crate::ucontext_t, func: extern "C" fn(), argc: c_int, ...);
+    pub fn mallinfo() -> crate::mallinfo;
+    pub fn mallopt(param: c_int, value: c_int) -> c_int;
+    pub fn memmem(
+        haystack: *const c_void,
+        haystacklen: size_t,
+        needle: *const c_void,
+        needlelen: size_t,
+    ) -> *mut c_void;
+    pub fn memset_s(s: *mut c_void, smax: size_t, c: c_int, n: size_t) -> c_int;
+    pub fn mincore(addr: caddr_t, len: size_t, vec: *mut c_char) -> c_int;
+    pub fn mkfifoat(dirfd: c_int, pathname: *const c_char, mode: mode_t) -> c_int;
+    pub fn mknodat(dirfd: c_int, pathname: *const c_char, mode: mode_t, dev: dev_t) -> c_int;
+    pub fn mount(device: *const c_char, path: *const c_char, flags: c_int) -> c_int;
+    pub fn mprotect(addr: *mut c_void, len: size_t, prot: c_int) -> c_int;
+    pub fn mq_close(mqd: crate::mqd_t) -> c_int;
+    pub fn mq_getattr(mqd: crate::mqd_t, attr: *mut crate::mq_attr) -> c_int;
+    pub fn mq_notify(mqd: crate::mqd_t, notification: *const crate::sigevent) -> c_int;
+    pub fn mq_open(name: *const c_char, oflag: c_int, ...)
-> crate::mqd_t; + pub fn mq_receive( + mqd: crate::mqd_t, + msg_ptr: *mut c_char, + msg_len: size_t, + msg_prio: *mut c_uint, + ) -> ssize_t; + pub fn mq_send( + mqd: crate::mqd_t, + msg_ptr: *const c_char, + msg_len: size_t, + msg_prio: c_uint, + ) -> c_int; + pub fn mq_setattr( + mqd: crate::mqd_t, + newattr: *const crate::mq_attr, + oldattr: *mut crate::mq_attr, + ) -> c_int; + pub fn mq_timedreceive( + mqd: crate::mqd_t, + msg_ptr: *mut c_char, + msg_len: size_t, + msg_prio: *mut c_uint, + abs_timeout: *const crate::timespec, + ) -> ssize_t; + pub fn mq_timedsend( + mqd: crate::mqd_t, + msg_ptr: *const c_char, + msg_len: size_t, + msg_prio: c_uint, + abs_timeout: *const crate::timespec, + ) -> c_int; + pub fn mq_unlink(name: *const c_char) -> c_int; + pub fn mrand48() -> c_long; + pub fn msgctl(msqid: c_int, cmd: c_int, buf: *mut msqid_ds) -> c_int; + pub fn msgget(key: crate::key_t, msgflg: c_int) -> c_int; + pub fn msgrcv( + msqid: c_int, + msgp: *mut c_void, + msgsz: size_t, + msgtyp: c_long, + msgflg: c_int, + ) -> ssize_t; + pub fn msgsnd(msqid: c_int, msgp: *const c_void, msgsz: size_t, msgflg: c_int) -> c_int; + pub fn msync(addr: *mut c_void, len: size_t, flags: c_int) -> c_int; + pub fn newlocale(mask: c_int, locale: *const c_char, base: crate::locale_t) -> crate::locale_t; + pub fn nl_langinfo(item: crate::nl_item) -> *mut c_char; + pub fn nl_langinfo_l(item: crate::nl_item, loc: crate::locale_t) -> *mut c_char; + pub fn nrand48(xseed: *mut c_ushort) -> c_long; + pub fn open64(path: *const c_char, oflag: c_int, ...) -> c_int; + pub fn pollset_create(maxfd: c_int) -> pollset_t; + pub fn pollset_ctl(ps: pollset_t, pollctl_array: *mut poll_ctl, array_length: c_int) -> c_int; + pub fn pollset_destroy(ps: pollset_t) -> c_int; + pub fn pollset_poll( + ps: pollset_t, + polldata_array: *mut crate::pollfd, + array_length: c_int, + timeout: c_int, + ) -> c_int; + pub fn pollset_query(ps: pollset_t, pollfd_query: *mut crate::pollfd) -> c_int; + pub fn popen(command: *const c_char, mode: *const c_char) -> *mut crate::FILE; + pub fn posix_fadvise(fd: c_int, offset: off_t, len: off_t, advise: c_int) -> c_int; + pub fn posix_fadvise64(fd: c_int, offset: off64_t, len: off64_t, advise: c_int) -> c_int; + pub fn posix_fallocate(fd: c_int, offset: off_t, len: off_t) -> c_int; + pub fn posix_fallocate64(fd: c_int, offset: off64_t, len: off64_t) -> c_int; + pub fn posix_madvise(addr: *mut c_void, len: size_t, advice: c_int) -> c_int; + pub fn posix_spawn( + pid: *mut crate::pid_t, + path: *const c_char, + file_actions: *const crate::posix_spawn_file_actions_t, + attrp: *const crate::posix_spawnattr_t, + argv: *const *mut c_char, + envp: *const *mut c_char, + ) -> c_int; + pub fn posix_spawn_file_actions_addclose( + actions: *mut posix_spawn_file_actions_t, + fd: c_int, + ) -> c_int; + pub fn posix_spawn_file_actions_adddup2( + actions: *mut posix_spawn_file_actions_t, + fd: c_int, + newfd: c_int, + ) -> c_int; + pub fn posix_spawn_file_actions_addopen( + actions: *mut posix_spawn_file_actions_t, + fd: c_int, + path: *const c_char, + oflag: c_int, + mode: mode_t, + ) -> c_int; + pub fn posix_spawn_file_actions_destroy(actions: *mut posix_spawn_file_actions_t) -> c_int; + pub fn posix_spawn_file_actions_init(actions: *mut posix_spawn_file_actions_t) -> c_int; + pub fn posix_spawnattr_destroy(attr: *mut posix_spawnattr_t) -> c_int; + pub fn posix_spawnattr_getflags(attr: *const posix_spawnattr_t, flags: *mut c_short) -> c_int; + pub fn posix_spawnattr_getpgroup( + attr: *const posix_spawnattr_t, + 
flags: *mut crate::pid_t, + ) -> c_int; + pub fn posix_spawnattr_getschedparam( + attr: *const posix_spawnattr_t, + param: *mut crate::sched_param, + ) -> c_int; + pub fn posix_spawnattr_getschedpolicy( + attr: *const posix_spawnattr_t, + flags: *mut c_int, + ) -> c_int; + pub fn posix_spawnattr_getsigdefault( + attr: *const posix_spawnattr_t, + default: *mut sigset_t, + ) -> c_int; + pub fn posix_spawnattr_getsigmask( + attr: *const posix_spawnattr_t, + default: *mut sigset_t, + ) -> c_int; + pub fn posix_spawnattr_init(attr: *mut posix_spawnattr_t) -> c_int; + pub fn posix_spawnattr_setflags(attr: *mut posix_spawnattr_t, flags: c_short) -> c_int; + pub fn posix_spawnattr_setpgroup(attr: *mut posix_spawnattr_t, flags: crate::pid_t) -> c_int; + pub fn posix_spawnattr_setschedparam( + attr: *mut posix_spawnattr_t, + param: *const crate::sched_param, + ) -> c_int; + pub fn posix_spawnattr_setschedpolicy(attr: *mut posix_spawnattr_t, flags: c_int) -> c_int; + pub fn posix_spawnattr_setsigdefault( + attr: *mut posix_spawnattr_t, + default: *const crate::sigset_t, + ) -> c_int; + pub fn posix_spawnattr_setsigmask( + attr: *mut posix_spawnattr_t, + default: *const crate::sigset_t, + ) -> c_int; + pub fn posix_spawnp( + pid: *mut crate::pid_t, + file: *const c_char, + file_actions: *const crate::posix_spawn_file_actions_t, + attrp: *const crate::posix_spawnattr_t, + argv: *const *mut c_char, + envp: *const *mut c_char, + ) -> c_int; + pub fn pread64(fd: c_int, buf: *mut c_void, count: size_t, offset: off64_t) -> ssize_t; + pub fn preadv(fd: c_int, iov: *const crate::iovec, iovcnt: c_int, offset: offset_t) -> ssize_t; + pub fn ptrace64( + request: c_int, + id: c_longlong, + addr: c_longlong, + data: c_int, + buff: *mut c_int, + ) -> c_int; + pub fn pututline(u: *const utmp) -> *mut utmp; + pub fn pututxline(ut: *const utmpx) -> *mut utmpx; + pub fn pwrite64(fd: c_int, buf: *const c_void, count: size_t, offset: off64_t) -> ssize_t; + pub fn pwritev(fd: c_int, iov: *const crate::iovec, iovcnt: c_int, offset: offset_t) + -> ssize_t; + pub fn quotactl(cmd: *mut c_char, special: c_int, id: c_int, data: caddr_t) -> c_int; + pub fn rand() -> c_int; + pub fn readv(fd: c_int, iov: *const crate::iovec, iovcnt: c_int) -> ssize_t; + // AIX header socket.h maps recvfrom() to nrecvfrom() + #[link_name = "nrecvfrom"] + pub fn recvfrom( + socket: c_int, + buf: *mut c_void, + len: size_t, + flags: c_int, + addr: *mut crate::sockaddr, + addrlen: *mut crate::socklen_t, + ) -> ssize_t; + pub fn recvmmsg( + sockfd: c_int, + msgvec: *mut crate::mmsghdr, + vlen: c_uint, + flags: c_int, + timeout: *mut crate::timespec, + ) -> c_int; + // AIX header socket.h maps recvmsg() to nrecvmsg(). 
+ #[link_name = "nrecvmsg"] + pub fn recvmsg(sockfd: c_int, msg: *mut msghdr, flags: c_int) -> ssize_t; + pub fn regcomp(preg: *mut regex_t, pattern: *const c_char, cflags: c_int) -> c_int; + pub fn regerror( + errcode: c_int, + preg: *const crate::regex_t, + errbuf: *mut c_char, + errbuf_size: size_t, + ) -> size_t; + pub fn regexec( + preg: *const regex_t, + input: *const c_char, + nmatch: size_t, + pmatch: *mut regmatch_t, + eflags: c_int, + ) -> c_int; + pub fn regfree(preg: *mut regex_t); + pub fn sbrk(increment: intptr_t) -> *mut c_void; + pub fn sched_getparam(pid: crate::pid_t, param: *mut sched_param) -> c_int; + pub fn sched_getscheduler(pid: crate::pid_t) -> c_int; + pub fn sched_get_priority_max(policy: c_int) -> c_int; + pub fn sched_get_priority_min(policy: c_int) -> c_int; + pub fn sched_rr_get_interval(pid: crate::pid_t, tp: *mut crate::timespec) -> c_int; + pub fn sched_setparam(pid: crate::pid_t, param: *const crate::sched_param) -> c_int; + pub fn sched_setscheduler( + pid: crate::pid_t, + policy: c_int, + param: *const crate::sched_param, + ) -> c_int; + pub fn sctp_opt_info( + sd: c_int, + id: crate::sctp_assoc_t, + opt: c_int, + arg_size: *mut c_void, + size: *mut size_t, + ) -> c_int; + pub fn sctp_peeloff(s: c_int, id: *mut c_uint) -> c_int; + pub fn seed48(xseed: *mut c_ushort) -> *mut c_ushort; + pub fn seekdir(dirp: *mut crate::DIR, loc: c_long); + pub fn sem_close(sem: *mut sem_t) -> c_int; + pub fn sem_destroy(sem: *mut sem_t) -> c_int; + pub fn sem_getvalue(sem: *mut sem_t, sval: *mut c_int) -> c_int; + pub fn sem_init(sem: *mut sem_t, pshared: c_int, value: c_uint) -> c_int; + pub fn sem_open(name: *const c_char, oflag: c_int, ...) -> *mut sem_t; + pub fn sem_timedwait(sem: *mut sem_t, abstime: *const crate::timespec) -> c_int; + pub fn sem_unlink(name: *const c_char) -> c_int; + pub fn semctl(semid: c_int, semnum: c_int, cmd: c_int, ...) -> c_int; + pub fn semget(key: crate::key_t, nsems: c_int, semflag: c_int) -> c_int; + pub fn semop(semid: c_int, sops: *mut sembuf, nsops: size_t) -> c_int; + pub fn send_file(socket: *mut c_int, iobuf: *mut sf_parms, flags: c_uint) -> ssize_t; + pub fn sendmmsg(sockfd: c_int, msgvec: *mut mmsghdr, vlen: c_uint, flags: c_int) -> c_int; + // AIX header socket.h maps sendmsg() to nsendmsg(). 
+ #[link_name = "nsendmsg"] + pub fn sendmsg(sockfd: c_int, msg: *const msghdr, flags: c_int) -> ssize_t; + pub fn setcontext(ucp: *const ucontext_t) -> c_int; + pub fn setdomainname(name: *const c_char, len: c_int) -> c_int; + pub fn setgroups(ngroups: c_int, ptr: *const crate::gid_t) -> c_int; + pub fn setgrent(); + pub fn sethostid(hostid: c_int) -> c_int; + pub fn sethostname(name: *const c_char, len: c_int) -> c_int; + pub fn setmntent(filename: *const c_char, ty: *const c_char) -> *mut crate::FILE; + pub fn setpriority(which: c_int, who: id_t, priority: c_int) -> c_int; + pub fn setpwent(); + pub fn setrlimit(resource: c_int, rlim: *const crate::rlimit) -> c_int; + pub fn setrlimit64(resource: c_int, rlim: *const rlimit64) -> c_int; + pub fn settimeofday(tv: *const crate::timeval, tz: *const crate::timezone) -> c_int; + pub fn setitimer( + which: c_int, + new_value: *const crate::itimerval, + old_value: *mut crate::itimerval, + ) -> c_int; + pub fn setutent(); + pub fn setutxent(); + pub fn sigaltstack(ss: *const stack_t, oss: *mut stack_t) -> c_int; + pub fn sigsuspend(mask: *const crate::sigset_t) -> c_int; + pub fn sigtimedwait( + set: *const sigset_t, + info: *mut siginfo_t, + timeout: *const crate::timespec, + ) -> c_int; + pub fn sigwait(set: *const sigset_t, sig: *mut c_int) -> c_int; + pub fn sigwaitinfo(set: *const sigset_t, info: *mut siginfo_t) -> c_int; + pub fn shmat(shmid: c_int, shmaddr: *const c_void, shmflg: c_int) -> *mut c_void; + pub fn shmdt(shmaddr: *const c_void) -> c_int; + pub fn shmctl(shmid: c_int, cmd: c_int, buf: *mut crate::shmid_ds) -> c_int; + pub fn shmget(key: key_t, size: size_t, shmflg: c_int) -> c_int; + pub fn shm_open(name: *const c_char, oflag: c_int, mode: mode_t) -> c_int; + pub fn shm_unlink(name: *const c_char) -> c_int; + pub fn splice(socket1: c_int, socket2: c_int, flags: c_int) -> c_int; + pub fn srand(seed: c_uint); + pub fn srand48(seed: c_long); + pub fn stat64(path: *const c_char, buf: *mut stat64) -> c_int; + pub fn stat64at(dirfd: c_int, path: *const c_char, buf: *mut stat64, flags: c_int) -> c_int; + pub fn statfs(path: *const c_char, buf: *mut statfs) -> c_int; + pub fn statfs64(path: *const c_char, buf: *mut statfs64) -> c_int; + pub fn statvfs64(path: *const c_char, buf: *mut statvfs64) -> c_int; + pub fn statx(path: *const c_char, buf: *mut stat, length: c_int, command: c_int) -> c_int; + pub fn strcasecmp_l( + string1: *const c_char, + string2: *const c_char, + locale: crate::locale_t, + ) -> c_int; + pub fn strerror_r(errnum: c_int, buf: *mut c_char, buflen: size_t) -> c_int; + pub fn strftime( + arg1: *mut c_char, + arg2: size_t, + arg3: *const c_char, + arg4: *const tm, + ) -> size_t; + pub fn strncasecmp_l( + string1: *const c_char, + string2: *const c_char, + length: size_t, + locale: crate::locale_t, + ) -> c_int; + pub fn strptime(s: *const c_char, format: *const c_char, tm: *mut crate::tm) -> *mut c_char; + pub fn strsep(string: *mut *mut c_char, delim: *const c_char) -> *mut c_char; + pub fn swapcontext(uocp: *mut ucontext_t, ucp: *const ucontext_t) -> c_int; + pub fn swapoff(path: *const c_char) -> c_int; + pub fn swapon(path: *const c_char) -> c_int; + pub fn sync(); + pub fn telldir(dirp: *mut crate::DIR) -> c_long; + pub fn timer_create( + clockid: crate::clockid_t, + sevp: *mut crate::sigevent, + timerid: *mut crate::timer_t, + ) -> c_int; + pub fn timer_delete(timerid: timer_t) -> c_int; + pub fn timer_getoverrun(timerid: timer_t) -> c_int; + pub fn timer_gettime(timerid: timer_t, value: *mut itimerspec) -> 
c_int; + pub fn timer_settime( + timerid: crate::timer_t, + flags: c_int, + new_value: *const crate::itimerspec, + old_value: *mut crate::itimerspec, + ) -> c_int; + pub fn truncate64(path: *const c_char, length: off64_t) -> c_int; + pub fn uname(buf: *mut crate::utsname) -> c_int; + pub fn updwtmp(file: *const c_char, u: *const utmp); + pub fn uselocale(loc: crate::locale_t) -> crate::locale_t; + pub fn utmpname(file: *const c_char) -> c_int; + pub fn utimensat( + dirfd: c_int, + path: *const c_char, + times: *const crate::timespec, + flag: c_int, + ) -> c_int; + pub fn wait4( + pid: crate::pid_t, + status: *mut c_int, + options: c_int, + rusage: *mut crate::rusage, + ) -> crate::pid_t; + pub fn waitid( + idtype: idtype_t, + id: id_t, + infop: *mut crate::siginfo_t, + options: c_int, + ) -> c_int; + pub fn writev(fd: c_int, iov: *const crate::iovec, iovcnt: c_int) -> ssize_t; + + // Use AIX thread-safe version errno. + pub fn _Errno() -> *mut c_int; +} + +cfg_if! { + if #[cfg(target_arch = "powerpc64")] { + mod powerpc64; + pub use self::powerpc64::*; + } +} diff --git a/vendor/libc/src/unix/aix/powerpc64.rs b/vendor/libc/src/unix/aix/powerpc64.rs new file mode 100644 index 00000000000000..ba4ddc057c40be --- /dev/null +++ b/vendor/libc/src/unix/aix/powerpc64.rs @@ -0,0 +1,477 @@ +use crate::off_t; +use crate::prelude::*; + +// Define lock_data_instrumented as an empty enum +missing! { + #[derive(Debug)] + pub enum lock_data_instrumented {} +} + +s! { + pub struct sigset_t { + pub ss_set: [c_ulong; 4], + } + + pub struct fd_set { + pub fds_bits: [c_long; 1024], + } + + pub struct flock { + pub l_type: c_short, + pub l_whence: c_short, + pub l_sysid: c_uint, + pub l_pid: crate::pid_t, + pub l_vfs: c_int, + pub l_start: off_t, + pub l_len: off_t, + } + + pub struct statvfs { + pub f_bsize: c_ulong, + pub f_frsize: c_ulong, + pub f_blocks: crate::fsblkcnt_t, + pub f_bfree: crate::fsblkcnt_t, + pub f_bavail: crate::fsblkcnt_t, + pub f_files: crate::fsfilcnt_t, + pub f_ffree: crate::fsfilcnt_t, + pub f_favail: crate::fsfilcnt_t, + pub f_fsid: c_ulong, + pub f_basetype: [c_char; 16], + pub f_flag: c_ulong, + pub f_namemax: c_ulong, + pub f_fstr: [c_char; 32], + pub f_filler: [c_ulong; 16], + } + + pub struct pthread_rwlock_t { + __rw_word: [c_long; 10], + } + + pub struct pthread_cond_t { + __cv_word: [c_long; 6], + } + + pub struct pthread_mutex_t { + __mt_word: [c_long; 8], + } + + pub struct pthread_once_t { + __on_word: [c_long; 9], + } + + pub struct stat { + pub st_dev: crate::dev_t, + pub st_ino: crate::ino_t, + pub st_mode: crate::mode_t, + pub st_nlink: crate::nlink_t, + pub st_flag: c_ushort, + pub st_uid: crate::uid_t, + pub st_gid: crate::gid_t, + pub st_rdev: crate::dev_t, + pub st_ssize: c_int, + pub st_atim: crate::timespec, + pub st_mtim: crate::timespec, + pub st_ctim: crate::timespec, + pub st_blksize: crate::blksize_t, + pub st_blocks: crate::blkcnt_t, + pub st_vfstype: c_int, + pub st_vfs: c_uint, + pub st_type: c_uint, + pub st_gen: c_uint, + pub st_reserved: [c_uint; 9], + pub st_padto_ll: c_uint, + pub st_size: off_t, + } + + pub struct statfs { + pub f_version: c_int, + pub f_type: c_int, + pub f_bsize: c_ulong, + pub f_blocks: crate::fsblkcnt_t, + pub f_bfree: crate::fsblkcnt_t, + pub f_bavail: crate::fsblkcnt_t, + pub f_files: crate::fsblkcnt_t, + pub f_ffree: crate::fsblkcnt_t, + pub f_fsid: crate::fsid64_t, + pub f_vfstype: c_int, + pub f_fsize: c_ulong, + pub f_vfsnumber: c_int, + pub f_vfsoff: c_int, + pub f_vfslen: c_int, + pub f_vfsvers: c_int, + pub f_fname: 
[c_char; 32], + pub f_fpack: [c_char; 32], + pub f_name_max: c_int, + } + + pub struct aiocb { + pub aio_lio_opcode: c_int, + pub aio_fildes: c_int, + pub aio_word1: c_int, + pub aio_offset: off_t, + pub aio_buf: *mut c_void, + pub aio_return: ssize_t, + pub aio_errno: c_int, + pub aio_nbytes: size_t, + pub aio_reqprio: c_int, + pub aio_sigevent: crate::sigevent, + pub aio_word2: c_int, + pub aio_fp: c_int, + pub aio_handle: *mut aiocb, + pub aio_reserved: [c_uint; 2], + pub aio_sigev_tid: c_long, + } + + pub struct __vmxreg_t { + __v: [c_uint; 4], + } + + pub struct __vmx_context_t { + pub __vr: [crate::__vmxreg_t; 32], + pub __pad1: [c_uint; 3], + pub __vscr: c_uint, + pub __vrsave: c_uint, + pub __pad2: [c_uint; 3], + } + + pub struct __vsx_context_t { + pub __vsr_dw1: [c_ulonglong; 32], + } + + pub struct __tm_context_t { + pub vmx: crate::__vmx_context_t, + pub vsx: crate::__vsx_context_t, + pub gpr: [c_ulonglong; 32], + pub lr: c_ulonglong, + pub ctr: c_ulonglong, + pub cr: c_uint, + pub xer: c_uint, + pub amr: c_ulonglong, + pub texasr: c_ulonglong, + pub tfiar: c_ulonglong, + pub tfhar: c_ulonglong, + pub ppr: c_ulonglong, + pub dscr: c_ulonglong, + pub tar: c_ulonglong, + pub fpscr: c_uint, + pub fpscrx: c_uint, + pub fpr: [fpreg_t; 32], + pub tmcontext: c_char, + pub tmstate: c_char, + pub prevowner: c_char, + pub pad: [c_char; 5], + } + + pub struct __context64 { + pub gpr: [c_ulonglong; 32], + pub msr: c_ulonglong, + pub iar: c_ulonglong, + pub lr: c_ulonglong, + pub ctr: c_ulonglong, + pub cr: c_uint, + pub xer: c_uint, + pub fpscr: c_uint, + pub fpscrx: c_uint, + pub except: [c_ulonglong; 1], + pub fpr: [fpreg_t; 32], + pub fpeu: c_char, + pub fpinfo: c_char, + pub fpscr24_31: c_char, + pub pad: [c_char; 1], + pub excp_type: c_int, + } + + pub struct mcontext_t { + pub jmp_context: __context64, + } + + pub struct __extctx_t { + pub __flags: c_uint, + pub __rsvd1: [c_uint; 3], + pub __vmx: crate::__vmx_context_t, + pub __ukeys: [c_uint; 2], + pub __vsx: crate::__vsx_context_t, + pub __tm: crate::__tm_context_t, + pub __reserved: [c_char; 1860], + pub __extctx_magic: c_int, + } + + pub struct ucontext_t { + pub __sc_onstack: c_int, + pub uc_sigmask: crate::sigset_t, + pub __sc_uerror: c_int, + pub uc_mcontext: crate::mcontext_t, + pub uc_link: *mut ucontext_t, + pub uc_stack: crate::stack_t, + pub __extctx: *mut crate::__extctx_t, + pub __extctx_magic: c_int, + pub __pad: [c_int; 1], + } + + pub struct utmpx { + pub ut_user: [c_char; 256], + pub ut_id: [c_char; 14], + pub ut_line: [c_char; 64], + pub ut_pid: crate::pid_t, + pub ut_type: c_short, + pub ut_tv: crate::timeval, + pub ut_host: [c_char; 256], + pub __dbl_word_pad: c_int, + pub __reservedA: [c_int; 2], + pub __reservedV: [c_int; 6], + } + + pub struct pthread_spinlock_t { + pub __sp_word: [c_long; 3], + } + + pub struct pthread_barrier_t { + pub __br_word: [c_long; 5], + } + + pub struct msqid_ds { + pub msg_perm: crate::ipc_perm, + pub msg_first: c_uint, + pub msg_last: c_uint, + pub msg_cbytes: c_uint, + pub msg_qnum: c_uint, + pub msg_qbytes: c_ulong, + pub msg_lspid: crate::pid_t, + pub msg_lrpid: crate::pid_t, + pub msg_stime: crate::time_t, + pub msg_rtime: crate::time_t, + pub msg_ctime: crate::time_t, + pub msg_rwait: c_int, + pub msg_wwait: c_int, + pub msg_reqevents: c_ushort, + } +} + +s_no_extra_traits! 
{ + pub struct siginfo_t { + pub si_signo: c_int, + pub si_errno: c_int, + pub si_code: c_int, + pub si_pid: crate::pid_t, + pub si_uid: crate::uid_t, + pub si_status: c_int, + pub si_addr: *mut c_void, + pub si_band: c_long, + pub si_value: crate::sigval, + pub __si_flags: c_int, + pub __pad: [c_int; 3], + } + + pub union _kernel_simple_lock { + pub _slock: c_long, + pub _slockp: *mut lock_data_instrumented, + } + + pub struct fileops_t { + pub fo_rw: Option< + extern "C" fn( + file: *mut file, + rw: crate::uio_rw, + io: *mut c_void, + ext: c_long, + secattr: *mut c_void, + ) -> c_int, + >, + pub fo_ioctl: Option< + extern "C" fn( + file: *mut file, + a: c_long, + b: crate::caddr_t, + c: c_long, + d: c_long, + ) -> c_int, + >, + pub fo_select: Option< + extern "C" fn(file: *mut file, a: c_int, b: *mut c_ushort, c: extern "C" fn()) -> c_int, + >, + pub fo_close: Option c_int>, + pub fo_fstat: Option c_int>, + } + + pub struct file { + pub f_flag: c_long, + pub f_count: c_int, + pub f_options: c_short, + pub f_type: c_short, + // Should be pointer to 'vnode' + pub f_data: *mut c_void, + pub f_offset: c_longlong, + pub f_dir_off: c_long, + // Should be pointer to 'cred' + pub f_cred: *mut c_void, + pub f_lock: _kernel_simple_lock, + pub f_offset_lock: _kernel_simple_lock, + pub f_vinfo: crate::caddr_t, + pub f_ops: *mut fileops_t, + pub f_parentp: crate::caddr_t, + pub f_fnamep: crate::caddr_t, + pub f_fdata: [c_char; 160], + } + + pub union __ld_info_file { + pub _ldinfo_fd: c_int, + pub _ldinfo_fp: *mut file, + pub _core_offset: c_long, + } + + pub struct ld_info { + pub ldinfo_next: c_uint, + pub ldinfo_flags: c_uint, + pub _file: __ld_info_file, + pub ldinfo_textorg: *mut c_void, + pub ldinfo_textsize: c_ulong, + pub ldinfo_dataorg: *mut c_void, + pub ldinfo_datasize: c_ulong, + pub ldinfo_filename: [c_char; 2], + } + + pub union __pollfd_ext_u { + pub addr: *mut c_void, + pub data32: u32, + pub data: u64, + } + + pub struct pollfd_ext { + pub fd: c_int, + pub events: c_short, + pub revents: c_short, + pub data: __pollfd_ext_u, + } + + pub struct fpreg_t { + pub d: c_double, + } +} + +impl siginfo_t { + pub unsafe fn si_addr(&self) -> *mut c_void { + self.si_addr + } + + pub unsafe fn si_value(&self) -> crate::sigval { + self.si_value + } + + pub unsafe fn si_pid(&self) -> crate::pid_t { + self.si_pid + } + + pub unsafe fn si_uid(&self) -> crate::uid_t { + self.si_uid + } + + pub unsafe fn si_status(&self) -> c_int { + self.si_status + } +} + +cfg_if! 
{ + if #[cfg(feature = "extra_traits")] { + impl PartialEq for siginfo_t { + fn eq(&self, other: &siginfo_t) -> bool { + self.si_signo == other.si_signo + && self.si_errno == other.si_errno + && self.si_code == other.si_code + && self.si_pid == other.si_pid + && self.si_uid == other.si_uid + && self.si_status == other.si_status + && self.si_addr == other.si_addr + && self.si_band == other.si_band + && self.__si_flags == other.__si_flags + && self.si_value == other.si_value + } + } + impl Eq for siginfo_t {} + impl hash::Hash for siginfo_t { + fn hash(&self, state: &mut H) { + self.si_signo.hash(state); + self.si_errno.hash(state); + self.si_code.hash(state); + self.si_pid.hash(state); + self.si_uid.hash(state); + self.si_status.hash(state); + self.si_addr.hash(state); + self.si_band.hash(state); + self.si_value.hash(state); + self.__si_flags.hash(state); + } + } + + impl PartialEq for __pollfd_ext_u { + fn eq(&self, other: &__pollfd_ext_u) -> bool { + unsafe { + self.addr == other.addr + && self.data32 == other.data32 + && self.data == other.data + } + } + } + impl Eq for __pollfd_ext_u {} + impl hash::Hash for __pollfd_ext_u { + fn hash(&self, state: &mut H) { + unsafe { + self.addr.hash(state); + self.data.hash(state); + self.data32.hash(state); + } + } + } + + impl PartialEq for pollfd_ext { + fn eq(&self, other: &pollfd_ext) -> bool { + self.fd == other.fd + && self.events == other.events + && self.revents == other.revents + && self.data == other.data + } + } + impl Eq for pollfd_ext {} + impl hash::Hash for pollfd_ext { + fn hash(&self, state: &mut H) { + self.fd.hash(state); + self.events.hash(state); + self.revents.hash(state); + self.data.hash(state); + } + } + impl PartialEq for fpreg_t { + fn eq(&self, other: &fpreg_t) -> bool { + self.d == other.d + } + } + + impl Eq for fpreg_t {} + + impl hash::Hash for fpreg_t { + fn hash(&self, state: &mut H) { + let d: u64 = unsafe { mem::transmute(self.d) }; + d.hash(state); + } + } + } +} + +pub const PTHREAD_MUTEX_INITIALIZER: pthread_mutex_t = pthread_mutex_t { + __mt_word: [0, 2, 0, 0, 0, 0, 0, 0], +}; +pub const PTHREAD_COND_INITIALIZER: pthread_cond_t = pthread_cond_t { + __cv_word: [0, 0, 0, 0, 2, 0], +}; +pub const PTHREAD_RWLOCK_INITIALIZER: pthread_rwlock_t = pthread_rwlock_t { + __rw_word: [2, 0, 0, 0, 0, 0, 0, 0, 0, 0], +}; + +pub const PTHREAD_ONCE_INIT: pthread_once_t = pthread_once_t { + __on_word: [0, 0, 0, 0, 0, 2, 0, 0, 0], +}; + +pub const RLIM_INFINITY: c_ulong = 0x7fffffffffffffff; + +extern "C" { + pub fn getsystemcfg(label: c_int) -> c_ulong; +} diff --git a/vendor/libc/src/unix/bsd/apple/b32/mod.rs b/vendor/libc/src/unix/bsd/apple/b32/mod.rs new file mode 100644 index 00000000000000..bd6762558f508e --- /dev/null +++ b/vendor/libc/src/unix/bsd/apple/b32/mod.rs @@ -0,0 +1,135 @@ +//! 32-bit specific Apple (ios/darwin) definitions + +use crate::prelude::*; + +pub type boolean_t = c_int; + +s! 
{ + pub struct if_data { + pub ifi_type: c_uchar, + pub ifi_typelen: c_uchar, + pub ifi_physical: c_uchar, + pub ifi_addrlen: c_uchar, + pub ifi_hdrlen: c_uchar, + pub ifi_recvquota: c_uchar, + pub ifi_xmitquota: c_uchar, + pub ifi_unused1: c_uchar, + pub ifi_mtu: u32, + pub ifi_metric: u32, + pub ifi_baudrate: u32, + pub ifi_ipackets: u32, + pub ifi_ierrors: u32, + pub ifi_opackets: u32, + pub ifi_oerrors: u32, + pub ifi_collisions: u32, + pub ifi_ibytes: u32, + pub ifi_obytes: u32, + pub ifi_imcasts: u32, + pub ifi_omcasts: u32, + pub ifi_iqdrops: u32, + pub ifi_noproto: u32, + pub ifi_recvtiming: u32, + pub ifi_xmittiming: u32, + pub ifi_lastchange: crate::timeval, + pub ifi_unused2: u32, + pub ifi_hwassist: u32, + pub ifi_reserved1: u32, + pub ifi_reserved2: u32, + } + + pub struct bpf_hdr { + pub bh_tstamp: crate::timeval, + pub bh_caplen: u32, + pub bh_datalen: u32, + pub bh_hdrlen: c_ushort, + } + + pub struct malloc_zone_t { + __private: [crate::uintptr_t; 18], // FIXME(macos): keeping private for now + } +} + +s_no_extra_traits! { + pub struct pthread_attr_t { + __sig: c_long, + __opaque: [c_char; 36], + } + + pub struct pthread_once_t { + __sig: c_long, + __opaque: [c_char; crate::__PTHREAD_ONCE_SIZE__], + } + + #[repr(align(16))] + pub struct max_align_t { + priv_: [f64; 2], + } +} + +cfg_if! { + if #[cfg(feature = "extra_traits")] { + impl PartialEq for pthread_attr_t { + fn eq(&self, other: &pthread_attr_t) -> bool { + self.__sig == other.__sig + && self + .__opaque + .iter() + .zip(other.__opaque.iter()) + .all(|(a, b)| a == b) + } + } + impl Eq for pthread_attr_t {} + impl hash::Hash for pthread_attr_t { + fn hash(&self, state: &mut H) { + self.__sig.hash(state); + self.__opaque.hash(state); + } + } + impl PartialEq for pthread_once_t { + fn eq(&self, other: &pthread_once_t) -> bool { + self.__sig == other.__sig + && self + .__opaque + .iter() + .zip(other.__opaque.iter()) + .all(|(a, b)| a == b) + } + } + impl Eq for pthread_once_t {} + impl hash::Hash for pthread_once_t { + fn hash(&self, state: &mut H) { + self.__sig.hash(state); + self.__opaque.hash(state); + } + } + } +} + +#[doc(hidden)] +#[deprecated(since = "0.2.55")] +pub const NET_RT_MAXID: c_int = 10; + +pub const __PTHREAD_MUTEX_SIZE__: usize = 40; +pub const __PTHREAD_COND_SIZE__: usize = 24; +pub const __PTHREAD_CONDATTR_SIZE__: usize = 4; +pub const __PTHREAD_ONCE_SIZE__: usize = 4; +pub const __PTHREAD_RWLOCK_SIZE__: usize = 124; +pub const __PTHREAD_RWLOCKATTR_SIZE__: usize = 12; + +pub const TIOCTIMESTAMP: c_ulong = 0x40087459; +pub const TIOCDCDTIMESTAMP: c_ulong = 0x40087458; + +pub const BIOCSETF: c_ulong = 0x80084267; +pub const BIOCSRTIMEOUT: c_ulong = 0x8008426d; +pub const BIOCGRTIMEOUT: c_ulong = 0x4008426e; +pub const BIOCSETFNR: c_ulong = 0x8008427e; + +const _PTHREAD_ONCE_SIG_INIT: c_long = 0x30B1BCBA; +pub const PTHREAD_ONCE_INIT: crate::pthread_once_t = crate::pthread_once_t { + __sig: _PTHREAD_ONCE_SIG_INIT, + __opaque: [0; 4], +}; + +extern "C" { + pub fn exchangedata(path1: *const c_char, path2: *const c_char, options: c_ulong) -> c_int; +} diff --git a/vendor/libc/src/unix/bsd/apple/b64/aarch64/mod.rs b/vendor/libc/src/unix/bsd/apple/b64/aarch64/mod.rs new file mode 100644 index 00000000000000..a13013c09b03b2 --- /dev/null +++ b/vendor/libc/src/unix/bsd/apple/b64/aarch64/mod.rs @@ -0,0 +1,53 @@ +use crate::prelude::*; + +pub type boolean_t = c_int; +pub type mcontext_t = *mut __darwin_mcontext64; + +s! 
{ + pub struct malloc_zone_t { + __private: [crate::uintptr_t; 18], // FIXME(macos): needs arm64 auth pointers support + } + + pub struct ucontext_t { + pub uc_onstack: c_int, + pub uc_sigmask: crate::sigset_t, + pub uc_stack: crate::stack_t, + pub uc_link: *mut crate::ucontext_t, + pub uc_mcsize: usize, + pub uc_mcontext: mcontext_t, + } + + pub struct __darwin_mcontext64 { + pub __es: __darwin_arm_exception_state64, + pub __ss: __darwin_arm_thread_state64, + pub __ns: __darwin_arm_neon_state64, + } + + pub struct __darwin_arm_exception_state64 { + pub __far: u64, + pub __esr: u32, + pub __exception: u32, + } + + pub struct __darwin_arm_thread_state64 { + pub __x: [u64; 29], + pub __fp: u64, + pub __lr: u64, + pub __sp: u64, + pub __pc: u64, + pub __cpsr: u32, + pub __pad: u32, + } + + pub struct __darwin_arm_neon_state64 { + pub __v: [crate::__uint128_t; 32], + pub __fpsr: u32, + pub __fpcr: u32, + } +} + +s_no_extra_traits! { + pub struct max_align_t { + priv_: f64, + } +} diff --git a/vendor/libc/src/unix/bsd/apple/b64/mod.rs b/vendor/libc/src/unix/bsd/apple/b64/mod.rs new file mode 100644 index 00000000000000..34743464a44e76 --- /dev/null +++ b/vendor/libc/src/unix/bsd/apple/b64/mod.rs @@ -0,0 +1,141 @@ +//! 64-bit specific Apple (ios/darwin) definitions + +use crate::prelude::*; + +s! { + pub struct timeval32 { + pub tv_sec: i32, + pub tv_usec: i32, + } + + pub struct if_data { + pub ifi_type: c_uchar, + pub ifi_typelen: c_uchar, + pub ifi_physical: c_uchar, + pub ifi_addrlen: c_uchar, + pub ifi_hdrlen: c_uchar, + pub ifi_recvquota: c_uchar, + pub ifi_xmitquota: c_uchar, + pub ifi_unused1: c_uchar, + pub ifi_mtu: u32, + pub ifi_metric: u32, + pub ifi_baudrate: u32, + pub ifi_ipackets: u32, + pub ifi_ierrors: u32, + pub ifi_opackets: u32, + pub ifi_oerrors: u32, + pub ifi_collisions: u32, + pub ifi_ibytes: u32, + pub ifi_obytes: u32, + pub ifi_imcasts: u32, + pub ifi_omcasts: u32, + pub ifi_iqdrops: u32, + pub ifi_noproto: u32, + pub ifi_recvtiming: u32, + pub ifi_xmittiming: u32, + pub ifi_lastchange: timeval32, + pub ifi_unused2: u32, + pub ifi_hwassist: u32, + pub ifi_reserved1: u32, + pub ifi_reserved2: u32, + } + + pub struct bpf_hdr { + pub bh_tstamp: crate::timeval32, + pub bh_caplen: u32, + pub bh_datalen: u32, + pub bh_hdrlen: c_ushort, + } +} + +s_no_extra_traits! { + pub struct pthread_attr_t { + __sig: c_long, + __opaque: [c_char; 56], + } + + pub struct pthread_once_t { + __sig: c_long, + __opaque: [c_char; __PTHREAD_ONCE_SIZE__], + } +} + +cfg_if! 
{ + if #[cfg(feature = "extra_traits")] { + impl PartialEq for pthread_attr_t { + fn eq(&self, other: &pthread_attr_t) -> bool { + self.__sig == other.__sig + && self + .__opaque + .iter() + .zip(other.__opaque.iter()) + .all(|(a, b)| a == b) + } + } + impl Eq for pthread_attr_t {} + impl hash::Hash for pthread_attr_t { + fn hash(&self, state: &mut H) { + self.__sig.hash(state); + self.__opaque.hash(state); + } + } + impl PartialEq for pthread_once_t { + fn eq(&self, other: &pthread_once_t) -> bool { + self.__sig == other.__sig + && self + .__opaque + .iter() + .zip(other.__opaque.iter()) + .all(|(a, b)| a == b) + } + } + impl Eq for pthread_once_t {} + impl hash::Hash for pthread_once_t { + fn hash(&self, state: &mut H) { + self.__sig.hash(state); + self.__opaque.hash(state); + } + } + } +} + +#[doc(hidden)] +#[deprecated(since = "0.2.55")] +pub const NET_RT_MAXID: c_int = 11; + +pub const __PTHREAD_MUTEX_SIZE__: usize = 56; +pub const __PTHREAD_COND_SIZE__: usize = 40; +pub const __PTHREAD_CONDATTR_SIZE__: usize = 8; +pub const __PTHREAD_ONCE_SIZE__: usize = 8; +pub const __PTHREAD_RWLOCK_SIZE__: usize = 192; +pub const __PTHREAD_RWLOCKATTR_SIZE__: usize = 16; + +pub const TIOCTIMESTAMP: c_ulong = 0x40107459; +pub const TIOCDCDTIMESTAMP: c_ulong = 0x40107458; + +pub const BIOCSETF: c_ulong = 0x80104267; +pub const BIOCSRTIMEOUT: c_ulong = 0x8010426d; +pub const BIOCGRTIMEOUT: c_ulong = 0x4010426e; +pub const BIOCSETFNR: c_ulong = 0x8010427e; + +const _PTHREAD_ONCE_SIG_INIT: c_long = 0x30B1BCBA; +pub const PTHREAD_ONCE_INIT: crate::pthread_once_t = crate::pthread_once_t { + __sig: _PTHREAD_ONCE_SIG_INIT, + __opaque: [0; 8], +}; + +extern "C" { + pub fn exchangedata(path1: *const c_char, path2: *const c_char, options: c_uint) -> c_int; +} + +cfg_if! { + if #[cfg(target_arch = "x86_64")] { + mod x86_64; + pub use self::x86_64::*; + } else if #[cfg(target_arch = "aarch64")] { + mod aarch64; + pub use self::aarch64::*; + } else { + // Unknown target_arch + } +} diff --git a/vendor/libc/src/unix/bsd/apple/b64/x86_64/mod.rs b/vendor/libc/src/unix/bsd/apple/b64/x86_64/mod.rs new file mode 100644 index 00000000000000..5365becf66c3e7 --- /dev/null +++ b/vendor/libc/src/unix/bsd/apple/b64/x86_64/mod.rs @@ -0,0 +1,179 @@ +use crate::prelude::*; + +pub type boolean_t = c_uint; +pub type mcontext_t = *mut __darwin_mcontext64; + +s! 
{ + pub struct ucontext_t { + pub uc_onstack: c_int, + pub uc_sigmask: crate::sigset_t, + pub uc_stack: crate::stack_t, + pub uc_link: *mut crate::ucontext_t, + pub uc_mcsize: usize, + pub uc_mcontext: mcontext_t, + } + + pub struct __darwin_mcontext64 { + pub __es: __darwin_x86_exception_state64, + pub __ss: __darwin_x86_thread_state64, + pub __fs: __darwin_x86_float_state64, + } + + pub struct __darwin_x86_exception_state64 { + pub __trapno: u16, + pub __cpu: u16, + pub __err: u32, + pub __faultvaddr: u64, + } + + pub struct __darwin_x86_thread_state64 { + pub __rax: u64, + pub __rbx: u64, + pub __rcx: u64, + pub __rdx: u64, + pub __rdi: u64, + pub __rsi: u64, + pub __rbp: u64, + pub __rsp: u64, + pub __r8: u64, + pub __r9: u64, + pub __r10: u64, + pub __r11: u64, + pub __r12: u64, + pub __r13: u64, + pub __r14: u64, + pub __r15: u64, + pub __rip: u64, + pub __rflags: u64, + pub __cs: u64, + pub __fs: u64, + pub __gs: u64, + } + + pub struct __darwin_x86_float_state64 { + pub __fpu_reserved: [c_int; 2], + __fpu_fcw: c_short, + __fpu_fsw: c_short, + pub __fpu_ftw: u8, + pub __fpu_rsrv1: u8, + pub __fpu_fop: u16, + pub __fpu_ip: u32, + pub __fpu_cs: u16, + pub __fpu_rsrv2: u16, + pub __fpu_dp: u32, + pub __fpu_ds: u16, + pub __fpu_rsrv3: u16, + pub __fpu_mxcsr: u32, + pub __fpu_mxcsrmask: u32, + pub __fpu_stmm0: __darwin_mmst_reg, + pub __fpu_stmm1: __darwin_mmst_reg, + pub __fpu_stmm2: __darwin_mmst_reg, + pub __fpu_stmm3: __darwin_mmst_reg, + pub __fpu_stmm4: __darwin_mmst_reg, + pub __fpu_stmm5: __darwin_mmst_reg, + pub __fpu_stmm6: __darwin_mmst_reg, + pub __fpu_stmm7: __darwin_mmst_reg, + pub __fpu_xmm0: __darwin_xmm_reg, + pub __fpu_xmm1: __darwin_xmm_reg, + pub __fpu_xmm2: __darwin_xmm_reg, + pub __fpu_xmm3: __darwin_xmm_reg, + pub __fpu_xmm4: __darwin_xmm_reg, + pub __fpu_xmm5: __darwin_xmm_reg, + pub __fpu_xmm6: __darwin_xmm_reg, + pub __fpu_xmm7: __darwin_xmm_reg, + pub __fpu_xmm8: __darwin_xmm_reg, + pub __fpu_xmm9: __darwin_xmm_reg, + pub __fpu_xmm10: __darwin_xmm_reg, + pub __fpu_xmm11: __darwin_xmm_reg, + pub __fpu_xmm12: __darwin_xmm_reg, + pub __fpu_xmm13: __darwin_xmm_reg, + pub __fpu_xmm14: __darwin_xmm_reg, + pub __fpu_xmm15: __darwin_xmm_reg, + // this field is actually [u8; 96], but defining it with a bigger type + // allows us to auto-implement traits for it since the length of the + // array is less than 32 + __fpu_rsrv4: [u32; 24], + pub __fpu_reserved1: c_int, + } + + pub struct __darwin_mmst_reg { + pub __mmst_reg: [c_char; 10], + pub __mmst_rsrv: [c_char; 6], + } + + pub struct __darwin_xmm_reg { + pub __xmm_reg: [c_char; 16], + } + + pub struct malloc_introspection_t { + _private: [crate::uintptr_t; 16], // FIXME(macos): keeping private for now + } + + // FIXME(1.0): This should not implement `PartialEq` + #[allow(unpredictable_function_pointer_comparisons)] + pub struct malloc_zone_t { + _reserved1: *mut c_void, + _reserved2: *mut c_void, + pub size: + Option size_t>, + pub malloc: + Option *mut c_void>, + pub calloc: Option< + unsafe extern "C" fn( + zone: *mut malloc_zone_t, + num_items: size_t, + size: size_t, + ) -> *mut c_void, + >, + pub valloc: + Option *mut c_void>, + pub free: Option, + pub realloc: Option< + unsafe extern "C" fn( + zone: *mut malloc_zone_t, + ptr: *mut c_void, + size: size_t, + ) -> *mut c_void, + >, + pub destroy: Option, + pub zone_name: *const c_char, + pub batch_malloc: Option< + unsafe extern "C" fn( + zone: *mut malloc_zone_t, + size: size_t, + results: *mut *mut c_void, + num_requested: c_uint, + ) -> c_uint, + >, + pub 
batch_free: Option< + unsafe extern "C" fn( + zone: *mut malloc_zone_t, + to_be_freed: *mut *mut c_void, + num_to_be_freed: c_uint, + ), + >, + pub introspect: *mut malloc_introspection_t, + pub version: c_uint, + pub memalign: Option< + unsafe extern "C" fn( + zone: *mut malloc_zone_t, + alignment: size_t, + size: size_t, + ) -> *mut c_void, + >, + pub free_definite_size: + Option, + pub pressure_relief: + Option size_t>, + pub claimed_address: Option< + unsafe extern "C" fn(zone: *mut malloc_zone_t, ptr: *mut c_void) -> crate::boolean_t, + >, + } +} + +s_no_extra_traits! { + #[repr(align(16))] + pub struct max_align_t { + priv_: [f64; 2], + } +} diff --git a/vendor/libc/src/unix/bsd/apple/mod.rs b/vendor/libc/src/unix/bsd/apple/mod.rs new file mode 100644 index 00000000000000..857508f794ad1c --- /dev/null +++ b/vendor/libc/src/unix/bsd/apple/mod.rs @@ -0,0 +1,6245 @@ +//! Apple (ios/darwin)-specific definitions +//! +//! This covers *-apple-* triples currently + +use crate::prelude::*; +use crate::{cmsghdr, off_t}; + +pub type wchar_t = i32; +pub type clock_t = c_ulong; +pub type time_t = c_long; +pub type suseconds_t = i32; +pub type dev_t = i32; +pub type ino_t = u64; +pub type mode_t = u16; +pub type nlink_t = u16; +pub type blksize_t = i32; +pub type rlim_t = u64; +pub type pthread_key_t = c_ulong; +pub type sigset_t = u32; +pub type clockid_t = c_uint; +pub type fsblkcnt_t = c_uint; +pub type fsfilcnt_t = c_uint; +pub type speed_t = c_ulong; +pub type tcflag_t = c_ulong; +pub type nl_item = c_int; +pub type id_t = c_uint; +pub type sem_t = c_int; +pub type idtype_t = c_uint; +pub type integer_t = c_int; +pub type cpu_type_t = integer_t; +pub type cpu_subtype_t = integer_t; +pub type natural_t = u32; +pub type mach_msg_type_number_t = natural_t; +pub type kern_return_t = c_int; +pub type uuid_t = [u8; 16]; +pub type task_info_t = *mut integer_t; +pub type host_info_t = *mut integer_t; +pub type task_flavor_t = natural_t; +pub type rusage_info_t = *mut c_void; +pub type vm_offset_t = crate::uintptr_t; +pub type vm_size_t = crate::uintptr_t; +pub type vm_address_t = vm_offset_t; +pub type quad_t = i64; +pub type u_quad_t = u64; + +pub type posix_spawnattr_t = *mut c_void; +pub type posix_spawn_file_actions_t = *mut c_void; +pub type key_t = c_int; +pub type shmatt_t = c_ushort; + +pub type sae_associd_t = u32; +pub type sae_connid_t = u32; + +pub type mach_port_t = c_uint; +pub type host_t = c_uint; +pub type host_flavor_t = integer_t; +pub type host_info64_t = *mut integer_t; +pub type processor_flavor_t = c_int; +pub type thread_flavor_t = natural_t; +pub type thread_inspect_t = crate::mach_port_t; +pub type thread_act_t = crate::mach_port_t; +pub type thread_act_array_t = *mut crate::thread_act_t; +pub type policy_t = c_int; +pub type mach_error_t = crate::kern_return_t; +pub type mach_vm_address_t = u64; +pub type mach_vm_offset_t = u64; +pub type mach_vm_size_t = u64; +pub type vm_map_t = crate::mach_port_t; +pub type mem_entry_name_port_t = crate::mach_port_t; +pub type memory_object_t = crate::mach_port_t; +pub type memory_object_offset_t = c_ulonglong; +pub type vm_inherit_t = c_uint; +pub type vm_prot_t = c_int; + +pub type ledger_t = crate::mach_port_t; +pub type ledger_array_t = *mut crate::ledger_t; + +pub type iconv_t = *mut c_void; + +// mach/host_info.h +pub type host_cpu_load_info_t = *mut host_cpu_load_info; +pub type host_cpu_load_info_data_t = host_cpu_load_info; + +// mach/processor_info.h +pub type processor_cpu_load_info_t = *mut processor_cpu_load_info; +pub type 
processor_cpu_load_info_data_t = processor_cpu_load_info; +pub type processor_basic_info_t = *mut processor_basic_info; +pub type processor_basic_info_data_t = processor_basic_info; +pub type processor_set_basic_info_data_t = processor_set_basic_info; +pub type processor_set_basic_info_t = *mut processor_set_basic_info; +pub type processor_set_load_info_data_t = processor_set_load_info; +pub type processor_set_load_info_t = *mut processor_set_load_info; +pub type processor_info_t = *mut integer_t; +pub type processor_info_array_t = *mut integer_t; + +pub type mach_task_basic_info_data_t = mach_task_basic_info; +pub type mach_task_basic_info_t = *mut mach_task_basic_info; +pub type task_thread_times_info_data_t = task_thread_times_info; +pub type task_thread_times_info_t = *mut task_thread_times_info; + +pub type thread_info_t = *mut integer_t; +pub type thread_basic_info_t = *mut thread_basic_info; +pub type thread_basic_info_data_t = thread_basic_info; +pub type thread_identifier_info_t = *mut thread_identifier_info; +pub type thread_identifier_info_data_t = thread_identifier_info; +pub type thread_extended_info_t = *mut thread_extended_info; +pub type thread_extended_info_data_t = thread_extended_info; + +pub type thread_t = crate::mach_port_t; +pub type thread_policy_flavor_t = natural_t; +pub type thread_policy_t = *mut integer_t; +pub type thread_latency_qos_t = integer_t; +pub type thread_throughput_qos_t = integer_t; +pub type thread_standard_policy_data_t = thread_standard_policy; +pub type thread_standard_policy_t = *mut thread_standard_policy; +pub type thread_extended_policy_data_t = thread_extended_policy; +pub type thread_extended_policy_t = *mut thread_extended_policy; +pub type thread_time_constraint_policy_data_t = thread_time_constraint_policy; +pub type thread_time_constraint_policy_t = *mut thread_time_constraint_policy; +pub type thread_precedence_policy_data_t = thread_precedence_policy; +pub type thread_precedence_policy_t = *mut thread_precedence_policy; +pub type thread_affinity_policy_data_t = thread_affinity_policy; +pub type thread_affinity_policy_t = *mut thread_affinity_policy; +pub type thread_background_policy_data_t = thread_background_policy; +pub type thread_background_policy_t = *mut thread_background_policy; +pub type thread_latency_qos_policy_data_t = thread_latency_qos_policy; +pub type thread_latency_qos_policy_t = *mut thread_latency_qos_policy; +pub type thread_throughput_qos_policy_data_t = thread_throughput_qos_policy; +pub type thread_throughput_qos_policy_t = *mut thread_throughput_qos_policy; + +pub type pthread_introspection_hook_t = + extern "C" fn(event: c_uint, thread: crate::pthread_t, addr: *mut c_void, size: size_t); +pub type pthread_jit_write_callback_t = Option c_int>; + +pub type os_clockid_t = u32; + +pub type os_sync_wait_on_address_flags_t = u32; +pub type os_sync_wake_by_address_flags_t = u32; + +pub type os_unfair_lock = os_unfair_lock_s; +pub type os_unfair_lock_t = *mut os_unfair_lock; + +pub type os_log_t = *mut c_void; +pub type os_log_type_t = u8; +pub type os_signpost_id_t = u64; +pub type os_signpost_type_t = u8; + +pub type vm_statistics_t = *mut vm_statistics; +pub type vm_statistics_data_t = vm_statistics; +pub type vm_statistics64_t = *mut vm_statistics64; +pub type vm_statistics64_data_t = vm_statistics64; + +pub type task_t = crate::mach_port_t; +pub type task_inspect_t = crate::mach_port_t; + +pub type sysdir_search_path_enumeration_state = c_uint; + +pub type CCStatus = i32; +pub type CCCryptorStatus = i32; +pub 
type CCRNGStatus = crate::CCCryptorStatus; + +pub type copyfile_state_t = *mut c_void; +pub type copyfile_flags_t = u32; +pub type copyfile_callback_t = Option< + extern "C" fn( + c_int, + c_int, + copyfile_state_t, + *const c_char, + *const c_char, + *mut c_void, + ) -> c_int, +>; + +pub type attrgroup_t = u32; +pub type vol_capabilities_set_t = [u32; 4]; + +deprecated_mach! { + pub type mach_timebase_info_data_t = mach_timebase_info; +} + +#[derive(Debug)] +pub enum timezone {} +impl Copy for timezone {} +impl Clone for timezone { + fn clone(&self) -> timezone { + *self + } +} + +#[derive(Debug)] +#[repr(u32)] +pub enum qos_class_t { + QOS_CLASS_USER_INTERACTIVE = 0x21, + QOS_CLASS_USER_INITIATED = 0x19, + QOS_CLASS_DEFAULT = 0x15, + QOS_CLASS_UTILITY = 0x11, + QOS_CLASS_BACKGROUND = 0x09, + QOS_CLASS_UNSPECIFIED = 0x00, +} +impl Copy for qos_class_t {} +impl Clone for qos_class_t { + fn clone(&self) -> qos_class_t { + *self + } +} + +#[derive(Debug)] +#[repr(u32)] +pub enum sysdir_search_path_directory_t { + SYSDIR_DIRECTORY_APPLICATION = 1, + SYSDIR_DIRECTORY_DEMO_APPLICATION = 2, + SYSDIR_DIRECTORY_DEVELOPER_APPLICATION = 3, + SYSDIR_DIRECTORY_ADMIN_APPLICATION = 4, + SYSDIR_DIRECTORY_LIBRARY = 5, + SYSDIR_DIRECTORY_DEVELOPER = 6, + SYSDIR_DIRECTORY_USER = 7, + SYSDIR_DIRECTORY_DOCUMENTATION = 8, + SYSDIR_DIRECTORY_DOCUMENT = 9, + SYSDIR_DIRECTORY_CORESERVICE = 10, + SYSDIR_DIRECTORY_AUTOSAVED_INFORMATION = 11, + SYSDIR_DIRECTORY_DESKTOP = 12, + SYSDIR_DIRECTORY_CACHES = 13, + SYSDIR_DIRECTORY_APPLICATION_SUPPORT = 14, + SYSDIR_DIRECTORY_DOWNLOADS = 15, + SYSDIR_DIRECTORY_INPUT_METHODS = 16, + SYSDIR_DIRECTORY_MOVIES = 17, + SYSDIR_DIRECTORY_MUSIC = 18, + SYSDIR_DIRECTORY_PICTURES = 19, + SYSDIR_DIRECTORY_PRINTER_DESCRIPTION = 20, + SYSDIR_DIRECTORY_SHARED_PUBLIC = 21, + SYSDIR_DIRECTORY_PREFERENCE_PANES = 22, + SYSDIR_DIRECTORY_ALL_APPLICATIONS = 100, + SYSDIR_DIRECTORY_ALL_LIBRARIES = 101, +} +impl Copy for sysdir_search_path_directory_t {} +impl Clone for sysdir_search_path_directory_t { + fn clone(&self) -> sysdir_search_path_directory_t { + *self + } +} + +#[derive(Debug)] +#[repr(u32)] +pub enum sysdir_search_path_domain_mask_t { + SYSDIR_DOMAIN_MASK_USER = (1 << 0), + SYSDIR_DOMAIN_MASK_LOCAL = (1 << 1), + SYSDIR_DOMAIN_MASK_NETWORK = (1 << 2), + SYSDIR_DOMAIN_MASK_SYSTEM = (1 << 3), + SYSDIR_DOMAIN_MASK_ALL = 0x0ffff, +} +impl Copy for sysdir_search_path_domain_mask_t {} +impl Clone for sysdir_search_path_domain_mask_t { + fn clone(&self) -> sysdir_search_path_domain_mask_t { + *self + } +} + +s! 
{ + pub struct ip_mreq { + pub imr_multiaddr: in_addr, + pub imr_interface: in_addr, + } + + pub struct ip_mreqn { + pub imr_multiaddr: in_addr, + pub imr_address: in_addr, + pub imr_ifindex: c_int, + } + + pub struct ip_mreq_source { + pub imr_multiaddr: in_addr, + pub imr_sourceaddr: in_addr, + pub imr_interface: in_addr, + } + + pub struct aiocb { + pub aio_fildes: c_int, + pub aio_offset: off_t, + pub aio_buf: *mut c_void, + pub aio_nbytes: size_t, + pub aio_reqprio: c_int, + pub aio_sigevent: sigevent, + pub aio_lio_opcode: c_int, + } + + pub struct glob_t { + pub gl_pathc: size_t, + __unused1: c_int, + pub gl_offs: size_t, + __unused2: c_int, + pub gl_pathv: *mut *mut c_char, + + __unused3: *mut c_void, + + __unused4: *mut c_void, + __unused5: *mut c_void, + __unused6: *mut c_void, + __unused7: *mut c_void, + __unused8: *mut c_void, + } + + pub struct addrinfo { + pub ai_flags: c_int, + pub ai_family: c_int, + pub ai_socktype: c_int, + pub ai_protocol: c_int, + pub ai_addrlen: crate::socklen_t, + pub ai_canonname: *mut c_char, + pub ai_addr: *mut crate::sockaddr, + pub ai_next: *mut addrinfo, + } + + #[deprecated(since = "0.2.55", note = "Use the `mach2` crate instead")] + pub struct mach_timebase_info { + pub numer: u32, + pub denom: u32, + } + + pub struct stat { + pub st_dev: dev_t, + pub st_mode: mode_t, + pub st_nlink: nlink_t, + pub st_ino: ino_t, + pub st_uid: crate::uid_t, + pub st_gid: crate::gid_t, + pub st_rdev: dev_t, + pub st_atime: time_t, + pub st_atime_nsec: c_long, + pub st_mtime: time_t, + pub st_mtime_nsec: c_long, + pub st_ctime: time_t, + pub st_ctime_nsec: c_long, + pub st_birthtime: time_t, + pub st_birthtime_nsec: c_long, + pub st_size: off_t, + pub st_blocks: crate::blkcnt_t, + pub st_blksize: blksize_t, + pub st_flags: u32, + pub st_gen: u32, + pub st_lspare: i32, + pub st_qspare: [i64; 2], + } + + pub struct pthread_mutexattr_t { + __sig: c_long, + __opaque: [u8; 8], + } + + pub struct pthread_condattr_t { + __sig: c_long, + __opaque: [u8; __PTHREAD_CONDATTR_SIZE__], + } + + pub struct pthread_rwlockattr_t { + __sig: c_long, + __opaque: [u8; __PTHREAD_RWLOCKATTR_SIZE__], + } + + pub struct siginfo_t { + pub si_signo: c_int, + pub si_errno: c_int, + pub si_code: c_int, + pub si_pid: crate::pid_t, + pub si_uid: crate::uid_t, + pub si_status: c_int, + pub si_addr: *mut c_void, + //Requires it to be union for tests + //pub si_value: crate::sigval, + _pad: [usize; 9], + } + + pub struct sigaction { + // FIXME(union): this field is actually a union + pub sa_sigaction: crate::sighandler_t, + pub sa_mask: sigset_t, + pub sa_flags: c_int, + } + + pub struct stack_t { + pub ss_sp: *mut c_void, + pub ss_size: size_t, + pub ss_flags: c_int, + } + + pub struct fstore_t { + pub fst_flags: c_uint, + pub fst_posmode: c_int, + pub fst_offset: off_t, + pub fst_length: off_t, + pub fst_bytesalloc: off_t, + } + + pub struct fpunchhole_t { + pub fp_flags: c_uint, /* unused */ + pub reserved: c_uint, /* (to maintain 8-byte alignment) */ + pub fp_offset: off_t, /* IN: start of the region */ + pub fp_length: off_t, /* IN: size of the region */ + } + + pub struct ftrimactivefile_t { + pub fta_offset: off_t, + pub fta_length: off_t, + } + + pub struct fspecread_t { + pub fsr_flags: c_uint, + pub reserved: c_uint, + pub fsr_offset: off_t, + pub fsr_length: off_t, + } + + pub struct radvisory { + pub ra_offset: off_t, + pub ra_count: c_int, + } + + pub struct statvfs { + pub f_bsize: c_ulong, + pub f_frsize: c_ulong, + pub f_blocks: crate::fsblkcnt_t, + pub f_bfree: crate::fsblkcnt_t, 
+ pub f_bavail: crate::fsblkcnt_t, + pub f_files: crate::fsfilcnt_t, + pub f_ffree: crate::fsfilcnt_t, + pub f_favail: crate::fsfilcnt_t, + pub f_fsid: c_ulong, + pub f_flag: c_ulong, + pub f_namemax: c_ulong, + } + + pub struct Dl_info { + pub dli_fname: *const c_char, + pub dli_fbase: *mut c_void, + pub dli_sname: *const c_char, + pub dli_saddr: *mut c_void, + } + + pub struct sockaddr_in { + pub sin_len: u8, + pub sin_family: crate::sa_family_t, + pub sin_port: crate::in_port_t, + pub sin_addr: crate::in_addr, + pub sin_zero: [c_char; 8], + } + + pub struct kevent64_s { + pub ident: u64, + pub filter: i16, + pub flags: u16, + pub fflags: u32, + pub data: i64, + pub udata: u64, + pub ext: [u64; 2], + } + + pub struct dqblk { + pub dqb_bhardlimit: u64, + pub dqb_bsoftlimit: u64, + pub dqb_curbytes: u64, + pub dqb_ihardlimit: u32, + pub dqb_isoftlimit: u32, + pub dqb_curinodes: u32, + pub dqb_btime: u32, + pub dqb_itime: u32, + pub dqb_id: u32, + pub dqb_spare: [u32; 4], + } + + pub struct if_msghdr { + pub ifm_msglen: c_ushort, + pub ifm_version: c_uchar, + pub ifm_type: c_uchar, + pub ifm_addrs: c_int, + pub ifm_flags: c_int, + pub ifm_index: c_ushort, + pub ifm_data: if_data, + } + + pub struct ifa_msghdr { + pub ifam_msglen: c_ushort, + pub ifam_version: c_uchar, + pub ifam_type: c_uchar, + pub ifam_addrs: c_int, + pub ifam_flags: c_int, + pub ifam_index: c_ushort, + pub ifam_metric: c_int, + } + + pub struct ifma_msghdr { + pub ifmam_msglen: c_ushort, + pub ifmam_version: c_uchar, + pub ifmam_type: c_uchar, + pub ifmam_addrs: c_int, + pub ifmam_flags: c_int, + pub ifmam_index: c_ushort, + } + + pub struct ifma_msghdr2 { + pub ifmam_msglen: c_ushort, + pub ifmam_version: c_uchar, + pub ifmam_type: c_uchar, + pub ifmam_addrs: c_int, + pub ifmam_flags: c_int, + pub ifmam_index: c_ushort, + pub ifmam_refcount: i32, + } + + pub struct rt_metrics { + pub rmx_locks: u32, + pub rmx_mtu: u32, + pub rmx_hopcount: u32, + pub rmx_expire: i32, + pub rmx_recvpipe: u32, + pub rmx_sendpipe: u32, + pub rmx_ssthresh: u32, + pub rmx_rtt: u32, + pub rmx_rttvar: u32, + pub rmx_pksent: u32, + /// This field does not exist anymore, the u32 is now part of a resized + /// `rmx_filler` array. 
+ pub rmx_state: u32, + pub rmx_filler: [u32; 3], + } + + pub struct rt_msghdr { + pub rtm_msglen: c_ushort, + pub rtm_version: c_uchar, + pub rtm_type: c_uchar, + pub rtm_index: c_ushort, + pub rtm_flags: c_int, + pub rtm_addrs: c_int, + pub rtm_pid: crate::pid_t, + pub rtm_seq: c_int, + pub rtm_errno: c_int, + pub rtm_use: c_int, + pub rtm_inits: u32, + pub rtm_rmx: rt_metrics, + } + + pub struct rt_msghdr2 { + pub rtm_msglen: c_ushort, + pub rtm_version: c_uchar, + pub rtm_type: c_uchar, + pub rtm_index: c_ushort, + pub rtm_flags: c_int, + pub rtm_addrs: c_int, + pub rtm_refcnt: i32, + pub rtm_parentflags: c_int, + pub rtm_reserved: c_int, + pub rtm_use: c_int, + pub rtm_inits: u32, + pub rtm_rmx: rt_metrics, + } + + pub struct termios { + pub c_iflag: crate::tcflag_t, + pub c_oflag: crate::tcflag_t, + pub c_cflag: crate::tcflag_t, + pub c_lflag: crate::tcflag_t, + pub c_cc: [crate::cc_t; crate::NCCS], + pub c_ispeed: crate::speed_t, + pub c_ospeed: crate::speed_t, + } + + pub struct flock { + pub l_start: off_t, + pub l_len: off_t, + pub l_pid: crate::pid_t, + pub l_type: c_short, + pub l_whence: c_short, + } + + pub struct sf_hdtr { + pub headers: *mut crate::iovec, + pub hdr_cnt: c_int, + pub trailers: *mut crate::iovec, + pub trl_cnt: c_int, + } + + pub struct lconv { + pub decimal_point: *mut c_char, + pub thousands_sep: *mut c_char, + pub grouping: *mut c_char, + pub int_curr_symbol: *mut c_char, + pub currency_symbol: *mut c_char, + pub mon_decimal_point: *mut c_char, + pub mon_thousands_sep: *mut c_char, + pub mon_grouping: *mut c_char, + pub positive_sign: *mut c_char, + pub negative_sign: *mut c_char, + pub int_frac_digits: c_char, + pub frac_digits: c_char, + pub p_cs_precedes: c_char, + pub p_sep_by_space: c_char, + pub n_cs_precedes: c_char, + pub n_sep_by_space: c_char, + pub p_sign_posn: c_char, + pub n_sign_posn: c_char, + pub int_p_cs_precedes: c_char, + pub int_n_cs_precedes: c_char, + pub int_p_sep_by_space: c_char, + pub int_n_sep_by_space: c_char, + pub int_p_sign_posn: c_char, + pub int_n_sign_posn: c_char, + } + + pub struct proc_taskinfo { + pub pti_virtual_size: u64, + pub pti_resident_size: u64, + pub pti_total_user: u64, + pub pti_total_system: u64, + pub pti_threads_user: u64, + pub pti_threads_system: u64, + pub pti_policy: i32, + pub pti_faults: i32, + pub pti_pageins: i32, + pub pti_cow_faults: i32, + pub pti_messages_sent: i32, + pub pti_messages_received: i32, + pub pti_syscalls_mach: i32, + pub pti_syscalls_unix: i32, + pub pti_csw: i32, + pub pti_threadnum: i32, + pub pti_numrunning: i32, + pub pti_priority: i32, + } + + pub struct proc_bsdinfo { + pub pbi_flags: u32, + pub pbi_status: u32, + pub pbi_xstatus: u32, + pub pbi_pid: u32, + pub pbi_ppid: u32, + pub pbi_uid: crate::uid_t, + pub pbi_gid: crate::gid_t, + pub pbi_ruid: crate::uid_t, + pub pbi_rgid: crate::gid_t, + pub pbi_svuid: crate::uid_t, + pub pbi_svgid: crate::gid_t, + pub rfu_1: u32, + pub pbi_comm: [c_char; MAXCOMLEN], + pub pbi_name: [c_char; 32], // MAXCOMLEN * 2, but macro isn't happy... 
+ pub pbi_nfiles: u32, + pub pbi_pgid: u32, + pub pbi_pjobc: u32, + pub e_tdev: u32, + pub e_tpgid: u32, + pub pbi_nice: i32, + pub pbi_start_tvsec: u64, + pub pbi_start_tvusec: u64, + } + + pub struct proc_taskallinfo { + pub pbsd: proc_bsdinfo, + pub ptinfo: proc_taskinfo, + } + + pub struct xsw_usage { + pub xsu_total: u64, + pub xsu_avail: u64, + pub xsu_used: u64, + pub xsu_pagesize: u32, + pub xsu_encrypted: crate::boolean_t, + } + + pub struct xucred { + pub cr_version: c_uint, + pub cr_uid: crate::uid_t, + pub cr_ngroups: c_short, + pub cr_groups: [crate::gid_t; 16], + } + + #[deprecated(since = "0.2.55", note = "Use the `mach2` crate instead")] + pub struct mach_header { + pub magic: u32, + pub cputype: cpu_type_t, + pub cpusubtype: cpu_subtype_t, + pub filetype: u32, + pub ncmds: u32, + pub sizeofcmds: u32, + pub flags: u32, + } + + #[deprecated(since = "0.2.55", note = "Use the `mach2` crate instead")] + pub struct mach_header_64 { + pub magic: u32, + pub cputype: cpu_type_t, + pub cpusubtype: cpu_subtype_t, + pub filetype: u32, + pub ncmds: u32, + pub sizeofcmds: u32, + pub flags: u32, + pub reserved: u32, + } + + pub struct segment_command { + pub cmd: u32, + pub cmdsize: u32, + pub segname: [c_char; 16], + pub vmaddr: u32, + pub vmsize: u32, + pub fileoff: u32, + pub filesize: u32, + pub maxprot: vm_prot_t, + pub initprot: vm_prot_t, + pub nsects: u32, + pub flags: u32, + } + + pub struct segment_command_64 { + pub cmd: u32, + pub cmdsize: u32, + pub segname: [c_char; 16], + pub vmaddr: u64, + pub vmsize: u64, + pub fileoff: u64, + pub filesize: u64, + pub maxprot: vm_prot_t, + pub initprot: vm_prot_t, + pub nsects: u32, + pub flags: u32, + } + + pub struct load_command { + pub cmd: u32, + pub cmdsize: u32, + } + + pub struct sockaddr_dl { + pub sdl_len: c_uchar, + pub sdl_family: c_uchar, + pub sdl_index: c_ushort, + pub sdl_type: c_uchar, + pub sdl_nlen: c_uchar, + pub sdl_alen: c_uchar, + pub sdl_slen: c_uchar, + pub sdl_data: [c_char; 12], + } + + pub struct sockaddr_inarp { + pub sin_len: c_uchar, + pub sin_family: c_uchar, + pub sin_port: c_ushort, + pub sin_addr: crate::in_addr, + pub sin_srcaddr: crate::in_addr, + pub sin_tos: c_ushort, + pub sin_other: c_ushort, + } + + pub struct sockaddr_ctl { + pub sc_len: c_uchar, + pub sc_family: c_uchar, + pub ss_sysaddr: u16, + pub sc_id: u32, + pub sc_unit: u32, + pub sc_reserved: [u32; 5], + } + + pub struct in_pktinfo { + pub ipi_ifindex: c_uint, + pub ipi_spec_dst: crate::in_addr, + pub ipi_addr: crate::in_addr, + } + + pub struct in6_pktinfo { + pub ipi6_addr: crate::in6_addr, + pub ipi6_ifindex: c_uint, + } + + // sys/ipc.h: + + pub struct ipc_perm { + pub uid: crate::uid_t, + pub gid: crate::gid_t, + pub cuid: crate::uid_t, + pub cgid: crate::gid_t, + pub mode: mode_t, + pub _seq: c_ushort, + pub _key: crate::key_t, + } + + // sys/sem.h + + pub struct sembuf { + pub sem_num: c_ushort, + pub sem_op: c_short, + pub sem_flg: c_short, + } + + // sys/shm.h + + pub struct arphdr { + pub ar_hrd: u16, + pub ar_pro: u16, + pub ar_hln: u8, + pub ar_pln: u8, + pub ar_op: u16, + } + + pub struct in_addr { + pub s_addr: crate::in_addr_t, + } + + // net/ndrv.h + pub struct sockaddr_ndrv { + pub snd_len: c_uchar, + pub snd_family: c_uchar, + pub snd_name: [c_uchar; crate::IFNAMSIZ], + } + + // sys/socket.h + + pub struct sa_endpoints_t { + pub sae_srcif: c_uint, // optional source interface + pub sae_srcaddr: *const crate::sockaddr, // optional source address + pub sae_srcaddrlen: crate::socklen_t, // size of source address + pub 
sae_dstaddr: *const crate::sockaddr, // destination address + pub sae_dstaddrlen: crate::socklen_t, // size of destination address + } + + pub struct timex { + pub modes: c_uint, + pub offset: c_long, + pub freq: c_long, + pub maxerror: c_long, + pub esterror: c_long, + pub status: c_int, + pub constant: c_long, + pub precision: c_long, + pub tolerance: c_long, + pub ppsfreq: c_long, + pub jitter: c_long, + pub shift: c_int, + pub stabil: c_long, + pub jitcnt: c_long, + pub calcnt: c_long, + pub errcnt: c_long, + pub stbcnt: c_long, + } + + pub struct ntptimeval { + pub time: crate::timespec, + pub maxerror: c_long, + pub esterror: c_long, + pub tai: c_long, + pub time_state: c_int, + } + + pub struct thread_standard_policy { + pub no_data: natural_t, + } + + pub struct thread_extended_policy { + pub timeshare: boolean_t, + } + + pub struct thread_time_constraint_policy { + pub period: u32, + pub computation: u32, + pub constraint: u32, + pub preemptible: boolean_t, + } + + pub struct thread_precedence_policy { + pub importance: integer_t, + } + + pub struct thread_affinity_policy { + pub affinity_tag: integer_t, + } + + pub struct thread_background_policy { + pub priority: integer_t, + } + + pub struct thread_latency_qos_policy { + pub thread_latency_qos_tier: thread_latency_qos_t, + } + + pub struct thread_throughput_qos_policy { + pub thread_throughput_qos_tier: thread_throughput_qos_t, + } + + // malloc/malloc.h + pub struct malloc_statistics_t { + pub blocks_in_use: c_uint, + pub size_in_use: size_t, + pub max_size_in_use: size_t, + pub size_allocated: size_t, + } + + pub struct mstats { + pub bytes_total: size_t, + pub chunks_used: size_t, + pub bytes_used: size_t, + pub chunks_free: size_t, + pub bytes_free: size_t, + } + + pub struct vm_range_t { + pub address: crate::vm_address_t, + pub size: crate::vm_size_t, + } + + // sched.h + pub struct sched_param { + pub sched_priority: c_int, + __opaque: [c_char; 4], + } + + pub struct vinfo_stat { + pub vst_dev: u32, + pub vst_mode: u16, + pub vst_nlink: u16, + pub vst_ino: u64, + pub vst_uid: crate::uid_t, + pub vst_gid: crate::gid_t, + pub vst_atime: i64, + pub vst_atimensec: i64, + pub vst_mtime: i64, + pub vst_mtimensec: i64, + pub vst_ctime: i64, + pub vst_ctimensec: i64, + pub vst_birthtime: i64, + pub vst_birthtimensec: i64, + pub vst_size: off_t, + pub vst_blocks: i64, + pub vst_blksize: i32, + pub vst_flags: u32, + pub vst_gen: u32, + pub vst_rdev: u32, + pub vst_qspare: [i64; 2], + } + + pub struct vnode_info { + pub vi_stat: vinfo_stat, + pub vi_type: c_int, + pub vi_pad: c_int, + pub vi_fsid: crate::fsid_t, + } + + pub struct vnode_info_path { + pub vip_vi: vnode_info, + // Normally it's `vip_path: [c_char; MAXPATHLEN]` but because libc supports an old rustc + // version, we go around this limitation like this. 
+ pub vip_path: [[c_char; 32]; 32], + } + + pub struct proc_vnodepathinfo { + pub pvi_cdir: vnode_info_path, + pub pvi_rdir: vnode_info_path, + } + + pub struct vm_statistics { + pub free_count: natural_t, + pub active_count: natural_t, + pub inactive_count: natural_t, + pub wire_count: natural_t, + pub zero_fill_count: natural_t, + pub reactivations: natural_t, + pub pageins: natural_t, + pub pageouts: natural_t, + pub faults: natural_t, + pub cow_faults: natural_t, + pub lookups: natural_t, + pub hits: natural_t, + pub purgeable_count: natural_t, + pub purges: natural_t, + pub speculative_count: natural_t, + } + + pub struct task_thread_times_info { + pub user_time: time_value_t, + pub system_time: time_value_t, + } + + pub struct rusage_info_v0 { + pub ri_uuid: [u8; 16], + pub ri_user_time: u64, + pub ri_system_time: u64, + pub ri_pkg_idle_wkups: u64, + pub ri_interrupt_wkups: u64, + pub ri_pageins: u64, + pub ri_wired_size: u64, + pub ri_resident_size: u64, + pub ri_phys_footprint: u64, + pub ri_proc_start_abstime: u64, + pub ri_proc_exit_abstime: u64, + } + + pub struct rusage_info_v1 { + pub ri_uuid: [u8; 16], + pub ri_user_time: u64, + pub ri_system_time: u64, + pub ri_pkg_idle_wkups: u64, + pub ri_interrupt_wkups: u64, + pub ri_pageins: u64, + pub ri_wired_size: u64, + pub ri_resident_size: u64, + pub ri_phys_footprint: u64, + pub ri_proc_start_abstime: u64, + pub ri_proc_exit_abstime: u64, + pub ri_child_user_time: u64, + pub ri_child_system_time: u64, + pub ri_child_pkg_idle_wkups: u64, + pub ri_child_interrupt_wkups: u64, + pub ri_child_pageins: u64, + pub ri_child_elapsed_abstime: u64, + } + + pub struct rusage_info_v2 { + pub ri_uuid: [u8; 16], + pub ri_user_time: u64, + pub ri_system_time: u64, + pub ri_pkg_idle_wkups: u64, + pub ri_interrupt_wkups: u64, + pub ri_pageins: u64, + pub ri_wired_size: u64, + pub ri_resident_size: u64, + pub ri_phys_footprint: u64, + pub ri_proc_start_abstime: u64, + pub ri_proc_exit_abstime: u64, + pub ri_child_user_time: u64, + pub ri_child_system_time: u64, + pub ri_child_pkg_idle_wkups: u64, + pub ri_child_interrupt_wkups: u64, + pub ri_child_pageins: u64, + pub ri_child_elapsed_abstime: u64, + pub ri_diskio_bytesread: u64, + pub ri_diskio_byteswritten: u64, + } + + pub struct rusage_info_v3 { + pub ri_uuid: [u8; 16], + pub ri_user_time: u64, + pub ri_system_time: u64, + pub ri_pkg_idle_wkups: u64, + pub ri_interrupt_wkups: u64, + pub ri_pageins: u64, + pub ri_wired_size: u64, + pub ri_resident_size: u64, + pub ri_phys_footprint: u64, + pub ri_proc_start_abstime: u64, + pub ri_proc_exit_abstime: u64, + pub ri_child_user_time: u64, + pub ri_child_system_time: u64, + pub ri_child_pkg_idle_wkups: u64, + pub ri_child_interrupt_wkups: u64, + pub ri_child_pageins: u64, + pub ri_child_elapsed_abstime: u64, + pub ri_diskio_bytesread: u64, + pub ri_diskio_byteswritten: u64, + pub ri_cpu_time_qos_default: u64, + pub ri_cpu_time_qos_maintenance: u64, + pub ri_cpu_time_qos_background: u64, + pub ri_cpu_time_qos_utility: u64, + pub ri_cpu_time_qos_legacy: u64, + pub ri_cpu_time_qos_user_initiated: u64, + pub ri_cpu_time_qos_user_interactive: u64, + pub ri_billed_system_time: u64, + pub ri_serviced_system_time: u64, + } + + pub struct rusage_info_v4 { + pub ri_uuid: [u8; 16], + pub ri_user_time: u64, + pub ri_system_time: u64, + pub ri_pkg_idle_wkups: u64, + pub ri_interrupt_wkups: u64, + pub ri_pageins: u64, + pub ri_wired_size: u64, + pub ri_resident_size: u64, + pub ri_phys_footprint: u64, + pub ri_proc_start_abstime: u64, + pub ri_proc_exit_abstime: 
u64, + pub ri_child_user_time: u64, + pub ri_child_system_time: u64, + pub ri_child_pkg_idle_wkups: u64, + pub ri_child_interrupt_wkups: u64, + pub ri_child_pageins: u64, + pub ri_child_elapsed_abstime: u64, + pub ri_diskio_bytesread: u64, + pub ri_diskio_byteswritten: u64, + pub ri_cpu_time_qos_default: u64, + pub ri_cpu_time_qos_maintenance: u64, + pub ri_cpu_time_qos_background: u64, + pub ri_cpu_time_qos_utility: u64, + pub ri_cpu_time_qos_legacy: u64, + pub ri_cpu_time_qos_user_initiated: u64, + pub ri_cpu_time_qos_user_interactive: u64, + pub ri_billed_system_time: u64, + pub ri_serviced_system_time: u64, + pub ri_logical_writes: u64, + pub ri_lifetime_max_phys_footprint: u64, + pub ri_instructions: u64, + pub ri_cycles: u64, + pub ri_billed_energy: u64, + pub ri_serviced_energy: u64, + pub ri_interval_max_phys_footprint: u64, + pub ri_runnable_time: u64, + } + + pub struct image_offset { + pub uuid: crate::uuid_t, + pub offset: u32, + } + + pub struct attrlist { + pub bitmapcount: c_ushort, + pub reserved: u16, + pub commonattr: attrgroup_t, + pub volattr: attrgroup_t, + pub dirattr: attrgroup_t, + pub fileattr: attrgroup_t, + pub forkattr: attrgroup_t, + } + + pub struct attrreference_t { + pub attr_dataoffset: i32, + pub attr_length: u32, + } + + pub struct vol_capabilities_attr_t { + pub capabilities: vol_capabilities_set_t, + pub valid: vol_capabilities_set_t, + } + + pub struct attribute_set_t { + pub commonattr: attrgroup_t, + pub volattr: attrgroup_t, + pub dirattr: attrgroup_t, + pub fileattr: attrgroup_t, + pub forkattr: attrgroup_t, + } + + pub struct vol_attributes_attr_t { + pub validattr: attribute_set_t, + pub nativeattr: attribute_set_t, + } + + #[repr(align(8))] + pub struct tcp_connection_info { + pub tcpi_state: u8, + pub tcpi_snd_wscale: u8, + pub tcpi_rcv_wscale: u8, + __pad1: u8, + pub tcpi_options: u32, + pub tcpi_flags: u32, + pub tcpi_rto: u32, + pub tcpi_maxseg: u32, + pub tcpi_snd_ssthresh: u32, + pub tcpi_snd_cwnd: u32, + pub tcpi_snd_wnd: u32, + pub tcpi_snd_sbbytes: u32, + pub tcpi_rcv_wnd: u32, + pub tcpi_rttcur: u32, + pub tcpi_srtt: u32, + pub tcpi_rttvar: u32, + pub tcpi_tfo_cookie_req: u32, + pub tcpi_tfo_cookie_rcv: u32, + pub tcpi_tfo_syn_loss: u32, + pub tcpi_tfo_syn_data_sent: u32, + pub tcpi_tfo_syn_data_acked: u32, + pub tcpi_tfo_syn_data_rcv: u32, + pub tcpi_tfo_cookie_req_rcv: u32, + pub tcpi_tfo_cookie_sent: u32, + pub tcpi_tfo_cookie_invalid: u32, + pub tcpi_tfo_cookie_wrong: u32, + pub tcpi_tfo_no_cookie_rcv: u32, + pub tcpi_tfo_heuristics_disable: u32, + pub tcpi_tfo_send_blackhole: u32, + pub tcpi_tfo_recv_blackhole: u32, + pub tcpi_tfo_onebyte_proxy: u32, + __pad2: u32, + pub tcpi_txpackets: u64, + pub tcpi_txbytes: u64, + pub tcpi_txretransmitbytes: u64, + pub tcpi_rxpackets: u64, + pub tcpi_rxbytes: u64, + pub tcpi_rxoutoforderbytes: u64, + pub tcpi_rxretransmitpackets: u64, + } + + pub struct in6_addrlifetime { + pub ia6t_expire: time_t, + pub ia6t_preferred: time_t, + pub ia6t_vltime: u32, + pub ia6t_pltime: u32, + } + + pub struct in6_ifstat { + pub ifs6_in_receive: crate::u_quad_t, + pub ifs6_in_hdrerr: crate::u_quad_t, + pub ifs6_in_toobig: crate::u_quad_t, + pub ifs6_in_noroute: crate::u_quad_t, + pub ifs6_in_addrerr: crate::u_quad_t, + pub ifs6_in_protounknown: crate::u_quad_t, + pub ifs6_in_truncated: crate::u_quad_t, + pub ifs6_in_discard: crate::u_quad_t, + pub ifs6_in_deliver: crate::u_quad_t, + pub ifs6_out_forward: crate::u_quad_t, + pub ifs6_out_request: crate::u_quad_t, + pub ifs6_out_discard: crate::u_quad_t, + pub 
ifs6_out_fragok: crate::u_quad_t, + pub ifs6_out_fragfail: crate::u_quad_t, + pub ifs6_out_fragcreat: crate::u_quad_t, + pub ifs6_reass_reqd: crate::u_quad_t, + pub ifs6_reass_ok: crate::u_quad_t, + pub ifs6_atmfrag_rcvd: crate::u_quad_t, + pub ifs6_reass_fail: crate::u_quad_t, + pub ifs6_in_mcast: crate::u_quad_t, + pub ifs6_out_mcast: crate::u_quad_t, + pub ifs6_cantfoward_icmp6: crate::u_quad_t, + pub ifs6_addr_expiry_cnt: crate::u_quad_t, + pub ifs6_pfx_expiry_cnt: crate::u_quad_t, + pub ifs6_defrtr_expiry_cnt: crate::u_quad_t, + } + + pub struct icmp6_ifstat { + pub ifs6_in_msg: crate::u_quad_t, + pub ifs6_in_error: crate::u_quad_t, + pub ifs6_in_dstunreach: crate::u_quad_t, + pub ifs6_in_adminprohib: crate::u_quad_t, + pub ifs6_in_timeexceed: crate::u_quad_t, + pub ifs6_in_paramprob: crate::u_quad_t, + pub ifs6_in_pkttoobig: crate::u_quad_t, + pub ifs6_in_echo: crate::u_quad_t, + pub ifs6_in_echoreply: crate::u_quad_t, + pub ifs6_in_routersolicit: crate::u_quad_t, + pub ifs6_in_routeradvert: crate::u_quad_t, + pub ifs6_in_neighborsolicit: crate::u_quad_t, + pub ifs6_in_neighboradvert: crate::u_quad_t, + pub ifs6_in_redirect: crate::u_quad_t, + pub ifs6_in_mldquery: crate::u_quad_t, + pub ifs6_in_mldreport: crate::u_quad_t, + pub ifs6_in_mlddone: crate::u_quad_t, + pub ifs6_out_msg: crate::u_quad_t, + pub ifs6_out_error: crate::u_quad_t, + pub ifs6_out_dstunreach: crate::u_quad_t, + pub ifs6_out_adminprohib: crate::u_quad_t, + pub ifs6_out_timeexceed: crate::u_quad_t, + pub ifs6_out_paramprob: crate::u_quad_t, + pub ifs6_out_pkttoobig: crate::u_quad_t, + pub ifs6_out_echo: crate::u_quad_t, + pub ifs6_out_echoreply: crate::u_quad_t, + pub ifs6_out_routersolicit: crate::u_quad_t, + pub ifs6_out_routeradvert: crate::u_quad_t, + pub ifs6_out_neighborsolicit: crate::u_quad_t, + pub ifs6_out_neighboradvert: crate::u_quad_t, + pub ifs6_out_redirect: crate::u_quad_t, + pub ifs6_out_mldquery: crate::u_quad_t, + pub ifs6_out_mldreport: crate::u_quad_t, + pub ifs6_out_mlddone: crate::u_quad_t, + } + + // mach/host_info.h + pub struct host_cpu_load_info { + pub cpu_ticks: [crate::natural_t; CPU_STATE_MAX as usize], + } + + // net/if_mib.h + pub struct ifmibdata { + /// Name of interface + pub ifmd_name: [c_char; crate::IFNAMSIZ], + /// Number of promiscuous listeners + pub ifmd_pcount: c_uint, + /// Interface flags + pub ifmd_flags: c_uint, + /// Instantaneous length of send queue + pub ifmd_snd_len: c_uint, + /// Maximum length of send queue + pub ifmd_snd_maxlen: c_uint, + /// Number of drops in send queue + pub ifmd_snd_drops: c_uint, + /// For future expansion + pub ifmd_filler: [c_uint; 4], + /// Generic information and statistics + pub ifmd_data: if_data64, + } + + pub struct ifs_iso_8802_3 { + pub dot3StatsAlignmentErrors: u32, + pub dot3StatsFCSErrors: u32, + pub dot3StatsSingleCollisionFrames: u32, + pub dot3StatsMultipleCollisionFrames: u32, + pub dot3StatsSQETestErrors: u32, + pub dot3StatsDeferredTransmissions: u32, + pub dot3StatsLateCollisions: u32, + pub dot3StatsExcessiveCollisions: u32, + pub dot3StatsInternalMacTransmitErrors: u32, + pub dot3StatsCarrierSenseErrors: u32, + pub dot3StatsFrameTooLongs: u32, + pub dot3StatsInternalMacReceiveErrors: u32, + pub dot3StatsEtherChipSet: u32, + pub dot3StatsMissedFrames: u32, + pub dot3StatsCollFrequencies: [u32; 16], + pub dot3Compliance: u32, + } + + // kern_control.h + pub struct ctl_info { + pub ctl_id: u32, + pub ctl_name: [c_char; MAX_KCTL_NAME], + } + + // sys/proc_info.h + pub struct proc_fdinfo { + pub proc_fd: i32, + pub 
proc_fdtype: u32, + } +} + +s_no_extra_traits! { + #[repr(packed(4))] + pub struct ifconf { + pub ifc_len: c_int, + pub ifc_ifcu: __c_anonymous_ifc_ifcu, + } + + #[repr(packed(4))] + pub struct kevent { + pub ident: crate::uintptr_t, + pub filter: i16, + pub flags: u16, + pub fflags: u32, + pub data: intptr_t, + pub udata: *mut c_void, + } + + #[repr(packed(4))] + pub struct semid_ds { + // Note the manpage shows different types than the system header. + pub sem_perm: ipc_perm, + pub sem_base: i32, + pub sem_nsems: c_ushort, + pub sem_otime: crate::time_t, + pub sem_pad1: i32, + pub sem_ctime: crate::time_t, + pub sem_pad2: i32, + pub sem_pad3: [i32; 4], + } + + #[repr(packed(4))] + pub struct shmid_ds { + pub shm_perm: ipc_perm, + pub shm_segsz: size_t, + pub shm_lpid: crate::pid_t, + pub shm_cpid: crate::pid_t, + pub shm_nattch: crate::shmatt_t, + pub shm_atime: crate::time_t, // FIXME(macos): 64-bit wrong align => wrong offset + pub shm_dtime: crate::time_t, // FIXME(macos): 64-bit wrong align => wrong offset + pub shm_ctime: crate::time_t, // FIXME(macos): 64-bit wrong align => wrong offset + // FIXME: 64-bit wrong align => wrong offset: + pub shm_internal: *mut c_void, + } + + pub struct proc_threadinfo { + pub pth_user_time: u64, + pub pth_system_time: u64, + pub pth_cpu_usage: i32, + pub pth_policy: i32, + pub pth_run_state: i32, + pub pth_flags: i32, + pub pth_sleep_time: i32, + pub pth_curpri: i32, + pub pth_priority: i32, + pub pth_maxpriority: i32, + pub pth_name: [c_char; MAXTHREADNAMESIZE], + } + + pub struct statfs { + pub f_bsize: u32, + pub f_iosize: i32, + pub f_blocks: u64, + pub f_bfree: u64, + pub f_bavail: u64, + pub f_files: u64, + pub f_ffree: u64, + pub f_fsid: crate::fsid_t, + pub f_owner: crate::uid_t, + pub f_type: u32, + pub f_flags: u32, + pub f_fssubtype: u32, + pub f_fstypename: [c_char; 16], + pub f_mntonname: [c_char; 1024], + pub f_mntfromname: [c_char; 1024], + pub f_flags_ext: u32, + pub f_reserved: [u32; 7], + } + + pub struct dirent { + pub d_ino: u64, + pub d_seekoff: u64, + pub d_reclen: u16, + pub d_namlen: u16, + pub d_type: u8, + pub d_name: [c_char; 1024], + } + + pub struct pthread_rwlock_t { + __sig: c_long, + __opaque: [u8; __PTHREAD_RWLOCK_SIZE__], + } + + pub struct pthread_mutex_t { + __sig: c_long, + __opaque: [u8; __PTHREAD_MUTEX_SIZE__], + } + + pub struct pthread_cond_t { + __sig: c_long, + __opaque: [u8; __PTHREAD_COND_SIZE__], + } + + pub struct sockaddr_storage { + pub ss_len: u8, + pub ss_family: crate::sa_family_t, + __ss_pad1: [u8; 6], + __ss_align: i64, + __ss_pad2: [u8; 112], + } + + pub struct utmpx { + pub ut_user: [c_char; _UTX_USERSIZE], + pub ut_id: [c_char; _UTX_IDSIZE], + pub ut_line: [c_char; _UTX_LINESIZE], + pub ut_pid: crate::pid_t, + pub ut_type: c_short, + pub ut_tv: crate::timeval, + pub ut_host: [c_char; _UTX_HOSTSIZE], + ut_pad: [u32; 16], + } + + pub struct sigevent { + pub sigev_notify: c_int, + pub sigev_signo: c_int, + pub sigev_value: crate::sigval, + __unused1: *mut c_void, //actually a function pointer + pub sigev_notify_attributes: *mut crate::pthread_attr_t, + } + + pub struct processor_cpu_load_info { + pub cpu_ticks: [c_uint; CPU_STATE_MAX as usize], + } + + pub struct processor_basic_info { + pub cpu_type: cpu_type_t, + pub cpu_subtype: cpu_subtype_t, + pub running: crate::boolean_t, + pub slot_num: c_int, + pub is_master: crate::boolean_t, + } + + pub struct processor_set_basic_info { + pub processor_count: c_int, + pub default_policy: c_int, + } + + pub struct processor_set_load_info { + pub 
task_count: c_int, + pub thread_count: c_int, + pub load_average: integer_t, + pub mach_factor: integer_t, + } + + pub struct time_value_t { + pub seconds: integer_t, + pub microseconds: integer_t, + } + + pub struct thread_basic_info { + pub user_time: time_value_t, + pub system_time: time_value_t, + pub cpu_usage: crate::integer_t, + pub policy: crate::policy_t, + pub run_state: crate::integer_t, + pub flags: crate::integer_t, + pub suspend_count: crate::integer_t, + pub sleep_time: crate::integer_t, + } + + pub struct thread_identifier_info { + pub thread_id: u64, + pub thread_handle: u64, + pub dispatch_qaddr: u64, + } + + pub struct thread_extended_info { + pub pth_user_time: u64, + pub pth_system_time: u64, + pub pth_cpu_usage: i32, + pub pth_policy: i32, + pub pth_run_state: i32, + pub pth_flags: i32, + pub pth_sleep_time: i32, + pub pth_curpri: i32, + pub pth_priority: i32, + pub pth_maxpriority: i32, + pub pth_name: [c_char; MAXTHREADNAMESIZE], + } + + #[repr(packed(4))] + pub struct if_data64 { + pub ifi_type: c_uchar, + pub ifi_typelen: c_uchar, + pub ifi_physical: c_uchar, + pub ifi_addrlen: c_uchar, + pub ifi_hdrlen: c_uchar, + pub ifi_recvquota: c_uchar, + pub ifi_xmitquota: c_uchar, + pub ifi_unused1: c_uchar, + pub ifi_mtu: u32, + pub ifi_metric: u32, + pub ifi_baudrate: u64, + pub ifi_ipackets: u64, + pub ifi_ierrors: u64, + pub ifi_opackets: u64, + pub ifi_oerrors: u64, + pub ifi_collisions: u64, + pub ifi_ibytes: u64, + pub ifi_obytes: u64, + pub ifi_imcasts: u64, + pub ifi_omcasts: u64, + pub ifi_iqdrops: u64, + pub ifi_noproto: u64, + pub ifi_recvtiming: u32, + pub ifi_xmittiming: u32, + #[cfg(target_pointer_width = "32")] + pub ifi_lastchange: crate::timeval, + #[cfg(not(target_pointer_width = "32"))] + pub ifi_lastchange: timeval32, + } + + #[repr(packed(4))] + pub struct if_msghdr2 { + pub ifm_msglen: c_ushort, + pub ifm_version: c_uchar, + pub ifm_type: c_uchar, + pub ifm_addrs: c_int, + pub ifm_flags: c_int, + pub ifm_index: c_ushort, + pub ifm_snd_len: c_int, + pub ifm_snd_maxlen: c_int, + pub ifm_snd_drops: c_int, + pub ifm_timer: c_int, + pub ifm_data: if_data64, + } + + #[repr(packed(8))] + pub struct vm_statistics64 { + pub free_count: natural_t, + pub active_count: natural_t, + pub inactive_count: natural_t, + pub wire_count: natural_t, + pub zero_fill_count: u64, + pub reactivations: u64, + pub pageins: u64, + pub pageouts: u64, + pub faults: u64, + pub cow_faults: u64, + pub lookups: u64, + pub hits: u64, + pub purges: u64, + pub purgeable_count: natural_t, + pub speculative_count: natural_t, + pub decompressions: u64, + pub compressions: u64, + pub swapins: u64, + pub swapouts: u64, + pub compressor_page_count: natural_t, + pub throttled_count: natural_t, + pub external_page_count: natural_t, + pub internal_page_count: natural_t, + pub total_uncompressed_pages_in_compressor: u64, + } + + #[repr(packed(4))] + pub struct mach_task_basic_info { + pub virtual_size: mach_vm_size_t, + pub resident_size: mach_vm_size_t, + pub resident_size_max: mach_vm_size_t, + pub user_time: time_value_t, + pub system_time: time_value_t, + pub policy: crate::policy_t, + pub suspend_count: integer_t, + } + + #[repr(packed(4))] + pub struct log2phys { + pub l2p_flags: c_uint, + pub l2p_contigbytes: off_t, + pub l2p_devoffset: off_t, + } + + pub struct os_unfair_lock_s { + _os_unfair_lock_opaque: u32, + } + + #[repr(packed(1))] + pub struct sockaddr_vm { + pub svm_len: c_uchar, + pub svm_family: crate::sa_family_t, + pub svm_reserved1: c_ushort, + pub svm_port: c_uint, + pub 
svm_cid: c_uint,
+    }
+
+    pub struct ifdevmtu {
+        pub ifdm_current: c_int,
+        pub ifdm_min: c_int,
+        pub ifdm_max: c_int,
+    }
+
+    pub union __c_anonymous_ifk_data {
+        pub ifk_ptr: *mut c_void,
+        pub ifk_value: c_int,
+    }
+
+    #[repr(packed(4))]
+    pub struct ifkpi {
+        pub ifk_module_id: c_uint,
+        pub ifk_type: c_uint,
+        pub ifk_data: __c_anonymous_ifk_data,
+    }
+
+    pub union __c_anonymous_ifr_ifru {
+        pub ifru_addr: crate::sockaddr,
+        pub ifru_dstaddr: crate::sockaddr,
+        pub ifru_broadaddr: crate::sockaddr,
+        pub ifru_flags: c_short,
+        pub ifru_metrics: c_int,
+        pub ifru_mtu: c_int,
+        pub ifru_phys: c_int,
+        pub ifru_media: c_int,
+        pub ifru_intval: c_int,
+        pub ifru_data: *mut c_char,
+        pub ifru_devmtu: ifdevmtu,
+        pub ifru_kpi: ifkpi,
+        pub ifru_wake_flags: u32,
+        pub ifru_route_refcnt: u32,
+        pub ifru_cap: [c_int; 2],
+        pub ifru_functional_type: u32,
+    }
+
+    pub struct ifreq {
+        pub ifr_name: [c_char; crate::IFNAMSIZ],
+        pub ifr_ifru: __c_anonymous_ifr_ifru,
+    }
+
+    pub union __c_anonymous_ifc_ifcu {
+        pub ifcu_buf: *mut c_char,
+        pub ifcu_req: *mut ifreq,
+    }
+
+    pub union __c_anonymous_ifr_ifru6 {
+        pub ifru_addr: crate::sockaddr_in6,
+        pub ifru_dstaddr: crate::sockaddr_in6,
+        pub ifru_flags: c_int,
+        pub ifru_flags6: c_int,
+        pub ifru_metrics: c_int,
+        pub ifru_intval: c_int,
+        pub ifru_data: *mut c_char,
+        pub ifru_lifetime: in6_addrlifetime,
+        pub ifru_stat: in6_ifstat,
+        pub ifru_icmp6stat: icmp6_ifstat,
+        pub ifru_scope_id: [u32; SCOPE6_ID_MAX],
+    }
+
+    pub struct in6_ifreq {
+        pub ifr_name: [c_char; crate::IFNAMSIZ],
+        pub ifr_ifru: __c_anonymous_ifr_ifru6,
+    }
+}
+
+impl siginfo_t {
+    pub unsafe fn si_addr(&self) -> *mut c_void {
+        self.si_addr
+    }
+
+    pub unsafe fn si_value(&self) -> crate::sigval {
+        #[repr(C)]
+        struct siginfo_timer {
+            _si_signo: c_int,
+            _si_errno: c_int,
+            _si_code: c_int,
+            _si_pid: crate::pid_t,
+            _si_uid: crate::uid_t,
+            _si_status: c_int,
+            _si_addr: *mut c_void,
+            si_value: crate::sigval,
+        }
+
+        (*(self as *const siginfo_t).cast::<siginfo_timer>()).si_value
+    }
+
+    pub unsafe fn si_pid(&self) -> crate::pid_t {
+        self.si_pid
+    }
+
+    pub unsafe fn si_uid(&self) -> crate::uid_t {
+        self.si_uid
+    }
+
+    pub unsafe fn si_status(&self) -> c_int {
+        self.si_status
+    }
+}
+
+s_no_extra_traits! {
+    pub union semun {
+        pub val: c_int,
+        pub buf: *mut semid_ds,
+        pub array: *mut c_ushort,
+    }
+}
+
+cfg_if! {
+    if #[cfg(feature = "extra_traits")] {
+        impl PartialEq for semun {
+            fn eq(&self, other: &semun) -> bool {
+                unsafe { self.val == other.val }
+            }
+        }
+        impl Eq for semun {}
+        impl hash::Hash for semun {
+            fn hash<H: hash::Hasher>(&self, state: &mut H) {
+                unsafe { self.val.hash(state) };
+            }
+        }
+    }
+}
+
+cfg_if!
{
+    if #[cfg(feature = "extra_traits")] {
+        impl PartialEq for ifconf
+        where
+            Self: Copy,
+        {
+            fn eq(&self, other: &Self) -> bool {
+                let len_ptr1 = core::ptr::addr_of!(self.ifc_len);
+                let len_ptr2 = core::ptr::addr_of!(other.ifc_len);
+                let ifcu_ptr1 = core::ptr::addr_of!(self.ifc_ifcu);
+                let ifcu_ptr2 = core::ptr::addr_of!(other.ifc_ifcu);
+
+                // SAFETY: `ifconf` implements `Copy` so the reads are valid
+                let len1 = unsafe { len_ptr1.read_unaligned() };
+                let len2 = unsafe { len_ptr2.read_unaligned() };
+                let ifcu1 = unsafe { ifcu_ptr1.read_unaligned() };
+                let ifcu2 = unsafe { ifcu_ptr2.read_unaligned() };
+
+                len1 == len2 && ifcu1 == ifcu2
+            }
+        }
+        impl Eq for ifconf {}
+
+        impl PartialEq for kevent {
+            fn eq(&self, other: &kevent) -> bool {
+                self.ident == other.ident
+                    && self.filter == other.filter
+                    && self.flags == other.flags
+                    && self.fflags == other.fflags
+                    && self.data == other.data
+                    && self.udata == other.udata
+            }
+        }
+        impl Eq for kevent {}
+        impl hash::Hash for kevent {
+            fn hash<H: hash::Hasher>(&self, state: &mut H) {
+                let ident = self.ident;
+                let filter = self.filter;
+                let flags = self.flags;
+                let fflags = self.fflags;
+                let data = self.data;
+                let udata = self.udata;
+                ident.hash(state);
+                filter.hash(state);
+                flags.hash(state);
+                fflags.hash(state);
+                data.hash(state);
+                udata.hash(state);
+            }
+        }
+
+        impl PartialEq for semid_ds {
+            fn eq(&self, other: &semid_ds) -> bool {
+                let sem_perm = self.sem_perm;
+                let sem_pad3 = self.sem_pad3;
+                let other_sem_perm = other.sem_perm;
+                let other_sem_pad3 = other.sem_pad3;
+                sem_perm == other_sem_perm
+                    && self.sem_base == other.sem_base
+                    && self.sem_nsems == other.sem_nsems
+                    && self.sem_otime == other.sem_otime
+                    && self.sem_pad1 == other.sem_pad1
+                    && self.sem_ctime == other.sem_ctime
+                    && self.sem_pad2 == other.sem_pad2
+                    && sem_pad3 == other_sem_pad3
+            }
+        }
+        impl Eq for semid_ds {}
+        impl hash::Hash for semid_ds {
+            fn hash<H: hash::Hasher>(&self, state: &mut H) {
+                let sem_perm = self.sem_perm;
+                let sem_base = self.sem_base;
+                let sem_nsems = self.sem_nsems;
+                let sem_otime = self.sem_otime;
+                let sem_pad1 = self.sem_pad1;
+                let sem_ctime = self.sem_ctime;
+                let sem_pad2 = self.sem_pad2;
+                let sem_pad3 = self.sem_pad3;
+                sem_perm.hash(state);
+                sem_base.hash(state);
+                sem_nsems.hash(state);
+                sem_otime.hash(state);
+                sem_pad1.hash(state);
+                sem_ctime.hash(state);
+                sem_pad2.hash(state);
+                sem_pad3.hash(state);
+            }
+        }
+
+        impl PartialEq for shmid_ds {
+            fn eq(&self, other: &shmid_ds) -> bool {
+                let shm_perm = self.shm_perm;
+                let other_shm_perm = other.shm_perm;
+                shm_perm == other_shm_perm
+                    && self.shm_segsz == other.shm_segsz
+                    && self.shm_lpid == other.shm_lpid
+                    && self.shm_cpid == other.shm_cpid
+                    && self.shm_nattch == other.shm_nattch
+                    && self.shm_atime == other.shm_atime
+                    && self.shm_dtime == other.shm_dtime
+                    && self.shm_ctime == other.shm_ctime
+                    && self.shm_internal == other.shm_internal
+            }
+        }
+        impl Eq for shmid_ds {}
+        impl hash::Hash for shmid_ds {
+            fn hash<H: hash::Hasher>(&self, state: &mut H) {
+                let shm_perm = self.shm_perm;
+                let shm_segsz = self.shm_segsz;
+                let shm_lpid = self.shm_lpid;
+                let shm_cpid = self.shm_cpid;
+                let shm_nattch = self.shm_nattch;
+                let shm_atime = self.shm_atime;
+                let shm_dtime = self.shm_dtime;
+                let shm_ctime = self.shm_ctime;
+                let shm_internal = self.shm_internal;
+                shm_perm.hash(state);
+                shm_segsz.hash(state);
+                shm_lpid.hash(state);
+                shm_cpid.hash(state);
+                shm_nattch.hash(state);
+                shm_atime.hash(state);
+                shm_dtime.hash(state);
+                shm_ctime.hash(state);
+                shm_internal.hash(state);
+            }
+        }
+
+
impl PartialEq for proc_threadinfo { + fn eq(&self, other: &proc_threadinfo) -> bool { + self.pth_user_time == other.pth_user_time + && self.pth_system_time == other.pth_system_time + && self.pth_cpu_usage == other.pth_cpu_usage + && self.pth_policy == other.pth_policy + && self.pth_run_state == other.pth_run_state + && self.pth_flags == other.pth_flags + && self.pth_sleep_time == other.pth_sleep_time + && self.pth_curpri == other.pth_curpri + && self.pth_priority == other.pth_priority + && self.pth_maxpriority == other.pth_maxpriority + && self + .pth_name + .iter() + .zip(other.pth_name.iter()) + .all(|(a, b)| a == b) + } + } + impl Eq for proc_threadinfo {} + impl hash::Hash for proc_threadinfo { + fn hash(&self, state: &mut H) { + self.pth_user_time.hash(state); + self.pth_system_time.hash(state); + self.pth_cpu_usage.hash(state); + self.pth_policy.hash(state); + self.pth_run_state.hash(state); + self.pth_flags.hash(state); + self.pth_sleep_time.hash(state); + self.pth_curpri.hash(state); + self.pth_priority.hash(state); + self.pth_maxpriority.hash(state); + self.pth_name.hash(state); + } + } + + impl PartialEq for statfs { + fn eq(&self, other: &statfs) -> bool { + self.f_bsize == other.f_bsize + && self.f_iosize == other.f_iosize + && self.f_blocks == other.f_blocks + && self.f_bfree == other.f_bfree + && self.f_bavail == other.f_bavail + && self.f_files == other.f_files + && self.f_ffree == other.f_ffree + && self.f_fsid == other.f_fsid + && self.f_owner == other.f_owner + && self.f_flags == other.f_flags + && self.f_fssubtype == other.f_fssubtype + && self.f_fstypename == other.f_fstypename + && self.f_type == other.f_type + && self + .f_mntonname + .iter() + .zip(other.f_mntonname.iter()) + .all(|(a, b)| a == b) + && self + .f_mntfromname + .iter() + .zip(other.f_mntfromname.iter()) + .all(|(a, b)| a == b) + && self.f_reserved == other.f_reserved + } + } + + impl Eq for statfs {} + + impl hash::Hash for statfs { + fn hash(&self, state: &mut H) { + self.f_bsize.hash(state); + self.f_iosize.hash(state); + self.f_blocks.hash(state); + self.f_bfree.hash(state); + self.f_bavail.hash(state); + self.f_files.hash(state); + self.f_ffree.hash(state); + self.f_fsid.hash(state); + self.f_owner.hash(state); + self.f_flags.hash(state); + self.f_fssubtype.hash(state); + self.f_fstypename.hash(state); + self.f_type.hash(state); + self.f_mntonname.hash(state); + self.f_mntfromname.hash(state); + self.f_reserved.hash(state); + } + } + + impl PartialEq for dirent { + fn eq(&self, other: &dirent) -> bool { + self.d_ino == other.d_ino + && self.d_seekoff == other.d_seekoff + && self.d_reclen == other.d_reclen + && self.d_namlen == other.d_namlen + && self.d_type == other.d_type + && self + .d_name + .iter() + .zip(other.d_name.iter()) + .all(|(a, b)| a == b) + } + } + impl Eq for dirent {} + impl hash::Hash for dirent { + fn hash(&self, state: &mut H) { + self.d_ino.hash(state); + self.d_seekoff.hash(state); + self.d_reclen.hash(state); + self.d_namlen.hash(state); + self.d_type.hash(state); + self.d_name.hash(state); + } + } + impl PartialEq for pthread_rwlock_t { + fn eq(&self, other: &pthread_rwlock_t) -> bool { + self.__sig == other.__sig + && self + .__opaque + .iter() + .zip(other.__opaque.iter()) + .all(|(a, b)| a == b) + } + } + impl Eq for pthread_rwlock_t {} + impl hash::Hash for pthread_rwlock_t { + fn hash(&self, state: &mut H) { + self.__sig.hash(state); + self.__opaque.hash(state); + } + } + + impl PartialEq for pthread_mutex_t { + fn eq(&self, other: &pthread_mutex_t) -> bool { + 
self.__sig == other.__sig + && self + .__opaque + .iter() + .zip(other.__opaque.iter()) + .all(|(a, b)| a == b) + } + } + + impl Eq for pthread_mutex_t {} + + impl hash::Hash for pthread_mutex_t { + fn hash(&self, state: &mut H) { + self.__sig.hash(state); + self.__opaque.hash(state); + } + } + + impl PartialEq for pthread_cond_t { + fn eq(&self, other: &pthread_cond_t) -> bool { + self.__sig == other.__sig + && self + .__opaque + .iter() + .zip(other.__opaque.iter()) + .all(|(a, b)| a == b) + } + } + + impl Eq for pthread_cond_t {} + + impl hash::Hash for pthread_cond_t { + fn hash(&self, state: &mut H) { + self.__sig.hash(state); + self.__opaque.hash(state); + } + } + + impl PartialEq for sockaddr_storage { + fn eq(&self, other: &sockaddr_storage) -> bool { + self.ss_len == other.ss_len + && self.ss_family == other.ss_family + && self + .__ss_pad1 + .iter() + .zip(other.__ss_pad1.iter()) + .all(|(a, b)| a == b) + && self.__ss_align == other.__ss_align + && self + .__ss_pad2 + .iter() + .zip(other.__ss_pad2.iter()) + .all(|(a, b)| a == b) + } + } + + impl Eq for sockaddr_storage {} + + impl hash::Hash for sockaddr_storage { + fn hash(&self, state: &mut H) { + self.ss_len.hash(state); + self.ss_family.hash(state); + self.__ss_pad1.hash(state); + self.__ss_align.hash(state); + self.__ss_pad2.hash(state); + } + } + + impl PartialEq for utmpx { + fn eq(&self, other: &utmpx) -> bool { + self.ut_user + .iter() + .zip(other.ut_user.iter()) + .all(|(a, b)| a == b) + && self.ut_id == other.ut_id + && self.ut_line == other.ut_line + && self.ut_pid == other.ut_pid + && self.ut_type == other.ut_type + && self.ut_tv == other.ut_tv + && self + .ut_host + .iter() + .zip(other.ut_host.iter()) + .all(|(a, b)| a == b) + && self.ut_pad == other.ut_pad + } + } + + impl Eq for utmpx {} + + impl hash::Hash for utmpx { + fn hash(&self, state: &mut H) { + self.ut_user.hash(state); + self.ut_id.hash(state); + self.ut_line.hash(state); + self.ut_pid.hash(state); + self.ut_type.hash(state); + self.ut_tv.hash(state); + self.ut_host.hash(state); + self.ut_pad.hash(state); + } + } + + impl PartialEq for sigevent { + fn eq(&self, other: &sigevent) -> bool { + self.sigev_notify == other.sigev_notify + && self.sigev_signo == other.sigev_signo + && self.sigev_value == other.sigev_value + && self.sigev_notify_attributes == other.sigev_notify_attributes + } + } + + impl Eq for sigevent {} + + impl hash::Hash for sigevent { + fn hash(&self, state: &mut H) { + self.sigev_notify.hash(state); + self.sigev_signo.hash(state); + self.sigev_value.hash(state); + self.sigev_notify_attributes.hash(state); + } + } + + impl PartialEq for processor_cpu_load_info { + fn eq(&self, other: &processor_cpu_load_info) -> bool { + self.cpu_ticks == other.cpu_ticks + } + } + impl Eq for processor_cpu_load_info {} + impl hash::Hash for processor_cpu_load_info { + fn hash(&self, state: &mut H) { + self.cpu_ticks.hash(state); + } + } + + impl PartialEq for processor_basic_info { + fn eq(&self, other: &processor_basic_info) -> bool { + self.cpu_type == other.cpu_type + && self.cpu_subtype == other.cpu_subtype + && self.running == other.running + && self.slot_num == other.slot_num + && self.is_master == other.is_master + } + } + impl Eq for processor_basic_info {} + impl hash::Hash for processor_basic_info { + fn hash(&self, state: &mut H) { + self.cpu_type.hash(state); + self.cpu_subtype.hash(state); + self.running.hash(state); + self.slot_num.hash(state); + self.is_master.hash(state); + } + } + + impl PartialEq for processor_set_basic_info { + fn 
eq(&self, other: &processor_set_basic_info) -> bool { + self.processor_count == other.processor_count + && self.default_policy == other.default_policy + } + } + impl Eq for processor_set_basic_info {} + impl hash::Hash for processor_set_basic_info { + fn hash(&self, state: &mut H) { + self.processor_count.hash(state); + self.default_policy.hash(state); + } + } + + impl PartialEq for processor_set_load_info { + fn eq(&self, other: &processor_set_load_info) -> bool { + self.task_count == other.task_count + && self.thread_count == other.thread_count + && self.load_average == other.load_average + && self.mach_factor == other.mach_factor + } + } + impl Eq for processor_set_load_info {} + impl hash::Hash for processor_set_load_info { + fn hash(&self, state: &mut H) { + self.task_count.hash(state); + self.thread_count.hash(state); + self.load_average.hash(state); + self.mach_factor.hash(state); + } + } + + impl PartialEq for time_value_t { + fn eq(&self, other: &time_value_t) -> bool { + self.seconds == other.seconds && self.microseconds == other.microseconds + } + } + impl Eq for time_value_t {} + impl hash::Hash for time_value_t { + fn hash(&self, state: &mut H) { + self.seconds.hash(state); + self.microseconds.hash(state); + } + } + impl PartialEq for thread_basic_info { + fn eq(&self, other: &thread_basic_info) -> bool { + self.user_time == other.user_time + && self.system_time == other.system_time + && self.cpu_usage == other.cpu_usage + && self.policy == other.policy + && self.run_state == other.run_state + && self.flags == other.flags + && self.suspend_count == other.suspend_count + && self.sleep_time == other.sleep_time + } + } + impl Eq for thread_basic_info {} + impl hash::Hash for thread_basic_info { + fn hash(&self, state: &mut H) { + self.user_time.hash(state); + self.system_time.hash(state); + self.cpu_usage.hash(state); + self.policy.hash(state); + self.run_state.hash(state); + self.flags.hash(state); + self.suspend_count.hash(state); + self.sleep_time.hash(state); + } + } + impl PartialEq for thread_extended_info { + fn eq(&self, other: &thread_extended_info) -> bool { + self.pth_user_time == other.pth_user_time + && self.pth_system_time == other.pth_system_time + && self.pth_cpu_usage == other.pth_cpu_usage + && self.pth_policy == other.pth_policy + && self.pth_run_state == other.pth_run_state + && self.pth_flags == other.pth_flags + && self.pth_sleep_time == other.pth_sleep_time + && self.pth_curpri == other.pth_curpri + && self.pth_priority == other.pth_priority + && self.pth_maxpriority == other.pth_maxpriority + && self + .pth_name + .iter() + .zip(other.pth_name.iter()) + .all(|(a, b)| a == b) + } + } + impl Eq for thread_extended_info {} + impl hash::Hash for thread_extended_info { + fn hash(&self, state: &mut H) { + self.pth_user_time.hash(state); + self.pth_system_time.hash(state); + self.pth_cpu_usage.hash(state); + self.pth_policy.hash(state); + self.pth_run_state.hash(state); + self.pth_flags.hash(state); + self.pth_sleep_time.hash(state); + self.pth_curpri.hash(state); + self.pth_priority.hash(state); + self.pth_maxpriority.hash(state); + self.pth_name.hash(state); + } + } + impl PartialEq for thread_identifier_info { + fn eq(&self, other: &thread_identifier_info) -> bool { + self.thread_id == other.thread_id + && self.thread_handle == other.thread_handle + && self.dispatch_qaddr == other.dispatch_qaddr + } + } + impl Eq for thread_identifier_info {} + impl hash::Hash for thread_identifier_info { + fn hash(&self, state: &mut H) { + self.thread_id.hash(state); + 
self.thread_handle.hash(state); + self.dispatch_qaddr.hash(state); + } + } + impl PartialEq for if_data64 { + fn eq(&self, other: &if_data64) -> bool { + self.ifi_type == other.ifi_type + && self.ifi_typelen == other.ifi_typelen + && self.ifi_physical == other.ifi_physical + && self.ifi_addrlen == other.ifi_addrlen + && self.ifi_hdrlen == other.ifi_hdrlen + && self.ifi_recvquota == other.ifi_recvquota + && self.ifi_xmitquota == other.ifi_xmitquota + && self.ifi_unused1 == other.ifi_unused1 + && self.ifi_mtu == other.ifi_mtu + && self.ifi_metric == other.ifi_metric + && self.ifi_baudrate == other.ifi_baudrate + && self.ifi_ipackets == other.ifi_ipackets + && self.ifi_ierrors == other.ifi_ierrors + && self.ifi_opackets == other.ifi_opackets + && self.ifi_oerrors == other.ifi_oerrors + && self.ifi_collisions == other.ifi_collisions + && self.ifi_ibytes == other.ifi_ibytes + && self.ifi_obytes == other.ifi_obytes + && self.ifi_imcasts == other.ifi_imcasts + && self.ifi_omcasts == other.ifi_omcasts + && self.ifi_iqdrops == other.ifi_iqdrops + && self.ifi_noproto == other.ifi_noproto + && self.ifi_recvtiming == other.ifi_recvtiming + && self.ifi_xmittiming == other.ifi_xmittiming + && self.ifi_lastchange == other.ifi_lastchange + } + } + impl Eq for if_data64 {} + impl hash::Hash for if_data64 { + fn hash(&self, state: &mut H) { + let ifi_type = self.ifi_type; + let ifi_typelen = self.ifi_typelen; + let ifi_physical = self.ifi_physical; + let ifi_addrlen = self.ifi_addrlen; + let ifi_hdrlen = self.ifi_hdrlen; + let ifi_recvquota = self.ifi_recvquota; + let ifi_xmitquota = self.ifi_xmitquota; + let ifi_unused1 = self.ifi_unused1; + let ifi_mtu = self.ifi_mtu; + let ifi_metric = self.ifi_metric; + let ifi_baudrate = self.ifi_baudrate; + let ifi_ipackets = self.ifi_ipackets; + let ifi_ierrors = self.ifi_ierrors; + let ifi_opackets = self.ifi_opackets; + let ifi_oerrors = self.ifi_oerrors; + let ifi_collisions = self.ifi_collisions; + let ifi_ibytes = self.ifi_ibytes; + let ifi_obytes = self.ifi_obytes; + let ifi_imcasts = self.ifi_imcasts; + let ifi_omcasts = self.ifi_omcasts; + let ifi_iqdrops = self.ifi_iqdrops; + let ifi_noproto = self.ifi_noproto; + let ifi_recvtiming = self.ifi_recvtiming; + let ifi_xmittiming = self.ifi_xmittiming; + let ifi_lastchange = self.ifi_lastchange; + ifi_type.hash(state); + ifi_typelen.hash(state); + ifi_physical.hash(state); + ifi_addrlen.hash(state); + ifi_hdrlen.hash(state); + ifi_recvquota.hash(state); + ifi_xmitquota.hash(state); + ifi_unused1.hash(state); + ifi_mtu.hash(state); + ifi_metric.hash(state); + ifi_baudrate.hash(state); + ifi_ipackets.hash(state); + ifi_ierrors.hash(state); + ifi_opackets.hash(state); + ifi_oerrors.hash(state); + ifi_collisions.hash(state); + ifi_ibytes.hash(state); + ifi_obytes.hash(state); + ifi_imcasts.hash(state); + ifi_omcasts.hash(state); + ifi_iqdrops.hash(state); + ifi_noproto.hash(state); + ifi_recvtiming.hash(state); + ifi_xmittiming.hash(state); + ifi_lastchange.hash(state); + } + } + impl PartialEq for if_msghdr2 { + fn eq(&self, other: &if_msghdr2) -> bool { + self.ifm_msglen == other.ifm_msglen + && self.ifm_version == other.ifm_version + && self.ifm_type == other.ifm_type + && self.ifm_addrs == other.ifm_addrs + && self.ifm_flags == other.ifm_flags + && self.ifm_index == other.ifm_index + && self.ifm_snd_len == other.ifm_snd_len + && self.ifm_snd_maxlen == other.ifm_snd_maxlen + && self.ifm_snd_drops == other.ifm_snd_drops + && self.ifm_timer == other.ifm_timer + && self.ifm_data == other.ifm_data + } + } + impl Eq 
for if_msghdr2 {} + impl hash::Hash for if_msghdr2 { + fn hash(&self, state: &mut H) { + let ifm_msglen = self.ifm_msglen; + let ifm_version = self.ifm_version; + let ifm_type = self.ifm_type; + let ifm_addrs = self.ifm_addrs; + let ifm_flags = self.ifm_flags; + let ifm_index = self.ifm_index; + let ifm_snd_len = self.ifm_snd_len; + let ifm_snd_maxlen = self.ifm_snd_maxlen; + let ifm_snd_drops = self.ifm_snd_drops; + let ifm_timer = self.ifm_timer; + let ifm_data = self.ifm_data; + ifm_msglen.hash(state); + ifm_version.hash(state); + ifm_type.hash(state); + ifm_addrs.hash(state); + ifm_flags.hash(state); + ifm_index.hash(state); + ifm_snd_len.hash(state); + ifm_snd_maxlen.hash(state); + ifm_snd_drops.hash(state); + ifm_timer.hash(state); + ifm_data.hash(state); + } + } + impl PartialEq for vm_statistics64 { + fn eq(&self, other: &vm_statistics64) -> bool { + // Otherwise rustfmt crashes... + let total_uncompressed = self.total_uncompressed_pages_in_compressor; + self.free_count == other.free_count + && self.active_count == other.active_count + && self.inactive_count == other.inactive_count + && self.wire_count == other.wire_count + && self.zero_fill_count == other.zero_fill_count + && self.reactivations == other.reactivations + && self.pageins == other.pageins + && self.pageouts == other.pageouts + && self.faults == other.faults + && self.cow_faults == other.cow_faults + && self.lookups == other.lookups + && self.hits == other.hits + && self.purges == other.purges + && self.purgeable_count == other.purgeable_count + && self.speculative_count == other.speculative_count + && self.decompressions == other.decompressions + && self.compressions == other.compressions + && self.swapins == other.swapins + && self.swapouts == other.swapouts + && self.compressor_page_count == other.compressor_page_count + && self.throttled_count == other.throttled_count + && self.external_page_count == other.external_page_count + && self.internal_page_count == other.internal_page_count + && total_uncompressed == other.total_uncompressed_pages_in_compressor + } + } + impl Eq for vm_statistics64 {} + impl hash::Hash for vm_statistics64 { + fn hash(&self, state: &mut H) { + let free_count = self.free_count; + let active_count = self.active_count; + let inactive_count = self.inactive_count; + let wire_count = self.wire_count; + let zero_fill_count = self.zero_fill_count; + let reactivations = self.reactivations; + let pageins = self.pageins; + let pageouts = self.pageouts; + let faults = self.faults; + let cow_faults = self.cow_faults; + let lookups = self.lookups; + let hits = self.hits; + let purges = self.purges; + let purgeable_count = self.purgeable_count; + let speculative_count = self.speculative_count; + let decompressions = self.decompressions; + let compressions = self.compressions; + let swapins = self.swapins; + let swapouts = self.swapouts; + let compressor_page_count = self.compressor_page_count; + let throttled_count = self.throttled_count; + let external_page_count = self.external_page_count; + let internal_page_count = self.internal_page_count; + // Otherwise rustfmt crashes... 
+ let total_uncompressed = self.total_uncompressed_pages_in_compressor; + free_count.hash(state); + active_count.hash(state); + inactive_count.hash(state); + wire_count.hash(state); + zero_fill_count.hash(state); + reactivations.hash(state); + pageins.hash(state); + pageouts.hash(state); + faults.hash(state); + cow_faults.hash(state); + lookups.hash(state); + hits.hash(state); + purges.hash(state); + purgeable_count.hash(state); + speculative_count.hash(state); + decompressions.hash(state); + compressions.hash(state); + swapins.hash(state); + swapouts.hash(state); + compressor_page_count.hash(state); + throttled_count.hash(state); + external_page_count.hash(state); + internal_page_count.hash(state); + total_uncompressed.hash(state); + } + } + + impl PartialEq for mach_task_basic_info { + fn eq(&self, other: &mach_task_basic_info) -> bool { + self.virtual_size == other.virtual_size + && self.resident_size == other.resident_size + && self.resident_size_max == other.resident_size_max + && self.user_time == other.user_time + && self.system_time == other.system_time + && self.policy == other.policy + && self.suspend_count == other.suspend_count + } + } + impl Eq for mach_task_basic_info {} + impl hash::Hash for mach_task_basic_info { + fn hash(&self, state: &mut H) { + let virtual_size = self.virtual_size; + let resident_size = self.resident_size; + let resident_size_max = self.resident_size_max; + let user_time = self.user_time; + let system_time = self.system_time; + let policy = self.policy; + let suspend_count = self.suspend_count; + virtual_size.hash(state); + resident_size.hash(state); + resident_size_max.hash(state); + user_time.hash(state); + system_time.hash(state); + policy.hash(state); + suspend_count.hash(state); + } + } + + impl PartialEq for log2phys { + fn eq(&self, other: &log2phys) -> bool { + self.l2p_flags == other.l2p_flags + && self.l2p_contigbytes == other.l2p_contigbytes + && self.l2p_devoffset == other.l2p_devoffset + } + } + impl Eq for log2phys {} + impl hash::Hash for log2phys { + fn hash(&self, state: &mut H) { + let l2p_flags = self.l2p_flags; + let l2p_contigbytes = self.l2p_contigbytes; + let l2p_devoffset = self.l2p_devoffset; + l2p_flags.hash(state); + l2p_contigbytes.hash(state); + l2p_devoffset.hash(state); + } + } + impl PartialEq for os_unfair_lock { + fn eq(&self, other: &os_unfair_lock) -> bool { + self._os_unfair_lock_opaque == other._os_unfair_lock_opaque + } + } + + impl Eq for os_unfair_lock {} + + impl hash::Hash for os_unfair_lock { + fn hash(&self, state: &mut H) { + self._os_unfair_lock_opaque.hash(state); + } + } + + impl PartialEq for sockaddr_vm { + fn eq(&self, other: &sockaddr_vm) -> bool { + self.svm_len == other.svm_len + && self.svm_family == other.svm_family + && self.svm_reserved1 == other.svm_reserved1 + && self.svm_port == other.svm_port + && self.svm_cid == other.svm_cid + } + } + + impl Eq for sockaddr_vm {} + + impl hash::Hash for sockaddr_vm { + fn hash(&self, state: &mut H) { + let svm_len = self.svm_len; + let svm_family = self.svm_family; + let svm_reserved1 = self.svm_reserved1; + let svm_port = self.svm_port; + let svm_cid = self.svm_cid; + + svm_len.hash(state); + svm_family.hash(state); + svm_reserved1.hash(state); + svm_port.hash(state); + svm_cid.hash(state); + } + } + + impl PartialEq for ifdevmtu { + fn eq(&self, other: &ifdevmtu) -> bool { + self.ifdm_current == other.ifdm_current + && self.ifdm_min == other.ifdm_min + && self.ifdm_max == other.ifdm_max + } + } + + impl Eq for ifdevmtu {} + + impl hash::Hash for ifdevmtu 
{ + fn hash(&self, state: &mut H) { + self.ifdm_current.hash(state); + self.ifdm_min.hash(state); + self.ifdm_max.hash(state); + } + } + + impl PartialEq for __c_anonymous_ifk_data { + fn eq(&self, other: &__c_anonymous_ifk_data) -> bool { + unsafe { self.ifk_ptr == other.ifk_ptr && self.ifk_value == other.ifk_value } + } + } + + impl Eq for __c_anonymous_ifk_data {} + impl hash::Hash for __c_anonymous_ifk_data { + fn hash(&self, state: &mut H) { + unsafe { + self.ifk_ptr.hash(state); + self.ifk_value.hash(state); + } + } + } + + impl PartialEq for ifkpi { + fn eq(&self, other: &ifkpi) -> bool { + self.ifk_module_id == other.ifk_module_id && self.ifk_type == other.ifk_type + } + } + + impl Eq for ifkpi {} + + impl hash::Hash for ifkpi { + fn hash(&self, state: &mut H) { + self.ifk_module_id.hash(state); + self.ifk_type.hash(state); + } + } + + impl PartialEq for __c_anonymous_ifr_ifru { + fn eq(&self, other: &__c_anonymous_ifr_ifru) -> bool { + unsafe { + self.ifru_addr == other.ifru_addr + && self.ifru_dstaddr == other.ifru_dstaddr + && self.ifru_broadaddr == other.ifru_broadaddr + && self.ifru_flags == other.ifru_flags + && self.ifru_metrics == other.ifru_metrics + && self.ifru_mtu == other.ifru_mtu + && self.ifru_phys == other.ifru_phys + && self.ifru_media == other.ifru_media + && self.ifru_intval == other.ifru_intval + && self.ifru_data == other.ifru_data + && self.ifru_devmtu == other.ifru_devmtu + && self.ifru_kpi == other.ifru_kpi + && self.ifru_wake_flags == other.ifru_wake_flags + && self.ifru_route_refcnt == other.ifru_route_refcnt + && self + .ifru_cap + .iter() + .zip(other.ifru_cap.iter()) + .all(|(a, b)| a == b) + && self.ifru_functional_type == other.ifru_functional_type + } + } + } + + impl Eq for __c_anonymous_ifr_ifru {} + + impl hash::Hash for __c_anonymous_ifr_ifru { + fn hash(&self, state: &mut H) { + unsafe { + self.ifru_addr.hash(state); + self.ifru_dstaddr.hash(state); + self.ifru_broadaddr.hash(state); + self.ifru_flags.hash(state); + self.ifru_metrics.hash(state); + self.ifru_mtu.hash(state); + self.ifru_phys.hash(state); + self.ifru_media.hash(state); + self.ifru_intval.hash(state); + self.ifru_data.hash(state); + self.ifru_devmtu.hash(state); + self.ifru_kpi.hash(state); + self.ifru_wake_flags.hash(state); + self.ifru_route_refcnt.hash(state); + self.ifru_cap.hash(state); + self.ifru_functional_type.hash(state); + } + } + } + + impl PartialEq for ifreq { + fn eq(&self, other: &ifreq) -> bool { + self.ifr_name == other.ifr_name && self.ifr_ifru == other.ifr_ifru + } + } + + impl Eq for ifreq {} + + impl hash::Hash for ifreq { + fn hash(&self, state: &mut H) { + self.ifr_name.hash(state); + self.ifr_ifru.hash(state); + } + } + + impl Eq for __c_anonymous_ifc_ifcu {} + + impl PartialEq for __c_anonymous_ifc_ifcu { + fn eq(&self, other: &__c_anonymous_ifc_ifcu) -> bool { + unsafe { self.ifcu_buf == other.ifcu_buf && self.ifcu_req == other.ifcu_req } + } + } + + impl hash::Hash for __c_anonymous_ifc_ifcu { + fn hash(&self, state: &mut H) { + unsafe { self.ifcu_buf.hash(state) }; + unsafe { self.ifcu_req.hash(state) }; + } + } + + impl PartialEq for __c_anonymous_ifr_ifru6 { + fn eq(&self, other: &__c_anonymous_ifr_ifru6) -> bool { + unsafe { + self.ifru_addr == other.ifru_addr + && self.ifru_dstaddr == other.ifru_dstaddr + && self.ifru_flags == other.ifru_flags + && self.ifru_flags6 == other.ifru_flags6 + && self.ifru_metrics == other.ifru_metrics + && self.ifru_intval == other.ifru_intval + && self.ifru_data == other.ifru_data + && self + .ifru_scope_id + .iter() + 
.zip(other.ifru_scope_id.iter()) + .all(|(a, b)| a == b) + } + } + } + + impl Eq for __c_anonymous_ifr_ifru6 {} + + impl hash::Hash for __c_anonymous_ifr_ifru6 { + fn hash(&self, state: &mut H) { + unsafe { + self.ifru_addr.hash(state); + self.ifru_dstaddr.hash(state); + self.ifru_flags.hash(state); + self.ifru_flags6.hash(state); + self.ifru_metrics.hash(state); + self.ifru_intval.hash(state); + self.ifru_data.hash(state); + self.ifru_scope_id.hash(state); + } + } + } + + impl PartialEq for in6_ifreq { + fn eq(&self, other: &in6_ifreq) -> bool { + self.ifr_name == other.ifr_name && self.ifr_ifru == other.ifr_ifru + } + } + + impl Eq for in6_ifreq {} + } +} + +pub const _UTX_USERSIZE: usize = 256; +pub const _UTX_LINESIZE: usize = 32; +pub const _UTX_IDSIZE: usize = 4; +pub const _UTX_HOSTSIZE: usize = 256; + +pub const EMPTY: c_short = 0; +pub const RUN_LVL: c_short = 1; +pub const BOOT_TIME: c_short = 2; +pub const OLD_TIME: c_short = 3; +pub const NEW_TIME: c_short = 4; +pub const INIT_PROCESS: c_short = 5; +pub const LOGIN_PROCESS: c_short = 6; +pub const USER_PROCESS: c_short = 7; +pub const DEAD_PROCESS: c_short = 8; +pub const ACCOUNTING: c_short = 9; +pub const SIGNATURE: c_short = 10; +pub const SHUTDOWN_TIME: c_short = 11; + +pub const LC_COLLATE_MASK: c_int = 1 << 0; +pub const LC_CTYPE_MASK: c_int = 1 << 1; +pub const LC_MESSAGES_MASK: c_int = 1 << 2; +pub const LC_MONETARY_MASK: c_int = 1 << 3; +pub const LC_NUMERIC_MASK: c_int = 1 << 4; +pub const LC_TIME_MASK: c_int = 1 << 5; +pub const LC_ALL_MASK: c_int = LC_COLLATE_MASK + | LC_CTYPE_MASK + | LC_MESSAGES_MASK + | LC_MONETARY_MASK + | LC_NUMERIC_MASK + | LC_TIME_MASK; + +pub const CODESET: crate::nl_item = 0; +pub const D_T_FMT: crate::nl_item = 1; +pub const D_FMT: crate::nl_item = 2; +pub const T_FMT: crate::nl_item = 3; +pub const T_FMT_AMPM: crate::nl_item = 4; +pub const AM_STR: crate::nl_item = 5; +pub const PM_STR: crate::nl_item = 6; + +pub const DAY_1: crate::nl_item = 7; +pub const DAY_2: crate::nl_item = 8; +pub const DAY_3: crate::nl_item = 9; +pub const DAY_4: crate::nl_item = 10; +pub const DAY_5: crate::nl_item = 11; +pub const DAY_6: crate::nl_item = 12; +pub const DAY_7: crate::nl_item = 13; + +pub const ABDAY_1: crate::nl_item = 14; +pub const ABDAY_2: crate::nl_item = 15; +pub const ABDAY_3: crate::nl_item = 16; +pub const ABDAY_4: crate::nl_item = 17; +pub const ABDAY_5: crate::nl_item = 18; +pub const ABDAY_6: crate::nl_item = 19; +pub const ABDAY_7: crate::nl_item = 20; + +pub const MON_1: crate::nl_item = 21; +pub const MON_2: crate::nl_item = 22; +pub const MON_3: crate::nl_item = 23; +pub const MON_4: crate::nl_item = 24; +pub const MON_5: crate::nl_item = 25; +pub const MON_6: crate::nl_item = 26; +pub const MON_7: crate::nl_item = 27; +pub const MON_8: crate::nl_item = 28; +pub const MON_9: crate::nl_item = 29; +pub const MON_10: crate::nl_item = 30; +pub const MON_11: crate::nl_item = 31; +pub const MON_12: crate::nl_item = 32; + +pub const ABMON_1: crate::nl_item = 33; +pub const ABMON_2: crate::nl_item = 34; +pub const ABMON_3: crate::nl_item = 35; +pub const ABMON_4: crate::nl_item = 36; +pub const ABMON_5: crate::nl_item = 37; +pub const ABMON_6: crate::nl_item = 38; +pub const ABMON_7: crate::nl_item = 39; +pub const ABMON_8: crate::nl_item = 40; +pub const ABMON_9: crate::nl_item = 41; +pub const ABMON_10: crate::nl_item = 42; +pub const ABMON_11: crate::nl_item = 43; +pub const ABMON_12: crate::nl_item = 44; + +pub const CLOCK_REALTIME: crate::clockid_t = 0; +pub const CLOCK_MONOTONIC_RAW: 
crate::clockid_t = 4; +pub const CLOCK_MONOTONIC_RAW_APPROX: crate::clockid_t = 5; +pub const CLOCK_MONOTONIC: crate::clockid_t = 6; +pub const CLOCK_UPTIME_RAW: crate::clockid_t = 8; +pub const CLOCK_UPTIME_RAW_APPROX: crate::clockid_t = 9; +pub const CLOCK_PROCESS_CPUTIME_ID: crate::clockid_t = 12; +pub const CLOCK_THREAD_CPUTIME_ID: crate::clockid_t = 16; + +pub const ERA: crate::nl_item = 45; +pub const ERA_D_FMT: crate::nl_item = 46; +pub const ERA_D_T_FMT: crate::nl_item = 47; +pub const ERA_T_FMT: crate::nl_item = 48; +pub const ALT_DIGITS: crate::nl_item = 49; + +pub const RADIXCHAR: crate::nl_item = 50; +pub const THOUSEP: crate::nl_item = 51; + +pub const YESEXPR: crate::nl_item = 52; +pub const NOEXPR: crate::nl_item = 53; + +pub const YESSTR: crate::nl_item = 54; +pub const NOSTR: crate::nl_item = 55; + +pub const CRNCYSTR: crate::nl_item = 56; + +pub const D_MD_ORDER: crate::nl_item = 57; + +pub const EXIT_FAILURE: c_int = 1; +pub const EXIT_SUCCESS: c_int = 0; +pub const RAND_MAX: c_int = 2147483647; +pub const EOF: c_int = -1; +pub const SEEK_SET: c_int = 0; +pub const SEEK_CUR: c_int = 1; +pub const SEEK_END: c_int = 2; +pub const SEEK_HOLE: c_int = 3; +pub const SEEK_DATA: c_int = 4; +pub const _IOFBF: c_int = 0; +pub const _IONBF: c_int = 2; +pub const _IOLBF: c_int = 1; +pub const BUFSIZ: c_uint = 1024; +pub const FOPEN_MAX: c_uint = 20; +pub const FILENAME_MAX: c_uint = 1024; +pub const L_tmpnam: c_uint = 1024; +pub const TMP_MAX: c_uint = 308915776; +pub const _PC_LINK_MAX: c_int = 1; +pub const _PC_MAX_CANON: c_int = 2; +pub const _PC_MAX_INPUT: c_int = 3; +pub const _PC_NAME_MAX: c_int = 4; +pub const _PC_PATH_MAX: c_int = 5; +pub const _PC_PIPE_BUF: c_int = 6; +pub const _PC_CHOWN_RESTRICTED: c_int = 7; +pub const _PC_NO_TRUNC: c_int = 8; +pub const _PC_VDISABLE: c_int = 9; +pub const _PC_NAME_CHARS_MAX: c_int = 10; +pub const _PC_CASE_SENSITIVE: c_int = 11; +pub const _PC_CASE_PRESERVING: c_int = 12; +pub const _PC_EXTENDED_SECURITY_NP: c_int = 13; +pub const _PC_AUTH_OPAQUE_NP: c_int = 14; +pub const _PC_2_SYMLINKS: c_int = 15; +pub const _PC_ALLOC_SIZE_MIN: c_int = 16; +pub const _PC_ASYNC_IO: c_int = 17; +pub const _PC_FILESIZEBITS: c_int = 18; +pub const _PC_PRIO_IO: c_int = 19; +pub const _PC_REC_INCR_XFER_SIZE: c_int = 20; +pub const _PC_REC_MAX_XFER_SIZE: c_int = 21; +pub const _PC_REC_MIN_XFER_SIZE: c_int = 22; +pub const _PC_REC_XFER_ALIGN: c_int = 23; +pub const _PC_SYMLINK_MAX: c_int = 24; +pub const _PC_SYNC_IO: c_int = 25; +pub const _PC_XATTR_SIZE_BITS: c_int = 26; +pub const _PC_MIN_HOLE_SIZE: c_int = 27; +pub const O_EVTONLY: c_int = 0x00008000; +pub const O_NOCTTY: c_int = 0x00020000; +pub const O_DIRECTORY: c_int = 0x00100000; +pub const O_SYMLINK: c_int = 0x00200000; +pub const O_DSYNC: c_int = 0x00400000; +pub const O_CLOEXEC: c_int = 0x01000000; +pub const O_NOFOLLOW_ANY: c_int = 0x20000000; +pub const O_EXEC: c_int = 0x40000000; +pub const O_SEARCH: c_int = O_EXEC | O_DIRECTORY; +pub const S_IFIFO: mode_t = 0o1_0000; +pub const S_IFCHR: mode_t = 0o2_0000; +pub const S_IFBLK: mode_t = 0o6_0000; +pub const S_IFDIR: mode_t = 0o4_0000; +pub const S_IFREG: mode_t = 0o10_0000; +pub const S_IFLNK: mode_t = 0o12_0000; +pub const S_IFSOCK: mode_t = 0o14_0000; +pub const S_IFMT: mode_t = 0o17_0000; +pub const S_IEXEC: mode_t = 0o0100; +pub const S_IWRITE: mode_t = 0o0200; +pub const S_IREAD: mode_t = 0o0400; +pub const S_IRWXU: mode_t = 0o0700; +pub const S_IXUSR: mode_t = 0o0100; +pub const S_IWUSR: mode_t = 0o0200; +pub const S_IRUSR: mode_t = 0o0400; 
+pub const S_IRWXG: mode_t = 0o0070; +pub const S_IXGRP: mode_t = 0o0010; +pub const S_IWGRP: mode_t = 0o0020; +pub const S_IRGRP: mode_t = 0o0040; +pub const S_IRWXO: mode_t = 0o0007; +pub const S_IXOTH: mode_t = 0o0001; +pub const S_IWOTH: mode_t = 0o0002; +pub const S_IROTH: mode_t = 0o0004; +pub const F_OK: c_int = 0; +pub const R_OK: c_int = 4; +pub const W_OK: c_int = 2; +pub const X_OK: c_int = 1; +pub const STDIN_FILENO: c_int = 0; +pub const STDOUT_FILENO: c_int = 1; +pub const STDERR_FILENO: c_int = 2; +pub const F_LOCK: c_int = 1; +pub const F_TEST: c_int = 3; +pub const F_TLOCK: c_int = 2; +pub const F_ULOCK: c_int = 0; +pub const F_GETLK: c_int = 7; +pub const F_SETLK: c_int = 8; +pub const F_SETLKW: c_int = 9; +pub const SIGHUP: c_int = 1; +pub const SIGINT: c_int = 2; +pub const SIGQUIT: c_int = 3; +pub const SIGILL: c_int = 4; +pub const SIGABRT: c_int = 6; +pub const SIGEMT: c_int = 7; +pub const SIGFPE: c_int = 8; +pub const SIGKILL: c_int = 9; +pub const SIGSEGV: c_int = 11; +pub const SIGPIPE: c_int = 13; +pub const SIGALRM: c_int = 14; +pub const SIGTERM: c_int = 15; + +pub const PROT_NONE: c_int = 0; +pub const PROT_READ: c_int = 1; +pub const PROT_WRITE: c_int = 2; +pub const PROT_EXEC: c_int = 4; + +pub const PT_TRACE_ME: c_int = 0; +pub const PT_READ_I: c_int = 1; +pub const PT_READ_D: c_int = 2; +pub const PT_READ_U: c_int = 3; +pub const PT_WRITE_I: c_int = 4; +pub const PT_WRITE_D: c_int = 5; +pub const PT_WRITE_U: c_int = 6; +pub const PT_CONTINUE: c_int = 7; +pub const PT_KILL: c_int = 8; +pub const PT_STEP: c_int = 9; +pub const PT_ATTACH: c_int = 10; +pub const PT_DETACH: c_int = 11; +pub const PT_SIGEXC: c_int = 12; +pub const PT_THUPDATE: c_int = 13; +pub const PT_ATTACHEXC: c_int = 14; + +pub const PT_FORCEQUOTA: c_int = 30; +pub const PT_DENY_ATTACH: c_int = 31; +pub const PT_FIRSTMACH: c_int = 32; + +pub const MAP_FILE: c_int = 0x0000; +pub const MAP_SHARED: c_int = 0x0001; +pub const MAP_PRIVATE: c_int = 0x0002; +pub const MAP_FIXED: c_int = 0x0010; +pub const MAP_ANON: c_int = 0x1000; +pub const MAP_ANONYMOUS: c_int = MAP_ANON; + +pub const CPU_STATE_USER: c_int = 0; +pub const CPU_STATE_SYSTEM: c_int = 1; +pub const CPU_STATE_IDLE: c_int = 2; +pub const CPU_STATE_NICE: c_int = 3; +pub const CPU_STATE_MAX: c_int = 4; + +pub const PROCESSOR_BASIC_INFO: c_int = 1; +pub const PROCESSOR_CPU_LOAD_INFO: c_int = 2; +pub const PROCESSOR_PM_REGS_INFO: c_int = 0x10000001; +pub const PROCESSOR_TEMPERATURE: c_int = 0x10000002; +pub const PROCESSOR_SET_LOAD_INFO: c_int = 4; +pub const PROCESSOR_SET_BASIC_INFO: c_int = 5; + +deprecated_mach! 
{ + pub const VM_FLAGS_FIXED: c_int = 0x0000; + pub const VM_FLAGS_ANYWHERE: c_int = 0x0001; + pub const VM_FLAGS_PURGABLE: c_int = 0x0002; + pub const VM_FLAGS_RANDOM_ADDR: c_int = 0x0008; + pub const VM_FLAGS_NO_CACHE: c_int = 0x0010; + pub const VM_FLAGS_RESILIENT_CODESIGN: c_int = 0x0020; + pub const VM_FLAGS_RESILIENT_MEDIA: c_int = 0x0040; + pub const VM_FLAGS_OVERWRITE: c_int = 0x4000; + pub const VM_FLAGS_SUPERPAGE_MASK: c_int = 0x70000; + pub const VM_FLAGS_RETURN_DATA_ADDR: c_int = 0x100000; + pub const VM_FLAGS_RETURN_4K_DATA_ADDR: c_int = 0x800000; + pub const VM_FLAGS_ALIAS_MASK: c_int = 0xFF000000; + pub const VM_FLAGS_USER_ALLOCATE: c_int = 0xff07401f; + pub const VM_FLAGS_USER_MAP: c_int = 0xff97401f; + pub const VM_FLAGS_USER_REMAP: c_int = VM_FLAGS_FIXED + | VM_FLAGS_ANYWHERE + | VM_FLAGS_RANDOM_ADDR + | VM_FLAGS_OVERWRITE + | VM_FLAGS_RETURN_DATA_ADDR + | VM_FLAGS_RESILIENT_CODESIGN; + + pub const VM_FLAGS_SUPERPAGE_SHIFT: c_int = 16; + pub const SUPERPAGE_NONE: c_int = 0; + pub const SUPERPAGE_SIZE_ANY: c_int = 1; + pub const VM_FLAGS_SUPERPAGE_NONE: c_int = SUPERPAGE_NONE << VM_FLAGS_SUPERPAGE_SHIFT; + pub const VM_FLAGS_SUPERPAGE_SIZE_ANY: c_int = SUPERPAGE_SIZE_ANY << VM_FLAGS_SUPERPAGE_SHIFT; + pub const SUPERPAGE_SIZE_2MB: c_int = 2; + pub const VM_FLAGS_SUPERPAGE_SIZE_2MB: c_int = SUPERPAGE_SIZE_2MB << VM_FLAGS_SUPERPAGE_SHIFT; + + pub const VM_MEMORY_MALLOC: c_int = 1; + pub const VM_MEMORY_MALLOC_SMALL: c_int = 2; + pub const VM_MEMORY_MALLOC_LARGE: c_int = 3; + pub const VM_MEMORY_MALLOC_HUGE: c_int = 4; + pub const VM_MEMORY_SBRK: c_int = 5; + pub const VM_MEMORY_REALLOC: c_int = 6; + pub const VM_MEMORY_MALLOC_TINY: c_int = 7; + pub const VM_MEMORY_MALLOC_LARGE_REUSABLE: c_int = 8; + pub const VM_MEMORY_MALLOC_LARGE_REUSED: c_int = 9; + pub const VM_MEMORY_ANALYSIS_TOOL: c_int = 10; + pub const VM_MEMORY_MALLOC_NANO: c_int = 11; + pub const VM_MEMORY_MACH_MSG: c_int = 20; + pub const VM_MEMORY_IOKIT: c_int = 21; + pub const VM_MEMORY_STACK: c_int = 30; + pub const VM_MEMORY_GUARD: c_int = 31; + pub const VM_MEMORY_SHARED_PMAP: c_int = 32; + pub const VM_MEMORY_DYLIB: c_int = 33; + pub const VM_MEMORY_OBJC_DISPATCHERS: c_int = 34; + pub const VM_MEMORY_UNSHARED_PMAP: c_int = 35; + pub const VM_MEMORY_APPKIT: c_int = 40; + pub const VM_MEMORY_FOUNDATION: c_int = 41; + pub const VM_MEMORY_COREGRAPHICS: c_int = 42; + pub const VM_MEMORY_CORESERVICES: c_int = 43; + pub const VM_MEMORY_CARBON: c_int = VM_MEMORY_CORESERVICES; + pub const VM_MEMORY_JAVA: c_int = 44; + pub const VM_MEMORY_COREDATA: c_int = 45; + pub const VM_MEMORY_COREDATA_OBJECTIDS: c_int = 46; + pub const VM_MEMORY_ATS: c_int = 50; + pub const VM_MEMORY_LAYERKIT: c_int = 51; + pub const VM_MEMORY_CGIMAGE: c_int = 52; + pub const VM_MEMORY_TCMALLOC: c_int = 53; + pub const VM_MEMORY_COREGRAPHICS_DATA: c_int = 54; + pub const VM_MEMORY_COREGRAPHICS_SHARED: c_int = 55; + pub const VM_MEMORY_COREGRAPHICS_FRAMEBUFFERS: c_int = 56; + pub const VM_MEMORY_COREGRAPHICS_BACKINGSTORES: c_int = 57; + pub const VM_MEMORY_COREGRAPHICS_XALLOC: c_int = 58; + pub const VM_MEMORY_COREGRAPHICS_MISC: c_int = VM_MEMORY_COREGRAPHICS; + pub const VM_MEMORY_DYLD: c_int = 60; + pub const VM_MEMORY_DYLD_MALLOC: c_int = 61; + pub const VM_MEMORY_SQLITE: c_int = 62; + pub const VM_MEMORY_JAVASCRIPT_CORE: c_int = 63; + pub const VM_MEMORY_JAVASCRIPT_JIT_EXECUTABLE_ALLOCATOR: c_int = 64; + pub const VM_MEMORY_JAVASCRIPT_JIT_REGISTER_FILE: c_int = 65; + pub const VM_MEMORY_GLSL: c_int = 66; + pub const VM_MEMORY_OPENCL: c_int = 
67; + pub const VM_MEMORY_COREIMAGE: c_int = 68; + pub const VM_MEMORY_WEBCORE_PURGEABLE_BUFFERS: c_int = 69; + pub const VM_MEMORY_IMAGEIO: c_int = 70; + pub const VM_MEMORY_COREPROFILE: c_int = 71; + pub const VM_MEMORY_ASSETSD: c_int = 72; + pub const VM_MEMORY_OS_ALLOC_ONCE: c_int = 73; + pub const VM_MEMORY_LIBDISPATCH: c_int = 74; + pub const VM_MEMORY_ACCELERATE: c_int = 75; + pub const VM_MEMORY_COREUI: c_int = 76; + pub const VM_MEMORY_COREUIFILE: c_int = 77; + pub const VM_MEMORY_GENEALOGY: c_int = 78; + pub const VM_MEMORY_RAWCAMERA: c_int = 79; + pub const VM_MEMORY_CORPSEINFO: c_int = 80; + pub const VM_MEMORY_ASL: c_int = 81; + pub const VM_MEMORY_SWIFT_RUNTIME: c_int = 82; + pub const VM_MEMORY_SWIFT_METADATA: c_int = 83; + pub const VM_MEMORY_DHMM: c_int = 84; + pub const VM_MEMORY_SCENEKIT: c_int = 86; + pub const VM_MEMORY_SKYWALK: c_int = 87; + pub const VM_MEMORY_APPLICATION_SPECIFIC_1: c_int = 240; + pub const VM_MEMORY_APPLICATION_SPECIFIC_16: c_int = 255; +} + +pub const MAP_FAILED: *mut c_void = !0 as *mut c_void; + +pub const MCL_CURRENT: c_int = 0x0001; +pub const MCL_FUTURE: c_int = 0x0002; + +pub const MS_ASYNC: c_int = 0x0001; +pub const MS_INVALIDATE: c_int = 0x0002; +pub const MS_SYNC: c_int = 0x0010; + +pub const MS_KILLPAGES: c_int = 0x0004; +pub const MS_DEACTIVATE: c_int = 0x0008; + +pub const EPERM: c_int = 1; +pub const ENOENT: c_int = 2; +pub const ESRCH: c_int = 3; +pub const EINTR: c_int = 4; +pub const EIO: c_int = 5; +pub const ENXIO: c_int = 6; +pub const E2BIG: c_int = 7; +pub const ENOEXEC: c_int = 8; +pub const EBADF: c_int = 9; +pub const ECHILD: c_int = 10; +pub const EDEADLK: c_int = 11; +pub const ENOMEM: c_int = 12; +pub const EACCES: c_int = 13; +pub const EFAULT: c_int = 14; +pub const ENOTBLK: c_int = 15; +pub const EBUSY: c_int = 16; +pub const EEXIST: c_int = 17; +pub const EXDEV: c_int = 18; +pub const ENODEV: c_int = 19; +pub const ENOTDIR: c_int = 20; +pub const EISDIR: c_int = 21; +pub const EINVAL: c_int = 22; +pub const ENFILE: c_int = 23; +pub const EMFILE: c_int = 24; +pub const ENOTTY: c_int = 25; +pub const ETXTBSY: c_int = 26; +pub const EFBIG: c_int = 27; +pub const ENOSPC: c_int = 28; +pub const ESPIPE: c_int = 29; +pub const EROFS: c_int = 30; +pub const EMLINK: c_int = 31; +pub const EPIPE: c_int = 32; +pub const EDOM: c_int = 33; +pub const ERANGE: c_int = 34; +pub const EAGAIN: c_int = 35; +pub const EWOULDBLOCK: c_int = EAGAIN; +pub const EINPROGRESS: c_int = 36; +pub const EALREADY: c_int = 37; +pub const ENOTSOCK: c_int = 38; +pub const EDESTADDRREQ: c_int = 39; +pub const EMSGSIZE: c_int = 40; +pub const EPROTOTYPE: c_int = 41; +pub const ENOPROTOOPT: c_int = 42; +pub const EPROTONOSUPPORT: c_int = 43; +pub const ESOCKTNOSUPPORT: c_int = 44; +pub const ENOTSUP: c_int = 45; +pub const EPFNOSUPPORT: c_int = 46; +pub const EAFNOSUPPORT: c_int = 47; +pub const EADDRINUSE: c_int = 48; +pub const EADDRNOTAVAIL: c_int = 49; +pub const ENETDOWN: c_int = 50; +pub const ENETUNREACH: c_int = 51; +pub const ENETRESET: c_int = 52; +pub const ECONNABORTED: c_int = 53; +pub const ECONNRESET: c_int = 54; +pub const ENOBUFS: c_int = 55; +pub const EISCONN: c_int = 56; +pub const ENOTCONN: c_int = 57; +pub const ESHUTDOWN: c_int = 58; +pub const ETOOMANYREFS: c_int = 59; +pub const ETIMEDOUT: c_int = 60; +pub const ECONNREFUSED: c_int = 61; +pub const ELOOP: c_int = 62; +pub const ENAMETOOLONG: c_int = 63; +pub const EHOSTDOWN: c_int = 64; +pub const EHOSTUNREACH: c_int = 65; +pub const ENOTEMPTY: c_int = 66; +pub const EPROCLIM: 
c_int = 67; +pub const EUSERS: c_int = 68; +pub const EDQUOT: c_int = 69; +pub const ESTALE: c_int = 70; +pub const EREMOTE: c_int = 71; +pub const EBADRPC: c_int = 72; +pub const ERPCMISMATCH: c_int = 73; +pub const EPROGUNAVAIL: c_int = 74; +pub const EPROGMISMATCH: c_int = 75; +pub const EPROCUNAVAIL: c_int = 76; +pub const ENOLCK: c_int = 77; +pub const ENOSYS: c_int = 78; +pub const EFTYPE: c_int = 79; +pub const EAUTH: c_int = 80; +pub const ENEEDAUTH: c_int = 81; +pub const EPWROFF: c_int = 82; +pub const EDEVERR: c_int = 83; +pub const EOVERFLOW: c_int = 84; +pub const EBADEXEC: c_int = 85; +pub const EBADARCH: c_int = 86; +pub const ESHLIBVERS: c_int = 87; +pub const EBADMACHO: c_int = 88; +pub const ECANCELED: c_int = 89; +pub const EIDRM: c_int = 90; +pub const ENOMSG: c_int = 91; +pub const EILSEQ: c_int = 92; +pub const ENOATTR: c_int = 93; +pub const EBADMSG: c_int = 94; +pub const EMULTIHOP: c_int = 95; +pub const ENODATA: c_int = 96; +pub const ENOLINK: c_int = 97; +pub const ENOSR: c_int = 98; +pub const ENOSTR: c_int = 99; +pub const EPROTO: c_int = 100; +pub const ETIME: c_int = 101; +pub const EOPNOTSUPP: c_int = 102; +pub const ENOPOLICY: c_int = 103; +pub const ENOTRECOVERABLE: c_int = 104; +pub const EOWNERDEAD: c_int = 105; +pub const EQFULL: c_int = 106; +pub const ELAST: c_int = 106; + +pub const EAI_AGAIN: c_int = 2; +pub const EAI_BADFLAGS: c_int = 3; +pub const EAI_FAIL: c_int = 4; +pub const EAI_FAMILY: c_int = 5; +pub const EAI_MEMORY: c_int = 6; +pub const EAI_NODATA: c_int = 7; +pub const EAI_NONAME: c_int = 8; +pub const EAI_SERVICE: c_int = 9; +pub const EAI_SOCKTYPE: c_int = 10; +pub const EAI_SYSTEM: c_int = 11; +pub const EAI_OVERFLOW: c_int = 14; + +pub const F_DUPFD: c_int = 0; +pub const F_DUPFD_CLOEXEC: c_int = 67; +pub const F_GETFD: c_int = 1; +pub const F_SETFD: c_int = 2; +pub const F_GETFL: c_int = 3; +pub const F_SETFL: c_int = 4; +pub const F_PREALLOCATE: c_int = 42; +pub const F_RDADVISE: c_int = 44; +pub const F_RDAHEAD: c_int = 45; +pub const F_NOCACHE: c_int = 48; +pub const F_LOG2PHYS: c_int = 49; +pub const F_GETPATH: c_int = 50; +pub const F_FULLFSYNC: c_int = 51; +pub const F_FREEZE_FS: c_int = 53; +pub const F_THAW_FS: c_int = 54; +pub const F_GLOBAL_NOCACHE: c_int = 55; +pub const F_NODIRECT: c_int = 62; +pub const F_LOG2PHYS_EXT: c_int = 65; +pub const F_BARRIERFSYNC: c_int = 85; +// See https://github.com/apple/darwin-xnu/blob/main/bsd/sys/fcntl.h +pub const F_OFD_SETLK: c_int = 90; /* Acquire or release open file description lock */ +pub const F_OFD_SETLKW: c_int = 91; /* (as F_OFD_SETLK but blocking if conflicting lock) */ +pub const F_OFD_GETLK: c_int = 92; /* Examine OFD lock */ +pub const F_PUNCHHOLE: c_int = 99; +pub const F_TRIM_ACTIVE_FILE: c_int = 100; +pub const F_SPECULATIVE_READ: c_int = 101; +pub const F_GETPATH_NOFIRMLINK: c_int = 102; +pub const F_TRANSFEREXTENTS: c_int = 110; + +pub const F_ALLOCATECONTIG: c_uint = 0x02; +pub const F_ALLOCATEALL: c_uint = 0x04; +pub const F_ALLOCATEPERSIST: c_uint = 0x08; + +pub const F_PEOFPOSMODE: c_int = 3; +pub const F_VOLPOSMODE: c_int = 4; + +pub const AT_FDCWD: c_int = -2; +pub const AT_EACCESS: c_int = 0x0010; +pub const AT_SYMLINK_NOFOLLOW: c_int = 0x0020; +pub const AT_SYMLINK_FOLLOW: c_int = 0x0040; +pub const AT_REMOVEDIR: c_int = 0x0080; + +pub const PTHREAD_INTROSPECTION_THREAD_CREATE: c_uint = 1; +pub const PTHREAD_INTROSPECTION_THREAD_START: c_uint = 2; +pub const PTHREAD_INTROSPECTION_THREAD_TERMINATE: c_uint = 3; +pub const PTHREAD_INTROSPECTION_THREAD_DESTROY: 
c_uint = 4; + +pub const TIOCMODG: c_ulong = 0x40047403; +pub const TIOCMODS: c_ulong = 0x80047404; +pub const TIOCM_LE: c_int = 0x1; +pub const TIOCM_DTR: c_int = 0x2; +pub const TIOCM_RTS: c_int = 0x4; +pub const TIOCM_ST: c_int = 0x8; +pub const TIOCM_SR: c_int = 0x10; +pub const TIOCM_CTS: c_int = 0x20; +pub const TIOCM_CAR: c_int = 0x40; +pub const TIOCM_CD: c_int = 0x40; +pub const TIOCM_RNG: c_int = 0x80; +pub const TIOCM_RI: c_int = 0x80; +pub const TIOCM_DSR: c_int = 0x100; +pub const TIOCEXCL: c_int = 0x2000740d; +pub const TIOCNXCL: c_int = 0x2000740e; +pub const TIOCFLUSH: c_ulong = 0x80047410; +pub const TIOCGETD: c_ulong = 0x4004741a; +pub const TIOCSETD: c_ulong = 0x8004741b; +pub const TIOCIXON: c_uint = 0x20007481; +pub const TIOCIXOFF: c_uint = 0x20007480; +pub const TIOCSDTR: c_uint = 0x20007479; +pub const TIOCCDTR: c_uint = 0x20007478; +pub const TIOCGPGRP: c_ulong = 0x40047477; +pub const TIOCSPGRP: c_ulong = 0x80047476; +pub const TIOCOUTQ: c_ulong = 0x40047473; +pub const TIOCSTI: c_ulong = 0x80017472; +pub const TIOCNOTTY: c_uint = 0x20007471; +pub const TIOCPKT: c_ulong = 0x80047470; +pub const TIOCPKT_DATA: c_int = 0x0; +pub const TIOCPKT_FLUSHREAD: c_int = 0x1; +pub const TIOCPKT_FLUSHWRITE: c_int = 0x2; +pub const TIOCPKT_STOP: c_int = 0x4; +pub const TIOCPKT_START: c_int = 0x8; +pub const TIOCPKT_NOSTOP: c_int = 0x10; +pub const TIOCPKT_DOSTOP: c_int = 0x20; +pub const TIOCPKT_IOCTL: c_int = 0x40; +pub const TIOCSTOP: c_uint = 0x2000746f; +pub const TIOCSTART: c_uint = 0x2000746e; +pub const TIOCMSET: c_ulong = 0x8004746d; +pub const TIOCMBIS: c_ulong = 0x8004746c; +pub const TIOCMBIC: c_ulong = 0x8004746b; +pub const TIOCMGET: c_ulong = 0x4004746a; +pub const TIOCREMOTE: c_ulong = 0x80047469; +pub const TIOCGWINSZ: c_ulong = 0x40087468; +pub const TIOCSWINSZ: c_ulong = 0x80087467; +pub const TIOCUCNTL: c_ulong = 0x80047466; +pub const TIOCSTAT: c_uint = 0x20007465; +pub const TIOCSCONS: c_uint = 0x20007463; +pub const TIOCCONS: c_ulong = 0x80047462; +pub const TIOCSCTTY: c_uint = 0x20007461; +pub const TIOCEXT: c_ulong = 0x80047460; +pub const TIOCSIG: c_uint = 0x2000745f; +pub const TIOCDRAIN: c_uint = 0x2000745e; +pub const TIOCMSDTRWAIT: c_ulong = 0x8004745b; +pub const TIOCMGDTRWAIT: c_ulong = 0x4004745a; +pub const TIOCSDRAINWAIT: c_ulong = 0x80047457; +pub const TIOCGDRAINWAIT: c_ulong = 0x40047456; +pub const TIOCDSIMICROCODE: c_uint = 0x20007455; +pub const TIOCPTYGRANT: c_uint = 0x20007454; +pub const TIOCPTYGNAME: c_uint = 0x40807453; +pub const TIOCPTYUNLK: c_uint = 0x20007452; +pub const TIOCGETA: c_ulong = 0x40487413; +pub const TIOCSETA: c_ulong = 0x80487414; +pub const TIOCSETAW: c_ulong = 0x80487415; +pub const TIOCSETAF: c_ulong = 0x80487416; + +pub const BIOCGRSIG: c_ulong = 0x40044272; +pub const BIOCSRSIG: c_ulong = 0x80044273; +pub const BIOCSDLT: c_ulong = 0x80044278; +pub const BIOCGSEESENT: c_ulong = 0x40044276; +pub const BIOCSSEESENT: c_ulong = 0x80044277; +pub const BIOCGDLTLIST: c_ulong = 0xc00c4279; + +pub const FIODTYPE: c_ulong = 0x4004667a; + +pub const B0: speed_t = 0; +pub const B50: speed_t = 50; +pub const B75: speed_t = 75; +pub const B110: speed_t = 110; +pub const B134: speed_t = 134; +pub const B150: speed_t = 150; +pub const B200: speed_t = 200; +pub const B300: speed_t = 300; +pub const B600: speed_t = 600; +pub const B1200: speed_t = 1200; +pub const B1800: speed_t = 1800; +pub const B2400: speed_t = 2400; +pub const B4800: speed_t = 4800; +pub const B9600: speed_t = 9600; +pub const B19200: speed_t = 19200; +pub 
const B38400: speed_t = 38400; +pub const B7200: speed_t = 7200; +pub const B14400: speed_t = 14400; +pub const B28800: speed_t = 28800; +pub const B57600: speed_t = 57600; +pub const B76800: speed_t = 76800; +pub const B115200: speed_t = 115200; +pub const B230400: speed_t = 230400; +pub const EXTA: speed_t = 19200; +pub const EXTB: speed_t = 38400; + +pub const SIGTRAP: c_int = 5; + +pub const GLOB_APPEND: c_int = 0x0001; +pub const GLOB_DOOFFS: c_int = 0x0002; +pub const GLOB_ERR: c_int = 0x0004; +pub const GLOB_MARK: c_int = 0x0008; +pub const GLOB_NOCHECK: c_int = 0x0010; +pub const GLOB_NOSORT: c_int = 0x0020; +pub const GLOB_NOESCAPE: c_int = 0x2000; + +pub const GLOB_NOSPACE: c_int = -1; +pub const GLOB_ABORTED: c_int = -2; +pub const GLOB_NOMATCH: c_int = -3; + +pub const POSIX_MADV_NORMAL: c_int = 0; +pub const POSIX_MADV_RANDOM: c_int = 1; +pub const POSIX_MADV_SEQUENTIAL: c_int = 2; +pub const POSIX_MADV_WILLNEED: c_int = 3; +pub const POSIX_MADV_DONTNEED: c_int = 4; + +pub const _SC_IOV_MAX: c_int = 56; +pub const _SC_GETGR_R_SIZE_MAX: c_int = 70; +pub const _SC_GETPW_R_SIZE_MAX: c_int = 71; +pub const _SC_LOGIN_NAME_MAX: c_int = 73; +pub const _SC_MQ_PRIO_MAX: c_int = 75; +pub const _SC_THREAD_ATTR_STACKADDR: c_int = 82; +pub const _SC_THREAD_ATTR_STACKSIZE: c_int = 83; +pub const _SC_THREAD_DESTRUCTOR_ITERATIONS: c_int = 85; +pub const _SC_THREAD_KEYS_MAX: c_int = 86; +pub const _SC_THREAD_PRIO_INHERIT: c_int = 87; +pub const _SC_THREAD_PRIO_PROTECT: c_int = 88; +pub const _SC_THREAD_PRIORITY_SCHEDULING: c_int = 89; +pub const _SC_THREAD_PROCESS_SHARED: c_int = 90; +pub const _SC_THREAD_SAFE_FUNCTIONS: c_int = 91; +pub const _SC_THREAD_STACK_MIN: c_int = 93; +pub const _SC_THREAD_THREADS_MAX: c_int = 94; +pub const _SC_THREADS: c_int = 96; +pub const _SC_TTY_NAME_MAX: c_int = 101; +pub const _SC_ATEXIT_MAX: c_int = 107; +pub const _SC_XOPEN_CRYPT: c_int = 108; +pub const _SC_XOPEN_ENH_I18N: c_int = 109; +pub const _SC_XOPEN_LEGACY: c_int = 110; +pub const _SC_XOPEN_REALTIME: c_int = 111; +pub const _SC_XOPEN_REALTIME_THREADS: c_int = 112; +pub const _SC_XOPEN_SHM: c_int = 113; +pub const _SC_XOPEN_UNIX: c_int = 115; +pub const _SC_XOPEN_VERSION: c_int = 116; +pub const _SC_XOPEN_XCU_VERSION: c_int = 121; +pub const _SC_PHYS_PAGES: c_int = 200; + +pub const PTHREAD_PROCESS_PRIVATE: c_int = 2; +pub const PTHREAD_PROCESS_SHARED: c_int = 1; +pub const PTHREAD_CREATE_JOINABLE: c_int = 1; +pub const PTHREAD_CREATE_DETACHED: c_int = 2; +pub const PTHREAD_INHERIT_SCHED: c_int = 1; +pub const PTHREAD_EXPLICIT_SCHED: c_int = 2; +pub const PTHREAD_CANCEL_ENABLE: c_int = 0x01; +pub const PTHREAD_CANCEL_DISABLE: c_int = 0x00; +pub const PTHREAD_CANCEL_DEFERRED: c_int = 0x02; +pub const PTHREAD_CANCEL_ASYNCHRONOUS: c_int = 0x00; +pub const PTHREAD_CANCELED: *mut c_void = 1 as *mut c_void; +pub const PTHREAD_SCOPE_SYSTEM: c_int = 1; +pub const PTHREAD_SCOPE_PROCESS: c_int = 2; +pub const PTHREAD_PRIO_NONE: c_int = 0; +pub const PTHREAD_PRIO_INHERIT: c_int = 1; +pub const PTHREAD_PRIO_PROTECT: c_int = 2; + +#[cfg(target_arch = "aarch64")] +pub const PTHREAD_STACK_MIN: size_t = 16384; +#[cfg(not(target_arch = "aarch64"))] +pub const PTHREAD_STACK_MIN: size_t = 8192; + +pub const RLIMIT_CPU: c_int = 0; +pub const RLIMIT_FSIZE: c_int = 1; +pub const RLIMIT_DATA: c_int = 2; +pub const RLIMIT_STACK: c_int = 3; +pub const RLIMIT_CORE: c_int = 4; +pub const RLIMIT_AS: c_int = 5; +pub const RLIMIT_RSS: c_int = RLIMIT_AS; +pub const RLIMIT_MEMLOCK: c_int = 6; +pub const RLIMIT_NPROC: c_int = 7; +pub 
const RLIMIT_NOFILE: c_int = 8; +#[deprecated(since = "0.2.64", note = "Not stable across OS versions")] +pub const RLIM_NLIMITS: c_int = 9; +pub const _RLIMIT_POSIX_FLAG: c_int = 0x1000; + +pub const RLIM_INFINITY: rlim_t = 0x7fff_ffff_ffff_ffff; + +pub const RUSAGE_SELF: c_int = 0; +pub const RUSAGE_CHILDREN: c_int = -1; + +pub const MADV_NORMAL: c_int = 0; +pub const MADV_RANDOM: c_int = 1; +pub const MADV_SEQUENTIAL: c_int = 2; +pub const MADV_WILLNEED: c_int = 3; +pub const MADV_DONTNEED: c_int = 4; +pub const MADV_FREE: c_int = 5; +pub const MADV_ZERO_WIRED_PAGES: c_int = 6; +pub const MADV_FREE_REUSABLE: c_int = 7; +pub const MADV_FREE_REUSE: c_int = 8; +pub const MADV_CAN_REUSE: c_int = 9; + +pub const MINCORE_INCORE: c_int = 0x1; +pub const MINCORE_REFERENCED: c_int = 0x2; +pub const MINCORE_MODIFIED: c_int = 0x4; +pub const MINCORE_REFERENCED_OTHER: c_int = 0x8; +pub const MINCORE_MODIFIED_OTHER: c_int = 0x10; + +pub const CTLIOCGINFO: c_ulong = 0xc0644e03; + +// +// sys/netinet/in.h +// Protocols (RFC 1700) +// NOTE: These are in addition to the constants defined in src/unix/mod.rs + +// IPPROTO_IP defined in src/unix/mod.rs +/// IP6 hop-by-hop options +pub const IPPROTO_HOPOPTS: c_int = 0; +// IPPROTO_ICMP defined in src/unix/mod.rs +/// group mgmt protocol +pub const IPPROTO_IGMP: c_int = 2; +/// gateway2 (deprecated) +pub const IPPROTO_GGP: c_int = 3; +/// for compatibility +pub const IPPROTO_IPIP: c_int = 4; +// IPPROTO_TCP defined in src/unix/mod.rs +/// Stream protocol II. +pub const IPPROTO_ST: c_int = 7; +/// exterior gateway protocol +pub const IPPROTO_EGP: c_int = 8; +/// private interior gateway +pub const IPPROTO_PIGP: c_int = 9; +/// BBN RCC Monitoring +pub const IPPROTO_RCCMON: c_int = 10; +/// network voice protocol +pub const IPPROTO_NVPII: c_int = 11; +/// pup +pub const IPPROTO_PUP: c_int = 12; +/// Argus +pub const IPPROTO_ARGUS: c_int = 13; +/// EMCON +pub const IPPROTO_EMCON: c_int = 14; +/// Cross Net Debugger +pub const IPPROTO_XNET: c_int = 15; +/// Chaos +pub const IPPROTO_CHAOS: c_int = 16; +// IPPROTO_UDP defined in src/unix/mod.rs +/// Multiplexing +pub const IPPROTO_MUX: c_int = 18; +/// DCN Measurement Subsystems +pub const IPPROTO_MEAS: c_int = 19; +/// Host Monitoring +pub const IPPROTO_HMP: c_int = 20; +/// Packet Radio Measurement +pub const IPPROTO_PRM: c_int = 21; +/// xns idp +pub const IPPROTO_IDP: c_int = 22; +/// Trunk-1 +pub const IPPROTO_TRUNK1: c_int = 23; +/// Trunk-2 +pub const IPPROTO_TRUNK2: c_int = 24; +/// Leaf-1 +pub const IPPROTO_LEAF1: c_int = 25; +/// Leaf-2 +pub const IPPROTO_LEAF2: c_int = 26; +/// Reliable Data +pub const IPPROTO_RDP: c_int = 27; +/// Reliable Transaction +pub const IPPROTO_IRTP: c_int = 28; +/// tp-4 w/ class negotiation +pub const IPPROTO_TP: c_int = 29; +/// Bulk Data Transfer +pub const IPPROTO_BLT: c_int = 30; +/// Network Services +pub const IPPROTO_NSP: c_int = 31; +/// Merit Internodal +pub const IPPROTO_INP: c_int = 32; +/// Sequential Exchange +pub const IPPROTO_SEP: c_int = 33; +/// Third Party Connect +pub const IPPROTO_3PC: c_int = 34; +/// InterDomain Policy Routing +pub const IPPROTO_IDPR: c_int = 35; +/// XTP +pub const IPPROTO_XTP: c_int = 36; +/// Datagram Delivery +pub const IPPROTO_DDP: c_int = 37; +/// Control Message Transport +pub const IPPROTO_CMTP: c_int = 38; +/// TP++ Transport +pub const IPPROTO_TPXX: c_int = 39; +/// IL transport protocol +pub const IPPROTO_IL: c_int = 40; +// IPPROTO_IPV6 defined in src/unix/mod.rs +/// Source Demand Routing +pub const IPPROTO_SDRP: c_int = 42; 
+/// IP6 routing header +pub const IPPROTO_ROUTING: c_int = 43; +/// IP6 fragmentation header +pub const IPPROTO_FRAGMENT: c_int = 44; +/// InterDomain Routing +pub const IPPROTO_IDRP: c_int = 45; +/// resource reservation +pub const IPPROTO_RSVP: c_int = 46; +/// General Routing Encap. +pub const IPPROTO_GRE: c_int = 47; +/// Mobile Host Routing +pub const IPPROTO_MHRP: c_int = 48; +/// BHA +pub const IPPROTO_BHA: c_int = 49; +/// IP6 Encap Sec. Payload +pub const IPPROTO_ESP: c_int = 50; +/// IP6 Auth Header +pub const IPPROTO_AH: c_int = 51; +/// Integ. Net Layer Security +pub const IPPROTO_INLSP: c_int = 52; +/// IP with encryption +pub const IPPROTO_SWIPE: c_int = 53; +/// Next Hop Resolution +pub const IPPROTO_NHRP: c_int = 54; +/* 55-57: Unassigned */ +// IPPROTO_ICMPV6 defined in src/unix/mod.rs +/// IP6 no next header +pub const IPPROTO_NONE: c_int = 59; +/// IP6 destination option +pub const IPPROTO_DSTOPTS: c_int = 60; +/// any host internal protocol +pub const IPPROTO_AHIP: c_int = 61; +/// CFTP +pub const IPPROTO_CFTP: c_int = 62; +/// "hello" routing protocol +pub const IPPROTO_HELLO: c_int = 63; +/// SATNET/Backroom EXPAK +pub const IPPROTO_SATEXPAK: c_int = 64; +/// Kryptolan +pub const IPPROTO_KRYPTOLAN: c_int = 65; +/// Remote Virtual Disk +pub const IPPROTO_RVD: c_int = 66; +/// Pluribus Packet Core +pub const IPPROTO_IPPC: c_int = 67; +/// Any distributed FS +pub const IPPROTO_ADFS: c_int = 68; +/// Satnet Monitoring +pub const IPPROTO_SATMON: c_int = 69; +/// VISA Protocol +pub const IPPROTO_VISA: c_int = 70; +/// Packet Core Utility +pub const IPPROTO_IPCV: c_int = 71; +/// Comp. Prot. Net. Executive +pub const IPPROTO_CPNX: c_int = 72; +/// Comp. Prot. HeartBeat +pub const IPPROTO_CPHB: c_int = 73; +/// Wang Span Network +pub const IPPROTO_WSN: c_int = 74; +/// Packet Video Protocol +pub const IPPROTO_PVP: c_int = 75; +/// BackRoom SATNET Monitoring +pub const IPPROTO_BRSATMON: c_int = 76; +/// Sun net disk proto (temp.) +pub const IPPROTO_ND: c_int = 77; +/// WIDEBAND Monitoring +pub const IPPROTO_WBMON: c_int = 78; +/// WIDEBAND EXPAK +pub const IPPROTO_WBEXPAK: c_int = 79; +/// ISO cnlp +pub const IPPROTO_EON: c_int = 80; +/// VMTP +pub const IPPROTO_VMTP: c_int = 81; +/// Secure VMTP +pub const IPPROTO_SVMTP: c_int = 82; +/// Banyon VINES +pub const IPPROTO_VINES: c_int = 83; +/// TTP +pub const IPPROTO_TTP: c_int = 84; +/// NSFNET-IGP +pub const IPPROTO_IGP: c_int = 85; +/// dissimilar gateway prot. +pub const IPPROTO_DGP: c_int = 86; +/// TCF +pub const IPPROTO_TCF: c_int = 87; +/// Cisco/GXS IGRP +pub const IPPROTO_IGRP: c_int = 88; +/// OSPFIGP +pub const IPPROTO_OSPFIGP: c_int = 89; +/// Strite RPC protocol +pub const IPPROTO_SRPC: c_int = 90; +/// Locus Address Resoloution +pub const IPPROTO_LARP: c_int = 91; +/// Multicast Transport +pub const IPPROTO_MTP: c_int = 92; +/// AX.25 Frames +pub const IPPROTO_AX25: c_int = 93; +/// IP encapsulated in IP +pub const IPPROTO_IPEIP: c_int = 94; +/// Mobile Int.ing control +pub const IPPROTO_MICP: c_int = 95; +/// Semaphore Comm. security +pub const IPPROTO_SCCSP: c_int = 96; +/// Ethernet IP encapsulation +pub const IPPROTO_ETHERIP: c_int = 97; +/// encapsulation header +pub const IPPROTO_ENCAP: c_int = 98; +/// any private encr. 
scheme +pub const IPPROTO_APES: c_int = 99; +/// GMTP +pub const IPPROTO_GMTP: c_int = 100; + +/* 101-254: Partly Unassigned */ +/// Protocol Independent Mcast +pub const IPPROTO_PIM: c_int = 103; +/// payload compression (IPComp) +pub const IPPROTO_IPCOMP: c_int = 108; +/// PGM +pub const IPPROTO_PGM: c_int = 113; +/// SCTP +pub const IPPROTO_SCTP: c_int = 132; + +/* 255: Reserved */ +/* BSD Private, local use, namespace incursion */ +/// divert pseudo-protocol +pub const IPPROTO_DIVERT: c_int = 254; +/// raw IP packet +pub const IPPROTO_RAW: c_int = 255; +pub const IPPROTO_MAX: c_int = 256; +/// last return value of *_input(), meaning "all job for this pkt is done". +pub const IPPROTO_DONE: c_int = 257; + +pub const AF_UNSPEC: c_int = 0; +pub const AF_LOCAL: c_int = 1; +pub const AF_UNIX: c_int = AF_LOCAL; +pub const AF_INET: c_int = 2; +pub const AF_IMPLINK: c_int = 3; +pub const AF_PUP: c_int = 4; +pub const AF_CHAOS: c_int = 5; +pub const AF_NS: c_int = 6; +pub const AF_ISO: c_int = 7; +pub const AF_OSI: c_int = AF_ISO; +pub const AF_ECMA: c_int = 8; +pub const AF_DATAKIT: c_int = 9; +pub const AF_CCITT: c_int = 10; +pub const AF_SNA: c_int = 11; +pub const AF_DECnet: c_int = 12; +pub const AF_DLI: c_int = 13; +pub const AF_LAT: c_int = 14; +pub const AF_HYLINK: c_int = 15; +pub const AF_APPLETALK: c_int = 16; +pub const AF_ROUTE: c_int = 17; +pub const AF_LINK: c_int = 18; +pub const pseudo_AF_XTP: c_int = 19; +pub const AF_COIP: c_int = 20; +pub const AF_CNT: c_int = 21; +pub const pseudo_AF_RTIP: c_int = 22; +pub const AF_IPX: c_int = 23; +pub const AF_SIP: c_int = 24; +pub const pseudo_AF_PIP: c_int = 25; +pub const AF_NDRV: c_int = 27; +pub const AF_ISDN: c_int = 28; +pub const AF_E164: c_int = AF_ISDN; +pub const pseudo_AF_KEY: c_int = 29; +pub const AF_INET6: c_int = 30; +pub const AF_NATM: c_int = 31; +pub const AF_SYSTEM: c_int = 32; +pub const AF_NETBIOS: c_int = 33; +pub const AF_PPP: c_int = 34; +pub const pseudo_AF_HDRCMPLT: c_int = 35; +pub const AF_IEEE80211: c_int = 37; +pub const AF_UTUN: c_int = 38; +pub const AF_VSOCK: c_int = 40; +pub const AF_SYS_CONTROL: c_int = 2; + +pub const SYSPROTO_EVENT: c_int = 1; +pub const SYSPROTO_CONTROL: c_int = 2; + +pub const PF_UNSPEC: c_int = AF_UNSPEC; +pub const PF_LOCAL: c_int = AF_LOCAL; +pub const PF_UNIX: c_int = PF_LOCAL; +pub const PF_INET: c_int = AF_INET; +pub const PF_IMPLINK: c_int = AF_IMPLINK; +pub const PF_PUP: c_int = AF_PUP; +pub const PF_CHAOS: c_int = AF_CHAOS; +pub const PF_NS: c_int = AF_NS; +pub const PF_ISO: c_int = AF_ISO; +pub const PF_OSI: c_int = AF_ISO; +pub const PF_ECMA: c_int = AF_ECMA; +pub const PF_DATAKIT: c_int = AF_DATAKIT; +pub const PF_CCITT: c_int = AF_CCITT; +pub const PF_SNA: c_int = AF_SNA; +pub const PF_DECnet: c_int = AF_DECnet; +pub const PF_DLI: c_int = AF_DLI; +pub const PF_LAT: c_int = AF_LAT; +pub const PF_HYLINK: c_int = AF_HYLINK; +pub const PF_APPLETALK: c_int = AF_APPLETALK; +pub const PF_ROUTE: c_int = AF_ROUTE; +pub const PF_LINK: c_int = AF_LINK; +pub const PF_XTP: c_int = pseudo_AF_XTP; +pub const PF_COIP: c_int = AF_COIP; +pub const PF_CNT: c_int = AF_CNT; +pub const PF_SIP: c_int = AF_SIP; +pub const PF_IPX: c_int = AF_IPX; +pub const PF_RTIP: c_int = pseudo_AF_RTIP; +pub const PF_PIP: c_int = pseudo_AF_PIP; +pub const PF_NDRV: c_int = AF_NDRV; +pub const PF_ISDN: c_int = AF_ISDN; +pub const PF_KEY: c_int = pseudo_AF_KEY; +pub const PF_INET6: c_int = AF_INET6; +pub const PF_NATM: c_int = AF_NATM; +pub const PF_SYSTEM: c_int = AF_SYSTEM; +pub const PF_NETBIOS: c_int = 
AF_NETBIOS; +pub const PF_PPP: c_int = AF_PPP; +pub const PF_VSOCK: c_int = AF_VSOCK; + +pub const NET_RT_DUMP: c_int = 1; +pub const NET_RT_FLAGS: c_int = 2; +pub const NET_RT_IFLIST: c_int = 3; + +pub const SOMAXCONN: c_int = 128; + +pub const SOCK_MAXADDRLEN: c_int = 255; + +pub const SOCK_STREAM: c_int = 1; +pub const SOCK_DGRAM: c_int = 2; +pub const SOCK_RAW: c_int = 3; +pub const SOCK_RDM: c_int = 4; +pub const SOCK_SEQPACKET: c_int = 5; +pub const IP_TTL: c_int = 4; +pub const IP_HDRINCL: c_int = 2; +pub const IP_RECVDSTADDR: c_int = 7; +pub const IP_ADD_MEMBERSHIP: c_int = 12; +pub const IP_DROP_MEMBERSHIP: c_int = 13; +pub const IP_RECVIF: c_int = 20; +pub const IP_RECVTTL: c_int = 24; +pub const IP_BOUND_IF: c_int = 25; +pub const IP_PKTINFO: c_int = 26; +pub const IP_RECVTOS: c_int = 27; +pub const IP_DONTFRAG: c_int = 28; +pub const IPV6_JOIN_GROUP: c_int = 12; +pub const IPV6_LEAVE_GROUP: c_int = 13; +pub const IPV6_CHECKSUM: c_int = 26; +pub const IPV6_RECVTCLASS: c_int = 35; +pub const IPV6_TCLASS: c_int = 36; +pub const IPV6_RECVHOPLIMIT: c_int = 37; +pub const IPV6_PKTINFO: c_int = 46; +pub const IPV6_HOPLIMIT: c_int = 47; +pub const IPV6_RECVPKTINFO: c_int = 61; +pub const IP_ADD_SOURCE_MEMBERSHIP: c_int = 70; +pub const IP_DROP_SOURCE_MEMBERSHIP: c_int = 71; +pub const IP_BLOCK_SOURCE: c_int = 72; +pub const IP_UNBLOCK_SOURCE: c_int = 73; +pub const IPV6_BOUND_IF: c_int = 125; + +pub const TCP_NOPUSH: c_int = 4; +pub const TCP_NOOPT: c_int = 8; +pub const TCP_KEEPALIVE: c_int = 0x10; +pub const TCP_KEEPINTVL: c_int = 0x101; +pub const TCP_KEEPCNT: c_int = 0x102; +/// Enable/Disable TCP Fastopen on this socket +pub const TCP_FASTOPEN: c_int = 0x105; +pub const TCP_CONNECTION_INFO: c_int = 0x106; + +pub const SOL_LOCAL: c_int = 0; + +/// Retrieve peer credentials. +pub const LOCAL_PEERCRED: c_int = 0x001; +/// Retrieve peer PID. +pub const LOCAL_PEERPID: c_int = 0x002; +/// Retrieve effective peer PID. +pub const LOCAL_PEEREPID: c_int = 0x003; +/// Retrieve peer UUID. +pub const LOCAL_PEERUUID: c_int = 0x004; +/// Retrieve effective peer UUID. +pub const LOCAL_PEEREUUID: c_int = 0x005; +/// Retrieve peer audit token. 
+pub const LOCAL_PEERTOKEN: c_int = 0x006; + +pub const SOL_SOCKET: c_int = 0xffff; + +pub const SO_DEBUG: c_int = 0x01; +pub const SO_ACCEPTCONN: c_int = 0x0002; +pub const SO_REUSEADDR: c_int = 0x0004; +pub const SO_KEEPALIVE: c_int = 0x0008; +pub const SO_DONTROUTE: c_int = 0x0010; +pub const SO_BROADCAST: c_int = 0x0020; +pub const SO_USELOOPBACK: c_int = 0x0040; +pub const SO_LINGER: c_int = 0x0080; +pub const SO_OOBINLINE: c_int = 0x0100; +pub const SO_REUSEPORT: c_int = 0x0200; +pub const SO_TIMESTAMP: c_int = 0x0400; +pub const SO_TIMESTAMP_MONOTONIC: c_int = 0x0800; +pub const SO_DONTTRUNC: c_int = 0x2000; +pub const SO_WANTMORE: c_int = 0x4000; +pub const SO_WANTOOBFLAG: c_int = 0x8000; +pub const SO_SNDBUF: c_int = 0x1001; +pub const SO_RCVBUF: c_int = 0x1002; +pub const SO_SNDLOWAT: c_int = 0x1003; +pub const SO_RCVLOWAT: c_int = 0x1004; +pub const SO_SNDTIMEO: c_int = 0x1005; +pub const SO_RCVTIMEO: c_int = 0x1006; +pub const SO_ERROR: c_int = 0x1007; +pub const SO_TYPE: c_int = 0x1008; +pub const SO_LABEL: c_int = 0x1010; +pub const SO_PEERLABEL: c_int = 0x1011; +pub const SO_NREAD: c_int = 0x1020; +pub const SO_NKE: c_int = 0x1021; +pub const SO_NOSIGPIPE: c_int = 0x1022; +pub const SO_NOADDRERR: c_int = 0x1023; +pub const SO_NWRITE: c_int = 0x1024; +pub const SO_REUSESHAREUID: c_int = 0x1025; +pub const SO_NOTIFYCONFLICT: c_int = 0x1026; +pub const SO_LINGER_SEC: c_int = 0x1080; +pub const SO_RANDOMPORT: c_int = 0x1082; +pub const SO_NP_EXTENSIONS: c_int = 0x1083; + +pub const MSG_OOB: c_int = 0x1; +pub const MSG_PEEK: c_int = 0x2; +pub const MSG_DONTROUTE: c_int = 0x4; +pub const MSG_EOR: c_int = 0x8; +pub const MSG_TRUNC: c_int = 0x10; +pub const MSG_CTRUNC: c_int = 0x20; +pub const MSG_WAITALL: c_int = 0x40; +pub const MSG_DONTWAIT: c_int = 0x80; +pub const MSG_EOF: c_int = 0x100; +pub const MSG_FLUSH: c_int = 0x400; +pub const MSG_HOLD: c_int = 0x800; +pub const MSG_SEND: c_int = 0x1000; +pub const MSG_HAVEMORE: c_int = 0x2000; +pub const MSG_RCVMORE: c_int = 0x4000; +pub const MSG_NEEDSA: c_int = 0x10000; +pub const MSG_NOSIGNAL: c_int = 0x80000; + +pub const SCM_TIMESTAMP: c_int = 0x02; +pub const SCM_CREDS: c_int = 0x03; + +// https://github.com/aosm/xnu/blob/HEAD/bsd/net/if.h#L140-L156 +pub const IFF_UP: c_int = 0x1; // interface is up +pub const IFF_BROADCAST: c_int = 0x2; // broadcast address valid +pub const IFF_DEBUG: c_int = 0x4; // turn on debugging +pub const IFF_LOOPBACK: c_int = 0x8; // is a loopback net +pub const IFF_POINTOPOINT: c_int = 0x10; // interface is point-to-point link +pub const IFF_NOTRAILERS: c_int = 0x20; // obsolete: avoid use of trailers +pub const IFF_RUNNING: c_int = 0x40; // resources allocated +pub const IFF_NOARP: c_int = 0x80; // no address resolution protocol +pub const IFF_PROMISC: c_int = 0x100; // receive all packets +pub const IFF_ALLMULTI: c_int = 0x200; // receive all multicast packets +pub const IFF_OACTIVE: c_int = 0x400; // transmission in progress +pub const IFF_SIMPLEX: c_int = 0x800; // can't hear own transmissions +pub const IFF_LINK0: c_int = 0x1000; // per link layer defined bit +pub const IFF_LINK1: c_int = 0x2000; // per link layer defined bit +pub const IFF_LINK2: c_int = 0x4000; // per link layer defined bit +pub const IFF_ALTPHYS: c_int = IFF_LINK2; // use alternate physical connection +pub const IFF_MULTICAST: c_int = 0x8000; // supports multicast + +pub const SCOPE6_ID_MAX: size_t = 16; + +pub const SHUT_RD: c_int = 0; +pub const SHUT_WR: c_int = 1; +pub const SHUT_RDWR: c_int = 2; + +pub const SAE_ASSOCID_ANY: 
crate::sae_associd_t = 0; +/// ((sae_associd_t)(-1ULL)) +pub const SAE_ASSOCID_ALL: crate::sae_associd_t = 0xffffffff; + +pub const SAE_CONNID_ANY: crate::sae_connid_t = 0; +/// ((sae_connid_t)(-1ULL)) +pub const SAE_CONNID_ALL: crate::sae_connid_t = 0xffffffff; + +// connectx() flag parameters + +/// resume connect() on read/write +pub const CONNECT_RESUME_ON_READ_WRITE: c_uint = 0x1; +/// data is idempotent +pub const CONNECT_DATA_IDEMPOTENT: c_uint = 0x2; +/// data includes security that replaces the TFO-cookie +pub const CONNECT_DATA_AUTHENTICATED: c_uint = 0x4; + +pub const LOCK_SH: c_int = 1; +pub const LOCK_EX: c_int = 2; +pub const LOCK_NB: c_int = 4; +pub const LOCK_UN: c_int = 8; + +pub const MAP_COPY: c_int = 0x0002; +pub const MAP_RENAME: c_int = 0x0020; +pub const MAP_NORESERVE: c_int = 0x0040; +pub const MAP_NOEXTEND: c_int = 0x0100; +pub const MAP_HASSEMAPHORE: c_int = 0x0200; +pub const MAP_NOCACHE: c_int = 0x0400; +pub const MAP_JIT: c_int = 0x0800; + +pub const _SC_ARG_MAX: c_int = 1; +pub const _SC_CHILD_MAX: c_int = 2; +pub const _SC_CLK_TCK: c_int = 3; +pub const _SC_NGROUPS_MAX: c_int = 4; +pub const _SC_OPEN_MAX: c_int = 5; +pub const _SC_JOB_CONTROL: c_int = 6; +pub const _SC_SAVED_IDS: c_int = 7; +pub const _SC_VERSION: c_int = 8; +pub const _SC_BC_BASE_MAX: c_int = 9; +pub const _SC_BC_DIM_MAX: c_int = 10; +pub const _SC_BC_SCALE_MAX: c_int = 11; +pub const _SC_BC_STRING_MAX: c_int = 12; +pub const _SC_COLL_WEIGHTS_MAX: c_int = 13; +pub const _SC_EXPR_NEST_MAX: c_int = 14; +pub const _SC_LINE_MAX: c_int = 15; +pub const _SC_RE_DUP_MAX: c_int = 16; +pub const _SC_2_VERSION: c_int = 17; +pub const _SC_2_C_BIND: c_int = 18; +pub const _SC_2_C_DEV: c_int = 19; +pub const _SC_2_CHAR_TERM: c_int = 20; +pub const _SC_2_FORT_DEV: c_int = 21; +pub const _SC_2_FORT_RUN: c_int = 22; +pub const _SC_2_LOCALEDEF: c_int = 23; +pub const _SC_2_SW_DEV: c_int = 24; +pub const _SC_2_UPE: c_int = 25; +pub const _SC_STREAM_MAX: c_int = 26; +pub const _SC_TZNAME_MAX: c_int = 27; +pub const _SC_ASYNCHRONOUS_IO: c_int = 28; +pub const _SC_PAGESIZE: c_int = 29; +pub const _SC_MEMLOCK: c_int = 30; +pub const _SC_MEMLOCK_RANGE: c_int = 31; +pub const _SC_MEMORY_PROTECTION: c_int = 32; +pub const _SC_MESSAGE_PASSING: c_int = 33; +pub const _SC_PRIORITIZED_IO: c_int = 34; +pub const _SC_PRIORITY_SCHEDULING: c_int = 35; +pub const _SC_REALTIME_SIGNALS: c_int = 36; +pub const _SC_SEMAPHORES: c_int = 37; +pub const _SC_FSYNC: c_int = 38; +pub const _SC_SHARED_MEMORY_OBJECTS: c_int = 39; +pub const _SC_SYNCHRONIZED_IO: c_int = 40; +pub const _SC_TIMERS: c_int = 41; +pub const _SC_AIO_LISTIO_MAX: c_int = 42; +pub const _SC_AIO_MAX: c_int = 43; +pub const _SC_AIO_PRIO_DELTA_MAX: c_int = 44; +pub const _SC_DELAYTIMER_MAX: c_int = 45; +pub const _SC_MQ_OPEN_MAX: c_int = 46; +pub const _SC_MAPPED_FILES: c_int = 47; +pub const _SC_RTSIG_MAX: c_int = 48; +pub const _SC_SEM_NSEMS_MAX: c_int = 49; +pub const _SC_SEM_VALUE_MAX: c_int = 50; +pub const _SC_SIGQUEUE_MAX: c_int = 51; +pub const _SC_TIMER_MAX: c_int = 52; +pub const _SC_NPROCESSORS_CONF: c_int = 57; +pub const _SC_NPROCESSORS_ONLN: c_int = 58; +pub const _SC_2_PBS: c_int = 59; +pub const _SC_2_PBS_ACCOUNTING: c_int = 60; +pub const _SC_2_PBS_CHECKPOINT: c_int = 61; +pub const _SC_2_PBS_LOCATE: c_int = 62; +pub const _SC_2_PBS_MESSAGE: c_int = 63; +pub const _SC_2_PBS_TRACK: c_int = 64; +pub const _SC_ADVISORY_INFO: c_int = 65; +pub const _SC_BARRIERS: c_int = 66; +pub const _SC_CLOCK_SELECTION: c_int = 67; +pub const _SC_CPUTIME: c_int = 68; 
+pub const _SC_FILE_LOCKING: c_int = 69; +pub const _SC_HOST_NAME_MAX: c_int = 72; +pub const _SC_MONOTONIC_CLOCK: c_int = 74; +pub const _SC_READER_WRITER_LOCKS: c_int = 76; +pub const _SC_REGEXP: c_int = 77; +pub const _SC_SHELL: c_int = 78; +pub const _SC_SPAWN: c_int = 79; +pub const _SC_SPIN_LOCKS: c_int = 80; +pub const _SC_SPORADIC_SERVER: c_int = 81; +pub const _SC_THREAD_CPUTIME: c_int = 84; +pub const _SC_THREAD_SPORADIC_SERVER: c_int = 92; +pub const _SC_TIMEOUTS: c_int = 95; +pub const _SC_TRACE: c_int = 97; +pub const _SC_TRACE_EVENT_FILTER: c_int = 98; +pub const _SC_TRACE_INHERIT: c_int = 99; +pub const _SC_TRACE_LOG: c_int = 100; +pub const _SC_TYPED_MEMORY_OBJECTS: c_int = 102; +pub const _SC_V6_ILP32_OFF32: c_int = 103; +pub const _SC_V6_ILP32_OFFBIG: c_int = 104; +pub const _SC_V6_LP64_OFF64: c_int = 105; +pub const _SC_V6_LPBIG_OFFBIG: c_int = 106; +pub const _SC_IPV6: c_int = 118; +pub const _SC_RAW_SOCKETS: c_int = 119; +pub const _SC_SYMLOOP_MAX: c_int = 120; +pub const _SC_PAGE_SIZE: c_int = _SC_PAGESIZE; +pub const _SC_XOPEN_STREAMS: c_int = 114; +pub const _SC_XBS5_ILP32_OFF32: c_int = 122; +pub const _SC_XBS5_ILP32_OFFBIG: c_int = 123; +pub const _SC_XBS5_LP64_OFF64: c_int = 124; +pub const _SC_XBS5_LPBIG_OFFBIG: c_int = 125; +pub const _SC_SS_REPL_MAX: c_int = 126; +pub const _SC_TRACE_EVENT_NAME_MAX: c_int = 127; +pub const _SC_TRACE_NAME_MAX: c_int = 128; +pub const _SC_TRACE_SYS_MAX: c_int = 129; +pub const _SC_TRACE_USER_EVENT_MAX: c_int = 130; +pub const _SC_PASS_MAX: c_int = 131; +// `confstr` keys (only the values guaranteed by `man confstr`). +pub const _CS_PATH: c_int = 1; +pub const _CS_DARWIN_USER_DIR: c_int = 65536; +pub const _CS_DARWIN_USER_TEMP_DIR: c_int = 65537; +pub const _CS_DARWIN_USER_CACHE_DIR: c_int = 65538; + +pub const PTHREAD_MUTEX_NORMAL: c_int = 0; +pub const PTHREAD_MUTEX_ERRORCHECK: c_int = 1; +pub const PTHREAD_MUTEX_RECURSIVE: c_int = 2; +pub const PTHREAD_MUTEX_DEFAULT: c_int = PTHREAD_MUTEX_NORMAL; +pub const _PTHREAD_MUTEX_SIG_init: c_long = 0x32AAABA7; +pub const _PTHREAD_COND_SIG_init: c_long = 0x3CB0B1BB; +pub const _PTHREAD_RWLOCK_SIG_init: c_long = 0x2DA8B3B4; +pub const PTHREAD_MUTEX_INITIALIZER: pthread_mutex_t = pthread_mutex_t { + __sig: _PTHREAD_MUTEX_SIG_init, + __opaque: [0; __PTHREAD_MUTEX_SIZE__], +}; +pub const PTHREAD_COND_INITIALIZER: pthread_cond_t = pthread_cond_t { + __sig: _PTHREAD_COND_SIG_init, + __opaque: [0; __PTHREAD_COND_SIZE__], +}; +pub const PTHREAD_RWLOCK_INITIALIZER: pthread_rwlock_t = pthread_rwlock_t { + __sig: _PTHREAD_RWLOCK_SIG_init, + __opaque: [0; __PTHREAD_RWLOCK_SIZE__], +}; + +pub const OS_UNFAIR_LOCK_INIT: os_unfair_lock = os_unfair_lock { + _os_unfair_lock_opaque: 0, +}; + +pub const OS_LOG_TYPE_DEFAULT: crate::os_log_type_t = 0x00; +pub const OS_LOG_TYPE_INFO: crate::os_log_type_t = 0x01; +pub const OS_LOG_TYPE_DEBUG: crate::os_log_type_t = 0x02; +pub const OS_LOG_TYPE_ERROR: crate::os_log_type_t = 0x10; +pub const OS_LOG_TYPE_FAULT: crate::os_log_type_t = 0x11; + +pub const OS_SIGNPOST_EVENT: crate::os_signpost_type_t = 0x00; +pub const OS_SIGNPOST_INTERVAL_BEGIN: crate::os_signpost_type_t = 0x01; +pub const OS_SIGNPOST_INTERVAL_END: crate::os_signpost_type_t = 0x02; + +pub const MINSIGSTKSZ: size_t = 32768; +pub const SIGSTKSZ: size_t = 131072; + +pub const FD_SETSIZE: usize = 1024; + +pub const ST_NOSUID: c_ulong = 2; + +pub const SCHED_OTHER: c_int = 1; +pub const SCHED_FIFO: c_int = 4; +pub const SCHED_RR: c_int = 2; + +pub const EVFILT_READ: i16 = -1; +pub const EVFILT_WRITE: 
i16 = -2; +pub const EVFILT_AIO: i16 = -3; +pub const EVFILT_VNODE: i16 = -4; +pub const EVFILT_PROC: i16 = -5; +pub const EVFILT_SIGNAL: i16 = -6; +pub const EVFILT_TIMER: i16 = -7; +pub const EVFILT_MACHPORT: i16 = -8; +pub const EVFILT_FS: i16 = -9; +pub const EVFILT_USER: i16 = -10; +pub const EVFILT_VM: i16 = -12; + +pub const EV_ADD: u16 = 0x1; +pub const EV_DELETE: u16 = 0x2; +pub const EV_ENABLE: u16 = 0x4; +pub const EV_DISABLE: u16 = 0x8; +pub const EV_ONESHOT: u16 = 0x10; +pub const EV_CLEAR: u16 = 0x20; +pub const EV_RECEIPT: u16 = 0x40; +pub const EV_DISPATCH: u16 = 0x80; +pub const EV_FLAG0: u16 = 0x1000; +pub const EV_POLL: u16 = 0x1000; +pub const EV_FLAG1: u16 = 0x2000; +pub const EV_OOBAND: u16 = 0x2000; +pub const EV_ERROR: u16 = 0x4000; +pub const EV_EOF: u16 = 0x8000; +pub const EV_SYSFLAGS: u16 = 0xf000; + +pub const NOTE_TRIGGER: u32 = 0x01000000; +pub const NOTE_FFNOP: u32 = 0x00000000; +pub const NOTE_FFAND: u32 = 0x40000000; +pub const NOTE_FFOR: u32 = 0x80000000; +pub const NOTE_FFCOPY: u32 = 0xc0000000; +pub const NOTE_FFCTRLMASK: u32 = 0xc0000000; +pub const NOTE_FFLAGSMASK: u32 = 0x00ffffff; +pub const NOTE_LOWAT: u32 = 0x00000001; +pub const NOTE_DELETE: u32 = 0x00000001; +pub const NOTE_WRITE: u32 = 0x00000002; +pub const NOTE_EXTEND: u32 = 0x00000004; +pub const NOTE_ATTRIB: u32 = 0x00000008; +pub const NOTE_LINK: u32 = 0x00000010; +pub const NOTE_RENAME: u32 = 0x00000020; +pub const NOTE_REVOKE: u32 = 0x00000040; +pub const NOTE_NONE: u32 = 0x00000080; +pub const NOTE_EXIT: u32 = 0x80000000; +pub const NOTE_FORK: u32 = 0x40000000; +pub const NOTE_EXEC: u32 = 0x20000000; +#[doc(hidden)] +#[deprecated(since = "0.2.49", note = "Deprecated since MacOSX 10.9")] +pub const NOTE_REAP: u32 = 0x10000000; +pub const NOTE_SIGNAL: u32 = 0x08000000; +pub const NOTE_EXITSTATUS: u32 = 0x04000000; +pub const NOTE_EXIT_DETAIL: u32 = 0x02000000; +pub const NOTE_PDATAMASK: u32 = 0x000fffff; +pub const NOTE_PCTRLMASK: u32 = 0xfff00000; +#[doc(hidden)] +#[deprecated(since = "0.2.49", note = "Deprecated since MacOSX 10.9")] +pub const NOTE_EXIT_REPARENTED: u32 = 0x00080000; +pub const NOTE_EXIT_DETAIL_MASK: u32 = 0x00070000; +pub const NOTE_EXIT_DECRYPTFAIL: u32 = 0x00010000; +pub const NOTE_EXIT_MEMORY: u32 = 0x00020000; +pub const NOTE_EXIT_CSERROR: u32 = 0x00040000; +pub const NOTE_VM_PRESSURE: u32 = 0x80000000; +pub const NOTE_VM_PRESSURE_TERMINATE: u32 = 0x40000000; +pub const NOTE_VM_PRESSURE_SUDDEN_TERMINATE: u32 = 0x20000000; +pub const NOTE_VM_ERROR: u32 = 0x10000000; +pub const NOTE_SECONDS: u32 = 0x00000001; +pub const NOTE_USECONDS: u32 = 0x00000002; +pub const NOTE_NSECONDS: u32 = 0x00000004; +pub const NOTE_ABSOLUTE: u32 = 0x00000008; +pub const NOTE_LEEWAY: u32 = 0x00000010; +pub const NOTE_CRITICAL: u32 = 0x00000020; +pub const NOTE_BACKGROUND: u32 = 0x00000040; +pub const NOTE_MACH_CONTINUOUS_TIME: u32 = 0x00000080; +pub const NOTE_MACHTIME: u32 = 0x00000100; +pub const NOTE_TRACK: u32 = 0x00000001; +pub const NOTE_TRACKERR: u32 = 0x00000002; +pub const NOTE_CHILD: u32 = 0x00000004; + +pub const OCRNL: crate::tcflag_t = 0x00000010; +pub const ONOCR: crate::tcflag_t = 0x00000020; +pub const ONLRET: crate::tcflag_t = 0x00000040; +pub const OFILL: crate::tcflag_t = 0x00000080; +pub const NLDLY: crate::tcflag_t = 0x00000300; +pub const TABDLY: crate::tcflag_t = 0x00000c04; +pub const CRDLY: crate::tcflag_t = 0x00003000; +pub const FFDLY: crate::tcflag_t = 0x00004000; +pub const BSDLY: crate::tcflag_t = 0x00008000; +pub const VTDLY: crate::tcflag_t = 0x00010000; 
+pub const OFDEL: crate::tcflag_t = 0x00020000; + +pub const NL0: crate::tcflag_t = 0x00000000; +pub const NL1: crate::tcflag_t = 0x00000100; +pub const TAB0: crate::tcflag_t = 0x00000000; +pub const TAB1: crate::tcflag_t = 0x00000400; +pub const TAB2: crate::tcflag_t = 0x00000800; +pub const CR0: crate::tcflag_t = 0x00000000; +pub const CR1: crate::tcflag_t = 0x00001000; +pub const CR2: crate::tcflag_t = 0x00002000; +pub const CR3: crate::tcflag_t = 0x00003000; +pub const FF0: crate::tcflag_t = 0x00000000; +pub const FF1: crate::tcflag_t = 0x00004000; +pub const BS0: crate::tcflag_t = 0x00000000; +pub const BS1: crate::tcflag_t = 0x00008000; +pub const TAB3: crate::tcflag_t = 0x00000004; +pub const VT0: crate::tcflag_t = 0x00000000; +pub const VT1: crate::tcflag_t = 0x00010000; +pub const IUTF8: crate::tcflag_t = 0x00004000; +pub const CRTSCTS: crate::tcflag_t = 0x00030000; + +pub const NI_MAXHOST: crate::socklen_t = 1025; +pub const NI_MAXSERV: crate::socklen_t = 32; +pub const NI_NOFQDN: c_int = 0x00000001; +pub const NI_NUMERICHOST: c_int = 0x00000002; +pub const NI_NAMEREQD: c_int = 0x00000004; +pub const NI_NUMERICSERV: c_int = 0x00000008; +pub const NI_NUMERICSCOPE: c_int = 0x00000100; +pub const NI_DGRAM: c_int = 0x00000010; + +pub const Q_GETQUOTA: c_int = 0x300; +pub const Q_SETQUOTA: c_int = 0x400; + +pub const RENAME_SWAP: c_uint = 0x00000002; +pub const RENAME_EXCL: c_uint = 0x00000004; + +pub const RTLD_LOCAL: c_int = 0x4; +pub const RTLD_FIRST: c_int = 0x100; +pub const RTLD_NODELETE: c_int = 0x80; +pub const RTLD_NOLOAD: c_int = 0x10; +pub const RTLD_GLOBAL: c_int = 0x8; +pub const RTLD_MAIN_ONLY: *mut c_void = -5isize as *mut c_void; + +pub const _WSTOPPED: c_int = 0o177; + +pub const LOG_NETINFO: c_int = 12 << 3; +pub const LOG_REMOTEAUTH: c_int = 13 << 3; +pub const LOG_INSTALL: c_int = 14 << 3; +pub const LOG_RAS: c_int = 15 << 3; +pub const LOG_LAUNCHD: c_int = 24 << 3; +pub const LOG_NFACILITIES: c_int = 25; + +pub const CTLTYPE: c_int = 0xf; +pub const CTLTYPE_NODE: c_int = 1; +pub const CTLTYPE_INT: c_int = 2; +pub const CTLTYPE_STRING: c_int = 3; +pub const CTLTYPE_QUAD: c_int = 4; +pub const CTLTYPE_OPAQUE: c_int = 5; +pub const CTLTYPE_STRUCT: c_int = CTLTYPE_OPAQUE; +pub const CTLFLAG_RD: c_int = 0x80000000; +pub const CTLFLAG_WR: c_int = 0x40000000; +pub const CTLFLAG_RW: c_int = CTLFLAG_RD | CTLFLAG_WR; +pub const CTLFLAG_NOLOCK: c_int = 0x20000000; +pub const CTLFLAG_ANYBODY: c_int = 0x10000000; +pub const CTLFLAG_SECURE: c_int = 0x08000000; +pub const CTLFLAG_MASKED: c_int = 0x04000000; +pub const CTLFLAG_NOAUTO: c_int = 0x02000000; +pub const CTLFLAG_KERN: c_int = 0x01000000; +pub const CTLFLAG_LOCKED: c_int = 0x00800000; +pub const CTLFLAG_OID2: c_int = 0x00400000; +pub const CTL_UNSPEC: c_int = 0; +pub const CTL_KERN: c_int = 1; +pub const CTL_VM: c_int = 2; +pub const CTL_VFS: c_int = 3; +pub const CTL_NET: c_int = 4; +pub const CTL_DEBUG: c_int = 5; +pub const CTL_HW: c_int = 6; +pub const CTL_MACHDEP: c_int = 7; +pub const CTL_USER: c_int = 8; +pub const CTL_MAXID: c_int = 9; +pub const KERN_OSTYPE: c_int = 1; +pub const KERN_OSRELEASE: c_int = 2; +pub const KERN_OSREV: c_int = 3; +pub const KERN_VERSION: c_int = 4; +pub const KERN_MAXVNODES: c_int = 5; +pub const KERN_MAXPROC: c_int = 6; +pub const KERN_MAXFILES: c_int = 7; +pub const KERN_ARGMAX: c_int = 8; +pub const KERN_SECURELVL: c_int = 9; +pub const KERN_HOSTNAME: c_int = 10; +pub const KERN_HOSTID: c_int = 11; +pub const KERN_CLOCKRATE: c_int = 12; +pub const KERN_VNODE: c_int = 13; +pub const 
KERN_PROC: c_int = 14; +pub const KERN_FILE: c_int = 15; +pub const KERN_PROF: c_int = 16; +pub const KERN_POSIX1: c_int = 17; +pub const KERN_NGROUPS: c_int = 18; +pub const KERN_JOB_CONTROL: c_int = 19; +pub const KERN_SAVED_IDS: c_int = 20; +pub const KERN_BOOTTIME: c_int = 21; +pub const KERN_NISDOMAINNAME: c_int = 22; +pub const KERN_DOMAINNAME: c_int = KERN_NISDOMAINNAME; +pub const KERN_MAXPARTITIONS: c_int = 23; +pub const KERN_KDEBUG: c_int = 24; +pub const KERN_UPDATEINTERVAL: c_int = 25; +pub const KERN_OSRELDATE: c_int = 26; +pub const KERN_NTP_PLL: c_int = 27; +pub const KERN_BOOTFILE: c_int = 28; +pub const KERN_MAXFILESPERPROC: c_int = 29; +pub const KERN_MAXPROCPERUID: c_int = 30; +pub const KERN_DUMPDEV: c_int = 31; +pub const KERN_IPC: c_int = 32; +pub const KERN_DUMMY: c_int = 33; +pub const KERN_PS_STRINGS: c_int = 34; +pub const KERN_USRSTACK32: c_int = 35; +pub const KERN_LOGSIGEXIT: c_int = 36; +pub const KERN_SYMFILE: c_int = 37; +pub const KERN_PROCARGS: c_int = 38; +pub const KERN_NETBOOT: c_int = 40; +pub const KERN_SYSV: c_int = 42; +pub const KERN_AFFINITY: c_int = 43; +pub const KERN_TRANSLATE: c_int = 44; +pub const KERN_CLASSIC: c_int = KERN_TRANSLATE; +pub const KERN_EXEC: c_int = 45; +pub const KERN_CLASSICHANDLER: c_int = KERN_EXEC; +pub const KERN_AIOMAX: c_int = 46; +pub const KERN_AIOPROCMAX: c_int = 47; +pub const KERN_AIOTHREADS: c_int = 48; +pub const KERN_COREFILE: c_int = 50; +pub const KERN_COREDUMP: c_int = 51; +pub const KERN_SUGID_COREDUMP: c_int = 52; +pub const KERN_PROCDELAYTERM: c_int = 53; +pub const KERN_SHREG_PRIVATIZABLE: c_int = 54; +pub const KERN_LOW_PRI_WINDOW: c_int = 56; +pub const KERN_LOW_PRI_DELAY: c_int = 57; +pub const KERN_POSIX: c_int = 58; +pub const KERN_USRSTACK64: c_int = 59; +pub const KERN_NX_PROTECTION: c_int = 60; +pub const KERN_TFP: c_int = 61; +pub const KERN_PROCNAME: c_int = 62; +pub const KERN_THALTSTACK: c_int = 63; +pub const KERN_SPECULATIVE_READS: c_int = 64; +pub const KERN_OSVERSION: c_int = 65; +pub const KERN_SAFEBOOT: c_int = 66; +pub const KERN_RAGEVNODE: c_int = 68; +pub const KERN_TTY: c_int = 69; +pub const KERN_CHECKOPENEVT: c_int = 70; +pub const KERN_THREADNAME: c_int = 71; +pub const KERN_MAXID: c_int = 72; +pub const KERN_RAGE_PROC: c_int = 1; +pub const KERN_RAGE_THREAD: c_int = 2; +pub const KERN_UNRAGE_PROC: c_int = 3; +pub const KERN_UNRAGE_THREAD: c_int = 4; +pub const KERN_OPENEVT_PROC: c_int = 1; +pub const KERN_UNOPENEVT_PROC: c_int = 2; +pub const KERN_TFP_POLICY: c_int = 1; +pub const KERN_TFP_POLICY_DENY: c_int = 0; +pub const KERN_TFP_POLICY_DEFAULT: c_int = 2; +pub const KERN_KDEFLAGS: c_int = 1; +pub const KERN_KDDFLAGS: c_int = 2; +pub const KERN_KDENABLE: c_int = 3; +pub const KERN_KDSETBUF: c_int = 4; +pub const KERN_KDGETBUF: c_int = 5; +pub const KERN_KDSETUP: c_int = 6; +pub const KERN_KDREMOVE: c_int = 7; +pub const KERN_KDSETREG: c_int = 8; +pub const KERN_KDGETREG: c_int = 9; +pub const KERN_KDREADTR: c_int = 10; +pub const KERN_KDPIDTR: c_int = 11; +pub const KERN_KDTHRMAP: c_int = 12; +pub const KERN_KDPIDEX: c_int = 14; +pub const KERN_KDSETRTCDEC: c_int = 15; +pub const KERN_KDGETENTROPY: c_int = 16; +pub const KERN_KDWRITETR: c_int = 17; +pub const KERN_KDWRITEMAP: c_int = 18; +#[doc(hidden)] +#[deprecated(since = "0.2.49", note = "Removed in MacOSX 10.12")] +pub const KERN_KDENABLE_BG_TRACE: c_int = 19; +#[doc(hidden)] +#[deprecated(since = "0.2.49", note = "Removed in MacOSX 10.12")] +pub const KERN_KDDISABLE_BG_TRACE: c_int = 20; +pub const 
KERN_KDREADCURTHRMAP: c_int = 21; +pub const KERN_KDSET_TYPEFILTER: c_int = 22; +pub const KERN_KDBUFWAIT: c_int = 23; +pub const KERN_KDCPUMAP: c_int = 24; +pub const KERN_PROC_ALL: c_int = 0; +pub const KERN_PROC_PID: c_int = 1; +pub const KERN_PROC_PGRP: c_int = 2; +pub const KERN_PROC_SESSION: c_int = 3; +pub const KERN_PROC_TTY: c_int = 4; +pub const KERN_PROC_UID: c_int = 5; +pub const KERN_PROC_RUID: c_int = 6; +pub const KERN_PROC_LCID: c_int = 7; +pub const KERN_SUCCESS: c_int = 0; +pub const KERN_INVALID_ADDRESS: c_int = 1; +pub const KERN_PROTECTION_FAILURE: c_int = 2; +pub const KERN_NO_SPACE: c_int = 3; +pub const KERN_INVALID_ARGUMENT: c_int = 4; +pub const KERN_FAILURE: c_int = 5; +pub const KERN_RESOURCE_SHORTAGE: c_int = 6; +pub const KERN_NOT_RECEIVER: c_int = 7; +pub const KERN_NO_ACCESS: c_int = 8; +pub const KERN_MEMORY_FAILURE: c_int = 9; +pub const KERN_MEMORY_ERROR: c_int = 10; +pub const KERN_ALREADY_IN_SET: c_int = 11; +pub const KERN_NOT_IN_SET: c_int = 12; +pub const KERN_NAME_EXISTS: c_int = 13; +pub const KERN_ABORTED: c_int = 14; +pub const KERN_INVALID_NAME: c_int = 15; +pub const KERN_INVALID_TASK: c_int = 16; +pub const KERN_INVALID_RIGHT: c_int = 17; +pub const KERN_INVALID_VALUE: c_int = 18; +pub const KERN_UREFS_OVERFLOW: c_int = 19; +pub const KERN_INVALID_CAPABILITY: c_int = 20; +pub const KERN_RIGHT_EXISTS: c_int = 21; +pub const KERN_INVALID_HOST: c_int = 22; +pub const KERN_MEMORY_PRESENT: c_int = 23; +pub const KERN_MEMORY_DATA_MOVED: c_int = 24; +pub const KERN_MEMORY_RESTART_COPY: c_int = 25; +pub const KERN_INVALID_PROCESSOR_SET: c_int = 26; +pub const KERN_POLICY_LIMIT: c_int = 27; +pub const KERN_INVALID_POLICY: c_int = 28; +pub const KERN_INVALID_OBJECT: c_int = 29; +pub const KERN_ALREADY_WAITING: c_int = 30; +pub const KERN_DEFAULT_SET: c_int = 31; +pub const KERN_EXCEPTION_PROTECTED: c_int = 32; +pub const KERN_INVALID_LEDGER: c_int = 33; +pub const KERN_INVALID_MEMORY_CONTROL: c_int = 34; +pub const KERN_INVALID_SECURITY: c_int = 35; +pub const KERN_NOT_DEPRESSED: c_int = 36; +pub const KERN_TERMINATED: c_int = 37; +pub const KERN_LOCK_SET_DESTROYED: c_int = 38; +pub const KERN_LOCK_UNSTABLE: c_int = 39; +pub const KERN_LOCK_OWNED: c_int = 40; +pub const KERN_LOCK_OWNED_SELF: c_int = 41; +pub const KERN_SEMAPHORE_DESTROYED: c_int = 42; +pub const KERN_RPC_SERVER_TERMINATED: c_int = 43; +pub const KERN_RPC_TERMINATE_ORPHAN: c_int = 44; +pub const KERN_RPC_CONTINUE_ORPHAN: c_int = 45; +pub const KERN_NOT_SUPPORTED: c_int = 46; +pub const KERN_NODE_DOWN: c_int = 47; +pub const KERN_NOT_WAITING: c_int = 48; +pub const KERN_OPERATION_TIMED_OUT: c_int = 49; +pub const KERN_CODESIGN_ERROR: c_int = 50; +pub const KERN_POLICY_STATIC: c_int = 51; +pub const KERN_INSUFFICIENT_BUFFER_SIZE: c_int = 52; +pub const KIPC_MAXSOCKBUF: c_int = 1; +pub const KIPC_SOCKBUF_WASTE: c_int = 2; +pub const KIPC_SOMAXCONN: c_int = 3; +pub const KIPC_MAX_LINKHDR: c_int = 4; +pub const KIPC_MAX_PROTOHDR: c_int = 5; +pub const KIPC_MAX_HDR: c_int = 6; +pub const KIPC_MAX_DATALEN: c_int = 7; +pub const KIPC_MBSTAT: c_int = 8; +pub const KIPC_NMBCLUSTERS: c_int = 9; +pub const KIPC_SOQLIMITCOMPAT: c_int = 10; +pub const VM_METER: c_int = 1; +pub const VM_LOADAVG: c_int = 2; +pub const VM_MACHFACTOR: c_int = 4; +pub const VM_SWAPUSAGE: c_int = 5; +pub const VM_MAXID: c_int = 6; +pub const VM_PROT_NONE: crate::vm_prot_t = 0x00; +pub const VM_PROT_READ: crate::vm_prot_t = 0x01; +pub const VM_PROT_WRITE: crate::vm_prot_t = 0x02; +pub const VM_PROT_EXECUTE: crate::vm_prot_t 
= 0x04; +pub const MEMORY_OBJECT_NULL: crate::memory_object_t = 0; +pub const HW_MACHINE: c_int = 1; +pub const HW_MODEL: c_int = 2; +pub const HW_NCPU: c_int = 3; +pub const HW_BYTEORDER: c_int = 4; +pub const HW_PHYSMEM: c_int = 5; +pub const HW_USERMEM: c_int = 6; +pub const HW_PAGESIZE: c_int = 7; +pub const HW_DISKNAMES: c_int = 8; +pub const HW_DISKSTATS: c_int = 9; +pub const HW_EPOCH: c_int = 10; +pub const HW_FLOATINGPT: c_int = 11; +pub const HW_MACHINE_ARCH: c_int = 12; +pub const HW_VECTORUNIT: c_int = 13; +pub const HW_BUS_FREQ: c_int = 14; +pub const HW_CPU_FREQ: c_int = 15; +pub const HW_CACHELINE: c_int = 16; +pub const HW_L1ICACHESIZE: c_int = 17; +pub const HW_L1DCACHESIZE: c_int = 18; +pub const HW_L2SETTINGS: c_int = 19; +pub const HW_L2CACHESIZE: c_int = 20; +pub const HW_L3SETTINGS: c_int = 21; +pub const HW_L3CACHESIZE: c_int = 22; +pub const HW_TB_FREQ: c_int = 23; +pub const HW_MEMSIZE: c_int = 24; +pub const HW_AVAILCPU: c_int = 25; +pub const HW_TARGET: c_int = 26; +pub const HW_PRODUCT: c_int = 27; +pub const HW_MAXID: c_int = 28; +pub const USER_CS_PATH: c_int = 1; +pub const USER_BC_BASE_MAX: c_int = 2; +pub const USER_BC_DIM_MAX: c_int = 3; +pub const USER_BC_SCALE_MAX: c_int = 4; +pub const USER_BC_STRING_MAX: c_int = 5; +pub const USER_COLL_WEIGHTS_MAX: c_int = 6; +pub const USER_EXPR_NEST_MAX: c_int = 7; +pub const USER_LINE_MAX: c_int = 8; +pub const USER_RE_DUP_MAX: c_int = 9; +pub const USER_POSIX2_VERSION: c_int = 10; +pub const USER_POSIX2_C_BIND: c_int = 11; +pub const USER_POSIX2_C_DEV: c_int = 12; +pub const USER_POSIX2_CHAR_TERM: c_int = 13; +pub const USER_POSIX2_FORT_DEV: c_int = 14; +pub const USER_POSIX2_FORT_RUN: c_int = 15; +pub const USER_POSIX2_LOCALEDEF: c_int = 16; +pub const USER_POSIX2_SW_DEV: c_int = 17; +pub const USER_POSIX2_UPE: c_int = 18; +pub const USER_STREAM_MAX: c_int = 19; +pub const USER_TZNAME_MAX: c_int = 20; +pub const USER_MAXID: c_int = 21; +pub const CTL_DEBUG_NAME: c_int = 0; +pub const CTL_DEBUG_VALUE: c_int = 1; +pub const CTL_DEBUG_MAXID: c_int = 20; + +pub const PRIO_DARWIN_THREAD: c_int = 3; +pub const PRIO_DARWIN_PROCESS: c_int = 4; +pub const PRIO_DARWIN_BG: c_int = 0x1000; +pub const PRIO_DARWIN_NONUI: c_int = 0x1001; + +pub const SEM_FAILED: *mut sem_t = -1isize as *mut crate::sem_t; + +pub const AI_PASSIVE: c_int = 0x00000001; +pub const AI_CANONNAME: c_int = 0x00000002; +pub const AI_NUMERICHOST: c_int = 0x00000004; +pub const AI_NUMERICSERV: c_int = 0x00001000; +pub const AI_MASK: c_int = + AI_PASSIVE | AI_CANONNAME | AI_NUMERICHOST | AI_NUMERICSERV | AI_ADDRCONFIG; +pub const AI_ALL: c_int = 0x00000100; +pub const AI_V4MAPPED_CFG: c_int = 0x00000200; +pub const AI_ADDRCONFIG: c_int = 0x00000400; +pub const AI_V4MAPPED: c_int = 0x00000800; +pub const AI_DEFAULT: c_int = AI_V4MAPPED_CFG | AI_ADDRCONFIG; +pub const AI_UNUSABLE: c_int = 0x10000000; + +pub const SIGEV_NONE: c_int = 0; +pub const SIGEV_SIGNAL: c_int = 1; +pub const SIGEV_THREAD: c_int = 3; + +pub const AIO_CANCELED: c_int = 2; +pub const AIO_NOTCANCELED: c_int = 4; +pub const AIO_ALLDONE: c_int = 1; +#[deprecated( + since = "0.2.64", + note = "Can vary at runtime. 
Use sysconf(3) instead" +)] +pub const AIO_LISTIO_MAX: c_int = 16; +pub const LIO_NOP: c_int = 0; +pub const LIO_WRITE: c_int = 2; +pub const LIO_READ: c_int = 1; +pub const LIO_WAIT: c_int = 2; +pub const LIO_NOWAIT: c_int = 1; + +pub const WEXITED: c_int = 0x00000004; +pub const WSTOPPED: c_int = 0x00000008; +pub const WCONTINUED: c_int = 0x00000010; +pub const WNOWAIT: c_int = 0x00000020; + +pub const P_ALL: idtype_t = 0; +pub const P_PID: idtype_t = 1; +pub const P_PGID: idtype_t = 2; + +pub const UTIME_OMIT: c_long = -2; +pub const UTIME_NOW: c_long = -1; + +pub const XATTR_NOFOLLOW: c_int = 0x0001; +pub const XATTR_CREATE: c_int = 0x0002; +pub const XATTR_REPLACE: c_int = 0x0004; +pub const XATTR_NOSECURITY: c_int = 0x0008; +pub const XATTR_NODEFAULT: c_int = 0x0010; +pub const XATTR_SHOWCOMPRESSION: c_int = 0x0020; + +pub const NET_RT_IFLIST2: c_int = 0x0006; + +// net/route.h +pub const RTF_DELCLONE: c_int = 0x80; +pub const RTF_CLONING: c_int = 0x100; +pub const RTF_XRESOLVE: c_int = 0x200; +pub const RTF_LLINFO: c_int = 0x400; +pub const RTF_NOIFREF: c_int = 0x2000; +pub const RTF_PRCLONING: c_int = 0x10000; +pub const RTF_WASCLONED: c_int = 0x20000; +pub const RTF_PROTO3: c_int = 0x40000; +pub const RTF_PINNED: c_int = 0x100000; +pub const RTF_LOCAL: c_int = 0x200000; +pub const RTF_BROADCAST: c_int = 0x400000; +pub const RTF_MULTICAST: c_int = 0x800000; +pub const RTF_IFSCOPE: c_int = 0x1000000; +pub const RTF_CONDEMNED: c_int = 0x2000000; +pub const RTF_IFREF: c_int = 0x4000000; +pub const RTF_PROXY: c_int = 0x8000000; +pub const RTF_ROUTER: c_int = 0x10000000; +pub const RTF_DEAD: c_int = 0x20000000; +pub const RTF_GLOBAL: c_int = 0x40000000; + +pub const RTM_VERSION: c_int = 5; + +// Message types +pub const RTM_LOCK: c_int = 0x8; +pub const RTM_OLDADD: c_int = 0x9; +pub const RTM_OLDDEL: c_int = 0xa; +pub const RTM_RESOLVE: c_int = 0xb; +pub const RTM_NEWADDR: c_int = 0xc; +pub const RTM_DELADDR: c_int = 0xd; +pub const RTM_IFINFO: c_int = 0xe; +pub const RTM_NEWMADDR: c_int = 0xf; +pub const RTM_DELMADDR: c_int = 0x10; +pub const RTM_IFINFO2: c_int = 0x12; +pub const RTM_NEWMADDR2: c_int = 0x13; +pub const RTM_GET2: c_int = 0x14; + +// Bitmask values for rtm_inits and rmx_locks. 
+pub const RTV_MTU: c_int = 0x1;
+pub const RTV_HOPCOUNT: c_int = 0x2;
+pub const RTV_EXPIRE: c_int = 0x4;
+pub const RTV_RPIPE: c_int = 0x8;
+pub const RTV_SPIPE: c_int = 0x10;
+pub const RTV_SSTHRESH: c_int = 0x20;
+pub const RTV_RTT: c_int = 0x40;
+pub const RTV_RTTVAR: c_int = 0x80;
+
+pub const RTAX_MAX: c_int = 8;
+
+pub const KERN_PROCARGS2: c_int = 49;
+
+pub const PROC_PIDTASKALLINFO: c_int = 2;
+pub const PROC_PIDTBSDINFO: c_int = 3;
+pub const PROC_PIDTASKINFO: c_int = 4;
+pub const PROC_PIDTHREADINFO: c_int = 5;
+pub const PROC_PIDVNODEPATHINFO: c_int = 9;
+pub const PROC_PIDPATHINFO_MAXSIZE: c_int = 4096;
+
+pub const PROC_PIDLISTFDS: c_int = 1;
+pub const PROC_PIDLISTFD_SIZE: c_int = size_of::<proc_fdinfo>() as c_int;
+pub const PROX_FDTYPE_ATALK: c_int = 0;
+pub const PROX_FDTYPE_VNODE: c_int = 1;
+pub const PROX_FDTYPE_SOCKET: c_int = 2;
+pub const PROX_FDTYPE_PSHM: c_int = 3;
+pub const PROX_FDTYPE_PSEM: c_int = 4;
+pub const PROX_FDTYPE_KQUEUE: c_int = 5;
+pub const PROX_FDTYPE_PIPE: c_int = 6;
+pub const PROX_FDTYPE_FSEVENTS: c_int = 7;
+pub const PROX_FDTYPE_NETPOLICY: c_int = 9;
+pub const PROX_FDTYPE_CHANNEL: c_int = 10;
+pub const PROX_FDTYPE_NEXUS: c_int = 11;
+
+pub const PROC_CSM_ALL: c_uint = 0x0001;
+pub const PROC_CSM_NOSMT: c_uint = 0x0002;
+pub const PROC_CSM_TECS: c_uint = 0x0004;
+pub const MAXCOMLEN: usize = 16;
+pub const MAXTHREADNAMESIZE: usize = 64;
+
+pub const XUCRED_VERSION: c_uint = 0;
+
+pub const LC_SEGMENT: u32 = 0x1;
+pub const LC_SEGMENT_64: u32 = 0x19;
+
+pub const MH_MAGIC: u32 = 0xfeedface;
+pub const MH_MAGIC_64: u32 = 0xfeedfacf;
+
+// net/if_utun.h
+pub const UTUN_OPT_FLAGS: c_int = 1;
+pub const UTUN_OPT_IFNAME: c_int = 2;
+
+// net/bpf.h
+pub const DLT_NULL: c_uint = 0; // no link-layer encapsulation
+pub const DLT_EN10MB: c_uint = 1; // Ethernet (10Mb)
+pub const DLT_EN3MB: c_uint = 2; // Experimental Ethernet (3Mb)
+pub const DLT_AX25: c_uint = 3; // Amateur Radio AX.25
+pub const DLT_PRONET: c_uint = 4; // Proteon ProNET Token Ring
+pub const DLT_CHAOS: c_uint = 5; // Chaos
+pub const DLT_IEEE802: c_uint = 6; // IEEE 802 Networks
+pub const DLT_ARCNET: c_uint = 7; // ARCNET
+pub const DLT_SLIP: c_uint = 8; // Serial Line IP
+pub const DLT_PPP: c_uint = 9; // Point-to-point Protocol
+pub const DLT_FDDI: c_uint = 10; // FDDI
+pub const DLT_ATM_RFC1483: c_uint = 11; // LLC/SNAP encapsulated atm
+pub const DLT_RAW: c_uint = 12; // raw IP
+pub const DLT_LOOP: c_uint = 108;
+
+// https://github.com/apple/darwin-xnu/blob/HEAD/bsd/net/bpf.h#L100
+// sizeof(i32)
+pub const BPF_ALIGNMENT: c_int = 4;
+
+// sys/mount.h
+pub const MNT_NODEV: c_int = 0x00000010;
+pub const MNT_UNION: c_int = 0x00000020;
+pub const MNT_CPROTECT: c_int = 0x00000080;
+
+// MAC labeled / "quarantined" flag
+pub const MNT_QUARANTINE: c_int = 0x00000400;
+
+// Flags set by internal operations.
+pub const MNT_LOCAL: c_int = 0x00001000;
+pub const MNT_QUOTA: c_int = 0x00002000;
+pub const MNT_ROOTFS: c_int = 0x00004000;
+pub const MNT_DOVOLFS: c_int = 0x00008000;
+
+pub const MNT_DONTBROWSE: c_int = 0x00100000;
+pub const MNT_IGNORE_OWNERSHIP: c_int = 0x00200000;
+pub const MNT_AUTOMOUNTED: c_int = 0x00400000;
+pub const MNT_JOURNALED: c_int = 0x00800000;
+pub const MNT_NOUSERXATTR: c_int = 0x01000000;
+pub const MNT_DEFWRITE: c_int = 0x02000000;
+pub const MNT_MULTILABEL: c_int = 0x04000000;
+pub const MNT_NOATIME: c_int = 0x10000000;
+pub const MNT_SNAPSHOT: c_int = 0x40000000;
+
+// External filesystem command modifier flags.
+pub const MNT_NOBLOCK: c_int = 0x00020000; + +// sys/spawn.h: +// DIFF(main): changed to `c_short` in f62eb023ab +pub const POSIX_SPAWN_RESETIDS: c_int = 0x0001; +pub const POSIX_SPAWN_SETPGROUP: c_int = 0x0002; +pub const POSIX_SPAWN_SETSIGDEF: c_int = 0x0004; +pub const POSIX_SPAWN_SETSIGMASK: c_int = 0x0008; +pub const POSIX_SPAWN_SETEXEC: c_int = 0x0040; +pub const POSIX_SPAWN_START_SUSPENDED: c_int = 0x0080; +pub const POSIX_SPAWN_CLOEXEC_DEFAULT: c_int = 0x4000; + +// sys/ipc.h: +pub const IPC_CREAT: c_int = 0x200; +pub const IPC_EXCL: c_int = 0x400; +pub const IPC_NOWAIT: c_int = 0x800; +pub const IPC_PRIVATE: key_t = 0; + +pub const IPC_RMID: c_int = 0; +pub const IPC_SET: c_int = 1; +pub const IPC_STAT: c_int = 2; + +pub const IPC_R: c_int = 0x100; +pub const IPC_W: c_int = 0x80; +pub const IPC_M: c_int = 0x1000; + +// sys/sem.h +pub const SEM_UNDO: c_int = 0o10000; + +pub const GETNCNT: c_int = 3; +pub const GETPID: c_int = 4; +pub const GETVAL: c_int = 5; +pub const GETALL: c_int = 6; +pub const GETZCNT: c_int = 7; +pub const SETVAL: c_int = 8; +pub const SETALL: c_int = 9; + +// sys/shm.h +pub const SHM_RDONLY: c_int = 0x1000; +pub const SHM_RND: c_int = 0x2000; +#[cfg(target_arch = "aarch64")] +pub const SHMLBA: c_int = 16 * 1024; +#[cfg(not(target_arch = "aarch64"))] +pub const SHMLBA: c_int = 4096; +pub const SHM_R: c_int = IPC_R; +pub const SHM_W: c_int = IPC_W; + +// Flags for chflags(2) +pub const UF_SETTABLE: c_uint = 0x0000ffff; +pub const UF_NODUMP: c_uint = 0x00000001; +pub const UF_IMMUTABLE: c_uint = 0x00000002; +pub const UF_APPEND: c_uint = 0x00000004; +pub const UF_OPAQUE: c_uint = 0x00000008; +pub const UF_COMPRESSED: c_uint = 0x00000020; +pub const UF_TRACKED: c_uint = 0x00000040; +pub const SF_SETTABLE: c_uint = 0xffff0000; +pub const SF_ARCHIVED: c_uint = 0x00010000; +pub const SF_IMMUTABLE: c_uint = 0x00020000; +pub const SF_APPEND: c_uint = 0x00040000; +pub const UF_HIDDEN: c_uint = 0x00008000; + +// +pub const NTP_API: c_int = 4; +pub const MAXPHASE: c_long = 500000000; +pub const MAXFREQ: c_long = 500000; +pub const MINSEC: c_int = 256; +pub const MAXSEC: c_int = 2048; +pub const NANOSECOND: c_long = 1000000000; +pub const SCALE_PPM: c_int = 65; +pub const MAXTC: c_int = 10; +pub const MOD_OFFSET: c_uint = 0x0001; +pub const MOD_FREQUENCY: c_uint = 0x0002; +pub const MOD_MAXERROR: c_uint = 0x0004; +pub const MOD_ESTERROR: c_uint = 0x0008; +pub const MOD_STATUS: c_uint = 0x0010; +pub const MOD_TIMECONST: c_uint = 0x0020; +pub const MOD_PPSMAX: c_uint = 0x0040; +pub const MOD_TAI: c_uint = 0x0080; +pub const MOD_MICRO: c_uint = 0x1000; +pub const MOD_NANO: c_uint = 0x2000; +pub const MOD_CLKB: c_uint = 0x4000; +pub const MOD_CLKA: c_uint = 0x8000; +pub const STA_PLL: c_int = 0x0001; +pub const STA_PPSFREQ: c_int = 0x0002; +pub const STA_PPSTIME: c_int = 0x0004; +pub const STA_FLL: c_int = 0x0008; +pub const STA_INS: c_int = 0x0010; +pub const STA_DEL: c_int = 0x0020; +pub const STA_UNSYNC: c_int = 0x0040; +pub const STA_FREQHOLD: c_int = 0x0080; +pub const STA_PPSSIGNAL: c_int = 0x0100; +pub const STA_PPSJITTER: c_int = 0x0200; +pub const STA_PPSWANDER: c_int = 0x0400; +pub const STA_PPSERROR: c_int = 0x0800; +pub const STA_CLOCKERR: c_int = 0x1000; +pub const STA_NANO: c_int = 0x2000; +pub const STA_MODE: c_int = 0x4000; +pub const STA_CLK: c_int = 0x8000; +pub const STA_RONLY: c_int = STA_PPSSIGNAL + | STA_PPSJITTER + | STA_PPSWANDER + | STA_PPSERROR + | STA_CLOCKERR + | STA_NANO + | STA_MODE + | STA_CLK; +pub const TIME_OK: c_int = 0; +pub const 
TIME_INS: c_int = 1; +pub const TIME_DEL: c_int = 2; +pub const TIME_OOP: c_int = 3; +pub const TIME_WAIT: c_int = 4; +pub const TIME_ERROR: c_int = 5; + +// +pub const MNT_WAIT: c_int = 1; +pub const MNT_NOWAIT: c_int = 2; + +// +pub const THREAD_STANDARD_POLICY: c_int = 1; +pub const THREAD_STANDARD_POLICY_COUNT: c_int = 0; +pub const THREAD_EXTENDED_POLICY: c_int = 1; +pub const THREAD_TIME_CONSTRAINT_POLICY: c_int = 2; +pub const THREAD_PRECEDENCE_POLICY: c_int = 3; +pub const THREAD_AFFINITY_POLICY: c_int = 4; +pub const THREAD_AFFINITY_TAG_NULL: c_int = 0; +pub const THREAD_BACKGROUND_POLICY: c_int = 5; +pub const THREAD_BACKGROUND_POLICY_DARWIN_BG: c_int = 0x1000; +pub const THREAD_LATENCY_QOS_POLICY: c_int = 7; +pub const THREAD_THROUGHPUT_QOS_POLICY: c_int = 8; + +// +pub const TH_STATE_RUNNING: c_int = 1; +pub const TH_STATE_STOPPED: c_int = 2; +pub const TH_STATE_WAITING: c_int = 3; +pub const TH_STATE_UNINTERRUPTIBLE: c_int = 4; +pub const TH_STATE_HALTED: c_int = 5; +pub const TH_FLAGS_SWAPPED: c_int = 0x1; +pub const TH_FLAGS_IDLE: c_int = 0x2; +pub const TH_FLAGS_GLOBAL_FORCED_IDLE: c_int = 0x4; +pub const THREAD_BASIC_INFO: c_int = 3; +pub const THREAD_IDENTIFIER_INFO: c_int = 4; +pub const THREAD_EXTENDED_INFO: c_int = 5; + +// CommonCrypto/CommonCryptoError.h +pub const kCCSuccess: i32 = 0; +pub const kCCParamError: i32 = -4300; +pub const kCCBufferTooSmall: i32 = -4301; +pub const kCCMemoryFailure: i32 = -4302; +pub const kCCAlignmentError: i32 = -4303; +pub const kCCDecodeError: i32 = -4304; +pub const kCCUnimplemented: i32 = -4305; +pub const kCCOverflow: i32 = -4306; +pub const kCCRNGFailure: i32 = -4307; +pub const kCCUnspecifiedError: i32 = -4308; +pub const kCCCallSequenceError: i32 = -4309; +pub const kCCKeySizeError: i32 = -4310; +pub const kCCInvalidKey: i32 = -4311; + +// mach/host_info.h +pub const HOST_LOAD_INFO: i32 = 1; +pub const HOST_VM_INFO: i32 = 2; +pub const HOST_CPU_LOAD_INFO: i32 = 3; +pub const HOST_VM_INFO64: i32 = 4; +pub const HOST_EXTMOD_INFO64: i32 = 5; +pub const HOST_EXPIRED_TASK_INFO: i32 = 6; + +// mach/vm_statistics.h +pub const VM_PAGE_QUERY_PAGE_PRESENT: i32 = 0x1; +pub const VM_PAGE_QUERY_PAGE_FICTITIOUS: i32 = 0x2; +pub const VM_PAGE_QUERY_PAGE_REF: i32 = 0x4; +pub const VM_PAGE_QUERY_PAGE_DIRTY: i32 = 0x8; +pub const VM_PAGE_QUERY_PAGE_PAGED_OUT: i32 = 0x10; +pub const VM_PAGE_QUERY_PAGE_COPIED: i32 = 0x20; +pub const VM_PAGE_QUERY_PAGE_SPECULATIVE: i32 = 0x40; +pub const VM_PAGE_QUERY_PAGE_EXTERNAL: i32 = 0x80; +pub const VM_PAGE_QUERY_PAGE_CS_VALIDATED: i32 = 0x100; +pub const VM_PAGE_QUERY_PAGE_CS_TAINTED: i32 = 0x200; +pub const VM_PAGE_QUERY_PAGE_CS_NX: i32 = 0x400; + +// mach/task_info.h +pub const TASK_THREAD_TIMES_INFO: u32 = 3; +pub const HOST_CPU_LOAD_INFO_COUNT: u32 = 4; +pub const MACH_TASK_BASIC_INFO: u32 = 20; + +pub const MACH_PORT_NULL: i32 = 0; + +pub const RUSAGE_INFO_V0: c_int = 0; +pub const RUSAGE_INFO_V1: c_int = 1; +pub const RUSAGE_INFO_V2: c_int = 2; +pub const RUSAGE_INFO_V3: c_int = 3; +pub const RUSAGE_INFO_V4: c_int = 4; + +// copyfile.h +pub const COPYFILE_ACL: crate::copyfile_flags_t = 1 << 0; +pub const COPYFILE_STAT: crate::copyfile_flags_t = 1 << 1; +pub const COPYFILE_XATTR: crate::copyfile_flags_t = 1 << 2; +pub const COPYFILE_DATA: crate::copyfile_flags_t = 1 << 3; +pub const COPYFILE_SECURITY: crate::copyfile_flags_t = COPYFILE_STAT | COPYFILE_ACL; +pub const COPYFILE_METADATA: crate::copyfile_flags_t = COPYFILE_SECURITY | COPYFILE_XATTR; +pub const COPYFILE_RECURSIVE: crate::copyfile_flags_t = 1 
<< 15; +pub const COPYFILE_CHECK: crate::copyfile_flags_t = 1 << 16; +pub const COPYFILE_EXCL: crate::copyfile_flags_t = 1 << 17; +pub const COPYFILE_NOFOLLOW_SRC: crate::copyfile_flags_t = 1 << 18; +pub const COPYFILE_NOFOLLOW_DST: crate::copyfile_flags_t = 1 << 19; +pub const COPYFILE_MOVE: crate::copyfile_flags_t = 1 << 20; +pub const COPYFILE_UNLINK: crate::copyfile_flags_t = 1 << 21; +pub const COPYFILE_NOFOLLOW: crate::copyfile_flags_t = + COPYFILE_NOFOLLOW_SRC | COPYFILE_NOFOLLOW_DST; +pub const COPYFILE_PACK: crate::copyfile_flags_t = 1 << 22; +pub const COPYFILE_UNPACK: crate::copyfile_flags_t = 1 << 23; +pub const COPYFILE_CLONE: crate::copyfile_flags_t = 1 << 24; +pub const COPYFILE_CLONE_FORCE: crate::copyfile_flags_t = 1 << 25; +pub const COPYFILE_RUN_IN_PLACE: crate::copyfile_flags_t = 1 << 26; +pub const COPYFILE_DATA_SPARSE: crate::copyfile_flags_t = 1 << 27; +pub const COPYFILE_PRESERVE_DST_TRACKED: crate::copyfile_flags_t = 1 << 28; +pub const COPYFILE_VERBOSE: crate::copyfile_flags_t = 1 << 30; +pub const COPYFILE_RECURSE_ERROR: c_int = 0; +pub const COPYFILE_RECURSE_FILE: c_int = 1; +pub const COPYFILE_RECURSE_DIR: c_int = 2; +pub const COPYFILE_RECURSE_DIR_CLEANUP: c_int = 3; +pub const COPYFILE_COPY_DATA: c_int = 4; +pub const COPYFILE_COPY_XATTR: c_int = 5; +pub const COPYFILE_START: c_int = 1; +pub const COPYFILE_FINISH: c_int = 2; +pub const COPYFILE_ERR: c_int = 3; +pub const COPYFILE_PROGRESS: c_int = 4; +pub const COPYFILE_CONTINUE: c_int = 0; +pub const COPYFILE_SKIP: c_int = 1; +pub const COPYFILE_QUIT: c_int = 2; +pub const COPYFILE_STATE_SRC_FD: c_int = 1; +pub const COPYFILE_STATE_SRC_FILENAME: c_int = 2; +pub const COPYFILE_STATE_DST_FD: c_int = 3; +pub const COPYFILE_STATE_DST_FILENAME: c_int = 4; +pub const COPYFILE_STATE_QUARANTINE: c_int = 5; +pub const COPYFILE_STATE_STATUS_CB: c_int = 6; +pub const COPYFILE_STATE_STATUS_CTX: c_int = 7; +pub const COPYFILE_STATE_COPIED: c_int = 8; +pub const COPYFILE_STATE_XATTRNAME: c_int = 9; +pub const COPYFILE_STATE_WAS_CLONED: c_int = 10; +pub const COPYFILE_STATE_SRC_BSIZE: c_int = 11; +pub const COPYFILE_STATE_DST_BSIZE: c_int = 12; +pub const COPYFILE_STATE_BSIZE: c_int = 13; + +// +pub const ATTR_BIT_MAP_COUNT: c_ushort = 5; +pub const FSOPT_NOFOLLOW: u32 = 0x1; +pub const FSOPT_NOFOLLOW_ANY: u32 = 0x800; +pub const FSOPT_REPORT_FULLSIZE: u32 = 0x4; +pub const FSOPT_PACK_INVAL_ATTRS: u32 = 0x8; +pub const FSOPT_ATTR_CMN_EXTENDED: u32 = 0x20; +pub const FSOPT_RETURN_REALDEV: u32 = 0x200; +pub const ATTR_CMN_NAME: attrgroup_t = 0x00000001; +pub const ATTR_CMN_DEVID: attrgroup_t = 0x00000002; +pub const ATTR_CMN_FSID: attrgroup_t = 0x00000004; +pub const ATTR_CMN_OBJTYPE: attrgroup_t = 0x00000008; +pub const ATTR_CMN_OBJTAG: attrgroup_t = 0x00000010; +pub const ATTR_CMN_OBJID: attrgroup_t = 0x00000020; +pub const ATTR_CMN_OBJPERMANENTID: attrgroup_t = 0x00000040; +pub const ATTR_CMN_PAROBJID: attrgroup_t = 0x00000080; +pub const ATTR_CMN_SCRIPT: attrgroup_t = 0x00000100; +pub const ATTR_CMN_CRTIME: attrgroup_t = 0x00000200; +pub const ATTR_CMN_MODTIME: attrgroup_t = 0x00000400; +pub const ATTR_CMN_CHGTIME: attrgroup_t = 0x00000800; +pub const ATTR_CMN_ACCTIME: attrgroup_t = 0x00001000; +pub const ATTR_CMN_BKUPTIME: attrgroup_t = 0x00002000; +pub const ATTR_CMN_FNDRINFO: attrgroup_t = 0x00004000; +pub const ATTR_CMN_OWNERID: attrgroup_t = 0x00008000; +pub const ATTR_CMN_GRPID: attrgroup_t = 0x00010000; +pub const ATTR_CMN_ACCESSMASK: attrgroup_t = 0x00020000; +pub const ATTR_CMN_FLAGS: attrgroup_t = 0x00040000; 
+pub const ATTR_CMN_GEN_COUNT: attrgroup_t = 0x00080000; +pub const ATTR_CMN_DOCUMENT_ID: attrgroup_t = 0x00100000; +pub const ATTR_CMN_USERACCESS: attrgroup_t = 0x00200000; +pub const ATTR_CMN_EXTENDED_SECURITY: attrgroup_t = 0x00400000; +pub const ATTR_CMN_UUID: attrgroup_t = 0x00800000; +pub const ATTR_CMN_GRPUUID: attrgroup_t = 0x01000000; +pub const ATTR_CMN_FILEID: attrgroup_t = 0x02000000; +pub const ATTR_CMN_PARENTID: attrgroup_t = 0x04000000; +pub const ATTR_CMN_FULLPATH: attrgroup_t = 0x08000000; +pub const ATTR_CMN_ADDEDTIME: attrgroup_t = 0x10000000; +pub const ATTR_CMN_DATA_PROTECT_FLAGS: attrgroup_t = 0x40000000; +pub const ATTR_CMN_RETURNED_ATTRS: attrgroup_t = 0x80000000; +pub const ATTR_VOL_FSTYPE: attrgroup_t = 0x00000001; +pub const ATTR_VOL_SIGNATURE: attrgroup_t = 0x00000002; +pub const ATTR_VOL_SIZE: attrgroup_t = 0x00000004; +pub const ATTR_VOL_SPACEFREE: attrgroup_t = 0x00000008; +pub const ATTR_VOL_SPACEAVAIL: attrgroup_t = 0x00000010; +pub const ATTR_VOL_MINALLOCATION: attrgroup_t = 0x00000020; +pub const ATTR_VOL_ALLOCATIONCLUMP: attrgroup_t = 0x00000040; +pub const ATTR_VOL_IOBLOCKSIZE: attrgroup_t = 0x00000080; +pub const ATTR_VOL_OBJCOUNT: attrgroup_t = 0x00000100; +pub const ATTR_VOL_FILECOUNT: attrgroup_t = 0x00000200; +pub const ATTR_VOL_DIRCOUNT: attrgroup_t = 0x00000400; +pub const ATTR_VOL_MAXOBJCOUNT: attrgroup_t = 0x00000800; +pub const ATTR_VOL_MOUNTPOINT: attrgroup_t = 0x00001000; +pub const ATTR_VOL_NAME: attrgroup_t = 0x00002000; +pub const ATTR_VOL_MOUNTFLAGS: attrgroup_t = 0x00004000; +pub const ATTR_VOL_MOUNTEDDEVICE: attrgroup_t = 0x00008000; +pub const ATTR_VOL_ENCODINGSUSED: attrgroup_t = 0x00010000; +pub const ATTR_VOL_CAPABILITIES: attrgroup_t = 0x00020000; +pub const ATTR_VOL_UUID: attrgroup_t = 0x00040000; +pub const ATTR_VOL_SPACEUSED: attrgroup_t = 0x00800000; +pub const ATTR_VOL_QUOTA_SIZE: attrgroup_t = 0x10000000; +pub const ATTR_VOL_RESERVED_SIZE: attrgroup_t = 0x20000000; +pub const ATTR_VOL_ATTRIBUTES: attrgroup_t = 0x40000000; +pub const ATTR_VOL_INFO: attrgroup_t = 0x80000000; +pub const ATTR_DIR_LINKCOUNT: attrgroup_t = 0x00000001; +pub const ATTR_DIR_ENTRYCOUNT: attrgroup_t = 0x00000002; +pub const ATTR_DIR_MOUNTSTATUS: attrgroup_t = 0x00000004; +pub const ATTR_DIR_ALLOCSIZE: attrgroup_t = 0x00000008; +pub const ATTR_DIR_IOBLOCKSIZE: attrgroup_t = 0x00000010; +pub const ATTR_DIR_DATALENGTH: attrgroup_t = 0x00000020; +pub const ATTR_FILE_LINKCOUNT: attrgroup_t = 0x00000001; +pub const ATTR_FILE_TOTALSIZE: attrgroup_t = 0x00000002; +pub const ATTR_FILE_ALLOCSIZE: attrgroup_t = 0x00000004; +pub const ATTR_FILE_IOBLOCKSIZE: attrgroup_t = 0x00000008; +pub const ATTR_FILE_DEVTYPE: attrgroup_t = 0x00000020; +pub const ATTR_FILE_FORKCOUNT: attrgroup_t = 0x00000080; +pub const ATTR_FILE_FORKLIST: attrgroup_t = 0x00000100; +pub const ATTR_FILE_DATALENGTH: attrgroup_t = 0x00000200; +pub const ATTR_FILE_DATAALLOCSIZE: attrgroup_t = 0x00000400; +pub const ATTR_FILE_RSRCLENGTH: attrgroup_t = 0x00001000; +pub const ATTR_FILE_RSRCALLOCSIZE: attrgroup_t = 0x00002000; +pub const ATTR_CMNEXT_RELPATH: attrgroup_t = 0x00000004; +pub const ATTR_CMNEXT_PRIVATESIZE: attrgroup_t = 0x00000008; +pub const ATTR_CMNEXT_LINKID: attrgroup_t = 0x00000010; +pub const ATTR_CMNEXT_NOFIRMLINKPATH: attrgroup_t = 0x00000020; +pub const ATTR_CMNEXT_REALDEVID: attrgroup_t = 0x00000040; +pub const ATTR_CMNEXT_REALFSID: attrgroup_t = 0x00000080; +pub const ATTR_CMNEXT_CLONEID: attrgroup_t = 0x00000100; +pub const ATTR_CMNEXT_EXT_FLAGS: attrgroup_t = 0x00000200; +pub 
const ATTR_CMNEXT_RECURSIVE_GENCOUNT: attrgroup_t = 0x00000400; +pub const DIR_MNTSTATUS_MNTPOINT: u32 = 0x1; +pub const VOL_CAPABILITIES_FORMAT: usize = 0; +pub const VOL_CAPABILITIES_INTERFACES: usize = 1; +pub const VOL_CAP_FMT_PERSISTENTOBJECTIDS: attrgroup_t = 0x00000001; +pub const VOL_CAP_FMT_SYMBOLICLINKS: attrgroup_t = 0x00000002; +pub const VOL_CAP_FMT_HARDLINKS: attrgroup_t = 0x00000004; +pub const VOL_CAP_FMT_JOURNAL: attrgroup_t = 0x00000008; +pub const VOL_CAP_FMT_JOURNAL_ACTIVE: attrgroup_t = 0x00000010; +pub const VOL_CAP_FMT_NO_ROOT_TIMES: attrgroup_t = 0x00000020; +pub const VOL_CAP_FMT_SPARSE_FILES: attrgroup_t = 0x00000040; +pub const VOL_CAP_FMT_ZERO_RUNS: attrgroup_t = 0x00000080; +pub const VOL_CAP_FMT_CASE_SENSITIVE: attrgroup_t = 0x00000100; +pub const VOL_CAP_FMT_CASE_PRESERVING: attrgroup_t = 0x00000200; +pub const VOL_CAP_FMT_FAST_STATFS: attrgroup_t = 0x00000400; +pub const VOL_CAP_FMT_2TB_FILESIZE: attrgroup_t = 0x00000800; +pub const VOL_CAP_FMT_OPENDENYMODES: attrgroup_t = 0x00001000; +pub const VOL_CAP_FMT_HIDDEN_FILES: attrgroup_t = 0x00002000; +pub const VOL_CAP_FMT_PATH_FROM_ID: attrgroup_t = 0x00004000; +pub const VOL_CAP_FMT_NO_VOLUME_SIZES: attrgroup_t = 0x00008000; +pub const VOL_CAP_FMT_DECMPFS_COMPRESSION: attrgroup_t = 0x00010000; +pub const VOL_CAP_FMT_64BIT_OBJECT_IDS: attrgroup_t = 0x00020000; +pub const VOL_CAP_FMT_DIR_HARDLINKS: attrgroup_t = 0x00040000; +pub const VOL_CAP_FMT_DOCUMENT_ID: attrgroup_t = 0x00080000; +pub const VOL_CAP_FMT_WRITE_GENERATION_COUNT: attrgroup_t = 0x00100000; +pub const VOL_CAP_FMT_NO_IMMUTABLE_FILES: attrgroup_t = 0x00200000; +pub const VOL_CAP_FMT_NO_PERMISSIONS: attrgroup_t = 0x00400000; +pub const VOL_CAP_FMT_SHARED_SPACE: attrgroup_t = 0x00800000; +pub const VOL_CAP_FMT_VOL_GROUPS: attrgroup_t = 0x01000000; +pub const VOL_CAP_FMT_SEALED: attrgroup_t = 0x02000000; +pub const VOL_CAP_INT_SEARCHFS: attrgroup_t = 0x00000001; +pub const VOL_CAP_INT_ATTRLIST: attrgroup_t = 0x00000002; +pub const VOL_CAP_INT_NFSEXPORT: attrgroup_t = 0x00000004; +pub const VOL_CAP_INT_READDIRATTR: attrgroup_t = 0x00000008; +pub const VOL_CAP_INT_EXCHANGEDATA: attrgroup_t = 0x00000010; +pub const VOL_CAP_INT_COPYFILE: attrgroup_t = 0x00000020; +pub const VOL_CAP_INT_ALLOCATE: attrgroup_t = 0x00000040; +pub const VOL_CAP_INT_VOL_RENAME: attrgroup_t = 0x00000080; +pub const VOL_CAP_INT_ADVLOCK: attrgroup_t = 0x00000100; +pub const VOL_CAP_INT_FLOCK: attrgroup_t = 0x00000200; +pub const VOL_CAP_INT_EXTENDED_SECURITY: attrgroup_t = 0x00000400; +pub const VOL_CAP_INT_USERACCESS: attrgroup_t = 0x00000800; +pub const VOL_CAP_INT_MANLOCK: attrgroup_t = 0x00001000; +pub const VOL_CAP_INT_NAMEDSTREAMS: attrgroup_t = 0x00002000; +pub const VOL_CAP_INT_EXTENDED_ATTR: attrgroup_t = 0x00004000; +pub const VOL_CAP_INT_CLONE: attrgroup_t = 0x00010000; +pub const VOL_CAP_INT_SNAPSHOT: attrgroup_t = 0x00020000; +pub const VOL_CAP_INT_RENAME_SWAP: attrgroup_t = 0x00040000; +pub const VOL_CAP_INT_RENAME_EXCL: attrgroup_t = 0x00080000; +pub const VOL_CAP_INT_RENAME_OPENFAIL: attrgroup_t = 0x00100000; + +// os/clock.h +pub const OS_CLOCK_MACH_ABSOLUTE_TIME: os_clockid_t = 32; + +// os/os_sync_wait_on_address.h +pub const OS_SYNC_WAIT_ON_ADDRESS_NONE: os_sync_wait_on_address_flags_t = 0x00000000; +pub const OS_SYNC_WAIT_ON_ADDRESS_SHARED: os_sync_wait_on_address_flags_t = 0x00000001; +pub const OS_SYNC_WAKE_BY_ADDRESS_NONE: os_sync_wake_by_address_flags_t = 0x00000000; +pub const OS_SYNC_WAKE_BY_ADDRESS_SHARED: os_sync_wake_by_address_flags_t = 0x00000001; + 
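+// A minimal usage sketch for the os_sync_* flags above (hedged: this example is
+// not from the Apple headers or the upstream libc crate). The
+// `os_sync_wait_on_address` / `os_sync_wake_by_address_*` entry points are
+// declared further down in this module and are only available on macOS 14.4+.
+// `futex` below is a hypothetical caller-side variable.
+//
+//     use std::sync::atomic::AtomicU32;
+//     let futex = AtomicU32::new(0);
+//     let addr = &futex as *const AtomicU32 as *mut c_void;
+//     unsafe {
+//         // Sleep while the 4-byte word at `addr` still holds the expected value 0.
+//         os_sync_wait_on_address(addr, 0, 4, OS_SYNC_WAIT_ON_ADDRESS_NONE);
+//     }
+//     // Elsewhere, after storing a new value, wake a single waiter:
+//     unsafe {
+//         os_sync_wake_by_address_any(addr, 4, OS_SYNC_WAKE_BY_ADDRESS_NONE);
+//     }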
+// +/// Process being created by fork. +pub const SIDL: u32 = 1; +/// Currently runnable. +pub const SRUN: u32 = 2; +/// Sleeping on an address. +pub const SSLEEP: u32 = 3; +/// Process debugging or suspension. +pub const SSTOP: u32 = 4; +/// Awaiting collection by parent. +pub const SZOMB: u32 = 5; + +// sys/vsock.h +pub const VMADDR_CID_ANY: c_uint = 0xFFFFFFFF; +pub const VMADDR_CID_HYPERVISOR: c_uint = 0; +pub const VMADDR_CID_RESERVED: c_uint = 1; +pub const VMADDR_CID_HOST: c_uint = 2; +pub const VMADDR_PORT_ANY: c_uint = 0xFFFFFFFF; + +const fn __DARWIN_ALIGN32(p: usize) -> usize { + const __DARWIN_ALIGNBYTES32: usize = size_of::() - 1; + (p + __DARWIN_ALIGNBYTES32) & !__DARWIN_ALIGNBYTES32 +} + +pub const THREAD_EXTENDED_POLICY_COUNT: mach_msg_type_number_t = + (size_of::() / size_of::()) as mach_msg_type_number_t; +pub const THREAD_TIME_CONSTRAINT_POLICY_COUNT: mach_msg_type_number_t = + (size_of::() / size_of::()) + as mach_msg_type_number_t; +pub const THREAD_PRECEDENCE_POLICY_COUNT: mach_msg_type_number_t = + (size_of::() / size_of::()) + as mach_msg_type_number_t; +pub const THREAD_AFFINITY_POLICY_COUNT: mach_msg_type_number_t = + (size_of::() / size_of::()) as mach_msg_type_number_t; +pub const THREAD_BACKGROUND_POLICY_COUNT: mach_msg_type_number_t = + (size_of::() / size_of::()) + as mach_msg_type_number_t; +pub const THREAD_LATENCY_QOS_POLICY_COUNT: mach_msg_type_number_t = + (size_of::() / size_of::()) + as mach_msg_type_number_t; +pub const THREAD_THROUGHPUT_QOS_POLICY_COUNT: mach_msg_type_number_t = + (size_of::() / size_of::()) + as mach_msg_type_number_t; +pub const THREAD_BASIC_INFO_COUNT: mach_msg_type_number_t = + (size_of::() / size_of::()) as mach_msg_type_number_t; +pub const THREAD_IDENTIFIER_INFO_COUNT: mach_msg_type_number_t = + (size_of::() / size_of::()) as mach_msg_type_number_t; +pub const THREAD_EXTENDED_INFO_COUNT: mach_msg_type_number_t = + (size_of::() / size_of::()) as mach_msg_type_number_t; + +pub const TASK_THREAD_TIMES_INFO_COUNT: u32 = + (size_of::() / size_of::()) as u32; +pub const MACH_TASK_BASIC_INFO_COUNT: u32 = + (size_of::() / size_of::()) as u32; +pub const HOST_VM_INFO64_COUNT: mach_msg_type_number_t = + (size_of::() / size_of::()) as mach_msg_type_number_t; + +// bsd/net/if_mib.h +/// Non-interface-specific +pub const IFMIB_SYSTEM: c_int = 1; +/// Per-interface data table +pub const IFMIB_IFDATA: c_int = 2; +/// All interfaces data at once +pub const IFMIB_IFALLDATA: c_int = 3; + +/// Generic stats for all kinds of ifaces +pub const IFDATA_GENERAL: c_int = 1; +/// Specific to the type of interface +pub const IFDATA_LINKSPECIFIC: c_int = 2; +/// Addresses assigned to interface +pub const IFDATA_ADDRS: c_int = 3; +/// Multicast addresses assigned to interface +pub const IFDATA_MULTIADDRS: c_int = 4; + +/// Number of interfaces configured +pub const IFMIB_IFCOUNT: c_int = 1; + +/// Functions not specific to a type of iface +pub const NETLINK_GENERIC: c_int = 0; + +pub const DOT3COMPLIANCE_STATS: c_int = 1; +pub const DOT3COMPLIANCE_COLLS: c_int = 2; + +// kern_control.h +pub const MAX_KCTL_NAME: usize = 96; + +f! 
{
+    pub fn CMSG_NXTHDR(mhdr: *const crate::msghdr, cmsg: *const cmsghdr) -> *mut cmsghdr {
+        if cmsg.is_null() {
+            return crate::CMSG_FIRSTHDR(mhdr);
+        }
+        let cmsg_len = (*cmsg).cmsg_len as usize;
+        let next = cmsg as usize + __DARWIN_ALIGN32(cmsg_len);
+        let max = (*mhdr).msg_control as usize + (*mhdr).msg_controllen as usize;
+        if next + __DARWIN_ALIGN32(size_of::<cmsghdr>()) > max {
+            core::ptr::null_mut()
+        } else {
+            next as *mut cmsghdr
+        }
+    }
+
+    pub fn CMSG_DATA(cmsg: *const cmsghdr) -> *mut c_uchar {
+        (cmsg as *mut c_uchar).add(__DARWIN_ALIGN32(size_of::<cmsghdr>()))
+    }
+
+    pub const fn CMSG_SPACE(length: c_uint) -> c_uint {
+        (__DARWIN_ALIGN32(size_of::<cmsghdr>()) + __DARWIN_ALIGN32(length as usize)) as c_uint
+    }
+
+    pub const fn CMSG_LEN(length: c_uint) -> c_uint {
+        (__DARWIN_ALIGN32(size_of::<cmsghdr>()) + length as usize) as c_uint
+    }
+
+    pub const fn VM_MAKE_TAG(id: u8) -> u32 {
+        (id as u32) << 24u32
+    }
+}
+
+safe_f! {
+    pub const fn WSTOPSIG(status: c_int) -> c_int {
+        status >> 8
+    }
+
+    pub const fn _WSTATUS(status: c_int) -> c_int {
+        status & 0x7f
+    }
+
+    pub const fn WIFCONTINUED(status: c_int) -> bool {
+        _WSTATUS(status) == _WSTOPPED && WSTOPSIG(status) == 0x13
+    }
+
+    pub const fn WIFSIGNALED(status: c_int) -> bool {
+        _WSTATUS(status) != _WSTOPPED && _WSTATUS(status) != 0
+    }
+
+    pub const fn WIFSTOPPED(status: c_int) -> bool {
+        _WSTATUS(status) == _WSTOPPED && WSTOPSIG(status) != 0x13
+    }
+
+    pub const fn makedev(major: i32, minor: i32) -> dev_t {
+        (major << 24) | minor
+    }
+
+    pub const fn major(dev: dev_t) -> i32 {
+        (dev >> 24) & 0xff
+    }
+
+    pub const fn minor(dev: dev_t) -> i32 {
+        dev & 0xffffff
+    }
+}
+
+extern "C" {
+    pub fn setgrent();
+    #[doc(hidden)]
+    #[deprecated(since = "0.2.49", note = "Deprecated in MacOSX 10.5")]
+    #[cfg_attr(not(target_arch = "aarch64"), link_name = "daemon$1050")]
+    pub fn daemon(nochdir: c_int, noclose: c_int) -> c_int;
+    #[doc(hidden)]
+    #[deprecated(since = "0.2.49", note = "Deprecated in MacOSX 10.10")]
+    pub fn sem_destroy(sem: *mut sem_t) -> c_int;
+    #[doc(hidden)]
+    #[deprecated(since = "0.2.49", note = "Deprecated in MacOSX 10.10")]
+    pub fn sem_init(sem: *mut sem_t, pshared: c_int, value: c_uint) -> c_int;
+    pub fn aio_read(aiocbp: *mut aiocb) -> c_int;
+    pub fn aio_write(aiocbp: *mut aiocb) -> c_int;
+    pub fn aio_fsync(op: c_int, aiocbp: *mut aiocb) -> c_int;
+    pub fn aio_error(aiocbp: *const aiocb) -> c_int;
+    pub fn aio_return(aiocbp: *mut aiocb) -> ssize_t;
+    #[cfg_attr(
+        all(target_os = "macos", target_arch = "x86"),
+        link_name = "aio_suspend$UNIX2003"
+    )]
+    pub fn aio_suspend(
+        aiocb_list: *const *const aiocb,
+        nitems: c_int,
+        timeout: *const crate::timespec,
+    ) -> c_int;
+    pub fn aio_cancel(fd: c_int, aiocbp: *mut aiocb) -> c_int;
+    pub fn chflags(path: *const c_char, flags: c_uint) -> c_int;
+    pub fn fchflags(fd: c_int, flags: c_uint) -> c_int;
+    pub fn clock_getres(clk_id: crate::clockid_t, tp: *mut crate::timespec) -> c_int;
+    pub fn clock_gettime(clk_id: crate::clockid_t, tp: *mut crate::timespec) -> c_int;
+    pub fn lio_listio(
+        mode: c_int,
+        aiocb_list: *const *mut aiocb,
+        nitems: c_int,
+        sevp: *mut sigevent,
+    ) -> c_int;
+
+    pub fn dirfd(dirp: *mut crate::DIR) -> c_int;
+
+    pub fn lutimes(file: *const c_char, times: *const crate::timeval) -> c_int;
+
+    pub fn gettimeofday(tp: *mut crate::timeval, tz: *mut c_void) -> c_int;
+    pub fn getutxent() -> *mut utmpx;
+    pub fn getutxid(ut: *const utmpx) -> *mut utmpx;
+    pub fn getutxline(ut: *const utmpx) -> *mut utmpx;
+    pub fn pututxline(ut: *const utmpx) -> *mut utmpx;
+    pub
fn setutxent(); + pub fn endutxent(); + pub fn utmpxname(file: *const c_char) -> c_int; + + pub fn asctime(tm: *const crate::tm) -> *mut c_char; + pub fn ctime(clock: *const time_t) -> *mut c_char; + pub fn getdate(datestr: *const c_char) -> *mut crate::tm; + pub fn strptime( + buf: *const c_char, + format: *const c_char, + timeptr: *mut crate::tm, + ) -> *mut c_char; + pub fn asctime_r(tm: *const crate::tm, result: *mut c_char) -> *mut c_char; + pub fn ctime_r(clock: *const time_t, result: *mut c_char) -> *mut c_char; + + pub fn getnameinfo( + sa: *const crate::sockaddr, + salen: crate::socklen_t, + host: *mut c_char, + hostlen: crate::socklen_t, + serv: *mut c_char, + servlen: crate::socklen_t, + flags: c_int, + ) -> c_int; + pub fn mincore(addr: *const c_void, len: size_t, vec: *mut c_char) -> c_int; + pub fn sysctlnametomib(name: *const c_char, mibp: *mut c_int, sizep: *mut size_t) -> c_int; + #[cfg_attr( + all(target_os = "macos", target_arch = "x86"), + link_name = "mprotect$UNIX2003" + )] + pub fn mprotect(addr: *mut c_void, len: size_t, prot: c_int) -> c_int; + pub fn semget(key: key_t, nsems: c_int, semflg: c_int) -> c_int; + #[cfg_attr( + all(target_os = "macos", target_arch = "x86"), + link_name = "semctl$UNIX2003" + )] + pub fn semctl(semid: c_int, semnum: c_int, cmd: c_int, ...) -> c_int; + pub fn semop(semid: c_int, sops: *mut sembuf, nsops: size_t) -> c_int; + pub fn shm_open(name: *const c_char, oflag: c_int, ...) -> c_int; + pub fn ftok(pathname: *const c_char, proj_id: c_int) -> key_t; + pub fn shmat(shmid: c_int, shmaddr: *const c_void, shmflg: c_int) -> *mut c_void; + pub fn shmdt(shmaddr: *const c_void) -> c_int; + #[cfg_attr( + all(target_os = "macos", target_arch = "x86"), + link_name = "shmctl$UNIX2003" + )] + pub fn shmctl(shmid: c_int, cmd: c_int, buf: *mut crate::shmid_ds) -> c_int; + pub fn shmget(key: key_t, size: size_t, shmflg: c_int) -> c_int; + pub fn sysctl( + name: *mut c_int, + namelen: c_uint, + oldp: *mut c_void, + oldlenp: *mut size_t, + newp: *mut c_void, + newlen: size_t, + ) -> c_int; + pub fn sysctlbyname( + name: *const c_char, + oldp: *mut c_void, + oldlenp: *mut size_t, + newp: *mut c_void, + newlen: size_t, + ) -> c_int; + #[deprecated(since = "0.2.55", note = "Use the `mach2` crate instead")] + pub fn mach_absolute_time() -> u64; + #[deprecated(since = "0.2.55", note = "Use the `mach2` crate instead")] + #[allow(deprecated)] + pub fn mach_timebase_info(info: *mut crate::mach_timebase_info) -> c_int; + #[deprecated(since = "0.2.55", note = "Use the `mach2` crate instead")] + pub fn mach_host_self() -> mach_port_t; + #[deprecated(since = "0.2.55", note = "Use the `mach2` crate instead")] + pub fn mach_thread_self() -> mach_port_t; + pub fn pthread_cond_timedwait_relative_np( + cond: *mut pthread_cond_t, + lock: *mut pthread_mutex_t, + timeout: *const crate::timespec, + ) -> c_int; + pub fn pthread_once( + once_control: *mut crate::pthread_once_t, + init_routine: Option, + ) -> c_int; + pub fn pthread_attr_getinheritsched( + attr: *const crate::pthread_attr_t, + inheritsched: *mut c_int, + ) -> c_int; + pub fn pthread_attr_getschedpolicy( + attr: *const crate::pthread_attr_t, + policy: *mut c_int, + ) -> c_int; + pub fn pthread_attr_getscope( + attr: *const crate::pthread_attr_t, + contentionscope: *mut c_int, + ) -> c_int; + pub fn pthread_attr_getstackaddr( + attr: *const crate::pthread_attr_t, + stackaddr: *mut *mut c_void, + ) -> c_int; + pub fn pthread_attr_getdetachstate( + attr: *const crate::pthread_attr_t, + detachstate: *mut c_int, + ) 
-> c_int; + pub fn pthread_attr_setinheritsched( + attr: *mut crate::pthread_attr_t, + inheritsched: c_int, + ) -> c_int; + pub fn pthread_attr_setschedpolicy(attr: *mut crate::pthread_attr_t, policy: c_int) -> c_int; + pub fn pthread_attr_setscope(attr: *mut crate::pthread_attr_t, contentionscope: c_int) + -> c_int; + pub fn pthread_attr_setstackaddr( + attr: *mut crate::pthread_attr_t, + stackaddr: *mut c_void, + ) -> c_int; + pub fn pthread_setname_np(name: *const c_char) -> c_int; + pub fn pthread_getname_np(thread: crate::pthread_t, name: *mut c_char, len: size_t) -> c_int; + pub fn pthread_mach_thread_np(thread: crate::pthread_t) -> crate::mach_port_t; + pub fn pthread_from_mach_thread_np(port: crate::mach_port_t) -> crate::pthread_t; + pub fn pthread_create_from_mach_thread( + thread: *mut crate::pthread_t, + attr: *const crate::pthread_attr_t, + f: extern "C" fn(*mut c_void) -> *mut c_void, + value: *mut c_void, + ) -> c_int; + pub fn pthread_stack_frame_decode_np( + frame_addr: crate::uintptr_t, + return_addr: *mut crate::uintptr_t, + ) -> crate::uintptr_t; + pub fn pthread_get_stackaddr_np(thread: crate::pthread_t) -> *mut c_void; + pub fn pthread_get_stacksize_np(thread: crate::pthread_t) -> size_t; + pub fn pthread_condattr_setpshared(attr: *mut pthread_condattr_t, pshared: c_int) -> c_int; + pub fn pthread_condattr_getpshared( + attr: *const pthread_condattr_t, + pshared: *mut c_int, + ) -> c_int; + pub fn pthread_main_np() -> c_int; + pub fn pthread_mutexattr_setpshared(attr: *mut pthread_mutexattr_t, pshared: c_int) -> c_int; + pub fn pthread_mutexattr_getpshared( + attr: *const pthread_mutexattr_t, + pshared: *mut c_int, + ) -> c_int; + pub fn pthread_rwlockattr_getpshared( + attr: *const pthread_rwlockattr_t, + val: *mut c_int, + ) -> c_int; + pub fn pthread_rwlockattr_setpshared(attr: *mut pthread_rwlockattr_t, val: c_int) -> c_int; + pub fn pthread_threadid_np(thread: crate::pthread_t, thread_id: *mut u64) -> c_int; + pub fn pthread_attr_set_qos_class_np( + attr: *mut pthread_attr_t, + class: qos_class_t, + priority: c_int, + ) -> c_int; + pub fn pthread_attr_get_qos_class_np( + attr: *mut pthread_attr_t, + class: *mut qos_class_t, + priority: *mut c_int, + ) -> c_int; + pub fn pthread_set_qos_class_self_np(class: qos_class_t, priority: c_int) -> c_int; + pub fn pthread_get_qos_class_np( + thread: crate::pthread_t, + class: *mut qos_class_t, + priority: *mut c_int, + ) -> c_int; + pub fn pthread_attr_getschedparam( + attr: *const crate::pthread_attr_t, + param: *mut sched_param, + ) -> c_int; + pub fn pthread_attr_setschedparam( + attr: *mut crate::pthread_attr_t, + param: *const sched_param, + ) -> c_int; + pub fn pthread_getschedparam( + thread: crate::pthread_t, + policy: *mut c_int, + param: *mut sched_param, + ) -> c_int; + pub fn pthread_setschedparam( + thread: crate::pthread_t, + policy: c_int, + param: *const sched_param, + ) -> c_int; + + // Available from Big Sur + pub fn pthread_introspection_hook_install( + hook: crate::pthread_introspection_hook_t, + ) -> crate::pthread_introspection_hook_t; + pub fn pthread_introspection_setspecific_np( + thread: crate::pthread_t, + key: crate::pthread_key_t, + value: *const c_void, + ) -> c_int; + pub fn pthread_introspection_getspecific_np( + thread: crate::pthread_t, + key: crate::pthread_key_t, + ) -> *mut c_void; + pub fn pthread_jit_write_protect_np(enabled: c_int); + pub fn pthread_jit_write_protect_supported_np() -> c_int; + // An array of pthread_jit_write_with_callback_np must declare + // the list of callbacks 
e.g. + // #[link_section = "__DATA_CONST,__pth_jit_func"] + // static callbacks: [libc::pthread_jit_write_callback_t; 2] = [native_jit_write_cb, + // std::mem::transmute::(std::ptr::null())]; + // (a handy PTHREAD_JIT_WRITE_CALLBACK_NP macro for other languages). + pub fn pthread_jit_write_with_callback_np( + callback: crate::pthread_jit_write_callback_t, + ctx: *mut c_void, + ) -> c_int; + pub fn pthread_jit_write_freeze_callbacks_np(); + pub fn pthread_cpu_number_np(cpu_number_out: *mut size_t) -> c_int; + + // Available starting with macOS 14.4. + pub fn os_sync_wait_on_address( + addr: *mut c_void, + value: u64, + size: size_t, + flags: os_sync_wait_on_address_flags_t, + ) -> c_int; + pub fn os_sync_wait_on_address_with_deadline( + addr: *mut c_void, + value: u64, + size: size_t, + flags: os_sync_wait_on_address_flags_t, + clockid: os_clockid_t, + deadline: u64, + ) -> c_int; + pub fn os_sync_wait_on_address_with_timeout( + addr: *mut c_void, + value: u64, + size: size_t, + flags: os_sync_wait_on_address_flags_t, + clockid: os_clockid_t, + timeout_ns: u64, + ) -> c_int; + pub fn os_sync_wake_by_address_any( + addr: *mut c_void, + size: size_t, + flags: os_sync_wake_by_address_flags_t, + ) -> c_int; + pub fn os_sync_wake_by_address_all( + addr: *mut c_void, + size: size_t, + flags: os_sync_wake_by_address_flags_t, + ) -> c_int; + + pub fn os_unfair_lock_lock(lock: os_unfair_lock_t); + pub fn os_unfair_lock_trylock(lock: os_unfair_lock_t) -> bool; + pub fn os_unfair_lock_unlock(lock: os_unfair_lock_t); + pub fn os_unfair_lock_assert_owner(lock: os_unfair_lock_t); + pub fn os_unfair_lock_assert_not_owner(lock: os_unfair_lock_t); + + pub fn os_log_create(subsystem: *const c_char, category: *const c_char) -> crate::os_log_t; + pub fn os_log_type_enabled(oslog: crate::os_log_t, tpe: crate::os_log_type_t) -> bool; + pub fn os_signpost_id_make_with_pointer( + log: crate::os_log_t, + ptr: *const c_void, + ) -> crate::os_signpost_id_t; + pub fn os_signpost_id_generate(log: crate::os_log_t) -> crate::os_signpost_id_t; + pub fn os_signpost_enabled(log: crate::os_log_t) -> bool; + + pub fn thread_policy_set( + thread: thread_t, + flavor: thread_policy_flavor_t, + policy_info: thread_policy_t, + count: mach_msg_type_number_t, + ) -> kern_return_t; + pub fn thread_policy_get( + thread: thread_t, + flavor: thread_policy_flavor_t, + policy_info: thread_policy_t, + count: *mut mach_msg_type_number_t, + get_default: *mut boolean_t, + ) -> kern_return_t; + pub fn thread_info( + target_act: thread_inspect_t, + flavor: thread_flavor_t, + thread_info_out: thread_info_t, + thread_info_outCnt: *mut mach_msg_type_number_t, + ) -> kern_return_t; + #[cfg_attr(doc, doc(alias = "__errno_location"))] + #[cfg_attr(doc, doc(alias = "errno"))] + pub fn __error() -> *mut c_int; + pub fn backtrace(buf: *mut *mut c_void, sz: c_int) -> c_int; + pub fn backtrace_symbols(addrs: *const *mut c_void, sz: c_int) -> *mut *mut c_char; + pub fn backtrace_symbols_fd(addrs: *const *mut c_void, sz: c_int, fd: c_int); + pub fn backtrace_from_fp(startfp: *mut c_void, array: *mut *mut c_void, size: c_int) -> c_int; + pub fn backtrace_image_offsets( + array: *const *mut c_void, + image_offsets: *mut image_offset, + size: c_int, + ); + pub fn backtrace_async(array: *mut *mut c_void, length: size_t, task_id: *mut u32) -> size_t; + #[cfg_attr( + all(target_os = "macos", not(target_arch = "aarch64")), + link_name = "statfs$INODE64" + )] + pub fn statfs(path: *const c_char, buf: *mut statfs) -> c_int; + #[cfg_attr( + all(target_os = "macos", 
not(target_arch = "aarch64")), + link_name = "fstatfs$INODE64" + )] + pub fn fstatfs(fd: c_int, buf: *mut statfs) -> c_int; + pub fn kevent( + kq: c_int, + changelist: *const crate::kevent, + nchanges: c_int, + eventlist: *mut crate::kevent, + nevents: c_int, + timeout: *const crate::timespec, + ) -> c_int; + pub fn kevent64( + kq: c_int, + changelist: *const crate::kevent64_s, + nchanges: c_int, + eventlist: *mut crate::kevent64_s, + nevents: c_int, + flags: c_uint, + timeout: *const crate::timespec, + ) -> c_int; + pub fn mount( + src: *const c_char, + target: *const c_char, + flags: c_int, + data: *mut c_void, + ) -> c_int; + pub fn fmount(src: *const c_char, fd: c_int, flags: c_int, data: *mut c_void) -> c_int; + pub fn ptrace(request: c_int, pid: crate::pid_t, addr: *mut c_char, data: c_int) -> c_int; + pub fn quotactl(special: *const c_char, cmd: c_int, id: c_int, data: *mut c_char) -> c_int; + pub fn sethostname(name: *const c_char, len: c_int) -> c_int; + pub fn sendfile( + fd: c_int, + s: c_int, + offset: off_t, + len: *mut off_t, + hdtr: *mut crate::sf_hdtr, + flags: c_int, + ) -> c_int; + pub fn futimens(fd: c_int, times: *const crate::timespec) -> c_int; + pub fn utimensat( + dirfd: c_int, + path: *const c_char, + times: *const crate::timespec, + flag: c_int, + ) -> c_int; + pub fn openpty( + amaster: *mut c_int, + aslave: *mut c_int, + name: *mut c_char, + termp: *mut termios, + winp: *mut crate::winsize, + ) -> c_int; + pub fn forkpty( + amaster: *mut c_int, + name: *mut c_char, + termp: *mut termios, + winp: *mut crate::winsize, + ) -> crate::pid_t; + pub fn login_tty(fd: c_int) -> c_int; + pub fn duplocale(base: crate::locale_t) -> crate::locale_t; + pub fn freelocale(loc: crate::locale_t) -> c_int; + pub fn localeconv_l(loc: crate::locale_t) -> *mut lconv; + pub fn newlocale(mask: c_int, locale: *const c_char, base: crate::locale_t) -> crate::locale_t; + pub fn uselocale(loc: crate::locale_t) -> crate::locale_t; + pub fn querylocale(mask: c_int, loc: crate::locale_t) -> *const c_char; + pub fn getpriority(which: c_int, who: crate::id_t) -> c_int; + pub fn setpriority(which: c_int, who: crate::id_t, prio: c_int) -> c_int; + pub fn getdomainname(name: *mut c_char, len: c_int) -> c_int; + pub fn setdomainname(name: *const c_char, len: c_int) -> c_int; + pub fn preadv(fd: c_int, iov: *const crate::iovec, iovcnt: c_int, offset: off_t) -> ssize_t; + pub fn pwritev(fd: c_int, iov: *const crate::iovec, iovcnt: c_int, offset: off_t) -> ssize_t; + pub fn getxattr( + path: *const c_char, + name: *const c_char, + value: *mut c_void, + size: size_t, + position: u32, + flags: c_int, + ) -> ssize_t; + pub fn fgetxattr( + filedes: c_int, + name: *const c_char, + value: *mut c_void, + size: size_t, + position: u32, + flags: c_int, + ) -> ssize_t; + pub fn setxattr( + path: *const c_char, + name: *const c_char, + value: *const c_void, + size: size_t, + position: u32, + flags: c_int, + ) -> c_int; + pub fn fsetxattr( + filedes: c_int, + name: *const c_char, + value: *const c_void, + size: size_t, + position: u32, + flags: c_int, + ) -> c_int; + pub fn listxattr(path: *const c_char, list: *mut c_char, size: size_t, flags: c_int) + -> ssize_t; + pub fn flistxattr(filedes: c_int, list: *mut c_char, size: size_t, flags: c_int) -> ssize_t; + pub fn removexattr(path: *const c_char, name: *const c_char, flags: c_int) -> c_int; + pub fn renamex_np(from: *const c_char, to: *const c_char, flags: c_uint) -> c_int; + pub fn renameatx_np( + fromfd: c_int, + from: *const c_char, + tofd: c_int, + to: 
*const c_char, + flags: c_uint, + ) -> c_int; + pub fn fremovexattr(filedes: c_int, name: *const c_char, flags: c_int) -> c_int; + + pub fn getgrouplist( + name: *const c_char, + basegid: c_int, + groups: *mut c_int, + ngroups: *mut c_int, + ) -> c_int; + pub fn initgroups(user: *const c_char, basegroup: c_int) -> c_int; + + #[cfg_attr( + all(target_os = "macos", target_arch = "x86"), + link_name = "waitid$UNIX2003" + )] + pub fn waitid( + idtype: idtype_t, + id: id_t, + infop: *mut crate::siginfo_t, + options: c_int, + ) -> c_int; + pub fn brk(addr: *const c_void) -> *mut c_void; + pub fn sbrk(increment: c_int) -> *mut c_void; + pub fn settimeofday(tv: *const crate::timeval, tz: *const crate::timezone) -> c_int; + #[deprecated(since = "0.2.55", note = "Use the `mach2` crate instead")] + pub fn _dyld_image_count() -> u32; + #[deprecated(since = "0.2.55", note = "Use the `mach2` crate instead")] + #[allow(deprecated)] + pub fn _dyld_get_image_header(image_index: u32) -> *const mach_header; + #[deprecated(since = "0.2.55", note = "Use the `mach2` crate instead")] + pub fn _dyld_get_image_vmaddr_slide(image_index: u32) -> intptr_t; + #[deprecated(since = "0.2.55", note = "Use the `mach2` crate instead")] + pub fn _dyld_get_image_name(image_index: u32) -> *const c_char; + + pub fn posix_spawn( + pid: *mut crate::pid_t, + path: *const c_char, + file_actions: *const crate::posix_spawn_file_actions_t, + attrp: *const crate::posix_spawnattr_t, + argv: *const *mut c_char, + envp: *const *mut c_char, + ) -> c_int; + pub fn posix_spawnp( + pid: *mut crate::pid_t, + file: *const c_char, + file_actions: *const crate::posix_spawn_file_actions_t, + attrp: *const crate::posix_spawnattr_t, + argv: *const *mut c_char, + envp: *const *mut c_char, + ) -> c_int; + pub fn posix_spawnattr_init(attr: *mut posix_spawnattr_t) -> c_int; + pub fn posix_spawnattr_destroy(attr: *mut posix_spawnattr_t) -> c_int; + pub fn posix_spawnattr_getsigdefault( + attr: *const posix_spawnattr_t, + default: *mut crate::sigset_t, + ) -> c_int; + pub fn posix_spawnattr_setsigdefault( + attr: *mut posix_spawnattr_t, + default: *const crate::sigset_t, + ) -> c_int; + pub fn posix_spawnattr_getsigmask( + attr: *const posix_spawnattr_t, + default: *mut crate::sigset_t, + ) -> c_int; + pub fn posix_spawnattr_setsigmask( + attr: *mut posix_spawnattr_t, + default: *const crate::sigset_t, + ) -> c_int; + pub fn posix_spawnattr_getflags(attr: *const posix_spawnattr_t, flags: *mut c_short) -> c_int; + pub fn posix_spawnattr_setflags(attr: *mut posix_spawnattr_t, flags: c_short) -> c_int; + pub fn posix_spawnattr_getpgroup( + attr: *const posix_spawnattr_t, + flags: *mut crate::pid_t, + ) -> c_int; + pub fn posix_spawnattr_setpgroup(attr: *mut posix_spawnattr_t, flags: crate::pid_t) -> c_int; + pub fn posix_spawnattr_setarchpref_np( + attr: *mut posix_spawnattr_t, + count: size_t, + pref: *mut crate::cpu_type_t, + subpref: *mut crate::cpu_subtype_t, + ocount: *mut size_t, + ) -> c_int; + pub fn posix_spawnattr_getarchpref_np( + attr: *const posix_spawnattr_t, + count: size_t, + pref: *mut crate::cpu_type_t, + subpref: *mut crate::cpu_subtype_t, + ocount: *mut size_t, + ) -> c_int; + pub fn posix_spawnattr_getbinpref_np( + attr: *const posix_spawnattr_t, + count: size_t, + pref: *mut crate::cpu_type_t, + ocount: *mut size_t, + ) -> c_int; + pub fn posix_spawnattr_setbinpref_np( + attr: *mut posix_spawnattr_t, + count: size_t, + pref: *mut crate::cpu_type_t, + ocount: *mut size_t, + ) -> c_int; + pub fn posix_spawnattr_set_qos_class_np( + attr: 
*mut posix_spawnattr_t, + qos_class: crate::qos_class_t, + ) -> c_int; + pub fn posix_spawnattr_get_qos_class_np( + attr: *const posix_spawnattr_t, + qos_class: *mut crate::qos_class_t, + ) -> c_int; + + pub fn posix_spawn_file_actions_init(actions: *mut posix_spawn_file_actions_t) -> c_int; + pub fn posix_spawn_file_actions_destroy(actions: *mut posix_spawn_file_actions_t) -> c_int; + pub fn posix_spawn_file_actions_addopen( + actions: *mut posix_spawn_file_actions_t, + fd: c_int, + path: *const c_char, + oflag: c_int, + mode: mode_t, + ) -> c_int; + pub fn posix_spawn_file_actions_addclose( + actions: *mut posix_spawn_file_actions_t, + fd: c_int, + ) -> c_int; + pub fn posix_spawn_file_actions_adddup2( + actions: *mut posix_spawn_file_actions_t, + fd: c_int, + newfd: c_int, + ) -> c_int; + pub fn uname(buf: *mut crate::utsname) -> c_int; + + pub fn connectx( + socket: c_int, + endpoints: *const sa_endpoints_t, + associd: sae_associd_t, + flags: c_uint, + iov: *const crate::iovec, + iovcnt: c_uint, + len: *mut size_t, + connid: *mut sae_connid_t, + ) -> c_int; + pub fn disconnectx(socket: c_int, associd: sae_associd_t, connid: sae_connid_t) -> c_int; + + pub fn ntp_adjtime(buf: *mut timex) -> c_int; + pub fn ntp_gettime(buf: *mut ntptimeval) -> c_int; + + #[cfg_attr( + all(target_os = "macos", not(target_arch = "aarch64")), + link_name = "getmntinfo$INODE64" + )] + pub fn getmntinfo(mntbufp: *mut *mut statfs, flags: c_int) -> c_int; + #[cfg_attr( + all(target_os = "macos", not(target_arch = "aarch64")), + link_name = "getfsstat$INODE64" + )] + pub fn getfsstat(mntbufp: *mut statfs, bufsize: c_int, flags: c_int) -> c_int; + + // Copy-on-write functions. + // According to the man page `flags` is an `int` but in the header + // this is a `uint32_t`. + pub fn clonefile(src: *const c_char, dst: *const c_char, flags: u32) -> c_int; + pub fn clonefileat( + src_dirfd: c_int, + src: *const c_char, + dst_dirfd: c_int, + dst: *const c_char, + flags: u32, + ) -> c_int; + pub fn fclonefileat(srcfd: c_int, dst_dirfd: c_int, dst: *const c_char, flags: u32) -> c_int; + + pub fn copyfile( + from: *const c_char, + to: *const c_char, + state: copyfile_state_t, + flags: copyfile_flags_t, + ) -> c_int; + pub fn fcopyfile( + from: c_int, + to: c_int, + state: copyfile_state_t, + flags: copyfile_flags_t, + ) -> c_int; + pub fn copyfile_state_free(s: copyfile_state_t) -> c_int; + pub fn copyfile_state_alloc() -> copyfile_state_t; + pub fn copyfile_state_get(s: copyfile_state_t, flags: u32, dst: *mut c_void) -> c_int; + pub fn copyfile_state_set(s: copyfile_state_t, flags: u32, src: *const c_void) -> c_int; + + pub fn mach_error_string(error_value: crate::mach_error_t) -> *mut c_char; + + // Added in macOS 10.13 + // ISO/IEC 9899:2011 ("ISO C11") K.3.7.4.1 + pub fn memset_s(s: *mut c_void, smax: size_t, c: c_int, n: size_t) -> c_int; + // Added in macOS 10.5 + pub fn memset_pattern4(b: *mut c_void, pattern4: *const c_void, len: size_t); + pub fn memset_pattern8(b: *mut c_void, pattern8: *const c_void, len: size_t); + pub fn memset_pattern16(b: *mut c_void, pattern16: *const c_void, len: size_t); + + // Inherited from BSD but available from Big Sur only + pub fn strtonum( + __numstr: *const c_char, + __minval: c_longlong, + __maxval: c_longlong, + errstrp: *mut *const c_char, + ) -> c_longlong; + + pub fn mstats() -> mstats; + pub fn malloc_printf(format: *const c_char, ...); + pub fn malloc_zone_check(zone: *mut crate::malloc_zone_t) -> crate::boolean_t; + pub fn malloc_zone_print(zone: *mut crate::malloc_zone_t, 
verbose: crate::boolean_t); + pub fn malloc_zone_statistics(zone: *mut crate::malloc_zone_t, stats: *mut malloc_statistics_t); + pub fn malloc_zone_log(zone: *mut crate::malloc_zone_t, address: *mut c_void); + pub fn malloc_zone_print_ptr_info(ptr: *mut c_void); + pub fn malloc_default_zone() -> *mut crate::malloc_zone_t; + pub fn malloc_zone_from_ptr(ptr: *const c_void) -> *mut crate::malloc_zone_t; + pub fn malloc_zone_malloc(zone: *mut crate::malloc_zone_t, size: size_t) -> *mut c_void; + pub fn malloc_zone_valloc(zone: *mut crate::malloc_zone_t, size: size_t) -> *mut c_void; + pub fn malloc_zone_calloc( + zone: *mut crate::malloc_zone_t, + num_items: size_t, + size: size_t, + ) -> *mut c_void; + pub fn malloc_zone_realloc( + zone: *mut crate::malloc_zone_t, + ptr: *mut c_void, + size: size_t, + ) -> *mut c_void; + pub fn malloc_zone_free(zone: *mut crate::malloc_zone_t, ptr: *mut c_void); + + pub fn proc_listpids(t: u32, typeinfo: u32, buffer: *mut c_void, buffersize: c_int) -> c_int; + pub fn proc_listallpids(buffer: *mut c_void, buffersize: c_int) -> c_int; + pub fn proc_listpgrppids(pgrpid: crate::pid_t, buffer: *mut c_void, buffersize: c_int) + -> c_int; + pub fn proc_listchildpids(ppid: crate::pid_t, buffer: *mut c_void, buffersize: c_int) -> c_int; + pub fn proc_pidinfo( + pid: c_int, + flavor: c_int, + arg: u64, + buffer: *mut c_void, + buffersize: c_int, + ) -> c_int; + pub fn proc_pidfdinfo( + pid: c_int, + fd: c_int, + flavor: c_int, + buffer: *mut c_void, + buffersize: c_int, + ) -> c_int; + pub fn proc_pidfileportinfo( + pid: c_int, + fileport: u32, + flavor: c_int, + buffer: *mut c_void, + buffersize: c_int, + ) -> c_int; + pub fn proc_pidpath(pid: c_int, buffer: *mut c_void, buffersize: u32) -> c_int; + pub fn proc_name(pid: c_int, buffer: *mut c_void, buffersize: u32) -> c_int; + pub fn proc_regionfilename( + pid: c_int, + address: u64, + buffer: *mut c_void, + buffersize: u32, + ) -> c_int; + pub fn proc_kmsgbuf(buffer: *mut c_void, buffersize: u32) -> c_int; + pub fn proc_libversion(major: *mut c_int, minor: *mut c_int) -> c_int; + pub fn proc_pid_rusage(pid: c_int, flavor: c_int, buffer: *mut rusage_info_t) -> c_int; + + // Available from Big Sur + pub fn proc_set_no_smt() -> c_int; + pub fn proc_setthread_no_smt() -> c_int; + pub fn proc_set_csm(flags: u32) -> c_int; + pub fn proc_setthread_csm(flags: u32) -> c_int; + /// # Notes + /// + /// `id` is of type [`uuid_t`]. + pub fn gethostuuid(id: *mut u8, timeout: *const crate::timespec) -> c_int; + + pub fn gethostid() -> c_long; + pub fn sethostid(hostid: c_long); + + pub fn CCRandomGenerateBytes(bytes: *mut c_void, size: size_t) -> crate::CCRNGStatus; + pub fn getentropy(buf: *mut c_void, buflen: size_t) -> c_int; + + // FIXME(1.0): should this actually be deprecated? 
+ #[deprecated(since = "0.2.55", note = "Use the `mach2` crate instead")] + pub fn _NSGetExecutablePath(buf: *mut c_char, bufsize: *mut u32) -> c_int; + + // crt_externs.h + pub fn _NSGetArgv() -> *mut *mut *mut c_char; + pub fn _NSGetArgc() -> *mut c_int; + pub fn _NSGetEnviron() -> *mut *mut *mut c_char; + pub fn _NSGetProgname() -> *mut *mut c_char; + + #[deprecated(since = "0.2.55", note = "Use the `mach2` crate instead")] + pub fn mach_vm_map( + target_task: crate::vm_map_t, + address: *mut crate::mach_vm_address_t, + size: crate::mach_vm_size_t, + mask: crate::mach_vm_offset_t, + flags: c_int, + object: crate::mem_entry_name_port_t, + offset: crate::memory_object_offset_t, + copy: crate::boolean_t, + cur_protection: crate::vm_prot_t, + max_protection: crate::vm_prot_t, + inheritance: crate::vm_inherit_t, + ) -> crate::kern_return_t; + + pub fn vm_allocate( + target_task: vm_map_t, + address: *mut vm_address_t, + size: vm_size_t, + flags: c_int, + ) -> crate::kern_return_t; + + pub fn vm_deallocate( + target_task: vm_map_t, + address: vm_address_t, + size: vm_size_t, + ) -> crate::kern_return_t; + + pub fn host_statistics64( + host_priv: host_t, + flavor: host_flavor_t, + host_info64_out: host_info64_t, + host_info64_outCnt: *mut mach_msg_type_number_t, + ) -> crate::kern_return_t; + pub fn host_processor_info( + host: host_t, + flavor: processor_flavor_t, + out_processor_count: *mut natural_t, + out_processor_info: *mut processor_info_array_t, + out_processor_infoCnt: *mut mach_msg_type_number_t, + ) -> crate::kern_return_t; + + #[deprecated(since = "0.2.55", note = "Use the `mach2` crate instead")] + pub static mut mach_task_self_: crate::mach_port_t; + pub fn task_for_pid( + host: crate::mach_port_t, + pid: crate::pid_t, + task: *mut crate::mach_port_t, + ) -> crate::kern_return_t; + pub fn task_info( + host: crate::mach_port_t, + flavor: task_flavor_t, + task_info_out: task_info_t, + task_info_count: *mut mach_msg_type_number_t, + ) -> crate::kern_return_t; + pub fn task_create( + target_task: crate::task_t, + ledgers: crate::ledger_array_t, + ledgersCnt: crate::mach_msg_type_number_t, + inherit_memory: crate::boolean_t, + child_task: *mut crate::task_t, + ) -> crate::kern_return_t; + pub fn task_terminate(target_task: crate::task_t) -> crate::kern_return_t; + pub fn task_threads( + target_task: crate::task_inspect_t, + act_list: *mut crate::thread_act_array_t, + act_listCnt: *mut crate::mach_msg_type_number_t, + ) -> crate::kern_return_t; + pub fn host_statistics( + host_priv: host_t, + flavor: host_flavor_t, + host_info_out: host_info_t, + host_info_outCnt: *mut mach_msg_type_number_t, + ) -> crate::kern_return_t; + + // sysdir.h + pub fn sysdir_start_search_path_enumeration( + dir: sysdir_search_path_directory_t, + domainMask: sysdir_search_path_domain_mask_t, + ) -> crate::sysdir_search_path_enumeration_state; + pub fn sysdir_get_next_search_path_enumeration( + state: crate::sysdir_search_path_enumeration_state, + path: *mut c_char, + ) -> crate::sysdir_search_path_enumeration_state; + + pub static vm_page_size: vm_size_t; + + pub fn getattrlist( + path: *const c_char, + attrList: *mut c_void, + attrBuf: *mut c_void, + attrBufSize: size_t, + options: u32, + ) -> c_int; + pub fn fgetattrlist( + fd: c_int, + attrList: *mut c_void, + attrBuf: *mut c_void, + attrBufSize: size_t, + options: u32, + ) -> c_int; + pub fn getattrlistat( + fd: c_int, + path: *const c_char, + attrList: *mut c_void, + attrBuf: *mut c_void, + attrBufSize: size_t, + options: c_ulong, + ) -> c_int; + pub fn 
setattrlist( + path: *const c_char, + attrList: *mut c_void, + attrBuf: *mut c_void, + attrBufSize: size_t, + options: u32, + ) -> c_int; + pub fn fsetattrlist( + fd: c_int, + attrList: *mut c_void, + attrBuf: *mut c_void, + attrBufSize: size_t, + options: u32, + ) -> c_int; + pub fn setattrlistat( + dir_fd: c_int, + path: *const c_char, + attrList: *mut c_void, + attrBuf: *mut c_void, + attrBufSize: size_t, + options: u32, + ) -> c_int; + pub fn getattrlistbulk( + dirfd: c_int, + attrList: *mut c_void, + attrBuf: *mut c_void, + attrBufSize: size_t, + options: u64, + ) -> c_int; + + pub fn malloc_size(ptr: *const c_void) -> size_t; + pub fn malloc_good_size(size: size_t) -> size_t; + + pub fn dirname(path: *mut c_char) -> *mut c_char; + pub fn basename(path: *mut c_char) -> *mut c_char; + + pub fn mkfifoat(dirfd: c_int, pathname: *const c_char, mode: mode_t) -> c_int; + pub fn mknodat(dirfd: c_int, pathname: *const c_char, mode: mode_t, dev: dev_t) -> c_int; + pub fn freadlink(fd: c_int, buf: *mut c_char, size: size_t) -> c_int; + pub fn execvP( + file: *const c_char, + search_path: *const c_char, + argv: *const *mut c_char, + ) -> c_int; + + pub fn qsort_r( + base: *mut c_void, + num: size_t, + size: size_t, + arg: *mut c_void, + compar: Option c_int>, + ); +} + +#[allow(deprecated)] +#[deprecated(since = "0.2.55", note = "Use the `mach2` crate instead")] +pub unsafe fn mach_task_self() -> crate::mach_port_t { + mach_task_self_ +} + +cfg_if! { + if #[cfg(target_os = "macos")] { + extern "C" { + pub fn clock_settime(clock_id: crate::clockid_t, tp: *const crate::timespec) -> c_int; + } + } +} +cfg_if! { + if #[cfg(any( + target_os = "macos", + target_os = "ios", + target_os = "tvos", + target_os = "visionos" + ))] { + extern "C" { + pub fn memmem( + haystack: *const c_void, + haystacklen: size_t, + needle: *const c_void, + needlelen: size_t, + ) -> *mut c_void; + pub fn task_set_info( + target_task: crate::task_t, + flavor: crate::task_flavor_t, + task_info_in: crate::task_info_t, + task_info_inCnt: crate::mach_msg_type_number_t, + ) -> crate::kern_return_t; + } + } +} + +// These require a dependency on `libiconv`, and including this when built as +// part of `std` means every Rust program gets it. Ideally we would have a link +// modifier to only include these if they are used, but we do not. +#[cfg_attr(not(feature = "rustc-dep-of-std"), link(name = "iconv"))] +extern "C" { + #[deprecated(note = "Will be removed in 1.0 to avoid the `iconv` dependency")] + pub fn iconv_open(tocode: *const c_char, fromcode: *const c_char) -> iconv_t; + #[deprecated(note = "Will be removed in 1.0 to avoid the `iconv` dependency")] + pub fn iconv( + cd: iconv_t, + inbuf: *mut *mut c_char, + inbytesleft: *mut size_t, + outbuf: *mut *mut c_char, + outbytesleft: *mut size_t, + ) -> size_t; + #[deprecated(note = "Will be removed in 1.0 to avoid the `iconv` dependency")] + pub fn iconv_close(cd: iconv_t) -> c_int; +} + +cfg_if! 
{
+    if #[cfg(target_pointer_width = "32")] {
+        mod b32;
+        pub use self::b32::*;
+    } else if #[cfg(target_pointer_width = "64")] {
+        mod b64;
+        pub use self::b64::*;
+    } else {
+        // Unknown target_arch
+    }
+}
diff --git a/vendor/libc/src/unix/bsd/freebsdlike/dragonfly/errno.rs b/vendor/libc/src/unix/bsd/freebsdlike/dragonfly/errno.rs
new file mode 100644
index 00000000000000..874c1da84d3a58
--- /dev/null
+++ b/vendor/libc/src/unix/bsd/freebsdlike/dragonfly/errno.rs
@@ -0,0 +1,17 @@
+use crate::prelude::*;
+
+/* DIFF(main): module removed in de76fee6 */
+
+// DragonFlyBSD's __error function is declared with "static inline", so it must
+// be implemented in the libc crate, as a pointer to a static thread_local.
+f! {
+    #[deprecated(since = "0.2.77", note = "Use `__errno_location()` instead")]
+    pub fn __error() -> *mut c_int {
+        &mut errno
+    }
+}
+
+extern "C" {
+    #[thread_local]
+    pub static mut errno: c_int;
+}
diff --git a/vendor/libc/src/unix/bsd/freebsdlike/dragonfly/mod.rs b/vendor/libc/src/unix/bsd/freebsdlike/dragonfly/mod.rs
new file mode 100644
index 00000000000000..8720bf7fb36495
--- /dev/null
+++ b/vendor/libc/src/unix/bsd/freebsdlike/dragonfly/mod.rs
@@ -0,0 +1,1635 @@
+use crate::prelude::*;
+use crate::{cmsghdr, off_t};
+
+pub type dev_t = u32;
+pub type wchar_t = i32;
+pub type clock_t = u64;
+pub type ino_t = u64;
+pub type lwpid_t = i32;
+pub type nlink_t = u32;
+pub type blksize_t = i64;
+pub type clockid_t = c_ulong;
+
+pub type time_t = i64;
+pub type suseconds_t = i64;
+
+pub type uuid_t = crate::uuid;
+
+pub type fsblkcnt_t = u64;
+pub type fsfilcnt_t = u64;
+pub type idtype_t = c_uint;
+pub type shmatt_t = c_uint;
+
+pub type mqd_t = c_int;
+pub type sem_t = *mut sem;
+
+pub type cpuset_t = cpumask_t;
+pub type cpu_set_t = cpumask_t;
+
+pub type register_t = c_long;
+pub type umtx_t = c_int;
+pub type pthread_barrierattr_t = c_int;
+pub type pthread_barrier_t = crate::uintptr_t;
+pub type pthread_spinlock_t = crate::uintptr_t;
+
+pub type segsz_t = usize;
+
+pub type vm_prot_t = u8;
+pub type vm_maptype_t = u8;
+pub type vm_inherit_t = i8;
+pub type vm_subsys_t = c_int;
+pub type vm_eflags_t = c_uint;
+
+pub type vm_map_t = *mut __c_anonymous_vm_map;
+pub type vm_map_entry_t = *mut vm_map_entry;
+
+pub type pmap = __c_anonymous_pmap;
+
+#[derive(Debug)]
+pub enum sem {}
+impl Copy for sem {}
+impl Clone for sem {
+    fn clone(&self) -> sem {
+        *self
+    }
+}
+
+e! {
+    #[repr(u32)]
+    pub enum lwpstat {
+        LSRUN = 1,
+        LSSTOP = 2,
+        LSSLEEP = 3,
+    }
+
+    #[repr(u32)]
+    pub enum procstat {
+        SIDL = 1,
+        SACTIVE = 2,
+        SSTOP = 3,
+        SZOMB = 4,
+        SCORE = 5,
+    }
+}
+
+s!
{ + pub struct kevent { + pub ident: crate::uintptr_t, + pub filter: c_short, + pub flags: c_ushort, + pub fflags: c_uint, + pub data: intptr_t, + pub udata: *mut c_void, + } + + pub struct exit_status { + pub e_termination: u16, + pub e_exit: u16, + } + + pub struct aiocb { + pub aio_fildes: c_int, + pub aio_offset: off_t, + pub aio_buf: *mut c_void, + pub aio_nbytes: size_t, + pub aio_sigevent: sigevent, + pub aio_lio_opcode: c_int, + pub aio_reqprio: c_int, + _aio_val: c_int, + _aio_err: c_int, + } + + pub struct uuid { + pub time_low: u32, + pub time_mid: u16, + pub time_hi_and_version: u16, + pub clock_seq_hi_and_reserved: u8, + pub clock_seq_low: u8, + pub node: [u8; 6], + } + + pub struct mq_attr { + pub mq_flags: c_long, + pub mq_maxmsg: c_long, + pub mq_msgsize: c_long, + pub mq_curmsgs: c_long, + } + + pub struct statvfs { + pub f_bsize: c_ulong, + pub f_frsize: c_ulong, + pub f_blocks: crate::fsblkcnt_t, + pub f_bfree: crate::fsblkcnt_t, + pub f_bavail: crate::fsblkcnt_t, + pub f_files: crate::fsfilcnt_t, + pub f_ffree: crate::fsfilcnt_t, + pub f_favail: crate::fsfilcnt_t, + pub f_fsid: c_ulong, + pub f_flag: c_ulong, + pub f_namemax: c_ulong, + pub f_owner: crate::uid_t, + pub f_type: c_uint, + pub f_syncreads: u64, + pub f_syncwrites: u64, + pub f_asyncreads: u64, + pub f_asyncwrites: u64, + pub f_fsid_uuid: crate::uuid_t, + pub f_uid_uuid: crate::uuid_t, + } + + pub struct stat { + pub st_ino: crate::ino_t, + pub st_nlink: crate::nlink_t, + pub st_dev: crate::dev_t, + pub st_mode: crate::mode_t, + pub st_padding1: u16, + pub st_uid: crate::uid_t, + pub st_gid: crate::gid_t, + pub st_rdev: crate::dev_t, + pub st_atime: crate::time_t, + pub st_atime_nsec: c_long, + pub st_mtime: crate::time_t, + pub st_mtime_nsec: c_long, + pub st_ctime: crate::time_t, + pub st_ctime_nsec: c_long, + pub st_size: off_t, + pub st_blocks: i64, + pub __old_st_blksize: u32, + pub st_flags: u32, + pub st_gen: u32, + pub st_lspare: i32, + pub st_blksize: i64, + pub st_qspare2: i64, + } + + pub struct if_data { + pub ifi_type: c_uchar, + pub ifi_physical: c_uchar, + pub ifi_addrlen: c_uchar, + pub ifi_hdrlen: c_uchar, + pub ifi_recvquota: c_uchar, + pub ifi_xmitquota: c_uchar, + pub ifi_mtu: c_ulong, + pub ifi_metric: c_ulong, + pub ifi_link_state: c_ulong, + pub ifi_baudrate: u64, + pub ifi_ipackets: c_ulong, + pub ifi_ierrors: c_ulong, + pub ifi_opackets: c_ulong, + pub ifi_oerrors: c_ulong, + pub ifi_collisions: c_ulong, + pub ifi_ibytes: c_ulong, + pub ifi_obytes: c_ulong, + pub ifi_imcasts: c_ulong, + pub ifi_omcasts: c_ulong, + pub ifi_iqdrops: c_ulong, + pub ifi_noproto: c_ulong, + pub ifi_hwassist: c_ulong, + pub ifi_oqdrops: c_ulong, + pub ifi_lastchange: crate::timeval, + } + + pub struct if_msghdr { + pub ifm_msglen: c_ushort, + pub ifm_version: c_uchar, + pub ifm_type: c_uchar, + pub ifm_addrs: c_int, + pub ifm_flags: c_int, + pub ifm_index: c_ushort, + pub ifm_data: if_data, + } + + pub struct sockaddr_dl { + pub sdl_len: c_uchar, + pub sdl_family: c_uchar, + pub sdl_index: c_ushort, + pub sdl_type: c_uchar, + pub sdl_nlen: c_uchar, + pub sdl_alen: c_uchar, + pub sdl_slen: c_uchar, + pub sdl_data: [c_char; 12], + pub sdl_rcf: c_ushort, + pub sdl_route: [c_ushort; 16], + } + + pub struct xucred { + pub cr_version: c_uint, + pub cr_uid: crate::uid_t, + pub cr_ngroups: c_short, + pub cr_groups: [crate::gid_t; 16], + __cr_unused1: *mut c_void, + } + + pub struct stack_t { + pub ss_sp: *mut c_void, + pub ss_size: size_t, + pub ss_flags: c_int, + } + + pub struct cpumask_t { + ary: [u64; 4], + } 
+ + pub struct shmid_ds { + pub shm_perm: crate::ipc_perm, + pub shm_segsz: size_t, + pub shm_lpid: crate::pid_t, + pub shm_cpid: crate::pid_t, + pub shm_nattch: crate::shmatt_t, + pub shm_atime: crate::time_t, + pub shm_dtime: crate::time_t, + pub shm_ctime: crate::time_t, + shm_internal: *mut c_void, + } + + pub struct kinfo_file { + pub f_size: size_t, + pub f_pid: crate::pid_t, + pub f_uid: crate::uid_t, + pub f_fd: c_int, + pub f_file: *mut c_void, + pub f_type: c_short, + pub f_count: c_int, + pub f_msgcount: c_int, + pub f_offset: off_t, + pub f_data: *mut c_void, + pub f_flag: c_uint, + } + + pub struct kinfo_cputime { + pub cp_user: u64, + pub cp_nice: u64, + pub cp_sys: u64, + pub cp_intr: u64, + pub cp_idel: u64, + cp_unused01: u64, + cp_unused02: u64, + pub cp_sample_pc: u64, + pub cp_sample_sp: u64, + pub cp_msg: [c_char; 32], + } + + pub struct kinfo_lwp { + pub kl_pid: crate::pid_t, + pub kl_tid: crate::lwpid_t, + pub kl_flags: c_int, + pub kl_stat: crate::lwpstat, + pub kl_lock: c_int, + pub kl_tdflags: c_int, + pub kl_mpcount: c_int, + pub kl_prio: c_int, + pub kl_tdprio: c_int, + pub kl_rtprio: crate::rtprio, + pub kl_uticks: u64, + pub kl_sticks: u64, + pub kl_iticks: u64, + pub kl_cpticks: u64, + pub kl_pctcpu: c_uint, + pub kl_slptime: c_uint, + pub kl_origcpu: c_int, + pub kl_estcpu: c_int, + pub kl_cpuid: c_int, + pub kl_ru: crate::rusage, + pub kl_siglist: crate::sigset_t, + pub kl_sigmask: crate::sigset_t, + pub kl_wchan: crate::uintptr_t, + pub kl_wmesg: [c_char; 9], + pub kl_comm: [c_char; MAXCOMLEN + 1], + } + + pub struct kinfo_proc { + pub kp_paddr: crate::uintptr_t, + pub kp_flags: c_int, + pub kp_stat: crate::procstat, + pub kp_lock: c_int, + pub kp_acflag: c_int, + pub kp_traceflag: c_int, + pub kp_fd: crate::uintptr_t, + pub kp_siglist: crate::sigset_t, + pub kp_sigignore: crate::sigset_t, + pub kp_sigcatch: crate::sigset_t, + pub kp_sigflag: c_int, + pub kp_start: crate::timeval, + pub kp_comm: [c_char; MAXCOMLEN + 1], + pub kp_uid: crate::uid_t, + pub kp_ngroups: c_short, + pub kp_groups: [crate::gid_t; NGROUPS], + pub kp_ruid: crate::uid_t, + pub kp_svuid: crate::uid_t, + pub kp_rgid: crate::gid_t, + pub kp_svgid: crate::gid_t, + pub kp_pid: crate::pid_t, + pub kp_ppid: crate::pid_t, + pub kp_pgid: crate::pid_t, + pub kp_jobc: c_int, + pub kp_sid: crate::pid_t, + pub kp_login: [c_char; 40], // MAXNAMELEN rounded up to the nearest sizeof(long) + pub kp_tdev: crate::dev_t, + pub kp_tpgid: crate::pid_t, + pub kp_tsid: crate::pid_t, + pub kp_exitstat: c_ushort, + pub kp_nthreads: c_int, + pub kp_nice: c_int, + pub kp_swtime: c_uint, + pub kp_vm_map_size: size_t, + pub kp_vm_rssize: crate::segsz_t, + pub kp_vm_swrss: crate::segsz_t, + pub kp_vm_tsize: crate::segsz_t, + pub kp_vm_dsize: crate::segsz_t, + pub kp_vm_ssize: crate::segsz_t, + pub kp_vm_prssize: c_uint, + pub kp_jailid: c_int, + pub kp_ru: crate::rusage, + pub kp_cru: crate::rusage, + pub kp_auxflags: c_int, + pub kp_lwp: crate::kinfo_lwp, + pub kp_ktaddr: crate::uintptr_t, + kp_spare: [c_int; 2], + } + + pub struct __c_anonymous_vm_map { + _priv: [crate::uintptr_t; 36], + } + + pub struct vm_map_entry { + _priv: [crate::uintptr_t; 15], + pub eflags: crate::vm_eflags_t, + pub maptype: crate::vm_maptype_t, + pub protection: crate::vm_prot_t, + pub max_protection: crate::vm_prot_t, + pub inheritance: crate::vm_inherit_t, + pub wired_count: c_int, + pub id: crate::vm_subsys_t, + } + + pub struct __c_anonymous_pmap { + _priv1: [crate::uintptr_t; 32], + _priv2: [crate::uintptr_t; 32], + _priv3: 
[crate::uintptr_t; 32], + _priv4: [crate::uintptr_t; 32], + _priv5: [crate::uintptr_t; 8], + } + + pub struct vmspace { + vm_map: __c_anonymous_vm_map, + vm_pmap: __c_anonymous_pmap, + pub vm_flags: c_int, + pub vm_shm: *mut c_char, + pub vm_rssize: crate::segsz_t, + pub vm_swrss: crate::segsz_t, + pub vm_tsize: crate::segsz_t, + pub vm_dsize: crate::segsz_t, + pub vm_ssize: crate::segsz_t, + pub vm_taddr: *mut c_char, + pub vm_daddr: *mut c_char, + pub vm_maxsaddr: *mut c_char, + pub vm_minsaddr: *mut c_char, + _unused1: c_int, + _unused2: c_int, + pub vm_pagesupply: c_int, + pub vm_holdcnt: c_uint, + pub vm_refcnt: c_uint, + } + + pub struct cpuctl_msr_args_t { + pub msr: c_int, + pub data: u64, + } + + pub struct cpuctl_cpuid_args_t { + pub level: c_int, + pub data: [u32; 4], + } + + pub struct cpuctl_cpuid_count_args_t { + pub level: c_int, + pub level_type: c_int, + pub data: [u32; 4], + } + + pub struct cpuctl_update_args_t { + pub data: *mut c_void, + pub size: size_t, + } +} + +s_no_extra_traits! { + pub struct utmpx { + pub ut_name: [c_char; 32], + pub ut_id: [c_char; 4], + + pub ut_line: [c_char; 32], + pub ut_host: [c_char; 256], + + pub ut_unused: [u8; 16], + pub ut_session: u16, + pub ut_type: u16, + pub ut_pid: crate::pid_t, + ut_exit: exit_status, + ut_ss: crate::sockaddr_storage, + pub ut_tv: crate::timeval, + pub ut_unused2: [u8; 16], + } + + pub struct lastlogx { + pub ll_tv: crate::timeval, + pub ll_line: [c_char; _UTX_LINESIZE], + pub ll_host: [c_char; _UTX_HOSTSIZE], + pub ll_ss: crate::sockaddr_storage, + } + + pub struct dirent { + pub d_fileno: crate::ino_t, + pub d_namlen: u16, + pub d_type: u8, + __unused1: u8, + __unused2: u32, + pub d_name: [c_char; 256], + } + + pub struct statfs { + __spare2: c_long, + pub f_bsize: c_long, + pub f_iosize: c_long, + pub f_blocks: c_long, + pub f_bfree: c_long, + pub f_bavail: c_long, + pub f_files: c_long, + pub f_ffree: c_long, + pub f_fsid: crate::fsid_t, + pub f_owner: crate::uid_t, + pub f_type: c_int, + pub f_flags: c_int, + pub f_syncwrites: c_long, + pub f_asyncwrites: c_long, + pub f_fstypename: [c_char; 16], + pub f_mntonname: [c_char; 80], + pub f_syncreads: c_long, + pub f_asyncreads: c_long, + __spares1: c_short, + pub f_mntfromname: [c_char; 80], + __spares2: c_short, + __spare: [c_long; 2], + } + + pub struct sigevent { + pub sigev_notify: c_int, + // The union is 8-byte in size, so it is aligned at a 8-byte offset. 
+        #[cfg(target_pointer_width = "64")]
+        __unused1: c_int,
+        pub sigev_signo: c_int, //actually a union
+        // pad the union
+        #[cfg(target_pointer_width = "64")]
+        __unused2: c_int,
+        pub sigev_value: crate::sigval,
+        __unused3: *mut c_void, //actually a function pointer
+    }
+
+    pub struct mcontext_t {
+        pub mc_onstack: register_t,
+        pub mc_rdi: register_t,
+        pub mc_rsi: register_t,
+        pub mc_rdx: register_t,
+        pub mc_rcx: register_t,
+        pub mc_r8: register_t,
+        pub mc_r9: register_t,
+        pub mc_rax: register_t,
+        pub mc_rbx: register_t,
+        pub mc_rbp: register_t,
+        pub mc_r10: register_t,
+        pub mc_r11: register_t,
+        pub mc_r12: register_t,
+        pub mc_r13: register_t,
+        pub mc_r14: register_t,
+        pub mc_r15: register_t,
+        pub mc_xflags: register_t,
+        pub mc_trapno: register_t,
+        pub mc_addr: register_t,
+        pub mc_flags: register_t,
+        pub mc_err: register_t,
+        pub mc_rip: register_t,
+        pub mc_cs: register_t,
+        pub mc_rflags: register_t,
+        pub mc_rsp: register_t,
+        pub mc_ss: register_t,
+        pub mc_len: c_uint,
+        pub mc_fpformat: c_uint,
+        pub mc_ownedfp: c_uint,
+        __reserved: c_uint,
+        __unused: [c_uint; 8],
+        pub mc_fpregs: [[c_uint; 8]; 32],
+    }
+
+    // FIXME(1.0): This should not implement `PartialEq`
+    #[allow(unpredictable_function_pointer_comparisons)]
+    pub struct ucontext_t {
+        pub uc_sigmask: crate::sigset_t,
+        pub uc_mcontext: mcontext_t,
+        pub uc_link: *mut ucontext_t,
+        pub uc_stack: stack_t,
+        pub uc_cofunc: Option<unsafe extern "C" fn(*mut ucontext_t, *mut c_void)>,
+        pub uc_arg: *mut c_void,
+        __pad: [c_int; 4],
+    }
+}
+
+cfg_if! {
+    if #[cfg(feature = "extra_traits")] {
+        impl PartialEq for utmpx {
+            fn eq(&self, other: &utmpx) -> bool {
+                self.ut_name == other.ut_name
+                    && self.ut_id == other.ut_id
+                    && self.ut_line == other.ut_line
+                    && self
+                        .ut_host
+                        .iter()
+                        .zip(other.ut_host.iter())
+                        .all(|(a, b)| a == b)
+                    && self.ut_unused == other.ut_unused
+                    && self.ut_session == other.ut_session
+                    && self.ut_type == other.ut_type
+                    && self.ut_pid == other.ut_pid
+                    && self.ut_exit == other.ut_exit
+                    && self.ut_ss == other.ut_ss
+                    && self.ut_tv == other.ut_tv
+                    && self.ut_unused2 == other.ut_unused2
+            }
+        }
+        impl Eq for utmpx {}
+        impl hash::Hash for utmpx {
+            fn hash<H: hash::Hasher>(&self, state: &mut H) {
+                self.ut_name.hash(state);
+                self.ut_id.hash(state);
+                self.ut_line.hash(state);
+                self.ut_host.hash(state);
+                self.ut_unused.hash(state);
+                self.ut_session.hash(state);
+                self.ut_type.hash(state);
+                self.ut_pid.hash(state);
+                self.ut_exit.hash(state);
+                self.ut_ss.hash(state);
+                self.ut_tv.hash(state);
+                self.ut_unused2.hash(state);
+            }
+        }
+        impl PartialEq for lastlogx {
+            fn eq(&self, other: &lastlogx) -> bool {
+                self.ll_tv == other.ll_tv
+                    && self.ll_line == other.ll_line
+                    && self.ll_host == other.ll_host
+                    && self.ll_ss == other.ll_ss
+            }
+        }
+        impl Eq for lastlogx {}
+        impl hash::Hash for lastlogx {
+            fn hash<H: hash::Hasher>(&self, state: &mut H) {
+                self.ll_tv.hash(state);
+                self.ll_line.hash(state);
+                self.ll_host.hash(state);
+                self.ll_ss.hash(state);
+            }
+        }
+
+        impl PartialEq for dirent {
+            fn eq(&self, other: &dirent) -> bool {
+                self.d_fileno == other.d_fileno
+                    && self.d_namlen == other.d_namlen
+                    && self.d_type == other.d_type
+                    // Ignore __unused1
+                    // Ignore __unused2
+                    && self
+                        .d_name
+                        .iter()
+                        .zip(other.d_name.iter())
+                        .all(|(a,b)| a == b)
+            }
+        }
+        impl Eq for dirent {}
+        impl hash::Hash for dirent {
+            fn hash<H: hash::Hasher>(&self, state: &mut H) {
+                self.d_fileno.hash(state);
+                self.d_namlen.hash(state);
+                self.d_type.hash(state);
+                // Ignore __unused1
+                // Ignore __unused2
+                self.d_name.hash(state);
+            }
+        }
+
+        impl PartialEq for statfs {
+            fn eq(&self, other: &statfs) -> bool {
+                self.f_bsize == other.f_bsize
+                    && self.f_iosize == other.f_iosize
+                    && self.f_blocks == other.f_blocks
+                    && self.f_bfree == other.f_bfree
+                    && self.f_bavail == other.f_bavail
+                    && self.f_files == other.f_files
+                    && self.f_ffree == other.f_ffree
+                    && self.f_fsid == other.f_fsid
+                    && self.f_owner == other.f_owner
+                    && self.f_type == other.f_type
+                    && self.f_flags == other.f_flags
+                    && self.f_syncwrites == other.f_syncwrites
+                    && self.f_asyncwrites == other.f_asyncwrites
+                    && self.f_fstypename == other.f_fstypename
+                    && self
+                        .f_mntonname
+                        .iter()
+                        .zip(other.f_mntonname.iter())
+                        .all(|(a, b)| a == b)
+                    && self.f_syncreads == other.f_syncreads
+                    && self.f_asyncreads == other.f_asyncreads
+                    && self
+                        .f_mntfromname
+                        .iter()
+                        .zip(other.f_mntfromname.iter())
+                        .all(|(a, b)| a == b)
+            }
+        }
+        impl Eq for statfs {}
+        impl hash::Hash for statfs {
+            fn hash<H: hash::Hasher>(&self, state: &mut H) {
+                self.f_bsize.hash(state);
+                self.f_iosize.hash(state);
+                self.f_blocks.hash(state);
+                self.f_bfree.hash(state);
+                self.f_bavail.hash(state);
+                self.f_files.hash(state);
+                self.f_ffree.hash(state);
+                self.f_fsid.hash(state);
+                self.f_owner.hash(state);
+                self.f_type.hash(state);
+                self.f_flags.hash(state);
+                self.f_syncwrites.hash(state);
+                self.f_asyncwrites.hash(state);
+                self.f_fstypename.hash(state);
+                self.f_mntonname.hash(state);
+                self.f_syncreads.hash(state);
+                self.f_asyncreads.hash(state);
+                self.f_mntfromname.hash(state);
+            }
+        }
+
+        impl PartialEq for sigevent {
+            fn eq(&self, other: &sigevent) -> bool {
+                self.sigev_notify == other.sigev_notify
+                    && self.sigev_signo == other.sigev_signo
+                    && self.sigev_value == other.sigev_value
+            }
+        }
+        impl Eq for sigevent {}
+        impl hash::Hash for sigevent {
+            fn hash<H: hash::Hasher>(&self, state: &mut H) {
+                self.sigev_notify.hash(state);
+                self.sigev_signo.hash(state);
+                self.sigev_value.hash(state);
+            }
+        }
+        impl PartialEq for mcontext_t {
+            fn eq(&self, other: &mcontext_t) -> bool {
+                self.mc_onstack == other.mc_onstack
+                    && self.mc_rdi == other.mc_rdi
+                    && self.mc_rsi == other.mc_rsi
+                    && self.mc_rdx == other.mc_rdx
+                    && self.mc_rcx == other.mc_rcx
+                    && self.mc_r8 == other.mc_r8
+                    && self.mc_r9 == other.mc_r9
+                    && self.mc_rax == other.mc_rax
+                    && self.mc_rbx == other.mc_rbx
+                    && self.mc_rbp == other.mc_rbp
+                    && self.mc_r10 == other.mc_r10
+                    && self.mc_r11 == other.mc_r11
+                    && self.mc_r12 == other.mc_r12
+                    && self.mc_r13 == other.mc_r13
+                    && self.mc_r14 == other.mc_r14
+                    && self.mc_r15 == other.mc_r15
+                    && self.mc_xflags == other.mc_xflags
+                    && self.mc_trapno == other.mc_trapno
+                    && self.mc_addr == other.mc_addr
+                    && self.mc_flags == other.mc_flags
+                    && self.mc_err == other.mc_err
+                    && self.mc_rip == other.mc_rip
+                    && self.mc_cs == other.mc_cs
+                    && self.mc_rflags == other.mc_rflags
+                    && self.mc_rsp == other.mc_rsp
+                    && self.mc_ss == other.mc_ss
+                    && self.mc_len == other.mc_len
+                    && self.mc_fpformat == other.mc_fpformat
+                    && self.mc_ownedfp == other.mc_ownedfp
+                    && self.mc_fpregs == other.mc_fpregs
+            }
+        }
+        impl Eq for mcontext_t {}
+        impl hash::Hash for mcontext_t {
+            fn hash<H: hash::Hasher>(&self, state: &mut H) {
+                self.mc_onstack.hash(state);
+                self.mc_rdi.hash(state);
+                self.mc_rsi.hash(state);
+                self.mc_rdx.hash(state);
+                self.mc_rcx.hash(state);
+                self.mc_r8.hash(state);
+                self.mc_r9.hash(state);
+                self.mc_rax.hash(state);
+                self.mc_rbx.hash(state);
+                self.mc_rbp.hash(state);
+                self.mc_r10.hash(state);
+                self.mc_r11.hash(state);
+                self.mc_r12.hash(state);
+                self.mc_r13.hash(state);
+
self.mc_r14.hash(state); + self.mc_r15.hash(state); + self.mc_xflags.hash(state); + self.mc_trapno.hash(state); + self.mc_addr.hash(state); + self.mc_flags.hash(state); + self.mc_err.hash(state); + self.mc_rip.hash(state); + self.mc_cs.hash(state); + self.mc_rflags.hash(state); + self.mc_rsp.hash(state); + self.mc_ss.hash(state); + self.mc_len.hash(state); + self.mc_fpformat.hash(state); + self.mc_ownedfp.hash(state); + self.mc_fpregs.hash(state); + } + } + // FIXME(msrv): suggested method was added in 1.85 + #[allow(unpredictable_function_pointer_comparisons)] + impl PartialEq for ucontext_t { + fn eq(&self, other: &ucontext_t) -> bool { + self.uc_sigmask == other.uc_sigmask + && self.uc_mcontext == other.uc_mcontext + && self.uc_link == other.uc_link + && self.uc_stack == other.uc_stack + && self.uc_cofunc == other.uc_cofunc + && self.uc_arg == other.uc_arg + } + } + impl Eq for ucontext_t {} + impl hash::Hash for ucontext_t { + fn hash(&self, state: &mut H) { + self.uc_sigmask.hash(state); + self.uc_mcontext.hash(state); + self.uc_link.hash(state); + self.uc_stack.hash(state); + self.uc_cofunc.hash(state); + self.uc_arg.hash(state); + } + } + } +} + +pub const RAND_MAX: c_int = 0x7fff_ffff; +pub const PTHREAD_STACK_MIN: size_t = 16384; +pub const SIGSTKSZ: size_t = 40960; +pub const SIGCKPT: c_int = 33; +pub const SIGCKPTEXIT: c_int = 34; +pub const CKPT_FREEZE: c_int = 0x1; +pub const CKPT_THAW: c_int = 0x2; +pub const MADV_INVAL: c_int = 10; +pub const MADV_SETMAP: c_int = 11; +pub const O_CLOEXEC: c_int = 0x00020000; +pub const O_DIRECTORY: c_int = 0x08000000; +pub const F_GETLK: c_int = 7; +pub const F_SETLK: c_int = 8; +pub const F_SETLKW: c_int = 9; +pub const F_GETPATH: c_int = 19; +pub const ENOMEDIUM: c_int = 93; +pub const ENOTRECOVERABLE: c_int = 94; +pub const EOWNERDEAD: c_int = 95; +pub const EASYNC: c_int = 99; +pub const ELAST: c_int = 99; +pub const RLIMIT_POSIXLOCKS: c_int = 11; +#[deprecated(since = "0.2.64", note = "Not stable across OS versions")] +pub const RLIM_NLIMITS: crate::rlim_t = 12; + +pub const Q_GETQUOTA: c_int = 0x300; +pub const Q_SETQUOTA: c_int = 0x400; + +pub const CTL_UNSPEC: c_int = 0; +pub const CTL_KERN: c_int = 1; +pub const CTL_VM: c_int = 2; +pub const CTL_VFS: c_int = 3; +pub const CTL_NET: c_int = 4; +pub const CTL_DEBUG: c_int = 5; +pub const CTL_HW: c_int = 6; +pub const CTL_MACHDEP: c_int = 7; +pub const CTL_USER: c_int = 8; +pub const CTL_P1003_1B: c_int = 9; +pub const CTL_LWKT: c_int = 10; +pub const CTL_MAXID: c_int = 11; +pub const KERN_OSTYPE: c_int = 1; +pub const KERN_OSRELEASE: c_int = 2; +pub const KERN_OSREV: c_int = 3; +pub const KERN_VERSION: c_int = 4; +pub const KERN_MAXVNODES: c_int = 5; +pub const KERN_MAXPROC: c_int = 6; +pub const KERN_MAXFILES: c_int = 7; +pub const KERN_ARGMAX: c_int = 8; +pub const KERN_SECURELVL: c_int = 9; +pub const KERN_HOSTNAME: c_int = 10; +pub const KERN_HOSTID: c_int = 11; +pub const KERN_CLOCKRATE: c_int = 12; +pub const KERN_VNODE: c_int = 13; +pub const KERN_PROC: c_int = 14; +pub const KERN_FILE: c_int = 15; +pub const KERN_PROF: c_int = 16; +pub const KERN_POSIX1: c_int = 17; +pub const KERN_NGROUPS: c_int = 18; +pub const KERN_JOB_CONTROL: c_int = 19; +pub const KERN_SAVED_IDS: c_int = 20; +pub const KERN_BOOTTIME: c_int = 21; +pub const KERN_NISDOMAINNAME: c_int = 22; +pub const KERN_UPDATEINTERVAL: c_int = 23; +pub const KERN_OSRELDATE: c_int = 24; +pub const KERN_NTP_PLL: c_int = 25; +pub const KERN_BOOTFILE: c_int = 26; +pub const KERN_MAXFILESPERPROC: c_int = 27; +pub const 
KERN_MAXPROCPERUID: c_int = 28; +pub const KERN_DUMPDEV: c_int = 29; +pub const KERN_IPC: c_int = 30; +pub const KERN_DUMMY: c_int = 31; +pub const KERN_PS_STRINGS: c_int = 32; +pub const KERN_USRSTACK: c_int = 33; +pub const KERN_LOGSIGEXIT: c_int = 34; +pub const KERN_IOV_MAX: c_int = 35; +pub const KERN_MAXPOSIXLOCKSPERUID: c_int = 36; +pub const KERN_MAXID: c_int = 37; +pub const KERN_PROC_ALL: c_int = 0; +pub const KERN_PROC_PID: c_int = 1; +pub const KERN_PROC_PGRP: c_int = 2; +pub const KERN_PROC_SESSION: c_int = 3; +pub const KERN_PROC_TTY: c_int = 4; +pub const KERN_PROC_UID: c_int = 5; +pub const KERN_PROC_RUID: c_int = 6; +pub const KERN_PROC_ARGS: c_int = 7; +pub const KERN_PROC_CWD: c_int = 8; +pub const KERN_PROC_PATHNAME: c_int = 9; +pub const KERN_PROC_FLAGMASK: c_int = 0x10; +pub const KERN_PROC_FLAG_LWP: c_int = 0x10; +pub const KIPC_MAXSOCKBUF: c_int = 1; +pub const KIPC_SOCKBUF_WASTE: c_int = 2; +pub const KIPC_SOMAXCONN: c_int = 3; +pub const KIPC_MAX_LINKHDR: c_int = 4; +pub const KIPC_MAX_PROTOHDR: c_int = 5; +pub const KIPC_MAX_HDR: c_int = 6; +pub const KIPC_MAX_DATALEN: c_int = 7; +pub const KIPC_MBSTAT: c_int = 8; +pub const KIPC_NMBCLUSTERS: c_int = 9; +pub const HW_MACHINE: c_int = 1; +pub const HW_MODEL: c_int = 2; +pub const HW_NCPU: c_int = 3; +pub const HW_BYTEORDER: c_int = 4; +pub const HW_PHYSMEM: c_int = 5; +pub const HW_USERMEM: c_int = 6; +pub const HW_PAGESIZE: c_int = 7; +pub const HW_DISKNAMES: c_int = 8; +pub const HW_DISKSTATS: c_int = 9; +pub const HW_FLOATINGPT: c_int = 10; +pub const HW_MACHINE_ARCH: c_int = 11; +pub const HW_MACHINE_PLATFORM: c_int = 12; +pub const HW_SENSORS: c_int = 13; +pub const HW_MAXID: c_int = 14; +pub const USER_CS_PATH: c_int = 1; +pub const USER_BC_BASE_MAX: c_int = 2; +pub const USER_BC_DIM_MAX: c_int = 3; +pub const USER_BC_SCALE_MAX: c_int = 4; +pub const USER_BC_STRING_MAX: c_int = 5; +pub const USER_COLL_WEIGHTS_MAX: c_int = 6; +pub const USER_EXPR_NEST_MAX: c_int = 7; +pub const USER_LINE_MAX: c_int = 8; +pub const USER_RE_DUP_MAX: c_int = 9; +pub const USER_POSIX2_VERSION: c_int = 10; +pub const USER_POSIX2_C_BIND: c_int = 11; +pub const USER_POSIX2_C_DEV: c_int = 12; +pub const USER_POSIX2_CHAR_TERM: c_int = 13; +pub const USER_POSIX2_FORT_DEV: c_int = 14; +pub const USER_POSIX2_FORT_RUN: c_int = 15; +pub const USER_POSIX2_LOCALEDEF: c_int = 16; +pub const USER_POSIX2_SW_DEV: c_int = 17; +pub const USER_POSIX2_UPE: c_int = 18; +pub const USER_STREAM_MAX: c_int = 19; +pub const USER_TZNAME_MAX: c_int = 20; +pub const USER_MAXID: c_int = 21; +pub const CTL_P1003_1B_ASYNCHRONOUS_IO: c_int = 1; +pub const CTL_P1003_1B_MAPPED_FILES: c_int = 2; +pub const CTL_P1003_1B_MEMLOCK: c_int = 3; +pub const CTL_P1003_1B_MEMLOCK_RANGE: c_int = 4; +pub const CTL_P1003_1B_MEMORY_PROTECTION: c_int = 5; +pub const CTL_P1003_1B_MESSAGE_PASSING: c_int = 6; +pub const CTL_P1003_1B_PRIORITIZED_IO: c_int = 7; +pub const CTL_P1003_1B_PRIORITY_SCHEDULING: c_int = 8; +pub const CTL_P1003_1B_REALTIME_SIGNALS: c_int = 9; +pub const CTL_P1003_1B_SEMAPHORES: c_int = 10; +pub const CTL_P1003_1B_FSYNC: c_int = 11; +pub const CTL_P1003_1B_SHARED_MEMORY_OBJECTS: c_int = 12; +pub const CTL_P1003_1B_SYNCHRONIZED_IO: c_int = 13; +pub const CTL_P1003_1B_TIMERS: c_int = 14; +pub const CTL_P1003_1B_AIO_LISTIO_MAX: c_int = 15; +pub const CTL_P1003_1B_AIO_MAX: c_int = 16; +pub const CTL_P1003_1B_AIO_PRIO_DELTA_MAX: c_int = 17; +pub const CTL_P1003_1B_DELAYTIMER_MAX: c_int = 18; +pub const CTL_P1003_1B_UNUSED1: c_int = 19; +pub const 
CTL_P1003_1B_PAGESIZE: c_int = 20; +pub const CTL_P1003_1B_RTSIG_MAX: c_int = 21; +pub const CTL_P1003_1B_SEM_NSEMS_MAX: c_int = 22; +pub const CTL_P1003_1B_SEM_VALUE_MAX: c_int = 23; +pub const CTL_P1003_1B_SIGQUEUE_MAX: c_int = 24; +pub const CTL_P1003_1B_TIMER_MAX: c_int = 25; +pub const CTL_P1003_1B_MAXID: c_int = 26; + +pub const CPUCTL_RSMSR: c_int = 0xc0106301; +pub const CPUCTL_WRMSR: c_int = 0xc0106302; +pub const CPUCTL_CPUID: c_int = 0xc0106303; +pub const CPUCTL_UPDATE: c_int = 0xc0106304; +pub const CPUCTL_MSRSBIT: c_int = 0xc0106305; +pub const CPUCTL_MSRCBIT: c_int = 0xc0106306; +pub const CPUCTL_CPUID_COUNT: c_int = 0xc0106307; + +pub const CPU_SETSIZE: size_t = size_of::() * 8; + +pub const EVFILT_READ: i16 = -1; +pub const EVFILT_WRITE: i16 = -2; +pub const EVFILT_AIO: i16 = -3; +pub const EVFILT_VNODE: i16 = -4; +pub const EVFILT_PROC: i16 = -5; +pub const EVFILT_SIGNAL: i16 = -6; +pub const EVFILT_TIMER: i16 = -7; +pub const EVFILT_EXCEPT: i16 = -8; +pub const EVFILT_USER: i16 = -9; +pub const EVFILT_FS: i16 = -10; + +pub const EV_ADD: u16 = 0x1; +pub const EV_DELETE: u16 = 0x2; +pub const EV_ENABLE: u16 = 0x4; +pub const EV_DISABLE: u16 = 0x8; +pub const EV_ONESHOT: u16 = 0x10; +pub const EV_CLEAR: u16 = 0x20; +pub const EV_RECEIPT: u16 = 0x40; +pub const EV_DISPATCH: u16 = 0x80; +pub const EV_NODATA: u16 = 0x1000; +pub const EV_FLAG1: u16 = 0x2000; +pub const EV_ERROR: u16 = 0x4000; +pub const EV_EOF: u16 = 0x8000; +pub const EV_HUP: u16 = 0x8000; +pub const EV_SYSFLAGS: u16 = 0xf000; + +pub const FIODNAME: c_ulong = 0x80106678; + +pub const NOTE_TRIGGER: u32 = 0x01000000; +pub const NOTE_FFNOP: u32 = 0x00000000; +pub const NOTE_FFAND: u32 = 0x40000000; +pub const NOTE_FFOR: u32 = 0x80000000; +pub const NOTE_FFCOPY: u32 = 0xc0000000; +pub const NOTE_FFCTRLMASK: u32 = 0xc0000000; +pub const NOTE_FFLAGSMASK: u32 = 0x00ffffff; +pub const NOTE_LOWAT: u32 = 0x00000001; +pub const NOTE_OOB: u32 = 0x00000002; +pub const NOTE_DELETE: u32 = 0x00000001; +pub const NOTE_WRITE: u32 = 0x00000002; +pub const NOTE_EXTEND: u32 = 0x00000004; +pub const NOTE_ATTRIB: u32 = 0x00000008; +pub const NOTE_LINK: u32 = 0x00000010; +pub const NOTE_RENAME: u32 = 0x00000020; +pub const NOTE_REVOKE: u32 = 0x00000040; +pub const NOTE_EXIT: u32 = 0x80000000; +pub const NOTE_FORK: u32 = 0x40000000; +pub const NOTE_EXEC: u32 = 0x20000000; +pub const NOTE_PDATAMASK: u32 = 0x000fffff; +pub const NOTE_PCTRLMASK: u32 = 0xf0000000; +pub const NOTE_TRACK: u32 = 0x00000001; +pub const NOTE_TRACKERR: u32 = 0x00000002; +pub const NOTE_CHILD: u32 = 0x00000004; + +pub const SO_SNDSPACE: c_int = 0x100a; +pub const SO_CPUHINT: c_int = 0x1030; +pub const SO_PASSCRED: c_int = 0x4000; + +pub const PT_FIRSTMACH: c_int = 32; + +pub const PROC_REAP_ACQUIRE: c_int = 0x0001; +pub const PROC_REAP_RELEASE: c_int = 0x0002; +pub const PROC_REAP_STATUS: c_int = 0x0003; +pub const PROC_PDEATHSIG_CTL: c_int = 0x0004; +pub const PROC_PDEATHSIG_STATUS: c_int = 0x0005; + +// https://github.com/DragonFlyBSD/DragonFlyBSD/blob/HEAD/sys/net/if.h#L101 +pub const IFF_UP: c_int = 0x1; // interface is up +pub const IFF_BROADCAST: c_int = 0x2; // broadcast address valid +pub const IFF_DEBUG: c_int = 0x4; // turn on debugging +pub const IFF_LOOPBACK: c_int = 0x8; // is a loopback net +pub const IFF_POINTOPOINT: c_int = 0x10; // interface is point-to-point link +pub const IFF_SMART: c_int = 0x20; // interface manages own routes +pub const IFF_RUNNING: c_int = 0x40; // resources allocated +pub const IFF_NOARP: c_int = 0x80; // no address 
resolution protocol +pub const IFF_PROMISC: c_int = 0x100; // receive all packets +pub const IFF_ALLMULTI: c_int = 0x200; // receive all multicast packets +pub const IFF_OACTIVE_COMPAT: c_int = 0x400; // was transmission in progress +pub const IFF_SIMPLEX: c_int = 0x800; // can't hear own transmissions +pub const IFF_LINK0: c_int = 0x1000; // per link layer defined bit +pub const IFF_LINK1: c_int = 0x2000; // per link layer defined bit +pub const IFF_LINK2: c_int = 0x4000; // per link layer defined bit +pub const IFF_ALTPHYS: c_int = IFF_LINK2; // use alternate physical connection +pub const IFF_MULTICAST: c_int = 0x8000; // supports multicast + // was interface is in polling mode +pub const IFF_POLLING_COMPAT: c_int = 0x10000; +pub const IFF_PPROMISC: c_int = 0x20000; // user-requested promisc mode +pub const IFF_MONITOR: c_int = 0x40000; // user-requested monitor mode +pub const IFF_STATICARP: c_int = 0x80000; // static ARP +pub const IFF_NPOLLING: c_int = 0x100000; // interface is in polling mode +pub const IFF_IDIRECT: c_int = 0x200000; // direct input + +// +// sys/netinet/in.h +// Protocols (RFC 1700) +// NOTE: These are in addition to the constants defined in src/unix/mod.rs + +// IPPROTO_IP defined in src/unix/mod.rs +/// IP6 hop-by-hop options +pub const IPPROTO_HOPOPTS: c_int = 0; +// IPPROTO_ICMP defined in src/unix/mod.rs +/// group mgmt protocol +pub const IPPROTO_IGMP: c_int = 2; +/// gateway^2 (deprecated) +pub const IPPROTO_GGP: c_int = 3; +/// for compatibility +pub const IPPROTO_IPIP: c_int = 4; +// IPPROTO_TCP defined in src/unix/mod.rs +/// Stream protocol II. +pub const IPPROTO_ST: c_int = 7; +/// exterior gateway protocol +pub const IPPROTO_EGP: c_int = 8; +/// private interior gateway +pub const IPPROTO_PIGP: c_int = 9; +/// BBN RCC Monitoring +pub const IPPROTO_RCCMON: c_int = 10; +/// network voice protocol +pub const IPPROTO_NVPII: c_int = 11; +/// pup +pub const IPPROTO_PUP: c_int = 12; +/// Argus +pub const IPPROTO_ARGUS: c_int = 13; +/// EMCON +pub const IPPROTO_EMCON: c_int = 14; +/// Cross Net Debugger +pub const IPPROTO_XNET: c_int = 15; +/// Chaos +pub const IPPROTO_CHAOS: c_int = 16; +// IPPROTO_UDP defined in src/unix/mod.rs +/// Multiplexing +pub const IPPROTO_MUX: c_int = 18; +/// DCN Measurement Subsystems +pub const IPPROTO_MEAS: c_int = 19; +/// Host Monitoring +pub const IPPROTO_HMP: c_int = 20; +/// Packet Radio Measurement +pub const IPPROTO_PRM: c_int = 21; +/// xns idp +pub const IPPROTO_IDP: c_int = 22; +/// Trunk-1 +pub const IPPROTO_TRUNK1: c_int = 23; +/// Trunk-2 +pub const IPPROTO_TRUNK2: c_int = 24; +/// Leaf-1 +pub const IPPROTO_LEAF1: c_int = 25; +/// Leaf-2 +pub const IPPROTO_LEAF2: c_int = 26; +/// Reliable Data +pub const IPPROTO_RDP: c_int = 27; +/// Reliable Transaction +pub const IPPROTO_IRTP: c_int = 28; +/// tp-4 w/ class negotiation +pub const IPPROTO_TP: c_int = 29; +/// Bulk Data Transfer +pub const IPPROTO_BLT: c_int = 30; +/// Network Services +pub const IPPROTO_NSP: c_int = 31; +/// Merit Internodal +pub const IPPROTO_INP: c_int = 32; +/// Sequential Exchange +pub const IPPROTO_SEP: c_int = 33; +/// Third Party Connect +pub const IPPROTO_3PC: c_int = 34; +/// InterDomain Policy Routing +pub const IPPROTO_IDPR: c_int = 35; +/// XTP +pub const IPPROTO_XTP: c_int = 36; +/// Datagram Delivery +pub const IPPROTO_DDP: c_int = 37; +/// Control Message Transport +pub const IPPROTO_CMTP: c_int = 38; +/// TP++ Transport +pub const IPPROTO_TPXX: c_int = 39; +/// IL transport protocol +pub const IPPROTO_IL: c_int = 40; +// 
IPPROTO_IPV6 defined in src/unix/mod.rs +/// Source Demand Routing +pub const IPPROTO_SDRP: c_int = 42; +/// IP6 routing header +pub const IPPROTO_ROUTING: c_int = 43; +/// IP6 fragmentation header +pub const IPPROTO_FRAGMENT: c_int = 44; +/// InterDomain Routing +pub const IPPROTO_IDRP: c_int = 45; +/// resource reservation +pub const IPPROTO_RSVP: c_int = 46; +/// General Routing Encap. +pub const IPPROTO_GRE: c_int = 47; +/// Mobile Host Routing +pub const IPPROTO_MHRP: c_int = 48; +/// BHA +pub const IPPROTO_BHA: c_int = 49; +/// IP6 Encap Sec. Payload +pub const IPPROTO_ESP: c_int = 50; +/// IP6 Auth Header +pub const IPPROTO_AH: c_int = 51; +/// Integ. Net Layer Security +pub const IPPROTO_INLSP: c_int = 52; +/// IP with encryption +pub const IPPROTO_SWIPE: c_int = 53; +/// Next Hop Resolution +pub const IPPROTO_NHRP: c_int = 54; +/// IP Mobility +pub const IPPROTO_MOBILE: c_int = 55; +/// Transport Layer Security +pub const IPPROTO_TLSP: c_int = 56; +/// SKIP +pub const IPPROTO_SKIP: c_int = 57; +// IPPROTO_ICMPV6 defined in src/unix/mod.rs +/// IP6 no next header +pub const IPPROTO_NONE: c_int = 59; +/// IP6 destination option +pub const IPPROTO_DSTOPTS: c_int = 60; +/// any host internal protocol +pub const IPPROTO_AHIP: c_int = 61; +/// CFTP +pub const IPPROTO_CFTP: c_int = 62; +/// "hello" routing protocol +pub const IPPROTO_HELLO: c_int = 63; +/// SATNET/Backroom EXPAK +pub const IPPROTO_SATEXPAK: c_int = 64; +/// Kryptolan +pub const IPPROTO_KRYPTOLAN: c_int = 65; +/// Remote Virtual Disk +pub const IPPROTO_RVD: c_int = 66; +/// Pluribus Packet Core +pub const IPPROTO_IPPC: c_int = 67; +/// Any distributed FS +pub const IPPROTO_ADFS: c_int = 68; +/// Satnet Monitoring +pub const IPPROTO_SATMON: c_int = 69; +/// VISA Protocol +pub const IPPROTO_VISA: c_int = 70; +/// Packet Core Utility +pub const IPPROTO_IPCV: c_int = 71; +/// Comp. Prot. Net. Executive +pub const IPPROTO_CPNX: c_int = 72; +/// Comp. Prot. HeartBeat +pub const IPPROTO_CPHB: c_int = 73; +/// Wang Span Network +pub const IPPROTO_WSN: c_int = 74; +/// Packet Video Protocol +pub const IPPROTO_PVP: c_int = 75; +/// BackRoom SATNET Monitoring +pub const IPPROTO_BRSATMON: c_int = 76; +/// Sun net disk proto (temp.) +pub const IPPROTO_ND: c_int = 77; +/// WIDEBAND Monitoring +pub const IPPROTO_WBMON: c_int = 78; +/// WIDEBAND EXPAK +pub const IPPROTO_WBEXPAK: c_int = 79; +/// ISO cnlp +pub const IPPROTO_EON: c_int = 80; +/// VMTP +pub const IPPROTO_VMTP: c_int = 81; +/// Secure VMTP +pub const IPPROTO_SVMTP: c_int = 82; +/// Banyon VINES +pub const IPPROTO_VINES: c_int = 83; +/// TTP +pub const IPPROTO_TTP: c_int = 84; +/// NSFNET-IGP +pub const IPPROTO_IGP: c_int = 85; +/// dissimilar gateway prot. +pub const IPPROTO_DGP: c_int = 86; +/// TCF +pub const IPPROTO_TCF: c_int = 87; +/// Cisco/GXS IGRP +pub const IPPROTO_IGRP: c_int = 88; +/// OSPFIGP +pub const IPPROTO_OSPFIGP: c_int = 89; +/// Strite RPC protocol +pub const IPPROTO_SRPC: c_int = 90; +/// Locus Address Resoloution +pub const IPPROTO_LARP: c_int = 91; +/// Multicast Transport +pub const IPPROTO_MTP: c_int = 92; +/// AX.25 Frames +pub const IPPROTO_AX25: c_int = 93; +/// IP encapsulated in IP +pub const IPPROTO_IPEIP: c_int = 94; +/// Mobile Int.ing control +pub const IPPROTO_MICP: c_int = 95; +/// Semaphore Comm. security +pub const IPPROTO_SCCSP: c_int = 96; +/// Ethernet IP encapsulation +pub const IPPROTO_ETHERIP: c_int = 97; +/// encapsulation header +pub const IPPROTO_ENCAP: c_int = 98; +/// any private encr. 
scheme +pub const IPPROTO_APES: c_int = 99; +/// GMTP +pub const IPPROTO_GMTP: c_int = 100; +/// payload compression (IPComp) +pub const IPPROTO_IPCOMP: c_int = 108; + +/* 101-254: Partly Unassigned */ +/// Protocol Independent Mcast +pub const IPPROTO_PIM: c_int = 103; +/// CARP +pub const IPPROTO_CARP: c_int = 112; +/// PGM +pub const IPPROTO_PGM: c_int = 113; +/// PFSYNC +pub const IPPROTO_PFSYNC: c_int = 240; + +/* 255: Reserved */ +/* BSD Private, local use, namespace incursion, no longer used */ +/// divert pseudo-protocol +pub const IPPROTO_DIVERT: c_int = 254; +pub const IPPROTO_MAX: c_int = 256; +/// last return value of *_input(), meaning "all job for this pkt is done". +pub const IPPROTO_DONE: c_int = 257; + +/// Used by RSS: the layer3 protocol is unknown +pub const IPPROTO_UNKNOWN: c_int = 258; + +// sys/netinet/tcp.h +pub const TCP_SIGNATURE_ENABLE: c_int = 16; +pub const TCP_KEEPINIT: c_int = 32; +pub const TCP_FASTKEEP: c_int = 128; + +pub const AF_BLUETOOTH: c_int = 33; +pub const AF_MPLS: c_int = 34; +pub const AF_IEEE80211: c_int = 35; + +pub const PF_BLUETOOTH: c_int = AF_BLUETOOTH; + +pub const NET_RT_DUMP: c_int = 1; +pub const NET_RT_FLAGS: c_int = 2; +pub const NET_RT_IFLIST: c_int = 3; +pub const NET_RT_MAXID: c_int = 4; + +pub const SOMAXOPT_SIZE: c_int = 65536; + +pub const MSG_UNUSED09: c_int = 0x00000200; +pub const MSG_NOSIGNAL: c_int = 0x00000400; +pub const MSG_SYNC: c_int = 0x00000800; +pub const MSG_CMSG_CLOEXEC: c_int = 0x00001000; +pub const MSG_FBLOCKING: c_int = 0x00010000; +pub const MSG_FNONBLOCKING: c_int = 0x00020000; +pub const MSG_FMASK: c_int = 0xFFFF0000; + +// sys/mount.h +pub const MNT_NODEV: c_int = 0x00000010; +pub const MNT_AUTOMOUNTED: c_int = 0x00000020; +pub const MNT_TRIM: c_int = 0x01000000; +pub const MNT_LOCAL: c_int = 0x00001000; +pub const MNT_QUOTA: c_int = 0x00002000; +pub const MNT_ROOTFS: c_int = 0x00004000; +pub const MNT_USER: c_int = 0x00008000; +pub const MNT_IGNORE: c_int = 0x00800000; + +// utmpx entry types +pub const EMPTY: c_short = 0; +pub const RUN_LVL: c_short = 1; +pub const BOOT_TIME: c_short = 2; +pub const OLD_TIME: c_short = 3; +pub const NEW_TIME: c_short = 4; +pub const INIT_PROCESS: c_short = 5; +pub const LOGIN_PROCESS: c_short = 6; +pub const USER_PROCESS: c_short = 7; +pub const DEAD_PROCESS: c_short = 8; +pub const ACCOUNTING: c_short = 9; +pub const SIGNATURE: c_short = 10; +pub const DOWNTIME: c_short = 11; +// utmpx database types +pub const UTX_DB_UTMPX: c_uint = 0; +pub const UTX_DB_WTMPX: c_uint = 1; +pub const UTX_DB_LASTLOG: c_uint = 2; +pub const _UTX_LINESIZE: usize = 32; +pub const _UTX_USERSIZE: usize = 32; +pub const _UTX_IDSIZE: usize = 4; +pub const _UTX_HOSTSIZE: usize = 256; + +pub const LC_COLLATE_MASK: c_int = 1 << 0; +pub const LC_CTYPE_MASK: c_int = 1 << 1; +pub const LC_MONETARY_MASK: c_int = 1 << 2; +pub const LC_NUMERIC_MASK: c_int = 1 << 3; +pub const LC_TIME_MASK: c_int = 1 << 4; +pub const LC_MESSAGES_MASK: c_int = 1 << 5; +pub const LC_ALL_MASK: c_int = LC_COLLATE_MASK + | LC_CTYPE_MASK + | LC_MESSAGES_MASK + | LC_MONETARY_MASK + | LC_NUMERIC_MASK + | LC_TIME_MASK; + +pub const TIOCSIG: c_ulong = 0x2000745f; +pub const BTUARTDISC: c_int = 0x7; +pub const TIOCDCDTIMESTAMP: c_ulong = 0x40107458; +pub const TIOCISPTMASTER: c_ulong = 0x20007455; +pub const TIOCMODG: c_ulong = 0x40047403; +pub const TIOCMODS: c_ulong = 0x80047404; +pub const TIOCREMOTE: c_ulong = 0x80047469; +pub const TIOCTIMESTAMP: c_ulong = 0x40107459; + +// Constants used by "at" family of system calls. 
+pub const AT_FDCWD: c_int = 0xFFFAFDCD; // invalid file descriptor
+pub const AT_SYMLINK_NOFOLLOW: c_int = 1;
+pub const AT_REMOVEDIR: c_int = 2;
+pub const AT_EACCESS: c_int = 4;
+pub const AT_SYMLINK_FOLLOW: c_int = 8;
+
+pub const VCHECKPT: usize = 19;
+
+pub const _PC_2_SYMLINKS: c_int = 22;
+pub const _PC_TIMESTAMP_RESOLUTION: c_int = 23;
+
+pub const _CS_PATH: c_int = 1;
+
+pub const _SC_V7_ILP32_OFF32: c_int = 122;
+pub const _SC_V7_ILP32_OFFBIG: c_int = 123;
+pub const _SC_V7_LP64_OFF64: c_int = 124;
+pub const _SC_V7_LPBIG_OFFBIG: c_int = 125;
+pub const _SC_THREAD_ROBUST_PRIO_INHERIT: c_int = 126;
+pub const _SC_THREAD_ROBUST_PRIO_PROTECT: c_int = 127;
+
+pub const WCONTINUED: c_int = 0x4;
+pub const WSTOPPED: c_int = 0x2;
+pub const WNOWAIT: c_int = 0x8;
+pub const WEXITED: c_int = 0x10;
+pub const WTRAPPED: c_int = 0x20;
+
+// Similar to FreeBSD, only the standardized ones are exposed.
+// There are more.
+pub const P_PID: idtype_t = 0;
+pub const P_PGID: idtype_t = 2;
+pub const P_ALL: idtype_t = 7;
+
+// Values for struct rtprio (type_ field)
+pub const RTP_PRIO_REALTIME: c_ushort = 0;
+pub const RTP_PRIO_NORMAL: c_ushort = 1;
+pub const RTP_PRIO_IDLE: c_ushort = 2;
+pub const RTP_PRIO_THREAD: c_ushort = 3;
+
+// Flags for chflags(2)
+pub const UF_NOHISTORY: c_ulong = 0x00000040;
+pub const UF_CACHE: c_ulong = 0x00000080;
+pub const UF_XLINK: c_ulong = 0x00000100;
+pub const SF_NOHISTORY: c_ulong = 0x00400000;
+pub const SF_CACHE: c_ulong = 0x00800000;
+pub const SF_XLINK: c_ulong = 0x01000000;
+
+// timespec constants
+pub const UTIME_OMIT: c_long = -2;
+pub const UTIME_NOW: c_long = -1;
+
+pub const MINCORE_SUPER: c_int = 0x20;
+
+// kinfo_proc constants
+pub const MAXCOMLEN: usize = 16;
+pub const MAXLOGNAME: usize = 33;
+pub const NGROUPS: usize = 16;
+
+pub const RB_PAUSE: c_int = 0x40000;
+pub const RB_VIDEO: c_int = 0x20000000;
+
+// net/route.h
+pub const RTF_CLONING: c_int = 0x100;
+pub const RTF_PRCLONING: c_int = 0x10000;
+pub const RTF_WASCLONED: c_int = 0x20000;
+pub const RTF_MPLSOPS: c_int = 0x1000000;
+
+pub const RTM_VERSION: c_int = 7;
+
+pub const RTAX_MPLS1: c_int = 8;
+pub const RTAX_MPLS2: c_int = 9;
+pub const RTAX_MPLS3: c_int = 10;
+pub const RTAX_MAX: c_int = 11;
+
+const fn _CMSG_ALIGN(n: usize) -> usize {
+    (n + (size_of::<c_long>() - 1)) & !(size_of::<c_long>() - 1)
+}
+
+f! {
+    pub fn CMSG_DATA(cmsg: *const cmsghdr) -> *mut c_uchar {
+        (cmsg as *mut c_uchar).offset(_CMSG_ALIGN(size_of::<cmsghdr>()) as isize)
+    }
+
+    pub const fn CMSG_LEN(length: c_uint) -> c_uint {
+        (_CMSG_ALIGN(size_of::<cmsghdr>()) + length as usize) as c_uint
+    }
+
+    pub fn CMSG_NXTHDR(mhdr: *const crate::msghdr, cmsg: *const cmsghdr) -> *mut cmsghdr {
+        let next = cmsg as usize
+            + _CMSG_ALIGN((*cmsg).cmsg_len as usize)
+            + _CMSG_ALIGN(size_of::<cmsghdr>());
+        let max = (*mhdr).msg_control as usize + (*mhdr).msg_controllen as usize;
+        if next <= max {
+            (cmsg as usize + _CMSG_ALIGN((*cmsg).cmsg_len as usize)) as *mut cmsghdr
+        } else {
+            core::ptr::null_mut::<cmsghdr>()
+        }
+    }
+
+    pub const fn CMSG_SPACE(length: c_uint) -> c_uint {
+        (_CMSG_ALIGN(size_of::<cmsghdr>()) + _CMSG_ALIGN(length as usize)) as c_uint
+    }
+
+    pub fn CPU_ZERO(cpuset: &mut cpu_set_t) -> () {
+        for slot in cpuset.ary.iter_mut() {
+            *slot = 0;
+        }
+    }
+
+    pub fn CPU_SET(cpu: usize, cpuset: &mut cpu_set_t) -> () {
+        let (idx, offset) = ((cpu >> 6) & 3, cpu & 63);
+        cpuset.ary[idx] |= 1 << offset;
+        ()
+    }
+
+    pub fn CPU_CLR(cpu: usize, cpuset: &mut cpu_set_t) -> () {
+        let (idx, offset) = ((cpu >> 6) & 3, cpu & 63);
+        cpuset.ary[idx] &= !(1 << offset);
+        ()
+    }
+
+    pub fn CPU_ISSET(cpu: usize, cpuset: &cpu_set_t) -> bool {
+        let (idx, offset) = ((cpu >> 6) & 3, cpu & 63);
+        0 != cpuset.ary[idx] & (1 << offset)
+    }
+}
+
+safe_f! {
+    pub const fn WIFSIGNALED(status: c_int) -> bool {
+        (status & 0o177) != 0o177 && (status & 0o177) != 0
+    }
+
+    pub const fn makedev(major: c_uint, minor: c_uint) -> crate::dev_t {
+        let major = major as crate::dev_t;
+        let minor = minor as crate::dev_t;
+        let mut dev = 0;
+        dev |= major << 8;
+        dev |= minor;
+        dev
+    }
+
+    pub const fn major(dev: crate::dev_t) -> c_int {
+        ((dev >> 8) & 0xff) as c_int
+    }
+
+    pub const fn minor(dev: crate::dev_t) -> c_int {
+        (dev & 0xffff00ff) as c_int
+    }
+}
+
+extern "C" {
+    pub fn __errno_location() -> *mut c_int;
+    pub fn setgrent();
+    pub fn mprotect(addr: *mut c_void, len: size_t, prot: c_int) -> c_int;
+
+    pub fn setutxdb(_type: c_uint, file: *mut c_char) -> c_int;
+
+    pub fn aio_waitcomplete(iocbp: *mut *mut aiocb, timeout: *mut crate::timespec) -> c_int;
+
+    pub fn devname_r(
+        dev: crate::dev_t,
+        mode: crate::mode_t,
+        buf: *mut c_char,
+        len: size_t,
+    ) -> *mut c_char;
+
+    pub fn waitid(
+        idtype: idtype_t,
+        id: crate::id_t,
+        infop: *mut crate::siginfo_t,
+        options: c_int,
+    ) -> c_int;
+
+    pub fn freelocale(loc: crate::locale_t);
+
+    pub fn lwp_rtprio(
+        function: c_int,
+        pid: crate::pid_t,
+        lwpid: lwpid_t,
+        rtp: *mut super::rtprio,
+    ) -> c_int;
+
+    pub fn statfs(path: *const c_char, buf: *mut statfs) -> c_int;
+    pub fn fstatfs(fd: c_int, buf: *mut statfs) -> c_int;
+    pub fn uname(buf: *mut crate::utsname) -> c_int;
+    pub fn memmem(
+        haystack: *const c_void,
+        haystacklen: size_t,
+        needle: *const c_void,
+        needlelen: size_t,
+    ) -> *mut c_void;
+    pub fn pthread_spin_init(lock: *mut pthread_spinlock_t, pshared: c_int) -> c_int;
+    pub fn pthread_spin_destroy(lock: *mut pthread_spinlock_t) -> c_int;
+    pub fn pthread_spin_lock(lock: *mut pthread_spinlock_t) -> c_int;
+    pub fn pthread_spin_trylock(lock: *mut pthread_spinlock_t) -> c_int;
+    pub fn pthread_spin_unlock(lock: *mut pthread_spinlock_t) -> c_int;
+
+    pub fn sched_getaffinity(pid: crate::pid_t, cpusetsize: size_t, mask: *mut cpu_set_t) -> c_int;
+    pub fn sched_setaffinity(
+        pid: crate::pid_t,
+        cpusetsize: size_t,
+        mask: *const cpu_set_t,
+    ) -> c_int;
+    pub fn sched_getcpu() -> c_int;
+    pub fn setproctitle(fmt: *const
c_char, ...); + + pub fn shmget(key: crate::key_t, size: size_t, shmflg: c_int) -> c_int; + pub fn shmat(shmid: c_int, shmaddr: *const c_void, shmflg: c_int) -> *mut c_void; + pub fn shmdt(shmaddr: *const c_void) -> c_int; + pub fn shmctl(shmid: c_int, cmd: c_int, buf: *mut crate::shmid_ds) -> c_int; + pub fn procctl( + idtype: crate::idtype_t, + id: crate::id_t, + cmd: c_int, + data: *mut c_void, + ) -> c_int; + + pub fn updwtmpx(file: *const c_char, ut: *const utmpx) -> c_int; + pub fn getlastlogx(fname: *const c_char, uid: crate::uid_t, ll: *mut lastlogx) + -> *mut lastlogx; + pub fn updlastlogx(fname: *const c_char, uid: crate::uid_t, ll: *mut lastlogx) -> c_int; + pub fn getutxuser(name: *const c_char) -> utmpx; + pub fn utmpxname(file: *const c_char) -> c_int; + + pub fn sys_checkpoint(tpe: c_int, fd: c_int, pid: crate::pid_t, retval: c_int) -> c_int; + + pub fn umtx_sleep(ptr: *const c_int, value: c_int, timeout: c_int) -> c_int; + pub fn umtx_wakeup(ptr: *const c_int, count: c_int) -> c_int; + + pub fn dirname(path: *mut c_char) -> *mut c_char; + pub fn basename(path: *mut c_char) -> *mut c_char; + pub fn getmntinfo(mntbufp: *mut *mut crate::statfs, flags: c_int) -> c_int; + pub fn getmntvinfo( + mntbufp: *mut *mut crate::statfs, + mntvbufp: *mut *mut crate::statvfs, + flags: c_int, + ) -> c_int; + + pub fn closefrom(lowfd: c_int) -> c_int; +} + +#[link(name = "rt")] +extern "C" { + pub fn aio_cancel(fd: c_int, aiocbp: *mut aiocb) -> c_int; + pub fn aio_error(aiocbp: *const aiocb) -> c_int; + pub fn aio_fsync(op: c_int, aiocbp: *mut aiocb) -> c_int; + pub fn aio_read(aiocbp: *mut aiocb) -> c_int; + pub fn aio_return(aiocbp: *mut aiocb) -> ssize_t; + pub fn aio_suspend( + aiocb_list: *const *const aiocb, + nitems: c_int, + timeout: *const crate::timespec, + ) -> c_int; + pub fn aio_write(aiocbp: *mut aiocb) -> c_int; + pub fn lio_listio( + mode: c_int, + aiocb_list: *const *mut aiocb, + nitems: c_int, + sevp: *mut sigevent, + ) -> c_int; + + pub fn reallocf(ptr: *mut c_void, size: size_t) -> *mut c_void; + pub fn freezero(ptr: *mut c_void, size: size_t); +} + +#[link(name = "kvm")] +extern "C" { + pub fn kvm_vm_map_entry_first( + kvm: *mut crate::kvm_t, + map: vm_map_t, + entry: vm_map_entry_t, + ) -> vm_map_entry_t; + pub fn kvm_vm_map_entry_next( + kvm: *mut crate::kvm_t, + map: vm_map_entry_t, + entry: vm_map_entry_t, + ) -> vm_map_entry_t; +} + +// DIFF(main): module removed in de76fee6 +cfg_if! { + if #[cfg(libc_thread_local)] { + mod errno; + pub use self::errno::*; + } +} diff --git a/vendor/libc/src/unix/bsd/freebsdlike/freebsd/aarch64.rs b/vendor/libc/src/unix/bsd/freebsdlike/freebsd/aarch64.rs new file mode 100644 index 00000000000000..e74c26bb46e2c9 --- /dev/null +++ b/vendor/libc/src/unix/bsd/freebsdlike/freebsd/aarch64.rs @@ -0,0 +1,110 @@ +use crate::prelude::*; + +pub type clock_t = i32; +pub type wchar_t = u32; +pub type time_t = i64; +pub type suseconds_t = i64; +pub type register_t = i64; + +s_no_extra_traits! { + pub struct gpregs { + pub gp_x: [crate::register_t; 30], + pub gp_lr: crate::register_t, + pub gp_sp: crate::register_t, + pub gp_elr: crate::register_t, + pub gp_spsr: u32, + pub gp_pad: c_int, + } + + pub struct fpregs { + pub fp_q: u128, + pub fp_sr: u32, + pub fp_cr: u32, + pub fp_flags: c_int, + pub fp_pad: c_int, + } + + pub struct mcontext_t { + pub mc_gpregs: gpregs, + pub mc_fpregs: fpregs, + pub mc_flags: c_int, + pub mc_pad: c_int, + pub mc_spare: [u64; 8], + } +} + +pub(crate) const _ALIGNBYTES: usize = size_of::() - 1; + +cfg_if! 
{ + if #[cfg(feature = "extra_traits")] { + impl PartialEq for gpregs { + fn eq(&self, other: &gpregs) -> bool { + self.gp_x.iter().zip(other.gp_x.iter()).all(|(a, b)| a == b) + && self.gp_lr == other.gp_lr + && self.gp_sp == other.gp_sp + && self.gp_elr == other.gp_elr + && self.gp_spsr == other.gp_spsr + && self.gp_pad == other.gp_pad + } + } + impl Eq for gpregs {} + impl hash::Hash for gpregs { + fn hash(&self, state: &mut H) { + self.gp_x.hash(state); + self.gp_lr.hash(state); + self.gp_sp.hash(state); + self.gp_elr.hash(state); + self.gp_spsr.hash(state); + self.gp_pad.hash(state); + } + } + impl PartialEq for fpregs { + fn eq(&self, other: &fpregs) -> bool { + self.fp_q == other.fp_q + && self.fp_sr == other.fp_sr + && self.fp_cr == other.fp_cr + && self.fp_flags == other.fp_flags + && self.fp_pad == other.fp_pad + } + } + impl Eq for fpregs {} + impl hash::Hash for fpregs { + fn hash(&self, state: &mut H) { + self.fp_q.hash(state); + self.fp_sr.hash(state); + self.fp_cr.hash(state); + self.fp_flags.hash(state); + self.fp_pad.hash(state); + } + } + impl PartialEq for mcontext_t { + fn eq(&self, other: &mcontext_t) -> bool { + self.mc_gpregs == other.mc_gpregs + && self.mc_fpregs == other.mc_fpregs + && self.mc_flags == other.mc_flags + && self.mc_pad == other.mc_pad + && self + .mc_spare + .iter() + .zip(other.mc_spare.iter()) + .all(|(a, b)| a == b) + } + } + impl Eq for mcontext_t {} + impl hash::Hash for mcontext_t { + fn hash(&self, state: &mut H) { + self.mc_gpregs.hash(state); + self.mc_fpregs.hash(state); + self.mc_flags.hash(state); + self.mc_pad.hash(state); + self.mc_spare.hash(state); + } + } + } +} + +pub const BIOCSRTIMEOUT: c_ulong = 0x8010426d; +pub const BIOCGRTIMEOUT: c_ulong = 0x4010426e; +pub const MAP_32BIT: c_int = 0x00080000; +pub const MINSIGSTKSZ: size_t = 4096; // 1024 * 4 +pub const TIOCTIMESTAMP: c_ulong = 0x40107459; diff --git a/vendor/libc/src/unix/bsd/freebsdlike/freebsd/arm.rs b/vendor/libc/src/unix/bsd/freebsdlike/freebsd/arm.rs new file mode 100644 index 00000000000000..c17e12913d8f82 --- /dev/null +++ b/vendor/libc/src/unix/bsd/freebsdlike/freebsd/arm.rs @@ -0,0 +1,53 @@ +use crate::prelude::*; + +pub type clock_t = u32; +pub type wchar_t = u32; +pub type time_t = i64; +pub type suseconds_t = i32; +pub type register_t = i32; +pub type __greg_t = c_uint; +pub type __gregset_t = [crate::__greg_t; 17]; + +s_no_extra_traits! { + pub struct mcontext_t { + pub __gregs: crate::__gregset_t, + pub mc_vfp_size: usize, + pub mc_vfp_ptr: *mut c_void, + pub mc_spare: [c_uint; 33], + } +} + +cfg_if! 
{ + if #[cfg(feature = "extra_traits")] { + impl PartialEq for mcontext_t { + fn eq(&self, other: &mcontext_t) -> bool { + self.__gregs == other.__gregs + && self.mc_vfp_size == other.mc_vfp_size + && self.mc_vfp_ptr == other.mc_vfp_ptr + && self + .mc_spare + .iter() + .zip(other.mc_spare.iter()) + .all(|(a, b)| a == b) + } + } + impl Eq for mcontext_t {} + impl hash::Hash for mcontext_t { + fn hash(&self, state: &mut H) { + self.__gregs.hash(state); + self.mc_vfp_size.hash(state); + self.mc_vfp_ptr.hash(state); + self.mc_spare.hash(state); + } + } + } +} + +pub(crate) const _ALIGNBYTES: usize = size_of::() - 1; + +pub const BIOCSRTIMEOUT: c_ulong = 0x8010426d; +pub const BIOCGRTIMEOUT: c_ulong = 0x4010426e; + +pub const MAP_32BIT: c_int = 0x00080000; +pub const MINSIGSTKSZ: size_t = 4096; // 1024 * 4 +pub const TIOCTIMESTAMP: c_ulong = 0x40107459; diff --git a/vendor/libc/src/unix/bsd/freebsdlike/freebsd/freebsd11/b32.rs b/vendor/libc/src/unix/bsd/freebsdlike/freebsd/freebsd11/b32.rs new file mode 100644 index 00000000000000..dca7d6ee799888 --- /dev/null +++ b/vendor/libc/src/unix/bsd/freebsdlike/freebsd/freebsd11/b32.rs @@ -0,0 +1,37 @@ +use crate::off_t; +use crate::prelude::*; + +#[repr(C)] +#[derive(Debug)] +#[cfg_attr(feature = "extra_traits", derive(Eq, Hash, PartialEq))] +pub struct stat { + pub st_dev: crate::dev_t, + pub st_ino: crate::ino_t, + pub st_mode: crate::mode_t, + pub st_nlink: crate::nlink_t, + pub st_uid: crate::uid_t, + pub st_gid: crate::gid_t, + pub st_rdev: crate::dev_t, + pub st_atime: crate::time_t, + pub st_atime_nsec: c_long, + pub st_mtime: crate::time_t, + pub st_mtime_nsec: c_long, + pub st_ctime: crate::time_t, + pub st_ctime_nsec: c_long, + pub st_size: off_t, + pub st_blocks: crate::blkcnt_t, + pub st_blksize: crate::blksize_t, + pub st_flags: crate::fflags_t, + pub st_gen: u32, + pub st_lspare: i32, + pub st_birthtime: crate::time_t, + pub st_birthtime_nsec: c_long, + __unused: [u8; 8], +} + +impl Copy for crate::stat {} +impl Clone for crate::stat { + fn clone(&self) -> crate::stat { + *self + } +} diff --git a/vendor/libc/src/unix/bsd/freebsdlike/freebsd/freebsd11/b64.rs b/vendor/libc/src/unix/bsd/freebsdlike/freebsd/freebsd11/b64.rs new file mode 100644 index 00000000000000..1f31aac0e3d3d6 --- /dev/null +++ b/vendor/libc/src/unix/bsd/freebsdlike/freebsd/freebsd11/b64.rs @@ -0,0 +1,36 @@ +use crate::off_t; +use crate::prelude::*; + +#[repr(C)] +#[derive(Debug)] +#[cfg_attr(feature = "extra_traits", derive(Eq, Hash, PartialEq))] +pub struct stat { + pub st_dev: crate::dev_t, + pub st_ino: crate::ino_t, + pub st_mode: crate::mode_t, + pub st_nlink: crate::nlink_t, + pub st_uid: crate::uid_t, + pub st_gid: crate::gid_t, + pub st_rdev: crate::dev_t, + pub st_atime: crate::time_t, + pub st_atime_nsec: c_long, + pub st_mtime: crate::time_t, + pub st_mtime_nsec: c_long, + pub st_ctime: crate::time_t, + pub st_ctime_nsec: c_long, + pub st_size: off_t, + pub st_blocks: crate::blkcnt_t, + pub st_blksize: crate::blksize_t, + pub st_flags: crate::fflags_t, + pub st_gen: u32, + pub st_lspare: i32, + pub st_birthtime: crate::time_t, + pub st_birthtime_nsec: c_long, +} + +impl Copy for crate::stat {} +impl Clone for crate::stat { + fn clone(&self) -> crate::stat { + *self + } +} diff --git a/vendor/libc/src/unix/bsd/freebsdlike/freebsd/freebsd11/mod.rs b/vendor/libc/src/unix/bsd/freebsdlike/freebsd/freebsd11/mod.rs new file mode 100644 index 00000000000000..b3b032bc66949f --- /dev/null +++ b/vendor/libc/src/unix/bsd/freebsdlike/freebsd/freebsd11/mod.rs @@ -0,0 
+1,449 @@ +use crate::prelude::*; + +// APIs that were changed after FreeBSD 11 + +// The type of `nlink_t` changed from `u16` to `u64` in FreeBSD 12: +pub type nlink_t = u16; +// Type of `dev_t` changed from `u32` to `u64` in FreeBSD 12: +pub type dev_t = u32; +// Type of `ino_t` changed from `__uint32_t` to `__uint64_t` in FreeBSD 12: +pub type ino_t = u32; + +s! { + pub struct kevent { + pub ident: crate::uintptr_t, + pub filter: c_short, + pub flags: c_ushort, + pub fflags: c_uint, + pub data: intptr_t, + pub udata: *mut c_void, + } + + pub struct shmid_ds { + pub shm_perm: crate::ipc_perm, + pub shm_segsz: size_t, + pub shm_lpid: crate::pid_t, + pub shm_cpid: crate::pid_t, + // Type of shm_nattc changed from `int` to `shmatt_t` (aka `unsigned + // int`) in FreeBSD 12: + pub shm_nattch: c_int, + pub shm_atime: crate::time_t, + pub shm_dtime: crate::time_t, + pub shm_ctime: crate::time_t, + } + + pub struct kinfo_proc { + /// Size of this structure. + pub ki_structsize: c_int, + /// Reserved: layout identifier. + pub ki_layout: c_int, + /// Address of command arguments. + pub ki_args: *mut crate::pargs, + // This is normally "struct proc". + /// Address of proc. + pub ki_paddr: *mut c_void, + // This is normally "struct user". + /// Kernel virtual address of u-area. + pub ki_addr: *mut c_void, + // This is normally "struct vnode". + /// Pointer to trace file. + pub ki_tracep: *mut c_void, + // This is normally "struct vnode". + /// Pointer to executable file. + pub ki_textvp: *mut c_void, + /// Pointer to open file info. + pub ki_fd: *mut crate::filedesc, + // This is normally "struct vmspace". + /// Pointer to kernel vmspace struct. + pub ki_vmspace: *mut c_void, + /// Sleep address. + pub ki_wchan: *mut c_void, + /// Process identifier. + pub ki_pid: crate::pid_t, + /// Parent process ID. + pub ki_ppid: crate::pid_t, + /// Process group ID. + pub ki_pgid: crate::pid_t, + /// tty process group ID. + pub ki_tpgid: crate::pid_t, + /// Process session ID. + pub ki_sid: crate::pid_t, + /// Terminal session ID. + pub ki_tsid: crate::pid_t, + /// Job control counter. + pub ki_jobc: c_short, + /// Unused (just here for alignment). + pub ki_spare_short1: c_short, + /// Controlling tty dev. + pub ki_tdev: crate::dev_t, + /// Signals arrived but not delivered. + pub ki_siglist: crate::sigset_t, + /// Current signal mask. + pub ki_sigmask: crate::sigset_t, + /// Signals being ignored. + pub ki_sigignore: crate::sigset_t, + /// Signals being caught by user. + pub ki_sigcatch: crate::sigset_t, + /// Effective user ID. + pub ki_uid: crate::uid_t, + /// Real user ID. + pub ki_ruid: crate::uid_t, + /// Saved effective user ID. + pub ki_svuid: crate::uid_t, + /// Real group ID. + pub ki_rgid: crate::gid_t, + /// Saved effective group ID. + pub ki_svgid: crate::gid_t, + /// Number of groups. + pub ki_ngroups: c_short, + /// Unused (just here for alignment). + pub ki_spare_short2: c_short, + /// Groups. + pub ki_groups: [crate::gid_t; crate::KI_NGROUPS], + /// Virtual size. + pub ki_size: crate::vm_size_t, + /// Current resident set size in pages. + pub ki_rssize: crate::segsz_t, + /// Resident set size before last swap. + pub ki_swrss: crate::segsz_t, + /// Text size (pages) XXX. + pub ki_tsize: crate::segsz_t, + /// Data size (pages) XXX. + pub ki_dsize: crate::segsz_t, + /// Stack size (pages). + pub ki_ssize: crate::segsz_t, + /// Exit status for wait & stop signal. + pub ki_xstat: crate::u_short, + /// Accounting flags. + pub ki_acflag: crate::u_short, + /// %cpu for process during `ki_swtime`. 
+ pub ki_pctcpu: crate::fixpt_t, + /// Time averaged value of `ki_cpticks`. + pub ki_estcpu: crate::u_int, + /// Time since last blocked. + pub ki_slptime: crate::u_int, + /// Time swapped in or out. + pub ki_swtime: crate::u_int, + /// Number of copy-on-write faults. + pub ki_cow: crate::u_int, + /// Real time in microsec. + pub ki_runtime: u64, + /// Starting time. + pub ki_start: crate::timeval, + /// Time used by process children. + pub ki_childtime: crate::timeval, + /// P_* flags. + pub ki_flag: c_long, + /// KI_* flags (below). + pub ki_kiflag: c_long, + /// Kernel trace points. + pub ki_traceflag: c_int, + /// S* process status. + pub ki_stat: c_char, + /// Process "nice" value. + pub ki_nice: i8, // signed char + /// Process lock (prevent swap) count. + pub ki_lock: c_char, + /// Run queue index. + pub ki_rqindex: c_char, + /// Which cpu we are on. + pub ki_oncpu_old: c_uchar, + /// Last cpu we were on. + pub ki_lastcpu_old: c_uchar, + /// Thread name. + pub ki_tdname: [c_char; crate::TDNAMLEN + 1], + /// Wchan message. + pub ki_wmesg: [c_char; crate::WMESGLEN + 1], + /// Setlogin name. + pub ki_login: [c_char; crate::LOGNAMELEN + 1], + /// Lock name. + pub ki_lockname: [c_char; crate::LOCKNAMELEN + 1], + /// Command name. + pub ki_comm: [c_char; crate::COMMLEN + 1], + /// Emulation name. + pub ki_emul: [c_char; crate::KI_EMULNAMELEN + 1], + /// Login class. + pub ki_loginclass: [c_char; crate::LOGINCLASSLEN + 1], + /// More thread name. + pub ki_moretdname: [c_char; crate::MAXCOMLEN - crate::TDNAMLEN + 1], + /// Spare string space. + pub ki_sparestrings: [[c_char; 23]; 2], // little hack to allow PartialEq + /// Spare room for growth. + pub ki_spareints: [c_int; crate::KI_NSPARE_INT], + /// Which cpu we are on. + pub ki_oncpu: c_int, + /// Last cpu we were on. + pub ki_lastcpu: c_int, + /// PID of tracing process. + pub ki_tracer: c_int, + /// P2_* flags. + pub ki_flag2: c_int, + /// Default FIB number. + pub ki_fibnum: c_int, + /// Credential flags. + pub ki_cr_flags: crate::u_int, + /// Process jail ID. + pub ki_jid: c_int, + /// Number of threads in total. + pub ki_numthreads: c_int, + /// Thread ID. + pub ki_tid: crate::lwpid_t, + /// Process priority. + pub ki_pri: crate::priority, + /// Process rusage statistics. + pub ki_rusage: crate::rusage, + /// rusage of children processes. + pub ki_rusage_ch: crate::rusage, + // This is normally "struct pcb". + /// Kernel virtual addr of pcb. + pub ki_pcb: *mut c_void, + /// Kernel virtual addr of stack. + pub ki_kstack: *mut c_void, + /// User convenience pointer. + pub ki_udata: *mut c_void, + // This is normally "struct thread". + pub ki_tdaddr: *mut c_void, + pub ki_spareptrs: [*mut c_void; crate::KI_NSPARE_PTR], + pub ki_sparelongs: [c_long; crate::KI_NSPARE_LONG], + /// PS_* flags. + pub ki_sflag: c_long, + /// kthread flag. + pub ki_tdflags: c_long, + } +} + +s_no_extra_traits! 
{ + pub struct dirent { + pub d_fileno: crate::ino_t, + pub d_reclen: u16, + pub d_type: u8, + // Type of `d_namlen` changed from `char` to `u16` in FreeBSD 12: + pub d_namlen: u8, + pub d_name: [c_char; 256], + } + + pub struct statfs { + pub f_version: u32, + pub f_type: u32, + pub f_flags: u64, + pub f_bsize: u64, + pub f_iosize: u64, + pub f_blocks: u64, + pub f_bfree: u64, + pub f_bavail: i64, + pub f_files: u64, + pub f_ffree: i64, + pub f_syncwrites: u64, + pub f_asyncwrites: u64, + pub f_syncreads: u64, + pub f_asyncreads: u64, + f_spare: [u64; 10], + pub f_namemax: u32, + pub f_owner: crate::uid_t, + pub f_fsid: crate::fsid_t, + f_charspare: [c_char; 80], + pub f_fstypename: [c_char; 16], + // Array length changed from 88 to 1024 in FreeBSD 12: + pub f_mntfromname: [c_char; 88], + // Array length changed from 88 to 1024 in FreeBSD 12: + pub f_mntonname: [c_char; 88], + } + + pub struct vnstat { + pub vn_fileid: u64, + pub vn_size: u64, + pub vn_mntdir: *mut c_char, + pub vn_dev: u32, + pub vn_fsid: u32, + pub vn_type: c_int, + pub vn_mode: u16, + pub vn_devname: [c_char; crate::SPECNAMELEN as usize + 1], + } +} + +cfg_if! { + if #[cfg(feature = "extra_traits")] { + impl PartialEq for statfs { + fn eq(&self, other: &statfs) -> bool { + self.f_version == other.f_version + && self.f_type == other.f_type + && self.f_flags == other.f_flags + && self.f_bsize == other.f_bsize + && self.f_iosize == other.f_iosize + && self.f_blocks == other.f_blocks + && self.f_bfree == other.f_bfree + && self.f_bavail == other.f_bavail + && self.f_files == other.f_files + && self.f_ffree == other.f_ffree + && self.f_syncwrites == other.f_syncwrites + && self.f_asyncwrites == other.f_asyncwrites + && self.f_syncreads == other.f_syncreads + && self.f_asyncreads == other.f_asyncreads + && self.f_namemax == other.f_namemax + && self.f_owner == other.f_owner + && self.f_fsid == other.f_fsid + && self.f_fstypename == other.f_fstypename + && self + .f_mntfromname + .iter() + .zip(other.f_mntfromname.iter()) + .all(|(a, b)| a == b) + && self + .f_mntonname + .iter() + .zip(other.f_mntonname.iter()) + .all(|(a, b)| a == b) + } + } + impl Eq for statfs {} + impl hash::Hash for statfs { + fn hash(&self, state: &mut H) { + self.f_version.hash(state); + self.f_type.hash(state); + self.f_flags.hash(state); + self.f_bsize.hash(state); + self.f_iosize.hash(state); + self.f_blocks.hash(state); + self.f_bfree.hash(state); + self.f_bavail.hash(state); + self.f_files.hash(state); + self.f_ffree.hash(state); + self.f_syncwrites.hash(state); + self.f_asyncwrites.hash(state); + self.f_syncreads.hash(state); + self.f_asyncreads.hash(state); + self.f_namemax.hash(state); + self.f_owner.hash(state); + self.f_fsid.hash(state); + self.f_fstypename.hash(state); + self.f_mntfromname.hash(state); + self.f_mntonname.hash(state); + } + } + + impl PartialEq for dirent { + fn eq(&self, other: &dirent) -> bool { + self.d_fileno == other.d_fileno + && self.d_reclen == other.d_reclen + && self.d_type == other.d_type + && self.d_namlen == other.d_namlen + && self.d_name[..self.d_namlen as _] + .iter() + .zip(other.d_name.iter()) + .all(|(a, b)| a == b) + } + } + impl Eq for dirent {} + impl hash::Hash for dirent { + fn hash(&self, state: &mut H) { + self.d_fileno.hash(state); + self.d_reclen.hash(state); + self.d_type.hash(state); + self.d_namlen.hash(state); + self.d_name[..self.d_namlen as _].hash(state); + } + } + + impl PartialEq for vnstat { + fn eq(&self, other: &vnstat) -> bool { + let self_vn_devname: &[c_char] = &self.vn_devname; + 
+                let other_vn_devname: &[c_char] = &other.vn_devname;
+
+                self.vn_fileid == other.vn_fileid
+                    && self.vn_size == other.vn_size
+                    && self.vn_mntdir == other.vn_mntdir
+                    && self.vn_dev == other.vn_dev
+                    && self.vn_fsid == other.vn_fsid
+                    && self.vn_type == other.vn_type
+                    && self.vn_mode == other.vn_mode
+                    && self_vn_devname == other_vn_devname
+            }
+        }
+        impl Eq for vnstat {}
+        impl hash::Hash for vnstat {
+            fn hash<H: hash::Hasher>(&self, state: &mut H) {
+                let self_vn_devname: &[c_char] = &self.vn_devname;
+
+                self.vn_fileid.hash(state);
+                self.vn_size.hash(state);
+                self.vn_mntdir.hash(state);
+                self.vn_dev.hash(state);
+                self.vn_fsid.hash(state);
+                self.vn_type.hash(state);
+                self.vn_mode.hash(state);
+                self_vn_devname.hash(state);
+            }
+        }
+    }
+}
+
+pub const ELAST: c_int = 96;
+pub const RAND_MAX: c_int = 0x7fff_fffd;
+pub const KI_NSPARE_PTR: usize = 6;
+pub const MINCORE_SUPER: c_int = 0x20;
+/// max length of devicename
+pub const SPECNAMELEN: c_int = 63;
+
+safe_f! {
+    pub const fn makedev(major: c_uint, minor: c_uint) -> crate::dev_t {
+        let major = major as crate::dev_t;
+        let minor = minor as crate::dev_t;
+        (major << 8) | minor
+    }
+
+    pub const fn major(dev: crate::dev_t) -> c_int {
+        ((dev >> 8) & 0xff) as c_int
+    }
+
+    pub const fn minor(dev: crate::dev_t) -> c_int {
+        (dev & 0xffff00ff) as c_int
+    }
+}
+
+extern "C" {
+    // Return type c_int was removed in FreeBSD 12
+    pub fn setgrent() -> c_int;
+
+    // Type of `addr` argument changed from `const void*` to `void*`
+    // in FreeBSD 12
+    pub fn mprotect(addr: *const c_void, len: size_t, prot: c_int) -> c_int;
+
+    // Return type c_int was removed in FreeBSD 12
+    pub fn freelocale(loc: crate::locale_t) -> c_int;
+
+    // Return type c_int changed to ssize_t in FreeBSD 12:
+    pub fn msgrcv(
+        msqid: c_int,
+        msgp: *mut c_void,
+        msgsz: size_t,
+        msgtyp: c_long,
+        msgflg: c_int,
+    ) -> c_int;
+
+    // Type of `path` argument changed from `const char*` to `char*`
+    // in FreeBSD 12
+    pub fn dirname(path: *const c_char) -> *mut c_char;
+    pub fn basename(path: *const c_char) -> *mut c_char;
+
+    // Argument order of the comparison function pointer changed in FreeBSD 14. From 14 onwards
+    // the signature matches the POSIX specification, with the mutable context pointer as the
+    // third argument; on earlier versions it is the first argument.
+    #[link_name = "qsort_r@FBSD_1.0"]
+    pub fn qsort_r(
+        base: *mut c_void,
+        num: size_t,
+        size: size_t,
+        arg: *mut c_void,
+        compar: Option<unsafe extern "C" fn(*mut c_void, *const c_void, *const c_void) -> c_int>,
+    );
+}
+
+cfg_if! {
+    if #[cfg(target_pointer_width = "64")] {
+        mod b64;
+        pub use self::b64::*;
+    } else {
+        mod b32;
+        pub use self::b32::*;
+    }
+}
diff --git a/vendor/libc/src/unix/bsd/freebsdlike/freebsd/freebsd12/mod.rs b/vendor/libc/src/unix/bsd/freebsdlike/freebsd/freebsd12/mod.rs
new file mode 100644
index 00000000000000..962d7817a2649c
--- /dev/null
+++ b/vendor/libc/src/unix/bsd/freebsdlike/freebsd/freebsd12/mod.rs
@@ -0,0 +1,487 @@
+use crate::off_t;
+use crate::prelude::*;
+
+// APIs in FreeBSD 12 that have changed since 11.
+
+pub type nlink_t = u64;
+pub type dev_t = u64;
+pub type ino_t = u64;
+pub type shmatt_t = c_uint;
+
+s!
{ + pub struct shmid_ds { + pub shm_perm: crate::ipc_perm, + pub shm_segsz: size_t, + pub shm_lpid: crate::pid_t, + pub shm_cpid: crate::pid_t, + pub shm_nattch: crate::shmatt_t, + pub shm_atime: crate::time_t, + pub shm_dtime: crate::time_t, + pub shm_ctime: crate::time_t, + } + + pub struct kevent { + pub ident: crate::uintptr_t, + pub filter: c_short, + pub flags: c_ushort, + pub fflags: c_uint, + pub data: i64, + pub udata: *mut c_void, + pub ext: [u64; 4], + } + + pub struct kvm_page { + pub version: c_uint, + pub paddr: c_ulong, + pub kmap_vaddr: c_ulong, + pub dmap_vaddr: c_ulong, + pub prot: crate::vm_prot_t, + pub offset: crate::u_long, + pub len: size_t, + } + + pub struct kinfo_proc { + /// Size of this structure. + pub ki_structsize: c_int, + /// Reserved: layout identifier. + pub ki_layout: c_int, + /// Address of command arguments. + pub ki_args: *mut crate::pargs, + // This is normally "struct proc". + /// Address of proc. + pub ki_paddr: *mut c_void, + // This is normally "struct user". + /// Kernel virtual address of u-area. + pub ki_addr: *mut c_void, + // This is normally "struct vnode". + /// Pointer to trace file. + pub ki_tracep: *mut c_void, + // This is normally "struct vnode". + /// Pointer to executable file. + pub ki_textvp: *mut c_void, + /// Pointer to open file info. + pub ki_fd: *mut crate::filedesc, + // This is normally "struct vmspace". + /// Pointer to kernel vmspace struct. + pub ki_vmspace: *mut c_void, + /// Sleep address. + pub ki_wchan: *mut c_void, + /// Process identifier. + pub ki_pid: crate::pid_t, + /// Parent process ID. + pub ki_ppid: crate::pid_t, + /// Process group ID. + pub ki_pgid: crate::pid_t, + /// tty process group ID. + pub ki_tpgid: crate::pid_t, + /// Process session ID. + pub ki_sid: crate::pid_t, + /// Terminal session ID. + pub ki_tsid: crate::pid_t, + /// Job control counter. + pub ki_jobc: c_short, + /// Unused (just here for alignment). + pub ki_spare_short1: c_short, + /// Controlling tty dev. + pub ki_tdev_freebsd11: u32, + /// Signals arrived but not delivered. + pub ki_siglist: crate::sigset_t, + /// Current signal mask. + pub ki_sigmask: crate::sigset_t, + /// Signals being ignored. + pub ki_sigignore: crate::sigset_t, + /// Signals being caught by user. + pub ki_sigcatch: crate::sigset_t, + /// Effective user ID. + pub ki_uid: crate::uid_t, + /// Real user ID. + pub ki_ruid: crate::uid_t, + /// Saved effective user ID. + pub ki_svuid: crate::uid_t, + /// Real group ID. + pub ki_rgid: crate::gid_t, + /// Saved effective group ID. + pub ki_svgid: crate::gid_t, + /// Number of groups. + pub ki_ngroups: c_short, + /// Unused (just here for alignment). + pub ki_spare_short2: c_short, + /// Groups. + pub ki_groups: [crate::gid_t; crate::KI_NGROUPS], + /// Virtual size. + pub ki_size: crate::vm_size_t, + /// Current resident set size in pages. + pub ki_rssize: crate::segsz_t, + /// Resident set size before last swap. + pub ki_swrss: crate::segsz_t, + /// Text size (pages) XXX. + pub ki_tsize: crate::segsz_t, + /// Data size (pages) XXX. + pub ki_dsize: crate::segsz_t, + /// Stack size (pages). + pub ki_ssize: crate::segsz_t, + /// Exit status for wait & stop signal. + pub ki_xstat: crate::u_short, + /// Accounting flags. + pub ki_acflag: crate::u_short, + /// %cpu for process during `ki_swtime`. + pub ki_pctcpu: crate::fixpt_t, + /// Time averaged value of `ki_cpticks`. + pub ki_estcpu: crate::u_int, + /// Time since last blocked. + pub ki_slptime: crate::u_int, + /// Time swapped in or out. 
+ pub ki_swtime: crate::u_int, + /// Number of copy-on-write faults. + pub ki_cow: crate::u_int, + /// Real time in microsec. + pub ki_runtime: u64, + /// Starting time. + pub ki_start: crate::timeval, + /// Time used by process children. + pub ki_childtime: crate::timeval, + /// P_* flags. + pub ki_flag: c_long, + /// KI_* flags (below). + pub ki_kiflag: c_long, + /// Kernel trace points. + pub ki_traceflag: c_int, + /// S* process status. + pub ki_stat: c_char, + /// Process "nice" value. + pub ki_nice: i8, // signed char + /// Process lock (prevent swap) count. + pub ki_lock: c_char, + /// Run queue index. + pub ki_rqindex: c_char, + /// Which cpu we are on. + pub ki_oncpu_old: c_uchar, + /// Last cpu we were on. + pub ki_lastcpu_old: c_uchar, + /// Thread name. + pub ki_tdname: [c_char; crate::TDNAMLEN + 1], + /// Wchan message. + pub ki_wmesg: [c_char; crate::WMESGLEN + 1], + /// Setlogin name. + pub ki_login: [c_char; crate::LOGNAMELEN + 1], + /// Lock name. + pub ki_lockname: [c_char; crate::LOCKNAMELEN + 1], + /// Command name. + pub ki_comm: [c_char; crate::COMMLEN + 1], + /// Emulation name. + pub ki_emul: [c_char; crate::KI_EMULNAMELEN + 1], + /// Login class. + pub ki_loginclass: [c_char; crate::LOGINCLASSLEN + 1], + /// More thread name. + pub ki_moretdname: [c_char; crate::MAXCOMLEN - crate::TDNAMLEN + 1], + /// Spare string space. + pub ki_sparestrings: [[c_char; 23]; 2], // little hack to allow PartialEq + /// Spare room for growth. + pub ki_spareints: [c_int; crate::KI_NSPARE_INT], + /// Controlling tty dev. + pub ki_tdev: crate::dev_t, + /// Which cpu we are on. + pub ki_oncpu: c_int, + /// Last cpu we were on. + pub ki_lastcpu: c_int, + /// PID of tracing process. + pub ki_tracer: c_int, + /// P2_* flags. + pub ki_flag2: c_int, + /// Default FIB number. + pub ki_fibnum: c_int, + /// Credential flags. + pub ki_cr_flags: crate::u_int, + /// Process jail ID. + pub ki_jid: c_int, + /// Number of threads in total. + pub ki_numthreads: c_int, + /// Thread ID. + pub ki_tid: crate::lwpid_t, + /// Process priority. + pub ki_pri: crate::priority, + /// Process rusage statistics. + pub ki_rusage: crate::rusage, + /// rusage of children processes. + pub ki_rusage_ch: crate::rusage, + // This is normally "struct pcb". + /// Kernel virtual addr of pcb. + pub ki_pcb: *mut c_void, + /// Kernel virtual addr of stack. + pub ki_kstack: *mut c_void, + /// User convenience pointer. + pub ki_udata: *mut c_void, + // This is normally "struct thread". + pub ki_tdaddr: *mut c_void, + pub ki_spareptrs: [*mut c_void; crate::KI_NSPARE_PTR], + pub ki_sparelongs: [c_long; crate::KI_NSPARE_LONG], + /// PS_* flags. + pub ki_sflag: c_long, + /// kthread flag. 
+ pub ki_tdflags: c_long, + } + + pub struct stat { + pub st_dev: crate::dev_t, + pub st_ino: crate::ino_t, + pub st_nlink: crate::nlink_t, + pub st_mode: crate::mode_t, + st_padding0: i16, + pub st_uid: crate::uid_t, + pub st_gid: crate::gid_t, + st_padding1: i32, + pub st_rdev: crate::dev_t, + #[cfg(target_arch = "x86")] + st_atim_ext: i32, + pub st_atime: crate::time_t, + pub st_atime_nsec: c_long, + #[cfg(target_arch = "x86")] + st_mtim_ext: i32, + pub st_mtime: crate::time_t, + pub st_mtime_nsec: c_long, + #[cfg(target_arch = "x86")] + st_ctim_ext: i32, + pub st_ctime: crate::time_t, + pub st_ctime_nsec: c_long, + #[cfg(target_arch = "x86")] + st_btim_ext: i32, + pub st_birthtime: crate::time_t, + pub st_birthtime_nsec: c_long, + pub st_size: off_t, + pub st_blocks: crate::blkcnt_t, + pub st_blksize: crate::blksize_t, + pub st_flags: crate::fflags_t, + pub st_gen: u64, + pub st_spare: [u64; 10], + } +} + +s_no_extra_traits! { + pub struct dirent { + pub d_fileno: crate::ino_t, + pub d_off: off_t, + pub d_reclen: u16, + pub d_type: u8, + d_pad0: u8, + pub d_namlen: u16, + d_pad1: u16, + pub d_name: [c_char; 256], + } + + pub struct statfs { + pub f_version: u32, + pub f_type: u32, + pub f_flags: u64, + pub f_bsize: u64, + pub f_iosize: u64, + pub f_blocks: u64, + pub f_bfree: u64, + pub f_bavail: i64, + pub f_files: u64, + pub f_ffree: i64, + pub f_syncwrites: u64, + pub f_asyncwrites: u64, + pub f_syncreads: u64, + pub f_asyncreads: u64, + f_spare: [u64; 10], + pub f_namemax: u32, + pub f_owner: crate::uid_t, + pub f_fsid: crate::fsid_t, + f_charspare: [c_char; 80], + pub f_fstypename: [c_char; 16], + pub f_mntfromname: [c_char; 1024], + pub f_mntonname: [c_char; 1024], + } + + pub struct vnstat { + pub vn_fileid: u64, + pub vn_size: u64, + pub vn_dev: u64, + pub vn_fsid: u64, + pub vn_mntdir: *mut c_char, + pub vn_type: c_int, + pub vn_mode: u16, + pub vn_devname: [c_char; crate::SPECNAMELEN as usize + 1], + } +} + +cfg_if! 
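+// Manual trait impls, compiled only when libc's `extra_traits` feature is enabled;
+// note that `statfs::eq` skips the private `f_spare`/`f_charspare` fields.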
{ + if #[cfg(feature = "extra_traits")] { + impl PartialEq for statfs { + fn eq(&self, other: &statfs) -> bool { + self.f_version == other.f_version + && self.f_type == other.f_type + && self.f_flags == other.f_flags + && self.f_bsize == other.f_bsize + && self.f_iosize == other.f_iosize + && self.f_blocks == other.f_blocks + && self.f_bfree == other.f_bfree + && self.f_bavail == other.f_bavail + && self.f_files == other.f_files + && self.f_ffree == other.f_ffree + && self.f_syncwrites == other.f_syncwrites + && self.f_asyncwrites == other.f_asyncwrites + && self.f_syncreads == other.f_syncreads + && self.f_asyncreads == other.f_asyncreads + && self.f_namemax == other.f_namemax + && self.f_owner == other.f_owner + && self.f_fsid == other.f_fsid + && self.f_fstypename == other.f_fstypename + && self + .f_mntfromname + .iter() + .zip(other.f_mntfromname.iter()) + .all(|(a, b)| a == b) + && self + .f_mntonname + .iter() + .zip(other.f_mntonname.iter()) + .all(|(a, b)| a == b) + } + } + impl Eq for statfs {} + impl hash::Hash for statfs { + fn hash(&self, state: &mut H) { + self.f_version.hash(state); + self.f_type.hash(state); + self.f_flags.hash(state); + self.f_bsize.hash(state); + self.f_iosize.hash(state); + self.f_blocks.hash(state); + self.f_bfree.hash(state); + self.f_bavail.hash(state); + self.f_files.hash(state); + self.f_ffree.hash(state); + self.f_syncwrites.hash(state); + self.f_asyncwrites.hash(state); + self.f_syncreads.hash(state); + self.f_asyncreads.hash(state); + self.f_namemax.hash(state); + self.f_owner.hash(state); + self.f_fsid.hash(state); + self.f_charspare.hash(state); + self.f_fstypename.hash(state); + self.f_mntfromname.hash(state); + self.f_mntonname.hash(state); + } + } + + impl PartialEq for dirent { + fn eq(&self, other: &dirent) -> bool { + self.d_fileno == other.d_fileno + && self.d_off == other.d_off + && self.d_reclen == other.d_reclen + && self.d_type == other.d_type + && self.d_namlen == other.d_namlen + && self.d_name[..self.d_namlen as _] + .iter() + .zip(other.d_name.iter()) + .all(|(a, b)| a == b) + } + } + impl Eq for dirent {} + impl hash::Hash for dirent { + fn hash(&self, state: &mut H) { + self.d_fileno.hash(state); + self.d_off.hash(state); + self.d_reclen.hash(state); + self.d_type.hash(state); + self.d_namlen.hash(state); + self.d_name[..self.d_namlen as _].hash(state); + } + } + + impl PartialEq for vnstat { + fn eq(&self, other: &vnstat) -> bool { + let self_vn_devname: &[c_char] = &self.vn_devname; + let other_vn_devname: &[c_char] = &other.vn_devname; + + self.vn_fileid == other.vn_fileid + && self.vn_size == other.vn_size + && self.vn_dev == other.vn_dev + && self.vn_fsid == other.vn_fsid + && self.vn_mntdir == other.vn_mntdir + && self.vn_type == other.vn_type + && self.vn_mode == other.vn_mode + && self_vn_devname == other_vn_devname + } + } + impl Eq for vnstat {} + impl hash::Hash for vnstat { + fn hash(&self, state: &mut H) { + let self_vn_devname: &[c_char] = &self.vn_devname; + + self.vn_fileid.hash(state); + self.vn_size.hash(state); + self.vn_dev.hash(state); + self.vn_fsid.hash(state); + self.vn_mntdir.hash(state); + self.vn_type.hash(state); + self.vn_mode.hash(state); + self_vn_devname.hash(state); + } + } + } +} + +pub const RAND_MAX: c_int = 0x7fff_fffd; +pub const ELAST: c_int = 97; + +/// max length of devicename +pub const SPECNAMELEN: c_int = 63; +pub const KI_NSPARE_PTR: usize = 6; + +pub const MINCORE_SUPER: c_int = 0x20; + +safe_f! 
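+// FreeBSD 12 widened dev_t to 64 bits, so makedev()/major()/minor() below spread the
+// major and minor numbers across both halves instead of the old `(major << 8) | minor`
+// packing, e.g. makedev(0x1234, 0x5678) == 0x0000_1256_0000_3478, and major()/minor()
+// recover 0x1234/0x5678 from it.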
{ + pub const fn makedev(major: c_uint, minor: c_uint) -> crate::dev_t { + let major = major as crate::dev_t; + let minor = minor as crate::dev_t; + let mut dev = 0; + dev |= ((major & 0xffffff00) as dev_t) << 32; + dev |= ((major & 0x000000ff) as dev_t) << 8; + dev |= ((minor & 0x0000ff00) as dev_t) << 24; + dev |= ((minor & 0xffff00ff) as dev_t) << 0; + dev + } + + pub const fn major(dev: crate::dev_t) -> c_int { + (((dev >> 32) & 0xffffff00) | ((dev >> 8) & 0xff)) as c_int + } + + pub const fn minor(dev: crate::dev_t) -> c_int { + (((dev >> 24) & 0xff00) | (dev & 0xffff00ff)) as c_int + } +} + +extern "C" { + pub fn setgrent(); + pub fn mprotect(addr: *mut c_void, len: size_t, prot: c_int) -> c_int; + pub fn freelocale(loc: crate::locale_t); + pub fn msgrcv( + msqid: c_int, + msgp: *mut c_void, + msgsz: size_t, + msgtyp: c_long, + msgflg: c_int, + ) -> ssize_t; + + pub fn dirname(path: *mut c_char) -> *mut c_char; + pub fn basename(path: *mut c_char) -> *mut c_char; + + #[link_name = "qsort_r@FBSD_1.0"] + pub fn qsort_r( + base: *mut c_void, + num: size_t, + size: size_t, + arg: *mut c_void, + compar: Option c_int>, + ); +} + +cfg_if! { + if #[cfg(target_arch = "x86_64")] { + mod x86_64; + pub use self::x86_64::*; + } +} diff --git a/vendor/libc/src/unix/bsd/freebsdlike/freebsd/freebsd12/x86_64.rs b/vendor/libc/src/unix/bsd/freebsdlike/freebsd/freebsd12/x86_64.rs new file mode 100644 index 00000000000000..b29171cc509c51 --- /dev/null +++ b/vendor/libc/src/unix/bsd/freebsdlike/freebsd/freebsd12/x86_64.rs @@ -0,0 +1,7 @@ +use crate::prelude::*; + +pub const PROC_KPTI_CTL: c_int = crate::PROC_PROCCTL_MD_MIN; +pub const PROC_KPTI_CTL_ENABLE_ON_EXEC: c_int = 1; +pub const PROC_KPTI_CTL_DISABLE_ON_EXEC: c_int = 2; +pub const PROC_KPTI_STATUS: c_int = crate::PROC_PROCCTL_MD_MIN + 1; +pub const PROC_KPTI_STATUS_ACTIVE: c_int = 0x80000000; diff --git a/vendor/libc/src/unix/bsd/freebsdlike/freebsd/freebsd13/mod.rs b/vendor/libc/src/unix/bsd/freebsdlike/freebsd/freebsd13/mod.rs new file mode 100644 index 00000000000000..7b0e467ba375ef --- /dev/null +++ b/vendor/libc/src/unix/bsd/freebsdlike/freebsd/freebsd13/mod.rs @@ -0,0 +1,531 @@ +use crate::off_t; +use crate::prelude::*; + +// APIs in FreeBSD 13 that have changed since 11. + +pub type nlink_t = u64; +pub type dev_t = u64; +pub type ino_t = u64; +pub type shmatt_t = c_uint; +pub type kpaddr_t = u64; +pub type kssize_t = i64; +pub type domainset_t = __c_anonymous_domainset; + +s! { + pub struct shmid_ds { + pub shm_perm: crate::ipc_perm, + pub shm_segsz: size_t, + pub shm_lpid: crate::pid_t, + pub shm_cpid: crate::pid_t, + pub shm_nattch: crate::shmatt_t, + pub shm_atime: crate::time_t, + pub shm_dtime: crate::time_t, + pub shm_ctime: crate::time_t, + } + + pub struct kevent { + pub ident: crate::uintptr_t, + pub filter: c_short, + pub flags: c_ushort, + pub fflags: c_uint, + pub data: i64, + pub udata: *mut c_void, + pub ext: [u64; 4], + } + + pub struct kvm_page { + pub kp_version: crate::u_int, + pub kp_paddr: crate::kpaddr_t, + pub kp_kmap_vaddr: crate::kvaddr_t, + pub kp_dmap_vaddr: crate::kvaddr_t, + pub kp_prot: crate::vm_prot_t, + pub kp_offset: off_t, + pub kp_len: size_t, + } + + pub struct __c_anonymous_domainset { + #[cfg(target_pointer_width = "64")] + _priv: [c_ulong; 4], + #[cfg(target_pointer_width = "32")] + _priv: [c_ulong; 8], + } + + pub struct kinfo_proc { + /// Size of this structure. + pub ki_structsize: c_int, + /// Reserved: layout identifier. + pub ki_layout: c_int, + /// Address of command arguments. 
+ pub ki_args: *mut crate::pargs, + // This is normally "struct proc". + /// Address of proc. + pub ki_paddr: *mut c_void, + // This is normally "struct user". + /// Kernel virtual address of u-area. + pub ki_addr: *mut c_void, + // This is normally "struct vnode". + /// Pointer to trace file. + pub ki_tracep: *mut c_void, + // This is normally "struct vnode". + /// Pointer to executable file. + pub ki_textvp: *mut c_void, + /// Pointer to open file info. + pub ki_fd: *mut crate::filedesc, + // This is normally "struct vmspace". + /// Pointer to kernel vmspace struct. + pub ki_vmspace: *mut c_void, + /// Sleep address. + pub ki_wchan: *const c_void, + /// Process identifier. + pub ki_pid: crate::pid_t, + /// Parent process ID. + pub ki_ppid: crate::pid_t, + /// Process group ID. + pub ki_pgid: crate::pid_t, + /// tty process group ID. + pub ki_tpgid: crate::pid_t, + /// Process session ID. + pub ki_sid: crate::pid_t, + /// Terminal session ID. + pub ki_tsid: crate::pid_t, + /// Job control counter. + pub ki_jobc: c_short, + /// Unused (just here for alignment). + pub ki_spare_short1: c_short, + /// Controlling tty dev. + pub ki_tdev_freebsd11: u32, + /// Signals arrived but not delivered. + pub ki_siglist: crate::sigset_t, + /// Current signal mask. + pub ki_sigmask: crate::sigset_t, + /// Signals being ignored. + pub ki_sigignore: crate::sigset_t, + /// Signals being caught by user. + pub ki_sigcatch: crate::sigset_t, + /// Effective user ID. + pub ki_uid: crate::uid_t, + /// Real user ID. + pub ki_ruid: crate::uid_t, + /// Saved effective user ID. + pub ki_svuid: crate::uid_t, + /// Real group ID. + pub ki_rgid: crate::gid_t, + /// Saved effective group ID. + pub ki_svgid: crate::gid_t, + /// Number of groups. + pub ki_ngroups: c_short, + /// Unused (just here for alignment). + pub ki_spare_short2: c_short, + /// Groups. + pub ki_groups: [crate::gid_t; crate::KI_NGROUPS], + /// Virtual size. + pub ki_size: crate::vm_size_t, + /// Current resident set size in pages. + pub ki_rssize: crate::segsz_t, + /// Resident set size before last swap. + pub ki_swrss: crate::segsz_t, + /// Text size (pages) XXX. + pub ki_tsize: crate::segsz_t, + /// Data size (pages) XXX. + pub ki_dsize: crate::segsz_t, + /// Stack size (pages). + pub ki_ssize: crate::segsz_t, + /// Exit status for wait & stop signal. + pub ki_xstat: crate::u_short, + /// Accounting flags. + pub ki_acflag: crate::u_short, + /// %cpu for process during `ki_swtime`. + pub ki_pctcpu: crate::fixpt_t, + /// Time averaged value of `ki_cpticks`. + pub ki_estcpu: crate::u_int, + /// Time since last blocked. + pub ki_slptime: crate::u_int, + /// Time swapped in or out. + pub ki_swtime: crate::u_int, + /// Number of copy-on-write faults. + pub ki_cow: crate::u_int, + /// Real time in microsec. + pub ki_runtime: u64, + /// Starting time. + pub ki_start: crate::timeval, + /// Time used by process children. + pub ki_childtime: crate::timeval, + /// P_* flags. + pub ki_flag: c_long, + /// KI_* flags (below). + pub ki_kiflag: c_long, + /// Kernel trace points. + pub ki_traceflag: c_int, + /// S* process status. + pub ki_stat: c_char, + /// Process "nice" value. + pub ki_nice: i8, // signed char + /// Process lock (prevent swap) count. + pub ki_lock: c_char, + /// Run queue index. + pub ki_rqindex: c_char, + /// Which cpu we are on. + pub ki_oncpu_old: c_uchar, + /// Last cpu we were on. + pub ki_lastcpu_old: c_uchar, + /// Thread name. + pub ki_tdname: [c_char; crate::TDNAMLEN + 1], + /// Wchan message. 
+ pub ki_wmesg: [c_char; crate::WMESGLEN + 1], + /// Setlogin name. + pub ki_login: [c_char; crate::LOGNAMELEN + 1], + /// Lock name. + pub ki_lockname: [c_char; crate::LOCKNAMELEN + 1], + /// Command name. + pub ki_comm: [c_char; crate::COMMLEN + 1], + /// Emulation name. + pub ki_emul: [c_char; crate::KI_EMULNAMELEN + 1], + /// Login class. + pub ki_loginclass: [c_char; crate::LOGINCLASSLEN + 1], + /// More thread name. + pub ki_moretdname: [c_char; crate::MAXCOMLEN - crate::TDNAMLEN + 1], + /// Spare string space. + pub ki_sparestrings: [[c_char; 23]; 2], // little hack to allow PartialEq + /// Spare room for growth. + pub ki_spareints: [c_int; crate::KI_NSPARE_INT], + /// Controlling tty dev. + pub ki_tdev: u64, + /// Which cpu we are on. + pub ki_oncpu: c_int, + /// Last cpu we were on. + pub ki_lastcpu: c_int, + /// PID of tracing process. + pub ki_tracer: c_int, + /// P2_* flags. + pub ki_flag2: c_int, + /// Default FIB number. + pub ki_fibnum: c_int, + /// Credential flags. + pub ki_cr_flags: crate::u_int, + /// Process jail ID. + pub ki_jid: c_int, + /// Number of threads in total. + pub ki_numthreads: c_int, + /// Thread ID. + pub ki_tid: crate::lwpid_t, + /// Process priority. + pub ki_pri: crate::priority, + /// Process rusage statistics. + pub ki_rusage: crate::rusage, + /// rusage of children processes. + pub ki_rusage_ch: crate::rusage, + // This is normally "struct pcb". + /// Kernel virtual addr of pcb. + pub ki_pcb: *mut c_void, + /// Kernel virtual addr of stack. + pub ki_kstack: *mut c_void, + /// User convenience pointer. + pub ki_udata: *mut c_void, + // This is normally "struct thread". + pub ki_tdaddr: *mut c_void, + // This is normally "struct pwddesc". + /// Pointer to process paths info. + pub ki_pd: *mut c_void, + pub ki_spareptrs: [*mut c_void; crate::KI_NSPARE_PTR], + pub ki_sparelongs: [c_long; crate::KI_NSPARE_LONG], + /// PS_* flags. + pub ki_sflag: c_long, + /// kthread flag. + pub ki_tdflags: c_long, + } + + pub struct stat { + pub st_dev: crate::dev_t, + pub st_ino: crate::ino_t, + pub st_nlink: crate::nlink_t, + pub st_mode: crate::mode_t, + st_padding0: i16, + pub st_uid: crate::uid_t, + pub st_gid: crate::gid_t, + st_padding1: i32, + pub st_rdev: crate::dev_t, + #[cfg(target_arch = "x86")] + st_atim_ext: i32, + pub st_atime: crate::time_t, + pub st_atime_nsec: c_long, + #[cfg(target_arch = "x86")] + st_mtim_ext: i32, + pub st_mtime: crate::time_t, + pub st_mtime_nsec: c_long, + #[cfg(target_arch = "x86")] + st_ctim_ext: i32, + pub st_ctime: crate::time_t, + pub st_ctime_nsec: c_long, + #[cfg(target_arch = "x86")] + st_btim_ext: i32, + pub st_birthtime: crate::time_t, + pub st_birthtime_nsec: c_long, + pub st_size: off_t, + pub st_blocks: crate::blkcnt_t, + pub st_blksize: crate::blksize_t, + pub st_flags: crate::fflags_t, + pub st_gen: u64, + pub st_spare: [u64; 10], + } +} + +s_no_extra_traits! 
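+// Same hand-implemented trio as the FreeBSD 11/12 modules, but `vn_devname` is larger
+// here because SPECNAMELEN grew to 255 on FreeBSD 13 (it was 63 before).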
{ + pub struct dirent { + pub d_fileno: crate::ino_t, + pub d_off: off_t, + pub d_reclen: u16, + pub d_type: u8, + d_pad0: u8, + pub d_namlen: u16, + d_pad1: u16, + pub d_name: [c_char; 256], + } + + pub struct statfs { + pub f_version: u32, + pub f_type: u32, + pub f_flags: u64, + pub f_bsize: u64, + pub f_iosize: u64, + pub f_blocks: u64, + pub f_bfree: u64, + pub f_bavail: i64, + pub f_files: u64, + pub f_ffree: i64, + pub f_syncwrites: u64, + pub f_asyncwrites: u64, + pub f_syncreads: u64, + pub f_asyncreads: u64, + f_spare: [u64; 10], + pub f_namemax: u32, + pub f_owner: crate::uid_t, + pub f_fsid: crate::fsid_t, + f_charspare: [c_char; 80], + pub f_fstypename: [c_char; 16], + pub f_mntfromname: [c_char; 1024], + pub f_mntonname: [c_char; 1024], + } + + pub struct vnstat { + pub vn_fileid: u64, + pub vn_size: u64, + pub vn_dev: u64, + pub vn_fsid: u64, + pub vn_mntdir: *mut c_char, + pub vn_type: c_int, + pub vn_mode: u16, + pub vn_devname: [c_char; crate::SPECNAMELEN as usize + 1], + } +} + +cfg_if! { + if #[cfg(feature = "extra_traits")] { + impl PartialEq for statfs { + fn eq(&self, other: &statfs) -> bool { + self.f_version == other.f_version + && self.f_type == other.f_type + && self.f_flags == other.f_flags + && self.f_bsize == other.f_bsize + && self.f_iosize == other.f_iosize + && self.f_blocks == other.f_blocks + && self.f_bfree == other.f_bfree + && self.f_bavail == other.f_bavail + && self.f_files == other.f_files + && self.f_ffree == other.f_ffree + && self.f_syncwrites == other.f_syncwrites + && self.f_asyncwrites == other.f_asyncwrites + && self.f_syncreads == other.f_syncreads + && self.f_asyncreads == other.f_asyncreads + && self.f_namemax == other.f_namemax + && self.f_owner == other.f_owner + && self.f_fsid == other.f_fsid + && self.f_fstypename == other.f_fstypename + && self + .f_mntfromname + .iter() + .zip(other.f_mntfromname.iter()) + .all(|(a, b)| a == b) + && self + .f_mntonname + .iter() + .zip(other.f_mntonname.iter()) + .all(|(a, b)| a == b) + } + } + impl Eq for statfs {} + impl hash::Hash for statfs { + fn hash(&self, state: &mut H) { + self.f_version.hash(state); + self.f_type.hash(state); + self.f_flags.hash(state); + self.f_bsize.hash(state); + self.f_iosize.hash(state); + self.f_blocks.hash(state); + self.f_bfree.hash(state); + self.f_bavail.hash(state); + self.f_files.hash(state); + self.f_ffree.hash(state); + self.f_syncwrites.hash(state); + self.f_asyncwrites.hash(state); + self.f_syncreads.hash(state); + self.f_asyncreads.hash(state); + self.f_namemax.hash(state); + self.f_owner.hash(state); + self.f_fsid.hash(state); + self.f_charspare.hash(state); + self.f_fstypename.hash(state); + self.f_mntfromname.hash(state); + self.f_mntonname.hash(state); + } + } + + impl PartialEq for dirent { + fn eq(&self, other: &dirent) -> bool { + self.d_fileno == other.d_fileno + && self.d_off == other.d_off + && self.d_reclen == other.d_reclen + && self.d_type == other.d_type + && self.d_namlen == other.d_namlen + && self.d_name[..self.d_namlen as _] + .iter() + .zip(other.d_name.iter()) + .all(|(a, b)| a == b) + } + } + impl Eq for dirent {} + impl hash::Hash for dirent { + fn hash(&self, state: &mut H) { + self.d_fileno.hash(state); + self.d_off.hash(state); + self.d_reclen.hash(state); + self.d_type.hash(state); + self.d_namlen.hash(state); + self.d_name[..self.d_namlen as _].hash(state); + } + } + + impl PartialEq for vnstat { + fn eq(&self, other: &vnstat) -> bool { + let self_vn_devname: &[c_char] = &self.vn_devname; + let other_vn_devname: &[c_char] = 
&other.vn_devname; + + self.vn_fileid == other.vn_fileid + && self.vn_size == other.vn_size + && self.vn_dev == other.vn_dev + && self.vn_fsid == other.vn_fsid + && self.vn_mntdir == other.vn_mntdir + && self.vn_type == other.vn_type + && self.vn_mode == other.vn_mode + && self_vn_devname == other_vn_devname + } + } + impl Eq for vnstat {} + impl hash::Hash for vnstat { + fn hash(&self, state: &mut H) { + let self_vn_devname: &[c_char] = &self.vn_devname; + + self.vn_fileid.hash(state); + self.vn_size.hash(state); + self.vn_dev.hash(state); + self.vn_fsid.hash(state); + self.vn_mntdir.hash(state); + self.vn_type.hash(state); + self.vn_mode.hash(state); + self_vn_devname.hash(state); + } + } + } +} + +pub const RAND_MAX: c_int = 0x7fff_ffff; +pub const ELAST: c_int = 97; + +pub const KF_TYPE_EVENTFD: c_int = 13; + +/// max length of devicename +pub const SPECNAMELEN: c_int = 255; +pub const KI_NSPARE_PTR: usize = 5; + +/// domainset policies +pub const DOMAINSET_POLICY_INVALID: c_int = 0; +pub const DOMAINSET_POLICY_ROUNDROBIN: c_int = 1; +pub const DOMAINSET_POLICY_FIRSTTOUCH: c_int = 2; +pub const DOMAINSET_POLICY_PREFER: c_int = 3; +pub const DOMAINSET_POLICY_INTERLEAVE: c_int = 4; + +pub const MINCORE_SUPER: c_int = 0x20; + +safe_f! { + pub const fn makedev(major: c_uint, minor: c_uint) -> crate::dev_t { + let major = major as crate::dev_t; + let minor = minor as crate::dev_t; + let mut dev = 0; + dev |= ((major & 0xffffff00) as dev_t) << 32; + dev |= ((major & 0x000000ff) as dev_t) << 8; + dev |= ((minor & 0x0000ff00) as dev_t) << 24; + dev |= ((minor & 0xffff00ff) as dev_t) << 0; + dev + } + + pub const fn major(dev: crate::dev_t) -> c_int { + (((dev >> 32) & 0xffffff00) | ((dev >> 8) & 0xff)) as c_int + } + + pub const fn minor(dev: crate::dev_t) -> c_int { + (((dev >> 24) & 0xff00) | (dev & 0xffff00ff)) as c_int + } +} + +extern "C" { + pub fn setgrent(); + pub fn mprotect(addr: *mut c_void, len: size_t, prot: c_int) -> c_int; + pub fn freelocale(loc: crate::locale_t); + pub fn msgrcv( + msqid: c_int, + msgp: *mut c_void, + msgsz: size_t, + msgtyp: c_long, + msgflg: c_int, + ) -> ssize_t; + + pub fn cpuset_getdomain( + level: crate::cpulevel_t, + which: crate::cpuwhich_t, + id: crate::id_t, + setsize: size_t, + mask: *mut crate::domainset_t, + policy: *mut c_int, + ) -> c_int; + pub fn cpuset_setdomain( + level: crate::cpulevel_t, + which: crate::cpuwhich_t, + id: crate::id_t, + setsize: size_t, + mask: *const crate::domainset_t, + policy: c_int, + ) -> c_int; + + pub fn dirname(path: *mut c_char) -> *mut c_char; + pub fn basename(path: *mut c_char) -> *mut c_char; + + #[link_name = "qsort_r@FBSD_1.0"] + pub fn qsort_r( + base: *mut c_void, + num: size_t, + size: size_t, + arg: *mut c_void, + compar: Option c_int>, + ); +} + +#[link(name = "kvm")] +extern "C" { + pub fn kvm_kerndisp(kd: *mut crate::kvm_t) -> crate::kssize_t; +} + +cfg_if! 
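+// Architecture-specific procctl constants (the KPTI knobs) live in a per-arch
+// submodule; only x86_64 currently defines any.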
{ + if #[cfg(target_arch = "x86_64")] { + mod x86_64; + pub use self::x86_64::*; + } +} diff --git a/vendor/libc/src/unix/bsd/freebsdlike/freebsd/freebsd13/x86_64.rs b/vendor/libc/src/unix/bsd/freebsdlike/freebsd/freebsd13/x86_64.rs new file mode 100644 index 00000000000000..b29171cc509c51 --- /dev/null +++ b/vendor/libc/src/unix/bsd/freebsdlike/freebsd/freebsd13/x86_64.rs @@ -0,0 +1,7 @@ +use crate::prelude::*; + +pub const PROC_KPTI_CTL: c_int = crate::PROC_PROCCTL_MD_MIN; +pub const PROC_KPTI_CTL_ENABLE_ON_EXEC: c_int = 1; +pub const PROC_KPTI_CTL_DISABLE_ON_EXEC: c_int = 2; +pub const PROC_KPTI_STATUS: c_int = crate::PROC_PROCCTL_MD_MIN + 1; +pub const PROC_KPTI_STATUS_ACTIVE: c_int = 0x80000000; diff --git a/vendor/libc/src/unix/bsd/freebsdlike/freebsd/freebsd14/mod.rs b/vendor/libc/src/unix/bsd/freebsdlike/freebsd/freebsd14/mod.rs new file mode 100644 index 00000000000000..f20a46655665d8 --- /dev/null +++ b/vendor/libc/src/unix/bsd/freebsdlike/freebsd/freebsd14/mod.rs @@ -0,0 +1,532 @@ +use crate::off_t; +use crate::prelude::*; + +// APIs in FreeBSD 14 that have changed since 11. + +pub type nlink_t = u64; +pub type dev_t = u64; +pub type ino_t = u64; +pub type shmatt_t = c_uint; +pub type kpaddr_t = u64; +pub type kssize_t = i64; +pub type domainset_t = __c_anonymous_domainset; + +s! { + pub struct shmid_ds { + pub shm_perm: crate::ipc_perm, + pub shm_segsz: size_t, + pub shm_lpid: crate::pid_t, + pub shm_cpid: crate::pid_t, + pub shm_nattch: crate::shmatt_t, + pub shm_atime: crate::time_t, + pub shm_dtime: crate::time_t, + pub shm_ctime: crate::time_t, + } + + pub struct kevent { + pub ident: crate::uintptr_t, + pub filter: c_short, + pub flags: c_ushort, + pub fflags: c_uint, + pub data: i64, + pub udata: *mut c_void, + pub ext: [u64; 4], + } + + pub struct kvm_page { + pub kp_version: crate::u_int, + pub kp_paddr: crate::kpaddr_t, + pub kp_kmap_vaddr: crate::kvaddr_t, + pub kp_dmap_vaddr: crate::kvaddr_t, + pub kp_prot: crate::vm_prot_t, + pub kp_offset: off_t, + pub kp_len: size_t, + } + + pub struct __c_anonymous_domainset { + #[cfg(target_pointer_width = "64")] + _priv: [c_ulong; 4], + #[cfg(target_pointer_width = "32")] + _priv: [c_ulong; 8], + } + + pub struct kinfo_proc { + /// Size of this structure. + pub ki_structsize: c_int, + /// Reserved: layout identifier. + pub ki_layout: c_int, + /// Address of command arguments. + pub ki_args: *mut crate::pargs, + // This is normally "struct proc". + /// Address of proc. + pub ki_paddr: *mut c_void, + // This is normally "struct user". + /// Kernel virtual address of u-area. + pub ki_addr: *mut c_void, + // This is normally "struct vnode". + /// Pointer to trace file. + pub ki_tracep: *mut c_void, + // This is normally "struct vnode". + /// Pointer to executable file. + pub ki_textvp: *mut c_void, + /// Pointer to open file info. + pub ki_fd: *mut crate::filedesc, + // This is normally "struct vmspace". + /// Pointer to kernel vmspace struct. + pub ki_vmspace: *mut c_void, + /// Sleep address. + pub ki_wchan: *const c_void, + /// Process identifier. + pub ki_pid: crate::pid_t, + /// Parent process ID. + pub ki_ppid: crate::pid_t, + /// Process group ID. + pub ki_pgid: crate::pid_t, + /// tty process group ID. + pub ki_tpgid: crate::pid_t, + /// Process session ID. + pub ki_sid: crate::pid_t, + /// Terminal session ID. + pub ki_tsid: crate::pid_t, + /// Job control counter. + pub ki_jobc: c_short, + /// Unused (just here for alignment). + pub ki_spare_short1: c_short, + /// Controlling tty dev. 
+ pub ki_tdev_freebsd11: u32, + /// Signals arrived but not delivered. + pub ki_siglist: crate::sigset_t, + /// Current signal mask. + pub ki_sigmask: crate::sigset_t, + /// Signals being ignored. + pub ki_sigignore: crate::sigset_t, + /// Signals being caught by user. + pub ki_sigcatch: crate::sigset_t, + /// Effective user ID. + pub ki_uid: crate::uid_t, + /// Real user ID. + pub ki_ruid: crate::uid_t, + /// Saved effective user ID. + pub ki_svuid: crate::uid_t, + /// Real group ID. + pub ki_rgid: crate::gid_t, + /// Saved effective group ID. + pub ki_svgid: crate::gid_t, + /// Number of groups. + pub ki_ngroups: c_short, + /// Unused (just here for alignment). + pub ki_spare_short2: c_short, + /// Groups. + pub ki_groups: [crate::gid_t; crate::KI_NGROUPS], + /// Virtual size. + pub ki_size: crate::vm_size_t, + /// Current resident set size in pages. + pub ki_rssize: crate::segsz_t, + /// Resident set size before last swap. + pub ki_swrss: crate::segsz_t, + /// Text size (pages) XXX. + pub ki_tsize: crate::segsz_t, + /// Data size (pages) XXX. + pub ki_dsize: crate::segsz_t, + /// Stack size (pages). + pub ki_ssize: crate::segsz_t, + /// Exit status for wait & stop signal. + pub ki_xstat: crate::u_short, + /// Accounting flags. + pub ki_acflag: crate::u_short, + /// %cpu for process during `ki_swtime`. + pub ki_pctcpu: crate::fixpt_t, + /// Time averaged value of `ki_cpticks`. + pub ki_estcpu: crate::u_int, + /// Time since last blocked. + pub ki_slptime: crate::u_int, + /// Time swapped in or out. + pub ki_swtime: crate::u_int, + /// Number of copy-on-write faults. + pub ki_cow: crate::u_int, + /// Real time in microsec. + pub ki_runtime: u64, + /// Starting time. + pub ki_start: crate::timeval, + /// Time used by process children. + pub ki_childtime: crate::timeval, + /// P_* flags. + pub ki_flag: c_long, + /// KI_* flags (below). + pub ki_kiflag: c_long, + /// Kernel trace points. + pub ki_traceflag: c_int, + /// S* process status. + pub ki_stat: c_char, + /// Process "nice" value. + pub ki_nice: i8, // signed char + /// Process lock (prevent swap) count. + pub ki_lock: c_char, + /// Run queue index. + pub ki_rqindex: c_char, + /// Which cpu we are on. + pub ki_oncpu_old: c_uchar, + /// Last cpu we were on. + pub ki_lastcpu_old: c_uchar, + /// Thread name. + pub ki_tdname: [c_char; crate::TDNAMLEN + 1], + /// Wchan message. + pub ki_wmesg: [c_char; crate::WMESGLEN + 1], + /// Setlogin name. + pub ki_login: [c_char; crate::LOGNAMELEN + 1], + /// Lock name. + pub ki_lockname: [c_char; crate::LOCKNAMELEN + 1], + /// Command name. + pub ki_comm: [c_char; crate::COMMLEN + 1], + /// Emulation name. + pub ki_emul: [c_char; crate::KI_EMULNAMELEN + 1], + /// Login class. + pub ki_loginclass: [c_char; crate::LOGINCLASSLEN + 1], + /// More thread name. + pub ki_moretdname: [c_char; crate::MAXCOMLEN - crate::TDNAMLEN + 1], + /// Spare string space. + pub ki_sparestrings: [[c_char; 23]; 2], // little hack to allow PartialEq + /// Spare room for growth. + pub ki_spareints: [c_int; crate::KI_NSPARE_INT], + /// Controlling tty dev. + pub ki_tdev: u64, + /// Which cpu we are on. + pub ki_oncpu: c_int, + /// Last cpu we were on. + pub ki_lastcpu: c_int, + /// PID of tracing process. + pub ki_tracer: c_int, + /// P2_* flags. + pub ki_flag2: c_int, + /// Default FIB number. + pub ki_fibnum: c_int, + /// Credential flags. + pub ki_cr_flags: crate::u_int, + /// Process jail ID. + pub ki_jid: c_int, + /// Number of threads in total. + pub ki_numthreads: c_int, + /// Thread ID. 
+ pub ki_tid: crate::lwpid_t, + /// Process priority. + pub ki_pri: crate::priority, + /// Process rusage statistics. + pub ki_rusage: crate::rusage, + /// rusage of children processes. + pub ki_rusage_ch: crate::rusage, + // This is normally "struct pcb". + /// Kernel virtual addr of pcb. + pub ki_pcb: *mut c_void, + /// Kernel virtual addr of stack. + pub ki_kstack: *mut c_void, + /// User convenience pointer. + pub ki_udata: *mut c_void, + // This is normally "struct thread". + pub ki_tdaddr: *mut c_void, + // This is normally "struct pwddesc". + /// Pointer to process paths info. + pub ki_pd: *mut c_void, + pub ki_spareptrs: [*mut c_void; crate::KI_NSPARE_PTR], + pub ki_sparelongs: [c_long; crate::KI_NSPARE_LONG], + /// PS_* flags. + pub ki_sflag: c_long, + /// kthread flag. + pub ki_tdflags: c_long, + } + + #[non_exhaustive] + pub struct stat { + pub st_dev: crate::dev_t, + pub st_ino: crate::ino_t, + pub st_nlink: crate::nlink_t, + pub st_mode: crate::mode_t, + st_padding0: i16, + pub st_uid: crate::uid_t, + pub st_gid: crate::gid_t, + st_padding1: i32, + pub st_rdev: crate::dev_t, + #[cfg(target_arch = "x86")] + st_atim_ext: i32, + pub st_atime: crate::time_t, + pub st_atime_nsec: c_long, + #[cfg(target_arch = "x86")] + st_mtim_ext: i32, + pub st_mtime: crate::time_t, + pub st_mtime_nsec: c_long, + #[cfg(target_arch = "x86")] + st_ctim_ext: i32, + pub st_ctime: crate::time_t, + pub st_ctime_nsec: c_long, + #[cfg(target_arch = "x86")] + st_btim_ext: i32, + pub st_birthtime: crate::time_t, + pub st_birthtime_nsec: c_long, + pub st_size: off_t, + pub st_blocks: crate::blkcnt_t, + pub st_blksize: crate::blksize_t, + pub st_flags: crate::fflags_t, + pub st_gen: u64, + pub st_filerev: u64, + pub st_spare: [u64; 9], + } +} + +s_no_extra_traits! { + pub struct dirent { + pub d_fileno: crate::ino_t, + pub d_off: off_t, + pub d_reclen: u16, + pub d_type: u8, + d_pad0: u8, + pub d_namlen: u16, + d_pad1: u16, + pub d_name: [c_char; 256], + } + + pub struct statfs { + pub f_version: u32, + pub f_type: u32, + pub f_flags: u64, + pub f_bsize: u64, + pub f_iosize: u64, + pub f_blocks: u64, + pub f_bfree: u64, + pub f_bavail: i64, + pub f_files: u64, + pub f_ffree: i64, + pub f_syncwrites: u64, + pub f_asyncwrites: u64, + pub f_syncreads: u64, + pub f_asyncreads: u64, + f_spare: [u64; 10], + pub f_namemax: u32, + pub f_owner: crate::uid_t, + pub f_fsid: crate::fsid_t, + f_charspare: [c_char; 80], + pub f_fstypename: [c_char; 16], + pub f_mntfromname: [c_char; 1024], + pub f_mntonname: [c_char; 1024], + } + + pub struct vnstat { + pub vn_fileid: u64, + pub vn_size: u64, + pub vn_dev: u64, + pub vn_fsid: u64, + pub vn_mntdir: *mut c_char, + pub vn_type: c_int, + pub vn_mode: u16, + pub vn_devname: [c_char; crate::SPECNAMELEN as usize + 1], + } +} + +cfg_if! 
{ + if #[cfg(feature = "extra_traits")] { + impl PartialEq for statfs { + fn eq(&self, other: &statfs) -> bool { + self.f_version == other.f_version + && self.f_type == other.f_type + && self.f_flags == other.f_flags + && self.f_bsize == other.f_bsize + && self.f_iosize == other.f_iosize + && self.f_blocks == other.f_blocks + && self.f_bfree == other.f_bfree + && self.f_bavail == other.f_bavail + && self.f_files == other.f_files + && self.f_ffree == other.f_ffree + && self.f_syncwrites == other.f_syncwrites + && self.f_asyncwrites == other.f_asyncwrites + && self.f_syncreads == other.f_syncreads + && self.f_asyncreads == other.f_asyncreads + && self.f_namemax == other.f_namemax + && self.f_owner == other.f_owner + && self.f_fsid == other.f_fsid + && self.f_fstypename == other.f_fstypename + && self + .f_mntfromname + .iter() + .zip(other.f_mntfromname.iter()) + .all(|(a, b)| a == b) + && self + .f_mntonname + .iter() + .zip(other.f_mntonname.iter()) + .all(|(a, b)| a == b) + } + } + impl Eq for statfs {} + impl hash::Hash for statfs { + fn hash(&self, state: &mut H) { + self.f_version.hash(state); + self.f_type.hash(state); + self.f_flags.hash(state); + self.f_bsize.hash(state); + self.f_iosize.hash(state); + self.f_blocks.hash(state); + self.f_bfree.hash(state); + self.f_bavail.hash(state); + self.f_files.hash(state); + self.f_ffree.hash(state); + self.f_syncwrites.hash(state); + self.f_asyncwrites.hash(state); + self.f_syncreads.hash(state); + self.f_asyncreads.hash(state); + self.f_namemax.hash(state); + self.f_owner.hash(state); + self.f_fsid.hash(state); + self.f_charspare.hash(state); + self.f_fstypename.hash(state); + self.f_mntfromname.hash(state); + self.f_mntonname.hash(state); + } + } + + impl PartialEq for dirent { + fn eq(&self, other: &dirent) -> bool { + self.d_fileno == other.d_fileno + && self.d_off == other.d_off + && self.d_reclen == other.d_reclen + && self.d_type == other.d_type + && self.d_namlen == other.d_namlen + && self.d_name[..self.d_namlen as _] + .iter() + .zip(other.d_name.iter()) + .all(|(a, b)| a == b) + } + } + impl Eq for dirent {} + impl hash::Hash for dirent { + fn hash(&self, state: &mut H) { + self.d_fileno.hash(state); + self.d_off.hash(state); + self.d_reclen.hash(state); + self.d_type.hash(state); + self.d_namlen.hash(state); + self.d_name[..self.d_namlen as _].hash(state); + } + } + + impl PartialEq for vnstat { + fn eq(&self, other: &vnstat) -> bool { + let self_vn_devname: &[c_char] = &self.vn_devname; + let other_vn_devname: &[c_char] = &other.vn_devname; + + self.vn_fileid == other.vn_fileid + && self.vn_size == other.vn_size + && self.vn_dev == other.vn_dev + && self.vn_fsid == other.vn_fsid + && self.vn_mntdir == other.vn_mntdir + && self.vn_type == other.vn_type + && self.vn_mode == other.vn_mode + && self_vn_devname == other_vn_devname + } + } + impl Eq for vnstat {} + impl hash::Hash for vnstat { + fn hash(&self, state: &mut H) { + let self_vn_devname: &[c_char] = &self.vn_devname; + + self.vn_fileid.hash(state); + self.vn_size.hash(state); + self.vn_dev.hash(state); + self.vn_fsid.hash(state); + self.vn_mntdir.hash(state); + self.vn_type.hash(state); + self.vn_mode.hash(state); + self_vn_devname.hash(state); + } + } + } +} + +pub const RAND_MAX: c_int = 0x7fff_ffff; +pub const ELAST: c_int = 97; + +pub const KF_TYPE_EVENTFD: c_int = 13; + +/// max length of devicename +pub const SPECNAMELEN: c_int = 255; +pub const KI_NSPARE_PTR: usize = 5; + +/// domainset policies +pub const DOMAINSET_POLICY_INVALID: c_int = 0; +pub const 
DOMAINSET_POLICY_ROUNDROBIN: c_int = 1; +pub const DOMAINSET_POLICY_FIRSTTOUCH: c_int = 2; +pub const DOMAINSET_POLICY_PREFER: c_int = 3; +pub const DOMAINSET_POLICY_INTERLEAVE: c_int = 4; + +pub const MINCORE_SUPER: c_int = 0x60; + +safe_f! { + pub const fn makedev(major: c_uint, minor: c_uint) -> crate::dev_t { + let major = major as crate::dev_t; + let minor = minor as crate::dev_t; + let mut dev = 0; + dev |= ((major & 0xffffff00) as dev_t) << 32; + dev |= ((major & 0x000000ff) as dev_t) << 8; + dev |= ((minor & 0x0000ff00) as dev_t) << 24; + dev |= ((minor & 0xffff00ff) as dev_t) << 0; + dev + } + + pub const fn major(dev: crate::dev_t) -> c_int { + (((dev >> 32) & 0xffffff00) | ((dev >> 8) & 0xff)) as c_int + } + + pub const fn minor(dev: crate::dev_t) -> c_int { + (((dev >> 24) & 0xff00) | (dev & 0xffff00ff)) as c_int + } +} + +extern "C" { + pub fn setgrent(); + pub fn mprotect(addr: *mut c_void, len: size_t, prot: c_int) -> c_int; + pub fn freelocale(loc: crate::locale_t); + pub fn msgrcv( + msqid: c_int, + msgp: *mut c_void, + msgsz: size_t, + msgtyp: c_long, + msgflg: c_int, + ) -> ssize_t; + + pub fn cpuset_getdomain( + level: crate::cpulevel_t, + which: crate::cpuwhich_t, + id: crate::id_t, + setsize: size_t, + mask: *mut crate::domainset_t, + policy: *mut c_int, + ) -> c_int; + pub fn cpuset_setdomain( + level: crate::cpulevel_t, + which: crate::cpuwhich_t, + id: crate::id_t, + setsize: size_t, + mask: *const crate::domainset_t, + policy: c_int, + ) -> c_int; + + pub fn dirname(path: *mut c_char) -> *mut c_char; + pub fn basename(path: *mut c_char) -> *mut c_char; + + pub fn qsort_r( + base: *mut c_void, + num: size_t, + size: size_t, + compar: Option c_int>, + arg: *mut c_void, + ); +} + +#[link(name = "kvm")] +extern "C" { + pub fn kvm_kerndisp(kd: *mut crate::kvm_t) -> crate::kssize_t; +} + +cfg_if! { + if #[cfg(target_arch = "x86_64")] { + mod x86_64; + pub use self::x86_64::*; + } +} diff --git a/vendor/libc/src/unix/bsd/freebsdlike/freebsd/freebsd14/x86_64.rs b/vendor/libc/src/unix/bsd/freebsdlike/freebsd/freebsd14/x86_64.rs new file mode 100644 index 00000000000000..3e037471fbf68b --- /dev/null +++ b/vendor/libc/src/unix/bsd/freebsdlike/freebsd/freebsd14/x86_64.rs @@ -0,0 +1,14 @@ +use crate::prelude::*; + +pub const PROC_KPTI_CTL: c_int = crate::PROC_PROCCTL_MD_MIN; +pub const PROC_KPTI_CTL_ENABLE_ON_EXEC: c_int = 1; +pub const PROC_KPTI_CTL_DISABLE_ON_EXEC: c_int = 2; +pub const PROC_KPTI_STATUS: c_int = crate::PROC_PROCCTL_MD_MIN + 1; +pub const PROC_KPTI_STATUS_ACTIVE: c_int = 0x80000000; +pub const PROC_LA_CTL: c_int = crate::PROC_PROCCTL_MD_MIN + 2; +pub const PROC_LA_STATUS: c_int = crate::PROC_PROCCTL_MD_MIN + 3; +pub const PROC_LA_CTL_LA48_ON_EXEC: c_int = 1; +pub const PROC_LA_CTL_LA57_ON_EXEC: c_int = 2; +pub const PROC_LA_CTL_DEFAULT_ON_EXEC: c_int = 3; +pub const PROC_LA_STATUS_LA48: c_int = 0x01000000; +pub const PROC_LA_STATUS_LA57: c_int = 0x02000000; diff --git a/vendor/libc/src/unix/bsd/freebsdlike/freebsd/freebsd15/mod.rs b/vendor/libc/src/unix/bsd/freebsdlike/freebsd/freebsd15/mod.rs new file mode 100644 index 00000000000000..c0d27ef370e6f5 --- /dev/null +++ b/vendor/libc/src/unix/bsd/freebsdlike/freebsd/freebsd15/mod.rs @@ -0,0 +1,534 @@ +use crate::off_t; +use crate::prelude::*; + +// APIs in FreeBSD 15 that have changed since 11. + +pub type nlink_t = u64; +pub type dev_t = u64; +pub type ino_t = u64; +pub type shmatt_t = c_uint; +pub type kpaddr_t = u64; +pub type kssize_t = i64; +pub type domainset_t = __c_anonymous_domainset; + +s! 
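+// FreeBSD 15: `kinfo_proc` becomes #[non_exhaustive] and gains `ki_uerrmsg`,
+// with KI_NSPARE_PTR dropping from 5 to 4.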
{ + pub struct shmid_ds { + pub shm_perm: crate::ipc_perm, + pub shm_segsz: size_t, + pub shm_lpid: crate::pid_t, + pub shm_cpid: crate::pid_t, + pub shm_nattch: crate::shmatt_t, + pub shm_atime: crate::time_t, + pub shm_dtime: crate::time_t, + pub shm_ctime: crate::time_t, + } + + pub struct kevent { + pub ident: crate::uintptr_t, + pub filter: c_short, + pub flags: c_ushort, + pub fflags: c_uint, + pub data: i64, + pub udata: *mut c_void, + pub ext: [u64; 4], + } + + pub struct kvm_page { + pub kp_version: crate::u_int, + pub kp_paddr: crate::kpaddr_t, + pub kp_kmap_vaddr: crate::kvaddr_t, + pub kp_dmap_vaddr: crate::kvaddr_t, + pub kp_prot: crate::vm_prot_t, + pub kp_offset: off_t, + pub kp_len: size_t, + } + + pub struct __c_anonymous_domainset { + #[cfg(target_pointer_width = "64")] + _priv: [c_ulong; 4], + #[cfg(target_pointer_width = "32")] + _priv: [c_ulong; 8], + } + + #[non_exhaustive] + pub struct kinfo_proc { + /// Size of this structure. + pub ki_structsize: c_int, + /// Reserved: layout identifier. + pub ki_layout: c_int, + /// Address of command arguments. + pub ki_args: *mut crate::pargs, + // This is normally "struct proc". + /// Address of proc. + pub ki_paddr: *mut c_void, + // This is normally "struct user". + /// Kernel virtual address of u-area. + pub ki_addr: *mut c_void, + // This is normally "struct vnode". + /// Pointer to trace file. + pub ki_tracep: *mut c_void, + // This is normally "struct vnode". + /// Pointer to executable file. + pub ki_textvp: *mut c_void, + /// Pointer to open file info. + pub ki_fd: *mut crate::filedesc, + // This is normally "struct vmspace". + /// Pointer to kernel vmspace struct. + pub ki_vmspace: *mut c_void, + /// Sleep address. + pub ki_wchan: *const c_void, + /// Process identifier. + pub ki_pid: crate::pid_t, + /// Parent process ID. + pub ki_ppid: crate::pid_t, + /// Process group ID. + pub ki_pgid: crate::pid_t, + /// tty process group ID. + pub ki_tpgid: crate::pid_t, + /// Process session ID. + pub ki_sid: crate::pid_t, + /// Terminal session ID. + pub ki_tsid: crate::pid_t, + /// Job control counter. + pub ki_jobc: c_short, + /// Unused (just here for alignment). + pub ki_spare_short1: c_short, + /// Controlling tty dev. + pub ki_tdev_freebsd11: u32, + /// Signals arrived but not delivered. + pub ki_siglist: crate::sigset_t, + /// Current signal mask. + pub ki_sigmask: crate::sigset_t, + /// Signals being ignored. + pub ki_sigignore: crate::sigset_t, + /// Signals being caught by user. + pub ki_sigcatch: crate::sigset_t, + /// Effective user ID. + pub ki_uid: crate::uid_t, + /// Real user ID. + pub ki_ruid: crate::uid_t, + /// Saved effective user ID. + pub ki_svuid: crate::uid_t, + /// Real group ID. + pub ki_rgid: crate::gid_t, + /// Saved effective group ID. + pub ki_svgid: crate::gid_t, + /// Number of groups. + pub ki_ngroups: c_short, + /// Unused (just here for alignment). + pub ki_spare_short2: c_short, + /// Groups. + pub ki_groups: [crate::gid_t; crate::KI_NGROUPS], + /// Virtual size. + pub ki_size: crate::vm_size_t, + /// Current resident set size in pages. + pub ki_rssize: crate::segsz_t, + /// Resident set size before last swap. + pub ki_swrss: crate::segsz_t, + /// Text size (pages) XXX. + pub ki_tsize: crate::segsz_t, + /// Data size (pages) XXX. + pub ki_dsize: crate::segsz_t, + /// Stack size (pages). + pub ki_ssize: crate::segsz_t, + /// Exit status for wait & stop signal. + pub ki_xstat: crate::u_short, + /// Accounting flags. + pub ki_acflag: crate::u_short, + /// %cpu for process during `ki_swtime`. 
+ pub ki_pctcpu: crate::fixpt_t, + /// Time averaged value of `ki_cpticks`. + pub ki_estcpu: crate::u_int, + /// Time since last blocked. + pub ki_slptime: crate::u_int, + /// Time swapped in or out. + pub ki_swtime: crate::u_int, + /// Number of copy-on-write faults. + pub ki_cow: crate::u_int, + /// Real time in microsec. + pub ki_runtime: u64, + /// Starting time. + pub ki_start: crate::timeval, + /// Time used by process children. + pub ki_childtime: crate::timeval, + /// P_* flags. + pub ki_flag: c_long, + /// KI_* flags (below). + pub ki_kiflag: c_long, + /// Kernel trace points. + pub ki_traceflag: c_int, + /// S* process status. + pub ki_stat: c_char, + /// Process "nice" value. + pub ki_nice: i8, // signed char + /// Process lock (prevent swap) count. + pub ki_lock: c_char, + /// Run queue index. + pub ki_rqindex: c_char, + /// Which cpu we are on. + pub ki_oncpu_old: c_uchar, + /// Last cpu we were on. + pub ki_lastcpu_old: c_uchar, + /// Thread name. + pub ki_tdname: [c_char; crate::TDNAMLEN + 1], + /// Wchan message. + pub ki_wmesg: [c_char; crate::WMESGLEN + 1], + /// Setlogin name. + pub ki_login: [c_char; crate::LOGNAMELEN + 1], + /// Lock name. + pub ki_lockname: [c_char; crate::LOCKNAMELEN + 1], + /// Command name. + pub ki_comm: [c_char; crate::COMMLEN + 1], + /// Emulation name. + pub ki_emul: [c_char; crate::KI_EMULNAMELEN + 1], + /// Login class. + pub ki_loginclass: [c_char; crate::LOGINCLASSLEN + 1], + /// More thread name. + pub ki_moretdname: [c_char; crate::MAXCOMLEN - crate::TDNAMLEN + 1], + /// Spare string space. + pub ki_sparestrings: [[c_char; 23]; 2], // little hack to allow PartialEq + /// Spare room for growth. + pub ki_spareints: [c_int; crate::KI_NSPARE_INT], + /// Controlling tty dev. + pub ki_tdev: u64, + /// Which cpu we are on. + pub ki_oncpu: c_int, + /// Last cpu we were on. + pub ki_lastcpu: c_int, + /// PID of tracing process. + pub ki_tracer: c_int, + /// P2_* flags. + pub ki_flag2: c_int, + /// Default FIB number. + pub ki_fibnum: c_int, + /// Credential flags. + pub ki_cr_flags: crate::u_int, + /// Process jail ID. + pub ki_jid: c_int, + /// Number of threads in total. + pub ki_numthreads: c_int, + /// Thread ID. + pub ki_tid: crate::lwpid_t, + /// Process priority. + pub ki_pri: crate::priority, + /// Process rusage statistics. + pub ki_rusage: crate::rusage, + /// rusage of children processes. + pub ki_rusage_ch: crate::rusage, + // This is normally "struct pcb". + /// Kernel virtual addr of pcb. + pub ki_pcb: *mut c_void, + /// Kernel virtual addr of stack. + pub ki_kstack: *mut c_void, + /// User convenience pointer. + pub ki_udata: *mut c_void, + // This is normally "struct thread". + pub ki_tdaddr: *mut c_void, + // This is normally "struct pwddesc". + /// Pointer to process paths info. + pub ki_pd: *mut c_void, + /// Address of the ext err msg place + pub ki_uerrmsg: *mut c_void, + pub ki_spareptrs: [*mut c_void; crate::KI_NSPARE_PTR], + pub ki_sparelongs: [c_long; crate::KI_NSPARE_LONG], + /// PS_* flags. + pub ki_sflag: c_long, + /// kthread flag. 
+ pub ki_tdflags: c_long, + } + + pub struct stat { + pub st_dev: crate::dev_t, + pub st_ino: crate::ino_t, + pub st_nlink: crate::nlink_t, + pub st_mode: crate::mode_t, + st_padding0: i16, + pub st_uid: crate::uid_t, + pub st_gid: crate::gid_t, + st_padding1: i32, + pub st_rdev: crate::dev_t, + #[cfg(target_arch = "x86")] + st_atim_ext: i32, + pub st_atime: crate::time_t, + pub st_atime_nsec: c_long, + #[cfg(target_arch = "x86")] + st_mtim_ext: i32, + pub st_mtime: crate::time_t, + pub st_mtime_nsec: c_long, + #[cfg(target_arch = "x86")] + st_ctim_ext: i32, + pub st_ctime: crate::time_t, + pub st_ctime_nsec: c_long, + #[cfg(target_arch = "x86")] + st_btim_ext: i32, + pub st_birthtime: crate::time_t, + pub st_birthtime_nsec: c_long, + pub st_size: off_t, + pub st_blocks: crate::blkcnt_t, + pub st_blksize: crate::blksize_t, + pub st_flags: crate::fflags_t, + pub st_gen: u64, + pub st_filerev: u64, + pub st_spare: [u64; 9], + } +} + +s_no_extra_traits! { + pub struct dirent { + pub d_fileno: crate::ino_t, + pub d_off: off_t, + pub d_reclen: u16, + pub d_type: u8, + d_pad0: u8, + pub d_namlen: u16, + d_pad1: u16, + pub d_name: [c_char; 256], + } + + pub struct statfs { + pub f_version: u32, + pub f_type: u32, + pub f_flags: u64, + pub f_bsize: u64, + pub f_iosize: u64, + pub f_blocks: u64, + pub f_bfree: u64, + pub f_bavail: i64, + pub f_files: u64, + pub f_ffree: i64, + pub f_syncwrites: u64, + pub f_asyncwrites: u64, + pub f_syncreads: u64, + pub f_asyncreads: u64, + f_spare: [u64; 10], + pub f_namemax: u32, + pub f_owner: crate::uid_t, + pub f_fsid: crate::fsid_t, + f_charspare: [c_char; 80], + pub f_fstypename: [c_char; 16], + pub f_mntfromname: [c_char; 1024], + pub f_mntonname: [c_char; 1024], + } + + pub struct vnstat { + pub vn_fileid: u64, + pub vn_size: u64, + pub vn_dev: u64, + pub vn_fsid: u64, + pub vn_mntdir: *mut c_char, + pub vn_type: c_int, + pub vn_mode: u16, + pub vn_devname: [c_char; crate::SPECNAMELEN as usize + 1], + } +} + +cfg_if! 
{ + if #[cfg(feature = "extra_traits")] { + impl PartialEq for statfs { + fn eq(&self, other: &statfs) -> bool { + self.f_version == other.f_version + && self.f_type == other.f_type + && self.f_flags == other.f_flags + && self.f_bsize == other.f_bsize + && self.f_iosize == other.f_iosize + && self.f_blocks == other.f_blocks + && self.f_bfree == other.f_bfree + && self.f_bavail == other.f_bavail + && self.f_files == other.f_files + && self.f_ffree == other.f_ffree + && self.f_syncwrites == other.f_syncwrites + && self.f_asyncwrites == other.f_asyncwrites + && self.f_syncreads == other.f_syncreads + && self.f_asyncreads == other.f_asyncreads + && self.f_namemax == other.f_namemax + && self.f_owner == other.f_owner + && self.f_fsid == other.f_fsid + && self.f_fstypename == other.f_fstypename + && self + .f_mntfromname + .iter() + .zip(other.f_mntfromname.iter()) + .all(|(a, b)| a == b) + && self + .f_mntonname + .iter() + .zip(other.f_mntonname.iter()) + .all(|(a, b)| a == b) + } + } + impl Eq for statfs {} + impl hash::Hash for statfs { + fn hash(&self, state: &mut H) { + self.f_version.hash(state); + self.f_type.hash(state); + self.f_flags.hash(state); + self.f_bsize.hash(state); + self.f_iosize.hash(state); + self.f_blocks.hash(state); + self.f_bfree.hash(state); + self.f_bavail.hash(state); + self.f_files.hash(state); + self.f_ffree.hash(state); + self.f_syncwrites.hash(state); + self.f_asyncwrites.hash(state); + self.f_syncreads.hash(state); + self.f_asyncreads.hash(state); + self.f_namemax.hash(state); + self.f_owner.hash(state); + self.f_fsid.hash(state); + self.f_charspare.hash(state); + self.f_fstypename.hash(state); + self.f_mntfromname.hash(state); + self.f_mntonname.hash(state); + } + } + + impl PartialEq for dirent { + fn eq(&self, other: &dirent) -> bool { + self.d_fileno == other.d_fileno + && self.d_off == other.d_off + && self.d_reclen == other.d_reclen + && self.d_type == other.d_type + && self.d_namlen == other.d_namlen + && self.d_name[..self.d_namlen as _] + .iter() + .zip(other.d_name.iter()) + .all(|(a, b)| a == b) + } + } + impl Eq for dirent {} + impl hash::Hash for dirent { + fn hash(&self, state: &mut H) { + self.d_fileno.hash(state); + self.d_off.hash(state); + self.d_reclen.hash(state); + self.d_type.hash(state); + self.d_namlen.hash(state); + self.d_name[..self.d_namlen as _].hash(state); + } + } + + impl PartialEq for vnstat { + fn eq(&self, other: &vnstat) -> bool { + let self_vn_devname: &[c_char] = &self.vn_devname; + let other_vn_devname: &[c_char] = &other.vn_devname; + + self.vn_fileid == other.vn_fileid + && self.vn_size == other.vn_size + && self.vn_dev == other.vn_dev + && self.vn_fsid == other.vn_fsid + && self.vn_mntdir == other.vn_mntdir + && self.vn_type == other.vn_type + && self.vn_mode == other.vn_mode + && self_vn_devname == other_vn_devname + } + } + impl Eq for vnstat {} + impl hash::Hash for vnstat { + fn hash(&self, state: &mut H) { + let self_vn_devname: &[c_char] = &self.vn_devname; + + self.vn_fileid.hash(state); + self.vn_size.hash(state); + self.vn_dev.hash(state); + self.vn_fsid.hash(state); + self.vn_mntdir.hash(state); + self.vn_type.hash(state); + self.vn_mode.hash(state); + self_vn_devname.hash(state); + } + } + } +} + +pub const RAND_MAX: c_int = 0x7fff_ffff; +pub const ELAST: c_int = 97; + +pub const KF_TYPE_EVENTFD: c_int = 13; + +/// max length of devicename +pub const SPECNAMELEN: c_int = 255; +pub const KI_NSPARE_PTR: usize = 4; + +/// domainset policies +pub const DOMAINSET_POLICY_INVALID: c_int = 0; +pub const 
DOMAINSET_POLICY_ROUNDROBIN: c_int = 1;
+pub const DOMAINSET_POLICY_FIRSTTOUCH: c_int = 2;
+pub const DOMAINSET_POLICY_PREFER: c_int = 3;
+pub const DOMAINSET_POLICY_INTERLEAVE: c_int = 4;
+
+pub const MINCORE_SUPER: c_int = 0x60;
+
+safe_f! {
+    pub const fn makedev(major: c_uint, minor: c_uint) -> crate::dev_t {
+        let major = major as crate::dev_t;
+        let minor = minor as crate::dev_t;
+        let mut dev = 0;
+        dev |= ((major & 0xffffff00) as dev_t) << 32;
+        dev |= ((major & 0x000000ff) as dev_t) << 8;
+        dev |= ((minor & 0x0000ff00) as dev_t) << 24;
+        dev |= ((minor & 0xffff00ff) as dev_t) << 0;
+        dev
+    }
+
+    pub const fn major(dev: crate::dev_t) -> c_int {
+        (((dev >> 32) & 0xffffff00) | ((dev >> 8) & 0xff)) as c_int
+    }
+
+    pub const fn minor(dev: crate::dev_t) -> c_int {
+        (((dev >> 24) & 0xff00) | (dev & 0xffff00ff)) as c_int
+    }
+}
+
+extern "C" {
+    pub fn setgrent();
+    pub fn mprotect(addr: *mut c_void, len: size_t, prot: c_int) -> c_int;
+    pub fn freelocale(loc: crate::locale_t);
+    pub fn msgrcv(
+        msqid: c_int,
+        msgp: *mut c_void,
+        msgsz: size_t,
+        msgtyp: c_long,
+        msgflg: c_int,
+    ) -> ssize_t;
+
+    pub fn cpuset_getdomain(
+        level: crate::cpulevel_t,
+        which: crate::cpuwhich_t,
+        id: crate::id_t,
+        setsize: size_t,
+        mask: *mut crate::domainset_t,
+        policy: *mut c_int,
+    ) -> c_int;
+    pub fn cpuset_setdomain(
+        level: crate::cpulevel_t,
+        which: crate::cpuwhich_t,
+        id: crate::id_t,
+        setsize: size_t,
+        mask: *const crate::domainset_t,
+        policy: c_int,
+    ) -> c_int;
+
+    pub fn dirname(path: *mut c_char) -> *mut c_char;
+    pub fn basename(path: *mut c_char) -> *mut c_char;
+
+    pub fn qsort_r(
+        base: *mut c_void,
+        num: size_t,
+        size: size_t,
+        compar: Option<unsafe extern "C" fn(*const c_void, *const c_void, *mut c_void) -> c_int>,
+        arg: *mut c_void,
+    );
+}
+
+#[link(name = "kvm")]
+extern "C" {
+    pub fn kvm_kerndisp(kd: *mut crate::kvm_t) -> crate::kssize_t;
+}
+
+cfg_if!
{ + if #[cfg(target_arch = "x86_64")] { + mod x86_64; + pub use self::x86_64::*; + } +} diff --git a/vendor/libc/src/unix/bsd/freebsdlike/freebsd/freebsd15/x86_64.rs b/vendor/libc/src/unix/bsd/freebsdlike/freebsd/freebsd15/x86_64.rs new file mode 100644 index 00000000000000..3e037471fbf68b --- /dev/null +++ b/vendor/libc/src/unix/bsd/freebsdlike/freebsd/freebsd15/x86_64.rs @@ -0,0 +1,14 @@ +use crate::prelude::*; + +pub const PROC_KPTI_CTL: c_int = crate::PROC_PROCCTL_MD_MIN; +pub const PROC_KPTI_CTL_ENABLE_ON_EXEC: c_int = 1; +pub const PROC_KPTI_CTL_DISABLE_ON_EXEC: c_int = 2; +pub const PROC_KPTI_STATUS: c_int = crate::PROC_PROCCTL_MD_MIN + 1; +pub const PROC_KPTI_STATUS_ACTIVE: c_int = 0x80000000; +pub const PROC_LA_CTL: c_int = crate::PROC_PROCCTL_MD_MIN + 2; +pub const PROC_LA_STATUS: c_int = crate::PROC_PROCCTL_MD_MIN + 3; +pub const PROC_LA_CTL_LA48_ON_EXEC: c_int = 1; +pub const PROC_LA_CTL_LA57_ON_EXEC: c_int = 2; +pub const PROC_LA_CTL_DEFAULT_ON_EXEC: c_int = 3; +pub const PROC_LA_STATUS_LA48: c_int = 0x01000000; +pub const PROC_LA_STATUS_LA57: c_int = 0x02000000; diff --git a/vendor/libc/src/unix/bsd/freebsdlike/freebsd/mod.rs b/vendor/libc/src/unix/bsd/freebsdlike/freebsd/mod.rs new file mode 100644 index 00000000000000..a5166d4e15c75a --- /dev/null +++ b/vendor/libc/src/unix/bsd/freebsdlike/freebsd/mod.rs @@ -0,0 +1,5659 @@ +use crate::prelude::*; +use crate::{cmsghdr, off_t}; + +pub type fflags_t = u32; + +pub type vm_prot_t = u_char; +pub type kvaddr_t = u64; +pub type segsz_t = isize; +pub type __fixpt_t = u32; +pub type fixpt_t = __fixpt_t; +pub type __lwpid_t = i32; +pub type lwpid_t = __lwpid_t; +pub type blksize_t = i32; +pub type ksize_t = u64; +pub type inp_gen_t = u64; +pub type so_gen_t = u64; +pub type clockid_t = c_int; +pub type sem_t = _sem; +pub type timer_t = *mut __c_anonymous__timer; + +pub type fsblkcnt_t = u64; +pub type fsfilcnt_t = u64; +pub type idtype_t = c_uint; + +pub type msglen_t = c_ulong; +pub type msgqnum_t = c_ulong; + +pub type cpulevel_t = c_int; +pub type cpuwhich_t = c_int; + +pub type mqd_t = *mut c_void; + +pub type pthread_spinlock_t = *mut __c_anonymous_pthread_spinlock; +pub type pthread_barrierattr_t = *mut __c_anonymous_pthread_barrierattr; +pub type pthread_barrier_t = *mut __c_anonymous_pthread_barrier; + +pub type uuid_t = crate::uuid; +pub type u_int = c_uint; +pub type u_char = c_uchar; +pub type u_long = c_ulong; +pub type u_short = c_ushort; + +pub type caddr_t = *mut c_char; + +pub type fhandle_t = fhandle; + +pub type au_id_t = crate::uid_t; +pub type au_asid_t = crate::pid_t; + +pub type cpusetid_t = c_int; + +pub type sctp_assoc_t = u32; + +pub type eventfd_t = u64; + +#[derive(Debug)] +#[cfg_attr(feature = "extra_traits", derive(Hash, PartialEq, Eq))] +#[repr(u32)] +pub enum devstat_support_flags { + DEVSTAT_ALL_SUPPORTED = 0x00, + DEVSTAT_NO_BLOCKSIZE = 0x01, + DEVSTAT_NO_ORDERED_TAGS = 0x02, + DEVSTAT_BS_UNAVAILABLE = 0x04, +} +impl Copy for devstat_support_flags {} +impl Clone for devstat_support_flags { + fn clone(&self) -> devstat_support_flags { + *self + } +} + +#[derive(Debug)] +#[cfg_attr(feature = "extra_traits", derive(Hash, PartialEq, Eq))] +#[repr(u32)] +pub enum devstat_trans_flags { + DEVSTAT_NO_DATA = 0x00, + DEVSTAT_READ = 0x01, + DEVSTAT_WRITE = 0x02, + DEVSTAT_FREE = 0x03, +} + +impl Copy for devstat_trans_flags {} +impl Clone for devstat_trans_flags { + fn clone(&self) -> devstat_trans_flags { + *self + } +} + +#[derive(Debug)] +#[cfg_attr(feature = "extra_traits", derive(Hash, PartialEq, Eq))] 
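+// These #[repr(u32)] enums mirror the C enums from FreeBSD's <devstat.h>:
+// the repr pins the discriminant width to the C ABI, the comparison traits
+// are only derived behind the optional "extra_traits" feature, and Copy/Clone
+// are written out by hand. A consumer might use them like this (illustrative
+// sketch only, not part of the vendored source):
+//
+//     fn is_direct_access(d: &libc::devstat) -> bool {
+//         matches!(d.device_type, libc::devstat_type_flags::DEVSTAT_TYPE_DIRECT)
+//     }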
+#[repr(u32)] +pub enum devstat_tag_type { + DEVSTAT_TAG_SIMPLE = 0x00, + DEVSTAT_TAG_HEAD = 0x01, + DEVSTAT_TAG_ORDERED = 0x02, + DEVSTAT_TAG_NONE = 0x03, +} +impl Copy for devstat_tag_type {} +impl Clone for devstat_tag_type { + fn clone(&self) -> devstat_tag_type { + *self + } +} + +#[derive(Debug)] +#[cfg_attr(feature = "extra_traits", derive(Hash, PartialEq, Eq))] +#[repr(u32)] +pub enum devstat_match_flags { + DEVSTAT_MATCH_NONE = 0x00, + DEVSTAT_MATCH_TYPE = 0x01, + DEVSTAT_MATCH_IF = 0x02, + DEVSTAT_MATCH_PASS = 0x04, +} +impl Copy for devstat_match_flags {} +impl Clone for devstat_match_flags { + fn clone(&self) -> devstat_match_flags { + *self + } +} + +#[derive(Debug)] +#[cfg_attr(feature = "extra_traits", derive(Hash, PartialEq, Eq))] +#[repr(u32)] +pub enum devstat_priority { + DEVSTAT_PRIORITY_MIN = 0x000, + DEVSTAT_PRIORITY_OTHER = 0x020, + DEVSTAT_PRIORITY_PASS = 0x030, + DEVSTAT_PRIORITY_FD = 0x040, + DEVSTAT_PRIORITY_WFD = 0x050, + DEVSTAT_PRIORITY_TAPE = 0x060, + DEVSTAT_PRIORITY_CD = 0x090, + DEVSTAT_PRIORITY_DISK = 0x110, + DEVSTAT_PRIORITY_ARRAY = 0x120, + DEVSTAT_PRIORITY_MAX = 0xfff, +} +impl Copy for devstat_priority {} +impl Clone for devstat_priority { + fn clone(&self) -> devstat_priority { + *self + } +} + +#[derive(Debug)] +#[cfg_attr(feature = "extra_traits", derive(Hash, PartialEq, Eq))] +#[repr(u32)] +pub enum devstat_type_flags { + DEVSTAT_TYPE_DIRECT = 0x000, + DEVSTAT_TYPE_SEQUENTIAL = 0x001, + DEVSTAT_TYPE_PRINTER = 0x002, + DEVSTAT_TYPE_PROCESSOR = 0x003, + DEVSTAT_TYPE_WORM = 0x004, + DEVSTAT_TYPE_CDROM = 0x005, + DEVSTAT_TYPE_SCANNER = 0x006, + DEVSTAT_TYPE_OPTICAL = 0x007, + DEVSTAT_TYPE_CHANGER = 0x008, + DEVSTAT_TYPE_COMM = 0x009, + DEVSTAT_TYPE_ASC0 = 0x00a, + DEVSTAT_TYPE_ASC1 = 0x00b, + DEVSTAT_TYPE_STORARRAY = 0x00c, + DEVSTAT_TYPE_ENCLOSURE = 0x00d, + DEVSTAT_TYPE_FLOPPY = 0x00e, + DEVSTAT_TYPE_MASK = 0x00f, + DEVSTAT_TYPE_IF_SCSI = 0x010, + DEVSTAT_TYPE_IF_IDE = 0x020, + DEVSTAT_TYPE_IF_OTHER = 0x030, + DEVSTAT_TYPE_IF_MASK = 0x0f0, + DEVSTAT_TYPE_PASS = 0x100, +} +impl Copy for devstat_type_flags {} +impl Clone for devstat_type_flags { + fn clone(&self) -> devstat_type_flags { + *self + } +} + +#[derive(Debug)] +#[cfg_attr(feature = "extra_traits", derive(Hash, PartialEq, Eq))] +#[repr(u32)] +pub enum devstat_metric { + DSM_NONE, + DSM_TOTAL_BYTES, + DSM_TOTAL_BYTES_READ, + DSM_TOTAL_BYTES_WRITE, + DSM_TOTAL_TRANSFERS, + DSM_TOTAL_TRANSFERS_READ, + DSM_TOTAL_TRANSFERS_WRITE, + DSM_TOTAL_TRANSFERS_OTHER, + DSM_TOTAL_BLOCKS, + DSM_TOTAL_BLOCKS_READ, + DSM_TOTAL_BLOCKS_WRITE, + DSM_KB_PER_TRANSFER, + DSM_KB_PER_TRANSFER_READ, + DSM_KB_PER_TRANSFER_WRITE, + DSM_TRANSFERS_PER_SECOND, + DSM_TRANSFERS_PER_SECOND_READ, + DSM_TRANSFERS_PER_SECOND_WRITE, + DSM_TRANSFERS_PER_SECOND_OTHER, + DSM_MB_PER_SECOND, + DSM_MB_PER_SECOND_READ, + DSM_MB_PER_SECOND_WRITE, + DSM_BLOCKS_PER_SECOND, + DSM_BLOCKS_PER_SECOND_READ, + DSM_BLOCKS_PER_SECOND_WRITE, + DSM_MS_PER_TRANSACTION, + DSM_MS_PER_TRANSACTION_READ, + DSM_MS_PER_TRANSACTION_WRITE, + DSM_SKIP, + DSM_TOTAL_BYTES_FREE, + DSM_TOTAL_TRANSFERS_FREE, + DSM_TOTAL_BLOCKS_FREE, + DSM_KB_PER_TRANSFER_FREE, + DSM_MB_PER_SECOND_FREE, + DSM_TRANSFERS_PER_SECOND_FREE, + DSM_BLOCKS_PER_SECOND_FREE, + DSM_MS_PER_TRANSACTION_OTHER, + DSM_MS_PER_TRANSACTION_FREE, + DSM_BUSY_PCT, + DSM_QUEUE_LENGTH, + DSM_TOTAL_DURATION, + DSM_TOTAL_DURATION_READ, + DSM_TOTAL_DURATION_WRITE, + DSM_TOTAL_DURATION_FREE, + DSM_TOTAL_DURATION_OTHER, + DSM_TOTAL_BUSY_TIME, + DSM_MAX, +} +impl Copy for devstat_metric {} +impl Clone for 
devstat_metric { + fn clone(&self) -> devstat_metric { + *self + } +} + +#[derive(Debug)] +#[cfg_attr(feature = "extra_traits", derive(Hash, PartialEq, Eq))] +#[repr(u32)] +pub enum devstat_select_mode { + DS_SELECT_ADD, + DS_SELECT_ONLY, + DS_SELECT_REMOVE, + DS_SELECT_ADDONLY, +} +impl Copy for devstat_select_mode {} +impl Clone for devstat_select_mode { + fn clone(&self) -> devstat_select_mode { + *self + } +} + +s! { + pub struct aiocb { + pub aio_fildes: c_int, + pub aio_offset: off_t, + pub aio_buf: *mut c_void, + pub aio_nbytes: size_t, + __unused1: [c_int; 2], + __unused2: *mut c_void, + pub aio_lio_opcode: c_int, + pub aio_reqprio: c_int, + // unused 3 through 5 are the __aiocb_private structure + __unused3: c_long, + __unused4: c_long, + __unused5: *mut c_void, + pub aio_sigevent: sigevent, + } + + pub struct jail { + pub version: u32, + pub path: *mut c_char, + pub hostname: *mut c_char, + pub jailname: *mut c_char, + pub ip4s: c_uint, + pub ip6s: c_uint, + pub ip4: *mut crate::in_addr, + pub ip6: *mut crate::in6_addr, + } + + pub struct statvfs { + pub f_bavail: crate::fsblkcnt_t, + pub f_bfree: crate::fsblkcnt_t, + pub f_blocks: crate::fsblkcnt_t, + pub f_favail: crate::fsfilcnt_t, + pub f_ffree: crate::fsfilcnt_t, + pub f_files: crate::fsfilcnt_t, + pub f_bsize: c_ulong, + pub f_flag: c_ulong, + pub f_frsize: c_ulong, + pub f_fsid: c_ulong, + pub f_namemax: c_ulong, + } + + // internal structure has changed over time + pub struct _sem { + data: [u32; 4], + } + pub struct sembuf { + pub sem_num: c_ushort, + pub sem_op: c_short, + pub sem_flg: c_short, + } + + pub struct input_event { + pub time: crate::timeval, + pub type_: crate::u_short, + pub code: crate::u_short, + pub value: i32, + } + + pub struct input_absinfo { + pub value: i32, + pub minimum: i32, + pub maximum: i32, + pub fuzz: i32, + pub flat: i32, + pub resolution: i32, + } + + pub struct msqid_ds { + pub msg_perm: crate::ipc_perm, + __unused1: *mut c_void, + __unused2: *mut c_void, + pub msg_cbytes: crate::msglen_t, + pub msg_qnum: crate::msgqnum_t, + pub msg_qbytes: crate::msglen_t, + pub msg_lspid: crate::pid_t, + pub msg_lrpid: crate::pid_t, + pub msg_stime: crate::time_t, + pub msg_rtime: crate::time_t, + pub msg_ctime: crate::time_t, + } + + pub struct stack_t { + pub ss_sp: *mut c_void, + pub ss_size: size_t, + pub ss_flags: c_int, + } + + pub struct mmsghdr { + pub msg_hdr: crate::msghdr, + pub msg_len: ssize_t, + } + + pub struct sockcred { + pub sc_uid: crate::uid_t, + pub sc_euid: crate::uid_t, + pub sc_gid: crate::gid_t, + pub sc_egid: crate::gid_t, + pub sc_ngroups: c_int, + pub sc_groups: [crate::gid_t; 1], + } + + pub struct ptrace_vm_entry { + pub pve_entry: c_int, + pub pve_timestamp: c_int, + pub pve_start: c_ulong, + pub pve_end: c_ulong, + pub pve_offset: c_ulong, + pub pve_prot: c_uint, + pub pve_pathlen: c_uint, + pub pve_fileid: c_long, + pub pve_fsid: u32, + pub pve_path: *mut c_char, + } + + pub struct ptrace_lwpinfo { + pub pl_lwpid: lwpid_t, + pub pl_event: c_int, + pub pl_flags: c_int, + pub pl_sigmask: crate::sigset_t, + pub pl_siglist: crate::sigset_t, + pub pl_siginfo: crate::siginfo_t, + pub pl_tdname: [c_char; crate::MAXCOMLEN as usize + 1], + pub pl_child_pid: crate::pid_t, + pub pl_syscall_code: c_uint, + pub pl_syscall_narg: c_uint, + } + + pub struct ptrace_sc_ret { + pub sr_retval: [crate::register_t; 2], + pub sr_error: c_int, + } + + pub struct ptrace_coredump { + pub pc_fd: c_int, + pub pc_flags: u32, + pub pc_limit: off_t, + } + + pub struct ptrace_sc_remote { + pub 
pscr_ret: ptrace_sc_ret, + pub pscr_syscall: c_uint, + pub pscr_nargs: c_uint, + pub pscr_args: *mut crate::register_t, + } + + pub struct cpuset_t { + #[cfg(all(any(freebsd15, freebsd14), target_pointer_width = "64"))] + __bits: [c_long; 16], + #[cfg(all(any(freebsd15, freebsd14), target_pointer_width = "32"))] + __bits: [c_long; 32], + #[cfg(all(not(any(freebsd15, freebsd14)), target_pointer_width = "64"))] + __bits: [c_long; 4], + #[cfg(all(not(any(freebsd15, freebsd14)), target_pointer_width = "32"))] + __bits: [c_long; 8], + } + + pub struct cap_rights_t { + cr_rights: [u64; 2], + } + + pub struct umutex { + m_owner: crate::lwpid_t, + m_flags: u32, + m_ceilings: [u32; 2], + m_rb_link: crate::uintptr_t, + #[cfg(target_pointer_width = "32")] + m_pad: u32, + m_spare: [u32; 2], + } + + pub struct ucond { + c_has_waiters: u32, + c_flags: u32, + c_clockid: u32, + c_spare: [u32; 1], + } + + pub struct uuid { + pub time_low: u32, + pub time_mid: u16, + pub time_hi_and_version: u16, + pub clock_seq_hi_and_reserved: u8, + pub clock_seq_low: u8, + pub node: [u8; _UUID_NODE_LEN], + } + + pub struct __c_anonymous_pthread_spinlock { + s_clock: umutex, + } + + pub struct __c_anonymous_pthread_barrierattr { + pshared: c_int, + } + + pub struct __c_anonymous_pthread_barrier { + b_lock: umutex, + b_cv: ucond, + b_cycle: i64, + b_count: c_int, + b_waiters: c_int, + b_refcount: c_int, + b_destroying: c_int, + } + + pub struct kinfo_vmentry { + pub kve_structsize: c_int, + pub kve_type: c_int, + pub kve_start: u64, + pub kve_end: u64, + pub kve_offset: u64, + pub kve_vn_fileid: u64, + #[cfg(not(freebsd11))] + pub kve_vn_fsid_freebsd11: u32, + #[cfg(freebsd11)] + pub kve_vn_fsid: u32, + pub kve_flags: c_int, + pub kve_resident: c_int, + pub kve_private_resident: c_int, + pub kve_protection: c_int, + pub kve_ref_count: c_int, + pub kve_shadow_count: c_int, + pub kve_vn_type: c_int, + pub kve_vn_size: u64, + #[cfg(not(freebsd11))] + pub kve_vn_rdev_freebsd11: u32, + #[cfg(freebsd11)] + pub kve_vn_rdev: u32, + pub kve_vn_mode: u16, + pub kve_status: u16, + #[cfg(not(freebsd11))] + pub kve_vn_fsid: u64, + #[cfg(not(freebsd11))] + pub kve_vn_rdev: u64, + #[cfg(not(freebsd11))] + _kve_is_spare: [c_int; 8], + #[cfg(freebsd11)] + _kve_is_spare: [c_int; 12], + pub kve_path: [[c_char; 32]; 32], + } + + pub struct __c_anonymous_filestat { + pub stqe_next: *mut filestat, + } + + pub struct filestat { + pub fs_type: c_int, + pub fs_flags: c_int, + pub fs_fflags: c_int, + pub fs_uflags: c_int, + pub fs_fd: c_int, + pub fs_ref_count: c_int, + pub fs_offset: off_t, + pub fs_typedep: *mut c_void, + pub fs_path: *mut c_char, + pub next: __c_anonymous_filestat, + pub fs_cap_rights: cap_rights_t, + } + + pub struct filestat_list { + pub stqh_first: *mut filestat, + pub stqh_last: *mut *mut filestat, + } + + pub struct procstat { + pub tpe: c_int, + pub kd: crate::uintptr_t, + pub vmentries: *mut c_void, + pub files: *mut c_void, + pub argv: *mut c_void, + pub envv: *mut c_void, + pub core: crate::uintptr_t, + } + + pub struct itimerspec { + pub it_interval: crate::timespec, + pub it_value: crate::timespec, + } + + pub struct __c_anonymous__timer { + _priv: [c_int; 3], + } + + /// Used to hold a copy of the command line, if it had a sane length. + pub struct pargs { + /// Reference count. + pub ar_ref: u_int, + /// Length. + pub ar_length: u_int, + /// Arguments. + pub ar_args: [c_uchar; 1], + } + + pub struct priority { + /// Scheduling class. + pub pri_class: u_char, + /// Normal priority level. 
+ pub pri_level: u_char, + /// Priority before propagation. + pub pri_native: u_char, + /// User priority based on p_cpu and p_nice. + pub pri_user: u_char, + } + + pub struct kvm_swap { + pub ksw_devname: [c_char; 32], + pub ksw_used: u_int, + pub ksw_total: u_int, + pub ksw_flags: c_int, + pub ksw_reserved1: u_int, + pub ksw_reserved2: u_int, + } + + pub struct nlist { + /// symbol name (in memory) + pub n_name: *const c_char, + /// type defines + pub n_type: c_uchar, + /// "type" and binding information + pub n_other: c_char, + /// used by stab entries + pub n_desc: c_short, + pub n_value: c_ulong, + } + + pub struct kvm_nlist { + pub n_name: *const c_char, + pub n_type: c_uchar, + pub n_value: crate::kvaddr_t, + } + + pub struct __c_anonymous_sem { + _priv: crate::uintptr_t, + } + + pub struct semid_ds { + pub sem_perm: crate::ipc_perm, + pub __sem_base: *mut __c_anonymous_sem, + pub sem_nsems: c_ushort, + pub sem_otime: crate::time_t, + pub sem_ctime: crate::time_t, + } + + pub struct vmtotal { + pub t_vm: u64, + pub t_avm: u64, + pub t_rm: u64, + pub t_arm: u64, + pub t_vmshr: u64, + pub t_avmshr: u64, + pub t_rmshr: u64, + pub t_armshr: u64, + pub t_free: u64, + pub t_rq: i16, + pub t_dw: i16, + pub t_pw: i16, + pub t_sl: i16, + pub t_sw: i16, + pub t_pad: [u16; 3], + } + + pub struct sockstat { + pub inp_ppcb: u64, + pub so_addr: u64, + pub so_pcb: u64, + pub unp_conn: u64, + pub dom_family: c_int, + pub proto: c_int, + pub so_rcv_sb_state: c_int, + pub so_snd_sb_state: c_int, + /// Socket address. + pub sa_local: crate::sockaddr_storage, + /// Peer address. + pub sa_peer: crate::sockaddr_storage, + pub type_: c_int, + pub dname: [c_char; 32], + #[cfg(any(freebsd12, freebsd13, freebsd14, freebsd15))] + pub sendq: c_uint, + #[cfg(any(freebsd12, freebsd13, freebsd14, freebsd15))] + pub recvq: c_uint, + } + + pub struct shmstat { + pub size: u64, + pub mode: u16, + } + + pub struct spacectl_range { + pub r_offset: off_t, + pub r_len: off_t, + } + + pub struct rusage_ext { + pub rux_runtime: u64, + pub rux_uticks: u64, + pub rux_sticks: u64, + pub rux_iticks: u64, + pub rux_uu: u64, + pub rux_su: u64, + pub rux_tu: u64, + } + + pub struct if_clonereq { + pub ifcr_total: c_int, + pub ifcr_count: c_int, + pub ifcr_buffer: *mut c_char, + } + + pub struct if_msghdr { + /// to skip over non-understood messages + pub ifm_msglen: c_ushort, + /// future binary compatibility + pub ifm_version: c_uchar, + /// message type + pub ifm_type: c_uchar, + /// like rtm_addrs + pub ifm_addrs: c_int, + /// value of if_flags + pub ifm_flags: c_int, + /// index for associated ifp + pub ifm_index: c_ushort, + pub _ifm_spare1: c_ushort, + /// statistics and other data about if + pub ifm_data: if_data, + } + + pub struct if_msghdrl { + /// to skip over non-understood messages + pub ifm_msglen: c_ushort, + /// future binary compatibility + pub ifm_version: c_uchar, + /// message type + pub ifm_type: c_uchar, + /// like rtm_addrs + pub ifm_addrs: c_int, + /// value of if_flags + pub ifm_flags: c_int, + /// index for associated ifp + pub ifm_index: c_ushort, + /// spare space to grow if_index, see if_var.h + pub _ifm_spare1: c_ushort, + /// length of if_msghdrl incl. 
if_data + pub ifm_len: c_ushort, + /// offset of if_data from beginning + pub ifm_data_off: c_ushort, + pub _ifm_spare2: c_int, + /// statistics and other data about if + pub ifm_data: if_data, + } + + pub struct ifa_msghdr { + /// to skip over non-understood messages + pub ifam_msglen: c_ushort, + /// future binary compatibility + pub ifam_version: c_uchar, + /// message type + pub ifam_type: c_uchar, + /// like rtm_addrs + pub ifam_addrs: c_int, + /// value of ifa_flags + pub ifam_flags: c_int, + /// index for associated ifp + pub ifam_index: c_ushort, + pub _ifam_spare1: c_ushort, + /// value of ifa_ifp->if_metric + pub ifam_metric: c_int, + } + + pub struct ifa_msghdrl { + /// to skip over non-understood messages + pub ifam_msglen: c_ushort, + /// future binary compatibility + pub ifam_version: c_uchar, + /// message type + pub ifam_type: c_uchar, + /// like rtm_addrs + pub ifam_addrs: c_int, + /// value of ifa_flags + pub ifam_flags: c_int, + /// index for associated ifp + pub ifam_index: c_ushort, + /// spare space to grow if_index, see if_var.h + pub _ifam_spare1: c_ushort, + /// length of ifa_msghdrl incl. if_data + pub ifam_len: c_ushort, + /// offset of if_data from beginning + pub ifam_data_off: c_ushort, + /// value of ifa_ifp->if_metric + pub ifam_metric: c_int, + /// statistics and other data about if or address + pub ifam_data: if_data, + } + + pub struct ifma_msghdr { + /// to skip over non-understood messages + pub ifmam_msglen: c_ushort, + /// future binary compatibility + pub ifmam_version: c_uchar, + /// message type + pub ifmam_type: c_uchar, + /// like rtm_addrs + pub ifmam_addrs: c_int, + /// value of ifa_flags + pub ifmam_flags: c_int, + /// index for associated ifp + pub ifmam_index: c_ushort, + pub _ifmam_spare1: c_ushort, + } + + pub struct if_announcemsghdr { + /// to skip over non-understood messages + pub ifan_msglen: c_ushort, + /// future binary compatibility + pub ifan_version: c_uchar, + /// message type + pub ifan_type: c_uchar, + /// index for associated ifp + pub ifan_index: c_ushort, + /// if name, e.g. "en0" + pub ifan_name: [c_char; crate::IFNAMSIZ as usize], + /// what type of announcement + pub ifan_what: c_ushort, + } + + pub struct ifreq_buffer { + pub length: size_t, + pub buffer: *mut c_void, + } + + pub struct ifaliasreq { + /// if name, e.g. "en0" + pub ifra_name: [c_char; crate::IFNAMSIZ as usize], + pub ifra_addr: crate::sockaddr, + pub ifra_broadaddr: crate::sockaddr, + pub ifra_mask: crate::sockaddr, + pub ifra_vhid: c_int, + } + + /// 9.x compat + pub struct oifaliasreq { + /// if name, e.g. "en0" + pub ifra_name: [c_char; crate::IFNAMSIZ as usize], + pub ifra_addr: crate::sockaddr, + pub ifra_broadaddr: crate::sockaddr, + pub ifra_mask: crate::sockaddr, + } + + pub struct ifmediareq { + /// if name, e.g. "en0" + pub ifm_name: [c_char; crate::IFNAMSIZ as usize], + /// current media options + pub ifm_current: c_int, + /// don't care mask + pub ifm_mask: c_int, + /// media status + pub ifm_status: c_int, + /// active options + pub ifm_active: c_int, + /// # entries in ifm_ulist array + pub ifm_count: c_int, + /// media words + pub ifm_ulist: *mut c_int, + } + + pub struct ifdrv { + /// if name, e.g. 
"en0" + pub ifd_name: [c_char; crate::IFNAMSIZ as usize], + pub ifd_cmd: c_ulong, + pub ifd_len: size_t, + pub ifd_data: *mut c_void, + } + + pub struct ifi2creq { + /// i2c address (0xA0, 0xA2) + pub dev_addr: u8, + /// read offset + pub offset: u8, + /// read length + pub len: u8, + pub spare0: u8, + pub spare1: u32, + /// read buffer + pub data: [u8; 8], + } + + pub struct ifrsshash { + /// if name, e.g. "en0" + pub ifrh_name: [c_char; crate::IFNAMSIZ as usize], + /// RSS_FUNC_ + pub ifrh_func: u8, + pub ifrh_spare0: u8, + pub ifrh_spare1: u16, + /// RSS_TYPE_ + pub ifrh_types: u32, + } + + pub struct ifmibdata { + /// name of interface + pub ifmd_name: [c_char; crate::IFNAMSIZ as usize], + /// number of promiscuous listeners + pub ifmd_pcount: c_int, + /// interface flags + pub ifmd_flags: c_int, + /// instantaneous length of send queue + pub ifmd_snd_len: c_int, + /// maximum length of send queue + pub ifmd_snd_maxlen: c_int, + /// number of drops in send queue + pub ifmd_snd_drops: c_int, + /// for future expansion + pub ifmd_filler: [c_int; 4], + /// generic information and statistics + pub ifmd_data: if_data, + } + + pub struct ifmib_iso_8802_3 { + pub dot3StatsAlignmentErrors: u32, + pub dot3StatsFCSErrors: u32, + pub dot3StatsSingleCollisionFrames: u32, + pub dot3StatsMultipleCollisionFrames: u32, + pub dot3StatsSQETestErrors: u32, + pub dot3StatsDeferredTransmissions: u32, + pub dot3StatsLateCollisions: u32, + pub dot3StatsExcessiveCollisions: u32, + pub dot3StatsInternalMacTransmitErrors: u32, + pub dot3StatsCarrierSenseErrors: u32, + pub dot3StatsFrameTooLongs: u32, + pub dot3StatsInternalMacReceiveErrors: u32, + pub dot3StatsEtherChipSet: u32, + pub dot3StatsMissedFrames: u32, + pub dot3StatsCollFrequencies: [u32; 16], + pub dot3Compliance: u32, + } + + pub struct __c_anonymous_ph { + pub ph1: u64, + pub ph2: u64, + } + + pub struct fid { + pub fid_len: c_ushort, + pub fid_data0: c_ushort, + pub fid_data: [c_char; crate::MAXFIDSZ as usize], + } + + pub struct fhandle { + pub fh_fsid: crate::fsid_t, + pub fh_fid: fid, + } + + pub struct bintime { + pub sec: crate::time_t, + pub frac: u64, + } + + pub struct clockinfo { + /// clock frequency + pub hz: c_int, + /// micro-seconds per hz tick + pub tick: c_int, + pub spare: c_int, + /// statistics clock frequency + pub stathz: c_int, + /// profiling clock frequency + pub profhz: c_int, + } + + pub struct __c_anonymous_stailq_entry_devstat { + pub stqe_next: *mut devstat, + } + + pub struct devstat { + /// Update sequence + pub sequence0: crate::u_int, + /// Allocated entry + pub allocated: c_int, + /// started ops + pub start_count: crate::u_int, + /// completed ops + pub end_count: crate::u_int, + /// busy time unaccounted for since this time + pub busy_from: bintime, + pub dev_links: __c_anonymous_stailq_entry_devstat, + /// Devstat device number. + pub device_number: u32, + pub device_name: [c_char; DEVSTAT_NAME_LEN as usize], + pub unit_number: c_int, + pub bytes: [u64; DEVSTAT_N_TRANS_FLAGS as usize], + pub operations: [u64; DEVSTAT_N_TRANS_FLAGS as usize], + pub duration: [bintime; DEVSTAT_N_TRANS_FLAGS as usize], + pub busy_time: bintime, + /// Time the device was created. + pub creation_time: bintime, + /// Block size, bytes + pub block_size: u32, + /// The number of simple, ordered, and head of queue tags sent. + pub tag_types: [u64; 3], + /// Which statistics are supported by a given device. + pub flags: devstat_support_flags, + /// Device type + pub device_type: devstat_type_flags, + /// Controls list pos. 
+ pub priority: devstat_priority, + /// Identification for GEOM nodes + pub id: *const c_void, + /// Update sequence + pub sequence1: crate::u_int, + } + + pub struct devstat_match { + pub match_fields: devstat_match_flags, + pub device_type: devstat_type_flags, + pub num_match_categories: c_int, + } + + pub struct devstat_match_table { + pub match_str: *const c_char, + pub type_: devstat_type_flags, + pub match_field: devstat_match_flags, + } + + pub struct device_selection { + pub device_number: u32, + pub device_name: [c_char; DEVSTAT_NAME_LEN as usize], + pub unit_number: c_int, + pub selected: c_int, + pub bytes: u64, + pub position: c_int, + } + + pub struct devinfo { + pub devices: *mut devstat, + pub mem_ptr: *mut u8, + pub generation: c_long, + pub numdevs: c_int, + } + + pub struct sockcred2 { + pub sc_version: c_int, + pub sc_pid: crate::pid_t, + pub sc_uid: crate::uid_t, + pub sc_euid: crate::uid_t, + pub sc_gid: crate::gid_t, + pub sc_egid: crate::gid_t, + pub sc_ngroups: c_int, + pub sc_groups: [crate::gid_t; 1], + } + + pub struct ifconf { + pub ifc_len: c_int, + pub ifc_ifcu: __c_anonymous_ifc_ifcu, + } + + pub struct au_mask_t { + pub am_success: c_uint, + pub am_failure: c_uint, + } + + pub struct au_tid_t { + pub port: u32, + pub machine: u32, + } + + pub struct auditinfo_t { + pub ai_auid: crate::au_id_t, + pub ai_mask: crate::au_mask_t, + pub ai_termid: au_tid_t, + pub ai_asid: crate::au_asid_t, + } + + pub struct tcp_fastopen { + pub enable: c_int, + pub psk: [u8; crate::TCP_FASTOPEN_PSK_LEN as usize], + } + + pub struct tcp_function_set { + pub function_set_name: [c_char; crate::TCP_FUNCTION_NAME_LEN_MAX as usize], + pub pcbcnt: u32, + } + + // Note: this structure will change in a backwards-incompatible way in + // FreeBSD 15. 
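+    // The release-specific fields below are gated on the freebsd14/freebsd15
+    // cfgs chosen by the crate's build script, and __tcpi_pad is sized per
+    // release so that the struct's overall size keeps matching the kernel ABI
+    // the crate was configured for.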
+ pub struct tcp_info { + pub tcpi_state: u8, + pub __tcpi_ca_state: u8, + pub __tcpi_retransmits: u8, + pub __tcpi_probes: u8, + pub __tcpi_backoff: u8, + pub tcpi_options: u8, + pub tcp_snd_wscale: u8, + pub tcp_rcv_wscale: u8, + pub tcpi_rto: u32, + pub __tcpi_ato: u32, + pub tcpi_snd_mss: u32, + pub tcpi_rcv_mss: u32, + pub __tcpi_unacked: u32, + pub __tcpi_sacked: u32, + pub __tcpi_lost: u32, + pub __tcpi_retrans: u32, + pub __tcpi_fackets: u32, + pub __tcpi_last_data_sent: u32, + pub __tcpi_last_ack_sent: u32, + pub tcpi_last_data_recv: u32, + pub __tcpi_last_ack_recv: u32, + pub __tcpi_pmtu: u32, + pub __tcpi_rcv_ssthresh: u32, + pub tcpi_rtt: u32, + pub tcpi_rttvar: u32, + pub tcpi_snd_ssthresh: u32, + pub tcpi_snd_cwnd: u32, + pub __tcpi_advmss: u32, + pub __tcpi_reordering: u32, + pub __tcpi_rcv_rtt: u32, + pub tcpi_rcv_space: u32, + pub tcpi_snd_wnd: u32, + pub tcpi_snd_bwnd: u32, + pub tcpi_snd_nxt: u32, + pub tcpi_rcv_nxt: u32, + pub tcpi_toe_tid: u32, + pub tcpi_snd_rexmitpack: u32, + pub tcpi_rcv_ooopack: u32, + pub tcpi_snd_zerowin: u32, + #[cfg(any(freebsd15, freebsd14))] + pub tcpi_delivered_ce: u32, + #[cfg(any(freebsd15, freebsd14))] + pub tcpi_received_ce: u32, + #[cfg(any(freebsd15, freebsd14))] + pub __tcpi_delivered_e1_bytes: u32, + #[cfg(any(freebsd15, freebsd14))] + pub __tcpi_delivered_e0_bytes: u32, + #[cfg(any(freebsd15, freebsd14))] + pub __tcpi_delivered_ce_bytes: u32, + #[cfg(any(freebsd15, freebsd14))] + pub __tcpi_received_e1_bytes: u32, + #[cfg(any(freebsd15, freebsd14))] + pub __tcpi_received_e0_bytes: u32, + #[cfg(any(freebsd15, freebsd14))] + pub __tcpi_received_ce_bytes: u32, + #[cfg(any(freebsd15, freebsd14))] + pub tcpi_total_tlp: u32, + #[cfg(any(freebsd15, freebsd14))] + pub tcpi_total_tlp_bytes: u64, + #[cfg(any(freebsd15, freebsd14))] + pub tcpi_snd_una: u32, + #[cfg(any(freebsd15, freebsd14))] + pub tcpi_snd_max: u32, + #[cfg(any(freebsd15, freebsd14))] + pub tcpi_rcv_numsacks: u32, + #[cfg(any(freebsd15, freebsd14))] + pub tcpi_rcv_adv: u32, + #[cfg(any(freebsd15, freebsd14))] + pub tcpi_dupacks: u32, + #[cfg(freebsd14)] + pub __tcpi_pad: [u32; 10], + #[cfg(freebsd15)] + pub __tcpi_pad: [u32; 14], + #[cfg(not(any(freebsd15, freebsd14)))] + pub __tcpi_pad: [u32; 26], + } + + pub struct _umtx_time { + pub _timeout: crate::timespec, + pub _flags: u32, + pub _clockid: u32, + } + + pub struct shm_largepage_conf { + pub psind: c_int, + pub alloc_policy: c_int, + __pad: [c_int; 10], + } + + pub struct memory_type { + __priva: [crate::uintptr_t; 32], + __privb: [crate::uintptr_t; 26], + } + + pub struct memory_type_list { + __priv: [crate::uintptr_t; 2], + } + + pub struct pidfh { + __priva: [[crate::uintptr_t; 32]; 8], + __privb: [crate::uintptr_t; 2], + } + + pub struct sctp_event { + pub se_assoc_id: crate::sctp_assoc_t, + pub se_type: u16, + pub se_on: u8, + } + + pub struct sctp_event_subscribe { + pub sctp_data_io_event: u8, + pub sctp_association_event: u8, + pub sctp_address_event: u8, + pub sctp_send_failure_event: u8, + pub sctp_peer_error_event: u8, + pub sctp_shutdown_event: u8, + pub sctp_partial_delivery_event: u8, + pub sctp_adaptation_layer_event: u8, + pub sctp_authentication_event: u8, + pub sctp_sender_dry_event: u8, + pub sctp_stream_reset_event: u8, + } + + pub struct sctp_initmsg { + pub sinit_num_ostreams: u16, + pub sinit_max_instreams: u16, + pub sinit_max_attempts: u16, + pub sinit_max_init_timeo: u16, + } + + pub struct sctp_sndrcvinfo { + pub sinfo_stream: u16, + pub sinfo_ssn: u16, + pub sinfo_flags: u16, + pub sinfo_ppid: 
u32, + pub sinfo_context: u32, + pub sinfo_timetolive: u32, + pub sinfo_tsn: u32, + pub sinfo_cumtsn: u32, + pub sinfo_assoc_id: crate::sctp_assoc_t, + pub sinfo_keynumber: u16, + pub sinfo_keynumber_valid: u16, + pub __reserve_pad: [[u8; 23]; 4], + } + + pub struct sctp_extrcvinfo { + pub sinfo_stream: u16, + pub sinfo_ssn: u16, + pub sinfo_flags: u16, + pub sinfo_ppid: u32, + pub sinfo_context: u32, + pub sinfo_timetolive: u32, + pub sinfo_tsn: u32, + pub sinfo_cumtsn: u32, + pub sinfo_assoc_id: crate::sctp_assoc_t, + pub serinfo_next_flags: u16, + pub serinfo_next_stream: u16, + pub serinfo_next_aid: u32, + pub serinfo_next_length: u32, + pub serinfo_next_ppid: u32, + pub sinfo_keynumber: u16, + pub sinfo_keynumber_valid: u16, + pub __reserve_pad: [[u8; 19]; 4], + } + + pub struct sctp_sndinfo { + pub snd_sid: u16, + pub snd_flags: u16, + pub snd_ppid: u32, + pub snd_context: u32, + pub snd_assoc_id: crate::sctp_assoc_t, + } + + pub struct sctp_prinfo { + pub pr_policy: u16, + pub pr_value: u32, + } + + pub struct sctp_default_prinfo { + pub pr_policy: u16, + pub pr_value: u32, + pub pr_assoc_id: crate::sctp_assoc_t, + } + + pub struct sctp_authinfo { + pub auth_keynumber: u16, + } + + pub struct sctp_rcvinfo { + pub rcv_sid: u16, + pub rcv_ssn: u16, + pub rcv_flags: u16, + pub rcv_ppid: u32, + pub rcv_tsn: u32, + pub rcv_cumtsn: u32, + pub rcv_context: u32, + pub rcv_assoc_id: crate::sctp_assoc_t, + } + + pub struct sctp_nxtinfo { + pub nxt_sid: u16, + pub nxt_flags: u16, + pub nxt_ppid: u32, + pub nxt_length: u32, + pub nxt_assoc_id: crate::sctp_assoc_t, + } + + pub struct sctp_recvv_rn { + pub recvv_rcvinfo: sctp_rcvinfo, + pub recvv_nxtinfo: sctp_nxtinfo, + } + + pub struct sctp_sendv_spa { + pub sendv_flags: u32, + pub sendv_sndinfo: sctp_sndinfo, + pub sendv_prinfo: sctp_prinfo, + pub sendv_authinfo: sctp_authinfo, + } + + pub struct sctp_snd_all_completes { + pub sall_stream: u16, + pub sall_flags: u16, + pub sall_ppid: u32, + pub sall_context: u32, + pub sall_num_sent: u32, + pub sall_num_failed: u32, + } + + pub struct sctp_pcbinfo { + pub ep_count: u32, + pub asoc_count: u32, + pub laddr_count: u32, + pub raddr_count: u32, + pub chk_count: u32, + pub readq_count: u32, + pub free_chunks: u32, + pub stream_oque: u32, + } + + pub struct sctp_sockstat { + pub ss_assoc_id: crate::sctp_assoc_t, + pub ss_total_sndbuf: u32, + pub ss_total_recv_buf: u32, + } + + pub struct sctp_assoc_change { + pub sac_type: u16, + pub sac_flags: u16, + pub sac_length: u32, + pub sac_state: u16, + pub sac_error: u16, + pub sac_outbound_streams: u16, + pub sac_inbound_streams: u16, + pub sac_assoc_id: crate::sctp_assoc_t, + pub sac_info: [u8; 0], + } + + pub struct sctp_paddr_change { + pub spc_type: u16, + pub spc_flags: u16, + pub spc_length: u32, + pub spc_aaddr: crate::sockaddr_storage, + pub spc_state: u32, + pub spc_error: u32, + pub spc_assoc_id: crate::sctp_assoc_t, + } + + pub struct sctp_remote_error { + pub sre_type: u16, + pub sre_flags: u16, + pub sre_length: u32, + pub sre_error: u16, + pub sre_assoc_id: crate::sctp_assoc_t, + pub sre_data: [u8; 0], + } + + pub struct sctp_send_failed_event { + pub ssfe_type: u16, + pub ssfe_flags: u16, + pub ssfe_length: u32, + pub ssfe_error: u32, + pub ssfe_info: sctp_sndinfo, + pub ssfe_assoc_id: crate::sctp_assoc_t, + pub ssfe_data: [u8; 0], + } + + pub struct sctp_shutdown_event { + pub sse_type: u16, + pub sse_flags: u16, + pub sse_length: u32, + pub sse_assoc_id: crate::sctp_assoc_t, + } + + pub struct sctp_adaptation_event { + pub sai_type: u16, + 
pub sai_flags: u16, + pub sai_length: u32, + pub sai_adaptation_ind: u32, + pub sai_assoc_id: crate::sctp_assoc_t, + } + + pub struct sctp_setadaptation { + pub ssb_adaptation_ind: u32, + } + + pub struct sctp_pdapi_event { + pub pdapi_type: u16, + pub pdapi_flags: u16, + pub pdapi_length: u32, + pub pdapi_indication: u32, + pub pdapi_stream: u16, + pub pdapi_seq: u16, + pub pdapi_assoc_id: crate::sctp_assoc_t, + } + + pub struct sctp_sender_dry_event { + pub sender_dry_type: u16, + pub sender_dry_flags: u16, + pub sender_dry_length: u32, + pub sender_dry_assoc_id: crate::sctp_assoc_t, + } + + pub struct sctp_stream_reset_event { + pub strreset_type: u16, + pub strreset_flags: u16, + pub strreset_length: u32, + pub strreset_assoc_id: crate::sctp_assoc_t, + pub strreset_stream_list: [u16; 0], + } + + pub struct sctp_stream_change_event { + pub strchange_type: u16, + pub strchange_flags: u16, + pub strchange_length: u32, + pub strchange_assoc_id: crate::sctp_assoc_t, + pub strchange_instrms: u16, + pub strchange_outstrms: u16, + } + + pub struct filedesc { + pub fd_files: *mut fdescenttbl, + pub fd_map: *mut c_ulong, + pub fd_freefile: c_int, + pub fd_refcnt: c_int, + pub fd_holdcnt: c_int, + fd_sx: sx, + fd_kqlist: kqlist, + pub fd_holdleaderscount: c_int, + pub fd_holdleaderswakeup: c_int, + } + + pub struct fdescenttbl { + pub fdt_nfiles: c_int, + fdt_ofiles: [*mut c_void; 0], + } + + // FIXME: Should be private. + #[doc(hidden)] + pub struct sx { + lock_object: lock_object, + sx_lock: crate::uintptr_t, + } + + // FIXME: Should be private. + #[doc(hidden)] + pub struct lock_object { + lo_name: *const c_char, + lo_flags: c_uint, + lo_data: c_uint, + // This is normally `struct witness`. + lo_witness: *mut c_void, + } + + // FIXME: Should be private. + #[doc(hidden)] + pub struct kqlist { + tqh_first: *mut c_void, + tqh_last: *mut *mut c_void, + } + + pub struct splice { + pub sp_fd: c_int, + pub sp_max: off_t, + pub sp_idle: crate::timeval, + } +} + +s_no_extra_traits! { + pub struct utmpx { + pub ut_type: c_short, + pub ut_tv: crate::timeval, + pub ut_id: [c_char; 8], + pub ut_pid: crate::pid_t, + pub ut_user: [c_char; 32], + pub ut_line: [c_char; 16], + pub ut_host: [c_char; 128], + pub __ut_spare: [c_char; 64], + } + + pub union __c_anonymous_cr_pid { + __cr_unused: *mut c_void, + pub cr_pid: crate::pid_t, + } + + pub struct xucred { + pub cr_version: c_uint, + pub cr_uid: crate::uid_t, + pub cr_ngroups: c_short, + pub cr_groups: [crate::gid_t; 16], + pub cr_pid__c_anonymous_union: __c_anonymous_cr_pid, + } + + pub struct sockaddr_dl { + pub sdl_len: c_uchar, + pub sdl_family: c_uchar, + pub sdl_index: c_ushort, + pub sdl_type: c_uchar, + pub sdl_nlen: c_uchar, + pub sdl_alen: c_uchar, + pub sdl_slen: c_uchar, + pub sdl_data: [c_char; 46], + } + + pub struct mq_attr { + pub mq_flags: c_long, + pub mq_maxmsg: c_long, + pub mq_msgsize: c_long, + pub mq_curmsgs: c_long, + __reserved: [c_long; 4], + } + + pub struct sigevent { + pub sigev_notify: c_int, + pub sigev_signo: c_int, + pub sigev_value: crate::sigval, + //The rest of the structure is actually a union. We expose only + //sigev_notify_thread_id because it's the most useful union member. 
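+        //(Reading it is only meaningful when sigev_notify == SIGEV_THREAD_ID;
+        //the C union's other arms, e.g. sigev_notify_function and
+        //sigev_notify_attributes used with SIGEV_THREAD, are not exposed here.)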
+ pub sigev_notify_thread_id: crate::lwpid_t, + #[cfg(target_pointer_width = "64")] + __unused1: c_int, + __unused2: [c_long; 7], + } + + pub struct ptsstat { + #[cfg(any(freebsd12, freebsd13, freebsd14, freebsd15))] + pub dev: u64, + #[cfg(not(any(freebsd12, freebsd13, freebsd14, freebsd15)))] + pub dev: u32, + pub devname: [c_char; SPECNAMELEN as usize + 1], + } + + pub union __c_anonymous_elf32_auxv_union { + pub a_val: c_int, + } + + pub struct Elf32_Auxinfo { + pub a_type: c_int, + pub a_un: __c_anonymous_elf32_auxv_union, + } + + pub union __c_anonymous_ifi_epoch { + pub tt: crate::time_t, + pub ph: u64, + } + + pub union __c_anonymous_ifi_lastchange { + pub tv: crate::timeval, + pub ph: __c_anonymous_ph, + } + + pub struct if_data { + /// ethernet, tokenring, etc + pub ifi_type: u8, + /// e.g., AUI, Thinnet, 10base-T, etc + pub ifi_physical: u8, + /// media address length + pub ifi_addrlen: u8, + /// media header length + pub ifi_hdrlen: u8, + /// current link state + pub ifi_link_state: u8, + /// carp vhid + pub ifi_vhid: u8, + /// length of this data struct + pub ifi_datalen: u16, + /// maximum transmission unit + pub ifi_mtu: u32, + /// routing metric (external only) + pub ifi_metric: u32, + /// linespeed + pub ifi_baudrate: u64, + /// packets received on interface + pub ifi_ipackets: u64, + /// input errors on interface + pub ifi_ierrors: u64, + /// packets sent on interface + pub ifi_opackets: u64, + /// output errors on interface + pub ifi_oerrors: u64, + /// collisions on csma interfaces + pub ifi_collisions: u64, + /// total number of octets received + pub ifi_ibytes: u64, + /// total number of octets sent + pub ifi_obytes: u64, + /// packets received via multicast + pub ifi_imcasts: u64, + /// packets sent via multicast + pub ifi_omcasts: u64, + /// dropped on input + pub ifi_iqdrops: u64, + /// dropped on output + pub ifi_oqdrops: u64, + /// destined for unsupported protocol + pub ifi_noproto: u64, + /// HW offload capabilities, see IFCAP + pub ifi_hwassist: u64, + /// uptime at attach or stat reset + pub __ifi_epoch: __c_anonymous_ifi_epoch, + /// time of last administrative change + pub __ifi_lastchange: __c_anonymous_ifi_lastchange, + } + + pub union __c_anonymous_ifr_ifru { + pub ifru_addr: crate::sockaddr, + pub ifru_dstaddr: crate::sockaddr, + pub ifru_broadaddr: crate::sockaddr, + pub ifru_buffer: ifreq_buffer, + pub ifru_flags: [c_short; 2], + pub ifru_index: c_short, + pub ifru_jid: c_int, + pub ifru_metric: c_int, + pub ifru_mtu: c_int, + pub ifru_phys: c_int, + pub ifru_media: c_int, + pub ifru_data: crate::caddr_t, + pub ifru_cap: [c_int; 2], + pub ifru_fib: c_uint, + pub ifru_vlan_pcp: c_uchar, + } + + pub struct ifreq { + /// if name, e.g. "en0" + pub ifr_name: [c_char; crate::IFNAMSIZ], + pub ifr_ifru: __c_anonymous_ifr_ifru, + } + + pub union __c_anonymous_ifc_ifcu { + pub ifcu_buf: crate::caddr_t, + pub ifcu_req: *mut ifreq, + } + + pub struct ifstat { + /// if name, e.g. "en0" + pub ifs_name: [c_char; crate::IFNAMSIZ as usize], + pub ascii: [c_char; crate::IFSTATMAX as usize + 1], + } + + pub struct ifrsskey { + /// if name, e.g. 
"en0" + pub ifrk_name: [c_char; crate::IFNAMSIZ as usize], + /// RSS_FUNC_ + pub ifrk_func: u8, + pub ifrk_spare0: u8, + pub ifrk_keylen: u16, + pub ifrk_key: [u8; crate::RSS_KEYLEN as usize], + } + + pub struct ifdownreason { + pub ifdr_name: [c_char; crate::IFNAMSIZ as usize], + pub ifdr_reason: u32, + pub ifdr_vendor: u32, + pub ifdr_msg: [c_char; crate::IFDR_MSG_SIZE as usize], + } + + #[repr(packed)] + pub struct sctphdr { + pub src_port: u16, + pub dest_port: u16, + pub v_tag: u32, + pub checksum: u32, + } + + #[repr(packed)] + pub struct sctp_chunkhdr { + pub chunk_type: u8, + pub chunk_flags: u8, + pub chunk_length: u16, + } + + #[repr(packed)] + pub struct sctp_paramhdr { + pub param_type: u16, + pub param_length: u16, + } + + #[repr(packed)] + pub struct sctp_gen_error_cause { + pub code: u16, + pub length: u16, + pub info: [u8; 0], + } + + #[repr(packed)] + pub struct sctp_error_cause { + pub code: u16, + pub length: u16, + } + + #[repr(packed)] + pub struct sctp_error_invalid_stream { + pub cause: sctp_error_cause, + pub stream_id: u16, + __reserved: u16, + } + + #[repr(packed)] + pub struct sctp_error_missing_param { + pub cause: sctp_error_cause, + pub num_missing_params: u32, + pub tpe: [u8; 0], + } + + #[repr(packed)] + pub struct sctp_error_stale_cookie { + pub cause: sctp_error_cause, + pub stale_time: u32, + } + + #[repr(packed)] + pub struct sctp_error_out_of_resource { + pub cause: sctp_error_cause, + } + + #[repr(packed)] + pub struct sctp_error_unresolv_addr { + pub cause: sctp_error_cause, + } + + #[repr(packed)] + pub struct sctp_error_unrecognized_chunk { + pub cause: sctp_error_cause, + pub ch: sctp_chunkhdr, + } + + #[repr(packed)] + pub struct sctp_error_no_user_data { + pub cause: sctp_error_cause, + pub tsn: u32, + } + + #[repr(packed)] + pub struct sctp_error_auth_invalid_hmac { + pub cause: sctp_error_cause, + pub hmac_id: u16, + } + + pub struct kinfo_file { + pub kf_structsize: c_int, + pub kf_type: c_int, + pub kf_fd: c_int, + pub kf_ref_count: c_int, + pub kf_flags: c_int, + _kf_pad0: c_int, + pub kf_offset: i64, + _priv: [u8; 304], // FIXME(freebsd): this is really a giant union + pub kf_status: u16, + _kf_pad1: u16, + _kf_ispare0: c_int, + pub kf_cap_rights: crate::cap_rights_t, + _kf_cap_spare: u64, + pub kf_path: [c_char; crate::PATH_MAX as usize], + } + + pub struct ucontext_t { + pub uc_sigmask: crate::sigset_t, + pub uc_mcontext: crate::mcontext_t, + pub uc_link: *mut crate::ucontext_t, + pub uc_stack: crate::stack_t, + pub uc_flags: c_int, + __spare__: [c_int; 4], + } + + #[repr(align(8))] + pub struct xinpgen { + pub xig_len: ksize_t, + pub xig_count: u32, + _xig_spare32: u32, + pub xig_gen: inp_gen_t, + pub xig_sogen: so_gen_t, + _xig_spare64: [u64; 4], + } + + pub struct in_addr_4in6 { + _ia46_pad32: [u32; 3], + pub ia46_addr4: crate::in_addr, + } + + pub union in_dependaddr { + pub id46_addr: crate::in_addr_4in6, + pub id6_addr: crate::in6_addr, + } + + pub struct in_endpoints { + pub ie_fport: u16, + pub ie_lport: u16, + pub ie_dependfaddr: crate::in_dependaddr, + pub ie_dependladdr: crate::in_dependaddr, + pub ie6_zoneid: u32, + } + + pub struct in_conninfo { + pub inc_flags: u8, + pub inc_len: u8, + pub inc_fibnum: u16, + pub inc_ie: crate::in_endpoints, + } + + pub struct xktls_session_onedir { + // Note: this field is called `gen` in upstream FreeBSD, but `gen` is + // reserved keyword in Rust since the 2024 Edition, hence `gennum`. 
+ pub gennum: u64, + _rsrv1: [u64; 8], + _rsrv2: [u32; 8], + pub iv: [u8; 32], + pub cipher_algorithm: i32, + pub auth_algorithm: i32, + pub cipher_key_len: u16, + pub iv_len: u16, + pub auth_key_len: u16, + pub max_frame_len: u16, + pub tls_vmajor: u8, + pub tls_vminor: u8, + pub tls_hlen: u8, + pub tls_tlen: u8, + pub tls_bs: u8, + pub flags: u8, + pub drv_st_len: u16, + pub ifnet: [c_char; 16], + } + + pub struct xktls_session { + pub tsz: u32, + pub fsz: u32, + pub inp_gencnt: u64, + pub so_pcb: kvaddr_t, + pub coninf: crate::in_conninfo, + pub rx_vlan_id: c_ushort, + pub rcv: crate::xktls_session_onedir, + pub snd: crate::xktls_session_onedir, + } +} + +cfg_if! { + if #[cfg(feature = "extra_traits")] { + impl PartialEq for utmpx { + fn eq(&self, other: &utmpx) -> bool { + self.ut_type == other.ut_type + && self.ut_tv == other.ut_tv + && self.ut_id == other.ut_id + && self.ut_pid == other.ut_pid + && self.ut_user == other.ut_user + && self.ut_line == other.ut_line + && self + .ut_host + .iter() + .zip(other.ut_host.iter()) + .all(|(a, b)| a == b) + && self + .__ut_spare + .iter() + .zip(other.__ut_spare.iter()) + .all(|(a, b)| a == b) + } + } + impl Eq for utmpx {} + impl hash::Hash for utmpx { + fn hash(&self, state: &mut H) { + self.ut_type.hash(state); + self.ut_tv.hash(state); + self.ut_id.hash(state); + self.ut_pid.hash(state); + self.ut_user.hash(state); + self.ut_line.hash(state); + self.ut_host.hash(state); + self.__ut_spare.hash(state); + } + } + + impl PartialEq for __c_anonymous_cr_pid { + fn eq(&self, other: &__c_anonymous_cr_pid) -> bool { + unsafe { self.cr_pid == other.cr_pid } + } + } + impl Eq for __c_anonymous_cr_pid {} + impl hash::Hash for __c_anonymous_cr_pid { + fn hash(&self, state: &mut H) { + unsafe { self.cr_pid.hash(state) }; + } + } + + impl PartialEq for xucred { + fn eq(&self, other: &xucred) -> bool { + self.cr_version == other.cr_version + && self.cr_uid == other.cr_uid + && self.cr_ngroups == other.cr_ngroups + && self.cr_groups == other.cr_groups + && self.cr_pid__c_anonymous_union == other.cr_pid__c_anonymous_union + } + } + impl Eq for xucred {} + impl hash::Hash for xucred { + fn hash(&self, state: &mut H) { + self.cr_version.hash(state); + self.cr_uid.hash(state); + self.cr_ngroups.hash(state); + self.cr_groups.hash(state); + self.cr_pid__c_anonymous_union.hash(state); + } + } + + impl PartialEq for sockaddr_dl { + fn eq(&self, other: &sockaddr_dl) -> bool { + self.sdl_len == other.sdl_len + && self.sdl_family == other.sdl_family + && self.sdl_index == other.sdl_index + && self.sdl_type == other.sdl_type + && self.sdl_nlen == other.sdl_nlen + && self.sdl_alen == other.sdl_alen + && self.sdl_slen == other.sdl_slen + && self + .sdl_data + .iter() + .zip(other.sdl_data.iter()) + .all(|(a, b)| a == b) + } + } + impl Eq for sockaddr_dl {} + impl hash::Hash for sockaddr_dl { + fn hash(&self, state: &mut H) { + self.sdl_len.hash(state); + self.sdl_family.hash(state); + self.sdl_index.hash(state); + self.sdl_type.hash(state); + self.sdl_nlen.hash(state); + self.sdl_alen.hash(state); + self.sdl_slen.hash(state); + self.sdl_data.hash(state); + } + } + + impl PartialEq for mq_attr { + fn eq(&self, other: &mq_attr) -> bool { + self.mq_flags == other.mq_flags + && self.mq_maxmsg == other.mq_maxmsg + && self.mq_msgsize == other.mq_msgsize + && self.mq_curmsgs == other.mq_curmsgs + } + } + impl Eq for mq_attr {} + impl hash::Hash for mq_attr { + fn hash(&self, state: &mut H) { + self.mq_flags.hash(state); + self.mq_maxmsg.hash(state); + 
self.mq_msgsize.hash(state); + self.mq_curmsgs.hash(state); + } + } + + impl PartialEq for sigevent { + fn eq(&self, other: &sigevent) -> bool { + self.sigev_notify == other.sigev_notify + && self.sigev_signo == other.sigev_signo + && self.sigev_value == other.sigev_value + && self.sigev_notify_thread_id == other.sigev_notify_thread_id + } + } + impl Eq for sigevent {} + impl hash::Hash for sigevent { + fn hash(&self, state: &mut H) { + self.sigev_notify.hash(state); + self.sigev_signo.hash(state); + self.sigev_value.hash(state); + self.sigev_notify_thread_id.hash(state); + } + } + + impl PartialEq for ptsstat { + fn eq(&self, other: &ptsstat) -> bool { + let self_devname: &[c_char] = &self.devname; + let other_devname: &[c_char] = &other.devname; + + self.dev == other.dev && self_devname == other_devname + } + } + impl Eq for ptsstat {} + impl hash::Hash for ptsstat { + fn hash(&self, state: &mut H) { + let self_devname: &[c_char] = &self.devname; + + self.dev.hash(state); + self_devname.hash(state); + } + } + + impl PartialEq for __c_anonymous_elf32_auxv_union { + fn eq(&self, other: &__c_anonymous_elf32_auxv_union) -> bool { + unsafe { self.a_val == other.a_val } + } + } + impl Eq for __c_anonymous_elf32_auxv_union {} + impl PartialEq for Elf32_Auxinfo { + fn eq(&self, other: &Elf32_Auxinfo) -> bool { + self.a_type == other.a_type && self.a_un == other.a_un + } + } + impl Eq for Elf32_Auxinfo {} + + impl PartialEq for __c_anonymous_ifr_ifru { + fn eq(&self, other: &__c_anonymous_ifr_ifru) -> bool { + unsafe { + self.ifru_addr == other.ifru_addr + && self.ifru_dstaddr == other.ifru_dstaddr + && self.ifru_broadaddr == other.ifru_broadaddr + && self.ifru_buffer == other.ifru_buffer + && self.ifru_flags == other.ifru_flags + && self.ifru_index == other.ifru_index + && self.ifru_jid == other.ifru_jid + && self.ifru_metric == other.ifru_metric + && self.ifru_mtu == other.ifru_mtu + && self.ifru_phys == other.ifru_phys + && self.ifru_media == other.ifru_media + && self.ifru_data == other.ifru_data + && self.ifru_cap == other.ifru_cap + && self.ifru_fib == other.ifru_fib + && self.ifru_vlan_pcp == other.ifru_vlan_pcp + } + } + } + impl Eq for __c_anonymous_ifr_ifru {} + impl hash::Hash for __c_anonymous_ifr_ifru { + fn hash(&self, state: &mut H) { + unsafe { self.ifru_addr.hash(state) }; + unsafe { self.ifru_dstaddr.hash(state) }; + unsafe { self.ifru_broadaddr.hash(state) }; + unsafe { self.ifru_buffer.hash(state) }; + unsafe { self.ifru_flags.hash(state) }; + unsafe { self.ifru_index.hash(state) }; + unsafe { self.ifru_jid.hash(state) }; + unsafe { self.ifru_metric.hash(state) }; + unsafe { self.ifru_mtu.hash(state) }; + unsafe { self.ifru_phys.hash(state) }; + unsafe { self.ifru_media.hash(state) }; + unsafe { self.ifru_data.hash(state) }; + unsafe { self.ifru_cap.hash(state) }; + unsafe { self.ifru_fib.hash(state) }; + unsafe { self.ifru_vlan_pcp.hash(state) }; + } + } + + impl PartialEq for ifreq { + fn eq(&self, other: &ifreq) -> bool { + self.ifr_name == other.ifr_name && self.ifr_ifru == other.ifr_ifru + } + } + impl Eq for ifreq {} + impl hash::Hash for ifreq { + fn hash(&self, state: &mut H) { + self.ifr_name.hash(state); + self.ifr_ifru.hash(state); + } + } + + impl Eq for __c_anonymous_ifc_ifcu {} + + impl PartialEq for __c_anonymous_ifc_ifcu { + fn eq(&self, other: &__c_anonymous_ifc_ifcu) -> bool { + unsafe { self.ifcu_buf == other.ifcu_buf && self.ifcu_req == other.ifcu_req } + } + } + + impl hash::Hash for __c_anonymous_ifc_ifcu { + fn hash(&self, state: &mut H) { + unsafe { 
self.ifcu_buf.hash(state) }; + unsafe { self.ifcu_req.hash(state) }; + } + } + + impl PartialEq for ifstat { + fn eq(&self, other: &ifstat) -> bool { + let self_ascii: &[c_char] = &self.ascii; + let other_ascii: &[c_char] = &other.ascii; + + self.ifs_name == other.ifs_name && self_ascii == other_ascii + } + } + impl Eq for ifstat {} + impl hash::Hash for ifstat { + fn hash(&self, state: &mut H) { + self.ifs_name.hash(state); + self.ascii.hash(state); + } + } + + impl PartialEq for ifrsskey { + fn eq(&self, other: &ifrsskey) -> bool { + let self_ifrk_key: &[u8] = &self.ifrk_key; + let other_ifrk_key: &[u8] = &other.ifrk_key; + + self.ifrk_name == other.ifrk_name + && self.ifrk_func == other.ifrk_func + && self.ifrk_spare0 == other.ifrk_spare0 + && self.ifrk_keylen == other.ifrk_keylen + && self_ifrk_key == other_ifrk_key + } + } + impl Eq for ifrsskey {} + impl hash::Hash for ifrsskey { + fn hash(&self, state: &mut H) { + self.ifrk_name.hash(state); + self.ifrk_func.hash(state); + self.ifrk_spare0.hash(state); + self.ifrk_keylen.hash(state); + self.ifrk_key.hash(state); + } + } + + impl PartialEq for ifdownreason { + fn eq(&self, other: &ifdownreason) -> bool { + let self_ifdr_msg: &[c_char] = &self.ifdr_msg; + let other_ifdr_msg: &[c_char] = &other.ifdr_msg; + + self.ifdr_name == other.ifdr_name + && self.ifdr_reason == other.ifdr_reason + && self.ifdr_vendor == other.ifdr_vendor + && self_ifdr_msg == other_ifdr_msg + } + } + impl Eq for ifdownreason {} + impl hash::Hash for ifdownreason { + fn hash(&self, state: &mut H) { + self.ifdr_name.hash(state); + self.ifdr_reason.hash(state); + self.ifdr_vendor.hash(state); + self.ifdr_msg.hash(state); + } + } + + impl PartialEq for __c_anonymous_ifi_epoch { + fn eq(&self, other: &__c_anonymous_ifi_epoch) -> bool { + unsafe { self.tt == other.tt && self.ph == other.ph } + } + } + impl Eq for __c_anonymous_ifi_epoch {} + impl hash::Hash for __c_anonymous_ifi_epoch { + fn hash(&self, state: &mut H) { + unsafe { + self.tt.hash(state); + self.ph.hash(state); + } + } + } + + impl PartialEq for __c_anonymous_ifi_lastchange { + fn eq(&self, other: &__c_anonymous_ifi_lastchange) -> bool { + unsafe { self.tv == other.tv && self.ph == other.ph } + } + } + impl Eq for __c_anonymous_ifi_lastchange {} + impl hash::Hash for __c_anonymous_ifi_lastchange { + fn hash(&self, state: &mut H) { + unsafe { + self.tv.hash(state); + self.ph.hash(state); + } + } + } + + impl PartialEq for if_data { + fn eq(&self, other: &if_data) -> bool { + self.ifi_type == other.ifi_type + && self.ifi_physical == other.ifi_physical + && self.ifi_addrlen == other.ifi_addrlen + && self.ifi_hdrlen == other.ifi_hdrlen + && self.ifi_link_state == other.ifi_link_state + && self.ifi_vhid == other.ifi_vhid + && self.ifi_datalen == other.ifi_datalen + && self.ifi_mtu == other.ifi_mtu + && self.ifi_metric == other.ifi_metric + && self.ifi_baudrate == other.ifi_baudrate + && self.ifi_ipackets == other.ifi_ipackets + && self.ifi_ierrors == other.ifi_ierrors + && self.ifi_opackets == other.ifi_opackets + && self.ifi_oerrors == other.ifi_oerrors + && self.ifi_collisions == other.ifi_collisions + && self.ifi_ibytes == other.ifi_ibytes + && self.ifi_obytes == other.ifi_obytes + && self.ifi_imcasts == other.ifi_imcasts + && self.ifi_omcasts == other.ifi_omcasts + && self.ifi_iqdrops == other.ifi_iqdrops + && self.ifi_oqdrops == other.ifi_oqdrops + && self.ifi_noproto == other.ifi_noproto + && self.ifi_hwassist == other.ifi_hwassist + && self.__ifi_epoch == other.__ifi_epoch + && self.__ifi_lastchange == 
other.__ifi_lastchange + } + } + impl Eq for if_data {} + impl hash::Hash for if_data { + fn hash(&self, state: &mut H) { + self.ifi_type.hash(state); + self.ifi_physical.hash(state); + self.ifi_addrlen.hash(state); + self.ifi_hdrlen.hash(state); + self.ifi_link_state.hash(state); + self.ifi_vhid.hash(state); + self.ifi_datalen.hash(state); + self.ifi_mtu.hash(state); + self.ifi_metric.hash(state); + self.ifi_baudrate.hash(state); + self.ifi_ipackets.hash(state); + self.ifi_ierrors.hash(state); + self.ifi_opackets.hash(state); + self.ifi_oerrors.hash(state); + self.ifi_collisions.hash(state); + self.ifi_ibytes.hash(state); + self.ifi_obytes.hash(state); + self.ifi_imcasts.hash(state); + self.ifi_omcasts.hash(state); + self.ifi_iqdrops.hash(state); + self.ifi_oqdrops.hash(state); + self.ifi_noproto.hash(state); + self.ifi_hwassist.hash(state); + self.__ifi_epoch.hash(state); + self.__ifi_lastchange.hash(state); + } + } + + impl PartialEq for sctphdr { + fn eq(&self, other: &sctphdr) -> bool { + return { self.src_port } == { other.src_port } + && { self.dest_port } == { other.dest_port } + && { self.v_tag } == { other.v_tag } + && { self.checksum } == { other.checksum }; + } + } + impl Eq for sctphdr {} + impl hash::Hash for sctphdr { + fn hash(&self, state: &mut H) { + { self.src_port }.hash(state); + { self.dest_port }.hash(state); + { self.v_tag }.hash(state); + { self.checksum }.hash(state); + } + } + + impl PartialEq for sctp_chunkhdr { + fn eq(&self, other: &sctp_chunkhdr) -> bool { + return { self.chunk_type } == { other.chunk_type } + && { self.chunk_flags } == { other.chunk_flags } + && { self.chunk_length } == { other.chunk_length }; + } + } + impl Eq for sctp_chunkhdr {} + impl hash::Hash for sctp_chunkhdr { + fn hash(&self, state: &mut H) { + { self.chunk_type }.hash(state); + { self.chunk_flags }.hash(state); + { self.chunk_length }.hash(state); + } + } + + impl PartialEq for sctp_paramhdr { + fn eq(&self, other: &sctp_paramhdr) -> bool { + return { self.param_type } == { other.param_type } && { self.param_length } == { + other.param_length + }; + } + } + impl Eq for sctp_paramhdr {} + impl hash::Hash for sctp_paramhdr { + fn hash(&self, state: &mut H) { + { self.param_type }.hash(state); + { self.param_length }.hash(state); + } + } + + impl PartialEq for sctp_gen_error_cause { + fn eq(&self, other: &sctp_gen_error_cause) -> bool { + return { self.code } == { other.code } && { self.length } == { other.length } && { + self.info + } + .iter() + .zip({ other.info }.iter()) + .all(|(a, b)| a == b); + } + } + impl Eq for sctp_gen_error_cause {} + impl hash::Hash for sctp_gen_error_cause { + fn hash(&self, state: &mut H) { + { self.code }.hash(state); + { self.length }.hash(state); + { self.info }.hash(state); + } + } + + impl PartialEq for sctp_error_cause { + fn eq(&self, other: &sctp_error_cause) -> bool { + return { self.code } == { other.code } && { self.length } == { other.length }; + } + } + impl Eq for sctp_error_cause {} + impl hash::Hash for sctp_error_cause { + fn hash(&self, state: &mut H) { + { self.code }.hash(state); + { self.length }.hash(state); + } + } + + impl PartialEq for sctp_error_invalid_stream { + fn eq(&self, other: &sctp_error_invalid_stream) -> bool { + return { self.cause } == { other.cause } && { self.stream_id } == { + other.stream_id + }; + } + } + impl Eq for sctp_error_invalid_stream {} + impl hash::Hash for sctp_error_invalid_stream { + fn hash(&self, state: &mut H) { + { self.cause }.hash(state); + { self.stream_id }.hash(state); + } + } + + impl 
PartialEq for sctp_error_missing_param { + fn eq(&self, other: &sctp_error_missing_param) -> bool { + return { self.cause } == { other.cause } + && { self.num_missing_params } == { other.num_missing_params } + && { self.tpe } + .iter() + .zip({ other.tpe }.iter()) + .all(|(a, b)| a == b); + } + } + impl Eq for sctp_error_missing_param {} + impl hash::Hash for sctp_error_missing_param { + fn hash(&self, state: &mut H) { + { self.cause }.hash(state); + { self.num_missing_params }.hash(state); + { self.tpe }.hash(state); + } + } + + impl PartialEq for sctp_error_stale_cookie { + fn eq(&self, other: &sctp_error_stale_cookie) -> bool { + return { self.cause } == { other.cause } && { self.stale_time } == { + other.stale_time + }; + } + } + impl Eq for sctp_error_stale_cookie {} + impl hash::Hash for sctp_error_stale_cookie { + fn hash(&self, state: &mut H) { + { self.cause }.hash(state); + { self.stale_time }.hash(state); + } + } + + impl PartialEq for sctp_error_out_of_resource { + fn eq(&self, other: &sctp_error_out_of_resource) -> bool { + return { self.cause } == { other.cause }; + } + } + impl Eq for sctp_error_out_of_resource {} + impl hash::Hash for sctp_error_out_of_resource { + fn hash(&self, state: &mut H) { + { self.cause }.hash(state); + } + } + + impl PartialEq for sctp_error_unresolv_addr { + fn eq(&self, other: &sctp_error_unresolv_addr) -> bool { + return { self.cause } == { other.cause }; + } + } + impl Eq for sctp_error_unresolv_addr {} + impl hash::Hash for sctp_error_unresolv_addr { + fn hash(&self, state: &mut H) { + { self.cause }.hash(state); + } + } + + impl PartialEq for sctp_error_unrecognized_chunk { + fn eq(&self, other: &sctp_error_unrecognized_chunk) -> bool { + return { self.cause } == { other.cause } && { self.ch } == { other.ch }; + } + } + impl Eq for sctp_error_unrecognized_chunk {} + impl hash::Hash for sctp_error_unrecognized_chunk { + fn hash(&self, state: &mut H) { + { self.cause }.hash(state); + { self.ch }.hash(state); + } + } + + impl PartialEq for sctp_error_no_user_data { + fn eq(&self, other: &sctp_error_no_user_data) -> bool { + return { self.cause } == { other.cause } && { self.tsn } == { other.tsn }; + } + } + impl Eq for sctp_error_no_user_data {} + impl hash::Hash for sctp_error_no_user_data { + fn hash(&self, state: &mut H) { + { self.cause }.hash(state); + { self.tsn }.hash(state); + } + } + + impl PartialEq for sctp_error_auth_invalid_hmac { + fn eq(&self, other: &sctp_error_auth_invalid_hmac) -> bool { + return { self.cause } == { other.cause } && { self.hmac_id } == { other.hmac_id }; + } + } + impl Eq for sctp_error_auth_invalid_hmac {} + impl hash::Hash for sctp_error_auth_invalid_hmac { + fn hash(&self, state: &mut H) { + { self.cause }.hash(state); + { self.hmac_id }.hash(state); + } + } + + impl PartialEq for kinfo_file { + fn eq(&self, other: &kinfo_file) -> bool { + self.kf_structsize == other.kf_structsize + && self.kf_type == other.kf_type + && self.kf_fd == other.kf_fd + && self.kf_ref_count == other.kf_ref_count + && self.kf_flags == other.kf_flags + && self.kf_offset == other.kf_offset + && self.kf_status == other.kf_status + && self.kf_cap_rights == other.kf_cap_rights + && self + .kf_path + .iter() + .zip(other.kf_path.iter()) + .all(|(a, b)| a == b) + } + } + impl Eq for kinfo_file {} + impl hash::Hash for kinfo_file { + fn hash(&self, state: &mut H) { + self.kf_structsize.hash(state); + self.kf_type.hash(state); + self.kf_fd.hash(state); + self.kf_ref_count.hash(state); + self.kf_flags.hash(state); + 
self.kf_offset.hash(state); + self.kf_status.hash(state); + self.kf_cap_rights.hash(state); + self.kf_path.hash(state); + } + } + } +} + +#[derive(Debug)] +#[repr(u32)] +pub enum dot3Vendors { + dot3VendorAMD = 1, + dot3VendorIntel = 2, + dot3VendorNational = 4, + dot3VendorFujitsu = 5, + dot3VendorDigital = 6, + dot3VendorWesternDigital = 7, +} +impl Copy for dot3Vendors {} +impl Clone for dot3Vendors { + fn clone(&self) -> dot3Vendors { + *self + } +} + +// aio.h +pub const LIO_VECTORED: c_int = 4; +pub const LIO_WRITEV: c_int = 5; +pub const LIO_READV: c_int = 6; + +// sys/caprights.h +pub const CAP_RIGHTS_VERSION_00: i32 = 0; +pub const CAP_RIGHTS_VERSION: i32 = CAP_RIGHTS_VERSION_00; + +// sys/capsicum.h +macro_rules! cap_right { + ($idx:expr, $bit:expr) => { + ((1u64 << (57 + ($idx))) | ($bit)) + }; +} +pub const CAP_READ: u64 = cap_right!(0, 0x0000000000000001u64); +pub const CAP_WRITE: u64 = cap_right!(0, 0x0000000000000002u64); +pub const CAP_SEEK_TELL: u64 = cap_right!(0, 0x0000000000000004u64); +pub const CAP_SEEK: u64 = CAP_SEEK_TELL | 0x0000000000000008u64; +pub const CAP_PREAD: u64 = CAP_SEEK | CAP_READ; +pub const CAP_PWRITE: u64 = CAP_SEEK | CAP_WRITE; +pub const CAP_MMAP: u64 = cap_right!(0, 0x0000000000000010u64); +pub const CAP_MMAP_R: u64 = CAP_MMAP | CAP_SEEK | CAP_READ; +pub const CAP_MMAP_W: u64 = CAP_MMAP | CAP_SEEK | CAP_WRITE; +pub const CAP_MMAP_X: u64 = CAP_MMAP | CAP_SEEK | 0x0000000000000020u64; +pub const CAP_MMAP_RW: u64 = CAP_MMAP_R | CAP_MMAP_W; +pub const CAP_MMAP_RX: u64 = CAP_MMAP_R | CAP_MMAP_X; +pub const CAP_MMAP_WX: u64 = CAP_MMAP_W | CAP_MMAP_X; +pub const CAP_MMAP_RWX: u64 = CAP_MMAP_R | CAP_MMAP_W | CAP_MMAP_X; +pub const CAP_CREATE: u64 = cap_right!(0, 0x0000000000000040u64); +pub const CAP_FEXECVE: u64 = cap_right!(0, 0x0000000000000080u64); +pub const CAP_FSYNC: u64 = cap_right!(0, 0x0000000000000100u64); +pub const CAP_FTRUNCATE: u64 = cap_right!(0, 0x0000000000000200u64); +pub const CAP_LOOKUP: u64 = cap_right!(0, 0x0000000000000400u64); +pub const CAP_FCHDIR: u64 = cap_right!(0, 0x0000000000000800u64); +pub const CAP_FCHFLAGS: u64 = cap_right!(0, 0x0000000000001000u64); +pub const CAP_CHFLAGSAT: u64 = CAP_FCHFLAGS | CAP_LOOKUP; +pub const CAP_FCHMOD: u64 = cap_right!(0, 0x0000000000002000u64); +pub const CAP_FCHMODAT: u64 = CAP_FCHMOD | CAP_LOOKUP; +pub const CAP_FCHOWN: u64 = cap_right!(0, 0x0000000000004000u64); +pub const CAP_FCHOWNAT: u64 = CAP_FCHOWN | CAP_LOOKUP; +pub const CAP_FCNTL: u64 = cap_right!(0, 0x0000000000008000u64); +pub const CAP_FLOCK: u64 = cap_right!(0, 0x0000000000010000u64); +pub const CAP_FPATHCONF: u64 = cap_right!(0, 0x0000000000020000u64); +pub const CAP_FSCK: u64 = cap_right!(0, 0x0000000000040000u64); +pub const CAP_FSTAT: u64 = cap_right!(0, 0x0000000000080000u64); +pub const CAP_FSTATAT: u64 = CAP_FSTAT | CAP_LOOKUP; +pub const CAP_FSTATFS: u64 = cap_right!(0, 0x0000000000100000u64); +pub const CAP_FUTIMES: u64 = cap_right!(0, 0x0000000000200000u64); +pub const CAP_FUTIMESAT: u64 = CAP_FUTIMES | CAP_LOOKUP; +// Note: this was named CAP_LINKAT prior to FreeBSD 11.0. +pub const CAP_LINKAT_TARGET: u64 = CAP_LOOKUP | 0x0000000000400000u64; +pub const CAP_MKDIRAT: u64 = CAP_LOOKUP | 0x0000000000800000u64; +pub const CAP_MKFIFOAT: u64 = CAP_LOOKUP | 0x0000000001000000u64; +pub const CAP_MKNODAT: u64 = CAP_LOOKUP | 0x0000000002000000u64; +// Note: this was named CAP_RENAMEAT prior to FreeBSD 11.0. 
+pub const CAP_RENAMEAT_SOURCE: u64 = CAP_LOOKUP | 0x0000000004000000u64; +pub const CAP_SYMLINKAT: u64 = CAP_LOOKUP | 0x0000000008000000u64; +pub const CAP_UNLINKAT: u64 = CAP_LOOKUP | 0x0000000010000000u64; +pub const CAP_ACCEPT: u64 = cap_right!(0, 0x0000000020000000u64); +pub const CAP_BIND: u64 = cap_right!(0, 0x0000000040000000u64); +pub const CAP_CONNECT: u64 = cap_right!(0, 0x0000000080000000u64); +pub const CAP_GETPEERNAME: u64 = cap_right!(0, 0x0000000100000000u64); +pub const CAP_GETSOCKNAME: u64 = cap_right!(0, 0x0000000200000000u64); +pub const CAP_GETSOCKOPT: u64 = cap_right!(0, 0x0000000400000000u64); +pub const CAP_LISTEN: u64 = cap_right!(0, 0x0000000800000000u64); +pub const CAP_PEELOFF: u64 = cap_right!(0, 0x0000001000000000u64); +pub const CAP_RECV: u64 = CAP_READ; +pub const CAP_SEND: u64 = CAP_WRITE; +pub const CAP_SETSOCKOPT: u64 = cap_right!(0, 0x0000002000000000u64); +pub const CAP_SHUTDOWN: u64 = cap_right!(0, 0x0000004000000000u64); +pub const CAP_BINDAT: u64 = CAP_LOOKUP | 0x0000008000000000u64; +pub const CAP_CONNECTAT: u64 = CAP_LOOKUP | 0x0000010000000000u64; +pub const CAP_LINKAT_SOURCE: u64 = CAP_LOOKUP | 0x0000020000000000u64; +pub const CAP_RENAMEAT_TARGET: u64 = CAP_LOOKUP | 0x0000040000000000u64; +pub const CAP_SOCK_CLIENT: u64 = CAP_CONNECT + | CAP_GETPEERNAME + | CAP_GETSOCKNAME + | CAP_GETSOCKOPT + | CAP_PEELOFF + | CAP_RECV + | CAP_SEND + | CAP_SETSOCKOPT + | CAP_SHUTDOWN; +pub const CAP_SOCK_SERVER: u64 = CAP_ACCEPT + | CAP_BIND + | CAP_GETPEERNAME + | CAP_GETSOCKNAME + | CAP_GETSOCKOPT + | CAP_LISTEN + | CAP_PEELOFF + | CAP_RECV + | CAP_SEND + | CAP_SETSOCKOPT + | CAP_SHUTDOWN; +#[deprecated(since = "0.2.165", note = "Not stable across OS versions")] +pub const CAP_ALL0: u64 = cap_right!(0, 0x000007FFFFFFFFFFu64); +#[deprecated(since = "0.2.165", note = "Not stable across OS versions")] +pub const CAP_UNUSED0_44: u64 = cap_right!(0, 0x0000080000000000u64); +#[deprecated(since = "0.2.165", note = "Not stable across OS versions")] +pub const CAP_UNUSED0_57: u64 = cap_right!(0, 0x0100000000000000u64); +pub const CAP_MAC_GET: u64 = cap_right!(1, 0x0000000000000001u64); +pub const CAP_MAC_SET: u64 = cap_right!(1, 0x0000000000000002u64); +pub const CAP_SEM_GETVALUE: u64 = cap_right!(1, 0x0000000000000004u64); +pub const CAP_SEM_POST: u64 = cap_right!(1, 0x0000000000000008u64); +pub const CAP_SEM_WAIT: u64 = cap_right!(1, 0x0000000000000010u64); +pub const CAP_EVENT: u64 = cap_right!(1, 0x0000000000000020u64); +pub const CAP_KQUEUE_EVENT: u64 = cap_right!(1, 0x0000000000000040u64); +pub const CAP_IOCTL: u64 = cap_right!(1, 0x0000000000000080u64); +pub const CAP_TTYHOOK: u64 = cap_right!(1, 0x0000000000000100u64); +pub const CAP_PDGETPID: u64 = cap_right!(1, 0x0000000000000200u64); +pub const CAP_PDWAIT: u64 = cap_right!(1, 0x0000000000000400u64); +pub const CAP_PDKILL: u64 = cap_right!(1, 0x0000000000000800u64); +pub const CAP_EXTATTR_DELETE: u64 = cap_right!(1, 0x0000000000001000u64); +pub const CAP_EXTATTR_GET: u64 = cap_right!(1, 0x0000000000002000u64); +pub const CAP_EXTATTR_LIST: u64 = cap_right!(1, 0x0000000000004000u64); +pub const CAP_EXTATTR_SET: u64 = cap_right!(1, 0x0000000000008000u64); +pub const CAP_ACL_CHECK: u64 = cap_right!(1, 0x0000000000010000u64); +pub const CAP_ACL_DELETE: u64 = cap_right!(1, 0x0000000000020000u64); +pub const CAP_ACL_GET: u64 = cap_right!(1, 0x0000000000040000u64); +pub const CAP_ACL_SET: u64 = cap_right!(1, 0x0000000000080000u64); +pub const CAP_KQUEUE_CHANGE: u64 = cap_right!(1, 0x0000000000100000u64); +pub const 
CAP_KQUEUE: u64 = CAP_KQUEUE_EVENT | CAP_KQUEUE_CHANGE; +#[deprecated(since = "0.2.165", note = "Not stable across OS versions")] +pub const CAP_ALL1: u64 = cap_right!(1, 0x00000000001FFFFFu64); +#[deprecated(since = "0.2.165", note = "Not stable across OS versions")] +pub const CAP_UNUSED1_22: u64 = cap_right!(1, 0x0000000000200000u64); +#[deprecated(since = "0.2.165", note = "Not stable across OS versions")] +pub const CAP_UNUSED1_57: u64 = cap_right!(1, 0x0100000000000000u64); +pub const CAP_FCNTL_GETFL: u32 = 1 << 3; +pub const CAP_FCNTL_SETFL: u32 = 1 << 4; +pub const CAP_FCNTL_GETOWN: u32 = 1 << 5; +pub const CAP_FCNTL_SETOWN: u32 = 1 << 6; + +// sys/devicestat.h +pub const DEVSTAT_N_TRANS_FLAGS: c_int = 4; +pub const DEVSTAT_NAME_LEN: c_int = 16; + +// sys/cpuset.h +cfg_if! { + if #[cfg(any(freebsd15, freebsd14))] { + pub const CPU_SETSIZE: c_int = 1024; + } else { + pub const CPU_SETSIZE: c_int = 256; + } +} + +pub const SIGEV_THREAD_ID: c_int = 4; + +pub const EXTATTR_NAMESPACE_EMPTY: c_int = 0; +pub const EXTATTR_NAMESPACE_USER: c_int = 1; +pub const EXTATTR_NAMESPACE_SYSTEM: c_int = 2; + +pub const PTHREAD_STACK_MIN: size_t = MINSIGSTKSZ; +pub const PTHREAD_MUTEX_ADAPTIVE_NP: c_int = 4; +pub const PTHREAD_MUTEX_STALLED: c_int = 0; +pub const PTHREAD_MUTEX_ROBUST: c_int = 1; +pub const SIGSTKSZ: size_t = MINSIGSTKSZ + 32768; +pub const SF_NODISKIO: c_int = 0x00000001; +pub const SF_MNOWAIT: c_int = 0x00000002; +pub const SF_SYNC: c_int = 0x00000004; +pub const SF_USER_READAHEAD: c_int = 0x00000008; +pub const SF_NOCACHE: c_int = 0x00000010; +pub const O_CLOEXEC: c_int = 0x00100000; +pub const O_DIRECTORY: c_int = 0x00020000; +pub const O_DSYNC: c_int = 0x01000000; +pub const O_EMPTY_PATH: c_int = 0x02000000; +pub const O_EXEC: c_int = 0x00040000; +pub const O_PATH: c_int = 0x00400000; +pub const O_RESOLVE_BENEATH: c_int = 0x00800000; +pub const O_SEARCH: c_int = O_EXEC; +pub const O_TTY_INIT: c_int = 0x00080000; +pub const O_VERIFY: c_int = 0x00200000; +pub const F_GETLK: c_int = 11; +pub const F_SETLK: c_int = 12; +pub const F_SETLKW: c_int = 13; +pub const ENOTCAPABLE: c_int = 93; +pub const ECAPMODE: c_int = 94; +pub const ENOTRECOVERABLE: c_int = 95; +pub const EOWNERDEAD: c_int = 96; +pub const EINTEGRITY: c_int = 97; +pub const RLIMIT_NPTS: c_int = 11; +pub const RLIMIT_SWAP: c_int = 12; +pub const RLIMIT_KQUEUES: c_int = 13; +pub const RLIMIT_UMTXP: c_int = 14; +#[deprecated(since = "0.2.64", note = "Not stable across OS versions")] +pub const RLIM_NLIMITS: crate::rlim_t = 15; +pub const RLIM_SAVED_MAX: crate::rlim_t = crate::RLIM_INFINITY; +pub const RLIM_SAVED_CUR: crate::rlim_t = crate::RLIM_INFINITY; + +pub const CP_USER: c_int = 0; +pub const CP_NICE: c_int = 1; +pub const CP_SYS: c_int = 2; +pub const CP_INTR: c_int = 3; +pub const CP_IDLE: c_int = 4; +pub const CPUSTATES: c_int = 5; + +pub const NI_NOFQDN: c_int = 0x00000001; +pub const NI_NUMERICHOST: c_int = 0x00000002; +pub const NI_NAMEREQD: c_int = 0x00000004; +pub const NI_NUMERICSERV: c_int = 0x00000008; +pub const NI_DGRAM: c_int = 0x00000010; +pub const NI_NUMERICSCOPE: c_int = 0x00000020; + +pub const XU_NGROUPS: c_int = 16; + +pub const Q_GETQUOTA: c_int = 0x700; +pub const Q_SETQUOTA: c_int = 0x800; + +pub const MAP_GUARD: c_int = 0x00002000; +pub const MAP_EXCL: c_int = 0x00004000; +pub const MAP_PREFAULT_READ: c_int = 0x00040000; +pub const MAP_ALIGNMENT_SHIFT: c_int = 24; +pub const MAP_ALIGNMENT_MASK: c_int = 0xff << MAP_ALIGNMENT_SHIFT; +pub const MAP_ALIGNED_SUPER: c_int = 1 << MAP_ALIGNMENT_SHIFT; 
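+// Illustrative note (not taken from the FreeBSD headers): sys/mman.h derives
+// mmap(2) alignment requests from the values above via
+// MAP_ALIGNED(n) == (n) << MAP_ALIGNMENT_SHIFT, where n is the binary log of
+// the requested alignment; MAP_ALIGNED_SUPER is simply MAP_ALIGNED(1), i.e.
+// "align on a superpage". For example, a caller wanting a 1 MiB-aligned
+// anonymous mapping could pass
+// `MAP_ANON | MAP_PRIVATE | (20 << MAP_ALIGNMENT_SHIFT)` as the flags word.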
+ +pub const POSIX_FADV_NORMAL: c_int = 0; +pub const POSIX_FADV_RANDOM: c_int = 1; +pub const POSIX_FADV_SEQUENTIAL: c_int = 2; +pub const POSIX_FADV_WILLNEED: c_int = 3; +pub const POSIX_FADV_DONTNEED: c_int = 4; +pub const POSIX_FADV_NOREUSE: c_int = 5; + +pub const POLLINIGNEOF: c_short = 0x2000; +pub const POLLRDHUP: c_short = 0x4000; + +pub const EVFILT_READ: i16 = -1; +pub const EVFILT_WRITE: i16 = -2; +pub const EVFILT_AIO: i16 = -3; +pub const EVFILT_VNODE: i16 = -4; +pub const EVFILT_PROC: i16 = -5; +pub const EVFILT_SIGNAL: i16 = -6; +pub const EVFILT_TIMER: i16 = -7; +pub const EVFILT_PROCDESC: i16 = -8; +pub const EVFILT_FS: i16 = -9; +pub const EVFILT_LIO: i16 = -10; +pub const EVFILT_USER: i16 = -11; +pub const EVFILT_SENDFILE: i16 = -12; +pub const EVFILT_EMPTY: i16 = -13; + +pub const EV_ADD: u16 = 0x1; +pub const EV_DELETE: u16 = 0x2; +pub const EV_ENABLE: u16 = 0x4; +pub const EV_DISABLE: u16 = 0x8; +pub const EV_FORCEONESHOT: u16 = 0x100; +pub const EV_KEEPUDATA: u16 = 0x200; + +pub const EV_ONESHOT: u16 = 0x10; +pub const EV_CLEAR: u16 = 0x20; +pub const EV_RECEIPT: u16 = 0x40; +pub const EV_DISPATCH: u16 = 0x80; +pub const EV_SYSFLAGS: u16 = 0xf000; +pub const EV_DROP: u16 = 0x1000; +pub const EV_FLAG1: u16 = 0x2000; +pub const EV_FLAG2: u16 = 0x4000; + +pub const EV_EOF: u16 = 0x8000; +pub const EV_ERROR: u16 = 0x4000; + +pub const NOTE_TRIGGER: u32 = 0x01000000; +pub const NOTE_FFNOP: u32 = 0x00000000; +pub const NOTE_FFAND: u32 = 0x40000000; +pub const NOTE_FFOR: u32 = 0x80000000; +pub const NOTE_FFCOPY: u32 = 0xc0000000; +pub const NOTE_FFCTRLMASK: u32 = 0xc0000000; +pub const NOTE_FFLAGSMASK: u32 = 0x00ffffff; +pub const NOTE_LOWAT: u32 = 0x00000001; +pub const NOTE_FILE_POLL: u32 = 0x00000002; +pub const NOTE_DELETE: u32 = 0x00000001; +pub const NOTE_WRITE: u32 = 0x00000002; +pub const NOTE_EXTEND: u32 = 0x00000004; +pub const NOTE_ATTRIB: u32 = 0x00000008; +pub const NOTE_LINK: u32 = 0x00000010; +pub const NOTE_RENAME: u32 = 0x00000020; +pub const NOTE_REVOKE: u32 = 0x00000040; +pub const NOTE_OPEN: u32 = 0x00000080; +pub const NOTE_CLOSE: u32 = 0x00000100; +pub const NOTE_CLOSE_WRITE: u32 = 0x00000200; +pub const NOTE_READ: u32 = 0x00000400; +pub const NOTE_EXIT: u32 = 0x80000000; +pub const NOTE_FORK: u32 = 0x40000000; +pub const NOTE_EXEC: u32 = 0x20000000; +pub const NOTE_PDATAMASK: u32 = 0x000fffff; +pub const NOTE_PCTRLMASK: u32 = 0xf0000000; +pub const NOTE_TRACK: u32 = 0x00000001; +pub const NOTE_TRACKERR: u32 = 0x00000002; +pub const NOTE_CHILD: u32 = 0x00000004; +pub const NOTE_SECONDS: u32 = 0x00000001; +pub const NOTE_MSECONDS: u32 = 0x00000002; +pub const NOTE_USECONDS: u32 = 0x00000004; +pub const NOTE_NSECONDS: u32 = 0x00000008; +pub const NOTE_ABSTIME: u32 = 0x00000010; + +pub const MADV_PROTECT: c_int = 10; + +#[doc(hidden)] +#[deprecated( + since = "0.2.72", + note = "CTL_UNSPEC is deprecated. 
Use CTL_SYSCTL instead" +)] +pub const CTL_UNSPEC: c_int = 0; +pub const CTL_SYSCTL: c_int = 0; +pub const CTL_KERN: c_int = 1; +pub const CTL_VM: c_int = 2; +pub const CTL_VFS: c_int = 3; +pub const CTL_NET: c_int = 4; +pub const CTL_DEBUG: c_int = 5; +pub const CTL_HW: c_int = 6; +pub const CTL_MACHDEP: c_int = 7; +pub const CTL_USER: c_int = 8; +pub const CTL_P1003_1B: c_int = 9; + +// sys/sysctl.h +pub const CTL_MAXNAME: c_int = 24; + +pub const CTLTYPE: c_int = 0xf; +pub const CTLTYPE_NODE: c_int = 1; +pub const CTLTYPE_INT: c_int = 2; +pub const CTLTYPE_STRING: c_int = 3; +pub const CTLTYPE_S64: c_int = 4; +pub const CTLTYPE_OPAQUE: c_int = 5; +pub const CTLTYPE_STRUCT: c_int = CTLTYPE_OPAQUE; +pub const CTLTYPE_UINT: c_int = 6; +pub const CTLTYPE_LONG: c_int = 7; +pub const CTLTYPE_ULONG: c_int = 8; +pub const CTLTYPE_U64: c_int = 9; +pub const CTLTYPE_U8: c_int = 0xa; +pub const CTLTYPE_U16: c_int = 0xb; +pub const CTLTYPE_S8: c_int = 0xc; +pub const CTLTYPE_S16: c_int = 0xd; +pub const CTLTYPE_S32: c_int = 0xe; +pub const CTLTYPE_U32: c_int = 0xf; + +pub const CTLFLAG_RD: c_int = 0x80000000; +pub const CTLFLAG_WR: c_int = 0x40000000; +pub const CTLFLAG_RW: c_int = CTLFLAG_RD | CTLFLAG_WR; +pub const CTLFLAG_DORMANT: c_int = 0x20000000; +pub const CTLFLAG_ANYBODY: c_int = 0x10000000; +pub const CTLFLAG_SECURE: c_int = 0x08000000; +pub const CTLFLAG_PRISON: c_int = 0x04000000; +pub const CTLFLAG_DYN: c_int = 0x02000000; +pub const CTLFLAG_SKIP: c_int = 0x01000000; +pub const CTLMASK_SECURE: c_int = 0x00F00000; +pub const CTLFLAG_TUN: c_int = 0x00080000; +pub const CTLFLAG_RDTUN: c_int = CTLFLAG_RD | CTLFLAG_TUN; +pub const CTLFLAG_RWTUN: c_int = CTLFLAG_RW | CTLFLAG_TUN; +pub const CTLFLAG_MPSAFE: c_int = 0x00040000; +pub const CTLFLAG_VNET: c_int = 0x00020000; +pub const CTLFLAG_DYING: c_int = 0x00010000; +pub const CTLFLAG_CAPRD: c_int = 0x00008000; +pub const CTLFLAG_CAPWR: c_int = 0x00004000; +pub const CTLFLAG_STATS: c_int = 0x00002000; +pub const CTLFLAG_NOFETCH: c_int = 0x00001000; +pub const CTLFLAG_CAPRW: c_int = CTLFLAG_CAPRD | CTLFLAG_CAPWR; +pub const CTLFLAG_NEEDGIANT: c_int = 0x00000800; + +pub const CTLSHIFT_SECURE: c_int = 20; +pub const CTLFLAG_SECURE1: c_int = CTLFLAG_SECURE | (0 << CTLSHIFT_SECURE); +pub const CTLFLAG_SECURE2: c_int = CTLFLAG_SECURE | (1 << CTLSHIFT_SECURE); +pub const CTLFLAG_SECURE3: c_int = CTLFLAG_SECURE | (2 << CTLSHIFT_SECURE); + +pub const OID_AUTO: c_int = -1; + +pub const CTL_SYSCTL_DEBUG: c_int = 0; +pub const CTL_SYSCTL_NAME: c_int = 1; +pub const CTL_SYSCTL_NEXT: c_int = 2; +pub const CTL_SYSCTL_NAME2OID: c_int = 3; +pub const CTL_SYSCTL_OIDFMT: c_int = 4; +pub const CTL_SYSCTL_OIDDESCR: c_int = 5; +pub const CTL_SYSCTL_OIDLABEL: c_int = 6; +pub const CTL_SYSCTL_NEXTNOSKIP: c_int = 7; + +pub const KERN_OSTYPE: c_int = 1; +pub const KERN_OSRELEASE: c_int = 2; +pub const KERN_OSREV: c_int = 3; +pub const KERN_VERSION: c_int = 4; +pub const KERN_MAXVNODES: c_int = 5; +pub const KERN_MAXPROC: c_int = 6; +pub const KERN_MAXFILES: c_int = 7; +pub const KERN_ARGMAX: c_int = 8; +pub const KERN_SECURELVL: c_int = 9; +pub const KERN_HOSTNAME: c_int = 10; +pub const KERN_HOSTID: c_int = 11; +pub const KERN_CLOCKRATE: c_int = 12; +pub const KERN_VNODE: c_int = 13; +pub const KERN_PROC: c_int = 14; +pub const KERN_FILE: c_int = 15; +pub const KERN_PROF: c_int = 16; +pub const KERN_POSIX1: c_int = 17; +pub const KERN_NGROUPS: c_int = 18; +pub const KERN_JOB_CONTROL: c_int = 19; +pub const KERN_SAVED_IDS: c_int = 20; +pub const KERN_BOOTTIME: c_int = 
21; +pub const KERN_NISDOMAINNAME: c_int = 22; +pub const KERN_UPDATEINTERVAL: c_int = 23; +pub const KERN_OSRELDATE: c_int = 24; +pub const KERN_NTP_PLL: c_int = 25; +pub const KERN_BOOTFILE: c_int = 26; +pub const KERN_MAXFILESPERPROC: c_int = 27; +pub const KERN_MAXPROCPERUID: c_int = 28; +pub const KERN_DUMPDEV: c_int = 29; +pub const KERN_IPC: c_int = 30; +pub const KERN_DUMMY: c_int = 31; +pub const KERN_PS_STRINGS: c_int = 32; +pub const KERN_USRSTACK: c_int = 33; +pub const KERN_LOGSIGEXIT: c_int = 34; +pub const KERN_IOV_MAX: c_int = 35; +pub const KERN_HOSTUUID: c_int = 36; +pub const KERN_ARND: c_int = 37; +pub const KERN_MAXPHYS: c_int = 38; + +pub const KERN_PROC_ALL: c_int = 0; +pub const KERN_PROC_PID: c_int = 1; +pub const KERN_PROC_PGRP: c_int = 2; +pub const KERN_PROC_SESSION: c_int = 3; +pub const KERN_PROC_TTY: c_int = 4; +pub const KERN_PROC_UID: c_int = 5; +pub const KERN_PROC_RUID: c_int = 6; +pub const KERN_PROC_ARGS: c_int = 7; +pub const KERN_PROC_PROC: c_int = 8; +pub const KERN_PROC_SV_NAME: c_int = 9; +pub const KERN_PROC_RGID: c_int = 10; +pub const KERN_PROC_GID: c_int = 11; +pub const KERN_PROC_PATHNAME: c_int = 12; +pub const KERN_PROC_OVMMAP: c_int = 13; +pub const KERN_PROC_OFILEDESC: c_int = 14; +pub const KERN_PROC_KSTACK: c_int = 15; +pub const KERN_PROC_INC_THREAD: c_int = 0x10; +pub const KERN_PROC_VMMAP: c_int = 32; +pub const KERN_PROC_FILEDESC: c_int = 33; +pub const KERN_PROC_GROUPS: c_int = 34; +pub const KERN_PROC_ENV: c_int = 35; +pub const KERN_PROC_AUXV: c_int = 36; +pub const KERN_PROC_RLIMIT: c_int = 37; +pub const KERN_PROC_PS_STRINGS: c_int = 38; +pub const KERN_PROC_UMASK: c_int = 39; +pub const KERN_PROC_OSREL: c_int = 40; +pub const KERN_PROC_SIGTRAMP: c_int = 41; +pub const KERN_PROC_CWD: c_int = 42; +pub const KERN_PROC_NFDS: c_int = 43; +pub const KERN_PROC_SIGFASTBLK: c_int = 44; + +pub const KIPC_MAXSOCKBUF: c_int = 1; +pub const KIPC_SOCKBUF_WASTE: c_int = 2; +pub const KIPC_SOMAXCONN: c_int = 3; +pub const KIPC_MAX_LINKHDR: c_int = 4; +pub const KIPC_MAX_PROTOHDR: c_int = 5; +pub const KIPC_MAX_HDR: c_int = 6; +pub const KIPC_MAX_DATALEN: c_int = 7; + +pub const HW_MACHINE: c_int = 1; +pub const HW_MODEL: c_int = 2; +pub const HW_NCPU: c_int = 3; +pub const HW_BYTEORDER: c_int = 4; +pub const HW_PHYSMEM: c_int = 5; +pub const HW_USERMEM: c_int = 6; +pub const HW_PAGESIZE: c_int = 7; +pub const HW_DISKNAMES: c_int = 8; +pub const HW_DISKSTATS: c_int = 9; +pub const HW_FLOATINGPT: c_int = 10; +pub const HW_MACHINE_ARCH: c_int = 11; +pub const HW_REALMEM: c_int = 12; + +pub const USER_CS_PATH: c_int = 1; +pub const USER_BC_BASE_MAX: c_int = 2; +pub const USER_BC_DIM_MAX: c_int = 3; +pub const USER_BC_SCALE_MAX: c_int = 4; +pub const USER_BC_STRING_MAX: c_int = 5; +pub const USER_COLL_WEIGHTS_MAX: c_int = 6; +pub const USER_EXPR_NEST_MAX: c_int = 7; +pub const USER_LINE_MAX: c_int = 8; +pub const USER_RE_DUP_MAX: c_int = 9; +pub const USER_POSIX2_VERSION: c_int = 10; +pub const USER_POSIX2_C_BIND: c_int = 11; +pub const USER_POSIX2_C_DEV: c_int = 12; +pub const USER_POSIX2_CHAR_TERM: c_int = 13; +pub const USER_POSIX2_FORT_DEV: c_int = 14; +pub const USER_POSIX2_FORT_RUN: c_int = 15; +pub const USER_POSIX2_LOCALEDEF: c_int = 16; +pub const USER_POSIX2_SW_DEV: c_int = 17; +pub const USER_POSIX2_UPE: c_int = 18; +pub const USER_STREAM_MAX: c_int = 19; +pub const USER_TZNAME_MAX: c_int = 20; +pub const USER_LOCALBASE: c_int = 21; + +pub const CTL_P1003_1B_ASYNCHRONOUS_IO: c_int = 1; +pub const CTL_P1003_1B_MAPPED_FILES: c_int = 2; 
+pub const CTL_P1003_1B_MEMLOCK: c_int = 3; +pub const CTL_P1003_1B_MEMLOCK_RANGE: c_int = 4; +pub const CTL_P1003_1B_MEMORY_PROTECTION: c_int = 5; +pub const CTL_P1003_1B_MESSAGE_PASSING: c_int = 6; +pub const CTL_P1003_1B_PRIORITIZED_IO: c_int = 7; +pub const CTL_P1003_1B_PRIORITY_SCHEDULING: c_int = 8; +pub const CTL_P1003_1B_REALTIME_SIGNALS: c_int = 9; +pub const CTL_P1003_1B_SEMAPHORES: c_int = 10; +pub const CTL_P1003_1B_FSYNC: c_int = 11; +pub const CTL_P1003_1B_SHARED_MEMORY_OBJECTS: c_int = 12; +pub const CTL_P1003_1B_SYNCHRONIZED_IO: c_int = 13; +pub const CTL_P1003_1B_TIMERS: c_int = 14; +pub const CTL_P1003_1B_AIO_LISTIO_MAX: c_int = 15; +pub const CTL_P1003_1B_AIO_MAX: c_int = 16; +pub const CTL_P1003_1B_AIO_PRIO_DELTA_MAX: c_int = 17; +pub const CTL_P1003_1B_DELAYTIMER_MAX: c_int = 18; +pub const CTL_P1003_1B_MQ_OPEN_MAX: c_int = 19; +pub const CTL_P1003_1B_PAGESIZE: c_int = 20; +pub const CTL_P1003_1B_RTSIG_MAX: c_int = 21; +pub const CTL_P1003_1B_SEM_NSEMS_MAX: c_int = 22; +pub const CTL_P1003_1B_SEM_VALUE_MAX: c_int = 23; +pub const CTL_P1003_1B_SIGQUEUE_MAX: c_int = 24; +pub const CTL_P1003_1B_TIMER_MAX: c_int = 25; + +pub const TIOCGPTN: c_ulong = 0x4004740f; +pub const TIOCPTMASTER: c_ulong = 0x2000741c; +pub const TIOCSIG: c_ulong = 0x2004745f; +pub const TIOCM_DCD: c_int = 0x40; +pub const H4DISC: c_int = 0x7; + +pub const VM_TOTAL: c_int = 1; + +cfg_if! { + if #[cfg(target_pointer_width = "64")] { + pub const BIOCSETFNR: c_ulong = 0x80104282; + } else { + pub const BIOCSETFNR: c_ulong = 0x80084282; + } +} + +cfg_if! { + if #[cfg(target_pointer_width = "64")] { + pub const FIODGNAME: c_ulong = 0x80106678; + } else { + pub const FIODGNAME: c_ulong = 0x80086678; + } +} + +pub const FIONWRITE: c_ulong = 0x40046677; +pub const FIONSPACE: c_ulong = 0x40046676; +pub const FIOSEEKDATA: c_ulong = 0xc0086661; +pub const FIOSEEKHOLE: c_ulong = 0xc0086662; +pub const FIOSSHMLPGCNF: c_ulong = 0x80306664; + +pub const JAIL_API_VERSION: u32 = 2; +pub const JAIL_CREATE: c_int = 0x01; +pub const JAIL_UPDATE: c_int = 0x02; +pub const JAIL_ATTACH: c_int = 0x04; +pub const JAIL_DYING: c_int = 0x08; +pub const JAIL_SYS_DISABLE: c_int = 0; +pub const JAIL_SYS_NEW: c_int = 1; +pub const JAIL_SYS_INHERIT: c_int = 2; + +pub const MNT_ACLS: c_int = 0x08000000; +pub const MNT_BYFSID: c_int = 0x08000000; +pub const MNT_GJOURNAL: c_int = 0x02000000; +pub const MNT_MULTILABEL: c_int = 0x04000000; +pub const MNT_NFS4ACLS: c_int = 0x00000010; +pub const MNT_SNAPSHOT: c_int = 0x01000000; +pub const MNT_UNION: c_int = 0x00000020; +pub const MNT_NONBUSY: c_int = 0x04000000; + +pub const SCM_BINTIME: c_int = 0x04; +pub const SCM_REALTIME: c_int = 0x05; +pub const SCM_MONOTONIC: c_int = 0x06; +pub const SCM_TIME_INFO: c_int = 0x07; +pub const SCM_CREDS2: c_int = 0x08; + +pub const SO_BINTIME: c_int = 0x2000; +pub const SO_NO_OFFLOAD: c_int = 0x4000; +pub const SO_NO_DDP: c_int = 0x8000; +pub const SO_REUSEPORT_LB: c_int = 0x10000; +pub const SO_LABEL: c_int = 0x1009; +pub const SO_PEERLABEL: c_int = 0x1010; +pub const SO_LISTENQLIMIT: c_int = 0x1011; +pub const SO_LISTENQLEN: c_int = 0x1012; +pub const SO_LISTENINCQLEN: c_int = 0x1013; +pub const SO_SETFIB: c_int = 0x1014; +pub const SO_USER_COOKIE: c_int = 0x1015; +pub const SO_PROTOCOL: c_int = 0x1016; +pub const SO_PROTOTYPE: c_int = SO_PROTOCOL; +pub const SO_TS_CLOCK: c_int = 0x1017; +pub const SO_DOMAIN: c_int = 0x1019; +pub const SO_SPLICE: c_int = 0x1023; +pub const SO_VENDOR: c_int = 0x80000000; + +pub const SO_TS_REALTIME_MICRO: c_int = 0; 
+pub const SO_TS_BINTIME: c_int = 1; +pub const SO_TS_REALTIME: c_int = 2; +pub const SO_TS_MONOTONIC: c_int = 3; +pub const SO_TS_DEFAULT: c_int = SO_TS_REALTIME_MICRO; +pub const SO_TS_CLOCK_MAX: c_int = SO_TS_MONOTONIC; + +pub const LOCAL_CREDS: c_int = 2; +pub const LOCAL_CREDS_PERSISTENT: c_int = 3; +pub const LOCAL_CONNWAIT: c_int = 4; +pub const LOCAL_VENDOR: c_int = SO_VENDOR; + +pub const PL_EVENT_NONE: c_int = 0; +pub const PL_EVENT_SIGNAL: c_int = 1; +pub const PL_FLAG_SA: c_int = 0x01; +pub const PL_FLAG_BOUND: c_int = 0x02; +pub const PL_FLAG_SCE: c_int = 0x04; +pub const PL_FLAG_SCX: c_int = 0x08; +pub const PL_FLAG_EXEC: c_int = 0x10; +pub const PL_FLAG_SI: c_int = 0x20; +pub const PL_FLAG_FORKED: c_int = 0x40; +pub const PL_FLAG_CHILD: c_int = 0x80; +pub const PL_FLAG_BORN: c_int = 0x100; +pub const PL_FLAG_EXITED: c_int = 0x200; +pub const PL_FLAG_VFORKED: c_int = 0x400; +pub const PL_FLAG_VFORK_DONE: c_int = 0x800; + +pub const PT_LWPINFO: c_int = 13; +pub const PT_GETNUMLWPS: c_int = 14; +pub const PT_GETLWPLIST: c_int = 15; +pub const PT_CLEARSTEP: c_int = 16; +pub const PT_SETSTEP: c_int = 17; +pub const PT_SUSPEND: c_int = 18; +pub const PT_RESUME: c_int = 19; +pub const PT_TO_SCE: c_int = 20; +pub const PT_TO_SCX: c_int = 21; +pub const PT_SYSCALL: c_int = 22; +pub const PT_FOLLOW_FORK: c_int = 23; +pub const PT_LWP_EVENTS: c_int = 24; +pub const PT_GET_EVENT_MASK: c_int = 25; +pub const PT_SET_EVENT_MASK: c_int = 26; +pub const PT_GET_SC_ARGS: c_int = 27; +pub const PT_GET_SC_RET: c_int = 28; +pub const PT_COREDUMP: c_int = 29; +pub const PT_GETREGS: c_int = 33; +pub const PT_SETREGS: c_int = 34; +pub const PT_GETFPREGS: c_int = 35; +pub const PT_SETFPREGS: c_int = 36; +pub const PT_GETDBREGS: c_int = 37; +pub const PT_SETDBREGS: c_int = 38; +pub const PT_VM_TIMESTAMP: c_int = 40; +pub const PT_VM_ENTRY: c_int = 41; +pub const PT_GETREGSET: c_int = 42; +pub const PT_SETREGSET: c_int = 43; +pub const PT_SC_REMOTE: c_int = 44; +pub const PT_FIRSTMACH: c_int = 64; + +pub const PTRACE_EXEC: c_int = 0x0001; +pub const PTRACE_SCE: c_int = 0x0002; +pub const PTRACE_SCX: c_int = 0x0004; +pub const PTRACE_SYSCALL: c_int = PTRACE_SCE | PTRACE_SCX; +pub const PTRACE_FORK: c_int = 0x0008; +pub const PTRACE_LWP: c_int = 0x0010; +pub const PTRACE_VFORK: c_int = 0x0020; +pub const PTRACE_DEFAULT: c_int = PTRACE_EXEC; + +pub const PC_COMPRESS: u32 = 0x00000001; +pub const PC_ALL: u32 = 0x00000002; + +pub const PROC_SPROTECT: c_int = 1; +pub const PROC_REAP_ACQUIRE: c_int = 2; +pub const PROC_REAP_RELEASE: c_int = 3; +pub const PROC_REAP_STATUS: c_int = 4; +pub const PROC_REAP_GETPIDS: c_int = 5; +pub const PROC_REAP_KILL: c_int = 6; +pub const PROC_TRACE_CTL: c_int = 7; +pub const PROC_TRACE_STATUS: c_int = 8; +pub const PROC_TRAPCAP_CTL: c_int = 9; +pub const PROC_TRAPCAP_STATUS: c_int = 10; +pub const PROC_PDEATHSIG_CTL: c_int = 11; +pub const PROC_PDEATHSIG_STATUS: c_int = 12; +pub const PROC_ASLR_CTL: c_int = 13; +pub const PROC_ASLR_STATUS: c_int = 14; +pub const PROC_PROTMAX_CTL: c_int = 15; +pub const PROC_PROTMAX_STATUS: c_int = 16; +pub const PROC_STACKGAP_CTL: c_int = 17; +pub const PROC_STACKGAP_STATUS: c_int = 18; +pub const PROC_NO_NEW_PRIVS_CTL: c_int = 19; +pub const PROC_NO_NEW_PRIVS_STATUS: c_int = 20; +pub const PROC_WXMAP_CTL: c_int = 21; +pub const PROC_WXMAP_STATUS: c_int = 22; +pub const PROC_PROCCTL_MD_MIN: c_int = 0x10000000; + +pub const PPROT_SET: c_int = 1; +pub const PPROT_CLEAR: c_int = 2; +pub const PPROT_DESCEND: c_int = 0x10; +pub const 
PPROT_INHERIT: c_int = 0x20; + +pub const PROC_TRACE_CTL_ENABLE: c_int = 1; +pub const PROC_TRACE_CTL_DISABLE: c_int = 2; +pub const PROC_TRACE_CTL_DISABLE_EXEC: c_int = 3; + +pub const PROC_TRAPCAP_CTL_ENABLE: c_int = 1; +pub const PROC_TRAPCAP_CTL_DISABLE: c_int = 2; + +pub const PROC_ASLR_FORCE_ENABLE: c_int = 1; +pub const PROC_ASLR_FORCE_DISABLE: c_int = 2; +pub const PROC_ASLR_NOFORCE: c_int = 3; +pub const PROC_ASLR_ACTIVE: c_int = 0x80000000; + +pub const PROC_PROTMAX_FORCE_ENABLE: c_int = 1; +pub const PROC_PROTMAX_FORCE_DISABLE: c_int = 2; +pub const PROC_PROTMAX_NOFORCE: c_int = 3; +pub const PROC_PROTMAX_ACTIVE: c_int = 0x80000000; + +pub const PROC_STACKGAP_ENABLE: c_int = 0x0001; +pub const PROC_STACKGAP_DISABLE: c_int = 0x0002; +pub const PROC_STACKGAP_ENABLE_EXEC: c_int = 0x0004; +pub const PROC_STACKGAP_DISABLE_EXEC: c_int = 0x0008; + +pub const PROC_NO_NEW_PRIVS_ENABLE: c_int = 1; +pub const PROC_NO_NEW_PRIVS_DISABLE: c_int = 2; + +pub const PROC_WX_MAPPINGS_PERMIT: c_int = 0x0001; +pub const PROC_WX_MAPPINGS_DISALLOW_EXEC: c_int = 0x0002; +pub const PROC_WXORX_ENFORCE: c_int = 0x80000000; + +pub const AF_SLOW: c_int = 33; +pub const AF_SCLUSTER: c_int = 34; +pub const AF_ARP: c_int = 35; +pub const AF_BLUETOOTH: c_int = 36; +pub const AF_IEEE80211: c_int = 37; +pub const AF_INET_SDP: c_int = 40; +pub const AF_INET6_SDP: c_int = 42; + +// sys/net/if.h +pub const IF_MAXUNIT: c_int = 0x7fff; +/// (n) interface is up +pub const IFF_UP: c_int = 0x1; +/// (i) broadcast address valid +pub const IFF_BROADCAST: c_int = 0x2; +/// (n) turn on debugging +pub const IFF_DEBUG: c_int = 0x4; +/// (i) is a loopback net +pub const IFF_LOOPBACK: c_int = 0x8; +/// (i) is a point-to-point link +pub const IFF_POINTOPOINT: c_int = 0x10; +/// (i) calls if_input in net epoch +#[deprecated(since = "0.2.149", note = "Removed in FreeBSD 14")] +pub const IFF_KNOWSEPOCH: c_int = 0x20; +/// (d) resources allocated +pub const IFF_RUNNING: c_int = 0x40; +#[doc(hidden)] +#[deprecated( + since = "0.2.54", + note = "IFF_DRV_RUNNING is deprecated. 
Use the portable IFF_RUNNING instead" +)] +/// (d) resources allocate +pub const IFF_DRV_RUNNING: c_int = 0x40; +/// (n) no address resolution protocol +pub const IFF_NOARP: c_int = 0x80; +/// (n) receive all packets +pub const IFF_PROMISC: c_int = 0x100; +/// (n) receive all multicast packets +pub const IFF_ALLMULTI: c_int = 0x200; +/// (d) tx hardware queue is full +pub const IFF_OACTIVE: c_int = 0x400; +#[doc(hidden)] +#[deprecated(since = "0.2.54", note = "Use the portable `IFF_OACTIVE` instead")] +/// (d) tx hardware queue is full +pub const IFF_DRV_OACTIVE: c_int = 0x400; +/// (i) can't hear own transmissions +pub const IFF_SIMPLEX: c_int = 0x800; +/// per link layer defined bit +pub const IFF_LINK0: c_int = 0x1000; +/// per link layer defined bit +pub const IFF_LINK1: c_int = 0x2000; +/// per link layer defined bit +pub const IFF_LINK2: c_int = 0x4000; +/// use alternate physical connection +pub const IFF_ALTPHYS: c_int = IFF_LINK2; +/// (i) supports multicast +pub const IFF_MULTICAST: c_int = 0x8000; +/// (i) unconfigurable using ioctl(2) +pub const IFF_CANTCONFIG: c_int = 0x10000; +/// (n) user-requested promisc mode +pub const IFF_PPROMISC: c_int = 0x20000; +/// (n) user-requested monitor mode +pub const IFF_MONITOR: c_int = 0x40000; +/// (n) static ARP +pub const IFF_STATICARP: c_int = 0x80000; +/// (n) interface is winding down +pub const IFF_DYING: c_int = 0x200000; +/// (n) interface is being renamed +pub const IFF_RENAMING: c_int = 0x400000; +/// interface is not part of any groups +#[deprecated(since = "0.2.149", note = "Removed in FreeBSD 14")] +pub const IFF_NOGROUP: c_int = 0x800000; + +/// link invalid/unknown +pub const LINK_STATE_UNKNOWN: c_int = 0; +/// link is down +pub const LINK_STATE_DOWN: c_int = 1; +/// link is up +pub const LINK_STATE_UP: c_int = 2; + +/// can offload checksum on RX +pub const IFCAP_RXCSUM: c_int = 0x00001; +/// can offload checksum on TX +pub const IFCAP_TXCSUM: c_int = 0x00002; +/// can be a network console +pub const IFCAP_NETCONS: c_int = 0x00004; +/// VLAN-compatible MTU +pub const IFCAP_VLAN_MTU: c_int = 0x00008; +/// hardware VLAN tag support +pub const IFCAP_VLAN_HWTAGGING: c_int = 0x00010; +/// 9000 byte MTU supported +pub const IFCAP_JUMBO_MTU: c_int = 0x00020; +/// driver supports polling +pub const IFCAP_POLLING: c_int = 0x00040; +/// can do IFCAP_HWCSUM on VLANs +pub const IFCAP_VLAN_HWCSUM: c_int = 0x00080; +/// can do TCP Segmentation Offload +pub const IFCAP_TSO4: c_int = 0x00100; +/// can do TCP6 Segmentation Offload +pub const IFCAP_TSO6: c_int = 0x00200; +/// can do Large Receive Offload +pub const IFCAP_LRO: c_int = 0x00400; +/// wake on any unicast frame +pub const IFCAP_WOL_UCAST: c_int = 0x00800; +/// wake on any multicast frame +pub const IFCAP_WOL_MCAST: c_int = 0x01000; +/// wake on any Magic Packet +pub const IFCAP_WOL_MAGIC: c_int = 0x02000; +/// interface can offload TCP +pub const IFCAP_TOE4: c_int = 0x04000; +/// interface can offload TCP6 +pub const IFCAP_TOE6: c_int = 0x08000; +/// interface hw can filter vlan tag +pub const IFCAP_VLAN_HWFILTER: c_int = 0x10000; +/// can do SIOCGIFCAPNV/SIOCSIFCAPNV +pub const IFCAP_NV: c_int = 0x20000; +/// can do IFCAP_TSO on VLANs +pub const IFCAP_VLAN_HWTSO: c_int = 0x40000; +/// the runtime link state is dynamic +pub const IFCAP_LINKSTATE: c_int = 0x80000; +/// netmap mode supported/enabled +pub const IFCAP_NETMAP: c_int = 0x100000; +/// can offload checksum on IPv6 RX +pub const IFCAP_RXCSUM_IPV6: c_int = 0x200000; +/// can offload checksum on IPv6 TX +pub const 
IFCAP_TXCSUM_IPV6: c_int = 0x400000; +/// manages counters internally +pub const IFCAP_HWSTATS: c_int = 0x800000; +/// hardware supports TX rate limiting +pub const IFCAP_TXRTLMT: c_int = 0x1000000; +/// hardware rx timestamping +pub const IFCAP_HWRXTSTMP: c_int = 0x2000000; +/// understands M_EXTPG mbufs +pub const IFCAP_MEXTPG: c_int = 0x4000000; +/// can do TLS encryption and segmentation for TCP +pub const IFCAP_TXTLS4: c_int = 0x8000000; +/// can do TLS encryption and segmentation for TCP6 +pub const IFCAP_TXTLS6: c_int = 0x10000000; +/// can do IFCAN_HWCSUM on VXLANs +pub const IFCAP_VXLAN_HWCSUM: c_int = 0x20000000; +/// can do IFCAP_TSO on VXLANs +pub const IFCAP_VXLAN_HWTSO: c_int = 0x40000000; +/// can do TLS with rate limiting +pub const IFCAP_TXTLS_RTLMT: c_int = 0x80000000; + +pub const IFCAP_HWCSUM_IPV6: c_int = IFCAP_RXCSUM_IPV6 | IFCAP_TXCSUM_IPV6; +pub const IFCAP_HWCSUM: c_int = IFCAP_RXCSUM | IFCAP_TXCSUM; +pub const IFCAP_TSO: c_int = IFCAP_TSO4 | IFCAP_TSO6; +pub const IFCAP_WOL: c_int = IFCAP_WOL_UCAST | IFCAP_WOL_MCAST | IFCAP_WOL_MAGIC; +pub const IFCAP_TOE: c_int = IFCAP_TOE4 | IFCAP_TOE6; +pub const IFCAP_TXTLS: c_int = IFCAP_TXTLS4 | IFCAP_TXTLS6; +pub const IFCAP_CANTCHANGE: c_int = IFCAP_NETMAP | IFCAP_NV; + +pub const IFQ_MAXLEN: c_int = 50; +pub const IFNET_SLOWHZ: c_int = 1; + +pub const IFAN_ARRIVAL: c_int = 0; +pub const IFAN_DEPARTURE: c_int = 1; + +pub const IFSTATMAX: c_int = 800; + +pub const RSS_FUNC_NONE: c_int = 0; +pub const RSS_FUNC_PRIVATE: c_int = 1; +pub const RSS_FUNC_TOEPLITZ: c_int = 2; + +pub const RSS_TYPE_IPV4: c_int = 0x00000001; +pub const RSS_TYPE_TCP_IPV4: c_int = 0x00000002; +pub const RSS_TYPE_IPV6: c_int = 0x00000004; +pub const RSS_TYPE_IPV6_EX: c_int = 0x00000008; +pub const RSS_TYPE_TCP_IPV6: c_int = 0x00000010; +pub const RSS_TYPE_TCP_IPV6_EX: c_int = 0x00000020; +pub const RSS_TYPE_UDP_IPV4: c_int = 0x00000040; +pub const RSS_TYPE_UDP_IPV6: c_int = 0x00000080; +pub const RSS_TYPE_UDP_IPV6_EX: c_int = 0x00000100; +pub const RSS_KEYLEN: c_int = 128; + +pub const IFNET_PCP_NONE: c_int = 0xff; +pub const IFDR_MSG_SIZE: c_int = 64; +pub const IFDR_REASON_MSG: c_int = 1; +pub const IFDR_REASON_VENDOR: c_int = 2; + +// sys/net/if_mib.h + +/// non-interface-specific +pub const IFMIB_SYSTEM: c_int = 1; +/// per-interface data table +pub const IFMIB_IFDATA: c_int = 2; + +/// generic stats for all kinds of ifaces +pub const IFDATA_GENERAL: c_int = 1; +/// specific to the type of interface +pub const IFDATA_LINKSPECIFIC: c_int = 2; +/// driver name and unit +pub const IFDATA_DRIVERNAME: c_int = 3; + +/// number of interfaces configured +pub const IFMIB_IFCOUNT: c_int = 1; + +/// functions not specific to a type of iface +pub const NETLINK_GENERIC: c_int = 0; + +pub const DOT3COMPLIANCE_STATS: c_int = 1; +pub const DOT3COMPLIANCE_COLLS: c_int = 2; + +pub const dot3ChipSetAMD7990: c_int = 1; +pub const dot3ChipSetAMD79900: c_int = 2; +pub const dot3ChipSetAMD79C940: c_int = 3; + +pub const dot3ChipSetIntel82586: c_int = 1; +pub const dot3ChipSetIntel82596: c_int = 2; +pub const dot3ChipSetIntel82557: c_int = 3; + +pub const dot3ChipSetNational8390: c_int = 1; +pub const dot3ChipSetNationalSonic: c_int = 2; + +pub const dot3ChipSetFujitsu86950: c_int = 1; + +pub const dot3ChipSetDigitalDC21040: c_int = 1; +pub const dot3ChipSetDigitalDC21140: c_int = 2; +pub const dot3ChipSetDigitalDC21041: c_int = 3; +pub const dot3ChipSetDigitalDC21140A: c_int = 4; +pub const dot3ChipSetDigitalDC21142: c_int = 5; + +pub const 
dot3ChipSetWesternDigital83C690: c_int = 1; +pub const dot3ChipSetWesternDigital83C790: c_int = 2; + +// sys/netinet/in.h +// Protocols (RFC 1700) +// NOTE: These are in addition to the constants defined in src/unix/mod.rs + +// IPPROTO_IP defined in src/unix/mod.rs +/// IP6 hop-by-hop options +pub const IPPROTO_HOPOPTS: c_int = 0; +// IPPROTO_ICMP defined in src/unix/mod.rs +/// group mgmt protocol +pub const IPPROTO_IGMP: c_int = 2; +/// gateway^2 (deprecated) +pub const IPPROTO_GGP: c_int = 3; +/// for compatibility +pub const IPPROTO_IPIP: c_int = 4; +// IPPROTO_TCP defined in src/unix/mod.rs +/// Stream protocol II. +pub const IPPROTO_ST: c_int = 7; +/// exterior gateway protocol +pub const IPPROTO_EGP: c_int = 8; +/// private interior gateway +pub const IPPROTO_PIGP: c_int = 9; +/// BBN RCC Monitoring +pub const IPPROTO_RCCMON: c_int = 10; +/// network voice protocol +pub const IPPROTO_NVPII: c_int = 11; +/// pup +pub const IPPROTO_PUP: c_int = 12; +/// Argus +pub const IPPROTO_ARGUS: c_int = 13; +/// EMCON +pub const IPPROTO_EMCON: c_int = 14; +/// Cross Net Debugger +pub const IPPROTO_XNET: c_int = 15; +/// Chaos +pub const IPPROTO_CHAOS: c_int = 16; +// IPPROTO_UDP defined in src/unix/mod.rs +/// Multiplexing +pub const IPPROTO_MUX: c_int = 18; +/// DCN Measurement Subsystems +pub const IPPROTO_MEAS: c_int = 19; +/// Host Monitoring +pub const IPPROTO_HMP: c_int = 20; +/// Packet Radio Measurement +pub const IPPROTO_PRM: c_int = 21; +/// xns idp +pub const IPPROTO_IDP: c_int = 22; +/// Trunk-1 +pub const IPPROTO_TRUNK1: c_int = 23; +/// Trunk-2 +pub const IPPROTO_TRUNK2: c_int = 24; +/// Leaf-1 +pub const IPPROTO_LEAF1: c_int = 25; +/// Leaf-2 +pub const IPPROTO_LEAF2: c_int = 26; +/// Reliable Data +pub const IPPROTO_RDP: c_int = 27; +/// Reliable Transaction +pub const IPPROTO_IRTP: c_int = 28; +/// tp-4 w/ class negotiation +pub const IPPROTO_TP: c_int = 29; +/// Bulk Data Transfer +pub const IPPROTO_BLT: c_int = 30; +/// Network Services +pub const IPPROTO_NSP: c_int = 31; +/// Merit Internodal +pub const IPPROTO_INP: c_int = 32; +#[doc(hidden)] +#[deprecated( + since = "0.2.72", + note = "IPPROTO_SEP is deprecated. Use IPPROTO_DCCP instead" +)] +pub const IPPROTO_SEP: c_int = 33; +/// Datagram Congestion Control Protocol +pub const IPPROTO_DCCP: c_int = 33; +/// Third Party Connect +pub const IPPROTO_3PC: c_int = 34; +/// InterDomain Policy Routing +pub const IPPROTO_IDPR: c_int = 35; +/// XTP +pub const IPPROTO_XTP: c_int = 36; +/// Datagram Delivery +pub const IPPROTO_DDP: c_int = 37; +/// Control Message Transport +pub const IPPROTO_CMTP: c_int = 38; +/// TP++ Transport +pub const IPPROTO_TPXX: c_int = 39; +/// IL transport protocol +pub const IPPROTO_IL: c_int = 40; +// IPPROTO_IPV6 defined in src/unix/mod.rs +/// Source Demand Routing +pub const IPPROTO_SDRP: c_int = 42; +/// IP6 routing header +pub const IPPROTO_ROUTING: c_int = 43; +/// IP6 fragmentation header +pub const IPPROTO_FRAGMENT: c_int = 44; +/// InterDomain Routing +pub const IPPROTO_IDRP: c_int = 45; +/// resource reservation +pub const IPPROTO_RSVP: c_int = 46; +/// General Routing Encap. +pub const IPPROTO_GRE: c_int = 47; +/// Mobile Host Routing +pub const IPPROTO_MHRP: c_int = 48; +/// BHA +pub const IPPROTO_BHA: c_int = 49; +/// IP6 Encap Sec. Payload +pub const IPPROTO_ESP: c_int = 50; +/// IP6 Auth Header +pub const IPPROTO_AH: c_int = 51; +/// Integ. 
Net Layer Security +pub const IPPROTO_INLSP: c_int = 52; +/// IP with encryption +pub const IPPROTO_SWIPE: c_int = 53; +/// Next Hop Resolution +pub const IPPROTO_NHRP: c_int = 54; +/// IP Mobility +pub const IPPROTO_MOBILE: c_int = 55; +/// Transport Layer Security +pub const IPPROTO_TLSP: c_int = 56; +/// SKIP +pub const IPPROTO_SKIP: c_int = 57; +// IPPROTO_ICMPV6 defined in src/unix/mod.rs +/// IP6 no next header +pub const IPPROTO_NONE: c_int = 59; +/// IP6 destination option +pub const IPPROTO_DSTOPTS: c_int = 60; +/// any host internal protocol +pub const IPPROTO_AHIP: c_int = 61; +/// CFTP +pub const IPPROTO_CFTP: c_int = 62; +/// "hello" routing protocol +pub const IPPROTO_HELLO: c_int = 63; +/// SATNET/Backroom EXPAK +pub const IPPROTO_SATEXPAK: c_int = 64; +/// Kryptolan +pub const IPPROTO_KRYPTOLAN: c_int = 65; +/// Remote Virtual Disk +pub const IPPROTO_RVD: c_int = 66; +/// Pluribus Packet Core +pub const IPPROTO_IPPC: c_int = 67; +/// Any distributed FS +pub const IPPROTO_ADFS: c_int = 68; +/// Satnet Monitoring +pub const IPPROTO_SATMON: c_int = 69; +/// VISA Protocol +pub const IPPROTO_VISA: c_int = 70; +/// Packet Core Utility +pub const IPPROTO_IPCV: c_int = 71; +/// Comp. Prot. Net. Executive +pub const IPPROTO_CPNX: c_int = 72; +/// Comp. Prot. HeartBeat +pub const IPPROTO_CPHB: c_int = 73; +/// Wang Span Network +pub const IPPROTO_WSN: c_int = 74; +/// Packet Video Protocol +pub const IPPROTO_PVP: c_int = 75; +/// BackRoom SATNET Monitoring +pub const IPPROTO_BRSATMON: c_int = 76; +/// Sun net disk proto (temp.) +pub const IPPROTO_ND: c_int = 77; +/// WIDEBAND Monitoring +pub const IPPROTO_WBMON: c_int = 78; +/// WIDEBAND EXPAK +pub const IPPROTO_WBEXPAK: c_int = 79; +/// ISO cnlp +pub const IPPROTO_EON: c_int = 80; +/// VMTP +pub const IPPROTO_VMTP: c_int = 81; +/// Secure VMTP +pub const IPPROTO_SVMTP: c_int = 82; +/// Banyon VINES +pub const IPPROTO_VINES: c_int = 83; +/// TTP +pub const IPPROTO_TTP: c_int = 84; +/// NSFNET-IGP +pub const IPPROTO_IGP: c_int = 85; +/// dissimilar gateway prot. +pub const IPPROTO_DGP: c_int = 86; +/// TCF +pub const IPPROTO_TCF: c_int = 87; +/// Cisco/GXS IGRP +pub const IPPROTO_IGRP: c_int = 88; +/// OSPFIGP +pub const IPPROTO_OSPFIGP: c_int = 89; +/// Strite RPC protocol +pub const IPPROTO_SRPC: c_int = 90; +/// Locus Address Resoloution +pub const IPPROTO_LARP: c_int = 91; +/// Multicast Transport +pub const IPPROTO_MTP: c_int = 92; +/// AX.25 Frames +pub const IPPROTO_AX25: c_int = 93; +/// IP encapsulated in IP +pub const IPPROTO_IPEIP: c_int = 94; +/// Mobile Int.ing control +pub const IPPROTO_MICP: c_int = 95; +/// Semaphore Comm. security +pub const IPPROTO_SCCSP: c_int = 96; +/// Ethernet IP encapsulation +pub const IPPROTO_ETHERIP: c_int = 97; +/// encapsulation header +pub const IPPROTO_ENCAP: c_int = 98; +/// any private encr. 
scheme +pub const IPPROTO_APES: c_int = 99; +/// GMTP +pub const IPPROTO_GMTP: c_int = 100; +/// payload compression (IPComp) +pub const IPPROTO_IPCOMP: c_int = 108; +/// SCTP +pub const IPPROTO_SCTP: c_int = 132; +/// IPv6 Mobility Header +pub const IPPROTO_MH: c_int = 135; +/// UDP-Lite +pub const IPPROTO_UDPLITE: c_int = 136; +/// IP6 Host Identity Protocol +pub const IPPROTO_HIP: c_int = 139; +/// IP6 Shim6 Protocol +pub const IPPROTO_SHIM6: c_int = 140; + +/* 101-254: Partly Unassigned */ +/// Protocol Independent Mcast +pub const IPPROTO_PIM: c_int = 103; +/// CARP +pub const IPPROTO_CARP: c_int = 112; +/// PGM +pub const IPPROTO_PGM: c_int = 113; +/// MPLS-in-IP +pub const IPPROTO_MPLS: c_int = 137; +/// PFSYNC +pub const IPPROTO_PFSYNC: c_int = 240; + +/* 255: Reserved */ +/* BSD Private, local use, namespace incursion, no longer used */ +/// OLD divert pseudo-proto +pub const IPPROTO_OLD_DIVERT: c_int = 254; +pub const IPPROTO_MAX: c_int = 256; +/// last return value of *_input(), meaning "all job for this pkt is done". +pub const IPPROTO_DONE: c_int = 257; + +/* Only used internally, so can be outside the range of valid IP protocols. */ +/// divert pseudo-protocol +pub const IPPROTO_DIVERT: c_int = 258; +/// SeND pseudo-protocol +pub const IPPROTO_SEND: c_int = 259; + +// sys/netinet/TCP.h +pub const TCP_MD5SIG: c_int = 16; +pub const TCP_INFO: c_int = 32; +pub const TCP_CONGESTION: c_int = 64; +pub const TCP_CCALGOOPT: c_int = 65; +pub const TCP_MAXUNACKTIME: c_int = 68; +#[deprecated(since = "0.2.160", note = "Removed in FreeBSD 15")] +pub const TCP_MAXPEAKRATE: c_int = 69; +pub const TCP_IDLE_REDUCE: c_int = 70; +pub const TCP_REMOTE_UDP_ENCAPS_PORT: c_int = 71; +pub const TCP_DELACK: c_int = 72; +pub const TCP_FIN_IS_RST: c_int = 73; +pub const TCP_LOG_LIMIT: c_int = 74; +pub const TCP_SHARED_CWND_ALLOWED: c_int = 75; +pub const TCP_PROC_ACCOUNTING: c_int = 76; +pub const TCP_USE_CMP_ACKS: c_int = 77; +pub const TCP_PERF_INFO: c_int = 78; +pub const TCP_LRD: c_int = 79; +pub const TCP_KEEPINIT: c_int = 128; +pub const TCP_FASTOPEN: c_int = 1025; +#[deprecated(since = "0.2.171", note = "removed in FreeBSD 15")] +pub const TCP_PCAP_OUT: c_int = 2048; +#[deprecated(since = "0.2.171", note = "removed in FreeBSD 15")] +pub const TCP_PCAP_IN: c_int = 4096; +pub const TCP_FUNCTION_BLK: c_int = 8192; +pub const TCP_FUNCTION_ALIAS: c_int = 8193; +pub const TCP_FASTOPEN_PSK_LEN: c_int = 16; +pub const TCP_FUNCTION_NAME_LEN_MAX: c_int = 32; + +pub const TCP_REUSPORT_LB_NUMA: c_int = 1026; +pub const TCP_RACK_MBUF_QUEUE: c_int = 1050; +pub const TCP_RACK_TLP_REDUCE: c_int = 1052; +pub const TCP_RACK_PACE_MAX_SEG: c_int = 1054; +pub const TCP_RACK_PACE_ALWAYS: c_int = 1055; +pub const TCP_RACK_PRR_SENDALOT: c_int = 1057; +pub const TCP_RACK_MIN_TO: c_int = 1058; +pub const TCP_RACK_EARLY_SEG: c_int = 1060; +pub const TCP_RACK_REORD_THRESH: c_int = 1061; +pub const TCP_RACK_REORD_FADE: c_int = 1062; +pub const TCP_RACK_TLP_THRESH: c_int = 1063; +pub const TCP_RACK_PKT_DELAY: c_int = 1064; +pub const TCP_BBR_IWINTSO: c_int = 1067; +pub const TCP_BBR_STARTUP_PG: c_int = 1069; +pub const TCP_BBR_DRAIN_PG: c_int = 1070; +pub const TCP_BBR_PROBE_RTT_INT: c_int = 1072; +pub const TCP_BBR_STARTUP_LOSS_EXIT: c_int = 1074; +pub const TCP_BBR_TSLIMITS: c_int = 1076; +pub const TCP_BBR_PACE_OH: c_int = 1077; +pub const TCP_BBR_USEDEL_RATE: c_int = 1079; +pub const TCP_BBR_MIN_RTO: c_int = 1080; +pub const TCP_BBR_MAX_RTO: c_int = 1081; +pub const TCP_BBR_ALGORITHM: c_int = 1083; +pub const 
TCP_BBR_PACE_PER_SEC: c_int = 1086; +pub const TCP_BBR_PACE_DEL_TAR: c_int = 1087; +pub const TCP_BBR_PACE_SEG_MAX: c_int = 1088; +pub const TCP_BBR_PACE_SEG_MIN: c_int = 1089; +pub const TCP_BBR_PACE_CROSS: c_int = 1090; +pub const TCP_BBR_TMR_PACE_OH: c_int = 1096; +pub const TCP_BBR_RACK_RTT_USE: c_int = 1098; +pub const TCP_BBR_RETRAN_WTSO: c_int = 1099; +pub const TCP_BBR_PROBE_RTT_GAIN: c_int = 1101; +pub const TCP_BBR_PROBE_RTT_LEN: c_int = 1102; +pub const TCP_BBR_SEND_IWND_IN_TSO: c_int = 1103; +pub const TCP_BBR_USE_RACK_RR: c_int = 1104; +pub const TCP_BBR_HDWR_PACE: c_int = 1105; +pub const TCP_BBR_UTTER_MAX_TSO: c_int = 1106; +pub const TCP_BBR_EXTRA_STATE: c_int = 1107; +pub const TCP_BBR_FLOOR_MIN_TSO: c_int = 1108; +pub const TCP_BBR_MIN_TOPACEOUT: c_int = 1109; +pub const TCP_BBR_TSTMP_RAISES: c_int = 1110; +pub const TCP_BBR_POLICER_DETECT: c_int = 1111; +pub const TCP_BBR_RACK_INIT_RATE: c_int = 1112; + +pub const IP_BINDANY: c_int = 24; +pub const IP_BINDMULTI: c_int = 25; +pub const IP_RSS_LISTEN_BUCKET: c_int = 26; +pub const IP_ORIGDSTADDR: c_int = 27; +pub const IP_RECVORIGDSTADDR: c_int = IP_ORIGDSTADDR; + +pub const IP_DONTFRAG: c_int = 67; +pub const IP_RECVTOS: c_int = 68; + +pub const IPV6_BINDANY: c_int = 64; +pub const IPV6_ORIGDSTADDR: c_int = 72; +pub const IPV6_RECVORIGDSTADDR: c_int = IPV6_ORIGDSTADDR; + +pub const PF_SLOW: c_int = AF_SLOW; +pub const PF_SCLUSTER: c_int = AF_SCLUSTER; +pub const PF_ARP: c_int = AF_ARP; +pub const PF_BLUETOOTH: c_int = AF_BLUETOOTH; +pub const PF_IEEE80211: c_int = AF_IEEE80211; +pub const PF_INET_SDP: c_int = AF_INET_SDP; +pub const PF_INET6_SDP: c_int = AF_INET6_SDP; + +pub const NET_RT_DUMP: c_int = 1; +pub const NET_RT_FLAGS: c_int = 2; +pub const NET_RT_IFLIST: c_int = 3; +pub const NET_RT_IFMALIST: c_int = 4; +pub const NET_RT_IFLISTL: c_int = 5; + +// System V IPC +pub const IPC_INFO: c_int = 3; +pub const MSG_NOERROR: c_int = 0o10000; +pub const SHM_LOCK: c_int = 11; +pub const SHM_UNLOCK: c_int = 12; +pub const SHM_STAT: c_int = 13; +pub const SHM_INFO: c_int = 14; +pub const SHM_ANON: *mut c_char = 1 as *mut c_char; + +// The *_MAXID constants never should've been used outside of the +// FreeBSD base system. And with the exception of CTL_P1003_1B_MAXID, +// they were all removed in svn r262489. They remain here for backwards +// compatibility only, and are scheduled to be removed in libc 1.0.0. 
+#[doc(hidden)] +#[deprecated(since = "0.2.54", note = "Removed in FreeBSD 11")] +pub const CTL_MAXID: c_int = 10; +#[doc(hidden)] +#[deprecated(since = "0.2.54", note = "Removed in FreeBSD 11")] +pub const KERN_MAXID: c_int = 38; +#[doc(hidden)] +#[deprecated(since = "0.2.54", note = "Removed in FreeBSD 11")] +pub const HW_MAXID: c_int = 13; +#[doc(hidden)] +#[deprecated(since = "0.2.54", note = "Removed in FreeBSD 11")] +pub const USER_MAXID: c_int = 21; +#[doc(hidden)] +#[deprecated(since = "0.2.74", note = "Removed in FreeBSD 13")] +pub const CTL_P1003_1B_MAXID: c_int = 26; + +pub const MSG_NOTIFICATION: c_int = 0x00002000; +pub const MSG_NBIO: c_int = 0x00004000; +pub const MSG_COMPAT: c_int = 0x00008000; +pub const MSG_CMSG_CLOEXEC: c_int = 0x00040000; +pub const MSG_NOSIGNAL: c_int = 0x20000; +pub const MSG_WAITFORONE: c_int = 0x00080000; + +// utmpx entry types +pub const EMPTY: c_short = 0; +pub const BOOT_TIME: c_short = 1; +pub const OLD_TIME: c_short = 2; +pub const NEW_TIME: c_short = 3; +pub const USER_PROCESS: c_short = 4; +pub const INIT_PROCESS: c_short = 5; +pub const LOGIN_PROCESS: c_short = 6; +pub const DEAD_PROCESS: c_short = 7; +pub const SHUTDOWN_TIME: c_short = 8; +// utmp database types +pub const UTXDB_ACTIVE: c_int = 0; +pub const UTXDB_LASTLOGIN: c_int = 1; +pub const UTXDB_LOG: c_int = 2; + +pub const LC_COLLATE_MASK: c_int = 1 << 0; +pub const LC_CTYPE_MASK: c_int = 1 << 1; +pub const LC_MONETARY_MASK: c_int = 1 << 2; +pub const LC_NUMERIC_MASK: c_int = 1 << 3; +pub const LC_TIME_MASK: c_int = 1 << 4; +pub const LC_MESSAGES_MASK: c_int = 1 << 5; +pub const LC_ALL_MASK: c_int = LC_COLLATE_MASK + | LC_CTYPE_MASK + | LC_MESSAGES_MASK + | LC_MONETARY_MASK + | LC_NUMERIC_MASK + | LC_TIME_MASK; + +pub const WSTOPPED: c_int = 2; // same as WUNTRACED +pub const WCONTINUED: c_int = 4; +pub const WNOWAIT: c_int = 8; +pub const WEXITED: c_int = 16; +pub const WTRAPPED: c_int = 32; + +// FreeBSD defines a great many more of these, we only expose the +// standardized ones. 
+pub const P_PID: idtype_t = 0; +pub const P_PGID: idtype_t = 2; +pub const P_ALL: idtype_t = 7; + +pub const UTIME_OMIT: c_long = -2; +pub const UTIME_NOW: c_long = -1; + +pub const B460800: crate::speed_t = 460800; +pub const B921600: crate::speed_t = 921600; + +pub const AT_FDCWD: c_int = -100; +pub const AT_EACCESS: c_int = 0x100; +pub const AT_SYMLINK_NOFOLLOW: c_int = 0x200; +pub const AT_SYMLINK_FOLLOW: c_int = 0x400; +pub const AT_REMOVEDIR: c_int = 0x800; +pub const AT_RESOLVE_BENEATH: c_int = 0x2000; +pub const AT_EMPTY_PATH: c_int = 0x4000; + +pub const AT_NULL: c_int = 0; +pub const AT_IGNORE: c_int = 1; +pub const AT_EXECFD: c_int = 2; +pub const AT_PHDR: c_int = 3; +pub const AT_PHENT: c_int = 4; +pub const AT_PHNUM: c_int = 5; +pub const AT_PAGESZ: c_int = 6; +pub const AT_BASE: c_int = 7; +pub const AT_FLAGS: c_int = 8; +pub const AT_ENTRY: c_int = 9; +pub const AT_NOTELF: c_int = 10; +pub const AT_UID: c_int = 11; +pub const AT_EUID: c_int = 12; +pub const AT_GID: c_int = 13; +pub const AT_EGID: c_int = 14; +pub const AT_EXECPATH: c_int = 15; +pub const AT_CANARY: c_int = 16; +pub const AT_OSRELDATE: c_int = 18; +pub const AT_NCPUS: c_int = 19; +pub const AT_PAGESIZES: c_int = 20; +pub const AT_TIMEKEEP: c_int = 22; +pub const AT_HWCAP: c_int = 25; +pub const AT_HWCAP2: c_int = 26; +pub const AT_USRSTACKBASE: c_int = 35; +pub const AT_USRSTACKLIM: c_int = 36; + +pub const TABDLY: crate::tcflag_t = 0x00000004; +pub const TAB0: crate::tcflag_t = 0x00000000; +pub const TAB3: crate::tcflag_t = 0x00000004; + +pub const _PC_ACL_NFS4: c_int = 64; + +pub const _SC_CPUSET_SIZE: c_int = 122; + +pub const _UUID_NODE_LEN: usize = 6; + +// Flags which can be passed to pdfork(2) +pub const PD_DAEMON: c_int = 0x00000001; +pub const PD_CLOEXEC: c_int = 0x00000002; +pub const PD_ALLOWED_AT_FORK: c_int = PD_DAEMON | PD_CLOEXEC; + +// Values for struct rtprio (type_ field) +pub const RTP_PRIO_REALTIME: c_ushort = 2; +pub const RTP_PRIO_NORMAL: c_ushort = 3; +pub const RTP_PRIO_IDLE: c_ushort = 4; + +// Flags for chflags(2) +pub const UF_SYSTEM: c_ulong = 0x00000080; +pub const UF_SPARSE: c_ulong = 0x00000100; +pub const UF_OFFLINE: c_ulong = 0x00000200; +pub const UF_REPARSE: c_ulong = 0x00000400; +pub const UF_ARCHIVE: c_ulong = 0x00000800; +pub const UF_READONLY: c_ulong = 0x00001000; +pub const UF_HIDDEN: c_ulong = 0x00008000; +pub const SF_SNAPSHOT: c_ulong = 0x00200000; + +// fcntl commands +pub const F_ADD_SEALS: c_int = 19; +pub const F_GET_SEALS: c_int = 20; +pub const F_OGETLK: c_int = 7; +pub const F_OSETLK: c_int = 8; +pub const F_OSETLKW: c_int = 9; +pub const F_RDAHEAD: c_int = 16; +pub const F_READAHEAD: c_int = 15; +pub const F_SETLK_REMOTE: c_int = 14; +pub const F_KINFO: c_int = 22; + +// for use with F_ADD_SEALS +pub const F_SEAL_GROW: c_int = 4; +pub const F_SEAL_SEAL: c_int = 1; +pub const F_SEAL_SHRINK: c_int = 2; +pub const F_SEAL_WRITE: c_int = 8; + +// for use with fspacectl +pub const SPACECTL_DEALLOC: c_int = 1; + +// For realhostname* api +pub const HOSTNAME_FOUND: c_int = 0; +pub const HOSTNAME_INCORRECTNAME: c_int = 1; +pub const HOSTNAME_INVALIDADDR: c_int = 2; +pub const HOSTNAME_INVALIDNAME: c_int = 3; + +// For rfork +pub const RFFDG: c_int = 4; +pub const RFPROC: c_int = 16; +pub const RFMEM: c_int = 32; +pub const RFNOWAIT: c_int = 64; +pub const RFCFDG: c_int = 4096; +pub const RFTHREAD: c_int = 8192; +pub const RFSIGSHARE: c_int = 16384; +pub const RFLINUXTHPN: c_int = 65536; +pub const RFTSIGZMB: c_int = 524288; +pub const RFSPAWN: c_int = 2147483648; + 
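+// Illustrative note (not taken from the FreeBSD headers): per rfork(2), the
+// RF* bits above control which resources the new process copies from, shares
+// with, or detaches from its parent; `rfork(RFFDG | RFPROC)` behaves like a
+// plain fork(2), while additionally passing RFMEM makes the child share the
+// parent's entire address space.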
+// For eventfd +pub const EFD_SEMAPHORE: c_int = 0x1; +pub const EFD_NONBLOCK: c_int = 0x4; +pub const EFD_CLOEXEC: c_int = 0x100000; + +pub const MALLOCX_ZERO: c_int = 0x40; + +/// size of returned wchan message +pub const WMESGLEN: usize = 8; +/// size of returned lock name +pub const LOCKNAMELEN: usize = 8; +/// size of returned thread name +pub const TDNAMLEN: usize = 16; +/// size of returned ki_comm name +pub const COMMLEN: usize = 19; +/// size of returned ki_emul +pub const KI_EMULNAMELEN: usize = 16; +/// number of groups in ki_groups +pub const KI_NGROUPS: usize = 16; +cfg_if! { + if #[cfg(freebsd11)] { + pub const KI_NSPARE_INT: usize = 4; + } else { + pub const KI_NSPARE_INT: usize = 2; + } +} +pub const KI_NSPARE_LONG: usize = 12; +/// Flags for the process credential. +pub const KI_CRF_CAPABILITY_MODE: usize = 0x00000001; +/// Steal a bit from ki_cr_flags to indicate that the cred had more than +/// KI_NGROUPS groups. +pub const KI_CRF_GRP_OVERFLOW: usize = 0x80000000; +/// controlling tty vnode active +pub const KI_CTTY: usize = 0x00000001; +/// session leader +pub const KI_SLEADER: usize = 0x00000002; +/// proc blocked on lock ki_lockname +pub const KI_LOCKBLOCK: usize = 0x00000004; +/// size of returned ki_login +pub const LOGNAMELEN: usize = 17; +/// size of returned ki_loginclass +pub const LOGINCLASSLEN: usize = 17; + +pub const KF_ATTR_VALID: c_int = 0x0001; +pub const KF_TYPE_NONE: c_int = 0; +pub const KF_TYPE_VNODE: c_int = 1; +pub const KF_TYPE_SOCKET: c_int = 2; +pub const KF_TYPE_PIPE: c_int = 3; +pub const KF_TYPE_FIFO: c_int = 4; +pub const KF_TYPE_KQUEUE: c_int = 5; +pub const KF_TYPE_MQUEUE: c_int = 7; +pub const KF_TYPE_SHM: c_int = 8; +pub const KF_TYPE_SEM: c_int = 9; +pub const KF_TYPE_PTS: c_int = 10; +pub const KF_TYPE_PROCDESC: c_int = 11; +pub const KF_TYPE_DEV: c_int = 12; +pub const KF_TYPE_UNKNOWN: c_int = 255; + +pub const KF_VTYPE_VNON: c_int = 0; +pub const KF_VTYPE_VREG: c_int = 1; +pub const KF_VTYPE_VDIR: c_int = 2; +pub const KF_VTYPE_VBLK: c_int = 3; +pub const KF_VTYPE_VCHR: c_int = 4; +pub const KF_VTYPE_VLNK: c_int = 5; +pub const KF_VTYPE_VSOCK: c_int = 6; +pub const KF_VTYPE_VFIFO: c_int = 7; +pub const KF_VTYPE_VBAD: c_int = 8; +pub const KF_VTYPE_UNKNOWN: c_int = 255; + +/// Current working directory +pub const KF_FD_TYPE_CWD: c_int = -1; +/// Root directory +pub const KF_FD_TYPE_ROOT: c_int = -2; +/// Jail directory +pub const KF_FD_TYPE_JAIL: c_int = -3; +/// Ktrace vnode +pub const KF_FD_TYPE_TRACE: c_int = -4; +pub const KF_FD_TYPE_TEXT: c_int = -5; +/// Controlling terminal +pub const KF_FD_TYPE_CTTY: c_int = -6; +pub const KF_FLAG_READ: c_int = 0x00000001; +pub const KF_FLAG_WRITE: c_int = 0x00000002; +pub const KF_FLAG_APPEND: c_int = 0x00000004; +pub const KF_FLAG_ASYNC: c_int = 0x00000008; +pub const KF_FLAG_FSYNC: c_int = 0x00000010; +pub const KF_FLAG_NONBLOCK: c_int = 0x00000020; +pub const KF_FLAG_DIRECT: c_int = 0x00000040; +pub const KF_FLAG_HASLOCK: c_int = 0x00000080; +pub const KF_FLAG_SHLOCK: c_int = 0x00000100; +pub const KF_FLAG_EXLOCK: c_int = 0x00000200; +pub const KF_FLAG_NOFOLLOW: c_int = 0x00000400; +pub const KF_FLAG_CREAT: c_int = 0x00000800; +pub const KF_FLAG_TRUNC: c_int = 0x00001000; +pub const KF_FLAG_EXCL: c_int = 0x00002000; +pub const KF_FLAG_EXEC: c_int = 0x00004000; + +pub const KVME_TYPE_NONE: c_int = 0; +pub const KVME_TYPE_DEFAULT: c_int = 1; +pub const KVME_TYPE_VNODE: c_int = 2; +pub const KVME_TYPE_SWAP: c_int = 3; +pub const KVME_TYPE_DEVICE: c_int = 4; +pub const KVME_TYPE_PHYS: c_int = 
5; +pub const KVME_TYPE_DEAD: c_int = 6; +pub const KVME_TYPE_SG: c_int = 7; +pub const KVME_TYPE_MGTDEVICE: c_int = 8; +// Present in `sys/user.h` but is undefined for whatever reason... +// pub const KVME_TYPE_GUARD: c_int = 9; +pub const KVME_TYPE_UNKNOWN: c_int = 255; +pub const KVME_PROT_READ: c_int = 0x00000001; +pub const KVME_PROT_WRITE: c_int = 0x00000002; +pub const KVME_PROT_EXEC: c_int = 0x00000004; +pub const KVME_FLAG_COW: c_int = 0x00000001; +pub const KVME_FLAG_NEEDS_COPY: c_int = 0x00000002; +pub const KVME_FLAG_NOCOREDUMP: c_int = 0x00000004; +pub const KVME_FLAG_SUPER: c_int = 0x00000008; +pub const KVME_FLAG_GROWS_UP: c_int = 0x00000010; +pub const KVME_FLAG_GROWS_DOWN: c_int = 0x00000020; +pub const KVME_FLAG_USER_WIRED: c_int = 0x00000040; + +pub const KKST_MAXLEN: c_int = 1024; +/// Stack is valid. +pub const KKST_STATE_STACKOK: c_int = 0; +/// Stack swapped out. +pub const KKST_STATE_SWAPPED: c_int = 1; +pub const KKST_STATE_RUNNING: c_int = 2; + +// Constants about priority. +pub const PRI_MIN: c_int = 0; +pub const PRI_MAX: c_int = 255; +pub const PRI_MIN_ITHD: c_int = PRI_MIN; +#[deprecated(since = "0.2.133", note = "Not stable across OS versions")] +#[allow(deprecated)] +pub const PRI_MAX_ITHD: c_int = PRI_MIN_REALTIME - 1; +pub const PI_REALTIME: c_int = PRI_MIN_ITHD + 0; +#[deprecated(since = "0.2.133", note = "Not stable across OS versions")] +pub const PI_AV: c_int = PRI_MIN_ITHD + 4; +#[deprecated(since = "0.2.133", note = "Not stable across OS versions")] +pub const PI_NET: c_int = PRI_MIN_ITHD + 8; +#[deprecated(since = "0.2.133", note = "Not stable across OS versions")] +pub const PI_DISK: c_int = PRI_MIN_ITHD + 12; +#[deprecated(since = "0.2.133", note = "Not stable across OS versions")] +pub const PI_TTY: c_int = PRI_MIN_ITHD + 16; +#[deprecated(since = "0.2.133", note = "Not stable across OS versions")] +pub const PI_DULL: c_int = PRI_MIN_ITHD + 20; +#[deprecated(since = "0.2.133", note = "Not stable across OS versions")] +pub const PI_SOFT: c_int = PRI_MIN_ITHD + 24; +#[deprecated(since = "0.2.133", note = "Not stable across OS versions")] +pub const PRI_MIN_REALTIME: c_int = 48; +#[deprecated(since = "0.2.133", note = "Not stable across OS versions")] +#[allow(deprecated)] +pub const PRI_MAX_REALTIME: c_int = PRI_MIN_KERN - 1; +#[deprecated(since = "0.2.133", note = "Not stable across OS versions")] +pub const PRI_MIN_KERN: c_int = 80; +#[deprecated(since = "0.2.133", note = "Not stable across OS versions")] +#[allow(deprecated)] +pub const PRI_MAX_KERN: c_int = PRI_MIN_TIMESHARE - 1; +#[deprecated(since = "0.2.133", note = "Not stable across OS versions")] +#[allow(deprecated)] +pub const PSWP: c_int = PRI_MIN_KERN + 0; +#[deprecated(since = "0.2.133", note = "Not stable across OS versions")] +#[allow(deprecated)] +pub const PVM: c_int = PRI_MIN_KERN + 4; +#[deprecated(since = "0.2.133", note = "Not stable across OS versions")] +#[allow(deprecated)] +pub const PINOD: c_int = PRI_MIN_KERN + 8; +#[deprecated(since = "0.2.133", note = "Not stable across OS versions")] +#[allow(deprecated)] +pub const PRIBIO: c_int = PRI_MIN_KERN + 12; +#[deprecated(since = "0.2.133", note = "Not stable across OS versions")] +#[allow(deprecated)] +pub const PVFS: c_int = PRI_MIN_KERN + 16; +#[deprecated(since = "0.2.133", note = "Not stable across OS versions")] +#[allow(deprecated)] +pub const PZERO: c_int = PRI_MIN_KERN + 20; +#[deprecated(since = "0.2.133", note = "Not stable across OS versions")] +#[allow(deprecated)] +pub const PSOCK: c_int = PRI_MIN_KERN + 24; 
+#[deprecated(since = "0.2.133", note = "Not stable across OS versions")] +#[allow(deprecated)] +pub const PWAIT: c_int = PRI_MIN_KERN + 28; +#[deprecated(since = "0.2.133", note = "Not stable across OS versions")] +#[allow(deprecated)] +pub const PLOCK: c_int = PRI_MIN_KERN + 32; +#[deprecated(since = "0.2.133", note = "Not stable across OS versions")] +#[allow(deprecated)] +pub const PPAUSE: c_int = PRI_MIN_KERN + 36; +#[deprecated(since = "0.2.133", note = "Not stable across OS versions")] +pub const PRI_MIN_TIMESHARE: c_int = 120; +pub const PRI_MAX_TIMESHARE: c_int = PRI_MIN_IDLE - 1; +#[deprecated(since = "0.2.133", note = "Not stable across OS versions")] +#[allow(deprecated)] +pub const PUSER: c_int = PRI_MIN_TIMESHARE; +pub const PRI_MIN_IDLE: c_int = 224; +pub const PRI_MAX_IDLE: c_int = PRI_MAX; + +pub const NZERO: c_int = 0; + +// Resource utilization information. +pub const RUSAGE_THREAD: c_int = 1; + +cfg_if! { + if #[cfg(any(freebsd11, target_pointer_width = "32"))] { + pub const ARG_MAX: c_int = 256 * 1024; + } else { + pub const ARG_MAX: c_int = 2 * 256 * 1024; + } +} +pub const CHILD_MAX: c_int = 40; +/// max command name remembered +pub const MAXCOMLEN: usize = 19; +/// max interpreter file name length +pub const MAXINTERP: c_int = crate::PATH_MAX; +/// max login name length (incl. NUL) +pub const MAXLOGNAME: c_int = 33; +/// max simultaneous processes +pub const MAXUPRC: c_int = CHILD_MAX; +/// max bytes for an exec function +pub const NCARGS: c_int = ARG_MAX; +/// /* max number groups +pub const NGROUPS: c_int = NGROUPS_MAX + 1; +/// max open files per process +pub const NOFILE: c_int = OPEN_MAX; +/// marker for empty group set member +pub const NOGROUP: c_int = 65535; +/// max hostname size +pub const MAXHOSTNAMELEN: c_int = 256; +/// max bytes in term canon input line +pub const MAX_CANON: c_int = 255; +/// max bytes in terminal input +pub const MAX_INPUT: c_int = 255; +/// max bytes in a file name +pub const NAME_MAX: c_int = 255; +pub const MAXSYMLINKS: c_int = 32; +/// max supplemental group id's +pub const NGROUPS_MAX: c_int = 1023; +/// max open files per process +pub const OPEN_MAX: c_int = 64; + +pub const _POSIX_ARG_MAX: c_int = 4096; +pub const _POSIX_LINK_MAX: c_int = 8; +pub const _POSIX_MAX_CANON: c_int = 255; +pub const _POSIX_MAX_INPUT: c_int = 255; +pub const _POSIX_NAME_MAX: c_int = 14; +pub const _POSIX_PIPE_BUF: c_int = 512; +pub const _POSIX_SSIZE_MAX: c_int = 32767; +pub const _POSIX_STREAM_MAX: c_int = 8; + +/// max ibase/obase values in bc(1) +pub const BC_BASE_MAX: c_int = 99; +/// max array elements in bc(1) +pub const BC_DIM_MAX: c_int = 2048; +/// max scale value in bc(1) +pub const BC_SCALE_MAX: c_int = 99; +/// max const string length in bc(1) +pub const BC_STRING_MAX: c_int = 1000; +/// max character class name size +pub const CHARCLASS_NAME_MAX: c_int = 14; +/// max weights for order keyword +pub const COLL_WEIGHTS_MAX: c_int = 10; +/// max expressions nested in expr(1) +pub const EXPR_NEST_MAX: c_int = 32; +/// max bytes in an input line +pub const LINE_MAX: c_int = 2048; +/// max RE's in interval notation +pub const RE_DUP_MAX: c_int = 255; + +pub const _POSIX2_BC_BASE_MAX: c_int = 99; +pub const _POSIX2_BC_DIM_MAX: c_int = 2048; +pub const _POSIX2_BC_SCALE_MAX: c_int = 99; +pub const _POSIX2_BC_STRING_MAX: c_int = 1000; +pub const _POSIX2_CHARCLASS_NAME_MAX: c_int = 14; +pub const _POSIX2_COLL_WEIGHTS_MAX: c_int = 2; +pub const _POSIX2_EQUIV_CLASS_MAX: c_int = 2; +pub const _POSIX2_EXPR_NEST_MAX: c_int = 32; +pub const 
_POSIX2_LINE_MAX: c_int = 2048; +pub const _POSIX2_RE_DUP_MAX: c_int = 255; + +// sys/proc.h +pub const TDF_BORROWING: c_int = 0x00000001; +pub const TDF_INPANIC: c_int = 0x00000002; +pub const TDF_INMEM: c_int = 0x00000004; +pub const TDF_SINTR: c_int = 0x00000008; +pub const TDF_TIMEOUT: c_int = 0x00000010; +pub const TDF_IDLETD: c_int = 0x00000020; +pub const TDF_CANSWAP: c_int = 0x00000040; +pub const TDF_KTH_SUSP: c_int = 0x00000100; +pub const TDF_ALLPROCSUSP: c_int = 0x00000200; +pub const TDF_BOUNDARY: c_int = 0x00000400; +#[deprecated(since = "0.2.133", note = "Not stable across OS versions")] +pub const TDF_ASTPENDING: c_int = 0x00000800; +pub const TDF_SBDRY: c_int = 0x00002000; +pub const TDF_UPIBLOCKED: c_int = 0x00004000; +#[deprecated(since = "0.2.133", note = "Not stable across OS versions")] +pub const TDF_NEEDSUSPCHK: c_int = 0x00008000; +#[deprecated(since = "0.2.133", note = "Not stable across OS versions")] +pub const TDF_NEEDRESCHED: c_int = 0x00010000; +#[deprecated(since = "0.2.133", note = "Not stable across OS versions")] +pub const TDF_NEEDSIGCHK: c_int = 0x00020000; +pub const TDF_NOLOAD: c_int = 0x00040000; +pub const TDF_SERESTART: c_int = 0x00080000; +pub const TDF_THRWAKEUP: c_int = 0x00100000; +pub const TDF_SEINTR: c_int = 0x00200000; +pub const TDF_SWAPINREQ: c_int = 0x00400000; +#[deprecated(since = "0.2.133", note = "Removed in FreeBSD 14")] +pub const TDF_UNUSED23: c_int = 0x00800000; +pub const TDF_SCHED0: c_int = 0x01000000; +pub const TDF_SCHED1: c_int = 0x02000000; +pub const TDF_SCHED2: c_int = 0x04000000; +pub const TDF_SCHED3: c_int = 0x08000000; +#[deprecated(since = "0.2.133", note = "Not stable across OS versions")] +pub const TDF_ALRMPEND: c_int = 0x10000000; +#[deprecated(since = "0.2.133", note = "Not stable across OS versions")] +pub const TDF_PROFPEND: c_int = 0x20000000; +#[deprecated(since = "0.2.133", note = "Not stable across OS versions")] +pub const TDF_MACPEND: c_int = 0x40000000; + +pub const TDB_SUSPEND: c_int = 0x00000001; +pub const TDB_XSIG: c_int = 0x00000002; +pub const TDB_USERWR: c_int = 0x00000004; +pub const TDB_SCE: c_int = 0x00000008; +pub const TDB_SCX: c_int = 0x00000010; +pub const TDB_EXEC: c_int = 0x00000020; +pub const TDB_FORK: c_int = 0x00000040; +pub const TDB_STOPATFORK: c_int = 0x00000080; +pub const TDB_CHILD: c_int = 0x00000100; +pub const TDB_BORN: c_int = 0x00000200; +pub const TDB_EXIT: c_int = 0x00000400; +pub const TDB_VFORK: c_int = 0x00000800; +pub const TDB_FSTP: c_int = 0x00001000; +pub const TDB_STEP: c_int = 0x00002000; + +pub const TDP_OLDMASK: c_int = 0x00000001; +pub const TDP_INKTR: c_int = 0x00000002; +pub const TDP_INKTRACE: c_int = 0x00000004; +pub const TDP_BUFNEED: c_int = 0x00000008; +pub const TDP_COWINPROGRESS: c_int = 0x00000010; +pub const TDP_ALTSTACK: c_int = 0x00000020; +pub const TDP_DEADLKTREAT: c_int = 0x00000040; +pub const TDP_NOFAULTING: c_int = 0x00000080; +pub const TDP_OWEUPC: c_int = 0x00000200; +pub const TDP_ITHREAD: c_int = 0x00000400; +pub const TDP_SYNCIO: c_int = 0x00000800; +pub const TDP_SCHED1: c_int = 0x00001000; +pub const TDP_SCHED2: c_int = 0x00002000; +pub const TDP_SCHED3: c_int = 0x00004000; +pub const TDP_SCHED4: c_int = 0x00008000; +pub const TDP_GEOM: c_int = 0x00010000; +pub const TDP_SOFTDEP: c_int = 0x00020000; +pub const TDP_NORUNNINGBUF: c_int = 0x00040000; +pub const TDP_WAKEUP: c_int = 0x00080000; +pub const TDP_INBDFLUSH: c_int = 0x00100000; +pub const TDP_KTHREAD: c_int = 0x00200000; +pub const TDP_CALLCHAIN: c_int = 0x00400000; +pub const 
TDP_IGNSUSP: c_int = 0x00800000; +pub const TDP_AUDITREC: c_int = 0x01000000; +pub const TDP_RFPPWAIT: c_int = 0x02000000; +pub const TDP_RESETSPUR: c_int = 0x04000000; +pub const TDP_NERRNO: c_int = 0x08000000; +pub const TDP_EXECVMSPC: c_int = 0x40000000; + +pub const TDI_SUSPENDED: c_int = 0x0001; +pub const TDI_SLEEPING: c_int = 0x0002; +pub const TDI_SWAPPED: c_int = 0x0004; +pub const TDI_LOCK: c_int = 0x0008; +pub const TDI_IWAIT: c_int = 0x0010; + +pub const P_ADVLOCK: c_int = 0x00000001; +pub const P_CONTROLT: c_int = 0x00000002; +pub const P_KPROC: c_int = 0x00000004; +#[deprecated(since = "1.0", note = "Replaced in FreeBSD 15 by P_IDLEPROC")] +pub const P_UNUSED3: c_int = 0x00000008; +#[cfg(freebsd15)] +pub const P_IDLEPROC: c_int = 0x00000008; +pub const P_PPWAIT: c_int = 0x00000010; +pub const P_PROFIL: c_int = 0x00000020; +pub const P_STOPPROF: c_int = 0x00000040; +pub const P_HADTHREADS: c_int = 0x00000080; +pub const P_SUGID: c_int = 0x00000100; +pub const P_SYSTEM: c_int = 0x00000200; +pub const P_SINGLE_EXIT: c_int = 0x00000400; +pub const P_TRACED: c_int = 0x00000800; +pub const P_WAITED: c_int = 0x00001000; +pub const P_WEXIT: c_int = 0x00002000; +pub const P_EXEC: c_int = 0x00004000; +pub const P_WKILLED: c_int = 0x00008000; +pub const P_CONTINUED: c_int = 0x00010000; +pub const P_STOPPED_SIG: c_int = 0x00020000; +pub const P_STOPPED_TRACE: c_int = 0x00040000; +pub const P_STOPPED_SINGLE: c_int = 0x00080000; +pub const P_PROTECTED: c_int = 0x00100000; +pub const P_SIGEVENT: c_int = 0x00200000; +pub const P_SINGLE_BOUNDARY: c_int = 0x00400000; +pub const P_HWPMC: c_int = 0x00800000; +pub const P_JAILED: c_int = 0x01000000; +pub const P_TOTAL_STOP: c_int = 0x02000000; +pub const P_INEXEC: c_int = 0x04000000; +pub const P_STATCHILD: c_int = 0x08000000; +pub const P_INMEM: c_int = 0x10000000; +pub const P_SWAPPINGOUT: c_int = 0x20000000; +pub const P_SWAPPINGIN: c_int = 0x40000000; +pub const P_PPTRACE: c_int = 0x80000000; +pub const P_STOPPED: c_int = P_STOPPED_SIG | P_STOPPED_SINGLE | P_STOPPED_TRACE; + +pub const P2_INHERIT_PROTECTED: c_int = 0x00000001; +pub const P2_NOTRACE: c_int = 0x00000002; +pub const P2_NOTRACE_EXEC: c_int = 0x00000004; +pub const P2_AST_SU: c_int = 0x00000008; +pub const P2_PTRACE_FSTP: c_int = 0x00000010; +pub const P2_TRAPCAP: c_int = 0x00000020; +pub const P2_STKGAP_DISABLE: c_int = 0x00000800; +pub const P2_STKGAP_DISABLE_EXEC: c_int = 0x00001000; + +pub const P_TREE_ORPHANED: c_int = 0x00000001; +pub const P_TREE_FIRST_ORPHAN: c_int = 0x00000002; +pub const P_TREE_REAPER: c_int = 0x00000004; + +pub const SIDL: c_char = 1; +pub const SRUN: c_char = 2; +pub const SSLEEP: c_char = 3; +pub const SSTOP: c_char = 4; +pub const SZOMB: c_char = 5; +pub const SWAIT: c_char = 6; +pub const SLOCK: c_char = 7; + +pub const P_MAGIC: c_int = 0xbeefface; + +pub const TDP_SIGFASTBLOCK: c_int = 0x00000100; +pub const TDP_UIOHELD: c_int = 0x10000000; +pub const TDP_SIGFASTPENDING: c_int = 0x80000000; +pub const TDP2_COMPAT32RB: c_int = 0x00000002; +pub const P2_PROTMAX_ENABLE: c_int = 0x00000200; +pub const P2_PROTMAX_DISABLE: c_int = 0x00000400; +pub const TDP2_SBPAGES: c_int = 0x00000001; +pub const P2_ASLR_ENABLE: c_int = 0x00000040; +pub const P2_ASLR_DISABLE: c_int = 0x00000080; +pub const P2_ASLR_IGNSTART: c_int = 0x00000100; +pub const P_TREE_GRPEXITED: c_int = 0x00000008; + +// libprocstat.h +pub const PS_FST_VTYPE_VNON: c_int = 1; +pub const PS_FST_VTYPE_VREG: c_int = 2; +pub const PS_FST_VTYPE_VDIR: c_int = 3; +pub const PS_FST_VTYPE_VBLK: c_int = 
4; +pub const PS_FST_VTYPE_VCHR: c_int = 5; +pub const PS_FST_VTYPE_VLNK: c_int = 6; +pub const PS_FST_VTYPE_VSOCK: c_int = 7; +pub const PS_FST_VTYPE_VFIFO: c_int = 8; +pub const PS_FST_VTYPE_VBAD: c_int = 9; +pub const PS_FST_VTYPE_UNKNOWN: c_int = 255; + +pub const PS_FST_TYPE_VNODE: c_int = 1; +pub const PS_FST_TYPE_FIFO: c_int = 2; +pub const PS_FST_TYPE_SOCKET: c_int = 3; +pub const PS_FST_TYPE_PIPE: c_int = 4; +pub const PS_FST_TYPE_PTS: c_int = 5; +pub const PS_FST_TYPE_KQUEUE: c_int = 6; +pub const PS_FST_TYPE_MQUEUE: c_int = 8; +pub const PS_FST_TYPE_SHM: c_int = 9; +pub const PS_FST_TYPE_SEM: c_int = 10; +pub const PS_FST_TYPE_UNKNOWN: c_int = 11; +pub const PS_FST_TYPE_NONE: c_int = 12; +pub const PS_FST_TYPE_PROCDESC: c_int = 13; +pub const PS_FST_TYPE_DEV: c_int = 14; +pub const PS_FST_TYPE_EVENTFD: c_int = 15; + +pub const PS_FST_UFLAG_RDIR: c_int = 0x0001; +pub const PS_FST_UFLAG_CDIR: c_int = 0x0002; +pub const PS_FST_UFLAG_JAIL: c_int = 0x0004; +pub const PS_FST_UFLAG_TRACE: c_int = 0x0008; +pub const PS_FST_UFLAG_TEXT: c_int = 0x0010; +pub const PS_FST_UFLAG_MMAP: c_int = 0x0020; +pub const PS_FST_UFLAG_CTTY: c_int = 0x0040; + +pub const PS_FST_FFLAG_READ: c_int = 0x0001; +pub const PS_FST_FFLAG_WRITE: c_int = 0x0002; +pub const PS_FST_FFLAG_NONBLOCK: c_int = 0x0004; +pub const PS_FST_FFLAG_APPEND: c_int = 0x0008; +pub const PS_FST_FFLAG_SHLOCK: c_int = 0x0010; +pub const PS_FST_FFLAG_EXLOCK: c_int = 0x0020; +pub const PS_FST_FFLAG_ASYNC: c_int = 0x0040; +pub const PS_FST_FFLAG_SYNC: c_int = 0x0080; +pub const PS_FST_FFLAG_NOFOLLOW: c_int = 0x0100; +pub const PS_FST_FFLAG_CREAT: c_int = 0x0200; +pub const PS_FST_FFLAG_TRUNC: c_int = 0x0400; +pub const PS_FST_FFLAG_EXCL: c_int = 0x0800; +pub const PS_FST_FFLAG_DIRECT: c_int = 0x1000; +pub const PS_FST_FFLAG_EXEC: c_int = 0x2000; +pub const PS_FST_FFLAG_HASLOCK: c_int = 0x4000; + +// sys/mount.h + +/// File identifier. +/// These are unique per filesystem on a single machine. +/// +/// Note that the offset of fid_data is 4 bytes, so care must be taken to avoid +/// undefined behavior accessing unaligned fields within an embedded struct. +pub const MAXFIDSZ: c_int = 16; +/// Length of type name including null. +pub const MFSNAMELEN: c_int = 16; +cfg_if! { + if #[cfg(any(freebsd10, freebsd11))] { + /// Size of on/from name bufs. + pub const MNAMELEN: c_int = 88; + } else { + /// Size of on/from name bufs. + pub const MNAMELEN: c_int = 1024; + } +} + +/// Using journaled soft updates. +pub const MNT_SUJ: u64 = 0x100000000; +/// Mounted by automountd(8). +pub const MNT_AUTOMOUNTED: u64 = 0x200000000; +/// Filesys metadata untrusted. +pub const MNT_UNTRUSTED: u64 = 0x800000000; + +/// Require TLS. +pub const MNT_EXTLS: u64 = 0x4000000000; +/// Require TLS with client cert. +pub const MNT_EXTLSCERT: u64 = 0x8000000000; +/// Require TLS with user cert. +pub const MNT_EXTLSCERTUSER: u64 = 0x10000000000; + +/// Filesystem is stored locally. +pub const MNT_LOCAL: u64 = 0x000001000; +/// Quotas are enabled on fs. +pub const MNT_QUOTA: u64 = 0x000002000; +/// Identifies the root fs. +pub const MNT_ROOTFS: u64 = 0x000004000; +/// Mounted by a user. +pub const MNT_USER: u64 = 0x000008000; +/// Do not show entry in df. +pub const MNT_IGNORE: u64 = 0x000800000; +/// Filesystem is verified. +pub const MNT_VERIFIED: u64 = 0x400000000; + +/// Do not cover a mount point. +pub const MNT_NOCOVER: u64 = 0x001000000000; +/// Only mount on empty dir. +pub const MNT_EMPTYDIR: u64 = 0x002000000000; +/// Recursively unmount uppers. 
+pub const MNT_RECURSE: u64 = 0x100000000000; +/// Unmount in async context. +pub const MNT_DEFERRED: u64 = 0x200000000000; + +/// Get configured filesystems. +pub const VFS_VFSCONF: c_int = 0; +/// Generic filesystem information. +pub const VFS_GENERIC: c_int = 0; + +/// int: highest defined filesystem type. +pub const VFS_MAXTYPENUM: c_int = 1; +/// struct: vfsconf for filesystem given as next argument. +pub const VFS_CONF: c_int = 2; + +/// Synchronously wait for I/O to complete. +pub const MNT_WAIT: c_int = 1; +/// Start all I/O, but do not wait for it. +pub const MNT_NOWAIT: c_int = 2; +/// Push data not written by filesystem syncer. +pub const MNT_LAZY: c_int = 3; +/// Suspend file system after sync. +pub const MNT_SUSPEND: c_int = 4; + +pub const MAXSECFLAVORS: c_int = 5; + +/// Statically compiled into kernel. +pub const VFCF_STATIC: c_int = 0x00010000; +/// May get data over the network. +pub const VFCF_NETWORK: c_int = 0x00020000; +/// Writes are not implemented. +pub const VFCF_READONLY: c_int = 0x00040000; +/// Data does not represent real files. +pub const VFCF_SYNTHETIC: c_int = 0x00080000; +/// Aliases some other mounted FS. +pub const VFCF_LOOPBACK: c_int = 0x00100000; +/// Stores file names as Unicode. +pub const VFCF_UNICODE: c_int = 0x00200000; +/// Can be mounted from within a jail. +pub const VFCF_JAIL: c_int = 0x00400000; +/// Supports delegated administration. +pub const VFCF_DELEGADMIN: c_int = 0x00800000; +/// Stop at Boundary: defer stop requests to kernel->user (AST) transition. +pub const VFCF_SBDRY: c_int = 0x01000000; + +// time.h + +/// not on dst +pub const DST_NONE: c_int = 0; +/// USA style dst +pub const DST_USA: c_int = 1; +/// Australian style dst +pub const DST_AUST: c_int = 2; +/// Western European dst +pub const DST_WET: c_int = 3; +/// Middle European dst +pub const DST_MET: c_int = 4; +/// Eastern European dst +pub const DST_EET: c_int = 5; +/// Canada +pub const DST_CAN: c_int = 6; + +pub const CPUCLOCK_WHICH_PID: c_int = 0; +pub const CPUCLOCK_WHICH_TID: c_int = 1; + +pub const MFD_CLOEXEC: c_uint = 0x00000001; +pub const MFD_ALLOW_SEALING: c_uint = 0x00000002; +pub const MFD_HUGETLB: c_uint = 0x00000004; +pub const MFD_HUGE_MASK: c_uint = 0xFC000000; +pub const MFD_HUGE_64KB: c_uint = 16 << 26; +pub const MFD_HUGE_512KB: c_uint = 19 << 26; +pub const MFD_HUGE_1MB: c_uint = 20 << 26; +pub const MFD_HUGE_2MB: c_uint = 21 << 26; +pub const MFD_HUGE_8MB: c_uint = 23 << 26; +pub const MFD_HUGE_16MB: c_uint = 24 << 26; +pub const MFD_HUGE_32MB: c_uint = 25 << 26; +pub const MFD_HUGE_256MB: c_uint = 28 << 26; +pub const MFD_HUGE_512MB: c_uint = 29 << 26; +pub const MFD_HUGE_1GB: c_uint = 30 << 26; +pub const MFD_HUGE_2GB: c_uint = 31 << 26; +pub const MFD_HUGE_16GB: c_uint = 34 << 26; + +pub const SHM_LARGEPAGE_ALLOC_DEFAULT: c_int = 0; +pub const SHM_LARGEPAGE_ALLOC_NOWAIT: c_int = 1; +pub const SHM_LARGEPAGE_ALLOC_HARD: c_int = 2; +pub const SHM_RENAME_NOREPLACE: c_int = 1 << 0; +pub const SHM_RENAME_EXCHANGE: c_int = 1 << 1; + +// sys/umtx.h + +pub const UMTX_OP_WAIT: c_int = 2; +pub const UMTX_OP_WAKE: c_int = 3; +pub const UMTX_OP_MUTEX_TRYLOCK: c_int = 4; +pub const UMTX_OP_MUTEX_LOCK: c_int = 5; +pub const UMTX_OP_MUTEX_UNLOCK: c_int = 6; +pub const UMTX_OP_SET_CEILING: c_int = 7; +pub const UMTX_OP_CV_WAIT: c_int = 8; +pub const UMTX_OP_CV_SIGNAL: c_int = 9; +pub const UMTX_OP_CV_BROADCAST: c_int = 10; +pub const UMTX_OP_WAIT_UINT: c_int = 11; +pub const UMTX_OP_RW_RDLOCK: c_int = 12; +pub const UMTX_OP_RW_WRLOCK: c_int = 13; +pub const 
UMTX_OP_RW_UNLOCK: c_int = 14; +pub const UMTX_OP_WAIT_UINT_PRIVATE: c_int = 15; +pub const UMTX_OP_WAKE_PRIVATE: c_int = 16; +pub const UMTX_OP_MUTEX_WAIT: c_int = 17; +pub const UMTX_OP_NWAKE_PRIVATE: c_int = 21; +pub const UMTX_OP_MUTEX_WAKE2: c_int = 22; +pub const UMTX_OP_SEM2_WAIT: c_int = 23; +pub const UMTX_OP_SEM2_WAKE: c_int = 24; +pub const UMTX_OP_SHM: c_int = 25; +pub const UMTX_OP_ROBUST_LISTS: c_int = 26; + +pub const UMTX_ABSTIME: u32 = 1; + +pub const CPU_LEVEL_ROOT: c_int = 1; +pub const CPU_LEVEL_CPUSET: c_int = 2; +pub const CPU_LEVEL_WHICH: c_int = 3; + +pub const CPU_WHICH_TID: c_int = 1; +pub const CPU_WHICH_PID: c_int = 2; +pub const CPU_WHICH_CPUSET: c_int = 3; +pub const CPU_WHICH_IRQ: c_int = 4; +pub const CPU_WHICH_JAIL: c_int = 5; + +// net/route.h +pub const RTF_LLDATA: c_int = 0x400; +pub const RTF_FIXEDMTU: c_int = 0x80000; + +pub const RTM_VERSION: c_int = 5; + +pub const RTAX_MAX: c_int = 8; + +// sys/signal.h +pub const SIGTHR: c_int = 32; +pub const SIGLWP: c_int = SIGTHR; +pub const SIGLIBRT: c_int = 33; + +// netinet/sctp.h +pub const SCTP_FUTURE_ASSOC: c_int = 0; +pub const SCTP_CURRENT_ASSOC: c_int = 1; +pub const SCTP_ALL_ASSOC: c_int = 2; + +pub const SCTP_NO_NEXT_MSG: c_int = 0x0000; +pub const SCTP_NEXT_MSG_AVAIL: c_int = 0x0001; +pub const SCTP_NEXT_MSG_ISCOMPLETE: c_int = 0x0002; +pub const SCTP_NEXT_MSG_IS_UNORDERED: c_int = 0x0004; +pub const SCTP_NEXT_MSG_IS_NOTIFICATION: c_int = 0x0008; + +pub const SCTP_RECVV_NOINFO: c_int = 0; +pub const SCTP_RECVV_RCVINFO: c_int = 1; +pub const SCTP_RECVV_NXTINFO: c_int = 2; +pub const SCTP_RECVV_RN: c_int = 3; + +pub const SCTP_SENDV_NOINFO: c_int = 0; +pub const SCTP_SENDV_SNDINFO: c_int = 1; +pub const SCTP_SENDV_PRINFO: c_int = 2; +pub const SCTP_SENDV_AUTHINFO: c_int = 3; +pub const SCTP_SENDV_SPA: c_int = 4; + +pub const SCTP_SEND_SNDINFO_VALID: c_int = 0x00000001; +pub const SCTP_SEND_PRINFO_VALID: c_int = 0x00000002; +pub const SCTP_SEND_AUTHINFO_VALID: c_int = 0x00000004; + +pub const SCTP_NOTIFICATION: c_int = 0x0010; +pub const SCTP_COMPLETE: c_int = 0x0020; +pub const SCTP_EOF: c_int = 0x0100; +pub const SCTP_ABORT: c_int = 0x0200; +pub const SCTP_UNORDERED: c_int = 0x0400; +pub const SCTP_ADDR_OVER: c_int = 0x0800; +pub const SCTP_SENDALL: c_int = 0x1000; +pub const SCTP_EOR: c_int = 0x2000; +pub const SCTP_SACK_IMMEDIATELY: c_int = 0x4000; +pub const SCTP_PR_SCTP_NONE: c_int = 0x0000; +pub const SCTP_PR_SCTP_TTL: c_int = 0x0001; +pub const SCTP_PR_SCTP_PRIO: c_int = 0x0002; +pub const SCTP_PR_SCTP_BUF: c_int = SCTP_PR_SCTP_PRIO; +pub const SCTP_PR_SCTP_RTX: c_int = 0x0003; +pub const SCTP_PR_SCTP_MAX: c_int = SCTP_PR_SCTP_RTX; +pub const SCTP_PR_SCTP_ALL: c_int = 0x000f; + +pub const SCTP_INIT: c_int = 0x0001; +pub const SCTP_SNDRCV: c_int = 0x0002; +pub const SCTP_EXTRCV: c_int = 0x0003; +pub const SCTP_SNDINFO: c_int = 0x0004; +pub const SCTP_RCVINFO: c_int = 0x0005; +pub const SCTP_NXTINFO: c_int = 0x0006; +pub const SCTP_PRINFO: c_int = 0x0007; +pub const SCTP_AUTHINFO: c_int = 0x0008; +pub const SCTP_DSTADDRV4: c_int = 0x0009; +pub const SCTP_DSTADDRV6: c_int = 0x000a; + +pub const SCTP_RTOINFO: c_int = 0x00000001; +pub const SCTP_ASSOCINFO: c_int = 0x00000002; +pub const SCTP_INITMSG: c_int = 0x00000003; +pub const SCTP_NODELAY: c_int = 0x00000004; +pub const SCTP_AUTOCLOSE: c_int = 0x00000005; +pub const SCTP_SET_PEER_PRIMARY_ADDR: c_int = 0x00000006; +pub const SCTP_PRIMARY_ADDR: c_int = 0x00000007; +pub const SCTP_ADAPTATION_LAYER: c_int = 0x00000008; +pub const 
SCTP_ADAPTION_LAYER: c_int = 0x00000008; +pub const SCTP_DISABLE_FRAGMENTS: c_int = 0x00000009; +pub const SCTP_PEER_ADDR_PARAMS: c_int = 0x0000000a; +pub const SCTP_DEFAULT_SEND_PARAM: c_int = 0x0000000b; +pub const SCTP_EVENTS: c_int = 0x0000000c; +pub const SCTP_I_WANT_MAPPED_V4_ADDR: c_int = 0x0000000d; +pub const SCTP_MAXSEG: c_int = 0x0000000e; +pub const SCTP_DELAYED_SACK: c_int = 0x0000000f; +pub const SCTP_FRAGMENT_INTERLEAVE: c_int = 0x00000010; +pub const SCTP_PARTIAL_DELIVERY_POINT: c_int = 0x00000011; +pub const SCTP_AUTH_CHUNK: c_int = 0x00000012; +pub const SCTP_AUTH_KEY: c_int = 0x00000013; +pub const SCTP_HMAC_IDENT: c_int = 0x00000014; +pub const SCTP_AUTH_ACTIVE_KEY: c_int = 0x00000015; +pub const SCTP_AUTH_DELETE_KEY: c_int = 0x00000016; +pub const SCTP_USE_EXT_RCVINFO: c_int = 0x00000017; +pub const SCTP_AUTO_ASCONF: c_int = 0x00000018; +pub const SCTP_MAXBURST: c_int = 0x00000019; +pub const SCTP_MAX_BURST: c_int = 0x00000019; +pub const SCTP_CONTEXT: c_int = 0x0000001a; +pub const SCTP_EXPLICIT_EOR: c_int = 0x00000001b; +pub const SCTP_REUSE_PORT: c_int = 0x00000001c; +pub const SCTP_AUTH_DEACTIVATE_KEY: c_int = 0x00000001d; +pub const SCTP_EVENT: c_int = 0x0000001e; +pub const SCTP_RECVRCVINFO: c_int = 0x0000001f; +pub const SCTP_RECVNXTINFO: c_int = 0x00000020; +pub const SCTP_DEFAULT_SNDINFO: c_int = 0x00000021; +pub const SCTP_DEFAULT_PRINFO: c_int = 0x00000022; +pub const SCTP_PEER_ADDR_THLDS: c_int = 0x00000023; +pub const SCTP_REMOTE_UDP_ENCAPS_PORT: c_int = 0x00000024; +pub const SCTP_ECN_SUPPORTED: c_int = 0x00000025; +pub const SCTP_AUTH_SUPPORTED: c_int = 0x00000027; +pub const SCTP_ASCONF_SUPPORTED: c_int = 0x00000028; +pub const SCTP_RECONFIG_SUPPORTED: c_int = 0x00000029; +pub const SCTP_NRSACK_SUPPORTED: c_int = 0x00000030; +pub const SCTP_PKTDROP_SUPPORTED: c_int = 0x00000031; +pub const SCTP_MAX_CWND: c_int = 0x00000032; + +pub const SCTP_STATUS: c_int = 0x00000100; +pub const SCTP_GET_PEER_ADDR_INFO: c_int = 0x00000101; +pub const SCTP_PEER_AUTH_CHUNKS: c_int = 0x00000102; +pub const SCTP_LOCAL_AUTH_CHUNKS: c_int = 0x00000103; +pub const SCTP_GET_ASSOC_NUMBER: c_int = 0x00000104; +pub const SCTP_GET_ASSOC_ID_LIST: c_int = 0x00000105; +pub const SCTP_TIMEOUTS: c_int = 0x00000106; +pub const SCTP_PR_STREAM_STATUS: c_int = 0x00000107; +pub const SCTP_PR_ASSOC_STATUS: c_int = 0x00000108; + +pub const SCTP_COMM_UP: c_int = 0x0001; +pub const SCTP_COMM_LOST: c_int = 0x0002; +pub const SCTP_RESTART: c_int = 0x0003; +pub const SCTP_SHUTDOWN_COMP: c_int = 0x0004; +pub const SCTP_CANT_STR_ASSOC: c_int = 0x0005; + +pub const SCTP_ASSOC_SUPPORTS_PR: c_int = 0x01; +pub const SCTP_ASSOC_SUPPORTS_AUTH: c_int = 0x02; +pub const SCTP_ASSOC_SUPPORTS_ASCONF: c_int = 0x03; +pub const SCTP_ASSOC_SUPPORTS_MULTIBUF: c_int = 0x04; +pub const SCTP_ASSOC_SUPPORTS_RE_CONFIG: c_int = 0x05; +pub const SCTP_ASSOC_SUPPORTS_INTERLEAVING: c_int = 0x06; +pub const SCTP_ASSOC_SUPPORTS_MAX: c_int = 0x06; + +pub const SCTP_ADDR_AVAILABLE: c_int = 0x0001; +pub const SCTP_ADDR_UNREACHABLE: c_int = 0x0002; +pub const SCTP_ADDR_REMOVED: c_int = 0x0003; +pub const SCTP_ADDR_ADDED: c_int = 0x0004; +pub const SCTP_ADDR_MADE_PRIM: c_int = 0x0005; +pub const SCTP_ADDR_CONFIRMED: c_int = 0x0006; + +pub const SCTP_ACTIVE: c_int = 0x0001; +pub const SCTP_INACTIVE: c_int = 0x0002; +pub const SCTP_UNCONFIRMED: c_int = 0x0200; + +pub const SCTP_DATA_UNSENT: c_int = 0x0001; +pub const SCTP_DATA_SENT: c_int = 0x0002; + +pub const SCTP_PARTIAL_DELIVERY_ABORTED: c_int = 0x0001; + +pub const 
SCTP_AUTH_NEW_KEY: c_int = 0x0001; +pub const SCTP_AUTH_NEWKEY: c_int = SCTP_AUTH_NEW_KEY; +pub const SCTP_AUTH_NO_AUTH: c_int = 0x0002; +pub const SCTP_AUTH_FREE_KEY: c_int = 0x0003; + +pub const SCTP_STREAM_RESET_INCOMING_SSN: c_int = 0x0001; +pub const SCTP_STREAM_RESET_OUTGOING_SSN: c_int = 0x0002; +pub const SCTP_STREAM_RESET_DENIED: c_int = 0x0004; +pub const SCTP_STREAM_RESET_FAILED: c_int = 0x0008; + +pub const SCTP_ASSOC_RESET_DENIED: c_int = 0x0004; +pub const SCTP_ASSOC_RESET_FAILED: c_int = 0x0008; + +pub const SCTP_STREAM_CHANGE_DENIED: c_int = 0x0004; +pub const SCTP_STREAM_CHANGE_FAILED: c_int = 0x0008; + +pub const KENV_DUMP_LOADER: c_int = 4; +pub const KENV_DUMP_STATIC: c_int = 5; + +pub const RB_PAUSE: c_int = 0x100000; +pub const RB_REROOT: c_int = 0x200000; +pub const RB_POWERCYCLE: c_int = 0x400000; +pub const RB_PROBE: c_int = 0x10000000; +pub const RB_MULTIPLE: c_int = 0x20000000; + +// netinet/in_pcb.h +pub const INC_ISIPV6: c_uchar = 0x01; +pub const INC_IPV6MINMTU: c_uchar = 0x02; + +// sys/time.h +pub const CLOCK_BOOTTIME: crate::clockid_t = crate::CLOCK_UPTIME; +pub const CLOCK_REALTIME_COARSE: crate::clockid_t = crate::CLOCK_REALTIME_FAST; +pub const CLOCK_MONOTONIC_COARSE: crate::clockid_t = crate::CLOCK_MONOTONIC_FAST; + +// sys/timerfd.h + +pub const TFD_NONBLOCK: c_int = crate::O_NONBLOCK; +pub const TFD_CLOEXEC: c_int = O_CLOEXEC; +pub const TFD_TIMER_ABSTIME: c_int = 0x01; +pub const TFD_TIMER_CANCEL_ON_SET: c_int = 0x02; + +// sys/unistd.h + +pub const CLOSE_RANGE_CLOEXEC: c_uint = 1 << 2; + +pub const KCMP_FILE: c_int = 100; +pub const KCMP_FILEOBJ: c_int = 101; +pub const KCMP_FILES: c_int = 102; +pub const KCMP_SIGHAND: c_int = 103; +pub const KCMP_VM: c_int = 104; + +pub const fn MAP_ALIGNED(a: c_int) -> c_int { + a << 24 +} + +const fn _ALIGN(p: usize) -> usize { + (p + _ALIGNBYTES) & !_ALIGNBYTES +} + +f! 
{ + pub fn CMSG_DATA(cmsg: *const cmsghdr) -> *mut c_uchar { + (cmsg as *mut c_uchar).add(_ALIGN(size_of::<cmsghdr>())) + } + + pub const fn CMSG_LEN(length: c_uint) -> c_uint { + _ALIGN(size_of::<cmsghdr>()) as c_uint + length + } + + pub fn CMSG_NXTHDR(mhdr: *const crate::msghdr, cmsg: *const cmsghdr) -> *mut cmsghdr { + if cmsg.is_null() { + return crate::CMSG_FIRSTHDR(mhdr); + } + let next = cmsg as usize + _ALIGN((*cmsg).cmsg_len as usize) + _ALIGN(size_of::<cmsghdr>()); + let max = (*mhdr).msg_control as usize + (*mhdr).msg_controllen as usize; + if next > max { + core::ptr::null_mut::<cmsghdr>() + } else { + (cmsg as usize + _ALIGN((*cmsg).cmsg_len as usize)) as *mut cmsghdr + } + } + + pub const fn CMSG_SPACE(length: c_uint) -> c_uint { + (_ALIGN(size_of::<cmsghdr>()) + _ALIGN(length as usize)) as c_uint + } + + pub fn MALLOCX_ALIGN(lg: c_uint) -> c_int { + ffsl(lg as c_long - 1) + } + + pub const fn MALLOCX_TCACHE(tc: c_int) -> c_int { + (tc + 2) << 8 as c_int + } + + pub const fn MALLOCX_ARENA(a: c_int) -> c_int { + (a + 1) << 20 as c_int + } + + pub fn SOCKCREDSIZE(ngrps: usize) -> usize { + let ngrps = if ngrps > 0 { ngrps - 1 } else { 0 }; + size_of::<sockcred>() + size_of::<crate::gid_t>() * ngrps + } + + pub fn uname(buf: *mut crate::utsname) -> c_int { + __xuname(256, buf as *mut c_void) + } + + pub fn CPU_ZERO(cpuset: &mut cpuset_t) -> () { + for slot in cpuset.__bits.iter_mut() { + *slot = 0; + } + } + + pub fn CPU_FILL(cpuset: &mut cpuset_t) -> () { + for slot in cpuset.__bits.iter_mut() { + *slot = !0; + } + } + + pub fn CPU_SET(cpu: usize, cpuset: &mut cpuset_t) -> () { + let bitset_bits = 8 * size_of::<c_long>(); + let (idx, offset) = (cpu / bitset_bits, cpu % bitset_bits); + cpuset.__bits[idx] |= 1 << offset; + } + + pub fn CPU_CLR(cpu: usize, cpuset: &mut cpuset_t) -> () { + let bitset_bits = 8 * size_of::<c_long>(); + let (idx, offset) = (cpu / bitset_bits, cpu % bitset_bits); + cpuset.__bits[idx] &= !(1 << offset); + } + + pub fn CPU_ISSET(cpu: usize, cpuset: &cpuset_t) -> bool { + let bitset_bits = 8 * size_of::<c_long>(); + let (idx, offset) = (cpu / bitset_bits, cpu % bitset_bits); + 0 != cpuset.__bits[idx] & (1 << offset) + } + + pub fn CPU_COUNT(cpuset: &cpuset_t) -> c_int { + let mut s: u32 = 0; + let cpuset_size = size_of::<cpuset_t>(); + let bitset_size = size_of::<c_long>(); + + for i in cpuset.__bits[..(cpuset_size / bitset_size)].iter() { + s += i.count_ones(); + } + s as c_int + } + + pub fn SOCKCRED2SIZE(ngrps: usize) -> usize { + let ngrps = if ngrps > 0 { ngrps - 1 } else { 0 }; + size_of::<sockcred2>() + size_of::<crate::gid_t>() * ngrps + } + + pub fn PROT_MAX(x: c_int) -> c_int { + x << 16 + } + + pub fn PROT_MAX_EXTRACT(x: c_int) -> c_int { + (x >> 16) & (crate::PROT_READ | crate::PROT_WRITE | crate::PROT_EXEC) + } +} + +safe_f!
{ + pub const fn WIFSIGNALED(status: c_int) -> bool { + (status & 0o177) != 0o177 && (status & 0o177) != 0 && status != 0x13 + } + + pub const fn INVALID_SINFO_FLAG(x: c_int) -> bool { + (x) & 0xfffffff0 + & !(SCTP_EOF + | SCTP_ABORT + | SCTP_UNORDERED + | SCTP_ADDR_OVER + | SCTP_SENDALL + | SCTP_EOR + | SCTP_SACK_IMMEDIATELY) + != 0 + } + + pub const fn PR_SCTP_POLICY(x: c_int) -> c_int { + x & 0x0f + } + + pub const fn PR_SCTP_ENABLED(x: c_int) -> bool { + PR_SCTP_POLICY(x) != SCTP_PR_SCTP_NONE && PR_SCTP_POLICY(x) != SCTP_PR_SCTP_ALL + } + + pub const fn PR_SCTP_TTL_ENABLED(x: c_int) -> bool { + PR_SCTP_POLICY(x) == SCTP_PR_SCTP_TTL + } + + pub const fn PR_SCTP_BUF_ENABLED(x: c_int) -> bool { + PR_SCTP_POLICY(x) == SCTP_PR_SCTP_BUF + } + + pub const fn PR_SCTP_RTX_ENABLED(x: c_int) -> bool { + PR_SCTP_POLICY(x) == SCTP_PR_SCTP_RTX + } + + pub const fn PR_SCTP_INVALID_POLICY(x: c_int) -> bool { + PR_SCTP_POLICY(x) > SCTP_PR_SCTP_MAX + } + + pub const fn PR_SCTP_VALID_POLICY(x: c_int) -> bool { + PR_SCTP_POLICY(x) <= SCTP_PR_SCTP_MAX + } +} + +cfg_if! { + if #[cfg(not(any(freebsd10, freebsd11)))] { + extern "C" { + pub fn fhlink(fhp: *mut fhandle_t, to: *const c_char) -> c_int; + pub fn fhlinkat(fhp: *mut fhandle_t, tofd: c_int, to: *const c_char) -> c_int; + pub fn fhreadlink(fhp: *mut fhandle_t, buf: *mut c_char, bufsize: size_t) -> c_int; + pub fn getfhat(fd: c_int, path: *mut c_char, fhp: *mut fhandle, flag: c_int) -> c_int; + } + } +} + +extern "C" { + #[cfg_attr(doc, doc(alias = "__errno_location"))] + #[cfg_attr(doc, doc(alias = "errno"))] + pub fn __error() -> *mut c_int; + + pub fn aio_cancel(fd: c_int, aiocbp: *mut aiocb) -> c_int; + pub fn aio_error(aiocbp: *const aiocb) -> c_int; + pub fn aio_fsync(op: c_int, aiocbp: *mut aiocb) -> c_int; + pub fn aio_read(aiocbp: *mut aiocb) -> c_int; + pub fn aio_readv(aiocbp: *mut crate::aiocb) -> c_int; + pub fn aio_return(aiocbp: *mut aiocb) -> ssize_t; + pub fn aio_suspend( + aiocb_list: *const *const aiocb, + nitems: c_int, + timeout: *const crate::timespec, + ) -> c_int; + pub fn aio_write(aiocbp: *mut aiocb) -> c_int; + pub fn aio_writev(aiocbp: *mut crate::aiocb) -> c_int; + + pub fn copy_file_range( + infd: c_int, + inoffp: *mut off_t, + outfd: c_int, + outoffp: *mut off_t, + len: size_t, + flags: c_uint, + ) -> ssize_t; + + pub fn devname_r( + dev: crate::dev_t, + mode: crate::mode_t, + buf: *mut c_char, + len: c_int, + ) -> *mut c_char; + + pub fn extattr_delete_fd(fd: c_int, attrnamespace: c_int, attrname: *const c_char) -> c_int; + pub fn extattr_delete_file( + path: *const c_char, + attrnamespace: c_int, + attrname: *const c_char, + ) -> c_int; + pub fn extattr_delete_link( + path: *const c_char, + attrnamespace: c_int, + attrname: *const c_char, + ) -> c_int; + pub fn extattr_get_fd( + fd: c_int, + attrnamespace: c_int, + attrname: *const c_char, + data: *mut c_void, + nbytes: size_t, + ) -> ssize_t; + pub fn extattr_get_file( + path: *const c_char, + attrnamespace: c_int, + attrname: *const c_char, + data: *mut c_void, + nbytes: size_t, + ) -> ssize_t; + pub fn extattr_get_link( + path: *const c_char, + attrnamespace: c_int, + attrname: *const c_char, + data: *mut c_void, + nbytes: size_t, + ) -> ssize_t; + pub fn extattr_list_fd( + fd: c_int, + attrnamespace: c_int, + data: *mut c_void, + nbytes: size_t, + ) -> ssize_t; + pub fn extattr_list_file( + path: *const c_char, + attrnamespace: c_int, + data: *mut c_void, + nbytes: size_t, + ) -> ssize_t; + pub fn extattr_list_link( + path: *const c_char, + attrnamespace: c_int, + 
data: *mut c_void, + nbytes: size_t, + ) -> ssize_t; + pub fn extattr_set_fd( + fd: c_int, + attrnamespace: c_int, + attrname: *const c_char, + data: *const c_void, + nbytes: size_t, + ) -> ssize_t; + pub fn extattr_set_file( + path: *const c_char, + attrnamespace: c_int, + attrname: *const c_char, + data: *const c_void, + nbytes: size_t, + ) -> ssize_t; + pub fn extattr_set_link( + path: *const c_char, + attrnamespace: c_int, + attrname: *const c_char, + data: *const c_void, + nbytes: size_t, + ) -> ssize_t; + + pub fn fspacectl( + fd: c_int, + cmd: c_int, + rqsr: *const spacectl_range, + flags: c_int, + rmsr: *mut spacectl_range, + ) -> c_int; + + pub fn jail(jail: *mut crate::jail) -> c_int; + pub fn jail_attach(jid: c_int) -> c_int; + pub fn jail_remove(jid: c_int) -> c_int; + pub fn jail_get(iov: *mut crate::iovec, niov: c_uint, flags: c_int) -> c_int; + pub fn jail_set(iov: *mut crate::iovec, niov: c_uint, flags: c_int) -> c_int; + + pub fn lio_listio( + mode: c_int, + aiocb_list: *const *mut aiocb, + nitems: c_int, + sevp: *mut sigevent, + ) -> c_int; + + pub fn getutxuser(user: *const c_char) -> *mut utmpx; + pub fn setutxdb(_type: c_int, file: *const c_char) -> c_int; + + pub fn aio_waitcomplete(iocbp: *mut *mut aiocb, timeout: *mut crate::timespec) -> ssize_t; + pub fn mq_getfd_np(mqd: crate::mqd_t) -> c_int; + + pub fn waitid( + idtype: idtype_t, + id: crate::id_t, + infop: *mut crate::siginfo_t, + options: c_int, + ) -> c_int; + pub fn ptsname_r(fd: c_int, buf: *mut c_char, buflen: size_t) -> c_int; + + pub fn ftok(pathname: *const c_char, proj_id: c_int) -> crate::key_t; + pub fn shmget(key: crate::key_t, size: size_t, shmflg: c_int) -> c_int; + pub fn shmat(shmid: c_int, shmaddr: *const c_void, shmflg: c_int) -> *mut c_void; + pub fn shmdt(shmaddr: *const c_void) -> c_int; + pub fn shmctl(shmid: c_int, cmd: c_int, buf: *mut crate::shmid_ds) -> c_int; + pub fn semget(key: crate::key_t, nsems: c_int, semflg: c_int) -> c_int; + pub fn semctl(semid: c_int, semnum: c_int, cmd: c_int, ...) 
-> c_int; + pub fn semop(semid: c_int, sops: *mut sembuf, nsops: size_t) -> c_int; + pub fn msgctl(msqid: c_int, cmd: c_int, buf: *mut crate::msqid_ds) -> c_int; + pub fn msgget(key: crate::key_t, msgflg: c_int) -> c_int; + pub fn msgsnd(msqid: c_int, msgp: *const c_void, msgsz: size_t, msgflg: c_int) -> c_int; + pub fn cfmakesane(termios: *mut crate::termios); + + pub fn pdfork(fdp: *mut c_int, flags: c_int) -> crate::pid_t; + pub fn pdgetpid(fd: c_int, pidp: *mut crate::pid_t) -> c_int; + pub fn pdkill(fd: c_int, signum: c_int) -> c_int; + + pub fn rtprio_thread(function: c_int, lwpid: crate::lwpid_t, rtp: *mut super::rtprio) -> c_int; + + pub fn uuidgen(store: *mut uuid, count: c_int) -> c_int; + + pub fn thr_kill(id: c_long, sig: c_int) -> c_int; + pub fn thr_kill2(pid: crate::pid_t, id: c_long, sig: c_int) -> c_int; + pub fn thr_self(tid: *mut c_long) -> c_int; + pub fn pthread_getthreadid_np() -> c_int; + pub fn pthread_getaffinity_np( + td: crate::pthread_t, + cpusetsize: size_t, + cpusetp: *mut cpuset_t, + ) -> c_int; + pub fn pthread_setaffinity_np( + td: crate::pthread_t, + cpusetsize: size_t, + cpusetp: *const cpuset_t, + ) -> c_int; + + // sched.h linux compatibility api + pub fn sched_getaffinity( + pid: crate::pid_t, + cpusetsz: size_t, + cpuset: *mut crate::cpuset_t, + ) -> c_int; + pub fn sched_setaffinity( + pid: crate::pid_t, + cpusetsz: size_t, + cpuset: *const crate::cpuset_t, + ) -> c_int; + pub fn sched_getcpu() -> c_int; + + pub fn pthread_mutex_consistent(mutex: *mut crate::pthread_mutex_t) -> c_int; + + pub fn pthread_mutexattr_getrobust( + attr: *mut crate::pthread_mutexattr_t, + robust: *mut c_int, + ) -> c_int; + pub fn pthread_mutexattr_setrobust( + attr: *mut crate::pthread_mutexattr_t, + robust: c_int, + ) -> c_int; + + pub fn pthread_spin_init(lock: *mut pthread_spinlock_t, pshared: c_int) -> c_int; + pub fn pthread_spin_destroy(lock: *mut pthread_spinlock_t) -> c_int; + pub fn pthread_spin_lock(lock: *mut pthread_spinlock_t) -> c_int; + pub fn pthread_spin_trylock(lock: *mut pthread_spinlock_t) -> c_int; + pub fn pthread_spin_unlock(lock: *mut pthread_spinlock_t) -> c_int; + + #[cfg_attr(all(target_os = "freebsd", freebsd11), link_name = "statfs@FBSD_1.0")] + pub fn statfs(path: *const c_char, buf: *mut statfs) -> c_int; + #[cfg_attr(all(target_os = "freebsd", freebsd11), link_name = "fstatfs@FBSD_1.0")] + pub fn fstatfs(fd: c_int, buf: *mut statfs) -> c_int; + + pub fn dup3(src: c_int, dst: c_int, flags: c_int) -> c_int; + pub fn __xuname(nmln: c_int, buf: *mut c_void) -> c_int; + + pub fn sendmmsg( + sockfd: c_int, + msgvec: *mut crate::mmsghdr, + vlen: size_t, + flags: c_int, + ) -> ssize_t; + pub fn recvmmsg( + sockfd: c_int, + msgvec: *mut crate::mmsghdr, + vlen: size_t, + flags: c_int, + timeout: *const crate::timespec, + ) -> ssize_t; + pub fn memmem( + haystack: *const c_void, + haystacklen: size_t, + needle: *const c_void, + needlelen: size_t, + ) -> *mut c_void; + + pub fn fhopen(fhp: *const fhandle_t, flags: c_int) -> c_int; + pub fn fhstat(fhp: *const fhandle, buf: *mut crate::stat) -> c_int; + pub fn fhstatfs(fhp: *const fhandle_t, buf: *mut crate::statfs) -> c_int; + pub fn getfh(path: *const c_char, fhp: *mut fhandle_t) -> c_int; + pub fn lgetfh(path: *const c_char, fhp: *mut fhandle_t) -> c_int; + pub fn getfsstat(buf: *mut crate::statfs, bufsize: c_long, mode: c_int) -> c_int; + #[cfg_attr( + all(target_os = "freebsd", freebsd11), + link_name = "getmntinfo@FBSD_1.0" + )] + pub fn getmntinfo(mntbufp: *mut *mut crate::statfs, mode: c_int) -> 
c_int; + pub fn mount( + type_: *const c_char, + dir: *const c_char, + flags: c_int, + data: *mut c_void, + ) -> c_int; + pub fn nmount(iov: *mut crate::iovec, niov: c_uint, flags: c_int) -> c_int; + + pub fn setproctitle(fmt: *const c_char, ...); + pub fn rfork(flags: c_int) -> c_int; + pub fn cpuset_getaffinity( + level: cpulevel_t, + which: cpuwhich_t, + id: crate::id_t, + setsize: size_t, + mask: *mut cpuset_t, + ) -> c_int; + pub fn cpuset_setaffinity( + level: cpulevel_t, + which: cpuwhich_t, + id: crate::id_t, + setsize: size_t, + mask: *const cpuset_t, + ) -> c_int; + pub fn cpuset(setid: *mut crate::cpusetid_t) -> c_int; + pub fn cpuset_getid( + level: cpulevel_t, + which: cpuwhich_t, + id: crate::id_t, + setid: *mut crate::cpusetid_t, + ) -> c_int; + pub fn cpuset_setid(which: cpuwhich_t, id: crate::id_t, setid: crate::cpusetid_t) -> c_int; + pub fn cap_enter() -> c_int; + pub fn cap_getmode(modep: *mut c_uint) -> c_int; + pub fn cap_fcntls_get(fd: c_int, fcntlrightsp: *mut u32) -> c_int; + pub fn cap_fcntls_limit(fd: c_int, fcntlrights: u32) -> c_int; + pub fn cap_ioctls_get(fd: c_int, cmds: *mut u_long, maxcmds: usize) -> isize; + pub fn cap_ioctls_limit(fd: c_int, cmds: *const u_long, ncmds: usize) -> c_int; + pub fn __cap_rights_init(version: c_int, rights: *mut cap_rights_t, ...) -> *mut cap_rights_t; + pub fn __cap_rights_get(version: c_int, fd: c_int, rightsp: *mut cap_rights_t) -> c_int; + pub fn __cap_rights_set(rights: *mut cap_rights_t, ...) -> *mut cap_rights_t; + pub fn __cap_rights_clear(rights: *mut cap_rights_t, ...) -> *mut cap_rights_t; + pub fn __cap_rights_is_set(rights: *const cap_rights_t, ...) -> bool; + pub fn cap_rights_is_valid(rights: *const cap_rights_t) -> bool; + pub fn cap_rights_limit(fd: c_int, rights: *const cap_rights_t) -> c_int; + pub fn cap_rights_merge(dst: *mut cap_rights_t, src: *const cap_rights_t) -> *mut cap_rights_t; + pub fn cap_rights_remove(dst: *mut cap_rights_t, src: *const cap_rights_t) + -> *mut cap_rights_t; + pub fn cap_rights_contains(big: *const cap_rights_t, little: *const cap_rights_t) -> bool; + pub fn cap_sandboxed() -> bool; + + pub fn reallocarray(ptr: *mut c_void, nmemb: size_t, size: size_t) -> *mut c_void; + + pub fn ffs(value: c_int) -> c_int; + pub fn ffsl(value: c_long) -> c_int; + pub fn ffsll(value: c_longlong) -> c_int; + pub fn fls(value: c_int) -> c_int; + pub fn flsl(value: c_long) -> c_int; + pub fn flsll(value: c_longlong) -> c_int; + pub fn malloc_stats_print( + write_cb: unsafe extern "C" fn(*mut c_void, *const c_char), + cbopaque: *mut c_void, + opt: *const c_char, + ); + pub fn mallctl( + name: *const c_char, + oldp: *mut c_void, + oldlenp: *mut size_t, + newp: *mut c_void, + newlen: size_t, + ) -> c_int; + pub fn mallctlnametomib(name: *const c_char, mibp: *mut size_t, miplen: *mut size_t) -> c_int; + pub fn mallctlbymib( + mib: *const size_t, + mible: size_t, + oldp: *mut c_void, + oldlenp: *mut size_t, + newp: *mut c_void, + newlen: size_t, + ) -> c_int; + pub fn mallocx(size: size_t, flags: c_int) -> *mut c_void; + pub fn rallocx(ptr: *mut c_void, size: size_t, flags: c_int) -> *mut c_void; + pub fn xallocx(ptr: *mut c_void, size: size_t, extra: size_t, flags: c_int) -> size_t; + pub fn sallocx(ptr: *const c_void, flags: c_int) -> size_t; + pub fn dallocx(ptr: *mut c_void, flags: c_int); + pub fn sdallocx(ptr: *mut c_void, size: size_t, flags: c_int); + pub fn nallocx(size: size_t, flags: c_int) -> size_t; + + pub fn procctl( + idtype: crate::idtype_t, + id: crate::id_t, + cmd: c_int, + data: *mut 
c_void, + ) -> c_int; + + pub fn getpagesize() -> c_int; + pub fn getpagesizes(pagesize: *mut size_t, nelem: c_int) -> c_int; + + pub fn clock_getcpuclockid2(arg1: crate::id_t, arg2: c_int, arg3: *mut clockid_t) -> c_int; + pub fn strchrnul(s: *const c_char, c: c_int) -> *mut c_char; + + pub fn shm_create_largepage( + path: *const c_char, + flags: c_int, + psind: c_int, + alloc_policy: c_int, + mode: crate::mode_t, + ) -> c_int; + pub fn shm_rename(path_from: *const c_char, path_to: *const c_char, flags: c_int) -> c_int; + pub fn memfd_create(name: *const c_char, flags: c_uint) -> c_int; + pub fn setaudit(auditinfo: *const auditinfo_t) -> c_int; + + pub fn eventfd(init: c_uint, flags: c_int) -> c_int; + pub fn eventfd_read(fd: c_int, value: *mut eventfd_t) -> c_int; + pub fn eventfd_write(fd: c_int, value: eventfd_t) -> c_int; + + pub fn fdatasync(fd: c_int) -> c_int; + + pub fn elf_aux_info(aux: c_int, buf: *mut c_void, buflen: c_int) -> c_int; + pub fn setproctitle_fast(fmt: *const c_char, ...); + pub fn timingsafe_bcmp(a: *const c_void, b: *const c_void, len: size_t) -> c_int; + pub fn timingsafe_memcmp(a: *const c_void, b: *const c_void, len: size_t) -> c_int; + + pub fn _umtx_op( + obj: *mut c_void, + op: c_int, + val: c_ulong, + uaddr: *mut c_void, + uaddr2: *mut c_void, + ) -> c_int; + + pub fn sctp_peeloff(s: c_int, id: crate::sctp_assoc_t) -> c_int; + pub fn sctp_bindx(s: c_int, addrs: *mut crate::sockaddr, num: c_int, tpe: c_int) -> c_int; + pub fn sctp_connectx( + s: c_int, + addrs: *const crate::sockaddr, + addrcnt: c_int, + id: *mut crate::sctp_assoc_t, + ) -> c_int; + pub fn sctp_getaddrlen(family: crate::sa_family_t) -> c_int; + pub fn sctp_getpaddrs( + s: c_int, + asocid: crate::sctp_assoc_t, + addrs: *mut *mut crate::sockaddr, + ) -> c_int; + pub fn sctp_freepaddrs(addrs: *mut crate::sockaddr); + pub fn sctp_getladdrs( + s: c_int, + asocid: crate::sctp_assoc_t, + addrs: *mut *mut crate::sockaddr, + ) -> c_int; + pub fn sctp_freeladdrs(addrs: *mut crate::sockaddr); + pub fn sctp_opt_info( + s: c_int, + id: crate::sctp_assoc_t, + opt: c_int, + arg: *mut c_void, + size: *mut crate::socklen_t, + ) -> c_int; + pub fn sctp_sendv( + sd: c_int, + iov: *const crate::iovec, + iovcnt: c_int, + addrs: *mut crate::sockaddr, + addrcnt: c_int, + info: *mut c_void, + infolen: crate::socklen_t, + infotype: c_uint, + flags: c_int, + ) -> ssize_t; + pub fn sctp_recvv( + sd: c_int, + iov: *const crate::iovec, + iovcnt: c_int, + from: *mut crate::sockaddr, + fromlen: *mut crate::socklen_t, + info: *mut c_void, + infolen: *mut crate::socklen_t, + infotype: *mut c_uint, + flags: *mut c_int, + ) -> ssize_t; + + pub fn timerfd_create(clockid: c_int, flags: c_int) -> c_int; + pub fn timerfd_gettime(fd: c_int, curr_value: *mut itimerspec) -> c_int; + pub fn timerfd_settime( + fd: c_int, + flags: c_int, + new_value: *const itimerspec, + old_value: *mut itimerspec, + ) -> c_int; + pub fn closefrom(lowfd: c_int); + pub fn close_range(lowfd: c_uint, highfd: c_uint, flags: c_int) -> c_int; + + pub fn execvpe( + file: *const c_char, + argv: *const *const c_char, + envp: *const *const c_char, + ) -> c_int; + + pub fn kcmp( + pid1: crate::pid_t, + pid2: crate::pid_t, + type_: c_int, + idx1: c_ulong, + idx2: c_ulong, + ) -> c_int; + pub fn dlvsym( + handle: *mut c_void, + symbol: *const c_char, + version: *const c_char, + ) -> *mut c_void; +} + +#[link(name = "memstat")] +extern "C" { + pub fn memstat_strerror(error: c_int) -> *const c_char; + pub fn memstat_mtl_alloc() -> *mut memory_type_list; + pub fn 
memstat_mtl_first(list: *mut memory_type_list) -> *mut memory_type; + pub fn memstat_mtl_next(mtp: *mut memory_type) -> *mut memory_type; + pub fn memstat_mtl_find( + list: *mut memory_type_list, + allocator: c_int, + name: *const c_char, + ) -> *mut memory_type; + pub fn memstat_mtl_free(list: *mut memory_type_list); + pub fn memstat_mtl_geterror(list: *mut memory_type_list) -> c_int; + pub fn memstat_get_name(mtp: *const memory_type) -> *const c_char; +} + +#[link(name = "kvm")] +extern "C" { + pub fn kvm_dpcpu_setcpu(kd: *mut crate::kvm_t, cpu: c_uint) -> c_int; + pub fn kvm_getargv( + kd: *mut crate::kvm_t, + p: *const kinfo_proc, + nchr: c_int, + ) -> *mut *mut c_char; + pub fn kvm_getcptime(kd: *mut crate::kvm_t, cp_time: *mut c_long) -> c_int; + pub fn kvm_getenvv( + kd: *mut crate::kvm_t, + p: *const kinfo_proc, + nchr: c_int, + ) -> *mut *mut c_char; + pub fn kvm_geterr(kd: *mut crate::kvm_t) -> *mut c_char; + pub fn kvm_getmaxcpu(kd: *mut crate::kvm_t) -> c_int; + pub fn kvm_getncpus(kd: *mut crate::kvm_t) -> c_int; + pub fn kvm_getpcpu(kd: *mut crate::kvm_t, cpu: c_int) -> *mut c_void; + pub fn kvm_counter_u64_fetch(kd: *mut crate::kvm_t, base: c_ulong) -> u64; + pub fn kvm_getswapinfo( + kd: *mut crate::kvm_t, + info: *mut kvm_swap, + maxswap: c_int, + flags: c_int, + ) -> c_int; + pub fn kvm_native(kd: *mut crate::kvm_t) -> c_int; + pub fn kvm_nlist(kd: *mut crate::kvm_t, nl: *mut nlist) -> c_int; + pub fn kvm_nlist2(kd: *mut crate::kvm_t, nl: *mut kvm_nlist) -> c_int; + pub fn kvm_read_zpcpu( + kd: *mut crate::kvm_t, + base: c_ulong, + buf: *mut c_void, + size: size_t, + cpu: c_int, + ) -> ssize_t; + pub fn kvm_read2( + kd: *mut crate::kvm_t, + addr: kvaddr_t, + buf: *mut c_void, + nbytes: size_t, + ) -> ssize_t; +} + +#[link(name = "util")] +extern "C" { + pub fn extattr_namespace_to_string(attrnamespace: c_int, string: *mut *mut c_char) -> c_int; + pub fn extattr_string_to_namespace(string: *const c_char, attrnamespace: *mut c_int) -> c_int; + pub fn realhostname(host: *mut c_char, hsize: size_t, ip: *const crate::in_addr) -> c_int; + pub fn realhostname_sa( + host: *mut c_char, + hsize: size_t, + addr: *mut crate::sockaddr, + addrlen: c_int, + ) -> c_int; + + pub fn kld_isloaded(name: *const c_char) -> c_int; + pub fn kld_load(name: *const c_char) -> c_int; + + pub fn kinfo_getvmmap(pid: crate::pid_t, cntp: *mut c_int) -> *mut kinfo_vmentry; + + pub fn hexdump(ptr: *const c_void, length: c_int, hdr: *const c_char, flags: c_int); + pub fn humanize_number( + buf: *mut c_char, + len: size_t, + number: i64, + suffix: *const c_char, + scale: c_int, + flags: c_int, + ) -> c_int; + + pub fn flopen(path: *const c_char, flags: c_int, ...) -> c_int; + pub fn flopenat(fd: c_int, path: *const c_char, flags: c_int, ...) 
-> c_int; + + pub fn getlocalbase() -> *const c_char; + + pub fn pidfile_open( + path: *const c_char, + mode: crate::mode_t, + pidptr: *mut crate::pid_t, + ) -> *mut crate::pidfh; + pub fn pidfile_write(path: *mut crate::pidfh) -> c_int; + pub fn pidfile_close(path: *mut crate::pidfh) -> c_int; + pub fn pidfile_remove(path: *mut crate::pidfh) -> c_int; + pub fn pidfile_fileno(path: *const crate::pidfh) -> c_int; + // FIXME(freebsd): pidfile_signal in due time (both manpage present and updated image snapshot) +} + +#[link(name = "procstat")] +extern "C" { + pub fn procstat_open_sysctl() -> *mut procstat; + pub fn procstat_getfiles( + procstat: *mut procstat, + kp: *mut kinfo_proc, + mmapped: c_int, + ) -> *mut filestat_list; + pub fn procstat_freefiles(procstat: *mut procstat, head: *mut filestat_list); + pub fn procstat_getprocs( + procstat: *mut procstat, + what: c_int, + arg: c_int, + count: *mut c_uint, + ) -> *mut kinfo_proc; + pub fn procstat_freeprocs(procstat: *mut procstat, p: *mut kinfo_proc); + pub fn procstat_getvmmap( + procstat: *mut procstat, + kp: *mut kinfo_proc, + count: *mut c_uint, + ) -> *mut kinfo_vmentry; + pub fn procstat_freevmmap(procstat: *mut procstat, vmmap: *mut kinfo_vmentry); + pub fn procstat_close(procstat: *mut procstat); + pub fn procstat_freeargv(procstat: *mut procstat); + pub fn procstat_freeenvv(procstat: *mut procstat); + pub fn procstat_freegroups(procstat: *mut procstat, groups: *mut crate::gid_t); + pub fn procstat_freeptlwpinfo(procstat: *mut procstat, pl: *mut ptrace_lwpinfo); + pub fn procstat_getargv( + procstat: *mut procstat, + kp: *mut kinfo_proc, + nchr: size_t, + ) -> *mut *mut c_char; + pub fn procstat_getenvv( + procstat: *mut procstat, + kp: *mut kinfo_proc, + nchr: size_t, + ) -> *mut *mut c_char; + pub fn procstat_getgroups( + procstat: *mut procstat, + kp: *mut kinfo_proc, + count: *mut c_uint, + ) -> *mut crate::gid_t; + pub fn procstat_getosrel( + procstat: *mut procstat, + kp: *mut kinfo_proc, + osrelp: *mut c_int, + ) -> c_int; + pub fn procstat_getpathname( + procstat: *mut procstat, + kp: *mut kinfo_proc, + pathname: *mut c_char, + maxlen: size_t, + ) -> c_int; + pub fn procstat_getrlimit( + procstat: *mut procstat, + kp: *mut kinfo_proc, + which: c_int, + rlimit: *mut crate::rlimit, + ) -> c_int; + pub fn procstat_getumask( + procstat: *mut procstat, + kp: *mut kinfo_proc, + maskp: *mut c_ushort, + ) -> c_int; + pub fn procstat_open_core(filename: *const c_char) -> *mut procstat; + pub fn procstat_open_kvm(nlistf: *const c_char, memf: *const c_char) -> *mut procstat; + pub fn procstat_get_socket_info( + proc_: *mut procstat, + fst: *mut filestat, + sock: *mut sockstat, + errbuf: *mut c_char, + ) -> c_int; + pub fn procstat_get_vnode_info( + proc_: *mut procstat, + fst: *mut filestat, + vn: *mut vnstat, + errbuf: *mut c_char, + ) -> c_int; + pub fn procstat_get_pts_info( + proc_: *mut procstat, + fst: *mut filestat, + pts: *mut ptsstat, + errbuf: *mut c_char, + ) -> c_int; + pub fn procstat_get_shm_info( + proc_: *mut procstat, + fst: *mut filestat, + shm: *mut shmstat, + errbuf: *mut c_char, + ) -> c_int; +} + +#[link(name = "rt")] +extern "C" { + pub fn timer_create(clock_id: clockid_t, evp: *mut sigevent, timerid: *mut timer_t) -> c_int; + pub fn timer_delete(timerid: timer_t) -> c_int; + pub fn timer_getoverrun(timerid: timer_t) -> c_int; + pub fn timer_gettime(timerid: timer_t, value: *mut itimerspec) -> c_int; + pub fn timer_settime( + timerid: timer_t, + flags: c_int, + value: *const itimerspec, + ovalue: *mut 
itimerspec, + ) -> c_int; +} + +#[link(name = "devstat")] +extern "C" { + pub fn devstat_getnumdevs(kd: *mut crate::kvm_t) -> c_int; + pub fn devstat_getgeneration(kd: *mut crate::kvm_t) -> c_long; + pub fn devstat_getversion(kd: *mut crate::kvm_t) -> c_int; + pub fn devstat_checkversion(kd: *mut crate::kvm_t) -> c_int; + pub fn devstat_selectdevs( + dev_select: *mut *mut device_selection, + num_selected: *mut c_int, + num_selections: *mut c_int, + select_generation: *mut c_long, + current_generation: c_long, + devices: *mut devstat, + numdevs: c_int, + matches: *mut devstat_match, + num_matches: c_int, + dev_selections: *mut *mut c_char, + num_dev_selections: c_int, + select_mode: devstat_select_mode, + maxshowdevs: c_int, + perf_select: c_int, + ) -> c_int; + pub fn devstat_buildmatch( + match_str: *mut c_char, + matches: *mut *mut devstat_match, + num_matches: *mut c_int, + ) -> c_int; +} + +cfg_if! { + if #[cfg(freebsd15)] { + mod freebsd15; + pub use self::freebsd15::*; + } else if #[cfg(freebsd14)] { + mod freebsd14; + pub use self::freebsd14::*; + } else if #[cfg(freebsd13)] { + mod freebsd13; + pub use self::freebsd13::*; + } else if #[cfg(freebsd12)] { + mod freebsd12; + pub use self::freebsd12::*; + } else if #[cfg(any(freebsd10, freebsd11))] { + mod freebsd11; + pub use self::freebsd11::*; + } else { + // Unknown freebsd version + } +} + +cfg_if! { + if #[cfg(target_arch = "x86")] { + mod x86; + pub use self::x86::*; + } else if #[cfg(target_arch = "x86_64")] { + mod x86_64; + pub use self::x86_64::*; + } else if #[cfg(target_arch = "aarch64")] { + mod aarch64; + pub use self::aarch64::*; + } else if #[cfg(target_arch = "arm")] { + mod arm; + pub use self::arm::*; + } else if #[cfg(target_arch = "powerpc64")] { + mod powerpc64; + pub use self::powerpc64::*; + } else if #[cfg(target_arch = "powerpc")] { + mod powerpc; + pub use self::powerpc::*; + } else if #[cfg(target_arch = "riscv64")] { + mod riscv64; + pub use self::riscv64::*; + } else { + // Unknown target_arch + } +} diff --git a/vendor/libc/src/unix/bsd/freebsdlike/freebsd/powerpc.rs b/vendor/libc/src/unix/bsd/freebsdlike/freebsd/powerpc.rs new file mode 100644 index 00000000000000..e4275b10ba508c --- /dev/null +++ b/vendor/libc/src/unix/bsd/freebsdlike/freebsd/powerpc.rs @@ -0,0 +1,62 @@ +use crate::prelude::*; + +pub type clock_t = u32; +pub type wchar_t = i32; +pub type time_t = i64; +pub type suseconds_t = i32; +pub type register_t = i32; + +s_no_extra_traits! { + #[repr(align(16))] + pub struct mcontext_t { + pub mc_vers: c_int, + pub mc_flags: c_int, + pub mc_onstack: c_int, + pub mc_len: c_int, + pub mc_avec: [u64; 64], + pub mc_av: [u32; 2], + pub mc_frame: [crate::register_t; 42], + pub mc_fpreg: [u64; 33], + pub mc_vsxfpreg: [u64; 32], + } +} + +cfg_if! 
{ + if #[cfg(feature = "extra_traits")] { + impl PartialEq for mcontext_t { + fn eq(&self, other: &mcontext_t) -> bool { + self.mc_vers == other.mc_vers + && self.mc_flags == other.mc_flags + && self.mc_onstack == other.mc_onstack + && self.mc_len == other.mc_len + && self.mc_avec == other.mc_avec + && self.mc_av == other.mc_av + && self.mc_frame == other.mc_frame + && self.mc_fpreg == other.mc_fpreg + && self.mc_vsxfpreg == other.mc_vsxfpreg + } + } + impl Eq for mcontext_t {} + impl hash::Hash for mcontext_t { + fn hash<H: hash::Hasher>(&self, state: &mut H) { + self.mc_vers.hash(state); + self.mc_flags.hash(state); + self.mc_onstack.hash(state); + self.mc_len.hash(state); + self.mc_avec.hash(state); + self.mc_av.hash(state); + self.mc_frame.hash(state); + self.mc_fpreg.hash(state); + self.mc_vsxfpreg.hash(state); + } + } + } +} + +pub(crate) const _ALIGNBYTES: usize = size_of::<c_long>() - 1; + +pub const BIOCSRTIMEOUT: c_ulong = 0x8010426d; +pub const BIOCGRTIMEOUT: c_ulong = 0x4010426e; +pub const MAP_32BIT: c_int = 0x00080000; +pub const MINSIGSTKSZ: size_t = 2048; // 512 * 4 +pub const TIOCTIMESTAMP: c_ulong = 0x40107459; diff --git a/vendor/libc/src/unix/bsd/freebsdlike/freebsd/powerpc64.rs b/vendor/libc/src/unix/bsd/freebsdlike/freebsd/powerpc64.rs new file mode 100644 index 00000000000000..b5a81311ecc60b --- /dev/null +++ b/vendor/libc/src/unix/bsd/freebsdlike/freebsd/powerpc64.rs @@ -0,0 +1,63 @@ +use crate::prelude::*; + +pub type clock_t = u32; +pub type wchar_t = i32; +pub type time_t = i64; +pub type suseconds_t = i64; +pub type register_t = i64; + +s_no_extra_traits! { + #[repr(align(16))] + pub struct mcontext_t { + pub mc_vers: c_int, + pub mc_flags: c_int, + pub mc_onstack: c_int, + pub mc_len: c_int, + pub mc_avec: [u64; 64], + pub mc_av: [u32; 2], + pub mc_frame: [crate::register_t; 42], + pub mc_fpreg: [u64; 33], + pub mc_vsxfpreg: [u64; 32], + } +} + +cfg_if! { + if #[cfg(feature = "extra_traits")] { + impl PartialEq for mcontext_t { + fn eq(&self, other: &mcontext_t) -> bool { + self.mc_vers == other.mc_vers + && self.mc_flags == other.mc_flags + && self.mc_onstack == other.mc_onstack + && self.mc_len == other.mc_len + && self.mc_avec == other.mc_avec + && self.mc_av == other.mc_av + && self.mc_frame == other.mc_frame + && self.mc_fpreg == other.mc_fpreg + && self.mc_vsxfpreg == other.mc_vsxfpreg + } + } + impl Eq for mcontext_t {} + impl hash::Hash for mcontext_t { + fn hash<H: hash::Hasher>(&self, state: &mut H) { + self.mc_vers.hash(state); + self.mc_flags.hash(state); + self.mc_onstack.hash(state); + self.mc_len.hash(state); + self.mc_avec.hash(state); + self.mc_av.hash(state); + self.mc_frame.hash(state); + self.mc_fpreg.hash(state); + self.mc_vsxfpreg.hash(state); + } + } + } +} + +pub(crate) const _ALIGNBYTES: usize = size_of::<c_long>() - 1; + +pub const BIOCSRTIMEOUT: c_ulong = 0x8010426d; +pub const BIOCGRTIMEOUT: c_ulong = 0x4010426e; + +pub const MAP_32BIT: c_int = 0x00080000; +pub const MINSIGSTKSZ: size_t = 2048; // 512 * 4 +pub const TIOCTIMESTAMP: c_ulong = 0x40107459; diff --git a/vendor/libc/src/unix/bsd/freebsdlike/freebsd/riscv64.rs b/vendor/libc/src/unix/bsd/freebsdlike/freebsd/riscv64.rs new file mode 100644 index 00000000000000..5ae5d34a746605 --- /dev/null +++ b/vendor/libc/src/unix/bsd/freebsdlike/freebsd/riscv64.rs @@ -0,0 +1,116 @@ +use crate::prelude::*; + +pub type clock_t = i32; +pub type wchar_t = c_int; +pub type time_t = i64; +pub type suseconds_t = c_long; +pub type register_t = i64; + +s_no_extra_traits!
{ + pub struct gpregs { + pub gp_ra: crate::register_t, + pub gp_sp: crate::register_t, + pub gp_gp: crate::register_t, + pub gp_tp: crate::register_t, + pub gp_t: [crate::register_t; 7], + pub gp_s: [crate::register_t; 12], + pub gp_a: [crate::register_t; 8], + pub gp_sepc: crate::register_t, + pub gp_sstatus: crate::register_t, + } + + pub struct fpregs { + pub fp_x: [[u64; 2]; 32], + pub fp_fcsr: u64, + pub fp_flags: c_int, + pub pad: c_int, + } + + pub struct mcontext_t { + pub mc_gpregs: gpregs, + pub mc_fpregs: fpregs, + pub mc_flags: c_int, + pub mc_pad: c_int, + pub mc_spare: [u64; 8], + } +} + +cfg_if! { + if #[cfg(feature = "extra_traits")] { + impl PartialEq for gpregs { + fn eq(&self, other: &gpregs) -> bool { + self.gp_ra == other.gp_ra + && self.gp_sp == other.gp_sp + && self.gp_gp == other.gp_gp + && self.gp_tp == other.gp_tp + && self.gp_t.iter().zip(other.gp_t.iter()).all(|(a, b)| a == b) + && self.gp_s.iter().zip(other.gp_s.iter()).all(|(a, b)| a == b) + && self.gp_a.iter().zip(other.gp_a.iter()).all(|(a, b)| a == b) + && self.gp_sepc == other.gp_sepc + && self.gp_sstatus == other.gp_sstatus + } + } + impl Eq for gpregs {} + impl hash::Hash for gpregs { + fn hash<H: hash::Hasher>(&self, state: &mut H) { + self.gp_ra.hash(state); + self.gp_sp.hash(state); + self.gp_gp.hash(state); + self.gp_tp.hash(state); + self.gp_t.hash(state); + self.gp_s.hash(state); + self.gp_a.hash(state); + self.gp_sepc.hash(state); + self.gp_sstatus.hash(state); + } + } + impl PartialEq for fpregs { + fn eq(&self, other: &fpregs) -> bool { + self.fp_x == other.fp_x + && self.fp_fcsr == other.fp_fcsr + && self.fp_flags == other.fp_flags + && self.pad == other.pad + } + } + impl Eq for fpregs {} + impl hash::Hash for fpregs { + fn hash<H: hash::Hasher>(&self, state: &mut H) { + self.fp_x.hash(state); + self.fp_fcsr.hash(state); + self.fp_flags.hash(state); + self.pad.hash(state); + } + } + impl PartialEq for mcontext_t { + fn eq(&self, other: &mcontext_t) -> bool { + self.mc_gpregs == other.mc_gpregs + && self.mc_fpregs == other.mc_fpregs + && self.mc_flags == other.mc_flags + && self.mc_pad == other.mc_pad + && self + .mc_spare + .iter() + .zip(other.mc_spare.iter()) + .all(|(a, b)| a == b) + } + } + impl Eq for mcontext_t {} + impl hash::Hash for mcontext_t { + fn hash<H: hash::Hasher>(&self, state: &mut H) { + self.mc_gpregs.hash(state); + self.mc_fpregs.hash(state); + self.mc_flags.hash(state); + self.mc_pad.hash(state); + self.mc_spare.hash(state); + } + } + } +} + +pub(crate) const _ALIGNBYTES: usize = size_of::<c_long>() - 1; + +pub const BIOCSRTIMEOUT: c_ulong = 0x8010426d; +pub const BIOCGRTIMEOUT: c_ulong = 0x4010426e; +pub const MAP_32BIT: c_int = 0x00080000; +pub const MINSIGSTKSZ: size_t = 4096; // 1024 * 4 +pub const TIOCTIMESTAMP: c_ulong = 0x40107459; diff --git a/vendor/libc/src/unix/bsd/freebsdlike/freebsd/x86.rs b/vendor/libc/src/unix/bsd/freebsdlike/freebsd/x86.rs new file mode 100644 index 00000000000000..5becde55db43ee --- /dev/null +++ b/vendor/libc/src/unix/bsd/freebsdlike/freebsd/x86.rs @@ -0,0 +1,134 @@ +use crate::prelude::*; + +pub type clock_t = c_ulong; +pub type wchar_t = i32; +pub type time_t = i32; +pub type suseconds_t = i32; +pub type register_t = i32; + +s_no_extra_traits!
{ + #[repr(align(16))] + pub struct mcontext_t { + pub mc_onstack: register_t, + pub mc_gs: register_t, + pub mc_fs: register_t, + pub mc_es: register_t, + pub mc_ds: register_t, + pub mc_edi: register_t, + pub mc_esi: register_t, + pub mc_ebp: register_t, + pub mc_isp: register_t, + pub mc_ebx: register_t, + pub mc_edx: register_t, + pub mc_ecx: register_t, + pub mc_eax: register_t, + pub mc_trapno: register_t, + pub mc_err: register_t, + pub mc_eip: register_t, + pub mc_cs: register_t, + pub mc_eflags: register_t, + pub mc_esp: register_t, + pub mc_ss: register_t, + pub mc_len: c_int, + pub mc_fpformat: c_int, + pub mc_ownedfp: c_int, + pub mc_flags: register_t, + pub mc_fpstate: [c_int; 128], + pub mc_fsbase: register_t, + pub mc_gsbase: register_t, + pub mc_xfpustate: register_t, + pub mc_xfpustate_len: register_t, + pub mc_spare2: [c_int; 4], + } +} + +cfg_if! { + if #[cfg(feature = "extra_traits")] { + impl PartialEq for mcontext_t { + fn eq(&self, other: &mcontext_t) -> bool { + self.mc_onstack == other.mc_onstack + && self.mc_gs == other.mc_gs + && self.mc_fs == other.mc_fs + && self.mc_es == other.mc_es + && self.mc_ds == other.mc_ds + && self.mc_edi == other.mc_edi + && self.mc_esi == other.mc_esi + && self.mc_ebp == other.mc_ebp + && self.mc_isp == other.mc_isp + && self.mc_ebx == other.mc_ebx + && self.mc_edx == other.mc_edx + && self.mc_ecx == other.mc_ecx + && self.mc_eax == other.mc_eax + && self.mc_trapno == other.mc_trapno + && self.mc_err == other.mc_err + && self.mc_eip == other.mc_eip + && self.mc_cs == other.mc_cs + && self.mc_eflags == other.mc_eflags + && self.mc_esp == other.mc_esp + && self.mc_ss == other.mc_ss + && self.mc_len == other.mc_len + && self.mc_fpformat == other.mc_fpformat + && self.mc_ownedfp == other.mc_ownedfp + && self.mc_flags == other.mc_flags + && self + .mc_fpstate + .iter() + .zip(other.mc_fpstate.iter()) + .all(|(a, b)| a == b) + && self.mc_fsbase == other.mc_fsbase + && self.mc_gsbase == other.mc_gsbase + && self.mc_xfpustate == other.mc_xfpustate + && self.mc_xfpustate_len == other.mc_xfpustate_len + && self + .mc_spare2 + .iter() + .zip(other.mc_spare2.iter()) + .all(|(a, b)| a == b) + } + } + impl Eq for mcontext_t {} + impl hash::Hash for mcontext_t { + fn hash<H: hash::Hasher>(&self, state: &mut H) { + self.mc_onstack.hash(state); + self.mc_gs.hash(state); + self.mc_fs.hash(state); + self.mc_es.hash(state); + self.mc_ds.hash(state); + self.mc_edi.hash(state); + self.mc_esi.hash(state); + self.mc_ebp.hash(state); + self.mc_isp.hash(state); + self.mc_ebx.hash(state); + self.mc_edx.hash(state); + self.mc_ecx.hash(state); + self.mc_eax.hash(state); + self.mc_trapno.hash(state); + self.mc_err.hash(state); + self.mc_eip.hash(state); + self.mc_cs.hash(state); + self.mc_eflags.hash(state); + self.mc_esp.hash(state); + self.mc_ss.hash(state); + self.mc_len.hash(state); + self.mc_fpformat.hash(state); + self.mc_ownedfp.hash(state); + self.mc_flags.hash(state); + self.mc_fpstate.hash(state); + self.mc_fsbase.hash(state); + self.mc_gsbase.hash(state); + self.mc_xfpustate.hash(state); + self.mc_xfpustate_len.hash(state); + self.mc_spare2.hash(state); + } + } + } +} + +pub(crate) const _ALIGNBYTES: usize = size_of::<c_long>() - 1; + +pub const MINSIGSTKSZ: size_t = 2048; // 512 * 4 + +pub const BIOCSRTIMEOUT: c_ulong = 0x8008426d; +pub const BIOCGRTIMEOUT: c_ulong = 0x4008426e; +pub const KINFO_FILE_SIZE: c_int = 1392; +pub const TIOCTIMESTAMP: c_ulong = 0x40087459; diff --git a/vendor/libc/src/unix/bsd/freebsdlike/freebsd/x86_64/mod.rs
b/vendor/libc/src/unix/bsd/freebsdlike/freebsd/x86_64/mod.rs new file mode 100644 index 00000000000000..d665e3da01e875 --- /dev/null +++ b/vendor/libc/src/unix/bsd/freebsdlike/freebsd/x86_64/mod.rs @@ -0,0 +1,346 @@ +use crate::prelude::*; + +pub type clock_t = i32; +pub type wchar_t = i32; +pub type time_t = i64; +pub type suseconds_t = i64; +pub type register_t = i64; + +s! { + pub struct reg32 { + pub r_fs: u32, + pub r_es: u32, + pub r_ds: u32, + pub r_edi: u32, + pub r_esi: u32, + pub r_ebp: u32, + pub r_isp: u32, + pub r_ebx: u32, + pub r_edx: u32, + pub r_ecx: u32, + pub r_eax: u32, + pub r_trapno: u32, + pub r_err: u32, + pub r_eip: u32, + pub r_cs: u32, + pub r_eflags: u32, + pub r_esp: u32, + pub r_ss: u32, + pub r_gs: u32, + } + + pub struct reg { + pub r_r15: i64, + pub r_r14: i64, + pub r_r13: i64, + pub r_r12: i64, + pub r_r11: i64, + pub r_r10: i64, + pub r_r9: i64, + pub r_r8: i64, + pub r_rdi: i64, + pub r_rsi: i64, + pub r_rbp: i64, + pub r_rbx: i64, + pub r_rdx: i64, + pub r_rcx: i64, + pub r_rax: i64, + pub r_trapno: u32, + pub r_fs: u16, + pub r_gs: u16, + pub r_err: u32, + pub r_es: u16, + pub r_ds: u16, + pub r_rip: i64, + pub r_cs: i64, + pub r_rflags: i64, + pub r_rsp: i64, + pub r_ss: i64, + } +} + +s_no_extra_traits! { + pub struct fpreg32 { + pub fpr_env: [u32; 7], + pub fpr_acc: [[u8; 10]; 8], + pub fpr_ex_sw: u32, + pub fpr_pad: [u8; 64], + } + + pub struct fpreg { + pub fpr_env: [u64; 4], + pub fpr_acc: [[u8; 16]; 8], + pub fpr_xacc: [[u8; 16]; 16], + pub fpr_spare: [u64; 12], + } + + pub struct xmmreg { + pub xmm_env: [u32; 8], + pub xmm_acc: [[u8; 16]; 8], + pub xmm_reg: [[u8; 16]; 8], + pub xmm_pad: [u8; 224], + } + + pub union __c_anonymous_elf64_auxv_union { + pub a_val: c_long, + pub a_ptr: *mut c_void, + pub a_fcn: extern "C" fn(), + } + + pub struct Elf64_Auxinfo { + pub a_type: c_long, + pub a_un: __c_anonymous_elf64_auxv_union, + } + + #[repr(align(16))] + pub struct max_align_t { + priv_: [f64; 4], + } + + #[repr(align(16))] + #[cfg_attr(not(any(freebsd11, freebsd12, freebsd13, freebsd14)), non_exhaustive)] + pub struct mcontext_t { + pub mc_onstack: register_t, + pub mc_rdi: register_t, + pub mc_rsi: register_t, + pub mc_rdx: register_t, + pub mc_rcx: register_t, + pub mc_r8: register_t, + pub mc_r9: register_t, + pub mc_rax: register_t, + pub mc_rbx: register_t, + pub mc_rbp: register_t, + pub mc_r10: register_t, + pub mc_r11: register_t, + pub mc_r12: register_t, + pub mc_r13: register_t, + pub mc_r14: register_t, + pub mc_r15: register_t, + pub mc_trapno: u32, + pub mc_fs: u16, + pub mc_gs: u16, + pub mc_addr: register_t, + pub mc_flags: u32, + pub mc_es: u16, + pub mc_ds: u16, + pub mc_err: register_t, + pub mc_rip: register_t, + pub mc_cs: register_t, + pub mc_rflags: register_t, + pub mc_rsp: register_t, + pub mc_ss: register_t, + pub mc_len: c_long, + pub mc_fpformat: c_long, + pub mc_ownedfp: c_long, + pub mc_fpstate: [c_long; 64], + pub mc_fsbase: register_t, + pub mc_gsbase: register_t, + pub mc_xfpustate: register_t, + pub mc_xfpustate_len: register_t, + // freebsd < 15 + #[cfg(any(freebsd11, freebsd12, freebsd13, freebsd14))] + pub mc_spare: [c_long; 4], + // freebsd >= 15 + #[cfg(not(any(freebsd11, freebsd12, freebsd13, freebsd14)))] + pub mc_tlsbase: register_t, + #[cfg(not(any(freebsd11, freebsd12, freebsd13, freebsd14)))] + pub mc_spare: [c_long; 3], + } +} + +cfg_if! 
{ + if #[cfg(feature = "extra_traits")] { + impl PartialEq for fpreg32 { + fn eq(&self, other: &fpreg32) -> bool { + self.fpr_env == other.fpr_env + && self.fpr_acc == other.fpr_acc + && self.fpr_ex_sw == other.fpr_ex_sw + && self + .fpr_pad + .iter() + .zip(other.fpr_pad.iter()) + .all(|(a, b)| a == b) + } + } + impl Eq for fpreg32 {} + impl hash::Hash for fpreg32 { + fn hash<H: hash::Hasher>(&self, state: &mut H) { + self.fpr_env.hash(state); + self.fpr_acc.hash(state); + self.fpr_ex_sw.hash(state); + self.fpr_pad.hash(state); + } + } + + impl PartialEq for fpreg { + fn eq(&self, other: &fpreg) -> bool { + self.fpr_env == other.fpr_env + && self.fpr_acc == other.fpr_acc + && self.fpr_xacc == other.fpr_xacc + && self.fpr_spare == other.fpr_spare + } + } + impl Eq for fpreg {} + impl hash::Hash for fpreg { + fn hash<H: hash::Hasher>(&self, state: &mut H) { + self.fpr_env.hash(state); + self.fpr_acc.hash(state); + self.fpr_xacc.hash(state); + self.fpr_spare.hash(state); + } + } + + impl PartialEq for xmmreg { + fn eq(&self, other: &xmmreg) -> bool { + self.xmm_env == other.xmm_env + && self.xmm_acc == other.xmm_acc + && self.xmm_reg == other.xmm_reg + && self + .xmm_pad + .iter() + .zip(other.xmm_pad.iter()) + .all(|(a, b)| a == b) + } + } + impl Eq for xmmreg {} + impl hash::Hash for xmmreg { + fn hash<H: hash::Hasher>(&self, state: &mut H) { + self.xmm_env.hash(state); + self.xmm_acc.hash(state); + self.xmm_reg.hash(state); + self.xmm_pad.hash(state); + } + } + + // FIXME(msrv): suggested method was added in 1.85 + #[allow(unpredictable_function_pointer_comparisons)] + impl PartialEq for __c_anonymous_elf64_auxv_union { + fn eq(&self, other: &__c_anonymous_elf64_auxv_union) -> bool { + unsafe { + self.a_val == other.a_val + || self.a_ptr == other.a_ptr + || self.a_fcn == other.a_fcn + } + } + } + impl Eq for __c_anonymous_elf64_auxv_union {} + impl PartialEq for Elf64_Auxinfo { + fn eq(&self, other: &Elf64_Auxinfo) -> bool { + self.a_type == other.a_type && self.a_un == other.a_un + } + } + impl Eq for Elf64_Auxinfo {} + + impl PartialEq for mcontext_t { + fn eq(&self, other: &mcontext_t) -> bool { + self.mc_onstack == other.mc_onstack + && self.mc_rdi == other.mc_rdi + && self.mc_rsi == other.mc_rsi + && self.mc_rdx == other.mc_rdx + && self.mc_rcx == other.mc_rcx + && self.mc_r8 == other.mc_r8 + && self.mc_r9 == other.mc_r9 + && self.mc_rax == other.mc_rax + && self.mc_rbx == other.mc_rbx + && self.mc_rbp == other.mc_rbp + && self.mc_r10 == other.mc_r10 + && self.mc_r11 == other.mc_r11 + && self.mc_r12 == other.mc_r12 + && self.mc_r13 == other.mc_r13 + && self.mc_r14 == other.mc_r14 + && self.mc_r15 == other.mc_r15 + && self.mc_trapno == other.mc_trapno + && self.mc_fs == other.mc_fs + && self.mc_gs == other.mc_gs + && self.mc_addr == other.mc_addr + && self.mc_flags == other.mc_flags + && self.mc_es == other.mc_es + && self.mc_ds == other.mc_ds + && self.mc_err == other.mc_err + && self.mc_rip == other.mc_rip + && self.mc_cs == other.mc_cs + && self.mc_rflags == other.mc_rflags + && self.mc_rsp == other.mc_rsp + && self.mc_ss == other.mc_ss + && self.mc_len == other.mc_len + && self.mc_fpformat == other.mc_fpformat + && self.mc_ownedfp == other.mc_ownedfp + && self + .mc_fpstate + .iter() + .zip(other.mc_fpstate.iter()) + .all(|(a, b)| a == b) + && self.mc_fsbase == other.mc_fsbase + && self.mc_gsbase == other.mc_gsbase + && self.mc_xfpustate == other.mc_xfpustate + && self.mc_xfpustate_len == other.mc_xfpustate_len + && self.mc_spare == other.mc_spare + } + } + impl Eq for mcontext_t {} + impl hash::Hash for mcontext_t { + fn
hash<H: hash::Hasher>(&self, state: &mut H) { + self.mc_onstack.hash(state); + self.mc_rdi.hash(state); + self.mc_rsi.hash(state); + self.mc_rdx.hash(state); + self.mc_rcx.hash(state); + self.mc_r8.hash(state); + self.mc_r9.hash(state); + self.mc_rax.hash(state); + self.mc_rbx.hash(state); + self.mc_rbp.hash(state); + self.mc_r10.hash(state); + self.mc_r11.hash(state); + self.mc_r12.hash(state); + self.mc_r13.hash(state); + self.mc_r14.hash(state); + self.mc_r15.hash(state); + self.mc_trapno.hash(state); + self.mc_fs.hash(state); + self.mc_gs.hash(state); + self.mc_addr.hash(state); + self.mc_flags.hash(state); + self.mc_es.hash(state); + self.mc_ds.hash(state); + self.mc_err.hash(state); + self.mc_rip.hash(state); + self.mc_cs.hash(state); + self.mc_rflags.hash(state); + self.mc_rsp.hash(state); + self.mc_ss.hash(state); + self.mc_len.hash(state); + self.mc_fpformat.hash(state); + self.mc_ownedfp.hash(state); + self.mc_fpstate.hash(state); + self.mc_fsbase.hash(state); + self.mc_gsbase.hash(state); + self.mc_xfpustate.hash(state); + self.mc_xfpustate_len.hash(state); + self.mc_spare.hash(state); + } + } + } +} + +pub(crate) const _ALIGNBYTES: usize = size_of::<c_long>() - 1; + +pub const BIOCSRTIMEOUT: c_ulong = 0x8010426d; +pub const BIOCGRTIMEOUT: c_ulong = 0x4010426e; + +pub const MAP_32BIT: c_int = 0x00080000; +pub const MINSIGSTKSZ: size_t = 2048; // 512 * 4 + +pub const _MC_HASSEGS: u32 = 0x1; +pub const _MC_HASBASES: u32 = 0x2; +pub const _MC_HASFPXSTATE: u32 = 0x4; + +pub const _MC_FPFMT_NODEV: c_long = 0x10000; +pub const _MC_FPFMT_XMM: c_long = 0x10002; +pub const _MC_FPOWNED_NONE: c_long = 0x20000; +pub const _MC_FPOWNED_FPU: c_long = 0x20001; +pub const _MC_FPOWNED_PCB: c_long = 0x20002; + +pub const KINFO_FILE_SIZE: c_int = 1392; + +pub const TIOCTIMESTAMP: c_ulong = 0x40107459; diff --git a/vendor/libc/src/unix/bsd/freebsdlike/mod.rs b/vendor/libc/src/unix/bsd/freebsdlike/mod.rs new file mode 100644 index 00000000000000..4bf62033474f04 --- /dev/null +++ b/vendor/libc/src/unix/bsd/freebsdlike/mod.rs @@ -0,0 +1,2009 @@ +use crate::off_t; +use crate::prelude::*; + +pub type mode_t = u16; +pub type pthread_attr_t = *mut c_void; +pub type rlim_t = i64; +pub type pthread_mutex_t = *mut c_void; +pub type pthread_mutexattr_t = *mut c_void; +pub type pthread_cond_t = *mut c_void; +pub type pthread_condattr_t = *mut c_void; +pub type pthread_rwlock_t = *mut c_void; +pub type pthread_rwlockattr_t = *mut c_void; +pub type pthread_key_t = c_int; +pub type tcflag_t = c_uint; +pub type speed_t = c_uint; +pub type nl_item = c_int; +pub type id_t = i64; +pub type vm_size_t = crate::uintptr_t; +pub type key_t = c_long; + +// elf.h + +pub type Elf32_Addr = u32; +pub type Elf32_Half = u16; +pub type Elf32_Lword = u64; +pub type Elf32_Off = u32; +pub type Elf32_Sword = i32; +pub type Elf32_Word = u32; + +pub type Elf64_Addr = u64; +pub type Elf64_Half = u16; +pub type Elf64_Lword = u64; +pub type Elf64_Off = u64; +pub type Elf64_Sword = i32; +pub type Elf64_Sxword = i64; +pub type Elf64_Word = u32; +pub type Elf64_Xword = u64; + +pub type iconv_t = *mut c_void; + +// It's an alias over "struct __kvm_t". However, its fields aren't supposed to be used directly, +// making the type definition system dependent. Better not bind it exactly. +pub type kvm_t = c_void; + +pub type posix_spawnattr_t = *mut c_void; +pub type posix_spawn_file_actions_t = *mut c_void; + +cfg_if!
{ + if #[cfg(target_pointer_width = "64")] { + type Elf_Addr = Elf64_Addr; + type Elf_Half = Elf64_Half; + type Elf_Phdr = Elf64_Phdr; + } else if #[cfg(target_pointer_width = "32")] { + type Elf_Addr = Elf32_Addr; + type Elf_Half = Elf32_Half; + type Elf_Phdr = Elf32_Phdr; + } +} + +// link.h + +#[derive(Debug)] +pub enum timezone {} +impl Copy for timezone {} +impl Clone for timezone { + fn clone(&self) -> timezone { + *self + } +} + +impl siginfo_t { + pub unsafe fn si_addr(&self) -> *mut c_void { + self.si_addr + } + + pub unsafe fn si_value(&self) -> crate::sigval { + self.si_value + } + + pub unsafe fn si_pid(&self) -> crate::pid_t { + self.si_pid + } + + pub unsafe fn si_uid(&self) -> crate::uid_t { + self.si_uid + } + + pub unsafe fn si_status(&self) -> c_int { + self.si_status + } +} + +s! { + pub struct in_addr { + pub s_addr: crate::in_addr_t, + } + + pub struct ip_mreq { + pub imr_multiaddr: in_addr, + pub imr_interface: in_addr, + } + + pub struct ip_mreqn { + pub imr_multiaddr: in_addr, + pub imr_address: in_addr, + pub imr_ifindex: c_int, + } + + pub struct ip_mreq_source { + pub imr_multiaddr: in_addr, + pub imr_sourceaddr: in_addr, + pub imr_interface: in_addr, + } + + pub struct glob_t { + pub gl_pathc: size_t, + pub gl_matchc: size_t, + pub gl_offs: size_t, + pub gl_flags: c_int, + pub gl_pathv: *mut *mut c_char, + __unused3: *mut c_void, + __unused4: *mut c_void, + __unused5: *mut c_void, + __unused6: *mut c_void, + __unused7: *mut c_void, + __unused8: *mut c_void, + } + + pub struct addrinfo { + pub ai_flags: c_int, + pub ai_family: c_int, + pub ai_socktype: c_int, + pub ai_protocol: c_int, + pub ai_addrlen: crate::socklen_t, + pub ai_canonname: *mut c_char, + pub ai_addr: *mut crate::sockaddr, + pub ai_next: *mut addrinfo, + } + + pub struct sigset_t { + bits: [u32; 4], + } + + pub struct siginfo_t { + pub si_signo: c_int, + pub si_errno: c_int, + pub si_code: c_int, + pub si_pid: crate::pid_t, + pub si_uid: crate::uid_t, + pub si_status: c_int, + pub si_addr: *mut c_void, + pub si_value: crate::sigval, + _pad1: c_long, + _pad2: [c_int; 7], + } + + pub struct sigaction { + pub sa_sigaction: crate::sighandler_t, + pub sa_flags: c_int, + pub sa_mask: sigset_t, + } + + pub struct sched_param { + pub sched_priority: c_int, + } + + pub struct Dl_info { + pub dli_fname: *const c_char, + pub dli_fbase: *mut c_void, + pub dli_sname: *const c_char, + pub dli_saddr: *mut c_void, + } + + pub struct sockaddr_in { + pub sin_len: u8, + pub sin_family: crate::sa_family_t, + pub sin_port: crate::in_port_t, + pub sin_addr: crate::in_addr, + pub sin_zero: [c_char; 8], + } + + pub struct termios { + pub c_iflag: crate::tcflag_t, + pub c_oflag: crate::tcflag_t, + pub c_cflag: crate::tcflag_t, + pub c_lflag: crate::tcflag_t, + pub c_cc: [crate::cc_t; crate::NCCS], + pub c_ispeed: crate::speed_t, + pub c_ospeed: crate::speed_t, + } + + pub struct flock { + pub l_start: off_t, + pub l_len: off_t, + pub l_pid: crate::pid_t, + pub l_type: c_short, + pub l_whence: c_short, + #[cfg(not(target_os = "dragonfly"))] + pub l_sysid: c_int, + } + + pub struct sf_hdtr { + pub headers: *mut crate::iovec, + pub hdr_cnt: c_int, + pub trailers: *mut crate::iovec, + pub trl_cnt: c_int, + } + + pub struct lconv { + pub decimal_point: *mut c_char, + pub thousands_sep: *mut c_char, + pub grouping: *mut c_char, + pub int_curr_symbol: *mut c_char, + pub currency_symbol: *mut c_char, + pub mon_decimal_point: *mut c_char, + pub mon_thousands_sep: *mut c_char, + pub mon_grouping: *mut c_char, + pub positive_sign: 
*mut c_char, + pub negative_sign: *mut c_char, + pub int_frac_digits: c_char, + pub frac_digits: c_char, + pub p_cs_precedes: c_char, + pub p_sep_by_space: c_char, + pub n_cs_precedes: c_char, + pub n_sep_by_space: c_char, + pub p_sign_posn: c_char, + pub n_sign_posn: c_char, + pub int_p_cs_precedes: c_char, + pub int_n_cs_precedes: c_char, + pub int_p_sep_by_space: c_char, + pub int_n_sep_by_space: c_char, + pub int_p_sign_posn: c_char, + pub int_n_sign_posn: c_char, + } + + pub struct cmsgcred { + pub cmcred_pid: crate::pid_t, + pub cmcred_uid: crate::uid_t, + pub cmcred_euid: crate::uid_t, + pub cmcred_gid: crate::gid_t, + pub cmcred_ngroups: c_short, + pub cmcred_groups: [crate::gid_t; CMGROUP_MAX], + } + + pub struct rtprio { + pub type_: c_ushort, + pub prio: c_ushort, + } + + pub struct in6_pktinfo { + pub ipi6_addr: crate::in6_addr, + pub ipi6_ifindex: c_uint, + } + + pub struct arphdr { + pub ar_hrd: u16, + pub ar_pro: u16, + pub ar_hln: u8, + pub ar_pln: u8, + pub ar_op: u16, + } + + pub struct timex { + pub modes: c_uint, + pub offset: c_long, + pub freq: c_long, + pub maxerror: c_long, + pub esterror: c_long, + pub status: c_int, + pub constant: c_long, + pub precision: c_long, + pub tolerance: c_long, + pub ppsfreq: c_long, + pub jitter: c_long, + pub shift: c_int, + pub stabil: c_long, + pub jitcnt: c_long, + pub calcnt: c_long, + pub errcnt: c_long, + pub stbcnt: c_long, + } + + pub struct ntptimeval { + pub time: crate::timespec, + pub maxerror: c_long, + pub esterror: c_long, + pub tai: c_long, + pub time_state: c_int, + } + + pub struct accept_filter_arg { + pub af_name: [c_char; 16], + af_arg: [c_char; 256 - 16], + } + + pub struct ptrace_io_desc { + pub piod_op: c_int, + pub piod_offs: *mut c_void, + pub piod_addr: *mut c_void, + pub piod_len: size_t, + } + + // bpf.h + + pub struct bpf_program { + pub bf_len: c_uint, + pub bf_insns: *mut bpf_insn, + } + + pub struct bpf_stat { + pub bs_recv: c_uint, + pub bs_drop: c_uint, + } + + pub struct bpf_version { + pub bv_major: c_ushort, + pub bv_minor: c_ushort, + } + + pub struct bpf_hdr { + pub bh_tstamp: crate::timeval, + pub bh_caplen: u32, + pub bh_datalen: u32, + pub bh_hdrlen: c_ushort, + } + + pub struct bpf_insn { + pub code: c_ushort, + pub jt: c_uchar, + pub jf: c_uchar, + pub k: u32, + } + + pub struct bpf_dltlist { + bfl_len: c_uint, + bfl_list: *mut c_uint, + } + + // elf.h + + pub struct Elf32_Phdr { + pub p_type: Elf32_Word, + pub p_offset: Elf32_Off, + pub p_vaddr: Elf32_Addr, + pub p_paddr: Elf32_Addr, + pub p_filesz: Elf32_Word, + pub p_memsz: Elf32_Word, + pub p_flags: Elf32_Word, + pub p_align: Elf32_Word, + } + + pub struct Elf64_Phdr { + pub p_type: Elf64_Word, + pub p_flags: Elf64_Word, + pub p_offset: Elf64_Off, + pub p_vaddr: Elf64_Addr, + pub p_paddr: Elf64_Addr, + pub p_filesz: Elf64_Xword, + pub p_memsz: Elf64_Xword, + pub p_align: Elf64_Xword, + } + + // link.h + + pub struct dl_phdr_info { + pub dlpi_addr: Elf_Addr, + pub dlpi_name: *const c_char, + pub dlpi_phdr: *const Elf_Phdr, + pub dlpi_phnum: Elf_Half, + pub dlpi_adds: c_ulonglong, + pub dlpi_subs: c_ulonglong, + pub dlpi_tls_modid: usize, + pub dlpi_tls_data: *mut c_void, + } + + pub struct ipc_perm { + pub cuid: crate::uid_t, + pub cgid: crate::gid_t, + pub uid: crate::uid_t, + pub gid: crate::gid_t, + pub mode: mode_t, + pub seq: c_ushort, + pub key: crate::key_t, + } + + pub struct eui64 { + pub octet: [u8; EUI64_LEN], + } +} + +s_no_extra_traits! 
{ + pub struct sockaddr_storage { + pub ss_len: u8, + pub ss_family: crate::sa_family_t, + __ss_pad1: [u8; 6], + __ss_align: i64, + __ss_pad2: [u8; 112], + } +} + +cfg_if! { + if #[cfg(feature = "extra_traits")] { + impl PartialEq for sockaddr_storage { + fn eq(&self, other: &sockaddr_storage) -> bool { + self.ss_len == other.ss_len + && self.ss_family == other.ss_family + && self.__ss_pad1 == other.__ss_pad1 + && self.__ss_align == other.__ss_align + && self + .__ss_pad2 + .iter() + .zip(other.__ss_pad2.iter()) + .all(|(a, b)| a == b) + } + } + impl Eq for sockaddr_storage {} + impl hash::Hash for sockaddr_storage { + fn hash<H: hash::Hasher>(&self, state: &mut H) { + self.ss_len.hash(state); + self.ss_family.hash(state); + self.__ss_pad1.hash(state); + self.__ss_align.hash(state); + self.__ss_pad2.hash(state); + } + } + } +} + +// Non-public helper constant +const SIZEOF_LONG: usize = size_of::<c_long>(); + +#[deprecated( + since = "0.2.64", + note = "Can vary at runtime. Use sysconf(3) instead" +)] +pub const AIO_LISTIO_MAX: c_int = 16; +pub const AIO_CANCELED: c_int = 1; +pub const AIO_NOTCANCELED: c_int = 2; +pub const AIO_ALLDONE: c_int = 3; +pub const LIO_NOP: c_int = 0; +pub const LIO_WRITE: c_int = 1; +pub const LIO_READ: c_int = 2; +pub const LIO_WAIT: c_int = 1; +pub const LIO_NOWAIT: c_int = 0; + +pub const SIGEV_NONE: c_int = 0; +pub const SIGEV_SIGNAL: c_int = 1; +pub const SIGEV_THREAD: c_int = 2; +pub const SIGEV_KEVENT: c_int = 3; + +pub const CODESET: crate::nl_item = 0; +pub const D_T_FMT: crate::nl_item = 1; +pub const D_FMT: crate::nl_item = 2; +pub const T_FMT: crate::nl_item = 3; +pub const T_FMT_AMPM: crate::nl_item = 4; +pub const AM_STR: crate::nl_item = 5; +pub const PM_STR: crate::nl_item = 6; + +pub const DAY_1: crate::nl_item = 7; +pub const DAY_2: crate::nl_item = 8; +pub const DAY_3: crate::nl_item = 9; +pub const DAY_4: crate::nl_item = 10; +pub const DAY_5: crate::nl_item = 11; +pub const DAY_6: crate::nl_item = 12; +pub const DAY_7: crate::nl_item = 13; + +pub const ABDAY_1: crate::nl_item = 14; +pub const ABDAY_2: crate::nl_item = 15; +pub const ABDAY_3: crate::nl_item = 16; +pub const ABDAY_4: crate::nl_item = 17; +pub const ABDAY_5: crate::nl_item = 18; +pub const ABDAY_6: crate::nl_item = 19; +pub const ABDAY_7: crate::nl_item = 20; + +pub const MON_1: crate::nl_item = 21; +pub const MON_2: crate::nl_item = 22; +pub const MON_3: crate::nl_item = 23; +pub const MON_4: crate::nl_item = 24; +pub const MON_5: crate::nl_item = 25; +pub const MON_6: crate::nl_item = 26; +pub const MON_7: crate::nl_item = 27; +pub const MON_8: crate::nl_item = 28; +pub const MON_9: crate::nl_item = 29; +pub const MON_10: crate::nl_item = 30; +pub const MON_11: crate::nl_item = 31; +pub const MON_12: crate::nl_item = 32; + +pub const ABMON_1: crate::nl_item = 33; +pub const ABMON_2: crate::nl_item = 34; +pub const ABMON_3: crate::nl_item = 35; +pub const ABMON_4: crate::nl_item = 36; +pub const ABMON_5: crate::nl_item = 37; +pub const ABMON_6: crate::nl_item = 38; +pub const ABMON_7: crate::nl_item = 39; +pub const ABMON_8: crate::nl_item = 40; +pub const ABMON_9: crate::nl_item = 41; +pub const ABMON_10: crate::nl_item = 42; +pub const ABMON_11: crate::nl_item = 43; +pub const ABMON_12: crate::nl_item = 44; + +pub const ERA: crate::nl_item = 45; +pub const ERA_D_FMT: crate::nl_item = 46; +pub const ERA_D_T_FMT: crate::nl_item = 47; +pub const ERA_T_FMT: crate::nl_item = 48; +pub const ALT_DIGITS: crate::nl_item = 49; + +pub const RADIXCHAR: crate::nl_item = 50; +pub const THOUSEP: crate::nl_item =
51; + +pub const YESEXPR: crate::nl_item = 52; +pub const NOEXPR: crate::nl_item = 53; + +pub const YESSTR: crate::nl_item = 54; +pub const NOSTR: crate::nl_item = 55; + +pub const CRNCYSTR: crate::nl_item = 56; + +pub const D_MD_ORDER: crate::nl_item = 57; + +pub const ALTMON_1: crate::nl_item = 58; +pub const ALTMON_2: crate::nl_item = 59; +pub const ALTMON_3: crate::nl_item = 60; +pub const ALTMON_4: crate::nl_item = 61; +pub const ALTMON_5: crate::nl_item = 62; +pub const ALTMON_6: crate::nl_item = 63; +pub const ALTMON_7: crate::nl_item = 64; +pub const ALTMON_8: crate::nl_item = 65; +pub const ALTMON_9: crate::nl_item = 66; +pub const ALTMON_10: crate::nl_item = 67; +pub const ALTMON_11: crate::nl_item = 68; +pub const ALTMON_12: crate::nl_item = 69; + +pub const EXIT_FAILURE: c_int = 1; +pub const EXIT_SUCCESS: c_int = 0; +pub const EOF: c_int = -1; +pub const SEEK_SET: c_int = 0; +pub const SEEK_CUR: c_int = 1; +pub const SEEK_END: c_int = 2; +pub const SEEK_DATA: c_int = 3; +pub const SEEK_HOLE: c_int = 4; +pub const _IOFBF: c_int = 0; +pub const _IONBF: c_int = 2; +pub const _IOLBF: c_int = 1; +pub const BUFSIZ: c_uint = 1024; +pub const FOPEN_MAX: c_uint = 20; +pub const FILENAME_MAX: c_uint = 1024; +pub const L_tmpnam: c_uint = 1024; +pub const TMP_MAX: c_uint = 308915776; + +pub const O_NOCTTY: c_int = 32768; +pub const O_DIRECT: c_int = 0x00010000; + +pub const S_IFIFO: mode_t = 0o1_0000; +pub const S_IFCHR: mode_t = 0o2_0000; +pub const S_IFBLK: mode_t = 0o6_0000; +pub const S_IFDIR: mode_t = 0o4_0000; +pub const S_IFREG: mode_t = 0o10_0000; +pub const S_IFLNK: mode_t = 0o12_0000; +pub const S_IFSOCK: mode_t = 0o14_0000; +pub const S_IFMT: mode_t = 0o17_0000; +pub const S_IEXEC: mode_t = 0o0100; +pub const S_IWRITE: mode_t = 0o0200; +pub const S_IREAD: mode_t = 0o0400; +pub const S_IRWXU: mode_t = 0o0700; +pub const S_IXUSR: mode_t = 0o0100; +pub const S_IWUSR: mode_t = 0o0200; +pub const S_IRUSR: mode_t = 0o0400; +pub const S_IRWXG: mode_t = 0o0070; +pub const S_IXGRP: mode_t = 0o0010; +pub const S_IWGRP: mode_t = 0o0020; +pub const S_IRGRP: mode_t = 0o0040; +pub const S_IRWXO: mode_t = 0o0007; +pub const S_IXOTH: mode_t = 0o0001; +pub const S_IWOTH: mode_t = 0o0002; +pub const S_IROTH: mode_t = 0o0004; +pub const F_OK: c_int = 0; +pub const R_OK: c_int = 4; +pub const W_OK: c_int = 2; +pub const X_OK: c_int = 1; +pub const STDIN_FILENO: c_int = 0; +pub const STDOUT_FILENO: c_int = 1; +pub const STDERR_FILENO: c_int = 2; +pub const F_LOCK: c_int = 1; +pub const F_TEST: c_int = 3; +pub const F_TLOCK: c_int = 2; +pub const F_ULOCK: c_int = 0; +pub const F_DUPFD_CLOEXEC: c_int = 17; +pub const F_DUP2FD: c_int = 10; +pub const F_DUP2FD_CLOEXEC: c_int = 18; +pub const SIGHUP: c_int = 1; +pub const SIGINT: c_int = 2; +pub const SIGQUIT: c_int = 3; +pub const SIGILL: c_int = 4; +pub const SIGABRT: c_int = 6; +pub const SIGEMT: c_int = 7; +pub const SIGFPE: c_int = 8; +pub const SIGKILL: c_int = 9; +pub const SIGSEGV: c_int = 11; +pub const SIGPIPE: c_int = 13; +pub const SIGALRM: c_int = 14; +pub const SIGTERM: c_int = 15; + +pub const PROT_NONE: c_int = 0; +pub const PROT_READ: c_int = 1; +pub const PROT_WRITE: c_int = 2; +pub const PROT_EXEC: c_int = 4; + +pub const MAP_FILE: c_int = 0x0000; +pub const MAP_SHARED: c_int = 0x0001; +pub const MAP_PRIVATE: c_int = 0x0002; +pub const MAP_FIXED: c_int = 0x0010; +pub const MAP_ANON: c_int = 0x1000; +pub const MAP_ANONYMOUS: c_int = MAP_ANON; + +pub const MAP_FAILED: *mut c_void = !0 as *mut c_void; + +pub const MCL_CURRENT: c_int = 
0x0001; +pub const MCL_FUTURE: c_int = 0x0002; + +pub const MNT_EXPUBLIC: c_int = 0x20000000; +pub const MNT_NOATIME: c_int = 0x10000000; +pub const MNT_NOCLUSTERR: c_int = 0x40000000; +pub const MNT_NOCLUSTERW: c_int = 0x80000000; +pub const MNT_NOSYMFOLLOW: c_int = 0x00400000; +pub const MNT_SOFTDEP: c_int = 0x00200000; +pub const MNT_SUIDDIR: c_int = 0x00100000; +pub const MNT_EXRDONLY: c_int = 0x00000080; +pub const MNT_DEFEXPORTED: c_int = 0x00000200; +pub const MNT_EXPORTANON: c_int = 0x00000400; +pub const MNT_EXKERB: c_int = 0x00000800; +pub const MNT_DELEXPORT: c_int = 0x00020000; + +pub const MS_SYNC: c_int = 0x0000; +pub const MS_ASYNC: c_int = 0x0001; +pub const MS_INVALIDATE: c_int = 0x0002; + +pub const EPERM: c_int = 1; +pub const ENOENT: c_int = 2; +pub const ESRCH: c_int = 3; +pub const EINTR: c_int = 4; +pub const EIO: c_int = 5; +pub const ENXIO: c_int = 6; +pub const E2BIG: c_int = 7; +pub const ENOEXEC: c_int = 8; +pub const EBADF: c_int = 9; +pub const ECHILD: c_int = 10; +pub const EDEADLK: c_int = 11; +pub const ENOMEM: c_int = 12; +pub const EACCES: c_int = 13; +pub const EFAULT: c_int = 14; +pub const ENOTBLK: c_int = 15; +pub const EBUSY: c_int = 16; +pub const EEXIST: c_int = 17; +pub const EXDEV: c_int = 18; +pub const ENODEV: c_int = 19; +pub const ENOTDIR: c_int = 20; +pub const EISDIR: c_int = 21; +pub const EINVAL: c_int = 22; +pub const ENFILE: c_int = 23; +pub const EMFILE: c_int = 24; +pub const ENOTTY: c_int = 25; +pub const ETXTBSY: c_int = 26; +pub const EFBIG: c_int = 27; +pub const ENOSPC: c_int = 28; +pub const ESPIPE: c_int = 29; +pub const EROFS: c_int = 30; +pub const EMLINK: c_int = 31; +pub const EPIPE: c_int = 32; +pub const EDOM: c_int = 33; +pub const ERANGE: c_int = 34; +pub const EAGAIN: c_int = 35; +pub const EWOULDBLOCK: c_int = 35; +pub const EINPROGRESS: c_int = 36; +pub const EALREADY: c_int = 37; +pub const ENOTSOCK: c_int = 38; +pub const EDESTADDRREQ: c_int = 39; +pub const EMSGSIZE: c_int = 40; +pub const EPROTOTYPE: c_int = 41; +pub const ENOPROTOOPT: c_int = 42; +pub const EPROTONOSUPPORT: c_int = 43; +pub const ESOCKTNOSUPPORT: c_int = 44; +pub const EOPNOTSUPP: c_int = 45; +pub const ENOTSUP: c_int = EOPNOTSUPP; +pub const EPFNOSUPPORT: c_int = 46; +pub const EAFNOSUPPORT: c_int = 47; +pub const EADDRINUSE: c_int = 48; +pub const EADDRNOTAVAIL: c_int = 49; +pub const ENETDOWN: c_int = 50; +pub const ENETUNREACH: c_int = 51; +pub const ENETRESET: c_int = 52; +pub const ECONNABORTED: c_int = 53; +pub const ECONNRESET: c_int = 54; +pub const ENOBUFS: c_int = 55; +pub const EISCONN: c_int = 56; +pub const ENOTCONN: c_int = 57; +pub const ESHUTDOWN: c_int = 58; +pub const ETOOMANYREFS: c_int = 59; +pub const ETIMEDOUT: c_int = 60; +pub const ECONNREFUSED: c_int = 61; +pub const ELOOP: c_int = 62; +pub const ENAMETOOLONG: c_int = 63; +pub const EHOSTDOWN: c_int = 64; +pub const EHOSTUNREACH: c_int = 65; +pub const ENOTEMPTY: c_int = 66; +pub const EPROCLIM: c_int = 67; +pub const EUSERS: c_int = 68; +pub const EDQUOT: c_int = 69; +pub const ESTALE: c_int = 70; +pub const EREMOTE: c_int = 71; +pub const EBADRPC: c_int = 72; +pub const ERPCMISMATCH: c_int = 73; +pub const EPROGUNAVAIL: c_int = 74; +pub const EPROGMISMATCH: c_int = 75; +pub const EPROCUNAVAIL: c_int = 76; +pub const ENOLCK: c_int = 77; +pub const ENOSYS: c_int = 78; +pub const EFTYPE: c_int = 79; +pub const EAUTH: c_int = 80; +pub const ENEEDAUTH: c_int = 81; +pub const EIDRM: c_int = 82; +pub const ENOMSG: c_int = 83; +pub const EOVERFLOW: c_int = 84; +pub const 
ECANCELED: c_int = 85; +pub const EILSEQ: c_int = 86; +pub const ENOATTR: c_int = 87; +pub const EDOOFUS: c_int = 88; +pub const EBADMSG: c_int = 89; +pub const EMULTIHOP: c_int = 90; +pub const ENOLINK: c_int = 91; +pub const EPROTO: c_int = 92; + +pub const POLLSTANDARD: c_short = crate::POLLIN + | crate::POLLPRI + | crate::POLLOUT + | crate::POLLRDNORM + | crate::POLLRDBAND + | crate::POLLWRBAND + | crate::POLLERR + | crate::POLLHUP + | crate::POLLNVAL; + +pub const AI_PASSIVE: c_int = 0x00000001; +pub const AI_CANONNAME: c_int = 0x00000002; +pub const AI_NUMERICHOST: c_int = 0x00000004; +pub const AI_NUMERICSERV: c_int = 0x00000008; +pub const AI_ALL: c_int = 0x00000100; +pub const AI_ADDRCONFIG: c_int = 0x00000400; +pub const AI_V4MAPPED: c_int = 0x00000800; + +pub const EAI_AGAIN: c_int = 2; +pub const EAI_BADFLAGS: c_int = 3; +pub const EAI_FAIL: c_int = 4; +pub const EAI_FAMILY: c_int = 5; +pub const EAI_MEMORY: c_int = 6; +pub const EAI_NONAME: c_int = 8; +pub const EAI_SERVICE: c_int = 9; +pub const EAI_SOCKTYPE: c_int = 10; +pub const EAI_SYSTEM: c_int = 11; +pub const EAI_OVERFLOW: c_int = 14; + +pub const F_DUPFD: c_int = 0; +pub const F_GETFD: c_int = 1; +pub const F_SETFD: c_int = 2; +pub const F_GETFL: c_int = 3; +pub const F_SETFL: c_int = 4; + +pub const SIGTRAP: c_int = 5; + +pub const GLOB_APPEND: c_int = 0x0001; +pub const GLOB_DOOFFS: c_int = 0x0002; +pub const GLOB_ERR: c_int = 0x0004; +pub const GLOB_MARK: c_int = 0x0008; +pub const GLOB_NOCHECK: c_int = 0x0010; +pub const GLOB_NOSORT: c_int = 0x0020; +pub const GLOB_NOESCAPE: c_int = 0x2000; + +pub const GLOB_NOSPACE: c_int = -1; +pub const GLOB_ABORTED: c_int = -2; +pub const GLOB_NOMATCH: c_int = -3; + +pub const POSIX_MADV_NORMAL: c_int = 0; +pub const POSIX_MADV_RANDOM: c_int = 1; +pub const POSIX_MADV_SEQUENTIAL: c_int = 2; +pub const POSIX_MADV_WILLNEED: c_int = 3; +pub const POSIX_MADV_DONTNEED: c_int = 4; + +pub const PTHREAD_BARRIER_SERIAL_THREAD: c_int = -1; +pub const PTHREAD_PROCESS_PRIVATE: c_int = 0; +pub const PTHREAD_PROCESS_SHARED: c_int = 1; +pub const PTHREAD_CREATE_JOINABLE: c_int = 0; +pub const PTHREAD_CREATE_DETACHED: c_int = 1; + +pub const RLIMIT_CPU: c_int = 0; +pub const RLIMIT_FSIZE: c_int = 1; +pub const RLIMIT_DATA: c_int = 2; +pub const RLIMIT_STACK: c_int = 3; +pub const RLIMIT_CORE: c_int = 4; +pub const RLIMIT_RSS: c_int = 5; +pub const RLIMIT_MEMLOCK: c_int = 6; +pub const RLIMIT_NPROC: c_int = 7; +pub const RLIMIT_NOFILE: c_int = 8; +pub const RLIMIT_SBSIZE: c_int = 9; +pub const RLIMIT_VMEM: c_int = 10; +pub const RLIMIT_AS: c_int = RLIMIT_VMEM; +pub const RLIM_INFINITY: rlim_t = 0x7fff_ffff_ffff_ffff; + +pub const RUSAGE_SELF: c_int = 0; +pub const RUSAGE_CHILDREN: c_int = -1; + +pub const CLOCK_REALTIME: crate::clockid_t = 0; +pub const CLOCK_VIRTUAL: crate::clockid_t = 1; +pub const CLOCK_PROF: crate::clockid_t = 2; +pub const CLOCK_MONOTONIC: crate::clockid_t = 4; +pub const CLOCK_UPTIME: crate::clockid_t = 5; +pub const CLOCK_UPTIME_PRECISE: crate::clockid_t = 7; +pub const CLOCK_UPTIME_FAST: crate::clockid_t = 8; +pub const CLOCK_REALTIME_PRECISE: crate::clockid_t = 9; +pub const CLOCK_REALTIME_FAST: crate::clockid_t = 10; +pub const CLOCK_MONOTONIC_PRECISE: crate::clockid_t = 11; +pub const CLOCK_MONOTONIC_FAST: crate::clockid_t = 12; +pub const CLOCK_SECOND: crate::clockid_t = 13; +pub const CLOCK_THREAD_CPUTIME_ID: crate::clockid_t = 14; +pub const CLOCK_PROCESS_CPUTIME_ID: crate::clockid_t = 15; + +pub const MADV_NORMAL: c_int = 0; +pub const MADV_RANDOM: c_int = 1; 
+pub const MADV_SEQUENTIAL: c_int = 2; +pub const MADV_WILLNEED: c_int = 3; +pub const MADV_DONTNEED: c_int = 4; +pub const MADV_FREE: c_int = 5; +pub const MADV_NOSYNC: c_int = 6; +pub const MADV_AUTOSYNC: c_int = 7; +pub const MADV_NOCORE: c_int = 8; +pub const MADV_CORE: c_int = 9; + +pub const MINCORE_INCORE: c_int = 0x1; +pub const MINCORE_REFERENCED: c_int = 0x2; +pub const MINCORE_MODIFIED: c_int = 0x4; +pub const MINCORE_REFERENCED_OTHER: c_int = 0x8; +pub const MINCORE_MODIFIED_OTHER: c_int = 0x10; + +pub const AF_UNSPEC: c_int = 0; +pub const AF_LOCAL: c_int = 1; +pub const AF_UNIX: c_int = AF_LOCAL; +pub const AF_INET: c_int = 2; +pub const AF_IMPLINK: c_int = 3; +pub const AF_PUP: c_int = 4; +pub const AF_CHAOS: c_int = 5; +pub const AF_NETBIOS: c_int = 6; +pub const AF_ISO: c_int = 7; +pub const AF_OSI: c_int = AF_ISO; +pub const AF_ECMA: c_int = 8; +pub const AF_DATAKIT: c_int = 9; +pub const AF_CCITT: c_int = 10; +pub const AF_SNA: c_int = 11; +pub const AF_DECnet: c_int = 12; +pub const AF_DLI: c_int = 13; +pub const AF_LAT: c_int = 14; +pub const AF_HYLINK: c_int = 15; +pub const AF_APPLETALK: c_int = 16; +pub const AF_ROUTE: c_int = 17; +pub const AF_LINK: c_int = 18; +pub const pseudo_AF_XTP: c_int = 19; +pub const AF_COIP: c_int = 20; +pub const AF_CNT: c_int = 21; +pub const pseudo_AF_RTIP: c_int = 22; +pub const AF_IPX: c_int = 23; +pub const AF_SIP: c_int = 24; +pub const pseudo_AF_PIP: c_int = 25; +pub const AF_ISDN: c_int = 26; +pub const AF_E164: c_int = AF_ISDN; +pub const pseudo_AF_KEY: c_int = 27; +pub const AF_INET6: c_int = 28; +pub const AF_NATM: c_int = 29; +pub const AF_ATM: c_int = 30; +pub const pseudo_AF_HDRCMPLT: c_int = 31; +pub const AF_NETGRAPH: c_int = 32; + +pub const PF_UNSPEC: c_int = AF_UNSPEC; +pub const PF_LOCAL: c_int = AF_LOCAL; +pub const PF_UNIX: c_int = PF_LOCAL; +pub const PF_INET: c_int = AF_INET; +pub const PF_IMPLINK: c_int = AF_IMPLINK; +pub const PF_PUP: c_int = AF_PUP; +pub const PF_CHAOS: c_int = AF_CHAOS; +pub const PF_NETBIOS: c_int = AF_NETBIOS; +pub const PF_ISO: c_int = AF_ISO; +pub const PF_OSI: c_int = AF_ISO; +pub const PF_ECMA: c_int = AF_ECMA; +pub const PF_DATAKIT: c_int = AF_DATAKIT; +pub const PF_CCITT: c_int = AF_CCITT; +pub const PF_SNA: c_int = AF_SNA; +pub const PF_DECnet: c_int = AF_DECnet; +pub const PF_DLI: c_int = AF_DLI; +pub const PF_LAT: c_int = AF_LAT; +pub const PF_HYLINK: c_int = AF_HYLINK; +pub const PF_APPLETALK: c_int = AF_APPLETALK; +pub const PF_ROUTE: c_int = AF_ROUTE; +pub const PF_LINK: c_int = AF_LINK; +pub const PF_XTP: c_int = pseudo_AF_XTP; +pub const PF_COIP: c_int = AF_COIP; +pub const PF_CNT: c_int = AF_CNT; +pub const PF_SIP: c_int = AF_SIP; +pub const PF_IPX: c_int = AF_IPX; +pub const PF_RTIP: c_int = pseudo_AF_RTIP; +pub const PF_PIP: c_int = pseudo_AF_PIP; +pub const PF_ISDN: c_int = AF_ISDN; +pub const PF_KEY: c_int = pseudo_AF_KEY; +pub const PF_INET6: c_int = AF_INET6; +pub const PF_NATM: c_int = AF_NATM; +pub const PF_ATM: c_int = AF_ATM; +pub const PF_NETGRAPH: c_int = AF_NETGRAPH; + +pub const PIOD_READ_D: c_int = 1; +pub const PIOD_WRITE_D: c_int = 2; +pub const PIOD_READ_I: c_int = 3; +pub const PIOD_WRITE_I: c_int = 4; + +pub const PT_TRACE_ME: c_int = 0; +pub const PT_READ_I: c_int = 1; +pub const PT_READ_D: c_int = 2; +pub const PT_WRITE_I: c_int = 4; +pub const PT_WRITE_D: c_int = 5; +pub const PT_CONTINUE: c_int = 7; +pub const PT_KILL: c_int = 8; +pub const PT_STEP: c_int = 9; +pub const PT_ATTACH: c_int = 10; +pub const PT_DETACH: c_int = 11; +pub const PT_IO: c_int = 
12; + +pub const SOMAXCONN: c_int = 128; + +pub const MSG_OOB: c_int = 0x00000001; +pub const MSG_PEEK: c_int = 0x00000002; +pub const MSG_DONTROUTE: c_int = 0x00000004; +pub const MSG_EOR: c_int = 0x00000008; +pub const MSG_TRUNC: c_int = 0x00000010; +pub const MSG_CTRUNC: c_int = 0x00000020; +pub const MSG_WAITALL: c_int = 0x00000040; +pub const MSG_DONTWAIT: c_int = 0x00000080; +pub const MSG_EOF: c_int = 0x00000100; + +pub const SCM_TIMESTAMP: c_int = 0x02; +pub const SCM_CREDS: c_int = 0x03; + +pub const SOCK_STREAM: c_int = 1; +pub const SOCK_DGRAM: c_int = 2; +pub const SOCK_RAW: c_int = 3; +pub const SOCK_RDM: c_int = 4; +pub const SOCK_SEQPACKET: c_int = 5; +pub const SOCK_CLOEXEC: c_int = 0x10000000; +pub const SOCK_NONBLOCK: c_int = 0x20000000; +pub const SOCK_MAXADDRLEN: c_int = 255; +pub const IP_TTL: c_int = 4; +pub const IP_HDRINCL: c_int = 2; +pub const IP_RECVDSTADDR: c_int = 7; +pub const IP_SENDSRCADDR: c_int = IP_RECVDSTADDR; +pub const IP_ADD_MEMBERSHIP: c_int = 12; +pub const IP_DROP_MEMBERSHIP: c_int = 13; +pub const IP_RECVIF: c_int = 20; +pub const IP_RECVTTL: c_int = 65; +pub const IPV6_RECVHOPLIMIT: c_int = 37; +pub const IPV6_JOIN_GROUP: c_int = 12; +pub const IPV6_LEAVE_GROUP: c_int = 13; +pub const IPV6_CHECKSUM: c_int = 26; +pub const IPV6_RECVPKTINFO: c_int = 36; +pub const IPV6_PKTINFO: c_int = 46; +pub const IPV6_HOPLIMIT: c_int = 47; +pub const IPV6_RECVTCLASS: c_int = 57; +pub const IPV6_TCLASS: c_int = 61; +pub const IP_ADD_SOURCE_MEMBERSHIP: c_int = 70; +pub const IP_DROP_SOURCE_MEMBERSHIP: c_int = 71; +pub const IP_BLOCK_SOURCE: c_int = 72; +pub const IP_UNBLOCK_SOURCE: c_int = 73; + +pub const TCP_NOPUSH: c_int = 4; +pub const TCP_NOOPT: c_int = 8; +pub const TCP_KEEPIDLE: c_int = 256; +pub const TCP_KEEPINTVL: c_int = 512; +pub const TCP_KEEPCNT: c_int = 1024; + +pub const SOL_SOCKET: c_int = 0xffff; +pub const SO_DEBUG: c_int = 0x01; +pub const SO_ACCEPTCONN: c_int = 0x0002; +pub const SO_REUSEADDR: c_int = 0x0004; +pub const SO_KEEPALIVE: c_int = 0x0008; +pub const SO_DONTROUTE: c_int = 0x0010; +pub const SO_BROADCAST: c_int = 0x0020; +pub const SO_USELOOPBACK: c_int = 0x0040; +pub const SO_LINGER: c_int = 0x0080; +pub const SO_OOBINLINE: c_int = 0x0100; +pub const SO_REUSEPORT: c_int = 0x0200; +pub const SO_TIMESTAMP: c_int = 0x0400; +pub const SO_NOSIGPIPE: c_int = 0x0800; +pub const SO_ACCEPTFILTER: c_int = 0x1000; +pub const SO_SNDBUF: c_int = 0x1001; +pub const SO_RCVBUF: c_int = 0x1002; +pub const SO_SNDLOWAT: c_int = 0x1003; +pub const SO_RCVLOWAT: c_int = 0x1004; +pub const SO_SNDTIMEO: c_int = 0x1005; +pub const SO_RCVTIMEO: c_int = 0x1006; +pub const SO_ERROR: c_int = 0x1007; +pub const SO_TYPE: c_int = 0x1008; + +pub const LOCAL_PEERCRED: c_int = 1; + +// net/route.h +pub const RTF_XRESOLVE: c_int = 0x200; +pub const RTF_LLINFO: c_int = 0x400; +pub const RTF_PROTO3: c_int = 0x40000; +pub const RTF_PINNED: c_int = 0x100000; +pub const RTF_LOCAL: c_int = 0x200000; +pub const RTF_BROADCAST: c_int = 0x400000; +pub const RTF_MULTICAST: c_int = 0x800000; + +pub const RTM_LOCK: c_int = 0x8; +pub const RTM_RESOLVE: c_int = 0xb; +pub const RTM_NEWADDR: c_int = 0xc; +pub const RTM_DELADDR: c_int = 0xd; +pub const RTM_IFINFO: c_int = 0xe; +pub const RTM_NEWMADDR: c_int = 0xf; +pub const RTM_DELMADDR: c_int = 0x10; +pub const RTM_IFANNOUNCE: c_int = 0x11; +pub const RTM_IEEE80211: c_int = 0x12; + +pub const SHUT_RD: c_int = 0; +pub const SHUT_WR: c_int = 1; +pub const SHUT_RDWR: c_int = 2; + +pub const LOCK_SH: c_int = 1; +pub const LOCK_EX: c_int 
= 2; +pub const LOCK_NB: c_int = 4; +pub const LOCK_UN: c_int = 8; + +pub const MAP_COPY: c_int = 0x0002; +#[doc(hidden)] +#[deprecated( + since = "0.2.54", + note = "Removed in FreeBSD 11, unused in DragonFlyBSD" +)] +pub const MAP_RENAME: c_int = 0x0020; +#[doc(hidden)] +#[deprecated( + since = "0.2.54", + note = "Removed in FreeBSD 11, unused in DragonFlyBSD" +)] +pub const MAP_NORESERVE: c_int = 0x0040; +pub const MAP_HASSEMAPHORE: c_int = 0x0200; +pub const MAP_STACK: c_int = 0x0400; +pub const MAP_NOSYNC: c_int = 0x0800; +pub const MAP_NOCORE: c_int = 0x020000; + +pub const IPPROTO_RAW: c_int = 255; + +pub const _PC_LINK_MAX: c_int = 1; +pub const _PC_MAX_CANON: c_int = 2; +pub const _PC_MAX_INPUT: c_int = 3; +pub const _PC_NAME_MAX: c_int = 4; +pub const _PC_PATH_MAX: c_int = 5; +pub const _PC_PIPE_BUF: c_int = 6; +pub const _PC_CHOWN_RESTRICTED: c_int = 7; +pub const _PC_NO_TRUNC: c_int = 8; +pub const _PC_VDISABLE: c_int = 9; +pub const _PC_ALLOC_SIZE_MIN: c_int = 10; +pub const _PC_FILESIZEBITS: c_int = 12; +pub const _PC_REC_INCR_XFER_SIZE: c_int = 14; +pub const _PC_REC_MAX_XFER_SIZE: c_int = 15; +pub const _PC_REC_MIN_XFER_SIZE: c_int = 16; +pub const _PC_REC_XFER_ALIGN: c_int = 17; +pub const _PC_SYMLINK_MAX: c_int = 18; +pub const _PC_MIN_HOLE_SIZE: c_int = 21; +pub const _PC_ASYNC_IO: c_int = 53; +pub const _PC_PRIO_IO: c_int = 54; +pub const _PC_SYNC_IO: c_int = 55; +pub const _PC_ACL_EXTENDED: c_int = 59; +pub const _PC_ACL_PATH_MAX: c_int = 60; +pub const _PC_CAP_PRESENT: c_int = 61; +pub const _PC_INF_PRESENT: c_int = 62; +pub const _PC_MAC_PRESENT: c_int = 63; + +pub const _SC_ARG_MAX: c_int = 1; +pub const _SC_CHILD_MAX: c_int = 2; +pub const _SC_CLK_TCK: c_int = 3; +pub const _SC_NGROUPS_MAX: c_int = 4; +pub const _SC_OPEN_MAX: c_int = 5; +pub const _SC_JOB_CONTROL: c_int = 6; +pub const _SC_SAVED_IDS: c_int = 7; +pub const _SC_VERSION: c_int = 8; +pub const _SC_BC_BASE_MAX: c_int = 9; +pub const _SC_BC_DIM_MAX: c_int = 10; +pub const _SC_BC_SCALE_MAX: c_int = 11; +pub const _SC_BC_STRING_MAX: c_int = 12; +pub const _SC_COLL_WEIGHTS_MAX: c_int = 13; +pub const _SC_EXPR_NEST_MAX: c_int = 14; +pub const _SC_LINE_MAX: c_int = 15; +pub const _SC_RE_DUP_MAX: c_int = 16; +pub const _SC_2_VERSION: c_int = 17; +pub const _SC_2_C_BIND: c_int = 18; +pub const _SC_2_C_DEV: c_int = 19; +pub const _SC_2_CHAR_TERM: c_int = 20; +pub const _SC_2_FORT_DEV: c_int = 21; +pub const _SC_2_FORT_RUN: c_int = 22; +pub const _SC_2_LOCALEDEF: c_int = 23; +pub const _SC_2_SW_DEV: c_int = 24; +pub const _SC_2_UPE: c_int = 25; +pub const _SC_STREAM_MAX: c_int = 26; +pub const _SC_TZNAME_MAX: c_int = 27; +pub const _SC_ASYNCHRONOUS_IO: c_int = 28; +pub const _SC_MAPPED_FILES: c_int = 29; +pub const _SC_MEMLOCK: c_int = 30; +pub const _SC_MEMLOCK_RANGE: c_int = 31; +pub const _SC_MEMORY_PROTECTION: c_int = 32; +pub const _SC_MESSAGE_PASSING: c_int = 33; +pub const _SC_PRIORITIZED_IO: c_int = 34; +pub const _SC_PRIORITY_SCHEDULING: c_int = 35; +pub const _SC_REALTIME_SIGNALS: c_int = 36; +pub const _SC_SEMAPHORES: c_int = 37; +pub const _SC_FSYNC: c_int = 38; +pub const _SC_SHARED_MEMORY_OBJECTS: c_int = 39; +pub const _SC_SYNCHRONIZED_IO: c_int = 40; +pub const _SC_TIMERS: c_int = 41; +pub const _SC_AIO_LISTIO_MAX: c_int = 42; +pub const _SC_AIO_MAX: c_int = 43; +pub const _SC_AIO_PRIO_DELTA_MAX: c_int = 44; +pub const _SC_DELAYTIMER_MAX: c_int = 45; +pub const _SC_MQ_OPEN_MAX: c_int = 46; +pub const _SC_PAGESIZE: c_int = 47; +pub const _SC_PAGE_SIZE: c_int = _SC_PAGESIZE; +pub const 
_SC_RTSIG_MAX: c_int = 48; +pub const _SC_SEM_NSEMS_MAX: c_int = 49; +pub const _SC_SEM_VALUE_MAX: c_int = 50; +pub const _SC_SIGQUEUE_MAX: c_int = 51; +pub const _SC_TIMER_MAX: c_int = 52; +pub const _SC_IOV_MAX: c_int = 56; +pub const _SC_NPROCESSORS_CONF: c_int = 57; +pub const _SC_2_PBS: c_int = 59; +pub const _SC_2_PBS_ACCOUNTING: c_int = 60; +pub const _SC_2_PBS_CHECKPOINT: c_int = 61; +pub const _SC_2_PBS_LOCATE: c_int = 62; +pub const _SC_2_PBS_MESSAGE: c_int = 63; +pub const _SC_2_PBS_TRACK: c_int = 64; +pub const _SC_ADVISORY_INFO: c_int = 65; +pub const _SC_BARRIERS: c_int = 66; +pub const _SC_CLOCK_SELECTION: c_int = 67; +pub const _SC_CPUTIME: c_int = 68; +pub const _SC_FILE_LOCKING: c_int = 69; +pub const _SC_NPROCESSORS_ONLN: c_int = 58; +pub const _SC_GETGR_R_SIZE_MAX: c_int = 70; +pub const _SC_GETPW_R_SIZE_MAX: c_int = 71; +pub const _SC_HOST_NAME_MAX: c_int = 72; +pub const _SC_LOGIN_NAME_MAX: c_int = 73; +pub const _SC_MONOTONIC_CLOCK: c_int = 74; +pub const _SC_MQ_PRIO_MAX: c_int = 75; +pub const _SC_READER_WRITER_LOCKS: c_int = 76; +pub const _SC_REGEXP: c_int = 77; +pub const _SC_SHELL: c_int = 78; +pub const _SC_SPAWN: c_int = 79; +pub const _SC_SPIN_LOCKS: c_int = 80; +pub const _SC_SPORADIC_SERVER: c_int = 81; +pub const _SC_THREAD_ATTR_STACKADDR: c_int = 82; +pub const _SC_THREAD_ATTR_STACKSIZE: c_int = 83; +pub const _SC_THREAD_DESTRUCTOR_ITERATIONS: c_int = 85; +pub const _SC_THREAD_KEYS_MAX: c_int = 86; +pub const _SC_THREAD_PRIO_INHERIT: c_int = 87; +pub const _SC_THREAD_PRIO_PROTECT: c_int = 88; +pub const _SC_THREAD_PRIORITY_SCHEDULING: c_int = 89; +pub const _SC_THREAD_PROCESS_SHARED: c_int = 90; +pub const _SC_THREAD_SAFE_FUNCTIONS: c_int = 91; +pub const _SC_THREAD_SPORADIC_SERVER: c_int = 92; +pub const _SC_THREAD_STACK_MIN: c_int = 93; +pub const _SC_THREAD_THREADS_MAX: c_int = 94; +pub const _SC_TIMEOUTS: c_int = 95; +pub const _SC_THREADS: c_int = 96; +pub const _SC_TRACE: c_int = 97; +pub const _SC_TRACE_EVENT_FILTER: c_int = 98; +pub const _SC_TRACE_INHERIT: c_int = 99; +pub const _SC_TRACE_LOG: c_int = 100; +pub const _SC_TTY_NAME_MAX: c_int = 101; +pub const _SC_TYPED_MEMORY_OBJECTS: c_int = 102; +pub const _SC_V6_ILP32_OFF32: c_int = 103; +pub const _SC_V6_ILP32_OFFBIG: c_int = 104; +pub const _SC_V6_LP64_OFF64: c_int = 105; +pub const _SC_V6_LPBIG_OFFBIG: c_int = 106; +pub const _SC_ATEXIT_MAX: c_int = 107; +pub const _SC_XOPEN_CRYPT: c_int = 108; +pub const _SC_XOPEN_ENH_I18N: c_int = 109; +pub const _SC_XOPEN_LEGACY: c_int = 110; +pub const _SC_XOPEN_REALTIME: c_int = 111; +pub const _SC_XOPEN_REALTIME_THREADS: c_int = 112; +pub const _SC_XOPEN_SHM: c_int = 113; +pub const _SC_XOPEN_STREAMS: c_int = 114; +pub const _SC_XOPEN_UNIX: c_int = 115; +pub const _SC_XOPEN_VERSION: c_int = 116; +pub const _SC_XOPEN_XCU_VERSION: c_int = 117; +pub const _SC_IPV6: c_int = 118; +pub const _SC_RAW_SOCKETS: c_int = 119; +pub const _SC_SYMLOOP_MAX: c_int = 120; +pub const _SC_PHYS_PAGES: c_int = 121; + +pub const _CS_PATH: c_int = 1; + +pub const PTHREAD_MUTEX_INITIALIZER: pthread_mutex_t = ptr::null_mut(); +pub const PTHREAD_COND_INITIALIZER: pthread_cond_t = ptr::null_mut(); +pub const PTHREAD_RWLOCK_INITIALIZER: pthread_rwlock_t = ptr::null_mut(); +pub const PTHREAD_MUTEX_ERRORCHECK: c_int = 1; +pub const PTHREAD_MUTEX_RECURSIVE: c_int = 2; +pub const PTHREAD_MUTEX_NORMAL: c_int = 3; +pub const PTHREAD_MUTEX_DEFAULT: c_int = PTHREAD_MUTEX_ERRORCHECK; + +pub const SCHED_FIFO: c_int = 1; +pub const SCHED_OTHER: c_int = 2; +pub const SCHED_RR: c_int = 3; + 
+pub const FD_SETSIZE: usize = 1024; + +pub const ST_NOSUID: c_ulong = 2; + +pub const NI_MAXHOST: size_t = 1025; + +pub const XUCRED_VERSION: c_uint = 0; + +pub const RTLD_LOCAL: c_int = 0; +pub const RTLD_NODELETE: c_int = 0x1000; +pub const RTLD_NOLOAD: c_int = 0x2000; +pub const RTLD_GLOBAL: c_int = 0x100; + +pub const LOG_NTP: c_int = 12 << 3; +pub const LOG_SECURITY: c_int = 13 << 3; +pub const LOG_CONSOLE: c_int = 14 << 3; +pub const LOG_NFACILITIES: c_int = 24; + +pub const TIOCEXCL: c_ulong = 0x2000740d; +pub const TIOCNXCL: c_ulong = 0x2000740e; +pub const TIOCFLUSH: c_ulong = 0x80047410; +pub const TIOCGETA: c_ulong = 0x402c7413; +pub const TIOCSETA: c_ulong = 0x802c7414; +pub const TIOCSETAW: c_ulong = 0x802c7415; +pub const TIOCSETAF: c_ulong = 0x802c7416; +pub const TIOCGETD: c_ulong = 0x4004741a; +pub const TIOCSETD: c_ulong = 0x8004741b; +pub const TIOCGDRAINWAIT: c_ulong = 0x40047456; +pub const TIOCSDRAINWAIT: c_ulong = 0x80047457; +#[cfg_attr( + not(target_os = "dragonfly"), + deprecated = "unused since FreeBSD 8, removed in FreeBSD 15" +)] +pub const TIOCMGDTRWAIT: c_ulong = 0x4004745a; +#[cfg_attr( + not(target_os = "dragonfly"), + deprecated = "unused since FreeBSD 8, removed in FreeBSD 15" +)] +pub const TIOCMSDTRWAIT: c_ulong = 0x8004745b; +pub const TIOCDRAIN: c_ulong = 0x2000745e; +pub const TIOCEXT: c_ulong = 0x80047460; +pub const TIOCSCTTY: c_ulong = 0x20007461; +pub const TIOCCONS: c_ulong = 0x80047462; +pub const TIOCGSID: c_ulong = 0x40047463; +pub const TIOCSTAT: c_ulong = 0x20007465; +pub const TIOCUCNTL: c_ulong = 0x80047466; +pub const TIOCSWINSZ: c_ulong = 0x80087467; +pub const TIOCGWINSZ: c_ulong = 0x40087468; +pub const TIOCMGET: c_ulong = 0x4004746a; +pub const TIOCM_LE: c_int = 0x1; +pub const TIOCM_DTR: c_int = 0x2; +pub const TIOCM_RTS: c_int = 0x4; +pub const TIOCM_ST: c_int = 0x8; +pub const TIOCM_SR: c_int = 0x10; +pub const TIOCM_CTS: c_int = 0x20; +pub const TIOCM_RI: c_int = 0x80; +pub const TIOCM_DSR: c_int = 0x100; +pub const TIOCM_CD: c_int = 0x40; +pub const TIOCM_CAR: c_int = 0x40; +pub const TIOCM_RNG: c_int = 0x80; +pub const TIOCMBIC: c_ulong = 0x8004746b; +pub const TIOCMBIS: c_ulong = 0x8004746c; +pub const TIOCMSET: c_ulong = 0x8004746d; +pub const TIOCSTART: c_ulong = 0x2000746e; +pub const TIOCSTOP: c_ulong = 0x2000746f; +pub const TIOCPKT: c_ulong = 0x80047470; +pub const TIOCPKT_DATA: c_int = 0x0; +pub const TIOCPKT_FLUSHREAD: c_int = 0x1; +pub const TIOCPKT_FLUSHWRITE: c_int = 0x2; +pub const TIOCPKT_STOP: c_int = 0x4; +pub const TIOCPKT_START: c_int = 0x8; +pub const TIOCPKT_NOSTOP: c_int = 0x10; +pub const TIOCPKT_DOSTOP: c_int = 0x20; +pub const TIOCPKT_IOCTL: c_int = 0x40; +pub const TIOCNOTTY: c_ulong = 0x20007471; +pub const TIOCSTI: c_ulong = 0x80017472; +pub const TIOCOUTQ: c_ulong = 0x40047473; +pub const TIOCSPGRP: c_ulong = 0x80047476; +pub const TIOCGPGRP: c_ulong = 0x40047477; +pub const TIOCCDTR: c_ulong = 0x20007478; +pub const TIOCSDTR: c_ulong = 0x20007479; +pub const TTYDISC: c_int = 0x0; +pub const SLIPDISC: c_int = 0x4; +pub const PPPDISC: c_int = 0x5; +pub const NETGRAPHDISC: c_int = 0x6; + +pub const BIOCGRSIG: c_ulong = 0x40044272; +pub const BIOCSRSIG: c_ulong = 0x80044273; +pub const BIOCSDLT: c_ulong = 0x80044278; +pub const BIOCGSEESENT: c_ulong = 0x40044276; +pub const BIOCSSEESENT: c_ulong = 0x80044277; +cfg_if! 
{ + if #[cfg(target_pointer_width = "64")] { + pub const BIOCGDLTLIST: c_ulong = 0xc0104279; + pub const BIOCSETF: c_ulong = 0x80104267; + } else if #[cfg(target_pointer_width = "32")] { + pub const BIOCGDLTLIST: c_ulong = 0xc0084279; + pub const BIOCSETF: c_ulong = 0x80084267; + } +} + +pub const FIODTYPE: c_ulong = 0x4004667a; +pub const FIOGETLBA: c_ulong = 0x40046679; + +pub const B0: speed_t = 0; +pub const B50: speed_t = 50; +pub const B75: speed_t = 75; +pub const B110: speed_t = 110; +pub const B134: speed_t = 134; +pub const B150: speed_t = 150; +pub const B200: speed_t = 200; +pub const B300: speed_t = 300; +pub const B600: speed_t = 600; +pub const B1200: speed_t = 1200; +pub const B1800: speed_t = 1800; +pub const B2400: speed_t = 2400; +pub const B4800: speed_t = 4800; +pub const B9600: speed_t = 9600; +pub const B19200: speed_t = 19200; +pub const B38400: speed_t = 38400; +pub const B7200: speed_t = 7200; +pub const B14400: speed_t = 14400; +pub const B28800: speed_t = 28800; +pub const B57600: speed_t = 57600; +pub const B76800: speed_t = 76800; +pub const B115200: speed_t = 115200; +pub const B230400: speed_t = 230400; +pub const EXTA: speed_t = 19200; +pub const EXTB: speed_t = 38400; + +pub const SEM_FAILED: *mut sem_t = ptr::null_mut(); + +pub const CRTSCTS: crate::tcflag_t = 0x00030000; +pub const CCTS_OFLOW: crate::tcflag_t = 0x00010000; +pub const CRTS_IFLOW: crate::tcflag_t = 0x00020000; +pub const CDTR_IFLOW: crate::tcflag_t = 0x00040000; +pub const CDSR_OFLOW: crate::tcflag_t = 0x00080000; +pub const CCAR_OFLOW: crate::tcflag_t = 0x00100000; +pub const VERASE2: usize = 7; +pub const OCRNL: crate::tcflag_t = 0x10; +pub const ONOCR: crate::tcflag_t = 0x20; +pub const ONLRET: crate::tcflag_t = 0x40; + +pub const CMGROUP_MAX: usize = 16; + +pub const EUI64_LEN: usize = 8; + +// https://github.com/freebsd/freebsd/blob/HEAD/sys/net/bpf.h +pub const BPF_ALIGNMENT: usize = SIZEOF_LONG; + +// Values for rtprio struct (prio field) and syscall (function argument) +pub const RTP_PRIO_MIN: c_ushort = 0; +pub const RTP_PRIO_MAX: c_ushort = 31; +pub const RTP_LOOKUP: c_int = 0; +pub const RTP_SET: c_int = 1; + +// Flags for chflags(2) +pub const UF_SETTABLE: c_ulong = 0x0000ffff; +pub const UF_NODUMP: c_ulong = 0x00000001; +pub const UF_IMMUTABLE: c_ulong = 0x00000002; +pub const UF_APPEND: c_ulong = 0x00000004; +pub const UF_OPAQUE: c_ulong = 0x00000008; +pub const UF_NOUNLINK: c_ulong = 0x00000010; +pub const SF_SETTABLE: c_ulong = 0xffff0000; +pub const SF_ARCHIVED: c_ulong = 0x00010000; +pub const SF_IMMUTABLE: c_ulong = 0x00020000; +pub const SF_APPEND: c_ulong = 0x00040000; +pub const SF_NOUNLINK: c_ulong = 0x00100000; + +pub const TIMER_ABSTIME: c_int = 1; + +// +pub const NTP_API: c_int = 4; +pub const MAXPHASE: c_long = 500000000; +pub const MAXFREQ: c_long = 500000; +pub const MINSEC: c_int = 256; +pub const MAXSEC: c_int = 2048; +pub const NANOSECOND: c_long = 1000000000; +pub const SCALE_PPM: c_int = 65; +pub const MAXTC: c_int = 10; +pub const MOD_OFFSET: c_uint = 0x0001; +pub const MOD_FREQUENCY: c_uint = 0x0002; +pub const MOD_MAXERROR: c_uint = 0x0004; +pub const MOD_ESTERROR: c_uint = 0x0008; +pub const MOD_STATUS: c_uint = 0x0010; +pub const MOD_TIMECONST: c_uint = 0x0020; +pub const MOD_PPSMAX: c_uint = 0x0040; +pub const MOD_TAI: c_uint = 0x0080; +pub const MOD_MICRO: c_uint = 0x1000; +pub const MOD_NANO: c_uint = 0x2000; +pub const MOD_CLKB: c_uint = 0x4000; +pub const MOD_CLKA: c_uint = 0x8000; +pub const STA_PLL: c_int = 0x0001; +pub const STA_PPSFREQ: 
c_int = 0x0002; +pub const STA_PPSTIME: c_int = 0x0004; +pub const STA_FLL: c_int = 0x0008; +pub const STA_INS: c_int = 0x0010; +pub const STA_DEL: c_int = 0x0020; +pub const STA_UNSYNC: c_int = 0x0040; +pub const STA_FREQHOLD: c_int = 0x0080; +pub const STA_PPSSIGNAL: c_int = 0x0100; +pub const STA_PPSJITTER: c_int = 0x0200; +pub const STA_PPSWANDER: c_int = 0x0400; +pub const STA_PPSERROR: c_int = 0x0800; +pub const STA_CLOCKERR: c_int = 0x1000; +pub const STA_NANO: c_int = 0x2000; +pub const STA_MODE: c_int = 0x4000; +pub const STA_CLK: c_int = 0x8000; +pub const STA_RONLY: c_int = STA_PPSSIGNAL + | STA_PPSJITTER + | STA_PPSWANDER + | STA_PPSERROR + | STA_CLOCKERR + | STA_NANO + | STA_MODE + | STA_CLK; +pub const TIME_OK: c_int = 0; +pub const TIME_INS: c_int = 1; +pub const TIME_DEL: c_int = 2; +pub const TIME_OOP: c_int = 3; +pub const TIME_WAIT: c_int = 4; +pub const TIME_ERROR: c_int = 5; + +pub const REG_ENOSYS: c_int = -1; +pub const REG_ILLSEQ: c_int = 17; + +pub const IPC_PRIVATE: crate::key_t = 0; +pub const IPC_CREAT: c_int = 0o1000; +pub const IPC_EXCL: c_int = 0o2000; +pub const IPC_NOWAIT: c_int = 0o4000; +pub const IPC_RMID: c_int = 0; +pub const IPC_SET: c_int = 1; +pub const IPC_STAT: c_int = 2; +pub const IPC_R: c_int = 0o400; +pub const IPC_W: c_int = 0o200; +pub const IPC_M: c_int = 0o10000; + +pub const SHM_RDONLY: c_int = 0o10000; +pub const SHM_RND: c_int = 0o20000; +pub const SHM_R: c_int = 0o400; +pub const SHM_W: c_int = 0o200; + +pub const KENV_GET: c_int = 0; +pub const KENV_SET: c_int = 1; +pub const KENV_UNSET: c_int = 2; +pub const KENV_DUMP: c_int = 3; +pub const KENV_MNAMELEN: c_int = 128; +pub const KENV_MVALLEN: c_int = 128; + +pub const RB_ASKNAME: c_int = 0x001; +pub const RB_SINGLE: c_int = 0x002; +pub const RB_NOSYNC: c_int = 0x004; +pub const RB_HALT: c_int = 0x008; +pub const RB_INITNAME: c_int = 0x010; +pub const RB_DFLTROOT: c_int = 0x020; +pub const RB_KDB: c_int = 0x040; +pub const RB_RDONLY: c_int = 0x080; +pub const RB_DUMP: c_int = 0x100; +pub const RB_MINIROOT: c_int = 0x200; +pub const RB_VERBOSE: c_int = 0x800; +pub const RB_SERIAL: c_int = 0x1000; +pub const RB_CDROM: c_int = 0x2000; +pub const RB_POWEROFF: c_int = 0x4000; +pub const RB_GDB: c_int = 0x8000; +pub const RB_MUTE: c_int = 0x10000; +pub const RB_SELFTEST: c_int = 0x20000; + +// For getrandom() +pub const GRND_NONBLOCK: c_uint = 0x1; +pub const GRND_RANDOM: c_uint = 0x2; +pub const GRND_INSECURE: c_uint = 0x4; + +// DIFF(main): changed to `c_short` in f62eb023ab +pub const POSIX_SPAWN_RESETIDS: c_int = 0x01; +pub const POSIX_SPAWN_SETPGROUP: c_int = 0x02; +pub const POSIX_SPAWN_SETSCHEDPARAM: c_int = 0x04; +pub const POSIX_SPAWN_SETSCHEDULER: c_int = 0x08; +pub const POSIX_SPAWN_SETSIGDEF: c_int = 0x10; +pub const POSIX_SPAWN_SETSIGMASK: c_int = 0x20; + +safe_f! 
{ + pub const fn WIFCONTINUED(status: c_int) -> bool { + status == 0x13 + } + + pub const fn WSTOPSIG(status: c_int) -> c_int { + status >> 8 + } + + pub const fn WIFSTOPPED(status: c_int) -> bool { + (status & 0o177) == 0o177 + } +} + +extern "C" { + pub fn sem_destroy(sem: *mut sem_t) -> c_int; + pub fn sem_init(sem: *mut sem_t, pshared: c_int, value: c_uint) -> c_int; + + pub fn daemon(nochdir: c_int, noclose: c_int) -> c_int; + pub fn gettimeofday(tp: *mut crate::timeval, tz: *mut crate::timezone) -> c_int; + pub fn accept4( + s: c_int, + addr: *mut crate::sockaddr, + addrlen: *mut crate::socklen_t, + flags: c_int, + ) -> c_int; + pub fn chflags(path: *const c_char, flags: c_ulong) -> c_int; + pub fn chflagsat(fd: c_int, path: *const c_char, flags: c_ulong, atflag: c_int) -> c_int; + + pub fn clock_nanosleep( + clk_id: crate::clockid_t, + flags: c_int, + rqtp: *const crate::timespec, + rmtp: *mut crate::timespec, + ) -> c_int; + + pub fn clock_getres(clk_id: crate::clockid_t, tp: *mut crate::timespec) -> c_int; + pub fn clock_gettime(clk_id: crate::clockid_t, tp: *mut crate::timespec) -> c_int; + pub fn clock_settime(clk_id: crate::clockid_t, tp: *const crate::timespec) -> c_int; + pub fn clock_getcpuclockid(pid: crate::pid_t, clk_id: *mut crate::clockid_t) -> c_int; + + pub fn pthread_getcpuclockid(thread: crate::pthread_t, clk_id: *mut crate::clockid_t) -> c_int; + + pub fn dirfd(dirp: *mut crate::DIR) -> c_int; + pub fn duplocale(base: crate::locale_t) -> crate::locale_t; + pub fn endutxent(); + pub fn fchflags(fd: c_int, flags: c_ulong) -> c_int; + + // DIFF(main): changed to `*const *mut` in e77f551de9 + pub fn fexecve(fd: c_int, argv: *const *const c_char, envp: *const *const c_char) -> c_int; + + pub fn futimens(fd: c_int, times: *const crate::timespec) -> c_int; + pub fn getdomainname(name: *mut c_char, len: c_int) -> c_int; + pub fn getgrent_r( + grp: *mut crate::group, + buf: *mut c_char, + buflen: size_t, + result: *mut *mut crate::group, + ) -> c_int; + pub fn getpwent_r( + pwd: *mut crate::passwd, + buf: *mut c_char, + buflen: size_t, + result: *mut *mut crate::passwd, + ) -> c_int; + pub fn getgrouplist( + name: *const c_char, + basegid: crate::gid_t, + groups: *mut crate::gid_t, + ngroups: *mut c_int, + ) -> c_int; + pub fn getnameinfo( + sa: *const crate::sockaddr, + salen: crate::socklen_t, + host: *mut c_char, + hostlen: size_t, + serv: *mut c_char, + servlen: size_t, + flags: c_int, + ) -> c_int; + pub fn getpriority(which: c_int, who: c_int) -> c_int; + pub fn getresgid( + rgid: *mut crate::gid_t, + egid: *mut crate::gid_t, + sgid: *mut crate::gid_t, + ) -> c_int; + pub fn getresuid( + ruid: *mut crate::uid_t, + euid: *mut crate::uid_t, + suid: *mut crate::uid_t, + ) -> c_int; + pub fn getutxent() -> *mut utmpx; + pub fn getutxid(ut: *const utmpx) -> *mut utmpx; + pub fn getutxline(ut: *const utmpx) -> *mut utmpx; + pub fn initgroups(name: *const c_char, basegid: crate::gid_t) -> c_int; + #[cfg_attr( + all(target_os = "freebsd", any(freebsd11, freebsd10)), + link_name = "kevent@FBSD_1.0" + )] + pub fn kevent( + kq: c_int, + changelist: *const crate::kevent, + nchanges: c_int, + eventlist: *mut crate::kevent, + nevents: c_int, + timeout: *const crate::timespec, + ) -> c_int; + pub fn lchflags(path: *const c_char, flags: c_ulong) -> c_int; + pub fn lutimes(file: *const c_char, times: *const crate::timeval) -> c_int; + pub fn memrchr(cx: *const c_void, c: c_int, n: size_t) -> *mut c_void; + pub fn mkfifoat(dirfd: c_int, pathname: *const c_char, mode: mode_t) -> c_int; 
+ #[cfg_attr( + all(target_os = "freebsd", any(freebsd11, freebsd10)), + link_name = "mknodat@FBSD_1.1" + )] + pub fn mknodat(dirfd: c_int, pathname: *const c_char, mode: mode_t, dev: dev_t) -> c_int; + pub fn malloc_usable_size(ptr: *const c_void) -> size_t; + pub fn mincore(addr: *const c_void, len: size_t, vec: *mut c_char) -> c_int; + pub fn newlocale(mask: c_int, locale: *const c_char, base: crate::locale_t) -> crate::locale_t; + pub fn nl_langinfo_l(item: crate::nl_item, locale: crate::locale_t) -> *mut c_char; + pub fn pipe2(fds: *mut c_int, flags: c_int) -> c_int; + pub fn posix_fallocate(fd: c_int, offset: off_t, len: off_t) -> c_int; + pub fn posix_fadvise(fd: c_int, offset: off_t, len: off_t, advise: c_int) -> c_int; + pub fn ppoll( + fds: *mut crate::pollfd, + nfds: crate::nfds_t, + timeout: *const crate::timespec, + sigmask: *const sigset_t, + ) -> c_int; + pub fn preadv(fd: c_int, iov: *const crate::iovec, iovcnt: c_int, offset: off_t) -> ssize_t; + pub fn pthread_attr_get_np(tid: crate::pthread_t, attr: *mut crate::pthread_attr_t) -> c_int; + pub fn pthread_attr_getguardsize( + attr: *const crate::pthread_attr_t, + guardsize: *mut size_t, + ) -> c_int; + pub fn pthread_attr_setguardsize(attr: *mut crate::pthread_attr_t, guardsize: size_t) -> c_int; + pub fn pthread_attr_getstack( + attr: *const crate::pthread_attr_t, + stackaddr: *mut *mut c_void, + stacksize: *mut size_t, + ) -> c_int; + pub fn pthread_condattr_getclock( + attr: *const pthread_condattr_t, + clock_id: *mut clockid_t, + ) -> c_int; + pub fn pthread_condattr_getpshared( + attr: *const pthread_condattr_t, + pshared: *mut c_int, + ) -> c_int; + pub fn pthread_condattr_setclock( + attr: *mut pthread_condattr_t, + clock_id: crate::clockid_t, + ) -> c_int; + pub fn pthread_condattr_setpshared(attr: *mut pthread_condattr_t, pshared: c_int) -> c_int; + pub fn pthread_main_np() -> c_int; + pub fn pthread_mutex_timedlock( + lock: *mut pthread_mutex_t, + abstime: *const crate::timespec, + ) -> c_int; + pub fn pthread_mutexattr_getpshared( + attr: *const pthread_mutexattr_t, + pshared: *mut c_int, + ) -> c_int; + pub fn pthread_mutexattr_setpshared(attr: *mut pthread_mutexattr_t, pshared: c_int) -> c_int; + pub fn pthread_rwlockattr_getpshared( + attr: *const pthread_rwlockattr_t, + val: *mut c_int, + ) -> c_int; + pub fn pthread_rwlockattr_setpshared(attr: *mut pthread_rwlockattr_t, val: c_int) -> c_int; + pub fn pthread_barrierattr_init(attr: *mut crate::pthread_barrierattr_t) -> c_int; + pub fn pthread_barrierattr_destroy(attr: *mut crate::pthread_barrierattr_t) -> c_int; + pub fn pthread_barrierattr_getpshared( + attr: *const crate::pthread_barrierattr_t, + shared: *mut c_int, + ) -> c_int; + pub fn pthread_barrierattr_setpshared( + attr: *mut crate::pthread_barrierattr_t, + shared: c_int, + ) -> c_int; + pub fn pthread_barrier_init( + barrier: *mut pthread_barrier_t, + attr: *const crate::pthread_barrierattr_t, + count: c_uint, + ) -> c_int; + pub fn pthread_barrier_destroy(barrier: *mut pthread_barrier_t) -> c_int; + pub fn pthread_barrier_wait(barrier: *mut pthread_barrier_t) -> c_int; + pub fn pthread_get_name_np(tid: crate::pthread_t, name: *mut c_char, len: size_t); + pub fn pthread_set_name_np(tid: crate::pthread_t, name: *const c_char); + pub fn pthread_getname_np( + thread: crate::pthread_t, + buffer: *mut c_char, + length: size_t, + ) -> c_int; + pub fn pthread_setname_np(thread: crate::pthread_t, name: *const c_char) -> c_int; + pub fn pthread_setschedparam( + native: crate::pthread_t, + policy: c_int, + 
param: *const sched_param, + ) -> c_int; + pub fn pthread_getschedparam( + native: crate::pthread_t, + policy: *mut c_int, + param: *mut sched_param, + ) -> c_int; + pub fn ptrace(request: c_int, pid: crate::pid_t, addr: *mut c_char, data: c_int) -> c_int; + pub fn utrace(addr: *const c_void, len: size_t) -> c_int; + pub fn pututxline(ut: *const utmpx) -> *mut utmpx; + pub fn pwritev(fd: c_int, iov: *const crate::iovec, iovcnt: c_int, offset: off_t) -> ssize_t; + pub fn querylocale(mask: c_int, loc: crate::locale_t) -> *const c_char; + pub fn rtprio(function: c_int, pid: crate::pid_t, rtp: *mut rtprio) -> c_int; + pub fn sched_rr_get_interval(pid: crate::pid_t, t: *mut crate::timespec) -> c_int; + pub fn sched_getparam(pid: crate::pid_t, param: *mut sched_param) -> c_int; + pub fn sched_setparam(pid: crate::pid_t, param: *const sched_param) -> c_int; + pub fn sched_getscheduler(pid: crate::pid_t) -> c_int; + pub fn sched_setscheduler( + pid: crate::pid_t, + policy: c_int, + param: *const crate::sched_param, + ) -> c_int; + pub fn sem_getvalue(sem: *mut sem_t, sval: *mut c_int) -> c_int; + pub fn sem_timedwait(sem: *mut sem_t, abstime: *const crate::timespec) -> c_int; + pub fn sendfile( + fd: c_int, + s: c_int, + offset: off_t, + nbytes: size_t, + hdtr: *mut crate::sf_hdtr, + sbytes: *mut off_t, + flags: c_int, + ) -> c_int; + pub fn setdomainname(name: *const c_char, len: c_int) -> c_int; + pub fn sethostname(name: *const c_char, len: c_int) -> c_int; + pub fn setpriority(which: c_int, who: c_int, prio: c_int) -> c_int; + pub fn setresgid(rgid: crate::gid_t, egid: crate::gid_t, sgid: crate::gid_t) -> c_int; + pub fn setresuid(ruid: crate::uid_t, euid: crate::uid_t, suid: crate::uid_t) -> c_int; + pub fn settimeofday(tv: *const crate::timeval, tz: *const crate::timezone) -> c_int; + pub fn setutxent(); + pub fn shm_open(name: *const c_char, oflag: c_int, mode: mode_t) -> c_int; + pub fn sigtimedwait( + set: *const sigset_t, + info: *mut siginfo_t, + timeout: *const crate::timespec, + ) -> c_int; + pub fn sigwaitinfo(set: *const sigset_t, info: *mut siginfo_t) -> c_int; + pub fn sysctl( + name: *const c_int, + namelen: c_uint, + oldp: *mut c_void, + oldlenp: *mut size_t, + newp: *const c_void, + newlen: size_t, + ) -> c_int; + pub fn sysctlbyname( + name: *const c_char, + oldp: *mut c_void, + oldlenp: *mut size_t, + newp: *const c_void, + newlen: size_t, + ) -> c_int; + pub fn sysctlnametomib(name: *const c_char, mibp: *mut c_int, sizep: *mut size_t) -> c_int; + pub fn uselocale(loc: crate::locale_t) -> crate::locale_t; + pub fn utimensat( + dirfd: c_int, + path: *const c_char, + times: *const crate::timespec, + flag: c_int, + ) -> c_int; + + pub fn ntp_adjtime(buf: *mut timex) -> c_int; + pub fn ntp_gettime(buf: *mut ntptimeval) -> c_int; + + // #include + pub fn dl_iterate_phdr( + callback: Option< + unsafe extern "C" fn(info: *mut dl_phdr_info, size: usize, data: *mut c_void) -> c_int, + >, + data: *mut c_void, + ) -> c_int; + + pub fn iconv_open(tocode: *const c_char, fromcode: *const c_char) -> iconv_t; + pub fn iconv( + cd: iconv_t, + inbuf: *mut *mut c_char, + inbytesleft: *mut size_t, + outbuf: *mut *mut c_char, + outbytesleft: *mut size_t, + ) -> size_t; + pub fn iconv_close(cd: iconv_t) -> c_int; + + // Added in `FreeBSD` 11.0 + // Added in `DragonFly BSD` 5.4 + pub fn explicit_bzero(s: *mut c_void, len: size_t); + // ISO/IEC 9899:2011 ("ISO C11") K.3.7.4.1 + pub fn memset_s(s: *mut c_void, smax: size_t, c: c_int, n: size_t) -> c_int; + pub fn gethostid() -> c_long; + pub fn 
sethostid(hostid: c_long); + + pub fn eui64_aton(a: *const c_char, e: *mut eui64) -> c_int; + pub fn eui64_ntoa(id: *const eui64, a: *mut c_char, len: size_t) -> c_int; + pub fn eui64_ntohost(hostname: *mut c_char, len: size_t, id: *const eui64) -> c_int; + pub fn eui64_hostton(hostname: *const c_char, id: *mut eui64) -> c_int; + + pub fn eaccess(path: *const c_char, mode: c_int) -> c_int; + + pub fn kenv(action: c_int, name: *const c_char, value: *mut c_char, len: c_int) -> c_int; + pub fn reboot(howto: c_int) -> c_int; + + pub fn exect(path: *const c_char, argv: *const *mut c_char, envp: *const *mut c_char) -> c_int; + pub fn execvP( + file: *const c_char, + search_path: *const c_char, + argv: *const *mut c_char, + ) -> c_int; + + pub fn mkostemp(template: *mut c_char, flags: c_int) -> c_int; + pub fn mkostemps(template: *mut c_char, suffixlen: c_int, flags: c_int) -> c_int; + + pub fn posix_spawn( + pid: *mut crate::pid_t, + path: *const c_char, + file_actions: *const crate::posix_spawn_file_actions_t, + attrp: *const crate::posix_spawnattr_t, + argv: *const *mut c_char, + envp: *const *mut c_char, + ) -> c_int; + pub fn posix_spawnp( + pid: *mut crate::pid_t, + file: *const c_char, + file_actions: *const crate::posix_spawn_file_actions_t, + attrp: *const crate::posix_spawnattr_t, + argv: *const *mut c_char, + envp: *const *mut c_char, + ) -> c_int; + pub fn posix_spawnattr_init(attr: *mut posix_spawnattr_t) -> c_int; + pub fn posix_spawnattr_destroy(attr: *mut posix_spawnattr_t) -> c_int; + pub fn posix_spawnattr_getsigdefault( + attr: *const posix_spawnattr_t, + default: *mut crate::sigset_t, + ) -> c_int; + pub fn posix_spawnattr_setsigdefault( + attr: *mut posix_spawnattr_t, + default: *const crate::sigset_t, + ) -> c_int; + pub fn posix_spawnattr_getsigmask( + attr: *const posix_spawnattr_t, + default: *mut crate::sigset_t, + ) -> c_int; + pub fn posix_spawnattr_setsigmask( + attr: *mut posix_spawnattr_t, + default: *const crate::sigset_t, + ) -> c_int; + pub fn posix_spawnattr_getflags(attr: *const posix_spawnattr_t, flags: *mut c_short) -> c_int; + pub fn posix_spawnattr_setflags(attr: *mut posix_spawnattr_t, flags: c_short) -> c_int; + pub fn posix_spawnattr_getpgroup( + attr: *const posix_spawnattr_t, + flags: *mut crate::pid_t, + ) -> c_int; + pub fn posix_spawnattr_setpgroup(attr: *mut posix_spawnattr_t, flags: crate::pid_t) -> c_int; + pub fn posix_spawnattr_getschedpolicy( + attr: *const posix_spawnattr_t, + flags: *mut c_int, + ) -> c_int; + pub fn posix_spawnattr_setschedpolicy(attr: *mut posix_spawnattr_t, flags: c_int) -> c_int; + pub fn posix_spawnattr_getschedparam( + attr: *const posix_spawnattr_t, + param: *mut crate::sched_param, + ) -> c_int; + pub fn posix_spawnattr_setschedparam( + attr: *mut posix_spawnattr_t, + param: *const crate::sched_param, + ) -> c_int; + + pub fn posix_spawn_file_actions_init(actions: *mut posix_spawn_file_actions_t) -> c_int; + pub fn posix_spawn_file_actions_destroy(actions: *mut posix_spawn_file_actions_t) -> c_int; + pub fn posix_spawn_file_actions_addopen( + actions: *mut posix_spawn_file_actions_t, + fd: c_int, + path: *const c_char, + oflag: c_int, + mode: mode_t, + ) -> c_int; + pub fn posix_spawn_file_actions_addclose( + actions: *mut posix_spawn_file_actions_t, + fd: c_int, + ) -> c_int; + pub fn posix_spawn_file_actions_adddup2( + actions: *mut posix_spawn_file_actions_t, + fd: c_int, + newfd: c_int, + ) -> c_int; +} + +#[link(name = "rt")] +extern "C" { + pub fn mq_close(mqd: crate::mqd_t) -> c_int; + pub fn mq_getattr(mqd: 
crate::mqd_t, attr: *mut crate::mq_attr) -> c_int; + pub fn mq_notify(mqd: crate::mqd_t, notification: *const crate::sigevent) -> c_int; + pub fn mq_open(name: *const c_char, oflag: c_int, ...) -> crate::mqd_t; + pub fn mq_receive( + mqd: crate::mqd_t, + msg_ptr: *mut c_char, + msg_len: size_t, + msg_prio: *mut c_uint, + ) -> ssize_t; + pub fn mq_send( + mqd: crate::mqd_t, + msg_ptr: *const c_char, + msg_len: size_t, + msg_prio: c_uint, + ) -> c_int; + pub fn mq_setattr( + mqd: crate::mqd_t, + newattr: *const crate::mq_attr, + oldattr: *mut crate::mq_attr, + ) -> c_int; + pub fn mq_timedreceive( + mqd: crate::mqd_t, + msg_ptr: *mut c_char, + msg_len: size_t, + msg_prio: *mut c_uint, + abs_timeout: *const crate::timespec, + ) -> ssize_t; + pub fn mq_timedsend( + mqd: crate::mqd_t, + msg_ptr: *const c_char, + msg_len: size_t, + msg_prio: c_uint, + abs_timeout: *const crate::timespec, + ) -> c_int; + pub fn mq_unlink(name: *const c_char) -> c_int; + + pub fn getrandom(buf: *mut c_void, buflen: size_t, flags: c_uint) -> ssize_t; + pub fn getentropy(buf: *mut c_void, buflen: size_t) -> c_int; +} + +#[link(name = "util")] +extern "C" { + pub fn openpty( + amaster: *mut c_int, + aslave: *mut c_int, + name: *mut c_char, + termp: *mut termios, + winp: *mut crate::winsize, + ) -> c_int; + pub fn forkpty( + amaster: *mut c_int, + name: *mut c_char, + termp: *mut termios, + winp: *mut crate::winsize, + ) -> crate::pid_t; + pub fn login_tty(fd: c_int) -> c_int; + pub fn fparseln( + stream: *mut crate::FILE, + len: *mut size_t, + lineno: *mut size_t, + delim: *const c_char, + flags: c_int, + ) -> *mut c_char; +} + +#[link(name = "execinfo")] +extern "C" { + pub fn backtrace(addrlist: *mut *mut c_void, len: size_t) -> size_t; + pub fn backtrace_symbols(addrlist: *const *mut c_void, len: size_t) -> *mut *mut c_char; + pub fn backtrace_symbols_fd(addrlist: *const *mut c_void, len: size_t, fd: c_int) -> c_int; +} + +#[link(name = "kvm")] +extern "C" { + pub fn kvm_open( + execfile: *const c_char, + corefile: *const c_char, + swapfile: *const c_char, + flags: c_int, + errstr: *const c_char, + ) -> *mut crate::kvm_t; + pub fn kvm_close(kd: *mut crate::kvm_t) -> c_int; + pub fn kvm_getprocs( + kd: *mut crate::kvm_t, + op: c_int, + arg: c_int, + cnt: *mut c_int, + ) -> *mut crate::kinfo_proc; + pub fn kvm_getloadavg(kd: *mut kvm_t, loadavg: *mut c_double, nelem: c_int) -> c_int; + pub fn kvm_openfiles( + execfile: *const c_char, + corefile: *const c_char, + swapfile: *const c_char, + flags: c_int, + errbuf: *mut c_char, + ) -> *mut crate::kvm_t; + pub fn kvm_read( + kd: *mut crate::kvm_t, + addr: c_ulong, + buf: *mut c_void, + nbytes: size_t, + ) -> ssize_t; + pub fn kvm_write( + kd: *mut crate::kvm_t, + addr: c_ulong, + buf: *const c_void, + nbytes: size_t, + ) -> ssize_t; +} + +cfg_if! { + if #[cfg(target_os = "freebsd")] { + mod freebsd; + pub use self::freebsd::*; + } else if #[cfg(target_os = "dragonfly")] { + mod dragonfly; + pub use self::dragonfly::*; + } else { + // ... + } +} diff --git a/vendor/libc/src/unix/bsd/mod.rs b/vendor/libc/src/unix/bsd/mod.rs new file mode 100644 index 00000000000000..24531db8531453 --- /dev/null +++ b/vendor/libc/src/unix/bsd/mod.rs @@ -0,0 +1,969 @@ +use crate::prelude::*; + +pub type off_t = i64; +pub type useconds_t = u32; +pub type blkcnt_t = i64; +pub type socklen_t = u32; +pub type sa_family_t = u8; +pub type pthread_t = crate::uintptr_t; +pub type nfds_t = c_uint; +pub type regoff_t = off_t; + +s! 
{ + pub struct sockaddr { + pub sa_len: u8, + pub sa_family: sa_family_t, + pub sa_data: [c_char; 14], + } + + pub struct sockaddr_in6 { + pub sin6_len: u8, + pub sin6_family: sa_family_t, + pub sin6_port: crate::in_port_t, + pub sin6_flowinfo: u32, + pub sin6_addr: crate::in6_addr, + pub sin6_scope_id: u32, + } + + pub struct passwd { + pub pw_name: *mut c_char, + pub pw_passwd: *mut c_char, + pub pw_uid: crate::uid_t, + pub pw_gid: crate::gid_t, + pub pw_change: crate::time_t, + pub pw_class: *mut c_char, + pub pw_gecos: *mut c_char, + pub pw_dir: *mut c_char, + pub pw_shell: *mut c_char, + pub pw_expire: crate::time_t, + + #[cfg(not(any( + target_os = "macos", + target_os = "ios", + target_os = "tvos", + target_os = "watchos", + target_os = "visionos", + target_os = "netbsd", + target_os = "openbsd" + )))] + pub pw_fields: c_int, + } + + pub struct ifaddrs { + pub ifa_next: *mut ifaddrs, + pub ifa_name: *mut c_char, + pub ifa_flags: c_uint, + pub ifa_addr: *mut crate::sockaddr, + pub ifa_netmask: *mut crate::sockaddr, + pub ifa_dstaddr: *mut crate::sockaddr, + pub ifa_data: *mut c_void, + #[cfg(target_os = "netbsd")] + pub ifa_addrflags: c_uint, + } + + pub struct fd_set { + #[cfg(all( + target_pointer_width = "64", + any(target_os = "freebsd", target_os = "dragonfly") + ))] + fds_bits: [i64; FD_SETSIZE as usize / 64], + #[cfg(not(all( + target_pointer_width = "64", + any(target_os = "freebsd", target_os = "dragonfly") + )))] + fds_bits: [i32; FD_SETSIZE as usize / 32], + } + + pub struct tm { + pub tm_sec: c_int, + pub tm_min: c_int, + pub tm_hour: c_int, + pub tm_mday: c_int, + pub tm_mon: c_int, + pub tm_year: c_int, + pub tm_wday: c_int, + pub tm_yday: c_int, + pub tm_isdst: c_int, + pub tm_gmtoff: c_long, + pub tm_zone: *mut c_char, + } + + pub struct msghdr { + pub msg_name: *mut c_void, + pub msg_namelen: crate::socklen_t, + pub msg_iov: *mut crate::iovec, + pub msg_iovlen: c_int, + pub msg_control: *mut c_void, + pub msg_controllen: crate::socklen_t, + pub msg_flags: c_int, + } + + pub struct cmsghdr { + pub cmsg_len: crate::socklen_t, + pub cmsg_level: c_int, + pub cmsg_type: c_int, + } + + pub struct fsid_t { + __fsid_val: [i32; 2], + } + + pub struct if_nameindex { + pub if_index: c_uint, + pub if_name: *mut c_char, + } + + pub struct regex_t { + __re_magic: c_int, + __re_nsub: size_t, + __re_endp: *const c_char, + __re_g: *mut c_void, + } + + pub struct regmatch_t { + pub rm_so: regoff_t, + pub rm_eo: regoff_t, + } + + pub struct option { + pub name: *const c_char, + pub has_arg: c_int, + pub flag: *mut c_int, + pub val: c_int, + } +} + +s_no_extra_traits! { + pub struct sockaddr_un { + pub sun_len: u8, + pub sun_family: sa_family_t, + pub sun_path: [c_char; 104], + } + + pub struct utsname { + #[cfg(not(target_os = "dragonfly"))] + pub sysname: [c_char; 256], + #[cfg(target_os = "dragonfly")] + pub sysname: [c_char; 32], + #[cfg(not(target_os = "dragonfly"))] + pub nodename: [c_char; 256], + #[cfg(target_os = "dragonfly")] + pub nodename: [c_char; 32], + #[cfg(not(target_os = "dragonfly"))] + pub release: [c_char; 256], + #[cfg(target_os = "dragonfly")] + pub release: [c_char; 32], + #[cfg(not(target_os = "dragonfly"))] + pub version: [c_char; 256], + #[cfg(target_os = "dragonfly")] + pub version: [c_char; 32], + #[cfg(not(target_os = "dragonfly"))] + pub machine: [c_char; 256], + #[cfg(target_os = "dragonfly")] + pub machine: [c_char; 32], + } +} + +cfg_if! 
{ + if #[cfg(feature = "extra_traits")] { + impl PartialEq for sockaddr_un { + fn eq(&self, other: &sockaddr_un) -> bool { + self.sun_len == other.sun_len + && self.sun_family == other.sun_family + && self + .sun_path + .iter() + .zip(other.sun_path.iter()) + .all(|(a, b)| a == b) + } + } + + impl Eq for sockaddr_un {} + + impl hash::Hash for sockaddr_un { + fn hash(&self, state: &mut H) { + self.sun_len.hash(state); + self.sun_family.hash(state); + self.sun_path.hash(state); + } + } + + impl PartialEq for utsname { + fn eq(&self, other: &utsname) -> bool { + self.sysname + .iter() + .zip(other.sysname.iter()) + .all(|(a, b)| a == b) + && self + .nodename + .iter() + .zip(other.nodename.iter()) + .all(|(a, b)| a == b) + && self + .release + .iter() + .zip(other.release.iter()) + .all(|(a, b)| a == b) + && self + .version + .iter() + .zip(other.version.iter()) + .all(|(a, b)| a == b) + && self + .machine + .iter() + .zip(other.machine.iter()) + .all(|(a, b)| a == b) + } + } + + impl Eq for utsname {} + + impl hash::Hash for utsname { + fn hash(&self, state: &mut H) { + self.sysname.hash(state); + self.nodename.hash(state); + self.release.hash(state); + self.version.hash(state); + self.machine.hash(state); + } + } + } +} + +pub const LC_ALL: c_int = 0; +pub const LC_COLLATE: c_int = 1; +pub const LC_CTYPE: c_int = 2; +pub const LC_MONETARY: c_int = 3; +pub const LC_NUMERIC: c_int = 4; +pub const LC_TIME: c_int = 5; +pub const LC_MESSAGES: c_int = 6; + +pub const FIOCLEX: c_ulong = 0x20006601; +pub const FIONCLEX: c_ulong = 0x20006602; +pub const FIONREAD: c_ulong = 0x4004667f; +pub const FIONBIO: c_ulong = 0x8004667e; +pub const FIOASYNC: c_ulong = 0x8004667d; +pub const FIOSETOWN: c_ulong = 0x8004667c; +pub const FIOGETOWN: c_ulong = 0x4004667b; + +pub const PATH_MAX: c_int = 1024; +pub const MAXPATHLEN: c_int = PATH_MAX; + +pub const IOV_MAX: c_int = 1024; + +pub const SA_ONSTACK: c_int = 0x0001; +pub const SA_SIGINFO: c_int = 0x0040; +pub const SA_RESTART: c_int = 0x0002; +pub const SA_RESETHAND: c_int = 0x0004; +pub const SA_NOCLDSTOP: c_int = 0x0008; +pub const SA_NODEFER: c_int = 0x0010; +pub const SA_NOCLDWAIT: c_int = 0x0020; + +pub const SS_ONSTACK: c_int = 1; +pub const SS_DISABLE: c_int = 4; + +pub const SIGCHLD: c_int = 20; +pub const SIGBUS: c_int = 10; +pub const SIGUSR1: c_int = 30; +pub const SIGUSR2: c_int = 31; +pub const SIGCONT: c_int = 19; +pub const SIGSTOP: c_int = 17; +pub const SIGTSTP: c_int = 18; +pub const SIGURG: c_int = 16; +pub const SIGIO: c_int = 23; +pub const SIGSYS: c_int = 12; +pub const SIGTTIN: c_int = 21; +pub const SIGTTOU: c_int = 22; +pub const SIGXCPU: c_int = 24; +pub const SIGXFSZ: c_int = 25; +pub const SIGVTALRM: c_int = 26; +pub const SIGPROF: c_int = 27; +pub const SIGWINCH: c_int = 28; +pub const SIGINFO: c_int = 29; + +pub const SIG_SETMASK: c_int = 3; +pub const SIG_BLOCK: c_int = 0x1; +pub const SIG_UNBLOCK: c_int = 0x2; + +pub const IP_TOS: c_int = 3; +pub const IP_MULTICAST_IF: c_int = 9; +pub const IP_MULTICAST_TTL: c_int = 10; +pub const IP_MULTICAST_LOOP: c_int = 11; + +pub const IPV6_UNICAST_HOPS: c_int = 4; +pub const IPV6_MULTICAST_IF: c_int = 9; +pub const IPV6_MULTICAST_HOPS: c_int = 10; +pub const IPV6_MULTICAST_LOOP: c_int = 11; +pub const IPV6_V6ONLY: c_int = 27; +pub const IPV6_DONTFRAG: c_int = 62; + +pub const IPTOS_ECN_NOTECT: u8 = 0x00; +pub const IPTOS_ECN_MASK: u8 = 0x03; +pub const IPTOS_ECN_ECT1: u8 = 0x01; +pub const IPTOS_ECN_ECT0: u8 = 0x02; +pub const IPTOS_ECN_CE: u8 = 0x03; + +pub const ST_RDONLY: c_ulong = 1; 
+ +pub const SCM_RIGHTS: c_int = 0x01; + +pub const NCCS: usize = 20; + +pub const O_ACCMODE: c_int = 0x3; +pub const O_RDONLY: c_int = 0; +pub const O_WRONLY: c_int = 1; +pub const O_RDWR: c_int = 2; +pub const O_APPEND: c_int = 8; +pub const O_CREAT: c_int = 512; +pub const O_TRUNC: c_int = 1024; +pub const O_EXCL: c_int = 2048; +pub const O_ASYNC: c_int = 0x40; +pub const O_SYNC: c_int = 0x80; +pub const O_NONBLOCK: c_int = 0x4; +pub const O_NOFOLLOW: c_int = 0x100; +pub const O_SHLOCK: c_int = 0x10; +pub const O_EXLOCK: c_int = 0x20; +pub const O_FSYNC: c_int = O_SYNC; +pub const O_NDELAY: c_int = O_NONBLOCK; + +pub const F_GETOWN: c_int = 5; +pub const F_SETOWN: c_int = 6; + +pub const F_RDLCK: c_short = 1; +pub const F_UNLCK: c_short = 2; +pub const F_WRLCK: c_short = 3; + +pub const MNT_RDONLY: c_int = 0x00000001; +pub const MNT_SYNCHRONOUS: c_int = 0x00000002; +pub const MNT_NOEXEC: c_int = 0x00000004; +pub const MNT_NOSUID: c_int = 0x00000008; +pub const MNT_ASYNC: c_int = 0x00000040; +pub const MNT_EXPORTED: c_int = 0x00000100; +pub const MNT_UPDATE: c_int = 0x00010000; +pub const MNT_RELOAD: c_int = 0x00040000; +pub const MNT_FORCE: c_int = 0x00080000; + +pub const Q_SYNC: c_int = 0x600; +pub const Q_QUOTAON: c_int = 0x100; +pub const Q_QUOTAOFF: c_int = 0x200; + +pub const TCIOFF: c_int = 3; +pub const TCION: c_int = 4; +pub const TCOOFF: c_int = 1; +pub const TCOON: c_int = 2; +pub const TCIFLUSH: c_int = 1; +pub const TCOFLUSH: c_int = 2; +pub const TCIOFLUSH: c_int = 3; +pub const TCSANOW: c_int = 0; +pub const TCSADRAIN: c_int = 1; +pub const TCSAFLUSH: c_int = 2; +pub const VEOF: usize = 0; +pub const VEOL: usize = 1; +pub const VEOL2: usize = 2; +pub const VERASE: usize = 3; +pub const VWERASE: usize = 4; +pub const VKILL: usize = 5; +pub const VREPRINT: usize = 6; +pub const VINTR: usize = 8; +pub const VQUIT: usize = 9; +pub const VSUSP: usize = 10; +pub const VDSUSP: usize = 11; +pub const VSTART: usize = 12; +pub const VSTOP: usize = 13; +pub const VLNEXT: usize = 14; +pub const VDISCARD: usize = 15; +pub const VMIN: usize = 16; +pub const VTIME: usize = 17; +pub const VSTATUS: usize = 18; +pub const _POSIX_VDISABLE: crate::cc_t = 0xff; +pub const IGNBRK: crate::tcflag_t = 0x00000001; +pub const BRKINT: crate::tcflag_t = 0x00000002; +pub const IGNPAR: crate::tcflag_t = 0x00000004; +pub const PARMRK: crate::tcflag_t = 0x00000008; +pub const INPCK: crate::tcflag_t = 0x00000010; +pub const ISTRIP: crate::tcflag_t = 0x00000020; +pub const INLCR: crate::tcflag_t = 0x00000040; +pub const IGNCR: crate::tcflag_t = 0x00000080; +pub const ICRNL: crate::tcflag_t = 0x00000100; +pub const IXON: crate::tcflag_t = 0x00000200; +pub const IXOFF: crate::tcflag_t = 0x00000400; +pub const IXANY: crate::tcflag_t = 0x00000800; +pub const IMAXBEL: crate::tcflag_t = 0x00002000; +pub const OPOST: crate::tcflag_t = 0x1; +pub const ONLCR: crate::tcflag_t = 0x2; +pub const OXTABS: crate::tcflag_t = 0x4; +pub const ONOEOT: crate::tcflag_t = 0x8; +pub const CIGNORE: crate::tcflag_t = 0x00000001; +pub const CSIZE: crate::tcflag_t = 0x00000300; +pub const CS5: crate::tcflag_t = 0x00000000; +pub const CS6: crate::tcflag_t = 0x00000100; +pub const CS7: crate::tcflag_t = 0x00000200; +pub const CS8: crate::tcflag_t = 0x00000300; +pub const CSTOPB: crate::tcflag_t = 0x00000400; +pub const CREAD: crate::tcflag_t = 0x00000800; +pub const PARENB: crate::tcflag_t = 0x00001000; +pub const PARODD: crate::tcflag_t = 0x00002000; +pub const HUPCL: crate::tcflag_t = 0x00004000; +pub const CLOCAL: crate::tcflag_t = 
0x00008000; +pub const ECHOKE: crate::tcflag_t = 0x00000001; +pub const ECHOE: crate::tcflag_t = 0x00000002; +pub const ECHOK: crate::tcflag_t = 0x00000004; +pub const ECHO: crate::tcflag_t = 0x00000008; +pub const ECHONL: crate::tcflag_t = 0x00000010; +pub const ECHOPRT: crate::tcflag_t = 0x00000020; +pub const ECHOCTL: crate::tcflag_t = 0x00000040; +pub const ISIG: crate::tcflag_t = 0x00000080; +pub const ICANON: crate::tcflag_t = 0x00000100; +pub const ALTWERASE: crate::tcflag_t = 0x00000200; +pub const IEXTEN: crate::tcflag_t = 0x00000400; +pub const EXTPROC: crate::tcflag_t = 0x00000800; +pub const TOSTOP: crate::tcflag_t = 0x00400000; +pub const FLUSHO: crate::tcflag_t = 0x00800000; +pub const NOKERNINFO: crate::tcflag_t = 0x02000000; +pub const PENDIN: crate::tcflag_t = 0x20000000; +pub const NOFLSH: crate::tcflag_t = 0x80000000; +pub const MDMBUF: crate::tcflag_t = 0x00100000; + +pub const WNOHANG: c_int = 0x00000001; +pub const WUNTRACED: c_int = 0x00000002; + +pub const RTLD_LAZY: c_int = 0x1; +pub const RTLD_NOW: c_int = 0x2; +pub const RTLD_NEXT: *mut c_void = -1isize as *mut c_void; +pub const RTLD_DEFAULT: *mut c_void = -2isize as *mut c_void; +pub const RTLD_SELF: *mut c_void = -3isize as *mut c_void; + +pub const LOG_CRON: c_int = 9 << 3; +pub const LOG_AUTHPRIV: c_int = 10 << 3; +pub const LOG_FTP: c_int = 11 << 3; +pub const LOG_PERROR: c_int = 0x20; + +pub const TCP_NODELAY: c_int = 1; +pub const TCP_MAXSEG: c_int = 2; + +pub const PIPE_BUF: usize = 512; + +// si_code values for SIGBUS signal +pub const BUS_ADRALN: c_int = 1; +pub const BUS_ADRERR: c_int = 2; +pub const BUS_OBJERR: c_int = 3; + +// si_code values for SIGCHLD signal +pub const CLD_EXITED: c_int = 1; +pub const CLD_KILLED: c_int = 2; +pub const CLD_DUMPED: c_int = 3; +pub const CLD_TRAPPED: c_int = 4; +pub const CLD_STOPPED: c_int = 5; +pub const CLD_CONTINUED: c_int = 6; + +pub const POLLIN: c_short = 0x1; +pub const POLLPRI: c_short = 0x2; +pub const POLLOUT: c_short = 0x4; +pub const POLLERR: c_short = 0x8; +pub const POLLHUP: c_short = 0x10; +pub const POLLNVAL: c_short = 0x20; +pub const POLLRDNORM: c_short = 0x040; +pub const POLLWRNORM: c_short = 0x004; +pub const POLLRDBAND: c_short = 0x080; +pub const POLLWRBAND: c_short = 0x100; + +pub const BIOCGBLEN: c_ulong = 0x40044266; +pub const BIOCSBLEN: c_ulong = 0xc0044266; +pub const BIOCFLUSH: c_uint = 0x20004268; +pub const BIOCPROMISC: c_uint = 0x20004269; +pub const BIOCGDLT: c_ulong = 0x4004426a; +pub const BIOCGETIF: c_ulong = 0x4020426b; +pub const BIOCSETIF: c_ulong = 0x8020426c; +pub const BIOCGSTATS: c_ulong = 0x4008426f; +pub const BIOCIMMEDIATE: c_ulong = 0x80044270; +pub const BIOCVERSION: c_ulong = 0x40044271; +pub const BIOCGHDRCMPLT: c_ulong = 0x40044274; +pub const BIOCSHDRCMPLT: c_ulong = 0x80044275; +pub const SIOCGIFADDR: c_ulong = 0xc0206921; + +pub const REG_BASIC: c_int = 0o0000; +pub const REG_EXTENDED: c_int = 0o0001; +pub const REG_ICASE: c_int = 0o0002; +pub const REG_NOSUB: c_int = 0o0004; +pub const REG_NEWLINE: c_int = 0o0010; +pub const REG_NOSPEC: c_int = 0o0020; +pub const REG_PEND: c_int = 0o0040; +pub const REG_DUMP: c_int = 0o0200; + +pub const REG_NOMATCH: c_int = 1; +pub const REG_BADPAT: c_int = 2; +pub const REG_ECOLLATE: c_int = 3; +pub const REG_ECTYPE: c_int = 4; +pub const REG_EESCAPE: c_int = 5; +pub const REG_ESUBREG: c_int = 6; +pub const REG_EBRACK: c_int = 7; +pub const REG_EPAREN: c_int = 8; +pub const REG_EBRACE: c_int = 9; +pub const REG_BADBR: c_int = 10; +pub const REG_ERANGE: c_int = 11; +pub const 
REG_ESPACE: c_int = 12; +pub const REG_BADRPT: c_int = 13; +pub const REG_EMPTY: c_int = 14; +pub const REG_ASSERT: c_int = 15; +pub const REG_INVARG: c_int = 16; +pub const REG_ATOI: c_int = 255; +pub const REG_ITOA: c_int = 0o0400; + +pub const REG_NOTBOL: c_int = 0o00001; +pub const REG_NOTEOL: c_int = 0o00002; +pub const REG_STARTEND: c_int = 0o00004; +pub const REG_TRACE: c_int = 0o00400; +pub const REG_LARGE: c_int = 0o01000; +pub const REG_BACKR: c_int = 0o02000; + +pub const TIOCCBRK: c_uint = 0x2000747a; +pub const TIOCSBRK: c_uint = 0x2000747b; + +pub const PRIO_PROCESS: c_int = 0; +pub const PRIO_PGRP: c_int = 1; +pub const PRIO_USER: c_int = 2; + +pub const ITIMER_REAL: c_int = 0; +pub const ITIMER_VIRTUAL: c_int = 1; +pub const ITIMER_PROF: c_int = 2; + +// net/route.h + +pub const RTF_UP: c_int = 0x1; +pub const RTF_GATEWAY: c_int = 0x2; +pub const RTF_HOST: c_int = 0x4; +pub const RTF_REJECT: c_int = 0x8; +pub const RTF_DYNAMIC: c_int = 0x10; +pub const RTF_MODIFIED: c_int = 0x20; +pub const RTF_DONE: c_int = 0x40; +pub const RTF_STATIC: c_int = 0x800; +pub const RTF_BLACKHOLE: c_int = 0x1000; +pub const RTF_PROTO2: c_int = 0x4000; +pub const RTF_PROTO1: c_int = 0x8000; + +// Message types +pub const RTM_ADD: c_int = 0x1; +pub const RTM_DELETE: c_int = 0x2; +pub const RTM_CHANGE: c_int = 0x3; +pub const RTM_GET: c_int = 0x4; +pub const RTM_LOSING: c_int = 0x5; +pub const RTM_REDIRECT: c_int = 0x6; +pub const RTM_MISS: c_int = 0x7; + +// Bitmask values for rtm_addrs. +pub const RTA_DST: c_int = 0x1; +pub const RTA_GATEWAY: c_int = 0x2; +pub const RTA_NETMASK: c_int = 0x4; +pub const RTA_GENMASK: c_int = 0x8; +pub const RTA_IFP: c_int = 0x10; +pub const RTA_IFA: c_int = 0x20; +pub const RTA_AUTHOR: c_int = 0x40; +pub const RTA_BRD: c_int = 0x80; + +// Index offsets for sockaddr array for alternate internal encoding. +pub const RTAX_DST: c_int = 0; +pub const RTAX_GATEWAY: c_int = 1; +pub const RTAX_NETMASK: c_int = 2; +pub const RTAX_GENMASK: c_int = 3; +pub const RTAX_IFP: c_int = 4; +pub const RTAX_IFA: c_int = 5; +pub const RTAX_AUTHOR: c_int = 6; +pub const RTAX_BRD: c_int = 7; + +f! { + pub fn CMSG_FIRSTHDR(mhdr: *const crate::msghdr) -> *mut cmsghdr { + if (*mhdr).msg_controllen as usize >= size_of::() { + (*mhdr).msg_control.cast::() + } else { + core::ptr::null_mut() + } + } + + pub fn FD_CLR(fd: c_int, set: *mut fd_set) -> () { + let bits = size_of_val(&(*set).fds_bits[0]) * 8; + let fd = fd as usize; + (*set).fds_bits[fd / bits] &= !(1 << (fd % bits)); + return; + } + + pub fn FD_ISSET(fd: c_int, set: *const fd_set) -> bool { + let bits = size_of_val(&(*set).fds_bits[0]) * 8; + let fd = fd as usize; + return ((*set).fds_bits[fd / bits] & (1 << (fd % bits))) != 0; + } + + pub fn FD_SET(fd: c_int, set: *mut fd_set) -> () { + let bits = size_of_val(&(*set).fds_bits[0]) * 8; + let fd = fd as usize; + (*set).fds_bits[fd / bits] |= 1 << (fd % bits); + return; + } + + pub fn FD_ZERO(set: *mut fd_set) -> () { + for slot in &mut (*set).fds_bits { + *slot = 0; + } + } +} + +safe_f! 
{ + pub const fn WTERMSIG(status: c_int) -> c_int { + status & 0o177 + } + + pub const fn WIFEXITED(status: c_int) -> bool { + (status & 0o177) == 0 + } + + pub const fn WEXITSTATUS(status: c_int) -> c_int { + (status >> 8) & 0x00ff + } + + pub const fn WCOREDUMP(status: c_int) -> bool { + (status & 0o200) != 0 + } + + pub const fn QCMD(cmd: c_int, type_: c_int) -> c_int { + (cmd << 8) | (type_ & 0x00ff) + } +} + +extern "C" { + #[cfg_attr( + all(target_os = "macos", target_arch = "x86"), + link_name = "getrlimit$UNIX2003" + )] + pub fn getrlimit(resource: c_int, rlim: *mut crate::rlimit) -> c_int; + #[cfg_attr( + all(target_os = "macos", target_arch = "x86"), + link_name = "setrlimit$UNIX2003" + )] + pub fn setrlimit(resource: c_int, rlim: *const crate::rlimit) -> c_int; + + pub fn strerror_r(errnum: c_int, buf: *mut c_char, buflen: size_t) -> c_int; + pub fn abs(i: c_int) -> c_int; + pub fn labs(i: c_long) -> c_long; + #[cfg_attr( + all(target_os = "freebsd", any(freebsd12, freebsd11, freebsd10)), + link_name = "rand@FBSD_1.0" + )] + pub fn rand() -> c_int; + #[cfg_attr( + all(target_os = "freebsd", any(freebsd12, freebsd11, freebsd10)), + link_name = "srand@FBSD_1.0" + )] + pub fn srand(seed: c_uint); + + pub fn getifaddrs(ifap: *mut *mut crate::ifaddrs) -> c_int; + pub fn freeifaddrs(ifa: *mut crate::ifaddrs); + pub fn setgroups(ngroups: c_int, ptr: *const crate::gid_t) -> c_int; + pub fn setlogin(name: *const c_char) -> c_int; + pub fn ioctl(fd: c_int, request: c_ulong, ...) -> c_int; + pub fn kqueue() -> c_int; + pub fn unmount(target: *const c_char, arg: c_int) -> c_int; + pub fn syscall(num: c_int, ...) -> c_int; + #[cfg_attr(target_os = "netbsd", link_name = "__getpwent50")] + pub fn getpwent() -> *mut passwd; + pub fn setpwent(); + pub fn endpwent(); + pub fn endgrent(); + pub fn getgrent() -> *mut crate::group; + + pub fn getprogname() -> *const c_char; + pub fn setprogname(name: *const c_char); + pub fn getloadavg(loadavg: *mut c_double, nelem: c_int) -> c_int; + pub fn if_nameindex() -> *mut if_nameindex; + pub fn if_freenameindex(ptr: *mut if_nameindex); + + pub fn getpeereid(socket: c_int, euid: *mut crate::uid_t, egid: *mut crate::gid_t) -> c_int; + + #[cfg_attr( + all(target_os = "macos", not(target_arch = "aarch64")), + link_name = "glob$INODE64" + )] + #[cfg_attr(target_os = "netbsd", link_name = "__glob30")] + #[cfg_attr( + all(target_os = "freebsd", any(freebsd11, freebsd10)), + link_name = "glob@FBSD_1.0" + )] + pub fn glob( + pattern: *const c_char, + flags: c_int, + errfunc: Option c_int>, + pglob: *mut crate::glob_t, + ) -> c_int; + #[cfg_attr(target_os = "netbsd", link_name = "__globfree30")] + #[cfg_attr( + all(target_os = "freebsd", any(freebsd11, freebsd10)), + link_name = "globfree@FBSD_1.0" + )] + pub fn globfree(pglob: *mut crate::glob_t); + + pub fn posix_madvise(addr: *mut c_void, len: size_t, advice: c_int) -> c_int; + + pub fn shm_unlink(name: *const c_char) -> c_int; + + #[cfg_attr( + all(target_os = "macos", target_arch = "x86_64"), + link_name = "seekdir$INODE64" + )] + #[cfg_attr( + all(target_os = "macos", target_arch = "x86"), + link_name = "seekdir$INODE64$UNIX2003" + )] + pub fn seekdir(dirp: *mut crate::DIR, loc: c_long); + + #[cfg_attr( + all(target_os = "macos", target_arch = "x86_64"), + link_name = "telldir$INODE64" + )] + #[cfg_attr( + all(target_os = "macos", target_arch = "x86"), + link_name = "telldir$INODE64$UNIX2003" + )] + pub fn telldir(dirp: *mut crate::DIR) -> c_long; + pub fn madvise(addr: *mut c_void, len: size_t, advice: c_int) 
-> c_int; + + #[cfg_attr( + all(target_os = "macos", target_arch = "x86"), + link_name = "msync$UNIX2003" + )] + #[cfg_attr(target_os = "netbsd", link_name = "__msync13")] + pub fn msync(addr: *mut c_void, len: size_t, flags: c_int) -> c_int; + + #[cfg_attr( + all(target_os = "macos", target_arch = "x86"), + link_name = "recvfrom$UNIX2003" + )] + pub fn recvfrom( + socket: c_int, + buf: *mut c_void, + len: size_t, + flags: c_int, + addr: *mut crate::sockaddr, + addrlen: *mut crate::socklen_t, + ) -> ssize_t; + pub fn mkstemps(template: *mut c_char, suffixlen: c_int) -> c_int; + #[cfg_attr(target_os = "netbsd", link_name = "__futimes50")] + pub fn futimes(fd: c_int, times: *const crate::timeval) -> c_int; + pub fn nl_langinfo(item: crate::nl_item) -> *mut c_char; + + #[cfg_attr( + all(target_os = "macos", target_arch = "x86"), + link_name = "bind$UNIX2003" + )] + pub fn bind( + socket: c_int, + address: *const crate::sockaddr, + address_len: crate::socklen_t, + ) -> c_int; + + #[cfg_attr( + all(target_os = "macos", target_arch = "x86"), + link_name = "writev$UNIX2003" + )] + pub fn writev(fd: c_int, iov: *const crate::iovec, iovcnt: c_int) -> ssize_t; + #[cfg_attr( + all(target_os = "macos", target_arch = "x86"), + link_name = "readv$UNIX2003" + )] + pub fn readv(fd: c_int, iov: *const crate::iovec, iovcnt: c_int) -> ssize_t; + + #[cfg_attr( + all(target_os = "macos", target_arch = "x86"), + link_name = "sendmsg$UNIX2003" + )] + pub fn sendmsg(fd: c_int, msg: *const crate::msghdr, flags: c_int) -> ssize_t; + #[cfg_attr( + all(target_os = "macos", target_arch = "x86"), + link_name = "recvmsg$UNIX2003" + )] + pub fn recvmsg(fd: c_int, msg: *mut crate::msghdr, flags: c_int) -> ssize_t; + + pub fn sync(); + pub fn getgrgid_r( + gid: crate::gid_t, + grp: *mut crate::group, + buf: *mut c_char, + buflen: size_t, + result: *mut *mut crate::group, + ) -> c_int; + #[cfg_attr( + all(target_os = "macos", target_arch = "x86"), + link_name = "sigaltstack$UNIX2003" + )] + #[cfg_attr(target_os = "netbsd", link_name = "__sigaltstack14")] + pub fn sigaltstack(ss: *const stack_t, oss: *mut stack_t) -> c_int; + pub fn sigsuspend(mask: *const crate::sigset_t) -> c_int; + pub fn sem_close(sem: *mut sem_t) -> c_int; + pub fn getdtablesize() -> c_int; + pub fn getgrnam_r( + name: *const c_char, + grp: *mut crate::group, + buf: *mut c_char, + buflen: size_t, + result: *mut *mut crate::group, + ) -> c_int; + #[cfg_attr( + all(target_os = "macos", target_arch = "x86"), + link_name = "pthread_sigmask$UNIX2003" + )] + pub fn pthread_sigmask(how: c_int, set: *const sigset_t, oldset: *mut sigset_t) -> c_int; + pub fn sem_open(name: *const c_char, oflag: c_int, ...) 
-> *mut sem_t; + pub fn getgrnam(name: *const c_char) -> *mut crate::group; + #[cfg_attr( + all(target_os = "macos", target_arch = "x86"), + link_name = "pthread_cancel$UNIX2003" + )] + pub fn pthread_cancel(thread: crate::pthread_t) -> c_int; + pub fn pthread_kill(thread: crate::pthread_t, sig: c_int) -> c_int; + pub fn sched_get_priority_min(policy: c_int) -> c_int; + pub fn sched_get_priority_max(policy: c_int) -> c_int; + pub fn sem_unlink(name: *const c_char) -> c_int; + #[cfg_attr(target_os = "netbsd", link_name = "__getpwnam_r50")] + pub fn getpwnam_r( + name: *const c_char, + pwd: *mut passwd, + buf: *mut c_char, + buflen: size_t, + result: *mut *mut passwd, + ) -> c_int; + #[cfg_attr(target_os = "netbsd", link_name = "__getpwuid_r50")] + pub fn getpwuid_r( + uid: crate::uid_t, + pwd: *mut passwd, + buf: *mut c_char, + buflen: size_t, + result: *mut *mut passwd, + ) -> c_int; + #[cfg_attr( + all(target_os = "macos", target_arch = "x86"), + link_name = "sigwait$UNIX2003" + )] + pub fn sigwait(set: *const sigset_t, sig: *mut c_int) -> c_int; + pub fn pthread_atfork( + prepare: Option, + parent: Option, + child: Option, + ) -> c_int; + pub fn getgrgid(gid: crate::gid_t) -> *mut crate::group; + #[cfg_attr( + all(target_os = "macos", target_arch = "x86"), + link_name = "popen$UNIX2003" + )] + pub fn popen(command: *const c_char, mode: *const c_char) -> *mut crate::FILE; + pub fn faccessat(dirfd: c_int, pathname: *const c_char, mode: c_int, flags: c_int) -> c_int; + pub fn pthread_create( + native: *mut crate::pthread_t, + attr: *const crate::pthread_attr_t, + f: extern "C" fn(*mut c_void) -> *mut c_void, + value: *mut c_void, + ) -> c_int; + pub fn acct(filename: *const c_char) -> c_int; + #[cfg_attr( + all(target_os = "macos", target_arch = "x86"), + link_name = "wait4$UNIX2003" + )] + #[cfg_attr( + all(target_os = "freebsd", any(freebsd12, freebsd11, freebsd10)), + link_name = "wait4@FBSD_1.0" + )] + pub fn wait4( + pid: crate::pid_t, + status: *mut c_int, + options: c_int, + rusage: *mut crate::rusage, + ) -> crate::pid_t; + #[cfg_attr( + all(target_os = "macos", target_arch = "x86"), + link_name = "getitimer$UNIX2003" + )] + pub fn getitimer(which: c_int, curr_value: *mut crate::itimerval) -> c_int; + #[cfg_attr( + all(target_os = "macos", target_arch = "x86"), + link_name = "setitimer$UNIX2003" + )] + pub fn setitimer( + which: c_int, + new_value: *const crate::itimerval, + old_value: *mut crate::itimerval, + ) -> c_int; + + pub fn regcomp(preg: *mut regex_t, pattern: *const c_char, cflags: c_int) -> c_int; + + pub fn regexec( + preg: *const regex_t, + input: *const c_char, + nmatch: size_t, + pmatch: *mut regmatch_t, + eflags: c_int, + ) -> c_int; + + pub fn regerror( + errcode: c_int, + preg: *const regex_t, + errbuf: *mut c_char, + errbuf_size: size_t, + ) -> size_t; + + pub fn regfree(preg: *mut regex_t); + + pub fn arc4random() -> u32; + pub fn arc4random_buf(buf: *mut c_void, size: size_t); + pub fn arc4random_uniform(l: u32) -> u32; + + pub fn drand48() -> c_double; + pub fn erand48(xseed: *mut c_ushort) -> c_double; + pub fn lrand48() -> c_long; + pub fn nrand48(xseed: *mut c_ushort) -> c_long; + pub fn mrand48() -> c_long; + pub fn jrand48(xseed: *mut c_ushort) -> c_long; + pub fn srand48(seed: c_long); + pub fn seed48(xseed: *mut c_ushort) -> *mut c_ushort; + pub fn lcong48(p: *mut c_ushort); + pub fn getopt_long( + argc: c_int, + argv: *const *mut c_char, + optstring: *const c_char, + longopts: *const option, + longindex: *mut c_int, + ) -> c_int; + + pub fn strftime( + 
buf: *mut c_char, + maxsize: size_t, + format: *const c_char, + timeptr: *const crate::tm, + ) -> size_t; + pub fn strftime_l( + buf: *mut c_char, + maxsize: size_t, + format: *const c_char, + timeptr: *const crate::tm, + locale: crate::locale_t, + ) -> size_t; + + pub fn devname(dev: crate::dev_t, mode_t: crate::mode_t) -> *mut c_char; +} + +cfg_if! { + if #[cfg(any( + target_os = "macos", + target_os = "ios", + target_os = "tvos", + target_os = "watchos", + target_os = "visionos" + ))] { + mod apple; + pub use self::apple::*; + } else if #[cfg(any(target_os = "openbsd", target_os = "netbsd"))] { + mod netbsdlike; + pub use self::netbsdlike::*; + } else if #[cfg(any(target_os = "freebsd", target_os = "dragonfly"))] { + mod freebsdlike; + pub use self::freebsdlike::*; + } else { + // Unknown target_os + } +} diff --git a/vendor/libc/src/unix/bsd/netbsdlike/mod.rs b/vendor/libc/src/unix/bsd/netbsdlike/mod.rs new file mode 100644 index 00000000000000..bc3e4cdf094ff3 --- /dev/null +++ b/vendor/libc/src/unix/bsd/netbsdlike/mod.rs @@ -0,0 +1,905 @@ +use crate::off_t; +use crate::prelude::*; + +pub type wchar_t = i32; +pub type time_t = i64; +pub type mode_t = u32; +pub type nlink_t = u32; +pub type ino_t = u64; +pub type pthread_key_t = c_int; +pub type rlim_t = u64; +pub type speed_t = c_uint; +pub type tcflag_t = c_uint; +pub type nl_item = c_long; +pub type clockid_t = c_int; +pub type id_t = u32; +pub type sem_t = *mut sem; +pub type key_t = c_long; + +#[derive(Debug)] +pub enum timezone {} +impl Copy for timezone {} +impl Clone for timezone { + fn clone(&self) -> timezone { + *self + } +} +#[derive(Debug)] +pub enum sem {} +impl Copy for sem {} +impl Clone for sem { + fn clone(&self) -> sem { + *self + } +} + +s! { + pub struct sched_param { + pub sched_priority: c_int, + } + + pub struct sigaction { + pub sa_sigaction: crate::sighandler_t, + pub sa_mask: crate::sigset_t, + pub sa_flags: c_int, + } + + pub struct stack_t { + pub ss_sp: *mut c_void, + pub ss_size: size_t, + pub ss_flags: c_int, + } + + pub struct in6_pktinfo { + pub ipi6_addr: crate::in6_addr, + pub ipi6_ifindex: c_uint, + } + + pub struct termios { + pub c_iflag: crate::tcflag_t, + pub c_oflag: crate::tcflag_t, + pub c_cflag: crate::tcflag_t, + pub c_lflag: crate::tcflag_t, + pub c_cc: [crate::cc_t; crate::NCCS], + pub c_ispeed: c_int, + pub c_ospeed: c_int, + } + + pub struct flock { + pub l_start: off_t, + pub l_len: off_t, + pub l_pid: crate::pid_t, + pub l_type: c_short, + pub l_whence: c_short, + } + + pub struct ipc_perm { + pub cuid: crate::uid_t, + pub cgid: crate::gid_t, + pub uid: crate::uid_t, + pub gid: crate::gid_t, + pub mode: mode_t, + #[cfg(target_os = "openbsd")] + pub seq: c_ushort, + #[cfg(target_os = "netbsd")] + pub _seq: c_ushort, + #[cfg(target_os = "openbsd")] + pub key: crate::key_t, + #[cfg(target_os = "netbsd")] + pub _key: crate::key_t, + } + + pub struct ptrace_io_desc { + pub piod_op: c_int, + pub piod_offs: *mut c_void, + pub piod_addr: *mut c_void, + pub piod_len: size_t, + } + + pub struct mmsghdr { + pub msg_hdr: crate::msghdr, + pub msg_len: c_uint, + } +} + +pub const D_T_FMT: crate::nl_item = 0; +pub const D_FMT: crate::nl_item = 1; +pub const T_FMT: crate::nl_item = 2; +pub const T_FMT_AMPM: crate::nl_item = 3; +pub const AM_STR: crate::nl_item = 4; +pub const PM_STR: crate::nl_item = 5; + +pub const DAY_1: crate::nl_item = 6; +pub const DAY_2: crate::nl_item = 7; +pub const DAY_3: crate::nl_item = 8; +pub const DAY_4: crate::nl_item = 9; +pub const DAY_5: crate::nl_item = 10; +pub 
const DAY_6: crate::nl_item = 11; +pub const DAY_7: crate::nl_item = 12; + +pub const ABDAY_1: crate::nl_item = 13; +pub const ABDAY_2: crate::nl_item = 14; +pub const ABDAY_3: crate::nl_item = 15; +pub const ABDAY_4: crate::nl_item = 16; +pub const ABDAY_5: crate::nl_item = 17; +pub const ABDAY_6: crate::nl_item = 18; +pub const ABDAY_7: crate::nl_item = 19; + +pub const MON_1: crate::nl_item = 20; +pub const MON_2: crate::nl_item = 21; +pub const MON_3: crate::nl_item = 22; +pub const MON_4: crate::nl_item = 23; +pub const MON_5: crate::nl_item = 24; +pub const MON_6: crate::nl_item = 25; +pub const MON_7: crate::nl_item = 26; +pub const MON_8: crate::nl_item = 27; +pub const MON_9: crate::nl_item = 28; +pub const MON_10: crate::nl_item = 29; +pub const MON_11: crate::nl_item = 30; +pub const MON_12: crate::nl_item = 31; + +pub const ABMON_1: crate::nl_item = 32; +pub const ABMON_2: crate::nl_item = 33; +pub const ABMON_3: crate::nl_item = 34; +pub const ABMON_4: crate::nl_item = 35; +pub const ABMON_5: crate::nl_item = 36; +pub const ABMON_6: crate::nl_item = 37; +pub const ABMON_7: crate::nl_item = 38; +pub const ABMON_8: crate::nl_item = 39; +pub const ABMON_9: crate::nl_item = 40; +pub const ABMON_10: crate::nl_item = 41; +pub const ABMON_11: crate::nl_item = 42; +pub const ABMON_12: crate::nl_item = 43; + +pub const RADIXCHAR: crate::nl_item = 44; +pub const THOUSEP: crate::nl_item = 45; +pub const YESSTR: crate::nl_item = 46; +pub const YESEXPR: crate::nl_item = 47; +pub const NOSTR: crate::nl_item = 48; +pub const NOEXPR: crate::nl_item = 49; +pub const CRNCYSTR: crate::nl_item = 50; + +pub const CODESET: crate::nl_item = 51; + +pub const EXIT_FAILURE: c_int = 1; +pub const EXIT_SUCCESS: c_int = 0; +pub const RAND_MAX: c_int = 2147483647; +pub const EOF: c_int = -1; +pub const SEEK_SET: c_int = 0; +pub const SEEK_CUR: c_int = 1; +pub const SEEK_END: c_int = 2; +pub const _IOFBF: c_int = 0; +pub const _IONBF: c_int = 2; +pub const _IOLBF: c_int = 1; +pub const BUFSIZ: c_uint = 1024; +pub const FOPEN_MAX: c_uint = 20; +pub const FILENAME_MAX: c_uint = 1024; +pub const L_tmpnam: c_uint = 1024; +pub const O_NOCTTY: c_int = 32768; +pub const S_IFIFO: mode_t = 0o1_0000; +pub const S_IFCHR: mode_t = 0o2_0000; +pub const S_IFBLK: mode_t = 0o6_0000; +pub const S_IFDIR: mode_t = 0o4_0000; +pub const S_IFREG: mode_t = 0o10_0000; +pub const S_IFLNK: mode_t = 0o12_0000; +pub const S_IFSOCK: mode_t = 0o14_0000; +pub const S_IFMT: mode_t = 0o17_0000; +pub const S_IEXEC: mode_t = 0o0100; +pub const S_IWRITE: mode_t = 0o0200; +pub const S_IREAD: mode_t = 0o0400; +pub const S_IRWXU: mode_t = 0o0700; +pub const S_IXUSR: mode_t = 0o0100; +pub const S_IWUSR: mode_t = 0o0200; +pub const S_IRUSR: mode_t = 0o0400; +pub const S_IRWXG: mode_t = 0o0070; +pub const S_IXGRP: mode_t = 0o0010; +pub const S_IWGRP: mode_t = 0o0020; +pub const S_IRGRP: mode_t = 0o0040; +pub const S_IRWXO: mode_t = 0o0007; +pub const S_IXOTH: mode_t = 0o0001; +pub const S_IWOTH: mode_t = 0o0002; +pub const S_IROTH: mode_t = 0o0004; +pub const F_OK: c_int = 0; +pub const R_OK: c_int = 4; +pub const W_OK: c_int = 2; +pub const X_OK: c_int = 1; +pub const STDIN_FILENO: c_int = 0; +pub const STDOUT_FILENO: c_int = 1; +pub const STDERR_FILENO: c_int = 2; +pub const F_LOCK: c_int = 1; +pub const F_TEST: c_int = 3; +pub const F_TLOCK: c_int = 2; +pub const F_ULOCK: c_int = 0; +pub const F_GETLK: c_int = 7; +pub const F_SETLK: c_int = 8; +pub const F_SETLKW: c_int = 9; +pub const SIGHUP: c_int = 1; +pub const SIGINT: c_int = 2; +pub const 
SIGQUIT: c_int = 3; +pub const SIGILL: c_int = 4; +pub const SIGABRT: c_int = 6; +pub const SIGEMT: c_int = 7; +pub const SIGFPE: c_int = 8; +pub const SIGKILL: c_int = 9; +pub const SIGSEGV: c_int = 11; +pub const SIGPIPE: c_int = 13; +pub const SIGALRM: c_int = 14; +pub const SIGTERM: c_int = 15; + +pub const PROT_NONE: c_int = 0; +pub const PROT_READ: c_int = 1; +pub const PROT_WRITE: c_int = 2; +pub const PROT_EXEC: c_int = 4; + +pub const MAP_FILE: c_int = 0x0000; +pub const MAP_SHARED: c_int = 0x0001; +pub const MAP_PRIVATE: c_int = 0x0002; +pub const MAP_FIXED: c_int = 0x0010; +pub const MAP_ANON: c_int = 0x1000; +pub const MAP_ANONYMOUS: c_int = MAP_ANON; + +pub const MAP_FAILED: *mut c_void = !0 as *mut c_void; + +pub const IPC_CREAT: c_int = 0o001000; +pub const IPC_EXCL: c_int = 0o002000; +pub const IPC_NOWAIT: c_int = 0o004000; + +pub const IPC_PRIVATE: crate::key_t = 0; + +pub const IPC_RMID: c_int = 0; +pub const IPC_SET: c_int = 1; +pub const IPC_STAT: c_int = 2; + +pub const IPC_R: c_int = 0o000400; +pub const IPC_W: c_int = 0o000200; +pub const IPC_M: c_int = 0o010000; + +pub const SHM_R: c_int = IPC_R; +pub const SHM_W: c_int = IPC_W; + +pub const MCL_CURRENT: c_int = 0x0001; +pub const MCL_FUTURE: c_int = 0x0002; + +pub const MS_ASYNC: c_int = 0x0001; + +pub const EPERM: c_int = 1; +pub const ENOENT: c_int = 2; +pub const ESRCH: c_int = 3; +pub const EINTR: c_int = 4; +pub const EIO: c_int = 5; +pub const ENXIO: c_int = 6; +pub const E2BIG: c_int = 7; +pub const ENOEXEC: c_int = 8; +pub const EBADF: c_int = 9; +pub const ECHILD: c_int = 10; +pub const EDEADLK: c_int = 11; +pub const ENOMEM: c_int = 12; +pub const EACCES: c_int = 13; +pub const EFAULT: c_int = 14; +pub const ENOTBLK: c_int = 15; +pub const EBUSY: c_int = 16; +pub const EEXIST: c_int = 17; +pub const EXDEV: c_int = 18; +pub const ENODEV: c_int = 19; +pub const ENOTDIR: c_int = 20; +pub const EISDIR: c_int = 21; +pub const EINVAL: c_int = 22; +pub const ENFILE: c_int = 23; +pub const EMFILE: c_int = 24; +pub const ENOTTY: c_int = 25; +pub const ETXTBSY: c_int = 26; +pub const EFBIG: c_int = 27; +pub const ENOSPC: c_int = 28; +pub const ESPIPE: c_int = 29; +pub const EROFS: c_int = 30; +pub const EMLINK: c_int = 31; +pub const EPIPE: c_int = 32; +pub const EDOM: c_int = 33; +pub const ERANGE: c_int = 34; +pub const EAGAIN: c_int = 35; +pub const EWOULDBLOCK: c_int = 35; +pub const EINPROGRESS: c_int = 36; +pub const EALREADY: c_int = 37; +pub const ENOTSOCK: c_int = 38; +pub const EDESTADDRREQ: c_int = 39; +pub const EMSGSIZE: c_int = 40; +pub const EPROTOTYPE: c_int = 41; +pub const ENOPROTOOPT: c_int = 42; +pub const EPROTONOSUPPORT: c_int = 43; +pub const ESOCKTNOSUPPORT: c_int = 44; +pub const EOPNOTSUPP: c_int = 45; +pub const EPFNOSUPPORT: c_int = 46; +pub const EAFNOSUPPORT: c_int = 47; +pub const EADDRINUSE: c_int = 48; +pub const EADDRNOTAVAIL: c_int = 49; +pub const ENETDOWN: c_int = 50; +pub const ENETUNREACH: c_int = 51; +pub const ENETRESET: c_int = 52; +pub const ECONNABORTED: c_int = 53; +pub const ECONNRESET: c_int = 54; +pub const ENOBUFS: c_int = 55; +pub const EISCONN: c_int = 56; +pub const ENOTCONN: c_int = 57; +pub const ESHUTDOWN: c_int = 58; +pub const ETOOMANYREFS: c_int = 59; +pub const ETIMEDOUT: c_int = 60; +pub const ECONNREFUSED: c_int = 61; +pub const ELOOP: c_int = 62; +pub const ENAMETOOLONG: c_int = 63; +pub const EHOSTDOWN: c_int = 64; +pub const EHOSTUNREACH: c_int = 65; +pub const ENOTEMPTY: c_int = 66; +pub const EPROCLIM: c_int = 67; +pub const EUSERS: c_int = 68; +pub 
const EDQUOT: c_int = 69; +pub const ESTALE: c_int = 70; +pub const EREMOTE: c_int = 71; +pub const EBADRPC: c_int = 72; +pub const ERPCMISMATCH: c_int = 73; +pub const EPROGUNAVAIL: c_int = 74; +pub const EPROGMISMATCH: c_int = 75; +pub const EPROCUNAVAIL: c_int = 76; +pub const ENOLCK: c_int = 77; +pub const ENOSYS: c_int = 78; +pub const EFTYPE: c_int = 79; +pub const EAUTH: c_int = 80; +pub const ENEEDAUTH: c_int = 81; + +pub const F_DUPFD: c_int = 0; +pub const F_GETFD: c_int = 1; +pub const F_SETFD: c_int = 2; +pub const F_GETFL: c_int = 3; +pub const F_SETFL: c_int = 4; + +pub const SIGTRAP: c_int = 5; + +pub const GLOB_APPEND: c_int = 0x0001; +pub const GLOB_DOOFFS: c_int = 0x0002; +pub const GLOB_ERR: c_int = 0x0004; +pub const GLOB_MARK: c_int = 0x0008; +pub const GLOB_NOCHECK: c_int = 0x0010; +pub const GLOB_NOSORT: c_int = 0x0020; +pub const GLOB_NOESCAPE: c_int = 0x1000; + +pub const GLOB_NOSPACE: c_int = -1; +pub const GLOB_ABORTED: c_int = -2; +pub const GLOB_NOMATCH: c_int = -3; +pub const GLOB_NOSYS: c_int = -4; + +pub const POSIX_MADV_NORMAL: c_int = 0; +pub const POSIX_MADV_RANDOM: c_int = 1; +pub const POSIX_MADV_SEQUENTIAL: c_int = 2; +pub const POSIX_MADV_WILLNEED: c_int = 3; +pub const POSIX_MADV_DONTNEED: c_int = 4; + +// DIFF(main): changed to `c_short` in f62eb023ab +pub const POSIX_SPAWN_RESETIDS: c_int = 0x01; +pub const POSIX_SPAWN_SETPGROUP: c_int = 0x02; +pub const POSIX_SPAWN_SETSCHEDPARAM: c_int = 0x04; +pub const POSIX_SPAWN_SETSCHEDULER: c_int = 0x08; +pub const POSIX_SPAWN_SETSIGDEF: c_int = 0x10; +pub const POSIX_SPAWN_SETSIGMASK: c_int = 0x20; + +pub const PTHREAD_CREATE_JOINABLE: c_int = 0; +pub const PTHREAD_CREATE_DETACHED: c_int = 1; + +pub const PIOD_READ_D: c_int = 1; +pub const PIOD_WRITE_D: c_int = 2; +pub const PIOD_READ_I: c_int = 3; +pub const PIOD_WRITE_I: c_int = 4; +pub const PIOD_READ_AUXV: c_int = 5; + +pub const PT_TRACE_ME: c_int = 0; +pub const PT_READ_I: c_int = 1; +pub const PT_READ_D: c_int = 2; +pub const PT_WRITE_I: c_int = 4; +pub const PT_WRITE_D: c_int = 5; +pub const PT_CONTINUE: c_int = 7; +pub const PT_KILL: c_int = 8; +pub const PT_ATTACH: c_int = 9; +pub const PT_DETACH: c_int = 10; +pub const PT_IO: c_int = 11; + +// http://man.openbsd.org/OpenBSD-current/man2/clock_getres.2 +// The man page says clock_gettime(3) can accept various values as clockid_t but +// http://fxr.watson.org/fxr/source/kern/kern_time.c?v=OPENBSD;im=excerpts#L161 +// the implementation rejects anything other than the below two +// +// http://netbsd.gw.com/cgi-bin/man-cgi?clock_gettime +// https://github.com/jsonn/src/blob/HEAD/sys/kern/subr_time.c#L222 +// Basically the same goes for NetBSD +pub const CLOCK_REALTIME: crate::clockid_t = 0; +pub const CLOCK_MONOTONIC: crate::clockid_t = 3; + +pub const RLIMIT_CPU: c_int = 0; +pub const RLIMIT_FSIZE: c_int = 1; +pub const RLIMIT_DATA: c_int = 2; +pub const RLIMIT_STACK: c_int = 3; +pub const RLIMIT_CORE: c_int = 4; +pub const RLIMIT_RSS: c_int = 5; +pub const RLIMIT_MEMLOCK: c_int = 6; +pub const RLIMIT_NPROC: c_int = 7; +pub const RLIMIT_NOFILE: c_int = 8; + +pub const RLIM_INFINITY: rlim_t = 0x7fff_ffff_ffff_ffff; +pub const RLIM_SAVED_MAX: rlim_t = RLIM_INFINITY; +pub const RLIM_SAVED_CUR: rlim_t = RLIM_INFINITY; + +pub const RUSAGE_SELF: c_int = 0; +pub const RUSAGE_CHILDREN: c_int = -1; + +pub const MADV_NORMAL: c_int = 0; +pub const MADV_RANDOM: c_int = 1; +pub const MADV_SEQUENTIAL: c_int = 2; +pub const MADV_WILLNEED: c_int = 3; +pub const MADV_DONTNEED: c_int = 4; +pub const MADV_FREE: c_int 
= 6;
+
+// sys/fstypes.h in NetBSD, or sys/mount.h in OpenBSD
+pub const MNT_NODEV: c_int = 0x00000010;
+pub const MNT_LOCAL: c_int = 0x00001000;
+pub const MNT_QUOTA: c_int = 0x00002000;
+
+// sys/ioccom.h in NetBSD and OpenBSD
+pub const IOCPARM_MASK: u32 = 0x1fff;
+
+pub const IOC_VOID: c_ulong = 0x20000000;
+pub const IOC_OUT: c_ulong = 0x40000000;
+pub const IOC_IN: c_ulong = 0x80000000;
+pub const IOC_INOUT: c_ulong = IOC_IN | IOC_OUT;
+pub const IOC_DIRMASK: c_ulong = 0xe0000000;
+
+pub const fn _IO(g: c_ulong, n: c_ulong) -> c_ulong {
+    _IOC(IOC_VOID, g, n, 0)
+}
+
+/// Build an ioctl number for a read-only ioctl.
+pub const fn _IOR<T>(g: c_ulong, n: c_ulong) -> c_ulong {
+    _IOC(IOC_OUT, g, n, mem::size_of::<T>() as c_ulong)
+}
+
+/// Build an ioctl number for a write-only ioctl.
+pub const fn _IOW<T>(g: c_ulong, n: c_ulong) -> c_ulong {
+    _IOC(IOC_IN, g, n, mem::size_of::<T>() as c_ulong)
+}
+
+/// Build an ioctl number for a read-write ioctl.
+pub const fn _IOWR<T>(g: c_ulong, n: c_ulong) -> c_ulong {
+    _IOC(IOC_INOUT, g, n, mem::size_of::<T>() as c_ulong)
+}
+
+pub const AF_UNSPEC: c_int = 0;
+pub const AF_LOCAL: c_int = 1;
+pub const AF_UNIX: c_int = AF_LOCAL;
+pub const AF_INET: c_int = 2;
+pub const AF_IMPLINK: c_int = 3;
+pub const AF_PUP: c_int = 4;
+pub const AF_CHAOS: c_int = 5;
+pub const AF_NS: c_int = 6;
+pub const AF_ISO: c_int = 7;
+pub const AF_OSI: c_int = AF_ISO;
+pub const AF_DATAKIT: c_int = 9;
+pub const AF_CCITT: c_int = 10;
+pub const AF_SNA: c_int = 11;
+pub const AF_DECnet: c_int = 12;
+pub const AF_DLI: c_int = 13;
+pub const AF_LAT: c_int = 14;
+pub const AF_HYLINK: c_int = 15;
+pub const AF_APPLETALK: c_int = 16;
+pub const AF_LINK: c_int = 18;
+pub const pseudo_AF_XTP: c_int = 19;
+pub const AF_COIP: c_int = 20;
+pub const AF_CNT: c_int = 21;
+pub const pseudo_AF_RTIP: c_int = 22;
+pub const AF_IPX: c_int = 23;
+pub const AF_INET6: c_int = 24;
+pub const pseudo_AF_PIP: c_int = 25;
+pub const AF_ISDN: c_int = 26;
+pub const AF_E164: c_int = AF_ISDN;
+pub const AF_NATM: c_int = 27;
+
+pub const PF_UNSPEC: c_int = AF_UNSPEC;
+pub const PF_LOCAL: c_int = AF_LOCAL;
+pub const PF_UNIX: c_int = PF_LOCAL;
+pub const PF_INET: c_int = AF_INET;
+pub const PF_IMPLINK: c_int = AF_IMPLINK;
+pub const PF_PUP: c_int = AF_PUP;
+pub const PF_CHAOS: c_int = AF_CHAOS;
+pub const PF_NS: c_int = AF_NS;
+pub const PF_ISO: c_int = AF_ISO;
+pub const PF_OSI: c_int = AF_ISO;
+pub const PF_DATAKIT: c_int = AF_DATAKIT;
+pub const PF_CCITT: c_int = AF_CCITT;
+pub const PF_SNA: c_int = AF_SNA;
+pub const PF_DECnet: c_int = AF_DECnet;
+pub const PF_DLI: c_int = AF_DLI;
+pub const PF_LAT: c_int = AF_LAT;
+pub const PF_HYLINK: c_int = AF_HYLINK;
+pub const PF_APPLETALK: c_int = AF_APPLETALK;
+pub const PF_LINK: c_int = AF_LINK;
+pub const PF_XTP: c_int = pseudo_AF_XTP;
+pub const PF_COIP: c_int = AF_COIP;
+pub const PF_CNT: c_int = AF_CNT;
+pub const PF_IPX: c_int = AF_IPX;
+pub const PF_INET6: c_int = AF_INET6;
+pub const PF_RTIP: c_int = pseudo_AF_RTIP;
+pub const PF_PIP: c_int = pseudo_AF_PIP;
+pub const PF_ISDN: c_int = AF_ISDN;
+pub const PF_NATM: c_int = AF_NATM;
+
+pub const SOCK_STREAM: c_int = 1;
+pub const SOCK_DGRAM: c_int = 2;
+pub const SOCK_RAW: c_int = 3;
+pub const SOCK_RDM: c_int = 4;
+pub const SOCK_SEQPACKET: c_int = 5;
+pub const IP_TTL: c_int = 4;
+pub const IP_HDRINCL: c_int = 2;
+pub const IP_ADD_MEMBERSHIP: c_int = 12;
+pub const IP_DROP_MEMBERSHIP: c_int = 13;
+pub const IPV6_RECVPKTINFO: c_int = 36;
+pub const IPV6_PKTINFO: c_int = 46;
+pub const IPV6_RECVTCLASS: c_int = 57;
+pub const IPV6_TCLASS: c_int = 61; + +pub const SOL_SOCKET: c_int = 0xffff; +pub const SO_DEBUG: c_int = 0x01; +pub const SO_ACCEPTCONN: c_int = 0x0002; +pub const SO_REUSEADDR: c_int = 0x0004; +pub const SO_KEEPALIVE: c_int = 0x0008; +pub const SO_DONTROUTE: c_int = 0x0010; +pub const SO_BROADCAST: c_int = 0x0020; +pub const SO_USELOOPBACK: c_int = 0x0040; +pub const SO_LINGER: c_int = 0x0080; +pub const SO_OOBINLINE: c_int = 0x0100; +pub const SO_REUSEPORT: c_int = 0x0200; +pub const SO_SNDBUF: c_int = 0x1001; +pub const SO_RCVBUF: c_int = 0x1002; +pub const SO_SNDLOWAT: c_int = 0x1003; +pub const SO_RCVLOWAT: c_int = 0x1004; +pub const SO_ERROR: c_int = 0x1007; +pub const SO_TYPE: c_int = 0x1008; + +pub const SOMAXCONN: c_int = 128; + +pub const MSG_OOB: c_int = 0x1; +pub const MSG_PEEK: c_int = 0x2; +pub const MSG_DONTROUTE: c_int = 0x4; +pub const MSG_EOR: c_int = 0x8; +pub const MSG_TRUNC: c_int = 0x10; +pub const MSG_CTRUNC: c_int = 0x20; +pub const MSG_WAITALL: c_int = 0x40; +pub const MSG_DONTWAIT: c_int = 0x80; +pub const MSG_BCAST: c_int = 0x100; +pub const MSG_MCAST: c_int = 0x200; +pub const MSG_NOSIGNAL: c_int = 0x400; +pub const MSG_CMSG_CLOEXEC: c_int = 0x800; + +pub const SHUT_RD: c_int = 0; +pub const SHUT_WR: c_int = 1; +pub const SHUT_RDWR: c_int = 2; + +pub const LOCK_SH: c_int = 1; +pub const LOCK_EX: c_int = 2; +pub const LOCK_NB: c_int = 4; +pub const LOCK_UN: c_int = 8; + +pub const IPPROTO_RAW: c_int = 255; + +pub const _SC_ARG_MAX: c_int = 1; +pub const _SC_CHILD_MAX: c_int = 2; +pub const _SC_NGROUPS_MAX: c_int = 4; +pub const _SC_OPEN_MAX: c_int = 5; +pub const _SC_JOB_CONTROL: c_int = 6; +pub const _SC_SAVED_IDS: c_int = 7; +pub const _SC_VERSION: c_int = 8; +pub const _SC_BC_BASE_MAX: c_int = 9; +pub const _SC_BC_DIM_MAX: c_int = 10; +pub const _SC_BC_SCALE_MAX: c_int = 11; +pub const _SC_BC_STRING_MAX: c_int = 12; +pub const _SC_COLL_WEIGHTS_MAX: c_int = 13; +pub const _SC_EXPR_NEST_MAX: c_int = 14; +pub const _SC_LINE_MAX: c_int = 15; +pub const _SC_RE_DUP_MAX: c_int = 16; +pub const _SC_2_VERSION: c_int = 17; +pub const _SC_2_C_BIND: c_int = 18; +pub const _SC_2_C_DEV: c_int = 19; +pub const _SC_2_CHAR_TERM: c_int = 20; +pub const _SC_2_FORT_DEV: c_int = 21; +pub const _SC_2_FORT_RUN: c_int = 22; +pub const _SC_2_LOCALEDEF: c_int = 23; +pub const _SC_2_SW_DEV: c_int = 24; +pub const _SC_2_UPE: c_int = 25; +pub const _SC_STREAM_MAX: c_int = 26; +pub const _SC_TZNAME_MAX: c_int = 27; +pub const _SC_PAGESIZE: c_int = 28; +pub const _SC_PAGE_SIZE: c_int = _SC_PAGESIZE; +pub const _SC_FSYNC: c_int = 29; +pub const _SC_XOPEN_SHM: c_int = 30; + +pub const Q_GETQUOTA: c_int = 0x300; +pub const Q_SETQUOTA: c_int = 0x400; + +pub const RTLD_GLOBAL: c_int = 0x100; + +pub const LOG_NFACILITIES: c_int = 24; + +pub const HW_NCPU: c_int = 3; + +pub const B0: speed_t = 0; +pub const B50: speed_t = 50; +pub const B75: speed_t = 75; +pub const B110: speed_t = 110; +pub const B134: speed_t = 134; +pub const B150: speed_t = 150; +pub const B200: speed_t = 200; +pub const B300: speed_t = 300; +pub const B600: speed_t = 600; +pub const B1200: speed_t = 1200; +pub const B1800: speed_t = 1800; +pub const B2400: speed_t = 2400; +pub const B4800: speed_t = 4800; +pub const B9600: speed_t = 9600; +pub const B19200: speed_t = 19200; +pub const B38400: speed_t = 38400; +pub const B7200: speed_t = 7200; +pub const B14400: speed_t = 14400; +pub const B28800: speed_t = 28800; +pub const B57600: speed_t = 57600; +pub const B76800: speed_t = 76800; +pub const B115200: speed_t = 115200; 
+pub const B230400: speed_t = 230400; +pub const EXTA: speed_t = 19200; +pub const EXTB: speed_t = 38400; + +pub const SEM_FAILED: *mut sem_t = ptr::null_mut(); + +pub const CRTSCTS: crate::tcflag_t = 0x00010000; +pub const CRTS_IFLOW: crate::tcflag_t = CRTSCTS; +pub const CCTS_OFLOW: crate::tcflag_t = CRTSCTS; +pub const OCRNL: crate::tcflag_t = 0x10; + +pub const TIOCEXCL: c_ulong = 0x2000740d; +pub const TIOCNXCL: c_ulong = 0x2000740e; +pub const TIOCFLUSH: c_ulong = 0x80047410; +pub const TIOCGETA: c_ulong = 0x402c7413; +pub const TIOCSETA: c_ulong = 0x802c7414; +pub const TIOCSETAW: c_ulong = 0x802c7415; +pub const TIOCSETAF: c_ulong = 0x802c7416; +pub const TIOCGETD: c_ulong = 0x4004741a; +pub const TIOCSETD: c_ulong = 0x8004741b; +pub const TIOCMGET: c_ulong = 0x4004746a; +pub const TIOCMBIC: c_ulong = 0x8004746b; +pub const TIOCMBIS: c_ulong = 0x8004746c; +pub const TIOCMSET: c_ulong = 0x8004746d; +pub const TIOCSTART: c_ulong = 0x2000746e; +pub const TIOCSTOP: c_ulong = 0x2000746f; +pub const TIOCSCTTY: c_ulong = 0x20007461; +pub const TIOCGWINSZ: c_ulong = 0x40087468; +pub const TIOCSWINSZ: c_ulong = 0x80087467; +pub const TIOCM_LE: c_int = 0o0001; +pub const TIOCM_DTR: c_int = 0o0002; +pub const TIOCM_RTS: c_int = 0o0004; +pub const TIOCM_ST: c_int = 0o0010; +pub const TIOCM_SR: c_int = 0o0020; +pub const TIOCM_CTS: c_int = 0o0040; +pub const TIOCM_CAR: c_int = 0o0100; +pub const TIOCM_RNG: c_int = 0o0200; +pub const TIOCM_DSR: c_int = 0o0400; +pub const TIOCM_CD: c_int = TIOCM_CAR; +pub const TIOCM_RI: c_int = TIOCM_RNG; + +pub const TIMER_ABSTIME: c_int = 1; + +// sys/reboot.h + +pub const RB_AUTOBOOT: c_int = 0; + +pub const TCP_INFO: c_int = 9; + +#[link(name = "util")] +extern "C" { + pub fn setgrent(); + pub fn sem_destroy(sem: *mut sem_t) -> c_int; + pub fn sem_init(sem: *mut sem_t, pshared: c_int, value: c_uint) -> c_int; + + pub fn daemon(nochdir: c_int, noclose: c_int) -> c_int; + pub fn accept4( + s: c_int, + addr: *mut crate::sockaddr, + addrlen: *mut crate::socklen_t, + flags: c_int, + ) -> c_int; + pub fn mincore(addr: *mut c_void, len: size_t, vec: *mut c_char) -> c_int; + #[cfg_attr(target_os = "netbsd", link_name = "__clock_getres50")] + pub fn clock_getres(clk_id: crate::clockid_t, tp: *mut crate::timespec) -> c_int; + #[cfg_attr(target_os = "netbsd", link_name = "__clock_gettime50")] + pub fn clock_gettime(clk_id: crate::clockid_t, tp: *mut crate::timespec) -> c_int; + #[cfg_attr(target_os = "netbsd", link_name = "__clock_settime50")] + pub fn clock_settime(clk_id: crate::clockid_t, tp: *const crate::timespec) -> c_int; + pub fn __errno() -> *mut c_int; + pub fn shm_open(name: *const c_char, oflag: c_int, mode: mode_t) -> c_int; + pub fn memrchr(cx: *const c_void, c: c_int, n: size_t) -> *mut c_void; + pub fn mkostemp(template: *mut c_char, flags: c_int) -> c_int; + pub fn mkostemps(template: *mut c_char, suffixlen: c_int, flags: c_int) -> c_int; + pub fn pwritev(fd: c_int, iov: *const crate::iovec, iovcnt: c_int, offset: off_t) -> ssize_t; + pub fn preadv(fd: c_int, iov: *const crate::iovec, iovcnt: c_int, offset: off_t) -> ssize_t; + pub fn futimens(fd: c_int, times: *const crate::timespec) -> c_int; + pub fn utimensat( + dirfd: c_int, + path: *const c_char, + times: *const crate::timespec, + flag: c_int, + ) -> c_int; + pub fn fdatasync(fd: c_int) -> c_int; + pub fn login_tty(fd: c_int) -> c_int; + pub fn getpriority(which: c_int, who: crate::id_t) -> c_int; + pub fn setpriority(which: c_int, who: crate::id_t, prio: c_int) -> c_int; + + pub fn 
mknodat(dirfd: c_int, pathname: *const c_char, mode: mode_t, dev: dev_t) -> c_int; + pub fn mkfifoat(dirfd: c_int, pathname: *const c_char, mode: mode_t) -> c_int; + pub fn sem_timedwait(sem: *mut sem_t, abstime: *const crate::timespec) -> c_int; + pub fn sem_getvalue(sem: *mut sem_t, sval: *mut c_int) -> c_int; + pub fn pthread_condattr_setclock( + attr: *mut pthread_condattr_t, + clock_id: crate::clockid_t, + ) -> c_int; + pub fn sethostname(name: *const c_char, len: size_t) -> c_int; + pub fn pthread_mutex_timedlock( + lock: *mut pthread_mutex_t, + abstime: *const crate::timespec, + ) -> c_int; + pub fn pthread_spin_init(lock: *mut pthread_spinlock_t, pshared: c_int) -> c_int; + pub fn pthread_spin_destroy(lock: *mut pthread_spinlock_t) -> c_int; + pub fn pthread_spin_lock(lock: *mut pthread_spinlock_t) -> c_int; + pub fn pthread_spin_trylock(lock: *mut pthread_spinlock_t) -> c_int; + pub fn pthread_spin_unlock(lock: *mut pthread_spinlock_t) -> c_int; + pub fn pthread_setschedparam( + native: crate::pthread_t, + policy: c_int, + param: *const sched_param, + ) -> c_int; + pub fn pthread_getschedparam( + native: crate::pthread_t, + policy: *mut c_int, + param: *mut sched_param, + ) -> c_int; + pub fn pipe2(fds: *mut c_int, flags: c_int) -> c_int; + + pub fn getgrouplist( + name: *const c_char, + basegid: crate::gid_t, + groups: *mut crate::gid_t, + ngroups: *mut c_int, + ) -> c_int; + pub fn initgroups(name: *const c_char, basegid: crate::gid_t) -> c_int; + pub fn getdomainname(name: *mut c_char, len: size_t) -> c_int; + pub fn setdomainname(name: *const c_char, len: size_t) -> c_int; + pub fn uname(buf: *mut crate::utsname) -> c_int; + + pub fn shmget(key: crate::key_t, size: size_t, shmflg: c_int) -> c_int; + pub fn shmat(shmid: c_int, shmaddr: *const c_void, shmflg: c_int) -> *mut c_void; + pub fn shmdt(shmaddr: *const c_void) -> c_int; + pub fn shmctl(shmid: c_int, cmd: c_int, buf: *mut crate::shmid_ds) -> c_int; + + // DIFF(main): changed to `*const *mut` in e77f551de9 + pub fn execvpe( + file: *const c_char, + argv: *const *const c_char, + envp: *const *const c_char, + ) -> c_int; + + pub fn waitid( + idtype: idtype_t, + id: crate::id_t, + infop: *mut crate::siginfo_t, + options: c_int, + ) -> c_int; + + pub fn posix_spawn( + pid: *mut crate::pid_t, + path: *const c_char, + file_actions: *const crate::posix_spawn_file_actions_t, + attrp: *const crate::posix_spawnattr_t, + argv: *const *mut c_char, + envp: *const *mut c_char, + ) -> c_int; + pub fn posix_spawnp( + pid: *mut crate::pid_t, + file: *const c_char, + file_actions: *const crate::posix_spawn_file_actions_t, + attrp: *const crate::posix_spawnattr_t, + argv: *const *mut c_char, + envp: *const *mut c_char, + ) -> c_int; + pub fn posix_spawnattr_init(attr: *mut posix_spawnattr_t) -> c_int; + pub fn posix_spawnattr_destroy(attr: *mut posix_spawnattr_t) -> c_int; + pub fn posix_spawnattr_getsigdefault( + attr: *const posix_spawnattr_t, + default: *mut crate::sigset_t, + ) -> c_int; + pub fn posix_spawnattr_setsigdefault( + attr: *mut posix_spawnattr_t, + default: *const crate::sigset_t, + ) -> c_int; + pub fn posix_spawnattr_getsigmask( + attr: *const posix_spawnattr_t, + default: *mut crate::sigset_t, + ) -> c_int; + pub fn posix_spawnattr_setsigmask( + attr: *mut posix_spawnattr_t, + default: *const crate::sigset_t, + ) -> c_int; + pub fn posix_spawnattr_getflags(attr: *const posix_spawnattr_t, flags: *mut c_short) -> c_int; + pub fn posix_spawnattr_setflags(attr: *mut posix_spawnattr_t, flags: c_short) -> c_int; + pub fn 
posix_spawnattr_getpgroup( + attr: *const posix_spawnattr_t, + flags: *mut crate::pid_t, + ) -> c_int; + pub fn posix_spawnattr_setpgroup(attr: *mut posix_spawnattr_t, flags: crate::pid_t) -> c_int; + pub fn posix_spawnattr_getschedpolicy( + attr: *const posix_spawnattr_t, + flags: *mut c_int, + ) -> c_int; + pub fn posix_spawnattr_setschedpolicy(attr: *mut posix_spawnattr_t, flags: c_int) -> c_int; + pub fn posix_spawnattr_getschedparam( + attr: *const posix_spawnattr_t, + param: *mut crate::sched_param, + ) -> c_int; + pub fn posix_spawnattr_setschedparam( + attr: *mut posix_spawnattr_t, + param: *const crate::sched_param, + ) -> c_int; + + pub fn posix_spawn_file_actions_init(actions: *mut posix_spawn_file_actions_t) -> c_int; + pub fn posix_spawn_file_actions_destroy(actions: *mut posix_spawn_file_actions_t) -> c_int; + pub fn posix_spawn_file_actions_addopen( + actions: *mut posix_spawn_file_actions_t, + fd: c_int, + path: *const c_char, + oflag: c_int, + mode: mode_t, + ) -> c_int; + pub fn posix_spawn_file_actions_addclose( + actions: *mut posix_spawn_file_actions_t, + fd: c_int, + ) -> c_int; + pub fn posix_spawn_file_actions_adddup2( + actions: *mut posix_spawn_file_actions_t, + fd: c_int, + newfd: c_int, + ) -> c_int; +} + +extern "C" { + pub fn reallocarray(ptr: *mut c_void, nmemb: size_t, size: size_t) -> *mut c_void; + pub fn gethostid() -> c_long; + pub fn sethostid(hostid: c_long) -> c_int; + pub fn ftok(path: *const c_char, id: c_int) -> crate::key_t; + + pub fn dirname(path: *mut c_char) -> *mut c_char; + pub fn basename(path: *mut c_char) -> *mut c_char; + pub fn getentropy(buf: *mut c_void, buflen: size_t) -> c_int; + + pub fn sendmmsg(sockfd: c_int, mmsg: *mut crate::mmsghdr, vlen: c_uint, flags: c_int) -> c_int; + pub fn recvmmsg( + sockfd: c_int, + mmsg: *mut crate::mmsghdr, + vlen: c_uint, + flags: c_int, + timeout: *mut crate::timespec, + ) -> c_int; + + pub fn closefrom(lowfd: c_int) -> c_int; +} + +cfg_if! { + if #[cfg(target_os = "netbsd")] { + mod netbsd; + pub use self::netbsd::*; + } else if #[cfg(target_os = "openbsd")] { + mod openbsd; + pub use self::openbsd::*; + } else { + // Unknown target_os + } +} diff --git a/vendor/libc/src/unix/bsd/netbsdlike/netbsd/aarch64.rs b/vendor/libc/src/unix/bsd/netbsdlike/netbsd/aarch64.rs new file mode 100644 index 00000000000000..e0206af04f8f1b --- /dev/null +++ b/vendor/libc/src/unix/bsd/netbsdlike/netbsd/aarch64.rs @@ -0,0 +1,132 @@ +use crate::prelude::*; +use crate::PT_FIRSTMACH; + +pub type greg_t = u64; +pub type __cpu_simple_lock_nv_t = c_uchar; + +s! { + pub struct __fregset { + pub __qregs: [__c_anonymous__freg; 32], + pub __fpcr: u32, + pub __fpsr: u32, + } + + pub struct mcontext_t { + pub __gregs: [crate::greg_t; 32], + pub __fregs: __fregset, + __spare: [crate::greg_t; 8], + } + + pub struct ucontext_t { + pub uc_flags: c_uint, + pub uc_link: *mut ucontext_t, + pub uc_sigmask: crate::sigset_t, + pub uc_stack: crate::stack_t, + pub uc_mcontext: mcontext_t, + } +} + +s_no_extra_traits! { + #[repr(align(16))] + pub union __c_anonymous__freg { + pub __b8: [u8; 16], + pub __h16: [u16; 8], + pub __s32: [u32; 4], + pub __d64: [u64; 2], + pub __q128: [u128; 1], + } +} + +cfg_if! 
{ + if #[cfg(feature = "extra_traits")] { + impl PartialEq for __c_anonymous__freg { + fn eq(&self, other: &__c_anonymous__freg) -> bool { + unsafe { + self.__b8 == other.__b8 + || self.__h16 == other.__h16 + || self.__s32 == other.__s32 + || self.__d64 == other.__d64 + || self.__q128 == other.__q128 + } + } + } + impl Eq for __c_anonymous__freg {} + impl hash::Hash for __c_anonymous__freg { + fn hash(&self, state: &mut H) { + unsafe { + self.__b8.hash(state); + self.__h16.hash(state); + self.__s32.hash(state); + self.__d64.hash(state); + self.__q128.hash(state); + } + } + } + } +} + +pub(crate) const _ALIGNBYTES: usize = size_of::() - 1; + +pub const PT_GETREGS: c_int = PT_FIRSTMACH + 0; +pub const PT_SETREGS: c_int = PT_FIRSTMACH + 1; +pub const PT_GETFPREGS: c_int = PT_FIRSTMACH + 2; +pub const PT_SETFPREGS: c_int = PT_FIRSTMACH + 3; + +pub const _REG_R0: c_int = 0; +pub const _REG_R1: c_int = 1; +pub const _REG_R2: c_int = 2; +pub const _REG_R3: c_int = 3; +pub const _REG_R4: c_int = 4; +pub const _REG_R5: c_int = 5; +pub const _REG_R6: c_int = 6; +pub const _REG_R7: c_int = 7; +pub const _REG_R8: c_int = 8; +pub const _REG_R9: c_int = 9; +pub const _REG_R10: c_int = 10; +pub const _REG_R11: c_int = 11; +pub const _REG_R12: c_int = 12; +pub const _REG_R13: c_int = 13; +pub const _REG_R14: c_int = 14; +pub const _REG_R15: c_int = 15; +pub const _REG_CPSR: c_int = 16; +pub const _REG_X0: c_int = 0; +pub const _REG_X1: c_int = 1; +pub const _REG_X2: c_int = 2; +pub const _REG_X3: c_int = 3; +pub const _REG_X4: c_int = 4; +pub const _REG_X5: c_int = 5; +pub const _REG_X6: c_int = 6; +pub const _REG_X7: c_int = 7; +pub const _REG_X8: c_int = 8; +pub const _REG_X9: c_int = 9; +pub const _REG_X10: c_int = 10; +pub const _REG_X11: c_int = 11; +pub const _REG_X12: c_int = 12; +pub const _REG_X13: c_int = 13; +pub const _REG_X14: c_int = 14; +pub const _REG_X15: c_int = 15; +pub const _REG_X16: c_int = 16; +pub const _REG_X17: c_int = 17; +pub const _REG_X18: c_int = 18; +pub const _REG_X19: c_int = 19; +pub const _REG_X20: c_int = 20; +pub const _REG_X21: c_int = 21; +pub const _REG_X22: c_int = 22; +pub const _REG_X23: c_int = 23; +pub const _REG_X24: c_int = 24; +pub const _REG_X25: c_int = 25; +pub const _REG_X26: c_int = 26; +pub const _REG_X27: c_int = 27; +pub const _REG_X28: c_int = 28; +pub const _REG_X29: c_int = 29; +pub const _REG_X30: c_int = 30; +pub const _REG_X31: c_int = 31; +pub const _REG_ELR: c_int = 32; +pub const _REG_SPSR: c_int = 33; +pub const _REG_TIPDR: c_int = 34; + +pub const _REG_RV: c_int = _REG_X0; +pub const _REG_FP: c_int = _REG_X29; +pub const _REG_LR: c_int = _REG_X30; +pub const _REG_SP: c_int = _REG_X31; +pub const _REG_PC: c_int = _REG_ELR; diff --git a/vendor/libc/src/unix/bsd/netbsdlike/netbsd/arm.rs b/vendor/libc/src/unix/bsd/netbsdlike/netbsd/arm.rs new file mode 100644 index 00000000000000..9ff44bd40826a2 --- /dev/null +++ b/vendor/libc/src/unix/bsd/netbsdlike/netbsd/arm.rs @@ -0,0 +1,70 @@ +use crate::prelude::*; +use crate::PT_FIRSTMACH; + +pub type __cpu_simple_lock_nv_t = c_int; + +pub(crate) const _ALIGNBYTES: usize = size_of::() - 1; + +pub const PT_GETREGS: c_int = PT_FIRSTMACH + 1; +pub const PT_SETREGS: c_int = PT_FIRSTMACH + 2; +pub const PT_GETFPREGS: c_int = PT_FIRSTMACH + 3; +pub const PT_SETFPREGS: c_int = PT_FIRSTMACH + 4; + +pub const _REG_R0: c_int = 0; +pub const _REG_R1: c_int = 1; +pub const _REG_R2: c_int = 2; +pub const _REG_R3: c_int = 3; +pub const _REG_R4: c_int = 4; +pub const _REG_R5: c_int = 5; +pub const _REG_R6: c_int = 6; 
+pub const _REG_R7: c_int = 7; +pub const _REG_R8: c_int = 8; +pub const _REG_R9: c_int = 9; +pub const _REG_R10: c_int = 10; +pub const _REG_R11: c_int = 11; +pub const _REG_R12: c_int = 12; +pub const _REG_R13: c_int = 13; +pub const _REG_R14: c_int = 14; +pub const _REG_R15: c_int = 15; +pub const _REG_CPSR: c_int = 16; +pub const _REG_X0: c_int = 0; +pub const _REG_X1: c_int = 1; +pub const _REG_X2: c_int = 2; +pub const _REG_X3: c_int = 3; +pub const _REG_X4: c_int = 4; +pub const _REG_X5: c_int = 5; +pub const _REG_X6: c_int = 6; +pub const _REG_X7: c_int = 7; +pub const _REG_X8: c_int = 8; +pub const _REG_X9: c_int = 9; +pub const _REG_X10: c_int = 10; +pub const _REG_X11: c_int = 11; +pub const _REG_X12: c_int = 12; +pub const _REG_X13: c_int = 13; +pub const _REG_X14: c_int = 14; +pub const _REG_X15: c_int = 15; +pub const _REG_X16: c_int = 16; +pub const _REG_X17: c_int = 17; +pub const _REG_X18: c_int = 18; +pub const _REG_X19: c_int = 19; +pub const _REG_X20: c_int = 20; +pub const _REG_X21: c_int = 21; +pub const _REG_X22: c_int = 22; +pub const _REG_X23: c_int = 23; +pub const _REG_X24: c_int = 24; +pub const _REG_X25: c_int = 25; +pub const _REG_X26: c_int = 26; +pub const _REG_X27: c_int = 27; +pub const _REG_X28: c_int = 28; +pub const _REG_X29: c_int = 29; +pub const _REG_X30: c_int = 30; +pub const _REG_X31: c_int = 31; +pub const _REG_ELR: c_int = 32; +pub const _REG_SPSR: c_int = 33; +pub const _REG_TIPDR: c_int = 34; + +pub const _REG_RV: c_int = _REG_R0; +pub const _REG_FP: c_int = _REG_R11; +pub const _REG_LR: c_int = _REG_R13; +pub const _REG_SP: c_int = _REG_R14; +pub const _REG_PC: c_int = _REG_R15; diff --git a/vendor/libc/src/unix/bsd/netbsdlike/netbsd/mips.rs b/vendor/libc/src/unix/bsd/netbsdlike/netbsd/mips.rs new file mode 100644 index 00000000000000..1b24b4f6e3159a --- /dev/null +++ b/vendor/libc/src/unix/bsd/netbsdlike/netbsd/mips.rs @@ -0,0 +1,11 @@ +use crate::prelude::*; +use crate::PT_FIRSTMACH; + +pub type __cpu_simple_lock_nv_t = c_int; + +pub(crate) const _ALIGNBYTES: usize = size_of::() - 1; + +pub const PT_GETREGS: c_int = PT_FIRSTMACH + 1; +pub const PT_SETREGS: c_int = PT_FIRSTMACH + 2; +pub const PT_GETFPREGS: c_int = PT_FIRSTMACH + 3; +pub const PT_SETFPREGS: c_int = PT_FIRSTMACH + 4; diff --git a/vendor/libc/src/unix/bsd/netbsdlike/netbsd/mod.rs b/vendor/libc/src/unix/bsd/netbsdlike/netbsd/mod.rs new file mode 100644 index 00000000000000..9f0831323af798 --- /dev/null +++ b/vendor/libc/src/unix/bsd/netbsdlike/netbsd/mod.rs @@ -0,0 +1,3007 @@ +use crate::prelude::*; +use crate::{cmsghdr, off_t}; + +pub type clock_t = c_uint; +pub type suseconds_t = c_int; +pub type dev_t = u64; +pub type blksize_t = i32; +pub type fsblkcnt_t = u64; +pub type fsfilcnt_t = u64; +pub type idtype_t = c_int; +pub type mqd_t = c_int; +type __pthread_spin_t = __cpu_simple_lock_nv_t; +pub type vm_size_t = crate::uintptr_t; // FIXME(deprecated): deprecated since long time +pub type lwpid_t = c_uint; +pub type shmatt_t = c_uint; +pub type cpuid_t = c_ulong; +pub type cpuset_t = _cpuset; +pub type pthread_spin_t = c_uchar; +pub type timer_t = c_int; + +// elf.h + +pub type Elf32_Addr = u32; +pub type Elf32_Half = u16; +pub type Elf32_Lword = u64; +pub type Elf32_Off = u32; +pub type Elf32_Sword = i32; +pub type Elf32_Word = u32; + +pub type Elf64_Addr = u64; +pub type Elf64_Half = u16; +pub type Elf64_Lword = u64; +pub type Elf64_Off = u64; +pub type Elf64_Sword = i32; +pub type Elf64_Sxword = i64; +pub type Elf64_Word = u32; +pub type Elf64_Xword = u64; + +pub type 
iconv_t = *mut c_void; + +e! { + #[repr(C)] + pub enum fae_action { + FAE_OPEN, + FAE_DUP2, + FAE_CLOSE, + } +} + +cfg_if! { + if #[cfg(target_pointer_width = "64")] { + type Elf_Addr = Elf64_Addr; + type Elf_Half = Elf64_Half; + type Elf_Phdr = Elf64_Phdr; + } else if #[cfg(target_pointer_width = "32")] { + type Elf_Addr = Elf32_Addr; + type Elf_Half = Elf32_Half; + type Elf_Phdr = Elf32_Phdr; + } +} + +impl siginfo_t { + pub unsafe fn si_addr(&self) -> *mut c_void { + self.si_addr + } + + pub unsafe fn si_code(&self) -> c_int { + self.si_code + } + + pub unsafe fn si_errno(&self) -> c_int { + self.si_errno + } + + pub unsafe fn si_pid(&self) -> crate::pid_t { + #[repr(C)] + struct siginfo_timer { + _si_signo: c_int, + _si_errno: c_int, + _si_code: c_int, + __pad1: c_int, + _pid: crate::pid_t, + } + (*(self as *const siginfo_t as *const siginfo_timer))._pid + } + + pub unsafe fn si_uid(&self) -> crate::uid_t { + #[repr(C)] + struct siginfo_timer { + _si_signo: c_int, + _si_errno: c_int, + _si_code: c_int, + __pad1: c_int, + _pid: crate::pid_t, + _uid: crate::uid_t, + } + (*(self as *const siginfo_t as *const siginfo_timer))._uid + } + + pub unsafe fn si_value(&self) -> crate::sigval { + #[repr(C)] + struct siginfo_timer { + _si_signo: c_int, + _si_errno: c_int, + _si_code: c_int, + __pad1: c_int, + _pid: crate::pid_t, + _uid: crate::uid_t, + value: crate::sigval, + } + (*(self as *const siginfo_t as *const siginfo_timer)).value + } + + pub unsafe fn si_status(&self) -> c_int { + #[repr(C)] + struct siginfo_timer { + _si_signo: c_int, + _si_errno: c_int, + _si_code: c_int, + __pad1: c_int, + _pid: crate::pid_t, + _uid: crate::uid_t, + _value: crate::sigval, + _cpid: crate::pid_t, + _cuid: crate::uid_t, + status: c_int, + } + (*(self as *const siginfo_t as *const siginfo_timer)).status + } +} + +s! 
{ + pub struct aiocb { + pub aio_offset: off_t, + pub aio_buf: *mut c_void, + pub aio_nbytes: size_t, + pub aio_fildes: c_int, + pub aio_lio_opcode: c_int, + pub aio_reqprio: c_int, + pub aio_sigevent: crate::sigevent, + _state: c_int, + _errno: c_int, + _retval: ssize_t, + } + + pub struct glob_t { + pub gl_pathc: size_t, + pub gl_matchc: size_t, + pub gl_offs: size_t, + pub gl_flags: c_int, + pub gl_pathv: *mut *mut c_char, + + __unused3: *mut c_void, + + __unused4: *mut c_void, + __unused5: *mut c_void, + __unused6: *mut c_void, + __unused7: *mut c_void, + __unused8: *mut c_void, + } + + pub struct mq_attr { + pub mq_flags: c_long, + pub mq_maxmsg: c_long, + pub mq_msgsize: c_long, + pub mq_curmsgs: c_long, + } + + pub struct itimerspec { + pub it_interval: crate::timespec, + pub it_value: crate::timespec, + } + + pub struct sigset_t { + __bits: [u32; 4], + } + + pub struct stat { + pub st_dev: crate::dev_t, + pub st_mode: crate::mode_t, + pub st_ino: crate::ino_t, + pub st_nlink: crate::nlink_t, + pub st_uid: crate::uid_t, + pub st_gid: crate::gid_t, + pub st_rdev: crate::dev_t, + pub st_atime: crate::time_t, + pub st_atimensec: c_long, + pub st_mtime: crate::time_t, + pub st_mtimensec: c_long, + pub st_ctime: crate::time_t, + pub st_ctimensec: c_long, + pub st_birthtime: crate::time_t, + pub st_birthtimensec: c_long, + pub st_size: off_t, + pub st_blocks: crate::blkcnt_t, + pub st_blksize: crate::blksize_t, + pub st_flags: u32, + pub st_gen: u32, + pub st_spare: [u32; 2], + } + + pub struct addrinfo { + pub ai_flags: c_int, + pub ai_family: c_int, + pub ai_socktype: c_int, + pub ai_protocol: c_int, + pub ai_addrlen: crate::socklen_t, + pub ai_canonname: *mut c_char, + pub ai_addr: *mut crate::sockaddr, + pub ai_next: *mut crate::addrinfo, + } + + pub struct siginfo_t { + pub si_signo: c_int, + pub si_code: c_int, + pub si_errno: c_int, + __pad1: c_int, + pub si_addr: *mut c_void, + __pad2: [u64; 13], + } + + pub struct pthread_attr_t { + pta_magic: c_uint, + pta_flags: c_int, + pta_private: *mut c_void, + } + + pub struct pthread_mutex_t { + ptm_magic: c_uint, + ptm_errorcheck: __pthread_spin_t, + #[cfg(any( + target_arch = "sparc", + target_arch = "sparc64", + target_arch = "x86", + target_arch = "x86_64" + ))] + ptm_pad1: [u8; 3], + // actually a union with a non-unused, 0-initialized field + ptm_unused: __pthread_spin_t, + #[cfg(any( + target_arch = "sparc", + target_arch = "sparc64", + target_arch = "x86", + target_arch = "x86_64" + ))] + ptm_pad2: [u8; 3], + ptm_owner: crate::pthread_t, + ptm_waiters: *mut u8, + ptm_recursed: c_uint, + ptm_spare2: *mut c_void, + } + + pub struct pthread_mutexattr_t { + ptma_magic: c_uint, + ptma_private: *mut c_void, + } + + pub struct pthread_rwlockattr_t { + ptra_magic: c_uint, + ptra_private: *mut c_void, + } + + pub struct pthread_cond_t { + ptc_magic: c_uint, + ptc_lock: __pthread_spin_t, + ptc_waiters_first: *mut u8, + ptc_waiters_last: *mut u8, + ptc_mutex: *mut crate::pthread_mutex_t, + ptc_private: *mut c_void, + } + + pub struct pthread_condattr_t { + ptca_magic: c_uint, + ptca_private: *mut c_void, + } + + pub struct pthread_rwlock_t { + ptr_magic: c_uint, + ptr_interlock: __pthread_spin_t, + ptr_rblocked_first: *mut u8, + ptr_rblocked_last: *mut u8, + ptr_wblocked_first: *mut u8, + ptr_wblocked_last: *mut u8, + ptr_nreaders: c_uint, + ptr_owner: crate::pthread_t, + ptr_private: *mut c_void, + } + + pub struct pthread_spinlock_t { + pts_magic: c_uint, + pts_spin: crate::pthread_spin_t, + pts_flags: c_int, + } + + pub struct kevent { + 
pub ident: crate::uintptr_t, + pub filter: u32, + pub flags: u32, + pub fflags: u32, + pub data: i64, + // FIXME(netbsd): NetBSD 10.0 will finally have same layout as other BSD + pub udata: intptr_t, + } + + pub struct dqblk { + pub dqb_bhardlimit: u32, + pub dqb_bsoftlimit: u32, + pub dqb_curblocks: u32, + pub dqb_ihardlimit: u32, + pub dqb_isoftlimit: u32, + pub dqb_curinodes: u32, + pub dqb_btime: i32, + pub dqb_itime: i32, + } + + pub struct Dl_info { + pub dli_fname: *const c_char, + pub dli_fbase: *mut c_void, + pub dli_sname: *const c_char, + pub dli_saddr: *const c_void, + } + + pub struct lconv { + pub decimal_point: *mut c_char, + pub thousands_sep: *mut c_char, + pub grouping: *mut c_char, + pub int_curr_symbol: *mut c_char, + pub currency_symbol: *mut c_char, + pub mon_decimal_point: *mut c_char, + pub mon_thousands_sep: *mut c_char, + pub mon_grouping: *mut c_char, + pub positive_sign: *mut c_char, + pub negative_sign: *mut c_char, + pub int_frac_digits: c_char, + pub frac_digits: c_char, + pub p_cs_precedes: c_char, + pub p_sep_by_space: c_char, + pub n_cs_precedes: c_char, + pub n_sep_by_space: c_char, + pub p_sign_posn: c_char, + pub n_sign_posn: c_char, + pub int_p_cs_precedes: c_char, + pub int_n_cs_precedes: c_char, + pub int_p_sep_by_space: c_char, + pub int_n_sep_by_space: c_char, + pub int_p_sign_posn: c_char, + pub int_n_sign_posn: c_char, + } + + pub struct if_data { + pub ifi_type: c_uchar, + pub ifi_addrlen: c_uchar, + pub ifi_hdrlen: c_uchar, + pub ifi_link_state: c_int, + pub ifi_mtu: u64, + pub ifi_metric: u64, + pub ifi_baudrate: u64, + pub ifi_ipackets: u64, + pub ifi_ierrors: u64, + pub ifi_opackets: u64, + pub ifi_oerrors: u64, + pub ifi_collisions: u64, + pub ifi_ibytes: u64, + pub ifi_obytes: u64, + pub ifi_imcasts: u64, + pub ifi_omcasts: u64, + pub ifi_iqdrops: u64, + pub ifi_noproto: u64, + pub ifi_lastchange: crate::timespec, + } + + pub struct if_msghdr { + pub ifm_msglen: c_ushort, + pub ifm_version: c_uchar, + pub ifm_type: c_uchar, + pub ifm_addrs: c_int, + pub ifm_flags: c_int, + pub ifm_index: c_ushort, + pub ifm_data: if_data, + } + + pub struct sockcred { + pub sc_pid: crate::pid_t, + pub sc_uid: crate::uid_t, + pub sc_euid: crate::uid_t, + pub sc_gid: crate::gid_t, + pub sc_egid: crate::gid_t, + pub sc_ngroups: c_int, + pub sc_groups: [crate::gid_t; 1], + } + + pub struct uucred { + pub cr_unused: c_ushort, + pub cr_uid: crate::uid_t, + pub cr_gid: crate::gid_t, + pub cr_ngroups: c_int, + pub cr_groups: [crate::gid_t; NGROUPS_MAX as usize], + } + + pub struct unpcbid { + pub unp_pid: crate::pid_t, + pub unp_euid: crate::uid_t, + pub unp_egid: crate::gid_t, + } + + pub struct sockaddr_dl { + pub sdl_len: c_uchar, + pub sdl_family: c_uchar, + pub sdl_index: c_ushort, + pub sdl_type: u8, + pub sdl_nlen: u8, + pub sdl_alen: u8, + pub sdl_slen: u8, + pub sdl_data: [c_char; 12], + } + + pub struct __exit_status { + pub e_termination: u16, + pub e_exit: u16, + } + + pub struct shmid_ds { + pub shm_perm: crate::ipc_perm, + pub shm_segsz: size_t, + pub shm_lpid: crate::pid_t, + pub shm_cpid: crate::pid_t, + pub shm_nattch: crate::shmatt_t, + pub shm_atime: crate::time_t, + pub shm_dtime: crate::time_t, + pub shm_ctime: crate::time_t, + _shm_internal: *mut c_void, + } + + pub struct utmp { + pub ut_line: [c_char; UT_LINESIZE], + pub ut_name: [c_char; UT_NAMESIZE], + pub ut_host: [c_char; UT_HOSTSIZE], + pub ut_time: crate::time_t, + } + + pub struct lastlog { + pub ll_line: [c_char; UT_LINESIZE], + pub ll_host: [c_char; UT_HOSTSIZE], + pub ll_time: 
crate::time_t, + } + + pub struct timex { + pub modes: c_uint, + pub offset: c_long, + pub freq: c_long, + pub maxerror: c_long, + pub esterror: c_long, + pub status: c_int, + pub constant: c_long, + pub precision: c_long, + pub tolerance: c_long, + pub ppsfreq: c_long, + pub jitter: c_long, + pub shift: c_int, + pub stabil: c_long, + pub jitcnt: c_long, + pub calcnt: c_long, + pub errcnt: c_long, + pub stbcnt: c_long, + } + + pub struct ntptimeval { + pub time: crate::timespec, + pub maxerror: c_long, + pub esterror: c_long, + pub tai: c_long, + pub time_state: c_int, + } + + // elf.h + + pub struct Elf32_Phdr { + pub p_type: Elf32_Word, + pub p_offset: Elf32_Off, + pub p_vaddr: Elf32_Addr, + pub p_paddr: Elf32_Addr, + pub p_filesz: Elf32_Word, + pub p_memsz: Elf32_Word, + pub p_flags: Elf32_Word, + pub p_align: Elf32_Word, + } + + pub struct Elf64_Phdr { + pub p_type: Elf64_Word, + pub p_flags: Elf64_Word, + pub p_offset: Elf64_Off, + pub p_vaddr: Elf64_Addr, + pub p_paddr: Elf64_Addr, + pub p_filesz: Elf64_Xword, + pub p_memsz: Elf64_Xword, + pub p_align: Elf64_Xword, + } + + pub struct Aux32Info { + pub a_type: Elf32_Word, + pub a_v: Elf32_Word, + } + + pub struct Aux64Info { + pub a_type: Elf64_Word, + pub a_v: Elf64_Xword, + } + + // link.h + + pub struct dl_phdr_info { + pub dlpi_addr: Elf_Addr, + pub dlpi_name: *const c_char, + pub dlpi_phdr: *const Elf_Phdr, + pub dlpi_phnum: Elf_Half, + pub dlpi_adds: c_ulonglong, + pub dlpi_subs: c_ulonglong, + pub dlpi_tls_modid: usize, + pub dlpi_tls_data: *mut c_void, + } + + pub struct _cpuset { + bits: [u32; 0], + } + + pub struct accept_filter_arg { + pub af_name: [c_char; 16], + af_arg: [c_char; 256 - 16], + } + + pub struct ki_sigset_t { + pub __bits: [u32; 4], + } + + pub struct kinfo_proc2 { + pub p_forw: u64, + pub p_back: u64, + pub p_paddr: u64, + pub p_addr: u64, + pub p_fd: u64, + pub p_cwdi: u64, + pub p_stats: u64, + pub p_limit: u64, + pub p_vmspace: u64, + pub p_sigacts: u64, + pub p_sess: u64, + pub p_tsess: u64, + pub p_ru: u64, + pub p_eflag: i32, + pub p_exitsig: i32, + pub p_flag: i32, + pub p_pid: i32, + pub p_ppid: i32, + pub p_sid: i32, + pub p__pgid: i32, + pub p_tpgid: i32, + pub p_uid: u32, + pub p_ruid: u32, + pub p_gid: u32, + pub p_rgid: u32, + pub p_groups: [u32; KI_NGROUPS as usize], + pub p_ngroups: i16, + pub p_jobc: i16, + pub p_tdev: u32, + pub p_estcpu: u32, + pub p_rtime_sec: u32, + pub p_rtime_usec: u32, + pub p_cpticks: i32, + pub p_pctcpu: u32, + pub p_swtime: u32, + pub p_slptime: u32, + pub p_schedflags: i32, + pub p_uticks: u64, + pub p_sticks: u64, + pub p_iticks: u64, + pub p_tracep: u64, + pub p_traceflag: i32, + pub p_holdcnt: i32, + pub p_siglist: ki_sigset_t, + pub p_sigmask: ki_sigset_t, + pub p_sigignore: ki_sigset_t, + pub p_sigcatch: ki_sigset_t, + pub p_stat: i8, + pub p_priority: u8, + pub p_usrpri: u8, + pub p_nice: u8, + pub p_xstat: u16, + pub p_acflag: u16, + pub p_comm: [c_char; KI_MAXCOMLEN as usize], + pub p_wmesg: [c_char; KI_WMESGLEN as usize], + pub p_wchan: u64, + pub p_login: [c_char; KI_MAXLOGNAME as usize], + pub p_vm_rssize: i32, + pub p_vm_tsize: i32, + pub p_vm_dsize: i32, + pub p_vm_ssize: i32, + pub p_uvalid: i64, + pub p_ustart_sec: u32, + pub p_ustart_usec: u32, + pub p_uutime_sec: u32, + pub p_uutime_usec: u32, + pub p_ustime_sec: u32, + pub p_ustime_usec: u32, + pub p_uru_maxrss: u64, + pub p_uru_ixrss: u64, + pub p_uru_idrss: u64, + pub p_uru_isrss: u64, + pub p_uru_minflt: u64, + pub p_uru_majflt: u64, + pub p_uru_nswap: u64, + pub p_uru_inblock: u64, + pub 
p_uru_oublock: u64, + pub p_uru_msgsnd: u64, + pub p_uru_msgrcv: u64, + pub p_uru_nsignals: u64, + pub p_uru_nvcsw: u64, + pub p_uru_nivcsw: u64, + pub p_uctime_sec: u32, + pub p_uctime_usec: u32, + pub p_cpuid: u64, + pub p_realflag: u64, + pub p_nlwps: u64, + pub p_nrlwps: u64, + pub p_realstat: u64, + pub p_svuid: u32, + pub p_svgid: u32, + pub p_ename: [c_char; KI_MAXEMULLEN as usize], + pub p_vm_vsize: i64, + pub p_vm_msize: i64, + } + + pub struct kinfo_lwp { + pub l_forw: u64, + pub l_back: u64, + pub l_laddr: u64, + pub l_addr: u64, + pub l_lid: i32, + pub l_flag: i32, + pub l_swtime: u32, + pub l_slptime: u32, + pub l_schedflags: i32, + pub l_holdcnt: i32, + pub l_priority: u8, + pub l_usrpri: u8, + pub l_stat: i8, + l_pad1: i8, + l_pad2: i32, + pub l_wmesg: [c_char; KI_WMESGLEN as usize], + pub l_wchan: u64, + pub l_cpuid: u64, + pub l_rtime_sec: u32, + pub l_rtime_usec: u32, + pub l_cpticks: u32, + pub l_pctcpu: u32, + pub l_pid: u32, + pub l_name: [c_char; KI_LNAMELEN as usize], + } + + pub struct kinfo_vmentry { + pub kve_start: u64, + pub kve_end: u64, + pub kve_offset: u64, + pub kve_type: u32, + pub kve_flags: u32, + pub kve_count: u32, + pub kve_wired_count: u32, + pub kve_advice: u32, + pub kve_attributes: u32, + pub kve_protection: u32, + pub kve_max_protection: u32, + pub kve_ref_count: u32, + pub kve_inheritance: u32, + pub kve_vn_fileid: u64, + pub kve_vn_size: u64, + pub kve_vn_fsid: u64, + pub kve_vn_rdev: u64, + pub kve_vn_type: u32, + pub kve_vn_mode: u32, + pub kve_path: [[c_char; 32]; 32], + } + + pub struct __c_anonymous_posix_spawn_fae_open { + pub path: *mut c_char, + pub oflag: c_int, + pub mode: crate::mode_t, + } + + pub struct __c_anonymous_posix_spawn_fae_dup2 { + pub newfildes: c_int, + } + + pub struct posix_spawnattr_t { + pub sa_flags: c_short, + pub sa_pgroup: crate::pid_t, + pub sa_schedparam: crate::sched_param, + pub sa_schedpolicy: c_int, + pub sa_sigdefault: sigset_t, + pub sa_sigmask: sigset_t, + } + + pub struct posix_spawn_file_actions_entry_t { + pub fae_action: fae_action, + pub fae_fildes: c_int, + pub fae_data: __c_anonymous_posix_spawn_fae, + } + + pub struct posix_spawn_file_actions_t { + pub size: c_uint, + pub len: c_uint, + pub fae: *mut posix_spawn_file_actions_entry_t, + } + + pub struct ptrace_lwpinfo { + pub pl_lwpid: lwpid_t, + pub pl_event: c_int, + } + + pub struct ptrace_lwpstatus { + pub pl_lwpid: lwpid_t, + pub pl_sigpend: sigset_t, + pub pl_sigmask: sigset_t, + pub pl_name: [c_char; 20], + pub pl_private: *mut c_void, + } + + pub struct ptrace_siginfo { + pub psi_siginfo: siginfo_t, + pub psi_lwpid: lwpid_t, + } + + pub struct ptrace_event { + pub pe_set_event: c_int, + } + + pub struct sysctldesc { + pub descr_num: i32, + pub descr_ver: u32, + pub descr_len: u32, + pub descr_str: [c_char; 1], + } + + pub struct ifreq { + pub _priv: [[c_char; 6]; 24], + } + + pub struct ifconf { + pub ifc_len: c_int, + pub ifc_ifcu: __c_anonymous_ifc_ifcu, + } + + pub struct tcp_info { + pub tcpi_state: u8, + pub __tcpi_ca_state: u8, + pub __tcpi_retransmits: u8, + pub __tcpi_probes: u8, + pub __tcpi_backoff: u8, + pub tcpi_options: u8, + pub tcp_snd_wscale: u8, + pub tcp_rcv_wscale: u8, + pub tcpi_rto: u32, + pub __tcpi_ato: u32, + pub tcpi_snd_mss: u32, + pub tcpi_rcv_mss: u32, + pub __tcpi_unacked: u32, + pub __tcpi_sacked: u32, + pub __tcpi_lost: u32, + pub __tcpi_retrans: u32, + pub __tcpi_fackets: u32, + pub __tcpi_last_data_sent: u32, + pub __tcpi_last_ack_sent: u32, + pub tcpi_last_data_recv: u32, + pub __tcpi_last_ack_recv: u32, 
+ pub __tcpi_pmtu: u32, + pub __tcpi_rcv_ssthresh: u32, + pub tcpi_rtt: u32, + pub tcpi_rttvar: u32, + pub tcpi_snd_ssthresh: u32, + pub tcpi_snd_cwnd: u32, + pub __tcpi_advmss: u32, + pub __tcpi_reordering: u32, + pub __tcpi_rcv_rtt: u32, + pub tcpi_rcv_space: u32, + pub tcpi_snd_wnd: u32, + pub tcpi_snd_bwnd: u32, + pub tcpi_snd_nxt: u32, + pub tcpi_rcv_nxt: u32, + pub tcpi_toe_tid: u32, + pub tcpi_snd_rexmitpack: u32, + pub tcpi_rcv_ooopack: u32, + pub tcpi_snd_zerowin: u32, + pub __tcpi_pad: [u32; 26], + } +} + +s_no_extra_traits! { + pub struct utmpx { + pub ut_name: [c_char; _UTX_USERSIZE], + pub ut_id: [c_char; _UTX_IDSIZE], + pub ut_line: [c_char; _UTX_LINESIZE], + pub ut_host: [c_char; _UTX_HOSTSIZE], + pub ut_session: u16, + pub ut_type: u16, + pub ut_pid: crate::pid_t, + pub ut_exit: __exit_status, // FIXME(netbsd): when anonymous struct are supported + pub ut_ss: sockaddr_storage, + pub ut_tv: crate::timeval, + pub ut_pad: [u8; _UTX_PADSIZE], + } + + pub struct lastlogx { + pub ll_tv: crate::timeval, + pub ll_line: [c_char; _UTX_LINESIZE], + pub ll_host: [c_char; _UTX_HOSTSIZE], + pub ll_ss: sockaddr_storage, + } + + pub struct in_pktinfo { + pub ipi_addr: crate::in_addr, + pub ipi_ifindex: c_uint, + } + + pub struct arphdr { + pub ar_hrd: u16, + pub ar_pro: u16, + pub ar_hln: u8, + pub ar_pln: u8, + pub ar_op: u16, + } + + pub struct in_addr { + pub s_addr: crate::in_addr_t, + } + + pub struct ip_mreq { + pub imr_multiaddr: in_addr, + pub imr_interface: in_addr, + } + + pub struct sockaddr_in { + pub sin_len: u8, + pub sin_family: crate::sa_family_t, + pub sin_port: crate::in_port_t, + pub sin_addr: crate::in_addr, + pub sin_zero: [i8; 8], + } + + pub struct dirent { + pub d_fileno: crate::ino_t, + pub d_reclen: u16, + pub d_namlen: u16, + pub d_type: u8, + pub d_name: [c_char; 512], + } + + pub struct statvfs { + pub f_flag: c_ulong, + pub f_bsize: c_ulong, + pub f_frsize: c_ulong, + pub f_iosize: c_ulong, + + pub f_blocks: crate::fsblkcnt_t, + pub f_bfree: crate::fsblkcnt_t, + pub f_bavail: crate::fsblkcnt_t, + pub f_bresvd: crate::fsblkcnt_t, + + pub f_files: crate::fsfilcnt_t, + pub f_ffree: crate::fsfilcnt_t, + pub f_favail: crate::fsfilcnt_t, + pub f_fresvd: crate::fsfilcnt_t, + + pub f_syncreads: u64, + pub f_syncwrites: u64, + + pub f_asyncreads: u64, + pub f_asyncwrites: u64, + + pub f_fsidx: crate::fsid_t, + pub f_fsid: c_ulong, + pub f_namemax: c_ulong, + pub f_owner: crate::uid_t, + + pub f_spare: [u32; 4], + + pub f_fstypename: [c_char; 32], + pub f_mntonname: [c_char; 1024], + pub f_mntfromname: [c_char; 1024], + } + + pub struct sockaddr_storage { + pub ss_len: u8, + pub ss_family: crate::sa_family_t, + __ss_pad1: [u8; 6], + __ss_pad2: i64, + __ss_pad3: [u8; 112], + } + + pub struct sigevent { + pub sigev_notify: c_int, + pub sigev_signo: c_int, + pub sigev_value: crate::sigval, + __unused1: *mut c_void, //actually a function pointer + pub sigev_notify_attributes: *mut c_void, + } + + pub union __c_anonymous_posix_spawn_fae { + pub open: __c_anonymous_posix_spawn_fae_open, + pub dup2: __c_anonymous_posix_spawn_fae_dup2, + } + + pub union __c_anonymous_ifc_ifcu { + pub ifcu_buf: *mut c_void, + pub ifcu_req: *mut ifreq, + } +} + +cfg_if! 
{ + if #[cfg(feature = "extra_traits")] { + impl PartialEq for utmpx { + fn eq(&self, other: &utmpx) -> bool { + self.ut_type == other.ut_type + && self.ut_pid == other.ut_pid + && self.ut_name == other.ut_name + && self.ut_line == other.ut_line + && self.ut_id == other.ut_id + && self.ut_exit == other.ut_exit + && self.ut_session == other.ut_session + && self.ut_tv == other.ut_tv + && self.ut_ss == other.ut_ss + && self + .ut_pad + .iter() + .zip(other.ut_pad.iter()) + .all(|(a, b)| a == b) + && self + .ut_host + .iter() + .zip(other.ut_host.iter()) + .all(|(a, b)| a == b) + } + } + + impl Eq for utmpx {} + + impl hash::Hash for utmpx { + fn hash(&self, state: &mut H) { + self.ut_name.hash(state); + self.ut_type.hash(state); + self.ut_pid.hash(state); + self.ut_line.hash(state); + self.ut_id.hash(state); + self.ut_host.hash(state); + self.ut_exit.hash(state); + self.ut_session.hash(state); + self.ut_tv.hash(state); + self.ut_ss.hash(state); + self.ut_pad.hash(state); + } + } + + impl PartialEq for lastlogx { + fn eq(&self, other: &lastlogx) -> bool { + self.ll_tv == other.ll_tv + && self.ll_line == other.ll_line + && self.ll_ss == other.ll_ss + && self + .ll_host + .iter() + .zip(other.ll_host.iter()) + .all(|(a, b)| a == b) + } + } + + impl Eq for lastlogx {} + + impl hash::Hash for lastlogx { + fn hash(&self, state: &mut H) { + self.ll_tv.hash(state); + self.ll_line.hash(state); + self.ll_host.hash(state); + self.ll_ss.hash(state); + } + } + + impl PartialEq for in_pktinfo { + fn eq(&self, other: &in_pktinfo) -> bool { + self.ipi_addr == other.ipi_addr && self.ipi_ifindex == other.ipi_ifindex + } + } + impl Eq for in_pktinfo {} + impl hash::Hash for in_pktinfo { + fn hash(&self, state: &mut H) { + self.ipi_addr.hash(state); + self.ipi_ifindex.hash(state); + } + } + + impl PartialEq for arphdr { + fn eq(&self, other: &arphdr) -> bool { + self.ar_hrd == other.ar_hrd + && self.ar_pro == other.ar_pro + && self.ar_hln == other.ar_hln + && self.ar_pln == other.ar_pln + && self.ar_op == other.ar_op + } + } + impl Eq for arphdr {} + impl hash::Hash for arphdr { + fn hash(&self, state: &mut H) { + let ar_hrd = self.ar_hrd; + let ar_pro = self.ar_pro; + let ar_op = self.ar_op; + ar_hrd.hash(state); + ar_pro.hash(state); + self.ar_hln.hash(state); + self.ar_pln.hash(state); + ar_op.hash(state); + } + } + + impl PartialEq for in_addr { + fn eq(&self, other: &in_addr) -> bool { + self.s_addr == other.s_addr + } + } + impl Eq for in_addr {} + impl hash::Hash for in_addr { + fn hash(&self, state: &mut H) { + let s_addr = self.s_addr; + s_addr.hash(state); + } + } + + impl PartialEq for ip_mreq { + fn eq(&self, other: &ip_mreq) -> bool { + self.imr_multiaddr == other.imr_multiaddr + && self.imr_interface == other.imr_interface + } + } + impl Eq for ip_mreq {} + impl hash::Hash for ip_mreq { + fn hash(&self, state: &mut H) { + self.imr_multiaddr.hash(state); + self.imr_interface.hash(state); + } + } + + impl PartialEq for sockaddr_in { + fn eq(&self, other: &sockaddr_in) -> bool { + self.sin_len == other.sin_len + && self.sin_family == other.sin_family + && self.sin_port == other.sin_port + && self.sin_addr == other.sin_addr + && self.sin_zero == other.sin_zero + } + } + impl Eq for sockaddr_in {} + impl hash::Hash for sockaddr_in { + fn hash(&self, state: &mut H) { + self.sin_len.hash(state); + self.sin_family.hash(state); + self.sin_port.hash(state); + self.sin_addr.hash(state); + self.sin_zero.hash(state); + } + } + + impl PartialEq for dirent { + fn eq(&self, other: &dirent) -> bool { + 
self.d_fileno == other.d_fileno + && self.d_reclen == other.d_reclen + && self.d_namlen == other.d_namlen + && self.d_type == other.d_type + && self + .d_name + .iter() + .zip(other.d_name.iter()) + .all(|(a, b)| a == b) + } + } + impl Eq for dirent {} + impl hash::Hash for dirent { + fn hash(&self, state: &mut H) { + self.d_fileno.hash(state); + self.d_reclen.hash(state); + self.d_namlen.hash(state); + self.d_type.hash(state); + self.d_name.hash(state); + } + } + + impl PartialEq for statvfs { + fn eq(&self, other: &statvfs) -> bool { + self.f_flag == other.f_flag + && self.f_bsize == other.f_bsize + && self.f_frsize == other.f_frsize + && self.f_iosize == other.f_iosize + && self.f_blocks == other.f_blocks + && self.f_bfree == other.f_bfree + && self.f_bavail == other.f_bavail + && self.f_bresvd == other.f_bresvd + && self.f_files == other.f_files + && self.f_ffree == other.f_ffree + && self.f_favail == other.f_favail + && self.f_fresvd == other.f_fresvd + && self.f_syncreads == other.f_syncreads + && self.f_syncwrites == other.f_syncwrites + && self.f_asyncreads == other.f_asyncreads + && self.f_asyncwrites == other.f_asyncwrites + && self.f_fsidx == other.f_fsidx + && self.f_fsid == other.f_fsid + && self.f_namemax == other.f_namemax + && self.f_owner == other.f_owner + && self.f_spare == other.f_spare + && self.f_fstypename == other.f_fstypename + && self + .f_mntonname + .iter() + .zip(other.f_mntonname.iter()) + .all(|(a, b)| a == b) + && self + .f_mntfromname + .iter() + .zip(other.f_mntfromname.iter()) + .all(|(a, b)| a == b) + } + } + impl Eq for statvfs {} + impl hash::Hash for statvfs { + fn hash(&self, state: &mut H) { + self.f_flag.hash(state); + self.f_bsize.hash(state); + self.f_frsize.hash(state); + self.f_iosize.hash(state); + self.f_blocks.hash(state); + self.f_bfree.hash(state); + self.f_bavail.hash(state); + self.f_bresvd.hash(state); + self.f_files.hash(state); + self.f_ffree.hash(state); + self.f_favail.hash(state); + self.f_fresvd.hash(state); + self.f_syncreads.hash(state); + self.f_syncwrites.hash(state); + self.f_asyncreads.hash(state); + self.f_asyncwrites.hash(state); + self.f_fsidx.hash(state); + self.f_fsid.hash(state); + self.f_namemax.hash(state); + self.f_owner.hash(state); + self.f_spare.hash(state); + self.f_fstypename.hash(state); + self.f_mntonname.hash(state); + self.f_mntfromname.hash(state); + } + } + + impl PartialEq for sockaddr_storage { + fn eq(&self, other: &sockaddr_storage) -> bool { + self.ss_len == other.ss_len + && self.ss_family == other.ss_family + && self.__ss_pad1 == other.__ss_pad1 + && self.__ss_pad2 == other.__ss_pad2 + && self + .__ss_pad3 + .iter() + .zip(other.__ss_pad3.iter()) + .all(|(a, b)| a == b) + } + } + impl Eq for sockaddr_storage {} + impl hash::Hash for sockaddr_storage { + fn hash(&self, state: &mut H) { + self.ss_len.hash(state); + self.ss_family.hash(state); + self.__ss_pad1.hash(state); + self.__ss_pad2.hash(state); + self.__ss_pad3.hash(state); + } + } + + impl PartialEq for sigevent { + fn eq(&self, other: &sigevent) -> bool { + self.sigev_notify == other.sigev_notify + && self.sigev_signo == other.sigev_signo + && self.sigev_value == other.sigev_value + && self.sigev_notify_attributes == other.sigev_notify_attributes + } + } + impl Eq for sigevent {} + impl hash::Hash for sigevent { + fn hash(&self, state: &mut H) { + self.sigev_notify.hash(state); + self.sigev_signo.hash(state); + self.sigev_value.hash(state); + self.sigev_notify_attributes.hash(state); + } + } + + impl Eq for __c_anonymous_posix_spawn_fae {} + 
+ impl PartialEq for __c_anonymous_posix_spawn_fae { + fn eq(&self, other: &__c_anonymous_posix_spawn_fae) -> bool { + unsafe { self.open == other.open || self.dup2 == other.dup2 } + } + } + + impl hash::Hash for __c_anonymous_posix_spawn_fae { + fn hash(&self, state: &mut H) { + unsafe { + self.open.hash(state); + self.dup2.hash(state); + } + } + } + + impl Eq for __c_anonymous_ifc_ifcu {} + + impl PartialEq for __c_anonymous_ifc_ifcu { + fn eq(&self, other: &__c_anonymous_ifc_ifcu) -> bool { + unsafe { self.ifcu_buf == other.ifcu_buf || self.ifcu_req == other.ifcu_req } + } + } + + impl hash::Hash for __c_anonymous_ifc_ifcu { + fn hash(&self, state: &mut H) { + unsafe { + self.ifcu_buf.hash(state); + self.ifcu_req.hash(state); + } + } + } + } +} + +pub const AT_FDCWD: c_int = -100; +pub const AT_EACCESS: c_int = 0x100; +pub const AT_SYMLINK_NOFOLLOW: c_int = 0x200; +pub const AT_SYMLINK_FOLLOW: c_int = 0x400; +pub const AT_REMOVEDIR: c_int = 0x800; + +pub const AT_NULL: c_int = 0; +pub const AT_IGNORE: c_int = 1; +pub const AT_EXECFD: c_int = 2; +pub const AT_PHDR: c_int = 3; +pub const AT_PHENT: c_int = 4; +pub const AT_PHNUM: c_int = 5; +pub const AT_PAGESZ: c_int = 6; +pub const AT_BASE: c_int = 7; +pub const AT_FLAGS: c_int = 8; +pub const AT_ENTRY: c_int = 9; +pub const AT_DCACHEBSIZE: c_int = 10; +pub const AT_ICACHEBSIZE: c_int = 11; +pub const AT_UCACHEBSIZE: c_int = 12; +pub const AT_STACKBASE: c_int = 13; +pub const AT_EUID: c_int = 2000; +pub const AT_RUID: c_int = 2001; +pub const AT_EGID: c_int = 2002; +pub const AT_RGID: c_int = 2003; +pub const AT_SUN_LDELF: c_int = 2004; +pub const AT_SUN_LDSHDR: c_int = 2005; +pub const AT_SUN_LDNAME: c_int = 2006; +pub const AT_SUN_LDPGSIZE: c_int = 2007; +pub const AT_SUN_PLATFORM: c_int = 2008; +pub const AT_SUN_HWCAP: c_int = 2009; +pub const AT_SUN_IFLUSH: c_int = 2010; +pub const AT_SUN_CPU: c_int = 2011; +pub const AT_SUN_EMUL_ENTRY: c_int = 2012; +pub const AT_SUN_EMUL_EXECFD: c_int = 2013; +pub const AT_SUN_EXECNAME: c_int = 2014; + +pub const EXTATTR_NAMESPACE_USER: c_int = 1; +pub const EXTATTR_NAMESPACE_SYSTEM: c_int = 2; + +pub const LC_COLLATE_MASK: c_int = 1 << crate::LC_COLLATE; +pub const LC_CTYPE_MASK: c_int = 1 << crate::LC_CTYPE; +pub const LC_MONETARY_MASK: c_int = 1 << crate::LC_MONETARY; +pub const LC_NUMERIC_MASK: c_int = 1 << crate::LC_NUMERIC; +pub const LC_TIME_MASK: c_int = 1 << crate::LC_TIME; +pub const LC_MESSAGES_MASK: c_int = 1 << crate::LC_MESSAGES; +pub const LC_ALL_MASK: c_int = !0; + +pub const ERA: crate::nl_item = 52; +pub const ERA_D_FMT: crate::nl_item = 53; +pub const ERA_D_T_FMT: crate::nl_item = 54; +pub const ERA_T_FMT: crate::nl_item = 55; +pub const ALT_DIGITS: crate::nl_item = 56; + +pub const O_CLOEXEC: c_int = 0x400000; +pub const O_ALT_IO: c_int = 0x40000; +pub const O_NOSIGPIPE: c_int = 0x1000000; +pub const O_SEARCH: c_int = 0x800000; +pub const O_DIRECTORY: c_int = 0x200000; +pub const O_DIRECT: c_int = 0x00080000; +pub const O_RSYNC: c_int = 0x00020000; + +pub const MS_SYNC: c_int = 0x4; +pub const MS_INVALIDATE: c_int = 0x2; + +// Here because they are not present on OpenBSD +// (https://github.com/openbsd/src/blob/HEAD/sys/sys/resource.h) +pub const RLIMIT_SBSIZE: c_int = 9; +pub const RLIMIT_AS: c_int = 10; +pub const RLIMIT_NTHR: c_int = 11; + +#[deprecated(since = "0.2.64", note = "Not stable across OS versions")] +pub const RLIM_NLIMITS: c_int = 12; + +pub const EIDRM: c_int = 82; +pub const ENOMSG: c_int = 83; +pub const EOVERFLOW: c_int = 84; +pub const EILSEQ: c_int = 85; 
+pub const ENOTSUP: c_int = 86; +pub const ECANCELED: c_int = 87; +pub const EBADMSG: c_int = 88; +pub const ENODATA: c_int = 89; +pub const ENOSR: c_int = 90; +pub const ENOSTR: c_int = 91; +pub const ETIME: c_int = 92; +pub const ENOATTR: c_int = 93; +pub const EMULTIHOP: c_int = 94; +pub const ENOLINK: c_int = 95; +pub const EPROTO: c_int = 96; +pub const EOWNERDEAD: c_int = 97; +pub const ENOTRECOVERABLE: c_int = 98; +#[deprecated( + since = "0.2.143", + note = "This value will always match the highest defined error number \ + and thus is not stable. \ + See #3040 for more info." +)] +pub const ELAST: c_int = 98; + +pub const F_DUPFD_CLOEXEC: c_int = 12; +pub const F_CLOSEM: c_int = 10; +pub const F_GETNOSIGPIPE: c_int = 13; +pub const F_SETNOSIGPIPE: c_int = 14; +pub const F_MAXFD: c_int = 11; +pub const F_GETPATH: c_int = 15; + +pub const FUTEX_WAIT: c_int = 0; +pub const FUTEX_WAKE: c_int = 1; +pub const FUTEX_FD: c_int = 2; +pub const FUTEX_REQUEUE: c_int = 3; +pub const FUTEX_CMP_REQUEUE: c_int = 4; +pub const FUTEX_WAKE_OP: c_int = 5; +pub const FUTEX_LOCK_PI: c_int = 6; +pub const FUTEX_UNLOCK_PI: c_int = 7; +pub const FUTEX_TRYLOCK_PI: c_int = 8; +pub const FUTEX_WAIT_BITSET: c_int = 9; +pub const FUTEX_WAKE_BITSET: c_int = 10; +pub const FUTEX_WAIT_REQUEUE_PI: c_int = 11; +pub const FUTEX_CMP_REQUEUE_PI: c_int = 12; +pub const FUTEX_PRIVATE_FLAG: c_int = 1 << 7; +pub const FUTEX_CLOCK_REALTIME: c_int = 1 << 8; +pub const FUTEX_CMD_MASK: c_int = !(FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME); +pub const FUTEX_WAITERS: u32 = 1 << 31; +pub const FUTEX_OWNER_DIED: u32 = 1 << 30; +pub const FUTEX_SYNCOBJ_1: u32 = 1 << 29; +pub const FUTEX_SYNCOBJ_0: u32 = 1 << 28; +pub const FUTEX_TID_MASK: u32 = (1 << 28) - 1; +pub const FUTEX_BITSET_MATCH_ANY: u32 = !0; + +pub const IP_RECVDSTADDR: c_int = 7; +pub const IP_SENDSRCADDR: c_int = IP_RECVDSTADDR; +pub const IP_RECVIF: c_int = 20; +pub const IP_PKTINFO: c_int = 25; +pub const IP_RECVPKTINFO: c_int = 26; +pub const IPV6_JOIN_GROUP: c_int = 12; +pub const IPV6_LEAVE_GROUP: c_int = 13; + +pub const TCP_KEEPIDLE: c_int = 3; +pub const TCP_KEEPINTVL: c_int = 5; +pub const TCP_KEEPCNT: c_int = 6; +pub const TCP_KEEPINIT: c_int = 7; +pub const TCP_MD5SIG: c_int = 0x10; +pub const TCP_CONGCTL: c_int = 0x20; + +pub const SOCK_CONN_DGRAM: c_int = 6; +pub const SOCK_DCCP: c_int = SOCK_CONN_DGRAM; +pub const SOCK_NOSIGPIPE: c_int = 0x40000000; +pub const SOCK_FLAGS_MASK: c_int = 0xf0000000; + +pub const SO_SNDTIMEO: c_int = 0x100b; +pub const SO_RCVTIMEO: c_int = 0x100c; +pub const SO_NOSIGPIPE: c_int = 0x0800; +pub const SO_ACCEPTFILTER: c_int = 0x1000; +pub const SO_TIMESTAMP: c_int = 0x2000; +pub const SO_OVERFLOWED: c_int = 0x1009; +pub const SO_NOHEADER: c_int = 0x100a; + +// http://cvsweb.netbsd.org/bsdweb.cgi/src/sys/sys/un.h?annotate +pub const LOCAL_OCREDS: c_int = 0x0001; // pass credentials to receiver +pub const LOCAL_CONNWAIT: c_int = 0x0002; // connects block until accepted +pub const LOCAL_PEEREID: c_int = 0x0003; // get peer identification +pub const LOCAL_CREDS: c_int = 0x0004; // pass credentials to receiver + +// https://github.com/NetBSD/src/blob/trunk/sys/net/if.h#L373 +pub const IFF_UP: c_int = 0x0001; // interface is up +pub const IFF_BROADCAST: c_int = 0x0002; // broadcast address valid +pub const IFF_DEBUG: c_int = 0x0004; // turn on debugging +pub const IFF_LOOPBACK: c_int = 0x0008; // is a loopback net +pub const IFF_POINTOPOINT: c_int = 0x0010; // interface is point-to-point link +pub const IFF_NOTRAILERS: c_int = 
0x0020; // avoid use of trailers +pub const IFF_RUNNING: c_int = 0x0040; // resources allocated +pub const IFF_NOARP: c_int = 0x0080; // no address resolution protocol +pub const IFF_PROMISC: c_int = 0x0100; // receive all packets +pub const IFF_ALLMULTI: c_int = 0x0200; // receive all multicast packets +pub const IFF_OACTIVE: c_int = 0x0400; // transmission in progress +pub const IFF_SIMPLEX: c_int = 0x0800; // can't hear own transmissions +pub const IFF_LINK0: c_int = 0x1000; // per link layer defined bit +pub const IFF_LINK1: c_int = 0x2000; // per link layer defined bit +pub const IFF_LINK2: c_int = 0x4000; // per link layer defined bit +pub const IFF_MULTICAST: c_int = 0x8000; // supports multicast + +// sys/netinet/in.h +// Protocols (RFC 1700) +// NOTE: These are in addition to the constants defined in src/unix/mod.rs + +// IPPROTO_IP defined in src/unix/mod.rs +/// Hop-by-hop option header +pub const IPPROTO_HOPOPTS: c_int = 0; +// IPPROTO_ICMP defined in src/unix/mod.rs +/// group mgmt protocol +pub const IPPROTO_IGMP: c_int = 2; +/// gateway^2 (deprecated) +pub const IPPROTO_GGP: c_int = 3; +/// for compatibility +pub const IPPROTO_IPIP: c_int = 4; +// IPPROTO_TCP defined in src/unix/mod.rs +/// exterior gateway protocol +pub const IPPROTO_EGP: c_int = 8; +/// pup +pub const IPPROTO_PUP: c_int = 12; +// IPPROTO_UDP defined in src/unix/mod.rs +/// xns idp +pub const IPPROTO_IDP: c_int = 22; +/// tp-4 w/ class negotiation +pub const IPPROTO_TP: c_int = 29; +/// DCCP +pub const IPPROTO_DCCP: c_int = 33; +// IPPROTO_IPV6 defined in src/unix/mod.rs +/// IP6 routing header +pub const IPPROTO_ROUTING: c_int = 43; +/// IP6 fragmentation header +pub const IPPROTO_FRAGMENT: c_int = 44; +/// resource reservation +pub const IPPROTO_RSVP: c_int = 46; +/// General Routing Encap. +pub const IPPROTO_GRE: c_int = 47; +/// IP6 Encap Sec. Payload +pub const IPPROTO_ESP: c_int = 50; +/// IP6 Auth Header +pub const IPPROTO_AH: c_int = 51; +/// IP Mobility RFC 2004 +pub const IPPROTO_MOBILE: c_int = 55; +/// IPv6 ICMP +pub const IPPROTO_IPV6_ICMP: c_int = 58; +// IPPROTO_ICMPV6 defined in src/unix/mod.rs +/// IP6 no next header +pub const IPPROTO_NONE: c_int = 59; +/// IP6 destination option +pub const IPPROTO_DSTOPTS: c_int = 60; +/// ISO cnlp +pub const IPPROTO_EON: c_int = 80; +/// Ethernet-in-IP +pub const IPPROTO_ETHERIP: c_int = 97; +/// encapsulation header +pub const IPPROTO_ENCAP: c_int = 98; +/// Protocol indep. multicast +pub const IPPROTO_PIM: c_int = 103; +/// IP Payload Comp. Protocol +pub const IPPROTO_IPCOMP: c_int = 108; +/// VRRP RFC 2338 +pub const IPPROTO_VRRP: c_int = 112; +/// Common Address Resolution Protocol +pub const IPPROTO_CARP: c_int = 112; +/// L2TPv3 +pub const IPPROTO_L2TP: c_int = 115; +/// SCTP +pub const IPPROTO_SCTP: c_int = 132; +/// PFSYNC +pub const IPPROTO_PFSYNC: c_int = 240; +pub const IPPROTO_MAX: c_int = 256; + +/// last return value of *_input(), meaning "all job for this pkt is done". 
+pub const IPPROTO_DONE: c_int = 257; + +/// sysctl placeholder for (FAST_)IPSEC +pub const CTL_IPPROTO_IPSEC: c_int = 258; + +pub const AF_OROUTE: c_int = 17; +pub const AF_ARP: c_int = 28; +pub const pseudo_AF_KEY: c_int = 29; +pub const pseudo_AF_HDRCMPLT: c_int = 30; +pub const AF_BLUETOOTH: c_int = 31; +pub const AF_IEEE80211: c_int = 32; +pub const AF_MPLS: c_int = 33; +pub const AF_ROUTE: c_int = 34; +pub const NET_RT_DUMP: c_int = 1; +pub const NET_RT_FLAGS: c_int = 2; +pub const NET_RT_OOOIFLIST: c_int = 3; +pub const NET_RT_OOIFLIST: c_int = 4; +pub const NET_RT_OIFLIST: c_int = 5; +pub const NET_RT_IFLIST: c_int = 6; +pub const NET_RT_MAXID: c_int = 7; + +pub const PF_OROUTE: c_int = AF_OROUTE; +pub const PF_ARP: c_int = AF_ARP; +pub const PF_KEY: c_int = pseudo_AF_KEY; +pub const PF_BLUETOOTH: c_int = AF_BLUETOOTH; +pub const PF_MPLS: c_int = AF_MPLS; +pub const PF_ROUTE: c_int = AF_ROUTE; + +pub const MSG_NBIO: c_int = 0x1000; +pub const MSG_WAITFORONE: c_int = 0x2000; +pub const MSG_NOTIFICATION: c_int = 0x4000; + +pub const SCM_TIMESTAMP: c_int = 0x08; +pub const SCM_CREDS: c_int = 0x10; + +pub const O_DSYNC: c_int = 0x10000; + +pub const MAP_RENAME: c_int = 0x20; +pub const MAP_NORESERVE: c_int = 0x40; +pub const MAP_HASSEMAPHORE: c_int = 0x200; +pub const MAP_TRYFIXED: c_int = 0x400; +pub const MAP_WIRED: c_int = 0x800; +pub const MAP_STACK: c_int = 0x2000; +// map alignment aliases for MAP_ALIGNED +pub const MAP_ALIGNMENT_SHIFT: c_int = 24; +pub const MAP_ALIGNMENT_MASK: c_int = 0xff << MAP_ALIGNMENT_SHIFT; +pub const MAP_ALIGNMENT_64KB: c_int = 16 << MAP_ALIGNMENT_SHIFT; +pub const MAP_ALIGNMENT_16MB: c_int = 24 << MAP_ALIGNMENT_SHIFT; +pub const MAP_ALIGNMENT_4GB: c_int = 32 << MAP_ALIGNMENT_SHIFT; +pub const MAP_ALIGNMENT_1TB: c_int = 40 << MAP_ALIGNMENT_SHIFT; +pub const MAP_ALIGNMENT_256TB: c_int = 48 << MAP_ALIGNMENT_SHIFT; +pub const MAP_ALIGNMENT_64PB: c_int = 56 << MAP_ALIGNMENT_SHIFT; +// mremap flag +pub const MAP_REMAPDUP: c_int = 0x004; + +pub const DCCP_TYPE_REQUEST: c_int = 0; +pub const DCCP_TYPE_RESPONSE: c_int = 1; +pub const DCCP_TYPE_DATA: c_int = 2; +pub const DCCP_TYPE_ACK: c_int = 3; +pub const DCCP_TYPE_DATAACK: c_int = 4; +pub const DCCP_TYPE_CLOSEREQ: c_int = 5; +pub const DCCP_TYPE_CLOSE: c_int = 6; +pub const DCCP_TYPE_RESET: c_int = 7; +pub const DCCP_TYPE_MOVE: c_int = 8; + +pub const DCCP_FEATURE_CC: c_int = 1; +pub const DCCP_FEATURE_ECN: c_int = 2; +pub const DCCP_FEATURE_ACKRATIO: c_int = 3; +pub const DCCP_FEATURE_ACKVECTOR: c_int = 4; +pub const DCCP_FEATURE_MOBILITY: c_int = 5; +pub const DCCP_FEATURE_LOSSWINDOW: c_int = 6; +pub const DCCP_FEATURE_CONN_NONCE: c_int = 8; +pub const DCCP_FEATURE_IDENTREG: c_int = 7; + +pub const DCCP_OPT_PADDING: c_int = 0; +pub const DCCP_OPT_DATA_DISCARD: c_int = 1; +pub const DCCP_OPT_SLOW_RECV: c_int = 2; +pub const DCCP_OPT_BUF_CLOSED: c_int = 3; +pub const DCCP_OPT_CHANGE_L: c_int = 32; +pub const DCCP_OPT_CONFIRM_L: c_int = 33; +pub const DCCP_OPT_CHANGE_R: c_int = 34; +pub const DCCP_OPT_CONFIRM_R: c_int = 35; +pub const DCCP_OPT_INIT_COOKIE: c_int = 36; +pub const DCCP_OPT_NDP_COUNT: c_int = 37; +pub const DCCP_OPT_ACK_VECTOR0: c_int = 38; +pub const DCCP_OPT_ACK_VECTOR1: c_int = 39; +pub const DCCP_OPT_RECV_BUF_DROPS: c_int = 40; +pub const DCCP_OPT_TIMESTAMP: c_int = 41; +pub const DCCP_OPT_TIMESTAMP_ECHO: c_int = 42; +pub const DCCP_OPT_ELAPSEDTIME: c_int = 43; +pub const DCCP_OPT_DATACHECKSUM: c_int = 44; + +pub const DCCP_REASON_UNSPEC: c_int = 0; +pub const DCCP_REASON_CLOSED: c_int = 1; 
+pub const DCCP_REASON_INVALID: c_int = 2; +pub const DCCP_REASON_OPTION_ERR: c_int = 3; +pub const DCCP_REASON_FEA_ERR: c_int = 4; +pub const DCCP_REASON_CONN_REF: c_int = 5; +pub const DCCP_REASON_BAD_SNAME: c_int = 6; +pub const DCCP_REASON_BAD_COOKIE: c_int = 7; +pub const DCCP_REASON_INV_MOVE: c_int = 8; +pub const DCCP_REASON_UNANSW_CH: c_int = 10; +pub const DCCP_REASON_FRUITLESS_NEG: c_int = 11; + +pub const DCCP_CCID: c_int = 1; +pub const DCCP_CSLEN: c_int = 2; +pub const DCCP_MAXSEG: c_int = 4; +pub const DCCP_SERVICE: c_int = 8; + +pub const DCCP_NDP_LIMIT: c_int = 16; +pub const DCCP_SEQ_NUM_LIMIT: c_int = 16777216; +pub const DCCP_MAX_OPTIONS: c_int = 32; +pub const DCCP_MAX_PKTS: c_int = 100; + +pub const _PC_LINK_MAX: c_int = 1; +pub const _PC_MAX_CANON: c_int = 2; +pub const _PC_MAX_INPUT: c_int = 3; +pub const _PC_NAME_MAX: c_int = 4; +pub const _PC_PATH_MAX: c_int = 5; +pub const _PC_PIPE_BUF: c_int = 6; +pub const _PC_CHOWN_RESTRICTED: c_int = 7; +pub const _PC_NO_TRUNC: c_int = 8; +pub const _PC_VDISABLE: c_int = 9; +pub const _PC_SYNC_IO: c_int = 10; +pub const _PC_FILESIZEBITS: c_int = 11; +pub const _PC_SYMLINK_MAX: c_int = 12; +pub const _PC_2_SYMLINKS: c_int = 13; +pub const _PC_ACL_EXTENDED: c_int = 14; +pub const _PC_MIN_HOLE_SIZE: c_int = 15; + +pub const _CS_PATH: c_int = 1; + +pub const _SC_SYNCHRONIZED_IO: c_int = 31; +pub const _SC_IOV_MAX: c_int = 32; +pub const _SC_MAPPED_FILES: c_int = 33; +pub const _SC_MEMLOCK: c_int = 34; +pub const _SC_MEMLOCK_RANGE: c_int = 35; +pub const _SC_MEMORY_PROTECTION: c_int = 36; +pub const _SC_LOGIN_NAME_MAX: c_int = 37; +pub const _SC_MONOTONIC_CLOCK: c_int = 38; +pub const _SC_CLK_TCK: c_int = 39; +pub const _SC_ATEXIT_MAX: c_int = 40; +pub const _SC_THREADS: c_int = 41; +pub const _SC_SEMAPHORES: c_int = 42; +pub const _SC_BARRIERS: c_int = 43; +pub const _SC_TIMERS: c_int = 44; +pub const _SC_SPIN_LOCKS: c_int = 45; +pub const _SC_READER_WRITER_LOCKS: c_int = 46; +pub const _SC_GETGR_R_SIZE_MAX: c_int = 47; +pub const _SC_GETPW_R_SIZE_MAX: c_int = 48; +pub const _SC_CLOCK_SELECTION: c_int = 49; +pub const _SC_ASYNCHRONOUS_IO: c_int = 50; +pub const _SC_AIO_LISTIO_MAX: c_int = 51; +pub const _SC_AIO_MAX: c_int = 52; +pub const _SC_MESSAGE_PASSING: c_int = 53; +pub const _SC_MQ_OPEN_MAX: c_int = 54; +pub const _SC_MQ_PRIO_MAX: c_int = 55; +pub const _SC_PRIORITY_SCHEDULING: c_int = 56; +pub const _SC_THREAD_DESTRUCTOR_ITERATIONS: c_int = 57; +pub const _SC_THREAD_KEYS_MAX: c_int = 58; +pub const _SC_THREAD_STACK_MIN: c_int = 59; +pub const _SC_THREAD_THREADS_MAX: c_int = 60; +pub const _SC_THREAD_ATTR_STACKADDR: c_int = 61; +pub const _SC_THREAD_ATTR_STACKSIZE: c_int = 62; +pub const _SC_THREAD_PRIORITY_SCHEDULING: c_int = 63; +pub const _SC_THREAD_PRIO_INHERIT: c_int = 64; +pub const _SC_THREAD_PRIO_PROTECT: c_int = 65; +pub const _SC_THREAD_PROCESS_SHARED: c_int = 66; +pub const _SC_THREAD_SAFE_FUNCTIONS: c_int = 67; +pub const _SC_TTY_NAME_MAX: c_int = 68; +pub const _SC_HOST_NAME_MAX: c_int = 69; +pub const _SC_PASS_MAX: c_int = 70; +pub const _SC_REGEXP: c_int = 71; +pub const _SC_SHELL: c_int = 72; +pub const _SC_SYMLOOP_MAX: c_int = 73; +pub const _SC_V6_ILP32_OFF32: c_int = 74; +pub const _SC_V6_ILP32_OFFBIG: c_int = 75; +pub const _SC_V6_LP64_OFF64: c_int = 76; +pub const _SC_V6_LPBIG_OFFBIG: c_int = 77; +pub const _SC_2_PBS: c_int = 80; +pub const _SC_2_PBS_ACCOUNTING: c_int = 81; +pub const _SC_2_PBS_CHECKPOINT: c_int = 82; +pub const _SC_2_PBS_LOCATE: c_int = 83; +pub const _SC_2_PBS_MESSAGE: c_int = 84; 
+pub const _SC_2_PBS_TRACK: c_int = 85; +pub const _SC_SPAWN: c_int = 86; +pub const _SC_SHARED_MEMORY_OBJECTS: c_int = 87; +pub const _SC_TIMER_MAX: c_int = 88; +pub const _SC_SEM_NSEMS_MAX: c_int = 89; +pub const _SC_CPUTIME: c_int = 90; +pub const _SC_THREAD_CPUTIME: c_int = 91; +pub const _SC_DELAYTIMER_MAX: c_int = 92; +// These two variables will be supported in NetBSD 8.0 +// pub const _SC_SIGQUEUE_MAX : c_int = 93; +// pub const _SC_REALTIME_SIGNALS : c_int = 94; +pub const _SC_PHYS_PAGES: c_int = 121; +pub const _SC_NPROCESSORS_CONF: c_int = 1001; +pub const _SC_NPROCESSORS_ONLN: c_int = 1002; +pub const _SC_SCHED_RT_TS: c_int = 2001; +pub const _SC_SCHED_PRI_MIN: c_int = 2002; +pub const _SC_SCHED_PRI_MAX: c_int = 2003; + +pub const FD_SETSIZE: usize = 0x100; + +pub const ST_NOSUID: c_ulong = 8; + +pub const BIOCGRSIG: c_ulong = 0x40044272; +pub const BIOCSRSIG: c_ulong = 0x80044273; +pub const BIOCSDLT: c_ulong = 0x80044278; +pub const BIOCGSEESENT: c_ulong = 0x40044276; +pub const BIOCSSEESENT: c_ulong = 0x80044277; + +// +pub const MNT_UNION: c_int = 0x00000020; +pub const MNT_NOCOREDUMP: c_int = 0x00008000; +pub const MNT_RELATIME: c_int = 0x00020000; +pub const MNT_IGNORE: c_int = 0x00100000; +pub const MNT_NFS4ACLS: c_int = 0x00200000; +pub const MNT_DISCARD: c_int = 0x00800000; +pub const MNT_EXTATTR: c_int = 0x01000000; +pub const MNT_LOG: c_int = 0x02000000; +pub const MNT_NOATIME: c_int = 0x04000000; +pub const MNT_AUTOMOUNTED: c_int = 0x10000000; +pub const MNT_SYMPERM: c_int = 0x20000000; +pub const MNT_NODEVMTIME: c_int = 0x40000000; +pub const MNT_SOFTDEP: c_int = 0x80000000; +pub const MNT_POSIX1EACLS: c_int = 0x00000800; +pub const MNT_ACLS: c_int = MNT_POSIX1EACLS; +pub const MNT_WAIT: c_int = 1; +pub const MNT_NOWAIT: c_int = 2; +pub const MNT_LAZY: c_int = 3; + +// sys/ioccom.h +pub const IOCPARM_SHIFT: u32 = 16; +pub const IOCGROUP_SHIFT: u32 = 8; + +pub const fn IOCPARM_LEN(x: u32) -> u32 { + (x >> IOCPARM_SHIFT) & crate::IOCPARM_MASK +} + +pub const fn IOCBASECMD(x: u32) -> u32 { + x & (!(crate::IOCPARM_MASK << IOCPARM_SHIFT)) +} + +pub const fn IOCGROUP(x: u32) -> u32 { + (x >> IOCGROUP_SHIFT) & 0xff +} + +pub const fn _IOC(inout: c_ulong, group: c_ulong, num: c_ulong, len: c_ulong) -> c_ulong { + (inout) + | (((len) & crate::IOCPARM_MASK as c_ulong) << IOCPARM_SHIFT) + | ((group) << IOCGROUP_SHIFT) + | (num) +} + +// +pub const CLOCK_PROCESS_CPUTIME_ID: crate::clockid_t = 2; +pub const CLOCK_THREAD_CPUTIME_ID: crate::clockid_t = 4; +pub const NTP_API: c_int = 4; +pub const MAXPHASE: c_long = 500000000; +pub const MAXFREQ: c_long = 500000; +pub const MINSEC: c_int = 256; +pub const MAXSEC: c_int = 2048; +pub const NANOSECOND: c_long = 1000000000; +pub const SCALE_PPM: c_int = 65; +pub const MAXTC: c_int = 10; +pub const MOD_OFFSET: c_uint = 0x0001; +pub const MOD_FREQUENCY: c_uint = 0x0002; +pub const MOD_MAXERROR: c_uint = 0x0004; +pub const MOD_ESTERROR: c_uint = 0x0008; +pub const MOD_STATUS: c_uint = 0x0010; +pub const MOD_TIMECONST: c_uint = 0x0020; +pub const MOD_PPSMAX: c_uint = 0x0040; +pub const MOD_TAI: c_uint = 0x0080; +pub const MOD_MICRO: c_uint = 0x1000; +pub const MOD_NANO: c_uint = 0x2000; +pub const MOD_CLKB: c_uint = 0x4000; +pub const MOD_CLKA: c_uint = 0x8000; +pub const STA_PLL: c_int = 0x0001; +pub const STA_PPSFREQ: c_int = 0x0002; +pub const STA_PPSTIME: c_int = 0x0004; +pub const STA_FLL: c_int = 0x0008; +pub const STA_INS: c_int = 0x0010; +pub const STA_DEL: c_int = 0x0020; +pub const STA_UNSYNC: c_int = 0x0040; +pub const 
STA_FREQHOLD: c_int = 0x0080; +pub const STA_PPSSIGNAL: c_int = 0x0100; +pub const STA_PPSJITTER: c_int = 0x0200; +pub const STA_PPSWANDER: c_int = 0x0400; +pub const STA_PPSERROR: c_int = 0x0800; +pub const STA_CLOCKERR: c_int = 0x1000; +pub const STA_NANO: c_int = 0x2000; +pub const STA_MODE: c_int = 0x4000; +pub const STA_CLK: c_int = 0x8000; +pub const STA_RONLY: c_int = STA_PPSSIGNAL + | STA_PPSJITTER + | STA_PPSWANDER + | STA_PPSERROR + | STA_CLOCKERR + | STA_NANO + | STA_MODE + | STA_CLK; +pub const TIME_OK: c_int = 0; +pub const TIME_INS: c_int = 1; +pub const TIME_DEL: c_int = 2; +pub const TIME_OOP: c_int = 3; +pub const TIME_WAIT: c_int = 4; +pub const TIME_ERROR: c_int = 5; + +pub const LITTLE_ENDIAN: c_int = 1234; +pub const BIG_ENDIAN: c_int = 4321; + +pub const PL_EVENT_NONE: c_int = 0; +pub const PL_EVENT_SIGNAL: c_int = 1; +pub const PL_EVENT_SUSPENDED: c_int = 2; + +cfg_if! { + if #[cfg(any( + target_arch = "sparc", + target_arch = "sparc64", + target_arch = "x86", + target_arch = "x86_64" + ))] { + pub const PTHREAD_MUTEX_INITIALIZER: pthread_mutex_t = pthread_mutex_t { + ptm_magic: 0x33330003, + ptm_errorcheck: 0, + ptm_pad1: [0; 3], + ptm_unused: 0, + ptm_pad2: [0; 3], + ptm_waiters: 0 as *mut _, + ptm_owner: 0, + ptm_recursed: 0, + ptm_spare2: 0 as *mut _, + }; + } else { + pub const PTHREAD_MUTEX_INITIALIZER: pthread_mutex_t = pthread_mutex_t { + ptm_magic: 0x33330003, + ptm_errorcheck: 0, + ptm_unused: 0, + ptm_waiters: 0 as *mut _, + ptm_owner: 0, + ptm_recursed: 0, + ptm_spare2: 0 as *mut _, + }; + } +} + +pub const PTHREAD_COND_INITIALIZER: pthread_cond_t = pthread_cond_t { + ptc_magic: 0x55550005, + ptc_lock: 0, + ptc_waiters_first: 0 as *mut _, + ptc_waiters_last: 0 as *mut _, + ptc_mutex: 0 as *mut _, + ptc_private: 0 as *mut _, +}; +pub const PTHREAD_RWLOCK_INITIALIZER: pthread_rwlock_t = pthread_rwlock_t { + ptr_magic: 0x99990009, + ptr_interlock: 0, + ptr_rblocked_first: 0 as *mut _, + ptr_rblocked_last: 0 as *mut _, + ptr_wblocked_first: 0 as *mut _, + ptr_wblocked_last: 0 as *mut _, + ptr_nreaders: 0, + ptr_owner: 0, + ptr_private: 0 as *mut _, +}; +pub const PTHREAD_MUTEX_NORMAL: c_int = 0; +pub const PTHREAD_MUTEX_ERRORCHECK: c_int = 1; +pub const PTHREAD_MUTEX_RECURSIVE: c_int = 2; +pub const PTHREAD_MUTEX_DEFAULT: c_int = PTHREAD_MUTEX_NORMAL; + +pub const SCHED_NONE: c_int = -1; +pub const SCHED_OTHER: c_int = 0; +pub const SCHED_FIFO: c_int = 1; +pub const SCHED_RR: c_int = 2; + +pub const EVFILT_AIO: u32 = 2; +pub const EVFILT_PROC: u32 = 4; +pub const EVFILT_READ: u32 = 0; +pub const EVFILT_SIGNAL: u32 = 5; +pub const EVFILT_TIMER: u32 = 6; +pub const EVFILT_VNODE: u32 = 3; +pub const EVFILT_WRITE: u32 = 1; +pub const EVFILT_FS: u32 = 7; +pub const EVFILT_USER: u32 = 8; +pub const EVFILT_EMPTY: u32 = 9; + +pub const EV_ADD: u32 = 0x1; +pub const EV_DELETE: u32 = 0x2; +pub const EV_ENABLE: u32 = 0x4; +pub const EV_DISABLE: u32 = 0x8; +pub const EV_ONESHOT: u32 = 0x10; +pub const EV_CLEAR: u32 = 0x20; +pub const EV_RECEIPT: u32 = 0x40; +pub const EV_DISPATCH: u32 = 0x80; +pub const EV_FLAG1: u32 = 0x2000; +pub const EV_ERROR: u32 = 0x4000; +pub const EV_EOF: u32 = 0x8000; +pub const EV_SYSFLAGS: u32 = 0xf000; + +pub const NOTE_TRIGGER: u32 = 0x01000000; +pub const NOTE_FFNOP: u32 = 0x00000000; +pub const NOTE_FFAND: u32 = 0x40000000; +pub const NOTE_FFOR: u32 = 0x80000000; +pub const NOTE_FFCOPY: u32 = 0xc0000000; +pub const NOTE_FFCTRLMASK: u32 = 0xc0000000; +pub const NOTE_FFLAGSMASK: u32 = 0x00ffffff; +pub const NOTE_LOWAT: u32 = 0x00000001; +pub 
const NOTE_DELETE: u32 = 0x00000001; +pub const NOTE_WRITE: u32 = 0x00000002; +pub const NOTE_EXTEND: u32 = 0x00000004; +pub const NOTE_ATTRIB: u32 = 0x00000008; +pub const NOTE_LINK: u32 = 0x00000010; +pub const NOTE_RENAME: u32 = 0x00000020; +pub const NOTE_REVOKE: u32 = 0x00000040; +pub const NOTE_EXIT: u32 = 0x80000000; +pub const NOTE_FORK: u32 = 0x40000000; +pub const NOTE_EXEC: u32 = 0x20000000; +pub const NOTE_PDATAMASK: u32 = 0x000fffff; +pub const NOTE_PCTRLMASK: u32 = 0xf0000000; +pub const NOTE_TRACK: u32 = 0x00000001; +pub const NOTE_TRACKERR: u32 = 0x00000002; +pub const NOTE_CHILD: u32 = 0x00000004; +pub const NOTE_MSECONDS: u32 = 0x00000000; +pub const NOTE_SECONDS: u32 = 0x00000001; +pub const NOTE_USECONDS: u32 = 0x00000002; +pub const NOTE_NSECONDS: u32 = 0x00000003; +pub const NOTE_ABSTIME: u32 = 0x000000010; + +pub const TMP_MAX: c_uint = 308915776; + +pub const AI_PASSIVE: c_int = 0x00000001; +pub const AI_CANONNAME: c_int = 0x00000002; +pub const AI_NUMERICHOST: c_int = 0x00000004; +pub const AI_NUMERICSERV: c_int = 0x00000008; +pub const AI_ADDRCONFIG: c_int = 0x00000400; +pub const AI_SRV: c_int = 0x00000800; + +pub const NI_MAXHOST: crate::socklen_t = 1025; +pub const NI_MAXSERV: crate::socklen_t = 32; + +pub const NI_NOFQDN: c_int = 0x00000001; +pub const NI_NUMERICHOST: c_int = 0x000000002; +pub const NI_NAMEREQD: c_int = 0x000000004; +pub const NI_NUMERICSERV: c_int = 0x000000008; +pub const NI_DGRAM: c_int = 0x00000010; +pub const NI_WITHSCOPEID: c_int = 0x00000020; +pub const NI_NUMERICSCOPE: c_int = 0x00000040; + +pub const RTLD_NOLOAD: c_int = 0x2000; +pub const RTLD_LOCAL: c_int = 0x200; + +pub const CTL_MAXNAME: c_int = 12; +pub const SYSCTL_NAMELEN: c_int = 32; +pub const SYSCTL_DEFSIZE: c_int = 8; +pub const CTLTYPE_NODE: c_int = 1; +pub const CTLTYPE_INT: c_int = 2; +pub const CTLTYPE_STRING: c_int = 3; +pub const CTLTYPE_QUAD: c_int = 4; +pub const CTLTYPE_STRUCT: c_int = 5; +pub const CTLTYPE_BOOL: c_int = 6; +pub const CTLFLAG_READONLY: c_int = 0x00000000; +pub const CTLFLAG_READWRITE: c_int = 0x00000070; +pub const CTLFLAG_ANYWRITE: c_int = 0x00000080; +pub const CTLFLAG_PRIVATE: c_int = 0x00000100; +pub const CTLFLAG_PERMANENT: c_int = 0x00000200; +pub const CTLFLAG_OWNDATA: c_int = 0x00000400; +pub const CTLFLAG_IMMEDIATE: c_int = 0x00000800; +pub const CTLFLAG_HEX: c_int = 0x00001000; +pub const CTLFLAG_ROOT: c_int = 0x00002000; +pub const CTLFLAG_ANYNUMBER: c_int = 0x00004000; +pub const CTLFLAG_HIDDEN: c_int = 0x00008000; +pub const CTLFLAG_ALIAS: c_int = 0x00010000; +pub const CTLFLAG_MMAP: c_int = 0x00020000; +pub const CTLFLAG_OWNDESC: c_int = 0x00040000; +pub const CTLFLAG_UNSIGNED: c_int = 0x00080000; +pub const SYSCTL_VERS_MASK: c_int = 0xff000000; +pub const SYSCTL_VERS_0: c_int = 0x00000000; +pub const SYSCTL_VERS_1: c_int = 0x01000000; +pub const SYSCTL_VERSION: c_int = SYSCTL_VERS_1; +pub const CTL_EOL: c_int = -1; +pub const CTL_QUERY: c_int = -2; +pub const CTL_CREATE: c_int = -3; +pub const CTL_CREATESYM: c_int = -4; +pub const CTL_DESTROY: c_int = -5; +pub const CTL_MMAP: c_int = -6; +pub const CTL_DESCRIBE: c_int = -7; +pub const CTL_UNSPEC: c_int = 0; +pub const CTL_KERN: c_int = 1; +pub const CTL_VM: c_int = 2; +pub const CTL_VFS: c_int = 3; +pub const CTL_NET: c_int = 4; +pub const CTL_DEBUG: c_int = 5; +pub const CTL_HW: c_int = 6; +pub const CTL_MACHDEP: c_int = 7; +pub const CTL_USER: c_int = 8; +pub const CTL_DDB: c_int = 9; +pub const CTL_PROC: c_int = 10; +pub const CTL_VENDOR: c_int = 11; +pub const CTL_EMUL: c_int = 
12; +pub const CTL_SECURITY: c_int = 13; +pub const CTL_MAXID: c_int = 14; +pub const KERN_OSTYPE: c_int = 1; +pub const KERN_OSRELEASE: c_int = 2; +pub const KERN_OSREV: c_int = 3; +pub const KERN_VERSION: c_int = 4; +pub const KERN_MAXVNODES: c_int = 5; +pub const KERN_MAXPROC: c_int = 6; +pub const KERN_MAXFILES: c_int = 7; +pub const KERN_ARGMAX: c_int = 8; +pub const KERN_SECURELVL: c_int = 9; +pub const KERN_HOSTNAME: c_int = 10; +pub const KERN_HOSTID: c_int = 11; +pub const KERN_CLOCKRATE: c_int = 12; +pub const KERN_VNODE: c_int = 13; +pub const KERN_PROC: c_int = 14; +pub const KERN_FILE: c_int = 15; +pub const KERN_PROF: c_int = 16; +pub const KERN_POSIX1: c_int = 17; +pub const KERN_NGROUPS: c_int = 18; +pub const KERN_JOB_CONTROL: c_int = 19; +pub const KERN_SAVED_IDS: c_int = 20; +pub const KERN_OBOOTTIME: c_int = 21; +pub const KERN_DOMAINNAME: c_int = 22; +pub const KERN_MAXPARTITIONS: c_int = 23; +pub const KERN_RAWPARTITION: c_int = 24; +pub const KERN_NTPTIME: c_int = 25; +pub const KERN_TIMEX: c_int = 26; +pub const KERN_AUTONICETIME: c_int = 27; +pub const KERN_AUTONICEVAL: c_int = 28; +pub const KERN_RTC_OFFSET: c_int = 29; +pub const KERN_ROOT_DEVICE: c_int = 30; +pub const KERN_MSGBUFSIZE: c_int = 31; +pub const KERN_FSYNC: c_int = 32; +pub const KERN_OLDSYSVMSG: c_int = 33; +pub const KERN_OLDSYSVSEM: c_int = 34; +pub const KERN_OLDSYSVSHM: c_int = 35; +pub const KERN_OLDSHORTCORENAME: c_int = 36; +pub const KERN_SYNCHRONIZED_IO: c_int = 37; +pub const KERN_IOV_MAX: c_int = 38; +pub const KERN_MBUF: c_int = 39; +pub const KERN_MAPPED_FILES: c_int = 40; +pub const KERN_MEMLOCK: c_int = 41; +pub const KERN_MEMLOCK_RANGE: c_int = 42; +pub const KERN_MEMORY_PROTECTION: c_int = 43; +pub const KERN_LOGIN_NAME_MAX: c_int = 44; +pub const KERN_DEFCORENAME: c_int = 45; +pub const KERN_LOGSIGEXIT: c_int = 46; +pub const KERN_PROC2: c_int = 47; +pub const KERN_PROC_ARGS: c_int = 48; +pub const KERN_FSCALE: c_int = 49; +pub const KERN_CCPU: c_int = 50; +pub const KERN_CP_TIME: c_int = 51; +pub const KERN_OLDSYSVIPC_INFO: c_int = 52; +pub const KERN_MSGBUF: c_int = 53; +pub const KERN_CONSDEV: c_int = 54; +pub const KERN_MAXPTYS: c_int = 55; +pub const KERN_PIPE: c_int = 56; +pub const KERN_MAXPHYS: c_int = 57; +pub const KERN_SBMAX: c_int = 58; +pub const KERN_TKSTAT: c_int = 59; +pub const KERN_MONOTONIC_CLOCK: c_int = 60; +pub const KERN_URND: c_int = 61; +pub const KERN_LABELSECTOR: c_int = 62; +pub const KERN_LABELOFFSET: c_int = 63; +pub const KERN_LWP: c_int = 64; +pub const KERN_FORKFSLEEP: c_int = 65; +pub const KERN_POSIX_THREADS: c_int = 66; +pub const KERN_POSIX_SEMAPHORES: c_int = 67; +pub const KERN_POSIX_BARRIERS: c_int = 68; +pub const KERN_POSIX_TIMERS: c_int = 69; +pub const KERN_POSIX_SPIN_LOCKS: c_int = 70; +pub const KERN_POSIX_READER_WRITER_LOCKS: c_int = 71; +pub const KERN_DUMP_ON_PANIC: c_int = 72; +pub const KERN_SOMAXKVA: c_int = 73; +pub const KERN_ROOT_PARTITION: c_int = 74; +pub const KERN_DRIVERS: c_int = 75; +pub const KERN_BUF: c_int = 76; +pub const KERN_FILE2: c_int = 77; +pub const KERN_VERIEXEC: c_int = 78; +pub const KERN_CP_ID: c_int = 79; +pub const KERN_HARDCLOCK_TICKS: c_int = 80; +pub const KERN_ARND: c_int = 81; +pub const KERN_SYSVIPC: c_int = 82; +pub const KERN_BOOTTIME: c_int = 83; +pub const KERN_EVCNT: c_int = 84; +pub const KERN_MAXID: c_int = 85; +pub const KERN_PROC_ALL: c_int = 0; +pub const KERN_PROC_PID: c_int = 1; +pub const KERN_PROC_PGRP: c_int = 2; +pub const KERN_PROC_SESSION: c_int = 3; +pub const KERN_PROC_TTY: 
c_int = 4; +pub const KERN_PROC_UID: c_int = 5; +pub const KERN_PROC_RUID: c_int = 6; +pub const KERN_PROC_GID: c_int = 7; +pub const KERN_PROC_RGID: c_int = 8; +pub const KERN_PROC_ARGV: c_int = 1; +pub const KERN_PROC_NARGV: c_int = 2; +pub const KERN_PROC_ENV: c_int = 3; +pub const KERN_PROC_NENV: c_int = 4; +pub const KERN_PROC_PATHNAME: c_int = 5; +pub const VM_PROC: c_int = 16; +pub const VM_PROC_MAP: c_int = 1; + +pub const EAI_AGAIN: c_int = 2; +pub const EAI_BADFLAGS: c_int = 3; +pub const EAI_FAIL: c_int = 4; +pub const EAI_FAMILY: c_int = 5; +pub const EAI_MEMORY: c_int = 6; +pub const EAI_NODATA: c_int = 7; +pub const EAI_NONAME: c_int = 8; +pub const EAI_SERVICE: c_int = 9; +pub const EAI_SOCKTYPE: c_int = 10; +pub const EAI_SYSTEM: c_int = 11; +pub const EAI_OVERFLOW: c_int = 14; + +pub const AIO_CANCELED: c_int = 1; +pub const AIO_NOTCANCELED: c_int = 2; +pub const AIO_ALLDONE: c_int = 3; +pub const LIO_NOP: c_int = 0; +pub const LIO_WRITE: c_int = 1; +pub const LIO_READ: c_int = 2; +pub const LIO_WAIT: c_int = 1; +pub const LIO_NOWAIT: c_int = 0; + +pub const SIGEV_NONE: c_int = 0; +pub const SIGEV_SIGNAL: c_int = 1; +pub const SIGEV_THREAD: c_int = 2; + +pub const WSTOPPED: c_int = 0x00000002; // same as WUNTRACED +pub const WCONTINUED: c_int = 0x00000010; +pub const WEXITED: c_int = 0x000000020; +pub const WNOWAIT: c_int = 0x00010000; + +pub const WALTSIG: c_int = 0x00000004; +pub const WALLSIG: c_int = 0x00000008; +pub const WTRAPPED: c_int = 0x00000040; +pub const WNOZOMBIE: c_int = 0x00020000; + +pub const P_ALL: idtype_t = 0; +pub const P_PID: idtype_t = 1; +pub const P_PGID: idtype_t = 4; + +pub const UTIME_OMIT: c_long = 1073741822; +pub const UTIME_NOW: c_long = 1073741823; + +pub const B460800: crate::speed_t = 460800; +pub const B921600: crate::speed_t = 921600; + +pub const ONOCR: crate::tcflag_t = 0x20; +pub const ONLRET: crate::tcflag_t = 0x40; +pub const CDTRCTS: crate::tcflag_t = 0x00020000; +pub const CHWFLOW: crate::tcflag_t = crate::MDMBUF | crate::CRTSCTS | crate::CDTRCTS; + +// pub const _PATH_UTMPX: &[c_char; 14] = b"/var/run/utmpx"; +// pub const _PATH_WTMPX: &[c_char; 14] = b"/var/log/wtmpx"; +// pub const _PATH_LASTLOGX: &[c_char; 17] = b"/var/log/lastlogx"; +// pub const _PATH_UTMP_UPDATE: &[c_char; 24] = b"/usr/libexec/utmp_update"; +pub const UT_NAMESIZE: usize = 8; +pub const UT_LINESIZE: usize = 8; +pub const UT_HOSTSIZE: usize = 16; +pub const _UTX_USERSIZE: usize = 32; +pub const _UTX_LINESIZE: usize = 32; +pub const _UTX_PADSIZE: usize = 40; +pub const _UTX_IDSIZE: usize = 4; +pub const _UTX_HOSTSIZE: usize = 256; +pub const EMPTY: u16 = 0; +pub const RUN_LVL: u16 = 1; +pub const BOOT_TIME: u16 = 2; +pub const OLD_TIME: u16 = 3; +pub const NEW_TIME: u16 = 4; +pub const INIT_PROCESS: u16 = 5; +pub const LOGIN_PROCESS: u16 = 6; +pub const USER_PROCESS: u16 = 7; +pub const DEAD_PROCESS: u16 = 8; +pub const ACCOUNTING: u16 = 9; +pub const SIGNATURE: u16 = 10; +pub const DOWN_TIME: u16 = 11; + +pub const SOCK_CLOEXEC: c_int = 0x10000000; +pub const SOCK_NONBLOCK: c_int = 0x20000000; + +// Uncomment on next NetBSD release +// pub const FIOSEEKDATA: c_ulong = 0xc0086661; +// pub const FIOSEEKHOLE: c_ulong = 0xc0086662; +pub const OFIOGETBMAP: c_ulong = 0xc004667a; +pub const FIOGETBMAP: c_ulong = 0xc008667a; +pub const FIONWRITE: c_ulong = 0x40046679; +pub const FIONSPACE: c_ulong = 0x40046678; +pub const FIBMAP: c_ulong = 0xc008667a; + +pub const SIGSTKSZ: size_t = 40960; + +pub const REG_ENOSYS: c_int = 17; + +pub const PT_DUMPCORE: c_int = 12; 
+pub const PT_LWPINFO: c_int = 13; +pub const PT_SYSCALL: c_int = 14; +pub const PT_SYSCALLEMU: c_int = 15; +pub const PT_SET_EVENT_MASK: c_int = 16; +pub const PT_GET_EVENT_MASK: c_int = 17; +pub const PT_GET_PROCESS_STATE: c_int = 18; +pub const PT_SET_SIGINFO: c_int = 19; +pub const PT_GET_SIGINFO: c_int = 20; +pub const PT_RESUME: c_int = 21; +pub const PT_SUSPEND: c_int = 23; +pub const PT_STOP: c_int = 23; +pub const PT_LWPSTATUS: c_int = 24; +pub const PT_LWPNEXT: c_int = 25; +pub const PT_SET_SIGPASS: c_int = 26; +pub const PT_GET_SIGPASS: c_int = 27; +pub const PT_FIRSTMACH: c_int = 32; +pub const POSIX_SPAWN_RETURNERROR: c_int = 0x40; + +// Flags for chflags(2) +pub const SF_APPEND: c_ulong = 0x00040000; +pub const SF_ARCHIVED: c_ulong = 0x00010000; +pub const SF_IMMUTABLE: c_ulong = 0x00020000; +pub const SF_LOG: c_ulong = 0x00400000; +pub const SF_SETTABLE: c_ulong = 0xffff0000; +pub const SF_SNAPINVAL: c_ulong = 0x00800000; +pub const SF_SNAPSHOT: c_ulong = 0x00200000; +pub const UF_APPEND: c_ulong = 0x00000004; +pub const UF_IMMUTABLE: c_ulong = 0x00000002; +pub const UF_NODUMP: c_ulong = 0x00000001; +pub const UF_OPAQUE: c_ulong = 0x00000008; +pub const UF_SETTABLE: c_ulong = 0x0000ffff; + +// sys/sysctl.h +pub const KVME_PROT_READ: c_int = 0x00000001; +pub const KVME_PROT_WRITE: c_int = 0x00000002; +pub const KVME_PROT_EXEC: c_int = 0x00000004; + +pub const KVME_FLAG_COW: c_int = 0x00000001; +pub const KVME_FLAG_NEEDS_COPY: c_int = 0x00000002; +pub const KVME_FLAG_NOCOREDUMP: c_int = 0x000000004; +pub const KVME_FLAG_PAGEABLE: c_int = 0x000000008; +pub const KVME_FLAG_GROWS_UP: c_int = 0x000000010; +pub const KVME_FLAG_GROWS_DOWN: c_int = 0x000000020; + +pub const NGROUPS_MAX: c_int = 16; + +pub const KI_NGROUPS: c_int = 16; +pub const KI_MAXCOMLEN: c_int = 24; +pub const KI_WMESGLEN: c_int = 8; +pub const KI_MAXLOGNAME: c_int = 24; +pub const KI_MAXEMULLEN: c_int = 16; +pub const KI_LNAMELEN: c_int = 20; + +// sys/lwp.h +pub const LSIDL: c_int = 1; +pub const LSRUN: c_int = 2; +pub const LSSLEEP: c_int = 3; +pub const LSSTOP: c_int = 4; +pub const LSZOMB: c_int = 5; +pub const LSONPROC: c_int = 7; +pub const LSSUSPENDED: c_int = 8; + +// sys/xattr.h +pub const XATTR_CREATE: c_int = 0x01; +pub const XATTR_REPLACE: c_int = 0x02; +// sys/extattr.h +pub const EXTATTR_NAMESPACE_EMPTY: c_int = 0; + +// For getrandom() +pub const GRND_NONBLOCK: c_uint = 0x1; +pub const GRND_RANDOM: c_uint = 0x2; +pub const GRND_INSECURE: c_uint = 0x4; + +// sys/reboot.h +pub const RB_ASKNAME: c_int = 0x000000001; +pub const RB_SINGLE: c_int = 0x000000002; +pub const RB_NOSYNC: c_int = 0x000000004; +pub const RB_HALT: c_int = 0x000000008; +pub const RB_INITNAME: c_int = 0x000000010; +pub const RB_KDB: c_int = 0x000000040; +pub const RB_RDONLY: c_int = 0x000000080; +pub const RB_DUMP: c_int = 0x000000100; +pub const RB_MINIROOT: c_int = 0x000000200; +pub const RB_STRING: c_int = 0x000000400; +pub const RB_POWERDOWN: c_int = RB_HALT | 0x000000800; +pub const RB_USERCONF: c_int = 0x000001000; + +pub const fn MAP_ALIGNED(alignment: c_int) -> c_int { + alignment << MAP_ALIGNMENT_SHIFT +} + +// net/route.h +pub const RTF_MASK: c_int = 0x80; +pub const RTF_CONNECTED: c_int = 0x100; +pub const RTF_ANNOUNCE: c_int = 0x20000; +pub const RTF_SRC: c_int = 0x10000; +pub const RTF_LOCAL: c_int = 0x40000; +pub const RTF_BROADCAST: c_int = 0x80000; +pub const RTF_UPDATING: c_int = 0x100000; +pub const RTF_DONTCHANGEIFA: c_int = 0x200000; + +pub const RTM_VERSION: c_int = 4; +pub const RTM_LOCK: c_int = 0x8; +pub 
const RTM_IFANNOUNCE: c_int = 0x10; +pub const RTM_IEEE80211: c_int = 0x11; +pub const RTM_SETGATE: c_int = 0x12; +pub const RTM_LLINFO_UPD: c_int = 0x13; +pub const RTM_IFINFO: c_int = 0x14; +pub const RTM_OCHGADDR: c_int = 0x15; +pub const RTM_NEWADDR: c_int = 0x16; +pub const RTM_DELADDR: c_int = 0x17; +pub const RTM_CHGADDR: c_int = 0x18; + +pub const RTA_TAG: c_int = 0x100; + +pub const RTAX_TAG: c_int = 8; +pub const RTAX_MAX: c_int = 9; + +// sys/timerfd.h +pub const TFD_CLOEXEC: i32 = crate::O_CLOEXEC; +pub const TFD_NONBLOCK: i32 = crate::O_NONBLOCK; +pub const TFD_TIMER_ABSTIME: i32 = crate::O_WRONLY; +pub const TFD_TIMER_CANCEL_ON_SET: i32 = crate::O_RDWR; + +const fn _ALIGN(p: usize) -> usize { + (p + _ALIGNBYTES) & !_ALIGNBYTES +} + +f! { + pub fn CMSG_DATA(cmsg: *const cmsghdr) -> *mut c_uchar { + (cmsg as *mut c_uchar).add(_ALIGN(size_of::<cmsghdr>())) + } + + pub const fn CMSG_LEN(length: c_uint) -> c_uint { + _ALIGN(size_of::<cmsghdr>()) as c_uint + length + } + + pub fn CMSG_NXTHDR(mhdr: *const crate::msghdr, cmsg: *const cmsghdr) -> *mut cmsghdr { + if cmsg.is_null() { + return crate::CMSG_FIRSTHDR(mhdr); + } + let next = cmsg as usize + _ALIGN((*cmsg).cmsg_len as usize) + _ALIGN(size_of::<cmsghdr>()); + let max = (*mhdr).msg_control as usize + (*mhdr).msg_controllen as usize; + if next > max { + core::ptr::null_mut::<cmsghdr>() + } else { + (cmsg as usize + _ALIGN((*cmsg).cmsg_len as usize)) as *mut cmsghdr + } + } + + pub const fn CMSG_SPACE(length: c_uint) -> c_uint { + (_ALIGN(size_of::<cmsghdr>()) + _ALIGN(length as usize)) as c_uint + } + + // dirfd() is a macro on netbsd to access + // the first field of the struct where dirp points to: + // http://cvsweb.netbsd.org/bsdweb.cgi/src/include/dirent.h?rev=1.36 + pub fn dirfd(dirp: *mut crate::DIR) -> c_int { + *(dirp as *const c_int) + } + + pub fn SOCKCREDSIZE(ngrps: usize) -> usize { + let ngrps = if ngrps > 0 { ngrps - 1 } else { 0 }; + size_of::<sockcred>() + size_of::<crate::gid_t>() * ngrps + } + + pub fn PROT_MPROTECT(x: c_int) -> c_int { + x << 3 + } + + pub fn PROT_MPROTECT_EXTRACT(x: c_int) -> c_int { + (x >> 3) & 0x7 + } +} + +safe_f! 
{ + pub const fn WSTOPSIG(status: c_int) -> c_int { + status >> 8 + } + + pub const fn WIFSIGNALED(status: c_int) -> bool { + (status & 0o177) != 0o177 && (status & 0o177) != 0 + } + + pub const fn WIFSTOPPED(status: c_int) -> bool { + (status & 0o177) == 0o177 + } + + pub const fn WIFCONTINUED(status: c_int) -> bool { + status == 0xffff + } + + pub const fn makedev(major: c_uint, minor: c_uint) -> crate::dev_t { + let major = major as crate::dev_t; + let minor = minor as crate::dev_t; + let mut dev = 0; + dev |= (major << 8) & 0x000ff00; + dev |= (minor << 12) & 0xfff00000; + dev |= minor & 0xff; + dev + } + + pub const fn major(dev: crate::dev_t) -> c_int { + (((dev as u32) & 0x000fff00) >> 8) as c_int + } + + pub const fn minor(dev: crate::dev_t) -> c_int { + let mut res = 0; + res |= ((dev as u32) & 0xfff00000) >> 12; + res |= (dev as u32) & 0x000000ff; + res as c_int + } +} + +extern "C" { + pub fn ntp_adjtime(buf: *mut timex) -> c_int; + pub fn ntp_gettime(buf: *mut ntptimeval) -> c_int; + pub fn clock_nanosleep( + clk_id: crate::clockid_t, + flags: c_int, + rqtp: *const crate::timespec, + rmtp: *mut crate::timespec, + ) -> c_int; + + pub fn reallocarr(ptr: *mut c_void, number: size_t, size: size_t) -> c_int; + + pub fn chflags(path: *const c_char, flags: c_ulong) -> c_int; + pub fn fchflags(fd: c_int, flags: c_ulong) -> c_int; + pub fn lchflags(path: *const c_char, flags: c_ulong) -> c_int; + + pub fn extattr_list_fd( + fd: c_int, + attrnamespace: c_int, + data: *mut c_void, + nbytes: size_t, + ) -> ssize_t; + pub fn extattr_list_file( + path: *const c_char, + attrnamespace: c_int, + data: *mut c_void, + nbytes: size_t, + ) -> ssize_t; + pub fn extattr_list_link( + path: *const c_char, + attrnamespace: c_int, + data: *mut c_void, + nbytes: size_t, + ) -> ssize_t; + pub fn extattr_delete_fd(fd: c_int, attrnamespace: c_int, attrname: *const c_char) -> c_int; + pub fn extattr_delete_file( + path: *const c_char, + attrnamespace: c_int, + attrname: *const c_char, + ) -> c_int; + pub fn extattr_delete_link( + path: *const c_char, + attrnamespace: c_int, + attrname: *const c_char, + ) -> c_int; + pub fn extattr_get_fd( + fd: c_int, + attrnamespace: c_int, + attrname: *const c_char, + data: *mut c_void, + nbytes: size_t, + ) -> ssize_t; + pub fn extattr_get_file( + path: *const c_char, + attrnamespace: c_int, + attrname: *const c_char, + data: *mut c_void, + nbytes: size_t, + ) -> ssize_t; + pub fn extattr_get_link( + path: *const c_char, + attrnamespace: c_int, + attrname: *const c_char, + data: *mut c_void, + nbytes: size_t, + ) -> ssize_t; + pub fn extattr_namespace_to_string(attrnamespace: c_int, string: *mut *mut c_char) -> c_int; + pub fn extattr_set_fd( + fd: c_int, + attrnamespace: c_int, + attrname: *const c_char, + data: *const c_void, + nbytes: size_t, + ) -> c_int; + pub fn extattr_set_file( + path: *const c_char, + attrnamespace: c_int, + attrname: *const c_char, + data: *const c_void, + nbytes: size_t, + ) -> c_int; + pub fn extattr_set_link( + path: *const c_char, + attrnamespace: c_int, + attrname: *const c_char, + data: *const c_void, + nbytes: size_t, + ) -> c_int; + pub fn extattr_string_to_namespace(string: *const c_char, attrnamespace: *mut c_int) -> c_int; + + pub fn openpty( + amaster: *mut c_int, + aslave: *mut c_int, + name: *mut c_char, + termp: *mut crate::termios, + winp: *mut crate::winsize, + ) -> c_int; + pub fn forkpty( + amaster: *mut c_int, + name: *mut c_char, + termp: *mut crate::termios, + winp: *mut crate::winsize, + ) -> crate::pid_t; + + pub fn 
ptsname_r(fd: c_int, buf: *mut c_char, buflen: size_t) -> c_int; + + #[link_name = "__lutimes50"] + pub fn lutimes(file: *const c_char, times: *const crate::timeval) -> c_int; + #[link_name = "__gettimeofday50"] + pub fn gettimeofday(tp: *mut crate::timeval, tz: *mut c_void) -> c_int; + pub fn getnameinfo( + sa: *const crate::sockaddr, + salen: crate::socklen_t, + host: *mut c_char, + hostlen: crate::socklen_t, + serv: *mut c_char, + servlen: crate::socklen_t, + flags: c_int, + ) -> c_int; + pub fn mprotect(addr: *mut c_void, len: size_t, prot: c_int) -> c_int; + pub fn sysctl( + name: *const c_int, + namelen: c_uint, + oldp: *mut c_void, + oldlenp: *mut size_t, + newp: *const c_void, + newlen: size_t, + ) -> c_int; + pub fn sysctlbyname( + name: *const c_char, + oldp: *mut c_void, + oldlenp: *mut size_t, + newp: *const c_void, + newlen: size_t, + ) -> c_int; + pub fn sysctlnametomib(sname: *const c_char, name: *mut c_int, namelenp: *mut size_t) -> c_int; + #[link_name = "__kevent50"] + pub fn kevent( + kq: c_int, + changelist: *const crate::kevent, + nchanges: size_t, + eventlist: *mut crate::kevent, + nevents: size_t, + timeout: *const crate::timespec, + ) -> c_int; + #[link_name = "__mount50"] + pub fn mount( + src: *const c_char, + target: *const c_char, + flags: c_int, + data: *mut c_void, + size: size_t, + ) -> c_int; + pub fn mq_open(name: *const c_char, oflag: c_int, ...) -> crate::mqd_t; + pub fn mq_close(mqd: crate::mqd_t) -> c_int; + pub fn mq_getattr(mqd: crate::mqd_t, attr: *mut crate::mq_attr) -> c_int; + pub fn mq_notify(mqd: crate::mqd_t, notification: *const crate::sigevent) -> c_int; + pub fn mq_receive( + mqd: crate::mqd_t, + msg_ptr: *mut c_char, + msg_len: size_t, + msg_prio: *mut c_uint, + ) -> ssize_t; + pub fn mq_send( + mqd: crate::mqd_t, + msg_ptr: *const c_char, + msg_len: size_t, + msg_prio: c_uint, + ) -> c_int; + pub fn mq_setattr( + mqd: crate::mqd_t, + newattr: *const crate::mq_attr, + oldattr: *mut crate::mq_attr, + ) -> c_int; + #[link_name = "__mq_timedreceive50"] + pub fn mq_timedreceive( + mqd: crate::mqd_t, + msg_ptr: *mut c_char, + msg_len: size_t, + msg_prio: *mut c_uint, + abs_timeout: *const crate::timespec, + ) -> ssize_t; + #[link_name = "__mq_timedsend50"] + pub fn mq_timedsend( + mqd: crate::mqd_t, + msg_ptr: *const c_char, + msg_len: size_t, + msg_prio: c_uint, + abs_timeout: *const crate::timespec, + ) -> c_int; + pub fn mq_unlink(name: *const c_char) -> c_int; + pub fn ptrace(request: c_int, pid: crate::pid_t, addr: *mut c_void, data: c_int) -> c_int; + pub fn utrace(label: *const c_char, addr: *mut c_void, len: size_t) -> c_int; + pub fn pthread_getname_np(t: crate::pthread_t, name: *mut c_char, len: size_t) -> c_int; + pub fn pthread_setname_np( + t: crate::pthread_t, + name: *const c_char, + arg: *const c_void, + ) -> c_int; + pub fn pthread_attr_get_np(thread: crate::pthread_t, attr: *mut crate::pthread_attr_t) + -> c_int; + pub fn pthread_getattr_np(native: crate::pthread_t, attr: *mut crate::pthread_attr_t) -> c_int; + pub fn pthread_attr_getguardsize( + attr: *const crate::pthread_attr_t, + guardsize: *mut size_t, + ) -> c_int; + pub fn pthread_attr_setguardsize(attr: *mut crate::pthread_attr_t, guardsize: size_t) -> c_int; + pub fn pthread_attr_getstack( + attr: *const crate::pthread_attr_t, + stackaddr: *mut *mut c_void, + stacksize: *mut size_t, + ) -> c_int; + pub fn pthread_getaffinity_np( + thread: crate::pthread_t, + size: size_t, + set: *mut cpuset_t, + ) -> c_int; + pub fn pthread_setaffinity_np( + thread: crate::pthread_t, + 
size: size_t, + set: *mut cpuset_t, + ) -> c_int; + + pub fn _cpuset_create() -> *mut cpuset_t; + pub fn _cpuset_destroy(set: *mut cpuset_t); + pub fn _cpuset_clr(cpu: cpuid_t, set: *mut cpuset_t) -> c_int; + pub fn _cpuset_set(cpu: cpuid_t, set: *mut cpuset_t) -> c_int; + pub fn _cpuset_isset(cpu: cpuid_t, set: *const cpuset_t) -> c_int; + pub fn _cpuset_size(set: *const cpuset_t) -> size_t; + pub fn _cpuset_zero(set: *mut cpuset_t); + #[link_name = "__sigtimedwait50"] + pub fn sigtimedwait( + set: *const sigset_t, + info: *mut siginfo_t, + timeout: *const crate::timespec, + ) -> c_int; + pub fn sigwaitinfo(set: *const sigset_t, info: *mut siginfo_t) -> c_int; + + pub fn duplocale(base: crate::locale_t) -> crate::locale_t; + pub fn freelocale(loc: crate::locale_t); + pub fn localeconv_l(loc: crate::locale_t) -> *mut lconv; + pub fn newlocale(mask: c_int, locale: *const c_char, base: crate::locale_t) -> crate::locale_t; + #[link_name = "__settimeofday50"] + pub fn settimeofday(tv: *const crate::timeval, tz: *const c_void) -> c_int; + + pub fn dup3(src: c_int, dst: c_int, flags: c_int) -> c_int; + + pub fn kqueue1(flags: c_int) -> c_int; + + pub fn _lwp_self() -> lwpid_t; + pub fn memmem( + haystack: *const c_void, + haystacklen: size_t, + needle: *const c_void, + needlelen: size_t, + ) -> *mut c_void; + + // link.h + + pub fn dl_iterate_phdr( + callback: Option< + unsafe extern "C" fn(info: *mut dl_phdr_info, size: usize, data: *mut c_void) -> c_int, + >, + data: *mut c_void, + ) -> c_int; + + // dlfcn.h + + pub fn _dlauxinfo() -> *mut c_void; + + pub fn iconv_open(tocode: *const c_char, fromcode: *const c_char) -> iconv_t; + pub fn iconv( + cd: iconv_t, + inbuf: *mut *mut c_char, + inbytesleft: *mut size_t, + outbuf: *mut *mut c_char, + outbytesleft: *mut size_t, + ) -> size_t; + pub fn iconv_close(cd: iconv_t) -> c_int; + + pub fn timer_create( + clockid: crate::clockid_t, + sevp: *mut crate::sigevent, + timerid: *mut crate::timer_t, + ) -> c_int; + pub fn timer_delete(timerid: crate::timer_t) -> c_int; + pub fn timer_getoverrun(timerid: crate::timer_t) -> c_int; + pub fn timer_gettime(timerid: crate::timer_t, curr_value: *mut crate::itimerspec) -> c_int; + pub fn timer_settime( + timerid: crate::timer_t, + flags: c_int, + new_value: *const crate::itimerspec, + old_value: *mut crate::itimerspec, + ) -> c_int; + pub fn dlvsym( + handle: *mut c_void, + symbol: *const c_char, + version: *const c_char, + ) -> *mut c_void; + + // Added in `NetBSD` 7.0 + pub fn explicit_memset(b: *mut c_void, c: c_int, len: size_t); + pub fn consttime_memequal(a: *const c_void, b: *const c_void, len: size_t) -> c_int; + + pub fn setproctitle(fmt: *const c_char, ...); + pub fn mremap( + oldp: *mut c_void, + oldsize: size_t, + newp: *mut c_void, + newsize: size_t, + flags: c_int, + ) -> *mut c_void; + + pub fn sched_rr_get_interval(pid: crate::pid_t, t: *mut crate::timespec) -> c_int; + pub fn sched_setparam(pid: crate::pid_t, param: *const crate::sched_param) -> c_int; + pub fn sched_getparam(pid: crate::pid_t, param: *mut crate::sched_param) -> c_int; + pub fn sched_getscheduler(pid: crate::pid_t) -> c_int; + pub fn sched_setscheduler( + pid: crate::pid_t, + policy: c_int, + param: *const crate::sched_param, + ) -> c_int; + + #[link_name = "__pollts50"] + pub fn pollts( + fds: *mut crate::pollfd, + nfds: crate::nfds_t, + ts: *const crate::timespec, + sigmask: *const crate::sigset_t, + ) -> c_int; + pub fn ppoll( + fds: *mut crate::pollfd, + nfds: crate::nfds_t, + ts: *const crate::timespec, + sigmask: *const 
crate::sigset_t, + ) -> c_int; + pub fn getrandom(buf: *mut c_void, buflen: size_t, flags: c_uint) -> ssize_t; + + pub fn reboot(mode: c_int, bootstr: *mut c_char) -> c_int; + + #[link_name = "___lwp_park60"] + pub fn _lwp_park( + clock: crate::clockid_t, + flags: c_int, + ts: *const crate::timespec, + unpark: crate::lwpid_t, + hint: *const c_void, + unparkhint: *mut c_void, + ) -> c_int; + pub fn _lwp_unpark(lwp: crate::lwpid_t, hint: *const c_void) -> c_int; + pub fn _lwp_unpark_all( + targets: *const crate::lwpid_t, + ntargets: size_t, + hint: *const c_void, + ) -> c_int; + #[link_name = "__getmntinfo13"] + pub fn getmntinfo(mntbufp: *mut *mut crate::statvfs, flags: c_int) -> c_int; + pub fn getvfsstat(buf: *mut statvfs, bufsize: size_t, flags: c_int) -> c_int; + + // Added in `NetBSD` 10.0 + pub fn timerfd_create(clockid: crate::clockid_t, flags: c_int) -> c_int; + pub fn timerfd_gettime(fd: c_int, curr_value: *mut itimerspec) -> c_int; + pub fn timerfd_settime( + fd: c_int, + flags: c_int, + new_value: *const itimerspec, + old_value: *mut itimerspec, + ) -> c_int; + + pub fn qsort_r( + base: *mut c_void, + num: size_t, + size: size_t, + compar: Option c_int>, + arg: *mut c_void, + ); +} + +#[link(name = "rt")] +extern "C" { + pub fn aio_read(aiocbp: *mut aiocb) -> c_int; + pub fn aio_write(aiocbp: *mut aiocb) -> c_int; + pub fn aio_fsync(op: c_int, aiocbp: *mut aiocb) -> c_int; + pub fn aio_error(aiocbp: *const aiocb) -> c_int; + pub fn aio_return(aiocbp: *mut aiocb) -> ssize_t; + #[link_name = "__aio_suspend50"] + pub fn aio_suspend( + aiocb_list: *const *const aiocb, + nitems: c_int, + timeout: *const crate::timespec, + ) -> c_int; + pub fn aio_cancel(fd: c_int, aiocbp: *mut aiocb) -> c_int; + pub fn lio_listio( + mode: c_int, + aiocb_list: *const *mut aiocb, + nitems: c_int, + sevp: *mut sigevent, + ) -> c_int; +} + +#[link(name = "util")] +extern "C" { + #[cfg_attr(target_os = "netbsd", link_name = "__getpwent_r50")] + pub fn getpwent_r( + pwd: *mut crate::passwd, + buf: *mut c_char, + buflen: size_t, + result: *mut *mut crate::passwd, + ) -> c_int; + pub fn getgrent_r( + grp: *mut crate::group, + buf: *mut c_char, + buflen: size_t, + result: *mut *mut crate::group, + ) -> c_int; + + pub fn updwtmpx(file: *const c_char, ut: *const utmpx) -> c_int; + pub fn getlastlogx(fname: *const c_char, uid: crate::uid_t, ll: *mut lastlogx) + -> *mut lastlogx; + pub fn updlastlogx(fname: *const c_char, uid: crate::uid_t, ll: *mut lastlogx) -> c_int; + pub fn utmpxname(file: *const c_char) -> c_int; + pub fn getutxent() -> *mut utmpx; + pub fn getutxid(ut: *const utmpx) -> *mut utmpx; + pub fn getutxline(ut: *const utmpx) -> *mut utmpx; + pub fn pututxline(ut: *const utmpx) -> *mut utmpx; + pub fn setutxent(); + pub fn endutxent(); + + pub fn getutmp(ux: *const utmpx, u: *mut utmp); + pub fn getutmpx(u: *const utmp, ux: *mut utmpx); + + pub fn utpname(file: *const c_char) -> c_int; + pub fn setutent(); + pub fn endutent(); + pub fn getutent() -> *mut utmp; + + pub fn efopen(p: *const c_char, m: *const c_char) -> crate::FILE; + pub fn emalloc(n: size_t) -> *mut c_void; + pub fn ecalloc(n: size_t, c: size_t) -> *mut c_void; + pub fn erealloc(p: *mut c_void, n: size_t) -> *mut c_void; + pub fn ereallocarr(p: *mut c_void, n: size_t, s: size_t); + pub fn estrdup(s: *const c_char) -> *mut c_char; + pub fn estrndup(s: *const c_char, len: size_t) -> *mut c_char; + pub fn estrlcpy(dst: *mut c_char, src: *const c_char, len: size_t) -> size_t; + pub fn estrlcat(dst: *mut c_char, src: *const c_char, len: 
size_t) -> size_t; + pub fn estrtoi( + nptr: *const c_char, + base: c_int, + lo: crate::intmax_t, + hi: crate::intmax_t, + ) -> crate::intmax_t; + pub fn estrtou( + nptr: *const c_char, + base: c_int, + lo: crate::uintmax_t, + hi: crate::uintmax_t, + ) -> crate::uintmax_t; + pub fn easprintf(string: *mut *mut c_char, fmt: *const c_char, ...) -> c_int; + pub fn evasprintf(string: *mut *mut c_char, fmt: *const c_char, ...) -> c_int; + pub fn esetfunc( + cb: Option, + ) -> Option; + pub fn secure_path(path: *const c_char) -> c_int; + pub fn snprintb(buf: *mut c_char, buflen: size_t, fmt: *const c_char, val: u64) -> c_int; + pub fn snprintb_m( + buf: *mut c_char, + buflen: size_t, + fmt: *const c_char, + val: u64, + max: size_t, + ) -> c_int; + + pub fn getbootfile() -> *const c_char; + pub fn getbyteorder() -> c_int; + pub fn getdiskrawname(buf: *mut c_char, buflen: size_t, name: *const c_char) -> *const c_char; + pub fn getdiskcookedname( + buf: *mut c_char, + buflen: size_t, + name: *const c_char, + ) -> *const c_char; + pub fn getfsspecname(buf: *mut c_char, buflen: size_t, spec: *const c_char) -> *const c_char; + + pub fn strpct( + buf: *mut c_char, + bufsiz: size_t, + numerator: crate::uintmax_t, + denominator: crate::uintmax_t, + precision: size_t, + ) -> *mut c_char; + pub fn strspct( + buf: *mut c_char, + bufsiz: size_t, + numerator: crate::intmax_t, + denominator: crate::intmax_t, + precision: size_t, + ) -> *mut c_char; + #[link_name = "__login50"] + pub fn login(ut: *const utmp); + #[link_name = "__loginx50"] + pub fn loginx(ut: *const utmpx); + pub fn logout(line: *const c_char); + pub fn logoutx(line: *const c_char, status: c_int, tpe: c_int); + pub fn logwtmp(line: *const c_char, name: *const c_char, host: *const c_char); + pub fn logwtmpx( + line: *const c_char, + name: *const c_char, + host: *const c_char, + status: c_int, + tpe: c_int, + ); + + pub fn getxattr( + path: *const c_char, + name: *const c_char, + value: *mut c_void, + size: size_t, + ) -> ssize_t; + pub fn lgetxattr( + path: *const c_char, + name: *const c_char, + value: *mut c_void, + size: size_t, + ) -> ssize_t; + pub fn fgetxattr( + filedes: c_int, + name: *const c_char, + value: *mut c_void, + size: size_t, + ) -> ssize_t; + pub fn setxattr( + path: *const c_char, + name: *const c_char, + value: *const c_void, + size: size_t, + ) -> c_int; + pub fn lsetxattr( + path: *const c_char, + name: *const c_char, + value: *const c_void, + size: size_t, + ) -> c_int; + pub fn fsetxattr( + filedes: c_int, + name: *const c_char, + value: *const c_void, + size: size_t, + flags: c_int, + ) -> c_int; + pub fn listxattr(path: *const c_char, list: *mut c_char, size: size_t) -> ssize_t; + pub fn llistxattr(path: *const c_char, list: *mut c_char, size: size_t) -> ssize_t; + pub fn flistxattr(filedes: c_int, list: *mut c_char, size: size_t) -> ssize_t; + pub fn removexattr(path: *const c_char, name: *const c_char) -> c_int; + pub fn lremovexattr(path: *const c_char, name: *const c_char) -> c_int; + pub fn fremovexattr(fd: c_int, path: *const c_char, name: *const c_char) -> c_int; + + pub fn string_to_flags( + string_p: *mut *mut c_char, + setp: *mut c_ulong, + clrp: *mut c_ulong, + ) -> c_int; + pub fn flags_to_string(flags: c_ulong, def: *const c_char) -> c_int; + + pub fn kinfo_getvmmap(pid: crate::pid_t, cntp: *mut size_t) -> *mut kinfo_vmentry; +} + +#[link(name = "execinfo")] +extern "C" { + pub fn backtrace(addrlist: *mut *mut c_void, len: size_t) -> size_t; + pub fn backtrace_symbols(addrlist: *const *mut c_void, len: 
size_t) -> *mut *mut c_char; + pub fn backtrace_symbols_fd(addrlist: *const *mut c_void, len: size_t, fd: c_int) -> c_int; + pub fn backtrace_symbols_fmt( + addrlist: *const *mut c_void, + len: size_t, + fmt: *const c_char, + ) -> *mut *mut c_char; + pub fn backtrace_symbols_fd_fmt( + addrlist: *const *mut c_void, + len: size_t, + fd: c_int, + fmt: *const c_char, + ) -> c_int; +} + +cfg_if! { + if #[cfg(target_arch = "aarch64")] { + mod aarch64; + pub use self::aarch64::*; + } else if #[cfg(target_arch = "arm")] { + mod arm; + pub use self::arm::*; + } else if #[cfg(target_arch = "powerpc")] { + mod powerpc; + pub use self::powerpc::*; + } else if #[cfg(target_arch = "sparc64")] { + mod sparc64; + pub use self::sparc64::*; + } else if #[cfg(target_arch = "x86_64")] { + mod x86_64; + pub use self::x86_64::*; + } else if #[cfg(target_arch = "x86")] { + mod x86; + pub use self::x86::*; + } else if #[cfg(target_arch = "mips")] { + mod mips; + pub use self::mips::*; + } else if #[cfg(target_arch = "riscv64")] { + mod riscv64; + pub use self::riscv64::*; + } else { + // Unknown target_arch + } +} diff --git a/vendor/libc/src/unix/bsd/netbsdlike/netbsd/powerpc.rs b/vendor/libc/src/unix/bsd/netbsdlike/netbsd/powerpc.rs new file mode 100644 index 00000000000000..f8f2d56c0d3742 --- /dev/null +++ b/vendor/libc/src/unix/bsd/netbsdlike/netbsd/powerpc.rs @@ -0,0 +1,10 @@ +use crate::prelude::*; +use crate::PT_FIRSTMACH; + +pub type __cpu_simple_lock_nv_t = c_int; + +pub(crate) const _ALIGNBYTES: usize = size_of::() - 1; + +pub const PT_STEP: c_int = PT_FIRSTMACH + 0; +pub const PT_GETREGS: c_int = PT_FIRSTMACH + 1; +pub const PT_SETREGS: c_int = PT_FIRSTMACH + 2; diff --git a/vendor/libc/src/unix/bsd/netbsdlike/netbsd/riscv64.rs b/vendor/libc/src/unix/bsd/netbsdlike/netbsd/riscv64.rs new file mode 100644 index 00000000000000..47240cb2818c0b --- /dev/null +++ b/vendor/libc/src/unix/bsd/netbsdlike/netbsd/riscv64.rs @@ -0,0 +1,77 @@ +use PT_FIRSTMACH; + +use crate::prelude::*; + +pub type __greg_t = u64; +pub type __cpu_simple_lock_nv_t = c_int; +pub type __gregset = [__greg_t; _NGREG]; +pub type __fregset = [__freg; _NFREG]; + +s! { + pub struct mcontext_t { + pub __gregs: __gregset, + pub __fregs: __fpregset, + __spare: [crate::__greg_t; 7], + } +} + +s_no_extra_traits! 
{ + pub union __fpreg { + pub u_u64: u64, + pub u_d: c_double, + } +} + +pub(crate) const _ALIGNBYTES: usize = size_of::() - 1; + +pub const PT_GETREGS: c_int = PT_FIRSTMACH + 0; +pub const PT_SETREGS: c_int = PT_FIRSTMACH + 1; +pub const PT_GETFPREGS: c_int = PT_FIRSTMACH + 2; +pub const PT_SETFPREGS: c_int = PT_FIRSTMACH + 3; + +pub const _NGREG: usize = 32; +pub const _NFREG: usize = 33; + +pub const _REG_X1: c_int = 0; +pub const _REG_X2: c_int = 1; +pub const _REG_X3: c_int = 2; +pub const _REG_X4: c_int = 3; +pub const _REG_X5: c_int = 4; +pub const _REG_X6: c_int = 5; +pub const _REG_X7: c_int = 6; +pub const _REG_X8: c_int = 7; +pub const _REG_X9: c_int = 8; +pub const _REG_X10: c_int = 9; +pub const _REG_X11: c_int = 10; +pub const _REG_X12: c_int = 11; +pub const _REG_X13: c_int = 12; +pub const _REG_X14: c_int = 13; +pub const _REG_X15: c_int = 14; +pub const _REG_X16: c_int = 15; +pub const _REG_X17: c_int = 16; +pub const _REG_X18: c_int = 17; +pub const _REG_X19: c_int = 18; +pub const _REG_X20: c_int = 19; +pub const _REG_X21: c_int = 20; +pub const _REG_X22: c_int = 21; +pub const _REG_X23: c_int = 22; +pub const _REG_X24: c_int = 23; +pub const _REG_X25: c_int = 24; +pub const _REG_X26: c_int = 25; +pub const _REG_X27: c_int = 26; +pub const _REG_X28: c_int = 27; +pub const _REG_X29: c_int = 28; +pub const _REG_X30: c_int = 29; +pub const _REG_X31: c_int = 30; +pub const _REG_PC: c_int = 31; + +pub const _REG_RA: c_int = _REG_X1; +pub const _REG_SP: c_int = _REG_X2; +pub const _REG_GP: c_int = _REG_X3; +pub const _REG_TP: c_int = _REG_X4; +pub const _REG_S0: c_int = _REG_X8; +pub const _REG_RV: c_int = _REG_X10; +pub const _REG_A0: c_int = _REG_X10; + +pub const _REG_F0: c_int = 0; +pub const _REG_FPCSR: c_int = 32; diff --git a/vendor/libc/src/unix/bsd/netbsdlike/netbsd/sparc64.rs b/vendor/libc/src/unix/bsd/netbsdlike/netbsd/sparc64.rs new file mode 100644 index 00000000000000..91622f7eea3fab --- /dev/null +++ b/vendor/libc/src/unix/bsd/netbsdlike/netbsd/sparc64.rs @@ -0,0 +1,7 @@ +use crate::prelude::*; + +pub type __cpu_simple_lock_nv_t = c_uchar; + +// should be pub(crate), but that requires Rust 1.18.0 +#[doc(hidden)] +pub const _ALIGNBYTES: usize = 0xf; diff --git a/vendor/libc/src/unix/bsd/netbsdlike/netbsd/x86.rs b/vendor/libc/src/unix/bsd/netbsdlike/netbsd/x86.rs new file mode 100644 index 00000000000000..95f55768973ca3 --- /dev/null +++ b/vendor/libc/src/unix/bsd/netbsdlike/netbsd/x86.rs @@ -0,0 +1,5 @@ +use crate::prelude::*; + +pub type __cpu_simple_lock_nv_t = c_uchar; + +pub(crate) const _ALIGNBYTES: usize = size_of::() - 1; diff --git a/vendor/libc/src/unix/bsd/netbsdlike/netbsd/x86_64.rs b/vendor/libc/src/unix/bsd/netbsdlike/netbsd/x86_64.rs new file mode 100644 index 00000000000000..77daa4b1e9eb28 --- /dev/null +++ b/vendor/libc/src/unix/bsd/netbsdlike/netbsd/x86_64.rs @@ -0,0 +1,56 @@ +use crate::prelude::*; +use crate::PT_FIRSTMACH; + +pub type c___greg_t = u64; +pub type __cpu_simple_lock_nv_t = c_uchar; + +s! 
{ + pub struct mcontext_t { + pub __gregs: [c___greg_t; 26], + pub _mc_tlsbase: c___greg_t, + pub __fpregs: [[c_char; 32]; 16], + } + + pub struct ucontext_t { + pub uc_flags: c_uint, + pub uc_link: *mut crate::ucontext_t, + pub uc_sigmask: crate::sigset_t, + pub uc_stack: crate::stack_t, + pub uc_mcontext: crate::mcontext_t, + } +} + +pub(crate) const _ALIGNBYTES: usize = size_of::() - 1; + +pub const PT_STEP: c_int = PT_FIRSTMACH + 0; +pub const PT_GETREGS: c_int = PT_FIRSTMACH + 1; +pub const PT_SETREGS: c_int = PT_FIRSTMACH + 2; +pub const PT_GETFPREGS: c_int = PT_FIRSTMACH + 3; +pub const PT_SETFPREGS: c_int = PT_FIRSTMACH + 4; + +pub const _REG_RDI: c_int = 0; +pub const _REG_RSI: c_int = 1; +pub const _REG_RDX: c_int = 2; +pub const _REG_RCX: c_int = 3; +pub const _REG_R8: c_int = 4; +pub const _REG_R9: c_int = 5; +pub const _REG_R10: c_int = 6; +pub const _REG_R11: c_int = 7; +pub const _REG_R12: c_int = 8; +pub const _REG_R13: c_int = 9; +pub const _REG_R14: c_int = 10; +pub const _REG_R15: c_int = 11; +pub const _REG_RBP: c_int = 12; +pub const _REG_RBX: c_int = 13; +pub const _REG_RAX: c_int = 14; +pub const _REG_GS: c_int = 15; +pub const _REG_FS: c_int = 16; +pub const _REG_ES: c_int = 17; +pub const _REG_DS: c_int = 18; +pub const _REG_TRAPNO: c_int = 19; +pub const _REG_ERR: c_int = 20; +pub const _REG_RIP: c_int = 21; +pub const _REG_CS: c_int = 22; +pub const _REG_RFLAGS: c_int = 23; +pub const _REG_RSP: c_int = 24; +pub const _REG_SS: c_int = 25; diff --git a/vendor/libc/src/unix/bsd/netbsdlike/openbsd/aarch64.rs b/vendor/libc/src/unix/bsd/netbsdlike/openbsd/aarch64.rs new file mode 100644 index 00000000000000..e0d347fb5e6b87 --- /dev/null +++ b/vendor/libc/src/unix/bsd/netbsdlike/openbsd/aarch64.rs @@ -0,0 +1,20 @@ +use crate::prelude::*; + +pub type ucontext_t = sigcontext; + +s! 
{ + pub struct sigcontext { + __sc_unused: c_int, + pub sc_mask: c_int, + pub sc_sp: c_ulong, + pub sc_lr: c_ulong, + pub sc_elr: c_ulong, + pub sc_spsr: c_ulong, + pub sc_x: [c_ulong; 30], + pub sc_cookie: c_long, + } +} + +pub(crate) const _ALIGNBYTES: usize = size_of::() - 1; + +pub const _MAX_PAGE_SHIFT: u32 = 12; diff --git a/vendor/libc/src/unix/bsd/netbsdlike/openbsd/arm.rs b/vendor/libc/src/unix/bsd/netbsdlike/openbsd/arm.rs new file mode 100644 index 00000000000000..8b3f72139d86e9 --- /dev/null +++ b/vendor/libc/src/unix/bsd/netbsdlike/openbsd/arm.rs @@ -0,0 +1,5 @@ +use crate::prelude::*; + +pub(crate) const _ALIGNBYTES: usize = size_of::() - 1; + +pub const _MAX_PAGE_SHIFT: u32 = 12; diff --git a/vendor/libc/src/unix/bsd/netbsdlike/openbsd/mips64.rs b/vendor/libc/src/unix/bsd/netbsdlike/openbsd/mips64.rs new file mode 100644 index 00000000000000..162ceda265df91 --- /dev/null +++ b/vendor/libc/src/unix/bsd/netbsdlike/openbsd/mips64.rs @@ -0,0 +1,4 @@ +#[doc(hidden)] +pub const _ALIGNBYTES: usize = 7; + +pub const _MAX_PAGE_SHIFT: u32 = 14; diff --git a/vendor/libc/src/unix/bsd/netbsdlike/openbsd/mod.rs b/vendor/libc/src/unix/bsd/netbsdlike/openbsd/mod.rs new file mode 100644 index 00000000000000..b28f4557f52187 --- /dev/null +++ b/vendor/libc/src/unix/bsd/netbsdlike/openbsd/mod.rs @@ -0,0 +1,2149 @@ +use crate::prelude::*; +use crate::unix::bsd::O_SYNC; +use crate::{cmsghdr, off_t}; + +pub type clock_t = i64; +pub type suseconds_t = c_long; +pub type dev_t = i32; +pub type sigset_t = c_uint; +pub type blksize_t = i32; +pub type fsblkcnt_t = u64; +pub type fsfilcnt_t = u64; +pub type idtype_t = c_uint; +pub type pthread_attr_t = *mut c_void; +pub type pthread_mutex_t = *mut c_void; +pub type pthread_mutexattr_t = *mut c_void; +pub type pthread_cond_t = *mut c_void; +pub type pthread_condattr_t = *mut c_void; +pub type pthread_rwlock_t = *mut c_void; +pub type pthread_rwlockattr_t = *mut c_void; +pub type pthread_spinlock_t = crate::uintptr_t; +pub type caddr_t = *mut c_char; + +// elf.h + +pub type Elf32_Addr = u32; +pub type Elf32_Half = u16; +pub type Elf32_Lword = u64; +pub type Elf32_Off = u32; +pub type Elf32_Sword = i32; +pub type Elf32_Word = u32; + +pub type Elf64_Addr = u64; +pub type Elf64_Half = u16; +pub type Elf64_Lword = u64; +pub type Elf64_Off = u64; +pub type Elf64_Sword = i32; +pub type Elf64_Sxword = i64; +pub type Elf64_Word = u32; +pub type Elf64_Xword = u64; + +// search.h + +pub type ENTRY = entry; +pub type ACTION = c_uint; + +// spawn.h +pub type posix_spawnattr_t = *mut c_void; +pub type posix_spawn_file_actions_t = *mut c_void; + +cfg_if! { + if #[cfg(target_pointer_width = "64")] { + type Elf_Addr = Elf64_Addr; + type Elf_Half = Elf64_Half; + type Elf_Phdr = Elf64_Phdr; + } else if #[cfg(target_pointer_width = "32")] { + type Elf_Addr = Elf32_Addr; + type Elf_Half = Elf32_Half; + type Elf_Phdr = Elf32_Phdr; + } +} + +s! 
{ + pub struct ip_mreqn { + pub imr_multiaddr: in_addr, + pub imr_address: in_addr, + pub imr_ifindex: c_int, + } + + pub struct glob_t { + pub gl_pathc: size_t, + pub gl_matchc: size_t, + pub gl_offs: size_t, + pub gl_flags: c_int, + pub gl_pathv: *mut *mut c_char, + __unused1: *mut c_void, + __unused2: *mut c_void, + __unused3: *mut c_void, + __unused4: *mut c_void, + __unused5: *mut c_void, + __unused6: *mut c_void, + __unused7: *mut c_void, + } + + pub struct lconv { + pub decimal_point: *mut c_char, + pub thousands_sep: *mut c_char, + pub grouping: *mut c_char, + pub int_curr_symbol: *mut c_char, + pub currency_symbol: *mut c_char, + pub mon_decimal_point: *mut c_char, + pub mon_thousands_sep: *mut c_char, + pub mon_grouping: *mut c_char, + pub positive_sign: *mut c_char, + pub negative_sign: *mut c_char, + pub int_frac_digits: c_char, + pub frac_digits: c_char, + pub p_cs_precedes: c_char, + pub p_sep_by_space: c_char, + pub n_cs_precedes: c_char, + pub n_sep_by_space: c_char, + pub p_sign_posn: c_char, + pub n_sign_posn: c_char, + pub int_p_cs_precedes: c_char, + pub int_p_sep_by_space: c_char, + pub int_n_cs_precedes: c_char, + pub int_n_sep_by_space: c_char, + pub int_p_sign_posn: c_char, + pub int_n_sign_posn: c_char, + } + + pub struct ufs_args { + pub fspec: *mut c_char, + pub export_info: export_args, + } + + pub struct mfs_args { + pub fspec: *mut c_char, + pub export_info: export_args, + // https://github.com/openbsd/src/blob/HEAD/sys/sys/types.h#L134 + pub base: *mut c_char, + pub size: c_ulong, + } + + pub struct iso_args { + pub fspec: *mut c_char, + pub export_info: export_args, + pub flags: c_int, + pub sess: c_int, + } + + pub struct nfs_args { + pub version: c_int, + pub addr: *mut crate::sockaddr, + pub addrlen: c_int, + pub sotype: c_int, + pub proto: c_int, + pub fh: *mut c_uchar, + pub fhsize: c_int, + pub flags: c_int, + pub wsize: c_int, + pub rsize: c_int, + pub readdirsize: c_int, + pub timeo: c_int, + pub retrans: c_int, + pub maxgrouplist: c_int, + pub readahead: c_int, + pub leaseterm: c_int, + pub deadthresh: c_int, + pub hostname: *mut c_char, + pub acregmin: c_int, + pub acregmax: c_int, + pub acdirmin: c_int, + pub acdirmax: c_int, + } + + pub struct msdosfs_args { + pub fspec: *mut c_char, + pub export_info: export_args, + pub uid: crate::uid_t, + pub gid: crate::gid_t, + pub mask: crate::mode_t, + pub flags: c_int, + } + + pub struct ntfs_args { + pub fspec: *mut c_char, + pub export_info: export_args, + pub uid: crate::uid_t, + pub gid: crate::gid_t, + pub mode: crate::mode_t, + pub flag: c_ulong, + } + + pub struct udf_args { + pub fspec: *mut c_char, + pub lastblock: u32, + } + + pub struct tmpfs_args { + pub ta_version: c_int, + pub ta_nodes_max: crate::ino_t, + pub ta_size_max: off_t, + pub ta_root_uid: crate::uid_t, + pub ta_root_gid: crate::gid_t, + pub ta_root_mode: crate::mode_t, + } + + pub struct fusefs_args { + pub name: *mut c_char, + pub fd: c_int, + pub max_read: c_int, + pub allow_other: c_int, + } + + pub struct xucred { + pub cr_uid: crate::uid_t, + pub cr_gid: crate::gid_t, + pub cr_ngroups: c_short, + //https://github.com/openbsd/src/blob/HEAD/sys/sys/syslimits.h#L44 + pub cr_groups: [crate::gid_t; 16], + } + + pub struct export_args { + pub ex_flags: c_int, + pub ex_root: crate::uid_t, + pub ex_anon: xucred, + pub ex_addr: *mut crate::sockaddr, + pub ex_addrlen: c_int, + pub ex_mask: *mut crate::sockaddr, + pub ex_masklen: c_int, + } + + pub struct ip_mreq { + pub imr_multiaddr: in_addr, + pub imr_interface: in_addr, + } + + pub 
struct in_addr { + pub s_addr: crate::in_addr_t, + } + + pub struct sockaddr_in { + pub sin_len: u8, + pub sin_family: crate::sa_family_t, + pub sin_port: crate::in_port_t, + pub sin_addr: crate::in_addr, + pub sin_zero: [i8; 8], + } + + pub struct splice { + pub sp_fd: c_int, + pub sp_max: off_t, + pub sp_idle: crate::timeval, + } + + pub struct kevent { + pub ident: crate::uintptr_t, + pub filter: c_short, + pub flags: c_ushort, + pub fflags: c_uint, + pub data: i64, + pub udata: *mut c_void, + } + + pub struct stat { + pub st_mode: crate::mode_t, + pub st_dev: crate::dev_t, + pub st_ino: crate::ino_t, + pub st_nlink: crate::nlink_t, + pub st_uid: crate::uid_t, + pub st_gid: crate::gid_t, + pub st_rdev: crate::dev_t, + pub st_atime: crate::time_t, + pub st_atime_nsec: c_long, + pub st_mtime: crate::time_t, + pub st_mtime_nsec: c_long, + pub st_ctime: crate::time_t, + pub st_ctime_nsec: c_long, + pub st_size: off_t, + pub st_blocks: crate::blkcnt_t, + pub st_blksize: crate::blksize_t, + pub st_flags: u32, + pub st_gen: u32, + pub st_birthtime: crate::time_t, + pub st_birthtime_nsec: c_long, + } + + pub struct statvfs { + pub f_bsize: c_ulong, + pub f_frsize: c_ulong, + pub f_blocks: crate::fsblkcnt_t, + pub f_bfree: crate::fsblkcnt_t, + pub f_bavail: crate::fsblkcnt_t, + pub f_files: crate::fsfilcnt_t, + pub f_ffree: crate::fsfilcnt_t, + pub f_favail: crate::fsfilcnt_t, + pub f_fsid: c_ulong, + pub f_flag: c_ulong, + pub f_namemax: c_ulong, + } + + pub struct addrinfo { + pub ai_flags: c_int, + pub ai_family: c_int, + pub ai_socktype: c_int, + pub ai_protocol: c_int, + pub ai_addrlen: crate::socklen_t, + pub ai_addr: *mut crate::sockaddr, + pub ai_canonname: *mut c_char, + pub ai_next: *mut crate::addrinfo, + } + + pub struct Dl_info { + pub dli_fname: *const c_char, + pub dli_fbase: *mut c_void, + pub dli_sname: *const c_char, + pub dli_saddr: *mut c_void, + } + + pub struct if_data { + pub ifi_type: c_uchar, + pub ifi_addrlen: c_uchar, + pub ifi_hdrlen: c_uchar, + pub ifi_link_state: c_uchar, + pub ifi_mtu: u32, + pub ifi_metric: u32, + pub ifi_rdomain: u32, + pub ifi_baudrate: u64, + pub ifi_ipackets: u64, + pub ifi_ierrors: u64, + pub ifi_opackets: u64, + pub ifi_oerrors: u64, + pub ifi_collisions: u64, + pub ifi_ibytes: u64, + pub ifi_obytes: u64, + pub ifi_imcasts: u64, + pub ifi_omcasts: u64, + pub ifi_iqdrops: u64, + pub ifi_oqdrops: u64, + pub ifi_noproto: u64, + pub ifi_capabilities: u32, + pub ifi_lastchange: crate::timeval, + } + + pub struct if_msghdr { + pub ifm_msglen: c_ushort, + pub ifm_version: c_uchar, + pub ifm_type: c_uchar, + pub ifm_hdrlen: c_ushort, + pub ifm_index: c_ushort, + pub ifm_tableid: c_ushort, + pub ifm_pad1: c_uchar, + pub ifm_pad2: c_uchar, + pub ifm_addrs: c_int, + pub ifm_flags: c_int, + pub ifm_xflags: c_int, + pub ifm_data: if_data, + } + + pub struct sockaddr_dl { + pub sdl_len: c_uchar, + pub sdl_family: c_uchar, + pub sdl_index: c_ushort, + pub sdl_type: c_uchar, + pub sdl_nlen: c_uchar, + pub sdl_alen: c_uchar, + pub sdl_slen: c_uchar, + pub sdl_data: [c_char; 24], + } + + pub struct sockpeercred { + pub uid: crate::uid_t, + pub gid: crate::gid_t, + pub pid: crate::pid_t, + } + + pub struct arphdr { + pub ar_hrd: u16, + pub ar_pro: u16, + pub ar_hln: u8, + pub ar_pln: u8, + pub ar_op: u16, + } + + pub struct shmid_ds { + pub shm_perm: crate::ipc_perm, + pub shm_segsz: c_int, + pub shm_lpid: crate::pid_t, + pub shm_cpid: crate::pid_t, + pub shm_nattch: c_short, + pub shm_atime: crate::time_t, + __shm_atimensec: c_long, + pub shm_dtime: 
crate::time_t, + __shm_dtimensec: c_long, + pub shm_ctime: crate::time_t, + __shm_ctimensec: c_long, + pub shm_internal: *mut c_void, + } + + // elf.h + pub struct Elf32_Phdr { + pub p_type: Elf32_Word, + pub p_offset: Elf32_Off, + pub p_vaddr: Elf32_Addr, + pub p_paddr: Elf32_Addr, + pub p_filesz: Elf32_Word, + pub p_memsz: Elf32_Word, + pub p_flags: Elf32_Word, + pub p_align: Elf32_Word, + } + + pub struct Elf64_Phdr { + pub p_type: Elf64_Word, + pub p_flags: Elf64_Word, + pub p_offset: Elf64_Off, + pub p_vaddr: Elf64_Addr, + pub p_paddr: Elf64_Addr, + pub p_filesz: Elf64_Xword, + pub p_memsz: Elf64_Xword, + pub p_align: Elf64_Xword, + } + + // link.h + + pub struct dl_phdr_info { + pub dlpi_addr: Elf_Addr, + pub dlpi_name: *const c_char, + pub dlpi_phdr: *const Elf_Phdr, + pub dlpi_phnum: Elf_Half, + } + + // sys/sysctl.h + pub struct kinfo_proc { + pub p_forw: u64, + pub p_back: u64, + pub p_paddr: u64, + pub p_addr: u64, + pub p_fd: u64, + pub p_stats: u64, + pub p_limit: u64, + pub p_vmspace: u64, + pub p_sigacts: u64, + pub p_sess: u64, + pub p_tsess: u64, + pub p_ru: u64, + pub p_eflag: i32, + pub p_exitsig: i32, + pub p_flag: i32, + pub p_pid: i32, + pub p_ppid: i32, + pub p_sid: i32, + pub p__pgid: i32, + pub p_tpgid: i32, + pub p_uid: u32, + pub p_ruid: u32, + pub p_gid: u32, + pub p_rgid: u32, + pub p_groups: [u32; KI_NGROUPS as usize], + pub p_ngroups: i16, + pub p_jobc: i16, + pub p_tdev: u32, + pub p_estcpu: u32, + pub p_rtime_sec: u32, + pub p_rtime_usec: u32, + pub p_cpticks: i32, + pub p_pctcpu: u32, + pub p_swtime: u32, + pub p_slptime: u32, + pub p_schedflags: i32, + pub p_uticks: u64, + pub p_sticks: u64, + pub p_iticks: u64, + pub p_tracep: u64, + pub p_traceflag: i32, + pub p_holdcnt: i32, + pub p_siglist: i32, + pub p_sigmask: u32, + pub p_sigignore: u32, + pub p_sigcatch: u32, + pub p_stat: i8, + pub p_priority: u8, + pub p_usrpri: u8, + pub p_nice: u8, + pub p_xstat: u16, + pub p_spare: u16, + pub p_comm: [c_char; KI_MAXCOMLEN as usize], + pub p_wmesg: [c_char; KI_WMESGLEN as usize], + pub p_wchan: u64, + pub p_login: [c_char; KI_MAXLOGNAME as usize], + pub p_vm_rssize: i32, + pub p_vm_tsize: i32, + pub p_vm_dsize: i32, + pub p_vm_ssize: i32, + pub p_uvalid: i64, + pub p_ustart_sec: u64, + pub p_ustart_usec: u32, + pub p_uutime_sec: u32, + pub p_uutime_usec: u32, + pub p_ustime_sec: u32, + pub p_ustime_usec: u32, + pub p_uru_maxrss: u64, + pub p_uru_ixrss: u64, + pub p_uru_idrss: u64, + pub p_uru_isrss: u64, + pub p_uru_minflt: u64, + pub p_uru_majflt: u64, + pub p_uru_nswap: u64, + pub p_uru_inblock: u64, + pub p_uru_oublock: u64, + pub p_uru_msgsnd: u64, + pub p_uru_msgrcv: u64, + pub p_uru_nsignals: u64, + pub p_uru_nvcsw: u64, + pub p_uru_nivcsw: u64, + pub p_uctime_sec: u32, + pub p_uctime_usec: u32, + pub p_psflags: u32, + pub p_acflag: u32, + pub p_svuid: u32, + pub p_svgid: u32, + pub p_emul: [c_char; KI_EMULNAMELEN as usize], + pub p_rlim_rss_cur: u64, + pub p_cpuid: u64, + pub p_vm_map_size: u64, + pub p_tid: i32, + pub p_rtableid: u32, + pub p_pledge: u64, + pub p_name: [c_char; KI_MAXCOMLEN as usize], + } + + pub struct kinfo_vmentry { + pub kve_start: c_ulong, + pub kve_end: c_ulong, + pub kve_guard: c_ulong, + pub kve_fspace: c_ulong, + pub kve_fspace_augment: c_ulong, + pub kve_offset: u64, + pub kve_wired_count: c_int, + pub kve_etype: c_int, + pub kve_protection: c_int, + pub kve_max_protection: c_int, + pub kve_advice: c_int, + pub kve_inheritance: c_int, + pub kve_flags: u8, + } + + pub struct ptrace_state { + pub pe_report_event: c_int, + pub 
pe_other_pid: crate::pid_t, + pub pe_tid: crate::pid_t, + } + + pub struct ptrace_thread_state { + pub pts_tid: crate::pid_t, + } + + // search.h + pub struct entry { + pub key: *mut c_char, + pub data: *mut c_void, + } + + pub struct ifreq { + pub ifr_name: [c_char; crate::IFNAMSIZ], + pub ifr_ifru: __c_anonymous_ifr_ifru, + } + + pub struct tcp_info { + pub tcpi_state: u8, + pub __tcpi_ca_state: u8, + pub __tcpi_retransmits: u8, + pub __tcpi_probes: u8, + pub __tcpi_backoff: u8, + pub tcpi_options: u8, + pub tcpi_snd_wscale: u8, + pub tcpi_rcv_wscale: u8, + pub tcpi_rto: u32, + pub __tcpi_ato: u32, + pub tcpi_snd_mss: u32, + pub tcpi_rcv_mss: u32, + pub __tcpi_unacked: u32, + pub __tcpi_sacked: u32, + pub __tcpi_lost: u32, + pub __tcpi_retrans: u32, + pub __tcpi_fackets: u32, + pub tcpi_last_data_sent: u32, + pub tcpi_last_ack_sent: u32, + pub tcpi_last_data_recv: u32, + pub tcpi_last_ack_recv: u32, + pub __tcpi_pmtu: u32, + pub __tcpi_rcv_ssthresh: u32, + pub tcpi_rtt: u32, + pub tcpi_rttvar: u32, + pub tcpi_snd_ssthresh: u32, + pub tcpi_snd_cwnd: u32, + pub __tcpi_advmss: u32, + pub __tcpi_reordering: u32, + pub __tcpi_rcv_rtt: u32, + pub tcpi_rcv_space: u32, + pub tcpi_snd_wnd: u32, + pub tcpi_snd_nxt: u32, + pub tcpi_rcv_nxt: u32, + pub tcpi_toe_tid: u32, + pub tcpi_snd_rexmitpack: u32, + pub tcpi_rcv_ooopack: u32, + pub tcpi_snd_zerowin: u32, + pub tcpi_rttmin: u32, + pub tcpi_max_sndwnd: u32, + pub tcpi_rcv_adv: u32, + pub tcpi_rcv_up: u32, + pub tcpi_snd_una: u32, + pub tcpi_snd_up: u32, + pub tcpi_snd_wl1: u32, + pub tcpi_snd_wl2: u32, + pub tcpi_snd_max: u32, + pub tcpi_ts_recent: u32, + pub tcpi_ts_recent_age: u32, + pub tcpi_rfbuf_cnt: u32, + pub tcpi_rfbuf_ts: u32, + pub tcpi_so_rcv_sb_cc: u32, + pub tcpi_so_rcv_sb_hiwat: u32, + pub tcpi_so_rcv_sb_lowat: u32, + pub tcpi_so_rcv_sb_wat: u32, + pub tcpi_so_snd_sb_cc: u32, + pub tcpi_so_snd_sb_hiwat: u32, + pub tcpi_so_snd_sb_lowat: u32, + pub tcpi_so_snd_sb_wat: u32, + } +} + +impl siginfo_t { + pub unsafe fn si_addr(&self) -> *mut c_char { + self.si_addr + } + + pub unsafe fn si_code(&self) -> c_int { + self.si_code + } + + pub unsafe fn si_errno(&self) -> c_int { + self.si_errno + } + + pub unsafe fn si_pid(&self) -> crate::pid_t { + #[repr(C)] + struct siginfo_timer { + _si_signo: c_int, + _si_code: c_int, + _si_errno: c_int, + _pad: [c_int; SI_PAD], + _pid: crate::pid_t, + } + (*(self as *const siginfo_t).cast::<siginfo_timer>())._pid + } + + pub unsafe fn si_uid(&self) -> crate::uid_t { + #[repr(C)] + struct siginfo_timer { + _si_signo: c_int, + _si_code: c_int, + _si_errno: c_int, + _pad: [c_int; SI_PAD], + _pid: crate::pid_t, + _uid: crate::uid_t, + } + (*(self as *const siginfo_t).cast::<siginfo_timer>())._uid + } + + pub unsafe fn si_value(&self) -> crate::sigval { + #[repr(C)] + struct siginfo_timer { + _si_signo: c_int, + _si_code: c_int, + _si_errno: c_int, + _pad: [c_int; SI_PAD], + _pid: crate::pid_t, + _uid: crate::uid_t, + value: crate::sigval, + } + (*(self as *const siginfo_t).cast::<siginfo_timer>()).value + } +} + +s_no_extra_traits!
{ + pub struct dirent { + pub d_fileno: crate::ino_t, + pub d_off: off_t, + pub d_reclen: u16, + pub d_type: u8, + pub d_namlen: u8, + __d_padding: [u8; 4], + pub d_name: [c_char; 256], + } + + pub struct sockaddr_storage { + pub ss_len: u8, + pub ss_family: crate::sa_family_t, + __ss_pad1: [u8; 6], + __ss_pad2: i64, + __ss_pad3: [u8; 240], + } + + pub struct siginfo_t { + pub si_signo: c_int, + pub si_code: c_int, + pub si_errno: c_int, + pub si_addr: *mut c_char, + #[cfg(target_pointer_width = "32")] + __pad: [u8; 112], + #[cfg(target_pointer_width = "64")] + __pad: [u8; 108], + } + + pub struct lastlog { + ll_time: crate::time_t, + ll_line: [c_char; UT_LINESIZE], + ll_host: [c_char; UT_HOSTSIZE], + } + + pub struct utmp { + pub ut_line: [c_char; UT_LINESIZE], + pub ut_name: [c_char; UT_NAMESIZE], + pub ut_host: [c_char; UT_HOSTSIZE], + pub ut_time: crate::time_t, + } + + pub union mount_info { + pub ufs_args: ufs_args, + pub mfs_args: mfs_args, + pub nfs_args: nfs_args, + pub iso_args: iso_args, + pub msdosfs_args: msdosfs_args, + pub ntfs_args: ntfs_args, + pub tmpfs_args: tmpfs_args, + align: [c_char; 160], + } + + pub union __c_anonymous_ifr_ifru { + pub ifru_addr: crate::sockaddr, + pub ifru_dstaddr: crate::sockaddr, + pub ifru_broadaddr: crate::sockaddr, + pub ifru_flags: c_short, + pub ifru_metric: c_int, + pub ifru_vnetid: i64, + pub ifru_media: u64, + pub ifru_data: crate::caddr_t, + pub ifru_index: c_uint, + } + + // This type uses the union mount_info: + pub struct statfs { + pub f_flags: u32, + pub f_bsize: u32, + pub f_iosize: u32, + pub f_blocks: u64, + pub f_bfree: u64, + pub f_bavail: i64, + pub f_files: u64, + pub f_ffree: u64, + pub f_favail: i64, + pub f_syncwrites: u64, + pub f_syncreads: u64, + pub f_asyncwrites: u64, + pub f_asyncreads: u64, + pub f_fsid: crate::fsid_t, + pub f_namemax: u32, + pub f_owner: crate::uid_t, + pub f_ctime: u64, + pub f_fstypename: [c_char; 16], + pub f_mntonname: [c_char; 90], + pub f_mntfromname: [c_char; 90], + pub f_mntfromspec: [c_char; 90], + pub mount_info: mount_info, + } +} + +cfg_if! 
{ + if #[cfg(feature = "extra_traits")] { + impl PartialEq for dirent { + fn eq(&self, other: &dirent) -> bool { + self.d_fileno == other.d_fileno + && self.d_off == other.d_off + && self.d_reclen == other.d_reclen + && self.d_type == other.d_type + && self.d_namlen == other.d_namlen + && self + .d_name + .iter() + .zip(other.d_name.iter()) + .all(|(a, b)| a == b) + } + } + + impl Eq for dirent {} + + impl hash::Hash for dirent { + fn hash<H: hash::Hasher>(&self, state: &mut H) { + self.d_fileno.hash(state); + self.d_off.hash(state); + self.d_reclen.hash(state); + self.d_type.hash(state); + self.d_namlen.hash(state); + self.d_name.hash(state); + } + } + + impl PartialEq for sockaddr_storage { + fn eq(&self, other: &sockaddr_storage) -> bool { + self.ss_len == other.ss_len && self.ss_family == other.ss_family + } + } + + impl Eq for sockaddr_storage {} + + impl hash::Hash for sockaddr_storage { + fn hash<H: hash::Hasher>(&self, state: &mut H) { + self.ss_len.hash(state); + self.ss_family.hash(state); + } + } + + impl PartialEq for siginfo_t { + fn eq(&self, other: &siginfo_t) -> bool { + self.si_signo == other.si_signo + && self.si_code == other.si_code + && self.si_errno == other.si_errno + && self.si_addr == other.si_addr + } + } + + impl Eq for siginfo_t {} + + impl hash::Hash for siginfo_t { + fn hash<H: hash::Hasher>(&self, state: &mut H) { + self.si_signo.hash(state); + self.si_code.hash(state); + self.si_errno.hash(state); + self.si_addr.hash(state); + } + } + + impl PartialEq for lastlog { + fn eq(&self, other: &lastlog) -> bool { + self.ll_time == other.ll_time + && self + .ll_line + .iter() + .zip(other.ll_line.iter()) + .all(|(a, b)| a == b) + && self + .ll_host + .iter() + .zip(other.ll_host.iter()) + .all(|(a, b)| a == b) + } + } + + impl Eq for lastlog {} + + impl hash::Hash for lastlog { + fn hash<H: hash::Hasher>(&self, state: &mut H) { + self.ll_time.hash(state); + self.ll_line.hash(state); + self.ll_host.hash(state); + } + } + + impl PartialEq for utmp { + fn eq(&self, other: &utmp) -> bool { + self.ut_time == other.ut_time + && self + .ut_line + .iter() + .zip(other.ut_line.iter()) + .all(|(a, b)| a == b) + && self + .ut_name + .iter() + .zip(other.ut_name.iter()) + .all(|(a, b)| a == b) + && self + .ut_host + .iter() + .zip(other.ut_host.iter()) + .all(|(a, b)| a == b) + } + } + + impl Eq for utmp {} + + impl hash::Hash for utmp { + fn hash<H: hash::Hasher>(&self, state: &mut H) { + self.ut_line.hash(state); + self.ut_name.hash(state); + self.ut_host.hash(state); + self.ut_time.hash(state); + } + } + + impl PartialEq for mount_info { + fn eq(&self, other: &mount_info) -> bool { + unsafe { + self.align + .iter() + .zip(other.align.iter()) + .all(|(a, b)| a == b) + } + } + } + + impl Eq for mount_info {} + + impl hash::Hash for mount_info { + fn hash<H: hash::Hasher>(&self, state: &mut H) { + unsafe { self.align.hash(state) }; + } + } + + impl PartialEq for __c_anonymous_ifr_ifru { + fn eq(&self, other: &__c_anonymous_ifr_ifru) -> bool { + unsafe { + self.ifru_addr == other.ifru_addr + && self.ifru_dstaddr == other.ifru_dstaddr + && self.ifru_broadaddr == other.ifru_broadaddr + && self.ifru_flags == other.ifru_flags + && self.ifru_metric == other.ifru_metric + && self.ifru_vnetid == other.ifru_vnetid + && self.ifru_media == other.ifru_media + && self.ifru_data == other.ifru_data + && self.ifru_index == other.ifru_index + } + } + } + + impl Eq for __c_anonymous_ifr_ifru {} + + impl hash::Hash for __c_anonymous_ifr_ifru { + fn hash<H: hash::Hasher>(&self, state: &mut H) { + unsafe { + self.ifru_addr.hash(state); + self.ifru_dstaddr.hash(state); + self.ifru_broadaddr.hash(state); +
self.ifru_flags.hash(state); + self.ifru_metric.hash(state); + self.ifru_vnetid.hash(state); + self.ifru_media.hash(state); + self.ifru_data.hash(state); + self.ifru_index.hash(state); + } + } + } + + impl PartialEq for statfs { + fn eq(&self, other: &statfs) -> bool { + self.f_flags == other.f_flags + && self.f_bsize == other.f_bsize + && self.f_iosize == other.f_iosize + && self.f_blocks == other.f_blocks + && self.f_bfree == other.f_bfree + && self.f_bavail == other.f_bavail + && self.f_files == other.f_files + && self.f_ffree == other.f_ffree + && self.f_favail == other.f_favail + && self.f_syncwrites == other.f_syncwrites + && self.f_syncreads == other.f_syncreads + && self.f_asyncwrites == other.f_asyncwrites + && self.f_asyncreads == other.f_asyncreads + && self.f_fsid == other.f_fsid + && self.f_namemax == other.f_namemax + && self.f_owner == other.f_owner + && self.f_ctime == other.f_ctime + && self + .f_fstypename + .iter() + .zip(other.f_fstypename.iter()) + .all(|(a, b)| a == b) + && self + .f_mntonname + .iter() + .zip(other.f_mntonname.iter()) + .all(|(a, b)| a == b) + && self + .f_mntfromname + .iter() + .zip(other.f_mntfromname.iter()) + .all(|(a, b)| a == b) + && self + .f_mntfromspec + .iter() + .zip(other.f_mntfromspec.iter()) + .all(|(a, b)| a == b) + && self.mount_info == other.mount_info + } + } + + impl Eq for statfs {} + + impl hash::Hash for statfs { + fn hash(&self, state: &mut H) { + self.f_flags.hash(state); + self.f_bsize.hash(state); + self.f_iosize.hash(state); + self.f_blocks.hash(state); + self.f_bfree.hash(state); + self.f_bavail.hash(state); + self.f_files.hash(state); + self.f_ffree.hash(state); + self.f_favail.hash(state); + self.f_syncwrites.hash(state); + self.f_syncreads.hash(state); + self.f_asyncwrites.hash(state); + self.f_asyncreads.hash(state); + self.f_fsid.hash(state); + self.f_namemax.hash(state); + self.f_owner.hash(state); + self.f_ctime.hash(state); + self.f_fstypename.hash(state); + self.f_mntonname.hash(state); + self.f_mntfromname.hash(state); + self.f_mntfromspec.hash(state); + self.mount_info.hash(state); + } + } + } +} + +pub const UT_NAMESIZE: usize = 32; +pub const UT_LINESIZE: usize = 8; +pub const UT_HOSTSIZE: usize = 256; + +pub const O_CLOEXEC: c_int = 0x10000; +pub const O_DIRECTORY: c_int = 0x20000; +pub const O_RSYNC: c_int = O_SYNC; + +pub const MS_SYNC: c_int = 0x0002; +pub const MS_INVALIDATE: c_int = 0x0004; + +pub const POLLNORM: c_short = crate::POLLRDNORM; + +pub const ENOATTR: c_int = 83; +pub const EILSEQ: c_int = 84; +pub const EOVERFLOW: c_int = 87; +pub const ECANCELED: c_int = 88; +pub const EIDRM: c_int = 89; +pub const ENOMSG: c_int = 90; +pub const ENOTSUP: c_int = 91; +pub const EBADMSG: c_int = 92; +pub const ENOTRECOVERABLE: c_int = 93; +pub const EOWNERDEAD: c_int = 94; +pub const EPROTO: c_int = 95; +pub const ELAST: c_int = 95; + +pub const F_DUPFD_CLOEXEC: c_int = 10; + +pub const UTIME_OMIT: c_long = -1; +pub const UTIME_NOW: c_long = -2; + +pub const AT_FDCWD: c_int = -100; +pub const AT_EACCESS: c_int = 0x01; +pub const AT_SYMLINK_NOFOLLOW: c_int = 0x02; +pub const AT_SYMLINK_FOLLOW: c_int = 0x04; +pub const AT_REMOVEDIR: c_int = 0x08; + +pub const AT_NULL: c_int = 0; +pub const AT_IGNORE: c_int = 1; +pub const AT_PAGESZ: c_int = 6; +pub const AT_HWCAP: c_int = 25; +pub const AT_HWCAP2: c_int = 26; + +#[deprecated(since = "0.2.64", note = "Not stable across OS versions")] +pub const RLIM_NLIMITS: c_int = 9; + +pub const SO_TIMESTAMP: c_int = 0x0800; +pub const SO_SNDTIMEO: c_int = 0x1005; +pub const 
SO_RCVTIMEO: c_int = 0x1006; +pub const SO_BINDANY: c_int = 0x1000; +pub const SO_NETPROC: c_int = 0x1020; +pub const SO_RTABLE: c_int = 0x1021; +pub const SO_PEERCRED: c_int = 0x1022; +pub const SO_SPLICE: c_int = 0x1023; +pub const SO_DOMAIN: c_int = 0x1024; +pub const SO_PROTOCOL: c_int = 0x1025; + +// sys/netinet/in.h +// Protocols (RFC 1700) +// NOTE: These are in addition to the constants defined in src/unix/mod.rs + +// IPPROTO_IP defined in src/unix/mod.rs +/// Hop-by-hop option header +pub const IPPROTO_HOPOPTS: c_int = 0; +// IPPROTO_ICMP defined in src/unix/mod.rs +/// group mgmt protocol +pub const IPPROTO_IGMP: c_int = 2; +/// gateway^2 (deprecated) +pub const IPPROTO_GGP: c_int = 3; +/// for compatibility +pub const IPPROTO_IPIP: c_int = 4; +// IPPROTO_TCP defined in src/unix/mod.rs +/// exterior gateway protocol +pub const IPPROTO_EGP: c_int = 8; +/// pup +pub const IPPROTO_PUP: c_int = 12; +// IPPROTO_UDP defined in src/unix/mod.rs +/// xns idp +pub const IPPROTO_IDP: c_int = 22; +/// tp-4 w/ class negotiation +pub const IPPROTO_TP: c_int = 29; +// IPPROTO_IPV6 defined in src/unix/mod.rs +/// IP6 routing header +pub const IPPROTO_ROUTING: c_int = 43; +/// IP6 fragmentation header +pub const IPPROTO_FRAGMENT: c_int = 44; +/// resource reservation +pub const IPPROTO_RSVP: c_int = 46; +/// General Routing Encap. +pub const IPPROTO_GRE: c_int = 47; +/// IP6 Encap Sec. Payload +pub const IPPROTO_ESP: c_int = 50; +/// IP6 Auth Header +pub const IPPROTO_AH: c_int = 51; +/// IP Mobility RFC 2004 +pub const IPPROTO_MOBILE: c_int = 55; +// IPPROTO_ICMPV6 defined in src/unix/mod.rs +/// IP6 no next header +pub const IPPROTO_NONE: c_int = 59; +/// IP6 destination option +pub const IPPROTO_DSTOPTS: c_int = 60; +/// ISO cnlp +pub const IPPROTO_EON: c_int = 80; +/// Ethernet-in-IP +pub const IPPROTO_ETHERIP: c_int = 97; +/// encapsulation header +pub const IPPROTO_ENCAP: c_int = 98; +/// Protocol indep. multicast +pub const IPPROTO_PIM: c_int = 103; +/// IP Payload Comp. 
Protocol +pub const IPPROTO_IPCOMP: c_int = 108; +/// CARP +pub const IPPROTO_CARP: c_int = 112; +/// unicast MPLS packet +pub const IPPROTO_MPLS: c_int = 137; +/// PFSYNC +pub const IPPROTO_PFSYNC: c_int = 240; +pub const IPPROTO_MAX: c_int = 256; + +// Only used internally, so it can be outside the range of valid IP protocols +pub const IPPROTO_DIVERT: c_int = 258; + +pub const IP_RECVDSTADDR: c_int = 7; +pub const IP_SENDSRCADDR: c_int = IP_RECVDSTADDR; +pub const IP_RECVIF: c_int = 30; + +// sys/netinet/in.h +pub const TCP_MD5SIG: c_int = 0x04; +pub const TCP_NOPUSH: c_int = 0x10; + +pub const MSG_WAITFORONE: c_int = 0x1000; + +pub const AF_ECMA: c_int = 8; +pub const AF_ROUTE: c_int = 17; +pub const AF_ENCAP: c_int = 28; +pub const AF_SIP: c_int = 29; +pub const AF_KEY: c_int = 30; +pub const pseudo_AF_HDRCMPLT: c_int = 31; +pub const AF_BLUETOOTH: c_int = 32; +pub const AF_MPLS: c_int = 33; +pub const pseudo_AF_PFLOW: c_int = 34; +pub const pseudo_AF_PIPEX: c_int = 35; +pub const NET_RT_DUMP: c_int = 1; +pub const NET_RT_FLAGS: c_int = 2; +pub const NET_RT_IFLIST: c_int = 3; +pub const NET_RT_STATS: c_int = 4; +pub const NET_RT_TABLE: c_int = 5; +pub const NET_RT_IFNAMES: c_int = 6; +#[doc(hidden)] +#[deprecated( + since = "0.2.95", + note = "Possibly increasing over the releases and might not be so used in the field" +)] +pub const NET_RT_MAXID: c_int = 7; + +pub const IPV6_JOIN_GROUP: c_int = 12; +pub const IPV6_LEAVE_GROUP: c_int = 13; + +pub const PF_ROUTE: c_int = AF_ROUTE; +pub const PF_ECMA: c_int = AF_ECMA; +pub const PF_ENCAP: c_int = AF_ENCAP; +pub const PF_SIP: c_int = AF_SIP; +pub const PF_KEY: c_int = AF_KEY; +pub const PF_BPF: c_int = pseudo_AF_HDRCMPLT; +pub const PF_BLUETOOTH: c_int = AF_BLUETOOTH; +pub const PF_MPLS: c_int = AF_MPLS; +pub const PF_PFLOW: c_int = pseudo_AF_PFLOW; +pub const PF_PIPEX: c_int = pseudo_AF_PIPEX; + +pub const SCM_TIMESTAMP: c_int = 0x04; + +pub const O_DSYNC: c_int = 128; + +pub const MAP_RENAME: c_int = 0x0000; +pub const MAP_NORESERVE: c_int = 0x0000; +pub const MAP_HASSEMAPHORE: c_int = 0x0000; +pub const MAP_TRYFIXED: c_int = 0; + +pub const EIPSEC: c_int = 82; +pub const ENOMEDIUM: c_int = 85; +pub const EMEDIUMTYPE: c_int = 86; + +pub const EAI_BADFLAGS: c_int = -1; +pub const EAI_NONAME: c_int = -2; +pub const EAI_AGAIN: c_int = -3; +pub const EAI_FAIL: c_int = -4; +pub const EAI_NODATA: c_int = -5; +pub const EAI_FAMILY: c_int = -6; +pub const EAI_SOCKTYPE: c_int = -7; +pub const EAI_SERVICE: c_int = -8; +pub const EAI_MEMORY: c_int = -10; +pub const EAI_SYSTEM: c_int = -11; +pub const EAI_OVERFLOW: c_int = -14; + +pub const RUSAGE_THREAD: c_int = 1; + +pub const MAP_COPY: c_int = 0x0002; +pub const MAP_NOEXTEND: c_int = 0x0000; + +pub const _PC_LINK_MAX: c_int = 1; +pub const _PC_MAX_CANON: c_int = 2; +pub const _PC_MAX_INPUT: c_int = 3; +pub const _PC_NAME_MAX: c_int = 4; +pub const _PC_PATH_MAX: c_int = 5; +pub const _PC_PIPE_BUF: c_int = 6; +pub const _PC_CHOWN_RESTRICTED: c_int = 7; +pub const _PC_NO_TRUNC: c_int = 8; +pub const _PC_VDISABLE: c_int = 9; +pub const _PC_2_SYMLINKS: c_int = 10; +pub const _PC_ALLOC_SIZE_MIN: c_int = 11; +pub const _PC_ASYNC_IO: c_int = 12; +pub const _PC_FILESIZEBITS: c_int = 13; +pub const _PC_PRIO_IO: c_int = 14; +pub const _PC_REC_INCR_XFER_SIZE: c_int = 15; +pub const _PC_REC_MAX_XFER_SIZE: c_int = 16; +pub const _PC_REC_MIN_XFER_SIZE: c_int = 17; +pub const _PC_REC_XFER_ALIGN: c_int = 18; +pub const _PC_SYMLINK_MAX: c_int = 19; +pub const _PC_SYNC_IO: c_int = 20; +pub const 
_PC_TIMESTAMP_RESOLUTION: c_int = 21; + +pub const _CS_PATH: c_int = 1; + +pub const _SC_CLK_TCK: c_int = 3; +pub const _SC_SEM_NSEMS_MAX: c_int = 31; +pub const _SC_SEM_VALUE_MAX: c_int = 32; +pub const _SC_HOST_NAME_MAX: c_int = 33; +pub const _SC_MONOTONIC_CLOCK: c_int = 34; +pub const _SC_2_PBS: c_int = 35; +pub const _SC_2_PBS_ACCOUNTING: c_int = 36; +pub const _SC_2_PBS_CHECKPOINT: c_int = 37; +pub const _SC_2_PBS_LOCATE: c_int = 38; +pub const _SC_2_PBS_MESSAGE: c_int = 39; +pub const _SC_2_PBS_TRACK: c_int = 40; +pub const _SC_ADVISORY_INFO: c_int = 41; +pub const _SC_AIO_LISTIO_MAX: c_int = 42; +pub const _SC_AIO_MAX: c_int = 43; +pub const _SC_AIO_PRIO_DELTA_MAX: c_int = 44; +pub const _SC_ASYNCHRONOUS_IO: c_int = 45; +pub const _SC_ATEXIT_MAX: c_int = 46; +pub const _SC_BARRIERS: c_int = 47; +pub const _SC_CLOCK_SELECTION: c_int = 48; +pub const _SC_CPUTIME: c_int = 49; +pub const _SC_DELAYTIMER_MAX: c_int = 50; +pub const _SC_IOV_MAX: c_int = 51; +pub const _SC_IPV6: c_int = 52; +pub const _SC_MAPPED_FILES: c_int = 53; +pub const _SC_MEMLOCK: c_int = 54; +pub const _SC_MEMLOCK_RANGE: c_int = 55; +pub const _SC_MEMORY_PROTECTION: c_int = 56; +pub const _SC_MESSAGE_PASSING: c_int = 57; +pub const _SC_MQ_OPEN_MAX: c_int = 58; +pub const _SC_MQ_PRIO_MAX: c_int = 59; +pub const _SC_PRIORITIZED_IO: c_int = 60; +pub const _SC_PRIORITY_SCHEDULING: c_int = 61; +pub const _SC_RAW_SOCKETS: c_int = 62; +pub const _SC_READER_WRITER_LOCKS: c_int = 63; +pub const _SC_REALTIME_SIGNALS: c_int = 64; +pub const _SC_REGEXP: c_int = 65; +pub const _SC_RTSIG_MAX: c_int = 66; +pub const _SC_SEMAPHORES: c_int = 67; +pub const _SC_SHARED_MEMORY_OBJECTS: c_int = 68; +pub const _SC_SHELL: c_int = 69; +pub const _SC_SIGQUEUE_MAX: c_int = 70; +pub const _SC_SPAWN: c_int = 71; +pub const _SC_SPIN_LOCKS: c_int = 72; +pub const _SC_SPORADIC_SERVER: c_int = 73; +pub const _SC_SS_REPL_MAX: c_int = 74; +pub const _SC_SYNCHRONIZED_IO: c_int = 75; +pub const _SC_SYMLOOP_MAX: c_int = 76; +pub const _SC_THREAD_ATTR_STACKADDR: c_int = 77; +pub const _SC_THREAD_ATTR_STACKSIZE: c_int = 78; +pub const _SC_THREAD_CPUTIME: c_int = 79; +pub const _SC_THREAD_DESTRUCTOR_ITERATIONS: c_int = 80; +pub const _SC_THREAD_KEYS_MAX: c_int = 81; +pub const _SC_THREAD_PRIO_INHERIT: c_int = 82; +pub const _SC_THREAD_PRIO_PROTECT: c_int = 83; +pub const _SC_THREAD_PRIORITY_SCHEDULING: c_int = 84; +pub const _SC_THREAD_PROCESS_SHARED: c_int = 85; +pub const _SC_THREAD_ROBUST_PRIO_INHERIT: c_int = 86; +pub const _SC_THREAD_ROBUST_PRIO_PROTECT: c_int = 87; +pub const _SC_THREAD_SPORADIC_SERVER: c_int = 88; +pub const _SC_THREAD_STACK_MIN: c_int = 89; +pub const _SC_THREAD_THREADS_MAX: c_int = 90; +pub const _SC_THREADS: c_int = 91; +pub const _SC_TIMEOUTS: c_int = 92; +pub const _SC_TIMER_MAX: c_int = 93; +pub const _SC_TIMERS: c_int = 94; +pub const _SC_TRACE: c_int = 95; +pub const _SC_TRACE_EVENT_FILTER: c_int = 96; +pub const _SC_TRACE_EVENT_NAME_MAX: c_int = 97; +pub const _SC_TRACE_INHERIT: c_int = 98; +pub const _SC_TRACE_LOG: c_int = 99; +pub const _SC_GETGR_R_SIZE_MAX: c_int = 100; +pub const _SC_GETPW_R_SIZE_MAX: c_int = 101; +pub const _SC_LOGIN_NAME_MAX: c_int = 102; +pub const _SC_THREAD_SAFE_FUNCTIONS: c_int = 103; +pub const _SC_TRACE_NAME_MAX: c_int = 104; +pub const _SC_TRACE_SYS_MAX: c_int = 105; +pub const _SC_TRACE_USER_EVENT_MAX: c_int = 106; +pub const _SC_TTY_NAME_MAX: c_int = 107; +pub const _SC_TYPED_MEMORY_OBJECTS: c_int = 108; +pub const _SC_V6_ILP32_OFF32: c_int = 109; +pub const _SC_V6_ILP32_OFFBIG: c_int = 
110; +pub const _SC_V6_LP64_OFF64: c_int = 111; +pub const _SC_V6_LPBIG_OFFBIG: c_int = 112; +pub const _SC_V7_ILP32_OFF32: c_int = 113; +pub const _SC_V7_ILP32_OFFBIG: c_int = 114; +pub const _SC_V7_LP64_OFF64: c_int = 115; +pub const _SC_V7_LPBIG_OFFBIG: c_int = 116; +pub const _SC_XOPEN_CRYPT: c_int = 117; +pub const _SC_XOPEN_ENH_I18N: c_int = 118; +pub const _SC_XOPEN_LEGACY: c_int = 119; +pub const _SC_XOPEN_REALTIME: c_int = 120; +pub const _SC_XOPEN_REALTIME_THREADS: c_int = 121; +pub const _SC_XOPEN_STREAMS: c_int = 122; +pub const _SC_XOPEN_UNIX: c_int = 123; +pub const _SC_XOPEN_UUCP: c_int = 124; +pub const _SC_XOPEN_VERSION: c_int = 125; +pub const _SC_PHYS_PAGES: c_int = 500; +pub const _SC_AVPHYS_PAGES: c_int = 501; +pub const _SC_NPROCESSORS_CONF: c_int = 502; +pub const _SC_NPROCESSORS_ONLN: c_int = 503; + +pub const FD_SETSIZE: usize = 1024; + +pub const SCHED_FIFO: c_int = 1; +pub const SCHED_OTHER: c_int = 2; +pub const SCHED_RR: c_int = 3; + +pub const ST_NOSUID: c_ulong = 2; + +pub const PTHREAD_MUTEX_INITIALIZER: pthread_mutex_t = ptr::null_mut(); +pub const PTHREAD_COND_INITIALIZER: pthread_cond_t = ptr::null_mut(); +pub const PTHREAD_RWLOCK_INITIALIZER: pthread_rwlock_t = ptr::null_mut(); + +pub const PTHREAD_MUTEX_ERRORCHECK: c_int = 1; +pub const PTHREAD_MUTEX_RECURSIVE: c_int = 2; +pub const PTHREAD_MUTEX_NORMAL: c_int = 3; +pub const PTHREAD_MUTEX_STRICT_NP: c_int = 4; +pub const PTHREAD_MUTEX_DEFAULT: c_int = PTHREAD_MUTEX_STRICT_NP; + +pub const EVFILT_READ: i16 = -1; +pub const EVFILT_WRITE: i16 = -2; +pub const EVFILT_AIO: i16 = -3; +pub const EVFILT_VNODE: i16 = -4; +pub const EVFILT_PROC: i16 = -5; +pub const EVFILT_SIGNAL: i16 = -6; +pub const EVFILT_TIMER: i16 = -7; +pub const EVFILT_DEVICE: i16 = -8; +pub const EVFILT_EXCEPT: i16 = -9; + +pub const EV_ADD: u16 = 0x1; +pub const EV_DELETE: u16 = 0x2; +pub const EV_ENABLE: u16 = 0x4; +pub const EV_DISABLE: u16 = 0x8; +pub const EV_ONESHOT: u16 = 0x10; +pub const EV_CLEAR: u16 = 0x20; +pub const EV_RECEIPT: u16 = 0x40; +pub const EV_DISPATCH: u16 = 0x80; +pub const EV_FLAG1: u16 = 0x2000; +pub const EV_ERROR: u16 = 0x4000; +pub const EV_EOF: u16 = 0x8000; + +#[deprecated(since = "0.2.113", note = "Not stable across OS versions")] +pub const EV_SYSFLAGS: u16 = 0xf800; + +pub const NOTE_LOWAT: u32 = 0x00000001; +pub const NOTE_EOF: u32 = 0x00000002; +pub const NOTE_OOB: u32 = 0x00000004; +pub const NOTE_DELETE: u32 = 0x00000001; +pub const NOTE_WRITE: u32 = 0x00000002; +pub const NOTE_EXTEND: u32 = 0x00000004; +pub const NOTE_ATTRIB: u32 = 0x00000008; +pub const NOTE_LINK: u32 = 0x00000010; +pub const NOTE_RENAME: u32 = 0x00000020; +pub const NOTE_REVOKE: u32 = 0x00000040; +pub const NOTE_TRUNCATE: u32 = 0x00000080; +pub const NOTE_EXIT: u32 = 0x80000000; +pub const NOTE_FORK: u32 = 0x40000000; +pub const NOTE_EXEC: u32 = 0x20000000; +pub const NOTE_PDATAMASK: u32 = 0x000fffff; +pub const NOTE_PCTRLMASK: u32 = 0xf0000000; +pub const NOTE_TRACK: u32 = 0x00000001; +pub const NOTE_TRACKERR: u32 = 0x00000002; +pub const NOTE_CHILD: u32 = 0x00000004; +pub const NOTE_CHANGE: u32 = 0x00000001; + +pub const TMP_MAX: c_uint = 0x7fffffff; + +pub const AI_PASSIVE: c_int = 1; +pub const AI_CANONNAME: c_int = 2; +pub const AI_NUMERICHOST: c_int = 4; +pub const AI_EXT: c_int = 8; +pub const AI_NUMERICSERV: c_int = 16; +pub const AI_FQDN: c_int = 32; +pub const AI_ADDRCONFIG: c_int = 64; + +pub const NI_NUMERICHOST: c_int = 1; +pub const NI_NUMERICSERV: c_int = 2; +pub const NI_NOFQDN: c_int = 4; +pub const NI_NAMEREQD: 
c_int = 8; +pub const NI_DGRAM: c_int = 16; + +pub const NI_MAXHOST: size_t = 256; + +pub const RTLD_LOCAL: c_int = 0; + +pub const CTL_MAXNAME: c_int = 12; + +pub const CTLTYPE_NODE: c_int = 1; +pub const CTLTYPE_INT: c_int = 2; +pub const CTLTYPE_STRING: c_int = 3; +pub const CTLTYPE_QUAD: c_int = 4; +pub const CTLTYPE_STRUCT: c_int = 5; + +pub const CTL_UNSPEC: c_int = 0; +pub const CTL_KERN: c_int = 1; +pub const CTL_VM: c_int = 2; +pub const CTL_FS: c_int = 3; +pub const CTL_NET: c_int = 4; +pub const CTL_DEBUG: c_int = 5; +pub const CTL_HW: c_int = 6; +pub const CTL_MACHDEP: c_int = 7; +pub const CTL_DDB: c_int = 9; +pub const CTL_VFS: c_int = 10; +pub const CTL_MAXID: c_int = 11; + +pub const HW_NCPUONLINE: c_int = 25; + +pub const KERN_OSTYPE: c_int = 1; +pub const KERN_OSRELEASE: c_int = 2; +pub const KERN_OSREV: c_int = 3; +pub const KERN_VERSION: c_int = 4; +pub const KERN_MAXVNODES: c_int = 5; +pub const KERN_MAXPROC: c_int = 6; +pub const KERN_MAXFILES: c_int = 7; +pub const KERN_ARGMAX: c_int = 8; +pub const KERN_SECURELVL: c_int = 9; +pub const KERN_HOSTNAME: c_int = 10; +pub const KERN_HOSTID: c_int = 11; +pub const KERN_CLOCKRATE: c_int = 12; +pub const KERN_PROF: c_int = 16; +pub const KERN_POSIX1: c_int = 17; +pub const KERN_NGROUPS: c_int = 18; +pub const KERN_JOB_CONTROL: c_int = 19; +pub const KERN_SAVED_IDS: c_int = 20; +pub const KERN_BOOTTIME: c_int = 21; +pub const KERN_DOMAINNAME: c_int = 22; +pub const KERN_MAXPARTITIONS: c_int = 23; +pub const KERN_RAWPARTITION: c_int = 24; +pub const KERN_MAXTHREAD: c_int = 25; +pub const KERN_NTHREADS: c_int = 26; +pub const KERN_OSVERSION: c_int = 27; +pub const KERN_SOMAXCONN: c_int = 28; +pub const KERN_SOMINCONN: c_int = 29; +#[deprecated(since = "0.2.71", note = "Removed in OpenBSD 6.0")] +pub const KERN_USERMOUNT: c_int = 30; +pub const KERN_NOSUIDCOREDUMP: c_int = 32; +pub const KERN_FSYNC: c_int = 33; +pub const KERN_SYSVMSG: c_int = 34; +pub const KERN_SYSVSEM: c_int = 35; +pub const KERN_SYSVSHM: c_int = 36; +#[deprecated(since = "0.2.71", note = "Removed in OpenBSD 6.0")] +pub const KERN_ARND: c_int = 37; +pub const KERN_MSGBUFSIZE: c_int = 38; +pub const KERN_MALLOCSTATS: c_int = 39; +pub const KERN_CPTIME: c_int = 40; +pub const KERN_NCHSTATS: c_int = 41; +pub const KERN_FORKSTAT: c_int = 42; +pub const KERN_NSELCOLL: c_int = 43; +pub const KERN_TTY: c_int = 44; +pub const KERN_CCPU: c_int = 45; +pub const KERN_FSCALE: c_int = 46; +pub const KERN_NPROCS: c_int = 47; +pub const KERN_MSGBUF: c_int = 48; +pub const KERN_POOL: c_int = 49; +pub const KERN_STACKGAPRANDOM: c_int = 50; +pub const KERN_SYSVIPC_INFO: c_int = 51; +pub const KERN_SPLASSERT: c_int = 54; +pub const KERN_PROC_ARGS: c_int = 55; +pub const KERN_NFILES: c_int = 56; +pub const KERN_TTYCOUNT: c_int = 57; +pub const KERN_NUMVNODES: c_int = 58; +pub const KERN_MBSTAT: c_int = 59; +pub const KERN_SEMINFO: c_int = 61; +pub const KERN_SHMINFO: c_int = 62; +pub const KERN_INTRCNT: c_int = 63; +pub const KERN_WATCHDOG: c_int = 64; +pub const KERN_PROC: c_int = 66; +pub const KERN_MAXCLUSTERS: c_int = 67; +pub const KERN_EVCOUNT: c_int = 68; +pub const KERN_TIMECOUNTER: c_int = 69; +pub const KERN_MAXLOCKSPERUID: c_int = 70; +pub const KERN_CPTIME2: c_int = 71; +pub const KERN_CACHEPCT: c_int = 72; +pub const KERN_FILE: c_int = 73; +pub const KERN_CONSDEV: c_int = 75; +pub const KERN_NETLIVELOCKS: c_int = 76; +pub const KERN_POOL_DEBUG: c_int = 77; +pub const KERN_PROC_CWD: c_int = 78; +pub const KERN_PROC_NOBROADCASTKILL: c_int = 79; +pub const 
KERN_PROC_VMMAP: c_int = 80; +pub const KERN_GLOBAL_PTRACE: c_int = 81; +pub const KERN_CONSBUFSIZE: c_int = 82; +pub const KERN_CONSBUF: c_int = 83; +pub const KERN_AUDIO: c_int = 84; +pub const KERN_CPUSTATS: c_int = 85; +pub const KERN_PFSTATUS: c_int = 86; +pub const KERN_TIMEOUT_STATS: c_int = 87; +#[deprecated( + since = "0.2.95", + note = "Possibly increasing over the releases and might not be so used in the field" +)] +pub const KERN_MAXID: c_int = 88; + +pub const KERN_PROC_ALL: c_int = 0; +pub const KERN_PROC_PID: c_int = 1; +pub const KERN_PROC_PGRP: c_int = 2; +pub const KERN_PROC_SESSION: c_int = 3; +pub const KERN_PROC_TTY: c_int = 4; +pub const KERN_PROC_UID: c_int = 5; +pub const KERN_PROC_RUID: c_int = 6; +pub const KERN_PROC_KTHREAD: c_int = 7; +pub const KERN_PROC_SHOW_THREADS: c_int = 0x40000000; + +pub const KERN_SYSVIPC_MSG_INFO: c_int = 1; +pub const KERN_SYSVIPC_SEM_INFO: c_int = 2; +pub const KERN_SYSVIPC_SHM_INFO: c_int = 3; + +pub const KERN_PROC_ARGV: c_int = 1; +pub const KERN_PROC_NARGV: c_int = 2; +pub const KERN_PROC_ENV: c_int = 3; +pub const KERN_PROC_NENV: c_int = 4; + +pub const KI_NGROUPS: c_int = 16; +pub const KI_MAXCOMLEN: c_int = 24; +pub const KI_WMESGLEN: c_int = 8; +pub const KI_MAXLOGNAME: c_int = 32; +pub const KI_EMULNAMELEN: c_int = 8; + +pub const KVE_ET_OBJ: c_int = 0x00000001; +pub const KVE_ET_SUBMAP: c_int = 0x00000002; +pub const KVE_ET_COPYONWRITE: c_int = 0x00000004; +pub const KVE_ET_NEEDSCOPY: c_int = 0x00000008; +pub const KVE_ET_HOLE: c_int = 0x00000010; +pub const KVE_ET_NOFAULT: c_int = 0x00000020; +pub const KVE_ET_STACK: c_int = 0x00000040; +pub const KVE_ET_WC: c_int = 0x000000080; +pub const KVE_ET_CONCEAL: c_int = 0x000000100; +pub const KVE_ET_SYSCALL: c_int = 0x000000200; +pub const KVE_ET_FREEMAPPED: c_int = 0x000000800; + +pub const KVE_PROT_NONE: c_int = 0x00000000; +pub const KVE_PROT_READ: c_int = 0x00000001; +pub const KVE_PROT_WRITE: c_int = 0x00000002; +pub const KVE_PROT_EXEC: c_int = 0x00000004; + +pub const KVE_ADV_NORMAL: c_int = 0x00000000; +pub const KVE_ADV_RANDOM: c_int = 0x00000001; +pub const KVE_ADV_SEQUENTIAL: c_int = 0x00000002; + +pub const KVE_INH_SHARE: c_int = 0x00000000; +pub const KVE_INH_COPY: c_int = 0x00000010; +pub const KVE_INH_NONE: c_int = 0x00000020; +pub const KVE_INH_ZERO: c_int = 0x00000030; + +pub const KVE_F_STATIC: c_int = 0x1; +pub const KVE_F_KMEM: c_int = 0x2; + +pub const CHWFLOW: crate::tcflag_t = crate::MDMBUF | crate::CRTSCTS; +pub const OLCUC: crate::tcflag_t = 0x20; +pub const ONOCR: crate::tcflag_t = 0x40; +pub const ONLRET: crate::tcflag_t = 0x80; + +//https://github.com/openbsd/src/blob/HEAD/sys/sys/mount.h +pub const ISOFSMNT_NORRIP: c_int = 0x1; // disable Rock Ridge Ext +pub const ISOFSMNT_GENS: c_int = 0x2; // enable generation numbers +pub const ISOFSMNT_EXTATT: c_int = 0x4; // enable extended attr +pub const ISOFSMNT_NOJOLIET: c_int = 0x8; // disable Joliet Ext +pub const ISOFSMNT_SESS: c_int = 0x10; // use iso_args.sess + +pub const NFS_ARGSVERSION: c_int = 4; // change when nfs_args changes + +pub const NFSMNT_RESVPORT: c_int = 0; // always use reserved ports +pub const NFSMNT_SOFT: c_int = 0x1; // soft mount (hard is default) +pub const NFSMNT_WSIZE: c_int = 0x2; // set write size +pub const NFSMNT_RSIZE: c_int = 0x4; // set read size +pub const NFSMNT_TIMEO: c_int = 0x8; // set initial timeout +pub const NFSMNT_RETRANS: c_int = 0x10; // set number of request retries +pub const NFSMNT_MAXGRPS: c_int = 0x20; // set maximum grouplist size +pub const NFSMNT_INT: 
c_int = 0x40; // allow interrupts on hard mount +pub const NFSMNT_NOCONN: c_int = 0x80; // Don't Connect the socket +pub const NFSMNT_NQNFS: c_int = 0x100; // Use Nqnfs protocol +pub const NFSMNT_NFSV3: c_int = 0x200; // Use NFS Version 3 protocol +pub const NFSMNT_KERB: c_int = 0x400; // Use Kerberos authentication +pub const NFSMNT_DUMBTIMR: c_int = 0x800; // Don't estimate rtt dynamically +pub const NFSMNT_LEASETERM: c_int = 0x1000; // set lease term (nqnfs) +pub const NFSMNT_READAHEAD: c_int = 0x2000; // set read ahead +pub const NFSMNT_DEADTHRESH: c_int = 0x4000; // set dead server retry thresh +pub const NFSMNT_NOAC: c_int = 0x8000; // disable attribute cache +pub const NFSMNT_RDIRPLUS: c_int = 0x10000; // Use Readdirplus for V3 +pub const NFSMNT_READDIRSIZE: c_int = 0x20000; // Set readdir size + +/* Flags valid only in mount syscall arguments */ +pub const NFSMNT_ACREGMIN: c_int = 0x40000; // acregmin field valid +pub const NFSMNT_ACREGMAX: c_int = 0x80000; // acregmax field valid +pub const NFSMNT_ACDIRMIN: c_int = 0x100000; // acdirmin field valid +pub const NFSMNT_ACDIRMAX: c_int = 0x200000; // acdirmax field valid + +/* Flags valid only in kernel */ +pub const NFSMNT_INTERNAL: c_int = 0xfffc0000; // Bits set internally +pub const NFSMNT_HASWRITEVERF: c_int = 0x40000; // Has write verifier for V3 +pub const NFSMNT_GOTPATHCONF: c_int = 0x80000; // Got the V3 pathconf info +pub const NFSMNT_GOTFSINFO: c_int = 0x100000; // Got the V3 fsinfo +pub const NFSMNT_MNTD: c_int = 0x200000; // Mnt server for mnt point +pub const NFSMNT_DISMINPROG: c_int = 0x400000; // Dismount in progress +pub const NFSMNT_DISMNT: c_int = 0x800000; // Dismounted +pub const NFSMNT_SNDLOCK: c_int = 0x1000000; // Send socket lock +pub const NFSMNT_WANTSND: c_int = 0x2000000; // Want above +pub const NFSMNT_RCVLOCK: c_int = 0x4000000; // Rcv socket lock +pub const NFSMNT_WANTRCV: c_int = 0x8000000; // Want above +pub const NFSMNT_WAITAUTH: c_int = 0x10000000; // Wait for authentication +pub const NFSMNT_HASAUTH: c_int = 0x20000000; // Has authenticator +pub const NFSMNT_WANTAUTH: c_int = 0x40000000; // Wants an authenticator +pub const NFSMNT_AUTHERR: c_int = 0x80000000; // Authentication error + +pub const MSDOSFSMNT_SHORTNAME: c_int = 0x1; // Force old DOS short names only +pub const MSDOSFSMNT_LONGNAME: c_int = 0x2; // Force Win'95 long names +pub const MSDOSFSMNT_NOWIN95: c_int = 0x4; // Completely ignore Win95 entries + +pub const NTFS_MFLAG_CASEINS: c_int = 0x1; +pub const NTFS_MFLAG_ALLNAMES: c_int = 0x2; + +pub const TMPFS_ARGS_VERSION: c_int = 1; + +const SI_MAXSZ: size_t = 128; +const SI_PAD: size_t = (SI_MAXSZ / size_of::<c_int>()) - 3; + +pub const MAP_STACK: c_int = 0x4000; +pub const MAP_CONCEAL: c_int = 0x8000; + +// https://github.com/openbsd/src/blob/HEAD/sys/net/if.h#L187 +pub const IFF_UP: c_int = 0x1; // interface is up +pub const IFF_BROADCAST: c_int = 0x2; // broadcast address valid +pub const IFF_DEBUG: c_int = 0x4; // turn on debugging +pub const IFF_LOOPBACK: c_int = 0x8; // is a loopback net +pub const IFF_POINTOPOINT: c_int = 0x10; // interface is point-to-point link +pub const IFF_STATICARP: c_int = 0x20; // only static ARP +pub const IFF_RUNNING: c_int = 0x40; // resources allocated +pub const IFF_NOARP: c_int = 0x80; // no address resolution protocol +pub const IFF_PROMISC: c_int = 0x100; // receive all packets +pub const IFF_ALLMULTI: c_int = 0x200; // receive all multicast packets +pub const IFF_OACTIVE: c_int = 0x400; // transmission in progress +pub const IFF_SIMPLEX: c_int = 0x800;
// can't hear own transmissions +pub const IFF_LINK0: c_int = 0x1000; // per link layer defined bit +pub const IFF_LINK1: c_int = 0x2000; // per link layer defined bit +pub const IFF_LINK2: c_int = 0x4000; // per link layer defined bit +pub const IFF_MULTICAST: c_int = 0x8000; // supports multicast + +pub const PTHREAD_STACK_MIN: size_t = 1_usize << _MAX_PAGE_SHIFT; +pub const MINSIGSTKSZ: size_t = 3_usize << _MAX_PAGE_SHIFT; +pub const SIGSTKSZ: size_t = MINSIGSTKSZ + (1_usize << _MAX_PAGE_SHIFT) * 4; + +pub const PT_SET_EVENT_MASK: c_int = 12; +pub const PT_GET_EVENT_MASK: c_int = 13; +pub const PT_GET_PROCESS_STATE: c_int = 14; +pub const PT_GET_THREAD_FIRST: c_int = 15; +pub const PT_GET_THREAD_NEXT: c_int = 16; +pub const PT_FIRSTMACH: c_int = 32; + +pub const SOCK_CLOEXEC: c_int = 0x8000; +pub const SOCK_NONBLOCK: c_int = 0x4000; +pub const SOCK_DNS: c_int = 0x1000; + +pub const BIOCGRSIG: c_ulong = 0x40044273; +pub const BIOCSRSIG: c_ulong = 0x80044272; +pub const BIOCSDLT: c_ulong = 0x8004427a; + +pub const PTRACE_FORK: c_int = 0x0002; + +pub const WCONTINUED: c_int = 0x08; +pub const WEXITED: c_int = 0x04; +pub const WSTOPPED: c_int = 0x02; // same as WUNTRACED +pub const WNOWAIT: c_int = 0x10; +pub const WTRAPPED: c_int = 0x20; + +pub const P_ALL: crate::idtype_t = 0; +pub const P_PGID: crate::idtype_t = 1; +pub const P_PID: crate::idtype_t = 2; + +// search.h +pub const FIND: crate::ACTION = 0; +pub const ENTER: crate::ACTION = 1; + +// futex.h +pub const FUTEX_WAIT: c_int = 1; +pub const FUTEX_WAKE: c_int = 2; +pub const FUTEX_REQUEUE: c_int = 3; +pub const FUTEX_PRIVATE_FLAG: c_int = 128; + +// sysctl.h, kinfo_proc p_eflag constants +pub const EPROC_CTTY: i32 = 0x01; // controlling tty vnode active +pub const EPROC_SLEADER: i32 = 0x02; // session leader +pub const EPROC_UNVEIL: i32 = 0x04; // has unveil settings +pub const EPROC_LKUNVEIL: i32 = 0x08; // unveil is locked + +// Flags for chflags(2) +pub const UF_SETTABLE: c_uint = 0x0000ffff; +pub const UF_NODUMP: c_uint = 0x00000001; +pub const UF_IMMUTABLE: c_uint = 0x00000002; +pub const UF_APPEND: c_uint = 0x00000004; +pub const UF_OPAQUE: c_uint = 0x00000008; +pub const SF_SETTABLE: c_uint = 0xffff0000; +pub const SF_ARCHIVED: c_uint = 0x00010000; +pub const SF_IMMUTABLE: c_uint = 0x00020000; +pub const SF_APPEND: c_uint = 0x00040000; + +// sys/exec_elf.h - Legal values for p_type (segment type). +pub const PT_NULL: u32 = 0; +pub const PT_LOAD: u32 = 1; +pub const PT_DYNAMIC: u32 = 2; +pub const PT_INTERP: u32 = 3; +pub const PT_NOTE: u32 = 4; +pub const PT_SHLIB: u32 = 5; +pub const PT_PHDR: u32 = 6; +pub const PT_TLS: u32 = 7; +pub const PT_LOOS: u32 = 0x60000000; +pub const PT_HIOS: u32 = 0x6fffffff; +pub const PT_LOPROC: u32 = 0x70000000; +pub const PT_HIPROC: u32 = 0x7fffffff; + +pub const PT_GNU_EH_FRAME: u32 = 0x6474e550; +pub const PT_GNU_RELRO: u32 = 0x6474e552; + +// sys/exec_elf.h - Legal values for p_flags (segment flags). 
+pub const PF_X: u32 = 0x1; +pub const PF_W: u32 = 0x2; +pub const PF_R: u32 = 0x4; +pub const PF_MASKOS: u32 = 0x0ff00000; +pub const PF_MASKPROC: u32 = 0xf0000000; + +// sys/ioccom.h +pub const fn IOCPARM_LEN(x: u32) -> u32 { + (x >> 16) & crate::IOCPARM_MASK +} + +pub const fn IOCBASECMD(x: u32) -> u32 { + x & (!(crate::IOCPARM_MASK << 16)) +} + +pub const fn IOCGROUP(x: u32) -> u32 { + (x >> 8) & 0xff +} + +pub const fn _IOC(inout: c_ulong, group: c_ulong, num: c_ulong, len: c_ulong) -> c_ulong { + (inout) | (((len) & crate::IOCPARM_MASK as c_ulong) << 16) | ((group) << 8) | (num) +} + +// sys/mount.h +pub const MNT_NOPERM: c_int = 0x00000020; +pub const MNT_WXALLOWED: c_int = 0x00000800; +pub const MNT_EXRDONLY: c_int = 0x00000080; +pub const MNT_DEFEXPORTED: c_int = 0x00000200; +pub const MNT_EXPORTANON: c_int = 0x00000400; +pub const MNT_ROOTFS: c_int = 0x00004000; +pub const MNT_NOATIME: c_int = 0x00008000; +pub const MNT_DELEXPORT: c_int = 0x00020000; +pub const MNT_STALLED: c_int = 0x00100000; +pub const MNT_SWAPPABLE: c_int = 0x00200000; +pub const MNT_WANTRDWR: c_int = 0x02000000; +pub const MNT_SOFTDEP: c_int = 0x04000000; +pub const MNT_DOOMED: c_int = 0x08000000; + +// For use with vfs_fsync and getfsstat +pub const MNT_WAIT: c_int = 1; +pub const MNT_NOWAIT: c_int = 2; +pub const MNT_LAZY: c_int = 3; + +// sys/_time.h +pub const CLOCK_PROCESS_CPUTIME_ID: crate::clockid_t = 2; +pub const CLOCK_THREAD_CPUTIME_ID: crate::clockid_t = 4; +pub const CLOCK_UPTIME: crate::clockid_t = 5; +pub const CLOCK_BOOTTIME: crate::clockid_t = 6; + +pub const LC_COLLATE_MASK: c_int = 1 << crate::LC_COLLATE; +pub const LC_CTYPE_MASK: c_int = 1 << crate::LC_CTYPE; +pub const LC_MONETARY_MASK: c_int = 1 << crate::LC_MONETARY; +pub const LC_NUMERIC_MASK: c_int = 1 << crate::LC_NUMERIC; +pub const LC_TIME_MASK: c_int = 1 << crate::LC_TIME; +pub const LC_MESSAGES_MASK: c_int = 1 << crate::LC_MESSAGES; + +const _LC_LAST: c_int = 7; +pub const LC_ALL_MASK: c_int = (1 << _LC_LAST) - 2; + +pub const LC_GLOBAL_LOCALE: crate::locale_t = -1isize as crate::locale_t; + +// sys/reboot.h +pub const RB_ASKNAME: c_int = 0x00001; +pub const RB_SINGLE: c_int = 0x00002; +pub const RB_NOSYNC: c_int = 0x00004; +pub const RB_HALT: c_int = 0x00008; +pub const RB_INITNAME: c_int = 0x00010; +pub const RB_KDB: c_int = 0x00040; +pub const RB_RDONLY: c_int = 0x00080; +pub const RB_DUMP: c_int = 0x00100; +pub const RB_MINIROOT: c_int = 0x00200; +pub const RB_CONFIG: c_int = 0x00400; +pub const RB_TIMEBAD: c_int = 0x00800; +pub const RB_POWERDOWN: c_int = 0x01000; +pub const RB_SERCONS: c_int = 0x02000; +pub const RB_USERREQ: c_int = 0x04000; +pub const RB_RESET: c_int = 0x08000; +pub const RB_GOODRANDOM: c_int = 0x10000; +pub const RB_UNHIBERNATE: c_int = 0x20000; + +// net/route.h +pub const RTF_CLONING: c_int = 0x100; +pub const RTF_MULTICAST: c_int = 0x200; +pub const RTF_LLINFO: c_int = 0x400; +pub const RTF_PROTO3: c_int = 0x2000; +pub const RTF_ANNOUNCE: c_int = crate::RTF_PROTO2; + +pub const RTF_CLONED: c_int = 0x10000; +pub const RTF_CACHED: c_int = 0x20000; +pub const RTF_MPATH: c_int = 0x40000; +pub const RTF_MPLS: c_int = 0x100000; +pub const RTF_LOCAL: c_int = 0x200000; +pub const RTF_BROADCAST: c_int = 0x400000; +pub const RTF_CONNECTED: c_int = 0x800000; +pub const RTF_BFD: c_int = 0x1000000; +pub const RTF_FMASK: c_int = crate::RTF_LLINFO + | crate::RTF_PROTO1 + | crate::RTF_PROTO2 + | crate::RTF_PROTO3 + | crate::RTF_BLACKHOLE + | crate::RTF_REJECT + | crate::RTF_STATIC + | crate::RTF_MPLS + | 
crate::RTF_BFD; + +pub const RTM_VERSION: c_int = 5; +pub const RTM_RESOLVE: c_int = 0xb; +pub const RTM_NEWADDR: c_int = 0xc; +pub const RTM_DELADDR: c_int = 0xd; +pub const RTM_IFINFO: c_int = 0xe; +pub const RTM_IFANNOUNCE: c_int = 0xf; +pub const RTM_DESYNC: c_int = 0x10; +pub const RTM_INVALIDATE: c_int = 0x11; +pub const RTM_BFD: c_int = 0x12; +pub const RTM_PROPOSAL: c_int = 0x13; +pub const RTM_CHGADDRATTR: c_int = 0x14; +pub const RTM_80211INFO: c_int = 0x15; +pub const RTM_SOURCE: c_int = 0x16; + +pub const RTA_SRC: c_int = 0x100; +pub const RTA_SRCMASK: c_int = 0x200; +pub const RTA_LABEL: c_int = 0x400; +pub const RTA_BFD: c_int = 0x800; +pub const RTA_DNS: c_int = 0x1000; +pub const RTA_STATIC: c_int = 0x2000; +pub const RTA_SEARCH: c_int = 0x4000; + +pub const RTAX_SRC: c_int = 8; +pub const RTAX_SRCMASK: c_int = 9; +pub const RTAX_LABEL: c_int = 10; +pub const RTAX_BFD: c_int = 11; +pub const RTAX_DNS: c_int = 12; +pub const RTAX_STATIC: c_int = 13; +pub const RTAX_SEARCH: c_int = 14; +pub const RTAX_MAX: c_int = 15; + +const fn _ALIGN(p: usize) -> usize { + (p + _ALIGNBYTES) & !_ALIGNBYTES +} + +f! { + pub fn CMSG_DATA(cmsg: *const cmsghdr) -> *mut c_uchar { + (cmsg as *mut c_uchar).offset(_ALIGN(size_of::<cmsghdr>()) as isize) + } + + pub const fn CMSG_LEN(length: c_uint) -> c_uint { + _ALIGN(size_of::<cmsghdr>()) as c_uint + length + } + + pub fn CMSG_NXTHDR(mhdr: *const crate::msghdr, cmsg: *const cmsghdr) -> *mut cmsghdr { + if cmsg.is_null() { + return crate::CMSG_FIRSTHDR(mhdr); + } + let next = cmsg as usize + _ALIGN((*cmsg).cmsg_len as usize) + _ALIGN(size_of::<cmsghdr>()); + let max = (*mhdr).msg_control as usize + (*mhdr).msg_controllen as usize; + if next > max { + core::ptr::null_mut::<cmsghdr>() + } else { + (cmsg as usize + _ALIGN((*cmsg).cmsg_len as usize)) as *mut cmsghdr + } + } + + pub const fn CMSG_SPACE(length: c_uint) -> c_uint { + (_ALIGN(size_of::<cmsghdr>()) + _ALIGN(length as usize)) as c_uint + } +} + +safe_f!
{ + pub const fn WSTOPSIG(status: c_int) -> c_int { + status >> 8 + } + + pub const fn WIFSIGNALED(status: c_int) -> bool { + (status & 0o177) != 0o177 && (status & 0o177) != 0 + } + + pub const fn WIFSTOPPED(status: c_int) -> bool { + (status & 0xff) == 0o177 + } + + pub const fn WIFCONTINUED(status: c_int) -> bool { + (status & 0o177777) == 0o177777 + } + + pub const fn makedev(major: c_uint, minor: c_uint) -> crate::dev_t { + let major = major as crate::dev_t; + let minor = minor as crate::dev_t; + let mut dev = 0; + dev |= (major & 0xff) << 8; + dev |= minor & 0xff; + dev |= (minor & 0xffff00) << 8; + dev + } + + pub const fn major(dev: crate::dev_t) -> c_uint { + ((dev as c_uint) >> 8) & 0xff + } + + pub const fn minor(dev: crate::dev_t) -> c_uint { + let dev = dev as c_uint; + let mut res = 0; + res |= (dev) & 0xff; + res |= ((dev) & 0xffff0000) >> 8; + res + } +} + +extern "C" { + pub fn gettimeofday(tp: *mut crate::timeval, tz: *mut crate::timezone) -> c_int; + pub fn settimeofday(tp: *const crate::timeval, tz: *const crate::timezone) -> c_int; + pub fn pledge(promises: *const c_char, execpromises: *const c_char) -> c_int; + pub fn unveil(path: *const c_char, permissions: *const c_char) -> c_int; + pub fn strtonum( + nptr: *const c_char, + minval: c_longlong, + maxval: c_longlong, + errstr: *mut *const c_char, + ) -> c_longlong; + pub fn dup3(src: c_int, dst: c_int, flags: c_int) -> c_int; + pub fn chflags(path: *const c_char, flags: c_uint) -> c_int; + pub fn fchflags(fd: c_int, flags: c_uint) -> c_int; + pub fn chflagsat(fd: c_int, path: *const c_char, flags: c_uint, atflag: c_int) -> c_int; + pub fn dirfd(dirp: *mut crate::DIR) -> c_int; + pub fn getnameinfo( + sa: *const crate::sockaddr, + salen: crate::socklen_t, + host: *mut c_char, + hostlen: size_t, + serv: *mut c_char, + servlen: size_t, + flags: c_int, + ) -> c_int; + pub fn getresgid( + rgid: *mut crate::gid_t, + egid: *mut crate::gid_t, + sgid: *mut crate::gid_t, + ) -> c_int; + pub fn getresuid( + ruid: *mut crate::uid_t, + euid: *mut crate::uid_t, + suid: *mut crate::uid_t, + ) -> c_int; + pub fn kevent( + kq: c_int, + changelist: *const crate::kevent, + nchanges: c_int, + eventlist: *mut crate::kevent, + nevents: c_int, + timeout: *const crate::timespec, + ) -> c_int; + pub fn mprotect(addr: *mut c_void, len: size_t, prot: c_int) -> c_int; + pub fn getthrid() -> crate::pid_t; + pub fn pthread_attr_getguardsize( + attr: *const crate::pthread_attr_t, + guardsize: *mut size_t, + ) -> c_int; + pub fn pthread_attr_setguardsize(attr: *mut crate::pthread_attr_t, guardsize: size_t) -> c_int; + pub fn pthread_attr_getstack( + attr: *const crate::pthread_attr_t, + stackaddr: *mut *mut c_void, + stacksize: *mut size_t, + ) -> c_int; + pub fn pthread_main_np() -> c_int; + pub fn pthread_get_name_np(tid: crate::pthread_t, name: *mut c_char, len: size_t); + pub fn pthread_set_name_np(tid: crate::pthread_t, name: *const c_char); + pub fn pthread_stackseg_np(thread: crate::pthread_t, sinfo: *mut crate::stack_t) -> c_int; + + pub fn openpty( + amaster: *mut c_int, + aslave: *mut c_int, + name: *mut c_char, + termp: *const crate::termios, + winp: *const crate::winsize, + ) -> c_int; + pub fn forkpty( + amaster: *mut c_int, + name: *mut c_char, + termp: *const crate::termios, + winp: *const crate::winsize, + ) -> crate::pid_t; + + pub fn sysctl( + name: *const c_int, + namelen: c_uint, + oldp: *mut c_void, + oldlenp: *mut size_t, + newp: *mut c_void, + newlen: size_t, + ) -> c_int; + pub fn setresgid(rgid: crate::gid_t, egid: 
crate::gid_t, sgid: crate::gid_t) -> c_int; + pub fn setresuid(ruid: crate::uid_t, euid: crate::uid_t, suid: crate::uid_t) -> c_int; + pub fn ptrace(request: c_int, pid: crate::pid_t, addr: caddr_t, data: c_int) -> c_int; + pub fn utrace(label: *const c_char, addr: *const c_void, len: size_t) -> c_int; + pub fn memmem( + haystack: *const c_void, + haystacklen: size_t, + needle: *const c_void, + needlelen: size_t, + ) -> *mut c_void; + // #include <link.h> + pub fn dl_iterate_phdr( + callback: Option< + unsafe extern "C" fn(info: *mut dl_phdr_info, size: usize, data: *mut c_void) -> c_int, + >, + data: *mut c_void, + ) -> c_int; + pub fn uselocale(loc: crate::locale_t) -> crate::locale_t; + pub fn freelocale(loc: crate::locale_t); + pub fn newlocale(mask: c_int, locale: *const c_char, base: crate::locale_t) -> crate::locale_t; + pub fn duplocale(base: crate::locale_t) -> crate::locale_t; + + // Added in `OpenBSD` 5.5 + pub fn explicit_bzero(s: *mut c_void, len: size_t); + + pub fn setproctitle(fmt: *const c_char, ...); + + pub fn freezero(ptr: *mut c_void, size: size_t); + pub fn malloc_conceal(size: size_t) -> *mut c_void; + pub fn calloc_conceal(nmemb: size_t, size: size_t) -> *mut c_void; + + pub fn srand48_deterministic(seed: c_long); + pub fn seed48_deterministic(xseed: *mut c_ushort) -> *mut c_ushort; + pub fn lcong48_deterministic(p: *mut c_ushort); + + pub fn lsearch( + key: *const c_void, + base: *mut c_void, + nelp: *mut size_t, + width: size_t, + compar: Option<unsafe extern "C" fn(*const c_void, *const c_void) -> c_int>, + ) -> *mut c_void; + pub fn lfind( + key: *const c_void, + base: *const c_void, + nelp: *mut size_t, + width: size_t, + compar: Option<unsafe extern "C" fn(*const c_void, *const c_void) -> c_int>, + ) -> *mut c_void; + pub fn hcreate(nelt: size_t) -> c_int; + pub fn hdestroy(); + pub fn hsearch(entry: crate::ENTRY, action: crate::ACTION) -> *mut crate::ENTRY; + + // futex.h + pub fn futex( + uaddr: *mut u32, + op: c_int, + val: c_int, + timeout: *const crate::timespec, + uaddr2: *mut u32, + ) -> c_int; + + pub fn mimmutable(addr: *mut c_void, len: size_t) -> c_int; + + pub fn reboot(mode: c_int) -> c_int; + + pub fn statfs(path: *const c_char, buf: *mut statfs) -> c_int; + pub fn fstatfs(fd: c_int, buf: *mut statfs) -> c_int; + pub fn getmntinfo(mntbufp: *mut *mut crate::statfs, flags: c_int) -> c_int; + pub fn getfsstat(buf: *mut statfs, bufsize: size_t, flags: c_int) -> c_int; + + pub fn elf_aux_info(aux: c_int, buf: *mut c_void, buflen: c_int) -> c_int; +} + +#[link(name = "execinfo")] +extern "C" { + pub fn backtrace(addrlist: *mut *mut c_void, len: size_t) -> size_t; + pub fn backtrace_symbols(addrlist: *const *mut c_void, len: size_t) -> *mut *mut c_char; + pub fn backtrace_symbols_fd(addrlist: *const *mut c_void, len: size_t, fd: c_int) -> c_int; + pub fn backtrace_symbols_fmt( + addrlist: *const *mut c_void, + len: size_t, + fmt: *const c_char, + ) -> *mut *mut c_char; +} + +cfg_if!
{ + if #[cfg(target_arch = "aarch64")] { + mod aarch64; + pub use self::aarch64::*; + } else if #[cfg(target_arch = "arm")] { + mod arm; + pub use self::arm::*; + } else if #[cfg(target_arch = "mips64")] { + mod mips64; + pub use self::mips64::*; + } else if #[cfg(target_arch = "powerpc")] { + mod powerpc; + pub use self::powerpc::*; + } else if #[cfg(target_arch = "powerpc64")] { + mod powerpc64; + pub use self::powerpc64::*; + } else if #[cfg(target_arch = "riscv64")] { + mod riscv64; + pub use self::riscv64::*; + } else if #[cfg(target_arch = "sparc64")] { + mod sparc64; + pub use self::sparc64::*; + } else if #[cfg(target_arch = "x86")] { + mod x86; + pub use self::x86::*; + } else if #[cfg(target_arch = "x86_64")] { + mod x86_64; + pub use self::x86_64::*; + } else { + // Unknown target_arch + } +} diff --git a/vendor/libc/src/unix/bsd/netbsdlike/openbsd/powerpc.rs b/vendor/libc/src/unix/bsd/netbsdlike/openbsd/powerpc.rs new file mode 100644 index 00000000000000..8b3f72139d86e9 --- /dev/null +++ b/vendor/libc/src/unix/bsd/netbsdlike/openbsd/powerpc.rs @@ -0,0 +1,5 @@ +use crate::prelude::*; + +pub(crate) const _ALIGNBYTES: usize = size_of::() - 1; + +pub const _MAX_PAGE_SHIFT: u32 = 12; diff --git a/vendor/libc/src/unix/bsd/netbsdlike/openbsd/powerpc64.rs b/vendor/libc/src/unix/bsd/netbsdlike/openbsd/powerpc64.rs new file mode 100644 index 00000000000000..5ebe85741454ee --- /dev/null +++ b/vendor/libc/src/unix/bsd/netbsdlike/openbsd/powerpc64.rs @@ -0,0 +1,5 @@ +use crate::prelude::*; + +pub(crate) const _ALIGNBYTES: usize = size_of::() - 1; + +pub const _MAX_PAGE_SHIFT: u32 = 12; diff --git a/vendor/libc/src/unix/bsd/netbsdlike/openbsd/riscv64.rs b/vendor/libc/src/unix/bsd/netbsdlike/openbsd/riscv64.rs new file mode 100644 index 00000000000000..3545763d12c540 --- /dev/null +++ b/vendor/libc/src/unix/bsd/netbsdlike/openbsd/riscv64.rs @@ -0,0 +1,25 @@ +use crate::prelude::*; + +pub type ucontext_t = sigcontext; + +s! { + pub struct sigcontext { + __sc_unused: c_int, + pub sc_mask: c_int, + pub sc_ra: c_long, + pub sc_sp: c_long, + pub sc_gp: c_long, + pub sc_tp: c_long, + pub sc_t: [c_long; 7], + pub sc_s: [c_long; 12], + pub sc_a: [c_long; 8], + pub sc_sepc: c_long, + pub sc_f: [c_long; 32], + pub sc_fcsr: c_long, + pub sc_cookie: c_long, + } +} + +pub(crate) const _ALIGNBYTES: usize = size_of::() - 1; + +pub const _MAX_PAGE_SHIFT: u32 = 12; diff --git a/vendor/libc/src/unix/bsd/netbsdlike/openbsd/sparc64.rs b/vendor/libc/src/unix/bsd/netbsdlike/openbsd/sparc64.rs new file mode 100644 index 00000000000000..88481f4f014e81 --- /dev/null +++ b/vendor/libc/src/unix/bsd/netbsdlike/openbsd/sparc64.rs @@ -0,0 +1,4 @@ +#[doc(hidden)] +pub const _ALIGNBYTES: usize = 0xf; + +pub const _MAX_PAGE_SHIFT: u32 = 13; diff --git a/vendor/libc/src/unix/bsd/netbsdlike/openbsd/x86.rs b/vendor/libc/src/unix/bsd/netbsdlike/openbsd/x86.rs new file mode 100644 index 00000000000000..97dc58327d2226 --- /dev/null +++ b/vendor/libc/src/unix/bsd/netbsdlike/openbsd/x86.rs @@ -0,0 +1,5 @@ +use crate::prelude::*; + +pub(crate) const _ALIGNBYTES: usize = size_of::() - 1; + +pub const _MAX_PAGE_SHIFT: u32 = 12; diff --git a/vendor/libc/src/unix/bsd/netbsdlike/openbsd/x86_64.rs b/vendor/libc/src/unix/bsd/netbsdlike/openbsd/x86_64.rs new file mode 100644 index 00000000000000..984570c3870130 --- /dev/null +++ b/vendor/libc/src/unix/bsd/netbsdlike/openbsd/x86_64.rs @@ -0,0 +1,109 @@ +use crate::prelude::*; +use crate::PT_FIRSTMACH; + +pub type ucontext_t = sigcontext; + +s! 
{ + pub struct sigcontext { + pub sc_rdi: c_long, + pub sc_rsi: c_long, + pub sc_rdx: c_long, + pub sc_rcx: c_long, + pub sc_r8: c_long, + pub sc_r9: c_long, + pub sc_r10: c_long, + pub sc_r11: c_long, + pub sc_r12: c_long, + pub sc_r13: c_long, + pub sc_r14: c_long, + pub sc_r15: c_long, + pub sc_rbp: c_long, + pub sc_rbx: c_long, + pub sc_rax: c_long, + pub sc_gs: c_long, + pub sc_fs: c_long, + pub sc_es: c_long, + pub sc_ds: c_long, + pub sc_trapno: c_long, + pub sc_err: c_long, + pub sc_rip: c_long, + pub sc_cs: c_long, + pub sc_rflags: c_long, + pub sc_rsp: c_long, + pub sc_ss: c_long, + pub sc_fpstate: *mut fxsave64, + __sc_unused: c_int, + pub sc_mask: c_int, + pub sc_cookie: c_long, + } +} + +s_no_extra_traits! { + #[repr(packed)] + pub struct fxsave64 { + pub fx_fcw: u16, + pub fx_fsw: u16, + pub fx_ftw: u8, + __fx_unused1: u8, + pub fx_fop: u16, + pub fx_rip: u64, + pub fx_rdp: u64, + pub fx_mxcsr: u32, + pub fx_mxcsr_mask: u32, + pub fx_st: [[u64; 2]; 8], + pub fx_xmm: [[u64; 2]; 16], + __fx_unused3: [u8; 96], + } +} + +cfg_if! { + if #[cfg(feature = "extra_traits")] { + // `fxsave64` is packed, so field access is unaligned. + // use {x} to create temporary storage, copy field to it, and do aligned access. + impl PartialEq for fxsave64 { + fn eq(&self, other: &fxsave64) -> bool { + return { self.fx_fcw } == { other.fx_fcw } + && { self.fx_fsw } == { other.fx_fsw } + && { self.fx_ftw } == { other.fx_ftw } + && { self.fx_fop } == { other.fx_fop } + && { self.fx_rip } == { other.fx_rip } + && { self.fx_rdp } == { other.fx_rdp } + && { self.fx_mxcsr } == { other.fx_mxcsr } + && { self.fx_mxcsr_mask } == { other.fx_mxcsr_mask } + && { self.fx_st } + .iter() + .zip({ other.fx_st }.iter()) + .all(|(a, b)| a == b) + && { self.fx_xmm } + .iter() + .zip({ other.fx_xmm }.iter()) + .all(|(a, b)| a == b); + } + } + impl Eq for fxsave64 {} + impl hash::Hash for fxsave64 { + fn hash(&self, state: &mut H) { + { self.fx_fcw }.hash(state); + { self.fx_fsw }.hash(state); + { self.fx_ftw }.hash(state); + { self.fx_fop }.hash(state); + { self.fx_rip }.hash(state); + { self.fx_rdp }.hash(state); + { self.fx_mxcsr }.hash(state); + { self.fx_mxcsr_mask }.hash(state); + { self.fx_st }.hash(state); + { self.fx_xmm }.hash(state); + } + } + } +} + +pub(crate) const _ALIGNBYTES: usize = size_of::() - 1; + +pub const _MAX_PAGE_SHIFT: u32 = 12; + +pub const PT_STEP: c_int = PT_FIRSTMACH + 0; +pub const PT_GETREGS: c_int = PT_FIRSTMACH + 1; +pub const PT_SETREGS: c_int = PT_FIRSTMACH + 2; +pub const PT_GETFPREGS: c_int = PT_FIRSTMACH + 3; +pub const PT_SETFPREGS: c_int = PT_FIRSTMACH + 4; diff --git a/vendor/libc/src/unix/cygwin/mod.rs b/vendor/libc/src/unix/cygwin/mod.rs new file mode 100644 index 00000000000000..12e30f3f9016c5 --- /dev/null +++ b/vendor/libc/src/unix/cygwin/mod.rs @@ -0,0 +1,2477 @@ +use crate::prelude::*; +use crate::*; + +pub type wchar_t = c_ushort; + +pub type blkcnt_t = i64; +pub type blksize_t = i32; +pub type dev_t = u32; +pub type fsblkcnt_t = c_ulong; +pub type fsfilcnt_t = c_ulong; +pub type ino_t = u64; +pub type key_t = c_longlong; +pub type sa_family_t = u16; +pub type socklen_t = c_int; + +pub type off_t = c_long; +pub type id_t = u32; +pub type mode_t = u32; +pub type _off64_t = c_longlong; +pub type loff_t = _off64_t; +pub type iconv_t = *mut c_void; +pub type clock_t = c_ulong; +pub type time_t = c_long; +pub type clockid_t = c_ulong; +pub type timer_t = c_ulong; +pub type nl_item = c_int; +pub type nlink_t = c_ushort; +pub type suseconds_t = c_long; +pub type useconds_t = 
c_ulong; + +#[derive(Debug)] +pub enum timezone {} +impl Copy for timezone {} +impl Clone for timezone { + fn clone(&self) -> timezone { + *self + } +} + +pub type sigset_t = c_ulong; + +pub type fd_mask = c_ulong; + +pub type pthread_t = *mut c_void; +pub type pthread_mutex_t = *mut c_void; + +// Must be usize due to libstd/sys_common/thread_local.rs, +// should technically be *mut c_void +pub type pthread_key_t = usize; + +pub type pthread_attr_t = *mut c_void; +pub type pthread_mutexattr_t = *mut c_void; +pub type pthread_condattr_t = *mut c_void; +pub type pthread_cond_t = *mut c_void; + +// The following ones should be *mut c_void +pub type pthread_barrierattr_t = usize; +pub type pthread_barrier_t = usize; +pub type pthread_spinlock_t = usize; + +pub type pthread_rwlock_t = *mut c_void; +pub type pthread_rwlockattr_t = *mut c_void; + +pub type register_t = intptr_t; +pub type u_char = c_uchar; +pub type u_short = c_ushort; +pub type u_long = c_ulong; +pub type u_int = c_uint; +pub type caddr_t = *mut c_char; +pub type vm_size_t = c_ulong; + +pub type rlim_t = c_ulong; + +pub type nfds_t = c_uint; + +pub type sem_t = *mut sem; + +#[derive(Debug)] +pub enum sem {} +impl Copy for sem {} +impl Clone for sem { + fn clone(&self) -> sem { + *self + } +} + +pub type tcflag_t = c_uint; +pub type speed_t = c_uint; + +pub type vm_offset_t = c_ulong; + +pub type posix_spawn_file_actions_t = *mut c_void; +pub type posix_spawnattr_t = *mut c_void; + +s! { + pub struct itimerspec { + pub it_interval: timespec, + pub it_value: timespec, + } + + pub struct cpu_set_t { + bits: [u64; 16], + } + + pub struct sigaction { + pub sa_sigaction: sighandler_t, + pub sa_mask: sigset_t, + pub sa_flags: c_int, + } + + pub struct stack_t { + pub ss_sp: *mut c_void, + pub ss_flags: c_int, + pub ss_size: size_t, + } + + pub struct tm { + pub tm_sec: c_int, + pub tm_min: c_int, + pub tm_hour: c_int, + pub tm_mday: c_int, + pub tm_mon: c_int, + pub tm_year: c_int, + pub tm_wday: c_int, + pub tm_yday: c_int, + pub tm_isdst: c_int, + pub tm_gmtoff: c_long, + pub tm_zone: *const c_char, + } + + pub struct bintime { + pub sec: time_t, + pub frac: u64, + } + + pub struct passwd { + pub pw_name: *mut c_char, + pub pw_passwd: *mut c_char, + pub pw_uid: uid_t, + pub pw_gid: gid_t, + pub pw_comment: *mut c_char, + pub pw_gecos: *mut c_char, + pub pw_dir: *mut c_char, + pub pw_shell: *mut c_char, + } + + pub struct if_nameindex { + pub if_index: c_uint, + pub if_name: *mut c_char, + } + + pub struct ucred { + pub pid: pid_t, + pub uid: uid_t, + pub gid: gid_t, + } + + pub struct msghdr { + pub msg_name: *mut c_void, + pub msg_namelen: socklen_t, + pub msg_iov: *mut iovec, + pub msg_iovlen: c_int, + pub msg_control: *mut c_void, + pub msg_controllen: socklen_t, + pub msg_flags: c_int, + } + + pub struct cmsghdr { + pub cmsg_len: size_t, + pub cmsg_level: c_int, + pub cmsg_type: c_int, + } + + pub struct Dl_info { + pub dli_fname: [c_char; PATH_MAX as usize], + pub dli_fbase: *mut c_void, + pub dli_sname: *const c_char, + pub dli_saddr: *mut c_void, + } + + pub struct in6_pktinfo { + pub ipi6_addr: in6_addr, + pub ipi6_ifindex: u32, + } + + pub struct sockaddr_in6 { + pub sin6_family: sa_family_t, + pub sin6_port: in_port_t, + pub sin6_flowinfo: u32, + pub sin6_addr: in6_addr, + pub sin6_scope_id: u32, + } + + pub struct ip_mreq_source { + pub imr_multiaddr: in_addr, + pub imr_sourceaddr: in_addr, + pub imr_interface: in_addr, + } + + pub struct addrinfo { + pub ai_flags: c_int, + pub ai_family: c_int, + pub ai_socktype: c_int, + 
pub ai_protocol: c_int, + pub ai_addrlen: socklen_t, + pub ai_canonname: *mut c_char, + pub ai_addr: *mut sockaddr, + pub ai_next: *mut addrinfo, + } + + pub struct lconv { + pub decimal_point: *mut c_char, + pub thousands_sep: *mut c_char, + pub grouping: *mut c_char, + pub int_curr_symbol: *mut c_char, + pub currency_symbol: *mut c_char, + pub mon_decimal_point: *mut c_char, + pub mon_thousands_sep: *mut c_char, + pub mon_grouping: *mut c_char, + pub positive_sign: *mut c_char, + pub negative_sign: *mut c_char, + pub int_frac_digits: c_char, + pub frac_digits: c_char, + pub p_cs_precedes: c_char, + pub p_sep_by_space: c_char, + pub n_cs_precedes: c_char, + pub n_sep_by_space: c_char, + pub p_sign_posn: c_char, + pub n_sign_posn: c_char, + pub int_n_cs_precedes: c_char, + pub int_n_sep_by_space: c_char, + pub int_n_sign_posn: c_char, + pub int_p_cs_precedes: c_char, + pub int_p_sep_by_space: c_char, + pub int_p_sign_posn: c_char, + } + + pub struct termios { + pub c_iflag: tcflag_t, + pub c_oflag: tcflag_t, + pub c_cflag: tcflag_t, + pub c_lflag: tcflag_t, + pub c_line: c_char, + pub c_cc: [cc_t; NCCS], + pub c_ispeed: speed_t, + pub c_ospeed: speed_t, + } + + pub struct sched_param { + pub sched_priority: c_int, + } + + pub struct flock { + pub l_type: c_short, + pub l_whence: c_short, + pub l_start: off_t, + pub l_len: off_t, + pub l_pid: pid_t, + } + + pub struct hostent { + pub h_name: *const c_char, + pub h_aliases: *mut *mut c_char, + pub h_addrtype: c_short, + pub h_length: c_short, + pub h_addr_list: *mut *mut c_char, + } + + pub struct linger { + pub l_onoff: c_ushort, + pub l_linger: c_ushort, + } + + pub struct fd_set { + fds_bits: [fd_mask; FD_SETSIZE / size_of::() / 8], + } + + pub struct _uc_fpxreg { + pub significand: [u16; 4], + pub exponent: u16, + pub padding: [u16; 3], + } + + pub struct _uc_xmmreg { + pub element: [u32; 4], + } + + pub struct _fpstate { + pub cwd: u16, + pub swd: u16, + pub ftw: u16, + pub fop: u16, + pub rip: u64, + pub rdp: u64, + pub mxcsr: u32, + pub mxcr_mask: u32, + pub st: [_uc_fpxreg; 8], + pub xmm: [_uc_xmmreg; 16], + pub padding: [u32; 24], + } + + #[repr(align(16))] + pub struct mcontext_t { + pub p1home: u64, + pub p2home: u64, + pub p3home: u64, + pub p4home: u64, + pub p5home: u64, + pub p6home: u64, + pub ctxflags: u32, + pub mxcsr: u32, + pub cs: u16, + pub ds: u16, + pub es: u16, + pub fs: u16, + pub gs: u16, + pub ss: u16, + pub eflags: u32, + pub dr0: u64, + pub dr1: u64, + pub dr2: u64, + pub dr3: u64, + pub dr6: u64, + pub dr7: u64, + pub rax: u64, + pub rcx: u64, + pub rdx: u64, + pub rbx: u64, + pub rsp: u64, + pub rbp: u64, + pub rsi: u64, + pub rdi: u64, + pub r8: u64, + pub r9: u64, + pub r10: u64, + pub r11: u64, + pub r12: u64, + pub r13: u64, + pub r14: u64, + pub r15: u64, + pub rip: u64, + pub fpregs: _fpstate, + pub vregs: [u64; 52], + pub vcx: u64, + pub dbc: u64, + pub btr: u64, + pub bfr: u64, + pub etr: u64, + pub efr: u64, + pub oldmask: u64, + pub cr2: u64, + } + + // FIXME(1.0): This should not implement `PartialEq` + #[allow(unpredictable_function_pointer_comparisons)] + pub struct sigevent { + pub sigev_value: sigval, + pub sigev_signo: c_int, + pub sigev_notify: c_int, + pub sigev_notify_function: Option, + pub sigev_notify_attributes: *mut pthread_attr_t, + } + + #[repr(align(8))] + pub struct ucontext_t { + pub uc_mcontext: mcontext_t, + pub uc_link: *mut ucontext_t, + pub uc_sigmask: sigset_t, + pub uc_stack: stack_t, + pub uc_flags: c_ulong, + } + + pub struct sockaddr { + pub sa_family: sa_family_t, + pub 
sa_data: [c_char; 14], + } + + pub struct sockaddr_storage { + pub ss_family: sa_family_t, + __ss_pad1: [c_char; 6], + __ss_align: i64, + __ss_pad2: [c_char; 112], + } + + pub struct stat { + pub st_dev: dev_t, + pub st_ino: ino_t, + pub st_mode: mode_t, + pub st_nlink: nlink_t, + pub st_uid: uid_t, + pub st_gid: gid_t, + pub st_rdev: dev_t, + pub st_size: off_t, + pub st_atime: time_t, + pub st_atime_nsec: c_long, + pub st_mtime: time_t, + pub st_mtime_nsec: c_long, + pub st_ctime: time_t, + pub st_ctime_nsec: c_long, + pub st_blksize: blksize_t, + pub st_blocks: blkcnt_t, + pub st_birthtime: time_t, + pub st_birthtime_nsec: c_long, + } + + pub struct in_addr { + pub s_addr: in_addr_t, + } + + pub struct ip_mreq { + pub imr_multiaddr: in_addr, + pub imr_interface: in_addr, + } + + pub struct in_pktinfo { + pub ipi_addr: in_addr, + pub ipi_ifindex: u32, + } + + pub struct sockaddr_in { + pub sin_family: sa_family_t, + pub sin_port: in_port_t, + pub sin_addr: in_addr, + pub sin_zero: [u8; 8], + } + + pub struct statvfs { + pub f_bsize: c_ulong, + pub f_frsize: c_ulong, + pub f_blocks: fsblkcnt_t, + pub f_bfree: fsblkcnt_t, + pub f_bavail: fsblkcnt_t, + pub f_files: fsfilcnt_t, + pub f_ffree: fsfilcnt_t, + pub f_favail: fsfilcnt_t, + pub f_fsid: c_ulong, + pub f_flag: c_ulong, + pub f_namemax: c_ulong, + } + + pub struct statfs { + pub f_type: c_long, + pub f_bsize: c_long, + pub f_blocks: c_long, + pub f_bfree: c_long, + pub f_bavail: c_long, + pub f_files: c_long, + pub f_ffree: c_long, + pub f_fsid: c_long, + pub f_namelen: c_long, + pub f_spare: [c_long; 6], + } +} + +s_no_extra_traits! { + #[repr(align(16))] + pub struct max_align_t { + priv_: [f64; 4], + } + + pub struct siginfo_t { + pub si_signo: c_int, + pub si_code: c_int, + pub si_pid: pid_t, + pub si_uid: uid_t, + pub si_errno: c_int, + __pad: [u32; 32], + } + + pub union __c_anonymous_ifr_ifru { + pub ifru_addr: sockaddr, + pub ifru_broadaddr: sockaddr, + pub ifru_dstaddr: sockaddr, + pub ifru_netmask: sockaddr, + pub ifru_hwaddr: sockaddr, + pub ifru_flags: c_int, + pub ifru_metric: c_int, + pub ifru_mtu: c_int, + pub ifru_ifindex: c_int, + pub ifru_data: *mut c_char, + __ifru_pad: [c_char; 28], + } + + pub struct ifreq { + /// if name, e.g. 
"en0" + pub ifr_name: [c_char; IFNAMSIZ], + pub ifr_ifru: __c_anonymous_ifr_ifru, + } + + pub union __c_anonymous_ifc_ifcu { + pub ifcu_buf: caddr_t, + pub ifcu_req: *mut ifreq, + } + + pub struct ifconf { + pub ifc_len: c_int, + pub ifc_ifcu: __c_anonymous_ifc_ifcu, + } + + pub struct dirent { + __d_version: u32, + pub d_ino: ino_t, + pub d_type: c_uchar, + __d_unused1: [c_uchar; 3], + __d_internal1: u32, + pub d_name: [c_char; 256], + } + + pub struct sockaddr_un { + pub sun_family: sa_family_t, + pub sun_path: [c_char; 108], + } + + pub struct utsname { + pub sysname: [c_char; 66], + pub nodename: [c_char; 65], + pub release: [c_char; 65], + pub version: [c_char; 65], + pub machine: [c_char; 65], + pub domainname: [c_char; 65], + } +} + +impl siginfo_t { + pub unsafe fn si_addr(&self) -> *mut c_void { + #[repr(C)] + struct siginfo_si_addr { + _si_signo: c_int, + _si_code: c_int, + _si_pid: pid_t, + _si_uid: uid_t, + _si_errno: c_int, + si_addr: *mut c_void, + } + (*(self as *const siginfo_t as *const siginfo_si_addr)).si_addr + } + + pub unsafe fn si_status(&self) -> c_int { + #[repr(C)] + struct siginfo_sigchld { + _si_signo: c_int, + _si_code: c_int, + _si_pid: pid_t, + _si_uid: uid_t, + _si_errno: c_int, + si_status: c_int, + } + (*(self as *const siginfo_t as *const siginfo_sigchld)).si_status + } + + pub unsafe fn si_pid(&self) -> pid_t { + self.si_pid + } + + pub unsafe fn si_uid(&self) -> uid_t { + self.si_uid + } + + pub unsafe fn si_value(&self) -> sigval { + #[repr(C)] + struct siginfo_si_value { + _si_signo: c_int, + _si_code: c_int, + _si_pid: pid_t, + _si_uid: uid_t, + _si_errno: c_int, + si_value: sigval, + } + (*(self as *const siginfo_t as *const siginfo_si_value)).si_value + } +} + +cfg_if! { + if #[cfg(feature = "extra_traits")] { + impl PartialEq for siginfo_t { + fn eq(&self, other: &siginfo_t) -> bool { + self.si_signo == other.si_signo + && self.si_code == other.si_code + && self.si_pid == other.si_pid + && self.si_uid == other.si_uid + && self.si_errno == other.si_errno + } + } + + impl Eq for siginfo_t {} + + impl hash::Hash for siginfo_t { + fn hash(&self, state: &mut H) { + self.si_signo.hash(state); + self.si_code.hash(state); + self.si_pid.hash(state); + self.si_uid.hash(state); + self.si_errno.hash(state); + // Ignore __pad + } + } + + impl PartialEq for dirent { + fn eq(&self, other: &dirent) -> bool { + self.d_ino == other.d_ino + && self.d_type == other.d_type + && self + .d_name + .iter() + .zip(other.d_name.iter()) + .all(|(a, b)| a == b) + } + } + + impl Eq for dirent {} + + impl hash::Hash for dirent { + fn hash(&self, state: &mut H) { + self.d_ino.hash(state); + self.d_type.hash(state); + self.d_name.hash(state); + } + } + + impl PartialEq for sockaddr_un { + fn eq(&self, other: &sockaddr_un) -> bool { + self.sun_family == other.sun_family + && self + .sun_path + .iter() + .zip(other.sun_path.iter()) + .all(|(a, b)| a == b) + } + } + + impl Eq for sockaddr_un {} + + impl hash::Hash for sockaddr_un { + fn hash(&self, state: &mut H) { + self.sun_family.hash(state); + self.sun_path.hash(state); + } + } + + impl PartialEq for utsname { + fn eq(&self, other: &utsname) -> bool { + self.sysname + .iter() + .zip(other.sysname.iter()) + .all(|(a, b)| a == b) + && self + .nodename + .iter() + .zip(other.nodename.iter()) + .all(|(a, b)| a == b) + && self + .release + .iter() + .zip(other.release.iter()) + .all(|(a, b)| a == b) + && self + .version + .iter() + .zip(other.version.iter()) + .all(|(a, b)| a == b) + && self + .machine + .iter() + 
.zip(other.machine.iter()) + .all(|(a, b)| a == b) + && self + .domainname + .iter() + .zip(other.domainname.iter()) + .all(|(a, b)| a == b) + } + } + + impl Eq for utsname {} + + impl hash::Hash for utsname { + fn hash(&self, state: &mut H) { + self.sysname.hash(state); + self.nodename.hash(state); + self.release.hash(state); + self.version.hash(state); + self.machine.hash(state); + self.domainname.hash(state); + } + } + } +} + +pub const FD_SETSIZE: usize = 1024; + +pub const CPU_SETSIZE: c_int = 0x400; + +// si_code values for SIGBUS signal +pub const BUS_ADRALN: c_int = 25; +pub const BUS_ADRERR: c_int = 26; +pub const BUS_OBJERR: c_int = 27; + +// si_code values for SIGCHLD signal +pub const CLD_EXITED: c_int = 28; +pub const CLD_KILLED: c_int = 29; +pub const CLD_DUMPED: c_int = 30; +pub const CLD_TRAPPED: c_int = 31; +pub const CLD_STOPPED: c_int = 32; +pub const CLD_CONTINUED: c_int = 33; + +pub const SIGEV_SIGNAL: c_int = 0; +pub const SIGEV_NONE: c_int = 1; +pub const SIGEV_THREAD: c_int = 2; + +pub const SA_NOCLDSTOP: c_int = 0x00000001; +pub const SA_NOCLDWAIT: c_int = 0; // FIXME: does not exist on Cygwin! +pub const SA_SIGINFO: c_int = 0x00000002; +pub const SA_RESTART: c_int = 0x10000000; +pub const SA_ONSTACK: c_int = 0x20000000; +pub const SA_NODEFER: c_int = 0x40000000; +pub const SA_RESETHAND: c_int = 0x80000000; +pub const MINSIGSTKSZ: size_t = 8192; +pub const SIGSTKSZ: size_t = 32768; +pub const SIGHUP: c_int = 1; +pub const SIGINT: c_int = 2; +pub const SIGQUIT: c_int = 3; +pub const SIGILL: c_int = 4; +pub const SIGTRAP: c_int = 5; +pub const SIGABRT: c_int = 6; +pub const SIGEMT: c_int = 7; +pub const SIGFPE: c_int = 8; +pub const SIGKILL: c_int = 9; +pub const SIGBUS: c_int = 10; +pub const SIGSEGV: c_int = 11; +pub const SIGSYS: c_int = 12; +pub const SIGPIPE: c_int = 13; +pub const SIGALRM: c_int = 14; +pub const SIGTERM: c_int = 15; +pub const SIGURG: c_int = 16; +pub const SIGSTOP: c_int = 17; +pub const SIGTSTP: c_int = 18; +pub const SIGCONT: c_int = 19; +pub const SIGCHLD: c_int = 20; +pub const SIGTTIN: c_int = 21; +pub const SIGTTOU: c_int = 22; +pub const SIGIO: c_int = 23; +pub const SIGPOLL: c_int = 23; +pub const SIGXCPU: c_int = 24; +pub const SIGXFSZ: c_int = 25; +pub const SIGVTALRM: c_int = 26; +pub const SIGPROF: c_int = 27; +pub const SIGWINCH: c_int = 28; +pub const SIGPWR: c_int = 29; +pub const SIGUSR1: c_int = 30; +pub const SIGUSR2: c_int = 31; + +pub const SS_ONSTACK: c_int = 0x1; +pub const SS_DISABLE: c_int = 0x2; + +pub const SIG_SETMASK: c_int = 0; +pub const SIG_BLOCK: c_int = 1; +pub const SIG_UNBLOCK: c_int = 2; + +pub const TIMER_ABSTIME: c_int = 4; +pub const CLOCK_REALTIME_COARSE: clockid_t = 0; +pub const CLOCK_REALTIME: clockid_t = 1; +pub const CLOCK_PROCESS_CPUTIME_ID: clockid_t = 2; +pub const CLOCK_THREAD_CPUTIME_ID: clockid_t = 3; +pub const CLOCK_MONOTONIC: clockid_t = 4; +pub const CLOCK_MONOTONIC_RAW: clockid_t = 5; +pub const CLOCK_MONOTONIC_COARSE: clockid_t = 6; +pub const CLOCK_BOOTTIME: clockid_t = 7; +pub const CLOCK_REALTIME_ALARM: clockid_t = 8; +pub const CLOCK_BOOTTIME_ALARM: clockid_t = 9; + +pub const ITIMER_REAL: c_int = 0; +pub const ITIMER_VIRTUAL: c_int = 1; +pub const ITIMER_PROF: c_int = 2; + +pub const PRIO_PROCESS: c_int = 0; +pub const PRIO_PGRP: c_int = 1; +pub const PRIO_USER: c_int = 2; +pub const RLIMIT_CPU: c_int = 0; +pub const RLIMIT_FSIZE: c_int = 1; +pub const RLIMIT_DATA: c_int = 2; +pub const RLIMIT_STACK: c_int = 3; +pub const RLIMIT_CORE: c_int = 4; +pub const RLIMIT_NOFILE: c_int = 5; 
+pub const RLIMIT_AS: c_int = 6; +pub const RLIM_NLIMITS: c_int = 7; +pub const RLIMIT_NLIMITS: c_int = RLIM_NLIMITS; +pub const RLIM_INFINITY: rlim_t = !0; +pub const RLIM_SAVED_MAX: rlim_t = RLIM_INFINITY; +pub const RLIM_SAVED_CUR: rlim_t = RLIM_INFINITY; + +pub const RUSAGE_SELF: c_int = 0; +pub const RUSAGE_CHILDREN: c_int = -1; + +pub const IFF_UP: c_int = 0x1; // interface is up +pub const IFF_BROADCAST: c_int = 0x2; // broadcast address valid +pub const IFF_LOOPBACK: c_int = 0x8; // is a loopback net +pub const IFF_POINTOPOINT: c_int = 0x10; // interface is point-to-point link +pub const IFF_NOTRAILERS: c_int = 0x20; // avoid use of trailers +pub const IFF_RUNNING: c_int = 0x40; // resources allocated +pub const IFF_NOARP: c_int = 0x80; // no address resolution protocol +pub const IFF_PROMISC: c_int = 0x100; // receive all packets +pub const IFF_MULTICAST: c_int = 0x1000; // supports multicast +pub const IFF_LOWER_UP: c_int = 0x10000; // driver signals L1 up +pub const IFF_DORMANT: c_int = 0x20000; // driver signals dormant + +pub const IF_NAMESIZE: size_t = 44; +pub const IFNAMSIZ: size_t = IF_NAMESIZE; + +pub const FIONREAD: c_int = 0x4008667f; +pub const FIONBIO: c_int = 0x8004667e; +pub const FIOASYNC: c_int = 0x8008667d; +pub const FIOCLEX: c_int = 0; // FIXME: does not exist on Cygwin! +pub const SIOCGIFCONF: c_ulong = 0x80107364; +pub const SIOCGIFFLAGS: c_ulong = 0x80507365; +pub const SIOCGIFADDR: c_ulong = 0x80507366; +pub const SIOCGIFBRDADDR: c_ulong = 0x80507367; +pub const SIOCGIFNETMASK: c_ulong = 0x80507368; +pub const SIOCGIFHWADDR: c_ulong = 0x80507369; +pub const SIOCGIFMETRIC: c_ulong = 0x8050736a; +pub const SIOCGIFMTU: c_ulong = 0x8050736b; +pub const SIOCGIFINDEX: c_ulong = 0x8050736c; +pub const SIOGIFINDEX: c_ulong = SIOCGIFINDEX; +pub const SIOCGIFDSTADDR: c_ulong = 0x8050736e; +pub const SOL_SOCKET: c_int = 0xffff; +pub const SO_DEBUG: c_int = 1; +pub const SO_ACCEPTCONN: c_int = 0x0002; +pub const SO_REUSEADDR: c_int = 0x0004; +pub const SO_KEEPALIVE: c_int = 0x0008; +pub const SO_DONTROUTE: c_int = 0x0010; +pub const SO_BROADCAST: c_int = 0x0020; +pub const SO_USELOOPBACK: c_int = 0x0040; +pub const SO_LINGER: c_int = 0x0080; +pub const SO_OOBINLINE: c_int = 0x0100; +pub const SO_PEERCRED: c_int = 0x0200; +pub const SO_PASSCRED: c_int = 0x0400; +pub const SO_SNDBUF: c_int = 0x1001; +pub const SO_RCVBUF: c_int = 0x1002; +pub const SO_SNDLOWAT: c_int = 0x1003; +pub const SO_RCVLOWAT: c_int = 0x1004; +pub const SO_SNDTIMEO: c_int = 0x1005; +pub const SO_RCVTIMEO: c_int = 0x1006; +pub const SO_ERROR: c_int = 0x1007; +pub const SO_TYPE: c_int = 0x1008; + +pub const SCM_RIGHTS: c_int = 0x01; +pub const SCM_CREDENTIALS: c_int = 0x02; +pub const SOCK_STREAM: c_int = 1; +pub const SOCK_DGRAM: c_int = 2; +pub const SOCK_RAW: c_int = 3; +pub const SOCK_RDM: c_int = 4; +pub const SOCK_SEQPACKET: c_int = 5; +pub const SOCK_NONBLOCK: c_int = 0x01000000; +pub const SOCK_CLOEXEC: c_int = 0x02000000; +pub const AF_UNSPEC: c_int = 0; +pub const AF_LOCAL: c_int = 1; +pub const AF_UNIX: c_int = AF_LOCAL; +pub const AF_INET: c_int = 2; +pub const AF_IMPLINK: c_int = 3; +pub const AF_PUP: c_int = 4; +pub const AF_CHAOS: c_int = 5; +pub const AF_NS: c_int = 6; +pub const AF_ISO: c_int = 7; +pub const AF_OSI: c_int = AF_ISO; +pub const AF_ECMA: c_int = 8; +pub const AF_DATAKIT: c_int = 9; +pub const AF_CCITT: c_int = 10; +pub const AF_SNA: c_int = 11; +pub const AF_DECnet: c_int = 12; +pub const AF_DLI: c_int = 13; +pub const AF_LAT: c_int = 14; +pub const AF_HYLINK: c_int = 
15; +pub const AF_APPLETALK: c_int = 16; +pub const AF_NETBIOS: c_int = 17; +pub const AF_INET6: c_int = 23; +pub const PF_UNSPEC: c_int = AF_UNSPEC; +pub const PF_LOCAL: c_int = AF_LOCAL; +pub const PF_UNIX: c_int = PF_LOCAL; +pub const PF_INET: c_int = AF_INET; +pub const PF_IMPLINK: c_int = AF_IMPLINK; +pub const PF_PUP: c_int = AF_PUP; +pub const PF_CHAOS: c_int = AF_CHAOS; +pub const PF_NS: c_int = AF_NS; +pub const PF_ISO: c_int = AF_ISO; +pub const PF_OSI: c_int = AF_ISO; +pub const PF_DATAKIT: c_int = AF_DATAKIT; +pub const PF_CCITT: c_int = AF_CCITT; +pub const PF_SNA: c_int = AF_SNA; +pub const PF_DECnet: c_int = AF_DECnet; +pub const PF_DLI: c_int = AF_DLI; +pub const PF_LAT: c_int = AF_LAT; +pub const PF_HYLINK: c_int = AF_HYLINK; +pub const PF_APPLETALK: c_int = AF_APPLETALK; +pub const PF_NETBIOS: c_int = AF_NETBIOS; +pub const PF_INET6: c_int = AF_INET6; +pub const SOMAXCONN: c_int = 0x7fffffff; +pub const MSG_OOB: c_int = 0x1; +pub const MSG_PEEK: c_int = 0x2; +pub const MSG_DONTROUTE: c_int = 0x4; +pub const MSG_WAITALL: c_int = 0x8; +pub const MSG_DONTWAIT: c_int = 0x10; +pub const MSG_NOSIGNAL: c_int = 0x20; +pub const MSG_TRUNC: c_int = 0x0100; +pub const MSG_CTRUNC: c_int = 0x0200; +pub const MSG_BCAST: c_int = 0x0400; +pub const MSG_MCAST: c_int = 0x0800; +pub const MSG_CMSG_CLOEXEC: c_int = 0x1000; +pub const MSG_EOR: c_int = 0x8000; +pub const SOL_IP: c_int = 0; +pub const SOL_IPV6: c_int = 41; +pub const SOL_TCP: c_int = 6; +pub const SOL_UDP: c_int = 17; +pub const IPTOS_LOWDELAY: u8 = 0x10; +pub const IPTOS_THROUGHPUT: u8 = 0x08; +pub const IPTOS_RELIABILITY: u8 = 0x04; +pub const IPTOS_LOWCOST: u8 = 0x02; +pub const IPTOS_MINCOST: u8 = IPTOS_LOWCOST; +pub const IP_DEFAULT_MULTICAST_TTL: c_int = 1; +pub const IP_DEFAULT_MULTICAST_LOOP: c_int = 1; +pub const IP_OPTIONS: c_int = 1; +pub const IP_HDRINCL: c_int = 2; +pub const IP_TOS: c_int = 3; +pub const IP_TTL: c_int = 4; +pub const IP_MULTICAST_IF: c_int = 9; +pub const IP_MULTICAST_TTL: c_int = 10; +pub const IP_MULTICAST_LOOP: c_int = 11; +pub const IP_ADD_MEMBERSHIP: c_int = 12; +pub const IP_DROP_MEMBERSHIP: c_int = 13; +pub const IP_ADD_SOURCE_MEMBERSHIP: c_int = 15; +pub const IP_DROP_SOURCE_MEMBERSHIP: c_int = 16; +pub const IP_BLOCK_SOURCE: c_int = 17; +pub const IP_UNBLOCK_SOURCE: c_int = 18; +pub const IP_PKTINFO: c_int = 19; +pub const IP_RECVTTL: c_int = 21; +pub const IP_UNICAST_IF: c_int = 31; +pub const IP_RECVTOS: c_int = 40; +pub const IP_MTU_DISCOVER: c_int = 71; +pub const IP_MTU: c_int = 73; +pub const IP_RECVERR: c_int = 75; +pub const IP_PMTUDISC_WANT: c_int = 0; +pub const IP_PMTUDISC_DO: c_int = 1; +pub const IP_PMTUDISC_DONT: c_int = 2; +pub const IP_PMTUDISC_PROBE: c_int = 3; +pub const IPV6_HOPOPTS: c_int = 1; +pub const IPV6_HDRINCL: c_int = 2; +pub const IPV6_UNICAST_HOPS: c_int = 4; +pub const IPV6_MULTICAST_IF: c_int = 9; +pub const IPV6_MULTICAST_HOPS: c_int = 10; +pub const IPV6_MULTICAST_LOOP: c_int = 11; +pub const IPV6_ADD_MEMBERSHIP: c_int = 12; +pub const IPV6_DROP_MEMBERSHIP: c_int = 13; +pub const IPV6_JOIN_GROUP: c_int = 12; +pub const IPV6_LEAVE_GROUP: c_int = 13; +pub const IPV6_DONTFRAG: c_int = 14; +pub const IPV6_PKTINFO: c_int = 19; +pub const IPV6_HOPLIMIT: c_int = 21; +pub const IPV6_CHECKSUM: c_int = 26; +pub const IPV6_V6ONLY: c_int = 27; +pub const IPV6_UNICAST_IF: c_int = 31; +pub const IPV6_RTHDR: c_int = 32; +pub const IPV6_RECVRTHDR: c_int = 38; +pub const IPV6_TCLASS: c_int = 39; +pub const IPV6_RECVTCLASS: c_int = 40; +pub const IPV6_MTU_DISCOVER: c_int = 
71; +pub const IPV6_MTU: c_int = 72; +pub const IPV6_RECVERR: c_int = 75; +pub const IPV6_PMTUDISC_WANT: c_int = 0; +pub const IPV6_PMTUDISC_DO: c_int = 1; +pub const IPV6_PMTUDISC_DONT: c_int = 2; +pub const IPV6_PMTUDISC_PROBE: c_int = 3; +pub const MCAST_JOIN_GROUP: c_int = 41; +pub const MCAST_LEAVE_GROUP: c_int = 42; +pub const MCAST_BLOCK_SOURCE: c_int = 43; +pub const MCAST_UNBLOCK_SOURCE: c_int = 44; +pub const MCAST_JOIN_SOURCE_GROUP: c_int = 45; +pub const MCAST_LEAVE_SOURCE_GROUP: c_int = 46; +pub const MCAST_INCLUDE: c_int = 0; +pub const MCAST_EXCLUDE: c_int = 1; +pub const SHUT_RD: c_int = 0; +pub const SHUT_WR: c_int = 1; +pub const SHUT_RDWR: c_int = 2; + +pub const S_BLKSIZE: mode_t = 1024; +pub const S_IREAD: mode_t = 256; +pub const S_IWRITE: mode_t = 128; +pub const S_IEXEC: mode_t = 64; +pub const S_ENFMT: mode_t = 1024; +pub const S_IFMT: mode_t = 61440; +pub const S_IFDIR: mode_t = 16384; +pub const S_IFCHR: mode_t = 8192; +pub const S_IFBLK: mode_t = 24576; +pub const S_IFREG: mode_t = 32768; +pub const S_IFLNK: mode_t = 40960; +pub const S_IFSOCK: mode_t = 49152; +pub const S_IFIFO: mode_t = 4096; +pub const S_IRWXU: mode_t = 448; +pub const S_IRUSR: mode_t = 256; +pub const S_IWUSR: mode_t = 128; +pub const S_IXUSR: mode_t = 64; +pub const S_IRWXG: mode_t = 56; +pub const S_IRGRP: mode_t = 32; +pub const S_IWGRP: mode_t = 16; +pub const S_IXGRP: mode_t = 8; +pub const S_IRWXO: mode_t = 7; +pub const S_IROTH: mode_t = 4; +pub const S_IWOTH: mode_t = 2; +pub const S_IXOTH: mode_t = 1; +pub const UTIME_NOW: c_long = -2; +pub const UTIME_OMIT: c_long = -1; + +pub const ARG_MAX: c_int = 32000; +pub const CHILD_MAX: c_int = 256; +pub const IOV_MAX: c_int = 1024; +pub const PTHREAD_STACK_MIN: size_t = 65536; +pub const PATH_MAX: c_int = 4096; +pub const PIPE_BUF: usize = 4096; +pub const NGROUPS_MAX: c_int = 1024; + +pub const FORK_RELOAD: c_int = 1; +pub const FORK_NO_RELOAD: c_int = 0; + +pub const RTLD_DEFAULT: *mut c_void = ptr::null_mut(); +pub const RTLD_LOCAL: c_int = 0; +pub const RTLD_LAZY: c_int = 1; +pub const RTLD_NOW: c_int = 2; +pub const RTLD_GLOBAL: c_int = 4; +pub const RTLD_NODELETE: c_int = 8; +pub const RTLD_NOLOAD: c_int = 16; +pub const RTLD_DEEPBIND: c_int = 32; + +/// IP6 hop-by-hop options +pub const IPPROTO_HOPOPTS: c_int = 0; + +/// gateway mgmt protocol +pub const IPPROTO_IGMP: c_int = 2; + +/// IPIP tunnels (older KA9Q tunnels use 94) +pub const IPPROTO_IPIP: c_int = 4; + +/// exterior gateway protocol +pub const IPPROTO_EGP: c_int = 8; + +/// pup +pub const IPPROTO_PUP: c_int = 12; + +/// xns idp +pub const IPPROTO_IDP: c_int = 22; + +/// IP6 routing header +pub const IPPROTO_ROUTING: c_int = 43; + +/// IP6 fragmentation header +pub const IPPROTO_FRAGMENT: c_int = 44; + +/// IP6 Encap Sec. 
Payload +pub const IPPROTO_ESP: c_int = 50; + +/// IP6 Auth Header +pub const IPPROTO_AH: c_int = 51; + +/// IP6 no next header +pub const IPPROTO_NONE: c_int = 59; + +/// IP6 destination option +pub const IPPROTO_DSTOPTS: c_int = 60; + +pub const IPPROTO_RAW: c_int = 255; +pub const IPPROTO_MAX: c_int = 256; + +pub const AI_PASSIVE: c_int = 0x1; +pub const AI_CANONNAME: c_int = 0x2; +pub const AI_NUMERICHOST: c_int = 0x4; +pub const AI_NUMERICSERV: c_int = 0x8; +pub const AI_ALL: c_int = 0x100; +pub const AI_ADDRCONFIG: c_int = 0x400; +pub const AI_V4MAPPED: c_int = 0x800; +pub const NI_NOFQDN: c_int = 0x1; +pub const NI_NUMERICHOST: c_int = 0x2; +pub const NI_NAMEREQD: c_int = 0x4; +pub const NI_NUMERICSERV: c_int = 0x8; +pub const NI_DGRAM: c_int = 0x10; +pub const NI_MAXHOST: c_int = 1025; +pub const NI_MAXSERV: c_int = 32; +pub const EAI_AGAIN: c_int = 2; +pub const EAI_BADFLAGS: c_int = 3; +pub const EAI_FAIL: c_int = 4; +pub const EAI_FAMILY: c_int = 5; +pub const EAI_MEMORY: c_int = 6; +pub const EAI_NODATA: c_int = 7; +pub const EAI_NONAME: c_int = 8; +pub const EAI_SERVICE: c_int = 9; +pub const EAI_SOCKTYPE: c_int = 10; +pub const EAI_SYSTEM: c_int = 11; +pub const EAI_OVERFLOW: c_int = 14; + +pub const POLLIN: c_short = 0x1; +pub const POLLPRI: c_short = 0x2; +pub const POLLOUT: c_short = 0x4; +pub const POLLERR: c_short = 0x8; +pub const POLLHUP: c_short = 0x10; +pub const POLLNVAL: c_short = 0x20; +pub const POLLRDNORM: c_short = 0x1; +pub const POLLRDBAND: c_short = 0x2; +pub const POLLWRNORM: c_short = 0x4; +pub const POLLWRBAND: c_short = 0x4; + +pub const LC_ALL: c_int = 0; +pub const LC_COLLATE: c_int = 1; +pub const LC_CTYPE: c_int = 2; +pub const LC_MONETARY: c_int = 3; +pub const LC_NUMERIC: c_int = 4; +pub const LC_TIME: c_int = 5; +pub const LC_MESSAGES: c_int = 6; +pub const LC_ALL_MASK: c_int = 1 << 0; +pub const LC_COLLATE_MASK: c_int = 1 << 1; +pub const LC_CTYPE_MASK: c_int = 1 << 2; +pub const LC_MONETARY_MASK: c_int = 1 << 3; +pub const LC_NUMERIC_MASK: c_int = 1 << 4; +pub const LC_TIME_MASK: c_int = 1 << 5; +pub const LC_MESSAGES_MASK: c_int = 1 << 6; +pub const LC_GLOBAL_LOCALE: locale_t = -1isize as locale_t; + +pub const SEM_FAILED: *mut sem_t = core::ptr::null_mut(); + +pub const ST_RDONLY: c_ulong = 0x80000; +pub const ST_NOSUID: c_ulong = 0; + +pub const TIOCMGET: c_int = 0x5415; +pub const TIOCMBIS: c_int = 0x5416; +pub const TIOCMBIC: c_int = 0x5417; +pub const TIOCMSET: c_int = 0x5418; +pub const TIOCINQ: c_int = 0x541B; +pub const TIOCSCTTY: c_int = 0x540E; +pub const TIOCSBRK: c_int = 0x5427; +pub const TIOCCBRK: c_int = 0x5428; +pub const TIOCM_DTR: c_int = 0x002; +pub const TIOCM_RTS: c_int = 0x004; +pub const TIOCM_CTS: c_int = 0x020; +pub const TIOCM_CAR: c_int = 0x040; +pub const TIOCM_RNG: c_int = 0x080; +pub const TIOCM_CD: c_int = TIOCM_CAR; +pub const TIOCM_RI: c_int = TIOCM_RNG; +pub const TCOOFF: c_int = 0; +pub const TCOON: c_int = 1; +pub const TCIOFF: c_int = 2; +pub const TCION: c_int = 3; +pub const TCGETA: c_int = 5; +pub const TCSETA: c_int = 6; +pub const TCSETAW: c_int = 7; +pub const TCSETAF: c_int = 8; +pub const TCIFLUSH: c_int = 0; +pub const TCOFLUSH: c_int = 1; +pub const TCIOFLUSH: c_int = 2; +pub const TCFLSH: c_int = 3; +pub const TCSAFLUSH: c_int = 1; +pub const TCSANOW: c_int = 2; +pub const TCSADRAIN: c_int = 3; +pub const TIOCPKT: c_int = 6; +pub const TIOCPKT_DATA: c_int = 0x0; +pub const TIOCPKT_FLUSHREAD: c_int = 0x1; +pub const TIOCPKT_FLUSHWRITE: c_int = 0x2; +pub const TIOCPKT_STOP: c_int = 0x4; +pub const 
TIOCPKT_START: c_int = 0x8; +pub const TIOCPKT_NOSTOP: c_int = 0x10; +pub const TIOCPKT_DOSTOP: c_int = 0x20; +pub const IGNBRK: tcflag_t = 0x00001; +pub const BRKINT: tcflag_t = 0x00002; +pub const IGNPAR: tcflag_t = 0x00004; +pub const IMAXBEL: tcflag_t = 0x00008; +pub const INPCK: tcflag_t = 0x00010; +pub const ISTRIP: tcflag_t = 0x00020; +pub const INLCR: tcflag_t = 0x00040; +pub const IGNCR: tcflag_t = 0x00080; +pub const ICRNL: tcflag_t = 0x00100; +pub const IXON: tcflag_t = 0x00400; +pub const IXOFF: tcflag_t = 0x01000; +pub const IUCLC: tcflag_t = 0x04000; +pub const IXANY: tcflag_t = 0x08000; +pub const PARMRK: tcflag_t = 0x10000; +pub const IUTF8: tcflag_t = 0x20000; +pub const OPOST: tcflag_t = 0x00001; +pub const OLCUC: tcflag_t = 0x00002; +pub const OCRNL: tcflag_t = 0x00004; +pub const ONLCR: tcflag_t = 0x00008; +pub const ONOCR: tcflag_t = 0x00010; +pub const ONLRET: tcflag_t = 0x00020; +pub const OFILL: tcflag_t = 0x00040; +pub const CRDLY: tcflag_t = 0x00180; +pub const CR0: tcflag_t = 0x00000; +pub const CR1: tcflag_t = 0x00080; +pub const CR2: tcflag_t = 0x00100; +pub const CR3: tcflag_t = 0x00180; +pub const NLDLY: tcflag_t = 0x00200; +pub const NL0: tcflag_t = 0x00000; +pub const NL1: tcflag_t = 0x00200; +pub const BSDLY: tcflag_t = 0x00400; +pub const BS0: tcflag_t = 0x00000; +pub const BS1: tcflag_t = 0x00400; +pub const TABDLY: tcflag_t = 0x01800; +pub const TAB0: tcflag_t = 0x00000; +pub const TAB1: tcflag_t = 0x00800; +pub const TAB2: tcflag_t = 0x01000; +pub const TAB3: tcflag_t = 0x01800; +pub const XTABS: tcflag_t = 0x01800; +pub const VTDLY: tcflag_t = 0x02000; +pub const VT0: tcflag_t = 0x00000; +pub const VT1: tcflag_t = 0x02000; +pub const FFDLY: tcflag_t = 0x04000; +pub const FF0: tcflag_t = 0x00000; +pub const FF1: tcflag_t = 0x04000; +pub const OFDEL: tcflag_t = 0x08000; +pub const CBAUD: tcflag_t = 0x0100f; +pub const B0: speed_t = 0x00000; +pub const B50: speed_t = 0x00001; +pub const B75: speed_t = 0x00002; +pub const B110: speed_t = 0x00003; +pub const B134: speed_t = 0x00004; +pub const B150: speed_t = 0x00005; +pub const B200: speed_t = 0x00006; +pub const B300: speed_t = 0x00007; +pub const B600: speed_t = 0x00008; +pub const B1200: speed_t = 0x00009; +pub const B1800: speed_t = 0x0000a; +pub const B2400: speed_t = 0x0000b; +pub const B4800: speed_t = 0x0000c; +pub const B9600: speed_t = 0x0000d; +pub const B19200: speed_t = 0x0000e; +pub const B38400: speed_t = 0x0000f; +pub const CSIZE: tcflag_t = 0x00030; +pub const CS5: tcflag_t = 0x00000; +pub const CS6: tcflag_t = 0x00010; +pub const CS7: tcflag_t = 0x00020; +pub const CS8: tcflag_t = 0x00030; +pub const CSTOPB: tcflag_t = 0x00040; +pub const CREAD: tcflag_t = 0x00080; +pub const PARENB: tcflag_t = 0x00100; +pub const PARODD: tcflag_t = 0x00200; +pub const HUPCL: tcflag_t = 0x00400; +pub const CLOCAL: tcflag_t = 0x00800; +pub const CBAUDEX: tcflag_t = 0x0100f; +pub const B57600: speed_t = 0x01001; +pub const B115200: speed_t = 0x01002; +pub const B230400: speed_t = 0x01004; +pub const B460800: speed_t = 0x01006; +pub const B500000: speed_t = 0x01007; +pub const B576000: speed_t = 0x01008; +pub const B921600: speed_t = 0x01009; +pub const B1000000: speed_t = 0x0100a; +pub const B1152000: speed_t = 0x0100b; +pub const B1500000: speed_t = 0x0100c; +pub const B2000000: speed_t = 0x0100d; +pub const B2500000: speed_t = 0x0100e; +pub const B3000000: speed_t = 0x0100f; +pub const CRTSCTS: tcflag_t = 0x08000; +pub const CMSPAR: tcflag_t = 0x40000000; +pub const ISIG: tcflag_t = 0x0001; +pub const 
ICANON: tcflag_t = 0x0002; +pub const ECHO: tcflag_t = 0x0004; +pub const ECHOE: tcflag_t = 0x0008; +pub const ECHOK: tcflag_t = 0x0010; +pub const ECHONL: tcflag_t = 0x0020; +pub const NOFLSH: tcflag_t = 0x0040; +pub const TOSTOP: tcflag_t = 0x0080; +pub const IEXTEN: tcflag_t = 0x0100; +pub const FLUSHO: tcflag_t = 0x0200; +pub const ECHOKE: tcflag_t = 0x0400; +pub const ECHOCTL: tcflag_t = 0x0800; +pub const VDISCARD: usize = 1; +pub const VEOL: usize = 2; +pub const VEOL2: usize = 3; +pub const VEOF: usize = 4; +pub const VERASE: usize = 5; +pub const VINTR: usize = 6; +pub const VKILL: usize = 7; +pub const VLNEXT: usize = 8; +pub const VMIN: usize = 9; +pub const VQUIT: usize = 10; +pub const VREPRINT: usize = 11; +pub const VSTART: usize = 12; +pub const VSTOP: usize = 13; +pub const VSUSP: usize = 14; +pub const VSWTC: usize = 15; +pub const VTIME: usize = 16; +pub const VWERASE: usize = 17; +pub const NCCS: usize = 18; + +pub const TIOCGWINSZ: c_int = 0x5401; +pub const TIOCSWINSZ: c_int = 0x5402; +pub const TIOCLINUX: c_int = 0x5403; +pub const TIOCGPGRP: c_int = 0x540f; +pub const TIOCSPGRP: c_int = 0x5410; + +pub const WNOHANG: c_int = 1; +pub const WUNTRACED: c_int = 2; +pub const WCONTINUED: c_int = 8; + +pub const EXIT_FAILURE: c_int = 1; +pub const EXIT_SUCCESS: c_int = 0; + +pub const PROT_NONE: c_int = 0; +pub const PROT_READ: c_int = 1; +pub const PROT_WRITE: c_int = 2; +pub const PROT_EXEC: c_int = 4; +pub const MAP_FILE: c_int = 0; +pub const MAP_SHARED: c_int = 1; +pub const MAP_PRIVATE: c_int = 2; +pub const MAP_TYPE: c_int = 0xf; +pub const MAP_FIXED: c_int = 0x10; +pub const MAP_ANON: c_int = 0x20; +pub const MAP_ANONYMOUS: c_int = MAP_ANON; +pub const MAP_NORESERVE: c_int = 0x4000; +pub const MAP_FAILED: *mut c_void = !0 as *mut c_void; +pub const MS_ASYNC: c_int = 1; +pub const MS_SYNC: c_int = 2; +pub const MS_INVALIDATE: c_int = 4; +pub const POSIX_MADV_NORMAL: c_int = 0; +pub const POSIX_MADV_SEQUENTIAL: c_int = 1; +pub const POSIX_MADV_RANDOM: c_int = 2; +pub const POSIX_MADV_WILLNEED: c_int = 3; +pub const POSIX_MADV_DONTNEED: c_int = 4; +pub const MADV_NORMAL: c_int = 0; +pub const MADV_SEQUENTIAL: c_int = 1; +pub const MADV_RANDOM: c_int = 2; +pub const MADV_WILLNEED: c_int = 3; +pub const MADV_DONTNEED: c_int = 4; + +pub const F_ULOCK: c_int = 0; +pub const F_LOCK: c_int = 1; +pub const F_TLOCK: c_int = 2; +pub const F_TEST: c_int = 3; + +pub const F_OK: c_int = 0; +pub const R_OK: c_int = 4; +pub const W_OK: c_int = 2; +pub const X_OK: c_int = 1; +pub const SEEK_SET: c_int = 0; +pub const SEEK_CUR: c_int = 1; +pub const SEEK_END: c_int = 2; +pub const STDIN_FILENO: c_int = 0; +pub const STDOUT_FILENO: c_int = 1; +pub const STDERR_FILENO: c_int = 2; +pub const _SC_ARG_MAX: c_int = 0; +pub const _SC_CHILD_MAX: c_int = 1; +pub const _SC_CLK_TCK: c_int = 2; +pub const _SC_NGROUPS_MAX: c_int = 3; +pub const _SC_OPEN_MAX: c_int = 4; +pub const _SC_JOB_CONTROL: c_int = 5; +pub const _SC_SAVED_IDS: c_int = 6; +pub const _SC_VERSION: c_int = 7; +pub const _SC_PAGESIZE: c_int = 8; +pub const _SC_PAGE_SIZE: c_int = _SC_PAGESIZE; +pub const _SC_NPROCESSORS_CONF: c_int = 9; +pub const _SC_NPROCESSORS_ONLN: c_int = 10; +pub const _SC_PHYS_PAGES: c_int = 11; +pub const _SC_AVPHYS_PAGES: c_int = 12; +pub const _SC_MQ_OPEN_MAX: c_int = 13; +pub const _SC_MQ_PRIO_MAX: c_int = 14; +pub const _SC_RTSIG_MAX: c_int = 15; +pub const _SC_SEM_NSEMS_MAX: c_int = 16; +pub const _SC_SEM_VALUE_MAX: c_int = 17; +pub const _SC_SIGQUEUE_MAX: c_int = 18; +pub const _SC_TIMER_MAX: c_int 
= 19; +pub const _SC_TZNAME_MAX: c_int = 20; +pub const _SC_ASYNCHRONOUS_IO: c_int = 21; +pub const _SC_FSYNC: c_int = 22; +pub const _SC_MAPPED_FILES: c_int = 23; +pub const _SC_MEMLOCK: c_int = 24; +pub const _SC_MEMLOCK_RANGE: c_int = 25; +pub const _SC_MEMORY_PROTECTION: c_int = 26; +pub const _SC_MESSAGE_PASSING: c_int = 27; +pub const _SC_PRIORITIZED_IO: c_int = 28; +pub const _SC_REALTIME_SIGNALS: c_int = 29; +pub const _SC_SEMAPHORES: c_int = 30; +pub const _SC_SHARED_MEMORY_OBJECTS: c_int = 31; +pub const _SC_SYNCHRONIZED_IO: c_int = 32; +pub const _SC_TIMERS: c_int = 33; +pub const _SC_AIO_LISTIO_MAX: c_int = 34; +pub const _SC_AIO_MAX: c_int = 35; +pub const _SC_AIO_PRIO_DELTA_MAX: c_int = 36; +pub const _SC_DELAYTIMER_MAX: c_int = 37; +pub const _SC_THREAD_KEYS_MAX: c_int = 38; +pub const _SC_THREAD_STACK_MIN: c_int = 39; +pub const _SC_THREAD_THREADS_MAX: c_int = 40; +pub const _SC_TTY_NAME_MAX: c_int = 41; +pub const _SC_THREADS: c_int = 42; +pub const _SC_THREAD_ATTR_STACKADDR: c_int = 43; +pub const _SC_THREAD_ATTR_STACKSIZE: c_int = 44; +pub const _SC_THREAD_PRIORITY_SCHEDULING: c_int = 45; +pub const _SC_THREAD_PRIO_INHERIT: c_int = 46; +pub const _SC_THREAD_PRIO_PROTECT: c_int = 47; +pub const _SC_THREAD_PRIO_CEILING: c_int = _SC_THREAD_PRIO_PROTECT; +pub const _SC_THREAD_PROCESS_SHARED: c_int = 48; +pub const _SC_THREAD_SAFE_FUNCTIONS: c_int = 49; +pub const _SC_GETGR_R_SIZE_MAX: c_int = 50; +pub const _SC_GETPW_R_SIZE_MAX: c_int = 51; +pub const _SC_LOGIN_NAME_MAX: c_int = 52; +pub const _SC_THREAD_DESTRUCTOR_ITERATIONS: c_int = 53; +pub const _SC_ADVISORY_INFO: c_int = 54; +pub const _SC_ATEXIT_MAX: c_int = 55; +pub const _SC_BARRIERS: c_int = 56; +pub const _SC_BC_BASE_MAX: c_int = 57; +pub const _SC_BC_DIM_MAX: c_int = 58; +pub const _SC_BC_SCALE_MAX: c_int = 59; +pub const _SC_BC_STRING_MAX: c_int = 60; +pub const _SC_CLOCK_SELECTION: c_int = 61; +pub const _SC_COLL_WEIGHTS_MAX: c_int = 62; +pub const _SC_CPUTIME: c_int = 63; +pub const _SC_EXPR_NEST_MAX: c_int = 64; +pub const _SC_HOST_NAME_MAX: c_int = 65; +pub const _SC_IOV_MAX: c_int = 66; +pub const _SC_IPV6: c_int = 67; +pub const _SC_LINE_MAX: c_int = 68; +pub const _SC_MONOTONIC_CLOCK: c_int = 69; +pub const _SC_RAW_SOCKETS: c_int = 70; +pub const _SC_READER_WRITER_LOCKS: c_int = 71; +pub const _SC_REGEXP: c_int = 72; +pub const _SC_RE_DUP_MAX: c_int = 73; +pub const _SC_SHELL: c_int = 74; +pub const _SC_SPAWN: c_int = 75; +pub const _SC_SPIN_LOCKS: c_int = 76; +pub const _SC_SPORADIC_SERVER: c_int = 77; +pub const _SC_SS_REPL_MAX: c_int = 78; +pub const _SC_SYMLOOP_MAX: c_int = 79; +pub const _SC_THREAD_CPUTIME: c_int = 80; +pub const _SC_THREAD_SPORADIC_SERVER: c_int = 81; +pub const _SC_TIMEOUTS: c_int = 82; +pub const _SC_TRACE: c_int = 83; +pub const _SC_TRACE_EVENT_FILTER: c_int = 84; +pub const _SC_TRACE_EVENT_NAME_MAX: c_int = 85; +pub const _SC_TRACE_INHERIT: c_int = 86; +pub const _SC_TRACE_LOG: c_int = 87; +pub const _SC_TRACE_NAME_MAX: c_int = 88; +pub const _SC_TRACE_SYS_MAX: c_int = 89; +pub const _SC_TRACE_USER_EVENT_MAX: c_int = 90; +pub const _SC_TYPED_MEMORY_OBJECTS: c_int = 91; +pub const _SC_V7_ILP32_OFF32: c_int = 92; +pub const _SC_V6_ILP32_OFF32: c_int = _SC_V7_ILP32_OFF32; +pub const _SC_XBS5_ILP32_OFF32: c_int = _SC_V7_ILP32_OFF32; +pub const _SC_V7_ILP32_OFFBIG: c_int = 93; +pub const _SC_V6_ILP32_OFFBIG: c_int = _SC_V7_ILP32_OFFBIG; +pub const _SC_XBS5_ILP32_OFFBIG: c_int = _SC_V7_ILP32_OFFBIG; +pub const _SC_V7_LP64_OFF64: c_int = 94; +pub const _SC_V6_LP64_OFF64: c_int = 
_SC_V7_LP64_OFF64; +pub const _SC_XBS5_LP64_OFF64: c_int = _SC_V7_LP64_OFF64; +pub const _SC_V7_LPBIG_OFFBIG: c_int = 95; +pub const _SC_V6_LPBIG_OFFBIG: c_int = _SC_V7_LPBIG_OFFBIG; +pub const _SC_XBS5_LPBIG_OFFBIG: c_int = _SC_V7_LPBIG_OFFBIG; +pub const _SC_XOPEN_CRYPT: c_int = 96; +pub const _SC_XOPEN_ENH_I18N: c_int = 97; +pub const _SC_XOPEN_LEGACY: c_int = 98; +pub const _SC_XOPEN_REALTIME: c_int = 99; +pub const _SC_STREAM_MAX: c_int = 100; +pub const _SC_PRIORITY_SCHEDULING: c_int = 101; +pub const _SC_XOPEN_REALTIME_THREADS: c_int = 102; +pub const _SC_XOPEN_SHM: c_int = 103; +pub const _SC_XOPEN_STREAMS: c_int = 104; +pub const _SC_XOPEN_UNIX: c_int = 105; +pub const _SC_XOPEN_VERSION: c_int = 106; +pub const _SC_2_CHAR_TERM: c_int = 107; +pub const _SC_2_C_BIND: c_int = 108; +pub const _SC_2_C_DEV: c_int = 109; +pub const _SC_2_FORT_DEV: c_int = 110; +pub const _SC_2_FORT_RUN: c_int = 111; +pub const _SC_2_LOCALEDEF: c_int = 112; +pub const _SC_2_PBS: c_int = 113; +pub const _SC_2_PBS_ACCOUNTING: c_int = 114; +pub const _SC_2_PBS_CHECKPOINT: c_int = 115; +pub const _SC_2_PBS_LOCATE: c_int = 116; +pub const _SC_2_PBS_MESSAGE: c_int = 117; +pub const _SC_2_PBS_TRACK: c_int = 118; +pub const _SC_2_SW_DEV: c_int = 119; +pub const _SC_2_UPE: c_int = 120; +pub const _SC_2_VERSION: c_int = 121; +pub const _SC_THREAD_ROBUST_PRIO_INHERIT: c_int = 122; +pub const _SC_THREAD_ROBUST_PRIO_PROTECT: c_int = 123; +pub const _SC_XOPEN_UUCP: c_int = 124; +pub const _SC_LEVEL1_ICACHE_SIZE: c_int = 125; +pub const _SC_LEVEL1_ICACHE_ASSOC: c_int = 126; +pub const _SC_LEVEL1_ICACHE_LINESIZE: c_int = 127; +pub const _SC_LEVEL1_DCACHE_SIZE: c_int = 128; +pub const _SC_LEVEL1_DCACHE_ASSOC: c_int = 129; +pub const _SC_LEVEL1_DCACHE_LINESIZE: c_int = 130; +pub const _SC_LEVEL2_CACHE_SIZE: c_int = 131; +pub const _SC_LEVEL2_CACHE_ASSOC: c_int = 132; +pub const _SC_LEVEL2_CACHE_LINESIZE: c_int = 133; +pub const _SC_LEVEL3_CACHE_SIZE: c_int = 134; +pub const _SC_LEVEL3_CACHE_ASSOC: c_int = 135; +pub const _SC_LEVEL3_CACHE_LINESIZE: c_int = 136; +pub const _SC_LEVEL4_CACHE_SIZE: c_int = 137; +pub const _SC_LEVEL4_CACHE_ASSOC: c_int = 138; +pub const _SC_LEVEL4_CACHE_LINESIZE: c_int = 139; +pub const _PC_LINK_MAX: c_int = 0; +pub const _PC_MAX_CANON: c_int = 1; +pub const _PC_MAX_INPUT: c_int = 2; +pub const _PC_NAME_MAX: c_int = 3; +pub const _PC_PATH_MAX: c_int = 4; +pub const _PC_PIPE_BUF: c_int = 5; +pub const _PC_CHOWN_RESTRICTED: c_int = 6; +pub const _PC_NO_TRUNC: c_int = 7; +pub const _PC_VDISABLE: c_int = 8; +pub const _PC_ASYNC_IO: c_int = 9; +pub const _PC_PRIO_IO: c_int = 10; +pub const _PC_SYNC_IO: c_int = 11; +pub const _PC_FILESIZEBITS: c_int = 12; +pub const _PC_2_SYMLINKS: c_int = 13; +pub const _PC_SYMLINK_MAX: c_int = 14; +pub const _PC_ALLOC_SIZE_MIN: c_int = 15; +pub const _PC_REC_INCR_XFER_SIZE: c_int = 16; +pub const _PC_REC_MAX_XFER_SIZE: c_int = 17; +pub const _PC_REC_MIN_XFER_SIZE: c_int = 18; +pub const _PC_REC_XFER_ALIGN: c_int = 19; +pub const _PC_TIMESTAMP_RESOLUTION: c_int = 20; +pub const _CS_PATH: c_int = 0; + +pub const O_ACCMODE: c_int = 0x3; +pub const O_RDONLY: c_int = 0; +pub const O_WRONLY: c_int = 1; +pub const O_RDWR: c_int = 2; +pub const O_APPEND: c_int = 0x0008; +pub const O_CREAT: c_int = 0x0200; +pub const O_TRUNC: c_int = 0x0400; +pub const O_EXCL: c_int = 0x0800; +pub const O_SYNC: c_int = 0x2000; +pub const O_NONBLOCK: c_int = 0x4000; +pub const O_NOCTTY: c_int = 0x8000; +pub const O_CLOEXEC: c_int = 0x40000; +pub const O_NOFOLLOW: c_int = 0x100000; +pub const 
O_DIRECTORY: c_int = 0x200000; +pub const O_EXEC: c_int = 0x400000; +pub const O_SEARCH: c_int = 0x400000; +pub const O_DIRECT: c_int = 0x80000; +pub const O_DSYNC: c_int = 0x2000; +pub const O_RSYNC: c_int = 0x2000; +pub const O_TMPFILE: c_int = 0x800000; +pub const O_NOATIME: c_int = 0x1000000; +pub const O_PATH: c_int = 0x2000000; +pub const F_DUPFD: c_int = 0; +pub const F_GETFD: c_int = 1; +pub const F_SETFD: c_int = 2; +pub const F_GETFL: c_int = 3; +pub const F_SETFL: c_int = 4; +pub const F_GETOWN: c_int = 5; +pub const F_SETOWN: c_int = 6; +pub const F_GETLK: c_int = 7; +pub const F_SETLK: c_int = 8; +pub const F_SETLKW: c_int = 9; +pub const F_RGETLK: c_int = 10; +pub const F_RSETLK: c_int = 11; +pub const F_CNVT: c_int = 12; +pub const F_RSETLKW: c_int = 13; +pub const F_DUPFD_CLOEXEC: c_int = 14; +pub const F_RDLCK: c_int = 1; +pub const F_WRLCK: c_int = 2; +pub const F_UNLCK: c_int = 3; +pub const AT_FDCWD: c_int = -2; +pub const AT_EACCESS: c_int = 1; +pub const AT_SYMLINK_NOFOLLOW: c_int = 2; +pub const AT_SYMLINK_FOLLOW: c_int = 4; +pub const AT_REMOVEDIR: c_int = 8; +pub const AT_EMPTY_PATH: c_int = 16; +pub const LOCK_SH: c_int = 1; +pub const LOCK_EX: c_int = 2; +pub const LOCK_NB: c_int = 4; +pub const LOCK_UN: c_int = 8; + +pub const EPERM: c_int = 1; +pub const ENOENT: c_int = 2; +pub const ESRCH: c_int = 3; +pub const EINTR: c_int = 4; +pub const EIO: c_int = 5; +pub const ENXIO: c_int = 6; +pub const E2BIG: c_int = 7; +pub const ENOEXEC: c_int = 8; +pub const EBADF: c_int = 9; +pub const ECHILD: c_int = 10; +pub const EAGAIN: c_int = 11; +pub const ENOMEM: c_int = 12; +pub const EACCES: c_int = 13; +pub const EFAULT: c_int = 14; +pub const ENOTBLK: c_int = 15; +pub const EBUSY: c_int = 16; +pub const EEXIST: c_int = 17; +pub const EXDEV: c_int = 18; +pub const ENODEV: c_int = 19; +pub const ENOTDIR: c_int = 20; +pub const EISDIR: c_int = 21; +pub const EINVAL: c_int = 22; +pub const ENFILE: c_int = 23; +pub const EMFILE: c_int = 24; +pub const ENOTTY: c_int = 25; +pub const ETXTBSY: c_int = 26; +pub const EFBIG: c_int = 27; +pub const ENOSPC: c_int = 28; +pub const ESPIPE: c_int = 29; +pub const EROFS: c_int = 30; +pub const EMLINK: c_int = 31; +pub const EPIPE: c_int = 32; +pub const EDOM: c_int = 33; +pub const ERANGE: c_int = 34; +pub const ENOMSG: c_int = 35; +pub const EIDRM: c_int = 36; +pub const ECHRNG: c_int = 37; +pub const EL2NSYNC: c_int = 38; +pub const EL3HLT: c_int = 39; +pub const EL3RST: c_int = 40; +pub const ELNRNG: c_int = 41; +pub const EUNATCH: c_int = 42; +pub const ENOCSI: c_int = 43; +pub const EL2HLT: c_int = 44; +pub const EDEADLK: c_int = 45; +pub const ENOLCK: c_int = 46; +pub const EBADE: c_int = 50; +pub const EBADR: c_int = 51; +pub const EXFULL: c_int = 52; +pub const ENOANO: c_int = 53; +pub const EBADRQC: c_int = 54; +pub const EBADSLT: c_int = 55; +pub const EDEADLOCK: c_int = 56; +pub const EBFONT: c_int = 57; +pub const ENOSTR: c_int = 60; +pub const ENODATA: c_int = 61; +pub const ETIME: c_int = 62; +pub const ENOSR: c_int = 63; +pub const ENONET: c_int = 64; +pub const ENOPKG: c_int = 65; +pub const EREMOTE: c_int = 66; +pub const ENOLINK: c_int = 67; +pub const EADV: c_int = 68; +pub const ESRMNT: c_int = 69; +pub const ECOMM: c_int = 70; +pub const EPROTO: c_int = 71; +pub const EMULTIHOP: c_int = 74; +pub const EDOTDOT: c_int = 76; +pub const EBADMSG: c_int = 77; +pub const EFTYPE: c_int = 79; +pub const ENOTUNIQ: c_int = 80; +pub const EBADFD: c_int = 81; +pub const EREMCHG: c_int = 82; +pub const ELIBACC: c_int = 83; +pub 
const ELIBBAD: c_int = 84; +pub const ELIBSCN: c_int = 85; +pub const ELIBMAX: c_int = 86; +pub const ELIBEXEC: c_int = 87; +pub const ENOSYS: c_int = 88; +pub const ENOTEMPTY: c_int = 90; +pub const ENAMETOOLONG: c_int = 91; +pub const ELOOP: c_int = 92; +pub const EOPNOTSUPP: c_int = 95; +pub const EPFNOSUPPORT: c_int = 96; +pub const ECONNRESET: c_int = 104; +pub const ENOBUFS: c_int = 105; +pub const EAFNOSUPPORT: c_int = 106; +pub const EPROTOTYPE: c_int = 107; +pub const ENOTSOCK: c_int = 108; +pub const ENOPROTOOPT: c_int = 109; +pub const ESHUTDOWN: c_int = 110; +pub const ECONNREFUSED: c_int = 111; +pub const EADDRINUSE: c_int = 112; +pub const ECONNABORTED: c_int = 113; +pub const ENETUNREACH: c_int = 114; +pub const ENETDOWN: c_int = 115; +pub const ETIMEDOUT: c_int = 116; +pub const EHOSTDOWN: c_int = 117; +pub const EHOSTUNREACH: c_int = 118; +pub const EINPROGRESS: c_int = 119; +pub const EALREADY: c_int = 120; +pub const EDESTADDRREQ: c_int = 121; +pub const EMSGSIZE: c_int = 122; +pub const EPROTONOSUPPORT: c_int = 123; +pub const ESOCKTNOSUPPORT: c_int = 124; +pub const EADDRNOTAVAIL: c_int = 125; +pub const ENETRESET: c_int = 126; +pub const EISCONN: c_int = 127; +pub const ENOTCONN: c_int = 128; +pub const ETOOMANYREFS: c_int = 129; +pub const EPROCLIM: c_int = 130; +pub const EUSERS: c_int = 131; +pub const EDQUOT: c_int = 132; +pub const ESTALE: c_int = 133; +pub const ENOTSUP: c_int = 134; +pub const ENOMEDIUM: c_int = 135; +pub const EILSEQ: c_int = 138; +pub const EOVERFLOW: c_int = 139; +pub const ECANCELED: c_int = 140; +pub const ENOTRECOVERABLE: c_int = 141; +pub const EOWNERDEAD: c_int = 142; +pub const ESTRPIPE: c_int = 143; +pub const EWOULDBLOCK: c_int = EAGAIN; /* Operation would block */ + +pub const SCHED_OTHER: c_int = 3; +pub const SCHED_FIFO: c_int = 1; +pub const SCHED_RR: c_int = 2; + +pub const PTHREAD_COND_INITIALIZER: pthread_cond_t = 21 as *mut _; +pub const PTHREAD_CREATE_DETACHED: c_int = 1; +pub const PTHREAD_CREATE_JOINABLE: c_int = 0; +pub const PTHREAD_MUTEX_RECURSIVE: c_int = 0; +pub const PTHREAD_MUTEX_ERRORCHECK: c_int = 1; +pub const PTHREAD_MUTEX_NORMAL: c_int = 2; +pub const PTHREAD_MUTEX_DEFAULT: c_int = PTHREAD_MUTEX_NORMAL; +pub const PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP: pthread_mutex_t = 18 as *mut _; +pub const PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP: pthread_mutex_t = 20 as *mut _; +pub const PTHREAD_MUTEX_INITIALIZER: pthread_mutex_t = 19 as *mut _; +pub const PTHREAD_PROCESS_SHARED: c_int = 1; +pub const PTHREAD_PROCESS_PRIVATE: c_int = 0; +pub const PTHREAD_RWLOCK_INITIALIZER: pthread_rwlock_t = 22 as *mut _; + +pub const LITTLE_ENDIAN: c_int = 1234; +pub const BIG_ENDIAN: c_int = 4321; + +pub const TCP_NODELAY: c_int = 1; +pub const TCP_KEEPIDLE: c_int = 3; +pub const TCP_MAXSEG: c_int = 4; +pub const TCP_QUICKACK: c_int = 12; +pub const TCP_USER_TIMEOUT: c_int = 14; +pub const TCP_FASTOPEN: c_int = 15; +pub const TCP_KEEPCNT: c_int = 16; +pub const TCP_KEEPINTVL: c_int = 17; + +pub const WINDOWS_POST: c_int = 0; +pub const WINDOWS_SEND: c_int = 1; +pub const WINDOWS_HWND: c_int = 2; + +pub const MOUNT_TEXT: c_uint = 0x01; +pub const MOUNT_SYSTEM: c_uint = 0x08; +pub const MOUNT_EXEC: c_uint = 0x10; +pub const MOUNT_CYGDRIVE: c_uint = 0x20; +pub const MOUNT_CYGWIN_EXEC: c_uint = 0x40; +pub const MOUNT_SPARSE: c_uint = 0x80; +pub const MOUNT_NOTEXEC: c_uint = 0x100; +pub const MOUNT_DEVFS: c_uint = 0x200; +pub const MOUNT_PROC: c_uint = 0x400; +pub const MOUNT_RO: c_uint = 0x1000; +pub const MOUNT_NOACL: c_uint = 0x2000; 
+pub const MOUNT_NOPOSIX: c_uint = 0x4000; +pub const MOUNT_OVERRIDE: c_uint = 0x8000; +pub const MOUNT_IMMUTABLE: c_uint = 0x10000; +pub const MOUNT_AUTOMATIC: c_uint = 0x20000; +pub const MOUNT_DOS: c_uint = 0x40000; +pub const MOUNT_IHASH: c_uint = 0x80000; +pub const MOUNT_BIND: c_uint = 0x100000; +pub const MOUNT_USER_TEMP: c_uint = 0x200000; +pub const MOUNT_DONT_USE: c_uint = 0x80000000; + +pub const _POSIX_VDISABLE: cc_t = 0; + +pub const GRND_NONBLOCK: c_uint = 0x1; +pub const GRND_RANDOM: c_uint = 0x2; + +pub const _IONBF: c_int = 2; +pub const BUFSIZ: c_int = 1024; + +pub const POSIX_SPAWN_RESETIDS: c_int = 0x01; +pub const POSIX_SPAWN_SETPGROUP: c_int = 0x02; +pub const POSIX_SPAWN_SETSCHEDPARAM: c_int = 0x04; +pub const POSIX_SPAWN_SETSCHEDULER: c_int = 0x08; +pub const POSIX_SPAWN_SETSIGDEF: c_int = 0x10; +pub const POSIX_SPAWN_SETSIGMASK: c_int = 0x20; + +pub const POSIX_FADV_NORMAL: c_int = 0; +pub const POSIX_FADV_SEQUENTIAL: c_int = 1; +pub const POSIX_FADV_RANDOM: c_int = 2; +pub const POSIX_FADV_WILLNEED: c_int = 3; +pub const POSIX_FADV_DONTNEED: c_int = 4; +pub const POSIX_FADV_NOREUSE: c_int = 5; + +pub const FALLOC_FL_PUNCH_HOLE: c_int = 0x0001; +pub const FALLOC_FL_ZERO_RANGE: c_int = 0x0002; +pub const FALLOC_FL_UNSHARE_RANGE: c_int = 0x0004; +pub const FALLOC_FL_COLLAPSE_RANGE: c_int = 0x0008; +pub const FALLOC_FL_INSERT_RANGE: c_int = 0x0010; +pub const FALLOC_FL_KEEP_SIZE: c_int = 0x1000; + +f! { + pub fn FD_CLR(fd: c_int, set: *mut fd_set) -> () { + let fd = fd as usize; + let size = size_of_val(&(*set).fds_bits[0]) * 8; + (*set).fds_bits[fd / size] &= !(1 << (fd % size)); + } + + pub fn FD_ISSET(fd: c_int, set: *const fd_set) -> bool { + let fd = fd as usize; + let size = size_of_val(&(*set).fds_bits[0]) * 8; + ((*set).fds_bits[fd / size] & (1 << (fd % size))) != 0 + } + + pub fn FD_SET(fd: c_int, set: *mut fd_set) -> () { + let fd = fd as usize; + let size = size_of_val(&(*set).fds_bits[0]) * 8; + (*set).fds_bits[fd / size] |= 1 << (fd % size); + } + + pub fn FD_ZERO(set: *mut fd_set) -> () { + for slot in (*set).fds_bits.iter_mut() { + *slot = 0; + } + } + + pub fn CPU_ALLOC_SIZE(count: c_int) -> size_t { + let _dummy: cpu_set_t = cpu_set_t { bits: [0; 16] }; + let size_in_bits = 8 * size_of_val(&_dummy.bits[0]); + ((count as size_t + size_in_bits - 1) / 8) as size_t + } + + pub fn CPU_COUNT_S(size: usize, cpuset: &cpu_set_t) -> c_int { + let mut s: u32 = 0; + let size_of_mask = size_of_val(&cpuset.bits[0]); + for i in cpuset.bits[..(size / size_of_mask)].iter() { + s += i.count_ones(); + } + s as c_int + } + + pub fn CPU_ZERO(cpuset: &mut cpu_set_t) -> () { + for slot in cpuset.bits.iter_mut() { + *slot = 0; + } + } + pub fn CPU_SET(cpu: usize, cpuset: &mut cpu_set_t) -> () { + let size_in_bits = 8 * size_of_val(&cpuset.bits[0]); + if cpu < size_in_bits { + let (idx, offset) = (cpu / size_in_bits, cpu % size_in_bits); + cpuset.bits[idx] |= 1 << offset; + } + } + + pub fn CPU_CLR(cpu: usize, cpuset: &mut cpu_set_t) -> () { + let size_in_bits = 8 * size_of_val(&cpuset.bits[0]); + if cpu < size_in_bits { + let (idx, offset) = (cpu / size_in_bits, cpu % size_in_bits); + cpuset.bits[idx] &= !(1 << offset); + } + } + + pub fn CPU_ISSET(cpu: usize, cpuset: &cpu_set_t) -> bool { + let size_in_bits = 8 * size_of_val(&cpuset.bits[0]); + if cpu < size_in_bits { + let (idx, offset) = (cpu / size_in_bits, cpu % size_in_bits); + 0 != (cpuset.bits[idx] & (1 << offset)) + } else { + false + } + } + + pub fn CPU_COUNT(cpuset: &cpu_set_t) -> c_int { + 
CPU_COUNT_S(size_of::<cpu_set_t>(), cpuset) + } + + pub fn CPU_EQUAL(set1: &cpu_set_t, set2: &cpu_set_t) -> bool { + set1.bits == set2.bits + } + + pub fn CMSG_LEN(length: c_uint) -> c_uint { + CMSG_ALIGN(size_of::<cmsghdr>()) as c_uint + length + } + + pub const fn CMSG_SPACE(length: c_uint) -> c_uint { + (CMSG_ALIGN(length as usize) + CMSG_ALIGN(size_of::<cmsghdr>())) as c_uint + } + + pub fn CMSG_FIRSTHDR(mhdr: *const msghdr) -> *mut cmsghdr { + if (*mhdr).msg_controllen as usize >= size_of::<cmsghdr>() { + (*mhdr).msg_control.cast() + } else { + core::ptr::null_mut() + } + } + + pub fn CMSG_NXTHDR(mhdr: *const msghdr, cmsg: *const cmsghdr) -> *mut cmsghdr { + let next = (cmsg as usize + CMSG_ALIGN((*cmsg).cmsg_len as usize)) as *mut cmsghdr; + let max = (*mhdr).msg_control as usize + (*mhdr).msg_controllen as usize; + if next as usize + CMSG_ALIGN(size_of::<cmsghdr>()) as usize > max { + core::ptr::null_mut() + } else { + next + } + } + + pub fn CMSG_DATA(cmsg: *const cmsghdr) -> *mut c_uchar { + cmsg.offset(1).cast_mut().cast() + } +} + +safe_f! { + pub const fn makedev(ma: c_uint, mi: c_uint) -> dev_t { + let ma = ma as dev_t; + let mi = mi as dev_t; + (ma << 16) | (mi & 0xffff) + } + + pub const fn major(dev: dev_t) -> c_uint { + ((dev >> 16) & 0xffff) as c_uint + } + + pub const fn minor(dev: dev_t) -> c_uint { + (dev & 0xffff) as c_uint + } + + pub const fn WIFEXITED(status: c_int) -> bool { + (status & 0xff) == 0 + } + + pub const fn WIFSIGNALED(status: c_int) -> bool { + (status & 0o177) != 0o177 && (status & 0o177) != 0 + } + + pub const fn WIFSTOPPED(status: c_int) -> bool { + (status & 0xff) == 0o177 + } + + pub const fn WIFCONTINUED(status: c_int) -> bool { + (status & 0o177777) == 0o177777 + } + + pub const fn WEXITSTATUS(status: c_int) -> c_int { + (status >> 8) & 0xff + } + + pub const fn WTERMSIG(status: c_int) -> c_int { + status & 0o177 + } + + pub const fn WSTOPSIG(status: c_int) -> c_int { + (status >> 8) & 0xff + } + + pub const fn WCOREDUMP(status: c_int) -> bool { + WIFSIGNALED(status) && (status & 0x80) != 0 + } +} + +const fn CMSG_ALIGN(len: usize) -> usize { + len + size_of::<usize>() - 1 & !(size_of::<usize>() - 1) +} + +extern "C" { + pub fn sigwait(set: *const sigset_t, sig: *mut c_int) -> c_int; + pub fn sigwaitinfo(set: *const sigset_t, info: *mut siginfo_t) -> c_int; + + pub fn pthread_sigmask(how: c_int, set: *const sigset_t, oldset: *mut sigset_t) -> c_int; + pub fn sigsuspend(mask: *const sigset_t) -> c_int; + pub fn sigaltstack(ss: *const stack_t, oss: *mut stack_t) -> c_int; + pub fn pthread_kill(thread: pthread_t, sig: c_int) -> c_int; + + pub fn sigtimedwait( + set: *const sigset_t, + info: *mut siginfo_t, + timeout: *const timespec, + ) -> c_int; + + pub fn strftime(s: *mut c_char, max: size_t, format: *const c_char, tm: *const tm) -> size_t; + + pub fn asctime_r(tm: *const tm, buf: *mut c_char) -> *mut c_char; + pub fn ctime_r(timep: *const time_t, buf: *mut c_char) -> *mut c_char; + pub fn strptime(s: *const c_char, format: *const c_char, tm: *mut tm) -> *mut c_char; + pub fn clock_settime(clk_id: clockid_t, tp: *const timespec) -> c_int; + pub fn clock_gettime(clk_id: clockid_t, tp: *mut timespec) -> c_int; + pub fn clock_getres(clk_id: clockid_t, tp: *mut timespec) -> c_int; + + pub fn timer_create(clockid: clockid_t, sevp: *mut sigevent, timerid: *mut timer_t) -> c_int; + + pub fn timer_delete(timerid: timer_t) -> c_int; + + pub fn timer_settime( + timerid: timer_t, + flags: c_int, + new_value: *const itimerspec, + old_value: *mut itimerspec, + ) -> c_int; + + pub fn timer_gettime(timerid: timer_t,
curr_value: *mut itimerspec) -> c_int; + pub fn timer_getoverrun(timerid: timer_t) -> c_int; + + pub fn clock_nanosleep( + clk_id: clockid_t, + flags: c_int, + rqtp: *const timespec, + rmtp: *mut timespec, + ) -> c_int; + + pub fn clock_getcpuclockid(pid: pid_t, clk_id: *mut clockid_t) -> c_int; + + pub fn futimes(fd: c_int, times: *const timeval) -> c_int; + pub fn lutimes(file: *const c_char, times: *const timeval) -> c_int; + pub fn settimeofday(tv: *const timeval, tz: *const timezone) -> c_int; + pub fn getitimer(which: c_int, curr_value: *mut itimerval) -> c_int; + + pub fn setitimer(which: c_int, new_value: *const itimerval, old_value: *mut itimerval) + -> c_int; + + pub fn gettimeofday(tp: *mut timeval, tz: *mut c_void) -> c_int; + pub fn futimesat(fd: c_int, path: *const c_char, times: *const timeval) -> c_int; + + pub fn getrlimit(resource: c_int, rlim: *mut rlimit) -> c_int; + pub fn setrlimit(resource: c_int, rlim: *const rlimit) -> c_int; + pub fn getpriority(which: c_int, who: id_t) -> c_int; + pub fn setpriority(which: c_int, who: id_t, prio: c_int) -> c_int; + + pub fn getpwnam_r( + name: *const c_char, + pwd: *mut passwd, + buf: *mut c_char, + buflen: size_t, + result: *mut *mut passwd, + ) -> c_int; + + pub fn getpwuid_r( + uid: uid_t, + pwd: *mut passwd, + buf: *mut c_char, + buflen: size_t, + result: *mut *mut passwd, + ) -> c_int; + + pub fn getpwent() -> *mut passwd; + pub fn setpwent(); + pub fn endpwent(); + + pub fn if_nameindex() -> *mut if_nameindex; + pub fn if_freenameindex(ptr: *mut if_nameindex); + + pub fn readv(fd: c_int, iov: *const iovec, iovcnt: c_int) -> ssize_t; + pub fn writev(fd: c_int, iov: *const iovec, iovcnt: c_int) -> ssize_t; + + pub fn mkfifoat(dirfd: c_int, pathname: *const c_char, mode: mode_t) -> c_int; + + pub fn mknodat(dirfd: c_int, pathname: *const c_char, mode: mode_t, dev: dev_t) -> c_int; + + pub fn utimensat( + dirfd: c_int, + path: *const c_char, + times: *const timespec, + flag: c_int, + ) -> c_int; + + pub fn futimens(fd: c_int, times: *const timespec) -> c_int; + + pub fn dlfork(val: c_int); + + pub fn accept4(s: c_int, addr: *mut sockaddr, addrlen: *mut socklen_t, flags: c_int) -> c_int; + + pub fn bind(socket: c_int, address: *const sockaddr, address_len: socklen_t) -> c_int; + + pub fn recvfrom( + socket: c_int, + buf: *mut c_void, + len: size_t, + flags: c_int, + addr: *mut sockaddr, + addrlen: *mut socklen_t, + ) -> ssize_t; + + pub fn recvmsg(fd: c_int, msg: *mut msghdr, flags: c_int) -> ssize_t; + pub fn sendmsg(fd: c_int, msg: *const msghdr, flags: c_int) -> ssize_t; + + pub fn getnameinfo( + sa: *const sockaddr, + salen: socklen_t, + host: *mut c_char, + hostlen: socklen_t, + serv: *mut c_char, + sevlen: socklen_t, + flags: c_int, + ) -> c_int; + + pub fn ppoll( + fds: *mut pollfd, + nfds: nfds_t, + timeout: *const timespec, + sigmask: *const sigset_t, + ) -> c_int; + + pub fn newlocale(mask: c_int, locale: *const c_char, base: locale_t) -> locale_t; + pub fn freelocale(loc: locale_t); + pub fn duplocale(base: locale_t) -> locale_t; + pub fn uselocale(loc: locale_t) -> locale_t; + + pub fn sem_init(sem: *mut sem_t, pshared: c_int, value: c_uint) -> c_int; + pub fn sem_destroy(sem: *mut sem_t) -> c_int; + pub fn sem_open(name: *const c_char, oflag: c_int, ...) 
-> *mut sem_t; + pub fn sem_close(sem: *mut sem_t) -> c_int; + pub fn sem_unlink(name: *const c_char) -> c_int; + pub fn sem_timedwait(sem: *mut sem_t, abstime: *const timespec) -> c_int; + pub fn sem_getvalue(sem: *mut sem_t, sval: *mut c_int) -> c_int; + + pub fn clearenv() -> c_int; + pub fn ptsname_r(fd: c_int, buf: *mut c_char, buflen: size_t) -> c_int; + pub fn getpt() -> c_int; + pub fn memalign(align: size_t, size: size_t) -> *mut c_void; + pub fn getloadavg(loadavg: *mut c_double, nelem: c_int) -> c_int; + + pub fn abs(i: c_int) -> c_int; + pub fn arc4random() -> u32; + pub fn arc4random_uniform(l: u32) -> u32; + pub fn arc4random_buf(buf: *mut c_void, size: size_t); + pub fn labs(i: c_long) -> c_long; + pub fn mkostemp(template: *mut c_char, flags: c_int) -> c_int; + pub fn mkostemps(template: *mut c_char, suffixlen: c_int, flags: c_int) -> c_int; + pub fn mkstemps(template: *mut c_char, suffixlen: c_int) -> c_int; + pub fn rand() -> c_int; + pub fn reallocarray(ptr: *mut c_void, nmemb: size_t, size: size_t) -> *mut c_void; + pub fn reallocf(ptr: *mut c_void, size: size_t) -> *mut c_void; + pub fn srand(seed: c_uint); + pub fn drand48() -> c_double; + pub fn erand48(xseed: *mut c_ushort) -> c_double; + pub fn jrand48(xseed: *mut c_ushort) -> c_long; + pub fn lcong48(p: *mut c_ushort); + pub fn lrand48() -> c_long; + pub fn mrand48() -> c_long; + pub fn nrand48(xseed: *mut c_ushort) -> c_long; + pub fn seed48(xseed: *mut c_ushort) -> *mut c_ushort; + pub fn srand48(seed: c_long); + + pub fn qsort_r( + base: *mut c_void, + num: size_t, + size: size_t, + compar: Option<unsafe extern "C" fn(*const c_void, *const c_void, *mut c_void) -> c_int>, + arg: *mut c_void, + ); + + pub fn mprotect(addr: *mut c_void, len: size_t, prot: c_int) -> c_int; + pub fn msync(addr: *mut c_void, len: size_t, flags: c_int) -> c_int; + pub fn posix_madvise(addr: *mut c_void, len: size_t, advice: c_int) -> c_int; + pub fn madvise(addr: *mut c_void, len: size_t, advice: c_int) -> c_int; + pub fn shm_open(name: *const c_char, oflag: c_int, mode: mode_t) -> c_int; + pub fn shm_unlink(name: *const c_char) -> c_int; + + pub fn explicit_bzero(s: *mut c_void, len: size_t); + pub fn ffs(value: c_int) -> c_int; + pub fn ffsl(value: c_long) -> c_int; + pub fn ffsll(value: c_longlong) -> c_int; + pub fn fls(value: c_int) -> c_int; + pub fn flsl(value: c_long) -> c_int; + pub fn flsll(value: c_longlong) -> c_int; + pub fn strcasecmp_l(s1: *const c_char, s2: *const c_char, loc: locale_t) -> c_int; + + pub fn strncasecmp_l(s1: *const c_char, s2: *const c_char, n: size_t, loc: locale_t) -> c_int; + + pub fn timingsafe_bcmp(a: *const c_void, b: *const c_void, len: size_t) -> c_int; + pub fn timingsafe_memcmp(a: *const c_void, b: *const c_void, len: size_t) -> c_int; + + pub fn memmem( + haystack: *const c_void, + haystacklen: size_t, + needle: *const c_void, + needlelen: size_t, + ) -> *mut c_void; + + pub fn memrchr(cx: *const c_void, c: c_int, n: size_t) -> *mut c_void; + #[link_name = "__xpg_strerror_r"] + pub fn strerror_r(errnum: c_int, buf: *mut c_char, buflen: size_t) -> c_int; + pub fn strsep(string: *mut *mut c_char, delim: *const c_char) -> *mut c_char; + + #[link_name = "__gnu_basename"] + pub fn basename(path: *const c_char) -> *mut c_char; + + pub fn daemon(nochdir: c_int, noclose: c_int) -> c_int; + pub fn dup3(src: c_int, dst: c_int, flags: c_int) -> c_int; + pub fn eaccess(pathname: *const c_char, mode: c_int) -> c_int; + pub fn euidaccess(pathname: *const c_char, mode: c_int) -> c_int; + + pub fn execvpe( + file: *const c_char, + argv: *const *mut c_char, + envp: *const
*mut c_char, + ) -> c_int; + + pub fn faccessat(dirfd: c_int, pathname: *const c_char, mode: c_int, flags: c_int) -> c_int; + + pub fn fexecve(fd: c_int, argv: *const *mut c_char, envp: *const *mut c_char) -> c_int; + + pub fn fdatasync(fd: c_int) -> c_int; + pub fn getdomainname(name: *mut c_char, len: size_t) -> c_int; + pub fn getentropy(buf: *mut c_void, buflen: size_t) -> c_int; + pub fn gethostid() -> c_long; + pub fn getpagesize() -> c_int; + pub fn getpeereid(socket: c_int, euid: *mut uid_t, egid: *mut gid_t) -> c_int; + + pub fn pthread_atfork( + prepare: Option<unsafe extern "C" fn()>, + parent: Option<unsafe extern "C" fn()>, + child: Option<unsafe extern "C" fn()>, + ) -> c_int; + + pub fn pipe2(fds: *mut c_int, flags: c_int) -> c_int; + pub fn sbrk(increment: intptr_t) -> *mut c_void; + pub fn setgroups(ngroups: c_int, ptr: *const gid_t) -> c_int; + pub fn sethostname(name: *const c_char, len: size_t) -> c_int; + pub fn vhangup() -> c_int; + pub fn getdtablesize() -> c_int; + pub fn sync(); + + pub fn __errno() -> *mut c_int; + + pub fn sched_setparam(pid: pid_t, param: *const sched_param) -> c_int; + pub fn sched_getparam(pid: pid_t, param: *mut sched_param) -> c_int; + + pub fn sched_setscheduler(pid: pid_t, policy: c_int, param: *const sched_param) -> c_int; + + pub fn sched_getscheduler(pid: pid_t) -> c_int; + pub fn sched_get_priority_max(policy: c_int) -> c_int; + pub fn sched_get_priority_min(policy: c_int) -> c_int; + pub fn sched_rr_get_interval(pid: pid_t, t: *mut timespec) -> c_int; + pub fn sched_getcpu() -> c_int; + pub fn sched_getaffinity(pid: pid_t, cpusetsize: size_t, mask: *mut cpu_set_t) -> c_int; + + pub fn sched_setaffinity(pid: pid_t, cpusetsize: size_t, cpuset: *const cpu_set_t) -> c_int; + + pub fn pthread_attr_getguardsize(attr: *const pthread_attr_t, guardsize: *mut size_t) -> c_int; + + pub fn pthread_attr_getschedparam( + attr: *const pthread_attr_t, + param: *mut sched_param, + ) -> c_int; + + pub fn pthread_attr_setschedparam( + attr: *mut pthread_attr_t, + param: *const sched_param, + ) -> c_int; + + pub fn pthread_attr_getstack( + attr: *const pthread_attr_t, + stackaddr: *mut *mut c_void, + stacksize: *mut size_t, + ) -> c_int; + + pub fn pthread_cancel(thread: pthread_t) -> c_int; + + pub fn pthread_condattr_getclock( + attr: *const pthread_condattr_t, + clock_id: *mut clockid_t, + ) -> c_int; + + pub fn pthread_condattr_getpshared( + attr: *const pthread_condattr_t, + pshared: *mut c_int, + ) -> c_int; + + pub fn pthread_condattr_setclock(attr: *mut pthread_condattr_t, clock_id: clockid_t) -> c_int; + + pub fn pthread_condattr_setpshared(attr: *mut pthread_condattr_t, pshared: c_int) -> c_int; + pub fn pthread_barrierattr_init(attr: *mut pthread_barrierattr_t) -> c_int; + + pub fn pthread_barrierattr_setpshared(attr: *mut pthread_barrierattr_t, shared: c_int) + -> c_int; + + pub fn pthread_barrierattr_getpshared( + attr: *const pthread_barrierattr_t, + shared: *mut c_int, + ) -> c_int; + + pub fn pthread_barrierattr_destroy(attr: *mut pthread_barrierattr_t) -> c_int; + + pub fn pthread_barrier_init( + barrier: *mut pthread_barrier_t, + attr: *const pthread_barrierattr_t, + count: c_uint, + ) -> c_int; + + pub fn pthread_barrier_destroy(barrier: *mut pthread_barrier_t) -> c_int; + pub fn pthread_barrier_wait(barrier: *mut pthread_barrier_t) -> c_int; + + pub fn pthread_create( + native: *mut pthread_t, + attr: *const pthread_attr_t, + f: extern "C" fn(*mut c_void) -> *mut c_void, + value: *mut c_void, + ) -> c_int; + + pub fn pthread_getcpuclockid(thread: pthread_t, clk_id: *mut clockid_t) -> c_int; + + pub fn
pthread_getschedparam( + native: pthread_t, + policy: *mut c_int, + param: *mut sched_param, + ) -> c_int; + + pub fn pthread_mutex_timedlock(lock: *mut pthread_mutex_t, abstime: *const timespec) -> c_int; + + pub fn pthread_mutexattr_getprotocol( + attr: *const pthread_mutexattr_t, + protocol: *mut c_int, + ) -> c_int; + + pub fn pthread_mutexattr_getpshared( + attr: *const pthread_mutexattr_t, + pshared: *mut c_int, + ) -> c_int; + + pub fn pthread_mutexattr_setprotocol(attr: *mut pthread_mutexattr_t, protocol: c_int) -> c_int; + + pub fn pthread_mutexattr_setpshared(attr: *mut pthread_mutexattr_t, pshared: c_int) -> c_int; + + pub fn pthread_spin_destroy(lock: *mut pthread_spinlock_t) -> c_int; + pub fn pthread_spin_init(lock: *mut pthread_spinlock_t, pshared: c_int) -> c_int; + pub fn pthread_spin_lock(lock: *mut pthread_spinlock_t) -> c_int; + pub fn pthread_spin_trylock(lock: *mut pthread_spinlock_t) -> c_int; + pub fn pthread_spin_unlock(lock: *mut pthread_spinlock_t) -> c_int; + + pub fn pthread_rwlockattr_getpshared( + attr: *const pthread_rwlockattr_t, + val: *mut c_int, + ) -> c_int; + + pub fn pthread_rwlockattr_setpshared(attr: *mut pthread_rwlockattr_t, val: c_int) -> c_int; + + pub fn pthread_setschedparam( + native: pthread_t, + policy: c_int, + param: *const sched_param, + ) -> c_int; + + pub fn pthread_setschedprio(native: pthread_t, priority: c_int) -> c_int; + + pub fn pthread_getaffinity_np( + thread: pthread_t, + cpusetsize: size_t, + cpuset: *mut cpu_set_t, + ) -> c_int; + + pub fn pthread_getattr_np(native: pthread_t, attr: *mut pthread_attr_t) -> c_int; + pub fn pthread_getname_np(thread: pthread_t, name: *mut c_char, len: size_t) -> c_int; + + pub fn pthread_setaffinity_np( + thread: pthread_t, + cpusetsize: size_t, + cpuset: *const cpu_set_t, + ) -> c_int; + + pub fn pthread_setname_np(thread: pthread_t, name: *const c_char) -> c_int; + pub fn pthread_sigqueue(thread: pthread_t, sig: c_int, value: sigval) -> c_int; + + pub fn ioctl(fd: c_int, request: c_int, ...) 
-> c_int; + + pub fn getrandom(buf: *mut c_void, buflen: size_t, flags: c_uint) -> ssize_t; + + pub fn mount(src: *const c_char, target: *const c_char, flags: c_uint) -> c_int; + + pub fn umount(target: *const c_char) -> c_int; + pub fn cygwin_umount(target: *const c_char, flags: c_uint) -> c_int; + + pub fn dirfd(dirp: *mut DIR) -> c_int; + pub fn seekdir(dirp: *mut DIR, loc: c_long); + pub fn telldir(dirp: *mut DIR) -> c_long; + + pub fn uname(buf: *mut utsname) -> c_int; + + pub fn posix_spawn( + pid: *mut pid_t, + path: *const c_char, + file_actions: *const posix_spawn_file_actions_t, + attrp: *const posix_spawnattr_t, + argv: *const *mut c_char, + envp: *const *mut c_char, + ) -> c_int; + pub fn posix_spawnp( + pid: *mut pid_t, + file: *const c_char, + file_actions: *const posix_spawn_file_actions_t, + attrp: *const posix_spawnattr_t, + argv: *const *mut c_char, + envp: *const *mut c_char, + ) -> c_int; + pub fn posix_spawnattr_init(attr: *mut posix_spawnattr_t) -> c_int; + pub fn posix_spawnattr_destroy(attr: *mut posix_spawnattr_t) -> c_int; + pub fn posix_spawnattr_getsigdefault( + attr: *const posix_spawnattr_t, + default: *mut sigset_t, + ) -> c_int; + pub fn posix_spawnattr_setsigdefault( + attr: *mut posix_spawnattr_t, + default: *const sigset_t, + ) -> c_int; + pub fn posix_spawnattr_getsigmask( + attr: *const posix_spawnattr_t, + default: *mut sigset_t, + ) -> c_int; + pub fn posix_spawnattr_setsigmask( + attr: *mut posix_spawnattr_t, + default: *const sigset_t, + ) -> c_int; + pub fn posix_spawnattr_getflags(attr: *const posix_spawnattr_t, flags: *mut c_short) -> c_int; + pub fn posix_spawnattr_setflags(attr: *mut posix_spawnattr_t, flags: c_short) -> c_int; + pub fn posix_spawnattr_getpgroup(attr: *const posix_spawnattr_t, flags: *mut pid_t) -> c_int; + pub fn posix_spawnattr_setpgroup(attr: *mut posix_spawnattr_t, flags: pid_t) -> c_int; + pub fn posix_spawnattr_getschedpolicy( + attr: *const posix_spawnattr_t, + flags: *mut c_int, + ) -> c_int; + pub fn posix_spawnattr_setschedpolicy(attr: *mut posix_spawnattr_t, flags: c_int) -> c_int; + pub fn posix_spawnattr_getschedparam( + attr: *const posix_spawnattr_t, + param: *mut sched_param, + ) -> c_int; + pub fn posix_spawnattr_setschedparam( + attr: *mut posix_spawnattr_t, + param: *const sched_param, + ) -> c_int; + + pub fn posix_spawn_file_actions_init(actions: *mut posix_spawn_file_actions_t) -> c_int; + pub fn posix_spawn_file_actions_destroy(actions: *mut posix_spawn_file_actions_t) -> c_int; + pub fn posix_spawn_file_actions_addopen( + actions: *mut posix_spawn_file_actions_t, + fd: c_int, + path: *const c_char, + oflag: c_int, + mode: mode_t, + ) -> c_int; + pub fn posix_spawn_file_actions_addclose( + actions: *mut posix_spawn_file_actions_t, + fd: c_int, + ) -> c_int; + pub fn posix_spawn_file_actions_adddup2( + actions: *mut posix_spawn_file_actions_t, + fd: c_int, + newfd: c_int, + ) -> c_int; + pub fn posix_spawn_file_actions_addchdir( + actions: *mut crate::posix_spawn_file_actions_t, + path: *const c_char, + ) -> c_int; + pub fn posix_spawn_file_actions_addfchdir( + actions: *mut crate::posix_spawn_file_actions_t, + fd: c_int, + ) -> c_int; + pub fn posix_spawn_file_actions_addchdir_np( + actions: *mut crate::posix_spawn_file_actions_t, + path: *const c_char, + ) -> c_int; + pub fn posix_spawn_file_actions_addfchdir_np( + actions: *mut crate::posix_spawn_file_actions_t, + fd: c_int, + ) -> c_int; + + pub fn forkpty( + amaster: *mut c_int, + name: *mut c_char, + termp: *const termios, + winp: *const 
crate::winsize, + ) -> crate::pid_t; + pub fn openpty( + amaster: *mut c_int, + aslave: *mut c_int, + name: *mut c_char, + termp: *const termios, + winp: *const crate::winsize, + ) -> c_int; + + pub fn getgrgid_r( + gid: crate::gid_t, + grp: *mut crate::group, + buf: *mut c_char, + buflen: size_t, + result: *mut *mut crate::group, + ) -> c_int; + pub fn getgrouplist( + user: *const c_char, + group: crate::gid_t, + groups: *mut crate::gid_t, + ngroups: *mut c_int, + ) -> c_int; + pub fn getgrnam_r( + name: *const c_char, + grp: *mut crate::group, + buf: *mut c_char, + buflen: size_t, + result: *mut *mut crate::group, + ) -> c_int; + pub fn initgroups(user: *const c_char, group: crate::gid_t) -> c_int; + + pub fn statfs(path: *const c_char, buf: *mut statfs) -> c_int; + pub fn fstatfs(fd: c_int, buf: *mut statfs) -> c_int; + + pub fn posix_fadvise(fd: c_int, offset: off_t, len: off_t, advise: c_int) -> c_int; + pub fn posix_fallocate(fd: c_int, offset: off_t, len: off_t) -> c_int; + pub fn fallocate(fd: c_int, mode: c_int, offset: off_t, len: off_t) -> c_int; +} diff --git a/vendor/libc/src/unix/haiku/b32.rs b/vendor/libc/src/unix/haiku/b32.rs new file mode 100644 index 00000000000000..1aa27e615ca4ea --- /dev/null +++ b/vendor/libc/src/unix/haiku/b32.rs @@ -0,0 +1,18 @@ +pub type time_t = i32; + +pub type Elf_Addr = crate::Elf32_Addr; +pub type Elf_Half = crate::Elf32_Half; +pub type Elf_Phdr = crate::Elf32_Phdr; + +s! { + pub struct Elf32_Phdr { + pub p_type: crate::Elf32_Word, + pub p_offset: crate::Elf32_Off, + pub p_vaddr: crate::Elf32_Addr, + pub p_paddr: crate::Elf32_Addr, + pub p_filesz: crate::Elf32_Word, + pub p_memsz: crate::Elf32_Word, + pub p_flags: crate::Elf32_Word, + pub p_align: crate::Elf32_Word, + } +} diff --git a/vendor/libc/src/unix/haiku/b64.rs b/vendor/libc/src/unix/haiku/b64.rs new file mode 100644 index 00000000000000..3355241fdb7971 --- /dev/null +++ b/vendor/libc/src/unix/haiku/b64.rs @@ -0,0 +1,18 @@ +pub type time_t = i64; + +pub type Elf_Addr = crate::Elf64_Addr; +pub type Elf_Half = crate::Elf64_Half; +pub type Elf_Phdr = crate::Elf64_Phdr; + +s! { + pub struct Elf64_Phdr { + pub p_type: crate::Elf64_Word, + pub p_flags: crate::Elf64_Word, + pub p_offset: crate::Elf64_Off, + pub p_vaddr: crate::Elf64_Addr, + pub p_paddr: crate::Elf64_Addr, + pub p_filesz: crate::Elf64_Xword, + pub p_memsz: crate::Elf64_Xword, + pub p_align: crate::Elf64_Xword, + } +} diff --git a/vendor/libc/src/unix/haiku/bsd.rs b/vendor/libc/src/unix/haiku/bsd.rs new file mode 100644 index 00000000000000..1e3881e2c67ff5 --- /dev/null +++ b/vendor/libc/src/unix/haiku/bsd.rs @@ -0,0 +1,151 @@ +//! This file contains the BSD APIs available in Haiku. It corresponds to the +//! header files in `headers/compatibility/bsd`. +//! +//! Note that Haiku's BSD compatibility is a combination of system APIs and +//! utility libraries. There should only be system APIs in `libc`. When you are +//! trying to determine whether something should be included in this file, the +//! best indicator is whether it also exists in the BSD-specific definitions in +//! this libc crate. + +use crate::prelude::*; + +// stringlist.h (utility library) +// Note: this is kept because it was previously introduced +pub type StringList = _stringlist; + +s! 
{ + // stringlist.h (utility library) + // Note: this is kept because it was previously introduced + pub struct _stringlist { + pub sl_str: *mut *mut c_char, + pub sl_max: size_t, + pub sl_cur: size_t, + } + + // sys/event.h + pub struct kevent { + pub ident: crate::uintptr_t, + pub filter: c_short, + pub flags: c_ushort, + pub fflags: c_uint, + pub data: i64, + pub udata: *mut c_void, + pub ext: [u64; 4], + } + + // sys/link_elf.h + pub struct dl_phdr_info { + pub dlpi_addr: crate::Elf_Addr, + pub dlpi_name: *const c_char, + pub dlpi_phdr: *const crate::Elf_Phdr, + pub dlpi_phnum: crate::Elf_Half, + } +} + +// sys/event.h +pub const EVFILT_READ: i16 = -1; +pub const EVFILT_WRITE: i16 = -2; +pub const EVFILT_PROC: i16 = -5; +pub const EV_ADD: u16 = 0x0001; +pub const EV_DELETE: u16 = 0x0002; +pub const EV_ONESHOT: u16 = 0x0010; +pub const EV_CLEAR: u16 = 0x0020; +pub const EV_EOF: u16 = 0x8000; +pub const EV_ERROR: u16 = 0x4000; +pub const NOTE_EXIT: u32 = 0x80000000; + +// sys/ioccom.h +pub const IOC_VOID: c_ulong = 0x20000000; +pub const IOC_OUT: c_ulong = 0x40000000; +pub const IOC_IN: c_ulong = 0x80000000; +pub const IOC_INOUT: c_ulong = IOC_IN | IOC_OUT; +pub const IOC_DIRMASK: c_ulong = 0xe0000000; + +#[link(name = "bsd")] +extern "C" { + // stdlib.h + pub fn daemon(nochdir: c_int, noclose: c_int) -> c_int; + pub fn getprogname() -> *const c_char; + pub fn setprogname(progname: *const c_char); + pub fn arc4random() -> u32; + pub fn arc4random_uniform(upper_bound: u32) -> u32; + pub fn arc4random_buf(buf: *mut c_void, n: size_t); + pub fn mkstemps(template: *mut c_char, suffixlen: c_int) -> c_int; + pub fn strtonum( + nptr: *const c_char, + minval: c_longlong, + maxval: c_longlong, + errstr: *mut *const c_char, + ) -> c_longlong; + + // pty.h + pub fn openpty( + amaster: *mut c_int, + aslave: *mut c_int, + name: *mut c_char, + termp: *mut crate::termios, + winp: *mut crate::winsize, + ) -> c_int; + pub fn login_tty(_fd: c_int) -> c_int; + pub fn forkpty( + amaster: *mut c_int, + name: *mut c_char, + termp: *mut crate::termios, + winp: *mut crate::winsize, + ) -> crate::pid_t; + + // string.h + pub fn strsep(string: *mut *mut c_char, delimiters: *const c_char) -> *mut c_char; + pub fn explicit_bzero(buf: *mut c_void, len: size_t); + + // stringlist.h (utility library) + // Note: this is kept because it was previously introduced + pub fn sl_init() -> *mut StringList; + pub fn sl_add(sl: *mut StringList, n: *mut c_char) -> c_int; + pub fn sl_free(sl: *mut StringList, i: c_int); + pub fn sl_find(sl: *mut StringList, n: *mut c_char) -> *mut c_char; + + // sys/event.h + pub fn kqueue() -> c_int; + pub fn kevent( + kq: c_int, + changelist: *const kevent, + nchanges: c_int, + eventlist: *mut kevent, + nevents: c_int, + timeout: *const crate::timespec, + ) -> c_int; + + // sys/link_elf.h + pub fn dl_iterate_phdr( + callback: Option< + unsafe extern "C" fn(info: *mut dl_phdr_info, size: usize, data: *mut c_void) -> c_int, + >, + data: *mut c_void, + ) -> c_int; + + // sys/time.h + pub fn lutimes(file: *const c_char, times: *const crate::timeval) -> c_int; + + // sys/uov.h + pub fn preadv( + fd: c_int, + iov: *const crate::iovec, + iovcnt: c_int, + offset: crate::off_t, + ) -> ssize_t; + pub fn pwritev( + fd: c_int, + iov: *const crate::iovec, + iovcnt: c_int, + offset: crate::off_t, + ) -> ssize_t; + + // sys/wait.h + pub fn wait4( + pid: crate::pid_t, + status: *mut c_int, + options: c_int, + rusage: *mut crate::rusage, + ) -> crate::pid_t; +} diff --git a/vendor/libc/src/unix/haiku/mod.rs 
b/vendor/libc/src/unix/haiku/mod.rs new file mode 100644 index 00000000000000..964598e97ca35e --- /dev/null +++ b/vendor/libc/src/unix/haiku/mod.rs @@ -0,0 +1,2097 @@ +use crate::prelude::*; + +// This module contains bindings to the native Haiku API. The Haiku API +// originates from BeOS, and it was the original way to perform low level +// system and IO operations. The POSIX API was in that era was like a +// compatibility layer. In current Haiku development, both the POSIX API and +// the Haiku API are considered to be co-equal status. However, they are not +// integrated like they are on other UNIX platforms, which means that for many +// low level concepts there are two versions, like processes (POSIX) and +// teams (Haiku), or pthreads and native threads. +// +// Both the POSIX API and the Haiku API live in libroot.so, the library that is +// linked to any binary by default. Additionally, Haiku supports several +// non-POSIX APIs from BSD and GNU, which live in libbsd.so and libgnu.so. These +// modules are also supported. +// +// The module is comprised of the following files: +// - `mod.rs` (this file) implements the C11 and POSIX API found in +// `headers/posix` +// - `b32.rs`, `b64.rs` and `x86_64.rs` contain platform-specific definitions +// of the C11 and POSIX APIs +// - `native.rs` defines the native Haiku API that is implemented in +// `libroot.so` and that are found in `headers/os`. +// - `bsd.rs` defines the BSD customizations available on Haiku found in +// `headers/compatibility/bsd` + +pub type rlim_t = crate::uintptr_t; +pub type sa_family_t = u8; +pub type pthread_key_t = c_int; +pub type nfds_t = c_ulong; +pub type tcflag_t = c_uint; +pub type speed_t = c_uchar; +pub type clock_t = i32; +pub type clockid_t = i32; +pub type suseconds_t = i32; +pub type wchar_t = i32; +pub type off_t = i64; +pub type ino_t = i64; +pub type blkcnt_t = i64; +pub type blksize_t = i32; +pub type dev_t = i32; +pub type mode_t = u32; +pub type nlink_t = i32; +pub type useconds_t = u32; +pub type socklen_t = u32; +pub type pthread_t = crate::uintptr_t; +pub type pthread_condattr_t = crate::uintptr_t; +pub type pthread_mutexattr_t = crate::uintptr_t; +pub type pthread_rwlockattr_t = crate::uintptr_t; +pub type sigset_t = u64; +pub type fsblkcnt_t = i64; +pub type fsfilcnt_t = i64; +pub type pthread_attr_t = *mut c_void; +pub type nl_item = c_int; +pub type id_t = i32; +pub type idtype_t = c_int; +pub type fd_mask = u32; +pub type regoff_t = c_int; +pub type key_t = i32; +pub type msgqnum_t = u32; +pub type msglen_t = u32; + +pub type Elf32_Addr = u32; +pub type Elf32_Half = u16; +pub type Elf32_Off = u32; +pub type Elf32_Sword = i32; +pub type Elf32_Word = u32; + +pub type Elf64_Addr = u64; +pub type Elf64_Half = u16; +pub type Elf64_Off = u64; +pub type Elf64_Sword = i32; +pub type Elf64_Sxword = i64; +pub type Elf64_Word = u32; +pub type Elf64_Xword = u64; + +pub type ENTRY = entry; +pub type ACTION = c_int; + +pub type posix_spawnattr_t = *mut c_void; +pub type posix_spawn_file_actions_t = *mut c_void; + +#[derive(Debug)] +pub enum timezone {} +impl Copy for timezone {} +impl Clone for timezone { + fn clone(&self) -> timezone { + *self + } +} + +impl siginfo_t { + pub unsafe fn si_addr(&self) -> *mut c_void { + self.si_addr + } + + pub unsafe fn si_pid(&self) -> crate::pid_t { + self.si_pid + } + + pub unsafe fn si_uid(&self) -> crate::uid_t { + self.si_uid + } + + pub unsafe fn si_status(&self) -> c_int { + self.si_status + } +} + +s! 
{ + pub struct in_addr { + pub s_addr: crate::in_addr_t, + } + + pub struct ip_mreq { + pub imr_multiaddr: in_addr, + pub imr_interface: in_addr, + } + + pub struct sockaddr { + pub sa_len: u8, + pub sa_family: sa_family_t, + pub sa_data: [u8; 30], + } + + pub struct sockaddr_in { + pub sin_len: u8, + pub sin_family: sa_family_t, + pub sin_port: crate::in_port_t, + pub sin_addr: crate::in_addr, + pub sin_zero: [i8; 24], + } + + pub struct sockaddr_in6 { + pub sin6_len: u8, + pub sin6_family: u8, + pub sin6_port: u16, + pub sin6_flowinfo: u32, + pub sin6_addr: crate::in6_addr, + pub sin6_scope_id: u32, + } + + pub struct addrinfo { + pub ai_flags: c_int, + pub ai_family: c_int, + pub ai_socktype: c_int, + pub ai_protocol: c_int, + pub ai_addrlen: socklen_t, + pub ai_canonname: *mut c_char, + pub ai_addr: *mut crate::sockaddr, + pub ai_next: *mut addrinfo, + } + + pub struct ifaddrs { + pub ifa_next: *mut ifaddrs, + pub ifa_name: *const c_char, + pub ifa_flags: c_uint, + pub ifa_addr: *mut crate::sockaddr, + pub ifa_netmask: *mut crate::sockaddr, + pub ifa_dstaddr: *mut crate::sockaddr, + pub ifa_data: *mut c_void, + } + + pub struct fd_set { + // size for 1024 bits, and a fd_mask with size u32 + fds_bits: [fd_mask; 32], + } + + pub struct tm { + pub tm_sec: c_int, + pub tm_min: c_int, + pub tm_hour: c_int, + pub tm_mday: c_int, + pub tm_mon: c_int, + pub tm_year: c_int, + pub tm_wday: c_int, + pub tm_yday: c_int, + pub tm_isdst: c_int, + pub tm_gmtoff: c_int, + pub tm_zone: *mut c_char, + } + + pub struct utsname { + pub sysname: [c_char; 32], + pub nodename: [c_char; 32], + pub release: [c_char; 32], + pub version: [c_char; 32], + pub machine: [c_char; 32], + } + + pub struct lconv { + pub decimal_point: *mut c_char, + pub thousands_sep: *mut c_char, + pub grouping: *mut c_char, + pub int_curr_symbol: *mut c_char, + pub currency_symbol: *mut c_char, + pub mon_decimal_point: *mut c_char, + pub mon_thousands_sep: *mut c_char, + pub mon_grouping: *mut c_char, + pub positive_sign: *mut c_char, + pub negative_sign: *mut c_char, + pub int_frac_digits: c_char, + pub frac_digits: c_char, + pub p_cs_precedes: c_char, + pub p_sep_by_space: c_char, + pub n_cs_precedes: c_char, + pub n_sep_by_space: c_char, + pub p_sign_posn: c_char, + pub n_sign_posn: c_char, + pub int_p_cs_precedes: c_char, + pub int_p_sep_by_space: c_char, + pub int_n_cs_precedes: c_char, + pub int_n_sep_by_space: c_char, + pub int_p_sign_posn: c_char, + pub int_n_sign_posn: c_char, + } + + pub struct msghdr { + pub msg_name: *mut c_void, + pub msg_namelen: socklen_t, + pub msg_iov: *mut crate::iovec, + pub msg_iovlen: c_int, + pub msg_control: *mut c_void, + pub msg_controllen: socklen_t, + pub msg_flags: c_int, + } + + pub struct cmsghdr { + pub cmsg_len: crate::socklen_t, + pub cmsg_level: c_int, + pub cmsg_type: c_int, + } + + pub struct Dl_info { + pub dli_fname: *const c_char, + pub dli_fbase: *mut c_void, + pub dli_sname: *const c_char, + pub dli_saddr: *mut c_void, + } + + pub struct termios { + pub c_iflag: crate::tcflag_t, + pub c_oflag: crate::tcflag_t, + pub c_cflag: crate::tcflag_t, + pub c_lflag: crate::tcflag_t, + pub c_line: c_char, + pub c_ispeed: crate::speed_t, + pub c_ospeed: crate::speed_t, + pub c_cc: [crate::cc_t; crate::NCCS], + } + + pub struct flock { + pub l_type: c_short, + pub l_whence: c_short, + pub l_start: off_t, + pub l_len: off_t, + pub l_pid: crate::pid_t, + } + + pub struct stat { + pub st_dev: dev_t, + pub st_ino: ino_t, + pub st_mode: mode_t, + pub st_nlink: nlink_t, + pub st_uid: 
crate::uid_t, + pub st_gid: crate::gid_t, + pub st_size: off_t, + pub st_rdev: dev_t, + pub st_blksize: blksize_t, + pub st_atime: time_t, + pub st_atime_nsec: c_long, + pub st_mtime: time_t, + pub st_mtime_nsec: c_long, + pub st_ctime: time_t, + pub st_ctime_nsec: c_long, + pub st_crtime: time_t, + pub st_crtime_nsec: c_long, + pub st_type: u32, + pub st_blocks: blkcnt_t, + } + + pub struct glob_t { + pub gl_pathc: size_t, + __unused1: size_t, + pub gl_offs: size_t, + __unused2: size_t, + pub gl_pathv: *mut *mut c_char, + + __unused3: *mut c_void, + __unused4: *mut c_void, + __unused5: *mut c_void, + __unused6: *mut c_void, + __unused7: *mut c_void, + __unused8: *mut c_void, + } + + pub struct pthread_mutex_t { + flags: u32, + lock: i32, + unused: i32, + owner: i32, + owner_count: i32, + } + + pub struct pthread_cond_t { + flags: u32, + unused: i32, + mutex: *mut c_void, + waiter_count: i32, + lock: i32, + } + + pub struct pthread_rwlock_t { + flags: u32, + owner: i32, + lock_sem: i32, // this is actually a union + lock_count: i32, + reader_count: i32, + writer_count: i32, + waiters: [*mut c_void; 2], + } + + pub struct pthread_spinlock_t { + lock: u32, + } + + pub struct passwd { + pub pw_name: *mut c_char, + pub pw_passwd: *mut c_char, + pub pw_uid: crate::uid_t, + pub pw_gid: crate::gid_t, + pub pw_dir: *mut c_char, + pub pw_shell: *mut c_char, + pub pw_gecos: *mut c_char, + } + + pub struct statvfs { + pub f_bsize: c_ulong, + pub f_frsize: c_ulong, + pub f_blocks: crate::fsblkcnt_t, + pub f_bfree: crate::fsblkcnt_t, + pub f_bavail: crate::fsblkcnt_t, + pub f_files: crate::fsfilcnt_t, + pub f_ffree: crate::fsfilcnt_t, + pub f_favail: crate::fsfilcnt_t, + pub f_fsid: c_ulong, + pub f_flag: c_ulong, + pub f_namemax: c_ulong, + } + + pub struct stack_t { + pub ss_sp: *mut c_void, + pub ss_size: size_t, + pub ss_flags: c_int, + } + + pub struct siginfo_t { + pub si_signo: c_int, + pub si_code: c_int, + pub si_errno: c_int, + pub si_pid: crate::pid_t, + pub si_uid: crate::uid_t, + pub si_addr: *mut c_void, + pub si_status: c_int, + pub si_band: c_long, + pub sigval: *mut c_void, + } + + pub struct sigaction { + pub sa_sigaction: crate::sighandler_t, //actually a union with sa_handler + pub sa_mask: crate::sigset_t, + pub sa_flags: c_int, + sa_userdata: *mut c_void, + } + + pub struct sem_t { + pub type_: i32, + pub named_sem_id: i32, // actually a union with unnamed_sem (i32) + pub padding: [i32; 2], + } + + pub struct ucred { + pub pid: crate::pid_t, + pub uid: crate::uid_t, + pub gid: crate::gid_t, + } + + pub struct sockaddr_dl { + pub sdl_len: u8, + pub sdl_family: u8, + pub sdl_e_type: u16, + pub sdl_index: u32, + pub sdl_type: u8, + pub sdl_nlen: u8, + pub sdl_alen: u8, + pub sdl_slen: u8, + pub sdl_data: [u8; 46], + } + + pub struct spwd { + pub sp_namp: *mut c_char, + pub sp_pwdp: *mut c_char, + pub sp_lstchg: c_int, + pub sp_min: c_int, + pub sp_max: c_int, + pub sp_warn: c_int, + pub sp_inact: c_int, + pub sp_expire: c_int, + pub sp_flag: c_int, + } + + pub struct regex_t { + __buffer: *mut c_void, + __allocated: size_t, + __used: size_t, + __syntax: c_ulong, + __fastmap: *mut c_char, + __translate: *mut c_char, + __re_nsub: size_t, + __bitfield: u8, + } + + pub struct regmatch_t { + pub rm_so: regoff_t, + pub rm_eo: regoff_t, + } + + pub struct msqid_ds { + pub msg_perm: crate::ipc_perm, + pub msg_qnum: crate::msgqnum_t, + pub msg_qbytes: crate::msglen_t, + pub msg_lspid: crate::pid_t, + pub msg_lrpid: crate::pid_t, + pub msg_stime: crate::time_t, + pub msg_rtime: crate::time_t, 
+ pub msg_ctime: crate::time_t, + } + + pub struct ipc_perm { + pub key: crate::key_t, + pub uid: crate::uid_t, + pub gid: crate::gid_t, + pub cuid: crate::uid_t, + pub cgid: crate::gid_t, + pub mode: mode_t, + } + + pub struct sembuf { + pub sem_num: c_ushort, + pub sem_op: c_short, + pub sem_flg: c_short, + } + + pub struct entry { + pub key: *mut c_char, + pub data: *mut c_void, + } + + pub struct option { + pub name: *const c_char, + pub has_arg: c_int, + pub flag: *mut c_int, + pub val: c_int, + } +} + +s_no_extra_traits! { + pub struct sockaddr_un { + pub sun_len: u8, + pub sun_family: sa_family_t, + pub sun_path: [c_char; 126], + } + pub struct sockaddr_storage { + pub ss_len: u8, + pub ss_family: sa_family_t, + __ss_pad1: [u8; 6], + __ss_pad2: u64, + __ss_pad3: [u8; 112], + } + pub struct dirent { + pub d_dev: dev_t, + pub d_pdev: dev_t, + pub d_ino: ino_t, + pub d_pino: i64, + pub d_reclen: c_ushort, + pub d_name: [c_char; 1024], // Max length is _POSIX_PATH_MAX + } + + pub struct sigevent { + pub sigev_notify: c_int, + pub sigev_signo: c_int, + pub sigev_value: crate::sigval, + __unused1: *mut c_void, // actually a function pointer + pub sigev_notify_attributes: *mut crate::pthread_attr_t, + } + + pub struct utmpx { + pub ut_type: c_short, + pub ut_tv: crate::timeval, + pub ut_id: [c_char; 8], + pub ut_pid: crate::pid_t, + pub ut_user: [c_char; 32], + pub ut_line: [c_char; 16], + pub ut_host: [c_char; 128], + __ut_reserved: [c_char; 64], + } +} + +cfg_if! { + if #[cfg(feature = "extra_traits")] { + impl PartialEq for utmpx { + fn eq(&self, other: &utmpx) -> bool { + self.ut_type == other.ut_type + && self.ut_tv == other.ut_tv + && self.ut_id == other.ut_id + && self.ut_pid == other.ut_pid + && self.ut_user == other.ut_user + && self.ut_line == other.ut_line + && self + .ut_host + .iter() + .zip(other.ut_host.iter()) + .all(|(a, b)| a == b) + && self.__ut_reserved == other.__ut_reserved + } + } + + impl Eq for utmpx {} + impl hash::Hash for utmpx { + fn hash<H: hash::Hasher>(&self, state: &mut H) { + self.ut_type.hash(state); + self.ut_tv.hash(state); + self.ut_id.hash(state); + self.ut_pid.hash(state); + self.ut_user.hash(state); + self.ut_line.hash(state); + self.ut_host.hash(state); + self.__ut_reserved.hash(state); + } + } + impl PartialEq for sockaddr_un { + fn eq(&self, other: &sockaddr_un) -> bool { + self.sun_len == other.sun_len + && self.sun_family == other.sun_family + && self + .sun_path + .iter() + .zip(other.sun_path.iter()) + .all(|(a, b)| a == b) + } + } + impl Eq for sockaddr_un {} + impl hash::Hash for sockaddr_un { + fn hash<H: hash::Hasher>(&self, state: &mut H) { + self.sun_len.hash(state); + self.sun_family.hash(state); + self.sun_path.hash(state); + } + } + + impl PartialEq for sockaddr_storage { + fn eq(&self, other: &sockaddr_storage) -> bool { + self.ss_len == other.ss_len + && self.ss_family == other.ss_family + && self + .__ss_pad1 + .iter() + .zip(other.__ss_pad1.iter()) + .all(|(a, b)| a == b) + && self.__ss_pad2 == other.__ss_pad2 + && self + .__ss_pad3 + .iter() + .zip(other.__ss_pad3.iter()) + .all(|(a, b)| a == b) + } + } + impl Eq for sockaddr_storage {} + impl hash::Hash for sockaddr_storage { + fn hash<H: hash::Hasher>(&self, state: &mut H) { + self.ss_len.hash(state); + self.ss_family.hash(state); + self.__ss_pad1.hash(state); + self.__ss_pad2.hash(state); + self.__ss_pad3.hash(state); + } + } + + impl PartialEq for dirent { + fn eq(&self, other: &dirent) -> bool { + self.d_dev == other.d_dev + && self.d_pdev == other.d_pdev + && self.d_ino == other.d_ino + && self.d_pino == other.d_pino + &&
self.d_reclen == other.d_reclen + && self + .d_name + .iter() + .zip(other.d_name.iter()) + .all(|(a, b)| a == b) + } + } + impl Eq for dirent {} + impl hash::Hash for dirent { + fn hash(&self, state: &mut H) { + self.d_dev.hash(state); + self.d_pdev.hash(state); + self.d_ino.hash(state); + self.d_pino.hash(state); + self.d_reclen.hash(state); + self.d_name.hash(state); + } + } + + impl PartialEq for sigevent { + fn eq(&self, other: &sigevent) -> bool { + self.sigev_notify == other.sigev_notify + && self.sigev_signo == other.sigev_signo + && self.sigev_value == other.sigev_value + && self.sigev_notify_attributes == other.sigev_notify_attributes + } + } + impl Eq for sigevent {} + impl hash::Hash for sigevent { + fn hash(&self, state: &mut H) { + self.sigev_notify.hash(state); + self.sigev_signo.hash(state); + self.sigev_value.hash(state); + self.sigev_notify_attributes.hash(state); + } + } + } +} + +pub const EXIT_FAILURE: c_int = 1; +pub const EXIT_SUCCESS: c_int = 0; +pub const RAND_MAX: c_int = 2147483647; +pub const EOF: c_int = -1; +pub const SEEK_SET: c_int = 0; +pub const SEEK_CUR: c_int = 1; +pub const SEEK_END: c_int = 2; +pub const L_SET: c_int = SEEK_SET; +pub const L_INCR: c_int = SEEK_CUR; +pub const L_XTND: c_int = SEEK_END; +pub const _IOFBF: c_int = 0; +pub const _IONBF: c_int = 2; +pub const _IOLBF: c_int = 1; + +pub const F_DUPFD: c_int = 0x0001; +pub const F_GETFD: c_int = 0x0002; +pub const F_SETFD: c_int = 0x0004; +pub const F_GETFL: c_int = 0x0008; +pub const F_SETFL: c_int = 0x0010; +pub const F_GETLK: c_int = 0x0020; +pub const F_SETLK: c_int = 0x0080; +pub const F_SETLKW: c_int = 0x0100; +pub const F_DUPFD_CLOEXEC: c_int = 0x0200; + +pub const F_RDLCK: c_int = 0x0040; +pub const F_UNLCK: c_int = 0x0200; +pub const F_WRLCK: c_int = 0x0400; + +pub const AT_FDCWD: c_int = -100; +pub const AT_SYMLINK_NOFOLLOW: c_int = 0x01; +pub const AT_SYMLINK_FOLLOW: c_int = 0x02; +pub const AT_REMOVEDIR: c_int = 0x04; +pub const AT_EACCESS: c_int = 0x08; + +pub const POLLIN: c_short = 0x0001; +pub const POLLOUT: c_short = 0x0002; +pub const POLLRDNORM: c_short = POLLIN; +pub const POLLWRNORM: c_short = POLLOUT; +pub const POLLRDBAND: c_short = 0x0008; +pub const POLLWRBAND: c_short = 0x0010; +pub const POLLPRI: c_short = 0x0020; +pub const POLLERR: c_short = 0x0004; +pub const POLLHUP: c_short = 0x0080; +pub const POLLNVAL: c_short = 0x1000; + +pub const PTHREAD_CREATE_JOINABLE: c_int = 0; +pub const PTHREAD_CREATE_DETACHED: c_int = 1; + +pub const CLOCK_REALTIME: c_int = -1; +pub const CLOCK_MONOTONIC: c_int = 0; +pub const CLOCK_PROCESS_CPUTIME_ID: c_int = -2; +pub const CLOCK_THREAD_CPUTIME_ID: c_int = -3; + +pub const RLIMIT_CORE: c_int = 0; +pub const RLIMIT_CPU: c_int = 1; +pub const RLIMIT_DATA: c_int = 2; +pub const RLIMIT_FSIZE: c_int = 3; +pub const RLIMIT_NOFILE: c_int = 4; +pub const RLIMIT_STACK: c_int = 5; +pub const RLIMIT_AS: c_int = 6; +pub const RLIM_INFINITY: crate::rlim_t = 0xffffffff; +// Haiku specific +pub const RLIMIT_NOVMON: c_int = 7; +#[deprecated(since = "0.2.64", note = "Not stable across OS versions")] +pub const RLIM_NLIMITS: c_int = 8; + +pub const RUSAGE_SELF: c_int = 0; + +pub const RTLD_LAZY: c_int = 0; + +pub const NCCS: usize = 11; + +pub const O_RDONLY: c_int = 0x0000; +pub const O_WRONLY: c_int = 0x0001; +pub const O_RDWR: c_int = 0x0002; +pub const O_ACCMODE: c_int = 0x0003; + +pub const O_EXCL: c_int = 0x0100; +pub const O_CREAT: c_int = 0x0200; +pub const O_TRUNC: c_int = 0x0400; +pub const O_NOCTTY: c_int = 0x1000; +pub const O_NOTRAVERSE: 
c_int = 0x2000; + +pub const O_CLOEXEC: c_int = 0x00000040; +pub const O_NONBLOCK: c_int = 0x00000080; +pub const O_APPEND: c_int = 0x00000800; +pub const O_SYNC: c_int = 0x00010000; +pub const O_RSYNC: c_int = 0x00020000; +pub const O_DSYNC: c_int = 0x00040000; +pub const O_NOFOLLOW: c_int = 0x00080000; +pub const O_NOCACHE: c_int = 0x00100000; +pub const O_DIRECTORY: c_int = 0x00200000; + +pub const S_IFIFO: mode_t = 0o1_0000; +pub const S_IFCHR: mode_t = 0o2_0000; +pub const S_IFBLK: mode_t = 0o6_0000; +pub const S_IFDIR: mode_t = 0o4_0000; +pub const S_IFREG: mode_t = 0o10_0000; +pub const S_IFLNK: mode_t = 0o12_0000; +pub const S_IFSOCK: mode_t = 0o14_0000; +pub const S_IFMT: mode_t = 0o17_0000; + +pub const S_IRWXU: mode_t = 0o0700; +pub const S_IRUSR: mode_t = 0o0400; +pub const S_IWUSR: mode_t = 0o0200; +pub const S_IXUSR: mode_t = 0o0100; +pub const S_IRWXG: mode_t = 0o0070; +pub const S_IRGRP: mode_t = 0o0040; +pub const S_IWGRP: mode_t = 0o0020; +pub const S_IXGRP: mode_t = 0o0010; +pub const S_IRWXO: mode_t = 0o0007; +pub const S_IROTH: mode_t = 0o0004; +pub const S_IWOTH: mode_t = 0o0002; +pub const S_IXOTH: mode_t = 0o0001; + +pub const F_OK: c_int = 0; +pub const R_OK: c_int = 4; +pub const W_OK: c_int = 2; +pub const X_OK: c_int = 1; +pub const STDIN_FILENO: c_int = 0; +pub const STDOUT_FILENO: c_int = 1; +pub const STDERR_FILENO: c_int = 2; + +pub const SIGHUP: c_int = 1; +pub const SIGINT: c_int = 2; +pub const SIGQUIT: c_int = 3; +pub const SIGILL: c_int = 4; +pub const SIGCHLD: c_int = 5; +pub const SIGABRT: c_int = 6; +pub const SIGPIPE: c_int = 7; +pub const SIGFPE: c_int = 8; +pub const SIGKILL: c_int = 9; +pub const SIGSTOP: c_int = 10; +pub const SIGSEGV: c_int = 11; +pub const SIGCONT: c_int = 12; +pub const SIGTSTP: c_int = 13; +pub const SIGALRM: c_int = 14; +pub const SIGTERM: c_int = 15; +pub const SIGTTIN: c_int = 16; +pub const SIGTTOU: c_int = 17; +pub const SIGUSR1: c_int = 18; +pub const SIGUSR2: c_int = 19; +pub const SIGWINCH: c_int = 20; +pub const SIGKILLTHR: c_int = 21; +pub const SIGTRAP: c_int = 22; +pub const SIGPOLL: c_int = 23; +pub const SIGPROF: c_int = 24; +pub const SIGSYS: c_int = 25; +pub const SIGURG: c_int = 26; +pub const SIGVTALRM: c_int = 27; +pub const SIGXCPU: c_int = 28; +pub const SIGXFSZ: c_int = 29; +pub const SIGBUS: c_int = 30; + +pub const SIG_BLOCK: c_int = 1; +pub const SIG_UNBLOCK: c_int = 2; +pub const SIG_SETMASK: c_int = 3; + +pub const SIGEV_NONE: c_int = 0; +pub const SIGEV_SIGNAL: c_int = 1; +pub const SIGEV_THREAD: c_int = 2; + +pub const EAI_AGAIN: c_int = 2; +pub const EAI_BADFLAGS: c_int = 3; +pub const EAI_FAIL: c_int = 4; +pub const EAI_FAMILY: c_int = 5; +pub const EAI_MEMORY: c_int = 6; +pub const EAI_NODATA: c_int = 7; +pub const EAI_NONAME: c_int = 8; +pub const EAI_SERVICE: c_int = 9; +pub const EAI_SOCKTYPE: c_int = 10; +pub const EAI_SYSTEM: c_int = 11; +pub const EAI_OVERFLOW: c_int = 14; + +pub const PROT_NONE: c_int = 0; +pub const PROT_READ: c_int = 1; +pub const PROT_WRITE: c_int = 2; +pub const PROT_EXEC: c_int = 4; + +pub const LC_ALL: c_int = 0; +pub const LC_COLLATE: c_int = 1; +pub const LC_CTYPE: c_int = 2; +pub const LC_MONETARY: c_int = 3; +pub const LC_NUMERIC: c_int = 4; +pub const LC_TIME: c_int = 5; +pub const LC_MESSAGES: c_int = 6; + +// FIXME(haiku): Haiku does not have MAP_FILE, but library/std/os.rs requires it +pub const MAP_FILE: c_int = 0x00; +pub const MAP_SHARED: c_int = 0x01; +pub const MAP_PRIVATE: c_int = 0x02; +pub const MAP_FIXED: c_int = 0x04; +pub const MAP_ANONYMOUS: 
c_int = 0x08; +pub const MAP_NORESERVE: c_int = 0x10; +pub const MAP_ANON: c_int = MAP_ANONYMOUS; + +pub const MAP_FAILED: *mut c_void = !0 as *mut c_void; + +pub const MS_ASYNC: c_int = 0x01; +pub const MS_INVALIDATE: c_int = 0x04; +pub const MS_SYNC: c_int = 0x02; + +pub const E2BIG: c_int = -2147454975; +pub const ECHILD: c_int = -2147454974; +pub const EDEADLK: c_int = -2147454973; +pub const EFBIG: c_int = -2147454972; +pub const EMLINK: c_int = -2147454971; +pub const ENFILE: c_int = -2147454970; +pub const ENODEV: c_int = -2147454969; +pub const ENOLCK: c_int = -2147454968; +pub const ENOSYS: c_int = -2147454967; +pub const ENOTTY: c_int = -2147454966; +pub const ENXIO: c_int = -2147454965; +pub const ESPIPE: c_int = -2147454964; +pub const ESRCH: c_int = -2147454963; +pub const EFPOS: c_int = -2147454962; +pub const ESIGPARM: c_int = -2147454961; +pub const EDOM: c_int = -2147454960; +pub const ERANGE: c_int = -2147454959; +pub const EPROTOTYPE: c_int = -2147454958; +pub const EPROTONOSUPPORT: c_int = -2147454957; +pub const EPFNOSUPPORT: c_int = -2147454956; +pub const EAFNOSUPPORT: c_int = -2147454955; +pub const EADDRINUSE: c_int = -2147454954; +pub const EADDRNOTAVAIL: c_int = -2147454953; +pub const ENETDOWN: c_int = -2147454952; +pub const ENETUNREACH: c_int = -2147454951; +pub const ENETRESET: c_int = -2147454950; +pub const ECONNABORTED: c_int = -2147454949; +pub const ECONNRESET: c_int = -2147454948; +pub const EISCONN: c_int = -2147454947; +pub const ENOTCONN: c_int = -2147454946; +pub const ESHUTDOWN: c_int = -2147454945; +pub const ECONNREFUSED: c_int = -2147454944; +pub const EHOSTUNREACH: c_int = -2147454943; +pub const ENOPROTOOPT: c_int = -2147454942; +pub const ENOBUFS: c_int = -2147454941; +pub const EINPROGRESS: c_int = -2147454940; +pub const EALREADY: c_int = -2147454939; +pub const EILSEQ: c_int = -2147454938; +pub const ENOMSG: c_int = -2147454937; +pub const ESTALE: c_int = -2147454936; +pub const EOVERFLOW: c_int = -2147454935; +pub const EMSGSIZE: c_int = -2147454934; +pub const EOPNOTSUPP: c_int = -2147454933; +pub const ENOTSOCK: c_int = -2147454932; +pub const EHOSTDOWN: c_int = -2147454931; +pub const EBADMSG: c_int = -2147454930; +pub const ECANCELED: c_int = -2147454929; +pub const EDESTADDRREQ: c_int = -2147454928; +pub const EDQUOT: c_int = -2147454927; +pub const EIDRM: c_int = -2147454926; +pub const EMULTIHOP: c_int = -2147454925; +pub const ENODATA: c_int = -2147454924; +pub const ENOLINK: c_int = -2147454923; +pub const ENOSR: c_int = -2147454922; +pub const ENOSTR: c_int = -2147454921; +pub const ENOTSUP: c_int = -2147454920; +pub const EPROTO: c_int = -2147454919; +pub const ETIME: c_int = -2147454918; +pub const ETXTBSY: c_int = -2147454917; +pub const ENOATTR: c_int = -2147454916; + +// INT_MIN +pub const ENOMEM: c_int = -2147483648; + +// POSIX errors that can be mapped to BeOS error codes +pub const EACCES: c_int = -2147483646; +pub const EINTR: c_int = -2147483638; +pub const EIO: c_int = -2147483647; +pub const EBUSY: c_int = -2147483634; +pub const EFAULT: c_int = -2147478783; +pub const ETIMEDOUT: c_int = -2147483639; +pub const EAGAIN: c_int = -2147483637; +pub const EWOULDBLOCK: c_int = -2147483637; +pub const EBADF: c_int = -2147459072; +pub const EEXIST: c_int = -2147459070; +pub const EINVAL: c_int = -2147483643; +pub const ENAMETOOLONG: c_int = -2147459068; +pub const ENOENT: c_int = -2147459069; +pub const EPERM: c_int = -2147483633; +pub const ENOTDIR: c_int = -2147459067; +pub const EISDIR: c_int = -2147459063; +pub const 
ENOTEMPTY: c_int = -2147459066; +pub const ENOSPC: c_int = -2147459065; +pub const EROFS: c_int = -2147459064; +pub const EMFILE: c_int = -2147459062; +pub const EXDEV: c_int = -2147459061; +pub const ELOOP: c_int = -2147459060; +pub const ENOEXEC: c_int = -2147478782; +pub const EPIPE: c_int = -2147459059; + +pub const IPPROTO_RAW: c_int = 255; + +// These are prefixed with POSIX_ on Haiku +pub const MADV_NORMAL: c_int = 1; +pub const MADV_SEQUENTIAL: c_int = 2; +pub const MADV_RANDOM: c_int = 3; +pub const MADV_WILLNEED: c_int = 4; +pub const MADV_DONTNEED: c_int = 5; +pub const MADV_FREE: c_int = 6; + +// https://github.com/haiku/haiku/blob/HEAD/headers/posix/net/if.h#L80 +pub const IFF_UP: c_int = 0x0001; +pub const IFF_BROADCAST: c_int = 0x0002; // valid broadcast address +pub const IFF_LOOPBACK: c_int = 0x0008; +pub const IFF_POINTOPOINT: c_int = 0x0010; // point-to-point link +pub const IFF_NOARP: c_int = 0x0040; // no address resolution +pub const IFF_AUTOUP: c_int = 0x0080; // auto dial +pub const IFF_PROMISC: c_int = 0x0100; // receive all packets +pub const IFF_ALLMULTI: c_int = 0x0200; // receive all multicast packets +pub const IFF_SIMPLEX: c_int = 0x0800; // doesn't receive own transmissions +pub const IFF_LINK: c_int = 0x1000; // has link +pub const IFF_AUTO_CONFIGURED: c_int = 0x2000; +pub const IFF_CONFIGURING: c_int = 0x4000; +pub const IFF_MULTICAST: c_int = 0x8000; // supports multicast + +pub const AF_UNSPEC: c_int = 0; +pub const AF_INET: c_int = 1; +pub const AF_APPLETALK: c_int = 2; +pub const AF_ROUTE: c_int = 3; +pub const AF_LINK: c_int = 4; +pub const AF_INET6: c_int = 5; +pub const AF_DLI: c_int = 6; +pub const AF_IPX: c_int = 7; +pub const AF_NOTIFY: c_int = 8; +pub const AF_LOCAL: c_int = 9; +pub const AF_UNIX: c_int = AF_LOCAL; +pub const AF_BLUETOOTH: c_int = 10; + +pub const PF_UNSPEC: c_int = AF_UNSPEC; +pub const PF_INET: c_int = AF_INET; +pub const PF_ROUTE: c_int = AF_ROUTE; +pub const PF_LINK: c_int = AF_LINK; +pub const PF_INET6: c_int = AF_INET6; +pub const PF_LOCAL: c_int = AF_LOCAL; +pub const PF_UNIX: c_int = AF_UNIX; +pub const PF_BLUETOOTH: c_int = AF_BLUETOOTH; + +pub const IP_OPTIONS: c_int = 1; +pub const IP_HDRINCL: c_int = 2; +pub const IP_TOS: c_int = 3; +pub const IP_TTL: c_int = 4; +pub const IP_RECVOPTS: c_int = 5; +pub const IP_RECVRETOPTS: c_int = 6; +pub const IP_RECVDSTADDR: c_int = 7; +pub const IP_RETOPTS: c_int = 8; +pub const IP_MULTICAST_IF: c_int = 9; +pub const IP_MULTICAST_TTL: c_int = 10; +pub const IP_MULTICAST_LOOP: c_int = 11; +pub const IP_ADD_MEMBERSHIP: c_int = 12; +pub const IP_DROP_MEMBERSHIP: c_int = 13; +pub const IP_BLOCK_SOURCE: c_int = 14; +pub const IP_UNBLOCK_SOURCE: c_int = 15; +pub const IP_ADD_SOURCE_MEMBERSHIP: c_int = 16; +pub const IP_DROP_SOURCE_MEMBERSHIP: c_int = 17; + +pub const TCP_NODELAY: c_int = 0x01; +pub const TCP_MAXSEG: c_int = 0x02; +pub const TCP_NOPUSH: c_int = 0x04; +pub const TCP_NOOPT: c_int = 0x08; + +pub const IF_NAMESIZE: size_t = 32; +pub const IFNAMSIZ: size_t = IF_NAMESIZE; + +pub const IPV6_MULTICAST_IF: c_int = 24; +pub const IPV6_MULTICAST_HOPS: c_int = 25; +pub const IPV6_MULTICAST_LOOP: c_int = 26; +pub const IPV6_UNICAST_HOPS: c_int = 27; +pub const IPV6_JOIN_GROUP: c_int = 28; +pub const IPV6_LEAVE_GROUP: c_int = 29; +pub const IPV6_V6ONLY: c_int = 30; +pub const IPV6_PKTINFO: c_int = 31; +pub const IPV6_RECVPKTINFO: c_int = 32; +pub const IPV6_HOPLIMIT: c_int = 33; +pub const IPV6_RECVHOPLIMIT: c_int = 34; +pub const IPV6_HOPOPTS: c_int = 35; +pub const IPV6_DSTOPTS: 
c_int = 36; +pub const IPV6_RTHDR: c_int = 37; + +pub const MSG_OOB: c_int = 0x0001; +pub const MSG_PEEK: c_int = 0x0002; +pub const MSG_DONTROUTE: c_int = 0x0004; +pub const MSG_EOR: c_int = 0x0008; +pub const MSG_TRUNC: c_int = 0x0010; +pub const MSG_CTRUNC: c_int = 0x0020; +pub const MSG_WAITALL: c_int = 0x0040; +pub const MSG_DONTWAIT: c_int = 0x0080; +pub const MSG_BCAST: c_int = 0x0100; +pub const MSG_MCAST: c_int = 0x0200; +pub const MSG_EOF: c_int = 0x0400; +pub const MSG_NOSIGNAL: c_int = 0x0800; + +pub const SHUT_RD: c_int = 0; +pub const SHUT_WR: c_int = 1; +pub const SHUT_RDWR: c_int = 2; + +pub const LOCK_SH: c_int = 0x01; +pub const LOCK_EX: c_int = 0x02; +pub const LOCK_NB: c_int = 0x04; +pub const LOCK_UN: c_int = 0x08; + +pub const MINSIGSTKSZ: size_t = 8192; +pub const SIGSTKSZ: size_t = 16384; + +pub const IOV_MAX: c_int = 1024; +pub const PATH_MAX: c_int = 1024; + +pub const SA_NOCLDSTOP: c_int = 0x01; +pub const SA_NOCLDWAIT: c_int = 0x02; +pub const SA_RESETHAND: c_int = 0x04; +pub const SA_NODEFER: c_int = 0x08; +pub const SA_RESTART: c_int = 0x10; +pub const SA_ONSTACK: c_int = 0x20; +pub const SA_SIGINFO: c_int = 0x40; +pub const SA_NOMASK: c_int = SA_NODEFER; +pub const SA_STACK: c_int = SA_ONSTACK; +pub const SA_ONESHOT: c_int = SA_RESETHAND; + +pub const SS_ONSTACK: c_int = 0x1; +pub const SS_DISABLE: c_int = 0x2; + +// DIFF(main): changed to `c_int` in 500365e1 +pub const FD_SETSIZE: usize = 1024; + +pub const RTLD_LOCAL: c_int = 0x0; +pub const RTLD_NOW: c_int = 0x1; +pub const RTLD_GLOBAL: c_int = 0x2; +pub const RTLD_DEFAULT: *mut c_void = ptr::null_mut(); + +pub const BUFSIZ: c_uint = 8192; +pub const FILENAME_MAX: c_uint = 256; +pub const FOPEN_MAX: c_uint = 128; +pub const L_tmpnam: c_uint = 512; +pub const TMP_MAX: c_uint = 32768; + +pub const _PC_CHOWN_RESTRICTED: c_int = 1; +pub const _PC_MAX_CANON: c_int = 2; +pub const _PC_MAX_INPUT: c_int = 3; +pub const _PC_NAME_MAX: c_int = 4; +pub const _PC_NO_TRUNC: c_int = 5; +pub const _PC_PATH_MAX: c_int = 6; +pub const _PC_PIPE_BUF: c_int = 7; +pub const _PC_VDISABLE: c_int = 8; +pub const _PC_LINK_MAX: c_int = 25; +pub const _PC_SYNC_IO: c_int = 26; +pub const _PC_ASYNC_IO: c_int = 27; +pub const _PC_PRIO_IO: c_int = 28; +pub const _PC_SOCK_MAXBUF: c_int = 29; +pub const _PC_FILESIZEBITS: c_int = 30; +pub const _PC_REC_INCR_XFER_SIZE: c_int = 31; +pub const _PC_REC_MAX_XFER_SIZE: c_int = 32; +pub const _PC_REC_MIN_XFER_SIZE: c_int = 33; +pub const _PC_REC_XFER_ALIGN: c_int = 34; +pub const _PC_ALLOC_SIZE_MIN: c_int = 35; +pub const _PC_SYMLINK_MAX: c_int = 36; +pub const _PC_2_SYMLINKS: c_int = 37; +pub const _PC_XATTR_EXISTS: c_int = 38; +pub const _PC_XATTR_ENABLED: c_int = 39; + +pub const FIONBIO: c_ulong = 0xbe000000; +pub const FIONREAD: c_ulong = 0xbe000001; +pub const FIOSEEKDATA: c_ulong = 0xbe000002; +pub const FIOSEEKHOLE: c_ulong = 0xbe000003; + +pub const _SC_ARG_MAX: c_int = 15; +pub const _SC_CHILD_MAX: c_int = 16; +pub const _SC_CLK_TCK: c_int = 17; +pub const _SC_JOB_CONTROL: c_int = 18; +pub const _SC_NGROUPS_MAX: c_int = 19; +pub const _SC_OPEN_MAX: c_int = 20; +pub const _SC_SAVED_IDS: c_int = 21; +pub const _SC_STREAM_MAX: c_int = 22; +pub const _SC_TZNAME_MAX: c_int = 23; +pub const _SC_VERSION: c_int = 24; +pub const _SC_GETGR_R_SIZE_MAX: c_int = 25; +pub const _SC_GETPW_R_SIZE_MAX: c_int = 26; +pub const _SC_PAGESIZE: c_int = 27; +pub const _SC_PAGE_SIZE: c_int = 27; +pub const _SC_SEM_NSEMS_MAX: c_int = 28; +pub const _SC_SEM_VALUE_MAX: c_int = 29; +pub const _SC_SEMAPHORES: c_int = 
30; +pub const _SC_THREADS: c_int = 31; +pub const _SC_IOV_MAX: c_int = 32; +pub const _SC_UIO_MAXIOV: c_int = 32; +pub const _SC_NPROCESSORS_CONF: c_int = 34; +pub const _SC_NPROCESSORS_ONLN: c_int = 35; +pub const _SC_ATEXIT_MAX: c_int = 37; +pub const _SC_PASS_MAX: c_int = 39; +pub const _SC_PHYS_PAGES: c_int = 40; +pub const _SC_AVPHYS_PAGES: c_int = 41; +pub const _SC_PIPE: c_int = 42; +pub const _SC_SELECT: c_int = 43; +pub const _SC_POLL: c_int = 44; +pub const _SC_MAPPED_FILES: c_int = 45; +pub const _SC_THREAD_PROCESS_SHARED: c_int = 46; +pub const _SC_THREAD_STACK_MIN: c_int = 47; +pub const _SC_THREAD_ATTR_STACKADDR: c_int = 48; +pub const _SC_THREAD_ATTR_STACKSIZE: c_int = 49; +pub const _SC_THREAD_PRIORITY_SCHEDULING: c_int = 50; +pub const _SC_REALTIME_SIGNALS: c_int = 51; +pub const _SC_MEMORY_PROTECTION: c_int = 52; +pub const _SC_SIGQUEUE_MAX: c_int = 53; +pub const _SC_RTSIG_MAX: c_int = 54; +pub const _SC_MONOTONIC_CLOCK: c_int = 55; +pub const _SC_DELAYTIMER_MAX: c_int = 56; +pub const _SC_TIMER_MAX: c_int = 57; +pub const _SC_TIMERS: c_int = 58; +pub const _SC_CPUTIME: c_int = 59; +pub const _SC_THREAD_CPUTIME: c_int = 60; +pub const _SC_HOST_NAME_MAX: c_int = 61; +pub const _SC_REGEXP: c_int = 62; +pub const _SC_SYMLOOP_MAX: c_int = 63; +pub const _SC_SHELL: c_int = 64; +pub const _SC_TTY_NAME_MAX: c_int = 65; +pub const _SC_ADVISORY_INFO: c_int = 66; +pub const _SC_BARRIERS: c_int = 67; +pub const _SC_CLOCK_SELECTION: c_int = 68; +pub const _SC_FSYNC: c_int = 69; +pub const _SC_IPV6: c_int = 70; +pub const _SC_MEMLOCK: c_int = 71; +pub const _SC_MEMLOCK_RANGE: c_int = 72; +pub const _SC_MESSAGE_PASSING: c_int = 73; +pub const _SC_PRIORITIZED_IO: c_int = 74; +pub const _SC_PRIORITY_SCHEDULING: c_int = 75; +pub const _SC_READER_WRITER_LOCKS: c_int = 76; +pub const _SC_SHARED_MEMORY_OBJECTS: c_int = 77; +pub const _SC_SPAWN: c_int = 78; +pub const _SC_SPIN_LOCKS: c_int = 79; +pub const _SC_SPORADIC_SERVER: c_int = 80; +pub const _SC_SYNCHRONIZED_IO: c_int = 81; +pub const _SC_THREAD_PRIO_INHERIT: c_int = 82; +pub const _SC_THREAD_PRIO_PROTECT: c_int = 83; +pub const _SC_THREAD_ROBUST_PRIO_INHERIT: c_int = 84; +pub const _SC_THREAD_ROBUST_PRIO_PROTECT: c_int = 85; +pub const _SC_THREAD_SAFE_FUNCTIONS: c_int = 86; +pub const _SC_THREAD_SPORADIC_SERVER: c_int = 87; +pub const _SC_TIMEOUTS: c_int = 88; +pub const _SC_TRACE: c_int = 89; +pub const _SC_TRACE_EVENT_FILTER: c_int = 90; +pub const _SC_TRACE_INHERIT: c_int = 91; +pub const _SC_TRACE_LOG: c_int = 92; +pub const _SC_TYPED_MEMORY_OBJECTS: c_int = 93; +pub const _SC_V6_ILP32_OFF32: c_int = 94; +pub const _SC_V6_ILP32_OFFBIG: c_int = 95; +pub const _SC_V6_LP64_OFF64: c_int = 96; +pub const _SC_V6_LPBIG_OFFBIG: c_int = 97; +pub const _SC_V7_ILP32_OFF32: c_int = 98; +pub const _SC_V7_ILP32_OFFBIG: c_int = 99; +pub const _SC_V7_LP64_OFF64: c_int = 100; +pub const _SC_V7_LPBIG_OFFBIG: c_int = 101; +pub const _SC_2_C_BIND: c_int = 102; +pub const _SC_2_C_DEV: c_int = 103; +pub const _SC_2_CHAR_TERM: c_int = 104; +pub const _SC_2_FORT_DEV: c_int = 105; +pub const _SC_2_FORT_RUN: c_int = 106; +pub const _SC_2_LOCALEDEF: c_int = 107; +pub const _SC_2_PBS: c_int = 108; +pub const _SC_2_PBS_ACCOUNTING: c_int = 109; +pub const _SC_2_PBS_CHECKPOINT: c_int = 110; +pub const _SC_2_PBS_LOCATE: c_int = 111; +pub const _SC_2_PBS_MESSAGE: c_int = 112; +pub const _SC_2_PBS_TRACK: c_int = 113; +pub const _SC_2_SW_DEV: c_int = 114; +pub const _SC_2_UPE: c_int = 115; +pub const _SC_2_VERSION: c_int = 116; +pub const _SC_XOPEN_CRYPT: c_int 
= 117; +pub const _SC_XOPEN_ENH_I18N: c_int = 118; +pub const _SC_XOPEN_REALTIME: c_int = 119; +pub const _SC_XOPEN_REALTIME_THREADS: c_int = 120; +pub const _SC_XOPEN_SHM: c_int = 121; +pub const _SC_XOPEN_STREAMS: c_int = 122; +pub const _SC_XOPEN_UNIX: c_int = 123; +pub const _SC_XOPEN_UUCP: c_int = 124; +pub const _SC_XOPEN_VERSION: c_int = 125; +pub const _SC_BC_BASE_MAX: c_int = 129; +pub const _SC_BC_DIM_MAX: c_int = 130; +pub const _SC_BC_SCALE_MAX: c_int = 131; +pub const _SC_BC_STRING_MAX: c_int = 132; +pub const _SC_COLL_WEIGHTS_MAX: c_int = 133; +pub const _SC_EXPR_NEST_MAX: c_int = 134; +pub const _SC_LINE_MAX: c_int = 135; +pub const _SC_LOGIN_NAME_MAX: c_int = 136; +pub const _SC_MQ_OPEN_MAX: c_int = 137; +pub const _SC_MQ_PRIO_MAX: c_int = 138; +pub const _SC_THREAD_DESTRUCTOR_ITERATIONS: c_int = 139; +pub const _SC_THREAD_KEYS_MAX: c_int = 140; +pub const _SC_THREAD_THREADS_MAX: c_int = 141; +pub const _SC_RE_DUP_MAX: c_int = 142; + +pub const PTHREAD_STACK_MIN: size_t = 8192; + +pub const PTHREAD_MUTEX_INITIALIZER: pthread_mutex_t = pthread_mutex_t { + flags: 0, + lock: 0, + unused: -42, + owner: -1, + owner_count: 0, +}; +pub const PTHREAD_COND_INITIALIZER: pthread_cond_t = pthread_cond_t { + flags: 0, + unused: -42, + mutex: 0 as *mut _, + waiter_count: 0, + lock: 0, +}; +pub const PTHREAD_RWLOCK_INITIALIZER: pthread_rwlock_t = pthread_rwlock_t { + flags: 0, + owner: -1, + lock_sem: 0, + lock_count: 0, + reader_count: 0, + writer_count: 0, + waiters: [0 as *mut _; 2], +}; + +pub const PTHREAD_MUTEX_DEFAULT: c_int = 0; +pub const PTHREAD_MUTEX_NORMAL: c_int = 1; +pub const PTHREAD_MUTEX_ERRORCHECK: c_int = 2; +pub const PTHREAD_MUTEX_RECURSIVE: c_int = 3; + +pub const FIOCLEX: c_ulong = 0; // FIXME(haiku): does not exist on Haiku! 
+ +pub const RUSAGE_CHILDREN: c_int = -1; + +pub const SOCK_STREAM: c_int = 1; +pub const SOCK_DGRAM: c_int = 2; +pub const SOCK_RAW: c_int = 3; +pub const SOCK_SEQPACKET: c_int = 5; +pub const SOCK_NONBLOCK: c_int = 0x00040000; +pub const SOCK_CLOEXEC: c_int = 0x00080000; + +pub const SOL_SOCKET: c_int = -1; +pub const SO_ACCEPTCONN: c_int = 0x00000001; +pub const SO_BROADCAST: c_int = 0x00000002; +pub const SO_DEBUG: c_int = 0x00000004; +pub const SO_DONTROUTE: c_int = 0x00000008; +pub const SO_KEEPALIVE: c_int = 0x00000010; +pub const SO_OOBINLINE: c_int = 0x00000020; +pub const SO_REUSEADDR: c_int = 0x00000040; +pub const SO_REUSEPORT: c_int = 0x00000080; +pub const SO_USELOOPBACK: c_int = 0x00000100; +pub const SO_LINGER: c_int = 0x00000200; +pub const SO_SNDBUF: c_int = 0x40000001; +pub const SO_SNDLOWAT: c_int = 0x40000002; +pub const SO_SNDTIMEO: c_int = 0x40000003; +pub const SO_RCVBUF: c_int = 0x40000004; +pub const SO_RCVLOWAT: c_int = 0x40000005; +pub const SO_RCVTIMEO: c_int = 0x40000006; +pub const SO_ERROR: c_int = 0x40000007; +pub const SO_TYPE: c_int = 0x40000008; +pub const SO_NONBLOCK: c_int = 0x40000009; +pub const SO_BINDTODEVICE: c_int = 0x4000000a; +pub const SO_PEERCRED: c_int = 0x4000000b; + +pub const SCM_RIGHTS: c_int = 0x01; + +pub const SOMAXCONN: c_int = 32; + +pub const NI_MAXHOST: size_t = 1025; + +pub const WNOHANG: c_int = 0x01; +pub const WUNTRACED: c_int = 0x02; +pub const WCONTINUED: c_int = 0x04; +pub const WEXITED: c_int = 0x08; +pub const WSTOPPED: c_int = 0x10; +pub const WNOWAIT: c_int = 0x20; + +// si_code values for SIGBUS signal +pub const BUS_ADRALN: c_int = 40; +pub const BUS_ADRERR: c_int = 41; +pub const BUS_OBJERR: c_int = 42; + +// si_code values for SIGCHLD signal +pub const CLD_EXITED: c_int = 60; +pub const CLD_KILLED: c_int = 61; +pub const CLD_DUMPED: c_int = 62; +pub const CLD_TRAPPED: c_int = 63; +pub const CLD_STOPPED: c_int = 64; +pub const CLD_CONTINUED: c_int = 65; + +pub const P_ALL: idtype_t = 0; +pub const P_PID: idtype_t = 1; +pub const P_PGID: idtype_t = 2; + +pub const UTIME_OMIT: c_long = 1000000001; +pub const UTIME_NOW: c_long = 1000000000; + +pub const VINTR: usize = 0; +pub const VQUIT: usize = 1; +pub const VERASE: usize = 2; +pub const VKILL: usize = 3; +pub const VEOF: usize = 4; +pub const VEOL: usize = 5; +pub const VMIN: usize = 4; +pub const VTIME: usize = 5; +pub const VEOL2: usize = 6; +pub const VSWTCH: usize = 7; +pub const VSTART: usize = 8; +pub const VSTOP: usize = 9; +pub const VSUSP: usize = 10; + +pub const IGNBRK: crate::tcflag_t = 0x01; +pub const BRKINT: crate::tcflag_t = 0x02; +pub const IGNPAR: crate::tcflag_t = 0x04; +pub const PARMRK: crate::tcflag_t = 0x08; +pub const INPCK: crate::tcflag_t = 0x10; +pub const ISTRIP: crate::tcflag_t = 0x20; +pub const INLCR: crate::tcflag_t = 0x40; +pub const IGNCR: crate::tcflag_t = 0x80; +pub const ICRNL: crate::tcflag_t = 0x100; +pub const IUCLC: crate::tcflag_t = 0x200; +pub const IXON: crate::tcflag_t = 0x400; +pub const IXANY: crate::tcflag_t = 0x800; +pub const IXOFF: crate::tcflag_t = 0x1000; + +pub const OPOST: crate::tcflag_t = 0x00000001; +pub const OLCUC: crate::tcflag_t = 0x00000002; +pub const ONLCR: crate::tcflag_t = 0x00000004; +pub const OCRNL: crate::tcflag_t = 0x00000008; +pub const ONOCR: crate::tcflag_t = 0x00000010; +pub const ONLRET: crate::tcflag_t = 0x00000020; +pub const OFILL: crate::tcflag_t = 0x00000040; +pub const OFDEL: crate::tcflag_t = 0x00000080; +pub const NLDLY: crate::tcflag_t = 0x00000100; +pub const NL0: crate::tcflag_t = 
0x00000000; +pub const NL1: crate::tcflag_t = 0x00000100; +pub const CRDLY: crate::tcflag_t = 0x00000600; +pub const CR0: crate::tcflag_t = 0x00000000; +pub const CR1: crate::tcflag_t = 0x00000200; +pub const CR2: crate::tcflag_t = 0x00000400; +pub const CR3: crate::tcflag_t = 0x00000600; +pub const TABDLY: crate::tcflag_t = 0x00001800; +pub const TAB0: crate::tcflag_t = 0x00000000; +pub const TAB1: crate::tcflag_t = 0x00000800; +pub const TAB2: crate::tcflag_t = 0x00001000; +pub const TAB3: crate::tcflag_t = 0x00001800; +pub const BSDLY: crate::tcflag_t = 0x00002000; +pub const BS0: crate::tcflag_t = 0x00000000; +pub const BS1: crate::tcflag_t = 0x00002000; +pub const VTDLY: crate::tcflag_t = 0x00004000; +pub const VT0: crate::tcflag_t = 0x00000000; +pub const VT1: crate::tcflag_t = 0x00004000; +pub const FFDLY: crate::tcflag_t = 0x00008000; +pub const FF0: crate::tcflag_t = 0x00000000; +pub const FF1: crate::tcflag_t = 0x00008000; + +pub const CSIZE: crate::tcflag_t = 0x00000020; +pub const CS5: crate::tcflag_t = 0x00000000; +pub const CS6: crate::tcflag_t = 0x00000000; +pub const CS7: crate::tcflag_t = 0x00000000; +pub const CS8: crate::tcflag_t = 0x00000020; +pub const CSTOPB: crate::tcflag_t = 0x00000040; +pub const CREAD: crate::tcflag_t = 0x00000080; +pub const PARENB: crate::tcflag_t = 0x00000100; +pub const PARODD: crate::tcflag_t = 0x00000200; +pub const HUPCL: crate::tcflag_t = 0x00000400; +pub const CLOCAL: crate::tcflag_t = 0x00000800; +pub const XLOBLK: crate::tcflag_t = 0x00001000; +pub const CTSFLOW: crate::tcflag_t = 0x00002000; +pub const RTSFLOW: crate::tcflag_t = 0x00004000; +pub const CRTSCTS: crate::tcflag_t = RTSFLOW | CTSFLOW; + +pub const ISIG: crate::tcflag_t = 0x00000001; +pub const ICANON: crate::tcflag_t = 0x00000002; +pub const XCASE: crate::tcflag_t = 0x00000004; +pub const ECHO: crate::tcflag_t = 0x00000008; +pub const ECHOE: crate::tcflag_t = 0x00000010; +pub const ECHOK: crate::tcflag_t = 0x00000020; +pub const ECHONL: crate::tcflag_t = 0x00000040; +pub const NOFLSH: crate::tcflag_t = 0x00000080; +pub const TOSTOP: crate::tcflag_t = 0x00000100; +pub const IEXTEN: crate::tcflag_t = 0x00000200; +pub const ECHOCTL: crate::tcflag_t = 0x00000400; +pub const ECHOPRT: crate::tcflag_t = 0x00000800; +pub const ECHOKE: crate::tcflag_t = 0x00001000; +pub const FLUSHO: crate::tcflag_t = 0x00002000; +pub const PENDIN: crate::tcflag_t = 0x00004000; + +pub const TCGB_CTS: c_int = 0x01; +pub const TCGB_DSR: c_int = 0x02; +pub const TCGB_RI: c_int = 0x04; +pub const TCGB_DCD: c_int = 0x08; +pub const TIOCM_CTS: c_int = TCGB_CTS; +pub const TIOCM_CD: c_int = TCGB_DCD; +pub const TIOCM_CAR: c_int = TCGB_DCD; +pub const TIOCM_RI: c_int = TCGB_RI; +pub const TIOCM_RNG: c_int = TCGB_RI; +pub const TIOCM_DSR: c_int = TCGB_DSR; +pub const TIOCM_DTR: c_int = 0x10; +pub const TIOCM_RTS: c_int = 0x20; + +pub const B0: speed_t = 0x00; +pub const B50: speed_t = 0x01; +pub const B75: speed_t = 0x02; +pub const B110: speed_t = 0x03; +pub const B134: speed_t = 0x04; +pub const B150: speed_t = 0x05; +pub const B200: speed_t = 0x06; +pub const B300: speed_t = 0x07; +pub const B600: speed_t = 0x08; +pub const B1200: speed_t = 0x09; +pub const B1800: speed_t = 0x0A; +pub const B2400: speed_t = 0x0B; +pub const B4800: speed_t = 0x0C; +pub const B9600: speed_t = 0x0D; +pub const B19200: speed_t = 0x0E; +pub const B38400: speed_t = 0x0F; +pub const B57600: speed_t = 0x10; +pub const B115200: speed_t = 0x11; +pub const B230400: speed_t = 0x12; +pub const B31250: speed_t = 0x13; + +pub const 
TCSANOW: c_int = 0x01;
+pub const TCSADRAIN: c_int = 0x02;
+pub const TCSAFLUSH: c_int = 0x04;
+
+pub const TCOOFF: c_int = 0x01;
+pub const TCOON: c_int = 0x02;
+pub const TCIOFF: c_int = 0x04;
+pub const TCION: c_int = 0x08;
+
+pub const TCIFLUSH: c_int = 0x01;
+pub const TCOFLUSH: c_int = 0x02;
+pub const TCIOFLUSH: c_int = 0x03;
+
+pub const TCGETA: c_ulong = 0x8000;
+pub const TCSETA: c_ulong = TCGETA + 1;
+pub const TCSETAF: c_ulong = TCGETA + 2;
+pub const TCSETAW: c_ulong = TCGETA + 3;
+pub const TCSBRK: c_ulong = TCGETA + 5;
+pub const TCFLSH: c_ulong = TCGETA + 6;
+pub const TCXONC: c_ulong = TCGETA + 7;
+pub const TCGETBITS: c_ulong = TCGETA + 9;
+pub const TCSETDTR: c_ulong = TCGETA + 10;
+pub const TCSETRTS: c_ulong = TCGETA + 11;
+pub const TIOCGWINSZ: c_ulong = TCGETA + 12;
+pub const TIOCSWINSZ: c_ulong = TCGETA + 13;
+pub const TIOCGPGRP: c_ulong = TCGETA + 15;
+pub const TIOCSPGRP: c_ulong = TCGETA + 16;
+pub const TIOCSCTTY: c_ulong = TCGETA + 17;
+pub const TIOCMGET: c_ulong = TCGETA + 18;
+pub const TIOCMSET: c_ulong = TCGETA + 19;
+pub const TIOCSBRK: c_ulong = TCGETA + 20;
+pub const TIOCCBRK: c_ulong = TCGETA + 21;
+pub const TIOCMBIS: c_ulong = TCGETA + 22;
+pub const TIOCMBIC: c_ulong = TCGETA + 23;
+pub const TIOCGSID: c_ulong = TCGETA + 24;
+pub const TIOCOUTQ: c_ulong = TCGETA + 25;
+pub const TIOCEXCL: c_ulong = TCGETA + 26;
+pub const TIOCNXCL: c_ulong = TCGETA + 27;
+
+pub const PRIO_PROCESS: c_int = 0;
+pub const PRIO_PGRP: c_int = 1;
+pub const PRIO_USER: c_int = 2;
+
+// utmpx entry types
+pub const EMPTY: c_short = 0;
+pub const BOOT_TIME: c_short = 1;
+pub const OLD_TIME: c_short = 2;
+pub const NEW_TIME: c_short = 3;
+pub const USER_PROCESS: c_short = 4;
+pub const INIT_PROCESS: c_short = 5;
+pub const LOGIN_PROCESS: c_short = 6;
+pub const DEAD_PROCESS: c_short = 7;
+
+pub const LOG_PID: c_int = 1 << 12;
+pub const LOG_CONS: c_int = 2 << 12;
+pub const LOG_ODELAY: c_int = 4 << 12;
+pub const LOG_NDELAY: c_int = 8 << 12;
+pub const LOG_SERIAL: c_int = 16 << 12;
+pub const LOG_PERROR: c_int = 32 << 12;
+pub const LOG_NOWAIT: c_int = 64 << 12;
+
+// spawn.h
+// DIFF(main): changed to `c_short` in f62eb023ab
+pub const POSIX_SPAWN_RESETIDS: c_int = 0x01;
+pub const POSIX_SPAWN_SETPGROUP: c_int = 0x02;
+pub const POSIX_SPAWN_SETSIGDEF: c_int = 0x10;
+pub const POSIX_SPAWN_SETSIGMASK: c_int = 0x20;
+pub const POSIX_SPAWN_SETSID: c_int = 0x40;
+
+const fn CMSG_ALIGN(len: usize) -> usize {
+    len + size_of::<usize>() - 1 & !(size_of::<usize>() - 1)
+}
+
+f! {
+    pub fn CMSG_FIRSTHDR(mhdr: *const msghdr) -> *mut cmsghdr {
+        if (*mhdr).msg_controllen as usize >= size_of::<cmsghdr>() {
+            (*mhdr).msg_control as *mut cmsghdr
+        } else {
+            core::ptr::null_mut::<cmsghdr>()
+        }
+    }
+
+    pub fn CMSG_DATA(cmsg: *const cmsghdr) -> *mut c_uchar {
+        (cmsg as *mut c_uchar).offset(CMSG_ALIGN(size_of::<cmsghdr>()) as isize)
+    }
+
+    pub const fn CMSG_SPACE(length: c_uint) -> c_uint {
+        (CMSG_ALIGN(length as usize) + CMSG_ALIGN(size_of::<cmsghdr>())) as c_uint
+    }
+
+    pub const fn CMSG_LEN(length: c_uint) -> c_uint {
+        CMSG_ALIGN(size_of::<cmsghdr>()) as c_uint + length
+    }
+
+    pub fn CMSG_NXTHDR(mhdr: *const msghdr, cmsg: *const cmsghdr) -> *mut cmsghdr {
+        if cmsg.is_null() {
+            return crate::CMSG_FIRSTHDR(mhdr);
+        }
+        let next = cmsg as usize
+            + CMSG_ALIGN((*cmsg).cmsg_len as usize)
+            + CMSG_ALIGN(size_of::<cmsghdr>());
+        let max = (*mhdr).msg_control as usize + (*mhdr).msg_controllen as usize;
+        if next > max {
+            core::ptr::null_mut::<cmsghdr>()
+        } else {
+            (cmsg as usize + CMSG_ALIGN((*cmsg).cmsg_len as usize)) as *mut cmsghdr
+        }
+    }
+
+    pub fn FD_CLR(fd: c_int, set: *mut fd_set) -> () {
+        let fd = fd as usize;
+        let size = size_of_val(&(*set).fds_bits[0]) * 8;
+        (*set).fds_bits[fd / size] &= !(1 << (fd % size));
+        return;
+    }
+
+    pub fn FD_ISSET(fd: c_int, set: *const fd_set) -> bool {
+        let fd = fd as usize;
+        let size = size_of_val(&(*set).fds_bits[0]) * 8;
+        return ((*set).fds_bits[fd / size] & (1 << (fd % size))) != 0;
+    }
+
+    pub fn FD_SET(fd: c_int, set: *mut fd_set) -> () {
+        let fd = fd as usize;
+        let size = size_of_val(&(*set).fds_bits[0]) * 8;
+        (*set).fds_bits[fd / size] |= 1 << (fd % size);
+        return;
+    }
+
+    pub fn FD_ZERO(set: *mut fd_set) -> () {
+        for slot in (*set).fds_bits.iter_mut() {
+            *slot = 0;
+        }
+    }
+}
+
+safe_f! {
+    pub const fn WIFEXITED(status: c_int) -> bool {
+        (status & !0xff) == 0
+    }
+
+    pub const fn WEXITSTATUS(status: c_int) -> c_int {
+        status & 0xff
+    }
+
+    pub const fn WIFSIGNALED(status: c_int) -> bool {
+        ((status >> 8) & 0xff) != 0
+    }
+
+    pub const fn WTERMSIG(status: c_int) -> c_int {
+        (status >> 8) & 0xff
+    }
+
+    pub const fn WIFSTOPPED(status: c_int) -> bool {
+        ((status >> 16) & 0xff) != 0
+    }
+
+    pub const fn WSTOPSIG(status: c_int) -> c_int {
+        (status >> 16) & 0xff
+    }
+
+    // actually WIFCORED, but this is used everywhere else
+    pub const fn WCOREDUMP(status: c_int) -> bool {
+        (status & 0x10000) != 0
+    }
+
+    pub const fn WIFCONTINUED(status: c_int) -> bool {
+        (status & 0x20000) != 0
+    }
+}
+
+extern "C" {
+    pub fn getrlimit(resource: c_int, rlim: *mut crate::rlimit) -> c_int;
+    pub fn setrlimit(resource: c_int, rlim: *const crate::rlimit) -> c_int;
+    pub fn getpriority(which: c_int, who: id_t) -> c_int;
+    pub fn setpriority(which: c_int, who: id_t, priority: c_int) -> c_int;
+
+    pub fn endusershell();
+    pub fn getpass(prompt: *const c_char) -> *mut c_char;
+    pub fn getusershell() -> *mut c_char;
+    pub fn issetugid() -> c_int;
+    pub fn setusershell();
+
+    pub fn utimensat(
+        fd: c_int,
+        path: *const c_char,
+        times: *const crate::timespec,
+        flag: c_int,
+    ) -> c_int;
+    pub fn futimens(fd: c_int, times: *const crate::timespec) -> c_int;
+    pub fn strerror_r(errnum: c_int, buf: *mut c_char, buflen: size_t) -> c_int;
+    pub fn _errnop() -> *mut c_int;
+
+    pub fn abs(i: c_int) -> c_int;
+    pub fn labs(i: c_long) -> c_long;
+    pub fn rand() -> c_int;
+    pub fn srand(seed: c_uint);
+    pub fn getifaddrs(ifap: *mut *mut crate::ifaddrs) -> c_int;
+    pub fn freeifaddrs(ifa: *mut crate::ifaddrs);
+    pub fn ppoll(
+        fds: *mut crate::pollfd,
+        numfds: crate::nfds_t,
+        timeout: *const
crate::timespec, + sigMask: *const sigset_t, + ) -> c_int; + + pub fn getspent() -> *mut spwd; + pub fn getspent_r( + pwd: *mut spwd, + buf: *mut c_char, + bufferSize: size_t, + res: *mut *mut spwd, + ) -> c_int; + pub fn setspent(); + pub fn endspent(); + pub fn getspnam(name: *const c_char) -> *mut spwd; + pub fn getspnam_r( + name: *const c_char, + spwd: *mut spwd, + buffer: *mut c_char, + bufferSize: size_t, + res: *mut *mut spwd, + ) -> c_int; + pub fn sgetspent(line: *const c_char) -> *mut spwd; + pub fn sgetspent_r( + line: *const c_char, + spwd: *mut spwd, + buffer: *mut c_char, + bufferSize: size_t, + res: *mut *mut spwd, + ) -> c_int; + pub fn fgetspent(file: *mut crate::FILE) -> *mut spwd; + pub fn fgetspent_r( + file: *mut crate::FILE, + spwd: *mut spwd, + buffer: *mut c_char, + bufferSize: size_t, + res: *mut *mut spwd, + ) -> c_int; + pub fn mkfifoat(dirfd: c_int, pathname: *const c_char, mode: mode_t) -> c_int; + pub fn mknodat(dirfd: c_int, pathname: *const c_char, mode: mode_t, dev: dev_t) -> c_int; + pub fn sem_destroy(sem: *mut sem_t) -> c_int; + pub fn sem_init(sem: *mut sem_t, pshared: c_int, value: c_uint) -> c_int; + + pub fn clock_getres(clk_id: crate::clockid_t, tp: *mut crate::timespec) -> c_int; + pub fn clock_gettime(clk_id: crate::clockid_t, tp: *mut crate::timespec) -> c_int; + pub fn clock_settime(clk_id: crate::clockid_t, tp: *const crate::timespec) -> c_int; + pub fn clock_getcpuclockid(pid: crate::pid_t, clk_id: *mut crate::clockid_t) -> c_int; + pub fn pthread_create( + thread: *mut crate::pthread_t, + attr: *const crate::pthread_attr_t, + f: extern "C" fn(*mut c_void) -> *mut c_void, + value: *mut c_void, + ) -> c_int; + pub fn pthread_attr_getguardsize( + attr: *const crate::pthread_attr_t, + guardsize: *mut size_t, + ) -> c_int; + pub fn pthread_attr_setguardsize(attr: *mut crate::pthread_attr_t, guardsize: size_t) -> c_int; + pub fn pthread_attr_getstack( + attr: *const crate::pthread_attr_t, + stackaddr: *mut *mut c_void, + stacksize: *mut size_t, + ) -> c_int; + pub fn pthread_condattr_getclock( + attr: *const pthread_condattr_t, + clock_id: *mut clockid_t, + ) -> c_int; + pub fn pthread_condattr_setclock( + attr: *mut pthread_condattr_t, + clock_id: crate::clockid_t, + ) -> c_int; + pub fn valloc(numBytes: size_t) -> *mut c_void; + pub fn malloc_usable_size(ptr: *mut c_void) -> size_t; + pub fn memalign(align: size_t, size: size_t) -> *mut c_void; + pub fn setgroups(ngroups: c_int, ptr: *const crate::gid_t) -> c_int; + pub fn initgroups(name: *const c_char, basegid: crate::gid_t) -> c_int; + pub fn ioctl(fd: c_int, request: c_ulong, ...) 
-> c_int;
+    pub fn mprotect(addr: *mut c_void, len: size_t, prot: c_int) -> c_int;
+    pub fn dirfd(dirp: *mut crate::DIR) -> c_int;
+    pub fn getnameinfo(
+        sa: *const crate::sockaddr,
+        salen: crate::socklen_t,
+        host: *mut c_char,
+        hostlen: crate::socklen_t,
+        serv: *mut c_char,
+        servlen: crate::socklen_t,
+        flags: c_int,
+    ) -> c_int;
+    pub fn pthread_mutex_timedlock(
+        lock: *mut pthread_mutex_t,
+        abstime: *const crate::timespec,
+    ) -> c_int;
+    pub fn pthread_sigqueue(thread: crate::pthread_t, sig: c_int, value: crate::sigval) -> c_int;
+    pub fn pthread_spin_init(lock: *mut crate::pthread_spinlock_t, pshared: c_int) -> c_int;
+    pub fn pthread_spin_destroy(lock: *mut crate::pthread_spinlock_t) -> c_int;
+    pub fn pthread_spin_lock(lock: *mut crate::pthread_spinlock_t) -> c_int;
+    pub fn pthread_spin_trylock(lock: *mut crate::pthread_spinlock_t) -> c_int;
+    pub fn pthread_spin_unlock(lock: *mut crate::pthread_spinlock_t) -> c_int;
+    pub fn waitid(
+        idtype: idtype_t,
+        id: id_t,
+        infop: *mut crate::siginfo_t,
+        options: c_int,
+    ) -> c_int;
+
+    pub fn glob(
+        pattern: *const c_char,
+        flags: c_int,
+        errfunc: Option<extern "C" fn(epath: *const c_char, errno: c_int) -> c_int>,
+        pglob: *mut crate::glob_t,
+    ) -> c_int;
+    pub fn globfree(pglob: *mut crate::glob_t);
+    pub fn gettimeofday(tp: *mut crate::timeval, tz: *mut c_void) -> c_int;
+    pub fn posix_madvise(addr: *mut c_void, len: size_t, advice: c_int) -> c_int;
+    pub fn posix_fadvise(fd: c_int, offset: off_t, len: off_t, advice: c_int) -> c_int;
+    pub fn posix_fallocate(fd: c_int, offset: off_t, len: off_t) -> c_int;
+
+    pub fn shm_open(name: *const c_char, oflag: c_int, mode: mode_t) -> c_int;
+    pub fn shm_unlink(name: *const c_char) -> c_int;
+
+    pub fn seekdir(dirp: *mut crate::DIR, loc: c_long);
+
+    pub fn telldir(dirp: *mut crate::DIR) -> c_long;
+    pub fn madvise(addr: *mut c_void, len: size_t, advice: c_int) -> c_int;
+
+    pub fn msync(addr: *mut c_void, len: size_t, flags: c_int) -> c_int;
+
+    pub fn recvfrom(
+        socket: c_int,
+        buf: *mut c_void,
+        len: size_t,
+        flags: c_int,
+        addr: *mut crate::sockaddr,
+        addrlen: *mut crate::socklen_t,
+    ) -> ssize_t;
+    pub fn nl_langinfo(item: crate::nl_item) -> *mut c_char;
+
+    pub fn bind(
+        socket: c_int,
+        address: *const crate::sockaddr,
+        address_len: crate::socklen_t,
+    ) -> c_int;
+
+    pub fn accept4(
+        socket: c_int,
+        address: *mut crate::sockaddr,
+        addressLength: *mut crate::socklen_t,
+        flags: c_int,
+    ) -> c_int;
+
+    pub fn writev(fd: c_int, iov: *const crate::iovec, count: c_int) -> ssize_t;
+    pub fn readv(fd: c_int, iov: *const crate::iovec, count: c_int) -> ssize_t;
+
+    pub fn sendmsg(fd: c_int, msg: *const crate::msghdr, flags: c_int) -> ssize_t;
+    pub fn recvmsg(fd: c_int, msg: *mut crate::msghdr, flags: c_int) -> ssize_t;
+
+    // DIFF(main): changed to `*const *mut` in e77f551de9
+    pub fn execvpe(
+        file: *const c_char,
+        argv: *const *const c_char,
+        environment: *const *const c_char,
+    ) -> c_int;
+
+    pub fn getgrgid_r(
+        gid: crate::gid_t,
+        grp: *mut crate::group,
+        buf: *mut c_char,
+        buflen: size_t,
+        result: *mut *mut crate::group,
+    ) -> c_int;
+    pub fn getgrouplist(
+        user: *const c_char,
+        basegroup: crate::gid_t,
+        grouplist: *mut crate::gid_t,
+        groupcount: *mut c_int,
+    ) -> c_int;
+    pub fn sigaltstack(ss: *const stack_t, oss: *mut stack_t) -> c_int;
+    pub fn sigsuspend(mask: *const crate::sigset_t) -> c_int;
+    pub fn sem_close(sem: *mut sem_t) -> c_int;
+    pub fn getdtablesize() -> c_int;
+    pub fn getgrnam_r(
+        name: *const c_char,
+        grp: *mut crate::group,
+        buf: *mut c_char,
+        buflen: size_t,
+        result: *mut *mut crate::group,
+    ) -> c_int;
+    pub fn pthread_sigmask(how: c_int, set: *const sigset_t, oldset: *mut sigset_t) -> c_int;
+    pub fn sem_open(name: *const c_char, oflag: c_int, ...) -> *mut sem_t;
+    pub fn getgrnam(name: *const c_char) -> *mut crate::group;
+    pub fn pthread_kill(thread: crate::pthread_t, sig: c_int) -> c_int;
+    pub fn sem_unlink(name: *const c_char) -> c_int;
+    pub fn getpwnam_r(
+        name: *const c_char,
+        pwd: *mut passwd,
+        buf: *mut c_char,
+        buflen: size_t,
+        result: *mut *mut passwd,
+    ) -> c_int;
+    pub fn getpwuid_r(
+        uid: crate::uid_t,
+        pwd: *mut passwd,
+        buf: *mut c_char,
+        buflen: size_t,
+        result: *mut *mut passwd,
+    ) -> c_int;
+    pub fn getpwent() -> *mut passwd;
+    pub fn setpwent();
+    pub fn endpwent();
+    pub fn endgrent();
+    pub fn getgrent() -> *mut crate::group;
+    pub fn setgrent();
+    pub fn sigwait(set: *const sigset_t, sig: *mut c_int) -> c_int;
+    pub fn pthread_atfork(
+        prepare: Option<unsafe extern "C" fn()>,
+        parent: Option<unsafe extern "C" fn()>,
+        child: Option<unsafe extern "C" fn()>,
+    ) -> c_int;
+    pub fn getgrgid(gid: crate::gid_t) -> *mut crate::group;
+    pub fn popen(command: *const c_char, mode: *const c_char) -> *mut crate::FILE;
+    pub fn sethostname(name: *const c_char, len: size_t) -> c_int;
+    pub fn uname(buf: *mut crate::utsname) -> c_int;
+    pub fn getutxent() -> *mut utmpx;
+    pub fn getutxid(ut: *const utmpx) -> *mut utmpx;
+    pub fn getutxline(ut: *const utmpx) -> *mut utmpx;
+    pub fn pututxline(ut: *const utmpx) -> *mut utmpx;
+    pub fn setutxent();
+    pub fn endutxent();
+    pub fn faccessat(dirfd: c_int, pathname: *const c_char, mode: c_int, flags: c_int) -> c_int;
+
+    pub fn sigtimedwait(
+        set: *const sigset_t,
+        info: *mut siginfo_t,
+        timeout: *const crate::timespec,
+    ) -> c_int;
+    pub fn sigwaitinfo(set: *const sigset_t, info: *mut siginfo_t) -> c_int;
+
+    pub fn getitimer(which: c_int, curr_value: *mut crate::itimerval) -> c_int;
+    pub fn setitimer(
+        which: c_int,
+        new_value: *const crate::itimerval,
+        old_value: *mut crate::itimerval,
+    ) -> c_int;
+
+    pub fn regcomp(preg: *mut regex_t, pattern: *const c_char, cflags: c_int) -> c_int;
+
+    pub fn regexec(
+        preg: *const regex_t,
+        input: *const c_char,
+        nmatch: size_t,
+        pmatch: *mut regmatch_t,
+        eflags: c_int,
+    ) -> c_int;
+
+    pub fn regerror(
+        errcode: c_int,
+        preg: *const regex_t,
+        errbuf: *mut c_char,
+        errbuf_size: size_t,
+    ) -> size_t;
+
+    pub fn regfree(preg: *mut regex_t);
+
+    pub fn msgctl(msqid: c_int, cmd: c_int, buf: *mut msqid_ds) -> c_int;
+    pub fn msgget(key: crate::key_t, msgflg: c_int) -> c_int;
+    pub fn msgrcv(
+        msqid: c_int,
+        msgp: *mut c_void,
+        msgsz: size_t,
+        msgtype: c_long,
+        msgflg: c_int,
+    ) -> ssize_t;
+    pub fn msgsnd(msqid: c_int, msgp: *const c_void, msgsz: size_t, msgflg: c_int) -> c_int;
+    pub fn semget(key: crate::key_t, nsems: c_int, semflg: c_int) -> c_int;
+    pub fn semctl(semid: c_int, semnum: c_int, cmd: c_int, ...) -> c_int;
+    pub fn semop(semid: c_int, sops: *mut sembuf, nsops: size_t) -> c_int;
+    pub fn ftok(pathname: *const c_char, proj_id: c_int) -> crate::key_t;
+
+    pub fn memrchr(cx: *const c_void, c: c_int, n: size_t) -> *mut c_void;
+
+    pub fn lsearch(
+        key: *const c_void,
+        base: *mut c_void,
+        nelp: *mut size_t,
+        width: size_t,
+        compar: Option<unsafe extern "C" fn(*const c_void, *const c_void) -> c_int>,
+    ) -> *mut c_void;
+    pub fn lfind(
+        key: *const c_void,
+        base: *const c_void,
+        nelp: *mut size_t,
+        width: size_t,
+        compar: Option<unsafe extern "C" fn(*const c_void, *const c_void) -> c_int>,
+    ) -> *mut c_void;
+    pub fn hcreate(nelt: size_t) -> c_int;
+    pub fn hdestroy();
+    pub fn hsearch(entry: crate::ENTRY, action: crate::ACTION) -> *mut crate::ENTRY;
+
+    pub fn drand48() -> c_double;
+    pub fn erand48(xseed: *mut c_ushort) -> c_double;
+    pub fn lrand48() -> c_long;
+    pub fn nrand48(xseed: *mut c_ushort) -> c_long;
+    pub fn mrand48() -> c_long;
+    pub fn jrand48(xseed: *mut c_ushort) -> c_long;
+    pub fn srand48(seed: c_long);
+    pub fn seed48(xseed: *mut c_ushort) -> *mut c_ushort;
+    pub fn lcong48(p: *mut c_ushort);
+
+    pub fn clearenv() -> c_int;
+    pub fn ctermid(s: *mut c_char) -> *mut c_char;
+
+    pub fn sync();
+    pub fn getpagesize() -> c_int;
+
+    pub fn brk(addr: *mut c_void) -> c_int;
+    pub fn sbrk(increment: intptr_t) -> *mut c_void;
+
+    pub fn posix_spawn(
+        pid: *mut crate::pid_t,
+        path: *const c_char,
+        file_actions: *const crate::posix_spawn_file_actions_t,
+        attrp: *const crate::posix_spawnattr_t,
+        argv: *const *mut c_char,
+        envp: *const *mut c_char,
+    ) -> c_int;
+    pub fn posix_spawnp(
+        pid: *mut crate::pid_t,
+        file: *const c_char,
+        file_actions: *const crate::posix_spawn_file_actions_t,
+        attrp: *const crate::posix_spawnattr_t,
+        argv: *const *mut c_char,
+        envp: *const *mut c_char,
+    ) -> c_int;
+
+    pub fn posix_spawn_file_actions_init(file_actions: *mut posix_spawn_file_actions_t) -> c_int;
+    pub fn posix_spawn_file_actions_destroy(file_actions: *mut posix_spawn_file_actions_t)
+        -> c_int;
+    pub fn posix_spawn_file_actions_addopen(
+        file_actions: *mut posix_spawn_file_actions_t,
+        fildes: c_int,
+        path: *const c_char,
+        oflag: c_int,
+        mode: mode_t,
+    ) -> c_int;
+    pub fn posix_spawn_file_actions_addclose(
+        file_actions: *mut posix_spawn_file_actions_t,
+        fildes: c_int,
+    ) -> c_int;
+    pub fn posix_spawn_file_actions_adddup2(
+        file_actions: *mut posix_spawn_file_actions_t,
+        fildes: c_int,
+        newfildes: c_int,
+    ) -> c_int;
+
+    pub fn posix_spawnattr_init(attr: *mut posix_spawnattr_t) -> c_int;
+    pub fn posix_spawnattr_destroy(attr: *mut posix_spawnattr_t) -> c_int;
+    pub fn posix_spawnattr_getflags(attr: *const posix_spawnattr_t, _flags: *mut c_short) -> c_int;
+    pub fn posix_spawnattr_setflags(attr: *mut posix_spawnattr_t, flags: c_short) -> c_int;
+    pub fn posix_spawnattr_getpgroup(
+        attr: *const posix_spawnattr_t,
+        _pgroup: *mut crate::pid_t,
+    ) -> c_int;
+    pub fn posix_spawnattr_setpgroup(attr: *mut posix_spawnattr_t, pgroup: crate::pid_t) -> c_int;
+    pub fn posix_spawnattr_getsigdefault(
+        attr: *const posix_spawnattr_t,
+        sigdefault: *mut crate::sigset_t,
+    ) -> c_int;
+    pub fn posix_spawnattr_setsigdefault(
+        attr: *mut posix_spawnattr_t,
+        sigdefault: *const crate::sigset_t,
+    ) -> c_int;
+    pub fn posix_spawnattr_getsigmask(
+        attr: *const posix_spawnattr_t,
+        _sigmask: *mut crate::sigset_t,
+    ) -> c_int;
+    pub fn posix_spawnattr_setsigmask(
+        attr: *mut posix_spawnattr_t,
+        sigmask: *const crate::sigset_t,
+    ) -> c_int;
+    pub fn getopt_long(
+        argc: c_int,
+        argv: *const *mut c_char,
+        optstring: *const c_char,
+        longopts: *const option,
+        longindex:
*mut c_int, + ) -> c_int; + pub fn strcasecmp_l( + string1: *const c_char, + string2: *const c_char, + locale: crate::locale_t, + ) -> c_int; + pub fn strncasecmp_l( + string1: *const c_char, + string2: *const c_char, + length: size_t, + locale: crate::locale_t, + ) -> c_int; + + pub fn getentropy(buf: *mut c_void, buflen: size_t) -> c_int; +} + +#[link(name = "gnu")] +extern "C" { + pub fn memmem( + source: *const c_void, + sourceLength: size_t, + search: *const c_void, + searchLength: size_t, + ) -> *mut c_void; + + pub fn pthread_getattr_np(thread: crate::pthread_t, attr: *mut crate::pthread_attr_t) -> c_int; + pub fn pthread_getname_np( + thread: crate::pthread_t, + buffer: *mut c_char, + length: size_t, + ) -> c_int; + pub fn pthread_setname_np(thread: crate::pthread_t, name: *const c_char) -> c_int; +} + +cfg_if! { + if #[cfg(target_pointer_width = "64")] { + mod b64; + pub use self::b64::*; + } else { + mod b32; + pub use self::b32::*; + } +} + +cfg_if! { + if #[cfg(target_arch = "x86")] { + // TODO + // mod x86; + // pub use self::x86::*; + } else if #[cfg(target_arch = "x86_64")] { + mod x86_64; + pub use self::x86_64::*; + } else if #[cfg(target_arch = "aarch64")] { + // TODO + // mod aarch64; + // pub use self::aarch64::*; + } +} + +mod bsd; +pub use self::bsd::*; + +mod native; +pub use self::native::*; diff --git a/vendor/libc/src/unix/haiku/native.rs b/vendor/libc/src/unix/haiku/native.rs new file mode 100644 index 00000000000000..13a203f92ff565 --- /dev/null +++ b/vendor/libc/src/unix/haiku/native.rs @@ -0,0 +1,1388 @@ +use crate::off_t; +use crate::prelude::*; + +// This file follows the Haiku API for Haiku R1 beta 5. It is organized by the +// C/C++ header files in which the concepts can be found, while adhering to the +// style guide for this crate. + +// Helper macro to generate u32 constants. The Haiku API uses (non-standard) +// multi-character constants (like 'UPDA' or 'MSGM') to represent 32 bit +// integer constants. + +macro_rules! haiku_constant { + ($a:tt, $b:tt, $c:tt, $d:tt) => { + (($a as u32) << 24) + (($b as u32) << 16) + (($c as u32) << 8) + ($d as u32) + }; +} + +// support/SupportDefs.h +pub type status_t = i32; +pub type bigtime_t = i64; +pub type nanotime_t = i64; +pub type type_code = u32; +pub type perform_code = u32; + +// kernel/OS.h +pub type area_id = i32; +pub type port_id = i32; +pub type sem_id = i32; +pub type team_id = i32; +pub type thread_id = i32; + +pub type thread_func = extern "C" fn(*mut c_void) -> status_t; + +// kernel/image.h +pub type image_id = i32; + +c_enum! 
{ + // kernel/OS.h + pub enum thread_state { + B_THREAD_RUNNING = 1, + B_THREAD_READY, + B_THREAD_RECEIVING, + B_THREAD_ASLEEP, + B_THREAD_SUSPENDED, + B_THREAD_WAITING, + } + + // kernel/image.h + pub enum image_type { + B_APP_IMAGE = 1, + B_LIBRARY_IMAGE, + B_ADD_ON_IMAGE, + B_SYSTEM_IMAGE, + } + + // kernel/scheduler.h + + pub enum be_task_flags { + B_DEFAULT_MEDIA_PRIORITY = 0x000, + B_OFFLINE_PROCESSING = 0x001, + B_STATUS_RENDERING = 0x002, + B_USER_INPUT_HANDLING = 0x004, + B_LIVE_VIDEO_MANIPULATION = 0x008, + B_VIDEO_PLAYBACK = 0x010, + B_VIDEO_RECORDING = 0x020, + B_LIVE_AUDIO_MANIPULATION = 0x040, + B_AUDIO_PLAYBACK = 0x080, + B_AUDIO_RECORDING = 0x100, + B_LIVE_3D_RENDERING = 0x200, + B_NUMBER_CRUNCHING = 0x400, + B_MIDI_PROCESSING = 0x800, + } + + pub enum schduler_mode { + SCHEDULER_MODE_LOW_LATENCY, + SCHEDULER_MODE_POWER_SAVING, + } + + // FindDirectory.h + pub enum path_base_directory { + B_FIND_PATH_INSTALLATION_LOCATION_DIRECTORY, + B_FIND_PATH_ADD_ONS_DIRECTORY, + B_FIND_PATH_APPS_DIRECTORY, + B_FIND_PATH_BIN_DIRECTORY, + B_FIND_PATH_BOOT_DIRECTORY, + B_FIND_PATH_CACHE_DIRECTORY, + B_FIND_PATH_DATA_DIRECTORY, + B_FIND_PATH_DEVELOP_DIRECTORY, + B_FIND_PATH_DEVELOP_LIB_DIRECTORY, + B_FIND_PATH_DOCUMENTATION_DIRECTORY, + B_FIND_PATH_ETC_DIRECTORY, + B_FIND_PATH_FONTS_DIRECTORY, + B_FIND_PATH_HEADERS_DIRECTORY, + B_FIND_PATH_LIB_DIRECTORY, + B_FIND_PATH_LOG_DIRECTORY, + B_FIND_PATH_MEDIA_NODES_DIRECTORY, + B_FIND_PATH_PACKAGES_DIRECTORY, + B_FIND_PATH_PREFERENCES_DIRECTORY, + B_FIND_PATH_SERVERS_DIRECTORY, + B_FIND_PATH_SETTINGS_DIRECTORY, + B_FIND_PATH_SOUNDS_DIRECTORY, + B_FIND_PATH_SPOOL_DIRECTORY, + B_FIND_PATH_TRANSLATORS_DIRECTORY, + B_FIND_PATH_VAR_DIRECTORY, + B_FIND_PATH_IMAGE_PATH = 1000, + B_FIND_PATH_PACKAGE_PATH, + } + + pub enum directory_which { + B_DESKTOP_DIRECTORY = 0, + B_TRASH_DIRECTORY, + B_SYSTEM_DIRECTORY = 1000, + B_SYSTEM_ADDONS_DIRECTORY = 1002, + B_SYSTEM_BOOT_DIRECTORY, + B_SYSTEM_FONTS_DIRECTORY, + B_SYSTEM_LIB_DIRECTORY, + B_SYSTEM_SERVERS_DIRECTORY, + B_SYSTEM_APPS_DIRECTORY, + B_SYSTEM_BIN_DIRECTORY, + B_SYSTEM_DOCUMENTATION_DIRECTORY = 1010, + B_SYSTEM_PREFERENCES_DIRECTORY, + B_SYSTEM_TRANSLATORS_DIRECTORY, + B_SYSTEM_MEDIA_NODES_DIRECTORY, + B_SYSTEM_SOUNDS_DIRECTORY, + B_SYSTEM_DATA_DIRECTORY, + B_SYSTEM_DEVELOP_DIRECTORY, + B_SYSTEM_PACKAGES_DIRECTORY, + B_SYSTEM_HEADERS_DIRECTORY, + B_SYSTEM_ETC_DIRECTORY = 2008, + B_SYSTEM_SETTINGS_DIRECTORY = 2010, + B_SYSTEM_LOG_DIRECTORY = 2012, + B_SYSTEM_SPOOL_DIRECTORY, + B_SYSTEM_TEMP_DIRECTORY, + B_SYSTEM_VAR_DIRECTORY, + B_SYSTEM_CACHE_DIRECTORY = 2020, + B_SYSTEM_NONPACKAGED_DIRECTORY = 2023, + B_SYSTEM_NONPACKAGED_ADDONS_DIRECTORY, + B_SYSTEM_NONPACKAGED_TRANSLATORS_DIRECTORY, + B_SYSTEM_NONPACKAGED_MEDIA_NODES_DIRECTORY, + B_SYSTEM_NONPACKAGED_BIN_DIRECTORY, + B_SYSTEM_NONPACKAGED_DATA_DIRECTORY, + B_SYSTEM_NONPACKAGED_FONTS_DIRECTORY, + B_SYSTEM_NONPACKAGED_SOUNDS_DIRECTORY, + B_SYSTEM_NONPACKAGED_DOCUMENTATION_DIRECTORY, + B_SYSTEM_NONPACKAGED_LIB_DIRECTORY, + B_SYSTEM_NONPACKAGED_HEADERS_DIRECTORY, + B_SYSTEM_NONPACKAGED_DEVELOP_DIRECTORY, + B_USER_DIRECTORY = 3000, + B_USER_CONFIG_DIRECTORY, + B_USER_ADDONS_DIRECTORY, + B_USER_BOOT_DIRECTORY, + B_USER_FONTS_DIRECTORY, + B_USER_LIB_DIRECTORY, + B_USER_SETTINGS_DIRECTORY, + B_USER_DESKBAR_DIRECTORY, + B_USER_PRINTERS_DIRECTORY, + B_USER_TRANSLATORS_DIRECTORY, + B_USER_MEDIA_NODES_DIRECTORY, + B_USER_SOUNDS_DIRECTORY, + B_USER_DATA_DIRECTORY, + B_USER_CACHE_DIRECTORY, + B_USER_PACKAGES_DIRECTORY, + B_USER_HEADERS_DIRECTORY, + 
B_USER_NONPACKAGED_DIRECTORY, + B_USER_NONPACKAGED_ADDONS_DIRECTORY, + B_USER_NONPACKAGED_TRANSLATORS_DIRECTORY, + B_USER_NONPACKAGED_MEDIA_NODES_DIRECTORY, + B_USER_NONPACKAGED_BIN_DIRECTORY, + B_USER_NONPACKAGED_DATA_DIRECTORY, + B_USER_NONPACKAGED_FONTS_DIRECTORY, + B_USER_NONPACKAGED_SOUNDS_DIRECTORY, + B_USER_NONPACKAGED_DOCUMENTATION_DIRECTORY, + B_USER_NONPACKAGED_LIB_DIRECTORY, + B_USER_NONPACKAGED_HEADERS_DIRECTORY, + B_USER_NONPACKAGED_DEVELOP_DIRECTORY, + B_USER_DEVELOP_DIRECTORY, + B_USER_DOCUMENTATION_DIRECTORY, + B_USER_SERVERS_DIRECTORY, + B_USER_APPS_DIRECTORY, + B_USER_BIN_DIRECTORY, + B_USER_PREFERENCES_DIRECTORY, + B_USER_ETC_DIRECTORY, + B_USER_LOG_DIRECTORY, + B_USER_SPOOL_DIRECTORY, + B_USER_VAR_DIRECTORY, + B_APPS_DIRECTORY = 4000, + B_PREFERENCES_DIRECTORY, + B_UTILITIES_DIRECTORY, + B_PACKAGE_LINKS_DIRECTORY, + } + + // kernel/OS.h + + pub enum topology_level_type { + B_TOPOLOGY_UNKNOWN, + B_TOPOLOGY_ROOT, + B_TOPOLOGY_SMT, + B_TOPOLOGY_CORE, + B_TOPOLOGY_PACKAGE, + } + + pub enum cpu_platform { + B_CPU_UNKNOWN, + B_CPU_x86, + B_CPU_x86_64, + B_CPU_PPC, + B_CPU_PPC_64, + B_CPU_M68K, + B_CPU_ARM, + B_CPU_ARM_64, + B_CPU_ALPHA, + B_CPU_MIPS, + B_CPU_SH, + B_CPU_SPARC, + B_CPU_RISC_V, + } + + pub enum cpu_vendor { + B_CPU_VENDOR_UNKNOWN, + B_CPU_VENDOR_AMD, + B_CPU_VENDOR_CYRIX, + B_CPU_VENDOR_IDT, + B_CPU_VENDOR_INTEL, + B_CPU_VENDOR_NATIONAL_SEMICONDUCTOR, + B_CPU_VENDOR_RISE, + B_CPU_VENDOR_TRANSMETA, + B_CPU_VENDOR_VIA, + B_CPU_VENDOR_IBM, + B_CPU_VENDOR_MOTOROLA, + B_CPU_VENDOR_NEC, + B_CPU_VENDOR_HYGON, + B_CPU_VENDOR_SUN, + B_CPU_VENDOR_FUJITSU, + } +} + +s! { + // kernel/OS.h + pub struct area_info { + pub area: area_id, + pub name: [c_char; B_OS_NAME_LENGTH], + pub size: usize, + pub lock: u32, + pub protection: u32, + pub team: team_id, + pub ram_size: u32, + pub copy_count: u32, + pub in_count: u32, + pub out_count: u32, + pub address: *mut c_void, + } + + pub struct port_info { + pub port: port_id, + pub team: team_id, + pub name: [c_char; B_OS_NAME_LENGTH], + pub capacity: i32, + pub queue_count: i32, + pub total_count: i32, + } + + pub struct port_message_info { + pub size: size_t, + pub sender: crate::uid_t, + pub sender_group: crate::gid_t, + pub sender_team: crate::team_id, + } + + pub struct team_info { + pub team: team_id, + pub thread_count: i32, + pub image_count: i32, + pub area_count: i32, + pub debugger_nub_thread: thread_id, + pub debugger_nub_port: port_id, + pub argc: i32, + pub args: [c_char; 64], + pub uid: crate::uid_t, + pub gid: crate::gid_t, + } + + pub struct sem_info { + pub sem: sem_id, + pub team: team_id, + pub name: [c_char; B_OS_NAME_LENGTH], + pub count: i32, + pub latest_holder: thread_id, + } + + pub struct team_usage_info { + pub user_time: bigtime_t, + pub kernel_time: bigtime_t, + } + + pub struct thread_info { + pub thread: thread_id, + pub team: team_id, + pub name: [c_char; B_OS_NAME_LENGTH], + pub state: thread_state, + pub priority: i32, + pub sem: sem_id, + pub user_time: bigtime_t, + pub kernel_time: bigtime_t, + pub stack_base: *mut c_void, + pub stack_end: *mut c_void, + } + + pub struct cpu_info { + pub active_time: bigtime_t, + pub enabled: bool, + pub current_frequency: u64, + } + + pub struct system_info { + pub boot_time: bigtime_t, + pub cpu_count: u32, + pub max_pages: u64, + pub used_pages: u64, + pub cached_pages: u64, + pub block_cache_pages: u64, + pub ignored_pages: u64, + pub needed_memory: u64, + pub free_memory: u64, + pub max_swap_pages: u64, + pub free_swap_pages: u64, + pub page_faults: u32, + 
pub max_sems: u32, + pub used_sems: u32, + pub max_ports: u32, + pub used_ports: u32, + pub max_threads: u32, + pub used_threads: u32, + pub max_teams: u32, + pub used_teams: u32, + pub kernel_name: [c_char; B_FILE_NAME_LENGTH], + pub kernel_build_date: [c_char; B_OS_NAME_LENGTH], + pub kernel_build_time: [c_char; B_OS_NAME_LENGTH], + pub kernel_version: i64, + pub abi: u32, + } + + pub struct object_wait_info { + pub object: i32, + pub type_: u16, + pub events: u16, + } + + pub struct cpu_topology_root_info { + pub platform: cpu_platform, + } + + pub struct cpu_topology_package_info { + pub vendor: cpu_vendor, + pub cache_line_size: u32, + } + + pub struct cpu_topology_core_info { + pub model: u32, + pub default_frequency: u64, + } + // kernel/fs_attr.h + pub struct attr_info { + pub type_: u32, + pub size: off_t, + } + + // kernel/fs_index.h + pub struct index_info { + pub type_: u32, + pub size: off_t, + pub modification_time: crate::time_t, + pub creation_time: crate::time_t, + pub uid: crate::uid_t, + pub gid: crate::gid_t, + } + + //kernel/fs_info.h + pub struct fs_info { + pub dev: crate::dev_t, + pub root: crate::ino_t, + pub flags: u32, + pub block_size: off_t, + pub io_size: off_t, + pub total_blocks: off_t, + pub free_blocks: off_t, + pub total_nodes: off_t, + pub free_nodes: off_t, + pub device_name: [c_char; 128], + pub volume_name: [c_char; B_FILE_NAME_LENGTH], + pub fsh_name: [c_char; B_OS_NAME_LENGTH], + } + + // kernel/image.h + // FIXME(1.0): This should not implement `PartialEq` + #[allow(unpredictable_function_pointer_comparisons)] + pub struct image_info { + pub id: image_id, + pub image_type: c_int, + pub sequence: i32, + pub init_order: i32, + // FIXME(1.0): these should be made optional + pub init_routine: extern "C" fn(), + pub term_routine: extern "C" fn(), + pub device: crate::dev_t, + pub node: crate::ino_t, + pub name: [c_char; crate::PATH_MAX as usize], + pub text: *mut c_void, + pub data: *mut c_void, + pub text_size: i32, + pub data_size: i32, + pub api_version: i32, + pub abi: i32, + } + + pub struct __c_anonymous_eax_0 { + pub max_eax: u32, + pub vendor_id: [c_char; 12], + } + + pub struct __c_anonymous_eax_1 { + pub stepping: u32, + pub model: u32, + pub family: u32, + pub tpe: u32, + __reserved_0: u32, + pub extended_model: u32, + pub extended_family: u32, + __reserved_1: u32, + pub brand_index: u32, + pub clflush: u32, + pub logical_cpus: u32, + pub apic_id: u32, + pub features: u32, + pub extended_features: u32, + } + + pub struct __c_anonymous_eax_2 { + pub call_num: u8, + pub cache_descriptors: [u8; 15], + } + + pub struct __c_anonymous_eax_3 { + __reserved: [u32; 2], + pub serial_number_high: u32, + pub serial_number_low: u32, + } + + pub struct __c_anonymous_regs { + pub eax: u32, + pub ebx: u32, + pub edx: u32, + pub ecx: u32, + } +} + +s_no_extra_traits! { + pub union cpuid_info { + pub eax_0: __c_anonymous_eax_0, + pub eax_1: __c_anonymous_eax_1, + pub eax_2: __c_anonymous_eax_2, + pub eax_3: __c_anonymous_eax_3, + pub as_chars: [c_char; 16], + pub regs: __c_anonymous_regs, + } + + pub union __c_anonymous_cpu_topology_info_data { + pub root: cpu_topology_root_info, + pub package: cpu_topology_package_info, + pub core: cpu_topology_core_info, + } + + pub struct cpu_topology_node_info { + pub id: u32, + pub type_: topology_level_type, + pub level: u32, + pub data: __c_anonymous_cpu_topology_info_data, + } +} + +cfg_if! 
{ + if #[cfg(feature = "extra_traits")] { + impl PartialEq for cpuid_info { + fn eq(&self, other: &cpuid_info) -> bool { + unsafe { + self.eax_0 == other.eax_0 + || self.eax_1 == other.eax_1 + || self.eax_2 == other.eax_2 + || self.eax_3 == other.eax_3 + || self.as_chars == other.as_chars + || self.regs == other.regs + } + } + } + impl Eq for cpuid_info {} + + impl PartialEq for __c_anonymous_cpu_topology_info_data { + fn eq(&self, other: &__c_anonymous_cpu_topology_info_data) -> bool { + unsafe { + self.root == other.root + || self.package == other.package + || self.core == other.core + } + } + } + impl Eq for __c_anonymous_cpu_topology_info_data {} + + impl PartialEq for cpu_topology_node_info { + fn eq(&self, other: &cpu_topology_node_info) -> bool { + self.id == other.id && self.type_ == other.type_ && self.level == other.level + } + } + + impl Eq for cpu_topology_node_info {} + } +} + +// kernel/OS.h +pub const B_OS_NAME_LENGTH: usize = 32; +pub const B_PAGE_SIZE: usize = 4096; +pub const B_INFINITE_TIMEOUT: usize = 9223372036854775807; + +pub const B_RELATIVE_TIMEOUT: u32 = 0x8; +pub const B_ABSOLUTE_TIMEOUT: u32 = 0x10; +pub const B_TIMEOUT_REAL_TIME_BASE: u32 = 0x40; +pub const B_ABSOLUTE_REAL_TIME_TIMEOUT: u32 = B_ABSOLUTE_TIMEOUT | B_TIMEOUT_REAL_TIME_BASE; + +pub const B_NO_LOCK: u32 = 0; +pub const B_LAZY_LOCK: u32 = 1; +pub const B_FULL_LOCK: u32 = 2; +pub const B_CONTIGUOUS: u32 = 3; +pub const B_LOMEM: u32 = 4; +pub const B_32_BIT_FULL_LOCK: u32 = 5; +pub const B_32_BIT_CONTIGUOUS: u32 = 6; + +pub const B_ANY_ADDRESS: u32 = 0; +pub const B_EXACT_ADDRESS: u32 = 1; +pub const B_BASE_ADDRESS: u32 = 2; +pub const B_CLONE_ADDRESS: u32 = 3; +pub const B_ANY_KERNEL_ADDRESS: u32 = 4; +pub const B_RANDOMIZED_ANY_ADDRESS: u32 = 6; +pub const B_RANDOMIZED_BASE_ADDRESS: u32 = 7; + +pub const B_READ_AREA: u32 = 1 << 0; +pub const B_WRITE_AREA: u32 = 1 << 1; +pub const B_EXECUTE_AREA: u32 = 1 << 2; +pub const B_STACK_AREA: u32 = 1 << 3; +pub const B_CLONEABLE_AREA: u32 = 1 << 8; + +pub const B_CAN_INTERRUPT: u32 = 0x01; +pub const B_CHECK_PERMISSION: u32 = 0x04; +pub const B_KILL_CAN_INTERRUPT: u32 = 0x20; +pub const B_DO_NOT_RESCHEDULE: u32 = 0x02; +pub const B_RELEASE_ALL: u32 = 0x08; +pub const B_RELEASE_IF_WAITING_ONLY: u32 = 0x10; + +pub const B_CURRENT_TEAM: team_id = 0; +pub const B_SYSTEM_TEAM: team_id = 1; + +pub const B_TEAM_USAGE_SELF: i32 = 0; +pub const B_TEAM_USAGE_CHILDREN: i32 = -1; + +pub const B_IDLE_PRIORITY: i32 = 0; +pub const B_LOWEST_ACTIVE_PRIORITY: i32 = 1; +pub const B_LOW_PRIORITY: i32 = 5; +pub const B_NORMAL_PRIORITY: i32 = 10; +pub const B_DISPLAY_PRIORITY: i32 = 15; +pub const B_URGENT_DISPLAY_PRIORITY: i32 = 20; +pub const B_REAL_TIME_DISPLAY_PRIORITY: i32 = 100; +pub const B_URGENT_PRIORITY: i32 = 110; +pub const B_REAL_TIME_PRIORITY: i32 = 120; + +pub const B_SYSTEM_TIMEBASE: i32 = 0; +pub const B_FIRST_REAL_TIME_PRIORITY: i32 = B_REAL_TIME_DISPLAY_PRIORITY; + +pub const B_ONE_SHOT_ABSOLUTE_ALARM: u32 = 1; +pub const B_ONE_SHOT_RELATIVE_ALARM: u32 = 2; +pub const B_PERIODIC_ALARM: u32 = 3; + +pub const B_OBJECT_TYPE_FD: u16 = 0; +pub const B_OBJECT_TYPE_SEMAPHORE: u16 = 1; +pub const B_OBJECT_TYPE_PORT: u16 = 2; +pub const B_OBJECT_TYPE_THREAD: u16 = 3; + +pub const B_EVENT_READ: u16 = 0x0001; +pub const B_EVENT_WRITE: u16 = 0x0002; +pub const B_EVENT_ERROR: u16 = 0x0004; +pub const B_EVENT_PRIORITY_READ: u16 = 0x0008; +pub const B_EVENT_PRIORITY_WRITE: u16 = 0x0010; +pub const B_EVENT_HIGH_PRIORITY_READ: u16 = 0x0020; +pub const 
B_EVENT_HIGH_PRIORITY_WRITE: u16 = 0x0040; +pub const B_EVENT_DISCONNECTED: u16 = 0x0080; +pub const B_EVENT_ACQUIRE_SEMAPHORE: u16 = 0x0001; +pub const B_EVENT_INVALID: u16 = 0x1000; + +// kernel/fs_info.h +pub const B_FS_IS_READONLY: u32 = 0x00000001; +pub const B_FS_IS_REMOVABLE: u32 = 0x00000002; +pub const B_FS_IS_PERSISTENT: u32 = 0x00000004; +pub const B_FS_IS_SHARED: u32 = 0x00000008; +pub const B_FS_HAS_MIME: u32 = 0x00010000; +pub const B_FS_HAS_ATTR: u32 = 0x00020000; +pub const B_FS_HAS_QUERY: u32 = 0x00040000; +pub const B_FS_HAS_SELF_HEALING_LINKS: u32 = 0x00080000; +pub const B_FS_HAS_ALIASES: u32 = 0x00100000; +pub const B_FS_SUPPORTS_NODE_MONITORING: u32 = 0x00200000; +pub const B_FS_SUPPORTS_MONITOR_CHILDREN: u32 = 0x00400000; + +// kernel/fs_query.h +pub const B_LIVE_QUERY: u32 = 0x00000001; +pub const B_QUERY_NON_INDEXED: u32 = 0x00000002; + +// kernel/fs_volume.h +pub const B_MOUNT_READ_ONLY: u32 = 1; +pub const B_MOUNT_VIRTUAL_DEVICE: u32 = 2; +pub const B_FORCE_UNMOUNT: u32 = 1; + +// kernel/image.h +pub const B_FLUSH_DCACHE: u32 = 0x0001; +pub const B_FLUSH_ICACHE: u32 = 0x0004; +pub const B_INVALIDATE_DCACHE: u32 = 0x0002; +pub const B_INVALIDATE_ICACHE: u32 = 0x0008; + +pub const B_SYMBOL_TYPE_DATA: i32 = 0x1; +pub const B_SYMBOL_TYPE_TEXT: i32 = 0x2; +pub const B_SYMBOL_TYPE_ANY: i32 = 0x5; + +// storage/StorageDefs.h +pub const B_DEV_NAME_LENGTH: usize = 128; +pub const B_FILE_NAME_LENGTH: usize = crate::FILENAME_MAX as usize; +pub const B_PATH_NAME_LENGTH: usize = crate::PATH_MAX as usize; +pub const B_ATTR_NAME_LENGTH: usize = B_FILE_NAME_LENGTH - 1; +pub const B_MIME_TYPE_LENGTH: usize = B_ATTR_NAME_LENGTH - 15; +pub const B_MAX_SYMLINKS: usize = 16; + +// Haiku open modes in BFile are passed as u32 +pub const B_READ_ONLY: u32 = crate::O_RDONLY as u32; +pub const B_WRITE_ONLY: u32 = crate::O_WRONLY as u32; +pub const B_READ_WRITE: u32 = crate::O_RDWR as u32; + +pub const B_FAIL_IF_EXISTS: u32 = crate::O_EXCL as u32; +pub const B_CREATE_FILE: u32 = crate::O_CREAT as u32; +pub const B_ERASE_FILE: u32 = crate::O_TRUNC as u32; +pub const B_OPEN_AT_END: u32 = crate::O_APPEND as u32; + +pub const B_FILE_NODE: u32 = 0x01; +pub const B_SYMLINK_NODE: u32 = 0x02; +pub const B_DIRECTORY_NODE: u32 = 0x04; +pub const B_ANY_NODE: u32 = 0x07; + +// support/Errors.h +pub const B_GENERAL_ERROR_BASE: status_t = core::i32::MIN; +pub const B_OS_ERROR_BASE: status_t = B_GENERAL_ERROR_BASE + 0x1000; +pub const B_APP_ERROR_BASE: status_t = B_GENERAL_ERROR_BASE + 0x2000; +pub const B_INTERFACE_ERROR_BASE: status_t = B_GENERAL_ERROR_BASE + 0x3000; +pub const B_MEDIA_ERROR_BASE: status_t = B_GENERAL_ERROR_BASE + 0x4000; +pub const B_TRANSLATION_ERROR_BASE: status_t = B_GENERAL_ERROR_BASE + 0x4800; +pub const B_MIDI_ERROR_BASE: status_t = B_GENERAL_ERROR_BASE + 0x5000; +pub const B_STORAGE_ERROR_BASE: status_t = B_GENERAL_ERROR_BASE + 0x6000; +pub const B_POSIX_ERROR_BASE: status_t = B_GENERAL_ERROR_BASE + 0x7000; +pub const B_MAIL_ERROR_BASE: status_t = B_GENERAL_ERROR_BASE + 0x8000; +pub const B_PRINT_ERROR_BASE: status_t = B_GENERAL_ERROR_BASE + 0x9000; +pub const B_DEVICE_ERROR_BASE: status_t = B_GENERAL_ERROR_BASE + 0xa000; +pub const B_ERRORS_END: status_t = B_GENERAL_ERROR_BASE + 0xffff; + +// General errors +pub const B_NO_MEMORY: status_t = B_GENERAL_ERROR_BASE + 0; +pub const B_IO_ERROR: status_t = B_GENERAL_ERROR_BASE + 1; +pub const B_PERMISSION_DENIED: status_t = B_GENERAL_ERROR_BASE + 2; +pub const B_BAD_INDEX: status_t = B_GENERAL_ERROR_BASE + 3; +pub const B_BAD_TYPE: 
status_t = B_GENERAL_ERROR_BASE + 4; +pub const B_BAD_VALUE: status_t = B_GENERAL_ERROR_BASE + 5; +pub const B_MISMATCHED_VALUES: status_t = B_GENERAL_ERROR_BASE + 6; +pub const B_NAME_NOT_FOUND: status_t = B_GENERAL_ERROR_BASE + 7; +pub const B_NAME_IN_USE: status_t = B_GENERAL_ERROR_BASE + 8; +pub const B_TIMED_OUT: status_t = B_GENERAL_ERROR_BASE + 9; +pub const B_INTERRUPTED: status_t = B_GENERAL_ERROR_BASE + 10; +pub const B_WOULD_BLOCK: status_t = B_GENERAL_ERROR_BASE + 11; +pub const B_CANCELED: status_t = B_GENERAL_ERROR_BASE + 12; +pub const B_NO_INIT: status_t = B_GENERAL_ERROR_BASE + 13; +pub const B_NOT_INITIALIZED: status_t = B_GENERAL_ERROR_BASE + 13; +pub const B_BUSY: status_t = B_GENERAL_ERROR_BASE + 14; +pub const B_NOT_ALLOWED: status_t = B_GENERAL_ERROR_BASE + 15; +pub const B_BAD_DATA: status_t = B_GENERAL_ERROR_BASE + 16; +pub const B_DONT_DO_THAT: status_t = B_GENERAL_ERROR_BASE + 17; + +pub const B_ERROR: status_t = -1; +pub const B_OK: status_t = 0; +pub const B_NO_ERROR: status_t = 0; + +// Kernel kit errors +pub const B_BAD_SEM_ID: status_t = B_OS_ERROR_BASE + 0; +pub const B_NO_MORE_SEMS: status_t = B_OS_ERROR_BASE + 1; + +pub const B_BAD_THREAD_ID: status_t = B_OS_ERROR_BASE + 0x100; +pub const B_NO_MORE_THREADS: status_t = B_OS_ERROR_BASE + 0x101; +pub const B_BAD_THREAD_STATE: status_t = B_OS_ERROR_BASE + 0x102; +pub const B_BAD_TEAM_ID: status_t = B_OS_ERROR_BASE + 0x103; +pub const B_NO_MORE_TEAMS: status_t = B_OS_ERROR_BASE + 0x104; + +pub const B_BAD_PORT_ID: status_t = B_OS_ERROR_BASE + 0x200; +pub const B_NO_MORE_PORTS: status_t = B_OS_ERROR_BASE + 0x201; + +pub const B_BAD_IMAGE_ID: status_t = B_OS_ERROR_BASE + 0x300; +pub const B_BAD_ADDRESS: status_t = B_OS_ERROR_BASE + 0x301; +pub const B_NOT_AN_EXECUTABLE: status_t = B_OS_ERROR_BASE + 0x302; +pub const B_MISSING_LIBRARY: status_t = B_OS_ERROR_BASE + 0x303; +pub const B_MISSING_SYMBOL: status_t = B_OS_ERROR_BASE + 0x304; +pub const B_UNKNOWN_EXECUTABLE: status_t = B_OS_ERROR_BASE + 0x305; +pub const B_LEGACY_EXECUTABLE: status_t = B_OS_ERROR_BASE + 0x306; + +pub const B_DEBUGGER_ALREADY_INSTALLED: status_t = B_OS_ERROR_BASE + 0x400; + +// Application kit errors +pub const B_BAD_REPLY: status_t = B_APP_ERROR_BASE + 0; +pub const B_DUPLICATE_REPLY: status_t = B_APP_ERROR_BASE + 1; +pub const B_MESSAGE_TO_SELF: status_t = B_APP_ERROR_BASE + 2; +pub const B_BAD_HANDLER: status_t = B_APP_ERROR_BASE + 3; +pub const B_ALREADY_RUNNING: status_t = B_APP_ERROR_BASE + 4; +pub const B_LAUNCH_FAILED: status_t = B_APP_ERROR_BASE + 5; +pub const B_AMBIGUOUS_APP_LAUNCH: status_t = B_APP_ERROR_BASE + 6; +pub const B_UNKNOWN_MIME_TYPE: status_t = B_APP_ERROR_BASE + 7; +pub const B_BAD_SCRIPT_SYNTAX: status_t = B_APP_ERROR_BASE + 8; +pub const B_LAUNCH_FAILED_NO_RESOLVE_LINK: status_t = B_APP_ERROR_BASE + 9; +pub const B_LAUNCH_FAILED_EXECUTABLE: status_t = B_APP_ERROR_BASE + 10; +pub const B_LAUNCH_FAILED_APP_NOT_FOUND: status_t = B_APP_ERROR_BASE + 11; +pub const B_LAUNCH_FAILED_APP_IN_TRASH: status_t = B_APP_ERROR_BASE + 12; +pub const B_LAUNCH_FAILED_NO_PREFERRED_APP: status_t = B_APP_ERROR_BASE + 13; +pub const B_LAUNCH_FAILED_FILES_APP_NOT_FOUND: status_t = B_APP_ERROR_BASE + 14; +pub const B_BAD_MIME_SNIFFER_RULE: status_t = B_APP_ERROR_BASE + 15; +pub const B_NOT_A_MESSAGE: status_t = B_APP_ERROR_BASE + 16; +pub const B_SHUTDOWN_CANCELLED: status_t = B_APP_ERROR_BASE + 17; +pub const B_SHUTTING_DOWN: status_t = B_APP_ERROR_BASE + 18; + +// Storage kit errors +pub const B_FILE_ERROR: status_t = 
B_STORAGE_ERROR_BASE + 0; +pub const B_FILE_EXISTS: status_t = B_STORAGE_ERROR_BASE + 2; +pub const B_ENTRY_NOT_FOUND: status_t = B_STORAGE_ERROR_BASE + 3; +pub const B_NAME_TOO_LONG: status_t = B_STORAGE_ERROR_BASE + 4; +pub const B_NOT_A_DIRECTORY: status_t = B_STORAGE_ERROR_BASE + 5; +pub const B_DIRECTORY_NOT_EMPTY: status_t = B_STORAGE_ERROR_BASE + 6; +pub const B_DEVICE_FULL: status_t = B_STORAGE_ERROR_BASE + 7; +pub const B_READ_ONLY_DEVICE: status_t = B_STORAGE_ERROR_BASE + 8; +pub const B_IS_A_DIRECTORY: status_t = B_STORAGE_ERROR_BASE + 9; +pub const B_NO_MORE_FDS: status_t = B_STORAGE_ERROR_BASE + 10; +pub const B_CROSS_DEVICE_LINK: status_t = B_STORAGE_ERROR_BASE + 11; +pub const B_LINK_LIMIT: status_t = B_STORAGE_ERROR_BASE + 12; +pub const B_BUSTED_PIPE: status_t = B_STORAGE_ERROR_BASE + 13; +pub const B_UNSUPPORTED: status_t = B_STORAGE_ERROR_BASE + 14; +pub const B_PARTITION_TOO_SMALL: status_t = B_STORAGE_ERROR_BASE + 15; +pub const B_PARTIAL_READ: status_t = B_STORAGE_ERROR_BASE + 16; +pub const B_PARTIAL_WRITE: status_t = B_STORAGE_ERROR_BASE + 17; + +// Mapped posix errors +pub const B_BUFFER_OVERFLOW: status_t = crate::EOVERFLOW; +pub const B_TOO_MANY_ARGS: status_t = crate::E2BIG; +pub const B_FILE_TOO_LARGE: status_t = crate::EFBIG; +pub const B_RESULT_NOT_REPRESENTABLE: status_t = crate::ERANGE; +pub const B_DEVICE_NOT_FOUND: status_t = crate::ENODEV; +pub const B_NOT_SUPPORTED: status_t = crate::EOPNOTSUPP; + +// Media kit errors +pub const B_STREAM_NOT_FOUND: status_t = B_MEDIA_ERROR_BASE + 0; +pub const B_SERVER_NOT_FOUND: status_t = B_MEDIA_ERROR_BASE + 1; +pub const B_RESOURCE_NOT_FOUND: status_t = B_MEDIA_ERROR_BASE + 2; +pub const B_RESOURCE_UNAVAILABLE: status_t = B_MEDIA_ERROR_BASE + 3; +pub const B_BAD_SUBSCRIBER: status_t = B_MEDIA_ERROR_BASE + 4; +pub const B_SUBSCRIBER_NOT_ENTERED: status_t = B_MEDIA_ERROR_BASE + 5; +pub const B_BUFFER_NOT_AVAILABLE: status_t = B_MEDIA_ERROR_BASE + 6; +pub const B_LAST_BUFFER_ERROR: status_t = B_MEDIA_ERROR_BASE + 7; + +pub const B_MEDIA_SYSTEM_FAILURE: status_t = B_MEDIA_ERROR_BASE + 100; +pub const B_MEDIA_BAD_NODE: status_t = B_MEDIA_ERROR_BASE + 101; +pub const B_MEDIA_NODE_BUSY: status_t = B_MEDIA_ERROR_BASE + 102; +pub const B_MEDIA_BAD_FORMAT: status_t = B_MEDIA_ERROR_BASE + 103; +pub const B_MEDIA_BAD_BUFFER: status_t = B_MEDIA_ERROR_BASE + 104; +pub const B_MEDIA_TOO_MANY_NODES: status_t = B_MEDIA_ERROR_BASE + 105; +pub const B_MEDIA_TOO_MANY_BUFFERS: status_t = B_MEDIA_ERROR_BASE + 106; +pub const B_MEDIA_NODE_ALREADY_EXISTS: status_t = B_MEDIA_ERROR_BASE + 107; +pub const B_MEDIA_BUFFER_ALREADY_EXISTS: status_t = B_MEDIA_ERROR_BASE + 108; +pub const B_MEDIA_CANNOT_SEEK: status_t = B_MEDIA_ERROR_BASE + 109; +pub const B_MEDIA_CANNOT_CHANGE_RUN_MODE: status_t = B_MEDIA_ERROR_BASE + 110; +pub const B_MEDIA_APP_ALREADY_REGISTERED: status_t = B_MEDIA_ERROR_BASE + 111; +pub const B_MEDIA_APP_NOT_REGISTERED: status_t = B_MEDIA_ERROR_BASE + 112; +pub const B_MEDIA_CANNOT_RECLAIM_BUFFERS: status_t = B_MEDIA_ERROR_BASE + 113; +pub const B_MEDIA_BUFFERS_NOT_RECLAIMED: status_t = B_MEDIA_ERROR_BASE + 114; +pub const B_MEDIA_TIME_SOURCE_STOPPED: status_t = B_MEDIA_ERROR_BASE + 115; +pub const B_MEDIA_TIME_SOURCE_BUSY: status_t = B_MEDIA_ERROR_BASE + 116; +pub const B_MEDIA_BAD_SOURCE: status_t = B_MEDIA_ERROR_BASE + 117; +pub const B_MEDIA_BAD_DESTINATION: status_t = B_MEDIA_ERROR_BASE + 118; +pub const B_MEDIA_ALREADY_CONNECTED: status_t = B_MEDIA_ERROR_BASE + 119; +pub const B_MEDIA_NOT_CONNECTED: status_t = 
B_MEDIA_ERROR_BASE + 120; +pub const B_MEDIA_BAD_CLIP_FORMAT: status_t = B_MEDIA_ERROR_BASE + 121; +pub const B_MEDIA_ADDON_FAILED: status_t = B_MEDIA_ERROR_BASE + 122; +pub const B_MEDIA_ADDON_DISABLED: status_t = B_MEDIA_ERROR_BASE + 123; +pub const B_MEDIA_CHANGE_IN_PROGRESS: status_t = B_MEDIA_ERROR_BASE + 124; +pub const B_MEDIA_STALE_CHANGE_COUNT: status_t = B_MEDIA_ERROR_BASE + 125; +pub const B_MEDIA_ADDON_RESTRICTED: status_t = B_MEDIA_ERROR_BASE + 126; +pub const B_MEDIA_NO_HANDLER: status_t = B_MEDIA_ERROR_BASE + 127; +pub const B_MEDIA_DUPLICATE_FORMAT: status_t = B_MEDIA_ERROR_BASE + 128; +pub const B_MEDIA_REALTIME_DISABLED: status_t = B_MEDIA_ERROR_BASE + 129; +pub const B_MEDIA_REALTIME_UNAVAILABLE: status_t = B_MEDIA_ERROR_BASE + 130; + +// Mail kit errors +pub const B_MAIL_NO_DAEMON: status_t = B_MAIL_ERROR_BASE + 0; +pub const B_MAIL_UNKNOWN_USER: status_t = B_MAIL_ERROR_BASE + 1; +pub const B_MAIL_WRONG_PASSWORD: status_t = B_MAIL_ERROR_BASE + 2; +pub const B_MAIL_UNKNOWN_HOST: status_t = B_MAIL_ERROR_BASE + 3; +pub const B_MAIL_ACCESS_ERROR: status_t = B_MAIL_ERROR_BASE + 4; +pub const B_MAIL_UNKNOWN_FIELD: status_t = B_MAIL_ERROR_BASE + 5; +pub const B_MAIL_NO_RECIPIENT: status_t = B_MAIL_ERROR_BASE + 6; +pub const B_MAIL_INVALID_MAIL: status_t = B_MAIL_ERROR_BASE + 7; + +// Print kit errors +pub const B_NO_PRINT_SERVER: status_t = B_PRINT_ERROR_BASE + 0; + +// Device kit errors +pub const B_DEV_INVALID_IOCTL: status_t = B_DEVICE_ERROR_BASE + 0; +pub const B_DEV_NO_MEMORY: status_t = B_DEVICE_ERROR_BASE + 1; +pub const B_DEV_BAD_DRIVE_NUM: status_t = B_DEVICE_ERROR_BASE + 2; +pub const B_DEV_NO_MEDIA: status_t = B_DEVICE_ERROR_BASE + 3; +pub const B_DEV_UNREADABLE: status_t = B_DEVICE_ERROR_BASE + 4; +pub const B_DEV_FORMAT_ERROR: status_t = B_DEVICE_ERROR_BASE + 5; +pub const B_DEV_TIMEOUT: status_t = B_DEVICE_ERROR_BASE + 6; +pub const B_DEV_RECALIBRATE_ERROR: status_t = B_DEVICE_ERROR_BASE + 7; +pub const B_DEV_SEEK_ERROR: status_t = B_DEVICE_ERROR_BASE + 8; +pub const B_DEV_ID_ERROR: status_t = B_DEVICE_ERROR_BASE + 9; +pub const B_DEV_READ_ERROR: status_t = B_DEVICE_ERROR_BASE + 10; +pub const B_DEV_WRITE_ERROR: status_t = B_DEVICE_ERROR_BASE + 11; +pub const B_DEV_NOT_READY: status_t = B_DEVICE_ERROR_BASE + 12; +pub const B_DEV_MEDIA_CHANGED: status_t = B_DEVICE_ERROR_BASE + 13; +pub const B_DEV_MEDIA_CHANGE_REQUESTED: status_t = B_DEVICE_ERROR_BASE + 14; +pub const B_DEV_RESOURCE_CONFLICT: status_t = B_DEVICE_ERROR_BASE + 15; +pub const B_DEV_CONFIGURATION_ERROR: status_t = B_DEVICE_ERROR_BASE + 16; +pub const B_DEV_DISABLED_BY_USER: status_t = B_DEVICE_ERROR_BASE + 17; +pub const B_DEV_DOOR_OPEN: status_t = B_DEVICE_ERROR_BASE + 18; + +pub const B_DEV_INVALID_PIPE: status_t = B_DEVICE_ERROR_BASE + 19; +pub const B_DEV_CRC_ERROR: status_t = B_DEVICE_ERROR_BASE + 20; +pub const B_DEV_STALLED: status_t = B_DEVICE_ERROR_BASE + 21; +pub const B_DEV_BAD_PID: status_t = B_DEVICE_ERROR_BASE + 22; +pub const B_DEV_UNEXPECTED_PID: status_t = B_DEVICE_ERROR_BASE + 23; +pub const B_DEV_DATA_OVERRUN: status_t = B_DEVICE_ERROR_BASE + 24; +pub const B_DEV_DATA_UNDERRUN: status_t = B_DEVICE_ERROR_BASE + 25; +pub const B_DEV_FIFO_OVERRUN: status_t = B_DEVICE_ERROR_BASE + 26; +pub const B_DEV_FIFO_UNDERRUN: status_t = B_DEVICE_ERROR_BASE + 27; +pub const B_DEV_PENDING: status_t = B_DEVICE_ERROR_BASE + 28; +pub const B_DEV_MULTIPLE_ERRORS: status_t = B_DEVICE_ERROR_BASE + 29; +pub const B_DEV_TOO_LATE: status_t = B_DEVICE_ERROR_BASE + 30; + +// translation kit errors +pub const 
B_TRANSLATION_BASE_ERROR: status_t = B_TRANSLATION_ERROR_BASE + 0; +pub const B_NO_TRANSLATOR: status_t = B_TRANSLATION_ERROR_BASE + 1; +pub const B_ILLEGAL_DATA: status_t = B_TRANSLATION_ERROR_BASE + 2; + +// support/TypeConstants.h +pub const B_AFFINE_TRANSFORM_TYPE: u32 = haiku_constant!('A', 'M', 'T', 'X'); +pub const B_ALIGNMENT_TYPE: u32 = haiku_constant!('A', 'L', 'G', 'N'); +pub const B_ANY_TYPE: u32 = haiku_constant!('A', 'N', 'Y', 'T'); +pub const B_ATOM_TYPE: u32 = haiku_constant!('A', 'T', 'O', 'M'); +pub const B_ATOMREF_TYPE: u32 = haiku_constant!('A', 'T', 'M', 'R'); +pub const B_BOOL_TYPE: u32 = haiku_constant!('B', 'O', 'O', 'L'); +pub const B_CHAR_TYPE: u32 = haiku_constant!('C', 'H', 'A', 'R'); +pub const B_COLOR_8_BIT_TYPE: u32 = haiku_constant!('C', 'L', 'R', 'B'); +pub const B_DOUBLE_TYPE: u32 = haiku_constant!('D', 'B', 'L', 'E'); +pub const B_FLOAT_TYPE: u32 = haiku_constant!('F', 'L', 'O', 'T'); +pub const B_GRAYSCALE_8_BIT_TYPE: u32 = haiku_constant!('G', 'R', 'Y', 'B'); +pub const B_INT16_TYPE: u32 = haiku_constant!('S', 'H', 'R', 'T'); +pub const B_INT32_TYPE: u32 = haiku_constant!('L', 'O', 'N', 'G'); +pub const B_INT64_TYPE: u32 = haiku_constant!('L', 'L', 'N', 'G'); +pub const B_INT8_TYPE: u32 = haiku_constant!('B', 'Y', 'T', 'E'); +pub const B_LARGE_ICON_TYPE: u32 = haiku_constant!('I', 'C', 'O', 'N'); +pub const B_MEDIA_PARAMETER_GROUP_TYPE: u32 = haiku_constant!('B', 'M', 'C', 'G'); +pub const B_MEDIA_PARAMETER_TYPE: u32 = haiku_constant!('B', 'M', 'C', 'T'); +pub const B_MEDIA_PARAMETER_WEB_TYPE: u32 = haiku_constant!('B', 'M', 'C', 'W'); +pub const B_MESSAGE_TYPE: u32 = haiku_constant!('M', 'S', 'G', 'G'); +pub const B_MESSENGER_TYPE: u32 = haiku_constant!('M', 'S', 'N', 'G'); +pub const B_MIME_TYPE: u32 = haiku_constant!('M', 'I', 'M', 'E'); +pub const B_MINI_ICON_TYPE: u32 = haiku_constant!('M', 'I', 'C', 'N'); +pub const B_MONOCHROME_1_BIT_TYPE: u32 = haiku_constant!('M', 'N', 'O', 'B'); +pub const B_OBJECT_TYPE: u32 = haiku_constant!('O', 'P', 'T', 'R'); +pub const B_OFF_T_TYPE: u32 = haiku_constant!('O', 'F', 'F', 'T'); +pub const B_PATTERN_TYPE: u32 = haiku_constant!('P', 'A', 'T', 'N'); +pub const B_POINTER_TYPE: u32 = haiku_constant!('P', 'N', 'T', 'R'); +pub const B_POINT_TYPE: u32 = haiku_constant!('B', 'P', 'N', 'T'); +pub const B_PROPERTY_INFO_TYPE: u32 = haiku_constant!('S', 'C', 'T', 'D'); +pub const B_RAW_TYPE: u32 = haiku_constant!('R', 'A', 'W', 'T'); +pub const B_RECT_TYPE: u32 = haiku_constant!('R', 'E', 'C', 'T'); +pub const B_REF_TYPE: u32 = haiku_constant!('R', 'R', 'E', 'F'); +pub const B_RGB_32_BIT_TYPE: u32 = haiku_constant!('R', 'G', 'B', 'B'); +pub const B_RGB_COLOR_TYPE: u32 = haiku_constant!('R', 'G', 'B', 'C'); +pub const B_SIZE_TYPE: u32 = haiku_constant!('S', 'I', 'Z', 'E'); +pub const B_SIZE_T_TYPE: u32 = haiku_constant!('S', 'I', 'Z', 'T'); +pub const B_SSIZE_T_TYPE: u32 = haiku_constant!('S', 'S', 'Z', 'T'); +pub const B_STRING_TYPE: u32 = haiku_constant!('C', 'S', 'T', 'R'); +pub const B_STRING_LIST_TYPE: u32 = haiku_constant!('S', 'T', 'R', 'L'); +pub const B_TIME_TYPE: u32 = haiku_constant!('T', 'I', 'M', 'E'); +pub const B_UINT16_TYPE: u32 = haiku_constant!('U', 'S', 'H', 'T'); +pub const B_UINT32_TYPE: u32 = haiku_constant!('U', 'L', 'N', 'G'); +pub const B_UINT64_TYPE: u32 = haiku_constant!('U', 'L', 'L', 'G'); +pub const B_UINT8_TYPE: u32 = haiku_constant!('U', 'B', 'Y', 'T'); +pub const B_VECTOR_ICON_TYPE: u32 = haiku_constant!('V', 'I', 'C', 'N'); +pub const B_XATTR_TYPE: u32 = haiku_constant!('X', 'A', 'T', 
'R'); +pub const B_NETWORK_ADDRESS_TYPE: u32 = haiku_constant!('N', 'W', 'A', 'D'); +pub const B_MIME_STRING_TYPE: u32 = haiku_constant!('M', 'I', 'M', 'S'); +pub const B_ASCII_TYPE: u32 = haiku_constant!('T', 'E', 'X', 'T'); +pub const B_APP_IMAGE_SYMBOL: *const c_void = core::ptr::null(); + +extern "C" { + // kernel/OS.h + pub fn create_area( + name: *const c_char, + startAddress: *mut *mut c_void, + addressSpec: u32, + size: usize, + lock: u32, + protection: u32, + ) -> area_id; + pub fn clone_area( + name: *const c_char, + destAddress: *mut *mut c_void, + addressSpec: u32, + protection: u32, + source: area_id, + ) -> area_id; + pub fn find_area(name: *const c_char) -> area_id; + pub fn area_for(address: *mut c_void) -> area_id; + pub fn delete_area(id: area_id) -> status_t; + pub fn resize_area(id: area_id, newSize: usize) -> status_t; + pub fn set_area_protection(id: area_id, newProtection: u32) -> status_t; + pub fn _get_area_info(id: area_id, areaInfo: *mut area_info, size: usize) -> status_t; + pub fn _get_next_area_info( + team: team_id, + cookie: *mut isize, + areaInfo: *mut area_info, + size: usize, + ) -> status_t; + + pub fn create_port(capacity: i32, name: *const c_char) -> port_id; + pub fn find_port(name: *const c_char) -> port_id; + pub fn read_port( + port: port_id, + code: *mut i32, + buffer: *mut c_void, + bufferSize: size_t, + ) -> ssize_t; + pub fn read_port_etc( + port: port_id, + code: *mut i32, + buffer: *mut c_void, + bufferSize: size_t, + flags: u32, + timeout: bigtime_t, + ) -> ssize_t; + pub fn write_port( + port: port_id, + code: i32, + buffer: *const c_void, + bufferSize: size_t, + ) -> status_t; + pub fn write_port_etc( + port: port_id, + code: i32, + buffer: *const c_void, + bufferSize: size_t, + flags: u32, + timeout: bigtime_t, + ) -> status_t; + pub fn close_port(port: port_id) -> status_t; + pub fn delete_port(port: port_id) -> status_t; + pub fn port_buffer_size(port: port_id) -> ssize_t; + pub fn port_buffer_size_etc(port: port_id, flags: u32, timeout: bigtime_t) -> ssize_t; + pub fn port_count(port: port_id) -> ssize_t; + pub fn set_port_owner(port: port_id, team: team_id) -> status_t; + + pub fn _get_port_info(port: port_id, buf: *mut port_info, portInfoSize: size_t) -> status_t; + pub fn _get_next_port_info( + port: port_id, + cookie: *mut i32, + portInfo: *mut port_info, + portInfoSize: size_t, + ) -> status_t; + pub fn _get_port_message_info_etc( + port: port_id, + info: *mut port_message_info, + infoSize: size_t, + flags: u32, + timeout: bigtime_t, + ) -> status_t; + + pub fn create_sem(count: i32, name: *const c_char) -> sem_id; + pub fn delete_sem(id: sem_id) -> status_t; + pub fn acquire_sem(id: sem_id) -> status_t; + pub fn acquire_sem_etc(id: sem_id, count: i32, flags: u32, timeout: bigtime_t) -> status_t; + pub fn release_sem(id: sem_id) -> status_t; + pub fn release_sem_etc(id: sem_id, count: i32, flags: u32) -> status_t; + pub fn switch_sem(semToBeReleased: sem_id, id: sem_id) -> status_t; + pub fn switch_sem_etc( + semToBeReleased: sem_id, + id: sem_id, + count: i32, + flags: u32, + timeout: bigtime_t, + ) -> status_t; + pub fn get_sem_count(id: sem_id, threadCount: *mut i32) -> status_t; + pub fn set_sem_owner(id: sem_id, team: team_id) -> status_t; + pub fn _get_sem_info(id: sem_id, info: *mut sem_info, infoSize: size_t) -> status_t; + pub fn _get_next_sem_info( + team: team_id, + cookie: *mut i32, + info: *mut sem_info, + infoSize: size_t, + ) -> status_t; + + pub fn kill_team(team: team_id) -> status_t; + pub fn _get_team_info(team: 
team_id, info: *mut team_info, size: size_t) -> status_t; + pub fn _get_next_team_info(cookie: *mut i32, info: *mut team_info, size: size_t) -> status_t; + + pub fn spawn_thread( + func: thread_func, + name: *const c_char, + priority: i32, + data: *mut c_void, + ) -> thread_id; + pub fn kill_thread(thread: thread_id) -> status_t; + pub fn resume_thread(thread: thread_id) -> status_t; + pub fn suspend_thread(thread: thread_id) -> status_t; + + pub fn rename_thread(thread: thread_id, newName: *const c_char) -> status_t; + pub fn set_thread_priority(thread: thread_id, newPriority: i32) -> status_t; + pub fn suggest_thread_priority( + what: u32, + period: i32, + jitter: crate::bigtime_t, + length: crate::bigtime_t, + ) -> i32; + pub fn estimate_max_scheduling_latency(th: crate::thread_id) -> crate::bigtime_t; + pub fn exit_thread(status: status_t); + pub fn wait_for_thread(thread: thread_id, returnValue: *mut status_t) -> status_t; + pub fn on_exit_thread(callback: extern "C" fn(*mut c_void), data: *mut c_void) -> status_t; + + pub fn find_thread(name: *const c_char) -> thread_id; + + pub fn get_scheduler_mode() -> i32; + pub fn set_scheduler_mode(mode: i32) -> status_t; + + pub fn send_data( + thread: thread_id, + code: i32, + buffer: *const c_void, + bufferSize: size_t, + ) -> status_t; + pub fn receive_data(sender: *mut thread_id, buffer: *mut c_void, bufferSize: size_t) -> i32; + pub fn has_data(thread: thread_id) -> bool; + + pub fn snooze(amount: bigtime_t) -> status_t; + pub fn snooze_etc(amount: bigtime_t, timeBase: c_int, flags: u32) -> status_t; + pub fn snooze_until(time: bigtime_t, timeBase: c_int) -> status_t; + + pub fn _get_thread_info(id: thread_id, info: *mut thread_info, size: size_t) -> status_t; + pub fn _get_next_thread_info( + team: team_id, + cookie: *mut i32, + info: *mut thread_info, + size: size_t, + ) -> status_t; + + pub fn get_pthread_thread_id(thread: crate::pthread_t) -> thread_id; + + pub fn _get_team_usage_info( + team: team_id, + who: i32, + info: *mut team_usage_info, + size: size_t, + ) -> status_t; + + pub fn real_time_clock() -> c_ulong; + pub fn set_real_time_clock(secsSinceJan1st1970: c_ulong); + pub fn real_time_clock_usecs() -> bigtime_t; + pub fn system_time() -> bigtime_t; + pub fn system_time_nsecs() -> nanotime_t; + // set_timezone() is deprecated and a no-op + + pub fn set_alarm(when: bigtime_t, flags: u32) -> bigtime_t; + pub fn debugger(message: *const c_char); + pub fn disable_debugger(state: c_int) -> c_int; + + pub fn get_system_info(info: *mut system_info) -> status_t; + pub fn _get_cpu_info_etc( + firstCPU: u32, + cpuCount: u32, + info: *mut cpu_info, + size: size_t, + ) -> status_t; + pub fn get_cpu_topology_info( + topologyInfos: *mut cpu_topology_node_info, + topologyInfoCount: *mut u32, + ) -> status_t; + pub fn is_computer_on() -> i32; + pub fn is_computer_on_fire() -> c_double; + pub fn send_signal(threadID: thread_id, signal: c_uint) -> c_int; + pub fn set_signal_stack(base: *mut c_void, size: size_t); + + pub fn wait_for_objects(infos: *mut object_wait_info, numInfos: c_int) -> ssize_t; + pub fn wait_for_objects_etc( + infos: *mut object_wait_info, + numInfos: c_int, + flags: u32, + timeout: bigtime_t, + ) -> ssize_t; + + // kernel/fs_attr.h + pub fn fs_read_attr( + fd: c_int, + attribute: *const c_char, + type_: u32, + pos: off_t, + buffer: *mut c_void, + readBytes: size_t, + ) -> ssize_t; + pub fn fs_write_attr( + fd: c_int, + attribute: *const c_char, + type_: u32, + pos: off_t, + buffer: *const c_void, + writeBytes: size_t, + ) 
-> ssize_t; + pub fn fs_remove_attr(fd: c_int, attribute: *const c_char) -> c_int; + pub fn fs_stat_attr(fd: c_int, attribute: *const c_char, attrInfo: *mut attr_info) -> c_int; + + pub fn fs_open_attr( + path: *const c_char, + attribute: *const c_char, + type_: u32, + openMode: c_int, + ) -> c_int; + pub fn fs_fopen_attr(fd: c_int, attribute: *const c_char, type_: u32, openMode: c_int) + -> c_int; + pub fn fs_close_attr(fd: c_int) -> c_int; + + pub fn fs_open_attr_dir(path: *const c_char) -> *mut crate::DIR; + pub fn fs_lopen_attr_dir(path: *const c_char) -> *mut crate::DIR; + pub fn fs_fopen_attr_dir(fd: c_int) -> *mut crate::DIR; + pub fn fs_close_attr_dir(dir: *mut crate::DIR) -> c_int; + pub fn fs_read_attr_dir(dir: *mut crate::DIR) -> *mut crate::dirent; + pub fn fs_rewind_attr_dir(dir: *mut crate::DIR); + + // kernel/fs_image.h + pub fn fs_create_index( + device: crate::dev_t, + name: *const c_char, + type_: u32, + flags: u32, + ) -> c_int; + pub fn fs_remove_index(device: crate::dev_t, name: *const c_char) -> c_int; + pub fn fs_stat_index( + device: crate::dev_t, + name: *const c_char, + indexInfo: *mut index_info, + ) -> c_int; + + pub fn fs_open_index_dir(device: crate::dev_t) -> *mut crate::DIR; + pub fn fs_close_index_dir(indexDirectory: *mut crate::DIR) -> c_int; + pub fn fs_read_index_dir(indexDirectory: *mut crate::DIR) -> *mut crate::dirent; + pub fn fs_rewind_index_dir(indexDirectory: *mut crate::DIR); + + // kernel/fs_info.h + pub fn dev_for_path(path: *const c_char) -> crate::dev_t; + pub fn next_dev(pos: *mut i32) -> crate::dev_t; + pub fn fs_stat_dev(dev: crate::dev_t, info: *mut fs_info) -> c_int; + + // kernel/fs_query.h + pub fn fs_open_query(device: crate::dev_t, query: *const c_char, flags: u32) + -> *mut crate::DIR; + pub fn fs_open_live_query( + device: crate::dev_t, + query: *const c_char, + flags: u32, + port: port_id, + token: i32, + ) -> *mut crate::DIR; + pub fn fs_close_query(d: *mut crate::DIR) -> c_int; + pub fn fs_read_query(d: *mut crate::DIR) -> *mut crate::dirent; + pub fn get_path_for_dirent(dent: *mut crate::dirent, buf: *mut c_char, len: size_t) + -> status_t; + + // kernel/fs_volume.h + pub fn fs_mount_volume( + where_: *const c_char, + device: *const c_char, + filesystem: *const c_char, + flags: u32, + parameters: *const c_char, + ) -> crate::dev_t; + pub fn fs_unmount_volume(path: *const c_char, flags: u32) -> status_t; + + // kernel/image.h + pub fn load_image( + argc: i32, + argv: *mut *const c_char, + environ: *mut *const c_char, + ) -> thread_id; + pub fn load_add_on(path: *const c_char) -> image_id; + pub fn unload_add_on(image: image_id) -> status_t; + pub fn get_image_symbol( + image: image_id, + name: *const c_char, + symbolType: i32, + symbolLocation: *mut *mut c_void, + ) -> status_t; + pub fn get_nth_image_symbol( + image: image_id, + n: i32, + nameBuffer: *mut c_char, + nameLength: *mut i32, + symbolType: *mut i32, + symbolLocation: *mut *mut c_void, + ) -> status_t; + pub fn clear_caches(address: *mut c_void, length: size_t, flags: u32); + pub fn _get_image_info(image: image_id, info: *mut image_info, size: size_t) -> status_t; + pub fn _get_next_image_info( + team: team_id, + cookie: *mut i32, + info: *mut image_info, + size: size_t, + ) -> status_t; + pub fn find_path( + codePointer: *const c_void, + baseDirectory: path_base_directory, + subPath: *const c_char, + pathBuffer: *mut c_char, + bufferSize: usize, + ) -> status_t; + pub fn find_path_etc( + codePointer: *const c_void, + dependency: *const c_char, + architecture: *const 
c_char, + baseDirectory: path_base_directory, + subPath: *const c_char, + flags: u32, + pathBuffer: *mut c_char, + bufferSize: size_t, + ) -> status_t; + pub fn find_path_for_path( + path: *const c_char, + baseDirectory: path_base_directory, + subPath: *const c_char, + pathBuffer: *mut c_char, + bufferSize: size_t, + ) -> status_t; + pub fn find_path_for_path_etc( + path: *const c_char, + dependency: *const c_char, + architecture: *const c_char, + baseDirectory: path_base_directory, + subPath: *const c_char, + flags: u32, + pathBuffer: *mut c_char, + bufferSize: size_t, + ) -> status_t; + pub fn find_paths( + baseDirectory: path_base_directory, + subPath: *const c_char, + _paths: *mut *mut *mut c_char, + pathCount: *mut size_t, + ) -> status_t; + pub fn find_paths_etc( + architecture: *const c_char, + baseDirectory: path_base_directory, + subPath: *const c_char, + flags: u32, + _paths: *mut *mut *mut c_char, + pathCount: *mut size_t, + ) -> status_t; + pub fn find_directory( + which: directory_which, + volume: crate::dev_t, + createIt: bool, + pathString: *mut c_char, + length: i32, + ) -> status_t; + + pub fn get_cpuid(info: *mut cpuid_info, eaxRegister: u32, cpuNum: u32) -> status_t; +} + +// The following functions are defined as macros in C/C++ +#[inline] +pub unsafe fn get_cpu_info(firstCPU: u32, cpuCount: u32, info: *mut cpu_info) -> status_t { + _get_cpu_info_etc(firstCPU, cpuCount, info, size_of::() as size_t) +} + +#[inline] +pub unsafe fn get_area_info(id: area_id, info: *mut area_info) -> status_t { + _get_area_info(id, info, size_of::() as usize) +} + +#[inline] +pub unsafe fn get_next_area_info( + team: team_id, + cookie: *mut isize, + info: *mut area_info, +) -> status_t { + _get_next_area_info(team, cookie, info, size_of::() as usize) +} + +#[inline] +pub unsafe fn get_port_info(port: port_id, buf: *mut port_info) -> status_t { + _get_port_info(port, buf, size_of::() as size_t) +} + +#[inline] +pub unsafe fn get_next_port_info( + port: port_id, + cookie: *mut i32, + portInfo: *mut port_info, +) -> status_t { + _get_next_port_info(port, cookie, portInfo, size_of::() as size_t) +} + +#[inline] +pub unsafe fn get_port_message_info_etc( + port: port_id, + info: *mut port_message_info, + flags: u32, + timeout: bigtime_t, +) -> status_t { + _get_port_message_info_etc( + port, + info, + size_of::() as size_t, + flags, + timeout, + ) +} + +#[inline] +pub unsafe fn get_sem_info(id: sem_id, info: *mut sem_info) -> status_t { + _get_sem_info(id, info, size_of::() as size_t) +} + +#[inline] +pub unsafe fn get_next_sem_info(team: team_id, cookie: *mut i32, info: *mut sem_info) -> status_t { + _get_next_sem_info(team, cookie, info, size_of::() as size_t) +} + +#[inline] +pub unsafe fn get_team_info(team: team_id, info: *mut team_info) -> status_t { + _get_team_info(team, info, size_of::() as size_t) +} + +#[inline] +pub unsafe fn get_next_team_info(cookie: *mut i32, info: *mut team_info) -> status_t { + _get_next_team_info(cookie, info, size_of::() as size_t) +} + +#[inline] +pub unsafe fn get_team_usage_info(team: team_id, who: i32, info: *mut team_usage_info) -> status_t { + _get_team_usage_info(team, who, info, size_of::() as size_t) +} + +#[inline] +pub unsafe fn get_thread_info(id: thread_id, info: *mut thread_info) -> status_t { + _get_thread_info(id, info, size_of::() as size_t) +} + +#[inline] +pub unsafe fn get_next_thread_info( + team: team_id, + cookie: *mut i32, + info: *mut thread_info, +) -> status_t { + _get_next_thread_info(team, cookie, info, size_of::() as size_t) +} + 
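A minimal usage sketch (hypothetical, not part of the vendored file or of this patch) of the kernel/OS.h bindings declared above — here get_system_info() together with the system_info struct and the B_OK status constant, assuming the libc crate is built for a Haiku target:

    use std::mem::MaybeUninit;

    fn main() {
        // SAFETY: get_system_info() only writes into the pointed-to system_info.
        unsafe {
            let mut info = MaybeUninit::<libc::system_info>::uninit();
            if libc::get_system_info(info.as_mut_ptr()) == libc::B_OK {
                let info = info.assume_init();
                // Fields are filled in directly by the Haiku kernel.
                println!("cpus: {}, used pages: {}", info.cpu_count, info.used_pages);
            }
        }
    }

The _get_*_info entry points take an explicit size argument, mirroring the C convenience macros that pass sizeof of the target struct; the inline wrappers above supply that size so callers only hand over an out-pointer, as in the sketch.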
+// kernel/image.h +#[inline] +pub unsafe fn get_image_info(image: image_id, info: *mut image_info) -> status_t { + _get_image_info(image, info, size_of::() as size_t) +} + +#[inline] +pub unsafe fn get_next_image_info( + team: team_id, + cookie: *mut i32, + info: *mut image_info, +) -> status_t { + _get_next_image_info(team, cookie, info, size_of::() as size_t) +} diff --git a/vendor/libc/src/unix/haiku/x86_64.rs b/vendor/libc/src/unix/haiku/x86_64.rs new file mode 100644 index 00000000000000..16e2612ed760d1 --- /dev/null +++ b/vendor/libc/src/unix/haiku/x86_64.rs @@ -0,0 +1,208 @@ +use crate::prelude::*; + +s_no_extra_traits! { + pub struct fpu_state { + pub control: c_ushort, + pub status: c_ushort, + pub tag: c_ushort, + pub opcode: c_ushort, + pub rip: c_ulong, + pub rdp: c_ulong, + pub mxcsr: c_uint, + pub mscsr_mask: c_uint, + pub _fpreg: [[c_uchar; 8]; 16], + pub _xmm: [[c_uchar; 16]; 16], + pub _reserved_416_511: [c_uchar; 96], + } + + pub struct xstate_hdr { + pub bv: c_ulong, + pub xcomp_bv: c_ulong, + pub _reserved: [c_uchar; 48], + } + + pub struct savefpu { + pub fp_fxsave: fpu_state, + pub fp_xstate: xstate_hdr, + pub _fp_ymm: [[c_uchar; 16]; 16], + } + + pub struct mcontext_t { + pub rax: c_ulong, + pub rbx: c_ulong, + pub rcx: c_ulong, + pub rdx: c_ulong, + pub rdi: c_ulong, + pub rsi: c_ulong, + pub rbp: c_ulong, + pub r8: c_ulong, + pub r9: c_ulong, + pub r10: c_ulong, + pub r11: c_ulong, + pub r12: c_ulong, + pub r13: c_ulong, + pub r14: c_ulong, + pub r15: c_ulong, + pub rsp: c_ulong, + pub rip: c_ulong, + pub rflags: c_ulong, + pub fpu: savefpu, + } + + pub struct ucontext_t { + pub uc_link: *mut ucontext_t, + pub uc_sigmask: crate::sigset_t, + pub uc_stack: crate::stack_t, + pub uc_mcontext: mcontext_t, + } +} + +cfg_if! { + if #[cfg(feature = "extra_traits")] { + impl PartialEq for fpu_state { + fn eq(&self, other: &fpu_state) -> bool { + self.control == other.control + && self.status == other.status + && self.tag == other.tag + && self.opcode == other.opcode + && self.rip == other.rip + && self.rdp == other.rdp + && self.mxcsr == other.mxcsr + && self.mscsr_mask == other.mscsr_mask + && self + ._fpreg + .iter() + .zip(other._fpreg.iter()) + .all(|(a, b)| a == b) + && self._xmm.iter().zip(other._xmm.iter()).all(|(a, b)| a == b) + && self + ._reserved_416_511 + .iter() + .zip(other._reserved_416_511.iter()) + .all(|(a, b)| a == b) + } + } + impl Eq for fpu_state {} + impl hash::Hash for fpu_state { + fn hash(&self, state: &mut H) { + self.control.hash(state); + self.status.hash(state); + self.tag.hash(state); + self.opcode.hash(state); + self.rip.hash(state); + self.rdp.hash(state); + self.mxcsr.hash(state); + self.mscsr_mask.hash(state); + self._fpreg.hash(state); + self._xmm.hash(state); + self._reserved_416_511.hash(state); + } + } + + impl PartialEq for xstate_hdr { + fn eq(&self, other: &xstate_hdr) -> bool { + self.bv == other.bv + && self.xcomp_bv == other.xcomp_bv + && self + ._reserved + .iter() + .zip(other._reserved.iter()) + .all(|(a, b)| a == b) + } + } + impl Eq for xstate_hdr {} + impl hash::Hash for xstate_hdr { + fn hash(&self, state: &mut H) { + self.bv.hash(state); + self.xcomp_bv.hash(state); + self._reserved.hash(state); + } + } + + impl PartialEq for savefpu { + fn eq(&self, other: &savefpu) -> bool { + self.fp_fxsave == other.fp_fxsave + && self.fp_xstate == other.fp_xstate + && self + ._fp_ymm + .iter() + .zip(other._fp_ymm.iter()) + .all(|(a, b)| a == b) + } + } + impl Eq for savefpu {} + impl hash::Hash for savefpu { + fn hash(&self, state: 
&mut H) { + self.fp_fxsave.hash(state); + self.fp_xstate.hash(state); + self._fp_ymm.hash(state); + } + } + + impl PartialEq for mcontext_t { + fn eq(&self, other: &mcontext_t) -> bool { + self.rax == other.rax + && self.rbx == other.rbx + && self.rbx == other.rbx + && self.rcx == other.rcx + && self.rdx == other.rdx + && self.rdi == other.rdi + && self.rsi == other.rsi + && self.r8 == other.r8 + && self.r9 == other.r9 + && self.r10 == other.r10 + && self.r11 == other.r11 + && self.r12 == other.r12 + && self.r13 == other.r13 + && self.r14 == other.r14 + && self.r15 == other.r15 + && self.rsp == other.rsp + && self.rip == other.rip + && self.rflags == other.rflags + && self.fpu == other.fpu + } + } + impl Eq for mcontext_t {} + impl hash::Hash for mcontext_t { + fn hash(&self, state: &mut H) { + self.rax.hash(state); + self.rbx.hash(state); + self.rcx.hash(state); + self.rdx.hash(state); + self.rdi.hash(state); + self.rsi.hash(state); + self.rbp.hash(state); + self.r8.hash(state); + self.r9.hash(state); + self.r10.hash(state); + self.r11.hash(state); + self.r12.hash(state); + self.r13.hash(state); + self.r14.hash(state); + self.r15.hash(state); + self.rsp.hash(state); + self.rip.hash(state); + self.rflags.hash(state); + self.fpu.hash(state); + } + } + + impl PartialEq for ucontext_t { + fn eq(&self, other: &ucontext_t) -> bool { + self.uc_link == other.uc_link + && self.uc_sigmask == other.uc_sigmask + && self.uc_stack == other.uc_stack + && self.uc_mcontext == other.uc_mcontext + } + } + impl Eq for ucontext_t {} + impl hash::Hash for ucontext_t { + fn hash(&self, state: &mut H) { + self.uc_link.hash(state); + self.uc_sigmask.hash(state); + self.uc_stack.hash(state); + self.uc_mcontext.hash(state); + } + } + } +} diff --git a/vendor/libc/src/unix/hurd/b32.rs b/vendor/libc/src/unix/hurd/b32.rs new file mode 100644 index 00000000000000..e706789006dbaa --- /dev/null +++ b/vendor/libc/src/unix/hurd/b32.rs @@ -0,0 +1,92 @@ +use crate::prelude::*; + +pub type __int64_t = c_longlong; +pub type __uint64_t = c_ulonglong; + +pub type int_fast16_t = c_int; +pub type int_fast32_t = c_int; +pub type int_fast64_t = c_longlong; +pub type uint_fast16_t = c_uint; +pub type uint_fast32_t = c_uint; +pub type uint_fast64_t = c_ulonglong; + +pub type __quad_t = c_longlong; +pub type __u_quad_t = c_ulonglong; +pub type __intmax_t = c_longlong; +pub type __uintmax_t = c_ulonglong; + +pub type __squad_type = crate::__int64_t; +pub type __uquad_type = crate::__uint64_t; +pub type __sword_type = c_int; +pub type __uword_type = c_uint; +pub type __slong32_type = c_long; +pub type __ulong32_type = c_ulong; +pub type __s64_type = crate::__int64_t; +pub type __u64_type = crate::__uint64_t; + +pub type __ipc_pid_t = c_ushort; + +pub type Elf32_Half = u16; +pub type Elf32_Word = u32; +pub type Elf32_Off = u32; +pub type Elf32_Addr = u32; +pub type Elf32_Section = u16; + +pub type Elf_Addr = crate::Elf32_Addr; +pub type Elf_Half = crate::Elf32_Half; +pub type Elf_Ehdr = crate::Elf32_Ehdr; +pub type Elf_Phdr = crate::Elf32_Phdr; +pub type Elf_Shdr = crate::Elf32_Shdr; +pub type Elf_Sym = crate::Elf32_Sym; + +s! 
{ + pub struct Elf32_Ehdr { + pub e_ident: [c_uchar; 16], + pub e_type: Elf32_Half, + pub e_machine: Elf32_Half, + pub e_version: Elf32_Word, + pub e_entry: Elf32_Addr, + pub e_phoff: Elf32_Off, + pub e_shoff: Elf32_Off, + pub e_flags: Elf32_Word, + pub e_ehsize: Elf32_Half, + pub e_phentsize: Elf32_Half, + pub e_phnum: Elf32_Half, + pub e_shentsize: Elf32_Half, + pub e_shnum: Elf32_Half, + pub e_shstrndx: Elf32_Half, + } + + pub struct Elf32_Shdr { + pub sh_name: Elf32_Word, + pub sh_type: Elf32_Word, + pub sh_flags: Elf32_Word, + pub sh_addr: Elf32_Addr, + pub sh_offset: Elf32_Off, + pub sh_size: Elf32_Word, + pub sh_link: Elf32_Word, + pub sh_info: Elf32_Word, + pub sh_addralign: Elf32_Word, + pub sh_entsize: Elf32_Word, + } + + pub struct Elf32_Sym { + pub st_name: Elf32_Word, + pub st_value: Elf32_Addr, + pub st_size: Elf32_Word, + pub st_info: c_uchar, + pub st_other: c_uchar, + pub st_shndx: Elf32_Section, + } + + pub struct Elf32_Phdr { + pub p_type: crate::Elf32_Word, + pub p_offset: crate::Elf32_Off, + pub p_vaddr: crate::Elf32_Addr, + pub p_paddr: crate::Elf32_Addr, + pub p_filesz: crate::Elf32_Word, + pub p_memsz: crate::Elf32_Word, + pub p_flags: crate::Elf32_Word, + pub p_align: crate::Elf32_Word, + } +} diff --git a/vendor/libc/src/unix/hurd/b64.rs b/vendor/libc/src/unix/hurd/b64.rs new file mode 100644 index 00000000000000..a44428c575adfc --- /dev/null +++ b/vendor/libc/src/unix/hurd/b64.rs @@ -0,0 +1,94 @@ +use crate::prelude::*; + +pub type __int64_t = c_long; +pub type __uint64_t = c_ulong; + +pub type int_fast16_t = c_long; +pub type int_fast32_t = c_long; +pub type int_fast64_t = c_long; +pub type uint_fast16_t = c_ulong; +pub type uint_fast32_t = c_ulong; +pub type uint_fast64_t = c_ulong; + +pub type __quad_t = c_long; +pub type __u_quad_t = c_ulong; +pub type __intmax_t = c_long; +pub type __uintmax_t = c_ulong; + +pub type __squad_type = c_long; +pub type __uquad_type = c_ulong; +pub type __sword_type = c_long; +pub type __uword_type = c_ulong; +pub type __slong32_type = c_int; +pub type __ulong32_type = c_uint; +pub type __s64_type = c_long; +pub type __u64_type = c_ulong; + +pub type __ipc_pid_t = c_int; + +pub type Elf64_Half = u16; +pub type Elf64_Word = u32; +pub type Elf64_Off = u64; +pub type Elf64_Addr = u64; +pub type Elf64_Xword = u64; +pub type Elf64_Sxword = i64; +pub type Elf64_Section = u16; + +pub type Elf_Addr = crate::Elf64_Addr; +pub type Elf_Half = crate::Elf64_Half; +pub type Elf_Ehdr = crate::Elf64_Ehdr; +pub type Elf_Phdr = crate::Elf64_Phdr; +pub type Elf_Shdr = crate::Elf64_Shdr; +pub type Elf_Sym = crate::Elf64_Sym; + +s! 
{ + pub struct Elf64_Ehdr { + pub e_ident: [c_uchar; 16], + pub e_type: Elf64_Half, + pub e_machine: Elf64_Half, + pub e_version: Elf64_Word, + pub e_entry: Elf64_Addr, + pub e_phoff: Elf64_Off, + pub e_shoff: Elf64_Off, + pub e_flags: Elf64_Word, + pub e_ehsize: Elf64_Half, + pub e_phentsize: Elf64_Half, + pub e_phnum: Elf64_Half, + pub e_shentsize: Elf64_Half, + pub e_shnum: Elf64_Half, + pub e_shstrndx: Elf64_Half, + } + + pub struct Elf64_Shdr { + pub sh_name: Elf64_Word, + pub sh_type: Elf64_Word, + pub sh_flags: Elf64_Xword, + pub sh_addr: Elf64_Addr, + pub sh_offset: Elf64_Off, + pub sh_size: Elf64_Xword, + pub sh_link: Elf64_Word, + pub sh_info: Elf64_Word, + pub sh_addralign: Elf64_Xword, + pub sh_entsize: Elf64_Xword, + } + + pub struct Elf64_Sym { + pub st_name: Elf64_Word, + pub st_info: c_uchar, + pub st_other: c_uchar, + pub st_shndx: Elf64_Section, + pub st_value: Elf64_Addr, + pub st_size: Elf64_Xword, + } + + pub struct Elf64_Phdr { + pub p_type: crate::Elf64_Word, + pub p_flags: crate::Elf64_Word, + pub p_offset: crate::Elf64_Off, + pub p_vaddr: crate::Elf64_Addr, + pub p_paddr: crate::Elf64_Addr, + pub p_filesz: crate::Elf64_Xword, + pub p_memsz: crate::Elf64_Xword, + pub p_align: crate::Elf64_Xword, + } +} diff --git a/vendor/libc/src/unix/hurd/mod.rs b/vendor/libc/src/unix/hurd/mod.rs new file mode 100644 index 00000000000000..24e9fe56f392dc --- /dev/null +++ b/vendor/libc/src/unix/hurd/mod.rs @@ -0,0 +1,4623 @@ +#![allow(dead_code)] + +use crate::c_schar; +use crate::prelude::*; + +// types +pub type __s16_type = c_short; +pub type __u16_type = c_ushort; +pub type __s32_type = c_int; +pub type __u32_type = c_uint; +pub type __slongword_type = c_long; +pub type __ulongword_type = c_ulong; + +pub type __u_char = c_uchar; +pub type __u_short = c_ushort; +pub type __u_int = c_uint; +pub type __u_long = c_ulong; +pub type __int8_t = c_schar; +pub type __uint8_t = c_uchar; +pub type __int16_t = c_short; +pub type __uint16_t = c_ushort; +pub type __int32_t = c_int; +pub type __uint32_t = c_uint; +pub type __int_least8_t = __int8_t; +pub type __uint_least8_t = __uint8_t; +pub type __int_least16_t = __int16_t; +pub type __uint_least16_t = __uint16_t; +pub type __int_least32_t = __int32_t; +pub type __uint_least32_t = __uint32_t; +pub type __int_least64_t = __int64_t; +pub type __uint_least64_t = __uint64_t; + +pub type __dev_t = __uword_type; +pub type __uid_t = __u32_type; +pub type __gid_t = __u32_type; +pub type __ino_t = __ulongword_type; +pub type __ino64_t = __uquad_type; +pub type __mode_t = __u32_type; +pub type __nlink_t = __uword_type; +pub type __off_t = __slongword_type; +pub type __off64_t = __squad_type; +pub type __pid_t = __s32_type; +pub type __rlim_t = __ulongword_type; +pub type __rlim64_t = __uquad_type; +pub type __blkcnt_t = __slongword_type; +pub type __blkcnt64_t = __squad_type; +pub type __fsblkcnt_t = __ulongword_type; +pub type __fsblkcnt64_t = __uquad_type; +pub type __fsfilcnt_t = __ulongword_type; +pub type __fsfilcnt64_t = __uquad_type; +pub type __fsword_t = __sword_type; +pub type __id_t = __u32_type; +pub type __clock_t = __slongword_type; +pub type __time_t = __slongword_type; +pub type __useconds_t = __u32_type; +pub type __suseconds_t = __slongword_type; +pub type __suseconds64_t = __squad_type; +pub type __daddr_t = __s32_type; +pub type __key_t = __s32_type; +pub type __clockid_t = __s32_type; +pub type __timer_t = __uword_type; +pub type __blksize_t = __slongword_type; +pub type __fsid_t = __uquad_type; +pub type __ssize_t = 
__sword_type; +pub type __syscall_slong_t = __slongword_type; +pub type __syscall_ulong_t = __ulongword_type; +pub type __cpu_mask = __ulongword_type; + +pub type __loff_t = __off64_t; +pub type __caddr_t = *mut c_char; +pub type __intptr_t = __sword_type; +pub type __ptrdiff_t = __sword_type; +pub type __socklen_t = __u32_type; +pub type __sig_atomic_t = c_int; +pub type __time64_t = __int64_t; +pub type wchar_t = c_int; +pub type wint_t = c_uint; +pub type gid_t = __gid_t; +pub type uid_t = __uid_t; +pub type off_t = __off_t; +pub type off64_t = __off64_t; +pub type useconds_t = __useconds_t; +pub type pid_t = __pid_t; +pub type socklen_t = __socklen_t; + +pub type in_addr_t = u32; + +pub type _Float32 = f32; +pub type _Float64 = f64; +pub type _Float32x = f64; +pub type _Float64x = f64; + +pub type __locale_t = *mut __locale_struct; +pub type locale_t = __locale_t; + +pub type u_char = __u_char; +pub type u_short = __u_short; +pub type u_int = __u_int; +pub type u_long = __u_long; +pub type quad_t = __quad_t; +pub type u_quad_t = __u_quad_t; +pub type fsid_t = __fsid_t; +pub type loff_t = __loff_t; +pub type ino_t = __ino_t; +pub type ino64_t = __ino64_t; +pub type dev_t = __dev_t; +pub type mode_t = __mode_t; +pub type nlink_t = __nlink_t; +pub type id_t = __id_t; +pub type daddr_t = __daddr_t; +pub type caddr_t = __caddr_t; +pub type key_t = __key_t; +pub type clock_t = __clock_t; +pub type clockid_t = __clockid_t; +pub type time_t = __time_t; +pub type timer_t = __timer_t; +pub type suseconds_t = __suseconds_t; +pub type ulong = c_ulong; +pub type ushort = c_ushort; +pub type uint = c_uint; +pub type u_int8_t = __uint8_t; +pub type u_int16_t = __uint16_t; +pub type u_int32_t = __uint32_t; +pub type u_int64_t = __uint64_t; +pub type register_t = c_int; +pub type __sigset_t = c_ulong; +pub type sigset_t = __sigset_t; + +pub type __fd_mask = c_long; +pub type fd_mask = __fd_mask; +pub type blksize_t = __blksize_t; +pub type blkcnt_t = __blkcnt_t; +pub type fsblkcnt_t = __fsblkcnt_t; +pub type fsfilcnt_t = __fsfilcnt_t; +pub type blkcnt64_t = __blkcnt64_t; +pub type fsblkcnt64_t = __fsblkcnt64_t; +pub type fsfilcnt64_t = __fsfilcnt64_t; + +pub type __pthread_spinlock_t = c_int; +pub type __tss_t = c_int; +pub type __thrd_t = c_long; +pub type __pthread_t = c_long; +pub type pthread_t = __pthread_t; +pub type __pthread_process_shared = c_uint; +pub type __pthread_inheritsched = c_uint; +pub type __pthread_contentionscope = c_uint; +pub type __pthread_detachstate = c_uint; +pub type pthread_attr_t = __pthread_attr; +pub type __pthread_mutex_protocol = c_uint; +pub type __pthread_mutex_type = c_uint; +pub type __pthread_mutex_robustness = c_uint; +pub type pthread_mutexattr_t = __pthread_mutexattr; +pub type pthread_mutex_t = __pthread_mutex; +pub type pthread_condattr_t = __pthread_condattr; +pub type pthread_cond_t = __pthread_cond; +pub type pthread_spinlock_t = __pthread_spinlock_t; +pub type pthread_rwlockattr_t = __pthread_rwlockattr; +pub type pthread_rwlock_t = __pthread_rwlock; +pub type pthread_barrierattr_t = __pthread_barrierattr; +pub type pthread_barrier_t = __pthread_barrier; +pub type __pthread_key = c_int; +pub type pthread_key_t = __pthread_key; +pub type pthread_once_t = __pthread_once; + +pub type __rlimit_resource = c_uint; +pub type __rlimit_resource_t = __rlimit_resource; +pub type rlim_t = __rlim_t; +pub type rlim64_t = __rlim64_t; + +pub type __rusage_who = c_int; + +pub type __priority_which = c_uint; + +pub type sa_family_t = c_uchar; + +pub type in_port_t = u16; 
+ +pub type __sigval_t = crate::sigval; + +pub type sigevent_t = sigevent; + +pub type nfds_t = c_ulong; + +pub type tcflag_t = c_uint; +pub type cc_t = c_uchar; +pub type speed_t = c_int; + +pub type sigval_t = crate::sigval; + +pub type greg_t = c_int; +pub type gregset_t = [greg_t; 19usize]; + +pub type __ioctl_dir = c_uint; + +pub type __ioctl_datum = c_uint; + +pub type __error_t_codes = c_int; + +pub type int_least8_t = __int_least8_t; +pub type int_least16_t = __int_least16_t; +pub type int_least32_t = __int_least32_t; +pub type int_least64_t = __int_least64_t; +pub type uint_least8_t = __uint_least8_t; +pub type uint_least16_t = __uint_least16_t; +pub type uint_least32_t = __uint_least32_t; +pub type uint_least64_t = __uint_least64_t; +pub type int_fast8_t = c_schar; +pub type uint_fast8_t = c_uchar; +pub type intmax_t = __intmax_t; +pub type uintmax_t = __uintmax_t; + +pub type tcp_seq = u32; + +pub type tcp_ca_state = c_uint; + +pub type idtype_t = c_uint; + +pub type mqd_t = c_int; + +pub type Lmid_t = c_long; + +pub type regoff_t = c_int; + +pub type nl_item = c_int; + +pub type iconv_t = *mut c_void; + +#[derive(Debug)] +pub enum fpos64_t {} // FIXME(hurd): fill this out with a struct +impl Copy for fpos64_t {} +impl Clone for fpos64_t { + fn clone(&self) -> fpos64_t { + *self + } +} + +#[derive(Debug)] +pub enum timezone {} +impl Copy for timezone {} +impl Clone for timezone { + fn clone(&self) -> timezone { + *self + } +} + +// structs +s! { + pub struct ip_mreq { + pub imr_multiaddr: in_addr, + pub imr_interface: in_addr, + } + + pub struct ip_mreqn { + pub imr_multiaddr: in_addr, + pub imr_address: in_addr, + pub imr_ifindex: c_int, + } + + pub struct ip_mreq_source { + pub imr_multiaddr: in_addr, + pub imr_interface: in_addr, + pub imr_sourceaddr: in_addr, + } + + pub struct sockaddr { + pub sa_len: c_uchar, + pub sa_family: sa_family_t, + pub sa_data: [c_char; 14usize], + } + + pub struct in_addr { + pub s_addr: in_addr_t, + } + + pub struct sockaddr_in { + pub sin_len: c_uchar, + pub sin_family: sa_family_t, + pub sin_port: in_port_t, + pub sin_addr: crate::in_addr, + pub sin_zero: [c_uchar; 8usize], + } + + pub struct sockaddr_in6 { + pub sin6_len: c_uchar, + pub sin6_family: sa_family_t, + pub sin6_port: in_port_t, + pub sin6_flowinfo: u32, + pub sin6_addr: crate::in6_addr, + pub sin6_scope_id: u32, + } + + pub struct sockaddr_un { + pub sun_len: c_uchar, + pub sun_family: sa_family_t, + pub sun_path: [c_char; 108usize], + } + + pub struct sockaddr_storage { + pub ss_len: c_uchar, + pub ss_family: sa_family_t, + pub __ss_padding: [c_char; 122usize], + pub __ss_align: __uint32_t, + } + + pub struct sockaddr_at { + pub _address: u8, + } + + pub struct sockaddr_ax25 { + pub _address: u8, + } + + pub struct sockaddr_x25 { + pub _address: u8, + } + + pub struct sockaddr_dl { + pub _address: u8, + } + pub struct sockaddr_eon { + pub _address: u8, + } + pub struct sockaddr_inarp { + pub _address: u8, + } + + pub struct sockaddr_ipx { + pub _address: u8, + } + pub struct sockaddr_iso { + pub _address: u8, + } + + pub struct sockaddr_ns { + pub _address: u8, + } + + pub struct addrinfo { + pub ai_flags: c_int, + pub ai_family: c_int, + pub ai_socktype: c_int, + pub ai_protocol: c_int, + pub ai_addrlen: crate::socklen_t, + pub ai_addr: *mut sockaddr, + pub ai_canonname: *mut c_char, + pub ai_next: *mut addrinfo, + } + + pub struct msghdr { + pub msg_name: *mut c_void, + pub msg_namelen: crate::socklen_t, + pub msg_iov: *mut crate::iovec, + pub msg_iovlen: c_int, + pub 
msg_control: *mut c_void, + pub msg_controllen: crate::socklen_t, + pub msg_flags: c_int, + } + + pub struct cmsghdr { + pub cmsg_len: crate::socklen_t, + pub cmsg_level: c_int, + pub cmsg_type: c_int, + } + + pub struct dirent { + pub d_ino: __ino_t, + pub d_reclen: c_ushort, + pub d_type: c_uchar, + pub d_namlen: c_uchar, + pub d_name: [c_char; 1usize], + } + + pub struct dirent64 { + pub d_ino: __ino64_t, + pub d_reclen: c_ushort, + pub d_type: c_uchar, + pub d_namlen: c_uchar, + pub d_name: [c_char; 1usize], + } + + pub struct fd_set { + pub fds_bits: [__fd_mask; 8usize], + } + + pub struct termios { + pub c_iflag: crate::tcflag_t, + pub c_oflag: crate::tcflag_t, + pub c_cflag: crate::tcflag_t, + pub c_lflag: crate::tcflag_t, + pub c_cc: [crate::cc_t; 20usize], + pub __ispeed: crate::speed_t, + pub __ospeed: crate::speed_t, + } + + pub struct mallinfo { + pub arena: c_int, + pub ordblks: c_int, + pub smblks: c_int, + pub hblks: c_int, + pub hblkhd: c_int, + pub usmblks: c_int, + pub fsmblks: c_int, + pub uordblks: c_int, + pub fordblks: c_int, + pub keepcost: c_int, + } + + pub struct mallinfo2 { + pub arena: size_t, + pub ordblks: size_t, + pub smblks: size_t, + pub hblks: size_t, + pub hblkhd: size_t, + pub usmblks: size_t, + pub fsmblks: size_t, + pub uordblks: size_t, + pub fordblks: size_t, + pub keepcost: size_t, + } + + pub struct sigaction { + pub sa_sigaction: crate::sighandler_t, + pub sa_mask: __sigset_t, + pub sa_flags: c_int, + } + + pub struct sigevent { + pub sigev_value: crate::sigval, + pub sigev_signo: c_int, + pub sigev_notify: c_int, + __unused1: *mut c_void, //actually a function pointer + pub sigev_notify_attributes: *mut pthread_attr_t, + } + + pub struct siginfo_t { + pub si_signo: c_int, + pub si_errno: c_int, + pub si_code: c_int, + pub si_pid: __pid_t, + pub si_uid: __uid_t, + pub si_addr: *mut c_void, + pub si_status: c_int, + pub si_band: c_long, + pub si_value: crate::sigval, + } + + pub struct timespec { + pub tv_sec: __time_t, + pub tv_nsec: __syscall_slong_t, + } + + pub struct __timeval { + pub tv_sec: i32, + pub tv_usec: i32, + } + + pub struct __locale_data { + pub _address: u8, + } + + pub struct stat { + pub st_fstype: c_int, + pub st_dev: __fsid_t, /* Actually st_fsid */ + pub st_ino: __ino_t, + pub st_gen: c_uint, + pub st_rdev: __dev_t, + pub st_mode: __mode_t, + pub st_nlink: __nlink_t, + pub st_uid: __uid_t, + pub st_gid: __gid_t, + pub st_size: __off_t, + pub st_atim: crate::timespec, + pub st_mtim: crate::timespec, + pub st_ctim: crate::timespec, + pub st_blksize: __blksize_t, + pub st_blocks: __blkcnt_t, + pub st_author: __uid_t, + pub st_flags: c_uint, + pub st_spare: [c_int; 11usize], + } + + pub struct stat64 { + pub st_fstype: c_int, + pub st_dev: __fsid_t, /* Actually st_fsid */ + pub st_ino: __ino64_t, + pub st_gen: c_uint, + pub st_rdev: __dev_t, + pub st_mode: __mode_t, + pub st_nlink: __nlink_t, + pub st_uid: __uid_t, + pub st_gid: __gid_t, + pub st_size: __off64_t, + pub st_atim: crate::timespec, + pub st_mtim: crate::timespec, + pub st_ctim: crate::timespec, + pub st_blksize: __blksize_t, + pub st_blocks: __blkcnt64_t, + pub st_author: __uid_t, + pub st_flags: c_uint, + pub st_spare: [c_int; 8usize], + } + + pub struct statx { + pub stx_mask: u32, + pub stx_blksize: u32, + pub stx_attributes: u64, + pub stx_nlink: u32, + pub stx_uid: u32, + pub stx_gid: u32, + pub stx_mode: u16, + __statx_pad1: [u16; 1], + pub stx_ino: u64, + pub stx_size: u64, + pub stx_blocks: u64, + pub stx_attributes_mask: u64, + pub stx_atime: 
crate::statx_timestamp, + pub stx_btime: crate::statx_timestamp, + pub stx_ctime: crate::statx_timestamp, + pub stx_mtime: crate::statx_timestamp, + pub stx_rdev_major: u32, + pub stx_rdev_minor: u32, + pub stx_dev_major: u32, + pub stx_dev_minor: u32, + __statx_pad2: [u64; 14], + } + + pub struct statx_timestamp { + pub tv_sec: i64, + pub tv_nsec: u32, + pub __statx_timestamp_pad1: [i32; 1], + } + + pub struct statfs { + pub f_type: c_uint, + pub f_bsize: c_ulong, + pub f_blocks: __fsblkcnt_t, + pub f_bfree: __fsblkcnt_t, + pub f_bavail: __fsblkcnt_t, + pub f_files: __fsblkcnt_t, + pub f_ffree: __fsblkcnt_t, + pub f_fsid: __fsid_t, + pub f_namelen: c_ulong, + pub f_favail: __fsfilcnt_t, + pub f_frsize: c_ulong, + pub f_flag: c_ulong, + pub f_spare: [c_uint; 3usize], + } + + pub struct statfs64 { + pub f_type: c_uint, + pub f_bsize: c_ulong, + pub f_blocks: __fsblkcnt64_t, + pub f_bfree: __fsblkcnt64_t, + pub f_bavail: __fsblkcnt64_t, + pub f_files: __fsblkcnt64_t, + pub f_ffree: __fsblkcnt64_t, + pub f_fsid: __fsid_t, + pub f_namelen: c_ulong, + pub f_favail: __fsfilcnt64_t, + pub f_frsize: c_ulong, + pub f_flag: c_ulong, + pub f_spare: [c_uint; 3usize], + } + + pub struct statvfs { + pub __f_type: c_uint, + pub f_bsize: c_ulong, + pub f_blocks: __fsblkcnt_t, + pub f_bfree: __fsblkcnt_t, + pub f_bavail: __fsblkcnt_t, + pub f_files: __fsfilcnt_t, + pub f_ffree: __fsfilcnt_t, + pub f_fsid: __fsid_t, + pub f_namemax: c_ulong, + pub f_favail: __fsfilcnt_t, + pub f_frsize: c_ulong, + pub f_flag: c_ulong, + pub f_spare: [c_uint; 3usize], + } + + pub struct statvfs64 { + pub __f_type: c_uint, + pub f_bsize: c_ulong, + pub f_blocks: __fsblkcnt64_t, + pub f_bfree: __fsblkcnt64_t, + pub f_bavail: __fsblkcnt64_t, + pub f_files: __fsfilcnt64_t, + pub f_ffree: __fsfilcnt64_t, + pub f_fsid: __fsid_t, + pub f_namemax: c_ulong, + pub f_favail: __fsfilcnt64_t, + pub f_frsize: c_ulong, + pub f_flag: c_ulong, + pub f_spare: [c_uint; 3usize], + } + + pub struct aiocb { + pub aio_fildes: c_int, + pub aio_lio_opcode: c_int, + pub aio_reqprio: c_int, + pub aio_buf: *mut c_void, + pub aio_nbytes: size_t, + pub aio_sigevent: crate::sigevent, + __next_prio: *mut aiocb, + __abs_prio: c_int, + __policy: c_int, + __error_code: c_int, + __return_value: ssize_t, + pub aio_offset: off_t, + #[cfg(all(not(target_arch = "x86_64"), target_pointer_width = "32"))] + __unused1: [c_char; 4], + __glibc_reserved: [c_char; 32], + } + + pub struct mq_attr { + pub mq_flags: c_long, + pub mq_maxmsg: c_long, + pub mq_msgsize: c_long, + pub mq_curmsgs: c_long, + } + + pub struct __exit_status { + pub e_termination: c_short, + pub e_exit: c_short, + } + + #[cfg_attr(target_pointer_width = "32", repr(align(4)))] + #[cfg_attr(target_pointer_width = "64", repr(align(8)))] + pub struct sem_t { + __size: [c_char; 20usize], + } + + pub struct __pthread { + pub _address: u8, + } + + pub struct __pthread_mutexattr { + pub __prioceiling: c_int, + pub __protocol: __pthread_mutex_protocol, + pub __pshared: __pthread_process_shared, + pub __mutex_type: __pthread_mutex_type, + } + pub struct __pthread_mutex { + pub __lock: c_uint, + pub __owner_id: c_uint, + pub __cnt: c_uint, + pub __shpid: c_int, + pub __type: c_int, + pub __flags: c_int, + pub __reserved1: c_uint, + pub __reserved2: c_uint, + } + + pub struct __pthread_condattr { + pub __pshared: __pthread_process_shared, + pub __clock: __clockid_t, + } + + pub struct __pthread_rwlockattr { + pub __pshared: __pthread_process_shared, + } + + pub struct __pthread_barrierattr { + pub __pshared: 
__pthread_process_shared, + } + + pub struct __pthread_once { + pub __run: c_int, + pub __lock: __pthread_spinlock_t, + } + + pub struct __pthread_cond { + pub __lock: __pthread_spinlock_t, + pub __queue: *mut __pthread, + pub __attr: *mut __pthread_condattr, + pub __wrefs: c_uint, + pub __data: *mut c_void, + } + + pub struct __pthread_attr { + pub __schedparam: sched_param, + pub __stackaddr: *mut c_void, + pub __stacksize: size_t, + pub __guardsize: size_t, + pub __detachstate: __pthread_detachstate, + pub __inheritsched: __pthread_inheritsched, + pub __contentionscope: __pthread_contentionscope, + pub __schedpolicy: c_int, + } + + pub struct __pthread_rwlock { + pub __held: __pthread_spinlock_t, + pub __lock: __pthread_spinlock_t, + pub __readers: c_int, + pub __readerqueue: *mut __pthread, + pub __writerqueue: *mut __pthread, + pub __attr: *mut __pthread_rwlockattr, + pub __data: *mut c_void, + } + + pub struct __pthread_barrier { + pub __lock: __pthread_spinlock_t, + pub __queue: *mut __pthread, + pub __pending: c_uint, + pub __count: c_uint, + pub __attr: *mut __pthread_barrierattr, + pub __data: *mut c_void, + } + + pub struct seminfo { + pub semmap: c_int, + pub semmni: c_int, + pub semmns: c_int, + pub semmnu: c_int, + pub semmsl: c_int, + pub semopm: c_int, + pub semume: c_int, + pub semusz: c_int, + pub semvmx: c_int, + pub semaem: c_int, + } + + pub struct _IO_FILE { + _unused: [u8; 0], + } + + pub struct sched_param { + pub sched_priority: c_int, + } + + pub struct iovec { + pub iov_base: *mut c_void, + pub iov_len: size_t, + } + + pub struct passwd { + pub pw_name: *mut c_char, + pub pw_passwd: *mut c_char, + pub pw_uid: __uid_t, + pub pw_gid: __gid_t, + pub pw_gecos: *mut c_char, + pub pw_dir: *mut c_char, + pub pw_shell: *mut c_char, + } + + pub struct spwd { + pub sp_namp: *mut c_char, + pub sp_pwdp: *mut c_char, + pub sp_lstchg: c_long, + pub sp_min: c_long, + pub sp_max: c_long, + pub sp_warn: c_long, + pub sp_inact: c_long, + pub sp_expire: c_long, + pub sp_flag: c_ulong, + } + + pub struct itimerspec { + pub it_interval: crate::timespec, + pub it_value: crate::timespec, + } + + pub struct tm { + pub tm_sec: c_int, + pub tm_min: c_int, + pub tm_hour: c_int, + pub tm_mday: c_int, + pub tm_mon: c_int, + pub tm_year: c_int, + pub tm_wday: c_int, + pub tm_yday: c_int, + pub tm_isdst: c_int, + pub tm_gmtoff: c_long, + pub tm_zone: *const c_char, + } + + pub struct lconv { + pub decimal_point: *mut c_char, + pub thousands_sep: *mut c_char, + pub grouping: *mut c_char, + pub int_curr_symbol: *mut c_char, + pub currency_symbol: *mut c_char, + pub mon_decimal_point: *mut c_char, + pub mon_thousands_sep: *mut c_char, + pub mon_grouping: *mut c_char, + pub positive_sign: *mut c_char, + pub negative_sign: *mut c_char, + pub int_frac_digits: c_char, + pub frac_digits: c_char, + pub p_cs_precedes: c_char, + pub p_sep_by_space: c_char, + pub n_cs_precedes: c_char, + pub n_sep_by_space: c_char, + pub p_sign_posn: c_char, + pub n_sign_posn: c_char, + pub int_p_cs_precedes: c_char, + pub int_p_sep_by_space: c_char, + pub int_n_cs_precedes: c_char, + pub int_n_sep_by_space: c_char, + pub int_p_sign_posn: c_char, + pub int_n_sign_posn: c_char, + } + + pub struct Dl_info { + pub dli_fname: *const c_char, + pub dli_fbase: *mut c_void, + pub dli_sname: *const c_char, + pub dli_saddr: *mut c_void, + } + + pub struct ifaddrs { + pub ifa_next: *mut ifaddrs, + pub ifa_name: *mut c_char, + pub ifa_flags: c_uint, + pub ifa_addr: *mut crate::sockaddr, + pub ifa_netmask: *mut crate::sockaddr, + pub 
ifa_ifu: *mut crate::sockaddr, // FIXME(union) This should be a union + pub ifa_data: *mut c_void, + } + + pub struct arpreq { + pub arp_pa: crate::sockaddr, + pub arp_ha: crate::sockaddr, + pub arp_flags: c_int, + pub arp_netmask: crate::sockaddr, + pub arp_dev: [c_char; 16], + } + + pub struct arpreq_old { + pub arp_pa: crate::sockaddr, + pub arp_ha: crate::sockaddr, + pub arp_flags: c_int, + pub arp_netmask: crate::sockaddr, + } + + pub struct arphdr { + pub ar_hrd: u16, + pub ar_pro: u16, + pub ar_hln: u8, + pub ar_pln: u8, + pub ar_op: u16, + } + + pub struct arpd_request { + pub req: c_ushort, + pub ip: u32, + pub dev: c_ulong, + pub stamp: c_ulong, + pub updated: c_ulong, + pub ha: [c_uchar; crate::MAX_ADDR_LEN], + } + + pub struct mmsghdr { + pub msg_hdr: crate::msghdr, + pub msg_len: c_uint, + } + + pub struct ifreq { + /// interface name, e.g. "en0" + pub ifr_name: [c_char; crate::IFNAMSIZ], + pub ifr_ifru: crate::sockaddr, + } + + pub struct __locale_struct { + pub __locales: [*mut __locale_data; 13usize], + pub __ctype_b: *const c_ushort, + pub __ctype_tolower: *const c_int, + pub __ctype_toupper: *const c_int, + pub __names: [*const c_char; 13usize], + } + + pub struct utsname { + pub sysname: [c_char; _UTSNAME_LENGTH], + pub nodename: [c_char; _UTSNAME_LENGTH], + pub release: [c_char; _UTSNAME_LENGTH], + pub version: [c_char; _UTSNAME_LENGTH], + pub machine: [c_char; _UTSNAME_LENGTH], + pub domainname: [c_char; _UTSNAME_LENGTH], + } + + pub struct rlimit64 { + pub rlim_cur: rlim64_t, + pub rlim_max: rlim64_t, + } + + pub struct stack_t { + pub ss_sp: *mut c_void, + pub ss_size: size_t, + pub ss_flags: c_int, + } + + pub struct dl_phdr_info { + pub dlpi_addr: Elf_Addr, + pub dlpi_name: *const c_char, + pub dlpi_phdr: *const Elf_Phdr, + pub dlpi_phnum: Elf_Half, + pub dlpi_adds: c_ulonglong, + pub dlpi_subs: c_ulonglong, + pub dlpi_tls_modid: size_t, + pub dlpi_tls_data: *mut c_void, + } + + pub struct flock { + #[cfg(target_pointer_width = "32")] + pub l_type: c_int, + #[cfg(target_pointer_width = "32")] + pub l_whence: c_int, + #[cfg(target_pointer_width = "64")] + pub l_type: c_short, + #[cfg(target_pointer_width = "64")] + pub l_whence: c_short, + pub l_start: __off_t, + pub l_len: __off_t, + pub l_pid: __pid_t, + } + + pub struct flock64 { + #[cfg(target_pointer_width = "32")] + pub l_type: c_int, + #[cfg(target_pointer_width = "32")] + pub l_whence: c_int, + #[cfg(target_pointer_width = "64")] + pub l_type: c_short, + #[cfg(target_pointer_width = "64")] + pub l_whence: c_short, + pub l_start: __off_t, + pub l_len: __off64_t, + pub l_pid: __pid_t, + } + + pub struct glob_t { + pub gl_pathc: size_t, + pub gl_pathv: *mut *mut c_char, + pub gl_offs: size_t, + pub gl_flags: c_int, + + __unused1: *mut c_void, + __unused2: *mut c_void, + __unused3: *mut c_void, + __unused4: *mut c_void, + __unused5: *mut c_void, + } + + pub struct glob64_t { + pub gl_pathc: size_t, + pub gl_pathv: *mut *mut c_char, + pub gl_offs: size_t, + pub gl_flags: c_int, + + __unused1: *mut c_void, + __unused2: *mut c_void, + __unused3: *mut c_void, + __unused4: *mut c_void, + __unused5: *mut c_void, + } + + pub struct regex_t { + __buffer: *mut c_void, + __allocated: size_t, + __used: size_t, + __syntax: c_ulong, + __fastmap: *mut c_char, + __translate: *mut c_char, + __re_nsub: size_t, + __bitfield: u8, + } + + pub struct cpu_set_t { + #[cfg(all(target_pointer_width = "32", not(target_arch = "x86_64")))] + bits: [u32; 32], + #[cfg(not(all(target_pointer_width = "32", not(target_arch = "x86_64"))))] + 
bits: [u64; 16], + } + + pub struct if_nameindex { + pub if_index: c_uint, + pub if_name: *mut c_char, + } + + // System V IPC + pub struct msginfo { + pub msgpool: c_int, + pub msgmap: c_int, + pub msgmax: c_int, + pub msgmnb: c_int, + pub msgmni: c_int, + pub msgssz: c_int, + pub msgtql: c_int, + pub msgseg: c_ushort, + } + + pub struct sembuf { + pub sem_num: c_ushort, + pub sem_op: c_short, + pub sem_flg: c_short, + } + + pub struct mntent { + pub mnt_fsname: *mut c_char, + pub mnt_dir: *mut c_char, + pub mnt_type: *mut c_char, + pub mnt_opts: *mut c_char, + pub mnt_freq: c_int, + pub mnt_passno: c_int, + } + + pub struct posix_spawn_file_actions_t { + __allocated: c_int, + __used: c_int, + __actions: *mut c_int, + __pad: [c_int; 16], + } + + pub struct posix_spawnattr_t { + __flags: c_short, + __pgrp: crate::pid_t, + __sd: crate::sigset_t, + __ss: crate::sigset_t, + __sp: crate::sched_param, + __policy: c_int, + __pad: [c_int; 16], + } + + pub struct regmatch_t { + pub rm_so: regoff_t, + pub rm_eo: regoff_t, + } + + pub struct option { + pub name: *const c_char, + pub has_arg: c_int, + pub flag: *mut c_int, + pub val: c_int, + } +} + +s_no_extra_traits! { + pub struct utmpx { + pub ut_type: c_short, + pub ut_pid: crate::pid_t, + pub ut_line: [c_char; __UT_LINESIZE], + pub ut_id: [c_char; 4], + + pub ut_user: [c_char; __UT_NAMESIZE], + pub ut_host: [c_char; __UT_HOSTSIZE], + pub ut_exit: __exit_status, + + #[cfg(any(all(target_pointer_width = "32", not(target_arch = "x86_64"))))] + pub ut_session: c_long, + #[cfg(any(all(target_pointer_width = "32", not(target_arch = "x86_64"))))] + pub ut_tv: crate::timeval, + + #[cfg(not(any(all(target_pointer_width = "32", not(target_arch = "x86_64")))))] + pub ut_session: i32, + #[cfg(not(any(all(target_pointer_width = "32", not(target_arch = "x86_64")))))] + pub ut_tv: __timeval, + + pub ut_addr_v6: [i32; 4], + __glibc_reserved: [c_char; 20], + } +} + +cfg_if! 
{ +    if #[cfg(feature = "extra_traits")] { +        impl PartialEq for utmpx { +            fn eq(&self, other: &utmpx) -> bool { +                self.ut_type == other.ut_type +                    && self.ut_pid == other.ut_pid +                    && self.ut_line == other.ut_line +                    && self.ut_id == other.ut_id +                    && self.ut_user == other.ut_user +                    && self +                        .ut_host +                        .iter() +                        .zip(other.ut_host.iter()) +                        .all(|(a, b)| a == b) +                    && self.ut_exit == other.ut_exit +                    && self.ut_session == other.ut_session +                    && self.ut_tv == other.ut_tv +                    && self.ut_addr_v6 == other.ut_addr_v6 +                    && self.__glibc_reserved == other.__glibc_reserved +            } +        } + +        impl Eq for utmpx {} + +        impl hash::Hash for utmpx { +            fn hash<H: hash::Hasher>(&self, state: &mut H) { +                self.ut_type.hash(state); +                self.ut_pid.hash(state); +                self.ut_line.hash(state); +                self.ut_id.hash(state); +                self.ut_user.hash(state); +                self.ut_host.hash(state); +                self.ut_exit.hash(state); +                self.ut_session.hash(state); +                self.ut_tv.hash(state); +                self.ut_addr_v6.hash(state); +                self.__glibc_reserved.hash(state); +            } +        } +    } +} + +impl siginfo_t { +    pub unsafe fn si_addr(&self) -> *mut c_void { +        self.si_addr +    } + +    pub unsafe fn si_value(&self) -> crate::sigval { +        self.si_value +    } + +    pub unsafe fn si_pid(&self) -> crate::pid_t { +        self.si_pid +    } + +    pub unsafe fn si_uid(&self) -> crate::uid_t { +        self.si_uid +    } + +    pub unsafe fn si_status(&self) -> c_int { +        self.si_status +    } +} + +// const + +// aio.h +pub const AIO_CANCELED: c_int = 0; +pub const AIO_NOTCANCELED: c_int = 1; +pub const AIO_ALLDONE: c_int = 2; +pub const LIO_READ: c_int = 0; +pub const LIO_WRITE: c_int = 1; +pub const LIO_NOP: c_int = 2; +pub const LIO_WAIT: c_int = 0; +pub const LIO_NOWAIT: c_int = 1; + +// glob.h +pub const GLOB_ERR: c_int = 1 << 0; +pub const GLOB_MARK: c_int = 1 << 1; +pub const GLOB_NOSORT: c_int = 1 << 2; +pub const GLOB_DOOFFS: c_int = 1 << 3; +pub const GLOB_NOCHECK: c_int = 1 << 4; +pub const GLOB_APPEND: c_int = 1 << 5; +pub const GLOB_NOESCAPE: c_int = 1 << 6; + +pub const GLOB_NOSPACE: c_int = 1; +pub const GLOB_ABORTED: c_int = 2; +pub const GLOB_NOMATCH: c_int = 3; + +pub const GLOB_PERIOD: c_int = 1 << 7; +pub const GLOB_ALTDIRFUNC: c_int = 1 << 9; +pub const GLOB_BRACE: c_int = 1 << 10; +pub const GLOB_NOMAGIC: c_int = 1 << 11; +pub const GLOB_TILDE: c_int = 1 << 12; +pub const GLOB_ONLYDIR: c_int = 1 << 13; +pub const GLOB_TILDE_CHECK: c_int = 1 << 14; + +// ipc.h +pub const IPC_PRIVATE: crate::key_t = 0; + +pub const IPC_CREAT: c_int = 0o1000; +pub const IPC_EXCL: c_int = 0o2000; +pub const IPC_NOWAIT: c_int = 0o4000; + +pub const IPC_RMID: c_int = 0; +pub const IPC_SET: c_int = 1; +pub const IPC_STAT: c_int = 2; +pub const IPC_INFO: c_int = 3; +pub const MSG_STAT: c_int = 11; +pub const MSG_INFO: c_int = 12; + +pub const MSG_NOERROR: c_int = 0o10000; +pub const MSG_EXCEPT: c_int = 0o20000; + +// shm.h +pub const SHM_R: c_int = 0o400; +pub const SHM_W: c_int = 0o200; + +pub const SHM_RDONLY: c_int = 0o10000; +pub const SHM_RND: c_int = 0o20000; +pub const SHM_REMAP: c_int = 0o40000; + +pub const SHM_LOCK: c_int = 11; +pub const SHM_UNLOCK: c_int = 12; +// unistd.h +pub const STDIN_FILENO: c_int = 0; +pub const STDOUT_FILENO: c_int = 1; +pub const STDERR_FILENO: c_int = 2; +pub const __FD_SETSIZE: usize = 256; +pub const R_OK: c_int = 4; +pub const W_OK: c_int = 2; +pub const X_OK: c_int = 1; +pub const F_OK: c_int = 0; +pub const SEEK_SET: c_int = 0; +pub const SEEK_CUR: c_int = 1; +pub const SEEK_END: c_int = 2; +pub const SEEK_DATA: c_int = 3; +pub const SEEK_HOLE: c_int = 4; +pub const L_SET: c_int = 0; +pub const L_INCR: 
c_int = 1; +pub const L_XTND: c_int = 2; +pub const F_ULOCK: c_int = 0; +pub const F_LOCK: c_int = 1; +pub const F_TLOCK: c_int = 2; +pub const F_TEST: c_int = 3; +pub const CLOSE_RANGE_CLOEXEC: c_int = 4; + +// stdio.h +pub const EOF: c_int = -1; + +// stdlib.h +pub const WNOHANG: c_int = 1; +pub const WUNTRACED: c_int = 2; +pub const WSTOPPED: c_int = 2; +pub const WCONTINUED: c_int = 4; +pub const WNOWAIT: c_int = 8; +pub const WEXITED: c_int = 16; +pub const __W_CONTINUED: c_int = 65535; +pub const __WCOREFLAG: c_int = 128; +pub const RAND_MAX: c_int = 2147483647; +pub const EXIT_FAILURE: c_int = 1; +pub const EXIT_SUCCESS: c_int = 0; +pub const __LITTLE_ENDIAN: usize = 1234; +pub const __BIG_ENDIAN: usize = 4321; +pub const __PDP_ENDIAN: usize = 3412; +pub const __BYTE_ORDER: usize = 1234; +pub const __FLOAT_WORD_ORDER: usize = 1234; +pub const LITTLE_ENDIAN: usize = 1234; +pub const BIG_ENDIAN: usize = 4321; +pub const PDP_ENDIAN: usize = 3412; +pub const BYTE_ORDER: usize = 1234; + +// sys/select.h +pub const FD_SETSIZE: usize = 256; +pub const __SIZEOF_PTHREAD_MUTEX_T: usize = 32; +pub const __SIZEOF_PTHREAD_ATTR_T: usize = 32; +pub const __SIZEOF_PTHREAD_RWLOCK_T: usize = 28; +pub const __SIZEOF_PTHREAD_BARRIER_T: usize = 24; +pub const __SIZEOF_PTHREAD_MUTEXATTR_T: usize = 16; +pub const __SIZEOF_PTHREAD_COND_T: usize = 20; +pub const __SIZEOF_PTHREAD_CONDATTR_T: usize = 8; +pub const __SIZEOF_PTHREAD_RWLOCKATTR_T: usize = 4; +pub const __SIZEOF_PTHREAD_BARRIERATTR_T: usize = 4; +pub const __SIZEOF_PTHREAD_ONCE_T: usize = 8; +pub const __PTHREAD_SPIN_LOCK_INITIALIZER: c_int = 0; +pub const PTHREAD_MUTEX_NORMAL: c_int = 0; + +// sys/resource.h +pub const RLIM_INFINITY: crate::rlim_t = 2147483647; +pub const RLIM64_INFINITY: crate::rlim64_t = 9223372036854775807; +pub const RLIM_SAVED_MAX: crate::rlim_t = RLIM_INFINITY; +pub const RLIM_SAVED_CUR: crate::rlim_t = RLIM_INFINITY; +pub const PRIO_MIN: c_int = -20; +pub const PRIO_MAX: c_int = 20; + +// pwd.h +pub const NSS_BUFLEN_PASSWD: usize = 1024; + +// sys/socket.h +pub const SOCK_TYPE_MASK: usize = 15; +pub const PF_UNSPEC: c_int = 0; +pub const PF_LOCAL: c_int = 1; +pub const PF_UNIX: c_int = 1; +pub const PF_FILE: c_int = 1; +pub const PF_INET: c_int = 2; +pub const PF_IMPLINK: c_int = 3; +pub const PF_PUP: c_int = 4; +pub const PF_CHAOS: c_int = 5; +pub const PF_NS: c_int = 6; +pub const PF_ISO: c_int = 7; +pub const PF_OSI: c_int = 7; +pub const PF_ECMA: c_int = 8; +pub const PF_DATAKIT: c_int = 9; +pub const PF_CCITT: c_int = 10; +pub const PF_SNA: c_int = 11; +pub const PF_DECnet: c_int = 12; +pub const PF_DLI: c_int = 13; +pub const PF_LAT: c_int = 14; +pub const PF_HYLINK: c_int = 15; +pub const PF_APPLETALK: c_int = 16; +pub const PF_ROUTE: c_int = 17; +pub const PF_XTP: c_int = 19; +pub const PF_COIP: c_int = 20; +pub const PF_CNT: c_int = 21; +pub const PF_RTIP: c_int = 22; +pub const PF_IPX: c_int = 23; +pub const PF_SIP: c_int = 24; +pub const PF_PIP: c_int = 25; +pub const PF_INET6: c_int = 26; +pub const PF_MAX: c_int = 27; +pub const AF_UNSPEC: c_int = 0; +pub const AF_LOCAL: c_int = 1; +pub const AF_UNIX: c_int = 1; +pub const AF_FILE: c_int = 1; +pub const AF_INET: c_int = 2; +pub const AF_IMPLINK: c_int = 3; +pub const AF_PUP: c_int = 4; +pub const AF_CHAOS: c_int = 5; +pub const AF_NS: c_int = 6; +pub const AF_ISO: c_int = 7; +pub const AF_OSI: c_int = 7; +pub const AF_ECMA: c_int = 8; +pub const AF_DATAKIT: c_int = 9; +pub const AF_CCITT: c_int = 10; +pub const AF_SNA: c_int = 11; +pub const AF_DECnet: c_int 
= 12; +pub const AF_DLI: c_int = 13; +pub const AF_LAT: c_int = 14; +pub const AF_HYLINK: c_int = 15; +pub const AF_APPLETALK: c_int = 16; +pub const AF_ROUTE: c_int = 17; +pub const pseudo_AF_XTP: c_int = 19; +pub const AF_COIP: c_int = 20; +pub const AF_CNT: c_int = 21; +pub const pseudo_AF_RTIP: c_int = 22; +pub const AF_IPX: c_int = 23; +pub const AF_SIP: c_int = 24; +pub const pseudo_AF_PIP: c_int = 25; +pub const AF_INET6: c_int = 26; +pub const AF_MAX: c_int = 27; +pub const SOMAXCONN: c_int = 4096; +pub const _SS_SIZE: usize = 128; +pub const CMGROUP_MAX: usize = 16; +pub const SOL_SOCKET: c_int = 65535; + +// sys/time.h +pub const ITIMER_REAL: c_int = 0; +pub const ITIMER_VIRTUAL: c_int = 1; +pub const ITIMER_PROF: c_int = 2; + +// netinet/in.h +pub const SOL_IP: c_int = 0; +pub const SOL_TCP: c_int = 6; +pub const SOL_UDP: c_int = 17; +pub const SOL_IPV6: c_int = 41; +pub const SOL_ICMPV6: c_int = 58; +pub const IP_OPTIONS: c_int = 1; +pub const IP_HDRINCL: c_int = 2; +pub const IP_TOS: c_int = 3; +pub const IP_TTL: c_int = 4; +pub const IP_RECVOPTS: c_int = 5; +pub const IP_RECVRETOPTS: c_int = 6; +pub const IP_RECVDSTADDR: c_int = 7; +pub const IP_RETOPTS: c_int = 8; +pub const IP_MULTICAST_IF: c_int = 9; +pub const IP_MULTICAST_TTL: c_int = 10; +pub const IP_MULTICAST_LOOP: c_int = 11; +pub const IP_ADD_MEMBERSHIP: c_int = 12; +pub const IP_DROP_MEMBERSHIP: c_int = 13; +pub const IPV6_ADDRFORM: c_int = 1; +pub const IPV6_2292PKTINFO: c_int = 2; +pub const IPV6_2292HOPOPTS: c_int = 3; +pub const IPV6_2292DSTOPTS: c_int = 4; +pub const IPV6_2292RTHDR: c_int = 5; +pub const IPV6_2292PKTOPTIONS: c_int = 6; +pub const IPV6_CHECKSUM: c_int = 7; +pub const IPV6_2292HOPLIMIT: c_int = 8; +pub const IPV6_RXINFO: c_int = 2; +pub const IPV6_TXINFO: c_int = 2; +pub const SCM_SRCINFO: c_int = 2; +pub const IPV6_UNICAST_HOPS: c_int = 16; +pub const IPV6_MULTICAST_IF: c_int = 17; +pub const IPV6_MULTICAST_HOPS: c_int = 18; +pub const IPV6_MULTICAST_LOOP: c_int = 19; +pub const IPV6_JOIN_GROUP: c_int = 20; +pub const IPV6_LEAVE_GROUP: c_int = 21; +pub const IPV6_ROUTER_ALERT: c_int = 22; +pub const IPV6_MTU_DISCOVER: c_int = 23; +pub const IPV6_MTU: c_int = 24; +pub const IPV6_RECVERR: c_int = 25; +pub const IPV6_V6ONLY: c_int = 26; +pub const IPV6_JOIN_ANYCAST: c_int = 27; +pub const IPV6_LEAVE_ANYCAST: c_int = 28; +pub const IPV6_RECVPKTINFO: c_int = 49; +pub const IPV6_PKTINFO: c_int = 50; +pub const IPV6_RECVHOPLIMIT: c_int = 51; +pub const IPV6_HOPLIMIT: c_int = 52; +pub const IPV6_RECVHOPOPTS: c_int = 53; +pub const IPV6_HOPOPTS: c_int = 54; +pub const IPV6_RTHDRDSTOPTS: c_int = 55; +pub const IPV6_RECVRTHDR: c_int = 56; +pub const IPV6_RTHDR: c_int = 57; +pub const IPV6_RECVDSTOPTS: c_int = 58; +pub const IPV6_DSTOPTS: c_int = 59; +pub const IPV6_RECVPATHMTU: c_int = 60; +pub const IPV6_PATHMTU: c_int = 61; +pub const IPV6_DONTFRAG: c_int = 62; +pub const IPV6_RECVTCLASS: c_int = 66; +pub const IPV6_TCLASS: c_int = 67; +pub const IPV6_ADDR_PREFERENCES: c_int = 72; +pub const IPV6_MINHOPCOUNT: c_int = 73; +pub const IPV6_ADD_MEMBERSHIP: c_int = 20; +pub const IPV6_DROP_MEMBERSHIP: c_int = 21; +pub const IPV6_RXHOPOPTS: c_int = 3; +pub const IPV6_RXDSTOPTS: c_int = 4; +pub const IPV6_RTHDR_LOOSE: c_int = 0; +pub const IPV6_RTHDR_STRICT: c_int = 1; +pub const IPV6_RTHDR_TYPE_0: c_int = 0; +pub const IN_CLASSA_NET: u32 = 4278190080; +pub const IN_CLASSA_NSHIFT: usize = 24; +pub const IN_CLASSA_HOST: u32 = 16777215; +pub const IN_CLASSA_MAX: u32 = 128; +pub const IN_CLASSB_NET: u32 = 
4294901760; +pub const IN_CLASSB_NSHIFT: usize = 16; +pub const IN_CLASSB_HOST: u32 = 65535; +pub const IN_CLASSB_MAX: u32 = 65536; +pub const IN_CLASSC_NET: u32 = 4294967040; +pub const IN_CLASSC_NSHIFT: usize = 8; +pub const IN_CLASSC_HOST: u32 = 255; +pub const IN_LOOPBACKNET: u32 = 127; +pub const INET_ADDRSTRLEN: usize = 16; +pub const INET6_ADDRSTRLEN: usize = 46; + +// netinet/ip.h +pub const IPTOS_TOS_MASK: u8 = 0x1E; +pub const IPTOS_PREC_MASK: u8 = 0xE0; + +pub const IPTOS_ECN_NOT_ECT: u8 = 0x00; + +pub const IPTOS_LOWDELAY: u8 = 0x10; +pub const IPTOS_THROUGHPUT: u8 = 0x08; +pub const IPTOS_RELIABILITY: u8 = 0x04; +pub const IPTOS_MINCOST: u8 = 0x02; + +pub const IPTOS_PREC_NETCONTROL: u8 = 0xe0; +pub const IPTOS_PREC_INTERNETCONTROL: u8 = 0xc0; +pub const IPTOS_PREC_CRITIC_ECP: u8 = 0xa0; +pub const IPTOS_PREC_FLASHOVERRIDE: u8 = 0x80; +pub const IPTOS_PREC_FLASH: u8 = 0x60; +pub const IPTOS_PREC_IMMEDIATE: u8 = 0x40; +pub const IPTOS_PREC_PRIORITY: u8 = 0x20; +pub const IPTOS_PREC_ROUTINE: u8 = 0x00; + +pub const IPTOS_ECN_MASK: u8 = 0x03; +pub const IPTOS_ECN_ECT1: u8 = 0x01; +pub const IPTOS_ECN_ECT0: u8 = 0x02; +pub const IPTOS_ECN_CE: u8 = 0x03; + +pub const IPOPT_COPY: u8 = 0x80; +pub const IPOPT_CLASS_MASK: u8 = 0x60; +pub const IPOPT_NUMBER_MASK: u8 = 0x1f; + +pub const IPOPT_CONTROL: u8 = 0x00; +pub const IPOPT_RESERVED1: u8 = 0x20; +pub const IPOPT_MEASUREMENT: u8 = 0x40; +pub const IPOPT_RESERVED2: u8 = 0x60; +pub const IPOPT_END: u8 = 0 | IPOPT_CONTROL; +pub const IPOPT_NOOP: u8 = 1 | IPOPT_CONTROL; +pub const IPOPT_SEC: u8 = 2 | IPOPT_CONTROL | IPOPT_COPY; +pub const IPOPT_LSRR: u8 = 3 | IPOPT_CONTROL | IPOPT_COPY; +pub const IPOPT_TIMESTAMP: u8 = 4 | IPOPT_MEASUREMENT; +pub const IPOPT_RR: u8 = 7 | IPOPT_CONTROL; +pub const IPOPT_SID: u8 = 8 | IPOPT_CONTROL | IPOPT_COPY; +pub const IPOPT_SSRR: u8 = 9 | IPOPT_CONTROL | IPOPT_COPY; +pub const IPOPT_RA: u8 = 20 | IPOPT_CONTROL | IPOPT_COPY; +pub const IPVERSION: u8 = 4; +pub const MAXTTL: u8 = 255; +pub const IPDEFTTL: u8 = 64; +pub const IPOPT_OPTVAL: u8 = 0; +pub const IPOPT_OLEN: u8 = 1; +pub const IPOPT_OFFSET: u8 = 2; +pub const IPOPT_MINOFF: u8 = 4; +pub const MAX_IPOPTLEN: u8 = 40; +pub const IPOPT_NOP: u8 = IPOPT_NOOP; +pub const IPOPT_EOL: u8 = IPOPT_END; +pub const IPOPT_TS: u8 = IPOPT_TIMESTAMP; +pub const IPOPT_TS_TSONLY: u8 = 0; +pub const IPOPT_TS_TSANDADDR: u8 = 1; +pub const IPOPT_TS_PRESPEC: u8 = 3; + +// net/if_arp.h +pub const ARPOP_REQUEST: u16 = 1; +pub const ARPOP_REPLY: u16 = 2; +pub const ARPOP_RREQUEST: u16 = 3; +pub const ARPOP_RREPLY: u16 = 4; +pub const ARPOP_InREQUEST: u16 = 8; +pub const ARPOP_InREPLY: u16 = 9; +pub const ARPOP_NAK: u16 = 10; + +pub const MAX_ADDR_LEN: usize = 7; +pub const ARPD_UPDATE: c_ushort = 0x01; +pub const ARPD_LOOKUP: c_ushort = 0x02; +pub const ARPD_FLUSH: c_ushort = 0x03; +pub const ATF_MAGIC: c_int = 0x80; + +pub const ATF_NETMASK: c_int = 0x20; +pub const ATF_DONTPUB: c_int = 0x40; + +pub const ARPHRD_NETROM: u16 = 0; +pub const ARPHRD_ETHER: u16 = 1; +pub const ARPHRD_EETHER: u16 = 2; +pub const ARPHRD_AX25: u16 = 3; +pub const ARPHRD_PRONET: u16 = 4; +pub const ARPHRD_CHAOS: u16 = 5; +pub const ARPHRD_IEEE802: u16 = 6; +pub const ARPHRD_ARCNET: u16 = 7; +pub const ARPHRD_APPLETLK: u16 = 8; +pub const ARPHRD_DLCI: u16 = 15; +pub const ARPHRD_ATM: u16 = 19; +pub const ARPHRD_METRICOM: u16 = 23; +pub const ARPHRD_IEEE1394: u16 = 24; +pub const ARPHRD_EUI64: u16 = 27; +pub const ARPHRD_INFINIBAND: u16 = 32; + +pub const ARPHRD_SLIP: u16 = 256; +pub const 
ARPHRD_CSLIP: u16 = 257; +pub const ARPHRD_SLIP6: u16 = 258; +pub const ARPHRD_CSLIP6: u16 = 259; +pub const ARPHRD_RSRVD: u16 = 260; +pub const ARPHRD_ADAPT: u16 = 264; +pub const ARPHRD_ROSE: u16 = 270; +pub const ARPHRD_X25: u16 = 271; +pub const ARPHRD_HWX25: u16 = 272; +pub const ARPHRD_CAN: u16 = 280; +pub const ARPHRD_PPP: u16 = 512; +pub const ARPHRD_CISCO: u16 = 513; +pub const ARPHRD_HDLC: u16 = ARPHRD_CISCO; +pub const ARPHRD_LAPB: u16 = 516; +pub const ARPHRD_DDCMP: u16 = 517; +pub const ARPHRD_RAWHDLC: u16 = 518; + +pub const ARPHRD_TUNNEL: u16 = 768; +pub const ARPHRD_TUNNEL6: u16 = 769; +pub const ARPHRD_FRAD: u16 = 770; +pub const ARPHRD_SKIP: u16 = 771; +pub const ARPHRD_LOOPBACK: u16 = 772; +pub const ARPHRD_LOCALTLK: u16 = 773; +pub const ARPHRD_FDDI: u16 = 774; +pub const ARPHRD_BIF: u16 = 775; +pub const ARPHRD_SIT: u16 = 776; +pub const ARPHRD_IPDDP: u16 = 777; +pub const ARPHRD_IPGRE: u16 = 778; +pub const ARPHRD_PIMREG: u16 = 779; +pub const ARPHRD_HIPPI: u16 = 780; +pub const ARPHRD_ASH: u16 = 781; +pub const ARPHRD_ECONET: u16 = 782; +pub const ARPHRD_IRDA: u16 = 783; +pub const ARPHRD_FCPP: u16 = 784; +pub const ARPHRD_FCAL: u16 = 785; +pub const ARPHRD_FCPL: u16 = 786; +pub const ARPHRD_FCFABRIC: u16 = 787; +pub const ARPHRD_IEEE802_TR: u16 = 800; +pub const ARPHRD_IEEE80211: u16 = 801; +pub const ARPHRD_IEEE80211_PRISM: u16 = 802; +pub const ARPHRD_IEEE80211_RADIOTAP: u16 = 803; +pub const ARPHRD_IEEE802154: u16 = 804; + +pub const ARPHRD_VOID: u16 = 0xFFFF; +pub const ARPHRD_NONE: u16 = 0xFFFE; + +// bits/posix1_lim.h +pub const _POSIX_AIO_LISTIO_MAX: usize = 2; +pub const _POSIX_AIO_MAX: usize = 1; +pub const _POSIX_ARG_MAX: usize = 4096; +pub const _POSIX_CHILD_MAX: usize = 25; +pub const _POSIX_DELAYTIMER_MAX: usize = 32; +pub const _POSIX_HOST_NAME_MAX: usize = 255; +pub const _POSIX_LINK_MAX: usize = 8; +pub const _POSIX_LOGIN_NAME_MAX: usize = 9; +pub const _POSIX_MAX_CANON: usize = 255; +pub const _POSIX_MAX_INPUT: usize = 255; +pub const _POSIX_MQ_OPEN_MAX: usize = 8; +pub const _POSIX_MQ_PRIO_MAX: usize = 32; +pub const _POSIX_NAME_MAX: usize = 14; +pub const _POSIX_NGROUPS_MAX: usize = 8; +pub const _POSIX_OPEN_MAX: usize = 20; +pub const _POSIX_FD_SETSIZE: usize = 20; +pub const _POSIX_PATH_MAX: usize = 256; +pub const _POSIX_PIPE_BUF: usize = 512; +pub const _POSIX_RE_DUP_MAX: usize = 255; +pub const _POSIX_RTSIG_MAX: usize = 8; +pub const _POSIX_SEM_NSEMS_MAX: usize = 256; +pub const _POSIX_SEM_VALUE_MAX: usize = 32767; +pub const _POSIX_SIGQUEUE_MAX: usize = 32; +pub const _POSIX_SSIZE_MAX: usize = 32767; +pub const _POSIX_STREAM_MAX: usize = 8; +pub const _POSIX_SYMLINK_MAX: usize = 255; +pub const _POSIX_SYMLOOP_MAX: usize = 8; +pub const _POSIX_TIMER_MAX: usize = 32; +pub const _POSIX_TTY_NAME_MAX: usize = 9; +pub const _POSIX_TZNAME_MAX: usize = 6; +pub const _POSIX_QLIMIT: usize = 1; +pub const _POSIX_HIWAT: usize = 512; +pub const _POSIX_UIO_MAXIOV: usize = 16; +pub const _POSIX_CLOCKRES_MIN: usize = 20000000; +pub const NAME_MAX: usize = 255; +pub const NGROUPS_MAX: usize = 256; +pub const _POSIX_THREAD_KEYS_MAX: usize = 128; +pub const _POSIX_THREAD_DESTRUCTOR_ITERATIONS: usize = 4; +pub const _POSIX_THREAD_THREADS_MAX: usize = 64; +pub const SEM_VALUE_MAX: c_int = 2147483647; +pub const MAXNAMLEN: usize = 255; + +// netdb.h +pub const _PATH_HEQUIV: &[u8; 17usize] = b"/etc/hosts.equiv\0"; +pub const _PATH_HOSTS: &[u8; 11usize] = b"/etc/hosts\0"; +pub const _PATH_NETWORKS: &[u8; 14usize] = b"/etc/networks\0"; +pub const 
_PATH_NSSWITCH_CONF: &[u8; 19usize] = b"/etc/nsswitch.conf\0"; +pub const _PATH_PROTOCOLS: &[u8; 15usize] = b"/etc/protocols\0"; +pub const _PATH_SERVICES: &[u8; 14usize] = b"/etc/services\0"; +pub const HOST_NOT_FOUND: c_int = 1; +pub const TRY_AGAIN: c_int = 2; +pub const NO_RECOVERY: c_int = 3; +pub const NO_DATA: c_int = 4; +pub const NETDB_INTERNAL: c_int = -1; +pub const NETDB_SUCCESS: c_int = 0; +pub const NO_ADDRESS: c_int = 4; +pub const IPPORT_RESERVED: c_int = 1024; +pub const SCOPE_DELIMITER: u8 = 37u8; +pub const GAI_WAIT: c_int = 0; +pub const GAI_NOWAIT: c_int = 1; +pub const AI_PASSIVE: c_int = 1; +pub const AI_CANONNAME: c_int = 2; +pub const AI_NUMERICHOST: c_int = 4; +pub const AI_V4MAPPED: c_int = 8; +pub const AI_ALL: c_int = 16; +pub const AI_ADDRCONFIG: c_int = 32; +pub const AI_IDN: c_int = 64; +pub const AI_CANONIDN: c_int = 128; +pub const AI_NUMERICSERV: c_int = 1024; +pub const EAI_BADFLAGS: c_int = -1; +pub const EAI_NONAME: c_int = -2; +pub const EAI_AGAIN: c_int = -3; +pub const EAI_FAIL: c_int = -4; +pub const EAI_FAMILY: c_int = -6; +pub const EAI_SOCKTYPE: c_int = -7; +pub const EAI_SERVICE: c_int = -8; +pub const EAI_MEMORY: c_int = -10; +pub const EAI_SYSTEM: c_int = -11; +pub const EAI_OVERFLOW: c_int = -12; +pub const EAI_NODATA: c_int = -5; +pub const EAI_ADDRFAMILY: c_int = -9; +pub const EAI_INPROGRESS: c_int = -100; +pub const EAI_CANCELED: c_int = -101; +pub const EAI_NOTCANCELED: c_int = -102; +pub const EAI_ALLDONE: c_int = -103; +pub const EAI_INTR: c_int = -104; +pub const EAI_IDN_ENCODE: c_int = -105; +pub const NI_MAXHOST: usize = 1025; +pub const NI_MAXSERV: usize = 32; +pub const NI_NUMERICHOST: c_int = 1; +pub const NI_NUMERICSERV: c_int = 2; +pub const NI_NOFQDN: c_int = 4; +pub const NI_NAMEREQD: c_int = 8; +pub const NI_DGRAM: c_int = 16; +pub const NI_IDN: c_int = 32; + +// time.h +pub const CLOCK_REALTIME: crate::clockid_t = 0; +pub const CLOCK_MONOTONIC: crate::clockid_t = 1; +pub const CLOCK_PROCESS_CPUTIME_ID: crate::clockid_t = 2; +pub const CLOCK_THREAD_CPUTIME_ID: crate::clockid_t = 3; +pub const CLOCK_MONOTONIC_RAW: crate::clockid_t = 4; +pub const CLOCK_REALTIME_COARSE: crate::clockid_t = 5; +pub const CLOCK_MONOTONIC_COARSE: crate::clockid_t = 6; +pub const TIMER_ABSTIME: c_int = 1; +pub const TIME_UTC: c_int = 1; + +// sys/poll.h +pub const POLLIN: i16 = 1; +pub const POLLPRI: i16 = 2; +pub const POLLOUT: i16 = 4; +pub const POLLRDNORM: i16 = 1; +pub const POLLRDBAND: i16 = 2; +pub const POLLWRNORM: i16 = 4; +pub const POLLWRBAND: i16 = 4; +pub const POLLERR: i16 = 8; +pub const POLLHUP: i16 = 16; +pub const POLLNVAL: i16 = 32; + +// locale.h +pub const __LC_CTYPE: usize = 0; +pub const __LC_NUMERIC: usize = 1; +pub const __LC_TIME: usize = 2; +pub const __LC_COLLATE: usize = 3; +pub const __LC_MONETARY: usize = 4; +pub const __LC_MESSAGES: usize = 5; +pub const __LC_ALL: usize = 6; +pub const __LC_PAPER: usize = 7; +pub const __LC_NAME: usize = 8; +pub const __LC_ADDRESS: usize = 9; +pub const __LC_TELEPHONE: usize = 10; +pub const __LC_MEASUREMENT: usize = 11; +pub const __LC_IDENTIFICATION: usize = 12; +pub const LC_CTYPE: c_int = 0; +pub const LC_NUMERIC: c_int = 1; +pub const LC_TIME: c_int = 2; +pub const LC_COLLATE: c_int = 3; +pub const LC_MONETARY: c_int = 4; +pub const LC_MESSAGES: c_int = 5; +pub const LC_ALL: c_int = 6; +pub const LC_PAPER: c_int = 7; +pub const LC_NAME: c_int = 8; +pub const LC_ADDRESS: c_int = 9; +pub const LC_TELEPHONE: c_int = 10; +pub const LC_MEASUREMENT: c_int = 11; +pub const 
LC_IDENTIFICATION: c_int = 12; +pub const LC_CTYPE_MASK: c_int = 1; +pub const LC_NUMERIC_MASK: c_int = 2; +pub const LC_TIME_MASK: c_int = 4; +pub const LC_COLLATE_MASK: c_int = 8; +pub const LC_MONETARY_MASK: c_int = 16; +pub const LC_MESSAGES_MASK: c_int = 32; +pub const LC_PAPER_MASK: c_int = 128; +pub const LC_NAME_MASK: c_int = 256; +pub const LC_ADDRESS_MASK: c_int = 512; +pub const LC_TELEPHONE_MASK: c_int = 1024; +pub const LC_MEASUREMENT_MASK: c_int = 2048; +pub const LC_IDENTIFICATION_MASK: c_int = 4096; +pub const LC_ALL_MASK: c_int = 8127; + +pub const ABDAY_1: crate::nl_item = 0x20000; +pub const ABDAY_2: crate::nl_item = 0x20001; +pub const ABDAY_3: crate::nl_item = 0x20002; +pub const ABDAY_4: crate::nl_item = 0x20003; +pub const ABDAY_5: crate::nl_item = 0x20004; +pub const ABDAY_6: crate::nl_item = 0x20005; +pub const ABDAY_7: crate::nl_item = 0x20006; + +pub const DAY_1: crate::nl_item = 0x20007; +pub const DAY_2: crate::nl_item = 0x20008; +pub const DAY_3: crate::nl_item = 0x20009; +pub const DAY_4: crate::nl_item = 0x2000A; +pub const DAY_5: crate::nl_item = 0x2000B; +pub const DAY_6: crate::nl_item = 0x2000C; +pub const DAY_7: crate::nl_item = 0x2000D; + +pub const ABMON_1: crate::nl_item = 0x2000E; +pub const ABMON_2: crate::nl_item = 0x2000F; +pub const ABMON_3: crate::nl_item = 0x20010; +pub const ABMON_4: crate::nl_item = 0x20011; +pub const ABMON_5: crate::nl_item = 0x20012; +pub const ABMON_6: crate::nl_item = 0x20013; +pub const ABMON_7: crate::nl_item = 0x20014; +pub const ABMON_8: crate::nl_item = 0x20015; +pub const ABMON_9: crate::nl_item = 0x20016; +pub const ABMON_10: crate::nl_item = 0x20017; +pub const ABMON_11: crate::nl_item = 0x20018; +pub const ABMON_12: crate::nl_item = 0x20019; + +pub const MON_1: crate::nl_item = 0x2001A; +pub const MON_2: crate::nl_item = 0x2001B; +pub const MON_3: crate::nl_item = 0x2001C; +pub const MON_4: crate::nl_item = 0x2001D; +pub const MON_5: crate::nl_item = 0x2001E; +pub const MON_6: crate::nl_item = 0x2001F; +pub const MON_7: crate::nl_item = 0x20020; +pub const MON_8: crate::nl_item = 0x20021; +pub const MON_9: crate::nl_item = 0x20022; +pub const MON_10: crate::nl_item = 0x20023; +pub const MON_11: crate::nl_item = 0x20024; +pub const MON_12: crate::nl_item = 0x20025; + +pub const AM_STR: crate::nl_item = 0x20026; +pub const PM_STR: crate::nl_item = 0x20027; + +pub const D_T_FMT: crate::nl_item = 0x20028; +pub const D_FMT: crate::nl_item = 0x20029; +pub const T_FMT: crate::nl_item = 0x2002A; +pub const T_FMT_AMPM: crate::nl_item = 0x2002B; + +pub const ERA: crate::nl_item = 0x2002C; +pub const ERA_D_FMT: crate::nl_item = 0x2002E; +pub const ALT_DIGITS: crate::nl_item = 0x2002F; +pub const ERA_D_T_FMT: crate::nl_item = 0x20030; +pub const ERA_T_FMT: crate::nl_item = 0x20031; + +pub const CODESET: crate::nl_item = 14; +pub const CRNCYSTR: crate::nl_item = 0x4000F; +pub const RADIXCHAR: crate::nl_item = 0x10000; +pub const THOUSEP: crate::nl_item = 0x10001; +pub const YESEXPR: crate::nl_item = 0x50000; +pub const NOEXPR: crate::nl_item = 0x50001; +pub const YESSTR: crate::nl_item = 0x50002; +pub const NOSTR: crate::nl_item = 0x50003; + +// reboot.h +pub const RB_AUTOBOOT: c_int = 0x0; +pub const RB_ASKNAME: c_int = 0x1; +pub const RB_SINGLE: c_int = 0x2; +pub const RB_KBD: c_int = 0x4; +pub const RB_HALT: c_int = 0x8; +pub const RB_INITNAME: c_int = 0x10; +pub const RB_DFLTROOT: c_int = 0x20; +pub const RB_NOBOOTRC: c_int = 0x20; +pub const RB_ALTBOOT: c_int = 0x40; +pub const RB_UNIPROC: c_int = 0x80; +pub const 
RB_DEBUGGER: c_int = 0x1000; + +// semaphore.h +pub const __SIZEOF_SEM_T: usize = 20; +pub const SEM_FAILED: *mut crate::sem_t = ptr::null_mut(); + +// termios.h +pub const IGNBRK: crate::tcflag_t = 1; +pub const BRKINT: crate::tcflag_t = 2; +pub const IGNPAR: crate::tcflag_t = 4; +pub const PARMRK: crate::tcflag_t = 8; +pub const INPCK: crate::tcflag_t = 16; +pub const ISTRIP: crate::tcflag_t = 32; +pub const INLCR: crate::tcflag_t = 64; +pub const IGNCR: crate::tcflag_t = 128; +pub const ICRNL: crate::tcflag_t = 256; +pub const IXON: crate::tcflag_t = 512; +pub const IXOFF: crate::tcflag_t = 1024; +pub const IXANY: crate::tcflag_t = 2048; +pub const IMAXBEL: crate::tcflag_t = 8192; +pub const IUCLC: crate::tcflag_t = 16384; +pub const OPOST: crate::tcflag_t = 1; +pub const ONLCR: crate::tcflag_t = 2; +pub const ONOEOT: crate::tcflag_t = 8; +pub const OCRNL: crate::tcflag_t = 16; +pub const ONOCR: crate::tcflag_t = 32; +pub const ONLRET: crate::tcflag_t = 64; +pub const NLDLY: crate::tcflag_t = 768; +pub const NL0: crate::tcflag_t = 0; +pub const NL1: crate::tcflag_t = 256; +pub const TABDLY: crate::tcflag_t = 3076; +pub const TAB0: crate::tcflag_t = 0; +pub const TAB1: crate::tcflag_t = 1024; +pub const TAB2: crate::tcflag_t = 2048; +pub const TAB3: crate::tcflag_t = 4; +pub const CRDLY: crate::tcflag_t = 12288; +pub const CR0: crate::tcflag_t = 0; +pub const CR1: crate::tcflag_t = 4096; +pub const CR2: crate::tcflag_t = 8192; +pub const CR3: crate::tcflag_t = 12288; +pub const FFDLY: crate::tcflag_t = 16384; +pub const FF0: crate::tcflag_t = 0; +pub const FF1: crate::tcflag_t = 16384; +pub const BSDLY: crate::tcflag_t = 32768; +pub const BS0: crate::tcflag_t = 0; +pub const BS1: crate::tcflag_t = 32768; +pub const VTDLY: crate::tcflag_t = 65536; +pub const VT0: crate::tcflag_t = 0; +pub const VT1: crate::tcflag_t = 65536; +pub const OLCUC: crate::tcflag_t = 131072; +pub const OFILL: crate::tcflag_t = 262144; +pub const OFDEL: crate::tcflag_t = 524288; +pub const CIGNORE: crate::tcflag_t = 1; +pub const CSIZE: crate::tcflag_t = 768; +pub const CS5: crate::tcflag_t = 0; +pub const CS6: crate::tcflag_t = 256; +pub const CS7: crate::tcflag_t = 512; +pub const CS8: crate::tcflag_t = 768; +pub const CSTOPB: crate::tcflag_t = 1024; +pub const CREAD: crate::tcflag_t = 2048; +pub const PARENB: crate::tcflag_t = 4096; +pub const PARODD: crate::tcflag_t = 8192; +pub const HUPCL: crate::tcflag_t = 16384; +pub const CLOCAL: crate::tcflag_t = 32768; +pub const CRTSCTS: crate::tcflag_t = 65536; +pub const CRTS_IFLOW: crate::tcflag_t = 65536; +pub const CCTS_OFLOW: crate::tcflag_t = 65536; +pub const CDTRCTS: crate::tcflag_t = 131072; +pub const MDMBUF: crate::tcflag_t = 1048576; +pub const CHWFLOW: crate::tcflag_t = 1245184; +pub const ECHOKE: crate::tcflag_t = 1; +pub const _ECHOE: crate::tcflag_t = 2; +pub const ECHOE: crate::tcflag_t = 2; +pub const _ECHOK: crate::tcflag_t = 4; +pub const ECHOK: crate::tcflag_t = 4; +pub const _ECHO: crate::tcflag_t = 8; +pub const ECHO: crate::tcflag_t = 8; +pub const _ECHONL: crate::tcflag_t = 16; +pub const ECHONL: crate::tcflag_t = 16; +pub const ECHOPRT: crate::tcflag_t = 32; +pub const ECHOCTL: crate::tcflag_t = 64; +pub const _ISIG: crate::tcflag_t = 128; +pub const ISIG: crate::tcflag_t = 128; +pub const _ICANON: crate::tcflag_t = 256; +pub const ICANON: crate::tcflag_t = 256; +pub const ALTWERASE: crate::tcflag_t = 512; +pub const _IEXTEN: crate::tcflag_t = 1024; +pub const IEXTEN: crate::tcflag_t = 1024; +pub const EXTPROC: crate::tcflag_t = 2048; +pub 
const _TOSTOP: crate::tcflag_t = 4194304; +pub const TOSTOP: crate::tcflag_t = 4194304; +pub const FLUSHO: crate::tcflag_t = 8388608; +pub const NOKERNINFO: crate::tcflag_t = 33554432; +pub const PENDIN: crate::tcflag_t = 536870912; +pub const _NOFLSH: crate::tcflag_t = 2147483648; +pub const NOFLSH: crate::tcflag_t = 2147483648; +pub const VEOF: usize = 0; +pub const VEOL: usize = 1; +pub const VEOL2: usize = 2; +pub const VERASE: usize = 3; +pub const VWERASE: usize = 4; +pub const VKILL: usize = 5; +pub const VREPRINT: usize = 6; +pub const VINTR: usize = 8; +pub const VQUIT: usize = 9; +pub const VSUSP: usize = 10; +pub const VDSUSP: usize = 11; +pub const VSTART: usize = 12; +pub const VSTOP: usize = 13; +pub const VLNEXT: usize = 14; +pub const VDISCARD: usize = 15; +pub const VMIN: usize = 16; +pub const VTIME: usize = 17; +pub const VSTATUS: usize = 18; +pub const NCCS: usize = 20; +pub const B0: crate::speed_t = 0; +pub const B50: crate::speed_t = 50; +pub const B75: crate::speed_t = 75; +pub const B110: crate::speed_t = 110; +pub const B134: crate::speed_t = 134; +pub const B150: crate::speed_t = 150; +pub const B200: crate::speed_t = 200; +pub const B300: crate::speed_t = 300; +pub const B600: crate::speed_t = 600; +pub const B1200: crate::speed_t = 1200; +pub const B1800: crate::speed_t = 1800; +pub const B2400: crate::speed_t = 2400; +pub const B4800: crate::speed_t = 4800; +pub const B9600: crate::speed_t = 9600; +pub const B7200: crate::speed_t = 7200; +pub const B14400: crate::speed_t = 14400; +pub const B19200: crate::speed_t = 19200; +pub const B28800: crate::speed_t = 28800; +pub const B38400: crate::speed_t = 38400; +pub const EXTA: crate::speed_t = B19200; +pub const EXTB: crate::speed_t = B38400; +pub const B57600: crate::speed_t = 57600; +pub const B76800: crate::speed_t = 76800; +pub const B115200: crate::speed_t = 115200; +pub const B230400: crate::speed_t = 230400; +pub const B460800: crate::speed_t = 460800; +pub const B500000: crate::speed_t = 500000; +pub const B576000: crate::speed_t = 576000; +pub const B921600: crate::speed_t = 921600; +pub const B1000000: crate::speed_t = 1000000; +pub const B1152000: crate::speed_t = 1152000; +pub const B1500000: crate::speed_t = 1500000; +pub const B2000000: crate::speed_t = 2000000; +pub const B2500000: crate::speed_t = 2500000; +pub const B3000000: crate::speed_t = 3000000; +pub const B3500000: crate::speed_t = 3500000; +pub const B4000000: crate::speed_t = 4000000; +pub const TCSANOW: c_int = 0; +pub const TCSADRAIN: c_int = 1; +pub const TCSAFLUSH: c_int = 2; +pub const TCSASOFT: c_int = 16; +pub const TCIFLUSH: c_int = 1; +pub const TCOFLUSH: c_int = 2; +pub const TCIOFLUSH: c_int = 3; +pub const TCOOFF: c_int = 1; +pub const TCOON: c_int = 2; +pub const TCIOFF: c_int = 3; +pub const TCION: c_int = 4; +pub const TTYDEF_IFLAG: crate::tcflag_t = 11042; +pub const TTYDEF_LFLAG: crate::tcflag_t = 1483; +pub const TTYDEF_CFLAG: crate::tcflag_t = 23040; +pub const TTYDEF_SPEED: crate::tcflag_t = 9600; +pub const CEOL: u8 = 0u8; +pub const CERASE: u8 = 127; +pub const CMIN: u8 = 1; +pub const CQUIT: u8 = 28; +pub const CTIME: u8 = 0; +pub const CBRK: u8 = 0u8; + +// dlfcn.h +pub const RTLD_DEFAULT: *mut c_void = ptr::null_mut(); +pub const RTLD_NEXT: *mut c_void = -1i64 as *mut c_void; +pub const RTLD_LAZY: c_int = 1; +pub const RTLD_NOW: c_int = 2; +pub const RTLD_BINDING_MASK: c_int = 3; +pub const RTLD_NOLOAD: c_int = 4; +pub const RTLD_DEEPBIND: c_int = 8; +pub const RTLD_GLOBAL: c_int = 256; +pub const RTLD_LOCAL: 
c_int = 0; +pub const RTLD_NODELETE: c_int = 4096; +pub const DLFO_STRUCT_HAS_EH_DBASE: usize = 1; +pub const DLFO_STRUCT_HAS_EH_COUNT: usize = 0; +pub const LM_ID_BASE: c_long = 0; +pub const LM_ID_NEWLM: c_long = -1; + +// bits/signum_generic.h +pub const SIGINT: c_int = 2; +pub const SIGILL: c_int = 4; +pub const SIGABRT: c_int = 6; +pub const SIGFPE: c_int = 8; +pub const SIGSEGV: c_int = 11; +pub const SIGTERM: c_int = 15; +pub const SIGHUP: c_int = 1; +pub const SIGQUIT: c_int = 3; +pub const SIGTRAP: c_int = 5; +pub const SIGKILL: c_int = 9; +pub const SIGPIPE: c_int = 13; +pub const SIGALRM: c_int = 14; +pub const SIGIOT: c_int = 6; +pub const SIGBUS: c_int = 10; +pub const SIGSYS: c_int = 12; +pub const SIGEMT: c_int = 7; +pub const SIGINFO: c_int = 29; +pub const SIGLOST: c_int = 32; +pub const SIGURG: c_int = 16; +pub const SIGSTOP: c_int = 17; +pub const SIGTSTP: c_int = 18; +pub const SIGCONT: c_int = 19; +pub const SIGCHLD: c_int = 20; +pub const SIGTTIN: c_int = 21; +pub const SIGTTOU: c_int = 22; +pub const SIGPOLL: c_int = 23; +pub const SIGXCPU: c_int = 24; +pub const SIGVTALRM: c_int = 26; +pub const SIGPROF: c_int = 27; +pub const SIGXFSZ: c_int = 25; +pub const SIGUSR1: c_int = 30; +pub const SIGUSR2: c_int = 31; +pub const SIGWINCH: c_int = 28; +pub const SIGIO: c_int = 23; +pub const SIGCLD: c_int = 20; +pub const __SIGRTMIN: usize = 32; +pub const __SIGRTMAX: usize = 32; +pub const _NSIG: usize = 33; +pub const NSIG: usize = 33; + +// bits/sigaction.h +pub const SA_ONSTACK: c_int = 1; +pub const SA_RESTART: c_int = 2; +pub const SA_NODEFER: c_int = 16; +pub const SA_RESETHAND: c_int = 4; +pub const SA_NOCLDSTOP: c_int = 8; +pub const SA_SIGINFO: c_int = 64; +pub const SA_INTERRUPT: c_int = 0; +pub const SA_NOMASK: c_int = 16; +pub const SA_ONESHOT: c_int = 4; +pub const SA_STACK: c_int = 1; +pub const SIG_BLOCK: c_int = 1; +pub const SIG_UNBLOCK: c_int = 2; +pub const SIG_SETMASK: c_int = 3; + +// bits/sigcontext.h +pub const FPC_IE: u16 = 1; +pub const FPC_IM: u16 = 1; +pub const FPC_DE: u16 = 2; +pub const FPC_DM: u16 = 2; +pub const FPC_ZE: u16 = 4; +pub const FPC_ZM: u16 = 4; +pub const FPC_OE: u16 = 8; +pub const FPC_OM: u16 = 8; +pub const FPC_UE: u16 = 16; +pub const FPC_PE: u16 = 32; +pub const FPC_PC: u16 = 768; +pub const FPC_PC_24: u16 = 0; +pub const FPC_PC_53: u16 = 512; +pub const FPC_PC_64: u16 = 768; +pub const FPC_RC: u16 = 3072; +pub const FPC_RC_RN: u16 = 0; +pub const FPC_RC_RD: u16 = 1024; +pub const FPC_RC_RU: u16 = 2048; +pub const FPC_RC_CHOP: u16 = 3072; +pub const FPC_IC: u16 = 4096; +pub const FPC_IC_PROJ: u16 = 0; +pub const FPC_IC_AFF: u16 = 4096; +pub const FPS_IE: u16 = 1; +pub const FPS_DE: u16 = 2; +pub const FPS_ZE: u16 = 4; +pub const FPS_OE: u16 = 8; +pub const FPS_UE: u16 = 16; +pub const FPS_PE: u16 = 32; +pub const FPS_SF: u16 = 64; +pub const FPS_ES: u16 = 128; +pub const FPS_C0: u16 = 256; +pub const FPS_C1: u16 = 512; +pub const FPS_C2: u16 = 1024; +pub const FPS_TOS: u16 = 14336; +pub const FPS_TOS_SHIFT: u16 = 11; +pub const FPS_C3: u16 = 16384; +pub const FPS_BUSY: u16 = 32768; +pub const FPE_INTOVF_TRAP: c_int = 1; +pub const FPE_INTDIV_FAULT: c_int = 2; +pub const FPE_FLTOVF_FAULT: c_int = 3; +pub const FPE_FLTDIV_FAULT: c_int = 4; +pub const FPE_FLTUND_FAULT: c_int = 5; +pub const FPE_SUBRNG_FAULT: c_int = 7; +pub const FPE_FLTDNR_FAULT: c_int = 8; +pub const FPE_FLTINX_FAULT: c_int = 9; +pub const FPE_EMERR_FAULT: c_int = 10; +pub const FPE_EMBND_FAULT: c_int = 11; +pub const ILL_INVOPR_FAULT: c_int = 1; +pub const 
ILL_STACK_FAULT: c_int = 2; +pub const ILL_FPEOPR_FAULT: c_int = 3; +pub const DBG_SINGLE_TRAP: c_int = 1; +pub const DBG_BRKPNT_FAULT: c_int = 2; +pub const __NGREG: usize = 19; +pub const NGREG: usize = 19; + +// bits/sigstack.h +pub const MINSIGSTKSZ: usize = 8192; +pub const SIGSTKSZ: usize = 40960; + +// sys/stat.h +pub const __S_IFMT: mode_t = 0o17_0000; +pub const __S_IFDIR: mode_t = 0o4_0000; +pub const __S_IFCHR: mode_t = 0o2_0000; +pub const __S_IFBLK: mode_t = 0o6_0000; +pub const __S_IFREG: mode_t = 0o10_0000; +pub const __S_IFLNK: mode_t = 0o12_0000; +pub const __S_IFSOCK: mode_t = 0o14_0000; +pub const __S_IFIFO: mode_t = 0o1_0000; +pub const __S_ISUID: mode_t = 0o4000; +pub const __S_ISGID: mode_t = 0o2000; +pub const __S_ISVTX: mode_t = 0o1000; +pub const __S_IREAD: mode_t = 0o0400; +pub const __S_IWRITE: mode_t = 0o0200; +pub const __S_IEXEC: mode_t = 0o0100; +pub const S_INOCACHE: mode_t = 0o20_0000; +pub const S_IUSEUNK: mode_t = 0o40_0000; +pub const S_IUNKNOWN: mode_t = 0o700_0000; +pub const S_IUNKSHIFT: mode_t = 0o0014; +pub const S_IPTRANS: mode_t = 0o1000_0000; +pub const S_IATRANS: mode_t = 0o2000_0000; +pub const S_IROOT: mode_t = 0o4000_0000; +pub const S_ITRANS: mode_t = 0o7000_0000; +pub const S_IMMAP0: mode_t = 0o10000_0000; +pub const CMASK: mode_t = 18; +pub const UF_SETTABLE: c_uint = 65535; +pub const UF_NODUMP: c_uint = 1; +pub const UF_IMMUTABLE: c_uint = 2; +pub const UF_APPEND: c_uint = 4; +pub const UF_OPAQUE: c_uint = 8; +pub const UF_NOUNLINK: c_uint = 16; +pub const SF_SETTABLE: c_uint = 4294901760; +pub const SF_ARCHIVED: c_uint = 65536; +pub const SF_IMMUTABLE: c_uint = 131072; +pub const SF_APPEND: c_uint = 262144; +pub const SF_NOUNLINK: c_uint = 1048576; +pub const SF_SNAPSHOT: c_uint = 2097152; +pub const UTIME_NOW: c_long = -1; +pub const UTIME_OMIT: c_long = -2; +pub const S_IFMT: mode_t = 0o17_0000; +pub const S_IFDIR: mode_t = 0o4_0000; +pub const S_IFCHR: mode_t = 0o2_0000; +pub const S_IFBLK: mode_t = 0o6_0000; +pub const S_IFREG: mode_t = 0o10_0000; +pub const S_IFIFO: mode_t = 0o1_0000; +pub const S_IFLNK: mode_t = 0o12_0000; +pub const S_IFSOCK: mode_t = 0o14_0000; +pub const S_ISUID: mode_t = 0o4000; +pub const S_ISGID: mode_t = 0o2000; +pub const S_ISVTX: mode_t = 0o1000; +pub const S_IRUSR: mode_t = 0o0400; +pub const S_IWUSR: mode_t = 0o0200; +pub const S_IXUSR: mode_t = 0o0100; +pub const S_IRWXU: mode_t = 0o0700; +pub const S_IREAD: mode_t = 0o0400; +pub const S_IWRITE: mode_t = 0o0200; +pub const S_IEXEC: mode_t = 0o0100; +pub const S_IRGRP: mode_t = 0o0040; +pub const S_IWGRP: mode_t = 0o0020; +pub const S_IXGRP: mode_t = 0o0010; +pub const S_IRWXG: mode_t = 0o0070; +pub const S_IROTH: mode_t = 0o0004; +pub const S_IWOTH: mode_t = 0o0002; +pub const S_IXOTH: mode_t = 0o0001; +pub const S_IRWXO: mode_t = 0o0007; +pub const ACCESSPERMS: mode_t = 511; +pub const ALLPERMS: mode_t = 4095; +pub const DEFFILEMODE: mode_t = 438; +pub const S_BLKSIZE: usize = 512; +pub const STATX_TYPE: c_uint = 1; +pub const STATX_MODE: c_uint = 2; +pub const STATX_NLINK: c_uint = 4; +pub const STATX_UID: c_uint = 8; +pub const STATX_GID: c_uint = 16; +pub const STATX_ATIME: c_uint = 32; +pub const STATX_MTIME: c_uint = 64; +pub const STATX_CTIME: c_uint = 128; +pub const STATX_INO: c_uint = 256; +pub const STATX_SIZE: c_uint = 512; +pub const STATX_BLOCKS: c_uint = 1024; +pub const STATX_BASIC_STATS: c_uint = 2047; +pub const STATX_ALL: c_uint = 4095; +pub const STATX_BTIME: c_uint = 2048; +pub const STATX_MNT_ID: c_uint = 4096; +pub const 
STATX_DIOALIGN: c_uint = 8192; +pub const STATX__RESERVED: c_uint = 2147483648; +pub const STATX_ATTR_COMPRESSED: c_uint = 4; +pub const STATX_ATTR_IMMUTABLE: c_uint = 16; +pub const STATX_ATTR_APPEND: c_uint = 32; +pub const STATX_ATTR_NODUMP: c_uint = 64; +pub const STATX_ATTR_ENCRYPTED: c_uint = 2048; +pub const STATX_ATTR_AUTOMOUNT: c_uint = 4096; +pub const STATX_ATTR_MOUNT_ROOT: c_uint = 8192; +pub const STATX_ATTR_VERITY: c_uint = 1048576; +pub const STATX_ATTR_DAX: c_uint = 2097152; + +// sys/ioctl.h +pub const TIOCM_LE: c_int = 1; +pub const TIOCM_DTR: c_int = 2; +pub const TIOCM_RTS: c_int = 4; +pub const TIOCM_ST: c_int = 8; +pub const TIOCM_SR: c_int = 16; +pub const TIOCM_CTS: c_int = 32; +pub const TIOCM_CAR: c_int = 64; +pub const TIOCM_CD: c_int = 64; +pub const TIOCM_RNG: c_int = 128; +pub const TIOCM_RI: c_int = 128; +pub const TIOCM_DSR: c_int = 256; +pub const TIOCPKT_DATA: c_int = 0; +pub const TIOCPKT_FLUSHREAD: c_int = 1; +pub const TIOCPKT_FLUSHWRITE: c_int = 2; +pub const TIOCPKT_STOP: c_int = 4; +pub const TIOCPKT_START: c_int = 8; +pub const TIOCPKT_NOSTOP: c_int = 16; +pub const TIOCPKT_DOSTOP: c_int = 32; +pub const TIOCPKT_IOCTL: c_int = 64; +pub const TTYDISC: c_int = 0; +pub const TABLDISC: c_int = 3; +pub const SLIPDISC: c_int = 4; +pub const TANDEM: crate::tcflag_t = 1; +pub const CBREAK: crate::tcflag_t = 2; +pub const LCASE: crate::tcflag_t = 4; +pub const CRMOD: crate::tcflag_t = 16; +pub const RAW: crate::tcflag_t = 32; +pub const ODDP: crate::tcflag_t = 64; +pub const EVENP: crate::tcflag_t = 128; +pub const ANYP: crate::tcflag_t = 192; +pub const NLDELAY: crate::tcflag_t = 768; +pub const NL2: crate::tcflag_t = 512; +pub const NL3: crate::tcflag_t = 768; +pub const TBDELAY: crate::tcflag_t = 3072; +pub const XTABS: crate::tcflag_t = 3072; +pub const CRDELAY: crate::tcflag_t = 12288; +pub const VTDELAY: crate::tcflag_t = 16384; +pub const BSDELAY: crate::tcflag_t = 32768; +pub const ALLDELAY: crate::tcflag_t = 65280; +pub const CRTBS: crate::tcflag_t = 65536; +pub const PRTERA: crate::tcflag_t = 131072; +pub const CRTERA: crate::tcflag_t = 262144; +pub const TILDE: crate::tcflag_t = 524288; +pub const LITOUT: crate::tcflag_t = 2097152; +pub const NOHANG: crate::tcflag_t = 16777216; +pub const L001000: crate::tcflag_t = 33554432; +pub const CRTKIL: crate::tcflag_t = 67108864; +pub const PASS8: crate::tcflag_t = 134217728; +pub const CTLECH: crate::tcflag_t = 268435456; +pub const DECCTQ: crate::tcflag_t = 1073741824; + +pub const FIONBIO: c_ulong = 0xa008007e; +pub const FIONREAD: c_ulong = 0x6008007f; +pub const TIOCSWINSZ: c_ulong = 0x90200767; +pub const TIOCGWINSZ: c_ulong = 0x50200768; +pub const TIOCEXCL: c_ulong = 0x70d; +pub const TIOCNXCL: c_ulong = 0x70e; +pub const TIOCSCTTY: c_ulong = 0x761; + +pub const FIOCLEX: c_ulong = 1; + +// fcntl.h +pub const O_EXEC: c_int = 4; +pub const O_NORW: c_int = 0; +pub const O_RDONLY: c_int = 1; +pub const O_WRONLY: c_int = 2; +pub const O_RDWR: c_int = 3; +pub const O_ACCMODE: c_int = 3; +pub const O_LARGEFILE: c_int = 0; +pub const O_CREAT: c_int = 16; +pub const O_EXCL: c_int = 32; +pub const O_NOLINK: c_int = 64; +pub const O_NOTRANS: c_int = 128; +pub const O_NOFOLLOW: c_int = 1048576; +pub const O_DIRECTORY: c_int = 2097152; +pub const O_APPEND: c_int = 256; +pub const O_ASYNC: c_int = 512; +pub const O_FSYNC: c_int = 1024; +pub const O_SYNC: c_int = 1024; +pub const O_NOATIME: c_int = 2048; +pub const O_SHLOCK: c_int = 131072; +pub const O_EXLOCK: c_int = 262144; +pub const O_DSYNC: c_int = 1024; 
+pub const O_RSYNC: c_int = 1024; +pub const O_NONBLOCK: c_int = 8; +pub const O_NDELAY: c_int = 8; +pub const O_HURD: c_int = 458751; +pub const O_TRUNC: c_int = 65536; +pub const O_CLOEXEC: c_int = 4194304; +pub const O_IGNORE_CTTY: c_int = 524288; +pub const O_TMPFILE: c_int = 8388608; +pub const O_NOCTTY: c_int = 0; +pub const FREAD: c_int = 1; +pub const FWRITE: c_int = 2; +pub const FASYNC: c_int = 512; +pub const FCREAT: c_int = 16; +pub const FEXCL: c_int = 32; +pub const FTRUNC: c_int = 65536; +pub const FNOCTTY: c_int = 0; +pub const FFSYNC: c_int = 1024; +pub const FSYNC: c_int = 1024; +pub const FAPPEND: c_int = 256; +pub const FNONBLOCK: c_int = 8; +pub const FNDELAY: c_int = 8; +pub const F_DUPFD: c_int = 0; +pub const F_GETFD: c_int = 1; +pub const F_SETFD: c_int = 2; +pub const F_GETFL: c_int = 3; +pub const F_SETFL: c_int = 4; +pub const F_GETOWN: c_int = 5; +pub const F_SETOWN: c_int = 6; +pub const F_GETLK: c_int = 7; +pub const F_SETLK: c_int = 8; +pub const F_SETLKW: c_int = 9; +pub const F_GETLK64: c_int = 10; +pub const F_SETLK64: c_int = 11; +pub const F_SETLKW64: c_int = 12; +pub const F_DUPFD_CLOEXEC: c_int = 1030; +pub const FD_CLOEXEC: c_int = 1; +pub const F_RDLCK: c_int = 1; +pub const F_WRLCK: c_int = 2; +pub const F_UNLCK: c_int = 3; +pub const POSIX_FADV_NORMAL: c_int = 0; +pub const POSIX_FADV_RANDOM: c_int = 1; +pub const POSIX_FADV_SEQUENTIAL: c_int = 2; +pub const POSIX_FADV_WILLNEED: c_int = 3; +pub const POSIX_FADV_DONTNEED: c_int = 4; +pub const POSIX_FADV_NOREUSE: c_int = 5; +pub const AT_FDCWD: c_int = -100; +pub const AT_SYMLINK_NOFOLLOW: c_int = 256; +pub const AT_REMOVEDIR: c_int = 512; +pub const AT_SYMLINK_FOLLOW: c_int = 1024; +pub const AT_NO_AUTOMOUNT: c_int = 2048; +pub const AT_EMPTY_PATH: c_int = 4096; +pub const AT_STATX_SYNC_TYPE: c_int = 24576; +pub const AT_STATX_SYNC_AS_STAT: c_int = 0; +pub const AT_STATX_FORCE_SYNC: c_int = 8192; +pub const AT_STATX_DONT_SYNC: c_int = 16384; +pub const AT_RECURSIVE: c_int = 32768; +pub const AT_EACCESS: c_int = 512; + +// sys/uio.h +pub const RWF_HIPRI: c_int = 1; +pub const RWF_DSYNC: c_int = 2; +pub const RWF_SYNC: c_int = 4; +pub const RWF_NOWAIT: c_int = 8; +pub const RWF_APPEND: c_int = 16; + +// errno.h +pub const EPERM: c_int = 1073741825; +pub const ENOENT: c_int = 1073741826; +pub const ESRCH: c_int = 1073741827; +pub const EINTR: c_int = 1073741828; +pub const EIO: c_int = 1073741829; +pub const ENXIO: c_int = 1073741830; +pub const E2BIG: c_int = 1073741831; +pub const ENOEXEC: c_int = 1073741832; +pub const EBADF: c_int = 1073741833; +pub const ECHILD: c_int = 1073741834; +pub const EDEADLK: c_int = 1073741835; +pub const ENOMEM: c_int = 1073741836; +pub const EACCES: c_int = 1073741837; +pub const EFAULT: c_int = 1073741838; +pub const ENOTBLK: c_int = 1073741839; +pub const EBUSY: c_int = 1073741840; +pub const EEXIST: c_int = 1073741841; +pub const EXDEV: c_int = 1073741842; +pub const ENODEV: c_int = 1073741843; +pub const ENOTDIR: c_int = 1073741844; +pub const EISDIR: c_int = 1073741845; +pub const EINVAL: c_int = 1073741846; +pub const EMFILE: c_int = 1073741848; +pub const ENFILE: c_int = 1073741847; +pub const ENOTTY: c_int = 1073741849; +pub const ETXTBSY: c_int = 1073741850; +pub const EFBIG: c_int = 1073741851; +pub const ENOSPC: c_int = 1073741852; +pub const ESPIPE: c_int = 1073741853; +pub const EROFS: c_int = 1073741854; +pub const EMLINK: c_int = 1073741855; +pub const EPIPE: c_int = 1073741856; +pub const EDOM: c_int = 1073741857; +pub const ERANGE: c_int = 
1073741858; +pub const EAGAIN: c_int = 1073741859; +pub const EWOULDBLOCK: c_int = 1073741859; +pub const EINPROGRESS: c_int = 1073741860; +pub const EALREADY: c_int = 1073741861; +pub const ENOTSOCK: c_int = 1073741862; +pub const EMSGSIZE: c_int = 1073741864; +pub const EPROTOTYPE: c_int = 1073741865; +pub const ENOPROTOOPT: c_int = 1073741866; +pub const EPROTONOSUPPORT: c_int = 1073741867; +pub const ESOCKTNOSUPPORT: c_int = 1073741868; +pub const EOPNOTSUPP: c_int = 1073741869; +pub const EPFNOSUPPORT: c_int = 1073741870; +pub const EAFNOSUPPORT: c_int = 1073741871; +pub const EADDRINUSE: c_int = 1073741872; +pub const EADDRNOTAVAIL: c_int = 1073741873; +pub const ENETDOWN: c_int = 1073741874; +pub const ENETUNREACH: c_int = 1073741875; +pub const ENETRESET: c_int = 1073741876; +pub const ECONNABORTED: c_int = 1073741877; +pub const ECONNRESET: c_int = 1073741878; +pub const ENOBUFS: c_int = 1073741879; +pub const EISCONN: c_int = 1073741880; +pub const ENOTCONN: c_int = 1073741881; +pub const EDESTADDRREQ: c_int = 1073741863; +pub const ESHUTDOWN: c_int = 1073741882; +pub const ETOOMANYREFS: c_int = 1073741883; +pub const ETIMEDOUT: c_int = 1073741884; +pub const ECONNREFUSED: c_int = 1073741885; +pub const ELOOP: c_int = 1073741886; +pub const ENAMETOOLONG: c_int = 1073741887; +pub const EHOSTDOWN: c_int = 1073741888; +pub const EHOSTUNREACH: c_int = 1073741889; +pub const ENOTEMPTY: c_int = 1073741890; +pub const EPROCLIM: c_int = 1073741891; +pub const EUSERS: c_int = 1073741892; +pub const EDQUOT: c_int = 1073741893; +pub const ESTALE: c_int = 1073741894; +pub const EREMOTE: c_int = 1073741895; +pub const EBADRPC: c_int = 1073741896; +pub const ERPCMISMATCH: c_int = 1073741897; +pub const EPROGUNAVAIL: c_int = 1073741898; +pub const EPROGMISMATCH: c_int = 1073741899; +pub const EPROCUNAVAIL: c_int = 1073741900; +pub const ENOLCK: c_int = 1073741901; +pub const EFTYPE: c_int = 1073741903; +pub const EAUTH: c_int = 1073741904; +pub const ENEEDAUTH: c_int = 1073741905; +pub const ENOSYS: c_int = 1073741902; +pub const ELIBEXEC: c_int = 1073741907; +pub const ENOTSUP: c_int = 1073741942; +pub const EILSEQ: c_int = 1073741930; +pub const EBACKGROUND: c_int = 1073741924; +pub const EDIED: c_int = 1073741925; +pub const EGREGIOUS: c_int = 1073741927; +pub const EIEIO: c_int = 1073741928; +pub const EGRATUITOUS: c_int = 1073741929; +pub const EBADMSG: c_int = 1073741931; +pub const EIDRM: c_int = 1073741932; +pub const EMULTIHOP: c_int = 1073741933; +pub const ENODATA: c_int = 1073741934; +pub const ENOLINK: c_int = 1073741935; +pub const ENOMSG: c_int = 1073741936; +pub const ENOSR: c_int = 1073741937; +pub const ENOSTR: c_int = 1073741938; +pub const EOVERFLOW: c_int = 1073741939; +pub const EPROTO: c_int = 1073741940; +pub const ETIME: c_int = 1073741941; +pub const ECANCELED: c_int = 1073741943; +pub const EOWNERDEAD: c_int = 1073741944; +pub const ENOTRECOVERABLE: c_int = 1073741945; +pub const EMACH_SEND_IN_PROGRESS: c_int = 268435457; +pub const EMACH_SEND_INVALID_DATA: c_int = 268435458; +pub const EMACH_SEND_INVALID_DEST: c_int = 268435459; +pub const EMACH_SEND_TIMED_OUT: c_int = 268435460; +pub const EMACH_SEND_WILL_NOTIFY: c_int = 268435461; +pub const EMACH_SEND_NOTIFY_IN_PROGRESS: c_int = 268435462; +pub const EMACH_SEND_INTERRUPTED: c_int = 268435463; +pub const EMACH_SEND_MSG_TOO_SMALL: c_int = 268435464; +pub const EMACH_SEND_INVALID_REPLY: c_int = 268435465; +pub const EMACH_SEND_INVALID_RIGHT: c_int = 268435466; +pub const EMACH_SEND_INVALID_NOTIFY: c_int = 268435467; 
+pub const EMACH_SEND_INVALID_MEMORY: c_int = 268435468; +pub const EMACH_SEND_NO_BUFFER: c_int = 268435469; +pub const EMACH_SEND_NO_NOTIFY: c_int = 268435470; +pub const EMACH_SEND_INVALID_TYPE: c_int = 268435471; +pub const EMACH_SEND_INVALID_HEADER: c_int = 268435472; +pub const EMACH_RCV_IN_PROGRESS: c_int = 268451841; +pub const EMACH_RCV_INVALID_NAME: c_int = 268451842; +pub const EMACH_RCV_TIMED_OUT: c_int = 268451843; +pub const EMACH_RCV_TOO_LARGE: c_int = 268451844; +pub const EMACH_RCV_INTERRUPTED: c_int = 268451845; +pub const EMACH_RCV_PORT_CHANGED: c_int = 268451846; +pub const EMACH_RCV_INVALID_NOTIFY: c_int = 268451847; +pub const EMACH_RCV_INVALID_DATA: c_int = 268451848; +pub const EMACH_RCV_PORT_DIED: c_int = 268451849; +pub const EMACH_RCV_IN_SET: c_int = 268451850; +pub const EMACH_RCV_HEADER_ERROR: c_int = 268451851; +pub const EMACH_RCV_BODY_ERROR: c_int = 268451852; +pub const EKERN_INVALID_ADDRESS: c_int = 1; +pub const EKERN_PROTECTION_FAILURE: c_int = 2; +pub const EKERN_NO_SPACE: c_int = 3; +pub const EKERN_INVALID_ARGUMENT: c_int = 4; +pub const EKERN_FAILURE: c_int = 5; +pub const EKERN_RESOURCE_SHORTAGE: c_int = 6; +pub const EKERN_NOT_RECEIVER: c_int = 7; +pub const EKERN_NO_ACCESS: c_int = 8; +pub const EKERN_MEMORY_FAILURE: c_int = 9; +pub const EKERN_MEMORY_ERROR: c_int = 10; +pub const EKERN_NOT_IN_SET: c_int = 12; +pub const EKERN_NAME_EXISTS: c_int = 13; +pub const EKERN_ABORTED: c_int = 14; +pub const EKERN_INVALID_NAME: c_int = 15; +pub const EKERN_INVALID_TASK: c_int = 16; +pub const EKERN_INVALID_RIGHT: c_int = 17; +pub const EKERN_INVALID_VALUE: c_int = 18; +pub const EKERN_UREFS_OVERFLOW: c_int = 19; +pub const EKERN_INVALID_CAPABILITY: c_int = 20; +pub const EKERN_RIGHT_EXISTS: c_int = 21; +pub const EKERN_INVALID_HOST: c_int = 22; +pub const EKERN_MEMORY_PRESENT: c_int = 23; +pub const EKERN_WRITE_PROTECTION_FAILURE: c_int = 24; +pub const EKERN_TERMINATED: c_int = 26; +pub const EKERN_TIMEDOUT: c_int = 27; +pub const EKERN_INTERRUPTED: c_int = 28; +pub const EMIG_TYPE_ERROR: c_int = -300; +pub const EMIG_REPLY_MISMATCH: c_int = -301; +pub const EMIG_REMOTE_ERROR: c_int = -302; +pub const EMIG_BAD_ID: c_int = -303; +pub const EMIG_BAD_ARGUMENTS: c_int = -304; +pub const EMIG_NO_REPLY: c_int = -305; +pub const EMIG_EXCEPTION: c_int = -306; +pub const EMIG_ARRAY_TOO_LARGE: c_int = -307; +pub const EMIG_SERVER_DIED: c_int = -308; +pub const EMIG_DESTROY_REQUEST: c_int = -309; +pub const ED_IO_ERROR: c_int = 2500; +pub const ED_WOULD_BLOCK: c_int = 2501; +pub const ED_NO_SUCH_DEVICE: c_int = 2502; +pub const ED_ALREADY_OPEN: c_int = 2503; +pub const ED_DEVICE_DOWN: c_int = 2504; +pub const ED_INVALID_OPERATION: c_int = 2505; +pub const ED_INVALID_RECNUM: c_int = 2506; +pub const ED_INVALID_SIZE: c_int = 2507; +pub const ED_NO_MEMORY: c_int = 2508; +pub const ED_READ_ONLY: c_int = 2509; +pub const _HURD_ERRNOS: usize = 122; + +// sched.h +pub const SCHED_OTHER: c_int = 0; +pub const SCHED_FIFO: c_int = 1; +pub const SCHED_RR: c_int = 2; +pub const _BITS_TYPES_STRUCT_SCHED_PARAM: usize = 1; +pub const __CPU_SETSIZE: usize = 1024; +pub const CPU_SETSIZE: usize = 1024; + +// pthread.h +pub const PTHREAD_SPINLOCK_INITIALIZER: c_int = 0; +pub const PTHREAD_CANCEL_DISABLE: c_int = 0; +pub const PTHREAD_CANCEL_ENABLE: c_int = 1; +pub const PTHREAD_CANCEL_DEFERRED: c_int = 0; +pub const PTHREAD_CANCEL_ASYNCHRONOUS: c_int = 1; +pub const PTHREAD_BARRIER_SERIAL_THREAD: c_int = -1; + +// netinet/tcp.h +pub const TCP_NODELAY: c_int = 1; +pub const TCP_MAXSEG: 
c_int = 2; +pub const TCP_CORK: c_int = 3; +pub const TCP_KEEPIDLE: c_int = 4; +pub const TCP_KEEPINTVL: c_int = 5; +pub const TCP_KEEPCNT: c_int = 6; +pub const TCP_SYNCNT: c_int = 7; +pub const TCP_LINGER2: c_int = 8; +pub const TCP_DEFER_ACCEPT: c_int = 9; +pub const TCP_WINDOW_CLAMP: c_int = 10; +pub const TCP_INFO: c_int = 11; +pub const TCP_QUICKACK: c_int = 12; +pub const TCP_CONGESTION: c_int = 13; +pub const TCP_MD5SIG: c_int = 14; +pub const TCP_COOKIE_TRANSACTIONS: c_int = 15; +pub const TCP_THIN_LINEAR_TIMEOUTS: c_int = 16; +pub const TCP_THIN_DUPACK: c_int = 17; +pub const TCP_USER_TIMEOUT: c_int = 18; +pub const TCP_REPAIR: c_int = 19; +pub const TCP_REPAIR_QUEUE: c_int = 20; +pub const TCP_QUEUE_SEQ: c_int = 21; +pub const TCP_REPAIR_OPTIONS: c_int = 22; +pub const TCP_FASTOPEN: c_int = 23; +pub const TCP_TIMESTAMP: c_int = 24; +pub const TCP_NOTSENT_LOWAT: c_int = 25; +pub const TCP_CC_INFO: c_int = 26; +pub const TCP_SAVE_SYN: c_int = 27; +pub const TCP_SAVED_SYN: c_int = 28; +pub const TCP_REPAIR_WINDOW: c_int = 29; +pub const TCP_FASTOPEN_CONNECT: c_int = 30; +pub const TCP_ULP: c_int = 31; +pub const TCP_MD5SIG_EXT: c_int = 32; +pub const TCP_FASTOPEN_KEY: c_int = 33; +pub const TCP_FASTOPEN_NO_COOKIE: c_int = 34; +pub const TCP_ZEROCOPY_RECEIVE: c_int = 35; +pub const TCP_INQ: c_int = 36; +pub const TCP_CM_INQ: c_int = 36; +pub const TCP_TX_DELAY: c_int = 37; +pub const TCP_REPAIR_ON: c_int = 1; +pub const TCP_REPAIR_OFF: c_int = 0; +pub const TCP_REPAIR_OFF_NO_WP: c_int = -1; + +// stdint.h +pub const INT8_MIN: i8 = -128; +pub const INT16_MIN: i16 = -32768; +pub const INT32_MIN: i32 = -2147483648; +pub const INT8_MAX: i8 = 127; +pub const INT16_MAX: i16 = 32767; +pub const INT32_MAX: i32 = 2147483647; +pub const UINT8_MAX: u8 = 255; +pub const UINT16_MAX: u16 = 65535; +pub const UINT32_MAX: u32 = 4294967295; +pub const INT_LEAST8_MIN: int_least8_t = -128; +pub const INT_LEAST16_MIN: int_least16_t = -32768; +pub const INT_LEAST32_MIN: int_least32_t = -2147483648; +pub const INT_LEAST8_MAX: int_least8_t = 127; +pub const INT_LEAST16_MAX: int_least16_t = 32767; +pub const INT_LEAST32_MAX: int_least32_t = 2147483647; +pub const UINT_LEAST8_MAX: uint_least8_t = 255; +pub const UINT_LEAST16_MAX: uint_least16_t = 65535; +pub const UINT_LEAST32_MAX: uint_least32_t = 4294967295; +pub const INT_FAST8_MIN: int_fast8_t = -128; +pub const INT_FAST16_MIN: int_fast16_t = -2147483648; +pub const INT_FAST32_MIN: int_fast32_t = -2147483648; +pub const INT_FAST8_MAX: int_fast8_t = 127; +pub const INT_FAST16_MAX: int_fast16_t = 2147483647; +pub const INT_FAST32_MAX: int_fast32_t = 2147483647; +pub const UINT_FAST8_MAX: uint_fast8_t = 255; +pub const UINT_FAST16_MAX: uint_fast16_t = 4294967295; +pub const UINT_FAST32_MAX: uint_fast32_t = 4294967295; +pub const INTPTR_MIN: __intptr_t = -2147483648; +pub const INTPTR_MAX: __intptr_t = 2147483647; +pub const UINTPTR_MAX: usize = 4294967295; +pub const PTRDIFF_MIN: __ptrdiff_t = -2147483648; +pub const PTRDIFF_MAX: __ptrdiff_t = 2147483647; +pub const SIG_ATOMIC_MIN: __sig_atomic_t = -2147483648; +pub const SIG_ATOMIC_MAX: __sig_atomic_t = 2147483647; +pub const SIZE_MAX: usize = 4294967295; +pub const WINT_MIN: wint_t = 0; +pub const WINT_MAX: wint_t = 4294967295; +pub const INT8_WIDTH: usize = 8; +pub const UINT8_WIDTH: usize = 8; +pub const INT16_WIDTH: usize = 16; +pub const UINT16_WIDTH: usize = 16; +pub const INT32_WIDTH: usize = 32; +pub const UINT32_WIDTH: usize = 32; +pub const INT64_WIDTH: usize = 64; +pub const UINT64_WIDTH: usize 
= 64; +pub const INT_LEAST8_WIDTH: usize = 8; +pub const UINT_LEAST8_WIDTH: usize = 8; +pub const INT_LEAST16_WIDTH: usize = 16; +pub const UINT_LEAST16_WIDTH: usize = 16; +pub const INT_LEAST32_WIDTH: usize = 32; +pub const UINT_LEAST32_WIDTH: usize = 32; +pub const INT_LEAST64_WIDTH: usize = 64; +pub const UINT_LEAST64_WIDTH: usize = 64; +pub const INT_FAST8_WIDTH: usize = 8; +pub const UINT_FAST8_WIDTH: usize = 8; +pub const INT_FAST16_WIDTH: usize = 32; +pub const UINT_FAST16_WIDTH: usize = 32; +pub const INT_FAST32_WIDTH: usize = 32; +pub const UINT_FAST32_WIDTH: usize = 32; +pub const INT_FAST64_WIDTH: usize = 64; +pub const UINT_FAST64_WIDTH: usize = 64; +pub const INTPTR_WIDTH: usize = 32; +pub const UINTPTR_WIDTH: usize = 32; +pub const INTMAX_WIDTH: usize = 64; +pub const UINTMAX_WIDTH: usize = 64; +pub const PTRDIFF_WIDTH: usize = 32; +pub const SIG_ATOMIC_WIDTH: usize = 32; +pub const SIZE_WIDTH: usize = 32; +pub const WCHAR_WIDTH: usize = 32; +pub const WINT_WIDTH: usize = 32; + +pub const TH_FIN: u8 = 1; +pub const TH_SYN: u8 = 2; +pub const TH_RST: u8 = 4; +pub const TH_PUSH: u8 = 8; +pub const TH_ACK: u8 = 16; +pub const TH_URG: u8 = 32; +pub const TCPOPT_EOL: u8 = 0; +pub const TCPOPT_NOP: u8 = 1; +pub const TCPOPT_MAXSEG: u8 = 2; +pub const TCPOLEN_MAXSEG: u8 = 4; +pub const TCPOPT_WINDOW: u8 = 3; +pub const TCPOLEN_WINDOW: u8 = 3; +pub const TCPOPT_SACK_PERMITTED: u8 = 4; +pub const TCPOLEN_SACK_PERMITTED: u8 = 2; +pub const TCPOPT_SACK: u8 = 5; +pub const TCPOPT_TIMESTAMP: u8 = 8; +pub const TCPOLEN_TIMESTAMP: u8 = 10; +pub const TCPOLEN_TSTAMP_APPA: u8 = 12; +pub const TCPOPT_TSTAMP_HDR: u32 = 16844810; +pub const TCP_MSS: usize = 512; +pub const TCP_MAXWIN: usize = 65535; +pub const TCP_MAX_WINSHIFT: usize = 14; +pub const TCPI_OPT_TIMESTAMPS: u8 = 1; +pub const TCPI_OPT_SACK: u8 = 2; +pub const TCPI_OPT_WSCALE: u8 = 4; +pub const TCPI_OPT_ECN: u8 = 8; +pub const TCPI_OPT_ECN_SEEN: u8 = 16; +pub const TCPI_OPT_SYN_DATA: u8 = 32; +pub const TCP_MD5SIG_MAXKEYLEN: usize = 80; +pub const TCP_MD5SIG_FLAG_PREFIX: usize = 1; +pub const TCP_COOKIE_MIN: usize = 8; +pub const TCP_COOKIE_MAX: usize = 16; +pub const TCP_COOKIE_PAIR_SIZE: usize = 32; +pub const TCP_COOKIE_IN_ALWAYS: c_int = 1; +pub const TCP_COOKIE_OUT_NEVER: c_int = 2; +pub const TCP_S_DATA_IN: c_int = 4; +pub const TCP_S_DATA_OUT: c_int = 8; +pub const TCP_MSS_DEFAULT: usize = 536; +pub const TCP_MSS_DESIRED: usize = 1220; + +// sys/wait.h +pub const WCOREFLAG: c_int = 128; +pub const WAIT_ANY: pid_t = -1; +pub const WAIT_MYPGRP: pid_t = 0; + +// sys/file.h +pub const LOCK_SH: c_int = 1; +pub const LOCK_EX: c_int = 2; +pub const LOCK_UN: c_int = 8; +pub const LOCK_NB: c_int = 4; + +// sys/mman.h +pub const PROT_NONE: c_int = 0; +pub const PROT_READ: c_int = 4; +pub const PROT_WRITE: c_int = 2; +pub const PROT_EXEC: c_int = 1; +pub const MAP_FILE: c_int = 1; +pub const MAP_ANON: c_int = 2; +pub const MAP_ANONYMOUS: c_int = MAP_ANON; +pub const MAP_TYPE: c_int = 15; +pub const MAP_COPY: c_int = 32; +pub const MAP_SHARED: c_int = 16; +pub const MAP_PRIVATE: c_int = 0; +pub const MAP_FIXED: c_int = 256; +pub const MAP_NOEXTEND: c_int = 512; +pub const MAP_HASSEMAPHORE: c_int = 1024; +pub const MAP_INHERIT: c_int = 2048; +pub const MAP_32BIT: c_int = 4096; +pub const MAP_EXCL: c_int = 16384; +pub const MAP_FAILED: *mut c_void = !0 as *mut c_void; +pub const MADV_NORMAL: c_int = 0; +pub const MADV_RANDOM: c_int = 1; +pub const MADV_SEQUENTIAL: c_int = 2; +pub const MADV_WILLNEED: c_int = 3; +pub const MADV_DONTNEED: 
c_int = 4; +pub const POSIX_MADV_NORMAL: c_int = 0; +pub const POSIX_MADV_RANDOM: c_int = 1; +pub const POSIX_MADV_SEQUENTIAL: c_int = 2; +pub const POSIX_MADV_WILLNEED: c_int = 3; +pub const POSIX_MADV_WONTNEED: c_int = 4; + +pub const MS_ASYNC: c_int = 1; +pub const MS_SYNC: c_int = 0; +pub const MS_INVALIDATE: c_int = 2; +pub const MREMAP_MAYMOVE: c_int = 1; +pub const MREMAP_FIXED: c_int = 2; +pub const MCL_CURRENT: c_int = 0x0001; +pub const MCL_FUTURE: c_int = 0x0002; + +// sys/xattr.h +pub const XATTR_CREATE: c_int = 0x1; +pub const XATTR_REPLACE: c_int = 0x2; + +// spawn.h +pub const POSIX_SPAWN_USEVFORK: c_int = 64; +pub const POSIX_SPAWN_SETSID: c_int = 128; + +// sys/syslog.h +pub const LOG_CRON: c_int = 9 << 3; +pub const LOG_AUTHPRIV: c_int = 10 << 3; +pub const LOG_FTP: c_int = 11 << 3; +pub const LOG_PERROR: c_int = 0x20; + +// net/if.h +pub const IFF_UP: c_int = 0x1; +pub const IFF_BROADCAST: c_int = 0x2; +pub const IFF_DEBUG: c_int = 0x4; +pub const IFF_LOOPBACK: c_int = 0x8; +pub const IFF_POINTOPOINT: c_int = 0x10; +pub const IFF_NOTRAILERS: c_int = 0x20; +pub const IFF_RUNNING: c_int = 0x40; +pub const IFF_NOARP: c_int = 0x80; +pub const IFF_PROMISC: c_int = 0x100; +pub const IFF_ALLMULTI: c_int = 0x200; +pub const IFF_MASTER: c_int = 0x400; +pub const IFF_SLAVE: c_int = 0x800; +pub const IFF_MULTICAST: c_int = 0x1000; +pub const IFF_PORTSEL: c_int = 0x2000; +pub const IFF_AUTOMEDIA: c_int = 0x4000; +pub const IFF_DYNAMIC: c_int = 0x8000; + +// random.h +pub const GRND_NONBLOCK: c_uint = 1; +pub const GRND_RANDOM: c_uint = 2; +pub const GRND_INSECURE: c_uint = 4; + +pub const _PC_LINK_MAX: c_int = 0; +pub const _PC_MAX_CANON: c_int = 1; +pub const _PC_MAX_INPUT: c_int = 2; +pub const _PC_NAME_MAX: c_int = 3; +pub const _PC_PATH_MAX: c_int = 4; +pub const _PC_PIPE_BUF: c_int = 5; +pub const _PC_CHOWN_RESTRICTED: c_int = 6; +pub const _PC_NO_TRUNC: c_int = 7; +pub const _PC_VDISABLE: c_int = 8; +pub const _PC_SYNC_IO: c_int = 9; +pub const _PC_ASYNC_IO: c_int = 10; +pub const _PC_PRIO_IO: c_int = 11; +pub const _PC_SOCK_MAXBUF: c_int = 12; +pub const _PC_FILESIZEBITS: c_int = 13; +pub const _PC_REC_INCR_XFER_SIZE: c_int = 14; +pub const _PC_REC_MAX_XFER_SIZE: c_int = 15; +pub const _PC_REC_MIN_XFER_SIZE: c_int = 16; +pub const _PC_REC_XFER_ALIGN: c_int = 17; +pub const _PC_ALLOC_SIZE_MIN: c_int = 18; +pub const _PC_SYMLINK_MAX: c_int = 19; +pub const _PC_2_SYMLINKS: c_int = 20; +pub const _SC_ARG_MAX: c_int = 0; +pub const _SC_CHILD_MAX: c_int = 1; +pub const _SC_CLK_TCK: c_int = 2; +pub const _SC_NGROUPS_MAX: c_int = 3; +pub const _SC_OPEN_MAX: c_int = 4; +pub const _SC_STREAM_MAX: c_int = 5; +pub const _SC_TZNAME_MAX: c_int = 6; +pub const _SC_JOB_CONTROL: c_int = 7; +pub const _SC_SAVED_IDS: c_int = 8; +pub const _SC_REALTIME_SIGNALS: c_int = 9; +pub const _SC_PRIORITY_SCHEDULING: c_int = 10; +pub const _SC_TIMERS: c_int = 11; +pub const _SC_ASYNCHRONOUS_IO: c_int = 12; +pub const _SC_PRIORITIZED_IO: c_int = 13; +pub const _SC_SYNCHRONIZED_IO: c_int = 14; +pub const _SC_FSYNC: c_int = 15; +pub const _SC_MAPPED_FILES: c_int = 16; +pub const _SC_MEMLOCK: c_int = 17; +pub const _SC_MEMLOCK_RANGE: c_int = 18; +pub const _SC_MEMORY_PROTECTION: c_int = 19; +pub const _SC_MESSAGE_PASSING: c_int = 20; +pub const _SC_SEMAPHORES: c_int = 21; +pub const _SC_SHARED_MEMORY_OBJECTS: c_int = 22; +pub const _SC_AIO_LISTIO_MAX: c_int = 23; +pub const _SC_AIO_MAX: c_int = 24; +pub const _SC_AIO_PRIO_DELTA_MAX: c_int = 25; +pub const _SC_DELAYTIMER_MAX: c_int = 26; +pub const 
_SC_MQ_OPEN_MAX: c_int = 27; +pub const _SC_MQ_PRIO_MAX: c_int = 28; +pub const _SC_VERSION: c_int = 29; +pub const _SC_PAGESIZE: c_int = 30; +pub const _SC_PAGE_SIZE: c_int = 30; +pub const _SC_RTSIG_MAX: c_int = 31; +pub const _SC_SEM_NSEMS_MAX: c_int = 32; +pub const _SC_SEM_VALUE_MAX: c_int = 33; +pub const _SC_SIGQUEUE_MAX: c_int = 34; +pub const _SC_TIMER_MAX: c_int = 35; +pub const _SC_BC_BASE_MAX: c_int = 36; +pub const _SC_BC_DIM_MAX: c_int = 37; +pub const _SC_BC_SCALE_MAX: c_int = 38; +pub const _SC_BC_STRING_MAX: c_int = 39; +pub const _SC_COLL_WEIGHTS_MAX: c_int = 40; +pub const _SC_EQUIV_CLASS_MAX: c_int = 41; +pub const _SC_EXPR_NEST_MAX: c_int = 42; +pub const _SC_LINE_MAX: c_int = 43; +pub const _SC_RE_DUP_MAX: c_int = 44; +pub const _SC_CHARCLASS_NAME_MAX: c_int = 45; +pub const _SC_2_VERSION: c_int = 46; +pub const _SC_2_C_BIND: c_int = 47; +pub const _SC_2_C_DEV: c_int = 48; +pub const _SC_2_FORT_DEV: c_int = 49; +pub const _SC_2_FORT_RUN: c_int = 50; +pub const _SC_2_SW_DEV: c_int = 51; +pub const _SC_2_LOCALEDEF: c_int = 52; +pub const _SC_PII: c_int = 53; +pub const _SC_PII_XTI: c_int = 54; +pub const _SC_PII_SOCKET: c_int = 55; +pub const _SC_PII_INTERNET: c_int = 56; +pub const _SC_PII_OSI: c_int = 57; +pub const _SC_POLL: c_int = 58; +pub const _SC_SELECT: c_int = 59; +pub const _SC_UIO_MAXIOV: c_int = 60; +pub const _SC_IOV_MAX: c_int = 60; +pub const _SC_PII_INTERNET_STREAM: c_int = 61; +pub const _SC_PII_INTERNET_DGRAM: c_int = 62; +pub const _SC_PII_OSI_COTS: c_int = 63; +pub const _SC_PII_OSI_CLTS: c_int = 64; +pub const _SC_PII_OSI_M: c_int = 65; +pub const _SC_T_IOV_MAX: c_int = 66; +pub const _SC_THREADS: c_int = 67; +pub const _SC_THREAD_SAFE_FUNCTIONS: c_int = 68; +pub const _SC_GETGR_R_SIZE_MAX: c_int = 69; +pub const _SC_GETPW_R_SIZE_MAX: c_int = 70; +pub const _SC_LOGIN_NAME_MAX: c_int = 71; +pub const _SC_TTY_NAME_MAX: c_int = 72; +pub const _SC_THREAD_DESTRUCTOR_ITERATIONS: c_int = 73; +pub const _SC_THREAD_KEYS_MAX: c_int = 74; +pub const _SC_THREAD_STACK_MIN: c_int = 75; +pub const _SC_THREAD_THREADS_MAX: c_int = 76; +pub const _SC_THREAD_ATTR_STACKADDR: c_int = 77; +pub const _SC_THREAD_ATTR_STACKSIZE: c_int = 78; +pub const _SC_THREAD_PRIORITY_SCHEDULING: c_int = 79; +pub const _SC_THREAD_PRIO_INHERIT: c_int = 80; +pub const _SC_THREAD_PRIO_PROTECT: c_int = 81; +pub const _SC_THREAD_PROCESS_SHARED: c_int = 82; +pub const _SC_NPROCESSORS_CONF: c_int = 83; +pub const _SC_NPROCESSORS_ONLN: c_int = 84; +pub const _SC_PHYS_PAGES: c_int = 85; +pub const _SC_AVPHYS_PAGES: c_int = 86; +pub const _SC_ATEXIT_MAX: c_int = 87; +pub const _SC_PASS_MAX: c_int = 88; +pub const _SC_XOPEN_VERSION: c_int = 89; +pub const _SC_XOPEN_XCU_VERSION: c_int = 90; +pub const _SC_XOPEN_UNIX: c_int = 91; +pub const _SC_XOPEN_CRYPT: c_int = 92; +pub const _SC_XOPEN_ENH_I18N: c_int = 93; +pub const _SC_XOPEN_SHM: c_int = 94; +pub const _SC_2_CHAR_TERM: c_int = 95; +pub const _SC_2_C_VERSION: c_int = 96; +pub const _SC_2_UPE: c_int = 97; +pub const _SC_XOPEN_XPG2: c_int = 98; +pub const _SC_XOPEN_XPG3: c_int = 99; +pub const _SC_XOPEN_XPG4: c_int = 100; +pub const _SC_CHAR_BIT: c_int = 101; +pub const _SC_CHAR_MAX: c_int = 102; +pub const _SC_CHAR_MIN: c_int = 103; +pub const _SC_INT_MAX: c_int = 104; +pub const _SC_INT_MIN: c_int = 105; +pub const _SC_LONG_BIT: c_int = 106; +pub const _SC_WORD_BIT: c_int = 107; +pub const _SC_MB_LEN_MAX: c_int = 108; +pub const _SC_NZERO: c_int = 109; +pub const _SC_SSIZE_MAX: c_int = 110; +pub const _SC_SCHAR_MAX: c_int = 111; +pub const 
_SC_SCHAR_MIN: c_int = 112; +pub const _SC_SHRT_MAX: c_int = 113; +pub const _SC_SHRT_MIN: c_int = 114; +pub const _SC_UCHAR_MAX: c_int = 115; +pub const _SC_UINT_MAX: c_int = 116; +pub const _SC_ULONG_MAX: c_int = 117; +pub const _SC_USHRT_MAX: c_int = 118; +pub const _SC_NL_ARGMAX: c_int = 119; +pub const _SC_NL_LANGMAX: c_int = 120; +pub const _SC_NL_MSGMAX: c_int = 121; +pub const _SC_NL_NMAX: c_int = 122; +pub const _SC_NL_SETMAX: c_int = 123; +pub const _SC_NL_TEXTMAX: c_int = 124; +pub const _SC_XBS5_ILP32_OFF32: c_int = 125; +pub const _SC_XBS5_ILP32_OFFBIG: c_int = 126; +pub const _SC_XBS5_LP64_OFF64: c_int = 127; +pub const _SC_XBS5_LPBIG_OFFBIG: c_int = 128; +pub const _SC_XOPEN_LEGACY: c_int = 129; +pub const _SC_XOPEN_REALTIME: c_int = 130; +pub const _SC_XOPEN_REALTIME_THREADS: c_int = 131; +pub const _SC_ADVISORY_INFO: c_int = 132; +pub const _SC_BARRIERS: c_int = 133; +pub const _SC_BASE: c_int = 134; +pub const _SC_C_LANG_SUPPORT: c_int = 135; +pub const _SC_C_LANG_SUPPORT_R: c_int = 136; +pub const _SC_CLOCK_SELECTION: c_int = 137; +pub const _SC_CPUTIME: c_int = 138; +pub const _SC_THREAD_CPUTIME: c_int = 139; +pub const _SC_DEVICE_IO: c_int = 140; +pub const _SC_DEVICE_SPECIFIC: c_int = 141; +pub const _SC_DEVICE_SPECIFIC_R: c_int = 142; +pub const _SC_FD_MGMT: c_int = 143; +pub const _SC_FIFO: c_int = 144; +pub const _SC_PIPE: c_int = 145; +pub const _SC_FILE_ATTRIBUTES: c_int = 146; +pub const _SC_FILE_LOCKING: c_int = 147; +pub const _SC_FILE_SYSTEM: c_int = 148; +pub const _SC_MONOTONIC_CLOCK: c_int = 149; +pub const _SC_MULTI_PROCESS: c_int = 150; +pub const _SC_SINGLE_PROCESS: c_int = 151; +pub const _SC_NETWORKING: c_int = 152; +pub const _SC_READER_WRITER_LOCKS: c_int = 153; +pub const _SC_SPIN_LOCKS: c_int = 154; +pub const _SC_REGEXP: c_int = 155; +pub const _SC_REGEX_VERSION: c_int = 156; +pub const _SC_SHELL: c_int = 157; +pub const _SC_SIGNALS: c_int = 158; +pub const _SC_SPAWN: c_int = 159; +pub const _SC_SPORADIC_SERVER: c_int = 160; +pub const _SC_THREAD_SPORADIC_SERVER: c_int = 161; +pub const _SC_SYSTEM_DATABASE: c_int = 162; +pub const _SC_SYSTEM_DATABASE_R: c_int = 163; +pub const _SC_TIMEOUTS: c_int = 164; +pub const _SC_TYPED_MEMORY_OBJECTS: c_int = 165; +pub const _SC_USER_GROUPS: c_int = 166; +pub const _SC_USER_GROUPS_R: c_int = 167; +pub const _SC_2_PBS: c_int = 168; +pub const _SC_2_PBS_ACCOUNTING: c_int = 169; +pub const _SC_2_PBS_LOCATE: c_int = 170; +pub const _SC_2_PBS_MESSAGE: c_int = 171; +pub const _SC_2_PBS_TRACK: c_int = 172; +pub const _SC_SYMLOOP_MAX: c_int = 173; +pub const _SC_STREAMS: c_int = 174; +pub const _SC_2_PBS_CHECKPOINT: c_int = 175; +pub const _SC_V6_ILP32_OFF32: c_int = 176; +pub const _SC_V6_ILP32_OFFBIG: c_int = 177; +pub const _SC_V6_LP64_OFF64: c_int = 178; +pub const _SC_V6_LPBIG_OFFBIG: c_int = 179; +pub const _SC_HOST_NAME_MAX: c_int = 180; +pub const _SC_TRACE: c_int = 181; +pub const _SC_TRACE_EVENT_FILTER: c_int = 182; +pub const _SC_TRACE_INHERIT: c_int = 183; +pub const _SC_TRACE_LOG: c_int = 184; +pub const _SC_LEVEL1_ICACHE_SIZE: c_int = 185; +pub const _SC_LEVEL1_ICACHE_ASSOC: c_int = 186; +pub const _SC_LEVEL1_ICACHE_LINESIZE: c_int = 187; +pub const _SC_LEVEL1_DCACHE_SIZE: c_int = 188; +pub const _SC_LEVEL1_DCACHE_ASSOC: c_int = 189; +pub const _SC_LEVEL1_DCACHE_LINESIZE: c_int = 190; +pub const _SC_LEVEL2_CACHE_SIZE: c_int = 191; +pub const _SC_LEVEL2_CACHE_ASSOC: c_int = 192; +pub const _SC_LEVEL2_CACHE_LINESIZE: c_int = 193; +pub const _SC_LEVEL3_CACHE_SIZE: c_int = 194; +pub const 
_SC_LEVEL3_CACHE_ASSOC: c_int = 195; +pub const _SC_LEVEL3_CACHE_LINESIZE: c_int = 196; +pub const _SC_LEVEL4_CACHE_SIZE: c_int = 197; +pub const _SC_LEVEL4_CACHE_ASSOC: c_int = 198; +pub const _SC_LEVEL4_CACHE_LINESIZE: c_int = 199; +pub const _SC_IPV6: c_int = 235; +pub const _SC_RAW_SOCKETS: c_int = 236; +pub const _SC_V7_ILP32_OFF32: c_int = 237; +pub const _SC_V7_ILP32_OFFBIG: c_int = 238; +pub const _SC_V7_LP64_OFF64: c_int = 239; +pub const _SC_V7_LPBIG_OFFBIG: c_int = 240; +pub const _SC_SS_REPL_MAX: c_int = 241; +pub const _SC_TRACE_EVENT_NAME_MAX: c_int = 242; +pub const _SC_TRACE_NAME_MAX: c_int = 243; +pub const _SC_TRACE_SYS_MAX: c_int = 244; +pub const _SC_TRACE_USER_EVENT_MAX: c_int = 245; +pub const _SC_XOPEN_STREAMS: c_int = 246; +pub const _SC_THREAD_ROBUST_PRIO_INHERIT: c_int = 247; +pub const _SC_THREAD_ROBUST_PRIO_PROTECT: c_int = 248; +pub const _SC_MINSIGSTKSZ: c_int = 249; +pub const _SC_SIGSTKSZ: c_int = 250; + +pub const _CS_PATH: c_int = 0; +pub const _CS_V6_WIDTH_RESTRICTED_ENVS: c_int = 1; +pub const _CS_GNU_LIBC_VERSION: c_int = 2; +pub const _CS_GNU_LIBPTHREAD_VERSION: c_int = 3; +pub const _CS_V5_WIDTH_RESTRICTED_ENVS: c_int = 4; +pub const _CS_V7_WIDTH_RESTRICTED_ENVS: c_int = 5; +pub const _CS_LFS_CFLAGS: c_int = 1000; +pub const _CS_LFS_LDFLAGS: c_int = 1001; +pub const _CS_LFS_LIBS: c_int = 1002; +pub const _CS_LFS_LINTFLAGS: c_int = 1003; +pub const _CS_LFS64_CFLAGS: c_int = 1004; +pub const _CS_LFS64_LDFLAGS: c_int = 1005; +pub const _CS_LFS64_LIBS: c_int = 1006; +pub const _CS_LFS64_LINTFLAGS: c_int = 1007; +pub const _CS_XBS5_ILP32_OFF32_CFLAGS: c_int = 1100; +pub const _CS_XBS5_ILP32_OFF32_LDFLAGS: c_int = 1101; +pub const _CS_XBS5_ILP32_OFF32_LIBS: c_int = 1102; +pub const _CS_XBS5_ILP32_OFF32_LINTFLAGS: c_int = 1103; +pub const _CS_XBS5_ILP32_OFFBIG_CFLAGS: c_int = 1104; +pub const _CS_XBS5_ILP32_OFFBIG_LDFLAGS: c_int = 1105; +pub const _CS_XBS5_ILP32_OFFBIG_LIBS: c_int = 1106; +pub const _CS_XBS5_ILP32_OFFBIG_LINTFLAGS: c_int = 1107; +pub const _CS_XBS5_LP64_OFF64_CFLAGS: c_int = 1108; +pub const _CS_XBS5_LP64_OFF64_LDFLAGS: c_int = 1109; +pub const _CS_XBS5_LP64_OFF64_LIBS: c_int = 1110; +pub const _CS_XBS5_LP64_OFF64_LINTFLAGS: c_int = 1111; +pub const _CS_XBS5_LPBIG_OFFBIG_CFLAGS: c_int = 1112; +pub const _CS_XBS5_LPBIG_OFFBIG_LDFLAGS: c_int = 1113; +pub const _CS_XBS5_LPBIG_OFFBIG_LIBS: c_int = 1114; +pub const _CS_XBS5_LPBIG_OFFBIG_LINTFLAGS: c_int = 1115; +pub const _CS_POSIX_V6_ILP32_OFF32_CFLAGS: c_int = 1116; +pub const _CS_POSIX_V6_ILP32_OFF32_LDFLAGS: c_int = 1117; +pub const _CS_POSIX_V6_ILP32_OFF32_LIBS: c_int = 1118; +pub const _CS_POSIX_V6_ILP32_OFF32_LINTFLAGS: c_int = 1119; +pub const _CS_POSIX_V6_ILP32_OFFBIG_CFLAGS: c_int = 1120; +pub const _CS_POSIX_V6_ILP32_OFFBIG_LDFLAGS: c_int = 1121; +pub const _CS_POSIX_V6_ILP32_OFFBIG_LIBS: c_int = 1122; +pub const _CS_POSIX_V6_ILP32_OFFBIG_LINTFLAGS: c_int = 1123; +pub const _CS_POSIX_V6_LP64_OFF64_CFLAGS: c_int = 1124; +pub const _CS_POSIX_V6_LP64_OFF64_LDFLAGS: c_int = 1125; +pub const _CS_POSIX_V6_LP64_OFF64_LIBS: c_int = 1126; +pub const _CS_POSIX_V6_LP64_OFF64_LINTFLAGS: c_int = 1127; +pub const _CS_POSIX_V6_LPBIG_OFFBIG_CFLAGS: c_int = 1128; +pub const _CS_POSIX_V6_LPBIG_OFFBIG_LDFLAGS: c_int = 1129; +pub const _CS_POSIX_V6_LPBIG_OFFBIG_LIBS: c_int = 1130; +pub const _CS_POSIX_V6_LPBIG_OFFBIG_LINTFLAGS: c_int = 1131; +pub const _CS_POSIX_V7_ILP32_OFF32_CFLAGS: c_int = 1132; +pub const _CS_POSIX_V7_ILP32_OFF32_LDFLAGS: c_int = 1133; +pub const _CS_POSIX_V7_ILP32_OFF32_LIBS: c_int = 
1134; +pub const _CS_POSIX_V7_ILP32_OFF32_LINTFLAGS: c_int = 1135; +pub const _CS_POSIX_V7_ILP32_OFFBIG_CFLAGS: c_int = 1136; +pub const _CS_POSIX_V7_ILP32_OFFBIG_LDFLAGS: c_int = 1137; +pub const _CS_POSIX_V7_ILP32_OFFBIG_LIBS: c_int = 1138; +pub const _CS_POSIX_V7_ILP32_OFFBIG_LINTFLAGS: c_int = 1139; +pub const _CS_POSIX_V7_LP64_OFF64_CFLAGS: c_int = 1140; +pub const _CS_POSIX_V7_LP64_OFF64_LDFLAGS: c_int = 1141; +pub const _CS_POSIX_V7_LP64_OFF64_LIBS: c_int = 1142; +pub const _CS_POSIX_V7_LP64_OFF64_LINTFLAGS: c_int = 1143; +pub const _CS_POSIX_V7_LPBIG_OFFBIG_CFLAGS: c_int = 1144; +pub const _CS_POSIX_V7_LPBIG_OFFBIG_LDFLAGS: c_int = 1145; +pub const _CS_POSIX_V7_LPBIG_OFFBIG_LIBS: c_int = 1146; +pub const _CS_POSIX_V7_LPBIG_OFFBIG_LINTFLAGS: c_int = 1147; +pub const _CS_V6_ENV: c_int = 1148; +pub const _CS_V7_ENV: c_int = 1149; + +pub const PTHREAD_PROCESS_PRIVATE: __pthread_process_shared = 0; +pub const PTHREAD_PROCESS_SHARED: __pthread_process_shared = 1; + +pub const PTHREAD_EXPLICIT_SCHED: __pthread_inheritsched = 0; +pub const PTHREAD_INHERIT_SCHED: __pthread_inheritsched = 1; + +pub const PTHREAD_SCOPE_SYSTEM: __pthread_contentionscope = 0; +pub const PTHREAD_SCOPE_PROCESS: __pthread_contentionscope = 1; + +pub const PTHREAD_CREATE_JOINABLE: __pthread_detachstate = 0; +pub const PTHREAD_CREATE_DETACHED: __pthread_detachstate = 1; + +pub const PTHREAD_PRIO_NONE: __pthread_mutex_protocol = 0; +pub const PTHREAD_PRIO_INHERIT: __pthread_mutex_protocol = 1; +pub const PTHREAD_PRIO_PROTECT: __pthread_mutex_protocol = 2; + +pub const PTHREAD_MUTEX_TIMED: __pthread_mutex_type = 0; +pub const PTHREAD_MUTEX_ERRORCHECK: __pthread_mutex_type = 1; +pub const PTHREAD_MUTEX_RECURSIVE: __pthread_mutex_type = 2; + +pub const PTHREAD_MUTEX_STALLED: __pthread_mutex_robustness = 0; +pub const PTHREAD_MUTEX_ROBUST: __pthread_mutex_robustness = 256; + +pub const RLIMIT_CPU: crate::__rlimit_resource_t = 0; +pub const RLIMIT_FSIZE: crate::__rlimit_resource_t = 1; +pub const RLIMIT_DATA: crate::__rlimit_resource_t = 2; +pub const RLIMIT_STACK: crate::__rlimit_resource_t = 3; +pub const RLIMIT_CORE: crate::__rlimit_resource_t = 4; +pub const RLIMIT_RSS: crate::__rlimit_resource_t = 5; +pub const RLIMIT_MEMLOCK: crate::__rlimit_resource_t = 6; +pub const RLIMIT_NPROC: crate::__rlimit_resource_t = 7; +pub const RLIMIT_OFILE: crate::__rlimit_resource_t = 8; +pub const RLIMIT_NOFILE: crate::__rlimit_resource_t = 8; +pub const RLIMIT_SBSIZE: crate::__rlimit_resource_t = 9; +pub const RLIMIT_AS: crate::__rlimit_resource_t = 10; +pub const RLIMIT_VMEM: crate::__rlimit_resource_t = 10; +pub const RLIMIT_NLIMITS: crate::__rlimit_resource_t = 11; +pub const RLIM_NLIMITS: crate::__rlimit_resource_t = 11; + +pub const RUSAGE_SELF: __rusage_who = 0; +pub const RUSAGE_CHILDREN: __rusage_who = -1; + +pub const PRIO_PROCESS: __priority_which = 0; +pub const PRIO_PGRP: __priority_which = 1; +pub const PRIO_USER: __priority_which = 2; + +pub const __UT_LINESIZE: usize = 32; +pub const __UT_NAMESIZE: usize = 32; +pub const __UT_HOSTSIZE: usize = 256; + +pub const SOCK_STREAM: c_int = 1; +pub const SOCK_DGRAM: c_int = 2; +pub const SOCK_RAW: c_int = 3; +pub const SOCK_RDM: c_int = 4; +pub const SOCK_SEQPACKET: c_int = 5; +pub const SOCK_CLOEXEC: c_int = 4194304; +pub const SOCK_NONBLOCK: c_int = 2048; + +pub const MSG_OOB: c_int = 1; +pub const MSG_PEEK: c_int = 2; +pub const MSG_DONTROUTE: c_int = 4; +pub const MSG_EOR: c_int = 8; +pub const MSG_TRUNC: c_int = 16; +pub const MSG_CTRUNC: c_int = 32; +pub const 
MSG_WAITALL: c_int = 64; +pub const MSG_DONTWAIT: c_int = 128; +pub const MSG_NOSIGNAL: c_int = 1024; +pub const MSG_CMSG_CLOEXEC: c_int = 0x40000000; + +pub const SCM_RIGHTS: c_int = 1; +pub const SCM_TIMESTAMP: c_int = 2; +pub const SCM_CREDS: c_int = 3; + +pub const SO_DEBUG: c_int = 1; +pub const SO_ACCEPTCONN: c_int = 2; +pub const SO_REUSEADDR: c_int = 4; +pub const SO_KEEPALIVE: c_int = 8; +pub const SO_DONTROUTE: c_int = 16; +pub const SO_BROADCAST: c_int = 32; +pub const SO_USELOOPBACK: c_int = 64; +pub const SO_LINGER: c_int = 128; +pub const SO_OOBINLINE: c_int = 256; +pub const SO_REUSEPORT: c_int = 512; +pub const SO_SNDBUF: c_int = 4097; +pub const SO_RCVBUF: c_int = 4098; +pub const SO_SNDLOWAT: c_int = 4099; +pub const SO_RCVLOWAT: c_int = 4100; +pub const SO_SNDTIMEO: c_int = 4101; +pub const SO_RCVTIMEO: c_int = 4102; +pub const SO_ERROR: c_int = 4103; +pub const SO_STYLE: c_int = 4104; +pub const SO_TYPE: c_int = 4104; + +pub const IPPROTO_IP: c_int = 0; +pub const IPPROTO_ICMP: c_int = 1; +pub const IPPROTO_IGMP: c_int = 2; +pub const IPPROTO_IPIP: c_int = 4; +pub const IPPROTO_TCP: c_int = 6; +pub const IPPROTO_EGP: c_int = 8; +pub const IPPROTO_PUP: c_int = 12; +pub const IPPROTO_UDP: c_int = 17; +pub const IPPROTO_IDP: c_int = 22; +pub const IPPROTO_TP: c_int = 29; +pub const IPPROTO_DCCP: c_int = 33; +pub const IPPROTO_IPV6: c_int = 41; +pub const IPPROTO_RSVP: c_int = 46; +pub const IPPROTO_GRE: c_int = 47; +pub const IPPROTO_ESP: c_int = 50; +pub const IPPROTO_AH: c_int = 51; +pub const IPPROTO_MTP: c_int = 92; +pub const IPPROTO_BEETPH: c_int = 94; +pub const IPPROTO_ENCAP: c_int = 98; +pub const IPPROTO_PIM: c_int = 103; +pub const IPPROTO_COMP: c_int = 108; +pub const IPPROTO_L2TP: c_int = 115; +pub const IPPROTO_SCTP: c_int = 132; +pub const IPPROTO_UDPLITE: c_int = 136; +pub const IPPROTO_MPLS: c_int = 137; +pub const IPPROTO_ETHERNET: c_int = 143; +pub const IPPROTO_RAW: c_int = 255; +pub const IPPROTO_MPTCP: c_int = 262; +pub const IPPROTO_MAX: c_int = 263; + +pub const IPPROTO_HOPOPTS: c_int = 0; +pub const IPPROTO_ROUTING: c_int = 43; +pub const IPPROTO_FRAGMENT: c_int = 44; +pub const IPPROTO_ICMPV6: c_int = 58; +pub const IPPROTO_NONE: c_int = 59; +pub const IPPROTO_DSTOPTS: c_int = 60; +pub const IPPROTO_MH: c_int = 135; + +pub const IPPORT_ECHO: in_port_t = 7; +pub const IPPORT_DISCARD: in_port_t = 9; +pub const IPPORT_SYSTAT: in_port_t = 11; +pub const IPPORT_DAYTIME: in_port_t = 13; +pub const IPPORT_NETSTAT: in_port_t = 15; +pub const IPPORT_FTP: in_port_t = 21; +pub const IPPORT_TELNET: in_port_t = 23; +pub const IPPORT_SMTP: in_port_t = 25; +pub const IPPORT_TIMESERVER: in_port_t = 37; +pub const IPPORT_NAMESERVER: in_port_t = 42; +pub const IPPORT_WHOIS: in_port_t = 43; +pub const IPPORT_MTP: in_port_t = 57; +pub const IPPORT_TFTP: in_port_t = 69; +pub const IPPORT_RJE: in_port_t = 77; +pub const IPPORT_FINGER: in_port_t = 79; +pub const IPPORT_TTYLINK: in_port_t = 87; +pub const IPPORT_SUPDUP: in_port_t = 95; +pub const IPPORT_EXECSERVER: in_port_t = 512; +pub const IPPORT_LOGINSERVER: in_port_t = 513; +pub const IPPORT_CMDSERVER: in_port_t = 514; +pub const IPPORT_EFSSERVER: in_port_t = 520; +pub const IPPORT_BIFFUDP: in_port_t = 512; +pub const IPPORT_WHOSERVER: in_port_t = 513; +pub const IPPORT_ROUTESERVER: in_port_t = 520; +pub const IPPORT_USERRESERVED: in_port_t = 5000; + +pub const DT_UNKNOWN: c_uchar = 0; +pub const DT_FIFO: c_uchar = 1; +pub const DT_CHR: c_uchar = 2; +pub const DT_DIR: c_uchar = 4; +pub const DT_BLK: c_uchar = 6; 
+pub const DT_REG: c_uchar = 8; +pub const DT_LNK: c_uchar = 10; +pub const DT_SOCK: c_uchar = 12; +pub const DT_WHT: c_uchar = 14; + +pub const ST_RDONLY: c_ulong = 1; +pub const ST_NOSUID: c_ulong = 2; +pub const ST_NOEXEC: c_ulong = 8; +pub const ST_SYNCHRONOUS: c_ulong = 16; +pub const ST_NOATIME: c_ulong = 32; +pub const ST_RELATIME: c_ulong = 64; + +pub const RTLD_DI_LMID: c_int = 1; +pub const RTLD_DI_LINKMAP: c_int = 2; +pub const RTLD_DI_CONFIGADDR: c_int = 3; +pub const RTLD_DI_SERINFO: c_int = 4; +pub const RTLD_DI_SERINFOSIZE: c_int = 5; +pub const RTLD_DI_ORIGIN: c_int = 6; +pub const RTLD_DI_PROFILENAME: c_int = 7; +pub const RTLD_DI_PROFILEOUT: c_int = 8; +pub const RTLD_DI_TLS_MODID: c_int = 9; +pub const RTLD_DI_TLS_DATA: c_int = 10; +pub const RTLD_DI_PHDR: c_int = 11; +pub const RTLD_DI_MAX: c_int = 11; + +pub const SI_ASYNCIO: c_int = -4; +pub const SI_MESGQ: c_int = -3; +pub const SI_TIMER: c_int = -2; +pub const SI_QUEUE: c_int = -1; +pub const SI_USER: c_int = 0; + +pub const ILL_ILLOPC: c_int = 1; +pub const ILL_ILLOPN: c_int = 2; +pub const ILL_ILLADR: c_int = 3; +pub const ILL_ILLTRP: c_int = 4; +pub const ILL_PRVOPC: c_int = 5; +pub const ILL_PRVREG: c_int = 6; +pub const ILL_COPROC: c_int = 7; +pub const ILL_BADSTK: c_int = 8; + +pub const FPE_INTDIV: c_int = 1; +pub const FPE_INTOVF: c_int = 2; +pub const FPE_FLTDIV: c_int = 3; +pub const FPE_FLTOVF: c_int = 4; +pub const FPE_FLTUND: c_int = 5; +pub const FPE_FLTRES: c_int = 6; +pub const FPE_FLTINV: c_int = 7; +pub const FPE_FLTSUB: c_int = 8; + +pub const SEGV_MAPERR: c_int = 1; +pub const SEGV_ACCERR: c_int = 2; + +pub const BUS_ADRALN: c_int = 1; +pub const BUS_ADRERR: c_int = 2; +pub const BUS_OBJERR: c_int = 3; + +pub const TRAP_BRKPT: c_int = 1; +pub const TRAP_TRACE: c_int = 2; + +pub const CLD_EXITED: c_int = 1; +pub const CLD_KILLED: c_int = 2; +pub const CLD_DUMPED: c_int = 3; +pub const CLD_TRAPPED: c_int = 4; +pub const CLD_STOPPED: c_int = 5; +pub const CLD_CONTINUED: c_int = 6; + +pub const POLL_IN: c_int = 1; +pub const POLL_OUT: c_int = 2; +pub const POLL_MSG: c_int = 3; +pub const POLL_ERR: c_int = 4; +pub const POLL_PRI: c_int = 5; +pub const POLL_HUP: c_int = 6; + +pub const SIGEV_SIGNAL: c_int = 0; +pub const SIGEV_NONE: c_int = 1; +pub const SIGEV_THREAD: c_int = 2; + +pub const REG_GS: c_uint = 0; +pub const REG_FS: c_uint = 1; +pub const REG_ES: c_uint = 2; +pub const REG_DS: c_uint = 3; +pub const REG_EDI: c_uint = 4; +pub const REG_ESI: c_uint = 5; +pub const REG_EBP: c_uint = 6; +pub const REG_ESP: c_uint = 7; +pub const REG_EBX: c_uint = 8; +pub const REG_EDX: c_uint = 9; +pub const REG_ECX: c_uint = 10; +pub const REG_EAX: c_uint = 11; +pub const REG_TRAPNO: c_uint = 12; +pub const REG_ERR: c_uint = 13; +pub const REG_EIP: c_uint = 14; +pub const REG_CS: c_uint = 15; +pub const REG_EFL: c_uint = 16; +pub const REG_UESP: c_uint = 17; +pub const REG_SS: c_uint = 18; + +pub const IOC_VOID: __ioctl_dir = 0; +pub const IOC_OUT: __ioctl_dir = 1; +pub const IOC_IN: __ioctl_dir = 2; +pub const IOC_INOUT: __ioctl_dir = 3; + +pub const IOC_8: __ioctl_datum = 0; +pub const IOC_16: __ioctl_datum = 1; +pub const IOC_32: __ioctl_datum = 2; +pub const IOC_64: __ioctl_datum = 3; + +pub const TCP_ESTABLISHED: c_uint = 1; +pub const TCP_SYN_SENT: c_uint = 2; +pub const TCP_SYN_RECV: c_uint = 3; +pub const TCP_FIN_WAIT1: c_uint = 4; +pub const TCP_FIN_WAIT2: c_uint = 5; +pub const TCP_TIME_WAIT: c_uint = 6; +pub const TCP_CLOSE: c_uint = 7; +pub const TCP_CLOSE_WAIT: c_uint = 8; +pub const 
TCP_LAST_ACK: c_uint = 9;
+pub const TCP_LISTEN: c_uint = 10;
+pub const TCP_CLOSING: c_uint = 11;
+
+pub const TCP_CA_Open: tcp_ca_state = 0;
+pub const TCP_CA_Disorder: tcp_ca_state = 1;
+pub const TCP_CA_CWR: tcp_ca_state = 2;
+pub const TCP_CA_Recovery: tcp_ca_state = 3;
+pub const TCP_CA_Loss: tcp_ca_state = 4;
+
+pub const TCP_NO_QUEUE: c_uint = 0;
+pub const TCP_RECV_QUEUE: c_uint = 1;
+pub const TCP_SEND_QUEUE: c_uint = 2;
+pub const TCP_QUEUES_NR: c_uint = 3;
+
+pub const P_ALL: idtype_t = 0;
+pub const P_PID: idtype_t = 1;
+pub const P_PGID: idtype_t = 2;
+
+pub const SS_ONSTACK: c_int = 1;
+pub const SS_DISABLE: c_int = 4;
+
+pub const SHUT_RD: c_int = 0;
+pub const SHUT_WR: c_int = 1;
+pub const SHUT_RDWR: c_int = 2;
+pub const PTHREAD_MUTEX_INITIALIZER: pthread_mutex_t = pthread_mutex_t {
+    __lock: 0,
+    __owner_id: 0,
+    __cnt: 0,
+    __shpid: 0,
+    __type: PTHREAD_MUTEX_TIMED as c_int,
+    __flags: 0,
+    __reserved1: 0,
+    __reserved2: 0,
+};
+pub const PTHREAD_COND_INITIALIZER: pthread_cond_t = pthread_cond_t {
+    __lock: __PTHREAD_SPIN_LOCK_INITIALIZER,
+    __queue: 0i64 as *mut __pthread,
+    __attr: 0i64 as *mut __pthread_condattr,
+    __wrefs: 0,
+    __data: 0i64 as *mut c_void,
+};
+pub const PTHREAD_RWLOCK_INITIALIZER: pthread_rwlock_t = pthread_rwlock_t {
+    __held: __PTHREAD_SPIN_LOCK_INITIALIZER,
+    __lock: __PTHREAD_SPIN_LOCK_INITIALIZER,
+    __readers: 0,
+    __readerqueue: 0i64 as *mut __pthread,
+    __writerqueue: 0i64 as *mut __pthread,
+    __attr: 0i64 as *mut __pthread_rwlockattr,
+    __data: 0i64 as *mut c_void,
+};
+pub const PTHREAD_STACK_MIN: size_t = 0;
+
+// Non-public helper constants
+const _UTSNAME_LENGTH: usize = 1024;
+
+const fn CMSG_ALIGN(len: usize) -> usize {
+    (len + size_of::<size_t>() - 1) & !(size_of::<size_t>() - 1)
+}
+
+// functions
+f! {
+    pub fn CMSG_FIRSTHDR(mhdr: *const msghdr) -> *mut cmsghdr {
+        if (*mhdr).msg_controllen as usize >= size_of::<cmsghdr>() {
+            (*mhdr).msg_control.cast::<cmsghdr>()
+        } else {
+            core::ptr::null_mut::<cmsghdr>()
+        }
+    }
+
+    pub fn CMSG_DATA(cmsg: *const cmsghdr) -> *mut c_uchar {
+        (cmsg as *mut c_uchar).offset(CMSG_ALIGN(size_of::<cmsghdr>()) as isize)
+    }
+
+    pub const fn CMSG_SPACE(length: c_uint) -> c_uint {
+        (CMSG_ALIGN(length as usize) + CMSG_ALIGN(size_of::<cmsghdr>())) as c_uint
+    }
+
+    pub const fn CMSG_LEN(length: c_uint) -> c_uint {
+        CMSG_ALIGN(size_of::<cmsghdr>()) as c_uint + length
+    }
+
+    pub fn CMSG_NXTHDR(mhdr: *const msghdr, cmsg: *const cmsghdr) -> *mut cmsghdr {
+        if ((*cmsg).cmsg_len as usize) < size_of::<cmsghdr>() {
+            return core::ptr::null_mut::<cmsghdr>();
+        }
+        let next = (cmsg as usize + CMSG_ALIGN((*cmsg).cmsg_len as usize)) as *mut cmsghdr;
+        let max = (*mhdr).msg_control as usize + (*mhdr).msg_controllen as usize;
+        if (next.offset(1)) as usize > max
+            || next as usize + CMSG_ALIGN((*next).cmsg_len as usize) > max
+        {
+            core::ptr::null_mut::<cmsghdr>()
+        } else {
+            next.cast::<cmsghdr>()
+        }
+    }
+
+    pub fn CPU_ALLOC_SIZE(count: c_int) -> size_t {
+        let _dummy: cpu_set_t = mem::zeroed();
+        let size_in_bits = 8 * size_of_val(&_dummy.bits[0]);
+        ((count as size_t + size_in_bits - 1) / 8) as size_t
+    }
+
+    pub fn CPU_ZERO(cpuset: &mut cpu_set_t) -> () {
+        for slot in cpuset.bits.iter_mut() {
+            *slot = 0;
+        }
+    }
+
+    pub fn CPU_SET(cpu: usize, cpuset: &mut cpu_set_t) -> () {
+        let size_in_bits = 8 * size_of_val(&cpuset.bits[0]); // 32, 64 etc
+        let (idx, offset) = (cpu / size_in_bits, cpu % size_in_bits);
+        cpuset.bits[idx] |= 1 << offset;
+    }
+
+    pub fn CPU_CLR(cpu: usize, cpuset: &mut cpu_set_t) -> () {
+        let size_in_bits = 8 * size_of_val(&cpuset.bits[0]); // 32, 64 etc
+        let (idx, offset) = (cpu / size_in_bits, cpu % size_in_bits);
+        cpuset.bits[idx] &= !(1 << offset);
+    }
+
+    pub fn CPU_ISSET(cpu: usize, cpuset: &cpu_set_t) -> bool {
+        let size_in_bits = 8 * size_of_val(&cpuset.bits[0]);
+        let (idx, offset) = (cpu / size_in_bits, cpu % size_in_bits);
+        0 != (cpuset.bits[idx] & (1 << offset))
+    }
+
+    pub fn CPU_COUNT_S(size: usize, cpuset: &cpu_set_t) -> c_int {
+        let mut s: u32 = 0;
+        let size_of_mask = size_of_val(&cpuset.bits[0]);
+        for i in cpuset.bits[..(size / size_of_mask)].iter() {
+            s += i.count_ones();
+        }
+        s as c_int
+    }
+
+    pub fn CPU_COUNT(cpuset: &cpu_set_t) -> c_int {
+        CPU_COUNT_S(size_of::<cpu_set_t>(), cpuset)
+    }
+
+    pub fn CPU_EQUAL(set1: &cpu_set_t, set2: &cpu_set_t) -> bool {
+        set1.bits == set2.bits
+    }
+
+    pub fn IPTOS_TOS(tos: u8) -> u8 {
+        tos & IPTOS_TOS_MASK
+    }
+
+    pub fn IPTOS_PREC(tos: u8) -> u8 {
+        tos & IPTOS_PREC_MASK
+    }
+
+    pub fn FD_CLR(fd: c_int, set: *mut fd_set) -> () {
+        let fd = fd as usize;
+        let size = size_of_val(&(*set).fds_bits[0]) * 8;
+        (*set).fds_bits[fd / size] &= !(1 << (fd % size));
+        return;
+    }
+
+    pub fn FD_ISSET(fd: c_int, set: *const fd_set) -> bool {
+        let fd = fd as usize;
+        let size = size_of_val(&(*set).fds_bits[0]) * 8;
+        return ((*set).fds_bits[fd / size] & (1 << (fd % size))) != 0;
+    }
+
+    pub fn FD_SET(fd: c_int, set: *mut fd_set) -> () {
+        let fd = fd as usize;
+        let size = size_of_val(&(*set).fds_bits[0]) * 8;
+        (*set).fds_bits[fd / size] |= 1 << (fd % size);
+        return;
+    }
+
+    pub fn FD_ZERO(set: *mut fd_set) -> () {
+        for slot in (*set).fds_bits.iter_mut() {
+            *slot = 0;
+        }
+    }
+}
+
+extern "C" {
+    pub fn lutimes(file: *const c_char, times: *const crate::timeval) -> c_int;
+
+    pub fn futimes(fd: c_int, times: *const crate::timeval) -> c_int;
+    pub fn futimens(__fd: c_int, __times: *const crate::timespec) -> c_int;
+
+    pub fn utimensat(
+        dirfd: c_int,
+        path: *const c_char,
+        times: *const crate::timespec,
+        flag: c_int,
+    ) -> c_int;
+
+    pub fn mkfifoat(__fd: c_int, __path: *const c_char, __mode: __mode_t) -> c_int;
+
+    pub fn mknodat(dirfd: c_int, pathname: *const c_char, mode: mode_t, dev: dev_t) -> c_int;
+
+    pub fn __libc_current_sigrtmin() -> c_int;
+
+    pub fn __libc_current_sigrtmax() -> c_int;
+
+    pub fn wait4(
+        pid: crate::pid_t,
+        status: *mut c_int,
+        options: c_int,
+        rusage: *mut crate::rusage,
+    ) -> crate::pid_t;
+
+    pub fn waitid(
+        idtype: idtype_t,
+        id: id_t,
+        infop: *mut crate::siginfo_t,
+        options: c_int,
+    ) -> c_int;
+
+    pub fn sigwait(__set: *const sigset_t, __sig: *mut c_int) -> c_int;
+
+    pub fn sigsuspend(mask: *const crate::sigset_t) -> c_int;
+    pub fn sigtimedwait(
+        set: *const sigset_t,
+        info: *mut siginfo_t,
+        timeout: *const crate::timespec,
+    ) -> c_int;
+    pub fn sigwaitinfo(set: *const sigset_t, info: *mut siginfo_t) -> c_int;
+
+    pub fn sigaltstack(ss: *const stack_t, oss: *mut stack_t) -> c_int;
+
+    pub fn ioctl(__fd: c_int, __request: c_ulong, ...)
-> c_int; + + pub fn pipe2(fds: *mut c_int, flags: c_int) -> c_int; + + pub fn dup3(oldfd: c_int, newfd: c_int, flags: c_int) -> c_int; + + pub fn pread64(fd: c_int, buf: *mut c_void, count: size_t, offset: off64_t) -> ssize_t; + pub fn pwrite64(fd: c_int, buf: *const c_void, count: size_t, offset: off64_t) -> ssize_t; + + pub fn readv(__fd: c_int, __iovec: *const crate::iovec, __count: c_int) -> ssize_t; + pub fn writev(__fd: c_int, __iovec: *const crate::iovec, __count: c_int) -> ssize_t; + + pub fn preadv( + __fd: c_int, + __iovec: *const crate::iovec, + __count: c_int, + __offset: __off_t, + ) -> ssize_t; + pub fn pwritev( + __fd: c_int, + __iovec: *const crate::iovec, + __count: c_int, + __offset: __off_t, + ) -> ssize_t; + + pub fn preadv64(fd: c_int, iov: *const crate::iovec, iovcnt: c_int, offset: off64_t) + -> ssize_t; + pub fn pwritev64( + fd: c_int, + iov: *const crate::iovec, + iovcnt: c_int, + offset: off64_t, + ) -> ssize_t; + + pub fn fread_unlocked( + buf: *mut c_void, + size: size_t, + nobj: size_t, + stream: *mut crate::FILE, + ) -> size_t; + + pub fn aio_read(aiocbp: *mut aiocb) -> c_int; + pub fn aio_write(aiocbp: *mut aiocb) -> c_int; + pub fn aio_fsync(op: c_int, aiocbp: *mut aiocb) -> c_int; + pub fn aio_error(aiocbp: *const aiocb) -> c_int; + pub fn aio_return(aiocbp: *mut aiocb) -> ssize_t; + pub fn aio_suspend( + aiocb_list: *const *const aiocb, + nitems: c_int, + timeout: *const crate::timespec, + ) -> c_int; + pub fn aio_cancel(fd: c_int, aiocbp: *mut aiocb) -> c_int; + pub fn lio_listio( + mode: c_int, + aiocb_list: *const *mut aiocb, + nitems: c_int, + sevp: *mut crate::sigevent, + ) -> c_int; + + pub fn mq_open(name: *const c_char, oflag: c_int, ...) -> crate::mqd_t; + pub fn mq_close(mqd: crate::mqd_t) -> c_int; + pub fn mq_unlink(name: *const c_char) -> c_int; + pub fn mq_receive( + mqd: crate::mqd_t, + msg_ptr: *mut c_char, + msg_len: size_t, + msg_prio: *mut c_uint, + ) -> ssize_t; + pub fn mq_timedreceive( + mqd: crate::mqd_t, + msg_ptr: *mut c_char, + msg_len: size_t, + msg_prio: *mut c_uint, + abs_timeout: *const crate::timespec, + ) -> ssize_t; + pub fn mq_send( + mqd: crate::mqd_t, + msg_ptr: *const c_char, + msg_len: size_t, + msg_prio: c_uint, + ) -> c_int; + pub fn mq_timedsend( + mqd: crate::mqd_t, + msg_ptr: *const c_char, + msg_len: size_t, + msg_prio: c_uint, + abs_timeout: *const crate::timespec, + ) -> c_int; + pub fn mq_getattr(mqd: crate::mqd_t, attr: *mut crate::mq_attr) -> c_int; + pub fn mq_setattr( + mqd: crate::mqd_t, + newattr: *const crate::mq_attr, + oldattr: *mut crate::mq_attr, + ) -> c_int; + + pub fn lseek64(__fd: c_int, __offset: __off64_t, __whence: c_int) -> __off64_t; + + pub fn lseek(__fd: c_int, __offset: __off_t, __whence: c_int) -> __off_t; + + pub fn fgetpos64(stream: *mut crate::FILE, ptr: *mut fpos64_t) -> c_int; + pub fn fseeko64(stream: *mut crate::FILE, offset: off64_t, whence: c_int) -> c_int; + pub fn fsetpos64(stream: *mut crate::FILE, ptr: *const fpos64_t) -> c_int; + pub fn ftello64(stream: *mut crate::FILE) -> off64_t; + + pub fn bind(__fd: c_int, __addr: *const sockaddr, __len: crate::socklen_t) -> c_int; + + pub fn accept4( + fd: c_int, + addr: *mut crate::sockaddr, + len: *mut crate::socklen_t, + flg: c_int, + ) -> c_int; + + pub fn ppoll( + fds: *mut crate::pollfd, + nfds: nfds_t, + timeout: *const crate::timespec, + sigmask: *const sigset_t, + ) -> c_int; + + pub fn recvmsg(__fd: c_int, __message: *mut msghdr, __flags: c_int) -> ssize_t; + + pub fn sendmsg(__fd: c_int, __message: *const msghdr, __flags: 
c_int) -> ssize_t; + + pub fn recvfrom( + socket: c_int, + buf: *mut c_void, + len: size_t, + flags: c_int, + addr: *mut crate::sockaddr, + addrlen: *mut crate::socklen_t, + ) -> ssize_t; + + pub fn sendfile(out_fd: c_int, in_fd: c_int, offset: *mut off_t, count: size_t) -> ssize_t; + pub fn sendfile64(out_fd: c_int, in_fd: c_int, offset: *mut off64_t, count: size_t) -> ssize_t; + + pub fn shutdown(__fd: c_int, __how: c_int) -> c_int; + + pub fn sethostname(name: *const c_char, len: size_t) -> c_int; + pub fn getdomainname(name: *mut c_char, len: size_t) -> c_int; + pub fn setdomainname(name: *const c_char, len: size_t) -> c_int; + pub fn if_nameindex() -> *mut if_nameindex; + pub fn if_freenameindex(ptr: *mut if_nameindex); + + pub fn getnameinfo( + sa: *const crate::sockaddr, + salen: crate::socklen_t, + host: *mut c_char, + hostlen: crate::socklen_t, + serv: *mut c_char, + servlen: crate::socklen_t, + flags: c_int, + ) -> c_int; + + pub fn getifaddrs(ifap: *mut *mut crate::ifaddrs) -> c_int; + pub fn freeifaddrs(ifa: *mut crate::ifaddrs); + + pub fn uname(buf: *mut crate::utsname) -> c_int; + + pub fn gethostid() -> c_long; + pub fn sethostid(hostid: c_long) -> c_int; + + pub fn setpwent(); + pub fn endpwent(); + pub fn getpwent() -> *mut passwd; + pub fn setgrent(); + pub fn endgrent(); + pub fn getgrent() -> *mut crate::group; + pub fn setspent(); + pub fn endspent(); + pub fn getspent() -> *mut spwd; + + pub fn getspnam(name: *const c_char) -> *mut spwd; + + pub fn getpwent_r( + pwd: *mut crate::passwd, + buf: *mut c_char, + buflen: size_t, + result: *mut *mut crate::passwd, + ) -> c_int; + pub fn getgrent_r( + grp: *mut crate::group, + buf: *mut c_char, + buflen: size_t, + result: *mut *mut crate::group, + ) -> c_int; + pub fn fgetpwent_r( + stream: *mut crate::FILE, + pwd: *mut crate::passwd, + buf: *mut c_char, + buflen: size_t, + result: *mut *mut crate::passwd, + ) -> c_int; + pub fn fgetgrent_r( + stream: *mut crate::FILE, + grp: *mut crate::group, + buf: *mut c_char, + buflen: size_t, + result: *mut *mut crate::group, + ) -> c_int; + + pub fn putpwent(p: *const crate::passwd, stream: *mut crate::FILE) -> c_int; + pub fn putgrent(grp: *const crate::group, stream: *mut crate::FILE) -> c_int; + + pub fn getpwnam_r( + name: *const c_char, + pwd: *mut passwd, + buf: *mut c_char, + buflen: size_t, + result: *mut *mut passwd, + ) -> c_int; + + pub fn getpwuid_r( + uid: crate::uid_t, + pwd: *mut passwd, + buf: *mut c_char, + buflen: size_t, + result: *mut *mut passwd, + ) -> c_int; + + pub fn fgetspent_r( + fp: *mut crate::FILE, + spbuf: *mut crate::spwd, + buf: *mut c_char, + buflen: size_t, + spbufp: *mut *mut crate::spwd, + ) -> c_int; + pub fn sgetspent_r( + s: *const c_char, + spbuf: *mut crate::spwd, + buf: *mut c_char, + buflen: size_t, + spbufp: *mut *mut crate::spwd, + ) -> c_int; + pub fn getspent_r( + spbuf: *mut crate::spwd, + buf: *mut c_char, + buflen: size_t, + spbufp: *mut *mut crate::spwd, + ) -> c_int; + + pub fn getspnam_r( + name: *const c_char, + spbuf: *mut spwd, + buf: *mut c_char, + buflen: size_t, + spbufp: *mut *mut spwd, + ) -> c_int; + + // mntent.h + pub fn getmntent_r( + stream: *mut crate::FILE, + mntbuf: *mut crate::mntent, + buf: *mut c_char, + buflen: c_int, + ) -> *mut crate::mntent; + + pub fn utmpname(file: *const c_char) -> c_int; + pub fn utmpxname(file: *const c_char) -> c_int; + pub fn getutxent() -> *mut utmpx; + pub fn getutxid(ut: *const utmpx) -> *mut utmpx; + pub fn getutxline(ut: *const utmpx) -> *mut utmpx; + pub fn pututxline(ut: *const 
utmpx) -> *mut utmpx; + pub fn setutxent(); + pub fn endutxent(); + + pub fn getresuid( + ruid: *mut crate::uid_t, + euid: *mut crate::uid_t, + suid: *mut crate::uid_t, + ) -> c_int; + pub fn getresgid( + rgid: *mut crate::gid_t, + egid: *mut crate::gid_t, + sgid: *mut crate::gid_t, + ) -> c_int; + pub fn setresuid(ruid: crate::uid_t, euid: crate::uid_t, suid: crate::uid_t) -> c_int; + pub fn setresgid(rgid: crate::gid_t, egid: crate::gid_t, sgid: crate::gid_t) -> c_int; + + pub fn initgroups(user: *const c_char, group: crate::gid_t) -> c_int; + + pub fn getgrgid(gid: crate::gid_t) -> *mut crate::group; + pub fn getgrgid_r( + gid: crate::gid_t, + grp: *mut crate::group, + buf: *mut c_char, + buflen: size_t, + result: *mut *mut crate::group, + ) -> c_int; + + pub fn getgrnam(name: *const c_char) -> *mut crate::group; + pub fn getgrnam_r( + name: *const c_char, + grp: *mut crate::group, + buf: *mut c_char, + buflen: size_t, + result: *mut *mut crate::group, + ) -> c_int; + + pub fn getgrouplist( + user: *const c_char, + group: crate::gid_t, + groups: *mut crate::gid_t, + ngroups: *mut c_int, + ) -> c_int; + + pub fn setgroups(ngroups: size_t, ptr: *const crate::gid_t) -> c_int; + + pub fn acct(filename: *const c_char) -> c_int; + + pub fn setmntent(filename: *const c_char, ty: *const c_char) -> *mut crate::FILE; + pub fn getmntent(stream: *mut crate::FILE) -> *mut crate::mntent; + pub fn addmntent(stream: *mut crate::FILE, mnt: *const crate::mntent) -> c_int; + pub fn endmntent(streamp: *mut crate::FILE) -> c_int; + pub fn hasmntopt(mnt: *const crate::mntent, opt: *const c_char) -> *mut c_char; + + pub fn pthread_create( + native: *mut crate::pthread_t, + attr: *const crate::pthread_attr_t, + f: extern "C" fn(*mut c_void) -> *mut c_void, + value: *mut c_void, + ) -> c_int; + pub fn pthread_kill(__threadid: crate::pthread_t, __signo: c_int) -> c_int; + pub fn pthread_cancel(thread: crate::pthread_t) -> c_int; + pub fn __pthread_equal(__t1: __pthread_t, __t2: __pthread_t) -> c_int; + + pub fn pthread_getattr_np(__thr: crate::pthread_t, __attr: *mut pthread_attr_t) -> c_int; + + pub fn pthread_attr_getguardsize( + __attr: *const pthread_attr_t, + __guardsize: *mut size_t, + ) -> c_int; + pub fn pthread_attr_setguardsize(attr: *mut crate::pthread_attr_t, guardsize: size_t) -> c_int; + + pub fn pthread_attr_getstack( + __attr: *const pthread_attr_t, + __stackaddr: *mut *mut c_void, + __stacksize: *mut size_t, + ) -> c_int; + + pub fn pthread_mutexattr_getpshared( + attr: *const pthread_mutexattr_t, + pshared: *mut c_int, + ) -> c_int; + pub fn pthread_mutexattr_setpshared(attr: *mut pthread_mutexattr_t, pshared: c_int) -> c_int; + + pub fn pthread_mutex_timedlock( + lock: *mut pthread_mutex_t, + abstime: *const crate::timespec, + ) -> c_int; + + pub fn pthread_rwlockattr_getpshared( + attr: *const pthread_rwlockattr_t, + val: *mut c_int, + ) -> c_int; + pub fn pthread_rwlockattr_setpshared(attr: *mut pthread_rwlockattr_t, val: c_int) -> c_int; + + pub fn pthread_condattr_getclock( + attr: *const pthread_condattr_t, + clock_id: *mut clockid_t, + ) -> c_int; + pub fn pthread_condattr_setclock( + __attr: *mut pthread_condattr_t, + __clock_id: __clockid_t, + ) -> c_int; + pub fn pthread_condattr_getpshared( + attr: *const pthread_condattr_t, + pshared: *mut c_int, + ) -> c_int; + pub fn pthread_condattr_setpshared(attr: *mut pthread_condattr_t, pshared: c_int) -> c_int; + + pub fn pthread_once(control: *mut pthread_once_t, routine: extern "C" fn()) -> c_int; + + pub fn pthread_barrierattr_init(attr: 
*mut crate::pthread_barrierattr_t) -> c_int;
+    pub fn pthread_barrierattr_destroy(attr: *mut crate::pthread_barrierattr_t) -> c_int;
+    pub fn pthread_barrierattr_getpshared(
+        attr: *const crate::pthread_barrierattr_t,
+        shared: *mut c_int,
+    ) -> c_int;
+    pub fn pthread_barrierattr_setpshared(
+        attr: *mut crate::pthread_barrierattr_t,
+        shared: c_int,
+    ) -> c_int;
+    pub fn pthread_barrier_init(
+        barrier: *mut pthread_barrier_t,
+        attr: *const crate::pthread_barrierattr_t,
+        count: c_uint,
+    ) -> c_int;
+    pub fn pthread_barrier_destroy(barrier: *mut pthread_barrier_t) -> c_int;
+    pub fn pthread_barrier_wait(barrier: *mut pthread_barrier_t) -> c_int;
+    pub fn pthread_spin_init(lock: *mut crate::pthread_spinlock_t, pshared: c_int) -> c_int;
+    pub fn pthread_spin_destroy(lock: *mut crate::pthread_spinlock_t) -> c_int;
+    pub fn pthread_spin_lock(lock: *mut crate::pthread_spinlock_t) -> c_int;
+    pub fn pthread_spin_trylock(lock: *mut crate::pthread_spinlock_t) -> c_int;
+    pub fn pthread_spin_unlock(lock: *mut crate::pthread_spinlock_t) -> c_int;
+    pub fn pthread_atfork(
+        prepare: Option<unsafe extern "C" fn()>,
+        parent: Option<unsafe extern "C" fn()>,
+        child: Option<unsafe extern "C" fn()>,
+    ) -> c_int;
+
+    pub fn pthread_sigmask(
+        __how: c_int,
+        __newmask: *const __sigset_t,
+        __oldmask: *mut __sigset_t,
+    ) -> c_int;
+
+    pub fn sched_getparam(pid: crate::pid_t, param: *mut crate::sched_param) -> c_int;
+    pub fn sched_setparam(pid: crate::pid_t, param: *const crate::sched_param) -> c_int;
+    pub fn sched_getscheduler(pid: crate::pid_t) -> c_int;
+    pub fn sched_setscheduler(
+        pid: crate::pid_t,
+        policy: c_int,
+        param: *const crate::sched_param,
+    ) -> c_int;
+    pub fn pthread_getschedparam(
+        native: crate::pthread_t,
+        policy: *mut c_int,
+        param: *mut crate::sched_param,
+    ) -> c_int;
+    pub fn pthread_setschedparam(
+        native: crate::pthread_t,
+        policy: c_int,
+        param: *const crate::sched_param,
+    ) -> c_int;
+
+    pub fn pthread_getcpuclockid(thread: crate::pthread_t, clk_id: *mut crate::clockid_t) -> c_int;
+
+    pub fn sem_init(sem: *mut sem_t, pshared: c_int, value: c_uint) -> c_int;
+    pub fn sem_destroy(sem: *mut sem_t) -> c_int;
+    pub fn sem_timedwait(sem: *mut sem_t, abstime: *const crate::timespec) -> c_int;
+    pub fn sem_getvalue(sem: *mut sem_t, sval: *mut c_int) -> c_int;
+
+    pub fn clock_getres(__clock_id: clockid_t, __res: *mut crate::timespec) -> c_int;
+    pub fn clock_gettime(__clock_id: clockid_t, __tp: *mut crate::timespec) -> c_int;
+    pub fn clock_settime(__clock_id: clockid_t, __tp: *const crate::timespec) -> c_int;
+    pub fn clock_getcpuclockid(pid: crate::pid_t, clk_id: *mut crate::clockid_t) -> c_int;
+
+    pub fn clock_nanosleep(
+        clk_id: crate::clockid_t,
+        flags: c_int,
+        rqtp: *const crate::timespec,
+        rmtp: *mut crate::timespec,
+    ) -> c_int;
+
+    pub fn gettimeofday(tp: *mut crate::timeval, tz: *mut crate::timezone) -> c_int;
+    pub fn settimeofday(tv: *const crate::timeval, tz: *const crate::timezone) -> c_int;
+
+    pub fn asctime_r(tm: *const crate::tm, buf: *mut c_char) -> *mut c_char;
+    pub fn ctime_r(timep: *const time_t, buf: *mut c_char) -> *mut c_char;
+
+    pub fn strftime(
+        s: *mut c_char,
+        max: size_t,
+        format: *const c_char,
+        tm: *const crate::tm,
+    ) -> size_t;
+    pub fn strptime(s: *const c_char, format: *const c_char, tm: *mut crate::tm) -> *mut c_char;
+
+    pub fn timer_create(
+        clockid: crate::clockid_t,
+        sevp: *mut crate::sigevent,
+        timerid: *mut crate::timer_t,
+    ) -> c_int;
+    pub fn timer_delete(timerid: crate::timer_t) -> c_int;
+    pub fn timer_getoverrun(timerid: crate::timer_t) -> c_int;
+    pub fn
timer_gettime(timerid: crate::timer_t, curr_value: *mut crate::itimerspec) -> c_int; + pub fn timer_settime( + timerid: crate::timer_t, + flags: c_int, + new_value: *const crate::itimerspec, + old_value: *mut crate::itimerspec, + ) -> c_int; + + pub fn fstat(__fd: c_int, __buf: *mut stat) -> c_int; + pub fn fstat64(__fd: c_int, __buf: *mut stat64) -> c_int; + + pub fn fstatat(__fd: c_int, __file: *const c_char, __buf: *mut stat, __flag: c_int) -> c_int; + pub fn fstatat64( + __fd: c_int, + __file: *const c_char, + __buf: *mut stat64, + __flag: c_int, + ) -> c_int; + + pub fn statx( + dirfd: c_int, + pathname: *const c_char, + flags: c_int, + mask: c_uint, + statxbuf: *mut statx, + ) -> c_int; + + pub fn ftruncate(__fd: c_int, __length: __off_t) -> c_int; + pub fn ftruncate64(__fd: c_int, __length: __off64_t) -> c_int; + pub fn truncate64(__file: *const c_char, __length: __off64_t) -> c_int; + + pub fn lstat(__file: *const c_char, __buf: *mut stat) -> c_int; + pub fn lstat64(__file: *const c_char, __buf: *mut stat64) -> c_int; + + pub fn statfs(path: *const c_char, buf: *mut statfs) -> c_int; + pub fn statfs64(__file: *const c_char, __buf: *mut statfs64) -> c_int; + pub fn fstatfs(fd: c_int, buf: *mut statfs) -> c_int; + pub fn fstatfs64(__fildes: c_int, __buf: *mut statfs64) -> c_int; + + pub fn statvfs(__file: *const c_char, __buf: *mut statvfs) -> c_int; + pub fn statvfs64(__file: *const c_char, __buf: *mut statvfs64) -> c_int; + pub fn fstatvfs(__fildes: c_int, __buf: *mut statvfs) -> c_int; + pub fn fstatvfs64(__fildes: c_int, __buf: *mut statvfs64) -> c_int; + + pub fn open(__file: *const c_char, __oflag: c_int, ...) -> c_int; + pub fn open64(__file: *const c_char, __oflag: c_int, ...) -> c_int; + + pub fn openat(__fd: c_int, __file: *const c_char, __oflag: c_int, ...) -> c_int; + pub fn openat64(__fd: c_int, __file: *const c_char, __oflag: c_int, ...) 
-> c_int; + + pub fn fopen64(filename: *const c_char, mode: *const c_char) -> *mut crate::FILE; + pub fn freopen64( + filename: *const c_char, + mode: *const c_char, + file: *mut crate::FILE, + ) -> *mut crate::FILE; + + pub fn creat64(path: *const c_char, mode: mode_t) -> c_int; + + pub fn mkostemp(template: *mut c_char, flags: c_int) -> c_int; + pub fn mkostemps(template: *mut c_char, suffixlen: c_int, flags: c_int) -> c_int; + pub fn mkstemps(template: *mut c_char, suffixlen: c_int) -> c_int; + pub fn tmpfile64() -> *mut crate::FILE; + + pub fn popen(command: *const c_char, mode: *const c_char) -> *mut crate::FILE; + + pub fn getdtablesize() -> c_int; + + // Added in `glibc` 2.34 + pub fn close_range(first: c_uint, last: c_uint, flags: c_int) -> c_int; + + pub fn openpty( + __amaster: *mut c_int, + __aslave: *mut c_int, + __name: *mut c_char, + __termp: *const termios, + __winp: *const crate::winsize, + ) -> c_int; + + pub fn forkpty( + __amaster: *mut c_int, + __name: *mut c_char, + __termp: *const termios, + __winp: *const crate::winsize, + ) -> crate::pid_t; + + pub fn getpt() -> c_int; + pub fn ptsname_r(fd: c_int, buf: *mut c_char, buflen: size_t) -> c_int; + pub fn login_tty(fd: c_int) -> c_int; + + pub fn ctermid(s: *mut c_char) -> *mut c_char; + + pub fn clearenv() -> c_int; + + pub fn execveat( + dirfd: c_int, + pathname: *const c_char, + argv: *const *mut c_char, + envp: *const *mut c_char, + flags: c_int, + ) -> c_int; + + // DIFF(main): changed to `*const *mut` in e77f551de9 + pub fn execvpe( + file: *const c_char, + argv: *const *const c_char, + envp: *const *const c_char, + ) -> c_int; + pub fn fexecve(fd: c_int, argv: *const *const c_char, envp: *const *const c_char) -> c_int; + + pub fn daemon(nochdir: c_int, noclose: c_int) -> c_int; + + // posix/spawn.h + pub fn posix_spawn( + pid: *mut crate::pid_t, + path: *const c_char, + file_actions: *const crate::posix_spawn_file_actions_t, + attrp: *const crate::posix_spawnattr_t, + argv: *const *mut c_char, + envp: *const *mut c_char, + ) -> c_int; + pub fn posix_spawnp( + pid: *mut crate::pid_t, + file: *const c_char, + file_actions: *const crate::posix_spawn_file_actions_t, + attrp: *const crate::posix_spawnattr_t, + argv: *const *mut c_char, + envp: *const *mut c_char, + ) -> c_int; + pub fn posix_spawnattr_init(attr: *mut posix_spawnattr_t) -> c_int; + pub fn posix_spawnattr_destroy(attr: *mut posix_spawnattr_t) -> c_int; + pub fn posix_spawnattr_getsigdefault( + attr: *const posix_spawnattr_t, + default: *mut crate::sigset_t, + ) -> c_int; + pub fn posix_spawnattr_setsigdefault( + attr: *mut posix_spawnattr_t, + default: *const crate::sigset_t, + ) -> c_int; + pub fn posix_spawnattr_getsigmask( + attr: *const posix_spawnattr_t, + default: *mut crate::sigset_t, + ) -> c_int; + pub fn posix_spawnattr_setsigmask( + attr: *mut posix_spawnattr_t, + default: *const crate::sigset_t, + ) -> c_int; + pub fn posix_spawnattr_getflags(attr: *const posix_spawnattr_t, flags: *mut c_short) -> c_int; + pub fn posix_spawnattr_setflags(attr: *mut posix_spawnattr_t, flags: c_short) -> c_int; + pub fn posix_spawnattr_getpgroup( + attr: *const posix_spawnattr_t, + flags: *mut crate::pid_t, + ) -> c_int; + pub fn posix_spawnattr_setpgroup(attr: *mut posix_spawnattr_t, flags: crate::pid_t) -> c_int; + pub fn posix_spawnattr_getschedpolicy( + attr: *const posix_spawnattr_t, + flags: *mut c_int, + ) -> c_int; + pub fn posix_spawnattr_setschedpolicy(attr: *mut posix_spawnattr_t, flags: c_int) -> c_int; + pub fn posix_spawnattr_getschedparam( + 
attr: *const posix_spawnattr_t, + param: *mut crate::sched_param, + ) -> c_int; + pub fn posix_spawnattr_setschedparam( + attr: *mut posix_spawnattr_t, + param: *const crate::sched_param, + ) -> c_int; + + pub fn posix_spawn_file_actions_init(actions: *mut posix_spawn_file_actions_t) -> c_int; + pub fn posix_spawn_file_actions_destroy(actions: *mut posix_spawn_file_actions_t) -> c_int; + pub fn posix_spawn_file_actions_addopen( + actions: *mut posix_spawn_file_actions_t, + fd: c_int, + path: *const c_char, + oflag: c_int, + mode: mode_t, + ) -> c_int; + pub fn posix_spawn_file_actions_addclose( + actions: *mut posix_spawn_file_actions_t, + fd: c_int, + ) -> c_int; + pub fn posix_spawn_file_actions_adddup2( + actions: *mut posix_spawn_file_actions_t, + fd: c_int, + newfd: c_int, + ) -> c_int; + + // Added in `glibc` 2.29 + pub fn posix_spawn_file_actions_addchdir_np( + actions: *mut crate::posix_spawn_file_actions_t, + path: *const c_char, + ) -> c_int; + // Added in `glibc` 2.29 + pub fn posix_spawn_file_actions_addfchdir_np( + actions: *mut crate::posix_spawn_file_actions_t, + fd: c_int, + ) -> c_int; + // Added in `glibc` 2.34 + pub fn posix_spawn_file_actions_addclosefrom_np( + actions: *mut crate::posix_spawn_file_actions_t, + from: c_int, + ) -> c_int; + // Added in `glibc` 2.35 + pub fn posix_spawn_file_actions_addtcsetpgrp_np( + actions: *mut crate::posix_spawn_file_actions_t, + tcfd: c_int, + ) -> c_int; + + pub fn shm_open(name: *const c_char, oflag: c_int, mode: mode_t) -> c_int; + pub fn shm_unlink(name: *const c_char) -> c_int; + + pub fn euidaccess(pathname: *const c_char, mode: c_int) -> c_int; + pub fn eaccess(pathname: *const c_char, mode: c_int) -> c_int; + + pub fn faccessat(dirfd: c_int, pathname: *const c_char, mode: c_int, flags: c_int) -> c_int; + + pub fn stat(__file: *const c_char, __buf: *mut stat) -> c_int; + pub fn stat64(__file: *const c_char, __buf: *mut stat64) -> c_int; + + pub fn readdir(dirp: *mut crate::DIR) -> *mut crate::dirent; + pub fn readdir64(dirp: *mut crate::DIR) -> *mut crate::dirent64; + pub fn readdir_r( + dirp: *mut crate::DIR, + entry: *mut crate::dirent, + result: *mut *mut crate::dirent, + ) -> c_int; + pub fn readdir64_r( + dirp: *mut crate::DIR, + entry: *mut crate::dirent64, + result: *mut *mut crate::dirent64, + ) -> c_int; + pub fn seekdir(dirp: *mut crate::DIR, loc: c_long); + pub fn telldir(dirp: *mut crate::DIR) -> c_long; + + pub fn dirfd(dirp: *mut crate::DIR) -> c_int; + + #[link_name = "__xpg_strerror_r"] + pub fn strerror_r(__errnum: c_int, __buf: *mut c_char, __buflen: size_t) -> c_int; + + pub fn __errno_location() -> *mut c_int; + + pub fn mmap64( + __addr: *mut c_void, + __len: size_t, + __prot: c_int, + __flags: c_int, + __fd: c_int, + __offset: __off64_t, + ) -> *mut c_void; + + pub fn mremap( + addr: *mut c_void, + len: size_t, + new_len: size_t, + flags: c_int, + ... 
+    ) -> *mut c_void;
+
+    pub fn mprotect(__addr: *mut c_void, __len: size_t, __prot: c_int) -> c_int;
+
+    pub fn msync(__addr: *mut c_void, __len: size_t, __flags: c_int) -> c_int;
+    pub fn sync();
+    pub fn syncfs(fd: c_int) -> c_int;
+    pub fn fdatasync(fd: c_int) -> c_int;
+
+    pub fn fallocate64(fd: c_int, mode: c_int, offset: off64_t, len: off64_t) -> c_int;
+    pub fn posix_fallocate(fd: c_int, offset: off_t, len: off_t) -> c_int;
+    pub fn posix_fallocate64(fd: c_int, offset: off64_t, len: off64_t) -> c_int;
+
+    pub fn posix_fadvise(fd: c_int, offset: off_t, len: off_t, advise: c_int) -> c_int;
+
+    pub fn posix_fadvise64(fd: c_int, offset: off64_t, len: off64_t, advise: c_int) -> c_int;
+
+    pub fn madvise(__addr: *mut c_void, __len: size_t, __advice: c_int) -> c_int;
+
+    pub fn posix_madvise(addr: *mut c_void, len: size_t, advice: c_int) -> c_int;
+
+    pub fn getrlimit(resource: crate::__rlimit_resource_t, rlim: *mut crate::rlimit) -> c_int;
+    pub fn getrlimit64(resource: crate::__rlimit_resource_t, rlim: *mut crate::rlimit64) -> c_int;
+    pub fn setrlimit(resource: crate::__rlimit_resource_t, rlim: *const crate::rlimit) -> c_int;
+    pub fn setrlimit64(resource: crate::__rlimit_resource_t, rlim: *const crate::rlimit64)
+        -> c_int;
+
+    pub fn getpriority(which: crate::__priority_which, who: crate::id_t) -> c_int;
+    pub fn setpriority(which: crate::__priority_which, who: crate::id_t, prio: c_int) -> c_int;
+
+    pub fn getrandom(__buffer: *mut c_void, __length: size_t, __flags: c_uint) -> ssize_t;
+    pub fn getentropy(__buffer: *mut c_void, __length: size_t) -> c_int;
+
+    pub fn memrchr(cx: *const c_void, c: c_int, n: size_t) -> *mut c_void;
+    pub fn memmem(
+        haystack: *const c_void,
+        haystacklen: size_t,
+        needle: *const c_void,
+        needlelen: size_t,
+    ) -> *mut c_void;
+    pub fn strchrnul(s: *const c_char, c: c_int) -> *mut c_char;
+
+    pub fn abs(i: c_int) -> c_int;
+    pub fn labs(i: c_long) -> c_long;
+    pub fn rand() -> c_int;
+    pub fn srand(seed: c_uint);
+
+    pub fn drand48() -> c_double;
+    pub fn erand48(xseed: *mut c_ushort) -> c_double;
+    pub fn lrand48() -> c_long;
+    pub fn nrand48(xseed: *mut c_ushort) -> c_long;
+    pub fn mrand48() -> c_long;
+    pub fn jrand48(xseed: *mut c_ushort) -> c_long;
+    pub fn srand48(seed: c_long);
+    pub fn seed48(xseed: *mut c_ushort) -> *mut c_ushort;
+    pub fn lcong48(p: *mut c_ushort);
+
+    pub fn qsort_r(
+        base: *mut c_void,
+        num: size_t,
+        size: size_t,
+        compar: Option<unsafe extern "C" fn(*const c_void, *const c_void, *mut c_void) -> c_int>,
+        arg: *mut c_void,
+    );
+
+    pub fn brk(addr: *mut c_void) -> c_int;
+    pub fn sbrk(increment: intptr_t) -> *mut c_void;
+
+    pub fn memalign(align: size_t, size: size_t) -> *mut c_void;
+    pub fn mallopt(param: c_int, value: c_int) -> c_int;
+
+    pub fn mallinfo() -> crate::mallinfo;
+    pub fn mallinfo2() -> crate::mallinfo2;
+    pub fn malloc_info(options: c_int, stream: *mut crate::FILE) -> c_int;
+    pub fn malloc_usable_size(ptr: *mut c_void) -> size_t;
+    pub fn malloc_trim(__pad: size_t) -> c_int;
+
+    pub fn iconv_open(tocode: *const c_char, fromcode: *const c_char) -> iconv_t;
+    pub fn iconv(
+        cd: iconv_t,
+        inbuf: *mut *mut c_char,
+        inbytesleft: *mut size_t,
+        outbuf: *mut *mut c_char,
+        outbytesleft: *mut size_t,
+    ) -> size_t;
+    pub fn iconv_close(cd: iconv_t) -> c_int;
+
+    pub fn getopt_long(
+        argc: c_int,
+        argv: *const *mut c_char,
+        optstring: *const c_char,
+        longopts: *const option,
+        longindex: *mut c_int,
+    ) -> c_int;
+
+    pub fn backtrace(buf: *mut *mut c_void, sz: c_int) -> c_int;
+
+    pub fn reboot(how_to: c_int) -> c_int;
+
+    pub fn getloadavg(loadavg: *mut c_double, nelem: c_int) -> c_int;
+
+    pub fn regexec(
+        preg: *const crate::regex_t,
+        input: *const c_char,
+        nmatch: size_t,
+        pmatch: *mut regmatch_t,
+        eflags: c_int,
+    ) -> c_int;
+
+    pub fn regerror(
+        errcode: c_int,
+        preg: *const crate::regex_t,
+        errbuf: *mut c_char,
+        errbuf_size: size_t,
+    ) -> size_t;
+
+    pub fn regfree(preg: *mut crate::regex_t);
+
+    pub fn glob(
+        pattern: *const c_char,
+        flags: c_int,
+        errfunc: Option<extern "C" fn(epath: *const c_char, errno: c_int) -> c_int>,
+        pglob: *mut crate::glob_t,
+    ) -> c_int;
+    pub fn globfree(pglob: *mut crate::glob_t);
+
+    pub fn glob64(
+        pattern: *const c_char,
+        flags: c_int,
+        errfunc: Option<extern "C" fn(epath: *const c_char, errno: c_int) -> c_int>,
+        pglob: *mut glob64_t,
+    ) -> c_int;
+    pub fn globfree64(pglob: *mut glob64_t);
+
+    pub fn getxattr(
+        path: *const c_char,
+        name: *const c_char,
+        value: *mut c_void,
+        size: size_t,
+    ) -> ssize_t;
+    pub fn lgetxattr(
+        path: *const c_char,
+        name: *const c_char,
+        value: *mut c_void,
+        size: size_t,
+    ) -> ssize_t;
+    pub fn fgetxattr(
+        filedes: c_int,
+        name: *const c_char,
+        value: *mut c_void,
+        size: size_t,
+    ) -> ssize_t;
+    pub fn setxattr(
+        path: *const c_char,
+        name: *const c_char,
+        value: *const c_void,
+        size: size_t,
+        flags: c_int,
+    ) -> c_int;
+    pub fn lsetxattr(
+        path: *const c_char,
+        name: *const c_char,
+        value: *const c_void,
+        size: size_t,
+        flags: c_int,
+    ) -> c_int;
+    pub fn fsetxattr(
+        filedes: c_int,
+        name: *const c_char,
+        value: *const c_void,
+        size: size_t,
+        flags: c_int,
+    ) -> c_int;
+    pub fn listxattr(path: *const c_char, list: *mut c_char, size: size_t) -> ssize_t;
+    pub fn llistxattr(path: *const c_char, list: *mut c_char, size: size_t) -> ssize_t;
+    pub fn flistxattr(filedes: c_int, list: *mut c_char, size: size_t) -> ssize_t;
+    pub fn removexattr(path: *const c_char, name: *const c_char) -> c_int;
+    pub fn lremovexattr(path: *const c_char, name: *const c_char) -> c_int;
+    pub fn fremovexattr(filedes: c_int, name: *const c_char) -> c_int;
+
+    pub fn dirname(path: *mut c_char) -> *mut c_char;
+    /// POSIX version of `basename(3)`, defined in `libgen.h`.
+    #[link_name = "__xpg_basename"]
+    pub fn posix_basename(path: *mut c_char) -> *mut c_char;
+    /// GNU version of `basename(3)`, defined in `string.h`.
+    #[link_name = "basename"]
+    pub fn gnu_basename(path: *const c_char) -> *mut c_char;
+
+    pub fn dlmopen(lmid: Lmid_t, filename: *const c_char, flag: c_int) -> *mut c_void;
+    pub fn dlinfo(handle: *mut c_void, request: c_int, info: *mut c_void) -> c_int;
+    pub fn dladdr1(
+        addr: *const c_void,
+        info: *mut crate::Dl_info,
+        extra_info: *mut *mut c_void,
+        flags: c_int,
+    ) -> c_int;
+
+    pub fn duplocale(base: crate::locale_t) -> crate::locale_t;
+    pub fn freelocale(loc: crate::locale_t);
+    pub fn newlocale(mask: c_int, locale: *const c_char, base: crate::locale_t) -> crate::locale_t;
+    pub fn uselocale(loc: crate::locale_t) -> crate::locale_t;
+    pub fn nl_langinfo(item: crate::nl_item) -> *mut c_char;
+    pub fn nl_langinfo_l(item: crate::nl_item, locale: crate::locale_t) -> *mut c_char;
+
+    pub fn dl_iterate_phdr(
+        callback: Option<
+            unsafe extern "C" fn(
+                info: *mut crate::dl_phdr_info,
+                size: size_t,
+                data: *mut c_void,
+            ) -> c_int,
+        >,
+        data: *mut c_void,
+    ) -> c_int;
+
+    pub fn gnu_get_libc_release() -> *const c_char;
+    pub fn gnu_get_libc_version() -> *const c_char;
+}
+
+safe_f!
{ + pub const fn makedev(major: c_uint, minor: c_uint) -> crate::dev_t { + let major = major as crate::dev_t; + let minor = minor as crate::dev_t; + let mut dev = 0; + dev |= major << 8; + dev |= minor; + dev + } + + pub const fn major(dev: crate::dev_t) -> c_uint { + ((dev >> 8) & 0xff) as c_uint + } + + pub const fn minor(dev: crate::dev_t) -> c_uint { + (dev & 0xffff00ff) as c_uint + } + + pub fn SIGRTMAX() -> c_int { + unsafe { __libc_current_sigrtmax() } + } + + pub fn SIGRTMIN() -> c_int { + unsafe { __libc_current_sigrtmin() } + } + + pub const fn WIFSTOPPED(status: c_int) -> bool { + (status & 0xff) == 0x7f + } + + pub const fn WSTOPSIG(status: c_int) -> c_int { + (status >> 8) & 0xff + } + + pub const fn WIFCONTINUED(status: c_int) -> bool { + status == 0xffff + } + + pub const fn WIFSIGNALED(status: c_int) -> bool { + ((status & 0x7f) + 1) as i8 >= 2 + } + + pub const fn WTERMSIG(status: c_int) -> c_int { + status & 0x7f + } + + pub const fn WIFEXITED(status: c_int) -> bool { + (status & 0x7f) == 0 + } + + pub const fn WEXITSTATUS(status: c_int) -> c_int { + (status >> 8) & 0xff + } + + pub const fn WCOREDUMP(status: c_int) -> bool { + (status & 0x80) != 0 + } + + pub const fn W_EXITCODE(ret: c_int, sig: c_int) -> c_int { + (ret << 8) | sig + } + + pub const fn W_STOPCODE(sig: c_int) -> c_int { + (sig << 8) | 0x7f + } + + pub const fn QCMD(cmd: c_int, type_: c_int) -> c_int { + (cmd << 8) | (type_ & 0x00ff) + } + + pub const fn IPOPT_COPIED(o: u8) -> u8 { + o & IPOPT_COPY + } + + pub const fn IPOPT_CLASS(o: u8) -> u8 { + o & IPOPT_CLASS_MASK + } + + pub const fn IPOPT_NUMBER(o: u8) -> u8 { + o & IPOPT_NUMBER_MASK + } + + pub const fn IPTOS_ECN(x: u8) -> u8 { + x & crate::IPTOS_ECN_MASK + } +} + +cfg_if! { + if #[cfg(target_pointer_width = "64")] { + mod b64; + pub use self::b64::*; + } else { + mod b32; + pub use self::b32::*; + } +} diff --git a/vendor/libc/src/unix/linux_like/android/b32/arm.rs b/vendor/libc/src/unix/linux_like/android/b32/arm.rs new file mode 100644 index 00000000000000..b78c8a83623eaf --- /dev/null +++ b/vendor/libc/src/unix/linux_like/android/b32/arm.rs @@ -0,0 +1,532 @@ +use crate::prelude::*; + +pub type wchar_t = u32; +pub type greg_t = i32; +pub type mcontext_t = sigcontext; + +s! { + pub struct sigcontext { + pub trap_no: c_ulong, + pub error_code: c_ulong, + pub oldmask: c_ulong, + pub arm_r0: c_ulong, + pub arm_r1: c_ulong, + pub arm_r2: c_ulong, + pub arm_r3: c_ulong, + pub arm_r4: c_ulong, + pub arm_r5: c_ulong, + pub arm_r6: c_ulong, + pub arm_r7: c_ulong, + pub arm_r8: c_ulong, + pub arm_r9: c_ulong, + pub arm_r10: c_ulong, + pub arm_fp: c_ulong, + pub arm_ip: c_ulong, + pub arm_sp: c_ulong, + pub arm_lr: c_ulong, + pub arm_pc: c_ulong, + pub arm_cpsr: c_ulong, + pub fault_address: c_ulong, + } +} + +s_no_extra_traits! { + pub struct __c_anonymous_uc_sigmask_with_padding { + pub uc_sigmask: crate::sigset_t, + /* Android has a wrong (smaller) sigset_t on x86. */ + __padding_rt_sigset: u32, + } + + pub union __c_anonymous_uc_sigmask { + uc_sigmask: __c_anonymous_uc_sigmask_with_padding, + uc_sigmask64: crate::sigset64_t, + } + + pub struct ucontext_t { + pub uc_flags: c_ulong, + pub uc_link: *mut ucontext_t, + pub uc_stack: crate::stack_t, + pub uc_mcontext: mcontext_t, + pub uc_sigmask__c_anonymous_union: __c_anonymous_uc_sigmask, + /* The kernel adds extra padding after uc_sigmask to match + * glibc sigset_t on ARM. */ + __padding: [c_char; 120], + __align: [c_longlong; 0], + uc_regspace: [c_ulong; 128], + } +} + +cfg_if! 
{
+    if #[cfg(feature = "extra_traits")] {
+        impl PartialEq for __c_anonymous_uc_sigmask_with_padding {
+            fn eq(&self, other: &__c_anonymous_uc_sigmask_with_padding) -> bool {
+                self.uc_sigmask == other.uc_sigmask
+                // Ignore padding
+            }
+        }
+        impl Eq for __c_anonymous_uc_sigmask_with_padding {}
+        impl hash::Hash for __c_anonymous_uc_sigmask_with_padding {
+            fn hash<H: hash::Hasher>(&self, state: &mut H) {
+                self.uc_sigmask.hash(state)
+                // Ignore padding
+            }
+        }
+
+        impl PartialEq for __c_anonymous_uc_sigmask {
+            fn eq(&self, other: &__c_anonymous_uc_sigmask) -> bool {
+                unsafe { self.uc_sigmask == other.uc_sigmask }
+            }
+        }
+        impl Eq for __c_anonymous_uc_sigmask {}
+        impl hash::Hash for __c_anonymous_uc_sigmask {
+            fn hash<H: hash::Hasher>(&self, state: &mut H) {
+                unsafe { self.uc_sigmask.hash(state) }
+            }
+        }
+
+        impl PartialEq for ucontext_t {
+            fn eq(&self, other: &Self) -> bool {
+                self.uc_flags == other.uc_flags
+                    && self.uc_link == other.uc_link
+                    && self.uc_stack == other.uc_stack
+                    && self.uc_mcontext == other.uc_mcontext
+                    && self.uc_sigmask__c_anonymous_union == other.uc_sigmask__c_anonymous_union
+                    && &self.uc_regspace[..] == &other.uc_regspace[..]
+                // Ignore padding field
+            }
+        }
+        impl Eq for ucontext_t {}
+        impl hash::Hash for ucontext_t {
+            fn hash<H: hash::Hasher>(&self, state: &mut H) {
+                self.uc_flags.hash(state);
+                self.uc_link.hash(state);
+                self.uc_stack.hash(state);
+                self.uc_mcontext.hash(state);
+                self.uc_sigmask__c_anonymous_union.hash(state);
+                self.uc_regspace[..].hash(state);
+                // Ignore padding field
+            }
+        }
+    }
+}
+
+pub const O_DIRECT: c_int = 0x10000;
+pub const O_DIRECTORY: c_int = 0x4000;
+pub const O_NOFOLLOW: c_int = 0x8000;
+pub const O_LARGEFILE: c_int = 0o400000;
+
+pub const SYS_restart_syscall: c_long = 0;
+pub const SYS_exit: c_long = 1;
+pub const SYS_fork: c_long = 2;
+pub const SYS_read: c_long = 3;
+pub const SYS_write: c_long = 4;
+pub const SYS_open: c_long = 5;
+pub const SYS_close: c_long = 6;
+pub const SYS_creat: c_long = 8;
+pub const SYS_link: c_long = 9;
+pub const SYS_unlink: c_long = 10;
+pub const SYS_execve: c_long = 11;
+pub const SYS_chdir: c_long = 12;
+pub const SYS_mknod: c_long = 14;
+pub const SYS_chmod: c_long = 15;
+pub const SYS_lchown: c_long = 16;
+pub const SYS_lseek: c_long = 19;
+pub const SYS_getpid: c_long = 20;
+pub const SYS_mount: c_long = 21;
+pub const SYS_setuid: c_long = 23;
+pub const SYS_getuid: c_long = 24;
+pub const SYS_ptrace: c_long = 26;
+pub const SYS_pause: c_long = 29;
+pub const SYS_access: c_long = 33;
+pub const SYS_nice: c_long = 34;
+pub const SYS_sync: c_long = 36;
+pub const SYS_kill: c_long = 37;
+pub const SYS_rename: c_long = 38;
+pub const SYS_mkdir: c_long = 39;
+pub const SYS_rmdir: c_long = 40;
+pub const SYS_dup: c_long = 41;
+pub const SYS_pipe: c_long = 42;
+pub const SYS_times: c_long = 43;
+pub const SYS_brk: c_long = 45;
+pub const SYS_setgid: c_long = 46;
+pub const SYS_getgid: c_long = 47;
+pub const SYS_geteuid: c_long = 49;
+pub const SYS_getegid: c_long = 50;
+pub const SYS_acct: c_long = 51;
+pub const SYS_umount2: c_long = 52;
+pub const SYS_ioctl: c_long = 54;
+pub const SYS_fcntl: c_long = 55;
+pub const SYS_setpgid: c_long = 57;
+pub const SYS_umask: c_long = 60;
+pub const SYS_chroot: c_long = 61;
+pub const SYS_ustat: c_long = 62;
+pub const SYS_dup2: c_long = 63;
+pub const SYS_getppid: c_long = 64;
+pub const SYS_getpgrp: c_long = 65;
+pub const SYS_setsid: c_long = 66;
+pub const SYS_sigaction: c_long = 67;
+pub const SYS_setreuid: c_long = 70;
+pub const SYS_setregid: c_long = 71;
+pub const SYS_sigsuspend:
c_long = 72; +pub const SYS_sigpending: c_long = 73; +pub const SYS_sethostname: c_long = 74; +pub const SYS_setrlimit: c_long = 75; +pub const SYS_getrusage: c_long = 77; +pub const SYS_gettimeofday: c_long = 78; +pub const SYS_settimeofday: c_long = 79; +pub const SYS_getgroups: c_long = 80; +pub const SYS_setgroups: c_long = 81; +pub const SYS_symlink: c_long = 83; +pub const SYS_readlink: c_long = 85; +pub const SYS_uselib: c_long = 86; +pub const SYS_swapon: c_long = 87; +pub const SYS_reboot: c_long = 88; +pub const SYS_munmap: c_long = 91; +pub const SYS_truncate: c_long = 92; +pub const SYS_ftruncate: c_long = 93; +pub const SYS_fchmod: c_long = 94; +pub const SYS_fchown: c_long = 95; +pub const SYS_getpriority: c_long = 96; +pub const SYS_setpriority: c_long = 97; +pub const SYS_statfs: c_long = 99; +pub const SYS_fstatfs: c_long = 100; +pub const SYS_syslog: c_long = 103; +pub const SYS_setitimer: c_long = 104; +pub const SYS_getitimer: c_long = 105; +pub const SYS_stat: c_long = 106; +pub const SYS_lstat: c_long = 107; +pub const SYS_fstat: c_long = 108; +pub const SYS_vhangup: c_long = 111; +pub const SYS_wait4: c_long = 114; +pub const SYS_swapoff: c_long = 115; +pub const SYS_sysinfo: c_long = 116; +pub const SYS_fsync: c_long = 118; +pub const SYS_sigreturn: c_long = 119; +pub const SYS_clone: c_long = 120; +pub const SYS_setdomainname: c_long = 121; +pub const SYS_uname: c_long = 122; +pub const SYS_adjtimex: c_long = 124; +pub const SYS_mprotect: c_long = 125; +pub const SYS_sigprocmask: c_long = 126; +pub const SYS_init_module: c_long = 128; +pub const SYS_delete_module: c_long = 129; +pub const SYS_quotactl: c_long = 131; +pub const SYS_getpgid: c_long = 132; +pub const SYS_fchdir: c_long = 133; +pub const SYS_bdflush: c_long = 134; +pub const SYS_sysfs: c_long = 135; +pub const SYS_personality: c_long = 136; +pub const SYS_setfsuid: c_long = 138; +pub const SYS_setfsgid: c_long = 139; +pub const SYS_getdents: c_long = 141; +pub const SYS_flock: c_long = 143; +pub const SYS_msync: c_long = 144; +pub const SYS_readv: c_long = 145; +pub const SYS_writev: c_long = 146; +pub const SYS_getsid: c_long = 147; +pub const SYS_fdatasync: c_long = 148; +pub const SYS_mlock: c_long = 150; +pub const SYS_munlock: c_long = 151; +pub const SYS_mlockall: c_long = 152; +pub const SYS_munlockall: c_long = 153; +pub const SYS_sched_setparam: c_long = 154; +pub const SYS_sched_getparam: c_long = 155; +pub const SYS_sched_setscheduler: c_long = 156; +pub const SYS_sched_getscheduler: c_long = 157; +pub const SYS_sched_yield: c_long = 158; +pub const SYS_sched_get_priority_max: c_long = 159; +pub const SYS_sched_get_priority_min: c_long = 160; +pub const SYS_sched_rr_get_interval: c_long = 161; +pub const SYS_nanosleep: c_long = 162; +pub const SYS_mremap: c_long = 163; +pub const SYS_setresuid: c_long = 164; +pub const SYS_getresuid: c_long = 165; +pub const SYS_poll: c_long = 168; +pub const SYS_nfsservctl: c_long = 169; +pub const SYS_setresgid: c_long = 170; +pub const SYS_getresgid: c_long = 171; +pub const SYS_prctl: c_long = 172; +pub const SYS_rt_sigreturn: c_long = 173; +pub const SYS_rt_sigaction: c_long = 174; +pub const SYS_rt_sigprocmask: c_long = 175; +pub const SYS_rt_sigpending: c_long = 176; +pub const SYS_rt_sigtimedwait: c_long = 177; +pub const SYS_rt_sigqueueinfo: c_long = 178; +pub const SYS_rt_sigsuspend: c_long = 179; +pub const SYS_pread64: c_long = 180; +pub const SYS_pwrite64: c_long = 181; +pub const SYS_chown: c_long = 182; +pub const SYS_getcwd: c_long = 183; 
+pub const SYS_capget: c_long = 184; +pub const SYS_capset: c_long = 185; +pub const SYS_sigaltstack: c_long = 186; +pub const SYS_sendfile: c_long = 187; +pub const SYS_vfork: c_long = 190; +pub const SYS_ugetrlimit: c_long = 191; +pub const SYS_mmap2: c_long = 192; +pub const SYS_truncate64: c_long = 193; +pub const SYS_ftruncate64: c_long = 194; +pub const SYS_stat64: c_long = 195; +pub const SYS_lstat64: c_long = 196; +pub const SYS_fstat64: c_long = 197; +pub const SYS_lchown32: c_long = 198; +pub const SYS_getuid32: c_long = 199; +pub const SYS_getgid32: c_long = 200; +pub const SYS_geteuid32: c_long = 201; +pub const SYS_getegid32: c_long = 202; +pub const SYS_setreuid32: c_long = 203; +pub const SYS_setregid32: c_long = 204; +pub const SYS_getgroups32: c_long = 205; +pub const SYS_setgroups32: c_long = 206; +pub const SYS_fchown32: c_long = 207; +pub const SYS_setresuid32: c_long = 208; +pub const SYS_getresuid32: c_long = 209; +pub const SYS_setresgid32: c_long = 210; +pub const SYS_getresgid32: c_long = 211; +pub const SYS_chown32: c_long = 212; +pub const SYS_setuid32: c_long = 213; +pub const SYS_setgid32: c_long = 214; +pub const SYS_setfsuid32: c_long = 215; +pub const SYS_setfsgid32: c_long = 216; +pub const SYS_getdents64: c_long = 217; +pub const SYS_pivot_root: c_long = 218; +pub const SYS_mincore: c_long = 219; +pub const SYS_madvise: c_long = 220; +pub const SYS_fcntl64: c_long = 221; +pub const SYS_gettid: c_long = 224; +pub const SYS_readahead: c_long = 225; +pub const SYS_setxattr: c_long = 226; +pub const SYS_lsetxattr: c_long = 227; +pub const SYS_fsetxattr: c_long = 228; +pub const SYS_getxattr: c_long = 229; +pub const SYS_lgetxattr: c_long = 230; +pub const SYS_fgetxattr: c_long = 231; +pub const SYS_listxattr: c_long = 232; +pub const SYS_llistxattr: c_long = 233; +pub const SYS_flistxattr: c_long = 234; +pub const SYS_removexattr: c_long = 235; +pub const SYS_lremovexattr: c_long = 236; +pub const SYS_fremovexattr: c_long = 237; +pub const SYS_tkill: c_long = 238; +pub const SYS_sendfile64: c_long = 239; +pub const SYS_futex: c_long = 240; +pub const SYS_sched_setaffinity: c_long = 241; +pub const SYS_sched_getaffinity: c_long = 242; +pub const SYS_io_setup: c_long = 243; +pub const SYS_io_destroy: c_long = 244; +pub const SYS_io_getevents: c_long = 245; +pub const SYS_io_submit: c_long = 246; +pub const SYS_io_cancel: c_long = 247; +pub const SYS_exit_group: c_long = 248; +pub const SYS_lookup_dcookie: c_long = 249; +pub const SYS_epoll_create: c_long = 250; +pub const SYS_epoll_ctl: c_long = 251; +pub const SYS_epoll_wait: c_long = 252; +pub const SYS_remap_file_pages: c_long = 253; +pub const SYS_set_tid_address: c_long = 256; +pub const SYS_timer_create: c_long = 257; +pub const SYS_timer_settime: c_long = 258; +pub const SYS_timer_gettime: c_long = 259; +pub const SYS_timer_getoverrun: c_long = 260; +pub const SYS_timer_delete: c_long = 261; +pub const SYS_clock_settime: c_long = 262; +pub const SYS_clock_gettime: c_long = 263; +pub const SYS_clock_getres: c_long = 264; +pub const SYS_clock_nanosleep: c_long = 265; +pub const SYS_statfs64: c_long = 266; +pub const SYS_fstatfs64: c_long = 267; +pub const SYS_tgkill: c_long = 268; +pub const SYS_utimes: c_long = 269; +pub const SYS_arm_fadvise64_64: c_long = 270; +pub const SYS_pciconfig_iobase: c_long = 271; +pub const SYS_pciconfig_read: c_long = 272; +pub const SYS_pciconfig_write: c_long = 273; +pub const SYS_mq_open: c_long = 274; +pub const SYS_mq_unlink: c_long = 275; +pub const SYS_mq_timedsend: 
c_long = 276; +pub const SYS_mq_timedreceive: c_long = 277; +pub const SYS_mq_notify: c_long = 278; +pub const SYS_mq_getsetattr: c_long = 279; +pub const SYS_waitid: c_long = 280; +pub const SYS_socket: c_long = 281; +pub const SYS_bind: c_long = 282; +pub const SYS_connect: c_long = 283; +pub const SYS_listen: c_long = 284; +pub const SYS_accept: c_long = 285; +pub const SYS_getsockname: c_long = 286; +pub const SYS_getpeername: c_long = 287; +pub const SYS_socketpair: c_long = 288; +pub const SYS_send: c_long = 289; +pub const SYS_sendto: c_long = 290; +pub const SYS_recv: c_long = 291; +pub const SYS_recvfrom: c_long = 292; +pub const SYS_shutdown: c_long = 293; +pub const SYS_setsockopt: c_long = 294; +pub const SYS_getsockopt: c_long = 295; +pub const SYS_sendmsg: c_long = 296; +pub const SYS_recvmsg: c_long = 297; +pub const SYS_semop: c_long = 298; +pub const SYS_semget: c_long = 299; +pub const SYS_semctl: c_long = 300; +pub const SYS_msgsnd: c_long = 301; +pub const SYS_msgrcv: c_long = 302; +pub const SYS_msgget: c_long = 303; +pub const SYS_msgctl: c_long = 304; +pub const SYS_shmat: c_long = 305; +pub const SYS_shmdt: c_long = 306; +pub const SYS_shmget: c_long = 307; +pub const SYS_shmctl: c_long = 308; +pub const SYS_add_key: c_long = 309; +pub const SYS_request_key: c_long = 310; +pub const SYS_keyctl: c_long = 311; +pub const SYS_semtimedop: c_long = 312; +pub const SYS_vserver: c_long = 313; +pub const SYS_ioprio_set: c_long = 314; +pub const SYS_ioprio_get: c_long = 315; +pub const SYS_inotify_init: c_long = 316; +pub const SYS_inotify_add_watch: c_long = 317; +pub const SYS_inotify_rm_watch: c_long = 318; +pub const SYS_mbind: c_long = 319; +pub const SYS_get_mempolicy: c_long = 320; +pub const SYS_set_mempolicy: c_long = 321; +pub const SYS_openat: c_long = 322; +pub const SYS_mkdirat: c_long = 323; +pub const SYS_mknodat: c_long = 324; +pub const SYS_fchownat: c_long = 325; +pub const SYS_futimesat: c_long = 326; +pub const SYS_fstatat64: c_long = 327; +pub const SYS_unlinkat: c_long = 328; +pub const SYS_renameat: c_long = 329; +pub const SYS_linkat: c_long = 330; +pub const SYS_symlinkat: c_long = 331; +pub const SYS_readlinkat: c_long = 332; +pub const SYS_fchmodat: c_long = 333; +pub const SYS_faccessat: c_long = 334; +pub const SYS_pselect6: c_long = 335; +pub const SYS_ppoll: c_long = 336; +pub const SYS_unshare: c_long = 337; +pub const SYS_set_robust_list: c_long = 338; +pub const SYS_get_robust_list: c_long = 339; +pub const SYS_splice: c_long = 340; +pub const SYS_arm_sync_file_range: c_long = 341; +pub const SYS_tee: c_long = 342; +pub const SYS_vmsplice: c_long = 343; +pub const SYS_move_pages: c_long = 344; +pub const SYS_getcpu: c_long = 345; +pub const SYS_epoll_pwait: c_long = 346; +pub const SYS_kexec_load: c_long = 347; +pub const SYS_utimensat: c_long = 348; +pub const SYS_signalfd: c_long = 349; +pub const SYS_timerfd_create: c_long = 350; +pub const SYS_eventfd: c_long = 351; +pub const SYS_fallocate: c_long = 352; +pub const SYS_timerfd_settime: c_long = 353; +pub const SYS_timerfd_gettime: c_long = 354; +pub const SYS_signalfd4: c_long = 355; +pub const SYS_eventfd2: c_long = 356; +pub const SYS_epoll_create1: c_long = 357; +pub const SYS_dup3: c_long = 358; +pub const SYS_pipe2: c_long = 359; +pub const SYS_inotify_init1: c_long = 360; +pub const SYS_preadv: c_long = 361; +pub const SYS_pwritev: c_long = 362; +pub const SYS_rt_tgsigqueueinfo: c_long = 363; +pub const SYS_perf_event_open: c_long = 364; +pub const SYS_recvmmsg: c_long = 365; +pub 
const SYS_accept4: c_long = 366; +pub const SYS_fanotify_init: c_long = 367; +pub const SYS_fanotify_mark: c_long = 368; +pub const SYS_prlimit64: c_long = 369; +pub const SYS_name_to_handle_at: c_long = 370; +pub const SYS_open_by_handle_at: c_long = 371; +pub const SYS_clock_adjtime: c_long = 372; +pub const SYS_syncfs: c_long = 373; +pub const SYS_sendmmsg: c_long = 374; +pub const SYS_setns: c_long = 375; +pub const SYS_process_vm_readv: c_long = 376; +pub const SYS_process_vm_writev: c_long = 377; +pub const SYS_kcmp: c_long = 378; +pub const SYS_finit_module: c_long = 379; +pub const SYS_sched_setattr: c_long = 380; +pub const SYS_sched_getattr: c_long = 381; +pub const SYS_renameat2: c_long = 382; +pub const SYS_seccomp: c_long = 383; +pub const SYS_getrandom: c_long = 384; +pub const SYS_memfd_create: c_long = 385; +pub const SYS_bpf: c_long = 386; +pub const SYS_execveat: c_long = 387; +pub const SYS_userfaultfd: c_long = 388; +pub const SYS_membarrier: c_long = 389; +pub const SYS_mlock2: c_long = 390; +pub const SYS_copy_file_range: c_long = 391; +pub const SYS_preadv2: c_long = 392; +pub const SYS_pwritev2: c_long = 393; +pub const SYS_pkey_mprotect: c_long = 394; +pub const SYS_pkey_alloc: c_long = 395; +pub const SYS_pkey_free: c_long = 396; +pub const SYS_statx: c_long = 397; +pub const SYS_pidfd_send_signal: c_long = 424; +pub const SYS_io_uring_setup: c_long = 425; +pub const SYS_io_uring_enter: c_long = 426; +pub const SYS_io_uring_register: c_long = 427; +pub const SYS_open_tree: c_long = 428; +pub const SYS_move_mount: c_long = 429; +pub const SYS_fsopen: c_long = 430; +pub const SYS_fsconfig: c_long = 431; +pub const SYS_fsmount: c_long = 432; +pub const SYS_fspick: c_long = 433; +pub const SYS_pidfd_open: c_long = 434; +pub const SYS_clone3: c_long = 435; +pub const SYS_close_range: c_long = 436; +pub const SYS_openat2: c_long = 437; +pub const SYS_pidfd_getfd: c_long = 438; +pub const SYS_faccessat2: c_long = 439; +pub const SYS_process_madvise: c_long = 440; +pub const SYS_epoll_pwait2: c_long = 441; +pub const SYS_mount_setattr: c_long = 442; +pub const SYS_quotactl_fd: c_long = 443; +pub const SYS_landlock_create_ruleset: c_long = 444; +pub const SYS_landlock_add_rule: c_long = 445; +pub const SYS_landlock_restrict_self: c_long = 446; +pub const SYS_process_mrelease: c_long = 448; +pub const SYS_futex_waitv: c_long = 449; +pub const SYS_set_mempolicy_home_node: c_long = 450; + +// offsets in mcontext_t.gregs from sys/ucontext.h +pub const REG_R0: c_int = 0; +pub const REG_R1: c_int = 1; +pub const REG_R2: c_int = 2; +pub const REG_R3: c_int = 3; +pub const REG_R4: c_int = 4; +pub const REG_R5: c_int = 5; +pub const REG_R6: c_int = 6; +pub const REG_R7: c_int = 7; +pub const REG_R8: c_int = 8; +pub const REG_R9: c_int = 9; +pub const REG_R10: c_int = 10; +pub const REG_R11: c_int = 11; +pub const REG_R12: c_int = 12; +pub const REG_R13: c_int = 13; +pub const REG_R14: c_int = 14; +pub const REG_R15: c_int = 15; + +pub const NGREG: c_int = 18; + +// From NDK's asm/auxvec.h +pub const AT_SYSINFO_EHDR: c_ulong = 33; + +f! { + // Sadly, Android before 5.0 (API level 21), the accept4 syscall is not + // exposed by the libc. As work-around, we implement it through `syscall` + // directly. This workaround can be removed if the minimum version of + // Android is bumped. 
When the workaround is removed, `accept4` can be
+    // moved back to `linux_like/mod.rs`
+    pub fn accept4(
+        fd: c_int,
+        addr: *mut crate::sockaddr,
+        len: *mut crate::socklen_t,
+        flg: c_int,
+    ) -> c_int {
+        crate::syscall(SYS_accept4, fd, addr, len, flg) as c_int
+    }
+}
diff --git a/vendor/libc/src/unix/linux_like/android/b32/mod.rs b/vendor/libc/src/unix/linux_like/android/b32/mod.rs
new file mode 100644
index 00000000000000..d02dbf92d79246
--- /dev/null
+++ b/vendor/libc/src/unix/linux_like/android/b32/mod.rs
@@ -0,0 +1,239 @@
+use crate::prelude::*;
+
+// The following definitions are correct for arm and i686,
+// but may be wrong for mips
+
+pub type mode_t = u16;
+pub type off64_t = c_longlong;
+pub type sigset_t = c_ulong;
+pub type socklen_t = i32;
+pub type time64_t = i64;
+pub type __u64 = c_ulonglong;
+pub type __s64 = c_longlong;
+
+s! {
+    // FIXME(1.0): This should not implement `PartialEq`
+    #[allow(unpredictable_function_pointer_comparisons)]
+    pub struct sigaction {
+        pub sa_sigaction: crate::sighandler_t,
+        pub sa_mask: crate::sigset_t,
+        pub sa_flags: c_int,
+        pub sa_restorer: Option<extern "C" fn()>,
+    }
+
+    pub struct rlimit64 {
+        pub rlim_cur: u64,
+        pub rlim_max: u64,
+    }
+
+    pub struct stat {
+        pub st_dev: c_ulonglong,
+        __pad0: [c_uchar; 4],
+        __st_ino: crate::ino_t,
+        pub st_mode: c_uint,
+        pub st_nlink: crate::nlink_t,
+        pub st_uid: crate::uid_t,
+        pub st_gid: crate::gid_t,
+        pub st_rdev: c_ulonglong,
+        __pad3: [c_uchar; 4],
+        pub st_size: c_longlong,
+        pub st_blksize: crate::blksize_t,
+        pub st_blocks: c_ulonglong,
+        pub st_atime: c_long,
+        pub st_atime_nsec: c_long,
+        pub st_mtime: c_long,
+        pub st_mtime_nsec: c_long,
+        pub st_ctime: c_long,
+        pub st_ctime_nsec: c_long,
+        pub st_ino: c_ulonglong,
+    }
+
+    pub struct stat64 {
+        pub st_dev: c_ulonglong,
+        __pad0: [c_uchar; 4],
+        __st_ino: crate::ino_t,
+        pub st_mode: c_uint,
+        pub st_nlink: crate::nlink_t,
+        pub st_uid: crate::uid_t,
+        pub st_gid: crate::gid_t,
+        pub st_rdev: c_ulonglong,
+        __pad3: [c_uchar; 4],
+        pub st_size: c_longlong,
+        pub st_blksize: crate::blksize_t,
+        pub st_blocks: c_ulonglong,
+        pub st_atime: c_long,
+        pub st_atime_nsec: c_long,
+        pub st_mtime: c_long,
+        pub st_mtime_nsec: c_long,
+        pub st_ctime: c_long,
+        pub st_ctime_nsec: c_long,
+        pub st_ino: c_ulonglong,
+    }
+
+    pub struct statfs64 {
+        pub f_type: u32,
+        pub f_bsize: u32,
+        pub f_blocks: u64,
+        pub f_bfree: u64,
+        pub f_bavail: u64,
+        pub f_files: u64,
+        pub f_ffree: u64,
+        pub f_fsid: crate::__fsid_t,
+        pub f_namelen: u32,
+        pub f_frsize: u32,
+        pub f_flags: u32,
+        pub f_spare: [u32; 4],
+    }
+
+    pub struct statvfs64 {
+        pub f_bsize: c_ulong,
+        pub f_frsize: c_ulong,
+        pub f_blocks: c_ulong,
+        pub f_bfree: c_ulong,
+        pub f_bavail: c_ulong,
+        pub f_files: c_ulong,
+        pub f_ffree: c_ulong,
+        pub f_favail: c_ulong,
+        pub f_fsid: c_ulong,
+        pub f_flag: c_ulong,
+        pub f_namemax: c_ulong,
+    }
+
+    pub struct pthread_attr_t {
+        pub flags: u32,
+        pub stack_base: *mut c_void,
+        pub stack_size: size_t,
+        pub guard_size: size_t,
+        pub sched_policy: i32,
+        pub sched_priority: i32,
+    }
+
+    pub struct pthread_mutex_t {
+        value: c_int,
+    }
+
+    pub struct pthread_cond_t {
+        value: c_int,
+    }
+
+    pub struct pthread_rwlock_t {
+        lock: pthread_mutex_t,
+        cond: pthread_cond_t,
+        numLocks: c_int,
+        writerThreadId: c_int,
+        pendingReaders: c_int,
+        pendingWriters: c_int,
+        attr: i32,
+        __reserved: [c_char; 12],
+    }
+
+    pub struct pthread_barrier_t {
+        __private: [i32; 8],
+    }
+
+    pub struct pthread_spinlock_t {
+        __private: [i32; 2],
+    }
+
+    pub struct passwd {
+        pub
pw_name: *mut c_char, + pub pw_passwd: *mut c_char, + pub pw_uid: crate::uid_t, + pub pw_gid: crate::gid_t, + pub pw_dir: *mut c_char, + pub pw_shell: *mut c_char, + } + + pub struct statfs { + pub f_type: u32, + pub f_bsize: u32, + pub f_blocks: u64, + pub f_bfree: u64, + pub f_bavail: u64, + pub f_files: u64, + pub f_ffree: u64, + pub f_fsid: crate::__fsid_t, + pub f_namelen: u32, + pub f_frsize: u32, + pub f_flags: u32, + pub f_spare: [u32; 4], + } + + pub struct sysinfo { + pub uptime: c_long, + pub loads: [c_ulong; 3], + pub totalram: c_ulong, + pub freeram: c_ulong, + pub sharedram: c_ulong, + pub bufferram: c_ulong, + pub totalswap: c_ulong, + pub freeswap: c_ulong, + pub procs: c_ushort, + pub pad: c_ushort, + pub totalhigh: c_ulong, + pub freehigh: c_ulong, + pub mem_unit: c_uint, + pub _f: [c_char; 8], + } +} + +s_no_extra_traits! { + pub struct sigset64_t { + __bits: [c_ulong; 2], + } +} + +// These constants must be of the same type of sigaction.sa_flags +pub const SA_NOCLDSTOP: c_int = 0x00000001; +pub const SA_NOCLDWAIT: c_int = 0x00000002; +pub const SA_NODEFER: c_int = 0x40000000; +pub const SA_ONSTACK: c_int = 0x08000000; +pub const SA_RESETHAND: c_int = 0x80000000; +pub const SA_RESTART: c_int = 0x10000000; +pub const SA_SIGINFO: c_int = 0x00000004; + +pub const RTLD_GLOBAL: c_int = 2; +pub const RTLD_NOW: c_int = 0; +pub const RTLD_DEFAULT: *mut c_void = -1isize as *mut c_void; + +pub const PTRACE_GETFPREGS: c_int = 14; +pub const PTRACE_SETFPREGS: c_int = 15; + +pub const PTHREAD_MUTEX_INITIALIZER: pthread_mutex_t = pthread_mutex_t { value: 0 }; +pub const PTHREAD_COND_INITIALIZER: pthread_cond_t = pthread_cond_t { value: 0 }; +pub const PTHREAD_RWLOCK_INITIALIZER: pthread_rwlock_t = pthread_rwlock_t { + lock: PTHREAD_MUTEX_INITIALIZER, + cond: PTHREAD_COND_INITIALIZER, + numLocks: 0, + writerThreadId: 0, + pendingReaders: 0, + pendingWriters: 0, + attr: 0, + __reserved: [0; 12], +}; +pub const PTHREAD_STACK_MIN: size_t = 4096 * 2; +pub const CPU_SETSIZE: size_t = 32; +pub const __CPU_BITS: size_t = 32; + +pub const UT_LINESIZE: usize = 8; +pub const UT_NAMESIZE: usize = 8; +pub const UT_HOSTSIZE: usize = 16; + +pub const SIGSTKSZ: size_t = 8192; +pub const MINSIGSTKSZ: size_t = 2048; + +extern "C" { + pub fn timegm64(tm: *const crate::tm) -> crate::time64_t; +} + +cfg_if! { + if #[cfg(target_arch = "x86")] { + mod x86; + pub use self::x86::*; + } else if #[cfg(target_arch = "arm")] { + mod arm; + pub use self::arm::*; + } else { + // Unknown target_arch + } +} diff --git a/vendor/libc/src/unix/linux_like/android/b32/x86/mod.rs b/vendor/libc/src/unix/linux_like/android/b32/x86/mod.rs new file mode 100644 index 00000000000000..ca46c3c462246a --- /dev/null +++ b/vendor/libc/src/unix/linux_like/android/b32/x86/mod.rs @@ -0,0 +1,604 @@ +use crate::prelude::*; + +pub type wchar_t = i32; +pub type greg_t = i32; + +s! { + pub struct _libc_fpreg { + pub significand: [u16; 4], + pub exponent: u16, + } + + pub struct _libc_fpstate { + pub cw: c_ulong, + pub sw: c_ulong, + pub tag: c_ulong, + pub ipoff: c_ulong, + pub cssel: c_ulong, + pub dataoff: c_ulong, + pub datasel: c_ulong, + pub _st: [_libc_fpreg; 8], + pub status: c_ulong, + } + + pub struct mcontext_t { + pub gregs: [greg_t; 19], + pub fpregs: *mut _libc_fpstate, + pub oldmask: c_ulong, + pub cr2: c_ulong, + } +} + +s_no_extra_traits! { + pub struct __c_anonymous_uc_sigmask_with_padding { + pub uc_sigmask: crate::sigset_t, + /* Android has a wrong (smaller) sigset_t on x86. 
*/
+        __padding_rt_sigset: u32,
+    }
+
+    pub union __c_anonymous_uc_sigmask {
+        uc_sigmask: __c_anonymous_uc_sigmask_with_padding,
+        uc_sigmask64: crate::sigset64_t,
+    }
+
+    pub struct ucontext_t {
+        pub uc_flags: c_ulong,
+        pub uc_link: *mut ucontext_t,
+        pub uc_stack: crate::stack_t,
+        pub uc_mcontext: mcontext_t,
+        pub uc_sigmask__c_anonymous_union: __c_anonymous_uc_sigmask,
+        __padding_rt_sigset: u32,
+        __fpregs_mem: _libc_fpstate,
+    }
+
+    #[repr(align(8))]
+    pub struct max_align_t {
+        priv_: [f64; 2],
+    }
+}
+
+cfg_if! {
+    if #[cfg(feature = "extra_traits")] {
+        impl PartialEq for __c_anonymous_uc_sigmask_with_padding {
+            fn eq(&self, other: &__c_anonymous_uc_sigmask_with_padding) -> bool {
+                self.uc_sigmask == other.uc_sigmask
+                // Ignore padding
+            }
+        }
+        impl Eq for __c_anonymous_uc_sigmask_with_padding {}
+        impl hash::Hash for __c_anonymous_uc_sigmask_with_padding {
+            fn hash<H: hash::Hasher>(&self, state: &mut H) {
+                self.uc_sigmask.hash(state)
+                // Ignore padding
+            }
+        }
+
+        impl PartialEq for __c_anonymous_uc_sigmask {
+            fn eq(&self, other: &__c_anonymous_uc_sigmask) -> bool {
+                unsafe { self.uc_sigmask == other.uc_sigmask }
+            }
+        }
+        impl Eq for __c_anonymous_uc_sigmask {}
+        impl hash::Hash for __c_anonymous_uc_sigmask {
+            fn hash<H: hash::Hasher>(&self, state: &mut H) {
+                unsafe { self.uc_sigmask.hash(state) }
+            }
+        }
+
+        impl PartialEq for ucontext_t {
+            fn eq(&self, other: &Self) -> bool {
+                self.uc_flags == other.uc_flags
+                    && self.uc_link == other.uc_link
+                    && self.uc_stack == other.uc_stack
+                    && self.uc_mcontext == other.uc_mcontext
+                    && self.uc_sigmask__c_anonymous_union == other.uc_sigmask__c_anonymous_union
+                // Ignore padding field
+            }
+        }
+        impl Eq for ucontext_t {}
+        impl hash::Hash for ucontext_t {
+            fn hash<H: hash::Hasher>(&self, state: &mut H) {
+                self.uc_flags.hash(state);
+                self.uc_link.hash(state);
+                self.uc_stack.hash(state);
+                self.uc_mcontext.hash(state);
+                self.uc_sigmask__c_anonymous_union.hash(state);
+                // Ignore padding field
+            }
+        }
+    }
+}
+
+pub const O_DIRECT: c_int = 0x4000;
+pub const O_DIRECTORY: c_int = 0x10000;
+pub const O_NOFOLLOW: c_int = 0x20000;
+pub const O_LARGEFILE: c_int = 0o0100000;
+
+pub const MAP_32BIT: c_int = 0x40;
+
+// Syscall table
+pub const SYS_restart_syscall: c_long = 0;
+pub const SYS_exit: c_long = 1;
+pub const SYS_fork: c_long = 2;
+pub const SYS_read: c_long = 3;
+pub const SYS_write: c_long = 4;
+pub const SYS_open: c_long = 5;
+pub const SYS_close: c_long = 6;
+pub const SYS_waitpid: c_long = 7;
+pub const SYS_creat: c_long = 8;
+pub const SYS_link: c_long = 9;
+pub const SYS_unlink: c_long = 10;
+pub const SYS_execve: c_long = 11;
+pub const SYS_chdir: c_long = 12;
+pub const SYS_time: c_long = 13;
+pub const SYS_mknod: c_long = 14;
+pub const SYS_chmod: c_long = 15;
+pub const SYS_lchown: c_long = 16;
+pub const SYS_break: c_long = 17;
+pub const SYS_oldstat: c_long = 18;
+pub const SYS_lseek: c_long = 19;
+pub const SYS_getpid: c_long = 20;
+pub const SYS_mount: c_long = 21;
+pub const SYS_umount: c_long = 22;
+pub const SYS_setuid: c_long = 23;
+pub const SYS_getuid: c_long = 24;
+pub const SYS_stime: c_long = 25;
+pub const SYS_ptrace: c_long = 26;
+pub const SYS_alarm: c_long = 27;
+pub const SYS_oldfstat: c_long = 28;
+pub const SYS_pause: c_long = 29;
+pub const SYS_utime: c_long = 30;
+pub const SYS_stty: c_long = 31;
+pub const SYS_gtty: c_long = 32;
+pub const SYS_access: c_long = 33;
+pub const SYS_nice: c_long = 34;
+pub const SYS_ftime: c_long = 35;
+pub const SYS_sync: c_long = 36;
+pub const SYS_kill: c_long = 37;
+pub const SYS_rename: c_long = 38;
+pub const SYS_mkdir: c_long = 39; +pub const SYS_rmdir: c_long = 40; +pub const SYS_dup: c_long = 41; +pub const SYS_pipe: c_long = 42; +pub const SYS_times: c_long = 43; +pub const SYS_prof: c_long = 44; +pub const SYS_brk: c_long = 45; +pub const SYS_setgid: c_long = 46; +pub const SYS_getgid: c_long = 47; +pub const SYS_signal: c_long = 48; +pub const SYS_geteuid: c_long = 49; +pub const SYS_getegid: c_long = 50; +pub const SYS_acct: c_long = 51; +pub const SYS_umount2: c_long = 52; +pub const SYS_lock: c_long = 53; +pub const SYS_ioctl: c_long = 54; +pub const SYS_fcntl: c_long = 55; +pub const SYS_mpx: c_long = 56; +pub const SYS_setpgid: c_long = 57; +pub const SYS_ulimit: c_long = 58; +pub const SYS_oldolduname: c_long = 59; +pub const SYS_umask: c_long = 60; +pub const SYS_chroot: c_long = 61; +pub const SYS_ustat: c_long = 62; +pub const SYS_dup2: c_long = 63; +pub const SYS_getppid: c_long = 64; +pub const SYS_getpgrp: c_long = 65; +pub const SYS_setsid: c_long = 66; +pub const SYS_sigaction: c_long = 67; +pub const SYS_sgetmask: c_long = 68; +pub const SYS_ssetmask: c_long = 69; +pub const SYS_setreuid: c_long = 70; +pub const SYS_setregid: c_long = 71; +pub const SYS_sigsuspend: c_long = 72; +pub const SYS_sigpending: c_long = 73; +pub const SYS_sethostname: c_long = 74; +pub const SYS_setrlimit: c_long = 75; +pub const SYS_getrlimit: c_long = 76; +pub const SYS_getrusage: c_long = 77; +pub const SYS_gettimeofday: c_long = 78; +pub const SYS_settimeofday: c_long = 79; +pub const SYS_getgroups: c_long = 80; +pub const SYS_setgroups: c_long = 81; +pub const SYS_select: c_long = 82; +pub const SYS_symlink: c_long = 83; +pub const SYS_oldlstat: c_long = 84; +pub const SYS_readlink: c_long = 85; +pub const SYS_uselib: c_long = 86; +pub const SYS_swapon: c_long = 87; +pub const SYS_reboot: c_long = 88; +pub const SYS_readdir: c_long = 89; +pub const SYS_mmap: c_long = 90; +pub const SYS_munmap: c_long = 91; +pub const SYS_truncate: c_long = 92; +pub const SYS_ftruncate: c_long = 93; +pub const SYS_fchmod: c_long = 94; +pub const SYS_fchown: c_long = 95; +pub const SYS_getpriority: c_long = 96; +pub const SYS_setpriority: c_long = 97; +pub const SYS_profil: c_long = 98; +pub const SYS_statfs: c_long = 99; +pub const SYS_fstatfs: c_long = 100; +pub const SYS_ioperm: c_long = 101; +pub const SYS_socketcall: c_long = 102; +pub const SYS_syslog: c_long = 103; +pub const SYS_setitimer: c_long = 104; +pub const SYS_getitimer: c_long = 105; +pub const SYS_stat: c_long = 106; +pub const SYS_lstat: c_long = 107; +pub const SYS_fstat: c_long = 108; +pub const SYS_olduname: c_long = 109; +pub const SYS_iopl: c_long = 110; +pub const SYS_vhangup: c_long = 111; +pub const SYS_idle: c_long = 112; +pub const SYS_vm86old: c_long = 113; +pub const SYS_wait4: c_long = 114; +pub const SYS_swapoff: c_long = 115; +pub const SYS_sysinfo: c_long = 116; +pub const SYS_ipc: c_long = 117; +pub const SYS_fsync: c_long = 118; +pub const SYS_sigreturn: c_long = 119; +pub const SYS_clone: c_long = 120; +pub const SYS_setdomainname: c_long = 121; +pub const SYS_uname: c_long = 122; +pub const SYS_modify_ldt: c_long = 123; +pub const SYS_adjtimex: c_long = 124; +pub const SYS_mprotect: c_long = 125; +pub const SYS_sigprocmask: c_long = 126; +#[deprecated(since = "0.2.70", note = "Functional up to 2.6 kernel")] +pub const SYS_create_module: c_long = 127; +pub const SYS_init_module: c_long = 128; +pub const SYS_delete_module: c_long = 129; +#[deprecated(since = "0.2.70", note = "Functional up to 2.6 kernel")] +pub 
const SYS_get_kernel_syms: c_long = 130; +pub const SYS_quotactl: c_long = 131; +pub const SYS_getpgid: c_long = 132; +pub const SYS_fchdir: c_long = 133; +pub const SYS_bdflush: c_long = 134; +pub const SYS_sysfs: c_long = 135; +pub const SYS_personality: c_long = 136; +pub const SYS_afs_syscall: c_long = 137; +pub const SYS_setfsuid: c_long = 138; +pub const SYS_setfsgid: c_long = 139; +// FIXME(android): SYS__llseek is in the NDK sources but for some reason is +// not available in the tests +// pub const SYS__llseek: c_long = 140; +pub const SYS_getdents: c_long = 141; +// FIXME(android): SYS__newselect is in the NDK sources but for some reason is +// not available in the tests +// pub const SYS__newselect: c_long = 142; +pub const SYS_flock: c_long = 143; +pub const SYS_msync: c_long = 144; +pub const SYS_readv: c_long = 145; +pub const SYS_writev: c_long = 146; +pub const SYS_getsid: c_long = 147; +pub const SYS_fdatasync: c_long = 148; +// FIXME(android): SYS__llseek is in the NDK sources but for some reason is +// not available in the tests +// pub const SYS__sysctl: c_long = 149; +pub const SYS_mlock: c_long = 150; +pub const SYS_munlock: c_long = 151; +pub const SYS_mlockall: c_long = 152; +pub const SYS_munlockall: c_long = 153; +pub const SYS_sched_setparam: c_long = 154; +pub const SYS_sched_getparam: c_long = 155; +pub const SYS_sched_setscheduler: c_long = 156; +pub const SYS_sched_getscheduler: c_long = 157; +pub const SYS_sched_yield: c_long = 158; +pub const SYS_sched_get_priority_max: c_long = 159; +pub const SYS_sched_get_priority_min: c_long = 160; +pub const SYS_sched_rr_get_interval: c_long = 161; +pub const SYS_nanosleep: c_long = 162; +pub const SYS_mremap: c_long = 163; +pub const SYS_setresuid: c_long = 164; +pub const SYS_getresuid: c_long = 165; +pub const SYS_vm86: c_long = 166; +#[deprecated(since = "0.2.70", note = "Functional up to 2.6 kernel")] +pub const SYS_query_module: c_long = 167; +pub const SYS_poll: c_long = 168; +pub const SYS_nfsservctl: c_long = 169; +pub const SYS_setresgid: c_long = 170; +pub const SYS_getresgid: c_long = 171; +pub const SYS_prctl: c_long = 172; +pub const SYS_rt_sigreturn: c_long = 173; +pub const SYS_rt_sigaction: c_long = 174; +pub const SYS_rt_sigprocmask: c_long = 175; +pub const SYS_rt_sigpending: c_long = 176; +pub const SYS_rt_sigtimedwait: c_long = 177; +pub const SYS_rt_sigqueueinfo: c_long = 178; +pub const SYS_rt_sigsuspend: c_long = 179; +pub const SYS_pread64: c_long = 180; +pub const SYS_pwrite64: c_long = 181; +pub const SYS_chown: c_long = 182; +pub const SYS_getcwd: c_long = 183; +pub const SYS_capget: c_long = 184; +pub const SYS_capset: c_long = 185; +pub const SYS_sigaltstack: c_long = 186; +pub const SYS_sendfile: c_long = 187; +pub const SYS_getpmsg: c_long = 188; +pub const SYS_putpmsg: c_long = 189; +pub const SYS_vfork: c_long = 190; +pub const SYS_ugetrlimit: c_long = 191; +pub const SYS_mmap2: c_long = 192; +pub const SYS_truncate64: c_long = 193; +pub const SYS_ftruncate64: c_long = 194; +pub const SYS_stat64: c_long = 195; +pub const SYS_lstat64: c_long = 196; +pub const SYS_fstat64: c_long = 197; +pub const SYS_lchown32: c_long = 198; +pub const SYS_getuid32: c_long = 199; +pub const SYS_getgid32: c_long = 200; +pub const SYS_geteuid32: c_long = 201; +pub const SYS_getegid32: c_long = 202; +pub const SYS_setreuid32: c_long = 203; +pub const SYS_setregid32: c_long = 204; +pub const SYS_getgroups32: c_long = 205; +pub const SYS_setgroups32: c_long = 206; +pub const SYS_fchown32: c_long = 207; +pub 
const SYS_setresuid32: c_long = 208; +pub const SYS_getresuid32: c_long = 209; +pub const SYS_setresgid32: c_long = 210; +pub const SYS_getresgid32: c_long = 211; +pub const SYS_chown32: c_long = 212; +pub const SYS_setuid32: c_long = 213; +pub const SYS_setgid32: c_long = 214; +pub const SYS_setfsuid32: c_long = 215; +pub const SYS_setfsgid32: c_long = 216; +pub const SYS_pivot_root: c_long = 217; +pub const SYS_mincore: c_long = 218; +pub const SYS_madvise: c_long = 219; +pub const SYS_getdents64: c_long = 220; +pub const SYS_fcntl64: c_long = 221; +pub const SYS_gettid: c_long = 224; +pub const SYS_readahead: c_long = 225; +pub const SYS_setxattr: c_long = 226; +pub const SYS_lsetxattr: c_long = 227; +pub const SYS_fsetxattr: c_long = 228; +pub const SYS_getxattr: c_long = 229; +pub const SYS_lgetxattr: c_long = 230; +pub const SYS_fgetxattr: c_long = 231; +pub const SYS_listxattr: c_long = 232; +pub const SYS_llistxattr: c_long = 233; +pub const SYS_flistxattr: c_long = 234; +pub const SYS_removexattr: c_long = 235; +pub const SYS_lremovexattr: c_long = 236; +pub const SYS_fremovexattr: c_long = 237; +pub const SYS_tkill: c_long = 238; +pub const SYS_sendfile64: c_long = 239; +pub const SYS_futex: c_long = 240; +pub const SYS_sched_setaffinity: c_long = 241; +pub const SYS_sched_getaffinity: c_long = 242; +pub const SYS_set_thread_area: c_long = 243; +pub const SYS_get_thread_area: c_long = 244; +pub const SYS_io_setup: c_long = 245; +pub const SYS_io_destroy: c_long = 246; +pub const SYS_io_getevents: c_long = 247; +pub const SYS_io_submit: c_long = 248; +pub const SYS_io_cancel: c_long = 249; +pub const SYS_fadvise64: c_long = 250; +pub const SYS_exit_group: c_long = 252; +pub const SYS_lookup_dcookie: c_long = 253; +pub const SYS_epoll_create: c_long = 254; +pub const SYS_epoll_ctl: c_long = 255; +pub const SYS_epoll_wait: c_long = 256; +pub const SYS_remap_file_pages: c_long = 257; +pub const SYS_set_tid_address: c_long = 258; +pub const SYS_timer_create: c_long = 259; +pub const SYS_timer_settime: c_long = 260; +pub const SYS_timer_gettime: c_long = 261; +pub const SYS_timer_getoverrun: c_long = 262; +pub const SYS_timer_delete: c_long = 263; +pub const SYS_clock_settime: c_long = 264; +pub const SYS_clock_gettime: c_long = 265; +pub const SYS_clock_getres: c_long = 266; +pub const SYS_clock_nanosleep: c_long = 267; +pub const SYS_statfs64: c_long = 268; +pub const SYS_fstatfs64: c_long = 269; +pub const SYS_tgkill: c_long = 270; +pub const SYS_utimes: c_long = 271; +pub const SYS_fadvise64_64: c_long = 272; +pub const SYS_vserver: c_long = 273; +pub const SYS_mbind: c_long = 274; +pub const SYS_get_mempolicy: c_long = 275; +pub const SYS_set_mempolicy: c_long = 276; +pub const SYS_mq_open: c_long = 277; +pub const SYS_mq_unlink: c_long = 278; +pub const SYS_mq_timedsend: c_long = 279; +pub const SYS_mq_timedreceive: c_long = 280; +pub const SYS_mq_notify: c_long = 281; +pub const SYS_mq_getsetattr: c_long = 282; +pub const SYS_kexec_load: c_long = 283; +pub const SYS_waitid: c_long = 284; +pub const SYS_add_key: c_long = 286; +pub const SYS_request_key: c_long = 287; +pub const SYS_keyctl: c_long = 288; +pub const SYS_ioprio_set: c_long = 289; +pub const SYS_ioprio_get: c_long = 290; +pub const SYS_inotify_init: c_long = 291; +pub const SYS_inotify_add_watch: c_long = 292; +pub const SYS_inotify_rm_watch: c_long = 293; +pub const SYS_migrate_pages: c_long = 294; +pub const SYS_openat: c_long = 295; +pub const SYS_mkdirat: c_long = 296; +pub const SYS_mknodat: c_long = 297; +pub 
const SYS_fchownat: c_long = 298; +pub const SYS_futimesat: c_long = 299; +pub const SYS_fstatat64: c_long = 300; +pub const SYS_unlinkat: c_long = 301; +pub const SYS_renameat: c_long = 302; +pub const SYS_linkat: c_long = 303; +pub const SYS_symlinkat: c_long = 304; +pub const SYS_readlinkat: c_long = 305; +pub const SYS_fchmodat: c_long = 306; +pub const SYS_faccessat: c_long = 307; +pub const SYS_pselect6: c_long = 308; +pub const SYS_ppoll: c_long = 309; +pub const SYS_unshare: c_long = 310; +pub const SYS_set_robust_list: c_long = 311; +pub const SYS_get_robust_list: c_long = 312; +pub const SYS_splice: c_long = 313; +pub const SYS_sync_file_range: c_long = 314; +pub const SYS_tee: c_long = 315; +pub const SYS_vmsplice: c_long = 316; +pub const SYS_move_pages: c_long = 317; +pub const SYS_getcpu: c_long = 318; +pub const SYS_epoll_pwait: c_long = 319; +pub const SYS_utimensat: c_long = 320; +pub const SYS_signalfd: c_long = 321; +pub const SYS_timerfd_create: c_long = 322; +pub const SYS_eventfd: c_long = 323; +pub const SYS_fallocate: c_long = 324; +pub const SYS_timerfd_settime: c_long = 325; +pub const SYS_timerfd_gettime: c_long = 326; +pub const SYS_signalfd4: c_long = 327; +pub const SYS_eventfd2: c_long = 328; +pub const SYS_epoll_create1: c_long = 329; +pub const SYS_dup3: c_long = 330; +pub const SYS_pipe2: c_long = 331; +pub const SYS_inotify_init1: c_long = 332; +pub const SYS_preadv: c_long = 333; +pub const SYS_pwritev: c_long = 334; +pub const SYS_rt_tgsigqueueinfo: c_long = 335; +pub const SYS_perf_event_open: c_long = 336; +pub const SYS_recvmmsg: c_long = 337; +pub const SYS_fanotify_init: c_long = 338; +pub const SYS_fanotify_mark: c_long = 339; +pub const SYS_prlimit64: c_long = 340; +pub const SYS_name_to_handle_at: c_long = 341; +pub const SYS_open_by_handle_at: c_long = 342; +pub const SYS_clock_adjtime: c_long = 343; +pub const SYS_syncfs: c_long = 344; +pub const SYS_sendmmsg: c_long = 345; +pub const SYS_setns: c_long = 346; +pub const SYS_process_vm_readv: c_long = 347; +pub const SYS_process_vm_writev: c_long = 348; +pub const SYS_kcmp: c_long = 349; +pub const SYS_finit_module: c_long = 350; +pub const SYS_sched_setattr: c_long = 351; +pub const SYS_sched_getattr: c_long = 352; +pub const SYS_renameat2: c_long = 353; +pub const SYS_seccomp: c_long = 354; +pub const SYS_getrandom: c_long = 355; +pub const SYS_memfd_create: c_long = 356; +pub const SYS_bpf: c_long = 357; +pub const SYS_execveat: c_long = 358; +pub const SYS_socket: c_long = 359; +pub const SYS_socketpair: c_long = 360; +pub const SYS_bind: c_long = 361; +pub const SYS_connect: c_long = 362; +pub const SYS_listen: c_long = 363; +pub const SYS_accept4: c_long = 364; +pub const SYS_getsockopt: c_long = 365; +pub const SYS_setsockopt: c_long = 366; +pub const SYS_getsockname: c_long = 367; +pub const SYS_getpeername: c_long = 368; +pub const SYS_sendto: c_long = 369; +pub const SYS_sendmsg: c_long = 370; +pub const SYS_recvfrom: c_long = 371; +pub const SYS_recvmsg: c_long = 372; +pub const SYS_shutdown: c_long = 373; +pub const SYS_userfaultfd: c_long = 374; +pub const SYS_membarrier: c_long = 375; +pub const SYS_mlock2: c_long = 376; +pub const SYS_copy_file_range: c_long = 377; +pub const SYS_preadv2: c_long = 378; +pub const SYS_pwritev2: c_long = 379; +pub const SYS_pkey_mprotect: c_long = 380; +pub const SYS_pkey_alloc: c_long = 381; +pub const SYS_pkey_free: c_long = 382; +pub const SYS_statx: c_long = 383; +pub const SYS_pidfd_send_signal: c_long = 424; +pub const SYS_io_uring_setup: 
c_long = 425;
+pub const SYS_io_uring_enter: c_long = 426;
+pub const SYS_io_uring_register: c_long = 427;
+pub const SYS_open_tree: c_long = 428;
+pub const SYS_move_mount: c_long = 429;
+pub const SYS_fsopen: c_long = 430;
+pub const SYS_fsconfig: c_long = 431;
+pub const SYS_fsmount: c_long = 432;
+pub const SYS_fspick: c_long = 433;
+pub const SYS_pidfd_open: c_long = 434;
+pub const SYS_clone3: c_long = 435;
+pub const SYS_close_range: c_long = 436;
+pub const SYS_openat2: c_long = 437;
+pub const SYS_pidfd_getfd: c_long = 438;
+pub const SYS_faccessat2: c_long = 439;
+pub const SYS_process_madvise: c_long = 440;
+pub const SYS_epoll_pwait2: c_long = 441;
+pub const SYS_mount_setattr: c_long = 442;
+pub const SYS_quotactl_fd: c_long = 443;
+pub const SYS_landlock_create_ruleset: c_long = 444;
+pub const SYS_landlock_add_rule: c_long = 445;
+pub const SYS_landlock_restrict_self: c_long = 446;
+pub const SYS_memfd_secret: c_long = 447;
+pub const SYS_process_mrelease: c_long = 448;
+pub const SYS_futex_waitv: c_long = 449;
+pub const SYS_set_mempolicy_home_node: c_long = 450;
+
+// offsets in user_regs_structs, from sys/reg.h
+pub const EBX: c_int = 0;
+pub const ECX: c_int = 1;
+pub const EDX: c_int = 2;
+pub const ESI: c_int = 3;
+pub const EDI: c_int = 4;
+pub const EBP: c_int = 5;
+pub const EAX: c_int = 6;
+pub const DS: c_int = 7;
+pub const ES: c_int = 8;
+pub const FS: c_int = 9;
+pub const GS: c_int = 10;
+pub const ORIG_EAX: c_int = 11;
+pub const EIP: c_int = 12;
+pub const CS: c_int = 13;
+pub const EFL: c_int = 14;
+pub const UESP: c_int = 15;
+pub const SS: c_int = 16;
+
+// offsets in mcontext_t.gregs from sys/ucontext.h
+pub const REG_GS: c_int = 0;
+pub const REG_FS: c_int = 1;
+pub const REG_ES: c_int = 2;
+pub const REG_DS: c_int = 3;
+pub const REG_EDI: c_int = 4;
+pub const REG_ESI: c_int = 5;
+pub const REG_EBP: c_int = 6;
+pub const REG_ESP: c_int = 7;
+pub const REG_EBX: c_int = 8;
+pub const REG_EDX: c_int = 9;
+pub const REG_ECX: c_int = 10;
+pub const REG_EAX: c_int = 11;
+pub const REG_TRAPNO: c_int = 12;
+pub const REG_ERR: c_int = 13;
+pub const REG_EIP: c_int = 14;
+pub const REG_CS: c_int = 15;
+pub const REG_EFL: c_int = 16;
+pub const REG_UESP: c_int = 17;
+pub const REG_SS: c_int = 18;
+
+// From NDK's asm/auxvec.h
+pub const AT_SYSINFO: c_ulong = 32;
+pub const AT_SYSINFO_EHDR: c_ulong = 33;
+pub const AT_VECTOR_SIZE_ARCH: c_ulong = 3;
+
+// socketcall values from linux/net.h (only the needed ones, and not public)
+const SYS_ACCEPT4: c_int = 18;
+
+f! {
+    // Sadly, Android before 5.0 (API level 21), the accept4 syscall is not
+    // exposed by the libc. As work-around, we implement it as raw syscall.
+    // Note that for x86, the `accept4` syscall is not available either,
+    // and we must use the `socketcall` syscall instead.
+    // This workaround can be removed if the minimum Android version is bumped.
+    // When the workaround is removed, `accept4` can be moved back
+    // to `linux_like/mod.rs`
+    pub fn accept4(
+        fd: c_int,
+        addr: *mut crate::sockaddr,
+        len: *mut crate::socklen_t,
+        flg: c_int,
+    ) -> c_int {
+        // Arguments are passed as array of `long int`
+        // (which is big enough on x86 for a pointer).
+        let mut args = [fd as c_long, addr as c_long, len as c_long, flg as c_long];
+        crate::syscall(SYS_socketcall, SYS_ACCEPT4, args[..].as_mut_ptr())
+    }
+}
diff --git a/vendor/libc/src/unix/linux_like/android/b64/aarch64/mod.rs b/vendor/libc/src/unix/linux_like/android/b64/aarch64/mod.rs
new file mode 100644
index 00000000000000..3c6131089ee892
--- /dev/null
+++ b/vendor/libc/src/unix/linux_like/android/b64/aarch64/mod.rs
@@ -0,0 +1,473 @@
+use crate::off64_t;
+use crate::prelude::*;
+
+pub type wchar_t = u32;
+pub type __u64 = c_ulonglong;
+pub type __s64 = c_longlong;
+
+s! {
+    pub struct stat {
+        pub st_dev: crate::dev_t,
+        pub st_ino: crate::ino_t,
+        pub st_mode: c_uint,
+        pub st_nlink: crate::nlink_t,
+        pub st_uid: crate::uid_t,
+        pub st_gid: crate::gid_t,
+        pub st_rdev: crate::dev_t,
+        __pad1: c_ulong,
+        pub st_size: off64_t,
+        pub st_blksize: c_int,
+        __pad2: c_int,
+        pub st_blocks: c_long,
+        pub st_atime: crate::time_t,
+        pub st_atime_nsec: c_long,
+        pub st_mtime: crate::time_t,
+        pub st_mtime_nsec: c_long,
+        pub st_ctime: crate::time_t,
+        pub st_ctime_nsec: c_long,
+        __unused4: c_uint,
+        __unused5: c_uint,
+    }
+
+    pub struct stat64 {
+        pub st_dev: crate::dev_t,
+        pub st_ino: crate::ino_t,
+        pub st_mode: c_uint,
+        pub st_nlink: crate::nlink_t,
+        pub st_uid: crate::uid_t,
+        pub st_gid: crate::gid_t,
+        pub st_rdev: crate::dev_t,
+        __pad1: c_ulong,
+        pub st_size: off64_t,
+        pub st_blksize: c_int,
+        __pad2: c_int,
+        pub st_blocks: c_long,
+        pub st_atime: crate::time_t,
+        pub st_atime_nsec: c_long,
+        pub st_mtime: crate::time_t,
+        pub st_mtime_nsec: c_long,
+        pub st_ctime: crate::time_t,
+        pub st_ctime_nsec: c_long,
+        __unused4: c_uint,
+        __unused5: c_uint,
+    }
+
+    pub struct user_regs_struct {
+        pub regs: [u64; 31],
+        pub sp: u64,
+        pub pc: u64,
+        pub pstate: u64,
+    }
+
+    pub struct ucontext_t {
+        pub uc_flags: c_ulong,
+        pub uc_link: *mut ucontext_t,
+        pub uc_stack: crate::stack_t,
+        pub uc_sigmask: crate::sigset_t,
+        pub uc_mcontext: mcontext_t,
+    }
+
+    #[repr(align(16))]
+    pub struct mcontext_t {
+        pub fault_address: c_ulonglong,
+        pub regs: [c_ulonglong; 31],
+        pub sp: c_ulonglong,
+        pub pc: c_ulonglong,
+        pub pstate: c_ulonglong,
+        __reserved: [u64; 512],
+    }
+
+    pub struct user_fpsimd_struct {
+        pub vregs: [crate::__uint128_t; 32],
+        pub fpsr: u32,
+        pub fpcr: u32,
+    }
+}
+
+s_no_extra_traits!
{ + #[repr(align(16))] + pub struct max_align_t { + priv_: [f32; 8], + } +} + +pub const O_DIRECT: c_int = 0x10000; +pub const O_DIRECTORY: c_int = 0x4000; +pub const O_NOFOLLOW: c_int = 0x8000; +pub const O_LARGEFILE: c_int = 0o400000; + +pub const SIGSTKSZ: size_t = 16384; +pub const MINSIGSTKSZ: size_t = 5120; + +// From NDK's asm/hwcap.h +pub const HWCAP_FP: c_ulong = 1 << 0; +pub const HWCAP_ASIMD: c_ulong = 1 << 1; +pub const HWCAP_EVTSTRM: c_ulong = 1 << 2; +pub const HWCAP_AES: c_ulong = 1 << 3; +pub const HWCAP_PMULL: c_ulong = 1 << 4; +pub const HWCAP_SHA1: c_ulong = 1 << 5; +pub const HWCAP_SHA2: c_ulong = 1 << 6; +pub const HWCAP_CRC32: c_ulong = 1 << 7; +pub const HWCAP_ATOMICS: c_ulong = 1 << 8; +pub const HWCAP_FPHP: c_ulong = 1 << 9; +pub const HWCAP_ASIMDHP: c_ulong = 1 << 10; +pub const HWCAP_CPUID: c_ulong = 1 << 11; +pub const HWCAP_ASIMDRDM: c_ulong = 1 << 12; +pub const HWCAP_JSCVT: c_ulong = 1 << 13; +pub const HWCAP_FCMA: c_ulong = 1 << 14; +pub const HWCAP_LRCPC: c_ulong = 1 << 15; +pub const HWCAP_DCPOP: c_ulong = 1 << 16; +pub const HWCAP_SHA3: c_ulong = 1 << 17; +pub const HWCAP_SM3: c_ulong = 1 << 18; +pub const HWCAP_SM4: c_ulong = 1 << 19; +pub const HWCAP_ASIMDDP: c_ulong = 1 << 20; +pub const HWCAP_SHA512: c_ulong = 1 << 21; +pub const HWCAP_SVE: c_ulong = 1 << 22; +pub const HWCAP_ASIMDFHM: c_ulong = 1 << 23; +pub const HWCAP_DIT: c_ulong = 1 << 24; +pub const HWCAP_USCAT: c_ulong = 1 << 25; +pub const HWCAP_ILRCPC: c_ulong = 1 << 26; +pub const HWCAP_FLAGM: c_ulong = 1 << 27; +pub const HWCAP_SSBS: c_ulong = 1 << 28; +pub const HWCAP_SB: c_ulong = 1 << 29; +pub const HWCAP_PACA: c_ulong = 1 << 30; +pub const HWCAP_PACG: c_ulong = 1 << 31; +pub const HWCAP2_DCPODP: c_ulong = 1 << 0; +pub const HWCAP2_SVE2: c_ulong = 1 << 1; +pub const HWCAP2_SVEAES: c_ulong = 1 << 2; +pub const HWCAP2_SVEPMULL: c_ulong = 1 << 3; +pub const HWCAP2_SVEBITPERM: c_ulong = 1 << 4; +pub const HWCAP2_SVESHA3: c_ulong = 1 << 5; +pub const HWCAP2_SVESM4: c_ulong = 1 << 6; +pub const HWCAP2_FLAGM2: c_ulong = 1 << 7; +pub const HWCAP2_FRINT: c_ulong = 1 << 8; +pub const HWCAP2_SVEI8MM: c_ulong = 1 << 9; +pub const HWCAP2_SVEF32MM: c_ulong = 1 << 10; +pub const HWCAP2_SVEF64MM: c_ulong = 1 << 11; +pub const HWCAP2_SVEBF16: c_ulong = 1 << 12; +pub const HWCAP2_I8MM: c_ulong = 1 << 13; +pub const HWCAP2_BF16: c_ulong = 1 << 14; +pub const HWCAP2_DGH: c_ulong = 1 << 15; +pub const HWCAP2_RNG: c_ulong = 1 << 16; +pub const HWCAP2_BTI: c_ulong = 1 << 17; +pub const HWCAP2_MTE: c_ulong = 1 << 18; +pub const HWCAP2_ECV: c_ulong = 1 << 19; +pub const HWCAP2_AFP: c_ulong = 1 << 20; +pub const HWCAP2_RPRES: c_ulong = 1 << 21; +pub const HWCAP2_MTE3: c_ulong = 1 << 22; +pub const HWCAP2_SME: c_ulong = 1 << 23; +pub const HWCAP2_SME_I16I64: c_ulong = 1 << 24; +pub const HWCAP2_SME_F64F64: c_ulong = 1 << 25; +pub const HWCAP2_SME_I8I32: c_ulong = 1 << 26; +pub const HWCAP2_SME_F16F32: c_ulong = 1 << 27; +pub const HWCAP2_SME_B16F32: c_ulong = 1 << 28; +pub const HWCAP2_SME_F32F32: c_ulong = 1 << 29; +pub const HWCAP2_SME_FA64: c_ulong = 1 << 30; +pub const HWCAP2_WFXT: c_ulong = 1 << 31; +pub const HWCAP2_EBF16: c_ulong = 1 << 32; +pub const HWCAP2_SVE_EBF16: c_ulong = 1 << 33; + +pub const SYS_io_setup: c_long = 0; +pub const SYS_io_destroy: c_long = 1; +pub const SYS_io_submit: c_long = 2; +pub const SYS_io_cancel: c_long = 3; +pub const SYS_io_getevents: c_long = 4; +pub const SYS_setxattr: c_long = 5; +pub const SYS_lsetxattr: c_long = 6; +pub const SYS_fsetxattr: c_long = 7; +pub const 
SYS_getxattr: c_long = 8; +pub const SYS_lgetxattr: c_long = 9; +pub const SYS_fgetxattr: c_long = 10; +pub const SYS_listxattr: c_long = 11; +pub const SYS_llistxattr: c_long = 12; +pub const SYS_flistxattr: c_long = 13; +pub const SYS_removexattr: c_long = 14; +pub const SYS_lremovexattr: c_long = 15; +pub const SYS_fremovexattr: c_long = 16; +pub const SYS_getcwd: c_long = 17; +pub const SYS_lookup_dcookie: c_long = 18; +pub const SYS_eventfd2: c_long = 19; +pub const SYS_epoll_create1: c_long = 20; +pub const SYS_epoll_ctl: c_long = 21; +pub const SYS_epoll_pwait: c_long = 22; +pub const SYS_dup: c_long = 23; +pub const SYS_dup3: c_long = 24; +pub const SYS_fcntl: c_long = 25; +pub const SYS_inotify_init1: c_long = 26; +pub const SYS_inotify_add_watch: c_long = 27; +pub const SYS_inotify_rm_watch: c_long = 28; +pub const SYS_ioctl: c_long = 29; +pub const SYS_ioprio_set: c_long = 30; +pub const SYS_ioprio_get: c_long = 31; +pub const SYS_flock: c_long = 32; +pub const SYS_mknodat: c_long = 33; +pub const SYS_mkdirat: c_long = 34; +pub const SYS_unlinkat: c_long = 35; +pub const SYS_symlinkat: c_long = 36; +pub const SYS_linkat: c_long = 37; +pub const SYS_renameat: c_long = 38; +pub const SYS_umount2: c_long = 39; +pub const SYS_mount: c_long = 40; +pub const SYS_pivot_root: c_long = 41; +pub const SYS_nfsservctl: c_long = 42; +pub const SYS_fallocate: c_long = 47; +pub const SYS_faccessat: c_long = 48; +pub const SYS_chdir: c_long = 49; +pub const SYS_fchdir: c_long = 50; +pub const SYS_chroot: c_long = 51; +pub const SYS_fchmod: c_long = 52; +pub const SYS_fchmodat: c_long = 53; +pub const SYS_fchownat: c_long = 54; +pub const SYS_fchown: c_long = 55; +pub const SYS_openat: c_long = 56; +pub const SYS_close: c_long = 57; +pub const SYS_vhangup: c_long = 58; +pub const SYS_pipe2: c_long = 59; +pub const SYS_quotactl: c_long = 60; +pub const SYS_getdents64: c_long = 61; +pub const SYS_lseek: c_long = 62; +pub const SYS_read: c_long = 63; +pub const SYS_write: c_long = 64; +pub const SYS_readv: c_long = 65; +pub const SYS_writev: c_long = 66; +pub const SYS_pread64: c_long = 67; +pub const SYS_pwrite64: c_long = 68; +pub const SYS_preadv: c_long = 69; +pub const SYS_pwritev: c_long = 70; +pub const SYS_pselect6: c_long = 72; +pub const SYS_ppoll: c_long = 73; +pub const SYS_signalfd4: c_long = 74; +pub const SYS_vmsplice: c_long = 75; +pub const SYS_splice: c_long = 76; +pub const SYS_tee: c_long = 77; +pub const SYS_readlinkat: c_long = 78; +pub const SYS_sync: c_long = 81; +pub const SYS_fsync: c_long = 82; +pub const SYS_fdatasync: c_long = 83; +pub const SYS_sync_file_range: c_long = 84; +pub const SYS_timerfd_create: c_long = 85; +pub const SYS_timerfd_settime: c_long = 86; +pub const SYS_timerfd_gettime: c_long = 87; +pub const SYS_utimensat: c_long = 88; +pub const SYS_acct: c_long = 89; +pub const SYS_capget: c_long = 90; +pub const SYS_capset: c_long = 91; +pub const SYS_personality: c_long = 92; +pub const SYS_exit: c_long = 93; +pub const SYS_exit_group: c_long = 94; +pub const SYS_waitid: c_long = 95; +pub const SYS_set_tid_address: c_long = 96; +pub const SYS_unshare: c_long = 97; +pub const SYS_futex: c_long = 98; +pub const SYS_set_robust_list: c_long = 99; +pub const SYS_get_robust_list: c_long = 100; +pub const SYS_nanosleep: c_long = 101; +pub const SYS_getitimer: c_long = 102; +pub const SYS_setitimer: c_long = 103; +pub const SYS_kexec_load: c_long = 104; +pub const SYS_init_module: c_long = 105; +pub const SYS_delete_module: c_long = 106; +pub const 
SYS_timer_create: c_long = 107; +pub const SYS_timer_gettime: c_long = 108; +pub const SYS_timer_getoverrun: c_long = 109; +pub const SYS_timer_settime: c_long = 110; +pub const SYS_timer_delete: c_long = 111; +pub const SYS_clock_settime: c_long = 112; +pub const SYS_clock_gettime: c_long = 113; +pub const SYS_clock_getres: c_long = 114; +pub const SYS_clock_nanosleep: c_long = 115; +pub const SYS_syslog: c_long = 116; +pub const SYS_ptrace: c_long = 117; +pub const SYS_sched_setparam: c_long = 118; +pub const SYS_sched_setscheduler: c_long = 119; +pub const SYS_sched_getscheduler: c_long = 120; +pub const SYS_sched_getparam: c_long = 121; +pub const SYS_sched_setaffinity: c_long = 122; +pub const SYS_sched_getaffinity: c_long = 123; +pub const SYS_sched_yield: c_long = 124; +pub const SYS_sched_get_priority_max: c_long = 125; +pub const SYS_sched_get_priority_min: c_long = 126; +pub const SYS_sched_rr_get_interval: c_long = 127; +pub const SYS_restart_syscall: c_long = 128; +pub const SYS_kill: c_long = 129; +pub const SYS_tkill: c_long = 130; +pub const SYS_tgkill: c_long = 131; +pub const SYS_sigaltstack: c_long = 132; +pub const SYS_rt_sigsuspend: c_long = 133; +pub const SYS_rt_sigaction: c_long = 134; +pub const SYS_rt_sigprocmask: c_long = 135; +pub const SYS_rt_sigpending: c_long = 136; +pub const SYS_rt_sigtimedwait: c_long = 137; +pub const SYS_rt_sigqueueinfo: c_long = 138; +pub const SYS_rt_sigreturn: c_long = 139; +pub const SYS_setpriority: c_long = 140; +pub const SYS_getpriority: c_long = 141; +pub const SYS_reboot: c_long = 142; +pub const SYS_setregid: c_long = 143; +pub const SYS_setgid: c_long = 144; +pub const SYS_setreuid: c_long = 145; +pub const SYS_setuid: c_long = 146; +pub const SYS_setresuid: c_long = 147; +pub const SYS_getresuid: c_long = 148; +pub const SYS_setresgid: c_long = 149; +pub const SYS_getresgid: c_long = 150; +pub const SYS_setfsuid: c_long = 151; +pub const SYS_setfsgid: c_long = 152; +pub const SYS_times: c_long = 153; +pub const SYS_setpgid: c_long = 154; +pub const SYS_getpgid: c_long = 155; +pub const SYS_getsid: c_long = 156; +pub const SYS_setsid: c_long = 157; +pub const SYS_getgroups: c_long = 158; +pub const SYS_setgroups: c_long = 159; +pub const SYS_uname: c_long = 160; +pub const SYS_sethostname: c_long = 161; +pub const SYS_setdomainname: c_long = 162; +pub const SYS_getrlimit: c_long = 163; +pub const SYS_setrlimit: c_long = 164; +pub const SYS_getrusage: c_long = 165; +pub const SYS_umask: c_long = 166; +pub const SYS_prctl: c_long = 167; +pub const SYS_getcpu: c_long = 168; +pub const SYS_gettimeofday: c_long = 169; +pub const SYS_settimeofday: c_long = 170; +pub const SYS_adjtimex: c_long = 171; +pub const SYS_getpid: c_long = 172; +pub const SYS_getppid: c_long = 173; +pub const SYS_getuid: c_long = 174; +pub const SYS_geteuid: c_long = 175; +pub const SYS_getgid: c_long = 176; +pub const SYS_getegid: c_long = 177; +pub const SYS_gettid: c_long = 178; +pub const SYS_sysinfo: c_long = 179; +pub const SYS_mq_open: c_long = 180; +pub const SYS_mq_unlink: c_long = 181; +pub const SYS_mq_timedsend: c_long = 182; +pub const SYS_mq_timedreceive: c_long = 183; +pub const SYS_mq_notify: c_long = 184; +pub const SYS_mq_getsetattr: c_long = 185; +pub const SYS_msgget: c_long = 186; +pub const SYS_msgctl: c_long = 187; +pub const SYS_msgrcv: c_long = 188; +pub const SYS_msgsnd: c_long = 189; +pub const SYS_semget: c_long = 190; +pub const SYS_semctl: c_long = 191; +pub const SYS_semtimedop: c_long = 192; +pub const SYS_semop: c_long = 193; 
+pub const SYS_shmget: c_long = 194; +pub const SYS_shmctl: c_long = 195; +pub const SYS_shmat: c_long = 196; +pub const SYS_shmdt: c_long = 197; +pub const SYS_socket: c_long = 198; +pub const SYS_socketpair: c_long = 199; +pub const SYS_bind: c_long = 200; +pub const SYS_listen: c_long = 201; +pub const SYS_accept: c_long = 202; +pub const SYS_connect: c_long = 203; +pub const SYS_getsockname: c_long = 204; +pub const SYS_getpeername: c_long = 205; +pub const SYS_sendto: c_long = 206; +pub const SYS_recvfrom: c_long = 207; +pub const SYS_setsockopt: c_long = 208; +pub const SYS_getsockopt: c_long = 209; +pub const SYS_shutdown: c_long = 210; +pub const SYS_sendmsg: c_long = 211; +pub const SYS_recvmsg: c_long = 212; +pub const SYS_readahead: c_long = 213; +pub const SYS_brk: c_long = 214; +pub const SYS_munmap: c_long = 215; +pub const SYS_mremap: c_long = 216; +pub const SYS_add_key: c_long = 217; +pub const SYS_request_key: c_long = 218; +pub const SYS_keyctl: c_long = 219; +pub const SYS_clone: c_long = 220; +pub const SYS_execve: c_long = 221; +pub const SYS_mmap: c_long = 222; +pub const SYS_swapon: c_long = 224; +pub const SYS_swapoff: c_long = 225; +pub const SYS_mprotect: c_long = 226; +pub const SYS_msync: c_long = 227; +pub const SYS_mlock: c_long = 228; +pub const SYS_munlock: c_long = 229; +pub const SYS_mlockall: c_long = 230; +pub const SYS_munlockall: c_long = 231; +pub const SYS_mincore: c_long = 232; +pub const SYS_madvise: c_long = 233; +pub const SYS_remap_file_pages: c_long = 234; +pub const SYS_mbind: c_long = 235; +pub const SYS_get_mempolicy: c_long = 236; +pub const SYS_set_mempolicy: c_long = 237; +pub const SYS_migrate_pages: c_long = 238; +pub const SYS_move_pages: c_long = 239; +pub const SYS_rt_tgsigqueueinfo: c_long = 240; +pub const SYS_perf_event_open: c_long = 241; +pub const SYS_accept4: c_long = 242; +pub const SYS_recvmmsg: c_long = 243; +pub const SYS_arch_specific_syscall: c_long = 244; +pub const SYS_wait4: c_long = 260; +pub const SYS_prlimit64: c_long = 261; +pub const SYS_fanotify_init: c_long = 262; +pub const SYS_fanotify_mark: c_long = 263; +pub const SYS_name_to_handle_at: c_long = 264; +pub const SYS_open_by_handle_at: c_long = 265; +pub const SYS_clock_adjtime: c_long = 266; +pub const SYS_syncfs: c_long = 267; +pub const SYS_setns: c_long = 268; +pub const SYS_sendmmsg: c_long = 269; +pub const SYS_process_vm_readv: c_long = 270; +pub const SYS_process_vm_writev: c_long = 271; +pub const SYS_kcmp: c_long = 272; +pub const SYS_finit_module: c_long = 273; +pub const SYS_sched_setattr: c_long = 274; +pub const SYS_sched_getattr: c_long = 275; +pub const SYS_renameat2: c_long = 276; +pub const SYS_seccomp: c_long = 277; +pub const SYS_getrandom: c_long = 278; +pub const SYS_memfd_create: c_long = 279; +pub const SYS_bpf: c_long = 280; +pub const SYS_execveat: c_long = 281; +pub const SYS_userfaultfd: c_long = 282; +pub const SYS_membarrier: c_long = 283; +pub const SYS_mlock2: c_long = 284; +pub const SYS_copy_file_range: c_long = 285; +pub const SYS_preadv2: c_long = 286; +pub const SYS_pwritev2: c_long = 287; +pub const SYS_pkey_mprotect: c_long = 288; +pub const SYS_pkey_alloc: c_long = 289; +pub const SYS_pkey_free: c_long = 290; +pub const SYS_statx: c_long = 291; +pub const SYS_pidfd_send_signal: c_long = 424; +pub const SYS_io_uring_setup: c_long = 425; +pub const SYS_io_uring_enter: c_long = 426; +pub const SYS_io_uring_register: c_long = 427; +pub const SYS_open_tree: c_long = 428; +pub const SYS_move_mount: c_long = 429; +pub const 
SYS_fsopen: c_long = 430; +pub const SYS_fsconfig: c_long = 431; +pub const SYS_fsmount: c_long = 432; +pub const SYS_fspick: c_long = 433; +pub const SYS_pidfd_open: c_long = 434; +pub const SYS_clone3: c_long = 435; +pub const SYS_close_range: c_long = 436; +pub const SYS_openat2: c_long = 437; +pub const SYS_pidfd_getfd: c_long = 438; +pub const SYS_faccessat2: c_long = 439; +pub const SYS_process_madvise: c_long = 440; +pub const SYS_epoll_pwait2: c_long = 441; +pub const SYS_mount_setattr: c_long = 442; +pub const SYS_quotactl_fd: c_long = 443; +pub const SYS_landlock_create_ruleset: c_long = 444; +pub const SYS_landlock_add_rule: c_long = 445; +pub const SYS_landlock_restrict_self: c_long = 446; +pub const SYS_memfd_secret: c_long = 447; +pub const SYS_process_mrelease: c_long = 448; +pub const SYS_futex_waitv: c_long = 449; +pub const SYS_set_mempolicy_home_node: c_long = 450; +pub const SYS_syscalls: c_long = 451; + +pub const PROT_BTI: c_int = 0x10; +pub const PROT_MTE: c_int = 0x20; + +// From NDK's asm/auxvec.h +pub const AT_SYSINFO_EHDR: c_ulong = 33; +pub const AT_VECTOR_SIZE_ARCH: c_ulong = 2; diff --git a/vendor/libc/src/unix/linux_like/android/b64/mod.rs b/vendor/libc/src/unix/linux_like/android/b64/mod.rs new file mode 100644 index 00000000000000..46ceed4c6dcba2 --- /dev/null +++ b/vendor/libc/src/unix/linux_like/android/b64/mod.rs @@ -0,0 +1,292 @@ +use crate::prelude::*; + +// The following definitions are correct for aarch64 and x86_64, +// but may be wrong for mips64 + +pub type mode_t = u32; +pub type off64_t = i64; +pub type socklen_t = u32; + +s! { + pub struct sigset_t { + __val: [c_ulong; 1], + } + + // FIXME(1.0): This should not implement `PartialEq` + #[allow(unpredictable_function_pointer_comparisons)] + pub struct sigaction { + pub sa_flags: c_int, + pub sa_sigaction: crate::sighandler_t, + pub sa_mask: crate::sigset_t, + pub sa_restorer: Option, + } + + pub struct rlimit64 { + pub rlim_cur: c_ulonglong, + pub rlim_max: c_ulonglong, + } + + pub struct pthread_attr_t { + pub flags: u32, + pub stack_base: *mut c_void, + pub stack_size: size_t, + pub guard_size: size_t, + pub sched_policy: i32, + pub sched_priority: i32, + __reserved: [c_char; 16], + } + + pub struct passwd { + pub pw_name: *mut c_char, + pub pw_passwd: *mut c_char, + pub pw_uid: crate::uid_t, + pub pw_gid: crate::gid_t, + pub pw_gecos: *mut c_char, + pub pw_dir: *mut c_char, + pub pw_shell: *mut c_char, + } + + pub struct statfs { + pub f_type: u64, + pub f_bsize: u64, + pub f_blocks: u64, + pub f_bfree: u64, + pub f_bavail: u64, + pub f_files: u64, + pub f_ffree: u64, + pub f_fsid: crate::__fsid_t, + pub f_namelen: u64, + pub f_frsize: u64, + pub f_flags: u64, + pub f_spare: [u64; 4], + } + + pub struct sysinfo { + pub uptime: c_long, + pub loads: [c_ulong; 3], + pub totalram: c_ulong, + pub freeram: c_ulong, + pub sharedram: c_ulong, + pub bufferram: c_ulong, + pub totalswap: c_ulong, + pub freeswap: c_ulong, + pub procs: c_ushort, + pub pad: c_ushort, + pub totalhigh: c_ulong, + pub freehigh: c_ulong, + pub mem_unit: c_uint, + pub _f: [c_char; 0], + } + + pub struct statfs64 { + pub f_type: u64, + pub f_bsize: u64, + pub f_blocks: u64, + pub f_bfree: u64, + pub f_bavail: u64, + pub f_files: u64, + pub f_ffree: u64, + pub f_fsid: crate::__fsid_t, + pub f_namelen: u64, + pub f_frsize: u64, + pub f_flags: u64, + pub f_spare: [u64; 4], + } + + pub struct statvfs64 { + pub f_bsize: c_ulong, + pub f_frsize: c_ulong, + pub f_blocks: u64, + pub f_bfree: u64, + pub f_bavail: u64, + pub f_files: u64, 
+ pub f_ffree: u64, + pub f_favail: u64, + pub f_fsid: c_ulong, + pub f_flag: c_ulong, + pub f_namemax: c_ulong, + __f_spare: [c_int; 6], + } + + pub struct pthread_barrier_t { + __private: [i64; 4], + } + + pub struct pthread_spinlock_t { + __private: i64, + } +} + +s_no_extra_traits! { + pub struct pthread_mutex_t { + value: c_int, + __reserved: [c_char; 36], + } + + pub struct pthread_cond_t { + value: c_int, + __reserved: [c_char; 44], + } + + pub struct pthread_rwlock_t { + numLocks: c_int, + writerThreadId: c_int, + pendingReaders: c_int, + pendingWriters: c_int, + attr: i32, + __reserved: [c_char; 36], + } + + pub struct sigset64_t { + __bits: [c_ulong; 1], + } +} + +cfg_if! { + if #[cfg(feature = "extra_traits")] { + impl PartialEq for pthread_mutex_t { + fn eq(&self, other: &pthread_mutex_t) -> bool { + self.value == other.value + && self + .__reserved + .iter() + .zip(other.__reserved.iter()) + .all(|(a, b)| a == b) + } + } + + impl Eq for pthread_mutex_t {} + + impl hash::Hash for pthread_mutex_t { + fn hash(&self, state: &mut H) { + self.value.hash(state); + self.__reserved.hash(state); + } + } + + impl PartialEq for pthread_cond_t { + fn eq(&self, other: &pthread_cond_t) -> bool { + self.value == other.value + && self + .__reserved + .iter() + .zip(other.__reserved.iter()) + .all(|(a, b)| a == b) + } + } + + impl Eq for pthread_cond_t {} + + impl hash::Hash for pthread_cond_t { + fn hash(&self, state: &mut H) { + self.value.hash(state); + self.__reserved.hash(state); + } + } + + impl PartialEq for pthread_rwlock_t { + fn eq(&self, other: &pthread_rwlock_t) -> bool { + self.numLocks == other.numLocks + && self.writerThreadId == other.writerThreadId + && self.pendingReaders == other.pendingReaders + && self.pendingWriters == other.pendingWriters + && self.attr == other.attr + && self + .__reserved + .iter() + .zip(other.__reserved.iter()) + .all(|(a, b)| a == b) + } + } + + impl Eq for pthread_rwlock_t {} + + impl hash::Hash for pthread_rwlock_t { + fn hash(&self, state: &mut H) { + self.numLocks.hash(state); + self.writerThreadId.hash(state); + self.pendingReaders.hash(state); + self.pendingWriters.hash(state); + self.attr.hash(state); + self.__reserved.hash(state); + } + } + } +} + +// These constants must be of the same type of sigaction.sa_flags +pub const SA_NOCLDSTOP: c_int = 0x00000001; +pub const SA_NOCLDWAIT: c_int = 0x00000002; +pub const SA_NODEFER: c_int = 0x40000000; +pub const SA_ONSTACK: c_int = 0x08000000; +pub const SA_RESETHAND: c_int = 0x80000000; +pub const SA_RESTART: c_int = 0x10000000; +pub const SA_SIGINFO: c_int = 0x00000004; + +pub const RTLD_GLOBAL: c_int = 0x00100; +pub const RTLD_NOW: c_int = 2; +pub const RTLD_DEFAULT: *mut c_void = ptr::null_mut(); + +pub const PTHREAD_MUTEX_INITIALIZER: pthread_mutex_t = pthread_mutex_t { + value: 0, + __reserved: [0; 36], +}; +pub const PTHREAD_COND_INITIALIZER: pthread_cond_t = pthread_cond_t { + value: 0, + __reserved: [0; 44], +}; +pub const PTHREAD_RWLOCK_INITIALIZER: pthread_rwlock_t = pthread_rwlock_t { + numLocks: 0, + writerThreadId: 0, + pendingReaders: 0, + pendingWriters: 0, + attr: 0, + __reserved: [0; 36], +}; +pub const PTHREAD_STACK_MIN: size_t = 4096 * 4; +pub const CPU_SETSIZE: size_t = 1024; +pub const __CPU_BITS: size_t = 64; + +pub const UT_LINESIZE: usize = 32; +pub const UT_NAMESIZE: usize = 32; +pub const UT_HOSTSIZE: usize = 256; + +f! { + // Sadly, Android before 5.0 (API level 21), the accept4 syscall is not + // exposed by the libc. 
As work-around, we implement it through `syscall`
+    // directly. This workaround can be removed if the minimum version of
+    // Android is bumped. When the workaround is removed, `accept4` can be
+    // moved back to `linux_like/mod.rs`
+    pub fn accept4(
+        fd: c_int,
+        addr: *mut crate::sockaddr,
+        len: *mut crate::socklen_t,
+        flg: c_int,
+    ) -> c_int {
+        crate::syscall(SYS_accept4, fd, addr, len, flg) as c_int
+    }
+}
+
+extern "C" {
+    pub fn __system_property_wait(
+        pi: *const crate::prop_info,
+        __old_serial: u32,
+        __new_serial_ptr: *mut u32,
+        __relative_timeout: *const crate::timespec,
+    ) -> bool;
+}
+
+cfg_if! {
+    if #[cfg(target_arch = "x86_64")] {
+        mod x86_64;
+        pub use self::x86_64::*;
+    } else if #[cfg(target_arch = "aarch64")] {
+        mod aarch64;
+        pub use self::aarch64::*;
+    } else if #[cfg(target_arch = "riscv64")] {
+        mod riscv64;
+        pub use self::riscv64::*;
+    } else {
+        // Unknown target_arch
+    }
+}
diff --git a/vendor/libc/src/unix/linux_like/android/b64/riscv64/mod.rs b/vendor/libc/src/unix/linux_like/android/b64/riscv64/mod.rs
new file mode 100644
index 00000000000000..ca8c727164ad74
--- /dev/null
+++ b/vendor/libc/src/unix/linux_like/android/b64/riscv64/mod.rs
@@ -0,0 +1,384 @@
+use crate::off64_t;
+use crate::prelude::*;
+
+pub type wchar_t = u32;
+pub type greg_t = i64;
+pub type __u64 = c_ulonglong;
+pub type __s64 = c_longlong;
+
+s! {
+    pub struct stat {
+        pub st_dev: crate::dev_t,
+        pub st_ino: crate::ino_t,
+        pub st_mode: c_uint,
+        pub st_nlink: c_uint,
+        pub st_uid: crate::uid_t,
+        pub st_gid: crate::gid_t,
+        pub st_rdev: crate::dev_t,
+        __pad1: c_ulong,
+        pub st_size: off64_t,
+        pub st_blksize: c_int,
+        __pad2: c_int,
+        pub st_blocks: c_long,
+        pub st_atime: crate::time_t,
+        pub st_atime_nsec: c_long,
+        pub st_mtime: crate::time_t,
+        pub st_mtime_nsec: c_long,
+        pub st_ctime: crate::time_t,
+        pub st_ctime_nsec: c_long,
+        __unused4: c_uint,
+        __unused5: c_uint,
+    }
+
+    pub struct stat64 {
+        pub st_dev: crate::dev_t,
+        pub st_ino: crate::ino_t,
+        pub st_mode: c_uint,
+        pub st_nlink: c_uint,
+        pub st_uid: crate::uid_t,
+        pub st_gid: crate::gid_t,
+        pub st_rdev: crate::dev_t,
+        __pad1: c_ulong,
+        pub st_size: off64_t,
+        pub st_blksize: c_int,
+        __pad2: c_int,
+        pub st_blocks: c_long,
+        pub st_atime: crate::time_t,
+        pub st_atime_nsec: c_long,
+        pub st_mtime: crate::time_t,
+        pub st_mtime_nsec: c_long,
+        pub st_ctime: crate::time_t,
+        pub st_ctime_nsec: c_long,
+        __unused4: c_uint,
+        __unused5: c_uint,
+    }
+}
+
+s_no_extra_traits!
{ + #[repr(align(16))] + pub struct max_align_t { + priv_: [f32; 8], + } +} + +pub const O_DIRECT: c_int = 0x40000; +pub const O_DIRECTORY: c_int = 0x200000; +pub const O_NOFOLLOW: c_int = 0x400000; +pub const O_LARGEFILE: c_int = 0x100000; + +pub const SIGSTKSZ: size_t = 8192; +pub const MINSIGSTKSZ: size_t = 2048; + +// From NDK's asm/hwcap.h +pub const COMPAT_HWCAP_ISA_I: c_ulong = 1 << (b'I' - b'A'); +pub const COMPAT_HWCAP_ISA_M: c_ulong = 1 << (b'M' - b'A'); +pub const COMPAT_HWCAP_ISA_A: c_ulong = 1 << (b'A' - b'A'); +pub const COMPAT_HWCAP_ISA_F: c_ulong = 1 << (b'F' - b'A'); +pub const COMPAT_HWCAP_ISA_D: c_ulong = 1 << (b'D' - b'A'); +pub const COMPAT_HWCAP_ISA_C: c_ulong = 1 << (b'C' - b'A'); + +pub const SYS_io_setup: c_long = 0; +pub const SYS_io_destroy: c_long = 1; +pub const SYS_io_submit: c_long = 2; +pub const SYS_io_cancel: c_long = 3; +pub const SYS_io_getevents: c_long = 4; +pub const SYS_setxattr: c_long = 5; +pub const SYS_lsetxattr: c_long = 6; +pub const SYS_fsetxattr: c_long = 7; +pub const SYS_getxattr: c_long = 8; +pub const SYS_lgetxattr: c_long = 9; +pub const SYS_fgetxattr: c_long = 10; +pub const SYS_listxattr: c_long = 11; +pub const SYS_llistxattr: c_long = 12; +pub const SYS_flistxattr: c_long = 13; +pub const SYS_removexattr: c_long = 14; +pub const SYS_lremovexattr: c_long = 15; +pub const SYS_fremovexattr: c_long = 16; +pub const SYS_getcwd: c_long = 17; +pub const SYS_lookup_dcookie: c_long = 18; +pub const SYS_eventfd2: c_long = 19; +pub const SYS_epoll_create1: c_long = 20; +pub const SYS_epoll_ctl: c_long = 21; +pub const SYS_epoll_pwait: c_long = 22; +pub const SYS_dup: c_long = 23; +pub const SYS_dup3: c_long = 24; +pub const SYS_inotify_init1: c_long = 26; +pub const SYS_inotify_add_watch: c_long = 27; +pub const SYS_inotify_rm_watch: c_long = 28; +pub const SYS_ioctl: c_long = 29; +pub const SYS_ioprio_set: c_long = 30; +pub const SYS_ioprio_get: c_long = 31; +pub const SYS_flock: c_long = 32; +pub const SYS_mknodat: c_long = 33; +pub const SYS_mkdirat: c_long = 34; +pub const SYS_unlinkat: c_long = 35; +pub const SYS_symlinkat: c_long = 36; +pub const SYS_linkat: c_long = 37; +pub const SYS_renameat: c_long = 38; +pub const SYS_umount2: c_long = 39; +pub const SYS_mount: c_long = 40; +pub const SYS_pivot_root: c_long = 41; +pub const SYS_nfsservctl: c_long = 42; +pub const SYS_fallocate: c_long = 47; +pub const SYS_faccessat: c_long = 48; +pub const SYS_chdir: c_long = 49; +pub const SYS_fchdir: c_long = 50; +pub const SYS_chroot: c_long = 51; +pub const SYS_fchmod: c_long = 52; +pub const SYS_fchmodat: c_long = 53; +pub const SYS_fchownat: c_long = 54; +pub const SYS_fchown: c_long = 55; +pub const SYS_openat: c_long = 56; +pub const SYS_close: c_long = 57; +pub const SYS_vhangup: c_long = 58; +pub const SYS_pipe2: c_long = 59; +pub const SYS_quotactl: c_long = 60; +pub const SYS_getdents64: c_long = 61; +pub const SYS_read: c_long = 63; +pub const SYS_write: c_long = 64; +pub const SYS_readv: c_long = 65; +pub const SYS_writev: c_long = 66; +pub const SYS_pread64: c_long = 67; +pub const SYS_pwrite64: c_long = 68; +pub const SYS_preadv: c_long = 69; +pub const SYS_pwritev: c_long = 70; +pub const SYS_pselect6: c_long = 72; +pub const SYS_ppoll: c_long = 73; +pub const SYS_signalfd4: c_long = 74; +pub const SYS_vmsplice: c_long = 75; +pub const SYS_splice: c_long = 76; +pub const SYS_tee: c_long = 77; +pub const SYS_readlinkat: c_long = 78; +pub const SYS_sync: c_long = 81; +pub const SYS_fsync: c_long = 82; +pub const SYS_fdatasync: c_long = 
83; +pub const SYS_sync_file_range: c_long = 84; +pub const SYS_timerfd_create: c_long = 85; +pub const SYS_timerfd_settime: c_long = 86; +pub const SYS_timerfd_gettime: c_long = 87; +pub const SYS_utimensat: c_long = 88; +pub const SYS_acct: c_long = 89; +pub const SYS_capget: c_long = 90; +pub const SYS_capset: c_long = 91; +pub const SYS_personality: c_long = 92; +pub const SYS_exit: c_long = 93; +pub const SYS_exit_group: c_long = 94; +pub const SYS_waitid: c_long = 95; +pub const SYS_set_tid_address: c_long = 96; +pub const SYS_unshare: c_long = 97; +pub const SYS_futex: c_long = 98; +pub const SYS_set_robust_list: c_long = 99; +pub const SYS_get_robust_list: c_long = 100; +pub const SYS_nanosleep: c_long = 101; +pub const SYS_getitimer: c_long = 102; +pub const SYS_setitimer: c_long = 103; +pub const SYS_kexec_load: c_long = 104; +pub const SYS_init_module: c_long = 105; +pub const SYS_delete_module: c_long = 106; +pub const SYS_timer_create: c_long = 107; +pub const SYS_timer_gettime: c_long = 108; +pub const SYS_timer_getoverrun: c_long = 109; +pub const SYS_timer_settime: c_long = 110; +pub const SYS_timer_delete: c_long = 111; +pub const SYS_clock_settime: c_long = 112; +pub const SYS_clock_gettime: c_long = 113; +pub const SYS_clock_getres: c_long = 114; +pub const SYS_clock_nanosleep: c_long = 115; +pub const SYS_syslog: c_long = 116; +pub const SYS_ptrace: c_long = 117; +pub const SYS_sched_setparam: c_long = 118; +pub const SYS_sched_setscheduler: c_long = 119; +pub const SYS_sched_getscheduler: c_long = 120; +pub const SYS_sched_getparam: c_long = 121; +pub const SYS_sched_setaffinity: c_long = 122; +pub const SYS_sched_getaffinity: c_long = 123; +pub const SYS_sched_yield: c_long = 124; +pub const SYS_sched_get_priority_max: c_long = 125; +pub const SYS_sched_get_priority_min: c_long = 126; +pub const SYS_sched_rr_get_interval: c_long = 127; +pub const SYS_restart_syscall: c_long = 128; +pub const SYS_kill: c_long = 129; +pub const SYS_tkill: c_long = 130; +pub const SYS_tgkill: c_long = 131; +pub const SYS_sigaltstack: c_long = 132; +pub const SYS_rt_sigsuspend: c_long = 133; +pub const SYS_rt_sigaction: c_long = 134; +pub const SYS_rt_sigprocmask: c_long = 135; +pub const SYS_rt_sigpending: c_long = 136; +pub const SYS_rt_sigtimedwait: c_long = 137; +pub const SYS_rt_sigqueueinfo: c_long = 138; +pub const SYS_rt_sigreturn: c_long = 139; +pub const SYS_setpriority: c_long = 140; +pub const SYS_getpriority: c_long = 141; +pub const SYS_reboot: c_long = 142; +pub const SYS_setregid: c_long = 143; +pub const SYS_setgid: c_long = 144; +pub const SYS_setreuid: c_long = 145; +pub const SYS_setuid: c_long = 146; +pub const SYS_setresuid: c_long = 147; +pub const SYS_getresuid: c_long = 148; +pub const SYS_setresgid: c_long = 149; +pub const SYS_getresgid: c_long = 150; +pub const SYS_setfsuid: c_long = 151; +pub const SYS_setfsgid: c_long = 152; +pub const SYS_times: c_long = 153; +pub const SYS_setpgid: c_long = 154; +pub const SYS_getpgid: c_long = 155; +pub const SYS_getsid: c_long = 156; +pub const SYS_setsid: c_long = 157; +pub const SYS_getgroups: c_long = 158; +pub const SYS_setgroups: c_long = 159; +pub const SYS_uname: c_long = 160; +pub const SYS_sethostname: c_long = 161; +pub const SYS_setdomainname: c_long = 162; +pub const SYS_getrlimit: c_long = 163; +pub const SYS_setrlimit: c_long = 164; +pub const SYS_getrusage: c_long = 165; +pub const SYS_umask: c_long = 166; +pub const SYS_prctl: c_long = 167; +pub const SYS_getcpu: c_long = 168; +pub const SYS_gettimeofday: 
c_long = 169; +pub const SYS_settimeofday: c_long = 170; +pub const SYS_adjtimex: c_long = 171; +pub const SYS_getpid: c_long = 172; +pub const SYS_getppid: c_long = 173; +pub const SYS_getuid: c_long = 174; +pub const SYS_geteuid: c_long = 175; +pub const SYS_getgid: c_long = 176; +pub const SYS_getegid: c_long = 177; +pub const SYS_gettid: c_long = 178; +pub const SYS_sysinfo: c_long = 179; +pub const SYS_mq_open: c_long = 180; +pub const SYS_mq_unlink: c_long = 181; +pub const SYS_mq_timedsend: c_long = 182; +pub const SYS_mq_timedreceive: c_long = 183; +pub const SYS_mq_notify: c_long = 184; +pub const SYS_mq_getsetattr: c_long = 185; +pub const SYS_msgget: c_long = 186; +pub const SYS_msgctl: c_long = 187; +pub const SYS_msgrcv: c_long = 188; +pub const SYS_msgsnd: c_long = 189; +pub const SYS_semget: c_long = 190; +pub const SYS_semctl: c_long = 191; +pub const SYS_semtimedop: c_long = 192; +pub const SYS_semop: c_long = 193; +pub const SYS_shmget: c_long = 194; +pub const SYS_shmctl: c_long = 195; +pub const SYS_shmat: c_long = 196; +pub const SYS_shmdt: c_long = 197; +pub const SYS_socket: c_long = 198; +pub const SYS_socketpair: c_long = 199; +pub const SYS_bind: c_long = 200; +pub const SYS_listen: c_long = 201; +pub const SYS_accept: c_long = 202; +pub const SYS_connect: c_long = 203; +pub const SYS_getsockname: c_long = 204; +pub const SYS_getpeername: c_long = 205; +pub const SYS_sendto: c_long = 206; +pub const SYS_recvfrom: c_long = 207; +pub const SYS_setsockopt: c_long = 208; +pub const SYS_getsockopt: c_long = 209; +pub const SYS_shutdown: c_long = 210; +pub const SYS_sendmsg: c_long = 211; +pub const SYS_recvmsg: c_long = 212; +pub const SYS_readahead: c_long = 213; +pub const SYS_brk: c_long = 214; +pub const SYS_munmap: c_long = 215; +pub const SYS_mremap: c_long = 216; +pub const SYS_add_key: c_long = 217; +pub const SYS_request_key: c_long = 218; +pub const SYS_keyctl: c_long = 219; +pub const SYS_clone: c_long = 220; +pub const SYS_execve: c_long = 221; +pub const SYS_swapon: c_long = 224; +pub const SYS_swapoff: c_long = 225; +pub const SYS_mprotect: c_long = 226; +pub const SYS_msync: c_long = 227; +pub const SYS_mlock: c_long = 228; +pub const SYS_munlock: c_long = 229; +pub const SYS_mlockall: c_long = 230; +pub const SYS_munlockall: c_long = 231; +pub const SYS_mincore: c_long = 232; +pub const SYS_madvise: c_long = 233; +pub const SYS_remap_file_pages: c_long = 234; +pub const SYS_mbind: c_long = 235; +pub const SYS_get_mempolicy: c_long = 236; +pub const SYS_set_mempolicy: c_long = 237; +pub const SYS_migrate_pages: c_long = 238; +pub const SYS_move_pages: c_long = 239; +pub const SYS_rt_tgsigqueueinfo: c_long = 240; +pub const SYS_perf_event_open: c_long = 241; +pub const SYS_accept4: c_long = 242; +pub const SYS_recvmmsg: c_long = 243; +pub const SYS_arch_specific_syscall: c_long = 244; +pub const SYS_wait4: c_long = 260; +pub const SYS_prlimit64: c_long = 261; +pub const SYS_fanotify_init: c_long = 262; +pub const SYS_fanotify_mark: c_long = 263; +pub const SYS_name_to_handle_at: c_long = 264; +pub const SYS_open_by_handle_at: c_long = 265; +pub const SYS_clock_adjtime: c_long = 266; +pub const SYS_syncfs: c_long = 267; +pub const SYS_setns: c_long = 268; +pub const SYS_sendmmsg: c_long = 269; +pub const SYS_process_vm_readv: c_long = 270; +pub const SYS_process_vm_writev: c_long = 271; +pub const SYS_kcmp: c_long = 272; +pub const SYS_finit_module: c_long = 273; +pub const SYS_sched_setattr: c_long = 274; +pub const SYS_sched_getattr: c_long = 275; +pub 
const SYS_renameat2: c_long = 276; +pub const SYS_seccomp: c_long = 277; +pub const SYS_getrandom: c_long = 278; +pub const SYS_memfd_create: c_long = 279; +pub const SYS_bpf: c_long = 280; +pub const SYS_execveat: c_long = 281; +pub const SYS_userfaultfd: c_long = 282; +pub const SYS_membarrier: c_long = 283; +pub const SYS_mlock2: c_long = 284; +pub const SYS_copy_file_range: c_long = 285; +pub const SYS_preadv2: c_long = 286; +pub const SYS_pwritev2: c_long = 287; +pub const SYS_pkey_mprotect: c_long = 288; +pub const SYS_pkey_alloc: c_long = 289; +pub const SYS_pkey_free: c_long = 290; +pub const SYS_statx: c_long = 291; +pub const SYS_pidfd_send_signal: c_long = 424; +pub const SYS_io_uring_setup: c_long = 425; +pub const SYS_io_uring_enter: c_long = 426; +pub const SYS_io_uring_register: c_long = 427; +pub const SYS_open_tree: c_long = 428; +pub const SYS_move_mount: c_long = 429; +pub const SYS_fsopen: c_long = 430; +pub const SYS_fsconfig: c_long = 431; +pub const SYS_fsmount: c_long = 432; +pub const SYS_fspick: c_long = 433; +pub const SYS_pidfd_open: c_long = 434; +pub const SYS_clone3: c_long = 435; +pub const SYS_close_range: c_long = 436; +pub const SYS_openat2: c_long = 437; +pub const SYS_pidfd_getfd: c_long = 438; +pub const SYS_faccessat2: c_long = 439; +pub const SYS_process_madvise: c_long = 440; +pub const SYS_epoll_pwait2: c_long = 441; +pub const SYS_mount_setattr: c_long = 442; +pub const SYS_quotactl_fd: c_long = 443; +pub const SYS_landlock_create_ruleset: c_long = 444; +pub const SYS_landlock_add_rule: c_long = 445; +pub const SYS_landlock_restrict_self: c_long = 446; +pub const SYS_memfd_secret: c_long = 447; +pub const SYS_process_mrelease: c_long = 448; +pub const SYS_futex_waitv: c_long = 449; +pub const SYS_set_mempolicy_home_node: c_long = 450; + +// From NDK's asm/auxvec.h +pub const AT_SYSINFO_EHDR: c_ulong = 33; +pub const AT_L1I_CACHESIZE: c_ulong = 40; +pub const AT_L1I_CACHEGEOMETRY: c_ulong = 41; +pub const AT_L1D_CACHESIZE: c_ulong = 42; +pub const AT_L1D_CACHEGEOMETRY: c_ulong = 43; +pub const AT_L2_CACHESIZE: c_ulong = 44; +pub const AT_L2_CACHEGEOMETRY: c_ulong = 45; +pub const AT_L3_CACHESIZE: c_ulong = 46; +pub const AT_L3_CACHEGEOMETRY: c_ulong = 47; +pub const AT_VECTOR_SIZE_ARCH: c_ulong = 9; diff --git a/vendor/libc/src/unix/linux_like/android/b64/x86_64/mod.rs b/vendor/libc/src/unix/linux_like/android/b64/x86_64/mod.rs new file mode 100644 index 00000000000000..0fddeb7bc267f5 --- /dev/null +++ b/vendor/libc/src/unix/linux_like/android/b64/x86_64/mod.rs @@ -0,0 +1,748 @@ +use crate::off64_t; +use crate::prelude::*; + +pub type wchar_t = i32; +pub type greg_t = i64; +pub type __u64 = c_ulonglong; +pub type __s64 = c_longlong; + +s! 
{ + pub struct stat { + pub st_dev: crate::dev_t, + pub st_ino: crate::ino_t, + pub st_nlink: c_ulong, + pub st_mode: c_uint, + pub st_uid: crate::uid_t, + pub st_gid: crate::gid_t, + pub st_rdev: crate::dev_t, + pub st_size: off64_t, + pub st_blksize: c_long, + pub st_blocks: c_long, + pub st_atime: c_long, + pub st_atime_nsec: c_long, + pub st_mtime: c_long, + pub st_mtime_nsec: c_long, + pub st_ctime: c_long, + pub st_ctime_nsec: c_long, + __unused: [c_long; 3], + } + + pub struct stat64 { + pub st_dev: crate::dev_t, + pub st_ino: crate::ino_t, + pub st_nlink: c_ulong, + pub st_mode: c_uint, + pub st_uid: crate::uid_t, + pub st_gid: crate::gid_t, + pub st_rdev: crate::dev_t, + pub st_size: off64_t, + pub st_blksize: c_long, + pub st_blocks: c_long, + pub st_atime: c_long, + pub st_atime_nsec: c_long, + pub st_mtime: c_long, + pub st_mtime_nsec: c_long, + pub st_ctime: c_long, + pub st_ctime_nsec: c_long, + __unused: [c_long; 3], + } + + pub struct _libc_xmmreg { + pub element: [u32; 4], + } + + pub struct user_regs_struct { + pub r15: c_ulong, + pub r14: c_ulong, + pub r13: c_ulong, + pub r12: c_ulong, + pub rbp: c_ulong, + pub rbx: c_ulong, + pub r11: c_ulong, + pub r10: c_ulong, + pub r9: c_ulong, + pub r8: c_ulong, + pub rax: c_ulong, + pub rcx: c_ulong, + pub rdx: c_ulong, + pub rsi: c_ulong, + pub rdi: c_ulong, + pub orig_rax: c_ulong, + pub rip: c_ulong, + pub cs: c_ulong, + pub eflags: c_ulong, + pub rsp: c_ulong, + pub ss: c_ulong, + pub fs_base: c_ulong, + pub gs_base: c_ulong, + pub ds: c_ulong, + pub es: c_ulong, + pub fs: c_ulong, + pub gs: c_ulong, + } + + pub struct user { + pub regs: user_regs_struct, + pub u_fpvalid: c_int, + pub i387: user_fpregs_struct, + pub u_tsize: c_ulong, + pub u_dsize: c_ulong, + pub u_ssize: c_ulong, + pub start_code: c_ulong, + pub start_stack: c_ulong, + pub signal: c_long, + __reserved: c_int, + #[cfg(target_pointer_width = "32")] + __pad1: u32, + pub u_ar0: *mut user_regs_struct, + #[cfg(target_pointer_width = "32")] + __pad2: u32, + pub u_fpstate: *mut user_fpregs_struct, + pub magic: c_ulong, + pub u_comm: [c_char; 32], + pub u_debugreg: [c_ulong; 8], + pub error_code: c_ulong, + pub fault_address: c_ulong, + } +} + +s_no_extra_traits! { + pub union __c_anonymous_uc_sigmask { + uc_sigmask: crate::sigset_t, + uc_sigmask64: crate::sigset64_t, + } + + #[repr(align(16))] + pub struct max_align_t { + priv_: [f64; 4], + } +} + +cfg_if! { + if #[cfg(feature = "extra_traits")] { + impl PartialEq for __c_anonymous_uc_sigmask { + fn eq(&self, other: &__c_anonymous_uc_sigmask) -> bool { + unsafe { self.uc_sigmask == other.uc_sigmask } + } + } + impl Eq for __c_anonymous_uc_sigmask {} + impl hash::Hash for __c_anonymous_uc_sigmask { + fn hash(&self, state: &mut H) { + unsafe { self.uc_sigmask.hash(state) } + } + } + } +} + +s_no_extra_traits! 
{ + pub struct _libc_fpxreg { + pub significand: [u16; 4], + pub exponent: u16, + __padding: [u16; 3], + } + + pub struct _libc_fpstate { + pub cwd: u16, + pub swd: u16, + pub ftw: u16, + pub fop: u16, + pub rip: u64, + pub rdp: u64, + pub mxcsr: u32, + pub mxcr_mask: u32, + pub _st: [_libc_fpxreg; 8], + pub _xmm: [_libc_xmmreg; 16], + __private: [u32; 24], + } + + pub struct mcontext_t { + pub gregs: [greg_t; 23], + pub fpregs: *mut _libc_fpstate, + __private: [u64; 8], + } + + pub struct ucontext_t { + pub uc_flags: c_ulong, + pub uc_link: *mut ucontext_t, + pub uc_stack: crate::stack_t, + pub uc_mcontext: mcontext_t, + pub uc_sigmask64: __c_anonymous_uc_sigmask, + __fpregs_mem: _libc_fpstate, + } + + pub struct user_fpregs_struct { + pub cwd: c_ushort, + pub swd: c_ushort, + pub ftw: c_ushort, + pub fop: c_ushort, + pub rip: c_ulong, + pub rdp: c_ulong, + pub mxcsr: c_uint, + pub mxcr_mask: c_uint, + pub st_space: [c_uint; 32], + pub xmm_space: [c_uint; 64], + padding: [c_uint; 24], + } +} + +cfg_if! { + if #[cfg(feature = "extra_traits")] { + impl PartialEq for _libc_fpxreg { + fn eq(&self, other: &Self) -> bool { + self.significand == other.significand && self.exponent == other.exponent + // Ignore padding field + } + } + impl Eq for _libc_fpxreg {} + impl hash::Hash for _libc_fpxreg { + fn hash(&self, state: &mut H) { + self.significand.hash(state); + self.exponent.hash(state); + // Ignore padding field + } + } + + impl PartialEq for _libc_fpstate { + fn eq(&self, other: &Self) -> bool { + self.cwd == other.cwd + && self.swd == other.swd + && self.ftw == other.ftw + && self.fop == other.fop + && self.rip == other.rip + && self.rdp == other.rdp + && self.mxcsr == other.mxcsr + && self.mxcr_mask == other.mxcr_mask + && self._st == other._st + && self._xmm == other._xmm + // Ignore padding field + } + } + impl Eq for _libc_fpstate {} + impl hash::Hash for _libc_fpstate { + fn hash(&self, state: &mut H) { + self.cwd.hash(state); + self.swd.hash(state); + self.ftw.hash(state); + self.fop.hash(state); + self.rip.hash(state); + self.rdp.hash(state); + self.mxcsr.hash(state); + self.mxcr_mask.hash(state); + self._st.hash(state); + self._xmm.hash(state); + // Ignore padding field + } + } + + impl PartialEq for mcontext_t { + fn eq(&self, other: &Self) -> bool { + self.gregs == other.gregs && self.fpregs == other.fpregs + // Ignore padding field + } + } + impl Eq for mcontext_t {} + impl hash::Hash for mcontext_t { + fn hash(&self, state: &mut H) { + self.gregs.hash(state); + self.fpregs.hash(state); + // Ignore padding field + } + } + + impl PartialEq for ucontext_t { + fn eq(&self, other: &Self) -> bool { + self.uc_flags == other.uc_flags + && self.uc_link == other.uc_link + && self.uc_stack == other.uc_stack + && self.uc_mcontext == other.uc_mcontext + && self.uc_sigmask64 == other.uc_sigmask64 + // Ignore padding field + } + } + impl Eq for ucontext_t {} + impl hash::Hash for ucontext_t { + fn hash(&self, state: &mut H) { + self.uc_flags.hash(state); + self.uc_link.hash(state); + self.uc_stack.hash(state); + self.uc_mcontext.hash(state); + self.uc_sigmask64.hash(state); + // Ignore padding field + } + } + + impl PartialEq for user_fpregs_struct { + fn eq(&self, other: &user_fpregs_struct) -> bool { + self.cwd == other.cwd + && self.swd == other.swd + && self.ftw == other.ftw + && self.fop == other.fop + && self.rip == other.rip + && self.rdp == other.rdp + && self.mxcsr == other.mxcsr + && self.mxcr_mask == other.mxcr_mask + && self.st_space == other.st_space + && self + .xmm_space + 
.iter() + .zip(other.xmm_space.iter()) + .all(|(a, b)| a == b) + // Ignore padding field + } + } + + impl Eq for user_fpregs_struct {} + + impl hash::Hash for user_fpregs_struct { + fn hash(&self, state: &mut H) { + self.cwd.hash(state); + self.swd.hash(state); + self.ftw.hash(state); + self.fop.hash(state); + self.rip.hash(state); + self.rdp.hash(state); + self.mxcsr.hash(state); + self.mxcr_mask.hash(state); + self.st_space.hash(state); + self.xmm_space.hash(state); + // Ignore padding field + } + } + } +} + +pub const O_DIRECT: c_int = 0x4000; +pub const O_DIRECTORY: c_int = 0x10000; +pub const O_NOFOLLOW: c_int = 0x20000; +pub const O_LARGEFILE: c_int = 0o0100000; + +pub const SIGSTKSZ: size_t = 8192; +pub const MINSIGSTKSZ: size_t = 2048; + +pub const MAP_32BIT: c_int = 0x40; + +// Syscall table + +pub const SYS_read: c_long = 0; +pub const SYS_write: c_long = 1; +pub const SYS_open: c_long = 2; +pub const SYS_close: c_long = 3; +pub const SYS_stat: c_long = 4; +pub const SYS_fstat: c_long = 5; +pub const SYS_lstat: c_long = 6; +pub const SYS_poll: c_long = 7; +pub const SYS_lseek: c_long = 8; +pub const SYS_mmap: c_long = 9; +pub const SYS_mprotect: c_long = 10; +pub const SYS_munmap: c_long = 11; +pub const SYS_brk: c_long = 12; +pub const SYS_rt_sigaction: c_long = 13; +pub const SYS_rt_sigprocmask: c_long = 14; +pub const SYS_rt_sigreturn: c_long = 15; +pub const SYS_ioctl: c_long = 16; +pub const SYS_pread64: c_long = 17; +pub const SYS_pwrite64: c_long = 18; +pub const SYS_readv: c_long = 19; +pub const SYS_writev: c_long = 20; +pub const SYS_access: c_long = 21; +pub const SYS_pipe: c_long = 22; +pub const SYS_select: c_long = 23; +pub const SYS_sched_yield: c_long = 24; +pub const SYS_mremap: c_long = 25; +pub const SYS_msync: c_long = 26; +pub const SYS_mincore: c_long = 27; +pub const SYS_madvise: c_long = 28; +pub const SYS_shmget: c_long = 29; +pub const SYS_shmat: c_long = 30; +pub const SYS_shmctl: c_long = 31; +pub const SYS_dup: c_long = 32; +pub const SYS_dup2: c_long = 33; +pub const SYS_pause: c_long = 34; +pub const SYS_nanosleep: c_long = 35; +pub const SYS_getitimer: c_long = 36; +pub const SYS_alarm: c_long = 37; +pub const SYS_setitimer: c_long = 38; +pub const SYS_getpid: c_long = 39; +pub const SYS_sendfile: c_long = 40; +pub const SYS_socket: c_long = 41; +pub const SYS_connect: c_long = 42; +pub const SYS_accept: c_long = 43; +pub const SYS_sendto: c_long = 44; +pub const SYS_recvfrom: c_long = 45; +pub const SYS_sendmsg: c_long = 46; +pub const SYS_recvmsg: c_long = 47; +pub const SYS_shutdown: c_long = 48; +pub const SYS_bind: c_long = 49; +pub const SYS_listen: c_long = 50; +pub const SYS_getsockname: c_long = 51; +pub const SYS_getpeername: c_long = 52; +pub const SYS_socketpair: c_long = 53; +pub const SYS_setsockopt: c_long = 54; +pub const SYS_getsockopt: c_long = 55; +pub const SYS_clone: c_long = 56; +pub const SYS_fork: c_long = 57; +pub const SYS_vfork: c_long = 58; +pub const SYS_execve: c_long = 59; +pub const SYS_exit: c_long = 60; +pub const SYS_wait4: c_long = 61; +pub const SYS_kill: c_long = 62; +pub const SYS_uname: c_long = 63; +pub const SYS_semget: c_long = 64; +pub const SYS_semop: c_long = 65; +pub const SYS_semctl: c_long = 66; +pub const SYS_shmdt: c_long = 67; +pub const SYS_msgget: c_long = 68; +pub const SYS_msgsnd: c_long = 69; +pub const SYS_msgrcv: c_long = 70; +pub const SYS_msgctl: c_long = 71; +pub const SYS_fcntl: c_long = 72; +pub const SYS_flock: c_long = 73; +pub const SYS_fsync: c_long = 74; +pub const SYS_fdatasync: 
c_long = 75; +pub const SYS_truncate: c_long = 76; +pub const SYS_ftruncate: c_long = 77; +pub const SYS_getdents: c_long = 78; +pub const SYS_getcwd: c_long = 79; +pub const SYS_chdir: c_long = 80; +pub const SYS_fchdir: c_long = 81; +pub const SYS_rename: c_long = 82; +pub const SYS_mkdir: c_long = 83; +pub const SYS_rmdir: c_long = 84; +pub const SYS_creat: c_long = 85; +pub const SYS_link: c_long = 86; +pub const SYS_unlink: c_long = 87; +pub const SYS_symlink: c_long = 88; +pub const SYS_readlink: c_long = 89; +pub const SYS_chmod: c_long = 90; +pub const SYS_fchmod: c_long = 91; +pub const SYS_chown: c_long = 92; +pub const SYS_fchown: c_long = 93; +pub const SYS_lchown: c_long = 94; +pub const SYS_umask: c_long = 95; +pub const SYS_gettimeofday: c_long = 96; +pub const SYS_getrlimit: c_long = 97; +pub const SYS_getrusage: c_long = 98; +pub const SYS_sysinfo: c_long = 99; +pub const SYS_times: c_long = 100; +pub const SYS_ptrace: c_long = 101; +pub const SYS_getuid: c_long = 102; +pub const SYS_syslog: c_long = 103; +pub const SYS_getgid: c_long = 104; +pub const SYS_setuid: c_long = 105; +pub const SYS_setgid: c_long = 106; +pub const SYS_geteuid: c_long = 107; +pub const SYS_getegid: c_long = 108; +pub const SYS_setpgid: c_long = 109; +pub const SYS_getppid: c_long = 110; +pub const SYS_getpgrp: c_long = 111; +pub const SYS_setsid: c_long = 112; +pub const SYS_setreuid: c_long = 113; +pub const SYS_setregid: c_long = 114; +pub const SYS_getgroups: c_long = 115; +pub const SYS_setgroups: c_long = 116; +pub const SYS_setresuid: c_long = 117; +pub const SYS_getresuid: c_long = 118; +pub const SYS_setresgid: c_long = 119; +pub const SYS_getresgid: c_long = 120; +pub const SYS_getpgid: c_long = 121; +pub const SYS_setfsuid: c_long = 122; +pub const SYS_setfsgid: c_long = 123; +pub const SYS_getsid: c_long = 124; +pub const SYS_capget: c_long = 125; +pub const SYS_capset: c_long = 126; +pub const SYS_rt_sigpending: c_long = 127; +pub const SYS_rt_sigtimedwait: c_long = 128; +pub const SYS_rt_sigqueueinfo: c_long = 129; +pub const SYS_rt_sigsuspend: c_long = 130; +pub const SYS_sigaltstack: c_long = 131; +pub const SYS_utime: c_long = 132; +pub const SYS_mknod: c_long = 133; +pub const SYS_uselib: c_long = 134; +pub const SYS_personality: c_long = 135; +pub const SYS_ustat: c_long = 136; +pub const SYS_statfs: c_long = 137; +pub const SYS_fstatfs: c_long = 138; +pub const SYS_sysfs: c_long = 139; +pub const SYS_getpriority: c_long = 140; +pub const SYS_setpriority: c_long = 141; +pub const SYS_sched_setparam: c_long = 142; +pub const SYS_sched_getparam: c_long = 143; +pub const SYS_sched_setscheduler: c_long = 144; +pub const SYS_sched_getscheduler: c_long = 145; +pub const SYS_sched_get_priority_max: c_long = 146; +pub const SYS_sched_get_priority_min: c_long = 147; +pub const SYS_sched_rr_get_interval: c_long = 148; +pub const SYS_mlock: c_long = 149; +pub const SYS_munlock: c_long = 150; +pub const SYS_mlockall: c_long = 151; +pub const SYS_munlockall: c_long = 152; +pub const SYS_vhangup: c_long = 153; +pub const SYS_modify_ldt: c_long = 154; +pub const SYS_pivot_root: c_long = 155; +// FIXME(android): SYS__sysctl is in the NDK sources but for some reason is +// not available in the tests +// pub const SYS__sysctl: c_long = 156; +pub const SYS_prctl: c_long = 157; +pub const SYS_arch_prctl: c_long = 158; +pub const SYS_adjtimex: c_long = 159; +pub const SYS_setrlimit: c_long = 160; +pub const SYS_chroot: c_long = 161; +pub const SYS_sync: c_long = 162; +pub const SYS_acct: c_long = 
163; +pub const SYS_settimeofday: c_long = 164; +pub const SYS_mount: c_long = 165; +pub const SYS_umount2: c_long = 166; +pub const SYS_swapon: c_long = 167; +pub const SYS_swapoff: c_long = 168; +pub const SYS_reboot: c_long = 169; +pub const SYS_sethostname: c_long = 170; +pub const SYS_setdomainname: c_long = 171; +pub const SYS_iopl: c_long = 172; +pub const SYS_ioperm: c_long = 173; +#[deprecated(since = "0.2.70", note = "Functional up to 2.6 kernel")] +pub const SYS_create_module: c_long = 174; +pub const SYS_init_module: c_long = 175; +pub const SYS_delete_module: c_long = 176; +#[deprecated(since = "0.2.70", note = "Functional up to 2.6 kernel")] +pub const SYS_get_kernel_syms: c_long = 177; +#[deprecated(since = "0.2.70", note = "Functional up to 2.6 kernel")] +pub const SYS_query_module: c_long = 178; +pub const SYS_quotactl: c_long = 179; +pub const SYS_nfsservctl: c_long = 180; +pub const SYS_getpmsg: c_long = 181; +pub const SYS_putpmsg: c_long = 182; +pub const SYS_afs_syscall: c_long = 183; +pub const SYS_tuxcall: c_long = 184; +pub const SYS_security: c_long = 185; +pub const SYS_gettid: c_long = 186; +pub const SYS_readahead: c_long = 187; +pub const SYS_setxattr: c_long = 188; +pub const SYS_lsetxattr: c_long = 189; +pub const SYS_fsetxattr: c_long = 190; +pub const SYS_getxattr: c_long = 191; +pub const SYS_lgetxattr: c_long = 192; +pub const SYS_fgetxattr: c_long = 193; +pub const SYS_listxattr: c_long = 194; +pub const SYS_llistxattr: c_long = 195; +pub const SYS_flistxattr: c_long = 196; +pub const SYS_removexattr: c_long = 197; +pub const SYS_lremovexattr: c_long = 198; +pub const SYS_fremovexattr: c_long = 199; +pub const SYS_tkill: c_long = 200; +pub const SYS_time: c_long = 201; +pub const SYS_futex: c_long = 202; +pub const SYS_sched_setaffinity: c_long = 203; +pub const SYS_sched_getaffinity: c_long = 204; +pub const SYS_set_thread_area: c_long = 205; +pub const SYS_io_setup: c_long = 206; +pub const SYS_io_destroy: c_long = 207; +pub const SYS_io_getevents: c_long = 208; +pub const SYS_io_submit: c_long = 209; +pub const SYS_io_cancel: c_long = 210; +pub const SYS_get_thread_area: c_long = 211; +pub const SYS_lookup_dcookie: c_long = 212; +pub const SYS_epoll_create: c_long = 213; +pub const SYS_epoll_ctl_old: c_long = 214; +pub const SYS_epoll_wait_old: c_long = 215; +pub const SYS_remap_file_pages: c_long = 216; +pub const SYS_getdents64: c_long = 217; +pub const SYS_set_tid_address: c_long = 218; +pub const SYS_restart_syscall: c_long = 219; +pub const SYS_semtimedop: c_long = 220; +pub const SYS_fadvise64: c_long = 221; +pub const SYS_timer_create: c_long = 222; +pub const SYS_timer_settime: c_long = 223; +pub const SYS_timer_gettime: c_long = 224; +pub const SYS_timer_getoverrun: c_long = 225; +pub const SYS_timer_delete: c_long = 226; +pub const SYS_clock_settime: c_long = 227; +pub const SYS_clock_gettime: c_long = 228; +pub const SYS_clock_getres: c_long = 229; +pub const SYS_clock_nanosleep: c_long = 230; +pub const SYS_exit_group: c_long = 231; +pub const SYS_epoll_wait: c_long = 232; +pub const SYS_epoll_ctl: c_long = 233; +pub const SYS_tgkill: c_long = 234; +pub const SYS_utimes: c_long = 235; +pub const SYS_vserver: c_long = 236; +pub const SYS_mbind: c_long = 237; +pub const SYS_set_mempolicy: c_long = 238; +pub const SYS_get_mempolicy: c_long = 239; +pub const SYS_mq_open: c_long = 240; +pub const SYS_mq_unlink: c_long = 241; +pub const SYS_mq_timedsend: c_long = 242; +pub const SYS_mq_timedreceive: c_long = 243; +pub const SYS_mq_notify: 
c_long = 244; +pub const SYS_mq_getsetattr: c_long = 245; +pub const SYS_kexec_load: c_long = 246; +pub const SYS_waitid: c_long = 247; +pub const SYS_add_key: c_long = 248; +pub const SYS_request_key: c_long = 249; +pub const SYS_keyctl: c_long = 250; +pub const SYS_ioprio_set: c_long = 251; +pub const SYS_ioprio_get: c_long = 252; +pub const SYS_inotify_init: c_long = 253; +pub const SYS_inotify_add_watch: c_long = 254; +pub const SYS_inotify_rm_watch: c_long = 255; +pub const SYS_migrate_pages: c_long = 256; +pub const SYS_openat: c_long = 257; +pub const SYS_mkdirat: c_long = 258; +pub const SYS_mknodat: c_long = 259; +pub const SYS_fchownat: c_long = 260; +pub const SYS_futimesat: c_long = 261; +pub const SYS_newfstatat: c_long = 262; +pub const SYS_unlinkat: c_long = 263; +pub const SYS_renameat: c_long = 264; +pub const SYS_linkat: c_long = 265; +pub const SYS_symlinkat: c_long = 266; +pub const SYS_readlinkat: c_long = 267; +pub const SYS_fchmodat: c_long = 268; +pub const SYS_faccessat: c_long = 269; +pub const SYS_pselect6: c_long = 270; +pub const SYS_ppoll: c_long = 271; +pub const SYS_unshare: c_long = 272; +pub const SYS_set_robust_list: c_long = 273; +pub const SYS_get_robust_list: c_long = 274; +pub const SYS_splice: c_long = 275; +pub const SYS_tee: c_long = 276; +pub const SYS_sync_file_range: c_long = 277; +pub const SYS_vmsplice: c_long = 278; +pub const SYS_move_pages: c_long = 279; +pub const SYS_utimensat: c_long = 280; +pub const SYS_epoll_pwait: c_long = 281; +pub const SYS_signalfd: c_long = 282; +pub const SYS_timerfd_create: c_long = 283; +pub const SYS_eventfd: c_long = 284; +pub const SYS_fallocate: c_long = 285; +pub const SYS_timerfd_settime: c_long = 286; +pub const SYS_timerfd_gettime: c_long = 287; +pub const SYS_accept4: c_long = 288; +pub const SYS_signalfd4: c_long = 289; +pub const SYS_eventfd2: c_long = 290; +pub const SYS_epoll_create1: c_long = 291; +pub const SYS_dup3: c_long = 292; +pub const SYS_pipe2: c_long = 293; +pub const SYS_inotify_init1: c_long = 294; +pub const SYS_preadv: c_long = 295; +pub const SYS_pwritev: c_long = 296; +pub const SYS_rt_tgsigqueueinfo: c_long = 297; +pub const SYS_perf_event_open: c_long = 298; +pub const SYS_recvmmsg: c_long = 299; +pub const SYS_fanotify_init: c_long = 300; +pub const SYS_fanotify_mark: c_long = 301; +pub const SYS_prlimit64: c_long = 302; +pub const SYS_name_to_handle_at: c_long = 303; +pub const SYS_open_by_handle_at: c_long = 304; +pub const SYS_clock_adjtime: c_long = 305; +pub const SYS_syncfs: c_long = 306; +pub const SYS_sendmmsg: c_long = 307; +pub const SYS_setns: c_long = 308; +pub const SYS_getcpu: c_long = 309; +pub const SYS_process_vm_readv: c_long = 310; +pub const SYS_process_vm_writev: c_long = 311; +pub const SYS_kcmp: c_long = 312; +pub const SYS_finit_module: c_long = 313; +pub const SYS_sched_setattr: c_long = 314; +pub const SYS_sched_getattr: c_long = 315; +pub const SYS_renameat2: c_long = 316; +pub const SYS_seccomp: c_long = 317; +pub const SYS_getrandom: c_long = 318; +pub const SYS_memfd_create: c_long = 319; +pub const SYS_kexec_file_load: c_long = 320; +pub const SYS_bpf: c_long = 321; +pub const SYS_execveat: c_long = 322; +pub const SYS_userfaultfd: c_long = 323; +pub const SYS_membarrier: c_long = 324; +pub const SYS_mlock2: c_long = 325; +pub const SYS_copy_file_range: c_long = 326; +pub const SYS_preadv2: c_long = 327; +pub const SYS_pwritev2: c_long = 328; +pub const SYS_pkey_mprotect: c_long = 329; +pub const SYS_pkey_alloc: c_long = 330; +pub const 
SYS_pkey_free: c_long = 331; +pub const SYS_statx: c_long = 332; +pub const SYS_pidfd_send_signal: c_long = 424; +pub const SYS_io_uring_setup: c_long = 425; +pub const SYS_io_uring_enter: c_long = 426; +pub const SYS_io_uring_register: c_long = 427; +pub const SYS_open_tree: c_long = 428; +pub const SYS_move_mount: c_long = 429; +pub const SYS_fsopen: c_long = 430; +pub const SYS_fsconfig: c_long = 431; +pub const SYS_fsmount: c_long = 432; +pub const SYS_fspick: c_long = 433; +pub const SYS_pidfd_open: c_long = 434; +pub const SYS_clone3: c_long = 435; +pub const SYS_close_range: c_long = 436; +pub const SYS_openat2: c_long = 437; +pub const SYS_pidfd_getfd: c_long = 438; +pub const SYS_faccessat2: c_long = 439; +pub const SYS_process_madvise: c_long = 440; +pub const SYS_epoll_pwait2: c_long = 441; +pub const SYS_mount_setattr: c_long = 442; +pub const SYS_quotactl_fd: c_long = 443; +pub const SYS_landlock_create_ruleset: c_long = 444; +pub const SYS_landlock_add_rule: c_long = 445; +pub const SYS_landlock_restrict_self: c_long = 446; +pub const SYS_memfd_secret: c_long = 447; +pub const SYS_process_mrelease: c_long = 448; +pub const SYS_futex_waitv: c_long = 449; +pub const SYS_set_mempolicy_home_node: c_long = 450; + +// offsets in user_regs_structs, from sys/reg.h +pub const R15: c_int = 0; +pub const R14: c_int = 1; +pub const R13: c_int = 2; +pub const R12: c_int = 3; +pub const RBP: c_int = 4; +pub const RBX: c_int = 5; +pub const R11: c_int = 6; +pub const R10: c_int = 7; +pub const R9: c_int = 8; +pub const R8: c_int = 9; +pub const RAX: c_int = 10; +pub const RCX: c_int = 11; +pub const RDX: c_int = 12; +pub const RSI: c_int = 13; +pub const RDI: c_int = 14; +pub const ORIG_RAX: c_int = 15; +pub const RIP: c_int = 16; +pub const CS: c_int = 17; +pub const EFLAGS: c_int = 18; +pub const RSP: c_int = 19; +pub const SS: c_int = 20; +pub const FS_BASE: c_int = 21; +pub const GS_BASE: c_int = 22; +pub const DS: c_int = 23; +pub const ES: c_int = 24; +pub const FS: c_int = 25; +pub const GS: c_int = 26; + +// offsets in mcontext_t.gregs from sys/ucontext.h +pub const REG_R8: c_int = 0; +pub const REG_R9: c_int = 1; +pub const REG_R10: c_int = 2; +pub const REG_R11: c_int = 3; +pub const REG_R12: c_int = 4; +pub const REG_R13: c_int = 5; +pub const REG_R14: c_int = 6; +pub const REG_R15: c_int = 7; +pub const REG_RDI: c_int = 8; +pub const REG_RSI: c_int = 9; +pub const REG_RBP: c_int = 10; +pub const REG_RBX: c_int = 11; +pub const REG_RDX: c_int = 12; +pub const REG_RAX: c_int = 13; +pub const REG_RCX: c_int = 14; +pub const REG_RSP: c_int = 15; +pub const REG_RIP: c_int = 16; +pub const REG_EFL: c_int = 17; +pub const REG_CSGSFS: c_int = 18; +pub const REG_ERR: c_int = 19; +pub const REG_TRAPNO: c_int = 20; +pub const REG_OLDMASK: c_int = 21; +pub const REG_CR2: c_int = 22; + +// From NDK's asm/auxvec.h +pub const AT_SYSINFO_EHDR: c_ulong = 33; +pub const AT_VECTOR_SIZE_ARCH: c_ulong = 3; diff --git a/vendor/libc/src/unix/linux_like/android/mod.rs b/vendor/libc/src/unix/linux_like/android/mod.rs new file mode 100644 index 00000000000000..fbd8ac2f87cfc9 --- /dev/null +++ b/vendor/libc/src/unix/linux_like/android/mod.rs @@ -0,0 +1,4157 @@ +//! Android-specific definitions for linux-like values + +use crate::prelude::*; +use crate::{cmsghdr, msghdr}; + +cfg_if! 
{ + if #[cfg(doc)] { + pub(crate) type Ioctl = c_int; + } else { + #[doc(hidden)] + pub type Ioctl = c_int; + } +} + +pub type clock_t = c_long; +pub type time_t = c_long; +pub type suseconds_t = c_long; +pub type off_t = c_long; +pub type blkcnt_t = c_ulong; +pub type blksize_t = c_ulong; +pub type nlink_t = u32; +pub type useconds_t = u32; +pub type pthread_t = c_long; +pub type pthread_mutexattr_t = c_long; +pub type pthread_rwlockattr_t = c_long; +pub type pthread_barrierattr_t = c_int; +pub type pthread_condattr_t = c_long; +pub type pthread_key_t = c_int; +pub type fsfilcnt_t = c_ulong; +pub type fsblkcnt_t = c_ulong; +pub type nfds_t = c_uint; +pub type rlim_t = c_ulong; +pub type dev_t = c_ulong; +pub type ino_t = c_ulong; +pub type ino64_t = u64; +pub type __CPU_BITTYPE = c_ulong; +pub type idtype_t = c_int; +pub type loff_t = c_longlong; +pub type __kernel_loff_t = c_longlong; +pub type __kernel_pid_t = c_int; + +pub type __u8 = c_uchar; +pub type __u16 = c_ushort; +pub type __s16 = c_short; +pub type __u32 = c_uint; +pub type __s32 = c_int; + +// linux/elf.h + +pub type Elf32_Addr = u32; +pub type Elf32_Half = u16; +pub type Elf32_Off = u32; +pub type Elf32_Word = u32; + +pub type Elf64_Addr = u64; +pub type Elf64_Half = u16; +pub type Elf64_Off = u64; +pub type Elf64_Word = u32; +pub type Elf64_Xword = u64; + +pub type eventfd_t = u64; + +// these structs sit behind a heap allocation on Android +pub type posix_spawn_file_actions_t = *mut c_void; +pub type posix_spawnattr_t = *mut c_void; + +s! { + pub struct stack_t { + pub ss_sp: *mut c_void, + pub ss_flags: c_int, + pub ss_size: size_t, + } + + pub struct __fsid_t { + __val: [c_int; 2], + } + + pub struct termios { + pub c_iflag: crate::tcflag_t, + pub c_oflag: crate::tcflag_t, + pub c_cflag: crate::tcflag_t, + pub c_lflag: crate::tcflag_t, + pub c_line: crate::cc_t, + pub c_cc: [crate::cc_t; crate::NCCS], + } + + pub struct termios2 { + pub c_iflag: crate::tcflag_t, + pub c_oflag: crate::tcflag_t, + pub c_cflag: crate::tcflag_t, + pub c_lflag: crate::tcflag_t, + pub c_line: crate::cc_t, + pub c_cc: [crate::cc_t; 19], + pub c_ispeed: crate::speed_t, + pub c_ospeed: crate::speed_t, + } + + pub struct mallinfo { + pub arena: size_t, + pub ordblks: size_t, + pub smblks: size_t, + pub hblks: size_t, + pub hblkhd: size_t, + pub usmblks: size_t, + pub fsmblks: size_t, + pub uordblks: size_t, + pub fordblks: size_t, + pub keepcost: size_t, + } + + pub struct flock { + pub l_type: c_short, + pub l_whence: c_short, + pub l_start: off_t, + pub l_len: off_t, + pub l_pid: crate::pid_t, + } + + pub struct flock64 { + pub l_type: c_short, + pub l_whence: c_short, + pub l_start: crate::__kernel_loff_t, + pub l_len: crate::__kernel_loff_t, + pub l_pid: crate::__kernel_pid_t, + } + + pub struct cpu_set_t { + #[cfg(target_pointer_width = "64")] + __bits: [__CPU_BITTYPE; 16], + #[cfg(target_pointer_width = "32")] + __bits: [__CPU_BITTYPE; 1], + } + + pub struct sem_t { + count: c_uint, + #[cfg(target_pointer_width = "64")] + __reserved: [c_int; 3], + } + + pub struct exit_status { + pub e_termination: c_short, + pub e_exit: c_short, + } + + pub struct statvfs { + pub f_bsize: c_ulong, + pub f_frsize: c_ulong, + pub f_blocks: crate::fsblkcnt_t, + pub f_bfree: crate::fsblkcnt_t, + pub f_bavail: crate::fsblkcnt_t, + pub f_files: crate::fsfilcnt_t, + pub f_ffree: crate::fsfilcnt_t, + pub f_favail: crate::fsfilcnt_t, + pub f_fsid: c_ulong, + pub f_flag: c_ulong, + pub f_namemax: c_ulong, + #[cfg(target_pointer_width = "64")] + __f_reserved: [u32; 
6], + } + + pub struct signalfd_siginfo { + pub ssi_signo: u32, + pub ssi_errno: i32, + pub ssi_code: i32, + pub ssi_pid: u32, + pub ssi_uid: u32, + pub ssi_fd: i32, + pub ssi_tid: u32, + pub ssi_band: u32, + pub ssi_overrun: u32, + pub ssi_trapno: u32, + pub ssi_status: i32, + pub ssi_int: i32, + pub ssi_ptr: c_ulonglong, + pub ssi_utime: c_ulonglong, + pub ssi_stime: c_ulonglong, + pub ssi_addr: c_ulonglong, + pub ssi_addr_lsb: u16, + _pad2: u16, + pub ssi_syscall: i32, + pub ssi_call_addr: u64, + pub ssi_arch: u32, + _pad: [u8; 28], + } + + pub struct itimerspec { + pub it_interval: crate::timespec, + pub it_value: crate::timespec, + } + + pub struct genlmsghdr { + pub cmd: u8, + pub version: u8, + pub reserved: u16, + } + + pub struct nlmsghdr { + pub nlmsg_len: u32, + pub nlmsg_type: u16, + pub nlmsg_flags: u16, + pub nlmsg_seq: u32, + pub nlmsg_pid: u32, + } + + pub struct nlmsgerr { + pub error: c_int, + pub msg: nlmsghdr, + } + + pub struct nl_pktinfo { + pub group: u32, + } + + pub struct nl_mmap_req { + pub nm_block_size: c_uint, + pub nm_block_nr: c_uint, + pub nm_frame_size: c_uint, + pub nm_frame_nr: c_uint, + } + + pub struct nl_mmap_hdr { + pub nm_status: c_uint, + pub nm_len: c_uint, + pub nm_group: u32, + pub nm_pid: u32, + pub nm_uid: u32, + pub nm_gid: u32, + } + + pub struct nlattr { + pub nla_len: u16, + pub nla_type: u16, + } + + pub struct in6_pktinfo { + pub ipi6_addr: crate::in6_addr, + pub ipi6_ifindex: c_int, + } + + pub struct inotify_event { + pub wd: c_int, + pub mask: u32, + pub cookie: u32, + pub len: u32, + } + + pub struct sock_extended_err { + pub ee_errno: u32, + pub ee_origin: u8, + pub ee_type: u8, + pub ee_code: u8, + pub ee_pad: u8, + pub ee_info: u32, + pub ee_data: u32, + } + + pub struct regex_t { + re_magic: c_int, + re_nsub: size_t, + re_endp: *const c_char, + re_guts: *mut c_void, + } + + pub struct regmatch_t { + pub rm_so: ssize_t, + pub rm_eo: ssize_t, + } + + pub struct sockaddr_vm { + pub svm_family: crate::sa_family_t, + pub svm_reserved1: c_ushort, + pub svm_port: c_uint, + pub svm_cid: c_uint, + pub svm_zero: [u8; 4], + } + + // linux/elf.h + + pub struct Elf32_Phdr { + pub p_type: Elf32_Word, + pub p_offset: Elf32_Off, + pub p_vaddr: Elf32_Addr, + pub p_paddr: Elf32_Addr, + pub p_filesz: Elf32_Word, + pub p_memsz: Elf32_Word, + pub p_flags: Elf32_Word, + pub p_align: Elf32_Word, + } + + pub struct Elf64_Phdr { + pub p_type: Elf64_Word, + pub p_flags: Elf64_Word, + pub p_offset: Elf64_Off, + pub p_vaddr: Elf64_Addr, + pub p_paddr: Elf64_Addr, + pub p_filesz: Elf64_Xword, + pub p_memsz: Elf64_Xword, + pub p_align: Elf64_Xword, + } + + // link.h + + pub struct dl_phdr_info { + #[cfg(target_pointer_width = "64")] + pub dlpi_addr: Elf64_Addr, + #[cfg(target_pointer_width = "32")] + pub dlpi_addr: Elf32_Addr, + + pub dlpi_name: *const c_char, + + #[cfg(target_pointer_width = "64")] + pub dlpi_phdr: *const Elf64_Phdr, + #[cfg(target_pointer_width = "32")] + pub dlpi_phdr: *const Elf32_Phdr, + + #[cfg(target_pointer_width = "64")] + pub dlpi_phnum: Elf64_Half, + #[cfg(target_pointer_width = "32")] + pub dlpi_phnum: Elf32_Half, + + // These fields were added in Android R + pub dlpi_adds: c_ulonglong, + pub dlpi_subs: c_ulonglong, + pub dlpi_tls_modid: size_t, + pub dlpi_tls_data: *mut c_void, + } + + // linux/seccomp.h + pub struct seccomp_data { + pub nr: c_int, + pub arch: crate::__u32, + pub instruction_pointer: crate::__u64, + pub args: [crate::__u64; 6], + } + + pub struct seccomp_metadata { + pub filter_off: crate::__u64, + pub flags: 
crate::__u64, + } + + pub struct ptrace_peeksiginfo_args { + pub off: crate::__u64, + pub flags: crate::__u32, + pub nr: crate::__s32, + } + + // linux/input.h + pub struct input_event { + pub time: crate::timeval, + pub type_: crate::__u16, + pub code: crate::__u16, + pub value: crate::__s32, + } + + pub struct input_id { + pub bustype: crate::__u16, + pub vendor: crate::__u16, + pub product: crate::__u16, + pub version: crate::__u16, + } + + pub struct input_absinfo { + pub value: crate::__s32, + pub minimum: crate::__s32, + pub maximum: crate::__s32, + pub fuzz: crate::__s32, + pub flat: crate::__s32, + pub resolution: crate::__s32, + } + + pub struct input_keymap_entry { + pub flags: crate::__u8, + pub len: crate::__u8, + pub index: crate::__u16, + pub keycode: crate::__u32, + pub scancode: [crate::__u8; 32], + } + + pub struct input_mask { + pub type_: crate::__u32, + pub codes_size: crate::__u32, + pub codes_ptr: crate::__u64, + } + + pub struct ff_replay { + pub length: crate::__u16, + pub delay: crate::__u16, + } + + pub struct ff_trigger { + pub button: crate::__u16, + pub interval: crate::__u16, + } + + pub struct ff_envelope { + pub attack_length: crate::__u16, + pub attack_level: crate::__u16, + pub fade_length: crate::__u16, + pub fade_level: crate::__u16, + } + + pub struct ff_constant_effect { + pub level: crate::__s16, + pub envelope: ff_envelope, + } + + pub struct ff_ramp_effect { + pub start_level: crate::__s16, + pub end_level: crate::__s16, + pub envelope: ff_envelope, + } + + pub struct ff_condition_effect { + pub right_saturation: crate::__u16, + pub left_saturation: crate::__u16, + + pub right_coeff: crate::__s16, + pub left_coeff: crate::__s16, + + pub deadband: crate::__u16, + pub center: crate::__s16, + } + + pub struct ff_periodic_effect { + pub waveform: crate::__u16, + pub period: crate::__u16, + pub magnitude: crate::__s16, + pub offset: crate::__s16, + pub phase: crate::__u16, + + pub envelope: ff_envelope, + + pub custom_len: crate::__u32, + pub custom_data: *mut crate::__s16, + } + + pub struct ff_rumble_effect { + pub strong_magnitude: crate::__u16, + pub weak_magnitude: crate::__u16, + } + + pub struct ff_effect { + pub type_: crate::__u16, + pub id: crate::__s16, + pub direction: crate::__u16, + pub trigger: ff_trigger, + pub replay: ff_replay, + // FIXME(1.0): this is actually a union + #[cfg(target_pointer_width = "64")] + pub u: [u64; 4], + #[cfg(target_pointer_width = "32")] + pub u: [u32; 7], + } + + // linux/uinput.h + pub struct uinput_ff_upload { + pub request_id: crate::__u32, + pub retval: crate::__s32, + pub effect: ff_effect, + pub old: ff_effect, + } + + pub struct uinput_ff_erase { + pub request_id: crate::__u32, + pub retval: crate::__s32, + pub effect_id: crate::__u32, + } + + pub struct uinput_abs_setup { + pub code: crate::__u16, + pub absinfo: input_absinfo, + } + + pub struct option { + pub name: *const c_char, + pub has_arg: c_int, + pub flag: *mut c_int, + pub val: c_int, + } + + pub struct __c_anonymous_ifru_map { + pub mem_start: c_ulong, + pub mem_end: c_ulong, + pub base_addr: c_ushort, + pub irq: c_uchar, + pub dma: c_uchar, + pub port: c_uchar, + } + + pub struct in6_ifreq { + pub ifr6_addr: crate::in6_addr, + pub ifr6_prefixlen: u32, + pub ifr6_ifindex: c_int, + } + + pub struct if_nameindex { + pub if_index: c_uint, + pub if_name: *mut c_char, + } +} + +s_no_extra_traits! 
{ + pub struct sockaddr_nl { + pub nl_family: crate::sa_family_t, + nl_pad: c_ushort, + pub nl_pid: u32, + pub nl_groups: u32, + } + + pub struct dirent { + pub d_ino: u64, + pub d_off: i64, + pub d_reclen: c_ushort, + pub d_type: c_uchar, + pub d_name: [c_char; 256], + } + + pub struct dirent64 { + pub d_ino: u64, + pub d_off: i64, + pub d_reclen: c_ushort, + pub d_type: c_uchar, + pub d_name: [c_char; 256], + } + + pub struct siginfo_t { + pub si_signo: c_int, + pub si_errno: c_int, + pub si_code: c_int, + pub _pad: [c_int; 29], + _align: [usize; 0], + } + + pub struct lastlog { + ll_time: crate::time_t, + ll_line: [c_char; UT_LINESIZE], + ll_host: [c_char; UT_HOSTSIZE], + } + + pub struct utmp { + pub ut_type: c_short, + pub ut_pid: crate::pid_t, + pub ut_line: [c_char; UT_LINESIZE], + pub ut_id: [c_char; 4], + pub ut_user: [c_char; UT_NAMESIZE], + pub ut_host: [c_char; UT_HOSTSIZE], + pub ut_exit: exit_status, + pub ut_session: c_long, + pub ut_tv: crate::timeval, + pub ut_addr_v6: [i32; 4], + unused: [c_char; 20], + } + + pub struct sockaddr_alg { + pub salg_family: crate::sa_family_t, + pub salg_type: [c_uchar; 14], + pub salg_feat: u32, + pub salg_mask: u32, + pub salg_name: [c_uchar; 64], + } + + pub struct uinput_setup { + pub id: input_id, + pub name: [c_char; UINPUT_MAX_NAME_SIZE], + pub ff_effects_max: crate::__u32, + } + + pub struct uinput_user_dev { + pub name: [c_char; UINPUT_MAX_NAME_SIZE], + pub id: input_id, + pub ff_effects_max: crate::__u32, + pub absmax: [crate::__s32; ABS_CNT], + pub absmin: [crate::__s32; ABS_CNT], + pub absfuzz: [crate::__s32; ABS_CNT], + pub absflat: [crate::__s32; ABS_CNT], + } + + /// WARNING: The `PartialEq`, `Eq` and `Hash` implementations of this + /// type are unsound and will be removed in the future. + #[deprecated( + note = "this struct has unsafe trait implementations that will be \ + removed in the future", + since = "0.2.80" + )] + pub struct af_alg_iv { + pub ivlen: u32, + pub iv: [c_uchar; 0], + } + + pub struct prop_info { + __name: [c_char; 32], + __serial: c_uint, + __value: [c_char; 92], + } + + pub union __c_anonymous_ifr_ifru { + pub ifru_addr: crate::sockaddr, + pub ifru_dstaddr: crate::sockaddr, + pub ifru_broadaddr: crate::sockaddr, + pub ifru_netmask: crate::sockaddr, + pub ifru_hwaddr: crate::sockaddr, + pub ifru_flags: c_short, + pub ifru_ifindex: c_int, + pub ifru_metric: c_int, + pub ifru_mtu: c_int, + pub ifru_map: __c_anonymous_ifru_map, + pub ifru_slave: [c_char; crate::IFNAMSIZ], + pub ifru_newname: [c_char; crate::IFNAMSIZ], + pub ifru_data: *mut c_char, + } + + pub struct ifreq { + /// interface name, e.g. "en0" + pub ifr_name: [c_char; crate::IFNAMSIZ], + pub ifr_ifru: __c_anonymous_ifr_ifru, + } + + pub union __c_anonymous_ifc_ifcu { + pub ifcu_buf: *mut c_char, + pub ifcu_req: *mut crate::ifreq, + } + + /* Structure used in SIOCGIFCONF request. Used to retrieve interface + configuration for machine (useful for programs which must know all + networks accessible). */ + pub struct ifconf { + pub ifc_len: c_int, /* Size of buffer. */ + pub ifc_ifcu: __c_anonymous_ifc_ifcu, + } +} + +cfg_if! 
{ + if #[cfg(feature = "extra_traits")] { + impl PartialEq for sockaddr_nl { + fn eq(&self, other: &sockaddr_nl) -> bool { + self.nl_family == other.nl_family + && self.nl_pid == other.nl_pid + && self.nl_groups == other.nl_groups + } + } + impl Eq for sockaddr_nl {} + impl hash::Hash for sockaddr_nl { + fn hash(&self, state: &mut H) { + self.nl_family.hash(state); + self.nl_pid.hash(state); + self.nl_groups.hash(state); + } + } + + impl PartialEq for dirent { + fn eq(&self, other: &dirent) -> bool { + self.d_ino == other.d_ino + && self.d_off == other.d_off + && self.d_reclen == other.d_reclen + && self.d_type == other.d_type + && self + .d_name + .iter() + .zip(other.d_name.iter()) + .all(|(a, b)| a == b) + } + } + + impl Eq for dirent {} + + impl hash::Hash for dirent { + fn hash(&self, state: &mut H) { + self.d_ino.hash(state); + self.d_off.hash(state); + self.d_reclen.hash(state); + self.d_type.hash(state); + self.d_name.hash(state); + } + } + + impl PartialEq for dirent64 { + fn eq(&self, other: &dirent64) -> bool { + self.d_ino == other.d_ino + && self.d_off == other.d_off + && self.d_reclen == other.d_reclen + && self.d_type == other.d_type + && self + .d_name + .iter() + .zip(other.d_name.iter()) + .all(|(a, b)| a == b) + } + } + + impl Eq for dirent64 {} + + impl hash::Hash for dirent64 { + fn hash(&self, state: &mut H) { + self.d_ino.hash(state); + self.d_off.hash(state); + self.d_reclen.hash(state); + self.d_type.hash(state); + self.d_name.hash(state); + } + } + + impl PartialEq for siginfo_t { + fn eq(&self, other: &siginfo_t) -> bool { + self.si_signo == other.si_signo + && self.si_errno == other.si_errno + && self.si_code == other.si_code + // Ignore _pad + // Ignore _align + } + } + + impl Eq for siginfo_t {} + + impl hash::Hash for siginfo_t { + fn hash(&self, state: &mut H) { + self.si_signo.hash(state); + self.si_errno.hash(state); + self.si_code.hash(state); + // Ignore _pad + // Ignore _align + } + } + + impl PartialEq for lastlog { + fn eq(&self, other: &lastlog) -> bool { + self.ll_time == other.ll_time + && self + .ll_line + .iter() + .zip(other.ll_line.iter()) + .all(|(a, b)| a == b) + && self + .ll_host + .iter() + .zip(other.ll_host.iter()) + .all(|(a, b)| a == b) + } + } + + impl Eq for lastlog {} + + impl hash::Hash for lastlog { + fn hash(&self, state: &mut H) { + self.ll_time.hash(state); + self.ll_line.hash(state); + self.ll_host.hash(state); + } + } + + impl PartialEq for utmp { + fn eq(&self, other: &utmp) -> bool { + self.ut_type == other.ut_type + && self.ut_pid == other.ut_pid + && self + .ut_line + .iter() + .zip(other.ut_line.iter()) + .all(|(a, b)| a == b) + && self.ut_id == other.ut_id + && self + .ut_user + .iter() + .zip(other.ut_user.iter()) + .all(|(a, b)| a == b) + && self + .ut_host + .iter() + .zip(other.ut_host.iter()) + .all(|(a, b)| a == b) + && self.ut_exit == other.ut_exit + && self.ut_session == other.ut_session + && self.ut_tv == other.ut_tv + && self.ut_addr_v6 == other.ut_addr_v6 + && self.unused == other.unused + } + } + + impl Eq for utmp {} + + impl hash::Hash for utmp { + fn hash(&self, state: &mut H) { + self.ut_type.hash(state); + self.ut_pid.hash(state); + self.ut_line.hash(state); + self.ut_id.hash(state); + self.ut_user.hash(state); + self.ut_host.hash(state); + self.ut_exit.hash(state); + self.ut_session.hash(state); + self.ut_tv.hash(state); + self.ut_addr_v6.hash(state); + self.unused.hash(state); + } + } + + impl PartialEq for sockaddr_alg { + fn eq(&self, other: &sockaddr_alg) -> bool { + self.salg_family == 
other.salg_family + && self + .salg_type + .iter() + .zip(other.salg_type.iter()) + .all(|(a, b)| a == b) + && self.salg_feat == other.salg_feat + && self.salg_mask == other.salg_mask + && self + .salg_name + .iter() + .zip(other.salg_name.iter()) + .all(|(a, b)| a == b) + } + } + + impl Eq for sockaddr_alg {} + + impl hash::Hash for sockaddr_alg { + fn hash(&self, state: &mut H) { + self.salg_family.hash(state); + self.salg_type.hash(state); + self.salg_feat.hash(state); + self.salg_mask.hash(state); + self.salg_name.hash(state); + } + } + + impl PartialEq for uinput_setup { + fn eq(&self, other: &uinput_setup) -> bool { + self.id == other.id + && self.name[..] == other.name[..] + && self.ff_effects_max == other.ff_effects_max + } + } + impl Eq for uinput_setup {} + + impl hash::Hash for uinput_setup { + fn hash(&self, state: &mut H) { + self.id.hash(state); + self.name.hash(state); + self.ff_effects_max.hash(state); + } + } + + impl PartialEq for uinput_user_dev { + fn eq(&self, other: &uinput_user_dev) -> bool { + self.name[..] == other.name[..] + && self.id == other.id + && self.ff_effects_max == other.ff_effects_max + && self.absmax[..] == other.absmax[..] + && self.absmin[..] == other.absmin[..] + && self.absfuzz[..] == other.absfuzz[..] + && self.absflat[..] == other.absflat[..] + } + } + impl Eq for uinput_user_dev {} + + impl hash::Hash for uinput_user_dev { + fn hash(&self, state: &mut H) { + self.name.hash(state); + self.id.hash(state); + self.ff_effects_max.hash(state); + self.absmax.hash(state); + self.absmin.hash(state); + self.absfuzz.hash(state); + self.absflat.hash(state); + } + } + + #[allow(deprecated)] + impl af_alg_iv { + fn as_slice(&self) -> &[u8] { + unsafe { ::core::slice::from_raw_parts(self.iv.as_ptr(), self.ivlen as usize) } + } + } + + #[allow(deprecated)] + impl PartialEq for af_alg_iv { + fn eq(&self, other: &af_alg_iv) -> bool { + *self.as_slice() == *other.as_slice() + } + } + + #[allow(deprecated)] + impl Eq for af_alg_iv {} + + #[allow(deprecated)] + impl hash::Hash for af_alg_iv { + fn hash(&self, state: &mut H) { + self.as_slice().hash(state); + } + } + + impl PartialEq for prop_info { + fn eq(&self, other: &prop_info) -> bool { + self.__name == other.__name + && self.__serial == other.__serial + && self.__value == other.__value + } + } + impl Eq for prop_info {} + } +} + +pub const MADV_SOFT_OFFLINE: c_int = 101; +pub const MS_NOUSER: c_ulong = 0xffffffff80000000; +pub const MS_RMT_MASK: c_ulong = 0x02800051; + +pub const O_TRUNC: c_int = 512; +pub const O_CLOEXEC: c_int = 0x80000; +pub const O_PATH: c_int = 0o10000000; +pub const O_NOATIME: c_int = 0o1000000; + +pub const EBFONT: c_int = 59; +pub const ENOSTR: c_int = 60; +pub const ENODATA: c_int = 61; +pub const ETIME: c_int = 62; +pub const ENOSR: c_int = 63; +pub const ENONET: c_int = 64; +pub const ENOPKG: c_int = 65; +pub const EREMOTE: c_int = 66; +pub const ENOLINK: c_int = 67; +pub const EADV: c_int = 68; +pub const ESRMNT: c_int = 69; +pub const ECOMM: c_int = 70; +pub const EPROTO: c_int = 71; +pub const EDOTDOT: c_int = 73; + +pub const EPOLL_CLOEXEC: c_int = 0x80000; + +// sys/eventfd.h +pub const EFD_SEMAPHORE: c_int = 0x1; +pub const EFD_CLOEXEC: c_int = O_CLOEXEC; +pub const EFD_NONBLOCK: c_int = O_NONBLOCK; + +// sys/timerfd.h +pub const TFD_CLOEXEC: c_int = O_CLOEXEC; +pub const TFD_NONBLOCK: c_int = O_NONBLOCK; +pub const TFD_TIMER_ABSTIME: c_int = 1; +pub const TFD_TIMER_CANCEL_ON_SET: c_int = 2; + +pub const USER_PROCESS: c_short = 7; + +pub const _POSIX_VDISABLE: crate::cc_t = 0; 
+ +// linux/falloc.h +pub const FALLOC_FL_KEEP_SIZE: c_int = 0x01; +pub const FALLOC_FL_PUNCH_HOLE: c_int = 0x02; +pub const FALLOC_FL_NO_HIDE_STALE: c_int = 0x04; +pub const FALLOC_FL_COLLAPSE_RANGE: c_int = 0x08; +pub const FALLOC_FL_ZERO_RANGE: c_int = 0x10; +pub const FALLOC_FL_INSERT_RANGE: c_int = 0x20; +pub const FALLOC_FL_UNSHARE_RANGE: c_int = 0x40; + +pub const BUFSIZ: c_uint = 1024; +pub const FILENAME_MAX: c_uint = 4096; +pub const FOPEN_MAX: c_uint = 20; +pub const POSIX_FADV_DONTNEED: c_int = 4; +pub const POSIX_FADV_NOREUSE: c_int = 5; +pub const L_tmpnam: c_uint = 4096; +pub const TMP_MAX: c_uint = 308915776; +pub const _PC_LINK_MAX: c_int = 1; +pub const _PC_MAX_CANON: c_int = 2; +pub const _PC_MAX_INPUT: c_int = 3; +pub const _PC_NAME_MAX: c_int = 4; +pub const _PC_PATH_MAX: c_int = 5; +pub const _PC_PIPE_BUF: c_int = 6; +pub const _PC_2_SYMLINKS: c_int = 7; +pub const _PC_ALLOC_SIZE_MIN: c_int = 8; +pub const _PC_REC_INCR_XFER_SIZE: c_int = 9; +pub const _PC_REC_MAX_XFER_SIZE: c_int = 10; +pub const _PC_REC_MIN_XFER_SIZE: c_int = 11; +pub const _PC_REC_XFER_ALIGN: c_int = 12; +pub const _PC_SYMLINK_MAX: c_int = 13; +pub const _PC_CHOWN_RESTRICTED: c_int = 14; +pub const _PC_NO_TRUNC: c_int = 15; +pub const _PC_VDISABLE: c_int = 16; +pub const _PC_ASYNC_IO: c_int = 17; +pub const _PC_PRIO_IO: c_int = 18; +pub const _PC_SYNC_IO: c_int = 19; + +pub const FIONBIO: c_int = 0x5421; + +pub const _SC_ARG_MAX: c_int = 0x0000; +pub const _SC_BC_BASE_MAX: c_int = 0x0001; +pub const _SC_BC_DIM_MAX: c_int = 0x0002; +pub const _SC_BC_SCALE_MAX: c_int = 0x0003; +pub const _SC_BC_STRING_MAX: c_int = 0x0004; +pub const _SC_CHILD_MAX: c_int = 0x0005; +pub const _SC_CLK_TCK: c_int = 0x0006; +pub const _SC_COLL_WEIGHTS_MAX: c_int = 0x0007; +pub const _SC_EXPR_NEST_MAX: c_int = 0x0008; +pub const _SC_LINE_MAX: c_int = 0x0009; +pub const _SC_NGROUPS_MAX: c_int = 0x000a; +pub const _SC_OPEN_MAX: c_int = 0x000b; +pub const _SC_PASS_MAX: c_int = 0x000c; +pub const _SC_2_C_BIND: c_int = 0x000d; +pub const _SC_2_C_DEV: c_int = 0x000e; +pub const _SC_2_C_VERSION: c_int = 0x000f; +pub const _SC_2_CHAR_TERM: c_int = 0x0010; +pub const _SC_2_FORT_DEV: c_int = 0x0011; +pub const _SC_2_FORT_RUN: c_int = 0x0012; +pub const _SC_2_LOCALEDEF: c_int = 0x0013; +pub const _SC_2_SW_DEV: c_int = 0x0014; +pub const _SC_2_UPE: c_int = 0x0015; +pub const _SC_2_VERSION: c_int = 0x0016; +pub const _SC_JOB_CONTROL: c_int = 0x0017; +pub const _SC_SAVED_IDS: c_int = 0x0018; +pub const _SC_VERSION: c_int = 0x0019; +pub const _SC_RE_DUP_MAX: c_int = 0x001a; +pub const _SC_STREAM_MAX: c_int = 0x001b; +pub const _SC_TZNAME_MAX: c_int = 0x001c; +pub const _SC_XOPEN_CRYPT: c_int = 0x001d; +pub const _SC_XOPEN_ENH_I18N: c_int = 0x001e; +pub const _SC_XOPEN_SHM: c_int = 0x001f; +pub const _SC_XOPEN_VERSION: c_int = 0x0020; +pub const _SC_XOPEN_XCU_VERSION: c_int = 0x0021; +pub const _SC_XOPEN_REALTIME: c_int = 0x0022; +pub const _SC_XOPEN_REALTIME_THREADS: c_int = 0x0023; +pub const _SC_XOPEN_LEGACY: c_int = 0x0024; +pub const _SC_ATEXIT_MAX: c_int = 0x0025; +pub const _SC_IOV_MAX: c_int = 0x0026; +pub const _SC_UIO_MAXIOV: c_int = _SC_IOV_MAX; +pub const _SC_PAGESIZE: c_int = 0x0027; +pub const _SC_PAGE_SIZE: c_int = 0x0028; +pub const _SC_XOPEN_UNIX: c_int = 0x0029; +pub const _SC_XBS5_ILP32_OFF32: c_int = 0x002a; +pub const _SC_XBS5_ILP32_OFFBIG: c_int = 0x002b; +pub const _SC_XBS5_LP64_OFF64: c_int = 0x002c; +pub const _SC_XBS5_LPBIG_OFFBIG: c_int = 0x002d; +pub const _SC_AIO_LISTIO_MAX: c_int = 0x002e; +pub const 
_SC_AIO_MAX: c_int = 0x002f; +pub const _SC_AIO_PRIO_DELTA_MAX: c_int = 0x0030; +pub const _SC_DELAYTIMER_MAX: c_int = 0x0031; +pub const _SC_MQ_OPEN_MAX: c_int = 0x0032; +pub const _SC_MQ_PRIO_MAX: c_int = 0x0033; +pub const _SC_RTSIG_MAX: c_int = 0x0034; +pub const _SC_SEM_NSEMS_MAX: c_int = 0x0035; +pub const _SC_SEM_VALUE_MAX: c_int = 0x0036; +pub const _SC_SIGQUEUE_MAX: c_int = 0x0037; +pub const _SC_TIMER_MAX: c_int = 0x0038; +pub const _SC_ASYNCHRONOUS_IO: c_int = 0x0039; +pub const _SC_FSYNC: c_int = 0x003a; +pub const _SC_MAPPED_FILES: c_int = 0x003b; +pub const _SC_MEMLOCK: c_int = 0x003c; +pub const _SC_MEMLOCK_RANGE: c_int = 0x003d; +pub const _SC_MEMORY_PROTECTION: c_int = 0x003e; +pub const _SC_MESSAGE_PASSING: c_int = 0x003f; +pub const _SC_PRIORITIZED_IO: c_int = 0x0040; +pub const _SC_PRIORITY_SCHEDULING: c_int = 0x0041; +pub const _SC_REALTIME_SIGNALS: c_int = 0x0042; +pub const _SC_SEMAPHORES: c_int = 0x0043; +pub const _SC_SHARED_MEMORY_OBJECTS: c_int = 0x0044; +pub const _SC_SYNCHRONIZED_IO: c_int = 0x0045; +pub const _SC_TIMERS: c_int = 0x0046; +pub const _SC_GETGR_R_SIZE_MAX: c_int = 0x0047; +pub const _SC_GETPW_R_SIZE_MAX: c_int = 0x0048; +pub const _SC_LOGIN_NAME_MAX: c_int = 0x0049; +pub const _SC_THREAD_DESTRUCTOR_ITERATIONS: c_int = 0x004a; +pub const _SC_THREAD_KEYS_MAX: c_int = 0x004b; +pub const _SC_THREAD_STACK_MIN: c_int = 0x004c; +pub const _SC_THREAD_THREADS_MAX: c_int = 0x004d; +pub const _SC_TTY_NAME_MAX: c_int = 0x004e; +pub const _SC_THREADS: c_int = 0x004f; +pub const _SC_THREAD_ATTR_STACKADDR: c_int = 0x0050; +pub const _SC_THREAD_ATTR_STACKSIZE: c_int = 0x0051; +pub const _SC_THREAD_PRIORITY_SCHEDULING: c_int = 0x0052; +pub const _SC_THREAD_PRIO_INHERIT: c_int = 0x0053; +pub const _SC_THREAD_PRIO_PROTECT: c_int = 0x0054; +pub const _SC_THREAD_SAFE_FUNCTIONS: c_int = 0x0055; +pub const _SC_NPROCESSORS_CONF: c_int = 0x0060; +pub const _SC_NPROCESSORS_ONLN: c_int = 0x0061; +pub const _SC_PHYS_PAGES: c_int = 0x0062; +pub const _SC_AVPHYS_PAGES: c_int = 0x0063; +pub const _SC_MONOTONIC_CLOCK: c_int = 0x0064; +pub const _SC_2_PBS: c_int = 0x0065; +pub const _SC_2_PBS_ACCOUNTING: c_int = 0x0066; +pub const _SC_2_PBS_CHECKPOINT: c_int = 0x0067; +pub const _SC_2_PBS_LOCATE: c_int = 0x0068; +pub const _SC_2_PBS_MESSAGE: c_int = 0x0069; +pub const _SC_2_PBS_TRACK: c_int = 0x006a; +pub const _SC_ADVISORY_INFO: c_int = 0x006b; +pub const _SC_BARRIERS: c_int = 0x006c; +pub const _SC_CLOCK_SELECTION: c_int = 0x006d; +pub const _SC_CPUTIME: c_int = 0x006e; +pub const _SC_HOST_NAME_MAX: c_int = 0x006f; +pub const _SC_IPV6: c_int = 0x0070; +pub const _SC_RAW_SOCKETS: c_int = 0x0071; +pub const _SC_READER_WRITER_LOCKS: c_int = 0x0072; +pub const _SC_REGEXP: c_int = 0x0073; +pub const _SC_SHELL: c_int = 0x0074; +pub const _SC_SPAWN: c_int = 0x0075; +pub const _SC_SPIN_LOCKS: c_int = 0x0076; +pub const _SC_SPORADIC_SERVER: c_int = 0x0077; +pub const _SC_SS_REPL_MAX: c_int = 0x0078; +pub const _SC_SYMLOOP_MAX: c_int = 0x0079; +pub const _SC_THREAD_CPUTIME: c_int = 0x007a; +pub const _SC_THREAD_PROCESS_SHARED: c_int = 0x007b; +pub const _SC_THREAD_ROBUST_PRIO_INHERIT: c_int = 0x007c; +pub const _SC_THREAD_ROBUST_PRIO_PROTECT: c_int = 0x007d; +pub const _SC_THREAD_SPORADIC_SERVER: c_int = 0x007e; +pub const _SC_TIMEOUTS: c_int = 0x007f; +pub const _SC_TRACE: c_int = 0x0080; +pub const _SC_TRACE_EVENT_FILTER: c_int = 0x0081; +pub const _SC_TRACE_EVENT_NAME_MAX: c_int = 0x0082; +pub const _SC_TRACE_INHERIT: c_int = 0x0083; +pub const _SC_TRACE_LOG: c_int = 0x0084; +pub 
const _SC_TRACE_NAME_MAX: c_int = 0x0085; +pub const _SC_TRACE_SYS_MAX: c_int = 0x0086; +pub const _SC_TRACE_USER_EVENT_MAX: c_int = 0x0087; +pub const _SC_TYPED_MEMORY_OBJECTS: c_int = 0x0088; +pub const _SC_V7_ILP32_OFF32: c_int = 0x0089; +pub const _SC_V7_ILP32_OFFBIG: c_int = 0x008a; +pub const _SC_V7_LP64_OFF64: c_int = 0x008b; +pub const _SC_V7_LPBIG_OFFBIG: c_int = 0x008c; +pub const _SC_XOPEN_STREAMS: c_int = 0x008d; +pub const _SC_XOPEN_UUCP: c_int = 0x008e; +pub const _SC_LEVEL1_ICACHE_SIZE: c_int = 0x008f; +pub const _SC_LEVEL1_ICACHE_ASSOC: c_int = 0x0090; +pub const _SC_LEVEL1_ICACHE_LINESIZE: c_int = 0x0091; +pub const _SC_LEVEL1_DCACHE_SIZE: c_int = 0x0092; +pub const _SC_LEVEL1_DCACHE_ASSOC: c_int = 0x0093; +pub const _SC_LEVEL1_DCACHE_LINESIZE: c_int = 0x0094; +pub const _SC_LEVEL2_CACHE_SIZE: c_int = 0x0095; +pub const _SC_LEVEL2_CACHE_ASSOC: c_int = 0x0096; +pub const _SC_LEVEL2_CACHE_LINESIZE: c_int = 0x0097; +pub const _SC_LEVEL3_CACHE_SIZE: c_int = 0x0098; +pub const _SC_LEVEL3_CACHE_ASSOC: c_int = 0x0099; +pub const _SC_LEVEL3_CACHE_LINESIZE: c_int = 0x009a; +pub const _SC_LEVEL4_CACHE_SIZE: c_int = 0x009b; +pub const _SC_LEVEL4_CACHE_ASSOC: c_int = 0x009c; +pub const _SC_LEVEL4_CACHE_LINESIZE: c_int = 0x009d; + +pub const F_LOCK: c_int = 1; +pub const F_TEST: c_int = 3; +pub const F_TLOCK: c_int = 2; +pub const F_ULOCK: c_int = 0; + +pub const F_SEAL_FUTURE_WRITE: c_int = 0x0010; +pub const F_SEAL_EXEC: c_int = 0x0020; + +pub const IFF_LOWER_UP: c_int = 0x10000; +pub const IFF_DORMANT: c_int = 0x20000; +pub const IFF_ECHO: c_int = 0x40000; + +pub const PTHREAD_BARRIER_SERIAL_THREAD: c_int = -1; +pub const PTHREAD_MUTEX_NORMAL: c_int = 0; +pub const PTHREAD_MUTEX_RECURSIVE: c_int = 1; +pub const PTHREAD_MUTEX_ERRORCHECK: c_int = 2; +pub const PTHREAD_MUTEX_DEFAULT: c_int = PTHREAD_MUTEX_NORMAL; + +pub const PTHREAD_EXPLICIT_SCHED: c_int = 0; +pub const PTHREAD_INHERIT_SCHED: c_int = 1; + +// stdio.h +pub const RENAME_NOREPLACE: c_int = 1; +pub const RENAME_EXCHANGE: c_int = 2; +pub const RENAME_WHITEOUT: c_int = 4; + +pub const FIOCLEX: c_int = 0x5451; +pub const FIONCLEX: c_int = 0x5450; + +pub const SIGCHLD: c_int = 17; +pub const SIGBUS: c_int = 7; +pub const SIGUSR1: c_int = 10; +pub const SIGUSR2: c_int = 12; +pub const SIGCONT: c_int = 18; +pub const SIGSTOP: c_int = 19; +pub const SIGTSTP: c_int = 20; +pub const SIGURG: c_int = 23; +pub const SIGIO: c_int = 29; +pub const SIGSYS: c_int = 31; +pub const SIGSTKFLT: c_int = 16; +#[deprecated(since = "0.2.55", note = "Use SIGSYS instead")] +pub const SIGUNUSED: c_int = 31; +pub const SIGTTIN: c_int = 21; +pub const SIGTTOU: c_int = 22; +pub const SIGXCPU: c_int = 24; +pub const SIGXFSZ: c_int = 25; +pub const SIGVTALRM: c_int = 26; +pub const SIGPROF: c_int = 27; +pub const SIGWINCH: c_int = 28; +pub const SIGPOLL: c_int = 29; +pub const SIGPWR: c_int = 30; +pub const SIG_SETMASK: c_int = 2; +pub const SIG_BLOCK: c_int = 0x000000; +pub const SIG_UNBLOCK: c_int = 0x01; + +pub const RUSAGE_CHILDREN: c_int = -1; + +pub const LC_PAPER: c_int = 7; +pub const LC_NAME: c_int = 8; +pub const LC_ADDRESS: c_int = 9; +pub const LC_TELEPHONE: c_int = 10; +pub const LC_MEASUREMENT: c_int = 11; +pub const LC_IDENTIFICATION: c_int = 12; +pub const LC_PAPER_MASK: c_int = 1 << LC_PAPER; +pub const LC_NAME_MASK: c_int = 1 << LC_NAME; +pub const LC_ADDRESS_MASK: c_int = 1 << LC_ADDRESS; +pub const LC_TELEPHONE_MASK: c_int = 1 << LC_TELEPHONE; +pub const LC_MEASUREMENT_MASK: c_int = 1 << LC_MEASUREMENT; +pub const 
LC_IDENTIFICATION_MASK: c_int = 1 << LC_IDENTIFICATION; +pub const LC_ALL_MASK: c_int = crate::LC_CTYPE_MASK + | crate::LC_NUMERIC_MASK + | crate::LC_TIME_MASK + | crate::LC_COLLATE_MASK + | crate::LC_MONETARY_MASK + | crate::LC_MESSAGES_MASK + | LC_PAPER_MASK + | LC_NAME_MASK + | LC_ADDRESS_MASK + | LC_TELEPHONE_MASK + | LC_MEASUREMENT_MASK + | LC_IDENTIFICATION_MASK; + +pub const MAP_ANON: c_int = 0x0020; +pub const MAP_ANONYMOUS: c_int = 0x0020; +pub const MAP_GROWSDOWN: c_int = 0x0100; +pub const MAP_DENYWRITE: c_int = 0x0800; +pub const MAP_EXECUTABLE: c_int = 0x01000; +pub const MAP_LOCKED: c_int = 0x02000; +pub const MAP_NORESERVE: c_int = 0x04000; +pub const MAP_POPULATE: c_int = 0x08000; +pub const MAP_NONBLOCK: c_int = 0x010000; +pub const MAP_STACK: c_int = 0x020000; + +pub const EDEADLK: c_int = 35; +pub const ENAMETOOLONG: c_int = 36; +pub const ENOLCK: c_int = 37; +pub const ENOSYS: c_int = 38; +pub const ENOTEMPTY: c_int = 39; +pub const ELOOP: c_int = 40; +pub const ENOMSG: c_int = 42; +pub const EIDRM: c_int = 43; +pub const ECHRNG: c_int = 44; +pub const EL2NSYNC: c_int = 45; +pub const EL3HLT: c_int = 46; +pub const EL3RST: c_int = 47; +pub const ELNRNG: c_int = 48; +pub const EUNATCH: c_int = 49; +pub const ENOCSI: c_int = 50; +pub const EL2HLT: c_int = 51; +pub const EBADE: c_int = 52; +pub const EBADR: c_int = 53; +pub const EXFULL: c_int = 54; +pub const ENOANO: c_int = 55; +pub const EBADRQC: c_int = 56; +pub const EBADSLT: c_int = 57; + +pub const EMULTIHOP: c_int = 72; +pub const EBADMSG: c_int = 74; +pub const EOVERFLOW: c_int = 75; +pub const ENOTUNIQ: c_int = 76; +pub const EBADFD: c_int = 77; +pub const EREMCHG: c_int = 78; +pub const ELIBACC: c_int = 79; +pub const ELIBBAD: c_int = 80; +pub const ELIBSCN: c_int = 81; +pub const ELIBMAX: c_int = 82; +pub const ELIBEXEC: c_int = 83; +pub const EILSEQ: c_int = 84; +pub const ERESTART: c_int = 85; +pub const ESTRPIPE: c_int = 86; +pub const EUSERS: c_int = 87; +pub const ENOTSOCK: c_int = 88; +pub const EDESTADDRREQ: c_int = 89; +pub const EMSGSIZE: c_int = 90; +pub const EPROTOTYPE: c_int = 91; +pub const ENOPROTOOPT: c_int = 92; +pub const EPROTONOSUPPORT: c_int = 93; +pub const ESOCKTNOSUPPORT: c_int = 94; +pub const EOPNOTSUPP: c_int = 95; +pub const ENOTSUP: c_int = EOPNOTSUPP; +pub const EPFNOSUPPORT: c_int = 96; +pub const EAFNOSUPPORT: c_int = 97; +pub const EADDRINUSE: c_int = 98; +pub const EADDRNOTAVAIL: c_int = 99; +pub const ENETDOWN: c_int = 100; +pub const ENETUNREACH: c_int = 101; +pub const ENETRESET: c_int = 102; +pub const ECONNABORTED: c_int = 103; +pub const ECONNRESET: c_int = 104; +pub const ENOBUFS: c_int = 105; +pub const EISCONN: c_int = 106; +pub const ENOTCONN: c_int = 107; +pub const ESHUTDOWN: c_int = 108; +pub const ETOOMANYREFS: c_int = 109; +pub const ETIMEDOUT: c_int = 110; +pub const ECONNREFUSED: c_int = 111; +pub const EHOSTDOWN: c_int = 112; +pub const EHOSTUNREACH: c_int = 113; +pub const EALREADY: c_int = 114; +pub const EINPROGRESS: c_int = 115; +pub const ESTALE: c_int = 116; +pub const EUCLEAN: c_int = 117; +pub const ENOTNAM: c_int = 118; +pub const ENAVAIL: c_int = 119; +pub const EISNAM: c_int = 120; +pub const EREMOTEIO: c_int = 121; +pub const EDQUOT: c_int = 122; +pub const ENOMEDIUM: c_int = 123; +pub const EMEDIUMTYPE: c_int = 124; +pub const ECANCELED: c_int = 125; +pub const ENOKEY: c_int = 126; +pub const EKEYEXPIRED: c_int = 127; +pub const EKEYREVOKED: c_int = 128; +pub const EKEYREJECTED: c_int = 129; +pub const EOWNERDEAD: c_int = 130; +pub const 
ENOTRECOVERABLE: c_int = 131; + +pub const SOCK_STREAM: c_int = 1; +pub const SOCK_DGRAM: c_int = 2; +pub const SOCK_SEQPACKET: c_int = 5; +pub const SOCK_DCCP: c_int = 6; +#[deprecated(since = "0.2.70", note = "AF_PACKET must be used instead")] +pub const SOCK_PACKET: c_int = 10; + +pub const IPPROTO_MAX: c_int = 256; + +pub const SOL_SOCKET: c_int = 1; +pub const SOL_SCTP: c_int = 132; +pub const SOL_IPX: c_int = 256; +pub const SOL_AX25: c_int = 257; +pub const SOL_ATALK: c_int = 258; +pub const SOL_NETROM: c_int = 259; +pub const SOL_ROSE: c_int = 260; + +/* UDP socket options */ +// include/uapi/linux/udp.h +pub const UDP_CORK: c_int = 1; +pub const UDP_ENCAP: c_int = 100; +pub const UDP_NO_CHECK6_TX: c_int = 101; +pub const UDP_NO_CHECK6_RX: c_int = 102; +pub const UDP_SEGMENT: c_int = 103; +pub const UDP_GRO: c_int = 104; + +/* DCCP socket options */ +pub const DCCP_SOCKOPT_PACKET_SIZE: c_int = 1; +pub const DCCP_SOCKOPT_SERVICE: c_int = 2; +pub const DCCP_SOCKOPT_CHANGE_L: c_int = 3; +pub const DCCP_SOCKOPT_CHANGE_R: c_int = 4; +pub const DCCP_SOCKOPT_GET_CUR_MPS: c_int = 5; +pub const DCCP_SOCKOPT_SERVER_TIMEWAIT: c_int = 6; +pub const DCCP_SOCKOPT_SEND_CSCOV: c_int = 10; +pub const DCCP_SOCKOPT_RECV_CSCOV: c_int = 11; +pub const DCCP_SOCKOPT_AVAILABLE_CCIDS: c_int = 12; +pub const DCCP_SOCKOPT_CCID: c_int = 13; +pub const DCCP_SOCKOPT_TX_CCID: c_int = 14; +pub const DCCP_SOCKOPT_RX_CCID: c_int = 15; +pub const DCCP_SOCKOPT_QPOLICY_ID: c_int = 16; +pub const DCCP_SOCKOPT_QPOLICY_TXQLEN: c_int = 17; +pub const DCCP_SOCKOPT_CCID_RX_INFO: c_int = 128; +pub const DCCP_SOCKOPT_CCID_TX_INFO: c_int = 192; + +/// maximum number of services provided on the same listening port +pub const DCCP_SERVICE_LIST_MAX_LEN: c_int = 32; + +pub const SO_REUSEADDR: c_int = 2; +pub const SO_TYPE: c_int = 3; +pub const SO_ERROR: c_int = 4; +pub const SO_DONTROUTE: c_int = 5; +pub const SO_BROADCAST: c_int = 6; +pub const SO_SNDBUF: c_int = 7; +pub const SO_RCVBUF: c_int = 8; +pub const SO_KEEPALIVE: c_int = 9; +pub const SO_OOBINLINE: c_int = 10; +pub const SO_PRIORITY: c_int = 12; +pub const SO_LINGER: c_int = 13; +pub const SO_BSDCOMPAT: c_int = 14; +pub const SO_REUSEPORT: c_int = 15; +pub const SO_PASSCRED: c_int = 16; +pub const SO_PEERCRED: c_int = 17; +pub const SO_RCVLOWAT: c_int = 18; +pub const SO_SNDLOWAT: c_int = 19; +pub const SO_RCVTIMEO: c_int = 20; +pub const SO_SNDTIMEO: c_int = 21; +pub const SO_BINDTODEVICE: c_int = 25; +pub const SO_ATTACH_FILTER: c_int = 26; +pub const SO_DETACH_FILTER: c_int = 27; +pub const SO_GET_FILTER: c_int = SO_ATTACH_FILTER; +pub const SO_TIMESTAMP: c_int = 29; +pub const SO_ACCEPTCONN: c_int = 30; +pub const SO_PEERSEC: c_int = 31; +pub const SO_SNDBUFFORCE: c_int = 32; +pub const SO_RCVBUFFORCE: c_int = 33; +pub const SO_PASSSEC: c_int = 34; +pub const SO_TIMESTAMPNS: c_int = 35; +// pub const SO_TIMESTAMPNS_OLD: c_int = 35; +pub const SO_MARK: c_int = 36; +pub const SO_TIMESTAMPING: c_int = 37; +// pub const SO_TIMESTAMPING_OLD: c_int = 37; +pub const SO_PROTOCOL: c_int = 38; +pub const SO_DOMAIN: c_int = 39; +pub const SO_RXQ_OVFL: c_int = 40; +pub const SO_PEEK_OFF: c_int = 42; +pub const SO_BUSY_POLL: c_int = 46; +pub const SCM_TIMESTAMPING_OPT_STATS: c_int = 54; +pub const SCM_TIMESTAMPING_PKTINFO: c_int = 58; +pub const SO_BINDTOIFINDEX: c_int = 62; +pub const SO_TIMESTAMP_NEW: c_int = 63; +pub const SO_TIMESTAMPNS_NEW: c_int = 64; +pub const SO_TIMESTAMPING_NEW: c_int = 65; + +// Defined in unix/linux_like/mod.rs +// pub const SCM_TIMESTAMP: c_int = 
SO_TIMESTAMP; +pub const SCM_TIMESTAMPNS: c_int = SO_TIMESTAMPNS; +pub const SCM_TIMESTAMPING: c_int = SO_TIMESTAMPING; + +pub const IPTOS_ECN_NOTECT: u8 = 0x00; + +pub const O_ACCMODE: c_int = 3; +pub const O_APPEND: c_int = 1024; +pub const O_CREAT: c_int = 64; +pub const O_EXCL: c_int = 128; +pub const O_NOCTTY: c_int = 256; +pub const O_NONBLOCK: c_int = 2048; +pub const O_SYNC: c_int = 0x101000; +pub const O_ASYNC: c_int = 0x2000; +pub const O_NDELAY: c_int = 0x800; +pub const O_DSYNC: c_int = 4096; +pub const O_RSYNC: c_int = O_SYNC; + +pub const NI_MAXHOST: size_t = 1025; +pub const NI_MAXSERV: size_t = 32; + +pub const NI_NOFQDN: c_int = 0x00000001; +pub const NI_NUMERICHOST: c_int = 0x00000002; +pub const NI_NAMEREQD: c_int = 0x00000004; +pub const NI_NUMERICSERV: c_int = 0x00000008; +pub const NI_DGRAM: c_int = 0x00000010; + +pub const NCCS: usize = 19; +pub const TCSBRKP: c_int = 0x5425; +pub const TCSANOW: c_int = 0; +pub const TCSADRAIN: c_int = 0x1; +pub const TCSAFLUSH: c_int = 0x2; +pub const VEOF: usize = 4; +pub const VEOL: usize = 11; +pub const VEOL2: usize = 16; +pub const VMIN: usize = 6; +pub const IEXTEN: crate::tcflag_t = 0x00008000; +pub const TOSTOP: crate::tcflag_t = 0x00000100; +pub const FLUSHO: crate::tcflag_t = 0x00001000; +pub const EXTPROC: crate::tcflag_t = 0o200000; + +pub const MAP_HUGETLB: c_int = 0x040000; + +pub const PTRACE_TRACEME: c_int = 0; +pub const PTRACE_PEEKTEXT: c_int = 1; +pub const PTRACE_PEEKDATA: c_int = 2; +pub const PTRACE_PEEKUSER: c_int = 3; +pub const PTRACE_POKETEXT: c_int = 4; +pub const PTRACE_POKEDATA: c_int = 5; +pub const PTRACE_POKEUSER: c_int = 6; +pub const PTRACE_CONT: c_int = 7; +pub const PTRACE_KILL: c_int = 8; +pub const PTRACE_SINGLESTEP: c_int = 9; +pub const PTRACE_GETREGS: c_int = 12; +pub const PTRACE_SETREGS: c_int = 13; +pub const PTRACE_ATTACH: c_int = 16; +pub const PTRACE_DETACH: c_int = 17; +pub const PTRACE_SYSCALL: c_int = 24; +pub const PTRACE_SETOPTIONS: c_int = 0x4200; +pub const PTRACE_GETEVENTMSG: c_int = 0x4201; +pub const PTRACE_GETSIGINFO: c_int = 0x4202; +pub const PTRACE_SETSIGINFO: c_int = 0x4203; +pub const PTRACE_GETREGSET: c_int = 0x4204; +pub const PTRACE_SETREGSET: c_int = 0x4205; +pub const PTRACE_SECCOMP_GET_METADATA: c_int = 0x420d; + +pub const PTRACE_EVENT_STOP: c_int = 128; + +pub const F_GETLK: c_int = 5; +pub const F_GETOWN: c_int = 9; +pub const F_SETOWN: c_int = 8; +pub const F_SETLK: c_int = 6; +pub const F_SETLKW: c_int = 7; +pub const F_RDLCK: c_int = 0; +pub const F_WRLCK: c_int = 1; +pub const F_UNLCK: c_int = 2; +pub const F_OFD_GETLK: c_int = 36; +pub const F_OFD_SETLK: c_int = 37; +pub const F_OFD_SETLKW: c_int = 38; + +pub const RLIMIT_CPU: c_int = 0; +pub const RLIMIT_FSIZE: c_int = 1; +pub const RLIMIT_DATA: c_int = 2; +pub const RLIMIT_STACK: c_int = 3; +pub const RLIMIT_CORE: c_int = 4; +pub const RLIMIT_RSS: c_int = 5; +pub const RLIMIT_NPROC: c_int = 6; +pub const RLIMIT_NOFILE: c_int = 7; +pub const RLIMIT_MEMLOCK: c_int = 8; +pub const RLIMIT_AS: c_int = 9; +pub const RLIMIT_LOCKS: c_int = 10; +pub const RLIMIT_SIGPENDING: c_int = 11; +pub const RLIMIT_MSGQUEUE: c_int = 12; +pub const RLIMIT_NICE: c_int = 13; +pub const RLIMIT_RTPRIO: c_int = 14; + +#[deprecated(since = "0.2.64", note = "Not stable across OS versions")] +pub const RLIM_NLIMITS: c_int = 16; +pub const RLIM_INFINITY: crate::rlim_t = !0; + +pub const TCGETS: c_int = 0x5401; +pub const TCSETS: c_int = 0x5402; +pub const TCSETSW: c_int = 0x5403; +pub const TCSETSF: c_int = 0x5404; +pub const TCGETS2: 
c_int = 0x802c542a; +pub const TCSETS2: c_int = 0x402c542b; +pub const TCSETSW2: c_int = 0x402c542c; +pub const TCSETSF2: c_int = 0x402c542d; +pub const TCGETA: c_int = 0x5405; +pub const TCSETA: c_int = 0x5406; +pub const TCSETAW: c_int = 0x5407; +pub const TCSETAF: c_int = 0x5408; +pub const TCSBRK: c_int = 0x5409; +pub const TCXONC: c_int = 0x540A; +pub const TCFLSH: c_int = 0x540B; +pub const TIOCGSOFTCAR: c_int = 0x5419; +pub const TIOCSSOFTCAR: c_int = 0x541A; +pub const TIOCINQ: c_int = 0x541B; +pub const TIOCLINUX: c_int = 0x541C; +pub const TIOCGSERIAL: c_int = 0x541E; +pub const TIOCEXCL: c_int = 0x540C; +pub const TIOCNXCL: c_int = 0x540D; +pub const TIOCSCTTY: c_int = 0x540E; +pub const TIOCGPGRP: c_int = 0x540F; +pub const TIOCSPGRP: c_int = 0x5410; +pub const TIOCOUTQ: c_int = 0x5411; +pub const TIOCSTI: c_int = 0x5412; +pub const TIOCGWINSZ: c_int = 0x5413; +pub const TIOCSWINSZ: c_int = 0x5414; +pub const TIOCMGET: c_int = 0x5415; +pub const TIOCMBIS: c_int = 0x5416; +pub const TIOCMBIC: c_int = 0x5417; +pub const TIOCMSET: c_int = 0x5418; +pub const FIONREAD: c_int = 0x541B; +pub const TIOCCONS: c_int = 0x541D; +pub const TIOCSBRK: c_int = 0x5427; +pub const TIOCCBRK: c_int = 0x5428; + +pub const ST_RDONLY: c_ulong = 1; +pub const ST_NOSUID: c_ulong = 2; +pub const ST_NODEV: c_ulong = 4; +pub const ST_NOEXEC: c_ulong = 8; +pub const ST_SYNCHRONOUS: c_ulong = 16; +pub const ST_MANDLOCK: c_ulong = 64; +pub const ST_NOATIME: c_ulong = 1024; +pub const ST_NODIRATIME: c_ulong = 2048; +pub const ST_RELATIME: c_ulong = 4096; + +pub const RTLD_NOLOAD: c_int = 0x4; +pub const RTLD_NODELETE: c_int = 0x1000; + +pub const SEM_FAILED: *mut sem_t = ptr::null_mut(); + +pub const AI_PASSIVE: c_int = 0x00000001; +pub const AI_CANONNAME: c_int = 0x00000002; +pub const AI_NUMERICHOST: c_int = 0x00000004; +pub const AI_NUMERICSERV: c_int = 0x00000008; +pub const AI_MASK: c_int = + AI_PASSIVE | AI_CANONNAME | AI_NUMERICHOST | AI_NUMERICSERV | AI_ADDRCONFIG; +pub const AI_ALL: c_int = 0x00000100; +pub const AI_V4MAPPED_CFG: c_int = 0x00000200; +pub const AI_ADDRCONFIG: c_int = 0x00000400; +pub const AI_V4MAPPED: c_int = 0x00000800; +pub const AI_DEFAULT: c_int = AI_V4MAPPED_CFG | AI_ADDRCONFIG; + +// linux/kexec.h +pub const KEXEC_ON_CRASH: c_int = 0x00000001; +pub const KEXEC_PRESERVE_CONTEXT: c_int = 0x00000002; +pub const KEXEC_ARCH_MASK: c_int = 0xffff0000; +pub const KEXEC_FILE_UNLOAD: c_int = 0x00000001; +pub const KEXEC_FILE_ON_CRASH: c_int = 0x00000002; +pub const KEXEC_FILE_NO_INITRAMFS: c_int = 0x00000004; + +pub const LINUX_REBOOT_MAGIC1: c_int = 0xfee1dead; +pub const LINUX_REBOOT_MAGIC2: c_int = 672274793; +pub const LINUX_REBOOT_MAGIC2A: c_int = 85072278; +pub const LINUX_REBOOT_MAGIC2B: c_int = 369367448; +pub const LINUX_REBOOT_MAGIC2C: c_int = 537993216; + +pub const LINUX_REBOOT_CMD_RESTART: c_int = 0x01234567; +pub const LINUX_REBOOT_CMD_HALT: c_int = 0xCDEF0123; +pub const LINUX_REBOOT_CMD_CAD_ON: c_int = 0x89ABCDEF; +pub const LINUX_REBOOT_CMD_CAD_OFF: c_int = 0x00000000; +pub const LINUX_REBOOT_CMD_POWER_OFF: c_int = 0x4321FEDC; +pub const LINUX_REBOOT_CMD_RESTART2: c_int = 0xA1B2C3D4; +pub const LINUX_REBOOT_CMD_SW_SUSPEND: c_int = 0xD000FCE2; +pub const LINUX_REBOOT_CMD_KEXEC: c_int = 0x45584543; + +pub const REG_BASIC: c_int = 0; +pub const REG_EXTENDED: c_int = 1; +pub const REG_ICASE: c_int = 2; +pub const REG_NOSUB: c_int = 4; +pub const REG_NEWLINE: c_int = 8; +pub const REG_NOSPEC: c_int = 16; +pub const REG_PEND: c_int = 32; +pub const REG_DUMP: c_int = 128; + 
+pub const REG_NOMATCH: c_int = 1; +pub const REG_BADPAT: c_int = 2; +pub const REG_ECOLLATE: c_int = 3; +pub const REG_ECTYPE: c_int = 4; +pub const REG_EESCAPE: c_int = 5; +pub const REG_ESUBREG: c_int = 6; +pub const REG_EBRACK: c_int = 7; +pub const REG_EPAREN: c_int = 8; +pub const REG_EBRACE: c_int = 9; +pub const REG_BADBR: c_int = 10; +pub const REG_ERANGE: c_int = 11; +pub const REG_ESPACE: c_int = 12; +pub const REG_BADRPT: c_int = 13; +pub const REG_EMPTY: c_int = 14; +pub const REG_ASSERT: c_int = 15; +pub const REG_INVARG: c_int = 16; +pub const REG_ATOI: c_int = 255; +pub const REG_ITOA: c_int = 256; + +pub const REG_NOTBOL: c_int = 1; +pub const REG_NOTEOL: c_int = 2; +pub const REG_STARTEND: c_int = 4; +pub const REG_TRACE: c_int = 256; +pub const REG_LARGE: c_int = 512; +pub const REG_BACKR: c_int = 1024; + +pub const MCL_CURRENT: c_int = 0x0001; +pub const MCL_FUTURE: c_int = 0x0002; +pub const MCL_ONFAULT: c_int = 0x0004; + +pub const CBAUD: crate::tcflag_t = 0o0010017; +pub const TAB1: crate::tcflag_t = 0x00000800; +pub const TAB2: crate::tcflag_t = 0x00001000; +pub const TAB3: crate::tcflag_t = 0x00001800; +pub const CR1: crate::tcflag_t = 0x00000200; +pub const CR2: crate::tcflag_t = 0x00000400; +pub const CR3: crate::tcflag_t = 0x00000600; +pub const FF1: crate::tcflag_t = 0x00008000; +pub const BS1: crate::tcflag_t = 0x00002000; +pub const VT1: crate::tcflag_t = 0x00004000; +pub const VWERASE: usize = 14; +pub const VREPRINT: usize = 12; +pub const VSUSP: usize = 10; +pub const VSTART: usize = 8; +pub const VSTOP: usize = 9; +pub const VDISCARD: usize = 13; +pub const VTIME: usize = 5; +pub const IXON: crate::tcflag_t = 0x00000400; +pub const IXOFF: crate::tcflag_t = 0x00001000; +pub const ONLCR: crate::tcflag_t = 0x4; +pub const CSIZE: crate::tcflag_t = 0x00000030; +pub const CS6: crate::tcflag_t = 0x00000010; +pub const CS7: crate::tcflag_t = 0x00000020; +pub const CS8: crate::tcflag_t = 0x00000030; +pub const CSTOPB: crate::tcflag_t = 0x00000040; +pub const CREAD: crate::tcflag_t = 0x00000080; +pub const PARENB: crate::tcflag_t = 0x00000100; +pub const PARODD: crate::tcflag_t = 0x00000200; +pub const HUPCL: crate::tcflag_t = 0x00000400; +pub const CLOCAL: crate::tcflag_t = 0x00000800; +pub const ECHOKE: crate::tcflag_t = 0x00000800; +pub const ECHOE: crate::tcflag_t = 0x00000010; +pub const ECHOK: crate::tcflag_t = 0x00000020; +pub const ECHONL: crate::tcflag_t = 0x00000040; +pub const ECHOPRT: crate::tcflag_t = 0x00000400; +pub const ECHOCTL: crate::tcflag_t = 0x00000200; +pub const ISIG: crate::tcflag_t = 0x00000001; +pub const ICANON: crate::tcflag_t = 0x00000002; +pub const PENDIN: crate::tcflag_t = 0x00004000; +pub const NOFLSH: crate::tcflag_t = 0x00000080; +pub const VSWTC: usize = 7; +pub const OLCUC: crate::tcflag_t = 0o000002; +pub const NLDLY: crate::tcflag_t = 0o000400; +pub const CRDLY: crate::tcflag_t = 0o003000; +pub const TABDLY: crate::tcflag_t = 0o014000; +pub const BSDLY: crate::tcflag_t = 0o020000; +pub const FFDLY: crate::tcflag_t = 0o100000; +pub const VTDLY: crate::tcflag_t = 0o040000; +pub const XTABS: crate::tcflag_t = 0o014000; + +pub const B0: crate::speed_t = 0o000000; +pub const B50: crate::speed_t = 0o000001; +pub const B75: crate::speed_t = 0o000002; +pub const B110: crate::speed_t = 0o000003; +pub const B134: crate::speed_t = 0o000004; +pub const B150: crate::speed_t = 0o000005; +pub const B200: crate::speed_t = 0o000006; +pub const B300: crate::speed_t = 0o000007; +pub const B600: crate::speed_t = 0o000010; +pub const B1200: 
crate::speed_t = 0o000011; +pub const B1800: crate::speed_t = 0o000012; +pub const B2400: crate::speed_t = 0o000013; +pub const B4800: crate::speed_t = 0o000014; +pub const B9600: crate::speed_t = 0o000015; +pub const B19200: crate::speed_t = 0o000016; +pub const B38400: crate::speed_t = 0o000017; +pub const EXTA: crate::speed_t = B19200; +pub const EXTB: crate::speed_t = B38400; +pub const BOTHER: crate::speed_t = 0o010000; +pub const B57600: crate::speed_t = 0o010001; +pub const B115200: crate::speed_t = 0o010002; +pub const B230400: crate::speed_t = 0o010003; +pub const B460800: crate::speed_t = 0o010004; +pub const B500000: crate::speed_t = 0o010005; +pub const B576000: crate::speed_t = 0o010006; +pub const B921600: crate::speed_t = 0o010007; +pub const B1000000: crate::speed_t = 0o010010; +pub const B1152000: crate::speed_t = 0o010011; +pub const B1500000: crate::speed_t = 0o010012; +pub const B2000000: crate::speed_t = 0o010013; +pub const B2500000: crate::speed_t = 0o010014; +pub const B3000000: crate::speed_t = 0o010015; +pub const B3500000: crate::speed_t = 0o010016; +pub const B4000000: crate::speed_t = 0o010017; +pub const IBSHIFT: crate::tcflag_t = 16; + +pub const BLKIOMIN: c_int = 0x1278; +pub const BLKIOOPT: c_int = 0x1279; +pub const BLKSSZGET: c_int = 0x1268; +pub const BLKPBSZGET: c_int = 0x127B; + +pub const EAI_AGAIN: c_int = 2; +pub const EAI_BADFLAGS: c_int = 3; +pub const EAI_FAIL: c_int = 4; +pub const EAI_FAMILY: c_int = 5; +pub const EAI_MEMORY: c_int = 6; +pub const EAI_NODATA: c_int = 7; +pub const EAI_NONAME: c_int = 8; +pub const EAI_SERVICE: c_int = 9; +pub const EAI_SOCKTYPE: c_int = 10; +pub const EAI_SYSTEM: c_int = 11; +pub const EAI_OVERFLOW: c_int = 14; + +pub const NETLINK_ROUTE: c_int = 0; +pub const NETLINK_UNUSED: c_int = 1; +pub const NETLINK_USERSOCK: c_int = 2; +pub const NETLINK_FIREWALL: c_int = 3; +pub const NETLINK_SOCK_DIAG: c_int = 4; +pub const NETLINK_NFLOG: c_int = 5; +pub const NETLINK_XFRM: c_int = 6; +pub const NETLINK_SELINUX: c_int = 7; +pub const NETLINK_ISCSI: c_int = 8; +pub const NETLINK_AUDIT: c_int = 9; +pub const NETLINK_FIB_LOOKUP: c_int = 10; +pub const NETLINK_CONNECTOR: c_int = 11; +pub const NETLINK_NETFILTER: c_int = 12; +pub const NETLINK_IP6_FW: c_int = 13; +pub const NETLINK_DNRTMSG: c_int = 14; +pub const NETLINK_KOBJECT_UEVENT: c_int = 15; +pub const NETLINK_GENERIC: c_int = 16; +pub const NETLINK_SCSITRANSPORT: c_int = 18; +pub const NETLINK_ECRYPTFS: c_int = 19; +pub const NETLINK_RDMA: c_int = 20; +pub const NETLINK_CRYPTO: c_int = 21; +pub const NETLINK_INET_DIAG: c_int = NETLINK_SOCK_DIAG; + +pub const MAX_LINKS: c_int = 32; + +pub const NLM_F_REQUEST: c_int = 1; +pub const NLM_F_MULTI: c_int = 2; +pub const NLM_F_ACK: c_int = 4; +pub const NLM_F_ECHO: c_int = 8; +pub const NLM_F_DUMP_INTR: c_int = 16; +pub const NLM_F_DUMP_FILTERED: c_int = 32; + +pub const NLM_F_ROOT: c_int = 0x100; +pub const NLM_F_MATCH: c_int = 0x200; +pub const NLM_F_ATOMIC: c_int = 0x400; +pub const NLM_F_DUMP: c_int = NLM_F_ROOT | NLM_F_MATCH; + +pub const NLM_F_REPLACE: c_int = 0x100; +pub const NLM_F_EXCL: c_int = 0x200; +pub const NLM_F_CREATE: c_int = 0x400; +pub const NLM_F_APPEND: c_int = 0x800; + +pub const NLM_F_NONREC: c_int = 0x100; +pub const NLM_F_BULK: c_int = 0x200; + +pub const NLM_F_CAPPED: c_int = 0x100; +pub const NLM_F_ACK_TLVS: c_int = 0x200; + +pub const NLMSG_NOOP: c_int = 0x1; +pub const NLMSG_ERROR: c_int = 0x2; +pub const NLMSG_DONE: c_int = 0x3; +pub const NLMSG_OVERRUN: c_int = 0x4; +pub const NLMSG_MIN_TYPE: 
c_int = 0x10; + +// linux/netfilter/nfnetlink.h +pub const NFNLGRP_NONE: c_int = 0; +pub const NFNLGRP_CONNTRACK_NEW: c_int = 1; +pub const NFNLGRP_CONNTRACK_UPDATE: c_int = 2; +pub const NFNLGRP_CONNTRACK_DESTROY: c_int = 3; +pub const NFNLGRP_CONNTRACK_EXP_NEW: c_int = 4; +pub const NFNLGRP_CONNTRACK_EXP_UPDATE: c_int = 5; +pub const NFNLGRP_CONNTRACK_EXP_DESTROY: c_int = 6; +pub const NFNLGRP_NFTABLES: c_int = 7; +pub const NFNLGRP_ACCT_QUOTA: c_int = 8; + +pub const NFNETLINK_V0: c_int = 0; + +pub const NFNL_SUBSYS_NONE: c_int = 0; +pub const NFNL_SUBSYS_CTNETLINK: c_int = 1; +pub const NFNL_SUBSYS_CTNETLINK_EXP: c_int = 2; +pub const NFNL_SUBSYS_QUEUE: c_int = 3; +pub const NFNL_SUBSYS_ULOG: c_int = 4; +pub const NFNL_SUBSYS_OSF: c_int = 5; +pub const NFNL_SUBSYS_IPSET: c_int = 6; +pub const NFNL_SUBSYS_ACCT: c_int = 7; +pub const NFNL_SUBSYS_CTNETLINK_TIMEOUT: c_int = 8; +pub const NFNL_SUBSYS_CTHELPER: c_int = 9; +pub const NFNL_SUBSYS_NFTABLES: c_int = 10; +pub const NFNL_SUBSYS_NFT_COMPAT: c_int = 11; +pub const NFNL_SUBSYS_COUNT: c_int = 12; + +pub const NFNL_MSG_BATCH_BEGIN: c_int = NLMSG_MIN_TYPE; +pub const NFNL_MSG_BATCH_END: c_int = NLMSG_MIN_TYPE + 1; + +// linux/netfilter/nfnetlink_log.h +pub const NFULNL_MSG_PACKET: c_int = 0; +pub const NFULNL_MSG_CONFIG: c_int = 1; + +pub const NFULA_UNSPEC: c_int = 0; +pub const NFULA_PACKET_HDR: c_int = 1; +pub const NFULA_MARK: c_int = 2; +pub const NFULA_TIMESTAMP: c_int = 3; +pub const NFULA_IFINDEX_INDEV: c_int = 4; +pub const NFULA_IFINDEX_OUTDEV: c_int = 5; +pub const NFULA_IFINDEX_PHYSINDEV: c_int = 6; +pub const NFULA_IFINDEX_PHYSOUTDEV: c_int = 7; +pub const NFULA_HWADDR: c_int = 8; +pub const NFULA_PAYLOAD: c_int = 9; +pub const NFULA_PREFIX: c_int = 10; +pub const NFULA_UID: c_int = 11; +pub const NFULA_SEQ: c_int = 12; +pub const NFULA_SEQ_GLOBAL: c_int = 13; +pub const NFULA_GID: c_int = 14; +pub const NFULA_HWTYPE: c_int = 15; +pub const NFULA_HWHEADER: c_int = 16; +pub const NFULA_HWLEN: c_int = 17; +pub const NFULA_CT: c_int = 18; +pub const NFULA_CT_INFO: c_int = 19; + +pub const NFULNL_CFG_CMD_NONE: c_int = 0; +pub const NFULNL_CFG_CMD_BIND: c_int = 1; +pub const NFULNL_CFG_CMD_UNBIND: c_int = 2; +pub const NFULNL_CFG_CMD_PF_BIND: c_int = 3; +pub const NFULNL_CFG_CMD_PF_UNBIND: c_int = 4; + +pub const NFULA_CFG_UNSPEC: c_int = 0; +pub const NFULA_CFG_CMD: c_int = 1; +pub const NFULA_CFG_MODE: c_int = 2; +pub const NFULA_CFG_NLBUFSIZ: c_int = 3; +pub const NFULA_CFG_TIMEOUT: c_int = 4; +pub const NFULA_CFG_QTHRESH: c_int = 5; +pub const NFULA_CFG_FLAGS: c_int = 6; + +pub const NFULNL_COPY_NONE: c_int = 0x00; +pub const NFULNL_COPY_META: c_int = 0x01; +pub const NFULNL_COPY_PACKET: c_int = 0x02; + +pub const NFULNL_CFG_F_SEQ: c_int = 0x0001; +pub const NFULNL_CFG_F_SEQ_GLOBAL: c_int = 0x0002; +pub const NFULNL_CFG_F_CONNTRACK: c_int = 0x0004; + +// linux/netfilter/nfnetlink_log.h +pub const NFQNL_MSG_PACKET: c_int = 0; +pub const NFQNL_MSG_VERDICT: c_int = 1; +pub const NFQNL_MSG_CONFIG: c_int = 2; +pub const NFQNL_MSG_VERDICT_BATCH: c_int = 3; + +pub const NFQA_UNSPEC: c_int = 0; +pub const NFQA_PACKET_HDR: c_int = 1; +pub const NFQA_VERDICT_HDR: c_int = 2; +pub const NFQA_MARK: c_int = 3; +pub const NFQA_TIMESTAMP: c_int = 4; +pub const NFQA_IFINDEX_INDEV: c_int = 5; +pub const NFQA_IFINDEX_OUTDEV: c_int = 6; +pub const NFQA_IFINDEX_PHYSINDEV: c_int = 7; +pub const NFQA_IFINDEX_PHYSOUTDEV: c_int = 8; +pub const NFQA_HWADDR: c_int = 9; +pub const NFQA_PAYLOAD: c_int = 10; +pub const NFQA_CT: c_int = 11; +pub const 
NFQA_CT_INFO: c_int = 12; +pub const NFQA_CAP_LEN: c_int = 13; +pub const NFQA_SKB_INFO: c_int = 14; +pub const NFQA_EXP: c_int = 15; +pub const NFQA_UID: c_int = 16; +pub const NFQA_GID: c_int = 17; +pub const NFQA_SECCTX: c_int = 18; +/* + FIXME: These are not yet available in musl sanitized kernel headers and + make the tests fail. Enable them once musl has them. + + See https://github.com/rust-lang/libc/pull/1628 for more details. +pub const NFQA_VLAN: c_int = 19; +pub const NFQA_L2HDR: c_int = 20; + +pub const NFQA_VLAN_UNSPEC: c_int = 0; +pub const NFQA_VLAN_PROTO: c_int = 1; +pub const NFQA_VLAN_TCI: c_int = 2; +*/ + +pub const NFQNL_CFG_CMD_NONE: c_int = 0; +pub const NFQNL_CFG_CMD_BIND: c_int = 1; +pub const NFQNL_CFG_CMD_UNBIND: c_int = 2; +pub const NFQNL_CFG_CMD_PF_BIND: c_int = 3; +pub const NFQNL_CFG_CMD_PF_UNBIND: c_int = 4; + +pub const NFQNL_COPY_NONE: c_int = 0; +pub const NFQNL_COPY_META: c_int = 1; +pub const NFQNL_COPY_PACKET: c_int = 2; + +pub const NFQA_CFG_UNSPEC: c_int = 0; +pub const NFQA_CFG_CMD: c_int = 1; +pub const NFQA_CFG_PARAMS: c_int = 2; +pub const NFQA_CFG_QUEUE_MAXLEN: c_int = 3; +pub const NFQA_CFG_MASK: c_int = 4; +pub const NFQA_CFG_FLAGS: c_int = 5; + +pub const NFQA_CFG_F_FAIL_OPEN: c_int = 0x0001; +pub const NFQA_CFG_F_CONNTRACK: c_int = 0x0002; +pub const NFQA_CFG_F_GSO: c_int = 0x0004; +pub const NFQA_CFG_F_UID_GID: c_int = 0x0008; +pub const NFQA_CFG_F_SECCTX: c_int = 0x0010; +pub const NFQA_CFG_F_MAX: c_int = 0x0020; + +pub const NFQA_SKB_CSUMNOTREADY: c_int = 0x0001; +pub const NFQA_SKB_GSO: c_int = 0x0002; +pub const NFQA_SKB_CSUM_NOTVERIFIED: c_int = 0x0004; + +pub const GENL_NAMSIZ: c_int = 16; + +pub const GENL_MIN_ID: c_int = NLMSG_MIN_TYPE; +pub const GENL_MAX_ID: c_int = 1023; + +pub const GENL_ADMIN_PERM: c_int = 0x01; +pub const GENL_CMD_CAP_DO: c_int = 0x02; +pub const GENL_CMD_CAP_DUMP: c_int = 0x04; +pub const GENL_CMD_CAP_HASPOL: c_int = 0x08; +pub const GENL_UNS_ADMIN_PERM: c_int = 0x10; + +pub const GENL_ID_CTRL: c_int = NLMSG_MIN_TYPE; +pub const GENL_ID_VFS_DQUOT: c_int = NLMSG_MIN_TYPE + 1; +pub const GENL_ID_PMCRAID: c_int = NLMSG_MIN_TYPE + 2; + +pub const CTRL_CMD_UNSPEC: c_int = 0; +pub const CTRL_CMD_NEWFAMILY: c_int = 1; +pub const CTRL_CMD_DELFAMILY: c_int = 2; +pub const CTRL_CMD_GETFAMILY: c_int = 3; +pub const CTRL_CMD_NEWOPS: c_int = 4; +pub const CTRL_CMD_DELOPS: c_int = 5; +pub const CTRL_CMD_GETOPS: c_int = 6; +pub const CTRL_CMD_NEWMCAST_GRP: c_int = 7; +pub const CTRL_CMD_DELMCAST_GRP: c_int = 8; +pub const CTRL_CMD_GETMCAST_GRP: c_int = 9; + +pub const CTRL_ATTR_UNSPEC: c_int = 0; +pub const CTRL_ATTR_FAMILY_ID: c_int = 1; +pub const CTRL_ATTR_FAMILY_NAME: c_int = 2; +pub const CTRL_ATTR_VERSION: c_int = 3; +pub const CTRL_ATTR_HDRSIZE: c_int = 4; +pub const CTRL_ATTR_MAXATTR: c_int = 5; +pub const CTRL_ATTR_OPS: c_int = 6; +pub const CTRL_ATTR_MCAST_GROUPS: c_int = 7; + +pub const CTRL_ATTR_OP_UNSPEC: c_int = 0; +pub const CTRL_ATTR_OP_ID: c_int = 1; +pub const CTRL_ATTR_OP_FLAGS: c_int = 2; + +pub const CTRL_ATTR_MCAST_GRP_UNSPEC: c_int = 0; +pub const CTRL_ATTR_MCAST_GRP_NAME: c_int = 1; +pub const CTRL_ATTR_MCAST_GRP_ID: c_int = 2; + +pub const NETLINK_ADD_MEMBERSHIP: c_int = 1; +pub const NETLINK_DROP_MEMBERSHIP: c_int = 2; +pub const NETLINK_PKTINFO: c_int = 3; +pub const NETLINK_BROADCAST_ERROR: c_int = 4; +pub const NETLINK_NO_ENOBUFS: c_int = 5; +pub const NETLINK_RX_RING: c_int = 6; +pub const NETLINK_TX_RING: c_int = 7; +pub const NETLINK_LISTEN_ALL_NSID: c_int = 8; +pub const 
NETLINK_LIST_MEMBERSHIPS: c_int = 9; +pub const NETLINK_CAP_ACK: c_int = 10; +pub const NETLINK_EXT_ACK: c_int = 11; +pub const NETLINK_GET_STRICT_CHK: c_int = 12; + +pub const GRND_NONBLOCK: c_uint = 0x0001; +pub const GRND_RANDOM: c_uint = 0x0002; +pub const GRND_INSECURE: c_uint = 0x0004; + +// +pub const SECCOMP_MODE_DISABLED: c_uint = 0; +pub const SECCOMP_MODE_STRICT: c_uint = 1; +pub const SECCOMP_MODE_FILTER: c_uint = 2; + +pub const SECCOMP_SET_MODE_STRICT: c_uint = 0; +pub const SECCOMP_SET_MODE_FILTER: c_uint = 1; +pub const SECCOMP_GET_ACTION_AVAIL: c_uint = 2; +pub const SECCOMP_GET_NOTIF_SIZES: c_uint = 3; + +pub const SECCOMP_FILTER_FLAG_TSYNC: c_ulong = 1 << 0; +pub const SECCOMP_FILTER_FLAG_LOG: c_ulong = 1 << 1; +pub const SECCOMP_FILTER_FLAG_SPEC_ALLOW: c_ulong = 1 << 2; +pub const SECCOMP_FILTER_FLAG_NEW_LISTENER: c_ulong = 1 << 3; +pub const SECCOMP_FILTER_FLAG_TSYNC_ESRCH: c_ulong = 1 << 4; +pub const SECCOMP_FILTER_FLAG_WAIT_KILLABLE_RECV: c_ulong = 1 << 5; + +pub const SECCOMP_RET_KILL_PROCESS: c_uint = 0x80000000; +pub const SECCOMP_RET_KILL_THREAD: c_uint = 0x00000000; +pub const SECCOMP_RET_KILL: c_uint = SECCOMP_RET_KILL_THREAD; +pub const SECCOMP_RET_TRAP: c_uint = 0x00030000; +pub const SECCOMP_RET_ERRNO: c_uint = 0x00050000; +pub const SECCOMP_RET_USER_NOTIF: c_uint = 0x7fc00000; +pub const SECCOMP_RET_TRACE: c_uint = 0x7ff00000; +pub const SECCOMP_RET_LOG: c_uint = 0x7ffc0000; +pub const SECCOMP_RET_ALLOW: c_uint = 0x7fff0000; + +pub const SECCOMP_RET_ACTION_FULL: c_uint = 0xffff0000; +pub const SECCOMP_RET_ACTION: c_uint = 0x7fff0000; +pub const SECCOMP_RET_DATA: c_uint = 0x0000ffff; + +pub const SECCOMP_USER_NOTIF_FLAG_CONTINUE: c_ulong = 1; + +pub const SECCOMP_ADDFD_FLAG_SETFD: c_ulong = 1; +pub const SECCOMP_ADDFD_FLAG_SEND: c_ulong = 2; + +pub const NLA_F_NESTED: c_int = 1 << 15; +pub const NLA_F_NET_BYTEORDER: c_int = 1 << 14; +pub const NLA_TYPE_MASK: c_int = !(NLA_F_NESTED | NLA_F_NET_BYTEORDER); + +pub const NLA_ALIGNTO: c_int = 4; + +pub const SIGEV_THREAD_ID: c_int = 4; + +pub const CIBAUD: crate::tcflag_t = 0o02003600000; +pub const CBAUDEX: crate::tcflag_t = 0o010000; + +pub const TIOCM_LE: c_int = 0x001; +pub const TIOCM_DTR: c_int = 0x002; +pub const TIOCM_RTS: c_int = 0x004; +pub const TIOCM_ST: c_int = 0x008; +pub const TIOCM_SR: c_int = 0x010; +pub const TIOCM_CTS: c_int = 0x020; +pub const TIOCM_CAR: c_int = 0x040; +pub const TIOCM_RNG: c_int = 0x080; +pub const TIOCM_DSR: c_int = 0x100; +pub const TIOCM_CD: c_int = TIOCM_CAR; +pub const TIOCM_RI: c_int = TIOCM_RNG; + +pub const POLLWRNORM: c_short = 0x100; +pub const POLLWRBAND: c_short = 0x200; + +pub const SFD_CLOEXEC: c_int = O_CLOEXEC; +pub const SFD_NONBLOCK: c_int = O_NONBLOCK; + +pub const SOCK_NONBLOCK: c_int = O_NONBLOCK; + +pub const SO_ORIGINAL_DST: c_int = 80; + +pub const IP_RECVFRAGSIZE: c_int = 25; + +pub const IPV6_FLOWINFO: c_int = 11; +pub const IPV6_MULTICAST_ALL: c_int = 29; +pub const IPV6_ROUTER_ALERT_ISOLATE: c_int = 30; +pub const IPV6_FLOWLABEL_MGR: c_int = 32; +pub const IPV6_FLOWINFO_SEND: c_int = 33; +pub const IPV6_RECVFRAGSIZE: c_int = 77; +pub const IPV6_FREEBIND: c_int = 78; +pub const IPV6_FLOWINFO_FLOWLABEL: c_int = 0x000fffff; +pub const IPV6_FLOWINFO_PRIORITY: c_int = 0x0ff00000; + +pub const IUTF8: crate::tcflag_t = 0x00004000; +pub const CMSPAR: crate::tcflag_t = 0o10000000000; +pub const O_TMPFILE: c_int = 0o20000000 | O_DIRECTORY; + +pub const MFD_CLOEXEC: c_uint = 0x0001; +pub const MFD_ALLOW_SEALING: c_uint = 0x0002; +pub const MFD_HUGETLB: 
c_uint = 0x0004; +pub const MFD_NOEXEC_SEAL: c_uint = 0x0008; +pub const MFD_EXEC: c_uint = 0x0010; +pub const MFD_HUGE_64KB: c_uint = 0x40000000; +pub const MFD_HUGE_512KB: c_uint = 0x4c000000; +pub const MFD_HUGE_1MB: c_uint = 0x50000000; +pub const MFD_HUGE_2MB: c_uint = 0x54000000; +pub const MFD_HUGE_8MB: c_uint = 0x5c000000; +pub const MFD_HUGE_16MB: c_uint = 0x60000000; +pub const MFD_HUGE_32MB: c_uint = 0x64000000; +pub const MFD_HUGE_256MB: c_uint = 0x70000000; +pub const MFD_HUGE_512MB: c_uint = 0x74000000; +pub const MFD_HUGE_1GB: c_uint = 0x78000000; +pub const MFD_HUGE_2GB: c_uint = 0x7c000000; +pub const MFD_HUGE_16GB: c_uint = 0x88000000; +pub const MFD_HUGE_MASK: c_uint = 63; +pub const MFD_HUGE_SHIFT: c_uint = 26; + +// these are used in the p_type field of Elf32_Phdr and Elf64_Phdr, which has +// the type Elf32Word and Elf64Word respectively. Luckily, both of those are u32 +// so we can use that type here to avoid having to cast. +pub const PT_NULL: u32 = 0; +pub const PT_LOAD: u32 = 1; +pub const PT_DYNAMIC: u32 = 2; +pub const PT_INTERP: u32 = 3; +pub const PT_NOTE: u32 = 4; +pub const PT_SHLIB: u32 = 5; +pub const PT_PHDR: u32 = 6; +pub const PT_TLS: u32 = 7; +pub const PT_LOOS: u32 = 0x60000000; +pub const PT_GNU_EH_FRAME: u32 = 0x6474e550; +pub const PT_GNU_STACK: u32 = 0x6474e551; +pub const PT_GNU_RELRO: u32 = 0x6474e552; +pub const PT_HIOS: u32 = 0x6fffffff; +pub const PT_LOPROC: u32 = 0x70000000; +pub const PT_HIPROC: u32 = 0x7fffffff; + +// uapi/linux/mount.h +pub const OPEN_TREE_CLONE: c_uint = 0x01; +pub const OPEN_TREE_CLOEXEC: c_uint = O_CLOEXEC as c_uint; + +// linux/netfilter.h +pub const NF_DROP: c_int = 0; +pub const NF_ACCEPT: c_int = 1; +pub const NF_STOLEN: c_int = 2; +pub const NF_QUEUE: c_int = 3; +pub const NF_REPEAT: c_int = 4; +pub const NF_STOP: c_int = 5; +pub const NF_MAX_VERDICT: c_int = NF_STOP; + +pub const NF_VERDICT_MASK: c_int = 0x000000ff; +pub const NF_VERDICT_FLAG_QUEUE_BYPASS: c_int = 0x00008000; + +pub const NF_VERDICT_QMASK: c_int = 0xffff0000; +pub const NF_VERDICT_QBITS: c_int = 16; + +pub const NF_VERDICT_BITS: c_int = 16; + +pub const NF_INET_PRE_ROUTING: c_int = 0; +pub const NF_INET_LOCAL_IN: c_int = 1; +pub const NF_INET_FORWARD: c_int = 2; +pub const NF_INET_LOCAL_OUT: c_int = 3; +pub const NF_INET_POST_ROUTING: c_int = 4; +pub const NF_INET_NUMHOOKS: c_int = 5; +pub const NF_INET_INGRESS: c_int = NF_INET_NUMHOOKS; + +pub const NF_NETDEV_INGRESS: c_int = 0; +pub const NF_NETDEV_EGRESS: c_int = 1; +pub const NF_NETDEV_NUMHOOKS: c_int = 2; + +pub const NFPROTO_UNSPEC: c_int = 0; +pub const NFPROTO_INET: c_int = 1; +pub const NFPROTO_IPV4: c_int = 2; +pub const NFPROTO_ARP: c_int = 3; +pub const NFPROTO_NETDEV: c_int = 5; +pub const NFPROTO_BRIDGE: c_int = 7; +pub const NFPROTO_IPV6: c_int = 10; +pub const NFPROTO_DECNET: c_int = 12; +pub const NFPROTO_NUMPROTO: c_int = 13; + +// linux/netfilter_arp.h +pub const NF_ARP: c_int = 0; +pub const NF_ARP_IN: c_int = 0; +pub const NF_ARP_OUT: c_int = 1; +pub const NF_ARP_FORWARD: c_int = 2; +pub const NF_ARP_NUMHOOKS: c_int = 3; + +// linux/netfilter_bridge.h +pub const NF_BR_PRE_ROUTING: c_int = 0; +pub const NF_BR_LOCAL_IN: c_int = 1; +pub const NF_BR_FORWARD: c_int = 2; +pub const NF_BR_LOCAL_OUT: c_int = 3; +pub const NF_BR_POST_ROUTING: c_int = 4; +pub const NF_BR_BROUTING: c_int = 5; +pub const NF_BR_NUMHOOKS: c_int = 6; + +pub const NF_BR_PRI_FIRST: c_int = crate::INT_MIN; +pub const NF_BR_PRI_NAT_DST_BRIDGED: c_int = -300; +pub const NF_BR_PRI_FILTER_BRIDGED: c_int = -200; 
+pub const NF_BR_PRI_BRNF: c_int = 0; +pub const NF_BR_PRI_NAT_DST_OTHER: c_int = 100; +pub const NF_BR_PRI_FILTER_OTHER: c_int = 200; +pub const NF_BR_PRI_NAT_SRC: c_int = 300; +pub const NF_BR_PRI_LAST: c_int = crate::INT_MAX; + +// linux/netfilter_ipv4.h +pub const NF_IP_PRE_ROUTING: c_int = 0; +pub const NF_IP_LOCAL_IN: c_int = 1; +pub const NF_IP_FORWARD: c_int = 2; +pub const NF_IP_LOCAL_OUT: c_int = 3; +pub const NF_IP_POST_ROUTING: c_int = 4; +pub const NF_IP_NUMHOOKS: c_int = 5; + +pub const NF_IP_PRI_FIRST: c_int = crate::INT_MIN; +pub const NF_IP_PRI_RAW_BEFORE_DEFRAG: c_int = -450; +pub const NF_IP_PRI_CONNTRACK_DEFRAG: c_int = -400; +pub const NF_IP_PRI_RAW: c_int = -300; +pub const NF_IP_PRI_SELINUX_FIRST: c_int = -225; +pub const NF_IP_PRI_CONNTRACK: c_int = -200; +pub const NF_IP_PRI_MANGLE: c_int = -150; +pub const NF_IP_PRI_NAT_DST: c_int = -100; +pub const NF_IP_PRI_FILTER: c_int = 0; +pub const NF_IP_PRI_SECURITY: c_int = 50; +pub const NF_IP_PRI_NAT_SRC: c_int = 100; +pub const NF_IP_PRI_SELINUX_LAST: c_int = 225; +pub const NF_IP_PRI_CONNTRACK_HELPER: c_int = 300; +pub const NF_IP_PRI_CONNTRACK_CONFIRM: c_int = crate::INT_MAX; +pub const NF_IP_PRI_LAST: c_int = crate::INT_MAX; + +// linux/netfilter_ipv6.h +pub const NF_IP6_PRE_ROUTING: c_int = 0; +pub const NF_IP6_LOCAL_IN: c_int = 1; +pub const NF_IP6_FORWARD: c_int = 2; +pub const NF_IP6_LOCAL_OUT: c_int = 3; +pub const NF_IP6_POST_ROUTING: c_int = 4; +pub const NF_IP6_NUMHOOKS: c_int = 5; + +pub const NF_IP6_PRI_FIRST: c_int = crate::INT_MIN; +pub const NF_IP6_PRI_RAW_BEFORE_DEFRAG: c_int = -450; +pub const NF_IP6_PRI_CONNTRACK_DEFRAG: c_int = -400; +pub const NF_IP6_PRI_RAW: c_int = -300; +pub const NF_IP6_PRI_SELINUX_FIRST: c_int = -225; +pub const NF_IP6_PRI_CONNTRACK: c_int = -200; +pub const NF_IP6_PRI_MANGLE: c_int = -150; +pub const NF_IP6_PRI_NAT_DST: c_int = -100; +pub const NF_IP6_PRI_FILTER: c_int = 0; +pub const NF_IP6_PRI_SECURITY: c_int = 50; +pub const NF_IP6_PRI_NAT_SRC: c_int = 100; +pub const NF_IP6_PRI_SELINUX_LAST: c_int = 225; +pub const NF_IP6_PRI_CONNTRACK_HELPER: c_int = 300; +pub const NF_IP6_PRI_LAST: c_int = crate::INT_MAX; + +// linux/netfilter_ipv6/ip6_tables.h +pub const IP6T_SO_ORIGINAL_DST: c_int = 80; + +// linux/netfilter/nf_tables.h +pub const NFT_TABLE_MAXNAMELEN: c_int = 256; +pub const NFT_CHAIN_MAXNAMELEN: c_int = 256; +pub const NFT_SET_MAXNAMELEN: c_int = 256; +pub const NFT_OBJ_MAXNAMELEN: c_int = 256; +pub const NFT_USERDATA_MAXLEN: c_int = 256; + +pub const NFT_REG_VERDICT: c_int = 0; +pub const NFT_REG_1: c_int = 1; +pub const NFT_REG_2: c_int = 2; +pub const NFT_REG_3: c_int = 3; +pub const NFT_REG_4: c_int = 4; +pub const __NFT_REG_MAX: c_int = 5; +pub const NFT_REG32_00: c_int = 8; +pub const NFT_REG32_01: c_int = 9; +pub const NFT_REG32_02: c_int = 10; +pub const NFT_REG32_03: c_int = 11; +pub const NFT_REG32_04: c_int = 12; +pub const NFT_REG32_05: c_int = 13; +pub const NFT_REG32_06: c_int = 14; +pub const NFT_REG32_07: c_int = 15; +pub const NFT_REG32_08: c_int = 16; +pub const NFT_REG32_09: c_int = 17; +pub const NFT_REG32_10: c_int = 18; +pub const NFT_REG32_11: c_int = 19; +pub const NFT_REG32_12: c_int = 20; +pub const NFT_REG32_13: c_int = 21; +pub const NFT_REG32_14: c_int = 22; +pub const NFT_REG32_15: c_int = 23; + +pub const NFT_REG_SIZE: c_int = 16; +pub const NFT_REG32_SIZE: c_int = 4; + +pub const NFT_CONTINUE: c_int = -1; +pub const NFT_BREAK: c_int = -2; +pub const NFT_JUMP: c_int = -3; +pub const NFT_GOTO: c_int = -4; +pub const NFT_RETURN: c_int = 
-5; + +pub const NFT_MSG_NEWTABLE: c_int = 0; +pub const NFT_MSG_GETTABLE: c_int = 1; +pub const NFT_MSG_DELTABLE: c_int = 2; +pub const NFT_MSG_NEWCHAIN: c_int = 3; +pub const NFT_MSG_GETCHAIN: c_int = 4; +pub const NFT_MSG_DELCHAIN: c_int = 5; +pub const NFT_MSG_NEWRULE: c_int = 6; +pub const NFT_MSG_GETRULE: c_int = 7; +pub const NFT_MSG_DELRULE: c_int = 8; +pub const NFT_MSG_NEWSET: c_int = 9; +pub const NFT_MSG_GETSET: c_int = 10; +pub const NFT_MSG_DELSET: c_int = 11; +pub const NFT_MSG_NEWSETELEM: c_int = 12; +pub const NFT_MSG_GETSETELEM: c_int = 13; +pub const NFT_MSG_DELSETELEM: c_int = 14; +pub const NFT_MSG_NEWGEN: c_int = 15; +pub const NFT_MSG_GETGEN: c_int = 16; +pub const NFT_MSG_TRACE: c_int = 17; +pub const NFT_MSG_NEWOBJ: c_int = 18; +pub const NFT_MSG_GETOBJ: c_int = 19; +pub const NFT_MSG_DELOBJ: c_int = 20; +pub const NFT_MSG_GETOBJ_RESET: c_int = 21; +pub const NFT_MSG_MAX: c_int = 25; + +pub const NFT_SET_ANONYMOUS: c_int = 0x1; +pub const NFT_SET_CONSTANT: c_int = 0x2; +pub const NFT_SET_INTERVAL: c_int = 0x4; +pub const NFT_SET_MAP: c_int = 0x8; +pub const NFT_SET_TIMEOUT: c_int = 0x10; +pub const NFT_SET_EVAL: c_int = 0x20; + +pub const NFT_SET_POL_PERFORMANCE: c_int = 0; +pub const NFT_SET_POL_MEMORY: c_int = 1; + +pub const NFT_SET_ELEM_INTERVAL_END: c_int = 0x1; + +pub const NFT_DATA_VALUE: c_uint = 0; +pub const NFT_DATA_VERDICT: c_uint = 0xffffff00; + +pub const NFT_DATA_RESERVED_MASK: c_uint = 0xffffff00; + +pub const NFT_DATA_VALUE_MAXLEN: c_int = 64; + +pub const NFT_BYTEORDER_NTOH: c_int = 0; +pub const NFT_BYTEORDER_HTON: c_int = 1; + +pub const NFT_CMP_EQ: c_int = 0; +pub const NFT_CMP_NEQ: c_int = 1; +pub const NFT_CMP_LT: c_int = 2; +pub const NFT_CMP_LTE: c_int = 3; +pub const NFT_CMP_GT: c_int = 4; +pub const NFT_CMP_GTE: c_int = 5; + +pub const NFT_RANGE_EQ: c_int = 0; +pub const NFT_RANGE_NEQ: c_int = 1; + +pub const NFT_LOOKUP_F_INV: c_int = 1 << 0; + +pub const NFT_DYNSET_OP_ADD: c_int = 0; +pub const NFT_DYNSET_OP_UPDATE: c_int = 1; + +pub const NFT_DYNSET_F_INV: c_int = 1 << 0; + +pub const NFT_PAYLOAD_LL_HEADER: c_int = 0; +pub const NFT_PAYLOAD_NETWORK_HEADER: c_int = 1; +pub const NFT_PAYLOAD_TRANSPORT_HEADER: c_int = 2; + +pub const NFT_PAYLOAD_CSUM_NONE: c_int = 0; +pub const NFT_PAYLOAD_CSUM_INET: c_int = 1; + +pub const NFT_META_LEN: c_int = 0; +pub const NFT_META_PROTOCOL: c_int = 1; +pub const NFT_META_PRIORITY: c_int = 2; +pub const NFT_META_MARK: c_int = 3; +pub const NFT_META_IIF: c_int = 4; +pub const NFT_META_OIF: c_int = 5; +pub const NFT_META_IIFNAME: c_int = 6; +pub const NFT_META_OIFNAME: c_int = 7; +pub const NFT_META_IIFTYPE: c_int = 8; +pub const NFT_META_OIFTYPE: c_int = 9; +pub const NFT_META_SKUID: c_int = 10; +pub const NFT_META_SKGID: c_int = 11; +pub const NFT_META_NFTRACE: c_int = 12; +pub const NFT_META_RTCLASSID: c_int = 13; +pub const NFT_META_SECMARK: c_int = 14; +pub const NFT_META_NFPROTO: c_int = 15; +pub const NFT_META_L4PROTO: c_int = 16; +pub const NFT_META_BRI_IIFNAME: c_int = 17; +pub const NFT_META_BRI_OIFNAME: c_int = 18; +pub const NFT_META_PKTTYPE: c_int = 19; +pub const NFT_META_CPU: c_int = 20; +pub const NFT_META_IIFGROUP: c_int = 21; +pub const NFT_META_OIFGROUP: c_int = 22; +pub const NFT_META_CGROUP: c_int = 23; +pub const NFT_META_PRANDOM: c_int = 24; + +pub const NFT_CT_STATE: c_int = 0; +pub const NFT_CT_DIRECTION: c_int = 1; +pub const NFT_CT_STATUS: c_int = 2; +pub const NFT_CT_MARK: c_int = 3; +pub const NFT_CT_SECMARK: c_int = 4; +pub const NFT_CT_EXPIRATION: c_int = 5; +pub const 
NFT_CT_HELPER: c_int = 6; +pub const NFT_CT_L3PROTOCOL: c_int = 7; +pub const NFT_CT_SRC: c_int = 8; +pub const NFT_CT_DST: c_int = 9; +pub const NFT_CT_PROTOCOL: c_int = 10; +pub const NFT_CT_PROTO_SRC: c_int = 11; +pub const NFT_CT_PROTO_DST: c_int = 12; +pub const NFT_CT_LABELS: c_int = 13; +pub const NFT_CT_PKTS: c_int = 14; +pub const NFT_CT_BYTES: c_int = 15; +pub const NFT_CT_AVGPKT: c_int = 16; +pub const NFT_CT_ZONE: c_int = 17; +pub const NFT_CT_EVENTMASK: c_int = 18; +pub const NFT_CT_SRC_IP: c_int = 19; +pub const NFT_CT_DST_IP: c_int = 20; +pub const NFT_CT_SRC_IP6: c_int = 21; +pub const NFT_CT_DST_IP6: c_int = 22; +pub const NFT_CT_ID: c_int = 23; + +pub const NFT_LIMIT_PKTS: c_int = 0; +pub const NFT_LIMIT_PKT_BYTES: c_int = 1; + +pub const NFT_LIMIT_F_INV: c_int = 1 << 0; + +pub const NFT_QUEUE_FLAG_BYPASS: c_int = 0x01; +pub const NFT_QUEUE_FLAG_CPU_FANOUT: c_int = 0x02; +pub const NFT_QUEUE_FLAG_MASK: c_int = 0x03; + +pub const NFT_QUOTA_F_INV: c_int = 1 << 0; + +pub const NFT_REJECT_ICMP_UNREACH: c_int = 0; +pub const NFT_REJECT_TCP_RST: c_int = 1; +pub const NFT_REJECT_ICMPX_UNREACH: c_int = 2; + +pub const NFT_REJECT_ICMPX_NO_ROUTE: c_int = 0; +pub const NFT_REJECT_ICMPX_PORT_UNREACH: c_int = 1; +pub const NFT_REJECT_ICMPX_HOST_UNREACH: c_int = 2; +pub const NFT_REJECT_ICMPX_ADMIN_PROHIBITED: c_int = 3; + +pub const NFT_NAT_SNAT: c_int = 0; +pub const NFT_NAT_DNAT: c_int = 1; + +pub const NFT_TRACETYPE_UNSPEC: c_int = 0; +pub const NFT_TRACETYPE_POLICY: c_int = 1; +pub const NFT_TRACETYPE_RETURN: c_int = 2; +pub const NFT_TRACETYPE_RULE: c_int = 3; + +pub const NFT_NG_INCREMENTAL: c_int = 0; +pub const NFT_NG_RANDOM: c_int = 1; + +// linux/input.h +pub const FF_MAX: crate::__u16 = 0x7f; +pub const FF_CNT: usize = FF_MAX as usize + 1; + +// linux/input-event-codes.h +pub const INPUT_PROP_MAX: crate::__u16 = 0x1f; +pub const INPUT_PROP_CNT: usize = INPUT_PROP_MAX as usize + 1; +pub const EV_MAX: crate::__u16 = 0x1f; +pub const EV_CNT: usize = EV_MAX as usize + 1; +pub const SYN_MAX: crate::__u16 = 0xf; +pub const SYN_CNT: usize = SYN_MAX as usize + 1; +pub const KEY_MAX: crate::__u16 = 0x2ff; +pub const KEY_CNT: usize = KEY_MAX as usize + 1; +pub const REL_MAX: crate::__u16 = 0x0f; +pub const REL_CNT: usize = REL_MAX as usize + 1; +pub const ABS_MAX: crate::__u16 = 0x3f; +pub const ABS_CNT: usize = ABS_MAX as usize + 1; +pub const SW_MAX: crate::__u16 = 0x0f; +pub const SW_CNT: usize = SW_MAX as usize + 1; +pub const MSC_MAX: crate::__u16 = 0x07; +pub const MSC_CNT: usize = MSC_MAX as usize + 1; +pub const LED_MAX: crate::__u16 = 0x0f; +pub const LED_CNT: usize = LED_MAX as usize + 1; +pub const REP_MAX: crate::__u16 = 0x01; +pub const REP_CNT: usize = REP_MAX as usize + 1; +pub const SND_MAX: crate::__u16 = 0x07; +pub const SND_CNT: usize = SND_MAX as usize + 1; + +// linux/uinput.h +pub const UINPUT_VERSION: c_uint = 5; +pub const UINPUT_MAX_NAME_SIZE: usize = 80; + +// start android/platform/bionic/libc/kernel/uapi/linux/if_ether.h +// from https://android.googlesource.com/platform/bionic/+/HEAD/libc/kernel/uapi/linux/if_ether.h +pub const ETH_ALEN: c_int = 6; +pub const ETH_HLEN: c_int = 14; +pub const ETH_ZLEN: c_int = 60; +pub const ETH_DATA_LEN: c_int = 1500; +pub const ETH_FRAME_LEN: c_int = 1514; +pub const ETH_FCS_LEN: c_int = 4; +pub const ETH_MIN_MTU: c_int = 68; +pub const ETH_MAX_MTU: c_int = 0xFFFF; +pub const ETH_P_LOOP: c_int = 0x0060; +pub const ETH_P_PUP: c_int = 0x0200; +pub const ETH_P_PUPAT: c_int = 0x0201; +pub const ETH_P_TSN: c_int = 0x22F0; 
+pub const ETH_P_IP: c_int = 0x0800; +pub const ETH_P_X25: c_int = 0x0805; +pub const ETH_P_ARP: c_int = 0x0806; +pub const ETH_P_BPQ: c_int = 0x08FF; +pub const ETH_P_IEEEPUP: c_int = 0x0a00; +pub const ETH_P_IEEEPUPAT: c_int = 0x0a01; +pub const ETH_P_BATMAN: c_int = 0x4305; +pub const ETH_P_DEC: c_int = 0x6000; +pub const ETH_P_DNA_DL: c_int = 0x6001; +pub const ETH_P_DNA_RC: c_int = 0x6002; +pub const ETH_P_DNA_RT: c_int = 0x6003; +pub const ETH_P_LAT: c_int = 0x6004; +pub const ETH_P_DIAG: c_int = 0x6005; +pub const ETH_P_CUST: c_int = 0x6006; +pub const ETH_P_SCA: c_int = 0x6007; +pub const ETH_P_TEB: c_int = 0x6558; +pub const ETH_P_RARP: c_int = 0x8035; +pub const ETH_P_ATALK: c_int = 0x809B; +pub const ETH_P_AARP: c_int = 0x80F3; +pub const ETH_P_8021Q: c_int = 0x8100; +/* see rust-lang/libc#924 pub const ETH_P_ERSPAN: c_int = 0x88BE;*/ +pub const ETH_P_IPX: c_int = 0x8137; +pub const ETH_P_IPV6: c_int = 0x86DD; +pub const ETH_P_PAUSE: c_int = 0x8808; +pub const ETH_P_SLOW: c_int = 0x8809; +pub const ETH_P_WCCP: c_int = 0x883E; +pub const ETH_P_MPLS_UC: c_int = 0x8847; +pub const ETH_P_MPLS_MC: c_int = 0x8848; +pub const ETH_P_ATMMPOA: c_int = 0x884c; +pub const ETH_P_PPP_DISC: c_int = 0x8863; +pub const ETH_P_PPP_SES: c_int = 0x8864; +pub const ETH_P_LINK_CTL: c_int = 0x886c; +pub const ETH_P_ATMFATE: c_int = 0x8884; +pub const ETH_P_PAE: c_int = 0x888E; +pub const ETH_P_AOE: c_int = 0x88A2; +pub const ETH_P_8021AD: c_int = 0x88A8; +pub const ETH_P_802_EX1: c_int = 0x88B5; +pub const ETH_P_TIPC: c_int = 0x88CA; +pub const ETH_P_MACSEC: c_int = 0x88E5; +pub const ETH_P_8021AH: c_int = 0x88E7; +pub const ETH_P_MVRP: c_int = 0x88F5; +pub const ETH_P_1588: c_int = 0x88F7; +pub const ETH_P_NCSI: c_int = 0x88F8; +pub const ETH_P_PRP: c_int = 0x88FB; +pub const ETH_P_FCOE: c_int = 0x8906; +/* see rust-lang/libc#924 pub const ETH_P_IBOE: c_int = 0x8915;*/ +pub const ETH_P_TDLS: c_int = 0x890D; +pub const ETH_P_FIP: c_int = 0x8914; +pub const ETH_P_80221: c_int = 0x8917; +pub const ETH_P_HSR: c_int = 0x892F; +/* see rust-lang/libc#924 pub const ETH_P_NSH: c_int = 0x894F;*/ +pub const ETH_P_LOOPBACK: c_int = 0x9000; +pub const ETH_P_QINQ1: c_int = 0x9100; +pub const ETH_P_QINQ2: c_int = 0x9200; +pub const ETH_P_QINQ3: c_int = 0x9300; +pub const ETH_P_EDSA: c_int = 0xDADA; +/* see rust-lang/libc#924 pub const ETH_P_IFE: c_int = 0xED3E;*/ +pub const ETH_P_AF_IUCV: c_int = 0xFBFB; +pub const ETH_P_802_3_MIN: c_int = 0x0600; +pub const ETH_P_802_3: c_int = 0x0001; +pub const ETH_P_AX25: c_int = 0x0002; +pub const ETH_P_ALL: c_int = 0x0003; +pub const ETH_P_802_2: c_int = 0x0004; +pub const ETH_P_SNAP: c_int = 0x0005; +pub const ETH_P_DDCMP: c_int = 0x0006; +pub const ETH_P_WAN_PPP: c_int = 0x0007; +pub const ETH_P_PPP_MP: c_int = 0x0008; +pub const ETH_P_LOCALTALK: c_int = 0x0009; +pub const ETH_P_CAN: c_int = 0x000C; +pub const ETH_P_CANFD: c_int = 0x000D; +pub const ETH_P_PPPTALK: c_int = 0x0010; +pub const ETH_P_TR_802_2: c_int = 0x0011; +pub const ETH_P_MOBITEX: c_int = 0x0015; +pub const ETH_P_CONTROL: c_int = 0x0016; +pub const ETH_P_IRDA: c_int = 0x0017; +pub const ETH_P_ECONET: c_int = 0x0018; +pub const ETH_P_HDLC: c_int = 0x0019; +pub const ETH_P_ARCNET: c_int = 0x001A; +pub const ETH_P_DSA: c_int = 0x001B; +pub const ETH_P_TRAILER: c_int = 0x001C; +pub const ETH_P_PHONET: c_int = 0x00F5; +pub const ETH_P_IEEE802154: c_int = 0x00F6; +pub const ETH_P_CAIF: c_int = 0x00F7; +pub const ETH_P_XDSA: c_int = 0x00F8; +/* see rust-lang/libc#924 pub const ETH_P_MAP: c_int = 0x00F9;*/ +// end 
android/platform/bionic/libc/kernel/uapi/linux/if_ether.h + +// start android/platform/bionic/libc/kernel/uapi/linux/neighbour.h +pub const NDA_UNSPEC: c_ushort = 0; +pub const NDA_DST: c_ushort = 1; +pub const NDA_LLADDR: c_ushort = 2; +pub const NDA_CACHEINFO: c_ushort = 3; +pub const NDA_PROBES: c_ushort = 4; +pub const NDA_VLAN: c_ushort = 5; +pub const NDA_PORT: c_ushort = 6; +pub const NDA_VNI: c_ushort = 7; +pub const NDA_IFINDEX: c_ushort = 8; +pub const NDA_MASTER: c_ushort = 9; +pub const NDA_LINK_NETNSID: c_ushort = 10; +pub const NDA_SRC_VNI: c_ushort = 11; +pub const NDA_PROTOCOL: c_ushort = 12; +pub const NDA_NH_ID: c_ushort = 13; +pub const NDA_FDB_EXT_ATTRS: c_ushort = 14; +pub const NDA_FLAGS_EXT: c_ushort = 15; +pub const NDA_NDM_STATE_MASK: c_ushort = 16; +pub const NDA_NDM_FLAGS_MASK: c_ushort = 17; + +pub const NTF_USE: u8 = 0x01; +pub const NTF_SELF: u8 = 0x02; +pub const NTF_MASTER: u8 = 0x04; +pub const NTF_PROXY: u8 = 0x08; +pub const NTF_EXT_LEARNED: u8 = 0x10; +pub const NTF_OFFLOADED: u8 = 0x20; +pub const NTF_STICKY: u8 = 0x40; +pub const NTF_ROUTER: u8 = 0x80; + +pub const NTF_EXT_MANAGED: u8 = 0x01; +pub const NTF_EXT_LOCKED: u8 = 0x02; + +pub const NUD_NONE: u16 = 0x00; +pub const NUD_INCOMPLETE: u16 = 0x01; +pub const NUD_REACHABLE: u16 = 0x02; +pub const NUD_STALE: u16 = 0x04; +pub const NUD_DELAY: u16 = 0x08; +pub const NUD_PROBE: u16 = 0x10; +pub const NUD_FAILED: u16 = 0x20; +pub const NUD_NOARP: u16 = 0x40; +pub const NUD_PERMANENT: u16 = 0x80; + +pub const NDTPA_UNSPEC: c_ushort = 0; +pub const NDTPA_IFINDEX: c_ushort = 1; +pub const NDTPA_REFCNT: c_ushort = 2; +pub const NDTPA_REACHABLE_TIME: c_ushort = 3; +pub const NDTPA_BASE_REACHABLE_TIME: c_ushort = 4; +pub const NDTPA_RETRANS_TIME: c_ushort = 5; +pub const NDTPA_GC_STALETIME: c_ushort = 6; +pub const NDTPA_DELAY_PROBE_TIME: c_ushort = 7; +pub const NDTPA_QUEUE_LEN: c_ushort = 8; +pub const NDTPA_APP_PROBES: c_ushort = 9; +pub const NDTPA_UCAST_PROBES: c_ushort = 10; +pub const NDTPA_MCAST_PROBES: c_ushort = 11; +pub const NDTPA_ANYCAST_DELAY: c_ushort = 12; +pub const NDTPA_PROXY_DELAY: c_ushort = 13; +pub const NDTPA_PROXY_QLEN: c_ushort = 14; +pub const NDTPA_LOCKTIME: c_ushort = 15; +pub const NDTPA_QUEUE_LENBYTES: c_ushort = 16; +pub const NDTPA_MCAST_REPROBES: c_ushort = 17; +pub const NDTPA_PAD: c_ushort = 18; +pub const NDTPA_INTERVAL_PROBE_TIME_MS: c_ushort = 19; + +pub const NDTA_UNSPEC: c_ushort = 0; +pub const NDTA_NAME: c_ushort = 1; +pub const NDTA_THRESH1: c_ushort = 2; +pub const NDTA_THRESH2: c_ushort = 3; +pub const NDTA_THRESH3: c_ushort = 4; +pub const NDTA_CONFIG: c_ushort = 5; +pub const NDTA_PARMS: c_ushort = 6; +pub const NDTA_STATS: c_ushort = 7; +pub const NDTA_GC_INTERVAL: c_ushort = 8; +pub const NDTA_PAD: c_ushort = 9; + +pub const FDB_NOTIFY_BIT: u16 = 0x01; +pub const FDB_NOTIFY_INACTIVE_BIT: u16 = 0x02; + +pub const NFEA_UNSPEC: c_ushort = 0; +pub const NFEA_ACTIVITY_NOTIFY: c_ushort = 1; +pub const NFEA_DONT_REFRESH: c_ushort = 2; +// end android/platform/bionic/libc/kernel/uapi/linux/neighbour.h + +pub const SIOCADDRT: c_ulong = 0x0000890B; +pub const SIOCDELRT: c_ulong = 0x0000890C; +pub const SIOCRTMSG: c_ulong = 0x0000890D; +pub const SIOCGIFNAME: c_ulong = 0x00008910; +pub const SIOCSIFLINK: c_ulong = 0x00008911; +pub const SIOCGIFCONF: c_ulong = 0x00008912; +pub const SIOCGIFFLAGS: c_ulong = 0x00008913; +pub const SIOCSIFFLAGS: c_ulong = 0x00008914; +pub const SIOCGIFADDR: c_ulong = 0x00008915; +pub const SIOCSIFADDR: c_ulong = 0x00008916; +pub const 
SIOCGIFDSTADDR: c_ulong = 0x00008917; +pub const SIOCSIFDSTADDR: c_ulong = 0x00008918; +pub const SIOCGIFBRDADDR: c_ulong = 0x00008919; +pub const SIOCSIFBRDADDR: c_ulong = 0x0000891A; +pub const SIOCGIFNETMASK: c_ulong = 0x0000891B; +pub const SIOCSIFNETMASK: c_ulong = 0x0000891C; +pub const SIOCGIFMETRIC: c_ulong = 0x0000891D; +pub const SIOCSIFMETRIC: c_ulong = 0x0000891E; +pub const SIOCGIFMEM: c_ulong = 0x0000891F; +pub const SIOCSIFMEM: c_ulong = 0x00008920; +pub const SIOCGIFMTU: c_ulong = 0x00008921; +pub const SIOCSIFMTU: c_ulong = 0x00008922; +pub const SIOCSIFNAME: c_ulong = 0x00008923; +pub const SIOCSIFHWADDR: c_ulong = 0x00008924; +pub const SIOCGIFENCAP: c_ulong = 0x00008925; +pub const SIOCSIFENCAP: c_ulong = 0x00008926; +pub const SIOCGIFHWADDR: c_ulong = 0x00008927; +pub const SIOCGIFSLAVE: c_ulong = 0x00008929; +pub const SIOCSIFSLAVE: c_ulong = 0x00008930; +pub const SIOCADDMULTI: c_ulong = 0x00008931; +pub const SIOCDELMULTI: c_ulong = 0x00008932; +pub const SIOCGIFINDEX: c_ulong = 0x00008933; +pub const SIOGIFINDEX: c_ulong = SIOCGIFINDEX; +pub const SIOCSIFPFLAGS: c_ulong = 0x00008934; +pub const SIOCGIFPFLAGS: c_ulong = 0x00008935; +pub const SIOCDIFADDR: c_ulong = 0x00008936; +pub const SIOCSIFHWBROADCAST: c_ulong = 0x00008937; +pub const SIOCGIFCOUNT: c_ulong = 0x00008938; +pub const SIOCGIFBR: c_ulong = 0x00008940; +pub const SIOCSIFBR: c_ulong = 0x00008941; +pub const SIOCGIFTXQLEN: c_ulong = 0x00008942; +pub const SIOCSIFTXQLEN: c_ulong = 0x00008943; +pub const SIOCETHTOOL: c_ulong = 0x00008946; +pub const SIOCGMIIPHY: c_ulong = 0x00008947; +pub const SIOCGMIIREG: c_ulong = 0x00008948; +pub const SIOCSMIIREG: c_ulong = 0x00008949; +pub const SIOCWANDEV: c_ulong = 0x0000894A; +pub const SIOCOUTQNSD: c_ulong = 0x0000894B; +pub const SIOCGSKNS: c_ulong = 0x0000894C; +pub const SIOCDARP: c_ulong = 0x00008953; +pub const SIOCGARP: c_ulong = 0x00008954; +pub const SIOCSARP: c_ulong = 0x00008955; +pub const SIOCDRARP: c_ulong = 0x00008960; +pub const SIOCGRARP: c_ulong = 0x00008961; +pub const SIOCSRARP: c_ulong = 0x00008962; +pub const SIOCGIFMAP: c_ulong = 0x00008970; +pub const SIOCSIFMAP: c_ulong = 0x00008971; +pub const SIOCADDDLCI: c_ulong = 0x00008980; +pub const SIOCDELDLCI: c_ulong = 0x00008981; +pub const SIOCGIFVLAN: c_ulong = 0x00008982; +pub const SIOCSIFVLAN: c_ulong = 0x00008983; +pub const SIOCBONDENSLAVE: c_ulong = 0x00008990; +pub const SIOCBONDRELEASE: c_ulong = 0x00008991; +pub const SIOCBONDSETHWADDR: c_ulong = 0x00008992; +pub const SIOCBONDSLAVEINFOQUERY: c_ulong = 0x00008993; +pub const SIOCBONDINFOQUERY: c_ulong = 0x00008994; +pub const SIOCBONDCHANGEACTIVE: c_ulong = 0x00008995; +pub const SIOCBRADDBR: c_ulong = 0x000089a0; +pub const SIOCBRDELBR: c_ulong = 0x000089a1; +pub const SIOCBRADDIF: c_ulong = 0x000089a2; +pub const SIOCBRDELIF: c_ulong = 0x000089a3; +pub const SIOCSHWTSTAMP: c_ulong = 0x000089b0; +pub const SIOCGHWTSTAMP: c_ulong = 0x000089b1; +pub const SIOCDEVPRIVATE: c_ulong = 0x000089F0; +pub const SIOCPROTOPRIVATE: c_ulong = 0x000089E0; + +// linux/module.h +pub const MODULE_INIT_IGNORE_MODVERSIONS: c_uint = 0x0001; +pub const MODULE_INIT_IGNORE_VERMAGIC: c_uint = 0x0002; + +// linux/net_tstamp.h +pub const SOF_TIMESTAMPING_TX_HARDWARE: c_uint = 1 << 0; +pub const SOF_TIMESTAMPING_TX_SOFTWARE: c_uint = 1 << 1; +pub const SOF_TIMESTAMPING_RX_HARDWARE: c_uint = 1 << 2; +pub const SOF_TIMESTAMPING_RX_SOFTWARE: c_uint = 1 << 3; +pub const SOF_TIMESTAMPING_SOFTWARE: c_uint = 1 << 4; +pub const SOF_TIMESTAMPING_SYS_HARDWARE: c_uint = 
1 << 5; +pub const SOF_TIMESTAMPING_RAW_HARDWARE: c_uint = 1 << 6; +pub const SOF_TIMESTAMPING_OPT_ID: c_uint = 1 << 7; +pub const SOF_TIMESTAMPING_TX_SCHED: c_uint = 1 << 8; +pub const SOF_TIMESTAMPING_TX_ACK: c_uint = 1 << 9; +pub const SOF_TIMESTAMPING_OPT_CMSG: c_uint = 1 << 10; +pub const SOF_TIMESTAMPING_OPT_TSONLY: c_uint = 1 << 11; +pub const SOF_TIMESTAMPING_OPT_STATS: c_uint = 1 << 12; +pub const SOF_TIMESTAMPING_OPT_PKTINFO: c_uint = 1 << 13; +pub const SOF_TIMESTAMPING_OPT_TX_SWHW: c_uint = 1 << 14; +pub const SOF_TIMESTAMPING_BIND_PHC: c_uint = 1 << 15; +pub const SOF_TIMESTAMPING_OPT_ID_TCP: c_uint = 1 << 16; +pub const SOF_TIMESTAMPING_OPT_RX_FILTER: c_uint = 1 << 17; + +#[deprecated( + since = "0.2.55", + note = "ENOATTR is not available on Android; use ENODATA instead" +)] +pub const ENOATTR: c_int = crate::ENODATA; + +// linux/if_alg.h +pub const ALG_SET_KEY: c_int = 1; +pub const ALG_SET_IV: c_int = 2; +pub const ALG_SET_OP: c_int = 3; +pub const ALG_SET_AEAD_ASSOCLEN: c_int = 4; +pub const ALG_SET_AEAD_AUTHSIZE: c_int = 5; +pub const ALG_SET_DRBG_ENTROPY: c_int = 6; + +pub const ALG_OP_DECRYPT: c_int = 0; +pub const ALG_OP_ENCRYPT: c_int = 1; + +// sys/mman.h +pub const MLOCK_ONFAULT: c_int = 0x01; + +// uapi/linux/vm_sockets.h +pub const VMADDR_CID_ANY: c_uint = 0xFFFFFFFF; +pub const VMADDR_CID_HYPERVISOR: c_uint = 0; +pub const VMADDR_CID_LOCAL: c_uint = 1; +pub const VMADDR_CID_HOST: c_uint = 2; +pub const VMADDR_PORT_ANY: c_uint = 0xFFFFFFFF; + +// uapi/linux/inotify.h +pub const IN_ACCESS: u32 = 0x0000_0001; +pub const IN_MODIFY: u32 = 0x0000_0002; +pub const IN_ATTRIB: u32 = 0x0000_0004; +pub const IN_CLOSE_WRITE: u32 = 0x0000_0008; +pub const IN_CLOSE_NOWRITE: u32 = 0x0000_0010; +pub const IN_CLOSE: u32 = IN_CLOSE_WRITE | IN_CLOSE_NOWRITE; +pub const IN_OPEN: u32 = 0x0000_0020; +pub const IN_MOVED_FROM: u32 = 0x0000_0040; +pub const IN_MOVED_TO: u32 = 0x0000_0080; +pub const IN_MOVE: u32 = IN_MOVED_FROM | IN_MOVED_TO; +pub const IN_CREATE: u32 = 0x0000_0100; +pub const IN_DELETE: u32 = 0x0000_0200; +pub const IN_DELETE_SELF: u32 = 0x0000_0400; +pub const IN_MOVE_SELF: u32 = 0x0000_0800; +pub const IN_UNMOUNT: u32 = 0x0000_2000; +pub const IN_Q_OVERFLOW: u32 = 0x0000_4000; +pub const IN_IGNORED: u32 = 0x0000_8000; +pub const IN_ONLYDIR: u32 = 0x0100_0000; +pub const IN_DONT_FOLLOW: u32 = 0x0200_0000; +pub const IN_EXCL_UNLINK: u32 = 0x0400_0000; + +pub const IN_MASK_CREATE: u32 = 0x1000_0000; +pub const IN_MASK_ADD: u32 = 0x2000_0000; +pub const IN_ISDIR: u32 = 0x4000_0000; +pub const IN_ONESHOT: u32 = 0x8000_0000; + +pub const IN_ALL_EVENTS: u32 = IN_ACCESS + | IN_MODIFY + | IN_ATTRIB + | IN_CLOSE_WRITE + | IN_CLOSE_NOWRITE + | IN_OPEN + | IN_MOVED_FROM + | IN_MOVED_TO + | IN_DELETE + | IN_CREATE + | IN_DELETE_SELF + | IN_MOVE_SELF; + +pub const IN_CLOEXEC: c_int = O_CLOEXEC; +pub const IN_NONBLOCK: c_int = O_NONBLOCK; + +pub const FUTEX_WAIT: c_int = 0; +pub const FUTEX_WAKE: c_int = 1; +pub const FUTEX_FD: c_int = 2; +pub const FUTEX_REQUEUE: c_int = 3; +pub const FUTEX_CMP_REQUEUE: c_int = 4; +pub const FUTEX_WAKE_OP: c_int = 5; +pub const FUTEX_LOCK_PI: c_int = 6; +pub const FUTEX_UNLOCK_PI: c_int = 7; +pub const FUTEX_TRYLOCK_PI: c_int = 8; +pub const FUTEX_WAIT_BITSET: c_int = 9; +pub const FUTEX_WAKE_BITSET: c_int = 10; +pub const FUTEX_WAIT_REQUEUE_PI: c_int = 11; +pub const FUTEX_CMP_REQUEUE_PI: c_int = 12; +pub const FUTEX_LOCK_PI2: c_int = 13; + +pub const FUTEX_PRIVATE_FLAG: c_int = 128; +pub const FUTEX_CLOCK_REALTIME: c_int = 256; +pub const 
FUTEX_CMD_MASK: c_int = !(FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME); + +// linux/errqueue.h +pub const SO_EE_ORIGIN_NONE: u8 = 0; +pub const SO_EE_ORIGIN_LOCAL: u8 = 1; +pub const SO_EE_ORIGIN_ICMP: u8 = 2; +pub const SO_EE_ORIGIN_ICMP6: u8 = 3; +pub const SO_EE_ORIGIN_TXSTATUS: u8 = 4; +pub const SO_EE_ORIGIN_TIMESTAMPING: u8 = SO_EE_ORIGIN_TXSTATUS; + +// errno.h +pub const EPERM: c_int = 1; +pub const ENOENT: c_int = 2; +pub const ESRCH: c_int = 3; +pub const EINTR: c_int = 4; +pub const EIO: c_int = 5; +pub const ENXIO: c_int = 6; +pub const E2BIG: c_int = 7; +pub const ENOEXEC: c_int = 8; +pub const EBADF: c_int = 9; +pub const ECHILD: c_int = 10; +pub const EAGAIN: c_int = 11; +pub const ENOMEM: c_int = 12; +pub const EACCES: c_int = 13; +pub const EFAULT: c_int = 14; +pub const ENOTBLK: c_int = 15; +pub const EBUSY: c_int = 16; +pub const EEXIST: c_int = 17; +pub const EXDEV: c_int = 18; +pub const ENODEV: c_int = 19; +pub const ENOTDIR: c_int = 20; +pub const EISDIR: c_int = 21; +pub const EINVAL: c_int = 22; +pub const ENFILE: c_int = 23; +pub const EMFILE: c_int = 24; +pub const ENOTTY: c_int = 25; +pub const ETXTBSY: c_int = 26; +pub const EFBIG: c_int = 27; +pub const ENOSPC: c_int = 28; +pub const ESPIPE: c_int = 29; +pub const EROFS: c_int = 30; +pub const EMLINK: c_int = 31; +pub const EPIPE: c_int = 32; +pub const EDOM: c_int = 33; +pub const ERANGE: c_int = 34; +pub const EWOULDBLOCK: c_int = EAGAIN; + +pub const PRIO_PROCESS: c_int = 0; +pub const PRIO_PGRP: c_int = 1; +pub const PRIO_USER: c_int = 2; + +// linux/sched.h +pub const SCHED_NORMAL: c_int = 0; +pub const SCHED_FIFO: c_int = 1; +pub const SCHED_RR: c_int = 2; +pub const SCHED_BATCH: c_int = 3; +pub const SCHED_IDLE: c_int = 5; +pub const SCHED_DEADLINE: c_int = 6; + +pub const SCHED_RESET_ON_FORK: c_int = 0x40000000; + +pub const CLONE_PIDFD: c_int = 0x1000; +pub const CLONE_CLEAR_SIGHAND: c_ulonglong = 0x100000000; +pub const CLONE_INTO_CGROUP: c_ulonglong = 0x200000000; + +// linux/membarrier.h +pub const MEMBARRIER_CMD_QUERY: c_int = 0; +pub const MEMBARRIER_CMD_GLOBAL: c_int = 1 << 0; +pub const MEMBARRIER_CMD_GLOBAL_EXPEDITED: c_int = 1 << 1; +pub const MEMBARRIER_CMD_REGISTER_GLOBAL_EXPEDITED: c_int = 1 << 2; +pub const MEMBARRIER_CMD_PRIVATE_EXPEDITED: c_int = 1 << 3; +pub const MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED: c_int = 1 << 4; +pub const MEMBARRIER_CMD_PRIVATE_EXPEDITED_SYNC_CORE: c_int = 1 << 5; +pub const MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_SYNC_CORE: c_int = 1 << 6; +pub const MEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ: c_int = 1 << 7; +pub const MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_RSEQ: c_int = 1 << 8; + +// linux/mempolicy.h +pub const MPOL_DEFAULT: c_int = 0; +pub const MPOL_PREFERRED: c_int = 1; +pub const MPOL_BIND: c_int = 2; +pub const MPOL_INTERLEAVE: c_int = 3; +pub const MPOL_LOCAL: c_int = 4; +pub const MPOL_F_NUMA_BALANCING: c_int = 1 << 13; +pub const MPOL_F_RELATIVE_NODES: c_int = 1 << 14; +pub const MPOL_F_STATIC_NODES: c_int = 1 << 15; + +// bits/seek_constants.h +pub const SEEK_DATA: c_int = 3; +pub const SEEK_HOLE: c_int = 4; + +// sys/socket.h +pub const AF_NFC: c_int = 39; +pub const AF_VSOCK: c_int = 40; +pub const PF_NFC: c_int = AF_NFC; +pub const PF_VSOCK: c_int = AF_VSOCK; + +pub const SOMAXCONN: c_int = 128; + +// sys/system_properties.h +pub const PROP_VALUE_MAX: c_int = 92; +pub const PROP_NAME_MAX: c_int = 32; + +// sys/prctl.h +pub const PR_SET_PDEATHSIG: c_int = 1; +pub const PR_GET_PDEATHSIG: c_int = 2; +pub const PR_GET_DUMPABLE: c_int = 3; +pub 
const PR_SET_DUMPABLE: c_int = 4; +pub const PR_GET_UNALIGN: c_int = 5; +pub const PR_SET_UNALIGN: c_int = 6; +pub const PR_UNALIGN_NOPRINT: c_int = 1; +pub const PR_UNALIGN_SIGBUS: c_int = 2; +pub const PR_GET_KEEPCAPS: c_int = 7; +pub const PR_SET_KEEPCAPS: c_int = 8; +pub const PR_GET_FPEMU: c_int = 9; +pub const PR_SET_FPEMU: c_int = 10; +pub const PR_FPEMU_NOPRINT: c_int = 1; +pub const PR_FPEMU_SIGFPE: c_int = 2; +pub const PR_GET_FPEXC: c_int = 11; +pub const PR_SET_FPEXC: c_int = 12; +pub const PR_FP_EXC_SW_ENABLE: c_int = 0x80; +pub const PR_FP_EXC_DIV: c_int = 0x010000; +pub const PR_FP_EXC_OVF: c_int = 0x020000; +pub const PR_FP_EXC_UND: c_int = 0x040000; +pub const PR_FP_EXC_RES: c_int = 0x080000; +pub const PR_FP_EXC_INV: c_int = 0x100000; +pub const PR_FP_EXC_DISABLED: c_int = 0; +pub const PR_FP_EXC_NONRECOV: c_int = 1; +pub const PR_FP_EXC_ASYNC: c_int = 2; +pub const PR_FP_EXC_PRECISE: c_int = 3; +pub const PR_GET_TIMING: c_int = 13; +pub const PR_SET_TIMING: c_int = 14; +pub const PR_TIMING_STATISTICAL: c_int = 0; +pub const PR_TIMING_TIMESTAMP: c_int = 1; +pub const PR_SET_NAME: c_int = 15; +pub const PR_GET_NAME: c_int = 16; +pub const PR_GET_ENDIAN: c_int = 19; +pub const PR_SET_ENDIAN: c_int = 20; +pub const PR_ENDIAN_BIG: c_int = 0; +pub const PR_ENDIAN_LITTLE: c_int = 1; +pub const PR_ENDIAN_PPC_LITTLE: c_int = 2; +pub const PR_GET_SECCOMP: c_int = 21; +pub const PR_SET_SECCOMP: c_int = 22; +pub const PR_CAPBSET_READ: c_int = 23; +pub const PR_CAPBSET_DROP: c_int = 24; +pub const PR_GET_TSC: c_int = 25; +pub const PR_SET_TSC: c_int = 26; +pub const PR_TSC_ENABLE: c_int = 1; +pub const PR_TSC_SIGSEGV: c_int = 2; +pub const PR_GET_SECUREBITS: c_int = 27; +pub const PR_SET_SECUREBITS: c_int = 28; +pub const PR_SET_TIMERSLACK: c_int = 29; +pub const PR_GET_TIMERSLACK: c_int = 30; +pub const PR_TASK_PERF_EVENTS_DISABLE: c_int = 31; +pub const PR_TASK_PERF_EVENTS_ENABLE: c_int = 32; +pub const PR_MCE_KILL: c_int = 33; +pub const PR_MCE_KILL_CLEAR: c_int = 0; +pub const PR_MCE_KILL_SET: c_int = 1; +pub const PR_MCE_KILL_LATE: c_int = 0; +pub const PR_MCE_KILL_EARLY: c_int = 1; +pub const PR_MCE_KILL_DEFAULT: c_int = 2; +pub const PR_MCE_KILL_GET: c_int = 34; +pub const PR_SET_MM: c_int = 35; +pub const PR_SET_MM_START_CODE: c_int = 1; +pub const PR_SET_MM_END_CODE: c_int = 2; +pub const PR_SET_MM_START_DATA: c_int = 3; +pub const PR_SET_MM_END_DATA: c_int = 4; +pub const PR_SET_MM_START_STACK: c_int = 5; +pub const PR_SET_MM_START_BRK: c_int = 6; +pub const PR_SET_MM_BRK: c_int = 7; +pub const PR_SET_MM_ARG_START: c_int = 8; +pub const PR_SET_MM_ARG_END: c_int = 9; +pub const PR_SET_MM_ENV_START: c_int = 10; +pub const PR_SET_MM_ENV_END: c_int = 11; +pub const PR_SET_MM_AUXV: c_int = 12; +pub const PR_SET_MM_EXE_FILE: c_int = 13; +pub const PR_SET_MM_MAP: c_int = 14; +pub const PR_SET_MM_MAP_SIZE: c_int = 15; +pub const PR_SET_PTRACER: c_int = 0x59616d61; +pub const PR_SET_PTRACER_ANY: c_ulong = 0xffffffffffffffff; +pub const PR_SET_CHILD_SUBREAPER: c_int = 36; +pub const PR_GET_CHILD_SUBREAPER: c_int = 37; +pub const PR_SET_NO_NEW_PRIVS: c_int = 38; +pub const PR_GET_NO_NEW_PRIVS: c_int = 39; +pub const PR_GET_TID_ADDRESS: c_int = 40; +pub const PR_SET_THP_DISABLE: c_int = 41; +pub const PR_GET_THP_DISABLE: c_int = 42; +pub const PR_MPX_ENABLE_MANAGEMENT: c_int = 43; +pub const PR_MPX_DISABLE_MANAGEMENT: c_int = 44; +pub const PR_SET_FP_MODE: c_int = 45; +pub const PR_GET_FP_MODE: c_int = 46; +pub const PR_FP_MODE_FR: c_int = 1 << 0; +pub const PR_FP_MODE_FRE: c_int = 1 
<< 1; +pub const PR_CAP_AMBIENT: c_int = 47; +pub const PR_CAP_AMBIENT_IS_SET: c_int = 1; +pub const PR_CAP_AMBIENT_RAISE: c_int = 2; +pub const PR_CAP_AMBIENT_LOWER: c_int = 3; +pub const PR_CAP_AMBIENT_CLEAR_ALL: c_int = 4; +pub const PR_SVE_SET_VL: c_int = 50; +pub const PR_SVE_SET_VL_ONEXEC: c_int = 1 << 18; +pub const PR_SVE_GET_VL: c_int = 51; +pub const PR_SVE_VL_LEN_MASK: c_int = 0xffff; +pub const PR_SVE_VL_INHERIT: c_int = 1 << 17; +pub const PR_GET_SPECULATION_CTRL: c_int = 52; +pub const PR_SET_SPECULATION_CTRL: c_int = 53; +pub const PR_SPEC_STORE_BYPASS: c_int = 0; +pub const PR_SPEC_INDIRECT_BRANCH: c_int = 1; +pub const PR_SPEC_L1D_FLUSH: c_int = 2; +pub const PR_SPEC_NOT_AFFECTED: c_int = 0; +pub const PR_SPEC_PRCTL: c_ulong = 1 << 0; +pub const PR_SPEC_ENABLE: c_ulong = 1 << 1; +pub const PR_SPEC_DISABLE: c_ulong = 1 << 2; +pub const PR_SPEC_FORCE_DISABLE: c_ulong = 1 << 3; +pub const PR_SPEC_DISABLE_NOEXEC: c_ulong = 1 << 4; +pub const PR_PAC_RESET_KEYS: c_int = 54; +pub const PR_PAC_APIAKEY: c_ulong = 1 << 0; +pub const PR_PAC_APIBKEY: c_ulong = 1 << 1; +pub const PR_PAC_APDAKEY: c_ulong = 1 << 2; +pub const PR_PAC_APDBKEY: c_ulong = 1 << 3; +pub const PR_PAC_APGAKEY: c_ulong = 1 << 4; +pub const PR_SET_TAGGED_ADDR_CTRL: c_int = 55; +pub const PR_GET_TAGGED_ADDR_CTRL: c_int = 56; +pub const PR_TAGGED_ADDR_ENABLE: c_ulong = 1 << 0; +pub const PR_MTE_TCF_NONE: c_ulong = 0; +pub const PR_MTE_TCF_SYNC: c_ulong = 1 << 1; +pub const PR_MTE_TCF_ASYNC: c_ulong = 1 << 2; +pub const PR_MTE_TCF_MASK: c_ulong = PR_MTE_TCF_SYNC | PR_MTE_TCF_ASYNC; +pub const PR_MTE_TAG_SHIFT: c_ulong = 3; +pub const PR_MTE_TAG_MASK: c_ulong = 0xffff << PR_MTE_TAG_SHIFT; +pub const PR_MTE_TCF_SHIFT: c_ulong = 1; +pub const PR_SET_IO_FLUSHER: c_int = 57; +pub const PR_GET_IO_FLUSHER: c_int = 58; +pub const PR_SET_SYSCALL_USER_DISPATCH: c_int = 59; +pub const PR_SYS_DISPATCH_OFF: c_int = 0; +pub const PR_SYS_DISPATCH_ON: c_int = 1; +pub const SYSCALL_DISPATCH_FILTER_ALLOW: c_int = 0; +pub const SYSCALL_DISPATCH_FILTER_BLOCK: c_int = 1; +pub const PR_PAC_SET_ENABLED_KEYS: c_int = 60; +pub const PR_PAC_GET_ENABLED_KEYS: c_int = 61; +pub const PR_SCHED_CORE: c_int = 62; +pub const PR_SCHED_CORE_GET: c_int = 0; +pub const PR_SCHED_CORE_CREATE: c_int = 1; +pub const PR_SCHED_CORE_SHARE_TO: c_int = 2; +pub const PR_SCHED_CORE_SHARE_FROM: c_int = 3; +pub const PR_SCHED_CORE_MAX: c_int = 4; +pub const PR_SCHED_CORE_SCOPE_THREAD: c_int = 0; +pub const PR_SCHED_CORE_SCOPE_THREAD_GROUP: c_int = 1; +pub const PR_SCHED_CORE_SCOPE_PROCESS_GROUP: c_int = 2; +pub const PR_SME_SET_VL: c_int = 63; +pub const PR_SME_SET_VL_ONEXEC: c_int = 1 << 18; +pub const PR_SME_GET_VL: c_int = 64; +pub const PR_SME_VL_LEN_MASK: c_int = 0xffff; +pub const PR_SME_VL_INHERIT: c_int = 1 << 17; +pub const PR_SET_MDWE: c_int = 65; +pub const PR_MDWE_REFUSE_EXEC_GAIN: c_ulong = 1 << 0; +pub const PR_MDWE_NO_INHERIT: c_ulong = 1 << 1; +pub const PR_GET_MDWE: c_int = 66; +pub const PR_SET_VMA: c_int = 0x53564d41; +pub const PR_SET_VMA_ANON_NAME: c_int = 0; +pub const PR_GET_AUXV: c_int = 0x41555856; +pub const PR_SET_MEMORY_MERGE: c_int = 67; +pub const PR_GET_MEMORY_MERGE: c_int = 68; +pub const PR_RISCV_V_SET_CONTROL: c_int = 69; +pub const PR_RISCV_V_GET_CONTROL: c_int = 70; +pub const PR_RISCV_V_VSTATE_CTRL_DEFAULT: c_int = 0; +pub const PR_RISCV_V_VSTATE_CTRL_OFF: c_int = 1; +pub const PR_RISCV_V_VSTATE_CTRL_ON: c_int = 2; +pub const PR_RISCV_V_VSTATE_CTRL_INHERIT: c_int = 1 << 4; +pub const PR_RISCV_V_VSTATE_CTRL_CUR_MASK: c_int = 0x3; 
+pub const PR_RISCV_V_VSTATE_CTRL_NEXT_MASK: c_int = 0xc; +pub const PR_RISCV_V_VSTATE_CTRL_MASK: c_int = 0x1f; + +// linux/if_addr.h +pub const IFA_UNSPEC: c_ushort = 0; +pub const IFA_ADDRESS: c_ushort = 1; +pub const IFA_LOCAL: c_ushort = 2; +pub const IFA_LABEL: c_ushort = 3; +pub const IFA_BROADCAST: c_ushort = 4; +pub const IFA_ANYCAST: c_ushort = 5; +pub const IFA_CACHEINFO: c_ushort = 6; +pub const IFA_MULTICAST: c_ushort = 7; + +pub const IFA_F_SECONDARY: u32 = 0x01; +pub const IFA_F_TEMPORARY: u32 = 0x01; +pub const IFA_F_NODAD: u32 = 0x02; +pub const IFA_F_OPTIMISTIC: u32 = 0x04; +pub const IFA_F_DADFAILED: u32 = 0x08; +pub const IFA_F_HOMEADDRESS: u32 = 0x10; +pub const IFA_F_DEPRECATED: u32 = 0x20; +pub const IFA_F_TENTATIVE: u32 = 0x40; +pub const IFA_F_PERMANENT: u32 = 0x80; + +// linux/if_link.h +pub const IFLA_UNSPEC: c_ushort = 0; +pub const IFLA_ADDRESS: c_ushort = 1; +pub const IFLA_BROADCAST: c_ushort = 2; +pub const IFLA_IFNAME: c_ushort = 3; +pub const IFLA_MTU: c_ushort = 4; +pub const IFLA_LINK: c_ushort = 5; +pub const IFLA_QDISC: c_ushort = 6; +pub const IFLA_STATS: c_ushort = 7; +pub const IFLA_COST: c_ushort = 8; +pub const IFLA_PRIORITY: c_ushort = 9; +pub const IFLA_MASTER: c_ushort = 10; +pub const IFLA_WIRELESS: c_ushort = 11; +pub const IFLA_PROTINFO: c_ushort = 12; +pub const IFLA_TXQLEN: c_ushort = 13; +pub const IFLA_MAP: c_ushort = 14; +pub const IFLA_WEIGHT: c_ushort = 15; +pub const IFLA_OPERSTATE: c_ushort = 16; +pub const IFLA_LINKMODE: c_ushort = 17; +pub const IFLA_LINKINFO: c_ushort = 18; +pub const IFLA_NET_NS_PID: c_ushort = 19; +pub const IFLA_IFALIAS: c_ushort = 20; +pub const IFLA_NUM_VF: c_ushort = 21; +pub const IFLA_VFINFO_LIST: c_ushort = 22; +pub const IFLA_STATS64: c_ushort = 23; +pub const IFLA_VF_PORTS: c_ushort = 24; +pub const IFLA_PORT_SELF: c_ushort = 25; +pub const IFLA_AF_SPEC: c_ushort = 26; +pub const IFLA_GROUP: c_ushort = 27; +pub const IFLA_NET_NS_FD: c_ushort = 28; +pub const IFLA_EXT_MASK: c_ushort = 29; +pub const IFLA_PROMISCUITY: c_ushort = 30; +pub const IFLA_NUM_TX_QUEUES: c_ushort = 31; +pub const IFLA_NUM_RX_QUEUES: c_ushort = 32; +pub const IFLA_CARRIER: c_ushort = 33; +pub const IFLA_PHYS_PORT_ID: c_ushort = 34; +pub const IFLA_CARRIER_CHANGES: c_ushort = 35; +pub const IFLA_PHYS_SWITCH_ID: c_ushort = 36; +pub const IFLA_LINK_NETNSID: c_ushort = 37; +pub const IFLA_PHYS_PORT_NAME: c_ushort = 38; +pub const IFLA_PROTO_DOWN: c_ushort = 39; +pub const IFLA_GSO_MAX_SEGS: c_ushort = 40; +pub const IFLA_GSO_MAX_SIZE: c_ushort = 41; +pub const IFLA_PAD: c_ushort = 42; +pub const IFLA_XDP: c_ushort = 43; +pub const IFLA_EVENT: c_ushort = 44; +pub const IFLA_NEW_NETNSID: c_ushort = 45; +pub const IFLA_IF_NETNSID: c_ushort = 46; +pub const IFLA_TARGET_NETNSID: c_ushort = IFLA_IF_NETNSID; +pub const IFLA_CARRIER_UP_COUNT: c_ushort = 47; +pub const IFLA_CARRIER_DOWN_COUNT: c_ushort = 48; +pub const IFLA_NEW_IFINDEX: c_ushort = 49; +pub const IFLA_MIN_MTU: c_ushort = 50; +pub const IFLA_MAX_MTU: c_ushort = 51; +pub const IFLA_PROP_LIST: c_ushort = 52; +pub const IFLA_ALT_IFNAME: c_ushort = 53; +pub const IFLA_PERM_ADDRESS: c_ushort = 54; +pub const IFLA_PROTO_DOWN_REASON: c_ushort = 55; +pub const IFLA_PARENT_DEV_NAME: c_ushort = 56; +pub const IFLA_PARENT_DEV_BUS_NAME: c_ushort = 57; +pub const IFLA_GRO_MAX_SIZE: c_ushort = 58; +pub const IFLA_TSO_MAX_SIZE: c_ushort = 59; +pub const IFLA_TSO_MAX_SEGS: c_ushort = 60; +pub const IFLA_ALLMULTI: c_ushort = 61; +pub const IFLA_DEVLINK_PORT: c_ushort = 62; +pub const 
IFLA_GSO_IPV4_MAX_SIZE: c_ushort = 63; +pub const IFLA_GRO_IPV4_MAX_SIZE: c_ushort = 64; + +pub const IFLA_INFO_UNSPEC: c_ushort = 0; +pub const IFLA_INFO_KIND: c_ushort = 1; +pub const IFLA_INFO_DATA: c_ushort = 2; +pub const IFLA_INFO_XSTATS: c_ushort = 3; +pub const IFLA_INFO_SLAVE_KIND: c_ushort = 4; +pub const IFLA_INFO_SLAVE_DATA: c_ushort = 5; + +// linux/rtnetlink.h +pub const TCA_UNSPEC: c_ushort = 0; +pub const TCA_KIND: c_ushort = 1; +pub const TCA_OPTIONS: c_ushort = 2; +pub const TCA_STATS: c_ushort = 3; +pub const TCA_XSTATS: c_ushort = 4; +pub const TCA_RATE: c_ushort = 5; +pub const TCA_FCNT: c_ushort = 6; +pub const TCA_STATS2: c_ushort = 7; +pub const TCA_STAB: c_ushort = 8; + +pub const RTM_NEWLINK: u16 = 16; +pub const RTM_DELLINK: u16 = 17; +pub const RTM_GETLINK: u16 = 18; +pub const RTM_SETLINK: u16 = 19; +pub const RTM_NEWADDR: u16 = 20; +pub const RTM_DELADDR: u16 = 21; +pub const RTM_GETADDR: u16 = 22; +pub const RTM_NEWROUTE: u16 = 24; +pub const RTM_DELROUTE: u16 = 25; +pub const RTM_GETROUTE: u16 = 26; +pub const RTM_NEWNEIGH: u16 = 28; +pub const RTM_DELNEIGH: u16 = 29; +pub const RTM_GETNEIGH: u16 = 30; +pub const RTM_NEWRULE: u16 = 32; +pub const RTM_DELRULE: u16 = 33; +pub const RTM_GETRULE: u16 = 34; +pub const RTM_NEWQDISC: u16 = 36; +pub const RTM_DELQDISC: u16 = 37; +pub const RTM_GETQDISC: u16 = 38; +pub const RTM_NEWTCLASS: u16 = 40; +pub const RTM_DELTCLASS: u16 = 41; +pub const RTM_GETTCLASS: u16 = 42; +pub const RTM_NEWTFILTER: u16 = 44; +pub const RTM_DELTFILTER: u16 = 45; +pub const RTM_GETTFILTER: u16 = 46; +pub const RTM_NEWACTION: u16 = 48; +pub const RTM_DELACTION: u16 = 49; +pub const RTM_GETACTION: u16 = 50; +pub const RTM_NEWPREFIX: u16 = 52; +pub const RTM_GETMULTICAST: u16 = 58; +pub const RTM_GETANYCAST: u16 = 62; +pub const RTM_NEWNEIGHTBL: u16 = 64; +pub const RTM_GETNEIGHTBL: u16 = 66; +pub const RTM_SETNEIGHTBL: u16 = 67; +pub const RTM_NEWNDUSEROPT: u16 = 68; +pub const RTM_NEWADDRLABEL: u16 = 72; +pub const RTM_DELADDRLABEL: u16 = 73; +pub const RTM_GETADDRLABEL: u16 = 74; +pub const RTM_GETDCB: u16 = 78; +pub const RTM_SETDCB: u16 = 79; +pub const RTM_NEWNETCONF: u16 = 80; +pub const RTM_GETNETCONF: u16 = 82; +pub const RTM_NEWMDB: u16 = 84; +pub const RTM_DELMDB: u16 = 85; +pub const RTM_GETMDB: u16 = 86; +pub const RTM_NEWNSID: u16 = 88; +pub const RTM_DELNSID: u16 = 89; +pub const RTM_GETNSID: u16 = 90; + +pub const RTM_F_NOTIFY: c_uint = 0x100; +pub const RTM_F_CLONED: c_uint = 0x200; +pub const RTM_F_EQUALIZE: c_uint = 0x400; +pub const RTM_F_PREFIX: c_uint = 0x800; + +pub const RTA_UNSPEC: c_ushort = 0; +pub const RTA_DST: c_ushort = 1; +pub const RTA_SRC: c_ushort = 2; +pub const RTA_IIF: c_ushort = 3; +pub const RTA_OIF: c_ushort = 4; +pub const RTA_GATEWAY: c_ushort = 5; +pub const RTA_PRIORITY: c_ushort = 6; +pub const RTA_PREFSRC: c_ushort = 7; +pub const RTA_METRICS: c_ushort = 8; +pub const RTA_MULTIPATH: c_ushort = 9; +pub const RTA_PROTOINFO: c_ushort = 10; // No longer used +pub const RTA_FLOW: c_ushort = 11; +pub const RTA_CACHEINFO: c_ushort = 12; +pub const RTA_SESSION: c_ushort = 13; // No longer used +pub const RTA_MP_ALGO: c_ushort = 14; // No longer used +pub const RTA_TABLE: c_ushort = 15; +pub const RTA_MARK: c_ushort = 16; +pub const RTA_MFC_STATS: c_ushort = 17; + +pub const RTN_UNSPEC: c_uchar = 0; +pub const RTN_UNICAST: c_uchar = 1; +pub const RTN_LOCAL: c_uchar = 2; +pub const RTN_BROADCAST: c_uchar = 3; +pub const RTN_ANYCAST: c_uchar = 4; +pub const RTN_MULTICAST: c_uchar = 5; +pub const 
RTN_BLACKHOLE: c_uchar = 6; +pub const RTN_UNREACHABLE: c_uchar = 7; +pub const RTN_PROHIBIT: c_uchar = 8; +pub const RTN_THROW: c_uchar = 9; +pub const RTN_NAT: c_uchar = 10; +pub const RTN_XRESOLVE: c_uchar = 11; + +pub const RTPROT_UNSPEC: c_uchar = 0; +pub const RTPROT_REDIRECT: c_uchar = 1; +pub const RTPROT_KERNEL: c_uchar = 2; +pub const RTPROT_BOOT: c_uchar = 3; +pub const RTPROT_STATIC: c_uchar = 4; + +pub const RT_SCOPE_UNIVERSE: c_uchar = 0; +pub const RT_SCOPE_SITE: c_uchar = 200; +pub const RT_SCOPE_LINK: c_uchar = 253; +pub const RT_SCOPE_HOST: c_uchar = 254; +pub const RT_SCOPE_NOWHERE: c_uchar = 255; + +pub const RT_TABLE_UNSPEC: c_uchar = 0; +pub const RT_TABLE_COMPAT: c_uchar = 252; +pub const RT_TABLE_DEFAULT: c_uchar = 253; +pub const RT_TABLE_MAIN: c_uchar = 254; +pub const RT_TABLE_LOCAL: c_uchar = 255; + +pub const RTMSG_NEWDEVICE: u32 = 0x11; +pub const RTMSG_DELDEVICE: u32 = 0x12; +pub const RTMSG_NEWROUTE: u32 = 0x21; +pub const RTMSG_DELROUTE: u32 = 0x22; + +pub const CTL_KERN: c_int = 1; +pub const CTL_VM: c_int = 2; +pub const CTL_NET: c_int = 3; +pub const CTL_FS: c_int = 5; +pub const CTL_DEBUG: c_int = 6; +pub const CTL_DEV: c_int = 7; +pub const CTL_BUS: c_int = 8; +pub const CTL_ABI: c_int = 9; +pub const CTL_CPU: c_int = 10; + +pub const CTL_BUS_ISA: c_int = 1; + +pub const INOTIFY_MAX_USER_INSTANCES: c_int = 1; +pub const INOTIFY_MAX_USER_WATCHES: c_int = 2; +pub const INOTIFY_MAX_QUEUED_EVENTS: c_int = 3; + +pub const KERN_OSTYPE: c_int = 1; +pub const KERN_OSRELEASE: c_int = 2; +pub const KERN_OSREV: c_int = 3; +pub const KERN_VERSION: c_int = 4; +pub const KERN_SECUREMASK: c_int = 5; +pub const KERN_PROF: c_int = 6; +pub const KERN_NODENAME: c_int = 7; +pub const KERN_DOMAINNAME: c_int = 8; +pub const KERN_PANIC: c_int = 15; +pub const KERN_REALROOTDEV: c_int = 16; +pub const KERN_SPARC_REBOOT: c_int = 21; +pub const KERN_CTLALTDEL: c_int = 22; +pub const KERN_PRINTK: c_int = 23; +pub const KERN_NAMETRANS: c_int = 24; +pub const KERN_PPC_HTABRECLAIM: c_int = 25; +pub const KERN_PPC_ZEROPAGED: c_int = 26; +pub const KERN_PPC_POWERSAVE_NAP: c_int = 27; +pub const KERN_MODPROBE: c_int = 28; +pub const KERN_SG_BIG_BUFF: c_int = 29; +pub const KERN_ACCT: c_int = 30; +pub const KERN_PPC_L2CR: c_int = 31; +pub const KERN_RTSIGNR: c_int = 32; +pub const KERN_RTSIGMAX: c_int = 33; +pub const KERN_SHMMAX: c_int = 34; +pub const KERN_MSGMAX: c_int = 35; +pub const KERN_MSGMNB: c_int = 36; +pub const KERN_MSGPOOL: c_int = 37; +pub const KERN_SYSRQ: c_int = 38; +pub const KERN_MAX_THREADS: c_int = 39; +pub const KERN_RANDOM: c_int = 40; +pub const KERN_SHMALL: c_int = 41; +pub const KERN_MSGMNI: c_int = 42; +pub const KERN_SEM: c_int = 43; +pub const KERN_SPARC_STOP_A: c_int = 44; +pub const KERN_SHMMNI: c_int = 45; +pub const KERN_OVERFLOWUID: c_int = 46; +pub const KERN_OVERFLOWGID: c_int = 47; +pub const KERN_SHMPATH: c_int = 48; +pub const KERN_HOTPLUG: c_int = 49; +pub const KERN_IEEE_EMULATION_WARNINGS: c_int = 50; +pub const KERN_S390_USER_DEBUG_LOGGING: c_int = 51; +pub const KERN_CORE_USES_PID: c_int = 52; +pub const KERN_TAINTED: c_int = 53; +pub const KERN_CADPID: c_int = 54; +pub const KERN_PIDMAX: c_int = 55; +pub const KERN_CORE_PATTERN: c_int = 56; +pub const KERN_PANIC_ON_OOPS: c_int = 57; +pub const KERN_HPPA_PWRSW: c_int = 58; +pub const KERN_HPPA_UNALIGNED: c_int = 59; +pub const KERN_PRINTK_RATELIMIT: c_int = 60; +pub const KERN_PRINTK_RATELIMIT_BURST: c_int = 61; +pub const KERN_PTY: c_int = 62; +pub const KERN_NGROUPS_MAX: c_int = 63; +pub 
const KERN_SPARC_SCONS_PWROFF: c_int = 64; +pub const KERN_HZ_TIMER: c_int = 65; +pub const KERN_UNKNOWN_NMI_PANIC: c_int = 66; +pub const KERN_BOOTLOADER_TYPE: c_int = 67; +pub const KERN_RANDOMIZE: c_int = 68; +pub const KERN_SETUID_DUMPABLE: c_int = 69; +pub const KERN_SPIN_RETRY: c_int = 70; +pub const KERN_ACPI_VIDEO_FLAGS: c_int = 71; +pub const KERN_IA64_UNALIGNED: c_int = 72; +pub const KERN_COMPAT_LOG: c_int = 73; +pub const KERN_MAX_LOCK_DEPTH: c_int = 74; + +pub const VM_OVERCOMMIT_MEMORY: c_int = 5; +pub const VM_PAGE_CLUSTER: c_int = 10; +pub const VM_DIRTY_BACKGROUND: c_int = 11; +pub const VM_DIRTY_RATIO: c_int = 12; +pub const VM_DIRTY_WB_CS: c_int = 13; +pub const VM_DIRTY_EXPIRE_CS: c_int = 14; +pub const VM_NR_PDFLUSH_THREADS: c_int = 15; +pub const VM_OVERCOMMIT_RATIO: c_int = 16; +pub const VM_PAGEBUF: c_int = 17; +pub const VM_HUGETLB_PAGES: c_int = 18; +pub const VM_SWAPPINESS: c_int = 19; +pub const VM_LOWMEM_RESERVE_RATIO: c_int = 20; +pub const VM_MIN_FREE_KBYTES: c_int = 21; +pub const VM_MAX_MAP_COUNT: c_int = 22; +pub const VM_LAPTOP_MODE: c_int = 23; +pub const VM_BLOCK_DUMP: c_int = 24; +pub const VM_HUGETLB_GROUP: c_int = 25; +pub const VM_VFS_CACHE_PRESSURE: c_int = 26; +pub const VM_LEGACY_VA_LAYOUT: c_int = 27; +pub const VM_SWAP_TOKEN_TIMEOUT: c_int = 28; +pub const VM_DROP_PAGECACHE: c_int = 29; +pub const VM_PERCPU_PAGELIST_FRACTION: c_int = 30; +pub const VM_ZONE_RECLAIM_MODE: c_int = 31; +pub const VM_MIN_UNMAPPED: c_int = 32; +pub const VM_PANIC_ON_OOM: c_int = 33; +pub const VM_VDSO_ENABLED: c_int = 34; + +pub const NET_CORE: c_int = 1; +pub const NET_ETHER: c_int = 2; +pub const NET_802: c_int = 3; +pub const NET_UNIX: c_int = 4; +pub const NET_IPV4: c_int = 5; +pub const NET_IPX: c_int = 6; +pub const NET_ATALK: c_int = 7; +pub const NET_NETROM: c_int = 8; +pub const NET_AX25: c_int = 9; +pub const NET_BRIDGE: c_int = 10; +pub const NET_ROSE: c_int = 11; +pub const NET_IPV6: c_int = 12; +pub const NET_X25: c_int = 13; +pub const NET_TR: c_int = 14; +pub const NET_DECNET: c_int = 15; +pub const NET_ECONET: c_int = 16; +pub const NET_SCTP: c_int = 17; +pub const NET_LLC: c_int = 18; +pub const NET_NETFILTER: c_int = 19; +pub const NET_DCCP: c_int = 20; +pub const HUGETLB_FLAG_ENCODE_SHIFT: c_int = 26; +pub const MAP_HUGE_SHIFT: c_int = HUGETLB_FLAG_ENCODE_SHIFT; + +// include/linux/sched.h +pub const PF_VCPU: c_int = 0x00000001; +pub const PF_IDLE: c_int = 0x00000002; +pub const PF_EXITING: c_int = 0x00000004; +pub const PF_POSTCOREDUMP: c_int = 0x00000008; +pub const PF_IO_WORKER: c_int = 0x00000010; +pub const PF_WQ_WORKER: c_int = 0x00000020; +pub const PF_FORKNOEXEC: c_int = 0x00000040; +pub const PF_MCE_PROCESS: c_int = 0x00000080; +pub const PF_SUPERPRIV: c_int = 0x00000100; +pub const PF_DUMPCORE: c_int = 0x00000200; +pub const PF_SIGNALED: c_int = 0x00000400; +pub const PF_MEMALLOC: c_int = 0x00000800; +pub const PF_NPROC_EXCEEDED: c_int = 0x00001000; +pub const PF_USED_MATH: c_int = 0x00002000; +pub const PF_USER_WORKER: c_int = 0x00004000; +pub const PF_NOFREEZE: c_int = 0x00008000; + +pub const PF_KSWAPD: c_int = 0x00020000; +pub const PF_MEMALLOC_NOFS: c_int = 0x00040000; +pub const PF_MEMALLOC_NOIO: c_int = 0x00080000; +pub const PF_LOCAL_THROTTLE: c_int = 0x00100000; +pub const PF_KTHREAD: c_int = 0x00200000; +pub const PF_RANDOMIZE: c_int = 0x00400000; + +pub const PF_NO_SETAFFINITY: c_int = 0x04000000; +pub const PF_MCE_EARLY: c_int = 0x08000000; +pub const PF_MEMALLOC_PIN: c_int = 0x10000000; + +pub const PF_SUSPEND_TASK: c_int = 
0x80000000; + +pub const KLOG_CLOSE: c_int = 0; +pub const KLOG_OPEN: c_int = 1; +pub const KLOG_READ: c_int = 2; +pub const KLOG_READ_ALL: c_int = 3; +pub const KLOG_READ_CLEAR: c_int = 4; +pub const KLOG_CLEAR: c_int = 5; +pub const KLOG_CONSOLE_OFF: c_int = 6; +pub const KLOG_CONSOLE_ON: c_int = 7; +pub const KLOG_CONSOLE_LEVEL: c_int = 8; +pub const KLOG_SIZE_UNREAD: c_int = 9; +pub const KLOG_SIZE_BUFFER: c_int = 10; + +// From NDK's linux/auxvec.h +pub const AT_NULL: c_ulong = 0; +pub const AT_IGNORE: c_ulong = 1; +pub const AT_EXECFD: c_ulong = 2; +pub const AT_PHDR: c_ulong = 3; +pub const AT_PHENT: c_ulong = 4; +pub const AT_PHNUM: c_ulong = 5; +pub const AT_PAGESZ: c_ulong = 6; +pub const AT_BASE: c_ulong = 7; +pub const AT_FLAGS: c_ulong = 8; +pub const AT_ENTRY: c_ulong = 9; +pub const AT_NOTELF: c_ulong = 10; +pub const AT_UID: c_ulong = 11; +pub const AT_EUID: c_ulong = 12; +pub const AT_GID: c_ulong = 13; +pub const AT_EGID: c_ulong = 14; +pub const AT_PLATFORM: c_ulong = 15; +pub const AT_HWCAP: c_ulong = 16; +pub const AT_CLKTCK: c_ulong = 17; +pub const AT_SECURE: c_ulong = 23; +pub const AT_BASE_PLATFORM: c_ulong = 24; +pub const AT_RANDOM: c_ulong = 25; +pub const AT_HWCAP2: c_ulong = 26; +pub const AT_RSEQ_FEATURE_SIZE: c_ulong = 27; +pub const AT_RSEQ_ALIGN: c_ulong = 28; +pub const AT_EXECFN: c_ulong = 31; +pub const AT_MINSIGSTKSZ: c_ulong = 51; + +// siginfo.h +pub const SI_DETHREAD: c_int = -7; +pub const TRAP_PERF: c_int = 6; + +// Most `*_SUPER_MAGIC` constants are defined at the `linux_like` level; the +// following are only available on newer Linux versions than the versions +// currently used in CI in some configurations, so we define them here. +cfg_if! { + if #[cfg(not(target_arch = "s390x"))] { + pub const XFS_SUPER_MAGIC: c_long = 0x58465342; + } else if #[cfg(target_arch = "s390x")] { + pub const XFS_SUPER_MAGIC: c_uint = 0x58465342; + } +} + +f! 
{ + pub fn CMSG_NXTHDR(mhdr: *const msghdr, cmsg: *const cmsghdr) -> *mut cmsghdr { + let next = (cmsg as usize + super::CMSG_ALIGN((*cmsg).cmsg_len as usize)) as *mut cmsghdr; + let max = (*mhdr).msg_control as usize + (*mhdr).msg_controllen as usize; + if (next.offset(1)) as usize > max { + core::ptr::null_mut::<cmsghdr>() + } else { + next as *mut cmsghdr + } + } + + pub fn CPU_ALLOC_SIZE(count: c_int) -> size_t { + let _dummy: cpu_set_t = mem::zeroed(); + let size_in_bits = 8 * size_of_val(&_dummy.__bits[0]); + ((count as size_t + size_in_bits - 1) / 8) as size_t + } + + pub fn CPU_ZERO(cpuset: &mut cpu_set_t) -> () { + for slot in cpuset.__bits.iter_mut() { + *slot = 0; + } + } + + pub fn CPU_SET(cpu: usize, cpuset: &mut cpu_set_t) -> () { + let size_in_bits = 8 * size_of_val(&cpuset.__bits[0]); // 32, 64 etc + let (idx, offset) = (cpu / size_in_bits, cpu % size_in_bits); + cpuset.__bits[idx] |= 1 << offset; + () + } + + pub fn CPU_CLR(cpu: usize, cpuset: &mut cpu_set_t) -> () { + let size_in_bits = 8 * size_of_val(&cpuset.__bits[0]); // 32, 64 etc + let (idx, offset) = (cpu / size_in_bits, cpu % size_in_bits); + cpuset.__bits[idx] &= !(1 << offset); + () + } + + pub fn CPU_ISSET(cpu: usize, cpuset: &cpu_set_t) -> bool { + let size_in_bits = 8 * size_of_val(&cpuset.__bits[0]); + let (idx, offset) = (cpu / size_in_bits, cpu % size_in_bits); + 0 != (cpuset.__bits[idx] & (1 << offset)) + } + + pub fn CPU_COUNT_S(size: usize, cpuset: &cpu_set_t) -> c_int { + let mut s: u32 = 0; + let size_of_mask = size_of_val(&cpuset.__bits[0]); + for i in cpuset.__bits[..(size / size_of_mask)].iter() { + s += i.count_ones(); + } + s as c_int + } + + pub fn CPU_COUNT(cpuset: &cpu_set_t) -> c_int { + CPU_COUNT_S(size_of::<cpu_set_t>(), cpuset) + } + + pub fn CPU_EQUAL(set1: &cpu_set_t, set2: &cpu_set_t) -> bool { + set1.__bits == set2.__bits + } + + pub fn NLA_ALIGN(len: c_int) -> c_int { + return ((len) + NLA_ALIGNTO - 1) & !(NLA_ALIGNTO - 1); + } + + pub fn SO_EE_OFFENDER(ee: *const crate::sock_extended_err) -> *mut crate::sockaddr { + ee.offset(1) as *mut crate::sockaddr + } +} + +safe_f!
{ + pub const fn makedev(ma: c_uint, mi: c_uint) -> crate::dev_t { + let ma = ma as crate::dev_t; + let mi = mi as crate::dev_t; + ((ma & 0xfff) << 8) | (mi & 0xff) | ((mi & 0xfff00) << 12) + } + + pub const fn major(dev: crate::dev_t) -> c_int { + ((dev >> 8) & 0xfff) as c_int + } + + pub const fn minor(dev: crate::dev_t) -> c_int { + ((dev & 0xff) | ((dev >> 12) & 0xfff00)) as c_int + } +} + +extern "C" { + pub fn setgrent(); + pub fn endgrent(); + pub fn getgrent() -> *mut crate::group; + pub fn getrlimit64(resource: c_int, rlim: *mut rlimit64) -> c_int; + pub fn setrlimit64(resource: c_int, rlim: *const rlimit64) -> c_int; + pub fn getrlimit(resource: c_int, rlim: *mut crate::rlimit) -> c_int; + pub fn setrlimit(resource: c_int, rlim: *const crate::rlimit) -> c_int; + pub fn prlimit( + pid: crate::pid_t, + resource: c_int, + new_limit: *const crate::rlimit, + old_limit: *mut crate::rlimit, + ) -> c_int; + pub fn prlimit64( + pid: crate::pid_t, + resource: c_int, + new_limit: *const crate::rlimit64, + old_limit: *mut crate::rlimit64, + ) -> c_int; + pub fn strerror_r(errnum: c_int, buf: *mut c_char, buflen: size_t) -> c_int; + + pub fn gettimeofday(tp: *mut crate::timeval, tz: *mut crate::timezone) -> c_int; + pub fn mlock2(addr: *const c_void, len: size_t, flags: c_int) -> c_int; + pub fn madvise(addr: *mut c_void, len: size_t, advice: c_int) -> c_int; + pub fn msync(addr: *mut c_void, len: size_t, flags: c_int) -> c_int; + pub fn mprotect(addr: *mut c_void, len: size_t, prot: c_int) -> c_int; + pub fn getnameinfo( + sa: *const crate::sockaddr, + salen: crate::socklen_t, + host: *mut c_char, + hostlen: size_t, + serv: *mut c_char, + servlen: size_t, + flags: c_int, + ) -> c_int; + pub fn preadv(fd: c_int, iov: *const crate::iovec, count: c_int, offset: off_t) -> ssize_t; + pub fn pwritev(fd: c_int, iov: *const crate::iovec, count: c_int, offset: off_t) -> ssize_t; + pub fn process_vm_readv( + pid: crate::pid_t, + local_iov: *const crate::iovec, + local_iov_count: c_ulong, + remote_iov: *const crate::iovec, + remote_iov_count: c_ulong, + flags: c_ulong, + ) -> ssize_t; + pub fn process_vm_writev( + pid: crate::pid_t, + local_iov: *const crate::iovec, + local_iov_count: c_ulong, + remote_iov: *const crate::iovec, + remote_iov_count: c_ulong, + flags: c_ulong, + ) -> ssize_t; + pub fn ptrace(request: c_int, ...) 
-> c_long; + pub fn getpriority(which: c_int, who: crate::id_t) -> c_int; + pub fn setpriority(which: c_int, who: crate::id_t, prio: c_int) -> c_int; + pub fn __sched_cpualloc(count: size_t) -> *mut crate::cpu_set_t; + pub fn __sched_cpufree(set: *mut crate::cpu_set_t); + pub fn __sched_cpucount(setsize: size_t, set: *const cpu_set_t) -> c_int; + pub fn sched_getcpu() -> c_int; + pub fn mallinfo() -> crate::mallinfo; + // available from API 23 + pub fn malloc_info(options: c_int, stream: *mut crate::FILE) -> c_int; + + pub fn malloc_usable_size(ptr: *const c_void) -> size_t; + + pub fn utmpname(name: *const c_char) -> c_int; + pub fn setutent(); + pub fn getutent() -> *mut utmp; + + pub fn seekdir(dirp: *mut crate::DIR, loc: c_long); + pub fn telldir(dirp: *mut crate::DIR) -> c_long; + pub fn fallocate(fd: c_int, mode: c_int, offset: off_t, len: off_t) -> c_int; + pub fn fallocate64(fd: c_int, mode: c_int, offset: off64_t, len: off64_t) -> c_int; + pub fn posix_fallocate(fd: c_int, offset: off_t, len: off_t) -> c_int; + pub fn posix_fallocate64(fd: c_int, offset: off64_t, len: off64_t) -> c_int; + pub fn getxattr( + path: *const c_char, + name: *const c_char, + value: *mut c_void, + size: size_t, + ) -> ssize_t; + pub fn lgetxattr( + path: *const c_char, + name: *const c_char, + value: *mut c_void, + size: size_t, + ) -> ssize_t; + pub fn fgetxattr( + filedes: c_int, + name: *const c_char, + value: *mut c_void, + size: size_t, + ) -> ssize_t; + pub fn setxattr( + path: *const c_char, + name: *const c_char, + value: *const c_void, + size: size_t, + flags: c_int, + ) -> c_int; + pub fn lsetxattr( + path: *const c_char, + name: *const c_char, + value: *const c_void, + size: size_t, + flags: c_int, + ) -> c_int; + pub fn fsetxattr( + filedes: c_int, + name: *const c_char, + value: *const c_void, + size: size_t, + flags: c_int, + ) -> c_int; + pub fn listxattr(path: *const c_char, list: *mut c_char, size: size_t) -> ssize_t; + pub fn llistxattr(path: *const c_char, list: *mut c_char, size: size_t) -> ssize_t; + pub fn flistxattr(filedes: c_int, list: *mut c_char, size: size_t) -> ssize_t; + pub fn removexattr(path: *const c_char, name: *const c_char) -> c_int; + pub fn lremovexattr(path: *const c_char, name: *const c_char) -> c_int; + pub fn fremovexattr(filedes: c_int, name: *const c_char) -> c_int; + pub fn signalfd(fd: c_int, mask: *const crate::sigset_t, flags: c_int) -> c_int; + pub fn timerfd_create(clock: crate::clockid_t, flags: c_int) -> c_int; + pub fn timerfd_gettime(fd: c_int, current_value: *mut itimerspec) -> c_int; + pub fn timerfd_settime( + fd: c_int, + flags: c_int, + new_value: *const itimerspec, + old_value: *mut itimerspec, + ) -> c_int; + pub fn syscall(num: c_long, ...) 
-> c_long; + pub fn sched_getaffinity( + pid: crate::pid_t, + cpusetsize: size_t, + cpuset: *mut cpu_set_t, + ) -> c_int; + pub fn sched_setaffinity( + pid: crate::pid_t, + cpusetsize: size_t, + cpuset: *const cpu_set_t, + ) -> c_int; + pub fn epoll_create(size: c_int) -> c_int; + pub fn epoll_create1(flags: c_int) -> c_int; + pub fn epoll_wait( + epfd: c_int, + events: *mut crate::epoll_event, + maxevents: c_int, + timeout: c_int, + ) -> c_int; + pub fn epoll_ctl(epfd: c_int, op: c_int, fd: c_int, event: *mut crate::epoll_event) -> c_int; + pub fn pthread_getschedparam( + native: crate::pthread_t, + policy: *mut c_int, + param: *mut crate::sched_param, + ) -> c_int; + pub fn unshare(flags: c_int) -> c_int; + pub fn umount(target: *const c_char) -> c_int; + pub fn sched_get_priority_max(policy: c_int) -> c_int; + pub fn tee(fd_in: c_int, fd_out: c_int, len: size_t, flags: c_uint) -> ssize_t; + pub fn settimeofday(tv: *const crate::timeval, tz: *const crate::timezone) -> c_int; + pub fn splice( + fd_in: c_int, + off_in: *mut crate::loff_t, + fd_out: c_int, + off_out: *mut crate::loff_t, + len: size_t, + flags: c_uint, + ) -> ssize_t; + pub fn eventfd(init: c_uint, flags: c_int) -> c_int; + pub fn eventfd_read(fd: c_int, value: *mut eventfd_t) -> c_int; + pub fn eventfd_write(fd: c_int, value: eventfd_t) -> c_int; + pub fn sched_rr_get_interval(pid: crate::pid_t, tp: *mut crate::timespec) -> c_int; + pub fn sem_timedwait(sem: *mut sem_t, abstime: *const crate::timespec) -> c_int; + pub fn sem_getvalue(sem: *mut sem_t, sval: *mut c_int) -> c_int; + pub fn sched_setparam(pid: crate::pid_t, param: *const crate::sched_param) -> c_int; + pub fn setns(fd: c_int, nstype: c_int) -> c_int; + pub fn swapoff(puath: *const c_char) -> c_int; + pub fn vmsplice(fd: c_int, iov: *const crate::iovec, nr_segs: size_t, flags: c_uint) + -> ssize_t; + pub fn mount( + src: *const c_char, + target: *const c_char, + fstype: *const c_char, + flags: c_ulong, + data: *const c_void, + ) -> c_int; + pub fn personality(persona: c_uint) -> c_int; + pub fn prctl(option: c_int, ...) 
-> c_int; + pub fn sched_getparam(pid: crate::pid_t, param: *mut crate::sched_param) -> c_int; + pub fn ppoll( + fds: *mut crate::pollfd, + nfds: nfds_t, + timeout: *const crate::timespec, + sigmask: *const sigset_t, + ) -> c_int; + pub fn pthread_mutex_timedlock( + lock: *mut pthread_mutex_t, + abstime: *const crate::timespec, + ) -> c_int; + pub fn pthread_barrierattr_init(attr: *mut crate::pthread_barrierattr_t) -> c_int; + pub fn pthread_barrierattr_destroy(attr: *mut crate::pthread_barrierattr_t) -> c_int; + pub fn pthread_barrierattr_getpshared( + attr: *const crate::pthread_barrierattr_t, + shared: *mut c_int, + ) -> c_int; + pub fn pthread_barrierattr_setpshared( + attr: *mut crate::pthread_barrierattr_t, + shared: c_int, + ) -> c_int; + pub fn pthread_barrier_init( + barrier: *mut pthread_barrier_t, + attr: *const crate::pthread_barrierattr_t, + count: c_uint, + ) -> c_int; + pub fn pthread_barrier_destroy(barrier: *mut pthread_barrier_t) -> c_int; + pub fn pthread_barrier_wait(barrier: *mut pthread_barrier_t) -> c_int; + pub fn pthread_spin_init(lock: *mut crate::pthread_spinlock_t, pshared: c_int) -> c_int; + pub fn pthread_spin_destroy(lock: *mut crate::pthread_spinlock_t) -> c_int; + pub fn pthread_spin_lock(lock: *mut crate::pthread_spinlock_t) -> c_int; + pub fn pthread_spin_trylock(lock: *mut crate::pthread_spinlock_t) -> c_int; + pub fn pthread_spin_unlock(lock: *mut crate::pthread_spinlock_t) -> c_int; + pub fn clone( + cb: extern "C" fn(*mut c_void) -> c_int, + child_stack: *mut c_void, + flags: c_int, + arg: *mut c_void, + ... + ) -> c_int; + pub fn sched_getscheduler(pid: crate::pid_t) -> c_int; + pub fn clock_nanosleep( + clk_id: crate::clockid_t, + flags: c_int, + rqtp: *const crate::timespec, + rmtp: *mut crate::timespec, + ) -> c_int; + pub fn pthread_attr_getguardsize( + attr: *const crate::pthread_attr_t, + guardsize: *mut size_t, + ) -> c_int; + pub fn pthread_attr_setguardsize(attr: *mut crate::pthread_attr_t, guardsize: size_t) -> c_int; + pub fn pthread_attr_getinheritsched( + attr: *const crate::pthread_attr_t, + flag: *mut c_int, + ) -> c_int; + pub fn pthread_attr_setinheritsched(attr: *mut crate::pthread_attr_t, flag: c_int) -> c_int; + pub fn sethostname(name: *const c_char, len: size_t) -> c_int; + pub fn sched_get_priority_min(policy: c_int) -> c_int; + pub fn pthread_condattr_getpshared( + attr: *const pthread_condattr_t, + pshared: *mut c_int, + ) -> c_int; + pub fn sysinfo(info: *mut crate::sysinfo) -> c_int; + pub fn umount2(target: *const c_char, flags: c_int) -> c_int; + pub fn pthread_setschedparam( + native: crate::pthread_t, + policy: c_int, + param: *const crate::sched_param, + ) -> c_int; + pub fn swapon(path: *const c_char, swapflags: c_int) -> c_int; + pub fn sched_setscheduler( + pid: crate::pid_t, + policy: c_int, + param: *const crate::sched_param, + ) -> c_int; + pub fn sendfile(out_fd: c_int, in_fd: c_int, offset: *mut off_t, count: size_t) -> ssize_t; + pub fn sendfile64(out_fd: c_int, in_fd: c_int, offset: *mut off64_t, count: size_t) -> ssize_t; + pub fn setfsgid(gid: crate::gid_t) -> c_int; + pub fn setfsuid(uid: crate::uid_t) -> c_int; + pub fn sigsuspend(mask: *const crate::sigset_t) -> c_int; + pub fn getgrgid_r( + gid: crate::gid_t, + grp: *mut crate::group, + buf: *mut c_char, + buflen: size_t, + result: *mut *mut crate::group, + ) -> c_int; + pub fn sigaltstack(ss: *const stack_t, oss: *mut stack_t) -> c_int; + pub fn sem_close(sem: *mut sem_t) -> c_int; + pub fn getgrnam_r( + name: *const c_char, + grp: *mut crate::group, + 
buf: *mut c_char, + buflen: size_t, + result: *mut *mut crate::group, + ) -> c_int; + pub fn pthread_sigmask(how: c_int, set: *const sigset_t, oldset: *mut sigset_t) -> c_int; + pub fn sem_open(name: *const c_char, oflag: c_int, ...) -> *mut sem_t; + pub fn getgrnam(name: *const c_char) -> *mut crate::group; + pub fn pthread_kill(thread: crate::pthread_t, sig: c_int) -> c_int; + pub fn sem_unlink(name: *const c_char) -> c_int; + pub fn daemon(nochdir: c_int, noclose: c_int) -> c_int; + pub fn getpwnam_r( + name: *const c_char, + pwd: *mut passwd, + buf: *mut c_char, + buflen: size_t, + result: *mut *mut passwd, + ) -> c_int; + pub fn getpwuid_r( + uid: crate::uid_t, + pwd: *mut passwd, + buf: *mut c_char, + buflen: size_t, + result: *mut *mut passwd, + ) -> c_int; + pub fn sigtimedwait( + set: *const sigset_t, + info: *mut siginfo_t, + timeout: *const crate::timespec, + ) -> c_int; + pub fn sigwait(set: *const sigset_t, sig: *mut c_int) -> c_int; + pub fn pthread_atfork( + prepare: Option<unsafe extern "C" fn()>, + parent: Option<unsafe extern "C" fn()>, + child: Option<unsafe extern "C" fn()>, + ) -> c_int; + pub fn getgrgid(gid: crate::gid_t) -> *mut crate::group; + pub fn getgrouplist( + user: *const c_char, + group: crate::gid_t, + groups: *mut crate::gid_t, + ngroups: *mut c_int, + ) -> c_int; + pub fn initgroups(user: *const c_char, group: crate::gid_t) -> c_int; + pub fn pthread_mutexattr_getpshared( + attr: *const pthread_mutexattr_t, + pshared: *mut c_int, + ) -> c_int; + pub fn popen(command: *const c_char, mode: *const c_char) -> *mut crate::FILE; + pub fn faccessat(dirfd: c_int, pathname: *const c_char, mode: c_int, flags: c_int) -> c_int; + pub fn pthread_create( + native: *mut crate::pthread_t, + attr: *const crate::pthread_attr_t, + f: extern "C" fn(*mut c_void) -> *mut c_void, + value: *mut c_void, + ) -> c_int; + pub fn __errno() -> *mut c_int; + pub fn inotify_rm_watch(fd: c_int, wd: u32) -> c_int; + pub fn inotify_init() -> c_int; + pub fn inotify_init1(flags: c_int) -> c_int; + pub fn inotify_add_watch(fd: c_int, path: *const c_char, mask: u32) -> c_int; + + pub fn regcomp(preg: *mut crate::regex_t, pattern: *const c_char, cflags: c_int) -> c_int; + + pub fn regexec( + preg: *const crate::regex_t, + input: *const c_char, + nmatch: size_t, + pmatch: *mut regmatch_t, + eflags: c_int, + ) -> c_int; + + pub fn regerror( + errcode: c_int, + preg: *const crate::regex_t, + errbuf: *mut c_char, + errbuf_size: size_t, + ) -> size_t; + + pub fn regfree(preg: *mut crate::regex_t); + + pub fn android_set_abort_message(msg: *const c_char); + + pub fn gettid() -> crate::pid_t; + + pub fn getauxval(type_: c_ulong) -> c_ulong; + + /// Only available in API Version 28+ + pub fn getrandom(buf: *mut c_void, buflen: size_t, flags: c_uint) -> ssize_t; + pub fn getentropy(buf: *mut c_void, buflen: size_t) -> c_int; + + pub fn pthread_setname_np(thread: crate::pthread_t, name: *const c_char) -> c_int; + + pub fn __system_property_set(__name: *const c_char, __value: *const c_char) -> c_int; + pub fn __system_property_get(__name: *const c_char, __value: *mut c_char) -> c_int; + pub fn __system_property_find(__name: *const c_char) -> *const prop_info; + pub fn __system_property_find_nth(__n: c_uint) -> *const prop_info; + pub fn __system_property_foreach( + __callback: unsafe extern "C" fn(__pi: *const prop_info, __cookie: *mut c_void), + __cookie: *mut c_void, + ) -> c_int; + + // #include <link.h> + /// Only available in API Version 21+ + pub fn dl_iterate_phdr( + callback: Option< + unsafe extern "C" fn(info: *mut dl_phdr_info, size: usize, data: *mut c_void) -> c_int, +
>, + data: *mut c_void, + ) -> c_int; + + pub fn arc4random() -> u32; + pub fn arc4random_uniform(__upper_bound: u32) -> u32; + pub fn arc4random_buf(__buf: *mut c_void, __n: size_t); + + pub fn reallocarray(ptr: *mut c_void, nmemb: size_t, size: size_t) -> *mut c_void; + + pub fn pthread_getcpuclockid(thread: crate::pthread_t, clk_id: *mut crate::clockid_t) -> c_int; + + pub fn dirname(path: *const c_char) -> *mut c_char; + pub fn basename(path: *const c_char) -> *mut c_char; + pub fn getopt_long( + argc: c_int, + argv: *const *mut c_char, + optstring: *const c_char, + longopts: *const option, + longindex: *mut c_int, + ) -> c_int; + + pub fn sync(); + pub fn syncfs(fd: c_int) -> c_int; + + pub fn memmem( + haystack: *const c_void, + haystacklen: size_t, + needle: *const c_void, + needlelen: size_t, + ) -> *mut c_void; + pub fn fread_unlocked( + buf: *mut c_void, + size: size_t, + nobj: size_t, + stream: *mut crate::FILE, + ) -> size_t; + pub fn fwrite_unlocked( + buf: *const c_void, + size: size_t, + nobj: size_t, + stream: *mut crate::FILE, + ) -> size_t; + pub fn fflush_unlocked(stream: *mut crate::FILE) -> c_int; + pub fn fgets_unlocked(buf: *mut c_char, size: c_int, stream: *mut crate::FILE) -> *mut c_char; + + pub fn klogctl(syslog_type: c_int, bufp: *mut c_char, len: c_int) -> c_int; + + pub fn memfd_create(name: *const c_char, flags: c_uint) -> c_int; + pub fn renameat2( + olddirfd: c_int, + oldpath: *const c_char, + newdirfd: c_int, + newpath: *const c_char, + flags: c_uint, + ) -> c_int; + + pub fn if_nameindex() -> *mut if_nameindex; + pub fn if_freenameindex(ptr: *mut if_nameindex); +} + +cfg_if! { + if #[cfg(target_pointer_width = "32")] { + mod b32; + pub use self::b32::*; + } else if #[cfg(target_pointer_width = "64")] { + mod b64; + pub use self::b64::*; + } else { + // Unknown target_pointer_width + } +} + +impl siginfo_t { + pub unsafe fn si_addr(&self) -> *mut c_void { + #[repr(C)] + struct siginfo_sigfault { + _si_signo: c_int, + _si_errno: c_int, + _si_code: c_int, + si_addr: *mut c_void, + } + (*(self as *const siginfo_t as *const siginfo_sigfault)).si_addr + } + + pub unsafe fn si_value(&self) -> crate::sigval { + #[repr(C)] + struct siginfo_timer { + _si_signo: c_int, + _si_errno: c_int, + _si_code: c_int, + _si_tid: c_int, + _si_overrun: c_int, + si_sigval: crate::sigval, + } + (*(self as *const siginfo_t as *const siginfo_timer)).si_sigval + } +} + +// Internal, for casts to access union fields +#[repr(C)] +struct sifields_sigchld { + si_pid: crate::pid_t, + si_uid: crate::uid_t, + si_status: c_int, + si_utime: c_long, + si_stime: c_long, +} +impl Copy for sifields_sigchld {} +impl Clone for sifields_sigchld { + fn clone(&self) -> sifields_sigchld { + *self + } +} + +// Internal, for casts to access union fields +#[repr(C)] +union sifields { + _align_pointer: *mut c_void, + sigchld: sifields_sigchld, +} + +// Internal, for casts to access union fields. Note that some variants +// of sifields start with a pointer, which makes the alignment of +// sifields vary on 32-bit and 64-bit architectures. 
+#[repr(C)] +struct siginfo_f { + _siginfo_base: [c_int; 3], + sifields: sifields, +} + +impl siginfo_t { + unsafe fn sifields(&self) -> &sifields { + &(*(self as *const siginfo_t as *const siginfo_f)).sifields + } + + pub unsafe fn si_pid(&self) -> crate::pid_t { + self.sifields().sigchld.si_pid + } + + pub unsafe fn si_uid(&self) -> crate::uid_t { + self.sifields().sigchld.si_uid + } + + pub unsafe fn si_status(&self) -> c_int { + self.sifields().sigchld.si_status + } + + pub unsafe fn si_utime(&self) -> c_long { + self.sifields().sigchld.si_utime + } + + pub unsafe fn si_stime(&self) -> c_long { + self.sifields().sigchld.si_stime + } +} diff --git a/vendor/libc/src/unix/linux_like/emscripten/lfs64.rs b/vendor/libc/src/unix/linux_like/emscripten/lfs64.rs new file mode 100644 index 00000000000000..06be875446bb6a --- /dev/null +++ b/vendor/libc/src/unix/linux_like/emscripten/lfs64.rs @@ -0,0 +1,211 @@ +use crate::off64_t; +use crate::prelude::*; + +// In-sync with ../linux/musl/lfs64.rs except for fallocate64, prlimit64 and sendfile64 + +#[inline] +pub unsafe extern "C" fn creat64(path: *const c_char, mode: crate::mode_t) -> c_int { + crate::creat(path, mode) +} + +#[inline] +pub unsafe extern "C" fn fgetpos64(stream: *mut crate::FILE, pos: *mut crate::fpos64_t) -> c_int { + crate::fgetpos(stream, pos as *mut _) +} + +#[inline] +pub unsafe extern "C" fn fopen64(pathname: *const c_char, mode: *const c_char) -> *mut crate::FILE { + crate::fopen(pathname, mode) +} + +#[inline] +pub unsafe extern "C" fn freopen64( + pathname: *const c_char, + mode: *const c_char, + stream: *mut crate::FILE, +) -> *mut crate::FILE { + crate::freopen(pathname, mode, stream) +} + +#[inline] +pub unsafe extern "C" fn fseeko64( + stream: *mut crate::FILE, + offset: off64_t, + whence: c_int, +) -> c_int { + crate::fseeko(stream, offset, whence) +} + +#[inline] +pub unsafe extern "C" fn fsetpos64(stream: *mut crate::FILE, pos: *const crate::fpos64_t) -> c_int { + crate::fsetpos(stream, pos as *mut _) +} + +#[inline] +pub unsafe extern "C" fn fstat64(fildes: c_int, buf: *mut crate::stat64) -> c_int { + crate::fstat(fildes, buf as *mut _) +} + +#[inline] +pub unsafe extern "C" fn fstatat64( + fd: c_int, + path: *const c_char, + buf: *mut crate::stat64, + flag: c_int, +) -> c_int { + crate::fstatat(fd, path, buf as *mut _, flag) +} + +#[inline] +pub unsafe extern "C" fn fstatfs64(fd: c_int, buf: *mut crate::statfs64) -> c_int { + crate::fstatfs(fd, buf as *mut _) +} + +#[inline] +pub unsafe extern "C" fn fstatvfs64(fd: c_int, buf: *mut crate::statvfs64) -> c_int { + crate::fstatvfs(fd, buf as *mut _) +} + +#[inline] +pub unsafe extern "C" fn ftello64(stream: *mut crate::FILE) -> off64_t { + crate::ftello(stream) +} + +#[inline] +pub unsafe extern "C" fn ftruncate64(fd: c_int, length: off64_t) -> c_int { + crate::ftruncate(fd, length) +} + +#[inline] +pub unsafe extern "C" fn getrlimit64(resource: c_int, rlim: *mut crate::rlimit64) -> c_int { + crate::getrlimit(resource, rlim as *mut _) +} + +#[inline] +pub unsafe extern "C" fn lseek64(fd: c_int, offset: off64_t, whence: c_int) -> off64_t { + crate::lseek(fd, offset, whence) +} + +#[inline] +pub unsafe extern "C" fn lstat64(path: *const c_char, buf: *mut crate::stat64) -> c_int { + crate::lstat(path, buf as *mut _) +} + +#[inline] +pub unsafe extern "C" fn mmap64( + addr: *mut c_void, + length: size_t, + prot: c_int, + flags: c_int, + fd: c_int, + offset: off64_t, +) -> *mut c_void { + crate::mmap(addr, length, prot, flags, fd, offset) +} + +// These functions are 
variadic in the C ABI since the `mode` argument is "optional". Variadic +// `extern "C"` functions are unstable in Rust so we cannot write a shim function for these +// entrypoints. See https://github.com/rust-lang/rust/issues/44930. +// +// These aliases are mostly fine though, neither function takes a LFS64-namespaced type as an +// argument, nor do their names clash with any declared types. +pub use crate::{open as open64, openat as openat64}; + +#[inline] +pub unsafe extern "C" fn posix_fadvise64( + fd: c_int, + offset: off64_t, + len: off64_t, + advice: c_int, +) -> c_int { + crate::posix_fadvise(fd, offset, len, advice) +} + +#[inline] +pub unsafe extern "C" fn posix_fallocate64(fd: c_int, offset: off64_t, len: off64_t) -> c_int { + crate::posix_fallocate(fd, offset, len) +} + +#[inline] +pub unsafe extern "C" fn pread64( + fd: c_int, + buf: *mut c_void, + count: size_t, + offset: off64_t, +) -> ssize_t { + crate::pread(fd, buf, count, offset) +} + +#[inline] +pub unsafe extern "C" fn preadv64( + fd: c_int, + iov: *const crate::iovec, + iovcnt: c_int, + offset: off64_t, +) -> ssize_t { + crate::preadv(fd, iov, iovcnt, offset) +} + +#[inline] +pub unsafe extern "C" fn pwrite64( + fd: c_int, + buf: *const c_void, + count: size_t, + offset: off64_t, +) -> ssize_t { + crate::pwrite(fd, buf, count, offset) +} + +#[inline] +pub unsafe extern "C" fn pwritev64( + fd: c_int, + iov: *const crate::iovec, + iovcnt: c_int, + offset: off64_t, +) -> ssize_t { + crate::pwritev(fd, iov, iovcnt, offset) +} + +#[inline] +pub unsafe extern "C" fn readdir64(dirp: *mut crate::DIR) -> *mut crate::dirent64 { + crate::readdir(dirp) as *mut _ +} + +#[inline] +pub unsafe extern "C" fn readdir64_r( + dirp: *mut crate::DIR, + entry: *mut crate::dirent64, + result: *mut *mut crate::dirent64, +) -> c_int { + crate::readdir_r(dirp, entry as *mut _, result as *mut _) +} + +#[inline] +pub unsafe extern "C" fn setrlimit64(resource: c_int, rlim: *const crate::rlimit64) -> c_int { + crate::setrlimit(resource, rlim as *mut _) +} + +#[inline] +pub unsafe extern "C" fn stat64(pathname: *const c_char, statbuf: *mut crate::stat64) -> c_int { + crate::stat(pathname, statbuf as *mut _) +} + +#[inline] +pub unsafe extern "C" fn statfs64(pathname: *const c_char, buf: *mut crate::statfs64) -> c_int { + crate::statfs(pathname, buf as *mut _) +} + +#[inline] +pub unsafe extern "C" fn statvfs64(path: *const c_char, buf: *mut crate::statvfs64) -> c_int { + crate::statvfs(path, buf as *mut _) +} + +#[inline] +pub unsafe extern "C" fn tmpfile64() -> *mut crate::FILE { + crate::tmpfile() +} + +#[inline] +pub unsafe extern "C" fn truncate64(path: *const c_char, length: off64_t) -> c_int { + crate::truncate(path, length) +} diff --git a/vendor/libc/src/unix/linux_like/emscripten/mod.rs b/vendor/libc/src/unix/linux_like/emscripten/mod.rs new file mode 100644 index 00000000000000..417e3e593bc5eb --- /dev/null +++ b/vendor/libc/src/unix/linux_like/emscripten/mod.rs @@ -0,0 +1,1589 @@ +use crate::prelude::*; + +pub type wchar_t = i32; +pub type useconds_t = u32; +pub type dev_t = u32; +pub type socklen_t = u32; +pub type pthread_t = c_ulong; +pub type mode_t = u32; +pub type shmatt_t = c_ulong; +pub type mqd_t = c_int; +pub type msgqnum_t = c_ulong; +pub type msglen_t = c_ulong; +pub type nfds_t = c_ulong; +pub type nl_item = c_int; +pub type idtype_t = c_uint; +pub type loff_t = i64; +pub type pthread_key_t = c_uint; + +pub type clock_t = c_long; +pub type time_t = i64; +pub type suseconds_t = c_long; +pub type ino_t = u64; +pub type off_t = 
i64; +pub type blkcnt_t = i32; + +pub type blksize_t = c_long; +pub type fsblkcnt_t = u32; +pub type fsfilcnt_t = u32; +pub type rlim_t = u64; +pub type nlink_t = u32; + +pub type ino64_t = crate::ino_t; +pub type off64_t = off_t; +pub type blkcnt64_t = crate::blkcnt_t; +pub type rlim64_t = crate::rlim_t; + +pub type rlimit64 = crate::rlimit; +pub type flock64 = crate::flock; +pub type stat64 = crate::stat; +pub type statfs64 = crate::statfs; +pub type statvfs64 = crate::statvfs; +pub type dirent64 = crate::dirent; + +#[derive(Debug)] +pub enum fpos64_t {} // FIXME(emscripten): fill this out with a struct +impl Copy for fpos64_t {} +impl Clone for fpos64_t { + fn clone(&self) -> fpos64_t { + *self + } +} + +s! { + pub struct glob_t { + pub gl_pathc: size_t, + pub gl_pathv: *mut *mut c_char, + pub gl_offs: size_t, + pub gl_flags: c_int, + + __unused1: *mut c_void, + __unused2: *mut c_void, + __unused3: *mut c_void, + __unused4: *mut c_void, + __unused5: *mut c_void, + } + + pub struct passwd { + pub pw_name: *mut c_char, + pub pw_passwd: *mut c_char, + pub pw_uid: crate::uid_t, + pub pw_gid: crate::gid_t, + pub pw_gecos: *mut c_char, + pub pw_dir: *mut c_char, + pub pw_shell: *mut c_char, + } + + pub struct spwd { + pub sp_namp: *mut c_char, + pub sp_pwdp: *mut c_char, + pub sp_lstchg: c_long, + pub sp_min: c_long, + pub sp_max: c_long, + pub sp_warn: c_long, + pub sp_inact: c_long, + pub sp_expire: c_long, + pub sp_flag: c_ulong, + } + + pub struct statvfs { + pub f_bsize: c_ulong, + pub f_frsize: c_ulong, + pub f_blocks: crate::fsblkcnt_t, + pub f_bfree: crate::fsblkcnt_t, + pub f_bavail: crate::fsblkcnt_t, + pub f_files: crate::fsfilcnt_t, + pub f_ffree: crate::fsfilcnt_t, + pub f_favail: crate::fsfilcnt_t, + pub f_fsid: c_ulong, + __f_unused: c_int, + pub f_flag: c_ulong, + pub f_namemax: c_ulong, + __f_spare: [c_int; 6], + } + + pub struct signalfd_siginfo { + pub ssi_signo: u32, + pub ssi_errno: i32, + pub ssi_code: i32, + pub ssi_pid: u32, + pub ssi_uid: u32, + pub ssi_fd: i32, + pub ssi_tid: u32, + pub ssi_band: u32, + pub ssi_overrun: u32, + pub ssi_trapno: u32, + pub ssi_status: i32, + pub ssi_int: i32, + pub ssi_ptr: u64, + pub ssi_utime: u64, + pub ssi_stime: u64, + pub ssi_addr: u64, + pub ssi_addr_lsb: u16, + _pad2: u16, + pub ssi_syscall: i32, + pub ssi_call_addr: u64, + pub ssi_arch: u32, + _pad: [u8; 28], + } + + pub struct fsid_t { + __val: [c_int; 2], + } + + pub struct cpu_set_t { + bits: [u32; 32], + } + + pub struct if_nameindex { + pub if_index: c_uint, + pub if_name: *mut c_char, + } + + // System V IPC + pub struct msginfo { + pub msgpool: c_int, + pub msgmap: c_int, + pub msgmax: c_int, + pub msgmnb: c_int, + pub msgmni: c_int, + pub msgssz: c_int, + pub msgtql: c_int, + pub msgseg: c_ushort, + } + + pub struct sembuf { + pub sem_num: c_ushort, + pub sem_op: c_short, + pub sem_flg: c_short, + } + + // FIXME(1.0): This should not implement `PartialEq` + #[allow(unpredictable_function_pointer_comparisons)] + pub struct sigaction { + pub sa_sigaction: crate::sighandler_t, + pub sa_mask: crate::sigset_t, + pub sa_flags: c_int, + pub sa_restorer: Option<extern "C" fn()>, + } + + pub struct ipc_perm { + pub __ipc_perm_key: crate::key_t, + pub uid: crate::uid_t, + pub gid: crate::gid_t, + pub cuid: crate::uid_t, + pub cgid: crate::gid_t, + pub mode: mode_t, + pub __seq: c_int, + __unused1: c_long, + __unused2: c_long, + } + + pub struct termios { + pub c_iflag: crate::tcflag_t, + pub c_oflag: crate::tcflag_t, + pub c_cflag: crate::tcflag_t, + pub c_lflag: crate::tcflag_t, + pub c_line:
crate::cc_t, + pub c_cc: [crate::cc_t; crate::NCCS], + pub __c_ispeed: crate::speed_t, + pub __c_ospeed: crate::speed_t, + } + + pub struct flock { + pub l_type: c_short, + pub l_whence: c_short, + pub l_start: off_t, + pub l_len: off_t, + pub l_pid: crate::pid_t, + } + + pub struct pthread_attr_t { + __size: [u32; 11], + } + + pub struct sigset_t { + __val: [c_ulong; 32], + } + + pub struct msghdr { + pub msg_name: *mut c_void, + pub msg_namelen: crate::socklen_t, + pub msg_iov: *mut crate::iovec, + pub msg_iovlen: c_int, + pub msg_control: *mut c_void, + pub msg_controllen: crate::socklen_t, + pub msg_flags: c_int, + } + + pub struct cmsghdr { + pub cmsg_len: crate::socklen_t, + pub cmsg_level: c_int, + pub cmsg_type: c_int, + } + + pub struct sem_t { + __val: [c_int; 4], + } + pub struct stat { + pub st_dev: crate::dev_t, + #[cfg(emscripten_old_stat_abi)] + __st_dev_padding: c_int, + #[cfg(emscripten_old_stat_abi)] + __st_ino_truncated: c_long, + pub st_mode: mode_t, + pub st_nlink: crate::nlink_t, + pub st_uid: crate::uid_t, + pub st_gid: crate::gid_t, + pub st_rdev: crate::dev_t, + #[cfg(emscripten_old_stat_abi)] + __st_rdev_padding: c_int, + pub st_size: off_t, + pub st_blksize: crate::blksize_t, + pub st_blocks: crate::blkcnt_t, + pub st_atime: crate::time_t, + pub st_atime_nsec: c_long, + pub st_mtime: crate::time_t, + pub st_mtime_nsec: c_long, + pub st_ctime: crate::time_t, + pub st_ctime_nsec: c_long, + pub st_ino: crate::ino_t, + } + + pub struct stack_t { + pub ss_sp: *mut c_void, + pub ss_flags: c_int, + pub ss_size: size_t, + } + + pub struct shmid_ds { + pub shm_perm: crate::ipc_perm, + pub shm_segsz: size_t, + pub shm_atime: crate::time_t, + pub shm_dtime: crate::time_t, + pub shm_ctime: crate::time_t, + pub shm_cpid: crate::pid_t, + pub shm_lpid: crate::pid_t, + pub shm_nattch: c_ulong, + __pad1: c_ulong, + __pad2: c_ulong, + } + + pub struct msqid_ds { + pub msg_perm: crate::ipc_perm, + pub msg_stime: crate::time_t, + pub msg_rtime: crate::time_t, + pub msg_ctime: crate::time_t, + pub __msg_cbytes: c_ulong, + pub msg_qnum: crate::msgqnum_t, + pub msg_qbytes: crate::msglen_t, + pub msg_lspid: crate::pid_t, + pub msg_lrpid: crate::pid_t, + __pad1: c_ulong, + __pad2: c_ulong, + } + + pub struct statfs { + pub f_type: c_ulong, + pub f_bsize: c_ulong, + pub f_blocks: crate::fsblkcnt_t, + pub f_bfree: crate::fsblkcnt_t, + pub f_bavail: crate::fsblkcnt_t, + pub f_files: crate::fsfilcnt_t, + pub f_ffree: crate::fsfilcnt_t, + pub f_fsid: crate::fsid_t, + pub f_namelen: c_ulong, + pub f_frsize: c_ulong, + pub f_flags: c_ulong, + pub f_spare: [c_ulong; 4], + } + + pub struct siginfo_t { + pub si_signo: c_int, + pub si_errno: c_int, + pub si_code: c_int, + pub _pad: [c_int; 29], + _align: [usize; 0], + } + + pub struct arpd_request { + pub req: c_ushort, + pub ip: u32, + pub dev: c_ulong, + pub stamp: c_ulong, + pub updated: c_ulong, + pub ha: [c_uchar; crate::MAX_ADDR_LEN], + } + + #[repr(align(4))] + pub struct pthread_mutex_t { + size: [u8; crate::__SIZEOF_PTHREAD_MUTEX_T], + } + + #[repr(align(4))] + pub struct pthread_rwlock_t { + size: [u8; crate::__SIZEOF_PTHREAD_RWLOCK_T], + } + + #[repr(align(4))] + pub struct pthread_mutexattr_t { + size: [u8; crate::__SIZEOF_PTHREAD_MUTEXATTR_T], + } + + #[repr(align(4))] + pub struct pthread_rwlockattr_t { + size: [u8; crate::__SIZEOF_PTHREAD_RWLOCKATTR_T], + } + + #[repr(align(4))] + pub struct pthread_condattr_t { + size: [u8; crate::__SIZEOF_PTHREAD_CONDATTR_T], + } +} + +s_no_extra_traits! 
{ + pub struct dirent { + pub d_ino: crate::ino_t, + pub d_off: off_t, + pub d_reclen: c_ushort, + pub d_type: c_uchar, + pub d_name: [c_char; 256], + } + + pub struct sysinfo { + pub uptime: c_ulong, + pub loads: [c_ulong; 3], + pub totalram: c_ulong, + pub freeram: c_ulong, + pub sharedram: c_ulong, + pub bufferram: c_ulong, + pub totalswap: c_ulong, + pub freeswap: c_ulong, + pub procs: c_ushort, + pub pad: c_ushort, + pub totalhigh: c_ulong, + pub freehigh: c_ulong, + pub mem_unit: c_uint, + pub __reserved: [c_char; 256], + } + + pub struct mq_attr { + pub mq_flags: c_long, + pub mq_maxmsg: c_long, + pub mq_msgsize: c_long, + pub mq_curmsgs: c_long, + pad: [c_long; 4], + } + + #[cfg_attr(target_pointer_width = "32", repr(align(4)))] + #[cfg_attr(target_pointer_width = "64", repr(align(8)))] + pub struct pthread_cond_t { + size: [u8; crate::__SIZEOF_PTHREAD_COND_T], + } + + #[repr(align(8))] + pub struct max_align_t { + priv_: [f64; 3], + } +} + +cfg_if! { + if #[cfg(feature = "extra_traits")] { + impl PartialEq for dirent { + fn eq(&self, other: &dirent) -> bool { + self.d_ino == other.d_ino + && self.d_off == other.d_off + && self.d_reclen == other.d_reclen + && self.d_type == other.d_type + && self + .d_name + .iter() + .zip(other.d_name.iter()) + .all(|(a, b)| a == b) + } + } + impl Eq for dirent {} + impl hash::Hash for dirent { + fn hash<H: hash::Hasher>(&self, state: &mut H) { + self.d_ino.hash(state); + self.d_off.hash(state); + self.d_reclen.hash(state); + self.d_type.hash(state); + self.d_name.hash(state); + } + } + + impl PartialEq for sysinfo { + fn eq(&self, other: &sysinfo) -> bool { + self.uptime == other.uptime + && self.loads == other.loads + && self.totalram == other.totalram + && self.freeram == other.freeram + && self.sharedram == other.sharedram + && self.bufferram == other.bufferram + && self.totalswap == other.totalswap + && self.freeswap == other.freeswap + && self.procs == other.procs + && self.pad == other.pad + && self.totalhigh == other.totalhigh + && self.freehigh == other.freehigh + && self.mem_unit == other.mem_unit + && self + .__reserved + .iter() + .zip(other.__reserved.iter()) + .all(|(a, b)| a == b) + } + } + impl Eq for sysinfo {} + impl hash::Hash for sysinfo { + fn hash<H: hash::Hasher>(&self, state: &mut H) { + self.uptime.hash(state); + self.loads.hash(state); + self.totalram.hash(state); + self.freeram.hash(state); + self.sharedram.hash(state); + self.bufferram.hash(state); + self.totalswap.hash(state); + self.freeswap.hash(state); + self.procs.hash(state); + self.pad.hash(state); + self.totalhigh.hash(state); + self.freehigh.hash(state); + self.mem_unit.hash(state); + self.__reserved.hash(state); + } + } + + impl PartialEq for mq_attr { + fn eq(&self, other: &mq_attr) -> bool { + self.mq_flags == other.mq_flags + && self.mq_maxmsg == other.mq_maxmsg + && self.mq_msgsize == other.mq_msgsize + && self.mq_curmsgs == other.mq_curmsgs + } + } + impl Eq for mq_attr {} + impl hash::Hash for mq_attr { + fn hash<H: hash::Hasher>(&self, state: &mut H) { + self.mq_flags.hash(state); + self.mq_maxmsg.hash(state); + self.mq_msgsize.hash(state); + self.mq_curmsgs.hash(state); + } + } + + impl PartialEq for pthread_cond_t { + fn eq(&self, other: &pthread_cond_t) -> bool { + self.size.iter().zip(other.size.iter()).all(|(a, b)| a == b) + } + } + impl Eq for pthread_cond_t {} + impl hash::Hash for pthread_cond_t { + fn hash<H: hash::Hasher>(&self, state: &mut H) { + self.size.hash(state); + } + } + } +} + +pub const MADV_SOFT_OFFLINE: c_int = 101; +pub const MS_NOUSER: c_ulong = 0x80000000; +pub const MS_RMT_MASK: c_ulong =
0x02800051; + +pub const ABDAY_1: crate::nl_item = 0x20000; +pub const ABDAY_2: crate::nl_item = 0x20001; +pub const ABDAY_3: crate::nl_item = 0x20002; +pub const ABDAY_4: crate::nl_item = 0x20003; +pub const ABDAY_5: crate::nl_item = 0x20004; +pub const ABDAY_6: crate::nl_item = 0x20005; +pub const ABDAY_7: crate::nl_item = 0x20006; + +pub const DAY_1: crate::nl_item = 0x20007; +pub const DAY_2: crate::nl_item = 0x20008; +pub const DAY_3: crate::nl_item = 0x20009; +pub const DAY_4: crate::nl_item = 0x2000A; +pub const DAY_5: crate::nl_item = 0x2000B; +pub const DAY_6: crate::nl_item = 0x2000C; +pub const DAY_7: crate::nl_item = 0x2000D; + +pub const ABMON_1: crate::nl_item = 0x2000E; +pub const ABMON_2: crate::nl_item = 0x2000F; +pub const ABMON_3: crate::nl_item = 0x20010; +pub const ABMON_4: crate::nl_item = 0x20011; +pub const ABMON_5: crate::nl_item = 0x20012; +pub const ABMON_6: crate::nl_item = 0x20013; +pub const ABMON_7: crate::nl_item = 0x20014; +pub const ABMON_8: crate::nl_item = 0x20015; +pub const ABMON_9: crate::nl_item = 0x20016; +pub const ABMON_10: crate::nl_item = 0x20017; +pub const ABMON_11: crate::nl_item = 0x20018; +pub const ABMON_12: crate::nl_item = 0x20019; + +pub const MON_1: crate::nl_item = 0x2001A; +pub const MON_2: crate::nl_item = 0x2001B; +pub const MON_3: crate::nl_item = 0x2001C; +pub const MON_4: crate::nl_item = 0x2001D; +pub const MON_5: crate::nl_item = 0x2001E; +pub const MON_6: crate::nl_item = 0x2001F; +pub const MON_7: crate::nl_item = 0x20020; +pub const MON_8: crate::nl_item = 0x20021; +pub const MON_9: crate::nl_item = 0x20022; +pub const MON_10: crate::nl_item = 0x20023; +pub const MON_11: crate::nl_item = 0x20024; +pub const MON_12: crate::nl_item = 0x20025; + +pub const AM_STR: crate::nl_item = 0x20026; +pub const PM_STR: crate::nl_item = 0x20027; + +pub const D_T_FMT: crate::nl_item = 0x20028; +pub const D_FMT: crate::nl_item = 0x20029; +pub const T_FMT: crate::nl_item = 0x2002A; +pub const T_FMT_AMPM: crate::nl_item = 0x2002B; + +pub const ERA: crate::nl_item = 0x2002C; +pub const ERA_D_FMT: crate::nl_item = 0x2002E; +pub const ALT_DIGITS: crate::nl_item = 0x2002F; +pub const ERA_D_T_FMT: crate::nl_item = 0x20030; +pub const ERA_T_FMT: crate::nl_item = 0x20031; + +pub const CODESET: crate::nl_item = 14; + +pub const CRNCYSTR: crate::nl_item = 0x4000F; + +pub const RUSAGE_THREAD: c_int = 1; +pub const RUSAGE_CHILDREN: c_int = -1; + +pub const RADIXCHAR: crate::nl_item = 0x10000; +pub const THOUSEP: crate::nl_item = 0x10001; + +pub const YESEXPR: crate::nl_item = 0x50000; +pub const NOEXPR: crate::nl_item = 0x50001; +pub const YESSTR: crate::nl_item = 0x50002; +pub const NOSTR: crate::nl_item = 0x50003; + +pub const FILENAME_MAX: c_uint = 4096; +pub const L_tmpnam: c_uint = 20; +pub const _PC_LINK_MAX: c_int = 0; +pub const _PC_MAX_CANON: c_int = 1; +pub const _PC_MAX_INPUT: c_int = 2; +pub const _PC_NAME_MAX: c_int = 3; +pub const _PC_PATH_MAX: c_int = 4; +pub const _PC_PIPE_BUF: c_int = 5; +pub const _PC_CHOWN_RESTRICTED: c_int = 6; +pub const _PC_NO_TRUNC: c_int = 7; +pub const _PC_VDISABLE: c_int = 8; +pub const _PC_SYNC_IO: c_int = 9; +pub const _PC_ASYNC_IO: c_int = 10; +pub const _PC_PRIO_IO: c_int = 11; +pub const _PC_SOCK_MAXBUF: c_int = 12; +pub const _PC_FILESIZEBITS: c_int = 13; +pub const _PC_REC_INCR_XFER_SIZE: c_int = 14; +pub const _PC_REC_MAX_XFER_SIZE: c_int = 15; +pub const _PC_REC_MIN_XFER_SIZE: c_int = 16; +pub const _PC_REC_XFER_ALIGN: c_int = 17; +pub const _PC_ALLOC_SIZE_MIN: c_int = 18; +pub const _PC_SYMLINK_MAX: 
c_int = 19; +pub const _PC_2_SYMLINKS: c_int = 20; + +pub const _SC_ARG_MAX: c_int = 0; +pub const _SC_CHILD_MAX: c_int = 1; +pub const _SC_CLK_TCK: c_int = 2; +pub const _SC_NGROUPS_MAX: c_int = 3; +pub const _SC_OPEN_MAX: c_int = 4; +pub const _SC_STREAM_MAX: c_int = 5; +pub const _SC_TZNAME_MAX: c_int = 6; +pub const _SC_JOB_CONTROL: c_int = 7; +pub const _SC_SAVED_IDS: c_int = 8; +pub const _SC_REALTIME_SIGNALS: c_int = 9; +pub const _SC_PRIORITY_SCHEDULING: c_int = 10; +pub const _SC_TIMERS: c_int = 11; +pub const _SC_ASYNCHRONOUS_IO: c_int = 12; +pub const _SC_PRIORITIZED_IO: c_int = 13; +pub const _SC_SYNCHRONIZED_IO: c_int = 14; +pub const _SC_FSYNC: c_int = 15; +pub const _SC_MAPPED_FILES: c_int = 16; +pub const _SC_MEMLOCK: c_int = 17; +pub const _SC_MEMLOCK_RANGE: c_int = 18; +pub const _SC_MEMORY_PROTECTION: c_int = 19; +pub const _SC_MESSAGE_PASSING: c_int = 20; +pub const _SC_SEMAPHORES: c_int = 21; +pub const _SC_SHARED_MEMORY_OBJECTS: c_int = 22; +pub const _SC_AIO_LISTIO_MAX: c_int = 23; +pub const _SC_AIO_MAX: c_int = 24; +pub const _SC_AIO_PRIO_DELTA_MAX: c_int = 25; +pub const _SC_DELAYTIMER_MAX: c_int = 26; +pub const _SC_MQ_OPEN_MAX: c_int = 27; +pub const _SC_MQ_PRIO_MAX: c_int = 28; +pub const _SC_VERSION: c_int = 29; +pub const _SC_PAGESIZE: c_int = 30; +pub const _SC_PAGE_SIZE: c_int = _SC_PAGESIZE; +pub const _SC_RTSIG_MAX: c_int = 31; +pub const _SC_SEM_NSEMS_MAX: c_int = 32; +pub const _SC_SEM_VALUE_MAX: c_int = 33; +pub const _SC_SIGQUEUE_MAX: c_int = 34; +pub const _SC_TIMER_MAX: c_int = 35; +pub const _SC_BC_BASE_MAX: c_int = 36; +pub const _SC_BC_DIM_MAX: c_int = 37; +pub const _SC_BC_SCALE_MAX: c_int = 38; +pub const _SC_BC_STRING_MAX: c_int = 39; +pub const _SC_COLL_WEIGHTS_MAX: c_int = 40; +pub const _SC_EXPR_NEST_MAX: c_int = 42; +pub const _SC_LINE_MAX: c_int = 43; +pub const _SC_RE_DUP_MAX: c_int = 44; +pub const _SC_2_VERSION: c_int = 46; +pub const _SC_2_C_BIND: c_int = 47; +pub const _SC_2_C_DEV: c_int = 48; +pub const _SC_2_FORT_DEV: c_int = 49; +pub const _SC_2_FORT_RUN: c_int = 50; +pub const _SC_2_SW_DEV: c_int = 51; +pub const _SC_2_LOCALEDEF: c_int = 52; +pub const _SC_UIO_MAXIOV: c_int = 60; +pub const _SC_IOV_MAX: c_int = 60; +pub const _SC_THREADS: c_int = 67; +pub const _SC_THREAD_SAFE_FUNCTIONS: c_int = 68; +pub const _SC_GETGR_R_SIZE_MAX: c_int = 69; +pub const _SC_GETPW_R_SIZE_MAX: c_int = 70; +pub const _SC_LOGIN_NAME_MAX: c_int = 71; +pub const _SC_TTY_NAME_MAX: c_int = 72; +pub const _SC_THREAD_DESTRUCTOR_ITERATIONS: c_int = 73; +pub const _SC_THREAD_KEYS_MAX: c_int = 74; +pub const _SC_THREAD_STACK_MIN: c_int = 75; +pub const _SC_THREAD_THREADS_MAX: c_int = 76; +pub const _SC_THREAD_ATTR_STACKADDR: c_int = 77; +pub const _SC_THREAD_ATTR_STACKSIZE: c_int = 78; +pub const _SC_THREAD_PRIORITY_SCHEDULING: c_int = 79; +pub const _SC_THREAD_PRIO_INHERIT: c_int = 80; +pub const _SC_THREAD_PRIO_PROTECT: c_int = 81; +pub const _SC_THREAD_PROCESS_SHARED: c_int = 82; +pub const _SC_NPROCESSORS_CONF: c_int = 83; +pub const _SC_NPROCESSORS_ONLN: c_int = 84; +pub const _SC_PHYS_PAGES: c_int = 85; +pub const _SC_AVPHYS_PAGES: c_int = 86; +pub const _SC_ATEXIT_MAX: c_int = 87; +pub const _SC_PASS_MAX: c_int = 88; +pub const _SC_XOPEN_VERSION: c_int = 89; +pub const _SC_XOPEN_XCU_VERSION: c_int = 90; +pub const _SC_XOPEN_UNIX: c_int = 91; +pub const _SC_XOPEN_CRYPT: c_int = 92; +pub const _SC_XOPEN_ENH_I18N: c_int = 93; +pub const _SC_XOPEN_SHM: c_int = 94; +pub const _SC_2_CHAR_TERM: c_int = 95; +pub const _SC_2_UPE: c_int = 97; +pub const 
_SC_XOPEN_XPG2: c_int = 98; +pub const _SC_XOPEN_XPG3: c_int = 99; +pub const _SC_XOPEN_XPG4: c_int = 100; +pub const _SC_NZERO: c_int = 109; +pub const _SC_XBS5_ILP32_OFF32: c_int = 125; +pub const _SC_XBS5_ILP32_OFFBIG: c_int = 126; +pub const _SC_XBS5_LP64_OFF64: c_int = 127; +pub const _SC_XBS5_LPBIG_OFFBIG: c_int = 128; +pub const _SC_XOPEN_LEGACY: c_int = 129; +pub const _SC_XOPEN_REALTIME: c_int = 130; +pub const _SC_XOPEN_REALTIME_THREADS: c_int = 131; +pub const _SC_ADVISORY_INFO: c_int = 132; +pub const _SC_BARRIERS: c_int = 133; +pub const _SC_CLOCK_SELECTION: c_int = 137; +pub const _SC_CPUTIME: c_int = 138; +pub const _SC_THREAD_CPUTIME: c_int = 139; +pub const _SC_MONOTONIC_CLOCK: c_int = 149; +pub const _SC_READER_WRITER_LOCKS: c_int = 153; +pub const _SC_SPIN_LOCKS: c_int = 154; +pub const _SC_REGEXP: c_int = 155; +pub const _SC_SHELL: c_int = 157; +pub const _SC_SPAWN: c_int = 159; +pub const _SC_SPORADIC_SERVER: c_int = 160; +pub const _SC_THREAD_SPORADIC_SERVER: c_int = 161; +pub const _SC_TIMEOUTS: c_int = 164; +pub const _SC_TYPED_MEMORY_OBJECTS: c_int = 165; +pub const _SC_2_PBS: c_int = 168; +pub const _SC_2_PBS_ACCOUNTING: c_int = 169; +pub const _SC_2_PBS_LOCATE: c_int = 170; +pub const _SC_2_PBS_MESSAGE: c_int = 171; +pub const _SC_2_PBS_TRACK: c_int = 172; +pub const _SC_SYMLOOP_MAX: c_int = 173; +pub const _SC_STREAMS: c_int = 174; +pub const _SC_2_PBS_CHECKPOINT: c_int = 175; +pub const _SC_V6_ILP32_OFF32: c_int = 176; +pub const _SC_V6_ILP32_OFFBIG: c_int = 177; +pub const _SC_V6_LP64_OFF64: c_int = 178; +pub const _SC_V6_LPBIG_OFFBIG: c_int = 179; +pub const _SC_HOST_NAME_MAX: c_int = 180; +pub const _SC_TRACE: c_int = 181; +pub const _SC_TRACE_EVENT_FILTER: c_int = 182; +pub const _SC_TRACE_INHERIT: c_int = 183; +pub const _SC_TRACE_LOG: c_int = 184; +pub const _SC_IPV6: c_int = 235; +pub const _SC_RAW_SOCKETS: c_int = 236; +pub const _SC_V7_ILP32_OFF32: c_int = 237; +pub const _SC_V7_ILP32_OFFBIG: c_int = 238; +pub const _SC_V7_LP64_OFF64: c_int = 239; +pub const _SC_V7_LPBIG_OFFBIG: c_int = 240; +pub const _SC_SS_REPL_MAX: c_int = 241; +pub const _SC_TRACE_EVENT_NAME_MAX: c_int = 242; +pub const _SC_TRACE_NAME_MAX: c_int = 243; +pub const _SC_TRACE_SYS_MAX: c_int = 244; +pub const _SC_TRACE_USER_EVENT_MAX: c_int = 245; +pub const _SC_XOPEN_STREAMS: c_int = 246; +pub const _SC_THREAD_ROBUST_PRIO_INHERIT: c_int = 247; +pub const _SC_THREAD_ROBUST_PRIO_PROTECT: c_int = 248; + +pub const RLIM_SAVED_MAX: crate::rlim_t = RLIM_INFINITY; +pub const RLIM_SAVED_CUR: crate::rlim_t = RLIM_INFINITY; + +pub const GLOB_ERR: c_int = 1 << 0; +pub const GLOB_MARK: c_int = 1 << 1; +pub const GLOB_NOSORT: c_int = 1 << 2; +pub const GLOB_DOOFFS: c_int = 1 << 3; +pub const GLOB_NOCHECK: c_int = 1 << 4; +pub const GLOB_APPEND: c_int = 1 << 5; +pub const GLOB_NOESCAPE: c_int = 1 << 6; + +pub const GLOB_NOSPACE: c_int = 1; +pub const GLOB_ABORTED: c_int = 2; +pub const GLOB_NOMATCH: c_int = 3; + +pub const POSIX_MADV_NORMAL: c_int = 0; +pub const POSIX_MADV_RANDOM: c_int = 1; +pub const POSIX_MADV_SEQUENTIAL: c_int = 2; +pub const POSIX_MADV_WILLNEED: c_int = 3; + +pub const AT_EACCESS: c_int = 0x200; + +pub const S_IEXEC: mode_t = 0o0100; +pub const S_IWRITE: mode_t = 0o0200; +pub const S_IREAD: mode_t = 0o0400; + +pub const F_LOCK: c_int = 1; +pub const F_TEST: c_int = 3; +pub const F_TLOCK: c_int = 2; +pub const F_ULOCK: c_int = 0; + +pub const ST_RDONLY: c_ulong = 1; +pub const ST_NOSUID: c_ulong = 2; +pub const ST_NODEV: c_ulong = 4; +pub const ST_NOEXEC: c_ulong = 8; +pub 
const ST_SYNCHRONOUS: c_ulong = 16; +pub const ST_MANDLOCK: c_ulong = 64; +pub const ST_WRITE: c_ulong = 128; +pub const ST_APPEND: c_ulong = 256; +pub const ST_IMMUTABLE: c_ulong = 512; +pub const ST_NOATIME: c_ulong = 1024; +pub const ST_NODIRATIME: c_ulong = 2048; + +pub const RTLD_NEXT: *mut c_void = -1i64 as *mut c_void; +pub const RTLD_DEFAULT: *mut c_void = ptr::null_mut(); +pub const RTLD_NODELETE: c_int = 0x1000; +pub const RTLD_NOW: c_int = 0x2; + +pub const PTHREAD_MUTEX_INITIALIZER: pthread_mutex_t = pthread_mutex_t { + size: [0; __SIZEOF_PTHREAD_MUTEX_T], +}; +pub const PTHREAD_COND_INITIALIZER: pthread_cond_t = pthread_cond_t { + size: [0; __SIZEOF_PTHREAD_COND_T], +}; +pub const PTHREAD_RWLOCK_INITIALIZER: pthread_rwlock_t = pthread_rwlock_t { + size: [0; __SIZEOF_PTHREAD_RWLOCK_T], +}; + +pub const PTHREAD_MUTEX_NORMAL: c_int = 0; +pub const PTHREAD_MUTEX_RECURSIVE: c_int = 1; +pub const PTHREAD_MUTEX_ERRORCHECK: c_int = 2; +pub const PTHREAD_MUTEX_DEFAULT: c_int = PTHREAD_MUTEX_NORMAL; +pub const PTHREAD_PROCESS_PRIVATE: c_int = 0; +pub const PTHREAD_PROCESS_SHARED: c_int = 1; +pub const __SIZEOF_PTHREAD_COND_T: usize = 48; + +pub const SCHED_OTHER: c_int = 0; +pub const SCHED_FIFO: c_int = 1; +pub const SCHED_RR: c_int = 2; +pub const SCHED_BATCH: c_int = 3; +pub const SCHED_IDLE: c_int = 5; + +pub const AF_IB: c_int = 27; +pub const AF_MPLS: c_int = 28; +pub const AF_NFC: c_int = 39; +pub const AF_VSOCK: c_int = 40; +pub const PF_IB: c_int = AF_IB; +pub const PF_MPLS: c_int = AF_MPLS; +pub const PF_NFC: c_int = AF_NFC; +pub const PF_VSOCK: c_int = AF_VSOCK; + +// System V IPC +pub const IPC_PRIVATE: crate::key_t = 0; + +pub const IPC_CREAT: c_int = 0o1000; +pub const IPC_EXCL: c_int = 0o2000; +pub const IPC_NOWAIT: c_int = 0o4000; + +pub const IPC_RMID: c_int = 0; +pub const IPC_SET: c_int = 1; +pub const IPC_STAT: c_int = 2; +pub const IPC_INFO: c_int = 3; +pub const MSG_STAT: c_int = 11; +pub const MSG_INFO: c_int = 12; + +pub const MSG_NOERROR: c_int = 0o10000; +pub const MSG_EXCEPT: c_int = 0o20000; + +pub const SHM_R: c_int = 0o400; +pub const SHM_W: c_int = 0o200; + +pub const SHM_RDONLY: c_int = 0o10000; +pub const SHM_RND: c_int = 0o20000; +pub const SHM_REMAP: c_int = 0o40000; +pub const SHM_EXEC: c_int = 0o100000; + +pub const SHM_LOCK: c_int = 11; +pub const SHM_UNLOCK: c_int = 12; + +pub const SHM_HUGETLB: c_int = 0o4000; +pub const SHM_NORESERVE: c_int = 0o10000; + +pub const LOG_NFACILITIES: c_int = 24; + +pub const SEM_FAILED: *mut crate::sem_t = ptr::null_mut(); + +pub const AI_PASSIVE: c_int = 0x0001; +pub const AI_CANONNAME: c_int = 0x0002; +pub const AI_NUMERICHOST: c_int = 0x0004; +pub const AI_V4MAPPED: c_int = 0x0008; +pub const AI_ALL: c_int = 0x0010; +pub const AI_ADDRCONFIG: c_int = 0x0020; + +pub const AI_NUMERICSERV: c_int = 0x0400; + +pub const EAI_BADFLAGS: c_int = -1; +pub const EAI_NONAME: c_int = -2; +pub const EAI_AGAIN: c_int = -3; +pub const EAI_FAIL: c_int = -4; +pub const EAI_FAMILY: c_int = -6; +pub const EAI_SOCKTYPE: c_int = -7; +pub const EAI_SERVICE: c_int = -8; +pub const EAI_MEMORY: c_int = -10; +pub const EAI_OVERFLOW: c_int = -12; + +pub const NI_NUMERICHOST: c_int = 1; +pub const NI_NUMERICSERV: c_int = 2; +pub const NI_NOFQDN: c_int = 4; +pub const NI_NAMEREQD: c_int = 8; +pub const NI_DGRAM: c_int = 16; + +pub const SYNC_FILE_RANGE_WAIT_BEFORE: c_uint = 1; +pub const SYNC_FILE_RANGE_WRITE: c_uint = 2; +pub const SYNC_FILE_RANGE_WAIT_AFTER: c_uint = 4; + +pub const EAI_SYSTEM: c_int = -11; + +pub const MREMAP_MAYMOVE: c_int 
= 1; +pub const MREMAP_FIXED: c_int = 2; + +pub const ITIMER_REAL: c_int = 0; +pub const ITIMER_VIRTUAL: c_int = 1; +pub const ITIMER_PROF: c_int = 2; + +pub const _POSIX_VDISABLE: crate::cc_t = 0; + +pub const FALLOC_FL_KEEP_SIZE: c_int = 0x01; +pub const FALLOC_FL_PUNCH_HOLE: c_int = 0x02; + +pub const NCCS: usize = 32; + +pub const O_TRUNC: c_int = 512; +pub const O_NOATIME: c_int = 0o1000000; +pub const O_CLOEXEC: c_int = 0x80000; + +// Defined as wasi value. +pub const EPERM: c_int = 63; +pub const ENOENT: c_int = 44; +pub const ESRCH: c_int = 71; +pub const EINTR: c_int = 27; +pub const EIO: c_int = 29; +pub const ENXIO: c_int = 60; +pub const E2BIG: c_int = 1; +pub const ENOEXEC: c_int = 45; +pub const EBADF: c_int = 8; +pub const ECHILD: c_int = 12; +pub const EAGAIN: c_int = 6; +pub const ENOMEM: c_int = 48; +pub const EACCES: c_int = 2; +pub const EFAULT: c_int = 21; +pub const ENOTBLK: c_int = 105; +pub const EBUSY: c_int = 10; +pub const EEXIST: c_int = 20; +pub const EXDEV: c_int = 75; +pub const ENODEV: c_int = 43; +pub const ENOTDIR: c_int = 54; +pub const EISDIR: c_int = 31; +pub const EINVAL: c_int = 28; +pub const ENFILE: c_int = 41; +pub const EMFILE: c_int = 33; +pub const ENOTTY: c_int = 59; +pub const ETXTBSY: c_int = 74; +pub const EFBIG: c_int = 22; +pub const ENOSPC: c_int = 51; +pub const ESPIPE: c_int = 70; +pub const EROFS: c_int = 69; +pub const EMLINK: c_int = 34; +pub const EPIPE: c_int = 64; +pub const EDOM: c_int = 18; +pub const ERANGE: c_int = 68; +pub const EWOULDBLOCK: c_int = EAGAIN; +pub const ENOLINK: c_int = 47; +pub const EPROTO: c_int = 65; +pub const EDEADLK: c_int = 16; +pub const EDEADLOCK: c_int = EDEADLK; +pub const ENAMETOOLONG: c_int = 37; +pub const ENOLCK: c_int = 46; +pub const ENOSYS: c_int = 52; +pub const ENOTEMPTY: c_int = 55; +pub const ELOOP: c_int = 32; +pub const ENOMSG: c_int = 49; +pub const EIDRM: c_int = 24; +pub const EMULTIHOP: c_int = 36; +pub const EBADMSG: c_int = 9; +pub const EOVERFLOW: c_int = 61; +pub const EILSEQ: c_int = 25; +pub const ENOTSOCK: c_int = 57; +pub const EDESTADDRREQ: c_int = 17; +pub const EMSGSIZE: c_int = 35; +pub const EPROTOTYPE: c_int = 67; +pub const ENOPROTOOPT: c_int = 50; +pub const EPROTONOSUPPORT: c_int = 66; +pub const EAFNOSUPPORT: c_int = 5; +pub const EADDRINUSE: c_int = 3; +pub const EADDRNOTAVAIL: c_int = 4; +pub const ENETDOWN: c_int = 38; +pub const ENETUNREACH: c_int = 40; +pub const ENETRESET: c_int = 39; +pub const ECONNABORTED: c_int = 13; +pub const ECONNRESET: c_int = 15; +pub const ENOBUFS: c_int = 42; +pub const EISCONN: c_int = 30; +pub const ENOTCONN: c_int = 53; +pub const ETIMEDOUT: c_int = 73; +pub const ECONNREFUSED: c_int = 14; +pub const EHOSTUNREACH: c_int = 23; +pub const EALREADY: c_int = 7; +pub const EINPROGRESS: c_int = 26; +pub const ESTALE: c_int = 72; +pub const EDQUOT: c_int = 19; +pub const ECANCELED: c_int = 11; +pub const EOWNERDEAD: c_int = 62; +pub const ENOTRECOVERABLE: c_int = 56; + +pub const ENOSTR: c_int = 100; +pub const EBFONT: c_int = 101; +pub const EBADSLT: c_int = 102; +pub const EBADRQC: c_int = 103; +pub const ENOANO: c_int = 104; +pub const ECHRNG: c_int = 106; +pub const EL3HLT: c_int = 107; +pub const EL3RST: c_int = 108; +pub const ELNRNG: c_int = 109; +pub const EUNATCH: c_int = 110; +pub const ENOCSI: c_int = 111; +pub const EL2HLT: c_int = 112; +pub const EBADE: c_int = 113; +pub const EBADR: c_int = 114; +pub const EXFULL: c_int = 115; +pub const ENODATA: c_int = 116; +pub const ETIME: c_int = 117; +pub const ENOSR: c_int = 118; 
+pub const ENONET: c_int = 119; +pub const ENOPKG: c_int = 120; +pub const EREMOTE: c_int = 121; +pub const EADV: c_int = 122; +pub const ESRMNT: c_int = 123; +pub const ECOMM: c_int = 124; +pub const EDOTDOT: c_int = 125; +pub const ENOTUNIQ: c_int = 126; +pub const EBADFD: c_int = 127; +pub const EREMCHG: c_int = 128; +pub const ELIBACC: c_int = 129; +pub const ELIBBAD: c_int = 130; +pub const ELIBSCN: c_int = 131; +pub const ELIBMAX: c_int = 132; +pub const ELIBEXEC: c_int = 133; +pub const ERESTART: c_int = 134; +pub const ESTRPIPE: c_int = 135; +pub const EUSERS: c_int = 136; +pub const ESOCKTNOSUPPORT: c_int = 137; +pub const EOPNOTSUPP: c_int = 138; +pub const ENOTSUP: c_int = EOPNOTSUPP; +pub const EPFNOSUPPORT: c_int = 139; +pub const ESHUTDOWN: c_int = 140; +pub const ETOOMANYREFS: c_int = 141; +pub const EHOSTDOWN: c_int = 142; +pub const EUCLEAN: c_int = 143; +pub const ENOTNAM: c_int = 144; +pub const ENAVAIL: c_int = 145; +pub const EISNAM: c_int = 146; +pub const EREMOTEIO: c_int = 147; +pub const ENOMEDIUM: c_int = 148; +pub const EMEDIUMTYPE: c_int = 149; +pub const ENOKEY: c_int = 150; +pub const EKEYEXPIRED: c_int = 151; +pub const EKEYREVOKED: c_int = 152; +pub const EKEYREJECTED: c_int = 153; +pub const ERFKILL: c_int = 154; +pub const EHWPOISON: c_int = 155; +pub const EL2NSYNC: c_int = 156; + +pub const SA_NODEFER: c_int = 0x40000000; +pub const SA_RESETHAND: c_int = 0x80000000; +pub const SA_RESTART: c_int = 0x10000000; +pub const SA_NOCLDSTOP: c_int = 0x00000001; + +pub const BUFSIZ: c_uint = 1024; +pub const TMP_MAX: c_uint = 10000; +pub const FOPEN_MAX: c_uint = 1000; +pub const O_PATH: c_int = 0o10000000; +pub const O_EXEC: c_int = 0o10000000; +pub const O_SEARCH: c_int = 0o10000000; +pub const O_ACCMODE: c_int = 0o10000003; +pub const O_NDELAY: c_int = O_NONBLOCK; +pub const NI_MAXHOST: crate::socklen_t = 255; +pub const PTHREAD_STACK_MIN: size_t = 2048; +pub const POSIX_FADV_DONTNEED: c_int = 4; +pub const POSIX_FADV_NOREUSE: c_int = 5; + +pub const POSIX_MADV_DONTNEED: c_int = 4; + +pub const RLIM_INFINITY: crate::rlim_t = !0; +#[deprecated(since = "0.2.64", note = "Not stable across OS versions")] +pub const RLIMIT_NLIMITS: c_int = 16; +#[allow(deprecated)] +#[deprecated(since = "0.2.64", note = "Not stable across OS versions")] +pub const RLIM_NLIMITS: c_int = RLIMIT_NLIMITS; + +pub const MAP_ANONYMOUS: c_int = MAP_ANON; + +#[doc(hidden)] +#[deprecated(since = "0.2.55", note = "Use SIGSYS instead")] +pub const SIGUNUSED: c_int = crate::SIGSYS; + +pub const __SIZEOF_PTHREAD_CONDATTR_T: usize = 4; +pub const __SIZEOF_PTHREAD_MUTEXATTR_T: usize = 4; +pub const __SIZEOF_PTHREAD_RWLOCKATTR_T: usize = 8; + +pub const CPU_SETSIZE: c_int = 1024; + +pub const TCSANOW: c_int = 0; +pub const TCSADRAIN: c_int = 1; +pub const TCSAFLUSH: c_int = 2; + +pub const TIOCINQ: c_int = crate::FIONREAD; + +pub const RTLD_GLOBAL: c_int = 0x100; +pub const RTLD_NOLOAD: c_int = 0x4; + +pub const CLOCK_SGI_CYCLE: crate::clockid_t = 10; + +pub const MCL_CURRENT: c_int = 0x0001; +pub const MCL_FUTURE: c_int = 0x0002; + +pub const SIGSTKSZ: size_t = 8192; +pub const MINSIGSTKSZ: size_t = 2048; +pub const CBAUD: crate::tcflag_t = 0o0010017; +pub const TAB1: c_int = 0x00000800; +pub const TAB2: c_int = 0x00001000; +pub const TAB3: c_int = 0x00001800; +pub const CR1: c_int = 0x00000200; +pub const CR2: c_int = 0x00000400; +pub const CR3: c_int = 0x00000600; +pub const FF1: c_int = 0x00008000; +pub const BS1: c_int = 0x00002000; +pub const VT1: c_int = 0x00004000; +pub const VWERASE: usize 
= 14; +pub const VREPRINT: usize = 12; +pub const VSUSP: usize = 10; +pub const VSTART: usize = 8; +pub const VSTOP: usize = 9; +pub const VDISCARD: usize = 13; +pub const VTIME: usize = 5; +pub const IXON: crate::tcflag_t = 0x00000400; +pub const IXOFF: crate::tcflag_t = 0x00001000; +pub const ONLCR: crate::tcflag_t = 0x4; +pub const CSIZE: crate::tcflag_t = 0x00000030; +pub const CS6: crate::tcflag_t = 0x00000010; +pub const CS7: crate::tcflag_t = 0x00000020; +pub const CS8: crate::tcflag_t = 0x00000030; +pub const CSTOPB: crate::tcflag_t = 0x00000040; +pub const CREAD: crate::tcflag_t = 0x00000080; +pub const PARENB: crate::tcflag_t = 0x00000100; +pub const PARODD: crate::tcflag_t = 0x00000200; +pub const HUPCL: crate::tcflag_t = 0x00000400; +pub const CLOCAL: crate::tcflag_t = 0x00000800; +pub const ECHOKE: crate::tcflag_t = 0x00000800; +pub const ECHOE: crate::tcflag_t = 0x00000010; +pub const ECHOK: crate::tcflag_t = 0x00000020; +pub const ECHONL: crate::tcflag_t = 0x00000040; +pub const ECHOPRT: crate::tcflag_t = 0x00000400; +pub const ECHOCTL: crate::tcflag_t = 0x00000200; +pub const ISIG: crate::tcflag_t = 0x00000001; +pub const ICANON: crate::tcflag_t = 0x00000002; +pub const PENDIN: crate::tcflag_t = 0x00004000; +pub const NOFLSH: crate::tcflag_t = 0x00000080; +pub const CBAUDEX: crate::tcflag_t = 0o010000; +pub const VSWTC: usize = 7; +pub const OLCUC: crate::tcflag_t = 0o000002; +pub const NLDLY: crate::tcflag_t = 0o000400; +pub const CRDLY: crate::tcflag_t = 0o003000; +pub const TABDLY: crate::tcflag_t = 0o014000; +pub const BSDLY: crate::tcflag_t = 0o020000; +pub const FFDLY: crate::tcflag_t = 0o100000; +pub const VTDLY: crate::tcflag_t = 0o040000; +pub const XTABS: crate::tcflag_t = 0o014000; + +pub const B0: crate::speed_t = 0o000000; +pub const B50: crate::speed_t = 0o000001; +pub const B75: crate::speed_t = 0o000002; +pub const B110: crate::speed_t = 0o000003; +pub const B134: crate::speed_t = 0o000004; +pub const B150: crate::speed_t = 0o000005; +pub const B200: crate::speed_t = 0o000006; +pub const B300: crate::speed_t = 0o000007; +pub const B600: crate::speed_t = 0o000010; +pub const B1200: crate::speed_t = 0o000011; +pub const B1800: crate::speed_t = 0o000012; +pub const B2400: crate::speed_t = 0o000013; +pub const B4800: crate::speed_t = 0o000014; +pub const B9600: crate::speed_t = 0o000015; +pub const B19200: crate::speed_t = 0o000016; +pub const B38400: crate::speed_t = 0o000017; +pub const B57600: crate::speed_t = 0o010001; +pub const B115200: crate::speed_t = 0o010002; +pub const B230400: crate::speed_t = 0o010003; +pub const B460800: crate::speed_t = 0o010004; +pub const B500000: crate::speed_t = 0o010005; +pub const B576000: crate::speed_t = 0o010006; +pub const B921600: crate::speed_t = 0o010007; +pub const B1000000: crate::speed_t = 0o010010; +pub const B1152000: crate::speed_t = 0o010011; +pub const B1500000: crate::speed_t = 0o010012; +pub const B2000000: crate::speed_t = 0o010013; +pub const B2500000: crate::speed_t = 0o010014; +pub const B3000000: crate::speed_t = 0o010015; +pub const B3500000: crate::speed_t = 0o010016; +pub const B4000000: crate::speed_t = 0o010017; + +pub const SO_BINDTODEVICE: c_int = 25; +pub const SO_TIMESTAMP: c_int = 63; +pub const SO_MARK: c_int = 36; +pub const SO_RXQ_OVFL: c_int = 40; +pub const SO_PEEK_OFF: c_int = 42; +pub const SO_BUSY_POLL: c_int = 46; + +pub const __SIZEOF_PTHREAD_RWLOCK_T: usize = 32; +pub const __SIZEOF_PTHREAD_MUTEX_T: usize = 24; + +pub const O_DIRECT: c_int = 0x4000; +pub const O_DIRECTORY: c_int = 
0x10000; +pub const O_NOFOLLOW: c_int = 0x20000; +pub const O_ASYNC: c_int = 0x2000; + +pub const FIOCLEX: c_int = 0x5451; +pub const FIONBIO: c_int = 0x5421; + +pub const RLIMIT_RSS: c_int = 5; +pub const RLIMIT_NOFILE: c_int = 7; +pub const RLIMIT_AS: c_int = 9; +pub const RLIMIT_NPROC: c_int = 6; +pub const RLIMIT_MEMLOCK: c_int = 8; +pub const RLIMIT_CPU: c_int = 0; +pub const RLIMIT_FSIZE: c_int = 1; +pub const RLIMIT_DATA: c_int = 2; +pub const RLIMIT_STACK: c_int = 3; +pub const RLIMIT_CORE: c_int = 4; +pub const RLIMIT_LOCKS: c_int = 10; +pub const RLIMIT_SIGPENDING: c_int = 11; +pub const RLIMIT_MSGQUEUE: c_int = 12; +pub const RLIMIT_NICE: c_int = 13; +pub const RLIMIT_RTPRIO: c_int = 14; + +pub const O_APPEND: c_int = 1024; +pub const O_CREAT: c_int = 64; +pub const O_EXCL: c_int = 128; +pub const O_NOCTTY: c_int = 256; +pub const O_NONBLOCK: c_int = 2048; +pub const O_SYNC: c_int = 1052672; +pub const O_RSYNC: c_int = 1052672; +pub const O_DSYNC: c_int = 4096; + +pub const SOCK_NONBLOCK: c_int = 2048; + +pub const MAP_ANON: c_int = 0x0020; +pub const MAP_GROWSDOWN: c_int = 0x0100; +pub const MAP_DENYWRITE: c_int = 0x0800; +pub const MAP_EXECUTABLE: c_int = 0x01000; +pub const MAP_LOCKED: c_int = 0x02000; +pub const MAP_NORESERVE: c_int = 0x04000; +pub const MAP_POPULATE: c_int = 0x08000; +pub const MAP_NONBLOCK: c_int = 0x010000; +pub const MAP_STACK: c_int = 0x020000; + +pub const SOCK_STREAM: c_int = 1; +pub const SOCK_DGRAM: c_int = 2; +pub const SOCK_SEQPACKET: c_int = 5; + +pub const IPPROTO_MAX: c_int = 263; + +pub const SOL_SOCKET: c_int = 1; + +pub const SO_REUSEADDR: c_int = 2; +pub const SO_TYPE: c_int = 3; +pub const SO_ERROR: c_int = 4; +pub const SO_DONTROUTE: c_int = 5; +pub const SO_BROADCAST: c_int = 6; +pub const SO_SNDBUF: c_int = 7; +pub const SO_RCVBUF: c_int = 8; +pub const SO_KEEPALIVE: c_int = 9; +pub const SO_OOBINLINE: c_int = 10; +pub const SO_LINGER: c_int = 13; +pub const SO_REUSEPORT: c_int = 15; +pub const SO_RCVLOWAT: c_int = 18; +pub const SO_SNDLOWAT: c_int = 19; +pub const SO_RCVTIMEO: c_int = 66; +pub const SO_SNDTIMEO: c_int = 67; +pub const SO_ACCEPTCONN: c_int = 30; + +pub const IPV6_RTHDR_LOOSE: c_int = 0; +pub const IPV6_RTHDR_STRICT: c_int = 1; + +pub const SA_ONSTACK: c_int = 0x08000000; +pub const SA_SIGINFO: c_int = 0x00000004; +pub const SA_NOCLDWAIT: c_int = 0x00000002; + +pub const SIGCHLD: c_int = 17; +pub const SIGBUS: c_int = 7; +pub const SIGTTIN: c_int = 21; +pub const SIGTTOU: c_int = 22; +pub const SIGXCPU: c_int = 24; +pub const SIGXFSZ: c_int = 25; +pub const SIGVTALRM: c_int = 26; +pub const SIGPROF: c_int = 27; +pub const SIGWINCH: c_int = 28; +pub const SIGUSR1: c_int = 10; +pub const SIGUSR2: c_int = 12; +pub const SIGCONT: c_int = 18; +pub const SIGSTOP: c_int = 19; +pub const SIGTSTP: c_int = 20; +pub const SIGURG: c_int = 23; +pub const SIGIO: c_int = 29; +pub const SIGSYS: c_int = 31; +pub const SIGSTKFLT: c_int = 16; +pub const SIGPOLL: c_int = 29; +pub const SIGPWR: c_int = 30; +pub const SIG_SETMASK: c_int = 2; +pub const SIG_BLOCK: c_int = 0x000000; +pub const SIG_UNBLOCK: c_int = 0x01; + +pub const EXTPROC: crate::tcflag_t = 0x00010000; + +pub const MAP_HUGETLB: c_int = 0x040000; + +pub const F_GETLK: c_int = 12; +pub const F_GETOWN: c_int = 9; +pub const F_SETLK: c_int = 13; +pub const F_SETLKW: c_int = 14; +pub const F_SETOWN: c_int = 8; +pub const F_OFD_GETLK: c_int = 36; +pub const F_OFD_SETLK: c_int = 37; +pub const F_OFD_SETLKW: c_int = 38; + +pub const VEOF: usize = 4; +pub const VEOL: usize = 11; +pub 
const VEOL2: usize = 16; +pub const VMIN: usize = 6; +pub const IEXTEN: crate::tcflag_t = 0x00008000; +pub const TOSTOP: crate::tcflag_t = 0x00000100; +pub const FLUSHO: crate::tcflag_t = 0x00001000; + +pub const TCGETS: c_int = 0x5401; +pub const TCSETS: c_int = 0x5402; +pub const TCSETSW: c_int = 0x5403; +pub const TCSETSF: c_int = 0x5404; +pub const TCGETA: c_int = 0x5405; +pub const TCSETA: c_int = 0x5406; +pub const TCSETAW: c_int = 0x5407; +pub const TCSETAF: c_int = 0x5408; +pub const TCSBRK: c_int = 0x5409; +pub const TCXONC: c_int = 0x540A; +pub const TCFLSH: c_int = 0x540B; +pub const TIOCGSOFTCAR: c_int = 0x5419; +pub const TIOCSSOFTCAR: c_int = 0x541A; +pub const TIOCLINUX: c_int = 0x541C; +pub const TIOCGSERIAL: c_int = 0x541E; +pub const TIOCEXCL: c_int = 0x540C; +pub const TIOCNXCL: c_int = 0x540D; +pub const TIOCSCTTY: c_int = 0x540E; +pub const TIOCGPGRP: c_int = 0x540F; +pub const TIOCSPGRP: c_int = 0x5410; +pub const TIOCOUTQ: c_int = 0x5411; +pub const TIOCSTI: c_int = 0x5412; +pub const TIOCGWINSZ: c_int = 0x5413; +pub const TIOCSWINSZ: c_int = 0x5414; +pub const TIOCMGET: c_int = 0x5415; +pub const TIOCMBIS: c_int = 0x5416; +pub const TIOCMBIC: c_int = 0x5417; +pub const TIOCMSET: c_int = 0x5418; +pub const FIONREAD: c_int = 0x541B; +pub const TIOCCONS: c_int = 0x541D; + +pub const SYS_gettid: c_long = 224; // Valid for arm (32-bit) and x86 (32-bit) + +pub const POLLWRNORM: c_short = 0x100; +pub const POLLWRBAND: c_short = 0x200; + +pub const TIOCM_LE: c_int = 0x001; +pub const TIOCM_DTR: c_int = 0x002; +pub const TIOCM_RTS: c_int = 0x004; +pub const TIOCM_ST: c_int = 0x008; +pub const TIOCM_SR: c_int = 0x010; +pub const TIOCM_CTS: c_int = 0x020; +pub const TIOCM_CAR: c_int = 0x040; +pub const TIOCM_RNG: c_int = 0x080; +pub const TIOCM_DSR: c_int = 0x100; +pub const TIOCM_CD: c_int = TIOCM_CAR; +pub const TIOCM_RI: c_int = TIOCM_RNG; +pub const O_TMPFILE: c_int = 0x410000; + +pub const MAX_ADDR_LEN: usize = 7; +pub const ARPD_UPDATE: c_ushort = 0x01; +pub const ARPD_LOOKUP: c_ushort = 0x02; +pub const ARPD_FLUSH: c_ushort = 0x03; +pub const ATF_MAGIC: c_int = 0x80; + +pub const PRIO_PROCESS: c_int = 0; +pub const PRIO_PGRP: c_int = 1; +pub const PRIO_USER: c_int = 2; + +pub const SOMAXCONN: c_int = 128; + +f! 
{ + pub fn CMSG_NXTHDR(mhdr: *const msghdr, cmsg: *const cmsghdr) -> *mut cmsghdr { + if ((*cmsg).cmsg_len as usize) < size_of::<cmsghdr>() { + return core::ptr::null_mut::<cmsghdr>(); + } + let next = (cmsg as usize + super::CMSG_ALIGN((*cmsg).cmsg_len as usize)) as *mut cmsghdr; + let max = (*mhdr).msg_control as usize + (*mhdr).msg_controllen as usize; + if (next.offset(1)) as usize > max { + core::ptr::null_mut::<cmsghdr>() + } else { + next as *mut cmsghdr + } + } + + pub fn CPU_ZERO(cpuset: &mut cpu_set_t) -> () { + for slot in cpuset.bits.iter_mut() { + *slot = 0; + } + } + + pub fn CPU_SET(cpu: usize, cpuset: &mut cpu_set_t) -> () { + let size_in_bits = 8 * size_of_val(&cpuset.bits[0]); // 32, 64 etc + let (idx, offset) = (cpu / size_in_bits, cpu % size_in_bits); + cpuset.bits[idx] |= 1 << offset; + () + } + + pub fn CPU_CLR(cpu: usize, cpuset: &mut cpu_set_t) -> () { + let size_in_bits = 8 * size_of_val(&cpuset.bits[0]); // 32, 64 etc + let (idx, offset) = (cpu / size_in_bits, cpu % size_in_bits); + cpuset.bits[idx] &= !(1 << offset); + () + } + + pub fn CPU_ISSET(cpu: usize, cpuset: &cpu_set_t) -> bool { + let size_in_bits = 8 * size_of_val(&cpuset.bits[0]); + let (idx, offset) = (cpu / size_in_bits, cpu % size_in_bits); + 0 != (cpuset.bits[idx] & (1 << offset)) + } + + pub fn CPU_EQUAL(set1: &cpu_set_t, set2: &cpu_set_t) -> bool { + set1.bits == set2.bits + } +} + +safe_f! { + pub const fn makedev(major: c_uint, minor: c_uint) -> crate::dev_t { + let major = major as crate::dev_t; + let minor = minor as crate::dev_t; + let mut dev = 0; + dev |= (major & 0xfffff000) << 31 << 1; + dev |= (major & 0x00000fff) << 8; + dev |= (minor & 0xffffff00) << 12; + dev |= minor & 0x000000ff; + dev + } + + pub const fn major(dev: crate::dev_t) -> c_uint { + // see + // https://github.com/emscripten-core/emscripten/blob/ + // main/system/lib/libc/musl/include/sys/sysmacros.h + let mut major = 0; + major |= (dev >> 31 >> 1) & 0xfffff000; + major |= (dev >> 8) & 0x00000fff; + major as c_uint + } + + pub const fn minor(dev: crate::dev_t) -> c_uint { + // see + // https://github.com/emscripten-core/emscripten/blob/ + // main/system/lib/libc/musl/include/sys/sysmacros.h + let mut minor = 0; + minor |= (dev >> 12) & 0xffffff00; + minor |= dev & 0x000000ff; + minor as c_uint + } +} + +extern "C" { + pub fn getrlimit(resource: c_int, rlim: *mut crate::rlimit) -> c_int; + pub fn setrlimit(resource: c_int, rlim: *const crate::rlimit) -> c_int; + pub fn strerror_r(errnum: c_int, buf: *mut c_char, buflen: size_t) -> c_int; + + pub fn abs(i: c_int) -> c_int; + pub fn labs(i: c_long) -> c_long; + pub fn rand() -> c_int; + pub fn srand(seed: c_uint); + + pub fn gettimeofday(tp: *mut crate::timeval, tz: *mut c_void) -> c_int; + + pub fn setpwent(); + pub fn endpwent(); + pub fn getpwent() -> *mut passwd; + + pub fn shm_open(name: *const c_char, oflag: c_int, mode: mode_t) -> c_int; + + pub fn mprotect(addr: *mut c_void, len: size_t, prot: c_int) -> c_int; + pub fn __errno_location() -> *mut c_int; + + pub fn posix_fallocate(fd: c_int, offset: off_t, len: off_t) -> c_int; + pub fn pwritev(fd: c_int, iov: *const crate::iovec, iovcnt: c_int, offset: off_t) -> ssize_t; + pub fn preadv(fd: c_int, iov: *const crate::iovec, iovcnt: c_int, offset: off_t) -> ssize_t; + pub fn dup3(oldfd: c_int, newfd: c_int, flags: c_int) -> c_int; + pub fn nl_langinfo_l(item: crate::nl_item, locale: crate::locale_t) -> *mut c_char; + pub fn accept4( + fd: c_int, + addr: *mut crate::sockaddr, + len: *mut crate::socklen_t, + flg: c_int, + ) -> c_int; + pub fn 
getnameinfo( + sa: *const crate::sockaddr, + salen: crate::socklen_t, + host: *mut c_char, + hostlen: crate::socklen_t, + serv: *mut c_char, + servlen: crate::socklen_t, + flags: c_int, + ) -> c_int; + pub fn getloadavg(loadavg: *mut c_double, nelem: c_int) -> c_int; + + pub fn mkfifoat(dirfd: c_int, pathname: *const c_char, mode: mode_t) -> c_int; + pub fn if_nameindex() -> *mut if_nameindex; + pub fn if_freenameindex(ptr: *mut if_nameindex); + + pub fn mremap( + addr: *mut c_void, + len: size_t, + new_len: size_t, + flags: c_int, + ... + ) -> *mut c_void; + + pub fn glob( + pattern: *const c_char, + flags: c_int, + errfunc: Option<extern "C" fn(epath: *const c_char, errno: c_int) -> c_int>, + pglob: *mut crate::glob_t, + ) -> c_int; + pub fn globfree(pglob: *mut crate::glob_t); + + pub fn posix_madvise(addr: *mut c_void, len: size_t, advice: c_int) -> c_int; + + pub fn shm_unlink(name: *const c_char) -> c_int; + + pub fn seekdir(dirp: *mut crate::DIR, loc: c_long); + + pub fn telldir(dirp: *mut crate::DIR) -> c_long; + pub fn madvise(addr: *mut c_void, len: size_t, advice: c_int) -> c_int; + + pub fn msync(addr: *mut c_void, len: size_t, flags: c_int) -> c_int; + + pub fn recvfrom( + socket: c_int, + buf: *mut c_void, + len: size_t, + flags: c_int, + addr: *mut crate::sockaddr, + addrlen: *mut crate::socklen_t, + ) -> ssize_t; + pub fn mkstemps(template: *mut c_char, suffixlen: c_int) -> c_int; + pub fn nl_langinfo(item: crate::nl_item) -> *mut c_char; + + pub fn sendmmsg( + sockfd: c_int, + msgvec: *mut crate::mmsghdr, + vlen: c_uint, + flags: c_uint, + ) -> c_int; + pub fn recvmmsg( + sockfd: c_int, + msgvec: *mut crate::mmsghdr, + vlen: c_uint, + flags: c_uint, + timeout: *mut crate::timespec, + ) -> c_int; + pub fn sync(); + pub fn ioctl(fd: c_int, request: c_int, ...) -> c_int; + pub fn getpriority(which: c_int, who: crate::id_t) -> c_int; + pub fn setpriority(which: c_int, who: crate::id_t, prio: c_int) -> c_int; + pub fn pthread_create( + native: *mut crate::pthread_t, + attr: *const crate::pthread_attr_t, + f: extern "C" fn(*mut c_void) -> *mut c_void, + value: *mut c_void, + ) -> c_int; + + pub fn getentropy(buf: *mut c_void, buflen: size_t) -> c_int; + + pub fn getpwnam_r( + name: *const c_char, + pwd: *mut passwd, + buf: *mut c_char, + buflen: size_t, + result: *mut *mut passwd, + ) -> c_int; + pub fn getpwuid_r( + uid: crate::uid_t, + pwd: *mut passwd, + buf: *mut c_char, + buflen: size_t, + result: *mut *mut passwd, + ) -> c_int; + + // grp.h + pub fn getgrgid(gid: crate::gid_t) -> *mut crate::group; + pub fn getgrnam(name: *const c_char) -> *mut crate::group; + pub fn getgrnam_r( + name: *const c_char, + grp: *mut crate::group, + buf: *mut c_char, + buflen: size_t, + result: *mut *mut crate::group, + ) -> c_int; + pub fn getgrgid_r( + gid: crate::gid_t, + grp: *mut crate::group, + buf: *mut c_char, + buflen: size_t, + result: *mut *mut crate::group, + ) -> c_int; +} + +// Alias to 64 to mimic glibc's LFS64 support +mod lfs64; +pub use self::lfs64::*; diff --git a/vendor/libc/src/unix/linux_like/linux/arch/generic/mod.rs b/vendor/libc/src/unix/linux_like/linux/arch/generic/mod.rs new file mode 100644 index 00000000000000..465ceddeab64ec --- /dev/null +++ b/vendor/libc/src/unix/linux_like/linux/arch/generic/mod.rs @@ -0,0 +1,334 @@ +use crate::prelude::*; +use crate::Ioctl; + +s! 
{ + pub struct termios2 { + pub c_iflag: crate::tcflag_t, + pub c_oflag: crate::tcflag_t, + pub c_cflag: crate::tcflag_t, + pub c_lflag: crate::tcflag_t, + pub c_line: crate::cc_t, + pub c_cc: [crate::cc_t; 19], + pub c_ispeed: crate::speed_t, + pub c_ospeed: crate::speed_t, + } +} + +// include/uapi/asm-generic/socket.h +// arch/alpha/include/uapi/asm/socket.h +// tools/include/uapi/asm-generic/socket.h +// arch/mips/include/uapi/asm/socket.h +pub const SOL_SOCKET: c_int = 1; + +// Defined in unix/linux_like/mod.rs +// pub const SO_DEBUG: c_int = 1; +pub const SO_REUSEADDR: c_int = 2; +pub const SO_TYPE: c_int = 3; +pub const SO_ERROR: c_int = 4; +pub const SO_DONTROUTE: c_int = 5; +pub const SO_BROADCAST: c_int = 6; +pub const SO_SNDBUF: c_int = 7; +pub const SO_RCVBUF: c_int = 8; +pub const SO_KEEPALIVE: c_int = 9; +pub const SO_OOBINLINE: c_int = 10; +pub const SO_NO_CHECK: c_int = 11; +pub const SO_PRIORITY: c_int = 12; +pub const SO_LINGER: c_int = 13; +pub const SO_BSDCOMPAT: c_int = 14; +pub const SO_REUSEPORT: c_int = 15; +pub const SO_PASSCRED: c_int = 16; +pub const SO_PEERCRED: c_int = 17; +pub const SO_RCVLOWAT: c_int = 18; +pub const SO_SNDLOWAT: c_int = 19; +pub const SO_SECURITY_AUTHENTICATION: c_int = 22; +pub const SO_SECURITY_ENCRYPTION_TRANSPORT: c_int = 23; +pub const SO_SECURITY_ENCRYPTION_NETWORK: c_int = 24; +pub const SO_BINDTODEVICE: c_int = 25; +pub const SO_ATTACH_FILTER: c_int = 26; +pub const SO_DETACH_FILTER: c_int = 27; +pub const SO_GET_FILTER: c_int = SO_ATTACH_FILTER; +pub const SO_PEERNAME: c_int = 28; + +cfg_if! { + if #[cfg(all( + linux_time_bits64, + any(target_arch = "arm", target_arch = "x86"), + not(any(target_env = "musl", target_env = "ohos")) + ))] { + pub const SO_TIMESTAMP: c_int = SO_TIMESTAMP_NEW; + pub const SO_TIMESTAMPNS: c_int = SO_TIMESTAMPNS_NEW; + pub const SO_TIMESTAMPING: c_int = SO_TIMESTAMPING_NEW; + pub const SO_RCVTIMEO: c_int = SO_RCVTIMEO_NEW; + pub const SO_SNDTIMEO: c_int = SO_SNDTIMEO_NEW; + } else if #[cfg(all( + linux_time_bits64, + any(target_arch = "arm", target_arch = "x86"), + any(target_env = "musl", target_env = "ohos") + ))] { + pub const SO_TIMESTAMP: c_int = 63; + pub const SO_TIMESTAMPNS: c_int = 64; + pub const SO_TIMESTAMPING: c_int = 65; + pub const SO_RCVTIMEO: c_int = 66; + pub const SO_SNDTIMEO: c_int = 67; + } else { + const SO_TIMESTAMP_OLD: c_int = 29; + const SO_TIMESTAMPNS_OLD: c_int = 35; + const SO_TIMESTAMPING_OLD: c_int = 37; + const SO_RCVTIMEO_OLD: c_int = 20; + const SO_SNDTIMEO_OLD: c_int = 21; + + pub const SO_TIMESTAMP: c_int = SO_TIMESTAMP_OLD; + pub const SO_TIMESTAMPNS: c_int = SO_TIMESTAMPNS_OLD; + pub const SO_TIMESTAMPING: c_int = SO_TIMESTAMPING_OLD; + pub const SO_RCVTIMEO: c_int = SO_RCVTIMEO_OLD; + pub const SO_SNDTIMEO: c_int = SO_SNDTIMEO_OLD; + } +} + +pub const SO_ACCEPTCONN: c_int = 30; +pub const SO_PEERSEC: c_int = 31; +pub const SO_SNDBUFFORCE: c_int = 32; +pub const SO_RCVBUFFORCE: c_int = 33; +pub const SO_PASSSEC: c_int = 34; +pub const SO_MARK: c_int = 36; +pub const SO_PROTOCOL: c_int = 38; +pub const SO_DOMAIN: c_int = 39; +pub const SO_RXQ_OVFL: c_int = 40; +pub const SO_WIFI_STATUS: c_int = 41; +pub const SCM_WIFI_STATUS: c_int = SO_WIFI_STATUS; +pub const SO_PEEK_OFF: c_int = 42; +pub const SO_NOFCS: c_int = 43; +pub const SO_LOCK_FILTER: c_int = 44; +pub const SO_SELECT_ERR_QUEUE: c_int = 45; +pub const SO_BUSY_POLL: c_int = 46; +pub const SO_MAX_PACING_RATE: c_int = 47; +pub const SO_BPF_EXTENSIONS: c_int = 48; +pub const SO_INCOMING_CPU: c_int = 49; +pub const 
SO_ATTACH_BPF: c_int = 50; +pub const SO_DETACH_BPF: c_int = SO_DETACH_FILTER; +pub const SO_ATTACH_REUSEPORT_CBPF: c_int = 51; +pub const SO_ATTACH_REUSEPORT_EBPF: c_int = 52; +pub const SO_CNX_ADVICE: c_int = 53; +pub const SCM_TIMESTAMPING_OPT_STATS: c_int = 54; +pub const SO_MEMINFO: c_int = 55; +pub const SO_INCOMING_NAPI_ID: c_int = 56; +pub const SO_COOKIE: c_int = 57; +pub const SCM_TIMESTAMPING_PKTINFO: c_int = 58; +pub const SO_PEERGROUPS: c_int = 59; +pub const SO_ZEROCOPY: c_int = 60; +pub const SO_TXTIME: c_int = 61; +pub const SCM_TXTIME: c_int = SO_TXTIME; +pub const SO_BINDTOIFINDEX: c_int = 62; +cfg_if! { + // Some of these platforms in CI already have these constants. + // But they may still not have those _OLD ones. + if #[cfg(all( + any( + target_arch = "x86", + target_arch = "x86_64", + target_arch = "arm", + target_arch = "aarch64", + target_arch = "csky", + target_arch = "loongarch64" + ), + // FIXME(musl): + // Musl hardcodes the SO_* constants instead + // of inheriting them from the kernel headers. + // For new constants you might need consider updating + // musl in the CI as well. + not(any(target_env = "musl", target_env = "ohos")) + ))] { + pub const SO_TIMESTAMP_NEW: c_int = 63; + pub const SO_TIMESTAMPNS_NEW: c_int = 64; + pub const SO_TIMESTAMPING_NEW: c_int = 65; + pub const SO_RCVTIMEO_NEW: c_int = 66; + pub const SO_SNDTIMEO_NEW: c_int = 67; + pub const SO_DETACH_REUSEPORT_BPF: c_int = 68; + } +} +pub const SO_PREFER_BUSY_POLL: c_int = 69; +pub const SO_BUSY_POLL_BUDGET: c_int = 70; +pub const SO_NETNS_COOKIE: c_int = 71; +pub const SO_BUF_LOCK: c_int = 72; +pub const SO_RESERVE_MEM: c_int = 73; +pub const SO_TXREHASH: c_int = 74; +pub const SO_RCVMARK: c_int = 75; +pub const SO_PASSPIDFD: c_int = 76; +pub const SO_PEERPIDFD: c_int = 77; +pub const SO_DEVMEM_LINEAR: c_int = 78; +pub const SO_DEVMEM_DMABUF: c_int = 79; +pub const SO_DEVMEM_DONTNEED: c_int = 80; + +// Defined in unix/linux_like/mod.rs +// pub const SCM_TIMESTAMP: c_int = SO_TIMESTAMP; +pub const SCM_TIMESTAMPNS: c_int = SO_TIMESTAMPNS; +pub const SCM_TIMESTAMPING: c_int = SO_TIMESTAMPING; + +pub const SCM_DEVMEM_LINEAR: c_int = SO_DEVMEM_LINEAR; +pub const SCM_DEVMEM_DMABUF: c_int = SO_DEVMEM_DMABUF; + +// Ioctl Constants + +pub const TCGETS: Ioctl = 0x5401; +pub const TCSETS: Ioctl = 0x5402; +pub const TCSETSW: Ioctl = 0x5403; +pub const TCSETSF: Ioctl = 0x5404; +pub const TCGETA: Ioctl = 0x5405; +pub const TCSETA: Ioctl = 0x5406; +pub const TCSETAW: Ioctl = 0x5407; +pub const TCSETAF: Ioctl = 0x5408; +pub const TCSBRK: Ioctl = 0x5409; +pub const TCXONC: Ioctl = 0x540A; +pub const TCFLSH: Ioctl = 0x540B; +pub const TIOCEXCL: Ioctl = 0x540C; +pub const TIOCNXCL: Ioctl = 0x540D; +pub const TIOCSCTTY: Ioctl = 0x540E; +pub const TIOCGPGRP: Ioctl = 0x540F; +pub const TIOCSPGRP: Ioctl = 0x5410; +pub const TIOCOUTQ: Ioctl = 0x5411; +pub const TIOCSTI: Ioctl = 0x5412; +pub const TIOCGWINSZ: Ioctl = 0x5413; +pub const TIOCSWINSZ: Ioctl = 0x5414; +pub const TIOCMGET: Ioctl = 0x5415; +pub const TIOCMBIS: Ioctl = 0x5416; +pub const TIOCMBIC: Ioctl = 0x5417; +pub const TIOCMSET: Ioctl = 0x5418; +pub const TIOCGSOFTCAR: Ioctl = 0x5419; +pub const TIOCSSOFTCAR: Ioctl = 0x541A; +pub const FIONREAD: Ioctl = 0x541B; +pub const TIOCINQ: Ioctl = FIONREAD; +pub const TIOCLINUX: Ioctl = 0x541C; +pub const TIOCCONS: Ioctl = 0x541D; +pub const TIOCGSERIAL: Ioctl = 0x541E; +pub const TIOCSSERIAL: Ioctl = 0x541F; +pub const TIOCPKT: Ioctl = 0x5420; +pub const FIONBIO: Ioctl = 0x5421; +pub const TIOCNOTTY: Ioctl = 
0x5422; +pub const TIOCSETD: Ioctl = 0x5423; +pub const TIOCGETD: Ioctl = 0x5424; +pub const TCSBRKP: Ioctl = 0x5425; +pub const TIOCSBRK: Ioctl = 0x5427; +pub const TIOCCBRK: Ioctl = 0x5428; +pub const TIOCGSID: Ioctl = 0x5429; +pub const TCGETS2: Ioctl = 0x802c542a; +pub const TCSETS2: Ioctl = 0x402c542b; +pub const TCSETSW2: Ioctl = 0x402c542c; +pub const TCSETSF2: Ioctl = 0x402c542d; +pub const TIOCGRS485: Ioctl = 0x542E; +pub const TIOCSRS485: Ioctl = 0x542F; +pub const TIOCGPTN: Ioctl = 0x80045430; +pub const TIOCSPTLCK: Ioctl = 0x40045431; +pub const TIOCGDEV: Ioctl = 0x80045432; +pub const TCGETX: Ioctl = 0x5432; +pub const TCSETX: Ioctl = 0x5433; +pub const TCSETXF: Ioctl = 0x5434; +pub const TCSETXW: Ioctl = 0x5435; +pub const TIOCSIG: Ioctl = 0x40045436; +pub const TIOCVHANGUP: Ioctl = 0x5437; +pub const TIOCGPKT: Ioctl = 0x80045438; +pub const TIOCGPTLCK: Ioctl = 0x80045439; +pub const TIOCGEXCL: Ioctl = 0x80045440; +pub const TIOCGPTPEER: Ioctl = 0x5441; +// pub const TIOCGISO7816: Ioctl = 0x80285442; +// pub const TIOCSISO7816: Ioctl = 0xc0285443; +pub const FIONCLEX: Ioctl = 0x5450; +pub const FIOCLEX: Ioctl = 0x5451; +pub const FIOASYNC: Ioctl = 0x5452; +pub const TIOCSERCONFIG: Ioctl = 0x5453; +pub const TIOCSERGWILD: Ioctl = 0x5454; +pub const TIOCSERSWILD: Ioctl = 0x5455; +pub const TIOCGLCKTRMIOS: Ioctl = 0x5456; +pub const TIOCSLCKTRMIOS: Ioctl = 0x5457; +pub const TIOCSERGSTRUCT: Ioctl = 0x5458; +pub const TIOCSERGETLSR: Ioctl = 0x5459; +pub const TIOCSERGETMULTI: Ioctl = 0x545A; +pub const TIOCSERSETMULTI: Ioctl = 0x545B; +pub const TIOCMIWAIT: Ioctl = 0x545C; +pub const TIOCGICOUNT: Ioctl = 0x545D; +pub const BLKIOMIN: Ioctl = 0x1278; +pub const BLKIOOPT: Ioctl = 0x1279; +pub const BLKSSZGET: Ioctl = 0x1268; +pub const BLKPBSZGET: Ioctl = 0x127B; + +cfg_if! { + if #[cfg(any(target_arch = "arm", target_arch = "s390x"))] { + pub const FIOQSIZE: Ioctl = 0x545E; + } else { + pub const FIOQSIZE: Ioctl = 0x5460; + } +} + +pub const TIOCM_LE: c_int = 0x001; +pub const TIOCM_DTR: c_int = 0x002; +pub const TIOCM_RTS: c_int = 0x004; +pub const TIOCM_ST: c_int = 0x008; +pub const TIOCM_SR: c_int = 0x010; +pub const TIOCM_CTS: c_int = 0x020; +pub const TIOCM_CAR: c_int = 0x040; +pub const TIOCM_CD: c_int = TIOCM_CAR; +pub const TIOCM_RNG: c_int = 0x080; +pub const TIOCM_RI: c_int = TIOCM_RNG; +pub const TIOCM_DSR: c_int = 0x100; + +pub const BOTHER: crate::speed_t = 0o010000; +pub const IBSHIFT: crate::tcflag_t = 16; + +// RLIMIT Constants + +cfg_if! 
{ + if #[cfg(any(target_env = "gnu", target_env = "uclibc"))] { + pub const RLIMIT_CPU: crate::__rlimit_resource_t = 0; + pub const RLIMIT_FSIZE: crate::__rlimit_resource_t = 1; + pub const RLIMIT_DATA: crate::__rlimit_resource_t = 2; + pub const RLIMIT_STACK: crate::__rlimit_resource_t = 3; + pub const RLIMIT_CORE: crate::__rlimit_resource_t = 4; + pub const RLIMIT_RSS: crate::__rlimit_resource_t = 5; + pub const RLIMIT_NPROC: crate::__rlimit_resource_t = 6; + pub const RLIMIT_NOFILE: crate::__rlimit_resource_t = 7; + pub const RLIMIT_MEMLOCK: crate::__rlimit_resource_t = 8; + pub const RLIMIT_AS: crate::__rlimit_resource_t = 9; + pub const RLIMIT_LOCKS: crate::__rlimit_resource_t = 10; + pub const RLIMIT_SIGPENDING: crate::__rlimit_resource_t = 11; + pub const RLIMIT_MSGQUEUE: crate::__rlimit_resource_t = 12; + pub const RLIMIT_NICE: crate::__rlimit_resource_t = 13; + pub const RLIMIT_RTPRIO: crate::__rlimit_resource_t = 14; + pub const RLIMIT_RTTIME: crate::__rlimit_resource_t = 15; + #[allow(deprecated)] + #[deprecated(since = "0.2.64", note = "Not stable across OS versions")] + pub const RLIMIT_NLIMITS: crate::__rlimit_resource_t = RLIM_NLIMITS; + } else if #[cfg(any(target_env = "musl", target_env = "ohos"))] { + pub const RLIMIT_CPU: c_int = 0; + pub const RLIMIT_FSIZE: c_int = 1; + pub const RLIMIT_DATA: c_int = 2; + pub const RLIMIT_STACK: c_int = 3; + pub const RLIMIT_CORE: c_int = 4; + pub const RLIMIT_RSS: c_int = 5; + pub const RLIMIT_NPROC: c_int = 6; + pub const RLIMIT_NOFILE: c_int = 7; + pub const RLIMIT_MEMLOCK: c_int = 8; + pub const RLIMIT_AS: c_int = 9; + pub const RLIMIT_LOCKS: c_int = 10; + pub const RLIMIT_SIGPENDING: c_int = 11; + pub const RLIMIT_MSGQUEUE: c_int = 12; + pub const RLIMIT_NICE: c_int = 13; + pub const RLIMIT_RTPRIO: c_int = 14; + pub const RLIMIT_RTTIME: c_int = 15; + #[deprecated(since = "0.2.64", note = "Not stable across OS versions")] + pub const RLIM_NLIMITS: c_int = 16; + #[allow(deprecated)] + #[deprecated(since = "0.2.64", note = "Not stable across OS versions")] + pub const RLIMIT_NLIMITS: c_int = RLIM_NLIMITS; + } +} + +cfg_if! { + if #[cfg(target_env = "gnu")] { + #[deprecated(since = "0.2.64", note = "Not stable across OS versions")] + pub const RLIM_NLIMITS: crate::__rlimit_resource_t = 16; + } else if #[cfg(target_env = "uclibc")] { + #[deprecated(since = "0.2.64", note = "Not stable across OS versions")] + pub const RLIM_NLIMITS: crate::__rlimit_resource_t = 15; + } +} + +pub const RLIM_INFINITY: crate::rlim_t = !0; diff --git a/vendor/libc/src/unix/linux_like/linux/arch/mips/mod.rs b/vendor/libc/src/unix/linux_like/linux/arch/mips/mod.rs new file mode 100644 index 00000000000000..ba688948a906d2 --- /dev/null +++ b/vendor/libc/src/unix/linux_like/linux/arch/mips/mod.rs @@ -0,0 +1,333 @@ +use crate::prelude::*; +use crate::Ioctl; + +s! 
{ + pub struct termios2 { + pub c_iflag: crate::tcflag_t, + pub c_oflag: crate::tcflag_t, + pub c_cflag: crate::tcflag_t, + pub c_lflag: crate::tcflag_t, + pub c_line: crate::cc_t, + pub c_cc: [crate::cc_t; 23], + pub c_ispeed: crate::speed_t, + pub c_ospeed: crate::speed_t, + } +} + +// arch/mips/include/uapi/asm/socket.h +pub const SOL_SOCKET: c_int = 0xffff; + +// Defined in unix/linux_like/mod.rs +// pub const SO_DEBUG: c_int = 0x0001; +pub const SO_REUSEADDR: c_int = 0x0004; +pub const SO_KEEPALIVE: c_int = 0x0008; +pub const SO_DONTROUTE: c_int = 0x0010; +pub const SO_BROADCAST: c_int = 0x0020; +pub const SO_LINGER: c_int = 0x0080; +pub const SO_OOBINLINE: c_int = 0x0100; +pub const SO_REUSEPORT: c_int = 0x0200; +pub const SO_TYPE: c_int = 0x1008; +// pub const SO_STYLE: c_int = SO_TYPE; +pub const SO_ERROR: c_int = 0x1007; +pub const SO_SNDBUF: c_int = 0x1001; +pub const SO_RCVBUF: c_int = 0x1002; +pub const SO_SNDLOWAT: c_int = 0x1003; +pub const SO_RCVLOWAT: c_int = 0x1004; +cfg_if! { + if #[cfg(linux_time_bits64)] { + const SO_RCVTIMEO_NEW: c_int = 66; + const SO_SNDTIMEO_NEW: c_int = 67; + + pub const SO_SNDTIMEO: c_int = SO_SNDTIMEO_NEW; + pub const SO_RCVTIMEO: c_int = SO_RCVTIMEO_NEW; + } else { + const SO_SNDTIMEO_OLD: c_int = 0x1005; + const SO_RCVTIMEO_OLD: c_int = 0x1006; + + pub const SO_SNDTIMEO: c_int = SO_SNDTIMEO_OLD; + pub const SO_RCVTIMEO: c_int = SO_RCVTIMEO_OLD; + } +} +pub const SO_ACCEPTCONN: c_int = 0x1009; +pub const SO_PROTOCOL: c_int = 0x1028; +pub const SO_DOMAIN: c_int = 0x1029; + +pub const SO_NO_CHECK: c_int = 11; +pub const SO_PRIORITY: c_int = 12; +pub const SO_BSDCOMPAT: c_int = 14; +pub const SO_PASSCRED: c_int = 17; +pub const SO_PEERCRED: c_int = 18; +pub const SO_SECURITY_AUTHENTICATION: c_int = 22; +pub const SO_SECURITY_ENCRYPTION_TRANSPORT: c_int = 23; +pub const SO_SECURITY_ENCRYPTION_NETWORK: c_int = 24; +pub const SO_BINDTODEVICE: c_int = 25; +pub const SO_ATTACH_FILTER: c_int = 26; +pub const SO_DETACH_FILTER: c_int = 27; +pub const SO_GET_FILTER: c_int = SO_ATTACH_FILTER; +pub const SO_PEERNAME: c_int = 28; +pub const SO_PEERSEC: c_int = 30; +pub const SO_SNDBUFFORCE: c_int = 31; +pub const SO_RCVBUFFORCE: c_int = 33; +pub const SO_PASSSEC: c_int = 34; +pub const SO_MARK: c_int = 36; +pub const SO_RXQ_OVFL: c_int = 40; +pub const SO_WIFI_STATUS: c_int = 41; +pub const SCM_WIFI_STATUS: c_int = SO_WIFI_STATUS; +pub const SO_PEEK_OFF: c_int = 42; +pub const SO_NOFCS: c_int = 43; +pub const SO_LOCK_FILTER: c_int = 44; +pub const SO_SELECT_ERR_QUEUE: c_int = 45; +pub const SO_BUSY_POLL: c_int = 46; +pub const SO_MAX_PACING_RATE: c_int = 47; +pub const SO_BPF_EXTENSIONS: c_int = 48; +pub const SO_INCOMING_CPU: c_int = 49; +pub const SO_ATTACH_BPF: c_int = 50; +pub const SO_DETACH_BPF: c_int = SO_DETACH_FILTER; +pub const SO_ATTACH_REUSEPORT_CBPF: c_int = 51; +pub const SO_ATTACH_REUSEPORT_EBPF: c_int = 52; +pub const SO_CNX_ADVICE: c_int = 53; +pub const SCM_TIMESTAMPING_OPT_STATS: c_int = 54; +pub const SO_MEMINFO: c_int = 55; +pub const SO_INCOMING_NAPI_ID: c_int = 56; +pub const SO_COOKIE: c_int = 57; +pub const SCM_TIMESTAMPING_PKTINFO: c_int = 58; +pub const SO_PEERGROUPS: c_int = 59; +pub const SO_ZEROCOPY: c_int = 60; +pub const SO_TXTIME: c_int = 61; +pub const SCM_TXTIME: c_int = SO_TXTIME; +pub const SO_BINDTOIFINDEX: c_int = 62; + +cfg_if! 
{ + if #[cfg(linux_time_bits64)] { + const SO_TIMESTAMP_NEW: c_int = 63; + const SO_TIMESTAMPNS_NEW: c_int = 64; + const SO_TIMESTAMPING_NEW: c_int = 65; + + pub const SO_TIMESTAMP: c_int = SO_TIMESTAMP_NEW; + pub const SO_TIMESTAMPNS: c_int = SO_TIMESTAMPNS_NEW; + pub const SO_TIMESTAMPING: c_int = SO_TIMESTAMPING_NEW; + } else { + const SO_TIMESTAMP_OLD: c_int = 29; + const SO_TIMESTAMPNS_OLD: c_int = 35; + const SO_TIMESTAMPING_OLD: c_int = 37; + + pub const SO_TIMESTAMP: c_int = SO_TIMESTAMP_OLD; + pub const SO_TIMESTAMPNS: c_int = SO_TIMESTAMPNS_OLD; + pub const SO_TIMESTAMPING: c_int = SO_TIMESTAMPING_OLD; + } +} + +// pub const SO_DETACH_REUSEPORT_BPF: c_int = 68; +pub const SO_PREFER_BUSY_POLL: c_int = 69; +pub const SO_BUSY_POLL_BUDGET: c_int = 70; +pub const SO_NETNS_COOKIE: c_int = 71; +pub const SO_BUF_LOCK: c_int = 72; +pub const SO_RESERVE_MEM: c_int = 73; +pub const SO_TXREHASH: c_int = 74; +pub const SO_RCVMARK: c_int = 75; +pub const SO_PASSPIDFD: c_int = 76; +pub const SO_PEERPIDFD: c_int = 77; +pub const SO_DEVMEM_LINEAR: c_int = 78; +pub const SO_DEVMEM_DMABUF: c_int = 79; +pub const SO_DEVMEM_DONTNEED: c_int = 80; + +// Defined in unix/linux_like/mod.rs +// pub const SCM_TIMESTAMP: c_int = SO_TIMESTAMP; +pub const SCM_TIMESTAMPNS: c_int = SO_TIMESTAMPNS; +pub const SCM_TIMESTAMPING: c_int = SO_TIMESTAMPING; + +pub const SCM_DEVMEM_LINEAR: c_int = SO_DEVMEM_LINEAR; +pub const SCM_DEVMEM_DMABUF: c_int = SO_DEVMEM_DMABUF; + +// Ioctl Constants + +pub const TCGETS: Ioctl = 0x540d; +pub const TCSETS: Ioctl = 0x540e; +pub const TCSETSW: Ioctl = 0x540f; +pub const TCSETSF: Ioctl = 0x5410; +pub const TCGETA: Ioctl = 0x5401; +pub const TCSETA: Ioctl = 0x5402; +pub const TCSETAW: Ioctl = 0x5403; +pub const TCSETAF: Ioctl = 0x5404; +pub const TCSBRK: Ioctl = 0x5405; +pub const TCXONC: Ioctl = 0x5406; +pub const TCFLSH: Ioctl = 0x5407; +pub const TIOCEXCL: Ioctl = 0x740d; +pub const TIOCNXCL: Ioctl = 0x740e; +pub const TIOCSCTTY: Ioctl = 0x5480; +pub const TIOCGPGRP: Ioctl = 0x40047477; +pub const TIOCSPGRP: Ioctl = 0x80047476; +pub const TIOCOUTQ: Ioctl = 0x7472; +pub const TIOCSTI: Ioctl = 0x5472; +pub const TIOCGWINSZ: Ioctl = 0x40087468; +pub const TIOCSWINSZ: Ioctl = 0x80087467; +pub const TIOCMGET: Ioctl = 0x741d; +pub const TIOCMBIS: Ioctl = 0x741b; +pub const TIOCMBIC: Ioctl = 0x741c; +pub const TIOCMSET: Ioctl = 0x741a; +pub const TIOCGSOFTCAR: Ioctl = 0x5481; +pub const TIOCSSOFTCAR: Ioctl = 0x5482; +pub const FIONREAD: Ioctl = 0x467f; +pub const TIOCINQ: Ioctl = FIONREAD; +pub const TIOCLINUX: Ioctl = 0x5483; +pub const TIOCCONS: Ioctl = 0x80047478; +pub const TIOCGSERIAL: Ioctl = 0x5484; +pub const TIOCSSERIAL: Ioctl = 0x5485; +pub const TIOCPKT: Ioctl = 0x5470; +pub const FIONBIO: Ioctl = 0x667e; +pub const TIOCNOTTY: Ioctl = 0x5471; +pub const TIOCSETD: Ioctl = 0x7401; +pub const TIOCGETD: Ioctl = 0x7400; +pub const TCSBRKP: Ioctl = 0x5486; +pub const TIOCSBRK: Ioctl = 0x5427; +pub const TIOCCBRK: Ioctl = 0x5428; +pub const TIOCGSID: Ioctl = 0x7416; +pub const TCGETS2: Ioctl = 0x4030542a; +pub const TCSETS2: Ioctl = 0x8030542b; +pub const TCSETSW2: Ioctl = 0x8030542c; +pub const TCSETSF2: Ioctl = 0x8030542d; +pub const TIOCGPTN: Ioctl = 0x40045430; +pub const TIOCSPTLCK: Ioctl = 0x80045431; +pub const TIOCGDEV: Ioctl = 0x40045432; +pub const TIOCSIG: Ioctl = 0x80045436; +pub const TIOCVHANGUP: Ioctl = 0x5437; +pub const TIOCGPKT: Ioctl = 0x40045438; +pub const TIOCGPTLCK: Ioctl = 0x40045439; +pub const TIOCGEXCL: Ioctl = 0x40045440; +pub const TIOCGPTPEER: Ioctl = 
0x20005441; +//pub const TIOCGISO7816: Ioctl = 0x40285442; +//pub const TIOCSISO7816: Ioctl = 0xc0285443; +pub const FIONCLEX: Ioctl = 0x6602; +pub const FIOCLEX: Ioctl = 0x6601; +pub const FIOASYNC: Ioctl = 0x667d; +pub const TIOCSERCONFIG: Ioctl = 0x5488; +pub const TIOCSERGWILD: Ioctl = 0x5489; +pub const TIOCSERSWILD: Ioctl = 0x548a; +pub const TIOCGLCKTRMIOS: Ioctl = 0x548b; +pub const TIOCSLCKTRMIOS: Ioctl = 0x548c; +pub const TIOCSERGSTRUCT: Ioctl = 0x548d; +pub const TIOCSERGETLSR: Ioctl = 0x548e; +pub const TIOCSERGETMULTI: Ioctl = 0x548f; +pub const TIOCSERSETMULTI: Ioctl = 0x5490; +pub const TIOCMIWAIT: Ioctl = 0x5491; +pub const TIOCGICOUNT: Ioctl = 0x5492; +pub const FIOQSIZE: Ioctl = 0x667f; +pub const TIOCSLTC: Ioctl = 0x7475; +pub const TIOCGETP: Ioctl = 0x7408; +pub const TIOCSETP: Ioctl = 0x7409; +pub const TIOCSETN: Ioctl = 0x740a; +pub const BLKIOMIN: Ioctl = 0x20001278; +pub const BLKIOOPT: Ioctl = 0x20001279; +pub const BLKSSZGET: Ioctl = 0x20001268; +pub const BLKPBSZGET: Ioctl = 0x2000127B; + +cfg_if! { + if #[cfg(target_env = "musl")] { + pub const TIOCGRS485: Ioctl = 0x4020542e; + pub const TIOCSRS485: Ioctl = 0xc020542f; + } +} + +pub const TIOCM_LE: c_int = 0x001; +pub const TIOCM_DTR: c_int = 0x002; +pub const TIOCM_RTS: c_int = 0x004; +pub const TIOCM_ST: c_int = 0x010; +pub const TIOCM_SR: c_int = 0x020; +pub const TIOCM_CTS: c_int = 0x040; +pub const TIOCM_CAR: c_int = 0x100; +pub const TIOCM_CD: c_int = TIOCM_CAR; +pub const TIOCM_RNG: c_int = 0x200; +pub const TIOCM_RI: c_int = TIOCM_RNG; +pub const TIOCM_DSR: c_int = 0x400; + +pub const BOTHER: crate::speed_t = 0o010000; +pub const IBSHIFT: crate::tcflag_t = 16; + +// RLIMIT Constants + +cfg_if! { + if #[cfg(any(target_env = "gnu", target_env = "uclibc"))] { + pub const RLIMIT_CPU: crate::__rlimit_resource_t = 0; + pub const RLIMIT_FSIZE: crate::__rlimit_resource_t = 1; + pub const RLIMIT_DATA: crate::__rlimit_resource_t = 2; + pub const RLIMIT_STACK: crate::__rlimit_resource_t = 3; + pub const RLIMIT_CORE: crate::__rlimit_resource_t = 4; + pub const RLIMIT_NOFILE: crate::__rlimit_resource_t = 5; + pub const RLIMIT_AS: crate::__rlimit_resource_t = 6; + pub const RLIMIT_RSS: crate::__rlimit_resource_t = 7; + pub const RLIMIT_NPROC: crate::__rlimit_resource_t = 8; + pub const RLIMIT_MEMLOCK: crate::__rlimit_resource_t = 9; + pub const RLIMIT_LOCKS: crate::__rlimit_resource_t = 10; + pub const RLIMIT_SIGPENDING: crate::__rlimit_resource_t = 11; + pub const RLIMIT_MSGQUEUE: crate::__rlimit_resource_t = 12; + pub const RLIMIT_NICE: crate::__rlimit_resource_t = 13; + pub const RLIMIT_RTPRIO: crate::__rlimit_resource_t = 14; + pub const RLIMIT_RTTIME: crate::__rlimit_resource_t = 15; + #[allow(deprecated)] + #[deprecated(since = "0.2.64", note = "Not stable across OS versions")] + pub const RLIMIT_NLIMITS: crate::__rlimit_resource_t = RLIM_NLIMITS; + } else if #[cfg(target_env = "musl")] { + pub const RLIMIT_CPU: c_int = 0; + pub const RLIMIT_FSIZE: c_int = 1; + pub const RLIMIT_DATA: c_int = 2; + pub const RLIMIT_STACK: c_int = 3; + pub const RLIMIT_CORE: c_int = 4; + pub const RLIMIT_NOFILE: c_int = 5; + pub const RLIMIT_AS: c_int = 6; + pub const RLIMIT_RSS: c_int = 7; + pub const RLIMIT_NPROC: c_int = 8; + pub const RLIMIT_MEMLOCK: c_int = 9; + pub const RLIMIT_LOCKS: c_int = 10; + pub const RLIMIT_SIGPENDING: c_int = 11; + pub const RLIMIT_MSGQUEUE: c_int = 12; + pub const RLIMIT_NICE: c_int = 13; + pub const RLIMIT_RTPRIO: c_int = 14; + pub const RLIMIT_RTTIME: c_int = 15; + #[deprecated(since = "0.2.64", 
note = "Not stable across OS versions")] + pub const RLIM_NLIMITS: c_int = 16; + #[allow(deprecated)] + #[deprecated(since = "0.2.64", note = "Not stable across OS versions")] + pub const RLIMIT_NLIMITS: c_int = RLIM_NLIMITS; + pub const RLIM_INFINITY: crate::rlim_t = !0; + } +} + +cfg_if! { + if #[cfg(target_env = "gnu")] { + #[deprecated(since = "0.2.64", note = "Not stable across OS versions")] + pub const RLIM_NLIMITS: crate::__rlimit_resource_t = 16; + } else if #[cfg(target_env = "uclibc")] { + #[deprecated(since = "0.2.64", note = "Not stable across OS versions")] + pub const RLIM_NLIMITS: crate::__rlimit_resource_t = 15; + } +} + +cfg_if! { + if #[cfg(all( + any(target_arch = "mips64", target_arch = "mips64r6"), + any(target_env = "gnu", target_env = "uclibc") + ))] { + pub const RLIM_INFINITY: crate::rlim_t = !0; + } +} + +cfg_if! { + if #[cfg(all( + any(target_arch = "mips", target_arch = "mips32r6"), + any( + all(target_env = "uclibc", linux_time_bits64), + all( + target_env = "gnu", + any(linux_time_bits64, gnu_file_offset_bits64) + ) + ) + ))] { + pub const RLIM_INFINITY: crate::rlim_t = !0; + } else if #[cfg(all( + any(target_arch = "mips", target_arch = "mips32r6"), + any(target_env = "uclibc", target_env = "gnu"), + not(linux_time_bits64) + ))] { + pub const RLIM_INFINITY: crate::rlim_t = 0x7fffffff; + } +} diff --git a/vendor/libc/src/unix/linux_like/linux/arch/mod.rs b/vendor/libc/src/unix/linux_like/linux/arch/mod.rs new file mode 100644 index 00000000000000..00914a43ac1646 --- /dev/null +++ b/vendor/libc/src/unix/linux_like/linux/arch/mod.rs @@ -0,0 +1,20 @@ +cfg_if! { + if #[cfg(any( + target_arch = "mips", + target_arch = "mips32r6", + target_arch = "mips64", + target_arch = "mips64r6" + ))] { + mod mips; + pub use self::mips::*; + } else if #[cfg(any(target_arch = "powerpc", target_arch = "powerpc64"))] { + mod powerpc; + pub use self::powerpc::*; + } else if #[cfg(any(target_arch = "sparc", target_arch = "sparc64"))] { + mod sparc; + pub use self::sparc::*; + } else { + mod generic; + pub use self::generic::*; + } +} diff --git a/vendor/libc/src/unix/linux_like/linux/arch/powerpc/mod.rs b/vendor/libc/src/unix/linux_like/linux/arch/powerpc/mod.rs new file mode 100644 index 00000000000000..3249a9f1b6a46d --- /dev/null +++ b/vendor/libc/src/unix/linux_like/linux/arch/powerpc/mod.rs @@ -0,0 +1,280 @@ +use crate::prelude::*; +use crate::Ioctl; + +// arch/powerpc/include/uapi/asm/socket.h + +pub const SOL_SOCKET: c_int = 1; + +// Defined in unix/linux_like/mod.rs +// pub const SO_DEBUG: c_int = 1; +pub const SO_REUSEADDR: c_int = 2; +pub const SO_TYPE: c_int = 3; +pub const SO_ERROR: c_int = 4; +pub const SO_DONTROUTE: c_int = 5; +pub const SO_BROADCAST: c_int = 6; +pub const SO_SNDBUF: c_int = 7; +pub const SO_RCVBUF: c_int = 8; +pub const SO_KEEPALIVE: c_int = 9; +pub const SO_OOBINLINE: c_int = 10; +pub const SO_NO_CHECK: c_int = 11; +pub const SO_PRIORITY: c_int = 12; +pub const SO_LINGER: c_int = 13; +pub const SO_BSDCOMPAT: c_int = 14; +pub const SO_REUSEPORT: c_int = 15; +// powerpc only differs in these +pub const SO_RCVLOWAT: c_int = 16; +pub const SO_SNDLOWAT: c_int = 17; + +cfg_if! 
{ + if #[cfg(linux_time_bits64)] { + const SO_RCVTIMEO_NEW: c_int = 66; + const SO_SNDTIMEO_NEW: c_int = 67; + + pub const SO_RCVTIMEO: c_int = SO_RCVTIMEO_NEW; + pub const SO_SNDTIMEO: c_int = SO_SNDTIMEO_NEW; + } else { + const SO_RCVTIMEO_OLD: c_int = 18; + const SO_SNDTIMEO_OLD: c_int = 19; + + pub const SO_RCVTIMEO: c_int = SO_RCVTIMEO_OLD; + pub const SO_SNDTIMEO: c_int = SO_SNDTIMEO_OLD; + } +} + +pub const SO_PASSCRED: c_int = 20; +pub const SO_PEERCRED: c_int = 21; +// end +pub const SO_SECURITY_AUTHENTICATION: c_int = 22; +pub const SO_SECURITY_ENCRYPTION_TRANSPORT: c_int = 23; +pub const SO_SECURITY_ENCRYPTION_NETWORK: c_int = 24; +pub const SO_BINDTODEVICE: c_int = 25; +pub const SO_ATTACH_FILTER: c_int = 26; +pub const SO_DETACH_FILTER: c_int = 27; +pub const SO_GET_FILTER: c_int = SO_ATTACH_FILTER; +pub const SO_PEERNAME: c_int = 28; +cfg_if! { + if #[cfg(linux_time_bits64)] { + const SO_TIMESTAMP_NEW: c_int = 63; + const SO_TIMESTAMPNS_NEW: c_int = 64; + const SO_TIMESTAMPING_NEW: c_int = 65; + + pub const SO_TIMESTAMP: c_int = SO_TIMESTAMP_NEW; + pub const SO_TIMESTAMPNS: c_int = SO_TIMESTAMPNS_NEW; + pub const SO_TIMESTAMPING: c_int = SO_TIMESTAMPING_NEW; + } else { + const SO_TIMESTAMP_OLD: c_int = 29; + const SO_TIMESTAMPNS_OLD: c_int = 35; + const SO_TIMESTAMPING_OLD: c_int = 37; + + pub const SO_TIMESTAMP: c_int = SO_TIMESTAMP_OLD; + pub const SO_TIMESTAMPNS: c_int = SO_TIMESTAMPNS_OLD; + pub const SO_TIMESTAMPING: c_int = SO_TIMESTAMPING_OLD; + } +} +pub const SO_ACCEPTCONN: c_int = 30; +pub const SO_PEERSEC: c_int = 31; +pub const SO_SNDBUFFORCE: c_int = 32; +pub const SO_RCVBUFFORCE: c_int = 33; +pub const SO_PASSSEC: c_int = 34; +pub const SO_MARK: c_int = 36; +pub const SO_PROTOCOL: c_int = 38; +pub const SO_DOMAIN: c_int = 39; +pub const SO_RXQ_OVFL: c_int = 40; +pub const SO_WIFI_STATUS: c_int = 41; +pub const SCM_WIFI_STATUS: c_int = SO_WIFI_STATUS; +pub const SO_PEEK_OFF: c_int = 42; +pub const SO_NOFCS: c_int = 43; +pub const SO_LOCK_FILTER: c_int = 44; +pub const SO_SELECT_ERR_QUEUE: c_int = 45; +pub const SO_BUSY_POLL: c_int = 46; +pub const SO_MAX_PACING_RATE: c_int = 47; +pub const SO_BPF_EXTENSIONS: c_int = 48; +pub const SO_INCOMING_CPU: c_int = 49; +pub const SO_ATTACH_BPF: c_int = 50; +pub const SO_DETACH_BPF: c_int = SO_DETACH_FILTER; +pub const SO_ATTACH_REUSEPORT_CBPF: c_int = 51; +pub const SO_ATTACH_REUSEPORT_EBPF: c_int = 52; +pub const SO_CNX_ADVICE: c_int = 53; +pub const SCM_TIMESTAMPING_OPT_STATS: c_int = 54; +pub const SO_MEMINFO: c_int = 55; +pub const SO_INCOMING_NAPI_ID: c_int = 56; +pub const SO_COOKIE: c_int = 57; +pub const SCM_TIMESTAMPING_PKTINFO: c_int = 58; +pub const SO_PEERGROUPS: c_int = 59; +pub const SO_ZEROCOPY: c_int = 60; +pub const SO_TXTIME: c_int = 61; +pub const SCM_TXTIME: c_int = SO_TXTIME; +pub const SO_BINDTOIFINDEX: c_int = 62; +// pub const SO_DETACH_REUSEPORT_BPF: c_int = 68; +pub const SO_PREFER_BUSY_POLL: c_int = 69; +pub const SO_BUSY_POLL_BUDGET: c_int = 70; +pub const SO_NETNS_COOKIE: c_int = 71; +pub const SO_BUF_LOCK: c_int = 72; +pub const SO_RESERVE_MEM: c_int = 73; +pub const SO_TXREHASH: c_int = 74; +pub const SO_RCVMARK: c_int = 75; +pub const SO_PASSPIDFD: c_int = 76; +pub const SO_PEERPIDFD: c_int = 77; +pub const SO_DEVMEM_LINEAR: c_int = 78; +pub const SO_DEVMEM_DMABUF: c_int = 79; +pub const SO_DEVMEM_DONTNEED: c_int = 80; + +// Defined in unix/linux_like/mod.rs +// pub const SCM_TIMESTAMP: c_int = SO_TIMESTAMP; +pub const SCM_TIMESTAMPNS: c_int = SO_TIMESTAMPNS; +pub const SCM_TIMESTAMPING: 
c_int = SO_TIMESTAMPING; + +pub const SCM_DEVMEM_LINEAR: c_int = SO_DEVMEM_LINEAR; +pub const SCM_DEVMEM_DMABUF: c_int = SO_DEVMEM_DMABUF; + +// Ioctl Constants + +cfg_if! { + if #[cfg(target_env = "gnu")] { + pub const TCGETS: Ioctl = 0x403c7413; + pub const TCSETS: Ioctl = 0x803c7414; + pub const TCSETSW: Ioctl = 0x803c7415; + pub const TCSETSF: Ioctl = 0x803c7416; + } else if #[cfg(target_env = "musl")] { + pub const TCGETS: Ioctl = 0x402c7413; + pub const TCSETS: Ioctl = 0x802c7414; + pub const TCSETSW: Ioctl = 0x802c7415; + pub const TCSETSF: Ioctl = 0x802c7416; + } +} + +pub const TCGETA: Ioctl = 0x40147417; +pub const TCSETA: Ioctl = 0x80147418; +pub const TCSETAW: Ioctl = 0x80147419; +pub const TCSETAF: Ioctl = 0x8014741C; +pub const TCSBRK: Ioctl = 0x2000741D; +pub const TCXONC: Ioctl = 0x2000741E; +pub const TCFLSH: Ioctl = 0x2000741F; +pub const TIOCEXCL: Ioctl = 0x540C; +pub const TIOCNXCL: Ioctl = 0x540D; +pub const TIOCSCTTY: Ioctl = 0x540E; +pub const TIOCGPGRP: Ioctl = 0x40047477; +pub const TIOCSPGRP: Ioctl = 0x80047476; +pub const TIOCOUTQ: Ioctl = 0x40047473; +pub const TIOCSTI: Ioctl = 0x5412; +pub const TIOCGWINSZ: Ioctl = 0x40087468; +pub const TIOCSWINSZ: Ioctl = 0x80087467; +pub const TIOCMGET: Ioctl = 0x5415; +pub const TIOCMBIS: Ioctl = 0x5416; +pub const TIOCMBIC: Ioctl = 0x5417; +pub const TIOCMSET: Ioctl = 0x5418; +pub const TIOCGSOFTCAR: Ioctl = 0x5419; +pub const TIOCSSOFTCAR: Ioctl = 0x541A; +pub const FIONREAD: Ioctl = 0x4004667F; +pub const TIOCINQ: Ioctl = FIONREAD; +pub const TIOCLINUX: Ioctl = 0x541C; +pub const TIOCCONS: Ioctl = 0x541D; +pub const TIOCGSERIAL: Ioctl = 0x541E; +pub const TIOCSSERIAL: Ioctl = 0x541F; +pub const TIOCPKT: Ioctl = 0x5420; +pub const FIONBIO: Ioctl = 0x8004667e; +pub const TIOCNOTTY: Ioctl = 0x5422; +pub const TIOCSETD: Ioctl = 0x5423; +pub const TIOCGETD: Ioctl = 0x5424; +pub const TCSBRKP: Ioctl = 0x5425; +pub const TIOCSBRK: Ioctl = 0x5427; +pub const TIOCCBRK: Ioctl = 0x5428; +pub const TIOCGSID: Ioctl = 0x5429; +pub const TIOCGRS485: Ioctl = 0x542e; +pub const TIOCSRS485: Ioctl = 0x542f; +pub const TIOCGPTN: Ioctl = 0x40045430; +pub const TIOCSPTLCK: Ioctl = 0x80045431; +pub const TIOCGDEV: Ioctl = 0x40045432; +pub const TIOCSIG: Ioctl = 0x80045436; +pub const TIOCVHANGUP: Ioctl = 0x5437; +pub const TIOCGPKT: Ioctl = 0x40045438; +pub const TIOCGPTLCK: Ioctl = 0x40045439; +pub const TIOCGEXCL: Ioctl = 0x40045440; +pub const TIOCGPTPEER: Ioctl = 0x20005441; +//pub const TIOCGISO7816: Ioctl = 0x40285442; +//pub const TIOCSISO7816: Ioctl = 0xc0285443; +pub const FIONCLEX: Ioctl = 0x20006602; +pub const FIOCLEX: Ioctl = 0x20006601; +pub const FIOASYNC: Ioctl = 0x8004667d; +pub const TIOCSERCONFIG: Ioctl = 0x5453; +pub const TIOCSERGWILD: Ioctl = 0x5454; +pub const TIOCSERSWILD: Ioctl = 0x5455; +pub const TIOCGLCKTRMIOS: Ioctl = 0x5456; +pub const TIOCSLCKTRMIOS: Ioctl = 0x5457; +pub const TIOCSERGSTRUCT: Ioctl = 0x5458; +pub const TIOCSERGETLSR: Ioctl = 0x5459; +pub const TIOCSERGETMULTI: Ioctl = 0x545A; +pub const TIOCSERSETMULTI: Ioctl = 0x545B; +pub const TIOCMIWAIT: Ioctl = 0x545C; +pub const TIOCGICOUNT: Ioctl = 0x545D; +pub const BLKIOMIN: Ioctl = 0x20001278; +pub const BLKIOOPT: Ioctl = 0x20001279; +pub const BLKSSZGET: Ioctl = 0x20001268; +pub const BLKPBSZGET: Ioctl = 0x2000127B; +//pub const FIOQSIZE: Ioctl = 0x40086680; + +pub const TIOCM_LE: c_int = 0x001; +pub const TIOCM_DTR: c_int = 0x002; +pub const TIOCM_RTS: c_int = 0x004; +pub const TIOCM_ST: c_int = 0x008; +pub const TIOCM_SR: c_int = 0x010; +pub const 
TIOCM_CTS: c_int = 0x020; +pub const TIOCM_CAR: c_int = 0x040; +pub const TIOCM_CD: c_int = TIOCM_CAR; +pub const TIOCM_RNG: c_int = 0x080; +pub const TIOCM_RI: c_int = TIOCM_RNG; +pub const TIOCM_DSR: c_int = 0x100; + +pub const BOTHER: crate::speed_t = 0o0037; +pub const IBSHIFT: crate::tcflag_t = 16; + +// RLIMIT Constants + +cfg_if! { + if #[cfg(target_env = "gnu")] { + pub const RLIMIT_CPU: crate::__rlimit_resource_t = 0; + pub const RLIMIT_FSIZE: crate::__rlimit_resource_t = 1; + pub const RLIMIT_DATA: crate::__rlimit_resource_t = 2; + pub const RLIMIT_STACK: crate::__rlimit_resource_t = 3; + pub const RLIMIT_CORE: crate::__rlimit_resource_t = 4; + pub const RLIMIT_RSS: crate::__rlimit_resource_t = 5; + pub const RLIMIT_NPROC: crate::__rlimit_resource_t = 6; + pub const RLIMIT_NOFILE: crate::__rlimit_resource_t = 7; + pub const RLIMIT_MEMLOCK: crate::__rlimit_resource_t = 8; + pub const RLIMIT_AS: crate::__rlimit_resource_t = 9; + pub const RLIMIT_LOCKS: crate::__rlimit_resource_t = 10; + pub const RLIMIT_SIGPENDING: crate::__rlimit_resource_t = 11; + pub const RLIMIT_MSGQUEUE: crate::__rlimit_resource_t = 12; + pub const RLIMIT_NICE: crate::__rlimit_resource_t = 13; + pub const RLIMIT_RTPRIO: crate::__rlimit_resource_t = 14; + pub const RLIMIT_RTTIME: crate::__rlimit_resource_t = 15; + #[deprecated(since = "0.2.64", note = "Not stable across OS versions")] + pub const RLIM_NLIMITS: crate::__rlimit_resource_t = 16; + #[allow(deprecated)] + #[deprecated(since = "0.2.64", note = "Not stable across OS versions")] + pub const RLIMIT_NLIMITS: crate::__rlimit_resource_t = RLIM_NLIMITS; + } else if #[cfg(target_env = "musl")] { + pub const RLIMIT_CPU: c_int = 0; + pub const RLIMIT_FSIZE: c_int = 1; + pub const RLIMIT_DATA: c_int = 2; + pub const RLIMIT_STACK: c_int = 3; + pub const RLIMIT_CORE: c_int = 4; + pub const RLIMIT_RSS: c_int = 5; + pub const RLIMIT_NPROC: c_int = 6; + pub const RLIMIT_NOFILE: c_int = 7; + pub const RLIMIT_MEMLOCK: c_int = 8; + pub const RLIMIT_AS: c_int = 9; + pub const RLIMIT_LOCKS: c_int = 10; + pub const RLIMIT_SIGPENDING: c_int = 11; + pub const RLIMIT_MSGQUEUE: c_int = 12; + pub const RLIMIT_NICE: c_int = 13; + pub const RLIMIT_RTPRIO: c_int = 14; + pub const RLIMIT_RTTIME: c_int = 15; + #[deprecated(since = "0.2.64", note = "Not stable across OS versions")] + pub const RLIM_NLIMITS: c_int = 16; + #[allow(deprecated)] + #[deprecated(since = "0.2.64", note = "Not stable across OS versions")] + pub const RLIMIT_NLIMITS: c_int = RLIM_NLIMITS; + } +} +pub const RLIM_INFINITY: crate::rlim_t = !0; diff --git a/vendor/libc/src/unix/linux_like/linux/arch/sparc/mod.rs b/vendor/libc/src/unix/linux_like/linux/arch/sparc/mod.rs new file mode 100644 index 00000000000000..4c108ba7b71c1a --- /dev/null +++ b/vendor/libc/src/unix/linux_like/linux/arch/sparc/mod.rs @@ -0,0 +1,247 @@ +use crate::prelude::*; +use crate::Ioctl; + +s! 
{ + pub struct termios2 { + pub c_iflag: crate::tcflag_t, + pub c_oflag: crate::tcflag_t, + pub c_cflag: crate::tcflag_t, + pub c_lflag: crate::tcflag_t, + pub c_line: crate::cc_t, + pub c_cc: [crate::cc_t; 19], + pub c_ispeed: crate::speed_t, + pub c_ospeed: crate::speed_t, + } +} + +// arch/sparc/include/uapi/asm/socket.h +pub const SOL_SOCKET: c_int = 0xffff; + +// Defined in unix/linux_like/mod.rs +// pub const SO_DEBUG: c_int = 0x0001; +pub const SO_PASSCRED: c_int = 0x0002; +pub const SO_REUSEADDR: c_int = 0x0004; +pub const SO_KEEPALIVE: c_int = 0x0008; +pub const SO_DONTROUTE: c_int = 0x0010; +pub const SO_BROADCAST: c_int = 0x0020; +pub const SO_PEERCRED: c_int = 0x0040; +pub const SO_LINGER: c_int = 0x0080; +pub const SO_OOBINLINE: c_int = 0x0100; +pub const SO_REUSEPORT: c_int = 0x0200; +pub const SO_BSDCOMPAT: c_int = 0x0400; +pub const SO_RCVLOWAT: c_int = 0x0800; +pub const SO_SNDLOWAT: c_int = 0x1000; +pub const SO_RCVTIMEO: c_int = 0x2000; +pub const SO_SNDTIMEO: c_int = 0x4000; +// pub const SO_RCVTIMEO_OLD: c_int = 0x2000; +// pub const SO_SNDTIMEO_OLD: c_int = 0x4000; +pub const SO_ACCEPTCONN: c_int = 0x8000; +pub const SO_SNDBUF: c_int = 0x1001; +pub const SO_RCVBUF: c_int = 0x1002; +pub const SO_SNDBUFFORCE: c_int = 0x100a; +pub const SO_RCVBUFFORCE: c_int = 0x100b; +pub const SO_ERROR: c_int = 0x1007; +pub const SO_TYPE: c_int = 0x1008; +pub const SO_PROTOCOL: c_int = 0x1028; +pub const SO_DOMAIN: c_int = 0x1029; +pub const SO_NO_CHECK: c_int = 0x000b; +pub const SO_PRIORITY: c_int = 0x000c; +pub const SO_BINDTODEVICE: c_int = 0x000d; +pub const SO_ATTACH_FILTER: c_int = 0x001a; +pub const SO_DETACH_FILTER: c_int = 0x001b; +pub const SO_GET_FILTER: c_int = SO_ATTACH_FILTER; +pub const SO_PEERNAME: c_int = 0x001c; +pub const SO_PEERSEC: c_int = 0x001e; +pub const SO_PASSSEC: c_int = 0x001f; +pub const SO_MARK: c_int = 0x0022; +pub const SO_RXQ_OVFL: c_int = 0x0024; +pub const SO_WIFI_STATUS: c_int = 0x0025; +pub const SCM_WIFI_STATUS: c_int = SO_WIFI_STATUS; +pub const SO_PEEK_OFF: c_int = 0x0026; +pub const SO_NOFCS: c_int = 0x0027; +pub const SO_LOCK_FILTER: c_int = 0x0028; +pub const SO_SELECT_ERR_QUEUE: c_int = 0x0029; +pub const SO_BUSY_POLL: c_int = 0x0030; +pub const SO_MAX_PACING_RATE: c_int = 0x0031; +pub const SO_BPF_EXTENSIONS: c_int = 0x0032; +pub const SO_INCOMING_CPU: c_int = 0x0033; +pub const SO_ATTACH_BPF: c_int = 0x0034; +pub const SO_DETACH_BPF: c_int = SO_DETACH_FILTER; +pub const SO_ATTACH_REUSEPORT_CBPF: c_int = 0x0035; +pub const SO_ATTACH_REUSEPORT_EBPF: c_int = 0x0036; +pub const SO_CNX_ADVICE: c_int = 0x0037; +pub const SCM_TIMESTAMPING_OPT_STATS: c_int = 0x0038; +pub const SO_MEMINFO: c_int = 0x0039; +pub const SO_INCOMING_NAPI_ID: c_int = 0x003a; +pub const SO_COOKIE: c_int = 0x003b; +pub const SCM_TIMESTAMPING_PKTINFO: c_int = 0x003c; +pub const SO_PEERGROUPS: c_int = 0x003d; +pub const SO_ZEROCOPY: c_int = 0x003e; +pub const SO_TXTIME: c_int = 0x003f; +pub const SCM_TXTIME: c_int = SO_TXTIME; +pub const SO_BINDTOIFINDEX: c_int = 0x0041; +pub const SO_SECURITY_AUTHENTICATION: c_int = 0x5001; +pub const SO_SECURITY_ENCRYPTION_TRANSPORT: c_int = 0x5002; +pub const SO_SECURITY_ENCRYPTION_NETWORK: c_int = 0x5004; +pub const SO_TIMESTAMP: c_int = 0x001d; +pub const SO_TIMESTAMPNS: c_int = 0x0021; +pub const SO_TIMESTAMPING: c_int = 0x0023; +// pub const SO_TIMESTAMP_OLD: c_int = 0x001d; +// pub const SO_TIMESTAMPNS_OLD: c_int = 0x0021; +// pub const SO_TIMESTAMPING_OLD: c_int = 0x0023; +// pub const SO_TIMESTAMP_NEW: c_int = 0x0046; +// pub 
const SO_TIMESTAMPNS_NEW: c_int = 0x0042; +// pub const SO_TIMESTAMPING_NEW: c_int = 0x0043; +// pub const SO_RCVTIMEO_NEW: c_int = 0x0044; +// pub const SO_SNDTIMEO_NEW: c_int = 0x0045; +// pub const SO_DETACH_REUSEPORT_BPF: c_int = 0x0047; +pub const SO_PREFER_BUSY_POLL: c_int = 0x0048; +pub const SO_BUSY_POLL_BUDGET: c_int = 0x0049; +pub const SO_NETNS_COOKIE: c_int = 0x0050; +pub const SO_BUF_LOCK: c_int = 0x0051; +pub const SO_RESERVE_MEM: c_int = 0x0052; +pub const SO_TXREHASH: c_int = 0x0053; +pub const SO_RCVMARK: c_int = 0x0054; +pub const SO_PASSPIDFD: c_int = 0x0055; +pub const SO_PEERPIDFD: c_int = 0x0056; +pub const SO_DEVMEM_LINEAR: c_int = 0x0057; +pub const SO_DEVMEM_DMABUF: c_int = 0x0058; +pub const SO_DEVMEM_DONTNEED: c_int = 0x0059; + +// Defined in unix/linux_like/mod.rs +// pub const SCM_TIMESTAMP: c_int = SO_TIMESTAMP; +pub const SCM_TIMESTAMPNS: c_int = SO_TIMESTAMPNS; +pub const SCM_TIMESTAMPING: c_int = SO_TIMESTAMPING; + +pub const SCM_DEVMEM_LINEAR: c_int = SO_DEVMEM_LINEAR; +pub const SCM_DEVMEM_DMABUF: c_int = SO_DEVMEM_DMABUF; + +// Ioctl Constants + +pub const TCGETS: Ioctl = 0x40245408; +pub const TCSETS: Ioctl = 0x80245409; +pub const TCSETSW: Ioctl = 0x8024540a; +pub const TCSETSF: Ioctl = 0x8024540b; +pub const TCGETA: Ioctl = 0x40125401; +pub const TCSETA: Ioctl = 0x80125402; +pub const TCSETAW: Ioctl = 0x80125403; +pub const TCSETAF: Ioctl = 0x80125404; +pub const TCSBRK: Ioctl = 0x20005405; +pub const TCXONC: Ioctl = 0x20005406; +pub const TCFLSH: Ioctl = 0x20005407; +pub const TIOCEXCL: Ioctl = 0x2000740d; +pub const TIOCNXCL: Ioctl = 0x2000740e; +pub const TIOCSCTTY: Ioctl = 0x20007484; +pub const TIOCGPGRP: Ioctl = 0x40047483; +pub const TIOCSPGRP: Ioctl = 0x80047482; +pub const TIOCOUTQ: Ioctl = 0x40047473; +pub const TIOCSTI: Ioctl = 0x80017472; +pub const TIOCGWINSZ: Ioctl = 0x40087468; +pub const TIOCSWINSZ: Ioctl = 0x80087467; +pub const TIOCMGET: Ioctl = 0x4004746a; +pub const TIOCMBIS: Ioctl = 0x8004746c; +pub const TIOCMBIC: Ioctl = 0x8004746b; +pub const TIOCMSET: Ioctl = 0x8004746d; +pub const TIOCGSOFTCAR: Ioctl = 0x40047464; +pub const TIOCSSOFTCAR: Ioctl = 0x80047465; +pub const FIONREAD: Ioctl = 0x4004667f; +pub const TIOCINQ: Ioctl = FIONREAD; +pub const TIOCLINUX: Ioctl = 0x541C; +pub const TIOCCONS: Ioctl = 0x20007424; +pub const TIOCGSERIAL: Ioctl = 0x541E; +pub const TIOCSSERIAL: Ioctl = 0x541F; +pub const TIOCPKT: Ioctl = 0x80047470; +pub const FIONBIO: Ioctl = 0x8004667e; +pub const TIOCNOTTY: Ioctl = 0x20007471; +pub const TIOCSETD: Ioctl = 0x80047401; +pub const TIOCGETD: Ioctl = 0x40047400; +pub const TCSBRKP: Ioctl = 0x5425; +pub const TIOCSBRK: Ioctl = 0x2000747b; +pub const TIOCCBRK: Ioctl = 0x2000747a; +pub const TIOCGSID: Ioctl = 0x40047485; +pub const TCGETS2: Ioctl = 0x402c540c; +pub const TCSETS2: Ioctl = 0x802c540d; +pub const TCSETSW2: Ioctl = 0x802c540e; +pub const TCSETSF2: Ioctl = 0x802c540f; +pub const TIOCGPTN: Ioctl = 0x40047486; +pub const TIOCSPTLCK: Ioctl = 0x80047487; +pub const TIOCGDEV: Ioctl = 0x40045432; +pub const TIOCSIG: Ioctl = 0x80047488; +pub const TIOCVHANGUP: Ioctl = 0x20005437; +pub const TIOCGPKT: Ioctl = 0x40045438; +pub const TIOCGPTLCK: Ioctl = 0x40045439; +pub const TIOCGEXCL: Ioctl = 0x40045440; +pub const TIOCGPTPEER: Ioctl = 0x20007489; +pub const FIONCLEX: Ioctl = 0x20006602; +pub const FIOCLEX: Ioctl = 0x20006601; +pub const TIOCSERCONFIG: Ioctl = 0x5453; +pub const TIOCSERGWILD: Ioctl = 0x5454; +pub const TIOCSERSWILD: Ioctl = 0x5455; +pub const TIOCGLCKTRMIOS: Ioctl = 0x5456; +pub 
const TIOCSLCKTRMIOS: Ioctl = 0x5457; +pub const TIOCSERGSTRUCT: Ioctl = 0x5458; +pub const TIOCSERGETLSR: Ioctl = 0x5459; +pub const TIOCSERGETMULTI: Ioctl = 0x545A; +pub const TIOCSERSETMULTI: Ioctl = 0x545B; +pub const TIOCMIWAIT: Ioctl = 0x545C; +pub const TIOCGICOUNT: Ioctl = 0x545D; +pub const TIOCSTART: Ioctl = 0x2000746e; +pub const TIOCSTOP: Ioctl = 0x2000746f; +pub const BLKIOMIN: Ioctl = 0x20001278; +pub const BLKIOOPT: Ioctl = 0x20001279; +pub const BLKSSZGET: Ioctl = 0x20001268; +pub const BLKPBSZGET: Ioctl = 0x2000127B; + +//pub const FIOASYNC: Ioctl = 0x4004667d; +//pub const FIOQSIZE: Ioctl = ; +//pub const TIOCGISO7816: Ioctl = 0x40285443; +//pub const TIOCSISO7816: Ioctl = 0xc0285444; +//pub const TIOCGRS485: Ioctl = 0x40205441; +//pub const TIOCSRS485: Ioctl = 0xc0205442; + +pub const TIOCM_LE: c_int = 0x001; +pub const TIOCM_DTR: c_int = 0x002; +pub const TIOCM_RTS: c_int = 0x004; +pub const TIOCM_ST: c_int = 0x008; +pub const TIOCM_SR: c_int = 0x010; +pub const TIOCM_CTS: c_int = 0x020; +pub const TIOCM_CAR: c_int = 0x040; +pub const TIOCM_CD: c_int = TIOCM_CAR; +pub const TIOCM_RNG: c_int = 0x080; +pub const TIOCM_RI: c_int = TIOCM_RNG; +pub const TIOCM_DSR: c_int = 0x100; + +pub const BOTHER: crate::speed_t = 0x1000; +pub const IBSHIFT: crate::tcflag_t = 16; + +// RLIMIT Constants + +pub const RLIMIT_CPU: crate::__rlimit_resource_t = 0; +pub const RLIMIT_FSIZE: crate::__rlimit_resource_t = 1; +pub const RLIMIT_DATA: crate::__rlimit_resource_t = 2; +pub const RLIMIT_STACK: crate::__rlimit_resource_t = 3; +pub const RLIMIT_CORE: crate::__rlimit_resource_t = 4; +pub const RLIMIT_RSS: crate::__rlimit_resource_t = 5; +pub const RLIMIT_NOFILE: crate::__rlimit_resource_t = 6; +pub const RLIMIT_NPROC: crate::__rlimit_resource_t = 7; +pub const RLIMIT_MEMLOCK: crate::__rlimit_resource_t = 8; +pub const RLIMIT_AS: crate::__rlimit_resource_t = 9; +pub const RLIMIT_LOCKS: crate::__rlimit_resource_t = 10; +pub const RLIMIT_SIGPENDING: crate::__rlimit_resource_t = 11; +pub const RLIMIT_MSGQUEUE: crate::__rlimit_resource_t = 12; +pub const RLIMIT_NICE: crate::__rlimit_resource_t = 13; +pub const RLIMIT_RTPRIO: crate::__rlimit_resource_t = 14; +pub const RLIMIT_RTTIME: crate::__rlimit_resource_t = 15; +#[deprecated(since = "0.2.64", note = "Not stable across OS versions")] +pub const RLIM_NLIMITS: crate::__rlimit_resource_t = 16; +#[allow(deprecated)] +#[deprecated(since = "0.2.64", note = "Not stable across OS versions")] +pub const RLIMIT_NLIMITS: crate::__rlimit_resource_t = RLIM_NLIMITS; + +cfg_if! { + if #[cfg(target_arch = "sparc64")] { + pub const RLIM_INFINITY: crate::rlim_t = !0; + } else if #[cfg(target_arch = "sparc")] { + pub const RLIM_INFINITY: crate::rlim_t = 0x7fffffff; + } +} diff --git a/vendor/libc/src/unix/linux_like/linux/gnu/b32/arm/mod.rs b/vendor/libc/src/unix/linux_like/linux/gnu/b32/arm/mod.rs new file mode 100644 index 00000000000000..900851ab5f42c5 --- /dev/null +++ b/vendor/libc/src/unix/linux_like/linux/gnu/b32/arm/mod.rs @@ -0,0 +1,928 @@ +use crate::prelude::*; +use crate::{off64_t, off_t}; + +pub type wchar_t = u32; + +s! 
{ + // FIXME(1.0): This should not implement `PartialEq` + #[allow(unpredictable_function_pointer_comparisons)] + pub struct sigaction { + pub sa_sigaction: crate::sighandler_t, + pub sa_mask: crate::sigset_t, + pub sa_flags: c_int, + pub sa_restorer: Option, + } + + pub struct statfs { + pub f_type: crate::__fsword_t, + pub f_bsize: crate::__fsword_t, + pub f_blocks: crate::fsblkcnt_t, + pub f_bfree: crate::fsblkcnt_t, + pub f_bavail: crate::fsblkcnt_t, + + pub f_files: crate::fsfilcnt_t, + pub f_ffree: crate::fsfilcnt_t, + pub f_fsid: crate::fsid_t, + + pub f_namelen: crate::__fsword_t, + pub f_frsize: crate::__fsword_t, + pub f_flags: crate::__fsword_t, + f_spare: [crate::__fsword_t; 4], + } + + pub struct flock { + pub l_type: c_short, + pub l_whence: c_short, + pub l_start: off_t, + pub l_len: off_t, + pub l_pid: crate::pid_t, + } + + pub struct flock64 { + pub l_type: c_short, + pub l_whence: c_short, + pub l_start: off64_t, + pub l_len: off64_t, + pub l_pid: crate::pid_t, + } + + pub struct ipc_perm { + pub __key: crate::key_t, + pub uid: crate::uid_t, + pub gid: crate::gid_t, + pub cuid: crate::uid_t, + pub cgid: crate::gid_t, + pub mode: c_ushort, + __pad1: c_ushort, + pub __seq: c_ushort, + __pad2: c_ushort, + __unused1: c_ulong, + __unused2: c_ulong, + } + + pub struct stat64 { + pub st_dev: crate::dev_t, + #[cfg(not(gnu_time_bits64))] + __pad1: c_uint, + #[cfg(not(gnu_time_bits64))] + __st_ino: c_ulong, + #[cfg(gnu_time_bits64)] + pub st_ino: crate::ino_t, + pub st_mode: crate::mode_t, + pub st_nlink: crate::nlink_t, + pub st_uid: crate::uid_t, + pub st_gid: crate::gid_t, + pub st_rdev: crate::dev_t, + #[cfg(not(gnu_time_bits64))] + __pad2: c_uint, + pub st_size: off64_t, + pub st_blksize: crate::blksize_t, + pub st_blocks: crate::blkcnt64_t, + pub st_atime: crate::time_t, + pub st_atime_nsec: c_long, + #[cfg(gnu_time_bits64)] + _atime_pad: c_int, + pub st_mtime: crate::time_t, + pub st_mtime_nsec: c_long, + #[cfg(gnu_time_bits64)] + _mtime_pad: c_int, + pub st_ctime: crate::time_t, + pub st_ctime_nsec: c_long, + #[cfg(gnu_time_bits64)] + _ctime_pad: c_int, + #[cfg(not(gnu_time_bits64))] + pub st_ino: crate::ino64_t, + } + + pub struct statfs64 { + pub f_type: crate::__fsword_t, + pub f_bsize: crate::__fsword_t, + pub f_blocks: u64, + pub f_bfree: u64, + pub f_bavail: u64, + pub f_files: u64, + pub f_ffree: u64, + pub f_fsid: crate::fsid_t, + pub f_namelen: crate::__fsword_t, + pub f_frsize: crate::__fsword_t, + pub f_flags: crate::__fsword_t, + pub f_spare: [crate::__fsword_t; 4], + } + + pub struct statvfs64 { + pub f_bsize: c_ulong, + pub f_frsize: c_ulong, + pub f_blocks: u64, + pub f_bfree: u64, + pub f_bavail: u64, + pub f_files: u64, + pub f_ffree: u64, + pub f_favail: u64, + pub f_fsid: c_ulong, + __f_unused: c_int, + pub f_flag: c_ulong, + pub f_namemax: c_ulong, + __f_spare: [c_int; 6], + } + + pub struct shmid_ds { + pub shm_perm: crate::ipc_perm, + pub shm_segsz: size_t, + pub shm_atime: crate::time_t, + #[cfg(not(gnu_time_bits64))] + __unused1: c_ulong, + pub shm_dtime: crate::time_t, + #[cfg(not(gnu_time_bits64))] + __unused2: c_ulong, + pub shm_ctime: crate::time_t, + #[cfg(not(gnu_time_bits64))] + __unused3: c_ulong, + pub shm_cpid: crate::pid_t, + pub shm_lpid: crate::pid_t, + pub shm_nattch: crate::shmatt_t, + __unused4: c_ulong, + __unused5: c_ulong, + } + + pub struct msqid_ds { + pub msg_perm: crate::ipc_perm, + pub msg_stime: crate::time_t, + #[cfg(not(gnu_time_bits64))] + __glibc_reserved1: c_ulong, + pub msg_rtime: crate::time_t, + 
#[cfg(not(gnu_time_bits64))] + __glibc_reserved2: c_ulong, + pub msg_ctime: crate::time_t, + #[cfg(not(gnu_time_bits64))] + __glibc_reserved3: c_ulong, + pub __msg_cbytes: c_ulong, + pub msg_qnum: crate::msgqnum_t, + pub msg_qbytes: crate::msglen_t, + pub msg_lspid: crate::pid_t, + pub msg_lrpid: crate::pid_t, + __glibc_reserved4: c_ulong, + __glibc_reserved5: c_ulong, + } + + pub struct siginfo_t { + pub si_signo: c_int, + pub si_errno: c_int, + pub si_code: c_int, + #[doc(hidden)] + #[deprecated( + since = "0.2.54", + note = "Please leave a comment on \ + https://github.com/rust-lang/libc/pull/1316 if you're using \ + this field" + )] + pub _pad: [c_int; 29], + _align: [usize; 0], + } + + pub struct stack_t { + pub ss_sp: *mut c_void, + pub ss_flags: c_int, + pub ss_size: size_t, + } + + pub struct mcontext_t { + pub trap_no: c_ulong, + pub error_code: c_ulong, + pub oldmask: c_ulong, + pub arm_r0: c_ulong, + pub arm_r1: c_ulong, + pub arm_r2: c_ulong, + pub arm_r3: c_ulong, + pub arm_r4: c_ulong, + pub arm_r5: c_ulong, + pub arm_r6: c_ulong, + pub arm_r7: c_ulong, + pub arm_r8: c_ulong, + pub arm_r9: c_ulong, + pub arm_r10: c_ulong, + pub arm_fp: c_ulong, + pub arm_ip: c_ulong, + pub arm_sp: c_ulong, + pub arm_lr: c_ulong, + pub arm_pc: c_ulong, + pub arm_cpsr: c_ulong, + pub fault_address: c_ulong, + } + + pub struct user_regs { + pub arm_r0: c_ulong, + pub arm_r1: c_ulong, + pub arm_r2: c_ulong, + pub arm_r3: c_ulong, + pub arm_r4: c_ulong, + pub arm_r5: c_ulong, + pub arm_r6: c_ulong, + pub arm_r7: c_ulong, + pub arm_r8: c_ulong, + pub arm_r9: c_ulong, + pub arm_r10: c_ulong, + pub arm_fp: c_ulong, + pub arm_ip: c_ulong, + pub arm_sp: c_ulong, + pub arm_lr: c_ulong, + pub arm_pc: c_ulong, + pub arm_cpsr: c_ulong, + pub arm_orig_r0: c_ulong, + } +} + +s_no_extra_traits! { + #[repr(align(8))] + pub struct max_align_t { + priv_: [i64; 2], + } + + #[repr(align(8))] + pub struct ucontext_t { + pub uc_flags: c_ulong, + pub uc_link: *mut ucontext_t, + pub uc_stack: crate::stack_t, + pub uc_mcontext: crate::mcontext_t, + pub uc_sigmask: crate::sigset_t, + pub uc_regspace: [c_ulong; 128], + } +} + +cfg_if! 
{ + if #[cfg(feature = "extra_traits")] { + impl PartialEq for ucontext_t { + fn eq(&self, other: &ucontext_t) -> bool { + self.uc_flags == other.uc_flags + && self.uc_link == other.uc_link + && self.uc_stack == other.uc_stack + && self.uc_mcontext == other.uc_mcontext + && self.uc_sigmask == other.uc_sigmask + } + } + impl Eq for ucontext_t {} + impl hash::Hash for ucontext_t { + fn hash(&self, state: &mut H) { + self.uc_flags.hash(state); + self.uc_link.hash(state); + self.uc_stack.hash(state); + self.uc_mcontext.hash(state); + self.uc_sigmask.hash(state); + } + } + } +} + +pub const VEOF: usize = 4; +pub const RTLD_DEEPBIND: c_int = 0x8; +pub const RTLD_GLOBAL: c_int = 0x100; +pub const RTLD_NOLOAD: c_int = 0x4; +pub const O_DIRECT: c_int = 0x10000; +pub const O_DIRECTORY: c_int = 0x4000; +pub const O_NOFOLLOW: c_int = 0x8000; +pub const O_LARGEFILE: c_int = 0o400000; +pub const O_APPEND: c_int = 1024; +pub const O_CREAT: c_int = 64; +pub const O_EXCL: c_int = 128; +pub const O_NOCTTY: c_int = 256; +pub const O_NONBLOCK: c_int = 2048; +pub const O_SYNC: c_int = 1052672; +pub const O_RSYNC: c_int = 1052672; +pub const O_DSYNC: c_int = 4096; +pub const O_FSYNC: c_int = 0x101000; +pub const O_ASYNC: c_int = 0x2000; +pub const O_NDELAY: c_int = 0x800; + +pub const MADV_SOFT_OFFLINE: c_int = 101; +pub const MAP_LOCKED: c_int = 0x02000; +pub const MAP_NORESERVE: c_int = 0x04000; +pub const MAP_ANON: c_int = 0x0020; +pub const MAP_ANONYMOUS: c_int = 0x0020; +pub const MAP_DENYWRITE: c_int = 0x0800; +pub const MAP_EXECUTABLE: c_int = 0x01000; +pub const MAP_POPULATE: c_int = 0x08000; +pub const MAP_NONBLOCK: c_int = 0x010000; +pub const MAP_STACK: c_int = 0x020000; +pub const MAP_HUGETLB: c_int = 0x040000; +pub const MAP_GROWSDOWN: c_int = 0x0100; +pub const MAP_SYNC: c_int = 0x080000; + +pub const EDEADLOCK: c_int = 35; +pub const EUCLEAN: c_int = 117; +pub const ENOTNAM: c_int = 118; +pub const ENAVAIL: c_int = 119; +pub const EISNAM: c_int = 120; +pub const EREMOTEIO: c_int = 121; +pub const EDEADLK: c_int = 35; +pub const ENAMETOOLONG: c_int = 36; +pub const ENOLCK: c_int = 37; +pub const ENOSYS: c_int = 38; +pub const ENOTEMPTY: c_int = 39; +pub const ELOOP: c_int = 40; +pub const ENOMSG: c_int = 42; +pub const EIDRM: c_int = 43; +pub const ECHRNG: c_int = 44; +pub const EL2NSYNC: c_int = 45; +pub const EL3HLT: c_int = 46; +pub const EL3RST: c_int = 47; +pub const ELNRNG: c_int = 48; +pub const EUNATCH: c_int = 49; +pub const ENOCSI: c_int = 50; +pub const EL2HLT: c_int = 51; +pub const EBADE: c_int = 52; +pub const EBADR: c_int = 53; +pub const EXFULL: c_int = 54; +pub const ENOANO: c_int = 55; +pub const EBADRQC: c_int = 56; +pub const EBADSLT: c_int = 57; +pub const EMULTIHOP: c_int = 72; +pub const EOVERFLOW: c_int = 75; +pub const ENOTUNIQ: c_int = 76; +pub const EBADFD: c_int = 77; +pub const EBADMSG: c_int = 74; +pub const EREMCHG: c_int = 78; +pub const ELIBACC: c_int = 79; +pub const ELIBBAD: c_int = 80; +pub const ELIBSCN: c_int = 81; +pub const ELIBMAX: c_int = 82; +pub const ELIBEXEC: c_int = 83; +pub const EILSEQ: c_int = 84; +pub const ERESTART: c_int = 85; +pub const ESTRPIPE: c_int = 86; +pub const EUSERS: c_int = 87; +pub const ENOTSOCK: c_int = 88; +pub const EDESTADDRREQ: c_int = 89; +pub const EMSGSIZE: c_int = 90; +pub const EPROTOTYPE: c_int = 91; +pub const ENOPROTOOPT: c_int = 92; +pub const EPROTONOSUPPORT: c_int = 93; +pub const ESOCKTNOSUPPORT: c_int = 94; +pub const EOPNOTSUPP: c_int = 95; +pub const EPFNOSUPPORT: c_int = 96; +pub const EAFNOSUPPORT: c_int = 97; 
+pub const EADDRINUSE: c_int = 98; +pub const EADDRNOTAVAIL: c_int = 99; +pub const ENETDOWN: c_int = 100; +pub const ENETUNREACH: c_int = 101; +pub const ENETRESET: c_int = 102; +pub const ECONNABORTED: c_int = 103; +pub const ECONNRESET: c_int = 104; +pub const ENOBUFS: c_int = 105; +pub const EISCONN: c_int = 106; +pub const ENOTCONN: c_int = 107; +pub const ESHUTDOWN: c_int = 108; +pub const ETOOMANYREFS: c_int = 109; +pub const ETIMEDOUT: c_int = 110; +pub const ECONNREFUSED: c_int = 111; +pub const EHOSTDOWN: c_int = 112; +pub const EHOSTUNREACH: c_int = 113; +pub const EALREADY: c_int = 114; +pub const EINPROGRESS: c_int = 115; +pub const ESTALE: c_int = 116; +pub const EDQUOT: c_int = 122; +pub const ENOMEDIUM: c_int = 123; +pub const EMEDIUMTYPE: c_int = 124; +pub const ECANCELED: c_int = 125; +pub const ENOKEY: c_int = 126; +pub const EKEYEXPIRED: c_int = 127; +pub const EKEYREVOKED: c_int = 128; +pub const EKEYREJECTED: c_int = 129; +pub const EOWNERDEAD: c_int = 130; +pub const ENOTRECOVERABLE: c_int = 131; +pub const EHWPOISON: c_int = 133; +pub const ERFKILL: c_int = 132; + +pub const SA_SIGINFO: c_int = 0x00000004; +pub const SA_NOCLDWAIT: c_int = 0x00000002; + +pub const SOCK_STREAM: c_int = 1; +pub const SOCK_DGRAM: c_int = 2; + +pub const MCL_CURRENT: c_int = 0x0001; +pub const MCL_FUTURE: c_int = 0x0002; +pub const MCL_ONFAULT: c_int = 0x0004; + +pub const POLLWRNORM: c_short = 0x100; +pub const POLLWRBAND: c_short = 0x200; + +cfg_if! { + if #[cfg(gnu_file_offset_bits64)] { + pub const F_GETLK: c_int = 12; + } else { + pub const F_GETLK: c_int = 5; + } +} +pub const F_GETOWN: c_int = 9; +pub const F_SETOWN: c_int = 8; + +pub const EFD_NONBLOCK: c_int = 0x800; +pub const SFD_NONBLOCK: c_int = 0x0800; + +pub const SIGCHLD: c_int = 17; +pub const SIGBUS: c_int = 7; +pub const SIGUSR1: c_int = 10; +pub const SIGUSR2: c_int = 12; +pub const SIGCONT: c_int = 18; +pub const SIGSTOP: c_int = 19; +pub const SIGTSTP: c_int = 20; +pub const SIGURG: c_int = 23; +pub const SIGIO: c_int = 29; +pub const SIGSYS: c_int = 31; +pub const SIGSTKFLT: c_int = 16; +#[deprecated(since = "0.2.55", note = "Use SIGSYS instead")] +pub const SIGUNUSED: c_int = 31; +pub const SIGPOLL: c_int = 29; +pub const SIGPWR: c_int = 30; +pub const SIG_SETMASK: c_int = 2; +pub const SIG_BLOCK: c_int = 0x000000; +pub const SIG_UNBLOCK: c_int = 0x01; +pub const SIGTTIN: c_int = 21; +pub const SIGTTOU: c_int = 22; +pub const SIGXCPU: c_int = 24; +pub const SIGXFSZ: c_int = 25; +pub const SIGVTALRM: c_int = 26; +pub const SIGPROF: c_int = 27; +pub const SIGWINCH: c_int = 28; +pub const SIGSTKSZ: size_t = 8192; +pub const MINSIGSTKSZ: size_t = 2048; +pub const CBAUD: crate::tcflag_t = 0o0010017; +pub const TAB1: crate::tcflag_t = 0x00000800; +pub const TAB2: crate::tcflag_t = 0x00001000; +pub const TAB3: crate::tcflag_t = 0x00001800; +pub const CR1: crate::tcflag_t = 0x00000200; +pub const CR2: crate::tcflag_t = 0x00000400; +pub const CR3: crate::tcflag_t = 0x00000600; +pub const FF1: crate::tcflag_t = 0x00008000; +pub const BS1: crate::tcflag_t = 0x00002000; +pub const VT1: crate::tcflag_t = 0x00004000; +pub const VWERASE: usize = 14; +pub const VREPRINT: usize = 12; +pub const VSUSP: usize = 10; +pub const VSTART: usize = 8; +pub const VSTOP: usize = 9; +pub const VDISCARD: usize = 13; +pub const VTIME: usize = 5; +pub const IXON: crate::tcflag_t = 0x00000400; +pub const IXOFF: crate::tcflag_t = 0x00001000; +pub const ONLCR: crate::tcflag_t = 0x4; +pub const CSIZE: crate::tcflag_t = 0x00000030; +pub const CS6: 
crate::tcflag_t = 0x00000010; +pub const CS7: crate::tcflag_t = 0x00000020; +pub const CS8: crate::tcflag_t = 0x00000030; +pub const CSTOPB: crate::tcflag_t = 0x00000040; +pub const CREAD: crate::tcflag_t = 0x00000080; +pub const PARENB: crate::tcflag_t = 0x00000100; +pub const PARODD: crate::tcflag_t = 0x00000200; +pub const HUPCL: crate::tcflag_t = 0x00000400; +pub const CLOCAL: crate::tcflag_t = 0x00000800; +pub const ECHOKE: crate::tcflag_t = 0x00000800; +pub const ECHOE: crate::tcflag_t = 0x00000010; +pub const ECHOK: crate::tcflag_t = 0x00000020; +pub const ECHONL: crate::tcflag_t = 0x00000040; +pub const ECHOPRT: crate::tcflag_t = 0x00000400; +pub const ECHOCTL: crate::tcflag_t = 0x00000200; +pub const ISIG: crate::tcflag_t = 0x00000001; +pub const ICANON: crate::tcflag_t = 0x00000002; +pub const PENDIN: crate::tcflag_t = 0x00004000; +pub const NOFLSH: crate::tcflag_t = 0x00000080; +pub const CIBAUD: crate::tcflag_t = 0o02003600000; +pub const CBAUDEX: crate::tcflag_t = 0o010000; +pub const VSWTC: usize = 7; +pub const OLCUC: crate::tcflag_t = 0o000002; +pub const NLDLY: crate::tcflag_t = 0o000400; +pub const CRDLY: crate::tcflag_t = 0o003000; +pub const TABDLY: crate::tcflag_t = 0o014000; +pub const BSDLY: crate::tcflag_t = 0o020000; +pub const FFDLY: crate::tcflag_t = 0o100000; +pub const VTDLY: crate::tcflag_t = 0o040000; +pub const XTABS: crate::tcflag_t = 0o014000; + +pub const B0: crate::speed_t = 0o000000; +pub const B50: crate::speed_t = 0o000001; +pub const B75: crate::speed_t = 0o000002; +pub const B110: crate::speed_t = 0o000003; +pub const B134: crate::speed_t = 0o000004; +pub const B150: crate::speed_t = 0o000005; +pub const B200: crate::speed_t = 0o000006; +pub const B300: crate::speed_t = 0o000007; +pub const B600: crate::speed_t = 0o000010; +pub const B1200: crate::speed_t = 0o000011; +pub const B1800: crate::speed_t = 0o000012; +pub const B2400: crate::speed_t = 0o000013; +pub const B4800: crate::speed_t = 0o000014; +pub const B9600: crate::speed_t = 0o000015; +pub const B19200: crate::speed_t = 0o000016; +pub const B38400: crate::speed_t = 0o000017; +pub const EXTA: crate::speed_t = B19200; +pub const EXTB: crate::speed_t = B38400; +pub const B57600: crate::speed_t = 0o010001; +pub const B115200: crate::speed_t = 0o010002; +pub const B230400: crate::speed_t = 0o010003; +pub const B460800: crate::speed_t = 0o010004; +pub const B500000: crate::speed_t = 0o010005; +pub const B576000: crate::speed_t = 0o010006; +pub const B921600: crate::speed_t = 0o010007; +pub const B1000000: crate::speed_t = 0o010010; +pub const B1152000: crate::speed_t = 0o010011; +pub const B1500000: crate::speed_t = 0o010012; +pub const B2000000: crate::speed_t = 0o010013; +pub const B2500000: crate::speed_t = 0o010014; +pub const B3000000: crate::speed_t = 0o010015; +pub const B3500000: crate::speed_t = 0o010016; +pub const B4000000: crate::speed_t = 0o010017; + +pub const VEOL: usize = 11; +pub const VEOL2: usize = 16; +pub const VMIN: usize = 6; +pub const IEXTEN: crate::tcflag_t = 0x00008000; +pub const TOSTOP: crate::tcflag_t = 0x00000100; +pub const FLUSHO: crate::tcflag_t = 0x00001000; +pub const EXTPROC: crate::tcflag_t = 0x00010000; + +pub const TCSANOW: c_int = 0; +pub const TCSADRAIN: c_int = 1; +pub const TCSAFLUSH: c_int = 2; + +// Syscall table +pub const SYS_restart_syscall: c_long = 0; +pub const SYS_exit: c_long = 1; +pub const SYS_fork: c_long = 2; +pub const SYS_read: c_long = 3; +pub const SYS_write: c_long = 4; +pub const SYS_open: c_long = 5; +pub const SYS_close: c_long = 
6; +pub const SYS_creat: c_long = 8; +pub const SYS_link: c_long = 9; +pub const SYS_unlink: c_long = 10; +pub const SYS_execve: c_long = 11; +pub const SYS_chdir: c_long = 12; +pub const SYS_mknod: c_long = 14; +pub const SYS_chmod: c_long = 15; +pub const SYS_lchown: c_long = 16; +pub const SYS_lseek: c_long = 19; +pub const SYS_getpid: c_long = 20; +pub const SYS_mount: c_long = 21; +pub const SYS_setuid: c_long = 23; +pub const SYS_getuid: c_long = 24; +pub const SYS_ptrace: c_long = 26; +pub const SYS_pause: c_long = 29; +pub const SYS_access: c_long = 33; +pub const SYS_nice: c_long = 34; +pub const SYS_sync: c_long = 36; +pub const SYS_kill: c_long = 37; +pub const SYS_rename: c_long = 38; +pub const SYS_mkdir: c_long = 39; +pub const SYS_rmdir: c_long = 40; +pub const SYS_dup: c_long = 41; +pub const SYS_pipe: c_long = 42; +pub const SYS_times: c_long = 43; +pub const SYS_brk: c_long = 45; +pub const SYS_setgid: c_long = 46; +pub const SYS_getgid: c_long = 47; +pub const SYS_geteuid: c_long = 49; +pub const SYS_getegid: c_long = 50; +pub const SYS_acct: c_long = 51; +pub const SYS_umount2: c_long = 52; +pub const SYS_ioctl: c_long = 54; +pub const SYS_fcntl: c_long = 55; +pub const SYS_setpgid: c_long = 57; +pub const SYS_umask: c_long = 60; +pub const SYS_chroot: c_long = 61; +pub const SYS_ustat: c_long = 62; +pub const SYS_dup2: c_long = 63; +pub const SYS_getppid: c_long = 64; +pub const SYS_getpgrp: c_long = 65; +pub const SYS_setsid: c_long = 66; +pub const SYS_sigaction: c_long = 67; +pub const SYS_setreuid: c_long = 70; +pub const SYS_setregid: c_long = 71; +pub const SYS_sigsuspend: c_long = 72; +pub const SYS_sigpending: c_long = 73; +pub const SYS_sethostname: c_long = 74; +pub const SYS_setrlimit: c_long = 75; +pub const SYS_getrusage: c_long = 77; +pub const SYS_gettimeofday: c_long = 78; +pub const SYS_settimeofday: c_long = 79; +pub const SYS_getgroups: c_long = 80; +pub const SYS_setgroups: c_long = 81; +pub const SYS_symlink: c_long = 83; +pub const SYS_readlink: c_long = 85; +pub const SYS_uselib: c_long = 86; +pub const SYS_swapon: c_long = 87; +pub const SYS_reboot: c_long = 88; +pub const SYS_munmap: c_long = 91; +pub const SYS_truncate: c_long = 92; +pub const SYS_ftruncate: c_long = 93; +pub const SYS_fchmod: c_long = 94; +pub const SYS_fchown: c_long = 95; +pub const SYS_getpriority: c_long = 96; +pub const SYS_setpriority: c_long = 97; +pub const SYS_statfs: c_long = 99; +pub const SYS_fstatfs: c_long = 100; +pub const SYS_syslog: c_long = 103; +pub const SYS_setitimer: c_long = 104; +pub const SYS_getitimer: c_long = 105; +pub const SYS_stat: c_long = 106; +pub const SYS_lstat: c_long = 107; +pub const SYS_fstat: c_long = 108; +pub const SYS_vhangup: c_long = 111; +pub const SYS_wait4: c_long = 114; +pub const SYS_swapoff: c_long = 115; +pub const SYS_sysinfo: c_long = 116; +pub const SYS_fsync: c_long = 118; +pub const SYS_sigreturn: c_long = 119; +pub const SYS_clone: c_long = 120; +pub const SYS_setdomainname: c_long = 121; +pub const SYS_uname: c_long = 122; +pub const SYS_adjtimex: c_long = 124; +pub const SYS_mprotect: c_long = 125; +pub const SYS_sigprocmask: c_long = 126; +pub const SYS_init_module: c_long = 128; +pub const SYS_delete_module: c_long = 129; +pub const SYS_quotactl: c_long = 131; +pub const SYS_getpgid: c_long = 132; +pub const SYS_fchdir: c_long = 133; +pub const SYS_bdflush: c_long = 134; +pub const SYS_sysfs: c_long = 135; +pub const SYS_personality: c_long = 136; +pub const SYS_setfsuid: c_long = 138; +pub const SYS_setfsgid: 
c_long = 139; +pub const SYS__llseek: c_long = 140; +pub const SYS_getdents: c_long = 141; +pub const SYS__newselect: c_long = 142; +pub const SYS_flock: c_long = 143; +pub const SYS_msync: c_long = 144; +pub const SYS_readv: c_long = 145; +pub const SYS_writev: c_long = 146; +pub const SYS_getsid: c_long = 147; +pub const SYS_fdatasync: c_long = 148; +pub const SYS__sysctl: c_long = 149; +pub const SYS_mlock: c_long = 150; +pub const SYS_munlock: c_long = 151; +pub const SYS_mlockall: c_long = 152; +pub const SYS_munlockall: c_long = 153; +pub const SYS_sched_setparam: c_long = 154; +pub const SYS_sched_getparam: c_long = 155; +pub const SYS_sched_setscheduler: c_long = 156; +pub const SYS_sched_getscheduler: c_long = 157; +pub const SYS_sched_yield: c_long = 158; +pub const SYS_sched_get_priority_max: c_long = 159; +pub const SYS_sched_get_priority_min: c_long = 160; +pub const SYS_sched_rr_get_interval: c_long = 161; +pub const SYS_nanosleep: c_long = 162; +pub const SYS_mremap: c_long = 163; +pub const SYS_setresuid: c_long = 164; +pub const SYS_getresuid: c_long = 165; +pub const SYS_poll: c_long = 168; +pub const SYS_nfsservctl: c_long = 169; +pub const SYS_setresgid: c_long = 170; +pub const SYS_getresgid: c_long = 171; +pub const SYS_prctl: c_long = 172; +pub const SYS_rt_sigreturn: c_long = 173; +pub const SYS_rt_sigaction: c_long = 174; +pub const SYS_rt_sigprocmask: c_long = 175; +pub const SYS_rt_sigpending: c_long = 176; +pub const SYS_rt_sigtimedwait: c_long = 177; +pub const SYS_rt_sigqueueinfo: c_long = 178; +pub const SYS_rt_sigsuspend: c_long = 179; +pub const SYS_pread64: c_long = 180; +pub const SYS_pwrite64: c_long = 181; +pub const SYS_chown: c_long = 182; +pub const SYS_getcwd: c_long = 183; +pub const SYS_capget: c_long = 184; +pub const SYS_capset: c_long = 185; +pub const SYS_sigaltstack: c_long = 186; +pub const SYS_sendfile: c_long = 187; +pub const SYS_vfork: c_long = 190; +pub const SYS_ugetrlimit: c_long = 191; +pub const SYS_mmap2: c_long = 192; +pub const SYS_truncate64: c_long = 193; +pub const SYS_ftruncate64: c_long = 194; +pub const SYS_stat64: c_long = 195; +pub const SYS_lstat64: c_long = 196; +pub const SYS_fstat64: c_long = 197; +pub const SYS_lchown32: c_long = 198; +pub const SYS_getuid32: c_long = 199; +pub const SYS_getgid32: c_long = 200; +pub const SYS_geteuid32: c_long = 201; +pub const SYS_getegid32: c_long = 202; +pub const SYS_setreuid32: c_long = 203; +pub const SYS_setregid32: c_long = 204; +pub const SYS_getgroups32: c_long = 205; +pub const SYS_setgroups32: c_long = 206; +pub const SYS_fchown32: c_long = 207; +pub const SYS_setresuid32: c_long = 208; +pub const SYS_getresuid32: c_long = 209; +pub const SYS_setresgid32: c_long = 210; +pub const SYS_getresgid32: c_long = 211; +pub const SYS_chown32: c_long = 212; +pub const SYS_setuid32: c_long = 213; +pub const SYS_setgid32: c_long = 214; +pub const SYS_setfsuid32: c_long = 215; +pub const SYS_setfsgid32: c_long = 216; +pub const SYS_getdents64: c_long = 217; +pub const SYS_pivot_root: c_long = 218; +pub const SYS_mincore: c_long = 219; +pub const SYS_madvise: c_long = 220; +pub const SYS_fcntl64: c_long = 221; +pub const SYS_gettid: c_long = 224; +pub const SYS_readahead: c_long = 225; +pub const SYS_setxattr: c_long = 226; +pub const SYS_lsetxattr: c_long = 227; +pub const SYS_fsetxattr: c_long = 228; +pub const SYS_getxattr: c_long = 229; +pub const SYS_lgetxattr: c_long = 230; +pub const SYS_fgetxattr: c_long = 231; +pub const SYS_listxattr: c_long = 232; +pub const SYS_llistxattr: 
c_long = 233; +pub const SYS_flistxattr: c_long = 234; +pub const SYS_removexattr: c_long = 235; +pub const SYS_lremovexattr: c_long = 236; +pub const SYS_fremovexattr: c_long = 237; +pub const SYS_tkill: c_long = 238; +pub const SYS_sendfile64: c_long = 239; +pub const SYS_futex: c_long = 240; +pub const SYS_sched_setaffinity: c_long = 241; +pub const SYS_sched_getaffinity: c_long = 242; +pub const SYS_io_setup: c_long = 243; +pub const SYS_io_destroy: c_long = 244; +pub const SYS_io_getevents: c_long = 245; +pub const SYS_io_submit: c_long = 246; +pub const SYS_io_cancel: c_long = 247; +pub const SYS_exit_group: c_long = 248; +pub const SYS_lookup_dcookie: c_long = 249; +pub const SYS_epoll_create: c_long = 250; +pub const SYS_epoll_ctl: c_long = 251; +pub const SYS_epoll_wait: c_long = 252; +pub const SYS_remap_file_pages: c_long = 253; +pub const SYS_set_tid_address: c_long = 256; +pub const SYS_timer_create: c_long = 257; +pub const SYS_timer_settime: c_long = 258; +pub const SYS_timer_gettime: c_long = 259; +pub const SYS_timer_getoverrun: c_long = 260; +pub const SYS_timer_delete: c_long = 261; +pub const SYS_clock_settime: c_long = 262; +pub const SYS_clock_gettime: c_long = 263; +pub const SYS_clock_getres: c_long = 264; +pub const SYS_clock_nanosleep: c_long = 265; +pub const SYS_statfs64: c_long = 266; +pub const SYS_fstatfs64: c_long = 267; +pub const SYS_tgkill: c_long = 268; +pub const SYS_utimes: c_long = 269; +pub const SYS_arm_fadvise64_64: c_long = 270; +pub const SYS_pciconfig_iobase: c_long = 271; +pub const SYS_pciconfig_read: c_long = 272; +pub const SYS_pciconfig_write: c_long = 273; +pub const SYS_mq_open: c_long = 274; +pub const SYS_mq_unlink: c_long = 275; +pub const SYS_mq_timedsend: c_long = 276; +pub const SYS_mq_timedreceive: c_long = 277; +pub const SYS_mq_notify: c_long = 278; +pub const SYS_mq_getsetattr: c_long = 279; +pub const SYS_waitid: c_long = 280; +pub const SYS_socket: c_long = 281; +pub const SYS_bind: c_long = 282; +pub const SYS_connect: c_long = 283; +pub const SYS_listen: c_long = 284; +pub const SYS_accept: c_long = 285; +pub const SYS_getsockname: c_long = 286; +pub const SYS_getpeername: c_long = 287; +pub const SYS_socketpair: c_long = 288; +pub const SYS_send: c_long = 289; +pub const SYS_sendto: c_long = 290; +pub const SYS_recv: c_long = 291; +pub const SYS_recvfrom: c_long = 292; +pub const SYS_shutdown: c_long = 293; +pub const SYS_setsockopt: c_long = 294; +pub const SYS_getsockopt: c_long = 295; +pub const SYS_sendmsg: c_long = 296; +pub const SYS_recvmsg: c_long = 297; +pub const SYS_semop: c_long = 298; +pub const SYS_semget: c_long = 299; +pub const SYS_semctl: c_long = 300; +pub const SYS_msgsnd: c_long = 301; +pub const SYS_msgrcv: c_long = 302; +pub const SYS_msgget: c_long = 303; +pub const SYS_msgctl: c_long = 304; +pub const SYS_shmat: c_long = 305; +pub const SYS_shmdt: c_long = 306; +pub const SYS_shmget: c_long = 307; +pub const SYS_shmctl: c_long = 308; +pub const SYS_add_key: c_long = 309; +pub const SYS_request_key: c_long = 310; +pub const SYS_keyctl: c_long = 311; +pub const SYS_semtimedop: c_long = 312; +pub const SYS_vserver: c_long = 313; +pub const SYS_ioprio_set: c_long = 314; +pub const SYS_ioprio_get: c_long = 315; +pub const SYS_inotify_init: c_long = 316; +pub const SYS_inotify_add_watch: c_long = 317; +pub const SYS_inotify_rm_watch: c_long = 318; +pub const SYS_mbind: c_long = 319; +pub const SYS_get_mempolicy: c_long = 320; +pub const SYS_set_mempolicy: c_long = 321; +pub const SYS_openat: c_long = 322; 
+pub const SYS_mkdirat: c_long = 323; +pub const SYS_mknodat: c_long = 324; +pub const SYS_fchownat: c_long = 325; +pub const SYS_futimesat: c_long = 326; +pub const SYS_fstatat64: c_long = 327; +pub const SYS_unlinkat: c_long = 328; +pub const SYS_renameat: c_long = 329; +pub const SYS_linkat: c_long = 330; +pub const SYS_symlinkat: c_long = 331; +pub const SYS_readlinkat: c_long = 332; +pub const SYS_fchmodat: c_long = 333; +pub const SYS_faccessat: c_long = 334; +pub const SYS_pselect6: c_long = 335; +pub const SYS_ppoll: c_long = 336; +pub const SYS_unshare: c_long = 337; +pub const SYS_set_robust_list: c_long = 338; +pub const SYS_get_robust_list: c_long = 339; +pub const SYS_splice: c_long = 340; +pub const SYS_arm_sync_file_range: c_long = 341; +pub const SYS_tee: c_long = 342; +pub const SYS_vmsplice: c_long = 343; +pub const SYS_move_pages: c_long = 344; +pub const SYS_getcpu: c_long = 345; +pub const SYS_epoll_pwait: c_long = 346; +pub const SYS_kexec_load: c_long = 347; +pub const SYS_utimensat: c_long = 348; +pub const SYS_signalfd: c_long = 349; +pub const SYS_timerfd_create: c_long = 350; +pub const SYS_eventfd: c_long = 351; +pub const SYS_fallocate: c_long = 352; +pub const SYS_timerfd_settime: c_long = 353; +pub const SYS_timerfd_gettime: c_long = 354; +pub const SYS_signalfd4: c_long = 355; +pub const SYS_eventfd2: c_long = 356; +pub const SYS_epoll_create1: c_long = 357; +pub const SYS_dup3: c_long = 358; +pub const SYS_pipe2: c_long = 359; +pub const SYS_inotify_init1: c_long = 360; +pub const SYS_preadv: c_long = 361; +pub const SYS_pwritev: c_long = 362; +pub const SYS_rt_tgsigqueueinfo: c_long = 363; +pub const SYS_perf_event_open: c_long = 364; +pub const SYS_recvmmsg: c_long = 365; +pub const SYS_accept4: c_long = 366; +pub const SYS_fanotify_init: c_long = 367; +pub const SYS_fanotify_mark: c_long = 368; +pub const SYS_prlimit64: c_long = 369; +pub const SYS_name_to_handle_at: c_long = 370; +pub const SYS_open_by_handle_at: c_long = 371; +pub const SYS_clock_adjtime: c_long = 372; +pub const SYS_syncfs: c_long = 373; +pub const SYS_sendmmsg: c_long = 374; +pub const SYS_setns: c_long = 375; +pub const SYS_process_vm_readv: c_long = 376; +pub const SYS_process_vm_writev: c_long = 377; +pub const SYS_kcmp: c_long = 378; +pub const SYS_finit_module: c_long = 379; +pub const SYS_sched_setattr: c_long = 380; +pub const SYS_sched_getattr: c_long = 381; +pub const SYS_renameat2: c_long = 382; +pub const SYS_seccomp: c_long = 383; +pub const SYS_getrandom: c_long = 384; +pub const SYS_memfd_create: c_long = 385; +pub const SYS_bpf: c_long = 386; +pub const SYS_execveat: c_long = 387; +pub const SYS_userfaultfd: c_long = 388; +pub const SYS_membarrier: c_long = 389; +pub const SYS_mlock2: c_long = 390; +pub const SYS_copy_file_range: c_long = 391; +pub const SYS_preadv2: c_long = 392; +pub const SYS_pwritev2: c_long = 393; +pub const SYS_pkey_mprotect: c_long = 394; +pub const SYS_pkey_alloc: c_long = 395; +pub const SYS_pkey_free: c_long = 396; +pub const SYS_statx: c_long = 397; +pub const SYS_rseq: c_long = 398; +pub const SYS_kexec_file_load: c_long = 401; +pub const SYS_pidfd_send_signal: c_long = 424; +pub const SYS_io_uring_setup: c_long = 425; +pub const SYS_io_uring_enter: c_long = 426; +pub const SYS_io_uring_register: c_long = 427; +pub const SYS_open_tree: c_long = 428; +pub const SYS_move_mount: c_long = 429; +pub const SYS_fsopen: c_long = 430; +pub const SYS_fsconfig: c_long = 431; +pub const SYS_fsmount: c_long = 432; +pub const SYS_fspick: c_long = 433; 
+pub const SYS_pidfd_open: c_long = 434; +pub const SYS_clone3: c_long = 435; +pub const SYS_close_range: c_long = 436; +pub const SYS_openat2: c_long = 437; +pub const SYS_pidfd_getfd: c_long = 438; +pub const SYS_faccessat2: c_long = 439; +pub const SYS_process_madvise: c_long = 440; +pub const SYS_epoll_pwait2: c_long = 441; +pub const SYS_mount_setattr: c_long = 442; +pub const SYS_quotactl_fd: c_long = 443; +pub const SYS_landlock_create_ruleset: c_long = 444; +pub const SYS_landlock_add_rule: c_long = 445; +pub const SYS_landlock_restrict_self: c_long = 446; +pub const SYS_memfd_secret: c_long = 447; +pub const SYS_process_mrelease: c_long = 448; +pub const SYS_futex_waitv: c_long = 449; +pub const SYS_set_mempolicy_home_node: c_long = 450; +pub const SYS_mseal: c_long = 462; diff --git a/vendor/libc/src/unix/linux_like/linux/gnu/b32/csky/mod.rs b/vendor/libc/src/unix/linux_like/linux/gnu/b32/csky/mod.rs new file mode 100644 index 00000000000000..95881894a4b943 --- /dev/null +++ b/vendor/libc/src/unix/linux_like/linux/gnu/b32/csky/mod.rs @@ -0,0 +1,745 @@ +use crate::prelude::*; +use crate::{off64_t, off_t}; + +pub type wchar_t = u32; + +s! { + // FIXME(1.0): This should not implement `PartialEq` + #[allow(unpredictable_function_pointer_comparisons)] + pub struct sigaction { + pub sa_sigaction: crate::sighandler_t, + pub sa_mask: crate::sigset_t, + pub sa_flags: c_int, + pub sa_restorer: Option, + } + + pub struct statfs { + pub f_type: crate::__fsword_t, + pub f_bsize: crate::__fsword_t, + pub f_blocks: crate::fsblkcnt_t, + pub f_bfree: crate::fsblkcnt_t, + pub f_bavail: crate::fsblkcnt_t, + + pub f_files: crate::fsfilcnt_t, + pub f_ffree: crate::fsfilcnt_t, + pub f_fsid: crate::fsid_t, + + pub f_namelen: crate::__fsword_t, + pub f_frsize: crate::__fsword_t, + f_spare: [crate::__fsword_t; 5], + } + + pub struct flock { + pub l_type: c_short, + pub l_whence: c_short, + pub l_start: off_t, + pub l_len: off_t, + pub l_pid: crate::pid_t, + } + + pub struct flock64 { + pub l_type: c_short, + pub l_whence: c_short, + pub l_start: off64_t, + pub l_len: off64_t, + pub l_pid: crate::pid_t, + } + + pub struct ipc_perm { + pub __key: crate::key_t, + pub uid: crate::uid_t, + pub gid: crate::gid_t, + pub cuid: crate::uid_t, + pub cgid: crate::gid_t, + pub mode: c_ushort, + __pad1: c_ushort, + pub __seq: c_ushort, + __pad2: c_ushort, + __unused1: c_ulong, + __unused2: c_ulong, + } + + pub struct stat64 { + pub st_dev: crate::dev_t, + __pad1: c_uint, + __st_ino: crate::ino_t, + pub st_mode: crate::mode_t, + pub st_nlink: crate::nlink_t, + pub st_uid: crate::uid_t, + pub st_gid: crate::gid_t, + pub st_rdev: crate::dev_t, + __pad2: c_uint, + pub st_size: off64_t, + pub st_blksize: crate::blksize_t, + pub st_blocks: crate::blkcnt64_t, + pub st_atime: crate::time_t, + pub st_atime_nsec: c_long, + pub st_mtime: crate::time_t, + pub st_mtime_nsec: c_long, + pub st_ctime: crate::time_t, + pub st_ctime_nsec: c_long, + pub st_ino: crate::ino64_t, + } + + pub struct statfs64 { + pub f_type: crate::__fsword_t, + pub f_bsize: crate::__fsword_t, + pub f_blocks: u64, + pub f_bfree: u64, + pub f_bavail: u64, + pub f_files: u64, + pub f_ffree: u64, + pub f_fsid: crate::fsid_t, + pub f_namelen: crate::__fsword_t, + pub f_frsize: crate::__fsword_t, + pub f_flags: crate::__fsword_t, + pub f_spare: [crate::__fsword_t; 4], + } + + pub struct statvfs64 { + pub f_bsize: c_ulong, + pub f_frsize: c_ulong, + pub f_blocks: u64, + pub f_bfree: u64, + pub f_bavail: u64, + pub f_files: u64, + pub f_ffree: u64, + pub f_favail: 
u64, + pub f_fsid: c_ulong, + __f_unused: c_int, + pub f_flag: c_ulong, + pub f_namemax: c_ulong, + __f_spare: [c_int; 6], + } + + pub struct shmid_ds { + pub shm_perm: crate::ipc_perm, + pub shm_segsz: size_t, + pub shm_atime: crate::time_t, + __unused1: c_ulong, + pub shm_dtime: crate::time_t, + __unused2: c_ulong, + pub shm_ctime: crate::time_t, + __unused3: c_ulong, + pub shm_cpid: crate::pid_t, + pub shm_lpid: crate::pid_t, + pub shm_nattch: crate::shmatt_t, + __unused4: c_ulong, + __unused5: c_ulong, + } + + pub struct msqid_ds { + pub msg_perm: crate::ipc_perm, + pub msg_stime: crate::time_t, + __glibc_reserved1: c_ulong, + pub msg_rtime: crate::time_t, + __glibc_reserved2: c_ulong, + pub msg_ctime: crate::time_t, + __glibc_reserved3: c_ulong, + pub __msg_cbytes: c_ulong, + pub msg_qnum: crate::msgqnum_t, + pub msg_qbytes: crate::msglen_t, + pub msg_lspid: crate::pid_t, + pub msg_lrpid: crate::pid_t, + __glibc_reserved4: c_ulong, + __glibc_reserved5: c_ulong, + } + + pub struct siginfo_t { + pub si_signo: c_int, + pub si_errno: c_int, + pub si_code: c_int, + #[doc(hidden)] + #[deprecated( + since = "0.2.54", + note = "Please leave a comment on \ + https://github.com/rust-lang/libc/pull/1316 if you're using \ + this field" + )] + pub _pad: [c_int; 29], + _align: [usize; 0], + } + + pub struct stack_t { + pub ss_sp: *mut c_void, + pub ss_flags: c_int, + pub ss_size: size_t, + } +} + +s_no_extra_traits! { + #[repr(align(8))] + pub struct max_align_t { + priv_: [i64; 2], + } +} + +pub const VEOF: usize = 4; +pub const RTLD_DEEPBIND: c_int = 0x8; +pub const RTLD_GLOBAL: c_int = 0x100; +pub const RTLD_NOLOAD: c_int = 0x4; +pub const O_DIRECT: c_int = 0x4000; +pub const O_DIRECTORY: c_int = 0x10000; +pub const O_NOFOLLOW: c_int = 0x20000; +pub const O_LARGEFILE: c_int = 0o100000; +pub const O_APPEND: c_int = 1024; +pub const O_CREAT: c_int = 64; +pub const O_EXCL: c_int = 128; +pub const O_NOCTTY: c_int = 256; +pub const O_NONBLOCK: c_int = 2048; +pub const O_SYNC: c_int = 1052672; +pub const O_RSYNC: c_int = 1052672; +pub const O_DSYNC: c_int = 4096; +pub const O_FSYNC: c_int = 0x101000; +pub const O_ASYNC: c_int = 0x2000; +pub const O_NDELAY: c_int = 0x800; + +pub const MADV_SOFT_OFFLINE: c_int = 101; +pub const MAP_LOCKED: c_int = 0x02000; +pub const MAP_NORESERVE: c_int = 0x04000; +pub const MAP_ANON: c_int = 0x0020; +pub const MAP_ANONYMOUS: c_int = 0x0020; +pub const MAP_DENYWRITE: c_int = 0x0800; +pub const MAP_EXECUTABLE: c_int = 0x01000; +pub const MAP_POPULATE: c_int = 0x08000; +pub const MAP_NONBLOCK: c_int = 0x010000; +pub const MAP_STACK: c_int = 0x020000; +pub const MAP_HUGETLB: c_int = 0x040000; +pub const MAP_GROWSDOWN: c_int = 0x0100; +pub const MAP_SYNC: c_int = 0x080000; + +pub const EDEADLOCK: c_int = 35; +pub const EUCLEAN: c_int = 117; +pub const ENOTNAM: c_int = 118; +pub const ENAVAIL: c_int = 119; +pub const EISNAM: c_int = 120; +pub const EREMOTEIO: c_int = 121; +pub const EDEADLK: c_int = 35; +pub const ENAMETOOLONG: c_int = 36; +pub const ENOLCK: c_int = 37; +pub const ENOSYS: c_int = 38; +pub const ENOTEMPTY: c_int = 39; +pub const ELOOP: c_int = 40; +pub const ENOMSG: c_int = 42; +pub const EIDRM: c_int = 43; +pub const ECHRNG: c_int = 44; +pub const EL2NSYNC: c_int = 45; +pub const EL3HLT: c_int = 46; +pub const EL3RST: c_int = 47; +pub const ELNRNG: c_int = 48; +pub const EUNATCH: c_int = 49; +pub const ENOCSI: c_int = 50; +pub const EL2HLT: c_int = 51; +pub const EBADE: c_int = 52; +pub const EBADR: c_int = 53; +pub const EXFULL: c_int = 54; +pub const 
ENOANO: c_int = 55; +pub const EBADRQC: c_int = 56; +pub const EBADSLT: c_int = 57; +pub const EMULTIHOP: c_int = 72; +pub const EOVERFLOW: c_int = 75; +pub const ENOTUNIQ: c_int = 76; +pub const EBADFD: c_int = 77; +pub const EBADMSG: c_int = 74; +pub const EREMCHG: c_int = 78; +pub const ELIBACC: c_int = 79; +pub const ELIBBAD: c_int = 80; +pub const ELIBSCN: c_int = 81; +pub const ELIBMAX: c_int = 82; +pub const ELIBEXEC: c_int = 83; +pub const EILSEQ: c_int = 84; +pub const ERESTART: c_int = 85; +pub const ESTRPIPE: c_int = 86; +pub const EUSERS: c_int = 87; +pub const ENOTSOCK: c_int = 88; +pub const EDESTADDRREQ: c_int = 89; +pub const EMSGSIZE: c_int = 90; +pub const EPROTOTYPE: c_int = 91; +pub const ENOPROTOOPT: c_int = 92; +pub const EPROTONOSUPPORT: c_int = 93; +pub const ESOCKTNOSUPPORT: c_int = 94; +pub const EOPNOTSUPP: c_int = 95; +pub const EPFNOSUPPORT: c_int = 96; +pub const EAFNOSUPPORT: c_int = 97; +pub const EADDRINUSE: c_int = 98; +pub const EADDRNOTAVAIL: c_int = 99; +pub const ENETDOWN: c_int = 100; +pub const ENETUNREACH: c_int = 101; +pub const ENETRESET: c_int = 102; +pub const ECONNABORTED: c_int = 103; +pub const ECONNRESET: c_int = 104; +pub const ENOBUFS: c_int = 105; +pub const EISCONN: c_int = 106; +pub const ENOTCONN: c_int = 107; +pub const ESHUTDOWN: c_int = 108; +pub const ETOOMANYREFS: c_int = 109; +pub const ETIMEDOUT: c_int = 110; +pub const ECONNREFUSED: c_int = 111; +pub const EHOSTDOWN: c_int = 112; +pub const EHOSTUNREACH: c_int = 113; +pub const EALREADY: c_int = 114; +pub const EINPROGRESS: c_int = 115; +pub const ESTALE: c_int = 116; +pub const EDQUOT: c_int = 122; +pub const ENOMEDIUM: c_int = 123; +pub const EMEDIUMTYPE: c_int = 124; +pub const ECANCELED: c_int = 125; +pub const ENOKEY: c_int = 126; +pub const EKEYEXPIRED: c_int = 127; +pub const EKEYREVOKED: c_int = 128; +pub const EKEYREJECTED: c_int = 129; +pub const EOWNERDEAD: c_int = 130; +pub const ENOTRECOVERABLE: c_int = 131; +pub const EHWPOISON: c_int = 133; +pub const ERFKILL: c_int = 132; + +pub const SA_SIGINFO: c_int = 0x00000004; +pub const SA_NOCLDWAIT: c_int = 0x00000002; + +pub const SOCK_STREAM: c_int = 1; +pub const SOCK_DGRAM: c_int = 2; + +pub const MCL_CURRENT: c_int = 0x0001; +pub const MCL_FUTURE: c_int = 0x0002; +pub const MCL_ONFAULT: c_int = 0x0004; + +pub const POLLWRNORM: c_short = 0x100; +pub const POLLWRBAND: c_short = 0x200; + +pub const F_GETLK: c_int = 5; +pub const F_GETOWN: c_int = 9; +pub const F_SETOWN: c_int = 8; + +pub const EFD_NONBLOCK: c_int = 0x800; +pub const SFD_NONBLOCK: c_int = 0x0800; + +pub const SIGCHLD: c_int = 17; +pub const SIGBUS: c_int = 7; +pub const SIGUSR1: c_int = 10; +pub const SIGUSR2: c_int = 12; +pub const SIGCONT: c_int = 18; +pub const SIGSTOP: c_int = 19; +pub const SIGTSTP: c_int = 20; +pub const SIGURG: c_int = 23; +pub const SIGIO: c_int = 29; +pub const SIGSYS: c_int = 31; +pub const SIGSTKFLT: c_int = 16; +#[deprecated(since = "0.2.55", note = "Use SIGSYS instead")] +pub const SIGUNUSED: c_int = 31; +pub const SIGPOLL: c_int = 29; +pub const SIGPWR: c_int = 30; +pub const SIG_SETMASK: c_int = 2; +pub const SIG_BLOCK: c_int = 0x000000; +pub const SIG_UNBLOCK: c_int = 0x01; +pub const SIGTTIN: c_int = 21; +pub const SIGTTOU: c_int = 22; +pub const SIGXCPU: c_int = 24; +pub const SIGXFSZ: c_int = 25; +pub const SIGVTALRM: c_int = 26; +pub const SIGPROF: c_int = 27; +pub const SIGWINCH: c_int = 28; +pub const SIGSTKSZ: size_t = 8192; +pub const MINSIGSTKSZ: size_t = 2048; +pub const CBAUD: crate::tcflag_t = 0o0010017; +pub 
const TAB1: crate::tcflag_t = 0x00000800; +pub const TAB2: crate::tcflag_t = 0x00001000; +pub const TAB3: crate::tcflag_t = 0x00001800; +pub const CR1: crate::tcflag_t = 0x00000200; +pub const CR2: crate::tcflag_t = 0x00000400; +pub const CR3: crate::tcflag_t = 0x00000600; +pub const FF1: crate::tcflag_t = 0x00008000; +pub const BS1: crate::tcflag_t = 0x00002000; +pub const VT1: crate::tcflag_t = 0x00004000; +pub const VWERASE: usize = 14; +pub const VREPRINT: usize = 12; +pub const VSUSP: usize = 10; +pub const VSTART: usize = 8; +pub const VSTOP: usize = 9; +pub const VDISCARD: usize = 13; +pub const VTIME: usize = 5; +pub const IXON: crate::tcflag_t = 0x00000400; +pub const IXOFF: crate::tcflag_t = 0x00001000; +pub const ONLCR: crate::tcflag_t = 0x4; +pub const CSIZE: crate::tcflag_t = 0x00000030; +pub const CS6: crate::tcflag_t = 0x00000010; +pub const CS7: crate::tcflag_t = 0x00000020; +pub const CS8: crate::tcflag_t = 0x00000030; +pub const CSTOPB: crate::tcflag_t = 0x00000040; +pub const CREAD: crate::tcflag_t = 0x00000080; +pub const PARENB: crate::tcflag_t = 0x00000100; +pub const PARODD: crate::tcflag_t = 0x00000200; +pub const HUPCL: crate::tcflag_t = 0x00000400; +pub const CLOCAL: crate::tcflag_t = 0x00000800; +pub const ECHOKE: crate::tcflag_t = 0x00000800; +pub const ECHOE: crate::tcflag_t = 0x00000010; +pub const ECHOK: crate::tcflag_t = 0x00000020; +pub const ECHONL: crate::tcflag_t = 0x00000040; +pub const ECHOPRT: crate::tcflag_t = 0x00000400; +pub const ECHOCTL: crate::tcflag_t = 0x00000200; +pub const ISIG: crate::tcflag_t = 0x00000001; +pub const ICANON: crate::tcflag_t = 0x00000002; +pub const PENDIN: crate::tcflag_t = 0x00004000; +pub const NOFLSH: crate::tcflag_t = 0x00000080; +pub const CIBAUD: crate::tcflag_t = 0o02003600000; +pub const CBAUDEX: crate::tcflag_t = 0o010000; +pub const VSWTC: usize = 7; +pub const OLCUC: crate::tcflag_t = 0o000002; +pub const NLDLY: crate::tcflag_t = 0o000400; +pub const CRDLY: crate::tcflag_t = 0o003000; +pub const TABDLY: crate::tcflag_t = 0o014000; +pub const BSDLY: crate::tcflag_t = 0o020000; +pub const FFDLY: crate::tcflag_t = 0o100000; +pub const VTDLY: crate::tcflag_t = 0o040000; +pub const XTABS: crate::tcflag_t = 0o014000; + +pub const B0: crate::speed_t = 0o000000; +pub const B50: crate::speed_t = 0o000001; +pub const B75: crate::speed_t = 0o000002; +pub const B110: crate::speed_t = 0o000003; +pub const B134: crate::speed_t = 0o000004; +pub const B150: crate::speed_t = 0o000005; +pub const B200: crate::speed_t = 0o000006; +pub const B300: crate::speed_t = 0o000007; +pub const B600: crate::speed_t = 0o000010; +pub const B1200: crate::speed_t = 0o000011; +pub const B1800: crate::speed_t = 0o000012; +pub const B2400: crate::speed_t = 0o000013; +pub const B4800: crate::speed_t = 0o000014; +pub const B9600: crate::speed_t = 0o000015; +pub const B19200: crate::speed_t = 0o000016; +pub const B38400: crate::speed_t = 0o000017; +pub const EXTA: crate::speed_t = B19200; +pub const EXTB: crate::speed_t = B38400; +pub const B57600: crate::speed_t = 0o010001; +pub const B115200: crate::speed_t = 0o010002; +pub const B230400: crate::speed_t = 0o010003; +pub const B460800: crate::speed_t = 0o010004; +pub const B500000: crate::speed_t = 0o010005; +pub const B576000: crate::speed_t = 0o010006; +pub const B921600: crate::speed_t = 0o010007; +pub const B1000000: crate::speed_t = 0o010010; +pub const B1152000: crate::speed_t = 0o010011; +pub const B1500000: crate::speed_t = 0o010012; +pub const B2000000: crate::speed_t = 0o010013; +pub const 
B2500000: crate::speed_t = 0o010014; +pub const B3000000: crate::speed_t = 0o010015; +pub const B3500000: crate::speed_t = 0o010016; +pub const B4000000: crate::speed_t = 0o010017; + +pub const VEOL: usize = 11; +pub const VEOL2: usize = 16; +pub const VMIN: usize = 6; +pub const IEXTEN: crate::tcflag_t = 0x00008000; +pub const TOSTOP: crate::tcflag_t = 0x00000100; +pub const FLUSHO: crate::tcflag_t = 0x00001000; +pub const EXTPROC: crate::tcflag_t = 0x00010000; + +pub const TCSANOW: c_int = 0; +pub const TCSADRAIN: c_int = 1; +pub const TCSAFLUSH: c_int = 2; + +// Syscall table +pub const SYS_read: c_long = 63; +pub const SYS_write: c_long = 64; +pub const SYS_close: c_long = 57; +pub const SYS_fstat: c_long = 80; +pub const SYS_lseek: c_long = 62; +pub const SYS_mmap: c_long = 222; +pub const SYS_mprotect: c_long = 226; +pub const SYS_munmap: c_long = 215; +pub const SYS_brk: c_long = 214; +pub const SYS_rt_sigaction: c_long = 134; +pub const SYS_rt_sigprocmask: c_long = 135; +pub const SYS_rt_sigreturn: c_long = 139; +pub const SYS_ioctl: c_long = 29; +pub const SYS_pread64: c_long = 67; +pub const SYS_pwrite64: c_long = 68; +pub const SYS_readv: c_long = 65; +pub const SYS_writev: c_long = 66; +pub const SYS_sched_yield: c_long = 124; +pub const SYS_mremap: c_long = 216; +pub const SYS_msync: c_long = 227; +pub const SYS_mincore: c_long = 232; +pub const SYS_madvise: c_long = 233; +pub const SYS_shmget: c_long = 194; +pub const SYS_shmat: c_long = 196; +pub const SYS_shmctl: c_long = 195; +pub const SYS_dup: c_long = 23; +pub const SYS_nanosleep: c_long = 101; +pub const SYS_getitimer: c_long = 102; +pub const SYS_setitimer: c_long = 103; +pub const SYS_getpid: c_long = 172; +pub const SYS_sendfile: c_long = 71; +pub const SYS_socket: c_long = 198; +pub const SYS_connect: c_long = 203; +pub const SYS_accept: c_long = 202; +pub const SYS_sendto: c_long = 206; +pub const SYS_recvfrom: c_long = 207; +pub const SYS_sendmsg: c_long = 211; +pub const SYS_recvmsg: c_long = 212; +pub const SYS_shutdown: c_long = 210; +pub const SYS_bind: c_long = 200; +pub const SYS_listen: c_long = 201; +pub const SYS_getsockname: c_long = 204; +pub const SYS_getpeername: c_long = 205; +pub const SYS_socketpair: c_long = 199; +pub const SYS_setsockopt: c_long = 208; +pub const SYS_getsockopt: c_long = 209; +pub const SYS_clone: c_long = 220; +pub const SYS_execve: c_long = 221; +pub const SYS_exit: c_long = 93; +pub const SYS_wait4: c_long = 260; +pub const SYS_kill: c_long = 129; +pub const SYS_uname: c_long = 160; +pub const SYS_semget: c_long = 190; +pub const SYS_semop: c_long = 193; +pub const SYS_semctl: c_long = 191; +pub const SYS_shmdt: c_long = 197; +pub const SYS_msgget: c_long = 186; +pub const SYS_msgsnd: c_long = 189; +pub const SYS_msgrcv: c_long = 188; +pub const SYS_msgctl: c_long = 187; +pub const SYS_fcntl: c_long = 25; +pub const SYS_flock: c_long = 32; +pub const SYS_fsync: c_long = 82; +pub const SYS_fdatasync: c_long = 83; +pub const SYS_truncate: c_long = 45; +pub const SYS_ftruncate: c_long = 46; +pub const SYS_getcwd: c_long = 17; +pub const SYS_chdir: c_long = 49; +pub const SYS_fchdir: c_long = 50; +pub const SYS_fchmod: c_long = 52; +pub const SYS_fchown: c_long = 55; +pub const SYS_umask: c_long = 166; +pub const SYS_gettimeofday: c_long = 169; +pub const SYS_getrlimit: c_long = 163; +pub const SYS_getrusage: c_long = 165; +pub const SYS_sysinfo: c_long = 179; +pub const SYS_times: c_long = 153; +pub const SYS_ptrace: c_long = 117; +pub const SYS_getuid: c_long = 174; +pub const 
SYS_syslog: c_long = 116; +pub const SYS_getgid: c_long = 176; +pub const SYS_setuid: c_long = 146; +pub const SYS_setgid: c_long = 144; +pub const SYS_geteuid: c_long = 175; +pub const SYS_getegid: c_long = 177; +pub const SYS_setpgid: c_long = 154; +pub const SYS_getppid: c_long = 173; +pub const SYS_setsid: c_long = 157; +pub const SYS_setreuid: c_long = 145; +pub const SYS_setregid: c_long = 143; +pub const SYS_getgroups: c_long = 158; +pub const SYS_setgroups: c_long = 159; +pub const SYS_setresuid: c_long = 147; +pub const SYS_getresuid: c_long = 148; +pub const SYS_setresgid: c_long = 149; +pub const SYS_getresgid: c_long = 150; +pub const SYS_getpgid: c_long = 155; +pub const SYS_setfsuid: c_long = 151; +pub const SYS_setfsgid: c_long = 152; +pub const SYS_getsid: c_long = 156; +pub const SYS_capget: c_long = 90; +pub const SYS_capset: c_long = 91; +pub const SYS_rt_sigpending: c_long = 136; +pub const SYS_rt_sigtimedwait: c_long = 137; +pub const SYS_rt_sigqueueinfo: c_long = 138; +pub const SYS_rt_sigsuspend: c_long = 133; +pub const SYS_sigaltstack: c_long = 132; +pub const SYS_personality: c_long = 92; +pub const SYS_statfs: c_long = 43; +pub const SYS_fstatfs: c_long = 44; +pub const SYS_getpriority: c_long = 141; +pub const SYS_setpriority: c_long = 140; +pub const SYS_sched_setparam: c_long = 118; +pub const SYS_sched_getparam: c_long = 121; +pub const SYS_sched_setscheduler: c_long = 119; +pub const SYS_sched_getscheduler: c_long = 120; +pub const SYS_sched_get_priority_max: c_long = 125; +pub const SYS_sched_get_priority_min: c_long = 126; +pub const SYS_sched_rr_get_interval: c_long = 127; +pub const SYS_mlock: c_long = 228; +pub const SYS_munlock: c_long = 229; +pub const SYS_mlockall: c_long = 230; +pub const SYS_munlockall: c_long = 231; +pub const SYS_vhangup: c_long = 58; +pub const SYS_pivot_root: c_long = 41; +pub const SYS_prctl: c_long = 167; +pub const SYS_adjtimex: c_long = 171; +pub const SYS_setrlimit: c_long = 164; +pub const SYS_chroot: c_long = 51; +pub const SYS_sync: c_long = 81; +pub const SYS_acct: c_long = 89; +pub const SYS_settimeofday: c_long = 170; +pub const SYS_mount: c_long = 40; +pub const SYS_umount2: c_long = 39; +pub const SYS_swapon: c_long = 224; +pub const SYS_swapoff: c_long = 225; +pub const SYS_reboot: c_long = 142; +pub const SYS_sethostname: c_long = 161; +pub const SYS_setdomainname: c_long = 162; +pub const SYS_init_module: c_long = 105; +pub const SYS_delete_module: c_long = 106; +pub const SYS_quotactl: c_long = 60; +pub const SYS_nfsservctl: c_long = 42; +pub const SYS_gettid: c_long = 178; +pub const SYS_readahead: c_long = 213; +pub const SYS_setxattr: c_long = 5; +pub const SYS_lsetxattr: c_long = 6; +pub const SYS_fsetxattr: c_long = 7; +pub const SYS_getxattr: c_long = 8; +pub const SYS_lgetxattr: c_long = 9; +pub const SYS_fgetxattr: c_long = 10; +pub const SYS_listxattr: c_long = 11; +pub const SYS_llistxattr: c_long = 12; +pub const SYS_flistxattr: c_long = 13; +pub const SYS_removexattr: c_long = 14; +pub const SYS_lremovexattr: c_long = 15; +pub const SYS_fremovexattr: c_long = 16; +pub const SYS_tkill: c_long = 130; +pub const SYS_futex: c_long = 98; +pub const SYS_sched_setaffinity: c_long = 122; +pub const SYS_sched_getaffinity: c_long = 123; +pub const SYS_io_setup: c_long = 0; +pub const SYS_io_destroy: c_long = 1; +pub const SYS_io_getevents: c_long = 4; +pub const SYS_io_submit: c_long = 2; +pub const SYS_io_cancel: c_long = 3; +pub const SYS_lookup_dcookie: c_long = 18; +pub const SYS_remap_file_pages: c_long 
= 234; +pub const SYS_getdents64: c_long = 61; +pub const SYS_set_tid_address: c_long = 96; +pub const SYS_restart_syscall: c_long = 128; +pub const SYS_semtimedop: c_long = 192; +pub const SYS_fadvise64: c_long = 223; +pub const SYS_timer_create: c_long = 107; +pub const SYS_timer_settime: c_long = 110; +pub const SYS_timer_gettime: c_long = 108; +pub const SYS_timer_getoverrun: c_long = 109; +pub const SYS_timer_delete: c_long = 111; +pub const SYS_clock_settime: c_long = 112; +pub const SYS_clock_gettime: c_long = 113; +pub const SYS_clock_getres: c_long = 114; +pub const SYS_clock_nanosleep: c_long = 115; +pub const SYS_exit_group: c_long = 94; +pub const SYS_epoll_ctl: c_long = 21; +pub const SYS_tgkill: c_long = 131; +pub const SYS_mbind: c_long = 235; +pub const SYS_set_mempolicy: c_long = 237; +pub const SYS_get_mempolicy: c_long = 236; +pub const SYS_mq_open: c_long = 180; +pub const SYS_mq_unlink: c_long = 181; +pub const SYS_mq_timedsend: c_long = 182; +pub const SYS_mq_timedreceive: c_long = 183; +pub const SYS_mq_notify: c_long = 184; +pub const SYS_mq_getsetattr: c_long = 185; +pub const SYS_kexec_load: c_long = 104; +pub const SYS_waitid: c_long = 95; +pub const SYS_add_key: c_long = 217; +pub const SYS_request_key: c_long = 218; +pub const SYS_keyctl: c_long = 219; +pub const SYS_ioprio_set: c_long = 30; +pub const SYS_ioprio_get: c_long = 31; +pub const SYS_inotify_add_watch: c_long = 27; +pub const SYS_inotify_rm_watch: c_long = 28; +pub const SYS_migrate_pages: c_long = 238; +pub const SYS_openat: c_long = 56; +pub const SYS_mkdirat: c_long = 34; +pub const SYS_mknodat: c_long = 33; +pub const SYS_fchownat: c_long = 54; +pub const SYS_newfstatat: c_long = 79; +pub const SYS_unlinkat: c_long = 35; +pub const SYS_linkat: c_long = 37; +pub const SYS_symlinkat: c_long = 36; +pub const SYS_readlinkat: c_long = 78; +pub const SYS_fchmodat: c_long = 53; +pub const SYS_faccessat: c_long = 48; +pub const SYS_pselect6: c_long = 72; +pub const SYS_ppoll: c_long = 73; +pub const SYS_unshare: c_long = 97; +pub const SYS_set_robust_list: c_long = 99; +pub const SYS_get_robust_list: c_long = 100; +pub const SYS_splice: c_long = 76; +pub const SYS_tee: c_long = 77; +pub const SYS_sync_file_range: c_long = 84; +pub const SYS_vmsplice: c_long = 75; +pub const SYS_move_pages: c_long = 239; +pub const SYS_utimensat: c_long = 88; +pub const SYS_epoll_pwait: c_long = 22; +pub const SYS_timerfd_create: c_long = 85; +pub const SYS_fallocate: c_long = 47; +pub const SYS_timerfd_settime: c_long = 86; +pub const SYS_timerfd_gettime: c_long = 87; +pub const SYS_accept4: c_long = 242; +pub const SYS_signalfd4: c_long = 74; +pub const SYS_eventfd2: c_long = 19; +pub const SYS_epoll_create1: c_long = 20; +pub const SYS_dup3: c_long = 24; +pub const SYS_pipe2: c_long = 59; +pub const SYS_inotify_init1: c_long = 26; +pub const SYS_preadv: c_long = 69; +pub const SYS_pwritev: c_long = 70; +pub const SYS_rt_tgsigqueueinfo: c_long = 240; +pub const SYS_perf_event_open: c_long = 241; +pub const SYS_recvmmsg: c_long = 243; +pub const SYS_fanotify_init: c_long = 262; +pub const SYS_fanotify_mark: c_long = 263; +pub const SYS_prlimit64: c_long = 261; +pub const SYS_name_to_handle_at: c_long = 264; +pub const SYS_open_by_handle_at: c_long = 265; +pub const SYS_clock_adjtime: c_long = 266; +pub const SYS_syncfs: c_long = 267; +pub const SYS_sendmmsg: c_long = 269; +pub const SYS_setns: c_long = 268; +pub const SYS_getcpu: c_long = 168; +pub const SYS_process_vm_readv: c_long = 270; +pub const 
SYS_process_vm_writev: c_long = 271;
+pub const SYS_kcmp: c_long = 272;
+pub const SYS_finit_module: c_long = 273;
+pub const SYS_sched_setattr: c_long = 274;
+pub const SYS_sched_getattr: c_long = 275;
+pub const SYS_renameat2: c_long = 276;
+pub const SYS_seccomp: c_long = 277;
+pub const SYS_getrandom: c_long = 278;
+pub const SYS_memfd_create: c_long = 279;
+pub const SYS_bpf: c_long = 280;
+pub const SYS_execveat: c_long = 281;
+pub const SYS_userfaultfd: c_long = 282;
+pub const SYS_membarrier: c_long = 283;
+pub const SYS_mlock2: c_long = 284;
+pub const SYS_copy_file_range: c_long = 285;
+pub const SYS_preadv2: c_long = 286;
+pub const SYS_pwritev2: c_long = 287;
+pub const SYS_pkey_mprotect: c_long = 288;
+pub const SYS_pkey_alloc: c_long = 289;
+pub const SYS_pkey_free: c_long = 290;
+pub const SYS_statx: c_long = 291;
+pub const SYS_rseq: c_long = 293;
+pub const SYS_syscall: c_long = 294;
+pub const SYS_pidfd_send_signal: c_long = 424;
+pub const SYS_io_uring_setup: c_long = 425;
+pub const SYS_io_uring_enter: c_long = 426;
+pub const SYS_io_uring_register: c_long = 427;
+pub const SYS_open_tree: c_long = 428;
+pub const SYS_move_mount: c_long = 429;
+pub const SYS_fsopen: c_long = 430;
+pub const SYS_fsconfig: c_long = 431;
+pub const SYS_fsmount: c_long = 432;
+pub const SYS_fspick: c_long = 433;
+pub const SYS_pidfd_open: c_long = 434;
+pub const SYS_clone3: c_long = 435;
+pub const SYS_close_range: c_long = 436;
+pub const SYS_openat2: c_long = 437;
+pub const SYS_pidfd_getfd: c_long = 438;
+pub const SYS_faccessat2: c_long = 439;
+pub const SYS_process_madvise: c_long = 440;
+pub const SYS_epoll_pwait2: c_long = 441;
+pub const SYS_mount_setattr: c_long = 442;
+pub const SYS_quotactl_fd: c_long = 443;
+pub const SYS_landlock_create_ruleset: c_long = 444;
+pub const SYS_landlock_add_rule: c_long = 445;
+pub const SYS_landlock_restrict_self: c_long = 446;
+pub const SYS_memfd_secret: c_long = 447;
+pub const SYS_process_mrelease: c_long = 448;
+pub const SYS_futex_waitv: c_long = 449;
+pub const SYS_set_mempolicy_home_node: c_long = 450;
diff --git a/vendor/libc/src/unix/linux_like/linux/gnu/b32/m68k/mod.rs b/vendor/libc/src/unix/linux_like/linux/gnu/b32/m68k/mod.rs
new file mode 100644
index 00000000000000..d614fddeca9d90
--- /dev/null
+++ b/vendor/libc/src/unix/linux_like/linux/gnu/b32/m68k/mod.rs
@@ -0,0 +1,863 @@
+use crate::prelude::*;
+use crate::{off64_t, off_t};
+
+pub type wchar_t = i32;
+
+s!
{ + // FIXME(1.0): This should not implement `PartialEq` + #[allow(unpredictable_function_pointer_comparisons)] + pub struct sigaction { + pub sa_sigaction: crate::sighandler_t, + pub sa_mask: crate::sigset_t, + pub sa_flags: c_int, + pub sa_restorer: Option, + } + + pub struct statfs { + pub f_type: crate::__fsword_t, + pub f_bsize: crate::__fsword_t, + pub f_blocks: crate::fsblkcnt_t, + pub f_bfree: crate::fsblkcnt_t, + pub f_bavail: crate::fsblkcnt_t, + + pub f_files: crate::fsfilcnt_t, + pub f_ffree: crate::fsfilcnt_t, + pub f_fsid: crate::fsid_t, + + pub f_namelen: crate::__fsword_t, + pub f_frsize: crate::__fsword_t, + pub f_flags: crate::__fsword_t, + f_spare: [crate::__fsword_t; 4], + } + + pub struct flock { + pub l_type: c_short, + pub l_whence: c_short, + pub l_start: off_t, + pub l_len: off_t, + pub l_pid: crate::pid_t, + } + + pub struct flock64 { + pub l_type: c_short, + pub l_whence: c_short, + pub l_start: off64_t, + pub l_len: off64_t, + pub l_pid: crate::pid_t, + } + + pub struct ipc_perm { + __key: crate::key_t, + pub uid: crate::uid_t, + pub gid: crate::gid_t, + pub cuid: crate::uid_t, + pub cgid: crate::gid_t, + pub mode: crate::mode_t, + __seq: c_ushort, + __pad1: c_ushort, + __glibc_reserved1: c_ulong, + __glibc_reserved2: c_ulong, + } + + pub struct stat64 { + pub st_dev: crate::dev_t, + __pad1: c_ushort, + pub __st_ino: crate::ino_t, + pub st_mode: crate::mode_t, + pub st_nlink: crate::nlink_t, + pub st_uid: crate::uid_t, + pub st_gid: crate::gid_t, + pub st_rdev: crate::dev_t, + __pad2: c_ushort, + pub st_size: off64_t, + pub st_blksize: crate::blksize_t, + pub st_blocks: crate::blkcnt64_t, + pub st_atime: crate::time_t, + pub st_atime_nsec: c_ulong, + pub st_mtime: crate::time_t, + pub st_mtime_nsec: c_ulong, + pub st_ctime: crate::time_t, + pub st_ctime_nsec: c_ulong, + pub st_ino: crate::ino64_t, + } + + pub struct statfs64 { + pub f_type: crate::__fsword_t, + pub f_bsize: crate::__fsword_t, + pub f_blocks: crate::fsblkcnt64_t, + pub f_bfree: crate::fsblkcnt64_t, + pub f_bavail: crate::fsblkcnt64_t, + pub f_files: crate::fsblkcnt64_t, + pub f_ffree: crate::fsblkcnt64_t, + pub f_fsid: crate::fsid_t, + pub f_namelen: crate::__fsword_t, + pub f_frsize: crate::__fsword_t, + pub f_flags: crate::__fsword_t, + pub f_spare: [crate::__fsword_t; 4], + } + + pub struct statvfs64 { + pub f_bsize: c_ulong, + pub f_frsize: c_ulong, + pub f_blocks: crate::fsblkcnt64_t, + pub f_bfree: crate::fsblkcnt64_t, + pub f_bavail: crate::fsblkcnt64_t, + pub f_files: crate::fsblkcnt64_t, + pub f_ffree: crate::fsblkcnt64_t, + pub f_favail: crate::fsblkcnt64_t, + pub f_fsid: c_ulong, + __f_unused: c_int, + pub f_flag: c_ulong, + pub f_namemax: c_ulong, + __f_spare: [c_int; 6], + } + + pub struct shmid_ds { + pub shm_perm: crate::ipc_perm, + pub shm_segsz: size_t, + pub shm_atime: crate::time_t, + __glibc_reserved1: c_long, + pub shm_dtime: crate::time_t, + __glibc_reserved2: c_long, + pub shm_ctime: crate::time_t, + __glibc_reserved3: c_long, + pub shm_cpid: crate::pid_t, + pub shm_lpid: crate::pid_t, + pub shm_nattch: crate::shmatt_t, + __glibc_reserved5: c_ulong, + __glibc_reserved6: c_ulong, + } + + pub struct msqid_ds { + pub msg_perm: crate::ipc_perm, + pub msg_stime: crate::time_t, + __glibc_reserved1: c_uint, + pub msg_rtime: crate::time_t, + __glibc_reserved2: c_uint, + pub msg_ctime: crate::time_t, + __glibc_reserved3: c_uint, + pub __msg_cbytes: c_ulong, + pub msg_qnum: crate::msgqnum_t, + pub msg_qbytes: crate::msglen_t, + pub msg_lspid: crate::pid_t, + pub msg_lrpid: 
crate::pid_t, + __glibc_reserved4: c_ulong, + __glibc_reserved5: c_ulong, + } + + pub struct siginfo_t { + pub si_signo: c_int, + pub si_code: c_int, + pub si_errno: c_int, + _pad: [c_int; 29], + _align: [usize; 0], + } + + pub struct stack_t { + pub ss_sp: *mut c_void, + pub ss_flags: c_int, + pub ss_size: size_t, + } +} + +s_no_extra_traits! { + #[repr(align(2))] + pub struct max_align_t { + priv_: [i8; 20], + } +} + +pub const VEOF: usize = 4; +pub const RTLD_DEEPBIND: c_int = 0x8; +pub const RTLD_GLOBAL: c_int = 0x100; +pub const RTLD_NOLOAD: c_int = 0x4; +pub const O_DIRECT: c_int = 0x10000; +pub const O_DIRECTORY: c_int = 0x4000; +pub const O_NOFOLLOW: c_int = 0x8000; +pub const O_LARGEFILE: c_int = 0x20000; +pub const O_APPEND: c_int = 1024; +pub const O_CREAT: c_int = 64; +pub const O_EXCL: c_int = 128; +pub const O_NOCTTY: c_int = 256; +pub const O_NONBLOCK: c_int = 2048; +pub const O_SYNC: c_int = 1052672; +pub const O_RSYNC: c_int = 1052672; +pub const O_DSYNC: c_int = 4096; +pub const O_FSYNC: c_int = 0x101000; +pub const O_ASYNC: c_int = 0x2000; +pub const O_NDELAY: c_int = 0x800; + +pub const MADV_SOFT_OFFLINE: c_int = 101; +pub const MAP_LOCKED: c_int = 0x02000; +pub const MAP_NORESERVE: c_int = 0x04000; +pub const MAP_ANON: c_int = 0x0020; +pub const MAP_ANONYMOUS: c_int = 0x0020; +pub const MAP_DENYWRITE: c_int = 0x0800; +pub const MAP_EXECUTABLE: c_int = 0x01000; +pub const MAP_POPULATE: c_int = 0x08000; +pub const MAP_NONBLOCK: c_int = 0x010000; +pub const MAP_STACK: c_int = 0x020000; +pub const MAP_HUGETLB: c_int = 0x040000; +pub const MAP_GROWSDOWN: c_int = 0x0100; +pub const MAP_SYNC: c_int = 0x080000; + +pub const EDEADLOCK: c_int = 35; +pub const EUCLEAN: c_int = 117; +pub const ENOTNAM: c_int = 118; +pub const ENAVAIL: c_int = 119; +pub const EISNAM: c_int = 120; +pub const EREMOTEIO: c_int = 121; +pub const EDEADLK: c_int = 35; +pub const ENAMETOOLONG: c_int = 36; +pub const ENOLCK: c_int = 37; +pub const ENOSYS: c_int = 38; +pub const ENOTEMPTY: c_int = 39; +pub const ELOOP: c_int = 40; +pub const ENOMSG: c_int = 42; +pub const EIDRM: c_int = 43; +pub const ECHRNG: c_int = 44; +pub const EL2NSYNC: c_int = 45; +pub const EL3HLT: c_int = 46; +pub const EL3RST: c_int = 47; +pub const ELNRNG: c_int = 48; +pub const EUNATCH: c_int = 49; +pub const ENOCSI: c_int = 50; +pub const EL2HLT: c_int = 51; +pub const EBADE: c_int = 52; +pub const EBADR: c_int = 53; +pub const EXFULL: c_int = 54; +pub const ENOANO: c_int = 55; +pub const EBADRQC: c_int = 56; +pub const EBADSLT: c_int = 57; +pub const EMULTIHOP: c_int = 72; +pub const EOVERFLOW: c_int = 75; +pub const ENOTUNIQ: c_int = 76; +pub const EBADFD: c_int = 77; +pub const EBADMSG: c_int = 74; +pub const EREMCHG: c_int = 78; +pub const ELIBACC: c_int = 79; +pub const ELIBBAD: c_int = 80; +pub const ELIBSCN: c_int = 81; +pub const ELIBMAX: c_int = 82; +pub const ELIBEXEC: c_int = 83; +pub const EILSEQ: c_int = 84; +pub const ERESTART: c_int = 85; +pub const ESTRPIPE: c_int = 86; +pub const EUSERS: c_int = 87; +pub const ENOTSOCK: c_int = 88; +pub const EDESTADDRREQ: c_int = 89; +pub const EMSGSIZE: c_int = 90; +pub const EPROTOTYPE: c_int = 91; +pub const ENOPROTOOPT: c_int = 92; +pub const EPROTONOSUPPORT: c_int = 93; +pub const ESOCKTNOSUPPORT: c_int = 94; +pub const EOPNOTSUPP: c_int = 95; +pub const EPFNOSUPPORT: c_int = 96; +pub const EAFNOSUPPORT: c_int = 97; +pub const EADDRINUSE: c_int = 98; +pub const EADDRNOTAVAIL: c_int = 99; +pub const ENETDOWN: c_int = 100; +pub const ENETUNREACH: c_int = 101; +pub const 
ENETRESET: c_int = 102; +pub const ECONNABORTED: c_int = 103; +pub const ECONNRESET: c_int = 104; +pub const ENOBUFS: c_int = 105; +pub const EISCONN: c_int = 106; +pub const ENOTCONN: c_int = 107; +pub const ESHUTDOWN: c_int = 108; +pub const ETOOMANYREFS: c_int = 109; +pub const ETIMEDOUT: c_int = 110; +pub const ECONNREFUSED: c_int = 111; +pub const EHOSTDOWN: c_int = 112; +pub const EHOSTUNREACH: c_int = 113; +pub const EALREADY: c_int = 114; +pub const EINPROGRESS: c_int = 115; +pub const ESTALE: c_int = 116; +pub const EDQUOT: c_int = 122; +pub const ENOMEDIUM: c_int = 123; +pub const EMEDIUMTYPE: c_int = 124; +pub const ECANCELED: c_int = 125; +pub const ENOKEY: c_int = 126; +pub const EKEYEXPIRED: c_int = 127; +pub const EKEYREVOKED: c_int = 128; +pub const EKEYREJECTED: c_int = 129; +pub const EOWNERDEAD: c_int = 130; +pub const ENOTRECOVERABLE: c_int = 131; +pub const EHWPOISON: c_int = 133; +pub const ERFKILL: c_int = 132; + +pub const SA_SIGINFO: c_int = 0x00000004; +pub const SA_NOCLDWAIT: c_int = 0x00000002; + +pub const SOCK_STREAM: c_int = 1; +pub const SOCK_DGRAM: c_int = 2; + +pub const F_GETLK: c_int = 5; +pub const F_GETOWN: c_int = 9; +pub const F_SETOWN: c_int = 8; + +pub const PTRACE_GETFPXREGS: c_uint = 18; +pub const PTRACE_SETFPXREGS: c_uint = 19; +pub const PTRACE_SYSEMU: c_uint = 31; +pub const PTRACE_SYSEMU_SINGLESTEP: c_uint = 32; + +pub const MCL_CURRENT: c_int = 0x0001; +pub const MCL_FUTURE: c_int = 0x0002; +pub const MCL_ONFAULT: c_int = 0x0004; + +pub const POLLWRNORM: c_short = 0x100; +pub const POLLWRBAND: c_short = 0x200; + +pub const EFD_NONBLOCK: c_int = 0x800; +pub const SFD_NONBLOCK: c_int = 0x0800; + +pub const SIGCHLD: c_int = 17; +pub const SIGBUS: c_int = 7; +pub const SIGUSR1: c_int = 10; +pub const SIGUSR2: c_int = 12; +pub const SIGCONT: c_int = 18; +pub const SIGSTOP: c_int = 19; +pub const SIGTSTP: c_int = 20; +pub const SIGURG: c_int = 23; +pub const SIGIO: c_int = 29; +pub const SIGSYS: c_int = 31; +pub const SIGSTKFLT: c_int = 16; +#[deprecated(since = "0.2.55", note = "Use SIGSYS instead")] +pub const SIGUNUSED: c_int = 31; +pub const SIGPOLL: c_int = 29; +pub const SIGPWR: c_int = 30; +pub const SIG_SETMASK: c_int = 2; +pub const SIG_BLOCK: c_int = 0x000000; +pub const SIG_UNBLOCK: c_int = 0x01; +pub const SIGTTIN: c_int = 21; +pub const SIGTTOU: c_int = 22; +pub const SIGXCPU: c_int = 24; +pub const SIGXFSZ: c_int = 25; +pub const SIGVTALRM: c_int = 26; +pub const SIGPROF: c_int = 27; +pub const SIGWINCH: c_int = 28; +pub const SIGSTKSZ: size_t = 8192; +pub const MINSIGSTKSZ: size_t = 2048; +pub const CBAUD: crate::tcflag_t = 0o0010017; +pub const TAB1: crate::tcflag_t = 0x00000800; +pub const TAB2: crate::tcflag_t = 0x00001000; +pub const TAB3: crate::tcflag_t = 0x00001800; +pub const CR1: crate::tcflag_t = 0x00000200; +pub const CR2: crate::tcflag_t = 0x00000400; +pub const CR3: crate::tcflag_t = 0x00000600; +pub const FF1: crate::tcflag_t = 0x00008000; +pub const BS1: crate::tcflag_t = 0x00002000; +pub const VT1: crate::tcflag_t = 0x00004000; +pub const VWERASE: usize = 14; +pub const VREPRINT: usize = 12; +pub const VSUSP: usize = 10; +pub const VSTART: usize = 8; +pub const VSTOP: usize = 9; +pub const VDISCARD: usize = 13; +pub const VTIME: usize = 5; +pub const IXON: crate::tcflag_t = 0x00000400; +pub const IXOFF: crate::tcflag_t = 0x00001000; +pub const ONLCR: crate::tcflag_t = 0x4; +pub const CSIZE: crate::tcflag_t = 0x00000030; +pub const CS6: crate::tcflag_t = 0x00000010; +pub const CS7: crate::tcflag_t = 0x00000020; +pub 
const CS8: crate::tcflag_t = 0x00000030; +pub const CSTOPB: crate::tcflag_t = 0x00000040; +pub const CREAD: crate::tcflag_t = 0x00000080; +pub const PARENB: crate::tcflag_t = 0x00000100; +pub const PARODD: crate::tcflag_t = 0x00000200; +pub const HUPCL: crate::tcflag_t = 0x00000400; +pub const CLOCAL: crate::tcflag_t = 0x00000800; +pub const ECHOKE: crate::tcflag_t = 0x00000800; +pub const ECHOE: crate::tcflag_t = 0x00000010; +pub const ECHOK: crate::tcflag_t = 0x00000020; +pub const ECHONL: crate::tcflag_t = 0x00000040; +pub const ECHOPRT: crate::tcflag_t = 0x00000400; +pub const ECHOCTL: crate::tcflag_t = 0x00000200; +pub const ISIG: crate::tcflag_t = 0x00000001; +pub const ICANON: crate::tcflag_t = 0x00000002; +pub const PENDIN: crate::tcflag_t = 0x00004000; +pub const NOFLSH: crate::tcflag_t = 0x00000080; +pub const CIBAUD: crate::tcflag_t = 0o02003600000; +pub const CBAUDEX: crate::tcflag_t = 0o010000; +pub const VSWTC: usize = 7; +pub const OLCUC: crate::tcflag_t = 0o000002; +pub const NLDLY: crate::tcflag_t = 0o000400; +pub const CRDLY: crate::tcflag_t = 0o003000; +pub const TABDLY: crate::tcflag_t = 0o014000; +pub const BSDLY: crate::tcflag_t = 0o020000; +pub const FFDLY: crate::tcflag_t = 0o100000; +pub const VTDLY: crate::tcflag_t = 0o040000; +pub const XTABS: crate::tcflag_t = 0o014000; + +pub const B0: crate::speed_t = 0o000000; +pub const B50: crate::speed_t = 0o000001; +pub const B75: crate::speed_t = 0o000002; +pub const B110: crate::speed_t = 0o000003; +pub const B134: crate::speed_t = 0o000004; +pub const B150: crate::speed_t = 0o000005; +pub const B200: crate::speed_t = 0o000006; +pub const B300: crate::speed_t = 0o000007; +pub const B600: crate::speed_t = 0o000010; +pub const B1200: crate::speed_t = 0o000011; +pub const B1800: crate::speed_t = 0o000012; +pub const B2400: crate::speed_t = 0o000013; +pub const B4800: crate::speed_t = 0o000014; +pub const B9600: crate::speed_t = 0o000015; +pub const B19200: crate::speed_t = 0o000016; +pub const B38400: crate::speed_t = 0o000017; +pub const EXTA: crate::speed_t = B19200; +pub const EXTB: crate::speed_t = B38400; +pub const B57600: crate::speed_t = 0o010001; +pub const B115200: crate::speed_t = 0o010002; +pub const B230400: crate::speed_t = 0o010003; +pub const B460800: crate::speed_t = 0o010004; +pub const B500000: crate::speed_t = 0o010005; +pub const B576000: crate::speed_t = 0o010006; +pub const B921600: crate::speed_t = 0o010007; +pub const B1000000: crate::speed_t = 0o010010; +pub const B1152000: crate::speed_t = 0o010011; +pub const B1500000: crate::speed_t = 0o010012; +pub const B2000000: crate::speed_t = 0o010013; +pub const B2500000: crate::speed_t = 0o010014; +pub const B3000000: crate::speed_t = 0o010015; +pub const B3500000: crate::speed_t = 0o010016; +pub const B4000000: crate::speed_t = 0o010017; + +pub const VEOL: usize = 11; +pub const VEOL2: usize = 16; +pub const VMIN: usize = 6; +pub const IEXTEN: crate::tcflag_t = 0x00008000; +pub const TOSTOP: crate::tcflag_t = 0x00000100; +pub const FLUSHO: crate::tcflag_t = 0x00001000; +pub const EXTPROC: crate::tcflag_t = 0x00010000; + +pub const TCSANOW: c_int = 0; +pub const TCSADRAIN: c_int = 1; +pub const TCSAFLUSH: c_int = 2; + +pub const SYS_restart_syscall: c_long = 0; +pub const SYS_exit: c_long = 1; +pub const SYS_fork: c_long = 2; +pub const SYS_read: c_long = 3; +pub const SYS_write: c_long = 4; +pub const SYS_open: c_long = 5; +pub const SYS_close: c_long = 6; +pub const SYS_waitpid: c_long = 7; +pub const SYS_creat: c_long = 8; +pub const SYS_link: c_long 
= 9; +pub const SYS_unlink: c_long = 10; +pub const SYS_execve: c_long = 11; +pub const SYS_chdir: c_long = 12; +pub const SYS_time32: c_long = 13; +pub const SYS_mknod: c_long = 14; +pub const SYS_chmod: c_long = 15; +pub const SYS_chown16: c_long = 16; +pub const SYS_stat: c_long = 18; +pub const SYS_lseek: c_long = 19; +pub const SYS_getpid: c_long = 20; +pub const SYS_mount: c_long = 21; +pub const SYS_oldumount: c_long = 22; +pub const SYS_setuid16: c_long = 23; +pub const SYS_getuid16: c_long = 24; +pub const SYS_stime32: c_long = 25; +pub const SYS_ptrace: c_long = 26; +pub const SYS_alarm: c_long = 27; +pub const SYS_fstat: c_long = 28; +pub const SYS_pause: c_long = 29; +pub const SYS_utime32: c_long = 30; +pub const SYS_access: c_long = 33; +pub const SYS_nice: c_long = 34; +pub const SYS_sync: c_long = 36; +pub const SYS_kill: c_long = 37; +pub const SYS_rename: c_long = 38; +pub const SYS_mkdir: c_long = 39; +pub const SYS_rmdir: c_long = 40; +pub const SYS_dup: c_long = 41; +pub const SYS_pipe: c_long = 42; +pub const SYS_times: c_long = 43; +pub const SYS_brk: c_long = 45; +pub const SYS_setgid16: c_long = 46; +pub const SYS_getgid16: c_long = 47; +pub const SYS_signal: c_long = 48; +pub const SYS_geteuid16: c_long = 49; +pub const SYS_getegid16: c_long = 50; +pub const SYS_acct: c_long = 51; +pub const SYS_umount: c_long = 52; +pub const SYS_ioctl: c_long = 54; +pub const SYS_fcntl: c_long = 55; +pub const SYS_setpgid: c_long = 57; +pub const SYS_umask: c_long = 60; +pub const SYS_chroot: c_long = 61; +pub const SYS_ustat: c_long = 62; +pub const SYS_dup2: c_long = 63; +pub const SYS_getppid: c_long = 64; +pub const SYS_getpgrp: c_long = 65; +pub const SYS_setsid: c_long = 66; +pub const SYS_sigaction: c_long = 67; +pub const SYS_sgetmask: c_long = 68; +pub const SYS_ssetmask: c_long = 69; +pub const SYS_setreuid16: c_long = 70; +pub const SYS_setregid16: c_long = 71; +pub const SYS_sigsuspend: c_long = 72; +pub const SYS_sigpending: c_long = 73; +pub const SYS_sethostname: c_long = 74; +pub const SYS_setrlimit: c_long = 75; +pub const SYS_old_getrlimit: c_long = 76; +pub const SYS_getrusage: c_long = 77; +pub const SYS_gettimeofday: c_long = 78; +pub const SYS_settimeofday: c_long = 79; +pub const SYS_getgroups16: c_long = 80; +pub const SYS_setgroups16: c_long = 81; +pub const SYS_old_select: c_long = 82; +pub const SYS_symlink: c_long = 83; +pub const SYS_lstat: c_long = 84; +pub const SYS_readlink: c_long = 85; +pub const SYS_uselib: c_long = 86; +pub const SYS_swapon: c_long = 87; +pub const SYS_reboot: c_long = 88; +pub const SYS_old_readdir: c_long = 89; +pub const SYS_old_mmap: c_long = 90; +pub const SYS_munmap: c_long = 91; +pub const SYS_truncate: c_long = 92; +pub const SYS_ftruncate: c_long = 93; +pub const SYS_fchmod: c_long = 94; +pub const SYS_fchown16: c_long = 95; +pub const SYS_getpriority: c_long = 96; +pub const SYS_setpriority: c_long = 97; +pub const SYS_statfs: c_long = 99; +pub const SYS_fstatfs: c_long = 100; +pub const SYS_socketcall: c_long = 102; +pub const SYS_syslog: c_long = 103; +pub const SYS_setitimer: c_long = 104; +pub const SYS_getitimer: c_long = 105; +pub const SYS_newstat: c_long = 106; +pub const SYS_newlstat: c_long = 107; +pub const SYS_newfstat: c_long = 108; +pub const SYS_vhangup: c_long = 111; +pub const SYS_wait4: c_long = 114; +pub const SYS_swapoff: c_long = 115; +pub const SYS_sysinfo: c_long = 116; +pub const SYS_ipc: c_long = 117; +pub const SYS_fsync: c_long = 118; +pub const SYS_sigreturn: c_long = 119; +pub const 
SYS_clone: c_long = 120; +pub const SYS_setdomainname: c_long = 121; +pub const SYS_newuname: c_long = 122; +pub const SYS_cacheflush: c_long = 123; +pub const SYS_adjtimex_time32: c_long = 124; +pub const SYS_mprotect: c_long = 125; +pub const SYS_sigprocmask: c_long = 126; +#[deprecated(since = "0.2.70", note = "Functional up to 2.6 kernel")] +pub const SYS_create_module: c_long = 127; +pub const SYS_init_module: c_long = 128; +pub const SYS_delete_module: c_long = 129; +#[deprecated(since = "0.2.70", note = "Functional up to 2.6 kernel")] +pub const SYS_get_kernel_syms: c_long = 130; +pub const SYS_quotactl: c_long = 131; +pub const SYS_getpgid: c_long = 132; +pub const SYS_fchdir: c_long = 133; +pub const SYS_bdflush: c_long = 134; +pub const SYS_sysfs: c_long = 135; +pub const SYS_personality: c_long = 136; +pub const SYS_setfsuid16: c_long = 138; +pub const SYS_setfsgid16: c_long = 139; +pub const SYS_llseek: c_long = 140; +pub const SYS_getdents: c_long = 141; +pub const SYS_select: c_long = 142; +pub const SYS_flock: c_long = 143; +pub const SYS_msync: c_long = 144; +pub const SYS_readv: c_long = 145; +pub const SYS_writev: c_long = 146; +pub const SYS_getsid: c_long = 147; +pub const SYS_fdatasync: c_long = 148; +pub const SYS__sysctl: c_long = 149; +pub const SYS_mlock: c_long = 150; +pub const SYS_munlock: c_long = 151; +pub const SYS_mlockall: c_long = 152; +pub const SYS_munlockall: c_long = 153; +pub const SYS_sched_setparam: c_long = 154; +pub const SYS_sched_getparam: c_long = 155; +pub const SYS_sched_setscheduler: c_long = 156; +pub const SYS_sched_getscheduler: c_long = 157; +pub const SYS_sched_yield: c_long = 158; +pub const SYS_sched_get_priority_max: c_long = 159; +pub const SYS_sched_get_priority_min: c_long = 160; +pub const SYS_sched_rr_get_interval_time32: c_long = 161; +pub const SYS_nanosleep_time32: c_long = 162; +pub const SYS_mremap: c_long = 163; +pub const SYS_setresuid16: c_long = 164; +pub const SYS_getresuid16: c_long = 165; +pub const SYS_getpagesize: c_long = 166; +#[deprecated(since = "0.2.70", note = "Functional up to 2.6 kernel")] +pub const SYS_query_module: c_long = 167; +pub const SYS_poll: c_long = 168; +pub const SYS_nfsservctl: c_long = 169; +pub const SYS_setresgid16: c_long = 170; +pub const SYS_getresgid16: c_long = 171; +pub const SYS_prctl: c_long = 172; +pub const SYS_rt_sigreturn: c_long = 173; +pub const SYS_rt_sigaction: c_long = 174; +pub const SYS_rt_sigprocmask: c_long = 175; +pub const SYS_rt_sigpending: c_long = 176; +pub const SYS_rt_sigtimedwait_time32: c_long = 177; +pub const SYS_rt_sigqueueinfo: c_long = 178; +pub const SYS_rt_sigsuspend: c_long = 179; +pub const SYS_pread64: c_long = 180; +pub const SYS_pwrite64: c_long = 181; +pub const SYS_lchown16: c_long = 182; +pub const SYS_getcwd: c_long = 183; +pub const SYS_capget: c_long = 184; +pub const SYS_capset: c_long = 185; +pub const SYS_sigaltstack: c_long = 186; +pub const SYS_sendfile: c_long = 187; +pub const SYS_getpmsg: c_long = 188; +pub const SYS_putpmsg: c_long = 189; +pub const SYS_vfork: c_long = 190; +pub const SYS_getrlimit: c_long = 191; +pub const SYS_mmap2: c_long = 192; +pub const SYS_truncate64: c_long = 193; +pub const SYS_ftruncate64: c_long = 194; +pub const SYS_stat64: c_long = 195; +pub const SYS_lstat64: c_long = 196; +pub const SYS_fstat64: c_long = 197; +pub const SYS_chown: c_long = 198; +pub const SYS_getuid: c_long = 199; +pub const SYS_getgid: c_long = 200; +pub const SYS_geteuid: c_long = 201; +pub const SYS_getegid: c_long = 202; +pub const 
SYS_setreuid: c_long = 203; +pub const SYS_setregid: c_long = 204; +pub const SYS_getgroups: c_long = 205; +pub const SYS_setgroups: c_long = 206; +pub const SYS_fchown: c_long = 207; +pub const SYS_setresuid: c_long = 208; +pub const SYS_getresuid: c_long = 209; +pub const SYS_setresgid: c_long = 210; +pub const SYS_getresgid: c_long = 211; +pub const SYS_lchown: c_long = 212; +pub const SYS_setuid: c_long = 213; +pub const SYS_setgid: c_long = 214; +pub const SYS_setfsuid: c_long = 215; +pub const SYS_setfsgid: c_long = 216; +pub const SYS_pivot_root: c_long = 217; +pub const SYS_getdents64: c_long = 220; +pub const SYS_gettid: c_long = 221; +pub const SYS_tkill: c_long = 222; +pub const SYS_setxattr: c_long = 223; +pub const SYS_lsetxattr: c_long = 224; +pub const SYS_fsetxattr: c_long = 225; +pub const SYS_getxattr: c_long = 226; +pub const SYS_lgetxattr: c_long = 227; +pub const SYS_fgetxattr: c_long = 228; +pub const SYS_listxattr: c_long = 229; +pub const SYS_llistxattr: c_long = 230; +pub const SYS_flistxattr: c_long = 231; +pub const SYS_removexattr: c_long = 232; +pub const SYS_lremovexattr: c_long = 233; +pub const SYS_fremovexattr: c_long = 234; +pub const SYS_futex_time32: c_long = 235; +pub const SYS_sendfile64: c_long = 236; +pub const SYS_mincore: c_long = 237; +pub const SYS_madvise: c_long = 238; +pub const SYS_fcntl64: c_long = 239; +pub const SYS_readahead: c_long = 240; +pub const SYS_io_setup: c_long = 241; +pub const SYS_io_destroy: c_long = 242; +pub const SYS_io_getevents_time32: c_long = 243; +pub const SYS_io_submit: c_long = 244; +pub const SYS_io_cancel: c_long = 245; +pub const SYS_fadvise64: c_long = 246; +pub const SYS_exit_group: c_long = 247; +pub const SYS_lookup_dcookie: c_long = 248; +pub const SYS_epoll_create: c_long = 249; +pub const SYS_epoll_ctl: c_long = 250; +pub const SYS_epoll_wait: c_long = 251; +pub const SYS_remap_file_pages: c_long = 252; +pub const SYS_set_tid_address: c_long = 253; +pub const SYS_timer_create: c_long = 254; +pub const SYS_timer_settime32: c_long = 255; +pub const SYS_timer_gettime32: c_long = 256; +pub const SYS_timer_getoverrun: c_long = 257; +pub const SYS_timer_delete: c_long = 258; +pub const SYS_clock_settime32: c_long = 259; +pub const SYS_clock_gettime32: c_long = 260; +pub const SYS_clock_getres_time32: c_long = 261; +pub const SYS_clock_nanosleep_time32: c_long = 262; +pub const SYS_statfs64: c_long = 263; +pub const SYS_fstatfs64: c_long = 264; +pub const SYS_tgkill: c_long = 265; +pub const SYS_utimes_time32: c_long = 266; +pub const SYS_fadvise64_64: c_long = 267; +pub const SYS_mbind: c_long = 268; +pub const SYS_get_mempolicy: c_long = 269; +pub const SYS_set_mempolicy: c_long = 270; +pub const SYS_mq_open: c_long = 271; +pub const SYS_mq_unlink: c_long = 272; +pub const SYS_mq_timedsend_time32: c_long = 273; +pub const SYS_mq_timedreceive_time32: c_long = 274; +pub const SYS_mq_notify: c_long = 275; +pub const SYS_mq_getsetattr: c_long = 276; +pub const SYS_waitid: c_long = 277; +pub const SYS_add_key: c_long = 279; +pub const SYS_request_key: c_long = 280; +pub const SYS_keyctl: c_long = 281; +pub const SYS_ioprio_set: c_long = 282; +pub const SYS_ioprio_get: c_long = 283; +pub const SYS_inotify_init: c_long = 284; +pub const SYS_inotify_add_watch: c_long = 285; +pub const SYS_inotify_rm_watch: c_long = 286; +pub const SYS_migrate_pages: c_long = 287; +pub const SYS_openat: c_long = 288; +pub const SYS_mkdirat: c_long = 289; +pub const SYS_mknodat: c_long = 290; +pub const SYS_fchownat: c_long = 291; +pub 
const SYS_futimesat_time32: c_long = 292; +pub const SYS_fstatat64: c_long = 293; +pub const SYS_unlinkat: c_long = 294; +pub const SYS_renameat: c_long = 295; +pub const SYS_linkat: c_long = 296; +pub const SYS_symlinkat: c_long = 297; +pub const SYS_readlinkat: c_long = 298; +pub const SYS_fchmodat: c_long = 299; +pub const SYS_faccessat: c_long = 300; +pub const SYS_pselect6_time32: c_long = 301; +pub const SYS_ppoll_time32: c_long = 302; +pub const SYS_unshare: c_long = 303; +pub const SYS_set_robust_list: c_long = 304; +pub const SYS_get_robust_list: c_long = 305; +pub const SYS_splice: c_long = 306; +pub const SYS_sync_file_range: c_long = 307; +pub const SYS_tee: c_long = 308; +pub const SYS_vmsplice: c_long = 309; +pub const SYS_move_pages: c_long = 310; +pub const SYS_sched_setaffinity: c_long = 311; +pub const SYS_sched_getaffinity: c_long = 312; +pub const SYS_kexec_load: c_long = 313; +pub const SYS_getcpu: c_long = 314; +pub const SYS_epoll_pwait: c_long = 315; +pub const SYS_utimensat_time32: c_long = 316; +pub const SYS_signalfd: c_long = 317; +pub const SYS_timerfd_create: c_long = 318; +pub const SYS_eventfd: c_long = 319; +pub const SYS_fallocate: c_long = 320; +pub const SYS_timerfd_settime32: c_long = 321; +pub const SYS_timerfd_gettime32: c_long = 322; +pub const SYS_signalfd4: c_long = 323; +pub const SYS_eventfd2: c_long = 324; +pub const SYS_epoll_create1: c_long = 325; +pub const SYS_dup3: c_long = 326; +pub const SYS_pipe2: c_long = 327; +pub const SYS_inotify_init1: c_long = 328; +pub const SYS_preadv: c_long = 329; +pub const SYS_pwritev: c_long = 330; +pub const SYS_rt_tgsigqueueinfo: c_long = 331; +pub const SYS_perf_event_open: c_long = 332; +pub const SYS_get_thread_area: c_long = 333; +pub const SYS_set_thread_area: c_long = 334; +pub const SYS_atomic_cmpxchg_32: c_long = 335; +pub const SYS_atomic_barrier: c_long = 336; +pub const SYS_fanotify_init: c_long = 337; +pub const SYS_fanotify_mark: c_long = 338; +pub const SYS_prlimit64: c_long = 339; +pub const SYS_name_to_handle_at: c_long = 340; +pub const SYS_open_by_handle_at: c_long = 341; +pub const SYS_clock_adjtime32: c_long = 342; +pub const SYS_syncfs: c_long = 343; +pub const SYS_setns: c_long = 344; +pub const SYS_process_vm_readv: c_long = 345; +pub const SYS_process_vm_writev: c_long = 346; +pub const SYS_kcmp: c_long = 347; +pub const SYS_finit_module: c_long = 348; +pub const SYS_sched_setattr: c_long = 349; +pub const SYS_sched_getattr: c_long = 350; +pub const SYS_renameat2: c_long = 351; +pub const SYS_getrandom: c_long = 352; +pub const SYS_memfd_create: c_long = 353; +pub const SYS_bpf: c_long = 354; +pub const SYS_execveat: c_long = 355; +pub const SYS_socket: c_long = 356; +pub const SYS_socketpair: c_long = 357; +pub const SYS_bind: c_long = 358; +pub const SYS_connect: c_long = 359; +pub const SYS_listen: c_long = 360; +pub const SYS_accept4: c_long = 361; +pub const SYS_getsockopt: c_long = 362; +pub const SYS_setsockopt: c_long = 363; +pub const SYS_getsockname: c_long = 364; +pub const SYS_getpeername: c_long = 365; +pub const SYS_sendto: c_long = 366; +pub const SYS_sendmsg: c_long = 367; +pub const SYS_recvfrom: c_long = 368; +pub const SYS_recvmsg: c_long = 369; +pub const SYS_shutdown: c_long = 370; +pub const SYS_recvmmsg_time32: c_long = 371; +pub const SYS_sendmmsg: c_long = 372; +pub const SYS_userfaultfd: c_long = 373; +pub const SYS_membarrier: c_long = 374; +pub const SYS_mlock2: c_long = 375; +pub const SYS_copy_file_range: c_long = 376; +pub const SYS_preadv2: c_long = 
377;
+pub const SYS_pwritev2: c_long = 378;
+pub const SYS_statx: c_long = 379;
+pub const SYS_seccomp: c_long = 380;
+pub const SYS_pkey_mprotect: c_long = 381;
+pub const SYS_pkey_alloc: c_long = 382;
+pub const SYS_pkey_free: c_long = 383;
+pub const SYS_rseq: c_long = 384;
+pub const SYS_semget: c_long = 393;
+pub const SYS_semctl: c_long = 394;
+pub const SYS_shmget: c_long = 395;
+pub const SYS_shmctl: c_long = 396;
+pub const SYS_shmat: c_long = 397;
+pub const SYS_shmdt: c_long = 398;
+pub const SYS_msgget: c_long = 399;
+pub const SYS_msgsnd: c_long = 400;
+pub const SYS_msgrcv: c_long = 401;
+pub const SYS_msgctl: c_long = 402;
+pub const SYS_clock_gettime: c_long = 403;
+pub const SYS_clock_settime: c_long = 404;
+pub const SYS_clock_adjtime: c_long = 405;
+pub const SYS_clock_getres: c_long = 406;
+pub const SYS_clock_nanosleep: c_long = 407;
+pub const SYS_timer_gettime: c_long = 408;
+pub const SYS_timer_settime: c_long = 409;
+pub const SYS_timerfd_gettime: c_long = 410;
+pub const SYS_timerfd_settime: c_long = 411;
+pub const SYS_utimensat: c_long = 412;
+pub const SYS_pselect6: c_long = 413;
+pub const SYS_ppoll: c_long = 414;
+pub const SYS_io_pgetevents: c_long = 416;
+pub const SYS_recvmmsg: c_long = 417;
+pub const SYS_mq_timedsend: c_long = 418;
+pub const SYS_mq_timedreceive: c_long = 419;
+pub const SYS_semtimedop: c_long = 420;
+pub const SYS_rt_sigtimedwait: c_long = 421;
+pub const SYS_futex: c_long = 422;
+pub const SYS_sched_rr_get_interval: c_long = 423;
+pub const SYS_pidfd_send_signal: c_long = 424;
+pub const SYS_io_uring_setup: c_long = 425;
+pub const SYS_io_uring_enter: c_long = 426;
+pub const SYS_io_uring_register: c_long = 427;
+pub const SYS_open_tree: c_long = 428;
+pub const SYS_move_mount: c_long = 429;
+pub const SYS_fsopen: c_long = 430;
+pub const SYS_fsconfig: c_long = 431;
+pub const SYS_fsmount: c_long = 432;
+pub const SYS_fspick: c_long = 433;
+pub const SYS_pidfd_open: c_long = 434;
+pub const SYS_clone3: c_long = 435;
+pub const SYS_close_range: c_long = 436;
+pub const SYS_openat2: c_long = 437;
+pub const SYS_pidfd_getfd: c_long = 438;
+pub const SYS_faccessat2: c_long = 439;
+pub const SYS_process_madvise: c_long = 440;
+pub const SYS_epoll_pwait2: c_long = 441;
+pub const SYS_mount_setattr: c_long = 442;
+pub const SYS_quotactl_fd: c_long = 443;
+pub const SYS_landlock_create_ruleset: c_long = 444;
+pub const SYS_landlock_add_rule: c_long = 445;
+pub const SYS_landlock_restrict_self: c_long = 446;
+pub const SYS_process_mrelease: c_long = 448;
+pub const SYS_futex_waitv: c_long = 449;
+pub const SYS_set_mempolicy_home_node: c_long = 450;
diff --git a/vendor/libc/src/unix/linux_like/linux/gnu/b32/mips/mod.rs b/vendor/libc/src/unix/linux_like/linux/gnu/b32/mips/mod.rs
new file mode 100644
index 00000000000000..3d2775cd800ae7
--- /dev/null
+++ b/vendor/libc/src/unix/linux_like/linux/gnu/b32/mips/mod.rs
@@ -0,0 +1,925 @@
+use crate::prelude::*;
+use crate::{off64_t, off_t};
+
+pub type wchar_t = i32;
+
+s!
{ + pub struct stat { + #[cfg(not(gnu_time_bits64))] + pub st_dev: c_ulong, + #[cfg(gnu_time_bits64)] + pub st_dev: crate::dev_t, + + #[cfg(not(gnu_time_bits64))] + st_pad1: [c_long; 3], + + pub st_ino: crate::ino_t, + + pub st_mode: crate::mode_t, + pub st_nlink: crate::nlink_t, + pub st_uid: crate::uid_t, + pub st_gid: crate::gid_t, + + #[cfg(not(gnu_time_bits64))] + pub st_rdev: c_ulong, + #[cfg(gnu_time_bits64)] + pub st_rdev: crate::dev_t, + + #[cfg(not(gnu_file_offset_bits64))] + st_pad2: [c_long; 2], + #[cfg(all(not(gnu_time_bits64), gnu_file_offset_bits64))] + st_pad2: [c_long; 3], + + pub st_size: off_t, + + #[cfg(not(gnu_file_offset_bits64))] + st_pad3: c_long, + + #[cfg(gnu_time_bits64)] + pub st_blksize: crate::blksize_t, + #[cfg(gnu_time_bits64)] + pub st_blocks: crate::blkcnt_t, + + pub st_atime: crate::time_t, + #[cfg(gnu_time_bits64)] + _atime_pad: c_int, + pub st_atime_nsec: c_long, + pub st_mtime: crate::time_t, + #[cfg(gnu_time_bits64)] + _mtime_pad: c_int, + pub st_mtime_nsec: c_long, + pub st_ctime: crate::time_t, + #[cfg(gnu_time_bits64)] + _ctime_pad: c_int, + pub st_ctime_nsec: c_long, + + #[cfg(not(gnu_time_bits64))] + pub st_blksize: crate::blksize_t, + #[cfg(all(not(gnu_time_bits64), gnu_file_offset_bits64))] + st_pad4: c_long, + #[cfg(not(gnu_time_bits64))] + pub st_blocks: crate::blkcnt_t, + #[cfg(not(gnu_time_bits64))] + st_pad5: [c_long; 14], + } + + pub struct stat64 { + #[cfg(not(gnu_time_bits64))] + pub st_dev: c_ulong, + #[cfg(gnu_time_bits64)] + pub st_dev: crate::dev_t, + + #[cfg(not(gnu_time_bits64))] + st_pad1: [c_long; 3], + + pub st_ino: crate::ino64_t, + pub st_mode: crate::mode_t, + pub st_nlink: crate::nlink_t, + pub st_uid: crate::uid_t, + pub st_gid: crate::gid_t, + + #[cfg(not(gnu_time_bits64))] + pub st_rdev: c_ulong, + #[cfg(gnu_time_bits64)] + pub st_rdev: crate::dev_t, + + #[cfg(not(gnu_time_bits64))] + st_pad2: [c_long; 3], + + pub st_size: off64_t, + + #[cfg(gnu_time_bits64)] + pub st_blksize: crate::blksize_t, + #[cfg(gnu_time_bits64)] + pub st_blocks: crate::blkcnt_t, + + pub st_atime: crate::time_t, + #[cfg(gnu_time_bits64)] + _atime_pad: c_int, + pub st_atime_nsec: c_long, + pub st_mtime: crate::time_t, + #[cfg(gnu_time_bits64)] + _mtime_pad: c_int, + pub st_mtime_nsec: c_long, + pub st_ctime: crate::time_t, + #[cfg(gnu_time_bits64)] + _ctime_pad: c_int, + pub st_ctime_nsec: c_long, + + #[cfg(not(gnu_time_bits64))] + pub st_blksize: crate::blksize_t, + #[cfg(not(gnu_time_bits64))] + st_pad3: c_long, + #[cfg(not(gnu_time_bits64))] + pub st_blocks: crate::blkcnt64_t, + #[cfg(not(gnu_time_bits64))] + st_pad5: [c_long; 14], + } + + pub struct statfs { + pub f_type: c_long, + pub f_bsize: c_long, + pub f_frsize: c_long, + pub f_blocks: crate::fsblkcnt_t, + pub f_bfree: crate::fsblkcnt_t, + pub f_files: crate::fsblkcnt_t, + pub f_ffree: crate::fsblkcnt_t, + pub f_bavail: crate::fsblkcnt_t, + pub f_fsid: crate::fsid_t, + + pub f_namelen: c_long, + pub f_flags: c_long, + f_spare: [c_long; 5], + } + + pub struct statfs64 { + pub f_type: c_long, + pub f_bsize: c_long, + pub f_frsize: c_long, + pub f_blocks: u64, + pub f_bfree: u64, + pub f_files: u64, + pub f_ffree: u64, + pub f_bavail: u64, + pub f_fsid: crate::fsid_t, + pub f_namelen: c_long, + pub f_flags: c_long, + pub f_spare: [c_long; 5], + } + + pub struct statvfs64 { + pub f_bsize: c_ulong, + pub f_frsize: c_ulong, + pub f_blocks: u64, + pub f_bfree: u64, + pub f_bavail: u64, + pub f_files: u64, + pub f_ffree: u64, + pub f_favail: u64, + pub f_fsid: c_ulong, + __f_unused: c_int, + pub 
f_flag: c_ulong, + pub f_namemax: c_ulong, + __f_spare: [c_int; 6], + } + + // FIXME(1.0): This should not implement `PartialEq` + #[allow(unpredictable_function_pointer_comparisons)] + pub struct sigaction { + pub sa_flags: c_int, + pub sa_sigaction: crate::sighandler_t, + pub sa_mask: crate::sigset_t, + pub sa_restorer: Option, + _resv: [c_int; 1], + } + + pub struct stack_t { + pub ss_sp: *mut c_void, + pub ss_size: size_t, + pub ss_flags: c_int, + } + + pub struct siginfo_t { + pub si_signo: c_int, + pub si_code: c_int, + pub si_errno: c_int, + pub _pad: [c_int; 29], + } + + pub struct ipc_perm { + pub __key: crate::key_t, + pub uid: crate::uid_t, + pub gid: crate::gid_t, + pub cuid: crate::uid_t, + pub cgid: crate::gid_t, + pub mode: c_uint, + pub __seq: c_ushort, + __pad1: c_ushort, + __unused1: c_ulong, + __unused2: c_ulong, + } + + pub struct shmid_ds { + pub shm_perm: crate::ipc_perm, + pub shm_segsz: size_t, + pub shm_atime: crate::time_t, + pub shm_dtime: crate::time_t, + pub shm_ctime: crate::time_t, + pub shm_cpid: crate::pid_t, + pub shm_lpid: crate::pid_t, + pub shm_nattch: crate::shmatt_t, + __unused4: c_ulong, + __unused5: c_ulong, + } + + pub struct msqid_ds { + pub msg_perm: crate::ipc_perm, + #[cfg(all(not(gnu_time_bits64), target_endian = "big"))] + __glibc_reserved1: c_ulong, + pub msg_stime: crate::time_t, + #[cfg(all(not(gnu_time_bits64), target_endian = "little"))] + __glibc_reserved1: c_ulong, + #[cfg(all(not(gnu_time_bits64), target_endian = "big"))] + __glibc_reserved2: c_ulong, + pub msg_rtime: crate::time_t, + #[cfg(all(not(gnu_time_bits64), target_endian = "little"))] + __glibc_reserved2: c_ulong, + #[cfg(all(not(gnu_time_bits64), target_endian = "big"))] + __glibc_reserved3: c_ulong, + pub msg_ctime: crate::time_t, + #[cfg(target_endian = "little")] + __glibc_reserved3: c_ulong, + __msg_cbytes: c_ulong, + pub msg_qnum: crate::msgqnum_t, + pub msg_qbytes: crate::msglen_t, + pub msg_lspid: crate::pid_t, + pub msg_lrpid: crate::pid_t, + __glibc_reserved4: c_ulong, + __glibc_reserved5: c_ulong, + } + + pub struct flock { + pub l_type: c_short, + pub l_whence: c_short, + pub l_start: off_t, + pub l_len: off_t, + #[cfg(not(gnu_file_offset_bits64))] + pub l_sysid: c_long, + pub l_pid: crate::pid_t, + #[cfg(not(gnu_file_offset_bits64))] + __glibc_reserved0: [c_long; 4], + } +} + +s_no_extra_traits! 
{ + #[repr(align(8))] + pub struct max_align_t { + priv_: [f32; 4], + } +} + +pub const O_LARGEFILE: c_int = 0x2000; + +pub const SYS_syscall: c_long = 4000 + 0; +pub const SYS_exit: c_long = 4000 + 1; +pub const SYS_fork: c_long = 4000 + 2; +pub const SYS_read: c_long = 4000 + 3; +pub const SYS_write: c_long = 4000 + 4; +pub const SYS_open: c_long = 4000 + 5; +pub const SYS_close: c_long = 4000 + 6; +pub const SYS_waitpid: c_long = 4000 + 7; +pub const SYS_creat: c_long = 4000 + 8; +pub const SYS_link: c_long = 4000 + 9; +pub const SYS_unlink: c_long = 4000 + 10; +pub const SYS_execve: c_long = 4000 + 11; +pub const SYS_chdir: c_long = 4000 + 12; +pub const SYS_time: c_long = 4000 + 13; +pub const SYS_mknod: c_long = 4000 + 14; +pub const SYS_chmod: c_long = 4000 + 15; +pub const SYS_lchown: c_long = 4000 + 16; +pub const SYS_break: c_long = 4000 + 17; +pub const SYS_lseek: c_long = 4000 + 19; +pub const SYS_getpid: c_long = 4000 + 20; +pub const SYS_mount: c_long = 4000 + 21; +pub const SYS_umount: c_long = 4000 + 22; +pub const SYS_setuid: c_long = 4000 + 23; +pub const SYS_getuid: c_long = 4000 + 24; +pub const SYS_stime: c_long = 4000 + 25; +pub const SYS_ptrace: c_long = 4000 + 26; +pub const SYS_alarm: c_long = 4000 + 27; +pub const SYS_pause: c_long = 4000 + 29; +pub const SYS_utime: c_long = 4000 + 30; +pub const SYS_stty: c_long = 4000 + 31; +pub const SYS_gtty: c_long = 4000 + 32; +pub const SYS_access: c_long = 4000 + 33; +pub const SYS_nice: c_long = 4000 + 34; +pub const SYS_ftime: c_long = 4000 + 35; +pub const SYS_sync: c_long = 4000 + 36; +pub const SYS_kill: c_long = 4000 + 37; +pub const SYS_rename: c_long = 4000 + 38; +pub const SYS_mkdir: c_long = 4000 + 39; +pub const SYS_rmdir: c_long = 4000 + 40; +pub const SYS_dup: c_long = 4000 + 41; +pub const SYS_pipe: c_long = 4000 + 42; +pub const SYS_times: c_long = 4000 + 43; +pub const SYS_prof: c_long = 4000 + 44; +pub const SYS_brk: c_long = 4000 + 45; +pub const SYS_setgid: c_long = 4000 + 46; +pub const SYS_getgid: c_long = 4000 + 47; +pub const SYS_signal: c_long = 4000 + 48; +pub const SYS_geteuid: c_long = 4000 + 49; +pub const SYS_getegid: c_long = 4000 + 50; +pub const SYS_acct: c_long = 4000 + 51; +pub const SYS_umount2: c_long = 4000 + 52; +pub const SYS_lock: c_long = 4000 + 53; +pub const SYS_ioctl: c_long = 4000 + 54; +pub const SYS_fcntl: c_long = 4000 + 55; +pub const SYS_mpx: c_long = 4000 + 56; +pub const SYS_setpgid: c_long = 4000 + 57; +pub const SYS_ulimit: c_long = 4000 + 58; +pub const SYS_umask: c_long = 4000 + 60; +pub const SYS_chroot: c_long = 4000 + 61; +pub const SYS_ustat: c_long = 4000 + 62; +pub const SYS_dup2: c_long = 4000 + 63; +pub const SYS_getppid: c_long = 4000 + 64; +pub const SYS_getpgrp: c_long = 4000 + 65; +pub const SYS_setsid: c_long = 4000 + 66; +pub const SYS_sigaction: c_long = 4000 + 67; +pub const SYS_sgetmask: c_long = 4000 + 68; +pub const SYS_ssetmask: c_long = 4000 + 69; +pub const SYS_setreuid: c_long = 4000 + 70; +pub const SYS_setregid: c_long = 4000 + 71; +pub const SYS_sigsuspend: c_long = 4000 + 72; +pub const SYS_sigpending: c_long = 4000 + 73; +pub const SYS_sethostname: c_long = 4000 + 74; +pub const SYS_setrlimit: c_long = 4000 + 75; +pub const SYS_getrlimit: c_long = 4000 + 76; +pub const SYS_getrusage: c_long = 4000 + 77; +pub const SYS_gettimeofday: c_long = 4000 + 78; +pub const SYS_settimeofday: c_long = 4000 + 79; +pub const SYS_getgroups: c_long = 4000 + 80; +pub const SYS_setgroups: c_long = 4000 + 81; +pub const SYS_symlink: c_long = 4000 + 83; +pub 
const SYS_readlink: c_long = 4000 + 85; +pub const SYS_uselib: c_long = 4000 + 86; +pub const SYS_swapon: c_long = 4000 + 87; +pub const SYS_reboot: c_long = 4000 + 88; +pub const SYS_readdir: c_long = 4000 + 89; +pub const SYS_mmap: c_long = 4000 + 90; +pub const SYS_munmap: c_long = 4000 + 91; +pub const SYS_truncate: c_long = 4000 + 92; +pub const SYS_ftruncate: c_long = 4000 + 93; +pub const SYS_fchmod: c_long = 4000 + 94; +pub const SYS_fchown: c_long = 4000 + 95; +pub const SYS_getpriority: c_long = 4000 + 96; +pub const SYS_setpriority: c_long = 4000 + 97; +pub const SYS_profil: c_long = 4000 + 98; +pub const SYS_statfs: c_long = 4000 + 99; +pub const SYS_fstatfs: c_long = 4000 + 100; +pub const SYS_ioperm: c_long = 4000 + 101; +pub const SYS_socketcall: c_long = 4000 + 102; +pub const SYS_syslog: c_long = 4000 + 103; +pub const SYS_setitimer: c_long = 4000 + 104; +pub const SYS_getitimer: c_long = 4000 + 105; +pub const SYS_stat: c_long = 4000 + 106; +pub const SYS_lstat: c_long = 4000 + 107; +pub const SYS_fstat: c_long = 4000 + 108; +pub const SYS_iopl: c_long = 4000 + 110; +pub const SYS_vhangup: c_long = 4000 + 111; +pub const SYS_idle: c_long = 4000 + 112; +pub const SYS_vm86: c_long = 4000 + 113; +pub const SYS_wait4: c_long = 4000 + 114; +pub const SYS_swapoff: c_long = 4000 + 115; +pub const SYS_sysinfo: c_long = 4000 + 116; +pub const SYS_ipc: c_long = 4000 + 117; +pub const SYS_fsync: c_long = 4000 + 118; +pub const SYS_sigreturn: c_long = 4000 + 119; +pub const SYS_clone: c_long = 4000 + 120; +pub const SYS_setdomainname: c_long = 4000 + 121; +pub const SYS_uname: c_long = 4000 + 122; +pub const SYS_modify_ldt: c_long = 4000 + 123; +pub const SYS_adjtimex: c_long = 4000 + 124; +pub const SYS_mprotect: c_long = 4000 + 125; +pub const SYS_sigprocmask: c_long = 4000 + 126; +#[deprecated(since = "0.2.70", note = "Functional up to 2.6 kernel")] +pub const SYS_create_module: c_long = 4000 + 127; +pub const SYS_init_module: c_long = 4000 + 128; +pub const SYS_delete_module: c_long = 4000 + 129; +#[deprecated(since = "0.2.70", note = "Functional up to 2.6 kernel")] +pub const SYS_get_kernel_syms: c_long = 4000 + 130; +pub const SYS_quotactl: c_long = 4000 + 131; +pub const SYS_getpgid: c_long = 4000 + 132; +pub const SYS_fchdir: c_long = 4000 + 133; +pub const SYS_bdflush: c_long = 4000 + 134; +pub const SYS_sysfs: c_long = 4000 + 135; +pub const SYS_personality: c_long = 4000 + 136; +pub const SYS_afs_syscall: c_long = 4000 + 137; +pub const SYS_setfsuid: c_long = 4000 + 138; +pub const SYS_setfsgid: c_long = 4000 + 139; +pub const SYS__llseek: c_long = 4000 + 140; +pub const SYS_getdents: c_long = 4000 + 141; +pub const SYS__newselect: c_long = 4000 + 142; +pub const SYS_flock: c_long = 4000 + 143; +pub const SYS_msync: c_long = 4000 + 144; +pub const SYS_readv: c_long = 4000 + 145; +pub const SYS_writev: c_long = 4000 + 146; +pub const SYS_cacheflush: c_long = 4000 + 147; +pub const SYS_cachectl: c_long = 4000 + 148; +pub const SYS_sysmips: c_long = 4000 + 149; +pub const SYS_getsid: c_long = 4000 + 151; +pub const SYS_fdatasync: c_long = 4000 + 152; +pub const SYS__sysctl: c_long = 4000 + 153; +pub const SYS_mlock: c_long = 4000 + 154; +pub const SYS_munlock: c_long = 4000 + 155; +pub const SYS_mlockall: c_long = 4000 + 156; +pub const SYS_munlockall: c_long = 4000 + 157; +pub const SYS_sched_setparam: c_long = 4000 + 158; +pub const SYS_sched_getparam: c_long = 4000 + 159; +pub const SYS_sched_setscheduler: c_long = 4000 + 160; +pub const SYS_sched_getscheduler: c_long = 
4000 + 161; +pub const SYS_sched_yield: c_long = 4000 + 162; +pub const SYS_sched_get_priority_max: c_long = 4000 + 163; +pub const SYS_sched_get_priority_min: c_long = 4000 + 164; +pub const SYS_sched_rr_get_interval: c_long = 4000 + 165; +pub const SYS_nanosleep: c_long = 4000 + 166; +pub const SYS_mremap: c_long = 4000 + 167; +pub const SYS_accept: c_long = 4000 + 168; +pub const SYS_bind: c_long = 4000 + 169; +pub const SYS_connect: c_long = 4000 + 170; +pub const SYS_getpeername: c_long = 4000 + 171; +pub const SYS_getsockname: c_long = 4000 + 172; +pub const SYS_getsockopt: c_long = 4000 + 173; +pub const SYS_listen: c_long = 4000 + 174; +pub const SYS_recv: c_long = 4000 + 175; +pub const SYS_recvfrom: c_long = 4000 + 176; +pub const SYS_recvmsg: c_long = 4000 + 177; +pub const SYS_send: c_long = 4000 + 178; +pub const SYS_sendmsg: c_long = 4000 + 179; +pub const SYS_sendto: c_long = 4000 + 180; +pub const SYS_setsockopt: c_long = 4000 + 181; +pub const SYS_shutdown: c_long = 4000 + 182; +pub const SYS_socket: c_long = 4000 + 183; +pub const SYS_socketpair: c_long = 4000 + 184; +pub const SYS_setresuid: c_long = 4000 + 185; +pub const SYS_getresuid: c_long = 4000 + 186; +#[deprecated(since = "0.2.70", note = "Functional up to 2.6 kernel")] +pub const SYS_query_module: c_long = 4000 + 187; +pub const SYS_poll: c_long = 4000 + 188; +pub const SYS_nfsservctl: c_long = 4000 + 189; +pub const SYS_setresgid: c_long = 4000 + 190; +pub const SYS_getresgid: c_long = 4000 + 191; +pub const SYS_prctl: c_long = 4000 + 192; +pub const SYS_rt_sigreturn: c_long = 4000 + 193; +pub const SYS_rt_sigaction: c_long = 4000 + 194; +pub const SYS_rt_sigprocmask: c_long = 4000 + 195; +pub const SYS_rt_sigpending: c_long = 4000 + 196; +pub const SYS_rt_sigtimedwait: c_long = 4000 + 197; +pub const SYS_rt_sigqueueinfo: c_long = 4000 + 198; +pub const SYS_rt_sigsuspend: c_long = 4000 + 199; +pub const SYS_pread64: c_long = 4000 + 200; +pub const SYS_pwrite64: c_long = 4000 + 201; +pub const SYS_chown: c_long = 4000 + 202; +pub const SYS_getcwd: c_long = 4000 + 203; +pub const SYS_capget: c_long = 4000 + 204; +pub const SYS_capset: c_long = 4000 + 205; +pub const SYS_sigaltstack: c_long = 4000 + 206; +pub const SYS_sendfile: c_long = 4000 + 207; +pub const SYS_getpmsg: c_long = 4000 + 208; +pub const SYS_putpmsg: c_long = 4000 + 209; +pub const SYS_mmap2: c_long = 4000 + 210; +pub const SYS_truncate64: c_long = 4000 + 211; +pub const SYS_ftruncate64: c_long = 4000 + 212; +pub const SYS_stat64: c_long = 4000 + 213; +pub const SYS_lstat64: c_long = 4000 + 214; +pub const SYS_fstat64: c_long = 4000 + 215; +pub const SYS_pivot_root: c_long = 4000 + 216; +pub const SYS_mincore: c_long = 4000 + 217; +pub const SYS_madvise: c_long = 4000 + 218; +pub const SYS_getdents64: c_long = 4000 + 219; +pub const SYS_fcntl64: c_long = 4000 + 220; +pub const SYS_gettid: c_long = 4000 + 222; +pub const SYS_readahead: c_long = 4000 + 223; +pub const SYS_setxattr: c_long = 4000 + 224; +pub const SYS_lsetxattr: c_long = 4000 + 225; +pub const SYS_fsetxattr: c_long = 4000 + 226; +pub const SYS_getxattr: c_long = 4000 + 227; +pub const SYS_lgetxattr: c_long = 4000 + 228; +pub const SYS_fgetxattr: c_long = 4000 + 229; +pub const SYS_listxattr: c_long = 4000 + 230; +pub const SYS_llistxattr: c_long = 4000 + 231; +pub const SYS_flistxattr: c_long = 4000 + 232; +pub const SYS_removexattr: c_long = 4000 + 233; +pub const SYS_lremovexattr: c_long = 4000 + 234; +pub const SYS_fremovexattr: c_long = 4000 + 235; +pub const SYS_tkill: c_long = 
4000 + 236; +pub const SYS_sendfile64: c_long = 4000 + 237; +pub const SYS_futex: c_long = 4000 + 238; +pub const SYS_sched_setaffinity: c_long = 4000 + 239; +pub const SYS_sched_getaffinity: c_long = 4000 + 240; +pub const SYS_io_setup: c_long = 4000 + 241; +pub const SYS_io_destroy: c_long = 4000 + 242; +pub const SYS_io_getevents: c_long = 4000 + 243; +pub const SYS_io_submit: c_long = 4000 + 244; +pub const SYS_io_cancel: c_long = 4000 + 245; +pub const SYS_exit_group: c_long = 4000 + 246; +pub const SYS_lookup_dcookie: c_long = 4000 + 247; +pub const SYS_epoll_create: c_long = 4000 + 248; +pub const SYS_epoll_ctl: c_long = 4000 + 249; +pub const SYS_epoll_wait: c_long = 4000 + 250; +pub const SYS_remap_file_pages: c_long = 4000 + 251; +pub const SYS_set_tid_address: c_long = 4000 + 252; +pub const SYS_restart_syscall: c_long = 4000 + 253; +pub const SYS_fadvise64: c_long = 4000 + 254; +pub const SYS_statfs64: c_long = 4000 + 255; +pub const SYS_fstatfs64: c_long = 4000 + 256; +pub const SYS_timer_create: c_long = 4000 + 257; +pub const SYS_timer_settime: c_long = 4000 + 258; +pub const SYS_timer_gettime: c_long = 4000 + 259; +pub const SYS_timer_getoverrun: c_long = 4000 + 260; +pub const SYS_timer_delete: c_long = 4000 + 261; +pub const SYS_clock_settime: c_long = 4000 + 262; +pub const SYS_clock_gettime: c_long = 4000 + 263; +pub const SYS_clock_getres: c_long = 4000 + 264; +pub const SYS_clock_nanosleep: c_long = 4000 + 265; +pub const SYS_tgkill: c_long = 4000 + 266; +pub const SYS_utimes: c_long = 4000 + 267; +pub const SYS_mbind: c_long = 4000 + 268; +pub const SYS_get_mempolicy: c_long = 4000 + 269; +pub const SYS_set_mempolicy: c_long = 4000 + 270; +pub const SYS_mq_open: c_long = 4000 + 271; +pub const SYS_mq_unlink: c_long = 4000 + 272; +pub const SYS_mq_timedsend: c_long = 4000 + 273; +pub const SYS_mq_timedreceive: c_long = 4000 + 274; +pub const SYS_mq_notify: c_long = 4000 + 275; +pub const SYS_mq_getsetattr: c_long = 4000 + 276; +pub const SYS_vserver: c_long = 4000 + 277; +pub const SYS_waitid: c_long = 4000 + 278; +/* pub const SYS_sys_setaltroot: c_long = 4000 + 279; */ +pub const SYS_add_key: c_long = 4000 + 280; +pub const SYS_request_key: c_long = 4000 + 281; +pub const SYS_keyctl: c_long = 4000 + 282; +pub const SYS_set_thread_area: c_long = 4000 + 283; +pub const SYS_inotify_init: c_long = 4000 + 284; +pub const SYS_inotify_add_watch: c_long = 4000 + 285; +pub const SYS_inotify_rm_watch: c_long = 4000 + 286; +pub const SYS_migrate_pages: c_long = 4000 + 287; +pub const SYS_openat: c_long = 4000 + 288; +pub const SYS_mkdirat: c_long = 4000 + 289; +pub const SYS_mknodat: c_long = 4000 + 290; +pub const SYS_fchownat: c_long = 4000 + 291; +pub const SYS_futimesat: c_long = 4000 + 292; +pub const SYS_fstatat64: c_long = 4000 + 293; +pub const SYS_unlinkat: c_long = 4000 + 294; +pub const SYS_renameat: c_long = 4000 + 295; +pub const SYS_linkat: c_long = 4000 + 296; +pub const SYS_symlinkat: c_long = 4000 + 297; +pub const SYS_readlinkat: c_long = 4000 + 298; +pub const SYS_fchmodat: c_long = 4000 + 299; +pub const SYS_faccessat: c_long = 4000 + 300; +pub const SYS_pselect6: c_long = 4000 + 301; +pub const SYS_ppoll: c_long = 4000 + 302; +pub const SYS_unshare: c_long = 4000 + 303; +pub const SYS_splice: c_long = 4000 + 304; +pub const SYS_sync_file_range: c_long = 4000 + 305; +pub const SYS_tee: c_long = 4000 + 306; +pub const SYS_vmsplice: c_long = 4000 + 307; +pub const SYS_move_pages: c_long = 4000 + 308; +pub const SYS_set_robust_list: c_long = 4000 + 309; +pub 
const SYS_get_robust_list: c_long = 4000 + 310; +pub const SYS_kexec_load: c_long = 4000 + 311; +pub const SYS_getcpu: c_long = 4000 + 312; +pub const SYS_epoll_pwait: c_long = 4000 + 313; +pub const SYS_ioprio_set: c_long = 4000 + 314; +pub const SYS_ioprio_get: c_long = 4000 + 315; +pub const SYS_utimensat: c_long = 4000 + 316; +pub const SYS_signalfd: c_long = 4000 + 317; +pub const SYS_timerfd: c_long = 4000 + 318; +pub const SYS_eventfd: c_long = 4000 + 319; +pub const SYS_fallocate: c_long = 4000 + 320; +pub const SYS_timerfd_create: c_long = 4000 + 321; +pub const SYS_timerfd_gettime: c_long = 4000 + 322; +pub const SYS_timerfd_settime: c_long = 4000 + 323; +pub const SYS_signalfd4: c_long = 4000 + 324; +pub const SYS_eventfd2: c_long = 4000 + 325; +pub const SYS_epoll_create1: c_long = 4000 + 326; +pub const SYS_dup3: c_long = 4000 + 327; +pub const SYS_pipe2: c_long = 4000 + 328; +pub const SYS_inotify_init1: c_long = 4000 + 329; +pub const SYS_preadv: c_long = 4000 + 330; +pub const SYS_pwritev: c_long = 4000 + 331; +pub const SYS_rt_tgsigqueueinfo: c_long = 4000 + 332; +pub const SYS_perf_event_open: c_long = 4000 + 333; +pub const SYS_accept4: c_long = 4000 + 334; +pub const SYS_recvmmsg: c_long = 4000 + 335; +pub const SYS_fanotify_init: c_long = 4000 + 336; +pub const SYS_fanotify_mark: c_long = 4000 + 337; +pub const SYS_prlimit64: c_long = 4000 + 338; +pub const SYS_name_to_handle_at: c_long = 4000 + 339; +pub const SYS_open_by_handle_at: c_long = 4000 + 340; +pub const SYS_clock_adjtime: c_long = 4000 + 341; +pub const SYS_syncfs: c_long = 4000 + 342; +pub const SYS_sendmmsg: c_long = 4000 + 343; +pub const SYS_setns: c_long = 4000 + 344; +pub const SYS_process_vm_readv: c_long = 4000 + 345; +pub const SYS_process_vm_writev: c_long = 4000 + 346; +pub const SYS_kcmp: c_long = 4000 + 347; +pub const SYS_finit_module: c_long = 4000 + 348; +pub const SYS_sched_setattr: c_long = 4000 + 349; +pub const SYS_sched_getattr: c_long = 4000 + 350; +pub const SYS_renameat2: c_long = 4000 + 351; +pub const SYS_seccomp: c_long = 4000 + 352; +pub const SYS_getrandom: c_long = 4000 + 353; +pub const SYS_memfd_create: c_long = 4000 + 354; +pub const SYS_bpf: c_long = 4000 + 355; +pub const SYS_execveat: c_long = 4000 + 356; +pub const SYS_userfaultfd: c_long = 4000 + 357; +pub const SYS_membarrier: c_long = 4000 + 358; +pub const SYS_mlock2: c_long = 4000 + 359; +pub const SYS_copy_file_range: c_long = 4000 + 360; +pub const SYS_preadv2: c_long = 4000 + 361; +pub const SYS_pwritev2: c_long = 4000 + 362; +pub const SYS_pkey_mprotect: c_long = 4000 + 363; +pub const SYS_pkey_alloc: c_long = 4000 + 364; +pub const SYS_pkey_free: c_long = 4000 + 365; +pub const SYS_statx: c_long = 4000 + 366; +pub const SYS_rseq: c_long = 4000 + 367; +pub const SYS_pidfd_send_signal: c_long = 4000 + 424; +pub const SYS_io_uring_setup: c_long = 4000 + 425; +pub const SYS_io_uring_enter: c_long = 4000 + 426; +pub const SYS_io_uring_register: c_long = 4000 + 427; +pub const SYS_open_tree: c_long = 4000 + 428; +pub const SYS_move_mount: c_long = 4000 + 429; +pub const SYS_fsopen: c_long = 4000 + 430; +pub const SYS_fsconfig: c_long = 4000 + 431; +pub const SYS_fsmount: c_long = 4000 + 432; +pub const SYS_fspick: c_long = 4000 + 433; +pub const SYS_pidfd_open: c_long = 4000 + 434; +pub const SYS_clone3: c_long = 4000 + 435; +pub const SYS_close_range: c_long = 4000 + 436; +pub const SYS_openat2: c_long = 4000 + 437; +pub const SYS_pidfd_getfd: c_long = 4000 + 438; +pub const SYS_faccessat2: c_long = 4000 + 439; +pub 
const SYS_process_madvise: c_long = 4000 + 440; +pub const SYS_epoll_pwait2: c_long = 4000 + 441; +pub const SYS_mount_setattr: c_long = 4000 + 442; +pub const SYS_quotactl_fd: c_long = 4000 + 443; +pub const SYS_landlock_create_ruleset: c_long = 4000 + 444; +pub const SYS_landlock_add_rule: c_long = 4000 + 445; +pub const SYS_landlock_restrict_self: c_long = 4000 + 446; +pub const SYS_memfd_secret: c_long = 4000 + 447; +pub const SYS_process_mrelease: c_long = 4000 + 448; +pub const SYS_futex_waitv: c_long = 4000 + 449; +pub const SYS_set_mempolicy_home_node: c_long = 4000 + 450; + +pub const O_DIRECT: c_int = 0x8000; +pub const O_DIRECTORY: c_int = 0x10000; +pub const O_NOFOLLOW: c_int = 0x20000; + +pub const O_APPEND: c_int = 8; +pub const O_CREAT: c_int = 256; +pub const O_EXCL: c_int = 1024; +pub const O_NOCTTY: c_int = 2048; +pub const O_NONBLOCK: c_int = 128; +pub const O_SYNC: c_int = 0x4010; +pub const O_RSYNC: c_int = 0x4010; +pub const O_DSYNC: c_int = 0x10; +pub const O_FSYNC: c_int = 0x4010; +pub const O_ASYNC: c_int = 0x1000; +pub const O_NDELAY: c_int = 0x80; + +pub const EDEADLK: c_int = 45; +pub const ENAMETOOLONG: c_int = 78; +pub const ENOLCK: c_int = 46; +pub const ENOSYS: c_int = 89; +pub const ENOTEMPTY: c_int = 93; +pub const ELOOP: c_int = 90; +pub const ENOMSG: c_int = 35; +pub const EIDRM: c_int = 36; +pub const ECHRNG: c_int = 37; +pub const EL2NSYNC: c_int = 38; +pub const EL3HLT: c_int = 39; +pub const EL3RST: c_int = 40; +pub const ELNRNG: c_int = 41; +pub const EUNATCH: c_int = 42; +pub const ENOCSI: c_int = 43; +pub const EL2HLT: c_int = 44; +pub const EBADE: c_int = 50; +pub const EBADR: c_int = 51; +pub const EXFULL: c_int = 52; +pub const ENOANO: c_int = 53; +pub const EBADRQC: c_int = 54; +pub const EBADSLT: c_int = 55; +pub const EDEADLOCK: c_int = 56; +pub const EMULTIHOP: c_int = 74; +pub const EOVERFLOW: c_int = 79; +pub const ENOTUNIQ: c_int = 80; +pub const EBADFD: c_int = 81; +pub const EBADMSG: c_int = 77; +pub const EREMCHG: c_int = 82; +pub const ELIBACC: c_int = 83; +pub const ELIBBAD: c_int = 84; +pub const ELIBSCN: c_int = 85; +pub const ELIBMAX: c_int = 86; +pub const ELIBEXEC: c_int = 87; +pub const EILSEQ: c_int = 88; +pub const ERESTART: c_int = 91; +pub const ESTRPIPE: c_int = 92; +pub const EUSERS: c_int = 94; +pub const ENOTSOCK: c_int = 95; +pub const EDESTADDRREQ: c_int = 96; +pub const EMSGSIZE: c_int = 97; +pub const EPROTOTYPE: c_int = 98; +pub const ENOPROTOOPT: c_int = 99; +pub const EPROTONOSUPPORT: c_int = 120; +pub const ESOCKTNOSUPPORT: c_int = 121; +pub const EOPNOTSUPP: c_int = 122; +pub const EPFNOSUPPORT: c_int = 123; +pub const EAFNOSUPPORT: c_int = 124; +pub const EADDRINUSE: c_int = 125; +pub const EADDRNOTAVAIL: c_int = 126; +pub const ENETDOWN: c_int = 127; +pub const ENETUNREACH: c_int = 128; +pub const ENETRESET: c_int = 129; +pub const ECONNABORTED: c_int = 130; +pub const ECONNRESET: c_int = 131; +pub const ENOBUFS: c_int = 132; +pub const EISCONN: c_int = 133; +pub const ENOTCONN: c_int = 134; +pub const ESHUTDOWN: c_int = 143; +pub const ETOOMANYREFS: c_int = 144; +pub const ETIMEDOUT: c_int = 145; +pub const ECONNREFUSED: c_int = 146; +pub const EHOSTDOWN: c_int = 147; +pub const EHOSTUNREACH: c_int = 148; +pub const EALREADY: c_int = 149; +pub const EINPROGRESS: c_int = 150; +pub const ESTALE: c_int = 151; +pub const EUCLEAN: c_int = 135; +pub const ENOTNAM: c_int = 137; +pub const ENAVAIL: c_int = 138; +pub const EISNAM: c_int = 139; +pub const EREMOTEIO: c_int = 140; +pub const EDQUOT: c_int = 1133; +pub 
const ENOMEDIUM: c_int = 159; +pub const EMEDIUMTYPE: c_int = 160; +pub const ECANCELED: c_int = 158; +pub const ENOKEY: c_int = 161; +pub const EKEYEXPIRED: c_int = 162; +pub const EKEYREVOKED: c_int = 163; +pub const EKEYREJECTED: c_int = 164; +pub const EOWNERDEAD: c_int = 165; +pub const ENOTRECOVERABLE: c_int = 166; +pub const ERFKILL: c_int = 167; + +pub const MAP_NORESERVE: c_int = 0x400; +pub const MAP_ANON: c_int = 0x800; +pub const MAP_ANONYMOUS: c_int = 0x800; +pub const MAP_GROWSDOWN: c_int = 0x1000; +pub const MAP_DENYWRITE: c_int = 0x2000; +pub const MAP_EXECUTABLE: c_int = 0x4000; +pub const MAP_LOCKED: c_int = 0x8000; +pub const MAP_POPULATE: c_int = 0x10000; +pub const MAP_NONBLOCK: c_int = 0x20000; +pub const MAP_STACK: c_int = 0x40000; + +pub const SOCK_STREAM: c_int = 2; +pub const SOCK_DGRAM: c_int = 1; + +pub const SA_SIGINFO: c_int = 0x00000008; +pub const SA_NOCLDWAIT: c_int = 0x00010000; + +pub const SIGEMT: c_int = 7; +pub const SIGCHLD: c_int = 18; +pub const SIGBUS: c_int = 10; +pub const SIGTTIN: c_int = 26; +pub const SIGTTOU: c_int = 27; +pub const SIGXCPU: c_int = 30; +pub const SIGXFSZ: c_int = 31; +pub const SIGVTALRM: c_int = 28; +pub const SIGPROF: c_int = 29; +pub const SIGWINCH: c_int = 20; +pub const SIGUSR1: c_int = 16; +pub const SIGUSR2: c_int = 17; +pub const SIGCONT: c_int = 25; +pub const SIGSTOP: c_int = 23; +pub const SIGTSTP: c_int = 24; +pub const SIGURG: c_int = 21; +pub const SIGIO: c_int = 22; +pub const SIGSYS: c_int = 12; +pub const SIGPOLL: c_int = 22; +pub const SIGPWR: c_int = 19; +pub const SIG_SETMASK: c_int = 3; +pub const SIG_BLOCK: c_int = 0x1; +pub const SIG_UNBLOCK: c_int = 0x2; + +pub const POLLWRNORM: c_short = 0x004; +pub const POLLWRBAND: c_short = 0x100; + +pub const VEOF: usize = 16; +pub const VEOL: usize = 17; +pub const VEOL2: usize = 6; +pub const VMIN: usize = 4; +pub const IEXTEN: crate::tcflag_t = 0x00000100; +pub const TOSTOP: crate::tcflag_t = 0x00008000; +pub const FLUSHO: crate::tcflag_t = 0x00002000; +pub const EXTPROC: crate::tcflag_t = 0o200000; +pub const TCSANOW: c_int = 0x540e; +pub const TCSADRAIN: c_int = 0x540f; +pub const TCSAFLUSH: c_int = 0x5410; + +pub const PTRACE_GETFPXREGS: c_uint = 18; +pub const PTRACE_SETFPXREGS: c_uint = 19; + +pub const MAP_HUGETLB: c_int = 0x080000; + +pub const EFD_NONBLOCK: c_int = 0x80; + +cfg_if! 
{ + if #[cfg(gnu_file_offset_bits64)] { + pub const F_GETLK: c_int = 33; + } else { + pub const F_GETLK: c_int = 14; + } +} +pub const F_GETOWN: c_int = 23; +pub const F_SETOWN: c_int = 24; + +pub const SFD_NONBLOCK: c_int = 0x80; + +pub const RTLD_DEEPBIND: c_int = 0x10; +pub const RTLD_GLOBAL: c_int = 0x4; +pub const RTLD_NOLOAD: c_int = 0x8; + +pub const MCL_CURRENT: c_int = 0x0001; +pub const MCL_FUTURE: c_int = 0x0002; +pub const MCL_ONFAULT: c_int = 0x0004; + +pub const SIGSTKSZ: size_t = 8192; +pub const MINSIGSTKSZ: size_t = 2048; +pub const CBAUD: crate::tcflag_t = 0o0010017; +pub const TAB1: crate::tcflag_t = 0x00000800; +pub const TAB2: crate::tcflag_t = 0x00001000; +pub const TAB3: crate::tcflag_t = 0x00001800; +pub const CR1: crate::tcflag_t = 0x00000200; +pub const CR2: crate::tcflag_t = 0x00000400; +pub const CR3: crate::tcflag_t = 0x00000600; +pub const FF1: crate::tcflag_t = 0x00008000; +pub const BS1: crate::tcflag_t = 0x00002000; +pub const VT1: crate::tcflag_t = 0x00004000; +pub const VWERASE: usize = 14; +pub const VREPRINT: usize = 12; +pub const VSUSP: usize = 10; +pub const VSTART: usize = 8; +pub const VSTOP: usize = 9; +pub const VDISCARD: usize = 13; +pub const VTIME: usize = 5; +pub const IXON: crate::tcflag_t = 0x00000400; +pub const IXOFF: crate::tcflag_t = 0x00001000; +pub const ONLCR: crate::tcflag_t = 0x4; +pub const CSIZE: crate::tcflag_t = 0x00000030; +pub const CS6: crate::tcflag_t = 0x00000010; +pub const CS7: crate::tcflag_t = 0x00000020; +pub const CS8: crate::tcflag_t = 0x00000030; +pub const CSTOPB: crate::tcflag_t = 0x00000040; +pub const CREAD: crate::tcflag_t = 0x00000080; +pub const PARENB: crate::tcflag_t = 0x00000100; +pub const PARODD: crate::tcflag_t = 0x00000200; +pub const HUPCL: crate::tcflag_t = 0x00000400; +pub const CLOCAL: crate::tcflag_t = 0x00000800; +pub const ECHOKE: crate::tcflag_t = 0x00000800; +pub const ECHOE: crate::tcflag_t = 0x00000010; +pub const ECHOK: crate::tcflag_t = 0x00000020; +pub const ECHONL: crate::tcflag_t = 0x00000040; +pub const ECHOPRT: crate::tcflag_t = 0x00000400; +pub const ECHOCTL: crate::tcflag_t = 0x00000200; +pub const ISIG: crate::tcflag_t = 0x00000001; +pub const ICANON: crate::tcflag_t = 0x00000002; +pub const PENDIN: crate::tcflag_t = 0x00004000; +pub const NOFLSH: crate::tcflag_t = 0x00000080; +pub const CIBAUD: crate::tcflag_t = 0o02003600000; +pub const CBAUDEX: crate::tcflag_t = 0o010000; +pub const VSWTC: usize = 7; +pub const OLCUC: crate::tcflag_t = 0o000002; +pub const NLDLY: crate::tcflag_t = 0o000400; +pub const CRDLY: crate::tcflag_t = 0o003000; +pub const TABDLY: crate::tcflag_t = 0o014000; +pub const BSDLY: crate::tcflag_t = 0o020000; +pub const FFDLY: crate::tcflag_t = 0o100000; +pub const VTDLY: crate::tcflag_t = 0o040000; +pub const XTABS: crate::tcflag_t = 0o014000; + +pub const B0: crate::speed_t = 0o000000; +pub const B50: crate::speed_t = 0o000001; +pub const B75: crate::speed_t = 0o000002; +pub const B110: crate::speed_t = 0o000003; +pub const B134: crate::speed_t = 0o000004; +pub const B150: crate::speed_t = 0o000005; +pub const B200: crate::speed_t = 0o000006; +pub const B300: crate::speed_t = 0o000007; +pub const B600: crate::speed_t = 0o000010; +pub const B1200: crate::speed_t = 0o000011; +pub const B1800: crate::speed_t = 0o000012; +pub const B2400: crate::speed_t = 0o000013; +pub const B4800: crate::speed_t = 0o000014; +pub const B9600: crate::speed_t = 0o000015; +pub const B19200: crate::speed_t = 0o000016; +pub const B38400: crate::speed_t = 0o000017; +pub const EXTA: 
crate::speed_t = B19200; +pub const EXTB: crate::speed_t = B38400; +pub const B57600: crate::speed_t = 0o010001; +pub const B115200: crate::speed_t = 0o010002; +pub const B230400: crate::speed_t = 0o010003; +pub const B460800: crate::speed_t = 0o010004; +pub const B500000: crate::speed_t = 0o010005; +pub const B576000: crate::speed_t = 0o010006; +pub const B921600: crate::speed_t = 0o010007; +pub const B1000000: crate::speed_t = 0o010010; +pub const B1152000: crate::speed_t = 0o010011; +pub const B1500000: crate::speed_t = 0o010012; +pub const B2000000: crate::speed_t = 0o010013; +pub const B2500000: crate::speed_t = 0o010014; +pub const B3000000: crate::speed_t = 0o010015; +pub const B3500000: crate::speed_t = 0o010016; +pub const B4000000: crate::speed_t = 0o010017; + +pub const EHWPOISON: c_int = 168; diff --git a/vendor/libc/src/unix/linux_like/linux/gnu/b32/mod.rs b/vendor/libc/src/unix/linux_like/linux/gnu/b32/mod.rs new file mode 100644 index 00000000000000..fe843a7643207d --- /dev/null +++ b/vendor/libc/src/unix/linux_like/linux/gnu/b32/mod.rs @@ -0,0 +1,491 @@ +//! 32-bit specific definitions for linux-like values + +use crate::prelude::*; +use crate::pthread_mutex_t; + +pub type clock_t = i32; + +pub type shmatt_t = c_ulong; +pub type msgqnum_t = c_ulong; +pub type msglen_t = c_ulong; +pub type nlink_t = u32; +pub type __u64 = c_ulonglong; +pub type __s64 = c_longlong; +pub type __fsword_t = i32; +pub type fsblkcnt64_t = u64; +pub type fsfilcnt64_t = u64; +pub type __syscall_ulong_t = c_ulong; +pub type __suseconds64_t = i64; + +cfg_if! { + if #[cfg(target_arch = "riscv32")] { + pub type time_t = i64; + pub type suseconds_t = i64; + type __ino_t = c_ulong; + type __ino64_t = u64; + pub type ino_t = __ino64_t; + pub type off_t = i64; + pub type blkcnt_t = i64; + pub type fsblkcnt_t = u64; + pub type fsfilcnt_t = u64; + pub type rlim_t = u64; + pub type blksize_t = i64; + } else if #[cfg(gnu_time_bits64)] { + pub type time_t = i64; + pub type suseconds_t = i32; + type __ino_t = c_ulong; + type __ino64_t = u64; + pub type ino_t = __ino64_t; + pub type off_t = i64; + pub type blkcnt_t = i64; + pub type fsblkcnt_t = u64; + pub type fsfilcnt_t = u64; + pub type rlim_t = u64; + pub type blksize_t = i32; + } else if #[cfg(gnu_file_offset_bits64)] { + pub type time_t = i32; + pub type suseconds_t = i32; + type __ino_t = c_ulong; + type __ino64_t = u64; + pub type ino_t = __ino64_t; + pub type off_t = i64; + pub type blkcnt_t = i64; + pub type fsblkcnt_t = u64; + pub type fsfilcnt_t = u64; + pub type rlim_t = u64; + pub type blksize_t = i32; + } else { + pub type time_t = i32; + pub type suseconds_t = i32; + type __ino_t = c_ulong; + type __ino64_t = u64; + pub type ino_t = __ino_t; + pub type off_t = i32; + pub type blkcnt_t = i32; + pub type fsblkcnt_t = c_ulong; + pub type fsfilcnt_t = c_ulong; + pub type rlim_t = c_ulong; + pub type blksize_t = i32; + } +} + +cfg_if! { + if #[cfg(not(any( + target_arch = "mips", + target_arch = "mips32r6", + target_arch = "powerpc", + target_arch = "sparc" + )))] { + s! 
{ + pub struct stat { + pub st_dev: crate::dev_t, + + #[cfg(not(gnu_time_bits64))] + __pad1: c_uint, + + #[cfg(any(gnu_time_bits64, not(gnu_file_offset_bits64)))] + pub st_ino: crate::ino_t, + #[cfg(all(not(gnu_time_bits64), gnu_file_offset_bits64))] + __st_ino: __ino_t, + + pub st_mode: crate::mode_t, + pub st_nlink: crate::nlink_t, + pub st_uid: crate::uid_t, + pub st_gid: crate::gid_t, + + pub st_rdev: crate::dev_t, + + #[cfg(not(gnu_time_bits64))] + __pad2: c_uint, + + pub st_size: off_t, + + pub st_blksize: crate::blksize_t, + pub st_blocks: crate::blkcnt_t, + + pub st_atime: crate::time_t, + pub st_atime_nsec: c_long, + #[cfg(gnu_time_bits64)] + _atime_pad: c_int, + pub st_mtime: crate::time_t, + pub st_mtime_nsec: c_long, + #[cfg(gnu_time_bits64)] + _mtime_pad: c_int, + pub st_ctime: crate::time_t, + pub st_ctime_nsec: c_long, + #[cfg(gnu_time_bits64)] + _ctime_pad: c_int, + + #[cfg(not(gnu_file_offset_bits64))] + __glibc_reserved4: c_long, + #[cfg(not(gnu_file_offset_bits64))] + __glibc_reserved5: c_long, + #[cfg(all(not(gnu_time_bits64), gnu_file_offset_bits64))] + pub st_ino: crate::ino_t, + } + } + } +} + +s! { + pub struct statvfs { + pub f_bsize: c_ulong, + pub f_frsize: c_ulong, + pub f_blocks: crate::fsblkcnt_t, + pub f_bfree: crate::fsblkcnt_t, + pub f_bavail: crate::fsblkcnt_t, + pub f_files: crate::fsfilcnt_t, + pub f_ffree: crate::fsfilcnt_t, + pub f_favail: crate::fsfilcnt_t, + pub f_fsid: c_ulong, + __f_unused: c_int, + pub f_flag: c_ulong, + pub f_namemax: c_ulong, + __f_spare: [c_int; 6], + } + + pub struct pthread_attr_t { + __size: [u32; 9], + } + + pub struct sigset_t { + __val: [c_ulong; 32], + } + + pub struct sysinfo { + pub uptime: c_long, + pub loads: [c_ulong; 3], + pub totalram: c_ulong, + pub freeram: c_ulong, + pub sharedram: c_ulong, + pub bufferram: c_ulong, + pub totalswap: c_ulong, + pub freeswap: c_ulong, + pub procs: c_ushort, + #[deprecated( + since = "0.2.58", + note = "This padding field might become private in the future" + )] + pub pad: c_ushort, + pub totalhigh: c_ulong, + pub freehigh: c_ulong, + pub mem_unit: c_uint, + pub _f: [c_char; 8], + } + + pub struct semid_ds { + pub sem_perm: ipc_perm, + #[cfg(all(not(gnu_time_bits64), target_arch = "powerpc"))] + __reserved: crate::__syscall_ulong_t, + pub sem_otime: crate::time_t, + #[cfg(not(any( + gnu_time_bits64, + target_arch = "mips", + target_arch = "mips32r6", + target_arch = "powerpc" + )))] + __reserved: crate::__syscall_ulong_t, + #[cfg(all(not(gnu_time_bits64), target_arch = "powerpc"))] + __reserved2: crate::__syscall_ulong_t, + pub sem_ctime: crate::time_t, + #[cfg(not(any( + gnu_time_bits64, + target_arch = "mips", + target_arch = "mips32r6", + target_arch = "powerpc" + )))] + __reserved2: crate::__syscall_ulong_t, + pub sem_nsems: crate::__syscall_ulong_t, + #[cfg(all( + gnu_time_bits64, + not(any( + target_arch = "mips", + target_arch = "mips32r6", + target_arch = "powerpc", + target_arch = "arm", + target_arch = "x86" + )) + ))] + __reserved2: crate::__syscall_ulong_t, + __glibc_reserved3: crate::__syscall_ulong_t, + __glibc_reserved4: crate::__syscall_ulong_t, + } + + #[cfg(gnu_time_bits64)] + pub struct timex { + pub modes: c_uint, + _pad1: c_int, + pub offset: c_longlong, + pub freq: c_longlong, + pub maxerror: c_longlong, + pub esterror: c_longlong, + pub status: c_int, + _pad2: c_int, + pub constant: c_longlong, + pub precision: c_longlong, + pub tolerance: c_longlong, + pub time: crate::timeval, + pub tick: c_longlong, + pub ppsfreq: c_longlong, + pub jitter: c_longlong, + 
pub shift: c_int, + _pad3: c_int, + pub stabil: c_longlong, + pub jitcnt: c_longlong, + pub calcnt: c_longlong, + pub errcnt: c_longlong, + pub stbcnt: c_longlong, + pub tai: c_int, + pub __unused1: i32, + pub __unused2: i32, + pub __unused3: i32, + pub __unused4: i32, + pub __unused5: i32, + pub __unused6: i32, + pub __unused7: i32, + pub __unused8: i32, + pub __unused9: i32, + pub __unused10: i32, + pub __unused11: i32, + } + + #[cfg(not(gnu_time_bits64))] + pub struct timex { + pub modes: c_uint, + pub offset: c_long, + pub freq: c_long, + pub maxerror: c_long, + pub esterror: c_long, + pub status: c_int, + pub constant: c_long, + pub precision: c_long, + pub tolerance: c_long, + pub time: crate::timeval, + pub tick: c_long, + pub ppsfreq: c_long, + pub jitter: c_long, + pub shift: c_int, + pub stabil: c_long, + pub jitcnt: c_long, + pub calcnt: c_long, + pub errcnt: c_long, + pub stbcnt: c_long, + pub tai: c_int, + pub __unused1: i32, + pub __unused2: i32, + pub __unused3: i32, + pub __unused4: i32, + pub __unused5: i32, + pub __unused6: i32, + pub __unused7: i32, + pub __unused8: i32, + pub __unused9: i32, + pub __unused10: i32, + pub __unused11: i32, + } +} + +pub const POSIX_FADV_DONTNEED: c_int = 4; +pub const POSIX_FADV_NOREUSE: c_int = 5; + +pub const F_OFD_GETLK: c_int = 36; +pub const F_OFD_SETLK: c_int = 37; +pub const F_OFD_SETLKW: c_int = 38; + +pub const __SIZEOF_PTHREAD_CONDATTR_T: usize = 4; +pub const __SIZEOF_PTHREAD_MUTEX_T: usize = 24; +pub const __SIZEOF_PTHREAD_RWLOCK_T: usize = 32; +pub const __SIZEOF_PTHREAD_BARRIER_T: usize = 20; +pub const __SIZEOF_PTHREAD_MUTEXATTR_T: usize = 4; +pub const __SIZEOF_PTHREAD_RWLOCKATTR_T: usize = 8; +pub const __SIZEOF_PTHREAD_BARRIERATTR_T: usize = 4; + +cfg_if! { + if #[cfg(target_arch = "sparc")] { + pub const O_NOATIME: c_int = 0x200000; + pub const O_PATH: c_int = 0x1000000; + pub const O_TMPFILE: c_int = 0x2000000 | O_DIRECTORY; + + pub const SA_ONSTACK: c_int = 1; + + pub const PTRACE_DETACH: c_uint = 11; + + pub const F_RDLCK: c_int = 1; + pub const F_WRLCK: c_int = 2; + pub const F_UNLCK: c_int = 3; + + pub const SFD_CLOEXEC: c_int = 0x400000; + + pub const NCCS: usize = 17; + + pub const O_TRUNC: c_int = 0x400; + pub const O_CLOEXEC: c_int = 0x400000; + + pub const EBFONT: c_int = 109; + pub const ENOSTR: c_int = 72; + pub const ENODATA: c_int = 111; + pub const ETIME: c_int = 73; + pub const ENOSR: c_int = 74; + pub const ENONET: c_int = 80; + pub const ENOPKG: c_int = 113; + pub const EREMOTE: c_int = 71; + pub const ENOLINK: c_int = 82; + pub const EADV: c_int = 83; + pub const ESRMNT: c_int = 84; + pub const ECOMM: c_int = 85; + pub const EPROTO: c_int = 86; + pub const EDOTDOT: c_int = 88; + + pub const SA_NODEFER: c_int = 0x20; + pub const SA_RESETHAND: c_int = 0x4; + pub const SA_RESTART: c_int = 0x2; + pub const SA_NOCLDSTOP: c_int = 0x00000008; + + pub const EPOLL_CLOEXEC: c_int = 0x400000; + + pub const EFD_CLOEXEC: c_int = 0x400000; + } else { + pub const O_NOATIME: c_int = 0o1000000; + pub const O_PATH: c_int = 0o10000000; + pub const O_TMPFILE: c_int = 0o20000000 | O_DIRECTORY; + + pub const SA_ONSTACK: c_int = 0x08000000; + + pub const PTRACE_DETACH: c_uint = 17; + + pub const F_RDLCK: c_int = 0; + pub const F_WRLCK: c_int = 1; + pub const F_UNLCK: c_int = 2; + + pub const SFD_CLOEXEC: c_int = 0x080000; + + pub const NCCS: usize = 32; + + pub const O_TRUNC: c_int = 512; + pub const O_CLOEXEC: c_int = 0x80000; + pub const EBFONT: c_int = 59; + pub const ENOSTR: c_int = 60; + pub const ENODATA: c_int = 61; + 
pub const ETIME: c_int = 62; + pub const ENOSR: c_int = 63; + pub const ENONET: c_int = 64; + pub const ENOPKG: c_int = 65; + pub const EREMOTE: c_int = 66; + pub const ENOLINK: c_int = 67; + pub const EADV: c_int = 68; + pub const ESRMNT: c_int = 69; + pub const ECOMM: c_int = 70; + pub const EPROTO: c_int = 71; + pub const EDOTDOT: c_int = 73; + + pub const SA_NODEFER: c_int = 0x40000000; + pub const SA_RESETHAND: c_int = 0x80000000; + pub const SA_RESTART: c_int = 0x10000000; + pub const SA_NOCLDSTOP: c_int = 0x00000001; + + pub const EPOLL_CLOEXEC: c_int = 0x80000; + + pub const EFD_CLOEXEC: c_int = 0x80000; + } +} +cfg_if! { + if #[cfg(target_arch = "sparc")] { + pub const F_SETLK: c_int = 8; + pub const F_SETLKW: c_int = 9; + } else if #[cfg(all( + gnu_file_offset_bits64, + any(target_arch = "mips", target_arch = "mips32r6") + ))] { + pub const F_SETLK: c_int = 34; + pub const F_SETLKW: c_int = 35; + } else if #[cfg(gnu_file_offset_bits64)] { + pub const F_SETLK: c_int = 13; + pub const F_SETLKW: c_int = 14; + } else { + pub const F_SETLK: c_int = 6; + pub const F_SETLKW: c_int = 7; + } +} + +#[cfg(target_endian = "little")] +pub const PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP: crate::pthread_mutex_t = pthread_mutex_t { + size: [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ], +}; +#[cfg(target_endian = "little")] +pub const PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP: crate::pthread_mutex_t = pthread_mutex_t { + size: [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ], +}; +#[cfg(target_endian = "little")] +pub const PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP: crate::pthread_mutex_t = pthread_mutex_t { + size: [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ], +}; +#[cfg(target_endian = "big")] +pub const PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP: crate::pthread_mutex_t = pthread_mutex_t { + size: [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, + ], +}; +#[cfg(target_endian = "big")] +pub const PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP: crate::pthread_mutex_t = pthread_mutex_t { + size: [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, + ], +}; +#[cfg(target_endian = "big")] +pub const PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP: crate::pthread_mutex_t = pthread_mutex_t { + size: [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, + ], +}; + +pub const PTRACE_GETFPREGS: c_uint = 14; +pub const PTRACE_SETFPREGS: c_uint = 15; +pub const PTRACE_GETREGS: c_uint = 12; +pub const PTRACE_SETREGS: c_uint = 13; + +extern "C" { + pub fn sysctl( + name: *mut c_int, + namelen: c_int, + oldp: *mut c_void, + oldlenp: *mut size_t, + newp: *mut c_void, + newlen: size_t, + ) -> c_int; +} + +cfg_if! 
{ + if #[cfg(target_arch = "x86")] { + mod x86; + pub use self::x86::*; + } else if #[cfg(target_arch = "arm")] { + mod arm; + pub use self::arm::*; + } else if #[cfg(any(target_arch = "mips", target_arch = "mips32r6"))] { + mod mips; + pub use self::mips::*; + } else if #[cfg(target_arch = "m68k")] { + mod m68k; + pub use self::m68k::*; + } else if #[cfg(target_arch = "powerpc")] { + mod powerpc; + pub use self::powerpc::*; + } else if #[cfg(target_arch = "sparc")] { + mod sparc; + pub use self::sparc::*; + } else if #[cfg(target_arch = "riscv32")] { + mod riscv32; + pub use self::riscv32::*; + } else if #[cfg(target_arch = "csky")] { + mod csky; + pub use self::csky::*; + } else { + // Unknown target_arch + } +} diff --git a/vendor/libc/src/unix/linux_like/linux/gnu/b32/powerpc.rs b/vendor/libc/src/unix/linux_like/linux/gnu/b32/powerpc.rs new file mode 100644 index 00000000000000..791f14956806d1 --- /dev/null +++ b/vendor/libc/src/unix/linux_like/linux/gnu/b32/powerpc.rs @@ -0,0 +1,892 @@ +use crate::prelude::*; +use crate::{off64_t, off_t}; + +pub type wchar_t = i32; + +s! { + // FIXME(1.0): This should not implement `PartialEq` + #[allow(unpredictable_function_pointer_comparisons)] + pub struct sigaction { + pub sa_sigaction: crate::sighandler_t, + pub sa_mask: crate::sigset_t, + pub sa_flags: c_int, + pub sa_restorer: Option<extern "C" fn()>, + } + + pub struct statfs { + pub f_type: crate::__fsword_t, + pub f_bsize: crate::__fsword_t, + pub f_blocks: crate::fsblkcnt_t, + pub f_bfree: crate::fsblkcnt_t, + pub f_bavail: crate::fsblkcnt_t, + + pub f_files: crate::fsfilcnt_t, + pub f_ffree: crate::fsfilcnt_t, + pub f_fsid: crate::fsid_t, + + pub f_namelen: crate::__fsword_t, + pub f_frsize: crate::__fsword_t, + pub f_flags: crate::__fsword_t, + f_spare: [crate::__fsword_t; 4], + } + + pub struct flock { + pub l_type: c_short, + pub l_whence: c_short, + pub l_start: off_t, + pub l_len: off_t, + pub l_pid: crate::pid_t, + } + + pub struct flock64 { + pub l_type: c_short, + pub l_whence: c_short, + pub l_start: off64_t, + pub l_len: off64_t, + pub l_pid: crate::pid_t, + } + + pub struct ipc_perm { + __key: crate::key_t, + pub uid: crate::uid_t, + pub gid: crate::gid_t, + pub cuid: crate::uid_t, + pub cgid: crate::gid_t, + pub mode: crate::mode_t, + __seq: u32, + __pad1: u32, + __glibc_reserved1: u64, + __glibc_reserved2: u64, + } + + pub struct stat { + pub st_dev: crate::dev_t, + #[cfg(not(gnu_file_offset_bits64))] + __pad1: c_ushort, + pub st_ino: crate::ino_t, + pub st_mode: crate::mode_t, + pub st_nlink: crate::nlink_t, + pub st_uid: crate::uid_t, + pub st_gid: crate::gid_t, + pub st_rdev: crate::dev_t, + #[cfg(not(gnu_time_bits64))] + __pad2: c_ushort, + pub st_size: off_t, + pub st_blksize: crate::blksize_t, + pub st_blocks: crate::blkcnt_t, + pub st_atime: crate::time_t, + #[cfg(gnu_time_bits64)] + _atime_pad: c_int, + pub st_atime_nsec: c_long, + pub st_mtime: crate::time_t, + #[cfg(gnu_time_bits64)] + _mtime_pad: c_int, + pub st_mtime_nsec: c_long, + pub st_ctime: crate::time_t, + #[cfg(gnu_time_bits64)] + _ctime_pad: c_int, + pub st_ctime_nsec: c_long, + #[cfg(not(gnu_time_bits64))] + __glibc_reserved4: c_ulong, + #[cfg(not(gnu_time_bits64))] + __glibc_reserved5: c_ulong, + } + + pub struct stat64 { + pub st_dev: crate::dev_t, + pub st_ino: crate::ino64_t, + pub st_mode: crate::mode_t, + pub st_nlink: crate::nlink_t, + pub st_uid: crate::uid_t, + pub st_gid: crate::gid_t, + pub st_rdev: crate::dev_t, + #[cfg(not(gnu_time_bits64))] + __pad2: c_ushort, + pub st_size: off64_t, + pub st_blksize: 
crate::blksize_t, + pub st_blocks: crate::blkcnt64_t, + pub st_atime: crate::time_t, + #[cfg(gnu_time_bits64)] + _atime_pad: c_int, + pub st_atime_nsec: c_long, + pub st_mtime: crate::time_t, + #[cfg(gnu_time_bits64)] + _mtime_pad: c_int, + pub st_mtime_nsec: c_long, + pub st_ctime: crate::time_t, + #[cfg(gnu_time_bits64)] + _ctime_pad: c_int, + pub st_ctime_nsec: c_long, + #[cfg(not(gnu_time_bits64))] + __glibc_reserved4: c_ulong, + #[cfg(not(gnu_time_bits64))] + __glibc_reserved5: c_ulong, + } + + pub struct statfs64 { + pub f_type: crate::__fsword_t, + pub f_bsize: crate::__fsword_t, + pub f_blocks: u64, + pub f_bfree: u64, + pub f_bavail: u64, + pub f_files: u64, + pub f_ffree: u64, + pub f_fsid: crate::fsid_t, + pub f_namelen: crate::__fsword_t, + pub f_frsize: crate::__fsword_t, + pub f_flags: crate::__fsword_t, + pub f_spare: [crate::__fsword_t; 4], + } + + pub struct statvfs64 { + pub f_bsize: c_ulong, + pub f_frsize: c_ulong, + pub f_blocks: u64, + pub f_bfree: u64, + pub f_bavail: u64, + pub f_files: u64, + pub f_ffree: u64, + pub f_favail: u64, + pub f_fsid: c_ulong, + __f_unused: c_int, + pub f_flag: c_ulong, + pub f_namemax: c_ulong, + __f_spare: [c_int; 6], + } + + pub struct shmid_ds { + pub shm_perm: crate::ipc_perm, + #[cfg(gnu_time_bits64)] + pub shm_segsz: size_t, + #[cfg(not(gnu_time_bits64))] + __glibc_reserved1: c_uint, + pub shm_atime: crate::time_t, + #[cfg(not(gnu_time_bits64))] + __glibc_reserved2: c_uint, + pub shm_dtime: crate::time_t, + #[cfg(not(gnu_time_bits64))] + __glibc_reserved3: c_uint, + pub shm_ctime: crate::time_t, + #[cfg(not(gnu_time_bits64))] + __glibc_reserved4: c_uint, + #[cfg(not(gnu_time_bits64))] + pub shm_segsz: size_t, + pub shm_cpid: crate::pid_t, + pub shm_lpid: crate::pid_t, + pub shm_nattch: crate::shmatt_t, + __glibc_reserved5: c_ulong, + __glibc_reserved6: c_ulong, + } + + pub struct msqid_ds { + pub msg_perm: crate::ipc_perm, + #[cfg(not(gnu_time_bits64))] + __glibc_reserved1: c_uint, + pub msg_stime: crate::time_t, + #[cfg(not(gnu_time_bits64))] + __glibc_reserved2: c_uint, + pub msg_rtime: crate::time_t, + #[cfg(not(gnu_time_bits64))] + __glibc_reserved3: c_uint, + pub msg_ctime: crate::time_t, + pub __msg_cbytes: c_ulong, + pub msg_qnum: crate::msgqnum_t, + pub msg_qbytes: crate::msglen_t, + pub msg_lspid: crate::pid_t, + pub msg_lrpid: crate::pid_t, + __glibc_reserved4: c_ulong, + __glibc_reserved5: c_ulong, + } + + pub struct siginfo_t { + pub si_signo: c_int, + pub si_errno: c_int, + pub si_code: c_int, + #[doc(hidden)] + #[deprecated( + since = "0.2.54", + note = "Please leave a comment on \ + https://github.com/rust-lang/libc/pull/1316 if you're using \ + this field" + )] + pub _pad: [c_int; 29], + _align: [usize; 0], + } + + pub struct stack_t { + pub ss_sp: *mut c_void, + pub ss_flags: c_int, + pub ss_size: size_t, + } +} + +pub const VEOF: usize = 4; +pub const RTLD_DEEPBIND: c_int = 0x8; +pub const RTLD_GLOBAL: c_int = 0x100; +pub const RTLD_NOLOAD: c_int = 0x4; +pub const O_DIRECT: c_int = 0x20000; +pub const O_DIRECTORY: c_int = 0x4000; +pub const O_NOFOLLOW: c_int = 0x8000; +pub const O_LARGEFILE: c_int = 0o200000; +pub const O_APPEND: c_int = 1024; +pub const O_CREAT: c_int = 64; +pub const O_EXCL: c_int = 128; +pub const O_NOCTTY: c_int = 256; +pub const O_NONBLOCK: c_int = 2048; +pub const O_SYNC: c_int = 1052672; +pub const O_RSYNC: c_int = 1052672; +pub const O_DSYNC: c_int = 4096; +pub const O_FSYNC: c_int = 0x101000; +pub const O_ASYNC: c_int = 0x2000; +pub const O_NDELAY: c_int = 0x800; +pub const TCSANOW: c_int 
= 0; +pub const TCSADRAIN: c_int = 1; +pub const TCSAFLUSH: c_int = 2; + +pub const MADV_SOFT_OFFLINE: c_int = 101; +pub const MAP_LOCKED: c_int = 0x00080; +pub const MAP_NORESERVE: c_int = 0x00040; +pub const MAP_ANON: c_int = 0x0020; +pub const MAP_ANONYMOUS: c_int = 0x0020; +pub const MAP_DENYWRITE: c_int = 0x0800; +pub const MAP_EXECUTABLE: c_int = 0x01000; +pub const MAP_POPULATE: c_int = 0x08000; +pub const MAP_NONBLOCK: c_int = 0x010000; +pub const MAP_STACK: c_int = 0x020000; +pub const MAP_HUGETLB: c_int = 0x040000; +pub const MAP_GROWSDOWN: c_int = 0x0100; +pub const MAP_SYNC: c_int = 0x080000; + +pub const EDEADLOCK: c_int = 58; +pub const EUCLEAN: c_int = 117; +pub const ENOTNAM: c_int = 118; +pub const ENAVAIL: c_int = 119; +pub const EISNAM: c_int = 120; +pub const EREMOTEIO: c_int = 121; +pub const EDEADLK: c_int = 35; +pub const ENAMETOOLONG: c_int = 36; +pub const ENOLCK: c_int = 37; +pub const ENOSYS: c_int = 38; +pub const ENOTEMPTY: c_int = 39; +pub const ELOOP: c_int = 40; +pub const ENOMSG: c_int = 42; +pub const EIDRM: c_int = 43; +pub const ECHRNG: c_int = 44; +pub const EL2NSYNC: c_int = 45; +pub const EL3HLT: c_int = 46; +pub const EL3RST: c_int = 47; +pub const ELNRNG: c_int = 48; +pub const EUNATCH: c_int = 49; +pub const ENOCSI: c_int = 50; +pub const EL2HLT: c_int = 51; +pub const EBADE: c_int = 52; +pub const EBADR: c_int = 53; +pub const EXFULL: c_int = 54; +pub const ENOANO: c_int = 55; +pub const EBADRQC: c_int = 56; +pub const EBADSLT: c_int = 57; +pub const EMULTIHOP: c_int = 72; +pub const EOVERFLOW: c_int = 75; +pub const ENOTUNIQ: c_int = 76; +pub const EBADFD: c_int = 77; +pub const EBADMSG: c_int = 74; +pub const EREMCHG: c_int = 78; +pub const ELIBACC: c_int = 79; +pub const ELIBBAD: c_int = 80; +pub const ELIBSCN: c_int = 81; +pub const ELIBMAX: c_int = 82; +pub const ELIBEXEC: c_int = 83; +pub const EILSEQ: c_int = 84; +pub const ERESTART: c_int = 85; +pub const ESTRPIPE: c_int = 86; +pub const EUSERS: c_int = 87; +pub const ENOTSOCK: c_int = 88; +pub const EDESTADDRREQ: c_int = 89; +pub const EMSGSIZE: c_int = 90; +pub const EPROTOTYPE: c_int = 91; +pub const ENOPROTOOPT: c_int = 92; +pub const EPROTONOSUPPORT: c_int = 93; +pub const ESOCKTNOSUPPORT: c_int = 94; +pub const EOPNOTSUPP: c_int = 95; +pub const EPFNOSUPPORT: c_int = 96; +pub const EAFNOSUPPORT: c_int = 97; +pub const EADDRINUSE: c_int = 98; +pub const EADDRNOTAVAIL: c_int = 99; +pub const ENETDOWN: c_int = 100; +pub const ENETUNREACH: c_int = 101; +pub const ENETRESET: c_int = 102; +pub const ECONNABORTED: c_int = 103; +pub const ECONNRESET: c_int = 104; +pub const ENOBUFS: c_int = 105; +pub const EISCONN: c_int = 106; +pub const ENOTCONN: c_int = 107; +pub const ESHUTDOWN: c_int = 108; +pub const ETOOMANYREFS: c_int = 109; +pub const ETIMEDOUT: c_int = 110; +pub const ECONNREFUSED: c_int = 111; +pub const EHOSTDOWN: c_int = 112; +pub const EHOSTUNREACH: c_int = 113; +pub const EALREADY: c_int = 114; +pub const EINPROGRESS: c_int = 115; +pub const ESTALE: c_int = 116; +pub const EDQUOT: c_int = 122; +pub const ENOMEDIUM: c_int = 123; +pub const EMEDIUMTYPE: c_int = 124; +pub const ECANCELED: c_int = 125; +pub const ENOKEY: c_int = 126; +pub const EKEYEXPIRED: c_int = 127; +pub const EKEYREVOKED: c_int = 128; +pub const EKEYREJECTED: c_int = 129; +pub const EOWNERDEAD: c_int = 130; +pub const ENOTRECOVERABLE: c_int = 131; +pub const EHWPOISON: c_int = 133; +pub const ERFKILL: c_int = 132; + +pub const SA_SIGINFO: c_int = 0x00000004; +pub const SA_NOCLDWAIT: c_int = 0x00000002; + +pub 
const SOCK_STREAM: c_int = 1; +pub const SOCK_DGRAM: c_int = 2; + +pub const MCL_CURRENT: c_int = 0x2000; +pub const MCL_FUTURE: c_int = 0x4000; +pub const MCL_ONFAULT: c_int = 0x8000; + +pub const POLLWRNORM: c_short = 0x100; +pub const POLLWRBAND: c_short = 0x200; + +cfg_if! { + if #[cfg(gnu_file_offset_bits64)] { + pub const F_GETLK: c_int = 12; + } else { + pub const F_GETLK: c_int = 5; + } +} +pub const F_GETOWN: c_int = 9; +pub const F_SETOWN: c_int = 8; + +pub const EFD_NONBLOCK: c_int = 0x800; +pub const SFD_NONBLOCK: c_int = 0x0800; + +pub const SIGCHLD: c_int = 17; +pub const SIGBUS: c_int = 7; +pub const SIGUSR1: c_int = 10; +pub const SIGUSR2: c_int = 12; +pub const SIGCONT: c_int = 18; +pub const SIGSTOP: c_int = 19; +pub const SIGTSTP: c_int = 20; +pub const SIGURG: c_int = 23; +pub const SIGIO: c_int = 29; +pub const SIGSYS: c_int = 31; +pub const SIGSTKFLT: c_int = 16; +#[deprecated(since = "0.2.55", note = "Use SIGSYS instead")] +pub const SIGUNUSED: c_int = 31; +pub const SIGPOLL: c_int = 29; +pub const SIGPWR: c_int = 30; +pub const SIG_SETMASK: c_int = 2; +pub const SIG_BLOCK: c_int = 0x000000; +pub const SIG_UNBLOCK: c_int = 0x01; +pub const SIGTTIN: c_int = 21; +pub const SIGTTOU: c_int = 22; +pub const SIGXCPU: c_int = 24; +pub const SIGXFSZ: c_int = 25; +pub const SIGVTALRM: c_int = 26; +pub const SIGPROF: c_int = 27; +pub const SIGWINCH: c_int = 28; +pub const SIGSTKSZ: size_t = 0x4000; +pub const MINSIGSTKSZ: size_t = 4096; +pub const CBAUD: crate::tcflag_t = 0xff; +pub const TAB1: crate::tcflag_t = 0x400; +pub const TAB2: crate::tcflag_t = 0x800; +pub const TAB3: crate::tcflag_t = 0xc00; +pub const CR1: crate::tcflag_t = 0x1000; +pub const CR2: crate::tcflag_t = 0x2000; +pub const CR3: crate::tcflag_t = 0x3000; +pub const FF1: crate::tcflag_t = 0x4000; +pub const BS1: crate::tcflag_t = 0x8000; +pub const VT1: crate::tcflag_t = 0x10000; +pub const VWERASE: usize = 0xa; +pub const VREPRINT: usize = 0xb; +pub const VSUSP: usize = 0xc; +pub const VSTART: usize = 0xd; +pub const VSTOP: usize = 0xe; +pub const VDISCARD: usize = 0x10; +pub const VTIME: usize = 0x7; +pub const IXON: crate::tcflag_t = 0x200; +pub const IXOFF: crate::tcflag_t = 0x400; +pub const ONLCR: crate::tcflag_t = 0x2; +pub const CSIZE: crate::tcflag_t = 0x300; +pub const CS6: crate::tcflag_t = 0x100; +pub const CS7: crate::tcflag_t = 0x200; +pub const CS8: crate::tcflag_t = 0x300; +pub const CSTOPB: crate::tcflag_t = 0x400; +pub const CREAD: crate::tcflag_t = 0x800; +pub const PARENB: crate::tcflag_t = 0x1000; +pub const PARODD: crate::tcflag_t = 0x2000; +pub const HUPCL: crate::tcflag_t = 0x4000; +pub const CLOCAL: crate::tcflag_t = 0x8000; +pub const ECHOKE: crate::tcflag_t = 0x1; +pub const ECHOE: crate::tcflag_t = 0x2; +pub const ECHOK: crate::tcflag_t = 0x4; +pub const ECHONL: crate::tcflag_t = 0x10; +pub const ECHOPRT: crate::tcflag_t = 0x20; +pub const ECHOCTL: crate::tcflag_t = 0x40; +pub const ISIG: crate::tcflag_t = 0x80; +pub const ICANON: crate::tcflag_t = 0x100; +pub const PENDIN: crate::tcflag_t = 0x20000000; +pub const NOFLSH: crate::tcflag_t = 0x80000000; +pub const VSWTC: usize = 9; +pub const OLCUC: crate::tcflag_t = 0o000004; +pub const NLDLY: crate::tcflag_t = 0o001400; +pub const CRDLY: crate::tcflag_t = 0o030000; +pub const TABDLY: crate::tcflag_t = 0o006000; +pub const BSDLY: crate::tcflag_t = 0o100000; +pub const FFDLY: crate::tcflag_t = 0o040000; +pub const VTDLY: crate::tcflag_t = 0o200000; +pub const XTABS: crate::tcflag_t = 0o006000; + +pub const B0: crate::speed_t = 
0o000000; +pub const B50: crate::speed_t = 0o000001; +pub const B75: crate::speed_t = 0o000002; +pub const B110: crate::speed_t = 0o000003; +pub const B134: crate::speed_t = 0o000004; +pub const B150: crate::speed_t = 0o000005; +pub const B200: crate::speed_t = 0o000006; +pub const B300: crate::speed_t = 0o000007; +pub const B600: crate::speed_t = 0o000010; +pub const B1200: crate::speed_t = 0o000011; +pub const B1800: crate::speed_t = 0o000012; +pub const B2400: crate::speed_t = 0o000013; +pub const B4800: crate::speed_t = 0o000014; +pub const B9600: crate::speed_t = 0o000015; +pub const B19200: crate::speed_t = 0o000016; +pub const B38400: crate::speed_t = 0o000017; +pub const EXTA: crate::speed_t = B19200; +pub const EXTB: crate::speed_t = B38400; +pub const CBAUDEX: crate::speed_t = 0o000020; +pub const B57600: crate::speed_t = 0o0020; +pub const B115200: crate::speed_t = 0o0021; +pub const B230400: crate::speed_t = 0o0022; +pub const B460800: crate::speed_t = 0o0023; +pub const B500000: crate::speed_t = 0o0024; +pub const B576000: crate::speed_t = 0o0025; +pub const B921600: crate::speed_t = 0o0026; +pub const B1000000: crate::speed_t = 0o0027; +pub const B1152000: crate::speed_t = 0o0030; +pub const B1500000: crate::speed_t = 0o0031; +pub const B2000000: crate::speed_t = 0o0032; +pub const B2500000: crate::speed_t = 0o0033; +pub const B3000000: crate::speed_t = 0o0034; +pub const B3500000: crate::speed_t = 0o0035; +pub const B4000000: crate::speed_t = 0o0036; + +pub const VEOL: usize = 6; +pub const VEOL2: usize = 8; +pub const VMIN: usize = 5; +pub const IEXTEN: crate::tcflag_t = 0x400; +pub const TOSTOP: crate::tcflag_t = 0x400000; +pub const FLUSHO: crate::tcflag_t = 0x800000; +pub const EXTPROC: crate::tcflag_t = 0x10000000; + +pub const SYS_restart_syscall: c_long = 0; +pub const SYS_exit: c_long = 1; +pub const SYS_fork: c_long = 2; +pub const SYS_read: c_long = 3; +pub const SYS_write: c_long = 4; +pub const SYS_open: c_long = 5; +pub const SYS_close: c_long = 6; +pub const SYS_waitpid: c_long = 7; +pub const SYS_creat: c_long = 8; +pub const SYS_link: c_long = 9; +pub const SYS_unlink: c_long = 10; +pub const SYS_execve: c_long = 11; +pub const SYS_chdir: c_long = 12; +pub const SYS_time: c_long = 13; +pub const SYS_mknod: c_long = 14; +pub const SYS_chmod: c_long = 15; +pub const SYS_lchown: c_long = 16; +pub const SYS_break: c_long = 17; +pub const SYS_oldstat: c_long = 18; +pub const SYS_lseek: c_long = 19; +pub const SYS_getpid: c_long = 20; +pub const SYS_mount: c_long = 21; +pub const SYS_umount: c_long = 22; +pub const SYS_setuid: c_long = 23; +pub const SYS_getuid: c_long = 24; +pub const SYS_stime: c_long = 25; +pub const SYS_ptrace: c_long = 26; +pub const SYS_alarm: c_long = 27; +pub const SYS_oldfstat: c_long = 28; +pub const SYS_pause: c_long = 29; +pub const SYS_utime: c_long = 30; +pub const SYS_stty: c_long = 31; +pub const SYS_gtty: c_long = 32; +pub const SYS_access: c_long = 33; +pub const SYS_nice: c_long = 34; +pub const SYS_ftime: c_long = 35; +pub const SYS_sync: c_long = 36; +pub const SYS_kill: c_long = 37; +pub const SYS_rename: c_long = 38; +pub const SYS_mkdir: c_long = 39; +pub const SYS_rmdir: c_long = 40; +pub const SYS_dup: c_long = 41; +pub const SYS_pipe: c_long = 42; +pub const SYS_times: c_long = 43; +pub const SYS_prof: c_long = 44; +pub const SYS_brk: c_long = 45; +pub const SYS_setgid: c_long = 46; +pub const SYS_getgid: c_long = 47; +pub const SYS_signal: c_long = 48; +pub const SYS_geteuid: c_long = 49; +pub const SYS_getegid: c_long = 
50; +pub const SYS_acct: c_long = 51; +pub const SYS_umount2: c_long = 52; +pub const SYS_lock: c_long = 53; +pub const SYS_ioctl: c_long = 54; +pub const SYS_fcntl: c_long = 55; +pub const SYS_mpx: c_long = 56; +pub const SYS_setpgid: c_long = 57; +pub const SYS_ulimit: c_long = 58; +pub const SYS_oldolduname: c_long = 59; +pub const SYS_umask: c_long = 60; +pub const SYS_chroot: c_long = 61; +pub const SYS_ustat: c_long = 62; +pub const SYS_dup2: c_long = 63; +pub const SYS_getppid: c_long = 64; +pub const SYS_getpgrp: c_long = 65; +pub const SYS_setsid: c_long = 66; +pub const SYS_sigaction: c_long = 67; +pub const SYS_sgetmask: c_long = 68; +pub const SYS_ssetmask: c_long = 69; +pub const SYS_setreuid: c_long = 70; +pub const SYS_setregid: c_long = 71; +pub const SYS_sigsuspend: c_long = 72; +pub const SYS_sigpending: c_long = 73; +pub const SYS_sethostname: c_long = 74; +pub const SYS_setrlimit: c_long = 75; +pub const SYS_getrlimit: c_long = 76; +pub const SYS_getrusage: c_long = 77; +pub const SYS_gettimeofday: c_long = 78; +pub const SYS_settimeofday: c_long = 79; +pub const SYS_getgroups: c_long = 80; +pub const SYS_setgroups: c_long = 81; +pub const SYS_select: c_long = 82; +pub const SYS_symlink: c_long = 83; +pub const SYS_oldlstat: c_long = 84; +pub const SYS_readlink: c_long = 85; +pub const SYS_uselib: c_long = 86; +pub const SYS_swapon: c_long = 87; +pub const SYS_reboot: c_long = 88; +pub const SYS_readdir: c_long = 89; +pub const SYS_mmap: c_long = 90; +pub const SYS_munmap: c_long = 91; +pub const SYS_truncate: c_long = 92; +pub const SYS_ftruncate: c_long = 93; +pub const SYS_fchmod: c_long = 94; +pub const SYS_fchown: c_long = 95; +pub const SYS_getpriority: c_long = 96; +pub const SYS_setpriority: c_long = 97; +pub const SYS_profil: c_long = 98; +pub const SYS_statfs: c_long = 99; +pub const SYS_fstatfs: c_long = 100; +pub const SYS_ioperm: c_long = 101; +pub const SYS_socketcall: c_long = 102; +pub const SYS_syslog: c_long = 103; +pub const SYS_setitimer: c_long = 104; +pub const SYS_getitimer: c_long = 105; +pub const SYS_stat: c_long = 106; +pub const SYS_lstat: c_long = 107; +pub const SYS_fstat: c_long = 108; +pub const SYS_olduname: c_long = 109; +pub const SYS_iopl: c_long = 110; +pub const SYS_vhangup: c_long = 111; +pub const SYS_idle: c_long = 112; +pub const SYS_vm86: c_long = 113; +pub const SYS_wait4: c_long = 114; +pub const SYS_swapoff: c_long = 115; +pub const SYS_sysinfo: c_long = 116; +pub const SYS_ipc: c_long = 117; +pub const SYS_fsync: c_long = 118; +pub const SYS_sigreturn: c_long = 119; +pub const SYS_clone: c_long = 120; +pub const SYS_setdomainname: c_long = 121; +pub const SYS_uname: c_long = 122; +pub const SYS_modify_ldt: c_long = 123; +pub const SYS_adjtimex: c_long = 124; +pub const SYS_mprotect: c_long = 125; +pub const SYS_sigprocmask: c_long = 126; +#[deprecated(since = "0.2.70", note = "Functional up to 2.6 kernel")] +pub const SYS_create_module: c_long = 127; +pub const SYS_init_module: c_long = 128; +pub const SYS_delete_module: c_long = 129; +#[deprecated(since = "0.2.70", note = "Functional up to 2.6 kernel")] +pub const SYS_get_kernel_syms: c_long = 130; +pub const SYS_quotactl: c_long = 131; +pub const SYS_getpgid: c_long = 132; +pub const SYS_fchdir: c_long = 133; +pub const SYS_bdflush: c_long = 134; +pub const SYS_sysfs: c_long = 135; +pub const SYS_personality: c_long = 136; +pub const SYS_afs_syscall: c_long = 137; /* Syscall for Andrew File System */ +pub const SYS_setfsuid: c_long = 138; +pub const SYS_setfsgid: c_long = 
139; +pub const SYS__llseek: c_long = 140; +pub const SYS_getdents: c_long = 141; +pub const SYS__newselect: c_long = 142; +pub const SYS_flock: c_long = 143; +pub const SYS_msync: c_long = 144; +pub const SYS_readv: c_long = 145; +pub const SYS_writev: c_long = 146; +pub const SYS_getsid: c_long = 147; +pub const SYS_fdatasync: c_long = 148; +pub const SYS__sysctl: c_long = 149; +pub const SYS_mlock: c_long = 150; +pub const SYS_munlock: c_long = 151; +pub const SYS_mlockall: c_long = 152; +pub const SYS_munlockall: c_long = 153; +pub const SYS_sched_setparam: c_long = 154; +pub const SYS_sched_getparam: c_long = 155; +pub const SYS_sched_setscheduler: c_long = 156; +pub const SYS_sched_getscheduler: c_long = 157; +pub const SYS_sched_yield: c_long = 158; +pub const SYS_sched_get_priority_max: c_long = 159; +pub const SYS_sched_get_priority_min: c_long = 160; +pub const SYS_sched_rr_get_interval: c_long = 161; +pub const SYS_nanosleep: c_long = 162; +pub const SYS_mremap: c_long = 163; +pub const SYS_setresuid: c_long = 164; +pub const SYS_getresuid: c_long = 165; +#[deprecated(since = "0.2.70", note = "Functional up to 2.6 kernel")] +pub const SYS_query_module: c_long = 166; +pub const SYS_poll: c_long = 167; +pub const SYS_nfsservctl: c_long = 168; +pub const SYS_setresgid: c_long = 169; +pub const SYS_getresgid: c_long = 170; +pub const SYS_prctl: c_long = 171; +pub const SYS_rt_sigreturn: c_long = 172; +pub const SYS_rt_sigaction: c_long = 173; +pub const SYS_rt_sigprocmask: c_long = 174; +pub const SYS_rt_sigpending: c_long = 175; +pub const SYS_rt_sigtimedwait: c_long = 176; +pub const SYS_rt_sigqueueinfo: c_long = 177; +pub const SYS_rt_sigsuspend: c_long = 178; +pub const SYS_pread64: c_long = 179; +pub const SYS_pwrite64: c_long = 180; +pub const SYS_chown: c_long = 181; +pub const SYS_getcwd: c_long = 182; +pub const SYS_capget: c_long = 183; +pub const SYS_capset: c_long = 184; +pub const SYS_sigaltstack: c_long = 185; +pub const SYS_sendfile: c_long = 186; +pub const SYS_getpmsg: c_long = 187; /* some people actually want streams */ +pub const SYS_putpmsg: c_long = 188; /* some people actually want streams */ +pub const SYS_vfork: c_long = 189; +pub const SYS_ugetrlimit: c_long = 190; /* SuS compliant getrlimit */ +pub const SYS_readahead: c_long = 191; +pub const SYS_mmap2: c_long = 192; +pub const SYS_truncate64: c_long = 193; +pub const SYS_ftruncate64: c_long = 194; +pub const SYS_stat64: c_long = 195; +pub const SYS_lstat64: c_long = 196; +pub const SYS_fstat64: c_long = 197; +pub const SYS_pciconfig_read: c_long = 198; +pub const SYS_pciconfig_write: c_long = 199; +pub const SYS_pciconfig_iobase: c_long = 200; +pub const SYS_multiplexer: c_long = 201; +pub const SYS_getdents64: c_long = 202; +pub const SYS_pivot_root: c_long = 203; +pub const SYS_fcntl64: c_long = 204; +pub const SYS_madvise: c_long = 205; +pub const SYS_mincore: c_long = 206; +pub const SYS_gettid: c_long = 207; +pub const SYS_tkill: c_long = 208; +pub const SYS_setxattr: c_long = 209; +pub const SYS_lsetxattr: c_long = 210; +pub const SYS_fsetxattr: c_long = 211; +pub const SYS_getxattr: c_long = 212; +pub const SYS_lgetxattr: c_long = 213; +pub const SYS_fgetxattr: c_long = 214; +pub const SYS_listxattr: c_long = 215; +pub const SYS_llistxattr: c_long = 216; +pub const SYS_flistxattr: c_long = 217; +pub const SYS_removexattr: c_long = 218; +pub const SYS_lremovexattr: c_long = 219; +pub const SYS_fremovexattr: c_long = 220; +pub const SYS_futex: c_long = 221; +pub const SYS_sched_setaffinity: c_long = 
222; +pub const SYS_sched_getaffinity: c_long = 223; +pub const SYS_tuxcall: c_long = 225; +pub const SYS_sendfile64: c_long = 226; +pub const SYS_io_setup: c_long = 227; +pub const SYS_io_destroy: c_long = 228; +pub const SYS_io_getevents: c_long = 229; +pub const SYS_io_submit: c_long = 230; +pub const SYS_io_cancel: c_long = 231; +pub const SYS_set_tid_address: c_long = 232; +pub const SYS_fadvise64: c_long = 233; +pub const SYS_exit_group: c_long = 234; +pub const SYS_lookup_dcookie: c_long = 235; +pub const SYS_epoll_create: c_long = 236; +pub const SYS_epoll_ctl: c_long = 237; +pub const SYS_epoll_wait: c_long = 238; +pub const SYS_remap_file_pages: c_long = 239; +pub const SYS_timer_create: c_long = 240; +pub const SYS_timer_settime: c_long = 241; +pub const SYS_timer_gettime: c_long = 242; +pub const SYS_timer_getoverrun: c_long = 243; +pub const SYS_timer_delete: c_long = 244; +pub const SYS_clock_settime: c_long = 245; +pub const SYS_clock_gettime: c_long = 246; +pub const SYS_clock_getres: c_long = 247; +pub const SYS_clock_nanosleep: c_long = 248; +pub const SYS_swapcontext: c_long = 249; +pub const SYS_tgkill: c_long = 250; +pub const SYS_utimes: c_long = 251; +pub const SYS_statfs64: c_long = 252; +pub const SYS_fstatfs64: c_long = 253; +pub const SYS_fadvise64_64: c_long = 254; +pub const SYS_rtas: c_long = 255; +pub const SYS_sys_debug_setcontext: c_long = 256; +pub const SYS_migrate_pages: c_long = 258; +pub const SYS_mbind: c_long = 259; +pub const SYS_get_mempolicy: c_long = 260; +pub const SYS_set_mempolicy: c_long = 261; +pub const SYS_mq_open: c_long = 262; +pub const SYS_mq_unlink: c_long = 263; +pub const SYS_mq_timedsend: c_long = 264; +pub const SYS_mq_timedreceive: c_long = 265; +pub const SYS_mq_notify: c_long = 266; +pub const SYS_mq_getsetattr: c_long = 267; +pub const SYS_kexec_load: c_long = 268; +pub const SYS_add_key: c_long = 269; +pub const SYS_request_key: c_long = 270; +pub const SYS_keyctl: c_long = 271; +pub const SYS_waitid: c_long = 272; +pub const SYS_ioprio_set: c_long = 273; +pub const SYS_ioprio_get: c_long = 274; +pub const SYS_inotify_init: c_long = 275; +pub const SYS_inotify_add_watch: c_long = 276; +pub const SYS_inotify_rm_watch: c_long = 277; +pub const SYS_spu_run: c_long = 278; +pub const SYS_spu_create: c_long = 279; +pub const SYS_pselect6: c_long = 280; +pub const SYS_ppoll: c_long = 281; +pub const SYS_unshare: c_long = 282; +pub const SYS_splice: c_long = 283; +pub const SYS_tee: c_long = 284; +pub const SYS_vmsplice: c_long = 285; +pub const SYS_openat: c_long = 286; +pub const SYS_mkdirat: c_long = 287; +pub const SYS_mknodat: c_long = 288; +pub const SYS_fchownat: c_long = 289; +pub const SYS_futimesat: c_long = 290; +pub const SYS_fstatat64: c_long = 291; +pub const SYS_unlinkat: c_long = 292; +pub const SYS_renameat: c_long = 293; +pub const SYS_linkat: c_long = 294; +pub const SYS_symlinkat: c_long = 295; +pub const SYS_readlinkat: c_long = 296; +pub const SYS_fchmodat: c_long = 297; +pub const SYS_faccessat: c_long = 298; +pub const SYS_get_robust_list: c_long = 299; +pub const SYS_set_robust_list: c_long = 300; +pub const SYS_move_pages: c_long = 301; +pub const SYS_getcpu: c_long = 302; +pub const SYS_epoll_pwait: c_long = 303; +pub const SYS_utimensat: c_long = 304; +pub const SYS_signalfd: c_long = 305; +pub const SYS_timerfd_create: c_long = 306; +pub const SYS_eventfd: c_long = 307; +pub const SYS_sync_file_range2: c_long = 308; +pub const SYS_fallocate: c_long = 309; +pub const SYS_subpage_prot: c_long = 310; +pub 
const SYS_timerfd_settime: c_long = 311; +pub const SYS_timerfd_gettime: c_long = 312; +pub const SYS_signalfd4: c_long = 313; +pub const SYS_eventfd2: c_long = 314; +pub const SYS_epoll_create1: c_long = 315; +pub const SYS_dup3: c_long = 316; +pub const SYS_pipe2: c_long = 317; +pub const SYS_inotify_init1: c_long = 318; +pub const SYS_perf_event_open: c_long = 319; +pub const SYS_preadv: c_long = 320; +pub const SYS_pwritev: c_long = 321; +pub const SYS_rt_tgsigqueueinfo: c_long = 322; +pub const SYS_fanotify_init: c_long = 323; +pub const SYS_fanotify_mark: c_long = 324; +pub const SYS_prlimit64: c_long = 325; +pub const SYS_socket: c_long = 326; +pub const SYS_bind: c_long = 327; +pub const SYS_connect: c_long = 328; +pub const SYS_listen: c_long = 329; +pub const SYS_accept: c_long = 330; +pub const SYS_getsockname: c_long = 331; +pub const SYS_getpeername: c_long = 332; +pub const SYS_socketpair: c_long = 333; +pub const SYS_send: c_long = 334; +pub const SYS_sendto: c_long = 335; +pub const SYS_recv: c_long = 336; +pub const SYS_recvfrom: c_long = 337; +pub const SYS_shutdown: c_long = 338; +pub const SYS_setsockopt: c_long = 339; +pub const SYS_getsockopt: c_long = 340; +pub const SYS_sendmsg: c_long = 341; +pub const SYS_recvmsg: c_long = 342; +pub const SYS_recvmmsg: c_long = 343; +pub const SYS_accept4: c_long = 344; +pub const SYS_name_to_handle_at: c_long = 345; +pub const SYS_open_by_handle_at: c_long = 346; +pub const SYS_clock_adjtime: c_long = 347; +pub const SYS_syncfs: c_long = 348; +pub const SYS_sendmmsg: c_long = 349; +pub const SYS_setns: c_long = 350; +pub const SYS_process_vm_readv: c_long = 351; +pub const SYS_process_vm_writev: c_long = 352; +pub const SYS_finit_module: c_long = 353; +pub const SYS_kcmp: c_long = 354; +pub const SYS_sched_setattr: c_long = 355; +pub const SYS_sched_getattr: c_long = 356; +pub const SYS_renameat2: c_long = 357; +pub const SYS_seccomp: c_long = 358; +pub const SYS_getrandom: c_long = 359; +pub const SYS_memfd_create: c_long = 360; +pub const SYS_bpf: c_long = 361; +pub const SYS_execveat: c_long = 362; +pub const SYS_switch_endian: c_long = 363; +pub const SYS_userfaultfd: c_long = 364; +pub const SYS_membarrier: c_long = 365; +pub const SYS_mlock2: c_long = 378; +pub const SYS_copy_file_range: c_long = 379; +pub const SYS_preadv2: c_long = 380; +pub const SYS_pwritev2: c_long = 381; +pub const SYS_kexec_file_load: c_long = 382; +pub const SYS_statx: c_long = 383; +pub const SYS_rseq: c_long = 387; +pub const SYS_pidfd_send_signal: c_long = 424; +pub const SYS_io_uring_setup: c_long = 425; +pub const SYS_io_uring_enter: c_long = 426; +pub const SYS_io_uring_register: c_long = 427; +pub const SYS_open_tree: c_long = 428; +pub const SYS_move_mount: c_long = 429; +pub const SYS_fsopen: c_long = 430; +pub const SYS_fsconfig: c_long = 431; +pub const SYS_fsmount: c_long = 432; +pub const SYS_fspick: c_long = 433; +pub const SYS_pidfd_open: c_long = 434; +pub const SYS_clone3: c_long = 435; +pub const SYS_close_range: c_long = 436; +pub const SYS_openat2: c_long = 437; +pub const SYS_pidfd_getfd: c_long = 438; +pub const SYS_faccessat2: c_long = 439; +pub const SYS_process_madvise: c_long = 440; +pub const SYS_epoll_pwait2: c_long = 441; +pub const SYS_mount_setattr: c_long = 442; +pub const SYS_quotactl_fd: c_long = 443; +pub const SYS_landlock_create_ruleset: c_long = 444; +pub const SYS_landlock_add_rule: c_long = 445; +pub const SYS_landlock_restrict_self: c_long = 446; +pub const SYS_memfd_secret: c_long = 447; +pub const 
SYS_process_mrelease: c_long = 448; +pub const SYS_futex_waitv: c_long = 449; +pub const SYS_set_mempolicy_home_node: c_long = 450; +pub const SYS_mseal: c_long = 462; diff --git a/vendor/libc/src/unix/linux_like/linux/gnu/b32/riscv32/mod.rs b/vendor/libc/src/unix/linux_like/linux/gnu/b32/riscv32/mod.rs new file mode 100644 index 00000000000000..b04ee50462745e --- /dev/null +++ b/vendor/libc/src/unix/linux_like/linux/gnu/b32/riscv32/mod.rs @@ -0,0 +1,808 @@ +//! RISC-V-specific definitions for 32-bit linux-like values + +use crate::prelude::*; +use crate::{off64_t, off_t}; + +pub type wchar_t = c_int; + +s! { + pub struct msqid_ds { + pub msg_perm: crate::ipc_perm, + pub msg_stime: crate::time_t, + pub msg_rtime: crate::time_t, + pub msg_ctime: crate::time_t, + pub __msg_cbytes: c_ulong, + pub msg_qnum: crate::msgqnum_t, + pub msg_qbytes: crate::msglen_t, + pub msg_lspid: crate::pid_t, + pub msg_lrpid: crate::pid_t, + __glibc_reserved4: c_ulong, + __glibc_reserved5: c_ulong, + } + + pub struct stat64 { + pub st_dev: crate::dev_t, + pub st_ino: crate::ino64_t, + pub st_mode: crate::mode_t, + pub st_nlink: crate::nlink_t, + pub st_uid: crate::uid_t, + pub st_gid: crate::gid_t, + pub st_rdev: crate::dev_t, + pub __pad1: crate::dev_t, + pub st_size: off64_t, + pub st_blksize: crate::blksize_t, + pub __pad2: c_int, + pub st_blocks: crate::blkcnt64_t, + pub st_atime: crate::time_t, + pub st_atime_nsec: c_long, + pub st_mtime: crate::time_t, + pub st_mtime_nsec: c_long, + pub st_ctime: crate::time_t, + pub st_ctime_nsec: c_long, + __unused: [c_int; 2], + } + + pub struct statfs { + pub f_type: c_long, + pub f_bsize: c_long, + pub f_blocks: crate::fsblkcnt_t, + pub f_bfree: crate::fsblkcnt_t, + pub f_bavail: crate::fsblkcnt_t, + pub f_files: crate::fsfilcnt_t, + pub f_ffree: crate::fsfilcnt_t, + pub f_fsid: crate::fsid_t, + pub f_namelen: c_long, + pub f_frsize: c_long, + pub f_flags: c_long, + pub f_spare: [c_long; 4], + } + + pub struct statfs64 { + pub f_type: c_long, + pub f_bsize: c_long, + pub f_blocks: crate::fsblkcnt64_t, + pub f_bfree: crate::fsblkcnt64_t, + pub f_bavail: crate::fsblkcnt64_t, + pub f_files: crate::fsfilcnt64_t, + pub f_ffree: crate::fsfilcnt64_t, + pub f_fsid: crate::fsid_t, + pub f_namelen: c_long, + pub f_frsize: c_long, + pub f_flags: c_long, + pub f_spare: [c_long; 4], + } + + pub struct statvfs64 { + pub f_bsize: c_ulong, + pub f_frsize: c_ulong, + pub f_blocks: crate::fsblkcnt64_t, + pub f_bfree: crate::fsblkcnt64_t, + pub f_bavail: crate::fsblkcnt64_t, + pub f_files: crate::fsfilcnt64_t, + pub f_ffree: crate::fsfilcnt64_t, + pub f_favail: crate::fsfilcnt64_t, + pub f_fsid: c_ulong, + pub f_flag: c_ulong, + pub f_namemax: c_ulong, + pub __f_spare: [c_int; 6], + } + + pub struct siginfo_t { + pub si_signo: c_int, + pub si_errno: c_int, + pub si_code: c_int, + #[doc(hidden)] + #[deprecated( + since = "0.2.54", + note = "Please leave a comment on \ + https://github.com/rust-lang/libc/pull/1316 if you're using \ + this field" + )] + pub _pad: [c_int; 29], + _align: [u64; 0], + } + + pub struct stack_t { + pub ss_sp: *mut c_void, + pub ss_flags: c_int, + pub ss_size: size_t, + } + + // FIXME(1.0): This should not implement `PartialEq` + #[allow(unpredictable_function_pointer_comparisons)] + pub struct sigaction { + pub sa_sigaction: crate::sighandler_t, + pub sa_mask: crate::sigset_t, + pub sa_flags: c_int, + pub sa_restorer: Option, + } + + pub struct ipc_perm { + pub __key: crate::key_t, + pub uid: crate::uid_t, + pub gid: crate::gid_t, + pub cuid: crate::uid_t, + pub 
cgid: crate::gid_t, + pub mode: c_ushort, + __pad1: c_ushort, + pub __seq: c_ushort, + __pad2: c_ushort, + __unused1: c_ulong, + __unused2: c_ulong, + } + + pub struct shmid_ds { + pub shm_perm: crate::ipc_perm, + pub shm_segsz: size_t, + pub shm_atime: crate::time_t, + pub shm_dtime: crate::time_t, + pub shm_ctime: crate::time_t, + pub shm_cpid: crate::pid_t, + pub shm_lpid: crate::pid_t, + pub shm_nattch: crate::shmatt_t, + __unused5: c_ulong, + __unused6: c_ulong, + } + + pub struct flock { + pub l_type: c_short, + pub l_whence: c_short, + pub l_start: off_t, + pub l_len: off_t, + pub l_pid: crate::pid_t, + } + + pub struct flock64 { + pub l_type: c_short, + pub l_whence: c_short, + pub l_start: off64_t, + pub l_len: off64_t, + pub l_pid: crate::pid_t, + } + + pub struct user_regs_struct { + pub pc: c_ulong, + pub ra: c_ulong, + pub sp: c_ulong, + pub gp: c_ulong, + pub tp: c_ulong, + pub t0: c_ulong, + pub t1: c_ulong, + pub t2: c_ulong, + pub s0: c_ulong, + pub s1: c_ulong, + pub a0: c_ulong, + pub a1: c_ulong, + pub a2: c_ulong, + pub a3: c_ulong, + pub a4: c_ulong, + pub a5: c_ulong, + pub a6: c_ulong, + pub a7: c_ulong, + pub s2: c_ulong, + pub s3: c_ulong, + pub s4: c_ulong, + pub s5: c_ulong, + pub s6: c_ulong, + pub s7: c_ulong, + pub s8: c_ulong, + pub s9: c_ulong, + pub s10: c_ulong, + pub s11: c_ulong, + pub t3: c_ulong, + pub t4: c_ulong, + pub t5: c_ulong, + pub t6: c_ulong, + } +} + +s_no_extra_traits! { + pub struct ucontext_t { + pub __uc_flags: c_ulong, + pub uc_link: *mut ucontext_t, + pub uc_stack: crate::stack_t, + pub uc_sigmask: crate::sigset_t, + pub uc_mcontext: mcontext_t, + } + + #[repr(align(16))] + pub struct mcontext_t { + pub __gregs: [c_ulong; 32], + pub __fpregs: __riscv_mc_fp_state, + } + + pub union __riscv_mc_fp_state { + pub __f: __riscv_mc_f_ext_state, + pub __d: __riscv_mc_d_ext_state, + pub __q: __riscv_mc_q_ext_state, + } + + pub struct __riscv_mc_f_ext_state { + pub __f: [c_uint; 32], + pub __fcsr: c_uint, + } + + pub struct __riscv_mc_d_ext_state { + pub __f: [c_ulonglong; 32], + pub __fcsr: c_uint, + } + + #[repr(align(16))] + pub struct __riscv_mc_q_ext_state { + pub __f: [c_ulonglong; 64], + pub __fcsr: c_uint, + pub __glibc_reserved: [c_uint; 3], + } +} + +pub const O_LARGEFILE: c_int = 0; +pub const VEOF: usize = 4; +pub const RTLD_DEEPBIND: c_int = 0x8; +pub const RTLD_GLOBAL: c_int = 0x100; +pub const RTLD_NOLOAD: c_int = 0x4; +pub const O_APPEND: c_int = 1024; +pub const O_CREAT: c_int = 64; +pub const O_EXCL: c_int = 128; +pub const O_NOCTTY: c_int = 256; +pub const O_NONBLOCK: c_int = 2048; +pub const O_SYNC: c_int = 1052672; +pub const O_RSYNC: c_int = 1052672; +pub const O_DSYNC: c_int = 4096; +pub const O_FSYNC: c_int = 1052672; +pub const MADV_SOFT_OFFLINE: c_int = 101; +pub const MAP_GROWSDOWN: c_int = 256; +pub const EDEADLK: c_int = 35; +pub const ENAMETOOLONG: c_int = 36; +pub const ENOLCK: c_int = 37; +pub const ENOSYS: c_int = 38; +pub const ENOTEMPTY: c_int = 39; +pub const ELOOP: c_int = 40; +pub const ENOMSG: c_int = 42; +pub const EIDRM: c_int = 43; +pub const ECHRNG: c_int = 44; +pub const EL2NSYNC: c_int = 45; +pub const EL3HLT: c_int = 46; +pub const EL3RST: c_int = 47; +pub const ELNRNG: c_int = 48; +pub const EUNATCH: c_int = 49; +pub const ENOCSI: c_int = 50; +pub const EL2HLT: c_int = 51; +pub const EBADE: c_int = 52; +pub const EBADR: c_int = 53; +pub const EXFULL: c_int = 54; +pub const ENOANO: c_int = 55; +pub const EBADRQC: c_int = 56; +pub const EBADSLT: c_int = 57; +pub const EMULTIHOP: c_int = 72; +pub const 
EOVERFLOW: c_int = 75; +pub const ENOTUNIQ: c_int = 76; +pub const EBADFD: c_int = 77; +pub const EBADMSG: c_int = 74; +pub const EREMCHG: c_int = 78; +pub const ELIBACC: c_int = 79; +pub const ELIBBAD: c_int = 80; +pub const ELIBSCN: c_int = 81; +pub const ELIBMAX: c_int = 82; +pub const ELIBEXEC: c_int = 83; +pub const EILSEQ: c_int = 84; +pub const ERESTART: c_int = 85; +pub const ESTRPIPE: c_int = 86; +pub const EUSERS: c_int = 87; +pub const ENOTSOCK: c_int = 88; +pub const EDESTADDRREQ: c_int = 89; +pub const EMSGSIZE: c_int = 90; +pub const EPROTOTYPE: c_int = 91; +pub const ENOPROTOOPT: c_int = 92; +pub const EPROTONOSUPPORT: c_int = 93; +pub const ESOCKTNOSUPPORT: c_int = 94; +pub const EOPNOTSUPP: c_int = 95; +pub const EPFNOSUPPORT: c_int = 96; +pub const EAFNOSUPPORT: c_int = 97; +pub const EADDRINUSE: c_int = 98; +pub const EADDRNOTAVAIL: c_int = 99; +pub const ENETDOWN: c_int = 100; +pub const ENETUNREACH: c_int = 101; +pub const ENETRESET: c_int = 102; +pub const ECONNABORTED: c_int = 103; +pub const ECONNRESET: c_int = 104; +pub const ENOBUFS: c_int = 105; +pub const EISCONN: c_int = 106; +pub const ENOTCONN: c_int = 107; +pub const ESHUTDOWN: c_int = 108; +pub const ETOOMANYREFS: c_int = 109; +pub const ETIMEDOUT: c_int = 110; +pub const ECONNREFUSED: c_int = 111; +pub const EHOSTDOWN: c_int = 112; +pub const EHOSTUNREACH: c_int = 113; +pub const EALREADY: c_int = 114; +pub const EINPROGRESS: c_int = 115; +pub const ESTALE: c_int = 116; +pub const EDQUOT: c_int = 122; +pub const ENOMEDIUM: c_int = 123; +pub const EMEDIUMTYPE: c_int = 124; +pub const ECANCELED: c_int = 125; +pub const ENOKEY: c_int = 126; +pub const EKEYEXPIRED: c_int = 127; +pub const EKEYREVOKED: c_int = 128; +pub const EKEYREJECTED: c_int = 129; +pub const EOWNERDEAD: c_int = 130; +pub const ENOTRECOVERABLE: c_int = 131; +pub const EHWPOISON: c_int = 133; +pub const ERFKILL: c_int = 132; + +pub const SOCK_STREAM: c_int = 1; +pub const SOCK_DGRAM: c_int = 2; +pub const SA_SIGINFO: c_int = 4; +pub const SA_NOCLDWAIT: c_int = 2; +pub const SIGTTIN: c_int = 21; +pub const SIGTTOU: c_int = 22; +pub const SIGXCPU: c_int = 24; +pub const SIGXFSZ: c_int = 25; +pub const SIGVTALRM: c_int = 26; +pub const SIGPROF: c_int = 27; +pub const SIGWINCH: c_int = 28; +pub const SIGCHLD: c_int = 17; +pub const SIGBUS: c_int = 7; +pub const SIGUSR1: c_int = 10; +pub const SIGUSR2: c_int = 12; +pub const SIGCONT: c_int = 18; +pub const SIGSTOP: c_int = 19; +pub const SIGTSTP: c_int = 20; +pub const SIGURG: c_int = 23; +pub const SIGIO: c_int = 29; +pub const SIGSYS: c_int = 31; +pub const SIGSTKFLT: c_int = 16; +pub const SIGPOLL: c_int = 29; +pub const SIGPWR: c_int = 30; +pub const SIG_SETMASK: c_int = 2; +pub const SIG_BLOCK: c_int = 0; +pub const SIG_UNBLOCK: c_int = 1; +pub const POLLWRNORM: c_short = 256; +pub const POLLWRBAND: c_short = 512; +pub const O_ASYNC: c_int = 8192; +pub const O_NDELAY: c_int = 2048; +pub const EFD_NONBLOCK: c_int = 2048; +pub const F_GETLK: c_int = 5; +pub const F_GETOWN: c_int = 9; +pub const F_SETOWN: c_int = 8; +pub const SFD_NONBLOCK: c_int = 2048; +pub const TCSANOW: c_int = 0; +pub const TCSADRAIN: c_int = 1; +pub const TCSAFLUSH: c_int = 2; + +pub const __SIZEOF_PTHREAD_CONDATTR_T: usize = 4; +pub const __SIZEOF_PTHREAD_MUTEXATTR_T: usize = 4; +pub const __SIZEOF_PTHREAD_BARRIERATTR_T: usize = 4; +pub const O_DIRECT: c_int = 16384; +pub const O_DIRECTORY: c_int = 65536; +pub const O_NOFOLLOW: c_int = 131072; +pub const MAP_HUGETLB: c_int = 262144; +pub const MAP_LOCKED: c_int = 8192; 
+pub const MAP_NORESERVE: c_int = 16384; +pub const MAP_ANON: c_int = 32; +pub const MAP_ANONYMOUS: c_int = 32; +pub const MAP_DENYWRITE: c_int = 2048; +pub const MAP_EXECUTABLE: c_int = 4096; +pub const MAP_POPULATE: c_int = 32768; +pub const MAP_NONBLOCK: c_int = 65536; +pub const MAP_STACK: c_int = 131072; +pub const MAP_SYNC: c_int = 0x080000; +pub const EDEADLOCK: c_int = 35; +pub const EUCLEAN: c_int = 117; +pub const ENOTNAM: c_int = 118; +pub const ENAVAIL: c_int = 119; +pub const EISNAM: c_int = 120; +pub const EREMOTEIO: c_int = 121; +pub const MCL_CURRENT: c_int = 1; +pub const MCL_FUTURE: c_int = 2; +pub const MCL_ONFAULT: c_int = 4; +pub const SIGSTKSZ: size_t = 8192; +pub const MINSIGSTKSZ: size_t = 2048; +pub const CBAUD: crate::tcflag_t = 4111; +pub const TAB1: crate::tcflag_t = 2048; +pub const TAB2: crate::tcflag_t = 4096; +pub const TAB3: crate::tcflag_t = 6144; +pub const CR1: crate::tcflag_t = 512; +pub const CR2: crate::tcflag_t = 1024; +pub const CR3: crate::tcflag_t = 1536; +pub const FF1: crate::tcflag_t = 32768; +pub const BS1: crate::tcflag_t = 8192; +pub const VT1: crate::tcflag_t = 16384; +pub const VWERASE: usize = 14; +pub const VREPRINT: usize = 12; +pub const VSUSP: usize = 10; +pub const VSTART: usize = 8; +pub const VSTOP: usize = 9; +pub const VDISCARD: usize = 13; +pub const VTIME: usize = 5; +pub const IXON: crate::tcflag_t = 1024; +pub const IXOFF: crate::tcflag_t = 4096; +pub const ONLCR: crate::tcflag_t = 4; +pub const CSIZE: crate::tcflag_t = 48; +pub const CS6: crate::tcflag_t = 16; +pub const CS7: crate::tcflag_t = 32; +pub const CS8: crate::tcflag_t = 48; +pub const CSTOPB: crate::tcflag_t = 64; +pub const CREAD: crate::tcflag_t = 128; +pub const PARENB: crate::tcflag_t = 256; +pub const PARODD: crate::tcflag_t = 512; +pub const HUPCL: crate::tcflag_t = 1024; +pub const CLOCAL: crate::tcflag_t = 2048; +pub const ECHOKE: crate::tcflag_t = 2048; +pub const ECHOE: crate::tcflag_t = 16; +pub const ECHOK: crate::tcflag_t = 32; +pub const ECHONL: crate::tcflag_t = 64; +pub const ECHOPRT: crate::tcflag_t = 1024; +pub const ECHOCTL: crate::tcflag_t = 512; +pub const ISIG: crate::tcflag_t = 1; +pub const ICANON: crate::tcflag_t = 2; +pub const PENDIN: crate::tcflag_t = 16384; +pub const NOFLSH: crate::tcflag_t = 128; +pub const CIBAUD: crate::tcflag_t = 269418496; +pub const CBAUDEX: crate::tcflag_t = 4096; +pub const VSWTC: usize = 7; +pub const OLCUC: crate::tcflag_t = 2; +pub const NLDLY: crate::tcflag_t = 256; +pub const CRDLY: crate::tcflag_t = 1536; +pub const TABDLY: crate::tcflag_t = 6144; +pub const BSDLY: crate::tcflag_t = 8192; +pub const FFDLY: crate::tcflag_t = 32768; +pub const VTDLY: crate::tcflag_t = 16384; +pub const XTABS: crate::tcflag_t = 6144; +pub const B0: crate::speed_t = 0; +pub const B50: crate::speed_t = 1; +pub const B75: crate::speed_t = 2; +pub const B110: crate::speed_t = 3; +pub const B134: crate::speed_t = 4; +pub const B150: crate::speed_t = 5; +pub const B200: crate::speed_t = 6; +pub const B300: crate::speed_t = 7; +pub const B600: crate::speed_t = 8; +pub const B1200: crate::speed_t = 9; +pub const B1800: crate::speed_t = 10; +pub const B2400: crate::speed_t = 11; +pub const B4800: crate::speed_t = 12; +pub const B9600: crate::speed_t = 13; +pub const B19200: crate::speed_t = 14; +pub const B38400: crate::speed_t = 15; +pub const EXTA: crate::speed_t = 14; +pub const EXTB: crate::speed_t = 15; +pub const B57600: crate::speed_t = 4097; +pub const B115200: crate::speed_t = 4098; +pub const B230400: crate::speed_t = 
4099; +pub const B460800: crate::speed_t = 4100; +pub const B500000: crate::speed_t = 4101; +pub const B576000: crate::speed_t = 4102; +pub const B921600: crate::speed_t = 4103; +pub const B1000000: crate::speed_t = 4104; +pub const B1152000: crate::speed_t = 4105; +pub const B1500000: crate::speed_t = 4106; +pub const B2000000: crate::speed_t = 4107; +pub const B2500000: crate::speed_t = 4108; +pub const B3000000: crate::speed_t = 4109; +pub const B3500000: crate::speed_t = 4110; +pub const B4000000: crate::speed_t = 4111; +pub const VEOL: usize = 11; +pub const VEOL2: usize = 16; +pub const VMIN: usize = 6; +pub const IEXTEN: crate::tcflag_t = 32768; +pub const TOSTOP: crate::tcflag_t = 256; +pub const FLUSHO: crate::tcflag_t = 4096; +pub const EXTPROC: crate::tcflag_t = 65536; +pub const __SIZEOF_PTHREAD_MUTEX_T: usize = 40; +pub const __SIZEOF_PTHREAD_RWLOCK_T: usize = 56; +pub const __SIZEOF_PTHREAD_BARRIER_T: usize = 32; +pub const NGREG: usize = 32; +pub const REG_PC: usize = 0; +pub const REG_RA: usize = 1; +pub const REG_SP: usize = 2; +pub const REG_TP: usize = 4; +pub const REG_S0: usize = 8; +pub const REG_S1: usize = 9; +pub const REG_A0: usize = 10; +pub const REG_S2: usize = 18; +pub const REG_NARGS: usize = 8; + +pub const SYS_read: c_long = 63; +pub const SYS_write: c_long = 64; +pub const SYS_close: c_long = 57; +pub const SYS_fstat: c_long = 80; +pub const SYS_lseek: c_long = 62; +pub const SYS_mmap: c_long = 222; +pub const SYS_mprotect: c_long = 226; +pub const SYS_munmap: c_long = 215; +pub const SYS_brk: c_long = 214; +pub const SYS_rt_sigaction: c_long = 134; +pub const SYS_rt_sigprocmask: c_long = 135; +pub const SYS_rt_sigreturn: c_long = 139; +pub const SYS_ioctl: c_long = 29; +pub const SYS_pread64: c_long = 67; +pub const SYS_pwrite64: c_long = 68; +pub const SYS_readv: c_long = 65; +pub const SYS_writev: c_long = 66; +pub const SYS_sched_yield: c_long = 124; +pub const SYS_mremap: c_long = 216; +pub const SYS_msync: c_long = 227; +pub const SYS_mincore: c_long = 232; +pub const SYS_madvise: c_long = 233; +pub const SYS_shmget: c_long = 194; +pub const SYS_shmat: c_long = 196; +pub const SYS_shmctl: c_long = 195; +pub const SYS_dup: c_long = 23; +pub const SYS_nanosleep: c_long = 101; +pub const SYS_getitimer: c_long = 102; +pub const SYS_setitimer: c_long = 103; +pub const SYS_getpid: c_long = 172; +pub const SYS_sendfile: c_long = 71; +pub const SYS_socket: c_long = 198; +pub const SYS_connect: c_long = 203; +pub const SYS_accept: c_long = 202; +pub const SYS_sendto: c_long = 206; +pub const SYS_recvfrom: c_long = 207; +pub const SYS_sendmsg: c_long = 211; +pub const SYS_recvmsg: c_long = 212; +pub const SYS_shutdown: c_long = 210; +pub const SYS_bind: c_long = 200; +pub const SYS_listen: c_long = 201; +pub const SYS_getsockname: c_long = 204; +pub const SYS_getpeername: c_long = 205; +pub const SYS_socketpair: c_long = 199; +pub const SYS_setsockopt: c_long = 208; +pub const SYS_getsockopt: c_long = 209; +pub const SYS_clone: c_long = 220; +pub const SYS_execve: c_long = 221; +pub const SYS_exit: c_long = 93; +pub const SYS_wait4: c_long = 260; +pub const SYS_kill: c_long = 129; +pub const SYS_uname: c_long = 160; +pub const SYS_semget: c_long = 190; +pub const SYS_semop: c_long = 193; +pub const SYS_semctl: c_long = 191; +pub const SYS_shmdt: c_long = 197; +pub const SYS_msgget: c_long = 186; +pub const SYS_msgsnd: c_long = 189; +pub const SYS_msgrcv: c_long = 188; +pub const SYS_msgctl: c_long = 187; +pub const SYS_fcntl: c_long = 25; +pub const SYS_flock: 
c_long = 32; +pub const SYS_fsync: c_long = 82; +pub const SYS_fdatasync: c_long = 83; +pub const SYS_truncate: c_long = 45; +pub const SYS_ftruncate: c_long = 46; +pub const SYS_getcwd: c_long = 17; +pub const SYS_chdir: c_long = 49; +pub const SYS_fchdir: c_long = 50; +pub const SYS_fchmod: c_long = 52; +pub const SYS_fchown: c_long = 55; +pub const SYS_umask: c_long = 166; +pub const SYS_gettimeofday: c_long = 169; +pub const SYS_getrlimit: c_long = 163; +pub const SYS_getrusage: c_long = 165; +pub const SYS_sysinfo: c_long = 179; +pub const SYS_times: c_long = 153; +pub const SYS_ptrace: c_long = 117; +pub const SYS_getuid: c_long = 174; +pub const SYS_syslog: c_long = 116; +pub const SYS_getgid: c_long = 176; +pub const SYS_setuid: c_long = 146; +pub const SYS_setgid: c_long = 144; +pub const SYS_geteuid: c_long = 175; +pub const SYS_getegid: c_long = 177; +pub const SYS_setpgid: c_long = 154; +pub const SYS_getppid: c_long = 173; +pub const SYS_setsid: c_long = 157; +pub const SYS_setreuid: c_long = 145; +pub const SYS_setregid: c_long = 143; +pub const SYS_getgroups: c_long = 158; +pub const SYS_setgroups: c_long = 159; +pub const SYS_setresuid: c_long = 147; +pub const SYS_getresuid: c_long = 148; +pub const SYS_setresgid: c_long = 149; +pub const SYS_getresgid: c_long = 150; +pub const SYS_getpgid: c_long = 155; +pub const SYS_setfsuid: c_long = 151; +pub const SYS_setfsgid: c_long = 152; +pub const SYS_getsid: c_long = 156; +pub const SYS_capget: c_long = 90; +pub const SYS_capset: c_long = 91; +pub const SYS_rt_sigpending: c_long = 136; +pub const SYS_rt_sigtimedwait: c_long = 137; +pub const SYS_rt_sigqueueinfo: c_long = 138; +pub const SYS_rt_sigsuspend: c_long = 133; +pub const SYS_sigaltstack: c_long = 132; +pub const SYS_personality: c_long = 92; +pub const SYS_statfs: c_long = 43; +pub const SYS_fstatfs: c_long = 44; +pub const SYS_getpriority: c_long = 141; +pub const SYS_setpriority: c_long = 140; +pub const SYS_sched_setparam: c_long = 118; +pub const SYS_sched_getparam: c_long = 121; +pub const SYS_sched_setscheduler: c_long = 119; +pub const SYS_sched_getscheduler: c_long = 120; +pub const SYS_sched_get_priority_max: c_long = 125; +pub const SYS_sched_get_priority_min: c_long = 126; +pub const SYS_sched_rr_get_interval: c_long = 127; +pub const SYS_mlock: c_long = 228; +pub const SYS_munlock: c_long = 229; +pub const SYS_mlockall: c_long = 230; +pub const SYS_munlockall: c_long = 231; +pub const SYS_vhangup: c_long = 58; +pub const SYS_pivot_root: c_long = 41; +pub const SYS_prctl: c_long = 167; +pub const SYS_adjtimex: c_long = 171; +pub const SYS_setrlimit: c_long = 164; +pub const SYS_chroot: c_long = 51; +pub const SYS_sync: c_long = 81; +pub const SYS_acct: c_long = 89; +pub const SYS_settimeofday: c_long = 170; +pub const SYS_mount: c_long = 40; +pub const SYS_umount2: c_long = 39; +pub const SYS_swapon: c_long = 224; +pub const SYS_swapoff: c_long = 225; +pub const SYS_reboot: c_long = 142; +pub const SYS_sethostname: c_long = 161; +pub const SYS_setdomainname: c_long = 162; +pub const SYS_init_module: c_long = 105; +pub const SYS_delete_module: c_long = 106; +pub const SYS_quotactl: c_long = 60; +pub const SYS_nfsservctl: c_long = 42; +pub const SYS_gettid: c_long = 178; +pub const SYS_readahead: c_long = 213; +pub const SYS_setxattr: c_long = 5; +pub const SYS_lsetxattr: c_long = 6; +pub const SYS_fsetxattr: c_long = 7; +pub const SYS_getxattr: c_long = 8; +pub const SYS_lgetxattr: c_long = 9; +pub const SYS_fgetxattr: c_long = 10; +pub const SYS_listxattr: 
c_long = 11; +pub const SYS_llistxattr: c_long = 12; +pub const SYS_flistxattr: c_long = 13; +pub const SYS_removexattr: c_long = 14; +pub const SYS_lremovexattr: c_long = 15; +pub const SYS_fremovexattr: c_long = 16; +pub const SYS_tkill: c_long = 130; +pub const SYS_futex: c_long = 98; +pub const SYS_sched_setaffinity: c_long = 122; +pub const SYS_sched_getaffinity: c_long = 123; +pub const SYS_io_setup: c_long = 0; +pub const SYS_io_destroy: c_long = 1; +pub const SYS_io_getevents: c_long = 4; +pub const SYS_io_submit: c_long = 2; +pub const SYS_io_cancel: c_long = 3; +pub const SYS_lookup_dcookie: c_long = 18; +pub const SYS_remap_file_pages: c_long = 234; +pub const SYS_getdents64: c_long = 61; +pub const SYS_set_tid_address: c_long = 96; +pub const SYS_restart_syscall: c_long = 128; +pub const SYS_semtimedop: c_long = 192; +pub const SYS_fadvise64: c_long = 223; +pub const SYS_timer_create: c_long = 107; +pub const SYS_timer_settime: c_long = 110; +pub const SYS_timer_gettime: c_long = 108; +pub const SYS_timer_getoverrun: c_long = 109; +pub const SYS_timer_delete: c_long = 111; +pub const SYS_clock_settime: c_long = 112; +pub const SYS_clock_gettime: c_long = 113; +pub const SYS_clock_getres: c_long = 114; +pub const SYS_clock_nanosleep: c_long = 115; +pub const SYS_exit_group: c_long = 94; +pub const SYS_epoll_ctl: c_long = 21; +pub const SYS_tgkill: c_long = 131; +pub const SYS_mbind: c_long = 235; +pub const SYS_set_mempolicy: c_long = 237; +pub const SYS_get_mempolicy: c_long = 236; +pub const SYS_mq_open: c_long = 180; +pub const SYS_mq_unlink: c_long = 181; +pub const SYS_mq_timedsend: c_long = 182; +pub const SYS_mq_timedreceive: c_long = 183; +pub const SYS_mq_notify: c_long = 184; +pub const SYS_mq_getsetattr: c_long = 185; +pub const SYS_kexec_load: c_long = 104; +pub const SYS_waitid: c_long = 95; +pub const SYS_add_key: c_long = 217; +pub const SYS_request_key: c_long = 218; +pub const SYS_keyctl: c_long = 219; +pub const SYS_ioprio_set: c_long = 30; +pub const SYS_ioprio_get: c_long = 31; +pub const SYS_inotify_add_watch: c_long = 27; +pub const SYS_inotify_rm_watch: c_long = 28; +pub const SYS_migrate_pages: c_long = 238; +pub const SYS_openat: c_long = 56; +pub const SYS_mkdirat: c_long = 34; +pub const SYS_mknodat: c_long = 33; +pub const SYS_fchownat: c_long = 54; +pub const SYS_newfstatat: c_long = 79; +pub const SYS_unlinkat: c_long = 35; +pub const SYS_linkat: c_long = 37; +pub const SYS_symlinkat: c_long = 36; +pub const SYS_readlinkat: c_long = 78; +pub const SYS_fchmodat: c_long = 53; +pub const SYS_faccessat: c_long = 48; +pub const SYS_pselect6: c_long = 72; +pub const SYS_ppoll: c_long = 73; +pub const SYS_unshare: c_long = 97; +pub const SYS_set_robust_list: c_long = 99; +pub const SYS_get_robust_list: c_long = 100; +pub const SYS_splice: c_long = 76; +pub const SYS_tee: c_long = 77; +pub const SYS_sync_file_range: c_long = 84; +pub const SYS_vmsplice: c_long = 75; +pub const SYS_move_pages: c_long = 239; +pub const SYS_utimensat: c_long = 88; +pub const SYS_epoll_pwait: c_long = 22; +pub const SYS_timerfd_create: c_long = 85; +pub const SYS_fallocate: c_long = 47; +pub const SYS_timerfd_settime: c_long = 86; +pub const SYS_timerfd_gettime: c_long = 87; +pub const SYS_accept4: c_long = 242; +pub const SYS_signalfd4: c_long = 74; +pub const SYS_eventfd2: c_long = 19; +pub const SYS_epoll_create1: c_long = 20; +pub const SYS_dup3: c_long = 24; +pub const SYS_pipe2: c_long = 59; +pub const SYS_inotify_init1: c_long = 26; +pub const SYS_preadv: c_long = 69; 
+pub const SYS_pwritev: c_long = 70; +pub const SYS_rt_tgsigqueueinfo: c_long = 240; +pub const SYS_perf_event_open: c_long = 241; +pub const SYS_recvmmsg: c_long = 243; +pub const SYS_fanotify_init: c_long = 262; +pub const SYS_fanotify_mark: c_long = 263; +pub const SYS_prlimit64: c_long = 261; +pub const SYS_name_to_handle_at: c_long = 264; +pub const SYS_open_by_handle_at: c_long = 265; +pub const SYS_clock_adjtime: c_long = 266; +pub const SYS_syncfs: c_long = 267; +pub const SYS_sendmmsg: c_long = 269; +pub const SYS_setns: c_long = 268; +pub const SYS_getcpu: c_long = 168; +pub const SYS_process_vm_readv: c_long = 270; +pub const SYS_process_vm_writev: c_long = 271; +pub const SYS_kcmp: c_long = 272; +pub const SYS_finit_module: c_long = 273; +pub const SYS_sched_setattr: c_long = 274; +pub const SYS_sched_getattr: c_long = 275; +pub const SYS_renameat2: c_long = 276; +pub const SYS_seccomp: c_long = 277; +pub const SYS_getrandom: c_long = 278; +pub const SYS_memfd_create: c_long = 279; +pub const SYS_bpf: c_long = 280; +pub const SYS_execveat: c_long = 281; +pub const SYS_userfaultfd: c_long = 282; +pub const SYS_membarrier: c_long = 283; +pub const SYS_mlock2: c_long = 284; +pub const SYS_copy_file_range: c_long = 285; +pub const SYS_preadv2: c_long = 286; +pub const SYS_pwritev2: c_long = 287; +pub const SYS_pkey_mprotect: c_long = 288; +pub const SYS_pkey_alloc: c_long = 289; +pub const SYS_pkey_free: c_long = 290; +pub const SYS_statx: c_long = 291; +pub const SYS_rseq: c_long = 293; +pub const SYS_pidfd_send_signal: c_long = 424; +pub const SYS_io_uring_setup: c_long = 425; +pub const SYS_io_uring_enter: c_long = 426; +pub const SYS_io_uring_register: c_long = 427; +pub const SYS_open_tree: c_long = 428; +pub const SYS_move_mount: c_long = 429; +pub const SYS_fsopen: c_long = 430; +pub const SYS_fsconfig: c_long = 431; +pub const SYS_fsmount: c_long = 432; +pub const SYS_fspick: c_long = 433; +pub const SYS_pidfd_open: c_long = 434; +pub const SYS_clone3: c_long = 435; +pub const SYS_close_range: c_long = 436; +pub const SYS_openat2: c_long = 437; +pub const SYS_pidfd_getfd: c_long = 438; +pub const SYS_faccessat2: c_long = 439; +pub const SYS_process_madvise: c_long = 440; +pub const SYS_epoll_pwait2: c_long = 441; +pub const SYS_mount_setattr: c_long = 442; +pub const SYS_quotactl_fd: c_long = 443; +pub const SYS_landlock_create_ruleset: c_long = 444; +pub const SYS_landlock_add_rule: c_long = 445; +pub const SYS_landlock_restrict_self: c_long = 446; +pub const SYS_memfd_secret: c_long = 447; +pub const SYS_process_mrelease: c_long = 448; +pub const SYS_futex_waitv: c_long = 449; +pub const SYS_set_mempolicy_home_node: c_long = 450; diff --git a/vendor/libc/src/unix/linux_like/linux/gnu/b32/sparc/mod.rs b/vendor/libc/src/unix/linux_like/linux/gnu/b32/sparc/mod.rs new file mode 100644 index 00000000000000..801f31e2c0e340 --- /dev/null +++ b/vendor/libc/src/unix/linux_like/linux/gnu/b32/sparc/mod.rs @@ -0,0 +1,865 @@ +//! SPARC-specific definitions for 32-bit linux-like values + +use crate::prelude::*; +use crate::{off64_t, off_t}; + +pub type wchar_t = i32; + +s! 
{ + // FIXME(1.0): This should not implement `PartialEq` + #[allow(unpredictable_function_pointer_comparisons)] + pub struct sigaction { + pub sa_sigaction: crate::sighandler_t, + pub sa_mask: crate::sigset_t, + pub sa_flags: c_int, + pub sa_restorer: Option, + } + + pub struct statfs { + pub f_type: crate::__fsword_t, + pub f_bsize: crate::__fsword_t, + pub f_blocks: crate::fsblkcnt_t, + pub f_bfree: crate::fsblkcnt_t, + pub f_bavail: crate::fsblkcnt_t, + + pub f_files: crate::fsfilcnt_t, + pub f_ffree: crate::fsfilcnt_t, + pub f_fsid: crate::fsid_t, + + pub f_namelen: crate::__fsword_t, + pub f_frsize: crate::__fsword_t, + pub f_flags: crate::__fsword_t, + f_spare: [crate::__fsword_t; 4], + } + + pub struct siginfo_t { + pub si_signo: c_int, + pub si_errno: c_int, + pub si_code: c_int, + _pad: [c_int; 29], + _align: [usize; 0], + } + + pub struct flock { + pub l_type: c_short, + pub l_whence: c_short, + pub l_start: off_t, + pub l_len: off_t, + pub l_pid: crate::pid_t, + } + + pub struct flock64 { + pub l_type: c_short, + pub l_whence: c_short, + pub l_start: off64_t, + pub l_len: off64_t, + pub l_pid: crate::pid_t, + __reserved: c_short, + } + + pub struct stack_t { + pub ss_sp: *mut c_void, + pub ss_flags: c_int, + pub ss_size: size_t, + } + + pub struct stat { + pub st_dev: crate::dev_t, + #[cfg(not(gnu_file_offset_bits64))] + __pad1: c_ushort, + pub st_ino: crate::ino_t, + pub st_mode: crate::mode_t, + pub st_nlink: crate::nlink_t, + pub st_uid: crate::uid_t, + pub st_gid: crate::gid_t, + pub st_rdev: crate::dev_t, + __pad2: c_ushort, + pub st_size: off_t, + pub st_blksize: crate::blksize_t, + pub st_blocks: crate::blkcnt_t, + pub st_atime: crate::time_t, + pub st_atime_nsec: c_long, + pub st_mtime: crate::time_t, + pub st_mtime_nsec: c_long, + pub st_ctime: crate::time_t, + pub st_ctime_nsec: c_long, + __glibc_reserved4: c_ulong, + __glibc_reserved5: c_ulong, + } + + pub struct stat64 { + pub st_dev: crate::dev_t, + pub st_ino: crate::ino64_t, + pub st_mode: crate::mode_t, + pub st_nlink: crate::nlink_t, + pub st_uid: crate::uid_t, + pub st_gid: crate::gid_t, + pub st_rdev: crate::dev_t, + __pad2: c_ushort, + pub st_size: off64_t, + pub st_blksize: crate::blksize_t, + pub st_blocks: crate::blkcnt64_t, + pub st_atime: crate::time_t, + pub st_atime_nsec: c_long, + pub st_mtime: crate::time_t, + pub st_mtime_nsec: c_long, + pub st_ctime: crate::time_t, + pub st_ctime_nsec: c_long, + __glibc_reserved4: c_ulong, + __glibc_reserved5: c_ulong, + } + + pub struct statfs64 { + pub f_type: crate::__fsword_t, + pub f_bsize: crate::__fsword_t, + pub f_blocks: u64, + pub f_bfree: u64, + pub f_bavail: u64, + pub f_files: u64, + pub f_ffree: u64, + pub f_fsid: crate::fsid_t, + pub f_namelen: crate::__fsword_t, + pub f_frsize: crate::__fsword_t, + pub f_flags: crate::__fsword_t, + pub f_spare: [crate::__fsword_t; 4], + } + + pub struct statvfs64 { + pub f_bsize: c_ulong, + pub f_frsize: c_ulong, + pub f_blocks: u64, + pub f_bfree: u64, + pub f_bavail: u64, + pub f_files: u64, + pub f_ffree: u64, + pub f_favail: u64, + pub f_fsid: c_ulong, + __f_unused: c_int, + pub f_flag: c_ulong, + pub f_namemax: c_ulong, + __f_spare: [c_int; 6], + } + + pub struct ipc_perm { + pub __key: crate::key_t, + pub uid: crate::uid_t, + pub gid: crate::gid_t, + pub cuid: crate::uid_t, + pub cgid: crate::gid_t, + __pad1: c_ushort, + pub mode: c_ushort, + __pad2: c_ushort, + pub __seq: c_ushort, + __unused1: c_ulonglong, + __unused2: c_ulonglong, + } + + pub struct shmid_ds { + pub shm_perm: crate::ipc_perm, + 
#[cfg(gnu_time_bits64)] + pub shm_segsz: size_t, + #[cfg(not(gnu_time_bits64))] + __pad1: c_uint, + pub shm_atime: crate::time_t, + #[cfg(not(gnu_time_bits64))] + __pad2: c_uint, + pub shm_dtime: crate::time_t, + #[cfg(not(gnu_time_bits64))] + __pad3: c_uint, + pub shm_ctime: crate::time_t, + #[cfg(not(gnu_time_bits64))] + pub shm_segsz: size_t, + pub shm_cpid: crate::pid_t, + pub shm_lpid: crate::pid_t, + pub shm_nattch: crate::shmatt_t, + __reserved1: c_ulong, + __reserved2: c_ulong, + } + + pub struct msqid_ds { + pub msg_perm: crate::ipc_perm, + #[cfg(not(gnu_time_bits64))] + __pad1: c_uint, + pub msg_stime: crate::time_t, + #[cfg(not(gnu_time_bits64))] + __pad2: c_uint, + pub msg_rtime: crate::time_t, + #[cfg(not(gnu_time_bits64))] + __pad3: c_uint, + pub msg_ctime: crate::time_t, + pub __msg_cbytes: c_ulong, + pub msg_qnum: crate::msgqnum_t, + pub msg_qbytes: crate::msglen_t, + pub msg_lspid: crate::pid_t, + pub msg_lrpid: crate::pid_t, + __glibc_reserved4: c_ulong, + __glibc_reserved5: c_ulong, + } +} + +s_no_extra_traits! { + #[repr(align(8))] + pub struct max_align_t { + priv_: [i64; 3], + } +} + +pub const VEOF: usize = 4; +pub const RTLD_DEEPBIND: c_int = 0x8; +pub const RTLD_GLOBAL: c_int = 0x100; +pub const RTLD_NOLOAD: c_int = 0x4; + +pub const O_APPEND: c_int = 0x8; +pub const O_CREAT: c_int = 0x200; +pub const O_EXCL: c_int = 0x800; +pub const O_NOCTTY: c_int = 0x8000; +pub const O_NONBLOCK: c_int = 0x4000; +pub const O_SYNC: c_int = 0x802000; +pub const O_RSYNC: c_int = 0x802000; +pub const O_DSYNC: c_int = 0x2000; +pub const O_FSYNC: c_int = 0x802000; + +pub const MADV_SOFT_OFFLINE: c_int = 101; +pub const MAP_GROWSDOWN: c_int = 0x0200; +pub const MAP_ANON: c_int = 0x0020; +pub const MAP_ANONYMOUS: c_int = 0x0020; +pub const MAP_DENYWRITE: c_int = 0x0800; +pub const MAP_EXECUTABLE: c_int = 0x01000; +pub const MAP_POPULATE: c_int = 0x08000; +pub const MAP_NONBLOCK: c_int = 0x010000; +pub const MAP_STACK: c_int = 0x020000; +pub const MAP_HUGETLB: c_int = 0x040000; +pub const MAP_SYNC: c_int = 0x080000; + +pub const EDEADLK: c_int = 78; +pub const ENAMETOOLONG: c_int = 63; +pub const ENOLCK: c_int = 79; +pub const ENOSYS: c_int = 90; +pub const ENOTEMPTY: c_int = 66; +pub const ELOOP: c_int = 62; +pub const ENOMSG: c_int = 75; +pub const EIDRM: c_int = 77; +pub const ECHRNG: c_int = 94; +pub const EL2NSYNC: c_int = 95; +pub const EL3HLT: c_int = 96; +pub const EL3RST: c_int = 97; +pub const ELNRNG: c_int = 98; +pub const EUNATCH: c_int = 99; +pub const ENOCSI: c_int = 100; +pub const EL2HLT: c_int = 101; +pub const EBADE: c_int = 102; +pub const EBADR: c_int = 103; +pub const EXFULL: c_int = 104; +pub const ENOANO: c_int = 105; +pub const EBADRQC: c_int = 106; +pub const EBADSLT: c_int = 107; +pub const EMULTIHOP: c_int = 87; +pub const EOVERFLOW: c_int = 92; +pub const ENOTUNIQ: c_int = 115; +pub const EBADFD: c_int = 93; +pub const EBADMSG: c_int = 76; +pub const EREMCHG: c_int = 89; +pub const ELIBACC: c_int = 114; +pub const ELIBBAD: c_int = 112; +pub const ELIBSCN: c_int = 124; +pub const ELIBMAX: c_int = 123; +pub const ELIBEXEC: c_int = 110; +pub const EILSEQ: c_int = 122; +pub const ERESTART: c_int = 116; +pub const ESTRPIPE: c_int = 91; +pub const EUSERS: c_int = 68; +pub const ENOTSOCK: c_int = 38; +pub const EDESTADDRREQ: c_int = 39; +pub const EMSGSIZE: c_int = 40; +pub const EPROTOTYPE: c_int = 41; +pub const ENOPROTOOPT: c_int = 42; +pub const EPROTONOSUPPORT: c_int = 43; +pub const ESOCKTNOSUPPORT: c_int = 44; +pub const EOPNOTSUPP: c_int = 45; +pub const 
EPFNOSUPPORT: c_int = 46; +pub const EAFNOSUPPORT: c_int = 47; +pub const EADDRINUSE: c_int = 48; +pub const EADDRNOTAVAIL: c_int = 49; +pub const ENETDOWN: c_int = 50; +pub const ENETUNREACH: c_int = 51; +pub const ENETRESET: c_int = 52; +pub const ECONNABORTED: c_int = 53; +pub const ECONNRESET: c_int = 54; +pub const ENOBUFS: c_int = 55; +pub const EISCONN: c_int = 56; +pub const ENOTCONN: c_int = 57; +pub const ESHUTDOWN: c_int = 58; +pub const ETOOMANYREFS: c_int = 59; +pub const ETIMEDOUT: c_int = 60; +pub const ECONNREFUSED: c_int = 61; +pub const EHOSTDOWN: c_int = 64; +pub const EHOSTUNREACH: c_int = 65; +pub const EALREADY: c_int = 37; +pub const EINPROGRESS: c_int = 36; +pub const ESTALE: c_int = 70; +pub const EDQUOT: c_int = 69; +pub const ENOMEDIUM: c_int = 125; +pub const EMEDIUMTYPE: c_int = 126; +pub const ECANCELED: c_int = 127; +pub const ENOKEY: c_int = 128; +pub const EKEYEXPIRED: c_int = 129; +pub const EKEYREVOKED: c_int = 130; +pub const EKEYREJECTED: c_int = 131; +pub const EOWNERDEAD: c_int = 132; +pub const ENOTRECOVERABLE: c_int = 133; +pub const EHWPOISON: c_int = 135; +pub const ERFKILL: c_int = 134; + +pub const SOCK_STREAM: c_int = 1; +pub const SOCK_DGRAM: c_int = 2; + +pub const SA_SIGINFO: c_int = 0x200; +pub const SA_NOCLDWAIT: c_int = 0x100; + +pub const SIGEMT: c_int = 7; +pub const SIGTTIN: c_int = 21; +pub const SIGTTOU: c_int = 22; +pub const SIGXCPU: c_int = 24; +pub const SIGXFSZ: c_int = 25; +pub const SIGVTALRM: c_int = 26; +pub const SIGPROF: c_int = 27; +pub const SIGWINCH: c_int = 28; +pub const SIGCHLD: c_int = 20; +pub const SIGBUS: c_int = 10; +pub const SIGUSR1: c_int = 30; +pub const SIGUSR2: c_int = 31; +pub const SIGCONT: c_int = 19; +pub const SIGSTOP: c_int = 17; +pub const SIGTSTP: c_int = 18; +pub const SIGURG: c_int = 16; +pub const SIGIO: c_int = 23; +pub const SIGSYS: c_int = 12; +pub const SIGPOLL: c_int = 23; +pub const SIGPWR: c_int = 29; +pub const SIG_SETMASK: c_int = 4; +pub const SIG_BLOCK: c_int = 1; +pub const SIG_UNBLOCK: c_int = 2; + +pub const POLLWRNORM: c_short = 4; +pub const POLLWRBAND: c_short = 0x100; + +pub const O_ASYNC: c_int = 0x40; +pub const O_NDELAY: c_int = 0x4004; + +pub const EFD_NONBLOCK: c_int = 0x4000; + +pub const F_GETLK: c_int = 7; +pub const F_GETOWN: c_int = 5; +pub const F_SETOWN: c_int = 6; + +pub const SFD_NONBLOCK: c_int = 0x4000; + +pub const TCSANOW: c_int = 0; +pub const TCSADRAIN: c_int = 1; +pub const TCSAFLUSH: c_int = 2; + +pub const O_DIRECTORY: c_int = 0o200000; +pub const O_NOFOLLOW: c_int = 0o400000; +pub const O_LARGEFILE: c_int = 0x40000; +pub const O_DIRECT: c_int = 0x100000; + +pub const MAP_LOCKED: c_int = 0x0100; +pub const MAP_NORESERVE: c_int = 0x00040; + +pub const EDEADLOCK: c_int = 108; +pub const EUCLEAN: c_int = 117; +pub const ENOTNAM: c_int = 118; +pub const ENAVAIL: c_int = 119; +pub const EISNAM: c_int = 120; +pub const EREMOTEIO: c_int = 121; + +pub const MCL_CURRENT: c_int = 0x2000; +pub const MCL_FUTURE: c_int = 0x4000; +pub const MCL_ONFAULT: c_int = 0x8000; + +pub const SIGSTKSZ: size_t = 16384; +pub const MINSIGSTKSZ: size_t = 4096; +pub const CBAUD: crate::tcflag_t = 0x0000100f; +pub const TAB1: crate::tcflag_t = 0x800; +pub const TAB2: crate::tcflag_t = 0x1000; +pub const TAB3: crate::tcflag_t = 0x1800; +pub const CR1: crate::tcflag_t = 0x200; +pub const CR2: crate::tcflag_t = 0x400; +pub const CR3: crate::tcflag_t = 0x600; +pub const FF1: crate::tcflag_t = 0x8000; +pub const BS1: crate::tcflag_t = 0x2000; +pub const VT1: crate::tcflag_t = 0x4000; +pub 
const VWERASE: usize = 0xe; +pub const VREPRINT: usize = 0xc; +pub const VSUSP: usize = 0xa; +pub const VSTART: usize = 0x8; +pub const VSTOP: usize = 0x9; +pub const VDISCARD: usize = 0xd; +pub const VTIME: usize = 0x5; +pub const IXON: crate::tcflag_t = 0x400; +pub const IXOFF: crate::tcflag_t = 0x1000; +pub const ONLCR: crate::tcflag_t = 0x4; +pub const CSIZE: crate::tcflag_t = 0x30; +pub const CS6: crate::tcflag_t = 0x10; +pub const CS7: crate::tcflag_t = 0x20; +pub const CS8: crate::tcflag_t = 0x30; +pub const CSTOPB: crate::tcflag_t = 0x40; +pub const CREAD: crate::tcflag_t = 0x80; +pub const PARENB: crate::tcflag_t = 0x100; +pub const PARODD: crate::tcflag_t = 0x200; +pub const HUPCL: crate::tcflag_t = 0x400; +pub const CLOCAL: crate::tcflag_t = 0x800; +pub const ECHOKE: crate::tcflag_t = 0x800; +pub const ECHOE: crate::tcflag_t = 0x10; +pub const ECHOK: crate::tcflag_t = 0x20; +pub const ECHONL: crate::tcflag_t = 0x40; +pub const ECHOPRT: crate::tcflag_t = 0x400; +pub const ECHOCTL: crate::tcflag_t = 0x200; +pub const ISIG: crate::tcflag_t = 0x1; +pub const ICANON: crate::tcflag_t = 0x2; +pub const PENDIN: crate::tcflag_t = 0x4000; +pub const NOFLSH: crate::tcflag_t = 0x80; +pub const CIBAUD: crate::tcflag_t = 0o02003600000; +pub const CBAUDEX: crate::tcflag_t = 0x00001000; +pub const VSWTC: usize = 7; +pub const OLCUC: crate::tcflag_t = 0o000002; +pub const NLDLY: crate::tcflag_t = 0o000400; +pub const CRDLY: crate::tcflag_t = 0o003000; +pub const TABDLY: crate::tcflag_t = 0o014000; +pub const BSDLY: crate::tcflag_t = 0o020000; +pub const FFDLY: crate::tcflag_t = 0o100000; +pub const VTDLY: crate::tcflag_t = 0o040000; +pub const XTABS: crate::tcflag_t = 0o014000; + +pub const B0: crate::speed_t = 0o000000; +pub const B50: crate::speed_t = 0o000001; +pub const B75: crate::speed_t = 0o000002; +pub const B110: crate::speed_t = 0o000003; +pub const B134: crate::speed_t = 0o000004; +pub const B150: crate::speed_t = 0o000005; +pub const B200: crate::speed_t = 0o000006; +pub const B300: crate::speed_t = 0o000007; +pub const B600: crate::speed_t = 0o000010; +pub const B1200: crate::speed_t = 0o000011; +pub const B1800: crate::speed_t = 0o000012; +pub const B2400: crate::speed_t = 0o000013; +pub const B4800: crate::speed_t = 0o000014; +pub const B9600: crate::speed_t = 0o000015; +pub const B19200: crate::speed_t = 0o000016; +pub const B38400: crate::speed_t = 0o000017; +pub const EXTA: crate::speed_t = B19200; +pub const EXTB: crate::speed_t = B38400; +pub const B57600: crate::speed_t = 0x1001; +pub const B115200: crate::speed_t = 0x1002; +pub const B230400: crate::speed_t = 0x1003; +pub const B460800: crate::speed_t = 0x1004; +pub const B76800: crate::speed_t = 0x1005; +pub const B153600: crate::speed_t = 0x1006; +pub const B307200: crate::speed_t = 0x1007; +pub const B614400: crate::speed_t = 0x1008; +pub const B921600: crate::speed_t = 0x1009; +pub const B500000: crate::speed_t = 0x100a; +pub const B576000: crate::speed_t = 0x100b; +pub const B1000000: crate::speed_t = 0x100c; +pub const B1152000: crate::speed_t = 0x100d; +pub const B1500000: crate::speed_t = 0x100e; +pub const B2000000: crate::speed_t = 0x100f; + +pub const VEOL: usize = 5; +pub const VEOL2: usize = 6; +pub const VMIN: usize = 4; +pub const IEXTEN: crate::tcflag_t = 0x8000; +pub const TOSTOP: crate::tcflag_t = 0x100; +pub const FLUSHO: crate::tcflag_t = 0x1000; +pub const EXTPROC: crate::tcflag_t = 0x10000; + +pub const SYS_restart_syscall: c_long = 0; +pub const SYS_exit: c_long = 1; +pub const SYS_fork: c_long = 2; 
+pub const SYS_read: c_long = 3; +pub const SYS_write: c_long = 4; +pub const SYS_open: c_long = 5; +pub const SYS_close: c_long = 6; +pub const SYS_wait4: c_long = 7; +pub const SYS_creat: c_long = 8; +pub const SYS_link: c_long = 9; +pub const SYS_unlink: c_long = 10; +pub const SYS_execv: c_long = 11; +pub const SYS_chdir: c_long = 12; +pub const SYS_chown: c_long = 13; +pub const SYS_mknod: c_long = 14; +pub const SYS_chmod: c_long = 15; +pub const SYS_lchown: c_long = 16; +pub const SYS_brk: c_long = 17; +pub const SYS_perfctr: c_long = 18; +pub const SYS_lseek: c_long = 19; +pub const SYS_getpid: c_long = 20; +pub const SYS_capget: c_long = 21; +pub const SYS_capset: c_long = 22; +pub const SYS_setuid: c_long = 23; +pub const SYS_getuid: c_long = 24; +pub const SYS_vmsplice: c_long = 25; +pub const SYS_ptrace: c_long = 26; +pub const SYS_alarm: c_long = 27; +pub const SYS_sigaltstack: c_long = 28; +pub const SYS_pause: c_long = 29; +pub const SYS_utime: c_long = 30; +pub const SYS_lchown32: c_long = 31; +pub const SYS_fchown32: c_long = 32; +pub const SYS_access: c_long = 33; +pub const SYS_nice: c_long = 34; +pub const SYS_chown32: c_long = 35; +pub const SYS_sync: c_long = 36; +pub const SYS_kill: c_long = 37; +pub const SYS_stat: c_long = 38; +pub const SYS_sendfile: c_long = 39; +pub const SYS_lstat: c_long = 40; +pub const SYS_dup: c_long = 41; +pub const SYS_pipe: c_long = 42; +pub const SYS_times: c_long = 43; +pub const SYS_getuid32: c_long = 44; +pub const SYS_umount2: c_long = 45; +pub const SYS_setgid: c_long = 46; +pub const SYS_getgid: c_long = 47; +pub const SYS_signal: c_long = 48; +pub const SYS_geteuid: c_long = 49; +pub const SYS_getegid: c_long = 50; +pub const SYS_acct: c_long = 51; +pub const SYS_getgid32: c_long = 53; +pub const SYS_ioctl: c_long = 54; +pub const SYS_reboot: c_long = 55; +pub const SYS_mmap2: c_long = 56; +pub const SYS_symlink: c_long = 57; +pub const SYS_readlink: c_long = 58; +pub const SYS_execve: c_long = 59; +pub const SYS_umask: c_long = 60; +pub const SYS_chroot: c_long = 61; +pub const SYS_fstat: c_long = 62; +pub const SYS_fstat64: c_long = 63; +pub const SYS_getpagesize: c_long = 64; +pub const SYS_msync: c_long = 65; +pub const SYS_vfork: c_long = 66; +pub const SYS_pread64: c_long = 67; +pub const SYS_pwrite64: c_long = 68; +pub const SYS_geteuid32: c_long = 69; +pub const SYS_getegid32: c_long = 70; +pub const SYS_mmap: c_long = 71; +pub const SYS_setreuid32: c_long = 72; +pub const SYS_munmap: c_long = 73; +pub const SYS_mprotect: c_long = 74; +pub const SYS_madvise: c_long = 75; +pub const SYS_vhangup: c_long = 76; +pub const SYS_truncate64: c_long = 77; +pub const SYS_mincore: c_long = 78; +pub const SYS_getgroups: c_long = 79; +pub const SYS_setgroups: c_long = 80; +pub const SYS_getpgrp: c_long = 81; +pub const SYS_setgroups32: c_long = 82; +pub const SYS_setitimer: c_long = 83; +pub const SYS_ftruncate64: c_long = 84; +pub const SYS_swapon: c_long = 85; +pub const SYS_getitimer: c_long = 86; +pub const SYS_setuid32: c_long = 87; +pub const SYS_sethostname: c_long = 88; +pub const SYS_setgid32: c_long = 89; +pub const SYS_dup2: c_long = 90; +pub const SYS_setfsuid32: c_long = 91; +pub const SYS_fcntl: c_long = 92; +pub const SYS_select: c_long = 93; +pub const SYS_setfsgid32: c_long = 94; +pub const SYS_fsync: c_long = 95; +pub const SYS_setpriority: c_long = 96; +pub const SYS_socket: c_long = 97; +pub const SYS_connect: c_long = 98; +pub const SYS_accept: c_long = 99; +pub const SYS_getpriority: c_long = 100; +pub const 
SYS_rt_sigreturn: c_long = 101; +pub const SYS_rt_sigaction: c_long = 102; +pub const SYS_rt_sigprocmask: c_long = 103; +pub const SYS_rt_sigpending: c_long = 104; +pub const SYS_rt_sigtimedwait: c_long = 105; +pub const SYS_rt_sigqueueinfo: c_long = 106; +pub const SYS_rt_sigsuspend: c_long = 107; +pub const SYS_setresuid32: c_long = 108; +pub const SYS_getresuid32: c_long = 109; +pub const SYS_setresgid32: c_long = 110; +pub const SYS_getresgid32: c_long = 111; +pub const SYS_setregid32: c_long = 112; +pub const SYS_recvmsg: c_long = 113; +pub const SYS_sendmsg: c_long = 114; +pub const SYS_getgroups32: c_long = 115; +pub const SYS_gettimeofday: c_long = 116; +pub const SYS_getrusage: c_long = 117; +pub const SYS_getsockopt: c_long = 118; +pub const SYS_getcwd: c_long = 119; +pub const SYS_readv: c_long = 120; +pub const SYS_writev: c_long = 121; +pub const SYS_settimeofday: c_long = 122; +pub const SYS_fchown: c_long = 123; +pub const SYS_fchmod: c_long = 124; +pub const SYS_recvfrom: c_long = 125; +pub const SYS_setreuid: c_long = 126; +pub const SYS_setregid: c_long = 127; +pub const SYS_rename: c_long = 128; +pub const SYS_truncate: c_long = 129; +pub const SYS_ftruncate: c_long = 130; +pub const SYS_flock: c_long = 131; +pub const SYS_lstat64: c_long = 132; +pub const SYS_sendto: c_long = 133; +pub const SYS_shutdown: c_long = 134; +pub const SYS_socketpair: c_long = 135; +pub const SYS_mkdir: c_long = 136; +pub const SYS_rmdir: c_long = 137; +pub const SYS_utimes: c_long = 138; +pub const SYS_stat64: c_long = 139; +pub const SYS_sendfile64: c_long = 140; +pub const SYS_getpeername: c_long = 141; +pub const SYS_futex: c_long = 142; +pub const SYS_gettid: c_long = 143; +pub const SYS_getrlimit: c_long = 144; +pub const SYS_setrlimit: c_long = 145; +pub const SYS_pivot_root: c_long = 146; +pub const SYS_prctl: c_long = 147; +pub const SYS_pciconfig_read: c_long = 148; +pub const SYS_pciconfig_write: c_long = 149; +pub const SYS_getsockname: c_long = 150; +pub const SYS_inotify_init: c_long = 151; +pub const SYS_inotify_add_watch: c_long = 152; +pub const SYS_poll: c_long = 153; +pub const SYS_getdents64: c_long = 154; +pub const SYS_fcntl64: c_long = 155; +pub const SYS_inotify_rm_watch: c_long = 156; +pub const SYS_statfs: c_long = 157; +pub const SYS_fstatfs: c_long = 158; +pub const SYS_umount: c_long = 159; +pub const SYS_sched_set_affinity: c_long = 160; +pub const SYS_sched_get_affinity: c_long = 161; +pub const SYS_getdomainname: c_long = 162; +pub const SYS_setdomainname: c_long = 163; +pub const SYS_quotactl: c_long = 165; +pub const SYS_set_tid_address: c_long = 166; +pub const SYS_mount: c_long = 167; +pub const SYS_ustat: c_long = 168; +pub const SYS_setxattr: c_long = 169; +pub const SYS_lsetxattr: c_long = 170; +pub const SYS_fsetxattr: c_long = 171; +pub const SYS_getxattr: c_long = 172; +pub const SYS_lgetxattr: c_long = 173; +pub const SYS_getdents: c_long = 174; +pub const SYS_setsid: c_long = 175; +pub const SYS_fchdir: c_long = 176; +pub const SYS_fgetxattr: c_long = 177; +pub const SYS_listxattr: c_long = 178; +pub const SYS_llistxattr: c_long = 179; +pub const SYS_flistxattr: c_long = 180; +pub const SYS_removexattr: c_long = 181; +pub const SYS_lremovexattr: c_long = 182; +pub const SYS_sigpending: c_long = 183; +#[deprecated(since = "0.2.70", note = "Functional up to 2.6 kernel")] +pub const SYS_query_module: c_long = 184; +pub const SYS_setpgid: c_long = 185; +pub const SYS_fremovexattr: c_long = 186; +pub const SYS_tkill: c_long = 187; +pub const 
SYS_exit_group: c_long = 188; +pub const SYS_uname: c_long = 189; +pub const SYS_init_module: c_long = 190; +pub const SYS_personality: c_long = 191; +pub const SYS_remap_file_pages: c_long = 192; +pub const SYS_epoll_create: c_long = 193; +pub const SYS_epoll_ctl: c_long = 194; +pub const SYS_epoll_wait: c_long = 195; +pub const SYS_ioprio_set: c_long = 196; +pub const SYS_getppid: c_long = 197; +pub const SYS_sigaction: c_long = 198; +pub const SYS_sgetmask: c_long = 199; +pub const SYS_ssetmask: c_long = 200; +pub const SYS_sigsuspend: c_long = 201; +pub const SYS_oldlstat: c_long = 202; +pub const SYS_uselib: c_long = 203; +pub const SYS_readdir: c_long = 204; +pub const SYS_readahead: c_long = 205; +pub const SYS_socketcall: c_long = 206; +pub const SYS_syslog: c_long = 207; +pub const SYS_lookup_dcookie: c_long = 208; +pub const SYS_fadvise64: c_long = 209; +pub const SYS_fadvise64_64: c_long = 210; +pub const SYS_tgkill: c_long = 211; +pub const SYS_waitpid: c_long = 212; +pub const SYS_swapoff: c_long = 213; +pub const SYS_sysinfo: c_long = 214; +pub const SYS_ipc: c_long = 215; +pub const SYS_sigreturn: c_long = 216; +pub const SYS_clone: c_long = 217; +pub const SYS_ioprio_get: c_long = 218; +pub const SYS_adjtimex: c_long = 219; +pub const SYS_sigprocmask: c_long = 220; +#[deprecated(since = "0.2.70", note = "Functional up to 2.6 kernel")] +pub const SYS_create_module: c_long = 221; +pub const SYS_delete_module: c_long = 222; +#[deprecated(since = "0.2.70", note = "Functional up to 2.6 kernel")] +pub const SYS_get_kernel_syms: c_long = 223; +pub const SYS_getpgid: c_long = 224; +pub const SYS_bdflush: c_long = 225; +pub const SYS_sysfs: c_long = 226; +pub const SYS_afs_syscall: c_long = 227; +pub const SYS_setfsuid: c_long = 228; +pub const SYS_setfsgid: c_long = 229; +pub const SYS__newselect: c_long = 230; +pub const SYS_time: c_long = 231; +pub const SYS_splice: c_long = 232; +pub const SYS_stime: c_long = 233; +pub const SYS_statfs64: c_long = 234; +pub const SYS_fstatfs64: c_long = 235; +pub const SYS__llseek: c_long = 236; +pub const SYS_mlock: c_long = 237; +pub const SYS_munlock: c_long = 238; +pub const SYS_mlockall: c_long = 239; +pub const SYS_munlockall: c_long = 240; +pub const SYS_sched_setparam: c_long = 241; +pub const SYS_sched_getparam: c_long = 242; +pub const SYS_sched_setscheduler: c_long = 243; +pub const SYS_sched_getscheduler: c_long = 244; +pub const SYS_sched_yield: c_long = 245; +pub const SYS_sched_get_priority_max: c_long = 246; +pub const SYS_sched_get_priority_min: c_long = 247; +pub const SYS_sched_rr_get_interval: c_long = 248; +pub const SYS_nanosleep: c_long = 249; +pub const SYS_mremap: c_long = 250; +pub const SYS__sysctl: c_long = 251; +pub const SYS_getsid: c_long = 252; +pub const SYS_fdatasync: c_long = 253; +pub const SYS_nfsservctl: c_long = 254; +pub const SYS_sync_file_range: c_long = 255; +pub const SYS_clock_settime: c_long = 256; +pub const SYS_clock_gettime: c_long = 257; +pub const SYS_clock_getres: c_long = 258; +pub const SYS_clock_nanosleep: c_long = 259; +pub const SYS_sched_getaffinity: c_long = 260; +pub const SYS_sched_setaffinity: c_long = 261; +pub const SYS_timer_settime: c_long = 262; +pub const SYS_timer_gettime: c_long = 263; +pub const SYS_timer_getoverrun: c_long = 264; +pub const SYS_timer_delete: c_long = 265; +pub const SYS_timer_create: c_long = 266; +pub const SYS_io_setup: c_long = 268; +pub const SYS_io_destroy: c_long = 269; +pub const SYS_io_submit: c_long = 270; +pub const SYS_io_cancel: c_long = 271; 
+pub const SYS_io_getevents: c_long = 272; +pub const SYS_mq_open: c_long = 273; +pub const SYS_mq_unlink: c_long = 274; +pub const SYS_mq_timedsend: c_long = 275; +pub const SYS_mq_timedreceive: c_long = 276; +pub const SYS_mq_notify: c_long = 277; +pub const SYS_mq_getsetattr: c_long = 278; +pub const SYS_waitid: c_long = 279; +pub const SYS_tee: c_long = 280; +pub const SYS_add_key: c_long = 281; +pub const SYS_request_key: c_long = 282; +pub const SYS_keyctl: c_long = 283; +pub const SYS_openat: c_long = 284; +pub const SYS_mkdirat: c_long = 285; +pub const SYS_mknodat: c_long = 286; +pub const SYS_fchownat: c_long = 287; +pub const SYS_futimesat: c_long = 288; +pub const SYS_fstatat64: c_long = 289; +pub const SYS_unlinkat: c_long = 290; +pub const SYS_renameat: c_long = 291; +pub const SYS_linkat: c_long = 292; +pub const SYS_symlinkat: c_long = 293; +pub const SYS_readlinkat: c_long = 294; +pub const SYS_fchmodat: c_long = 295; +pub const SYS_faccessat: c_long = 296; +pub const SYS_pselect6: c_long = 297; +pub const SYS_ppoll: c_long = 298; +pub const SYS_unshare: c_long = 299; +pub const SYS_set_robust_list: c_long = 300; +pub const SYS_get_robust_list: c_long = 301; +pub const SYS_migrate_pages: c_long = 302; +pub const SYS_mbind: c_long = 303; +pub const SYS_get_mempolicy: c_long = 304; +pub const SYS_set_mempolicy: c_long = 305; +pub const SYS_kexec_load: c_long = 306; +pub const SYS_move_pages: c_long = 307; +pub const SYS_getcpu: c_long = 308; +pub const SYS_epoll_pwait: c_long = 309; +pub const SYS_utimensat: c_long = 310; +pub const SYS_signalfd: c_long = 311; +pub const SYS_timerfd_create: c_long = 312; +pub const SYS_eventfd: c_long = 313; +pub const SYS_fallocate: c_long = 314; +pub const SYS_timerfd_settime: c_long = 315; +pub const SYS_timerfd_gettime: c_long = 316; +pub const SYS_signalfd4: c_long = 317; +pub const SYS_eventfd2: c_long = 318; +pub const SYS_epoll_create1: c_long = 319; +pub const SYS_dup3: c_long = 320; +pub const SYS_pipe2: c_long = 321; +pub const SYS_inotify_init1: c_long = 322; +pub const SYS_accept4: c_long = 323; +pub const SYS_preadv: c_long = 324; +pub const SYS_pwritev: c_long = 325; +pub const SYS_rt_tgsigqueueinfo: c_long = 326; +pub const SYS_perf_event_open: c_long = 327; +pub const SYS_recvmmsg: c_long = 328; +pub const SYS_fanotify_init: c_long = 329; +pub const SYS_fanotify_mark: c_long = 330; +pub const SYS_prlimit64: c_long = 331; +pub const SYS_name_to_handle_at: c_long = 332; +pub const SYS_open_by_handle_at: c_long = 333; +pub const SYS_clock_adjtime: c_long = 334; +pub const SYS_syncfs: c_long = 335; +pub const SYS_sendmmsg: c_long = 336; +pub const SYS_setns: c_long = 337; +pub const SYS_process_vm_readv: c_long = 338; +pub const SYS_process_vm_writev: c_long = 339; +pub const SYS_kern_features: c_long = 340; +pub const SYS_kcmp: c_long = 341; +pub const SYS_finit_module: c_long = 342; +pub const SYS_sched_setattr: c_long = 343; +pub const SYS_sched_getattr: c_long = 344; +pub const SYS_renameat2: c_long = 345; +pub const SYS_seccomp: c_long = 346; +pub const SYS_getrandom: c_long = 347; +pub const SYS_memfd_create: c_long = 348; +pub const SYS_bpf: c_long = 349; +pub const SYS_execveat: c_long = 350; +pub const SYS_membarrier: c_long = 351; +pub const SYS_userfaultfd: c_long = 352; +pub const SYS_bind: c_long = 353; +pub const SYS_listen: c_long = 354; +pub const SYS_setsockopt: c_long = 355; +pub const SYS_mlock2: c_long = 356; +pub const SYS_copy_file_range: c_long = 357; +pub const SYS_preadv2: c_long = 358; +pub const 
SYS_pwritev2: c_long = 359; +pub const SYS_statx: c_long = 360; +pub const SYS_rseq: c_long = 365; +pub const SYS_pidfd_send_signal: c_long = 424; +pub const SYS_io_uring_setup: c_long = 425; +pub const SYS_io_uring_enter: c_long = 426; +pub const SYS_io_uring_register: c_long = 427; +pub const SYS_open_tree: c_long = 428; +pub const SYS_move_mount: c_long = 429; +pub const SYS_fsopen: c_long = 430; +pub const SYS_fsconfig: c_long = 431; +pub const SYS_fsmount: c_long = 432; +pub const SYS_fspick: c_long = 433; +pub const SYS_pidfd_open: c_long = 434; +// Reserved in the kernel, but not actually implemented yet +pub const SYS_clone3: c_long = 435; +pub const SYS_close_range: c_long = 436; +pub const SYS_openat2: c_long = 437; +pub const SYS_pidfd_getfd: c_long = 438; +pub const SYS_faccessat2: c_long = 439; +pub const SYS_process_madvise: c_long = 440; +pub const SYS_epoll_pwait2: c_long = 441; +pub const SYS_mount_setattr: c_long = 442; +pub const SYS_quotactl_fd: c_long = 443; +pub const SYS_landlock_create_ruleset: c_long = 444; +pub const SYS_landlock_add_rule: c_long = 445; +pub const SYS_landlock_restrict_self: c_long = 446; +pub const SYS_memfd_secret: c_long = 447; +pub const SYS_process_mrelease: c_long = 448; +pub const SYS_futex_waitv: c_long = 449; +pub const SYS_set_mempolicy_home_node: c_long = 450; diff --git a/vendor/libc/src/unix/linux_like/linux/gnu/b32/x86/mod.rs b/vendor/libc/src/unix/linux_like/linux/gnu/b32/x86/mod.rs new file mode 100644 index 00000000000000..5f0dfe90adf818 --- /dev/null +++ b/vendor/libc/src/unix/linux_like/linux/gnu/b32/x86/mod.rs @@ -0,0 +1,1098 @@ +use crate::prelude::*; +use crate::{off64_t, off_t}; + +pub type wchar_t = i32; +pub type greg_t = i32; + +s! { + // FIXME(1.0): This should not implement `PartialEq` + #[allow(unpredictable_function_pointer_comparisons)] + pub struct sigaction { + pub sa_sigaction: crate::sighandler_t, + pub sa_mask: crate::sigset_t, + pub sa_flags: c_int, + pub sa_restorer: Option, + } + + pub struct statfs { + pub f_type: crate::__fsword_t, + pub f_bsize: crate::__fsword_t, + pub f_blocks: crate::fsblkcnt_t, + pub f_bfree: crate::fsblkcnt_t, + pub f_bavail: crate::fsblkcnt_t, + + pub f_files: crate::fsfilcnt_t, + pub f_ffree: crate::fsfilcnt_t, + pub f_fsid: crate::fsid_t, + + pub f_namelen: crate::__fsword_t, + pub f_frsize: crate::__fsword_t, + pub f_flags: crate::__fsword_t, + f_spare: [crate::__fsword_t; 4], + } + + pub struct flock { + pub l_type: c_short, + pub l_whence: c_short, + pub l_start: off_t, + pub l_len: off_t, + pub l_pid: crate::pid_t, + } + + pub struct flock64 { + pub l_type: c_short, + pub l_whence: c_short, + pub l_start: off64_t, + pub l_len: off64_t, + pub l_pid: crate::pid_t, + } + + pub struct _libc_fpreg { + pub significand: [u16; 4], + pub exponent: u16, + } + + pub struct _libc_fpstate { + pub cw: c_ulong, + pub sw: c_ulong, + pub tag: c_ulong, + pub ipoff: c_ulong, + pub cssel: c_ulong, + pub dataoff: c_ulong, + pub datasel: c_ulong, + pub _st: [_libc_fpreg; 8], + pub status: c_ulong, + } + + pub struct user_fpregs_struct { + pub cwd: c_long, + pub swd: c_long, + pub twd: c_long, + pub fip: c_long, + pub fcs: c_long, + pub foo: c_long, + pub fos: c_long, + pub st_space: [c_long; 20], + } + + pub struct user_regs_struct { + pub ebx: c_long, + pub ecx: c_long, + pub edx: c_long, + pub esi: c_long, + pub edi: c_long, + pub ebp: c_long, + pub eax: c_long, + pub xds: c_long, + pub xes: c_long, + pub xfs: c_long, + pub xgs: c_long, + pub orig_eax: c_long, + pub eip: c_long, + pub xcs: c_long, 
+ pub eflags: c_long, + pub esp: c_long, + pub xss: c_long, + } + + pub struct user { + pub regs: user_regs_struct, + pub u_fpvalid: c_int, + pub i387: user_fpregs_struct, + pub u_tsize: c_ulong, + pub u_dsize: c_ulong, + pub u_ssize: c_ulong, + pub start_code: c_ulong, + pub start_stack: c_ulong, + pub signal: c_long, + __reserved: c_int, + pub u_ar0: *mut user_regs_struct, + pub u_fpstate: *mut user_fpregs_struct, + pub magic: c_ulong, + pub u_comm: [c_char; 32], + pub u_debugreg: [c_int; 8], + } + + pub struct mcontext_t { + pub gregs: [greg_t; 19], + pub fpregs: *mut _libc_fpstate, + pub oldmask: c_ulong, + pub cr2: c_ulong, + } + + pub struct ipc_perm { + pub __key: crate::key_t, + pub uid: crate::uid_t, + pub gid: crate::gid_t, + pub cuid: crate::uid_t, + pub cgid: crate::gid_t, + pub mode: c_ushort, + __pad1: c_ushort, + pub __seq: c_ushort, + __pad2: c_ushort, + __unused1: c_ulong, + __unused2: c_ulong, + } + + pub struct stat64 { + pub st_dev: crate::dev_t, + #[cfg(not(gnu_time_bits64))] + __pad1: c_uint, + #[cfg(not(gnu_time_bits64))] + __st_ino: c_ulong, + #[cfg(gnu_time_bits64)] + pub st_ino: crate::ino_t, + pub st_mode: crate::mode_t, + pub st_nlink: crate::nlink_t, + pub st_uid: crate::uid_t, + pub st_gid: crate::gid_t, + pub st_rdev: crate::dev_t, + #[cfg(not(gnu_time_bits64))] + __pad2: c_uint, + pub st_size: off64_t, + pub st_blksize: crate::blksize_t, + pub st_blocks: crate::blkcnt64_t, + pub st_atime: crate::time_t, + pub st_atime_nsec: c_long, + #[cfg(gnu_time_bits64)] + _atime_pad: c_int, + pub st_mtime: crate::time_t, + pub st_mtime_nsec: c_long, + #[cfg(gnu_time_bits64)] + _mtime_pad: c_int, + pub st_ctime: crate::time_t, + pub st_ctime_nsec: c_long, + #[cfg(gnu_time_bits64)] + _ctime_pad: c_int, + #[cfg(not(gnu_time_bits64))] + pub st_ino: crate::ino64_t, + } + + pub struct statfs64 { + pub f_type: crate::__fsword_t, + pub f_bsize: crate::__fsword_t, + pub f_blocks: u64, + pub f_bfree: u64, + pub f_bavail: u64, + pub f_files: u64, + pub f_ffree: u64, + pub f_fsid: crate::fsid_t, + pub f_namelen: crate::__fsword_t, + pub f_frsize: crate::__fsword_t, + pub f_flags: crate::__fsword_t, + pub f_spare: [crate::__fsword_t; 4], + } + + pub struct statvfs64 { + pub f_bsize: c_ulong, + pub f_frsize: c_ulong, + pub f_blocks: u64, + pub f_bfree: u64, + pub f_bavail: u64, + pub f_files: u64, + pub f_ffree: u64, + pub f_favail: u64, + pub f_fsid: c_ulong, + __f_unused: c_int, + pub f_flag: c_ulong, + pub f_namemax: c_ulong, + __f_spare: [c_int; 6], + } + + pub struct shmid_ds { + pub shm_perm: crate::ipc_perm, + pub shm_segsz: size_t, + pub shm_atime: crate::time_t, + #[cfg(not(gnu_time_bits64))] + __unused1: c_ulong, + pub shm_dtime: crate::time_t, + #[cfg(not(gnu_time_bits64))] + __unused2: c_ulong, + pub shm_ctime: crate::time_t, + #[cfg(not(gnu_time_bits64))] + __unused3: c_ulong, + pub shm_cpid: crate::pid_t, + pub shm_lpid: crate::pid_t, + pub shm_nattch: crate::shmatt_t, + __unused4: c_ulong, + __unused5: c_ulong, + } + + pub struct msqid_ds { + pub msg_perm: crate::ipc_perm, + pub msg_stime: crate::time_t, + #[cfg(not(gnu_time_bits64))] + __glibc_reserved1: c_ulong, + pub msg_rtime: crate::time_t, + #[cfg(not(gnu_time_bits64))] + __glibc_reserved2: c_ulong, + pub msg_ctime: crate::time_t, + #[cfg(not(gnu_time_bits64))] + __glibc_reserved3: c_ulong, + pub __msg_cbytes: c_ulong, + pub msg_qnum: crate::msgqnum_t, + pub msg_qbytes: crate::msglen_t, + pub msg_lspid: crate::pid_t, + pub msg_lrpid: crate::pid_t, + __glibc_reserved4: c_ulong, + __glibc_reserved5: c_ulong, + } + + 
pub struct siginfo_t { + pub si_signo: c_int, + pub si_errno: c_int, + pub si_code: c_int, + #[doc(hidden)] + #[deprecated( + since = "0.2.54", + note = "Please leave a comment on \ + https://github.com/rust-lang/libc/pull/1316 if you're using \ + this field" + )] + pub _pad: [c_int; 29], + _align: [usize; 0], + } + + pub struct stack_t { + pub ss_sp: *mut c_void, + pub ss_flags: c_int, + pub ss_size: size_t, + } +} + +s_no_extra_traits! { + pub struct user_fpxregs_struct { + pub cwd: c_ushort, + pub swd: c_ushort, + pub twd: c_ushort, + pub fop: c_ushort, + pub fip: c_long, + pub fcs: c_long, + pub foo: c_long, + pub fos: c_long, + pub mxcsr: c_long, + __reserved: c_long, + pub st_space: [c_long; 32], + pub xmm_space: [c_long; 32], + padding: [c_long; 56], + } + + pub struct ucontext_t { + pub uc_flags: c_ulong, + pub uc_link: *mut ucontext_t, + pub uc_stack: crate::stack_t, + pub uc_mcontext: mcontext_t, + pub uc_sigmask: crate::sigset_t, + __private: [u8; 112], + __ssp: [c_ulong; 4], + } + + #[repr(align(16))] + pub struct max_align_t { + priv_: [f64; 6], + } +} + +cfg_if! { + if #[cfg(feature = "extra_traits")] { + impl PartialEq for user_fpxregs_struct { + fn eq(&self, other: &user_fpxregs_struct) -> bool { + self.cwd == other.cwd + && self.swd == other.swd + && self.twd == other.twd + && self.fop == other.fop + && self.fip == other.fip + && self.fcs == other.fcs + && self.foo == other.foo + && self.fos == other.fos + && self.mxcsr == other.mxcsr + // Ignore __reserved field + && self.st_space == other.st_space + && self.xmm_space == other.xmm_space + // Ignore padding field + } + } + + impl Eq for user_fpxregs_struct {} + + impl hash::Hash for user_fpxregs_struct { + fn hash(&self, state: &mut H) { + self.cwd.hash(state); + self.swd.hash(state); + self.twd.hash(state); + self.fop.hash(state); + self.fip.hash(state); + self.fcs.hash(state); + self.foo.hash(state); + self.fos.hash(state); + self.mxcsr.hash(state); + // Ignore __reserved field + self.st_space.hash(state); + self.xmm_space.hash(state); + // Ignore padding field + } + } + + impl PartialEq for ucontext_t { + fn eq(&self, other: &ucontext_t) -> bool { + self.uc_flags == other.uc_flags + && self.uc_link == other.uc_link + && self.uc_stack == other.uc_stack + && self.uc_mcontext == other.uc_mcontext + && self.uc_sigmask == other.uc_sigmask + // Ignore __private field + } + } + + impl Eq for ucontext_t {} + + impl hash::Hash for ucontext_t { + fn hash(&self, state: &mut H) { + self.uc_flags.hash(state); + self.uc_link.hash(state); + self.uc_stack.hash(state); + self.uc_mcontext.hash(state); + self.uc_sigmask.hash(state); + // Ignore __private field + } + } + } +} + +pub const VEOF: usize = 4; +pub const RTLD_DEEPBIND: c_int = 0x8; +pub const RTLD_GLOBAL: c_int = 0x100; +pub const RTLD_NOLOAD: c_int = 0x4; +pub const O_DIRECT: c_int = 0x4000; +pub const O_DIRECTORY: c_int = 0x10000; +pub const O_NOFOLLOW: c_int = 0x20000; +pub const O_LARGEFILE: c_int = 0o0100000; +pub const O_APPEND: c_int = 1024; +pub const O_CREAT: c_int = 64; +pub const O_EXCL: c_int = 128; +pub const O_NOCTTY: c_int = 256; +pub const O_NONBLOCK: c_int = 2048; +pub const O_SYNC: c_int = 1052672; +pub const O_RSYNC: c_int = 1052672; +pub const O_DSYNC: c_int = 4096; +pub const O_FSYNC: c_int = 0x101000; +pub const O_ASYNC: c_int = 0x2000; +pub const O_NDELAY: c_int = 0x800; + +pub const MADV_SOFT_OFFLINE: c_int = 101; +pub const MAP_LOCKED: c_int = 0x02000; +pub const MAP_NORESERVE: c_int = 0x04000; +pub const MAP_32BIT: c_int = 0x0040; +pub const MAP_ANON: 
c_int = 0x0020; +pub const MAP_ANONYMOUS: c_int = 0x0020; +pub const MAP_DENYWRITE: c_int = 0x0800; +pub const MAP_EXECUTABLE: c_int = 0x01000; +pub const MAP_POPULATE: c_int = 0x08000; +pub const MAP_NONBLOCK: c_int = 0x010000; +pub const MAP_STACK: c_int = 0x020000; +pub const MAP_HUGETLB: c_int = 0x040000; +pub const MAP_GROWSDOWN: c_int = 0x0100; +pub const MAP_SYNC: c_int = 0x080000; + +pub const EDEADLOCK: c_int = 35; +pub const EUCLEAN: c_int = 117; +pub const ENOTNAM: c_int = 118; +pub const ENAVAIL: c_int = 119; +pub const EISNAM: c_int = 120; +pub const EREMOTEIO: c_int = 121; +pub const EDEADLK: c_int = 35; +pub const ENAMETOOLONG: c_int = 36; +pub const ENOLCK: c_int = 37; +pub const ENOSYS: c_int = 38; +pub const ENOTEMPTY: c_int = 39; +pub const ELOOP: c_int = 40; +pub const ENOMSG: c_int = 42; +pub const EIDRM: c_int = 43; +pub const ECHRNG: c_int = 44; +pub const EL2NSYNC: c_int = 45; +pub const EL3HLT: c_int = 46; +pub const EL3RST: c_int = 47; +pub const ELNRNG: c_int = 48; +pub const EUNATCH: c_int = 49; +pub const ENOCSI: c_int = 50; +pub const EL2HLT: c_int = 51; +pub const EBADE: c_int = 52; +pub const EBADR: c_int = 53; +pub const EXFULL: c_int = 54; +pub const ENOANO: c_int = 55; +pub const EBADRQC: c_int = 56; +pub const EBADSLT: c_int = 57; +pub const EMULTIHOP: c_int = 72; +pub const EOVERFLOW: c_int = 75; +pub const ENOTUNIQ: c_int = 76; +pub const EBADFD: c_int = 77; +pub const EBADMSG: c_int = 74; +pub const EREMCHG: c_int = 78; +pub const ELIBACC: c_int = 79; +pub const ELIBBAD: c_int = 80; +pub const ELIBSCN: c_int = 81; +pub const ELIBMAX: c_int = 82; +pub const ELIBEXEC: c_int = 83; +pub const EILSEQ: c_int = 84; +pub const ERESTART: c_int = 85; +pub const ESTRPIPE: c_int = 86; +pub const EUSERS: c_int = 87; +pub const ENOTSOCK: c_int = 88; +pub const EDESTADDRREQ: c_int = 89; +pub const EMSGSIZE: c_int = 90; +pub const EPROTOTYPE: c_int = 91; +pub const ENOPROTOOPT: c_int = 92; +pub const EPROTONOSUPPORT: c_int = 93; +pub const ESOCKTNOSUPPORT: c_int = 94; +pub const EOPNOTSUPP: c_int = 95; +pub const EPFNOSUPPORT: c_int = 96; +pub const EAFNOSUPPORT: c_int = 97; +pub const EADDRINUSE: c_int = 98; +pub const EADDRNOTAVAIL: c_int = 99; +pub const ENETDOWN: c_int = 100; +pub const ENETUNREACH: c_int = 101; +pub const ENETRESET: c_int = 102; +pub const ECONNABORTED: c_int = 103; +pub const ECONNRESET: c_int = 104; +pub const ENOBUFS: c_int = 105; +pub const EISCONN: c_int = 106; +pub const ENOTCONN: c_int = 107; +pub const ESHUTDOWN: c_int = 108; +pub const ETOOMANYREFS: c_int = 109; +pub const ETIMEDOUT: c_int = 110; +pub const ECONNREFUSED: c_int = 111; +pub const EHOSTDOWN: c_int = 112; +pub const EHOSTUNREACH: c_int = 113; +pub const EALREADY: c_int = 114; +pub const EINPROGRESS: c_int = 115; +pub const ESTALE: c_int = 116; +pub const EDQUOT: c_int = 122; +pub const ENOMEDIUM: c_int = 123; +pub const EMEDIUMTYPE: c_int = 124; +pub const ECANCELED: c_int = 125; +pub const ENOKEY: c_int = 126; +pub const EKEYEXPIRED: c_int = 127; +pub const EKEYREVOKED: c_int = 128; +pub const EKEYREJECTED: c_int = 129; +pub const EOWNERDEAD: c_int = 130; +pub const ENOTRECOVERABLE: c_int = 131; +pub const EHWPOISON: c_int = 133; +pub const ERFKILL: c_int = 132; + +pub const SA_SIGINFO: c_int = 0x00000004; +pub const SA_NOCLDWAIT: c_int = 0x00000002; + +pub const SOCK_STREAM: c_int = 1; +pub const SOCK_DGRAM: c_int = 2; + +cfg_if! 
{ + if #[cfg(gnu_file_offset_bits64)] { + pub const F_GETLK: c_int = 12; + } else { + pub const F_GETLK: c_int = 5; + } +} +pub const F_GETOWN: c_int = 9; +pub const F_SETOWN: c_int = 8; + +pub const PTRACE_GETFPXREGS: c_uint = 18; +pub const PTRACE_SETFPXREGS: c_uint = 19; +pub const PTRACE_SYSEMU: c_uint = 31; +pub const PTRACE_SYSEMU_SINGLESTEP: c_uint = 32; + +pub const MCL_CURRENT: c_int = 0x0001; +pub const MCL_FUTURE: c_int = 0x0002; +pub const MCL_ONFAULT: c_int = 0x0004; + +pub const POLLWRNORM: c_short = 0x100; +pub const POLLWRBAND: c_short = 0x200; + +pub const EFD_NONBLOCK: c_int = 0x800; +pub const SFD_NONBLOCK: c_int = 0x0800; + +pub const SIGCHLD: c_int = 17; +pub const SIGBUS: c_int = 7; +pub const SIGUSR1: c_int = 10; +pub const SIGUSR2: c_int = 12; +pub const SIGCONT: c_int = 18; +pub const SIGSTOP: c_int = 19; +pub const SIGTSTP: c_int = 20; +pub const SIGURG: c_int = 23; +pub const SIGIO: c_int = 29; +pub const SIGSYS: c_int = 31; +pub const SIGSTKFLT: c_int = 16; +#[deprecated(since = "0.2.55", note = "Use SIGSYS instead")] +pub const SIGUNUSED: c_int = 31; +pub const SIGPOLL: c_int = 29; +pub const SIGPWR: c_int = 30; +pub const SIG_SETMASK: c_int = 2; +pub const SIG_BLOCK: c_int = 0x000000; +pub const SIG_UNBLOCK: c_int = 0x01; +pub const SIGTTIN: c_int = 21; +pub const SIGTTOU: c_int = 22; +pub const SIGXCPU: c_int = 24; +pub const SIGXFSZ: c_int = 25; +pub const SIGVTALRM: c_int = 26; +pub const SIGPROF: c_int = 27; +pub const SIGWINCH: c_int = 28; +pub const SIGSTKSZ: size_t = 8192; +pub const MINSIGSTKSZ: size_t = 2048; +pub const CBAUD: crate::tcflag_t = 0o0010017; +pub const TAB1: crate::tcflag_t = 0x00000800; +pub const TAB2: crate::tcflag_t = 0x00001000; +pub const TAB3: crate::tcflag_t = 0x00001800; +pub const CR1: crate::tcflag_t = 0x00000200; +pub const CR2: crate::tcflag_t = 0x00000400; +pub const CR3: crate::tcflag_t = 0x00000600; +pub const FF1: crate::tcflag_t = 0x00008000; +pub const BS1: crate::tcflag_t = 0x00002000; +pub const VT1: crate::tcflag_t = 0x00004000; +pub const VWERASE: usize = 14; +pub const VREPRINT: usize = 12; +pub const VSUSP: usize = 10; +pub const VSTART: usize = 8; +pub const VSTOP: usize = 9; +pub const VDISCARD: usize = 13; +pub const VTIME: usize = 5; +pub const IXON: crate::tcflag_t = 0x00000400; +pub const IXOFF: crate::tcflag_t = 0x00001000; +pub const ONLCR: crate::tcflag_t = 0x4; +pub const CSIZE: crate::tcflag_t = 0x00000030; +pub const CS6: crate::tcflag_t = 0x00000010; +pub const CS7: crate::tcflag_t = 0x00000020; +pub const CS8: crate::tcflag_t = 0x00000030; +pub const CSTOPB: crate::tcflag_t = 0x00000040; +pub const CREAD: crate::tcflag_t = 0x00000080; +pub const PARENB: crate::tcflag_t = 0x00000100; +pub const PARODD: crate::tcflag_t = 0x00000200; +pub const HUPCL: crate::tcflag_t = 0x00000400; +pub const CLOCAL: crate::tcflag_t = 0x00000800; +pub const ECHOKE: crate::tcflag_t = 0x00000800; +pub const ECHOE: crate::tcflag_t = 0x00000010; +pub const ECHOK: crate::tcflag_t = 0x00000020; +pub const ECHONL: crate::tcflag_t = 0x00000040; +pub const ECHOPRT: crate::tcflag_t = 0x00000400; +pub const ECHOCTL: crate::tcflag_t = 0x00000200; +pub const ISIG: crate::tcflag_t = 0x00000001; +pub const ICANON: crate::tcflag_t = 0x00000002; +pub const PENDIN: crate::tcflag_t = 0x00004000; +pub const NOFLSH: crate::tcflag_t = 0x00000080; +pub const CIBAUD: crate::tcflag_t = 0o02003600000; +pub const CBAUDEX: crate::tcflag_t = 0o010000; +pub const VSWTC: usize = 7; +pub const OLCUC: crate::tcflag_t = 0o000002; +pub const NLDLY: 
crate::tcflag_t = 0o000400; +pub const CRDLY: crate::tcflag_t = 0o003000; +pub const TABDLY: crate::tcflag_t = 0o014000; +pub const BSDLY: crate::tcflag_t = 0o020000; +pub const FFDLY: crate::tcflag_t = 0o100000; +pub const VTDLY: crate::tcflag_t = 0o040000; +pub const XTABS: crate::tcflag_t = 0o014000; + +pub const B0: crate::speed_t = 0o000000; +pub const B50: crate::speed_t = 0o000001; +pub const B75: crate::speed_t = 0o000002; +pub const B110: crate::speed_t = 0o000003; +pub const B134: crate::speed_t = 0o000004; +pub const B150: crate::speed_t = 0o000005; +pub const B200: crate::speed_t = 0o000006; +pub const B300: crate::speed_t = 0o000007; +pub const B600: crate::speed_t = 0o000010; +pub const B1200: crate::speed_t = 0o000011; +pub const B1800: crate::speed_t = 0o000012; +pub const B2400: crate::speed_t = 0o000013; +pub const B4800: crate::speed_t = 0o000014; +pub const B9600: crate::speed_t = 0o000015; +pub const B19200: crate::speed_t = 0o000016; +pub const B38400: crate::speed_t = 0o000017; +pub const EXTA: crate::speed_t = B19200; +pub const EXTB: crate::speed_t = B38400; +pub const B57600: crate::speed_t = 0o010001; +pub const B115200: crate::speed_t = 0o010002; +pub const B230400: crate::speed_t = 0o010003; +pub const B460800: crate::speed_t = 0o010004; +pub const B500000: crate::speed_t = 0o010005; +pub const B576000: crate::speed_t = 0o010006; +pub const B921600: crate::speed_t = 0o010007; +pub const B1000000: crate::speed_t = 0o010010; +pub const B1152000: crate::speed_t = 0o010011; +pub const B1500000: crate::speed_t = 0o010012; +pub const B2000000: crate::speed_t = 0o010013; +pub const B2500000: crate::speed_t = 0o010014; +pub const B3000000: crate::speed_t = 0o010015; +pub const B3500000: crate::speed_t = 0o010016; +pub const B4000000: crate::speed_t = 0o010017; + +pub const VEOL: usize = 11; +pub const VEOL2: usize = 16; +pub const VMIN: usize = 6; +pub const IEXTEN: crate::tcflag_t = 0x00008000; +pub const TOSTOP: crate::tcflag_t = 0x00000100; +pub const FLUSHO: crate::tcflag_t = 0x00001000; +pub const EXTPROC: crate::tcflag_t = 0x00010000; + +pub const TCSANOW: c_int = 0; +pub const TCSADRAIN: c_int = 1; +pub const TCSAFLUSH: c_int = 2; + +// Syscall table +pub const SYS_restart_syscall: c_long = 0; +pub const SYS_exit: c_long = 1; +pub const SYS_fork: c_long = 2; +pub const SYS_read: c_long = 3; +pub const SYS_write: c_long = 4; +pub const SYS_open: c_long = 5; +pub const SYS_close: c_long = 6; +pub const SYS_waitpid: c_long = 7; +pub const SYS_creat: c_long = 8; +pub const SYS_link: c_long = 9; +pub const SYS_unlink: c_long = 10; +pub const SYS_execve: c_long = 11; +pub const SYS_chdir: c_long = 12; +pub const SYS_time: c_long = 13; +pub const SYS_mknod: c_long = 14; +pub const SYS_chmod: c_long = 15; +pub const SYS_lchown: c_long = 16; +pub const SYS_break: c_long = 17; +pub const SYS_oldstat: c_long = 18; +pub const SYS_lseek: c_long = 19; +pub const SYS_getpid: c_long = 20; +pub const SYS_mount: c_long = 21; +pub const SYS_umount: c_long = 22; +pub const SYS_setuid: c_long = 23; +pub const SYS_getuid: c_long = 24; +pub const SYS_stime: c_long = 25; +pub const SYS_ptrace: c_long = 26; +pub const SYS_alarm: c_long = 27; +pub const SYS_oldfstat: c_long = 28; +pub const SYS_pause: c_long = 29; +pub const SYS_utime: c_long = 30; +pub const SYS_stty: c_long = 31; +pub const SYS_gtty: c_long = 32; +pub const SYS_access: c_long = 33; +pub const SYS_nice: c_long = 34; +pub const SYS_ftime: c_long = 35; +pub const SYS_sync: c_long = 36; +pub const SYS_kill: c_long = 37; 
+pub const SYS_rename: c_long = 38; +pub const SYS_mkdir: c_long = 39; +pub const SYS_rmdir: c_long = 40; +pub const SYS_dup: c_long = 41; +pub const SYS_pipe: c_long = 42; +pub const SYS_times: c_long = 43; +pub const SYS_prof: c_long = 44; +pub const SYS_brk: c_long = 45; +pub const SYS_setgid: c_long = 46; +pub const SYS_getgid: c_long = 47; +pub const SYS_signal: c_long = 48; +pub const SYS_geteuid: c_long = 49; +pub const SYS_getegid: c_long = 50; +pub const SYS_acct: c_long = 51; +pub const SYS_umount2: c_long = 52; +pub const SYS_lock: c_long = 53; +pub const SYS_ioctl: c_long = 54; +pub const SYS_fcntl: c_long = 55; +pub const SYS_mpx: c_long = 56; +pub const SYS_setpgid: c_long = 57; +pub const SYS_ulimit: c_long = 58; +pub const SYS_oldolduname: c_long = 59; +pub const SYS_umask: c_long = 60; +pub const SYS_chroot: c_long = 61; +pub const SYS_ustat: c_long = 62; +pub const SYS_dup2: c_long = 63; +pub const SYS_getppid: c_long = 64; +pub const SYS_getpgrp: c_long = 65; +pub const SYS_setsid: c_long = 66; +pub const SYS_sigaction: c_long = 67; +pub const SYS_sgetmask: c_long = 68; +pub const SYS_ssetmask: c_long = 69; +pub const SYS_setreuid: c_long = 70; +pub const SYS_setregid: c_long = 71; +pub const SYS_sigsuspend: c_long = 72; +pub const SYS_sigpending: c_long = 73; +pub const SYS_sethostname: c_long = 74; +pub const SYS_setrlimit: c_long = 75; +pub const SYS_getrlimit: c_long = 76; +pub const SYS_getrusage: c_long = 77; +pub const SYS_gettimeofday: c_long = 78; +pub const SYS_settimeofday: c_long = 79; +pub const SYS_getgroups: c_long = 80; +pub const SYS_setgroups: c_long = 81; +pub const SYS_select: c_long = 82; +pub const SYS_symlink: c_long = 83; +pub const SYS_oldlstat: c_long = 84; +pub const SYS_readlink: c_long = 85; +pub const SYS_uselib: c_long = 86; +pub const SYS_swapon: c_long = 87; +pub const SYS_reboot: c_long = 88; +pub const SYS_readdir: c_long = 89; +pub const SYS_mmap: c_long = 90; +pub const SYS_munmap: c_long = 91; +pub const SYS_truncate: c_long = 92; +pub const SYS_ftruncate: c_long = 93; +pub const SYS_fchmod: c_long = 94; +pub const SYS_fchown: c_long = 95; +pub const SYS_getpriority: c_long = 96; +pub const SYS_setpriority: c_long = 97; +pub const SYS_profil: c_long = 98; +pub const SYS_statfs: c_long = 99; +pub const SYS_fstatfs: c_long = 100; +pub const SYS_ioperm: c_long = 101; +pub const SYS_socketcall: c_long = 102; +pub const SYS_syslog: c_long = 103; +pub const SYS_setitimer: c_long = 104; +pub const SYS_getitimer: c_long = 105; +pub const SYS_stat: c_long = 106; +pub const SYS_lstat: c_long = 107; +pub const SYS_fstat: c_long = 108; +pub const SYS_olduname: c_long = 109; +pub const SYS_iopl: c_long = 110; +pub const SYS_vhangup: c_long = 111; +pub const SYS_idle: c_long = 112; +pub const SYS_vm86old: c_long = 113; +pub const SYS_wait4: c_long = 114; +pub const SYS_swapoff: c_long = 115; +pub const SYS_sysinfo: c_long = 116; +pub const SYS_ipc: c_long = 117; +pub const SYS_fsync: c_long = 118; +pub const SYS_sigreturn: c_long = 119; +pub const SYS_clone: c_long = 120; +pub const SYS_setdomainname: c_long = 121; +pub const SYS_uname: c_long = 122; +pub const SYS_modify_ldt: c_long = 123; +pub const SYS_adjtimex: c_long = 124; +pub const SYS_mprotect: c_long = 125; +pub const SYS_sigprocmask: c_long = 126; +#[deprecated(since = "0.2.70", note = "Functional up to 2.6 kernel")] +pub const SYS_create_module: c_long = 127; +pub const SYS_init_module: c_long = 128; +pub const SYS_delete_module: c_long = 129; +#[deprecated(since = "0.2.70", note = 
"Functional up to 2.6 kernel")] +pub const SYS_get_kernel_syms: c_long = 130; +pub const SYS_quotactl: c_long = 131; +pub const SYS_getpgid: c_long = 132; +pub const SYS_fchdir: c_long = 133; +pub const SYS_bdflush: c_long = 134; +pub const SYS_sysfs: c_long = 135; +pub const SYS_personality: c_long = 136; +pub const SYS_afs_syscall: c_long = 137; +pub const SYS_setfsuid: c_long = 138; +pub const SYS_setfsgid: c_long = 139; +pub const SYS__llseek: c_long = 140; +pub const SYS_getdents: c_long = 141; +pub const SYS__newselect: c_long = 142; +pub const SYS_flock: c_long = 143; +pub const SYS_msync: c_long = 144; +pub const SYS_readv: c_long = 145; +pub const SYS_writev: c_long = 146; +pub const SYS_getsid: c_long = 147; +pub const SYS_fdatasync: c_long = 148; +pub const SYS__sysctl: c_long = 149; +pub const SYS_mlock: c_long = 150; +pub const SYS_munlock: c_long = 151; +pub const SYS_mlockall: c_long = 152; +pub const SYS_munlockall: c_long = 153; +pub const SYS_sched_setparam: c_long = 154; +pub const SYS_sched_getparam: c_long = 155; +pub const SYS_sched_setscheduler: c_long = 156; +pub const SYS_sched_getscheduler: c_long = 157; +pub const SYS_sched_yield: c_long = 158; +pub const SYS_sched_get_priority_max: c_long = 159; +pub const SYS_sched_get_priority_min: c_long = 160; +pub const SYS_sched_rr_get_interval: c_long = 161; +pub const SYS_nanosleep: c_long = 162; +pub const SYS_mremap: c_long = 163; +pub const SYS_setresuid: c_long = 164; +pub const SYS_getresuid: c_long = 165; +pub const SYS_vm86: c_long = 166; +#[deprecated(since = "0.2.70", note = "Functional up to 2.6 kernel")] +pub const SYS_query_module: c_long = 167; +pub const SYS_poll: c_long = 168; +pub const SYS_nfsservctl: c_long = 169; +pub const SYS_setresgid: c_long = 170; +pub const SYS_getresgid: c_long = 171; +pub const SYS_prctl: c_long = 172; +pub const SYS_rt_sigreturn: c_long = 173; +pub const SYS_rt_sigaction: c_long = 174; +pub const SYS_rt_sigprocmask: c_long = 175; +pub const SYS_rt_sigpending: c_long = 176; +pub const SYS_rt_sigtimedwait: c_long = 177; +pub const SYS_rt_sigqueueinfo: c_long = 178; +pub const SYS_rt_sigsuspend: c_long = 179; +pub const SYS_pread64: c_long = 180; +pub const SYS_pwrite64: c_long = 181; +pub const SYS_chown: c_long = 182; +pub const SYS_getcwd: c_long = 183; +pub const SYS_capget: c_long = 184; +pub const SYS_capset: c_long = 185; +pub const SYS_sigaltstack: c_long = 186; +pub const SYS_sendfile: c_long = 187; +pub const SYS_getpmsg: c_long = 188; +pub const SYS_putpmsg: c_long = 189; +pub const SYS_vfork: c_long = 190; +pub const SYS_ugetrlimit: c_long = 191; +pub const SYS_mmap2: c_long = 192; +pub const SYS_truncate64: c_long = 193; +pub const SYS_ftruncate64: c_long = 194; +pub const SYS_stat64: c_long = 195; +pub const SYS_lstat64: c_long = 196; +pub const SYS_fstat64: c_long = 197; +pub const SYS_lchown32: c_long = 198; +pub const SYS_getuid32: c_long = 199; +pub const SYS_getgid32: c_long = 200; +pub const SYS_geteuid32: c_long = 201; +pub const SYS_getegid32: c_long = 202; +pub const SYS_setreuid32: c_long = 203; +pub const SYS_setregid32: c_long = 204; +pub const SYS_getgroups32: c_long = 205; +pub const SYS_setgroups32: c_long = 206; +pub const SYS_fchown32: c_long = 207; +pub const SYS_setresuid32: c_long = 208; +pub const SYS_getresuid32: c_long = 209; +pub const SYS_setresgid32: c_long = 210; +pub const SYS_getresgid32: c_long = 211; +pub const SYS_chown32: c_long = 212; +pub const SYS_setuid32: c_long = 213; +pub const SYS_setgid32: c_long = 214; +pub const 
SYS_setfsuid32: c_long = 215; +pub const SYS_setfsgid32: c_long = 216; +pub const SYS_pivot_root: c_long = 217; +pub const SYS_mincore: c_long = 218; +pub const SYS_madvise: c_long = 219; +pub const SYS_getdents64: c_long = 220; +pub const SYS_fcntl64: c_long = 221; +pub const SYS_gettid: c_long = 224; +pub const SYS_readahead: c_long = 225; +pub const SYS_setxattr: c_long = 226; +pub const SYS_lsetxattr: c_long = 227; +pub const SYS_fsetxattr: c_long = 228; +pub const SYS_getxattr: c_long = 229; +pub const SYS_lgetxattr: c_long = 230; +pub const SYS_fgetxattr: c_long = 231; +pub const SYS_listxattr: c_long = 232; +pub const SYS_llistxattr: c_long = 233; +pub const SYS_flistxattr: c_long = 234; +pub const SYS_removexattr: c_long = 235; +pub const SYS_lremovexattr: c_long = 236; +pub const SYS_fremovexattr: c_long = 237; +pub const SYS_tkill: c_long = 238; +pub const SYS_sendfile64: c_long = 239; +pub const SYS_futex: c_long = 240; +pub const SYS_sched_setaffinity: c_long = 241; +pub const SYS_sched_getaffinity: c_long = 242; +pub const SYS_set_thread_area: c_long = 243; +pub const SYS_get_thread_area: c_long = 244; +pub const SYS_io_setup: c_long = 245; +pub const SYS_io_destroy: c_long = 246; +pub const SYS_io_getevents: c_long = 247; +pub const SYS_io_submit: c_long = 248; +pub const SYS_io_cancel: c_long = 249; +pub const SYS_fadvise64: c_long = 250; +pub const SYS_exit_group: c_long = 252; +pub const SYS_lookup_dcookie: c_long = 253; +pub const SYS_epoll_create: c_long = 254; +pub const SYS_epoll_ctl: c_long = 255; +pub const SYS_epoll_wait: c_long = 256; +pub const SYS_remap_file_pages: c_long = 257; +pub const SYS_set_tid_address: c_long = 258; +pub const SYS_timer_create: c_long = 259; +pub const SYS_timer_settime: c_long = 260; +pub const SYS_timer_gettime: c_long = 261; +pub const SYS_timer_getoverrun: c_long = 262; +pub const SYS_timer_delete: c_long = 263; +pub const SYS_clock_settime: c_long = 264; +pub const SYS_clock_gettime: c_long = 265; +pub const SYS_clock_getres: c_long = 266; +pub const SYS_clock_nanosleep: c_long = 267; +pub const SYS_statfs64: c_long = 268; +pub const SYS_fstatfs64: c_long = 269; +pub const SYS_tgkill: c_long = 270; +pub const SYS_utimes: c_long = 271; +pub const SYS_fadvise64_64: c_long = 272; +pub const SYS_vserver: c_long = 273; +pub const SYS_mbind: c_long = 274; +pub const SYS_get_mempolicy: c_long = 275; +pub const SYS_set_mempolicy: c_long = 276; +pub const SYS_mq_open: c_long = 277; +pub const SYS_mq_unlink: c_long = 278; +pub const SYS_mq_timedsend: c_long = 279; +pub const SYS_mq_timedreceive: c_long = 280; +pub const SYS_mq_notify: c_long = 281; +pub const SYS_mq_getsetattr: c_long = 282; +pub const SYS_kexec_load: c_long = 283; +pub const SYS_waitid: c_long = 284; +pub const SYS_add_key: c_long = 286; +pub const SYS_request_key: c_long = 287; +pub const SYS_keyctl: c_long = 288; +pub const SYS_ioprio_set: c_long = 289; +pub const SYS_ioprio_get: c_long = 290; +pub const SYS_inotify_init: c_long = 291; +pub const SYS_inotify_add_watch: c_long = 292; +pub const SYS_inotify_rm_watch: c_long = 293; +pub const SYS_migrate_pages: c_long = 294; +pub const SYS_openat: c_long = 295; +pub const SYS_mkdirat: c_long = 296; +pub const SYS_mknodat: c_long = 297; +pub const SYS_fchownat: c_long = 298; +pub const SYS_futimesat: c_long = 299; +pub const SYS_fstatat64: c_long = 300; +pub const SYS_unlinkat: c_long = 301; +pub const SYS_renameat: c_long = 302; +pub const SYS_linkat: c_long = 303; +pub const SYS_symlinkat: c_long = 304; +pub const 
SYS_readlinkat: c_long = 305; +pub const SYS_fchmodat: c_long = 306; +pub const SYS_faccessat: c_long = 307; +pub const SYS_pselect6: c_long = 308; +pub const SYS_ppoll: c_long = 309; +pub const SYS_unshare: c_long = 310; +pub const SYS_set_robust_list: c_long = 311; +pub const SYS_get_robust_list: c_long = 312; +pub const SYS_splice: c_long = 313; +pub const SYS_sync_file_range: c_long = 314; +pub const SYS_tee: c_long = 315; +pub const SYS_vmsplice: c_long = 316; +pub const SYS_move_pages: c_long = 317; +pub const SYS_getcpu: c_long = 318; +pub const SYS_epoll_pwait: c_long = 319; +pub const SYS_utimensat: c_long = 320; +pub const SYS_signalfd: c_long = 321; +pub const SYS_timerfd_create: c_long = 322; +pub const SYS_eventfd: c_long = 323; +pub const SYS_fallocate: c_long = 324; +pub const SYS_timerfd_settime: c_long = 325; +pub const SYS_timerfd_gettime: c_long = 326; +pub const SYS_signalfd4: c_long = 327; +pub const SYS_eventfd2: c_long = 328; +pub const SYS_epoll_create1: c_long = 329; +pub const SYS_dup3: c_long = 330; +pub const SYS_pipe2: c_long = 331; +pub const SYS_inotify_init1: c_long = 332; +pub const SYS_preadv: c_long = 333; +pub const SYS_pwritev: c_long = 334; +pub const SYS_rt_tgsigqueueinfo: c_long = 335; +pub const SYS_perf_event_open: c_long = 336; +pub const SYS_recvmmsg: c_long = 337; +pub const SYS_fanotify_init: c_long = 338; +pub const SYS_fanotify_mark: c_long = 339; +pub const SYS_prlimit64: c_long = 340; +pub const SYS_name_to_handle_at: c_long = 341; +pub const SYS_open_by_handle_at: c_long = 342; +pub const SYS_clock_adjtime: c_long = 343; +pub const SYS_syncfs: c_long = 344; +pub const SYS_sendmmsg: c_long = 345; +pub const SYS_setns: c_long = 346; +pub const SYS_process_vm_readv: c_long = 347; +pub const SYS_process_vm_writev: c_long = 348; +pub const SYS_kcmp: c_long = 349; +pub const SYS_finit_module: c_long = 350; +pub const SYS_sched_setattr: c_long = 351; +pub const SYS_sched_getattr: c_long = 352; +pub const SYS_renameat2: c_long = 353; +pub const SYS_seccomp: c_long = 354; +pub const SYS_getrandom: c_long = 355; +pub const SYS_memfd_create: c_long = 356; +pub const SYS_bpf: c_long = 357; +pub const SYS_execveat: c_long = 358; +pub const SYS_socket: c_long = 359; +pub const SYS_socketpair: c_long = 360; +pub const SYS_bind: c_long = 361; +pub const SYS_connect: c_long = 362; +pub const SYS_listen: c_long = 363; +pub const SYS_accept4: c_long = 364; +pub const SYS_getsockopt: c_long = 365; +pub const SYS_setsockopt: c_long = 366; +pub const SYS_getsockname: c_long = 367; +pub const SYS_getpeername: c_long = 368; +pub const SYS_sendto: c_long = 369; +pub const SYS_sendmsg: c_long = 370; +pub const SYS_recvfrom: c_long = 371; +pub const SYS_recvmsg: c_long = 372; +pub const SYS_shutdown: c_long = 373; +pub const SYS_userfaultfd: c_long = 374; +pub const SYS_membarrier: c_long = 375; +pub const SYS_mlock2: c_long = 376; +pub const SYS_copy_file_range: c_long = 377; +pub const SYS_preadv2: c_long = 378; +pub const SYS_pwritev2: c_long = 379; +pub const SYS_pkey_mprotect: c_long = 380; +pub const SYS_pkey_alloc: c_long = 381; +pub const SYS_pkey_free: c_long = 382; +pub const SYS_statx: c_long = 383; +pub const SYS_rseq: c_long = 386; +pub const SYS_pidfd_send_signal: c_long = 424; +pub const SYS_io_uring_setup: c_long = 425; +pub const SYS_io_uring_enter: c_long = 426; +pub const SYS_io_uring_register: c_long = 427; +pub const SYS_open_tree: c_long = 428; +pub const SYS_move_mount: c_long = 429; +pub const SYS_fsopen: c_long = 430; +pub const 
SYS_fsconfig: c_long = 431; +pub const SYS_fsmount: c_long = 432; +pub const SYS_fspick: c_long = 433; +pub const SYS_pidfd_open: c_long = 434; +pub const SYS_clone3: c_long = 435; +pub const SYS_close_range: c_long = 436; +pub const SYS_openat2: c_long = 437; +pub const SYS_pidfd_getfd: c_long = 438; +pub const SYS_faccessat2: c_long = 439; +pub const SYS_process_madvise: c_long = 440; +pub const SYS_epoll_pwait2: c_long = 441; +pub const SYS_mount_setattr: c_long = 442; +pub const SYS_quotactl_fd: c_long = 443; +pub const SYS_landlock_create_ruleset: c_long = 444; +pub const SYS_landlock_add_rule: c_long = 445; +pub const SYS_landlock_restrict_self: c_long = 446; +pub const SYS_memfd_secret: c_long = 447; +pub const SYS_process_mrelease: c_long = 448; +pub const SYS_futex_waitv: c_long = 449; +pub const SYS_set_mempolicy_home_node: c_long = 450; +pub const SYS_fchmodat2: c_long = 452; +pub const SYS_mseal: c_long = 462; + +// offsets in user_regs_structs, from sys/reg.h +pub const EBX: c_int = 0; +pub const ECX: c_int = 1; +pub const EDX: c_int = 2; +pub const ESI: c_int = 3; +pub const EDI: c_int = 4; +pub const EBP: c_int = 5; +pub const EAX: c_int = 6; +pub const DS: c_int = 7; +pub const ES: c_int = 8; +pub const FS: c_int = 9; +pub const GS: c_int = 10; +pub const ORIG_EAX: c_int = 11; +pub const EIP: c_int = 12; +pub const CS: c_int = 13; +pub const EFL: c_int = 14; +pub const UESP: c_int = 15; +pub const SS: c_int = 16; + +// offsets in mcontext_t.gregs from sys/ucontext.h +pub const REG_GS: c_int = 0; +pub const REG_FS: c_int = 1; +pub const REG_ES: c_int = 2; +pub const REG_DS: c_int = 3; +pub const REG_EDI: c_int = 4; +pub const REG_ESI: c_int = 5; +pub const REG_EBP: c_int = 6; +pub const REG_ESP: c_int = 7; +pub const REG_EBX: c_int = 8; +pub const REG_EDX: c_int = 9; +pub const REG_ECX: c_int = 10; +pub const REG_EAX: c_int = 11; +pub const REG_TRAPNO: c_int = 12; +pub const REG_ERR: c_int = 13; +pub const REG_EIP: c_int = 14; +pub const REG_CS: c_int = 15; +pub const REG_EFL: c_int = 16; +pub const REG_UESP: c_int = 17; +pub const REG_SS: c_int = 18; + +extern "C" { + pub fn getcontext(ucp: *mut ucontext_t) -> c_int; + pub fn setcontext(ucp: *const ucontext_t) -> c_int; + pub fn makecontext(ucp: *mut ucontext_t, func: extern "C" fn(), argc: c_int, ...); + pub fn swapcontext(uocp: *mut ucontext_t, ucp: *const ucontext_t) -> c_int; +} diff --git a/vendor/libc/src/unix/linux_like/linux/gnu/b64/aarch64/ilp32.rs b/vendor/libc/src/unix/linux_like/linux/gnu/b64/aarch64/ilp32.rs new file mode 100644 index 00000000000000..f808ff31f8cca5 --- /dev/null +++ b/vendor/libc/src/unix/linux_like/linux/gnu/b64/aarch64/ilp32.rs @@ -0,0 +1,54 @@ +use crate::prelude::*; +use crate::pthread_mutex_t; + +pub const __SIZEOF_PTHREAD_CONDATTR_T: usize = 4; +pub const __SIZEOF_PTHREAD_MUTEX_T: usize = 32; +pub const __SIZEOF_PTHREAD_MUTEXATTR_T: usize = 4; +pub const __SIZEOF_PTHREAD_RWLOCK_T: usize = 48; +pub const __SIZEOF_PTHREAD_BARRIERATTR_T: usize = 4; +pub const __SIZEOF_PTHREAD_BARRIER_T: usize = 20; + +#[cfg(target_endian = "little")] +pub const PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP: crate::pthread_mutex_t = pthread_mutex_t { + size: [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, + ], +}; +#[cfg(target_endian = "little")] +pub const PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP: crate::pthread_mutex_t = pthread_mutex_t { + size: [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, + ], +}; 
+#[cfg(target_endian = "little")] +pub const PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP: crate::pthread_mutex_t = pthread_mutex_t { + size: [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, + ], +}; +#[cfg(target_endian = "big")] +pub const PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP: crate::pthread_mutex_t = pthread_mutex_t { + size: [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, + ], +}; +#[cfg(target_endian = "big")] +pub const PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP: crate::pthread_mutex_t = pthread_mutex_t { + size: [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, + ], +}; +#[cfg(target_endian = "big")] +pub const PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP: crate::pthread_mutex_t = pthread_mutex_t { + size: [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, + ], +}; + +pub const SYS_sync_file_range2: c_long = 84; diff --git a/vendor/libc/src/unix/linux_like/linux/gnu/b64/aarch64/lp64.rs b/vendor/libc/src/unix/linux_like/linux/gnu/b64/aarch64/lp64.rs new file mode 100644 index 00000000000000..960e5127806b34 --- /dev/null +++ b/vendor/libc/src/unix/linux_like/linux/gnu/b64/aarch64/lp64.rs @@ -0,0 +1,57 @@ +use crate::prelude::*; +use crate::pthread_mutex_t; + +pub const __SIZEOF_PTHREAD_CONDATTR_T: usize = 8; +pub const __SIZEOF_PTHREAD_MUTEX_T: usize = 48; +pub const __SIZEOF_PTHREAD_MUTEXATTR_T: usize = 8; +pub const __SIZEOF_PTHREAD_RWLOCK_T: usize = 56; +pub const __SIZEOF_PTHREAD_BARRIERATTR_T: usize = 8; +pub const __SIZEOF_PTHREAD_BARRIER_T: usize = 32; + +#[cfg(target_endian = "little")] +pub const PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP: crate::pthread_mutex_t = pthread_mutex_t { + size: [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ], +}; +#[cfg(target_endian = "little")] +pub const PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP: crate::pthread_mutex_t = pthread_mutex_t { + size: [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ], +}; +#[cfg(target_endian = "little")] +pub const PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP: crate::pthread_mutex_t = pthread_mutex_t { + size: [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ], +}; +#[cfg(target_endian = "big")] +pub const PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP: crate::pthread_mutex_t = pthread_mutex_t { + size: [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ], +}; +#[cfg(target_endian = "big")] +pub const PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP: crate::pthread_mutex_t = pthread_mutex_t { + size: [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ], +}; +#[cfg(target_endian = "big")] +pub const PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP: crate::pthread_mutex_t = pthread_mutex_t { + size: [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ], +}; + +pub const SYS_renameat: c_long = 38; +pub const SYS_sync_file_range: c_long = 84; +pub const 
SYS_getrlimit: c_long = 163; +pub const SYS_setrlimit: c_long = 164; diff --git a/vendor/libc/src/unix/linux_like/linux/gnu/b64/aarch64/mod.rs b/vendor/libc/src/unix/linux_like/linux/gnu/b64/aarch64/mod.rs new file mode 100644 index 00000000000000..28b4e40fde5438 --- /dev/null +++ b/vendor/libc/src/unix/linux_like/linux/gnu/b64/aarch64/mod.rs @@ -0,0 +1,973 @@ +//! AArch64-specific definitions for 64-bit linux-like values + +use crate::prelude::*; +use crate::{off64_t, off_t}; + +pub type wchar_t = u32; +pub type nlink_t = u32; +pub type blksize_t = i32; +pub type suseconds_t = i64; +pub type __u64 = c_ulonglong; +pub type __s64 = c_longlong; + +s! { + // FIXME(1.0): This should not implement `PartialEq` + #[allow(unpredictable_function_pointer_comparisons)] + pub struct sigaction { + pub sa_sigaction: crate::sighandler_t, + pub sa_mask: crate::sigset_t, + #[cfg(target_arch = "sparc64")] + __reserved0: c_int, + pub sa_flags: c_int, + pub sa_restorer: Option, + } + + pub struct statfs { + pub f_type: crate::__fsword_t, + pub f_bsize: crate::__fsword_t, + pub f_blocks: crate::fsblkcnt_t, + pub f_bfree: crate::fsblkcnt_t, + pub f_bavail: crate::fsblkcnt_t, + + pub f_files: crate::fsfilcnt_t, + pub f_ffree: crate::fsfilcnt_t, + pub f_fsid: crate::fsid_t, + + pub f_namelen: crate::__fsword_t, + pub f_frsize: crate::__fsword_t, + f_spare: [crate::__fsword_t; 5], + } + + pub struct flock { + pub l_type: c_short, + pub l_whence: c_short, + pub l_start: off_t, + pub l_len: off_t, + pub l_pid: crate::pid_t, + } + + pub struct flock64 { + pub l_type: c_short, + pub l_whence: c_short, + pub l_start: off64_t, + pub l_len: off64_t, + pub l_pid: crate::pid_t, + } + + pub struct stat { + pub st_dev: crate::dev_t, + pub st_ino: crate::ino_t, + pub st_mode: crate::mode_t, + pub st_nlink: crate::nlink_t, + pub st_uid: crate::uid_t, + pub st_gid: crate::gid_t, + pub st_rdev: crate::dev_t, + __pad1: crate::dev_t, + pub st_size: off_t, + pub st_blksize: crate::blksize_t, + __pad2: c_int, + pub st_blocks: crate::blkcnt_t, + pub st_atime: crate::time_t, + pub st_atime_nsec: c_long, + pub st_mtime: crate::time_t, + pub st_mtime_nsec: c_long, + pub st_ctime: crate::time_t, + pub st_ctime_nsec: c_long, + __unused: [c_int; 2], + } + + pub struct stat64 { + pub st_dev: crate::dev_t, + pub st_ino: crate::ino_t, + pub st_mode: crate::mode_t, + pub st_nlink: crate::nlink_t, + pub st_uid: crate::uid_t, + pub st_gid: crate::gid_t, + pub st_rdev: crate::dev_t, + __pad1: crate::dev_t, + pub st_size: off64_t, + pub st_blksize: crate::blksize_t, + __pad2: c_int, + pub st_blocks: crate::blkcnt64_t, + pub st_atime: crate::time_t, + pub st_atime_nsec: c_long, + pub st_mtime: crate::time_t, + pub st_mtime_nsec: c_long, + pub st_ctime: crate::time_t, + pub st_ctime_nsec: c_long, + __unused: [c_int; 2], + } + + pub struct statfs64 { + pub f_type: crate::__fsword_t, + pub f_bsize: crate::__fsword_t, + pub f_blocks: u64, + pub f_bfree: u64, + pub f_bavail: u64, + pub f_files: u64, + pub f_ffree: u64, + pub f_fsid: crate::fsid_t, + pub f_namelen: crate::__fsword_t, + pub f_frsize: crate::__fsword_t, + pub f_flags: crate::__fsword_t, + pub f_spare: [crate::__fsword_t; 4], + } + + pub struct statvfs { + pub f_bsize: c_ulong, + pub f_frsize: c_ulong, + pub f_blocks: crate::fsblkcnt_t, + pub f_bfree: crate::fsblkcnt_t, + pub f_bavail: crate::fsblkcnt_t, + pub f_files: crate::fsfilcnt_t, + pub f_ffree: crate::fsfilcnt_t, + pub f_favail: crate::fsfilcnt_t, + pub f_fsid: c_ulong, + pub f_flag: c_ulong, + pub f_namemax: c_ulong, + __f_spare: 
[c_int; 6], + } + + pub struct statvfs64 { + pub f_bsize: c_ulong, + pub f_frsize: c_ulong, + pub f_blocks: u64, + pub f_bfree: u64, + pub f_bavail: u64, + pub f_files: u64, + pub f_ffree: u64, + pub f_favail: u64, + pub f_fsid: c_ulong, + pub f_flag: c_ulong, + pub f_namemax: c_ulong, + __f_spare: [c_int; 6], + } + + pub struct pthread_attr_t { + __size: [usize; 8], + } + + pub struct user_regs_struct { + pub regs: [c_ulonglong; 31], + pub sp: c_ulonglong, + pub pc: c_ulonglong, + pub pstate: c_ulonglong, + } + + pub struct ipc_perm { + pub __key: crate::key_t, + pub uid: crate::uid_t, + pub gid: crate::gid_t, + pub cuid: crate::uid_t, + pub cgid: crate::gid_t, + pub mode: c_uint, + pub __seq: c_ushort, + __pad1: c_ushort, + __unused1: c_ulong, + __unused2: c_ulong, + } + + pub struct shmid_ds { + pub shm_perm: crate::ipc_perm, + pub shm_segsz: size_t, + pub shm_atime: crate::time_t, + pub shm_dtime: crate::time_t, + pub shm_ctime: crate::time_t, + pub shm_cpid: crate::pid_t, + pub shm_lpid: crate::pid_t, + pub shm_nattch: crate::shmatt_t, + __unused4: c_ulong, + __unused5: c_ulong, + } + + pub struct siginfo_t { + pub si_signo: c_int, + pub si_errno: c_int, + pub si_code: c_int, + #[doc(hidden)] + #[deprecated( + since = "0.2.54", + note = "Please leave a comment on \ + https://github.com/rust-lang/libc/pull/1316 if you're using \ + this field" + )] + pub _pad: [c_int; 29], + _align: [usize; 0], + } + + pub struct stack_t { + pub ss_sp: *mut c_void, + pub ss_flags: c_int, + pub ss_size: size_t, + } + + pub struct ucontext_t { + pub uc_flags: c_ulong, + pub uc_link: *mut ucontext_t, + pub uc_stack: crate::stack_t, + pub uc_sigmask: crate::sigset_t, + pub uc_mcontext: mcontext_t, + } + + #[repr(align(16))] + pub struct mcontext_t { + pub fault_address: c_ulonglong, + pub regs: [c_ulonglong; 31], + pub sp: c_ulonglong, + pub pc: c_ulonglong, + pub pstate: c_ulonglong, + __reserved: [u64; 512], + } + + pub struct user_fpsimd_struct { + pub vregs: [crate::__uint128_t; 32], + pub fpsr: c_uint, + pub fpcr: c_uint, + } + + #[repr(align(8))] + pub struct clone_args { + pub flags: c_ulonglong, + pub pidfd: c_ulonglong, + pub child_tid: c_ulonglong, + pub parent_tid: c_ulonglong, + pub exit_signal: c_ulonglong, + pub stack: c_ulonglong, + pub stack_size: c_ulonglong, + pub tls: c_ulonglong, + pub set_tid: c_ulonglong, + pub set_tid_size: c_ulonglong, + pub cgroup: c_ulonglong, + } +} + +s_no_extra_traits! 
{ + #[repr(align(16))] + pub struct max_align_t { + priv_: [f32; 8], + } +} + +pub const VEOF: usize = 4; + +pub const RTLD_DEEPBIND: c_int = 0x8; +pub const RTLD_GLOBAL: c_int = 0x100; +pub const RTLD_NOLOAD: c_int = 0x4; + +pub const O_APPEND: c_int = 1024; +pub const O_CREAT: c_int = 64; +pub const O_EXCL: c_int = 128; +pub const O_NOCTTY: c_int = 256; +pub const O_NONBLOCK: c_int = 2048; +pub const O_SYNC: c_int = 1052672; +pub const O_RSYNC: c_int = 1052672; +pub const O_DSYNC: c_int = 4096; +pub const O_FSYNC: c_int = 0x101000; +pub const O_NOATIME: c_int = 0o1000000; +pub const O_PATH: c_int = 0o10000000; +pub const O_TMPFILE: c_int = 0o20000000 | O_DIRECTORY; + +pub const MADV_SOFT_OFFLINE: c_int = 101; +pub const MAP_GROWSDOWN: c_int = 0x0100; + +pub const EUCLEAN: c_int = 117; +pub const ENOTNAM: c_int = 118; +pub const ENAVAIL: c_int = 119; +pub const EISNAM: c_int = 120; +pub const EREMOTEIO: c_int = 121; +pub const EDEADLK: c_int = 35; +pub const ENAMETOOLONG: c_int = 36; +pub const ENOLCK: c_int = 37; +pub const ENOSYS: c_int = 38; +pub const ENOTEMPTY: c_int = 39; +pub const ELOOP: c_int = 40; +pub const ENOMSG: c_int = 42; +pub const EIDRM: c_int = 43; +pub const ECHRNG: c_int = 44; +pub const EL2NSYNC: c_int = 45; +pub const EL3HLT: c_int = 46; +pub const EL3RST: c_int = 47; +pub const ELNRNG: c_int = 48; +pub const EUNATCH: c_int = 49; +pub const ENOCSI: c_int = 50; +pub const EL2HLT: c_int = 51; +pub const EBADE: c_int = 52; +pub const EBADR: c_int = 53; +pub const EXFULL: c_int = 54; +pub const ENOANO: c_int = 55; +pub const EBADRQC: c_int = 56; +pub const EBADSLT: c_int = 57; +pub const EMULTIHOP: c_int = 72; +pub const EOVERFLOW: c_int = 75; +pub const ENOTUNIQ: c_int = 76; +pub const EBADFD: c_int = 77; +pub const EBADMSG: c_int = 74; +pub const EREMCHG: c_int = 78; +pub const ELIBACC: c_int = 79; +pub const ELIBBAD: c_int = 80; +pub const ELIBSCN: c_int = 81; +pub const ELIBMAX: c_int = 82; +pub const ELIBEXEC: c_int = 83; +pub const EILSEQ: c_int = 84; +pub const ERESTART: c_int = 85; +pub const ESTRPIPE: c_int = 86; +pub const EUSERS: c_int = 87; +pub const ENOTSOCK: c_int = 88; +pub const EDESTADDRREQ: c_int = 89; +pub const EMSGSIZE: c_int = 90; +pub const EPROTOTYPE: c_int = 91; +pub const ENOPROTOOPT: c_int = 92; +pub const EPROTONOSUPPORT: c_int = 93; +pub const ESOCKTNOSUPPORT: c_int = 94; +pub const EOPNOTSUPP: c_int = 95; +pub const EPFNOSUPPORT: c_int = 96; +pub const EAFNOSUPPORT: c_int = 97; +pub const EADDRINUSE: c_int = 98; +pub const EADDRNOTAVAIL: c_int = 99; +pub const ENETDOWN: c_int = 100; +pub const ENETUNREACH: c_int = 101; +pub const ENETRESET: c_int = 102; +pub const ECONNABORTED: c_int = 103; +pub const ECONNRESET: c_int = 104; +pub const ENOBUFS: c_int = 105; +pub const EISCONN: c_int = 106; +pub const ENOTCONN: c_int = 107; +pub const ESHUTDOWN: c_int = 108; +pub const ETOOMANYREFS: c_int = 109; +pub const ETIMEDOUT: c_int = 110; +pub const ECONNREFUSED: c_int = 111; +pub const EHOSTDOWN: c_int = 112; +pub const EHOSTUNREACH: c_int = 113; +pub const EALREADY: c_int = 114; +pub const EINPROGRESS: c_int = 115; +pub const ESTALE: c_int = 116; +pub const EDQUOT: c_int = 122; +pub const ENOMEDIUM: c_int = 123; +pub const EMEDIUMTYPE: c_int = 124; +pub const ECANCELED: c_int = 125; +pub const ENOKEY: c_int = 126; +pub const EKEYEXPIRED: c_int = 127; +pub const EKEYREVOKED: c_int = 128; +pub const EKEYREJECTED: c_int = 129; +pub const EOWNERDEAD: c_int = 130; +pub const ENOTRECOVERABLE: c_int = 131; +pub const EHWPOISON: c_int = 133; +pub const 
ERFKILL: c_int = 132; + +pub const POSIX_FADV_DONTNEED: c_int = 4; +pub const POSIX_FADV_NOREUSE: c_int = 5; + +pub const SOCK_STREAM: c_int = 1; +pub const SOCK_DGRAM: c_int = 2; + +pub const SA_ONSTACK: c_int = 0x08000000; +pub const SA_SIGINFO: c_int = 0x00000004; +pub const SA_NOCLDWAIT: c_int = 0x00000002; + +pub const SIGTTIN: c_int = 21; +pub const SIGTTOU: c_int = 22; +pub const SIGXCPU: c_int = 24; +pub const SIGXFSZ: c_int = 25; +pub const SIGVTALRM: c_int = 26; +pub const SIGPROF: c_int = 27; +pub const SIGWINCH: c_int = 28; +pub const SIGCHLD: c_int = 17; +pub const SIGBUS: c_int = 7; +pub const SIGUSR1: c_int = 10; +pub const SIGUSR2: c_int = 12; +pub const SIGCONT: c_int = 18; +pub const SIGSTOP: c_int = 19; +pub const SIGTSTP: c_int = 20; +pub const SIGURG: c_int = 23; +pub const SIGIO: c_int = 29; +pub const SIGSYS: c_int = 31; +pub const SIGSTKFLT: c_int = 16; +#[deprecated(since = "0.2.55", note = "Use SIGSYS instead")] +pub const SIGUNUSED: c_int = 31; +pub const SIGPOLL: c_int = 29; +pub const SIGPWR: c_int = 30; +pub const SIG_SETMASK: c_int = 2; +pub const SIG_BLOCK: c_int = 0x000000; +pub const SIG_UNBLOCK: c_int = 0x01; + +pub const POLLWRNORM: c_short = 0x100; +pub const POLLWRBAND: c_short = 0x200; + +pub const O_ASYNC: c_int = 0x2000; +pub const O_NDELAY: c_int = 0x800; + +pub const PTRACE_DETACH: c_uint = 17; + +pub const EFD_NONBLOCK: c_int = 0x800; + +pub const F_GETLK: c_int = 5; +pub const F_GETOWN: c_int = 9; +pub const F_SETOWN: c_int = 8; +pub const F_SETLK: c_int = 6; +pub const F_SETLKW: c_int = 7; +pub const F_OFD_GETLK: c_int = 36; +pub const F_OFD_SETLK: c_int = 37; +pub const F_OFD_SETLKW: c_int = 38; + +pub const F_RDLCK: c_int = 0; +pub const F_WRLCK: c_int = 1; +pub const F_UNLCK: c_int = 2; + +pub const SFD_NONBLOCK: c_int = 0x0800; + +pub const SFD_CLOEXEC: c_int = 0x080000; + +pub const NCCS: usize = 32; + +pub const O_TRUNC: c_int = 512; + +pub const O_CLOEXEC: c_int = 0x80000; + +pub const EBFONT: c_int = 59; +pub const ENOSTR: c_int = 60; +pub const ENODATA: c_int = 61; +pub const ETIME: c_int = 62; +pub const ENOSR: c_int = 63; +pub const ENONET: c_int = 64; +pub const ENOPKG: c_int = 65; +pub const EREMOTE: c_int = 66; +pub const ENOLINK: c_int = 67; +pub const EADV: c_int = 68; +pub const ESRMNT: c_int = 69; +pub const ECOMM: c_int = 70; +pub const EPROTO: c_int = 71; +pub const EDOTDOT: c_int = 73; + +pub const SA_NODEFER: c_int = 0x40000000; +pub const SA_RESETHAND: c_int = 0x80000000; +pub const SA_RESTART: c_int = 0x10000000; +pub const SA_NOCLDSTOP: c_int = 0x00000001; + +pub const EPOLL_CLOEXEC: c_int = 0x80000; + +pub const EFD_CLOEXEC: c_int = 0x80000; + +pub const O_DIRECT: c_int = 0x10000; +pub const O_DIRECTORY: c_int = 0x4000; +pub const O_NOFOLLOW: c_int = 0x8000; + +pub const MAP_LOCKED: c_int = 0x02000; +pub const MAP_NORESERVE: c_int = 0x04000; +pub const MAP_ANON: c_int = 0x0020; +pub const MAP_ANONYMOUS: c_int = 0x0020; +pub const MAP_DENYWRITE: c_int = 0x0800; +pub const MAP_EXECUTABLE: c_int = 0x01000; +pub const MAP_POPULATE: c_int = 0x08000; +pub const MAP_NONBLOCK: c_int = 0x010000; +pub const MAP_STACK: c_int = 0x020000; +pub const MAP_HUGETLB: c_int = 0x040000; +pub const MAP_SYNC: c_int = 0x080000; + +pub const EDEADLOCK: c_int = 35; + +pub const MCL_CURRENT: c_int = 0x0001; +pub const MCL_FUTURE: c_int = 0x0002; +pub const MCL_ONFAULT: c_int = 0x0004; + +pub const SIGSTKSZ: size_t = 16384; +pub const MINSIGSTKSZ: size_t = 5120; +pub const CBAUD: crate::tcflag_t = 0o0010017; +pub const TAB1: crate::tcflag_t = 
0x00000800; +pub const TAB2: crate::tcflag_t = 0x00001000; +pub const TAB3: crate::tcflag_t = 0x00001800; +pub const CR1: crate::tcflag_t = 0x00000200; +pub const CR2: crate::tcflag_t = 0x00000400; +pub const CR3: crate::tcflag_t = 0x00000600; +pub const FF1: crate::tcflag_t = 0x00008000; +pub const BS1: crate::tcflag_t = 0x00002000; +pub const VT1: crate::tcflag_t = 0x00004000; +pub const VWERASE: usize = 14; +pub const VREPRINT: usize = 12; +pub const VSUSP: usize = 10; +pub const VSTART: usize = 8; +pub const VSTOP: usize = 9; +pub const VDISCARD: usize = 13; +pub const VTIME: usize = 5; +pub const IXON: crate::tcflag_t = 0x00000400; +pub const IXOFF: crate::tcflag_t = 0x00001000; +pub const ONLCR: crate::tcflag_t = 0x4; +pub const CSIZE: crate::tcflag_t = 0x00000030; +pub const CS6: crate::tcflag_t = 0x00000010; +pub const CS7: crate::tcflag_t = 0x00000020; +pub const CS8: crate::tcflag_t = 0x00000030; +pub const CSTOPB: crate::tcflag_t = 0x00000040; +pub const CREAD: crate::tcflag_t = 0x00000080; +pub const PARENB: crate::tcflag_t = 0x00000100; +pub const PARODD: crate::tcflag_t = 0x00000200; +pub const HUPCL: crate::tcflag_t = 0x00000400; +pub const CLOCAL: crate::tcflag_t = 0x00000800; +pub const ECHOKE: crate::tcflag_t = 0x00000800; +pub const ECHOE: crate::tcflag_t = 0x00000010; +pub const ECHOK: crate::tcflag_t = 0x00000020; +pub const ECHONL: crate::tcflag_t = 0x00000040; +pub const ECHOPRT: crate::tcflag_t = 0x00000400; +pub const ECHOCTL: crate::tcflag_t = 0x00000200; +pub const ISIG: crate::tcflag_t = 0x00000001; +pub const ICANON: crate::tcflag_t = 0x00000002; +pub const PENDIN: crate::tcflag_t = 0x00004000; +pub const NOFLSH: crate::tcflag_t = 0x00000080; +pub const CIBAUD: crate::tcflag_t = 0o02003600000; +pub const CBAUDEX: crate::tcflag_t = 0o010000; +pub const VSWTC: usize = 7; +pub const OLCUC: crate::tcflag_t = 0o000002; +pub const NLDLY: crate::tcflag_t = 0o000400; +pub const CRDLY: crate::tcflag_t = 0o003000; +pub const TABDLY: crate::tcflag_t = 0o014000; +pub const BSDLY: crate::tcflag_t = 0o020000; +pub const FFDLY: crate::tcflag_t = 0o100000; +pub const VTDLY: crate::tcflag_t = 0o040000; +pub const XTABS: crate::tcflag_t = 0o014000; + +pub const B0: crate::speed_t = 0o000000; +pub const B50: crate::speed_t = 0o000001; +pub const B75: crate::speed_t = 0o000002; +pub const B110: crate::speed_t = 0o000003; +pub const B134: crate::speed_t = 0o000004; +pub const B150: crate::speed_t = 0o000005; +pub const B200: crate::speed_t = 0o000006; +pub const B300: crate::speed_t = 0o000007; +pub const B600: crate::speed_t = 0o000010; +pub const B1200: crate::speed_t = 0o000011; +pub const B1800: crate::speed_t = 0o000012; +pub const B2400: crate::speed_t = 0o000013; +pub const B4800: crate::speed_t = 0o000014; +pub const B9600: crate::speed_t = 0o000015; +pub const B19200: crate::speed_t = 0o000016; +pub const B38400: crate::speed_t = 0o000017; +pub const EXTA: crate::speed_t = B19200; +pub const EXTB: crate::speed_t = B38400; +pub const B57600: crate::speed_t = 0o010001; +pub const B115200: crate::speed_t = 0o010002; +pub const B230400: crate::speed_t = 0o010003; +pub const B460800: crate::speed_t = 0o010004; +pub const B500000: crate::speed_t = 0o010005; +pub const B576000: crate::speed_t = 0o010006; +pub const B921600: crate::speed_t = 0o010007; +pub const B1000000: crate::speed_t = 0o010010; +pub const B1152000: crate::speed_t = 0o010011; +pub const B1500000: crate::speed_t = 0o010012; +pub const B2000000: crate::speed_t = 0o010013; +pub const B2500000: crate::speed_t = 
0o010014; +pub const B3000000: crate::speed_t = 0o010015; +pub const B3500000: crate::speed_t = 0o010016; +pub const B4000000: crate::speed_t = 0o010017; + +pub const VEOL: usize = 11; +pub const VEOL2: usize = 16; +pub const VMIN: usize = 6; +pub const IEXTEN: crate::tcflag_t = 0x00008000; +pub const TOSTOP: crate::tcflag_t = 0x00000100; +pub const FLUSHO: crate::tcflag_t = 0x00001000; +pub const EXTPROC: crate::tcflag_t = 0x00010000; + +pub const TCSANOW: c_int = 0; +pub const TCSADRAIN: c_int = 1; +pub const TCSAFLUSH: c_int = 2; + +// sys/auxv.h +pub const HWCAP_FP: c_ulong = 1 << 0; +pub const HWCAP_ASIMD: c_ulong = 1 << 1; +pub const HWCAP_EVTSTRM: c_ulong = 1 << 2; +pub const HWCAP_AES: c_ulong = 1 << 3; +pub const HWCAP_PMULL: c_ulong = 1 << 4; +pub const HWCAP_SHA1: c_ulong = 1 << 5; +pub const HWCAP_SHA2: c_ulong = 1 << 6; +pub const HWCAP_CRC32: c_ulong = 1 << 7; +pub const HWCAP_ATOMICS: c_ulong = 1 << 8; +pub const HWCAP_FPHP: c_ulong = 1 << 9; +pub const HWCAP_ASIMDHP: c_ulong = 1 << 10; +pub const HWCAP_CPUID: c_ulong = 1 << 11; +pub const HWCAP_ASIMDRDM: c_ulong = 1 << 12; +pub const HWCAP_JSCVT: c_ulong = 1 << 13; +pub const HWCAP_FCMA: c_ulong = 1 << 14; +pub const HWCAP_LRCPC: c_ulong = 1 << 15; +pub const HWCAP_DCPOP: c_ulong = 1 << 16; +pub const HWCAP_SHA3: c_ulong = 1 << 17; +pub const HWCAP_SM3: c_ulong = 1 << 18; +pub const HWCAP_SM4: c_ulong = 1 << 19; +pub const HWCAP_ASIMDDP: c_ulong = 1 << 20; +pub const HWCAP_SHA512: c_ulong = 1 << 21; +pub const HWCAP_SVE: c_ulong = 1 << 22; +pub const HWCAP_ASIMDFHM: c_ulong = 1 << 23; +pub const HWCAP_DIT: c_ulong = 1 << 24; +pub const HWCAP_USCAT: c_ulong = 1 << 25; +pub const HWCAP_ILRCPC: c_ulong = 1 << 26; +pub const HWCAP_FLAGM: c_ulong = 1 << 27; +pub const HWCAP_SSBS: c_ulong = 1 << 28; +pub const HWCAP_SB: c_ulong = 1 << 29; +pub const HWCAP_PACA: c_ulong = 1 << 30; +pub const HWCAP_PACG: c_ulong = 1 << 31; +// FIXME(linux): enable these again once linux-api-headers are up to date enough on CI. 
+// See discussion in https://github.com/rust-lang/libc/pull/1638 +//pub const HWCAP2_DCPODP: c_ulong = 1 << 0; +//pub const HWCAP2_SVE2: c_ulong = 1 << 1; +//pub const HWCAP2_SVEAES: c_ulong = 1 << 2; +//pub const HWCAP2_SVEPMULL: c_ulong = 1 << 3; +//pub const HWCAP2_SVEBITPERM: c_ulong = 1 << 4; +//pub const HWCAP2_SVESHA3: c_ulong = 1 << 5; +//pub const HWCAP2_SVESM4: c_ulong = 1 << 6; +//pub const HWCAP2_FLAGM2: c_ulong = 1 << 7; +//pub const HWCAP2_FRINT: c_ulong = 1 << 8; +//pub const HWCAP2_MTE: c_ulong = 1 << 18; + +// linux/prctl.h +pub const PR_PAC_RESET_KEYS: c_int = 54; +pub const PR_SET_TAGGED_ADDR_CTRL: c_int = 55; +pub const PR_GET_TAGGED_ADDR_CTRL: c_int = 56; +pub const PR_PAC_SET_ENABLED_KEYS: c_int = 60; +pub const PR_PAC_GET_ENABLED_KEYS: c_int = 61; + +pub const PR_TAGGED_ADDR_ENABLE: c_ulong = 1; + +pub const PR_PAC_APIAKEY: c_ulong = 1 << 0; +pub const PR_PAC_APIBKEY: c_ulong = 1 << 1; +pub const PR_PAC_APDAKEY: c_ulong = 1 << 2; +pub const PR_PAC_APDBKEY: c_ulong = 1 << 3; +pub const PR_PAC_APGAKEY: c_ulong = 1 << 4; + +pub const PR_SME_SET_VL: c_int = 63; +pub const PR_SME_GET_VL: c_int = 64; +pub const PR_SME_VL_LEN_MAX: c_int = 0xffff; + +pub const PR_SME_SET_VL_INHERIT: c_ulong = 1 << 17; +pub const PR_SME_SET_VL_ONE_EXEC: c_ulong = 1 << 18; + +// Syscall table +pub const SYS_io_setup: c_long = 0; +pub const SYS_io_destroy: c_long = 1; +pub const SYS_io_submit: c_long = 2; +pub const SYS_io_cancel: c_long = 3; +pub const SYS_io_getevents: c_long = 4; +pub const SYS_setxattr: c_long = 5; +pub const SYS_lsetxattr: c_long = 6; +pub const SYS_fsetxattr: c_long = 7; +pub const SYS_getxattr: c_long = 8; +pub const SYS_lgetxattr: c_long = 9; +pub const SYS_fgetxattr: c_long = 10; +pub const SYS_listxattr: c_long = 11; +pub const SYS_llistxattr: c_long = 12; +pub const SYS_flistxattr: c_long = 13; +pub const SYS_removexattr: c_long = 14; +pub const SYS_lremovexattr: c_long = 15; +pub const SYS_fremovexattr: c_long = 16; +pub const SYS_getcwd: c_long = 17; +pub const SYS_lookup_dcookie: c_long = 18; +pub const SYS_eventfd2: c_long = 19; +pub const SYS_epoll_create1: c_long = 20; +pub const SYS_epoll_ctl: c_long = 21; +pub const SYS_epoll_pwait: c_long = 22; +pub const SYS_dup: c_long = 23; +pub const SYS_dup3: c_long = 24; +pub const SYS_fcntl: c_long = 25; +pub const SYS_inotify_init1: c_long = 26; +pub const SYS_inotify_add_watch: c_long = 27; +pub const SYS_inotify_rm_watch: c_long = 28; +pub const SYS_ioctl: c_long = 29; +pub const SYS_ioprio_set: c_long = 30; +pub const SYS_ioprio_get: c_long = 31; +pub const SYS_flock: c_long = 32; +pub const SYS_mknodat: c_long = 33; +pub const SYS_mkdirat: c_long = 34; +pub const SYS_unlinkat: c_long = 35; +pub const SYS_symlinkat: c_long = 36; +pub const SYS_linkat: c_long = 37; +// 38 is renameat only on LP64 +pub const SYS_umount2: c_long = 39; +pub const SYS_mount: c_long = 40; +pub const SYS_pivot_root: c_long = 41; +pub const SYS_nfsservctl: c_long = 42; +pub const SYS_statfs: c_long = 43; +pub const SYS_fstatfs: c_long = 44; +pub const SYS_truncate: c_long = 45; +pub const SYS_ftruncate: c_long = 46; +pub const SYS_fallocate: c_long = 47; +pub const SYS_faccessat: c_long = 48; +pub const SYS_chdir: c_long = 49; +pub const SYS_fchdir: c_long = 50; +pub const SYS_chroot: c_long = 51; +pub const SYS_fchmod: c_long = 52; +pub const SYS_fchmodat: c_long = 53; +pub const SYS_fchownat: c_long = 54; +pub const SYS_fchown: c_long = 55; +pub const SYS_openat: c_long = 56; +pub const SYS_close: c_long = 57; +pub const SYS_vhangup: 
c_long = 58; +pub const SYS_pipe2: c_long = 59; +pub const SYS_quotactl: c_long = 60; +pub const SYS_getdents64: c_long = 61; +pub const SYS_lseek: c_long = 62; +pub const SYS_read: c_long = 63; +pub const SYS_write: c_long = 64; +pub const SYS_readv: c_long = 65; +pub const SYS_writev: c_long = 66; +pub const SYS_pread64: c_long = 67; +pub const SYS_pwrite64: c_long = 68; +pub const SYS_preadv: c_long = 69; +pub const SYS_pwritev: c_long = 70; +pub const SYS_pselect6: c_long = 72; +pub const SYS_ppoll: c_long = 73; +pub const SYS_signalfd4: c_long = 74; +pub const SYS_vmsplice: c_long = 75; +pub const SYS_splice: c_long = 76; +pub const SYS_tee: c_long = 77; +pub const SYS_readlinkat: c_long = 78; +pub const SYS_newfstatat: c_long = 79; +pub const SYS_fstat: c_long = 80; +pub const SYS_sync: c_long = 81; +pub const SYS_fsync: c_long = 82; +pub const SYS_fdatasync: c_long = 83; +// 84 sync_file_range on LP64 and sync_file_range2 on ILP32 +pub const SYS_timerfd_create: c_long = 85; +pub const SYS_timerfd_settime: c_long = 86; +pub const SYS_timerfd_gettime: c_long = 87; +pub const SYS_utimensat: c_long = 88; +pub const SYS_acct: c_long = 89; +pub const SYS_capget: c_long = 90; +pub const SYS_capset: c_long = 91; +pub const SYS_personality: c_long = 92; +pub const SYS_exit: c_long = 93; +pub const SYS_exit_group: c_long = 94; +pub const SYS_waitid: c_long = 95; +pub const SYS_set_tid_address: c_long = 96; +pub const SYS_unshare: c_long = 97; +pub const SYS_futex: c_long = 98; +pub const SYS_set_robust_list: c_long = 99; +pub const SYS_get_robust_list: c_long = 100; +pub const SYS_nanosleep: c_long = 101; +pub const SYS_getitimer: c_long = 102; +pub const SYS_setitimer: c_long = 103; +pub const SYS_kexec_load: c_long = 104; +pub const SYS_init_module: c_long = 105; +pub const SYS_delete_module: c_long = 106; +pub const SYS_timer_create: c_long = 107; +pub const SYS_timer_gettime: c_long = 108; +pub const SYS_timer_getoverrun: c_long = 109; +pub const SYS_timer_settime: c_long = 110; +pub const SYS_timer_delete: c_long = 111; +pub const SYS_clock_settime: c_long = 112; +pub const SYS_clock_gettime: c_long = 113; +pub const SYS_clock_getres: c_long = 114; +pub const SYS_clock_nanosleep: c_long = 115; +pub const SYS_syslog: c_long = 116; +pub const SYS_ptrace: c_long = 117; +pub const SYS_sched_setparam: c_long = 118; +pub const SYS_sched_setscheduler: c_long = 119; +pub const SYS_sched_getscheduler: c_long = 120; +pub const SYS_sched_getparam: c_long = 121; +pub const SYS_sched_setaffinity: c_long = 122; +pub const SYS_sched_getaffinity: c_long = 123; +pub const SYS_sched_yield: c_long = 124; +pub const SYS_sched_get_priority_max: c_long = 125; +pub const SYS_sched_get_priority_min: c_long = 126; +pub const SYS_sched_rr_get_interval: c_long = 127; +pub const SYS_restart_syscall: c_long = 128; +pub const SYS_kill: c_long = 129; +pub const SYS_tkill: c_long = 130; +pub const SYS_tgkill: c_long = 131; +pub const SYS_sigaltstack: c_long = 132; +pub const SYS_rt_sigsuspend: c_long = 133; +pub const SYS_rt_sigaction: c_long = 134; +pub const SYS_rt_sigprocmask: c_long = 135; +pub const SYS_rt_sigpending: c_long = 136; +pub const SYS_rt_sigtimedwait: c_long = 137; +pub const SYS_rt_sigqueueinfo: c_long = 138; +pub const SYS_rt_sigreturn: c_long = 139; +pub const SYS_setpriority: c_long = 140; +pub const SYS_getpriority: c_long = 141; +pub const SYS_reboot: c_long = 142; +pub const SYS_setregid: c_long = 143; +pub const SYS_setgid: c_long = 144; +pub const SYS_setreuid: c_long = 145; +pub const 
SYS_setuid: c_long = 146; +pub const SYS_setresuid: c_long = 147; +pub const SYS_getresuid: c_long = 148; +pub const SYS_setresgid: c_long = 149; +pub const SYS_getresgid: c_long = 150; +pub const SYS_setfsuid: c_long = 151; +pub const SYS_setfsgid: c_long = 152; +pub const SYS_times: c_long = 153; +pub const SYS_setpgid: c_long = 154; +pub const SYS_getpgid: c_long = 155; +pub const SYS_getsid: c_long = 156; +pub const SYS_setsid: c_long = 157; +pub const SYS_getgroups: c_long = 158; +pub const SYS_setgroups: c_long = 159; +pub const SYS_uname: c_long = 160; +pub const SYS_sethostname: c_long = 161; +pub const SYS_setdomainname: c_long = 162; +// 163 is getrlimit only on LP64 +// 164 is setrlimit only on LP64 +pub const SYS_getrusage: c_long = 165; +pub const SYS_umask: c_long = 166; +pub const SYS_prctl: c_long = 167; +pub const SYS_getcpu: c_long = 168; +pub const SYS_gettimeofday: c_long = 169; +pub const SYS_settimeofday: c_long = 170; +pub const SYS_adjtimex: c_long = 171; +pub const SYS_getpid: c_long = 172; +pub const SYS_getppid: c_long = 173; +pub const SYS_getuid: c_long = 174; +pub const SYS_geteuid: c_long = 175; +pub const SYS_getgid: c_long = 176; +pub const SYS_getegid: c_long = 177; +pub const SYS_gettid: c_long = 178; +pub const SYS_sysinfo: c_long = 179; +pub const SYS_mq_open: c_long = 180; +pub const SYS_mq_unlink: c_long = 181; +pub const SYS_mq_timedsend: c_long = 182; +pub const SYS_mq_timedreceive: c_long = 183; +pub const SYS_mq_notify: c_long = 184; +pub const SYS_mq_getsetattr: c_long = 185; +pub const SYS_msgget: c_long = 186; +pub const SYS_msgctl: c_long = 187; +pub const SYS_msgrcv: c_long = 188; +pub const SYS_msgsnd: c_long = 189; +pub const SYS_semget: c_long = 190; +pub const SYS_semctl: c_long = 191; +pub const SYS_semtimedop: c_long = 192; +pub const SYS_semop: c_long = 193; +pub const SYS_shmget: c_long = 194; +pub const SYS_shmctl: c_long = 195; +pub const SYS_shmat: c_long = 196; +pub const SYS_shmdt: c_long = 197; +pub const SYS_socket: c_long = 198; +pub const SYS_socketpair: c_long = 199; +pub const SYS_bind: c_long = 200; +pub const SYS_listen: c_long = 201; +pub const SYS_accept: c_long = 202; +pub const SYS_connect: c_long = 203; +pub const SYS_getsockname: c_long = 204; +pub const SYS_getpeername: c_long = 205; +pub const SYS_sendto: c_long = 206; +pub const SYS_recvfrom: c_long = 207; +pub const SYS_setsockopt: c_long = 208; +pub const SYS_getsockopt: c_long = 209; +pub const SYS_shutdown: c_long = 210; +pub const SYS_sendmsg: c_long = 211; +pub const SYS_recvmsg: c_long = 212; +pub const SYS_readahead: c_long = 213; +pub const SYS_brk: c_long = 214; +pub const SYS_munmap: c_long = 215; +pub const SYS_mremap: c_long = 216; +pub const SYS_add_key: c_long = 217; +pub const SYS_request_key: c_long = 218; +pub const SYS_keyctl: c_long = 219; +pub const SYS_clone: c_long = 220; +pub const SYS_execve: c_long = 221; +pub const SYS_mmap: c_long = 222; +pub const SYS_swapon: c_long = 224; +pub const SYS_swapoff: c_long = 225; +pub const SYS_mprotect: c_long = 226; +pub const SYS_msync: c_long = 227; +pub const SYS_mlock: c_long = 228; +pub const SYS_munlock: c_long = 229; +pub const SYS_mlockall: c_long = 230; +pub const SYS_munlockall: c_long = 231; +pub const SYS_mincore: c_long = 232; +pub const SYS_madvise: c_long = 233; +pub const SYS_remap_file_pages: c_long = 234; +pub const SYS_mbind: c_long = 235; +pub const SYS_get_mempolicy: c_long = 236; +pub const SYS_set_mempolicy: c_long = 237; +pub const SYS_migrate_pages: c_long = 238; +pub const 
SYS_move_pages: c_long = 239; +pub const SYS_rt_tgsigqueueinfo: c_long = 240; +pub const SYS_perf_event_open: c_long = 241; +pub const SYS_accept4: c_long = 242; +pub const SYS_recvmmsg: c_long = 243; +pub const SYS_wait4: c_long = 260; +pub const SYS_prlimit64: c_long = 261; +pub const SYS_fanotify_init: c_long = 262; +pub const SYS_fanotify_mark: c_long = 263; +pub const SYS_name_to_handle_at: c_long = 264; +pub const SYS_open_by_handle_at: c_long = 265; +pub const SYS_clock_adjtime: c_long = 266; +pub const SYS_syncfs: c_long = 267; +pub const SYS_setns: c_long = 268; +pub const SYS_sendmmsg: c_long = 269; +pub const SYS_process_vm_readv: c_long = 270; +pub const SYS_process_vm_writev: c_long = 271; +pub const SYS_kcmp: c_long = 272; +pub const SYS_finit_module: c_long = 273; +pub const SYS_sched_setattr: c_long = 274; +pub const SYS_sched_getattr: c_long = 275; +pub const SYS_renameat2: c_long = 276; +pub const SYS_seccomp: c_long = 277; +pub const SYS_getrandom: c_long = 278; +pub const SYS_memfd_create: c_long = 279; +pub const SYS_bpf: c_long = 280; +pub const SYS_execveat: c_long = 281; +pub const SYS_userfaultfd: c_long = 282; +pub const SYS_membarrier: c_long = 283; +pub const SYS_mlock2: c_long = 284; +pub const SYS_copy_file_range: c_long = 285; +pub const SYS_preadv2: c_long = 286; +pub const SYS_pwritev2: c_long = 287; +pub const SYS_pkey_mprotect: c_long = 288; +pub const SYS_pkey_alloc: c_long = 289; +pub const SYS_pkey_free: c_long = 290; +pub const SYS_statx: c_long = 291; +pub const SYS_rseq: c_long = 293; +pub const SYS_kexec_file_load: c_long = 294; +pub const SYS_pidfd_send_signal: c_long = 424; +pub const SYS_io_uring_setup: c_long = 425; +pub const SYS_io_uring_enter: c_long = 426; +pub const SYS_io_uring_register: c_long = 427; +pub const SYS_open_tree: c_long = 428; +pub const SYS_move_mount: c_long = 429; +pub const SYS_fsopen: c_long = 430; +pub const SYS_fsconfig: c_long = 431; +pub const SYS_fsmount: c_long = 432; +pub const SYS_fspick: c_long = 433; +pub const SYS_pidfd_open: c_long = 434; +pub const SYS_clone3: c_long = 435; +pub const SYS_close_range: c_long = 436; +pub const SYS_openat2: c_long = 437; +pub const SYS_pidfd_getfd: c_long = 438; +pub const SYS_faccessat2: c_long = 439; +pub const SYS_process_madvise: c_long = 440; +pub const SYS_epoll_pwait2: c_long = 441; +pub const SYS_mount_setattr: c_long = 442; +pub const SYS_quotactl_fd: c_long = 443; +pub const SYS_landlock_create_ruleset: c_long = 444; +pub const SYS_landlock_add_rule: c_long = 445; +pub const SYS_landlock_restrict_self: c_long = 446; +pub const SYS_memfd_secret: c_long = 447; +pub const SYS_process_mrelease: c_long = 448; +pub const SYS_futex_waitv: c_long = 449; +pub const SYS_set_mempolicy_home_node: c_long = 450; +pub const SYS_mseal: c_long = 462; + +pub const PROT_BTI: c_int = 0x10; +pub const PROT_MTE: c_int = 0x20; + +extern "C" { + pub fn sysctl( + name: *mut c_int, + namelen: c_int, + oldp: *mut c_void, + oldlenp: *mut size_t, + newp: *mut c_void, + newlen: size_t, + ) -> c_int; + + pub fn getcontext(ucp: *mut ucontext_t) -> c_int; + pub fn setcontext(ucp: *const ucontext_t) -> c_int; + pub fn makecontext(ucp: *mut ucontext_t, func: extern "C" fn(), argc: c_int, ...); + pub fn swapcontext(uocp: *mut ucontext_t, ucp: *const ucontext_t) -> c_int; +} + +cfg_if! 
{ + if #[cfg(target_pointer_width = "32")] { + mod ilp32; + pub use self::ilp32::*; + } else { + mod lp64; + pub use self::lp64::*; + } +} diff --git a/vendor/libc/src/unix/linux_like/linux/gnu/b64/loongarch64/mod.rs b/vendor/libc/src/unix/linux_like/linux/gnu/b64/loongarch64/mod.rs new file mode 100644 index 00000000000000..8f15ce4d1529a9 --- /dev/null +++ b/vendor/libc/src/unix/linux_like/linux/gnu/b64/loongarch64/mod.rs @@ -0,0 +1,922 @@ +use crate::prelude::*; +use crate::{off64_t, off_t, pthread_mutex_t}; + +pub type wchar_t = i32; + +pub type blksize_t = i32; +pub type nlink_t = u32; +pub type suseconds_t = i64; +pub type __u64 = c_ulonglong; +pub type __s64 = c_longlong; + +s! { + pub struct stat { + pub st_dev: crate::dev_t, + pub st_ino: crate::ino_t, + pub st_mode: crate::mode_t, + pub st_nlink: crate::nlink_t, + pub st_uid: crate::uid_t, + pub st_gid: crate::gid_t, + pub st_rdev: crate::dev_t, + __pad1: crate::dev_t, + pub st_size: off_t, + pub st_blksize: crate::blksize_t, + __pad2: c_int, + pub st_blocks: crate::blkcnt_t, + pub st_atime: crate::time_t, + pub st_atime_nsec: c_long, + pub st_mtime: crate::time_t, + pub st_mtime_nsec: c_long, + pub st_ctime: crate::time_t, + pub st_ctime_nsec: c_long, + __unused: [c_int; 2], + } + + pub struct stat64 { + pub st_dev: crate::dev_t, + pub st_ino: crate::ino64_t, + pub st_mode: crate::mode_t, + pub st_nlink: crate::nlink_t, + pub st_uid: crate::uid_t, + pub st_gid: crate::gid_t, + pub st_rdev: crate::dev_t, + pub __pad1: crate::dev_t, + pub st_size: off64_t, + pub st_blksize: crate::blksize_t, + pub __pad2: c_int, + pub st_blocks: crate::blkcnt_t, + pub st_atime: crate::time_t, + pub st_atime_nsec: c_long, + pub st_mtime: crate::time_t, + pub st_mtime_nsec: c_long, + pub st_ctime: crate::time_t, + pub st_ctime_nsec: c_long, + __unused: [c_int; 2], + } + + pub struct statfs { + pub f_type: crate::__fsword_t, + pub f_bsize: crate::__fsword_t, + pub f_blocks: crate::fsblkcnt_t, + pub f_bfree: crate::fsblkcnt_t, + pub f_bavail: crate::fsblkcnt_t, + pub f_files: crate::fsfilcnt_t, + pub f_ffree: crate::fsfilcnt_t, + pub f_fsid: crate::fsid_t, + pub f_namelen: crate::__fsword_t, + pub f_frsize: crate::__fsword_t, + pub f_flags: crate::__fsword_t, + pub f_spare: [crate::__fsword_t; 4], + } + + pub struct statfs64 { + pub f_type: crate::__fsword_t, + pub f_bsize: crate::__fsword_t, + pub f_blocks: u64, + pub f_bfree: u64, + pub f_bavail: u64, + pub f_files: u64, + pub f_ffree: u64, + pub f_fsid: crate::fsid_t, + pub f_namelen: crate::__fsword_t, + pub f_frsize: crate::__fsword_t, + pub f_flags: crate::__fsword_t, + pub f_spare: [crate::__fsword_t; 4], + } + + pub struct flock { + pub l_type: c_short, + pub l_whence: c_short, + pub l_start: off_t, + pub l_len: off_t, + pub l_pid: crate::pid_t, + } + + pub struct flock64 { + pub l_type: c_short, + pub l_whence: c_short, + pub l_start: off64_t, + pub l_len: off64_t, + pub l_pid: crate::pid_t, + } + + pub struct statvfs { + pub f_bsize: c_ulong, + pub f_frsize: c_ulong, + pub f_blocks: crate::fsblkcnt_t, + pub f_bfree: crate::fsblkcnt_t, + pub f_bavail: crate::fsblkcnt_t, + pub f_files: crate::fsfilcnt_t, + pub f_ffree: crate::fsfilcnt_t, + pub f_favail: crate::fsfilcnt_t, + pub f_fsid: c_ulong, + pub f_flag: c_ulong, + pub f_namemax: c_ulong, + __f_spare: [c_int; 6], + } + + pub struct statvfs64 { + pub f_bsize: c_ulong, + pub f_frsize: c_ulong, + pub f_blocks: u64, + pub f_bfree: u64, + pub f_bavail: u64, + pub f_files: u64, + pub f_ffree: u64, + pub f_favail: u64, + pub f_fsid: c_ulong, + pub 
f_flag: c_ulong, + pub f_namemax: c_ulong, + __f_spare: [c_int; 6], + } + + pub struct pthread_attr_t { + __size: [c_ulong; 7], + } + + // FIXME(1.0): This should not implement `PartialEq` + #[allow(unpredictable_function_pointer_comparisons)] + pub struct sigaction { + pub sa_sigaction: crate::sighandler_t, + pub sa_mask: crate::sigset_t, + pub sa_flags: c_int, + pub sa_restorer: Option, + } + + pub struct stack_t { + pub ss_sp: *mut c_void, + pub ss_flags: c_int, + pub ss_size: size_t, + } + + pub struct siginfo_t { + pub si_signo: c_int, + pub si_errno: c_int, + pub si_code: c_int, + #[doc(hidden)] + #[deprecated( + since = "0.2.54", + note = "Please leave a comment on \ + https://github.com/rust-lang/libc/pull/1316 if you're using \ + this field" + )] + pub _pad: [c_int; 29], + _align: [u64; 0], + } + + pub struct ipc_perm { + pub __key: crate::key_t, + pub uid: crate::uid_t, + pub gid: crate::gid_t, + pub cuid: crate::uid_t, + pub cgid: crate::gid_t, + pub mode: c_uint, + pub __seq: c_ushort, + __pad2: c_ushort, + __unused1: c_ulong, + __unused2: c_ulong, + } + + pub struct shmid_ds { + pub shm_perm: crate::ipc_perm, + pub shm_segsz: size_t, + pub shm_atime: crate::time_t, + pub shm_dtime: crate::time_t, + pub shm_ctime: crate::time_t, + pub shm_cpid: crate::pid_t, + pub shm_lpid: crate::pid_t, + pub shm_nattch: crate::shmatt_t, + __unused4: c_ulong, + __unused5: c_ulong, + } + + pub struct user_regs_struct { + pub regs: [u64; 32], + pub orig_a0: u64, + pub csr_era: u64, + pub csr_badv: u64, + pub reserved: [u64; 10], + } + + pub struct user_fp_struct { + pub fpr: [u64; 32], + pub fcc: u64, + pub fcsr: u32, + } + + pub struct ucontext_t { + pub uc_flags: c_ulong, + pub uc_link: *mut ucontext_t, + pub uc_stack: crate::stack_t, + pub uc_sigmask: crate::sigset_t, + pub uc_mcontext: mcontext_t, + } + + #[repr(align(16))] + pub struct mcontext_t { + pub __pc: c_ulonglong, + pub __gregs: [c_ulonglong; 32], + pub __flags: c_uint, + pub __extcontext: [c_ulonglong; 0], + } + + #[repr(align(8))] + pub struct clone_args { + pub flags: c_ulonglong, + pub pidfd: c_ulonglong, + pub child_tid: c_ulonglong, + pub parent_tid: c_ulonglong, + pub exit_signal: c_ulonglong, + pub stack: c_ulonglong, + pub stack_size: c_ulonglong, + pub tls: c_ulonglong, + pub set_tid: c_ulonglong, + pub set_tid_size: c_ulonglong, + pub cgroup: c_ulonglong, + } +} + +s_no_extra_traits! 
{ + #[repr(align(16))] + pub struct max_align_t { + priv_: [f64; 4], + } +} + +pub const __SIZEOF_PTHREAD_CONDATTR_T: usize = 4; +pub const __SIZEOF_PTHREAD_MUTEXATTR_T: usize = 4; +pub const __SIZEOF_PTHREAD_BARRIERATTR_T: usize = 4; +pub const __SIZEOF_PTHREAD_MUTEX_T: usize = 40; +pub const __SIZEOF_PTHREAD_RWLOCK_T: usize = 56; +pub const __SIZEOF_PTHREAD_BARRIER_T: usize = 32; + +#[cfg(target_endian = "little")] +pub const PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP: crate::pthread_mutex_t = pthread_mutex_t { + size: [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ], +}; +#[cfg(target_endian = "little")] +pub const PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP: crate::pthread_mutex_t = pthread_mutex_t { + size: [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ], +}; +#[cfg(target_endian = "little")] +pub const PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP: crate::pthread_mutex_t = pthread_mutex_t { + size: [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ], +}; +#[cfg(target_endian = "big")] +pub const PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP: crate::pthread_mutex_t = pthread_mutex_t { + size: [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ], +}; +#[cfg(target_endian = "big")] +pub const PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP: crate::pthread_mutex_t = pthread_mutex_t { + size: [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ], +}; +#[cfg(target_endian = "big")] +pub const PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP: crate::pthread_mutex_t = pthread_mutex_t { + size: [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ], +}; + +pub const HWCAP_LOONGARCH_CPUCFG: c_ulong = 1 << 0; +pub const HWCAP_LOONGARCH_LAM: c_ulong = 1 << 1; +pub const HWCAP_LOONGARCH_UAL: c_ulong = 1 << 2; +pub const HWCAP_LOONGARCH_FPU: c_ulong = 1 << 3; +pub const HWCAP_LOONGARCH_LSX: c_ulong = 1 << 4; +pub const HWCAP_LOONGARCH_LASX: c_ulong = 1 << 5; +pub const HWCAP_LOONGARCH_CRC32: c_ulong = 1 << 6; +pub const HWCAP_LOONGARCH_COMPLEX: c_ulong = 1 << 7; +pub const HWCAP_LOONGARCH_CRYPTO: c_ulong = 1 << 8; +pub const HWCAP_LOONGARCH_LVZ: c_ulong = 1 << 9; +pub const HWCAP_LOONGARCH_LBT_X86: c_ulong = 1 << 10; +pub const HWCAP_LOONGARCH_LBT_ARM: c_ulong = 1 << 11; +pub const HWCAP_LOONGARCH_LBT_MIPS: c_ulong = 1 << 12; +pub const HWCAP_LOONGARCH_PTW: c_ulong = 1 << 13; + +pub const SYS_io_setup: c_long = 0; +pub const SYS_io_destroy: c_long = 1; +pub const SYS_io_submit: c_long = 2; +pub const SYS_io_cancel: c_long = 3; +pub const SYS_io_getevents: c_long = 4; +pub const SYS_setxattr: c_long = 5; +pub const SYS_lsetxattr: c_long = 6; +pub const SYS_fsetxattr: c_long = 7; +pub const SYS_getxattr: c_long = 8; +pub const SYS_lgetxattr: c_long = 9; +pub const SYS_fgetxattr: c_long = 10; +pub const SYS_listxattr: c_long = 11; +pub const SYS_llistxattr: c_long = 12; +pub const SYS_flistxattr: c_long = 13; +pub const SYS_removexattr: c_long = 14; +pub const SYS_lremovexattr: c_long = 15; +pub const SYS_fremovexattr: c_long = 16; +pub const SYS_getcwd: c_long = 17; +pub const SYS_lookup_dcookie: c_long = 18; +pub const SYS_eventfd2: c_long = 19; +pub const SYS_epoll_create1: 
c_long = 20; +pub const SYS_epoll_ctl: c_long = 21; +pub const SYS_epoll_pwait: c_long = 22; +pub const SYS_dup: c_long = 23; +pub const SYS_dup3: c_long = 24; +pub const SYS_fcntl: c_long = 25; +pub const SYS_inotify_init1: c_long = 26; +pub const SYS_inotify_add_watch: c_long = 27; +pub const SYS_inotify_rm_watch: c_long = 28; +pub const SYS_ioctl: c_long = 29; +pub const SYS_ioprio_set: c_long = 30; +pub const SYS_ioprio_get: c_long = 31; +pub const SYS_flock: c_long = 32; +pub const SYS_mknodat: c_long = 33; +pub const SYS_mkdirat: c_long = 34; +pub const SYS_unlinkat: c_long = 35; +pub const SYS_symlinkat: c_long = 36; +pub const SYS_linkat: c_long = 37; +pub const SYS_umount2: c_long = 39; +pub const SYS_mount: c_long = 40; +pub const SYS_pivot_root: c_long = 41; +pub const SYS_nfsservctl: c_long = 42; +pub const SYS_statfs: c_long = 43; +pub const SYS_fstatfs: c_long = 44; +pub const SYS_truncate: c_long = 45; +pub const SYS_ftruncate: c_long = 46; +pub const SYS_fallocate: c_long = 47; +pub const SYS_faccessat: c_long = 48; +pub const SYS_chdir: c_long = 49; +pub const SYS_fchdir: c_long = 50; +pub const SYS_chroot: c_long = 51; +pub const SYS_fchmod: c_long = 52; +pub const SYS_fchmodat: c_long = 53; +pub const SYS_fchownat: c_long = 54; +pub const SYS_fchown: c_long = 55; +pub const SYS_openat: c_long = 56; +pub const SYS_close: c_long = 57; +pub const SYS_vhangup: c_long = 58; +pub const SYS_pipe2: c_long = 59; +pub const SYS_quotactl: c_long = 60; +pub const SYS_getdents64: c_long = 61; +pub const SYS_lseek: c_long = 62; +pub const SYS_read: c_long = 63; +pub const SYS_write: c_long = 64; +pub const SYS_readv: c_long = 65; +pub const SYS_writev: c_long = 66; +pub const SYS_pread64: c_long = 67; +pub const SYS_pwrite64: c_long = 68; +pub const SYS_preadv: c_long = 69; +pub const SYS_pwritev: c_long = 70; +pub const SYS_sendfile: c_long = 71; +pub const SYS_pselect6: c_long = 72; +pub const SYS_ppoll: c_long = 73; +pub const SYS_signalfd4: c_long = 74; +pub const SYS_vmsplice: c_long = 75; +pub const SYS_splice: c_long = 76; +pub const SYS_tee: c_long = 77; +pub const SYS_readlinkat: c_long = 78; +pub const SYS_sync: c_long = 81; +pub const SYS_fsync: c_long = 82; +pub const SYS_fdatasync: c_long = 83; +pub const SYS_sync_file_range: c_long = 84; +pub const SYS_timerfd_create: c_long = 85; +pub const SYS_timerfd_settime: c_long = 86; +pub const SYS_timerfd_gettime: c_long = 87; +pub const SYS_utimensat: c_long = 88; +pub const SYS_acct: c_long = 89; +pub const SYS_capget: c_long = 90; +pub const SYS_capset: c_long = 91; +pub const SYS_personality: c_long = 92; +pub const SYS_exit: c_long = 93; +pub const SYS_exit_group: c_long = 94; +pub const SYS_waitid: c_long = 95; +pub const SYS_set_tid_address: c_long = 96; +pub const SYS_unshare: c_long = 97; +pub const SYS_futex: c_long = 98; +pub const SYS_set_robust_list: c_long = 99; +pub const SYS_get_robust_list: c_long = 100; +pub const SYS_nanosleep: c_long = 101; +pub const SYS_getitimer: c_long = 102; +pub const SYS_setitimer: c_long = 103; +pub const SYS_kexec_load: c_long = 104; +pub const SYS_init_module: c_long = 105; +pub const SYS_delete_module: c_long = 106; +pub const SYS_timer_create: c_long = 107; +pub const SYS_timer_gettime: c_long = 108; +pub const SYS_timer_getoverrun: c_long = 109; +pub const SYS_timer_settime: c_long = 110; +pub const SYS_timer_delete: c_long = 111; +pub const SYS_clock_settime: c_long = 112; +pub const SYS_clock_gettime: c_long = 113; +pub const SYS_clock_getres: c_long = 114; +pub const 
SYS_clock_nanosleep: c_long = 115; +pub const SYS_syslog: c_long = 116; +pub const SYS_ptrace: c_long = 117; +pub const SYS_sched_setparam: c_long = 118; +pub const SYS_sched_setscheduler: c_long = 119; +pub const SYS_sched_getscheduler: c_long = 120; +pub const SYS_sched_getparam: c_long = 121; +pub const SYS_sched_setaffinity: c_long = 122; +pub const SYS_sched_getaffinity: c_long = 123; +pub const SYS_sched_yield: c_long = 124; +pub const SYS_sched_get_priority_max: c_long = 125; +pub const SYS_sched_get_priority_min: c_long = 126; +pub const SYS_sched_rr_get_interval: c_long = 127; +pub const SYS_restart_syscall: c_long = 128; +pub const SYS_kill: c_long = 129; +pub const SYS_tkill: c_long = 130; +pub const SYS_tgkill: c_long = 131; +pub const SYS_sigaltstack: c_long = 132; +pub const SYS_rt_sigsuspend: c_long = 133; +pub const SYS_rt_sigaction: c_long = 134; +pub const SYS_rt_sigprocmask: c_long = 135; +pub const SYS_rt_sigpending: c_long = 136; +pub const SYS_rt_sigtimedwait: c_long = 137; +pub const SYS_rt_sigqueueinfo: c_long = 138; +pub const SYS_rt_sigreturn: c_long = 139; +pub const SYS_setpriority: c_long = 140; +pub const SYS_getpriority: c_long = 141; +pub const SYS_reboot: c_long = 142; +pub const SYS_setregid: c_long = 143; +pub const SYS_setgid: c_long = 144; +pub const SYS_setreuid: c_long = 145; +pub const SYS_setuid: c_long = 146; +pub const SYS_setresuid: c_long = 147; +pub const SYS_getresuid: c_long = 148; +pub const SYS_setresgid: c_long = 149; +pub const SYS_getresgid: c_long = 150; +pub const SYS_setfsuid: c_long = 151; +pub const SYS_setfsgid: c_long = 152; +pub const SYS_times: c_long = 153; +pub const SYS_setpgid: c_long = 154; +pub const SYS_getpgid: c_long = 155; +pub const SYS_getsid: c_long = 156; +pub const SYS_setsid: c_long = 157; +pub const SYS_getgroups: c_long = 158; +pub const SYS_setgroups: c_long = 159; +pub const SYS_uname: c_long = 160; +pub const SYS_sethostname: c_long = 161; +pub const SYS_setdomainname: c_long = 162; +pub const SYS_getrusage: c_long = 165; +pub const SYS_umask: c_long = 166; +pub const SYS_prctl: c_long = 167; +pub const SYS_getcpu: c_long = 168; +pub const SYS_gettimeofday: c_long = 169; +pub const SYS_settimeofday: c_long = 170; +pub const SYS_adjtimex: c_long = 171; +pub const SYS_getpid: c_long = 172; +pub const SYS_getppid: c_long = 173; +pub const SYS_getuid: c_long = 174; +pub const SYS_geteuid: c_long = 175; +pub const SYS_getgid: c_long = 176; +pub const SYS_getegid: c_long = 177; +pub const SYS_gettid: c_long = 178; +pub const SYS_sysinfo: c_long = 179; +pub const SYS_mq_open: c_long = 180; +pub const SYS_mq_unlink: c_long = 181; +pub const SYS_mq_timedsend: c_long = 182; +pub const SYS_mq_timedreceive: c_long = 183; +pub const SYS_mq_notify: c_long = 184; +pub const SYS_mq_getsetattr: c_long = 185; +pub const SYS_msgget: c_long = 186; +pub const SYS_msgctl: c_long = 187; +pub const SYS_msgrcv: c_long = 188; +pub const SYS_msgsnd: c_long = 189; +pub const SYS_semget: c_long = 190; +pub const SYS_semctl: c_long = 191; +pub const SYS_semtimedop: c_long = 192; +pub const SYS_semop: c_long = 193; +pub const SYS_shmget: c_long = 194; +pub const SYS_shmctl: c_long = 195; +pub const SYS_shmat: c_long = 196; +pub const SYS_shmdt: c_long = 197; +pub const SYS_socket: c_long = 198; +pub const SYS_socketpair: c_long = 199; +pub const SYS_bind: c_long = 200; +pub const SYS_listen: c_long = 201; +pub const SYS_accept: c_long = 202; +pub const SYS_connect: c_long = 203; +pub const SYS_getsockname: c_long = 204; +pub const 
SYS_getpeername: c_long = 205; +pub const SYS_sendto: c_long = 206; +pub const SYS_recvfrom: c_long = 207; +pub const SYS_setsockopt: c_long = 208; +pub const SYS_getsockopt: c_long = 209; +pub const SYS_shutdown: c_long = 210; +pub const SYS_sendmsg: c_long = 211; +pub const SYS_recvmsg: c_long = 212; +pub const SYS_readahead: c_long = 213; +pub const SYS_brk: c_long = 214; +pub const SYS_munmap: c_long = 215; +pub const SYS_mremap: c_long = 216; +pub const SYS_add_key: c_long = 217; +pub const SYS_request_key: c_long = 218; +pub const SYS_keyctl: c_long = 219; +pub const SYS_clone: c_long = 220; +pub const SYS_execve: c_long = 221; +pub const SYS_mmap: c_long = 222; +pub const SYS_fadvise64: c_long = 223; +pub const SYS_swapon: c_long = 224; +pub const SYS_swapoff: c_long = 225; +pub const SYS_mprotect: c_long = 226; +pub const SYS_msync: c_long = 227; +pub const SYS_mlock: c_long = 228; +pub const SYS_munlock: c_long = 229; +pub const SYS_mlockall: c_long = 230; +pub const SYS_munlockall: c_long = 231; +pub const SYS_mincore: c_long = 232; +pub const SYS_madvise: c_long = 233; +pub const SYS_remap_file_pages: c_long = 234; +pub const SYS_mbind: c_long = 235; +pub const SYS_get_mempolicy: c_long = 236; +pub const SYS_set_mempolicy: c_long = 237; +pub const SYS_migrate_pages: c_long = 238; +pub const SYS_move_pages: c_long = 239; +pub const SYS_rt_tgsigqueueinfo: c_long = 240; +pub const SYS_perf_event_open: c_long = 241; +pub const SYS_accept4: c_long = 242; +pub const SYS_recvmmsg: c_long = 243; +//pub const SYS_arch_specific_syscall: c_long = 244; +pub const SYS_wait4: c_long = 260; +pub const SYS_prlimit64: c_long = 261; +pub const SYS_fanotify_init: c_long = 262; +pub const SYS_fanotify_mark: c_long = 263; +pub const SYS_name_to_handle_at: c_long = 264; +pub const SYS_open_by_handle_at: c_long = 265; +pub const SYS_clock_adjtime: c_long = 266; +pub const SYS_syncfs: c_long = 267; +pub const SYS_setns: c_long = 268; +pub const SYS_sendmmsg: c_long = 269; +pub const SYS_process_vm_readv: c_long = 270; +pub const SYS_process_vm_writev: c_long = 271; +pub const SYS_kcmp: c_long = 272; +pub const SYS_finit_module: c_long = 273; +pub const SYS_sched_setattr: c_long = 274; +pub const SYS_sched_getattr: c_long = 275; +pub const SYS_renameat2: c_long = 276; +pub const SYS_seccomp: c_long = 277; +pub const SYS_getrandom: c_long = 278; +pub const SYS_memfd_create: c_long = 279; +pub const SYS_bpf: c_long = 280; +pub const SYS_execveat: c_long = 281; +pub const SYS_userfaultfd: c_long = 282; +pub const SYS_membarrier: c_long = 283; +pub const SYS_mlock2: c_long = 284; +pub const SYS_copy_file_range: c_long = 285; +pub const SYS_preadv2: c_long = 286; +pub const SYS_pwritev2: c_long = 287; +pub const SYS_pkey_mprotect: c_long = 288; +pub const SYS_pkey_alloc: c_long = 289; +pub const SYS_pkey_free: c_long = 290; +pub const SYS_statx: c_long = 291; +pub const SYS_io_pgetevents: c_long = 292; +pub const SYS_rseq: c_long = 293; +pub const SYS_kexec_file_load: c_long = 294; +pub const SYS_pidfd_send_signal: c_long = 424; +pub const SYS_io_uring_setup: c_long = 425; +pub const SYS_io_uring_enter: c_long = 426; +pub const SYS_io_uring_register: c_long = 427; +pub const SYS_open_tree: c_long = 428; +pub const SYS_move_mount: c_long = 429; +pub const SYS_fsopen: c_long = 430; +pub const SYS_fsconfig: c_long = 431; +pub const SYS_fsmount: c_long = 432; +pub const SYS_fspick: c_long = 433; +pub const SYS_pidfd_open: c_long = 434; +pub const SYS_clone3: c_long = 435; +pub const SYS_close_range: c_long = 
436; +pub const SYS_openat2: c_long = 437; +pub const SYS_pidfd_getfd: c_long = 438; +pub const SYS_faccessat2: c_long = 439; +pub const SYS_process_madvise: c_long = 440; +pub const SYS_epoll_pwait2: c_long = 441; +pub const SYS_mount_setattr: c_long = 442; +pub const SYS_quotactl_fd: c_long = 443; +pub const SYS_landlock_create_ruleset: c_long = 444; +pub const SYS_landlock_add_rule: c_long = 445; +pub const SYS_landlock_restrict_self: c_long = 446; +pub const SYS_process_mrelease: c_long = 448; +pub const SYS_futex_waitv: c_long = 449; +pub const SYS_set_mempolicy_home_node: c_long = 450; + +pub const POSIX_FADV_DONTNEED: c_int = 4; +pub const POSIX_FADV_NOREUSE: c_int = 5; +pub const O_DIRECT: c_int = 0o00040000; +pub const O_DIRECTORY: c_int = 0o00200000; +pub const O_NOFOLLOW: c_int = 0o00400000; +pub const O_TRUNC: c_int = 0o00001000; +pub const O_NOATIME: c_int = 0o1000000; +pub const O_CLOEXEC: c_int = 0o02000000; +pub const O_PATH: c_int = 0o10000000; +pub const O_TMPFILE: c_int = 0o20000000 | O_DIRECTORY; +pub const O_APPEND: c_int = 0o00002000; +pub const O_CREAT: c_int = 0o00000100; +pub const O_EXCL: c_int = 0o00000200; +pub const O_NOCTTY: c_int = 0o00000400; +pub const O_NONBLOCK: c_int = 0o00004000; +pub const FASYNC: c_int = 0o00020000; +pub const O_SYNC: c_int = 0o04010000; +pub const O_RSYNC: c_int = 0o04010000; +pub const O_FSYNC: c_int = O_SYNC; +pub const O_ASYNC: c_int = 0o00020000; +pub const O_DSYNC: c_int = 0o00010000; +pub const O_NDELAY: c_int = O_NONBLOCK; +pub const F_RDLCK: c_int = 0; +pub const F_WRLCK: c_int = 1; +pub const F_UNLCK: c_int = 2; +pub const F_GETLK: c_int = 5; +pub const F_SETLK: c_int = 6; +pub const F_SETLKW: c_int = 7; +pub const F_SETOWN: c_int = 8; +pub const F_GETOWN: c_int = 9; +pub const F_OFD_GETLK: c_int = 36; +pub const F_OFD_SETLK: c_int = 37; +pub const F_OFD_SETLKW: c_int = 38; + +pub const EDEADLK: c_int = 35; +pub const EDEADLOCK: c_int = 35; +pub const ENAMETOOLONG: c_int = 36; +pub const ENOLCK: c_int = 37; +pub const ENOSYS: c_int = 38; +pub const ENOTEMPTY: c_int = 39; +pub const ELOOP: c_int = 40; +pub const ENOMSG: c_int = 42; +pub const EIDRM: c_int = 43; +pub const ECHRNG: c_int = 44; +pub const EL2NSYNC: c_int = 45; +pub const EL3HLT: c_int = 46; +pub const EL3RST: c_int = 47; +pub const ELNRNG: c_int = 48; +pub const EUNATCH: c_int = 49; +pub const ENOCSI: c_int = 50; +pub const EL2HLT: c_int = 51; +pub const EBADE: c_int = 52; +pub const EBADR: c_int = 53; +pub const EXFULL: c_int = 54; +pub const ENOANO: c_int = 55; +pub const EBADRQC: c_int = 56; +pub const EBADSLT: c_int = 57; +pub const EBFONT: c_int = 59; +pub const ENOSTR: c_int = 60; +pub const ENODATA: c_int = 61; +pub const ETIME: c_int = 62; +pub const ENOSR: c_int = 63; +pub const ENONET: c_int = 64; +pub const ENOPKG: c_int = 65; +pub const EREMOTE: c_int = 66; +pub const ENOLINK: c_int = 67; +pub const EADV: c_int = 68; +pub const ESRMNT: c_int = 69; +pub const ECOMM: c_int = 70; +pub const EPROTO: c_int = 71; +pub const EDOTDOT: c_int = 73; +pub const EMULTIHOP: c_int = 72; +pub const EOVERFLOW: c_int = 75; +pub const ENOTUNIQ: c_int = 76; +pub const EBADFD: c_int = 77; +pub const EBADMSG: c_int = 74; +pub const EREMCHG: c_int = 78; +pub const ELIBACC: c_int = 79; +pub const ELIBBAD: c_int = 80; +pub const ELIBSCN: c_int = 81; +pub const ELIBMAX: c_int = 82; +pub const ELIBEXEC: c_int = 83; +pub const EILSEQ: c_int = 84; +pub const ERESTART: c_int = 85; +pub const ESTRPIPE: c_int = 86; +pub const EUSERS: c_int = 87; +pub const ENOTSOCK: c_int = 88; 
+pub const EDESTADDRREQ: c_int = 89; +pub const EMSGSIZE: c_int = 90; +pub const EPROTOTYPE: c_int = 91; +pub const ENOPROTOOPT: c_int = 92; +pub const EPROTONOSUPPORT: c_int = 93; +pub const ESOCKTNOSUPPORT: c_int = 94; +pub const EOPNOTSUPP: c_int = 95; +pub const EPFNOSUPPORT: c_int = 96; +pub const EAFNOSUPPORT: c_int = 97; +pub const EADDRINUSE: c_int = 98; +pub const EADDRNOTAVAIL: c_int = 99; +pub const ENETDOWN: c_int = 100; +pub const ENETUNREACH: c_int = 101; +pub const ENETRESET: c_int = 102; +pub const ECONNABORTED: c_int = 103; +pub const ECONNRESET: c_int = 104; +pub const ENOBUFS: c_int = 105; +pub const EISCONN: c_int = 106; +pub const ENOTCONN: c_int = 107; +pub const ESHUTDOWN: c_int = 108; +pub const ETOOMANYREFS: c_int = 109; +pub const ETIMEDOUT: c_int = 110; +pub const ECONNREFUSED: c_int = 111; +pub const EHOSTDOWN: c_int = 112; +pub const EHOSTUNREACH: c_int = 113; +pub const EALREADY: c_int = 114; +pub const EINPROGRESS: c_int = 115; +pub const ESTALE: c_int = 116; +pub const EUCLEAN: c_int = 117; +pub const ENOTNAM: c_int = 118; +pub const ENAVAIL: c_int = 119; +pub const EISNAM: c_int = 120; +pub const EREMOTEIO: c_int = 121; +pub const EDQUOT: c_int = 122; +pub const ENOMEDIUM: c_int = 123; +pub const EMEDIUMTYPE: c_int = 124; +pub const ECANCELED: c_int = 125; +pub const ENOKEY: c_int = 126; +pub const EKEYEXPIRED: c_int = 127; +pub const EKEYREVOKED: c_int = 128; +pub const EKEYREJECTED: c_int = 129; +pub const EOWNERDEAD: c_int = 130; +pub const ENOTRECOVERABLE: c_int = 131; +pub const ERFKILL: c_int = 132; +pub const EHWPOISON: c_int = 133; + +pub const MADV_SOFT_OFFLINE: c_int = 101; + +pub const MAP_NORESERVE: c_int = 0x4000; +pub const MAP_ANONYMOUS: c_int = 0x0020; +pub const MAP_ANON: c_int = 0x0020; +pub const MAP_GROWSDOWN: c_int = 0x0100; +pub const MAP_DENYWRITE: c_int = 0x0800; +pub const MAP_EXECUTABLE: c_int = 0x1000; +pub const MAP_LOCKED: c_int = 0x2000; +pub const MAP_POPULATE: c_int = 0x8000; +pub const MAP_NONBLOCK: c_int = 0x10000; +pub const MAP_STACK: c_int = 0x20000; +pub const MAP_HUGETLB: c_int = 0x40000; +pub const MAP_SYNC: c_int = 0x080000; +pub const MCL_CURRENT: c_int = 0x0001; +pub const MCL_FUTURE: c_int = 0x0002; +pub const MCL_ONFAULT: c_int = 0x0004; + +pub const SOCK_STREAM: c_int = 1; +pub const SOCK_DGRAM: c_int = 2; + +pub const SFD_NONBLOCK: c_int = 0x800; +pub const SFD_CLOEXEC: c_int = 0x080000; +pub const SA_NODEFER: c_int = 0x40000000; +pub const SA_RESETHAND: c_int = 0x80000000; +pub const SA_RESTART: c_int = 0x10000000; +pub const SA_NOCLDSTOP: c_int = 0x00000001; +pub const SA_ONSTACK: c_int = 0x08000000; +pub const SA_SIGINFO: c_int = 0x00000004; +pub const SA_NOCLDWAIT: c_int = 0x00000002; +pub const SIG_BLOCK: c_int = 0; +pub const SIG_UNBLOCK: c_int = 1; +pub const SIG_SETMASK: c_int = 2; +pub const SIGBUS: c_int = 7; +pub const SIGUSR1: c_int = 10; +pub const SIGUSR2: c_int = 12; +pub const SIGSTKFLT: c_int = 16; +pub const SIGCHLD: c_int = 17; +pub const SIGCONT: c_int = 18; +pub const SIGSTOP: c_int = 19; +pub const SIGTSTP: c_int = 20; +pub const SIGTTIN: c_int = 21; +pub const SIGTTOU: c_int = 22; +pub const SIGURG: c_int = 23; +pub const SIGXCPU: c_int = 24; +pub const SIGXFSZ: c_int = 25; +pub const SIGVTALRM: c_int = 26; +pub const SIGPROF: c_int = 27; +pub const SIGWINCH: c_int = 28; +pub const SIGIO: c_int = 29; +pub const SIGPOLL: c_int = 29; +pub const SIGPWR: c_int = 30; +pub const SIGSYS: c_int = 31; +pub const SIGUNUSED: c_int = 31; + +pub const POLLWRNORM: c_short = 0x100; +pub const 
POLLWRBAND: c_short = 0x200; + +pub const PTRACE_GETFPREGS: c_uint = 14; +pub const PTRACE_SETFPREGS: c_uint = 15; +pub const PTRACE_DETACH: c_uint = 17; +pub const PTRACE_GETFPXREGS: c_uint = 18; +pub const PTRACE_SETFPXREGS: c_uint = 19; +pub const PTRACE_GETREGS: c_uint = 12; +pub const PTRACE_SETREGS: c_uint = 13; +pub const PTRACE_SYSEMU: c_uint = 31; +pub const PTRACE_SYSEMU_SINGLESTEP: c_uint = 32; + +pub const RTLD_DEEPBIND: c_int = 0x8; +pub const RTLD_GLOBAL: c_int = 0x100; +pub const RTLD_NOLOAD: c_int = 0x4; + +pub const VEOF: usize = 4; +pub const VTIME: usize = 5; +pub const VMIN: usize = 6; +pub const VSWTC: usize = 7; +pub const VSTART: usize = 8; +pub const VSTOP: usize = 9; +pub const VSUSP: usize = 10; +pub const VEOL: usize = 11; +pub const VREPRINT: usize = 12; +pub const VDISCARD: usize = 13; +pub const VWERASE: usize = 14; +pub const VEOL2: usize = 16; +pub const IEXTEN: crate::tcflag_t = 0x00008000; +pub const TOSTOP: crate::tcflag_t = 0x00000100; +pub const FLUSHO: crate::tcflag_t = 0x00001000; +pub const EXTPROC: crate::tcflag_t = 0x00010000; +pub const TCSANOW: c_int = 0; +pub const TCSADRAIN: c_int = 1; +pub const TCSAFLUSH: c_int = 2; +pub const SIGSTKSZ: size_t = 16384; +pub const MINSIGSTKSZ: size_t = 4096; +pub const CBAUD: crate::tcflag_t = 0o0010017; +pub const CSIZE: crate::tcflag_t = 0x00000030; +pub const CS6: crate::tcflag_t = 0x00000010; +pub const CS7: crate::tcflag_t = 0x00000020; +pub const CS8: crate::tcflag_t = 0x00000030; +pub const CSTOPB: crate::tcflag_t = 0x00000040; +pub const CREAD: crate::tcflag_t = 0x00000080; +pub const PARENB: crate::tcflag_t = 0x00000100; +pub const PARODD: crate::tcflag_t = 0x00000200; +pub const HUPCL: crate::tcflag_t = 0x00000400; +pub const CLOCAL: crate::tcflag_t = 0x00000800; +pub const CIBAUD: crate::tcflag_t = 0o02003600000; +pub const CBAUDEX: crate::tcflag_t = 0o010000; +pub const B0: crate::speed_t = 0o000000; +pub const B50: crate::speed_t = 0o000001; +pub const B75: crate::speed_t = 0o000002; +pub const B110: crate::speed_t = 0o000003; +pub const B134: crate::speed_t = 0o000004; +pub const B150: crate::speed_t = 0o000005; +pub const B200: crate::speed_t = 0o000006; +pub const B300: crate::speed_t = 0o000007; +pub const B600: crate::speed_t = 0o000010; +pub const B1200: crate::speed_t = 0o000011; +pub const B1800: crate::speed_t = 0o000012; +pub const B2400: crate::speed_t = 0o000013; +pub const B4800: crate::speed_t = 0o000014; +pub const B9600: crate::speed_t = 0o000015; +pub const B19200: crate::speed_t = 0o000016; +pub const B38400: crate::speed_t = 0o000017; +pub const EXTA: crate::speed_t = B19200; +pub const EXTB: crate::speed_t = B38400; +pub const B57600: crate::speed_t = 0o010001; +pub const B115200: crate::speed_t = 0o010002; +pub const B230400: crate::speed_t = 0o010003; +pub const B460800: crate::speed_t = 0o010004; +pub const B500000: crate::speed_t = 0o010005; +pub const B576000: crate::speed_t = 0o010006; +pub const B921600: crate::speed_t = 0o010007; +pub const B1000000: crate::speed_t = 0o010010; +pub const B1152000: crate::speed_t = 0o010011; +pub const B1500000: crate::speed_t = 0o010012; +pub const B2000000: crate::speed_t = 0o010013; +pub const B2500000: crate::speed_t = 0o010014; +pub const B3000000: crate::speed_t = 0o010015; +pub const B3500000: crate::speed_t = 0o010016; +pub const B4000000: crate::speed_t = 0o010017; +pub const TAB1: crate::tcflag_t = 0x00000800; +pub const TAB2: crate::tcflag_t = 0x00001000; +pub const TAB3: crate::tcflag_t = 0x00001800; +pub const CR1: 
crate::tcflag_t = 0x00000200; +pub const CR2: crate::tcflag_t = 0x00000400; +pub const CR3: crate::tcflag_t = 0x00000600; +pub const FF1: crate::tcflag_t = 0x00008000; +pub const BS1: crate::tcflag_t = 0x00002000; +pub const OLCUC: crate::tcflag_t = 0o000002; +pub const NLDLY: crate::tcflag_t = 0o000400; +pub const CRDLY: crate::tcflag_t = 0o003000; +pub const TABDLY: crate::tcflag_t = 0o014000; +pub const BSDLY: crate::tcflag_t = 0o020000; +pub const FFDLY: crate::tcflag_t = 0o100000; +pub const VTDLY: crate::tcflag_t = 0o040000; +pub const XTABS: crate::tcflag_t = 0o014000; +pub const VT1: crate::tcflag_t = 0x00004000; +pub const ONLCR: crate::tcflag_t = 0x4; +pub const IXON: crate::tcflag_t = 0x00000400; +pub const IXOFF: crate::tcflag_t = 0x00001000; +pub const ECHOKE: crate::tcflag_t = 0x00000800; +pub const ECHOE: crate::tcflag_t = 0x00000010; +pub const ECHOK: crate::tcflag_t = 0x00000020; +pub const ECHONL: crate::tcflag_t = 0x00000040; +pub const ECHOPRT: crate::tcflag_t = 0x00000400; +pub const ECHOCTL: crate::tcflag_t = 0x00000200; +pub const ISIG: crate::tcflag_t = 0x00000001; +pub const ICANON: crate::tcflag_t = 0x00000002; +pub const XCASE: crate::tcflag_t = 0x00000004; +pub const PENDIN: crate::tcflag_t = 0x00004000; +pub const NOFLSH: crate::tcflag_t = 0x00000080; + +pub const NCCS: usize = 32; + +pub const EPOLL_CLOEXEC: c_int = 0x80000; + +pub const EFD_CLOEXEC: c_int = 0x80000; +pub const EFD_NONBLOCK: c_int = 0x800; diff --git a/vendor/libc/src/unix/linux_like/linux/gnu/b64/mips64/mod.rs b/vendor/libc/src/unix/linux_like/linux/gnu/b64/mips64/mod.rs new file mode 100644 index 00000000000000..7f66330d9c7ed2 --- /dev/null +++ b/vendor/libc/src/unix/linux_like/linux/gnu/b64/mips64/mod.rs @@ -0,0 +1,930 @@ +use crate::prelude::*; +use crate::{off64_t, off_t, pthread_mutex_t}; + +pub type blksize_t = i64; +pub type nlink_t = u64; +pub type suseconds_t = i64; +pub type wchar_t = i32; +pub type __u64 = c_ulong; +pub type __s64 = c_long; + +s! 
{ + pub struct stat { + pub st_dev: c_ulong, + st_pad1: [c_long; 2], + pub st_ino: crate::ino_t, + pub st_mode: crate::mode_t, + pub st_nlink: crate::nlink_t, + pub st_uid: crate::uid_t, + pub st_gid: crate::gid_t, + pub st_rdev: c_ulong, + st_pad2: [c_ulong; 1], + pub st_size: off_t, + st_pad3: c_long, + pub st_atime: crate::time_t, + pub st_atime_nsec: c_long, + pub st_mtime: crate::time_t, + pub st_mtime_nsec: c_long, + pub st_ctime: crate::time_t, + pub st_ctime_nsec: c_long, + pub st_blksize: crate::blksize_t, + st_pad4: c_long, + pub st_blocks: crate::blkcnt_t, + st_pad5: [c_long; 7], + } + + pub struct statfs { + pub f_type: c_long, + pub f_bsize: c_long, + pub f_frsize: c_long, + pub f_blocks: crate::fsblkcnt_t, + pub f_bfree: crate::fsblkcnt_t, + pub f_files: crate::fsblkcnt_t, + pub f_ffree: crate::fsblkcnt_t, + pub f_bavail: crate::fsblkcnt_t, + pub f_fsid: crate::fsid_t, + + pub f_namelen: c_long, + f_spare: [c_long; 6], + } + + pub struct flock { + pub l_type: c_short, + pub l_whence: c_short, + pub l_start: off_t, + pub l_len: off_t, + pub l_pid: crate::pid_t, + } + + pub struct flock64 { + pub l_type: c_short, + pub l_whence: c_short, + pub l_start: off64_t, + pub l_len: off64_t, + pub l_pid: crate::pid_t, + } + + pub struct stat64 { + pub st_dev: c_ulong, + st_pad1: [c_long; 2], + pub st_ino: crate::ino64_t, + pub st_mode: crate::mode_t, + pub st_nlink: crate::nlink_t, + pub st_uid: crate::uid_t, + pub st_gid: crate::gid_t, + pub st_rdev: c_ulong, + st_pad2: [c_long; 2], + pub st_size: off64_t, + pub st_atime: crate::time_t, + pub st_atime_nsec: c_long, + pub st_mtime: crate::time_t, + pub st_mtime_nsec: c_long, + pub st_ctime: crate::time_t, + pub st_ctime_nsec: c_long, + pub st_blksize: crate::blksize_t, + st_pad3: c_long, + pub st_blocks: crate::blkcnt64_t, + st_pad5: [c_long; 7], + } + + pub struct statfs64 { + pub f_type: c_long, + pub f_bsize: c_long, + pub f_frsize: c_long, + pub f_blocks: u64, + pub f_bfree: u64, + pub f_files: u64, + pub f_ffree: u64, + pub f_bavail: u64, + pub f_fsid: crate::fsid_t, + pub f_namelen: c_long, + pub f_flags: c_long, + pub f_spare: [c_long; 5], + } + + pub struct statvfs { + pub f_bsize: c_ulong, + pub f_frsize: c_ulong, + pub f_blocks: crate::fsblkcnt_t, + pub f_bfree: crate::fsblkcnt_t, + pub f_bavail: crate::fsblkcnt_t, + pub f_files: crate::fsfilcnt_t, + pub f_ffree: crate::fsfilcnt_t, + pub f_favail: crate::fsfilcnt_t, + pub f_fsid: c_ulong, + pub f_flag: c_ulong, + pub f_namemax: c_ulong, + __f_spare: [c_int; 6], + } + + pub struct statvfs64 { + pub f_bsize: c_ulong, + pub f_frsize: c_ulong, + pub f_blocks: u64, + pub f_bfree: u64, + pub f_bavail: u64, + pub f_files: u64, + pub f_ffree: u64, + pub f_favail: u64, + pub f_fsid: c_ulong, + pub f_flag: c_ulong, + pub f_namemax: c_ulong, + __f_spare: [c_int; 6], + } + + pub struct pthread_attr_t { + __size: [c_ulong; 7], + } + + // FIXME(1.0): This should not implement `PartialEq` + #[allow(unpredictable_function_pointer_comparisons)] + pub struct sigaction { + pub sa_flags: c_int, + pub sa_sigaction: crate::sighandler_t, + pub sa_mask: crate::sigset_t, + pub sa_restorer: Option, + } + + pub struct stack_t { + pub ss_sp: *mut c_void, + pub ss_size: size_t, + pub ss_flags: c_int, + } + + pub struct siginfo_t { + pub si_signo: c_int, + pub si_code: c_int, + pub si_errno: c_int, + _pad: c_int, + _pad2: [c_long; 14], + } + + pub struct ipc_perm { + pub __key: crate::key_t, + pub uid: crate::uid_t, + pub gid: crate::gid_t, + pub cuid: crate::uid_t, + pub cgid: crate::gid_t, + pub mode: 
c_uint, + pub __seq: c_ushort, + __pad1: c_ushort, + __unused1: c_ulong, + __unused2: c_ulong, + } + + pub struct shmid_ds { + pub shm_perm: crate::ipc_perm, + pub shm_segsz: size_t, + pub shm_atime: crate::time_t, + pub shm_dtime: crate::time_t, + pub shm_ctime: crate::time_t, + pub shm_cpid: crate::pid_t, + pub shm_lpid: crate::pid_t, + pub shm_nattch: crate::shmatt_t, + __unused4: c_ulong, + __unused5: c_ulong, + } +} + +s_no_extra_traits! { + #[repr(align(16))] + pub struct max_align_t { + priv_: [f64; 4], + } +} + +pub const __SIZEOF_PTHREAD_CONDATTR_T: usize = 4; +pub const __SIZEOF_PTHREAD_MUTEXATTR_T: usize = 4; +pub const __SIZEOF_PTHREAD_BARRIERATTR_T: usize = 4; +pub const __SIZEOF_PTHREAD_MUTEX_T: usize = 40; +pub const __SIZEOF_PTHREAD_RWLOCK_T: usize = 56; +pub const __SIZEOF_PTHREAD_BARRIER_T: usize = 32; + +#[cfg(target_endian = "little")] +pub const PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP: crate::pthread_mutex_t = pthread_mutex_t { + size: [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ], +}; +#[cfg(target_endian = "little")] +pub const PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP: crate::pthread_mutex_t = pthread_mutex_t { + size: [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ], +}; +#[cfg(target_endian = "little")] +pub const PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP: crate::pthread_mutex_t = pthread_mutex_t { + size: [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ], +}; +#[cfg(target_endian = "big")] +pub const PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP: crate::pthread_mutex_t = pthread_mutex_t { + size: [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ], +}; +#[cfg(target_endian = "big")] +pub const PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP: crate::pthread_mutex_t = pthread_mutex_t { + size: [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ], +}; +#[cfg(target_endian = "big")] +pub const PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP: crate::pthread_mutex_t = pthread_mutex_t { + size: [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ], +}; + +pub const SYS_read: c_long = 5000 + 0; +pub const SYS_write: c_long = 5000 + 1; +pub const SYS_open: c_long = 5000 + 2; +pub const SYS_close: c_long = 5000 + 3; +pub const SYS_stat: c_long = 5000 + 4; +pub const SYS_fstat: c_long = 5000 + 5; +pub const SYS_lstat: c_long = 5000 + 6; +pub const SYS_poll: c_long = 5000 + 7; +pub const SYS_lseek: c_long = 5000 + 8; +pub const SYS_mmap: c_long = 5000 + 9; +pub const SYS_mprotect: c_long = 5000 + 10; +pub const SYS_munmap: c_long = 5000 + 11; +pub const SYS_brk: c_long = 5000 + 12; +pub const SYS_rt_sigaction: c_long = 5000 + 13; +pub const SYS_rt_sigprocmask: c_long = 5000 + 14; +pub const SYS_ioctl: c_long = 5000 + 15; +pub const SYS_pread64: c_long = 5000 + 16; +pub const SYS_pwrite64: c_long = 5000 + 17; +pub const SYS_readv: c_long = 5000 + 18; +pub const SYS_writev: c_long = 5000 + 19; +pub const SYS_access: c_long = 5000 + 20; +pub const SYS_pipe: c_long = 5000 + 21; +pub const SYS__newselect: c_long = 5000 + 22; +pub const SYS_sched_yield: c_long = 5000 + 23; +pub const SYS_mremap: c_long = 5000 + 24; +pub const SYS_msync: c_long = 
5000 + 25; +pub const SYS_mincore: c_long = 5000 + 26; +pub const SYS_madvise: c_long = 5000 + 27; +pub const SYS_shmget: c_long = 5000 + 28; +pub const SYS_shmat: c_long = 5000 + 29; +pub const SYS_shmctl: c_long = 5000 + 30; +pub const SYS_dup: c_long = 5000 + 31; +pub const SYS_dup2: c_long = 5000 + 32; +pub const SYS_pause: c_long = 5000 + 33; +pub const SYS_nanosleep: c_long = 5000 + 34; +pub const SYS_getitimer: c_long = 5000 + 35; +pub const SYS_setitimer: c_long = 5000 + 36; +pub const SYS_alarm: c_long = 5000 + 37; +pub const SYS_getpid: c_long = 5000 + 38; +pub const SYS_sendfile: c_long = 5000 + 39; +pub const SYS_socket: c_long = 5000 + 40; +pub const SYS_connect: c_long = 5000 + 41; +pub const SYS_accept: c_long = 5000 + 42; +pub const SYS_sendto: c_long = 5000 + 43; +pub const SYS_recvfrom: c_long = 5000 + 44; +pub const SYS_sendmsg: c_long = 5000 + 45; +pub const SYS_recvmsg: c_long = 5000 + 46; +pub const SYS_shutdown: c_long = 5000 + 47; +pub const SYS_bind: c_long = 5000 + 48; +pub const SYS_listen: c_long = 5000 + 49; +pub const SYS_getsockname: c_long = 5000 + 50; +pub const SYS_getpeername: c_long = 5000 + 51; +pub const SYS_socketpair: c_long = 5000 + 52; +pub const SYS_setsockopt: c_long = 5000 + 53; +pub const SYS_getsockopt: c_long = 5000 + 54; +pub const SYS_clone: c_long = 5000 + 55; +pub const SYS_fork: c_long = 5000 + 56; +pub const SYS_execve: c_long = 5000 + 57; +pub const SYS_exit: c_long = 5000 + 58; +pub const SYS_wait4: c_long = 5000 + 59; +pub const SYS_kill: c_long = 5000 + 60; +pub const SYS_uname: c_long = 5000 + 61; +pub const SYS_semget: c_long = 5000 + 62; +pub const SYS_semop: c_long = 5000 + 63; +pub const SYS_semctl: c_long = 5000 + 64; +pub const SYS_shmdt: c_long = 5000 + 65; +pub const SYS_msgget: c_long = 5000 + 66; +pub const SYS_msgsnd: c_long = 5000 + 67; +pub const SYS_msgrcv: c_long = 5000 + 68; +pub const SYS_msgctl: c_long = 5000 + 69; +pub const SYS_fcntl: c_long = 5000 + 70; +pub const SYS_flock: c_long = 5000 + 71; +pub const SYS_fsync: c_long = 5000 + 72; +pub const SYS_fdatasync: c_long = 5000 + 73; +pub const SYS_truncate: c_long = 5000 + 74; +pub const SYS_ftruncate: c_long = 5000 + 75; +pub const SYS_getdents: c_long = 5000 + 76; +pub const SYS_getcwd: c_long = 5000 + 77; +pub const SYS_chdir: c_long = 5000 + 78; +pub const SYS_fchdir: c_long = 5000 + 79; +pub const SYS_rename: c_long = 5000 + 80; +pub const SYS_mkdir: c_long = 5000 + 81; +pub const SYS_rmdir: c_long = 5000 + 82; +pub const SYS_creat: c_long = 5000 + 83; +pub const SYS_link: c_long = 5000 + 84; +pub const SYS_unlink: c_long = 5000 + 85; +pub const SYS_symlink: c_long = 5000 + 86; +pub const SYS_readlink: c_long = 5000 + 87; +pub const SYS_chmod: c_long = 5000 + 88; +pub const SYS_fchmod: c_long = 5000 + 89; +pub const SYS_chown: c_long = 5000 + 90; +pub const SYS_fchown: c_long = 5000 + 91; +pub const SYS_lchown: c_long = 5000 + 92; +pub const SYS_umask: c_long = 5000 + 93; +pub const SYS_gettimeofday: c_long = 5000 + 94; +pub const SYS_getrlimit: c_long = 5000 + 95; +pub const SYS_getrusage: c_long = 5000 + 96; +pub const SYS_sysinfo: c_long = 5000 + 97; +pub const SYS_times: c_long = 5000 + 98; +pub const SYS_ptrace: c_long = 5000 + 99; +pub const SYS_getuid: c_long = 5000 + 100; +pub const SYS_syslog: c_long = 5000 + 101; +pub const SYS_getgid: c_long = 5000 + 102; +pub const SYS_setuid: c_long = 5000 + 103; +pub const SYS_setgid: c_long = 5000 + 104; +pub const SYS_geteuid: c_long = 5000 + 105; +pub const SYS_getegid: c_long = 5000 + 106; +pub const 
SYS_setpgid: c_long = 5000 + 107; +pub const SYS_getppid: c_long = 5000 + 108; +pub const SYS_getpgrp: c_long = 5000 + 109; +pub const SYS_setsid: c_long = 5000 + 110; +pub const SYS_setreuid: c_long = 5000 + 111; +pub const SYS_setregid: c_long = 5000 + 112; +pub const SYS_getgroups: c_long = 5000 + 113; +pub const SYS_setgroups: c_long = 5000 + 114; +pub const SYS_setresuid: c_long = 5000 + 115; +pub const SYS_getresuid: c_long = 5000 + 116; +pub const SYS_setresgid: c_long = 5000 + 117; +pub const SYS_getresgid: c_long = 5000 + 118; +pub const SYS_getpgid: c_long = 5000 + 119; +pub const SYS_setfsuid: c_long = 5000 + 120; +pub const SYS_setfsgid: c_long = 5000 + 121; +pub const SYS_getsid: c_long = 5000 + 122; +pub const SYS_capget: c_long = 5000 + 123; +pub const SYS_capset: c_long = 5000 + 124; +pub const SYS_rt_sigpending: c_long = 5000 + 125; +pub const SYS_rt_sigtimedwait: c_long = 5000 + 126; +pub const SYS_rt_sigqueueinfo: c_long = 5000 + 127; +pub const SYS_rt_sigsuspend: c_long = 5000 + 128; +pub const SYS_sigaltstack: c_long = 5000 + 129; +pub const SYS_utime: c_long = 5000 + 130; +pub const SYS_mknod: c_long = 5000 + 131; +pub const SYS_personality: c_long = 5000 + 132; +pub const SYS_ustat: c_long = 5000 + 133; +pub const SYS_statfs: c_long = 5000 + 134; +pub const SYS_fstatfs: c_long = 5000 + 135; +pub const SYS_sysfs: c_long = 5000 + 136; +pub const SYS_getpriority: c_long = 5000 + 137; +pub const SYS_setpriority: c_long = 5000 + 138; +pub const SYS_sched_setparam: c_long = 5000 + 139; +pub const SYS_sched_getparam: c_long = 5000 + 140; +pub const SYS_sched_setscheduler: c_long = 5000 + 141; +pub const SYS_sched_getscheduler: c_long = 5000 + 142; +pub const SYS_sched_get_priority_max: c_long = 5000 + 143; +pub const SYS_sched_get_priority_min: c_long = 5000 + 144; +pub const SYS_sched_rr_get_interval: c_long = 5000 + 145; +pub const SYS_mlock: c_long = 5000 + 146; +pub const SYS_munlock: c_long = 5000 + 147; +pub const SYS_mlockall: c_long = 5000 + 148; +pub const SYS_munlockall: c_long = 5000 + 149; +pub const SYS_vhangup: c_long = 5000 + 150; +pub const SYS_pivot_root: c_long = 5000 + 151; +pub const SYS__sysctl: c_long = 5000 + 152; +pub const SYS_prctl: c_long = 5000 + 153; +pub const SYS_adjtimex: c_long = 5000 + 154; +pub const SYS_setrlimit: c_long = 5000 + 155; +pub const SYS_chroot: c_long = 5000 + 156; +pub const SYS_sync: c_long = 5000 + 157; +pub const SYS_acct: c_long = 5000 + 158; +pub const SYS_settimeofday: c_long = 5000 + 159; +pub const SYS_mount: c_long = 5000 + 160; +pub const SYS_umount2: c_long = 5000 + 161; +pub const SYS_swapon: c_long = 5000 + 162; +pub const SYS_swapoff: c_long = 5000 + 163; +pub const SYS_reboot: c_long = 5000 + 164; +pub const SYS_sethostname: c_long = 5000 + 165; +pub const SYS_setdomainname: c_long = 5000 + 166; +#[deprecated(since = "0.2.70", note = "Functional up to 2.6 kernel")] +pub const SYS_create_module: c_long = 5000 + 167; +pub const SYS_init_module: c_long = 5000 + 168; +pub const SYS_delete_module: c_long = 5000 + 169; +#[deprecated(since = "0.2.70", note = "Functional up to 2.6 kernel")] +pub const SYS_get_kernel_syms: c_long = 5000 + 170; +#[deprecated(since = "0.2.70", note = "Functional up to 2.6 kernel")] +pub const SYS_query_module: c_long = 5000 + 171; +pub const SYS_quotactl: c_long = 5000 + 172; +pub const SYS_nfsservctl: c_long = 5000 + 173; +pub const SYS_getpmsg: c_long = 5000 + 174; +pub const SYS_putpmsg: c_long = 5000 + 175; +pub const SYS_afs_syscall: c_long = 5000 + 176; +pub const SYS_gettid: 
c_long = 5000 + 178; +pub const SYS_readahead: c_long = 5000 + 179; +pub const SYS_setxattr: c_long = 5000 + 180; +pub const SYS_lsetxattr: c_long = 5000 + 181; +pub const SYS_fsetxattr: c_long = 5000 + 182; +pub const SYS_getxattr: c_long = 5000 + 183; +pub const SYS_lgetxattr: c_long = 5000 + 184; +pub const SYS_fgetxattr: c_long = 5000 + 185; +pub const SYS_listxattr: c_long = 5000 + 186; +pub const SYS_llistxattr: c_long = 5000 + 187; +pub const SYS_flistxattr: c_long = 5000 + 188; +pub const SYS_removexattr: c_long = 5000 + 189; +pub const SYS_lremovexattr: c_long = 5000 + 190; +pub const SYS_fremovexattr: c_long = 5000 + 191; +pub const SYS_tkill: c_long = 5000 + 192; +pub const SYS_futex: c_long = 5000 + 194; +pub const SYS_sched_setaffinity: c_long = 5000 + 195; +pub const SYS_sched_getaffinity: c_long = 5000 + 196; +pub const SYS_cacheflush: c_long = 5000 + 197; +pub const SYS_cachectl: c_long = 5000 + 198; +pub const SYS_sysmips: c_long = 5000 + 199; +pub const SYS_io_setup: c_long = 5000 + 200; +pub const SYS_io_destroy: c_long = 5000 + 201; +pub const SYS_io_getevents: c_long = 5000 + 202; +pub const SYS_io_submit: c_long = 5000 + 203; +pub const SYS_io_cancel: c_long = 5000 + 204; +pub const SYS_exit_group: c_long = 5000 + 205; +pub const SYS_lookup_dcookie: c_long = 5000 + 206; +pub const SYS_epoll_create: c_long = 5000 + 207; +pub const SYS_epoll_ctl: c_long = 5000 + 208; +pub const SYS_epoll_wait: c_long = 5000 + 209; +pub const SYS_remap_file_pages: c_long = 5000 + 210; +pub const SYS_rt_sigreturn: c_long = 5000 + 211; +pub const SYS_set_tid_address: c_long = 5000 + 212; +pub const SYS_restart_syscall: c_long = 5000 + 213; +pub const SYS_semtimedop: c_long = 5000 + 214; +pub const SYS_fadvise64: c_long = 5000 + 215; +pub const SYS_timer_create: c_long = 5000 + 216; +pub const SYS_timer_settime: c_long = 5000 + 217; +pub const SYS_timer_gettime: c_long = 5000 + 218; +pub const SYS_timer_getoverrun: c_long = 5000 + 219; +pub const SYS_timer_delete: c_long = 5000 + 220; +pub const SYS_clock_settime: c_long = 5000 + 221; +pub const SYS_clock_gettime: c_long = 5000 + 222; +pub const SYS_clock_getres: c_long = 5000 + 223; +pub const SYS_clock_nanosleep: c_long = 5000 + 224; +pub const SYS_tgkill: c_long = 5000 + 225; +pub const SYS_utimes: c_long = 5000 + 226; +pub const SYS_mbind: c_long = 5000 + 227; +pub const SYS_get_mempolicy: c_long = 5000 + 228; +pub const SYS_set_mempolicy: c_long = 5000 + 229; +pub const SYS_mq_open: c_long = 5000 + 230; +pub const SYS_mq_unlink: c_long = 5000 + 231; +pub const SYS_mq_timedsend: c_long = 5000 + 232; +pub const SYS_mq_timedreceive: c_long = 5000 + 233; +pub const SYS_mq_notify: c_long = 5000 + 234; +pub const SYS_mq_getsetattr: c_long = 5000 + 235; +pub const SYS_vserver: c_long = 5000 + 236; +pub const SYS_waitid: c_long = 5000 + 237; +/* pub const SYS_sys_setaltroot: c_long = 5000 + 238; */ +pub const SYS_add_key: c_long = 5000 + 239; +pub const SYS_request_key: c_long = 5000 + 240; +pub const SYS_keyctl: c_long = 5000 + 241; +pub const SYS_set_thread_area: c_long = 5000 + 242; +pub const SYS_inotify_init: c_long = 5000 + 243; +pub const SYS_inotify_add_watch: c_long = 5000 + 244; +pub const SYS_inotify_rm_watch: c_long = 5000 + 245; +pub const SYS_migrate_pages: c_long = 5000 + 246; +pub const SYS_openat: c_long = 5000 + 247; +pub const SYS_mkdirat: c_long = 5000 + 248; +pub const SYS_mknodat: c_long = 5000 + 249; +pub const SYS_fchownat: c_long = 5000 + 250; +pub const SYS_futimesat: c_long = 5000 + 251; +pub const SYS_newfstatat: 
c_long = 5000 + 252; +pub const SYS_unlinkat: c_long = 5000 + 253; +pub const SYS_renameat: c_long = 5000 + 254; +pub const SYS_linkat: c_long = 5000 + 255; +pub const SYS_symlinkat: c_long = 5000 + 256; +pub const SYS_readlinkat: c_long = 5000 + 257; +pub const SYS_fchmodat: c_long = 5000 + 258; +pub const SYS_faccessat: c_long = 5000 + 259; +pub const SYS_pselect6: c_long = 5000 + 260; +pub const SYS_ppoll: c_long = 5000 + 261; +pub const SYS_unshare: c_long = 5000 + 262; +pub const SYS_splice: c_long = 5000 + 263; +pub const SYS_sync_file_range: c_long = 5000 + 264; +pub const SYS_tee: c_long = 5000 + 265; +pub const SYS_vmsplice: c_long = 5000 + 266; +pub const SYS_move_pages: c_long = 5000 + 267; +pub const SYS_set_robust_list: c_long = 5000 + 268; +pub const SYS_get_robust_list: c_long = 5000 + 269; +pub const SYS_kexec_load: c_long = 5000 + 270; +pub const SYS_getcpu: c_long = 5000 + 271; +pub const SYS_epoll_pwait: c_long = 5000 + 272; +pub const SYS_ioprio_set: c_long = 5000 + 273; +pub const SYS_ioprio_get: c_long = 5000 + 274; +pub const SYS_utimensat: c_long = 5000 + 275; +pub const SYS_signalfd: c_long = 5000 + 276; +pub const SYS_timerfd: c_long = 5000 + 277; +pub const SYS_eventfd: c_long = 5000 + 278; +pub const SYS_fallocate: c_long = 5000 + 279; +pub const SYS_timerfd_create: c_long = 5000 + 280; +pub const SYS_timerfd_gettime: c_long = 5000 + 281; +pub const SYS_timerfd_settime: c_long = 5000 + 282; +pub const SYS_signalfd4: c_long = 5000 + 283; +pub const SYS_eventfd2: c_long = 5000 + 284; +pub const SYS_epoll_create1: c_long = 5000 + 285; +pub const SYS_dup3: c_long = 5000 + 286; +pub const SYS_pipe2: c_long = 5000 + 287; +pub const SYS_inotify_init1: c_long = 5000 + 288; +pub const SYS_preadv: c_long = 5000 + 289; +pub const SYS_pwritev: c_long = 5000 + 290; +pub const SYS_rt_tgsigqueueinfo: c_long = 5000 + 291; +pub const SYS_perf_event_open: c_long = 5000 + 292; +pub const SYS_accept4: c_long = 5000 + 293; +pub const SYS_recvmmsg: c_long = 5000 + 294; +pub const SYS_fanotify_init: c_long = 5000 + 295; +pub const SYS_fanotify_mark: c_long = 5000 + 296; +pub const SYS_prlimit64: c_long = 5000 + 297; +pub const SYS_name_to_handle_at: c_long = 5000 + 298; +pub const SYS_open_by_handle_at: c_long = 5000 + 299; +pub const SYS_clock_adjtime: c_long = 5000 + 300; +pub const SYS_syncfs: c_long = 5000 + 301; +pub const SYS_sendmmsg: c_long = 5000 + 302; +pub const SYS_setns: c_long = 5000 + 303; +pub const SYS_process_vm_readv: c_long = 5000 + 304; +pub const SYS_process_vm_writev: c_long = 5000 + 305; +pub const SYS_kcmp: c_long = 5000 + 306; +pub const SYS_finit_module: c_long = 5000 + 307; +pub const SYS_getdents64: c_long = 5000 + 308; +pub const SYS_sched_setattr: c_long = 5000 + 309; +pub const SYS_sched_getattr: c_long = 5000 + 310; +pub const SYS_renameat2: c_long = 5000 + 311; +pub const SYS_seccomp: c_long = 5000 + 312; +pub const SYS_getrandom: c_long = 5000 + 313; +pub const SYS_memfd_create: c_long = 5000 + 314; +pub const SYS_bpf: c_long = 5000 + 315; +pub const SYS_execveat: c_long = 5000 + 316; +pub const SYS_userfaultfd: c_long = 5000 + 317; +pub const SYS_membarrier: c_long = 5000 + 318; +pub const SYS_mlock2: c_long = 5000 + 319; +pub const SYS_copy_file_range: c_long = 5000 + 320; +pub const SYS_preadv2: c_long = 5000 + 321; +pub const SYS_pwritev2: c_long = 5000 + 322; +pub const SYS_pkey_mprotect: c_long = 5000 + 323; +pub const SYS_pkey_alloc: c_long = 5000 + 324; +pub const SYS_pkey_free: c_long = 5000 + 325; +pub const SYS_statx: c_long = 5000 + 326; 
+pub const SYS_rseq: c_long = 5000 + 327; +pub const SYS_pidfd_send_signal: c_long = 5000 + 424; +pub const SYS_io_uring_setup: c_long = 5000 + 425; +pub const SYS_io_uring_enter: c_long = 5000 + 426; +pub const SYS_io_uring_register: c_long = 5000 + 427; +pub const SYS_open_tree: c_long = 5000 + 428; +pub const SYS_move_mount: c_long = 5000 + 429; +pub const SYS_fsopen: c_long = 5000 + 430; +pub const SYS_fsconfig: c_long = 5000 + 431; +pub const SYS_fsmount: c_long = 5000 + 432; +pub const SYS_fspick: c_long = 5000 + 433; +pub const SYS_pidfd_open: c_long = 5000 + 434; +pub const SYS_clone3: c_long = 5000 + 435; +pub const SYS_close_range: c_long = 5000 + 436; +pub const SYS_openat2: c_long = 5000 + 437; +pub const SYS_pidfd_getfd: c_long = 5000 + 438; +pub const SYS_faccessat2: c_long = 5000 + 439; +pub const SYS_process_madvise: c_long = 5000 + 440; +pub const SYS_epoll_pwait2: c_long = 5000 + 441; +pub const SYS_mount_setattr: c_long = 5000 + 442; +pub const SYS_quotactl_fd: c_long = 5000 + 443; +pub const SYS_landlock_create_ruleset: c_long = 5000 + 444; +pub const SYS_landlock_add_rule: c_long = 5000 + 445; +pub const SYS_landlock_restrict_self: c_long = 5000 + 446; +pub const SYS_memfd_secret: c_long = 5000 + 447; +pub const SYS_process_mrelease: c_long = 5000 + 448; +pub const SYS_futex_waitv: c_long = 5000 + 449; +pub const SYS_set_mempolicy_home_node: c_long = 5000 + 450; + +pub const SFD_CLOEXEC: c_int = 0x080000; + +pub const NCCS: usize = 32; + +pub const O_TRUNC: c_int = 512; + +pub const O_NOATIME: c_int = 0o1000000; +pub const O_CLOEXEC: c_int = 0x80000; +pub const O_PATH: c_int = 0o10000000; +pub const O_TMPFILE: c_int = 0o20000000 | O_DIRECTORY; + +pub const EBFONT: c_int = 59; +pub const ENOSTR: c_int = 60; +pub const ENODATA: c_int = 61; +pub const ETIME: c_int = 62; +pub const ENOSR: c_int = 63; +pub const ENONET: c_int = 64; +pub const ENOPKG: c_int = 65; +pub const EREMOTE: c_int = 66; +pub const ENOLINK: c_int = 67; +pub const EADV: c_int = 68; +pub const ESRMNT: c_int = 69; +pub const ECOMM: c_int = 70; +pub const EPROTO: c_int = 71; +pub const EDOTDOT: c_int = 73; + +pub const SA_NODEFER: c_int = 0x40000000; +pub const SA_RESETHAND: c_int = 0x80000000; +pub const SA_RESTART: c_int = 0x10000000; +pub const SA_NOCLDSTOP: c_int = 0x00000001; + +pub const POSIX_FADV_DONTNEED: c_int = 4; +pub const POSIX_FADV_NOREUSE: c_int = 5; + +pub const EPOLL_CLOEXEC: c_int = 0x80000; + +pub const EFD_CLOEXEC: c_int = 0x80000; + +pub const O_DIRECT: c_int = 0x8000; +pub const O_DIRECTORY: c_int = 0x10000; +pub const O_NOFOLLOW: c_int = 0x20000; + +pub const O_APPEND: c_int = 8; +pub const O_CREAT: c_int = 256; +pub const O_EXCL: c_int = 1024; +pub const O_NOCTTY: c_int = 2048; +pub const O_NONBLOCK: c_int = 128; +pub const O_SYNC: c_int = 0x4010; +pub const O_RSYNC: c_int = 0x4010; +pub const O_DSYNC: c_int = 0x10; +pub const O_FSYNC: c_int = 0x4010; +pub const O_ASYNC: c_int = 0x1000; +pub const O_NDELAY: c_int = 0x80; + +pub const EDEADLK: c_int = 45; +pub const ENAMETOOLONG: c_int = 78; +pub const ENOLCK: c_int = 46; +pub const ENOSYS: c_int = 89; +pub const ENOTEMPTY: c_int = 93; +pub const ELOOP: c_int = 90; +pub const ENOMSG: c_int = 35; +pub const EIDRM: c_int = 36; +pub const ECHRNG: c_int = 37; +pub const EL2NSYNC: c_int = 38; +pub const EL3HLT: c_int = 39; +pub const EL3RST: c_int = 40; +pub const ELNRNG: c_int = 41; +pub const EUNATCH: c_int = 42; +pub const ENOCSI: c_int = 43; +pub const EL2HLT: c_int = 44; +pub const EBADE: c_int = 50; +pub const EBADR: c_int = 51; 
+pub const EXFULL: c_int = 52; +pub const ENOANO: c_int = 53; +pub const EBADRQC: c_int = 54; +pub const EBADSLT: c_int = 55; +pub const EDEADLOCK: c_int = 56; +pub const EMULTIHOP: c_int = 74; +pub const EOVERFLOW: c_int = 79; +pub const ENOTUNIQ: c_int = 80; +pub const EBADFD: c_int = 81; +pub const EBADMSG: c_int = 77; +pub const EREMCHG: c_int = 82; +pub const ELIBACC: c_int = 83; +pub const ELIBBAD: c_int = 84; +pub const ELIBSCN: c_int = 85; +pub const ELIBMAX: c_int = 86; +pub const ELIBEXEC: c_int = 87; +pub const EILSEQ: c_int = 88; +pub const ERESTART: c_int = 91; +pub const ESTRPIPE: c_int = 92; +pub const EUSERS: c_int = 94; +pub const ENOTSOCK: c_int = 95; +pub const EDESTADDRREQ: c_int = 96; +pub const EMSGSIZE: c_int = 97; +pub const EPROTOTYPE: c_int = 98; +pub const ENOPROTOOPT: c_int = 99; +pub const EPROTONOSUPPORT: c_int = 120; +pub const ESOCKTNOSUPPORT: c_int = 121; +pub const EOPNOTSUPP: c_int = 122; +pub const EPFNOSUPPORT: c_int = 123; +pub const EAFNOSUPPORT: c_int = 124; +pub const EADDRINUSE: c_int = 125; +pub const EADDRNOTAVAIL: c_int = 126; +pub const ENETDOWN: c_int = 127; +pub const ENETUNREACH: c_int = 128; +pub const ENETRESET: c_int = 129; +pub const ECONNABORTED: c_int = 130; +pub const ECONNRESET: c_int = 131; +pub const ENOBUFS: c_int = 132; +pub const EISCONN: c_int = 133; +pub const ENOTCONN: c_int = 134; +pub const ESHUTDOWN: c_int = 143; +pub const ETOOMANYREFS: c_int = 144; +pub const ETIMEDOUT: c_int = 145; +pub const ECONNREFUSED: c_int = 146; +pub const EHOSTDOWN: c_int = 147; +pub const EHOSTUNREACH: c_int = 148; +pub const EALREADY: c_int = 149; +pub const EINPROGRESS: c_int = 150; +pub const ESTALE: c_int = 151; +pub const EUCLEAN: c_int = 135; +pub const ENOTNAM: c_int = 137; +pub const ENAVAIL: c_int = 138; +pub const EISNAM: c_int = 139; +pub const EREMOTEIO: c_int = 140; +pub const EDQUOT: c_int = 1133; +pub const ENOMEDIUM: c_int = 159; +pub const EMEDIUMTYPE: c_int = 160; +pub const ECANCELED: c_int = 158; +pub const ENOKEY: c_int = 161; +pub const EKEYEXPIRED: c_int = 162; +pub const EKEYREVOKED: c_int = 163; +pub const EKEYREJECTED: c_int = 164; +pub const EOWNERDEAD: c_int = 165; +pub const ENOTRECOVERABLE: c_int = 166; +pub const ERFKILL: c_int = 167; + +pub const MAP_NORESERVE: c_int = 0x400; +pub const MAP_ANON: c_int = 0x800; +pub const MAP_ANONYMOUS: c_int = 0x800; +pub const MAP_GROWSDOWN: c_int = 0x1000; +pub const MAP_DENYWRITE: c_int = 0x2000; +pub const MAP_EXECUTABLE: c_int = 0x4000; +pub const MAP_LOCKED: c_int = 0x8000; +pub const MAP_POPULATE: c_int = 0x10000; +pub const MAP_NONBLOCK: c_int = 0x20000; +pub const MAP_STACK: c_int = 0x40000; +pub const MAP_HUGETLB: c_int = 0x080000; + +pub const SOCK_STREAM: c_int = 2; +pub const SOCK_DGRAM: c_int = 1; + +pub const SA_ONSTACK: c_int = 0x08000000; +pub const SA_SIGINFO: c_int = 0x00000008; +pub const SA_NOCLDWAIT: c_int = 0x00010000; + +pub const SIGEMT: c_int = 7; +pub const SIGCHLD: c_int = 18; +pub const SIGBUS: c_int = 10; +pub const SIGTTIN: c_int = 26; +pub const SIGTTOU: c_int = 27; +pub const SIGXCPU: c_int = 30; +pub const SIGXFSZ: c_int = 31; +pub const SIGVTALRM: c_int = 28; +pub const SIGPROF: c_int = 29; +pub const SIGWINCH: c_int = 20; +pub const SIGUSR1: c_int = 16; +pub const SIGUSR2: c_int = 17; +pub const SIGCONT: c_int = 25; +pub const SIGSTOP: c_int = 23; +pub const SIGTSTP: c_int = 24; +pub const SIGURG: c_int = 21; +pub const SIGIO: c_int = 22; +pub const SIGSYS: c_int = 12; +pub const SIGPOLL: c_int = 22; +pub const SIGPWR: c_int = 19; +pub const 
SIG_SETMASK: c_int = 3; +pub const SIG_BLOCK: c_int = 0x1; +pub const SIG_UNBLOCK: c_int = 0x2; + +pub const POLLWRNORM: c_short = 0x004; +pub const POLLWRBAND: c_short = 0x100; + +pub const VEOF: usize = 16; +pub const VEOL: usize = 17; +pub const VEOL2: usize = 6; +pub const VMIN: usize = 4; +pub const IEXTEN: crate::tcflag_t = 0x00000100; +pub const TOSTOP: crate::tcflag_t = 0x00008000; +pub const FLUSHO: crate::tcflag_t = 0x00002000; +pub const EXTPROC: crate::tcflag_t = 0o200000; +pub const TCSANOW: c_int = 0x540e; +pub const TCSADRAIN: c_int = 0x540f; +pub const TCSAFLUSH: c_int = 0x5410; + +pub const PTRACE_GETFPREGS: c_uint = 14; +pub const PTRACE_SETFPREGS: c_uint = 15; +pub const PTRACE_DETACH: c_uint = 17; +pub const PTRACE_GETFPXREGS: c_uint = 18; +pub const PTRACE_SETFPXREGS: c_uint = 19; +pub const PTRACE_GETREGS: c_uint = 12; +pub const PTRACE_SETREGS: c_uint = 13; + +pub const EFD_NONBLOCK: c_int = 0x80; + +pub const F_RDLCK: c_int = 0; +pub const F_WRLCK: c_int = 1; +pub const F_UNLCK: c_int = 2; +pub const F_GETLK: c_int = 14; +pub const F_GETOWN: c_int = 23; +pub const F_SETOWN: c_int = 24; +pub const F_SETLK: c_int = 6; +pub const F_SETLKW: c_int = 7; +pub const F_OFD_GETLK: c_int = 36; +pub const F_OFD_SETLK: c_int = 37; +pub const F_OFD_SETLKW: c_int = 38; + +pub const SFD_NONBLOCK: c_int = 0x80; + +pub const RTLD_DEEPBIND: c_int = 0x10; +pub const RTLD_GLOBAL: c_int = 0x4; +pub const RTLD_NOLOAD: c_int = 0x8; + +pub const MCL_CURRENT: c_int = 0x0001; +pub const MCL_FUTURE: c_int = 0x0002; +pub const MCL_ONFAULT: c_int = 0x0004; + +pub const SIGSTKSZ: size_t = 8192; +pub const MINSIGSTKSZ: size_t = 2048; +pub const CBAUD: crate::tcflag_t = 0o0010017; +pub const TAB1: crate::tcflag_t = 0x00000800; +pub const TAB2: crate::tcflag_t = 0x00001000; +pub const TAB3: crate::tcflag_t = 0x00001800; +pub const CR1: crate::tcflag_t = 0x00000200; +pub const CR2: crate::tcflag_t = 0x00000400; +pub const CR3: crate::tcflag_t = 0x00000600; +pub const FF1: crate::tcflag_t = 0x00008000; +pub const BS1: crate::tcflag_t = 0x00002000; +pub const VT1: crate::tcflag_t = 0x00004000; +pub const VWERASE: usize = 14; +pub const VREPRINT: usize = 12; +pub const VSUSP: usize = 10; +pub const VSTART: usize = 8; +pub const VSTOP: usize = 9; +pub const VDISCARD: usize = 13; +pub const VTIME: usize = 5; +pub const IXON: crate::tcflag_t = 0x00000400; +pub const IXOFF: crate::tcflag_t = 0x00001000; +pub const ONLCR: crate::tcflag_t = 0x4; +pub const CSIZE: crate::tcflag_t = 0x00000030; +pub const CS6: crate::tcflag_t = 0x00000010; +pub const CS7: crate::tcflag_t = 0x00000020; +pub const CS8: crate::tcflag_t = 0x00000030; +pub const CSTOPB: crate::tcflag_t = 0x00000040; +pub const CREAD: crate::tcflag_t = 0x00000080; +pub const PARENB: crate::tcflag_t = 0x00000100; +pub const PARODD: crate::tcflag_t = 0x00000200; +pub const HUPCL: crate::tcflag_t = 0x00000400; +pub const CLOCAL: crate::tcflag_t = 0x00000800; +pub const ECHOKE: crate::tcflag_t = 0x00000800; +pub const ECHOE: crate::tcflag_t = 0x00000010; +pub const ECHOK: crate::tcflag_t = 0x00000020; +pub const ECHONL: crate::tcflag_t = 0x00000040; +pub const ECHOPRT: crate::tcflag_t = 0x00000400; +pub const ECHOCTL: crate::tcflag_t = 0x00000200; +pub const ISIG: crate::tcflag_t = 0x00000001; +pub const ICANON: crate::tcflag_t = 0x00000002; +pub const PENDIN: crate::tcflag_t = 0x00004000; +pub const NOFLSH: crate::tcflag_t = 0x00000080; +pub const CIBAUD: crate::tcflag_t = 0o02003600000; +pub const CBAUDEX: crate::tcflag_t = 0o010000; +pub const VSWTC: 
usize = 7; +pub const OLCUC: crate::tcflag_t = 0o000002; +pub const NLDLY: crate::tcflag_t = 0o000400; +pub const CRDLY: crate::tcflag_t = 0o003000; +pub const TABDLY: crate::tcflag_t = 0o014000; +pub const BSDLY: crate::tcflag_t = 0o020000; +pub const FFDLY: crate::tcflag_t = 0o100000; +pub const VTDLY: crate::tcflag_t = 0o040000; +pub const XTABS: crate::tcflag_t = 0o014000; + +pub const B0: crate::speed_t = 0o000000; +pub const B50: crate::speed_t = 0o000001; +pub const B75: crate::speed_t = 0o000002; +pub const B110: crate::speed_t = 0o000003; +pub const B134: crate::speed_t = 0o000004; +pub const B150: crate::speed_t = 0o000005; +pub const B200: crate::speed_t = 0o000006; +pub const B300: crate::speed_t = 0o000007; +pub const B600: crate::speed_t = 0o000010; +pub const B1200: crate::speed_t = 0o000011; +pub const B1800: crate::speed_t = 0o000012; +pub const B2400: crate::speed_t = 0o000013; +pub const B4800: crate::speed_t = 0o000014; +pub const B9600: crate::speed_t = 0o000015; +pub const B19200: crate::speed_t = 0o000016; +pub const B38400: crate::speed_t = 0o000017; +pub const EXTA: crate::speed_t = B19200; +pub const EXTB: crate::speed_t = B38400; +pub const B57600: crate::speed_t = 0o010001; +pub const B115200: crate::speed_t = 0o010002; +pub const B230400: crate::speed_t = 0o010003; +pub const B460800: crate::speed_t = 0o010004; +pub const B500000: crate::speed_t = 0o010005; +pub const B576000: crate::speed_t = 0o010006; +pub const B921600: crate::speed_t = 0o010007; +pub const B1000000: crate::speed_t = 0o010010; +pub const B1152000: crate::speed_t = 0o010011; +pub const B1500000: crate::speed_t = 0o010012; +pub const B2000000: crate::speed_t = 0o010013; +pub const B2500000: crate::speed_t = 0o010014; +pub const B3000000: crate::speed_t = 0o010015; +pub const B3500000: crate::speed_t = 0o010016; +pub const B4000000: crate::speed_t = 0o010017; + +pub const EHWPOISON: c_int = 168; + +extern "C" { + pub fn sysctl( + name: *mut c_int, + namelen: c_int, + oldp: *mut c_void, + oldlenp: *mut size_t, + newp: *mut c_void, + newlen: size_t, + ) -> c_int; +} diff --git a/vendor/libc/src/unix/linux_like/linux/gnu/b64/mod.rs b/vendor/libc/src/unix/linux_like/linux/gnu/b64/mod.rs new file mode 100644 index 00000000000000..ba5678b4597952 --- /dev/null +++ b/vendor/libc/src/unix/linux_like/linux/gnu/b64/mod.rs @@ -0,0 +1,213 @@ +//! 64-bit specific definitions for linux-like values + +use crate::prelude::*; + +pub type ino_t = u64; +pub type off_t = i64; +pub type blkcnt_t = i64; +pub type shmatt_t = u64; +pub type msgqnum_t = u64; +pub type msglen_t = u64; +pub type fsblkcnt_t = u64; +pub type fsfilcnt_t = u64; +pub type rlim_t = u64; +#[cfg(all(target_arch = "x86_64", target_pointer_width = "32"))] +pub type __syscall_ulong_t = c_ulonglong; +#[cfg(not(all(target_arch = "x86_64", target_pointer_width = "32")))] +pub type __syscall_ulong_t = c_ulong; + +cfg_if! { + if #[cfg(all(target_arch = "aarch64", target_pointer_width = "32"))] { + pub type clock_t = i32; + pub type time_t = i32; + pub type __fsword_t = i32; + } else { + pub type __fsword_t = i64; + pub type clock_t = i64; + pub type time_t = i64; + } +} + +s! 
{ + pub struct sigset_t { + #[cfg(target_pointer_width = "32")] + __val: [u32; 32], + #[cfg(target_pointer_width = "64")] + __val: [u64; 16], + } + + pub struct sysinfo { + pub uptime: i64, + pub loads: [u64; 3], + pub totalram: u64, + pub freeram: u64, + pub sharedram: u64, + pub bufferram: u64, + pub totalswap: u64, + pub freeswap: u64, + pub procs: c_ushort, + pub pad: c_ushort, + pub totalhigh: u64, + pub freehigh: u64, + pub mem_unit: c_uint, + pub _f: [c_char; 0], + } + + pub struct msqid_ds { + pub msg_perm: crate::ipc_perm, + pub msg_stime: crate::time_t, + pub msg_rtime: crate::time_t, + pub msg_ctime: crate::time_t, + pub __msg_cbytes: u64, + pub msg_qnum: crate::msgqnum_t, + pub msg_qbytes: crate::msglen_t, + pub msg_lspid: crate::pid_t, + pub msg_lrpid: crate::pid_t, + __glibc_reserved4: u64, + __glibc_reserved5: u64, + } + + pub struct semid_ds { + pub sem_perm: ipc_perm, + pub sem_otime: crate::time_t, + #[cfg(not(any( + target_arch = "aarch64", + target_arch = "loongarch64", + target_arch = "mips64", + target_arch = "mips64r6", + target_arch = "powerpc64", + target_arch = "riscv64", + target_arch = "sparc64", + target_arch = "s390x", + )))] + __reserved: crate::__syscall_ulong_t, + pub sem_ctime: crate::time_t, + #[cfg(not(any( + target_arch = "aarch64", + target_arch = "loongarch64", + target_arch = "mips64", + target_arch = "mips64r6", + target_arch = "powerpc64", + target_arch = "riscv64", + target_arch = "sparc64", + target_arch = "s390x", + )))] + __reserved2: crate::__syscall_ulong_t, + pub sem_nsems: crate::__syscall_ulong_t, + __glibc_reserved3: crate::__syscall_ulong_t, + __glibc_reserved4: crate::__syscall_ulong_t, + } + + pub struct timex { + pub modes: c_uint, + #[cfg(all(target_arch = "x86_64", target_pointer_width = "32"))] + pub offset: i64, + #[cfg(not(all(target_arch = "x86_64", target_pointer_width = "32")))] + pub offset: c_long, + #[cfg(all(target_arch = "x86_64", target_pointer_width = "32"))] + pub freq: i64, + #[cfg(not(all(target_arch = "x86_64", target_pointer_width = "32")))] + pub freq: c_long, + #[cfg(all(target_arch = "x86_64", target_pointer_width = "32"))] + pub maxerror: i64, + #[cfg(not(all(target_arch = "x86_64", target_pointer_width = "32")))] + pub maxerror: c_long, + #[cfg(all(target_arch = "x86_64", target_pointer_width = "32"))] + pub esterror: i64, + #[cfg(not(all(target_arch = "x86_64", target_pointer_width = "32")))] + pub esterror: c_long, + pub status: c_int, + #[cfg(all(target_arch = "x86_64", target_pointer_width = "32"))] + pub constant: i64, + #[cfg(not(all(target_arch = "x86_64", target_pointer_width = "32")))] + pub constant: c_long, + #[cfg(all(target_arch = "x86_64", target_pointer_width = "32"))] + pub precision: i64, + #[cfg(not(all(target_arch = "x86_64", target_pointer_width = "32")))] + pub precision: c_long, + #[cfg(all(target_arch = "x86_64", target_pointer_width = "32"))] + pub tolerance: i64, + #[cfg(not(all(target_arch = "x86_64", target_pointer_width = "32")))] + pub tolerance: c_long, + pub time: crate::timeval, + #[cfg(all(target_arch = "x86_64", target_pointer_width = "32"))] + pub tick: i64, + #[cfg(not(all(target_arch = "x86_64", target_pointer_width = "32")))] + pub tick: c_long, + #[cfg(all(target_arch = "x86_64", target_pointer_width = "32"))] + pub ppsfreq: i64, + #[cfg(not(all(target_arch = "x86_64", target_pointer_width = "32")))] + pub ppsfreq: c_long, + #[cfg(all(target_arch = "x86_64", target_pointer_width = "32"))] + pub jitter: i64, + #[cfg(not(all(target_arch = "x86_64", target_pointer_width = 
"32")))] + pub jitter: c_long, + pub shift: c_int, + #[cfg(all(target_arch = "x86_64", target_pointer_width = "32"))] + pub stabil: i64, + #[cfg(not(all(target_arch = "x86_64", target_pointer_width = "32")))] + pub stabil: c_long, + #[cfg(all(target_arch = "x86_64", target_pointer_width = "32"))] + pub jitcnt: i64, + #[cfg(not(all(target_arch = "x86_64", target_pointer_width = "32")))] + pub jitcnt: c_long, + #[cfg(all(target_arch = "x86_64", target_pointer_width = "32"))] + pub calcnt: i64, + #[cfg(not(all(target_arch = "x86_64", target_pointer_width = "32")))] + pub calcnt: c_long, + #[cfg(all(target_arch = "x86_64", target_pointer_width = "32"))] + pub errcnt: i64, + #[cfg(not(all(target_arch = "x86_64", target_pointer_width = "32")))] + pub errcnt: c_long, + #[cfg(all(target_arch = "x86_64", target_pointer_width = "32"))] + pub stbcnt: i64, + #[cfg(not(all(target_arch = "x86_64", target_pointer_width = "32")))] + pub stbcnt: c_long, + pub tai: c_int, + pub __unused1: i32, + pub __unused2: i32, + pub __unused3: i32, + pub __unused4: i32, + pub __unused5: i32, + pub __unused6: i32, + pub __unused7: i32, + pub __unused8: i32, + pub __unused9: i32, + pub __unused10: i32, + pub __unused11: i32, + } +} + +pub const __SIZEOF_PTHREAD_RWLOCKATTR_T: usize = 8; + +pub const O_LARGEFILE: c_int = 0; + +cfg_if! { + if #[cfg(target_arch = "aarch64")] { + mod aarch64; + pub use self::aarch64::*; + } else if #[cfg(any(target_arch = "powerpc64"))] { + mod powerpc64; + pub use self::powerpc64::*; + } else if #[cfg(any(target_arch = "sparc64"))] { + mod sparc64; + pub use self::sparc64::*; + } else if #[cfg(any(target_arch = "mips64", target_arch = "mips64r6"))] { + mod mips64; + pub use self::mips64::*; + } else if #[cfg(any(target_arch = "s390x"))] { + mod s390x; + pub use self::s390x::*; + } else if #[cfg(target_arch = "x86_64")] { + mod x86_64; + pub use self::x86_64::*; + } else if #[cfg(any(target_arch = "riscv64"))] { + mod riscv64; + pub use self::riscv64::*; + } else if #[cfg(any(target_arch = "loongarch64"))] { + mod loongarch64; + pub use self::loongarch64::*; + } else { + // Unknown target_arch + } +} diff --git a/vendor/libc/src/unix/linux_like/linux/gnu/b64/powerpc64/mod.rs b/vendor/libc/src/unix/linux_like/linux/gnu/b64/powerpc64/mod.rs new file mode 100644 index 00000000000000..047efe55b1a388 --- /dev/null +++ b/vendor/libc/src/unix/linux_like/linux/gnu/b64/powerpc64/mod.rs @@ -0,0 +1,974 @@ +//! PowerPC64-specific definitions for 64-bit linux-like values + +use crate::prelude::*; +use crate::{off64_t, off_t, pthread_mutex_t}; + +pub type wchar_t = i32; +pub type nlink_t = u64; +pub type blksize_t = i64; +pub type suseconds_t = i64; +pub type __u64 = c_ulong; +pub type __s64 = c_long; + +s! 
{ + // FIXME(1.0): This should not implement `PartialEq` + #[allow(unpredictable_function_pointer_comparisons)] + pub struct sigaction { + pub sa_sigaction: crate::sighandler_t, + pub sa_mask: crate::sigset_t, + #[cfg(target_arch = "sparc64")] + __reserved0: c_int, + pub sa_flags: c_int, + pub sa_restorer: Option<extern "C" fn()>, + } + + pub struct statfs { + pub f_type: crate::__fsword_t, + pub f_bsize: crate::__fsword_t, + pub f_blocks: crate::fsblkcnt_t, + pub f_bfree: crate::fsblkcnt_t, + pub f_bavail: crate::fsblkcnt_t, + + pub f_files: crate::fsfilcnt_t, + pub f_ffree: crate::fsfilcnt_t, + pub f_fsid: crate::fsid_t, + + pub f_namelen: crate::__fsword_t, + pub f_frsize: crate::__fsword_t, + f_spare: [crate::__fsword_t; 5], + } + + pub struct flock { + pub l_type: c_short, + pub l_whence: c_short, + pub l_start: off_t, + pub l_len: off_t, + pub l_pid: crate::pid_t, + } + + pub struct flock64 { + pub l_type: c_short, + pub l_whence: c_short, + pub l_start: off64_t, + pub l_len: off64_t, + pub l_pid: crate::pid_t, + } + + pub struct stat { + pub st_dev: crate::dev_t, + pub st_ino: crate::ino_t, + pub st_nlink: crate::nlink_t, + pub st_mode: crate::mode_t, + pub st_uid: crate::uid_t, + pub st_gid: crate::gid_t, + __pad0: c_int, + pub st_rdev: crate::dev_t, + pub st_size: off_t, + pub st_blksize: crate::blksize_t, + pub st_blocks: crate::blkcnt_t, + pub st_atime: crate::time_t, + pub st_atime_nsec: c_long, + pub st_mtime: crate::time_t, + pub st_mtime_nsec: c_long, + pub st_ctime: crate::time_t, + pub st_ctime_nsec: c_long, + __unused: [c_long; 3], + } + + pub struct stat64 { + pub st_dev: crate::dev_t, + pub st_ino: crate::ino64_t, + pub st_nlink: crate::nlink_t, + pub st_mode: crate::mode_t, + pub st_uid: crate::uid_t, + pub st_gid: crate::gid_t, + __pad0: c_int, + pub st_rdev: crate::dev_t, + pub st_size: off64_t, + pub st_blksize: crate::blksize_t, + pub st_blocks: crate::blkcnt64_t, + pub st_atime: crate::time_t, + pub st_atime_nsec: c_long, + pub st_mtime: crate::time_t, + pub st_mtime_nsec: c_long, + pub st_ctime: crate::time_t, + pub st_ctime_nsec: c_long, + __reserved: [c_long; 3], + } + + pub struct statfs64 { + pub f_type: crate::__fsword_t, + pub f_bsize: crate::__fsword_t, + pub f_blocks: u64, + pub f_bfree: u64, + pub f_bavail: u64, + pub f_files: u64, + pub f_ffree: u64, + pub f_fsid: crate::fsid_t, + pub f_namelen: crate::__fsword_t, + pub f_frsize: crate::__fsword_t, + pub f_flags: crate::__fsword_t, + pub f_spare: [crate::__fsword_t; 4], + } + + pub struct statvfs { + pub f_bsize: c_ulong, + pub f_frsize: c_ulong, + pub f_blocks: crate::fsblkcnt_t, + pub f_bfree: crate::fsblkcnt_t, + pub f_bavail: crate::fsblkcnt_t, + pub f_files: crate::fsfilcnt_t, + pub f_ffree: crate::fsfilcnt_t, + pub f_favail: crate::fsfilcnt_t, + pub f_fsid: c_ulong, + pub f_flag: c_ulong, + pub f_namemax: c_ulong, + __f_spare: [c_int; 6], + } + + pub struct statvfs64 { + pub f_bsize: c_ulong, + pub f_frsize: c_ulong, + pub f_blocks: u64, + pub f_bfree: u64, + pub f_bavail: u64, + pub f_files: u64, + pub f_ffree: u64, + pub f_favail: u64, + pub f_fsid: c_ulong, + pub f_flag: c_ulong, + pub f_namemax: c_ulong, + __f_spare: [c_int; 6], + } + + pub struct pthread_attr_t { + __size: [u64; 7], + } + + pub struct ipc_perm { + pub __key: crate::key_t, + pub uid: crate::uid_t, + pub gid: crate::gid_t, + pub cuid: crate::uid_t, + pub cgid: crate::gid_t, + pub mode: crate::mode_t, + pub __seq: u32, + __pad1: u32, + __unused1: u64, + __unused2: c_ulong, + } + + pub struct shmid_ds { + pub shm_perm: crate::ipc_perm, + pub 
shm_atime: crate::time_t, + pub shm_dtime: crate::time_t, + pub shm_ctime: crate::time_t, + pub shm_segsz: size_t, + pub shm_cpid: crate::pid_t, + pub shm_lpid: crate::pid_t, + pub shm_nattch: crate::shmatt_t, + __unused4: c_ulong, + __unused5: c_ulong, + } + + pub struct siginfo_t { + pub si_signo: c_int, + pub si_errno: c_int, + pub si_code: c_int, + #[doc(hidden)] + #[deprecated( + since = "0.2.54", + note = "Please leave a comment on \ + https://github.com/rust-lang/libc/pull/1316 if you're using \ + this field" + )] + pub _pad: [c_int; 29], + _align: [usize; 0], + } + + pub struct stack_t { + pub ss_sp: *mut c_void, + pub ss_flags: c_int, + pub ss_size: size_t, + } +} + +s_no_extra_traits! { + #[repr(align(16))] + pub struct max_align_t { + priv_: [i64; 4], + } +} + +pub const POSIX_FADV_DONTNEED: c_int = 4; +pub const POSIX_FADV_NOREUSE: c_int = 5; + +pub const RTLD_DEEPBIND: c_int = 0x8; +pub const RTLD_GLOBAL: c_int = 0x100; +pub const RTLD_NOLOAD: c_int = 0x4; +pub const VEOF: usize = 4; +pub const __SIZEOF_PTHREAD_RWLOCK_T: usize = 56; +pub const __SIZEOF_PTHREAD_BARRIER_T: usize = 32; + +pub const O_APPEND: c_int = 1024; +pub const O_CREAT: c_int = 64; +pub const O_EXCL: c_int = 128; +pub const O_NOCTTY: c_int = 256; +pub const O_NONBLOCK: c_int = 2048; +pub const O_SYNC: c_int = 1052672; +pub const O_RSYNC: c_int = 1052672; +pub const O_DSYNC: c_int = 4096; +pub const O_FSYNC: c_int = 0x101000; +pub const O_NOATIME: c_int = 0o1000000; +pub const O_PATH: c_int = 0o10000000; +pub const O_TMPFILE: c_int = 0o20000000 | O_DIRECTORY; + +pub const MADV_SOFT_OFFLINE: c_int = 101; +pub const MAP_GROWSDOWN: c_int = 0x0100; +pub const MAP_ANON: c_int = 0x0020; +pub const MAP_ANONYMOUS: c_int = 0x0020; +pub const MAP_DENYWRITE: c_int = 0x0800; +pub const MAP_EXECUTABLE: c_int = 0x01000; +pub const MAP_POPULATE: c_int = 0x08000; +pub const MAP_NONBLOCK: c_int = 0x010000; +pub const MAP_STACK: c_int = 0x020000; +pub const MAP_HUGETLB: c_int = 0x040000; + +pub const EDEADLK: c_int = 35; +pub const ENAMETOOLONG: c_int = 36; +pub const ENOLCK: c_int = 37; +pub const ENOSYS: c_int = 38; +pub const ENOTEMPTY: c_int = 39; +pub const ELOOP: c_int = 40; +pub const ENOMSG: c_int = 42; +pub const EIDRM: c_int = 43; +pub const ECHRNG: c_int = 44; +pub const EL2NSYNC: c_int = 45; +pub const EL3HLT: c_int = 46; +pub const EL3RST: c_int = 47; +pub const ELNRNG: c_int = 48; +pub const EUNATCH: c_int = 49; +pub const ENOCSI: c_int = 50; +pub const EL2HLT: c_int = 51; +pub const EBADE: c_int = 52; +pub const EBADR: c_int = 53; +pub const EXFULL: c_int = 54; +pub const ENOANO: c_int = 55; +pub const EBADRQC: c_int = 56; +pub const EBADSLT: c_int = 57; +pub const EMULTIHOP: c_int = 72; +pub const EOVERFLOW: c_int = 75; +pub const ENOTUNIQ: c_int = 76; +pub const EBADFD: c_int = 77; +pub const EBADMSG: c_int = 74; +pub const EREMCHG: c_int = 78; +pub const ELIBACC: c_int = 79; +pub const ELIBBAD: c_int = 80; +pub const ELIBSCN: c_int = 81; +pub const ELIBMAX: c_int = 82; +pub const ELIBEXEC: c_int = 83; +pub const EILSEQ: c_int = 84; +pub const ERESTART: c_int = 85; +pub const ESTRPIPE: c_int = 86; +pub const EUSERS: c_int = 87; +pub const ENOTSOCK: c_int = 88; +pub const EDESTADDRREQ: c_int = 89; +pub const EMSGSIZE: c_int = 90; +pub const EPROTOTYPE: c_int = 91; +pub const ENOPROTOOPT: c_int = 92; +pub const EPROTONOSUPPORT: c_int = 93; +pub const ESOCKTNOSUPPORT: c_int = 94; +pub const EOPNOTSUPP: c_int = 95; +pub const EPFNOSUPPORT: c_int = 96; +pub const EAFNOSUPPORT: c_int = 97; +pub const EADDRINUSE: 
c_int = 98; +pub const EADDRNOTAVAIL: c_int = 99; +pub const ENETDOWN: c_int = 100; +pub const ENETUNREACH: c_int = 101; +pub const ENETRESET: c_int = 102; +pub const ECONNABORTED: c_int = 103; +pub const ECONNRESET: c_int = 104; +pub const ENOBUFS: c_int = 105; +pub const EISCONN: c_int = 106; +pub const ENOTCONN: c_int = 107; +pub const ESHUTDOWN: c_int = 108; +pub const ETOOMANYREFS: c_int = 109; +pub const ETIMEDOUT: c_int = 110; +pub const ECONNREFUSED: c_int = 111; +pub const EHOSTDOWN: c_int = 112; +pub const EHOSTUNREACH: c_int = 113; +pub const EALREADY: c_int = 114; +pub const EINPROGRESS: c_int = 115; +pub const ESTALE: c_int = 116; +pub const EDQUOT: c_int = 122; +pub const ENOMEDIUM: c_int = 123; +pub const EMEDIUMTYPE: c_int = 124; +pub const ECANCELED: c_int = 125; +pub const ENOKEY: c_int = 126; +pub const EKEYEXPIRED: c_int = 127; +pub const EKEYREVOKED: c_int = 128; +pub const EKEYREJECTED: c_int = 129; +pub const EOWNERDEAD: c_int = 130; +pub const ENOTRECOVERABLE: c_int = 131; +pub const EHWPOISON: c_int = 133; +pub const ERFKILL: c_int = 132; + +pub const SOCK_STREAM: c_int = 1; +pub const SOCK_DGRAM: c_int = 2; + +pub const SA_ONSTACK: c_int = 0x08000000; +pub const SA_SIGINFO: c_int = 0x00000004; +pub const SA_NOCLDWAIT: c_int = 0x00000002; + +pub const SIGTTIN: c_int = 21; +pub const SIGTTOU: c_int = 22; +pub const SIGXCPU: c_int = 24; +pub const SIGXFSZ: c_int = 25; +pub const SIGVTALRM: c_int = 26; +pub const SIGPROF: c_int = 27; +pub const SIGWINCH: c_int = 28; +pub const SIGCHLD: c_int = 17; +pub const SIGBUS: c_int = 7; +pub const SIGUSR1: c_int = 10; +pub const SIGUSR2: c_int = 12; +pub const SIGCONT: c_int = 18; +pub const SIGSTOP: c_int = 19; +pub const SIGTSTP: c_int = 20; +pub const SIGURG: c_int = 23; +pub const SIGIO: c_int = 29; +pub const SIGSYS: c_int = 31; +pub const SIGSTKFLT: c_int = 16; +#[deprecated(since = "0.2.55", note = "Use SIGSYS instead")] +pub const SIGUNUSED: c_int = 31; +pub const SIGPOLL: c_int = 29; +pub const SIGPWR: c_int = 30; +pub const SIG_SETMASK: c_int = 2; +pub const SIG_BLOCK: c_int = 0x000000; +pub const SIG_UNBLOCK: c_int = 0x01; + +pub const POLLWRNORM: c_short = 0x100; +pub const POLLWRBAND: c_short = 0x200; + +pub const O_ASYNC: c_int = 0x2000; +pub const O_NDELAY: c_int = 0x800; + +pub const PTRACE_DETACH: c_uint = 17; + +pub const EFD_NONBLOCK: c_int = 0x800; + +pub const F_GETLK: c_int = 5; +pub const F_GETOWN: c_int = 9; +pub const F_SETOWN: c_int = 8; +pub const F_SETLK: c_int = 6; +pub const F_SETLKW: c_int = 7; +pub const F_OFD_GETLK: c_int = 36; +pub const F_OFD_SETLK: c_int = 37; +pub const F_OFD_SETLKW: c_int = 38; + +pub const F_RDLCK: c_int = 0; +pub const F_WRLCK: c_int = 1; +pub const F_UNLCK: c_int = 2; + +pub const SFD_NONBLOCK: c_int = 0x0800; + +pub const TCSANOW: c_int = 0; +pub const TCSADRAIN: c_int = 1; +pub const TCSAFLUSH: c_int = 2; + +pub const SFD_CLOEXEC: c_int = 0x080000; + +pub const NCCS: usize = 32; + +pub const O_TRUNC: c_int = 512; + +pub const O_CLOEXEC: c_int = 0x80000; + +pub const EBFONT: c_int = 59; +pub const ENOSTR: c_int = 60; +pub const ENODATA: c_int = 61; +pub const ETIME: c_int = 62; +pub const ENOSR: c_int = 63; +pub const ENONET: c_int = 64; +pub const ENOPKG: c_int = 65; +pub const EREMOTE: c_int = 66; +pub const ENOLINK: c_int = 67; +pub const EADV: c_int = 68; +pub const ESRMNT: c_int = 69; +pub const ECOMM: c_int = 70; +pub const EPROTO: c_int = 71; +pub const EDOTDOT: c_int = 73; + +pub const SA_NODEFER: c_int = 0x40000000; +pub const SA_RESETHAND: c_int = 0x80000000; 
+pub const SA_RESTART: c_int = 0x10000000; +pub const SA_NOCLDSTOP: c_int = 0x00000001; + +pub const EPOLL_CLOEXEC: c_int = 0x80000; + +pub const EFD_CLOEXEC: c_int = 0x80000; + +pub const __SIZEOF_PTHREAD_CONDATTR_T: usize = 4; +pub const __SIZEOF_PTHREAD_MUTEX_T: usize = 40; +pub const __SIZEOF_PTHREAD_MUTEXATTR_T: usize = 4; +pub const __SIZEOF_PTHREAD_BARRIERATTR_T: usize = 4; + +#[cfg(target_endian = "little")] +pub const PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP: crate::pthread_mutex_t = pthread_mutex_t { + size: [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ], +}; +#[cfg(target_endian = "little")] +pub const PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP: crate::pthread_mutex_t = pthread_mutex_t { + size: [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ], +}; +#[cfg(target_endian = "little")] +pub const PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP: crate::pthread_mutex_t = pthread_mutex_t { + size: [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ], +}; +#[cfg(target_endian = "big")] +pub const PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP: crate::pthread_mutex_t = pthread_mutex_t { + size: [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ], +}; +#[cfg(target_endian = "big")] +pub const PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP: crate::pthread_mutex_t = pthread_mutex_t { + size: [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ], +}; +#[cfg(target_endian = "big")] +pub const PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP: crate::pthread_mutex_t = pthread_mutex_t { + size: [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ], +}; + +pub const O_DIRECTORY: c_int = 0x4000; +pub const O_NOFOLLOW: c_int = 0x8000; +pub const O_DIRECT: c_int = 0x20000; + +pub const MAP_LOCKED: c_int = 0x00080; +pub const MAP_NORESERVE: c_int = 0x00040; +pub const MAP_SYNC: c_int = 0x080000; + +pub const EDEADLOCK: c_int = 58; +pub const EUCLEAN: c_int = 117; +pub const ENOTNAM: c_int = 118; +pub const ENAVAIL: c_int = 119; +pub const EISNAM: c_int = 120; +pub const EREMOTEIO: c_int = 121; + +pub const MCL_CURRENT: c_int = 0x2000; +pub const MCL_FUTURE: c_int = 0x4000; +pub const MCL_ONFAULT: c_int = 0x8000; + +pub const SIGSTKSZ: size_t = 0x4000; +pub const MINSIGSTKSZ: size_t = 4096; +pub const CBAUD: crate::tcflag_t = 0xff; +pub const TAB1: crate::tcflag_t = 0x400; +pub const TAB2: crate::tcflag_t = 0x800; +pub const TAB3: crate::tcflag_t = 0xc00; +pub const CR1: crate::tcflag_t = 0x1000; +pub const CR2: crate::tcflag_t = 0x2000; +pub const CR3: crate::tcflag_t = 0x3000; +pub const FF1: crate::tcflag_t = 0x4000; +pub const BS1: crate::tcflag_t = 0x8000; +pub const VT1: crate::tcflag_t = 0x10000; +pub const VWERASE: usize = 0xa; +pub const VREPRINT: usize = 0xb; +pub const VSUSP: usize = 0xc; +pub const VSTART: usize = 0xd; +pub const VSTOP: usize = 0xe; +pub const VDISCARD: usize = 0x10; +pub const VTIME: usize = 0x7; +pub const IXON: crate::tcflag_t = 0x200; +pub const IXOFF: crate::tcflag_t = 0x400; +pub const ONLCR: crate::tcflag_t = 0x2; +pub const CSIZE: crate::tcflag_t = 0x300; +pub const CS6: crate::tcflag_t = 0x100; +pub const CS7: crate::tcflag_t = 0x200; +pub 
const CS8: crate::tcflag_t = 0x300; +pub const CSTOPB: crate::tcflag_t = 0x400; +pub const CREAD: crate::tcflag_t = 0x800; +pub const PARENB: crate::tcflag_t = 0x1000; +pub const PARODD: crate::tcflag_t = 0x2000; +pub const HUPCL: crate::tcflag_t = 0x4000; +pub const CLOCAL: crate::tcflag_t = 0x8000; +pub const ECHOKE: crate::tcflag_t = 0x1; +pub const ECHOE: crate::tcflag_t = 0x2; +pub const ECHOK: crate::tcflag_t = 0x4; +pub const ECHONL: crate::tcflag_t = 0x10; +pub const ECHOPRT: crate::tcflag_t = 0x20; +pub const ECHOCTL: crate::tcflag_t = 0x40; +pub const ISIG: crate::tcflag_t = 0x80; +pub const ICANON: crate::tcflag_t = 0x100; +pub const PENDIN: crate::tcflag_t = 0x20000000; +pub const NOFLSH: crate::tcflag_t = 0x80000000; +pub const VSWTC: usize = 9; +pub const OLCUC: crate::tcflag_t = 0o000004; +pub const NLDLY: crate::tcflag_t = 0o001400; +pub const CRDLY: crate::tcflag_t = 0o030000; +pub const TABDLY: crate::tcflag_t = 0o006000; +pub const BSDLY: crate::tcflag_t = 0o100000; +pub const FFDLY: crate::tcflag_t = 0o040000; +pub const VTDLY: crate::tcflag_t = 0o200000; +pub const XTABS: crate::tcflag_t = 0o006000; + +pub const B0: crate::speed_t = 0o000000; +pub const B50: crate::speed_t = 0o000001; +pub const B75: crate::speed_t = 0o000002; +pub const B110: crate::speed_t = 0o000003; +pub const B134: crate::speed_t = 0o000004; +pub const B150: crate::speed_t = 0o000005; +pub const B200: crate::speed_t = 0o000006; +pub const B300: crate::speed_t = 0o000007; +pub const B600: crate::speed_t = 0o000010; +pub const B1200: crate::speed_t = 0o000011; +pub const B1800: crate::speed_t = 0o000012; +pub const B2400: crate::speed_t = 0o000013; +pub const B4800: crate::speed_t = 0o000014; +pub const B9600: crate::speed_t = 0o000015; +pub const B19200: crate::speed_t = 0o000016; +pub const B38400: crate::speed_t = 0o000017; +pub const EXTA: crate::speed_t = B19200; +pub const EXTB: crate::speed_t = B38400; +pub const CBAUDEX: crate::speed_t = 0o000020; +pub const B57600: crate::speed_t = 0o0020; +pub const B115200: crate::speed_t = 0o0021; +pub const B230400: crate::speed_t = 0o0022; +pub const B460800: crate::speed_t = 0o0023; +pub const B500000: crate::speed_t = 0o0024; +pub const B576000: crate::speed_t = 0o0025; +pub const B921600: crate::speed_t = 0o0026; +pub const B1000000: crate::speed_t = 0o0027; +pub const B1152000: crate::speed_t = 0o0030; +pub const B1500000: crate::speed_t = 0o0031; +pub const B2000000: crate::speed_t = 0o0032; +pub const B2500000: crate::speed_t = 0o0033; +pub const B3000000: crate::speed_t = 0o0034; +pub const B3500000: crate::speed_t = 0o0035; +pub const B4000000: crate::speed_t = 0o0036; + +pub const VEOL: usize = 6; +pub const VEOL2: usize = 8; +pub const VMIN: usize = 5; +pub const IEXTEN: crate::tcflag_t = 0x400; +pub const TOSTOP: crate::tcflag_t = 0x400000; +pub const FLUSHO: crate::tcflag_t = 0x800000; +pub const EXTPROC: crate::tcflag_t = 0x10000000; + +// Syscall table +pub const SYS_restart_syscall: c_long = 0; +pub const SYS_exit: c_long = 1; +pub const SYS_fork: c_long = 2; +pub const SYS_read: c_long = 3; +pub const SYS_write: c_long = 4; +pub const SYS_open: c_long = 5; +pub const SYS_close: c_long = 6; +pub const SYS_waitpid: c_long = 7; +pub const SYS_creat: c_long = 8; +pub const SYS_link: c_long = 9; +pub const SYS_unlink: c_long = 10; +pub const SYS_execve: c_long = 11; +pub const SYS_chdir: c_long = 12; +pub const SYS_time: c_long = 13; +pub const SYS_mknod: c_long = 14; +pub const SYS_chmod: c_long = 15; +pub const SYS_lchown: c_long = 16; 
+pub const SYS_break: c_long = 17; +pub const SYS_oldstat: c_long = 18; +pub const SYS_lseek: c_long = 19; +pub const SYS_getpid: c_long = 20; +pub const SYS_mount: c_long = 21; +pub const SYS_umount: c_long = 22; +pub const SYS_setuid: c_long = 23; +pub const SYS_getuid: c_long = 24; +pub const SYS_stime: c_long = 25; +pub const SYS_ptrace: c_long = 26; +pub const SYS_alarm: c_long = 27; +pub const SYS_oldfstat: c_long = 28; +pub const SYS_pause: c_long = 29; +pub const SYS_utime: c_long = 30; +pub const SYS_stty: c_long = 31; +pub const SYS_gtty: c_long = 32; +pub const SYS_access: c_long = 33; +pub const SYS_nice: c_long = 34; +pub const SYS_ftime: c_long = 35; +pub const SYS_sync: c_long = 36; +pub const SYS_kill: c_long = 37; +pub const SYS_rename: c_long = 38; +pub const SYS_mkdir: c_long = 39; +pub const SYS_rmdir: c_long = 40; +pub const SYS_dup: c_long = 41; +pub const SYS_pipe: c_long = 42; +pub const SYS_times: c_long = 43; +pub const SYS_prof: c_long = 44; +pub const SYS_brk: c_long = 45; +pub const SYS_setgid: c_long = 46; +pub const SYS_getgid: c_long = 47; +pub const SYS_signal: c_long = 48; +pub const SYS_geteuid: c_long = 49; +pub const SYS_getegid: c_long = 50; +pub const SYS_acct: c_long = 51; +pub const SYS_umount2: c_long = 52; +pub const SYS_lock: c_long = 53; +pub const SYS_ioctl: c_long = 54; +pub const SYS_fcntl: c_long = 55; +pub const SYS_mpx: c_long = 56; +pub const SYS_setpgid: c_long = 57; +pub const SYS_ulimit: c_long = 58; +pub const SYS_oldolduname: c_long = 59; +pub const SYS_umask: c_long = 60; +pub const SYS_chroot: c_long = 61; +pub const SYS_ustat: c_long = 62; +pub const SYS_dup2: c_long = 63; +pub const SYS_getppid: c_long = 64; +pub const SYS_getpgrp: c_long = 65; +pub const SYS_setsid: c_long = 66; +pub const SYS_sigaction: c_long = 67; +pub const SYS_sgetmask: c_long = 68; +pub const SYS_ssetmask: c_long = 69; +pub const SYS_setreuid: c_long = 70; +pub const SYS_setregid: c_long = 71; +pub const SYS_sigsuspend: c_long = 72; +pub const SYS_sigpending: c_long = 73; +pub const SYS_sethostname: c_long = 74; +pub const SYS_setrlimit: c_long = 75; +pub const SYS_getrlimit: c_long = 76; +pub const SYS_getrusage: c_long = 77; +pub const SYS_gettimeofday: c_long = 78; +pub const SYS_settimeofday: c_long = 79; +pub const SYS_getgroups: c_long = 80; +pub const SYS_setgroups: c_long = 81; +pub const SYS_select: c_long = 82; +pub const SYS_symlink: c_long = 83; +pub const SYS_oldlstat: c_long = 84; +pub const SYS_readlink: c_long = 85; +pub const SYS_uselib: c_long = 86; +pub const SYS_swapon: c_long = 87; +pub const SYS_reboot: c_long = 88; +pub const SYS_readdir: c_long = 89; +pub const SYS_mmap: c_long = 90; +pub const SYS_munmap: c_long = 91; +pub const SYS_truncate: c_long = 92; +pub const SYS_ftruncate: c_long = 93; +pub const SYS_fchmod: c_long = 94; +pub const SYS_fchown: c_long = 95; +pub const SYS_getpriority: c_long = 96; +pub const SYS_setpriority: c_long = 97; +pub const SYS_profil: c_long = 98; +pub const SYS_statfs: c_long = 99; +pub const SYS_fstatfs: c_long = 100; +pub const SYS_ioperm: c_long = 101; +pub const SYS_socketcall: c_long = 102; +pub const SYS_syslog: c_long = 103; +pub const SYS_setitimer: c_long = 104; +pub const SYS_getitimer: c_long = 105; +pub const SYS_stat: c_long = 106; +pub const SYS_lstat: c_long = 107; +pub const SYS_fstat: c_long = 108; +pub const SYS_olduname: c_long = 109; +pub const SYS_iopl: c_long = 110; +pub const SYS_vhangup: c_long = 111; +pub const SYS_idle: c_long = 112; +pub const SYS_vm86: c_long = 113; +pub 
const SYS_wait4: c_long = 114; +pub const SYS_swapoff: c_long = 115; +pub const SYS_sysinfo: c_long = 116; +pub const SYS_ipc: c_long = 117; +pub const SYS_fsync: c_long = 118; +pub const SYS_sigreturn: c_long = 119; +pub const SYS_clone: c_long = 120; +pub const SYS_setdomainname: c_long = 121; +pub const SYS_uname: c_long = 122; +pub const SYS_modify_ldt: c_long = 123; +pub const SYS_adjtimex: c_long = 124; +pub const SYS_mprotect: c_long = 125; +pub const SYS_sigprocmask: c_long = 126; +#[deprecated(since = "0.2.70", note = "Functional up to 2.6 kernel")] +pub const SYS_create_module: c_long = 127; +pub const SYS_init_module: c_long = 128; +pub const SYS_delete_module: c_long = 129; +#[deprecated(since = "0.2.70", note = "Functional up to 2.6 kernel")] +pub const SYS_get_kernel_syms: c_long = 130; +pub const SYS_quotactl: c_long = 131; +pub const SYS_getpgid: c_long = 132; +pub const SYS_fchdir: c_long = 133; +pub const SYS_bdflush: c_long = 134; +pub const SYS_sysfs: c_long = 135; +pub const SYS_personality: c_long = 136; +pub const SYS_afs_syscall: c_long = 137; /* Syscall for Andrew File System */ +pub const SYS_setfsuid: c_long = 138; +pub const SYS_setfsgid: c_long = 139; +pub const SYS__llseek: c_long = 140; +pub const SYS_getdents: c_long = 141; +pub const SYS__newselect: c_long = 142; +pub const SYS_flock: c_long = 143; +pub const SYS_msync: c_long = 144; +pub const SYS_readv: c_long = 145; +pub const SYS_writev: c_long = 146; +pub const SYS_getsid: c_long = 147; +pub const SYS_fdatasync: c_long = 148; +pub const SYS__sysctl: c_long = 149; +pub const SYS_mlock: c_long = 150; +pub const SYS_munlock: c_long = 151; +pub const SYS_mlockall: c_long = 152; +pub const SYS_munlockall: c_long = 153; +pub const SYS_sched_setparam: c_long = 154; +pub const SYS_sched_getparam: c_long = 155; +pub const SYS_sched_setscheduler: c_long = 156; +pub const SYS_sched_getscheduler: c_long = 157; +pub const SYS_sched_yield: c_long = 158; +pub const SYS_sched_get_priority_max: c_long = 159; +pub const SYS_sched_get_priority_min: c_long = 160; +pub const SYS_sched_rr_get_interval: c_long = 161; +pub const SYS_nanosleep: c_long = 162; +pub const SYS_mremap: c_long = 163; +pub const SYS_setresuid: c_long = 164; +pub const SYS_getresuid: c_long = 165; +#[deprecated(since = "0.2.70", note = "Functional up to 2.6 kernel")] +pub const SYS_query_module: c_long = 166; +pub const SYS_poll: c_long = 167; +pub const SYS_nfsservctl: c_long = 168; +pub const SYS_setresgid: c_long = 169; +pub const SYS_getresgid: c_long = 170; +pub const SYS_prctl: c_long = 171; +pub const SYS_rt_sigreturn: c_long = 172; +pub const SYS_rt_sigaction: c_long = 173; +pub const SYS_rt_sigprocmask: c_long = 174; +pub const SYS_rt_sigpending: c_long = 175; +pub const SYS_rt_sigtimedwait: c_long = 176; +pub const SYS_rt_sigqueueinfo: c_long = 177; +pub const SYS_rt_sigsuspend: c_long = 178; +pub const SYS_pread64: c_long = 179; +pub const SYS_pwrite64: c_long = 180; +pub const SYS_chown: c_long = 181; +pub const SYS_getcwd: c_long = 182; +pub const SYS_capget: c_long = 183; +pub const SYS_capset: c_long = 184; +pub const SYS_sigaltstack: c_long = 185; +pub const SYS_sendfile: c_long = 186; +pub const SYS_getpmsg: c_long = 187; /* some people actually want streams */ +pub const SYS_putpmsg: c_long = 188; /* some people actually want streams */ +pub const SYS_vfork: c_long = 189; +pub const SYS_ugetrlimit: c_long = 190; /* SuS compliant getrlimit */ +pub const SYS_readahead: c_long = 191; +pub const SYS_pciconfig_read: c_long = 198; +pub 
const SYS_pciconfig_write: c_long = 199; +pub const SYS_pciconfig_iobase: c_long = 200; +pub const SYS_multiplexer: c_long = 201; +pub const SYS_getdents64: c_long = 202; +pub const SYS_pivot_root: c_long = 203; +pub const SYS_madvise: c_long = 205; +pub const SYS_mincore: c_long = 206; +pub const SYS_gettid: c_long = 207; +pub const SYS_tkill: c_long = 208; +pub const SYS_setxattr: c_long = 209; +pub const SYS_lsetxattr: c_long = 210; +pub const SYS_fsetxattr: c_long = 211; +pub const SYS_getxattr: c_long = 212; +pub const SYS_lgetxattr: c_long = 213; +pub const SYS_fgetxattr: c_long = 214; +pub const SYS_listxattr: c_long = 215; +pub const SYS_llistxattr: c_long = 216; +pub const SYS_flistxattr: c_long = 217; +pub const SYS_removexattr: c_long = 218; +pub const SYS_lremovexattr: c_long = 219; +pub const SYS_fremovexattr: c_long = 220; +pub const SYS_futex: c_long = 221; +pub const SYS_sched_setaffinity: c_long = 222; +pub const SYS_sched_getaffinity: c_long = 223; +pub const SYS_tuxcall: c_long = 225; +pub const SYS_io_setup: c_long = 227; +pub const SYS_io_destroy: c_long = 228; +pub const SYS_io_getevents: c_long = 229; +pub const SYS_io_submit: c_long = 230; +pub const SYS_io_cancel: c_long = 231; +pub const SYS_set_tid_address: c_long = 232; +pub const SYS_exit_group: c_long = 234; +pub const SYS_lookup_dcookie: c_long = 235; +pub const SYS_epoll_create: c_long = 236; +pub const SYS_epoll_ctl: c_long = 237; +pub const SYS_epoll_wait: c_long = 238; +pub const SYS_remap_file_pages: c_long = 239; +pub const SYS_timer_create: c_long = 240; +pub const SYS_timer_settime: c_long = 241; +pub const SYS_timer_gettime: c_long = 242; +pub const SYS_timer_getoverrun: c_long = 243; +pub const SYS_timer_delete: c_long = 244; +pub const SYS_clock_settime: c_long = 245; +pub const SYS_clock_gettime: c_long = 246; +pub const SYS_clock_getres: c_long = 247; +pub const SYS_clock_nanosleep: c_long = 248; +pub const SYS_swapcontext: c_long = 249; +pub const SYS_tgkill: c_long = 250; +pub const SYS_utimes: c_long = 251; +pub const SYS_statfs64: c_long = 252; +pub const SYS_fstatfs64: c_long = 253; +pub const SYS_rtas: c_long = 255; +pub const SYS_sys_debug_setcontext: c_long = 256; +pub const SYS_migrate_pages: c_long = 258; +pub const SYS_mbind: c_long = 259; +pub const SYS_get_mempolicy: c_long = 260; +pub const SYS_set_mempolicy: c_long = 261; +pub const SYS_mq_open: c_long = 262; +pub const SYS_mq_unlink: c_long = 263; +pub const SYS_mq_timedsend: c_long = 264; +pub const SYS_mq_timedreceive: c_long = 265; +pub const SYS_mq_notify: c_long = 266; +pub const SYS_mq_getsetattr: c_long = 267; +pub const SYS_kexec_load: c_long = 268; +pub const SYS_add_key: c_long = 269; +pub const SYS_request_key: c_long = 270; +pub const SYS_keyctl: c_long = 271; +pub const SYS_waitid: c_long = 272; +pub const SYS_ioprio_set: c_long = 273; +pub const SYS_ioprio_get: c_long = 274; +pub const SYS_inotify_init: c_long = 275; +pub const SYS_inotify_add_watch: c_long = 276; +pub const SYS_inotify_rm_watch: c_long = 277; +pub const SYS_spu_run: c_long = 278; +pub const SYS_spu_create: c_long = 279; +pub const SYS_pselect6: c_long = 280; +pub const SYS_ppoll: c_long = 281; +pub const SYS_unshare: c_long = 282; +pub const SYS_splice: c_long = 283; +pub const SYS_tee: c_long = 284; +pub const SYS_vmsplice: c_long = 285; +pub const SYS_openat: c_long = 286; +pub const SYS_mkdirat: c_long = 287; +pub const SYS_mknodat: c_long = 288; +pub const SYS_fchownat: c_long = 289; +pub const SYS_futimesat: c_long = 290; +pub const 
SYS_newfstatat: c_long = 291; +pub const SYS_unlinkat: c_long = 292; +pub const SYS_renameat: c_long = 293; +pub const SYS_linkat: c_long = 294; +pub const SYS_symlinkat: c_long = 295; +pub const SYS_readlinkat: c_long = 296; +pub const SYS_fchmodat: c_long = 297; +pub const SYS_faccessat: c_long = 298; +pub const SYS_get_robust_list: c_long = 299; +pub const SYS_set_robust_list: c_long = 300; +pub const SYS_move_pages: c_long = 301; +pub const SYS_getcpu: c_long = 302; +pub const SYS_epoll_pwait: c_long = 303; +pub const SYS_utimensat: c_long = 304; +pub const SYS_signalfd: c_long = 305; +pub const SYS_timerfd_create: c_long = 306; +pub const SYS_eventfd: c_long = 307; +pub const SYS_sync_file_range2: c_long = 308; +pub const SYS_fallocate: c_long = 309; +pub const SYS_subpage_prot: c_long = 310; +pub const SYS_timerfd_settime: c_long = 311; +pub const SYS_timerfd_gettime: c_long = 312; +pub const SYS_signalfd4: c_long = 313; +pub const SYS_eventfd2: c_long = 314; +pub const SYS_epoll_create1: c_long = 315; +pub const SYS_dup3: c_long = 316; +pub const SYS_pipe2: c_long = 317; +pub const SYS_inotify_init1: c_long = 318; +pub const SYS_perf_event_open: c_long = 319; +pub const SYS_preadv: c_long = 320; +pub const SYS_pwritev: c_long = 321; +pub const SYS_rt_tgsigqueueinfo: c_long = 322; +pub const SYS_fanotify_init: c_long = 323; +pub const SYS_fanotify_mark: c_long = 324; +pub const SYS_prlimit64: c_long = 325; +pub const SYS_socket: c_long = 326; +pub const SYS_bind: c_long = 327; +pub const SYS_connect: c_long = 328; +pub const SYS_listen: c_long = 329; +pub const SYS_accept: c_long = 330; +pub const SYS_getsockname: c_long = 331; +pub const SYS_getpeername: c_long = 332; +pub const SYS_socketpair: c_long = 333; +pub const SYS_send: c_long = 334; +pub const SYS_sendto: c_long = 335; +pub const SYS_recv: c_long = 336; +pub const SYS_recvfrom: c_long = 337; +pub const SYS_shutdown: c_long = 338; +pub const SYS_setsockopt: c_long = 339; +pub const SYS_getsockopt: c_long = 340; +pub const SYS_sendmsg: c_long = 341; +pub const SYS_recvmsg: c_long = 342; +pub const SYS_recvmmsg: c_long = 343; +pub const SYS_accept4: c_long = 344; +pub const SYS_name_to_handle_at: c_long = 345; +pub const SYS_open_by_handle_at: c_long = 346; +pub const SYS_clock_adjtime: c_long = 347; +pub const SYS_syncfs: c_long = 348; +pub const SYS_sendmmsg: c_long = 349; +pub const SYS_setns: c_long = 350; +pub const SYS_process_vm_readv: c_long = 351; +pub const SYS_process_vm_writev: c_long = 352; +pub const SYS_finit_module: c_long = 353; +pub const SYS_kcmp: c_long = 354; +pub const SYS_sched_setattr: c_long = 355; +pub const SYS_sched_getattr: c_long = 356; +pub const SYS_renameat2: c_long = 357; +pub const SYS_seccomp: c_long = 358; +pub const SYS_getrandom: c_long = 359; +pub const SYS_memfd_create: c_long = 360; +pub const SYS_bpf: c_long = 361; +pub const SYS_execveat: c_long = 362; +pub const SYS_switch_endian: c_long = 363; +pub const SYS_userfaultfd: c_long = 364; +pub const SYS_membarrier: c_long = 365; +pub const SYS_mlock2: c_long = 378; +pub const SYS_copy_file_range: c_long = 379; +pub const SYS_preadv2: c_long = 380; +pub const SYS_pwritev2: c_long = 381; +pub const SYS_kexec_file_load: c_long = 382; +pub const SYS_statx: c_long = 383; +pub const SYS_rseq: c_long = 387; +pub const SYS_pidfd_send_signal: c_long = 424; +pub const SYS_io_uring_setup: c_long = 425; +pub const SYS_io_uring_enter: c_long = 426; +pub const SYS_io_uring_register: c_long = 427; +pub const SYS_open_tree: c_long = 428; +pub const 
SYS_move_mount: c_long = 429; +pub const SYS_fsopen: c_long = 430; +pub const SYS_fsconfig: c_long = 431; +pub const SYS_fsmount: c_long = 432; +pub const SYS_fspick: c_long = 433; +pub const SYS_pidfd_open: c_long = 434; +pub const SYS_clone3: c_long = 435; +pub const SYS_close_range: c_long = 436; +pub const SYS_openat2: c_long = 437; +pub const SYS_pidfd_getfd: c_long = 438; +pub const SYS_faccessat2: c_long = 439; +pub const SYS_process_madvise: c_long = 440; +pub const SYS_epoll_pwait2: c_long = 441; +pub const SYS_mount_setattr: c_long = 442; +pub const SYS_quotactl_fd: c_long = 443; +pub const SYS_landlock_create_ruleset: c_long = 444; +pub const SYS_landlock_add_rule: c_long = 445; +pub const SYS_landlock_restrict_self: c_long = 446; +pub const SYS_memfd_secret: c_long = 447; +pub const SYS_process_mrelease: c_long = 448; +pub const SYS_futex_waitv: c_long = 449; +pub const SYS_set_mempolicy_home_node: c_long = 450; + +extern "C" { + pub fn sysctl( + name: *mut c_int, + namelen: c_int, + oldp: *mut c_void, + oldlenp: *mut size_t, + newp: *mut c_void, + newlen: size_t, + ) -> c_int; +} diff --git a/vendor/libc/src/unix/linux_like/linux/gnu/b64/riscv64/mod.rs b/vendor/libc/src/unix/linux_like/linux/gnu/b64/riscv64/mod.rs new file mode 100644 index 00000000000000..bfbc8ee5cf6833 --- /dev/null +++ b/vendor/libc/src/unix/linux_like/linux/gnu/b64/riscv64/mod.rs @@ -0,0 +1,910 @@ +//! RISC-V-specific definitions for 64-bit linux-like values + +use crate::prelude::*; +use crate::{off64_t, off_t}; + +pub type wchar_t = c_int; + +pub type nlink_t = c_uint; +pub type blksize_t = c_int; +pub type fsblkcnt64_t = c_ulong; +pub type fsfilcnt64_t = c_ulong; +pub type suseconds_t = i64; +pub type __u64 = c_ulonglong; +pub type __s64 = c_longlong; + +s! { + pub struct pthread_attr_t { + __size: [c_ulong; 7], + } + + pub struct stat { + pub st_dev: crate::dev_t, + pub st_ino: crate::ino_t, + pub st_mode: crate::mode_t, + pub st_nlink: crate::nlink_t, + pub st_uid: crate::uid_t, + pub st_gid: crate::gid_t, + pub st_rdev: crate::dev_t, + pub __pad1: crate::dev_t, + pub st_size: off_t, + pub st_blksize: crate::blksize_t, + pub __pad2: c_int, + pub st_blocks: crate::blkcnt_t, + pub st_atime: crate::time_t, + pub st_atime_nsec: c_long, + pub st_mtime: crate::time_t, + pub st_mtime_nsec: c_long, + pub st_ctime: crate::time_t, + pub st_ctime_nsec: c_long, + __unused: [c_int; 2usize], + } + + pub struct stat64 { + pub st_dev: crate::dev_t, + pub st_ino: crate::ino64_t, + pub st_mode: crate::mode_t, + pub st_nlink: crate::nlink_t, + pub st_uid: crate::uid_t, + pub st_gid: crate::gid_t, + pub st_rdev: crate::dev_t, + pub __pad1: crate::dev_t, + pub st_size: off64_t, + pub st_blksize: crate::blksize_t, + pub __pad2: c_int, + pub st_blocks: crate::blkcnt_t, + pub st_atime: crate::time_t, + pub st_atime_nsec: c_long, + pub st_mtime: crate::time_t, + pub st_mtime_nsec: c_long, + pub st_ctime: crate::time_t, + pub st_ctime_nsec: c_long, + __unused: [c_int; 2], + } + + pub struct statfs { + pub f_type: c_long, + pub f_bsize: c_long, + pub f_blocks: crate::fsblkcnt_t, + pub f_bfree: crate::fsblkcnt_t, + pub f_bavail: crate::fsblkcnt_t, + pub f_files: crate::fsfilcnt_t, + pub f_ffree: crate::fsfilcnt_t, + pub f_fsid: crate::fsid_t, + pub f_namelen: c_long, + pub f_frsize: c_long, + pub f_flags: c_long, + pub f_spare: [c_long; 4], + } + + pub struct statfs64 { + pub f_type: c_long, + pub f_bsize: c_long, + pub f_blocks: crate::fsblkcnt64_t, + pub f_bfree: crate::fsblkcnt64_t, + pub f_bavail: crate::fsblkcnt64_t, + pub 
f_files: crate::fsfilcnt64_t, + pub f_ffree: crate::fsfilcnt64_t, + pub f_fsid: crate::fsid_t, + pub f_namelen: c_long, + pub f_frsize: c_long, + pub f_flags: c_long, + pub f_spare: [c_long; 4], + } + + pub struct statvfs { + pub f_bsize: c_ulong, + pub f_frsize: c_ulong, + pub f_blocks: crate::fsblkcnt_t, + pub f_bfree: crate::fsblkcnt_t, + pub f_bavail: crate::fsblkcnt_t, + pub f_files: crate::fsfilcnt_t, + pub f_ffree: crate::fsfilcnt_t, + pub f_favail: crate::fsfilcnt_t, + pub f_fsid: c_ulong, + pub f_flag: c_ulong, + pub f_namemax: c_ulong, + pub __f_spare: [c_int; 6], + } + + pub struct statvfs64 { + pub f_bsize: c_ulong, + pub f_frsize: c_ulong, + pub f_blocks: crate::fsblkcnt64_t, + pub f_bfree: crate::fsblkcnt64_t, + pub f_bavail: crate::fsblkcnt64_t, + pub f_files: crate::fsfilcnt64_t, + pub f_ffree: crate::fsfilcnt64_t, + pub f_favail: crate::fsfilcnt64_t, + pub f_fsid: c_ulong, + pub f_flag: c_ulong, + pub f_namemax: c_ulong, + pub __f_spare: [c_int; 6], + } + + pub struct siginfo_t { + pub si_signo: c_int, + pub si_errno: c_int, + pub si_code: c_int, + #[doc(hidden)] + #[deprecated( + since = "0.2.54", + note = "Please leave a comment on \ + https://github.com/rust-lang/libc/pull/1316 if you're using \ + this field" + )] + pub _pad: [c_int; 29], + _align: [u64; 0], + } + + pub struct stack_t { + pub ss_sp: *mut c_void, + pub ss_flags: c_int, + pub ss_size: size_t, + } + + // FIXME(1.0): This should not implement `PartialEq` + #[allow(unpredictable_function_pointer_comparisons)] + pub struct sigaction { + pub sa_sigaction: crate::sighandler_t, + pub sa_mask: crate::sigset_t, + pub sa_flags: c_int, + pub sa_restorer: Option, + } + + pub struct ipc_perm { + pub __key: crate::key_t, + pub uid: crate::uid_t, + pub gid: crate::gid_t, + pub cuid: crate::uid_t, + pub cgid: crate::gid_t, + pub mode: c_ushort, + __pad1: c_ushort, + pub __seq: c_ushort, + __pad2: c_ushort, + __unused1: c_ulong, + __unused2: c_ulong, + } + + pub struct shmid_ds { + pub shm_perm: crate::ipc_perm, + pub shm_segsz: size_t, + pub shm_atime: crate::time_t, + pub shm_dtime: crate::time_t, + pub shm_ctime: crate::time_t, + pub shm_cpid: crate::pid_t, + pub shm_lpid: crate::pid_t, + pub shm_nattch: crate::shmatt_t, + __unused5: c_ulong, + __unused6: c_ulong, + } + + pub struct flock { + pub l_type: c_short, + pub l_whence: c_short, + pub l_start: off_t, + pub l_len: off_t, + pub l_pid: crate::pid_t, + } + + pub struct flock64 { + pub l_type: c_short, + pub l_whence: c_short, + pub l_start: off64_t, + pub l_len: off64_t, + pub l_pid: crate::pid_t, + } + + pub struct user_regs_struct { + pub pc: c_ulong, + pub ra: c_ulong, + pub sp: c_ulong, + pub gp: c_ulong, + pub tp: c_ulong, + pub t0: c_ulong, + pub t1: c_ulong, + pub t2: c_ulong, + pub s0: c_ulong, + pub s1: c_ulong, + pub a0: c_ulong, + pub a1: c_ulong, + pub a2: c_ulong, + pub a3: c_ulong, + pub a4: c_ulong, + pub a5: c_ulong, + pub a6: c_ulong, + pub a7: c_ulong, + pub s2: c_ulong, + pub s3: c_ulong, + pub s4: c_ulong, + pub s5: c_ulong, + pub s6: c_ulong, + pub s7: c_ulong, + pub s8: c_ulong, + pub s9: c_ulong, + pub s10: c_ulong, + pub s11: c_ulong, + pub t3: c_ulong, + pub t4: c_ulong, + pub t5: c_ulong, + pub t6: c_ulong, + } + + #[repr(align(8))] + pub struct clone_args { + pub flags: c_ulonglong, + pub pidfd: c_ulonglong, + pub child_tid: c_ulonglong, + pub parent_tid: c_ulonglong, + pub exit_signal: c_ulonglong, + pub stack: c_ulonglong, + pub stack_size: c_ulonglong, + pub tls: c_ulonglong, + pub set_tid: c_ulonglong, + pub set_tid_size: 
c_ulonglong, + pub cgroup: c_ulonglong, + } +} + +s_no_extra_traits! { + pub struct ucontext_t { + pub __uc_flags: c_ulong, + pub uc_link: *mut ucontext_t, + pub uc_stack: crate::stack_t, + pub uc_sigmask: crate::sigset_t, + pub uc_mcontext: mcontext_t, + } + + #[repr(align(16))] + pub struct mcontext_t { + pub __gregs: [c_ulong; 32], + pub __fpregs: __riscv_mc_fp_state, + } + + pub union __riscv_mc_fp_state { + pub __f: __riscv_mc_f_ext_state, + pub __d: __riscv_mc_d_ext_state, + pub __q: __riscv_mc_q_ext_state, + } + + pub struct __riscv_mc_f_ext_state { + pub __f: [c_uint; 32], + pub __fcsr: c_uint, + } + + pub struct __riscv_mc_d_ext_state { + pub __f: [c_ulonglong; 32], + pub __fcsr: c_uint, + } + + #[repr(align(16))] + pub struct __riscv_mc_q_ext_state { + pub __f: [c_ulonglong; 64], + pub __fcsr: c_uint, + pub __glibc_reserved: [c_uint; 3], + } +} + +pub const POSIX_FADV_DONTNEED: c_int = 4; +pub const POSIX_FADV_NOREUSE: c_int = 5; +pub const VEOF: usize = 4; +pub const RTLD_DEEPBIND: c_int = 0x8; +pub const RTLD_GLOBAL: c_int = 0x100; +pub const RTLD_NOLOAD: c_int = 0x4; +pub const O_APPEND: c_int = 1024; +pub const O_CREAT: c_int = 64; +pub const O_EXCL: c_int = 128; +pub const O_NOCTTY: c_int = 256; +pub const O_NONBLOCK: c_int = 2048; +pub const O_SYNC: c_int = 1052672; +pub const O_RSYNC: c_int = 1052672; +pub const O_DSYNC: c_int = 4096; +pub const O_FSYNC: c_int = 1052672; +pub const O_NOATIME: c_int = 262144; +pub const O_PATH: c_int = 2097152; +pub const O_TMPFILE: c_int = 4259840; +pub const MADV_SOFT_OFFLINE: c_int = 101; +pub const MAP_GROWSDOWN: c_int = 256; +pub const EDEADLK: c_int = 35; +pub const ENAMETOOLONG: c_int = 36; +pub const ENOLCK: c_int = 37; +pub const ENOSYS: c_int = 38; +pub const ENOTEMPTY: c_int = 39; +pub const ELOOP: c_int = 40; +pub const ENOMSG: c_int = 42; +pub const EIDRM: c_int = 43; +pub const ECHRNG: c_int = 44; +pub const EL2NSYNC: c_int = 45; +pub const EL3HLT: c_int = 46; +pub const EL3RST: c_int = 47; +pub const ELNRNG: c_int = 48; +pub const EUNATCH: c_int = 49; +pub const ENOCSI: c_int = 50; +pub const EL2HLT: c_int = 51; +pub const EBADE: c_int = 52; +pub const EBADR: c_int = 53; +pub const EXFULL: c_int = 54; +pub const ENOANO: c_int = 55; +pub const EBADRQC: c_int = 56; +pub const EBADSLT: c_int = 57; +pub const EMULTIHOP: c_int = 72; +pub const EOVERFLOW: c_int = 75; +pub const ENOTUNIQ: c_int = 76; +pub const EBADFD: c_int = 77; +pub const EBADMSG: c_int = 74; +pub const EREMCHG: c_int = 78; +pub const ELIBACC: c_int = 79; +pub const ELIBBAD: c_int = 80; +pub const ELIBSCN: c_int = 81; +pub const ELIBMAX: c_int = 82; +pub const ELIBEXEC: c_int = 83; +pub const EILSEQ: c_int = 84; +pub const ERESTART: c_int = 85; +pub const ESTRPIPE: c_int = 86; +pub const EUSERS: c_int = 87; +pub const ENOTSOCK: c_int = 88; +pub const EDESTADDRREQ: c_int = 89; +pub const EMSGSIZE: c_int = 90; +pub const EPROTOTYPE: c_int = 91; +pub const ENOPROTOOPT: c_int = 92; +pub const EPROTONOSUPPORT: c_int = 93; +pub const ESOCKTNOSUPPORT: c_int = 94; +pub const EOPNOTSUPP: c_int = 95; +pub const EPFNOSUPPORT: c_int = 96; +pub const EAFNOSUPPORT: c_int = 97; +pub const EADDRINUSE: c_int = 98; +pub const EADDRNOTAVAIL: c_int = 99; +pub const ENETDOWN: c_int = 100; +pub const ENETUNREACH: c_int = 101; +pub const ENETRESET: c_int = 102; +pub const ECONNABORTED: c_int = 103; +pub const ECONNRESET: c_int = 104; +pub const ENOBUFS: c_int = 105; +pub const EISCONN: c_int = 106; +pub const ENOTCONN: c_int = 107; +pub const ESHUTDOWN: c_int = 108; +pub const 
ETOOMANYREFS: c_int = 109; +pub const ETIMEDOUT: c_int = 110; +pub const ECONNREFUSED: c_int = 111; +pub const EHOSTDOWN: c_int = 112; +pub const EHOSTUNREACH: c_int = 113; +pub const EALREADY: c_int = 114; +pub const EINPROGRESS: c_int = 115; +pub const ESTALE: c_int = 116; +pub const EDQUOT: c_int = 122; +pub const ENOMEDIUM: c_int = 123; +pub const EMEDIUMTYPE: c_int = 124; +pub const ECANCELED: c_int = 125; +pub const ENOKEY: c_int = 126; +pub const EKEYEXPIRED: c_int = 127; +pub const EKEYREVOKED: c_int = 128; +pub const EKEYREJECTED: c_int = 129; +pub const EOWNERDEAD: c_int = 130; +pub const ENOTRECOVERABLE: c_int = 131; +pub const EHWPOISON: c_int = 133; +pub const ERFKILL: c_int = 132; + +pub const SOCK_STREAM: c_int = 1; +pub const SOCK_DGRAM: c_int = 2; +pub const SA_ONSTACK: c_int = 134217728; +pub const SA_SIGINFO: c_int = 4; +pub const SA_NOCLDWAIT: c_int = 2; +pub const SIGTTIN: c_int = 21; +pub const SIGTTOU: c_int = 22; +pub const SIGXCPU: c_int = 24; +pub const SIGXFSZ: c_int = 25; +pub const SIGVTALRM: c_int = 26; +pub const SIGPROF: c_int = 27; +pub const SIGWINCH: c_int = 28; +pub const SIGCHLD: c_int = 17; +pub const SIGBUS: c_int = 7; +pub const SIGUSR1: c_int = 10; +pub const SIGUSR2: c_int = 12; +pub const SIGCONT: c_int = 18; +pub const SIGSTOP: c_int = 19; +pub const SIGTSTP: c_int = 20; +pub const SIGURG: c_int = 23; +pub const SIGIO: c_int = 29; +pub const SIGSYS: c_int = 31; +pub const SIGSTKFLT: c_int = 16; +pub const SIGPOLL: c_int = 29; +pub const SIGPWR: c_int = 30; +pub const SIG_SETMASK: c_int = 2; +pub const SIG_BLOCK: c_int = 0; +pub const SIG_UNBLOCK: c_int = 1; +pub const POLLWRNORM: c_short = 256; +pub const POLLWRBAND: c_short = 512; +pub const O_ASYNC: c_int = 8192; +pub const O_NDELAY: c_int = 2048; +pub const PTRACE_DETACH: c_uint = 17; +pub const EFD_NONBLOCK: c_int = 2048; +pub const F_GETLK: c_int = 5; +pub const F_GETOWN: c_int = 9; +pub const F_SETOWN: c_int = 8; +pub const F_SETLK: c_int = 6; +pub const F_SETLKW: c_int = 7; +pub const F_RDLCK: c_int = 0; +pub const F_WRLCK: c_int = 1; +pub const F_UNLCK: c_int = 2; +pub const F_OFD_GETLK: c_int = 36; +pub const F_OFD_SETLK: c_int = 37; +pub const F_OFD_SETLKW: c_int = 38; +pub const SFD_NONBLOCK: c_int = 2048; +pub const TCSANOW: c_int = 0; +pub const TCSADRAIN: c_int = 1; +pub const TCSAFLUSH: c_int = 2; +pub const SFD_CLOEXEC: c_int = 524288; +pub const NCCS: usize = 32; +pub const O_TRUNC: c_int = 512; +pub const O_CLOEXEC: c_int = 524288; +pub const EBFONT: c_int = 59; +pub const ENOSTR: c_int = 60; +pub const ENODATA: c_int = 61; +pub const ETIME: c_int = 62; +pub const ENOSR: c_int = 63; +pub const ENONET: c_int = 64; +pub const ENOPKG: c_int = 65; +pub const EREMOTE: c_int = 66; +pub const ENOLINK: c_int = 67; +pub const EADV: c_int = 68; +pub const ESRMNT: c_int = 69; +pub const ECOMM: c_int = 70; +pub const EPROTO: c_int = 71; +pub const EDOTDOT: c_int = 73; +pub const SA_NODEFER: c_int = 1073741824; +pub const SA_RESETHAND: c_int = -2147483648; +pub const SA_RESTART: c_int = 268435456; +pub const SA_NOCLDSTOP: c_int = 1; +pub const EPOLL_CLOEXEC: c_int = 524288; +pub const EFD_CLOEXEC: c_int = 524288; +pub const __SIZEOF_PTHREAD_CONDATTR_T: usize = 4; +pub const __SIZEOF_PTHREAD_MUTEXATTR_T: usize = 4; +pub const __SIZEOF_PTHREAD_BARRIERATTR_T: usize = 4; +pub const O_DIRECT: c_int = 16384; +pub const O_DIRECTORY: c_int = 65536; +pub const O_NOFOLLOW: c_int = 131072; +pub const MAP_HUGETLB: c_int = 262144; +pub const MAP_LOCKED: c_int = 8192; +pub const MAP_NORESERVE: c_int = 
16384; +pub const MAP_ANON: c_int = 32; +pub const MAP_ANONYMOUS: c_int = 32; +pub const MAP_DENYWRITE: c_int = 2048; +pub const MAP_EXECUTABLE: c_int = 4096; +pub const MAP_POPULATE: c_int = 32768; +pub const MAP_NONBLOCK: c_int = 65536; +pub const MAP_STACK: c_int = 131072; +pub const MAP_SYNC: c_int = 0x080000; +pub const EDEADLOCK: c_int = 35; +pub const EUCLEAN: c_int = 117; +pub const ENOTNAM: c_int = 118; +pub const ENAVAIL: c_int = 119; +pub const EISNAM: c_int = 120; +pub const EREMOTEIO: c_int = 121; +pub const PTRACE_GETFPREGS: c_uint = 14; +pub const PTRACE_SETFPREGS: c_uint = 15; +pub const PTRACE_GETFPXREGS: c_uint = 18; +pub const PTRACE_SETFPXREGS: c_uint = 19; +pub const PTRACE_GETREGS: c_uint = 12; +pub const PTRACE_SETREGS: c_uint = 13; +pub const MCL_CURRENT: c_int = 1; +pub const MCL_FUTURE: c_int = 2; +pub const MCL_ONFAULT: c_int = 4; +pub const SIGSTKSZ: size_t = 8192; +pub const MINSIGSTKSZ: size_t = 2048; +pub const CBAUD: crate::tcflag_t = 4111; +pub const TAB1: crate::tcflag_t = 2048; +pub const TAB2: crate::tcflag_t = 4096; +pub const TAB3: crate::tcflag_t = 6144; +pub const CR1: crate::tcflag_t = 512; +pub const CR2: crate::tcflag_t = 1024; +pub const CR3: crate::tcflag_t = 1536; +pub const FF1: crate::tcflag_t = 32768; +pub const BS1: crate::tcflag_t = 8192; +pub const VT1: crate::tcflag_t = 16384; +pub const VWERASE: usize = 14; +pub const VREPRINT: usize = 12; +pub const VSUSP: usize = 10; +pub const VSTART: usize = 8; +pub const VSTOP: usize = 9; +pub const VDISCARD: usize = 13; +pub const VTIME: usize = 5; +pub const IXON: crate::tcflag_t = 1024; +pub const IXOFF: crate::tcflag_t = 4096; +pub const ONLCR: crate::tcflag_t = 4; +pub const CSIZE: crate::tcflag_t = 48; +pub const CS6: crate::tcflag_t = 16; +pub const CS7: crate::tcflag_t = 32; +pub const CS8: crate::tcflag_t = 48; +pub const CSTOPB: crate::tcflag_t = 64; +pub const CREAD: crate::tcflag_t = 128; +pub const PARENB: crate::tcflag_t = 256; +pub const PARODD: crate::tcflag_t = 512; +pub const HUPCL: crate::tcflag_t = 1024; +pub const CLOCAL: crate::tcflag_t = 2048; +pub const ECHOKE: crate::tcflag_t = 2048; +pub const ECHOE: crate::tcflag_t = 16; +pub const ECHOK: crate::tcflag_t = 32; +pub const ECHONL: crate::tcflag_t = 64; +pub const ECHOPRT: crate::tcflag_t = 1024; +pub const ECHOCTL: crate::tcflag_t = 512; +pub const ISIG: crate::tcflag_t = 1; +pub const ICANON: crate::tcflag_t = 2; +pub const PENDIN: crate::tcflag_t = 16384; +pub const NOFLSH: crate::tcflag_t = 128; +pub const CIBAUD: crate::tcflag_t = 269418496; +pub const CBAUDEX: crate::tcflag_t = 4096; +pub const VSWTC: usize = 7; +pub const OLCUC: crate::tcflag_t = 2; +pub const NLDLY: crate::tcflag_t = 256; +pub const CRDLY: crate::tcflag_t = 1536; +pub const TABDLY: crate::tcflag_t = 6144; +pub const BSDLY: crate::tcflag_t = 8192; +pub const FFDLY: crate::tcflag_t = 32768; +pub const VTDLY: crate::tcflag_t = 16384; +pub const XTABS: crate::tcflag_t = 6144; +pub const B0: crate::speed_t = 0; +pub const B50: crate::speed_t = 1; +pub const B75: crate::speed_t = 2; +pub const B110: crate::speed_t = 3; +pub const B134: crate::speed_t = 4; +pub const B150: crate::speed_t = 5; +pub const B200: crate::speed_t = 6; +pub const B300: crate::speed_t = 7; +pub const B600: crate::speed_t = 8; +pub const B1200: crate::speed_t = 9; +pub const B1800: crate::speed_t = 10; +pub const B2400: crate::speed_t = 11; +pub const B4800: crate::speed_t = 12; +pub const B9600: crate::speed_t = 13; +pub const B19200: crate::speed_t = 14; +pub const B38400: 
crate::speed_t = 15; +pub const EXTA: crate::speed_t = 14; +pub const EXTB: crate::speed_t = 15; +pub const B57600: crate::speed_t = 4097; +pub const B115200: crate::speed_t = 4098; +pub const B230400: crate::speed_t = 4099; +pub const B460800: crate::speed_t = 4100; +pub const B500000: crate::speed_t = 4101; +pub const B576000: crate::speed_t = 4102; +pub const B921600: crate::speed_t = 4103; +pub const B1000000: crate::speed_t = 4104; +pub const B1152000: crate::speed_t = 4105; +pub const B1500000: crate::speed_t = 4106; +pub const B2000000: crate::speed_t = 4107; +pub const B2500000: crate::speed_t = 4108; +pub const B3000000: crate::speed_t = 4109; +pub const B3500000: crate::speed_t = 4110; +pub const B4000000: crate::speed_t = 4111; +pub const VEOL: usize = 11; +pub const VEOL2: usize = 16; +pub const VMIN: usize = 6; +pub const IEXTEN: crate::tcflag_t = 32768; +pub const TOSTOP: crate::tcflag_t = 256; +pub const FLUSHO: crate::tcflag_t = 4096; +pub const EXTPROC: crate::tcflag_t = 65536; +pub const __SIZEOF_PTHREAD_MUTEX_T: usize = 40; +pub const __SIZEOF_PTHREAD_RWLOCK_T: usize = 56; +pub const __SIZEOF_PTHREAD_BARRIER_T: usize = 32; +pub const NGREG: usize = 32; +pub const REG_PC: usize = 0; +pub const REG_RA: usize = 1; +pub const REG_SP: usize = 2; +pub const REG_TP: usize = 4; +pub const REG_S0: usize = 8; +pub const REG_S1: usize = 9; +pub const REG_A0: usize = 10; +pub const REG_S2: usize = 18; +pub const REG_NARGS: usize = 8; + +pub const COMPAT_HWCAP_ISA_I: c_ulong = 1 << (b'I' - b'A'); +pub const COMPAT_HWCAP_ISA_M: c_ulong = 1 << (b'M' - b'A'); +#[allow(clippy::eq_op)] +pub const COMPAT_HWCAP_ISA_A: c_ulong = 1 << (b'A' - b'A'); +pub const COMPAT_HWCAP_ISA_F: c_ulong = 1 << (b'F' - b'A'); +pub const COMPAT_HWCAP_ISA_D: c_ulong = 1 << (b'D' - b'A'); +pub const COMPAT_HWCAP_ISA_C: c_ulong = 1 << (b'C' - b'A'); +pub const COMPAT_HWCAP_ISA_V: c_ulong = 1 << (b'V' - b'A'); + +pub const SYS_read: c_long = 63; +pub const SYS_write: c_long = 64; +pub const SYS_close: c_long = 57; +pub const SYS_fstat: c_long = 80; +pub const SYS_lseek: c_long = 62; +pub const SYS_mmap: c_long = 222; +pub const SYS_mprotect: c_long = 226; +pub const SYS_munmap: c_long = 215; +pub const SYS_brk: c_long = 214; +pub const SYS_rt_sigaction: c_long = 134; +pub const SYS_rt_sigprocmask: c_long = 135; +pub const SYS_rt_sigreturn: c_long = 139; +pub const SYS_ioctl: c_long = 29; +pub const SYS_pread64: c_long = 67; +pub const SYS_pwrite64: c_long = 68; +pub const SYS_readv: c_long = 65; +pub const SYS_writev: c_long = 66; +pub const SYS_sched_yield: c_long = 124; +pub const SYS_mremap: c_long = 216; +pub const SYS_msync: c_long = 227; +pub const SYS_mincore: c_long = 232; +pub const SYS_madvise: c_long = 233; +pub const SYS_shmget: c_long = 194; +pub const SYS_shmat: c_long = 196; +pub const SYS_shmctl: c_long = 195; +pub const SYS_dup: c_long = 23; +pub const SYS_nanosleep: c_long = 101; +pub const SYS_getitimer: c_long = 102; +pub const SYS_setitimer: c_long = 103; +pub const SYS_getpid: c_long = 172; +pub const SYS_sendfile: c_long = 71; +pub const SYS_socket: c_long = 198; +pub const SYS_connect: c_long = 203; +pub const SYS_accept: c_long = 202; +pub const SYS_sendto: c_long = 206; +pub const SYS_recvfrom: c_long = 207; +pub const SYS_sendmsg: c_long = 211; +pub const SYS_recvmsg: c_long = 212; +pub const SYS_shutdown: c_long = 210; +pub const SYS_bind: c_long = 200; +pub const SYS_listen: c_long = 201; +pub const SYS_getsockname: c_long = 204; +pub const SYS_getpeername: c_long = 205; +pub const 
SYS_socketpair: c_long = 199; +pub const SYS_setsockopt: c_long = 208; +pub const SYS_getsockopt: c_long = 209; +pub const SYS_clone: c_long = 220; +pub const SYS_execve: c_long = 221; +pub const SYS_exit: c_long = 93; +pub const SYS_wait4: c_long = 260; +pub const SYS_kill: c_long = 129; +pub const SYS_uname: c_long = 160; +pub const SYS_semget: c_long = 190; +pub const SYS_semop: c_long = 193; +pub const SYS_semctl: c_long = 191; +pub const SYS_shmdt: c_long = 197; +pub const SYS_msgget: c_long = 186; +pub const SYS_msgsnd: c_long = 189; +pub const SYS_msgrcv: c_long = 188; +pub const SYS_msgctl: c_long = 187; +pub const SYS_fcntl: c_long = 25; +pub const SYS_flock: c_long = 32; +pub const SYS_fsync: c_long = 82; +pub const SYS_fdatasync: c_long = 83; +pub const SYS_truncate: c_long = 45; +pub const SYS_ftruncate: c_long = 46; +pub const SYS_getcwd: c_long = 17; +pub const SYS_chdir: c_long = 49; +pub const SYS_fchdir: c_long = 50; +pub const SYS_fchmod: c_long = 52; +pub const SYS_fchown: c_long = 55; +pub const SYS_umask: c_long = 166; +pub const SYS_gettimeofday: c_long = 169; +pub const SYS_getrlimit: c_long = 163; +pub const SYS_getrusage: c_long = 165; +pub const SYS_sysinfo: c_long = 179; +pub const SYS_times: c_long = 153; +pub const SYS_ptrace: c_long = 117; +pub const SYS_getuid: c_long = 174; +pub const SYS_syslog: c_long = 116; +pub const SYS_getgid: c_long = 176; +pub const SYS_setuid: c_long = 146; +pub const SYS_setgid: c_long = 144; +pub const SYS_geteuid: c_long = 175; +pub const SYS_getegid: c_long = 177; +pub const SYS_setpgid: c_long = 154; +pub const SYS_getppid: c_long = 173; +pub const SYS_setsid: c_long = 157; +pub const SYS_setreuid: c_long = 145; +pub const SYS_setregid: c_long = 143; +pub const SYS_getgroups: c_long = 158; +pub const SYS_setgroups: c_long = 159; +pub const SYS_setresuid: c_long = 147; +pub const SYS_getresuid: c_long = 148; +pub const SYS_setresgid: c_long = 149; +pub const SYS_getresgid: c_long = 150; +pub const SYS_getpgid: c_long = 155; +pub const SYS_setfsuid: c_long = 151; +pub const SYS_setfsgid: c_long = 152; +pub const SYS_getsid: c_long = 156; +pub const SYS_capget: c_long = 90; +pub const SYS_capset: c_long = 91; +pub const SYS_rt_sigpending: c_long = 136; +pub const SYS_rt_sigtimedwait: c_long = 137; +pub const SYS_rt_sigqueueinfo: c_long = 138; +pub const SYS_rt_sigsuspend: c_long = 133; +pub const SYS_sigaltstack: c_long = 132; +pub const SYS_personality: c_long = 92; +pub const SYS_statfs: c_long = 43; +pub const SYS_fstatfs: c_long = 44; +pub const SYS_getpriority: c_long = 141; +pub const SYS_setpriority: c_long = 140; +pub const SYS_sched_setparam: c_long = 118; +pub const SYS_sched_getparam: c_long = 121; +pub const SYS_sched_setscheduler: c_long = 119; +pub const SYS_sched_getscheduler: c_long = 120; +pub const SYS_sched_get_priority_max: c_long = 125; +pub const SYS_sched_get_priority_min: c_long = 126; +pub const SYS_sched_rr_get_interval: c_long = 127; +pub const SYS_mlock: c_long = 228; +pub const SYS_munlock: c_long = 229; +pub const SYS_mlockall: c_long = 230; +pub const SYS_munlockall: c_long = 231; +pub const SYS_vhangup: c_long = 58; +pub const SYS_pivot_root: c_long = 41; +pub const SYS_prctl: c_long = 167; +pub const SYS_adjtimex: c_long = 171; +pub const SYS_setrlimit: c_long = 164; +pub const SYS_chroot: c_long = 51; +pub const SYS_sync: c_long = 81; +pub const SYS_acct: c_long = 89; +pub const SYS_settimeofday: c_long = 170; +pub const SYS_mount: c_long = 40; +pub const SYS_umount2: c_long = 39; +pub const 
SYS_swapon: c_long = 224; +pub const SYS_swapoff: c_long = 225; +pub const SYS_reboot: c_long = 142; +pub const SYS_sethostname: c_long = 161; +pub const SYS_setdomainname: c_long = 162; +pub const SYS_init_module: c_long = 105; +pub const SYS_delete_module: c_long = 106; +pub const SYS_quotactl: c_long = 60; +pub const SYS_nfsservctl: c_long = 42; +pub const SYS_gettid: c_long = 178; +pub const SYS_readahead: c_long = 213; +pub const SYS_setxattr: c_long = 5; +pub const SYS_lsetxattr: c_long = 6; +pub const SYS_fsetxattr: c_long = 7; +pub const SYS_getxattr: c_long = 8; +pub const SYS_lgetxattr: c_long = 9; +pub const SYS_fgetxattr: c_long = 10; +pub const SYS_listxattr: c_long = 11; +pub const SYS_llistxattr: c_long = 12; +pub const SYS_flistxattr: c_long = 13; +pub const SYS_removexattr: c_long = 14; +pub const SYS_lremovexattr: c_long = 15; +pub const SYS_fremovexattr: c_long = 16; +pub const SYS_tkill: c_long = 130; +pub const SYS_futex: c_long = 98; +pub const SYS_sched_setaffinity: c_long = 122; +pub const SYS_sched_getaffinity: c_long = 123; +pub const SYS_io_setup: c_long = 0; +pub const SYS_io_destroy: c_long = 1; +pub const SYS_io_getevents: c_long = 4; +pub const SYS_io_submit: c_long = 2; +pub const SYS_io_cancel: c_long = 3; +pub const SYS_lookup_dcookie: c_long = 18; +pub const SYS_remap_file_pages: c_long = 234; +pub const SYS_getdents64: c_long = 61; +pub const SYS_set_tid_address: c_long = 96; +pub const SYS_restart_syscall: c_long = 128; +pub const SYS_semtimedop: c_long = 192; +pub const SYS_fadvise64: c_long = 223; +pub const SYS_timer_create: c_long = 107; +pub const SYS_timer_settime: c_long = 110; +pub const SYS_timer_gettime: c_long = 108; +pub const SYS_timer_getoverrun: c_long = 109; +pub const SYS_timer_delete: c_long = 111; +pub const SYS_clock_settime: c_long = 112; +pub const SYS_clock_gettime: c_long = 113; +pub const SYS_clock_getres: c_long = 114; +pub const SYS_clock_nanosleep: c_long = 115; +pub const SYS_exit_group: c_long = 94; +pub const SYS_epoll_ctl: c_long = 21; +pub const SYS_tgkill: c_long = 131; +pub const SYS_mbind: c_long = 235; +pub const SYS_set_mempolicy: c_long = 237; +pub const SYS_get_mempolicy: c_long = 236; +pub const SYS_mq_open: c_long = 180; +pub const SYS_mq_unlink: c_long = 181; +pub const SYS_mq_timedsend: c_long = 182; +pub const SYS_mq_timedreceive: c_long = 183; +pub const SYS_mq_notify: c_long = 184; +pub const SYS_mq_getsetattr: c_long = 185; +pub const SYS_kexec_load: c_long = 104; +pub const SYS_waitid: c_long = 95; +pub const SYS_add_key: c_long = 217; +pub const SYS_request_key: c_long = 218; +pub const SYS_keyctl: c_long = 219; +pub const SYS_ioprio_set: c_long = 30; +pub const SYS_ioprio_get: c_long = 31; +pub const SYS_inotify_add_watch: c_long = 27; +pub const SYS_inotify_rm_watch: c_long = 28; +pub const SYS_migrate_pages: c_long = 238; +pub const SYS_openat: c_long = 56; +pub const SYS_mkdirat: c_long = 34; +pub const SYS_mknodat: c_long = 33; +pub const SYS_fchownat: c_long = 54; +pub const SYS_newfstatat: c_long = 79; +pub const SYS_unlinkat: c_long = 35; +pub const SYS_linkat: c_long = 37; +pub const SYS_symlinkat: c_long = 36; +pub const SYS_readlinkat: c_long = 78; +pub const SYS_fchmodat: c_long = 53; +pub const SYS_faccessat: c_long = 48; +pub const SYS_pselect6: c_long = 72; +pub const SYS_ppoll: c_long = 73; +pub const SYS_unshare: c_long = 97; +pub const SYS_set_robust_list: c_long = 99; +pub const SYS_get_robust_list: c_long = 100; +pub const SYS_splice: c_long = 76; +pub const SYS_tee: c_long = 77; +pub 
const SYS_sync_file_range: c_long = 84; +pub const SYS_vmsplice: c_long = 75; +pub const SYS_move_pages: c_long = 239; +pub const SYS_utimensat: c_long = 88; +pub const SYS_epoll_pwait: c_long = 22; +pub const SYS_timerfd_create: c_long = 85; +pub const SYS_fallocate: c_long = 47; +pub const SYS_timerfd_settime: c_long = 86; +pub const SYS_timerfd_gettime: c_long = 87; +pub const SYS_accept4: c_long = 242; +pub const SYS_signalfd4: c_long = 74; +pub const SYS_eventfd2: c_long = 19; +pub const SYS_epoll_create1: c_long = 20; +pub const SYS_dup3: c_long = 24; +pub const SYS_pipe2: c_long = 59; +pub const SYS_inotify_init1: c_long = 26; +pub const SYS_preadv: c_long = 69; +pub const SYS_pwritev: c_long = 70; +pub const SYS_rt_tgsigqueueinfo: c_long = 240; +pub const SYS_perf_event_open: c_long = 241; +pub const SYS_recvmmsg: c_long = 243; +pub const SYS_fanotify_init: c_long = 262; +pub const SYS_fanotify_mark: c_long = 263; +pub const SYS_prlimit64: c_long = 261; +pub const SYS_name_to_handle_at: c_long = 264; +pub const SYS_open_by_handle_at: c_long = 265; +pub const SYS_clock_adjtime: c_long = 266; +pub const SYS_syncfs: c_long = 267; +pub const SYS_sendmmsg: c_long = 269; +pub const SYS_setns: c_long = 268; +pub const SYS_getcpu: c_long = 168; +pub const SYS_process_vm_readv: c_long = 270; +pub const SYS_process_vm_writev: c_long = 271; +pub const SYS_kcmp: c_long = 272; +pub const SYS_finit_module: c_long = 273; +pub const SYS_sched_setattr: c_long = 274; +pub const SYS_sched_getattr: c_long = 275; +pub const SYS_renameat2: c_long = 276; +pub const SYS_seccomp: c_long = 277; +pub const SYS_getrandom: c_long = 278; +pub const SYS_memfd_create: c_long = 279; +pub const SYS_bpf: c_long = 280; +pub const SYS_execveat: c_long = 281; +pub const SYS_userfaultfd: c_long = 282; +pub const SYS_membarrier: c_long = 283; +pub const SYS_mlock2: c_long = 284; +pub const SYS_copy_file_range: c_long = 285; +pub const SYS_preadv2: c_long = 286; +pub const SYS_pwritev2: c_long = 287; +pub const SYS_pkey_mprotect: c_long = 288; +pub const SYS_pkey_alloc: c_long = 289; +pub const SYS_pkey_free: c_long = 290; +pub const SYS_statx: c_long = 291; +pub const SYS_rseq: c_long = 293; +pub const SYS_pidfd_send_signal: c_long = 424; +pub const SYS_io_uring_setup: c_long = 425; +pub const SYS_io_uring_enter: c_long = 426; +pub const SYS_io_uring_register: c_long = 427; +pub const SYS_open_tree: c_long = 428; +pub const SYS_move_mount: c_long = 429; +pub const SYS_fsopen: c_long = 430; +pub const SYS_fsconfig: c_long = 431; +pub const SYS_fsmount: c_long = 432; +pub const SYS_fspick: c_long = 433; +pub const SYS_pidfd_open: c_long = 434; +pub const SYS_clone3: c_long = 435; +pub const SYS_close_range: c_long = 436; +pub const SYS_openat2: c_long = 437; +pub const SYS_pidfd_getfd: c_long = 438; +pub const SYS_faccessat2: c_long = 439; +pub const SYS_process_madvise: c_long = 440; +pub const SYS_epoll_pwait2: c_long = 441; +pub const SYS_mount_setattr: c_long = 442; +pub const SYS_quotactl_fd: c_long = 443; +pub const SYS_landlock_create_ruleset: c_long = 444; +pub const SYS_landlock_add_rule: c_long = 445; +pub const SYS_landlock_restrict_self: c_long = 446; +pub const SYS_memfd_secret: c_long = 447; +pub const SYS_process_mrelease: c_long = 448; +pub const SYS_futex_waitv: c_long = 449; +pub const SYS_set_mempolicy_home_node: c_long = 450; diff --git a/vendor/libc/src/unix/linux_like/linux/gnu/b64/s390x.rs b/vendor/libc/src/unix/linux_like/linux/gnu/b64/s390x.rs new file mode 100644 index 
00000000000000..029485c5b4a328 --- /dev/null +++ b/vendor/libc/src/unix/linux_like/linux/gnu/b64/s390x.rs @@ -0,0 +1,955 @@ +//! s390x + +use crate::prelude::*; +use crate::{off64_t, off_t, pthread_mutex_t}; + +pub type blksize_t = i64; +pub type nlink_t = u64; +pub type suseconds_t = i64; +pub type wchar_t = i32; +pub type greg_t = u64; +pub type __u64 = u64; +pub type __s64 = i64; + +s! { + // FIXME(1.0): This should not implement `PartialEq` + #[allow(unpredictable_function_pointer_comparisons)] + pub struct sigaction { + pub sa_sigaction: crate::sighandler_t, + __glibc_reserved0: c_int, + pub sa_flags: c_int, + pub sa_restorer: Option, + pub sa_mask: crate::sigset_t, + } + + pub struct statfs { + pub f_type: c_uint, + pub f_bsize: c_uint, + pub f_blocks: crate::fsblkcnt_t, + pub f_bfree: crate::fsblkcnt_t, + pub f_bavail: crate::fsblkcnt_t, + pub f_files: crate::fsfilcnt_t, + pub f_ffree: crate::fsfilcnt_t, + pub f_fsid: crate::fsid_t, + pub f_namelen: c_uint, + pub f_frsize: c_uint, + pub f_flags: c_uint, + f_spare: [c_uint; 4], + } + + pub struct flock { + pub l_type: c_short, + pub l_whence: c_short, + pub l_start: off_t, + pub l_len: off_t, + pub l_pid: crate::pid_t, + } + + pub struct flock64 { + pub l_type: c_short, + pub l_whence: c_short, + pub l_start: off64_t, + pub l_len: off64_t, + pub l_pid: crate::pid_t, + } + + pub struct siginfo_t { + pub si_signo: c_int, + pub si_errno: c_int, + pub si_code: c_int, + _pad: c_int, + _pad2: [c_long; 14], + } + + pub struct stack_t { + pub ss_sp: *mut c_void, + pub ss_flags: c_int, + pub ss_size: size_t, + } + + pub struct stat { + pub st_dev: crate::dev_t, + pub st_ino: crate::ino_t, + pub st_nlink: crate::nlink_t, + pub st_mode: crate::mode_t, + pub st_uid: crate::uid_t, + pub st_gid: crate::gid_t, + st_pad0: c_int, + pub st_rdev: crate::dev_t, + pub st_size: off_t, + pub st_atime: crate::time_t, + pub st_atime_nsec: c_long, + pub st_mtime: crate::time_t, + pub st_mtime_nsec: c_long, + pub st_ctime: crate::time_t, + pub st_ctime_nsec: c_long, + pub st_blksize: crate::blksize_t, + pub st_blocks: crate::blkcnt_t, + __glibc_reserved: [c_long; 3], + } + + pub struct stat64 { + pub st_dev: crate::dev_t, + pub st_ino: crate::ino64_t, + pub st_nlink: crate::nlink_t, + pub st_mode: crate::mode_t, + pub st_uid: crate::uid_t, + pub st_gid: crate::gid_t, + st_pad0: c_int, + pub st_rdev: crate::dev_t, + pub st_size: off_t, + pub st_atime: crate::time_t, + pub st_atime_nsec: c_long, + pub st_mtime: crate::time_t, + pub st_mtime_nsec: c_long, + pub st_ctime: crate::time_t, + pub st_ctime_nsec: c_long, + pub st_blksize: crate::blksize_t, + pub st_blocks: crate::blkcnt64_t, + __glibc_reserved: [c_long; 3], + } + + pub struct pthread_attr_t { + __size: [c_ulong; 7], + } + + pub struct ipc_perm { + pub __key: crate::key_t, + pub uid: crate::uid_t, + pub gid: crate::gid_t, + pub cuid: crate::uid_t, + pub cgid: crate::gid_t, + pub mode: crate::mode_t, + pub __seq: c_ushort, + __pad1: c_ushort, + __unused1: c_ulong, + __unused2: c_ulong, + } + + pub struct shmid_ds { + pub shm_perm: crate::ipc_perm, + pub shm_segsz: size_t, + pub shm_atime: crate::time_t, + pub shm_dtime: crate::time_t, + pub shm_ctime: crate::time_t, + pub shm_cpid: crate::pid_t, + pub shm_lpid: crate::pid_t, + pub shm_nattch: crate::shmatt_t, + __unused4: c_ulong, + __unused5: c_ulong, + } + + pub struct statvfs { + pub f_bsize: c_ulong, + pub f_frsize: c_ulong, + pub f_blocks: crate::fsblkcnt_t, + pub f_bfree: crate::fsblkcnt_t, + pub f_bavail: crate::fsblkcnt_t, + pub f_files: 
crate::fsfilcnt_t, + pub f_ffree: crate::fsfilcnt_t, + pub f_favail: crate::fsfilcnt_t, + pub f_fsid: c_ulong, + pub f_flag: c_ulong, + pub f_namemax: c_ulong, + __f_spare: [c_int; 6], + } + + pub struct __psw_t { + pub mask: u64, + pub addr: u64, + } + + pub struct fpregset_t { + pub fpc: u32, + __pad: u32, + pub fprs: [fpreg_t; 16], + } + + pub struct mcontext_t { + pub psw: __psw_t, + pub gregs: [u64; 16], + pub aregs: [u32; 16], + pub fpregs: fpregset_t, + } + + pub struct ucontext_t { + pub uc_flags: c_ulong, + pub uc_link: *mut ucontext_t, + pub uc_stack: crate::stack_t, + pub uc_mcontext: mcontext_t, + pub uc_sigmask: crate::sigset_t, + } + + pub struct statfs64 { + pub f_type: c_uint, + pub f_bsize: c_uint, + pub f_blocks: u64, + pub f_bfree: u64, + pub f_bavail: u64, + pub f_files: u64, + pub f_ffree: u64, + pub f_fsid: crate::fsid_t, + pub f_namelen: c_uint, + pub f_frsize: c_uint, + pub f_flags: c_uint, + pub f_spare: [c_uint; 4], + } + + pub struct statvfs64 { + pub f_bsize: c_ulong, + pub f_frsize: c_ulong, + pub f_blocks: u64, + pub f_bfree: u64, + pub f_bavail: u64, + pub f_files: u64, + pub f_ffree: u64, + pub f_favail: u64, + pub f_fsid: c_ulong, + pub f_flag: c_ulong, + pub f_namemax: c_ulong, + __f_spare: [c_int; 6], + } +} + +s_no_extra_traits! { + // FIXME(union): This is actually a union. + pub struct fpreg_t { + pub d: c_double, + // f: c_float, + } +} + +cfg_if! { + if #[cfg(feature = "extra_traits")] { + impl PartialEq for fpreg_t { + fn eq(&self, other: &fpreg_t) -> bool { + self.d == other.d + } + } + + impl Eq for fpreg_t {} + + impl hash::Hash for fpreg_t { + fn hash(&self, state: &mut H) { + let d: u64 = self.d.to_bits(); + d.hash(state); + } + } + } +} + +pub const POSIX_FADV_DONTNEED: c_int = 6; +pub const POSIX_FADV_NOREUSE: c_int = 7; + +pub const VEOF: usize = 4; +pub const RTLD_DEEPBIND: c_int = 0x8; +pub const RTLD_GLOBAL: c_int = 0x100; +pub const RTLD_NOLOAD: c_int = 0x4; +pub const SFD_CLOEXEC: c_int = 0x080000; + +pub const NCCS: usize = 32; + +pub const O_TRUNC: c_int = 512; +pub const O_NOATIME: c_int = 0o1000000; +pub const O_CLOEXEC: c_int = 0x80000; +pub const O_PATH: c_int = 0o10000000; +pub const O_TMPFILE: c_int = 0o20000000 | O_DIRECTORY; + +pub const EBFONT: c_int = 59; +pub const ENOSTR: c_int = 60; +pub const ENODATA: c_int = 61; +pub const ETIME: c_int = 62; +pub const ENOSR: c_int = 63; +pub const ENONET: c_int = 64; +pub const ENOPKG: c_int = 65; +pub const EREMOTE: c_int = 66; +pub const ENOLINK: c_int = 67; +pub const EADV: c_int = 68; +pub const ESRMNT: c_int = 69; +pub const ECOMM: c_int = 70; +pub const EPROTO: c_int = 71; +pub const EDOTDOT: c_int = 73; + +pub const SA_NODEFER: c_int = 0x40000000; +pub const SA_RESETHAND: c_int = 0x80000000; +pub const SA_RESTART: c_int = 0x10000000; +pub const SA_NOCLDSTOP: c_int = 0x00000001; + +pub const EPOLL_CLOEXEC: c_int = 0x80000; + +pub const EFD_CLOEXEC: c_int = 0x80000; + +pub const __SIZEOF_PTHREAD_CONDATTR_T: usize = 4; +pub const __SIZEOF_PTHREAD_MUTEXATTR_T: usize = 4; +pub const __SIZEOF_PTHREAD_BARRIERATTR_T: usize = 4; +pub const __SIZEOF_PTHREAD_MUTEX_T: usize = 40; +pub const __SIZEOF_PTHREAD_RWLOCK_T: usize = 56; +pub const __SIZEOF_PTHREAD_BARRIER_T: usize = 32; + +pub const PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP: crate::pthread_mutex_t = pthread_mutex_t { + size: [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ], +}; +pub const PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP: crate::pthread_mutex_t = 
pthread_mutex_t { + size: [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ], +}; +pub const PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP: crate::pthread_mutex_t = pthread_mutex_t { + size: [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ], +}; + +pub const EUCLEAN: c_int = 117; +pub const ENOTNAM: c_int = 118; +pub const ENAVAIL: c_int = 119; +pub const EISNAM: c_int = 120; +pub const EREMOTEIO: c_int = 121; +pub const EADDRINUSE: c_int = 98; +pub const EADDRNOTAVAIL: c_int = 99; +pub const ECONNABORTED: c_int = 103; +pub const ECONNREFUSED: c_int = 111; +pub const ECONNRESET: c_int = 104; +pub const EDEADLK: c_int = 35; +pub const ENOSYS: c_int = 38; +pub const ENOTCONN: c_int = 107; +pub const ETIMEDOUT: c_int = 110; +pub const O_APPEND: c_int = 1024; +pub const O_CREAT: c_int = 64; +pub const O_EXCL: c_int = 128; +pub const O_NONBLOCK: c_int = 2048; +pub const SA_NOCLDWAIT: c_int = 2; +pub const SA_ONSTACK: c_int = 0x08000000; +pub const SA_SIGINFO: c_int = 4; +pub const SIGBUS: c_int = 7; +pub const SIGSTKSZ: size_t = 0x2000; +pub const MINSIGSTKSZ: size_t = 2048; +pub const SIG_SETMASK: c_int = 2; + +pub const SOCK_STREAM: c_int = 1; +pub const SOCK_DGRAM: c_int = 2; + +pub const O_NOCTTY: c_int = 256; +pub const O_SYNC: c_int = 1052672; +pub const O_RSYNC: c_int = 1052672; +pub const O_DSYNC: c_int = 4096; +pub const O_FSYNC: c_int = 0x101000; +pub const O_DIRECT: c_int = 0x4000; +pub const O_DIRECTORY: c_int = 0x10000; +pub const O_NOFOLLOW: c_int = 0x20000; + +pub const MADV_SOFT_OFFLINE: c_int = 101; +pub const MAP_GROWSDOWN: c_int = 0x0100; +pub const MAP_LOCKED: c_int = 0x02000; +pub const MAP_NORESERVE: c_int = 0x04000; +pub const MAP_ANON: c_int = 0x0020; +pub const MAP_ANONYMOUS: c_int = 0x0020; +pub const MAP_DENYWRITE: c_int = 0x0800; +pub const MAP_EXECUTABLE: c_int = 0x01000; +pub const MAP_POPULATE: c_int = 0x08000; +pub const MAP_NONBLOCK: c_int = 0x010000; +pub const MAP_STACK: c_int = 0x020000; +pub const MAP_HUGETLB: c_int = 0x040000; +pub const MAP_SYNC: c_int = 0x080000; + +pub const EDEADLOCK: c_int = 35; +pub const ENAMETOOLONG: c_int = 36; +pub const ENOLCK: c_int = 37; +pub const ENOTEMPTY: c_int = 39; +pub const ELOOP: c_int = 40; +pub const ENOMSG: c_int = 42; +pub const EIDRM: c_int = 43; +pub const ECHRNG: c_int = 44; +pub const EL2NSYNC: c_int = 45; +pub const EL3HLT: c_int = 46; +pub const EL3RST: c_int = 47; +pub const ELNRNG: c_int = 48; +pub const EUNATCH: c_int = 49; +pub const ENOCSI: c_int = 50; +pub const EL2HLT: c_int = 51; +pub const EBADE: c_int = 52; +pub const EBADR: c_int = 53; +pub const EXFULL: c_int = 54; +pub const ENOANO: c_int = 55; +pub const EBADRQC: c_int = 56; +pub const EBADSLT: c_int = 57; +pub const EMULTIHOP: c_int = 72; +pub const EOVERFLOW: c_int = 75; +pub const ENOTUNIQ: c_int = 76; +pub const EBADFD: c_int = 77; +pub const EBADMSG: c_int = 74; +pub const EREMCHG: c_int = 78; +pub const ELIBACC: c_int = 79; +pub const ELIBBAD: c_int = 80; +pub const ELIBSCN: c_int = 81; +pub const ELIBMAX: c_int = 82; +pub const ELIBEXEC: c_int = 83; +pub const EILSEQ: c_int = 84; +pub const ERESTART: c_int = 85; +pub const ESTRPIPE: c_int = 86; +pub const EUSERS: c_int = 87; +pub const ENOTSOCK: c_int = 88; +pub const EDESTADDRREQ: c_int = 89; +pub const EMSGSIZE: c_int = 90; +pub const EPROTOTYPE: c_int = 91; +pub const ENOPROTOOPT: c_int = 92; +pub const EPROTONOSUPPORT: c_int = 93; +pub 
const ESOCKTNOSUPPORT: c_int = 94; +pub const EOPNOTSUPP: c_int = 95; +pub const EPFNOSUPPORT: c_int = 96; +pub const EAFNOSUPPORT: c_int = 97; +pub const ENETDOWN: c_int = 100; +pub const ENETUNREACH: c_int = 101; +pub const ENETRESET: c_int = 102; +pub const ENOBUFS: c_int = 105; +pub const EISCONN: c_int = 106; +pub const ESHUTDOWN: c_int = 108; +pub const ETOOMANYREFS: c_int = 109; +pub const EHOSTDOWN: c_int = 112; +pub const EHOSTUNREACH: c_int = 113; +pub const EALREADY: c_int = 114; +pub const EINPROGRESS: c_int = 115; +pub const ESTALE: c_int = 116; +pub const EDQUOT: c_int = 122; +pub const ENOMEDIUM: c_int = 123; +pub const EMEDIUMTYPE: c_int = 124; +pub const ECANCELED: c_int = 125; +pub const ENOKEY: c_int = 126; +pub const EKEYEXPIRED: c_int = 127; +pub const EKEYREVOKED: c_int = 128; +pub const EKEYREJECTED: c_int = 129; +pub const EOWNERDEAD: c_int = 130; +pub const ENOTRECOVERABLE: c_int = 131; +pub const EHWPOISON: c_int = 133; +pub const ERFKILL: c_int = 132; + +pub const SIGTTIN: c_int = 21; +pub const SIGTTOU: c_int = 22; +pub const SIGXCPU: c_int = 24; +pub const SIGXFSZ: c_int = 25; +pub const SIGVTALRM: c_int = 26; +pub const SIGPROF: c_int = 27; +pub const SIGWINCH: c_int = 28; +pub const SIGCHLD: c_int = 17; +pub const SIGUSR1: c_int = 10; +pub const SIGUSR2: c_int = 12; +pub const SIGCONT: c_int = 18; +pub const SIGSTOP: c_int = 19; +pub const SIGTSTP: c_int = 20; +pub const SIGURG: c_int = 23; +pub const SIGIO: c_int = 29; +pub const SIGSYS: c_int = 31; +pub const SIGSTKFLT: c_int = 16; +#[deprecated(since = "0.2.55", note = "Use SIGSYS instead")] +pub const SIGUNUSED: c_int = 31; +pub const SIGPOLL: c_int = 29; +pub const SIGPWR: c_int = 30; +pub const SIG_BLOCK: c_int = 0x000000; +pub const SIG_UNBLOCK: c_int = 0x01; + +pub const O_ASYNC: c_int = 0x2000; +pub const O_NDELAY: c_int = 0x800; + +pub const VEOL: usize = 11; +pub const VEOL2: usize = 16; +pub const VMIN: usize = 6; +pub const IEXTEN: crate::tcflag_t = 0x00008000; +pub const TOSTOP: crate::tcflag_t = 0x00000100; +pub const FLUSHO: crate::tcflag_t = 0x00001000; + +pub const EXTPROC: crate::tcflag_t = 0x00010000; + +pub const PTRACE_DETACH: c_uint = 17; + +pub const MCL_CURRENT: c_int = 0x0001; +pub const MCL_FUTURE: c_int = 0x0002; +pub const MCL_ONFAULT: c_int = 0x0004; + +pub const EFD_NONBLOCK: c_int = 0x800; + +pub const F_RDLCK: c_int = 0; +pub const F_WRLCK: c_int = 1; +pub const F_UNLCK: c_int = 2; +pub const F_GETLK: c_int = 5; +pub const F_GETOWN: c_int = 9; +pub const F_SETOWN: c_int = 8; +pub const F_SETLK: c_int = 6; +pub const F_SETLKW: c_int = 7; +pub const F_OFD_GETLK: c_int = 36; +pub const F_OFD_SETLK: c_int = 37; +pub const F_OFD_SETLKW: c_int = 38; + +pub const SFD_NONBLOCK: c_int = 0x0800; + +pub const TCSANOW: c_int = 0; +pub const TCSADRAIN: c_int = 1; +pub const TCSAFLUSH: c_int = 2; + +pub const VTIME: usize = 5; +pub const VSWTC: usize = 7; +pub const VSTART: usize = 8; +pub const VSTOP: usize = 9; +pub const VSUSP: usize = 10; +pub const VREPRINT: usize = 12; +pub const VDISCARD: usize = 13; +pub const VWERASE: usize = 14; +pub const OLCUC: crate::tcflag_t = 0o000002; +pub const ONLCR: crate::tcflag_t = 0o000004; +pub const NLDLY: crate::tcflag_t = 0o000400; +pub const CRDLY: crate::tcflag_t = 0o003000; +pub const CR1: crate::tcflag_t = 0x00000200; +pub const CR2: crate::tcflag_t = 0x00000400; +pub const CR3: crate::tcflag_t = 0x00000600; +pub const TABDLY: crate::tcflag_t = 0o014000; +pub const TAB1: crate::tcflag_t = 0x00000800; +pub const TAB2: crate::tcflag_t = 
0x00001000; +pub const TAB3: crate::tcflag_t = 0x00001800; +pub const BSDLY: crate::tcflag_t = 0o020000; +pub const BS1: crate::tcflag_t = 0x00002000; +pub const FFDLY: crate::tcflag_t = 0o100000; +pub const FF1: crate::tcflag_t = 0x00008000; +pub const VTDLY: crate::tcflag_t = 0o040000; +pub const VT1: crate::tcflag_t = 0x00004000; +pub const XTABS: crate::tcflag_t = 0o014000; + +pub const CBAUD: crate::speed_t = 0o010017; +pub const B0: crate::speed_t = 0o000000; +pub const B50: crate::speed_t = 0o000001; +pub const B75: crate::speed_t = 0o000002; +pub const B110: crate::speed_t = 0o000003; +pub const B134: crate::speed_t = 0o000004; +pub const B150: crate::speed_t = 0o000005; +pub const B200: crate::speed_t = 0o000006; +pub const B300: crate::speed_t = 0o000007; +pub const B600: crate::speed_t = 0o000010; +pub const B1200: crate::speed_t = 0o000011; +pub const B1800: crate::speed_t = 0o000012; +pub const B2400: crate::speed_t = 0o000013; +pub const B4800: crate::speed_t = 0o000014; +pub const B9600: crate::speed_t = 0o000015; +pub const B19200: crate::speed_t = 0o000016; +pub const B38400: crate::speed_t = 0o000017; +pub const EXTA: crate::speed_t = B19200; +pub const EXTB: crate::speed_t = B38400; +pub const CSIZE: crate::tcflag_t = 0o000060; +pub const CS6: crate::tcflag_t = 0o000020; +pub const CS7: crate::tcflag_t = 0o000040; +pub const CS8: crate::tcflag_t = 0o000060; +pub const CSTOPB: crate::tcflag_t = 0o000100; +pub const CREAD: crate::tcflag_t = 0o000200; +pub const PARENB: crate::tcflag_t = 0o000400; +pub const PARODD: crate::tcflag_t = 0o001000; +pub const HUPCL: crate::tcflag_t = 0o002000; +pub const CLOCAL: crate::tcflag_t = 0o004000; +pub const CBAUDEX: crate::tcflag_t = 0o010000; +pub const B57600: crate::speed_t = 0o010001; +pub const B115200: crate::speed_t = 0o010002; +pub const B230400: crate::speed_t = 0o010003; +pub const B460800: crate::speed_t = 0o010004; +pub const B500000: crate::speed_t = 0o010005; +pub const B576000: crate::speed_t = 0o010006; +pub const B921600: crate::speed_t = 0o010007; +pub const B1000000: crate::speed_t = 0o010010; +pub const B1152000: crate::speed_t = 0o010011; +pub const B1500000: crate::speed_t = 0o010012; +pub const B2000000: crate::speed_t = 0o010013; +pub const B2500000: crate::speed_t = 0o010014; +pub const B3000000: crate::speed_t = 0o010015; +pub const B3500000: crate::speed_t = 0o010016; +pub const B4000000: crate::speed_t = 0o010017; +pub const CIBAUD: crate::tcflag_t = 0o02003600000; + +pub const ISIG: crate::tcflag_t = 0o000001; +pub const ICANON: crate::tcflag_t = 0o000002; +pub const XCASE: crate::tcflag_t = 0o000004; +pub const ECHOE: crate::tcflag_t = 0o000020; +pub const ECHOK: crate::tcflag_t = 0o000040; +pub const ECHONL: crate::tcflag_t = 0o000100; +pub const NOFLSH: crate::tcflag_t = 0o000200; +pub const ECHOCTL: crate::tcflag_t = 0o001000; +pub const ECHOPRT: crate::tcflag_t = 0o002000; +pub const ECHOKE: crate::tcflag_t = 0o004000; +pub const PENDIN: crate::tcflag_t = 0o040000; + +pub const POLLWRNORM: c_short = 0x100; +pub const POLLWRBAND: c_short = 0x200; + +pub const IXON: crate::tcflag_t = 0o002000; +pub const IXOFF: crate::tcflag_t = 0o010000; + +pub const SYS_exit: c_long = 1; +pub const SYS_fork: c_long = 2; +pub const SYS_read: c_long = 3; +pub const SYS_write: c_long = 4; +pub const SYS_open: c_long = 5; +pub const SYS_close: c_long = 6; +pub const SYS_restart_syscall: c_long = 7; +pub const SYS_creat: c_long = 8; +pub const SYS_link: c_long = 9; +pub const SYS_unlink: c_long = 10; +pub const SYS_execve: 
c_long = 11; +pub const SYS_chdir: c_long = 12; +pub const SYS_mknod: c_long = 14; +pub const SYS_chmod: c_long = 15; +pub const SYS_lseek: c_long = 19; +pub const SYS_getpid: c_long = 20; +pub const SYS_mount: c_long = 21; +pub const SYS_umount: c_long = 22; +pub const SYS_ptrace: c_long = 26; +pub const SYS_alarm: c_long = 27; +pub const SYS_pause: c_long = 29; +pub const SYS_utime: c_long = 30; +pub const SYS_access: c_long = 33; +pub const SYS_nice: c_long = 34; +pub const SYS_sync: c_long = 36; +pub const SYS_kill: c_long = 37; +pub const SYS_rename: c_long = 38; +pub const SYS_mkdir: c_long = 39; +pub const SYS_rmdir: c_long = 40; +pub const SYS_dup: c_long = 41; +pub const SYS_pipe: c_long = 42; +pub const SYS_times: c_long = 43; +pub const SYS_brk: c_long = 45; +pub const SYS_signal: c_long = 48; +pub const SYS_acct: c_long = 51; +pub const SYS_umount2: c_long = 52; +pub const SYS_ioctl: c_long = 54; +pub const SYS_fcntl: c_long = 55; +pub const SYS_setpgid: c_long = 57; +pub const SYS_umask: c_long = 60; +pub const SYS_chroot: c_long = 61; +pub const SYS_ustat: c_long = 62; +pub const SYS_dup2: c_long = 63; +pub const SYS_getppid: c_long = 64; +pub const SYS_getpgrp: c_long = 65; +pub const SYS_setsid: c_long = 66; +pub const SYS_sigaction: c_long = 67; +pub const SYS_sigsuspend: c_long = 72; +pub const SYS_sigpending: c_long = 73; +pub const SYS_sethostname: c_long = 74; +pub const SYS_setrlimit: c_long = 75; +pub const SYS_getrusage: c_long = 77; +pub const SYS_gettimeofday: c_long = 78; +pub const SYS_settimeofday: c_long = 79; +pub const SYS_symlink: c_long = 83; +pub const SYS_readlink: c_long = 85; +pub const SYS_uselib: c_long = 86; +pub const SYS_swapon: c_long = 87; +pub const SYS_reboot: c_long = 88; +pub const SYS_readdir: c_long = 89; +pub const SYS_mmap: c_long = 90; +pub const SYS_munmap: c_long = 91; +pub const SYS_truncate: c_long = 92; +pub const SYS_ftruncate: c_long = 93; +pub const SYS_fchmod: c_long = 94; +pub const SYS_getpriority: c_long = 96; +pub const SYS_setpriority: c_long = 97; +pub const SYS_statfs: c_long = 99; +pub const SYS_fstatfs: c_long = 100; +pub const SYS_socketcall: c_long = 102; +pub const SYS_syslog: c_long = 103; +pub const SYS_setitimer: c_long = 104; +pub const SYS_getitimer: c_long = 105; +pub const SYS_stat: c_long = 106; +pub const SYS_lstat: c_long = 107; +pub const SYS_fstat: c_long = 108; +pub const SYS_lookup_dcookie: c_long = 110; +pub const SYS_vhangup: c_long = 111; +pub const SYS_idle: c_long = 112; +pub const SYS_wait4: c_long = 114; +pub const SYS_swapoff: c_long = 115; +pub const SYS_sysinfo: c_long = 116; +pub const SYS_ipc: c_long = 117; +pub const SYS_fsync: c_long = 118; +pub const SYS_sigreturn: c_long = 119; +pub const SYS_clone: c_long = 120; +pub const SYS_setdomainname: c_long = 121; +pub const SYS_uname: c_long = 122; +pub const SYS_adjtimex: c_long = 124; +pub const SYS_mprotect: c_long = 125; +pub const SYS_sigprocmask: c_long = 126; +#[deprecated(since = "0.2.70", note = "Functional up to 2.6 kernel")] +pub const SYS_create_module: c_long = 127; +pub const SYS_init_module: c_long = 128; +pub const SYS_delete_module: c_long = 129; +#[deprecated(since = "0.2.70", note = "Functional up to 2.6 kernel")] +pub const SYS_get_kernel_syms: c_long = 130; +pub const SYS_quotactl: c_long = 131; +pub const SYS_getpgid: c_long = 132; +pub const SYS_fchdir: c_long = 133; +pub const SYS_bdflush: c_long = 134; +pub const SYS_sysfs: c_long = 135; +pub const SYS_personality: c_long = 136; +pub const SYS_afs_syscall: c_long = 
137; /* Syscall for Andrew File System */ +pub const SYS_getdents: c_long = 141; +pub const SYS_flock: c_long = 143; +pub const SYS_msync: c_long = 144; +pub const SYS_readv: c_long = 145; +pub const SYS_writev: c_long = 146; +pub const SYS_getsid: c_long = 147; +pub const SYS_fdatasync: c_long = 148; +pub const SYS__sysctl: c_long = 149; +pub const SYS_mlock: c_long = 150; +pub const SYS_munlock: c_long = 151; +pub const SYS_mlockall: c_long = 152; +pub const SYS_munlockall: c_long = 153; +pub const SYS_sched_setparam: c_long = 154; +pub const SYS_sched_getparam: c_long = 155; +pub const SYS_sched_setscheduler: c_long = 156; +pub const SYS_sched_getscheduler: c_long = 157; +pub const SYS_sched_yield: c_long = 158; +pub const SYS_sched_get_priority_max: c_long = 159; +pub const SYS_sched_get_priority_min: c_long = 160; +pub const SYS_sched_rr_get_interval: c_long = 161; +pub const SYS_nanosleep: c_long = 162; +pub const SYS_mremap: c_long = 163; +#[deprecated(since = "0.2.70", note = "Functional up to 2.6 kernel")] +pub const SYS_query_module: c_long = 167; +pub const SYS_poll: c_long = 168; +pub const SYS_nfsservctl: c_long = 169; +pub const SYS_prctl: c_long = 172; +pub const SYS_rt_sigreturn: c_long = 173; +pub const SYS_rt_sigaction: c_long = 174; +pub const SYS_rt_sigprocmask: c_long = 175; +pub const SYS_rt_sigpending: c_long = 176; +pub const SYS_rt_sigtimedwait: c_long = 177; +pub const SYS_rt_sigqueueinfo: c_long = 178; +pub const SYS_rt_sigsuspend: c_long = 179; +pub const SYS_pread64: c_long = 180; +pub const SYS_pwrite64: c_long = 181; +pub const SYS_getcwd: c_long = 183; +pub const SYS_capget: c_long = 184; +pub const SYS_capset: c_long = 185; +pub const SYS_sigaltstack: c_long = 186; +pub const SYS_sendfile: c_long = 187; +pub const SYS_getpmsg: c_long = 188; +pub const SYS_putpmsg: c_long = 189; +pub const SYS_vfork: c_long = 190; +pub const SYS_pivot_root: c_long = 217; +pub const SYS_mincore: c_long = 218; +pub const SYS_madvise: c_long = 219; +pub const SYS_getdents64: c_long = 220; +pub const SYS_readahead: c_long = 222; +pub const SYS_setxattr: c_long = 224; +pub const SYS_lsetxattr: c_long = 225; +pub const SYS_fsetxattr: c_long = 226; +pub const SYS_getxattr: c_long = 227; +pub const SYS_lgetxattr: c_long = 228; +pub const SYS_fgetxattr: c_long = 229; +pub const SYS_listxattr: c_long = 230; +pub const SYS_llistxattr: c_long = 231; +pub const SYS_flistxattr: c_long = 232; +pub const SYS_removexattr: c_long = 233; +pub const SYS_lremovexattr: c_long = 234; +pub const SYS_fremovexattr: c_long = 235; +pub const SYS_gettid: c_long = 236; +pub const SYS_tkill: c_long = 237; +pub const SYS_futex: c_long = 238; +pub const SYS_sched_setaffinity: c_long = 239; +pub const SYS_sched_getaffinity: c_long = 240; +pub const SYS_tgkill: c_long = 241; +pub const SYS_io_setup: c_long = 243; +pub const SYS_io_destroy: c_long = 244; +pub const SYS_io_getevents: c_long = 245; +pub const SYS_io_submit: c_long = 246; +pub const SYS_io_cancel: c_long = 247; +pub const SYS_exit_group: c_long = 248; +pub const SYS_epoll_create: c_long = 249; +pub const SYS_epoll_ctl: c_long = 250; +pub const SYS_epoll_wait: c_long = 251; +pub const SYS_set_tid_address: c_long = 252; +pub const SYS_fadvise64: c_long = 253; +pub const SYS_timer_create: c_long = 254; +pub const SYS_timer_settime: c_long = 255; +pub const SYS_timer_gettime: c_long = 256; +pub const SYS_timer_getoverrun: c_long = 257; +pub const SYS_timer_delete: c_long = 258; +pub const SYS_clock_settime: c_long = 259; +pub const SYS_clock_gettime: 
c_long = 260; +pub const SYS_clock_getres: c_long = 261; +pub const SYS_clock_nanosleep: c_long = 262; +pub const SYS_statfs64: c_long = 265; +pub const SYS_fstatfs64: c_long = 266; +pub const SYS_remap_file_pages: c_long = 267; +pub const SYS_mbind: c_long = 268; +pub const SYS_get_mempolicy: c_long = 269; +pub const SYS_set_mempolicy: c_long = 270; +pub const SYS_mq_open: c_long = 271; +pub const SYS_mq_unlink: c_long = 272; +pub const SYS_mq_timedsend: c_long = 273; +pub const SYS_mq_timedreceive: c_long = 274; +pub const SYS_mq_notify: c_long = 275; +pub const SYS_mq_getsetattr: c_long = 276; +pub const SYS_kexec_load: c_long = 277; +pub const SYS_add_key: c_long = 278; +pub const SYS_request_key: c_long = 279; +pub const SYS_keyctl: c_long = 280; +pub const SYS_waitid: c_long = 281; +pub const SYS_ioprio_set: c_long = 282; +pub const SYS_ioprio_get: c_long = 283; +pub const SYS_inotify_init: c_long = 284; +pub const SYS_inotify_add_watch: c_long = 285; +pub const SYS_inotify_rm_watch: c_long = 286; +pub const SYS_migrate_pages: c_long = 287; +pub const SYS_openat: c_long = 288; +pub const SYS_mkdirat: c_long = 289; +pub const SYS_mknodat: c_long = 290; +pub const SYS_fchownat: c_long = 291; +pub const SYS_futimesat: c_long = 292; +pub const SYS_unlinkat: c_long = 294; +pub const SYS_renameat: c_long = 295; +pub const SYS_linkat: c_long = 296; +pub const SYS_symlinkat: c_long = 297; +pub const SYS_readlinkat: c_long = 298; +pub const SYS_fchmodat: c_long = 299; +pub const SYS_faccessat: c_long = 300; +pub const SYS_pselect6: c_long = 301; +pub const SYS_ppoll: c_long = 302; +pub const SYS_unshare: c_long = 303; +pub const SYS_set_robust_list: c_long = 304; +pub const SYS_get_robust_list: c_long = 305; +pub const SYS_splice: c_long = 306; +pub const SYS_sync_file_range: c_long = 307; +pub const SYS_tee: c_long = 308; +pub const SYS_vmsplice: c_long = 309; +pub const SYS_move_pages: c_long = 310; +pub const SYS_getcpu: c_long = 311; +pub const SYS_epoll_pwait: c_long = 312; +pub const SYS_utimes: c_long = 313; +pub const SYS_fallocate: c_long = 314; +pub const SYS_utimensat: c_long = 315; +pub const SYS_signalfd: c_long = 316; +pub const SYS_timerfd: c_long = 317; +pub const SYS_eventfd: c_long = 318; +pub const SYS_timerfd_create: c_long = 319; +pub const SYS_timerfd_settime: c_long = 320; +pub const SYS_timerfd_gettime: c_long = 321; +pub const SYS_signalfd4: c_long = 322; +pub const SYS_eventfd2: c_long = 323; +pub const SYS_inotify_init1: c_long = 324; +pub const SYS_pipe2: c_long = 325; +pub const SYS_dup3: c_long = 326; +pub const SYS_epoll_create1: c_long = 327; +pub const SYS_preadv: c_long = 328; +pub const SYS_pwritev: c_long = 329; +pub const SYS_rt_tgsigqueueinfo: c_long = 330; +pub const SYS_perf_event_open: c_long = 331; +pub const SYS_fanotify_init: c_long = 332; +pub const SYS_fanotify_mark: c_long = 333; +pub const SYS_prlimit64: c_long = 334; +pub const SYS_name_to_handle_at: c_long = 335; +pub const SYS_open_by_handle_at: c_long = 336; +pub const SYS_clock_adjtime: c_long = 337; +pub const SYS_syncfs: c_long = 338; +pub const SYS_setns: c_long = 339; +pub const SYS_process_vm_readv: c_long = 340; +pub const SYS_process_vm_writev: c_long = 341; +pub const SYS_s390_runtime_instr: c_long = 342; +pub const SYS_kcmp: c_long = 343; +pub const SYS_finit_module: c_long = 344; +pub const SYS_sched_setattr: c_long = 345; +pub const SYS_sched_getattr: c_long = 346; +pub const SYS_renameat2: c_long = 347; +pub const SYS_seccomp: c_long = 348; +pub const SYS_getrandom: c_long = 
349; +pub const SYS_memfd_create: c_long = 350; +pub const SYS_bpf: c_long = 351; +pub const SYS_s390_pci_mmio_write: c_long = 352; +pub const SYS_s390_pci_mmio_read: c_long = 353; +pub const SYS_execveat: c_long = 354; +pub const SYS_userfaultfd: c_long = 355; +pub const SYS_membarrier: c_long = 356; +pub const SYS_recvmmsg: c_long = 357; +pub const SYS_sendmmsg: c_long = 358; +pub const SYS_socket: c_long = 359; +pub const SYS_socketpair: c_long = 360; +pub const SYS_bind: c_long = 361; +pub const SYS_connect: c_long = 362; +pub const SYS_listen: c_long = 363; +pub const SYS_accept4: c_long = 364; +pub const SYS_getsockopt: c_long = 365; +pub const SYS_setsockopt: c_long = 366; +pub const SYS_getsockname: c_long = 367; +pub const SYS_getpeername: c_long = 368; +pub const SYS_sendto: c_long = 369; +pub const SYS_sendmsg: c_long = 370; +pub const SYS_recvfrom: c_long = 371; +pub const SYS_recvmsg: c_long = 372; +pub const SYS_shutdown: c_long = 373; +pub const SYS_mlock2: c_long = 374; +pub const SYS_copy_file_range: c_long = 375; +pub const SYS_preadv2: c_long = 376; +pub const SYS_pwritev2: c_long = 377; +pub const SYS_lchown: c_long = 198; +pub const SYS_setuid: c_long = 213; +pub const SYS_getuid: c_long = 199; +pub const SYS_setgid: c_long = 214; +pub const SYS_getgid: c_long = 200; +pub const SYS_geteuid: c_long = 201; +pub const SYS_setreuid: c_long = 203; +pub const SYS_setregid: c_long = 204; +pub const SYS_getrlimit: c_long = 191; +pub const SYS_getgroups: c_long = 205; +pub const SYS_fchown: c_long = 207; +pub const SYS_setresuid: c_long = 208; +pub const SYS_setresgid: c_long = 210; +pub const SYS_getresgid: c_long = 211; +pub const SYS_select: c_long = 142; +pub const SYS_getegid: c_long = 202; +pub const SYS_setgroups: c_long = 206; +pub const SYS_getresuid: c_long = 209; +pub const SYS_chown: c_long = 212; +pub const SYS_setfsuid: c_long = 215; +pub const SYS_setfsgid: c_long = 216; +pub const SYS_newfstatat: c_long = 293; +pub const SYS_statx: c_long = 379; +pub const SYS_rseq: c_long = 383; +pub const SYS_pidfd_send_signal: c_long = 424; +pub const SYS_io_uring_setup: c_long = 425; +pub const SYS_io_uring_enter: c_long = 426; +pub const SYS_io_uring_register: c_long = 427; +pub const SYS_open_tree: c_long = 428; +pub const SYS_move_mount: c_long = 429; +pub const SYS_fsopen: c_long = 430; +pub const SYS_fsconfig: c_long = 431; +pub const SYS_fsmount: c_long = 432; +pub const SYS_fspick: c_long = 433; +pub const SYS_pidfd_open: c_long = 434; +pub const SYS_clone3: c_long = 435; +pub const SYS_close_range: c_long = 436; +pub const SYS_openat2: c_long = 437; +pub const SYS_pidfd_getfd: c_long = 438; +pub const SYS_faccessat2: c_long = 439; +pub const SYS_process_madvise: c_long = 440; +pub const SYS_epoll_pwait2: c_long = 441; +pub const SYS_mount_setattr: c_long = 442; +pub const SYS_quotactl_fd: c_long = 443; +pub const SYS_landlock_create_ruleset: c_long = 444; +pub const SYS_landlock_add_rule: c_long = 445; +pub const SYS_landlock_restrict_self: c_long = 446; +pub const SYS_memfd_secret: c_long = 447; +pub const SYS_process_mrelease: c_long = 448; +pub const SYS_futex_waitv: c_long = 449; +pub const SYS_set_mempolicy_home_node: c_long = 450; +pub const SYS_mseal: c_long = 462; + +extern "C" { + + pub fn sysctl( + name: *mut c_int, + namelen: c_int, + oldp: *mut c_void, + oldlenp: *mut size_t, + newp: *mut c_void, + newlen: size_t, + ) -> c_int; + pub fn getcontext(ucp: *mut crate::ucontext_t) -> c_int; + pub fn setcontext(ucp: *const crate::ucontext_t) -> c_int; + pub fn 
makecontext(ucp: *mut crate::ucontext_t, func: extern "C" fn(), argc: c_int, ...); + pub fn swapcontext(uocp: *mut crate::ucontext_t, ucp: *const crate::ucontext_t) -> c_int; +} diff --git a/vendor/libc/src/unix/linux_like/linux/gnu/b64/sparc64/mod.rs b/vendor/libc/src/unix/linux_like/linux/gnu/b64/sparc64/mod.rs new file mode 100644 index 00000000000000..f18e53a99b4661 --- /dev/null +++ b/vendor/libc/src/unix/linux_like/linux/gnu/b64/sparc64/mod.rs @@ -0,0 +1,930 @@ +//! SPARC64-specific definitions for 64-bit linux-like values + +use crate::prelude::*; +use crate::{off64_t, off_t, pthread_mutex_t}; + +pub type wchar_t = i32; +pub type nlink_t = u32; +pub type blksize_t = i64; +pub type suseconds_t = i32; +pub type __u64 = c_ulonglong; +pub type __s64 = c_longlong; + +s! { + // FIXME(1.0): This should not implement `PartialEq` + #[allow(unpredictable_function_pointer_comparisons)] + pub struct sigaction { + pub sa_sigaction: crate::sighandler_t, + pub sa_mask: crate::sigset_t, + #[cfg(target_arch = "sparc64")] + __reserved0: c_int, + pub sa_flags: c_int, + pub sa_restorer: Option<extern "C" fn()>, + } + + pub struct statfs { + pub f_type: crate::__fsword_t, + pub f_bsize: crate::__fsword_t, + pub f_blocks: crate::fsblkcnt_t, + pub f_bfree: crate::fsblkcnt_t, + pub f_bavail: crate::fsblkcnt_t, + + pub f_files: crate::fsfilcnt_t, + pub f_ffree: crate::fsfilcnt_t, + pub f_fsid: crate::fsid_t, + + pub f_namelen: crate::__fsword_t, + pub f_frsize: crate::__fsword_t, + f_spare: [crate::__fsword_t; 5], + } + + pub struct siginfo_t { + pub si_signo: c_int, + pub si_errno: c_int, + pub si_code: c_int, + #[doc(hidden)] + #[deprecated( + since = "0.2.54", + note = "Please leave a comment on \ + https://github.com/rust-lang/libc/pull/1316 if you're using \ + this field" + )] + pub _pad: [c_int; 29], + _align: [usize; 0], + } + + pub struct flock { + pub l_type: c_short, + pub l_whence: c_short, + pub l_start: off_t, + pub l_len: off_t, + pub l_pid: crate::pid_t, + } + + pub struct flock64 { + pub l_type: c_short, + pub l_whence: c_short, + pub l_start: off64_t, + pub l_len: off64_t, + pub l_pid: crate::pid_t, + __reserved: c_short, + } + + pub struct stack_t { + pub ss_sp: *mut c_void, + pub ss_flags: c_int, + pub ss_size: size_t, + } + + pub struct stat { + pub st_dev: crate::dev_t, + __pad0: u64, + pub st_ino: crate::ino_t, + pub st_mode: crate::mode_t, + pub st_nlink: crate::nlink_t, + pub st_uid: crate::uid_t, + pub st_gid: crate::gid_t, + pub st_rdev: crate::dev_t, + __pad1: u64, + pub st_size: off_t, + pub st_blksize: crate::blksize_t, + pub st_blocks: crate::blkcnt_t, + pub st_atime: crate::time_t, + pub st_atime_nsec: c_long, + pub st_mtime: crate::time_t, + pub st_mtime_nsec: c_long, + pub st_ctime: crate::time_t, + pub st_ctime_nsec: c_long, + __unused: [c_long; 2], + } + + pub struct stat64 { + pub st_dev: crate::dev_t, + __pad0: u64, + pub st_ino: crate::ino64_t, + pub st_mode: crate::mode_t, + pub st_nlink: crate::nlink_t, + pub st_uid: crate::uid_t, + pub st_gid: crate::gid_t, + pub st_rdev: crate::dev_t, + __pad2: c_int, + pub st_size: off64_t, + pub st_blksize: crate::blksize_t, + pub st_blocks: crate::blkcnt64_t, + pub st_atime: crate::time_t, + pub st_atime_nsec: c_long, + pub st_mtime: crate::time_t, + pub st_mtime_nsec: c_long, + pub st_ctime: crate::time_t, + pub st_ctime_nsec: c_long, + __reserved: [c_long; 2], + } + + pub struct statfs64 { + pub f_type: crate::__fsword_t, + pub f_bsize: crate::__fsword_t, + pub f_blocks: u64, + pub f_bfree: u64, + pub f_bavail: u64, + pub f_files: u64, + pub
f_ffree: u64, + pub f_fsid: crate::fsid_t, + pub f_namelen: crate::__fsword_t, + pub f_frsize: crate::__fsword_t, + pub f_flags: crate::__fsword_t, + pub f_spare: [crate::__fsword_t; 4], + } + + pub struct statvfs { + pub f_bsize: c_ulong, + pub f_frsize: c_ulong, + pub f_blocks: crate::fsblkcnt_t, + pub f_bfree: crate::fsblkcnt_t, + pub f_bavail: crate::fsblkcnt_t, + pub f_files: crate::fsfilcnt_t, + pub f_ffree: crate::fsfilcnt_t, + pub f_favail: crate::fsfilcnt_t, + pub f_fsid: c_ulong, + pub f_flag: c_ulong, + pub f_namemax: c_ulong, + __f_spare: [c_int; 6], + } + + pub struct statvfs64 { + pub f_bsize: c_ulong, + pub f_frsize: c_ulong, + pub f_blocks: u64, + pub f_bfree: u64, + pub f_bavail: u64, + pub f_files: u64, + pub f_ffree: u64, + pub f_favail: u64, + pub f_fsid: c_ulong, + pub f_flag: c_ulong, + pub f_namemax: c_ulong, + __f_spare: [c_int; 6], + } + + pub struct pthread_attr_t { + __size: [u64; 7], + } + + pub struct ipc_perm { + pub __key: crate::key_t, + pub uid: crate::uid_t, + pub gid: crate::gid_t, + pub cuid: crate::uid_t, + pub cgid: crate::gid_t, + pub mode: crate::mode_t, + __pad0: u16, + pub __seq: c_ushort, + __unused1: c_ulonglong, + __unused2: c_ulonglong, + } + + pub struct shmid_ds { + pub shm_perm: crate::ipc_perm, + pub shm_atime: crate::time_t, + pub shm_dtime: crate::time_t, + pub shm_ctime: crate::time_t, + pub shm_segsz: size_t, + pub shm_cpid: crate::pid_t, + pub shm_lpid: crate::pid_t, + pub shm_nattch: crate::shmatt_t, + __reserved1: c_ulong, + __reserved2: c_ulong, + } +} + +s_no_extra_traits! { + #[repr(align(16))] + pub struct max_align_t { + priv_: [i64; 4], + } +} + +pub const POSIX_FADV_DONTNEED: c_int = 4; +pub const POSIX_FADV_NOREUSE: c_int = 5; + +pub const VEOF: usize = 4; +pub const RTLD_DEEPBIND: c_int = 0x8; +pub const RTLD_GLOBAL: c_int = 0x100; +pub const RTLD_NOLOAD: c_int = 0x4; +pub const __SIZEOF_PTHREAD_RWLOCK_T: usize = 56; +pub const __SIZEOF_PTHREAD_BARRIER_T: usize = 32; + +pub const O_APPEND: c_int = 0x8; +pub const O_CREAT: c_int = 0x200; +pub const O_EXCL: c_int = 0x800; +pub const O_NOCTTY: c_int = 0x8000; +pub const O_NONBLOCK: c_int = 0x4000; +pub const O_SYNC: c_int = 0x802000; +pub const O_RSYNC: c_int = 0x802000; +pub const O_DSYNC: c_int = 0x2000; +pub const O_FSYNC: c_int = 0x802000; +pub const O_NOATIME: c_int = 0x200000; +pub const O_PATH: c_int = 0x1000000; +pub const O_TMPFILE: c_int = 0x2000000 | O_DIRECTORY; + +pub const MADV_SOFT_OFFLINE: c_int = 101; +pub const MAP_GROWSDOWN: c_int = 0x0200; +pub const MAP_ANON: c_int = 0x0020; +pub const MAP_ANONYMOUS: c_int = 0x0020; +pub const MAP_DENYWRITE: c_int = 0x0800; +pub const MAP_EXECUTABLE: c_int = 0x01000; +pub const MAP_POPULATE: c_int = 0x08000; +pub const MAP_NONBLOCK: c_int = 0x010000; +pub const MAP_STACK: c_int = 0x020000; +pub const MAP_HUGETLB: c_int = 0x040000; +pub const MAP_SYNC: c_int = 0x080000; + +pub const EDEADLK: c_int = 78; +pub const ENAMETOOLONG: c_int = 63; +pub const ENOLCK: c_int = 79; +pub const ENOSYS: c_int = 90; +pub const ENOTEMPTY: c_int = 66; +pub const ELOOP: c_int = 62; +pub const ENOMSG: c_int = 75; +pub const EIDRM: c_int = 77; +pub const ECHRNG: c_int = 94; +pub const EL2NSYNC: c_int = 95; +pub const EL3HLT: c_int = 96; +pub const EL3RST: c_int = 97; +pub const ELNRNG: c_int = 98; +pub const EUNATCH: c_int = 99; +pub const ENOCSI: c_int = 100; +pub const EL2HLT: c_int = 101; +pub const EBADE: c_int = 102; +pub const EBADR: c_int = 103; +pub const EXFULL: c_int = 104; +pub const ENOANO: c_int = 105; +pub const EBADRQC: c_int = 
106; +pub const EBADSLT: c_int = 107; +pub const EMULTIHOP: c_int = 87; +pub const EOVERFLOW: c_int = 92; +pub const ENOTUNIQ: c_int = 115; +pub const EBADFD: c_int = 93; +pub const EBADMSG: c_int = 76; +pub const EREMCHG: c_int = 89; +pub const ELIBACC: c_int = 114; +pub const ELIBBAD: c_int = 112; +pub const ELIBSCN: c_int = 124; +pub const ELIBMAX: c_int = 123; +pub const ELIBEXEC: c_int = 110; +pub const EILSEQ: c_int = 122; +pub const ERESTART: c_int = 116; +pub const ESTRPIPE: c_int = 91; +pub const EUSERS: c_int = 68; +pub const ENOTSOCK: c_int = 38; +pub const EDESTADDRREQ: c_int = 39; +pub const EMSGSIZE: c_int = 40; +pub const EPROTOTYPE: c_int = 41; +pub const ENOPROTOOPT: c_int = 42; +pub const EPROTONOSUPPORT: c_int = 43; +pub const ESOCKTNOSUPPORT: c_int = 44; +pub const EOPNOTSUPP: c_int = 45; +pub const EPFNOSUPPORT: c_int = 46; +pub const EAFNOSUPPORT: c_int = 47; +pub const EADDRINUSE: c_int = 48; +pub const EADDRNOTAVAIL: c_int = 49; +pub const ENETDOWN: c_int = 50; +pub const ENETUNREACH: c_int = 51; +pub const ENETRESET: c_int = 52; +pub const ECONNABORTED: c_int = 53; +pub const ECONNRESET: c_int = 54; +pub const ENOBUFS: c_int = 55; +pub const EISCONN: c_int = 56; +pub const ENOTCONN: c_int = 57; +pub const ESHUTDOWN: c_int = 58; +pub const ETOOMANYREFS: c_int = 59; +pub const ETIMEDOUT: c_int = 60; +pub const ECONNREFUSED: c_int = 61; +pub const EHOSTDOWN: c_int = 64; +pub const EHOSTUNREACH: c_int = 65; +pub const EALREADY: c_int = 37; +pub const EINPROGRESS: c_int = 36; +pub const ESTALE: c_int = 70; +pub const EDQUOT: c_int = 69; +pub const ENOMEDIUM: c_int = 125; +pub const EMEDIUMTYPE: c_int = 126; +pub const ECANCELED: c_int = 127; +pub const ENOKEY: c_int = 128; +pub const EKEYEXPIRED: c_int = 129; +pub const EKEYREVOKED: c_int = 130; +pub const EKEYREJECTED: c_int = 131; +pub const EOWNERDEAD: c_int = 132; +pub const ENOTRECOVERABLE: c_int = 133; +pub const EHWPOISON: c_int = 135; +pub const ERFKILL: c_int = 134; + +pub const SOCK_STREAM: c_int = 1; +pub const SOCK_DGRAM: c_int = 2; + +pub const SA_ONSTACK: c_int = 1; +pub const SA_SIGINFO: c_int = 0x200; +pub const SA_NOCLDWAIT: c_int = 0x100; + +pub const SIGEMT: c_int = 7; +pub const SIGTTIN: c_int = 21; +pub const SIGTTOU: c_int = 22; +pub const SIGXCPU: c_int = 24; +pub const SIGXFSZ: c_int = 25; +pub const SIGVTALRM: c_int = 26; +pub const SIGPROF: c_int = 27; +pub const SIGWINCH: c_int = 28; +pub const SIGCHLD: c_int = 20; +pub const SIGBUS: c_int = 10; +pub const SIGUSR1: c_int = 30; +pub const SIGUSR2: c_int = 31; +pub const SIGCONT: c_int = 19; +pub const SIGSTOP: c_int = 17; +pub const SIGTSTP: c_int = 18; +pub const SIGURG: c_int = 16; +pub const SIGIO: c_int = 23; +pub const SIGSYS: c_int = 12; +pub const SIGPOLL: c_int = 23; +pub const SIGPWR: c_int = 29; +pub const SIG_SETMASK: c_int = 4; +pub const SIG_BLOCK: c_int = 1; +pub const SIG_UNBLOCK: c_int = 2; + +pub const POLLWRNORM: c_short = 4; +pub const POLLWRBAND: c_short = 0x100; + +pub const O_ASYNC: c_int = 0x40; +pub const O_NDELAY: c_int = 0x4004; + +pub const PTRACE_DETACH: c_uint = 17; + +pub const EFD_NONBLOCK: c_int = 0x4000; + +pub const F_GETLK: c_int = 7; +pub const F_GETOWN: c_int = 5; +pub const F_SETOWN: c_int = 6; +pub const F_SETLK: c_int = 8; +pub const F_SETLKW: c_int = 9; +pub const F_OFD_GETLK: c_int = 36; +pub const F_OFD_SETLK: c_int = 37; +pub const F_OFD_SETLKW: c_int = 38; + +pub const F_RDLCK: c_int = 1; +pub const F_WRLCK: c_int = 2; +pub const F_UNLCK: c_int = 3; + +pub const SFD_NONBLOCK: c_int = 0x4000; + +pub 
const TCSANOW: c_int = 0; +pub const TCSADRAIN: c_int = 1; +pub const TCSAFLUSH: c_int = 2; + +pub const SFD_CLOEXEC: c_int = 0x400000; + +pub const NCCS: usize = 17; +pub const O_TRUNC: c_int = 0x400; + +pub const O_CLOEXEC: c_int = 0x400000; + +pub const EBFONT: c_int = 109; +pub const ENOSTR: c_int = 72; +pub const ENODATA: c_int = 111; +pub const ETIME: c_int = 73; +pub const ENOSR: c_int = 74; +pub const ENONET: c_int = 80; +pub const ENOPKG: c_int = 113; +pub const EREMOTE: c_int = 71; +pub const ENOLINK: c_int = 82; +pub const EADV: c_int = 83; +pub const ESRMNT: c_int = 84; +pub const ECOMM: c_int = 85; +pub const EPROTO: c_int = 86; +pub const EDOTDOT: c_int = 88; + +pub const SA_NODEFER: c_int = 0x20; +pub const SA_RESETHAND: c_int = 0x4; +pub const SA_RESTART: c_int = 0x2; +pub const SA_NOCLDSTOP: c_int = 0x00000008; + +pub const EPOLL_CLOEXEC: c_int = 0x400000; + +pub const EFD_CLOEXEC: c_int = 0x400000; +pub const __SIZEOF_PTHREAD_CONDATTR_T: usize = 4; +pub const __SIZEOF_PTHREAD_MUTEX_T: usize = 40; +pub const __SIZEOF_PTHREAD_MUTEXATTR_T: usize = 4; +pub const __SIZEOF_PTHREAD_BARRIERATTR_T: usize = 4; + +pub const PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP: crate::pthread_mutex_t = pthread_mutex_t { + size: [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ], +}; +pub const PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP: crate::pthread_mutex_t = pthread_mutex_t { + size: [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ], +}; +pub const PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP: crate::pthread_mutex_t = pthread_mutex_t { + size: [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ], +}; + +pub const O_DIRECTORY: c_int = 0o200000; +pub const O_NOFOLLOW: c_int = 0o400000; +pub const O_DIRECT: c_int = 0x100000; + +pub const MAP_LOCKED: c_int = 0x0100; +pub const MAP_NORESERVE: c_int = 0x00040; + +pub const EDEADLOCK: c_int = 108; +pub const EUCLEAN: c_int = 117; +pub const ENOTNAM: c_int = 118; +pub const ENAVAIL: c_int = 119; +pub const EISNAM: c_int = 120; +pub const EREMOTEIO: c_int = 121; + +pub const MCL_CURRENT: c_int = 0x2000; +pub const MCL_FUTURE: c_int = 0x4000; +pub const MCL_ONFAULT: c_int = 0x8000; + +pub const SIGSTKSZ: size_t = 16384; +pub const MINSIGSTKSZ: size_t = 4096; +pub const CBAUD: crate::tcflag_t = 0x0000100f; +pub const TAB1: crate::tcflag_t = 0x800; +pub const TAB2: crate::tcflag_t = 0x1000; +pub const TAB3: crate::tcflag_t = 0x1800; +pub const CR1: crate::tcflag_t = 0x200; +pub const CR2: crate::tcflag_t = 0x400; +pub const CR3: crate::tcflag_t = 0x600; +pub const FF1: crate::tcflag_t = 0x8000; +pub const BS1: crate::tcflag_t = 0x2000; +pub const VT1: crate::tcflag_t = 0x4000; +pub const VWERASE: usize = 0xe; +pub const VREPRINT: usize = 0xc; +pub const VSUSP: usize = 0xa; +pub const VSTART: usize = 0x8; +pub const VSTOP: usize = 0x9; +pub const VDISCARD: usize = 0xd; +pub const VTIME: usize = 0x5; +pub const IXON: crate::tcflag_t = 0x400; +pub const IXOFF: crate::tcflag_t = 0x1000; +pub const ONLCR: crate::tcflag_t = 0x4; +pub const CSIZE: crate::tcflag_t = 0x30; +pub const CS6: crate::tcflag_t = 0x10; +pub const CS7: crate::tcflag_t = 0x20; +pub const CS8: crate::tcflag_t = 0x30; +pub const CSTOPB: crate::tcflag_t = 0x40; +pub const CREAD: crate::tcflag_t = 0x80; +pub const PARENB: crate::tcflag_t = 0x100; +pub const PARODD: 
crate::tcflag_t = 0x200; +pub const HUPCL: crate::tcflag_t = 0x400; +pub const CLOCAL: crate::tcflag_t = 0x800; +pub const ECHOKE: crate::tcflag_t = 0x800; +pub const ECHOE: crate::tcflag_t = 0x10; +pub const ECHOK: crate::tcflag_t = 0x20; +pub const ECHONL: crate::tcflag_t = 0x40; +pub const ECHOPRT: crate::tcflag_t = 0x400; +pub const ECHOCTL: crate::tcflag_t = 0x200; +pub const ISIG: crate::tcflag_t = 0x1; +pub const ICANON: crate::tcflag_t = 0x2; +pub const PENDIN: crate::tcflag_t = 0x4000; +pub const NOFLSH: crate::tcflag_t = 0x80; +pub const CIBAUD: crate::tcflag_t = 0o02003600000; +pub const CBAUDEX: crate::tcflag_t = 0x00001000; +pub const VSWTC: usize = 7; +pub const OLCUC: crate::tcflag_t = 0o000002; +pub const NLDLY: crate::tcflag_t = 0o000400; +pub const CRDLY: crate::tcflag_t = 0o003000; +pub const TABDLY: crate::tcflag_t = 0o014000; +pub const BSDLY: crate::tcflag_t = 0o020000; +pub const FFDLY: crate::tcflag_t = 0o100000; +pub const VTDLY: crate::tcflag_t = 0o040000; +pub const XTABS: crate::tcflag_t = 0o014000; + +pub const B0: crate::speed_t = 0o000000; +pub const B50: crate::speed_t = 0o000001; +pub const B75: crate::speed_t = 0o000002; +pub const B110: crate::speed_t = 0o000003; +pub const B134: crate::speed_t = 0o000004; +pub const B150: crate::speed_t = 0o000005; +pub const B200: crate::speed_t = 0o000006; +pub const B300: crate::speed_t = 0o000007; +pub const B600: crate::speed_t = 0o000010; +pub const B1200: crate::speed_t = 0o000011; +pub const B1800: crate::speed_t = 0o000012; +pub const B2400: crate::speed_t = 0o000013; +pub const B4800: crate::speed_t = 0o000014; +pub const B9600: crate::speed_t = 0o000015; +pub const B19200: crate::speed_t = 0o000016; +pub const B38400: crate::speed_t = 0o000017; +pub const EXTA: crate::speed_t = B19200; +pub const EXTB: crate::speed_t = B38400; +pub const B57600: crate::speed_t = 0x1001; +pub const B115200: crate::speed_t = 0x1002; +pub const B230400: crate::speed_t = 0x1003; +pub const B460800: crate::speed_t = 0x1004; +pub const B76800: crate::speed_t = 0x1005; +pub const B153600: crate::speed_t = 0x1006; +pub const B307200: crate::speed_t = 0x1007; +pub const B614400: crate::speed_t = 0x1008; +pub const B921600: crate::speed_t = 0x1009; +pub const B500000: crate::speed_t = 0x100a; +pub const B576000: crate::speed_t = 0x100b; +pub const B1000000: crate::speed_t = 0x100c; +pub const B1152000: crate::speed_t = 0x100d; +pub const B1500000: crate::speed_t = 0x100e; +pub const B2000000: crate::speed_t = 0x100f; + +pub const VEOL: usize = 5; +pub const VEOL2: usize = 6; +pub const VMIN: usize = 4; +pub const IEXTEN: crate::tcflag_t = 0x8000; +pub const TOSTOP: crate::tcflag_t = 0x100; +pub const FLUSHO: crate::tcflag_t = 0x1000; +pub const EXTPROC: crate::tcflag_t = 0x10000; + +pub const SYS_restart_syscall: c_long = 0; +pub const SYS_exit: c_long = 1; +pub const SYS_fork: c_long = 2; +pub const SYS_read: c_long = 3; +pub const SYS_write: c_long = 4; +pub const SYS_open: c_long = 5; +pub const SYS_close: c_long = 6; +pub const SYS_wait4: c_long = 7; +pub const SYS_creat: c_long = 8; +pub const SYS_link: c_long = 9; +pub const SYS_unlink: c_long = 10; +pub const SYS_execv: c_long = 11; +pub const SYS_chdir: c_long = 12; +pub const SYS_chown: c_long = 13; +pub const SYS_mknod: c_long = 14; +pub const SYS_chmod: c_long = 15; +pub const SYS_lchown: c_long = 16; +pub const SYS_brk: c_long = 17; +pub const SYS_perfctr: c_long = 18; +pub const SYS_lseek: c_long = 19; +pub const SYS_getpid: c_long = 20; +pub const SYS_capget: c_long = 21; 
+pub const SYS_capset: c_long = 22; +pub const SYS_setuid: c_long = 23; +pub const SYS_getuid: c_long = 24; +pub const SYS_vmsplice: c_long = 25; +pub const SYS_ptrace: c_long = 26; +pub const SYS_alarm: c_long = 27; +pub const SYS_sigaltstack: c_long = 28; +pub const SYS_pause: c_long = 29; +pub const SYS_utime: c_long = 30; +pub const SYS_access: c_long = 33; +pub const SYS_nice: c_long = 34; +pub const SYS_sync: c_long = 36; +pub const SYS_kill: c_long = 37; +pub const SYS_stat: c_long = 38; +pub const SYS_sendfile: c_long = 39; +pub const SYS_lstat: c_long = 40; +pub const SYS_dup: c_long = 41; +pub const SYS_pipe: c_long = 42; +pub const SYS_times: c_long = 43; +pub const SYS_umount2: c_long = 45; +pub const SYS_setgid: c_long = 46; +pub const SYS_getgid: c_long = 47; +pub const SYS_signal: c_long = 48; +pub const SYS_geteuid: c_long = 49; +pub const SYS_getegid: c_long = 50; +pub const SYS_acct: c_long = 51; +pub const SYS_memory_ordering: c_long = 52; +pub const SYS_ioctl: c_long = 54; +pub const SYS_reboot: c_long = 55; +pub const SYS_symlink: c_long = 57; +pub const SYS_readlink: c_long = 58; +pub const SYS_execve: c_long = 59; +pub const SYS_umask: c_long = 60; +pub const SYS_chroot: c_long = 61; +pub const SYS_fstat: c_long = 62; +pub const SYS_fstat64: c_long = 63; +pub const SYS_getpagesize: c_long = 64; +pub const SYS_msync: c_long = 65; +pub const SYS_vfork: c_long = 66; +pub const SYS_pread64: c_long = 67; +pub const SYS_pwrite64: c_long = 68; +pub const SYS_mmap: c_long = 71; +pub const SYS_munmap: c_long = 73; +pub const SYS_mprotect: c_long = 74; +pub const SYS_madvise: c_long = 75; +pub const SYS_vhangup: c_long = 76; +pub const SYS_mincore: c_long = 78; +pub const SYS_getgroups: c_long = 79; +pub const SYS_setgroups: c_long = 80; +pub const SYS_getpgrp: c_long = 81; +pub const SYS_setitimer: c_long = 83; +pub const SYS_swapon: c_long = 85; +pub const SYS_getitimer: c_long = 86; +pub const SYS_sethostname: c_long = 88; +pub const SYS_dup2: c_long = 90; +pub const SYS_fcntl: c_long = 92; +pub const SYS_select: c_long = 93; +pub const SYS_fsync: c_long = 95; +pub const SYS_setpriority: c_long = 96; +pub const SYS_socket: c_long = 97; +pub const SYS_connect: c_long = 98; +pub const SYS_accept: c_long = 99; +pub const SYS_getpriority: c_long = 100; +pub const SYS_rt_sigreturn: c_long = 101; +pub const SYS_rt_sigaction: c_long = 102; +pub const SYS_rt_sigprocmask: c_long = 103; +pub const SYS_rt_sigpending: c_long = 104; +pub const SYS_rt_sigtimedwait: c_long = 105; +pub const SYS_rt_sigqueueinfo: c_long = 106; +pub const SYS_rt_sigsuspend: c_long = 107; +pub const SYS_setresuid: c_long = 108; +pub const SYS_getresuid: c_long = 109; +pub const SYS_setresgid: c_long = 110; +pub const SYS_getresgid: c_long = 111; +pub const SYS_recvmsg: c_long = 113; +pub const SYS_sendmsg: c_long = 114; +pub const SYS_gettimeofday: c_long = 116; +pub const SYS_getrusage: c_long = 117; +pub const SYS_getsockopt: c_long = 118; +pub const SYS_getcwd: c_long = 119; +pub const SYS_readv: c_long = 120; +pub const SYS_writev: c_long = 121; +pub const SYS_settimeofday: c_long = 122; +pub const SYS_fchown: c_long = 123; +pub const SYS_fchmod: c_long = 124; +pub const SYS_recvfrom: c_long = 125; +pub const SYS_setreuid: c_long = 126; +pub const SYS_setregid: c_long = 127; +pub const SYS_rename: c_long = 128; +pub const SYS_truncate: c_long = 129; +pub const SYS_ftruncate: c_long = 130; +pub const SYS_flock: c_long = 131; +pub const SYS_lstat64: c_long = 132; +pub const SYS_sendto: c_long = 133; +pub 
const SYS_shutdown: c_long = 134; +pub const SYS_socketpair: c_long = 135; +pub const SYS_mkdir: c_long = 136; +pub const SYS_rmdir: c_long = 137; +pub const SYS_utimes: c_long = 138; +pub const SYS_stat64: c_long = 139; +pub const SYS_sendfile64: c_long = 140; +pub const SYS_getpeername: c_long = 141; +pub const SYS_futex: c_long = 142; +pub const SYS_gettid: c_long = 143; +pub const SYS_getrlimit: c_long = 144; +pub const SYS_setrlimit: c_long = 145; +pub const SYS_pivot_root: c_long = 146; +pub const SYS_prctl: c_long = 147; +pub const SYS_pciconfig_read: c_long = 148; +pub const SYS_pciconfig_write: c_long = 149; +pub const SYS_getsockname: c_long = 150; +pub const SYS_inotify_init: c_long = 151; +pub const SYS_inotify_add_watch: c_long = 152; +pub const SYS_poll: c_long = 153; +pub const SYS_getdents64: c_long = 154; +pub const SYS_inotify_rm_watch: c_long = 156; +pub const SYS_statfs: c_long = 157; +pub const SYS_fstatfs: c_long = 158; +pub const SYS_umount: c_long = 159; +pub const SYS_sched_set_affinity: c_long = 160; +pub const SYS_sched_get_affinity: c_long = 161; +pub const SYS_getdomainname: c_long = 162; +pub const SYS_setdomainname: c_long = 163; +pub const SYS_utrap_install: c_long = 164; +pub const SYS_quotactl: c_long = 165; +pub const SYS_set_tid_address: c_long = 166; +pub const SYS_mount: c_long = 167; +pub const SYS_ustat: c_long = 168; +pub const SYS_setxattr: c_long = 169; +pub const SYS_lsetxattr: c_long = 170; +pub const SYS_fsetxattr: c_long = 171; +pub const SYS_getxattr: c_long = 172; +pub const SYS_lgetxattr: c_long = 173; +pub const SYS_getdents: c_long = 174; +pub const SYS_setsid: c_long = 175; +pub const SYS_fchdir: c_long = 176; +pub const SYS_fgetxattr: c_long = 177; +pub const SYS_listxattr: c_long = 178; +pub const SYS_llistxattr: c_long = 179; +pub const SYS_flistxattr: c_long = 180; +pub const SYS_removexattr: c_long = 181; +pub const SYS_lremovexattr: c_long = 182; +pub const SYS_sigpending: c_long = 183; +#[deprecated(since = "0.2.70", note = "Functional up to 2.6 kernel")] +pub const SYS_query_module: c_long = 184; +pub const SYS_setpgid: c_long = 185; +pub const SYS_fremovexattr: c_long = 186; +pub const SYS_tkill: c_long = 187; +pub const SYS_exit_group: c_long = 188; +pub const SYS_uname: c_long = 189; +pub const SYS_init_module: c_long = 190; +pub const SYS_personality: c_long = 191; +pub const SYS_remap_file_pages: c_long = 192; +pub const SYS_epoll_create: c_long = 193; +pub const SYS_epoll_ctl: c_long = 194; +pub const SYS_epoll_wait: c_long = 195; +pub const SYS_ioprio_set: c_long = 196; +pub const SYS_getppid: c_long = 197; +pub const SYS_sigaction: c_long = 198; +pub const SYS_sgetmask: c_long = 199; +pub const SYS_ssetmask: c_long = 200; +pub const SYS_sigsuspend: c_long = 201; +pub const SYS_oldlstat: c_long = 202; +pub const SYS_uselib: c_long = 203; +pub const SYS_readdir: c_long = 204; +pub const SYS_readahead: c_long = 205; +pub const SYS_socketcall: c_long = 206; +pub const SYS_syslog: c_long = 207; +pub const SYS_lookup_dcookie: c_long = 208; +pub const SYS_fadvise64: c_long = 209; +pub const SYS_fadvise64_64: c_long = 210; +pub const SYS_tgkill: c_long = 211; +pub const SYS_waitpid: c_long = 212; +pub const SYS_swapoff: c_long = 213; +pub const SYS_sysinfo: c_long = 214; +pub const SYS_ipc: c_long = 215; +pub const SYS_sigreturn: c_long = 216; +pub const SYS_clone: c_long = 217; +pub const SYS_ioprio_get: c_long = 218; +pub const SYS_adjtimex: c_long = 219; +pub const SYS_sigprocmask: c_long = 220; +#[deprecated(since = "0.2.70", 
note = "Functional up to 2.6 kernel")] +pub const SYS_create_module: c_long = 221; +pub const SYS_delete_module: c_long = 222; +#[deprecated(since = "0.2.70", note = "Functional up to 2.6 kernel")] +pub const SYS_get_kernel_syms: c_long = 223; +pub const SYS_getpgid: c_long = 224; +pub const SYS_bdflush: c_long = 225; +pub const SYS_sysfs: c_long = 226; +pub const SYS_afs_syscall: c_long = 227; +pub const SYS_setfsuid: c_long = 228; +pub const SYS_setfsgid: c_long = 229; +pub const SYS__newselect: c_long = 230; +pub const SYS_splice: c_long = 232; +pub const SYS_stime: c_long = 233; +pub const SYS_statfs64: c_long = 234; +pub const SYS_fstatfs64: c_long = 235; +pub const SYS__llseek: c_long = 236; +pub const SYS_mlock: c_long = 237; +pub const SYS_munlock: c_long = 238; +pub const SYS_mlockall: c_long = 239; +pub const SYS_munlockall: c_long = 240; +pub const SYS_sched_setparam: c_long = 241; +pub const SYS_sched_getparam: c_long = 242; +pub const SYS_sched_setscheduler: c_long = 243; +pub const SYS_sched_getscheduler: c_long = 244; +pub const SYS_sched_yield: c_long = 245; +pub const SYS_sched_get_priority_max: c_long = 246; +pub const SYS_sched_get_priority_min: c_long = 247; +pub const SYS_sched_rr_get_interval: c_long = 248; +pub const SYS_nanosleep: c_long = 249; +pub const SYS_mremap: c_long = 250; +pub const SYS__sysctl: c_long = 251; +pub const SYS_getsid: c_long = 252; +pub const SYS_fdatasync: c_long = 253; +pub const SYS_nfsservctl: c_long = 254; +pub const SYS_sync_file_range: c_long = 255; +pub const SYS_clock_settime: c_long = 256; +pub const SYS_clock_gettime: c_long = 257; +pub const SYS_clock_getres: c_long = 258; +pub const SYS_clock_nanosleep: c_long = 259; +pub const SYS_sched_getaffinity: c_long = 260; +pub const SYS_sched_setaffinity: c_long = 261; +pub const SYS_timer_settime: c_long = 262; +pub const SYS_timer_gettime: c_long = 263; +pub const SYS_timer_getoverrun: c_long = 264; +pub const SYS_timer_delete: c_long = 265; +pub const SYS_timer_create: c_long = 266; +pub const SYS_io_setup: c_long = 268; +pub const SYS_io_destroy: c_long = 269; +pub const SYS_io_submit: c_long = 270; +pub const SYS_io_cancel: c_long = 271; +pub const SYS_io_getevents: c_long = 272; +pub const SYS_mq_open: c_long = 273; +pub const SYS_mq_unlink: c_long = 274; +pub const SYS_mq_timedsend: c_long = 275; +pub const SYS_mq_timedreceive: c_long = 276; +pub const SYS_mq_notify: c_long = 277; +pub const SYS_mq_getsetattr: c_long = 278; +pub const SYS_waitid: c_long = 279; +pub const SYS_tee: c_long = 280; +pub const SYS_add_key: c_long = 281; +pub const SYS_request_key: c_long = 282; +pub const SYS_keyctl: c_long = 283; +pub const SYS_openat: c_long = 284; +pub const SYS_mkdirat: c_long = 285; +pub const SYS_mknodat: c_long = 286; +pub const SYS_fchownat: c_long = 287; +pub const SYS_futimesat: c_long = 288; +pub const SYS_fstatat64: c_long = 289; +pub const SYS_unlinkat: c_long = 290; +pub const SYS_renameat: c_long = 291; +pub const SYS_linkat: c_long = 292; +pub const SYS_symlinkat: c_long = 293; +pub const SYS_readlinkat: c_long = 294; +pub const SYS_fchmodat: c_long = 295; +pub const SYS_faccessat: c_long = 296; +pub const SYS_pselect6: c_long = 297; +pub const SYS_ppoll: c_long = 298; +pub const SYS_unshare: c_long = 299; +pub const SYS_set_robust_list: c_long = 300; +pub const SYS_get_robust_list: c_long = 301; +pub const SYS_migrate_pages: c_long = 302; +pub const SYS_mbind: c_long = 303; +pub const SYS_get_mempolicy: c_long = 304; +pub const SYS_set_mempolicy: c_long = 305; +pub const 
SYS_kexec_load: c_long = 306; +pub const SYS_move_pages: c_long = 307; +pub const SYS_getcpu: c_long = 308; +pub const SYS_epoll_pwait: c_long = 309; +pub const SYS_utimensat: c_long = 310; +pub const SYS_signalfd: c_long = 311; +pub const SYS_timerfd_create: c_long = 312; +pub const SYS_eventfd: c_long = 313; +pub const SYS_fallocate: c_long = 314; +pub const SYS_timerfd_settime: c_long = 315; +pub const SYS_timerfd_gettime: c_long = 316; +pub const SYS_signalfd4: c_long = 317; +pub const SYS_eventfd2: c_long = 318; +pub const SYS_epoll_create1: c_long = 319; +pub const SYS_dup3: c_long = 320; +pub const SYS_pipe2: c_long = 321; +pub const SYS_inotify_init1: c_long = 322; +pub const SYS_accept4: c_long = 323; +pub const SYS_preadv: c_long = 324; +pub const SYS_pwritev: c_long = 325; +pub const SYS_rt_tgsigqueueinfo: c_long = 326; +pub const SYS_perf_event_open: c_long = 327; +pub const SYS_recvmmsg: c_long = 328; +pub const SYS_fanotify_init: c_long = 329; +pub const SYS_fanotify_mark: c_long = 330; +pub const SYS_prlimit64: c_long = 331; +pub const SYS_name_to_handle_at: c_long = 332; +pub const SYS_open_by_handle_at: c_long = 333; +pub const SYS_clock_adjtime: c_long = 334; +pub const SYS_syncfs: c_long = 335; +pub const SYS_sendmmsg: c_long = 336; +pub const SYS_setns: c_long = 337; +pub const SYS_process_vm_readv: c_long = 338; +pub const SYS_process_vm_writev: c_long = 339; +pub const SYS_kern_features: c_long = 340; +pub const SYS_kcmp: c_long = 341; +pub const SYS_finit_module: c_long = 342; +pub const SYS_sched_setattr: c_long = 343; +pub const SYS_sched_getattr: c_long = 344; +pub const SYS_renameat2: c_long = 345; +pub const SYS_seccomp: c_long = 346; +pub const SYS_getrandom: c_long = 347; +pub const SYS_memfd_create: c_long = 348; +pub const SYS_bpf: c_long = 349; +pub const SYS_execveat: c_long = 350; +pub const SYS_membarrier: c_long = 351; +pub const SYS_userfaultfd: c_long = 352; +pub const SYS_bind: c_long = 353; +pub const SYS_listen: c_long = 354; +pub const SYS_setsockopt: c_long = 355; +pub const SYS_mlock2: c_long = 356; +pub const SYS_copy_file_range: c_long = 357; +pub const SYS_preadv2: c_long = 358; +pub const SYS_pwritev2: c_long = 359; +pub const SYS_statx: c_long = 360; +pub const SYS_rseq: c_long = 365; +pub const SYS_pidfd_send_signal: c_long = 424; +pub const SYS_io_uring_setup: c_long = 425; +pub const SYS_io_uring_enter: c_long = 426; +pub const SYS_io_uring_register: c_long = 427; +pub const SYS_open_tree: c_long = 428; +pub const SYS_move_mount: c_long = 429; +pub const SYS_fsopen: c_long = 430; +pub const SYS_fsconfig: c_long = 431; +pub const SYS_fsmount: c_long = 432; +pub const SYS_fspick: c_long = 433; +pub const SYS_pidfd_open: c_long = 434; +// Reserved in the kernel, but not actually implemented yet +pub const SYS_clone3: c_long = 435; +pub const SYS_close_range: c_long = 436; +pub const SYS_openat2: c_long = 437; +pub const SYS_pidfd_getfd: c_long = 438; +pub const SYS_faccessat2: c_long = 439; +pub const SYS_process_madvise: c_long = 440; +pub const SYS_epoll_pwait2: c_long = 441; +pub const SYS_mount_setattr: c_long = 442; +pub const SYS_quotactl_fd: c_long = 443; +pub const SYS_landlock_create_ruleset: c_long = 444; +pub const SYS_landlock_add_rule: c_long = 445; +pub const SYS_landlock_restrict_self: c_long = 446; +pub const SYS_memfd_secret: c_long = 447; +pub const SYS_process_mrelease: c_long = 448; +pub const SYS_futex_waitv: c_long = 449; +pub const SYS_set_mempolicy_home_node: c_long = 450; + +extern "C" { + pub fn sysctl( + name: 
*mut c_int, + namelen: c_int, + oldp: *mut c_void, + oldlenp: *mut size_t, + newp: *mut c_void, + newlen: size_t, + ) -> c_int; +} diff --git a/vendor/libc/src/unix/linux_like/linux/gnu/b64/x86_64/mod.rs b/vendor/libc/src/unix/linux_like/linux/gnu/b64/x86_64/mod.rs new file mode 100644 index 00000000000000..f4555ee4202308 --- /dev/null +++ b/vendor/libc/src/unix/linux_like/linux/gnu/b64/x86_64/mod.rs @@ -0,0 +1,809 @@ +//! x86_64-specific definitions for 64-bit linux-like values + +use crate::prelude::*; +use crate::{off64_t, off_t}; + +pub type wchar_t = i32; +pub type nlink_t = u64; +pub type blksize_t = i64; +pub type greg_t = i64; +pub type suseconds_t = i64; +pub type __u64 = c_ulonglong; +pub type __s64 = c_longlong; + +s! { + // FIXME(1.0): This should not implement `PartialEq` + #[allow(unpredictable_function_pointer_comparisons)] + pub struct sigaction { + pub sa_sigaction: crate::sighandler_t, + pub sa_mask: crate::sigset_t, + #[cfg(target_arch = "sparc64")] + __reserved0: c_int, + pub sa_flags: c_int, + pub sa_restorer: Option<extern "C" fn()>, + } + + pub struct statfs { + pub f_type: crate::__fsword_t, + pub f_bsize: crate::__fsword_t, + pub f_blocks: crate::fsblkcnt_t, + pub f_bfree: crate::fsblkcnt_t, + pub f_bavail: crate::fsblkcnt_t, + + pub f_files: crate::fsfilcnt_t, + pub f_ffree: crate::fsfilcnt_t, + pub f_fsid: crate::fsid_t, + + pub f_namelen: crate::__fsword_t, + pub f_frsize: crate::__fsword_t, + f_spare: [crate::__fsword_t; 5], + } + + pub struct flock { + pub l_type: c_short, + pub l_whence: c_short, + pub l_start: off_t, + pub l_len: off_t, + pub l_pid: crate::pid_t, + } + + pub struct flock64 { + pub l_type: c_short, + pub l_whence: c_short, + pub l_start: off64_t, + pub l_len: off64_t, + pub l_pid: crate::pid_t, + } + + pub struct siginfo_t { + pub si_signo: c_int, + pub si_errno: c_int, + pub si_code: c_int, + #[doc(hidden)] + #[deprecated( + since = "0.2.54", + note = "Please leave a comment on \ + https://github.com/rust-lang/libc/pull/1316 if you're using \ + this field" + )] + pub _pad: [c_int; 29], + _align: [u64; 0], + } + + pub struct stack_t { + pub ss_sp: *mut c_void, + pub ss_flags: c_int, + pub ss_size: size_t, + } + + pub struct stat { + pub st_dev: crate::dev_t, + pub st_ino: crate::ino_t, + pub st_nlink: crate::nlink_t, + pub st_mode: crate::mode_t, + pub st_uid: crate::uid_t, + pub st_gid: crate::gid_t, + __pad0: c_int, + pub st_rdev: crate::dev_t, + pub st_size: off_t, + pub st_blksize: crate::blksize_t, + pub st_blocks: crate::blkcnt_t, + pub st_atime: crate::time_t, + pub st_atime_nsec: i64, + pub st_mtime: crate::time_t, + pub st_mtime_nsec: i64, + pub st_ctime: crate::time_t, + pub st_ctime_nsec: i64, + __unused: [i64; 3], + } + + pub struct stat64 { + pub st_dev: crate::dev_t, + pub st_ino: crate::ino64_t, + pub st_nlink: crate::nlink_t, + pub st_mode: crate::mode_t, + pub st_uid: crate::uid_t, + pub st_gid: crate::gid_t, + __pad0: c_int, + pub st_rdev: crate::dev_t, + pub st_size: off_t, + pub st_blksize: crate::blksize_t, + pub st_blocks: crate::blkcnt64_t, + pub st_atime: crate::time_t, + pub st_atime_nsec: i64, + pub st_mtime: crate::time_t, + pub st_mtime_nsec: i64, + pub st_ctime: crate::time_t, + pub st_ctime_nsec: i64, + __reserved: [i64; 3], + } + + pub struct statfs64 { + pub f_type: crate::__fsword_t, + pub f_bsize: crate::__fsword_t, + pub f_blocks: u64, + pub f_bfree: u64, + pub f_bavail: u64, + pub f_files: u64, + pub f_ffree: u64, + pub f_fsid: crate::fsid_t, + pub f_namelen: crate::__fsword_t, + pub f_frsize: crate::__fsword_t, + pub
f_flags: crate::__fsword_t, + pub f_spare: [crate::__fsword_t; 4], + } + + pub struct statvfs64 { + pub f_bsize: c_ulong, + pub f_frsize: c_ulong, + pub f_blocks: u64, + pub f_bfree: u64, + pub f_bavail: u64, + pub f_files: u64, + pub f_ffree: u64, + pub f_favail: u64, + pub f_fsid: c_ulong, + pub f_flag: c_ulong, + pub f_namemax: c_ulong, + __f_spare: [c_int; 6], + } + + pub struct pthread_attr_t { + #[cfg(target_pointer_width = "32")] + __size: [u32; 8], + #[cfg(target_pointer_width = "64")] + __size: [u64; 7], + } + + pub struct _libc_fpxreg { + pub significand: [u16; 4], + pub exponent: u16, + __private: [u16; 3], + } + + pub struct _libc_xmmreg { + pub element: [u32; 4], + } + + pub struct _libc_fpstate { + pub cwd: u16, + pub swd: u16, + pub ftw: u16, + pub fop: u16, + pub rip: u64, + pub rdp: u64, + pub mxcsr: u32, + pub mxcr_mask: u32, + pub _st: [_libc_fpxreg; 8], + pub _xmm: [_libc_xmmreg; 16], + __private: [u64; 12], + } + + pub struct user_regs_struct { + pub r15: c_ulonglong, + pub r14: c_ulonglong, + pub r13: c_ulonglong, + pub r12: c_ulonglong, + pub rbp: c_ulonglong, + pub rbx: c_ulonglong, + pub r11: c_ulonglong, + pub r10: c_ulonglong, + pub r9: c_ulonglong, + pub r8: c_ulonglong, + pub rax: c_ulonglong, + pub rcx: c_ulonglong, + pub rdx: c_ulonglong, + pub rsi: c_ulonglong, + pub rdi: c_ulonglong, + pub orig_rax: c_ulonglong, + pub rip: c_ulonglong, + pub cs: c_ulonglong, + pub eflags: c_ulonglong, + pub rsp: c_ulonglong, + pub ss: c_ulonglong, + pub fs_base: c_ulonglong, + pub gs_base: c_ulonglong, + pub ds: c_ulonglong, + pub es: c_ulonglong, + pub fs: c_ulonglong, + pub gs: c_ulonglong, + } + + pub struct user { + pub regs: user_regs_struct, + pub u_fpvalid: c_int, + pub i387: user_fpregs_struct, + pub u_tsize: c_ulonglong, + pub u_dsize: c_ulonglong, + pub u_ssize: c_ulonglong, + pub start_code: c_ulonglong, + pub start_stack: c_ulonglong, + pub signal: c_longlong, + __reserved: c_int, + #[cfg(target_pointer_width = "32")] + __pad1: u32, + pub u_ar0: *mut user_regs_struct, + #[cfg(target_pointer_width = "32")] + __pad2: u32, + pub u_fpstate: *mut user_fpregs_struct, + pub magic: c_ulonglong, + pub u_comm: [c_char; 32], + pub u_debugreg: [c_ulonglong; 8], + } + + pub struct mcontext_t { + pub gregs: [greg_t; 23], + pub fpregs: *mut _libc_fpstate, + __private: [u64; 8], + } + + pub struct ipc_perm { + pub __key: crate::key_t, + pub uid: crate::uid_t, + pub gid: crate::gid_t, + pub cuid: crate::uid_t, + pub cgid: crate::gid_t, + pub mode: c_ushort, + __pad1: c_ushort, + pub __seq: c_ushort, + __pad2: c_ushort, + __unused1: u64, + __unused2: u64, + } + + pub struct shmid_ds { + pub shm_perm: crate::ipc_perm, + pub shm_segsz: size_t, + pub shm_atime: crate::time_t, + pub shm_dtime: crate::time_t, + pub shm_ctime: crate::time_t, + pub shm_cpid: crate::pid_t, + pub shm_lpid: crate::pid_t, + pub shm_nattch: crate::shmatt_t, + __unused4: u64, + __unused5: u64, + } + + pub struct ptrace_rseq_configuration { + pub rseq_abi_pointer: crate::__u64, + pub rseq_abi_size: crate::__u32, + pub signature: crate::__u32, + pub flags: crate::__u32, + pub pad: crate::__u32, + } + + #[repr(align(8))] + pub struct clone_args { + pub flags: c_ulonglong, + pub pidfd: c_ulonglong, + pub child_tid: c_ulonglong, + pub parent_tid: c_ulonglong, + pub exit_signal: c_ulonglong, + pub stack: c_ulonglong, + pub stack_size: c_ulonglong, + pub tls: c_ulonglong, + pub set_tid: c_ulonglong, + pub set_tid_size: c_ulonglong, + pub cgroup: c_ulonglong, + } +} + +s_no_extra_traits! 
{ + pub struct user_fpregs_struct { + pub cwd: c_ushort, + pub swd: c_ushort, + pub ftw: c_ushort, + pub fop: c_ushort, + pub rip: c_ulonglong, + pub rdp: c_ulonglong, + pub mxcsr: c_uint, + pub mxcr_mask: c_uint, + pub st_space: [c_uint; 32], + pub xmm_space: [c_uint; 64], + padding: [c_uint; 24], + } + + pub struct ucontext_t { + pub uc_flags: c_ulong, + pub uc_link: *mut ucontext_t, + pub uc_stack: crate::stack_t, + pub uc_mcontext: mcontext_t, + pub uc_sigmask: crate::sigset_t, + __private: [u8; 512], + // FIXME(glibc): the shadow stack field requires glibc >= 2.28. + // Re-add once we drop compatibility with glibc versions older than + // 2.28. + // + // __ssp: [c_ulonglong; 4], + } + + #[repr(align(16))] + pub struct max_align_t { + priv_: [f64; 4], + } +} + +cfg_if! { + if #[cfg(feature = "extra_traits")] { + impl PartialEq for user_fpregs_struct { + fn eq(&self, other: &user_fpregs_struct) -> bool { + self.cwd == other.cwd + && self.swd == other.swd + && self.ftw == other.ftw + && self.fop == other.fop + && self.rip == other.rip + && self.rdp == other.rdp + && self.mxcsr == other.mxcsr + && self.mxcr_mask == other.mxcr_mask + && self.st_space == other.st_space + && self + .xmm_space + .iter() + .zip(other.xmm_space.iter()) + .all(|(a, b)| a == b) + // Ignore padding field + } + } + + impl Eq for user_fpregs_struct {} + + impl hash::Hash for user_fpregs_struct { + fn hash<H: hash::Hasher>(&self, state: &mut H) { + self.cwd.hash(state); + self.ftw.hash(state); + self.fop.hash(state); + self.rip.hash(state); + self.rdp.hash(state); + self.mxcsr.hash(state); + self.mxcr_mask.hash(state); + self.st_space.hash(state); + self.xmm_space.hash(state); + // Ignore padding field + } + } + + impl PartialEq for ucontext_t { + fn eq(&self, other: &ucontext_t) -> bool { + self.uc_flags == other.uc_flags + && self.uc_link == other.uc_link + && self.uc_stack == other.uc_stack + && self.uc_mcontext == other.uc_mcontext + && self.uc_sigmask == other.uc_sigmask + // Ignore __private field + } + } + + impl Eq for ucontext_t {} + + impl hash::Hash for ucontext_t { + fn hash<H: hash::Hasher>(&self, state: &mut H) { + self.uc_flags.hash(state); + self.uc_link.hash(state); + self.uc_stack.hash(state); + self.uc_mcontext.hash(state); + self.uc_sigmask.hash(state); + // Ignore __private field + } + } + } +} + +pub const POSIX_FADV_DONTNEED: c_int = 4; +pub const POSIX_FADV_NOREUSE: c_int = 5; + +pub const VEOF: usize = 4; +pub const RTLD_DEEPBIND: c_int = 0x8; +pub const RTLD_GLOBAL: c_int = 0x100; +pub const RTLD_NOLOAD: c_int = 0x4; + +pub const O_APPEND: c_int = 1024; +pub const O_CREAT: c_int = 64; +pub const O_EXCL: c_int = 128; +pub const O_NOCTTY: c_int = 256; +pub const O_NONBLOCK: c_int = 2048; +pub const O_SYNC: c_int = 1052672; +pub const O_RSYNC: c_int = 1052672; +pub const O_DSYNC: c_int = 4096; +pub const O_FSYNC: c_int = 0x101000; +pub const O_NOATIME: c_int = 0o1000000; +pub const O_PATH: c_int = 0o10000000; +pub const O_TMPFILE: c_int = 0o20000000 | O_DIRECTORY; + +pub const MADV_SOFT_OFFLINE: c_int = 101; +pub const MAP_GROWSDOWN: c_int = 0x0100; + +pub const EDEADLK: c_int = 35; +pub const ENAMETOOLONG: c_int = 36; +pub const ENOLCK: c_int = 37; +pub const ENOSYS: c_int = 38; +pub const ENOTEMPTY: c_int = 39; +pub const ELOOP: c_int = 40; +pub const ENOMSG: c_int = 42; +pub const EIDRM: c_int = 43; +pub const ECHRNG: c_int = 44; +pub const EL2NSYNC: c_int = 45; +pub const EL3HLT: c_int = 46; +pub const EL3RST: c_int = 47; +pub const ELNRNG: c_int = 48; +pub const EUNATCH: c_int = 49; +pub const ENOCSI: c_int = 50; +pub
const EL2HLT: c_int = 51; +pub const EBADE: c_int = 52; +pub const EBADR: c_int = 53; +pub const EXFULL: c_int = 54; +pub const ENOANO: c_int = 55; +pub const EBADRQC: c_int = 56; +pub const EBADSLT: c_int = 57; +pub const EMULTIHOP: c_int = 72; +pub const EOVERFLOW: c_int = 75; +pub const ENOTUNIQ: c_int = 76; +pub const EBADFD: c_int = 77; +pub const EBADMSG: c_int = 74; +pub const EREMCHG: c_int = 78; +pub const ELIBACC: c_int = 79; +pub const ELIBBAD: c_int = 80; +pub const ELIBSCN: c_int = 81; +pub const ELIBMAX: c_int = 82; +pub const ELIBEXEC: c_int = 83; +pub const EILSEQ: c_int = 84; +pub const ERESTART: c_int = 85; +pub const ESTRPIPE: c_int = 86; +pub const EUSERS: c_int = 87; +pub const ENOTSOCK: c_int = 88; +pub const EDESTADDRREQ: c_int = 89; +pub const EMSGSIZE: c_int = 90; +pub const EPROTOTYPE: c_int = 91; +pub const ENOPROTOOPT: c_int = 92; +pub const EPROTONOSUPPORT: c_int = 93; +pub const ESOCKTNOSUPPORT: c_int = 94; +pub const EOPNOTSUPP: c_int = 95; +pub const EPFNOSUPPORT: c_int = 96; +pub const EAFNOSUPPORT: c_int = 97; +pub const EADDRINUSE: c_int = 98; +pub const EADDRNOTAVAIL: c_int = 99; +pub const ENETDOWN: c_int = 100; +pub const ENETUNREACH: c_int = 101; +pub const ENETRESET: c_int = 102; +pub const ECONNABORTED: c_int = 103; +pub const ECONNRESET: c_int = 104; +pub const ENOBUFS: c_int = 105; +pub const EISCONN: c_int = 106; +pub const ENOTCONN: c_int = 107; +pub const ESHUTDOWN: c_int = 108; +pub const ETOOMANYREFS: c_int = 109; +pub const ETIMEDOUT: c_int = 110; +pub const ECONNREFUSED: c_int = 111; +pub const EHOSTDOWN: c_int = 112; +pub const EHOSTUNREACH: c_int = 113; +pub const EALREADY: c_int = 114; +pub const EINPROGRESS: c_int = 115; +pub const ESTALE: c_int = 116; +pub const EDQUOT: c_int = 122; +pub const ENOMEDIUM: c_int = 123; +pub const EMEDIUMTYPE: c_int = 124; +pub const ECANCELED: c_int = 125; +pub const ENOKEY: c_int = 126; +pub const EKEYEXPIRED: c_int = 127; +pub const EKEYREVOKED: c_int = 128; +pub const EKEYREJECTED: c_int = 129; +pub const EOWNERDEAD: c_int = 130; +pub const ENOTRECOVERABLE: c_int = 131; +pub const EHWPOISON: c_int = 133; +pub const ERFKILL: c_int = 132; + +pub const SOCK_STREAM: c_int = 1; +pub const SOCK_DGRAM: c_int = 2; + +pub const SA_ONSTACK: c_int = 0x08000000; +pub const SA_SIGINFO: c_int = 0x00000004; +pub const SA_NOCLDWAIT: c_int = 0x00000002; + +pub const SIGTTIN: c_int = 21; +pub const SIGTTOU: c_int = 22; +pub const SIGXCPU: c_int = 24; +pub const SIGXFSZ: c_int = 25; +pub const SIGVTALRM: c_int = 26; +pub const SIGPROF: c_int = 27; +pub const SIGWINCH: c_int = 28; +pub const SIGCHLD: c_int = 17; +pub const SIGBUS: c_int = 7; +pub const SIGUSR1: c_int = 10; +pub const SIGUSR2: c_int = 12; +pub const SIGCONT: c_int = 18; +pub const SIGSTOP: c_int = 19; +pub const SIGTSTP: c_int = 20; +pub const SIGURG: c_int = 23; +pub const SIGIO: c_int = 29; +pub const SIGSYS: c_int = 31; +pub const SIGSTKFLT: c_int = 16; +#[deprecated(since = "0.2.55", note = "Use SIGSYS instead")] +pub const SIGUNUSED: c_int = 31; +pub const SIGPOLL: c_int = 29; +pub const SIGPWR: c_int = 30; +pub const SIG_SETMASK: c_int = 2; +pub const SIG_BLOCK: c_int = 0x000000; +pub const SIG_UNBLOCK: c_int = 0x01; + +pub const POLLWRNORM: c_short = 0x100; +pub const POLLWRBAND: c_short = 0x200; + +pub const O_ASYNC: c_int = 0x2000; +pub const O_NDELAY: c_int = 0x800; + +pub const PTRACE_DETACH: c_uint = 17; +pub const PTRACE_GET_RSEQ_CONFIGURATION: c_uint = 0x420f; + +pub const EFD_NONBLOCK: c_int = 0x800; + +pub const F_GETLK: c_int = 5; +pub 
const F_GETOWN: c_int = 9; +pub const F_SETOWN: c_int = 8; +pub const F_SETLK: c_int = 6; +pub const F_SETLKW: c_int = 7; +pub const F_OFD_GETLK: c_int = 36; +pub const F_OFD_SETLK: c_int = 37; +pub const F_OFD_SETLKW: c_int = 38; + +pub const F_RDLCK: c_int = 0; +pub const F_WRLCK: c_int = 1; +pub const F_UNLCK: c_int = 2; + +pub const SFD_NONBLOCK: c_int = 0x0800; + +pub const TCSANOW: c_int = 0; +pub const TCSADRAIN: c_int = 1; +pub const TCSAFLUSH: c_int = 2; + +pub const SFD_CLOEXEC: c_int = 0x080000; + +pub const NCCS: usize = 32; + +pub const O_TRUNC: c_int = 512; + +pub const O_CLOEXEC: c_int = 0x80000; + +pub const EBFONT: c_int = 59; +pub const ENOSTR: c_int = 60; +pub const ENODATA: c_int = 61; +pub const ETIME: c_int = 62; +pub const ENOSR: c_int = 63; +pub const ENONET: c_int = 64; +pub const ENOPKG: c_int = 65; +pub const EREMOTE: c_int = 66; +pub const ENOLINK: c_int = 67; +pub const EADV: c_int = 68; +pub const ESRMNT: c_int = 69; +pub const ECOMM: c_int = 70; +pub const EPROTO: c_int = 71; +pub const EDOTDOT: c_int = 73; + +pub const SA_NODEFER: c_int = 0x40000000; +pub const SA_RESETHAND: c_int = 0x80000000; +pub const SA_RESTART: c_int = 0x10000000; +pub const SA_NOCLDSTOP: c_int = 0x00000001; + +pub const EPOLL_CLOEXEC: c_int = 0x80000; + +pub const EFD_CLOEXEC: c_int = 0x80000; + +pub const __SIZEOF_PTHREAD_CONDATTR_T: usize = 4; +pub const __SIZEOF_PTHREAD_MUTEXATTR_T: usize = 4; +pub const __SIZEOF_PTHREAD_BARRIERATTR_T: usize = 4; + +pub const O_DIRECT: c_int = 0x4000; +pub const O_DIRECTORY: c_int = 0x10000; +pub const O_NOFOLLOW: c_int = 0x20000; + +pub const MAP_HUGETLB: c_int = 0x040000; +pub const MAP_LOCKED: c_int = 0x02000; +pub const MAP_NORESERVE: c_int = 0x04000; +pub const MAP_32BIT: c_int = 0x0040; +pub const MAP_ANON: c_int = 0x0020; +pub const MAP_ANONYMOUS: c_int = 0x0020; +pub const MAP_DENYWRITE: c_int = 0x0800; +pub const MAP_EXECUTABLE: c_int = 0x01000; +pub const MAP_POPULATE: c_int = 0x08000; +pub const MAP_NONBLOCK: c_int = 0x010000; +pub const MAP_STACK: c_int = 0x020000; +pub const MAP_SYNC: c_int = 0x080000; + +pub const EDEADLOCK: c_int = 35; +pub const EUCLEAN: c_int = 117; +pub const ENOTNAM: c_int = 118; +pub const ENAVAIL: c_int = 119; +pub const EISNAM: c_int = 120; +pub const EREMOTEIO: c_int = 121; + +pub const PTRACE_GETFPREGS: c_uint = 14; +pub const PTRACE_SETFPREGS: c_uint = 15; +pub const PTRACE_GETFPXREGS: c_uint = 18; +pub const PTRACE_SETFPXREGS: c_uint = 19; +pub const PTRACE_GETREGS: c_uint = 12; +pub const PTRACE_SETREGS: c_uint = 13; +pub const PTRACE_PEEKSIGINFO_SHARED: c_uint = 1; +pub const PTRACE_SYSEMU: c_uint = 31; +pub const PTRACE_SYSEMU_SINGLESTEP: c_uint = 32; + +pub const PR_GET_SPECULATION_CTRL: c_int = 52; +pub const PR_SET_SPECULATION_CTRL: c_int = 53; +pub const PR_SPEC_NOT_AFFECTED: c_uint = 0; +pub const PR_SPEC_PRCTL: c_uint = 1 << 0; +pub const PR_SPEC_ENABLE: c_uint = 1 << 1; +pub const PR_SPEC_DISABLE: c_uint = 1 << 2; +pub const PR_SPEC_FORCE_DISABLE: c_uint = 1 << 3; +pub const PR_SPEC_DISABLE_NOEXEC: c_uint = 1 << 4; +pub const PR_SPEC_STORE_BYPASS: c_int = 0; +pub const PR_SPEC_INDIRECT_BRANCH: c_int = 1; +// FIXME(linux): perharps for later +//pub const PR_SPEC_L1D_FLUSH: c_int = 2; + +pub const MCL_CURRENT: c_int = 0x0001; +pub const MCL_FUTURE: c_int = 0x0002; +pub const MCL_ONFAULT: c_int = 0x0004; + +pub const SIGSTKSZ: size_t = 8192; +pub const MINSIGSTKSZ: size_t = 2048; +pub const CBAUD: crate::tcflag_t = 0o0010017; +pub const TAB1: crate::tcflag_t = 0x00000800; +pub const TAB2: 
crate::tcflag_t = 0x00001000; +pub const TAB3: crate::tcflag_t = 0x00001800; +pub const CR1: crate::tcflag_t = 0x00000200; +pub const CR2: crate::tcflag_t = 0x00000400; +pub const CR3: crate::tcflag_t = 0x00000600; +pub const FF1: crate::tcflag_t = 0x00008000; +pub const BS1: crate::tcflag_t = 0x00002000; +pub const VT1: crate::tcflag_t = 0x00004000; +pub const VWERASE: usize = 14; +pub const VREPRINT: usize = 12; +pub const VSUSP: usize = 10; +pub const VSTART: usize = 8; +pub const VSTOP: usize = 9; +pub const VDISCARD: usize = 13; +pub const VTIME: usize = 5; +pub const IXON: crate::tcflag_t = 0x00000400; +pub const IXOFF: crate::tcflag_t = 0x00001000; +pub const ONLCR: crate::tcflag_t = 0x4; +pub const CSIZE: crate::tcflag_t = 0x00000030; +pub const CS6: crate::tcflag_t = 0x00000010; +pub const CS7: crate::tcflag_t = 0x00000020; +pub const CS8: crate::tcflag_t = 0x00000030; +pub const CSTOPB: crate::tcflag_t = 0x00000040; +pub const CREAD: crate::tcflag_t = 0x00000080; +pub const PARENB: crate::tcflag_t = 0x00000100; +pub const PARODD: crate::tcflag_t = 0x00000200; +pub const HUPCL: crate::tcflag_t = 0x00000400; +pub const CLOCAL: crate::tcflag_t = 0x00000800; +pub const ECHOKE: crate::tcflag_t = 0x00000800; +pub const ECHOE: crate::tcflag_t = 0x00000010; +pub const ECHOK: crate::tcflag_t = 0x00000020; +pub const ECHONL: crate::tcflag_t = 0x00000040; +pub const ECHOPRT: crate::tcflag_t = 0x00000400; +pub const ECHOCTL: crate::tcflag_t = 0x00000200; +pub const ISIG: crate::tcflag_t = 0x00000001; +pub const ICANON: crate::tcflag_t = 0x00000002; +pub const PENDIN: crate::tcflag_t = 0x00004000; +pub const NOFLSH: crate::tcflag_t = 0x00000080; +pub const CIBAUD: crate::tcflag_t = 0o02003600000; +pub const CBAUDEX: crate::tcflag_t = 0o010000; +pub const VSWTC: usize = 7; +pub const OLCUC: crate::tcflag_t = 0o000002; +pub const NLDLY: crate::tcflag_t = 0o000400; +pub const CRDLY: crate::tcflag_t = 0o003000; +pub const TABDLY: crate::tcflag_t = 0o014000; +pub const BSDLY: crate::tcflag_t = 0o020000; +pub const FFDLY: crate::tcflag_t = 0o100000; +pub const VTDLY: crate::tcflag_t = 0o040000; +pub const XTABS: crate::tcflag_t = 0o014000; + +pub const B0: crate::speed_t = 0o000000; +pub const B50: crate::speed_t = 0o000001; +pub const B75: crate::speed_t = 0o000002; +pub const B110: crate::speed_t = 0o000003; +pub const B134: crate::speed_t = 0o000004; +pub const B150: crate::speed_t = 0o000005; +pub const B200: crate::speed_t = 0o000006; +pub const B300: crate::speed_t = 0o000007; +pub const B600: crate::speed_t = 0o000010; +pub const B1200: crate::speed_t = 0o000011; +pub const B1800: crate::speed_t = 0o000012; +pub const B2400: crate::speed_t = 0o000013; +pub const B4800: crate::speed_t = 0o000014; +pub const B9600: crate::speed_t = 0o000015; +pub const B19200: crate::speed_t = 0o000016; +pub const B38400: crate::speed_t = 0o000017; +pub const EXTA: crate::speed_t = B19200; +pub const EXTB: crate::speed_t = B38400; +pub const B57600: crate::speed_t = 0o010001; +pub const B115200: crate::speed_t = 0o010002; +pub const B230400: crate::speed_t = 0o010003; +pub const B460800: crate::speed_t = 0o010004; +pub const B500000: crate::speed_t = 0o010005; +pub const B576000: crate::speed_t = 0o010006; +pub const B921600: crate::speed_t = 0o010007; +pub const B1000000: crate::speed_t = 0o010010; +pub const B1152000: crate::speed_t = 0o010011; +pub const B1500000: crate::speed_t = 0o010012; +pub const B2000000: crate::speed_t = 0o010013; +pub const B2500000: crate::speed_t = 0o010014; +pub const B3000000: 
crate::speed_t = 0o010015; +pub const B3500000: crate::speed_t = 0o010016; +pub const B4000000: crate::speed_t = 0o010017; + +pub const VEOL: usize = 11; +pub const VEOL2: usize = 16; +pub const VMIN: usize = 6; +pub const IEXTEN: crate::tcflag_t = 0x00008000; +pub const TOSTOP: crate::tcflag_t = 0x00000100; +pub const FLUSHO: crate::tcflag_t = 0x00001000; +pub const EXTPROC: crate::tcflag_t = 0x00010000; + +// offsets in user_regs_structs, from sys/reg.h +pub const R15: c_int = 0; +pub const R14: c_int = 1; +pub const R13: c_int = 2; +pub const R12: c_int = 3; +pub const RBP: c_int = 4; +pub const RBX: c_int = 5; +pub const R11: c_int = 6; +pub const R10: c_int = 7; +pub const R9: c_int = 8; +pub const R8: c_int = 9; +pub const RAX: c_int = 10; +pub const RCX: c_int = 11; +pub const RDX: c_int = 12; +pub const RSI: c_int = 13; +pub const RDI: c_int = 14; +pub const ORIG_RAX: c_int = 15; +pub const RIP: c_int = 16; +pub const CS: c_int = 17; +pub const EFLAGS: c_int = 18; +pub const RSP: c_int = 19; +pub const SS: c_int = 20; +pub const FS_BASE: c_int = 21; +pub const GS_BASE: c_int = 22; +pub const DS: c_int = 23; +pub const ES: c_int = 24; +pub const FS: c_int = 25; +pub const GS: c_int = 26; + +// offsets in mcontext_t.gregs from sys/ucontext.h +pub const REG_R8: c_int = 0; +pub const REG_R9: c_int = 1; +pub const REG_R10: c_int = 2; +pub const REG_R11: c_int = 3; +pub const REG_R12: c_int = 4; +pub const REG_R13: c_int = 5; +pub const REG_R14: c_int = 6; +pub const REG_R15: c_int = 7; +pub const REG_RDI: c_int = 8; +pub const REG_RSI: c_int = 9; +pub const REG_RBP: c_int = 10; +pub const REG_RBX: c_int = 11; +pub const REG_RDX: c_int = 12; +pub const REG_RAX: c_int = 13; +pub const REG_RCX: c_int = 14; +pub const REG_RSP: c_int = 15; +pub const REG_RIP: c_int = 16; +pub const REG_EFL: c_int = 17; +pub const REG_CSGSFS: c_int = 18; +pub const REG_ERR: c_int = 19; +pub const REG_TRAPNO: c_int = 20; +pub const REG_OLDMASK: c_int = 21; +pub const REG_CR2: c_int = 22; + +extern "C" { + pub fn getcontext(ucp: *mut ucontext_t) -> c_int; + pub fn setcontext(ucp: *const ucontext_t) -> c_int; + pub fn makecontext(ucp: *mut ucontext_t, func: extern "C" fn(), argc: c_int, ...); + pub fn swapcontext(uocp: *mut ucontext_t, ucp: *const ucontext_t) -> c_int; +} + +cfg_if! { + if #[cfg(target_pointer_width = "32")] { + mod x32; + pub use self::x32::*; + } else { + mod not_x32; + pub use self::not_x32::*; + } +} diff --git a/vendor/libc/src/unix/linux_like/linux/gnu/b64/x86_64/not_x32.rs b/vendor/libc/src/unix/linux_like/linux/gnu/b64/x86_64/not_x32.rs new file mode 100644 index 00000000000000..27b96a60aabd83 --- /dev/null +++ b/vendor/libc/src/unix/linux_like/linux/gnu/b64/x86_64/not_x32.rs @@ -0,0 +1,446 @@ +use crate::prelude::*; +use crate::pthread_mutex_t; + +s! 
{ + pub struct statvfs { + pub f_bsize: c_ulong, + pub f_frsize: c_ulong, + pub f_blocks: crate::fsblkcnt_t, + pub f_bfree: crate::fsblkcnt_t, + pub f_bavail: crate::fsblkcnt_t, + pub f_files: crate::fsfilcnt_t, + pub f_ffree: crate::fsfilcnt_t, + pub f_favail: crate::fsfilcnt_t, + pub f_fsid: c_ulong, + pub f_flag: c_ulong, + pub f_namemax: c_ulong, + __f_spare: [c_int; 6], + } +} + +pub const __SIZEOF_PTHREAD_MUTEX_T: usize = 40; +pub const __SIZEOF_PTHREAD_RWLOCK_T: usize = 56; +pub const __SIZEOF_PTHREAD_BARRIER_T: usize = 32; + +#[cfg(target_endian = "little")] +pub const PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP: crate::pthread_mutex_t = pthread_mutex_t { + size: [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ], +}; +#[cfg(target_endian = "little")] +pub const PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP: crate::pthread_mutex_t = pthread_mutex_t { + size: [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ], +}; +#[cfg(target_endian = "little")] +pub const PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP: crate::pthread_mutex_t = pthread_mutex_t { + size: [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ], +}; +#[cfg(target_endian = "big")] +pub const PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP: crate::pthread_mutex_t = pthread_mutex_t { + size: [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ], +}; +#[cfg(target_endian = "big")] +pub const PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP: crate::pthread_mutex_t = pthread_mutex_t { + size: [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ], +}; +#[cfg(target_endian = "big")] +pub const PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP: crate::pthread_mutex_t = pthread_mutex_t { + size: [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ], +}; + +// Syscall table + +pub const SYS_read: c_long = 0; +pub const SYS_write: c_long = 1; +pub const SYS_open: c_long = 2; +pub const SYS_close: c_long = 3; +pub const SYS_stat: c_long = 4; +pub const SYS_fstat: c_long = 5; +pub const SYS_lstat: c_long = 6; +pub const SYS_poll: c_long = 7; +pub const SYS_lseek: c_long = 8; +pub const SYS_mmap: c_long = 9; +pub const SYS_mprotect: c_long = 10; +pub const SYS_munmap: c_long = 11; +pub const SYS_brk: c_long = 12; +pub const SYS_rt_sigaction: c_long = 13; +pub const SYS_rt_sigprocmask: c_long = 14; +pub const SYS_rt_sigreturn: c_long = 15; +pub const SYS_ioctl: c_long = 16; +pub const SYS_pread64: c_long = 17; +pub const SYS_pwrite64: c_long = 18; +pub const SYS_readv: c_long = 19; +pub const SYS_writev: c_long = 20; +pub const SYS_access: c_long = 21; +pub const SYS_pipe: c_long = 22; +pub const SYS_select: c_long = 23; +pub const SYS_sched_yield: c_long = 24; +pub const SYS_mremap: c_long = 25; +pub const SYS_msync: c_long = 26; +pub const SYS_mincore: c_long = 27; +pub const SYS_madvise: c_long = 28; +pub const SYS_shmget: c_long = 29; +pub const SYS_shmat: c_long = 30; +pub const SYS_shmctl: c_long = 31; +pub const SYS_dup: c_long = 32; +pub const SYS_dup2: c_long = 33; +pub const SYS_pause: c_long = 34; +pub const SYS_nanosleep: c_long = 35; +pub const SYS_getitimer: c_long = 36; +pub const SYS_alarm: c_long = 37; +pub const 
SYS_setitimer: c_long = 38; +pub const SYS_getpid: c_long = 39; +pub const SYS_sendfile: c_long = 40; +pub const SYS_socket: c_long = 41; +pub const SYS_connect: c_long = 42; +pub const SYS_accept: c_long = 43; +pub const SYS_sendto: c_long = 44; +pub const SYS_recvfrom: c_long = 45; +pub const SYS_sendmsg: c_long = 46; +pub const SYS_recvmsg: c_long = 47; +pub const SYS_shutdown: c_long = 48; +pub const SYS_bind: c_long = 49; +pub const SYS_listen: c_long = 50; +pub const SYS_getsockname: c_long = 51; +pub const SYS_getpeername: c_long = 52; +pub const SYS_socketpair: c_long = 53; +pub const SYS_setsockopt: c_long = 54; +pub const SYS_getsockopt: c_long = 55; +pub const SYS_clone: c_long = 56; +pub const SYS_fork: c_long = 57; +pub const SYS_vfork: c_long = 58; +pub const SYS_execve: c_long = 59; +pub const SYS_exit: c_long = 60; +pub const SYS_wait4: c_long = 61; +pub const SYS_kill: c_long = 62; +pub const SYS_uname: c_long = 63; +pub const SYS_semget: c_long = 64; +pub const SYS_semop: c_long = 65; +pub const SYS_semctl: c_long = 66; +pub const SYS_shmdt: c_long = 67; +pub const SYS_msgget: c_long = 68; +pub const SYS_msgsnd: c_long = 69; +pub const SYS_msgrcv: c_long = 70; +pub const SYS_msgctl: c_long = 71; +pub const SYS_fcntl: c_long = 72; +pub const SYS_flock: c_long = 73; +pub const SYS_fsync: c_long = 74; +pub const SYS_fdatasync: c_long = 75; +pub const SYS_truncate: c_long = 76; +pub const SYS_ftruncate: c_long = 77; +pub const SYS_getdents: c_long = 78; +pub const SYS_getcwd: c_long = 79; +pub const SYS_chdir: c_long = 80; +pub const SYS_fchdir: c_long = 81; +pub const SYS_rename: c_long = 82; +pub const SYS_mkdir: c_long = 83; +pub const SYS_rmdir: c_long = 84; +pub const SYS_creat: c_long = 85; +pub const SYS_link: c_long = 86; +pub const SYS_unlink: c_long = 87; +pub const SYS_symlink: c_long = 88; +pub const SYS_readlink: c_long = 89; +pub const SYS_chmod: c_long = 90; +pub const SYS_fchmod: c_long = 91; +pub const SYS_chown: c_long = 92; +pub const SYS_fchown: c_long = 93; +pub const SYS_lchown: c_long = 94; +pub const SYS_umask: c_long = 95; +pub const SYS_gettimeofday: c_long = 96; +pub const SYS_getrlimit: c_long = 97; +pub const SYS_getrusage: c_long = 98; +pub const SYS_sysinfo: c_long = 99; +pub const SYS_times: c_long = 100; +pub const SYS_ptrace: c_long = 101; +pub const SYS_getuid: c_long = 102; +pub const SYS_syslog: c_long = 103; +pub const SYS_getgid: c_long = 104; +pub const SYS_setuid: c_long = 105; +pub const SYS_setgid: c_long = 106; +pub const SYS_geteuid: c_long = 107; +pub const SYS_getegid: c_long = 108; +pub const SYS_setpgid: c_long = 109; +pub const SYS_getppid: c_long = 110; +pub const SYS_getpgrp: c_long = 111; +pub const SYS_setsid: c_long = 112; +pub const SYS_setreuid: c_long = 113; +pub const SYS_setregid: c_long = 114; +pub const SYS_getgroups: c_long = 115; +pub const SYS_setgroups: c_long = 116; +pub const SYS_setresuid: c_long = 117; +pub const SYS_getresuid: c_long = 118; +pub const SYS_setresgid: c_long = 119; +pub const SYS_getresgid: c_long = 120; +pub const SYS_getpgid: c_long = 121; +pub const SYS_setfsuid: c_long = 122; +pub const SYS_setfsgid: c_long = 123; +pub const SYS_getsid: c_long = 124; +pub const SYS_capget: c_long = 125; +pub const SYS_capset: c_long = 126; +pub const SYS_rt_sigpending: c_long = 127; +pub const SYS_rt_sigtimedwait: c_long = 128; +pub const SYS_rt_sigqueueinfo: c_long = 129; +pub const SYS_rt_sigsuspend: c_long = 130; +pub const SYS_sigaltstack: c_long = 131; +pub const SYS_utime: c_long = 132; +pub const 
SYS_mknod: c_long = 133; +pub const SYS_uselib: c_long = 134; +pub const SYS_personality: c_long = 135; +pub const SYS_ustat: c_long = 136; +pub const SYS_statfs: c_long = 137; +pub const SYS_fstatfs: c_long = 138; +pub const SYS_sysfs: c_long = 139; +pub const SYS_getpriority: c_long = 140; +pub const SYS_setpriority: c_long = 141; +pub const SYS_sched_setparam: c_long = 142; +pub const SYS_sched_getparam: c_long = 143; +pub const SYS_sched_setscheduler: c_long = 144; +pub const SYS_sched_getscheduler: c_long = 145; +pub const SYS_sched_get_priority_max: c_long = 146; +pub const SYS_sched_get_priority_min: c_long = 147; +pub const SYS_sched_rr_get_interval: c_long = 148; +pub const SYS_mlock: c_long = 149; +pub const SYS_munlock: c_long = 150; +pub const SYS_mlockall: c_long = 151; +pub const SYS_munlockall: c_long = 152; +pub const SYS_vhangup: c_long = 153; +pub const SYS_modify_ldt: c_long = 154; +pub const SYS_pivot_root: c_long = 155; +pub const SYS__sysctl: c_long = 156; +pub const SYS_prctl: c_long = 157; +pub const SYS_arch_prctl: c_long = 158; +pub const SYS_adjtimex: c_long = 159; +pub const SYS_setrlimit: c_long = 160; +pub const SYS_chroot: c_long = 161; +pub const SYS_sync: c_long = 162; +pub const SYS_acct: c_long = 163; +pub const SYS_settimeofday: c_long = 164; +pub const SYS_mount: c_long = 165; +pub const SYS_umount2: c_long = 166; +pub const SYS_swapon: c_long = 167; +pub const SYS_swapoff: c_long = 168; +pub const SYS_reboot: c_long = 169; +pub const SYS_sethostname: c_long = 170; +pub const SYS_setdomainname: c_long = 171; +pub const SYS_iopl: c_long = 172; +pub const SYS_ioperm: c_long = 173; +#[deprecated(since = "0.2.70", note = "Functional up to 2.6 kernel")] +pub const SYS_create_module: c_long = 174; +pub const SYS_init_module: c_long = 175; +pub const SYS_delete_module: c_long = 176; +#[deprecated(since = "0.2.70", note = "Functional up to 2.6 kernel")] +pub const SYS_get_kernel_syms: c_long = 177; +#[deprecated(since = "0.2.70", note = "Functional up to 2.6 kernel")] +pub const SYS_query_module: c_long = 178; +pub const SYS_quotactl: c_long = 179; +pub const SYS_nfsservctl: c_long = 180; +pub const SYS_getpmsg: c_long = 181; +pub const SYS_putpmsg: c_long = 182; +pub const SYS_afs_syscall: c_long = 183; +pub const SYS_tuxcall: c_long = 184; +pub const SYS_security: c_long = 185; +pub const SYS_gettid: c_long = 186; +pub const SYS_readahead: c_long = 187; +pub const SYS_setxattr: c_long = 188; +pub const SYS_lsetxattr: c_long = 189; +pub const SYS_fsetxattr: c_long = 190; +pub const SYS_getxattr: c_long = 191; +pub const SYS_lgetxattr: c_long = 192; +pub const SYS_fgetxattr: c_long = 193; +pub const SYS_listxattr: c_long = 194; +pub const SYS_llistxattr: c_long = 195; +pub const SYS_flistxattr: c_long = 196; +pub const SYS_removexattr: c_long = 197; +pub const SYS_lremovexattr: c_long = 198; +pub const SYS_fremovexattr: c_long = 199; +pub const SYS_tkill: c_long = 200; +pub const SYS_time: c_long = 201; +pub const SYS_futex: c_long = 202; +pub const SYS_sched_setaffinity: c_long = 203; +pub const SYS_sched_getaffinity: c_long = 204; +pub const SYS_set_thread_area: c_long = 205; +pub const SYS_io_setup: c_long = 206; +pub const SYS_io_destroy: c_long = 207; +pub const SYS_io_getevents: c_long = 208; +pub const SYS_io_submit: c_long = 209; +pub const SYS_io_cancel: c_long = 210; +pub const SYS_get_thread_area: c_long = 211; +pub const SYS_lookup_dcookie: c_long = 212; +pub const SYS_epoll_create: c_long = 213; +pub const SYS_epoll_ctl_old: c_long = 214; +pub 
const SYS_epoll_wait_old: c_long = 215; +pub const SYS_remap_file_pages: c_long = 216; +pub const SYS_getdents64: c_long = 217; +pub const SYS_set_tid_address: c_long = 218; +pub const SYS_restart_syscall: c_long = 219; +pub const SYS_semtimedop: c_long = 220; +pub const SYS_fadvise64: c_long = 221; +pub const SYS_timer_create: c_long = 222; +pub const SYS_timer_settime: c_long = 223; +pub const SYS_timer_gettime: c_long = 224; +pub const SYS_timer_getoverrun: c_long = 225; +pub const SYS_timer_delete: c_long = 226; +pub const SYS_clock_settime: c_long = 227; +pub const SYS_clock_gettime: c_long = 228; +pub const SYS_clock_getres: c_long = 229; +pub const SYS_clock_nanosleep: c_long = 230; +pub const SYS_exit_group: c_long = 231; +pub const SYS_epoll_wait: c_long = 232; +pub const SYS_epoll_ctl: c_long = 233; +pub const SYS_tgkill: c_long = 234; +pub const SYS_utimes: c_long = 235; +pub const SYS_vserver: c_long = 236; +pub const SYS_mbind: c_long = 237; +pub const SYS_set_mempolicy: c_long = 238; +pub const SYS_get_mempolicy: c_long = 239; +pub const SYS_mq_open: c_long = 240; +pub const SYS_mq_unlink: c_long = 241; +pub const SYS_mq_timedsend: c_long = 242; +pub const SYS_mq_timedreceive: c_long = 243; +pub const SYS_mq_notify: c_long = 244; +pub const SYS_mq_getsetattr: c_long = 245; +pub const SYS_kexec_load: c_long = 246; +pub const SYS_waitid: c_long = 247; +pub const SYS_add_key: c_long = 248; +pub const SYS_request_key: c_long = 249; +pub const SYS_keyctl: c_long = 250; +pub const SYS_ioprio_set: c_long = 251; +pub const SYS_ioprio_get: c_long = 252; +pub const SYS_inotify_init: c_long = 253; +pub const SYS_inotify_add_watch: c_long = 254; +pub const SYS_inotify_rm_watch: c_long = 255; +pub const SYS_migrate_pages: c_long = 256; +pub const SYS_openat: c_long = 257; +pub const SYS_mkdirat: c_long = 258; +pub const SYS_mknodat: c_long = 259; +pub const SYS_fchownat: c_long = 260; +pub const SYS_futimesat: c_long = 261; +pub const SYS_newfstatat: c_long = 262; +pub const SYS_unlinkat: c_long = 263; +pub const SYS_renameat: c_long = 264; +pub const SYS_linkat: c_long = 265; +pub const SYS_symlinkat: c_long = 266; +pub const SYS_readlinkat: c_long = 267; +pub const SYS_fchmodat: c_long = 268; +pub const SYS_faccessat: c_long = 269; +pub const SYS_pselect6: c_long = 270; +pub const SYS_ppoll: c_long = 271; +pub const SYS_unshare: c_long = 272; +pub const SYS_set_robust_list: c_long = 273; +pub const SYS_get_robust_list: c_long = 274; +pub const SYS_splice: c_long = 275; +pub const SYS_tee: c_long = 276; +pub const SYS_sync_file_range: c_long = 277; +pub const SYS_vmsplice: c_long = 278; +pub const SYS_move_pages: c_long = 279; +pub const SYS_utimensat: c_long = 280; +pub const SYS_epoll_pwait: c_long = 281; +pub const SYS_signalfd: c_long = 282; +pub const SYS_timerfd_create: c_long = 283; +pub const SYS_eventfd: c_long = 284; +pub const SYS_fallocate: c_long = 285; +pub const SYS_timerfd_settime: c_long = 286; +pub const SYS_timerfd_gettime: c_long = 287; +pub const SYS_accept4: c_long = 288; +pub const SYS_signalfd4: c_long = 289; +pub const SYS_eventfd2: c_long = 290; +pub const SYS_epoll_create1: c_long = 291; +pub const SYS_dup3: c_long = 292; +pub const SYS_pipe2: c_long = 293; +pub const SYS_inotify_init1: c_long = 294; +pub const SYS_preadv: c_long = 295; +pub const SYS_pwritev: c_long = 296; +pub const SYS_rt_tgsigqueueinfo: c_long = 297; +pub const SYS_perf_event_open: c_long = 298; +pub const SYS_recvmmsg: c_long = 299; +pub const SYS_fanotify_init: c_long = 300; +pub const 
SYS_fanotify_mark: c_long = 301; +pub const SYS_prlimit64: c_long = 302; +pub const SYS_name_to_handle_at: c_long = 303; +pub const SYS_open_by_handle_at: c_long = 304; +pub const SYS_clock_adjtime: c_long = 305; +pub const SYS_syncfs: c_long = 306; +pub const SYS_sendmmsg: c_long = 307; +pub const SYS_setns: c_long = 308; +pub const SYS_getcpu: c_long = 309; +pub const SYS_process_vm_readv: c_long = 310; +pub const SYS_process_vm_writev: c_long = 311; +pub const SYS_kcmp: c_long = 312; +pub const SYS_finit_module: c_long = 313; +pub const SYS_sched_setattr: c_long = 314; +pub const SYS_sched_getattr: c_long = 315; +pub const SYS_renameat2: c_long = 316; +pub const SYS_seccomp: c_long = 317; +pub const SYS_getrandom: c_long = 318; +pub const SYS_memfd_create: c_long = 319; +pub const SYS_kexec_file_load: c_long = 320; +pub const SYS_bpf: c_long = 321; +pub const SYS_execveat: c_long = 322; +pub const SYS_userfaultfd: c_long = 323; +pub const SYS_membarrier: c_long = 324; +pub const SYS_mlock2: c_long = 325; +pub const SYS_copy_file_range: c_long = 326; +pub const SYS_preadv2: c_long = 327; +pub const SYS_pwritev2: c_long = 328; +pub const SYS_pkey_mprotect: c_long = 329; +pub const SYS_pkey_alloc: c_long = 330; +pub const SYS_pkey_free: c_long = 331; +pub const SYS_statx: c_long = 332; +pub const SYS_rseq: c_long = 334; +pub const SYS_pidfd_send_signal: c_long = 424; +pub const SYS_io_uring_setup: c_long = 425; +pub const SYS_io_uring_enter: c_long = 426; +pub const SYS_io_uring_register: c_long = 427; +pub const SYS_open_tree: c_long = 428; +pub const SYS_move_mount: c_long = 429; +pub const SYS_fsopen: c_long = 430; +pub const SYS_fsconfig: c_long = 431; +pub const SYS_fsmount: c_long = 432; +pub const SYS_fspick: c_long = 433; +pub const SYS_pidfd_open: c_long = 434; +pub const SYS_clone3: c_long = 435; +pub const SYS_close_range: c_long = 436; +pub const SYS_openat2: c_long = 437; +pub const SYS_pidfd_getfd: c_long = 438; +pub const SYS_faccessat2: c_long = 439; +pub const SYS_process_madvise: c_long = 440; +pub const SYS_epoll_pwait2: c_long = 441; +pub const SYS_mount_setattr: c_long = 442; +pub const SYS_quotactl_fd: c_long = 443; +pub const SYS_landlock_create_ruleset: c_long = 444; +pub const SYS_landlock_add_rule: c_long = 445; +pub const SYS_landlock_restrict_self: c_long = 446; +pub const SYS_memfd_secret: c_long = 447; +pub const SYS_process_mrelease: c_long = 448; +pub const SYS_futex_waitv: c_long = 449; +pub const SYS_set_mempolicy_home_node: c_long = 450; +pub const SYS_fchmodat2: c_long = 452; +pub const SYS_mseal: c_long = 462; + +extern "C" { + pub fn sysctl( + name: *mut c_int, + namelen: c_int, + oldp: *mut c_void, + oldlenp: *mut size_t, + newp: *mut c_void, + newlen: size_t, + ) -> c_int; +} diff --git a/vendor/libc/src/unix/linux_like/linux/gnu/b64/x86_64/x32.rs b/vendor/libc/src/unix/linux_like/linux/gnu/b64/x86_64/x32.rs new file mode 100644 index 00000000000000..1a1cd34be035f7 --- /dev/null +++ b/vendor/libc/src/unix/linux_like/linux/gnu/b64/x86_64/x32.rs @@ -0,0 +1,398 @@ +use crate::prelude::*; +use crate::pthread_mutex_t; + +s! 
{ + pub struct statvfs { + pub f_bsize: c_ulong, + pub f_frsize: c_ulong, + pub f_blocks: crate::fsblkcnt_t, + pub f_bfree: crate::fsblkcnt_t, + pub f_bavail: crate::fsblkcnt_t, + pub f_files: crate::fsfilcnt_t, + pub f_ffree: crate::fsfilcnt_t, + pub f_favail: crate::fsfilcnt_t, + pub f_fsid: c_ulong, + pub f_flag: c_ulong, + pub f_namemax: c_ulong, + __f_spare: [c_int; 6], + } +} + +pub const __SIZEOF_PTHREAD_MUTEX_T: usize = 32; +pub const __SIZEOF_PTHREAD_RWLOCK_T: usize = 44; +pub const __SIZEOF_PTHREAD_BARRIER_T: usize = 20; + +pub const PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP: crate::pthread_mutex_t = pthread_mutex_t { + size: [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, + ], +}; +pub const PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP: crate::pthread_mutex_t = pthread_mutex_t { + size: [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, + ], +}; +pub const PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP: crate::pthread_mutex_t = pthread_mutex_t { + size: [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, + ], +}; + +// Syscall table + +pub const __X32_SYSCALL_BIT: c_long = 0x40000000; + +pub const SYS_read: c_long = __X32_SYSCALL_BIT + 0; +pub const SYS_write: c_long = __X32_SYSCALL_BIT + 1; +pub const SYS_open: c_long = __X32_SYSCALL_BIT + 2; +pub const SYS_close: c_long = __X32_SYSCALL_BIT + 3; +pub const SYS_stat: c_long = __X32_SYSCALL_BIT + 4; +pub const SYS_fstat: c_long = __X32_SYSCALL_BIT + 5; +pub const SYS_lstat: c_long = __X32_SYSCALL_BIT + 6; +pub const SYS_poll: c_long = __X32_SYSCALL_BIT + 7; +pub const SYS_lseek: c_long = __X32_SYSCALL_BIT + 8; +pub const SYS_mmap: c_long = __X32_SYSCALL_BIT + 9; +pub const SYS_mprotect: c_long = __X32_SYSCALL_BIT + 10; +pub const SYS_munmap: c_long = __X32_SYSCALL_BIT + 11; +pub const SYS_brk: c_long = __X32_SYSCALL_BIT + 12; +pub const SYS_rt_sigprocmask: c_long = __X32_SYSCALL_BIT + 14; +pub const SYS_pread64: c_long = __X32_SYSCALL_BIT + 17; +pub const SYS_pwrite64: c_long = __X32_SYSCALL_BIT + 18; +pub const SYS_access: c_long = __X32_SYSCALL_BIT + 21; +pub const SYS_pipe: c_long = __X32_SYSCALL_BIT + 22; +pub const SYS_select: c_long = __X32_SYSCALL_BIT + 23; +pub const SYS_sched_yield: c_long = __X32_SYSCALL_BIT + 24; +pub const SYS_mremap: c_long = __X32_SYSCALL_BIT + 25; +pub const SYS_msync: c_long = __X32_SYSCALL_BIT + 26; +pub const SYS_mincore: c_long = __X32_SYSCALL_BIT + 27; +pub const SYS_madvise: c_long = __X32_SYSCALL_BIT + 28; +pub const SYS_shmget: c_long = __X32_SYSCALL_BIT + 29; +pub const SYS_shmat: c_long = __X32_SYSCALL_BIT + 30; +pub const SYS_shmctl: c_long = __X32_SYSCALL_BIT + 31; +pub const SYS_dup: c_long = __X32_SYSCALL_BIT + 32; +pub const SYS_dup2: c_long = __X32_SYSCALL_BIT + 33; +pub const SYS_pause: c_long = __X32_SYSCALL_BIT + 34; +pub const SYS_nanosleep: c_long = __X32_SYSCALL_BIT + 35; +pub const SYS_getitimer: c_long = __X32_SYSCALL_BIT + 36; +pub const SYS_alarm: c_long = __X32_SYSCALL_BIT + 37; +pub const SYS_setitimer: c_long = __X32_SYSCALL_BIT + 38; +pub const SYS_getpid: c_long = __X32_SYSCALL_BIT + 39; +pub const SYS_sendfile: c_long = __X32_SYSCALL_BIT + 40; +pub const SYS_socket: c_long = __X32_SYSCALL_BIT + 41; +pub const SYS_connect: c_long = __X32_SYSCALL_BIT + 42; +pub const SYS_accept: c_long = __X32_SYSCALL_BIT + 43; +pub const SYS_sendto: c_long = __X32_SYSCALL_BIT + 44; +pub const SYS_shutdown: c_long = __X32_SYSCALL_BIT + 48; +pub const 
SYS_bind: c_long = __X32_SYSCALL_BIT + 49; +pub const SYS_listen: c_long = __X32_SYSCALL_BIT + 50; +pub const SYS_getsockname: c_long = __X32_SYSCALL_BIT + 51; +pub const SYS_getpeername: c_long = __X32_SYSCALL_BIT + 52; +pub const SYS_socketpair: c_long = __X32_SYSCALL_BIT + 53; +pub const SYS_clone: c_long = __X32_SYSCALL_BIT + 56; +pub const SYS_fork: c_long = __X32_SYSCALL_BIT + 57; +pub const SYS_vfork: c_long = __X32_SYSCALL_BIT + 58; +pub const SYS_exit: c_long = __X32_SYSCALL_BIT + 60; +pub const SYS_wait4: c_long = __X32_SYSCALL_BIT + 61; +pub const SYS_kill: c_long = __X32_SYSCALL_BIT + 62; +pub const SYS_uname: c_long = __X32_SYSCALL_BIT + 63; +pub const SYS_semget: c_long = __X32_SYSCALL_BIT + 64; +pub const SYS_semop: c_long = __X32_SYSCALL_BIT + 65; +pub const SYS_semctl: c_long = __X32_SYSCALL_BIT + 66; +pub const SYS_shmdt: c_long = __X32_SYSCALL_BIT + 67; +pub const SYS_msgget: c_long = __X32_SYSCALL_BIT + 68; +pub const SYS_msgsnd: c_long = __X32_SYSCALL_BIT + 69; +pub const SYS_msgrcv: c_long = __X32_SYSCALL_BIT + 70; +pub const SYS_msgctl: c_long = __X32_SYSCALL_BIT + 71; +pub const SYS_fcntl: c_long = __X32_SYSCALL_BIT + 72; +pub const SYS_flock: c_long = __X32_SYSCALL_BIT + 73; +pub const SYS_fsync: c_long = __X32_SYSCALL_BIT + 74; +pub const SYS_fdatasync: c_long = __X32_SYSCALL_BIT + 75; +pub const SYS_truncate: c_long = __X32_SYSCALL_BIT + 76; +pub const SYS_ftruncate: c_long = __X32_SYSCALL_BIT + 77; +pub const SYS_getdents: c_long = __X32_SYSCALL_BIT + 78; +pub const SYS_getcwd: c_long = __X32_SYSCALL_BIT + 79; +pub const SYS_chdir: c_long = __X32_SYSCALL_BIT + 80; +pub const SYS_fchdir: c_long = __X32_SYSCALL_BIT + 81; +pub const SYS_rename: c_long = __X32_SYSCALL_BIT + 82; +pub const SYS_mkdir: c_long = __X32_SYSCALL_BIT + 83; +pub const SYS_rmdir: c_long = __X32_SYSCALL_BIT + 84; +pub const SYS_creat: c_long = __X32_SYSCALL_BIT + 85; +pub const SYS_link: c_long = __X32_SYSCALL_BIT + 86; +pub const SYS_unlink: c_long = __X32_SYSCALL_BIT + 87; +pub const SYS_symlink: c_long = __X32_SYSCALL_BIT + 88; +pub const SYS_readlink: c_long = __X32_SYSCALL_BIT + 89; +pub const SYS_chmod: c_long = __X32_SYSCALL_BIT + 90; +pub const SYS_fchmod: c_long = __X32_SYSCALL_BIT + 91; +pub const SYS_chown: c_long = __X32_SYSCALL_BIT + 92; +pub const SYS_fchown: c_long = __X32_SYSCALL_BIT + 93; +pub const SYS_lchown: c_long = __X32_SYSCALL_BIT + 94; +pub const SYS_umask: c_long = __X32_SYSCALL_BIT + 95; +pub const SYS_gettimeofday: c_long = __X32_SYSCALL_BIT + 96; +pub const SYS_getrlimit: c_long = __X32_SYSCALL_BIT + 97; +pub const SYS_getrusage: c_long = __X32_SYSCALL_BIT + 98; +pub const SYS_sysinfo: c_long = __X32_SYSCALL_BIT + 99; +pub const SYS_times: c_long = __X32_SYSCALL_BIT + 100; +pub const SYS_getuid: c_long = __X32_SYSCALL_BIT + 102; +pub const SYS_syslog: c_long = __X32_SYSCALL_BIT + 103; +pub const SYS_getgid: c_long = __X32_SYSCALL_BIT + 104; +pub const SYS_setuid: c_long = __X32_SYSCALL_BIT + 105; +pub const SYS_setgid: c_long = __X32_SYSCALL_BIT + 106; +pub const SYS_geteuid: c_long = __X32_SYSCALL_BIT + 107; +pub const SYS_getegid: c_long = __X32_SYSCALL_BIT + 108; +pub const SYS_setpgid: c_long = __X32_SYSCALL_BIT + 109; +pub const SYS_getppid: c_long = __X32_SYSCALL_BIT + 110; +pub const SYS_getpgrp: c_long = __X32_SYSCALL_BIT + 111; +pub const SYS_setsid: c_long = __X32_SYSCALL_BIT + 112; +pub const SYS_setreuid: c_long = __X32_SYSCALL_BIT + 113; +pub const SYS_setregid: c_long = __X32_SYSCALL_BIT + 114; +pub const SYS_getgroups: c_long = __X32_SYSCALL_BIT + 
115; +pub const SYS_setgroups: c_long = __X32_SYSCALL_BIT + 116; +pub const SYS_setresuid: c_long = __X32_SYSCALL_BIT + 117; +pub const SYS_getresuid: c_long = __X32_SYSCALL_BIT + 118; +pub const SYS_setresgid: c_long = __X32_SYSCALL_BIT + 119; +pub const SYS_getresgid: c_long = __X32_SYSCALL_BIT + 120; +pub const SYS_getpgid: c_long = __X32_SYSCALL_BIT + 121; +pub const SYS_setfsuid: c_long = __X32_SYSCALL_BIT + 122; +pub const SYS_setfsgid: c_long = __X32_SYSCALL_BIT + 123; +pub const SYS_getsid: c_long = __X32_SYSCALL_BIT + 124; +pub const SYS_capget: c_long = __X32_SYSCALL_BIT + 125; +pub const SYS_capset: c_long = __X32_SYSCALL_BIT + 126; +pub const SYS_rt_sigsuspend: c_long = __X32_SYSCALL_BIT + 130; +pub const SYS_utime: c_long = __X32_SYSCALL_BIT + 132; +pub const SYS_mknod: c_long = __X32_SYSCALL_BIT + 133; +pub const SYS_personality: c_long = __X32_SYSCALL_BIT + 135; +pub const SYS_ustat: c_long = __X32_SYSCALL_BIT + 136; +pub const SYS_statfs: c_long = __X32_SYSCALL_BIT + 137; +pub const SYS_fstatfs: c_long = __X32_SYSCALL_BIT + 138; +pub const SYS_sysfs: c_long = __X32_SYSCALL_BIT + 139; +pub const SYS_getpriority: c_long = __X32_SYSCALL_BIT + 140; +pub const SYS_setpriority: c_long = __X32_SYSCALL_BIT + 141; +pub const SYS_sched_setparam: c_long = __X32_SYSCALL_BIT + 142; +pub const SYS_sched_getparam: c_long = __X32_SYSCALL_BIT + 143; +pub const SYS_sched_setscheduler: c_long = __X32_SYSCALL_BIT + 144; +pub const SYS_sched_getscheduler: c_long = __X32_SYSCALL_BIT + 145; +pub const SYS_sched_get_priority_max: c_long = __X32_SYSCALL_BIT + 146; +pub const SYS_sched_get_priority_min: c_long = __X32_SYSCALL_BIT + 147; +pub const SYS_sched_rr_get_interval: c_long = __X32_SYSCALL_BIT + 148; +pub const SYS_mlock: c_long = __X32_SYSCALL_BIT + 149; +pub const SYS_munlock: c_long = __X32_SYSCALL_BIT + 150; +pub const SYS_mlockall: c_long = __X32_SYSCALL_BIT + 151; +pub const SYS_munlockall: c_long = __X32_SYSCALL_BIT + 152; +pub const SYS_vhangup: c_long = __X32_SYSCALL_BIT + 153; +pub const SYS_modify_ldt: c_long = __X32_SYSCALL_BIT + 154; +pub const SYS_pivot_root: c_long = __X32_SYSCALL_BIT + 155; +pub const SYS_prctl: c_long = __X32_SYSCALL_BIT + 157; +pub const SYS_arch_prctl: c_long = __X32_SYSCALL_BIT + 158; +pub const SYS_adjtimex: c_long = __X32_SYSCALL_BIT + 159; +pub const SYS_setrlimit: c_long = __X32_SYSCALL_BIT + 160; +pub const SYS_chroot: c_long = __X32_SYSCALL_BIT + 161; +pub const SYS_sync: c_long = __X32_SYSCALL_BIT + 162; +pub const SYS_acct: c_long = __X32_SYSCALL_BIT + 163; +pub const SYS_settimeofday: c_long = __X32_SYSCALL_BIT + 164; +pub const SYS_mount: c_long = __X32_SYSCALL_BIT + 165; +pub const SYS_umount2: c_long = __X32_SYSCALL_BIT + 166; +pub const SYS_swapon: c_long = __X32_SYSCALL_BIT + 167; +pub const SYS_swapoff: c_long = __X32_SYSCALL_BIT + 168; +pub const SYS_reboot: c_long = __X32_SYSCALL_BIT + 169; +pub const SYS_sethostname: c_long = __X32_SYSCALL_BIT + 170; +pub const SYS_setdomainname: c_long = __X32_SYSCALL_BIT + 171; +pub const SYS_iopl: c_long = __X32_SYSCALL_BIT + 172; +pub const SYS_ioperm: c_long = __X32_SYSCALL_BIT + 173; +pub const SYS_init_module: c_long = __X32_SYSCALL_BIT + 175; +pub const SYS_delete_module: c_long = __X32_SYSCALL_BIT + 176; +pub const SYS_quotactl: c_long = __X32_SYSCALL_BIT + 179; +pub const SYS_getpmsg: c_long = __X32_SYSCALL_BIT + 181; +pub const SYS_putpmsg: c_long = __X32_SYSCALL_BIT + 182; +pub const SYS_afs_syscall: c_long = __X32_SYSCALL_BIT + 183; +pub const SYS_tuxcall: c_long = __X32_SYSCALL_BIT + 184; 
+pub const SYS_security: c_long = __X32_SYSCALL_BIT + 185; +pub const SYS_gettid: c_long = __X32_SYSCALL_BIT + 186; +pub const SYS_readahead: c_long = __X32_SYSCALL_BIT + 187; +pub const SYS_setxattr: c_long = __X32_SYSCALL_BIT + 188; +pub const SYS_lsetxattr: c_long = __X32_SYSCALL_BIT + 189; +pub const SYS_fsetxattr: c_long = __X32_SYSCALL_BIT + 190; +pub const SYS_getxattr: c_long = __X32_SYSCALL_BIT + 191; +pub const SYS_lgetxattr: c_long = __X32_SYSCALL_BIT + 192; +pub const SYS_fgetxattr: c_long = __X32_SYSCALL_BIT + 193; +pub const SYS_listxattr: c_long = __X32_SYSCALL_BIT + 194; +pub const SYS_llistxattr: c_long = __X32_SYSCALL_BIT + 195; +pub const SYS_flistxattr: c_long = __X32_SYSCALL_BIT + 196; +pub const SYS_removexattr: c_long = __X32_SYSCALL_BIT + 197; +pub const SYS_lremovexattr: c_long = __X32_SYSCALL_BIT + 198; +pub const SYS_fremovexattr: c_long = __X32_SYSCALL_BIT + 199; +pub const SYS_tkill: c_long = __X32_SYSCALL_BIT + 200; +pub const SYS_time: c_long = __X32_SYSCALL_BIT + 201; +pub const SYS_futex: c_long = __X32_SYSCALL_BIT + 202; +pub const SYS_sched_setaffinity: c_long = __X32_SYSCALL_BIT + 203; +pub const SYS_sched_getaffinity: c_long = __X32_SYSCALL_BIT + 204; +pub const SYS_io_destroy: c_long = __X32_SYSCALL_BIT + 207; +pub const SYS_io_getevents: c_long = __X32_SYSCALL_BIT + 208; +pub const SYS_io_cancel: c_long = __X32_SYSCALL_BIT + 210; +pub const SYS_lookup_dcookie: c_long = __X32_SYSCALL_BIT + 212; +pub const SYS_epoll_create: c_long = __X32_SYSCALL_BIT + 213; +pub const SYS_remap_file_pages: c_long = __X32_SYSCALL_BIT + 216; +pub const SYS_getdents64: c_long = __X32_SYSCALL_BIT + 217; +pub const SYS_set_tid_address: c_long = __X32_SYSCALL_BIT + 218; +pub const SYS_restart_syscall: c_long = __X32_SYSCALL_BIT + 219; +pub const SYS_semtimedop: c_long = __X32_SYSCALL_BIT + 220; +pub const SYS_fadvise64: c_long = __X32_SYSCALL_BIT + 221; +pub const SYS_timer_settime: c_long = __X32_SYSCALL_BIT + 223; +pub const SYS_timer_gettime: c_long = __X32_SYSCALL_BIT + 224; +pub const SYS_timer_getoverrun: c_long = __X32_SYSCALL_BIT + 225; +pub const SYS_timer_delete: c_long = __X32_SYSCALL_BIT + 226; +pub const SYS_clock_settime: c_long = __X32_SYSCALL_BIT + 227; +pub const SYS_clock_gettime: c_long = __X32_SYSCALL_BIT + 228; +pub const SYS_clock_getres: c_long = __X32_SYSCALL_BIT + 229; +pub const SYS_clock_nanosleep: c_long = __X32_SYSCALL_BIT + 230; +pub const SYS_exit_group: c_long = __X32_SYSCALL_BIT + 231; +pub const SYS_epoll_wait: c_long = __X32_SYSCALL_BIT + 232; +pub const SYS_epoll_ctl: c_long = __X32_SYSCALL_BIT + 233; +pub const SYS_tgkill: c_long = __X32_SYSCALL_BIT + 234; +pub const SYS_utimes: c_long = __X32_SYSCALL_BIT + 235; +pub const SYS_mbind: c_long = __X32_SYSCALL_BIT + 237; +pub const SYS_set_mempolicy: c_long = __X32_SYSCALL_BIT + 238; +pub const SYS_get_mempolicy: c_long = __X32_SYSCALL_BIT + 239; +pub const SYS_mq_open: c_long = __X32_SYSCALL_BIT + 240; +pub const SYS_mq_unlink: c_long = __X32_SYSCALL_BIT + 241; +pub const SYS_mq_timedsend: c_long = __X32_SYSCALL_BIT + 242; +pub const SYS_mq_timedreceive: c_long = __X32_SYSCALL_BIT + 243; +pub const SYS_mq_getsetattr: c_long = __X32_SYSCALL_BIT + 245; +pub const SYS_add_key: c_long = __X32_SYSCALL_BIT + 248; +pub const SYS_request_key: c_long = __X32_SYSCALL_BIT + 249; +pub const SYS_keyctl: c_long = __X32_SYSCALL_BIT + 250; +pub const SYS_ioprio_set: c_long = __X32_SYSCALL_BIT + 251; +pub const SYS_ioprio_get: c_long = __X32_SYSCALL_BIT + 252; +pub const SYS_inotify_init: c_long = 
__X32_SYSCALL_BIT + 253; +pub const SYS_inotify_add_watch: c_long = __X32_SYSCALL_BIT + 254; +pub const SYS_inotify_rm_watch: c_long = __X32_SYSCALL_BIT + 255; +pub const SYS_migrate_pages: c_long = __X32_SYSCALL_BIT + 256; +pub const SYS_openat: c_long = __X32_SYSCALL_BIT + 257; +pub const SYS_mkdirat: c_long = __X32_SYSCALL_BIT + 258; +pub const SYS_mknodat: c_long = __X32_SYSCALL_BIT + 259; +pub const SYS_fchownat: c_long = __X32_SYSCALL_BIT + 260; +pub const SYS_futimesat: c_long = __X32_SYSCALL_BIT + 261; +pub const SYS_newfstatat: c_long = __X32_SYSCALL_BIT + 262; +pub const SYS_unlinkat: c_long = __X32_SYSCALL_BIT + 263; +pub const SYS_renameat: c_long = __X32_SYSCALL_BIT + 264; +pub const SYS_linkat: c_long = __X32_SYSCALL_BIT + 265; +pub const SYS_symlinkat: c_long = __X32_SYSCALL_BIT + 266; +pub const SYS_readlinkat: c_long = __X32_SYSCALL_BIT + 267; +pub const SYS_fchmodat: c_long = __X32_SYSCALL_BIT + 268; +pub const SYS_faccessat: c_long = __X32_SYSCALL_BIT + 269; +pub const SYS_pselect6: c_long = __X32_SYSCALL_BIT + 270; +pub const SYS_ppoll: c_long = __X32_SYSCALL_BIT + 271; +pub const SYS_unshare: c_long = __X32_SYSCALL_BIT + 272; +pub const SYS_splice: c_long = __X32_SYSCALL_BIT + 275; +pub const SYS_tee: c_long = __X32_SYSCALL_BIT + 276; +pub const SYS_sync_file_range: c_long = __X32_SYSCALL_BIT + 277; +pub const SYS_utimensat: c_long = __X32_SYSCALL_BIT + 280; +pub const SYS_epoll_pwait: c_long = __X32_SYSCALL_BIT + 281; +pub const SYS_signalfd: c_long = __X32_SYSCALL_BIT + 282; +pub const SYS_timerfd_create: c_long = __X32_SYSCALL_BIT + 283; +pub const SYS_eventfd: c_long = __X32_SYSCALL_BIT + 284; +pub const SYS_fallocate: c_long = __X32_SYSCALL_BIT + 285; +pub const SYS_timerfd_settime: c_long = __X32_SYSCALL_BIT + 286; +pub const SYS_timerfd_gettime: c_long = __X32_SYSCALL_BIT + 287; +pub const SYS_accept4: c_long = __X32_SYSCALL_BIT + 288; +pub const SYS_signalfd4: c_long = __X32_SYSCALL_BIT + 289; +pub const SYS_eventfd2: c_long = __X32_SYSCALL_BIT + 290; +pub const SYS_epoll_create1: c_long = __X32_SYSCALL_BIT + 291; +pub const SYS_dup3: c_long = __X32_SYSCALL_BIT + 292; +pub const SYS_pipe2: c_long = __X32_SYSCALL_BIT + 293; +pub const SYS_inotify_init1: c_long = __X32_SYSCALL_BIT + 294; +pub const SYS_perf_event_open: c_long = __X32_SYSCALL_BIT + 298; +pub const SYS_fanotify_init: c_long = __X32_SYSCALL_BIT + 300; +pub const SYS_fanotify_mark: c_long = __X32_SYSCALL_BIT + 301; +pub const SYS_prlimit64: c_long = __X32_SYSCALL_BIT + 302; +pub const SYS_name_to_handle_at: c_long = __X32_SYSCALL_BIT + 303; +pub const SYS_open_by_handle_at: c_long = __X32_SYSCALL_BIT + 304; +pub const SYS_clock_adjtime: c_long = __X32_SYSCALL_BIT + 305; +pub const SYS_syncfs: c_long = __X32_SYSCALL_BIT + 306; +pub const SYS_setns: c_long = __X32_SYSCALL_BIT + 308; +pub const SYS_getcpu: c_long = __X32_SYSCALL_BIT + 309; +pub const SYS_kcmp: c_long = __X32_SYSCALL_BIT + 312; +pub const SYS_finit_module: c_long = __X32_SYSCALL_BIT + 313; +pub const SYS_sched_setattr: c_long = __X32_SYSCALL_BIT + 314; +pub const SYS_sched_getattr: c_long = __X32_SYSCALL_BIT + 315; +pub const SYS_renameat2: c_long = __X32_SYSCALL_BIT + 316; +pub const SYS_seccomp: c_long = __X32_SYSCALL_BIT + 317; +pub const SYS_getrandom: c_long = __X32_SYSCALL_BIT + 318; +pub const SYS_memfd_create: c_long = __X32_SYSCALL_BIT + 319; +pub const SYS_kexec_file_load: c_long = __X32_SYSCALL_BIT + 320; +pub const SYS_bpf: c_long = __X32_SYSCALL_BIT + 321; +pub const SYS_userfaultfd: c_long = __X32_SYSCALL_BIT + 323; +pub 
const SYS_membarrier: c_long = __X32_SYSCALL_BIT + 324; +pub const SYS_mlock2: c_long = __X32_SYSCALL_BIT + 325; +pub const SYS_copy_file_range: c_long = __X32_SYSCALL_BIT + 326; +pub const SYS_pkey_mprotect: c_long = __X32_SYSCALL_BIT + 329; +pub const SYS_pkey_alloc: c_long = __X32_SYSCALL_BIT + 330; +pub const SYS_pkey_free: c_long = __X32_SYSCALL_BIT + 331; +pub const SYS_statx: c_long = __X32_SYSCALL_BIT + 332; +pub const SYS_rseq: c_long = __X32_SYSCALL_BIT + 334; +pub const SYS_pidfd_send_signal: c_long = __X32_SYSCALL_BIT + 424; +pub const SYS_io_uring_setup: c_long = __X32_SYSCALL_BIT + 425; +pub const SYS_io_uring_enter: c_long = __X32_SYSCALL_BIT + 426; +pub const SYS_io_uring_register: c_long = __X32_SYSCALL_BIT + 427; +pub const SYS_open_tree: c_long = __X32_SYSCALL_BIT + 428; +pub const SYS_move_mount: c_long = __X32_SYSCALL_BIT + 429; +pub const SYS_fsopen: c_long = __X32_SYSCALL_BIT + 430; +pub const SYS_fsconfig: c_long = __X32_SYSCALL_BIT + 431; +pub const SYS_fsmount: c_long = __X32_SYSCALL_BIT + 432; +pub const SYS_fspick: c_long = __X32_SYSCALL_BIT + 433; +pub const SYS_pidfd_open: c_long = __X32_SYSCALL_BIT + 434; +pub const SYS_clone3: c_long = __X32_SYSCALL_BIT + 435; +pub const SYS_close_range: c_long = __X32_SYSCALL_BIT + 436; +pub const SYS_openat2: c_long = __X32_SYSCALL_BIT + 437; +pub const SYS_pidfd_getfd: c_long = __X32_SYSCALL_BIT + 438; +pub const SYS_faccessat2: c_long = __X32_SYSCALL_BIT + 439; +pub const SYS_process_madvise: c_long = __X32_SYSCALL_BIT + 440; +pub const SYS_epoll_pwait2: c_long = __X32_SYSCALL_BIT + 441; +pub const SYS_mount_setattr: c_long = __X32_SYSCALL_BIT + 442; +pub const SYS_quotactl_fd: c_long = __X32_SYSCALL_BIT + 443; +pub const SYS_landlock_create_ruleset: c_long = __X32_SYSCALL_BIT + 444; +pub const SYS_landlock_add_rule: c_long = __X32_SYSCALL_BIT + 445; +pub const SYS_landlock_restrict_self: c_long = __X32_SYSCALL_BIT + 446; +pub const SYS_memfd_secret: c_long = __X32_SYSCALL_BIT + 447; +pub const SYS_process_mrelease: c_long = __X32_SYSCALL_BIT + 448; +pub const SYS_futex_waitv: c_long = __X32_SYSCALL_BIT + 449; +pub const SYS_set_mempolicy_home_node: c_long = __X32_SYSCALL_BIT + 450; +pub const SYS_fchmodat2: c_long = __X32_SYSCALL_BIT + 452; +pub const SYS_rt_sigaction: c_long = __X32_SYSCALL_BIT + 512; +pub const SYS_rt_sigreturn: c_long = __X32_SYSCALL_BIT + 513; +pub const SYS_ioctl: c_long = __X32_SYSCALL_BIT + 514; +pub const SYS_readv: c_long = __X32_SYSCALL_BIT + 515; +pub const SYS_writev: c_long = __X32_SYSCALL_BIT + 516; +pub const SYS_recvfrom: c_long = __X32_SYSCALL_BIT + 517; +pub const SYS_sendmsg: c_long = __X32_SYSCALL_BIT + 518; +pub const SYS_recvmsg: c_long = __X32_SYSCALL_BIT + 519; +pub const SYS_execve: c_long = __X32_SYSCALL_BIT + 520; +pub const SYS_ptrace: c_long = __X32_SYSCALL_BIT + 521; +pub const SYS_rt_sigpending: c_long = __X32_SYSCALL_BIT + 522; +pub const SYS_rt_sigtimedwait: c_long = __X32_SYSCALL_BIT + 523; +pub const SYS_rt_sigqueueinfo: c_long = __X32_SYSCALL_BIT + 524; +pub const SYS_sigaltstack: c_long = __X32_SYSCALL_BIT + 525; +pub const SYS_timer_create: c_long = __X32_SYSCALL_BIT + 526; +pub const SYS_mq_notify: c_long = __X32_SYSCALL_BIT + 527; +pub const SYS_kexec_load: c_long = __X32_SYSCALL_BIT + 528; +pub const SYS_waitid: c_long = __X32_SYSCALL_BIT + 529; +pub const SYS_set_robust_list: c_long = __X32_SYSCALL_BIT + 530; +pub const SYS_get_robust_list: c_long = __X32_SYSCALL_BIT + 531; +pub const SYS_vmsplice: c_long = __X32_SYSCALL_BIT + 532; +pub const SYS_move_pages: 
c_long = __X32_SYSCALL_BIT + 533; +pub const SYS_preadv: c_long = __X32_SYSCALL_BIT + 534; +pub const SYS_pwritev: c_long = __X32_SYSCALL_BIT + 535; +pub const SYS_rt_tgsigqueueinfo: c_long = __X32_SYSCALL_BIT + 536; +pub const SYS_recvmmsg: c_long = __X32_SYSCALL_BIT + 537; +pub const SYS_sendmmsg: c_long = __X32_SYSCALL_BIT + 538; +pub const SYS_process_vm_readv: c_long = __X32_SYSCALL_BIT + 539; +pub const SYS_process_vm_writev: c_long = __X32_SYSCALL_BIT + 540; +pub const SYS_setsockopt: c_long = __X32_SYSCALL_BIT + 541; +pub const SYS_getsockopt: c_long = __X32_SYSCALL_BIT + 542; +pub const SYS_io_setup: c_long = __X32_SYSCALL_BIT + 543; +pub const SYS_io_submit: c_long = __X32_SYSCALL_BIT + 544; +pub const SYS_execveat: c_long = __X32_SYSCALL_BIT + 545; +pub const SYS_preadv2: c_long = __X32_SYSCALL_BIT + 546; +pub const SYS_pwritev2: c_long = __X32_SYSCALL_BIT + 547; diff --git a/vendor/libc/src/unix/linux_like/linux/gnu/mod.rs b/vendor/libc/src/unix/linux_like/linux/gnu/mod.rs new file mode 100644 index 00000000000000..17d11d27a4deab --- /dev/null +++ b/vendor/libc/src/unix/linux_like/linux/gnu/mod.rs @@ -0,0 +1,1382 @@ +use crate::off64_t; +use crate::prelude::*; + +pub type pthread_t = c_ulong; +pub type __priority_which_t = c_uint; +pub type __rlimit_resource_t = c_uint; +pub type Lmid_t = c_long; +pub type regoff_t = c_int; +pub type __kernel_rwf_t = c_int; + +cfg_if! { + if #[cfg(doc)] { + // Used in `linux::arch` to define ioctl constants. + pub(crate) type Ioctl = c_ulong; + } else { + #[doc(hidden)] + pub type Ioctl = c_ulong; + } +} + +s! { + pub struct aiocb { + pub aio_fildes: c_int, + pub aio_lio_opcode: c_int, + pub aio_reqprio: c_int, + pub aio_buf: *mut c_void, + pub aio_nbytes: size_t, + pub aio_sigevent: crate::sigevent, + __next_prio: *mut aiocb, + __abs_prio: c_int, + __policy: c_int, + __error_code: c_int, + __return_value: ssize_t, + pub aio_offset: off_t, + #[cfg(all( + not(gnu_file_offset_bits64), + not(target_arch = "x86_64"), + target_pointer_width = "32" + ))] + __unused1: [c_char; 4], + __glibc_reserved: [c_char; 32], + } + + pub struct __exit_status { + pub e_termination: c_short, + pub e_exit: c_short, + } + + pub struct __timeval { + pub tv_sec: i32, + pub tv_usec: i32, + } + + pub struct glob64_t { + pub gl_pathc: size_t, + pub gl_pathv: *mut *mut c_char, + pub gl_offs: size_t, + pub gl_flags: c_int, + + __unused1: *mut c_void, + __unused2: *mut c_void, + __unused3: *mut c_void, + __unused4: *mut c_void, + __unused5: *mut c_void, + } + + pub struct msghdr { + pub msg_name: *mut c_void, + pub msg_namelen: crate::socklen_t, + pub msg_iov: *mut crate::iovec, + pub msg_iovlen: size_t, + pub msg_control: *mut c_void, + pub msg_controllen: size_t, + pub msg_flags: c_int, + } + + pub struct cmsghdr { + pub cmsg_len: size_t, + pub cmsg_level: c_int, + pub cmsg_type: c_int, + } + + pub struct termios { + pub c_iflag: crate::tcflag_t, + pub c_oflag: crate::tcflag_t, + pub c_cflag: crate::tcflag_t, + pub c_lflag: crate::tcflag_t, + pub c_line: crate::cc_t, + pub c_cc: [crate::cc_t; crate::NCCS], + #[cfg(not(any( + target_arch = "sparc", + target_arch = "sparc64", + target_arch = "mips", + target_arch = "mips32r6", + target_arch = "mips64", + target_arch = "mips64r6" + )))] + pub c_ispeed: crate::speed_t, + #[cfg(not(any( + target_arch = "sparc", + target_arch = "sparc64", + target_arch = "mips", + target_arch = "mips32r6", + target_arch = "mips64", + target_arch = "mips64r6" + )))] + pub c_ospeed: crate::speed_t, + } + + pub struct mallinfo { + pub arena: c_int, 
+ pub ordblks: c_int, + pub smblks: c_int, + pub hblks: c_int, + pub hblkhd: c_int, + pub usmblks: c_int, + pub fsmblks: c_int, + pub uordblks: c_int, + pub fordblks: c_int, + pub keepcost: c_int, + } + + pub struct mallinfo2 { + pub arena: size_t, + pub ordblks: size_t, + pub smblks: size_t, + pub hblks: size_t, + pub hblkhd: size_t, + pub usmblks: size_t, + pub fsmblks: size_t, + pub uordblks: size_t, + pub fordblks: size_t, + pub keepcost: size_t, + } + + pub struct nl_pktinfo { + pub group: u32, + } + + pub struct nl_mmap_req { + pub nm_block_size: c_uint, + pub nm_block_nr: c_uint, + pub nm_frame_size: c_uint, + pub nm_frame_nr: c_uint, + } + + pub struct nl_mmap_hdr { + pub nm_status: c_uint, + pub nm_len: c_uint, + pub nm_group: u32, + pub nm_pid: u32, + pub nm_uid: u32, + pub nm_gid: u32, + } + + pub struct rtentry { + pub rt_pad1: c_ulong, + pub rt_dst: crate::sockaddr, + pub rt_gateway: crate::sockaddr, + pub rt_genmask: crate::sockaddr, + pub rt_flags: c_ushort, + pub rt_pad2: c_short, + pub rt_pad3: c_ulong, + pub rt_tos: c_uchar, + pub rt_class: c_uchar, + #[cfg(target_pointer_width = "64")] + pub rt_pad4: [c_short; 3usize], + #[cfg(not(target_pointer_width = "64"))] + pub rt_pad4: c_short, + pub rt_metric: c_short, + pub rt_dev: *mut c_char, + pub rt_mtu: c_ulong, + pub rt_window: c_ulong, + pub rt_irtt: c_ushort, + } + + pub struct ntptimeval { + pub time: crate::timeval, + pub maxerror: c_long, + pub esterror: c_long, + pub tai: c_long, + pub __glibc_reserved1: c_long, + pub __glibc_reserved2: c_long, + pub __glibc_reserved3: c_long, + pub __glibc_reserved4: c_long, + } + + pub struct regex_t { + __buffer: *mut c_void, + __allocated: size_t, + __used: size_t, + __syntax: c_ulong, + __fastmap: *mut c_char, + __translate: *mut c_char, + __re_nsub: size_t, + __bitfield: u8, + } + + pub struct Elf64_Chdr { + pub ch_type: crate::Elf64_Word, + pub ch_reserved: crate::Elf64_Word, + pub ch_size: crate::Elf64_Xword, + pub ch_addralign: crate::Elf64_Xword, + } + + pub struct Elf32_Chdr { + pub ch_type: crate::Elf32_Word, + pub ch_size: crate::Elf32_Word, + pub ch_addralign: crate::Elf32_Word, + } + + pub struct seminfo { + pub semmap: c_int, + pub semmni: c_int, + pub semmns: c_int, + pub semmnu: c_int, + pub semmsl: c_int, + pub semopm: c_int, + pub semume: c_int, + pub semusz: c_int, + pub semvmx: c_int, + pub semaem: c_int, + } + + pub struct ptrace_peeksiginfo_args { + pub off: crate::__u64, + pub flags: crate::__u32, + pub nr: crate::__s32, + } + + pub struct __c_anonymous_ptrace_syscall_info_entry { + pub nr: crate::__u64, + pub args: [crate::__u64; 6], + } + + pub struct __c_anonymous_ptrace_syscall_info_exit { + pub sval: crate::__s64, + pub is_error: crate::__u8, + } + + pub struct __c_anonymous_ptrace_syscall_info_seccomp { + pub nr: crate::__u64, + pub args: [crate::__u64; 6], + pub ret_data: crate::__u32, + } + + pub struct ptrace_syscall_info { + pub op: crate::__u8, + pub pad: [crate::__u8; 3], + pub arch: crate::__u32, + pub instruction_pointer: crate::__u64, + pub stack_pointer: crate::__u64, + pub u: __c_anonymous_ptrace_syscall_info_data, + } + + pub struct ptrace_sud_config { + pub mode: crate::__u64, + pub selector: crate::__u64, + pub offset: crate::__u64, + pub len: crate::__u64, + } + + pub struct iocb { + pub aio_data: crate::__u64, + #[cfg(target_endian = "little")] + pub aio_key: crate::__u32, + #[cfg(target_endian = "little")] + pub aio_rw_flags: crate::__kernel_rwf_t, + #[cfg(target_endian = "big")] + pub aio_rw_flags: crate::__kernel_rwf_t, + 
#[cfg(target_endian = "big")] + pub aio_key: crate::__u32, + pub aio_lio_opcode: crate::__u16, + pub aio_reqprio: crate::__s16, + pub aio_fildes: crate::__u32, + pub aio_buf: crate::__u64, + pub aio_nbytes: crate::__u64, + pub aio_offset: crate::__s64, + aio_reserved2: crate::__u64, + pub aio_flags: crate::__u32, + pub aio_resfd: crate::__u32, + } + + // netinet/tcp.h + + pub struct tcp_info { + pub tcpi_state: u8, + pub tcpi_ca_state: u8, + pub tcpi_retransmits: u8, + pub tcpi_probes: u8, + pub tcpi_backoff: u8, + pub tcpi_options: u8, + /// This contains the bitfields `tcpi_snd_wscale` and `tcpi_rcv_wscale`. + /// Each is 4 bits. + pub tcpi_snd_rcv_wscale: u8, + pub tcpi_rto: u32, + pub tcpi_ato: u32, + pub tcpi_snd_mss: u32, + pub tcpi_rcv_mss: u32, + pub tcpi_unacked: u32, + pub tcpi_sacked: u32, + pub tcpi_lost: u32, + pub tcpi_retrans: u32, + pub tcpi_fackets: u32, + pub tcpi_last_data_sent: u32, + pub tcpi_last_ack_sent: u32, + pub tcpi_last_data_recv: u32, + pub tcpi_last_ack_recv: u32, + pub tcpi_pmtu: u32, + pub tcpi_rcv_ssthresh: u32, + pub tcpi_rtt: u32, + pub tcpi_rttvar: u32, + pub tcpi_snd_ssthresh: u32, + pub tcpi_snd_cwnd: u32, + pub tcpi_advmss: u32, + pub tcpi_reordering: u32, + pub tcpi_rcv_rtt: u32, + pub tcpi_rcv_space: u32, + pub tcpi_total_retrans: u32, + } + + pub struct fanotify_event_info_pidfd { + pub hdr: crate::fanotify_event_info_header, + pub pidfd: crate::__s32, + } + + pub struct fanotify_event_info_error { + pub hdr: crate::fanotify_event_info_header, + pub error: crate::__s32, + pub error_count: crate::__u32, + } + + // FIXME(1.0) this is actually a union + #[cfg_attr(target_pointer_width = "32", repr(align(4)))] + #[cfg_attr(target_pointer_width = "64", repr(align(8)))] + pub struct sem_t { + #[cfg(target_pointer_width = "32")] + __size: [c_char; 16], + #[cfg(target_pointer_width = "64")] + __size: [c_char; 32], + } + + pub struct mbstate_t { + __count: c_int, + __wchb: [c_char; 4], + } + + pub struct fpos64_t { + __pos: off64_t, + __state: crate::mbstate_t, + } + + pub struct fpos_t { + #[cfg(not(gnu_file_offset_bits64))] + __pos: off_t, + #[cfg(gnu_file_offset_bits64)] + __pos: off64_t, + __state: crate::mbstate_t, + } + + // linux x32 compatibility + // See https://sourceware.org/bugzilla/show_bug.cgi?id=16437 + pub struct timespec { + pub tv_sec: time_t, + #[cfg(all(gnu_time_bits64, target_endian = "big"))] + __pad: i32, + #[cfg(not(all(target_arch = "x86_64", target_pointer_width = "32")))] + pub tv_nsec: c_long, + #[cfg(all(target_arch = "x86_64", target_pointer_width = "32"))] + pub tv_nsec: i64, + #[cfg(all(gnu_time_bits64, target_endian = "little"))] + __pad: i32, + } +} + +impl siginfo_t { + pub unsafe fn si_addr(&self) -> *mut c_void { + #[repr(C)] + struct siginfo_sigfault { + _si_signo: c_int, + _si_errno: c_int, + _si_code: c_int, + si_addr: *mut c_void, + } + (*(self as *const siginfo_t).cast::()).si_addr + } + + pub unsafe fn si_value(&self) -> crate::sigval { + #[repr(C)] + struct siginfo_timer { + _si_signo: c_int, + _si_errno: c_int, + _si_code: c_int, + _si_tid: c_int, + _si_overrun: c_int, + si_sigval: crate::sigval, + } + (*(self as *const siginfo_t).cast::()).si_sigval + } +} + +// Internal, for casts to access union fields +#[repr(C)] +struct sifields_sigchld { + si_pid: crate::pid_t, + si_uid: crate::uid_t, + si_status: c_int, + si_utime: c_long, + si_stime: c_long, +} +impl Copy for sifields_sigchld {} +impl Clone for sifields_sigchld { + fn clone(&self) -> sifields_sigchld { + *self + } +} + +// Internal, for casts to access 
union fields +#[repr(C)] +union sifields { + _align_pointer: *mut c_void, + sigchld: sifields_sigchld, +} + +// Internal, for casts to access union fields. Note that some variants +// of sifields start with a pointer, which makes the alignment of +// sifields vary on 32-bit and 64-bit architectures. +#[repr(C)] +struct siginfo_f { + _siginfo_base: [c_int; 3], + sifields: sifields, +} + +impl siginfo_t { + unsafe fn sifields(&self) -> &sifields { + &(*(self as *const siginfo_t).cast::()).sifields + } + + pub unsafe fn si_pid(&self) -> crate::pid_t { + self.sifields().sigchld.si_pid + } + + pub unsafe fn si_uid(&self) -> crate::uid_t { + self.sifields().sigchld.si_uid + } + + pub unsafe fn si_status(&self) -> c_int { + self.sifields().sigchld.si_status + } + + pub unsafe fn si_utime(&self) -> c_long { + self.sifields().sigchld.si_utime + } + + pub unsafe fn si_stime(&self) -> c_long { + self.sifields().sigchld.si_stime + } +} + +s_no_extra_traits! { + pub union __c_anonymous_ptrace_syscall_info_data { + pub entry: __c_anonymous_ptrace_syscall_info_entry, + pub exit: __c_anonymous_ptrace_syscall_info_exit, + pub seccomp: __c_anonymous_ptrace_syscall_info_seccomp, + } + + pub struct utmpx { + pub ut_type: c_short, + pub ut_pid: crate::pid_t, + pub ut_line: [c_char; __UT_LINESIZE], + pub ut_id: [c_char; 4], + + pub ut_user: [c_char; __UT_NAMESIZE], + pub ut_host: [c_char; __UT_HOSTSIZE], + pub ut_exit: __exit_status, + + #[cfg(any( + target_arch = "aarch64", + target_arch = "s390x", + target_arch = "loongarch64", + all(target_pointer_width = "32", not(target_arch = "x86_64")) + ))] + pub ut_session: c_long, + #[cfg(any( + target_arch = "aarch64", + target_arch = "s390x", + target_arch = "loongarch64", + all(target_pointer_width = "32", not(target_arch = "x86_64")) + ))] + pub ut_tv: crate::timeval, + + #[cfg(not(any( + target_arch = "aarch64", + target_arch = "s390x", + target_arch = "loongarch64", + all(target_pointer_width = "32", not(target_arch = "x86_64")) + )))] + pub ut_session: i32, + #[cfg(not(any( + target_arch = "aarch64", + target_arch = "s390x", + target_arch = "loongarch64", + all(target_pointer_width = "32", not(target_arch = "x86_64")) + )))] + pub ut_tv: __timeval, + + pub ut_addr_v6: [i32; 4], + __glibc_reserved: [c_char; 20], + } +} + +cfg_if! 
{ + if #[cfg(feature = "extra_traits")] { + impl PartialEq for utmpx { + fn eq(&self, other: &utmpx) -> bool { + self.ut_type == other.ut_type + && self.ut_pid == other.ut_pid + && self.ut_line == other.ut_line + && self.ut_id == other.ut_id + && self.ut_user == other.ut_user + && self + .ut_host + .iter() + .zip(other.ut_host.iter()) + .all(|(a, b)| a == b) + && self.ut_exit == other.ut_exit + && self.ut_session == other.ut_session + && self.ut_tv == other.ut_tv + && self.ut_addr_v6 == other.ut_addr_v6 + && self.__glibc_reserved == other.__glibc_reserved + } + } + + impl Eq for utmpx {} + + impl hash::Hash for utmpx { + fn hash<H: hash::Hasher>(&self, state: &mut H) { + self.ut_type.hash(state); + self.ut_pid.hash(state); + self.ut_line.hash(state); + self.ut_id.hash(state); + self.ut_user.hash(state); + self.ut_host.hash(state); + self.ut_exit.hash(state); + self.ut_session.hash(state); + self.ut_tv.hash(state); + self.ut_addr_v6.hash(state); + self.__glibc_reserved.hash(state); + } + } + + impl PartialEq for __c_anonymous_ptrace_syscall_info_data { + fn eq(&self, other: &__c_anonymous_ptrace_syscall_info_data) -> bool { + unsafe { + self.entry == other.entry + || self.exit == other.exit + || self.seccomp == other.seccomp + } + } + } + + impl Eq for __c_anonymous_ptrace_syscall_info_data {} + + impl hash::Hash for __c_anonymous_ptrace_syscall_info_data { + fn hash<H: hash::Hasher>(&self, state: &mut H) { + unsafe { + self.entry.hash(state); + self.exit.hash(state); + self.seccomp.hash(state); + } + } + } + } +} + +// include/uapi/asm-generic/hugetlb_encode.h +pub const HUGETLB_FLAG_ENCODE_SHIFT: c_int = 26; +pub const HUGETLB_FLAG_ENCODE_MASK: c_int = 0x3f; + +pub const HUGETLB_FLAG_ENCODE_64KB: c_int = 16 << HUGETLB_FLAG_ENCODE_SHIFT; +pub const HUGETLB_FLAG_ENCODE_512KB: c_int = 19 << HUGETLB_FLAG_ENCODE_SHIFT; +pub const HUGETLB_FLAG_ENCODE_1MB: c_int = 20 << HUGETLB_FLAG_ENCODE_SHIFT; +pub const HUGETLB_FLAG_ENCODE_2MB: c_int = 21 << HUGETLB_FLAG_ENCODE_SHIFT; +pub const HUGETLB_FLAG_ENCODE_8MB: c_int = 23 << HUGETLB_FLAG_ENCODE_SHIFT; +pub const HUGETLB_FLAG_ENCODE_16MB: c_int = 24 << HUGETLB_FLAG_ENCODE_SHIFT; +pub const HUGETLB_FLAG_ENCODE_32MB: c_int = 25 << HUGETLB_FLAG_ENCODE_SHIFT; +pub const HUGETLB_FLAG_ENCODE_256MB: c_int = 28 << HUGETLB_FLAG_ENCODE_SHIFT; +pub const HUGETLB_FLAG_ENCODE_512MB: c_int = 29 << HUGETLB_FLAG_ENCODE_SHIFT; +pub const HUGETLB_FLAG_ENCODE_1GB: c_int = 30 << HUGETLB_FLAG_ENCODE_SHIFT; +pub const HUGETLB_FLAG_ENCODE_2GB: c_int = 31 << HUGETLB_FLAG_ENCODE_SHIFT; +pub const HUGETLB_FLAG_ENCODE_16GB: c_int = 34 << HUGETLB_FLAG_ENCODE_SHIFT; + +// include/uapi/linux/mman.h +/* + * Huge page size encoding when MAP_HUGETLB is specified, and a huge page + * size other than the default is desired. See hugetlb_encode.h. + * All known huge page size encodings are provided here. It is the + * responsibility of the application to know which sizes are supported on + * the running system. See mmap(2) man page for details. 
+ */ +pub const MAP_HUGE_SHIFT: c_int = HUGETLB_FLAG_ENCODE_SHIFT; +pub const MAP_HUGE_MASK: c_int = HUGETLB_FLAG_ENCODE_MASK; + +pub const MAP_HUGE_64KB: c_int = HUGETLB_FLAG_ENCODE_64KB; +pub const MAP_HUGE_512KB: c_int = HUGETLB_FLAG_ENCODE_512KB; +pub const MAP_HUGE_1MB: c_int = HUGETLB_FLAG_ENCODE_1MB; +pub const MAP_HUGE_2MB: c_int = HUGETLB_FLAG_ENCODE_2MB; +pub const MAP_HUGE_8MB: c_int = HUGETLB_FLAG_ENCODE_8MB; +pub const MAP_HUGE_16MB: c_int = HUGETLB_FLAG_ENCODE_16MB; +pub const MAP_HUGE_32MB: c_int = HUGETLB_FLAG_ENCODE_32MB; +pub const MAP_HUGE_256MB: c_int = HUGETLB_FLAG_ENCODE_256MB; +pub const MAP_HUGE_512MB: c_int = HUGETLB_FLAG_ENCODE_512MB; +pub const MAP_HUGE_1GB: c_int = HUGETLB_FLAG_ENCODE_1GB; +pub const MAP_HUGE_2GB: c_int = HUGETLB_FLAG_ENCODE_2GB; +pub const MAP_HUGE_16GB: c_int = HUGETLB_FLAG_ENCODE_16GB; + +pub const PRIO_PROCESS: crate::__priority_which_t = 0; +pub const PRIO_PGRP: crate::__priority_which_t = 1; +pub const PRIO_USER: crate::__priority_which_t = 2; + +pub const MS_RMT_MASK: c_ulong = 0x02800051; + +pub const __UT_LINESIZE: usize = 32; +pub const __UT_NAMESIZE: usize = 32; +pub const __UT_HOSTSIZE: usize = 256; +pub const EMPTY: c_short = 0; +pub const RUN_LVL: c_short = 1; +pub const BOOT_TIME: c_short = 2; +pub const NEW_TIME: c_short = 3; +pub const OLD_TIME: c_short = 4; +pub const INIT_PROCESS: c_short = 5; +pub const LOGIN_PROCESS: c_short = 6; +pub const USER_PROCESS: c_short = 7; +pub const DEAD_PROCESS: c_short = 8; +pub const ACCOUNTING: c_short = 9; + +// dlfcn.h +pub const LM_ID_BASE: c_long = 0; +pub const LM_ID_NEWLM: c_long = -1; + +pub const RTLD_DI_LMID: c_int = 1; +pub const RTLD_DI_LINKMAP: c_int = 2; +pub const RTLD_DI_CONFIGADDR: c_int = 3; +pub const RTLD_DI_SERINFO: c_int = 4; +pub const RTLD_DI_SERINFOSIZE: c_int = 5; +pub const RTLD_DI_ORIGIN: c_int = 6; +pub const RTLD_DI_PROFILENAME: c_int = 7; +pub const RTLD_DI_PROFILEOUT: c_int = 8; +pub const RTLD_DI_TLS_MODID: c_int = 9; +pub const RTLD_DI_TLS_DATA: c_int = 10; + +pub const SOCK_NONBLOCK: c_int = O_NONBLOCK; + +pub const SOL_RXRPC: c_int = 272; +pub const SOL_PPPOL2TP: c_int = 273; +pub const SOL_PNPIPE: c_int = 275; +pub const SOL_RDS: c_int = 276; +pub const SOL_IUCV: c_int = 277; +pub const SOL_CAIF: c_int = 278; +pub const SOL_NFC: c_int = 280; + +pub const MSG_TRYHARD: c_int = 4; + +pub const LC_PAPER: c_int = 7; +pub const LC_NAME: c_int = 8; +pub const LC_ADDRESS: c_int = 9; +pub const LC_TELEPHONE: c_int = 10; +pub const LC_MEASUREMENT: c_int = 11; +pub const LC_IDENTIFICATION: c_int = 12; +pub const LC_PAPER_MASK: c_int = 1 << LC_PAPER; +pub const LC_NAME_MASK: c_int = 1 << LC_NAME; +pub const LC_ADDRESS_MASK: c_int = 1 << LC_ADDRESS; +pub const LC_TELEPHONE_MASK: c_int = 1 << LC_TELEPHONE; +pub const LC_MEASUREMENT_MASK: c_int = 1 << LC_MEASUREMENT; +pub const LC_IDENTIFICATION_MASK: c_int = 1 << LC_IDENTIFICATION; +pub const LC_ALL_MASK: c_int = crate::LC_CTYPE_MASK + | crate::LC_NUMERIC_MASK + | crate::LC_TIME_MASK + | crate::LC_COLLATE_MASK + | crate::LC_MONETARY_MASK + | crate::LC_MESSAGES_MASK + | LC_PAPER_MASK + | LC_NAME_MASK + | LC_ADDRESS_MASK + | LC_TELEPHONE_MASK + | LC_MEASUREMENT_MASK + | LC_IDENTIFICATION_MASK; + +pub const ENOTSUP: c_int = EOPNOTSUPP; + +pub const SOCK_SEQPACKET: c_int = 5; +pub const SOCK_DCCP: c_int = 6; +#[deprecated(since = "0.2.70", note = "AF_PACKET must be used instead")] +pub const SOCK_PACKET: c_int = 10; + +pub const AF_IB: c_int = 27; +pub const AF_MPLS: c_int = 28; +pub const AF_NFC: c_int = 39; +pub const 
AF_VSOCK: c_int = 40; +pub const AF_XDP: c_int = 44; +pub const PF_IB: c_int = AF_IB; +pub const PF_MPLS: c_int = AF_MPLS; +pub const PF_NFC: c_int = AF_NFC; +pub const PF_VSOCK: c_int = AF_VSOCK; +pub const PF_XDP: c_int = AF_XDP; + +pub const SIGEV_THREAD_ID: c_int = 4; + +pub const BUFSIZ: c_uint = 8192; +pub const TMP_MAX: c_uint = 238328; +pub const FOPEN_MAX: c_uint = 16; +pub const FILENAME_MAX: c_uint = 4096; +pub const POSIX_MADV_DONTNEED: c_int = 4; +pub const _CS_GNU_LIBC_VERSION: c_int = 2; +pub const _CS_GNU_LIBPTHREAD_VERSION: c_int = 3; +pub const _CS_V6_ENV: c_int = 1148; +pub const _CS_V7_ENV: c_int = 1149; +pub const _SC_EQUIV_CLASS_MAX: c_int = 41; +pub const _SC_CHARCLASS_NAME_MAX: c_int = 45; +pub const _SC_PII: c_int = 53; +pub const _SC_PII_XTI: c_int = 54; +pub const _SC_PII_SOCKET: c_int = 55; +pub const _SC_PII_INTERNET: c_int = 56; +pub const _SC_PII_OSI: c_int = 57; +pub const _SC_POLL: c_int = 58; +pub const _SC_SELECT: c_int = 59; +pub const _SC_PII_INTERNET_STREAM: c_int = 61; +pub const _SC_PII_INTERNET_DGRAM: c_int = 62; +pub const _SC_PII_OSI_COTS: c_int = 63; +pub const _SC_PII_OSI_CLTS: c_int = 64; +pub const _SC_PII_OSI_M: c_int = 65; +pub const _SC_T_IOV_MAX: c_int = 66; +pub const _SC_2_C_VERSION: c_int = 96; +pub const _SC_CHAR_BIT: c_int = 101; +pub const _SC_CHAR_MAX: c_int = 102; +pub const _SC_CHAR_MIN: c_int = 103; +pub const _SC_INT_MAX: c_int = 104; +pub const _SC_INT_MIN: c_int = 105; +pub const _SC_LONG_BIT: c_int = 106; +pub const _SC_WORD_BIT: c_int = 107; +pub const _SC_MB_LEN_MAX: c_int = 108; +pub const _SC_SSIZE_MAX: c_int = 110; +pub const _SC_SCHAR_MAX: c_int = 111; +pub const _SC_SCHAR_MIN: c_int = 112; +pub const _SC_SHRT_MAX: c_int = 113; +pub const _SC_SHRT_MIN: c_int = 114; +pub const _SC_UCHAR_MAX: c_int = 115; +pub const _SC_UINT_MAX: c_int = 116; +pub const _SC_ULONG_MAX: c_int = 117; +pub const _SC_USHRT_MAX: c_int = 118; +pub const _SC_NL_ARGMAX: c_int = 119; +pub const _SC_NL_LANGMAX: c_int = 120; +pub const _SC_NL_MSGMAX: c_int = 121; +pub const _SC_NL_NMAX: c_int = 122; +pub const _SC_NL_SETMAX: c_int = 123; +pub const _SC_NL_TEXTMAX: c_int = 124; +pub const _SC_BASE: c_int = 134; +pub const _SC_C_LANG_SUPPORT: c_int = 135; +pub const _SC_C_LANG_SUPPORT_R: c_int = 136; +pub const _SC_DEVICE_IO: c_int = 140; +pub const _SC_DEVICE_SPECIFIC: c_int = 141; +pub const _SC_DEVICE_SPECIFIC_R: c_int = 142; +pub const _SC_FD_MGMT: c_int = 143; +pub const _SC_FIFO: c_int = 144; +pub const _SC_PIPE: c_int = 145; +pub const _SC_FILE_ATTRIBUTES: c_int = 146; +pub const _SC_FILE_LOCKING: c_int = 147; +pub const _SC_FILE_SYSTEM: c_int = 148; +pub const _SC_MULTI_PROCESS: c_int = 150; +pub const _SC_SINGLE_PROCESS: c_int = 151; +pub const _SC_NETWORKING: c_int = 152; +pub const _SC_REGEX_VERSION: c_int = 156; +pub const _SC_SIGNALS: c_int = 158; +pub const _SC_SYSTEM_DATABASE: c_int = 162; +pub const _SC_SYSTEM_DATABASE_R: c_int = 163; +pub const _SC_USER_GROUPS: c_int = 166; +pub const _SC_USER_GROUPS_R: c_int = 167; +pub const _SC_LEVEL1_ICACHE_SIZE: c_int = 185; +pub const _SC_LEVEL1_ICACHE_ASSOC: c_int = 186; +pub const _SC_LEVEL1_ICACHE_LINESIZE: c_int = 187; +pub const _SC_LEVEL1_DCACHE_SIZE: c_int = 188; +pub const _SC_LEVEL1_DCACHE_ASSOC: c_int = 189; +pub const _SC_LEVEL1_DCACHE_LINESIZE: c_int = 190; +pub const _SC_LEVEL2_CACHE_SIZE: c_int = 191; +pub const _SC_LEVEL2_CACHE_ASSOC: c_int = 192; +pub const _SC_LEVEL2_CACHE_LINESIZE: c_int = 193; +pub const _SC_LEVEL3_CACHE_SIZE: c_int = 194; +pub const _SC_LEVEL3_CACHE_ASSOC: 
c_int = 195; +pub const _SC_LEVEL3_CACHE_LINESIZE: c_int = 196; +pub const _SC_LEVEL4_CACHE_SIZE: c_int = 197; +pub const _SC_LEVEL4_CACHE_ASSOC: c_int = 198; +pub const _SC_LEVEL4_CACHE_LINESIZE: c_int = 199; +pub const O_ACCMODE: c_int = 3; +pub const ST_RELATIME: c_ulong = 4096; +pub const NI_MAXHOST: crate::socklen_t = 1025; + +// Most `*_SUPER_MAGIC` constants are defined at the `linux_like` level; the +// following are only available on newer Linux versions than the versions +// currently used in CI in some configurations, so we define them here. +cfg_if! { + if #[cfg(not(target_arch = "s390x"))] { + pub const BINDERFS_SUPER_MAGIC: c_long = 0x6c6f6f70; + pub const XFS_SUPER_MAGIC: c_long = 0x58465342; + } else if #[cfg(target_arch = "s390x")] { + pub const BINDERFS_SUPER_MAGIC: c_uint = 0x6c6f6f70; + pub const XFS_SUPER_MAGIC: c_uint = 0x58465342; + } +} + +pub const CPU_SETSIZE: c_int = 0x400; + +pub const PTRACE_TRACEME: c_uint = 0; +pub const PTRACE_PEEKTEXT: c_uint = 1; +pub const PTRACE_PEEKDATA: c_uint = 2; +pub const PTRACE_PEEKUSER: c_uint = 3; +pub const PTRACE_POKETEXT: c_uint = 4; +pub const PTRACE_POKEDATA: c_uint = 5; +pub const PTRACE_POKEUSER: c_uint = 6; +pub const PTRACE_CONT: c_uint = 7; +pub const PTRACE_KILL: c_uint = 8; +pub const PTRACE_SINGLESTEP: c_uint = 9; +pub const PTRACE_ATTACH: c_uint = 16; +pub const PTRACE_SYSCALL: c_uint = 24; +pub const PTRACE_SETOPTIONS: c_uint = 0x4200; +pub const PTRACE_GETEVENTMSG: c_uint = 0x4201; +pub const PTRACE_GETSIGINFO: c_uint = 0x4202; +pub const PTRACE_SETSIGINFO: c_uint = 0x4203; +pub const PTRACE_GETREGSET: c_uint = 0x4204; +pub const PTRACE_SETREGSET: c_uint = 0x4205; +pub const PTRACE_SEIZE: c_uint = 0x4206; +pub const PTRACE_INTERRUPT: c_uint = 0x4207; +pub const PTRACE_LISTEN: c_uint = 0x4208; +pub const PTRACE_PEEKSIGINFO: c_uint = 0x4209; +pub const PTRACE_GETSIGMASK: c_uint = 0x420a; +pub const PTRACE_SETSIGMASK: c_uint = 0x420b; +pub const PTRACE_GET_SYSCALL_INFO: c_uint = 0x420e; +pub const PTRACE_SYSCALL_INFO_NONE: crate::__u8 = 0; +pub const PTRACE_SYSCALL_INFO_ENTRY: crate::__u8 = 1; +pub const PTRACE_SYSCALL_INFO_EXIT: crate::__u8 = 2; +pub const PTRACE_SYSCALL_INFO_SECCOMP: crate::__u8 = 3; +pub const PTRACE_SET_SYSCALL_USER_DISPATCH_CONFIG: crate::__u8 = 0x4210; +pub const PTRACE_GET_SYSCALL_USER_DISPATCH_CONFIG: crate::__u8 = 0x4211; + +// linux/rtnetlink.h +pub const TCA_PAD: c_ushort = 9; +pub const TCA_DUMP_INVISIBLE: c_ushort = 10; +pub const TCA_CHAIN: c_ushort = 11; +pub const TCA_HW_OFFLOAD: c_ushort = 12; + +pub const RTM_DELNETCONF: u16 = 81; +pub const RTM_NEWSTATS: u16 = 92; +pub const RTM_GETSTATS: u16 = 94; +pub const RTM_NEWCACHEREPORT: u16 = 96; + +pub const RTM_F_LOOKUP_TABLE: c_uint = 0x1000; +pub const RTM_F_FIB_MATCH: c_uint = 0x2000; + +pub const RTA_VIA: c_ushort = 18; +pub const RTA_NEWDST: c_ushort = 19; +pub const RTA_PREF: c_ushort = 20; +pub const RTA_ENCAP_TYPE: c_ushort = 21; +pub const RTA_ENCAP: c_ushort = 22; +pub const RTA_EXPIRES: c_ushort = 23; +pub const RTA_PAD: c_ushort = 24; +pub const RTA_UID: c_ushort = 25; +pub const RTA_TTL_PROPAGATE: c_ushort = 26; + +// linux/neighbor.h +pub const NTF_EXT_LEARNED: u8 = 0x10; +pub const NTF_OFFLOADED: u8 = 0x20; + +pub const NDA_MASTER: c_ushort = 9; +pub const NDA_LINK_NETNSID: c_ushort = 10; +pub const NDA_SRC_VNI: c_ushort = 11; + +// linux/personality.h +pub const UNAME26: c_int = 0x0020000; +pub const FDPIC_FUNCPTRS: c_int = 0x0080000; + +pub const MAX_LINKS: c_int = 32; + +pub const GENL_UNS_ADMIN_PERM: c_int = 0x10; + 
+pub const GENL_ID_VFS_DQUOT: c_int = crate::NLMSG_MIN_TYPE + 1; +pub const GENL_ID_PMCRAID: c_int = crate::NLMSG_MIN_TYPE + 2; + +pub const ELFOSABI_ARM_AEABI: u8 = 64; + +// linux/sched.h +pub const CLONE_NEWTIME: c_int = 0x80; +// DIFF(main): changed to `c_ulonglong` in e9abac9ac2 +pub const CLONE_CLEAR_SIGHAND: c_int = 0x100000000; +pub const CLONE_INTO_CGROUP: c_int = 0x200000000; + +// linux/keyctl.h +pub const KEYCTL_DH_COMPUTE: u32 = 23; +pub const KEYCTL_PKEY_QUERY: u32 = 24; +pub const KEYCTL_PKEY_ENCRYPT: u32 = 25; +pub const KEYCTL_PKEY_DECRYPT: u32 = 26; +pub const KEYCTL_PKEY_SIGN: u32 = 27; +pub const KEYCTL_PKEY_VERIFY: u32 = 28; +pub const KEYCTL_RESTRICT_KEYRING: u32 = 29; + +pub const KEYCTL_SUPPORTS_ENCRYPT: u32 = 0x01; +pub const KEYCTL_SUPPORTS_DECRYPT: u32 = 0x02; +pub const KEYCTL_SUPPORTS_SIGN: u32 = 0x04; +pub const KEYCTL_SUPPORTS_VERIFY: u32 = 0x08; +cfg_if! { + if #[cfg(not(any( + target_arch = "mips", + target_arch = "mips32r6", + target_arch = "mips64", + target_arch = "mips64r6" + )))] { + pub const KEYCTL_MOVE: u32 = 30; + pub const KEYCTL_CAPABILITIES: u32 = 31; + + pub const KEYCTL_CAPS0_CAPABILITIES: u32 = 0x01; + pub const KEYCTL_CAPS0_PERSISTENT_KEYRINGS: u32 = 0x02; + pub const KEYCTL_CAPS0_DIFFIE_HELLMAN: u32 = 0x04; + pub const KEYCTL_CAPS0_PUBLIC_KEY: u32 = 0x08; + pub const KEYCTL_CAPS0_BIG_KEY: u32 = 0x10; + pub const KEYCTL_CAPS0_INVALIDATE: u32 = 0x20; + pub const KEYCTL_CAPS0_RESTRICT_KEYRING: u32 = 0x40; + pub const KEYCTL_CAPS0_MOVE: u32 = 0x80; + pub const KEYCTL_CAPS1_NS_KEYRING_NAME: u32 = 0x01; + pub const KEYCTL_CAPS1_NS_KEY_TAG: u32 = 0x02; + } +} + +pub const M_MXFAST: c_int = 1; +pub const M_NLBLKS: c_int = 2; +pub const M_GRAIN: c_int = 3; +pub const M_KEEP: c_int = 4; +pub const M_TRIM_THRESHOLD: c_int = -1; +pub const M_TOP_PAD: c_int = -2; +pub const M_MMAP_THRESHOLD: c_int = -3; +pub const M_MMAP_MAX: c_int = -4; +pub const M_CHECK_ACTION: c_int = -5; +pub const M_PERTURB: c_int = -6; +pub const M_ARENA_TEST: c_int = -7; +pub const M_ARENA_MAX: c_int = -8; + +pub const SOMAXCONN: c_int = 4096; + +// linux/mount.h +pub const MOVE_MOUNT_F_SYMLINKS: c_uint = 0x00000001; +pub const MOVE_MOUNT_F_AUTOMOUNTS: c_uint = 0x00000002; +pub const MOVE_MOUNT_F_EMPTY_PATH: c_uint = 0x00000004; +pub const MOVE_MOUNT_T_SYMLINKS: c_uint = 0x00000010; +pub const MOVE_MOUNT_T_AUTOMOUNTS: c_uint = 0x00000020; +pub const MOVE_MOUNT_T_EMPTY_PATH: c_uint = 0x00000040; +pub const MOVE_MOUNT_SET_GROUP: c_uint = 0x00000100; +pub const MOVE_MOUNT_BENEATH: c_uint = 0x00000200; + +// sys/timex.h +pub const ADJ_OFFSET: c_uint = 0x0001; +pub const ADJ_FREQUENCY: c_uint = 0x0002; +pub const ADJ_MAXERROR: c_uint = 0x0004; +pub const ADJ_ESTERROR: c_uint = 0x0008; +pub const ADJ_STATUS: c_uint = 0x0010; +pub const ADJ_TIMECONST: c_uint = 0x0020; +pub const ADJ_TAI: c_uint = 0x0080; +pub const ADJ_SETOFFSET: c_uint = 0x0100; +pub const ADJ_MICRO: c_uint = 0x1000; +pub const ADJ_NANO: c_uint = 0x2000; +pub const ADJ_TICK: c_uint = 0x4000; +pub const ADJ_OFFSET_SINGLESHOT: c_uint = 0x8001; +pub const ADJ_OFFSET_SS_READ: c_uint = 0xa001; +pub const MOD_OFFSET: c_uint = ADJ_OFFSET; +pub const MOD_FREQUENCY: c_uint = ADJ_FREQUENCY; +pub const MOD_MAXERROR: c_uint = ADJ_MAXERROR; +pub const MOD_ESTERROR: c_uint = ADJ_ESTERROR; +pub const MOD_STATUS: c_uint = ADJ_STATUS; +pub const MOD_TIMECONST: c_uint = ADJ_TIMECONST; +pub const MOD_CLKB: c_uint = ADJ_TICK; +pub const MOD_CLKA: c_uint = ADJ_OFFSET_SINGLESHOT; +pub const MOD_TAI: c_uint = ADJ_TAI; +pub const MOD_MICRO: 
c_uint = ADJ_MICRO; +pub const MOD_NANO: c_uint = ADJ_NANO; +pub const STA_PLL: c_int = 0x0001; +pub const STA_PPSFREQ: c_int = 0x0002; +pub const STA_PPSTIME: c_int = 0x0004; +pub const STA_FLL: c_int = 0x0008; +pub const STA_INS: c_int = 0x0010; +pub const STA_DEL: c_int = 0x0020; +pub const STA_UNSYNC: c_int = 0x0040; +pub const STA_FREQHOLD: c_int = 0x0080; +pub const STA_PPSSIGNAL: c_int = 0x0100; +pub const STA_PPSJITTER: c_int = 0x0200; +pub const STA_PPSWANDER: c_int = 0x0400; +pub const STA_PPSERROR: c_int = 0x0800; +pub const STA_CLOCKERR: c_int = 0x1000; +pub const STA_NANO: c_int = 0x2000; +pub const STA_MODE: c_int = 0x4000; +pub const STA_CLK: c_int = 0x8000; +pub const STA_RONLY: c_int = STA_PPSSIGNAL + | STA_PPSJITTER + | STA_PPSWANDER + | STA_PPSERROR + | STA_CLOCKERR + | STA_NANO + | STA_MODE + | STA_CLK; +pub const NTP_API: c_int = 4; +pub const TIME_OK: c_int = 0; +pub const TIME_INS: c_int = 1; +pub const TIME_DEL: c_int = 2; +pub const TIME_OOP: c_int = 3; +pub const TIME_WAIT: c_int = 4; +pub const TIME_ERROR: c_int = 5; +pub const TIME_BAD: c_int = TIME_ERROR; +pub const MAXTC: c_long = 6; + +// Portable GLOB_* flags are defined at the `linux_like` level. +// The following are GNU extensions. +pub const GLOB_PERIOD: c_int = 1 << 7; +pub const GLOB_ALTDIRFUNC: c_int = 1 << 9; +pub const GLOB_BRACE: c_int = 1 << 10; +pub const GLOB_NOMAGIC: c_int = 1 << 11; +pub const GLOB_TILDE: c_int = 1 << 12; +pub const GLOB_ONLYDIR: c_int = 1 << 13; +pub const GLOB_TILDE_CHECK: c_int = 1 << 14; + +pub const MADV_COLLAPSE: c_int = 25; + +cfg_if! { + if #[cfg(any( + target_arch = "arm", + target_arch = "x86", + target_arch = "x86_64", + target_arch = "s390x", + target_arch = "riscv64", + target_arch = "riscv32" + ))] { + pub const PTHREAD_STACK_MIN: size_t = 16384; + } else if #[cfg(any(target_arch = "sparc", target_arch = "sparc64"))] { + pub const PTHREAD_STACK_MIN: size_t = 0x6000; + } else { + pub const PTHREAD_STACK_MIN: size_t = 131072; + } +} +pub const PTHREAD_MUTEX_ADAPTIVE_NP: c_int = 3; + +pub const REG_STARTEND: c_int = 4; + +pub const REG_EEND: c_int = 14; +pub const REG_ESIZE: c_int = 15; +pub const REG_ERPAREN: c_int = 16; + +extern "C" { + pub fn fgetspent_r( + fp: *mut crate::FILE, + spbuf: *mut crate::spwd, + buf: *mut c_char, + buflen: size_t, + spbufp: *mut *mut crate::spwd, + ) -> c_int; + pub fn sgetspent_r( + s: *const c_char, + spbuf: *mut crate::spwd, + buf: *mut c_char, + buflen: size_t, + spbufp: *mut *mut crate::spwd, + ) -> c_int; + pub fn getspent_r( + spbuf: *mut crate::spwd, + buf: *mut c_char, + buflen: size_t, + spbufp: *mut *mut crate::spwd, + ) -> c_int; + pub fn qsort_r( + base: *mut c_void, + num: size_t, + size: size_t, + compar: Option c_int>, + arg: *mut c_void, + ); + #[cfg_attr(gnu_time_bits64, link_name = "__sendmmsg64")] + pub fn sendmmsg( + sockfd: c_int, + msgvec: *mut crate::mmsghdr, + vlen: c_uint, + flags: c_int, + ) -> c_int; + #[cfg_attr(gnu_time_bits64, link_name = "__recvmmsg64")] + pub fn recvmmsg( + sockfd: c_int, + msgvec: *mut crate::mmsghdr, + vlen: c_uint, + flags: c_int, + timeout: *mut crate::timespec, + ) -> c_int; + + pub fn getrlimit64(resource: crate::__rlimit_resource_t, rlim: *mut crate::rlimit64) -> c_int; + pub fn setrlimit64(resource: crate::__rlimit_resource_t, rlim: *const crate::rlimit64) + -> c_int; + #[cfg_attr(gnu_file_offset_bits64, link_name = "getrlimit64")] + pub fn getrlimit(resource: crate::__rlimit_resource_t, rlim: *mut crate::rlimit) -> c_int; + #[cfg_attr(gnu_file_offset_bits64, link_name = 
"setrlimit64")] + pub fn setrlimit(resource: crate::__rlimit_resource_t, rlim: *const crate::rlimit) -> c_int; + #[cfg_attr(gnu_file_offset_bits64, link_name = "prlimit64")] + pub fn prlimit( + pid: crate::pid_t, + resource: crate::__rlimit_resource_t, + new_limit: *const crate::rlimit, + old_limit: *mut crate::rlimit, + ) -> c_int; + pub fn prlimit64( + pid: crate::pid_t, + resource: crate::__rlimit_resource_t, + new_limit: *const crate::rlimit64, + old_limit: *mut crate::rlimit64, + ) -> c_int; + pub fn utmpname(file: *const c_char) -> c_int; + pub fn utmpxname(file: *const c_char) -> c_int; + pub fn getutxent() -> *mut utmpx; + pub fn getutxid(ut: *const utmpx) -> *mut utmpx; + pub fn getutxline(ut: *const utmpx) -> *mut utmpx; + pub fn pututxline(ut: *const utmpx) -> *mut utmpx; + pub fn setutxent(); + pub fn endutxent(); + pub fn getpt() -> c_int; + pub fn mallopt(param: c_int, value: c_int) -> c_int; + #[cfg_attr(gnu_time_bits64, link_name = "__gettimeofday64")] + pub fn gettimeofday(tp: *mut crate::timeval, tz: *mut crate::timezone) -> c_int; + pub fn getentropy(buf: *mut c_void, buflen: size_t) -> c_int; + pub fn getrandom(buf: *mut c_void, buflen: size_t, flags: c_uint) -> ssize_t; + pub fn getauxval(type_: c_ulong) -> c_ulong; + + #[cfg_attr(gnu_time_bits64, link_name = "___adjtimex64")] + pub fn adjtimex(buf: *mut timex) -> c_int; + #[cfg_attr(gnu_time_bits64, link_name = "___adjtimex64")] + pub fn ntp_adjtime(buf: *mut timex) -> c_int; + #[cfg_attr(not(gnu_time_bits64), link_name = "ntp_gettimex")] + #[cfg_attr(gnu_time_bits64, link_name = "__ntp_gettime64")] + pub fn ntp_gettime(buf: *mut ntptimeval) -> c_int; + #[cfg_attr(gnu_time_bits64, link_name = "__clock_adjtime64")] + pub fn clock_adjtime(clk_id: crate::clockid_t, buf: *mut crate::timex) -> c_int; + + pub fn fanotify_mark( + fd: c_int, + flags: c_uint, + mask: u64, + dirfd: c_int, + path: *const c_char, + ) -> c_int; + #[cfg_attr(gnu_file_offset_bits64, link_name = "preadv64v2")] + pub fn preadv2( + fd: c_int, + iov: *const crate::iovec, + iovcnt: c_int, + offset: off_t, + flags: c_int, + ) -> ssize_t; + #[cfg_attr(gnu_file_offset_bits64, link_name = "pwritev64v2")] + pub fn pwritev2( + fd: c_int, + iov: *const crate::iovec, + iovcnt: c_int, + offset: off_t, + flags: c_int, + ) -> ssize_t; + pub fn preadv64v2( + fd: c_int, + iov: *const crate::iovec, + iovcnt: c_int, + offset: off64_t, + flags: c_int, + ) -> ssize_t; + pub fn pwritev64v2( + fd: c_int, + iov: *const crate::iovec, + iovcnt: c_int, + offset: off64_t, + flags: c_int, + ) -> ssize_t; + pub fn renameat2( + olddirfd: c_int, + oldpath: *const c_char, + newdirfd: c_int, + newpath: *const c_char, + flags: c_uint, + ) -> c_int; + + // Added in `glibc` 2.25 + pub fn explicit_bzero(s: *mut c_void, len: size_t); + // Added in `glibc` 2.29 + pub fn reallocarray(ptr: *mut c_void, nmemb: size_t, size: size_t) -> *mut c_void; + + pub fn ctermid(s: *mut c_char) -> *mut c_char; + pub fn backtrace(buf: *mut *mut c_void, sz: c_int) -> c_int; + pub fn backtrace_symbols(buffer: *const *mut c_void, len: c_int) -> *mut *mut c_char; + pub fn backtrace_symbols_fd(buffer: *const *mut c_void, len: c_int, fd: c_int); + #[cfg_attr(gnu_time_bits64, link_name = "__glob64_time64")] + pub fn glob64( + pattern: *const c_char, + flags: c_int, + errfunc: Option c_int>, + pglob: *mut glob64_t, + ) -> c_int; + #[cfg_attr(gnu_time_bits64, link_name = "__globfree64_time64")] + pub fn globfree64(pglob: *mut glob64_t); + pub fn ptrace(request: c_uint, ...) 
-> c_long; + pub fn pthread_attr_getaffinity_np( + attr: *const crate::pthread_attr_t, + cpusetsize: size_t, + cpuset: *mut crate::cpu_set_t, + ) -> c_int; + pub fn pthread_attr_setaffinity_np( + attr: *mut crate::pthread_attr_t, + cpusetsize: size_t, + cpuset: *const crate::cpu_set_t, + ) -> c_int; + pub fn getpriority(which: crate::__priority_which_t, who: crate::id_t) -> c_int; + pub fn setpriority(which: crate::__priority_which_t, who: crate::id_t, prio: c_int) -> c_int; + pub fn pthread_rwlockattr_getkind_np( + attr: *const crate::pthread_rwlockattr_t, + val: *mut c_int, + ) -> c_int; + pub fn pthread_rwlockattr_setkind_np( + attr: *mut crate::pthread_rwlockattr_t, + val: c_int, + ) -> c_int; + pub fn pthread_sigqueue(thread: crate::pthread_t, sig: c_int, value: crate::sigval) -> c_int; + pub fn mallinfo() -> crate::mallinfo; + pub fn mallinfo2() -> crate::mallinfo2; + pub fn malloc_stats(); + pub fn malloc_info(options: c_int, stream: *mut crate::FILE) -> c_int; + pub fn malloc_usable_size(ptr: *mut c_void) -> size_t; + pub fn getpwent_r( + pwd: *mut crate::passwd, + buf: *mut c_char, + buflen: size_t, + result: *mut *mut crate::passwd, + ) -> c_int; + pub fn getgrent_r( + grp: *mut crate::group, + buf: *mut c_char, + buflen: size_t, + result: *mut *mut crate::group, + ) -> c_int; + pub fn fgetpwent_r( + stream: *mut crate::FILE, + pwd: *mut crate::passwd, + buf: *mut c_char, + buflen: size_t, + result: *mut *mut crate::passwd, + ) -> c_int; + pub fn fgetgrent_r( + stream: *mut crate::FILE, + grp: *mut crate::group, + buf: *mut c_char, + buflen: size_t, + result: *mut *mut crate::group, + ) -> c_int; + + pub fn putpwent(p: *const crate::passwd, stream: *mut crate::FILE) -> c_int; + pub fn putgrent(grp: *const crate::group, stream: *mut crate::FILE) -> c_int; + + pub fn sethostid(hostid: c_long) -> c_int; + + pub fn memfd_create(name: *const c_char, flags: c_uint) -> c_int; + pub fn mlock2(addr: *const c_void, len: size_t, flags: c_uint) -> c_int; + + pub fn euidaccess(pathname: *const c_char, mode: c_int) -> c_int; + pub fn eaccess(pathname: *const c_char, mode: c_int) -> c_int; + + pub fn asctime_r(tm: *const crate::tm, buf: *mut c_char) -> *mut c_char; + #[cfg_attr(gnu_time_bits64, link_name = "__ctime64_r")] + pub fn ctime_r(timep: *const time_t, buf: *mut c_char) -> *mut c_char; + + pub fn dirname(path: *mut c_char) -> *mut c_char; + /// POSIX version of `basename(3)`, defined in `libgen.h`. + #[link_name = "__xpg_basename"] + pub fn posix_basename(path: *mut c_char) -> *mut c_char; + /// GNU version of `basename(3)`, defined in `string.h`. 
+ #[link_name = "basename"] + pub fn gnu_basename(path: *const c_char) -> *mut c_char; + pub fn dlmopen(lmid: Lmid_t, filename: *const c_char, flag: c_int) -> *mut c_void; + pub fn dlinfo(handle: *mut c_void, request: c_int, info: *mut c_void) -> c_int; + pub fn dladdr1( + addr: *const c_void, + info: *mut crate::Dl_info, + extra_info: *mut *mut c_void, + flags: c_int, + ) -> c_int; + pub fn dlvsym( + handle: *mut c_void, + symbol: *const c_char, + version: *const c_char, + ) -> *mut c_void; + pub fn malloc_trim(__pad: size_t) -> c_int; + pub fn gnu_get_libc_release() -> *const c_char; + pub fn gnu_get_libc_version() -> *const c_char; + + // posix/spawn.h + // Added in `glibc` 2.29 + pub fn posix_spawn_file_actions_addchdir_np( + actions: *mut crate::posix_spawn_file_actions_t, + path: *const c_char, + ) -> c_int; + // Added in `glibc` 2.29 + pub fn posix_spawn_file_actions_addfchdir_np( + actions: *mut crate::posix_spawn_file_actions_t, + fd: c_int, + ) -> c_int; + // Added in `glibc` 2.34 + pub fn posix_spawn_file_actions_addclosefrom_np( + actions: *mut crate::posix_spawn_file_actions_t, + from: c_int, + ) -> c_int; + // Added in `glibc` 2.35 + pub fn posix_spawn_file_actions_addtcsetpgrp_np( + actions: *mut crate::posix_spawn_file_actions_t, + tcfd: c_int, + ) -> c_int; + + // mntent.h + pub fn getmntent_r( + stream: *mut crate::FILE, + mntbuf: *mut crate::mntent, + buf: *mut c_char, + buflen: c_int, + ) -> *mut crate::mntent; + + pub fn execveat( + dirfd: c_int, + pathname: *const c_char, + argv: *const *mut c_char, + envp: *const *mut c_char, + flags: c_int, + ) -> c_int; + + // Added in `glibc` 2.34 + pub fn close_range(first: c_uint, last: c_uint, flags: c_int) -> c_int; + + pub fn mq_notify(mqdes: crate::mqd_t, sevp: *const crate::sigevent) -> c_int; + + #[cfg_attr(gnu_time_bits64, link_name = "__epoll_pwait2_time64")] + pub fn epoll_pwait2( + epfd: c_int, + events: *mut crate::epoll_event, + maxevents: c_int, + timeout: *const crate::timespec, + sigmask: *const crate::sigset_t, + ) -> c_int; + + pub fn mempcpy(dest: *mut c_void, src: *const c_void, n: size_t) -> *mut c_void; +} + +cfg_if! { + if #[cfg(any( + target_arch = "x86", + target_arch = "arm", + target_arch = "m68k", + target_arch = "csky", + target_arch = "mips", + target_arch = "mips32r6", + target_arch = "powerpc", + target_arch = "sparc", + target_arch = "riscv32" + ))] { + mod b32; + pub use self::b32::*; + } else if #[cfg(any( + target_arch = "x86_64", + target_arch = "aarch64", + target_arch = "powerpc64", + target_arch = "mips64", + target_arch = "mips64r6", + target_arch = "s390x", + target_arch = "sparc64", + target_arch = "riscv64", + target_arch = "loongarch64" + ))] { + mod b64; + pub use self::b64::*; + } else { + // Unknown target_arch + } +} diff --git a/vendor/libc/src/unix/linux_like/linux/mod.rs b/vendor/libc/src/unix/linux_like/linux/mod.rs new file mode 100644 index 00000000000000..14401077479ed4 --- /dev/null +++ b/vendor/libc/src/unix/linux_like/linux/mod.rs @@ -0,0 +1,6830 @@ +//! 
Linux-specific definitions for linux-like values + +use crate::prelude::*; +use crate::{sock_filter, _IO, _IOR, _IOW, _IOWR}; + +pub type useconds_t = u32; +pub type dev_t = u64; +pub type socklen_t = u32; +pub type mode_t = u32; +pub type ino64_t = u64; +pub type off64_t = i64; +pub type blkcnt64_t = i64; +pub type rlim64_t = u64; +pub type mqd_t = c_int; +pub type nfds_t = c_ulong; +pub type nl_item = c_int; +pub type idtype_t = c_uint; +pub type loff_t = c_longlong; +pub type pthread_key_t = c_uint; +pub type pthread_once_t = c_int; +pub type pthread_spinlock_t = c_int; +pub type __kernel_fsid_t = __c_anonymous__kernel_fsid_t; +pub type __kernel_clockid_t = c_int; + +pub type __u8 = c_uchar; +pub type __u16 = c_ushort; +pub type __s16 = c_short; +pub type __u32 = c_uint; +pub type __s32 = c_int; + +pub type Elf32_Half = u16; +pub type Elf32_Word = u32; +pub type Elf32_Off = u32; +pub type Elf32_Addr = u32; +pub type Elf32_Xword = u64; +pub type Elf32_Sword = i32; + +pub type Elf64_Half = u16; +pub type Elf64_Word = u32; +pub type Elf64_Off = u64; +pub type Elf64_Addr = u64; +pub type Elf64_Xword = u64; +pub type Elf64_Sxword = i64; +pub type Elf64_Sword = i32; + +pub type Elf32_Section = u16; +pub type Elf64_Section = u16; + +pub type Elf32_Relr = Elf32_Word; +pub type Elf64_Relr = Elf32_Xword; +pub type Elf32_Rel = __c_anonymous_elf32_rel; +pub type Elf64_Rel = __c_anonymous_elf64_rel; + +cfg_if! { + if #[cfg(not(target_arch = "sparc64"))] { + pub type Elf32_Rela = __c_anonymous_elf32_rela; + pub type Elf64_Rela = __c_anonymous_elf64_rela; + } +} + +pub type iconv_t = *mut c_void; + +// linux/sctp.h +pub type sctp_assoc_t = __s32; + +pub type eventfd_t = u64; + +cfg_if! { + if #[cfg(not(target_env = "gnu"))] { + missing! { + #[derive(Debug)] + pub enum fpos64_t {} // FIXME(linux): fill this out with a struct + } + } +} + +e! { + #[repr(u32)] + pub enum tpacket_versions { + TPACKET_V1, + TPACKET_V2, + TPACKET_V3, + } +} + +c_enum! { + pub enum pid_type { + PIDTYPE_PID, + PIDTYPE_TGID, + PIDTYPE_PGID, + PIDTYPE_SID, + PIDTYPE_MAX, + } +} + +s! 
{ + pub struct glob_t { + pub gl_pathc: size_t, + pub gl_pathv: *mut *mut c_char, + pub gl_offs: size_t, + pub gl_flags: c_int, + + __unused1: *mut c_void, + __unused2: *mut c_void, + __unused3: *mut c_void, + __unused4: *mut c_void, + __unused5: *mut c_void, + } + + pub struct passwd { + pub pw_name: *mut c_char, + pub pw_passwd: *mut c_char, + pub pw_uid: crate::uid_t, + pub pw_gid: crate::gid_t, + pub pw_gecos: *mut c_char, + pub pw_dir: *mut c_char, + pub pw_shell: *mut c_char, + } + + pub struct spwd { + pub sp_namp: *mut c_char, + pub sp_pwdp: *mut c_char, + pub sp_lstchg: c_long, + pub sp_min: c_long, + pub sp_max: c_long, + pub sp_warn: c_long, + pub sp_inact: c_long, + pub sp_expire: c_long, + pub sp_flag: c_ulong, + } + + pub struct dqblk { + pub dqb_bhardlimit: u64, + pub dqb_bsoftlimit: u64, + pub dqb_curspace: u64, + pub dqb_ihardlimit: u64, + pub dqb_isoftlimit: u64, + pub dqb_curinodes: u64, + pub dqb_btime: u64, + pub dqb_itime: u64, + pub dqb_valid: u32, + } + + pub struct signalfd_siginfo { + pub ssi_signo: u32, + pub ssi_errno: i32, + pub ssi_code: i32, + pub ssi_pid: u32, + pub ssi_uid: u32, + pub ssi_fd: i32, + pub ssi_tid: u32, + pub ssi_band: u32, + pub ssi_overrun: u32, + pub ssi_trapno: u32, + pub ssi_status: i32, + pub ssi_int: i32, + pub ssi_ptr: u64, + pub ssi_utime: u64, + pub ssi_stime: u64, + pub ssi_addr: u64, + pub ssi_addr_lsb: u16, + _pad2: u16, + pub ssi_syscall: i32, + pub ssi_call_addr: u64, + pub ssi_arch: u32, + _pad: [u8; 28], + } + + pub struct itimerspec { + pub it_interval: crate::timespec, + pub it_value: crate::timespec, + } + + pub struct fsid_t { + __val: [c_int; 2], + } + + pub struct fanout_args { + #[cfg(target_endian = "little")] + pub id: __u16, + pub type_flags: __u16, + #[cfg(target_endian = "big")] + pub id: __u16, + pub max_num_members: __u32, + } + + pub struct packet_mreq { + pub mr_ifindex: c_int, + pub mr_type: c_ushort, + pub mr_alen: c_ushort, + pub mr_address: [c_uchar; 8], + } + + #[deprecated(since = "0.2.70", note = "sockaddr_ll type must be used instead")] + pub struct sockaddr_pkt { + pub spkt_family: c_ushort, + pub spkt_device: [c_uchar; 14], + pub spkt_protocol: c_ushort, + } + + pub struct tpacket_auxdata { + pub tp_status: __u32, + pub tp_len: __u32, + pub tp_snaplen: __u32, + pub tp_mac: __u16, + pub tp_net: __u16, + pub tp_vlan_tci: __u16, + pub tp_vlan_tpid: __u16, + } + + pub struct tpacket_hdr { + pub tp_status: c_ulong, + pub tp_len: c_uint, + pub tp_snaplen: c_uint, + pub tp_mac: c_ushort, + pub tp_net: c_ushort, + pub tp_sec: c_uint, + pub tp_usec: c_uint, + } + + pub struct tpacket_hdr_variant1 { + pub tp_rxhash: __u32, + pub tp_vlan_tci: __u32, + pub tp_vlan_tpid: __u16, + pub tp_padding: __u16, + } + + pub struct tpacket2_hdr { + pub tp_status: __u32, + pub tp_len: __u32, + pub tp_snaplen: __u32, + pub tp_mac: __u16, + pub tp_net: __u16, + pub tp_sec: __u32, + pub tp_nsec: __u32, + pub tp_vlan_tci: __u16, + pub tp_vlan_tpid: __u16, + pub tp_padding: [__u8; 4], + } + + pub struct tpacket_req { + pub tp_block_size: c_uint, + pub tp_block_nr: c_uint, + pub tp_frame_size: c_uint, + pub tp_frame_nr: c_uint, + } + + pub struct tpacket_req3 { + pub tp_block_size: c_uint, + pub tp_block_nr: c_uint, + pub tp_frame_size: c_uint, + pub tp_frame_nr: c_uint, + pub tp_retire_blk_tov: c_uint, + pub tp_sizeof_priv: c_uint, + pub tp_feature_req_word: c_uint, + } + + #[repr(align(8))] + pub struct tpacket_rollover_stats { + pub tp_all: crate::__u64, + pub tp_huge: crate::__u64, + pub tp_failed: crate::__u64, + } + + pub 
struct tpacket_stats { + pub tp_packets: c_uint, + pub tp_drops: c_uint, + } + + pub struct tpacket_stats_v3 { + pub tp_packets: c_uint, + pub tp_drops: c_uint, + pub tp_freeze_q_cnt: c_uint, + } + + pub struct tpacket3_hdr { + pub tp_next_offset: __u32, + pub tp_sec: __u32, + pub tp_nsec: __u32, + pub tp_snaplen: __u32, + pub tp_len: __u32, + pub tp_status: __u32, + pub tp_mac: __u16, + pub tp_net: __u16, + pub hv1: crate::tpacket_hdr_variant1, + pub tp_padding: [__u8; 8], + } + + pub struct tpacket_bd_ts { + pub ts_sec: c_uint, + pub ts_usec: c_uint, + } + + #[repr(align(8))] + pub struct tpacket_hdr_v1 { + pub block_status: __u32, + pub num_pkts: __u32, + pub offset_to_first_pkt: __u32, + pub blk_len: __u32, + pub seq_num: crate::__u64, + pub ts_first_pkt: crate::tpacket_bd_ts, + pub ts_last_pkt: crate::tpacket_bd_ts, + } + + pub struct cpu_set_t { + #[cfg(all(target_pointer_width = "32", not(target_arch = "x86_64")))] + bits: [u32; 32], + #[cfg(not(all(target_pointer_width = "32", not(target_arch = "x86_64"))))] + bits: [u64; 16], + } + + pub struct if_nameindex { + pub if_index: c_uint, + pub if_name: *mut c_char, + } + + // System V IPC + pub struct msginfo { + pub msgpool: c_int, + pub msgmap: c_int, + pub msgmax: c_int, + pub msgmnb: c_int, + pub msgmni: c_int, + pub msgssz: c_int, + pub msgtql: c_int, + pub msgseg: c_ushort, + } + + pub struct sembuf { + pub sem_num: c_ushort, + pub sem_op: c_short, + pub sem_flg: c_short, + } + + pub struct input_event { + // FIXME(1.0): Change to the commented variant, see https://github.com/rust-lang/libc/pull/4148#discussion_r1857511742 + #[cfg(any(target_pointer_width = "64", not(linux_time_bits64)))] + pub time: crate::timeval, + // #[cfg(any(target_pointer_width = "64", not(linux_time_bits64)))] + // pub input_event_sec: time_t, + // #[cfg(any(target_pointer_width = "64", not(linux_time_bits64)))] + // pub input_event_usec: suseconds_t, + // #[cfg(target_arch = "sparc64")] + // _pad1: c_int, + #[cfg(all(target_pointer_width = "32", linux_time_bits64))] + pub input_event_sec: c_ulong, + + #[cfg(all(target_pointer_width = "32", linux_time_bits64))] + pub input_event_usec: c_ulong, + + pub type_: __u16, + pub code: __u16, + pub value: __s32, + } + + pub struct input_id { + pub bustype: __u16, + pub vendor: __u16, + pub product: __u16, + pub version: __u16, + } + + pub struct input_absinfo { + pub value: __s32, + pub minimum: __s32, + pub maximum: __s32, + pub fuzz: __s32, + pub flat: __s32, + pub resolution: __s32, + } + + pub struct input_keymap_entry { + pub flags: __u8, + pub len: __u8, + pub index: __u16, + pub keycode: __u32, + pub scancode: [__u8; 32], + } + + pub struct input_mask { + pub type_: __u32, + pub codes_size: __u32, + pub codes_ptr: crate::__u64, + } + + pub struct ff_replay { + pub length: __u16, + pub delay: __u16, + } + + pub struct ff_trigger { + pub button: __u16, + pub interval: __u16, + } + + pub struct ff_envelope { + pub attack_length: __u16, + pub attack_level: __u16, + pub fade_length: __u16, + pub fade_level: __u16, + } + + pub struct ff_constant_effect { + pub level: __s16, + pub envelope: ff_envelope, + } + + pub struct ff_ramp_effect { + pub start_level: __s16, + pub end_level: __s16, + pub envelope: ff_envelope, + } + + pub struct ff_condition_effect { + pub right_saturation: __u16, + pub left_saturation: __u16, + + pub right_coeff: __s16, + pub left_coeff: __s16, + + pub deadband: __u16, + pub center: __s16, + } + + pub struct ff_periodic_effect { + pub waveform: __u16, + pub period: __u16, + pub magnitude: 
__s16, + pub offset: __s16, + pub phase: __u16, + + pub envelope: ff_envelope, + + pub custom_len: __u32, + pub custom_data: *mut __s16, + } + + pub struct ff_rumble_effect { + pub strong_magnitude: __u16, + pub weak_magnitude: __u16, + } + + pub struct ff_effect { + pub type_: __u16, + pub id: __s16, + pub direction: __u16, + pub trigger: ff_trigger, + pub replay: ff_replay, + // FIXME(1.0): this is actually a union + #[cfg(target_pointer_width = "64")] + pub u: [u64; 4], + #[cfg(target_pointer_width = "32")] + pub u: [u32; 7], + } + + pub struct uinput_ff_upload { + pub request_id: __u32, + pub retval: __s32, + pub effect: ff_effect, + pub old: ff_effect, + } + + pub struct uinput_ff_erase { + pub request_id: __u32, + pub retval: __s32, + pub effect_id: __u32, + } + + pub struct uinput_abs_setup { + pub code: __u16, + pub absinfo: input_absinfo, + } + + pub struct dl_phdr_info { + #[cfg(target_pointer_width = "64")] + pub dlpi_addr: Elf64_Addr, + #[cfg(target_pointer_width = "32")] + pub dlpi_addr: Elf32_Addr, + + pub dlpi_name: *const c_char, + + #[cfg(target_pointer_width = "64")] + pub dlpi_phdr: *const Elf64_Phdr, + #[cfg(target_pointer_width = "32")] + pub dlpi_phdr: *const Elf32_Phdr, + + #[cfg(target_pointer_width = "64")] + pub dlpi_phnum: Elf64_Half, + #[cfg(target_pointer_width = "32")] + pub dlpi_phnum: Elf32_Half, + + // As of uClibc 1.0.36, the following fields are + // gated behind a "#if 0" block which always evaluates + // to false. So I'm just removing these, and if uClibc changes + // the #if block in the future to include the following fields, these + // will probably need including here. tsidea, skrap + // QNX (NTO) platform does not define these fields + #[cfg(not(any(target_env = "uclibc", target_os = "nto")))] + pub dlpi_adds: c_ulonglong, + #[cfg(not(any(target_env = "uclibc", target_os = "nto")))] + pub dlpi_subs: c_ulonglong, + #[cfg(not(any(target_env = "uclibc", target_os = "nto")))] + pub dlpi_tls_modid: size_t, + #[cfg(not(any(target_env = "uclibc", target_os = "nto")))] + pub dlpi_tls_data: *mut c_void, + } + + pub struct Elf32_Ehdr { + pub e_ident: [c_uchar; 16], + pub e_type: Elf32_Half, + pub e_machine: Elf32_Half, + pub e_version: Elf32_Word, + pub e_entry: Elf32_Addr, + pub e_phoff: Elf32_Off, + pub e_shoff: Elf32_Off, + pub e_flags: Elf32_Word, + pub e_ehsize: Elf32_Half, + pub e_phentsize: Elf32_Half, + pub e_phnum: Elf32_Half, + pub e_shentsize: Elf32_Half, + pub e_shnum: Elf32_Half, + pub e_shstrndx: Elf32_Half, + } + + pub struct Elf64_Ehdr { + pub e_ident: [c_uchar; 16], + pub e_type: Elf64_Half, + pub e_machine: Elf64_Half, + pub e_version: Elf64_Word, + pub e_entry: Elf64_Addr, + pub e_phoff: Elf64_Off, + pub e_shoff: Elf64_Off, + pub e_flags: Elf64_Word, + pub e_ehsize: Elf64_Half, + pub e_phentsize: Elf64_Half, + pub e_phnum: Elf64_Half, + pub e_shentsize: Elf64_Half, + pub e_shnum: Elf64_Half, + pub e_shstrndx: Elf64_Half, + } + + pub struct Elf32_Sym { + pub st_name: Elf32_Word, + pub st_value: Elf32_Addr, + pub st_size: Elf32_Word, + pub st_info: c_uchar, + pub st_other: c_uchar, + pub st_shndx: Elf32_Section, + } + + pub struct Elf64_Sym { + pub st_name: Elf64_Word, + pub st_info: c_uchar, + pub st_other: c_uchar, + pub st_shndx: Elf64_Section, + pub st_value: Elf64_Addr, + pub st_size: Elf64_Xword, + } + + pub struct Elf32_Phdr { + pub p_type: Elf32_Word, + pub p_offset: Elf32_Off, + pub p_vaddr: Elf32_Addr, + pub p_paddr: Elf32_Addr, + pub p_filesz: Elf32_Word, + pub p_memsz: Elf32_Word, + pub p_flags: Elf32_Word, + pub p_align: 
Elf32_Word, + } + + pub struct Elf64_Phdr { + pub p_type: Elf64_Word, + pub p_flags: Elf64_Word, + pub p_offset: Elf64_Off, + pub p_vaddr: Elf64_Addr, + pub p_paddr: Elf64_Addr, + pub p_filesz: Elf64_Xword, + pub p_memsz: Elf64_Xword, + pub p_align: Elf64_Xword, + } + + pub struct Elf32_Shdr { + pub sh_name: Elf32_Word, + pub sh_type: Elf32_Word, + pub sh_flags: Elf32_Word, + pub sh_addr: Elf32_Addr, + pub sh_offset: Elf32_Off, + pub sh_size: Elf32_Word, + pub sh_link: Elf32_Word, + pub sh_info: Elf32_Word, + pub sh_addralign: Elf32_Word, + pub sh_entsize: Elf32_Word, + } + + pub struct Elf64_Shdr { + pub sh_name: Elf64_Word, + pub sh_type: Elf64_Word, + pub sh_flags: Elf64_Xword, + pub sh_addr: Elf64_Addr, + pub sh_offset: Elf64_Off, + pub sh_size: Elf64_Xword, + pub sh_link: Elf64_Word, + pub sh_info: Elf64_Word, + pub sh_addralign: Elf64_Xword, + pub sh_entsize: Elf64_Xword, + } + + pub struct __c_anonymous_elf32_rel { + pub r_offset: Elf32_Addr, + pub r_info: Elf32_Word, + } + + pub struct __c_anonymous_elf64_rel { + pub r_offset: Elf64_Addr, + pub r_info: Elf64_Xword, + } + + pub struct __c_anonymous__kernel_fsid_t { + pub val: [c_int; 2], + } + + pub struct ucred { + pub pid: crate::pid_t, + pub uid: crate::uid_t, + pub gid: crate::gid_t, + } + + pub struct mntent { + pub mnt_fsname: *mut c_char, + pub mnt_dir: *mut c_char, + pub mnt_type: *mut c_char, + pub mnt_opts: *mut c_char, + pub mnt_freq: c_int, + pub mnt_passno: c_int, + } + + pub struct posix_spawn_file_actions_t { + __allocated: c_int, + __used: c_int, + __actions: *mut c_int, + __pad: [c_int; 16], + } + + pub struct posix_spawnattr_t { + __flags: c_short, + __pgrp: crate::pid_t, + __sd: crate::sigset_t, + __ss: crate::sigset_t, + #[cfg(any(target_env = "musl", target_env = "ohos"))] + __prio: c_int, + #[cfg(not(any(target_env = "musl", target_env = "ohos")))] + __sp: crate::sched_param, + __policy: c_int, + __pad: [c_int; 16], + } + + pub struct genlmsghdr { + pub cmd: u8, + pub version: u8, + pub reserved: u16, + } + + pub struct in6_pktinfo { + pub ipi6_addr: crate::in6_addr, + pub ipi6_ifindex: c_uint, + } + + pub struct arpd_request { + pub req: c_ushort, + pub ip: u32, + pub dev: c_ulong, + pub stamp: c_ulong, + pub updated: c_ulong, + pub ha: [c_uchar; crate::MAX_ADDR_LEN], + } + + pub struct inotify_event { + pub wd: c_int, + pub mask: u32, + pub cookie: u32, + pub len: u32, + } + + pub struct fanotify_response { + pub fd: c_int, + pub response: __u32, + } + + pub struct fanotify_event_info_header { + pub info_type: __u8, + pub pad: __u8, + pub len: __u16, + } + + pub struct fanotify_event_info_fid { + pub hdr: fanotify_event_info_header, + pub fsid: crate::__kernel_fsid_t, + pub handle: [c_uchar; 0], + } + + pub struct sockaddr_vm { + pub svm_family: crate::sa_family_t, + pub svm_reserved1: c_ushort, + pub svm_port: c_uint, + pub svm_cid: c_uint, + pub svm_zero: [u8; 4], + } + + pub struct regmatch_t { + pub rm_so: regoff_t, + pub rm_eo: regoff_t, + } + + pub struct sock_extended_err { + pub ee_errno: u32, + pub ee_origin: u8, + pub ee_type: u8, + pub ee_code: u8, + pub ee_pad: u8, + pub ee_info: u32, + pub ee_data: u32, + } + + // linux/seccomp.h + pub struct seccomp_data { + pub nr: c_int, + pub arch: __u32, + pub instruction_pointer: crate::__u64, + pub args: [crate::__u64; 6], + } + + pub struct seccomp_notif_sizes { + pub seccomp_notif: __u16, + pub seccomp_notif_resp: __u16, + pub seccomp_data: __u16, + } + + pub struct seccomp_notif { + pub id: crate::__u64, + pub pid: __u32, + pub flags: __u32, + pub data: 
seccomp_data, + } + + pub struct seccomp_notif_resp { + pub id: crate::__u64, + pub val: crate::__s64, + pub error: __s32, + pub flags: __u32, + } + + pub struct seccomp_notif_addfd { + pub id: crate::__u64, + pub flags: __u32, + pub srcfd: __u32, + pub newfd: __u32, + pub newfd_flags: __u32, + } + + pub struct nlmsghdr { + pub nlmsg_len: u32, + pub nlmsg_type: u16, + pub nlmsg_flags: u16, + pub nlmsg_seq: u32, + pub nlmsg_pid: u32, + } + + pub struct nlmsgerr { + pub error: c_int, + pub msg: nlmsghdr, + } + + pub struct nlattr { + pub nla_len: u16, + pub nla_type: u16, + } + + pub struct __c_anonymous_ifru_map { + pub mem_start: c_ulong, + pub mem_end: c_ulong, + pub base_addr: c_ushort, + pub irq: c_uchar, + pub dma: c_uchar, + pub port: c_uchar, + } + + pub struct in6_ifreq { + pub ifr6_addr: crate::in6_addr, + pub ifr6_prefixlen: u32, + pub ifr6_ifindex: c_int, + } + + pub struct option { + pub name: *const c_char, + pub has_arg: c_int, + pub flag: *mut c_int, + pub val: c_int, + } + + // linux/openat2.h + #[non_exhaustive] + pub struct open_how { + pub flags: crate::__u64, + pub mode: crate::__u64, + pub resolve: crate::__u64, + } + + // linux/ptp_clock.h + pub struct ptp_clock_time { + pub sec: crate::__s64, + pub nsec: __u32, + pub reserved: __u32, + } + + pub struct ptp_extts_request { + pub index: c_uint, + pub flags: c_uint, + pub rsv: [c_uint; 2], + } + + pub struct ptp_sys_offset_extended { + pub n_samples: c_uint, + pub clockid: __kernel_clockid_t, + pub rsv: [c_uint; 2], + pub ts: [[ptp_clock_time; 3]; PTP_MAX_SAMPLES as usize], + } + + pub struct ptp_sys_offset_precise { + pub device: ptp_clock_time, + pub sys_realtime: ptp_clock_time, + pub sys_monoraw: ptp_clock_time, + pub rsv: [c_uint; 4], + } + + pub struct ptp_extts_event { + pub t: ptp_clock_time, + index: c_uint, + flags: c_uint, + rsv: [c_uint; 2], + } + + // linux/sctp.h + + pub struct sctp_initmsg { + pub sinit_num_ostreams: __u16, + pub sinit_max_instreams: __u16, + pub sinit_max_attempts: __u16, + pub sinit_max_init_timeo: __u16, + } + + pub struct sctp_sndrcvinfo { + pub sinfo_stream: __u16, + pub sinfo_ssn: __u16, + pub sinfo_flags: __u16, + pub sinfo_ppid: __u32, + pub sinfo_context: __u32, + pub sinfo_timetolive: __u32, + pub sinfo_tsn: __u32, + pub sinfo_cumtsn: __u32, + pub sinfo_assoc_id: crate::sctp_assoc_t, + } + + pub struct sctp_sndinfo { + pub snd_sid: __u16, + pub snd_flags: __u16, + pub snd_ppid: __u32, + pub snd_context: __u32, + pub snd_assoc_id: crate::sctp_assoc_t, + } + + pub struct sctp_rcvinfo { + pub rcv_sid: __u16, + pub rcv_ssn: __u16, + pub rcv_flags: __u16, + pub rcv_ppid: __u32, + pub rcv_tsn: __u32, + pub rcv_cumtsn: __u32, + pub rcv_context: __u32, + pub rcv_assoc_id: crate::sctp_assoc_t, + } + + pub struct sctp_nxtinfo { + pub nxt_sid: __u16, + pub nxt_flags: __u16, + pub nxt_ppid: __u32, + pub nxt_length: __u32, + pub nxt_assoc_id: crate::sctp_assoc_t, + } + + pub struct sctp_prinfo { + pub pr_policy: __u16, + pub pr_value: __u32, + } + + pub struct sctp_authinfo { + pub auth_keynumber: __u16, + } + + pub struct rlimit64 { + pub rlim_cur: rlim64_t, + pub rlim_max: rlim64_t, + } + + // linux/tls.h + + pub struct tls_crypto_info { + pub version: __u16, + pub cipher_type: __u16, + } + + pub struct tls12_crypto_info_aes_gcm_128 { + pub info: tls_crypto_info, + pub iv: [c_uchar; TLS_CIPHER_AES_GCM_128_IV_SIZE], + pub key: [c_uchar; TLS_CIPHER_AES_GCM_128_KEY_SIZE], + pub salt: [c_uchar; TLS_CIPHER_AES_GCM_128_SALT_SIZE], + pub rec_seq: [c_uchar; TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE], + } 
+ + pub struct tls12_crypto_info_aes_gcm_256 { + pub info: tls_crypto_info, + pub iv: [c_uchar; TLS_CIPHER_AES_GCM_256_IV_SIZE], + pub key: [c_uchar; TLS_CIPHER_AES_GCM_256_KEY_SIZE], + pub salt: [c_uchar; TLS_CIPHER_AES_GCM_256_SALT_SIZE], + pub rec_seq: [c_uchar; TLS_CIPHER_AES_GCM_256_REC_SEQ_SIZE], + } + + pub struct tls12_crypto_info_aes_ccm_128 { + pub info: tls_crypto_info, + pub iv: [c_uchar; TLS_CIPHER_AES_CCM_128_IV_SIZE], + pub key: [c_uchar; TLS_CIPHER_AES_CCM_128_KEY_SIZE], + pub salt: [c_uchar; TLS_CIPHER_AES_CCM_128_SALT_SIZE], + pub rec_seq: [c_uchar; TLS_CIPHER_AES_CCM_128_REC_SEQ_SIZE], + } + + pub struct tls12_crypto_info_chacha20_poly1305 { + pub info: tls_crypto_info, + pub iv: [c_uchar; TLS_CIPHER_CHACHA20_POLY1305_IV_SIZE], + pub key: [c_uchar; TLS_CIPHER_CHACHA20_POLY1305_KEY_SIZE], + pub salt: [c_uchar; TLS_CIPHER_CHACHA20_POLY1305_SALT_SIZE], + pub rec_seq: [c_uchar; TLS_CIPHER_CHACHA20_POLY1305_REC_SEQ_SIZE], + } + + pub struct tls12_crypto_info_sm4_gcm { + pub info: tls_crypto_info, + pub iv: [c_uchar; TLS_CIPHER_SM4_GCM_IV_SIZE], + pub key: [c_uchar; TLS_CIPHER_SM4_GCM_KEY_SIZE], + pub salt: [c_uchar; TLS_CIPHER_SM4_GCM_SALT_SIZE], + pub rec_seq: [c_uchar; TLS_CIPHER_SM4_GCM_REC_SEQ_SIZE], + } + + pub struct tls12_crypto_info_sm4_ccm { + pub info: tls_crypto_info, + pub iv: [c_uchar; TLS_CIPHER_SM4_CCM_IV_SIZE], + pub key: [c_uchar; TLS_CIPHER_SM4_CCM_KEY_SIZE], + pub salt: [c_uchar; TLS_CIPHER_SM4_CCM_SALT_SIZE], + pub rec_seq: [c_uchar; TLS_CIPHER_SM4_CCM_REC_SEQ_SIZE], + } + + pub struct tls12_crypto_info_aria_gcm_128 { + pub info: tls_crypto_info, + pub iv: [c_uchar; TLS_CIPHER_ARIA_GCM_128_IV_SIZE], + pub key: [c_uchar; TLS_CIPHER_ARIA_GCM_128_KEY_SIZE], + pub salt: [c_uchar; TLS_CIPHER_ARIA_GCM_128_SALT_SIZE], + pub rec_seq: [c_uchar; TLS_CIPHER_ARIA_GCM_128_REC_SEQ_SIZE], + } + + pub struct tls12_crypto_info_aria_gcm_256 { + pub info: tls_crypto_info, + pub iv: [c_uchar; TLS_CIPHER_ARIA_GCM_256_IV_SIZE], + pub key: [c_uchar; TLS_CIPHER_ARIA_GCM_256_KEY_SIZE], + pub salt: [c_uchar; TLS_CIPHER_ARIA_GCM_256_SALT_SIZE], + pub rec_seq: [c_uchar; TLS_CIPHER_ARIA_GCM_256_REC_SEQ_SIZE], + } + + // linux/wireless.h + + pub struct iw_param { + pub value: __s32, + pub fixed: __u8, + pub disabled: __u8, + pub flags: __u16, + } + + pub struct iw_point { + pub pointer: *mut c_void, + pub length: __u16, + pub flags: __u16, + } + + pub struct iw_freq { + pub m: __s32, + pub e: __s16, + pub i: __u8, + pub flags: __u8, + } + + pub struct iw_quality { + pub qual: __u8, + pub level: __u8, + pub noise: __u8, + pub updated: __u8, + } + + pub struct iw_discarded { + pub nwid: __u32, + pub code: __u32, + pub fragment: __u32, + pub retries: __u32, + pub misc: __u32, + } + + pub struct iw_missed { + pub beacon: __u32, + } + + pub struct iw_scan_req { + pub scan_type: __u8, + pub essid_len: __u8, + pub num_channels: __u8, + pub flags: __u8, + pub bssid: crate::sockaddr, + pub essid: [__u8; IW_ESSID_MAX_SIZE], + pub min_channel_time: __u32, + pub max_channel_time: __u32, + pub channel_list: [iw_freq; IW_MAX_FREQUENCIES], + } + + pub struct iw_encode_ext { + pub ext_flags: __u32, + pub tx_seq: [__u8; IW_ENCODE_SEQ_MAX_SIZE], + pub rx_seq: [__u8; IW_ENCODE_SEQ_MAX_SIZE], + pub addr: crate::sockaddr, + pub alg: __u16, + pub key_len: __u16, + pub key: [__u8; 0], + } + + pub struct iw_pmksa { + pub cmd: __u32, + pub bssid: crate::sockaddr, + pub pmkid: [__u8; IW_PMKID_LEN], + } + + pub struct iw_pmkid_cand { + pub flags: __u32, + pub index: __u32, + pub bssid: crate::sockaddr, + } + 
pub struct iw_statistics { + pub status: __u16, + pub qual: iw_quality, + pub discard: iw_discarded, + pub miss: iw_missed, + } + + pub struct iw_range { + pub throughput: __u32, + pub min_nwid: __u32, + pub max_nwid: __u32, + pub old_num_channels: __u16, + pub old_num_frequency: __u8, + pub scan_capa: __u8, + pub event_capa: [__u32; 6], + pub sensitivity: __s32, + pub max_qual: iw_quality, + pub avg_qual: iw_quality, + pub num_bitrates: __u8, + pub bitrate: [__s32; IW_MAX_BITRATES], + pub min_rts: __s32, + pub max_rts: __s32, + pub min_frag: __s32, + pub max_frag: __s32, + pub min_pmp: __s32, + pub max_pmp: __s32, + pub min_pmt: __s32, + pub max_pmt: __s32, + pub pmp_flags: __u16, + pub pmt_flags: __u16, + pub pm_capa: __u16, + pub encoding_size: [__u16; IW_MAX_ENCODING_SIZES], + pub num_encoding_sizes: __u8, + pub max_encoding_tokens: __u8, + pub encoding_login_index: __u8, + pub txpower_capa: __u16, + pub num_txpower: __u8, + pub txpower: [__s32; IW_MAX_TXPOWER], + pub we_version_compiled: __u8, + pub we_version_source: __u8, + pub retry_capa: __u16, + pub retry_flags: __u16, + pub r_time_flags: __u16, + pub min_retry: __s32, + pub max_retry: __s32, + pub min_r_time: __s32, + pub max_r_time: __s32, + pub num_channels: __u16, + pub num_frequency: __u8, + pub freq: [iw_freq; IW_MAX_FREQUENCIES], + pub enc_capa: __u32, + } + + pub struct iw_priv_args { + pub cmd: __u32, + pub set_args: __u16, + pub get_args: __u16, + pub name: [c_char; crate::IFNAMSIZ], + } + + // #include + + pub struct epoll_params { + pub busy_poll_usecs: u32, + pub busy_poll_budget: u16, + pub prefer_busy_poll: u8, + pub __pad: u8, // Must be zero + } + + #[cfg_attr( + any( + target_pointer_width = "32", + target_arch = "x86_64", + target_arch = "powerpc64", + target_arch = "mips64", + target_arch = "mips64r6", + target_arch = "s390x", + target_arch = "sparc64", + target_arch = "aarch64", + target_arch = "riscv64", + target_arch = "riscv32", + target_arch = "loongarch64" + ), + repr(align(4)) + )] + #[cfg_attr( + not(any( + target_pointer_width = "32", + target_arch = "x86_64", + target_arch = "powerpc64", + target_arch = "mips64", + target_arch = "mips64r6", + target_arch = "s390x", + target_arch = "sparc64", + target_arch = "aarch64", + target_arch = "riscv64", + target_arch = "riscv32", + target_arch = "loongarch64" + )), + repr(align(8)) + )] + pub struct pthread_mutexattr_t { + #[doc(hidden)] + size: [u8; crate::__SIZEOF_PTHREAD_MUTEXATTR_T], + } + + #[cfg_attr( + any(target_env = "musl", target_env = "ohos", target_pointer_width = "32"), + repr(align(4)) + )] + #[cfg_attr( + all( + not(target_env = "musl"), + not(target_env = "ohos"), + target_pointer_width = "64" + ), + repr(align(8)) + )] + pub struct pthread_rwlockattr_t { + #[doc(hidden)] + size: [u8; crate::__SIZEOF_PTHREAD_RWLOCKATTR_T], + } + + #[repr(align(4))] + pub struct pthread_condattr_t { + #[doc(hidden)] + size: [u8; crate::__SIZEOF_PTHREAD_CONDATTR_T], + } + + #[repr(align(4))] + pub struct pthread_barrierattr_t { + #[doc(hidden)] + size: [u8; crate::__SIZEOF_PTHREAD_BARRIERATTR_T], + } + + #[cfg(not(target_env = "musl"))] + #[repr(align(8))] + pub struct fanotify_event_metadata { + pub event_len: __u32, + pub vers: __u8, + pub reserved: __u8, + pub metadata_len: __u16, + pub mask: __u64, + pub fd: c_int, + pub pid: c_int, + } + + // linux/ptp_clock.h + + pub struct ptp_sys_offset { + pub n_samples: c_uint, + pub rsv: [c_uint; 3], + // FIXME(garando): replace length with `2 * PTP_MAX_SAMPLES + 1` when supported + pub ts: [ptp_clock_time; 51], + } 
+ + pub struct ptp_pin_desc { + pub name: [c_char; 64], + pub index: c_uint, + pub func: c_uint, + pub chan: c_uint, + pub rsv: [c_uint; 5], + } + + pub struct ptp_clock_caps { + pub max_adj: c_int, + pub n_alarm: c_int, + pub n_ext_ts: c_int, + pub n_per_out: c_int, + pub pps: c_int, + pub n_pins: c_int, + pub cross_timestamping: c_int, + pub adjust_phase: c_int, + pub max_phase_adj: c_int, + pub rsv: [c_int; 11], + } + + // linux/if_xdp.h + + pub struct sockaddr_xdp { + pub sxdp_family: crate::__u16, + pub sxdp_flags: crate::__u16, + pub sxdp_ifindex: crate::__u32, + pub sxdp_queue_id: crate::__u32, + pub sxdp_shared_umem_fd: crate::__u32, + } + + pub struct xdp_ring_offset { + pub producer: crate::__u64, + pub consumer: crate::__u64, + pub desc: crate::__u64, + pub flags: crate::__u64, + } + + pub struct xdp_mmap_offsets { + pub rx: xdp_ring_offset, + pub tx: xdp_ring_offset, + pub fr: xdp_ring_offset, + pub cr: xdp_ring_offset, + } + + pub struct xdp_ring_offset_v1 { + pub producer: crate::__u64, + pub consumer: crate::__u64, + pub desc: crate::__u64, + } + + pub struct xdp_mmap_offsets_v1 { + pub rx: xdp_ring_offset_v1, + pub tx: xdp_ring_offset_v1, + pub fr: xdp_ring_offset_v1, + pub cr: xdp_ring_offset_v1, + } + + pub struct xdp_umem_reg { + pub addr: crate::__u64, + pub len: crate::__u64, + pub chunk_size: crate::__u32, + pub headroom: crate::__u32, + pub flags: crate::__u32, + pub tx_metadata_len: crate::__u32, + } + + pub struct xdp_umem_reg_v1 { + pub addr: crate::__u64, + pub len: crate::__u64, + pub chunk_size: crate::__u32, + pub headroom: crate::__u32, + } + + pub struct xdp_statistics { + pub rx_dropped: crate::__u64, + pub rx_invalid_descs: crate::__u64, + pub tx_invalid_descs: crate::__u64, + pub rx_ring_full: crate::__u64, + pub rx_fill_ring_empty_descs: crate::__u64, + pub tx_ring_empty_descs: crate::__u64, + } + + pub struct xdp_statistics_v1 { + pub rx_dropped: crate::__u64, + pub rx_invalid_descs: crate::__u64, + pub tx_invalid_descs: crate::__u64, + } + + pub struct xdp_options { + pub flags: crate::__u32, + } + + pub struct xdp_desc { + pub addr: crate::__u64, + pub len: crate::__u32, + pub options: crate::__u32, + } + + pub struct xsk_tx_metadata_completion { + pub tx_timestamp: crate::__u64, + } + + pub struct xsk_tx_metadata_request { + pub csum_start: __u16, + pub csum_offset: __u16, + } + + // linux/mount.h + + pub struct mount_attr { + pub attr_set: crate::__u64, + pub attr_clr: crate::__u64, + pub propagation: crate::__u64, + pub userns_fd: crate::__u64, + } + + // linux/nsfs.h + pub struct mnt_ns_info { + pub size: crate::__u32, + pub nr_mounts: crate::__u32, + pub mnt_ns_id: crate::__u64, + } + + // linux/pidfd.h + + #[non_exhaustive] + pub struct pidfd_info { + pub mask: crate::__u64, + pub cgroupid: crate::__u64, + pub pid: crate::__u32, + pub tgid: crate::__u32, + pub ppid: crate::__u32, + pub ruid: crate::__u32, + pub rgid: crate::__u32, + pub euid: crate::__u32, + pub egid: crate::__u32, + pub suid: crate::__u32, + pub sgid: crate::__u32, + pub fsuid: crate::__u32, + pub fsgid: crate::__u32, + pub exit_code: crate::__s32, + } + + // linux/uio.h + + pub struct dmabuf_cmsg { + pub frag_offset: crate::__u64, + pub frag_size: crate::__u32, + pub frag_token: crate::__u32, + pub dmabuf_id: crate::__u32, + pub flags: crate::__u32, + } + + pub struct dmabuf_token { + pub token_start: crate::__u32, + pub token_count: crate::__u32, + } +} + +cfg_if! { + if #[cfg(not(target_arch = "sparc64"))] { + s! 
{ + pub struct iw_thrspy { + pub addr: crate::sockaddr, + pub qual: iw_quality, + pub low: iw_quality, + pub high: iw_quality, + } + + pub struct iw_mlme { + pub cmd: __u16, + pub reason_code: __u16, + pub addr: crate::sockaddr, + } + + pub struct iw_michaelmicfailure { + pub flags: __u32, + pub src_addr: crate::sockaddr, + pub tsc: [__u8; IW_ENCODE_SEQ_MAX_SIZE], + } + + pub struct __c_anonymous_elf32_rela { + pub r_offset: Elf32_Addr, + pub r_info: Elf32_Word, + pub r_addend: Elf32_Sword, + } + + pub struct __c_anonymous_elf64_rela { + pub r_offset: Elf64_Addr, + pub r_info: Elf64_Xword, + pub r_addend: Elf64_Sxword, + } + } + } +} + +s_no_extra_traits! { + pub struct sockaddr_nl { + pub nl_family: crate::sa_family_t, + nl_pad: c_ushort, + pub nl_pid: u32, + pub nl_groups: u32, + } + + pub struct dirent { + pub d_ino: crate::ino_t, + pub d_off: off_t, + pub d_reclen: c_ushort, + pub d_type: c_uchar, + pub d_name: [c_char; 256], + } + + pub struct sockaddr_alg { + pub salg_family: crate::sa_family_t, + pub salg_type: [c_uchar; 14], + pub salg_feat: u32, + pub salg_mask: u32, + pub salg_name: [c_uchar; 64], + } + + pub struct uinput_setup { + pub id: input_id, + pub name: [c_char; UINPUT_MAX_NAME_SIZE], + pub ff_effects_max: __u32, + } + + pub struct uinput_user_dev { + pub name: [c_char; UINPUT_MAX_NAME_SIZE], + pub id: input_id, + pub ff_effects_max: __u32, + pub absmax: [__s32; ABS_CNT], + pub absmin: [__s32; ABS_CNT], + pub absfuzz: [__s32; ABS_CNT], + pub absflat: [__s32; ABS_CNT], + } + + /// WARNING: The `PartialEq`, `Eq` and `Hash` implementations of this + /// type are unsound and will be removed in the future. + #[deprecated( + note = "this struct has unsafe trait implementations that will be \ + removed in the future", + since = "0.2.80" + )] + pub struct af_alg_iv { + pub ivlen: u32, + pub iv: [c_uchar; 0], + } + + // x32 compatibility + // See https://sourceware.org/bugzilla/show_bug.cgi?id=21279 + pub struct mq_attr { + #[cfg(all(target_arch = "x86_64", target_pointer_width = "32"))] + pub mq_flags: i64, + #[cfg(all(target_arch = "x86_64", target_pointer_width = "32"))] + pub mq_maxmsg: i64, + #[cfg(all(target_arch = "x86_64", target_pointer_width = "32"))] + pub mq_msgsize: i64, + #[cfg(all(target_arch = "x86_64", target_pointer_width = "32"))] + pub mq_curmsgs: i64, + #[cfg(all(target_arch = "x86_64", target_pointer_width = "32"))] + pad: [i64; 4], + + #[cfg(not(all(target_arch = "x86_64", target_pointer_width = "32")))] + pub mq_flags: c_long, + #[cfg(not(all(target_arch = "x86_64", target_pointer_width = "32")))] + pub mq_maxmsg: c_long, + #[cfg(not(all(target_arch = "x86_64", target_pointer_width = "32")))] + pub mq_msgsize: c_long, + #[cfg(not(all(target_arch = "x86_64", target_pointer_width = "32")))] + pub mq_curmsgs: c_long, + #[cfg(not(all(target_arch = "x86_64", target_pointer_width = "32")))] + pad: [c_long; 4], + } + + pub union __c_anonymous_ifr_ifru { + pub ifru_addr: crate::sockaddr, + pub ifru_dstaddr: crate::sockaddr, + pub ifru_broadaddr: crate::sockaddr, + pub ifru_netmask: crate::sockaddr, + pub ifru_hwaddr: crate::sockaddr, + pub ifru_flags: c_short, + pub ifru_ifindex: c_int, + pub ifru_metric: c_int, + pub ifru_mtu: c_int, + pub ifru_map: __c_anonymous_ifru_map, + pub ifru_slave: [c_char; crate::IFNAMSIZ], + pub ifru_newname: [c_char; crate::IFNAMSIZ], + pub ifru_data: *mut c_char, + } + + pub struct ifreq { + /// interface name, e.g. 
"en0" + pub ifr_name: [c_char; crate::IFNAMSIZ], + pub ifr_ifru: __c_anonymous_ifr_ifru, + } + + pub union __c_anonymous_ifc_ifcu { + pub ifcu_buf: *mut c_char, + pub ifcu_req: *mut crate::ifreq, + } + + /// Structure used in SIOCGIFCONF request. Used to retrieve interface configuration for + /// machine (useful for programs which must know all networks accessible). + pub struct ifconf { + /// Size of buffer + pub ifc_len: c_int, + pub ifc_ifcu: __c_anonymous_ifc_ifcu, + } + + pub struct hwtstamp_config { + pub flags: c_int, + pub tx_type: c_int, + pub rx_filter: c_int, + } + + pub struct dirent64 { + pub d_ino: crate::ino64_t, + pub d_off: off64_t, + pub d_reclen: c_ushort, + pub d_type: c_uchar, + pub d_name: [c_char; 256], + } + + pub struct sched_attr { + pub size: __u32, + pub sched_policy: __u32, + pub sched_flags: crate::__u64, + pub sched_nice: __s32, + pub sched_priority: __u32, + pub sched_runtime: crate::__u64, + pub sched_deadline: crate::__u64, + pub sched_period: crate::__u64, + } + + pub union tpacket_req_u { + pub req: crate::tpacket_req, + pub req3: crate::tpacket_req3, + } + + pub union tpacket_bd_header_u { + pub bh1: crate::tpacket_hdr_v1, + } + + pub struct tpacket_block_desc { + pub version: __u32, + pub offset_to_priv: __u32, + pub hdr: crate::tpacket_bd_header_u, + } + + #[cfg_attr( + all( + any(target_env = "musl", target_env = "ohos"), + target_pointer_width = "32" + ), + repr(align(4)) + )] + #[cfg_attr( + all( + any(target_env = "musl", target_env = "ohos"), + target_pointer_width = "64" + ), + repr(align(8)) + )] + #[cfg_attr( + all( + not(any(target_env = "musl", target_env = "ohos")), + target_arch = "x86" + ), + repr(align(4)) + )] + #[cfg_attr( + all( + not(any(target_env = "musl", target_env = "ohos")), + not(target_arch = "x86") + ), + repr(align(8)) + )] + pub struct pthread_cond_t { + #[doc(hidden)] + size: [u8; crate::__SIZEOF_PTHREAD_COND_T], + } + + #[cfg_attr( + all( + target_pointer_width = "32", + any( + target_arch = "mips", + target_arch = "mips32r6", + target_arch = "arm", + target_arch = "hexagon", + target_arch = "m68k", + target_arch = "csky", + target_arch = "powerpc", + target_arch = "sparc", + target_arch = "x86_64", + target_arch = "x86" + ) + ), + repr(align(4)) + )] + #[cfg_attr( + any( + target_pointer_width = "64", + not(any( + target_arch = "mips", + target_arch = "mips32r6", + target_arch = "arm", + target_arch = "hexagon", + target_arch = "m68k", + target_arch = "csky", + target_arch = "powerpc", + target_arch = "sparc", + target_arch = "x86_64", + target_arch = "x86" + )) + ), + repr(align(8)) + )] + pub struct pthread_mutex_t { + #[doc(hidden)] + size: [u8; crate::__SIZEOF_PTHREAD_MUTEX_T], + } + + #[cfg_attr( + all( + target_pointer_width = "32", + any( + target_arch = "mips", + target_arch = "mips32r6", + target_arch = "arm", + target_arch = "hexagon", + target_arch = "m68k", + target_arch = "csky", + target_arch = "powerpc", + target_arch = "sparc", + target_arch = "x86_64", + target_arch = "x86" + ) + ), + repr(align(4)) + )] + #[cfg_attr( + any( + target_pointer_width = "64", + not(any( + target_arch = "mips", + target_arch = "mips32r6", + target_arch = "arm", + target_arch = "hexagon", + target_arch = "m68k", + target_arch = "powerpc", + target_arch = "sparc", + target_arch = "x86_64", + target_arch = "x86" + )) + ), + repr(align(8)) + )] + pub struct pthread_rwlock_t { + size: [u8; crate::__SIZEOF_PTHREAD_RWLOCK_T], + } + + #[cfg_attr( + all( + target_pointer_width = "32", + any( + target_arch = "mips", + target_arch = 
"mips32r6", + target_arch = "arm", + target_arch = "hexagon", + target_arch = "m68k", + target_arch = "csky", + target_arch = "powerpc", + target_arch = "sparc", + target_arch = "x86_64", + target_arch = "x86" + ) + ), + repr(align(4)) + )] + #[cfg_attr( + any( + target_pointer_width = "64", + not(any( + target_arch = "mips", + target_arch = "mips32r6", + target_arch = "arm", + target_arch = "hexagon", + target_arch = "m68k", + target_arch = "csky", + target_arch = "powerpc", + target_arch = "sparc", + target_arch = "x86_64", + target_arch = "x86" + )) + ), + repr(align(8)) + )] + pub struct pthread_barrier_t { + size: [u8; crate::__SIZEOF_PTHREAD_BARRIER_T], + } + + // linux/net_tstamp.h + pub struct sock_txtime { + pub clockid: crate::clockid_t, + pub flags: __u32, + } + + // linux/wireless.h + pub union iwreq_data { + pub name: [c_char; crate::IFNAMSIZ], + pub essid: iw_point, + pub nwid: iw_param, + pub freq: iw_freq, + pub sens: iw_param, + pub bitrate: iw_param, + pub txpower: iw_param, + pub rts: iw_param, + pub frag: iw_param, + pub mode: __u32, + pub retry: iw_param, + pub encoding: iw_point, + pub power: iw_param, + pub qual: iw_quality, + pub ap_addr: crate::sockaddr, + pub addr: crate::sockaddr, + pub param: iw_param, + pub data: iw_point, + } + + pub struct iw_event { + pub len: __u16, + pub cmd: __u16, + pub u: iwreq_data, + } + + pub union __c_anonymous_iwreq { + pub ifrn_name: [c_char; crate::IFNAMSIZ], + } + + pub struct iwreq { + pub ifr_ifrn: __c_anonymous_iwreq, + pub u: iwreq_data, + } + + // linux/ptp_clock.h + pub union __c_anonymous_ptp_perout_request_1 { + pub start: ptp_clock_time, + pub phase: ptp_clock_time, + } + + pub union __c_anonymous_ptp_perout_request_2 { + pub on: ptp_clock_time, + pub rsv: [c_uint; 4], + } + + pub struct ptp_perout_request { + pub anonymous_1: __c_anonymous_ptp_perout_request_1, + pub period: ptp_clock_time, + pub index: c_uint, + pub flags: c_uint, + pub anonymous_2: __c_anonymous_ptp_perout_request_2, + } + + // linux/if_xdp.h + pub struct xsk_tx_metadata { + pub flags: crate::__u64, + pub xsk_tx_metadata_union: __c_anonymous_xsk_tx_metadata_union, + } + + pub union __c_anonymous_xsk_tx_metadata_union { + pub request: xsk_tx_metadata_request, + pub completion: xsk_tx_metadata_completion, + } +} + +cfg_if! 
{ + if #[cfg(feature = "extra_traits")] { + impl PartialEq for sockaddr_nl { + fn eq(&self, other: &sockaddr_nl) -> bool { + self.nl_family == other.nl_family + && self.nl_pid == other.nl_pid + && self.nl_groups == other.nl_groups + } + } + impl Eq for sockaddr_nl {} + impl hash::Hash for sockaddr_nl { + fn hash<H: hash::Hasher>(&self, state: &mut H) { + self.nl_family.hash(state); + self.nl_pid.hash(state); + self.nl_groups.hash(state); + } + } + + impl PartialEq for dirent { + fn eq(&self, other: &dirent) -> bool { + self.d_ino == other.d_ino + && self.d_off == other.d_off + && self.d_reclen == other.d_reclen + && self.d_type == other.d_type + && self + .d_name + .iter() + .zip(other.d_name.iter()) + .all(|(a, b)| a == b) + } + } + + impl Eq for dirent {} + + impl hash::Hash for dirent { + fn hash<H: hash::Hasher>(&self, state: &mut H) { + self.d_ino.hash(state); + self.d_off.hash(state); + self.d_reclen.hash(state); + self.d_type.hash(state); + self.d_name.hash(state); + } + } + + impl PartialEq for dirent64 { + fn eq(&self, other: &dirent64) -> bool { + self.d_ino == other.d_ino + && self.d_off == other.d_off + && self.d_reclen == other.d_reclen + && self.d_type == other.d_type + && self + .d_name + .iter() + .zip(other.d_name.iter()) + .all(|(a, b)| a == b) + } + } + + impl Eq for dirent64 {} + + impl hash::Hash for dirent64 { + fn hash<H: hash::Hasher>(&self, state: &mut H) { + self.d_ino.hash(state); + self.d_off.hash(state); + self.d_reclen.hash(state); + self.d_type.hash(state); + self.d_name.hash(state); + } + } + + impl PartialEq for pthread_cond_t { + fn eq(&self, other: &pthread_cond_t) -> bool { + self.size.iter().zip(other.size.iter()).all(|(a, b)| a == b) + } + } + + impl Eq for pthread_cond_t {} + + impl hash::Hash for pthread_cond_t { + fn hash<H: hash::Hasher>(&self, state: &mut H) { + self.size.hash(state); + } + } + + impl PartialEq for pthread_mutex_t { + fn eq(&self, other: &pthread_mutex_t) -> bool { + self.size.iter().zip(other.size.iter()).all(|(a, b)| a == b) + } + } + + impl Eq for pthread_mutex_t {} + + impl hash::Hash for pthread_mutex_t { + fn hash<H: hash::Hasher>(&self, state: &mut H) { + self.size.hash(state); + } + } + + impl PartialEq for pthread_rwlock_t { + fn eq(&self, other: &pthread_rwlock_t) -> bool { + self.size.iter().zip(other.size.iter()).all(|(a, b)| a == b) + } + } + + impl Eq for pthread_rwlock_t {} + + impl hash::Hash for pthread_rwlock_t { + fn hash<H: hash::Hasher>(&self, state: &mut H) { + self.size.hash(state); + } + } + + impl PartialEq for pthread_barrier_t { + fn eq(&self, other: &pthread_barrier_t) -> bool { + self.size.iter().zip(other.size.iter()).all(|(a, b)| a == b) + } + } + + impl Eq for pthread_barrier_t {} + + impl hash::Hash for pthread_barrier_t { + fn hash<H: hash::Hasher>(&self, state: &mut H) { + self.size.hash(state); + } + } + + impl PartialEq for sockaddr_alg { + fn eq(&self, other: &sockaddr_alg) -> bool { + self.salg_family == other.salg_family + && self + .salg_type + .iter() + .zip(other.salg_type.iter()) + .all(|(a, b)| a == b) + && self.salg_feat == other.salg_feat + && self.salg_mask == other.salg_mask + && self + .salg_name + .iter() + .zip(other.salg_name.iter()) + .all(|(a, b)| a == b) + } + } + + impl Eq for sockaddr_alg {} + + impl hash::Hash for sockaddr_alg { + fn hash<H: hash::Hasher>(&self, state: &mut H) { + self.salg_family.hash(state); + self.salg_type.hash(state); + self.salg_feat.hash(state); + self.salg_mask.hash(state); + self.salg_name.hash(state); + } + } + + impl PartialEq for uinput_setup { + fn eq(&self, other: &uinput_setup) -> bool { + self.id == other.id + && self.name[..] == other.name[..]
+ && self.ff_effects_max == other.ff_effects_max + } + } + impl Eq for uinput_setup {} + + impl hash::Hash for uinput_setup { + fn hash<H: hash::Hasher>(&self, state: &mut H) { + self.id.hash(state); + self.name.hash(state); + self.ff_effects_max.hash(state); + } + } + + impl PartialEq for uinput_user_dev { + fn eq(&self, other: &uinput_user_dev) -> bool { + self.name[..] == other.name[..] + && self.id == other.id + && self.ff_effects_max == other.ff_effects_max + && self.absmax[..] == other.absmax[..] + && self.absmin[..] == other.absmin[..] + && self.absfuzz[..] == other.absfuzz[..] + && self.absflat[..] == other.absflat[..] + } + } + impl Eq for uinput_user_dev {} + + impl hash::Hash for uinput_user_dev { + fn hash<H: hash::Hasher>(&self, state: &mut H) { + self.name.hash(state); + self.id.hash(state); + self.ff_effects_max.hash(state); + self.absmax.hash(state); + self.absmin.hash(state); + self.absfuzz.hash(state); + self.absflat.hash(state); + } + } + + #[allow(deprecated)] + impl af_alg_iv { + fn as_slice(&self) -> &[u8] { + unsafe { ::core::slice::from_raw_parts(self.iv.as_ptr(), self.ivlen as usize) } + } + } + + #[allow(deprecated)] + impl PartialEq for af_alg_iv { + fn eq(&self, other: &af_alg_iv) -> bool { + *self.as_slice() == *other.as_slice() + } + } + + #[allow(deprecated)] + impl Eq for af_alg_iv {} + + #[allow(deprecated)] + impl hash::Hash for af_alg_iv { + fn hash<H: hash::Hasher>(&self, state: &mut H) { + self.as_slice().hash(state); + } + } + + impl PartialEq for mq_attr { + fn eq(&self, other: &mq_attr) -> bool { + self.mq_flags == other.mq_flags + && self.mq_maxmsg == other.mq_maxmsg + && self.mq_msgsize == other.mq_msgsize + && self.mq_curmsgs == other.mq_curmsgs + } + } + impl Eq for mq_attr {} + impl hash::Hash for mq_attr { + fn hash<H: hash::Hasher>(&self, state: &mut H) { + self.mq_flags.hash(state); + self.mq_maxmsg.hash(state); + self.mq_msgsize.hash(state); + self.mq_curmsgs.hash(state); + } + } + impl PartialEq for hwtstamp_config { + fn eq(&self, other: &hwtstamp_config) -> bool { + self.flags == other.flags + && self.tx_type == other.tx_type + && self.rx_filter == other.rx_filter + } + } + impl Eq for hwtstamp_config {} + impl hash::Hash for hwtstamp_config { + fn hash<H: hash::Hasher>(&self, state: &mut H) { + self.flags.hash(state); + self.tx_type.hash(state); + self.rx_filter.hash(state); + } + } + + impl PartialEq for sched_attr { + fn eq(&self, other: &sched_attr) -> bool { + self.size == other.size + && self.sched_policy == other.sched_policy + && self.sched_flags == other.sched_flags + && self.sched_nice == other.sched_nice + && self.sched_priority == other.sched_priority + && self.sched_runtime == other.sched_runtime + && self.sched_deadline == other.sched_deadline + && self.sched_period == other.sched_period + } + } + impl Eq for sched_attr {} + impl hash::Hash for sched_attr { + fn hash<H: hash::Hasher>(&self, state: &mut H) { + self.size.hash(state); + self.sched_policy.hash(state); + self.sched_flags.hash(state); + self.sched_nice.hash(state); + self.sched_priority.hash(state); + self.sched_runtime.hash(state); + self.sched_deadline.hash(state); + self.sched_period.hash(state); + } + } + } +} + +cfg_if!
{ + if #[cfg(any( + target_env = "gnu", + target_env = "musl", + target_env = "ohos" + ))] { + pub const ABDAY_1: crate::nl_item = 0x20000; + pub const ABDAY_2: crate::nl_item = 0x20001; + pub const ABDAY_3: crate::nl_item = 0x20002; + pub const ABDAY_4: crate::nl_item = 0x20003; + pub const ABDAY_5: crate::nl_item = 0x20004; + pub const ABDAY_6: crate::nl_item = 0x20005; + pub const ABDAY_7: crate::nl_item = 0x20006; + + pub const DAY_1: crate::nl_item = 0x20007; + pub const DAY_2: crate::nl_item = 0x20008; + pub const DAY_3: crate::nl_item = 0x20009; + pub const DAY_4: crate::nl_item = 0x2000A; + pub const DAY_5: crate::nl_item = 0x2000B; + pub const DAY_6: crate::nl_item = 0x2000C; + pub const DAY_7: crate::nl_item = 0x2000D; + + pub const ABMON_1: crate::nl_item = 0x2000E; + pub const ABMON_2: crate::nl_item = 0x2000F; + pub const ABMON_3: crate::nl_item = 0x20010; + pub const ABMON_4: crate::nl_item = 0x20011; + pub const ABMON_5: crate::nl_item = 0x20012; + pub const ABMON_6: crate::nl_item = 0x20013; + pub const ABMON_7: crate::nl_item = 0x20014; + pub const ABMON_8: crate::nl_item = 0x20015; + pub const ABMON_9: crate::nl_item = 0x20016; + pub const ABMON_10: crate::nl_item = 0x20017; + pub const ABMON_11: crate::nl_item = 0x20018; + pub const ABMON_12: crate::nl_item = 0x20019; + + pub const MON_1: crate::nl_item = 0x2001A; + pub const MON_2: crate::nl_item = 0x2001B; + pub const MON_3: crate::nl_item = 0x2001C; + pub const MON_4: crate::nl_item = 0x2001D; + pub const MON_5: crate::nl_item = 0x2001E; + pub const MON_6: crate::nl_item = 0x2001F; + pub const MON_7: crate::nl_item = 0x20020; + pub const MON_8: crate::nl_item = 0x20021; + pub const MON_9: crate::nl_item = 0x20022; + pub const MON_10: crate::nl_item = 0x20023; + pub const MON_11: crate::nl_item = 0x20024; + pub const MON_12: crate::nl_item = 0x20025; + + pub const AM_STR: crate::nl_item = 0x20026; + pub const PM_STR: crate::nl_item = 0x20027; + + pub const D_T_FMT: crate::nl_item = 0x20028; + pub const D_FMT: crate::nl_item = 0x20029; + pub const T_FMT: crate::nl_item = 0x2002A; + pub const T_FMT_AMPM: crate::nl_item = 0x2002B; + + pub const ERA: crate::nl_item = 0x2002C; + pub const ERA_D_FMT: crate::nl_item = 0x2002E; + pub const ALT_DIGITS: crate::nl_item = 0x2002F; + pub const ERA_D_T_FMT: crate::nl_item = 0x20030; + pub const ERA_T_FMT: crate::nl_item = 0x20031; + + pub const CODESET: crate::nl_item = 14; + pub const CRNCYSTR: crate::nl_item = 0x4000F; + pub const RADIXCHAR: crate::nl_item = 0x10000; + pub const THOUSEP: crate::nl_item = 0x10001; + pub const YESEXPR: crate::nl_item = 0x50000; + pub const NOEXPR: crate::nl_item = 0x50001; + pub const YESSTR: crate::nl_item = 0x50002; + pub const NOSTR: crate::nl_item = 0x50003; + } +} + +pub const RUSAGE_CHILDREN: c_int = -1; +pub const L_tmpnam: c_uint = 20; +pub const _PC_LINK_MAX: c_int = 0; +pub const _PC_MAX_CANON: c_int = 1; +pub const _PC_MAX_INPUT: c_int = 2; +pub const _PC_NAME_MAX: c_int = 3; +pub const _PC_PATH_MAX: c_int = 4; +pub const _PC_PIPE_BUF: c_int = 5; +pub const _PC_CHOWN_RESTRICTED: c_int = 6; +pub const _PC_NO_TRUNC: c_int = 7; +pub const _PC_VDISABLE: c_int = 8; +pub const _PC_SYNC_IO: c_int = 9; +pub const _PC_ASYNC_IO: c_int = 10; +pub const _PC_PRIO_IO: c_int = 11; +pub const _PC_SOCK_MAXBUF: c_int = 12; +pub const _PC_FILESIZEBITS: c_int = 13; +pub const _PC_REC_INCR_XFER_SIZE: c_int = 14; +pub const _PC_REC_MAX_XFER_SIZE: c_int = 15; +pub const _PC_REC_MIN_XFER_SIZE: c_int = 16; +pub const _PC_REC_XFER_ALIGN: c_int = 17; +pub const 
_PC_ALLOC_SIZE_MIN: c_int = 18; +pub const _PC_SYMLINK_MAX: c_int = 19; +pub const _PC_2_SYMLINKS: c_int = 20; + +pub const MS_NOUSER: c_ulong = 0xffffffff80000000; + +pub const _SC_ARG_MAX: c_int = 0; +pub const _SC_CHILD_MAX: c_int = 1; +pub const _SC_CLK_TCK: c_int = 2; +pub const _SC_NGROUPS_MAX: c_int = 3; +pub const _SC_OPEN_MAX: c_int = 4; +pub const _SC_STREAM_MAX: c_int = 5; +pub const _SC_TZNAME_MAX: c_int = 6; +pub const _SC_JOB_CONTROL: c_int = 7; +pub const _SC_SAVED_IDS: c_int = 8; +pub const _SC_REALTIME_SIGNALS: c_int = 9; +pub const _SC_PRIORITY_SCHEDULING: c_int = 10; +pub const _SC_TIMERS: c_int = 11; +pub const _SC_ASYNCHRONOUS_IO: c_int = 12; +pub const _SC_PRIORITIZED_IO: c_int = 13; +pub const _SC_SYNCHRONIZED_IO: c_int = 14; +pub const _SC_FSYNC: c_int = 15; +pub const _SC_MAPPED_FILES: c_int = 16; +pub const _SC_MEMLOCK: c_int = 17; +pub const _SC_MEMLOCK_RANGE: c_int = 18; +pub const _SC_MEMORY_PROTECTION: c_int = 19; +pub const _SC_MESSAGE_PASSING: c_int = 20; +pub const _SC_SEMAPHORES: c_int = 21; +pub const _SC_SHARED_MEMORY_OBJECTS: c_int = 22; +pub const _SC_AIO_LISTIO_MAX: c_int = 23; +pub const _SC_AIO_MAX: c_int = 24; +pub const _SC_AIO_PRIO_DELTA_MAX: c_int = 25; +pub const _SC_DELAYTIMER_MAX: c_int = 26; +pub const _SC_MQ_OPEN_MAX: c_int = 27; +pub const _SC_MQ_PRIO_MAX: c_int = 28; +pub const _SC_VERSION: c_int = 29; +pub const _SC_PAGESIZE: c_int = 30; +pub const _SC_PAGE_SIZE: c_int = _SC_PAGESIZE; +pub const _SC_RTSIG_MAX: c_int = 31; +pub const _SC_SEM_NSEMS_MAX: c_int = 32; +pub const _SC_SEM_VALUE_MAX: c_int = 33; +pub const _SC_SIGQUEUE_MAX: c_int = 34; +pub const _SC_TIMER_MAX: c_int = 35; +pub const _SC_BC_BASE_MAX: c_int = 36; +pub const _SC_BC_DIM_MAX: c_int = 37; +pub const _SC_BC_SCALE_MAX: c_int = 38; +pub const _SC_BC_STRING_MAX: c_int = 39; +pub const _SC_COLL_WEIGHTS_MAX: c_int = 40; +pub const _SC_EXPR_NEST_MAX: c_int = 42; +pub const _SC_LINE_MAX: c_int = 43; +pub const _SC_RE_DUP_MAX: c_int = 44; +pub const _SC_2_VERSION: c_int = 46; +pub const _SC_2_C_BIND: c_int = 47; +pub const _SC_2_C_DEV: c_int = 48; +pub const _SC_2_FORT_DEV: c_int = 49; +pub const _SC_2_FORT_RUN: c_int = 50; +pub const _SC_2_SW_DEV: c_int = 51; +pub const _SC_2_LOCALEDEF: c_int = 52; +pub const _SC_UIO_MAXIOV: c_int = 60; +pub const _SC_IOV_MAX: c_int = 60; +pub const _SC_THREADS: c_int = 67; +pub const _SC_THREAD_SAFE_FUNCTIONS: c_int = 68; +pub const _SC_GETGR_R_SIZE_MAX: c_int = 69; +pub const _SC_GETPW_R_SIZE_MAX: c_int = 70; +pub const _SC_LOGIN_NAME_MAX: c_int = 71; +pub const _SC_TTY_NAME_MAX: c_int = 72; +pub const _SC_THREAD_DESTRUCTOR_ITERATIONS: c_int = 73; +pub const _SC_THREAD_KEYS_MAX: c_int = 74; +pub const _SC_THREAD_STACK_MIN: c_int = 75; +pub const _SC_THREAD_THREADS_MAX: c_int = 76; +pub const _SC_THREAD_ATTR_STACKADDR: c_int = 77; +pub const _SC_THREAD_ATTR_STACKSIZE: c_int = 78; +pub const _SC_THREAD_PRIORITY_SCHEDULING: c_int = 79; +pub const _SC_THREAD_PRIO_INHERIT: c_int = 80; +pub const _SC_THREAD_PRIO_PROTECT: c_int = 81; +pub const _SC_THREAD_PROCESS_SHARED: c_int = 82; +pub const _SC_NPROCESSORS_CONF: c_int = 83; +pub const _SC_NPROCESSORS_ONLN: c_int = 84; +pub const _SC_PHYS_PAGES: c_int = 85; +pub const _SC_AVPHYS_PAGES: c_int = 86; +pub const _SC_ATEXIT_MAX: c_int = 87; +pub const _SC_PASS_MAX: c_int = 88; +pub const _SC_XOPEN_VERSION: c_int = 89; +pub const _SC_XOPEN_XCU_VERSION: c_int = 90; +pub const _SC_XOPEN_UNIX: c_int = 91; +pub const _SC_XOPEN_CRYPT: c_int = 92; +pub const _SC_XOPEN_ENH_I18N: c_int = 93; +pub const 
_SC_XOPEN_SHM: c_int = 94; +pub const _SC_2_CHAR_TERM: c_int = 95; +pub const _SC_2_UPE: c_int = 97; +pub const _SC_XOPEN_XPG2: c_int = 98; +pub const _SC_XOPEN_XPG3: c_int = 99; +pub const _SC_XOPEN_XPG4: c_int = 100; +pub const _SC_NZERO: c_int = 109; +pub const _SC_XBS5_ILP32_OFF32: c_int = 125; +pub const _SC_XBS5_ILP32_OFFBIG: c_int = 126; +pub const _SC_XBS5_LP64_OFF64: c_int = 127; +pub const _SC_XBS5_LPBIG_OFFBIG: c_int = 128; +pub const _SC_XOPEN_LEGACY: c_int = 129; +pub const _SC_XOPEN_REALTIME: c_int = 130; +pub const _SC_XOPEN_REALTIME_THREADS: c_int = 131; +pub const _SC_ADVISORY_INFO: c_int = 132; +pub const _SC_BARRIERS: c_int = 133; +pub const _SC_CLOCK_SELECTION: c_int = 137; +pub const _SC_CPUTIME: c_int = 138; +pub const _SC_THREAD_CPUTIME: c_int = 139; +pub const _SC_MONOTONIC_CLOCK: c_int = 149; +pub const _SC_READER_WRITER_LOCKS: c_int = 153; +pub const _SC_SPIN_LOCKS: c_int = 154; +pub const _SC_REGEXP: c_int = 155; +pub const _SC_SHELL: c_int = 157; +pub const _SC_SPAWN: c_int = 159; +pub const _SC_SPORADIC_SERVER: c_int = 160; +pub const _SC_THREAD_SPORADIC_SERVER: c_int = 161; +pub const _SC_TIMEOUTS: c_int = 164; +pub const _SC_TYPED_MEMORY_OBJECTS: c_int = 165; +pub const _SC_2_PBS: c_int = 168; +pub const _SC_2_PBS_ACCOUNTING: c_int = 169; +pub const _SC_2_PBS_LOCATE: c_int = 170; +pub const _SC_2_PBS_MESSAGE: c_int = 171; +pub const _SC_2_PBS_TRACK: c_int = 172; +pub const _SC_SYMLOOP_MAX: c_int = 173; +pub const _SC_STREAMS: c_int = 174; +pub const _SC_2_PBS_CHECKPOINT: c_int = 175; +pub const _SC_V6_ILP32_OFF32: c_int = 176; +pub const _SC_V6_ILP32_OFFBIG: c_int = 177; +pub const _SC_V6_LP64_OFF64: c_int = 178; +pub const _SC_V6_LPBIG_OFFBIG: c_int = 179; +pub const _SC_HOST_NAME_MAX: c_int = 180; +pub const _SC_TRACE: c_int = 181; +pub const _SC_TRACE_EVENT_FILTER: c_int = 182; +pub const _SC_TRACE_INHERIT: c_int = 183; +pub const _SC_TRACE_LOG: c_int = 184; +pub const _SC_IPV6: c_int = 235; +pub const _SC_RAW_SOCKETS: c_int = 236; +pub const _SC_V7_ILP32_OFF32: c_int = 237; +pub const _SC_V7_ILP32_OFFBIG: c_int = 238; +pub const _SC_V7_LP64_OFF64: c_int = 239; +pub const _SC_V7_LPBIG_OFFBIG: c_int = 240; +pub const _SC_SS_REPL_MAX: c_int = 241; +pub const _SC_TRACE_EVENT_NAME_MAX: c_int = 242; +pub const _SC_TRACE_NAME_MAX: c_int = 243; +pub const _SC_TRACE_SYS_MAX: c_int = 244; +pub const _SC_TRACE_USER_EVENT_MAX: c_int = 245; +pub const _SC_XOPEN_STREAMS: c_int = 246; +pub const _SC_THREAD_ROBUST_PRIO_INHERIT: c_int = 247; +pub const _SC_THREAD_ROBUST_PRIO_PROTECT: c_int = 248; + +pub const _CS_PATH: c_int = 0; +pub const _CS_POSIX_V6_WIDTH_RESTRICTED_ENVS: c_int = 1; +pub const _CS_POSIX_V5_WIDTH_RESTRICTED_ENVS: c_int = 4; +pub const _CS_POSIX_V7_WIDTH_RESTRICTED_ENVS: c_int = 5; +pub const _CS_POSIX_V6_ILP32_OFF32_CFLAGS: c_int = 1116; +pub const _CS_POSIX_V6_ILP32_OFF32_LDFLAGS: c_int = 1117; +pub const _CS_POSIX_V6_ILP32_OFF32_LIBS: c_int = 1118; +pub const _CS_POSIX_V6_ILP32_OFF32_LINTFLAGS: c_int = 1119; +pub const _CS_POSIX_V6_ILP32_OFFBIG_CFLAGS: c_int = 1120; +pub const _CS_POSIX_V6_ILP32_OFFBIG_LDFLAGS: c_int = 1121; +pub const _CS_POSIX_V6_ILP32_OFFBIG_LIBS: c_int = 1122; +pub const _CS_POSIX_V6_ILP32_OFFBIG_LINTFLAGS: c_int = 1123; +pub const _CS_POSIX_V6_LP64_OFF64_CFLAGS: c_int = 1124; +pub const _CS_POSIX_V6_LP64_OFF64_LDFLAGS: c_int = 1125; +pub const _CS_POSIX_V6_LP64_OFF64_LIBS: c_int = 1126; +pub const _CS_POSIX_V6_LP64_OFF64_LINTFLAGS: c_int = 1127; +pub const _CS_POSIX_V6_LPBIG_OFFBIG_CFLAGS: c_int = 1128; +pub const 
_CS_POSIX_V6_LPBIG_OFFBIG_LDFLAGS: c_int = 1129; +pub const _CS_POSIX_V6_LPBIG_OFFBIG_LIBS: c_int = 1130; +pub const _CS_POSIX_V6_LPBIG_OFFBIG_LINTFLAGS: c_int = 1131; +pub const _CS_POSIX_V7_ILP32_OFF32_CFLAGS: c_int = 1132; +pub const _CS_POSIX_V7_ILP32_OFF32_LDFLAGS: c_int = 1133; +pub const _CS_POSIX_V7_ILP32_OFF32_LIBS: c_int = 1134; +pub const _CS_POSIX_V7_ILP32_OFF32_LINTFLAGS: c_int = 1135; +pub const _CS_POSIX_V7_ILP32_OFFBIG_CFLAGS: c_int = 1136; +pub const _CS_POSIX_V7_ILP32_OFFBIG_LDFLAGS: c_int = 1137; +pub const _CS_POSIX_V7_ILP32_OFFBIG_LIBS: c_int = 1138; +pub const _CS_POSIX_V7_ILP32_OFFBIG_LINTFLAGS: c_int = 1139; +pub const _CS_POSIX_V7_LP64_OFF64_CFLAGS: c_int = 1140; +pub const _CS_POSIX_V7_LP64_OFF64_LDFLAGS: c_int = 1141; +pub const _CS_POSIX_V7_LP64_OFF64_LIBS: c_int = 1142; +pub const _CS_POSIX_V7_LP64_OFF64_LINTFLAGS: c_int = 1143; +pub const _CS_POSIX_V7_LPBIG_OFFBIG_CFLAGS: c_int = 1144; +pub const _CS_POSIX_V7_LPBIG_OFFBIG_LDFLAGS: c_int = 1145; +pub const _CS_POSIX_V7_LPBIG_OFFBIG_LIBS: c_int = 1146; +pub const _CS_POSIX_V7_LPBIG_OFFBIG_LINTFLAGS: c_int = 1147; + +pub const RLIM_SAVED_MAX: crate::rlim_t = RLIM_INFINITY; +pub const RLIM_SAVED_CUR: crate::rlim_t = RLIM_INFINITY; + +// elf.h - Fields in the e_ident array. +pub const EI_NIDENT: usize = 16; + +pub const EI_MAG0: usize = 0; +pub const ELFMAG0: u8 = 0x7f; +pub const EI_MAG1: usize = 1; +pub const ELFMAG1: u8 = b'E'; +pub const EI_MAG2: usize = 2; +pub const ELFMAG2: u8 = b'L'; +pub const EI_MAG3: usize = 3; +pub const ELFMAG3: u8 = b'F'; +pub const SELFMAG: usize = 4; + +pub const EI_CLASS: usize = 4; +pub const ELFCLASSNONE: u8 = 0; +pub const ELFCLASS32: u8 = 1; +pub const ELFCLASS64: u8 = 2; +pub const ELFCLASSNUM: usize = 3; + +pub const EI_DATA: usize = 5; +pub const ELFDATANONE: u8 = 0; +pub const ELFDATA2LSB: u8 = 1; +pub const ELFDATA2MSB: u8 = 2; +pub const ELFDATANUM: usize = 3; + +pub const EI_VERSION: usize = 6; + +pub const EI_OSABI: usize = 7; +pub const ELFOSABI_NONE: u8 = 0; +pub const ELFOSABI_SYSV: u8 = 0; +pub const ELFOSABI_HPUX: u8 = 1; +pub const ELFOSABI_NETBSD: u8 = 2; +pub const ELFOSABI_GNU: u8 = 3; +pub const ELFOSABI_LINUX: u8 = ELFOSABI_GNU; +pub const ELFOSABI_SOLARIS: u8 = 6; +pub const ELFOSABI_AIX: u8 = 7; +pub const ELFOSABI_IRIX: u8 = 8; +pub const ELFOSABI_FREEBSD: u8 = 9; +pub const ELFOSABI_TRU64: u8 = 10; +pub const ELFOSABI_MODESTO: u8 = 11; +pub const ELFOSABI_OPENBSD: u8 = 12; +pub const ELFOSABI_ARM: u8 = 97; +pub const ELFOSABI_STANDALONE: u8 = 255; + +pub const EI_ABIVERSION: usize = 8; + +pub const EI_PAD: usize = 9; + +// elf.h - Legal values for e_type (object file type). +pub const ET_NONE: u16 = 0; +pub const ET_REL: u16 = 1; +pub const ET_EXEC: u16 = 2; +pub const ET_DYN: u16 = 3; +pub const ET_CORE: u16 = 4; +pub const ET_NUM: u16 = 5; +pub const ET_LOOS: u16 = 0xfe00; +pub const ET_HIOS: u16 = 0xfeff; +pub const ET_LOPROC: u16 = 0xff00; +pub const ET_HIPROC: u16 = 0xffff; + +// elf.h - Legal values for e_machine (architecture). 
+pub const EM_NONE: u16 = 0; +pub const EM_M32: u16 = 1; +pub const EM_SPARC: u16 = 2; +pub const EM_386: u16 = 3; +pub const EM_68K: u16 = 4; +pub const EM_88K: u16 = 5; +pub const EM_860: u16 = 7; +pub const EM_MIPS: u16 = 8; +pub const EM_S370: u16 = 9; +pub const EM_MIPS_RS3_LE: u16 = 10; +pub const EM_PARISC: u16 = 15; +pub const EM_VPP500: u16 = 17; +pub const EM_SPARC32PLUS: u16 = 18; +pub const EM_960: u16 = 19; +pub const EM_PPC: u16 = 20; +pub const EM_PPC64: u16 = 21; +pub const EM_S390: u16 = 22; +pub const EM_V800: u16 = 36; +pub const EM_FR20: u16 = 37; +pub const EM_RH32: u16 = 38; +pub const EM_RCE: u16 = 39; +pub const EM_ARM: u16 = 40; +pub const EM_FAKE_ALPHA: u16 = 41; +pub const EM_SH: u16 = 42; +pub const EM_SPARCV9: u16 = 43; +pub const EM_TRICORE: u16 = 44; +pub const EM_ARC: u16 = 45; +pub const EM_H8_300: u16 = 46; +pub const EM_H8_300H: u16 = 47; +pub const EM_H8S: u16 = 48; +pub const EM_H8_500: u16 = 49; +pub const EM_IA_64: u16 = 50; +pub const EM_MIPS_X: u16 = 51; +pub const EM_COLDFIRE: u16 = 52; +pub const EM_68HC12: u16 = 53; +pub const EM_MMA: u16 = 54; +pub const EM_PCP: u16 = 55; +pub const EM_NCPU: u16 = 56; +pub const EM_NDR1: u16 = 57; +pub const EM_STARCORE: u16 = 58; +pub const EM_ME16: u16 = 59; +pub const EM_ST100: u16 = 60; +pub const EM_TINYJ: u16 = 61; +pub const EM_X86_64: u16 = 62; +pub const EM_PDSP: u16 = 63; +pub const EM_FX66: u16 = 66; +pub const EM_ST9PLUS: u16 = 67; +pub const EM_ST7: u16 = 68; +pub const EM_68HC16: u16 = 69; +pub const EM_68HC11: u16 = 70; +pub const EM_68HC08: u16 = 71; +pub const EM_68HC05: u16 = 72; +pub const EM_SVX: u16 = 73; +pub const EM_ST19: u16 = 74; +pub const EM_VAX: u16 = 75; +pub const EM_CRIS: u16 = 76; +pub const EM_JAVELIN: u16 = 77; +pub const EM_FIREPATH: u16 = 78; +pub const EM_ZSP: u16 = 79; +pub const EM_MMIX: u16 = 80; +pub const EM_HUANY: u16 = 81; +pub const EM_PRISM: u16 = 82; +pub const EM_AVR: u16 = 83; +pub const EM_FR30: u16 = 84; +pub const EM_D10V: u16 = 85; +pub const EM_D30V: u16 = 86; +pub const EM_V850: u16 = 87; +pub const EM_M32R: u16 = 88; +pub const EM_MN10300: u16 = 89; +pub const EM_MN10200: u16 = 90; +pub const EM_PJ: u16 = 91; +pub const EM_OPENRISC: u16 = 92; +pub const EM_ARC_A5: u16 = 93; +pub const EM_XTENSA: u16 = 94; +pub const EM_AARCH64: u16 = 183; +pub const EM_TILEPRO: u16 = 188; +pub const EM_TILEGX: u16 = 191; +pub const EM_RISCV: u16 = 243; +pub const EM_ALPHA: u16 = 0x9026; + +// elf.h - Legal values for e_version (version). +pub const EV_NONE: u32 = 0; +pub const EV_CURRENT: u32 = 1; +pub const EV_NUM: u32 = 2; + +// elf.h - Legal values for p_type (segment type). +pub const PT_NULL: u32 = 0; +pub const PT_LOAD: u32 = 1; +pub const PT_DYNAMIC: u32 = 2; +pub const PT_INTERP: u32 = 3; +pub const PT_NOTE: u32 = 4; +pub const PT_SHLIB: u32 = 5; +pub const PT_PHDR: u32 = 6; +pub const PT_TLS: u32 = 7; +pub const PT_NUM: u32 = 8; +pub const PT_LOOS: u32 = 0x60000000; +pub const PT_GNU_EH_FRAME: u32 = 0x6474e550; +pub const PT_GNU_STACK: u32 = 0x6474e551; +pub const PT_GNU_RELRO: u32 = 0x6474e552; +pub const PT_LOSUNW: u32 = 0x6ffffffa; +pub const PT_SUNWBSS: u32 = 0x6ffffffa; +pub const PT_SUNWSTACK: u32 = 0x6ffffffb; +pub const PT_HISUNW: u32 = 0x6fffffff; +pub const PT_HIOS: u32 = 0x6fffffff; +pub const PT_LOPROC: u32 = 0x70000000; +pub const PT_HIPROC: u32 = 0x7fffffff; + +// Legal values for p_flags (segment flags). 
+pub const PF_X: u32 = 1 << 0; +pub const PF_W: u32 = 1 << 1; +pub const PF_R: u32 = 1 << 2; +pub const PF_MASKOS: u32 = 0x0ff00000; +pub const PF_MASKPROC: u32 = 0xf0000000; + +// elf.h - Legal values for a_type (entry type). +pub const AT_NULL: c_ulong = 0; +pub const AT_IGNORE: c_ulong = 1; +pub const AT_EXECFD: c_ulong = 2; +pub const AT_PHDR: c_ulong = 3; +pub const AT_PHENT: c_ulong = 4; +pub const AT_PHNUM: c_ulong = 5; +pub const AT_PAGESZ: c_ulong = 6; +pub const AT_BASE: c_ulong = 7; +pub const AT_FLAGS: c_ulong = 8; +pub const AT_ENTRY: c_ulong = 9; +pub const AT_NOTELF: c_ulong = 10; +pub const AT_UID: c_ulong = 11; +pub const AT_EUID: c_ulong = 12; +pub const AT_GID: c_ulong = 13; +pub const AT_EGID: c_ulong = 14; +pub const AT_PLATFORM: c_ulong = 15; +pub const AT_HWCAP: c_ulong = 16; +pub const AT_CLKTCK: c_ulong = 17; + +pub const AT_SECURE: c_ulong = 23; +pub const AT_BASE_PLATFORM: c_ulong = 24; +pub const AT_RANDOM: c_ulong = 25; +pub const AT_HWCAP2: c_ulong = 26; + +pub const AT_EXECFN: c_ulong = 31; + +// defined in arch//include/uapi/asm/auxvec.h but has the same value +// wherever it is defined. +pub const AT_SYSINFO_EHDR: c_ulong = 33; +pub const AT_MINSIGSTKSZ: c_ulong = 51; + +pub const GLOB_ERR: c_int = 1 << 0; +pub const GLOB_MARK: c_int = 1 << 1; +pub const GLOB_NOSORT: c_int = 1 << 2; +pub const GLOB_DOOFFS: c_int = 1 << 3; +pub const GLOB_NOCHECK: c_int = 1 << 4; +pub const GLOB_APPEND: c_int = 1 << 5; +pub const GLOB_NOESCAPE: c_int = 1 << 6; + +pub const GLOB_NOSPACE: c_int = 1; +pub const GLOB_ABORTED: c_int = 2; +pub const GLOB_NOMATCH: c_int = 3; + +pub const POSIX_MADV_NORMAL: c_int = 0; +pub const POSIX_MADV_RANDOM: c_int = 1; +pub const POSIX_MADV_SEQUENTIAL: c_int = 2; +pub const POSIX_MADV_WILLNEED: c_int = 3; +pub const POSIX_SPAWN_USEVFORK: c_int = 64; +pub const POSIX_SPAWN_SETSID: c_int = 128; + +pub const S_IEXEC: mode_t = 0o0100; +pub const S_IWRITE: mode_t = 0o0200; +pub const S_IREAD: mode_t = 0o0400; + +pub const F_LOCK: c_int = 1; +pub const F_TEST: c_int = 3; +pub const F_TLOCK: c_int = 2; +pub const F_ULOCK: c_int = 0; + +pub const F_SEAL_FUTURE_WRITE: c_int = 0x0010; +pub const F_SEAL_EXEC: c_int = 0x0020; + +pub const IFF_LOWER_UP: c_int = 0x10000; +pub const IFF_DORMANT: c_int = 0x20000; +pub const IFF_ECHO: c_int = 0x40000; + +// linux/if_addr.h +pub const IFA_UNSPEC: c_ushort = 0; +pub const IFA_ADDRESS: c_ushort = 1; +pub const IFA_LOCAL: c_ushort = 2; +pub const IFA_LABEL: c_ushort = 3; +pub const IFA_BROADCAST: c_ushort = 4; +pub const IFA_ANYCAST: c_ushort = 5; +pub const IFA_CACHEINFO: c_ushort = 6; +pub const IFA_MULTICAST: c_ushort = 7; +pub const IFA_FLAGS: c_ushort = 8; + +pub const IFA_F_SECONDARY: u32 = 0x01; +pub const IFA_F_TEMPORARY: u32 = 0x01; +pub const IFA_F_NODAD: u32 = 0x02; +pub const IFA_F_OPTIMISTIC: u32 = 0x04; +pub const IFA_F_DADFAILED: u32 = 0x08; +pub const IFA_F_HOMEADDRESS: u32 = 0x10; +pub const IFA_F_DEPRECATED: u32 = 0x20; +pub const IFA_F_TENTATIVE: u32 = 0x40; +pub const IFA_F_PERMANENT: u32 = 0x80; +pub const IFA_F_MANAGETEMPADDR: u32 = 0x100; +pub const IFA_F_NOPREFIXROUTE: u32 = 0x200; +pub const IFA_F_MCAUTOJOIN: u32 = 0x400; +pub const IFA_F_STABLE_PRIVACY: u32 = 0x800; + +// linux/fs.h + +// Flags for preadv2/pwritev2 +pub const RWF_HIPRI: c_int = 0x00000001; +pub const RWF_DSYNC: c_int = 0x00000002; +pub const RWF_SYNC: c_int = 0x00000004; +pub const RWF_NOWAIT: c_int = 0x00000008; +pub const RWF_APPEND: c_int = 0x00000010; +pub const RWF_NOAPPEND: c_int = 0x00000020; +pub const RWF_ATOMIC: 
c_int = 0x00000040; +pub const RWF_DONTCACHE: c_int = 0x00000080; + +// linux/if_link.h +pub const IFLA_UNSPEC: c_ushort = 0; +pub const IFLA_ADDRESS: c_ushort = 1; +pub const IFLA_BROADCAST: c_ushort = 2; +pub const IFLA_IFNAME: c_ushort = 3; +pub const IFLA_MTU: c_ushort = 4; +pub const IFLA_LINK: c_ushort = 5; +pub const IFLA_QDISC: c_ushort = 6; +pub const IFLA_STATS: c_ushort = 7; +pub const IFLA_COST: c_ushort = 8; +pub const IFLA_PRIORITY: c_ushort = 9; +pub const IFLA_MASTER: c_ushort = 10; +pub const IFLA_WIRELESS: c_ushort = 11; +pub const IFLA_PROTINFO: c_ushort = 12; +pub const IFLA_TXQLEN: c_ushort = 13; +pub const IFLA_MAP: c_ushort = 14; +pub const IFLA_WEIGHT: c_ushort = 15; +pub const IFLA_OPERSTATE: c_ushort = 16; +pub const IFLA_LINKMODE: c_ushort = 17; +pub const IFLA_LINKINFO: c_ushort = 18; +pub const IFLA_NET_NS_PID: c_ushort = 19; +pub const IFLA_IFALIAS: c_ushort = 20; +pub const IFLA_NUM_VF: c_ushort = 21; +pub const IFLA_VFINFO_LIST: c_ushort = 22; +pub const IFLA_STATS64: c_ushort = 23; +pub const IFLA_VF_PORTS: c_ushort = 24; +pub const IFLA_PORT_SELF: c_ushort = 25; +pub const IFLA_AF_SPEC: c_ushort = 26; +pub const IFLA_GROUP: c_ushort = 27; +pub const IFLA_NET_NS_FD: c_ushort = 28; +pub const IFLA_EXT_MASK: c_ushort = 29; +pub const IFLA_PROMISCUITY: c_ushort = 30; +pub const IFLA_NUM_TX_QUEUES: c_ushort = 31; +pub const IFLA_NUM_RX_QUEUES: c_ushort = 32; +pub const IFLA_CARRIER: c_ushort = 33; +pub const IFLA_PHYS_PORT_ID: c_ushort = 34; +pub const IFLA_CARRIER_CHANGES: c_ushort = 35; +pub const IFLA_PHYS_SWITCH_ID: c_ushort = 36; +pub const IFLA_LINK_NETNSID: c_ushort = 37; +pub const IFLA_PHYS_PORT_NAME: c_ushort = 38; +pub const IFLA_PROTO_DOWN: c_ushort = 39; +pub const IFLA_GSO_MAX_SEGS: c_ushort = 40; +pub const IFLA_GSO_MAX_SIZE: c_ushort = 41; +pub const IFLA_PAD: c_ushort = 42; +pub const IFLA_XDP: c_ushort = 43; +pub const IFLA_EVENT: c_ushort = 44; +pub const IFLA_NEW_NETNSID: c_ushort = 45; +pub const IFLA_IF_NETNSID: c_ushort = 46; +pub const IFLA_TARGET_NETNSID: c_ushort = IFLA_IF_NETNSID; +pub const IFLA_CARRIER_UP_COUNT: c_ushort = 47; +pub const IFLA_CARRIER_DOWN_COUNT: c_ushort = 48; +pub const IFLA_NEW_IFINDEX: c_ushort = 49; +pub const IFLA_MIN_MTU: c_ushort = 50; +pub const IFLA_MAX_MTU: c_ushort = 51; +pub const IFLA_PROP_LIST: c_ushort = 52; +pub const IFLA_ALT_IFNAME: c_ushort = 53; +pub const IFLA_PERM_ADDRESS: c_ushort = 54; +pub const IFLA_PROTO_DOWN_REASON: c_ushort = 55; +pub const IFLA_PARENT_DEV_NAME: c_ushort = 56; +pub const IFLA_PARENT_DEV_BUS_NAME: c_ushort = 57; +pub const IFLA_GRO_MAX_SIZE: c_ushort = 58; +pub const IFLA_TSO_MAX_SIZE: c_ushort = 59; +pub const IFLA_TSO_MAX_SEGS: c_ushort = 60; +pub const IFLA_ALLMULTI: c_ushort = 61; + +pub const IFLA_INFO_UNSPEC: c_ushort = 0; +pub const IFLA_INFO_KIND: c_ushort = 1; +pub const IFLA_INFO_DATA: c_ushort = 2; +pub const IFLA_INFO_XSTATS: c_ushort = 3; +pub const IFLA_INFO_SLAVE_KIND: c_ushort = 4; +pub const IFLA_INFO_SLAVE_DATA: c_ushort = 5; + +// Since Linux 3.1 +pub const SEEK_DATA: c_int = 3; +pub const SEEK_HOLE: c_int = 4; + +pub const ST_RDONLY: c_ulong = 1; +pub const ST_NOSUID: c_ulong = 2; +pub const ST_NODEV: c_ulong = 4; +pub const ST_NOEXEC: c_ulong = 8; +pub const ST_SYNCHRONOUS: c_ulong = 16; +pub const ST_MANDLOCK: c_ulong = 64; +pub const ST_WRITE: c_ulong = 128; +pub const ST_APPEND: c_ulong = 256; +pub const ST_IMMUTABLE: c_ulong = 512; +pub const ST_NOATIME: c_ulong = 1024; +pub const ST_NODIRATIME: c_ulong = 2048; + +pub const RTLD_NEXT: *mut c_void 
= -1i64 as *mut c_void; +pub const RTLD_DEFAULT: *mut c_void = ptr::null_mut(); +pub const RTLD_NODELETE: c_int = 0x1000; +pub const RTLD_NOW: c_int = 0x2; + +pub const AT_EACCESS: c_int = 0x200; + +// linux/mempolicy.h +pub const MPOL_DEFAULT: c_int = 0; +pub const MPOL_PREFERRED: c_int = 1; +pub const MPOL_BIND: c_int = 2; +pub const MPOL_INTERLEAVE: c_int = 3; +pub const MPOL_LOCAL: c_int = 4; +pub const MPOL_F_NUMA_BALANCING: c_int = 1 << 13; +pub const MPOL_F_RELATIVE_NODES: c_int = 1 << 14; +pub const MPOL_F_STATIC_NODES: c_int = 1 << 15; + +// linux/membarrier.h +pub const MEMBARRIER_CMD_QUERY: c_int = 0; +pub const MEMBARRIER_CMD_GLOBAL: c_int = 1 << 0; +pub const MEMBARRIER_CMD_GLOBAL_EXPEDITED: c_int = 1 << 1; +pub const MEMBARRIER_CMD_REGISTER_GLOBAL_EXPEDITED: c_int = 1 << 2; +pub const MEMBARRIER_CMD_PRIVATE_EXPEDITED: c_int = 1 << 3; +pub const MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED: c_int = 1 << 4; +pub const MEMBARRIER_CMD_PRIVATE_EXPEDITED_SYNC_CORE: c_int = 1 << 5; +pub const MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_SYNC_CORE: c_int = 1 << 6; +pub const MEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ: c_int = 1 << 7; +pub const MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_RSEQ: c_int = 1 << 8; + +pub const PTHREAD_MUTEX_INITIALIZER: pthread_mutex_t = pthread_mutex_t { + size: [0; __SIZEOF_PTHREAD_MUTEX_T], +}; +pub const PTHREAD_COND_INITIALIZER: pthread_cond_t = pthread_cond_t { + size: [0; __SIZEOF_PTHREAD_COND_T], +}; +pub const PTHREAD_RWLOCK_INITIALIZER: pthread_rwlock_t = pthread_rwlock_t { + size: [0; __SIZEOF_PTHREAD_RWLOCK_T], +}; + +pub const PTHREAD_BARRIER_SERIAL_THREAD: c_int = -1; +pub const PTHREAD_ONCE_INIT: pthread_once_t = 0; +pub const PTHREAD_MUTEX_NORMAL: c_int = 0; +pub const PTHREAD_MUTEX_RECURSIVE: c_int = 1; +pub const PTHREAD_MUTEX_ERRORCHECK: c_int = 2; +pub const PTHREAD_MUTEX_DEFAULT: c_int = PTHREAD_MUTEX_NORMAL; +pub const PTHREAD_MUTEX_STALLED: c_int = 0; +pub const PTHREAD_MUTEX_ROBUST: c_int = 1; +pub const PTHREAD_PRIO_NONE: c_int = 0; +pub const PTHREAD_PRIO_INHERIT: c_int = 1; +pub const PTHREAD_PRIO_PROTECT: c_int = 2; +pub const PTHREAD_PROCESS_PRIVATE: c_int = 0; +pub const PTHREAD_PROCESS_SHARED: c_int = 1; +pub const PTHREAD_INHERIT_SCHED: c_int = 0; +pub const PTHREAD_EXPLICIT_SCHED: c_int = 1; +pub const __SIZEOF_PTHREAD_COND_T: usize = 48; + +pub const RENAME_NOREPLACE: c_uint = 1; +pub const RENAME_EXCHANGE: c_uint = 2; +pub const RENAME_WHITEOUT: c_uint = 4; + +// netinet/in.h +// NOTE: These are in addition to the constants defined in src/unix/mod.rs + +#[deprecated( + since = "0.2.80", + note = "This value was increased in the newer kernel \ + and we'll change this following upstream in the future release. \ + See #1896 for more info." 
+)] +pub const IPPROTO_MAX: c_int = 256; + +// System V IPC +pub const IPC_PRIVATE: crate::key_t = 0; + +pub const IPC_CREAT: c_int = 0o1000; +pub const IPC_EXCL: c_int = 0o2000; +pub const IPC_NOWAIT: c_int = 0o4000; + +pub const IPC_RMID: c_int = 0; +pub const IPC_SET: c_int = 1; +pub const IPC_STAT: c_int = 2; +pub const IPC_INFO: c_int = 3; +pub const MSG_STAT: c_int = 11; +pub const MSG_INFO: c_int = 12; +pub const MSG_NOTIFICATION: c_int = 0x8000; + +pub const MSG_NOERROR: c_int = 0o10000; +pub const MSG_EXCEPT: c_int = 0o20000; +pub const MSG_ZEROCOPY: c_int = 0x4000000; + +pub const SEM_UNDO: c_int = 0x1000; + +pub const GETPID: c_int = 11; +pub const GETVAL: c_int = 12; +pub const GETALL: c_int = 13; +pub const GETNCNT: c_int = 14; +pub const GETZCNT: c_int = 15; +pub const SETVAL: c_int = 16; +pub const SETALL: c_int = 17; +pub const SEM_STAT: c_int = 18; +pub const SEM_INFO: c_int = 19; +pub const SEM_STAT_ANY: c_int = 20; + +pub const SHM_R: c_int = 0o400; +pub const SHM_W: c_int = 0o200; + +pub const SHM_RDONLY: c_int = 0o10000; +pub const SHM_RND: c_int = 0o20000; +pub const SHM_REMAP: c_int = 0o40000; + +pub const SHM_LOCK: c_int = 11; +pub const SHM_UNLOCK: c_int = 12; + +pub const SHM_HUGETLB: c_int = 0o4000; +#[cfg(not(all(target_env = "uclibc", target_arch = "mips")))] +pub const SHM_NORESERVE: c_int = 0o10000; + +pub const QFMT_VFS_OLD: c_int = 1; +pub const QFMT_VFS_V0: c_int = 2; +pub const QFMT_VFS_V1: c_int = 4; + +pub const EFD_SEMAPHORE: c_int = 0x1; + +pub const LOG_NFACILITIES: c_int = 24; + +pub const SEM_FAILED: *mut crate::sem_t = ptr::null_mut(); + +pub const RB_AUTOBOOT: c_int = 0x01234567u32 as i32; +pub const RB_HALT_SYSTEM: c_int = 0xcdef0123u32 as i32; +pub const RB_ENABLE_CAD: c_int = 0x89abcdefu32 as i32; +pub const RB_DISABLE_CAD: c_int = 0x00000000u32 as i32; +pub const RB_POWER_OFF: c_int = 0x4321fedcu32 as i32; +pub const RB_SW_SUSPEND: c_int = 0xd000fce2u32 as i32; +pub const RB_KEXEC: c_int = 0x45584543u32 as i32; + +pub const AI_PASSIVE: c_int = 0x0001; +pub const AI_CANONNAME: c_int = 0x0002; +pub const AI_NUMERICHOST: c_int = 0x0004; +pub const AI_V4MAPPED: c_int = 0x0008; +pub const AI_ALL: c_int = 0x0010; +pub const AI_ADDRCONFIG: c_int = 0x0020; + +pub const AI_NUMERICSERV: c_int = 0x0400; + +pub const EAI_BADFLAGS: c_int = -1; +pub const EAI_NONAME: c_int = -2; +pub const EAI_AGAIN: c_int = -3; +pub const EAI_FAIL: c_int = -4; +pub const EAI_NODATA: c_int = -5; +pub const EAI_FAMILY: c_int = -6; +pub const EAI_SOCKTYPE: c_int = -7; +pub const EAI_SERVICE: c_int = -8; +pub const EAI_MEMORY: c_int = -10; +pub const EAI_SYSTEM: c_int = -11; +pub const EAI_OVERFLOW: c_int = -12; + +pub const NI_NUMERICHOST: c_int = 1; +pub const NI_NUMERICSERV: c_int = 2; +pub const NI_NOFQDN: c_int = 4; +pub const NI_NAMEREQD: c_int = 8; +pub const NI_DGRAM: c_int = 16; +pub const NI_IDN: c_int = 32; + +pub const SYNC_FILE_RANGE_WAIT_BEFORE: c_uint = 1; +pub const SYNC_FILE_RANGE_WRITE: c_uint = 2; +pub const SYNC_FILE_RANGE_WAIT_AFTER: c_uint = 4; + +cfg_if! 
{ + if #[cfg(not(target_env = "uclibc"))] { + pub const AIO_CANCELED: c_int = 0; + pub const AIO_NOTCANCELED: c_int = 1; + pub const AIO_ALLDONE: c_int = 2; + pub const LIO_READ: c_int = 0; + pub const LIO_WRITE: c_int = 1; + pub const LIO_NOP: c_int = 2; + pub const LIO_WAIT: c_int = 0; + pub const LIO_NOWAIT: c_int = 1; + pub const RUSAGE_THREAD: c_int = 1; + pub const MSG_COPY: c_int = 0o40000; + pub const SHM_EXEC: c_int = 0o100000; + pub const IPV6_MULTICAST_ALL: c_int = 29; + pub const IPV6_ROUTER_ALERT_ISOLATE: c_int = 30; + pub const PACKET_MR_UNICAST: c_int = 3; + pub const PTRACE_EVENT_STOP: c_int = 128; + pub const UDP_SEGMENT: c_int = 103; + pub const UDP_GRO: c_int = 104; + } +} + +pub const MREMAP_MAYMOVE: c_int = 1; +pub const MREMAP_FIXED: c_int = 2; +pub const MREMAP_DONTUNMAP: c_int = 4; + +// linux/nsfs.h +const NSIO: c_uint = 0xb7; + +pub const NS_GET_USERNS: Ioctl = _IO(NSIO, 0x1); +pub const NS_GET_PARENT: Ioctl = _IO(NSIO, 0x2); +pub const NS_GET_NSTYPE: Ioctl = _IO(NSIO, 0x3); +pub const NS_GET_OWNER_UID: Ioctl = _IO(NSIO, 0x4); + +pub const NS_GET_MNTNS_ID: Ioctl = _IOR::<__u64>(NSIO, 0x5); + +pub const NS_GET_PID_FROM_PIDNS: Ioctl = _IOR::<c_int>(NSIO, 0x6); +pub const NS_GET_TGID_FROM_PIDNS: Ioctl = _IOR::<c_int>(NSIO, 0x7); +pub const NS_GET_PID_IN_PIDNS: Ioctl = _IOR::<c_int>(NSIO, 0x8); +pub const NS_GET_TGID_IN_PIDNS: Ioctl = _IOR::<c_int>(NSIO, 0x9); + +pub const MNT_NS_INFO_SIZE_VER0: Ioctl = 16; + +pub const NS_MNT_GET_INFO: Ioctl = _IOR::<mnt_ns_info>(NSIO, 10); +pub const NS_MNT_GET_NEXT: Ioctl = _IOR::<mnt_ns_info>(NSIO, 11); +pub const NS_MNT_GET_PREV: Ioctl = _IOR::<mnt_ns_info>(NSIO, 12); + +// linux/pidfd.h +pub const PIDFD_NONBLOCK: c_uint = O_NONBLOCK as c_uint; +pub const PIDFD_THREAD: c_uint = O_EXCL as c_uint; + +pub const PIDFD_SIGNAL_THREAD: c_uint = 1 << 0; +pub const PIDFD_SIGNAL_THREAD_GROUP: c_uint = 1 << 1; +pub const PIDFD_SIGNAL_PROCESS_GROUP: c_uint = 1 << 2; + +pub const PIDFD_INFO_PID: c_uint = 1 << 0; +pub const PIDFD_INFO_CREDS: c_uint = 1 << 1; +pub const PIDFD_INFO_CGROUPID: c_uint = 1 << 2; +pub const PIDFD_INFO_EXIT: c_uint = 1 << 3; + +pub const PIDFD_INFO_SIZE_VER0: c_uint = 64; + +const PIDFS_IOCTL_MAGIC: c_uint = 0xFF; +pub const PIDFD_GET_CGROUP_NAMESPACE: Ioctl = _IO(PIDFS_IOCTL_MAGIC, 1); +pub const PIDFD_GET_IPC_NAMESPACE: Ioctl = _IO(PIDFS_IOCTL_MAGIC, 2); +pub const PIDFD_GET_MNT_NAMESPACE: Ioctl = _IO(PIDFS_IOCTL_MAGIC, 3); +pub const PIDFD_GET_NET_NAMESPACE: Ioctl = _IO(PIDFS_IOCTL_MAGIC, 4); +pub const PIDFD_GET_PID_NAMESPACE: Ioctl = _IO(PIDFS_IOCTL_MAGIC, 5); +pub const PIDFD_GET_PID_FOR_CHILDREN_NAMESPACE: Ioctl = _IO(PIDFS_IOCTL_MAGIC, 6); +pub const PIDFD_GET_TIME_NAMESPACE: Ioctl = _IO(PIDFS_IOCTL_MAGIC, 7); +pub const PIDFD_GET_TIME_FOR_CHILDREN_NAMESPACE: Ioctl = _IO(PIDFS_IOCTL_MAGIC, 8); +pub const PIDFD_GET_USER_NAMESPACE: Ioctl = _IO(PIDFS_IOCTL_MAGIC, 9); +pub const PIDFD_GET_UTS_NAMESPACE: Ioctl = _IO(PIDFS_IOCTL_MAGIC, 10); +pub const PIDFD_GET_INFO: Ioctl = _IOWR::<pidfd_info>(PIDFS_IOCTL_MAGIC, 11); + +// linux/prctl.h +pub const PR_SET_PDEATHSIG: c_int = 1; +pub const PR_GET_PDEATHSIG: c_int = 2; + +pub const PR_GET_DUMPABLE: c_int = 3; +pub const PR_SET_DUMPABLE: c_int = 4; + +pub const PR_GET_UNALIGN: c_int = 5; +pub const PR_SET_UNALIGN: c_int = 6; +pub const PR_UNALIGN_NOPRINT: c_int = 1; +pub const PR_UNALIGN_SIGBUS: c_int = 2; + +pub const PR_GET_KEEPCAPS: c_int = 7; +pub const PR_SET_KEEPCAPS: c_int = 8; + +pub const PR_GET_FPEMU: c_int = 9; +pub const PR_SET_FPEMU: c_int = 10; +pub const PR_FPEMU_NOPRINT: c_int = 1; +pub const PR_FPEMU_SIGFPE: c_int = 2; + +pub 
const PR_GET_FPEXC: c_int = 11; +pub const PR_SET_FPEXC: c_int = 12; +pub const PR_FP_EXC_SW_ENABLE: c_int = 0x80; +pub const PR_FP_EXC_DIV: c_int = 0x010000; +pub const PR_FP_EXC_OVF: c_int = 0x020000; +pub const PR_FP_EXC_UND: c_int = 0x040000; +pub const PR_FP_EXC_RES: c_int = 0x080000; +pub const PR_FP_EXC_INV: c_int = 0x100000; +pub const PR_FP_EXC_DISABLED: c_int = 0; +pub const PR_FP_EXC_NONRECOV: c_int = 1; +pub const PR_FP_EXC_ASYNC: c_int = 2; +pub const PR_FP_EXC_PRECISE: c_int = 3; + +pub const PR_GET_TIMING: c_int = 13; +pub const PR_SET_TIMING: c_int = 14; +pub const PR_TIMING_STATISTICAL: c_int = 0; +pub const PR_TIMING_TIMESTAMP: c_int = 1; + +pub const PR_SET_NAME: c_int = 15; +pub const PR_GET_NAME: c_int = 16; + +pub const PR_GET_ENDIAN: c_int = 19; +pub const PR_SET_ENDIAN: c_int = 20; +pub const PR_ENDIAN_BIG: c_int = 0; +pub const PR_ENDIAN_LITTLE: c_int = 1; +pub const PR_ENDIAN_PPC_LITTLE: c_int = 2; + +pub const PR_GET_SECCOMP: c_int = 21; +pub const PR_SET_SECCOMP: c_int = 22; + +pub const PR_CAPBSET_READ: c_int = 23; +pub const PR_CAPBSET_DROP: c_int = 24; + +pub const PR_GET_TSC: c_int = 25; +pub const PR_SET_TSC: c_int = 26; +pub const PR_TSC_ENABLE: c_int = 1; +pub const PR_TSC_SIGSEGV: c_int = 2; + +pub const PR_GET_SECUREBITS: c_int = 27; +pub const PR_SET_SECUREBITS: c_int = 28; + +pub const PR_SET_TIMERSLACK: c_int = 29; +pub const PR_GET_TIMERSLACK: c_int = 30; + +pub const PR_TASK_PERF_EVENTS_DISABLE: c_int = 31; +pub const PR_TASK_PERF_EVENTS_ENABLE: c_int = 32; + +pub const PR_MCE_KILL: c_int = 33; +pub const PR_MCE_KILL_CLEAR: c_int = 0; +pub const PR_MCE_KILL_SET: c_int = 1; + +pub const PR_MCE_KILL_LATE: c_int = 0; +pub const PR_MCE_KILL_EARLY: c_int = 1; +pub const PR_MCE_KILL_DEFAULT: c_int = 2; + +pub const PR_MCE_KILL_GET: c_int = 34; + +pub const PR_SET_MM: c_int = 35; +pub const PR_SET_MM_START_CODE: c_int = 1; +pub const PR_SET_MM_END_CODE: c_int = 2; +pub const PR_SET_MM_START_DATA: c_int = 3; +pub const PR_SET_MM_END_DATA: c_int = 4; +pub const PR_SET_MM_START_STACK: c_int = 5; +pub const PR_SET_MM_START_BRK: c_int = 6; +pub const PR_SET_MM_BRK: c_int = 7; +pub const PR_SET_MM_ARG_START: c_int = 8; +pub const PR_SET_MM_ARG_END: c_int = 9; +pub const PR_SET_MM_ENV_START: c_int = 10; +pub const PR_SET_MM_ENV_END: c_int = 11; +pub const PR_SET_MM_AUXV: c_int = 12; +pub const PR_SET_MM_EXE_FILE: c_int = 13; +pub const PR_SET_MM_MAP: c_int = 14; +pub const PR_SET_MM_MAP_SIZE: c_int = 15; + +pub const PR_SET_PTRACER: c_int = 0x59616d61; +pub const PR_SET_PTRACER_ANY: c_ulong = 0xffffffffffffffff; + +pub const PR_SET_CHILD_SUBREAPER: c_int = 36; +pub const PR_GET_CHILD_SUBREAPER: c_int = 37; + +pub const PR_SET_NO_NEW_PRIVS: c_int = 38; +pub const PR_GET_NO_NEW_PRIVS: c_int = 39; + +pub const PR_SET_MDWE: c_int = 65; +pub const PR_GET_MDWE: c_int = 66; +pub const PR_MDWE_REFUSE_EXEC_GAIN: c_uint = 1 << 0; +pub const PR_MDWE_NO_INHERIT: c_uint = 1 << 1; + +pub const PR_GET_TID_ADDRESS: c_int = 40; + +pub const PR_SET_THP_DISABLE: c_int = 41; +pub const PR_GET_THP_DISABLE: c_int = 42; + +pub const PR_MPX_ENABLE_MANAGEMENT: c_int = 43; +pub const PR_MPX_DISABLE_MANAGEMENT: c_int = 44; + +pub const PR_SET_FP_MODE: c_int = 45; +pub const PR_GET_FP_MODE: c_int = 46; +pub const PR_FP_MODE_FR: c_int = 1 << 0; +pub const PR_FP_MODE_FRE: c_int = 1 << 1; + +pub const PR_CAP_AMBIENT: c_int = 47; +pub const PR_CAP_AMBIENT_IS_SET: c_int = 1; +pub const PR_CAP_AMBIENT_RAISE: c_int = 2; +pub const PR_CAP_AMBIENT_LOWER: c_int = 3; +pub const 
PR_CAP_AMBIENT_CLEAR_ALL: c_int = 4; + +pub const PR_SET_VMA: c_int = 0x53564d41; +pub const PR_SET_VMA_ANON_NAME: c_int = 0; + +pub const PR_SCHED_CORE: c_int = 62; +pub const PR_SCHED_CORE_GET: c_int = 0; +pub const PR_SCHED_CORE_CREATE: c_int = 1; +pub const PR_SCHED_CORE_SHARE_TO: c_int = 2; +pub const PR_SCHED_CORE_SHARE_FROM: c_int = 3; +pub const PR_SCHED_CORE_MAX: c_int = 4; +pub const PR_SCHED_CORE_SCOPE_THREAD: c_int = 0; +pub const PR_SCHED_CORE_SCOPE_THREAD_GROUP: c_int = 1; +pub const PR_SCHED_CORE_SCOPE_PROCESS_GROUP: c_int = 2; + +pub const GRND_NONBLOCK: c_uint = 0x0001; +pub const GRND_RANDOM: c_uint = 0x0002; +pub const GRND_INSECURE: c_uint = 0x0004; + +// +pub const SECCOMP_MODE_DISABLED: c_uint = 0; +pub const SECCOMP_MODE_STRICT: c_uint = 1; +pub const SECCOMP_MODE_FILTER: c_uint = 2; + +pub const SECCOMP_SET_MODE_STRICT: c_uint = 0; +pub const SECCOMP_SET_MODE_FILTER: c_uint = 1; +pub const SECCOMP_GET_ACTION_AVAIL: c_uint = 2; +pub const SECCOMP_GET_NOTIF_SIZES: c_uint = 3; + +pub const SECCOMP_FILTER_FLAG_TSYNC: c_ulong = 1 << 0; +pub const SECCOMP_FILTER_FLAG_LOG: c_ulong = 1 << 1; +pub const SECCOMP_FILTER_FLAG_SPEC_ALLOW: c_ulong = 1 << 2; +pub const SECCOMP_FILTER_FLAG_NEW_LISTENER: c_ulong = 1 << 3; +pub const SECCOMP_FILTER_FLAG_TSYNC_ESRCH: c_ulong = 1 << 4; +pub const SECCOMP_FILTER_FLAG_WAIT_KILLABLE_RECV: c_ulong = 1 << 5; + +pub const SECCOMP_RET_KILL_PROCESS: c_uint = 0x80000000; +pub const SECCOMP_RET_KILL_THREAD: c_uint = 0x00000000; +pub const SECCOMP_RET_KILL: c_uint = SECCOMP_RET_KILL_THREAD; +pub const SECCOMP_RET_TRAP: c_uint = 0x00030000; +pub const SECCOMP_RET_ERRNO: c_uint = 0x00050000; +pub const SECCOMP_RET_USER_NOTIF: c_uint = 0x7fc00000; +pub const SECCOMP_RET_TRACE: c_uint = 0x7ff00000; +pub const SECCOMP_RET_LOG: c_uint = 0x7ffc0000; +pub const SECCOMP_RET_ALLOW: c_uint = 0x7fff0000; + +pub const SECCOMP_RET_ACTION_FULL: c_uint = 0xffff0000; +pub const SECCOMP_RET_ACTION: c_uint = 0x7fff0000; +pub const SECCOMP_RET_DATA: c_uint = 0x0000ffff; + +pub const SECCOMP_USER_NOTIF_FLAG_CONTINUE: c_ulong = 1; + +pub const SECCOMP_ADDFD_FLAG_SETFD: c_ulong = 1; +pub const SECCOMP_ADDFD_FLAG_SEND: c_ulong = 2; + +pub const ITIMER_REAL: c_int = 0; +pub const ITIMER_VIRTUAL: c_int = 1; +pub const ITIMER_PROF: c_int = 2; + +pub const TFD_CLOEXEC: c_int = O_CLOEXEC; +pub const TFD_NONBLOCK: c_int = O_NONBLOCK; +pub const TFD_TIMER_ABSTIME: c_int = 1; +pub const TFD_TIMER_CANCEL_ON_SET: c_int = 2; + +pub const _POSIX_VDISABLE: crate::cc_t = 0; + +pub const FALLOC_FL_KEEP_SIZE: c_int = 0x01; +pub const FALLOC_FL_PUNCH_HOLE: c_int = 0x02; +pub const FALLOC_FL_COLLAPSE_RANGE: c_int = 0x08; +pub const FALLOC_FL_ZERO_RANGE: c_int = 0x10; +pub const FALLOC_FL_INSERT_RANGE: c_int = 0x20; +pub const FALLOC_FL_UNSHARE_RANGE: c_int = 0x40; + +#[deprecated( + since = "0.2.55", + note = "ENOATTR is not available on Linux; use ENODATA instead" +)] +pub const ENOATTR: c_int = crate::ENODATA; + +pub const SO_ORIGINAL_DST: c_int = 80; + +pub const IP_RECVFRAGSIZE: c_int = 25; + +pub const IPV6_FLOWINFO: c_int = 11; +pub const IPV6_FLOWLABEL_MGR: c_int = 32; +pub const IPV6_FLOWINFO_SEND: c_int = 33; +pub const IPV6_RECVFRAGSIZE: c_int = 77; +pub const IPV6_FREEBIND: c_int = 78; +pub const IPV6_FLOWINFO_FLOWLABEL: c_int = 0x000fffff; +pub const IPV6_FLOWINFO_PRIORITY: c_int = 0x0ff00000; + +pub const IPV6_RTHDR_LOOSE: c_int = 0; +pub const IPV6_RTHDR_STRICT: c_int = 1; + +// SO_MEMINFO offsets +pub const SK_MEMINFO_RMEM_ALLOC: c_int = 0; +pub const SK_MEMINFO_RCVBUF: 
c_int = 1; +pub const SK_MEMINFO_WMEM_ALLOC: c_int = 2; +pub const SK_MEMINFO_SNDBUF: c_int = 3; +pub const SK_MEMINFO_FWD_ALLOC: c_int = 4; +pub const SK_MEMINFO_WMEM_QUEUED: c_int = 5; +pub const SK_MEMINFO_OPTMEM: c_int = 6; +pub const SK_MEMINFO_BACKLOG: c_int = 7; +pub const SK_MEMINFO_DROPS: c_int = 8; + +pub const IUTF8: crate::tcflag_t = 0x00004000; +#[cfg(not(all(target_env = "uclibc", target_arch = "mips")))] +pub const CMSPAR: crate::tcflag_t = 0o10000000000; + +pub const MFD_CLOEXEC: c_uint = 0x0001; +pub const MFD_ALLOW_SEALING: c_uint = 0x0002; +pub const MFD_HUGETLB: c_uint = 0x0004; +pub const MFD_NOEXEC_SEAL: c_uint = 0x0008; +pub const MFD_EXEC: c_uint = 0x0010; +pub const MFD_HUGE_64KB: c_uint = 0x40000000; +pub const MFD_HUGE_512KB: c_uint = 0x4c000000; +pub const MFD_HUGE_1MB: c_uint = 0x50000000; +pub const MFD_HUGE_2MB: c_uint = 0x54000000; +pub const MFD_HUGE_8MB: c_uint = 0x5c000000; +pub const MFD_HUGE_16MB: c_uint = 0x60000000; +pub const MFD_HUGE_32MB: c_uint = 0x64000000; +pub const MFD_HUGE_256MB: c_uint = 0x70000000; +pub const MFD_HUGE_512MB: c_uint = 0x74000000; +pub const MFD_HUGE_1GB: c_uint = 0x78000000; +pub const MFD_HUGE_2GB: c_uint = 0x7c000000; +pub const MFD_HUGE_16GB: c_uint = 0x88000000; +pub const MFD_HUGE_MASK: c_uint = 63; +pub const MFD_HUGE_SHIFT: c_uint = 26; + +// linux/close_range.h +pub const CLOSE_RANGE_UNSHARE: c_uint = 1 << 1; +pub const CLOSE_RANGE_CLOEXEC: c_uint = 1 << 2; + +// linux/filter.h +pub const SKF_AD_OFF: c_int = -0x1000; +pub const SKF_AD_PROTOCOL: c_int = 0; +pub const SKF_AD_PKTTYPE: c_int = 4; +pub const SKF_AD_IFINDEX: c_int = 8; +pub const SKF_AD_NLATTR: c_int = 12; +pub const SKF_AD_NLATTR_NEST: c_int = 16; +pub const SKF_AD_MARK: c_int = 20; +pub const SKF_AD_QUEUE: c_int = 24; +pub const SKF_AD_HATYPE: c_int = 28; +pub const SKF_AD_RXHASH: c_int = 32; +pub const SKF_AD_CPU: c_int = 36; +pub const SKF_AD_ALU_XOR_X: c_int = 40; +pub const SKF_AD_VLAN_TAG: c_int = 44; +pub const SKF_AD_VLAN_TAG_PRESENT: c_int = 48; +pub const SKF_AD_PAY_OFFSET: c_int = 52; +pub const SKF_AD_RANDOM: c_int = 56; +pub const SKF_AD_VLAN_TPID: c_int = 60; +pub const SKF_AD_MAX: c_int = 64; +pub const SKF_NET_OFF: c_int = -0x100000; +pub const SKF_LL_OFF: c_int = -0x200000; +pub const BPF_NET_OFF: c_int = SKF_NET_OFF; +pub const BPF_LL_OFF: c_int = SKF_LL_OFF; +pub const BPF_MEMWORDS: c_int = 16; +pub const BPF_MAXINSNS: c_int = 4096; + +// linux/bpf_common.h +pub const BPF_LD: __u32 = 0x00; +pub const BPF_LDX: __u32 = 0x01; +pub const BPF_ST: __u32 = 0x02; +pub const BPF_STX: __u32 = 0x03; +pub const BPF_ALU: __u32 = 0x04; +pub const BPF_JMP: __u32 = 0x05; +pub const BPF_RET: __u32 = 0x06; +pub const BPF_MISC: __u32 = 0x07; +pub const BPF_W: __u32 = 0x00; +pub const BPF_H: __u32 = 0x08; +pub const BPF_B: __u32 = 0x10; +pub const BPF_IMM: __u32 = 0x00; +pub const BPF_ABS: __u32 = 0x20; +pub const BPF_IND: __u32 = 0x40; +pub const BPF_MEM: __u32 = 0x60; +pub const BPF_LEN: __u32 = 0x80; +pub const BPF_MSH: __u32 = 0xa0; +pub const BPF_ADD: __u32 = 0x00; +pub const BPF_SUB: __u32 = 0x10; +pub const BPF_MUL: __u32 = 0x20; +pub const BPF_DIV: __u32 = 0x30; +pub const BPF_OR: __u32 = 0x40; +pub const BPF_AND: __u32 = 0x50; +pub const BPF_LSH: __u32 = 0x60; +pub const BPF_RSH: __u32 = 0x70; +pub const BPF_NEG: __u32 = 0x80; +pub const BPF_MOD: __u32 = 0x90; +pub const BPF_XOR: __u32 = 0xa0; +pub const BPF_JA: __u32 = 0x00; +pub const BPF_JEQ: __u32 = 0x10; +pub const BPF_JGT: __u32 = 0x20; +pub const BPF_JGE: __u32 = 0x30; +pub const BPF_JSET: 
__u32 = 0x40; +pub const BPF_K: __u32 = 0x00; +pub const BPF_X: __u32 = 0x08; + +// linux/filter.h + +pub const BPF_A: __u32 = 0x10; +pub const BPF_TAX: __u32 = 0x00; +pub const BPF_TXA: __u32 = 0x80; + +// linux/openat2.h +pub const RESOLVE_NO_XDEV: crate::__u64 = 0x01; +pub const RESOLVE_NO_MAGICLINKS: crate::__u64 = 0x02; +pub const RESOLVE_NO_SYMLINKS: crate::__u64 = 0x04; +pub const RESOLVE_BENEATH: crate::__u64 = 0x08; +pub const RESOLVE_IN_ROOT: crate::__u64 = 0x10; +pub const RESOLVE_CACHED: crate::__u64 = 0x20; + +// linux/if_ether.h +pub const ETH_ALEN: c_int = 6; +pub const ETH_HLEN: c_int = 14; +pub const ETH_ZLEN: c_int = 60; +pub const ETH_DATA_LEN: c_int = 1500; +pub const ETH_FRAME_LEN: c_int = 1514; +pub const ETH_FCS_LEN: c_int = 4; + +// These are the defined Ethernet Protocol ID's. +pub const ETH_P_LOOP: c_int = 0x0060; +pub const ETH_P_PUP: c_int = 0x0200; +pub const ETH_P_PUPAT: c_int = 0x0201; +pub const ETH_P_IP: c_int = 0x0800; +pub const ETH_P_X25: c_int = 0x0805; +pub const ETH_P_ARP: c_int = 0x0806; +pub const ETH_P_BPQ: c_int = 0x08FF; +pub const ETH_P_IEEEPUP: c_int = 0x0a00; +pub const ETH_P_IEEEPUPAT: c_int = 0x0a01; +pub const ETH_P_BATMAN: c_int = 0x4305; +pub const ETH_P_DEC: c_int = 0x6000; +pub const ETH_P_DNA_DL: c_int = 0x6001; +pub const ETH_P_DNA_RC: c_int = 0x6002; +pub const ETH_P_DNA_RT: c_int = 0x6003; +pub const ETH_P_LAT: c_int = 0x6004; +pub const ETH_P_DIAG: c_int = 0x6005; +pub const ETH_P_CUST: c_int = 0x6006; +pub const ETH_P_SCA: c_int = 0x6007; +pub const ETH_P_TEB: c_int = 0x6558; +pub const ETH_P_RARP: c_int = 0x8035; +pub const ETH_P_ATALK: c_int = 0x809B; +pub const ETH_P_AARP: c_int = 0x80F3; +pub const ETH_P_8021Q: c_int = 0x8100; +pub const ETH_P_IPX: c_int = 0x8137; +pub const ETH_P_IPV6: c_int = 0x86DD; +pub const ETH_P_PAUSE: c_int = 0x8808; +pub const ETH_P_SLOW: c_int = 0x8809; +pub const ETH_P_WCCP: c_int = 0x883E; +pub const ETH_P_MPLS_UC: c_int = 0x8847; +pub const ETH_P_MPLS_MC: c_int = 0x8848; +pub const ETH_P_ATMMPOA: c_int = 0x884c; +pub const ETH_P_PPP_DISC: c_int = 0x8863; +pub const ETH_P_PPP_SES: c_int = 0x8864; +pub const ETH_P_LINK_CTL: c_int = 0x886c; +pub const ETH_P_ATMFATE: c_int = 0x8884; +pub const ETH_P_PAE: c_int = 0x888E; +pub const ETH_P_AOE: c_int = 0x88A2; +pub const ETH_P_8021AD: c_int = 0x88A8; +pub const ETH_P_802_EX1: c_int = 0x88B5; +pub const ETH_P_TIPC: c_int = 0x88CA; +pub const ETH_P_MACSEC: c_int = 0x88E5; +pub const ETH_P_8021AH: c_int = 0x88E7; +pub const ETH_P_MVRP: c_int = 0x88F5; +pub const ETH_P_1588: c_int = 0x88F7; +pub const ETH_P_PRP: c_int = 0x88FB; +pub const ETH_P_FCOE: c_int = 0x8906; +pub const ETH_P_TDLS: c_int = 0x890D; +pub const ETH_P_FIP: c_int = 0x8914; +pub const ETH_P_80221: c_int = 0x8917; +pub const ETH_P_LOOPBACK: c_int = 0x9000; +pub const ETH_P_QINQ1: c_int = 0x9100; +pub const ETH_P_QINQ2: c_int = 0x9200; +pub const ETH_P_QINQ3: c_int = 0x9300; +pub const ETH_P_EDSA: c_int = 0xDADA; +pub const ETH_P_AF_IUCV: c_int = 0xFBFB; + +pub const ETH_P_802_3_MIN: c_int = 0x0600; + +// Non DIX types. Won't clash for 1500 types. 
+pub const ETH_P_802_3: c_int = 0x0001; +pub const ETH_P_AX25: c_int = 0x0002; +pub const ETH_P_ALL: c_int = 0x0003; +pub const ETH_P_802_2: c_int = 0x0004; +pub const ETH_P_SNAP: c_int = 0x0005; +pub const ETH_P_DDCMP: c_int = 0x0006; +pub const ETH_P_WAN_PPP: c_int = 0x0007; +pub const ETH_P_PPP_MP: c_int = 0x0008; +pub const ETH_P_LOCALTALK: c_int = 0x0009; +pub const ETH_P_CANFD: c_int = 0x000D; +pub const ETH_P_PPPTALK: c_int = 0x0010; +pub const ETH_P_TR_802_2: c_int = 0x0011; +pub const ETH_P_MOBITEX: c_int = 0x0015; +pub const ETH_P_CONTROL: c_int = 0x0016; +pub const ETH_P_IRDA: c_int = 0x0017; +pub const ETH_P_ECONET: c_int = 0x0018; +pub const ETH_P_HDLC: c_int = 0x0019; +pub const ETH_P_ARCNET: c_int = 0x001A; +pub const ETH_P_DSA: c_int = 0x001B; +pub const ETH_P_TRAILER: c_int = 0x001C; +pub const ETH_P_PHONET: c_int = 0x00F5; +pub const ETH_P_IEEE802154: c_int = 0x00F6; +pub const ETH_P_CAIF: c_int = 0x00F7; + +// DIFF(main): changed to `c_short` in f62eb023ab +pub const POSIX_SPAWN_RESETIDS: c_int = 0x01; +pub const POSIX_SPAWN_SETPGROUP: c_int = 0x02; +pub const POSIX_SPAWN_SETSIGDEF: c_int = 0x04; +pub const POSIX_SPAWN_SETSIGMASK: c_int = 0x08; +pub const POSIX_SPAWN_SETSCHEDPARAM: c_int = 0x10; +pub const POSIX_SPAWN_SETSCHEDULER: c_int = 0x20; + +pub const NLMSG_NOOP: c_int = 0x1; +pub const NLMSG_ERROR: c_int = 0x2; +pub const NLMSG_DONE: c_int = 0x3; +pub const NLMSG_OVERRUN: c_int = 0x4; +pub const NLMSG_MIN_TYPE: c_int = 0x10; + +// linux/netfilter/nfnetlink.h +pub const NFNLGRP_NONE: c_int = 0; +pub const NFNLGRP_CONNTRACK_NEW: c_int = 1; +pub const NFNLGRP_CONNTRACK_UPDATE: c_int = 2; +pub const NFNLGRP_CONNTRACK_DESTROY: c_int = 3; +pub const NFNLGRP_CONNTRACK_EXP_NEW: c_int = 4; +pub const NFNLGRP_CONNTRACK_EXP_UPDATE: c_int = 5; +pub const NFNLGRP_CONNTRACK_EXP_DESTROY: c_int = 6; +pub const NFNLGRP_NFTABLES: c_int = 7; +pub const NFNLGRP_ACCT_QUOTA: c_int = 8; +pub const NFNLGRP_NFTRACE: c_int = 9; + +pub const NFNETLINK_V0: c_int = 0; + +pub const NFNL_SUBSYS_NONE: c_int = 0; +pub const NFNL_SUBSYS_CTNETLINK: c_int = 1; +pub const NFNL_SUBSYS_CTNETLINK_EXP: c_int = 2; +pub const NFNL_SUBSYS_QUEUE: c_int = 3; +pub const NFNL_SUBSYS_ULOG: c_int = 4; +pub const NFNL_SUBSYS_OSF: c_int = 5; +pub const NFNL_SUBSYS_IPSET: c_int = 6; +pub const NFNL_SUBSYS_ACCT: c_int = 7; +pub const NFNL_SUBSYS_CTNETLINK_TIMEOUT: c_int = 8; +pub const NFNL_SUBSYS_CTHELPER: c_int = 9; +pub const NFNL_SUBSYS_NFTABLES: c_int = 10; +pub const NFNL_SUBSYS_NFT_COMPAT: c_int = 11; +pub const NFNL_SUBSYS_HOOK: c_int = 12; +pub const NFNL_SUBSYS_COUNT: c_int = 13; + +pub const NFNL_MSG_BATCH_BEGIN: c_int = NLMSG_MIN_TYPE; +pub const NFNL_MSG_BATCH_END: c_int = NLMSG_MIN_TYPE + 1; + +pub const NFNL_BATCH_UNSPEC: c_int = 0; +pub const NFNL_BATCH_GENID: c_int = 1; + +// linux/netfilter/nfnetlink_log.h +pub const NFULNL_MSG_PACKET: c_int = 0; +pub const NFULNL_MSG_CONFIG: c_int = 1; + +pub const NFULA_VLAN_UNSPEC: c_int = 0; +pub const NFULA_VLAN_PROTO: c_int = 1; +pub const NFULA_VLAN_TCI: c_int = 2; + +pub const NFULA_UNSPEC: c_int = 0; +pub const NFULA_PACKET_HDR: c_int = 1; +pub const NFULA_MARK: c_int = 2; +pub const NFULA_TIMESTAMP: c_int = 3; +pub const NFULA_IFINDEX_INDEV: c_int = 4; +pub const NFULA_IFINDEX_OUTDEV: c_int = 5; +pub const NFULA_IFINDEX_PHYSINDEV: c_int = 6; +pub const NFULA_IFINDEX_PHYSOUTDEV: c_int = 7; +pub const NFULA_HWADDR: c_int = 8; +pub const NFULA_PAYLOAD: c_int = 9; +pub const NFULA_PREFIX: c_int = 10; +pub const NFULA_UID: c_int = 11; +pub const NFULA_SEQ: 
c_int = 12; +pub const NFULA_SEQ_GLOBAL: c_int = 13; +pub const NFULA_GID: c_int = 14; +pub const NFULA_HWTYPE: c_int = 15; +pub const NFULA_HWHEADER: c_int = 16; +pub const NFULA_HWLEN: c_int = 17; +pub const NFULA_CT: c_int = 18; +pub const NFULA_CT_INFO: c_int = 19; +pub const NFULA_VLAN: c_int = 20; +pub const NFULA_L2HDR: c_int = 21; + +pub const NFULNL_CFG_CMD_NONE: c_int = 0; +pub const NFULNL_CFG_CMD_BIND: c_int = 1; +pub const NFULNL_CFG_CMD_UNBIND: c_int = 2; +pub const NFULNL_CFG_CMD_PF_BIND: c_int = 3; +pub const NFULNL_CFG_CMD_PF_UNBIND: c_int = 4; + +pub const NFULA_CFG_UNSPEC: c_int = 0; +pub const NFULA_CFG_CMD: c_int = 1; +pub const NFULA_CFG_MODE: c_int = 2; +pub const NFULA_CFG_NLBUFSIZ: c_int = 3; +pub const NFULA_CFG_TIMEOUT: c_int = 4; +pub const NFULA_CFG_QTHRESH: c_int = 5; +pub const NFULA_CFG_FLAGS: c_int = 6; + +pub const NFULNL_COPY_NONE: c_int = 0x00; +pub const NFULNL_COPY_META: c_int = 0x01; +pub const NFULNL_COPY_PACKET: c_int = 0x02; + +pub const NFULNL_CFG_F_SEQ: c_int = 0x0001; +pub const NFULNL_CFG_F_SEQ_GLOBAL: c_int = 0x0002; +pub const NFULNL_CFG_F_CONNTRACK: c_int = 0x0004; + +// linux/netfilter/nfnetlink_queue.h +pub const NFQNL_MSG_PACKET: c_int = 0; +pub const NFQNL_MSG_VERDICT: c_int = 1; +pub const NFQNL_MSG_CONFIG: c_int = 2; +pub const NFQNL_MSG_VERDICT_BATCH: c_int = 3; + +pub const NFQA_UNSPEC: c_int = 0; +pub const NFQA_PACKET_HDR: c_int = 1; +pub const NFQA_VERDICT_HDR: c_int = 2; +pub const NFQA_MARK: c_int = 3; +pub const NFQA_TIMESTAMP: c_int = 4; +pub const NFQA_IFINDEX_INDEV: c_int = 5; +pub const NFQA_IFINDEX_OUTDEV: c_int = 6; +pub const NFQA_IFINDEX_PHYSINDEV: c_int = 7; +pub const NFQA_IFINDEX_PHYSOUTDEV: c_int = 8; +pub const NFQA_HWADDR: c_int = 9; +pub const NFQA_PAYLOAD: c_int = 10; +pub const NFQA_CT: c_int = 11; +pub const NFQA_CT_INFO: c_int = 12; +pub const NFQA_CAP_LEN: c_int = 13; +pub const NFQA_SKB_INFO: c_int = 14; +pub const NFQA_EXP: c_int = 15; +pub const NFQA_UID: c_int = 16; +pub const NFQA_GID: c_int = 17; +pub const NFQA_SECCTX: c_int = 18; +pub const NFQA_VLAN: c_int = 19; +pub const NFQA_L2HDR: c_int = 20; +pub const NFQA_PRIORITY: c_int = 21; + +pub const NFQA_VLAN_UNSPEC: c_int = 0; +pub const NFQA_VLAN_PROTO: c_int = 1; +pub const NFQA_VLAN_TCI: c_int = 2; + +pub const NFQNL_CFG_CMD_NONE: c_int = 0; +pub const NFQNL_CFG_CMD_BIND: c_int = 1; +pub const NFQNL_CFG_CMD_UNBIND: c_int = 2; +pub const NFQNL_CFG_CMD_PF_BIND: c_int = 3; +pub const NFQNL_CFG_CMD_PF_UNBIND: c_int = 4; + +pub const NFQNL_COPY_NONE: c_int = 0; +pub const NFQNL_COPY_META: c_int = 1; +pub const NFQNL_COPY_PACKET: c_int = 2; + +pub const NFQA_CFG_UNSPEC: c_int = 0; +pub const NFQA_CFG_CMD: c_int = 1; +pub const NFQA_CFG_PARAMS: c_int = 2; +pub const NFQA_CFG_QUEUE_MAXLEN: c_int = 3; +pub const NFQA_CFG_MASK: c_int = 4; +pub const NFQA_CFG_FLAGS: c_int = 5; + +pub const NFQA_CFG_F_FAIL_OPEN: c_int = 0x0001; +pub const NFQA_CFG_F_CONNTRACK: c_int = 0x0002; +pub const NFQA_CFG_F_GSO: c_int = 0x0004; +pub const NFQA_CFG_F_UID_GID: c_int = 0x0008; +pub const NFQA_CFG_F_SECCTX: c_int = 0x0010; +pub const NFQA_CFG_F_MAX: c_int = 0x0020; + +pub const NFQA_SKB_CSUMNOTREADY: c_int = 0x0001; +pub const NFQA_SKB_GSO: c_int = 0x0002; +pub const NFQA_SKB_CSUM_NOTVERIFIED: c_int = 0x0004; + +// linux/genetlink.h + +pub const GENL_NAMSIZ: c_int = 16; + +pub const GENL_MIN_ID: c_int = NLMSG_MIN_TYPE; +pub const GENL_MAX_ID: c_int = 1023; + +pub const GENL_ADMIN_PERM: c_int = 0x01; +pub const GENL_CMD_CAP_DO: c_int = 0x02; +pub const GENL_CMD_CAP_DUMP: 
c_int = 0x04; +pub const GENL_CMD_CAP_HASPOL: c_int = 0x08; + +pub const GENL_ID_CTRL: c_int = NLMSG_MIN_TYPE; + +pub const CTRL_CMD_UNSPEC: c_int = 0; +pub const CTRL_CMD_NEWFAMILY: c_int = 1; +pub const CTRL_CMD_DELFAMILY: c_int = 2; +pub const CTRL_CMD_GETFAMILY: c_int = 3; +pub const CTRL_CMD_NEWOPS: c_int = 4; +pub const CTRL_CMD_DELOPS: c_int = 5; +pub const CTRL_CMD_GETOPS: c_int = 6; +pub const CTRL_CMD_NEWMCAST_GRP: c_int = 7; +pub const CTRL_CMD_DELMCAST_GRP: c_int = 8; +pub const CTRL_CMD_GETMCAST_GRP: c_int = 9; + +pub const CTRL_ATTR_UNSPEC: c_int = 0; +pub const CTRL_ATTR_FAMILY_ID: c_int = 1; +pub const CTRL_ATTR_FAMILY_NAME: c_int = 2; +pub const CTRL_ATTR_VERSION: c_int = 3; +pub const CTRL_ATTR_HDRSIZE: c_int = 4; +pub const CTRL_ATTR_MAXATTR: c_int = 5; +pub const CTRL_ATTR_OPS: c_int = 6; +pub const CTRL_ATTR_MCAST_GROUPS: c_int = 7; + +pub const CTRL_ATTR_OP_UNSPEC: c_int = 0; +pub const CTRL_ATTR_OP_ID: c_int = 1; +pub const CTRL_ATTR_OP_FLAGS: c_int = 2; + +pub const CTRL_ATTR_MCAST_GRP_UNSPEC: c_int = 0; +pub const CTRL_ATTR_MCAST_GRP_NAME: c_int = 1; +pub const CTRL_ATTR_MCAST_GRP_ID: c_int = 2; + +// linux/if_packet.h +pub const PACKET_HOST: c_uchar = 0; +pub const PACKET_BROADCAST: c_uchar = 1; +pub const PACKET_MULTICAST: c_uchar = 2; +pub const PACKET_OTHERHOST: c_uchar = 3; +pub const PACKET_OUTGOING: c_uchar = 4; +pub const PACKET_LOOPBACK: c_uchar = 5; +pub const PACKET_USER: c_uchar = 6; +pub const PACKET_KERNEL: c_uchar = 7; + +pub const PACKET_ADD_MEMBERSHIP: c_int = 1; +pub const PACKET_DROP_MEMBERSHIP: c_int = 2; +pub const PACKET_RECV_OUTPUT: c_int = 3; +pub const PACKET_RX_RING: c_int = 5; +pub const PACKET_STATISTICS: c_int = 6; +pub const PACKET_COPY_THRESH: c_int = 7; +pub const PACKET_AUXDATA: c_int = 8; +pub const PACKET_ORIGDEV: c_int = 9; +pub const PACKET_VERSION: c_int = 10; +pub const PACKET_HDRLEN: c_int = 11; +pub const PACKET_RESERVE: c_int = 12; +pub const PACKET_TX_RING: c_int = 13; +pub const PACKET_LOSS: c_int = 14; +pub const PACKET_VNET_HDR: c_int = 15; +pub const PACKET_TX_TIMESTAMP: c_int = 16; +pub const PACKET_TIMESTAMP: c_int = 17; +pub const PACKET_FANOUT: c_int = 18; +pub const PACKET_TX_HAS_OFF: c_int = 19; +pub const PACKET_QDISC_BYPASS: c_int = 20; +pub const PACKET_ROLLOVER_STATS: c_int = 21; +pub const PACKET_FANOUT_DATA: c_int = 22; +pub const PACKET_IGNORE_OUTGOING: c_int = 23; +pub const PACKET_VNET_HDR_SZ: c_int = 24; + +pub const PACKET_FANOUT_HASH: c_uint = 0; +pub const PACKET_FANOUT_LB: c_uint = 1; +pub const PACKET_FANOUT_CPU: c_uint = 2; +pub const PACKET_FANOUT_ROLLOVER: c_uint = 3; +pub const PACKET_FANOUT_RND: c_uint = 4; +pub const PACKET_FANOUT_QM: c_uint = 5; +pub const PACKET_FANOUT_CBPF: c_uint = 6; +pub const PACKET_FANOUT_EBPF: c_uint = 7; +pub const PACKET_FANOUT_FLAG_ROLLOVER: c_uint = 0x1000; +pub const PACKET_FANOUT_FLAG_UNIQUEID: c_uint = 0x2000; +pub const PACKET_FANOUT_FLAG_IGNORE_OUTGOING: c_uint = 0x4000; +pub const PACKET_FANOUT_FLAG_DEFRAG: c_uint = 0x8000; + +pub const PACKET_MR_MULTICAST: c_int = 0; +pub const PACKET_MR_PROMISC: c_int = 1; +pub const PACKET_MR_ALLMULTI: c_int = 2; + +pub const TP_STATUS_KERNEL: __u32 = 0; +pub const TP_STATUS_USER: __u32 = 1 << 0; +pub const TP_STATUS_COPY: __u32 = 1 << 1; +pub const TP_STATUS_LOSING: __u32 = 1 << 2; +pub const TP_STATUS_CSUMNOTREADY: __u32 = 1 << 3; +pub const TP_STATUS_VLAN_VALID: __u32 = 1 << 4; +pub const TP_STATUS_BLK_TMO: __u32 = 1 << 5; +pub const TP_STATUS_VLAN_TPID_VALID: __u32 = 1 << 6; +pub const TP_STATUS_CSUM_VALID: __u32 = 1 
<< 7; + +pub const TP_STATUS_AVAILABLE: __u32 = 0; +pub const TP_STATUS_SEND_REQUEST: __u32 = 1 << 0; +pub const TP_STATUS_SENDING: __u32 = 1 << 1; +pub const TP_STATUS_WRONG_FORMAT: __u32 = 1 << 2; + +pub const TP_STATUS_TS_SOFTWARE: __u32 = 1 << 29; +pub const TP_STATUS_TS_SYS_HARDWARE: __u32 = 1 << 30; +pub const TP_STATUS_TS_RAW_HARDWARE: __u32 = 1 << 31; + +pub const TP_FT_REQ_FILL_RXHASH: __u32 = 1; + +pub const TPACKET_ALIGNMENT: usize = 16; + +pub const TPACKET_HDRLEN: usize = ((size_of::() + TPACKET_ALIGNMENT - 1) + & !(TPACKET_ALIGNMENT - 1)) + + size_of::(); +pub const TPACKET2_HDRLEN: usize = ((size_of::() + TPACKET_ALIGNMENT - 1) + & !(TPACKET_ALIGNMENT - 1)) + + size_of::(); +pub const TPACKET3_HDRLEN: usize = ((size_of::() + TPACKET_ALIGNMENT - 1) + & !(TPACKET_ALIGNMENT - 1)) + + size_of::(); + +// linux/netfilter.h +pub const NF_DROP: c_int = 0; +pub const NF_ACCEPT: c_int = 1; +pub const NF_STOLEN: c_int = 2; +pub const NF_QUEUE: c_int = 3; +pub const NF_REPEAT: c_int = 4; +pub const NF_STOP: c_int = 5; +pub const NF_MAX_VERDICT: c_int = NF_STOP; + +pub const NF_VERDICT_MASK: c_int = 0x000000ff; +pub const NF_VERDICT_FLAG_QUEUE_BYPASS: c_int = 0x00008000; + +pub const NF_VERDICT_QMASK: c_int = 0xffff0000; +pub const NF_VERDICT_QBITS: c_int = 16; + +pub const NF_VERDICT_BITS: c_int = 16; + +pub const NF_INET_PRE_ROUTING: c_int = 0; +pub const NF_INET_LOCAL_IN: c_int = 1; +pub const NF_INET_FORWARD: c_int = 2; +pub const NF_INET_LOCAL_OUT: c_int = 3; +pub const NF_INET_POST_ROUTING: c_int = 4; +pub const NF_INET_NUMHOOKS: c_int = 5; +pub const NF_INET_INGRESS: c_int = NF_INET_NUMHOOKS; + +pub const NF_NETDEV_INGRESS: c_int = 0; +pub const NF_NETDEV_EGRESS: c_int = 1; +pub const NF_NETDEV_NUMHOOKS: c_int = 2; + +// Some NFPROTO are not compatible with musl and are defined in submodules. 
+pub const NFPROTO_UNSPEC: c_int = 0; +pub const NFPROTO_INET: c_int = 1; +pub const NFPROTO_IPV4: c_int = 2; +pub const NFPROTO_ARP: c_int = 3; +pub const NFPROTO_NETDEV: c_int = 5; +pub const NFPROTO_BRIDGE: c_int = 7; +pub const NFPROTO_IPV6: c_int = 10; +pub const NFPROTO_DECNET: c_int = 12; +pub const NFPROTO_NUMPROTO: c_int = 13; + +// linux/netfilter_arp.h +pub const NF_ARP: c_int = 0; +pub const NF_ARP_IN: c_int = 0; +pub const NF_ARP_OUT: c_int = 1; +pub const NF_ARP_FORWARD: c_int = 2; +pub const NF_ARP_NUMHOOKS: c_int = 3; + +// linux/netfilter_bridge.h +pub const NF_BR_PRE_ROUTING: c_int = 0; +pub const NF_BR_LOCAL_IN: c_int = 1; +pub const NF_BR_FORWARD: c_int = 2; +pub const NF_BR_LOCAL_OUT: c_int = 3; +pub const NF_BR_POST_ROUTING: c_int = 4; +pub const NF_BR_BROUTING: c_int = 5; +pub const NF_BR_NUMHOOKS: c_int = 6; + +pub const NF_BR_PRI_FIRST: c_int = crate::INT_MIN; +pub const NF_BR_PRI_NAT_DST_BRIDGED: c_int = -300; +pub const NF_BR_PRI_FILTER_BRIDGED: c_int = -200; +pub const NF_BR_PRI_BRNF: c_int = 0; +pub const NF_BR_PRI_NAT_DST_OTHER: c_int = 100; +pub const NF_BR_PRI_FILTER_OTHER: c_int = 200; +pub const NF_BR_PRI_NAT_SRC: c_int = 300; +pub const NF_BR_PRI_LAST: c_int = crate::INT_MAX; + +// linux/netfilter_ipv4.h +pub const NF_IP_PRE_ROUTING: c_int = 0; +pub const NF_IP_LOCAL_IN: c_int = 1; +pub const NF_IP_FORWARD: c_int = 2; +pub const NF_IP_LOCAL_OUT: c_int = 3; +pub const NF_IP_POST_ROUTING: c_int = 4; +pub const NF_IP_NUMHOOKS: c_int = 5; + +pub const NF_IP_PRI_FIRST: c_int = crate::INT_MIN; +pub const NF_IP_PRI_RAW_BEFORE_DEFRAG: c_int = -450; +pub const NF_IP_PRI_CONNTRACK_DEFRAG: c_int = -400; +pub const NF_IP_PRI_RAW: c_int = -300; +pub const NF_IP_PRI_SELINUX_FIRST: c_int = -225; +pub const NF_IP_PRI_CONNTRACK: c_int = -200; +pub const NF_IP_PRI_MANGLE: c_int = -150; +pub const NF_IP_PRI_NAT_DST: c_int = -100; +pub const NF_IP_PRI_FILTER: c_int = 0; +pub const NF_IP_PRI_SECURITY: c_int = 50; +pub const NF_IP_PRI_NAT_SRC: c_int = 100; +pub const NF_IP_PRI_SELINUX_LAST: c_int = 225; +pub const NF_IP_PRI_CONNTRACK_HELPER: c_int = 300; +pub const NF_IP_PRI_CONNTRACK_CONFIRM: c_int = crate::INT_MAX; +pub const NF_IP_PRI_LAST: c_int = crate::INT_MAX; + +// linux/netfilter_ipv6.h +pub const NF_IP6_PRE_ROUTING: c_int = 0; +pub const NF_IP6_LOCAL_IN: c_int = 1; +pub const NF_IP6_FORWARD: c_int = 2; +pub const NF_IP6_LOCAL_OUT: c_int = 3; +pub const NF_IP6_POST_ROUTING: c_int = 4; +pub const NF_IP6_NUMHOOKS: c_int = 5; + +pub const NF_IP6_PRI_FIRST: c_int = crate::INT_MIN; +pub const NF_IP6_PRI_RAW_BEFORE_DEFRAG: c_int = -450; +pub const NF_IP6_PRI_CONNTRACK_DEFRAG: c_int = -400; +pub const NF_IP6_PRI_RAW: c_int = -300; +pub const NF_IP6_PRI_SELINUX_FIRST: c_int = -225; +pub const NF_IP6_PRI_CONNTRACK: c_int = -200; +pub const NF_IP6_PRI_MANGLE: c_int = -150; +pub const NF_IP6_PRI_NAT_DST: c_int = -100; +pub const NF_IP6_PRI_FILTER: c_int = 0; +pub const NF_IP6_PRI_SECURITY: c_int = 50; +pub const NF_IP6_PRI_NAT_SRC: c_int = 100; +pub const NF_IP6_PRI_SELINUX_LAST: c_int = 225; +pub const NF_IP6_PRI_CONNTRACK_HELPER: c_int = 300; +pub const NF_IP6_PRI_LAST: c_int = crate::INT_MAX; + +// linux/netfilter_ipv6/ip6_tables.h +pub const IP6T_SO_ORIGINAL_DST: c_int = 80; + +pub const SIOCADDRT: c_ulong = 0x0000890B; +pub const SIOCDELRT: c_ulong = 0x0000890C; +pub const SIOCGIFNAME: c_ulong = 0x00008910; +pub const SIOCSIFLINK: c_ulong = 0x00008911; +pub const SIOCGIFCONF: c_ulong = 0x00008912; +pub const SIOCGIFFLAGS: c_ulong = 0x00008913; +pub const SIOCSIFFLAGS: 
c_ulong = 0x00008914; +pub const SIOCGIFADDR: c_ulong = 0x00008915; +pub const SIOCSIFADDR: c_ulong = 0x00008916; +pub const SIOCGIFDSTADDR: c_ulong = 0x00008917; +pub const SIOCSIFDSTADDR: c_ulong = 0x00008918; +pub const SIOCGIFBRDADDR: c_ulong = 0x00008919; +pub const SIOCSIFBRDADDR: c_ulong = 0x0000891A; +pub const SIOCGIFNETMASK: c_ulong = 0x0000891B; +pub const SIOCSIFNETMASK: c_ulong = 0x0000891C; +pub const SIOCGIFMETRIC: c_ulong = 0x0000891D; +pub const SIOCSIFMETRIC: c_ulong = 0x0000891E; +pub const SIOCGIFMEM: c_ulong = 0x0000891F; +pub const SIOCSIFMEM: c_ulong = 0x00008920; +pub const SIOCGIFMTU: c_ulong = 0x00008921; +pub const SIOCSIFMTU: c_ulong = 0x00008922; +pub const SIOCSIFNAME: c_ulong = 0x00008923; +pub const SIOCSIFHWADDR: c_ulong = 0x00008924; +pub const SIOCGIFENCAP: c_ulong = 0x00008925; +pub const SIOCSIFENCAP: c_ulong = 0x00008926; +pub const SIOCGIFHWADDR: c_ulong = 0x00008927; +pub const SIOCGIFSLAVE: c_ulong = 0x00008929; +pub const SIOCSIFSLAVE: c_ulong = 0x00008930; +pub const SIOCADDMULTI: c_ulong = 0x00008931; +pub const SIOCDELMULTI: c_ulong = 0x00008932; +pub const SIOCGIFINDEX: c_ulong = 0x00008933; +pub const SIOGIFINDEX: c_ulong = SIOCGIFINDEX; +pub const SIOCSIFPFLAGS: c_ulong = 0x00008934; +pub const SIOCGIFPFLAGS: c_ulong = 0x00008935; +pub const SIOCDIFADDR: c_ulong = 0x00008936; +pub const SIOCSIFHWBROADCAST: c_ulong = 0x00008937; +pub const SIOCGIFCOUNT: c_ulong = 0x00008938; +pub const SIOCGIFBR: c_ulong = 0x00008940; +pub const SIOCSIFBR: c_ulong = 0x00008941; +pub const SIOCGIFTXQLEN: c_ulong = 0x00008942; +pub const SIOCSIFTXQLEN: c_ulong = 0x00008943; +pub const SIOCETHTOOL: c_ulong = 0x00008946; +pub const SIOCGMIIPHY: c_ulong = 0x00008947; +pub const SIOCGMIIREG: c_ulong = 0x00008948; +pub const SIOCSMIIREG: c_ulong = 0x00008949; +pub const SIOCWANDEV: c_ulong = 0x0000894A; +pub const SIOCOUTQNSD: c_ulong = 0x0000894B; +pub const SIOCGSKNS: c_ulong = 0x0000894C; +pub const SIOCDARP: c_ulong = 0x00008953; +pub const SIOCGARP: c_ulong = 0x00008954; +pub const SIOCSARP: c_ulong = 0x00008955; +pub const SIOCDRARP: c_ulong = 0x00008960; +pub const SIOCGRARP: c_ulong = 0x00008961; +pub const SIOCSRARP: c_ulong = 0x00008962; +pub const SIOCGIFMAP: c_ulong = 0x00008970; +pub const SIOCSIFMAP: c_ulong = 0x00008971; +pub const SIOCSHWTSTAMP: c_ulong = 0x000089b0; +pub const SIOCGHWTSTAMP: c_ulong = 0x000089b1; + +// wireless.h +pub const WIRELESS_EXT: c_ulong = 0x16; + +pub const SIOCSIWCOMMIT: c_ulong = 0x8B00; +pub const SIOCGIWNAME: c_ulong = 0x8B01; + +pub const SIOCSIWNWID: c_ulong = 0x8B02; +pub const SIOCGIWNWID: c_ulong = 0x8B03; +pub const SIOCSIWFREQ: c_ulong = 0x8B04; +pub const SIOCGIWFREQ: c_ulong = 0x8B05; +pub const SIOCSIWMODE: c_ulong = 0x8B06; +pub const SIOCGIWMODE: c_ulong = 0x8B07; +pub const SIOCSIWSENS: c_ulong = 0x8B08; +pub const SIOCGIWSENS: c_ulong = 0x8B09; + +pub const SIOCSIWRANGE: c_ulong = 0x8B0A; +pub const SIOCGIWRANGE: c_ulong = 0x8B0B; +pub const SIOCSIWPRIV: c_ulong = 0x8B0C; +pub const SIOCGIWPRIV: c_ulong = 0x8B0D; +pub const SIOCSIWSTATS: c_ulong = 0x8B0E; +pub const SIOCGIWSTATS: c_ulong = 0x8B0F; + +pub const SIOCSIWSPY: c_ulong = 0x8B10; +pub const SIOCGIWSPY: c_ulong = 0x8B11; +pub const SIOCSIWTHRSPY: c_ulong = 0x8B12; +pub const SIOCGIWTHRSPY: c_ulong = 0x8B13; + +pub const SIOCSIWAP: c_ulong = 0x8B14; +pub const SIOCGIWAP: c_ulong = 0x8B15; +pub const SIOCGIWAPLIST: c_ulong = 0x8B17; +pub const SIOCSIWSCAN: c_ulong = 0x8B18; +pub const SIOCGIWSCAN: c_ulong = 0x8B19; + +pub const SIOCSIWESSID: c_ulong 
= 0x8B1A; +pub const SIOCGIWESSID: c_ulong = 0x8B1B; +pub const SIOCSIWNICKN: c_ulong = 0x8B1C; +pub const SIOCGIWNICKN: c_ulong = 0x8B1D; + +pub const SIOCSIWRATE: c_ulong = 0x8B20; +pub const SIOCGIWRATE: c_ulong = 0x8B21; +pub const SIOCSIWRTS: c_ulong = 0x8B22; +pub const SIOCGIWRTS: c_ulong = 0x8B23; +pub const SIOCSIWFRAG: c_ulong = 0x8B24; +pub const SIOCGIWFRAG: c_ulong = 0x8B25; +pub const SIOCSIWTXPOW: c_ulong = 0x8B26; +pub const SIOCGIWTXPOW: c_ulong = 0x8B27; +pub const SIOCSIWRETRY: c_ulong = 0x8B28; +pub const SIOCGIWRETRY: c_ulong = 0x8B29; + +pub const SIOCSIWENCODE: c_ulong = 0x8B2A; +pub const SIOCGIWENCODE: c_ulong = 0x8B2B; + +pub const SIOCSIWPOWER: c_ulong = 0x8B2C; +pub const SIOCGIWPOWER: c_ulong = 0x8B2D; + +pub const SIOCSIWGENIE: c_ulong = 0x8B30; +pub const SIOCGIWGENIE: c_ulong = 0x8B31; + +pub const SIOCSIWMLME: c_ulong = 0x8B16; + +pub const SIOCSIWAUTH: c_ulong = 0x8B32; +pub const SIOCGIWAUTH: c_ulong = 0x8B33; + +pub const SIOCSIWENCODEEXT: c_ulong = 0x8B34; +pub const SIOCGIWENCODEEXT: c_ulong = 0x8B35; + +pub const SIOCSIWPMKSA: c_ulong = 0x8B36; + +pub const SIOCIWFIRSTPRIV: c_ulong = 0x8BE0; +pub const SIOCIWLASTPRIV: c_ulong = 0x8BFF; + +pub const SIOCIWFIRST: c_ulong = 0x8B00; +pub const SIOCIWLAST: c_ulong = SIOCIWLASTPRIV; + +pub const IWEVTXDROP: c_ulong = 0x8C00; +pub const IWEVQUAL: c_ulong = 0x8C01; +pub const IWEVCUSTOM: c_ulong = 0x8C02; +pub const IWEVREGISTERED: c_ulong = 0x8C03; +pub const IWEVEXPIRED: c_ulong = 0x8C04; +pub const IWEVGENIE: c_ulong = 0x8C05; +pub const IWEVMICHAELMICFAILURE: c_ulong = 0x8C06; +pub const IWEVASSOCREQIE: c_ulong = 0x8C07; +pub const IWEVASSOCRESPIE: c_ulong = 0x8C08; +pub const IWEVPMKIDCAND: c_ulong = 0x8C09; +pub const IWEVFIRST: c_ulong = 0x8C00; + +pub const IW_PRIV_TYPE_MASK: c_ulong = 0x7000; +pub const IW_PRIV_TYPE_NONE: c_ulong = 0x0000; +pub const IW_PRIV_TYPE_BYTE: c_ulong = 0x1000; +pub const IW_PRIV_TYPE_CHAR: c_ulong = 0x2000; +pub const IW_PRIV_TYPE_INT: c_ulong = 0x4000; +pub const IW_PRIV_TYPE_FLOAT: c_ulong = 0x5000; +pub const IW_PRIV_TYPE_ADDR: c_ulong = 0x6000; + +pub const IW_PRIV_SIZE_FIXED: c_ulong = 0x0800; + +pub const IW_PRIV_SIZE_MASK: c_ulong = 0x07FF; + +pub const IW_MAX_FREQUENCIES: usize = 32; +pub const IW_MAX_BITRATES: usize = 32; +pub const IW_MAX_TXPOWER: usize = 8; +pub const IW_MAX_SPY: usize = 8; +pub const IW_MAX_AP: usize = 64; +pub const IW_ESSID_MAX_SIZE: usize = 32; + +pub const IW_MODE_AUTO: usize = 0; +pub const IW_MODE_ADHOC: usize = 1; +pub const IW_MODE_INFRA: usize = 2; +pub const IW_MODE_MASTER: usize = 3; +pub const IW_MODE_REPEAT: usize = 4; +pub const IW_MODE_SECOND: usize = 5; +pub const IW_MODE_MONITOR: usize = 6; +pub const IW_MODE_MESH: usize = 7; + +pub const IW_QUAL_QUAL_UPDATED: c_ulong = 0x01; +pub const IW_QUAL_LEVEL_UPDATED: c_ulong = 0x02; +pub const IW_QUAL_NOISE_UPDATED: c_ulong = 0x04; +pub const IW_QUAL_ALL_UPDATED: c_ulong = 0x07; +pub const IW_QUAL_DBM: c_ulong = 0x08; +pub const IW_QUAL_QUAL_INVALID: c_ulong = 0x10; +pub const IW_QUAL_LEVEL_INVALID: c_ulong = 0x20; +pub const IW_QUAL_NOISE_INVALID: c_ulong = 0x40; +pub const IW_QUAL_RCPI: c_ulong = 0x80; +pub const IW_QUAL_ALL_INVALID: c_ulong = 0x70; + +pub const IW_FREQ_AUTO: c_ulong = 0x00; +pub const IW_FREQ_FIXED: c_ulong = 0x01; + +pub const IW_MAX_ENCODING_SIZES: usize = 8; +pub const IW_ENCODING_TOKEN_MAX: usize = 64; + +pub const IW_ENCODE_INDEX: c_ulong = 0x00FF; +pub const IW_ENCODE_FLAGS: c_ulong = 0xFF00; +pub const IW_ENCODE_MODE: c_ulong = 0xF000; +pub const 
IW_ENCODE_DISABLED: c_ulong = 0x8000; +pub const IW_ENCODE_ENABLED: c_ulong = 0x0000; +pub const IW_ENCODE_RESTRICTED: c_ulong = 0x4000; +pub const IW_ENCODE_OPEN: c_ulong = 0x2000; +pub const IW_ENCODE_NOKEY: c_ulong = 0x0800; +pub const IW_ENCODE_TEMP: c_ulong = 0x0400; + +pub const IW_POWER_ON: c_ulong = 0x0000; +pub const IW_POWER_TYPE: c_ulong = 0xF000; +pub const IW_POWER_PERIOD: c_ulong = 0x1000; +pub const IW_POWER_TIMEOUT: c_ulong = 0x2000; +pub const IW_POWER_MODE: c_ulong = 0x0F00; +pub const IW_POWER_UNICAST_R: c_ulong = 0x0100; +pub const IW_POWER_MULTICAST_R: c_ulong = 0x0200; +pub const IW_POWER_ALL_R: c_ulong = 0x0300; +pub const IW_POWER_FORCE_S: c_ulong = 0x0400; +pub const IW_POWER_REPEATER: c_ulong = 0x0800; +pub const IW_POWER_MODIFIER: c_ulong = 0x000F; +pub const IW_POWER_MIN: c_ulong = 0x0001; +pub const IW_POWER_MAX: c_ulong = 0x0002; +pub const IW_POWER_RELATIVE: c_ulong = 0x0004; + +pub const IW_TXPOW_TYPE: c_ulong = 0x00FF; +pub const IW_TXPOW_DBM: c_ulong = 0x0000; +pub const IW_TXPOW_MWATT: c_ulong = 0x0001; +pub const IW_TXPOW_RELATIVE: c_ulong = 0x0002; +pub const IW_TXPOW_RANGE: c_ulong = 0x1000; + +pub const IW_RETRY_ON: c_ulong = 0x0000; +pub const IW_RETRY_TYPE: c_ulong = 0xF000; +pub const IW_RETRY_LIMIT: c_ulong = 0x1000; +pub const IW_RETRY_LIFETIME: c_ulong = 0x2000; +pub const IW_RETRY_MODIFIER: c_ulong = 0x00FF; +pub const IW_RETRY_MIN: c_ulong = 0x0001; +pub const IW_RETRY_MAX: c_ulong = 0x0002; +pub const IW_RETRY_RELATIVE: c_ulong = 0x0004; +pub const IW_RETRY_SHORT: c_ulong = 0x0010; +pub const IW_RETRY_LONG: c_ulong = 0x0020; + +pub const IW_SCAN_DEFAULT: c_ulong = 0x0000; +pub const IW_SCAN_ALL_ESSID: c_ulong = 0x0001; +pub const IW_SCAN_THIS_ESSID: c_ulong = 0x0002; +pub const IW_SCAN_ALL_FREQ: c_ulong = 0x0004; +pub const IW_SCAN_THIS_FREQ: c_ulong = 0x0008; +pub const IW_SCAN_ALL_MODE: c_ulong = 0x0010; +pub const IW_SCAN_THIS_MODE: c_ulong = 0x0020; +pub const IW_SCAN_ALL_RATE: c_ulong = 0x0040; +pub const IW_SCAN_THIS_RATE: c_ulong = 0x0080; + +pub const IW_SCAN_TYPE_ACTIVE: usize = 0; +pub const IW_SCAN_TYPE_PASSIVE: usize = 1; + +pub const IW_SCAN_MAX_DATA: usize = 4096; + +pub const IW_SCAN_CAPA_NONE: c_ulong = 0x00; +pub const IW_SCAN_CAPA_ESSID: c_ulong = 0x01; +pub const IW_SCAN_CAPA_BSSID: c_ulong = 0x02; +pub const IW_SCAN_CAPA_CHANNEL: c_ulong = 0x04; +pub const IW_SCAN_CAPA_MODE: c_ulong = 0x08; +pub const IW_SCAN_CAPA_RATE: c_ulong = 0x10; +pub const IW_SCAN_CAPA_TYPE: c_ulong = 0x20; +pub const IW_SCAN_CAPA_TIME: c_ulong = 0x40; + +pub const IW_CUSTOM_MAX: c_ulong = 256; + +pub const IW_GENERIC_IE_MAX: c_ulong = 1024; + +pub const IW_MLME_DEAUTH: c_ulong = 0; +pub const IW_MLME_DISASSOC: c_ulong = 1; +pub const IW_MLME_AUTH: c_ulong = 2; +pub const IW_MLME_ASSOC: c_ulong = 3; + +pub const IW_AUTH_INDEX: c_ulong = 0x0FFF; +pub const IW_AUTH_FLAGS: c_ulong = 0xF000; + +pub const IW_AUTH_WPA_VERSION: usize = 0; +pub const IW_AUTH_CIPHER_PAIRWISE: usize = 1; +pub const IW_AUTH_CIPHER_GROUP: usize = 2; +pub const IW_AUTH_KEY_MGMT: usize = 3; +pub const IW_AUTH_TKIP_COUNTERMEASURES: usize = 4; +pub const IW_AUTH_DROP_UNENCRYPTED: usize = 5; +pub const IW_AUTH_80211_AUTH_ALG: usize = 6; +pub const IW_AUTH_WPA_ENABLED: usize = 7; +pub const IW_AUTH_RX_UNENCRYPTED_EAPOL: usize = 8; +pub const IW_AUTH_ROAMING_CONTROL: usize = 9; +pub const IW_AUTH_PRIVACY_INVOKED: usize = 10; +pub const IW_AUTH_CIPHER_GROUP_MGMT: usize = 11; +pub const IW_AUTH_MFP: usize = 12; + +pub const IW_AUTH_WPA_VERSION_DISABLED: c_ulong = 0x00000001; +pub const 
IW_AUTH_WPA_VERSION_WPA: c_ulong = 0x00000002; +pub const IW_AUTH_WPA_VERSION_WPA2: c_ulong = 0x00000004; + +pub const IW_AUTH_CIPHER_NONE: c_ulong = 0x00000001; +pub const IW_AUTH_CIPHER_WEP40: c_ulong = 0x00000002; +pub const IW_AUTH_CIPHER_TKIP: c_ulong = 0x00000004; +pub const IW_AUTH_CIPHER_CCMP: c_ulong = 0x00000008; +pub const IW_AUTH_CIPHER_WEP104: c_ulong = 0x00000010; +pub const IW_AUTH_CIPHER_AES_CMAC: c_ulong = 0x00000020; + +pub const IW_AUTH_KEY_MGMT_802_1X: usize = 1; +pub const IW_AUTH_KEY_MGMT_PSK: usize = 2; + +pub const IW_AUTH_ALG_OPEN_SYSTEM: c_ulong = 0x00000001; +pub const IW_AUTH_ALG_SHARED_KEY: c_ulong = 0x00000002; +pub const IW_AUTH_ALG_LEAP: c_ulong = 0x00000004; + +pub const IW_AUTH_ROAMING_ENABLE: usize = 0; +pub const IW_AUTH_ROAMING_DISABLE: usize = 1; + +pub const IW_AUTH_MFP_DISABLED: usize = 0; +pub const IW_AUTH_MFP_OPTIONAL: usize = 1; +pub const IW_AUTH_MFP_REQUIRED: usize = 2; + +pub const IW_ENCODE_SEQ_MAX_SIZE: usize = 8; + +pub const IW_ENCODE_ALG_NONE: usize = 0; +pub const IW_ENCODE_ALG_WEP: usize = 1; +pub const IW_ENCODE_ALG_TKIP: usize = 2; +pub const IW_ENCODE_ALG_CCMP: usize = 3; +pub const IW_ENCODE_ALG_PMK: usize = 4; +pub const IW_ENCODE_ALG_AES_CMAC: usize = 5; + +pub const IW_ENCODE_EXT_TX_SEQ_VALID: c_ulong = 0x00000001; +pub const IW_ENCODE_EXT_RX_SEQ_VALID: c_ulong = 0x00000002; +pub const IW_ENCODE_EXT_GROUP_KEY: c_ulong = 0x00000004; +pub const IW_ENCODE_EXT_SET_TX_KEY: c_ulong = 0x00000008; + +pub const IW_MICFAILURE_KEY_ID: c_ulong = 0x00000003; +pub const IW_MICFAILURE_GROUP: c_ulong = 0x00000004; +pub const IW_MICFAILURE_PAIRWISE: c_ulong = 0x00000008; +pub const IW_MICFAILURE_STAKEY: c_ulong = 0x00000010; +pub const IW_MICFAILURE_COUNT: c_ulong = 0x00000060; + +pub const IW_ENC_CAPA_WPA: c_ulong = 0x00000001; +pub const IW_ENC_CAPA_WPA2: c_ulong = 0x00000002; +pub const IW_ENC_CAPA_CIPHER_TKIP: c_ulong = 0x00000004; +pub const IW_ENC_CAPA_CIPHER_CCMP: c_ulong = 0x00000008; +pub const IW_ENC_CAPA_4WAY_HANDSHAKE: c_ulong = 0x00000010; + +pub const IW_EVENT_CAPA_K_0: c_ulong = 0x4000050; // IW_EVENT_CAPA_MASK(0x8B04) | IW_EVENT_CAPA_MASK(0x8B06) | IW_EVENT_CAPA_MASK(0x8B1A); +pub const IW_EVENT_CAPA_K_1: c_ulong = 0x400; // W_EVENT_CAPA_MASK(0x8B2A); + +pub const IW_PMKSA_ADD: usize = 1; +pub const IW_PMKSA_REMOVE: usize = 2; +pub const IW_PMKSA_FLUSH: usize = 3; + +pub const IW_PMKID_LEN: usize = 16; + +pub const IW_PMKID_CAND_PREAUTH: c_ulong = 0x00000001; + +pub const IW_EV_LCP_PK_LEN: usize = 4; + +pub const IW_EV_CHAR_PK_LEN: usize = 20; // IW_EV_LCP_PK_LEN + crate::IFNAMSIZ; +pub const IW_EV_UINT_PK_LEN: usize = 8; // IW_EV_LCP_PK_LEN + size_of::(); +pub const IW_EV_FREQ_PK_LEN: usize = 12; // IW_EV_LCP_PK_LEN + size_of::(); +pub const IW_EV_PARAM_PK_LEN: usize = 12; // IW_EV_LCP_PK_LEN + size_of::(); +pub const IW_EV_ADDR_PK_LEN: usize = 20; // IW_EV_LCP_PK_LEN + size_of::(); +pub const IW_EV_QUAL_PK_LEN: usize = 8; // IW_EV_LCP_PK_LEN + size_of::(); +pub const IW_EV_POINT_PK_LEN: usize = 8; // IW_EV_LCP_PK_LEN + 4; + +pub const IPTOS_TOS_MASK: u8 = 0x1E; +pub const IPTOS_PREC_MASK: u8 = 0xE0; + +pub const IPTOS_ECN_NOT_ECT: u8 = 0x00; + +pub const RTF_UP: c_ushort = 0x0001; +pub const RTF_GATEWAY: c_ushort = 0x0002; + +pub const RTF_HOST: c_ushort = 0x0004; +pub const RTF_REINSTATE: c_ushort = 0x0008; +pub const RTF_DYNAMIC: c_ushort = 0x0010; +pub const RTF_MODIFIED: c_ushort = 0x0020; +pub const RTF_MTU: c_ushort = 0x0040; +pub const RTF_MSS: c_ushort = RTF_MTU; +pub const RTF_WINDOW: c_ushort = 0x0080; +pub const 
RTF_IRTT: c_ushort = 0x0100; +pub const RTF_REJECT: c_ushort = 0x0200; +pub const RTF_STATIC: c_ushort = 0x0400; +pub const RTF_XRESOLVE: c_ushort = 0x0800; +pub const RTF_NOFORWARD: c_ushort = 0x1000; +pub const RTF_THROW: c_ushort = 0x2000; +pub const RTF_NOPMTUDISC: c_ushort = 0x4000; + +pub const RTF_DEFAULT: u32 = 0x00010000; +pub const RTF_ALLONLINK: u32 = 0x00020000; +pub const RTF_ADDRCONF: u32 = 0x00040000; +pub const RTF_LINKRT: u32 = 0x00100000; +pub const RTF_NONEXTHOP: u32 = 0x00200000; +pub const RTF_CACHE: u32 = 0x01000000; +pub const RTF_FLOW: u32 = 0x02000000; +pub const RTF_POLICY: u32 = 0x04000000; + +pub const RTCF_VALVE: u32 = 0x00200000; +pub const RTCF_MASQ: u32 = 0x00400000; +pub const RTCF_NAT: u32 = 0x00800000; +pub const RTCF_DOREDIRECT: u32 = 0x01000000; +pub const RTCF_LOG: u32 = 0x02000000; +pub const RTCF_DIRECTSRC: u32 = 0x04000000; + +pub const RTF_LOCAL: u32 = 0x80000000; +pub const RTF_INTERFACE: u32 = 0x40000000; +pub const RTF_MULTICAST: u32 = 0x20000000; +pub const RTF_BROADCAST: u32 = 0x10000000; +pub const RTF_NAT: u32 = 0x08000000; +pub const RTF_ADDRCLASSMASK: u32 = 0xF8000000; + +pub const RT_CLASS_UNSPEC: u8 = 0; +pub const RT_CLASS_DEFAULT: u8 = 253; +pub const RT_CLASS_MAIN: u8 = 254; +pub const RT_CLASS_LOCAL: u8 = 255; +pub const RT_CLASS_MAX: u8 = 255; + +// linux/neighbor.h +pub const NUD_NONE: u16 = 0x00; +pub const NUD_INCOMPLETE: u16 = 0x01; +pub const NUD_REACHABLE: u16 = 0x02; +pub const NUD_STALE: u16 = 0x04; +pub const NUD_DELAY: u16 = 0x08; +pub const NUD_PROBE: u16 = 0x10; +pub const NUD_FAILED: u16 = 0x20; +pub const NUD_NOARP: u16 = 0x40; +pub const NUD_PERMANENT: u16 = 0x80; + +pub const NTF_USE: u8 = 0x01; +pub const NTF_SELF: u8 = 0x02; +pub const NTF_MASTER: u8 = 0x04; +pub const NTF_PROXY: u8 = 0x08; +pub const NTF_ROUTER: u8 = 0x80; + +pub const NDA_UNSPEC: c_ushort = 0; +pub const NDA_DST: c_ushort = 1; +pub const NDA_LLADDR: c_ushort = 2; +pub const NDA_CACHEINFO: c_ushort = 3; +pub const NDA_PROBES: c_ushort = 4; +pub const NDA_VLAN: c_ushort = 5; +pub const NDA_PORT: c_ushort = 6; +pub const NDA_VNI: c_ushort = 7; +pub const NDA_IFINDEX: c_ushort = 8; + +// linux/netlink.h +pub const NLA_ALIGNTO: c_int = 4; + +pub const NETLINK_ROUTE: c_int = 0; +pub const NETLINK_UNUSED: c_int = 1; +pub const NETLINK_USERSOCK: c_int = 2; +pub const NETLINK_FIREWALL: c_int = 3; +pub const NETLINK_SOCK_DIAG: c_int = 4; +pub const NETLINK_NFLOG: c_int = 5; +pub const NETLINK_XFRM: c_int = 6; +pub const NETLINK_SELINUX: c_int = 7; +pub const NETLINK_ISCSI: c_int = 8; +pub const NETLINK_AUDIT: c_int = 9; +pub const NETLINK_FIB_LOOKUP: c_int = 10; +pub const NETLINK_CONNECTOR: c_int = 11; +pub const NETLINK_NETFILTER: c_int = 12; +pub const NETLINK_IP6_FW: c_int = 13; +pub const NETLINK_DNRTMSG: c_int = 14; +pub const NETLINK_KOBJECT_UEVENT: c_int = 15; +pub const NETLINK_GENERIC: c_int = 16; +pub const NETLINK_SCSITRANSPORT: c_int = 18; +pub const NETLINK_ECRYPTFS: c_int = 19; +pub const NETLINK_RDMA: c_int = 20; +pub const NETLINK_CRYPTO: c_int = 21; +pub const NETLINK_INET_DIAG: c_int = NETLINK_SOCK_DIAG; + +pub const NLM_F_REQUEST: c_int = 1; +pub const NLM_F_MULTI: c_int = 2; +pub const NLM_F_ACK: c_int = 4; +pub const NLM_F_ECHO: c_int = 8; +pub const NLM_F_DUMP_INTR: c_int = 16; +pub const NLM_F_DUMP_FILTERED: c_int = 32; + +pub const NLM_F_ROOT: c_int = 0x100; +pub const NLM_F_MATCH: c_int = 0x200; +pub const NLM_F_ATOMIC: c_int = 0x400; +pub const NLM_F_DUMP: c_int = NLM_F_ROOT | NLM_F_MATCH; + +pub const NLM_F_REPLACE: c_int = 
0x100; +pub const NLM_F_EXCL: c_int = 0x200; +pub const NLM_F_CREATE: c_int = 0x400; +pub const NLM_F_APPEND: c_int = 0x800; + +pub const NLM_F_NONREC: c_int = 0x100; +pub const NLM_F_BULK: c_int = 0x200; + +pub const NLM_F_CAPPED: c_int = 0x100; +pub const NLM_F_ACK_TLVS: c_int = 0x200; + +pub const NETLINK_ADD_MEMBERSHIP: c_int = 1; +pub const NETLINK_DROP_MEMBERSHIP: c_int = 2; +pub const NETLINK_PKTINFO: c_int = 3; +pub const NETLINK_BROADCAST_ERROR: c_int = 4; +pub const NETLINK_NO_ENOBUFS: c_int = 5; +pub const NETLINK_RX_RING: c_int = 6; +pub const NETLINK_TX_RING: c_int = 7; +pub const NETLINK_LISTEN_ALL_NSID: c_int = 8; +pub const NETLINK_LIST_MEMBERSHIPS: c_int = 9; +pub const NETLINK_CAP_ACK: c_int = 10; +pub const NETLINK_EXT_ACK: c_int = 11; +pub const NETLINK_GET_STRICT_CHK: c_int = 12; + +pub const NLA_F_NESTED: c_int = 1 << 15; +pub const NLA_F_NET_BYTEORDER: c_int = 1 << 14; +pub const NLA_TYPE_MASK: c_int = !(NLA_F_NESTED | NLA_F_NET_BYTEORDER); + +// linux/rtnetlink.h +pub const TCA_UNSPEC: c_ushort = 0; +pub const TCA_KIND: c_ushort = 1; +pub const TCA_OPTIONS: c_ushort = 2; +pub const TCA_STATS: c_ushort = 3; +pub const TCA_XSTATS: c_ushort = 4; +pub const TCA_RATE: c_ushort = 5; +pub const TCA_FCNT: c_ushort = 6; +pub const TCA_STATS2: c_ushort = 7; +pub const TCA_STAB: c_ushort = 8; + +pub const RTM_NEWLINK: u16 = 16; +pub const RTM_DELLINK: u16 = 17; +pub const RTM_GETLINK: u16 = 18; +pub const RTM_SETLINK: u16 = 19; +pub const RTM_NEWADDR: u16 = 20; +pub const RTM_DELADDR: u16 = 21; +pub const RTM_GETADDR: u16 = 22; +pub const RTM_NEWROUTE: u16 = 24; +pub const RTM_DELROUTE: u16 = 25; +pub const RTM_GETROUTE: u16 = 26; +pub const RTM_NEWNEIGH: u16 = 28; +pub const RTM_DELNEIGH: u16 = 29; +pub const RTM_GETNEIGH: u16 = 30; +pub const RTM_NEWRULE: u16 = 32; +pub const RTM_DELRULE: u16 = 33; +pub const RTM_GETRULE: u16 = 34; +pub const RTM_NEWQDISC: u16 = 36; +pub const RTM_DELQDISC: u16 = 37; +pub const RTM_GETQDISC: u16 = 38; +pub const RTM_NEWTCLASS: u16 = 40; +pub const RTM_DELTCLASS: u16 = 41; +pub const RTM_GETTCLASS: u16 = 42; +pub const RTM_NEWTFILTER: u16 = 44; +pub const RTM_DELTFILTER: u16 = 45; +pub const RTM_GETTFILTER: u16 = 46; +pub const RTM_NEWACTION: u16 = 48; +pub const RTM_DELACTION: u16 = 49; +pub const RTM_GETACTION: u16 = 50; +pub const RTM_NEWPREFIX: u16 = 52; +pub const RTM_GETMULTICAST: u16 = 58; +pub const RTM_GETANYCAST: u16 = 62; +pub const RTM_NEWNEIGHTBL: u16 = 64; +pub const RTM_GETNEIGHTBL: u16 = 66; +pub const RTM_SETNEIGHTBL: u16 = 67; +pub const RTM_NEWNDUSEROPT: u16 = 68; +pub const RTM_NEWADDRLABEL: u16 = 72; +pub const RTM_DELADDRLABEL: u16 = 73; +pub const RTM_GETADDRLABEL: u16 = 74; +pub const RTM_GETDCB: u16 = 78; +pub const RTM_SETDCB: u16 = 79; +pub const RTM_NEWNETCONF: u16 = 80; +pub const RTM_GETNETCONF: u16 = 82; +pub const RTM_NEWMDB: u16 = 84; +pub const RTM_DELMDB: u16 = 85; +pub const RTM_GETMDB: u16 = 86; +pub const RTM_NEWNSID: u16 = 88; +pub const RTM_DELNSID: u16 = 89; +pub const RTM_GETNSID: u16 = 90; + +pub const RTM_F_NOTIFY: c_uint = 0x100; +pub const RTM_F_CLONED: c_uint = 0x200; +pub const RTM_F_EQUALIZE: c_uint = 0x400; +pub const RTM_F_PREFIX: c_uint = 0x800; + +pub const RTA_UNSPEC: c_ushort = 0; +pub const RTA_DST: c_ushort = 1; +pub const RTA_SRC: c_ushort = 2; +pub const RTA_IIF: c_ushort = 3; +pub const RTA_OIF: c_ushort = 4; +pub const RTA_GATEWAY: c_ushort = 5; +pub const RTA_PRIORITY: c_ushort = 6; +pub const RTA_PREFSRC: c_ushort = 7; +pub const RTA_METRICS: c_ushort = 8; +pub const 
RTA_MULTIPATH: c_ushort = 9; +pub const RTA_PROTOINFO: c_ushort = 10; // No longer used +pub const RTA_FLOW: c_ushort = 11; +pub const RTA_CACHEINFO: c_ushort = 12; +pub const RTA_SESSION: c_ushort = 13; // No longer used +pub const RTA_MP_ALGO: c_ushort = 14; // No longer used +pub const RTA_TABLE: c_ushort = 15; +pub const RTA_MARK: c_ushort = 16; +pub const RTA_MFC_STATS: c_ushort = 17; + +pub const RTN_UNSPEC: c_uchar = 0; +pub const RTN_UNICAST: c_uchar = 1; +pub const RTN_LOCAL: c_uchar = 2; +pub const RTN_BROADCAST: c_uchar = 3; +pub const RTN_ANYCAST: c_uchar = 4; +pub const RTN_MULTICAST: c_uchar = 5; +pub const RTN_BLACKHOLE: c_uchar = 6; +pub const RTN_UNREACHABLE: c_uchar = 7; +pub const RTN_PROHIBIT: c_uchar = 8; +pub const RTN_THROW: c_uchar = 9; +pub const RTN_NAT: c_uchar = 10; +pub const RTN_XRESOLVE: c_uchar = 11; + +pub const RTPROT_UNSPEC: c_uchar = 0; +pub const RTPROT_REDIRECT: c_uchar = 1; +pub const RTPROT_KERNEL: c_uchar = 2; +pub const RTPROT_BOOT: c_uchar = 3; +pub const RTPROT_STATIC: c_uchar = 4; + +pub const RT_SCOPE_UNIVERSE: c_uchar = 0; +pub const RT_SCOPE_SITE: c_uchar = 200; +pub const RT_SCOPE_LINK: c_uchar = 253; +pub const RT_SCOPE_HOST: c_uchar = 254; +pub const RT_SCOPE_NOWHERE: c_uchar = 255; + +pub const RT_TABLE_UNSPEC: c_uchar = 0; +pub const RT_TABLE_COMPAT: c_uchar = 252; +pub const RT_TABLE_DEFAULT: c_uchar = 253; +pub const RT_TABLE_MAIN: c_uchar = 254; +pub const RT_TABLE_LOCAL: c_uchar = 255; + +pub const RTMSG_OVERRUN: u32 = crate::NLMSG_OVERRUN as u32; +pub const RTMSG_NEWDEVICE: u32 = 0x11; +pub const RTMSG_DELDEVICE: u32 = 0x12; +pub const RTMSG_NEWROUTE: u32 = 0x21; +pub const RTMSG_DELROUTE: u32 = 0x22; +pub const RTMSG_NEWRULE: u32 = 0x31; +pub const RTMSG_DELRULE: u32 = 0x32; +pub const RTMSG_CONTROL: u32 = 0x40; +pub const RTMSG_AR_FAILED: u32 = 0x51; + +pub const MAX_ADDR_LEN: usize = 7; +pub const ARPD_UPDATE: c_ushort = 0x01; +pub const ARPD_LOOKUP: c_ushort = 0x02; +pub const ARPD_FLUSH: c_ushort = 0x03; +pub const ATF_MAGIC: c_int = 0x80; + +pub const RTEXT_FILTER_VF: c_int = 1 << 0; +pub const RTEXT_FILTER_BRVLAN: c_int = 1 << 1; +pub const RTEXT_FILTER_BRVLAN_COMPRESSED: c_int = 1 << 2; +pub const RTEXT_FILTER_SKIP_STATS: c_int = 1 << 3; +pub const RTEXT_FILTER_MRP: c_int = 1 << 4; +pub const RTEXT_FILTER_CFM_CONFIG: c_int = 1 << 5; +pub const RTEXT_FILTER_CFM_STATUS: c_int = 1 << 6; + +// userspace compat definitions for RTNLGRP_* +pub const RTMGRP_LINK: c_int = 0x00001; +pub const RTMGRP_NOTIFY: c_int = 0x00002; +pub const RTMGRP_NEIGH: c_int = 0x00004; +pub const RTMGRP_TC: c_int = 0x00008; +pub const RTMGRP_IPV4_IFADDR: c_int = 0x00010; +pub const RTMGRP_IPV4_MROUTE: c_int = 0x00020; +pub const RTMGRP_IPV4_ROUTE: c_int = 0x00040; +pub const RTMGRP_IPV4_RULE: c_int = 0x00080; +pub const RTMGRP_IPV6_IFADDR: c_int = 0x00100; +pub const RTMGRP_IPV6_MROUTE: c_int = 0x00200; +pub const RTMGRP_IPV6_ROUTE: c_int = 0x00400; +pub const RTMGRP_IPV6_IFINFO: c_int = 0x00800; +pub const RTMGRP_DECnet_IFADDR: c_int = 0x01000; +pub const RTMGRP_DECnet_ROUTE: c_int = 0x04000; +pub const RTMGRP_IPV6_PREFIX: c_int = 0x20000; + +// enum rtnetlink_groups +pub const RTNLGRP_NONE: c_uint = 0x00; +pub const RTNLGRP_LINK: c_uint = 0x01; +pub const RTNLGRP_NOTIFY: c_uint = 0x02; +pub const RTNLGRP_NEIGH: c_uint = 0x03; +pub const RTNLGRP_TC: c_uint = 0x04; +pub const RTNLGRP_IPV4_IFADDR: c_uint = 0x05; +pub const RTNLGRP_IPV4_MROUTE: c_uint = 0x06; +pub const RTNLGRP_IPV4_ROUTE: c_uint = 0x07; +pub const RTNLGRP_IPV4_RULE: c_uint = 0x08; +pub 
const RTNLGRP_IPV6_IFADDR: c_uint = 0x09; +pub const RTNLGRP_IPV6_MROUTE: c_uint = 0x0a; +pub const RTNLGRP_IPV6_ROUTE: c_uint = 0x0b; +pub const RTNLGRP_IPV6_IFINFO: c_uint = 0x0c; +pub const RTNLGRP_DECnet_IFADDR: c_uint = 0x0d; +pub const RTNLGRP_NOP2: c_uint = 0x0e; +pub const RTNLGRP_DECnet_ROUTE: c_uint = 0x0f; +pub const RTNLGRP_DECnet_RULE: c_uint = 0x10; +pub const RTNLGRP_NOP4: c_uint = 0x11; +pub const RTNLGRP_IPV6_PREFIX: c_uint = 0x12; +pub const RTNLGRP_IPV6_RULE: c_uint = 0x13; +pub const RTNLGRP_ND_USEROPT: c_uint = 0x14; +pub const RTNLGRP_PHONET_IFADDR: c_uint = 0x15; +pub const RTNLGRP_PHONET_ROUTE: c_uint = 0x16; +pub const RTNLGRP_DCB: c_uint = 0x17; +pub const RTNLGRP_IPV4_NETCONF: c_uint = 0x18; +pub const RTNLGRP_IPV6_NETCONF: c_uint = 0x19; +pub const RTNLGRP_MDB: c_uint = 0x1a; +pub const RTNLGRP_MPLS_ROUTE: c_uint = 0x1b; +pub const RTNLGRP_NSID: c_uint = 0x1c; +pub const RTNLGRP_MPLS_NETCONF: c_uint = 0x1d; +pub const RTNLGRP_IPV4_MROUTE_R: c_uint = 0x1e; +pub const RTNLGRP_IPV6_MROUTE_R: c_uint = 0x1f; +pub const RTNLGRP_NEXTHOP: c_uint = 0x20; +pub const RTNLGRP_BRVLAN: c_uint = 0x21; +pub const RTNLGRP_MCTP_IFADDR: c_uint = 0x22; +pub const RTNLGRP_TUNNEL: c_uint = 0x23; +pub const RTNLGRP_STATS: c_uint = 0x24; + +// linux/cn_proc.h +c_enum! { + pub enum proc_cn_mcast_op { + PROC_CN_MCAST_LISTEN = 1, + PROC_CN_MCAST_IGNORE = 2, + } + + pub enum proc_cn_event { + PROC_EVENT_NONE = 0x00000000, + PROC_EVENT_FORK = 0x00000001, + PROC_EVENT_EXEC = 0x00000002, + PROC_EVENT_UID = 0x00000004, + PROC_EVENT_GID = 0x00000040, + PROC_EVENT_SID = 0x00000080, + PROC_EVENT_PTRACE = 0x00000100, + PROC_EVENT_COMM = 0x00000200, + PROC_EVENT_NONZERO_EXIT = 0x20000000, + PROC_EVENT_COREDUMP = 0x40000000, + PROC_EVENT_EXIT = 0x80000000, + } +} + +// linux/connector.h +pub const CN_IDX_PROC: c_uint = 0x1; +pub const CN_VAL_PROC: c_uint = 0x1; +pub const CN_IDX_CIFS: c_uint = 0x2; +pub const CN_VAL_CIFS: c_uint = 0x1; +pub const CN_W1_IDX: c_uint = 0x3; +pub const CN_W1_VAL: c_uint = 0x1; +pub const CN_IDX_V86D: c_uint = 0x4; +pub const CN_VAL_V86D_UVESAFB: c_uint = 0x1; +pub const CN_IDX_BB: c_uint = 0x5; +pub const CN_DST_IDX: c_uint = 0x6; +pub const CN_DST_VAL: c_uint = 0x1; +pub const CN_IDX_DM: c_uint = 0x7; +pub const CN_VAL_DM_USERSPACE_LOG: c_uint = 0x1; +pub const CN_IDX_DRBD: c_uint = 0x8; +pub const CN_VAL_DRBD: c_uint = 0x1; +pub const CN_KVP_IDX: c_uint = 0x9; +pub const CN_KVP_VAL: c_uint = 0x1; +pub const CN_VSS_IDX: c_uint = 0xA; +pub const CN_VSS_VAL: c_uint = 0x1; + +// linux/module.h +pub const MODULE_INIT_IGNORE_MODVERSIONS: c_uint = 0x0001; +pub const MODULE_INIT_IGNORE_VERMAGIC: c_uint = 0x0002; + +// linux/net_tstamp.h +pub const SOF_TIMESTAMPING_TX_HARDWARE: c_uint = 1 << 0; +pub const SOF_TIMESTAMPING_TX_SOFTWARE: c_uint = 1 << 1; +pub const SOF_TIMESTAMPING_RX_HARDWARE: c_uint = 1 << 2; +pub const SOF_TIMESTAMPING_RX_SOFTWARE: c_uint = 1 << 3; +pub const SOF_TIMESTAMPING_SOFTWARE: c_uint = 1 << 4; +pub const SOF_TIMESTAMPING_SYS_HARDWARE: c_uint = 1 << 5; +pub const SOF_TIMESTAMPING_RAW_HARDWARE: c_uint = 1 << 6; +pub const SOF_TIMESTAMPING_OPT_ID: c_uint = 1 << 7; +pub const SOF_TIMESTAMPING_TX_SCHED: c_uint = 1 << 8; +pub const SOF_TIMESTAMPING_TX_ACK: c_uint = 1 << 9; +pub const SOF_TIMESTAMPING_OPT_CMSG: c_uint = 1 << 10; +pub const SOF_TIMESTAMPING_OPT_TSONLY: c_uint = 1 << 11; +pub const SOF_TIMESTAMPING_OPT_STATS: c_uint = 1 << 12; +pub const SOF_TIMESTAMPING_OPT_PKTINFO: c_uint = 1 << 13; +pub const SOF_TIMESTAMPING_OPT_TX_SWHW: c_uint = 1 << 14; 
+pub const SOF_TIMESTAMPING_BIND_PHC: c_uint = 1 << 15; +pub const SOF_TIMESTAMPING_OPT_ID_TCP: c_uint = 1 << 16; +pub const SOF_TIMESTAMPING_OPT_RX_FILTER: c_uint = 1 << 17; +pub const SOF_TXTIME_DEADLINE_MODE: u32 = 1 << 0; +pub const SOF_TXTIME_REPORT_ERRORS: u32 = 1 << 1; + +pub const HWTSTAMP_TX_OFF: c_uint = 0; +pub const HWTSTAMP_TX_ON: c_uint = 1; +pub const HWTSTAMP_TX_ONESTEP_SYNC: c_uint = 2; +pub const HWTSTAMP_TX_ONESTEP_P2P: c_uint = 3; + +pub const HWTSTAMP_FILTER_NONE: c_uint = 0; +pub const HWTSTAMP_FILTER_ALL: c_uint = 1; +pub const HWTSTAMP_FILTER_SOME: c_uint = 2; +pub const HWTSTAMP_FILTER_PTP_V1_L4_EVENT: c_uint = 3; +pub const HWTSTAMP_FILTER_PTP_V1_L4_SYNC: c_uint = 4; +pub const HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: c_uint = 5; +pub const HWTSTAMP_FILTER_PTP_V2_L4_EVENT: c_uint = 6; +pub const HWTSTAMP_FILTER_PTP_V2_L4_SYNC: c_uint = 7; +pub const HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: c_uint = 8; +pub const HWTSTAMP_FILTER_PTP_V2_L2_EVENT: c_uint = 9; +pub const HWTSTAMP_FILTER_PTP_V2_L2_SYNC: c_uint = 10; +pub const HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: c_uint = 11; +pub const HWTSTAMP_FILTER_PTP_V2_EVENT: c_uint = 12; +pub const HWTSTAMP_FILTER_PTP_V2_SYNC: c_uint = 13; +pub const HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: c_uint = 14; +pub const HWTSTAMP_FILTER_NTP_ALL: c_uint = 15; + +// linux/ptp_clock.h +pub const PTP_MAX_SAMPLES: c_uint = 25; // Maximum allowed offset measurement samples. + +const PTP_CLK_MAGIC: u32 = b'=' as u32; + +pub const PTP_CLOCK_GETCAPS: Ioctl = _IOR::(PTP_CLK_MAGIC, 1); +pub const PTP_EXTTS_REQUEST: Ioctl = _IOW::(PTP_CLK_MAGIC, 2); +pub const PTP_PEROUT_REQUEST: Ioctl = _IOW::(PTP_CLK_MAGIC, 3); +pub const PTP_ENABLE_PPS: Ioctl = _IOW::(PTP_CLK_MAGIC, 4); +pub const PTP_SYS_OFFSET: Ioctl = _IOW::(PTP_CLK_MAGIC, 5); +pub const PTP_PIN_GETFUNC: Ioctl = _IOWR::(PTP_CLK_MAGIC, 6); +pub const PTP_PIN_SETFUNC: Ioctl = _IOW::(PTP_CLK_MAGIC, 7); +pub const PTP_SYS_OFFSET_PRECISE: Ioctl = _IOWR::(PTP_CLK_MAGIC, 8); +pub const PTP_SYS_OFFSET_EXTENDED: Ioctl = _IOWR::(PTP_CLK_MAGIC, 9); + +pub const PTP_CLOCK_GETCAPS2: Ioctl = _IOR::(PTP_CLK_MAGIC, 10); +pub const PTP_EXTTS_REQUEST2: Ioctl = _IOW::(PTP_CLK_MAGIC, 11); +pub const PTP_PEROUT_REQUEST2: Ioctl = _IOW::(PTP_CLK_MAGIC, 12); +pub const PTP_ENABLE_PPS2: Ioctl = _IOW::(PTP_CLK_MAGIC, 13); +pub const PTP_SYS_OFFSET2: Ioctl = _IOW::(PTP_CLK_MAGIC, 14); +pub const PTP_PIN_GETFUNC2: Ioctl = _IOWR::(PTP_CLK_MAGIC, 15); +pub const PTP_PIN_SETFUNC2: Ioctl = _IOW::(PTP_CLK_MAGIC, 16); +pub const PTP_SYS_OFFSET_PRECISE2: Ioctl = _IOWR::(PTP_CLK_MAGIC, 17); +pub const PTP_SYS_OFFSET_EXTENDED2: Ioctl = _IOWR::(PTP_CLK_MAGIC, 18); + +// enum ptp_pin_function +pub const PTP_PF_NONE: c_uint = 0; +pub const PTP_PF_EXTTS: c_uint = 1; +pub const PTP_PF_PEROUT: c_uint = 2; +pub const PTP_PF_PHYSYNC: c_uint = 3; + +// linux/tls.h +pub const TLS_TX: c_int = 1; +pub const TLS_RX: c_int = 2; + +pub const TLS_TX_ZEROCOPY_RO: c_int = 3; +pub const TLS_RX_EXPECT_NO_PAD: c_int = 4; + +pub const TLS_1_2_VERSION_MAJOR: __u8 = 0x3; +pub const TLS_1_2_VERSION_MINOR: __u8 = 0x3; +pub const TLS_1_2_VERSION: __u16 = + ((TLS_1_2_VERSION_MAJOR as __u16) << 8) | (TLS_1_2_VERSION_MINOR as __u16); + +pub const TLS_1_3_VERSION_MAJOR: __u8 = 0x3; +pub const TLS_1_3_VERSION_MINOR: __u8 = 0x4; +pub const TLS_1_3_VERSION: __u16 = + ((TLS_1_3_VERSION_MAJOR as __u16) << 8) | (TLS_1_3_VERSION_MINOR as __u16); + +pub const TLS_CIPHER_AES_GCM_128: __u16 = 51; +pub const TLS_CIPHER_AES_GCM_128_IV_SIZE: usize = 8; +pub const 
TLS_CIPHER_AES_GCM_128_KEY_SIZE: usize = 16; +pub const TLS_CIPHER_AES_GCM_128_SALT_SIZE: usize = 4; +pub const TLS_CIPHER_AES_GCM_128_TAG_SIZE: usize = 16; +pub const TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE: usize = 8; + +pub const TLS_CIPHER_AES_GCM_256: __u16 = 52; +pub const TLS_CIPHER_AES_GCM_256_IV_SIZE: usize = 8; +pub const TLS_CIPHER_AES_GCM_256_KEY_SIZE: usize = 32; +pub const TLS_CIPHER_AES_GCM_256_SALT_SIZE: usize = 4; +pub const TLS_CIPHER_AES_GCM_256_TAG_SIZE: usize = 16; +pub const TLS_CIPHER_AES_GCM_256_REC_SEQ_SIZE: usize = 8; + +pub const TLS_CIPHER_AES_CCM_128: __u16 = 53; +pub const TLS_CIPHER_AES_CCM_128_IV_SIZE: usize = 8; +pub const TLS_CIPHER_AES_CCM_128_KEY_SIZE: usize = 16; +pub const TLS_CIPHER_AES_CCM_128_SALT_SIZE: usize = 4; +pub const TLS_CIPHER_AES_CCM_128_TAG_SIZE: usize = 16; +pub const TLS_CIPHER_AES_CCM_128_REC_SEQ_SIZE: usize = 8; + +pub const TLS_CIPHER_CHACHA20_POLY1305: __u16 = 54; +pub const TLS_CIPHER_CHACHA20_POLY1305_IV_SIZE: usize = 12; +pub const TLS_CIPHER_CHACHA20_POLY1305_KEY_SIZE: usize = 32; +pub const TLS_CIPHER_CHACHA20_POLY1305_SALT_SIZE: usize = 0; +pub const TLS_CIPHER_CHACHA20_POLY1305_TAG_SIZE: usize = 16; +pub const TLS_CIPHER_CHACHA20_POLY1305_REC_SEQ_SIZE: usize = 8; + +pub const TLS_CIPHER_SM4_GCM: __u16 = 55; +pub const TLS_CIPHER_SM4_GCM_IV_SIZE: usize = 8; +pub const TLS_CIPHER_SM4_GCM_KEY_SIZE: usize = 16; +pub const TLS_CIPHER_SM4_GCM_SALT_SIZE: usize = 4; +pub const TLS_CIPHER_SM4_GCM_TAG_SIZE: usize = 16; +pub const TLS_CIPHER_SM4_GCM_REC_SEQ_SIZE: usize = 8; + +pub const TLS_CIPHER_SM4_CCM: __u16 = 56; +pub const TLS_CIPHER_SM4_CCM_IV_SIZE: usize = 8; +pub const TLS_CIPHER_SM4_CCM_KEY_SIZE: usize = 16; +pub const TLS_CIPHER_SM4_CCM_SALT_SIZE: usize = 4; +pub const TLS_CIPHER_SM4_CCM_TAG_SIZE: usize = 16; +pub const TLS_CIPHER_SM4_CCM_REC_SEQ_SIZE: usize = 8; + +pub const TLS_CIPHER_ARIA_GCM_128: __u16 = 57; +pub const TLS_CIPHER_ARIA_GCM_128_IV_SIZE: usize = 8; +pub const TLS_CIPHER_ARIA_GCM_128_KEY_SIZE: usize = 16; +pub const TLS_CIPHER_ARIA_GCM_128_SALT_SIZE: usize = 4; +pub const TLS_CIPHER_ARIA_GCM_128_TAG_SIZE: usize = 16; +pub const TLS_CIPHER_ARIA_GCM_128_REC_SEQ_SIZE: usize = 8; + +pub const TLS_CIPHER_ARIA_GCM_256: __u16 = 58; +pub const TLS_CIPHER_ARIA_GCM_256_IV_SIZE: usize = 8; +pub const TLS_CIPHER_ARIA_GCM_256_KEY_SIZE: usize = 32; +pub const TLS_CIPHER_ARIA_GCM_256_SALT_SIZE: usize = 4; +pub const TLS_CIPHER_ARIA_GCM_256_TAG_SIZE: usize = 16; +pub const TLS_CIPHER_ARIA_GCM_256_REC_SEQ_SIZE: usize = 8; + +pub const TLS_SET_RECORD_TYPE: c_int = 1; +pub const TLS_GET_RECORD_TYPE: c_int = 2; + +pub const SOL_TLS: c_int = 282; + +// enum +pub const TLS_INFO_UNSPEC: c_int = 0x00; +pub const TLS_INFO_VERSION: c_int = 0x01; +pub const TLS_INFO_CIPHER: c_int = 0x02; +pub const TLS_INFO_TXCONF: c_int = 0x03; +pub const TLS_INFO_RXCONF: c_int = 0x04; +pub const TLS_INFO_ZC_RO_TX: c_int = 0x05; +pub const TLS_INFO_RX_NO_PAD: c_int = 0x06; + +pub const TLS_CONF_BASE: c_int = 1; +pub const TLS_CONF_SW: c_int = 2; +pub const TLS_CONF_HW: c_int = 3; +pub const TLS_CONF_HW_RECORD: c_int = 4; + +// linux/if_alg.h +pub const ALG_SET_KEY: c_int = 1; +pub const ALG_SET_IV: c_int = 2; +pub const ALG_SET_OP: c_int = 3; +pub const ALG_SET_AEAD_ASSOCLEN: c_int = 4; +pub const ALG_SET_AEAD_AUTHSIZE: c_int = 5; +pub const ALG_SET_DRBG_ENTROPY: c_int = 6; +pub const ALG_SET_KEY_BY_KEY_SERIAL: c_int = 7; + +pub const ALG_OP_DECRYPT: c_int = 0; +pub const ALG_OP_ENCRYPT: c_int = 1; + +// include/uapi/linux/if.h +pub const IF_OPER_UNKNOWN: 
c_int = 0; +pub const IF_OPER_NOTPRESENT: c_int = 1; +pub const IF_OPER_DOWN: c_int = 2; +pub const IF_OPER_LOWERLAYERDOWN: c_int = 3; +pub const IF_OPER_TESTING: c_int = 4; +pub const IF_OPER_DORMANT: c_int = 5; +pub const IF_OPER_UP: c_int = 6; + +pub const IF_LINK_MODE_DEFAULT: c_int = 0; +pub const IF_LINK_MODE_DORMANT: c_int = 1; +pub const IF_LINK_MODE_TESTING: c_int = 2; + +// include/uapi/linux/udp.h +pub const UDP_CORK: c_int = 1; +pub const UDP_ENCAP: c_int = 100; +pub const UDP_NO_CHECK6_TX: c_int = 101; +pub const UDP_NO_CHECK6_RX: c_int = 102; + +// include/uapi/linux/mman.h +pub const MAP_SHARED_VALIDATE: c_int = 0x3; +pub const MAP_DROPPABLE: c_int = 0x8; + +// include/uapi/asm-generic/mman-common.h +pub const MAP_FIXED_NOREPLACE: c_int = 0x100000; +pub const MLOCK_ONFAULT: c_uint = 0x01; + +// uapi/linux/vm_sockets.h +pub const VMADDR_CID_ANY: c_uint = 0xFFFFFFFF; +pub const VMADDR_CID_HYPERVISOR: c_uint = 0; +#[deprecated( + since = "0.2.74", + note = "VMADDR_CID_RESERVED is removed since Linux v5.6 and \ + replaced with VMADDR_CID_LOCAL" +)] +pub const VMADDR_CID_RESERVED: c_uint = 1; +pub const VMADDR_CID_LOCAL: c_uint = 1; +pub const VMADDR_CID_HOST: c_uint = 2; +pub const VMADDR_PORT_ANY: c_uint = 0xFFFFFFFF; + +// uapi/linux/inotify.h +pub const IN_ACCESS: u32 = 0x0000_0001; +pub const IN_MODIFY: u32 = 0x0000_0002; +pub const IN_ATTRIB: u32 = 0x0000_0004; +pub const IN_CLOSE_WRITE: u32 = 0x0000_0008; +pub const IN_CLOSE_NOWRITE: u32 = 0x0000_0010; +pub const IN_CLOSE: u32 = IN_CLOSE_WRITE | IN_CLOSE_NOWRITE; +pub const IN_OPEN: u32 = 0x0000_0020; +pub const IN_MOVED_FROM: u32 = 0x0000_0040; +pub const IN_MOVED_TO: u32 = 0x0000_0080; +pub const IN_MOVE: u32 = IN_MOVED_FROM | IN_MOVED_TO; +pub const IN_CREATE: u32 = 0x0000_0100; +pub const IN_DELETE: u32 = 0x0000_0200; +pub const IN_DELETE_SELF: u32 = 0x0000_0400; +pub const IN_MOVE_SELF: u32 = 0x0000_0800; +pub const IN_UNMOUNT: u32 = 0x0000_2000; +pub const IN_Q_OVERFLOW: u32 = 0x0000_4000; +pub const IN_IGNORED: u32 = 0x0000_8000; +pub const IN_ONLYDIR: u32 = 0x0100_0000; +pub const IN_DONT_FOLLOW: u32 = 0x0200_0000; +pub const IN_EXCL_UNLINK: u32 = 0x0400_0000; + +// uapi/linux/securebits.h +const SECURE_NOROOT: c_int = 0; +const SECURE_NOROOT_LOCKED: c_int = 1; + +pub const SECBIT_NOROOT: c_int = issecure_mask(SECURE_NOROOT); +pub const SECBIT_NOROOT_LOCKED: c_int = issecure_mask(SECURE_NOROOT_LOCKED); + +const SECURE_NO_SETUID_FIXUP: c_int = 2; +const SECURE_NO_SETUID_FIXUP_LOCKED: c_int = 3; + +pub const SECBIT_NO_SETUID_FIXUP: c_int = issecure_mask(SECURE_NO_SETUID_FIXUP); +pub const SECBIT_NO_SETUID_FIXUP_LOCKED: c_int = issecure_mask(SECURE_NO_SETUID_FIXUP_LOCKED); + +const SECURE_KEEP_CAPS: c_int = 4; +const SECURE_KEEP_CAPS_LOCKED: c_int = 5; + +pub const SECBIT_KEEP_CAPS: c_int = issecure_mask(SECURE_KEEP_CAPS); +pub const SECBIT_KEEP_CAPS_LOCKED: c_int = issecure_mask(SECURE_KEEP_CAPS_LOCKED); + +const SECURE_NO_CAP_AMBIENT_RAISE: c_int = 6; +const SECURE_NO_CAP_AMBIENT_RAISE_LOCKED: c_int = 7; + +pub const SECBIT_NO_CAP_AMBIENT_RAISE: c_int = issecure_mask(SECURE_NO_CAP_AMBIENT_RAISE); +pub const SECBIT_NO_CAP_AMBIENT_RAISE_LOCKED: c_int = + issecure_mask(SECURE_NO_CAP_AMBIENT_RAISE_LOCKED); + +const SECURE_EXEC_RESTRICT_FILE: c_int = 8; +const SECURE_EXEC_RESTRICT_FILE_LOCKED: c_int = 9; + +pub const SECBIT_EXEC_RESTRICT_FILE: c_int = issecure_mask(SECURE_EXEC_RESTRICT_FILE); +pub const SECBIT_EXEC_RESTRICT_FILE_LOCKED: c_int = issecure_mask(SECURE_EXEC_RESTRICT_FILE_LOCKED); + +const 
SECURE_EXEC_DENY_INTERACTIVE: c_int = 10; +const SECURE_EXEC_DENY_INTERACTIVE_LOCKED: c_int = 11; + +pub const SECBIT_EXEC_DENY_INTERACTIVE: c_int = issecure_mask(SECURE_EXEC_DENY_INTERACTIVE); +pub const SECBIT_EXEC_DENY_INTERACTIVE_LOCKED: c_int = + issecure_mask(SECURE_EXEC_DENY_INTERACTIVE_LOCKED); + +pub const SECUREBITS_DEFAULT: c_int = 0x00000000; +pub const SECURE_ALL_BITS: c_int = SECBIT_NOROOT + | SECBIT_NO_SETUID_FIXUP + | SECBIT_KEEP_CAPS + | SECBIT_NO_CAP_AMBIENT_RAISE + | SECBIT_EXEC_RESTRICT_FILE + | SECBIT_EXEC_DENY_INTERACTIVE; +pub const SECURE_ALL_LOCKS: c_int = SECURE_ALL_BITS << 1; + +pub const SECURE_ALL_UNPRIVILEGED: c_int = + issecure_mask(SECURE_EXEC_RESTRICT_FILE) | issecure_mask(SECURE_EXEC_DENY_INTERACTIVE); + +const fn issecure_mask(x: c_int) -> c_int { + 1 << x +} + +// linux/keyctl.h +pub const KEY_SPEC_THREAD_KEYRING: i32 = -1; +pub const KEY_SPEC_PROCESS_KEYRING: i32 = -2; +pub const KEY_SPEC_SESSION_KEYRING: i32 = -3; +pub const KEY_SPEC_USER_KEYRING: i32 = -4; +pub const KEY_SPEC_USER_SESSION_KEYRING: i32 = -5; +pub const KEY_SPEC_GROUP_KEYRING: i32 = -6; +pub const KEY_SPEC_REQKEY_AUTH_KEY: i32 = -7; +pub const KEY_SPEC_REQUESTOR_KEYRING: i32 = -8; + +pub const KEY_REQKEY_DEFL_NO_CHANGE: i32 = -1; +pub const KEY_REQKEY_DEFL_DEFAULT: i32 = 0; +pub const KEY_REQKEY_DEFL_THREAD_KEYRING: i32 = 1; +pub const KEY_REQKEY_DEFL_PROCESS_KEYRING: i32 = 2; +pub const KEY_REQKEY_DEFL_SESSION_KEYRING: i32 = 3; +pub const KEY_REQKEY_DEFL_USER_KEYRING: i32 = 4; +pub const KEY_REQKEY_DEFL_USER_SESSION_KEYRING: i32 = 5; +pub const KEY_REQKEY_DEFL_GROUP_KEYRING: i32 = 6; +pub const KEY_REQKEY_DEFL_REQUESTOR_KEYRING: i32 = 7; + +pub const KEYCTL_GET_KEYRING_ID: u32 = 0; +pub const KEYCTL_JOIN_SESSION_KEYRING: u32 = 1; +pub const KEYCTL_UPDATE: u32 = 2; +pub const KEYCTL_REVOKE: u32 = 3; +pub const KEYCTL_CHOWN: u32 = 4; +pub const KEYCTL_SETPERM: u32 = 5; +pub const KEYCTL_DESCRIBE: u32 = 6; +pub const KEYCTL_CLEAR: u32 = 7; +pub const KEYCTL_LINK: u32 = 8; +pub const KEYCTL_UNLINK: u32 = 9; +pub const KEYCTL_SEARCH: u32 = 10; +pub const KEYCTL_READ: u32 = 11; +pub const KEYCTL_INSTANTIATE: u32 = 12; +pub const KEYCTL_NEGATE: u32 = 13; +pub const KEYCTL_SET_REQKEY_KEYRING: u32 = 14; +pub const KEYCTL_SET_TIMEOUT: u32 = 15; +pub const KEYCTL_ASSUME_AUTHORITY: u32 = 16; +pub const KEYCTL_GET_SECURITY: u32 = 17; +pub const KEYCTL_SESSION_TO_PARENT: u32 = 18; +pub const KEYCTL_REJECT: u32 = 19; +pub const KEYCTL_INSTANTIATE_IOV: u32 = 20; +pub const KEYCTL_INVALIDATE: u32 = 21; +pub const KEYCTL_GET_PERSISTENT: u32 = 22; + +pub const IN_MASK_CREATE: u32 = 0x1000_0000; +pub const IN_MASK_ADD: u32 = 0x2000_0000; +pub const IN_ISDIR: u32 = 0x4000_0000; +pub const IN_ONESHOT: u32 = 0x8000_0000; + +pub const IN_ALL_EVENTS: u32 = IN_ACCESS + | IN_MODIFY + | IN_ATTRIB + | IN_CLOSE_WRITE + | IN_CLOSE_NOWRITE + | IN_OPEN + | IN_MOVED_FROM + | IN_MOVED_TO + | IN_DELETE + | IN_CREATE + | IN_DELETE_SELF + | IN_MOVE_SELF; + +pub const IN_CLOEXEC: c_int = O_CLOEXEC; +pub const IN_NONBLOCK: c_int = O_NONBLOCK; + +// uapi/linux/mount.h +pub const OPEN_TREE_CLONE: c_uint = 0x01; +pub const OPEN_TREE_CLOEXEC: c_uint = O_CLOEXEC as c_uint; + +// uapi/linux/netfilter/nf_tables.h +pub const NFT_TABLE_MAXNAMELEN: c_int = 256; +pub const NFT_CHAIN_MAXNAMELEN: c_int = 256; +pub const NFT_SET_MAXNAMELEN: c_int = 256; +pub const NFT_OBJ_MAXNAMELEN: c_int = 256; +pub const NFT_USERDATA_MAXLEN: c_int = 256; + +pub const NFT_REG_VERDICT: c_int = 0; +pub const NFT_REG_1: c_int = 1; +pub const NFT_REG_2: 
c_int = 2; +pub const NFT_REG_3: c_int = 3; +pub const NFT_REG_4: c_int = 4; +pub const __NFT_REG_MAX: c_int = 5; +pub const NFT_REG32_00: c_int = 8; +pub const NFT_REG32_01: c_int = 9; +pub const NFT_REG32_02: c_int = 10; +pub const NFT_REG32_03: c_int = 11; +pub const NFT_REG32_04: c_int = 12; +pub const NFT_REG32_05: c_int = 13; +pub const NFT_REG32_06: c_int = 14; +pub const NFT_REG32_07: c_int = 15; +pub const NFT_REG32_08: c_int = 16; +pub const NFT_REG32_09: c_int = 17; +pub const NFT_REG32_10: c_int = 18; +pub const NFT_REG32_11: c_int = 19; +pub const NFT_REG32_12: c_int = 20; +pub const NFT_REG32_13: c_int = 21; +pub const NFT_REG32_14: c_int = 22; +pub const NFT_REG32_15: c_int = 23; + +pub const NFT_REG_SIZE: c_int = 16; +pub const NFT_REG32_SIZE: c_int = 4; + +pub const NFT_CONTINUE: c_int = -1; +pub const NFT_BREAK: c_int = -2; +pub const NFT_JUMP: c_int = -3; +pub const NFT_GOTO: c_int = -4; +pub const NFT_RETURN: c_int = -5; + +pub const NFT_MSG_NEWTABLE: c_int = 0; +pub const NFT_MSG_GETTABLE: c_int = 1; +pub const NFT_MSG_DELTABLE: c_int = 2; +pub const NFT_MSG_NEWCHAIN: c_int = 3; +pub const NFT_MSG_GETCHAIN: c_int = 4; +pub const NFT_MSG_DELCHAIN: c_int = 5; +pub const NFT_MSG_NEWRULE: c_int = 6; +pub const NFT_MSG_GETRULE: c_int = 7; +pub const NFT_MSG_DELRULE: c_int = 8; +pub const NFT_MSG_NEWSET: c_int = 9; +pub const NFT_MSG_GETSET: c_int = 10; +pub const NFT_MSG_DELSET: c_int = 11; +pub const NFT_MSG_NEWSETELEM: c_int = 12; +pub const NFT_MSG_GETSETELEM: c_int = 13; +pub const NFT_MSG_DELSETELEM: c_int = 14; +pub const NFT_MSG_NEWGEN: c_int = 15; +pub const NFT_MSG_GETGEN: c_int = 16; +pub const NFT_MSG_TRACE: c_int = 17; +cfg_if! { + if #[cfg(not(target_arch = "sparc64"))] { + pub const NFT_MSG_NEWOBJ: c_int = 18; + pub const NFT_MSG_GETOBJ: c_int = 19; + pub const NFT_MSG_DELOBJ: c_int = 20; + pub const NFT_MSG_GETOBJ_RESET: c_int = 21; + } +} +pub const NFT_MSG_MAX: c_int = 25; + +pub const NFT_SET_ANONYMOUS: c_int = 0x1; +pub const NFT_SET_CONSTANT: c_int = 0x2; +pub const NFT_SET_INTERVAL: c_int = 0x4; +pub const NFT_SET_MAP: c_int = 0x8; +pub const NFT_SET_TIMEOUT: c_int = 0x10; +pub const NFT_SET_EVAL: c_int = 0x20; + +pub const NFT_SET_POL_PERFORMANCE: c_int = 0; +pub const NFT_SET_POL_MEMORY: c_int = 1; + +pub const NFT_SET_ELEM_INTERVAL_END: c_int = 0x1; + +pub const NFT_DATA_VALUE: c_uint = 0; +pub const NFT_DATA_VERDICT: c_uint = 0xffffff00; + +pub const NFT_DATA_RESERVED_MASK: c_uint = 0xffffff00; + +pub const NFT_DATA_VALUE_MAXLEN: c_int = 64; + +pub const NFT_BYTEORDER_NTOH: c_int = 0; +pub const NFT_BYTEORDER_HTON: c_int = 1; + +pub const NFT_CMP_EQ: c_int = 0; +pub const NFT_CMP_NEQ: c_int = 1; +pub const NFT_CMP_LT: c_int = 2; +pub const NFT_CMP_LTE: c_int = 3; +pub const NFT_CMP_GT: c_int = 4; +pub const NFT_CMP_GTE: c_int = 5; + +pub const NFT_RANGE_EQ: c_int = 0; +pub const NFT_RANGE_NEQ: c_int = 1; + +pub const NFT_LOOKUP_F_INV: c_int = 1 << 0; + +pub const NFT_DYNSET_OP_ADD: c_int = 0; +pub const NFT_DYNSET_OP_UPDATE: c_int = 1; + +pub const NFT_DYNSET_F_INV: c_int = 1 << 0; + +pub const NFT_PAYLOAD_LL_HEADER: c_int = 0; +pub const NFT_PAYLOAD_NETWORK_HEADER: c_int = 1; +pub const NFT_PAYLOAD_TRANSPORT_HEADER: c_int = 2; + +pub const NFT_PAYLOAD_CSUM_NONE: c_int = 0; +pub const NFT_PAYLOAD_CSUM_INET: c_int = 1; + +pub const NFT_META_LEN: c_int = 0; +pub const NFT_META_PROTOCOL: c_int = 1; +pub const NFT_META_PRIORITY: c_int = 2; +pub const NFT_META_MARK: c_int = 3; +pub const NFT_META_IIF: c_int = 4; +pub const NFT_META_OIF: c_int = 5; +pub 
const NFT_META_IIFNAME: c_int = 6; +pub const NFT_META_OIFNAME: c_int = 7; +pub const NFT_META_IIFTYPE: c_int = 8; +pub const NFT_META_OIFTYPE: c_int = 9; +pub const NFT_META_SKUID: c_int = 10; +pub const NFT_META_SKGID: c_int = 11; +pub const NFT_META_NFTRACE: c_int = 12; +pub const NFT_META_RTCLASSID: c_int = 13; +pub const NFT_META_SECMARK: c_int = 14; +pub const NFT_META_NFPROTO: c_int = 15; +pub const NFT_META_L4PROTO: c_int = 16; +pub const NFT_META_BRI_IIFNAME: c_int = 17; +pub const NFT_META_BRI_OIFNAME: c_int = 18; +pub const NFT_META_PKTTYPE: c_int = 19; +pub const NFT_META_CPU: c_int = 20; +pub const NFT_META_IIFGROUP: c_int = 21; +pub const NFT_META_OIFGROUP: c_int = 22; +pub const NFT_META_CGROUP: c_int = 23; +pub const NFT_META_PRANDOM: c_int = 24; + +pub const NFT_CT_STATE: c_int = 0; +pub const NFT_CT_DIRECTION: c_int = 1; +pub const NFT_CT_STATUS: c_int = 2; +pub const NFT_CT_MARK: c_int = 3; +pub const NFT_CT_SECMARK: c_int = 4; +pub const NFT_CT_EXPIRATION: c_int = 5; +pub const NFT_CT_HELPER: c_int = 6; +pub const NFT_CT_L3PROTOCOL: c_int = 7; +pub const NFT_CT_SRC: c_int = 8; +pub const NFT_CT_DST: c_int = 9; +pub const NFT_CT_PROTOCOL: c_int = 10; +pub const NFT_CT_PROTO_SRC: c_int = 11; +pub const NFT_CT_PROTO_DST: c_int = 12; +pub const NFT_CT_LABELS: c_int = 13; +pub const NFT_CT_PKTS: c_int = 14; +pub const NFT_CT_BYTES: c_int = 15; +pub const NFT_CT_AVGPKT: c_int = 16; +pub const NFT_CT_ZONE: c_int = 17; +pub const NFT_CT_EVENTMASK: c_int = 18; +pub const NFT_CT_SRC_IP: c_int = 19; +pub const NFT_CT_DST_IP: c_int = 20; +pub const NFT_CT_SRC_IP6: c_int = 21; +pub const NFT_CT_DST_IP6: c_int = 22; + +pub const NFT_LIMIT_PKTS: c_int = 0; +pub const NFT_LIMIT_PKT_BYTES: c_int = 1; + +pub const NFT_LIMIT_F_INV: c_int = 1 << 0; + +pub const NFT_QUEUE_FLAG_BYPASS: c_int = 0x01; +pub const NFT_QUEUE_FLAG_CPU_FANOUT: c_int = 0x02; +pub const NFT_QUEUE_FLAG_MASK: c_int = 0x03; + +pub const NFT_QUOTA_F_INV: c_int = 1 << 0; + +pub const NFT_REJECT_ICMP_UNREACH: c_int = 0; +pub const NFT_REJECT_TCP_RST: c_int = 1; +pub const NFT_REJECT_ICMPX_UNREACH: c_int = 2; + +pub const NFT_REJECT_ICMPX_NO_ROUTE: c_int = 0; +pub const NFT_REJECT_ICMPX_PORT_UNREACH: c_int = 1; +pub const NFT_REJECT_ICMPX_HOST_UNREACH: c_int = 2; +pub const NFT_REJECT_ICMPX_ADMIN_PROHIBITED: c_int = 3; + +pub const NFT_NAT_SNAT: c_int = 0; +pub const NFT_NAT_DNAT: c_int = 1; + +pub const NFT_TRACETYPE_UNSPEC: c_int = 0; +pub const NFT_TRACETYPE_POLICY: c_int = 1; +pub const NFT_TRACETYPE_RETURN: c_int = 2; +pub const NFT_TRACETYPE_RULE: c_int = 3; + +pub const NFT_NG_INCREMENTAL: c_int = 0; +pub const NFT_NG_RANDOM: c_int = 1; + +// linux/input.h +pub const FF_MAX: __u16 = 0x7f; +pub const FF_CNT: usize = FF_MAX as usize + 1; + +// linux/input-event-codes.h +pub const INPUT_PROP_POINTER: __u16 = 0x00; +pub const INPUT_PROP_DIRECT: __u16 = 0x01; +pub const INPUT_PROP_BUTTONPAD: __u16 = 0x02; +pub const INPUT_PROP_SEMI_MT: __u16 = 0x03; +pub const INPUT_PROP_TOPBUTTONPAD: __u16 = 0x04; +pub const INPUT_PROP_POINTING_STICK: __u16 = 0x05; +pub const INPUT_PROP_ACCELEROMETER: __u16 = 0x06; +pub const INPUT_PROP_MAX: __u16 = 0x1f; +pub const INPUT_PROP_CNT: usize = INPUT_PROP_MAX as usize + 1; +pub const EV_MAX: __u16 = 0x1f; +pub const EV_CNT: usize = EV_MAX as usize + 1; +pub const SYN_MAX: __u16 = 0xf; +pub const SYN_CNT: usize = SYN_MAX as usize + 1; +pub const KEY_MAX: __u16 = 0x2ff; +pub const KEY_CNT: usize = KEY_MAX as usize + 1; +pub const REL_MAX: __u16 = 0x0f; +pub const REL_CNT: usize = REL_MAX as 
usize + 1; +pub const ABS_MAX: __u16 = 0x3f; +pub const ABS_CNT: usize = ABS_MAX as usize + 1; +pub const SW_MAX: __u16 = 0x10; +pub const SW_CNT: usize = SW_MAX as usize + 1; +pub const MSC_MAX: __u16 = 0x07; +pub const MSC_CNT: usize = MSC_MAX as usize + 1; +pub const LED_MAX: __u16 = 0x0f; +pub const LED_CNT: usize = LED_MAX as usize + 1; +pub const REP_MAX: __u16 = 0x01; +pub const REP_CNT: usize = REP_MAX as usize + 1; +pub const SND_MAX: __u16 = 0x07; +pub const SND_CNT: usize = SND_MAX as usize + 1; + +// linux/uinput.h +pub const UINPUT_VERSION: c_uint = 5; +pub const UINPUT_MAX_NAME_SIZE: usize = 80; + +// uapi/linux/fanotify.h +pub const FAN_ACCESS: u64 = 0x0000_0001; +pub const FAN_MODIFY: u64 = 0x0000_0002; +pub const FAN_ATTRIB: u64 = 0x0000_0004; +pub const FAN_CLOSE_WRITE: u64 = 0x0000_0008; +pub const FAN_CLOSE_NOWRITE: u64 = 0x0000_0010; +pub const FAN_OPEN: u64 = 0x0000_0020; +pub const FAN_MOVED_FROM: u64 = 0x0000_0040; +pub const FAN_MOVED_TO: u64 = 0x0000_0080; +pub const FAN_CREATE: u64 = 0x0000_0100; +pub const FAN_DELETE: u64 = 0x0000_0200; +pub const FAN_DELETE_SELF: u64 = 0x0000_0400; +pub const FAN_MOVE_SELF: u64 = 0x0000_0800; +pub const FAN_OPEN_EXEC: u64 = 0x0000_1000; + +pub const FAN_Q_OVERFLOW: u64 = 0x0000_4000; +pub const FAN_FS_ERROR: u64 = 0x0000_8000; + +pub const FAN_OPEN_PERM: u64 = 0x0001_0000; +pub const FAN_ACCESS_PERM: u64 = 0x0002_0000; +pub const FAN_OPEN_EXEC_PERM: u64 = 0x0004_0000; + +pub const FAN_EVENT_ON_CHILD: u64 = 0x0800_0000; + +pub const FAN_RENAME: u64 = 0x1000_0000; + +pub const FAN_ONDIR: u64 = 0x4000_0000; + +pub const FAN_CLOSE: u64 = FAN_CLOSE_WRITE | FAN_CLOSE_NOWRITE; +pub const FAN_MOVE: u64 = FAN_MOVED_FROM | FAN_MOVED_TO; + +pub const FAN_CLOEXEC: c_uint = 0x0000_0001; +pub const FAN_NONBLOCK: c_uint = 0x0000_0002; + +pub const FAN_CLASS_NOTIF: c_uint = 0x0000_0000; +pub const FAN_CLASS_CONTENT: c_uint = 0x0000_0004; +pub const FAN_CLASS_PRE_CONTENT: c_uint = 0x0000_0008; + +pub const FAN_UNLIMITED_QUEUE: c_uint = 0x0000_0010; +pub const FAN_UNLIMITED_MARKS: c_uint = 0x0000_0020; +pub const FAN_ENABLE_AUDIT: c_uint = 0x0000_0040; + +pub const FAN_REPORT_PIDFD: c_uint = 0x0000_0080; +pub const FAN_REPORT_TID: c_uint = 0x0000_0100; +pub const FAN_REPORT_FID: c_uint = 0x0000_0200; +pub const FAN_REPORT_DIR_FID: c_uint = 0x0000_0400; +pub const FAN_REPORT_NAME: c_uint = 0x0000_0800; +pub const FAN_REPORT_TARGET_FID: c_uint = 0x0000_1000; + +pub const FAN_REPORT_DFID_NAME: c_uint = FAN_REPORT_DIR_FID | FAN_REPORT_NAME; +pub const FAN_REPORT_DFID_NAME_TARGET: c_uint = + FAN_REPORT_DFID_NAME | FAN_REPORT_FID | FAN_REPORT_TARGET_FID; + +pub const FAN_MARK_ADD: c_uint = 0x0000_0001; +pub const FAN_MARK_REMOVE: c_uint = 0x0000_0002; +pub const FAN_MARK_DONT_FOLLOW: c_uint = 0x0000_0004; +pub const FAN_MARK_ONLYDIR: c_uint = 0x0000_0008; +pub const FAN_MARK_IGNORED_MASK: c_uint = 0x0000_0020; +pub const FAN_MARK_IGNORED_SURV_MODIFY: c_uint = 0x0000_0040; +pub const FAN_MARK_FLUSH: c_uint = 0x0000_0080; +pub const FAN_MARK_EVICTABLE: c_uint = 0x0000_0200; +pub const FAN_MARK_IGNORE: c_uint = 0x0000_0400; + +pub const FAN_MARK_INODE: c_uint = 0x0000_0000; +pub const FAN_MARK_MOUNT: c_uint = 0x0000_0010; +pub const FAN_MARK_FILESYSTEM: c_uint = 0x0000_0100; + +pub const FAN_MARK_IGNORE_SURV: c_uint = FAN_MARK_IGNORE | FAN_MARK_IGNORED_SURV_MODIFY; + +pub const FANOTIFY_METADATA_VERSION: u8 = 3; + +pub const FAN_EVENT_INFO_TYPE_FID: u8 = 1; +pub const FAN_EVENT_INFO_TYPE_DFID_NAME: u8 = 2; +pub const FAN_EVENT_INFO_TYPE_DFID: u8 = 3; 
+pub const FAN_EVENT_INFO_TYPE_PIDFD: u8 = 4; +pub const FAN_EVENT_INFO_TYPE_ERROR: u8 = 5; + +pub const FAN_EVENT_INFO_TYPE_OLD_DFID_NAME: u8 = 10; +pub const FAN_EVENT_INFO_TYPE_NEW_DFID_NAME: u8 = 12; + +pub const FAN_RESPONSE_INFO_NONE: u8 = 0; +pub const FAN_RESPONSE_INFO_AUDIT_RULE: u8 = 1; + +pub const FAN_ALLOW: u32 = 0x01; +pub const FAN_DENY: u32 = 0x02; +pub const FAN_AUDIT: u32 = 0x10; +pub const FAN_INFO: u32 = 0x20; + +pub const FAN_NOFD: c_int = -1; +pub const FAN_NOPIDFD: c_int = FAN_NOFD; +pub const FAN_EPIDFD: c_int = -2; + +// linux/futex.h +pub const FUTEX_WAIT: c_int = 0; +pub const FUTEX_WAKE: c_int = 1; +pub const FUTEX_FD: c_int = 2; +pub const FUTEX_REQUEUE: c_int = 3; +pub const FUTEX_CMP_REQUEUE: c_int = 4; +pub const FUTEX_WAKE_OP: c_int = 5; +pub const FUTEX_LOCK_PI: c_int = 6; +pub const FUTEX_UNLOCK_PI: c_int = 7; +pub const FUTEX_TRYLOCK_PI: c_int = 8; +pub const FUTEX_WAIT_BITSET: c_int = 9; +pub const FUTEX_WAKE_BITSET: c_int = 10; +pub const FUTEX_WAIT_REQUEUE_PI: c_int = 11; +pub const FUTEX_CMP_REQUEUE_PI: c_int = 12; +pub const FUTEX_LOCK_PI2: c_int = 13; + +pub const FUTEX_PRIVATE_FLAG: c_int = 128; +pub const FUTEX_CLOCK_REALTIME: c_int = 256; +pub const FUTEX_CMD_MASK: c_int = !(FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME); + +pub const FUTEX_WAITERS: u32 = 0x80000000; +pub const FUTEX_OWNER_DIED: u32 = 0x40000000; +pub const FUTEX_TID_MASK: u32 = 0x3fffffff; + +pub const FUTEX_BITSET_MATCH_ANY: c_int = 0xffffffff; + +pub const FUTEX_OP_SET: c_int = 0; +pub const FUTEX_OP_ADD: c_int = 1; +pub const FUTEX_OP_OR: c_int = 2; +pub const FUTEX_OP_ANDN: c_int = 3; +pub const FUTEX_OP_XOR: c_int = 4; + +pub const FUTEX_OP_OPARG_SHIFT: c_int = 8; + +pub const FUTEX_OP_CMP_EQ: c_int = 0; +pub const FUTEX_OP_CMP_NE: c_int = 1; +pub const FUTEX_OP_CMP_LT: c_int = 2; +pub const FUTEX_OP_CMP_LE: c_int = 3; +pub const FUTEX_OP_CMP_GT: c_int = 4; +pub const FUTEX_OP_CMP_GE: c_int = 5; + +pub fn FUTEX_OP(op: c_int, oparg: c_int, cmp: c_int, cmparg: c_int) -> c_int { + ((op & 0xf) << 28) | ((cmp & 0xf) << 24) | ((oparg & 0xfff) << 12) | (cmparg & 0xfff) +} + +// linux/kexec.h +pub const KEXEC_ON_CRASH: c_int = 0x00000001; +pub const KEXEC_PRESERVE_CONTEXT: c_int = 0x00000002; +pub const KEXEC_ARCH_MASK: c_int = 0xffff0000; +pub const KEXEC_FILE_UNLOAD: c_int = 0x00000001; +pub const KEXEC_FILE_ON_CRASH: c_int = 0x00000002; +pub const KEXEC_FILE_NO_INITRAMFS: c_int = 0x00000004; + +// linux/reboot.h +pub const LINUX_REBOOT_MAGIC1: c_int = 0xfee1dead; +pub const LINUX_REBOOT_MAGIC2: c_int = 672274793; +pub const LINUX_REBOOT_MAGIC2A: c_int = 85072278; +pub const LINUX_REBOOT_MAGIC2B: c_int = 369367448; +pub const LINUX_REBOOT_MAGIC2C: c_int = 537993216; + +pub const LINUX_REBOOT_CMD_RESTART: c_int = 0x01234567; +pub const LINUX_REBOOT_CMD_HALT: c_int = 0xCDEF0123; +pub const LINUX_REBOOT_CMD_CAD_ON: c_int = 0x89ABCDEF; +pub const LINUX_REBOOT_CMD_CAD_OFF: c_int = 0x00000000; +pub const LINUX_REBOOT_CMD_POWER_OFF: c_int = 0x4321FEDC; +pub const LINUX_REBOOT_CMD_RESTART2: c_int = 0xA1B2C3D4; +pub const LINUX_REBOOT_CMD_SW_SUSPEND: c_int = 0xD000FCE2; +pub const LINUX_REBOOT_CMD_KEXEC: c_int = 0x45584543; + +pub const REG_EXTENDED: c_int = 1; +pub const REG_ICASE: c_int = 2; +pub const REG_NEWLINE: c_int = 4; +pub const REG_NOSUB: c_int = 8; + +pub const REG_NOTBOL: c_int = 1; +pub const REG_NOTEOL: c_int = 2; + +pub const REG_ENOSYS: c_int = -1; +pub const REG_NOMATCH: c_int = 1; +pub const REG_BADPAT: c_int = 2; +pub const REG_ECOLLATE: c_int = 3; +pub const REG_ECTYPE: 
c_int = 4; +pub const REG_EESCAPE: c_int = 5; +pub const REG_ESUBREG: c_int = 6; +pub const REG_EBRACK: c_int = 7; +pub const REG_EPAREN: c_int = 8; +pub const REG_EBRACE: c_int = 9; +pub const REG_BADBR: c_int = 10; +pub const REG_ERANGE: c_int = 11; +pub const REG_ESPACE: c_int = 12; +pub const REG_BADRPT: c_int = 13; + +// linux/errqueue.h +pub const SO_EE_ORIGIN_NONE: u8 = 0; +pub const SO_EE_ORIGIN_LOCAL: u8 = 1; +pub const SO_EE_ORIGIN_ICMP: u8 = 2; +pub const SO_EE_ORIGIN_ICMP6: u8 = 3; +pub const SO_EE_ORIGIN_TXSTATUS: u8 = 4; +pub const SO_EE_ORIGIN_TIMESTAMPING: u8 = SO_EE_ORIGIN_TXSTATUS; + +// errno.h +pub const EPERM: c_int = 1; +pub const ENOENT: c_int = 2; +pub const ESRCH: c_int = 3; +pub const EINTR: c_int = 4; +pub const EIO: c_int = 5; +pub const ENXIO: c_int = 6; +pub const E2BIG: c_int = 7; +pub const ENOEXEC: c_int = 8; +pub const EBADF: c_int = 9; +pub const ECHILD: c_int = 10; +pub const EAGAIN: c_int = 11; +pub const ENOMEM: c_int = 12; +pub const EACCES: c_int = 13; +pub const EFAULT: c_int = 14; +pub const ENOTBLK: c_int = 15; +pub const EBUSY: c_int = 16; +pub const EEXIST: c_int = 17; +pub const EXDEV: c_int = 18; +pub const ENODEV: c_int = 19; +pub const ENOTDIR: c_int = 20; +pub const EISDIR: c_int = 21; +pub const EINVAL: c_int = 22; +pub const ENFILE: c_int = 23; +pub const EMFILE: c_int = 24; +pub const ENOTTY: c_int = 25; +pub const ETXTBSY: c_int = 26; +pub const EFBIG: c_int = 27; +pub const ENOSPC: c_int = 28; +pub const ESPIPE: c_int = 29; +pub const EROFS: c_int = 30; +pub const EMLINK: c_int = 31; +pub const EPIPE: c_int = 32; +pub const EDOM: c_int = 33; +pub const ERANGE: c_int = 34; +pub const EWOULDBLOCK: c_int = EAGAIN; + +// linux/sctp.h +pub const SCTP_FUTURE_ASSOC: c_int = 0; +pub const SCTP_CURRENT_ASSOC: c_int = 1; +pub const SCTP_ALL_ASSOC: c_int = 2; +pub const SCTP_RTOINFO: c_int = 0; +pub const SCTP_ASSOCINFO: c_int = 1; +pub const SCTP_INITMSG: c_int = 2; +pub const SCTP_NODELAY: c_int = 3; +pub const SCTP_AUTOCLOSE: c_int = 4; +pub const SCTP_SET_PEER_PRIMARY_ADDR: c_int = 5; +pub const SCTP_PRIMARY_ADDR: c_int = 6; +pub const SCTP_ADAPTATION_LAYER: c_int = 7; +pub const SCTP_DISABLE_FRAGMENTS: c_int = 8; +pub const SCTP_PEER_ADDR_PARAMS: c_int = 9; +pub const SCTP_DEFAULT_SEND_PARAM: c_int = 10; +pub const SCTP_EVENTS: c_int = 11; +pub const SCTP_I_WANT_MAPPED_V4_ADDR: c_int = 12; +pub const SCTP_MAXSEG: c_int = 13; +pub const SCTP_STATUS: c_int = 14; +pub const SCTP_GET_PEER_ADDR_INFO: c_int = 15; +pub const SCTP_DELAYED_ACK_TIME: c_int = 16; +pub const SCTP_DELAYED_ACK: c_int = SCTP_DELAYED_ACK_TIME; +pub const SCTP_DELAYED_SACK: c_int = SCTP_DELAYED_ACK_TIME; +pub const SCTP_CONTEXT: c_int = 17; +pub const SCTP_FRAGMENT_INTERLEAVE: c_int = 18; +pub const SCTP_PARTIAL_DELIVERY_POINT: c_int = 19; +pub const SCTP_MAX_BURST: c_int = 20; +pub const SCTP_AUTH_CHUNK: c_int = 21; +pub const SCTP_HMAC_IDENT: c_int = 22; +pub const SCTP_AUTH_KEY: c_int = 23; +pub const SCTP_AUTH_ACTIVE_KEY: c_int = 24; +pub const SCTP_AUTH_DELETE_KEY: c_int = 25; +pub const SCTP_PEER_AUTH_CHUNKS: c_int = 26; +pub const SCTP_LOCAL_AUTH_CHUNKS: c_int = 27; +pub const SCTP_GET_ASSOC_NUMBER: c_int = 28; +pub const SCTP_GET_ASSOC_ID_LIST: c_int = 29; +pub const SCTP_AUTO_ASCONF: c_int = 30; +pub const SCTP_PEER_ADDR_THLDS: c_int = 31; +pub const SCTP_RECVRCVINFO: c_int = 32; +pub const SCTP_RECVNXTINFO: c_int = 33; +pub const SCTP_DEFAULT_SNDINFO: c_int = 34; +pub const SCTP_AUTH_DEACTIVATE_KEY: c_int = 35; +pub const SCTP_REUSE_PORT: c_int = 36; +pub const 
SCTP_PEER_ADDR_THLDS_V2: c_int = 37; +pub const SCTP_PR_SCTP_NONE: c_int = 0x0000; +pub const SCTP_PR_SCTP_TTL: c_int = 0x0010; +pub const SCTP_PR_SCTP_RTX: c_int = 0x0020; +pub const SCTP_PR_SCTP_PRIO: c_int = 0x0030; +pub const SCTP_PR_SCTP_MAX: c_int = SCTP_PR_SCTP_PRIO; +pub const SCTP_PR_SCTP_MASK: c_int = 0x0030; +pub const SCTP_ENABLE_RESET_STREAM_REQ: c_int = 0x01; +pub const SCTP_ENABLE_RESET_ASSOC_REQ: c_int = 0x02; +pub const SCTP_ENABLE_CHANGE_ASSOC_REQ: c_int = 0x04; +pub const SCTP_ENABLE_STRRESET_MASK: c_int = 0x07; +pub const SCTP_STREAM_RESET_INCOMING: c_int = 0x01; +pub const SCTP_STREAM_RESET_OUTGOING: c_int = 0x02; + +pub const SCTP_INIT: c_int = 0; +pub const SCTP_SNDRCV: c_int = 1; +pub const SCTP_SNDINFO: c_int = 2; +pub const SCTP_RCVINFO: c_int = 3; +pub const SCTP_NXTINFO: c_int = 4; +pub const SCTP_PRINFO: c_int = 5; +pub const SCTP_AUTHINFO: c_int = 6; +pub const SCTP_DSTADDRV4: c_int = 7; +pub const SCTP_DSTADDRV6: c_int = 8; + +pub const SCTP_UNORDERED: c_int = 1 << 0; +pub const SCTP_ADDR_OVER: c_int = 1 << 1; +pub const SCTP_ABORT: c_int = 1 << 2; +pub const SCTP_SACK_IMMEDIATELY: c_int = 1 << 3; +pub const SCTP_SENDALL: c_int = 1 << 6; +pub const SCTP_PR_SCTP_ALL: c_int = 1 << 7; +pub const SCTP_NOTIFICATION: c_int = MSG_NOTIFICATION; +pub const SCTP_EOF: c_int = crate::MSG_FIN; + +/* DCCP socket options */ +pub const DCCP_SOCKOPT_PACKET_SIZE: c_int = 1; +pub const DCCP_SOCKOPT_SERVICE: c_int = 2; +pub const DCCP_SOCKOPT_CHANGE_L: c_int = 3; +pub const DCCP_SOCKOPT_CHANGE_R: c_int = 4; +pub const DCCP_SOCKOPT_GET_CUR_MPS: c_int = 5; +pub const DCCP_SOCKOPT_SERVER_TIMEWAIT: c_int = 6; +pub const DCCP_SOCKOPT_SEND_CSCOV: c_int = 10; +pub const DCCP_SOCKOPT_RECV_CSCOV: c_int = 11; +pub const DCCP_SOCKOPT_AVAILABLE_CCIDS: c_int = 12; +pub const DCCP_SOCKOPT_CCID: c_int = 13; +pub const DCCP_SOCKOPT_TX_CCID: c_int = 14; +pub const DCCP_SOCKOPT_RX_CCID: c_int = 15; +pub const DCCP_SOCKOPT_QPOLICY_ID: c_int = 16; +pub const DCCP_SOCKOPT_QPOLICY_TXQLEN: c_int = 17; +pub const DCCP_SOCKOPT_CCID_RX_INFO: c_int = 128; +pub const DCCP_SOCKOPT_CCID_TX_INFO: c_int = 192; + +/// maximum number of services provided on the same listening port +pub const DCCP_SERVICE_LIST_MAX_LEN: c_int = 32; + +pub const CTL_KERN: c_int = 1; +pub const CTL_VM: c_int = 2; +pub const CTL_NET: c_int = 3; +pub const CTL_FS: c_int = 5; +pub const CTL_DEBUG: c_int = 6; +pub const CTL_DEV: c_int = 7; +pub const CTL_BUS: c_int = 8; +pub const CTL_ABI: c_int = 9; +pub const CTL_CPU: c_int = 10; + +pub const CTL_BUS_ISA: c_int = 1; + +pub const INOTIFY_MAX_USER_INSTANCES: c_int = 1; +pub const INOTIFY_MAX_USER_WATCHES: c_int = 2; +pub const INOTIFY_MAX_QUEUED_EVENTS: c_int = 3; + +pub const KERN_OSTYPE: c_int = 1; +pub const KERN_OSRELEASE: c_int = 2; +pub const KERN_OSREV: c_int = 3; +pub const KERN_VERSION: c_int = 4; +pub const KERN_SECUREMASK: c_int = 5; +pub const KERN_PROF: c_int = 6; +pub const KERN_NODENAME: c_int = 7; +pub const KERN_DOMAINNAME: c_int = 8; +pub const KERN_PANIC: c_int = 15; +pub const KERN_REALROOTDEV: c_int = 16; +pub const KERN_SPARC_REBOOT: c_int = 21; +pub const KERN_CTLALTDEL: c_int = 22; +pub const KERN_PRINTK: c_int = 23; +pub const KERN_NAMETRANS: c_int = 24; +pub const KERN_PPC_HTABRECLAIM: c_int = 25; +pub const KERN_PPC_ZEROPAGED: c_int = 26; +pub const KERN_PPC_POWERSAVE_NAP: c_int = 27; +pub const KERN_MODPROBE: c_int = 28; +pub const KERN_SG_BIG_BUFF: c_int = 29; +pub const KERN_ACCT: c_int = 30; +pub const KERN_PPC_L2CR: c_int = 31; +pub const KERN_RTSIGNR: 
c_int = 32; +pub const KERN_RTSIGMAX: c_int = 33; +pub const KERN_SHMMAX: c_int = 34; +pub const KERN_MSGMAX: c_int = 35; +pub const KERN_MSGMNB: c_int = 36; +pub const KERN_MSGPOOL: c_int = 37; +pub const KERN_SYSRQ: c_int = 38; +pub const KERN_MAX_THREADS: c_int = 39; +pub const KERN_RANDOM: c_int = 40; +pub const KERN_SHMALL: c_int = 41; +pub const KERN_MSGMNI: c_int = 42; +pub const KERN_SEM: c_int = 43; +pub const KERN_SPARC_STOP_A: c_int = 44; +pub const KERN_SHMMNI: c_int = 45; +pub const KERN_OVERFLOWUID: c_int = 46; +pub const KERN_OVERFLOWGID: c_int = 47; +pub const KERN_SHMPATH: c_int = 48; +pub const KERN_HOTPLUG: c_int = 49; +pub const KERN_IEEE_EMULATION_WARNINGS: c_int = 50; +pub const KERN_S390_USER_DEBUG_LOGGING: c_int = 51; +pub const KERN_CORE_USES_PID: c_int = 52; +pub const KERN_TAINTED: c_int = 53; +pub const KERN_CADPID: c_int = 54; +pub const KERN_PIDMAX: c_int = 55; +pub const KERN_CORE_PATTERN: c_int = 56; +pub const KERN_PANIC_ON_OOPS: c_int = 57; +pub const KERN_HPPA_PWRSW: c_int = 58; +pub const KERN_HPPA_UNALIGNED: c_int = 59; +pub const KERN_PRINTK_RATELIMIT: c_int = 60; +pub const KERN_PRINTK_RATELIMIT_BURST: c_int = 61; +pub const KERN_PTY: c_int = 62; +pub const KERN_NGROUPS_MAX: c_int = 63; +pub const KERN_SPARC_SCONS_PWROFF: c_int = 64; +pub const KERN_HZ_TIMER: c_int = 65; +pub const KERN_UNKNOWN_NMI_PANIC: c_int = 66; +pub const KERN_BOOTLOADER_TYPE: c_int = 67; +pub const KERN_RANDOMIZE: c_int = 68; +pub const KERN_SETUID_DUMPABLE: c_int = 69; +pub const KERN_SPIN_RETRY: c_int = 70; +pub const KERN_ACPI_VIDEO_FLAGS: c_int = 71; +pub const KERN_IA64_UNALIGNED: c_int = 72; +pub const KERN_COMPAT_LOG: c_int = 73; +pub const KERN_MAX_LOCK_DEPTH: c_int = 74; +pub const KERN_NMI_WATCHDOG: c_int = 75; +pub const KERN_PANIC_ON_NMI: c_int = 76; + +pub const VM_OVERCOMMIT_MEMORY: c_int = 5; +pub const VM_PAGE_CLUSTER: c_int = 10; +pub const VM_DIRTY_BACKGROUND: c_int = 11; +pub const VM_DIRTY_RATIO: c_int = 12; +pub const VM_DIRTY_WB_CS: c_int = 13; +pub const VM_DIRTY_EXPIRE_CS: c_int = 14; +pub const VM_NR_PDFLUSH_THREADS: c_int = 15; +pub const VM_OVERCOMMIT_RATIO: c_int = 16; +pub const VM_PAGEBUF: c_int = 17; +pub const VM_HUGETLB_PAGES: c_int = 18; +pub const VM_SWAPPINESS: c_int = 19; +pub const VM_LOWMEM_RESERVE_RATIO: c_int = 20; +pub const VM_MIN_FREE_KBYTES: c_int = 21; +pub const VM_MAX_MAP_COUNT: c_int = 22; +pub const VM_LAPTOP_MODE: c_int = 23; +pub const VM_BLOCK_DUMP: c_int = 24; +pub const VM_HUGETLB_GROUP: c_int = 25; +pub const VM_VFS_CACHE_PRESSURE: c_int = 26; +pub const VM_LEGACY_VA_LAYOUT: c_int = 27; +pub const VM_SWAP_TOKEN_TIMEOUT: c_int = 28; +pub const VM_DROP_PAGECACHE: c_int = 29; +pub const VM_PERCPU_PAGELIST_FRACTION: c_int = 30; +pub const VM_ZONE_RECLAIM_MODE: c_int = 31; +pub const VM_MIN_UNMAPPED: c_int = 32; +pub const VM_PANIC_ON_OOM: c_int = 33; +pub const VM_VDSO_ENABLED: c_int = 34; +pub const VM_MIN_SLAB: c_int = 35; + +pub const NET_CORE: c_int = 1; +pub const NET_ETHER: c_int = 2; +pub const NET_802: c_int = 3; +pub const NET_UNIX: c_int = 4; +pub const NET_IPV4: c_int = 5; +pub const NET_IPX: c_int = 6; +pub const NET_ATALK: c_int = 7; +pub const NET_NETROM: c_int = 8; +pub const NET_AX25: c_int = 9; +pub const NET_BRIDGE: c_int = 10; +pub const NET_ROSE: c_int = 11; +pub const NET_IPV6: c_int = 12; +pub const NET_X25: c_int = 13; +pub const NET_TR: c_int = 14; +pub const NET_DECNET: c_int = 15; +pub const NET_ECONET: c_int = 16; +pub const NET_SCTP: c_int = 17; +pub const NET_LLC: c_int = 18; +pub const 
NET_NETFILTER: c_int = 19; +pub const NET_DCCP: c_int = 20; +pub const NET_IRDA: c_int = 412; + +// include/linux/sched.h +/// I'm a virtual CPU. +pub const PF_VCPU: c_int = 0x00000001; +/// I am an IDLE thread. +pub const PF_IDLE: c_int = 0x00000002; +/// Getting shut down. +pub const PF_EXITING: c_int = 0x00000004; +/// Coredumps should ignore this task. +pub const PF_POSTCOREDUMP: c_int = 0x00000008; +/// Task is an IO worker. +pub const PF_IO_WORKER: c_int = 0x00000010; +/// I'm a workqueue worker. +pub const PF_WQ_WORKER: c_int = 0x00000020; +/// Forked but didn't exec. +pub const PF_FORKNOEXEC: c_int = 0x00000040; +/// Process policy on mce errors. +pub const PF_MCE_PROCESS: c_int = 0x00000080; +/// Used super-user privileges. +pub const PF_SUPERPRIV: c_int = 0x00000100; +/// Dumped core. +pub const PF_DUMPCORE: c_int = 0x00000200; +/// Killed by a signal. +pub const PF_SIGNALED: c_int = 0x00000400; +/// Allocating memory to free memory. +/// +/// See `memalloc_noreclaim_save()`. +pub const PF_MEMALLOC: c_int = 0x00000800; +/// `set_user()` noticed that `RLIMIT_NPROC` was exceeded. +pub const PF_NPROC_EXCEEDED: c_int = 0x00001000; +/// If unset the fpu must be initialized before use. +pub const PF_USED_MATH: c_int = 0x00002000; +/// Kernel thread cloned from userspace thread. +pub const PF_USER_WORKER: c_int = 0x00004000; +/// This thread should not be frozen. +pub const PF_NOFREEZE: c_int = 0x00008000; +/// I am `kswapd`. +pub const PF_KSWAPD: c_int = 0x00020000; +/// All allocations inherit `GFP_NOFS`. +/// +/// See `memalloc_nfs_save()`. +pub const PF_MEMALLOC_NOFS: c_int = 0x00040000; +/// All allocations inherit `GFP_NOIO`. +/// +/// See `memalloc_noio_save()`. +pub const PF_MEMALLOC_NOIO: c_int = 0x00080000; +/// Throttle writes only against the bdi I write to, I am cleaning +/// dirty pages from some other bdi. +pub const PF_LOCAL_THROTTLE: c_int = 0x00100000; +/// I am a kernel thread. +pub const PF_KTHREAD: c_int = 0x00200000; +/// Randomize virtual address space. +pub const PF_RANDOMIZE: c_int = 0x00400000; +/// Userland is not allowed to meddle with `cpus_mask`. +pub const PF_NO_SETAFFINITY: c_int = 0x04000000; +/// Early kill for mce process policy. +pub const PF_MCE_EARLY: c_int = 0x08000000; +/// Allocations constrained to zones which allow long term pinning. +/// +/// See `memalloc_pin_save()`. +pub const PF_MEMALLOC_PIN: c_int = 0x10000000; +/// Plug has ts that needs updating. +pub const PF_BLOCK_TS: c_int = 0x20000000; +/// This thread called `freeze_processes()` and should not be frozen. +pub const PF_SUSPEND_TASK: c_int = PF_SUSPEND_TASK_UINT as _; +// The used value is the highest possible bit fitting on 32 bits, so directly +// defining it as a signed integer causes the compiler to report an overflow. +// Use instead a private intermediary that assuringly has the correct type and +// cast it where necessary to the wanted final type, which preserves the +// desired information as-is in terms of integer representation. 
+const PF_SUSPEND_TASK_UINT: c_uint = 0x80000000; + +pub const CSIGNAL: c_int = 0x000000ff; + +pub const SCHED_NORMAL: c_int = 0; +pub const SCHED_OTHER: c_int = 0; +pub const SCHED_FIFO: c_int = 1; +pub const SCHED_RR: c_int = 2; +pub const SCHED_BATCH: c_int = 3; +pub const SCHED_IDLE: c_int = 5; +pub const SCHED_DEADLINE: c_int = 6; + +pub const SCHED_RESET_ON_FORK: c_int = 0x40000000; + +pub const CLONE_PIDFD: c_int = 0x1000; + +pub const SCHED_FLAG_RESET_ON_FORK: c_int = 0x01; +pub const SCHED_FLAG_RECLAIM: c_int = 0x02; +pub const SCHED_FLAG_DL_OVERRUN: c_int = 0x04; +pub const SCHED_FLAG_KEEP_POLICY: c_int = 0x08; +pub const SCHED_FLAG_KEEP_PARAMS: c_int = 0x10; +pub const SCHED_FLAG_UTIL_CLAMP_MIN: c_int = 0x20; +pub const SCHED_FLAG_UTIL_CLAMP_MAX: c_int = 0x40; + +// linux/if_xdp.h +pub const XDP_SHARED_UMEM: crate::__u16 = 1 << 0; +pub const XDP_COPY: crate::__u16 = 1 << 1; +pub const XDP_ZEROCOPY: crate::__u16 = 1 << 2; +pub const XDP_USE_NEED_WAKEUP: crate::__u16 = 1 << 3; +pub const XDP_USE_SG: crate::__u16 = 1 << 4; + +pub const XDP_UMEM_UNALIGNED_CHUNK_FLAG: crate::__u32 = 1 << 0; + +pub const XDP_RING_NEED_WAKEUP: crate::__u32 = 1 << 0; + +pub const XDP_MMAP_OFFSETS: c_int = 1; +pub const XDP_RX_RING: c_int = 2; +pub const XDP_TX_RING: c_int = 3; +pub const XDP_UMEM_REG: c_int = 4; +pub const XDP_UMEM_FILL_RING: c_int = 5; +pub const XDP_UMEM_COMPLETION_RING: c_int = 6; +pub const XDP_STATISTICS: c_int = 7; +pub const XDP_OPTIONS: c_int = 8; + +pub const XDP_OPTIONS_ZEROCOPY: crate::__u32 = 1 << 0; + +pub const XDP_PGOFF_RX_RING: crate::off_t = 0; +pub const XDP_PGOFF_TX_RING: crate::off_t = 0x80000000; +pub const XDP_UMEM_PGOFF_FILL_RING: crate::c_ulonglong = 0x100000000; +pub const XDP_UMEM_PGOFF_COMPLETION_RING: crate::c_ulonglong = 0x180000000; + +pub const XSK_UNALIGNED_BUF_OFFSET_SHIFT: crate::c_int = 48; +pub const XSK_UNALIGNED_BUF_ADDR_MASK: crate::c_ulonglong = + (1 << XSK_UNALIGNED_BUF_OFFSET_SHIFT) - 1; + +pub const XDP_PKT_CONTD: crate::__u32 = 1 << 0; + +pub const XDP_UMEM_TX_SW_CSUM: crate::__u32 = 1 << 1; +pub const XDP_UMEM_TX_METADATA_LEN: crate::__u32 = 1 << 2; + +pub const XDP_TXMD_FLAGS_TIMESTAMP: crate::__u32 = 1 << 0; +pub const XDP_TXMD_FLAGS_CHECKSUM: crate::__u32 = 1 << 1; + +pub const XDP_TX_METADATA: crate::__u32 = 1 << 1; + +pub const SOL_XDP: c_int = 283; + +// linux/mount.h +pub const MOUNT_ATTR_RDONLY: crate::__u64 = 0x00000001; +pub const MOUNT_ATTR_NOSUID: crate::__u64 = 0x00000002; +pub const MOUNT_ATTR_NODEV: crate::__u64 = 0x00000004; +pub const MOUNT_ATTR_NOEXEC: crate::__u64 = 0x00000008; +pub const MOUNT_ATTR__ATIME: crate::__u64 = 0x00000070; +pub const MOUNT_ATTR_RELATIME: crate::__u64 = 0x00000000; +pub const MOUNT_ATTR_NOATIME: crate::__u64 = 0x00000010; +pub const MOUNT_ATTR_STRICTATIME: crate::__u64 = 0x00000020; +pub const MOUNT_ATTR_NODIRATIME: crate::__u64 = 0x00000080; +pub const MOUNT_ATTR_IDMAP: crate::__u64 = 0x00100000; +pub const MOUNT_ATTR_NOSYMFOLLOW: crate::__u64 = 0x00200000; + +pub const MOUNT_ATTR_SIZE_VER0: c_int = 32; + +// elf.h +pub const NT_PRSTATUS: c_int = 1; +pub const NT_PRFPREG: c_int = 2; +pub const NT_FPREGSET: c_int = 2; +pub const NT_PRPSINFO: c_int = 3; +pub const NT_PRXREG: c_int = 4; +pub const NT_TASKSTRUCT: c_int = 4; +pub const NT_PLATFORM: c_int = 5; +pub const NT_AUXV: c_int = 6; +pub const NT_GWINDOWS: c_int = 7; +pub const NT_ASRS: c_int = 8; +pub const NT_PSTATUS: c_int = 10; +pub const NT_PSINFO: c_int = 13; +pub const NT_PRCRED: c_int = 14; +pub const NT_UTSNAME: c_int = 15; +pub const 
NT_LWPSTATUS: c_int = 16; +pub const NT_LWPSINFO: c_int = 17; +pub const NT_PRFPXREG: c_int = 20; + +pub const SCHED_FLAG_KEEP_ALL: c_int = SCHED_FLAG_KEEP_POLICY | SCHED_FLAG_KEEP_PARAMS; + +pub const SCHED_FLAG_UTIL_CLAMP: c_int = SCHED_FLAG_UTIL_CLAMP_MIN | SCHED_FLAG_UTIL_CLAMP_MAX; + +pub const SCHED_FLAG_ALL: c_int = SCHED_FLAG_RESET_ON_FORK + | SCHED_FLAG_RECLAIM + | SCHED_FLAG_DL_OVERRUN + | SCHED_FLAG_KEEP_ALL + | SCHED_FLAG_UTIL_CLAMP; + +// ioctl_eventpoll: added in Linux 6.9 +pub const EPIOCSPARAMS: Ioctl = 0x40088a01; +pub const EPIOCGPARAMS: Ioctl = 0x80088a02; + +// siginfo.h +pub const SI_DETHREAD: c_int = -7; +pub const TRAP_PERF: c_int = 6; + +f! { + pub fn NLA_ALIGN(len: c_int) -> c_int { + return ((len) + NLA_ALIGNTO - 1) & !(NLA_ALIGNTO - 1); + } + + pub fn CMSG_NXTHDR(mhdr: *const msghdr, cmsg: *const cmsghdr) -> *mut cmsghdr { + if ((*cmsg).cmsg_len as usize) < size_of::<cmsghdr>() { + return core::ptr::null_mut::<cmsghdr>(); + } + let next = (cmsg as usize + super::CMSG_ALIGN((*cmsg).cmsg_len as usize)) as *mut cmsghdr; + let max = (*mhdr).msg_control as usize + (*mhdr).msg_controllen as usize; + if (next.wrapping_offset(1)) as usize > max + || next as usize + super::CMSG_ALIGN((*next).cmsg_len as usize) > max + { + core::ptr::null_mut::<cmsghdr>() + } else { + next + } + } + + pub fn CPU_ALLOC_SIZE(count: c_int) -> size_t { + let _dummy: cpu_set_t = mem::zeroed(); + let size_in_bits = 8 * size_of_val(&_dummy.bits[0]); + ((count as size_t + size_in_bits - 1) / 8) as size_t + } + + pub fn CPU_ZERO(cpuset: &mut cpu_set_t) -> () { + for slot in &mut cpuset.bits { + *slot = 0; + } + } + + pub fn CPU_SET(cpu: usize, cpuset: &mut cpu_set_t) -> () { + let size_in_bits = 8 * size_of_val(&cpuset.bits[0]); // 32, 64 etc + let (idx, offset) = (cpu / size_in_bits, cpu % size_in_bits); + cpuset.bits[idx] |= 1 << offset; + } + + pub fn CPU_CLR(cpu: usize, cpuset: &mut cpu_set_t) -> () { + let size_in_bits = 8 * size_of_val(&cpuset.bits[0]); // 32, 64 etc + let (idx, offset) = (cpu / size_in_bits, cpu % size_in_bits); + cpuset.bits[idx] &= !(1 << offset); + } + + pub fn CPU_ISSET(cpu: usize, cpuset: &cpu_set_t) -> bool { + let size_in_bits = 8 * size_of_val(&cpuset.bits[0]); + let (idx, offset) = (cpu / size_in_bits, cpu % size_in_bits); + 0 != (cpuset.bits[idx] & (1 << offset)) + } + + pub fn CPU_COUNT_S(size: usize, cpuset: &cpu_set_t) -> c_int { + let mut s: u32 = 0; + let size_of_mask = size_of_val(&cpuset.bits[0]); + for i in &cpuset.bits[..(size / size_of_mask)] { + s += i.count_ones(); + } + s as c_int + } + + pub fn CPU_COUNT(cpuset: &cpu_set_t) -> c_int { + CPU_COUNT_S(size_of::<cpu_set_t>(), cpuset) + } + + pub fn CPU_EQUAL(set1: &cpu_set_t, set2: &cpu_set_t) -> bool { + set1.bits == set2.bits + } + + pub fn SCTP_PR_INDEX(policy: c_int) -> c_int { + policy >> (4 - 1) + } + + pub fn SCTP_PR_POLICY(policy: c_int) -> c_int { + policy & SCTP_PR_SCTP_MASK + } + + pub fn SCTP_PR_SET_POLICY(flags: &mut c_int, policy: c_int) -> () { + *flags &= !SCTP_PR_SCTP_MASK; + *flags |= policy; + } + + pub fn IPTOS_TOS(tos: u8) -> u8 { + tos & IPTOS_TOS_MASK + } + + pub fn IPTOS_PREC(tos: u8) -> u8 { + tos & IPTOS_PREC_MASK + } + + pub fn RT_TOS(tos: u8) -> u8 { + tos & crate::IPTOS_TOS_MASK + } + + pub fn RT_ADDRCLASS(flags: u32) -> u32 { + flags >> 23 + } + + pub fn RT_LOCALADDR(flags: u32) -> bool { + (flags & RTF_ADDRCLASSMASK) == (RTF_LOCAL | RTF_INTERFACE) + } + + pub fn SO_EE_OFFENDER(ee: *const crate::sock_extended_err) -> *mut crate::sockaddr { + ee.offset(1) as *mut crate::sockaddr + } + + pub fn TPACKET_ALIGN(x: 
usize) -> usize { + (x + TPACKET_ALIGNMENT - 1) & !(TPACKET_ALIGNMENT - 1) + } + + pub fn BPF_CLASS(code: __u32) -> __u32 { + code & 0x07 + } + + pub fn BPF_SIZE(code: __u32) -> __u32 { + code & 0x18 + } + + pub fn BPF_MODE(code: __u32) -> __u32 { + code & 0xe0 + } + + pub fn BPF_OP(code: __u32) -> __u32 { + code & 0xf0 + } + + pub fn BPF_SRC(code: __u32) -> __u32 { + code & 0x08 + } + + pub fn BPF_RVAL(code: __u32) -> __u32 { + code & 0x18 + } + + pub fn BPF_MISCOP(code: __u32) -> __u32 { + code & 0xf8 + } + + pub fn BPF_STMT(code: __u16, k: __u32) -> sock_filter { + sock_filter { + code, + jt: 0, + jf: 0, + k, + } + } + + pub fn BPF_JUMP(code: __u16, k: __u32, jt: __u8, jf: __u8) -> sock_filter { + sock_filter { code, jt, jf, k } + } + + pub fn ELF32_R_SYM(val: Elf32_Word) -> Elf32_Word { + val >> 8 + } + + pub fn ELF32_R_TYPE(val: Elf32_Word) -> Elf32_Word { + val & 0xff + } + + pub fn ELF32_R_INFO(sym: Elf32_Word, t: Elf32_Word) -> Elf32_Word { + sym << (8 + t) & 0xff + } + + pub fn ELF64_R_SYM(val: Elf64_Xword) -> Elf64_Xword { + val >> 32 + } + + pub fn ELF64_R_TYPE(val: Elf64_Xword) -> Elf64_Xword { + val & 0xffffffff + } + + pub fn ELF64_R_INFO(sym: Elf64_Xword, t: Elf64_Xword) -> Elf64_Xword { + sym << (32 + t) + } +} + +safe_f! { + pub const fn makedev(major: c_uint, minor: c_uint) -> crate::dev_t { + let major = major as crate::dev_t; + let minor = minor as crate::dev_t; + let mut dev = 0; + dev |= (major & 0x00000fff) << 8; + dev |= (major & 0xfffff000) << 32; + dev |= (minor & 0x000000ff) << 0; + dev |= (minor & 0xffffff00) << 12; + dev + } + + pub const fn major(dev: crate::dev_t) -> c_uint { + let mut major = 0; + major |= (dev & 0x00000000000fff00) >> 8; + major |= (dev & 0xfffff00000000000) >> 32; + major as c_uint + } + + pub const fn minor(dev: crate::dev_t) -> c_uint { + let mut minor = 0; + minor |= (dev & 0x00000000000000ff) >> 0; + minor |= (dev & 0x00000ffffff00000) >> 12; + minor as c_uint + } + + pub const fn SCTP_PR_TTL_ENABLED(policy: c_int) -> bool { + policy == SCTP_PR_SCTP_TTL + } + + pub const fn SCTP_PR_RTX_ENABLED(policy: c_int) -> bool { + policy == SCTP_PR_SCTP_RTX + } + + pub const fn SCTP_PR_PRIO_ENABLED(policy: c_int) -> bool { + policy == SCTP_PR_SCTP_PRIO + } +} + +cfg_if! { + if #[cfg(all( + any(target_env = "gnu", target_env = "musl", target_env = "ohos"), + any(target_arch = "x86_64", target_arch = "x86") + ))] { + extern "C" { + pub fn iopl(level: c_int) -> c_int; + pub fn ioperm(from: c_ulong, num: c_ulong, turn_on: c_int) -> c_int; + } + } +} + +cfg_if! 
{ + if #[cfg(all(not(target_env = "uclibc"), not(target_env = "ohos")))] { + extern "C" { + #[cfg_attr(gnu_file_offset_bits64, link_name = "aio_read64")] + pub fn aio_read(aiocbp: *mut aiocb) -> c_int; + #[cfg_attr(gnu_file_offset_bits64, link_name = "aio_write64")] + pub fn aio_write(aiocbp: *mut aiocb) -> c_int; + pub fn aio_fsync(op: c_int, aiocbp: *mut aiocb) -> c_int; + #[cfg_attr(gnu_file_offset_bits64, link_name = "aio_error64")] + pub fn aio_error(aiocbp: *const aiocb) -> c_int; + #[cfg_attr(gnu_file_offset_bits64, link_name = "aio_return64")] + pub fn aio_return(aiocbp: *mut aiocb) -> ssize_t; + #[cfg_attr(gnu_time_bits64, link_name = "__aio_suspend_time64")] + pub fn aio_suspend( + aiocb_list: *const *const aiocb, + nitems: c_int, + timeout: *const crate::timespec, + ) -> c_int; + #[cfg_attr(gnu_file_offset_bits64, link_name = "aio_cancel64")] + pub fn aio_cancel(fd: c_int, aiocbp: *mut aiocb) -> c_int; + #[cfg_attr(gnu_file_offset_bits64, link_name = "lio_listio64")] + pub fn lio_listio( + mode: c_int, + aiocb_list: *const *mut aiocb, + nitems: c_int, + sevp: *mut crate::sigevent, + ) -> c_int; + } + } +} + +cfg_if! { + if #[cfg(not(target_env = "uclibc"))] { + extern "C" { + #[cfg_attr(gnu_file_offset_bits64, link_name = "pwritev64")] + pub fn pwritev( + fd: c_int, + iov: *const crate::iovec, + iovcnt: c_int, + offset: off_t, + ) -> ssize_t; + #[cfg_attr(gnu_file_offset_bits64, link_name = "preadv64")] + pub fn preadv( + fd: c_int, + iov: *const crate::iovec, + iovcnt: c_int, + offset: off_t, + ) -> ssize_t; + pub fn getnameinfo( + sa: *const crate::sockaddr, + salen: crate::socklen_t, + host: *mut c_char, + hostlen: crate::socklen_t, + serv: *mut c_char, + servlen: crate::socklen_t, + flags: c_int, + ) -> c_int; + pub fn getloadavg(loadavg: *mut c_double, nelem: c_int) -> c_int; + pub fn process_vm_readv( + pid: crate::pid_t, + local_iov: *const crate::iovec, + liovcnt: c_ulong, + remote_iov: *const crate::iovec, + riovcnt: c_ulong, + flags: c_ulong, + ) -> isize; + pub fn process_vm_writev( + pid: crate::pid_t, + local_iov: *const crate::iovec, + liovcnt: c_ulong, + remote_iov: *const crate::iovec, + riovcnt: c_ulong, + flags: c_ulong, + ) -> isize; + #[cfg_attr(gnu_time_bits64, link_name = "__futimes64")] + pub fn futimes(fd: c_int, times: *const crate::timeval) -> c_int; + } + } +} + +// These functions are not available on OpenHarmony +cfg_if! { + if #[cfg(not(target_env = "ohos"))] { + extern "C" { + // Only `getspnam_r` is implemented for musl, out of all of the reenterant + // functions from `shadow.h`. + // https://git.musl-libc.org/cgit/musl/tree/include/shadow.h + pub fn getspnam_r( + name: *const c_char, + spbuf: *mut spwd, + buf: *mut c_char, + buflen: size_t, + spbufp: *mut *mut spwd, + ) -> c_int; + + pub fn mq_open(name: *const c_char, oflag: c_int, ...) 
-> crate::mqd_t; + pub fn mq_close(mqd: crate::mqd_t) -> c_int; + pub fn mq_unlink(name: *const c_char) -> c_int; + pub fn mq_receive( + mqd: crate::mqd_t, + msg_ptr: *mut c_char, + msg_len: size_t, + msg_prio: *mut c_uint, + ) -> ssize_t; + #[cfg_attr(gnu_time_bits64, link_name = "__mq_timedreceive_time64")] + pub fn mq_timedreceive( + mqd: crate::mqd_t, + msg_ptr: *mut c_char, + msg_len: size_t, + msg_prio: *mut c_uint, + abs_timeout: *const crate::timespec, + ) -> ssize_t; + pub fn mq_send( + mqd: crate::mqd_t, + msg_ptr: *const c_char, + msg_len: size_t, + msg_prio: c_uint, + ) -> c_int; + #[cfg_attr(gnu_time_bits64, link_name = "__mq_timedsend_time64")] + pub fn mq_timedsend( + mqd: crate::mqd_t, + msg_ptr: *const c_char, + msg_len: size_t, + msg_prio: c_uint, + abs_timeout: *const crate::timespec, + ) -> c_int; + pub fn mq_getattr(mqd: crate::mqd_t, attr: *mut crate::mq_attr) -> c_int; + pub fn mq_setattr( + mqd: crate::mqd_t, + newattr: *const crate::mq_attr, + oldattr: *mut crate::mq_attr, + ) -> c_int; + + pub fn pthread_mutex_consistent(mutex: *mut pthread_mutex_t) -> c_int; + pub fn pthread_cancel(thread: crate::pthread_t) -> c_int; + pub fn pthread_mutexattr_getrobust( + attr: *const pthread_mutexattr_t, + robustness: *mut c_int, + ) -> c_int; + pub fn pthread_mutexattr_setrobust( + attr: *mut pthread_mutexattr_t, + robustness: c_int, + ) -> c_int; + } + } +} + +extern "C" { + #[cfg_attr( + not(any(target_env = "musl", target_env = "ohos")), + link_name = "__xpg_strerror_r" + )] + pub fn strerror_r(errnum: c_int, buf: *mut c_char, buflen: size_t) -> c_int; + + pub fn abs(i: c_int) -> c_int; + pub fn labs(i: c_long) -> c_long; + pub fn rand() -> c_int; + pub fn srand(seed: c_uint); + + pub fn drand48() -> c_double; + pub fn erand48(xseed: *mut c_ushort) -> c_double; + pub fn lrand48() -> c_long; + pub fn nrand48(xseed: *mut c_ushort) -> c_long; + pub fn mrand48() -> c_long; + pub fn jrand48(xseed: *mut c_ushort) -> c_long; + pub fn srand48(seed: c_long); + pub fn seed48(xseed: *mut c_ushort) -> *mut c_ushort; + pub fn lcong48(p: *mut c_ushort); + + #[cfg_attr(gnu_time_bits64, link_name = "__lutimes64")] + pub fn lutimes(file: *const c_char, times: *const crate::timeval) -> c_int; + + pub fn setpwent(); + pub fn endpwent(); + pub fn getpwent() -> *mut passwd; + pub fn setgrent(); + pub fn endgrent(); + pub fn getgrent() -> *mut crate::group; + pub fn setspent(); + pub fn endspent(); + pub fn getspent() -> *mut spwd; + + pub fn getspnam(name: *const c_char) -> *mut spwd; + + pub fn shm_open(name: *const c_char, oflag: c_int, mode: mode_t) -> c_int; + pub fn shm_unlink(name: *const c_char) -> c_int; + + // System V IPC + pub fn shmget(key: crate::key_t, size: size_t, shmflg: c_int) -> c_int; + pub fn shmat(shmid: c_int, shmaddr: *const c_void, shmflg: c_int) -> *mut c_void; + pub fn shmdt(shmaddr: *const c_void) -> c_int; + #[cfg_attr(gnu_time_bits64, link_name = "__shmctl64")] + pub fn shmctl(shmid: c_int, cmd: c_int, buf: *mut crate::shmid_ds) -> c_int; + pub fn ftok(pathname: *const c_char, proj_id: c_int) -> crate::key_t; + pub fn semget(key: crate::key_t, nsems: c_int, semflag: c_int) -> c_int; + pub fn semop(semid: c_int, sops: *mut crate::sembuf, nsops: size_t) -> c_int; + #[cfg_attr(gnu_time_bits64, link_name = "__semctl64")] + pub fn semctl(semid: c_int, semnum: c_int, cmd: c_int, ...) 
-> c_int; + #[cfg_attr(gnu_time_bits64, link_name = "__msgctl64")] + pub fn msgctl(msqid: c_int, cmd: c_int, buf: *mut msqid_ds) -> c_int; + pub fn msgget(key: crate::key_t, msgflg: c_int) -> c_int; + pub fn msgrcv( + msqid: c_int, + msgp: *mut c_void, + msgsz: size_t, + msgtyp: c_long, + msgflg: c_int, + ) -> ssize_t; + pub fn msgsnd(msqid: c_int, msgp: *const c_void, msgsz: size_t, msgflg: c_int) -> c_int; + + pub fn mprotect(addr: *mut c_void, len: size_t, prot: c_int) -> c_int; + pub fn __errno_location() -> *mut c_int; + + #[cfg_attr(gnu_file_offset_bits64, link_name = "fallocate64")] + pub fn fallocate(fd: c_int, mode: c_int, offset: off_t, len: off_t) -> c_int; + #[cfg_attr(gnu_file_offset_bits64, link_name = "posix_fallocate64")] + pub fn posix_fallocate(fd: c_int, offset: off_t, len: off_t) -> c_int; + pub fn readahead(fd: c_int, offset: off64_t, count: size_t) -> ssize_t; + pub fn getxattr( + path: *const c_char, + name: *const c_char, + value: *mut c_void, + size: size_t, + ) -> ssize_t; + pub fn lgetxattr( + path: *const c_char, + name: *const c_char, + value: *mut c_void, + size: size_t, + ) -> ssize_t; + pub fn fgetxattr( + filedes: c_int, + name: *const c_char, + value: *mut c_void, + size: size_t, + ) -> ssize_t; + pub fn setxattr( + path: *const c_char, + name: *const c_char, + value: *const c_void, + size: size_t, + flags: c_int, + ) -> c_int; + pub fn lsetxattr( + path: *const c_char, + name: *const c_char, + value: *const c_void, + size: size_t, + flags: c_int, + ) -> c_int; + pub fn fsetxattr( + filedes: c_int, + name: *const c_char, + value: *const c_void, + size: size_t, + flags: c_int, + ) -> c_int; + pub fn listxattr(path: *const c_char, list: *mut c_char, size: size_t) -> ssize_t; + pub fn llistxattr(path: *const c_char, list: *mut c_char, size: size_t) -> ssize_t; + pub fn flistxattr(filedes: c_int, list: *mut c_char, size: size_t) -> ssize_t; + pub fn removexattr(path: *const c_char, name: *const c_char) -> c_int; + pub fn lremovexattr(path: *const c_char, name: *const c_char) -> c_int; + pub fn fremovexattr(filedes: c_int, name: *const c_char) -> c_int; + pub fn signalfd(fd: c_int, mask: *const crate::sigset_t, flags: c_int) -> c_int; + pub fn timerfd_create(clockid: crate::clockid_t, flags: c_int) -> c_int; + #[cfg_attr(gnu_time_bits64, link_name = "__timerfd_gettime64")] + pub fn timerfd_gettime(fd: c_int, curr_value: *mut itimerspec) -> c_int; + #[cfg_attr(gnu_time_bits64, link_name = "__timerfd_settime64")] + pub fn timerfd_settime( + fd: c_int, + flags: c_int, + new_value: *const itimerspec, + old_value: *mut itimerspec, + ) -> c_int; + pub fn quotactl(cmd: c_int, special: *const c_char, id: c_int, data: *mut c_char) -> c_int; + pub fn epoll_pwait( + epfd: c_int, + events: *mut crate::epoll_event, + maxevents: c_int, + timeout: c_int, + sigmask: *const crate::sigset_t, + ) -> c_int; + pub fn dup3(oldfd: c_int, newfd: c_int, flags: c_int) -> c_int; + #[cfg_attr(gnu_time_bits64, link_name = "__sigtimedwait64")] + pub fn sigtimedwait( + set: *const sigset_t, + info: *mut siginfo_t, + timeout: *const crate::timespec, + ) -> c_int; + pub fn sigwaitinfo(set: *const sigset_t, info: *mut siginfo_t) -> c_int; + pub fn nl_langinfo_l(item: crate::nl_item, locale: crate::locale_t) -> *mut c_char; + pub fn accept4( + fd: c_int, + addr: *mut crate::sockaddr, + len: *mut crate::socklen_t, + flg: c_int, + ) -> c_int; + pub fn pthread_getaffinity_np( + thread: crate::pthread_t, + cpusetsize: size_t, + cpuset: *mut crate::cpu_set_t, + ) -> c_int; + pub fn 
pthread_setaffinity_np( + thread: crate::pthread_t, + cpusetsize: size_t, + cpuset: *const crate::cpu_set_t, + ) -> c_int; + pub fn pthread_setschedprio(native: crate::pthread_t, priority: c_int) -> c_int; + pub fn reboot(how_to: c_int) -> c_int; + pub fn setfsgid(gid: crate::gid_t) -> c_int; + pub fn setfsuid(uid: crate::uid_t) -> c_int; + + // Not available now on Android + pub fn mkfifoat(dirfd: c_int, pathname: *const c_char, mode: mode_t) -> c_int; + pub fn if_nameindex() -> *mut if_nameindex; + pub fn if_freenameindex(ptr: *mut if_nameindex); + pub fn sync_file_range(fd: c_int, offset: off64_t, nbytes: off64_t, flags: c_uint) -> c_int; + pub fn mremap( + addr: *mut c_void, + len: size_t, + new_len: size_t, + flags: c_int, + ... + ) -> *mut c_void; + + #[cfg_attr(gnu_time_bits64, link_name = "__glob64_time64")] + #[cfg_attr( + all(not(gnu_time_bits64), gnu_file_offset_bits64), + link_name = "glob64" + )] + pub fn glob( + pattern: *const c_char, + flags: c_int, + errfunc: Option<extern "C" fn(epath: *const c_char, errno: c_int) -> c_int>, + pglob: *mut crate::glob_t, + ) -> c_int; + #[cfg_attr(gnu_time_bits64, link_name = "__globfree64_time64")] + #[cfg_attr( + all(not(gnu_time_bits64), gnu_file_offset_bits64), + link_name = "globfree64" + )] + pub fn globfree(pglob: *mut crate::glob_t); + + pub fn posix_madvise(addr: *mut c_void, len: size_t, advice: c_int) -> c_int; + + pub fn seekdir(dirp: *mut crate::DIR, loc: c_long); + + pub fn telldir(dirp: *mut crate::DIR) -> c_long; + pub fn madvise(addr: *mut c_void, len: size_t, advice: c_int) -> c_int; + + pub fn msync(addr: *mut c_void, len: size_t, flags: c_int) -> c_int; + pub fn remap_file_pages( + addr: *mut c_void, + size: size_t, + prot: c_int, + pgoff: size_t, + flags: c_int, + ) -> c_int; + pub fn recvfrom( + socket: c_int, + buf: *mut c_void, + len: size_t, + flags: c_int, + addr: *mut crate::sockaddr, + addrlen: *mut crate::socklen_t, + ) -> ssize_t; + #[cfg_attr(gnu_file_offset_bits64, link_name = "mkstemps64")] + pub fn mkstemps(template: *mut c_char, suffixlen: c_int) -> c_int; + + pub fn nl_langinfo(item: crate::nl_item) -> *mut c_char; + + pub fn vhangup() -> c_int; + pub fn sync(); + pub fn syncfs(fd: c_int) -> c_int; + pub fn syscall(num: c_long, ...) 
-> c_long; + pub fn sched_getaffinity( + pid: crate::pid_t, + cpusetsize: size_t, + cpuset: *mut cpu_set_t, + ) -> c_int; + pub fn sched_setaffinity( + pid: crate::pid_t, + cpusetsize: size_t, + cpuset: *const cpu_set_t, + ) -> c_int; + pub fn epoll_create(size: c_int) -> c_int; + pub fn epoll_create1(flags: c_int) -> c_int; + pub fn epoll_wait( + epfd: c_int, + events: *mut crate::epoll_event, + maxevents: c_int, + timeout: c_int, + ) -> c_int; + pub fn epoll_ctl(epfd: c_int, op: c_int, fd: c_int, event: *mut crate::epoll_event) -> c_int; + pub fn pthread_getschedparam( + native: crate::pthread_t, + policy: *mut c_int, + param: *mut crate::sched_param, + ) -> c_int; + pub fn unshare(flags: c_int) -> c_int; + pub fn umount(target: *const c_char) -> c_int; + pub fn sched_get_priority_max(policy: c_int) -> c_int; + pub fn tee(fd_in: c_int, fd_out: c_int, len: size_t, flags: c_uint) -> ssize_t; + #[cfg_attr(gnu_time_bits64, link_name = "__settimeofday64")] + pub fn settimeofday(tv: *const crate::timeval, tz: *const crate::timezone) -> c_int; + pub fn splice( + fd_in: c_int, + off_in: *mut crate::loff_t, + fd_out: c_int, + off_out: *mut crate::loff_t, + len: size_t, + flags: c_uint, + ) -> ssize_t; + pub fn eventfd(init: c_uint, flags: c_int) -> c_int; + pub fn eventfd_read(fd: c_int, value: *mut eventfd_t) -> c_int; + pub fn eventfd_write(fd: c_int, value: eventfd_t) -> c_int; + + #[cfg_attr(gnu_time_bits64, link_name = "__sched_rr_get_interval64")] + pub fn sched_rr_get_interval(pid: crate::pid_t, tp: *mut crate::timespec) -> c_int; + #[cfg_attr(gnu_time_bits64, link_name = "__sem_timedwait64")] + pub fn sem_timedwait(sem: *mut sem_t, abstime: *const crate::timespec) -> c_int; + pub fn sem_getvalue(sem: *mut sem_t, sval: *mut c_int) -> c_int; + pub fn sched_setparam(pid: crate::pid_t, param: *const crate::sched_param) -> c_int; + pub fn setns(fd: c_int, nstype: c_int) -> c_int; + pub fn swapoff(path: *const c_char) -> c_int; + pub fn vmsplice(fd: c_int, iov: *const crate::iovec, nr_segs: size_t, flags: c_uint) + -> ssize_t; + pub fn mount( + src: *const c_char, + target: *const c_char, + fstype: *const c_char, + flags: c_ulong, + data: *const c_void, + ) -> c_int; + pub fn personality(persona: c_ulong) -> c_int; + #[cfg_attr(gnu_time_bits64, link_name = "__prctl_time64")] + pub fn prctl(option: c_int, ...) 
-> c_int; + pub fn sched_getparam(pid: crate::pid_t, param: *mut crate::sched_param) -> c_int; + #[cfg_attr(gnu_time_bits64, link_name = "__ppoll64")] + pub fn ppoll( + fds: *mut crate::pollfd, + nfds: nfds_t, + timeout: *const crate::timespec, + sigmask: *const sigset_t, + ) -> c_int; + pub fn pthread_mutexattr_getprotocol( + attr: *const pthread_mutexattr_t, + protocol: *mut c_int, + ) -> c_int; + pub fn pthread_mutexattr_setprotocol(attr: *mut pthread_mutexattr_t, protocol: c_int) -> c_int; + + #[cfg_attr(gnu_time_bits64, link_name = "__pthread_mutex_timedlock64")] + pub fn pthread_mutex_timedlock( + lock: *mut pthread_mutex_t, + abstime: *const crate::timespec, + ) -> c_int; + pub fn pthread_barrierattr_init(attr: *mut crate::pthread_barrierattr_t) -> c_int; + pub fn pthread_barrierattr_destroy(attr: *mut crate::pthread_barrierattr_t) -> c_int; + pub fn pthread_barrierattr_getpshared( + attr: *const crate::pthread_barrierattr_t, + shared: *mut c_int, + ) -> c_int; + pub fn pthread_barrierattr_setpshared( + attr: *mut crate::pthread_barrierattr_t, + shared: c_int, + ) -> c_int; + pub fn pthread_barrier_init( + barrier: *mut pthread_barrier_t, + attr: *const crate::pthread_barrierattr_t, + count: c_uint, + ) -> c_int; + pub fn pthread_barrier_destroy(barrier: *mut pthread_barrier_t) -> c_int; + pub fn pthread_barrier_wait(barrier: *mut pthread_barrier_t) -> c_int; + pub fn pthread_spin_init(lock: *mut crate::pthread_spinlock_t, pshared: c_int) -> c_int; + pub fn pthread_spin_destroy(lock: *mut crate::pthread_spinlock_t) -> c_int; + pub fn pthread_spin_lock(lock: *mut crate::pthread_spinlock_t) -> c_int; + pub fn pthread_spin_trylock(lock: *mut crate::pthread_spinlock_t) -> c_int; + pub fn pthread_spin_unlock(lock: *mut crate::pthread_spinlock_t) -> c_int; + pub fn clone( + cb: extern "C" fn(*mut c_void) -> c_int, + child_stack: *mut c_void, + flags: c_int, + arg: *mut c_void, + ... 
+ ) -> c_int; + pub fn sched_getscheduler(pid: crate::pid_t) -> c_int; + #[cfg_attr(gnu_time_bits64, link_name = "__clock_nanosleep_time64")] + pub fn clock_nanosleep( + clk_id: crate::clockid_t, + flags: c_int, + rqtp: *const crate::timespec, + rmtp: *mut crate::timespec, + ) -> c_int; + pub fn pthread_attr_getguardsize( + attr: *const crate::pthread_attr_t, + guardsize: *mut size_t, + ) -> c_int; + pub fn pthread_attr_setguardsize(attr: *mut crate::pthread_attr_t, guardsize: size_t) -> c_int; + pub fn pthread_attr_getinheritsched( + attr: *const crate::pthread_attr_t, + inheritsched: *mut c_int, + ) -> c_int; + pub fn pthread_attr_setinheritsched( + attr: *mut crate::pthread_attr_t, + inheritsched: c_int, + ) -> c_int; + pub fn pthread_attr_getschedpolicy( + attr: *const crate::pthread_attr_t, + policy: *mut c_int, + ) -> c_int; + pub fn pthread_attr_setschedpolicy(attr: *mut crate::pthread_attr_t, policy: c_int) -> c_int; + pub fn pthread_attr_getschedparam( + attr: *const crate::pthread_attr_t, + param: *mut crate::sched_param, + ) -> c_int; + pub fn pthread_attr_setschedparam( + attr: *mut crate::pthread_attr_t, + param: *const crate::sched_param, + ) -> c_int; + pub fn sethostname(name: *const c_char, len: size_t) -> c_int; + pub fn sched_get_priority_min(policy: c_int) -> c_int; + pub fn pthread_condattr_getpshared( + attr: *const pthread_condattr_t, + pshared: *mut c_int, + ) -> c_int; + pub fn sysinfo(info: *mut crate::sysinfo) -> c_int; + pub fn umount2(target: *const c_char, flags: c_int) -> c_int; + pub fn pthread_setschedparam( + native: crate::pthread_t, + policy: c_int, + param: *const crate::sched_param, + ) -> c_int; + pub fn swapon(path: *const c_char, swapflags: c_int) -> c_int; + pub fn sched_setscheduler( + pid: crate::pid_t, + policy: c_int, + param: *const crate::sched_param, + ) -> c_int; + #[cfg_attr(gnu_file_offset_bits64, link_name = "sendfile64")] + pub fn sendfile(out_fd: c_int, in_fd: c_int, offset: *mut off_t, count: size_t) -> ssize_t; + pub fn sigsuspend(mask: *const crate::sigset_t) -> c_int; + pub fn getgrgid_r( + gid: crate::gid_t, + grp: *mut crate::group, + buf: *mut c_char, + buflen: size_t, + result: *mut *mut crate::group, + ) -> c_int; + pub fn sigaltstack(ss: *const stack_t, oss: *mut stack_t) -> c_int; + pub fn sem_close(sem: *mut sem_t) -> c_int; + pub fn getdtablesize() -> c_int; + pub fn getgrnam_r( + name: *const c_char, + grp: *mut crate::group, + buf: *mut c_char, + buflen: size_t, + result: *mut *mut crate::group, + ) -> c_int; + pub fn initgroups(user: *const c_char, group: crate::gid_t) -> c_int; + pub fn pthread_sigmask(how: c_int, set: *const sigset_t, oldset: *mut sigset_t) -> c_int; + pub fn sem_open(name: *const c_char, oflag: c_int, ...) 
-> *mut sem_t;
+ pub fn getgrnam(name: *const c_char) -> *mut crate::group;
+ pub fn pthread_kill(thread: crate::pthread_t, sig: c_int) -> c_int;
+ pub fn sem_unlink(name: *const c_char) -> c_int;
+ pub fn daemon(nochdir: c_int, noclose: c_int) -> c_int;
+ pub fn getpwnam_r(
+ name: *const c_char,
+ pwd: *mut passwd,
+ buf: *mut c_char,
+ buflen: size_t,
+ result: *mut *mut passwd,
+ ) -> c_int;
+ pub fn getpwuid_r(
+ uid: crate::uid_t,
+ pwd: *mut passwd,
+ buf: *mut c_char,
+ buflen: size_t,
+ result: *mut *mut passwd,
+ ) -> c_int;
+ pub fn sigwait(set: *const sigset_t, sig: *mut c_int) -> c_int;
+ pub fn pthread_atfork(
+ prepare: Option<unsafe extern "C" fn()>,
+ parent: Option<unsafe extern "C" fn()>,
+ child: Option<unsafe extern "C" fn()>,
+ ) -> c_int;
+ pub fn getgrgid(gid: crate::gid_t) -> *mut crate::group;
+ pub fn getgrouplist(
+ user: *const c_char,
+ group: crate::gid_t,
+ groups: *mut crate::gid_t,
+ ngroups: *mut c_int,
+ ) -> c_int;
+ pub fn pthread_mutexattr_getpshared(
+ attr: *const pthread_mutexattr_t,
+ pshared: *mut c_int,
+ ) -> c_int;
+ pub fn popen(command: *const c_char, mode: *const c_char) -> *mut crate::FILE;
+ pub fn faccessat(dirfd: c_int, pathname: *const c_char, mode: c_int, flags: c_int) -> c_int;
+ pub fn pthread_create(
+ native: *mut crate::pthread_t,
+ attr: *const crate::pthread_attr_t,
+ f: extern "C" fn(*mut c_void) -> *mut c_void,
+ value: *mut c_void,
+ ) -> c_int;
+ pub fn dl_iterate_phdr(
+ callback: Option<
+ unsafe extern "C" fn(
+ info: *mut crate::dl_phdr_info,
+ size: size_t,
+ data: *mut c_void,
+ ) -> c_int,
+ >,
+ data: *mut c_void,
+ ) -> c_int;
+
+ pub fn setmntent(filename: *const c_char, ty: *const c_char) -> *mut crate::FILE;
+ pub fn getmntent(stream: *mut crate::FILE) -> *mut crate::mntent;
+ pub fn addmntent(stream: *mut crate::FILE, mnt: *const crate::mntent) -> c_int;
+ pub fn endmntent(streamp: *mut crate::FILE) -> c_int;
+ pub fn hasmntopt(mnt: *const crate::mntent, opt: *const c_char) -> *mut c_char;
+
+ pub fn posix_spawn(
+ pid: *mut crate::pid_t,
+ path: *const c_char,
+ file_actions: *const crate::posix_spawn_file_actions_t,
+ attrp: *const crate::posix_spawnattr_t,
+ argv: *const *mut c_char,
+ envp: *const *mut c_char,
+ ) -> c_int;
+ pub fn posix_spawnp(
+ pid: *mut crate::pid_t,
+ file: *const c_char,
+ file_actions: *const crate::posix_spawn_file_actions_t,
+ attrp: *const crate::posix_spawnattr_t,
+ argv: *const *mut c_char,
+ envp: *const *mut c_char,
+ ) -> c_int;
+ pub fn posix_spawnattr_init(attr: *mut posix_spawnattr_t) -> c_int;
+ pub fn posix_spawnattr_destroy(attr: *mut posix_spawnattr_t) -> c_int;
+ pub fn posix_spawnattr_getsigdefault(
+ attr: *const posix_spawnattr_t,
+ default: *mut crate::sigset_t,
+ ) -> c_int;
+ pub fn posix_spawnattr_setsigdefault(
+ attr: *mut posix_spawnattr_t,
+ default: *const crate::sigset_t,
+ ) -> c_int;
+ pub fn posix_spawnattr_getsigmask(
+ attr: *const posix_spawnattr_t,
+ default: *mut crate::sigset_t,
+ ) -> c_int;
+ pub fn posix_spawnattr_setsigmask(
+ attr: *mut posix_spawnattr_t,
+ default: *const crate::sigset_t,
+ ) -> c_int;
+ pub fn posix_spawnattr_getflags(attr: *const posix_spawnattr_t, flags: *mut c_short) -> c_int;
+ pub fn posix_spawnattr_setflags(attr: *mut posix_spawnattr_t, flags: c_short) -> c_int;
+ pub fn posix_spawnattr_getpgroup(
+ attr: *const posix_spawnattr_t,
+ flags: *mut crate::pid_t,
+ ) -> c_int;
+ pub fn posix_spawnattr_setpgroup(attr: *mut posix_spawnattr_t, flags: crate::pid_t) -> c_int;
+ pub fn posix_spawnattr_getschedpolicy(
+ attr: *const posix_spawnattr_t,
+ flags: *mut c_int,
+ ) -> c_int;
+ pub fn
posix_spawnattr_setschedpolicy(attr: *mut posix_spawnattr_t, flags: c_int) -> c_int; + pub fn posix_spawnattr_getschedparam( + attr: *const posix_spawnattr_t, + param: *mut crate::sched_param, + ) -> c_int; + pub fn posix_spawnattr_setschedparam( + attr: *mut posix_spawnattr_t, + param: *const crate::sched_param, + ) -> c_int; + + pub fn posix_spawn_file_actions_init(actions: *mut posix_spawn_file_actions_t) -> c_int; + pub fn posix_spawn_file_actions_destroy(actions: *mut posix_spawn_file_actions_t) -> c_int; + pub fn posix_spawn_file_actions_addopen( + actions: *mut posix_spawn_file_actions_t, + fd: c_int, + path: *const c_char, + oflag: c_int, + mode: mode_t, + ) -> c_int; + pub fn posix_spawn_file_actions_addclose( + actions: *mut posix_spawn_file_actions_t, + fd: c_int, + ) -> c_int; + pub fn posix_spawn_file_actions_adddup2( + actions: *mut posix_spawn_file_actions_t, + fd: c_int, + newfd: c_int, + ) -> c_int; + pub fn fread_unlocked( + buf: *mut c_void, + size: size_t, + nobj: size_t, + stream: *mut crate::FILE, + ) -> size_t; + pub fn inotify_rm_watch(fd: c_int, wd: c_int) -> c_int; + pub fn inotify_init() -> c_int; + pub fn inotify_init1(flags: c_int) -> c_int; + pub fn inotify_add_watch(fd: c_int, path: *const c_char, mask: u32) -> c_int; + pub fn fanotify_init(flags: c_uint, event_f_flags: c_uint) -> c_int; + + pub fn regcomp(preg: *mut crate::regex_t, pattern: *const c_char, cflags: c_int) -> c_int; + + pub fn regexec( + preg: *const crate::regex_t, + input: *const c_char, + nmatch: size_t, + pmatch: *mut regmatch_t, + eflags: c_int, + ) -> c_int; + + pub fn regerror( + errcode: c_int, + preg: *const crate::regex_t, + errbuf: *mut c_char, + errbuf_size: size_t, + ) -> size_t; + + pub fn regfree(preg: *mut crate::regex_t); + + pub fn iconv_open(tocode: *const c_char, fromcode: *const c_char) -> iconv_t; + pub fn iconv( + cd: iconv_t, + inbuf: *mut *mut c_char, + inbytesleft: *mut size_t, + outbuf: *mut *mut c_char, + outbytesleft: *mut size_t, + ) -> size_t; + pub fn iconv_close(cd: iconv_t) -> c_int; + + pub fn gettid() -> crate::pid_t; + + pub fn timer_create( + clockid: crate::clockid_t, + sevp: *mut crate::sigevent, + timerid: *mut crate::timer_t, + ) -> c_int; + pub fn timer_delete(timerid: crate::timer_t) -> c_int; + pub fn timer_getoverrun(timerid: crate::timer_t) -> c_int; + #[cfg_attr(gnu_time_bits64, link_name = "__timer_gettime64")] + pub fn timer_gettime(timerid: crate::timer_t, curr_value: *mut crate::itimerspec) -> c_int; + #[cfg_attr(gnu_time_bits64, link_name = "__timer_settime64")] + pub fn timer_settime( + timerid: crate::timer_t, + flags: c_int, + new_value: *const crate::itimerspec, + old_value: *mut crate::itimerspec, + ) -> c_int; + + pub fn gethostid() -> c_long; + + pub fn pthread_getcpuclockid(thread: crate::pthread_t, clk_id: *mut crate::clockid_t) -> c_int; + pub fn memmem( + haystack: *const c_void, + haystacklen: size_t, + needle: *const c_void, + needlelen: size_t, + ) -> *mut c_void; + pub fn sched_getcpu() -> c_int; + + pub fn pthread_getname_np(thread: crate::pthread_t, name: *mut c_char, len: size_t) -> c_int; + pub fn pthread_setname_np(thread: crate::pthread_t, name: *const c_char) -> c_int; + pub fn getopt_long( + argc: c_int, + argv: *const *mut c_char, + optstring: *const c_char, + longopts: *const option, + longindex: *mut c_int, + ) -> c_int; + + pub fn pthread_once(control: *mut pthread_once_t, routine: extern "C" fn()) -> c_int; + + pub fn copy_file_range( + fd_in: c_int, + off_in: *mut off64_t, + fd_out: c_int, + off_out: *mut off64_t, 
+ len: size_t, + flags: c_uint, + ) -> ssize_t; + + pub fn klogctl(syslog_type: c_int, bufp: *mut c_char, len: c_int) -> c_int; +} + +// LFS64 extensions +// +// * musl has 64-bit versions only so aliases the LFS64 symbols to the standard ones +cfg_if! { + if #[cfg(not(target_env = "musl"))] { + extern "C" { + pub fn fallocate64(fd: c_int, mode: c_int, offset: off64_t, len: off64_t) -> c_int; + pub fn fgetpos64(stream: *mut crate::FILE, ptr: *mut fpos64_t) -> c_int; + pub fn fopen64(filename: *const c_char, mode: *const c_char) -> *mut crate::FILE; + pub fn freopen64( + filename: *const c_char, + mode: *const c_char, + file: *mut crate::FILE, + ) -> *mut crate::FILE; + pub fn fseeko64(stream: *mut crate::FILE, offset: off64_t, whence: c_int) -> c_int; + pub fn fsetpos64(stream: *mut crate::FILE, ptr: *const fpos64_t) -> c_int; + pub fn ftello64(stream: *mut crate::FILE) -> off64_t; + pub fn posix_fallocate64(fd: c_int, offset: off64_t, len: off64_t) -> c_int; + pub fn sendfile64( + out_fd: c_int, + in_fd: c_int, + offset: *mut off64_t, + count: size_t, + ) -> ssize_t; + pub fn tmpfile64() -> *mut crate::FILE; + } + } +} + +cfg_if! { + if #[cfg(target_env = "uclibc")] { + mod uclibc; + pub use self::uclibc::*; + } else if #[cfg(any(target_env = "musl", target_env = "ohos"))] { + mod musl; + pub use self::musl::*; + } else if #[cfg(target_env = "gnu")] { + mod gnu; + pub use self::gnu::*; + } +} + +mod arch; +pub use self::arch::*; diff --git a/vendor/libc/src/unix/linux_like/linux/musl/b32/arm/mod.rs b/vendor/libc/src/unix/linux_like/linux/musl/b32/arm/mod.rs new file mode 100644 index 00000000000000..a04f05ea50db8c --- /dev/null +++ b/vendor/libc/src/unix/linux_like/linux/musl/b32/arm/mod.rs @@ -0,0 +1,792 @@ +use crate::off_t; +use crate::prelude::*; + +pub type wchar_t = u32; + +s! { + pub struct stat { + pub st_dev: crate::dev_t, + __st_dev_padding: c_int, + __st_ino_truncated: c_long, + pub st_mode: crate::mode_t, + pub st_nlink: crate::nlink_t, + pub st_uid: crate::uid_t, + pub st_gid: crate::gid_t, + pub st_rdev: crate::dev_t, + __st_rdev_padding: c_int, + pub st_size: off_t, + pub st_blksize: crate::blksize_t, + pub st_blocks: crate::blkcnt_t, + pub st_atime: crate::time_t, + pub st_atime_nsec: c_long, + pub st_mtime: crate::time_t, + pub st_mtime_nsec: c_long, + pub st_ctime: crate::time_t, + pub st_ctime_nsec: c_long, + pub st_ino: crate::ino_t, + } + + pub struct stat64 { + pub st_dev: crate::dev_t, + __st_dev_padding: c_int, + __st_ino_truncated: c_long, + pub st_mode: crate::mode_t, + pub st_nlink: crate::nlink_t, + pub st_uid: crate::uid_t, + pub st_gid: crate::gid_t, + pub st_rdev: crate::dev_t, + __st_rdev_padding: c_int, + pub st_size: off_t, + pub st_blksize: crate::blksize_t, + pub st_blocks: crate::blkcnt_t, + pub st_atime: crate::time_t, + pub st_atime_nsec: c_long, + pub st_mtime: crate::time_t, + pub st_mtime_nsec: c_long, + pub st_ctime: crate::time_t, + pub st_ctime_nsec: c_long, + pub st_ino: crate::ino_t, + } + + pub struct stack_t { + pub ss_sp: *mut c_void, + pub ss_flags: c_int, + pub ss_size: size_t, + } + + pub struct ipc_perm { + #[cfg(musl_v1_2_3)] + pub __key: crate::key_t, + #[cfg(not(musl_v1_2_3))] + #[deprecated( + since = "0.2.173", + note = "This field is incorrectly named and will be changed + to __key in a future release." 
+ )] + pub __ipc_perm_key: crate::key_t, + pub uid: crate::uid_t, + pub gid: crate::gid_t, + pub cuid: crate::uid_t, + pub cgid: crate::gid_t, + pub mode: crate::mode_t, + pub __seq: c_int, + __unused1: c_long, + __unused2: c_long, + } + + pub struct shmid_ds { + pub shm_perm: crate::ipc_perm, + pub shm_segsz: size_t, + pub shm_atime: crate::time_t, + __unused1: c_int, + pub shm_dtime: crate::time_t, + __unused2: c_int, + pub shm_ctime: crate::time_t, + __unused3: c_int, + pub shm_cpid: crate::pid_t, + pub shm_lpid: crate::pid_t, + pub shm_nattch: c_ulong, + __pad1: c_ulong, + __pad2: c_ulong, + } + + pub struct msqid_ds { + pub msg_perm: crate::ipc_perm, + pub msg_stime: crate::time_t, + __unused1: c_int, + pub msg_rtime: crate::time_t, + __unused2: c_int, + pub msg_ctime: crate::time_t, + __unused3: c_int, + pub __msg_cbytes: c_ulong, + pub msg_qnum: crate::msgqnum_t, + pub msg_qbytes: crate::msglen_t, + pub msg_lspid: crate::pid_t, + pub msg_lrpid: crate::pid_t, + __pad1: c_ulong, + __pad2: c_ulong, + } + + pub struct mcontext_t { + pub trap_no: c_ulong, + pub error_code: c_ulong, + pub oldmask: c_ulong, + pub arm_r0: c_ulong, + pub arm_r1: c_ulong, + pub arm_r2: c_ulong, + pub arm_r3: c_ulong, + pub arm_r4: c_ulong, + pub arm_r5: c_ulong, + pub arm_r6: c_ulong, + pub arm_r7: c_ulong, + pub arm_r8: c_ulong, + pub arm_r9: c_ulong, + pub arm_r10: c_ulong, + pub arm_fp: c_ulong, + pub arm_ip: c_ulong, + pub arm_sp: c_ulong, + pub arm_lr: c_ulong, + pub arm_pc: c_ulong, + pub arm_cpsr: c_ulong, + pub fault_address: c_ulong, + } +} + +s_no_extra_traits! { + pub struct ucontext_t { + pub uc_flags: c_ulong, + pub uc_link: *mut ucontext_t, + pub uc_stack: crate::stack_t, + pub uc_mcontext: mcontext_t, + pub uc_sigmask: crate::sigset_t, + pub uc_regspace: [c_ulonglong; 64], + } + + #[repr(align(8))] + pub struct max_align_t { + priv_: (i64, i64), + } +} + +cfg_if! 
{
+ if #[cfg(feature = "extra_traits")] {
+ impl PartialEq for ucontext_t {
+ fn eq(&self, other: &ucontext_t) -> bool {
+ self.uc_flags == other.uc_flags
+ && self.uc_link == other.uc_link
+ && self.uc_stack == other.uc_stack
+ && self.uc_mcontext == other.uc_mcontext
+ && self.uc_sigmask == other.uc_sigmask
+ }
+ }
+ impl Eq for ucontext_t {}
+ impl hash::Hash for ucontext_t {
+ fn hash<H: hash::Hasher>(&self, state: &mut H) {
+ self.uc_flags.hash(state);
+ self.uc_link.hash(state);
+ self.uc_stack.hash(state);
+ self.uc_mcontext.hash(state);
+ self.uc_sigmask.hash(state);
+ }
+ }
+ }
+}
+
+pub const SIGSTKSZ: size_t = 8192;
+pub const MINSIGSTKSZ: size_t = 2048;
+
+pub const O_DIRECT: c_int = 0x10000;
+pub const O_DIRECTORY: c_int = 0x4000;
+pub const O_NOFOLLOW: c_int = 0x8000;
+pub const O_ASYNC: c_int = 0x2000;
+pub const O_LARGEFILE: c_int = 0o400000;
+
+pub const MADV_SOFT_OFFLINE: c_int = 101;
+pub const MCL_CURRENT: c_int = 0x0001;
+pub const MCL_FUTURE: c_int = 0x0002;
+pub const MCL_ONFAULT: c_int = 0x0004;
+pub const CBAUD: crate::tcflag_t = 0o0010017;
+pub const TAB1: c_int = 0x00000800;
+pub const TAB2: c_int = 0x00001000;
+pub const TAB3: c_int = 0x00001800;
+pub const CR1: c_int = 0x00000200;
+pub const CR2: c_int = 0x00000400;
+pub const CR3: c_int = 0x00000600;
+pub const FF1: c_int = 0x00008000;
+pub const BS1: c_int = 0x00002000;
+pub const VT1: c_int = 0x00004000;
+pub const VWERASE: usize = 14;
+pub const VREPRINT: usize = 12;
+pub const VSUSP: usize = 10;
+pub const VSTART: usize = 8;
+pub const VSTOP: usize = 9;
+pub const VDISCARD: usize = 13;
+pub const VTIME: usize = 5;
+pub const IXON: crate::tcflag_t = 0x00000400;
+pub const IXOFF: crate::tcflag_t = 0x00001000;
+pub const ONLCR: crate::tcflag_t = 0x4;
+pub const CSIZE: crate::tcflag_t = 0x00000030;
+pub const CS6: crate::tcflag_t = 0x00000010;
+pub const CS7: crate::tcflag_t = 0x00000020;
+pub const CS8: crate::tcflag_t = 0x00000030;
+pub const CSTOPB: crate::tcflag_t = 0x00000040;
+pub const CREAD: crate::tcflag_t = 0x00000080;
+pub const PARENB: crate::tcflag_t = 0x00000100;
+pub const PARODD: crate::tcflag_t = 0x00000200;
+pub const HUPCL: crate::tcflag_t = 0x00000400;
+pub const CLOCAL: crate::tcflag_t = 0x00000800;
+pub const ECHOKE: crate::tcflag_t = 0x00000800;
+pub const ECHOE: crate::tcflag_t = 0x00000010;
+pub const ECHOK: crate::tcflag_t = 0x00000020;
+pub const ECHONL: crate::tcflag_t = 0x00000040;
+pub const ECHOPRT: crate::tcflag_t = 0x00000400;
+pub const ECHOCTL: crate::tcflag_t = 0x00000200;
+pub const ISIG: crate::tcflag_t = 0x00000001;
+pub const ICANON: crate::tcflag_t = 0x00000002;
+pub const PENDIN: crate::tcflag_t = 0x00004000;
+pub const NOFLSH: crate::tcflag_t = 0x00000080;
+pub const CIBAUD: crate::tcflag_t = 0o02003600000;
+pub const CBAUDEX: crate::tcflag_t = 0o010000;
+pub const VSWTC: usize = 7;
+pub const OLCUC: crate::tcflag_t = 0o000002;
+pub const NLDLY: crate::tcflag_t = 0o000400;
+pub const CRDLY: crate::tcflag_t = 0o003000;
+pub const TABDLY: crate::tcflag_t = 0o014000;
+pub const BSDLY: crate::tcflag_t = 0o020000;
+pub const FFDLY: crate::tcflag_t = 0o100000;
+pub const VTDLY: crate::tcflag_t = 0o040000;
+pub const XTABS: crate::tcflag_t = 0o014000;
+pub const B57600: crate::speed_t = 0o010001;
+pub const B115200: crate::speed_t = 0o010002;
+pub const B230400: crate::speed_t = 0o010003;
+pub const B460800: crate::speed_t = 0o010004;
+pub const B500000: crate::speed_t = 0o010005;
+pub const B576000: crate::speed_t = 0o010006;
+pub const B921600: crate::speed_t = 0o010007;
+pub const
B1000000: crate::speed_t = 0o010010; +pub const B1152000: crate::speed_t = 0o010011; +pub const B1500000: crate::speed_t = 0o010012; +pub const B2000000: crate::speed_t = 0o010013; +pub const B2500000: crate::speed_t = 0o010014; +pub const B3000000: crate::speed_t = 0o010015; +pub const B3500000: crate::speed_t = 0o010016; +pub const B4000000: crate::speed_t = 0o010017; + +pub const O_APPEND: c_int = 1024; +pub const O_CREAT: c_int = 64; +pub const O_EXCL: c_int = 128; +pub const O_NOCTTY: c_int = 256; +pub const O_NONBLOCK: c_int = 2048; +pub const O_SYNC: c_int = 1052672; +pub const O_RSYNC: c_int = 1052672; +pub const O_DSYNC: c_int = 4096; + +pub const MAP_ANON: c_int = 0x0020; +pub const MAP_GROWSDOWN: c_int = 0x0100; +pub const MAP_DENYWRITE: c_int = 0x0800; +pub const MAP_EXECUTABLE: c_int = 0x01000; +pub const MAP_LOCKED: c_int = 0x02000; +pub const MAP_NORESERVE: c_int = 0x04000; +pub const MAP_POPULATE: c_int = 0x08000; +pub const MAP_NONBLOCK: c_int = 0x010000; +pub const MAP_STACK: c_int = 0x020000; +pub const MAP_SYNC: c_int = 0x080000; + +pub const SOCK_STREAM: c_int = 1; +pub const SOCK_DGRAM: c_int = 2; + +pub const EDEADLK: c_int = 35; +pub const ENAMETOOLONG: c_int = 36; +pub const ENOLCK: c_int = 37; +pub const ENOSYS: c_int = 38; +pub const ENOTEMPTY: c_int = 39; +pub const ELOOP: c_int = 40; +pub const ENOMSG: c_int = 42; +pub const EIDRM: c_int = 43; +pub const ECHRNG: c_int = 44; +pub const EL2NSYNC: c_int = 45; +pub const EL3HLT: c_int = 46; +pub const EL3RST: c_int = 47; +pub const ELNRNG: c_int = 48; +pub const EUNATCH: c_int = 49; +pub const ENOCSI: c_int = 50; +pub const EL2HLT: c_int = 51; +pub const EBADE: c_int = 52; +pub const EBADR: c_int = 53; +pub const EXFULL: c_int = 54; +pub const ENOANO: c_int = 55; +pub const EBADRQC: c_int = 56; +pub const EBADSLT: c_int = 57; +pub const EDEADLOCK: c_int = EDEADLK; +pub const EMULTIHOP: c_int = 72; +pub const EBADMSG: c_int = 74; +pub const EOVERFLOW: c_int = 75; +pub const ENOTUNIQ: c_int = 76; +pub const EBADFD: c_int = 77; +pub const EREMCHG: c_int = 78; +pub const ELIBACC: c_int = 79; +pub const ELIBBAD: c_int = 80; +pub const ELIBSCN: c_int = 81; +pub const ELIBMAX: c_int = 82; +pub const ELIBEXEC: c_int = 83; +pub const EILSEQ: c_int = 84; +pub const ERESTART: c_int = 85; +pub const ESTRPIPE: c_int = 86; +pub const EUSERS: c_int = 87; +pub const ENOTSOCK: c_int = 88; +pub const EDESTADDRREQ: c_int = 89; +pub const EMSGSIZE: c_int = 90; +pub const EPROTOTYPE: c_int = 91; +pub const ENOPROTOOPT: c_int = 92; +pub const EPROTONOSUPPORT: c_int = 93; +pub const ESOCKTNOSUPPORT: c_int = 94; +pub const EOPNOTSUPP: c_int = 95; +pub const ENOTSUP: c_int = EOPNOTSUPP; +pub const EPFNOSUPPORT: c_int = 96; +pub const EAFNOSUPPORT: c_int = 97; +pub const EADDRINUSE: c_int = 98; +pub const EADDRNOTAVAIL: c_int = 99; +pub const ENETDOWN: c_int = 100; +pub const ENETUNREACH: c_int = 101; +pub const ENETRESET: c_int = 102; +pub const ECONNABORTED: c_int = 103; +pub const ECONNRESET: c_int = 104; +pub const ENOBUFS: c_int = 105; +pub const EISCONN: c_int = 106; +pub const ENOTCONN: c_int = 107; +pub const ESHUTDOWN: c_int = 108; +pub const ETOOMANYREFS: c_int = 109; +pub const ETIMEDOUT: c_int = 110; +pub const ECONNREFUSED: c_int = 111; +pub const EHOSTDOWN: c_int = 112; +pub const EHOSTUNREACH: c_int = 113; +pub const EALREADY: c_int = 114; +pub const EINPROGRESS: c_int = 115; +pub const ESTALE: c_int = 116; +pub const EUCLEAN: c_int = 117; +pub const ENOTNAM: c_int = 118; +pub const ENAVAIL: c_int = 119; +pub const EISNAM: 
c_int = 120; +pub const EREMOTEIO: c_int = 121; +pub const EDQUOT: c_int = 122; +pub const ENOMEDIUM: c_int = 123; +pub const EMEDIUMTYPE: c_int = 124; +pub const ECANCELED: c_int = 125; +pub const ENOKEY: c_int = 126; +pub const EKEYEXPIRED: c_int = 127; +pub const EKEYREVOKED: c_int = 128; +pub const EKEYREJECTED: c_int = 129; +pub const EOWNERDEAD: c_int = 130; +pub const ENOTRECOVERABLE: c_int = 131; +pub const ERFKILL: c_int = 132; +pub const EHWPOISON: c_int = 133; + +pub const SA_ONSTACK: c_int = 0x08000000; +pub const SA_SIGINFO: c_int = 0x00000004; +pub const SA_NOCLDWAIT: c_int = 0x00000002; + +pub const SIGCHLD: c_int = 17; +pub const SIGBUS: c_int = 7; +pub const SIGTTIN: c_int = 21; +pub const SIGTTOU: c_int = 22; +pub const SIGXCPU: c_int = 24; +pub const SIGXFSZ: c_int = 25; +pub const SIGVTALRM: c_int = 26; +pub const SIGPROF: c_int = 27; +pub const SIGWINCH: c_int = 28; +pub const SIGUSR1: c_int = 10; +pub const SIGUSR2: c_int = 12; +pub const SIGCONT: c_int = 18; +pub const SIGSTOP: c_int = 19; +pub const SIGTSTP: c_int = 20; +pub const SIGURG: c_int = 23; +pub const SIGIO: c_int = 29; +pub const SIGSYS: c_int = 31; +pub const SIGSTKFLT: c_int = 16; +pub const SIGPOLL: c_int = 29; +pub const SIGPWR: c_int = 30; +pub const SIG_SETMASK: c_int = 2; +pub const SIG_BLOCK: c_int = 0x000000; +pub const SIG_UNBLOCK: c_int = 0x01; + +pub const EXTPROC: crate::tcflag_t = 0x00010000; + +pub const MAP_HUGETLB: c_int = 0x040000; + +pub const F_GETLK: c_int = 12; +pub const F_GETOWN: c_int = 9; +pub const F_SETLK: c_int = 13; +pub const F_SETLKW: c_int = 14; +pub const F_SETOWN: c_int = 8; + +pub const VEOF: usize = 4; +pub const VEOL: usize = 11; +pub const VEOL2: usize = 16; +pub const VMIN: usize = 6; +pub const IEXTEN: crate::tcflag_t = 0x00008000; +pub const TOSTOP: crate::tcflag_t = 0x00000100; +pub const FLUSHO: crate::tcflag_t = 0x00001000; + +pub const POLLWRNORM: c_short = 0x100; +pub const POLLWRBAND: c_short = 0x200; + +// Syscall table +pub const SYS_restart_syscall: c_long = 0; +pub const SYS_exit: c_long = 1; +pub const SYS_fork: c_long = 2; +pub const SYS_read: c_long = 3; +pub const SYS_write: c_long = 4; +pub const SYS_open: c_long = 5; +pub const SYS_close: c_long = 6; +pub const SYS_creat: c_long = 8; +pub const SYS_link: c_long = 9; +pub const SYS_unlink: c_long = 10; +pub const SYS_execve: c_long = 11; +pub const SYS_chdir: c_long = 12; +pub const SYS_mknod: c_long = 14; +pub const SYS_chmod: c_long = 15; +pub const SYS_lchown: c_long = 16; +pub const SYS_lseek: c_long = 19; +pub const SYS_getpid: c_long = 20; +pub const SYS_mount: c_long = 21; +pub const SYS_setuid: c_long = 23; +pub const SYS_getuid: c_long = 24; +pub const SYS_ptrace: c_long = 26; +pub const SYS_pause: c_long = 29; +pub const SYS_access: c_long = 33; +pub const SYS_nice: c_long = 34; +pub const SYS_sync: c_long = 36; +pub const SYS_kill: c_long = 37; +pub const SYS_rename: c_long = 38; +pub const SYS_mkdir: c_long = 39; +pub const SYS_rmdir: c_long = 40; +pub const SYS_dup: c_long = 41; +pub const SYS_pipe: c_long = 42; +pub const SYS_times: c_long = 43; +pub const SYS_brk: c_long = 45; +pub const SYS_setgid: c_long = 46; +pub const SYS_getgid: c_long = 47; +pub const SYS_geteuid: c_long = 49; +pub const SYS_getegid: c_long = 50; +pub const SYS_acct: c_long = 51; +pub const SYS_umount2: c_long = 52; +pub const SYS_ioctl: c_long = 54; +pub const SYS_fcntl: c_long = 55; +pub const SYS_setpgid: c_long = 57; +pub const SYS_umask: c_long = 60; +pub const SYS_chroot: c_long = 61; +pub const 
SYS_ustat: c_long = 62; +pub const SYS_dup2: c_long = 63; +pub const SYS_getppid: c_long = 64; +pub const SYS_getpgrp: c_long = 65; +pub const SYS_setsid: c_long = 66; +pub const SYS_sigaction: c_long = 67; +pub const SYS_setreuid: c_long = 70; +pub const SYS_setregid: c_long = 71; +pub const SYS_sigsuspend: c_long = 72; +pub const SYS_sigpending: c_long = 73; +pub const SYS_sethostname: c_long = 74; +pub const SYS_setrlimit: c_long = 75; +pub const SYS_getrusage: c_long = 77; +pub const SYS_gettimeofday: c_long = 78; +pub const SYS_settimeofday: c_long = 79; +pub const SYS_getgroups: c_long = 80; +pub const SYS_setgroups: c_long = 81; +pub const SYS_symlink: c_long = 83; +pub const SYS_readlink: c_long = 85; +pub const SYS_uselib: c_long = 86; +pub const SYS_swapon: c_long = 87; +pub const SYS_reboot: c_long = 88; +pub const SYS_munmap: c_long = 91; +pub const SYS_truncate: c_long = 92; +pub const SYS_ftruncate: c_long = 93; +pub const SYS_fchmod: c_long = 94; +pub const SYS_fchown: c_long = 95; +pub const SYS_getpriority: c_long = 96; +pub const SYS_setpriority: c_long = 97; +pub const SYS_statfs: c_long = 99; +pub const SYS_fstatfs: c_long = 100; +pub const SYS_syslog: c_long = 103; +pub const SYS_setitimer: c_long = 104; +pub const SYS_getitimer: c_long = 105; +pub const SYS_stat: c_long = 106; +pub const SYS_lstat: c_long = 107; +pub const SYS_fstat: c_long = 108; +pub const SYS_vhangup: c_long = 111; +pub const SYS_wait4: c_long = 114; +pub const SYS_swapoff: c_long = 115; +pub const SYS_sysinfo: c_long = 116; +pub const SYS_fsync: c_long = 118; +pub const SYS_sigreturn: c_long = 119; +pub const SYS_clone: c_long = 120; +pub const SYS_setdomainname: c_long = 121; +pub const SYS_uname: c_long = 122; +pub const SYS_adjtimex: c_long = 124; +pub const SYS_mprotect: c_long = 125; +pub const SYS_sigprocmask: c_long = 126; +pub const SYS_init_module: c_long = 128; +pub const SYS_delete_module: c_long = 129; +pub const SYS_quotactl: c_long = 131; +pub const SYS_getpgid: c_long = 132; +pub const SYS_fchdir: c_long = 133; +pub const SYS_bdflush: c_long = 134; +pub const SYS_sysfs: c_long = 135; +pub const SYS_personality: c_long = 136; +pub const SYS_setfsuid: c_long = 138; +pub const SYS_setfsgid: c_long = 139; +pub const SYS__llseek: c_long = 140; +pub const SYS_getdents: c_long = 141; +pub const SYS__newselect: c_long = 142; +pub const SYS_flock: c_long = 143; +pub const SYS_msync: c_long = 144; +pub const SYS_readv: c_long = 145; +pub const SYS_writev: c_long = 146; +pub const SYS_getsid: c_long = 147; +pub const SYS_fdatasync: c_long = 148; +pub const SYS__sysctl: c_long = 149; +pub const SYS_mlock: c_long = 150; +pub const SYS_munlock: c_long = 151; +pub const SYS_mlockall: c_long = 152; +pub const SYS_munlockall: c_long = 153; +pub const SYS_sched_setparam: c_long = 154; +pub const SYS_sched_getparam: c_long = 155; +pub const SYS_sched_setscheduler: c_long = 156; +pub const SYS_sched_getscheduler: c_long = 157; +pub const SYS_sched_yield: c_long = 158; +pub const SYS_sched_get_priority_max: c_long = 159; +pub const SYS_sched_get_priority_min: c_long = 160; +pub const SYS_sched_rr_get_interval: c_long = 161; +pub const SYS_nanosleep: c_long = 162; +pub const SYS_mremap: c_long = 163; +pub const SYS_setresuid: c_long = 164; +pub const SYS_getresuid: c_long = 165; +pub const SYS_poll: c_long = 168; +pub const SYS_nfsservctl: c_long = 169; +pub const SYS_setresgid: c_long = 170; +pub const SYS_getresgid: c_long = 171; +pub const SYS_prctl: c_long = 172; +pub const SYS_rt_sigreturn: c_long = 
173; +pub const SYS_rt_sigaction: c_long = 174; +pub const SYS_rt_sigprocmask: c_long = 175; +pub const SYS_rt_sigpending: c_long = 176; +pub const SYS_rt_sigtimedwait: c_long = 177; +pub const SYS_rt_sigqueueinfo: c_long = 178; +pub const SYS_rt_sigsuspend: c_long = 179; +pub const SYS_pread64: c_long = 180; +pub const SYS_pwrite64: c_long = 181; +pub const SYS_chown: c_long = 182; +pub const SYS_getcwd: c_long = 183; +pub const SYS_capget: c_long = 184; +pub const SYS_capset: c_long = 185; +pub const SYS_sigaltstack: c_long = 186; +pub const SYS_sendfile: c_long = 187; +pub const SYS_vfork: c_long = 190; +pub const SYS_ugetrlimit: c_long = 191; +pub const SYS_mmap2: c_long = 192; +pub const SYS_truncate64: c_long = 193; +pub const SYS_ftruncate64: c_long = 194; +pub const SYS_stat64: c_long = 195; +pub const SYS_lstat64: c_long = 196; +pub const SYS_fstat64: c_long = 197; +pub const SYS_lchown32: c_long = 198; +pub const SYS_getuid32: c_long = 199; +pub const SYS_getgid32: c_long = 200; +pub const SYS_geteuid32: c_long = 201; +pub const SYS_getegid32: c_long = 202; +pub const SYS_setreuid32: c_long = 203; +pub const SYS_setregid32: c_long = 204; +pub const SYS_getgroups32: c_long = 205; +pub const SYS_setgroups32: c_long = 206; +pub const SYS_fchown32: c_long = 207; +pub const SYS_setresuid32: c_long = 208; +pub const SYS_getresuid32: c_long = 209; +pub const SYS_setresgid32: c_long = 210; +pub const SYS_getresgid32: c_long = 211; +pub const SYS_chown32: c_long = 212; +pub const SYS_setuid32: c_long = 213; +pub const SYS_setgid32: c_long = 214; +pub const SYS_setfsuid32: c_long = 215; +pub const SYS_setfsgid32: c_long = 216; +pub const SYS_getdents64: c_long = 217; +pub const SYS_pivot_root: c_long = 218; +pub const SYS_mincore: c_long = 219; +pub const SYS_madvise: c_long = 220; +pub const SYS_fcntl64: c_long = 221; +pub const SYS_gettid: c_long = 224; +pub const SYS_readahead: c_long = 225; +pub const SYS_setxattr: c_long = 226; +pub const SYS_lsetxattr: c_long = 227; +pub const SYS_fsetxattr: c_long = 228; +pub const SYS_getxattr: c_long = 229; +pub const SYS_lgetxattr: c_long = 230; +pub const SYS_fgetxattr: c_long = 231; +pub const SYS_listxattr: c_long = 232; +pub const SYS_llistxattr: c_long = 233; +pub const SYS_flistxattr: c_long = 234; +pub const SYS_removexattr: c_long = 235; +pub const SYS_lremovexattr: c_long = 236; +pub const SYS_fremovexattr: c_long = 237; +pub const SYS_tkill: c_long = 238; +pub const SYS_sendfile64: c_long = 239; +pub const SYS_futex: c_long = 240; +pub const SYS_sched_setaffinity: c_long = 241; +pub const SYS_sched_getaffinity: c_long = 242; +pub const SYS_io_setup: c_long = 243; +pub const SYS_io_destroy: c_long = 244; +pub const SYS_io_getevents: c_long = 245; +pub const SYS_io_submit: c_long = 246; +pub const SYS_io_cancel: c_long = 247; +pub const SYS_exit_group: c_long = 248; +pub const SYS_lookup_dcookie: c_long = 249; +pub const SYS_epoll_create: c_long = 250; +pub const SYS_epoll_ctl: c_long = 251; +pub const SYS_epoll_wait: c_long = 252; +pub const SYS_remap_file_pages: c_long = 253; +pub const SYS_set_tid_address: c_long = 256; +pub const SYS_timer_create: c_long = 257; +pub const SYS_timer_settime: c_long = 258; +pub const SYS_timer_gettime: c_long = 259; +pub const SYS_timer_getoverrun: c_long = 260; +pub const SYS_timer_delete: c_long = 261; +pub const SYS_clock_settime: c_long = 262; +pub const SYS_clock_gettime: c_long = 263; +pub const SYS_clock_getres: c_long = 264; +pub const SYS_clock_nanosleep: c_long = 265; +pub const SYS_statfs64: 
c_long = 266; +pub const SYS_fstatfs64: c_long = 267; +pub const SYS_tgkill: c_long = 268; +pub const SYS_utimes: c_long = 269; +pub const SYS_pciconfig_iobase: c_long = 271; +pub const SYS_pciconfig_read: c_long = 272; +pub const SYS_pciconfig_write: c_long = 273; +pub const SYS_mq_open: c_long = 274; +pub const SYS_mq_unlink: c_long = 275; +pub const SYS_mq_timedsend: c_long = 276; +pub const SYS_mq_timedreceive: c_long = 277; +pub const SYS_mq_notify: c_long = 278; +pub const SYS_mq_getsetattr: c_long = 279; +pub const SYS_waitid: c_long = 280; +pub const SYS_socket: c_long = 281; +pub const SYS_bind: c_long = 282; +pub const SYS_connect: c_long = 283; +pub const SYS_listen: c_long = 284; +pub const SYS_accept: c_long = 285; +pub const SYS_getsockname: c_long = 286; +pub const SYS_getpeername: c_long = 287; +pub const SYS_socketpair: c_long = 288; +pub const SYS_send: c_long = 289; +pub const SYS_sendto: c_long = 290; +pub const SYS_recv: c_long = 291; +pub const SYS_recvfrom: c_long = 292; +pub const SYS_shutdown: c_long = 293; +pub const SYS_setsockopt: c_long = 294; +pub const SYS_getsockopt: c_long = 295; +pub const SYS_sendmsg: c_long = 296; +pub const SYS_recvmsg: c_long = 297; +pub const SYS_semop: c_long = 298; +pub const SYS_semget: c_long = 299; +pub const SYS_semctl: c_long = 300; +pub const SYS_msgsnd: c_long = 301; +pub const SYS_msgrcv: c_long = 302; +pub const SYS_msgget: c_long = 303; +pub const SYS_msgctl: c_long = 304; +pub const SYS_shmat: c_long = 305; +pub const SYS_shmdt: c_long = 306; +pub const SYS_shmget: c_long = 307; +pub const SYS_shmctl: c_long = 308; +pub const SYS_add_key: c_long = 309; +pub const SYS_request_key: c_long = 310; +pub const SYS_keyctl: c_long = 311; +pub const SYS_semtimedop: c_long = 312; +pub const SYS_vserver: c_long = 313; +pub const SYS_ioprio_set: c_long = 314; +pub const SYS_ioprio_get: c_long = 315; +pub const SYS_inotify_init: c_long = 316; +pub const SYS_inotify_add_watch: c_long = 317; +pub const SYS_inotify_rm_watch: c_long = 318; +pub const SYS_mbind: c_long = 319; +pub const SYS_get_mempolicy: c_long = 320; +pub const SYS_set_mempolicy: c_long = 321; +pub const SYS_openat: c_long = 322; +pub const SYS_mkdirat: c_long = 323; +pub const SYS_mknodat: c_long = 324; +pub const SYS_fchownat: c_long = 325; +pub const SYS_futimesat: c_long = 326; +pub const SYS_fstatat64: c_long = 327; +pub const SYS_unlinkat: c_long = 328; +pub const SYS_renameat: c_long = 329; +pub const SYS_linkat: c_long = 330; +pub const SYS_symlinkat: c_long = 331; +pub const SYS_readlinkat: c_long = 332; +pub const SYS_fchmodat: c_long = 333; +pub const SYS_faccessat: c_long = 334; +pub const SYS_pselect6: c_long = 335; +pub const SYS_ppoll: c_long = 336; +pub const SYS_unshare: c_long = 337; +pub const SYS_set_robust_list: c_long = 338; +pub const SYS_get_robust_list: c_long = 339; +pub const SYS_splice: c_long = 340; +pub const SYS_tee: c_long = 342; +pub const SYS_vmsplice: c_long = 343; +pub const SYS_move_pages: c_long = 344; +pub const SYS_getcpu: c_long = 345; +pub const SYS_epoll_pwait: c_long = 346; +pub const SYS_kexec_load: c_long = 347; +pub const SYS_utimensat: c_long = 348; +pub const SYS_signalfd: c_long = 349; +pub const SYS_timerfd_create: c_long = 350; +pub const SYS_eventfd: c_long = 351; +pub const SYS_fallocate: c_long = 352; +pub const SYS_timerfd_settime: c_long = 353; +pub const SYS_timerfd_gettime: c_long = 354; +pub const SYS_signalfd4: c_long = 355; +pub const SYS_eventfd2: c_long = 356; +pub const SYS_epoll_create1: c_long = 357; +pub 
const SYS_dup3: c_long = 358; +pub const SYS_pipe2: c_long = 359; +pub const SYS_inotify_init1: c_long = 360; +pub const SYS_preadv: c_long = 361; +pub const SYS_pwritev: c_long = 362; +pub const SYS_rt_tgsigqueueinfo: c_long = 363; +pub const SYS_perf_event_open: c_long = 364; +pub const SYS_recvmmsg: c_long = 365; +pub const SYS_accept4: c_long = 366; +pub const SYS_fanotify_init: c_long = 367; +pub const SYS_fanotify_mark: c_long = 368; +pub const SYS_prlimit64: c_long = 369; +pub const SYS_name_to_handle_at: c_long = 370; +pub const SYS_open_by_handle_at: c_long = 371; +pub const SYS_clock_adjtime: c_long = 372; +pub const SYS_syncfs: c_long = 373; +pub const SYS_sendmmsg: c_long = 374; +pub const SYS_setns: c_long = 375; +pub const SYS_process_vm_readv: c_long = 376; +pub const SYS_process_vm_writev: c_long = 377; +pub const SYS_kcmp: c_long = 378; +pub const SYS_finit_module: c_long = 379; +pub const SYS_sched_setattr: c_long = 380; +pub const SYS_sched_getattr: c_long = 381; +pub const SYS_renameat2: c_long = 382; +pub const SYS_seccomp: c_long = 383; +pub const SYS_getrandom: c_long = 384; +pub const SYS_memfd_create: c_long = 385; +pub const SYS_bpf: c_long = 386; +pub const SYS_execveat: c_long = 387; +pub const SYS_userfaultfd: c_long = 388; +pub const SYS_membarrier: c_long = 389; +pub const SYS_mlock2: c_long = 390; +pub const SYS_copy_file_range: c_long = 391; +pub const SYS_preadv2: c_long = 392; +pub const SYS_pwritev2: c_long = 393; +pub const SYS_pkey_mprotect: c_long = 394; +pub const SYS_pkey_alloc: c_long = 395; +pub const SYS_pkey_free: c_long = 396; +pub const SYS_statx: c_long = 397; +pub const SYS_pidfd_send_signal: c_long = 424; +pub const SYS_io_uring_setup: c_long = 425; +pub const SYS_io_uring_enter: c_long = 426; +pub const SYS_io_uring_register: c_long = 427; +pub const SYS_open_tree: c_long = 428; +pub const SYS_move_mount: c_long = 429; +pub const SYS_fsopen: c_long = 430; +pub const SYS_fsconfig: c_long = 431; +pub const SYS_fsmount: c_long = 432; +pub const SYS_fspick: c_long = 433; +pub const SYS_pidfd_open: c_long = 434; +pub const SYS_clone3: c_long = 435; +pub const SYS_close_range: c_long = 436; +pub const SYS_openat2: c_long = 437; +pub const SYS_pidfd_getfd: c_long = 438; +pub const SYS_faccessat2: c_long = 439; +pub const SYS_process_madvise: c_long = 440; +pub const SYS_epoll_pwait2: c_long = 441; +pub const SYS_mount_setattr: c_long = 442; +pub const SYS_quotactl_fd: c_long = 443; +pub const SYS_landlock_create_ruleset: c_long = 444; +pub const SYS_landlock_add_rule: c_long = 445; +pub const SYS_landlock_restrict_self: c_long = 446; +pub const SYS_memfd_secret: c_long = 447; +pub const SYS_process_mrelease: c_long = 448; +pub const SYS_futex_waitv: c_long = 449; +pub const SYS_set_mempolicy_home_node: c_long = 450; +pub const SYS_mseal: c_long = 462; diff --git a/vendor/libc/src/unix/linux_like/linux/musl/b32/hexagon.rs b/vendor/libc/src/unix/linux_like/linux/musl/b32/hexagon.rs new file mode 100644 index 00000000000000..b6879535541848 --- /dev/null +++ b/vendor/libc/src/unix/linux_like/linux/musl/b32/hexagon.rs @@ -0,0 +1,621 @@ +use crate::prelude::*; + +pub type wchar_t = u32; +pub type stat64 = crate::stat; + +s! 
{ + pub struct stat { + pub st_dev: crate::dev_t, + pub st_ino: c_ulonglong, + pub st_mode: c_uint, + pub st_nlink: c_uint, + pub st_uid: c_uint, + pub st_gid: c_uint, + pub st_rdev: c_ulonglong, + __st_rdev_padding: c_ulong, + pub st_size: c_longlong, + pub st_blksize: crate::blksize_t, + __st_blksize_padding: c_int, + pub st_blocks: crate::blkcnt_t, + pub st_atime: crate::time_t, + pub st_atime_nsec: c_long, + pub st_mtime: crate::time_t, + pub st_mtime_nsec: c_long, + pub st_ctime: crate::time_t, + pub st_ctime_nsec: c_long, + + __unused: [c_int; 2], + } + + pub struct stack_t { + pub ss_sp: *mut c_void, + pub ss_flags: c_int, + pub ss_size: size_t, + } + + pub struct ipc_perm { + #[cfg(musl_v1_2_3)] + pub __key: crate::key_t, + #[cfg(not(musl_v1_2_3))] + #[deprecated( + since = "0.2.173", + note = "This field is incorrectly named and will be changed + to __key in a future release" + )] + pub __ipc_perm_key: crate::key_t, + pub uid: crate::uid_t, + pub gid: crate::gid_t, + pub cuid: crate::uid_t, + pub cgid: crate::gid_t, + pub mode: crate::mode_t, + pub __seq: c_ushort, + } + + pub struct shmid_ds { + pub shm_perm: crate::ipc_perm, + pub shm_segsz: size_t, + pub shm_atime: crate::time_t, + __unused1: c_int, + pub shm_dtime: crate::time_t, + __unused2: c_int, + pub shm_ctime: crate::time_t, + __unused3: c_int, + pub shm_cpid: crate::pid_t, + pub shm_lpid: crate::pid_t, + pub shm_nattch: c_ulong, + __pad1: c_ulong, + __pad2: c_ulong, + } + + pub struct msqid_ds { + pub msg_perm: crate::ipc_perm, + pub msg_stime: crate::time_t, + __unused1: c_int, + pub msg_rtime: crate::time_t, + __unused2: c_int, + pub msg_ctime: crate::time_t, + __unused3: c_int, + pub __msg_cbytes: c_ulong, + pub msg_qnum: crate::msgqnum_t, + pub msg_qbytes: crate::msglen_t, + pub msg_lspid: crate::pid_t, + pub msg_lrpid: crate::pid_t, + __pad1: c_ulong, + __pad2: c_ulong, + } +} + +pub const AF_FILE: c_int = 1; +pub const AF_KCM: c_int = 41; +pub const AF_MAX: c_int = 43; +pub const AF_QIPCRTR: c_int = 42; +pub const EADDRINUSE: c_int = 98; +pub const EADDRNOTAVAIL: c_int = 99; +pub const EAFNOSUPPORT: c_int = 97; +pub const EALREADY: c_int = 114; +pub const EBADE: c_int = 52; +pub const EBADMSG: c_int = 74; +pub const EBADR: c_int = 53; +pub const EBADRQC: c_int = 56; +pub const EBADSLT: c_int = 57; +pub const ECANCELED: c_int = 125; +pub const ECHRNG: c_int = 44; +pub const ECONNABORTED: c_int = 103; +pub const ECONNREFUSED: c_int = 111; +pub const ECONNRESET: c_int = 104; +pub const EDEADLK: c_int = 35; +pub const EDEADLOCK: c_int = 35; +pub const EDESTADDRREQ: c_int = 89; +pub const EDQUOT: c_int = 122; +pub const EHOSTDOWN: c_int = 112; +pub const EHOSTUNREACH: c_int = 113; +pub const EHWPOISON: c_int = 133; +pub const EIDRM: c_int = 43; +pub const EILSEQ: c_int = 84; +pub const EINPROGRESS: c_int = 115; +pub const EISCONN: c_int = 106; +pub const EISNAM: c_int = 120; +pub const EKEYEXPIRED: c_int = 127; +pub const EKEYREJECTED: c_int = 129; +pub const EKEYREVOKED: c_int = 128; +pub const EL2HLT: c_int = 51; +pub const EL2NSYNC: c_int = 45; +pub const EL3HLT: c_int = 46; +pub const EL3RST: c_int = 47; +pub const ELIBACC: c_int = 79; +pub const ELIBBAD: c_int = 80; +pub const ELIBEXEC: c_int = 83; +pub const ELIBMAX: c_int = 82; +pub const ELIBSCN: c_int = 81; +pub const ELNRNG: c_int = 48; +pub const ELOOP: c_int = 40; +pub const EMEDIUMTYPE: c_int = 124; +pub const EMSGSIZE: c_int = 90; +pub const EMULTIHOP: c_int = 72; +pub const ENAMETOOLONG: c_int = 36; +pub const ENAVAIL: c_int = 119; +pub const ENETDOWN: 
c_int = 100; +pub const ENETRESET: c_int = 102; +pub const ENETUNREACH: c_int = 101; +pub const ENOANO: c_int = 55; +pub const ENOBUFS: c_int = 105; +pub const ENOCSI: c_int = 50; +pub const ENOKEY: c_int = 126; +pub const ENOLCK: c_int = 37; +pub const ENOMEDIUM: c_int = 123; +pub const ENOMSG: c_int = 42; +pub const ENOPROTOOPT: c_int = 92; +pub const ENOSYS: c_int = 38; +pub const ENOTCONN: c_int = 107; +pub const ENOTEMPTY: c_int = 39; +pub const ENOTNAM: c_int = 118; +pub const ENOTRECOVERABLE: c_int = 131; +pub const ENOTSOCK: c_int = 88; +pub const ENOTSUP: c_int = 95; +pub const ENOTUNIQ: c_int = 76; +pub const EOPNOTSUPP: c_int = 95; +pub const EOVERFLOW: c_int = 75; +pub const EOWNERDEAD: c_int = 130; +pub const EPFNOSUPPORT: c_int = 96; +pub const EREMCHG: c_int = 78; +pub const ERESTART: c_int = 85; +pub const ERFKILL: c_int = 132; +pub const ESHUTDOWN: c_int = 108; +pub const ESOCKTNOSUPPORT: c_int = 94; +pub const ESTALE: c_int = 116; +pub const ESTRPIPE: c_int = 86; +pub const ETOOMANYREFS: c_int = 109; +pub const ETIMEDOUT: c_int = 110; +pub const EUCLEAN: c_int = 117; +pub const EUNATCH: c_int = 49; +pub const EUSERS: c_int = 87; +pub const EXFULL: c_int = 54; +pub const EXTPROC: c_int = 65536; +pub const F_EXLCK: c_int = 4; +pub const F_GETLK: c_int = 12; +pub const F_GETOWN: c_int = 9; +pub const F_GETOWNER_UIDS: c_int = 17; +pub const F_GETOWN_EX: c_int = 16; +pub const F_GETSIG: c_int = 11; +pub const F_LINUX_SPECIFIC_BASE: c_int = 1024; +pub const FLUSHO: c_int = 4096; +pub const F_OWNER_PGRP: c_int = 2; +pub const F_OWNER_PID: c_int = 1; +pub const F_OWNER_TID: c_int = 0; +pub const F_SETLK: c_int = 13; +pub const F_SETLKW: c_int = 14; +pub const F_SETOWN: c_int = 8; +pub const F_SETOWN_EX: c_int = 15; +pub const F_SETSIG: c_int = 10; +pub const F_SHLCK: c_int = 8; +pub const IEXTEN: c_int = 32768; +pub const MAP_ANON: c_int = 32; +pub const MAP_DENYWRITE: c_int = 2048; +pub const MAP_EXECUTABLE: c_int = 4096; +pub const MAP_GROWSDOWN: c_int = 256; +pub const MAP_HUGETLB: c_int = 262144; +pub const MAP_LOCKED: c_int = 8192; +pub const MAP_NONBLOCK: c_int = 65536; +pub const MAP_NORESERVE: c_int = 16384; +pub const MAP_POPULATE: c_int = 32768; +pub const MAP_STACK: c_int = 131072; +pub const MAP_UNINITIALIZED: c_int = 0; +pub const O_APPEND: c_int = 1024; +pub const O_ASYNC: c_int = 8192; +pub const O_CREAT: c_int = 64; +pub const O_DIRECT: c_int = 16384; +pub const O_DIRECTORY: c_int = 65536; +pub const O_DSYNC: c_int = 4096; +pub const O_EXCL: c_int = 128; +pub const O_LARGEFILE: c_int = 32768; +pub const O_NOCTTY: c_int = 256; +pub const O_NOFOLLOW: c_int = 131072; +pub const O_NONBLOCK: c_int = 2048; +pub const O_SYNC: c_int = 1052672; +pub const PF_FILE: c_int = 1; +pub const PF_KCM: c_int = 41; +pub const PF_MAX: c_int = 43; +pub const PF_QIPCRTR: c_int = 42; +pub const SA_ONSTACK: c_int = 0x08000000; +pub const SA_SIGINFO: c_int = 0x00000004; +pub const SA_NOCLDWAIT: c_int = 0x00000002; +pub const SIGBUS: c_int = 7; +pub const SIGCHLD: c_int = 17; +pub const SIGCONT: c_int = 18; +pub const SIGIO: c_int = 29; +pub const SIGPOLL: c_int = 29; +pub const SIGPROF: c_int = 27; +pub const SIGPWR: c_int = 30; +pub const SIGSTKFLT: c_int = 16; +pub const SIGSTKSZ: size_t = 8192; +pub const MINSIGSTKSZ: size_t = 2048; +pub const SIGSTOP: c_int = 19; +pub const SIGSYS: c_int = 31; +pub const SIGTSTP: c_int = 20; +pub const SIGTTIN: c_int = 21; +pub const SIGTTOU: c_int = 22; +pub const SIGURG: c_int = 23; +pub const SIGUSR1: c_int = 10; +pub const SIGUSR2: c_int = 12; 
+pub const SIGVTALRM: c_int = 26; +pub const SIGWINCH: c_int = 28; +pub const SIGXCPU: c_int = 24; +pub const SIGXFSZ: c_int = 25; +pub const SIG_SETMASK: c_int = 2; // FIXME(musl) check these +pub const SIG_BLOCK: c_int = 0x000000; +pub const SIG_UNBLOCK: c_int = 0x01; +pub const SOCK_DGRAM: c_int = 2; +pub const SOCK_STREAM: c_int = 1; +pub const SOL_CAIF: c_int = 278; +pub const SOL_IUCV: c_int = 277; +pub const SOL_KCM: c_int = 281; +pub const SOL_NFC: c_int = 280; +pub const SOL_PNPIPE: c_int = 275; +pub const SOL_PPPOL2TP: c_int = 273; +pub const SOL_RDS: c_int = 276; +pub const SOL_RXRPC: c_int = 272; + +pub const SYS3264_fadvise64: c_int = 223; +pub const SYS3264_fcntl: c_int = 25; +pub const SYS3264_fstatat: c_int = 79; +pub const SYS3264_fstat: c_int = 80; +pub const SYS3264_fstatfs: c_int = 44; +pub const SYS3264_ftruncate: c_int = 46; +pub const SYS3264_lseek: c_int = 62; +pub const SYS3264_lstat: c_int = 1039; +pub const SYS3264_mmap: c_int = 222; +pub const SYS3264_sendfile: c_int = 71; +pub const SYS3264_stat: c_int = 1038; +pub const SYS3264_statfs: c_int = 43; +pub const SYS3264_truncate: c_int = 45; +pub const SYS_accept4: c_int = 242; +pub const SYS_accept: c_int = 202; +pub const SYS_access: c_int = 1033; +pub const SYS_acct: c_int = 89; +pub const SYS_add_key: c_int = 217; +pub const SYS_adjtimex: c_int = 171; +pub const SYS_alarm: c_int = 1059; +pub const SYS_arch_specific_syscall: c_int = 244; +pub const SYS_bdflush: c_int = 1075; +pub const SYS_bind: c_int = 200; +pub const SYS_bpf: c_int = 280; +pub const SYS_brk: c_int = 214; +pub const SYS_capget: c_int = 90; +pub const SYS_capset: c_int = 91; +pub const SYS_chdir: c_int = 49; +pub const SYS_chmod: c_int = 1028; +pub const SYS_chown: c_int = 1029; +pub const SYS_chroot: c_int = 51; +pub const SYS_clock_adjtime: c_int = 266; +pub const SYS_clock_getres: c_int = 114; +pub const SYS_clock_gettime: c_int = 113; +pub const SYS_clock_nanosleep: c_int = 115; +pub const SYS_clock_settime: c_int = 112; +pub const SYS_clone: c_int = 220; +pub const SYS_close: c_int = 57; +pub const SYS_connect: c_int = 203; +pub const SYS_copy_file_range: c_int = -1; // FIXME(hexagon) +pub const SYS_creat: c_int = 1064; +pub const SYS_delete_module: c_int = 106; +pub const SYS_dup2: c_int = 1041; +pub const SYS_dup3: c_int = 24; +pub const SYS_dup: c_int = 23; +pub const SYS_epoll_create1: c_int = 20; +pub const SYS_epoll_create: c_int = 1042; +pub const SYS_epoll_ctl: c_int = 21; +pub const SYS_epoll_pwait: c_int = 22; +pub const SYS_epoll_wait: c_int = 1069; +pub const SYS_eventfd2: c_int = 19; +pub const SYS_eventfd: c_int = 1044; +pub const SYS_execveat: c_int = 281; +pub const SYS_execve: c_int = 221; +pub const SYS_exit: c_int = 93; +pub const SYS_exit_group: c_int = 94; +pub const SYS_faccessat: c_int = 48; +pub const SYS_fadvise64_64: c_int = 223; +pub const SYS_fallocate: c_int = 47; +pub const SYS_fanotify_init: c_int = 262; +pub const SYS_fanotify_mark: c_int = 263; +pub const SYS_fchdir: c_int = 50; +pub const SYS_fchmodat: c_int = 53; +pub const SYS_fchmod: c_int = 52; +pub const SYS_fchownat: c_int = 54; +pub const SYS_fchown: c_int = 55; +pub const SYS_fcntl64: c_int = 25; +pub const SYS_fcntl: c_int = 25; +pub const SYS_fdatasync: c_int = 83; +pub const SYS_fgetxattr: c_int = 10; +pub const SYS_finit_module: c_int = 273; +pub const SYS_flistxattr: c_int = 13; +pub const SYS_flock: c_int = 32; +pub const SYS_fork: c_int = 1079; +pub const SYS_fremovexattr: c_int = 16; +pub const SYS_fsetxattr: c_int = 7; +pub const 
SYS_fstat64: c_int = 80; +pub const SYS_fstatat64: c_int = 79; +pub const SYS_fstatfs64: c_int = 44; +pub const SYS_fstatfs: c_int = 44; +pub const SYS_fsync: c_int = 82; +pub const SYS_ftruncate64: c_int = 46; +pub const SYS_ftruncate: c_int = 46; +pub const SYS_futex: c_int = 98; +pub const SYS_futimesat: c_int = 1066; +pub const SYS_getcpu: c_int = 168; +pub const SYS_getcwd: c_int = 17; +pub const SYS_getdents64: c_int = 61; +pub const SYS_getdents: c_int = 1065; +pub const SYS_getegid: c_int = 177; +pub const SYS_geteuid: c_int = 175; +pub const SYS_getgid: c_int = 176; +pub const SYS_getgroups: c_int = 158; +pub const SYS_getitimer: c_int = 102; +pub const SYS_get_mempolicy: c_int = 236; +pub const SYS_getpeername: c_int = 205; +pub const SYS_getpgid: c_int = 155; +pub const SYS_getpgrp: c_int = 1060; +pub const SYS_getpid: c_int = 172; +pub const SYS_getppid: c_int = 173; +pub const SYS_getpriority: c_int = 141; +pub const SYS_getrandom: c_int = 278; +pub const SYS_getresgid: c_int = 150; +pub const SYS_getresuid: c_int = 148; +pub const SYS_getrlimit: c_int = 163; +pub const SYS_get_robust_list: c_int = 100; +pub const SYS_getrusage: c_int = 165; +pub const SYS_getsid: c_int = 156; +pub const SYS_getsockname: c_int = 204; +pub const SYS_getsockopt: c_int = 209; +pub const SYS_gettid: c_int = 178; +pub const SYS_gettimeofday: c_int = 169; +pub const SYS_getuid: c_int = 174; +pub const SYS_getxattr: c_int = 8; +pub const SYS_init_module: c_int = 105; +pub const SYS_inotify_add_watch: c_int = 27; +pub const SYS_inotify_init1: c_int = 26; +pub const SYS_inotify_init: c_int = 1043; +pub const SYS_inotify_rm_watch: c_int = 28; +pub const SYS_io_cancel: c_int = 3; +pub const SYS_ioctl: c_int = 29; +pub const SYS_io_destroy: c_int = 1; +pub const SYS_io_getevents: c_int = 4; +pub const SYS_ioprio_get: c_int = 31; +pub const SYS_ioprio_set: c_int = 30; +pub const SYS_io_setup: c_int = 0; +pub const SYS_io_submit: c_int = 2; +pub const SYS_kcmp: c_int = 272; +pub const SYS_kexec_load: c_int = 104; +pub const SYS_keyctl: c_int = 219; +pub const SYS_kill: c_int = 129; +pub const SYS_lchown: c_int = 1032; +pub const SYS_lgetxattr: c_int = 9; +pub const SYS_linkat: c_int = 37; +pub const SYS_link: c_int = 1025; +pub const SYS_listen: c_int = 201; +pub const SYS_listxattr: c_int = 11; +pub const SYS_llistxattr: c_int = 12; +pub const SYS__llseek: c_int = 62; +pub const SYS_lookup_dcookie: c_int = 18; +pub const SYS_lremovexattr: c_int = 15; +pub const SYS_lseek: c_int = 62; +pub const SYS_lsetxattr: c_int = 6; +pub const SYS_lstat64: c_int = 1039; +pub const SYS_lstat: c_int = 1039; +pub const SYS_madvise: c_int = 233; +pub const SYS_mbind: c_int = 235; +pub const SYS_memfd_create: c_int = 279; +pub const SYS_migrate_pages: c_int = 238; +pub const SYS_mincore: c_int = 232; +pub const SYS_mkdirat: c_int = 34; +pub const SYS_mkdir: c_int = 1030; +pub const SYS_mknodat: c_int = 33; +pub const SYS_mknod: c_int = 1027; +pub const SYS_mlockall: c_int = 230; +pub const SYS_mlock: c_int = 228; +pub const SYS_mmap2: c_int = 222; +pub const SYS_mount: c_int = 40; +pub const SYS_move_pages: c_int = 239; +pub const SYS_mprotect: c_int = 226; +pub const SYS_mq_getsetattr: c_int = 185; +pub const SYS_mq_notify: c_int = 184; +pub const SYS_mq_open: c_int = 180; +pub const SYS_mq_timedreceive: c_int = 183; +pub const SYS_mq_timedsend: c_int = 182; +pub const SYS_mq_unlink: c_int = 181; +pub const SYS_mremap: c_int = 216; +pub const SYS_msgctl: c_int = 187; +pub const SYS_msgget: c_int = 186; +pub const 
SYS_msgrcv: c_int = 188; +pub const SYS_msgsnd: c_int = 189; +pub const SYS_msync: c_int = 227; +pub const SYS_munlockall: c_int = 231; +pub const SYS_munlock: c_int = 229; +pub const SYS_munmap: c_int = 215; +pub const SYS_name_to_handle_at: c_int = 264; +pub const SYS_nanosleep: c_int = 101; +pub const SYS_newfstatat: c_int = 79; +pub const SYS_nfsservctl: c_int = 42; +pub const SYS_oldwait4: c_int = 1072; +pub const SYS_openat: c_int = 56; +pub const SYS_open_by_handle_at: c_int = 265; +pub const SYS_open: c_int = 1024; +pub const SYS_pause: c_int = 1061; +pub const SYS_perf_event_open: c_int = 241; +pub const SYS_personality: c_int = 92; +pub const SYS_pipe2: c_int = 59; +pub const SYS_pipe: c_int = 1040; +pub const SYS_pivot_root: c_int = 41; +pub const SYS_poll: c_int = 1068; +pub const SYS_ppoll: c_int = 73; +pub const SYS_prctl: c_int = 167; +pub const SYS_pread64: c_int = 67; +pub const SYS_preadv: c_int = 69; +pub const SYS_prlimit64: c_int = 261; +pub const SYS_process_vm_readv: c_int = 270; +pub const SYS_process_vm_writev: c_int = 271; +pub const SYS_pselect6: c_int = 72; +pub const SYS_ptrace: c_int = 117; +pub const SYS_pwrite64: c_int = 68; +pub const SYS_pwritev: c_int = 70; +pub const SYS_quotactl: c_int = 60; +pub const SYS_readahead: c_int = 213; +pub const SYS_read: c_int = 63; +pub const SYS_readlinkat: c_int = 78; +pub const SYS_readlink: c_int = 1035; +pub const SYS_readv: c_int = 65; +pub const SYS_reboot: c_int = 142; +pub const SYS_recv: c_int = 1073; +pub const SYS_recvfrom: c_int = 207; +pub const SYS_recvmmsg: c_int = 243; +pub const SYS_recvmsg: c_int = 212; +pub const SYS_remap_file_pages: c_int = 234; +pub const SYS_removexattr: c_int = 14; +pub const SYS_renameat2: c_int = 276; +pub const SYS_renameat: c_int = 38; +pub const SYS_rename: c_int = 1034; +pub const SYS_request_key: c_int = 218; +pub const SYS_restart_syscall: c_int = 128; +pub const SYS_rmdir: c_int = 1031; +pub const SYS_rt_sigaction: c_int = 134; +pub const SYS_rt_sigpending: c_int = 136; +pub const SYS_rt_sigprocmask: c_int = 135; +pub const SYS_rt_sigqueueinfo: c_int = 138; +pub const SYS_rt_sigreturn: c_int = 139; +pub const SYS_rt_sigsuspend: c_int = 133; +pub const SYS_rt_sigtimedwait: c_int = 137; +pub const SYS_rt_tgsigqueueinfo: c_int = 240; +pub const SYS_sched_getaffinity: c_int = 123; +pub const SYS_sched_getattr: c_int = 275; +pub const SYS_sched_getparam: c_int = 121; +pub const SYS_sched_get_priority_max: c_int = 125; +pub const SYS_sched_get_priority_min: c_int = 126; +pub const SYS_sched_getscheduler: c_int = 120; +pub const SYS_sched_rr_get_interval: c_int = 127; +pub const SYS_sched_setaffinity: c_int = 122; +pub const SYS_sched_setattr: c_int = 274; +pub const SYS_sched_setparam: c_int = 118; +pub const SYS_sched_setscheduler: c_int = 119; +pub const SYS_sched_yield: c_int = 124; +pub const SYS_seccomp: c_int = 277; +pub const SYS_select: c_int = 1067; +pub const SYS_semctl: c_int = 191; +pub const SYS_semget: c_int = 190; +pub const SYS_semop: c_int = 193; +pub const SYS_semtimedop: c_int = 192; +pub const SYS_send: c_int = 1074; +pub const SYS_sendfile64: c_int = 71; +pub const SYS_sendfile: c_int = 71; +pub const SYS_sendmmsg: c_int = 269; +pub const SYS_sendmsg: c_int = 211; +pub const SYS_sendto: c_int = 206; +pub const SYS_setdomainname: c_int = 162; +pub const SYS_setfsgid: c_int = 152; +pub const SYS_setfsuid: c_int = 151; +pub const SYS_setgid: c_int = 144; +pub const SYS_setgroups: c_int = 159; +pub const SYS_sethostname: c_int = 161; +pub const SYS_setitimer: 
c_int = 103; +pub const SYS_set_mempolicy: c_int = 237; +pub const SYS_setns: c_int = 268; +pub const SYS_setpgid: c_int = 154; +pub const SYS_setpriority: c_int = 140; +pub const SYS_setregid: c_int = 143; +pub const SYS_setresgid: c_int = 149; +pub const SYS_setresuid: c_int = 147; +pub const SYS_setreuid: c_int = 145; +pub const SYS_setrlimit: c_int = 164; +pub const SYS_set_robust_list: c_int = 99; +pub const SYS_setsid: c_int = 157; +pub const SYS_setsockopt: c_int = 208; +pub const SYS_set_tid_address: c_int = 96; +pub const SYS_settimeofday: c_int = 170; +pub const SYS_setuid: c_int = 146; +pub const SYS_setxattr: c_int = 5; +pub const SYS_shmat: c_int = 196; +pub const SYS_shmctl: c_int = 195; +pub const SYS_shmdt: c_int = 197; +pub const SYS_shmget: c_int = 194; +pub const SYS_shutdown: c_int = 210; +pub const SYS_sigaltstack: c_int = 132; +pub const SYS_signalfd4: c_int = 74; +pub const SYS_signalfd: c_int = 1045; +pub const SYS_socket: c_int = 198; +pub const SYS_socketpair: c_int = 199; +pub const SYS_splice: c_int = 76; +pub const SYS_stat64: c_int = 1038; +pub const SYS_stat: c_int = 1038; +pub const SYS_statfs64: c_int = 43; +pub const SYS_swapoff: c_int = 225; +pub const SYS_swapon: c_int = 224; +pub const SYS_symlinkat: c_int = 36; +pub const SYS_symlink: c_int = 1036; +pub const SYS_sync: c_int = 81; +pub const SYS_sync_file_range2: c_int = 84; +pub const SYS_sync_file_range: c_int = 84; +pub const SYS_syncfs: c_int = 267; +pub const SYS_syscalls: c_int = 1080; +pub const SYS__sysctl: c_int = 1078; +pub const SYS_sysinfo: c_int = 179; +pub const SYS_syslog: c_int = 116; +pub const SYS_tee: c_int = 77; +pub const SYS_tgkill: c_int = 131; +pub const SYS_time: c_int = 1062; +pub const SYS_timer_create: c_int = 107; +pub const SYS_timer_delete: c_int = 111; +pub const SYS_timerfd_create: c_int = 85; +pub const SYS_timerfd_gettime: c_int = 87; +pub const SYS_timerfd_settime: c_int = 86; +pub const SYS_timer_getoverrun: c_int = 109; +pub const SYS_timer_gettime: c_int = 108; +pub const SYS_timer_settime: c_int = 110; +pub const SYS_times: c_int = 153; +pub const SYS_tkill: c_int = 130; +pub const SYS_truncate64: c_int = 45; +pub const SYS_truncate: c_int = 45; +pub const SYS_umask: c_int = 166; +pub const SYS_umount2: c_int = 39; +pub const SYS_umount: c_int = 1076; +pub const SYS_uname: c_int = 160; +pub const SYS_unlinkat: c_int = 35; +pub const SYS_unlink: c_int = 1026; +pub const SYS_unshare: c_int = 97; +pub const SYS_uselib: c_int = 1077; +pub const SYS_ustat: c_int = 1070; +pub const SYS_utime: c_int = 1063; +pub const SYS_utimensat: c_int = 88; +pub const SYS_utimes: c_int = 1037; +pub const SYS_vfork: c_int = 1071; +pub const SYS_vhangup: c_int = 58; +pub const SYS_vmsplice: c_int = 75; +pub const SYS_wait4: c_int = 260; +pub const SYS_waitid: c_int = 95; +pub const SYS_write: c_int = 64; +pub const SYS_writev: c_int = 66; +pub const SYS_statx: c_int = 291; +pub const SYS_pidfd_send_signal: c_long = 424; +pub const SYS_io_uring_setup: c_long = 425; +pub const SYS_io_uring_enter: c_long = 426; +pub const SYS_io_uring_register: c_long = 427; +pub const SYS_open_tree: c_long = 428; +pub const SYS_move_mount: c_long = 429; +pub const SYS_fsopen: c_long = 430; +pub const SYS_fsconfig: c_long = 431; +pub const SYS_fsmount: c_long = 432; +pub const SYS_fspick: c_long = 433; +pub const SYS_pidfd_open: c_long = 434; +pub const SYS_clone3: c_long = 435; +pub const SYS_close_range: c_long = 436; +pub const SYS_openat2: c_long = 437; +pub const SYS_pidfd_getfd: c_long = 438; +pub 
const SYS_faccessat2: c_long = 439;
+pub const SYS_process_madvise: c_long = 440;
+pub const SYS_epoll_pwait2: c_long = 441;
+pub const SYS_mount_setattr: c_long = 442;
+pub const TIOCM_LOOP: c_int = 32768;
+pub const TIOCM_OUT1: c_int = 8192;
+pub const TIOCM_OUT2: c_int = 16384;
+pub const TIOCSER_TEMT: c_int = 1;
+pub const TOSTOP: c_int = 256;
+pub const VEOF: c_int = 4;
+pub const VEOL2: c_int = 16;
+pub const VEOL: c_int = 11;
+pub const VMIN: c_int = 6;
diff --git a/vendor/libc/src/unix/linux_like/linux/musl/b32/mips/mod.rs b/vendor/libc/src/unix/linux_like/linux/musl/b32/mips/mod.rs
new file mode 100644
index 00000000000000..a623ff9a9f7574
--- /dev/null
+++ b/vendor/libc/src/unix/linux_like/linux/musl/b32/mips/mod.rs
@@ -0,0 +1,775 @@
+use crate::off_t;
+use crate::prelude::*;
+
+pub type wchar_t = c_int;
+
+s! {
+    pub struct stat {
+        pub st_dev: crate::dev_t,
+        __st_padding1: [c_long; 2],
+        pub st_ino: crate::ino_t,
+        pub st_mode: crate::mode_t,
+        pub st_nlink: crate::nlink_t,
+        pub st_uid: crate::uid_t,
+        pub st_gid: crate::gid_t,
+        pub st_rdev: crate::dev_t,
+        __st_padding2: [c_long; 2],
+        pub st_size: off_t,
+        pub st_atime: crate::time_t,
+        pub st_atime_nsec: c_long,
+        pub st_mtime: crate::time_t,
+        pub st_mtime_nsec: c_long,
+        pub st_ctime: crate::time_t,
+        pub st_ctime_nsec: c_long,
+        pub st_blksize: crate::blksize_t,
+        __st_padding3: c_long,
+        pub st_blocks: crate::blkcnt_t,
+        __st_padding4: [c_long; 14],
+    }
+
+    pub struct stat64 {
+        pub st_dev: crate::dev_t,
+        __st_padding1: [c_long; 2],
+        pub st_ino: crate::ino64_t,
+        pub st_mode: crate::mode_t,
+        pub st_nlink: crate::nlink_t,
+        pub st_uid: crate::uid_t,
+        pub st_gid: crate::gid_t,
+        pub st_rdev: crate::dev_t,
+        __st_padding2: [c_long; 2],
+        pub st_size: off_t,
+        pub st_atime: crate::time_t,
+        pub st_atime_nsec: c_long,
+        pub st_mtime: crate::time_t,
+        pub st_mtime_nsec: c_long,
+        pub st_ctime: crate::time_t,
+        pub st_ctime_nsec: c_long,
+        pub st_blksize: crate::blksize_t,
+        __st_padding3: c_long,
+        pub st_blocks: crate::blkcnt64_t,
+        __st_padding4: [c_long; 14],
+    }
+
+    pub struct stack_t {
+        pub ss_sp: *mut c_void,
+        pub ss_size: size_t,
+        pub ss_flags: c_int,
+    }
+
+    pub struct ipc_perm {
+        #[cfg(musl_v1_2_3)]
+        pub __key: crate::key_t,
+        #[cfg(not(musl_v1_2_3))]
+        #[deprecated(
+            since = "0.2.173",
+            note = "This field is incorrectly named and will be changed
+                to __key in a future release."
+ )] + pub __ipc_perm_key: crate::key_t, + pub uid: crate::uid_t, + pub gid: crate::gid_t, + pub cuid: crate::uid_t, + pub cgid: crate::gid_t, + pub mode: crate::mode_t, + pub __seq: c_int, + __unused1: c_long, + __unused2: c_long, + } + + pub struct shmid_ds { + pub shm_perm: crate::ipc_perm, + pub shm_segsz: size_t, + pub shm_atime: crate::time_t, + pub shm_dtime: crate::time_t, + pub shm_ctime: crate::time_t, + pub shm_cpid: crate::pid_t, + pub shm_lpid: crate::pid_t, + pub shm_nattch: c_ulong, + __pad1: c_ulong, + __pad2: c_ulong, + } + + pub struct msqid_ds { + pub msg_perm: crate::ipc_perm, + #[cfg(target_endian = "big")] + __unused1: c_int, + pub msg_stime: crate::time_t, + #[cfg(target_endian = "little")] + __unused1: c_int, + #[cfg(target_endian = "big")] + __unused2: c_int, + pub msg_rtime: crate::time_t, + #[cfg(target_endian = "little")] + __unused2: c_int, + #[cfg(target_endian = "big")] + __unused3: c_int, + pub msg_ctime: crate::time_t, + #[cfg(target_endian = "little")] + __unused3: c_int, + pub __msg_cbytes: c_ulong, + pub msg_qnum: crate::msgqnum_t, + pub msg_qbytes: crate::msglen_t, + pub msg_lspid: crate::pid_t, + pub msg_lrpid: crate::pid_t, + __pad1: c_ulong, + __pad2: c_ulong, + } + + pub struct statfs { + pub f_type: c_ulong, + pub f_bsize: c_ulong, + pub f_frsize: c_ulong, + pub f_blocks: crate::fsblkcnt_t, + pub f_bfree: crate::fsblkcnt_t, + pub f_files: crate::fsfilcnt_t, + pub f_ffree: crate::fsfilcnt_t, + pub f_bavail: crate::fsblkcnt_t, + pub f_fsid: crate::fsid_t, + pub f_namelen: c_ulong, + pub f_flags: c_ulong, + pub f_spare: [c_ulong; 5], + } + + pub struct statfs64 { + pub f_type: c_ulong, + pub f_bsize: c_ulong, + pub f_frsize: c_ulong, + pub f_blocks: crate::fsblkcnt_t, + pub f_bfree: crate::fsblkcnt_t, + pub f_files: crate::fsfilcnt_t, + pub f_ffree: crate::fsfilcnt_t, + pub f_bavail: crate::fsblkcnt_t, + pub f_fsid: crate::fsid_t, + pub f_namelen: c_ulong, + pub f_flags: c_ulong, + pub f_spare: [c_ulong; 5], + } +} + +s_no_extra_traits! 
{ + #[repr(align(8))] + pub struct max_align_t { + priv_: [f32; 4], + } +} + +pub const SIGSTKSZ: size_t = 8192; +pub const MINSIGSTKSZ: size_t = 2048; + +pub const O_DIRECT: c_int = 0o100000; +pub const O_DIRECTORY: c_int = 0o200000; +pub const O_NOFOLLOW: c_int = 0o400000; +pub const O_ASYNC: c_int = 0o10000; +pub const O_LARGEFILE: c_int = 0x2000; + +pub const MCL_CURRENT: c_int = 0x0001; +pub const MCL_FUTURE: c_int = 0x0002; +pub const MCL_ONFAULT: c_int = 0x0004; +pub const CBAUD: crate::tcflag_t = 0o0010017; +pub const TAB1: c_int = 0x00000800; +pub const TAB2: c_int = 0x00001000; +pub const TAB3: c_int = 0x00001800; +pub const CR1: c_int = 0x00000200; +pub const CR2: c_int = 0x00000400; +pub const CR3: c_int = 0x00000600; +pub const FF1: c_int = 0x00008000; +pub const BS1: c_int = 0x00002000; +pub const VT1: c_int = 0x00004000; +pub const VWERASE: usize = 14; +pub const VREPRINT: usize = 12; +pub const VSUSP: usize = 10; +pub const VSTART: usize = 8; +pub const VSTOP: usize = 9; +pub const VDISCARD: usize = 13; +pub const VTIME: usize = 5; +pub const IXON: crate::tcflag_t = 0x00000400; +pub const IXOFF: crate::tcflag_t = 0x00001000; +pub const ONLCR: crate::tcflag_t = 0x4; +pub const CSIZE: crate::tcflag_t = 0x00000030; +pub const CS6: crate::tcflag_t = 0x00000010; +pub const CS7: crate::tcflag_t = 0x00000020; +pub const CS8: crate::tcflag_t = 0x00000030; +pub const CSTOPB: crate::tcflag_t = 0x00000040; +pub const CREAD: crate::tcflag_t = 0x00000080; +pub const PARENB: crate::tcflag_t = 0x00000100; +pub const PARODD: crate::tcflag_t = 0x00000200; +pub const HUPCL: crate::tcflag_t = 0x00000400; +pub const CLOCAL: crate::tcflag_t = 0x00000800; +pub const ECHOKE: crate::tcflag_t = 0x00000800; +pub const ECHOE: crate::tcflag_t = 0x00000010; +pub const ECHOK: crate::tcflag_t = 0x00000020; +pub const ECHONL: crate::tcflag_t = 0x00000040; +pub const ECHOPRT: crate::tcflag_t = 0x00000400; +pub const ECHOCTL: crate::tcflag_t = 0x00000200; +pub const ISIG: crate::tcflag_t = 0x00000001; +pub const ICANON: crate::tcflag_t = 0x00000002; +pub const PENDIN: crate::tcflag_t = 0x00004000; +pub const NOFLSH: crate::tcflag_t = 0x00000080; +pub const CIBAUD: crate::tcflag_t = 0o02003600000; +pub const CBAUDEX: crate::tcflag_t = 0o010000; +pub const VSWTC: usize = 7; +pub const OLCUC: crate::tcflag_t = 0o000002; +pub const NLDLY: crate::tcflag_t = 0o000400; +pub const CRDLY: crate::tcflag_t = 0o003000; +pub const TABDLY: crate::tcflag_t = 0o014000; +pub const BSDLY: crate::tcflag_t = 0o020000; +pub const FFDLY: crate::tcflag_t = 0o100000; +pub const VTDLY: crate::tcflag_t = 0o040000; +pub const XTABS: crate::tcflag_t = 0o014000; +pub const B57600: crate::speed_t = 0o010001; +pub const B115200: crate::speed_t = 0o010002; +pub const B230400: crate::speed_t = 0o010003; +pub const B460800: crate::speed_t = 0o010004; +pub const B500000: crate::speed_t = 0o010005; +pub const B576000: crate::speed_t = 0o010006; +pub const B921600: crate::speed_t = 0o010007; +pub const B1000000: crate::speed_t = 0o010010; +pub const B1152000: crate::speed_t = 0o010011; +pub const B1500000: crate::speed_t = 0o010012; +pub const B2000000: crate::speed_t = 0o010013; +pub const B2500000: crate::speed_t = 0o010014; +pub const B3000000: crate::speed_t = 0o010015; +pub const B3500000: crate::speed_t = 0o010016; +pub const B4000000: crate::speed_t = 0o010017; + +pub const O_APPEND: c_int = 0o010; +pub const O_CREAT: c_int = 0o400; +pub const O_EXCL: c_int = 0o2000; +pub const O_NOCTTY: c_int = 0o4000; +pub const O_NONBLOCK: c_int = 
0o200; +pub const O_SYNC: c_int = 0o40020; +pub const O_RSYNC: c_int = 0o40020; +pub const O_DSYNC: c_int = 0o020; + +pub const MAP_ANON: c_int = 0x800; +pub const MAP_GROWSDOWN: c_int = 0x1000; +pub const MAP_DENYWRITE: c_int = 0x2000; +pub const MAP_EXECUTABLE: c_int = 0x4000; +pub const MAP_LOCKED: c_int = 0x8000; +pub const MAP_NORESERVE: c_int = 0x0400; +pub const MAP_POPULATE: c_int = 0x10000; +pub const MAP_NONBLOCK: c_int = 0x20000; +pub const MAP_STACK: c_int = 0x40000; +pub const MAP_HUGETLB: c_int = 0x80000; + +pub const EDEADLK: c_int = 45; +pub const ENAMETOOLONG: c_int = 78; +pub const ENOLCK: c_int = 46; +pub const ENOSYS: c_int = 89; +pub const ENOTEMPTY: c_int = 93; +pub const ELOOP: c_int = 90; +pub const ENOMSG: c_int = 35; +pub const EIDRM: c_int = 36; +pub const ECHRNG: c_int = 37; +pub const EL2NSYNC: c_int = 38; +pub const EL3HLT: c_int = 39; +pub const EL3RST: c_int = 40; +pub const ELNRNG: c_int = 41; +pub const EUNATCH: c_int = 42; +pub const ENOCSI: c_int = 43; +pub const EL2HLT: c_int = 44; +pub const EBADE: c_int = 50; +pub const EBADR: c_int = 51; +pub const EXFULL: c_int = 52; +pub const ENOANO: c_int = 53; +pub const EBADRQC: c_int = 54; +pub const EBADSLT: c_int = 55; +pub const EDEADLOCK: c_int = 56; +pub const EMULTIHOP: c_int = 74; +pub const EOVERFLOW: c_int = 79; +pub const ENOTUNIQ: c_int = 80; +pub const EBADFD: c_int = 81; +pub const EBADMSG: c_int = 77; +pub const EREMCHG: c_int = 82; +pub const ELIBACC: c_int = 83; +pub const ELIBBAD: c_int = 84; +pub const ELIBSCN: c_int = 85; +pub const ELIBMAX: c_int = 86; +pub const ELIBEXEC: c_int = 87; +pub const EILSEQ: c_int = 88; +pub const ERESTART: c_int = 91; +pub const ESTRPIPE: c_int = 92; +pub const EUSERS: c_int = 94; +pub const ENOTSOCK: c_int = 95; +pub const EDESTADDRREQ: c_int = 96; +pub const EMSGSIZE: c_int = 97; +pub const EPROTOTYPE: c_int = 98; +pub const ENOPROTOOPT: c_int = 99; +pub const EPROTONOSUPPORT: c_int = 120; +pub const ESOCKTNOSUPPORT: c_int = 121; +pub const EOPNOTSUPP: c_int = 122; +pub const ENOTSUP: c_int = EOPNOTSUPP; +pub const EPFNOSUPPORT: c_int = 123; +pub const EAFNOSUPPORT: c_int = 124; +pub const EADDRINUSE: c_int = 125; +pub const EADDRNOTAVAIL: c_int = 126; +pub const ENETDOWN: c_int = 127; +pub const ENETUNREACH: c_int = 128; +pub const ENETRESET: c_int = 129; +pub const ECONNABORTED: c_int = 130; +pub const ECONNRESET: c_int = 131; +pub const ENOBUFS: c_int = 132; +pub const EISCONN: c_int = 133; +pub const ENOTCONN: c_int = 134; +pub const ESHUTDOWN: c_int = 143; +pub const ETOOMANYREFS: c_int = 144; +pub const ETIMEDOUT: c_int = 145; +pub const ECONNREFUSED: c_int = 146; +pub const EHOSTDOWN: c_int = 147; +pub const EHOSTUNREACH: c_int = 148; +pub const EALREADY: c_int = 149; +pub const EINPROGRESS: c_int = 150; +pub const ESTALE: c_int = 151; +pub const EUCLEAN: c_int = 135; +pub const ENOTNAM: c_int = 137; +pub const ENAVAIL: c_int = 138; +pub const EISNAM: c_int = 139; +pub const EREMOTEIO: c_int = 140; +pub const EDQUOT: c_int = 1133; +pub const ENOMEDIUM: c_int = 159; +pub const EMEDIUMTYPE: c_int = 160; +pub const ECANCELED: c_int = 158; +pub const ENOKEY: c_int = 161; +pub const EKEYEXPIRED: c_int = 162; +pub const EKEYREVOKED: c_int = 163; +pub const EKEYREJECTED: c_int = 164; +pub const EOWNERDEAD: c_int = 165; +pub const ENOTRECOVERABLE: c_int = 166; +pub const EHWPOISON: c_int = 168; +pub const ERFKILL: c_int = 167; + +pub const SOCK_STREAM: c_int = 2; +pub const SOCK_DGRAM: c_int = 1; + +pub const SA_ONSTACK: c_int = 0x08000000; +pub const 
SA_SIGINFO: c_int = 8; +pub const SA_NOCLDWAIT: c_int = 0x10000; + +pub const SIGEMT: c_int = 7; +pub const SIGCHLD: c_int = 18; +pub const SIGBUS: c_int = 10; +pub const SIGTTIN: c_int = 26; +pub const SIGTTOU: c_int = 27; +pub const SIGXCPU: c_int = 30; +pub const SIGXFSZ: c_int = 31; +pub const SIGVTALRM: c_int = 28; +pub const SIGPROF: c_int = 29; +pub const SIGWINCH: c_int = 20; +pub const SIGUSR1: c_int = 16; +pub const SIGUSR2: c_int = 17; +pub const SIGCONT: c_int = 25; +pub const SIGSTOP: c_int = 23; +pub const SIGTSTP: c_int = 24; +pub const SIGURG: c_int = 21; +pub const SIGIO: c_int = 22; +pub const SIGSYS: c_int = 12; +pub const SIGSTKFLT: c_int = 7; +pub const SIGPOLL: c_int = crate::SIGIO; +pub const SIGPWR: c_int = 19; +pub const SIG_SETMASK: c_int = 3; +pub const SIG_BLOCK: c_int = 1; +pub const SIG_UNBLOCK: c_int = 2; + +pub const EXTPROC: crate::tcflag_t = 0o200000; + +pub const F_GETLK: c_int = 33; +pub const F_GETOWN: c_int = 23; +pub const F_SETLK: c_int = 34; +pub const F_SETLKW: c_int = 35; +pub const F_SETOWN: c_int = 24; + +pub const VEOF: usize = 16; +pub const VEOL: usize = 17; +pub const VEOL2: usize = 6; +pub const VMIN: usize = 4; +pub const IEXTEN: crate::tcflag_t = 0o000400; +pub const TOSTOP: crate::tcflag_t = 0o100000; +pub const FLUSHO: crate::tcflag_t = 0o020000; + +pub const POLLWRNORM: c_short = 0x4; +pub const POLLWRBAND: c_short = 0x100; + +pub const SYS_syscall: c_long = 4000 + 0; +pub const SYS_exit: c_long = 4000 + 1; +pub const SYS_fork: c_long = 4000 + 2; +pub const SYS_read: c_long = 4000 + 3; +pub const SYS_write: c_long = 4000 + 4; +pub const SYS_open: c_long = 4000 + 5; +pub const SYS_close: c_long = 4000 + 6; +pub const SYS_waitpid: c_long = 4000 + 7; +pub const SYS_creat: c_long = 4000 + 8; +pub const SYS_link: c_long = 4000 + 9; +pub const SYS_unlink: c_long = 4000 + 10; +pub const SYS_execve: c_long = 4000 + 11; +pub const SYS_chdir: c_long = 4000 + 12; +pub const SYS_time: c_long = 4000 + 13; +pub const SYS_mknod: c_long = 4000 + 14; +pub const SYS_chmod: c_long = 4000 + 15; +pub const SYS_lchown: c_long = 4000 + 16; +pub const SYS_break: c_long = 4000 + 17; +pub const SYS_lseek: c_long = 4000 + 19; +pub const SYS_getpid: c_long = 4000 + 20; +pub const SYS_mount: c_long = 4000 + 21; +pub const SYS_umount: c_long = 4000 + 22; +pub const SYS_setuid: c_long = 4000 + 23; +pub const SYS_getuid: c_long = 4000 + 24; +pub const SYS_stime: c_long = 4000 + 25; +pub const SYS_ptrace: c_long = 4000 + 26; +pub const SYS_alarm: c_long = 4000 + 27; +pub const SYS_pause: c_long = 4000 + 29; +pub const SYS_utime: c_long = 4000 + 30; +pub const SYS_stty: c_long = 4000 + 31; +pub const SYS_gtty: c_long = 4000 + 32; +pub const SYS_access: c_long = 4000 + 33; +pub const SYS_nice: c_long = 4000 + 34; +pub const SYS_ftime: c_long = 4000 + 35; +pub const SYS_sync: c_long = 4000 + 36; +pub const SYS_kill: c_long = 4000 + 37; +pub const SYS_rename: c_long = 4000 + 38; +pub const SYS_mkdir: c_long = 4000 + 39; +pub const SYS_rmdir: c_long = 4000 + 40; +pub const SYS_dup: c_long = 4000 + 41; +pub const SYS_pipe: c_long = 4000 + 42; +pub const SYS_times: c_long = 4000 + 43; +pub const SYS_prof: c_long = 4000 + 44; +pub const SYS_brk: c_long = 4000 + 45; +pub const SYS_setgid: c_long = 4000 + 46; +pub const SYS_getgid: c_long = 4000 + 47; +pub const SYS_signal: c_long = 4000 + 48; +pub const SYS_geteuid: c_long = 4000 + 49; +pub const SYS_getegid: c_long = 4000 + 50; +pub const SYS_acct: c_long = 4000 + 51; +pub const SYS_umount2: c_long = 4000 + 52; +pub const 
SYS_lock: c_long = 4000 + 53; +pub const SYS_ioctl: c_long = 4000 + 54; +pub const SYS_fcntl: c_long = 4000 + 55; +pub const SYS_mpx: c_long = 4000 + 56; +pub const SYS_setpgid: c_long = 4000 + 57; +pub const SYS_ulimit: c_long = 4000 + 58; +pub const SYS_umask: c_long = 4000 + 60; +pub const SYS_chroot: c_long = 4000 + 61; +pub const SYS_ustat: c_long = 4000 + 62; +pub const SYS_dup2: c_long = 4000 + 63; +pub const SYS_getppid: c_long = 4000 + 64; +pub const SYS_getpgrp: c_long = 4000 + 65; +pub const SYS_setsid: c_long = 4000 + 66; +pub const SYS_sigaction: c_long = 4000 + 67; +pub const SYS_sgetmask: c_long = 4000 + 68; +pub const SYS_ssetmask: c_long = 4000 + 69; +pub const SYS_setreuid: c_long = 4000 + 70; +pub const SYS_setregid: c_long = 4000 + 71; +pub const SYS_sigsuspend: c_long = 4000 + 72; +pub const SYS_sigpending: c_long = 4000 + 73; +pub const SYS_sethostname: c_long = 4000 + 74; +pub const SYS_setrlimit: c_long = 4000 + 75; +pub const SYS_getrlimit: c_long = 4000 + 76; +pub const SYS_getrusage: c_long = 4000 + 77; +pub const SYS_gettimeofday: c_long = 4000 + 78; +pub const SYS_settimeofday: c_long = 4000 + 79; +pub const SYS_getgroups: c_long = 4000 + 80; +pub const SYS_setgroups: c_long = 4000 + 81; +pub const SYS_symlink: c_long = 4000 + 83; +pub const SYS_readlink: c_long = 4000 + 85; +pub const SYS_uselib: c_long = 4000 + 86; +pub const SYS_swapon: c_long = 4000 + 87; +pub const SYS_reboot: c_long = 4000 + 88; +pub const SYS_readdir: c_long = 4000 + 89; +pub const SYS_mmap: c_long = 4000 + 90; +pub const SYS_munmap: c_long = 4000 + 91; +pub const SYS_truncate: c_long = 4000 + 92; +pub const SYS_ftruncate: c_long = 4000 + 93; +pub const SYS_fchmod: c_long = 4000 + 94; +pub const SYS_fchown: c_long = 4000 + 95; +pub const SYS_getpriority: c_long = 4000 + 96; +pub const SYS_setpriority: c_long = 4000 + 97; +pub const SYS_profil: c_long = 4000 + 98; +pub const SYS_statfs: c_long = 4000 + 99; +pub const SYS_fstatfs: c_long = 4000 + 100; +pub const SYS_ioperm: c_long = 4000 + 101; +pub const SYS_socketcall: c_long = 4000 + 102; +pub const SYS_syslog: c_long = 4000 + 103; +pub const SYS_setitimer: c_long = 4000 + 104; +pub const SYS_getitimer: c_long = 4000 + 105; +pub const SYS_stat: c_long = 4000 + 106; +pub const SYS_lstat: c_long = 4000 + 107; +pub const SYS_fstat: c_long = 4000 + 108; +pub const SYS_iopl: c_long = 4000 + 110; +pub const SYS_vhangup: c_long = 4000 + 111; +pub const SYS_idle: c_long = 4000 + 112; +pub const SYS_vm86: c_long = 4000 + 113; +pub const SYS_wait4: c_long = 4000 + 114; +pub const SYS_swapoff: c_long = 4000 + 115; +pub const SYS_sysinfo: c_long = 4000 + 116; +pub const SYS_ipc: c_long = 4000 + 117; +pub const SYS_fsync: c_long = 4000 + 118; +pub const SYS_sigreturn: c_long = 4000 + 119; +pub const SYS_clone: c_long = 4000 + 120; +pub const SYS_setdomainname: c_long = 4000 + 121; +pub const SYS_uname: c_long = 4000 + 122; +pub const SYS_modify_ldt: c_long = 4000 + 123; +pub const SYS_adjtimex: c_long = 4000 + 124; +pub const SYS_mprotect: c_long = 4000 + 125; +pub const SYS_sigprocmask: c_long = 4000 + 126; +#[deprecated(since = "0.2.70", note = "Functional up to 2.6 kernel")] +pub const SYS_create_module: c_long = 4000 + 127; +pub const SYS_init_module: c_long = 4000 + 128; +pub const SYS_delete_module: c_long = 4000 + 129; +#[deprecated(since = "0.2.70", note = "Functional up to 2.6 kernel")] +pub const SYS_get_kernel_syms: c_long = 4000 + 130; +pub const SYS_quotactl: c_long = 4000 + 131; +pub const SYS_getpgid: c_long = 4000 + 132; +pub const 
SYS_fchdir: c_long = 4000 + 133; +pub const SYS_bdflush: c_long = 4000 + 134; +pub const SYS_sysfs: c_long = 4000 + 135; +pub const SYS_personality: c_long = 4000 + 136; +pub const SYS_afs_syscall: c_long = 4000 + 137; +pub const SYS_setfsuid: c_long = 4000 + 138; +pub const SYS_setfsgid: c_long = 4000 + 139; +pub const SYS__llseek: c_long = 4000 + 140; +pub const SYS_getdents: c_long = 4000 + 141; +pub const SYS_flock: c_long = 4000 + 143; +pub const SYS_msync: c_long = 4000 + 144; +pub const SYS_readv: c_long = 4000 + 145; +pub const SYS_writev: c_long = 4000 + 146; +pub const SYS_cacheflush: c_long = 4000 + 147; +pub const SYS_cachectl: c_long = 4000 + 148; +pub const SYS_sysmips: c_long = 4000 + 149; +pub const SYS_getsid: c_long = 4000 + 151; +pub const SYS_fdatasync: c_long = 4000 + 152; +pub const SYS__sysctl: c_long = 4000 + 153; +pub const SYS_mlock: c_long = 4000 + 154; +pub const SYS_munlock: c_long = 4000 + 155; +pub const SYS_mlockall: c_long = 4000 + 156; +pub const SYS_munlockall: c_long = 4000 + 157; +pub const SYS_sched_setparam: c_long = 4000 + 158; +pub const SYS_sched_getparam: c_long = 4000 + 159; +pub const SYS_sched_setscheduler: c_long = 4000 + 160; +pub const SYS_sched_getscheduler: c_long = 4000 + 161; +pub const SYS_sched_yield: c_long = 4000 + 162; +pub const SYS_sched_get_priority_max: c_long = 4000 + 163; +pub const SYS_sched_get_priority_min: c_long = 4000 + 164; +pub const SYS_sched_rr_get_interval: c_long = 4000 + 165; +pub const SYS_nanosleep: c_long = 4000 + 166; +pub const SYS_mremap: c_long = 4000 + 167; +pub const SYS_accept: c_long = 4000 + 168; +pub const SYS_bind: c_long = 4000 + 169; +pub const SYS_connect: c_long = 4000 + 170; +pub const SYS_getpeername: c_long = 4000 + 171; +pub const SYS_getsockname: c_long = 4000 + 172; +pub const SYS_getsockopt: c_long = 4000 + 173; +pub const SYS_listen: c_long = 4000 + 174; +pub const SYS_recv: c_long = 4000 + 175; +pub const SYS_recvfrom: c_long = 4000 + 176; +pub const SYS_recvmsg: c_long = 4000 + 177; +pub const SYS_send: c_long = 4000 + 178; +pub const SYS_sendmsg: c_long = 4000 + 179; +pub const SYS_sendto: c_long = 4000 + 180; +pub const SYS_setsockopt: c_long = 4000 + 181; +pub const SYS_shutdown: c_long = 4000 + 182; +pub const SYS_socket: c_long = 4000 + 183; +pub const SYS_socketpair: c_long = 4000 + 184; +pub const SYS_setresuid: c_long = 4000 + 185; +pub const SYS_getresuid: c_long = 4000 + 186; +#[deprecated(since = "0.2.70", note = "Functional up to 2.6 kernel")] +pub const SYS_query_module: c_long = 4000 + 187; +pub const SYS_poll: c_long = 4000 + 188; +pub const SYS_nfsservctl: c_long = 4000 + 189; +pub const SYS_setresgid: c_long = 4000 + 190; +pub const SYS_getresgid: c_long = 4000 + 191; +pub const SYS_prctl: c_long = 4000 + 192; +pub const SYS_rt_sigreturn: c_long = 4000 + 193; +pub const SYS_rt_sigaction: c_long = 4000 + 194; +pub const SYS_rt_sigprocmask: c_long = 4000 + 195; +pub const SYS_rt_sigpending: c_long = 4000 + 196; +pub const SYS_rt_sigtimedwait: c_long = 4000 + 197; +pub const SYS_rt_sigqueueinfo: c_long = 4000 + 198; +pub const SYS_rt_sigsuspend: c_long = 4000 + 199; +pub const SYS_chown: c_long = 4000 + 202; +pub const SYS_getcwd: c_long = 4000 + 203; +pub const SYS_capget: c_long = 4000 + 204; +pub const SYS_capset: c_long = 4000 + 205; +pub const SYS_sigaltstack: c_long = 4000 + 206; +pub const SYS_sendfile: c_long = 4000 + 207; +pub const SYS_getpmsg: c_long = 4000 + 208; +pub const SYS_putpmsg: c_long = 4000 + 209; +pub const SYS_mmap2: c_long = 4000 + 210; +pub const 
SYS_truncate64: c_long = 4000 + 211; +pub const SYS_ftruncate64: c_long = 4000 + 212; +pub const SYS_stat64: c_long = 4000 + 213; +pub const SYS_lstat64: c_long = 4000 + 214; +pub const SYS_fstat64: c_long = 4000 + 215; +pub const SYS_pivot_root: c_long = 4000 + 216; +pub const SYS_mincore: c_long = 4000 + 217; +pub const SYS_madvise: c_long = 4000 + 218; +pub const SYS_getdents64: c_long = 4000 + 219; +pub const SYS_fcntl64: c_long = 4000 + 220; +pub const SYS_gettid: c_long = 4000 + 222; +pub const SYS_readahead: c_long = 4000 + 223; +pub const SYS_setxattr: c_long = 4000 + 224; +pub const SYS_lsetxattr: c_long = 4000 + 225; +pub const SYS_fsetxattr: c_long = 4000 + 226; +pub const SYS_getxattr: c_long = 4000 + 227; +pub const SYS_lgetxattr: c_long = 4000 + 228; +pub const SYS_fgetxattr: c_long = 4000 + 229; +pub const SYS_listxattr: c_long = 4000 + 230; +pub const SYS_llistxattr: c_long = 4000 + 231; +pub const SYS_flistxattr: c_long = 4000 + 232; +pub const SYS_removexattr: c_long = 4000 + 233; +pub const SYS_lremovexattr: c_long = 4000 + 234; +pub const SYS_fremovexattr: c_long = 4000 + 235; +pub const SYS_tkill: c_long = 4000 + 236; +pub const SYS_sendfile64: c_long = 4000 + 237; +pub const SYS_futex: c_long = 4000 + 238; +pub const SYS_sched_setaffinity: c_long = 4000 + 239; +pub const SYS_sched_getaffinity: c_long = 4000 + 240; +pub const SYS_io_setup: c_long = 4000 + 241; +pub const SYS_io_destroy: c_long = 4000 + 242; +pub const SYS_io_getevents: c_long = 4000 + 243; +pub const SYS_io_submit: c_long = 4000 + 244; +pub const SYS_io_cancel: c_long = 4000 + 245; +pub const SYS_exit_group: c_long = 4000 + 246; +pub const SYS_lookup_dcookie: c_long = 4000 + 247; +pub const SYS_epoll_create: c_long = 4000 + 248; +pub const SYS_epoll_ctl: c_long = 4000 + 249; +pub const SYS_epoll_wait: c_long = 4000 + 250; +pub const SYS_remap_file_pages: c_long = 4000 + 251; +pub const SYS_set_tid_address: c_long = 4000 + 252; +pub const SYS_restart_syscall: c_long = 4000 + 253; +pub const SYS_statfs64: c_long = 4000 + 255; +pub const SYS_fstatfs64: c_long = 4000 + 256; +pub const SYS_timer_create: c_long = 4000 + 257; +pub const SYS_timer_settime: c_long = 4000 + 258; +pub const SYS_timer_gettime: c_long = 4000 + 259; +pub const SYS_timer_getoverrun: c_long = 4000 + 260; +pub const SYS_timer_delete: c_long = 4000 + 261; +pub const SYS_clock_settime: c_long = 4000 + 262; +pub const SYS_clock_gettime: c_long = 4000 + 263; +pub const SYS_clock_getres: c_long = 4000 + 264; +pub const SYS_clock_nanosleep: c_long = 4000 + 265; +pub const SYS_tgkill: c_long = 4000 + 266; +pub const SYS_utimes: c_long = 4000 + 267; +pub const SYS_mbind: c_long = 4000 + 268; +pub const SYS_get_mempolicy: c_long = 4000 + 269; +pub const SYS_set_mempolicy: c_long = 4000 + 270; +pub const SYS_mq_open: c_long = 4000 + 271; +pub const SYS_mq_unlink: c_long = 4000 + 272; +pub const SYS_mq_timedsend: c_long = 4000 + 273; +pub const SYS_mq_timedreceive: c_long = 4000 + 274; +pub const SYS_mq_notify: c_long = 4000 + 275; +pub const SYS_mq_getsetattr: c_long = 4000 + 276; +pub const SYS_vserver: c_long = 4000 + 277; +pub const SYS_waitid: c_long = 4000 + 278; +/* pub const SYS_sys_setaltroot: c_long = 4000 + 279; */ +pub const SYS_add_key: c_long = 4000 + 280; +pub const SYS_request_key: c_long = 4000 + 281; +pub const SYS_keyctl: c_long = 4000 + 282; +pub const SYS_set_thread_area: c_long = 4000 + 283; +pub const SYS_inotify_init: c_long = 4000 + 284; +pub const SYS_inotify_add_watch: c_long = 4000 + 285; +pub const 
SYS_inotify_rm_watch: c_long = 4000 + 286; +pub const SYS_migrate_pages: c_long = 4000 + 287; +pub const SYS_openat: c_long = 4000 + 288; +pub const SYS_mkdirat: c_long = 4000 + 289; +pub const SYS_mknodat: c_long = 4000 + 290; +pub const SYS_fchownat: c_long = 4000 + 291; +pub const SYS_futimesat: c_long = 4000 + 292; +pub const SYS_unlinkat: c_long = 4000 + 294; +pub const SYS_renameat: c_long = 4000 + 295; +pub const SYS_linkat: c_long = 4000 + 296; +pub const SYS_symlinkat: c_long = 4000 + 297; +pub const SYS_readlinkat: c_long = 4000 + 298; +pub const SYS_fchmodat: c_long = 4000 + 299; +pub const SYS_faccessat: c_long = 4000 + 300; +pub const SYS_pselect6: c_long = 4000 + 301; +pub const SYS_ppoll: c_long = 4000 + 302; +pub const SYS_unshare: c_long = 4000 + 303; +pub const SYS_splice: c_long = 4000 + 304; +pub const SYS_sync_file_range: c_long = 4000 + 305; +pub const SYS_tee: c_long = 4000 + 306; +pub const SYS_vmsplice: c_long = 4000 + 307; +pub const SYS_move_pages: c_long = 4000 + 308; +pub const SYS_set_robust_list: c_long = 4000 + 309; +pub const SYS_get_robust_list: c_long = 4000 + 310; +pub const SYS_kexec_load: c_long = 4000 + 311; +pub const SYS_getcpu: c_long = 4000 + 312; +pub const SYS_epoll_pwait: c_long = 4000 + 313; +pub const SYS_ioprio_set: c_long = 4000 + 314; +pub const SYS_ioprio_get: c_long = 4000 + 315; +pub const SYS_utimensat: c_long = 4000 + 316; +pub const SYS_signalfd: c_long = 4000 + 317; +pub const SYS_timerfd: c_long = 4000 + 318; +pub const SYS_eventfd: c_long = 4000 + 319; +pub const SYS_fallocate: c_long = 4000 + 320; +pub const SYS_timerfd_create: c_long = 4000 + 321; +pub const SYS_timerfd_gettime: c_long = 4000 + 322; +pub const SYS_timerfd_settime: c_long = 4000 + 323; +pub const SYS_signalfd4: c_long = 4000 + 324; +pub const SYS_eventfd2: c_long = 4000 + 325; +pub const SYS_epoll_create1: c_long = 4000 + 326; +pub const SYS_dup3: c_long = 4000 + 327; +pub const SYS_pipe2: c_long = 4000 + 328; +pub const SYS_inotify_init1: c_long = 4000 + 329; +pub const SYS_preadv: c_long = 4000 + 330; +pub const SYS_pwritev: c_long = 4000 + 331; +pub const SYS_rt_tgsigqueueinfo: c_long = 4000 + 332; +pub const SYS_perf_event_open: c_long = 4000 + 333; +pub const SYS_accept4: c_long = 4000 + 334; +pub const SYS_recvmmsg: c_long = 4000 + 335; +pub const SYS_fanotify_init: c_long = 4000 + 336; +pub const SYS_fanotify_mark: c_long = 4000 + 337; +pub const SYS_prlimit64: c_long = 4000 + 338; +pub const SYS_name_to_handle_at: c_long = 4000 + 339; +pub const SYS_open_by_handle_at: c_long = 4000 + 340; +pub const SYS_clock_adjtime: c_long = 4000 + 341; +pub const SYS_syncfs: c_long = 4000 + 342; +pub const SYS_sendmmsg: c_long = 4000 + 343; +pub const SYS_setns: c_long = 4000 + 344; +pub const SYS_process_vm_readv: c_long = 4000 + 345; +pub const SYS_process_vm_writev: c_long = 4000 + 346; +pub const SYS_kcmp: c_long = 4000 + 347; +pub const SYS_finit_module: c_long = 4000 + 348; +pub const SYS_sched_setattr: c_long = 4000 + 349; +pub const SYS_sched_getattr: c_long = 4000 + 350; +pub const SYS_renameat2: c_long = 4000 + 351; +pub const SYS_seccomp: c_long = 4000 + 352; +pub const SYS_getrandom: c_long = 4000 + 353; +pub const SYS_memfd_create: c_long = 4000 + 354; +pub const SYS_bpf: c_long = 4000 + 355; +pub const SYS_execveat: c_long = 4000 + 356; +pub const SYS_userfaultfd: c_long = 4000 + 357; +pub const SYS_membarrier: c_long = 4000 + 358; +pub const SYS_mlock2: c_long = 4000 + 359; +pub const SYS_copy_file_range: c_long = 4000 + 360; +pub const SYS_preadv2: 
c_long = 4000 + 361;
+pub const SYS_pwritev2: c_long = 4000 + 362;
+pub const SYS_pkey_mprotect: c_long = 4000 + 363;
+pub const SYS_pkey_alloc: c_long = 4000 + 364;
+pub const SYS_pkey_free: c_long = 4000 + 365;
+pub const SYS_statx: c_long = 4000 + 366;
+pub const SYS_pidfd_send_signal: c_long = 4000 + 424;
+pub const SYS_io_uring_setup: c_long = 4000 + 425;
+pub const SYS_io_uring_enter: c_long = 4000 + 426;
+pub const SYS_io_uring_register: c_long = 4000 + 427;
+pub const SYS_open_tree: c_long = 4000 + 428;
+pub const SYS_move_mount: c_long = 4000 + 429;
+pub const SYS_fsopen: c_long = 4000 + 430;
+pub const SYS_fsconfig: c_long = 4000 + 431;
+pub const SYS_fsmount: c_long = 4000 + 432;
+pub const SYS_fspick: c_long = 4000 + 433;
+pub const SYS_pidfd_open: c_long = 4000 + 434;
+pub const SYS_clone3: c_long = 4000 + 435;
+pub const SYS_close_range: c_long = 4000 + 436;
+pub const SYS_openat2: c_long = 4000 + 437;
+pub const SYS_pidfd_getfd: c_long = 4000 + 438;
+pub const SYS_faccessat2: c_long = 4000 + 439;
+pub const SYS_process_madvise: c_long = 4000 + 440;
+pub const SYS_epoll_pwait2: c_long = 4000 + 441;
+pub const SYS_mount_setattr: c_long = 4000 + 442;
+pub const SYS_quotactl_fd: c_long = 4000 + 443;
+pub const SYS_landlock_create_ruleset: c_long = 4000 + 444;
+pub const SYS_landlock_add_rule: c_long = 4000 + 445;
+pub const SYS_landlock_restrict_self: c_long = 4000 + 446;
+pub const SYS_memfd_secret: c_long = 4000 + 447;
+pub const SYS_process_mrelease: c_long = 4000 + 448;
+pub const SYS_futex_waitv: c_long = 4000 + 449;
+pub const SYS_set_mempolicy_home_node: c_long = 4000 + 450;
diff --git a/vendor/libc/src/unix/linux_like/linux/musl/b32/mod.rs b/vendor/libc/src/unix/linux_like/linux/musl/b32/mod.rs
new file mode 100644
index 00000000000000..00b3d7705090fb
--- /dev/null
+++ b/vendor/libc/src/unix/linux_like/linux/musl/b32/mod.rs
@@ -0,0 +1,65 @@
+use crate::prelude::*;
+
+pub type nlink_t = u32;
+pub type blksize_t = c_long;
+pub type __u64 = c_ulonglong;
+pub type __s64 = c_longlong;
+pub type regoff_t = c_int;
+
+s! {
+    pub struct pthread_attr_t {
+        __size: [u32; 9],
+    }
+
+    pub struct sigset_t {
+        __val: [c_ulong; 32],
+    }
+
+    pub struct msghdr {
+        pub msg_name: *mut c_void,
+        pub msg_namelen: crate::socklen_t,
+        pub msg_iov: *mut crate::iovec,
+        pub msg_iovlen: c_int,
+        pub msg_control: *mut c_void,
+        pub msg_controllen: crate::socklen_t,
+        pub msg_flags: c_int,
+    }
+
+    pub struct cmsghdr {
+        pub cmsg_len: crate::socklen_t,
+        pub cmsg_level: c_int,
+        pub cmsg_type: c_int,
+    }
+
+    pub struct sem_t {
+        __val: [c_int; 4],
+    }
+}
+
+pub const __SIZEOF_PTHREAD_RWLOCK_T: usize = 32;
+pub const __SIZEOF_PTHREAD_MUTEX_T: usize = 24;
+pub const __SIZEOF_PTHREAD_BARRIER_T: usize = 20;
+
+cfg_if! {
+    if #[cfg(any(target_arch = "x86"))] {
+        mod x86;
+        pub use self::x86::*;
+    } else if #[cfg(any(target_arch = "mips"))] {
+        mod mips;
+        pub use self::mips::*;
+    } else if #[cfg(any(target_arch = "arm"))] {
+        mod arm;
+        pub use self::arm::*;
+    } else if #[cfg(any(target_arch = "powerpc"))] {
+        mod powerpc;
+        pub use self::powerpc::*;
+    } else if #[cfg(any(target_arch = "hexagon"))] {
+        mod hexagon;
+        pub use self::hexagon::*;
+    } else if #[cfg(any(target_arch = "riscv32"))] {
+        mod riscv32;
+        pub use self::riscv32::*;
+    } else {
+        // Unknown target_arch
+    }
+}
diff --git a/vendor/libc/src/unix/linux_like/linux/musl/b32/powerpc.rs b/vendor/libc/src/unix/linux_like/linux/musl/b32/powerpc.rs
new file mode 100644
index 00000000000000..a07dfda17794e8
--- /dev/null
+++ b/vendor/libc/src/unix/linux_like/linux/musl/b32/powerpc.rs
@@ -0,0 +1,766 @@
+use crate::off_t;
+use crate::prelude::*;
+
+pub type wchar_t = i32;
+
+s! {
+    pub struct termios {
+        pub c_iflag: crate::tcflag_t,
+        pub c_oflag: crate::tcflag_t,
+        pub c_cflag: crate::tcflag_t,
+        pub c_lflag: crate::tcflag_t,
+        pub c_cc: [crate::cc_t; crate::NCCS],
+        pub c_line: crate::cc_t,
+        pub __c_ispeed: crate::speed_t,
+        pub __c_ospeed: crate::speed_t,
+    }
+
+    pub struct stat {
+        pub st_dev: crate::dev_t,
+        pub st_ino: crate::ino_t,
+        pub st_mode: crate::mode_t,
+        pub st_nlink: crate::nlink_t,
+        pub st_uid: crate::uid_t,
+        pub st_gid: crate::gid_t,
+        pub st_rdev: crate::dev_t,
+        __st_rdev_padding: c_short,
+        pub st_size: off_t,
+        pub st_blksize: crate::blksize_t,
+        pub st_blocks: crate::blkcnt_t,
+        pub st_atime: crate::time_t,
+        pub st_atime_nsec: c_long,
+        pub st_mtime: crate::time_t,
+        pub st_mtime_nsec: c_long,
+        pub st_ctime: crate::time_t,
+        pub st_ctime_nsec: c_long,
+        __unused: [c_long; 2],
+    }
+
+    pub struct stat64 {
+        pub st_dev: crate::dev_t,
+        pub st_ino: crate::ino_t,
+        pub st_mode: crate::mode_t,
+        pub st_nlink: crate::nlink_t,
+        pub st_uid: crate::uid_t,
+        pub st_gid: crate::gid_t,
+        pub st_rdev: crate::dev_t,
+        __st_rdev_padding: c_short,
+        pub st_size: off_t,
+        pub st_blksize: crate::blksize_t,
+        pub st_blocks: crate::blkcnt_t,
+        pub st_atime: crate::time_t,
+        pub st_atime_nsec: c_long,
+        pub st_mtime: crate::time_t,
+        pub st_mtime_nsec: c_long,
+        pub st_ctime: crate::time_t,
+        pub st_ctime_nsec: c_long,
+        __unused: [c_long; 2],
+    }
+
+    pub struct stack_t {
+        pub ss_sp: *mut c_void,
+        pub ss_flags: c_int,
+        pub ss_size: size_t,
+    }
+
+    pub struct ipc_perm {
+        #[cfg(musl_v1_2_3)]
+        pub __key: crate::key_t,
+        #[cfg(not(musl_v1_2_3))]
+        #[deprecated(
+            since = "0.2.173",
+            note = "This field is incorrectly named and will be changed
+                to __key in a future release."
+ )] + pub __ipc_perm_key: crate::key_t, + pub uid: crate::uid_t, + pub gid: crate::gid_t, + pub cuid: crate::uid_t, + pub cgid: crate::gid_t, + pub mode: crate::mode_t, + pub __seq: c_int, + __pad1: c_int, + __pad2: c_longlong, + __pad3: c_longlong, + } + + pub struct shmid_ds { + pub shm_perm: crate::ipc_perm, + __unused1: c_int, + pub shm_atime: crate::time_t, + __unused2: c_int, + pub shm_dtime: crate::time_t, + __unused3: c_int, + pub shm_ctime: crate::time_t, + __unused4: c_int, + pub shm_segsz: size_t, + pub shm_cpid: crate::pid_t, + pub shm_lpid: crate::pid_t, + pub shm_nattch: c_ulong, + __pad1: c_ulong, + __pad2: c_ulong, + } + + pub struct msqid_ds { + pub msg_perm: crate::ipc_perm, + __unused1: c_int, + pub msg_stime: crate::time_t, + __unused2: c_int, + pub msg_rtime: crate::time_t, + __unused3: c_int, + pub msg_ctime: crate::time_t, + pub __msg_cbytes: c_ulong, + pub msg_qnum: crate::msgqnum_t, + pub msg_qbytes: crate::msglen_t, + pub msg_lspid: crate::pid_t, + pub msg_lrpid: crate::pid_t, + __pad1: c_ulong, + __pad2: c_ulong, + } +} + +pub const MADV_SOFT_OFFLINE: c_int = 101; +pub const SIGSTKSZ: size_t = 10240; +pub const MINSIGSTKSZ: size_t = 4096; + +pub const O_DIRECT: c_int = 0x20000; +pub const O_DIRECTORY: c_int = 0x4000; +pub const O_NOFOLLOW: c_int = 0x8000; +pub const O_ASYNC: c_int = 0x2000; +pub const O_LARGEFILE: c_int = 0x10000; + +pub const MCL_CURRENT: c_int = 0x2000; +pub const MCL_FUTURE: c_int = 0x4000; +pub const MCL_ONFAULT: c_int = 0x8000; +pub const CBAUD: crate::tcflag_t = 0o0000377; +pub const TAB1: c_int = 0x00000400; +pub const TAB2: c_int = 0x00000800; +pub const TAB3: c_int = 0x00000C00; +pub const CR1: c_int = 0x00001000; +pub const CR2: c_int = 0x00002000; +pub const CR3: c_int = 0x00003000; +pub const FF1: c_int = 0x00004000; +pub const BS1: c_int = 0x00008000; +pub const VT1: c_int = 0x00010000; +pub const VWERASE: usize = 10; +pub const VREPRINT: usize = 11; +pub const VSUSP: usize = 12; +pub const VSTART: usize = 13; +pub const VSTOP: usize = 14; +pub const VDISCARD: usize = 16; +pub const VTIME: usize = 7; +pub const IXON: crate::tcflag_t = 0x00000200; +pub const IXOFF: crate::tcflag_t = 0x00000400; +pub const ONLCR: crate::tcflag_t = 0x00000002; +pub const CSIZE: crate::tcflag_t = 0x00000300; +pub const CS6: crate::tcflag_t = 0x00000100; +pub const CS7: crate::tcflag_t = 0x00000200; +pub const CS8: crate::tcflag_t = 0x00000300; +pub const CSTOPB: crate::tcflag_t = 0x00000400; +pub const CREAD: crate::tcflag_t = 0x00000800; +pub const PARENB: crate::tcflag_t = 0x00001000; +pub const PARODD: crate::tcflag_t = 0x00002000; +pub const HUPCL: crate::tcflag_t = 0x00004000; +pub const CLOCAL: crate::tcflag_t = 0x00008000; +pub const ECHOKE: crate::tcflag_t = 0x00000001; +pub const ECHOE: crate::tcflag_t = 0x00000002; +pub const ECHOK: crate::tcflag_t = 0x00000004; +pub const ECHONL: crate::tcflag_t = 0x00000010; +pub const ECHOPRT: crate::tcflag_t = 0x00000020; +pub const ECHOCTL: crate::tcflag_t = 0x00000040; +pub const ISIG: crate::tcflag_t = 0x00000080; +pub const ICANON: crate::tcflag_t = 0x00000100; +pub const PENDIN: crate::tcflag_t = 0x20000000; +pub const NOFLSH: crate::tcflag_t = 0x80000000; +pub const CIBAUD: crate::tcflag_t = 0o00077600000; +pub const CBAUDEX: crate::tcflag_t = 0o000020; +pub const VSWTC: usize = 9; +pub const OLCUC: crate::tcflag_t = 0o000004; +pub const NLDLY: crate::tcflag_t = 0o001400; +pub const CRDLY: crate::tcflag_t = 0o030000; +pub const TABDLY: crate::tcflag_t = 0o006000; +pub const BSDLY: crate::tcflag_t = 
0o100000; +pub const FFDLY: crate::tcflag_t = 0o040000; +pub const VTDLY: crate::tcflag_t = 0o200000; +pub const XTABS: crate::tcflag_t = 0o006000; +pub const B57600: crate::speed_t = 0o000020; +pub const B115200: crate::speed_t = 0o000021; +pub const B230400: crate::speed_t = 0o000022; +pub const B460800: crate::speed_t = 0o000023; +pub const B500000: crate::speed_t = 0o000024; +pub const B576000: crate::speed_t = 0o000025; +pub const B921600: crate::speed_t = 0o000026; +pub const B1000000: crate::speed_t = 0o000027; +pub const B1152000: crate::speed_t = 0o000030; +pub const B1500000: crate::speed_t = 0o000031; +pub const B2000000: crate::speed_t = 0o000032; +pub const B2500000: crate::speed_t = 0o000033; +pub const B3000000: crate::speed_t = 0o000034; +pub const B3500000: crate::speed_t = 0o000035; +pub const B4000000: crate::speed_t = 0o000036; + +pub const O_APPEND: c_int = 1024; +pub const O_CREAT: c_int = 64; +pub const O_EXCL: c_int = 128; +pub const O_NOCTTY: c_int = 256; +pub const O_NONBLOCK: c_int = 2048; +pub const O_SYNC: c_int = 1052672; +pub const O_RSYNC: c_int = 1052672; +pub const O_DSYNC: c_int = 4096; + +pub const MAP_ANON: c_int = 0x0020; +pub const MAP_GROWSDOWN: c_int = 0x0100; +pub const MAP_DENYWRITE: c_int = 0x0800; +pub const MAP_EXECUTABLE: c_int = 0x01000; +pub const MAP_LOCKED: c_int = 0x00080; +pub const MAP_NORESERVE: c_int = 0x00040; +pub const MAP_POPULATE: c_int = 0x08000; +pub const MAP_NONBLOCK: c_int = 0x010000; +pub const MAP_STACK: c_int = 0x020000; +pub const MAP_HUGETLB: c_int = 0x040000; +pub const MAP_SYNC: c_int = 0x080000; + +pub const PTRACE_SYSEMU: c_int = 0x1d; +pub const PTRACE_SYSEMU_SINGLESTEP: c_int = 0x1e; + +pub const SOCK_STREAM: c_int = 1; +pub const SOCK_DGRAM: c_int = 2; + +pub const EDEADLK: c_int = 35; +pub const ENAMETOOLONG: c_int = 36; +pub const ENOLCK: c_int = 37; +pub const ENOSYS: c_int = 38; +pub const ENOTEMPTY: c_int = 39; +pub const ELOOP: c_int = 40; +pub const ENOMSG: c_int = 42; +pub const EIDRM: c_int = 43; +pub const ECHRNG: c_int = 44; +pub const EL2NSYNC: c_int = 45; +pub const EL3HLT: c_int = 46; +pub const EL3RST: c_int = 47; +pub const ELNRNG: c_int = 48; +pub const EUNATCH: c_int = 49; +pub const ENOCSI: c_int = 50; +pub const EL2HLT: c_int = 51; +pub const EBADE: c_int = 52; +pub const EBADR: c_int = 53; +pub const EXFULL: c_int = 54; +pub const ENOANO: c_int = 55; +pub const EBADRQC: c_int = 56; +pub const EBADSLT: c_int = 57; +pub const EDEADLOCK: c_int = 58; +pub const EMULTIHOP: c_int = 72; +pub const EBADMSG: c_int = 74; +pub const EOVERFLOW: c_int = 75; +pub const ENOTUNIQ: c_int = 76; +pub const EBADFD: c_int = 77; +pub const EREMCHG: c_int = 78; +pub const ELIBACC: c_int = 79; +pub const ELIBBAD: c_int = 80; +pub const ELIBSCN: c_int = 81; +pub const ELIBMAX: c_int = 82; +pub const ELIBEXEC: c_int = 83; +pub const EILSEQ: c_int = 84; +pub const ERESTART: c_int = 85; +pub const ESTRPIPE: c_int = 86; +pub const EUSERS: c_int = 87; +pub const ENOTSOCK: c_int = 88; +pub const EDESTADDRREQ: c_int = 89; +pub const EMSGSIZE: c_int = 90; +pub const EPROTOTYPE: c_int = 91; +pub const ENOPROTOOPT: c_int = 92; +pub const EPROTONOSUPPORT: c_int = 93; +pub const ESOCKTNOSUPPORT: c_int = 94; +pub const EOPNOTSUPP: c_int = 95; +pub const ENOTSUP: c_int = EOPNOTSUPP; +pub const EPFNOSUPPORT: c_int = 96; +pub const EAFNOSUPPORT: c_int = 97; +pub const EADDRINUSE: c_int = 98; +pub const EADDRNOTAVAIL: c_int = 99; +pub const ENETDOWN: c_int = 100; +pub const ENETUNREACH: c_int = 101; +pub const ENETRESET: c_int = 102; 
+pub const ECONNABORTED: c_int = 103; +pub const ECONNRESET: c_int = 104; +pub const ENOBUFS: c_int = 105; +pub const EISCONN: c_int = 106; +pub const ENOTCONN: c_int = 107; +pub const ESHUTDOWN: c_int = 108; +pub const ETOOMANYREFS: c_int = 109; +pub const ETIMEDOUT: c_int = 110; +pub const ECONNREFUSED: c_int = 111; +pub const EHOSTDOWN: c_int = 112; +pub const EHOSTUNREACH: c_int = 113; +pub const EALREADY: c_int = 114; +pub const EINPROGRESS: c_int = 115; +pub const ESTALE: c_int = 116; +pub const EUCLEAN: c_int = 117; +pub const ENOTNAM: c_int = 118; +pub const ENAVAIL: c_int = 119; +pub const EISNAM: c_int = 120; +pub const EREMOTEIO: c_int = 121; +pub const EDQUOT: c_int = 122; +pub const ENOMEDIUM: c_int = 123; +pub const EMEDIUMTYPE: c_int = 124; +pub const ECANCELED: c_int = 125; +pub const ENOKEY: c_int = 126; +pub const EKEYEXPIRED: c_int = 127; +pub const EKEYREVOKED: c_int = 128; +pub const EKEYREJECTED: c_int = 129; +pub const EOWNERDEAD: c_int = 130; +pub const ENOTRECOVERABLE: c_int = 131; +pub const ERFKILL: c_int = 132; +pub const EHWPOISON: c_int = 133; + +pub const SA_ONSTACK: c_int = 0x08000000; +pub const SA_SIGINFO: c_int = 0x00000004; +pub const SA_NOCLDWAIT: c_int = 0x00000002; + +pub const SIGCHLD: c_int = 17; +pub const SIGBUS: c_int = 7; +pub const SIGTTIN: c_int = 21; +pub const SIGTTOU: c_int = 22; +pub const SIGXCPU: c_int = 24; +pub const SIGXFSZ: c_int = 25; +pub const SIGVTALRM: c_int = 26; +pub const SIGPROF: c_int = 27; +pub const SIGWINCH: c_int = 28; +pub const SIGUSR1: c_int = 10; +pub const SIGUSR2: c_int = 12; +pub const SIGCONT: c_int = 18; +pub const SIGSTOP: c_int = 19; +pub const SIGTSTP: c_int = 20; +pub const SIGURG: c_int = 23; +pub const SIGIO: c_int = 29; +pub const SIGSYS: c_int = 31; +pub const SIGSTKFLT: c_int = 16; +pub const SIGPOLL: c_int = 29; +pub const SIGPWR: c_int = 30; +pub const SIG_SETMASK: c_int = 2; +pub const SIG_BLOCK: c_int = 0x000000; +pub const SIG_UNBLOCK: c_int = 0x01; + +pub const EXTPROC: crate::tcflag_t = 0x10000000; + +pub const F_GETLK: c_int = 12; +pub const F_GETOWN: c_int = 9; +pub const F_SETLK: c_int = 13; +pub const F_SETLKW: c_int = 14; +pub const F_SETOWN: c_int = 8; + +pub const VEOF: usize = 4; +pub const VEOL: usize = 6; +pub const VEOL2: usize = 8; +pub const VMIN: usize = 5; +pub const IEXTEN: crate::tcflag_t = 0x00000400; +pub const TOSTOP: crate::tcflag_t = 0x00400000; +pub const FLUSHO: crate::tcflag_t = 0x00800000; + +pub const POLLWRNORM: c_short = 0x100; +pub const POLLWRBAND: c_short = 0x200; + +// Syscall table +pub const SYS_restart_syscall: c_long = 0; +pub const SYS_exit: c_long = 1; +pub const SYS_fork: c_long = 2; +pub const SYS_read: c_long = 3; +pub const SYS_write: c_long = 4; +pub const SYS_open: c_long = 5; +pub const SYS_close: c_long = 6; +pub const SYS_waitpid: c_long = 7; +pub const SYS_creat: c_long = 8; +pub const SYS_link: c_long = 9; +pub const SYS_unlink: c_long = 10; +pub const SYS_execve: c_long = 11; +pub const SYS_chdir: c_long = 12; +pub const SYS_time: c_long = 13; +pub const SYS_mknod: c_long = 14; +pub const SYS_chmod: c_long = 15; +pub const SYS_lchown: c_long = 16; +pub const SYS_break: c_long = 17; +pub const SYS_oldstat: c_long = 18; +pub const SYS_lseek: c_long = 19; +pub const SYS_getpid: c_long = 20; +pub const SYS_mount: c_long = 21; +pub const SYS_umount: c_long = 22; +pub const SYS_setuid: c_long = 23; +pub const SYS_getuid: c_long = 24; +pub const SYS_stime: c_long = 25; +pub const SYS_ptrace: c_long = 26; +pub const SYS_alarm: c_long = 27; +pub const 
SYS_oldfstat: c_long = 28; +pub const SYS_pause: c_long = 29; +pub const SYS_utime: c_long = 30; +pub const SYS_stty: c_long = 31; +pub const SYS_gtty: c_long = 32; +pub const SYS_access: c_long = 33; +pub const SYS_nice: c_long = 34; +pub const SYS_ftime: c_long = 35; +pub const SYS_sync: c_long = 36; +pub const SYS_kill: c_long = 37; +pub const SYS_rename: c_long = 38; +pub const SYS_mkdir: c_long = 39; +pub const SYS_rmdir: c_long = 40; +pub const SYS_dup: c_long = 41; +pub const SYS_pipe: c_long = 42; +pub const SYS_times: c_long = 43; +pub const SYS_prof: c_long = 44; +pub const SYS_brk: c_long = 45; +pub const SYS_setgid: c_long = 46; +pub const SYS_getgid: c_long = 47; +pub const SYS_signal: c_long = 48; +pub const SYS_geteuid: c_long = 49; +pub const SYS_getegid: c_long = 50; +pub const SYS_acct: c_long = 51; +pub const SYS_umount2: c_long = 52; +pub const SYS_lock: c_long = 53; +pub const SYS_ioctl: c_long = 54; +pub const SYS_fcntl: c_long = 55; +pub const SYS_mpx: c_long = 56; +pub const SYS_setpgid: c_long = 57; +pub const SYS_ulimit: c_long = 58; +pub const SYS_oldolduname: c_long = 59; +pub const SYS_umask: c_long = 60; +pub const SYS_chroot: c_long = 61; +pub const SYS_ustat: c_long = 62; +pub const SYS_dup2: c_long = 63; +pub const SYS_getppid: c_long = 64; +pub const SYS_getpgrp: c_long = 65; +pub const SYS_setsid: c_long = 66; +pub const SYS_sigaction: c_long = 67; +pub const SYS_sgetmask: c_long = 68; +pub const SYS_ssetmask: c_long = 69; +pub const SYS_setreuid: c_long = 70; +pub const SYS_setregid: c_long = 71; +pub const SYS_sigsuspend: c_long = 72; +pub const SYS_sigpending: c_long = 73; +pub const SYS_sethostname: c_long = 74; +pub const SYS_setrlimit: c_long = 75; +pub const SYS_getrlimit: c_long = 76; +pub const SYS_getrusage: c_long = 77; +pub const SYS_gettimeofday: c_long = 78; +pub const SYS_settimeofday: c_long = 79; +pub const SYS_getgroups: c_long = 80; +pub const SYS_setgroups: c_long = 81; +pub const SYS_select: c_long = 82; +pub const SYS_symlink: c_long = 83; +pub const SYS_oldlstat: c_long = 84; +pub const SYS_readlink: c_long = 85; +pub const SYS_uselib: c_long = 86; +pub const SYS_swapon: c_long = 87; +pub const SYS_reboot: c_long = 88; +pub const SYS_readdir: c_long = 89; +pub const SYS_mmap: c_long = 90; +pub const SYS_munmap: c_long = 91; +pub const SYS_truncate: c_long = 92; +pub const SYS_ftruncate: c_long = 93; +pub const SYS_fchmod: c_long = 94; +pub const SYS_fchown: c_long = 95; +pub const SYS_getpriority: c_long = 96; +pub const SYS_setpriority: c_long = 97; +pub const SYS_profil: c_long = 98; +pub const SYS_statfs: c_long = 99; +pub const SYS_fstatfs: c_long = 100; +pub const SYS_ioperm: c_long = 101; +pub const SYS_socketcall: c_long = 102; +pub const SYS_syslog: c_long = 103; +pub const SYS_setitimer: c_long = 104; +pub const SYS_getitimer: c_long = 105; +pub const SYS_stat: c_long = 106; +pub const SYS_lstat: c_long = 107; +pub const SYS_fstat: c_long = 108; +pub const SYS_olduname: c_long = 109; +pub const SYS_iopl: c_long = 110; +pub const SYS_vhangup: c_long = 111; +pub const SYS_idle: c_long = 112; +pub const SYS_vm86: c_long = 113; +pub const SYS_wait4: c_long = 114; +pub const SYS_swapoff: c_long = 115; +pub const SYS_sysinfo: c_long = 116; +pub const SYS_ipc: c_long = 117; +pub const SYS_fsync: c_long = 118; +pub const SYS_sigreturn: c_long = 119; +pub const SYS_clone: c_long = 120; +pub const SYS_setdomainname: c_long = 121; +pub const SYS_uname: c_long = 122; +pub const SYS_modify_ldt: c_long = 123; +pub const SYS_adjtimex: 
c_long = 124; +pub const SYS_mprotect: c_long = 125; +pub const SYS_sigprocmask: c_long = 126; +#[deprecated(since = "0.2.70", note = "Functional up to 2.6 kernel")] +pub const SYS_create_module: c_long = 127; +pub const SYS_init_module: c_long = 128; +pub const SYS_delete_module: c_long = 129; +#[deprecated(since = "0.2.70", note = "Functional up to 2.6 kernel")] +pub const SYS_get_kernel_syms: c_long = 130; +pub const SYS_quotactl: c_long = 131; +pub const SYS_getpgid: c_long = 132; +pub const SYS_fchdir: c_long = 133; +pub const SYS_bdflush: c_long = 134; +pub const SYS_sysfs: c_long = 135; +pub const SYS_personality: c_long = 136; +pub const SYS_afs_syscall: c_long = 137; +pub const SYS_setfsuid: c_long = 138; +pub const SYS_setfsgid: c_long = 139; +pub const SYS__llseek: c_long = 140; +pub const SYS_getdents: c_long = 141; +pub const SYS__newselect: c_long = 142; +pub const SYS_flock: c_long = 143; +pub const SYS_msync: c_long = 144; +pub const SYS_readv: c_long = 145; +pub const SYS_writev: c_long = 146; +pub const SYS_getsid: c_long = 147; +pub const SYS_fdatasync: c_long = 148; +pub const SYS__sysctl: c_long = 149; +pub const SYS_mlock: c_long = 150; +pub const SYS_munlock: c_long = 151; +pub const SYS_mlockall: c_long = 152; +pub const SYS_munlockall: c_long = 153; +pub const SYS_sched_setparam: c_long = 154; +pub const SYS_sched_getparam: c_long = 155; +pub const SYS_sched_setscheduler: c_long = 156; +pub const SYS_sched_getscheduler: c_long = 157; +pub const SYS_sched_yield: c_long = 158; +pub const SYS_sched_get_priority_max: c_long = 159; +pub const SYS_sched_get_priority_min: c_long = 160; +pub const SYS_sched_rr_get_interval: c_long = 161; +pub const SYS_nanosleep: c_long = 162; +pub const SYS_mremap: c_long = 163; +pub const SYS_setresuid: c_long = 164; +pub const SYS_getresuid: c_long = 165; +#[deprecated(since = "0.2.70", note = "Functional up to 2.6 kernel")] +pub const SYS_query_module: c_long = 166; +pub const SYS_poll: c_long = 167; +pub const SYS_nfsservctl: c_long = 168; +pub const SYS_setresgid: c_long = 169; +pub const SYS_getresgid: c_long = 170; +pub const SYS_prctl: c_long = 171; +pub const SYS_rt_sigreturn: c_long = 172; +pub const SYS_rt_sigaction: c_long = 173; +pub const SYS_rt_sigprocmask: c_long = 174; +pub const SYS_rt_sigpending: c_long = 175; +pub const SYS_rt_sigtimedwait: c_long = 176; +pub const SYS_rt_sigqueueinfo: c_long = 177; +pub const SYS_rt_sigsuspend: c_long = 178; +pub const SYS_pread64: c_long = 179; +pub const SYS_pwrite64: c_long = 180; +pub const SYS_chown: c_long = 181; +pub const SYS_getcwd: c_long = 182; +pub const SYS_capget: c_long = 183; +pub const SYS_capset: c_long = 184; +pub const SYS_sigaltstack: c_long = 185; +pub const SYS_sendfile: c_long = 186; +pub const SYS_getpmsg: c_long = 187; +pub const SYS_putpmsg: c_long = 188; +pub const SYS_vfork: c_long = 189; +pub const SYS_ugetrlimit: c_long = 190; +pub const SYS_readahead: c_long = 191; +pub const SYS_mmap2: c_long = 192; +pub const SYS_truncate64: c_long = 193; +pub const SYS_ftruncate64: c_long = 194; +pub const SYS_stat64: c_long = 195; +pub const SYS_lstat64: c_long = 196; +pub const SYS_fstat64: c_long = 197; +pub const SYS_pciconfig_read: c_long = 198; +pub const SYS_pciconfig_write: c_long = 199; +pub const SYS_pciconfig_iobase: c_long = 200; +pub const SYS_multiplexer: c_long = 201; +pub const SYS_getdents64: c_long = 202; +pub const SYS_pivot_root: c_long = 203; +pub const SYS_fcntl64: c_long = 204; +pub const SYS_madvise: c_long = 205; +pub const SYS_mincore: 
c_long = 206; +pub const SYS_gettid: c_long = 207; +pub const SYS_tkill: c_long = 208; +pub const SYS_setxattr: c_long = 209; +pub const SYS_lsetxattr: c_long = 210; +pub const SYS_fsetxattr: c_long = 211; +pub const SYS_getxattr: c_long = 212; +pub const SYS_lgetxattr: c_long = 213; +pub const SYS_fgetxattr: c_long = 214; +pub const SYS_listxattr: c_long = 215; +pub const SYS_llistxattr: c_long = 216; +pub const SYS_flistxattr: c_long = 217; +pub const SYS_removexattr: c_long = 218; +pub const SYS_lremovexattr: c_long = 219; +pub const SYS_fremovexattr: c_long = 220; +pub const SYS_futex: c_long = 221; +pub const SYS_sched_setaffinity: c_long = 222; +pub const SYS_sched_getaffinity: c_long = 223; +pub const SYS_tuxcall: c_long = 225; +pub const SYS_sendfile64: c_long = 226; +pub const SYS_io_setup: c_long = 227; +pub const SYS_io_destroy: c_long = 228; +pub const SYS_io_getevents: c_long = 229; +pub const SYS_io_submit: c_long = 230; +pub const SYS_io_cancel: c_long = 231; +pub const SYS_set_tid_address: c_long = 232; +pub const SYS_fadvise64: c_long = 233; +pub const SYS_exit_group: c_long = 234; +pub const SYS_lookup_dcookie: c_long = 235; +pub const SYS_epoll_create: c_long = 236; +pub const SYS_epoll_ctl: c_long = 237; +pub const SYS_epoll_wait: c_long = 238; +pub const SYS_remap_file_pages: c_long = 239; +pub const SYS_timer_create: c_long = 240; +pub const SYS_timer_settime: c_long = 241; +pub const SYS_timer_gettime: c_long = 242; +pub const SYS_timer_getoverrun: c_long = 243; +pub const SYS_timer_delete: c_long = 244; +pub const SYS_clock_settime: c_long = 245; +pub const SYS_clock_gettime: c_long = 246; +pub const SYS_clock_getres: c_long = 247; +pub const SYS_clock_nanosleep: c_long = 248; +pub const SYS_swapcontext: c_long = 249; +pub const SYS_tgkill: c_long = 250; +pub const SYS_utimes: c_long = 251; +pub const SYS_statfs64: c_long = 252; +pub const SYS_fstatfs64: c_long = 253; +pub const SYS_fadvise64_64: c_long = 254; +pub const SYS_rtas: c_long = 255; +pub const SYS_sys_debug_setcontext: c_long = 256; +pub const SYS_migrate_pages: c_long = 258; +pub const SYS_mbind: c_long = 259; +pub const SYS_get_mempolicy: c_long = 260; +pub const SYS_set_mempolicy: c_long = 261; +pub const SYS_mq_open: c_long = 262; +pub const SYS_mq_unlink: c_long = 263; +pub const SYS_mq_timedsend: c_long = 264; +pub const SYS_mq_timedreceive: c_long = 265; +pub const SYS_mq_notify: c_long = 266; +pub const SYS_mq_getsetattr: c_long = 267; +pub const SYS_kexec_load: c_long = 268; +pub const SYS_add_key: c_long = 269; +pub const SYS_request_key: c_long = 270; +pub const SYS_keyctl: c_long = 271; +pub const SYS_waitid: c_long = 272; +pub const SYS_ioprio_set: c_long = 273; +pub const SYS_ioprio_get: c_long = 274; +pub const SYS_inotify_init: c_long = 275; +pub const SYS_inotify_add_watch: c_long = 276; +pub const SYS_inotify_rm_watch: c_long = 277; +pub const SYS_spu_run: c_long = 278; +pub const SYS_spu_create: c_long = 279; +pub const SYS_pselect6: c_long = 280; +pub const SYS_ppoll: c_long = 281; +pub const SYS_unshare: c_long = 282; +pub const SYS_splice: c_long = 283; +pub const SYS_tee: c_long = 284; +pub const SYS_vmsplice: c_long = 285; +pub const SYS_openat: c_long = 286; +pub const SYS_mkdirat: c_long = 287; +pub const SYS_mknodat: c_long = 288; +pub const SYS_fchownat: c_long = 289; +pub const SYS_futimesat: c_long = 290; +pub const SYS_fstatat64: c_long = 291; +pub const SYS_unlinkat: c_long = 292; +pub const SYS_renameat: c_long = 293; +pub const SYS_linkat: c_long = 294; +pub const 
SYS_symlinkat: c_long = 295; +pub const SYS_readlinkat: c_long = 296; +pub const SYS_fchmodat: c_long = 297; +pub const SYS_faccessat: c_long = 298; +pub const SYS_get_robust_list: c_long = 299; +pub const SYS_set_robust_list: c_long = 300; +pub const SYS_move_pages: c_long = 301; +pub const SYS_getcpu: c_long = 302; +pub const SYS_epoll_pwait: c_long = 303; +pub const SYS_utimensat: c_long = 304; +pub const SYS_signalfd: c_long = 305; +pub const SYS_timerfd_create: c_long = 306; +pub const SYS_eventfd: c_long = 307; +pub const SYS_sync_file_range2: c_long = 308; +pub const SYS_fallocate: c_long = 309; +pub const SYS_subpage_prot: c_long = 310; +pub const SYS_timerfd_settime: c_long = 311; +pub const SYS_timerfd_gettime: c_long = 312; +pub const SYS_signalfd4: c_long = 313; +pub const SYS_eventfd2: c_long = 314; +pub const SYS_epoll_create1: c_long = 315; +pub const SYS_dup3: c_long = 316; +pub const SYS_pipe2: c_long = 317; +pub const SYS_inotify_init1: c_long = 318; +pub const SYS_perf_event_open: c_long = 319; +pub const SYS_preadv: c_long = 320; +pub const SYS_pwritev: c_long = 321; +pub const SYS_rt_tgsigqueueinfo: c_long = 322; +pub const SYS_fanotify_init: c_long = 323; +pub const SYS_fanotify_mark: c_long = 324; +pub const SYS_prlimit64: c_long = 325; +pub const SYS_socket: c_long = 326; +pub const SYS_bind: c_long = 327; +pub const SYS_connect: c_long = 328; +pub const SYS_listen: c_long = 329; +pub const SYS_accept: c_long = 330; +pub const SYS_getsockname: c_long = 331; +pub const SYS_getpeername: c_long = 332; +pub const SYS_socketpair: c_long = 333; +pub const SYS_send: c_long = 334; +pub const SYS_sendto: c_long = 335; +pub const SYS_recv: c_long = 336; +pub const SYS_recvfrom: c_long = 337; +pub const SYS_shutdown: c_long = 338; +pub const SYS_setsockopt: c_long = 339; +pub const SYS_getsockopt: c_long = 340; +pub const SYS_sendmsg: c_long = 341; +pub const SYS_recvmsg: c_long = 342; +pub const SYS_recvmmsg: c_long = 343; +pub const SYS_accept4: c_long = 344; +pub const SYS_name_to_handle_at: c_long = 345; +pub const SYS_open_by_handle_at: c_long = 346; +pub const SYS_clock_adjtime: c_long = 347; +pub const SYS_syncfs: c_long = 348; +pub const SYS_sendmmsg: c_long = 349; +pub const SYS_setns: c_long = 350; +pub const SYS_process_vm_readv: c_long = 351; +pub const SYS_process_vm_writev: c_long = 352; +pub const SYS_finit_module: c_long = 353; +pub const SYS_kcmp: c_long = 354; +pub const SYS_sched_setattr: c_long = 355; +pub const SYS_sched_getattr: c_long = 356; +pub const SYS_renameat2: c_long = 357; +pub const SYS_seccomp: c_long = 358; +pub const SYS_getrandom: c_long = 359; +pub const SYS_memfd_create: c_long = 360; +pub const SYS_bpf: c_long = 361; +pub const SYS_execveat: c_long = 362; +pub const SYS_switch_endian: c_long = 363; +pub const SYS_userfaultfd: c_long = 364; +pub const SYS_membarrier: c_long = 365; +pub const SYS_mlock2: c_long = 378; +pub const SYS_copy_file_range: c_long = 379; +pub const SYS_preadv2: c_long = 380; +pub const SYS_pwritev2: c_long = 381; +pub const SYS_kexec_file_load: c_long = 382; +pub const SYS_statx: c_long = 383; +pub const SYS_pkey_alloc: c_long = 384; +pub const SYS_pkey_free: c_long = 385; +pub const SYS_pkey_mprotect: c_long = 386; +pub const SYS_pidfd_send_signal: c_long = 424; +pub const SYS_io_uring_setup: c_long = 425; +pub const SYS_io_uring_enter: c_long = 426; +pub const SYS_io_uring_register: c_long = 427; +pub const SYS_open_tree: c_long = 428; +pub const SYS_move_mount: c_long = 429; +pub const SYS_fsopen: c_long = 430; 
+pub const SYS_fsconfig: c_long = 431; +pub const SYS_fsmount: c_long = 432; +pub const SYS_fspick: c_long = 433; +pub const SYS_pidfd_open: c_long = 434; +pub const SYS_clone3: c_long = 435; +pub const SYS_close_range: c_long = 436; +pub const SYS_openat2: c_long = 437; +pub const SYS_pidfd_getfd: c_long = 438; +pub const SYS_faccessat2: c_long = 439; +pub const SYS_process_madvise: c_long = 440; +pub const SYS_epoll_pwait2: c_long = 441; +pub const SYS_mount_setattr: c_long = 442; +pub const SYS_quotactl_fd: c_long = 443; +pub const SYS_landlock_create_ruleset: c_long = 444; +pub const SYS_landlock_add_rule: c_long = 445; +pub const SYS_landlock_restrict_self: c_long = 446; +pub const SYS_memfd_secret: c_long = 447; +pub const SYS_process_mrelease: c_long = 448; +pub const SYS_futex_waitv: c_long = 449; +pub const SYS_set_mempolicy_home_node: c_long = 450; +pub const SYS_mseal: c_long = 462; diff --git a/vendor/libc/src/unix/linux_like/linux/musl/b32/riscv32/mod.rs b/vendor/libc/src/unix/linux_like/linux/musl/b32/riscv32/mod.rs new file mode 100644 index 00000000000000..ea4b51f006f0f5 --- /dev/null +++ b/vendor/libc/src/unix/linux_like/linux/musl/b32/riscv32/mod.rs @@ -0,0 +1,655 @@ +//! RISC-V-specific definitions for 32-bit linux-like values + +use crate::prelude::*; +use crate::{off64_t, off_t}; + +pub type wchar_t = c_int; + +s! { + pub struct stat { + pub st_dev: crate::dev_t, + pub st_ino: crate::ino_t, + pub st_mode: crate::mode_t, + pub st_nlink: crate::nlink_t, + pub st_uid: crate::uid_t, + pub st_gid: crate::gid_t, + pub st_rdev: crate::dev_t, + pub __pad1: crate::dev_t, + pub st_size: off_t, + pub st_blksize: crate::blksize_t, + pub __pad2: c_int, + pub st_blocks: crate::blkcnt_t, + pub st_atime: crate::time_t, + pub st_atime_nsec: c_long, + pub st_mtime: crate::time_t, + pub st_mtime_nsec: c_long, + pub st_ctime: crate::time_t, + pub st_ctime_nsec: c_long, + __unused: [c_int; 2usize], + } + + pub struct stat64 { + pub st_dev: crate::dev_t, + pub st_ino: crate::ino64_t, + pub st_mode: crate::mode_t, + pub st_nlink: crate::nlink_t, + pub st_uid: crate::uid_t, + pub st_gid: crate::gid_t, + pub st_rdev: crate::dev_t, + pub __pad1: crate::dev_t, + pub st_size: off64_t, + pub st_blksize: crate::blksize_t, + pub __pad2: c_int, + pub st_blocks: crate::blkcnt64_t, + pub st_atime: crate::time_t, + pub st_atime_nsec: c_long, + pub st_mtime: crate::time_t, + pub st_mtime_nsec: c_long, + pub st_ctime: crate::time_t, + pub st_ctime_nsec: c_long, + __unused: [c_int; 2], + } + + pub struct stack_t { + pub ss_sp: *mut c_void, + pub ss_flags: c_int, + pub ss_size: size_t, + } + + pub struct ipc_perm { + pub __key: crate::key_t, + pub uid: crate::uid_t, + pub gid: crate::gid_t, + pub cuid: crate::uid_t, + pub cgid: crate::gid_t, + pub mode: c_ushort, + __pad1: c_ushort, + pub __seq: c_ushort, + __pad2: c_ushort, + __unused1: c_ulong, + __unused2: c_ulong, + } + + pub struct shmid_ds { + pub shm_perm: crate::ipc_perm, + pub shm_segsz: size_t, + pub shm_atime: crate::time_t, + pub shm_dtime: crate::time_t, + pub shm_ctime: crate::time_t, + pub shm_cpid: crate::pid_t, + pub shm_lpid: crate::pid_t, + pub shm_nattch: crate::shmatt_t, + __unused5: c_ulong, + __unused6: c_ulong, + } + + pub struct msqid_ds { + pub msg_perm: crate::ipc_perm, + pub msg_stime: crate::time_t, + __unused1: c_int, + pub msg_rtime: crate::time_t, + __unused2: c_int, + pub msg_ctime: crate::time_t, + __unused3: c_int, + pub __msg_cbytes: c_ulong, + pub msg_qnum: crate::msgqnum_t, + pub msg_qbytes: crate::msglen_t, + pub 
msg_lspid: crate::pid_t, + pub msg_lrpid: crate::pid_t, + __pad1: c_ulong, + __pad2: c_ulong, + } +} + +s_no_extra_traits! { + #[repr(align(8))] + pub struct max_align_t { + priv_: (i64, f64), + } +} + +//pub const RLIM_INFINITY: crate::rlim_t = !0; +pub const VEOF: usize = 4; +//pub const RLIMIT_RSS: crate::__rlimit_resource_t = 5; +//pub const RLIMIT_AS: crate::__rlimit_resource_t = 9; +//pub const RLIMIT_MEMLOCK: crate::__rlimit_resource_t = 8; +//pub const RLIMIT_NOFILE: crate::__rlimit_resource_t = 7; +//pub const RLIMIT_NPROC: crate::__rlimit_resource_t = 6; +pub const O_APPEND: c_int = 1024; +pub const O_CREAT: c_int = 64; +pub const O_EXCL: c_int = 128; +pub const O_NOCTTY: c_int = 256; +pub const O_NONBLOCK: c_int = 2048; +pub const O_SYNC: c_int = 1052672; +pub const O_RSYNC: c_int = 1052672; +pub const O_DSYNC: c_int = 4096; +pub const MAP_GROWSDOWN: c_int = 256; +pub const EDEADLK: c_int = 35; +pub const ENAMETOOLONG: c_int = 36; +pub const ENOLCK: c_int = 37; +pub const ENOSYS: c_int = 38; +pub const ENOTEMPTY: c_int = 39; +pub const ELOOP: c_int = 40; +pub const ENOMSG: c_int = 42; +pub const EIDRM: c_int = 43; +pub const ECHRNG: c_int = 44; +pub const EL2NSYNC: c_int = 45; +pub const EL3HLT: c_int = 46; +pub const EL3RST: c_int = 47; +pub const ELNRNG: c_int = 48; +pub const EUNATCH: c_int = 49; +pub const ENOCSI: c_int = 50; +pub const EL2HLT: c_int = 51; +pub const EBADE: c_int = 52; +pub const EBADR: c_int = 53; +pub const EXFULL: c_int = 54; +pub const ENOANO: c_int = 55; +pub const EBADRQC: c_int = 56; +pub const EBADSLT: c_int = 57; +pub const EMULTIHOP: c_int = 72; +pub const EOVERFLOW: c_int = 75; +pub const ENOTUNIQ: c_int = 76; +pub const EBADFD: c_int = 77; +pub const EBADMSG: c_int = 74; +pub const EREMCHG: c_int = 78; +pub const ELIBACC: c_int = 79; +pub const ELIBBAD: c_int = 80; +pub const ELIBSCN: c_int = 81; +pub const ELIBMAX: c_int = 82; +pub const ELIBEXEC: c_int = 83; +pub const EILSEQ: c_int = 84; +pub const ERESTART: c_int = 85; +pub const ESTRPIPE: c_int = 86; +pub const EUSERS: c_int = 87; +pub const ENOTSOCK: c_int = 88; +pub const EDESTADDRREQ: c_int = 89; +pub const EMSGSIZE: c_int = 90; +pub const EPROTOTYPE: c_int = 91; +pub const ENOPROTOOPT: c_int = 92; +pub const EPROTONOSUPPORT: c_int = 93; +pub const ESOCKTNOSUPPORT: c_int = 94; +pub const EOPNOTSUPP: c_int = 95; +pub const ENOTSUP: c_int = EOPNOTSUPP; +pub const EPFNOSUPPORT: c_int = 96; +pub const EAFNOSUPPORT: c_int = 97; +pub const EADDRINUSE: c_int = 98; +pub const EADDRNOTAVAIL: c_int = 99; +pub const ENETDOWN: c_int = 100; +pub const ENETUNREACH: c_int = 101; +pub const ENETRESET: c_int = 102; +pub const ECONNABORTED: c_int = 103; +pub const ECONNRESET: c_int = 104; +pub const ENOBUFS: c_int = 105; +pub const EISCONN: c_int = 106; +pub const ENOTCONN: c_int = 107; +pub const ESHUTDOWN: c_int = 108; +pub const ETOOMANYREFS: c_int = 109; +pub const ETIMEDOUT: c_int = 110; +pub const ECONNREFUSED: c_int = 111; +pub const EHOSTDOWN: c_int = 112; +pub const EHOSTUNREACH: c_int = 113; +pub const EALREADY: c_int = 114; +pub const EINPROGRESS: c_int = 115; +pub const ESTALE: c_int = 116; +pub const EDQUOT: c_int = 122; +pub const ENOMEDIUM: c_int = 123; +pub const EMEDIUMTYPE: c_int = 124; +pub const ECANCELED: c_int = 125; +pub const ENOKEY: c_int = 126; +pub const EKEYEXPIRED: c_int = 127; +pub const EKEYREVOKED: c_int = 128; +pub const EKEYREJECTED: c_int = 129; +pub const EOWNERDEAD: c_int = 130; +pub const ENOTRECOVERABLE: c_int = 131; +pub const EHWPOISON: c_int = 133; +pub const 
ERFKILL: c_int = 132; + +pub const SOCK_STREAM: c_int = 1; +pub const SOCK_DGRAM: c_int = 2; +pub const SA_ONSTACK: c_int = 0x08000000; +pub const SA_SIGINFO: c_int = 4; +pub const SA_NOCLDWAIT: c_int = 2; +pub const SIGTTIN: c_int = 21; +pub const SIGTTOU: c_int = 22; +pub const SIGXCPU: c_int = 24; +pub const SIGXFSZ: c_int = 25; +pub const SIGVTALRM: c_int = 26; +pub const SIGPROF: c_int = 27; +pub const SIGWINCH: c_int = 28; +pub const SIGCHLD: c_int = 17; +pub const SIGBUS: c_int = 7; +pub const SIGUSR1: c_int = 10; +pub const SIGUSR2: c_int = 12; +pub const SIGCONT: c_int = 18; +pub const SIGSTOP: c_int = 19; +pub const SIGTSTP: c_int = 20; +pub const SIGURG: c_int = 23; +pub const SIGIO: c_int = 29; +pub const SIGSYS: c_int = 31; +pub const SIGSTKFLT: c_int = 16; +pub const SIGPOLL: c_int = 29; +pub const SIGPWR: c_int = 30; +pub const SIG_SETMASK: c_int = 2; +pub const SIG_BLOCK: c_int = 0; +pub const SIG_UNBLOCK: c_int = 1; +pub const POLLWRNORM: c_short = 256; +pub const POLLWRBAND: c_short = 512; +pub const O_ASYNC: c_int = 8192; +pub const F_SETOWN: c_int = 8; +pub const F_GETOWN: c_int = 9; +pub const F_GETLK: c_int = 12; +pub const F_SETLK: c_int = 13; +pub const F_SETLKW: c_int = 14; + +pub const O_DIRECT: c_int = 16384; +pub const O_DIRECTORY: c_int = 65536; +pub const O_LARGEFILE: c_int = 0o0100000; +pub const O_NOFOLLOW: c_int = 131072; +pub const MADV_SOFT_OFFLINE: c_int = 101; +pub const MAP_HUGETLB: c_int = 262144; +pub const MAP_LOCKED: c_int = 8192; +pub const MAP_NORESERVE: c_int = 16384; +pub const MAP_ANON: c_int = 32; +pub const MAP_DENYWRITE: c_int = 2048; +pub const MAP_EXECUTABLE: c_int = 4096; +pub const MAP_POPULATE: c_int = 32768; +pub const MAP_NONBLOCK: c_int = 65536; +pub const MAP_STACK: c_int = 131072; +pub const MAP_SYNC: c_int = 0x080000; +pub const EDEADLOCK: c_int = 35; +pub const EUCLEAN: c_int = 117; +pub const ENOTNAM: c_int = 118; +pub const ENAVAIL: c_int = 119; +pub const EISNAM: c_int = 120; +pub const EREMOTEIO: c_int = 121; +pub const MCL_CURRENT: c_int = 1; +pub const MCL_FUTURE: c_int = 2; +pub const MCL_ONFAULT: c_int = 4; +pub const SIGSTKSZ: size_t = 8192; +pub const MINSIGSTKSZ: size_t = 2048; +pub const CBAUD: crate::tcflag_t = 4111; +pub const TAB1: crate::tcflag_t = 2048; +pub const TAB2: crate::tcflag_t = 4096; +pub const TAB3: crate::tcflag_t = 6144; +pub const CR1: crate::tcflag_t = 512; +pub const CR2: crate::tcflag_t = 1024; +pub const CR3: crate::tcflag_t = 1536; +pub const FF1: crate::tcflag_t = 32768; +pub const BS1: crate::tcflag_t = 8192; +pub const VT1: crate::tcflag_t = 16384; +pub const VWERASE: usize = 14; +pub const VREPRINT: usize = 12; +pub const VSUSP: usize = 10; +pub const VSTART: usize = 8; +pub const VSTOP: usize = 9; +pub const VDISCARD: usize = 13; +pub const VTIME: usize = 5; +pub const IXON: crate::tcflag_t = 1024; +pub const IXOFF: crate::tcflag_t = 4096; +pub const ONLCR: crate::tcflag_t = 4; +pub const CSIZE: crate::tcflag_t = 48; +pub const CS6: crate::tcflag_t = 16; +pub const CS7: crate::tcflag_t = 32; +pub const CS8: crate::tcflag_t = 48; +pub const CSTOPB: crate::tcflag_t = 64; +pub const CREAD: crate::tcflag_t = 128; +pub const PARENB: crate::tcflag_t = 256; +pub const PARODD: crate::tcflag_t = 512; +pub const HUPCL: crate::tcflag_t = 1024; +pub const CLOCAL: crate::tcflag_t = 2048; +pub const ECHOKE: crate::tcflag_t = 2048; +pub const ECHOE: crate::tcflag_t = 16; +pub const ECHOK: crate::tcflag_t = 32; +pub const ECHONL: crate::tcflag_t = 64; +pub const ECHOPRT: crate::tcflag_t = 1024; +pub 
const ECHOCTL: crate::tcflag_t = 512; +pub const ISIG: crate::tcflag_t = 1; +pub const ICANON: crate::tcflag_t = 2; +pub const PENDIN: crate::tcflag_t = 16384; +pub const NOFLSH: crate::tcflag_t = 128; +pub const CIBAUD: crate::tcflag_t = 269418496; +pub const CBAUDEX: crate::tcflag_t = 4096; +pub const VSWTC: usize = 7; +pub const OLCUC: crate::tcflag_t = 2; +pub const NLDLY: crate::tcflag_t = 256; +pub const CRDLY: crate::tcflag_t = 1536; +pub const TABDLY: crate::tcflag_t = 6144; +pub const BSDLY: crate::tcflag_t = 8192; +pub const FFDLY: crate::tcflag_t = 32768; +pub const VTDLY: crate::tcflag_t = 16384; +pub const XTABS: crate::tcflag_t = 6144; +pub const B57600: crate::speed_t = 4097; +pub const B115200: crate::speed_t = 4098; +pub const B230400: crate::speed_t = 4099; +pub const B460800: crate::speed_t = 4100; +pub const B500000: crate::speed_t = 4101; +pub const B576000: crate::speed_t = 4102; +pub const B921600: crate::speed_t = 4103; +pub const B1000000: crate::speed_t = 4104; +pub const B1152000: crate::speed_t = 4105; +pub const B1500000: crate::speed_t = 4106; +pub const B2000000: crate::speed_t = 4107; +pub const B2500000: crate::speed_t = 4108; +pub const B3000000: crate::speed_t = 4109; +pub const B3500000: crate::speed_t = 4110; +pub const B4000000: crate::speed_t = 4111; +pub const VEOL: usize = 11; +pub const VEOL2: usize = 16; +pub const VMIN: usize = 6; +pub const IEXTEN: crate::tcflag_t = 32768; +pub const TOSTOP: crate::tcflag_t = 256; +pub const FLUSHO: crate::tcflag_t = 4096; +pub const EXTPROC: crate::tcflag_t = 65536; + +pub const SYS_read: c_long = 63; +pub const SYS_write: c_long = 64; +pub const SYS_close: c_long = 57; +// RISC-V don't have SYS_fstat, use statx instead. +pub const SYS_lseek: c_long = 62; +pub const SYS_mmap: c_long = 222; +pub const SYS_mprotect: c_long = 226; +pub const SYS_munmap: c_long = 215; +pub const SYS_brk: c_long = 214; +pub const SYS_rt_sigaction: c_long = 134; +pub const SYS_rt_sigprocmask: c_long = 135; +pub const SYS_rt_sigreturn: c_long = 139; +pub const SYS_ioctl: c_long = 29; +pub const SYS_pread64: c_long = 67; +pub const SYS_pwrite64: c_long = 68; +pub const SYS_readv: c_long = 65; +pub const SYS_writev: c_long = 66; +pub const SYS_sched_yield: c_long = 124; +pub const SYS_mremap: c_long = 216; +pub const SYS_msync: c_long = 227; +pub const SYS_mincore: c_long = 232; +pub const SYS_madvise: c_long = 233; +pub const SYS_shmget: c_long = 194; +pub const SYS_shmat: c_long = 196; +pub const SYS_shmctl: c_long = 195; +pub const SYS_dup: c_long = 23; +pub const SYS_getitimer: c_long = 102; +pub const SYS_setitimer: c_long = 103; +pub const SYS_getpid: c_long = 172; +pub const SYS_sendfile: c_long = 71; +pub const SYS_socket: c_long = 198; +pub const SYS_connect: c_long = 203; +pub const SYS_accept: c_long = 202; +pub const SYS_sendto: c_long = 206; +pub const SYS_recvfrom: c_long = 207; +pub const SYS_sendmsg: c_long = 211; +pub const SYS_recvmsg: c_long = 212; +pub const SYS_shutdown: c_long = 210; +pub const SYS_bind: c_long = 200; +pub const SYS_listen: c_long = 201; +pub const SYS_getsockname: c_long = 204; +pub const SYS_getpeername: c_long = 205; +pub const SYS_socketpair: c_long = 199; +pub const SYS_setsockopt: c_long = 208; +pub const SYS_getsockopt: c_long = 209; +pub const SYS_clone: c_long = 220; +pub const SYS_execve: c_long = 221; +pub const SYS_exit: c_long = 93; +// RISC-V don't have wait4, use waitid instead. 
+pub const SYS_kill: c_long = 129; +pub const SYS_uname: c_long = 160; +pub const SYS_semget: c_long = 190; +pub const SYS_semop: c_long = 193; +pub const SYS_semctl: c_long = 191; +pub const SYS_shmdt: c_long = 197; +pub const SYS_msgget: c_long = 186; +pub const SYS_msgsnd: c_long = 189; +pub const SYS_msgrcv: c_long = 188; +pub const SYS_msgctl: c_long = 187; +pub const SYS_fcntl: c_long = 25; +pub const SYS_flock: c_long = 32; +pub const SYS_fsync: c_long = 82; +pub const SYS_fdatasync: c_long = 83; +pub const SYS_truncate: c_long = 45; +pub const SYS_ftruncate: c_long = 46; +pub const SYS_getcwd: c_long = 17; +pub const SYS_chdir: c_long = 49; +pub const SYS_fchdir: c_long = 50; +pub const SYS_fchmod: c_long = 52; +pub const SYS_fchown: c_long = 55; +pub const SYS_umask: c_long = 166; +// RISC-V don't have gettimeofday, use clock_gettime64 instead. +// RISC-V don't have getrlimit, use prlimit64 instead. +pub const SYS_getrusage: c_long = 165; +pub const SYS_sysinfo: c_long = 179; +pub const SYS_times: c_long = 153; +pub const SYS_ptrace: c_long = 117; +pub const SYS_getuid: c_long = 174; +pub const SYS_syslog: c_long = 116; +pub const SYS_getgid: c_long = 176; +pub const SYS_setuid: c_long = 146; +pub const SYS_setgid: c_long = 144; +pub const SYS_geteuid: c_long = 175; +pub const SYS_getegid: c_long = 177; +pub const SYS_setpgid: c_long = 154; +pub const SYS_getppid: c_long = 173; +pub const SYS_setsid: c_long = 157; +pub const SYS_setreuid: c_long = 145; +pub const SYS_setregid: c_long = 143; +pub const SYS_getgroups: c_long = 158; +pub const SYS_setgroups: c_long = 159; +pub const SYS_setresuid: c_long = 147; +pub const SYS_getresuid: c_long = 148; +pub const SYS_setresgid: c_long = 149; +pub const SYS_getresgid: c_long = 150; +pub const SYS_getpgid: c_long = 155; +pub const SYS_setfsuid: c_long = 151; +pub const SYS_setfsgid: c_long = 152; +pub const SYS_getsid: c_long = 156; +pub const SYS_capget: c_long = 90; +pub const SYS_capset: c_long = 91; +pub const SYS_rt_sigpending: c_long = 136; +pub const SYS_rt_sigtimedwait_time64: c_long = 421; +pub const SYS_rt_sigqueueinfo: c_long = 138; +pub const SYS_rt_sigsuspend: c_long = 133; +pub const SYS_sigaltstack: c_long = 132; +pub const SYS_personality: c_long = 92; +pub const SYS_statfs: c_long = 43; +pub const SYS_fstatfs: c_long = 44; +pub const SYS_getpriority: c_long = 141; +pub const SYS_setpriority: c_long = 140; +pub const SYS_sched_setparam: c_long = 118; +pub const SYS_sched_getparam: c_long = 121; +pub const SYS_sched_setscheduler: c_long = 119; +pub const SYS_sched_getscheduler: c_long = 120; +pub const SYS_sched_get_priority_max: c_long = 125; +pub const SYS_sched_get_priority_min: c_long = 126; +pub const SYS_sched_rr_get_interval_time64: c_long = 423; +pub const SYS_mlock: c_long = 228; +pub const SYS_munlock: c_long = 229; +pub const SYS_mlockall: c_long = 230; +pub const SYS_munlockall: c_long = 231; +pub const SYS_vhangup: c_long = 58; +pub const SYS_pivot_root: c_long = 41; +pub const SYS_prctl: c_long = 167; +// RISC-V don't have setrlimit, use prlimit64 instead. +pub const SYS_chroot: c_long = 51; +pub const SYS_sync: c_long = 81; +pub const SYS_acct: c_long = 89; +// RISC-V don't have settimeofday, use clock_settime64 instead. 
+pub const SYS_mount: c_long = 40; +pub const SYS_umount2: c_long = 39; +pub const SYS_swapon: c_long = 224; +pub const SYS_swapoff: c_long = 225; +pub const SYS_reboot: c_long = 142; +pub const SYS_sethostname: c_long = 161; +pub const SYS_setdomainname: c_long = 162; +pub const SYS_init_module: c_long = 105; +pub const SYS_delete_module: c_long = 106; +pub const SYS_quotactl: c_long = 60; +pub const SYS_nfsservctl: c_long = 42; +pub const SYS_gettid: c_long = 178; +pub const SYS_readahead: c_long = 213; +pub const SYS_setxattr: c_long = 5; +pub const SYS_lsetxattr: c_long = 6; +pub const SYS_fsetxattr: c_long = 7; +pub const SYS_getxattr: c_long = 8; +pub const SYS_lgetxattr: c_long = 9; +pub const SYS_fgetxattr: c_long = 10; +pub const SYS_listxattr: c_long = 11; +pub const SYS_llistxattr: c_long = 12; +pub const SYS_flistxattr: c_long = 13; +pub const SYS_removexattr: c_long = 14; +pub const SYS_lremovexattr: c_long = 15; +pub const SYS_fremovexattr: c_long = 16; +pub const SYS_tkill: c_long = 130; +pub const SYS_futex_time64: c_long = 422; +pub const SYS_sched_setaffinity: c_long = 122; +pub const SYS_sched_getaffinity: c_long = 123; +pub const SYS_io_setup: c_long = 0; +pub const SYS_io_destroy: c_long = 1; +pub const SYS_io_pgetevents_time64: c_long = 416; +pub const SYS_io_submit: c_long = 2; +pub const SYS_io_cancel: c_long = 3; +pub const SYS_lookup_dcookie: c_long = 18; +pub const SYS_remap_file_pages: c_long = 234; +pub const SYS_getdents64: c_long = 61; +pub const SYS_set_tid_address: c_long = 96; +pub const SYS_restart_syscall: c_long = 128; +pub const SYS_semtimedop_time64: c_long = 420; +pub const SYS_fadvise64: c_long = 223; +pub const SYS_timer_create: c_long = 107; +pub const SYS_timer_settime64: c_long = 409; +pub const SYS_timer_gettime64: c_long = 408; +pub const SYS_timer_getoverrun: c_long = 109; +pub const SYS_timer_delete: c_long = 111; +pub const SYS_clock_settime64: c_long = 404; +pub const SYS_clock_gettime64: c_long = 403; +pub const SYS_clock_getres_time64: c_long = 406; +pub const SYS_clock_nanosleep_time64: c_long = 407; +pub const SYS_exit_group: c_long = 94; +pub const SYS_epoll_ctl: c_long = 21; +pub const SYS_tgkill: c_long = 131; +pub const SYS_mbind: c_long = 235; +pub const SYS_set_mempolicy: c_long = 237; +pub const SYS_get_mempolicy: c_long = 236; +pub const SYS_mq_open: c_long = 180; +pub const SYS_mq_unlink: c_long = 181; +pub const SYS_mq_timedsend_time64: c_long = 418; +pub const SYS_mq_timedreceive_time64: c_long = 419; +pub const SYS_mq_notify: c_long = 184; +pub const SYS_mq_getsetattr: c_long = 185; +pub const SYS_kexec_load: c_long = 104; +pub const SYS_waitid: c_long = 95; +pub const SYS_add_key: c_long = 217; +pub const SYS_request_key: c_long = 218; +pub const SYS_keyctl: c_long = 219; +pub const SYS_ioprio_set: c_long = 30; +pub const SYS_ioprio_get: c_long = 31; +pub const SYS_inotify_add_watch: c_long = 27; +pub const SYS_inotify_rm_watch: c_long = 28; +pub const SYS_migrate_pages: c_long = 238; +pub const SYS_openat: c_long = 56; +pub const SYS_mkdirat: c_long = 34; +pub const SYS_mknodat: c_long = 33; +pub const SYS_fchownat: c_long = 54; +// RISC-V don't have newfstatat, use statx instead. 
+pub const SYS_unlinkat: c_long = 35; +pub const SYS_linkat: c_long = 37; +pub const SYS_symlinkat: c_long = 36; +pub const SYS_readlinkat: c_long = 78; +pub const SYS_fchmodat: c_long = 53; +pub const SYS_faccessat: c_long = 48; +pub const SYS_pselect6_time64: c_long = 413; +pub const SYS_ppoll_time64: c_long = 414; +pub const SYS_unshare: c_long = 97; +pub const SYS_set_robust_list: c_long = 99; +pub const SYS_get_robust_list: c_long = 100; +pub const SYS_splice: c_long = 76; +pub const SYS_tee: c_long = 77; +pub const SYS_sync_file_range: c_long = 84; +pub const SYS_vmsplice: c_long = 75; +pub const SYS_move_pages: c_long = 239; +pub const SYS_utimensat_time64: c_long = 412; +pub const SYS_epoll_pwait: c_long = 22; +pub const SYS_timerfd_create: c_long = 85; +pub const SYS_fallocate: c_long = 47; +pub const SYS_timerfd_settime64: c_long = 411; +pub const SYS_timerfd_gettime64: c_long = 410; +pub const SYS_accept4: c_long = 242; +pub const SYS_signalfd4: c_long = 74; +pub const SYS_eventfd2: c_long = 19; +pub const SYS_epoll_create1: c_long = 20; +pub const SYS_dup3: c_long = 24; +pub const SYS_pipe2: c_long = 59; +pub const SYS_inotify_init1: c_long = 26; +pub const SYS_preadv: c_long = 69; +pub const SYS_pwritev: c_long = 70; +pub const SYS_rt_tgsigqueueinfo: c_long = 240; +pub const SYS_perf_event_open: c_long = 241; +pub const SYS_recvmmsg_time64: c_long = 417; +pub const SYS_fanotify_init: c_long = 262; +pub const SYS_fanotify_mark: c_long = 263; +pub const SYS_prlimit64: c_long = 261; +pub const SYS_name_to_handle_at: c_long = 264; +pub const SYS_open_by_handle_at: c_long = 265; +pub const SYS_clock_adjtime64: c_long = 405; +pub const SYS_syncfs: c_long = 267; +pub const SYS_sendmmsg: c_long = 269; +pub const SYS_setns: c_long = 268; +pub const SYS_getcpu: c_long = 168; +pub const SYS_process_vm_readv: c_long = 270; +pub const SYS_process_vm_writev: c_long = 271; +pub const SYS_kcmp: c_long = 272; +pub const SYS_finit_module: c_long = 273; +pub const SYS_sched_setattr: c_long = 274; +pub const SYS_sched_getattr: c_long = 275; +pub const SYS_renameat2: c_long = 276; +pub const SYS_seccomp: c_long = 277; +pub const SYS_getrandom: c_long = 278; +pub const SYS_memfd_create: c_long = 279; +pub const SYS_bpf: c_long = 280; +pub const SYS_execveat: c_long = 281; +pub const SYS_userfaultfd: c_long = 282; +pub const SYS_membarrier: c_long = 283; +pub const SYS_mlock2: c_long = 284; +pub const SYS_copy_file_range: c_long = 285; +pub const SYS_preadv2: c_long = 286; +pub const SYS_pwritev2: c_long = 287; +pub const SYS_pkey_mprotect: c_long = 288; +pub const SYS_pkey_alloc: c_long = 289; +pub const SYS_pkey_free: c_long = 290; +pub const SYS_statx: c_long = 291; +pub const SYS_pidfd_send_signal: c_long = 424; +pub const SYS_io_uring_setup: c_long = 425; +pub const SYS_io_uring_enter: c_long = 426; +pub const SYS_io_uring_register: c_long = 427; +pub const SYS_open_tree: c_long = 428; +pub const SYS_move_mount: c_long = 429; +pub const SYS_fsopen: c_long = 430; +pub const SYS_fsconfig: c_long = 431; +pub const SYS_fsmount: c_long = 432; +pub const SYS_fspick: c_long = 433; +pub const SYS_pidfd_open: c_long = 434; +pub const SYS_clone3: c_long = 435; +pub const SYS_close_range: c_long = 436; +pub const SYS_openat2: c_long = 437; +pub const SYS_pidfd_getfd: c_long = 438; +pub const SYS_faccessat2: c_long = 439; +pub const SYS_process_madvise: c_long = 440; +pub const SYS_epoll_pwait2: c_long = 441; +pub const SYS_mount_setattr: c_long = 442; + +// Plain syscalls aliased to their time64 variants 
+pub const SYS_clock_gettime: c_long = SYS_clock_gettime64; +pub const SYS_clock_settime: c_long = SYS_clock_settime64; +pub const SYS_clock_adjtime: c_long = SYS_clock_adjtime64; +pub const SYS_clock_getres: c_long = SYS_clock_getres_time64; +pub const SYS_clock_nanosleep: c_long = SYS_clock_nanosleep_time64; +pub const SYS_timer_gettime: c_long = SYS_timer_gettime64; +pub const SYS_timer_settime: c_long = SYS_timer_settime64; +pub const SYS_timerfd_gettime: c_long = SYS_timerfd_gettime64; +pub const SYS_timerfd_settime: c_long = SYS_timerfd_settime64; +pub const SYS_utimensat: c_long = SYS_utimensat_time64; +pub const SYS_pselect6: c_long = SYS_pselect6_time64; +pub const SYS_ppoll: c_long = SYS_ppoll_time64; +pub const SYS_recvmmsg: c_long = SYS_recvmmsg_time64; +pub const SYS_mq_timedsend: c_long = SYS_mq_timedsend_time64; +pub const SYS_mq_timedreceive: c_long = SYS_mq_timedreceive_time64; +pub const SYS_rt_sigtimedwait: c_long = SYS_rt_sigtimedwait_time64; +pub const SYS_futex: c_long = SYS_futex_time64; +pub const SYS_sched_rr_get_interval: c_long = SYS_sched_rr_get_interval_time64; diff --git a/vendor/libc/src/unix/linux_like/linux/musl/b32/x86/mod.rs b/vendor/libc/src/unix/linux_like/linux/musl/b32/x86/mod.rs new file mode 100644 index 00000000000000..ae8b7d761dd6f1 --- /dev/null +++ b/vendor/libc/src/unix/linux_like/linux/musl/b32/x86/mod.rs @@ -0,0 +1,889 @@ +use crate::off_t; +use crate::prelude::*; + +pub type wchar_t = i32; + +s! { + pub struct stat { + pub st_dev: crate::dev_t, + __st_dev_padding: c_int, + __st_ino_truncated: c_long, + pub st_mode: crate::mode_t, + pub st_nlink: crate::nlink_t, + pub st_uid: crate::uid_t, + pub st_gid: crate::gid_t, + pub st_rdev: crate::dev_t, + __st_rdev_padding: c_int, + pub st_size: off_t, + pub st_blksize: crate::blksize_t, + pub st_blocks: crate::blkcnt_t, + pub st_atime: crate::time_t, + pub st_atime_nsec: c_long, + pub st_mtime: crate::time_t, + pub st_mtime_nsec: c_long, + pub st_ctime: crate::time_t, + pub st_ctime_nsec: c_long, + pub st_ino: crate::ino_t, + } + + pub struct stat64 { + pub st_dev: crate::dev_t, + __st_dev_padding: c_int, + __st_ino_truncated: c_long, + pub st_mode: crate::mode_t, + pub st_nlink: crate::nlink_t, + pub st_uid: crate::uid_t, + pub st_gid: crate::gid_t, + pub st_rdev: crate::dev_t, + __st_rdev_padding: c_int, + pub st_size: off_t, + pub st_blksize: crate::blksize_t, + pub st_blocks: crate::blkcnt_t, + pub st_atime: crate::time_t, + pub st_atime_nsec: c_long, + pub st_mtime: crate::time_t, + pub st_mtime_nsec: c_long, + pub st_ctime: crate::time_t, + pub st_ctime_nsec: c_long, + pub st_ino: crate::ino_t, + } + + pub struct mcontext_t { + __private: [u32; 22], + } + + pub struct stack_t { + pub ss_sp: *mut c_void, + pub ss_flags: c_int, + pub ss_size: size_t, + } + + pub struct ipc_perm { + #[cfg(musl_v1_2_3)] + pub __key: crate::key_t, + #[cfg(not(musl_v1_2_3))] + #[deprecated( + since = "0.2.173", + note = "This field is incorrectly named and will be changed + to __key in a future release." 
+ )] + pub __ipc_perm_key: crate::key_t, + pub uid: crate::uid_t, + pub gid: crate::gid_t, + pub cuid: crate::uid_t, + pub cgid: crate::gid_t, + pub mode: crate::mode_t, + pub __seq: c_int, + __unused1: c_long, + __unused2: c_long, + } + + pub struct shmid_ds { + pub shm_perm: crate::ipc_perm, + pub shm_segsz: size_t, + pub shm_atime: crate::time_t, + __unused1: c_int, + pub shm_dtime: crate::time_t, + __unused2: c_int, + pub shm_ctime: crate::time_t, + __unused3: c_int, + pub shm_cpid: crate::pid_t, + pub shm_lpid: crate::pid_t, + pub shm_nattch: c_ulong, + __pad1: c_ulong, + __pad2: c_ulong, + } + + pub struct msqid_ds { + pub msg_perm: crate::ipc_perm, + pub msg_stime: crate::time_t, + __unused1: c_int, + pub msg_rtime: crate::time_t, + __unused2: c_int, + pub msg_ctime: crate::time_t, + __unused3: c_int, + pub __msg_cbytes: c_ulong, + pub msg_qnum: crate::msgqnum_t, + pub msg_qbytes: crate::msglen_t, + pub msg_lspid: crate::pid_t, + pub msg_lrpid: crate::pid_t, + __pad1: c_ulong, + __pad2: c_ulong, + } +} + +s_no_extra_traits! { + pub struct user_fpxregs_struct { + pub cwd: c_ushort, + pub swd: c_ushort, + pub twd: c_ushort, + pub fop: c_ushort, + pub fip: c_long, + pub fcs: c_long, + pub foo: c_long, + pub fos: c_long, + pub mxcsr: c_long, + __reserved: c_long, + pub st_space: [c_long; 32], + pub xmm_space: [c_long; 32], + padding: [c_long; 56], + } + + pub struct ucontext_t { + pub uc_flags: c_ulong, + pub uc_link: *mut ucontext_t, + pub uc_stack: crate::stack_t, + pub uc_mcontext: mcontext_t, + pub uc_sigmask: crate::sigset_t, + __private: [u8; 112], + } + + #[repr(align(8))] + pub struct max_align_t { + priv_: [f64; 3], + } +} + +cfg_if! { + if #[cfg(feature = "extra_traits")] { + impl PartialEq for user_fpxregs_struct { + fn eq(&self, other: &user_fpxregs_struct) -> bool { + self.cwd == other.cwd + && self.swd == other.swd + && self.twd == other.twd + && self.fop == other.fop + && self.fip == other.fip + && self.fcs == other.fcs + && self.foo == other.foo + && self.fos == other.fos + && self.mxcsr == other.mxcsr + // Ignore __reserved field + && self.st_space == other.st_space + && self.xmm_space == other.xmm_space + // Ignore padding field + } + } + + impl Eq for user_fpxregs_struct {} + + impl hash::Hash for user_fpxregs_struct { + fn hash(&self, state: &mut H) { + self.cwd.hash(state); + self.swd.hash(state); + self.twd.hash(state); + self.fop.hash(state); + self.fip.hash(state); + self.fcs.hash(state); + self.foo.hash(state); + self.fos.hash(state); + self.mxcsr.hash(state); + // Ignore __reserved field + self.st_space.hash(state); + self.xmm_space.hash(state); + // Ignore padding field + } + } + + impl PartialEq for ucontext_t { + fn eq(&self, other: &ucontext_t) -> bool { + self.uc_flags == other.uc_flags + && self.uc_link == other.uc_link + && self.uc_stack == other.uc_stack + && self.uc_mcontext == other.uc_mcontext + && self.uc_sigmask == other.uc_sigmask + && self + .__private + .iter() + .zip(other.__private.iter()) + .all(|(a, b)| a == b) + } + } + + impl Eq for ucontext_t {} + + impl hash::Hash for ucontext_t { + fn hash(&self, state: &mut H) { + self.uc_flags.hash(state); + self.uc_link.hash(state); + self.uc_stack.hash(state); + self.uc_mcontext.hash(state); + self.uc_sigmask.hash(state); + self.__private.hash(state); + } + } + } +} + +pub const SIGSTKSZ: size_t = 8192; +pub const MINSIGSTKSZ: size_t = 2048; + +pub const O_DIRECT: c_int = 0x4000; +pub const O_DIRECTORY: c_int = 0x10000; +pub const O_NOFOLLOW: c_int = 0x20000; +pub const O_ASYNC: c_int = 0x2000; +pub 
const O_LARGEFILE: c_int = 0o0100000; + +pub const MADV_SOFT_OFFLINE: c_int = 101; +pub const MCL_CURRENT: c_int = 0x0001; +pub const MCL_FUTURE: c_int = 0x0002; +pub const MCL_ONFAULT: c_int = 0x0004; +pub const CBAUD: crate::tcflag_t = 0o0010017; +pub const TAB1: c_int = 0x00000800; +pub const TAB2: c_int = 0x00001000; +pub const TAB3: c_int = 0x00001800; +pub const CR1: c_int = 0x00000200; +pub const CR2: c_int = 0x00000400; +pub const CR3: c_int = 0x00000600; +pub const FF1: c_int = 0x00008000; +pub const BS1: c_int = 0x00002000; +pub const VT1: c_int = 0x00004000; +pub const VWERASE: usize = 14; +pub const VREPRINT: usize = 12; +pub const VSUSP: usize = 10; +pub const VSTART: usize = 8; +pub const VSTOP: usize = 9; +pub const VDISCARD: usize = 13; +pub const VTIME: usize = 5; +pub const IXON: crate::tcflag_t = 0x00000400; +pub const IXOFF: crate::tcflag_t = 0x00001000; +pub const ONLCR: crate::tcflag_t = 0x4; +pub const CSIZE: crate::tcflag_t = 0x00000030; +pub const CS6: crate::tcflag_t = 0x00000010; +pub const CS7: crate::tcflag_t = 0x00000020; +pub const CS8: crate::tcflag_t = 0x00000030; +pub const CSTOPB: crate::tcflag_t = 0x00000040; +pub const CREAD: crate::tcflag_t = 0x00000080; +pub const PARENB: crate::tcflag_t = 0x00000100; +pub const PARODD: crate::tcflag_t = 0x00000200; +pub const HUPCL: crate::tcflag_t = 0x00000400; +pub const CLOCAL: crate::tcflag_t = 0x00000800; +pub const ECHOKE: crate::tcflag_t = 0x00000800; +pub const ECHOE: crate::tcflag_t = 0x00000010; +pub const ECHOK: crate::tcflag_t = 0x00000020; +pub const ECHONL: crate::tcflag_t = 0x00000040; +pub const ECHOPRT: crate::tcflag_t = 0x00000400; +pub const ECHOCTL: crate::tcflag_t = 0x00000200; +pub const ISIG: crate::tcflag_t = 0x00000001; +pub const ICANON: crate::tcflag_t = 0x00000002; +pub const PENDIN: crate::tcflag_t = 0x00004000; +pub const NOFLSH: crate::tcflag_t = 0x00000080; +pub const CIBAUD: crate::tcflag_t = 0o02003600000; +pub const CBAUDEX: crate::tcflag_t = 0o010000; +pub const VSWTC: usize = 7; +pub const OLCUC: crate::tcflag_t = 0o000002; +pub const NLDLY: crate::tcflag_t = 0o000400; +pub const CRDLY: crate::tcflag_t = 0o003000; +pub const TABDLY: crate::tcflag_t = 0o014000; +pub const BSDLY: crate::tcflag_t = 0o020000; +pub const FFDLY: crate::tcflag_t = 0o100000; +pub const VTDLY: crate::tcflag_t = 0o040000; +pub const XTABS: crate::tcflag_t = 0o014000; +pub const B57600: crate::speed_t = 0o010001; +pub const B115200: crate::speed_t = 0o010002; +pub const B230400: crate::speed_t = 0o010003; +pub const B460800: crate::speed_t = 0o010004; +pub const B500000: crate::speed_t = 0o010005; +pub const B576000: crate::speed_t = 0o010006; +pub const B921600: crate::speed_t = 0o010007; +pub const B1000000: crate::speed_t = 0o010010; +pub const B1152000: crate::speed_t = 0o010011; +pub const B1500000: crate::speed_t = 0o010012; +pub const B2000000: crate::speed_t = 0o010013; +pub const B2500000: crate::speed_t = 0o010014; +pub const B3000000: crate::speed_t = 0o010015; +pub const B3500000: crate::speed_t = 0o010016; +pub const B4000000: crate::speed_t = 0o010017; + +pub const O_APPEND: c_int = 1024; +pub const O_CREAT: c_int = 64; +pub const O_EXCL: c_int = 128; +pub const O_NOCTTY: c_int = 256; +pub const O_NONBLOCK: c_int = 2048; +pub const O_SYNC: c_int = 1052672; +pub const O_RSYNC: c_int = 1052672; +pub const O_DSYNC: c_int = 4096; + +pub const MAP_ANON: c_int = 0x0020; +pub const MAP_GROWSDOWN: c_int = 0x0100; +pub const MAP_DENYWRITE: c_int = 0x0800; +pub const MAP_EXECUTABLE: c_int = 0x01000; +pub 
const MAP_LOCKED: c_int = 0x02000; +pub const MAP_NORESERVE: c_int = 0x04000; +pub const MAP_POPULATE: c_int = 0x08000; +pub const MAP_NONBLOCK: c_int = 0x010000; +pub const MAP_STACK: c_int = 0x020000; +pub const MAP_SYNC: c_int = 0x080000; + +pub const SOCK_STREAM: c_int = 1; +pub const SOCK_DGRAM: c_int = 2; + +pub const EDEADLK: c_int = 35; +pub const ENAMETOOLONG: c_int = 36; +pub const ENOLCK: c_int = 37; +pub const ENOSYS: c_int = 38; +pub const ENOTEMPTY: c_int = 39; +pub const ELOOP: c_int = 40; +pub const ENOMSG: c_int = 42; +pub const EIDRM: c_int = 43; +pub const ECHRNG: c_int = 44; +pub const EL2NSYNC: c_int = 45; +pub const EL3HLT: c_int = 46; +pub const EL3RST: c_int = 47; +pub const ELNRNG: c_int = 48; +pub const EUNATCH: c_int = 49; +pub const ENOCSI: c_int = 50; +pub const EL2HLT: c_int = 51; +pub const EBADE: c_int = 52; +pub const EBADR: c_int = 53; +pub const EXFULL: c_int = 54; +pub const ENOANO: c_int = 55; +pub const EBADRQC: c_int = 56; +pub const EBADSLT: c_int = 57; +pub const EDEADLOCK: c_int = EDEADLK; +pub const EMULTIHOP: c_int = 72; +pub const EBADMSG: c_int = 74; +pub const EOVERFLOW: c_int = 75; +pub const ENOTUNIQ: c_int = 76; +pub const EBADFD: c_int = 77; +pub const EREMCHG: c_int = 78; +pub const ELIBACC: c_int = 79; +pub const ELIBBAD: c_int = 80; +pub const ELIBSCN: c_int = 81; +pub const ELIBMAX: c_int = 82; +pub const ELIBEXEC: c_int = 83; +pub const EILSEQ: c_int = 84; +pub const ERESTART: c_int = 85; +pub const ESTRPIPE: c_int = 86; +pub const EUSERS: c_int = 87; +pub const ENOTSOCK: c_int = 88; +pub const EDESTADDRREQ: c_int = 89; +pub const EMSGSIZE: c_int = 90; +pub const EPROTOTYPE: c_int = 91; +pub const ENOPROTOOPT: c_int = 92; +pub const EPROTONOSUPPORT: c_int = 93; +pub const ESOCKTNOSUPPORT: c_int = 94; +pub const EOPNOTSUPP: c_int = 95; +pub const ENOTSUP: c_int = EOPNOTSUPP; +pub const EPFNOSUPPORT: c_int = 96; +pub const EAFNOSUPPORT: c_int = 97; +pub const EADDRINUSE: c_int = 98; +pub const EADDRNOTAVAIL: c_int = 99; +pub const ENETDOWN: c_int = 100; +pub const ENETUNREACH: c_int = 101; +pub const ENETRESET: c_int = 102; +pub const ECONNABORTED: c_int = 103; +pub const ECONNRESET: c_int = 104; +pub const ENOBUFS: c_int = 105; +pub const EISCONN: c_int = 106; +pub const ENOTCONN: c_int = 107; +pub const ESHUTDOWN: c_int = 108; +pub const ETOOMANYREFS: c_int = 109; +pub const ETIMEDOUT: c_int = 110; +pub const ECONNREFUSED: c_int = 111; +pub const EHOSTDOWN: c_int = 112; +pub const EHOSTUNREACH: c_int = 113; +pub const EALREADY: c_int = 114; +pub const EINPROGRESS: c_int = 115; +pub const ESTALE: c_int = 116; +pub const EUCLEAN: c_int = 117; +pub const ENOTNAM: c_int = 118; +pub const ENAVAIL: c_int = 119; +pub const EISNAM: c_int = 120; +pub const EREMOTEIO: c_int = 121; +pub const EDQUOT: c_int = 122; +pub const ENOMEDIUM: c_int = 123; +pub const EMEDIUMTYPE: c_int = 124; +pub const ECANCELED: c_int = 125; +pub const ENOKEY: c_int = 126; +pub const EKEYEXPIRED: c_int = 127; +pub const EKEYREVOKED: c_int = 128; +pub const EKEYREJECTED: c_int = 129; +pub const EOWNERDEAD: c_int = 130; +pub const ENOTRECOVERABLE: c_int = 131; +pub const ERFKILL: c_int = 132; +pub const EHWPOISON: c_int = 133; + +pub const SA_ONSTACK: c_int = 0x08000000; +pub const SA_SIGINFO: c_int = 0x00000004; +pub const SA_NOCLDWAIT: c_int = 0x00000002; + +pub const SIGCHLD: c_int = 17; +pub const SIGBUS: c_int = 7; +pub const SIGTTIN: c_int = 21; +pub const SIGTTOU: c_int = 22; +pub const SIGXCPU: c_int = 24; +pub const SIGXFSZ: c_int = 25; +pub const SIGVTALRM: 
c_int = 26; +pub const SIGPROF: c_int = 27; +pub const SIGWINCH: c_int = 28; +pub const SIGUSR1: c_int = 10; +pub const SIGUSR2: c_int = 12; +pub const SIGCONT: c_int = 18; +pub const SIGSTOP: c_int = 19; +pub const SIGTSTP: c_int = 20; +pub const SIGURG: c_int = 23; +pub const SIGIO: c_int = 29; +pub const SIGSYS: c_int = 31; +pub const SIGSTKFLT: c_int = 16; +pub const SIGPOLL: c_int = 29; +pub const SIGPWR: c_int = 30; +pub const SIG_SETMASK: c_int = 2; +pub const SIG_BLOCK: c_int = 0x000000; +pub const SIG_UNBLOCK: c_int = 0x01; + +pub const EXTPROC: crate::tcflag_t = 0x00010000; + +pub const MAP_HUGETLB: c_int = 0x040000; +pub const MAP_32BIT: c_int = 0x0040; + +pub const F_GETLK: c_int = 12; +pub const F_GETOWN: c_int = 9; +pub const F_SETLK: c_int = 13; +pub const F_SETLKW: c_int = 14; +pub const F_SETOWN: c_int = 8; + +pub const VEOF: usize = 4; +pub const VEOL: usize = 11; +pub const VEOL2: usize = 16; +pub const VMIN: usize = 6; +pub const IEXTEN: crate::tcflag_t = 0x00008000; +pub const TOSTOP: crate::tcflag_t = 0x00000100; +pub const FLUSHO: crate::tcflag_t = 0x00001000; + +pub const POLLWRNORM: c_short = 0x100; +pub const POLLWRBAND: c_short = 0x200; + +pub const PTRACE_SYSEMU: c_int = 31; +pub const PTRACE_SYSEMU_SINGLESTEP: c_int = 32; + +// Syscall table +pub const SYS_restart_syscall: c_long = 0; +pub const SYS_exit: c_long = 1; +pub const SYS_fork: c_long = 2; +pub const SYS_read: c_long = 3; +pub const SYS_write: c_long = 4; +pub const SYS_open: c_long = 5; +pub const SYS_close: c_long = 6; +pub const SYS_waitpid: c_long = 7; +pub const SYS_creat: c_long = 8; +pub const SYS_link: c_long = 9; +pub const SYS_unlink: c_long = 10; +pub const SYS_execve: c_long = 11; +pub const SYS_chdir: c_long = 12; +pub const SYS_time: c_long = 13; +pub const SYS_mknod: c_long = 14; +pub const SYS_chmod: c_long = 15; +pub const SYS_lchown: c_long = 16; +pub const SYS_break: c_long = 17; +pub const SYS_oldstat: c_long = 18; +pub const SYS_lseek: c_long = 19; +pub const SYS_getpid: c_long = 20; +pub const SYS_mount: c_long = 21; +pub const SYS_umount: c_long = 22; +pub const SYS_setuid: c_long = 23; +pub const SYS_getuid: c_long = 24; +pub const SYS_stime: c_long = 25; +pub const SYS_ptrace: c_long = 26; +pub const SYS_alarm: c_long = 27; +pub const SYS_oldfstat: c_long = 28; +pub const SYS_pause: c_long = 29; +pub const SYS_utime: c_long = 30; +pub const SYS_stty: c_long = 31; +pub const SYS_gtty: c_long = 32; +pub const SYS_access: c_long = 33; +pub const SYS_nice: c_long = 34; +pub const SYS_ftime: c_long = 35; +pub const SYS_sync: c_long = 36; +pub const SYS_kill: c_long = 37; +pub const SYS_rename: c_long = 38; +pub const SYS_mkdir: c_long = 39; +pub const SYS_rmdir: c_long = 40; +pub const SYS_dup: c_long = 41; +pub const SYS_pipe: c_long = 42; +pub const SYS_times: c_long = 43; +pub const SYS_prof: c_long = 44; +pub const SYS_brk: c_long = 45; +pub const SYS_setgid: c_long = 46; +pub const SYS_getgid: c_long = 47; +pub const SYS_signal: c_long = 48; +pub const SYS_geteuid: c_long = 49; +pub const SYS_getegid: c_long = 50; +pub const SYS_acct: c_long = 51; +pub const SYS_umount2: c_long = 52; +pub const SYS_lock: c_long = 53; +pub const SYS_ioctl: c_long = 54; +pub const SYS_fcntl: c_long = 55; +pub const SYS_mpx: c_long = 56; +pub const SYS_setpgid: c_long = 57; +pub const SYS_ulimit: c_long = 58; +pub const SYS_oldolduname: c_long = 59; +pub const SYS_umask: c_long = 60; +pub const SYS_chroot: c_long = 61; +pub const SYS_ustat: c_long = 62; +pub const SYS_dup2: c_long = 63; +pub 
const SYS_getppid: c_long = 64; +pub const SYS_getpgrp: c_long = 65; +pub const SYS_setsid: c_long = 66; +pub const SYS_sigaction: c_long = 67; +pub const SYS_sgetmask: c_long = 68; +pub const SYS_ssetmask: c_long = 69; +pub const SYS_setreuid: c_long = 70; +pub const SYS_setregid: c_long = 71; +pub const SYS_sigsuspend: c_long = 72; +pub const SYS_sigpending: c_long = 73; +pub const SYS_sethostname: c_long = 74; +pub const SYS_setrlimit: c_long = 75; +pub const SYS_getrlimit: c_long = 76; +pub const SYS_getrusage: c_long = 77; +pub const SYS_gettimeofday: c_long = 78; +pub const SYS_settimeofday: c_long = 79; +pub const SYS_getgroups: c_long = 80; +pub const SYS_setgroups: c_long = 81; +pub const SYS_select: c_long = 82; +pub const SYS_symlink: c_long = 83; +pub const SYS_oldlstat: c_long = 84; +pub const SYS_readlink: c_long = 85; +pub const SYS_uselib: c_long = 86; +pub const SYS_swapon: c_long = 87; +pub const SYS_reboot: c_long = 88; +pub const SYS_readdir: c_long = 89; +pub const SYS_mmap: c_long = 90; +pub const SYS_munmap: c_long = 91; +pub const SYS_truncate: c_long = 92; +pub const SYS_ftruncate: c_long = 93; +pub const SYS_fchmod: c_long = 94; +pub const SYS_fchown: c_long = 95; +pub const SYS_getpriority: c_long = 96; +pub const SYS_setpriority: c_long = 97; +pub const SYS_profil: c_long = 98; +pub const SYS_statfs: c_long = 99; +pub const SYS_fstatfs: c_long = 100; +pub const SYS_ioperm: c_long = 101; +pub const SYS_socketcall: c_long = 102; +pub const SYS_syslog: c_long = 103; +pub const SYS_setitimer: c_long = 104; +pub const SYS_getitimer: c_long = 105; +pub const SYS_stat: c_long = 106; +pub const SYS_lstat: c_long = 107; +pub const SYS_fstat: c_long = 108; +pub const SYS_olduname: c_long = 109; +pub const SYS_iopl: c_long = 110; +pub const SYS_vhangup: c_long = 111; +pub const SYS_idle: c_long = 112; +pub const SYS_vm86old: c_long = 113; +pub const SYS_wait4: c_long = 114; +pub const SYS_swapoff: c_long = 115; +pub const SYS_sysinfo: c_long = 116; +pub const SYS_ipc: c_long = 117; +pub const SYS_fsync: c_long = 118; +pub const SYS_sigreturn: c_long = 119; +pub const SYS_clone: c_long = 120; +pub const SYS_setdomainname: c_long = 121; +pub const SYS_uname: c_long = 122; +pub const SYS_modify_ldt: c_long = 123; +pub const SYS_adjtimex: c_long = 124; +pub const SYS_mprotect: c_long = 125; +pub const SYS_sigprocmask: c_long = 126; +#[deprecated(since = "0.2.70", note = "Functional up to 2.6 kernel")] +pub const SYS_create_module: c_long = 127; +pub const SYS_init_module: c_long = 128; +pub const SYS_delete_module: c_long = 129; +#[deprecated(since = "0.2.70", note = "Functional up to 2.6 kernel")] +pub const SYS_get_kernel_syms: c_long = 130; +pub const SYS_quotactl: c_long = 131; +pub const SYS_getpgid: c_long = 132; +pub const SYS_fchdir: c_long = 133; +pub const SYS_bdflush: c_long = 134; +pub const SYS_sysfs: c_long = 135; +pub const SYS_personality: c_long = 136; +pub const SYS_afs_syscall: c_long = 137; +pub const SYS_setfsuid: c_long = 138; +pub const SYS_setfsgid: c_long = 139; +pub const SYS__llseek: c_long = 140; +pub const SYS_getdents: c_long = 141; +pub const SYS__newselect: c_long = 142; +pub const SYS_flock: c_long = 143; +pub const SYS_msync: c_long = 144; +pub const SYS_readv: c_long = 145; +pub const SYS_writev: c_long = 146; +pub const SYS_getsid: c_long = 147; +pub const SYS_fdatasync: c_long = 148; +pub const SYS__sysctl: c_long = 149; +pub const SYS_mlock: c_long = 150; +pub const SYS_munlock: c_long = 151; +pub const SYS_mlockall: c_long = 152; +pub 
const SYS_munlockall: c_long = 153; +pub const SYS_sched_setparam: c_long = 154; +pub const SYS_sched_getparam: c_long = 155; +pub const SYS_sched_setscheduler: c_long = 156; +pub const SYS_sched_getscheduler: c_long = 157; +pub const SYS_sched_yield: c_long = 158; +pub const SYS_sched_get_priority_max: c_long = 159; +pub const SYS_sched_get_priority_min: c_long = 160; +pub const SYS_sched_rr_get_interval: c_long = 161; +pub const SYS_nanosleep: c_long = 162; +pub const SYS_mremap: c_long = 163; +pub const SYS_setresuid: c_long = 164; +pub const SYS_getresuid: c_long = 165; +pub const SYS_vm86: c_long = 166; +#[deprecated(since = "0.2.70", note = "Functional up to 2.6 kernel")] +pub const SYS_query_module: c_long = 167; +pub const SYS_poll: c_long = 168; +pub const SYS_nfsservctl: c_long = 169; +pub const SYS_setresgid: c_long = 170; +pub const SYS_getresgid: c_long = 171; +pub const SYS_prctl: c_long = 172; +pub const SYS_rt_sigreturn: c_long = 173; +pub const SYS_rt_sigaction: c_long = 174; +pub const SYS_rt_sigprocmask: c_long = 175; +pub const SYS_rt_sigpending: c_long = 176; +pub const SYS_rt_sigtimedwait: c_long = 177; +pub const SYS_rt_sigqueueinfo: c_long = 178; +pub const SYS_rt_sigsuspend: c_long = 179; +pub const SYS_pread64: c_long = 180; +pub const SYS_pwrite64: c_long = 181; +pub const SYS_chown: c_long = 182; +pub const SYS_getcwd: c_long = 183; +pub const SYS_capget: c_long = 184; +pub const SYS_capset: c_long = 185; +pub const SYS_sigaltstack: c_long = 186; +pub const SYS_sendfile: c_long = 187; +pub const SYS_getpmsg: c_long = 188; +pub const SYS_putpmsg: c_long = 189; +pub const SYS_vfork: c_long = 190; +pub const SYS_ugetrlimit: c_long = 191; +pub const SYS_mmap2: c_long = 192; +pub const SYS_truncate64: c_long = 193; +pub const SYS_ftruncate64: c_long = 194; +pub const SYS_stat64: c_long = 195; +pub const SYS_lstat64: c_long = 196; +pub const SYS_fstat64: c_long = 197; +pub const SYS_lchown32: c_long = 198; +pub const SYS_getuid32: c_long = 199; +pub const SYS_getgid32: c_long = 200; +pub const SYS_geteuid32: c_long = 201; +pub const SYS_getegid32: c_long = 202; +pub const SYS_setreuid32: c_long = 203; +pub const SYS_setregid32: c_long = 204; +pub const SYS_getgroups32: c_long = 205; +pub const SYS_setgroups32: c_long = 206; +pub const SYS_fchown32: c_long = 207; +pub const SYS_setresuid32: c_long = 208; +pub const SYS_getresuid32: c_long = 209; +pub const SYS_setresgid32: c_long = 210; +pub const SYS_getresgid32: c_long = 211; +pub const SYS_chown32: c_long = 212; +pub const SYS_setuid32: c_long = 213; +pub const SYS_setgid32: c_long = 214; +pub const SYS_setfsuid32: c_long = 215; +pub const SYS_setfsgid32: c_long = 216; +pub const SYS_pivot_root: c_long = 217; +pub const SYS_mincore: c_long = 218; +pub const SYS_madvise: c_long = 219; +pub const SYS_getdents64: c_long = 220; +pub const SYS_fcntl64: c_long = 221; +pub const SYS_gettid: c_long = 224; +pub const SYS_readahead: c_long = 225; +pub const SYS_setxattr: c_long = 226; +pub const SYS_lsetxattr: c_long = 227; +pub const SYS_fsetxattr: c_long = 228; +pub const SYS_getxattr: c_long = 229; +pub const SYS_lgetxattr: c_long = 230; +pub const SYS_fgetxattr: c_long = 231; +pub const SYS_listxattr: c_long = 232; +pub const SYS_llistxattr: c_long = 233; +pub const SYS_flistxattr: c_long = 234; +pub const SYS_removexattr: c_long = 235; +pub const SYS_lremovexattr: c_long = 236; +pub const SYS_fremovexattr: c_long = 237; +pub const SYS_tkill: c_long = 238; +pub const SYS_sendfile64: c_long = 239; +pub const SYS_futex: 
c_long = 240; +pub const SYS_sched_setaffinity: c_long = 241; +pub const SYS_sched_getaffinity: c_long = 242; +pub const SYS_set_thread_area: c_long = 243; +pub const SYS_get_thread_area: c_long = 244; +pub const SYS_io_setup: c_long = 245; +pub const SYS_io_destroy: c_long = 246; +pub const SYS_io_getevents: c_long = 247; +pub const SYS_io_submit: c_long = 248; +pub const SYS_io_cancel: c_long = 249; +pub const SYS_fadvise64: c_long = 250; +pub const SYS_exit_group: c_long = 252; +pub const SYS_lookup_dcookie: c_long = 253; +pub const SYS_epoll_create: c_long = 254; +pub const SYS_epoll_ctl: c_long = 255; +pub const SYS_epoll_wait: c_long = 256; +pub const SYS_remap_file_pages: c_long = 257; +pub const SYS_set_tid_address: c_long = 258; +pub const SYS_timer_create: c_long = 259; +pub const SYS_timer_settime: c_long = 260; +pub const SYS_timer_gettime: c_long = 261; +pub const SYS_timer_getoverrun: c_long = 262; +pub const SYS_timer_delete: c_long = 263; +pub const SYS_clock_settime: c_long = 264; +pub const SYS_clock_gettime: c_long = 265; +pub const SYS_clock_getres: c_long = 266; +pub const SYS_clock_nanosleep: c_long = 267; +pub const SYS_statfs64: c_long = 268; +pub const SYS_fstatfs64: c_long = 269; +pub const SYS_tgkill: c_long = 270; +pub const SYS_utimes: c_long = 271; +pub const SYS_fadvise64_64: c_long = 272; +pub const SYS_vserver: c_long = 273; +pub const SYS_mbind: c_long = 274; +pub const SYS_get_mempolicy: c_long = 275; +pub const SYS_set_mempolicy: c_long = 276; +pub const SYS_mq_open: c_long = 277; +pub const SYS_mq_unlink: c_long = 278; +pub const SYS_mq_timedsend: c_long = 279; +pub const SYS_mq_timedreceive: c_long = 280; +pub const SYS_mq_notify: c_long = 281; +pub const SYS_mq_getsetattr: c_long = 282; +pub const SYS_kexec_load: c_long = 283; +pub const SYS_waitid: c_long = 284; +pub const SYS_add_key: c_long = 286; +pub const SYS_request_key: c_long = 287; +pub const SYS_keyctl: c_long = 288; +pub const SYS_ioprio_set: c_long = 289; +pub const SYS_ioprio_get: c_long = 290; +pub const SYS_inotify_init: c_long = 291; +pub const SYS_inotify_add_watch: c_long = 292; +pub const SYS_inotify_rm_watch: c_long = 293; +pub const SYS_migrate_pages: c_long = 294; +pub const SYS_openat: c_long = 295; +pub const SYS_mkdirat: c_long = 296; +pub const SYS_mknodat: c_long = 297; +pub const SYS_fchownat: c_long = 298; +pub const SYS_futimesat: c_long = 299; +pub const SYS_fstatat64: c_long = 300; +pub const SYS_unlinkat: c_long = 301; +pub const SYS_renameat: c_long = 302; +pub const SYS_linkat: c_long = 303; +pub const SYS_symlinkat: c_long = 304; +pub const SYS_readlinkat: c_long = 305; +pub const SYS_fchmodat: c_long = 306; +pub const SYS_faccessat: c_long = 307; +pub const SYS_pselect6: c_long = 308; +pub const SYS_ppoll: c_long = 309; +pub const SYS_unshare: c_long = 310; +pub const SYS_set_robust_list: c_long = 311; +pub const SYS_get_robust_list: c_long = 312; +pub const SYS_splice: c_long = 313; +pub const SYS_sync_file_range: c_long = 314; +pub const SYS_tee: c_long = 315; +pub const SYS_vmsplice: c_long = 316; +pub const SYS_move_pages: c_long = 317; +pub const SYS_getcpu: c_long = 318; +pub const SYS_epoll_pwait: c_long = 319; +pub const SYS_utimensat: c_long = 320; +pub const SYS_signalfd: c_long = 321; +pub const SYS_timerfd_create: c_long = 322; +pub const SYS_eventfd: c_long = 323; +pub const SYS_fallocate: c_long = 324; +pub const SYS_timerfd_settime: c_long = 325; +pub const SYS_timerfd_gettime: c_long = 326; +pub const SYS_signalfd4: c_long = 327; +pub const 
SYS_eventfd2: c_long = 328; +pub const SYS_epoll_create1: c_long = 329; +pub const SYS_dup3: c_long = 330; +pub const SYS_pipe2: c_long = 331; +pub const SYS_inotify_init1: c_long = 332; +pub const SYS_preadv: c_long = 333; +pub const SYS_pwritev: c_long = 334; +pub const SYS_rt_tgsigqueueinfo: c_long = 335; +pub const SYS_perf_event_open: c_long = 336; +pub const SYS_recvmmsg: c_long = 337; +pub const SYS_fanotify_init: c_long = 338; +pub const SYS_fanotify_mark: c_long = 339; +pub const SYS_prlimit64: c_long = 340; +pub const SYS_name_to_handle_at: c_long = 341; +pub const SYS_open_by_handle_at: c_long = 342; +pub const SYS_clock_adjtime: c_long = 343; +pub const SYS_syncfs: c_long = 344; +pub const SYS_sendmmsg: c_long = 345; +pub const SYS_setns: c_long = 346; +pub const SYS_process_vm_readv: c_long = 347; +pub const SYS_process_vm_writev: c_long = 348; +pub const SYS_kcmp: c_long = 349; +pub const SYS_finit_module: c_long = 350; +pub const SYS_sched_setattr: c_long = 351; +pub const SYS_sched_getattr: c_long = 352; +pub const SYS_renameat2: c_long = 353; +pub const SYS_seccomp: c_long = 354; +pub const SYS_getrandom: c_long = 355; +pub const SYS_memfd_create: c_long = 356; +pub const SYS_bpf: c_long = 357; +pub const SYS_execveat: c_long = 358; +pub const SYS_socket: c_long = 359; +pub const SYS_socketpair: c_long = 360; +pub const SYS_bind: c_long = 361; +pub const SYS_connect: c_long = 362; +pub const SYS_listen: c_long = 363; +pub const SYS_accept4: c_long = 364; +pub const SYS_getsockopt: c_long = 365; +pub const SYS_setsockopt: c_long = 366; +pub const SYS_getsockname: c_long = 367; +pub const SYS_getpeername: c_long = 368; +pub const SYS_sendto: c_long = 369; +pub const SYS_sendmsg: c_long = 370; +pub const SYS_recvfrom: c_long = 371; +pub const SYS_recvmsg: c_long = 372; +pub const SYS_shutdown: c_long = 373; +pub const SYS_userfaultfd: c_long = 374; +pub const SYS_membarrier: c_long = 375; +pub const SYS_mlock2: c_long = 376; +pub const SYS_copy_file_range: c_long = 377; +pub const SYS_preadv2: c_long = 378; +pub const SYS_pwritev2: c_long = 379; +pub const SYS_pkey_mprotect: c_long = 380; +pub const SYS_pkey_alloc: c_long = 381; +pub const SYS_pkey_free: c_long = 382; +pub const SYS_statx: c_long = 383; +pub const SYS_pidfd_send_signal: c_long = 424; +pub const SYS_io_uring_setup: c_long = 425; +pub const SYS_io_uring_enter: c_long = 426; +pub const SYS_io_uring_register: c_long = 427; +pub const SYS_open_tree: c_long = 428; +pub const SYS_move_mount: c_long = 429; +pub const SYS_fsopen: c_long = 430; +pub const SYS_fsconfig: c_long = 431; +pub const SYS_fsmount: c_long = 432; +pub const SYS_fspick: c_long = 433; +pub const SYS_pidfd_open: c_long = 434; +pub const SYS_clone3: c_long = 435; +pub const SYS_close_range: c_long = 436; +pub const SYS_openat2: c_long = 437; +pub const SYS_pidfd_getfd: c_long = 438; +pub const SYS_faccessat2: c_long = 439; +pub const SYS_process_madvise: c_long = 440; +pub const SYS_epoll_pwait2: c_long = 441; +pub const SYS_mount_setattr: c_long = 442; +pub const SYS_quotactl_fd: c_long = 443; +pub const SYS_landlock_create_ruleset: c_long = 444; +pub const SYS_landlock_add_rule: c_long = 445; +pub const SYS_landlock_restrict_self: c_long = 446; +pub const SYS_memfd_secret: c_long = 447; +pub const SYS_process_mrelease: c_long = 448; +pub const SYS_futex_waitv: c_long = 449; +pub const SYS_set_mempolicy_home_node: c_long = 450; +pub const SYS_fchmodat2: c_long = 452; + +// offsets in user_regs_structs, from sys/reg.h +pub const EBX: c_int = 0; +pub 
const ECX: c_int = 1; +pub const EDX: c_int = 2; +pub const ESI: c_int = 3; +pub const EDI: c_int = 4; +pub const EBP: c_int = 5; +pub const EAX: c_int = 6; +pub const DS: c_int = 7; +pub const ES: c_int = 8; +pub const FS: c_int = 9; +pub const GS: c_int = 10; +pub const ORIG_EAX: c_int = 11; +pub const EIP: c_int = 12; +pub const CS: c_int = 13; +pub const EFL: c_int = 14; +pub const UESP: c_int = 15; +pub const SS: c_int = 16; diff --git a/vendor/libc/src/unix/linux_like/linux/musl/b64/aarch64/mod.rs b/vendor/libc/src/unix/linux_like/linux/musl/b64/aarch64/mod.rs new file mode 100644 index 00000000000000..67151a8d37116f --- /dev/null +++ b/vendor/libc/src/unix/linux_like/linux/musl/b64/aarch64/mod.rs @@ -0,0 +1,712 @@ +use crate::off_t; +use crate::prelude::*; + +pub type __u64 = c_ulonglong; +pub type __s64 = c_longlong; +pub type wchar_t = u32; +pub type nlink_t = u32; +pub type blksize_t = c_int; + +s! { + pub struct stat { + pub st_dev: crate::dev_t, + pub st_ino: crate::ino_t, + pub st_mode: crate::mode_t, + pub st_nlink: crate::nlink_t, + pub st_uid: crate::uid_t, + pub st_gid: crate::gid_t, + pub st_rdev: crate::dev_t, + __pad0: c_ulong, + pub st_size: off_t, + pub st_blksize: crate::blksize_t, + __pad1: c_int, + pub st_blocks: crate::blkcnt_t, + pub st_atime: crate::time_t, + pub st_atime_nsec: c_long, + pub st_mtime: crate::time_t, + pub st_mtime_nsec: c_long, + pub st_ctime: crate::time_t, + pub st_ctime_nsec: c_long, + __unused: [c_uint; 2], + } + + pub struct stat64 { + pub st_dev: crate::dev_t, + pub st_ino: crate::ino_t, + pub st_mode: crate::mode_t, + pub st_nlink: crate::nlink_t, + pub st_uid: crate::uid_t, + pub st_gid: crate::gid_t, + pub st_rdev: crate::dev_t, + __pad0: c_ulong, + pub st_size: off_t, + pub st_blksize: crate::blksize_t, + __pad1: c_int, + pub st_blocks: crate::blkcnt_t, + pub st_atime: crate::time_t, + pub st_atime_nsec: c_long, + pub st_mtime: crate::time_t, + pub st_mtime_nsec: c_long, + pub st_ctime: crate::time_t, + pub st_ctime_nsec: c_long, + __unused: [c_uint; 2], + } + + pub struct user_regs_struct { + pub regs: [c_ulonglong; 31], + pub sp: c_ulonglong, + pub pc: c_ulonglong, + pub pstate: c_ulonglong, + } + + pub struct ipc_perm { + #[cfg(musl_v1_2_3)] + pub __key: crate::key_t, + #[cfg(not(musl_v1_2_3))] + #[deprecated( + since = "0.2.173", + note = "This field is incorrectly named and will be changed + to __key in a future release." + )] + pub __ipc_perm_key: crate::key_t, + pub uid: crate::uid_t, + pub gid: crate::gid_t, + pub cuid: crate::uid_t, + pub cgid: crate::gid_t, + pub mode: crate::mode_t, + + #[cfg(musl_v1_2_3)] + pub __seq: c_int, + #[cfg(not(musl_v1_2_3))] + #[deprecated( + since = "0.2.173", + note = "The type of this field has changed from c_ushort to c_int, + we'll follow that change in the future release." 
+ )] + pub __seq: c_ushort, + __unused1: c_long, + __unused2: c_long, + } + + pub struct ucontext_t { + pub uc_flags: c_ulong, + pub uc_link: *mut ucontext_t, + pub uc_stack: crate::stack_t, + pub uc_sigmask: crate::sigset_t, + pub uc_mcontext: mcontext_t, + } + + #[repr(align(16))] + pub struct mcontext_t { + pub fault_address: c_ulong, + pub regs: [c_ulong; 31], + pub sp: c_ulong, + pub pc: c_ulong, + pub pstate: c_ulong, + __reserved: [u64; 512], + } + + #[repr(align(8))] + pub struct clone_args { + pub flags: c_ulonglong, + pub pidfd: c_ulonglong, + pub child_tid: c_ulonglong, + pub parent_tid: c_ulonglong, + pub exit_signal: c_ulonglong, + pub stack: c_ulonglong, + pub stack_size: c_ulonglong, + pub tls: c_ulonglong, + pub set_tid: c_ulonglong, + pub set_tid_size: c_ulonglong, + pub cgroup: c_ulonglong, + } + + pub struct user_fpsimd_struct { + pub vregs: [crate::__uint128_t; 32], + pub fpsr: u32, + pub fpcr: u32, + } +} + +s_no_extra_traits! { + #[repr(align(16))] + pub struct max_align_t { + priv_: [f32; 8], + } +} + +pub const O_APPEND: c_int = 1024; +pub const O_DIRECT: c_int = 0x10000; +pub const O_DIRECTORY: c_int = 0x4000; +pub const O_LARGEFILE: c_int = 0x20000; +pub const O_NOFOLLOW: c_int = 0x8000; +pub const O_CREAT: c_int = 64; +pub const O_EXCL: c_int = 128; +pub const O_NOCTTY: c_int = 256; +pub const O_NONBLOCK: c_int = 2048; +pub const O_SYNC: c_int = 1052672; +pub const O_RSYNC: c_int = 1052672; +pub const O_DSYNC: c_int = 4096; +pub const O_ASYNC: c_int = 0x2000; + +pub const ENAMETOOLONG: c_int = 36; +pub const ENOLCK: c_int = 37; +pub const ENOSYS: c_int = 38; +pub const ENOTEMPTY: c_int = 39; +pub const ELOOP: c_int = 40; +pub const ENOMSG: c_int = 42; +pub const EIDRM: c_int = 43; +pub const ECHRNG: c_int = 44; +pub const EL2NSYNC: c_int = 45; +pub const EL3HLT: c_int = 46; +pub const EL3RST: c_int = 47; +pub const ELNRNG: c_int = 48; +pub const EUNATCH: c_int = 49; +pub const ENOCSI: c_int = 50; +pub const EL2HLT: c_int = 51; +pub const EBADE: c_int = 52; +pub const EBADR: c_int = 53; +pub const EXFULL: c_int = 54; +pub const ENOANO: c_int = 55; +pub const EBADRQC: c_int = 56; +pub const EBADSLT: c_int = 57; +pub const EMULTIHOP: c_int = 72; +pub const EBADMSG: c_int = 74; +pub const EOVERFLOW: c_int = 75; +pub const ENOTUNIQ: c_int = 76; +pub const EBADFD: c_int = 77; +pub const EREMCHG: c_int = 78; +pub const ELIBACC: c_int = 79; +pub const ELIBBAD: c_int = 80; +pub const ELIBSCN: c_int = 81; +pub const ELIBMAX: c_int = 82; +pub const ELIBEXEC: c_int = 83; +pub const EILSEQ: c_int = 84; +pub const ERESTART: c_int = 85; +pub const ESTRPIPE: c_int = 86; +pub const EUSERS: c_int = 87; +pub const ENOTSOCK: c_int = 88; +pub const EDESTADDRREQ: c_int = 89; +pub const EMSGSIZE: c_int = 90; +pub const EPROTOTYPE: c_int = 91; +pub const ENOPROTOOPT: c_int = 92; +pub const EPROTONOSUPPORT: c_int = 93; +pub const ESOCKTNOSUPPORT: c_int = 94; +pub const EOPNOTSUPP: c_int = 95; +pub const ENOTSUP: c_int = EOPNOTSUPP; +pub const EPFNOSUPPORT: c_int = 96; +pub const EAFNOSUPPORT: c_int = 97; +pub const EADDRINUSE: c_int = 98; +pub const EADDRNOTAVAIL: c_int = 99; +pub const ENETDOWN: c_int = 100; +pub const ENETUNREACH: c_int = 101; +pub const ENETRESET: c_int = 102; +pub const ECONNABORTED: c_int = 103; +pub const ECONNRESET: c_int = 104; +pub const ENOBUFS: c_int = 105; +pub const EISCONN: c_int = 106; +pub const ENOTCONN: c_int = 107; +pub const ESHUTDOWN: c_int = 108; +pub const ETOOMANYREFS: c_int = 109; +pub const ETIMEDOUT: c_int = 110; +pub const ECONNREFUSED: c_int 
= 111; +pub const EHOSTDOWN: c_int = 112; +pub const EHOSTUNREACH: c_int = 113; +pub const EALREADY: c_int = 114; +pub const EINPROGRESS: c_int = 115; +pub const ESTALE: c_int = 116; +pub const EUCLEAN: c_int = 117; +pub const ENOTNAM: c_int = 118; +pub const ENAVAIL: c_int = 119; +pub const EISNAM: c_int = 120; +pub const EREMOTEIO: c_int = 121; +pub const EDQUOT: c_int = 122; +pub const ENOMEDIUM: c_int = 123; +pub const EMEDIUMTYPE: c_int = 124; +pub const ECANCELED: c_int = 125; +pub const ENOKEY: c_int = 126; +pub const EKEYEXPIRED: c_int = 127; +pub const EKEYREVOKED: c_int = 128; +pub const EKEYREJECTED: c_int = 129; +pub const EOWNERDEAD: c_int = 130; +pub const ENOTRECOVERABLE: c_int = 131; +pub const ERFKILL: c_int = 132; +pub const EHWPOISON: c_int = 133; + +// bits/hwcap.h +pub const HWCAP_FP: c_ulong = 1 << 0; +pub const HWCAP_ASIMD: c_ulong = 1 << 1; +pub const HWCAP_EVTSTRM: c_ulong = 1 << 2; +pub const HWCAP_AES: c_ulong = 1 << 3; +pub const HWCAP_PMULL: c_ulong = 1 << 4; +pub const HWCAP_SHA1: c_ulong = 1 << 5; +pub const HWCAP_SHA2: c_ulong = 1 << 6; +pub const HWCAP_CRC32: c_ulong = 1 << 7; +pub const HWCAP_ATOMICS: c_ulong = 1 << 8; +pub const HWCAP_FPHP: c_ulong = 1 << 9; +pub const HWCAP_ASIMDHP: c_ulong = 1 << 10; +pub const HWCAP_CPUID: c_ulong = 1 << 11; +pub const HWCAP_ASIMDRDM: c_ulong = 1 << 12; +pub const HWCAP_JSCVT: c_ulong = 1 << 13; +pub const HWCAP_FCMA: c_ulong = 1 << 14; +pub const HWCAP_LRCPC: c_ulong = 1 << 15; +pub const HWCAP_DCPOP: c_ulong = 1 << 16; +pub const HWCAP_SHA3: c_ulong = 1 << 17; +pub const HWCAP_SM3: c_ulong = 1 << 18; +pub const HWCAP_SM4: c_ulong = 1 << 19; +pub const HWCAP_ASIMDDP: c_ulong = 1 << 20; +pub const HWCAP_SHA512: c_ulong = 1 << 21; +pub const HWCAP_SVE: c_ulong = 1 << 22; +pub const HWCAP_ASIMDFHM: c_ulong = 1 << 23; +pub const HWCAP_DIT: c_ulong = 1 << 24; +pub const HWCAP_USCAT: c_ulong = 1 << 25; +pub const HWCAP_ILRCPC: c_ulong = 1 << 26; +pub const HWCAP_FLAGM: c_ulong = 1 << 27; +pub const HWCAP_SSBS: c_ulong = 1 << 28; +pub const HWCAP_SB: c_ulong = 1 << 29; +pub const HWCAP_PACA: c_ulong = 1 << 30; +pub const HWCAP_PACG: c_ulong = 1 << 31; + +pub const MAP_ANON: c_int = 0x0020; +pub const MAP_GROWSDOWN: c_int = 0x0100; +pub const MAP_DENYWRITE: c_int = 0x0800; +pub const MAP_EXECUTABLE: c_int = 0x01000; +pub const MAP_LOCKED: c_int = 0x02000; +pub const MAP_NORESERVE: c_int = 0x04000; +pub const MAP_POPULATE: c_int = 0x08000; +pub const MAP_NONBLOCK: c_int = 0x010000; +pub const MAP_STACK: c_int = 0x020000; +pub const MAP_HUGETLB: c_int = 0x040000; +pub const MAP_SYNC: c_int = 0x080000; + +pub const SOCK_STREAM: c_int = 1; +pub const SOCK_DGRAM: c_int = 2; + +pub const SA_ONSTACK: c_int = 0x08000000; +pub const SA_SIGINFO: c_int = 0x00000004; +pub const SA_NOCLDWAIT: c_int = 0x00000002; + +pub const SIGCHLD: c_int = 17; +pub const SIGBUS: c_int = 7; +pub const SIGTTIN: c_int = 21; +pub const SIGTTOU: c_int = 22; +pub const SIGXCPU: c_int = 24; +pub const SIGXFSZ: c_int = 25; +pub const SIGVTALRM: c_int = 26; +pub const SIGPROF: c_int = 27; +pub const SIGWINCH: c_int = 28; +pub const SIGUSR1: c_int = 10; +pub const SIGUSR2: c_int = 12; +pub const SIGCONT: c_int = 18; +pub const SIGSTOP: c_int = 19; +pub const SIGTSTP: c_int = 20; +pub const SIGURG: c_int = 23; +pub const SIGIO: c_int = 29; +pub const SIGSYS: c_int = 31; +pub const SIGSTKFLT: c_int = 16; +pub const SIGPOLL: c_int = 29; +pub const SIGPWR: c_int = 30; +pub const SIG_SETMASK: c_int = 2; +pub const SIG_BLOCK: c_int = 0x000000; +pub const SIG_UNBLOCK: 
c_int = 0x01; + +pub const F_GETLK: c_int = 5; +pub const F_GETOWN: c_int = 9; +pub const F_SETLK: c_int = 6; +pub const F_SETLKW: c_int = 7; +pub const F_SETOWN: c_int = 8; + +pub const VEOF: usize = 4; + +pub const POLLWRNORM: c_short = 0x100; +pub const POLLWRBAND: c_short = 0x200; + +pub const MINSIGSTKSZ: size_t = 6144; +pub const SIGSTKSZ: size_t = 12288; + +pub const MADV_SOFT_OFFLINE: c_int = 101; +pub const SYS_io_setup: c_long = 0; +pub const SYS_io_destroy: c_long = 1; +pub const SYS_io_submit: c_long = 2; +pub const SYS_io_cancel: c_long = 3; +pub const SYS_io_getevents: c_long = 4; +pub const SYS_setxattr: c_long = 5; +pub const SYS_lsetxattr: c_long = 6; +pub const SYS_fsetxattr: c_long = 7; +pub const SYS_getxattr: c_long = 8; +pub const SYS_lgetxattr: c_long = 9; +pub const SYS_fgetxattr: c_long = 10; +pub const SYS_listxattr: c_long = 11; +pub const SYS_llistxattr: c_long = 12; +pub const SYS_flistxattr: c_long = 13; +pub const SYS_removexattr: c_long = 14; +pub const SYS_lremovexattr: c_long = 15; +pub const SYS_fremovexattr: c_long = 16; +pub const SYS_getcwd: c_long = 17; +pub const SYS_lookup_dcookie: c_long = 18; +pub const SYS_eventfd2: c_long = 19; +pub const SYS_epoll_create1: c_long = 20; +pub const SYS_epoll_ctl: c_long = 21; +pub const SYS_epoll_pwait: c_long = 22; +pub const SYS_dup: c_long = 23; +pub const SYS_dup3: c_long = 24; +pub const SYS_fcntl: c_long = 25; +pub const SYS_inotify_init1: c_long = 26; +pub const SYS_inotify_add_watch: c_long = 27; +pub const SYS_inotify_rm_watch: c_long = 28; +pub const SYS_ioctl: c_long = 29; +pub const SYS_ioprio_set: c_long = 30; +pub const SYS_ioprio_get: c_long = 31; +pub const SYS_flock: c_long = 32; +pub const SYS_mknodat: c_long = 33; +pub const SYS_mkdirat: c_long = 34; +pub const SYS_unlinkat: c_long = 35; +pub const SYS_symlinkat: c_long = 36; +pub const SYS_linkat: c_long = 37; +pub const SYS_renameat: c_long = 38; +pub const SYS_umount2: c_long = 39; +pub const SYS_mount: c_long = 40; +pub const SYS_pivot_root: c_long = 41; +pub const SYS_nfsservctl: c_long = 42; +pub const SYS_statfs: c_long = 43; +pub const SYS_fstatfs: c_long = 44; +pub const SYS_truncate: c_long = 45; +pub const SYS_ftruncate: c_long = 46; +pub const SYS_fallocate: c_long = 47; +pub const SYS_faccessat: c_long = 48; +pub const SYS_chdir: c_long = 49; +pub const SYS_fchdir: c_long = 50; +pub const SYS_chroot: c_long = 51; +pub const SYS_fchmod: c_long = 52; +pub const SYS_fchmodat: c_long = 53; +pub const SYS_fchownat: c_long = 54; +pub const SYS_fchown: c_long = 55; +pub const SYS_openat: c_long = 56; +pub const SYS_close: c_long = 57; +pub const SYS_vhangup: c_long = 58; +pub const SYS_pipe2: c_long = 59; +pub const SYS_quotactl: c_long = 60; +pub const SYS_getdents64: c_long = 61; +pub const SYS_lseek: c_long = 62; +pub const SYS_read: c_long = 63; +pub const SYS_write: c_long = 64; +pub const SYS_readv: c_long = 65; +pub const SYS_writev: c_long = 66; +pub const SYS_pread64: c_long = 67; +pub const SYS_pwrite64: c_long = 68; +pub const SYS_preadv: c_long = 69; +pub const SYS_pwritev: c_long = 70; +pub const SYS_pselect6: c_long = 72; +pub const SYS_ppoll: c_long = 73; +pub const SYS_signalfd4: c_long = 74; +pub const SYS_vmsplice: c_long = 75; +pub const SYS_splice: c_long = 76; +pub const SYS_tee: c_long = 77; +pub const SYS_readlinkat: c_long = 78; +pub const SYS_newfstatat: c_long = 79; +pub const SYS_fstat: c_long = 80; +pub const SYS_sync: c_long = 81; +pub const SYS_fsync: c_long = 82; +pub const SYS_fdatasync: c_long = 83; +pub 
const SYS_sync_file_range: c_long = 84; +pub const SYS_timerfd_create: c_long = 85; +pub const SYS_timerfd_settime: c_long = 86; +pub const SYS_timerfd_gettime: c_long = 87; +pub const SYS_utimensat: c_long = 88; +pub const SYS_acct: c_long = 89; +pub const SYS_capget: c_long = 90; +pub const SYS_capset: c_long = 91; +pub const SYS_personality: c_long = 92; +pub const SYS_exit: c_long = 93; +pub const SYS_exit_group: c_long = 94; +pub const SYS_waitid: c_long = 95; +pub const SYS_set_tid_address: c_long = 96; +pub const SYS_unshare: c_long = 97; +pub const SYS_futex: c_long = 98; +pub const SYS_set_robust_list: c_long = 99; +pub const SYS_get_robust_list: c_long = 100; +pub const SYS_nanosleep: c_long = 101; +pub const SYS_getitimer: c_long = 102; +pub const SYS_setitimer: c_long = 103; +pub const SYS_kexec_load: c_long = 104; +pub const SYS_init_module: c_long = 105; +pub const SYS_delete_module: c_long = 106; +pub const SYS_timer_create: c_long = 107; +pub const SYS_timer_gettime: c_long = 108; +pub const SYS_timer_getoverrun: c_long = 109; +pub const SYS_timer_settime: c_long = 110; +pub const SYS_timer_delete: c_long = 111; +pub const SYS_clock_settime: c_long = 112; +pub const SYS_clock_gettime: c_long = 113; +pub const SYS_clock_getres: c_long = 114; +pub const SYS_clock_nanosleep: c_long = 115; +pub const SYS_syslog: c_long = 116; +pub const SYS_ptrace: c_long = 117; +pub const SYS_sched_setparam: c_long = 118; +pub const SYS_sched_setscheduler: c_long = 119; +pub const SYS_sched_getscheduler: c_long = 120; +pub const SYS_sched_getparam: c_long = 121; +pub const SYS_sched_setaffinity: c_long = 122; +pub const SYS_sched_getaffinity: c_long = 123; +pub const SYS_sched_yield: c_long = 124; +pub const SYS_sched_get_priority_max: c_long = 125; +pub const SYS_sched_get_priority_min: c_long = 126; +pub const SYS_sched_rr_get_interval: c_long = 127; +pub const SYS_restart_syscall: c_long = 128; +pub const SYS_kill: c_long = 129; +pub const SYS_tkill: c_long = 130; +pub const SYS_tgkill: c_long = 131; +pub const SYS_sigaltstack: c_long = 132; +pub const SYS_rt_sigsuspend: c_long = 133; +pub const SYS_rt_sigaction: c_long = 134; +pub const SYS_rt_sigprocmask: c_long = 135; +pub const SYS_rt_sigpending: c_long = 136; +pub const SYS_rt_sigtimedwait: c_long = 137; +pub const SYS_rt_sigqueueinfo: c_long = 138; +pub const SYS_rt_sigreturn: c_long = 139; +pub const SYS_setpriority: c_long = 140; +pub const SYS_getpriority: c_long = 141; +pub const SYS_reboot: c_long = 142; +pub const SYS_setregid: c_long = 143; +pub const SYS_setgid: c_long = 144; +pub const SYS_setreuid: c_long = 145; +pub const SYS_setuid: c_long = 146; +pub const SYS_setresuid: c_long = 147; +pub const SYS_getresuid: c_long = 148; +pub const SYS_setresgid: c_long = 149; +pub const SYS_getresgid: c_long = 150; +pub const SYS_setfsuid: c_long = 151; +pub const SYS_setfsgid: c_long = 152; +pub const SYS_times: c_long = 153; +pub const SYS_setpgid: c_long = 154; +pub const SYS_getpgid: c_long = 155; +pub const SYS_getsid: c_long = 156; +pub const SYS_setsid: c_long = 157; +pub const SYS_getgroups: c_long = 158; +pub const SYS_setgroups: c_long = 159; +pub const SYS_uname: c_long = 160; +pub const SYS_sethostname: c_long = 161; +pub const SYS_setdomainname: c_long = 162; +pub const SYS_getrlimit: c_long = 163; +pub const SYS_setrlimit: c_long = 164; +pub const SYS_getrusage: c_long = 165; +pub const SYS_umask: c_long = 166; +pub const SYS_prctl: c_long = 167; +pub const SYS_getcpu: c_long = 168; +pub const SYS_gettimeofday: c_long = 
169; +pub const SYS_settimeofday: c_long = 170; +pub const SYS_adjtimex: c_long = 171; +pub const SYS_getpid: c_long = 172; +pub const SYS_getppid: c_long = 173; +pub const SYS_getuid: c_long = 174; +pub const SYS_geteuid: c_long = 175; +pub const SYS_getgid: c_long = 176; +pub const SYS_getegid: c_long = 177; +pub const SYS_gettid: c_long = 178; +pub const SYS_sysinfo: c_long = 179; +pub const SYS_mq_open: c_long = 180; +pub const SYS_mq_unlink: c_long = 181; +pub const SYS_mq_timedsend: c_long = 182; +pub const SYS_mq_timedreceive: c_long = 183; +pub const SYS_mq_notify: c_long = 184; +pub const SYS_mq_getsetattr: c_long = 185; +pub const SYS_msgget: c_long = 186; +pub const SYS_msgctl: c_long = 187; +pub const SYS_msgrcv: c_long = 188; +pub const SYS_msgsnd: c_long = 189; +pub const SYS_semget: c_long = 190; +pub const SYS_semctl: c_long = 191; +pub const SYS_semtimedop: c_long = 192; +pub const SYS_semop: c_long = 193; +pub const SYS_shmget: c_long = 194; +pub const SYS_shmctl: c_long = 195; +pub const SYS_shmat: c_long = 196; +pub const SYS_shmdt: c_long = 197; +pub const SYS_socket: c_long = 198; +pub const SYS_socketpair: c_long = 199; +pub const SYS_bind: c_long = 200; +pub const SYS_listen: c_long = 201; +pub const SYS_accept: c_long = 202; +pub const SYS_connect: c_long = 203; +pub const SYS_getsockname: c_long = 204; +pub const SYS_getpeername: c_long = 205; +pub const SYS_sendto: c_long = 206; +pub const SYS_recvfrom: c_long = 207; +pub const SYS_setsockopt: c_long = 208; +pub const SYS_getsockopt: c_long = 209; +pub const SYS_shutdown: c_long = 210; +pub const SYS_sendmsg: c_long = 211; +pub const SYS_recvmsg: c_long = 212; +pub const SYS_readahead: c_long = 213; +pub const SYS_brk: c_long = 214; +pub const SYS_munmap: c_long = 215; +pub const SYS_mremap: c_long = 216; +pub const SYS_add_key: c_long = 217; +pub const SYS_request_key: c_long = 218; +pub const SYS_keyctl: c_long = 219; +pub const SYS_clone: c_long = 220; +pub const SYS_execve: c_long = 221; +pub const SYS_mmap: c_long = 222; +pub const SYS_swapon: c_long = 224; +pub const SYS_swapoff: c_long = 225; +pub const SYS_mprotect: c_long = 226; +pub const SYS_msync: c_long = 227; +pub const SYS_mlock: c_long = 228; +pub const SYS_munlock: c_long = 229; +pub const SYS_mlockall: c_long = 230; +pub const SYS_munlockall: c_long = 231; +pub const SYS_mincore: c_long = 232; +pub const SYS_madvise: c_long = 233; +pub const SYS_remap_file_pages: c_long = 234; +pub const SYS_mbind: c_long = 235; +pub const SYS_get_mempolicy: c_long = 236; +pub const SYS_set_mempolicy: c_long = 237; +pub const SYS_migrate_pages: c_long = 238; +pub const SYS_move_pages: c_long = 239; +pub const SYS_rt_tgsigqueueinfo: c_long = 240; +pub const SYS_perf_event_open: c_long = 241; +pub const SYS_accept4: c_long = 242; +pub const SYS_recvmmsg: c_long = 243; +pub const SYS_wait4: c_long = 260; +pub const SYS_prlimit64: c_long = 261; +pub const SYS_fanotify_init: c_long = 262; +pub const SYS_fanotify_mark: c_long = 263; +pub const SYS_name_to_handle_at: c_long = 264; +pub const SYS_open_by_handle_at: c_long = 265; +pub const SYS_clock_adjtime: c_long = 266; +pub const SYS_syncfs: c_long = 267; +pub const SYS_setns: c_long = 268; +pub const SYS_sendmmsg: c_long = 269; +pub const SYS_process_vm_readv: c_long = 270; +pub const SYS_process_vm_writev: c_long = 271; +pub const SYS_kcmp: c_long = 272; +pub const SYS_finit_module: c_long = 273; +pub const SYS_sched_setattr: c_long = 274; +pub const SYS_sched_getattr: c_long = 275; +pub const SYS_renameat2: c_long 
= 276; +pub const SYS_seccomp: c_long = 277; +pub const SYS_getrandom: c_long = 278; +pub const SYS_memfd_create: c_long = 279; +pub const SYS_bpf: c_long = 280; +pub const SYS_execveat: c_long = 281; +pub const SYS_userfaultfd: c_long = 282; +pub const SYS_membarrier: c_long = 283; +pub const SYS_mlock2: c_long = 284; +pub const SYS_copy_file_range: c_long = 285; +pub const SYS_preadv2: c_long = 286; +pub const SYS_pwritev2: c_long = 287; +pub const SYS_pkey_mprotect: c_long = 288; +pub const SYS_pkey_alloc: c_long = 289; +pub const SYS_pkey_free: c_long = 290; +pub const SYS_statx: c_long = 291; +pub const SYS_io_pgetevents: c_long = 292; +pub const SYS_rseq: c_long = 293; +pub const SYS_pidfd_send_signal: c_long = 424; +pub const SYS_io_uring_setup: c_long = 425; +pub const SYS_io_uring_enter: c_long = 426; +pub const SYS_io_uring_register: c_long = 427; +pub const SYS_open_tree: c_long = 428; +pub const SYS_move_mount: c_long = 429; +pub const SYS_fsopen: c_long = 430; +pub const SYS_fsconfig: c_long = 431; +pub const SYS_fsmount: c_long = 432; +pub const SYS_fspick: c_long = 433; +pub const SYS_pidfd_open: c_long = 434; +pub const SYS_clone3: c_long = 435; +pub const SYS_close_range: c_long = 436; +pub const SYS_openat2: c_long = 437; +pub const SYS_pidfd_getfd: c_long = 438; +pub const SYS_faccessat2: c_long = 439; +pub const SYS_process_madvise: c_long = 440; +pub const SYS_epoll_pwait2: c_long = 441; +pub const SYS_mount_setattr: c_long = 442; +pub const SYS_quotactl_fd: c_long = 443; +pub const SYS_landlock_create_ruleset: c_long = 444; +pub const SYS_landlock_add_rule: c_long = 445; +pub const SYS_landlock_restrict_self: c_long = 446; +pub const SYS_memfd_secret: c_long = 447; +pub const SYS_process_mrelease: c_long = 448; +pub const SYS_futex_waitv: c_long = 449; +pub const SYS_set_mempolicy_home_node: c_long = 450; +pub const SYS_mseal: c_long = 462; + +pub const MCL_CURRENT: c_int = 0x0001; +pub const MCL_FUTURE: c_int = 0x0002; +pub const MCL_ONFAULT: c_int = 0x0004; +pub const CBAUD: crate::tcflag_t = 0o0010017; +pub const TAB1: c_int = 0x00000800; +pub const TAB2: c_int = 0x00001000; +pub const TAB3: c_int = 0x00001800; +pub const CR1: c_int = 0x00000200; +pub const CR2: c_int = 0x00000400; +pub const CR3: c_int = 0x00000600; +pub const FF1: c_int = 0x00008000; +pub const BS1: c_int = 0x00002000; +pub const VT1: c_int = 0x00004000; +pub const VWERASE: usize = 14; +pub const VREPRINT: usize = 12; +pub const VSUSP: usize = 10; +pub const VSTART: usize = 8; +pub const VSTOP: usize = 9; +pub const VDISCARD: usize = 13; +pub const VTIME: usize = 5; +pub const IXON: crate::tcflag_t = 0x00000400; +pub const IXOFF: crate::tcflag_t = 0x00001000; +pub const ONLCR: crate::tcflag_t = 0x4; +pub const CSIZE: crate::tcflag_t = 0x00000030; +pub const CS6: crate::tcflag_t = 0x00000010; +pub const CS7: crate::tcflag_t = 0x00000020; +pub const CS8: crate::tcflag_t = 0x00000030; +pub const CSTOPB: crate::tcflag_t = 0x00000040; +pub const CREAD: crate::tcflag_t = 0x00000080; +pub const PARENB: crate::tcflag_t = 0x00000100; +pub const PARODD: crate::tcflag_t = 0x00000200; +pub const HUPCL: crate::tcflag_t = 0x00000400; +pub const CLOCAL: crate::tcflag_t = 0x00000800; +pub const ECHOKE: crate::tcflag_t = 0x00000800; +pub const ECHOE: crate::tcflag_t = 0x00000010; +pub const ECHOK: crate::tcflag_t = 0x00000020; +pub const ECHONL: crate::tcflag_t = 0x00000040; +pub const ECHOPRT: crate::tcflag_t = 0x00000400; +pub const ECHOCTL: crate::tcflag_t = 0x00000200; +pub const ISIG: crate::tcflag_t = 
0x00000001; +pub const ICANON: crate::tcflag_t = 0x00000002; +pub const PENDIN: crate::tcflag_t = 0x00004000; +pub const NOFLSH: crate::tcflag_t = 0x00000080; +pub const CIBAUD: crate::tcflag_t = 0o02003600000; +pub const CBAUDEX: crate::tcflag_t = 0o010000; +pub const VSWTC: usize = 7; +pub const OLCUC: crate::tcflag_t = 0o000002; +pub const NLDLY: crate::tcflag_t = 0o000400; +pub const CRDLY: crate::tcflag_t = 0o003000; +pub const TABDLY: crate::tcflag_t = 0o014000; +pub const BSDLY: crate::tcflag_t = 0o020000; +pub const FFDLY: crate::tcflag_t = 0o100000; +pub const VTDLY: crate::tcflag_t = 0o040000; +pub const XTABS: crate::tcflag_t = 0o014000; +pub const B57600: crate::speed_t = 0o010001; +pub const B115200: crate::speed_t = 0o010002; +pub const B230400: crate::speed_t = 0o010003; +pub const B460800: crate::speed_t = 0o010004; +pub const B500000: crate::speed_t = 0o010005; +pub const B576000: crate::speed_t = 0o010006; +pub const B921600: crate::speed_t = 0o010007; +pub const B1000000: crate::speed_t = 0o010010; +pub const B1152000: crate::speed_t = 0o010011; +pub const B1500000: crate::speed_t = 0o010012; +pub const B2000000: crate::speed_t = 0o010013; +pub const B2500000: crate::speed_t = 0o010014; +pub const B3000000: crate::speed_t = 0o010015; +pub const B3500000: crate::speed_t = 0o010016; +pub const B4000000: crate::speed_t = 0o010017; + +pub const EDEADLK: c_int = 35; +pub const EDEADLOCK: c_int = EDEADLK; + +pub const EXTPROC: crate::tcflag_t = 0x00010000; +pub const VEOL: usize = 11; +pub const VEOL2: usize = 16; +pub const VMIN: usize = 6; +pub const IEXTEN: crate::tcflag_t = 0x00008000; +pub const TOSTOP: crate::tcflag_t = 0x00000100; +pub const FLUSHO: crate::tcflag_t = 0x00001000; diff --git a/vendor/libc/src/unix/linux_like/linux/musl/b64/loongarch64/mod.rs b/vendor/libc/src/unix/linux_like/linux/musl/b64/loongarch64/mod.rs new file mode 100644 index 00000000000000..e014fbf48c0dae --- /dev/null +++ b/vendor/libc/src/unix/linux_like/linux/musl/b64/loongarch64/mod.rs @@ -0,0 +1,667 @@ +//! LoongArch-specific definitions for 64-bit linux-like values + +use crate::prelude::*; +use crate::{off64_t, off_t}; + +pub type wchar_t = c_int; + +pub type nlink_t = c_uint; +pub type blksize_t = c_int; +pub type __u64 = c_ulonglong; +pub type __s64 = c_longlong; + +s! 
{ + pub struct stat { + pub st_dev: crate::dev_t, + pub st_ino: crate::ino_t, + pub st_mode: crate::mode_t, + pub st_nlink: crate::nlink_t, + pub st_uid: crate::uid_t, + pub st_gid: crate::gid_t, + pub st_rdev: crate::dev_t, + __pad1: crate::dev_t, + pub st_size: off_t, + pub st_blksize: crate::blksize_t, + __pad2: c_int, + pub st_blocks: crate::blkcnt_t, + pub st_atime: crate::time_t, + pub st_atime_nsec: c_long, + pub st_mtime: crate::time_t, + pub st_mtime_nsec: c_long, + pub st_ctime: crate::time_t, + pub st_ctime_nsec: c_long, + __unused: [c_int; 2usize], + } + + pub struct stat64 { + pub st_dev: crate::dev_t, + pub st_ino: crate::ino64_t, + pub st_mode: crate::mode_t, + pub st_nlink: crate::nlink_t, + pub st_uid: crate::uid_t, + pub st_gid: crate::gid_t, + pub st_rdev: crate::dev_t, + pub __pad1: crate::dev_t, + pub st_size: off64_t, + pub st_blksize: crate::blksize_t, + pub __pad2: c_int, + pub st_blocks: crate::blkcnt_t, + pub st_atime: crate::time_t, + pub st_atime_nsec: c_long, + pub st_mtime: crate::time_t, + pub st_mtime_nsec: c_long, + pub st_ctime: crate::time_t, + pub st_ctime_nsec: c_long, + __unused: [c_int; 2], + } + + pub struct ipc_perm { + pub __key: crate::key_t, + pub uid: crate::uid_t, + pub gid: crate::gid_t, + pub cuid: crate::uid_t, + pub cgid: crate::gid_t, + pub mode: c_uint, + pub __seq: c_int, + __unused1: c_ulong, + __unused2: c_ulong, + } + + pub struct user_regs_struct { + pub regs: [u64; 32], + pub orig_a0: u64, + pub csr_era: u64, + pub csr_badv: u64, + pub reserved: [u64; 10], + } + + pub struct user_fp_struct { + pub fpr: [u64; 32], + pub fcc: u64, + pub fcsr: u32, + } + + pub struct ucontext_t { + pub uc_flags: c_ulong, + pub uc_link: *mut ucontext_t, + pub uc_stack: crate::stack_t, + pub uc_sigmask: crate::sigset_t, + pub uc_mcontext: mcontext_t, + } + + #[repr(align(16))] + pub struct mcontext_t { + pub __pc: c_ulong, + pub __gregs: [c_ulong; 32], + pub __flags: c_uint, + pub __extcontext: [c_ulong; 0], + } + + #[repr(align(8))] + pub struct clone_args { + pub flags: c_ulonglong, + pub pidfd: c_ulonglong, + pub child_tid: c_ulonglong, + pub parent_tid: c_ulonglong, + pub exit_signal: c_ulonglong, + pub stack: c_ulonglong, + pub stack_size: c_ulonglong, + pub tls: c_ulonglong, + pub set_tid: c_ulonglong, + pub set_tid_size: c_ulonglong, + pub cgroup: c_ulonglong, + } +} + +s_no_extra_traits! 
{ + #[repr(align(16))] + pub struct max_align_t { + priv_: [f64; 4], + } +} + +pub const SYS_io_setup: c_long = 0; +pub const SYS_io_destroy: c_long = 1; +pub const SYS_io_submit: c_long = 2; +pub const SYS_io_cancel: c_long = 3; +pub const SYS_io_getevents: c_long = 4; +pub const SYS_setxattr: c_long = 5; +pub const SYS_lsetxattr: c_long = 6; +pub const SYS_fsetxattr: c_long = 7; +pub const SYS_getxattr: c_long = 8; +pub const SYS_lgetxattr: c_long = 9; +pub const SYS_fgetxattr: c_long = 10; +pub const SYS_listxattr: c_long = 11; +pub const SYS_llistxattr: c_long = 12; +pub const SYS_flistxattr: c_long = 13; +pub const SYS_removexattr: c_long = 14; +pub const SYS_lremovexattr: c_long = 15; +pub const SYS_fremovexattr: c_long = 16; +pub const SYS_getcwd: c_long = 17; +pub const SYS_lookup_dcookie: c_long = 18; +pub const SYS_eventfd2: c_long = 19; +pub const SYS_epoll_create1: c_long = 20; +pub const SYS_epoll_ctl: c_long = 21; +pub const SYS_epoll_pwait: c_long = 22; +pub const SYS_dup: c_long = 23; +pub const SYS_dup3: c_long = 24; +pub const SYS_fcntl: c_long = 25; +pub const SYS_inotify_init1: c_long = 26; +pub const SYS_inotify_add_watch: c_long = 27; +pub const SYS_inotify_rm_watch: c_long = 28; +pub const SYS_ioctl: c_long = 29; +pub const SYS_ioprio_set: c_long = 30; +pub const SYS_ioprio_get: c_long = 31; +pub const SYS_flock: c_long = 32; +pub const SYS_mknodat: c_long = 33; +pub const SYS_mkdirat: c_long = 34; +pub const SYS_unlinkat: c_long = 35; +pub const SYS_symlinkat: c_long = 36; +pub const SYS_linkat: c_long = 37; +pub const SYS_umount2: c_long = 39; +pub const SYS_mount: c_long = 40; +pub const SYS_pivot_root: c_long = 41; +pub const SYS_nfsservctl: c_long = 42; +pub const SYS_statfs: c_long = 43; +pub const SYS_fstatfs: c_long = 44; +pub const SYS_truncate: c_long = 45; +pub const SYS_ftruncate: c_long = 46; +pub const SYS_fallocate: c_long = 47; +pub const SYS_faccessat: c_long = 48; +pub const SYS_chdir: c_long = 49; +pub const SYS_fchdir: c_long = 50; +pub const SYS_chroot: c_long = 51; +pub const SYS_fchmod: c_long = 52; +pub const SYS_fchmodat: c_long = 53; +pub const SYS_fchownat: c_long = 54; +pub const SYS_fchown: c_long = 55; +pub const SYS_openat: c_long = 56; +pub const SYS_close: c_long = 57; +pub const SYS_vhangup: c_long = 58; +pub const SYS_pipe2: c_long = 59; +pub const SYS_quotactl: c_long = 60; +pub const SYS_getdents64: c_long = 61; +pub const SYS_lseek: c_long = 62; +pub const SYS_read: c_long = 63; +pub const SYS_write: c_long = 64; +pub const SYS_readv: c_long = 65; +pub const SYS_writev: c_long = 66; +pub const SYS_pread64: c_long = 67; +pub const SYS_pwrite64: c_long = 68; +pub const SYS_preadv: c_long = 69; +pub const SYS_pwritev: c_long = 70; +pub const SYS_sendfile: c_long = 71; +pub const SYS_pselect6: c_long = 72; +pub const SYS_ppoll: c_long = 73; +pub const SYS_signalfd4: c_long = 74; +pub const SYS_vmsplice: c_long = 75; +pub const SYS_splice: c_long = 76; +pub const SYS_tee: c_long = 77; +pub const SYS_readlinkat: c_long = 78; +pub const SYS_sync: c_long = 81; +pub const SYS_fsync: c_long = 82; +pub const SYS_fdatasync: c_long = 83; +pub const SYS_sync_file_range: c_long = 84; +pub const SYS_timerfd_create: c_long = 85; +pub const SYS_timerfd_settime: c_long = 86; +pub const SYS_timerfd_gettime: c_long = 87; +pub const SYS_utimensat: c_long = 88; +pub const SYS_acct: c_long = 89; +pub const SYS_capget: c_long = 90; +pub const SYS_capset: c_long = 91; +pub const SYS_personality: c_long = 92; +pub const SYS_exit: c_long = 93; +pub const 
SYS_exit_group: c_long = 94; +pub const SYS_waitid: c_long = 95; +pub const SYS_set_tid_address: c_long = 96; +pub const SYS_unshare: c_long = 97; +pub const SYS_futex: c_long = 98; +pub const SYS_set_robust_list: c_long = 99; +pub const SYS_get_robust_list: c_long = 100; +pub const SYS_nanosleep: c_long = 101; +pub const SYS_getitimer: c_long = 102; +pub const SYS_setitimer: c_long = 103; +pub const SYS_kexec_load: c_long = 104; +pub const SYS_init_module: c_long = 105; +pub const SYS_delete_module: c_long = 106; +pub const SYS_timer_create: c_long = 107; +pub const SYS_timer_gettime: c_long = 108; +pub const SYS_timer_getoverrun: c_long = 109; +pub const SYS_timer_settime: c_long = 110; +pub const SYS_timer_delete: c_long = 111; +pub const SYS_clock_settime: c_long = 112; +pub const SYS_clock_gettime: c_long = 113; +pub const SYS_clock_getres: c_long = 114; +pub const SYS_clock_nanosleep: c_long = 115; +pub const SYS_syslog: c_long = 116; +pub const SYS_ptrace: c_long = 117; +pub const SYS_sched_setparam: c_long = 118; +pub const SYS_sched_setscheduler: c_long = 119; +pub const SYS_sched_getscheduler: c_long = 120; +pub const SYS_sched_getparam: c_long = 121; +pub const SYS_sched_setaffinity: c_long = 122; +pub const SYS_sched_getaffinity: c_long = 123; +pub const SYS_sched_yield: c_long = 124; +pub const SYS_sched_get_priority_max: c_long = 125; +pub const SYS_sched_get_priority_min: c_long = 126; +pub const SYS_sched_rr_get_interval: c_long = 127; +pub const SYS_restart_syscall: c_long = 128; +pub const SYS_kill: c_long = 129; +pub const SYS_tkill: c_long = 130; +pub const SYS_tgkill: c_long = 131; +pub const SYS_sigaltstack: c_long = 132; +pub const SYS_rt_sigsuspend: c_long = 133; +pub const SYS_rt_sigaction: c_long = 134; +pub const SYS_rt_sigprocmask: c_long = 135; +pub const SYS_rt_sigpending: c_long = 136; +pub const SYS_rt_sigtimedwait: c_long = 137; +pub const SYS_rt_sigqueueinfo: c_long = 138; +pub const SYS_rt_sigreturn: c_long = 139; +pub const SYS_setpriority: c_long = 140; +pub const SYS_getpriority: c_long = 141; +pub const SYS_reboot: c_long = 142; +pub const SYS_setregid: c_long = 143; +pub const SYS_setgid: c_long = 144; +pub const SYS_setreuid: c_long = 145; +pub const SYS_setuid: c_long = 146; +pub const SYS_setresuid: c_long = 147; +pub const SYS_getresuid: c_long = 148; +pub const SYS_setresgid: c_long = 149; +pub const SYS_getresgid: c_long = 150; +pub const SYS_setfsuid: c_long = 151; +pub const SYS_setfsgid: c_long = 152; +pub const SYS_times: c_long = 153; +pub const SYS_setpgid: c_long = 154; +pub const SYS_getpgid: c_long = 155; +pub const SYS_getsid: c_long = 156; +pub const SYS_setsid: c_long = 157; +pub const SYS_getgroups: c_long = 158; +pub const SYS_setgroups: c_long = 159; +pub const SYS_uname: c_long = 160; +pub const SYS_sethostname: c_long = 161; +pub const SYS_setdomainname: c_long = 162; +pub const SYS_getrusage: c_long = 165; +pub const SYS_umask: c_long = 166; +pub const SYS_prctl: c_long = 167; +pub const SYS_getcpu: c_long = 168; +pub const SYS_gettimeofday: c_long = 169; +pub const SYS_settimeofday: c_long = 170; +pub const SYS_adjtimex: c_long = 171; +pub const SYS_getpid: c_long = 172; +pub const SYS_getppid: c_long = 173; +pub const SYS_getuid: c_long = 174; +pub const SYS_geteuid: c_long = 175; +pub const SYS_getgid: c_long = 176; +pub const SYS_getegid: c_long = 177; +pub const SYS_gettid: c_long = 178; +pub const SYS_sysinfo: c_long = 179; +pub const SYS_mq_open: c_long = 180; +pub const SYS_mq_unlink: c_long = 181; +pub const 
SYS_mq_timedsend: c_long = 182; +pub const SYS_mq_timedreceive: c_long = 183; +pub const SYS_mq_notify: c_long = 184; +pub const SYS_mq_getsetattr: c_long = 185; +pub const SYS_msgget: c_long = 186; +pub const SYS_msgctl: c_long = 187; +pub const SYS_msgrcv: c_long = 188; +pub const SYS_msgsnd: c_long = 189; +pub const SYS_semget: c_long = 190; +pub const SYS_semctl: c_long = 191; +pub const SYS_semtimedop: c_long = 192; +pub const SYS_semop: c_long = 193; +pub const SYS_shmget: c_long = 194; +pub const SYS_shmctl: c_long = 195; +pub const SYS_shmat: c_long = 196; +pub const SYS_shmdt: c_long = 197; +pub const SYS_socket: c_long = 198; +pub const SYS_socketpair: c_long = 199; +pub const SYS_bind: c_long = 200; +pub const SYS_listen: c_long = 201; +pub const SYS_accept: c_long = 202; +pub const SYS_connect: c_long = 203; +pub const SYS_getsockname: c_long = 204; +pub const SYS_getpeername: c_long = 205; +pub const SYS_sendto: c_long = 206; +pub const SYS_recvfrom: c_long = 207; +pub const SYS_setsockopt: c_long = 208; +pub const SYS_getsockopt: c_long = 209; +pub const SYS_shutdown: c_long = 210; +pub const SYS_sendmsg: c_long = 211; +pub const SYS_recvmsg: c_long = 212; +pub const SYS_readahead: c_long = 213; +pub const SYS_brk: c_long = 214; +pub const SYS_munmap: c_long = 215; +pub const SYS_mremap: c_long = 216; +pub const SYS_add_key: c_long = 217; +pub const SYS_request_key: c_long = 218; +pub const SYS_keyctl: c_long = 219; +pub const SYS_clone: c_long = 220; +pub const SYS_execve: c_long = 221; +pub const SYS_mmap: c_long = 222; +pub const SYS_fadvise64: c_long = 223; +pub const SYS_swapon: c_long = 224; +pub const SYS_swapoff: c_long = 225; +pub const SYS_mprotect: c_long = 226; +pub const SYS_msync: c_long = 227; +pub const SYS_mlock: c_long = 228; +pub const SYS_munlock: c_long = 229; +pub const SYS_mlockall: c_long = 230; +pub const SYS_munlockall: c_long = 231; +pub const SYS_mincore: c_long = 232; +pub const SYS_madvise: c_long = 233; +pub const SYS_remap_file_pages: c_long = 234; +pub const SYS_mbind: c_long = 235; +pub const SYS_get_mempolicy: c_long = 236; +pub const SYS_set_mempolicy: c_long = 237; +pub const SYS_migrate_pages: c_long = 238; +pub const SYS_move_pages: c_long = 239; +pub const SYS_rt_tgsigqueueinfo: c_long = 240; +pub const SYS_perf_event_open: c_long = 241; +pub const SYS_accept4: c_long = 242; +pub const SYS_recvmmsg: c_long = 243; +pub const SYS_arch_specific_syscall: c_long = 244; +pub const SYS_wait4: c_long = 260; +pub const SYS_prlimit64: c_long = 261; +pub const SYS_fanotify_init: c_long = 262; +pub const SYS_fanotify_mark: c_long = 263; +pub const SYS_name_to_handle_at: c_long = 264; +pub const SYS_open_by_handle_at: c_long = 265; +pub const SYS_clock_adjtime: c_long = 266; +pub const SYS_syncfs: c_long = 267; +pub const SYS_setns: c_long = 268; +pub const SYS_sendmmsg: c_long = 269; +pub const SYS_process_vm_readv: c_long = 270; +pub const SYS_process_vm_writev: c_long = 271; +pub const SYS_kcmp: c_long = 272; +pub const SYS_finit_module: c_long = 273; +pub const SYS_sched_setattr: c_long = 274; +pub const SYS_sched_getattr: c_long = 275; +pub const SYS_renameat2: c_long = 276; +pub const SYS_seccomp: c_long = 277; +pub const SYS_getrandom: c_long = 278; +pub const SYS_memfd_create: c_long = 279; +pub const SYS_bpf: c_long = 280; +pub const SYS_execveat: c_long = 281; +pub const SYS_userfaultfd: c_long = 282; +pub const SYS_membarrier: c_long = 283; +pub const SYS_mlock2: c_long = 284; +pub const SYS_copy_file_range: c_long = 285; +pub const 
SYS_preadv2: c_long = 286; +pub const SYS_pwritev2: c_long = 287; +pub const SYS_pkey_mprotect: c_long = 288; +pub const SYS_pkey_alloc: c_long = 289; +pub const SYS_pkey_free: c_long = 290; +pub const SYS_statx: c_long = 291; +pub const SYS_io_pgetevents: c_long = 292; +pub const SYS_rseq: c_long = 293; +pub const SYS_kexec_file_load: c_long = 294; +pub const SYS_pidfd_send_signal: c_long = 424; +pub const SYS_io_uring_setup: c_long = 425; +pub const SYS_io_uring_enter: c_long = 426; +pub const SYS_io_uring_register: c_long = 427; +pub const SYS_open_tree: c_long = 428; +pub const SYS_move_mount: c_long = 429; +pub const SYS_fsopen: c_long = 430; +pub const SYS_fsconfig: c_long = 431; +pub const SYS_fsmount: c_long = 432; +pub const SYS_fspick: c_long = 433; +pub const SYS_pidfd_open: c_long = 434; +pub const SYS_clone3: c_long = 435; +pub const SYS_close_range: c_long = 436; +pub const SYS_openat2: c_long = 437; +pub const SYS_pidfd_getfd: c_long = 438; +pub const SYS_faccessat2: c_long = 439; +pub const SYS_process_madvise: c_long = 440; +pub const SYS_epoll_pwait2: c_long = 441; +pub const SYS_mount_setattr: c_long = 442; +pub const SYS_quotactl_fd: c_long = 443; +pub const SYS_landlock_create_ruleset: c_long = 444; +pub const SYS_landlock_add_rule: c_long = 445; +pub const SYS_landlock_restrict_self: c_long = 446; +pub const SYS_process_mrelease: c_long = 448; +pub const SYS_futex_waitv: c_long = 449; +pub const SYS_set_mempolicy_home_node: c_long = 450; +pub const SYS_cachestat: c_long = 451; +pub const SYS_fchmodat2: c_long = 452; +pub const SYS_map_shadow_stack: c_long = 453; +pub const SYS_futex_wake: c_long = 454; +pub const SYS_futex_wait: c_long = 455; +pub const SYS_futex_requeue: c_long = 456; + +pub const O_APPEND: c_int = 1024; +pub const O_DIRECT: c_int = 0x4000; +pub const O_DIRECTORY: c_int = 0x10000; +pub const O_LARGEFILE: c_int = 0o0100000; +pub const O_NOFOLLOW: c_int = 0x20000; +pub const O_CREAT: c_int = 64; +pub const O_EXCL: c_int = 128; +pub const O_NOCTTY: c_int = 256; +pub const O_NONBLOCK: c_int = 2048; +pub const O_SYNC: c_int = 1052672; +pub const O_RSYNC: c_int = 1052672; +pub const O_DSYNC: c_int = 4096; +pub const O_ASYNC: c_int = 0o20000; + +pub const SIGSTKSZ: size_t = 16384; +pub const MINSIGSTKSZ: size_t = 4096; + +pub const ENAMETOOLONG: c_int = 36; +pub const ENOLCK: c_int = 37; +pub const ENOSYS: c_int = 38; +pub const ENOTEMPTY: c_int = 39; +pub const ELOOP: c_int = 40; +pub const ENOMSG: c_int = 42; +pub const EIDRM: c_int = 43; +pub const ECHRNG: c_int = 44; +pub const EL2NSYNC: c_int = 45; +pub const EL3HLT: c_int = 46; +pub const EL3RST: c_int = 47; +pub const ELNRNG: c_int = 48; +pub const EUNATCH: c_int = 49; +pub const ENOCSI: c_int = 50; +pub const EL2HLT: c_int = 51; +pub const EBADE: c_int = 52; +pub const EBADR: c_int = 53; +pub const EXFULL: c_int = 54; +pub const ENOANO: c_int = 55; +pub const EBADRQC: c_int = 56; +pub const EBADSLT: c_int = 57; +pub const EMULTIHOP: c_int = 72; +pub const EOVERFLOW: c_int = 75; +pub const ENOTUNIQ: c_int = 76; +pub const EBADFD: c_int = 77; +pub const EBADMSG: c_int = 74; +pub const EREMCHG: c_int = 78; +pub const ELIBACC: c_int = 79; +pub const ELIBBAD: c_int = 80; +pub const ELIBSCN: c_int = 81; +pub const ELIBMAX: c_int = 82; +pub const ELIBEXEC: c_int = 83; +pub const EILSEQ: c_int = 84; +pub const ERESTART: c_int = 85; +pub const ESTRPIPE: c_int = 86; +pub const EUSERS: c_int = 87; +pub const ENOTSOCK: c_int = 88; +pub const EDESTADDRREQ: c_int = 89; +pub const EMSGSIZE: c_int = 90; +pub const 
EPROTOTYPE: c_int = 91; +pub const ENOPROTOOPT: c_int = 92; +pub const EPROTONOSUPPORT: c_int = 93; +pub const ESOCKTNOSUPPORT: c_int = 94; +pub const EOPNOTSUPP: c_int = 95; +pub const ENOTSUP: c_int = EOPNOTSUPP; +pub const EPFNOSUPPORT: c_int = 96; +pub const EAFNOSUPPORT: c_int = 97; +pub const EADDRINUSE: c_int = 98; +pub const EADDRNOTAVAIL: c_int = 99; +pub const ENETDOWN: c_int = 100; +pub const ENETUNREACH: c_int = 101; +pub const ENETRESET: c_int = 102; +pub const ECONNABORTED: c_int = 103; +pub const ECONNRESET: c_int = 104; +pub const ENOBUFS: c_int = 105; +pub const EISCONN: c_int = 106; +pub const ENOTCONN: c_int = 107; +pub const ESHUTDOWN: c_int = 108; +pub const ETOOMANYREFS: c_int = 109; +pub const ETIMEDOUT: c_int = 110; +pub const ECONNREFUSED: c_int = 111; +pub const EHOSTDOWN: c_int = 112; +pub const EHOSTUNREACH: c_int = 113; +pub const EALREADY: c_int = 114; +pub const EINPROGRESS: c_int = 115; +pub const ESTALE: c_int = 116; +pub const EUCLEAN: c_int = 117; +pub const ENOTNAM: c_int = 118; +pub const ENAVAIL: c_int = 119; +pub const EISNAM: c_int = 120; +pub const EREMOTEIO: c_int = 121; +pub const EDQUOT: c_int = 122; +pub const ENOMEDIUM: c_int = 123; +pub const EMEDIUMTYPE: c_int = 124; +pub const ECANCELED: c_int = 125; +pub const ENOKEY: c_int = 126; +pub const EKEYEXPIRED: c_int = 127; +pub const EKEYREVOKED: c_int = 128; +pub const EKEYREJECTED: c_int = 129; +pub const EOWNERDEAD: c_int = 130; +pub const ENOTRECOVERABLE: c_int = 131; +pub const EHWPOISON: c_int = 133; +pub const ERFKILL: c_int = 132; + +pub const MADV_SOFT_OFFLINE: c_int = 101; + +pub const SA_ONSTACK: c_int = 0x08000000; +pub const SA_SIGINFO: c_int = 0x00000004; +pub const SA_NOCLDWAIT: c_int = 0x00000002; + +pub const SIGCHLD: c_int = 17; +pub const SIGBUS: c_int = 7; +pub const SIGTTIN: c_int = 21; +pub const SIGTTOU: c_int = 22; +pub const SIGXCPU: c_int = 24; +pub const SIGXFSZ: c_int = 25; +pub const SIGVTALRM: c_int = 26; +pub const SIGPROF: c_int = 27; +pub const SIGWINCH: c_int = 28; +pub const SIGUSR1: c_int = 10; +pub const SIGUSR2: c_int = 12; +pub const SIGCONT: c_int = 18; +pub const SIGSTOP: c_int = 19; +pub const SIGTSTP: c_int = 20; +pub const SIGURG: c_int = 23; +pub const SIGIO: c_int = 29; +pub const SIGSYS: c_int = 31; +pub const SIGSTKFLT: c_int = 16; +pub const SIGPOLL: c_int = 29; +pub const SIGPWR: c_int = 30; +pub const SIG_SETMASK: c_int = 2; +pub const SIG_BLOCK: c_int = 0; +pub const SIG_UNBLOCK: c_int = 1; + +pub const F_GETLK: c_int = 5; +pub const F_GETOWN: c_int = 9; +pub const F_SETLK: c_int = 6; +pub const F_SETLKW: c_int = 7; +pub const F_SETOWN: c_int = 8; + +pub const VEOF: usize = 4; + +pub const POLLWRNORM: c_short = 0x100; +pub const POLLWRBAND: c_short = 0x200; + +pub const SOCK_STREAM: c_int = 1; +pub const SOCK_DGRAM: c_int = 2; + +pub const MAP_ANON: c_int = 0x0020; +pub const MAP_GROWSDOWN: c_int = 0x0100; +pub const MAP_DENYWRITE: c_int = 0x0800; +pub const MAP_EXECUTABLE: c_int = 0x01000; +pub const MAP_LOCKED: c_int = 0x02000; +pub const MAP_NORESERVE: c_int = 0x04000; +pub const MAP_POPULATE: c_int = 0x08000; +pub const MAP_NONBLOCK: c_int = 0x010000; +pub const MAP_STACK: c_int = 0x020000; +pub const MAP_HUGETLB: c_int = 0x040000; +pub const MAP_SYNC: c_int = 0x080000; + +pub const MCL_CURRENT: c_int = 0x0001; +pub const MCL_FUTURE: c_int = 0x0002; +pub const MCL_ONFAULT: c_int = 0x0004; +pub const CBAUD: crate::tcflag_t = 0o0010017; +pub const TAB1: c_int = 0x00000800; +pub const TAB2: c_int = 0x00001000; +pub const TAB3: c_int = 
0x00001800; +pub const CR1: c_int = 0x00000200; +pub const CR2: c_int = 0x00000400; +pub const CR3: c_int = 0x00000600; +pub const FF1: c_int = 0x00008000; +pub const BS1: c_int = 0x00002000; +pub const VT1: c_int = 0x00004000; +pub const VWERASE: usize = 14; +pub const VREPRINT: usize = 12; +pub const VSUSP: usize = 10; +pub const VSTART: usize = 8; +pub const VSTOP: usize = 9; +pub const VDISCARD: usize = 13; +pub const VTIME: usize = 5; +pub const IXON: crate::tcflag_t = 0x00000400; +pub const IXOFF: crate::tcflag_t = 0x00001000; +pub const ONLCR: crate::tcflag_t = 0x4; +pub const CSIZE: crate::tcflag_t = 0x00000030; +pub const CS6: crate::tcflag_t = 0x00000010; +pub const CS7: crate::tcflag_t = 0x00000020; +pub const CS8: crate::tcflag_t = 0x00000030; +pub const CSTOPB: crate::tcflag_t = 0x00000040; +pub const CREAD: crate::tcflag_t = 0x00000080; +pub const PARENB: crate::tcflag_t = 0x00000100; +pub const PARODD: crate::tcflag_t = 0x00000200; +pub const HUPCL: crate::tcflag_t = 0x00000400; +pub const CLOCAL: crate::tcflag_t = 0x00000800; +pub const ECHOKE: crate::tcflag_t = 0x00000800; +pub const ECHOE: crate::tcflag_t = 0x00000010; +pub const ECHOK: crate::tcflag_t = 0x00000020; +pub const ECHONL: crate::tcflag_t = 0x00000040; +pub const ECHOPRT: crate::tcflag_t = 0x00000400; +pub const ECHOCTL: crate::tcflag_t = 0x00000200; +pub const ISIG: crate::tcflag_t = 0x00000001; +pub const ICANON: crate::tcflag_t = 0x00000002; +pub const XCASE: crate::tcflag_t = 0x00000004; +pub const PENDIN: crate::tcflag_t = 0x00004000; +pub const NOFLSH: crate::tcflag_t = 0x00000080; +pub const CIBAUD: crate::tcflag_t = 0o02003600000; +pub const CBAUDEX: crate::tcflag_t = 0o010000; +pub const VSWTC: usize = 7; +pub const OLCUC: crate::tcflag_t = 0o000002; +pub const NLDLY: crate::tcflag_t = 0o000400; +pub const CRDLY: crate::tcflag_t = 0o003000; +pub const TABDLY: crate::tcflag_t = 0o014000; +pub const BSDLY: crate::tcflag_t = 0o020000; +pub const FFDLY: crate::tcflag_t = 0o100000; +pub const VTDLY: crate::tcflag_t = 0o040000; +pub const XTABS: crate::tcflag_t = 0o014000; +pub const B57600: crate::speed_t = 0o010001; +pub const B115200: crate::speed_t = 0o010002; +pub const B230400: crate::speed_t = 0o010003; +pub const B460800: crate::speed_t = 0o010004; +pub const B500000: crate::speed_t = 0o010005; +pub const B576000: crate::speed_t = 0o010006; +pub const B921600: crate::speed_t = 0o010007; +pub const B1000000: crate::speed_t = 0o010010; +pub const B1152000: crate::speed_t = 0o010011; +pub const B1500000: crate::speed_t = 0o010012; +pub const B2000000: crate::speed_t = 0o010013; +pub const B2500000: crate::speed_t = 0o010014; +pub const B3000000: crate::speed_t = 0o010015; +pub const B3500000: crate::speed_t = 0o010016; +pub const B4000000: crate::speed_t = 0o010017; + +pub const EDEADLK: c_int = 35; +pub const EDEADLOCK: c_int = EDEADLK; +pub const EXTPROC: crate::tcflag_t = 0x00010000; +pub const VEOL: usize = 11; +pub const VEOL2: usize = 16; +pub const VMIN: usize = 6; +pub const IEXTEN: crate::tcflag_t = 0x00008000; +pub const TOSTOP: crate::tcflag_t = 0x00000100; +pub const FLUSHO: crate::tcflag_t = 0x00001000; diff --git a/vendor/libc/src/unix/linux_like/linux/musl/b64/mips64.rs b/vendor/libc/src/unix/linux_like/linux/musl/b64/mips64.rs new file mode 100644 index 00000000000000..95dd37c8898042 --- /dev/null +++ b/vendor/libc/src/unix/linux_like/linux/musl/b64/mips64.rs @@ -0,0 +1,708 @@ +use crate::off_t; +use crate::prelude::*; + +pub type wchar_t = i32; +pub type __u64 = c_ulong; +pub type 
__s64 = c_long; +pub type nlink_t = c_uint; +pub type blksize_t = i64; + +s! { + pub struct stat { + pub st_dev: crate::dev_t, + __pad1: [c_int; 3], + pub st_ino: crate::ino_t, + pub st_mode: crate::mode_t, + pub st_nlink: crate::nlink_t, + pub st_uid: crate::uid_t, + pub st_gid: crate::gid_t, + pub st_rdev: crate::dev_t, + __pad2: [c_uint; 2], + pub st_size: off_t, + __pad3: c_int, + pub st_atime: crate::time_t, + pub st_atime_nsec: c_long, + pub st_mtime: crate::time_t, + pub st_mtime_nsec: c_long, + pub st_ctime: crate::time_t, + pub st_ctime_nsec: c_long, + pub st_blksize: crate::blksize_t, + __pad4: c_uint, + pub st_blocks: crate::blkcnt_t, + __pad5: [c_int; 14], + } + + pub struct stat64 { + pub st_dev: crate::dev_t, + __pad1: [c_int; 3], + pub st_ino: crate::ino_t, + pub st_mode: crate::mode_t, + pub st_nlink: crate::nlink_t, + pub st_uid: crate::uid_t, + pub st_gid: crate::gid_t, + pub st_rdev: crate::dev_t, + __pad2: [c_uint; 2], + pub st_size: off_t, + __pad3: c_int, + pub st_atime: crate::time_t, + pub st_atime_nsec: c_long, + pub st_mtime: crate::time_t, + pub st_mtime_nsec: c_long, + pub st_ctime: crate::time_t, + pub st_ctime_nsec: c_long, + pub st_blksize: crate::blksize_t, + __pad4: c_uint, + pub st_blocks: crate::blkcnt_t, + __pad5: [c_int; 14], + } + + pub struct stack_t { + pub ss_sp: *mut c_void, + pub ss_size: size_t, + pub ss_flags: c_int, + } + + pub struct ipc_perm { + #[cfg(musl_v1_2_3)] + pub __key: crate::key_t, + #[cfg(not(musl_v1_2_3))] + #[deprecated( + since = "0.2.173", + note = "This field is incorrectly named and will be changed + to __key in a future release." + )] + pub __ipc_perm_key: crate::key_t, + pub uid: crate::uid_t, + pub gid: crate::gid_t, + pub cuid: crate::uid_t, + pub cgid: crate::gid_t, + pub mode: crate::mode_t, + pub __seq: c_int, + __pad1: c_int, + __unused1: c_ulong, + __unused2: c_ulong, + } + + pub struct statfs { + pub f_type: c_ulong, + pub f_bsize: c_ulong, + pub f_frsize: c_ulong, + pub f_blocks: crate::fsblkcnt_t, + pub f_bfree: crate::fsblkcnt_t, + pub f_files: crate::fsfilcnt_t, + pub f_ffree: crate::fsfilcnt_t, + pub f_bavail: crate::fsblkcnt_t, + pub f_fsid: crate::fsid_t, + pub f_namelen: c_ulong, + pub f_flags: c_ulong, + pub f_spare: [c_ulong; 5], + } + + pub struct statfs64 { + pub f_type: c_ulong, + pub f_bsize: c_ulong, + pub f_frsize: c_ulong, + pub f_blocks: crate::fsblkcnt64_t, + pub f_bfree: crate::fsblkcnt64_t, + pub f_files: crate::fsfilcnt64_t, + pub f_ffree: crate::fsfilcnt64_t, + pub f_bavail: crate::fsblkcnt64_t, + pub f_fsid: crate::fsid_t, + pub f_namelen: c_ulong, + pub f_flags: c_ulong, + pub f_spare: [c_ulong; 5], + } +} + +pub const SIGSTKSZ: size_t = 8192; +pub const MINSIGSTKSZ: size_t = 2048; + +pub const SYS_read: c_long = 5000 + 0; +pub const SYS_write: c_long = 5000 + 1; +pub const SYS_open: c_long = 5000 + 2; +pub const SYS_close: c_long = 5000 + 3; +pub const SYS_stat: c_long = 5000 + 4; +pub const SYS_fstat: c_long = 5000 + 5; +pub const SYS_lstat: c_long = 5000 + 6; +pub const SYS_poll: c_long = 5000 + 7; +pub const SYS_lseek: c_long = 5000 + 8; +pub const SYS_mmap: c_long = 5000 + 9; +pub const SYS_mprotect: c_long = 5000 + 10; +pub const SYS_munmap: c_long = 5000 + 11; +pub const SYS_brk: c_long = 5000 + 12; +pub const SYS_rt_sigaction: c_long = 5000 + 13; +pub const SYS_rt_sigprocmask: c_long = 5000 + 14; +pub const SYS_ioctl: c_long = 5000 + 15; +pub const SYS_pread64: c_long = 5000 + 16; +pub const SYS_pwrite64: c_long = 5000 + 17; +pub const SYS_readv: c_long = 5000 + 18; +pub const 
SYS_writev: c_long = 5000 + 19; +pub const SYS_access: c_long = 5000 + 20; +pub const SYS_pipe: c_long = 5000 + 21; +pub const SYS__newselect: c_long = 5000 + 22; +pub const SYS_sched_yield: c_long = 5000 + 23; +pub const SYS_mremap: c_long = 5000 + 24; +pub const SYS_msync: c_long = 5000 + 25; +pub const SYS_mincore: c_long = 5000 + 26; +pub const SYS_madvise: c_long = 5000 + 27; +pub const SYS_shmget: c_long = 5000 + 28; +pub const SYS_shmat: c_long = 5000 + 29; +pub const SYS_shmctl: c_long = 5000 + 30; +pub const SYS_dup: c_long = 5000 + 31; +pub const SYS_dup2: c_long = 5000 + 32; +pub const SYS_pause: c_long = 5000 + 33; +pub const SYS_nanosleep: c_long = 5000 + 34; +pub const SYS_getitimer: c_long = 5000 + 35; +pub const SYS_setitimer: c_long = 5000 + 36; +pub const SYS_alarm: c_long = 5000 + 37; +pub const SYS_getpid: c_long = 5000 + 38; +pub const SYS_sendfile: c_long = 5000 + 39; +pub const SYS_socket: c_long = 5000 + 40; +pub const SYS_connect: c_long = 5000 + 41; +pub const SYS_accept: c_long = 5000 + 42; +pub const SYS_sendto: c_long = 5000 + 43; +pub const SYS_recvfrom: c_long = 5000 + 44; +pub const SYS_sendmsg: c_long = 5000 + 45; +pub const SYS_recvmsg: c_long = 5000 + 46; +pub const SYS_shutdown: c_long = 5000 + 47; +pub const SYS_bind: c_long = 5000 + 48; +pub const SYS_listen: c_long = 5000 + 49; +pub const SYS_getsockname: c_long = 5000 + 50; +pub const SYS_getpeername: c_long = 5000 + 51; +pub const SYS_socketpair: c_long = 5000 + 52; +pub const SYS_setsockopt: c_long = 5000 + 53; +pub const SYS_getsockopt: c_long = 5000 + 54; +pub const SYS_clone: c_long = 5000 + 55; +pub const SYS_fork: c_long = 5000 + 56; +pub const SYS_execve: c_long = 5000 + 57; +pub const SYS_exit: c_long = 5000 + 58; +pub const SYS_wait4: c_long = 5000 + 59; +pub const SYS_kill: c_long = 5000 + 60; +pub const SYS_uname: c_long = 5000 + 61; +pub const SYS_semget: c_long = 5000 + 62; +pub const SYS_semop: c_long = 5000 + 63; +pub const SYS_semctl: c_long = 5000 + 64; +pub const SYS_shmdt: c_long = 5000 + 65; +pub const SYS_msgget: c_long = 5000 + 66; +pub const SYS_msgsnd: c_long = 5000 + 67; +pub const SYS_msgrcv: c_long = 5000 + 68; +pub const SYS_msgctl: c_long = 5000 + 69; +pub const SYS_fcntl: c_long = 5000 + 70; +pub const SYS_flock: c_long = 5000 + 71; +pub const SYS_fsync: c_long = 5000 + 72; +pub const SYS_fdatasync: c_long = 5000 + 73; +pub const SYS_truncate: c_long = 5000 + 74; +pub const SYS_ftruncate: c_long = 5000 + 75; +pub const SYS_getdents: c_long = 5000 + 76; +pub const SYS_getcwd: c_long = 5000 + 77; +pub const SYS_chdir: c_long = 5000 + 78; +pub const SYS_fchdir: c_long = 5000 + 79; +pub const SYS_rename: c_long = 5000 + 80; +pub const SYS_mkdir: c_long = 5000 + 81; +pub const SYS_rmdir: c_long = 5000 + 82; +pub const SYS_creat: c_long = 5000 + 83; +pub const SYS_link: c_long = 5000 + 84; +pub const SYS_unlink: c_long = 5000 + 85; +pub const SYS_symlink: c_long = 5000 + 86; +pub const SYS_readlink: c_long = 5000 + 87; +pub const SYS_chmod: c_long = 5000 + 88; +pub const SYS_fchmod: c_long = 5000 + 89; +pub const SYS_chown: c_long = 5000 + 90; +pub const SYS_fchown: c_long = 5000 + 91; +pub const SYS_lchown: c_long = 5000 + 92; +pub const SYS_umask: c_long = 5000 + 93; +pub const SYS_gettimeofday: c_long = 5000 + 94; +pub const SYS_getrlimit: c_long = 5000 + 95; +pub const SYS_getrusage: c_long = 5000 + 96; +pub const SYS_sysinfo: c_long = 5000 + 97; +pub const SYS_times: c_long = 5000 + 98; +pub const SYS_ptrace: c_long = 5000 + 99; +pub const SYS_getuid: c_long = 5000 + 
100; +pub const SYS_syslog: c_long = 5000 + 101; +pub const SYS_getgid: c_long = 5000 + 102; +pub const SYS_setuid: c_long = 5000 + 103; +pub const SYS_setgid: c_long = 5000 + 104; +pub const SYS_geteuid: c_long = 5000 + 105; +pub const SYS_getegid: c_long = 5000 + 106; +pub const SYS_setpgid: c_long = 5000 + 107; +pub const SYS_getppid: c_long = 5000 + 108; +pub const SYS_getpgrp: c_long = 5000 + 109; +pub const SYS_setsid: c_long = 5000 + 110; +pub const SYS_setreuid: c_long = 5000 + 111; +pub const SYS_setregid: c_long = 5000 + 112; +pub const SYS_getgroups: c_long = 5000 + 113; +pub const SYS_setgroups: c_long = 5000 + 114; +pub const SYS_setresuid: c_long = 5000 + 115; +pub const SYS_getresuid: c_long = 5000 + 116; +pub const SYS_setresgid: c_long = 5000 + 117; +pub const SYS_getresgid: c_long = 5000 + 118; +pub const SYS_getpgid: c_long = 5000 + 119; +pub const SYS_setfsuid: c_long = 5000 + 120; +pub const SYS_setfsgid: c_long = 5000 + 121; +pub const SYS_getsid: c_long = 5000 + 122; +pub const SYS_capget: c_long = 5000 + 123; +pub const SYS_capset: c_long = 5000 + 124; +pub const SYS_rt_sigpending: c_long = 5000 + 125; +pub const SYS_rt_sigtimedwait: c_long = 5000 + 126; +pub const SYS_rt_sigqueueinfo: c_long = 5000 + 127; +pub const SYS_rt_sigsuspend: c_long = 5000 + 128; +pub const SYS_sigaltstack: c_long = 5000 + 129; +pub const SYS_utime: c_long = 5000 + 130; +pub const SYS_mknod: c_long = 5000 + 131; +pub const SYS_personality: c_long = 5000 + 132; +pub const SYS_ustat: c_long = 5000 + 133; +pub const SYS_statfs: c_long = 5000 + 134; +pub const SYS_fstatfs: c_long = 5000 + 135; +pub const SYS_sysfs: c_long = 5000 + 136; +pub const SYS_getpriority: c_long = 5000 + 137; +pub const SYS_setpriority: c_long = 5000 + 138; +pub const SYS_sched_setparam: c_long = 5000 + 139; +pub const SYS_sched_getparam: c_long = 5000 + 140; +pub const SYS_sched_setscheduler: c_long = 5000 + 141; +pub const SYS_sched_getscheduler: c_long = 5000 + 142; +pub const SYS_sched_get_priority_max: c_long = 5000 + 143; +pub const SYS_sched_get_priority_min: c_long = 5000 + 144; +pub const SYS_sched_rr_get_interval: c_long = 5000 + 145; +pub const SYS_mlock: c_long = 5000 + 146; +pub const SYS_munlock: c_long = 5000 + 147; +pub const SYS_mlockall: c_long = 5000 + 148; +pub const SYS_munlockall: c_long = 5000 + 149; +pub const SYS_vhangup: c_long = 5000 + 150; +pub const SYS_pivot_root: c_long = 5000 + 151; +pub const SYS__sysctl: c_long = 5000 + 152; +pub const SYS_prctl: c_long = 5000 + 153; +pub const SYS_adjtimex: c_long = 5000 + 154; +pub const SYS_setrlimit: c_long = 5000 + 155; +pub const SYS_chroot: c_long = 5000 + 156; +pub const SYS_sync: c_long = 5000 + 157; +pub const SYS_acct: c_long = 5000 + 158; +pub const SYS_settimeofday: c_long = 5000 + 159; +pub const SYS_mount: c_long = 5000 + 160; +pub const SYS_umount2: c_long = 5000 + 161; +pub const SYS_swapon: c_long = 5000 + 162; +pub const SYS_swapoff: c_long = 5000 + 163; +pub const SYS_reboot: c_long = 5000 + 164; +pub const SYS_sethostname: c_long = 5000 + 165; +pub const SYS_setdomainname: c_long = 5000 + 166; +#[deprecated(since = "0.2.70", note = "Functional up to 2.6 kernel")] +pub const SYS_create_module: c_long = 5000 + 167; +pub const SYS_init_module: c_long = 5000 + 168; +pub const SYS_delete_module: c_long = 5000 + 169; +#[deprecated(since = "0.2.70", note = "Functional up to 2.6 kernel")] +pub const SYS_get_kernel_syms: c_long = 5000 + 170; +#[deprecated(since = "0.2.70", note = "Functional up to 2.6 kernel")] +pub const SYS_query_module: 
c_long = 5000 + 171; +pub const SYS_quotactl: c_long = 5000 + 172; +pub const SYS_nfsservctl: c_long = 5000 + 173; +pub const SYS_getpmsg: c_long = 5000 + 174; +pub const SYS_putpmsg: c_long = 5000 + 175; +pub const SYS_afs_syscall: c_long = 5000 + 176; +pub const SYS_gettid: c_long = 5000 + 178; +pub const SYS_readahead: c_long = 5000 + 179; +pub const SYS_setxattr: c_long = 5000 + 180; +pub const SYS_lsetxattr: c_long = 5000 + 181; +pub const SYS_fsetxattr: c_long = 5000 + 182; +pub const SYS_getxattr: c_long = 5000 + 183; +pub const SYS_lgetxattr: c_long = 5000 + 184; +pub const SYS_fgetxattr: c_long = 5000 + 185; +pub const SYS_listxattr: c_long = 5000 + 186; +pub const SYS_llistxattr: c_long = 5000 + 187; +pub const SYS_flistxattr: c_long = 5000 + 188; +pub const SYS_removexattr: c_long = 5000 + 189; +pub const SYS_lremovexattr: c_long = 5000 + 190; +pub const SYS_fremovexattr: c_long = 5000 + 191; +pub const SYS_tkill: c_long = 5000 + 192; +pub const SYS_futex: c_long = 5000 + 194; +pub const SYS_sched_setaffinity: c_long = 5000 + 195; +pub const SYS_sched_getaffinity: c_long = 5000 + 196; +pub const SYS_cacheflush: c_long = 5000 + 197; +pub const SYS_cachectl: c_long = 5000 + 198; +pub const SYS_sysmips: c_long = 5000 + 199; +pub const SYS_io_setup: c_long = 5000 + 200; +pub const SYS_io_destroy: c_long = 5000 + 201; +pub const SYS_io_getevents: c_long = 5000 + 202; +pub const SYS_io_submit: c_long = 5000 + 203; +pub const SYS_io_cancel: c_long = 5000 + 204; +pub const SYS_exit_group: c_long = 5000 + 205; +pub const SYS_lookup_dcookie: c_long = 5000 + 206; +pub const SYS_epoll_create: c_long = 5000 + 207; +pub const SYS_epoll_ctl: c_long = 5000 + 208; +pub const SYS_epoll_wait: c_long = 5000 + 209; +pub const SYS_remap_file_pages: c_long = 5000 + 210; +pub const SYS_rt_sigreturn: c_long = 5000 + 211; +pub const SYS_set_tid_address: c_long = 5000 + 212; +pub const SYS_restart_syscall: c_long = 5000 + 213; +pub const SYS_semtimedop: c_long = 5000 + 214; +pub const SYS_fadvise64: c_long = 5000 + 215; +pub const SYS_timer_create: c_long = 5000 + 216; +pub const SYS_timer_settime: c_long = 5000 + 217; +pub const SYS_timer_gettime: c_long = 5000 + 218; +pub const SYS_timer_getoverrun: c_long = 5000 + 219; +pub const SYS_timer_delete: c_long = 5000 + 220; +pub const SYS_clock_settime: c_long = 5000 + 221; +pub const SYS_clock_gettime: c_long = 5000 + 222; +pub const SYS_clock_getres: c_long = 5000 + 223; +pub const SYS_clock_nanosleep: c_long = 5000 + 224; +pub const SYS_tgkill: c_long = 5000 + 225; +pub const SYS_utimes: c_long = 5000 + 226; +pub const SYS_mbind: c_long = 5000 + 227; +pub const SYS_get_mempolicy: c_long = 5000 + 228; +pub const SYS_set_mempolicy: c_long = 5000 + 229; +pub const SYS_mq_open: c_long = 5000 + 230; +pub const SYS_mq_unlink: c_long = 5000 + 231; +pub const SYS_mq_timedsend: c_long = 5000 + 232; +pub const SYS_mq_timedreceive: c_long = 5000 + 233; +pub const SYS_mq_notify: c_long = 5000 + 234; +pub const SYS_mq_getsetattr: c_long = 5000 + 235; +pub const SYS_vserver: c_long = 5000 + 236; +pub const SYS_waitid: c_long = 5000 + 237; +/* pub const SYS_sys_setaltroot: c_long = 5000 + 238; */ +pub const SYS_add_key: c_long = 5000 + 239; +pub const SYS_request_key: c_long = 5000 + 240; +pub const SYS_keyctl: c_long = 5000 + 241; +pub const SYS_set_thread_area: c_long = 5000 + 242; +pub const SYS_inotify_init: c_long = 5000 + 243; +pub const SYS_inotify_add_watch: c_long = 5000 + 244; +pub const SYS_inotify_rm_watch: c_long = 5000 + 245; +pub const SYS_migrate_pages: 
c_long = 5000 + 246; +pub const SYS_openat: c_long = 5000 + 247; +pub const SYS_mkdirat: c_long = 5000 + 248; +pub const SYS_mknodat: c_long = 5000 + 249; +pub const SYS_fchownat: c_long = 5000 + 250; +pub const SYS_futimesat: c_long = 5000 + 251; +pub const SYS_newfstatat: c_long = 5000 + 252; +pub const SYS_unlinkat: c_long = 5000 + 253; +pub const SYS_renameat: c_long = 5000 + 254; +pub const SYS_linkat: c_long = 5000 + 255; +pub const SYS_symlinkat: c_long = 5000 + 256; +pub const SYS_readlinkat: c_long = 5000 + 257; +pub const SYS_fchmodat: c_long = 5000 + 258; +pub const SYS_faccessat: c_long = 5000 + 259; +pub const SYS_pselect6: c_long = 5000 + 260; +pub const SYS_ppoll: c_long = 5000 + 261; +pub const SYS_unshare: c_long = 5000 + 262; +pub const SYS_splice: c_long = 5000 + 263; +pub const SYS_sync_file_range: c_long = 5000 + 264; +pub const SYS_tee: c_long = 5000 + 265; +pub const SYS_vmsplice: c_long = 5000 + 266; +pub const SYS_move_pages: c_long = 5000 + 267; +pub const SYS_set_robust_list: c_long = 5000 + 268; +pub const SYS_get_robust_list: c_long = 5000 + 269; +pub const SYS_kexec_load: c_long = 5000 + 270; +pub const SYS_getcpu: c_long = 5000 + 271; +pub const SYS_epoll_pwait: c_long = 5000 + 272; +pub const SYS_ioprio_set: c_long = 5000 + 273; +pub const SYS_ioprio_get: c_long = 5000 + 274; +pub const SYS_utimensat: c_long = 5000 + 275; +pub const SYS_signalfd: c_long = 5000 + 276; +pub const SYS_timerfd: c_long = 5000 + 277; +pub const SYS_eventfd: c_long = 5000 + 278; +pub const SYS_fallocate: c_long = 5000 + 279; +pub const SYS_timerfd_create: c_long = 5000 + 280; +pub const SYS_timerfd_gettime: c_long = 5000 + 281; +pub const SYS_timerfd_settime: c_long = 5000 + 282; +pub const SYS_signalfd4: c_long = 5000 + 283; +pub const SYS_eventfd2: c_long = 5000 + 284; +pub const SYS_epoll_create1: c_long = 5000 + 285; +pub const SYS_dup3: c_long = 5000 + 286; +pub const SYS_pipe2: c_long = 5000 + 287; +pub const SYS_inotify_init1: c_long = 5000 + 288; +pub const SYS_preadv: c_long = 5000 + 289; +pub const SYS_pwritev: c_long = 5000 + 290; +pub const SYS_rt_tgsigqueueinfo: c_long = 5000 + 291; +pub const SYS_perf_event_open: c_long = 5000 + 292; +pub const SYS_accept4: c_long = 5000 + 293; +pub const SYS_recvmmsg: c_long = 5000 + 294; +pub const SYS_fanotify_init: c_long = 5000 + 295; +pub const SYS_fanotify_mark: c_long = 5000 + 296; +pub const SYS_prlimit64: c_long = 5000 + 297; +pub const SYS_name_to_handle_at: c_long = 5000 + 298; +pub const SYS_open_by_handle_at: c_long = 5000 + 299; +pub const SYS_clock_adjtime: c_long = 5000 + 300; +pub const SYS_syncfs: c_long = 5000 + 301; +pub const SYS_sendmmsg: c_long = 5000 + 302; +pub const SYS_setns: c_long = 5000 + 303; +pub const SYS_process_vm_readv: c_long = 5000 + 304; +pub const SYS_process_vm_writev: c_long = 5000 + 305; +pub const SYS_kcmp: c_long = 5000 + 306; +pub const SYS_finit_module: c_long = 5000 + 307; +pub const SYS_getdents64: c_long = 5000 + 308; +pub const SYS_sched_setattr: c_long = 5000 + 309; +pub const SYS_sched_getattr: c_long = 5000 + 310; +pub const SYS_renameat2: c_long = 5000 + 311; +pub const SYS_seccomp: c_long = 5000 + 312; +pub const SYS_getrandom: c_long = 5000 + 313; +pub const SYS_memfd_create: c_long = 5000 + 314; +pub const SYS_bpf: c_long = 5000 + 315; +pub const SYS_execveat: c_long = 5000 + 316; +pub const SYS_userfaultfd: c_long = 5000 + 317; +pub const SYS_membarrier: c_long = 5000 + 318; +pub const SYS_mlock2: c_long = 5000 + 319; +pub const SYS_copy_file_range: c_long = 5000 + 320; +pub 
const SYS_preadv2: c_long = 5000 + 321; +pub const SYS_pwritev2: c_long = 5000 + 322; +pub const SYS_pkey_mprotect: c_long = 5000 + 323; +pub const SYS_pkey_alloc: c_long = 5000 + 324; +pub const SYS_pkey_free: c_long = 5000 + 325; +pub const SYS_statx: c_long = 5000 + 326; +pub const SYS_pidfd_send_signal: c_long = 5000 + 424; +pub const SYS_io_uring_setup: c_long = 5000 + 425; +pub const SYS_io_uring_enter: c_long = 5000 + 426; +pub const SYS_io_uring_register: c_long = 5000 + 427; +pub const SYS_open_tree: c_long = 5000 + 428; +pub const SYS_move_mount: c_long = 5000 + 429; +pub const SYS_fsopen: c_long = 5000 + 430; +pub const SYS_fsconfig: c_long = 5000 + 431; +pub const SYS_fsmount: c_long = 5000 + 432; +pub const SYS_fspick: c_long = 5000 + 433; +pub const SYS_pidfd_open: c_long = 5000 + 434; +pub const SYS_clone3: c_long = 5000 + 435; +pub const SYS_close_range: c_long = 5000 + 436; +pub const SYS_openat2: c_long = 5000 + 437; +pub const SYS_pidfd_getfd: c_long = 5000 + 438; +pub const SYS_faccessat2: c_long = 5000 + 439; +pub const SYS_process_madvise: c_long = 5000 + 440; +pub const SYS_epoll_pwait2: c_long = 5000 + 441; +pub const SYS_mount_setattr: c_long = 5000 + 442; +pub const SYS_quotactl_fd: c_long = 5000 + 443; +pub const SYS_landlock_create_ruleset: c_long = 5000 + 444; +pub const SYS_landlock_add_rule: c_long = 5000 + 445; +pub const SYS_landlock_restrict_self: c_long = 5000 + 446; +pub const SYS_memfd_secret: c_long = 5000 + 447; +pub const SYS_process_mrelease: c_long = 5000 + 448; +pub const SYS_futex_waitv: c_long = 5000 + 449; +pub const SYS_set_mempolicy_home_node: c_long = 5000 + 450; + +pub const O_DIRECT: c_int = 0x8000; +pub const O_DIRECTORY: c_int = 0x10000; +pub const O_NOFOLLOW: c_int = 0x20000; + +pub const O_APPEND: c_int = 8; +pub const O_CREAT: c_int = 256; +pub const O_EXCL: c_int = 1024; +pub const O_NOCTTY: c_int = 2048; +pub const O_NONBLOCK: c_int = 128; +pub const O_SYNC: c_int = 0x4010; +pub const O_RSYNC: c_int = 0x4010; +pub const O_DSYNC: c_int = 0x10; +pub const O_ASYNC: c_int = 0x1000; +pub const O_LARGEFILE: c_int = 0x2000; + +pub const EDEADLK: c_int = 45; +pub const ENAMETOOLONG: c_int = 78; +pub const ENOLCK: c_int = 46; +pub const ENOSYS: c_int = 89; +pub const ENOTEMPTY: c_int = 93; +pub const ELOOP: c_int = 90; +pub const ENOMSG: c_int = 35; +pub const EIDRM: c_int = 36; +pub const ECHRNG: c_int = 37; +pub const EL2NSYNC: c_int = 38; +pub const EL3HLT: c_int = 39; +pub const EL3RST: c_int = 40; +pub const ELNRNG: c_int = 41; +pub const EUNATCH: c_int = 42; +pub const ENOCSI: c_int = 43; +pub const EL2HLT: c_int = 44; +pub const EBADE: c_int = 50; +pub const EBADR: c_int = 51; +pub const EXFULL: c_int = 52; +pub const ENOANO: c_int = 53; +pub const EBADRQC: c_int = 54; +pub const EBADSLT: c_int = 55; +pub const EDEADLOCK: c_int = 56; +pub const EMULTIHOP: c_int = 74; +pub const EOVERFLOW: c_int = 79; +pub const ENOTUNIQ: c_int = 80; +pub const EBADFD: c_int = 81; +pub const EBADMSG: c_int = 77; +pub const EREMCHG: c_int = 82; +pub const ELIBACC: c_int = 83; +pub const ELIBBAD: c_int = 84; +pub const ELIBSCN: c_int = 85; +pub const ELIBMAX: c_int = 86; +pub const ELIBEXEC: c_int = 87; +pub const EILSEQ: c_int = 88; +pub const ERESTART: c_int = 91; +pub const ESTRPIPE: c_int = 92; +pub const EUSERS: c_int = 94; +pub const ENOTSOCK: c_int = 95; +pub const EDESTADDRREQ: c_int = 96; +pub const EMSGSIZE: c_int = 97; +pub const EPROTOTYPE: c_int = 98; +pub const ENOPROTOOPT: c_int = 99; +pub const EPROTONOSUPPORT: c_int = 120; +pub const 
ESOCKTNOSUPPORT: c_int = 121; +pub const EOPNOTSUPP: c_int = 122; +pub const ENOTSUP: c_int = EOPNOTSUPP; +pub const EPFNOSUPPORT: c_int = 123; +pub const EAFNOSUPPORT: c_int = 124; +pub const EADDRINUSE: c_int = 125; +pub const EADDRNOTAVAIL: c_int = 126; +pub const ENETDOWN: c_int = 127; +pub const ENETUNREACH: c_int = 128; +pub const ENETRESET: c_int = 129; +pub const ECONNABORTED: c_int = 130; +pub const ECONNRESET: c_int = 131; +pub const ENOBUFS: c_int = 132; +pub const EISCONN: c_int = 133; +pub const ENOTCONN: c_int = 134; +pub const ESHUTDOWN: c_int = 143; +pub const ETOOMANYREFS: c_int = 144; +pub const ETIMEDOUT: c_int = 145; +pub const ECONNREFUSED: c_int = 146; +pub const EHOSTDOWN: c_int = 147; +pub const EHOSTUNREACH: c_int = 148; +pub const EALREADY: c_int = 149; +pub const EINPROGRESS: c_int = 150; +pub const ESTALE: c_int = 151; +pub const EUCLEAN: c_int = 135; +pub const ENOTNAM: c_int = 137; +pub const ENAVAIL: c_int = 138; +pub const EISNAM: c_int = 139; +pub const EREMOTEIO: c_int = 140; +pub const EDQUOT: c_int = 1133; +pub const ENOMEDIUM: c_int = 159; +pub const EMEDIUMTYPE: c_int = 160; +pub const ECANCELED: c_int = 158; +pub const ENOKEY: c_int = 161; +pub const EKEYEXPIRED: c_int = 162; +pub const EKEYREVOKED: c_int = 163; +pub const EKEYREJECTED: c_int = 164; +pub const EOWNERDEAD: c_int = 165; +pub const ENOTRECOVERABLE: c_int = 166; +pub const ERFKILL: c_int = 167; + +pub const MAP_ANON: c_int = 0x800; +pub const MAP_GROWSDOWN: c_int = 0x1000; +pub const MAP_DENYWRITE: c_int = 0x2000; +pub const MAP_EXECUTABLE: c_int = 0x4000; +pub const MAP_LOCKED: c_int = 0x8000; +pub const MAP_NORESERVE: c_int = 0x400; +pub const MAP_POPULATE: c_int = 0x10000; +pub const MAP_NONBLOCK: c_int = 0x20000; +pub const MAP_STACK: c_int = 0x40000; +pub const MAP_HUGETLB: c_int = 0x080000; + +pub const SOCK_STREAM: c_int = 2; +pub const SOCK_DGRAM: c_int = 1; + +pub const SA_ONSTACK: c_int = 0x08000000; +pub const SA_SIGINFO: c_int = 0x00000008; +pub const SA_NOCLDWAIT: c_int = 0x00010000; + +pub const SIGEMT: c_int = 7; +pub const SIGCHLD: c_int = 18; +pub const SIGBUS: c_int = 10; +pub const SIGTTIN: c_int = 26; +pub const SIGTTOU: c_int = 27; +pub const SIGXCPU: c_int = 30; +pub const SIGXFSZ: c_int = 31; +pub const SIGVTALRM: c_int = 28; +pub const SIGPROF: c_int = 29; +pub const SIGWINCH: c_int = 20; +pub const SIGUSR1: c_int = 16; +pub const SIGUSR2: c_int = 17; +pub const SIGCONT: c_int = 25; +pub const SIGSTOP: c_int = 23; +pub const SIGTSTP: c_int = 24; +pub const SIGURG: c_int = 21; +pub const SIGIO: c_int = 22; +pub const SIGSYS: c_int = 12; +pub const SIGPOLL: c_int = 22; +pub const SIGPWR: c_int = 19; +pub const SIG_SETMASK: c_int = 3; +pub const SIG_BLOCK: c_int = 0x1; +pub const SIG_UNBLOCK: c_int = 0x2; + +pub const POLLWRNORM: c_short = 0x004; +pub const POLLWRBAND: c_short = 0x100; + +pub const VEOF: usize = 16; +pub const VEOL: usize = 17; +pub const VEOL2: usize = 6; +pub const VMIN: usize = 4; +pub const IEXTEN: crate::tcflag_t = 0x00000100; +pub const TOSTOP: crate::tcflag_t = 0x00008000; +pub const FLUSHO: crate::tcflag_t = 0x00002000; +pub const EXTPROC: crate::tcflag_t = 0o200000; + +pub const F_GETLK: c_int = 14; +pub const F_GETOWN: c_int = 23; +pub const F_SETOWN: c_int = 24; +pub const F_SETLK: c_int = 6; +pub const F_SETLKW: c_int = 7; + +pub const MCL_CURRENT: c_int = 0x0001; +pub const MCL_FUTURE: c_int = 0x0002; +pub const MCL_ONFAULT: c_int = 0x0004; + +pub const CBAUD: crate::tcflag_t = 0o0010017; +pub const TAB1: crate::tcflag_t = 0x00000800; 
+pub const TAB2: crate::tcflag_t = 0x00001000; +pub const TAB3: crate::tcflag_t = 0x00001800; +pub const CR1: crate::tcflag_t = 0x00000200; +pub const CR2: crate::tcflag_t = 0x00000400; +pub const CR3: crate::tcflag_t = 0x00000600; +pub const FF1: crate::tcflag_t = 0x00008000; +pub const BS1: crate::tcflag_t = 0x00002000; +pub const VT1: crate::tcflag_t = 0x00004000; +pub const VWERASE: usize = 14; +pub const VREPRINT: usize = 12; +pub const VSUSP: usize = 10; +pub const VSTART: usize = 8; +pub const VSTOP: usize = 9; +pub const VDISCARD: usize = 13; +pub const VTIME: usize = 5; +pub const IXON: crate::tcflag_t = 0x00000400; +pub const IXOFF: crate::tcflag_t = 0x00001000; +pub const ONLCR: crate::tcflag_t = 0x4; +pub const CSIZE: crate::tcflag_t = 0x00000030; +pub const CS6: crate::tcflag_t = 0x00000010; +pub const CS7: crate::tcflag_t = 0x00000020; +pub const CS8: crate::tcflag_t = 0x00000030; +pub const CSTOPB: crate::tcflag_t = 0x00000040; +pub const CREAD: crate::tcflag_t = 0x00000080; +pub const PARENB: crate::tcflag_t = 0x00000100; +pub const PARODD: crate::tcflag_t = 0x00000200; +pub const HUPCL: crate::tcflag_t = 0x00000400; +pub const CLOCAL: crate::tcflag_t = 0x00000800; +pub const ECHOKE: crate::tcflag_t = 0x00000800; +pub const ECHOE: crate::tcflag_t = 0x00000010; +pub const ECHOK: crate::tcflag_t = 0x00000020; +pub const ECHONL: crate::tcflag_t = 0x00000040; +pub const ECHOPRT: crate::tcflag_t = 0x00000400; +pub const ECHOCTL: crate::tcflag_t = 0x00000200; +pub const ISIG: crate::tcflag_t = 0x00000001; +pub const ICANON: crate::tcflag_t = 0x00000002; +pub const PENDIN: crate::tcflag_t = 0x00004000; +pub const NOFLSH: crate::tcflag_t = 0x00000080; +pub const CIBAUD: crate::tcflag_t = 0o02003600000; +pub const CBAUDEX: crate::tcflag_t = 0o010000; +pub const VSWTC: usize = 7; +pub const OLCUC: crate::tcflag_t = 0o000002; +pub const NLDLY: crate::tcflag_t = 0o000400; +pub const CRDLY: crate::tcflag_t = 0o003000; +pub const TABDLY: crate::tcflag_t = 0o014000; +pub const BSDLY: crate::tcflag_t = 0o020000; +pub const FFDLY: crate::tcflag_t = 0o100000; +pub const VTDLY: crate::tcflag_t = 0o040000; +pub const XTABS: crate::tcflag_t = 0o014000; + +pub const B57600: crate::speed_t = 0o010001; +pub const B115200: crate::speed_t = 0o010002; +pub const B230400: crate::speed_t = 0o010003; +pub const B460800: crate::speed_t = 0o010004; +pub const B500000: crate::speed_t = 0o010005; +pub const B576000: crate::speed_t = 0o010006; +pub const B921600: crate::speed_t = 0o010007; +pub const B1000000: crate::speed_t = 0o010010; +pub const B1152000: crate::speed_t = 0o010011; +pub const B1500000: crate::speed_t = 0o010012; +pub const B2000000: crate::speed_t = 0o010013; +pub const B2500000: crate::speed_t = 0o010014; +pub const B3000000: crate::speed_t = 0o010015; +pub const B3500000: crate::speed_t = 0o010016; +pub const B4000000: crate::speed_t = 0o010017; + +pub const EHWPOISON: c_int = 168; diff --git a/vendor/libc/src/unix/linux_like/linux/musl/b64/mod.rs b/vendor/libc/src/unix/linux_like/linux/musl/b64/mod.rs new file mode 100644 index 00000000000000..1bfd812ab2a344 --- /dev/null +++ b/vendor/libc/src/unix/linux_like/linux/musl/b64/mod.rs @@ -0,0 +1,116 @@ +use crate::prelude::*; + +pub type regoff_t = c_long; + +s! { + // MIPS implementation is special, see the subfolder. 
+    #[cfg(not(target_arch = "mips64"))]
+    pub struct stack_t {
+        pub ss_sp: *mut c_void,
+        pub ss_flags: c_int,
+        pub ss_size: size_t,
+    }
+
+    pub struct pthread_attr_t {
+        __size: [u64; 7],
+    }
+
+    pub struct sigset_t {
+        __val: [c_ulong; 16],
+    }
+
+    // PowerPC implementation is special, see the subfolder.
+    #[cfg(not(target_arch = "powerpc64"))]
+    pub struct shmid_ds {
+        pub shm_perm: crate::ipc_perm,
+        pub shm_segsz: size_t,
+        pub shm_atime: crate::time_t,
+        pub shm_dtime: crate::time_t,
+        pub shm_ctime: crate::time_t,
+        pub shm_cpid: crate::pid_t,
+        pub shm_lpid: crate::pid_t,
+        pub shm_nattch: c_ulong,
+        __pad1: c_ulong,
+        __pad2: c_ulong,
+    }
+
+    pub struct msqid_ds {
+        pub msg_perm: crate::ipc_perm,
+        pub msg_stime: crate::time_t,
+        pub msg_rtime: crate::time_t,
+        pub msg_ctime: crate::time_t,
+        pub __msg_cbytes: c_ulong,
+        pub msg_qnum: crate::msgqnum_t,
+        pub msg_qbytes: crate::msglen_t,
+        pub msg_lspid: crate::pid_t,
+        pub msg_lrpid: crate::pid_t,
+        __pad1: c_ulong,
+        __pad2: c_ulong,
+    }
+
+    pub struct msghdr {
+        pub msg_name: *mut c_void,
+        pub msg_namelen: crate::socklen_t,
+        pub msg_iov: *mut crate::iovec,
+        #[cfg(target_endian = "big")]
+        __pad1: c_int,
+        pub msg_iovlen: c_int,
+        #[cfg(target_endian = "little")]
+        __pad1: c_int,
+        pub msg_control: *mut c_void,
+        #[cfg(target_endian = "big")]
+        __pad2: c_int,
+        pub msg_controllen: crate::socklen_t,
+        #[cfg(target_endian = "little")]
+        __pad2: c_int,
+        pub msg_flags: c_int,
+    }
+
+    pub struct cmsghdr {
+        #[cfg(target_endian = "big")]
+        pub __pad1: c_int,
+        pub cmsg_len: crate::socklen_t,
+        #[cfg(target_endian = "little")]
+        pub __pad1: c_int,
+        pub cmsg_level: c_int,
+        pub cmsg_type: c_int,
+    }
+
+    pub struct sem_t {
+        __val: [c_int; 8],
+    }
+}
+
+pub const __SIZEOF_PTHREAD_RWLOCK_T: usize = 56;
+pub const __SIZEOF_PTHREAD_MUTEX_T: usize = 40;
+pub const __SIZEOF_PTHREAD_BARRIER_T: usize = 32;
+
+cfg_if! {
+    if #[cfg(target_arch = "aarch64")] {
+        mod aarch64;
+        pub use self::aarch64::*;
+    } else if #[cfg(target_arch = "mips64")] {
+        mod mips64;
+        pub use self::mips64::*;
+    } else if #[cfg(any(target_arch = "powerpc64"))] {
+        mod powerpc64;
+        pub use self::powerpc64::*;
+    } else if #[cfg(any(target_arch = "s390x"))] {
+        mod s390x;
+        pub use self::s390x::*;
+    } else if #[cfg(any(target_arch = "x86_64"))] {
+        mod x86_64;
+        pub use self::x86_64::*;
+    } else if #[cfg(any(target_arch = "riscv64"))] {
+        mod riscv64;
+        pub use self::riscv64::*;
+    } else if #[cfg(any(target_arch = "loongarch64"))] {
+        mod loongarch64;
+        pub use self::loongarch64::*;
+    } else if #[cfg(any(target_arch = "wasm32"))] {
+        mod wasm32;
+        pub use self::wasm32::*;
+    } else {
+        // Unknown target_arch
+    }
+}
diff --git a/vendor/libc/src/unix/linux_like/linux/musl/b64/powerpc64.rs b/vendor/libc/src/unix/linux_like/linux/musl/b64/powerpc64.rs
new file mode 100644
index 00000000000000..bbcd382211dfde
--- /dev/null
+++ b/vendor/libc/src/unix/linux_like/linux/musl/b64/powerpc64.rs
@@ -0,0 +1,752 @@
+use crate::off_t;
+use crate::prelude::*;
+
+pub type wchar_t = i32;
+pub type __u64 = c_ulong;
+pub type __s64 = c_long;
+pub type nlink_t = u64;
+pub type blksize_t = c_long;
+
+s!
{ + pub struct termios { + pub c_iflag: crate::tcflag_t, + pub c_oflag: crate::tcflag_t, + pub c_cflag: crate::tcflag_t, + pub c_lflag: crate::tcflag_t, + pub c_cc: [crate::cc_t; crate::NCCS], + pub c_line: crate::cc_t, + pub __c_ispeed: crate::speed_t, + pub __c_ospeed: crate::speed_t, + } + + pub struct stat { + pub st_dev: crate::dev_t, + pub st_ino: crate::ino_t, + pub st_nlink: crate::nlink_t, + pub st_mode: crate::mode_t, + pub st_uid: crate::uid_t, + pub st_gid: crate::gid_t, + __pad0: c_int, + pub st_rdev: crate::dev_t, + pub st_size: off_t, + pub st_blksize: crate::blksize_t, + pub st_blocks: crate::blkcnt_t, + pub st_atime: crate::time_t, + pub st_atime_nsec: c_long, + pub st_mtime: crate::time_t, + pub st_mtime_nsec: c_long, + pub st_ctime: crate::time_t, + pub st_ctime_nsec: c_long, + __unused: [c_long; 3], + } + + pub struct stat64 { + pub st_dev: crate::dev_t, + pub st_ino: crate::ino64_t, + pub st_nlink: crate::nlink_t, + pub st_mode: crate::mode_t, + pub st_uid: crate::uid_t, + pub st_gid: crate::gid_t, + __pad0: c_int, + pub st_rdev: crate::dev_t, + pub st_size: off_t, + pub st_blksize: crate::blksize_t, + pub st_blocks: crate::blkcnt64_t, + pub st_atime: crate::time_t, + pub st_atime_nsec: c_long, + pub st_mtime: crate::time_t, + pub st_mtime_nsec: c_long, + pub st_ctime: crate::time_t, + pub st_ctime_nsec: c_long, + __reserved: [c_long; 3], + } + + pub struct shmid_ds { + pub shm_perm: crate::ipc_perm, + pub shm_atime: crate::time_t, + pub shm_dtime: crate::time_t, + pub shm_ctime: crate::time_t, + pub shm_segsz: size_t, + pub shm_cpid: crate::pid_t, + pub shm_lpid: crate::pid_t, + pub shm_nattch: c_ulong, + __unused: [c_ulong; 2], + } + + pub struct ipc_perm { + #[cfg(musl_v1_2_3)] + pub __key: crate::key_t, + #[cfg(not(musl_v1_2_3))] + #[deprecated( + since = "0.2.173", + note = "This field is incorrectly named and will be changed + to __key in a future release." 
+ )] + pub __ipc_perm_key: crate::key_t, + pub uid: crate::uid_t, + pub gid: crate::gid_t, + pub cuid: crate::uid_t, + pub cgid: crate::gid_t, + pub mode: crate::mode_t, + pub __seq: c_int, + __unused1: c_long, + __unused2: c_long, + } +} + +pub const MADV_SOFT_OFFLINE: c_int = 101; +#[deprecated( + since = "0.2.175", + note = "Linux does not define MAP_32BIT on any architectures \ + other than x86 and x86_64, this constant will be removed in the future" +)] +pub const MAP_32BIT: c_int = 0x0040; +pub const O_APPEND: c_int = 1024; +pub const O_DIRECT: c_int = 0x20000; +pub const O_DIRECTORY: c_int = 0x4000; +pub const O_LARGEFILE: c_int = 0x10000; +pub const O_NOFOLLOW: c_int = 0x8000; +pub const O_CREAT: c_int = 64; +pub const O_EXCL: c_int = 128; +pub const O_NOCTTY: c_int = 256; +pub const O_NONBLOCK: c_int = 2048; +pub const O_SYNC: c_int = 1052672; +pub const O_RSYNC: c_int = 1052672; +pub const O_DSYNC: c_int = 4096; +pub const O_ASYNC: c_int = 0x2000; + +pub const ENAMETOOLONG: c_int = 36; +pub const ENOLCK: c_int = 37; +pub const ENOSYS: c_int = 38; +pub const ENOTEMPTY: c_int = 39; +pub const ELOOP: c_int = 40; +pub const ENOMSG: c_int = 42; +pub const EIDRM: c_int = 43; +pub const ECHRNG: c_int = 44; +pub const EL2NSYNC: c_int = 45; +pub const EL3HLT: c_int = 46; +pub const EL3RST: c_int = 47; +pub const ELNRNG: c_int = 48; +pub const EUNATCH: c_int = 49; +pub const ENOCSI: c_int = 50; +pub const EL2HLT: c_int = 51; +pub const EBADE: c_int = 52; +pub const EBADR: c_int = 53; +pub const EXFULL: c_int = 54; +pub const ENOANO: c_int = 55; +pub const EBADRQC: c_int = 56; +pub const EBADSLT: c_int = 57; +pub const EMULTIHOP: c_int = 72; +pub const EBADMSG: c_int = 74; +pub const EOVERFLOW: c_int = 75; +pub const ENOTUNIQ: c_int = 76; +pub const EBADFD: c_int = 77; +pub const EREMCHG: c_int = 78; +pub const ELIBACC: c_int = 79; +pub const ELIBBAD: c_int = 80; +pub const ELIBSCN: c_int = 81; +pub const ELIBMAX: c_int = 82; +pub const ELIBEXEC: c_int = 83; +pub const EILSEQ: c_int = 84; +pub const ERESTART: c_int = 85; +pub const ESTRPIPE: c_int = 86; +pub const EUSERS: c_int = 87; +pub const ENOTSOCK: c_int = 88; +pub const EDESTADDRREQ: c_int = 89; +pub const EMSGSIZE: c_int = 90; +pub const EPROTOTYPE: c_int = 91; +pub const ENOPROTOOPT: c_int = 92; +pub const EPROTONOSUPPORT: c_int = 93; +pub const ESOCKTNOSUPPORT: c_int = 94; +pub const EOPNOTSUPP: c_int = 95; +pub const ENOTSUP: c_int = EOPNOTSUPP; +pub const EPFNOSUPPORT: c_int = 96; +pub const EAFNOSUPPORT: c_int = 97; +pub const EADDRINUSE: c_int = 98; +pub const EADDRNOTAVAIL: c_int = 99; +pub const ENETDOWN: c_int = 100; +pub const ENETUNREACH: c_int = 101; +pub const ENETRESET: c_int = 102; +pub const ECONNABORTED: c_int = 103; +pub const ECONNRESET: c_int = 104; +pub const ENOBUFS: c_int = 105; +pub const EISCONN: c_int = 106; +pub const ENOTCONN: c_int = 107; +pub const ESHUTDOWN: c_int = 108; +pub const ETOOMANYREFS: c_int = 109; +pub const ETIMEDOUT: c_int = 110; +pub const ECONNREFUSED: c_int = 111; +pub const EHOSTDOWN: c_int = 112; +pub const EHOSTUNREACH: c_int = 113; +pub const EALREADY: c_int = 114; +pub const EINPROGRESS: c_int = 115; +pub const ESTALE: c_int = 116; +pub const EUCLEAN: c_int = 117; +pub const ENOTNAM: c_int = 118; +pub const ENAVAIL: c_int = 119; +pub const EISNAM: c_int = 120; +pub const EREMOTEIO: c_int = 121; +pub const EDQUOT: c_int = 122; +pub const ENOMEDIUM: c_int = 123; +pub const EMEDIUMTYPE: c_int = 124; +pub const ECANCELED: c_int = 125; +pub const ENOKEY: c_int = 126; +pub const 
EKEYEXPIRED: c_int = 127; +pub const EKEYREVOKED: c_int = 128; +pub const EKEYREJECTED: c_int = 129; +pub const EOWNERDEAD: c_int = 130; +pub const ENOTRECOVERABLE: c_int = 131; +pub const ERFKILL: c_int = 132; +pub const EHWPOISON: c_int = 133; + +pub const MAP_ANON: c_int = 0x0020; +pub const MAP_GROWSDOWN: c_int = 0x0100; +pub const MAP_DENYWRITE: c_int = 0x0800; +pub const MAP_EXECUTABLE: c_int = 0x01000; +pub const MAP_LOCKED: c_int = 0x80; +pub const MAP_NORESERVE: c_int = 0x40; +pub const MAP_POPULATE: c_int = 0x08000; +pub const MAP_NONBLOCK: c_int = 0x010000; +pub const MAP_STACK: c_int = 0x020000; +pub const MAP_HUGETLB: c_int = 0x040000; +pub const MAP_SYNC: c_int = 0x080000; + +pub const PTRACE_SYSEMU: c_int = 0x1d; +pub const PTRACE_SYSEMU_SINGLESTEP: c_int = 0x1e; + +pub const SOCK_STREAM: c_int = 1; +pub const SOCK_DGRAM: c_int = 2; + +pub const SA_ONSTACK: c_int = 0x08000000; +pub const SA_SIGINFO: c_int = 0x00000004; +pub const SA_NOCLDWAIT: c_int = 0x00000002; + +pub const SIGCHLD: c_int = 17; +pub const SIGBUS: c_int = 7; +pub const SIGTTIN: c_int = 21; +pub const SIGTTOU: c_int = 22; +pub const SIGXCPU: c_int = 24; +pub const SIGXFSZ: c_int = 25; +pub const SIGVTALRM: c_int = 26; +pub const SIGPROF: c_int = 27; +pub const SIGWINCH: c_int = 28; +pub const SIGUSR1: c_int = 10; +pub const SIGUSR2: c_int = 12; +pub const SIGCONT: c_int = 18; +pub const SIGSTOP: c_int = 19; +pub const SIGTSTP: c_int = 20; +pub const SIGURG: c_int = 23; +pub const SIGIO: c_int = 29; +pub const SIGSYS: c_int = 31; +pub const SIGSTKFLT: c_int = 16; +pub const SIGPOLL: c_int = 29; +pub const SIGPWR: c_int = 30; +pub const SIG_SETMASK: c_int = 2; +pub const SIG_BLOCK: c_int = 0x000000; +pub const SIG_UNBLOCK: c_int = 0x01; + +pub const F_GETLK: c_int = 5; +pub const F_GETOWN: c_int = 9; +pub const F_SETLK: c_int = 6; +pub const F_SETLKW: c_int = 7; +pub const F_SETOWN: c_int = 8; + +pub const VEOF: usize = 4; + +pub const POLLWRNORM: c_short = 0x100; +pub const POLLWRBAND: c_short = 0x200; + +pub const SIGSTKSZ: size_t = 10240; +pub const MINSIGSTKSZ: size_t = 4096; + +// Syscall table +pub const SYS_restart_syscall: c_long = 0; +pub const SYS_exit: c_long = 1; +pub const SYS_fork: c_long = 2; +pub const SYS_read: c_long = 3; +pub const SYS_write: c_long = 4; +pub const SYS_open: c_long = 5; +pub const SYS_close: c_long = 6; +pub const SYS_waitpid: c_long = 7; +pub const SYS_creat: c_long = 8; +pub const SYS_link: c_long = 9; +pub const SYS_unlink: c_long = 10; +pub const SYS_execve: c_long = 11; +pub const SYS_chdir: c_long = 12; +pub const SYS_time: c_long = 13; +pub const SYS_mknod: c_long = 14; +pub const SYS_chmod: c_long = 15; +pub const SYS_lchown: c_long = 16; +pub const SYS_break: c_long = 17; +pub const SYS_oldstat: c_long = 18; +pub const SYS_lseek: c_long = 19; +pub const SYS_getpid: c_long = 20; +pub const SYS_mount: c_long = 21; +pub const SYS_umount: c_long = 22; +pub const SYS_setuid: c_long = 23; +pub const SYS_getuid: c_long = 24; +pub const SYS_stime: c_long = 25; +pub const SYS_ptrace: c_long = 26; +pub const SYS_alarm: c_long = 27; +pub const SYS_oldfstat: c_long = 28; +pub const SYS_pause: c_long = 29; +pub const SYS_utime: c_long = 30; +pub const SYS_stty: c_long = 31; +pub const SYS_gtty: c_long = 32; +pub const SYS_access: c_long = 33; +pub const SYS_nice: c_long = 34; +pub const SYS_ftime: c_long = 35; +pub const SYS_sync: c_long = 36; +pub const SYS_kill: c_long = 37; +pub const SYS_rename: c_long = 38; +pub const SYS_mkdir: c_long = 39; +pub const SYS_rmdir: c_long = 
40; +pub const SYS_dup: c_long = 41; +pub const SYS_pipe: c_long = 42; +pub const SYS_times: c_long = 43; +pub const SYS_prof: c_long = 44; +pub const SYS_brk: c_long = 45; +pub const SYS_setgid: c_long = 46; +pub const SYS_getgid: c_long = 47; +pub const SYS_signal: c_long = 48; +pub const SYS_geteuid: c_long = 49; +pub const SYS_getegid: c_long = 50; +pub const SYS_acct: c_long = 51; +pub const SYS_umount2: c_long = 52; +pub const SYS_lock: c_long = 53; +pub const SYS_ioctl: c_long = 54; +pub const SYS_fcntl: c_long = 55; +pub const SYS_mpx: c_long = 56; +pub const SYS_setpgid: c_long = 57; +pub const SYS_ulimit: c_long = 58; +pub const SYS_oldolduname: c_long = 59; +pub const SYS_umask: c_long = 60; +pub const SYS_chroot: c_long = 61; +pub const SYS_ustat: c_long = 62; +pub const SYS_dup2: c_long = 63; +pub const SYS_getppid: c_long = 64; +pub const SYS_getpgrp: c_long = 65; +pub const SYS_setsid: c_long = 66; +pub const SYS_sigaction: c_long = 67; +pub const SYS_sgetmask: c_long = 68; +pub const SYS_ssetmask: c_long = 69; +pub const SYS_setreuid: c_long = 70; +pub const SYS_setregid: c_long = 71; +pub const SYS_sigsuspend: c_long = 72; +pub const SYS_sigpending: c_long = 73; +pub const SYS_sethostname: c_long = 74; +pub const SYS_setrlimit: c_long = 75; +pub const SYS_getrlimit: c_long = 76; +pub const SYS_getrusage: c_long = 77; +pub const SYS_gettimeofday: c_long = 78; +pub const SYS_settimeofday: c_long = 79; +pub const SYS_getgroups: c_long = 80; +pub const SYS_setgroups: c_long = 81; +pub const SYS_select: c_long = 82; +pub const SYS_symlink: c_long = 83; +pub const SYS_oldlstat: c_long = 84; +pub const SYS_readlink: c_long = 85; +pub const SYS_uselib: c_long = 86; +pub const SYS_swapon: c_long = 87; +pub const SYS_reboot: c_long = 88; +pub const SYS_readdir: c_long = 89; +pub const SYS_mmap: c_long = 90; +pub const SYS_munmap: c_long = 91; +pub const SYS_truncate: c_long = 92; +pub const SYS_ftruncate: c_long = 93; +pub const SYS_fchmod: c_long = 94; +pub const SYS_fchown: c_long = 95; +pub const SYS_getpriority: c_long = 96; +pub const SYS_setpriority: c_long = 97; +pub const SYS_profil: c_long = 98; +pub const SYS_statfs: c_long = 99; +pub const SYS_fstatfs: c_long = 100; +pub const SYS_ioperm: c_long = 101; +pub const SYS_socketcall: c_long = 102; +pub const SYS_syslog: c_long = 103; +pub const SYS_setitimer: c_long = 104; +pub const SYS_getitimer: c_long = 105; +pub const SYS_stat: c_long = 106; +pub const SYS_lstat: c_long = 107; +pub const SYS_fstat: c_long = 108; +pub const SYS_olduname: c_long = 109; +pub const SYS_iopl: c_long = 110; +pub const SYS_vhangup: c_long = 111; +pub const SYS_idle: c_long = 112; +pub const SYS_vm86: c_long = 113; +pub const SYS_wait4: c_long = 114; +pub const SYS_swapoff: c_long = 115; +pub const SYS_sysinfo: c_long = 116; +pub const SYS_ipc: c_long = 117; +pub const SYS_fsync: c_long = 118; +pub const SYS_sigreturn: c_long = 119; +pub const SYS_clone: c_long = 120; +pub const SYS_setdomainname: c_long = 121; +pub const SYS_uname: c_long = 122; +pub const SYS_modify_ldt: c_long = 123; +pub const SYS_adjtimex: c_long = 124; +pub const SYS_mprotect: c_long = 125; +pub const SYS_sigprocmask: c_long = 126; +#[deprecated(since = "0.2.70", note = "Functional up to 2.6 kernel")] +pub const SYS_create_module: c_long = 127; +pub const SYS_init_module: c_long = 128; +pub const SYS_delete_module: c_long = 129; +#[deprecated(since = "0.2.70", note = "Functional up to 2.6 kernel")] +pub const SYS_get_kernel_syms: c_long = 130; +pub const SYS_quotactl: 
c_long = 131; +pub const SYS_getpgid: c_long = 132; +pub const SYS_fchdir: c_long = 133; +pub const SYS_bdflush: c_long = 134; +pub const SYS_sysfs: c_long = 135; +pub const SYS_personality: c_long = 136; +pub const SYS_afs_syscall: c_long = 137; /* Syscall for Andrew File System */ +pub const SYS_setfsuid: c_long = 138; +pub const SYS_setfsgid: c_long = 139; +pub const SYS__llseek: c_long = 140; +pub const SYS_getdents: c_long = 141; +pub const SYS__newselect: c_long = 142; +pub const SYS_flock: c_long = 143; +pub const SYS_msync: c_long = 144; +pub const SYS_readv: c_long = 145; +pub const SYS_writev: c_long = 146; +pub const SYS_getsid: c_long = 147; +pub const SYS_fdatasync: c_long = 148; +pub const SYS__sysctl: c_long = 149; +pub const SYS_mlock: c_long = 150; +pub const SYS_munlock: c_long = 151; +pub const SYS_mlockall: c_long = 152; +pub const SYS_munlockall: c_long = 153; +pub const SYS_sched_setparam: c_long = 154; +pub const SYS_sched_getparam: c_long = 155; +pub const SYS_sched_setscheduler: c_long = 156; +pub const SYS_sched_getscheduler: c_long = 157; +pub const SYS_sched_yield: c_long = 158; +pub const SYS_sched_get_priority_max: c_long = 159; +pub const SYS_sched_get_priority_min: c_long = 160; +pub const SYS_sched_rr_get_interval: c_long = 161; +pub const SYS_nanosleep: c_long = 162; +pub const SYS_mremap: c_long = 163; +pub const SYS_setresuid: c_long = 164; +pub const SYS_getresuid: c_long = 165; +#[deprecated(since = "0.2.70", note = "Functional up to 2.6 kernel")] +pub const SYS_query_module: c_long = 166; +pub const SYS_poll: c_long = 167; +pub const SYS_nfsservctl: c_long = 168; +pub const SYS_setresgid: c_long = 169; +pub const SYS_getresgid: c_long = 170; +pub const SYS_prctl: c_long = 171; +pub const SYS_rt_sigreturn: c_long = 172; +pub const SYS_rt_sigaction: c_long = 173; +pub const SYS_rt_sigprocmask: c_long = 174; +pub const SYS_rt_sigpending: c_long = 175; +pub const SYS_rt_sigtimedwait: c_long = 176; +pub const SYS_rt_sigqueueinfo: c_long = 177; +pub const SYS_rt_sigsuspend: c_long = 178; +pub const SYS_pread64: c_long = 179; +pub const SYS_pwrite64: c_long = 180; +pub const SYS_chown: c_long = 181; +pub const SYS_getcwd: c_long = 182; +pub const SYS_capget: c_long = 183; +pub const SYS_capset: c_long = 184; +pub const SYS_sigaltstack: c_long = 185; +pub const SYS_sendfile: c_long = 186; +pub const SYS_getpmsg: c_long = 187; /* some people actually want streams */ +pub const SYS_putpmsg: c_long = 188; /* some people actually want streams */ +pub const SYS_vfork: c_long = 189; +pub const SYS_ugetrlimit: c_long = 190; /* SuS compliant getrlimit */ +pub const SYS_readahead: c_long = 191; +pub const SYS_pciconfig_read: c_long = 198; +pub const SYS_pciconfig_write: c_long = 199; +pub const SYS_pciconfig_iobase: c_long = 200; +pub const SYS_multiplexer: c_long = 201; +pub const SYS_getdents64: c_long = 202; +pub const SYS_pivot_root: c_long = 203; +pub const SYS_madvise: c_long = 205; +pub const SYS_mincore: c_long = 206; +pub const SYS_gettid: c_long = 207; +pub const SYS_tkill: c_long = 208; +pub const SYS_setxattr: c_long = 209; +pub const SYS_lsetxattr: c_long = 210; +pub const SYS_fsetxattr: c_long = 211; +pub const SYS_getxattr: c_long = 212; +pub const SYS_lgetxattr: c_long = 213; +pub const SYS_fgetxattr: c_long = 214; +pub const SYS_listxattr: c_long = 215; +pub const SYS_llistxattr: c_long = 216; +pub const SYS_flistxattr: c_long = 217; +pub const SYS_removexattr: c_long = 218; +pub const SYS_lremovexattr: c_long = 219; +pub const SYS_fremovexattr: 
c_long = 220; +pub const SYS_futex: c_long = 221; +pub const SYS_sched_setaffinity: c_long = 222; +pub const SYS_sched_getaffinity: c_long = 223; +pub const SYS_tuxcall: c_long = 225; +pub const SYS_io_setup: c_long = 227; +pub const SYS_io_destroy: c_long = 228; +pub const SYS_io_getevents: c_long = 229; +pub const SYS_io_submit: c_long = 230; +pub const SYS_io_cancel: c_long = 231; +pub const SYS_set_tid_address: c_long = 232; +pub const SYS_exit_group: c_long = 234; +pub const SYS_lookup_dcookie: c_long = 235; +pub const SYS_epoll_create: c_long = 236; +pub const SYS_epoll_ctl: c_long = 237; +pub const SYS_epoll_wait: c_long = 238; +pub const SYS_remap_file_pages: c_long = 239; +pub const SYS_timer_create: c_long = 240; +pub const SYS_timer_settime: c_long = 241; +pub const SYS_timer_gettime: c_long = 242; +pub const SYS_timer_getoverrun: c_long = 243; +pub const SYS_timer_delete: c_long = 244; +pub const SYS_clock_settime: c_long = 245; +pub const SYS_clock_gettime: c_long = 246; +pub const SYS_clock_getres: c_long = 247; +pub const SYS_clock_nanosleep: c_long = 248; +pub const SYS_swapcontext: c_long = 249; +pub const SYS_tgkill: c_long = 250; +pub const SYS_utimes: c_long = 251; +pub const SYS_statfs64: c_long = 252; +pub const SYS_fstatfs64: c_long = 253; +pub const SYS_rtas: c_long = 255; +pub const SYS_sys_debug_setcontext: c_long = 256; +pub const SYS_migrate_pages: c_long = 258; +pub const SYS_mbind: c_long = 259; +pub const SYS_get_mempolicy: c_long = 260; +pub const SYS_set_mempolicy: c_long = 261; +pub const SYS_mq_open: c_long = 262; +pub const SYS_mq_unlink: c_long = 263; +pub const SYS_mq_timedsend: c_long = 264; +pub const SYS_mq_timedreceive: c_long = 265; +pub const SYS_mq_notify: c_long = 266; +pub const SYS_mq_getsetattr: c_long = 267; +pub const SYS_kexec_load: c_long = 268; +pub const SYS_add_key: c_long = 269; +pub const SYS_request_key: c_long = 270; +pub const SYS_keyctl: c_long = 271; +pub const SYS_waitid: c_long = 272; +pub const SYS_ioprio_set: c_long = 273; +pub const SYS_ioprio_get: c_long = 274; +pub const SYS_inotify_init: c_long = 275; +pub const SYS_inotify_add_watch: c_long = 276; +pub const SYS_inotify_rm_watch: c_long = 277; +pub const SYS_spu_run: c_long = 278; +pub const SYS_spu_create: c_long = 279; +pub const SYS_pselect6: c_long = 280; +pub const SYS_ppoll: c_long = 281; +pub const SYS_unshare: c_long = 282; +pub const SYS_splice: c_long = 283; +pub const SYS_tee: c_long = 284; +pub const SYS_vmsplice: c_long = 285; +pub const SYS_openat: c_long = 286; +pub const SYS_mkdirat: c_long = 287; +pub const SYS_mknodat: c_long = 288; +pub const SYS_fchownat: c_long = 289; +pub const SYS_futimesat: c_long = 290; +pub const SYS_newfstatat: c_long = 291; +pub const SYS_unlinkat: c_long = 292; +pub const SYS_renameat: c_long = 293; +pub const SYS_linkat: c_long = 294; +pub const SYS_symlinkat: c_long = 295; +pub const SYS_readlinkat: c_long = 296; +pub const SYS_fchmodat: c_long = 297; +pub const SYS_faccessat: c_long = 298; +pub const SYS_get_robust_list: c_long = 299; +pub const SYS_set_robust_list: c_long = 300; +pub const SYS_move_pages: c_long = 301; +pub const SYS_getcpu: c_long = 302; +pub const SYS_epoll_pwait: c_long = 303; +pub const SYS_utimensat: c_long = 304; +pub const SYS_signalfd: c_long = 305; +pub const SYS_timerfd_create: c_long = 306; +pub const SYS_eventfd: c_long = 307; +pub const SYS_sync_file_range2: c_long = 308; +pub const SYS_fallocate: c_long = 309; +pub const SYS_subpage_prot: c_long = 310; +pub const SYS_timerfd_settime: 
c_long = 311; +pub const SYS_timerfd_gettime: c_long = 312; +pub const SYS_signalfd4: c_long = 313; +pub const SYS_eventfd2: c_long = 314; +pub const SYS_epoll_create1: c_long = 315; +pub const SYS_dup3: c_long = 316; +pub const SYS_pipe2: c_long = 317; +pub const SYS_inotify_init1: c_long = 318; +pub const SYS_perf_event_open: c_long = 319; +pub const SYS_preadv: c_long = 320; +pub const SYS_pwritev: c_long = 321; +pub const SYS_rt_tgsigqueueinfo: c_long = 322; +pub const SYS_fanotify_init: c_long = 323; +pub const SYS_fanotify_mark: c_long = 324; +pub const SYS_prlimit64: c_long = 325; +pub const SYS_socket: c_long = 326; +pub const SYS_bind: c_long = 327; +pub const SYS_connect: c_long = 328; +pub const SYS_listen: c_long = 329; +pub const SYS_accept: c_long = 330; +pub const SYS_getsockname: c_long = 331; +pub const SYS_getpeername: c_long = 332; +pub const SYS_socketpair: c_long = 333; +pub const SYS_send: c_long = 334; +pub const SYS_sendto: c_long = 335; +pub const SYS_recv: c_long = 336; +pub const SYS_recvfrom: c_long = 337; +pub const SYS_shutdown: c_long = 338; +pub const SYS_setsockopt: c_long = 339; +pub const SYS_getsockopt: c_long = 340; +pub const SYS_sendmsg: c_long = 341; +pub const SYS_recvmsg: c_long = 342; +pub const SYS_recvmmsg: c_long = 343; +pub const SYS_accept4: c_long = 344; +pub const SYS_name_to_handle_at: c_long = 345; +pub const SYS_open_by_handle_at: c_long = 346; +pub const SYS_clock_adjtime: c_long = 347; +pub const SYS_syncfs: c_long = 348; +pub const SYS_sendmmsg: c_long = 349; +pub const SYS_setns: c_long = 350; +pub const SYS_process_vm_readv: c_long = 351; +pub const SYS_process_vm_writev: c_long = 352; +pub const SYS_finit_module: c_long = 353; +pub const SYS_kcmp: c_long = 354; +pub const SYS_sched_setattr: c_long = 355; +pub const SYS_sched_getattr: c_long = 356; +pub const SYS_renameat2: c_long = 357; +pub const SYS_seccomp: c_long = 358; +pub const SYS_getrandom: c_long = 359; +pub const SYS_memfd_create: c_long = 360; +pub const SYS_bpf: c_long = 361; +pub const SYS_execveat: c_long = 362; +pub const SYS_switch_endian: c_long = 363; +pub const SYS_userfaultfd: c_long = 364; +pub const SYS_membarrier: c_long = 365; +pub const SYS_mlock2: c_long = 378; +pub const SYS_copy_file_range: c_long = 379; +pub const SYS_preadv2: c_long = 380; +pub const SYS_pwritev2: c_long = 381; +pub const SYS_kexec_file_load: c_long = 382; +pub const SYS_statx: c_long = 383; +pub const SYS_pkey_alloc: c_long = 384; +pub const SYS_pkey_free: c_long = 385; +pub const SYS_pkey_mprotect: c_long = 386; +pub const SYS_rseq: c_long = 387; +pub const SYS_io_pgetevents: c_long = 388; +pub const SYS_semtimedop: c_long = 392; +pub const SYS_semget: c_long = 393; +pub const SYS_semctl: c_long = 394; +pub const SYS_shmget: c_long = 395; +pub const SYS_shmctl: c_long = 396; +pub const SYS_shmat: c_long = 397; +pub const SYS_shmdt: c_long = 398; +pub const SYS_msgget: c_long = 399; +pub const SYS_msgsnd: c_long = 400; +pub const SYS_msgrcv: c_long = 401; +pub const SYS_msgctl: c_long = 402; +pub const SYS_pidfd_send_signal: c_long = 424; +pub const SYS_io_uring_setup: c_long = 425; +pub const SYS_io_uring_enter: c_long = 426; +pub const SYS_io_uring_register: c_long = 427; +pub const SYS_open_tree: c_long = 428; +pub const SYS_move_mount: c_long = 429; +pub const SYS_fsopen: c_long = 430; +pub const SYS_fsconfig: c_long = 431; +pub const SYS_fsmount: c_long = 432; +pub const SYS_fspick: c_long = 433; +pub const SYS_pidfd_open: c_long = 434; +pub const SYS_clone3: c_long = 435; +pub 
const SYS_close_range: c_long = 436; +pub const SYS_openat2: c_long = 437; +pub const SYS_pidfd_getfd: c_long = 438; +pub const SYS_faccessat2: c_long = 439; +pub const SYS_process_madvise: c_long = 440; +pub const SYS_epoll_pwait2: c_long = 441; +pub const SYS_mount_setattr: c_long = 442; +pub const SYS_quotactl_fd: c_long = 443; +pub const SYS_landlock_create_ruleset: c_long = 444; +pub const SYS_landlock_add_rule: c_long = 445; +pub const SYS_landlock_restrict_self: c_long = 446; +pub const SYS_memfd_secret: c_long = 447; +pub const SYS_process_mrelease: c_long = 448; +pub const SYS_futex_waitv: c_long = 449; +pub const SYS_set_mempolicy_home_node: c_long = 450; + +pub const EDEADLK: c_int = 35; +pub const EDEADLOCK: c_int = 58; + +pub const EXTPROC: crate::tcflag_t = 0x10000000; +pub const VEOL: usize = 6; +pub const VEOL2: usize = 8; +pub const VMIN: usize = 5; +pub const IEXTEN: crate::tcflag_t = 0x00000400; +pub const TOSTOP: crate::tcflag_t = 0x00400000; +pub const FLUSHO: crate::tcflag_t = 0x00800000; + +pub const MCL_CURRENT: c_int = 0x2000; +pub const MCL_FUTURE: c_int = 0x4000; +pub const MCL_ONFAULT: c_int = 0x8000; +pub const CBAUD: crate::tcflag_t = 0xff; +pub const TAB1: c_int = 0x400; +pub const TAB2: c_int = 0x800; +pub const TAB3: c_int = 0xc00; +pub const CR1: c_int = 0x1000; +pub const CR2: c_int = 0x2000; +pub const CR3: c_int = 0x3000; +pub const FF1: c_int = 0x4000; +pub const BS1: c_int = 0x8000; +pub const VT1: c_int = 0x10000; +pub const VWERASE: usize = 10; +pub const VREPRINT: usize = 11; +pub const VSUSP: usize = 12; +pub const VSTART: usize = 13; +pub const VSTOP: usize = 14; +pub const VDISCARD: usize = 16; +pub const VTIME: usize = 7; +pub const IXON: crate::tcflag_t = 0x00000200; +pub const IXOFF: crate::tcflag_t = 0x00000400; +pub const ONLCR: crate::tcflag_t = 0x2; +pub const CSIZE: crate::tcflag_t = 0x00000300; + +pub const CS6: crate::tcflag_t = 0x00000100; +pub const CS7: crate::tcflag_t = 0x00000200; +pub const CS8: crate::tcflag_t = 0x00000300; +pub const CSTOPB: crate::tcflag_t = 0x00000400; +pub const CREAD: crate::tcflag_t = 0x00000800; +pub const PARENB: crate::tcflag_t = 0x00001000; +pub const PARODD: crate::tcflag_t = 0x00002000; +pub const HUPCL: crate::tcflag_t = 0x00004000; +pub const CLOCAL: crate::tcflag_t = 0x00008000; +pub const ECHOKE: crate::tcflag_t = 0x00000001; +pub const ECHOE: crate::tcflag_t = 0x00000002; +pub const ECHOK: crate::tcflag_t = 0x00000004; +pub const ECHONL: crate::tcflag_t = 0x00000010; +pub const ECHOPRT: crate::tcflag_t = 0x00000020; +pub const ECHOCTL: crate::tcflag_t = 0x00000040; +pub const ISIG: crate::tcflag_t = 0x00000080; +pub const ICANON: crate::tcflag_t = 0x00000100; +pub const PENDIN: crate::tcflag_t = 0x20000000; +pub const NOFLSH: crate::tcflag_t = 0x80000000; + +pub const CIBAUD: crate::tcflag_t = 0o77600000; +pub const CBAUDEX: crate::tcflag_t = 0o0000020; +pub const VSWTC: usize = 9; +pub const OLCUC: crate::tcflag_t = 0o000004; +pub const NLDLY: crate::tcflag_t = 0o0001400; +pub const CRDLY: crate::tcflag_t = 0o0030000; +pub const TABDLY: crate::tcflag_t = 0o0006000; +pub const BSDLY: crate::tcflag_t = 0o0100000; +pub const FFDLY: crate::tcflag_t = 0o0040000; +pub const VTDLY: crate::tcflag_t = 0o0200000; +pub const XTABS: crate::tcflag_t = 0o00006000; + +pub const B57600: crate::speed_t = 0o00020; +pub const B115200: crate::speed_t = 0o00021; +pub const B230400: crate::speed_t = 0o00022; +pub const B460800: crate::speed_t = 0o00023; +pub const B500000: crate::speed_t = 0o00024; +pub const 
B576000: crate::speed_t = 0o00025; +pub const B921600: crate::speed_t = 0o00026; +pub const B1000000: crate::speed_t = 0o00027; +pub const B1152000: crate::speed_t = 0o00030; +pub const B1500000: crate::speed_t = 0o00031; +pub const B2000000: crate::speed_t = 0o00032; +pub const B2500000: crate::speed_t = 0o00033; +pub const B3000000: crate::speed_t = 0o00034; +pub const B3500000: crate::speed_t = 0o00035; +pub const B4000000: crate::speed_t = 0o00036; diff --git a/vendor/libc/src/unix/linux_like/linux/musl/b64/riscv64/mod.rs b/vendor/libc/src/unix/linux_like/linux/musl/b64/riscv64/mod.rs new file mode 100644 index 00000000000000..8389af961cf584 --- /dev/null +++ b/vendor/libc/src/unix/linux_like/linux/musl/b64/riscv64/mod.rs @@ -0,0 +1,672 @@ +//! RISC-V-specific definitions for 64-bit linux-like values + +use crate::prelude::*; +use crate::{off64_t, off_t}; + +pub type wchar_t = c_int; + +pub type nlink_t = c_uint; +pub type blksize_t = c_int; +pub type __u64 = c_ulonglong; +pub type __s64 = c_longlong; + +s! { + pub struct stat { + pub st_dev: crate::dev_t, + pub st_ino: crate::ino_t, + pub st_mode: crate::mode_t, + pub st_nlink: crate::nlink_t, + pub st_uid: crate::uid_t, + pub st_gid: crate::gid_t, + pub st_rdev: crate::dev_t, + pub __pad1: crate::dev_t, + pub st_size: off_t, + pub st_blksize: crate::blksize_t, + pub __pad2: c_int, + pub st_blocks: crate::blkcnt_t, + pub st_atime: crate::time_t, + pub st_atime_nsec: c_long, + pub st_mtime: crate::time_t, + pub st_mtime_nsec: c_long, + pub st_ctime: crate::time_t, + pub st_ctime_nsec: c_long, + __unused: [c_int; 2usize], + } + + pub struct stat64 { + pub st_dev: crate::dev_t, + pub st_ino: crate::ino64_t, + pub st_mode: crate::mode_t, + pub st_nlink: crate::nlink_t, + pub st_uid: crate::uid_t, + pub st_gid: crate::gid_t, + pub st_rdev: crate::dev_t, + pub __pad1: crate::dev_t, + pub st_size: off64_t, + pub st_blksize: crate::blksize_t, + pub __pad2: c_int, + pub st_blocks: crate::blkcnt_t, + pub st_atime: crate::time_t, + pub st_atime_nsec: c_long, + pub st_mtime: crate::time_t, + pub st_mtime_nsec: c_long, + pub st_ctime: crate::time_t, + pub st_ctime_nsec: c_long, + __unused: [c_int; 2], + } + + pub struct ipc_perm { + pub __key: crate::key_t, + pub uid: crate::uid_t, + pub gid: crate::gid_t, + pub cuid: crate::uid_t, + pub cgid: crate::gid_t, + pub mode: c_ushort, + __pad1: c_ushort, + pub __seq: c_ushort, + __pad2: c_ushort, + __unused1: c_ulong, + __unused2: c_ulong, + } + + #[repr(align(8))] + pub struct clone_args { + pub flags: c_ulonglong, + pub pidfd: c_ulonglong, + pub child_tid: c_ulonglong, + pub parent_tid: c_ulonglong, + pub exit_signal: c_ulonglong, + pub stack: c_ulonglong, + pub stack_size: c_ulonglong, + pub tls: c_ulonglong, + pub set_tid: c_ulonglong, + pub set_tid_size: c_ulonglong, + pub cgroup: c_ulonglong, + } +} + +s_no_extra_traits! 
{
+    pub struct ucontext_t {
+        pub __uc_flags: c_ulong,
+        pub uc_link: *mut ucontext_t,
+        pub uc_stack: crate::stack_t,
+        pub uc_sigmask: crate::sigset_t,
+        pub uc_mcontext: mcontext_t,
+    }
+
+    #[repr(align(16))]
+    pub struct mcontext_t {
+        pub __gregs: [c_ulong; 32],
+        pub __fpregs: __riscv_mc_fp_state,
+    }
+
+    pub union __riscv_mc_fp_state {
+        pub __f: __riscv_mc_f_ext_state,
+        pub __d: __riscv_mc_d_ext_state,
+        pub __q: __riscv_mc_q_ext_state,
+    }
+
+    pub struct __riscv_mc_f_ext_state {
+        pub __f: [c_uint; 32],
+        pub __fcsr: c_uint,
+    }
+
+    pub struct __riscv_mc_d_ext_state {
+        pub __f: [c_ulonglong; 32],
+        pub __fcsr: c_uint,
+    }
+
+    #[repr(align(16))]
+    pub struct __riscv_mc_q_ext_state {
+        pub __f: [c_ulonglong; 64],
+        pub __fcsr: c_uint,
+        pub __glibc_reserved: [c_uint; 3],
+    }
+}
+
+pub const SYS_read: c_long = 63;
+pub const SYS_write: c_long = 64;
+pub const SYS_close: c_long = 57;
+pub const SYS_fstat: c_long = 80;
+pub const SYS_lseek: c_long = 62;
+pub const SYS_mmap: c_long = 222;
+pub const SYS_mprotect: c_long = 226;
+pub const SYS_munmap: c_long = 215;
+pub const SYS_brk: c_long = 214;
+pub const SYS_rt_sigaction: c_long = 134;
+pub const SYS_rt_sigprocmask: c_long = 135;
+pub const SYS_rt_sigreturn: c_long = 139;
+pub const SYS_ioctl: c_long = 29;
+pub const SYS_pread64: c_long = 67;
+pub const SYS_pwrite64: c_long = 68;
+pub const SYS_readv: c_long = 65;
+pub const SYS_writev: c_long = 66;
+pub const SYS_sched_yield: c_long = 124;
+pub const SYS_mremap: c_long = 216;
+pub const SYS_msync: c_long = 227;
+pub const SYS_mincore: c_long = 232;
+pub const SYS_madvise: c_long = 233;
+pub const SYS_shmget: c_long = 194;
+pub const SYS_shmat: c_long = 196;
+pub const SYS_shmctl: c_long = 195;
+pub const SYS_dup: c_long = 23;
+pub const SYS_nanosleep: c_long = 101;
+pub const SYS_getitimer: c_long = 102;
+pub const SYS_setitimer: c_long = 103;
+pub const SYS_getpid: c_long = 172;
+pub const SYS_sendfile: c_long = 71;
+pub const SYS_socket: c_long = 198;
+pub const SYS_connect: c_long = 203;
+pub const SYS_accept: c_long = 202;
+pub const SYS_sendto: c_long = 206;
+pub const SYS_recvfrom: c_long = 207;
+pub const SYS_sendmsg: c_long = 211;
+pub const SYS_recvmsg: c_long = 212;
+pub const SYS_shutdown: c_long = 210;
+pub const SYS_bind: c_long = 200;
+pub const SYS_listen: c_long = 201;
+pub const SYS_getsockname: c_long = 204;
+pub const SYS_getpeername: c_long = 205;
+pub const SYS_socketpair: c_long = 199;
+pub const SYS_setsockopt: c_long = 208;
+pub const SYS_getsockopt: c_long = 209;
+pub const SYS_clone: c_long = 220;
+pub const SYS_execve: c_long = 221;
+pub const SYS_exit: c_long = 93;
+pub const SYS_wait4: c_long = 260;
+pub const SYS_kill: c_long = 129;
+pub const SYS_uname: c_long = 160;
+pub const SYS_semget: c_long = 190;
+pub const SYS_semop: c_long = 193;
+pub const SYS_semctl: c_long = 191;
+pub const SYS_shmdt: c_long = 197;
+pub const SYS_msgget: c_long = 186;
+pub const SYS_msgsnd: c_long = 189;
+pub const SYS_msgrcv: c_long = 188;
+pub const SYS_msgctl: c_long = 187;
+pub const SYS_fcntl: c_long = 25;
+pub const SYS_flock: c_long = 32;
+pub const SYS_fsync: c_long = 82;
+pub const SYS_fdatasync: c_long = 83;
+pub const SYS_truncate: c_long = 45;
+pub const SYS_ftruncate: c_long = 46;
+pub const SYS_getcwd: c_long = 17;
+pub const SYS_chdir: c_long = 49;
+pub const SYS_fchdir: c_long = 50;
+pub const SYS_fchmod: c_long = 52;
+pub const SYS_fchown: c_long = 55;
+pub const SYS_umask: c_long = 166;
+pub const SYS_gettimeofday: c_long = 169;
+pub const SYS_getrlimit:
c_long = 163; +pub const SYS_getrusage: c_long = 165; +pub const SYS_sysinfo: c_long = 179; +pub const SYS_times: c_long = 153; +pub const SYS_ptrace: c_long = 117; +pub const SYS_getuid: c_long = 174; +pub const SYS_syslog: c_long = 116; +pub const SYS_getgid: c_long = 176; +pub const SYS_setuid: c_long = 146; +pub const SYS_setgid: c_long = 144; +pub const SYS_geteuid: c_long = 175; +pub const SYS_getegid: c_long = 177; +pub const SYS_setpgid: c_long = 154; +pub const SYS_getppid: c_long = 173; +pub const SYS_setsid: c_long = 157; +pub const SYS_setreuid: c_long = 145; +pub const SYS_setregid: c_long = 143; +pub const SYS_getgroups: c_long = 158; +pub const SYS_setgroups: c_long = 159; +pub const SYS_setresuid: c_long = 147; +pub const SYS_getresuid: c_long = 148; +pub const SYS_setresgid: c_long = 149; +pub const SYS_getresgid: c_long = 150; +pub const SYS_getpgid: c_long = 155; +pub const SYS_setfsuid: c_long = 151; +pub const SYS_setfsgid: c_long = 152; +pub const SYS_getsid: c_long = 156; +pub const SYS_capget: c_long = 90; +pub const SYS_capset: c_long = 91; +pub const SYS_rt_sigpending: c_long = 136; +pub const SYS_rt_sigtimedwait: c_long = 137; +pub const SYS_rt_sigqueueinfo: c_long = 138; +pub const SYS_rt_sigsuspend: c_long = 133; +pub const SYS_sigaltstack: c_long = 132; +pub const SYS_personality: c_long = 92; +pub const SYS_statfs: c_long = 43; +pub const SYS_fstatfs: c_long = 44; +pub const SYS_getpriority: c_long = 141; +pub const SYS_setpriority: c_long = 140; +pub const SYS_sched_setparam: c_long = 118; +pub const SYS_sched_getparam: c_long = 121; +pub const SYS_sched_setscheduler: c_long = 119; +pub const SYS_sched_getscheduler: c_long = 120; +pub const SYS_sched_get_priority_max: c_long = 125; +pub const SYS_sched_get_priority_min: c_long = 126; +pub const SYS_sched_rr_get_interval: c_long = 127; +pub const SYS_mlock: c_long = 228; +pub const SYS_munlock: c_long = 229; +pub const SYS_mlockall: c_long = 230; +pub const SYS_munlockall: c_long = 231; +pub const SYS_vhangup: c_long = 58; +pub const SYS_pivot_root: c_long = 41; +pub const SYS_prctl: c_long = 167; +pub const SYS_adjtimex: c_long = 171; +pub const SYS_setrlimit: c_long = 164; +pub const SYS_chroot: c_long = 51; +pub const SYS_sync: c_long = 81; +pub const SYS_acct: c_long = 89; +pub const SYS_settimeofday: c_long = 170; +pub const SYS_mount: c_long = 40; +pub const SYS_umount2: c_long = 39; +pub const SYS_swapon: c_long = 224; +pub const SYS_swapoff: c_long = 225; +pub const SYS_reboot: c_long = 142; +pub const SYS_sethostname: c_long = 161; +pub const SYS_setdomainname: c_long = 162; +pub const SYS_init_module: c_long = 105; +pub const SYS_delete_module: c_long = 106; +pub const SYS_quotactl: c_long = 60; +pub const SYS_nfsservctl: c_long = 42; +pub const SYS_gettid: c_long = 178; +pub const SYS_readahead: c_long = 213; +pub const SYS_setxattr: c_long = 5; +pub const SYS_lsetxattr: c_long = 6; +pub const SYS_fsetxattr: c_long = 7; +pub const SYS_getxattr: c_long = 8; +pub const SYS_lgetxattr: c_long = 9; +pub const SYS_fgetxattr: c_long = 10; +pub const SYS_listxattr: c_long = 11; +pub const SYS_llistxattr: c_long = 12; +pub const SYS_flistxattr: c_long = 13; +pub const SYS_removexattr: c_long = 14; +pub const SYS_lremovexattr: c_long = 15; +pub const SYS_fremovexattr: c_long = 16; +pub const SYS_tkill: c_long = 130; +pub const SYS_futex: c_long = 98; +pub const SYS_sched_setaffinity: c_long = 122; +pub const SYS_sched_getaffinity: c_long = 123; +pub const SYS_io_setup: c_long = 0; +pub const SYS_io_destroy: 
c_long = 1; +pub const SYS_io_getevents: c_long = 4; +pub const SYS_io_submit: c_long = 2; +pub const SYS_io_cancel: c_long = 3; +pub const SYS_lookup_dcookie: c_long = 18; +pub const SYS_remap_file_pages: c_long = 234; +pub const SYS_getdents64: c_long = 61; +pub const SYS_set_tid_address: c_long = 96; +pub const SYS_restart_syscall: c_long = 128; +pub const SYS_semtimedop: c_long = 192; +pub const SYS_fadvise64: c_long = 223; +pub const SYS_timer_create: c_long = 107; +pub const SYS_timer_settime: c_long = 110; +pub const SYS_timer_gettime: c_long = 108; +pub const SYS_timer_getoverrun: c_long = 109; +pub const SYS_timer_delete: c_long = 111; +pub const SYS_clock_settime: c_long = 112; +pub const SYS_clock_gettime: c_long = 113; +pub const SYS_clock_getres: c_long = 114; +pub const SYS_clock_nanosleep: c_long = 115; +pub const SYS_exit_group: c_long = 94; +pub const SYS_epoll_ctl: c_long = 21; +pub const SYS_tgkill: c_long = 131; +pub const SYS_mbind: c_long = 235; +pub const SYS_set_mempolicy: c_long = 237; +pub const SYS_get_mempolicy: c_long = 236; +pub const SYS_mq_open: c_long = 180; +pub const SYS_mq_unlink: c_long = 181; +pub const SYS_mq_timedsend: c_long = 182; +pub const SYS_mq_timedreceive: c_long = 183; +pub const SYS_mq_notify: c_long = 184; +pub const SYS_mq_getsetattr: c_long = 185; +pub const SYS_kexec_load: c_long = 104; +pub const SYS_waitid: c_long = 95; +pub const SYS_add_key: c_long = 217; +pub const SYS_request_key: c_long = 218; +pub const SYS_keyctl: c_long = 219; +pub const SYS_ioprio_set: c_long = 30; +pub const SYS_ioprio_get: c_long = 31; +pub const SYS_inotify_add_watch: c_long = 27; +pub const SYS_inotify_rm_watch: c_long = 28; +pub const SYS_migrate_pages: c_long = 238; +pub const SYS_openat: c_long = 56; +pub const SYS_mkdirat: c_long = 34; +pub const SYS_mknodat: c_long = 33; +pub const SYS_fchownat: c_long = 54; +pub const SYS_newfstatat: c_long = 79; +pub const SYS_unlinkat: c_long = 35; +pub const SYS_linkat: c_long = 37; +pub const SYS_symlinkat: c_long = 36; +pub const SYS_readlinkat: c_long = 78; +pub const SYS_fchmodat: c_long = 53; +pub const SYS_faccessat: c_long = 48; +pub const SYS_pselect6: c_long = 72; +pub const SYS_ppoll: c_long = 73; +pub const SYS_unshare: c_long = 97; +pub const SYS_set_robust_list: c_long = 99; +pub const SYS_get_robust_list: c_long = 100; +pub const SYS_splice: c_long = 76; +pub const SYS_tee: c_long = 77; +pub const SYS_sync_file_range: c_long = 84; +pub const SYS_vmsplice: c_long = 75; +pub const SYS_move_pages: c_long = 239; +pub const SYS_utimensat: c_long = 88; +pub const SYS_epoll_pwait: c_long = 22; +pub const SYS_timerfd_create: c_long = 85; +pub const SYS_fallocate: c_long = 47; +pub const SYS_timerfd_settime: c_long = 86; +pub const SYS_timerfd_gettime: c_long = 87; +pub const SYS_accept4: c_long = 242; +pub const SYS_signalfd4: c_long = 74; +pub const SYS_eventfd2: c_long = 19; +pub const SYS_epoll_create1: c_long = 20; +pub const SYS_dup3: c_long = 24; +pub const SYS_pipe2: c_long = 59; +pub const SYS_inotify_init1: c_long = 26; +pub const SYS_preadv: c_long = 69; +pub const SYS_pwritev: c_long = 70; +pub const SYS_rt_tgsigqueueinfo: c_long = 240; +pub const SYS_perf_event_open: c_long = 241; +pub const SYS_recvmmsg: c_long = 243; +pub const SYS_fanotify_init: c_long = 262; +pub const SYS_fanotify_mark: c_long = 263; +pub const SYS_prlimit64: c_long = 261; +pub const SYS_name_to_handle_at: c_long = 264; +pub const SYS_open_by_handle_at: c_long = 265; +pub const SYS_clock_adjtime: c_long = 266; +pub const 
SYS_syncfs: c_long = 267; +pub const SYS_sendmmsg: c_long = 269; +pub const SYS_setns: c_long = 268; +pub const SYS_getcpu: c_long = 168; +pub const SYS_process_vm_readv: c_long = 270; +pub const SYS_process_vm_writev: c_long = 271; +pub const SYS_kcmp: c_long = 272; +pub const SYS_finit_module: c_long = 273; +pub const SYS_sched_setattr: c_long = 274; +pub const SYS_sched_getattr: c_long = 275; +pub const SYS_renameat2: c_long = 276; +pub const SYS_seccomp: c_long = 277; +pub const SYS_getrandom: c_long = 278; +pub const SYS_memfd_create: c_long = 279; +pub const SYS_bpf: c_long = 280; +pub const SYS_execveat: c_long = 281; +pub const SYS_userfaultfd: c_long = 282; +pub const SYS_membarrier: c_long = 283; +pub const SYS_mlock2: c_long = 284; +pub const SYS_copy_file_range: c_long = 285; +pub const SYS_preadv2: c_long = 286; +pub const SYS_pwritev2: c_long = 287; +pub const SYS_pkey_mprotect: c_long = 288; +pub const SYS_pkey_alloc: c_long = 289; +pub const SYS_pkey_free: c_long = 290; +pub const SYS_statx: c_long = 291; +pub const SYS_io_pgetevents: c_long = 292; +pub const SYS_rseq: c_long = 293; +pub const SYS_pidfd_send_signal: c_long = 424; +pub const SYS_io_uring_setup: c_long = 425; +pub const SYS_io_uring_enter: c_long = 426; +pub const SYS_io_uring_register: c_long = 427; +pub const SYS_open_tree: c_long = 428; +pub const SYS_move_mount: c_long = 429; +pub const SYS_fsopen: c_long = 430; +pub const SYS_fsconfig: c_long = 431; +pub const SYS_fsmount: c_long = 432; +pub const SYS_fspick: c_long = 433; +pub const SYS_pidfd_open: c_long = 434; +pub const SYS_clone3: c_long = 435; +pub const SYS_close_range: c_long = 436; +pub const SYS_openat2: c_long = 437; +pub const SYS_pidfd_getfd: c_long = 438; +pub const SYS_faccessat2: c_long = 439; +pub const SYS_process_madvise: c_long = 440; +pub const SYS_epoll_pwait2: c_long = 441; +pub const SYS_mount_setattr: c_long = 442; +pub const SYS_landlock_create_ruleset: c_long = 444; +pub const SYS_landlock_add_rule: c_long = 445; +pub const SYS_landlock_restrict_self: c_long = 446; + +pub const O_APPEND: c_int = 1024; +pub const O_DIRECT: c_int = 0x4000; +pub const O_DIRECTORY: c_int = 0x10000; +pub const O_LARGEFILE: c_int = 0o100000; +pub const O_NOFOLLOW: c_int = 0x20000; +pub const O_CREAT: c_int = 64; +pub const O_EXCL: c_int = 128; +pub const O_NOCTTY: c_int = 256; +pub const O_NONBLOCK: c_int = 2048; +pub const O_SYNC: c_int = 1052672; +pub const O_RSYNC: c_int = 1052672; +pub const O_DSYNC: c_int = 4096; +pub const O_ASYNC: c_int = 0x2000; + +pub const SIGSTKSZ: size_t = 8192; +pub const MINSIGSTKSZ: size_t = 2048; + +pub const ENAMETOOLONG: c_int = 36; +pub const ENOLCK: c_int = 37; +pub const ENOSYS: c_int = 38; +pub const ENOTEMPTY: c_int = 39; +pub const ELOOP: c_int = 40; +pub const ENOMSG: c_int = 42; +pub const EIDRM: c_int = 43; +pub const ECHRNG: c_int = 44; +pub const EL2NSYNC: c_int = 45; +pub const EL3HLT: c_int = 46; +pub const EL3RST: c_int = 47; +pub const ELNRNG: c_int = 48; +pub const EUNATCH: c_int = 49; +pub const ENOCSI: c_int = 50; +pub const EL2HLT: c_int = 51; +pub const EBADE: c_int = 52; +pub const EBADR: c_int = 53; +pub const EXFULL: c_int = 54; +pub const ENOANO: c_int = 55; +pub const EBADRQC: c_int = 56; +pub const EBADSLT: c_int = 57; +pub const EMULTIHOP: c_int = 72; +pub const EOVERFLOW: c_int = 75; +pub const ENOTUNIQ: c_int = 76; +pub const EBADFD: c_int = 77; +pub const EBADMSG: c_int = 74; +pub const EREMCHG: c_int = 78; +pub const ELIBACC: c_int = 79; +pub const ELIBBAD: c_int = 80; +pub const 
ELIBSCN: c_int = 81; +pub const ELIBMAX: c_int = 82; +pub const ELIBEXEC: c_int = 83; +pub const EILSEQ: c_int = 84; +pub const ERESTART: c_int = 85; +pub const ESTRPIPE: c_int = 86; +pub const EUSERS: c_int = 87; +pub const ENOTSOCK: c_int = 88; +pub const EDESTADDRREQ: c_int = 89; +pub const EMSGSIZE: c_int = 90; +pub const EPROTOTYPE: c_int = 91; +pub const ENOPROTOOPT: c_int = 92; +pub const EPROTONOSUPPORT: c_int = 93; +pub const ESOCKTNOSUPPORT: c_int = 94; +pub const EOPNOTSUPP: c_int = 95; +pub const ENOTSUP: c_int = EOPNOTSUPP; +pub const EPFNOSUPPORT: c_int = 96; +pub const EAFNOSUPPORT: c_int = 97; +pub const EADDRINUSE: c_int = 98; +pub const EADDRNOTAVAIL: c_int = 99; +pub const ENETDOWN: c_int = 100; +pub const ENETUNREACH: c_int = 101; +pub const ENETRESET: c_int = 102; +pub const ECONNABORTED: c_int = 103; +pub const ECONNRESET: c_int = 104; +pub const ENOBUFS: c_int = 105; +pub const EISCONN: c_int = 106; +pub const ENOTCONN: c_int = 107; +pub const ESHUTDOWN: c_int = 108; +pub const ETOOMANYREFS: c_int = 109; +pub const ETIMEDOUT: c_int = 110; +pub const ECONNREFUSED: c_int = 111; +pub const EHOSTDOWN: c_int = 112; +pub const EHOSTUNREACH: c_int = 113; +pub const EALREADY: c_int = 114; +pub const EINPROGRESS: c_int = 115; +pub const ESTALE: c_int = 116; +pub const EUCLEAN: c_int = 117; +pub const ENOTNAM: c_int = 118; +pub const ENAVAIL: c_int = 119; +pub const EISNAM: c_int = 120; +pub const EREMOTEIO: c_int = 121; +pub const EDQUOT: c_int = 122; +pub const ENOMEDIUM: c_int = 123; +pub const EMEDIUMTYPE: c_int = 124; +pub const ECANCELED: c_int = 125; +pub const ENOKEY: c_int = 126; +pub const EKEYEXPIRED: c_int = 127; +pub const EKEYREVOKED: c_int = 128; +pub const EKEYREJECTED: c_int = 129; +pub const EOWNERDEAD: c_int = 130; +pub const ENOTRECOVERABLE: c_int = 131; +pub const EHWPOISON: c_int = 133; +pub const ERFKILL: c_int = 132; + +pub const SA_ONSTACK: c_int = 0x08000000; +pub const SA_SIGINFO: c_int = 0x00000004; +pub const SA_NOCLDWAIT: c_int = 0x00000002; + +pub const SIGCHLD: c_int = 17; +pub const SIGBUS: c_int = 7; +pub const SIGTTIN: c_int = 21; +pub const SIGTTOU: c_int = 22; +pub const SIGXCPU: c_int = 24; +pub const SIGXFSZ: c_int = 25; +pub const SIGVTALRM: c_int = 26; +pub const SIGPROF: c_int = 27; +pub const SIGWINCH: c_int = 28; +pub const SIGUSR1: c_int = 10; +pub const SIGUSR2: c_int = 12; +pub const SIGCONT: c_int = 18; +pub const SIGSTOP: c_int = 19; +pub const SIGTSTP: c_int = 20; +pub const SIGURG: c_int = 23; +pub const SIGIO: c_int = 29; +pub const SIGSYS: c_int = 31; +pub const SIGSTKFLT: c_int = 16; +pub const SIGPOLL: c_int = 29; +pub const SIGPWR: c_int = 30; +pub const SIG_SETMASK: c_int = 2; +pub const SIG_BLOCK: c_int = 0x000000; +pub const SIG_UNBLOCK: c_int = 0x01; + +pub const F_GETLK: c_int = 5; +pub const F_GETOWN: c_int = 9; +pub const F_SETLK: c_int = 6; +pub const F_SETLKW: c_int = 7; +pub const F_SETOWN: c_int = 8; + +pub const VEOF: usize = 4; + +pub const POLLWRNORM: c_short = 0x100; +pub const POLLWRBAND: c_short = 0x200; + +pub const SOCK_STREAM: c_int = 1; +pub const SOCK_DGRAM: c_int = 2; + +pub const MADV_SOFT_OFFLINE: c_int = 101; +pub const MAP_ANON: c_int = 0x0020; +pub const MAP_GROWSDOWN: c_int = 0x0100; +pub const MAP_DENYWRITE: c_int = 0x0800; +pub const MAP_EXECUTABLE: c_int = 0x01000; +pub const MAP_LOCKED: c_int = 0x02000; +pub const MAP_NORESERVE: c_int = 0x04000; +pub const MAP_POPULATE: c_int = 0x08000; +pub const MAP_NONBLOCK: c_int = 0x010000; +pub const MAP_STACK: c_int = 0x020000; +pub const 
MAP_HUGETLB: c_int = 0x040000; +pub const MAP_SYNC: c_int = 0x080000; + +pub const MCL_CURRENT: c_int = 0x0001; +pub const MCL_FUTURE: c_int = 0x0002; +pub const MCL_ONFAULT: c_int = 0x0004; +pub const CBAUD: crate::tcflag_t = 0o0010017; +pub const TAB1: c_int = 0x00000800; +pub const TAB2: c_int = 0x00001000; +pub const TAB3: c_int = 0x00001800; +pub const CR1: c_int = 0x00000200; +pub const CR2: c_int = 0x00000400; +pub const CR3: c_int = 0x00000600; +pub const FF1: c_int = 0x00008000; +pub const BS1: c_int = 0x00002000; +pub const VT1: c_int = 0x00004000; +pub const VWERASE: usize = 14; +pub const VREPRINT: usize = 12; +pub const VSUSP: usize = 10; +pub const VSTART: usize = 8; +pub const VSTOP: usize = 9; +pub const VDISCARD: usize = 13; +pub const VTIME: usize = 5; +pub const IXON: crate::tcflag_t = 0x00000400; +pub const IXOFF: crate::tcflag_t = 0x00001000; +pub const ONLCR: crate::tcflag_t = 0x4; +pub const CSIZE: crate::tcflag_t = 0x00000030; +pub const CS6: crate::tcflag_t = 0x00000010; +pub const CS7: crate::tcflag_t = 0x00000020; +pub const CS8: crate::tcflag_t = 0x00000030; +pub const CSTOPB: crate::tcflag_t = 0x00000040; +pub const CREAD: crate::tcflag_t = 0x00000080; +pub const PARENB: crate::tcflag_t = 0x00000100; +pub const PARODD: crate::tcflag_t = 0x00000200; +pub const HUPCL: crate::tcflag_t = 0x00000400; +pub const CLOCAL: crate::tcflag_t = 0x00000800; +pub const ECHOKE: crate::tcflag_t = 0x00000800; +pub const ECHOE: crate::tcflag_t = 0x00000010; +pub const ECHOK: crate::tcflag_t = 0x00000020; +pub const ECHONL: crate::tcflag_t = 0x00000040; +pub const ECHOPRT: crate::tcflag_t = 0x00000400; +pub const ECHOCTL: crate::tcflag_t = 0x00000200; +pub const ISIG: crate::tcflag_t = 0x00000001; +pub const ICANON: crate::tcflag_t = 0x00000002; +pub const PENDIN: crate::tcflag_t = 0x00004000; +pub const NOFLSH: crate::tcflag_t = 0x00000080; +pub const CIBAUD: crate::tcflag_t = 0o02003600000; +pub const CBAUDEX: crate::tcflag_t = 0o010000; +pub const VSWTC: usize = 7; +pub const OLCUC: crate::tcflag_t = 0o000002; +pub const NLDLY: crate::tcflag_t = 0o000400; +pub const CRDLY: crate::tcflag_t = 0o003000; +pub const TABDLY: crate::tcflag_t = 0o014000; +pub const BSDLY: crate::tcflag_t = 0o020000; +pub const FFDLY: crate::tcflag_t = 0o100000; +pub const VTDLY: crate::tcflag_t = 0o040000; +pub const XTABS: crate::tcflag_t = 0o014000; +pub const B57600: crate::speed_t = 0o010001; +pub const B115200: crate::speed_t = 0o010002; +pub const B230400: crate::speed_t = 0o010003; +pub const B460800: crate::speed_t = 0o010004; +pub const B500000: crate::speed_t = 0o010005; +pub const B576000: crate::speed_t = 0o010006; +pub const B921600: crate::speed_t = 0o010007; +pub const B1000000: crate::speed_t = 0o010010; +pub const B1152000: crate::speed_t = 0o010011; +pub const B1500000: crate::speed_t = 0o010012; +pub const B2000000: crate::speed_t = 0o010013; +pub const B2500000: crate::speed_t = 0o010014; +pub const B3000000: crate::speed_t = 0o010015; +pub const B3500000: crate::speed_t = 0o010016; +pub const B4000000: crate::speed_t = 0o010017; + +pub const EDEADLK: c_int = 35; +pub const EDEADLOCK: c_int = EDEADLK; +pub const EXTPROC: crate::tcflag_t = 0x00010000; +pub const VEOL: usize = 11; +pub const VEOL2: usize = 16; +pub const VMIN: usize = 6; +pub const IEXTEN: crate::tcflag_t = 0x00008000; +pub const TOSTOP: crate::tcflag_t = 0x00000100; +pub const FLUSHO: crate::tcflag_t = 0x00001000; + +pub const NGREG: usize = 32; +pub const REG_PC: usize = 0; +pub const REG_RA: usize = 1; +pub const 
REG_SP: usize = 2; +pub const REG_TP: usize = 4; +pub const REG_S0: usize = 8; +pub const REG_S1: usize = 9; +pub const REG_A0: usize = 10; +pub const REG_S2: usize = 18; +pub const REG_NARGS: usize = 8; diff --git a/vendor/libc/src/unix/linux_like/linux/musl/b64/s390x.rs b/vendor/libc/src/unix/linux_like/linux/musl/b64/s390x.rs new file mode 100644 index 00000000000000..06cc61685b7ac9 --- /dev/null +++ b/vendor/libc/src/unix/linux_like/linux/musl/b64/s390x.rs @@ -0,0 +1,732 @@ +use crate::off_t; +use crate::prelude::*; + +pub type blksize_t = i64; +pub type nlink_t = u64; +pub type wchar_t = i32; +pub type greg_t = u64; +pub type __u64 = u64; +pub type __s64 = i64; +pub type statfs64 = statfs; + +s! { + pub struct ipc_perm { + #[cfg(musl_v1_2_3)] + pub __key: crate::key_t, + #[cfg(not(musl_v1_2_3))] + #[deprecated( + since = "0.2.173", + note = "This field is incorrectly named and will be changed + to __key in a future release." + )] + pub __ipc_perm_key: crate::key_t, + pub uid: crate::uid_t, + pub gid: crate::gid_t, + pub cuid: crate::uid_t, + pub cgid: crate::gid_t, + pub mode: crate::mode_t, + pub __seq: c_int, + __pad1: c_long, + __pad2: c_long, + } + + pub struct stat { + pub st_dev: crate::dev_t, + pub st_ino: crate::ino_t, + pub st_nlink: crate::nlink_t, + pub st_mode: crate::mode_t, + pub st_uid: crate::uid_t, + pub st_gid: crate::gid_t, + pub st_rdev: crate::dev_t, + pub st_size: off_t, + pub st_atime: crate::time_t, + pub st_atime_nsec: c_long, + pub st_mtime: crate::time_t, + pub st_mtime_nsec: c_long, + pub st_ctime: crate::time_t, + pub st_ctime_nsec: c_long, + pub st_blksize: crate::blksize_t, + pub st_blocks: crate::blkcnt_t, + __unused: [c_long; 3], + } + + pub struct stat64 { + pub st_dev: crate::dev_t, + pub st_ino: crate::ino64_t, + pub st_nlink: crate::nlink_t, + pub st_mode: crate::mode_t, + pub st_uid: crate::uid_t, + pub st_gid: crate::gid_t, + pub st_rdev: crate::dev_t, + pub st_size: off_t, + pub st_atime: crate::time_t, + pub st_atime_nsec: c_long, + pub st_mtime: crate::time_t, + pub st_mtime_nsec: c_long, + pub st_ctime: crate::time_t, + pub st_ctime_nsec: c_long, + pub st_blksize: crate::blksize_t, + pub st_blocks: crate::blkcnt64_t, + __unused: [c_long; 3], + } + + pub struct statfs { + pub f_type: c_uint, + pub f_bsize: c_uint, + pub f_blocks: crate::fsblkcnt_t, + pub f_bfree: crate::fsblkcnt_t, + pub f_bavail: crate::fsblkcnt_t, + pub f_files: crate::fsfilcnt_t, + pub f_ffree: crate::fsfilcnt_t, + pub f_fsid: crate::fsid_t, + pub f_namelen: c_uint, + pub f_frsize: c_uint, + pub f_flags: c_uint, + pub f_spare: [c_uint; 4], + } +} + +s_no_extra_traits! { + pub union fpreg_t { + pub d: c_double, + pub f: c_float, + } +} + +cfg_if! 
{ + if #[cfg(feature = "extra_traits")] { + impl PartialEq for fpreg_t { + fn eq(&self, _other: &fpreg_t) -> bool { + unimplemented!("traits") + } + } + + impl Eq for fpreg_t {} + + impl hash::Hash for fpreg_t { + fn hash(&self, _state: &mut H) { + unimplemented!("traits") + } + } + } +} + +pub const VEOF: usize = 4; + +pub const EUCLEAN: c_int = 117; +pub const ENOTNAM: c_int = 118; +pub const ENAVAIL: c_int = 119; +pub const EISNAM: c_int = 120; +pub const EREMOTEIO: c_int = 121; +pub const EADDRINUSE: c_int = 98; +pub const EADDRNOTAVAIL: c_int = 99; +pub const ECONNABORTED: c_int = 103; +pub const ECONNREFUSED: c_int = 111; +pub const ECONNRESET: c_int = 104; +pub const EDEADLK: c_int = 35; +pub const ENOSYS: c_int = 38; +pub const ENOTCONN: c_int = 107; +pub const ETIMEDOUT: c_int = 110; +pub const O_APPEND: c_int = 1024; +pub const O_CREAT: c_int = 64; +pub const O_EXCL: c_int = 128; +pub const O_LARGEFILE: c_int = 0x8000; +pub const O_NONBLOCK: c_int = 2048; +pub const SA_NOCLDWAIT: c_int = 2; +pub const SA_ONSTACK: c_int = 0x08000000; +pub const SA_SIGINFO: c_int = 4; +pub const SIGBUS: c_int = 7; +pub const SIGSTKSZ: size_t = 0x2000; +pub const MINSIGSTKSZ: size_t = 2048; +pub const SIG_SETMASK: c_int = 2; + +pub const SOCK_STREAM: c_int = 1; +pub const SOCK_DGRAM: c_int = 2; + +pub const O_NOCTTY: c_int = 256; +pub const O_SYNC: c_int = 1052672; +pub const O_RSYNC: c_int = 1052672; +pub const O_DSYNC: c_int = 4096; +pub const O_DIRECT: c_int = 0x4000; +pub const O_DIRECTORY: c_int = 0x10000; +pub const O_NOFOLLOW: c_int = 0x20000; + +pub const MADV_SOFT_OFFLINE: c_int = 101; +pub const MAP_GROWSDOWN: c_int = 0x0100; +pub const MAP_LOCKED: c_int = 0x02000; +pub const MAP_NORESERVE: c_int = 0x04000; +pub const MAP_ANON: c_int = 0x0020; +pub const MAP_DENYWRITE: c_int = 0x0800; +pub const MAP_EXECUTABLE: c_int = 0x01000; +pub const MAP_POPULATE: c_int = 0x08000; +pub const MAP_NONBLOCK: c_int = 0x010000; +pub const MAP_STACK: c_int = 0x020000; +pub const MAP_HUGETLB: c_int = 0x040000; +pub const MAP_SYNC: c_int = 0x080000; + +pub const PTRACE_SYSEMU: c_int = 31; +pub const PTRACE_SYSEMU_SINGLESTEP: c_int = 32; + +pub const EDEADLOCK: c_int = 35; +pub const ENAMETOOLONG: c_int = 36; +pub const ENOLCK: c_int = 37; +pub const ENOTEMPTY: c_int = 39; +pub const ELOOP: c_int = 40; +pub const ENOMSG: c_int = 42; +pub const EIDRM: c_int = 43; +pub const ECHRNG: c_int = 44; +pub const EL2NSYNC: c_int = 45; +pub const EL3HLT: c_int = 46; +pub const EL3RST: c_int = 47; +pub const ELNRNG: c_int = 48; +pub const EUNATCH: c_int = 49; +pub const ENOCSI: c_int = 50; +pub const EL2HLT: c_int = 51; +pub const EBADE: c_int = 52; +pub const EBADR: c_int = 53; +pub const EXFULL: c_int = 54; +pub const ENOANO: c_int = 55; +pub const EBADRQC: c_int = 56; +pub const EBADSLT: c_int = 57; +pub const EMULTIHOP: c_int = 72; +pub const EOVERFLOW: c_int = 75; +pub const ENOTUNIQ: c_int = 76; +pub const EBADFD: c_int = 77; +pub const EBADMSG: c_int = 74; +pub const EREMCHG: c_int = 78; +pub const ELIBACC: c_int = 79; +pub const ELIBBAD: c_int = 80; +pub const ELIBSCN: c_int = 81; +pub const ELIBMAX: c_int = 82; +pub const ELIBEXEC: c_int = 83; +pub const EILSEQ: c_int = 84; +pub const ERESTART: c_int = 85; +pub const ESTRPIPE: c_int = 86; +pub const EUSERS: c_int = 87; +pub const ENOTSOCK: c_int = 88; +pub const EDESTADDRREQ: c_int = 89; +pub const EMSGSIZE: c_int = 90; +pub const EPROTOTYPE: c_int = 91; +pub const ENOPROTOOPT: c_int = 92; +pub const EPROTONOSUPPORT: c_int = 93; +pub const ESOCKTNOSUPPORT: c_int 
= 94; +pub const EOPNOTSUPP: c_int = 95; +pub const ENOTSUP: c_int = EOPNOTSUPP; +pub const EPFNOSUPPORT: c_int = 96; +pub const EAFNOSUPPORT: c_int = 97; +pub const ENETDOWN: c_int = 100; +pub const ENETUNREACH: c_int = 101; +pub const ENETRESET: c_int = 102; +pub const ENOBUFS: c_int = 105; +pub const EISCONN: c_int = 106; +pub const ESHUTDOWN: c_int = 108; +pub const ETOOMANYREFS: c_int = 109; +pub const EHOSTDOWN: c_int = 112; +pub const EHOSTUNREACH: c_int = 113; +pub const EALREADY: c_int = 114; +pub const EINPROGRESS: c_int = 115; +pub const ESTALE: c_int = 116; +pub const EDQUOT: c_int = 122; +pub const ENOMEDIUM: c_int = 123; +pub const EMEDIUMTYPE: c_int = 124; +pub const ECANCELED: c_int = 125; +pub const ENOKEY: c_int = 126; +pub const EKEYEXPIRED: c_int = 127; +pub const EKEYREVOKED: c_int = 128; +pub const EKEYREJECTED: c_int = 129; +pub const EOWNERDEAD: c_int = 130; +pub const ENOTRECOVERABLE: c_int = 131; +pub const EHWPOISON: c_int = 133; +pub const ERFKILL: c_int = 132; + +pub const SIGTTIN: c_int = 21; +pub const SIGTTOU: c_int = 22; +pub const SIGXCPU: c_int = 24; +pub const SIGXFSZ: c_int = 25; +pub const SIGVTALRM: c_int = 26; +pub const SIGPROF: c_int = 27; +pub const SIGWINCH: c_int = 28; +pub const SIGCHLD: c_int = 17; +pub const SIGUSR1: c_int = 10; +pub const SIGUSR2: c_int = 12; +pub const SIGCONT: c_int = 18; +pub const SIGSTOP: c_int = 19; +pub const SIGTSTP: c_int = 20; +pub const SIGURG: c_int = 23; +pub const SIGIO: c_int = 29; +pub const SIGSYS: c_int = 31; +pub const SIGSTKFLT: c_int = 16; +pub const SIGPOLL: c_int = 29; +pub const SIGPWR: c_int = 30; +pub const SIG_BLOCK: c_int = 0x000000; +pub const SIG_UNBLOCK: c_int = 0x01; + +pub const O_ASYNC: c_int = 0x2000; + +pub const VEOL: usize = 11; +pub const VEOL2: usize = 16; +pub const VMIN: usize = 6; +pub const IEXTEN: crate::tcflag_t = 0x00008000; +pub const TOSTOP: crate::tcflag_t = 0x00000100; +pub const FLUSHO: crate::tcflag_t = 0x00001000; + +pub const EXTPROC: crate::tcflag_t = 0x00010000; + +pub const MCL_CURRENT: c_int = 0x0001; +pub const MCL_FUTURE: c_int = 0x0002; +pub const MCL_ONFAULT: c_int = 0x0004; + +pub const F_GETLK: c_int = 5; +pub const F_GETOWN: c_int = 9; +pub const F_SETOWN: c_int = 8; +pub const F_SETLK: c_int = 6; +pub const F_SETLKW: c_int = 7; + +pub const VTIME: usize = 5; +pub const VSWTC: usize = 7; +pub const VSTART: usize = 8; +pub const VSTOP: usize = 9; +pub const VSUSP: usize = 10; +pub const VREPRINT: usize = 12; +pub const VDISCARD: usize = 13; +pub const VWERASE: usize = 14; +pub const OLCUC: crate::tcflag_t = 0o000002; +pub const ONLCR: crate::tcflag_t = 0o000004; +pub const NLDLY: crate::tcflag_t = 0o000400; +pub const CRDLY: crate::tcflag_t = 0o003000; +pub const CR1: crate::tcflag_t = 0x00000200; +pub const CR2: crate::tcflag_t = 0x00000400; +pub const CR3: crate::tcflag_t = 0x00000600; +pub const TABDLY: crate::tcflag_t = 0o014000; +pub const TAB1: crate::tcflag_t = 0x00000800; +pub const TAB2: crate::tcflag_t = 0x00001000; +pub const TAB3: crate::tcflag_t = 0x00001800; +pub const BSDLY: crate::tcflag_t = 0o020000; +pub const BS1: crate::tcflag_t = 0x00002000; +pub const FFDLY: crate::tcflag_t = 0o100000; +pub const FF1: crate::tcflag_t = 0x00008000; +pub const VTDLY: crate::tcflag_t = 0o040000; +pub const VT1: crate::tcflag_t = 0x00004000; +pub const XTABS: crate::tcflag_t = 0o014000; + +pub const CBAUD: crate::speed_t = 0o010017; +pub const CSIZE: crate::tcflag_t = 0o000060; +pub const CS6: crate::tcflag_t = 0o000020; +pub const CS7: crate::tcflag_t = 
0o000040; +pub const CS8: crate::tcflag_t = 0o000060; +pub const CSTOPB: crate::tcflag_t = 0o000100; +pub const CREAD: crate::tcflag_t = 0o000200; +pub const PARENB: crate::tcflag_t = 0o000400; +pub const PARODD: crate::tcflag_t = 0o001000; +pub const HUPCL: crate::tcflag_t = 0o002000; +pub const CLOCAL: crate::tcflag_t = 0o004000; +pub const CBAUDEX: crate::tcflag_t = 0o010000; +pub const B57600: crate::speed_t = 0o010001; +pub const B115200: crate::speed_t = 0o010002; +pub const B230400: crate::speed_t = 0o010003; +pub const B460800: crate::speed_t = 0o010004; +pub const B500000: crate::speed_t = 0o010005; +pub const B576000: crate::speed_t = 0o010006; +pub const B921600: crate::speed_t = 0o010007; +pub const B1000000: crate::speed_t = 0o010010; +pub const B1152000: crate::speed_t = 0o010011; +pub const B1500000: crate::speed_t = 0o010012; +pub const B2000000: crate::speed_t = 0o010013; +pub const B2500000: crate::speed_t = 0o010014; +pub const B3000000: crate::speed_t = 0o010015; +pub const B3500000: crate::speed_t = 0o010016; +pub const B4000000: crate::speed_t = 0o010017; +pub const CIBAUD: crate::tcflag_t = 0o02003600000; + +pub const ISIG: crate::tcflag_t = 0o000001; +pub const ICANON: crate::tcflag_t = 0o000002; +pub const XCASE: crate::tcflag_t = 0o000004; +pub const ECHOE: crate::tcflag_t = 0o000020; +pub const ECHOK: crate::tcflag_t = 0o000040; +pub const ECHONL: crate::tcflag_t = 0o000100; +pub const NOFLSH: crate::tcflag_t = 0o000200; +pub const ECHOCTL: crate::tcflag_t = 0o001000; +pub const ECHOPRT: crate::tcflag_t = 0o002000; +pub const ECHOKE: crate::tcflag_t = 0o004000; +pub const PENDIN: crate::tcflag_t = 0o040000; + +pub const POLLWRNORM: c_short = 0x100; +pub const POLLWRBAND: c_short = 0x200; + +pub const IXON: crate::tcflag_t = 0o002000; +pub const IXOFF: crate::tcflag_t = 0o010000; + +pub const SYS_exit: c_long = 1; +pub const SYS_fork: c_long = 2; +pub const SYS_read: c_long = 3; +pub const SYS_write: c_long = 4; +pub const SYS_open: c_long = 5; +pub const SYS_close: c_long = 6; +pub const SYS_restart_syscall: c_long = 7; +pub const SYS_creat: c_long = 8; +pub const SYS_link: c_long = 9; +pub const SYS_unlink: c_long = 10; +pub const SYS_execve: c_long = 11; +pub const SYS_chdir: c_long = 12; +pub const SYS_mknod: c_long = 14; +pub const SYS_chmod: c_long = 15; +pub const SYS_lseek: c_long = 19; +pub const SYS_getpid: c_long = 20; +pub const SYS_mount: c_long = 21; +pub const SYS_umount: c_long = 22; +pub const SYS_ptrace: c_long = 26; +pub const SYS_alarm: c_long = 27; +pub const SYS_pause: c_long = 29; +pub const SYS_utime: c_long = 30; +pub const SYS_access: c_long = 33; +pub const SYS_nice: c_long = 34; +pub const SYS_sync: c_long = 36; +pub const SYS_kill: c_long = 37; +pub const SYS_rename: c_long = 38; +pub const SYS_mkdir: c_long = 39; +pub const SYS_rmdir: c_long = 40; +pub const SYS_dup: c_long = 41; +pub const SYS_pipe: c_long = 42; +pub const SYS_times: c_long = 43; +pub const SYS_brk: c_long = 45; +pub const SYS_signal: c_long = 48; +pub const SYS_acct: c_long = 51; +pub const SYS_umount2: c_long = 52; +pub const SYS_ioctl: c_long = 54; +pub const SYS_fcntl: c_long = 55; +pub const SYS_setpgid: c_long = 57; +pub const SYS_umask: c_long = 60; +pub const SYS_chroot: c_long = 61; +pub const SYS_ustat: c_long = 62; +pub const SYS_dup2: c_long = 63; +pub const SYS_getppid: c_long = 64; +pub const SYS_getpgrp: c_long = 65; +pub const SYS_setsid: c_long = 66; +pub const SYS_sigaction: c_long = 67; +pub const SYS_sigsuspend: c_long = 72; +pub const 
SYS_sigpending: c_long = 73; +pub const SYS_sethostname: c_long = 74; +pub const SYS_setrlimit: c_long = 75; +pub const SYS_getrusage: c_long = 77; +pub const SYS_gettimeofday: c_long = 78; +pub const SYS_settimeofday: c_long = 79; +pub const SYS_symlink: c_long = 83; +pub const SYS_readlink: c_long = 85; +pub const SYS_uselib: c_long = 86; +pub const SYS_swapon: c_long = 87; +pub const SYS_reboot: c_long = 88; +pub const SYS_readdir: c_long = 89; +pub const SYS_mmap: c_long = 90; +pub const SYS_munmap: c_long = 91; +pub const SYS_truncate: c_long = 92; +pub const SYS_ftruncate: c_long = 93; +pub const SYS_fchmod: c_long = 94; +pub const SYS_getpriority: c_long = 96; +pub const SYS_setpriority: c_long = 97; +pub const SYS_statfs: c_long = 99; +pub const SYS_fstatfs: c_long = 100; +pub const SYS_socketcall: c_long = 102; +pub const SYS_syslog: c_long = 103; +pub const SYS_setitimer: c_long = 104; +pub const SYS_getitimer: c_long = 105; +pub const SYS_stat: c_long = 106; +pub const SYS_lstat: c_long = 107; +pub const SYS_fstat: c_long = 108; +pub const SYS_lookup_dcookie: c_long = 110; +pub const SYS_vhangup: c_long = 111; +pub const SYS_idle: c_long = 112; +pub const SYS_wait4: c_long = 114; +pub const SYS_swapoff: c_long = 115; +pub const SYS_sysinfo: c_long = 116; +pub const SYS_ipc: c_long = 117; +pub const SYS_fsync: c_long = 118; +pub const SYS_sigreturn: c_long = 119; +pub const SYS_clone: c_long = 120; +pub const SYS_setdomainname: c_long = 121; +pub const SYS_uname: c_long = 122; +pub const SYS_adjtimex: c_long = 124; +pub const SYS_mprotect: c_long = 125; +pub const SYS_sigprocmask: c_long = 126; +#[deprecated(since = "0.2.70", note = "Functional up to 2.6 kernel")] +pub const SYS_create_module: c_long = 127; +pub const SYS_init_module: c_long = 128; +pub const SYS_delete_module: c_long = 129; +#[deprecated(since = "0.2.70", note = "Functional up to 2.6 kernel")] +pub const SYS_get_kernel_syms: c_long = 130; +pub const SYS_quotactl: c_long = 131; +pub const SYS_getpgid: c_long = 132; +pub const SYS_fchdir: c_long = 133; +pub const SYS_bdflush: c_long = 134; +pub const SYS_sysfs: c_long = 135; +pub const SYS_personality: c_long = 136; +pub const SYS_afs_syscall: c_long = 137; /* Syscall for Andrew File System */ +pub const SYS_getdents: c_long = 141; +pub const SYS_select: c_long = 142; +pub const SYS_flock: c_long = 143; +pub const SYS_msync: c_long = 144; +pub const SYS_readv: c_long = 145; +pub const SYS_writev: c_long = 146; +pub const SYS_getsid: c_long = 147; +pub const SYS_fdatasync: c_long = 148; +pub const SYS__sysctl: c_long = 149; +pub const SYS_mlock: c_long = 150; +pub const SYS_munlock: c_long = 151; +pub const SYS_mlockall: c_long = 152; +pub const SYS_munlockall: c_long = 153; +pub const SYS_sched_setparam: c_long = 154; +pub const SYS_sched_getparam: c_long = 155; +pub const SYS_sched_setscheduler: c_long = 156; +pub const SYS_sched_getscheduler: c_long = 157; +pub const SYS_sched_yield: c_long = 158; +pub const SYS_sched_get_priority_max: c_long = 159; +pub const SYS_sched_get_priority_min: c_long = 160; +pub const SYS_sched_rr_get_interval: c_long = 161; +pub const SYS_nanosleep: c_long = 162; +pub const SYS_mremap: c_long = 163; +#[deprecated(since = "0.2.70", note = "Functional up to 2.6 kernel")] +pub const SYS_query_module: c_long = 167; +pub const SYS_poll: c_long = 168; +pub const SYS_nfsservctl: c_long = 169; +pub const SYS_prctl: c_long = 172; +pub const SYS_rt_sigreturn: c_long = 173; +pub const SYS_rt_sigaction: c_long = 174; +pub const 
SYS_rt_sigprocmask: c_long = 175; +pub const SYS_rt_sigpending: c_long = 176; +pub const SYS_rt_sigtimedwait: c_long = 177; +pub const SYS_rt_sigqueueinfo: c_long = 178; +pub const SYS_rt_sigsuspend: c_long = 179; +pub const SYS_pread64: c_long = 180; +pub const SYS_pwrite64: c_long = 181; +pub const SYS_getcwd: c_long = 183; +pub const SYS_capget: c_long = 184; +pub const SYS_capset: c_long = 185; +pub const SYS_sigaltstack: c_long = 186; +pub const SYS_sendfile: c_long = 187; +pub const SYS_getpmsg: c_long = 188; +pub const SYS_putpmsg: c_long = 189; +pub const SYS_vfork: c_long = 190; +pub const SYS_getrlimit: c_long = 191; +pub const SYS_lchown: c_long = 198; +pub const SYS_getuid: c_long = 199; +pub const SYS_getgid: c_long = 200; +pub const SYS_geteuid: c_long = 201; +pub const SYS_getegid: c_long = 202; +pub const SYS_setreuid: c_long = 203; +pub const SYS_setregid: c_long = 204; +pub const SYS_getgroups: c_long = 205; +pub const SYS_setgroups: c_long = 206; +pub const SYS_fchown: c_long = 207; +pub const SYS_setresuid: c_long = 208; +pub const SYS_getresuid: c_long = 209; +pub const SYS_setresgid: c_long = 210; +pub const SYS_getresgid: c_long = 211; +pub const SYS_chown: c_long = 212; +pub const SYS_setuid: c_long = 213; +pub const SYS_setgid: c_long = 214; +pub const SYS_setfsuid: c_long = 215; +pub const SYS_setfsgid: c_long = 216; +pub const SYS_pivot_root: c_long = 217; +pub const SYS_mincore: c_long = 218; +pub const SYS_madvise: c_long = 219; +pub const SYS_getdents64: c_long = 220; +pub const SYS_readahead: c_long = 222; +pub const SYS_setxattr: c_long = 224; +pub const SYS_lsetxattr: c_long = 225; +pub const SYS_fsetxattr: c_long = 226; +pub const SYS_getxattr: c_long = 227; +pub const SYS_lgetxattr: c_long = 228; +pub const SYS_fgetxattr: c_long = 229; +pub const SYS_listxattr: c_long = 230; +pub const SYS_llistxattr: c_long = 231; +pub const SYS_flistxattr: c_long = 232; +pub const SYS_removexattr: c_long = 233; +pub const SYS_lremovexattr: c_long = 234; +pub const SYS_fremovexattr: c_long = 235; +pub const SYS_gettid: c_long = 236; +pub const SYS_tkill: c_long = 237; +pub const SYS_futex: c_long = 238; +pub const SYS_sched_setaffinity: c_long = 239; +pub const SYS_sched_getaffinity: c_long = 240; +pub const SYS_tgkill: c_long = 241; +pub const SYS_io_setup: c_long = 243; +pub const SYS_io_destroy: c_long = 244; +pub const SYS_io_getevents: c_long = 245; +pub const SYS_io_submit: c_long = 246; +pub const SYS_io_cancel: c_long = 247; +pub const SYS_exit_group: c_long = 248; +pub const SYS_epoll_create: c_long = 249; +pub const SYS_epoll_ctl: c_long = 250; +pub const SYS_epoll_wait: c_long = 251; +pub const SYS_set_tid_address: c_long = 252; +pub const SYS_fadvise64: c_long = 253; +pub const SYS_timer_create: c_long = 254; +pub const SYS_timer_settime: c_long = 255; +pub const SYS_timer_gettime: c_long = 256; +pub const SYS_timer_getoverrun: c_long = 257; +pub const SYS_timer_delete: c_long = 258; +pub const SYS_clock_settime: c_long = 259; +pub const SYS_clock_gettime: c_long = 260; +pub const SYS_clock_getres: c_long = 261; +pub const SYS_clock_nanosleep: c_long = 262; +pub const SYS_statfs64: c_long = 265; +pub const SYS_fstatfs64: c_long = 266; +pub const SYS_remap_file_pages: c_long = 267; +pub const SYS_mbind: c_long = 268; +pub const SYS_get_mempolicy: c_long = 269; +pub const SYS_set_mempolicy: c_long = 270; +pub const SYS_mq_open: c_long = 271; +pub const SYS_mq_unlink: c_long = 272; +pub const SYS_mq_timedsend: c_long = 273; +pub const SYS_mq_timedreceive: c_long 
= 274; +pub const SYS_mq_notify: c_long = 275; +pub const SYS_mq_getsetattr: c_long = 276; +pub const SYS_kexec_load: c_long = 277; +pub const SYS_add_key: c_long = 278; +pub const SYS_request_key: c_long = 279; +pub const SYS_keyctl: c_long = 280; +pub const SYS_waitid: c_long = 281; +pub const SYS_ioprio_set: c_long = 282; +pub const SYS_ioprio_get: c_long = 283; +pub const SYS_inotify_init: c_long = 284; +pub const SYS_inotify_add_watch: c_long = 285; +pub const SYS_inotify_rm_watch: c_long = 286; +pub const SYS_migrate_pages: c_long = 287; +pub const SYS_openat: c_long = 288; +pub const SYS_mkdirat: c_long = 289; +pub const SYS_mknodat: c_long = 290; +pub const SYS_fchownat: c_long = 291; +pub const SYS_futimesat: c_long = 292; +pub const SYS_newfstatat: c_long = 293; +pub const SYS_unlinkat: c_long = 294; +pub const SYS_renameat: c_long = 295; +pub const SYS_linkat: c_long = 296; +pub const SYS_symlinkat: c_long = 297; +pub const SYS_readlinkat: c_long = 298; +pub const SYS_fchmodat: c_long = 299; +pub const SYS_faccessat: c_long = 300; +pub const SYS_pselect6: c_long = 301; +pub const SYS_ppoll: c_long = 302; +pub const SYS_unshare: c_long = 303; +pub const SYS_set_robust_list: c_long = 304; +pub const SYS_get_robust_list: c_long = 305; +pub const SYS_splice: c_long = 306; +pub const SYS_sync_file_range: c_long = 307; +pub const SYS_tee: c_long = 308; +pub const SYS_vmsplice: c_long = 309; +pub const SYS_move_pages: c_long = 310; +pub const SYS_getcpu: c_long = 311; +pub const SYS_epoll_pwait: c_long = 312; +pub const SYS_utimes: c_long = 313; +pub const SYS_fallocate: c_long = 314; +pub const SYS_utimensat: c_long = 315; +pub const SYS_signalfd: c_long = 316; +pub const SYS_timerfd: c_long = 317; +pub const SYS_eventfd: c_long = 318; +pub const SYS_timerfd_create: c_long = 319; +pub const SYS_timerfd_settime: c_long = 320; +pub const SYS_timerfd_gettime: c_long = 321; +pub const SYS_signalfd4: c_long = 322; +pub const SYS_eventfd2: c_long = 323; +pub const SYS_inotify_init1: c_long = 324; +pub const SYS_pipe2: c_long = 325; +pub const SYS_dup3: c_long = 326; +pub const SYS_epoll_create1: c_long = 327; +pub const SYS_preadv: c_long = 328; +pub const SYS_pwritev: c_long = 329; +pub const SYS_rt_tgsigqueueinfo: c_long = 330; +pub const SYS_perf_event_open: c_long = 331; +pub const SYS_fanotify_init: c_long = 332; +pub const SYS_fanotify_mark: c_long = 333; +pub const SYS_prlimit64: c_long = 334; +pub const SYS_name_to_handle_at: c_long = 335; +pub const SYS_open_by_handle_at: c_long = 336; +pub const SYS_clock_adjtime: c_long = 337; +pub const SYS_syncfs: c_long = 338; +pub const SYS_setns: c_long = 339; +pub const SYS_process_vm_readv: c_long = 340; +pub const SYS_process_vm_writev: c_long = 341; +pub const SYS_s390_runtime_instr: c_long = 342; +pub const SYS_kcmp: c_long = 343; +pub const SYS_finit_module: c_long = 344; +pub const SYS_sched_setattr: c_long = 345; +pub const SYS_sched_getattr: c_long = 346; +pub const SYS_renameat2: c_long = 347; +pub const SYS_seccomp: c_long = 348; +pub const SYS_getrandom: c_long = 349; +pub const SYS_memfd_create: c_long = 350; +pub const SYS_bpf: c_long = 351; +pub const SYS_s390_pci_mmio_write: c_long = 352; +pub const SYS_s390_pci_mmio_read: c_long = 353; +pub const SYS_execveat: c_long = 354; +pub const SYS_userfaultfd: c_long = 355; +pub const SYS_membarrier: c_long = 356; +pub const SYS_recvmmsg: c_long = 357; +pub const SYS_sendmmsg: c_long = 358; +pub const SYS_socket: c_long = 359; +pub const SYS_socketpair: c_long = 360; +pub const 
SYS_bind: c_long = 361; +pub const SYS_connect: c_long = 362; +pub const SYS_listen: c_long = 363; +pub const SYS_accept4: c_long = 364; +pub const SYS_getsockopt: c_long = 365; +pub const SYS_setsockopt: c_long = 366; +pub const SYS_getsockname: c_long = 367; +pub const SYS_getpeername: c_long = 368; +pub const SYS_sendto: c_long = 369; +pub const SYS_sendmsg: c_long = 370; +pub const SYS_recvfrom: c_long = 371; +pub const SYS_recvmsg: c_long = 372; +pub const SYS_shutdown: c_long = 373; +pub const SYS_mlock2: c_long = 374; +pub const SYS_copy_file_range: c_long = 375; +pub const SYS_preadv2: c_long = 376; +pub const SYS_pwritev2: c_long = 377; +pub const SYS_s390_guarded_storage: c_long = 378; +pub const SYS_statx: c_long = 379; +pub const SYS_s390_sthyi: c_long = 380; +pub const SYS_kexec_file_load: c_long = 381; +pub const SYS_io_pgetevents: c_long = 382; +pub const SYS_rseq: c_long = 383; +pub const SYS_pkey_mprotect: c_long = 384; +pub const SYS_pkey_alloc: c_long = 385; +pub const SYS_pkey_free: c_long = 386; +pub const SYS_semtimedop: c_long = 392; +pub const SYS_semget: c_long = 393; +pub const SYS_semctl: c_long = 394; +pub const SYS_shmget: c_long = 395; +pub const SYS_shmctl: c_long = 396; +pub const SYS_shmat: c_long = 397; +pub const SYS_shmdt: c_long = 398; +pub const SYS_msgget: c_long = 399; +pub const SYS_msgsnd: c_long = 400; +pub const SYS_msgrcv: c_long = 401; +pub const SYS_msgctl: c_long = 402; +pub const SYS_pidfd_send_signal: c_long = 424; +pub const SYS_io_uring_setup: c_long = 425; +pub const SYS_io_uring_enter: c_long = 426; +pub const SYS_io_uring_register: c_long = 427; +pub const SYS_open_tree: c_long = 428; +pub const SYS_move_mount: c_long = 429; +pub const SYS_fsopen: c_long = 430; +pub const SYS_fsconfig: c_long = 431; +pub const SYS_fsmount: c_long = 432; +pub const SYS_fspick: c_long = 433; +pub const SYS_pidfd_open: c_long = 434; +pub const SYS_clone3: c_long = 435; +pub const SYS_close_range: c_long = 436; +pub const SYS_openat2: c_long = 437; +pub const SYS_pidfd_getfd: c_long = 438; +pub const SYS_faccessat2: c_long = 439; +pub const SYS_process_madvise: c_long = 440; +pub const SYS_epoll_pwait2: c_long = 441; +pub const SYS_mount_setattr: c_long = 442; +pub const SYS_landlock_create_ruleset: c_long = 444; +pub const SYS_landlock_add_rule: c_long = 445; +pub const SYS_landlock_restrict_self: c_long = 446; +pub const SYS_memfd_secret: c_long = 447; +pub const SYS_process_mrelease: c_long = 448; +pub const SYS_futex_waitv: c_long = 449; +pub const SYS_set_mempolicy_home_node: c_long = 450; +pub const SYS_cachestat: c_long = 451; +pub const SYS_fchmodat2: c_long = 452; +pub const SYS_mseal: c_long = 462; diff --git a/vendor/libc/src/unix/linux_like/linux/musl/b64/wasm32/mod.rs b/vendor/libc/src/unix/linux_like/linux/musl/b64/wasm32/mod.rs new file mode 100644 index 00000000000000..29750e79e17e65 --- /dev/null +++ b/vendor/libc/src/unix/linux_like/linux/musl/b64/wasm32/mod.rs @@ -0,0 +1,688 @@ +//! Wasm32 definitions conforming to the WALI ABI. +//! The WALI ABI closely mirrors `x86_64` Linux and is thus implemented within the `b64` module as opposed to `b32` +use crate::off_t; +use crate::prelude::*; + +pub type wchar_t = i32; +pub type nlink_t = u64; +pub type blksize_t = c_long; +pub type __u64 = c_ulonglong; +pub type __s64 = c_longlong; + +s! 
{ + pub struct stat { + pub st_dev: crate::dev_t, + pub st_ino: crate::ino_t, + pub st_nlink: crate::nlink_t, + pub st_mode: crate::mode_t, + pub st_uid: crate::uid_t, + pub st_gid: crate::gid_t, + __pad0: c_int, + pub st_rdev: crate::dev_t, + pub st_size: off_t, + pub st_blksize: crate::blksize_t, + pub st_blocks: crate::blkcnt_t, + pub st_atime: crate::time_t, + pub st_atime_nsec: c_long, + pub st_mtime: crate::time_t, + pub st_mtime_nsec: c_long, + pub st_ctime: crate::time_t, + pub st_ctime_nsec: c_long, + __unused: [c_long; 3], + } + + pub struct stat64 { + pub st_dev: crate::dev_t, + pub st_ino: crate::ino64_t, + pub st_nlink: crate::nlink_t, + pub st_mode: crate::mode_t, + pub st_uid: crate::uid_t, + pub st_gid: crate::gid_t, + __pad0: c_int, + pub st_rdev: crate::dev_t, + pub st_size: off_t, + pub st_blksize: crate::blksize_t, + pub st_blocks: crate::blkcnt64_t, + pub st_atime: crate::time_t, + pub st_atime_nsec: c_long, + pub st_mtime: crate::time_t, + pub st_mtime_nsec: c_long, + pub st_ctime: crate::time_t, + pub st_ctime_nsec: c_long, + __reserved: [c_long; 3], + } + + pub struct ipc_perm { + #[cfg(musl_v1_2_3)] + pub __key: crate::key_t, + #[cfg(not(musl_v1_2_3))] + #[deprecated( + since = "0.2.173", + note = "This field is incorrectly named and will be changed + to __key in a future release." + )] + pub __ipc_perm_key: crate::key_t, + pub uid: crate::uid_t, + pub gid: crate::gid_t, + pub cuid: crate::uid_t, + pub cgid: crate::gid_t, + pub mode: crate::mode_t, + pub __seq: c_int, + __unused1: c_long, + __unused2: c_long, + } +} + +// Syscall table +pub const SYS_read: c_long = 0; +pub const SYS_write: c_long = 1; +pub const SYS_open: c_long = 2; +pub const SYS_close: c_long = 3; +pub const SYS_stat: c_long = 4; +pub const SYS_fstat: c_long = 5; +pub const SYS_lstat: c_long = 6; +pub const SYS_poll: c_long = 7; +pub const SYS_lseek: c_long = 8; +pub const SYS_mmap: c_long = 9; +pub const SYS_mprotect: c_long = 10; +pub const SYS_munmap: c_long = 11; +pub const SYS_brk: c_long = 12; +pub const SYS_rt_sigaction: c_long = 13; +pub const SYS_rt_sigprocmask: c_long = 14; +pub const SYS_rt_sigreturn: c_long = 15; +pub const SYS_ioctl: c_long = 16; +pub const SYS_pread64: c_long = 17; +pub const SYS_pwrite64: c_long = 18; +pub const SYS_readv: c_long = 19; +pub const SYS_writev: c_long = 20; +pub const SYS_access: c_long = 21; +pub const SYS_pipe: c_long = 22; +pub const SYS_select: c_long = 23; +pub const SYS_sched_yield: c_long = 24; +pub const SYS_mremap: c_long = 25; +pub const SYS_msync: c_long = 26; +pub const SYS_mincore: c_long = 27; +pub const SYS_madvise: c_long = 28; +pub const SYS_shmget: c_long = 29; +pub const SYS_shmat: c_long = 30; +pub const SYS_shmctl: c_long = 31; +pub const SYS_dup: c_long = 32; +pub const SYS_dup2: c_long = 33; +pub const SYS_pause: c_long = 34; +pub const SYS_nanosleep: c_long = 35; +pub const SYS_getitimer: c_long = 36; +pub const SYS_alarm: c_long = 37; +pub const SYS_setitimer: c_long = 38; +pub const SYS_getpid: c_long = 39; +pub const SYS_sendfile: c_long = 40; +pub const SYS_socket: c_long = 41; +pub const SYS_connect: c_long = 42; +pub const SYS_accept: c_long = 43; +pub const SYS_sendto: c_long = 44; +pub const SYS_recvfrom: c_long = 45; +pub const SYS_sendmsg: c_long = 46; +pub const SYS_recvmsg: c_long = 47; +pub const SYS_shutdown: c_long = 48; +pub const SYS_bind: c_long = 49; +pub const SYS_listen: c_long = 50; +pub const SYS_getsockname: c_long = 51; +pub const SYS_getpeername: c_long = 52; +pub const SYS_socketpair: c_long = 53; 
+pub const SYS_setsockopt: c_long = 54; +pub const SYS_getsockopt: c_long = 55; +pub const SYS_clone: c_long = 56; +pub const SYS_fork: c_long = 57; +pub const SYS_vfork: c_long = 58; +pub const SYS_execve: c_long = 59; +pub const SYS_exit: c_long = 60; +pub const SYS_wait4: c_long = 61; +pub const SYS_kill: c_long = 62; +pub const SYS_uname: c_long = 63; +pub const SYS_semget: c_long = 64; +pub const SYS_semop: c_long = 65; +pub const SYS_semctl: c_long = 66; +pub const SYS_shmdt: c_long = 67; +pub const SYS_msgget: c_long = 68; +pub const SYS_msgsnd: c_long = 69; +pub const SYS_msgrcv: c_long = 70; +pub const SYS_msgctl: c_long = 71; +pub const SYS_fcntl: c_long = 72; +pub const SYS_flock: c_long = 73; +pub const SYS_fsync: c_long = 74; +pub const SYS_fdatasync: c_long = 75; +pub const SYS_truncate: c_long = 76; +pub const SYS_ftruncate: c_long = 77; +pub const SYS_getdents: c_long = 78; +pub const SYS_getcwd: c_long = 79; +pub const SYS_chdir: c_long = 80; +pub const SYS_fchdir: c_long = 81; +pub const SYS_rename: c_long = 82; +pub const SYS_mkdir: c_long = 83; +pub const SYS_rmdir: c_long = 84; +pub const SYS_creat: c_long = 85; +pub const SYS_link: c_long = 86; +pub const SYS_unlink: c_long = 87; +pub const SYS_symlink: c_long = 88; +pub const SYS_readlink: c_long = 89; +pub const SYS_chmod: c_long = 90; +pub const SYS_fchmod: c_long = 91; +pub const SYS_chown: c_long = 92; +pub const SYS_fchown: c_long = 93; +pub const SYS_lchown: c_long = 94; +pub const SYS_umask: c_long = 95; +pub const SYS_gettimeofday: c_long = 96; +pub const SYS_getrlimit: c_long = 97; +pub const SYS_getrusage: c_long = 98; +pub const SYS_sysinfo: c_long = 99; +pub const SYS_times: c_long = 100; +pub const SYS_ptrace: c_long = 101; +pub const SYS_getuid: c_long = 102; +pub const SYS_syslog: c_long = 103; +pub const SYS_getgid: c_long = 104; +pub const SYS_setuid: c_long = 105; +pub const SYS_setgid: c_long = 106; +pub const SYS_geteuid: c_long = 107; +pub const SYS_getegid: c_long = 108; +pub const SYS_setpgid: c_long = 109; +pub const SYS_getppid: c_long = 110; +pub const SYS_getpgrp: c_long = 111; +pub const SYS_setsid: c_long = 112; +pub const SYS_setreuid: c_long = 113; +pub const SYS_setregid: c_long = 114; +pub const SYS_getgroups: c_long = 115; +pub const SYS_setgroups: c_long = 116; +pub const SYS_setresuid: c_long = 117; +pub const SYS_getresuid: c_long = 118; +pub const SYS_setresgid: c_long = 119; +pub const SYS_getresgid: c_long = 120; +pub const SYS_getpgid: c_long = 121; +pub const SYS_setfsuid: c_long = 122; +pub const SYS_setfsgid: c_long = 123; +pub const SYS_getsid: c_long = 124; +pub const SYS_capget: c_long = 125; +pub const SYS_capset: c_long = 126; +pub const SYS_rt_sigpending: c_long = 127; +pub const SYS_rt_sigtimedwait: c_long = 128; +pub const SYS_rt_sigqueueinfo: c_long = 129; +pub const SYS_rt_sigsuspend: c_long = 130; +pub const SYS_sigaltstack: c_long = 131; +pub const SYS_utime: c_long = 132; +pub const SYS_mknod: c_long = 133; +pub const SYS_uselib: c_long = 134; +pub const SYS_personality: c_long = 135; +pub const SYS_ustat: c_long = 136; +pub const SYS_statfs: c_long = 137; +pub const SYS_fstatfs: c_long = 138; +pub const SYS_sysfs: c_long = 139; +pub const SYS_getpriority: c_long = 140; +pub const SYS_setpriority: c_long = 141; +pub const SYS_sched_setparam: c_long = 142; +pub const SYS_sched_getparam: c_long = 143; +pub const SYS_sched_setscheduler: c_long = 144; +pub const SYS_sched_getscheduler: c_long = 145; +pub const SYS_sched_get_priority_max: c_long = 146; +pub const 
SYS_sched_get_priority_min: c_long = 147; +pub const SYS_sched_rr_get_interval: c_long = 148; +pub const SYS_mlock: c_long = 149; +pub const SYS_munlock: c_long = 150; +pub const SYS_mlockall: c_long = 151; +pub const SYS_munlockall: c_long = 152; +pub const SYS_vhangup: c_long = 153; +pub const SYS_modify_ldt: c_long = 154; +pub const SYS_pivot_root: c_long = 155; +pub const SYS__sysctl: c_long = 156; +pub const SYS_prctl: c_long = 157; +pub const SYS_arch_prctl: c_long = 158; +pub const SYS_adjtimex: c_long = 159; +pub const SYS_setrlimit: c_long = 160; +pub const SYS_chroot: c_long = 161; +pub const SYS_sync: c_long = 162; +pub const SYS_acct: c_long = 163; +pub const SYS_settimeofday: c_long = 164; +pub const SYS_mount: c_long = 165; +pub const SYS_umount2: c_long = 166; +pub const SYS_swapon: c_long = 167; +pub const SYS_swapoff: c_long = 168; +pub const SYS_reboot: c_long = 169; +pub const SYS_sethostname: c_long = 170; +pub const SYS_setdomainname: c_long = 171; +pub const SYS_iopl: c_long = 172; +pub const SYS_ioperm: c_long = 173; +pub const SYS_create_module: c_long = 174; +pub const SYS_init_module: c_long = 175; +pub const SYS_delete_module: c_long = 176; +pub const SYS_get_kernel_syms: c_long = 177; +pub const SYS_query_module: c_long = 178; +pub const SYS_quotactl: c_long = 179; +pub const SYS_nfsservctl: c_long = 180; +pub const SYS_getpmsg: c_long = 181; +pub const SYS_putpmsg: c_long = 182; +pub const SYS_afs_syscall: c_long = 183; +pub const SYS_tuxcall: c_long = 184; +pub const SYS_security: c_long = 185; +pub const SYS_gettid: c_long = 186; +pub const SYS_readahead: c_long = 187; +pub const SYS_setxattr: c_long = 188; +pub const SYS_lsetxattr: c_long = 189; +pub const SYS_fsetxattr: c_long = 190; +pub const SYS_getxattr: c_long = 191; +pub const SYS_lgetxattr: c_long = 192; +pub const SYS_fgetxattr: c_long = 193; +pub const SYS_listxattr: c_long = 194; +pub const SYS_llistxattr: c_long = 195; +pub const SYS_flistxattr: c_long = 196; +pub const SYS_removexattr: c_long = 197; +pub const SYS_lremovexattr: c_long = 198; +pub const SYS_fremovexattr: c_long = 199; +pub const SYS_tkill: c_long = 200; +pub const SYS_time: c_long = 201; +pub const SYS_futex: c_long = 202; +pub const SYS_sched_setaffinity: c_long = 203; +pub const SYS_sched_getaffinity: c_long = 204; +pub const SYS_set_thread_area: c_long = 205; +pub const SYS_io_setup: c_long = 206; +pub const SYS_io_destroy: c_long = 207; +pub const SYS_io_getevents: c_long = 208; +pub const SYS_io_submit: c_long = 209; +pub const SYS_io_cancel: c_long = 210; +pub const SYS_get_thread_area: c_long = 211; +pub const SYS_lookup_dcookie: c_long = 212; +pub const SYS_epoll_create: c_long = 213; +pub const SYS_epoll_ctl_old: c_long = 214; +pub const SYS_epoll_wait_old: c_long = 215; +pub const SYS_remap_file_pages: c_long = 216; +pub const SYS_getdents64: c_long = 217; +pub const SYS_set_tid_address: c_long = 218; +pub const SYS_restart_syscall: c_long = 219; +pub const SYS_semtimedop: c_long = 220; +pub const SYS_fadvise64: c_long = 221; +pub const SYS_timer_create: c_long = 222; +pub const SYS_timer_settime: c_long = 223; +pub const SYS_timer_gettime: c_long = 224; +pub const SYS_timer_getoverrun: c_long = 225; +pub const SYS_timer_delete: c_long = 226; +pub const SYS_clock_settime: c_long = 227; +pub const SYS_clock_gettime: c_long = 228; +pub const SYS_clock_getres: c_long = 229; +pub const SYS_clock_nanosleep: c_long = 230; +pub const SYS_exit_group: c_long = 231; +pub const SYS_epoll_wait: c_long = 232; +pub const 
SYS_epoll_ctl: c_long = 233; +pub const SYS_tgkill: c_long = 234; +pub const SYS_utimes: c_long = 235; +pub const SYS_vserver: c_long = 236; +pub const SYS_mbind: c_long = 237; +pub const SYS_set_mempolicy: c_long = 238; +pub const SYS_get_mempolicy: c_long = 239; +pub const SYS_mq_open: c_long = 240; +pub const SYS_mq_unlink: c_long = 241; +pub const SYS_mq_timedsend: c_long = 242; +pub const SYS_mq_timedreceive: c_long = 243; +pub const SYS_mq_notify: c_long = 244; +pub const SYS_mq_getsetattr: c_long = 245; +pub const SYS_kexec_load: c_long = 246; +pub const SYS_waitid: c_long = 247; +pub const SYS_add_key: c_long = 248; +pub const SYS_request_key: c_long = 249; +pub const SYS_keyctl: c_long = 250; +pub const SYS_ioprio_set: c_long = 251; +pub const SYS_ioprio_get: c_long = 252; +pub const SYS_inotify_init: c_long = 253; +pub const SYS_inotify_add_watch: c_long = 254; +pub const SYS_inotify_rm_watch: c_long = 255; +pub const SYS_migrate_pages: c_long = 256; +pub const SYS_openat: c_long = 257; +pub const SYS_mkdirat: c_long = 258; +pub const SYS_mknodat: c_long = 259; +pub const SYS_fchownat: c_long = 260; +pub const SYS_futimesat: c_long = 261; +pub const SYS_newfstatat: c_long = 262; +pub const SYS_unlinkat: c_long = 263; +pub const SYS_renameat: c_long = 264; +pub const SYS_linkat: c_long = 265; +pub const SYS_symlinkat: c_long = 266; +pub const SYS_readlinkat: c_long = 267; +pub const SYS_fchmodat: c_long = 268; +pub const SYS_faccessat: c_long = 269; +pub const SYS_pselect6: c_long = 270; +pub const SYS_ppoll: c_long = 271; +pub const SYS_unshare: c_long = 272; +pub const SYS_set_robust_list: c_long = 273; +pub const SYS_get_robust_list: c_long = 274; +pub const SYS_splice: c_long = 275; +pub const SYS_tee: c_long = 276; +pub const SYS_sync_file_range: c_long = 277; +pub const SYS_vmsplice: c_long = 278; +pub const SYS_move_pages: c_long = 279; +pub const SYS_utimensat: c_long = 280; +pub const SYS_epoll_pwait: c_long = 281; +pub const SYS_signalfd: c_long = 282; +pub const SYS_timerfd_create: c_long = 283; +pub const SYS_eventfd: c_long = 284; +pub const SYS_fallocate: c_long = 285; +pub const SYS_timerfd_settime: c_long = 286; +pub const SYS_timerfd_gettime: c_long = 287; +pub const SYS_accept4: c_long = 288; +pub const SYS_signalfd4: c_long = 289; +pub const SYS_eventfd2: c_long = 290; +pub const SYS_epoll_create1: c_long = 291; +pub const SYS_dup3: c_long = 292; +pub const SYS_pipe2: c_long = 293; +pub const SYS_inotify_init1: c_long = 294; +pub const SYS_preadv: c_long = 295; +pub const SYS_pwritev: c_long = 296; +pub const SYS_rt_tgsigqueueinfo: c_long = 297; +pub const SYS_perf_event_open: c_long = 298; +pub const SYS_recvmmsg: c_long = 299; +pub const SYS_fanotify_init: c_long = 300; +pub const SYS_fanotify_mark: c_long = 301; +pub const SYS_prlimit64: c_long = 302; +pub const SYS_name_to_handle_at: c_long = 303; +pub const SYS_open_by_handle_at: c_long = 304; +pub const SYS_clock_adjtime: c_long = 305; +pub const SYS_syncfs: c_long = 306; +pub const SYS_sendmmsg: c_long = 307; +pub const SYS_setns: c_long = 308; +pub const SYS_getcpu: c_long = 309; +pub const SYS_process_vm_readv: c_long = 310; +pub const SYS_process_vm_writev: c_long = 311; +pub const SYS_kcmp: c_long = 312; +pub const SYS_finit_module: c_long = 313; +pub const SYS_sched_setattr: c_long = 314; +pub const SYS_sched_getattr: c_long = 315; +pub const SYS_renameat2: c_long = 316; +pub const SYS_seccomp: c_long = 317; +pub const SYS_getrandom: c_long = 318; +pub const SYS_memfd_create: c_long = 319; +pub const 
SYS_kexec_file_load: c_long = 320; +pub const SYS_bpf: c_long = 321; +pub const SYS_execveat: c_long = 322; +pub const SYS_userfaultfd: c_long = 323; +pub const SYS_membarrier: c_long = 324; +pub const SYS_mlock2: c_long = 325; +pub const SYS_copy_file_range: c_long = 326; +pub const SYS_preadv2: c_long = 327; +pub const SYS_pwritev2: c_long = 328; +pub const SYS_pkey_mprotect: c_long = 329; +pub const SYS_pkey_alloc: c_long = 330; +pub const SYS_pkey_free: c_long = 331; +pub const SYS_statx: c_long = 332; +pub const SYS_io_pgetevents: c_long = 333; +pub const SYS_rseq: c_long = 334; +pub const SYS_pidfd_send_signal: c_long = 424; +pub const SYS_io_uring_setup: c_long = 425; +pub const SYS_io_uring_enter: c_long = 426; +pub const SYS_io_uring_register: c_long = 427; +pub const SYS_open_tree: c_long = 428; +pub const SYS_move_mount: c_long = 429; +pub const SYS_fsopen: c_long = 430; +pub const SYS_fsconfig: c_long = 431; +pub const SYS_fsmount: c_long = 432; +pub const SYS_fspick: c_long = 433; +pub const SYS_pidfd_open: c_long = 434; +pub const SYS_clone3: c_long = 435; +pub const SYS_close_range: c_long = 436; +pub const SYS_openat2: c_long = 437; +pub const SYS_pidfd_getfd: c_long = 438; +pub const SYS_faccessat2: c_long = 439; +pub const SYS_process_madvise: c_long = 440; +pub const SYS_epoll_pwait2: c_long = 441; +pub const SYS_mount_setattr: c_long = 442; +pub const SYS_quotactl_fd: c_long = 443; +pub const SYS_landlock_create_ruleset: c_long = 444; +pub const SYS_landlock_add_rule: c_long = 445; +pub const SYS_landlock_restrict_self: c_long = 446; +pub const SYS_memfd_secret: c_long = 447; +pub const SYS_process_mrelease: c_long = 448; +pub const SYS_futex_waitv: c_long = 449; +pub const SYS_set_mempolicy_home_node: c_long = 450; + +// Syscall aliases for WALI +pub const SYS_fadvise: c_long = SYS_fadvise64; + +pub const MADV_SOFT_OFFLINE: c_int = 101; +pub const O_APPEND: c_int = 1024; +pub const O_DIRECT: c_int = 0x4000; +pub const O_DIRECTORY: c_int = 0x10000; +pub const O_LARGEFILE: c_int = 0; +pub const O_NOFOLLOW: c_int = 0x20000; +pub const O_CREAT: c_int = 64; +pub const O_EXCL: c_int = 128; +pub const O_NOCTTY: c_int = 256; +pub const O_NONBLOCK: c_int = 2048; +pub const O_SYNC: c_int = 1052672; +pub const O_RSYNC: c_int = 1052672; +pub const O_DSYNC: c_int = 4096; +pub const O_ASYNC: c_int = 0x2000; + +pub const PTRACE_SYSEMU: c_int = 31; +pub const PTRACE_SYSEMU_SINGLESTEP: c_int = 32; + +pub const SIGSTKSZ: size_t = 8192; +pub const MINSIGSTKSZ: size_t = 2048; + +pub const ENAMETOOLONG: c_int = 36; +pub const ENOLCK: c_int = 37; +pub const ENOSYS: c_int = 38; +pub const ENOTEMPTY: c_int = 39; +pub const ELOOP: c_int = 40; +pub const ENOMSG: c_int = 42; +pub const EIDRM: c_int = 43; +pub const ECHRNG: c_int = 44; +pub const EL2NSYNC: c_int = 45; +pub const EL3HLT: c_int = 46; +pub const EL3RST: c_int = 47; +pub const ELNRNG: c_int = 48; +pub const EUNATCH: c_int = 49; +pub const ENOCSI: c_int = 50; +pub const EL2HLT: c_int = 51; +pub const EBADE: c_int = 52; +pub const EBADR: c_int = 53; +pub const EXFULL: c_int = 54; +pub const ENOANO: c_int = 55; +pub const EBADRQC: c_int = 56; +pub const EBADSLT: c_int = 57; +pub const EMULTIHOP: c_int = 72; +pub const EBADMSG: c_int = 74; +pub const EOVERFLOW: c_int = 75; +pub const ENOTUNIQ: c_int = 76; +pub const EBADFD: c_int = 77; +pub const EREMCHG: c_int = 78; +pub const ELIBACC: c_int = 79; +pub const ELIBBAD: c_int = 80; +pub const ELIBSCN: c_int = 81; +pub const ELIBMAX: c_int = 82; +pub const ELIBEXEC: c_int = 83; +pub const 
EILSEQ: c_int = 84; +pub const ERESTART: c_int = 85; +pub const ESTRPIPE: c_int = 86; +pub const EUSERS: c_int = 87; +pub const ENOTSOCK: c_int = 88; +pub const EDESTADDRREQ: c_int = 89; +pub const EMSGSIZE: c_int = 90; +pub const EPROTOTYPE: c_int = 91; +pub const ENOPROTOOPT: c_int = 92; +pub const EPROTONOSUPPORT: c_int = 93; +pub const ESOCKTNOSUPPORT: c_int = 94; +pub const EOPNOTSUPP: c_int = 95; +pub const ENOTSUP: c_int = EOPNOTSUPP; +pub const EPFNOSUPPORT: c_int = 96; +pub const EAFNOSUPPORT: c_int = 97; +pub const EADDRINUSE: c_int = 98; +pub const EADDRNOTAVAIL: c_int = 99; +pub const ENETDOWN: c_int = 100; +pub const ENETUNREACH: c_int = 101; +pub const ENETRESET: c_int = 102; +pub const ECONNABORTED: c_int = 103; +pub const ECONNRESET: c_int = 104; +pub const ENOBUFS: c_int = 105; +pub const EISCONN: c_int = 106; +pub const ENOTCONN: c_int = 107; +pub const ESHUTDOWN: c_int = 108; +pub const ETOOMANYREFS: c_int = 109; +pub const ETIMEDOUT: c_int = 110; +pub const ECONNREFUSED: c_int = 111; +pub const EHOSTDOWN: c_int = 112; +pub const EHOSTUNREACH: c_int = 113; +pub const EALREADY: c_int = 114; +pub const EINPROGRESS: c_int = 115; +pub const ESTALE: c_int = 116; +pub const EUCLEAN: c_int = 117; +pub const ENOTNAM: c_int = 118; +pub const ENAVAIL: c_int = 119; +pub const EISNAM: c_int = 120; +pub const EREMOTEIO: c_int = 121; +pub const EDQUOT: c_int = 122; +pub const ENOMEDIUM: c_int = 123; +pub const EMEDIUMTYPE: c_int = 124; +pub const ECANCELED: c_int = 125; +pub const ENOKEY: c_int = 126; +pub const EKEYEXPIRED: c_int = 127; +pub const EKEYREVOKED: c_int = 128; +pub const EKEYREJECTED: c_int = 129; +pub const EOWNERDEAD: c_int = 130; +pub const ENOTRECOVERABLE: c_int = 131; +pub const ERFKILL: c_int = 132; +pub const EHWPOISON: c_int = 133; + +pub const SA_ONSTACK: c_int = 0x08000000; +pub const SA_SIGINFO: c_int = 0x00000004; +pub const SA_NOCLDWAIT: c_int = 0x00000002; + +pub const SIGCHLD: c_int = 17; +pub const SIGBUS: c_int = 7; +pub const SIGTTIN: c_int = 21; +pub const SIGTTOU: c_int = 22; +pub const SIGXCPU: c_int = 24; +pub const SIGXFSZ: c_int = 25; +pub const SIGVTALRM: c_int = 26; +pub const SIGPROF: c_int = 27; +pub const SIGWINCH: c_int = 28; +pub const SIGUSR1: c_int = 10; +pub const SIGUSR2: c_int = 12; +pub const SIGCONT: c_int = 18; +pub const SIGSTOP: c_int = 19; +pub const SIGTSTP: c_int = 20; +pub const SIGURG: c_int = 23; +pub const SIGIO: c_int = 29; +pub const SIGSYS: c_int = 31; +pub const SIGSTKFLT: c_int = 16; +pub const SIGPOLL: c_int = 29; +pub const SIGPWR: c_int = 30; +pub const SIG_SETMASK: c_int = 2; +pub const SIG_BLOCK: c_int = 0x000000; +pub const SIG_UNBLOCK: c_int = 0x01; + +pub const F_GETLK: c_int = 5; +pub const F_GETOWN: c_int = 9; +pub const F_SETLK: c_int = 6; +pub const F_SETLKW: c_int = 7; +pub const F_SETOWN: c_int = 8; + +pub const VEOF: usize = 4; + +pub const POLLWRNORM: c_short = 0x100; +pub const POLLWRBAND: c_short = 0x200; + +pub const SOCK_STREAM: c_int = 1; +pub const SOCK_DGRAM: c_int = 2; + +pub const MAP_ANON: c_int = 0x0020; +pub const MAP_GROWSDOWN: c_int = 0x0100; +pub const MAP_DENYWRITE: c_int = 0x0800; +pub const MAP_EXECUTABLE: c_int = 0x01000; +pub const MAP_LOCKED: c_int = 0x02000; +pub const MAP_NORESERVE: c_int = 0x04000; +pub const MAP_POPULATE: c_int = 0x08000; +pub const MAP_NONBLOCK: c_int = 0x010000; +pub const MAP_STACK: c_int = 0x020000; +pub const MAP_HUGETLB: c_int = 0x040000; +pub const MAP_SYNC: c_int = 0x080000; + +pub const MCL_CURRENT: c_int = 0x0001; +pub const MCL_FUTURE: c_int = 0x0002; 
+pub const MCL_ONFAULT: c_int = 0x0004; +pub const CBAUD: crate::tcflag_t = 0o0010017; +pub const TAB1: c_int = 0x00000800; +pub const TAB2: c_int = 0x00001000; +pub const TAB3: c_int = 0x00001800; +pub const CR1: c_int = 0x00000200; +pub const CR2: c_int = 0x00000400; +pub const CR3: c_int = 0x00000600; +pub const FF1: c_int = 0x00008000; +pub const BS1: c_int = 0x00002000; +pub const VT1: c_int = 0x00004000; +pub const VWERASE: usize = 14; +pub const VREPRINT: usize = 12; +pub const VSUSP: usize = 10; +pub const VSTART: usize = 8; +pub const VSTOP: usize = 9; +pub const VDISCARD: usize = 13; +pub const VTIME: usize = 5; +pub const IXON: crate::tcflag_t = 0x00000400; +pub const IXOFF: crate::tcflag_t = 0x00001000; +pub const ONLCR: crate::tcflag_t = 0x4; +pub const CSIZE: crate::tcflag_t = 0x00000030; +pub const CS6: crate::tcflag_t = 0x00000010; +pub const CS7: crate::tcflag_t = 0x00000020; +pub const CS8: crate::tcflag_t = 0x00000030; +pub const CSTOPB: crate::tcflag_t = 0x00000040; +pub const CREAD: crate::tcflag_t = 0x00000080; +pub const PARENB: crate::tcflag_t = 0x00000100; +pub const PARODD: crate::tcflag_t = 0x00000200; +pub const HUPCL: crate::tcflag_t = 0x00000400; +pub const CLOCAL: crate::tcflag_t = 0x00000800; +pub const ECHOKE: crate::tcflag_t = 0x00000800; +pub const ECHOE: crate::tcflag_t = 0x00000010; +pub const ECHOK: crate::tcflag_t = 0x00000020; +pub const ECHONL: crate::tcflag_t = 0x00000040; +pub const ECHOPRT: crate::tcflag_t = 0x00000400; +pub const ECHOCTL: crate::tcflag_t = 0x00000200; +pub const ISIG: crate::tcflag_t = 0x00000001; +pub const ICANON: crate::tcflag_t = 0x00000002; +pub const PENDIN: crate::tcflag_t = 0x00004000; +pub const NOFLSH: crate::tcflag_t = 0x00000080; +pub const CIBAUD: crate::tcflag_t = 0o02003600000; +pub const CBAUDEX: crate::tcflag_t = 0o010000; +pub const VSWTC: usize = 7; +pub const OLCUC: crate::tcflag_t = 0o000002; +pub const NLDLY: crate::tcflag_t = 0o000400; +pub const CRDLY: crate::tcflag_t = 0o003000; +pub const TABDLY: crate::tcflag_t = 0o014000; +pub const BSDLY: crate::tcflag_t = 0o020000; +pub const FFDLY: crate::tcflag_t = 0o100000; +pub const VTDLY: crate::tcflag_t = 0o040000; +pub const XTABS: crate::tcflag_t = 0o014000; +pub const B57600: crate::speed_t = 0o010001; +pub const B115200: crate::speed_t = 0o010002; +pub const B230400: crate::speed_t = 0o010003; +pub const B460800: crate::speed_t = 0o010004; +pub const B500000: crate::speed_t = 0o010005; +pub const B576000: crate::speed_t = 0o010006; +pub const B921600: crate::speed_t = 0o010007; +pub const B1000000: crate::speed_t = 0o010010; +pub const B1152000: crate::speed_t = 0o010011; +pub const B1500000: crate::speed_t = 0o010012; +pub const B2000000: crate::speed_t = 0o010013; +pub const B2500000: crate::speed_t = 0o010014; +pub const B3000000: crate::speed_t = 0o010015; +pub const B3500000: crate::speed_t = 0o010016; +pub const B4000000: crate::speed_t = 0o010017; + +pub const EDEADLK: c_int = 35; +pub const EDEADLOCK: c_int = EDEADLK; + +pub const EXTPROC: crate::tcflag_t = 0x00010000; +pub const VEOL: usize = 11; +pub const VEOL2: usize = 16; +pub const VMIN: usize = 6; +pub const IEXTEN: crate::tcflag_t = 0x00008000; +pub const TOSTOP: crate::tcflag_t = 0x00000100; +pub const FLUSHO: crate::tcflag_t = 0x00001000; + +cfg_if! 
{ + if #[cfg(target_vendor = "wali")] { + mod wali; + pub use self::wali::*; + } +} diff --git a/vendor/libc/src/unix/linux_like/linux/musl/b64/wasm32/wali.rs b/vendor/libc/src/unix/linux_like/linux/musl/b64/wasm32/wali.rs new file mode 100644 index 00000000000000..bda5c241c1d2d9 --- /dev/null +++ b/vendor/libc/src/unix/linux_like/linux/musl/b64/wasm32/wali.rs @@ -0,0 +1,441 @@ +//! WebAssembly Linux Interface syscall specification + +// --- Autogenerated from WALI/scripts/autogen.py --- +#[link(wasm_import_module = "wali")] +extern "C" { + /* 0 */ + #[link_name = "SYS_read"] + pub fn __syscall_SYS_read(a1: i32, a2: i32, a3: u32) -> ::c_long; + /* 1 */ + #[link_name = "SYS_write"] + pub fn __syscall_SYS_write(a1: i32, a2: i32, a3: u32) -> ::c_long; + /* 2 */ + #[link_name = "SYS_open"] + pub fn __syscall_SYS_open(a1: i32, a2: i32, a3: i32) -> ::c_long; + /* 3 */ + #[link_name = "SYS_close"] + pub fn __syscall_SYS_close(a1: i32) -> ::c_long; + /* 4 */ + #[link_name = "SYS_stat"] + pub fn __syscall_SYS_stat(a1: i32, a2: i32) -> ::c_long; + /* 5 */ + #[link_name = "SYS_fstat"] + pub fn __syscall_SYS_fstat(a1: i32, a2: i32) -> ::c_long; + /* 6 */ + #[link_name = "SYS_lstat"] + pub fn __syscall_SYS_lstat(a1: i32, a2: i32) -> ::c_long; + /* 7 */ + #[link_name = "SYS_poll"] + pub fn __syscall_SYS_poll(a1: i32, a2: u32, a3: i32) -> ::c_long; + /* 8 */ + #[link_name = "SYS_lseek"] + pub fn __syscall_SYS_lseek(a1: i32, a2: i64, a3: i32) -> ::c_long; + /* 9 */ + #[link_name = "SYS_mmap"] + pub fn __syscall_SYS_mmap(a1: i32, a2: u32, a3: i32, a4: i32, a5: i32, a6: i64) -> ::c_long; + /* 10 */ + #[link_name = "SYS_mprotect"] + pub fn __syscall_SYS_mprotect(a1: i32, a2: u32, a3: i32) -> ::c_long; + /* 11 */ + #[link_name = "SYS_munmap"] + pub fn __syscall_SYS_munmap(a1: i32, a2: u32) -> ::c_long; + /* 12 */ + #[link_name = "SYS_brk"] + pub fn __syscall_SYS_brk(a1: i32) -> ::c_long; + /* 13 */ + #[link_name = "SYS_rt_sigaction"] + pub fn __syscall_SYS_rt_sigaction(a1: i32, a2: i32, a3: i32, a4: u32) -> ::c_long; + /* 14 */ + #[link_name = "SYS_rt_sigprocmask"] + pub fn __syscall_SYS_rt_sigprocmask(a1: i32, a2: i32, a3: i32, a4: u32) -> ::c_long; + /* 15 */ + #[link_name = "SYS_rt_sigreturn"] + pub fn __syscall_SYS_rt_sigreturn(a1: i64) -> ::c_long; + /* 16 */ + #[link_name = "SYS_ioctl"] + pub fn __syscall_SYS_ioctl(a1: i32, a2: i32, a3: i32) -> ::c_long; + /* 17 */ + #[link_name = "SYS_pread64"] + pub fn __syscall_SYS_pread64(a1: i32, a2: i32, a3: u32, a4: i64) -> ::c_long; + /* 18 */ + #[link_name = "SYS_pwrite64"] + pub fn __syscall_SYS_pwrite64(a1: i32, a2: i32, a3: u32, a4: i64) -> ::c_long; + /* 19 */ + #[link_name = "SYS_readv"] + pub fn __syscall_SYS_readv(a1: i32, a2: i32, a3: i32) -> ::c_long; + /* 20 */ + #[link_name = "SYS_writev"] + pub fn __syscall_SYS_writev(a1: i32, a2: i32, a3: i32) -> ::c_long; + /* 21 */ + #[link_name = "SYS_access"] + pub fn __syscall_SYS_access(a1: i32, a2: i32) -> ::c_long; + /* 22 */ + #[link_name = "SYS_pipe"] + pub fn __syscall_SYS_pipe(a1: i32) -> ::c_long; + /* 23 */ + #[link_name = "SYS_select"] + pub fn __syscall_SYS_select(a1: i32, a2: i32, a3: i32, a4: i32, a5: i32) -> ::c_long; + /* 24 */ + #[link_name = "SYS_sched_yield"] + pub fn __syscall_SYS_sched_yield() -> ::c_long; + /* 25 */ + #[link_name = "SYS_mremap"] + pub fn __syscall_SYS_mremap(a1: i32, a2: u32, a3: u32, a4: i32, a5: i32) -> ::c_long; + /* 26 */ + #[link_name = "SYS_msync"] + pub fn __syscall_SYS_msync(a1: i32, a2: u32, a3: i32) -> ::c_long; + /* 28 */ + #[link_name = "SYS_madvise"] + pub fn 
__syscall_SYS_madvise(a1: i32, a2: u32, a3: i32) -> ::c_long; + /* 32 */ + #[link_name = "SYS_dup"] + pub fn __syscall_SYS_dup(a1: i32) -> ::c_long; + /* 33 */ + #[link_name = "SYS_dup2"] + pub fn __syscall_SYS_dup2(a1: i32, a2: i32) -> ::c_long; + /* 35 */ + #[link_name = "SYS_nanosleep"] + pub fn __syscall_SYS_nanosleep(a1: i32, a2: i32) -> ::c_long; + /* 37 */ + #[link_name = "SYS_alarm"] + pub fn __syscall_SYS_alarm(a1: i32) -> ::c_long; + /* 38 */ + #[link_name = "SYS_setitimer"] + pub fn __syscall_SYS_setitimer(a1: i32, a2: i32, a3: i32) -> ::c_long; + /* 39 */ + #[link_name = "SYS_getpid"] + pub fn __syscall_SYS_getpid() -> ::c_long; + /* 41 */ + #[link_name = "SYS_socket"] + pub fn __syscall_SYS_socket(a1: i32, a2: i32, a3: i32) -> ::c_long; + /* 42 */ + #[link_name = "SYS_connect"] + pub fn __syscall_SYS_connect(a1: i32, a2: i32, a3: u32) -> ::c_long; + /* 43 */ + #[link_name = "SYS_accept"] + pub fn __syscall_SYS_accept(a1: i32, a2: i32, a3: i32) -> ::c_long; + /* 44 */ + #[link_name = "SYS_sendto"] + pub fn __syscall_SYS_sendto(a1: i32, a2: i32, a3: u32, a4: i32, a5: i32, a6: u32) -> ::c_long; + /* 45 */ + #[link_name = "SYS_recvfrom"] + pub fn __syscall_SYS_recvfrom(a1: i32, a2: i32, a3: u32, a4: i32, a5: i32, a6: i32) + -> ::c_long; + /* 46 */ + #[link_name = "SYS_sendmsg"] + pub fn __syscall_SYS_sendmsg(a1: i32, a2: i32, a3: i32) -> ::c_long; + /* 47 */ + #[link_name = "SYS_recvmsg"] + pub fn __syscall_SYS_recvmsg(a1: i32, a2: i32, a3: i32) -> ::c_long; + /* 48 */ + #[link_name = "SYS_shutdown"] + pub fn __syscall_SYS_shutdown(a1: i32, a2: i32) -> ::c_long; + /* 49 */ + #[link_name = "SYS_bind"] + pub fn __syscall_SYS_bind(a1: i32, a2: i32, a3: u32) -> ::c_long; + /* 50 */ + #[link_name = "SYS_listen"] + pub fn __syscall_SYS_listen(a1: i32, a2: i32) -> ::c_long; + /* 51 */ + #[link_name = "SYS_getsockname"] + pub fn __syscall_SYS_getsockname(a1: i32, a2: i32, a3: i32) -> ::c_long; + /* 52 */ + #[link_name = "SYS_getpeername"] + pub fn __syscall_SYS_getpeername(a1: i32, a2: i32, a3: i32) -> ::c_long; + /* 53 */ + #[link_name = "SYS_socketpair"] + pub fn __syscall_SYS_socketpair(a1: i32, a2: i32, a3: i32, a4: i32) -> ::c_long; + /* 54 */ + #[link_name = "SYS_setsockopt"] + pub fn __syscall_SYS_setsockopt(a1: i32, a2: i32, a3: i32, a4: i32, a5: u32) -> ::c_long; + /* 55 */ + #[link_name = "SYS_getsockopt"] + pub fn __syscall_SYS_getsockopt(a1: i32, a2: i32, a3: i32, a4: i32, a5: i32) -> ::c_long; + /* 57 */ + #[link_name = "SYS_fork"] + pub fn __syscall_SYS_fork() -> ::c_long; + /* 59 */ + #[link_name = "SYS_execve"] + pub fn __syscall_SYS_execve(a1: i32, a2: i32, a3: i32) -> ::c_long; + /* 60 */ + #[link_name = "SYS_exit"] + pub fn __syscall_SYS_exit(a1: i32) -> ::c_long; + /* 61 */ + #[link_name = "SYS_wait4"] + pub fn __syscall_SYS_wait4(a1: i32, a2: i32, a3: i32, a4: i32) -> ::c_long; + /* 62 */ + #[link_name = "SYS_kill"] + pub fn __syscall_SYS_kill(a1: i32, a2: i32) -> ::c_long; + /* 63 */ + #[link_name = "SYS_uname"] + pub fn __syscall_SYS_uname(a1: i32) -> ::c_long; + /* 72 */ + #[link_name = "SYS_fcntl"] + pub fn __syscall_SYS_fcntl(a1: i32, a2: i32, a3: i32) -> ::c_long; + /* 73 */ + #[link_name = "SYS_flock"] + pub fn __syscall_SYS_flock(a1: i32, a2: i32) -> ::c_long; + /* 74 */ + #[link_name = "SYS_fsync"] + pub fn __syscall_SYS_fsync(a1: i32) -> ::c_long; + /* 75 */ + #[link_name = "SYS_fdatasync"] + pub fn __syscall_SYS_fdatasync(a1: i32) -> ::c_long; + /* 77 */ + #[link_name = "SYS_ftruncate"] + pub fn __syscall_SYS_ftruncate(a1: i32, a2: i64) -> ::c_long; + /* 78 
*/ + #[link_name = "SYS_getdents"] + pub fn __syscall_SYS_getdents(a1: i32, a2: i32, a3: i32) -> ::c_long; + /* 79 */ + #[link_name = "SYS_getcwd"] + pub fn __syscall_SYS_getcwd(a1: i32, a2: u32) -> ::c_long; + /* 80 */ + #[link_name = "SYS_chdir"] + pub fn __syscall_SYS_chdir(a1: i32) -> ::c_long; + /* 81 */ + #[link_name = "SYS_fchdir"] + pub fn __syscall_SYS_fchdir(a1: i32) -> ::c_long; + /* 82 */ + #[link_name = "SYS_rename"] + pub fn __syscall_SYS_rename(a1: i32, a2: i32) -> ::c_long; + /* 83 */ + #[link_name = "SYS_mkdir"] + pub fn __syscall_SYS_mkdir(a1: i32, a2: i32) -> ::c_long; + /* 84 */ + #[link_name = "SYS_rmdir"] + pub fn __syscall_SYS_rmdir(a1: i32) -> ::c_long; + /* 86 */ + #[link_name = "SYS_link"] + pub fn __syscall_SYS_link(a1: i32, a2: i32) -> ::c_long; + /* 87 */ + #[link_name = "SYS_unlink"] + pub fn __syscall_SYS_unlink(a1: i32) -> ::c_long; + /* 88 */ + #[link_name = "SYS_symlink"] + pub fn __syscall_SYS_symlink(a1: i32, a2: i32) -> ::c_long; + /* 89 */ + #[link_name = "SYS_readlink"] + pub fn __syscall_SYS_readlink(a1: i32, a2: i32, a3: u32) -> ::c_long; + /* 90 */ + #[link_name = "SYS_chmod"] + pub fn __syscall_SYS_chmod(a1: i32, a2: i32) -> ::c_long; + /* 91 */ + #[link_name = "SYS_fchmod"] + pub fn __syscall_SYS_fchmod(a1: i32, a2: i32) -> ::c_long; + /* 92 */ + #[link_name = "SYS_chown"] + pub fn __syscall_SYS_chown(a1: i32, a2: i32, a3: i32) -> ::c_long; + /* 93 */ + #[link_name = "SYS_fchown"] + pub fn __syscall_SYS_fchown(a1: i32, a2: i32, a3: i32) -> ::c_long; + /* 95 */ + #[link_name = "SYS_umask"] + pub fn __syscall_SYS_umask(a1: i32) -> ::c_long; + /* 97 */ + #[link_name = "SYS_getrlimit"] + pub fn __syscall_SYS_getrlimit(a1: i32, a2: i32) -> ::c_long; + /* 98 */ + #[link_name = "SYS_getrusage"] + pub fn __syscall_SYS_getrusage(a1: i32, a2: i32) -> ::c_long; + /* 99 */ + #[link_name = "SYS_sysinfo"] + pub fn __syscall_SYS_sysinfo(a1: i32) -> ::c_long; + /* 102 */ + #[link_name = "SYS_getuid"] + pub fn __syscall_SYS_getuid() -> ::c_long; + /* 104 */ + #[link_name = "SYS_getgid"] + pub fn __syscall_SYS_getgid() -> ::c_long; + /* 105 */ + #[link_name = "SYS_setuid"] + pub fn __syscall_SYS_setuid(a1: i32) -> ::c_long; + /* 106 */ + #[link_name = "SYS_setgid"] + pub fn __syscall_SYS_setgid(a1: i32) -> ::c_long; + /* 107 */ + #[link_name = "SYS_geteuid"] + pub fn __syscall_SYS_geteuid() -> ::c_long; + /* 108 */ + #[link_name = "SYS_getegid"] + pub fn __syscall_SYS_getegid() -> ::c_long; + /* 109 */ + #[link_name = "SYS_setpgid"] + pub fn __syscall_SYS_setpgid(a1: i32, a2: i32) -> ::c_long; + /* 110 */ + #[link_name = "SYS_getppid"] + pub fn __syscall_SYS_getppid() -> ::c_long; + /* 112 */ + #[link_name = "SYS_setsid"] + pub fn __syscall_SYS_setsid() -> ::c_long; + /* 113 */ + #[link_name = "SYS_setreuid"] + pub fn __syscall_SYS_setreuid(a1: i32, a2: i32) -> ::c_long; + /* 114 */ + #[link_name = "SYS_setregid"] + pub fn __syscall_SYS_setregid(a1: i32, a2: i32) -> ::c_long; + /* 115 */ + #[link_name = "SYS_getgroups"] + pub fn __syscall_SYS_getgroups(a1: u32, a2: i32) -> ::c_long; + /* 116 */ + #[link_name = "SYS_setgroups"] + pub fn __syscall_SYS_setgroups(a1: u32, a2: i32) -> ::c_long; + /* 117 */ + #[link_name = "SYS_setresuid"] + pub fn __syscall_SYS_setresuid(a1: i32, a2: i32, a3: i32) -> ::c_long; + /* 119 */ + #[link_name = "SYS_setresgid"] + pub fn __syscall_SYS_setresgid(a1: i32, a2: i32, a3: i32) -> ::c_long; + /* 121 */ + #[link_name = "SYS_getpgid"] + pub fn __syscall_SYS_getpgid(a1: i32) -> ::c_long; + /* 124 */ + #[link_name = "SYS_getsid"] + pub 
fn __syscall_SYS_getsid(a1: i32) -> ::c_long; + /* 127 */ + #[link_name = "SYS_rt_sigpending"] + pub fn __syscall_SYS_rt_sigpending(a1: i32, a2: u32) -> ::c_long; + /* 130 */ + #[link_name = "SYS_rt_sigsuspend"] + pub fn __syscall_SYS_rt_sigsuspend(a1: i32, a2: u32) -> ::c_long; + /* 131 */ + #[link_name = "SYS_sigaltstack"] + pub fn __syscall_SYS_sigaltstack(a1: i32, a2: i32) -> ::c_long; + /* 132 */ + #[link_name = "SYS_utime"] + pub fn __syscall_SYS_utime(a1: i32, a2: i32) -> ::c_long; + /* 137 */ + #[link_name = "SYS_statfs"] + pub fn __syscall_SYS_statfs(a1: i32, a2: i32) -> ::c_long; + /* 138 */ + #[link_name = "SYS_fstatfs"] + pub fn __syscall_SYS_fstatfs(a1: i32, a2: i32) -> ::c_long; + /* 157 */ + #[link_name = "SYS_prctl"] + pub fn __syscall_SYS_prctl(a1: i32, a2: u64, a3: u64, a4: u64, a5: u64) -> ::c_long; + /* 160 */ + #[link_name = "SYS_setrlimit"] + pub fn __syscall_SYS_setrlimit(a1: i32, a2: i32) -> ::c_long; + /* 161 */ + #[link_name = "SYS_chroot"] + pub fn __syscall_SYS_chroot(a1: i32) -> ::c_long; + /* 186 */ + #[link_name = "SYS_gettid"] + pub fn __syscall_SYS_gettid() -> ::c_long; + /* 200 */ + #[link_name = "SYS_tkill"] + pub fn __syscall_SYS_tkill(a1: i32, a2: i32) -> ::c_long; + /* 202 */ + #[link_name = "SYS_futex"] + pub fn __syscall_SYS_futex(a1: i32, a2: i32, a3: i32, a4: i32, a5: i32, a6: i32) -> ::c_long; + /* 204 */ + #[link_name = "SYS_sched_getaffinity"] + pub fn __syscall_SYS_sched_getaffinity(a1: i32, a2: u32, a3: i32) -> ::c_long; + /* 217 */ + #[link_name = "SYS_getdents64"] + pub fn __syscall_SYS_getdents64(a1: i32, a2: i32, a3: i32) -> ::c_long; + /* 218 */ + #[link_name = "SYS_set_tid_address"] + pub fn __syscall_SYS_set_tid_address(a1: i32) -> ::c_long; + /* 221 */ + #[link_name = "SYS_fadvise"] + pub fn __syscall_SYS_fadvise(a1: i32, a2: i64, a3: i64, a4: i32) -> ::c_long; + /* 228 */ + #[link_name = "SYS_clock_gettime"] + pub fn __syscall_SYS_clock_gettime(a1: i32, a2: i32) -> ::c_long; + /* 229 */ + #[link_name = "SYS_clock_getres"] + pub fn __syscall_SYS_clock_getres(a1: i32, a2: i32) -> ::c_long; + /* 230 */ + #[link_name = "SYS_clock_nanosleep"] + pub fn __syscall_SYS_clock_nanosleep(a1: i32, a2: i32, a3: i32, a4: i32) -> ::c_long; + /* 231 */ + #[link_name = "SYS_exit_group"] + pub fn __syscall_SYS_exit_group(a1: i32) -> ::c_long; + /* 233 */ + #[link_name = "SYS_epoll_ctl"] + pub fn __syscall_SYS_epoll_ctl(a1: i32, a2: i32, a3: i32, a4: i32) -> ::c_long; + /* 257 */ + #[link_name = "SYS_openat"] + pub fn __syscall_SYS_openat(a1: i32, a2: i32, a3: i32, a4: i32) -> ::c_long; + /* 258 */ + #[link_name = "SYS_mkdirat"] + pub fn __syscall_SYS_mkdirat(a1: i32, a2: i32, a3: i32) -> ::c_long; + /* 260 */ + #[link_name = "SYS_fchownat"] + pub fn __syscall_SYS_fchownat(a1: i32, a2: i32, a3: i32, a4: i32, a5: i32) -> ::c_long; + /* 262 */ + #[link_name = "SYS_fstatat"] + pub fn __syscall_SYS_fstatat(a1: i32, a2: i32, a3: i32, a4: i32) -> ::c_long; + /* 263 */ + #[link_name = "SYS_unlinkat"] + pub fn __syscall_SYS_unlinkat(a1: i32, a2: i32, a3: i32) -> ::c_long; + /* 265 */ + #[link_name = "SYS_linkat"] + pub fn __syscall_SYS_linkat(a1: i32, a2: i32, a3: i32, a4: i32, a5: i32) -> ::c_long; + /* 266 */ + #[link_name = "SYS_symlinkat"] + pub fn __syscall_SYS_symlinkat(a1: i32, a2: i32, a3: i32) -> ::c_long; + /* 267 */ + #[link_name = "SYS_readlinkat"] + pub fn __syscall_SYS_readlinkat(a1: i32, a2: i32, a3: i32, a4: u32) -> ::c_long; + /* 268 */ + #[link_name = "SYS_fchmodat"] + pub fn __syscall_SYS_fchmodat(a1: i32, a2: i32, a3: i32, a4: i32) -> 
::c_long; + /* 269 */ + #[link_name = "SYS_faccessat"] + pub fn __syscall_SYS_faccessat(a1: i32, a2: i32, a3: i32, a4: i32) -> ::c_long; + /* 270 */ + #[link_name = "SYS_pselect6"] + pub fn __syscall_SYS_pselect6(a1: i32, a2: i32, a3: i32, a4: i32, a5: i32, a6: i32) + -> ::c_long; + /* 271 */ + #[link_name = "SYS_ppoll"] + pub fn __syscall_SYS_ppoll(a1: i32, a2: u32, a3: i32, a4: i32, a5: u32) -> ::c_long; + /* 280 */ + #[link_name = "SYS_utimensat"] + pub fn __syscall_SYS_utimensat(a1: i32, a2: i32, a3: i32, a4: i32) -> ::c_long; + /* 281 */ + #[link_name = "SYS_epoll_pwait"] + pub fn __syscall_SYS_epoll_pwait( + a1: i32, + a2: i32, + a3: i32, + a4: i32, + a5: i32, + a6: u32, + ) -> ::c_long; + /* 284 */ + #[link_name = "SYS_eventfd"] + pub fn __syscall_SYS_eventfd(a1: i32) -> ::c_long; + /* 288 */ + #[link_name = "SYS_accept4"] + pub fn __syscall_SYS_accept4(a1: i32, a2: i32, a3: i32, a4: i32) -> ::c_long; + /* 290 */ + #[link_name = "SYS_eventfd2"] + pub fn __syscall_SYS_eventfd2(a1: i32, a2: i32) -> ::c_long; + /* 291 */ + #[link_name = "SYS_epoll_create1"] + pub fn __syscall_SYS_epoll_create1(a1: i32) -> ::c_long; + /* 292 */ + #[link_name = "SYS_dup3"] + pub fn __syscall_SYS_dup3(a1: i32, a2: i32, a3: i32) -> ::c_long; + /* 293 */ + #[link_name = "SYS_pipe2"] + pub fn __syscall_SYS_pipe2(a1: i32, a2: i32) -> ::c_long; + /* 302 */ + #[link_name = "SYS_prlimit64"] + pub fn __syscall_SYS_prlimit64(a1: i32, a2: i32, a3: i32, a4: i32) -> ::c_long; + /* 316 */ + #[link_name = "SYS_renameat2"] + pub fn __syscall_SYS_renameat2(a1: i32, a2: i32, a3: i32, a4: i32, a5: i32) -> ::c_long; + /* 318 */ + #[link_name = "SYS_getrandom"] + pub fn __syscall_SYS_getrandom(a1: i32, a2: u32, a3: i32) -> ::c_long; + /* 332 */ + #[link_name = "SYS_statx"] + pub fn __syscall_SYS_statx(a1: i32, a2: i32, a3: i32, a4: i32, a5: i32) -> ::c_long; + /* 439 */ + #[link_name = "SYS_faccessat2"] + pub fn __syscall_SYS_faccessat2(a1: i32, a2: i32, a3: i32, a4: i32) -> ::c_long; +} diff --git a/vendor/libc/src/unix/linux_like/linux/musl/b64/x86_64/mod.rs b/vendor/libc/src/unix/linux_like/linux/musl/b64/x86_64/mod.rs new file mode 100644 index 00000000000000..ce8319f015e975 --- /dev/null +++ b/vendor/libc/src/unix/linux_like/linux/musl/b64/x86_64/mod.rs @@ -0,0 +1,915 @@ +use crate::off_t; +use crate::prelude::*; + +pub type wchar_t = i32; +pub type nlink_t = u64; +pub type blksize_t = c_long; +pub type __u64 = c_ulonglong; +pub type __s64 = c_longlong; +pub type greg_t = i64; + +s! 
{ + pub struct stat { + pub st_dev: crate::dev_t, + pub st_ino: crate::ino_t, + pub st_nlink: crate::nlink_t, + pub st_mode: crate::mode_t, + pub st_uid: crate::uid_t, + pub st_gid: crate::gid_t, + __pad0: c_int, + pub st_rdev: crate::dev_t, + pub st_size: off_t, + pub st_blksize: crate::blksize_t, + pub st_blocks: crate::blkcnt_t, + pub st_atime: crate::time_t, + pub st_atime_nsec: c_long, + pub st_mtime: crate::time_t, + pub st_mtime_nsec: c_long, + pub st_ctime: crate::time_t, + pub st_ctime_nsec: c_long, + __unused: [c_long; 3], + } + + pub struct stat64 { + pub st_dev: crate::dev_t, + pub st_ino: crate::ino64_t, + pub st_nlink: crate::nlink_t, + pub st_mode: crate::mode_t, + pub st_uid: crate::uid_t, + pub st_gid: crate::gid_t, + __pad0: c_int, + pub st_rdev: crate::dev_t, + pub st_size: off_t, + pub st_blksize: crate::blksize_t, + pub st_blocks: crate::blkcnt64_t, + pub st_atime: crate::time_t, + pub st_atime_nsec: c_long, + pub st_mtime: crate::time_t, + pub st_mtime_nsec: c_long, + pub st_ctime: crate::time_t, + pub st_ctime_nsec: c_long, + __reserved: [c_long; 3], + } + + pub struct user_regs_struct { + pub r15: c_ulong, + pub r14: c_ulong, + pub r13: c_ulong, + pub r12: c_ulong, + pub rbp: c_ulong, + pub rbx: c_ulong, + pub r11: c_ulong, + pub r10: c_ulong, + pub r9: c_ulong, + pub r8: c_ulong, + pub rax: c_ulong, + pub rcx: c_ulong, + pub rdx: c_ulong, + pub rsi: c_ulong, + pub rdi: c_ulong, + pub orig_rax: c_ulong, + pub rip: c_ulong, + pub cs: c_ulong, + pub eflags: c_ulong, + pub rsp: c_ulong, + pub ss: c_ulong, + pub fs_base: c_ulong, + pub gs_base: c_ulong, + pub ds: c_ulong, + pub es: c_ulong, + pub fs: c_ulong, + pub gs: c_ulong, + } + + pub struct user { + pub regs: user_regs_struct, + pub u_fpvalid: c_int, + pub i387: user_fpregs_struct, + pub u_tsize: c_ulong, + pub u_dsize: c_ulong, + pub u_ssize: c_ulong, + pub start_code: c_ulong, + pub start_stack: c_ulong, + pub signal: c_long, + __reserved: c_int, + #[cfg(target_pointer_width = "32")] + __pad1: u32, + pub u_ar0: *mut user_regs_struct, + #[cfg(target_pointer_width = "32")] + __pad2: u32, + pub u_fpstate: *mut user_fpregs_struct, + pub magic: c_ulong, + pub u_comm: [c_char; 32], + pub u_debugreg: [c_ulong; 8], + } + + // GitHub repo: ifduyue/musl/ + // commit: b4b1e10364c8737a632be61582e05a8d3acf5690 + // file: arch/x86_64/bits/signal.h#L80-L84 + pub struct mcontext_t { + pub gregs: [greg_t; 23], + __private: [u64; 9], + } + + pub struct ipc_perm { + #[cfg(musl_v1_2_3)] + pub __key: crate::key_t, + #[cfg(not(musl_v1_2_3))] + #[deprecated( + since = "0.2.173", + note = "This field is incorrectly named and will be changed + to __key in a future release." + )] + pub __ipc_perm_key: crate::key_t, + pub uid: crate::uid_t, + pub gid: crate::gid_t, + pub cuid: crate::uid_t, + pub cgid: crate::gid_t, + pub mode: crate::mode_t, + pub __seq: c_int, + __unused1: c_long, + __unused2: c_long, + } + + #[repr(align(8))] + pub struct clone_args { + pub flags: c_ulonglong, + pub pidfd: c_ulonglong, + pub child_tid: c_ulonglong, + pub parent_tid: c_ulonglong, + pub exit_signal: c_ulonglong, + pub stack: c_ulonglong, + pub stack_size: c_ulonglong, + pub tls: c_ulonglong, + pub set_tid: c_ulonglong, + pub set_tid_size: c_ulonglong, + pub cgroup: c_ulonglong, + } +} + +s_no_extra_traits! 
{ + pub struct user_fpregs_struct { + pub cwd: c_ushort, + pub swd: c_ushort, + pub ftw: c_ushort, + pub fop: c_ushort, + pub rip: c_ulong, + pub rdp: c_ulong, + pub mxcsr: c_uint, + pub mxcr_mask: c_uint, + pub st_space: [c_uint; 32], + pub xmm_space: [c_uint; 64], + padding: [c_uint; 24], + } + + pub struct ucontext_t { + pub uc_flags: c_ulong, + pub uc_link: *mut ucontext_t, + pub uc_stack: crate::stack_t, + pub uc_mcontext: mcontext_t, + pub uc_sigmask: crate::sigset_t, + __private: [u8; 512], + } + + #[repr(align(16))] + pub struct max_align_t { + priv_: [f64; 4], + } +} + +cfg_if! { + if #[cfg(feature = "extra_traits")] { + impl PartialEq for user_fpregs_struct { + fn eq(&self, other: &user_fpregs_struct) -> bool { + self.cwd == other.cwd + && self.swd == other.swd + && self.ftw == other.ftw + && self.fop == other.fop + && self.rip == other.rip + && self.rdp == other.rdp + && self.mxcsr == other.mxcsr + && self.mxcr_mask == other.mxcr_mask + && self.st_space == other.st_space + && self + .xmm_space + .iter() + .zip(other.xmm_space.iter()) + .all(|(a, b)| a == b) + // Ignore padding field + } + } + + impl Eq for user_fpregs_struct {} + + impl hash::Hash for user_fpregs_struct { + fn hash(&self, state: &mut H) { + self.cwd.hash(state); + self.ftw.hash(state); + self.fop.hash(state); + self.rip.hash(state); + self.rdp.hash(state); + self.mxcsr.hash(state); + self.mxcr_mask.hash(state); + self.st_space.hash(state); + self.xmm_space.hash(state); + // Ignore padding field + } + } + + impl PartialEq for ucontext_t { + fn eq(&self, other: &ucontext_t) -> bool { + self.uc_flags == other.uc_flags + && self.uc_link == other.uc_link + && self.uc_stack == other.uc_stack + && self.uc_mcontext == other.uc_mcontext + && self.uc_sigmask == other.uc_sigmask + && self + .__private + .iter() + .zip(other.__private.iter()) + .all(|(a, b)| a == b) + } + } + + impl Eq for ucontext_t {} + + impl hash::Hash for ucontext_t { + fn hash(&self, state: &mut H) { + self.uc_flags.hash(state); + self.uc_link.hash(state); + self.uc_stack.hash(state); + self.uc_mcontext.hash(state); + self.uc_sigmask.hash(state); + self.__private.hash(state); + } + } + } +} + +// Syscall table + +pub const SYS_read: c_long = 0; +pub const SYS_write: c_long = 1; +pub const SYS_open: c_long = 2; +pub const SYS_close: c_long = 3; +pub const SYS_stat: c_long = 4; +pub const SYS_fstat: c_long = 5; +pub const SYS_lstat: c_long = 6; +pub const SYS_poll: c_long = 7; +pub const SYS_lseek: c_long = 8; +pub const SYS_mmap: c_long = 9; +pub const SYS_mprotect: c_long = 10; +pub const SYS_munmap: c_long = 11; +pub const SYS_brk: c_long = 12; +pub const SYS_rt_sigaction: c_long = 13; +pub const SYS_rt_sigprocmask: c_long = 14; +pub const SYS_rt_sigreturn: c_long = 15; +pub const SYS_ioctl: c_long = 16; +pub const SYS_pread64: c_long = 17; +pub const SYS_pwrite64: c_long = 18; +pub const SYS_readv: c_long = 19; +pub const SYS_writev: c_long = 20; +pub const SYS_access: c_long = 21; +pub const SYS_pipe: c_long = 22; +pub const SYS_select: c_long = 23; +pub const SYS_sched_yield: c_long = 24; +pub const SYS_mremap: c_long = 25; +pub const SYS_msync: c_long = 26; +pub const SYS_mincore: c_long = 27; +pub const SYS_madvise: c_long = 28; +pub const SYS_shmget: c_long = 29; +pub const SYS_shmat: c_long = 30; +pub const SYS_shmctl: c_long = 31; +pub const SYS_dup: c_long = 32; +pub const SYS_dup2: c_long = 33; +pub const SYS_pause: c_long = 34; +pub const SYS_nanosleep: c_long = 35; +pub const SYS_getitimer: c_long = 36; +pub const SYS_alarm: c_long = 37; 
+pub const SYS_setitimer: c_long = 38; +pub const SYS_getpid: c_long = 39; +pub const SYS_sendfile: c_long = 40; +pub const SYS_socket: c_long = 41; +pub const SYS_connect: c_long = 42; +pub const SYS_accept: c_long = 43; +pub const SYS_sendto: c_long = 44; +pub const SYS_recvfrom: c_long = 45; +pub const SYS_sendmsg: c_long = 46; +pub const SYS_recvmsg: c_long = 47; +pub const SYS_shutdown: c_long = 48; +pub const SYS_bind: c_long = 49; +pub const SYS_listen: c_long = 50; +pub const SYS_getsockname: c_long = 51; +pub const SYS_getpeername: c_long = 52; +pub const SYS_socketpair: c_long = 53; +pub const SYS_setsockopt: c_long = 54; +pub const SYS_getsockopt: c_long = 55; +pub const SYS_clone: c_long = 56; +pub const SYS_fork: c_long = 57; +pub const SYS_vfork: c_long = 58; +pub const SYS_execve: c_long = 59; +pub const SYS_exit: c_long = 60; +pub const SYS_wait4: c_long = 61; +pub const SYS_kill: c_long = 62; +pub const SYS_uname: c_long = 63; +pub const SYS_semget: c_long = 64; +pub const SYS_semop: c_long = 65; +pub const SYS_semctl: c_long = 66; +pub const SYS_shmdt: c_long = 67; +pub const SYS_msgget: c_long = 68; +pub const SYS_msgsnd: c_long = 69; +pub const SYS_msgrcv: c_long = 70; +pub const SYS_msgctl: c_long = 71; +pub const SYS_fcntl: c_long = 72; +pub const SYS_flock: c_long = 73; +pub const SYS_fsync: c_long = 74; +pub const SYS_fdatasync: c_long = 75; +pub const SYS_truncate: c_long = 76; +pub const SYS_ftruncate: c_long = 77; +pub const SYS_getdents: c_long = 78; +pub const SYS_getcwd: c_long = 79; +pub const SYS_chdir: c_long = 80; +pub const SYS_fchdir: c_long = 81; +pub const SYS_rename: c_long = 82; +pub const SYS_mkdir: c_long = 83; +pub const SYS_rmdir: c_long = 84; +pub const SYS_creat: c_long = 85; +pub const SYS_link: c_long = 86; +pub const SYS_unlink: c_long = 87; +pub const SYS_symlink: c_long = 88; +pub const SYS_readlink: c_long = 89; +pub const SYS_chmod: c_long = 90; +pub const SYS_fchmod: c_long = 91; +pub const SYS_chown: c_long = 92; +pub const SYS_fchown: c_long = 93; +pub const SYS_lchown: c_long = 94; +pub const SYS_umask: c_long = 95; +pub const SYS_gettimeofday: c_long = 96; +pub const SYS_getrlimit: c_long = 97; +pub const SYS_getrusage: c_long = 98; +pub const SYS_sysinfo: c_long = 99; +pub const SYS_times: c_long = 100; +pub const SYS_ptrace: c_long = 101; +pub const SYS_getuid: c_long = 102; +pub const SYS_syslog: c_long = 103; +pub const SYS_getgid: c_long = 104; +pub const SYS_setuid: c_long = 105; +pub const SYS_setgid: c_long = 106; +pub const SYS_geteuid: c_long = 107; +pub const SYS_getegid: c_long = 108; +pub const SYS_setpgid: c_long = 109; +pub const SYS_getppid: c_long = 110; +pub const SYS_getpgrp: c_long = 111; +pub const SYS_setsid: c_long = 112; +pub const SYS_setreuid: c_long = 113; +pub const SYS_setregid: c_long = 114; +pub const SYS_getgroups: c_long = 115; +pub const SYS_setgroups: c_long = 116; +pub const SYS_setresuid: c_long = 117; +pub const SYS_getresuid: c_long = 118; +pub const SYS_setresgid: c_long = 119; +pub const SYS_getresgid: c_long = 120; +pub const SYS_getpgid: c_long = 121; +pub const SYS_setfsuid: c_long = 122; +pub const SYS_setfsgid: c_long = 123; +pub const SYS_getsid: c_long = 124; +pub const SYS_capget: c_long = 125; +pub const SYS_capset: c_long = 126; +pub const SYS_rt_sigpending: c_long = 127; +pub const SYS_rt_sigtimedwait: c_long = 128; +pub const SYS_rt_sigqueueinfo: c_long = 129; +pub const SYS_rt_sigsuspend: c_long = 130; +pub const SYS_sigaltstack: c_long = 131; +pub const SYS_utime: c_long = 132; 
+pub const SYS_mknod: c_long = 133; +pub const SYS_uselib: c_long = 134; +pub const SYS_personality: c_long = 135; +pub const SYS_ustat: c_long = 136; +pub const SYS_statfs: c_long = 137; +pub const SYS_fstatfs: c_long = 138; +pub const SYS_sysfs: c_long = 139; +pub const SYS_getpriority: c_long = 140; +pub const SYS_setpriority: c_long = 141; +pub const SYS_sched_setparam: c_long = 142; +pub const SYS_sched_getparam: c_long = 143; +pub const SYS_sched_setscheduler: c_long = 144; +pub const SYS_sched_getscheduler: c_long = 145; +pub const SYS_sched_get_priority_max: c_long = 146; +pub const SYS_sched_get_priority_min: c_long = 147; +pub const SYS_sched_rr_get_interval: c_long = 148; +pub const SYS_mlock: c_long = 149; +pub const SYS_munlock: c_long = 150; +pub const SYS_mlockall: c_long = 151; +pub const SYS_munlockall: c_long = 152; +pub const SYS_vhangup: c_long = 153; +pub const SYS_modify_ldt: c_long = 154; +pub const SYS_pivot_root: c_long = 155; +pub const SYS__sysctl: c_long = 156; +pub const SYS_prctl: c_long = 157; +pub const SYS_arch_prctl: c_long = 158; +pub const SYS_adjtimex: c_long = 159; +pub const SYS_setrlimit: c_long = 160; +pub const SYS_chroot: c_long = 161; +pub const SYS_sync: c_long = 162; +pub const SYS_acct: c_long = 163; +pub const SYS_settimeofday: c_long = 164; +pub const SYS_mount: c_long = 165; +pub const SYS_umount2: c_long = 166; +pub const SYS_swapon: c_long = 167; +pub const SYS_swapoff: c_long = 168; +pub const SYS_reboot: c_long = 169; +pub const SYS_sethostname: c_long = 170; +pub const SYS_setdomainname: c_long = 171; +pub const SYS_iopl: c_long = 172; +pub const SYS_ioperm: c_long = 173; +#[deprecated(since = "0.2.70", note = "Functional up to 2.6 kernel")] +pub const SYS_create_module: c_long = 174; +pub const SYS_init_module: c_long = 175; +pub const SYS_delete_module: c_long = 176; +#[deprecated(since = "0.2.70", note = "Functional up to 2.6 kernel")] +pub const SYS_get_kernel_syms: c_long = 177; +#[deprecated(since = "0.2.70", note = "Functional up to 2.6 kernel")] +pub const SYS_query_module: c_long = 178; +pub const SYS_quotactl: c_long = 179; +pub const SYS_nfsservctl: c_long = 180; +pub const SYS_getpmsg: c_long = 181; +pub const SYS_putpmsg: c_long = 182; +pub const SYS_afs_syscall: c_long = 183; +pub const SYS_tuxcall: c_long = 184; +pub const SYS_security: c_long = 185; +pub const SYS_gettid: c_long = 186; +pub const SYS_readahead: c_long = 187; +pub const SYS_setxattr: c_long = 188; +pub const SYS_lsetxattr: c_long = 189; +pub const SYS_fsetxattr: c_long = 190; +pub const SYS_getxattr: c_long = 191; +pub const SYS_lgetxattr: c_long = 192; +pub const SYS_fgetxattr: c_long = 193; +pub const SYS_listxattr: c_long = 194; +pub const SYS_llistxattr: c_long = 195; +pub const SYS_flistxattr: c_long = 196; +pub const SYS_removexattr: c_long = 197; +pub const SYS_lremovexattr: c_long = 198; +pub const SYS_fremovexattr: c_long = 199; +pub const SYS_tkill: c_long = 200; +pub const SYS_time: c_long = 201; +pub const SYS_futex: c_long = 202; +pub const SYS_sched_setaffinity: c_long = 203; +pub const SYS_sched_getaffinity: c_long = 204; +pub const SYS_set_thread_area: c_long = 205; +pub const SYS_io_setup: c_long = 206; +pub const SYS_io_destroy: c_long = 207; +pub const SYS_io_getevents: c_long = 208; +pub const SYS_io_submit: c_long = 209; +pub const SYS_io_cancel: c_long = 210; +pub const SYS_get_thread_area: c_long = 211; +pub const SYS_lookup_dcookie: c_long = 212; +pub const SYS_epoll_create: c_long = 213; +pub const SYS_epoll_ctl_old: c_long = 
214; +pub const SYS_epoll_wait_old: c_long = 215; +pub const SYS_remap_file_pages: c_long = 216; +pub const SYS_getdents64: c_long = 217; +pub const SYS_set_tid_address: c_long = 218; +pub const SYS_restart_syscall: c_long = 219; +pub const SYS_semtimedop: c_long = 220; +pub const SYS_fadvise64: c_long = 221; +pub const SYS_timer_create: c_long = 222; +pub const SYS_timer_settime: c_long = 223; +pub const SYS_timer_gettime: c_long = 224; +pub const SYS_timer_getoverrun: c_long = 225; +pub const SYS_timer_delete: c_long = 226; +pub const SYS_clock_settime: c_long = 227; +pub const SYS_clock_gettime: c_long = 228; +pub const SYS_clock_getres: c_long = 229; +pub const SYS_clock_nanosleep: c_long = 230; +pub const SYS_exit_group: c_long = 231; +pub const SYS_epoll_wait: c_long = 232; +pub const SYS_epoll_ctl: c_long = 233; +pub const SYS_tgkill: c_long = 234; +pub const SYS_utimes: c_long = 235; +pub const SYS_vserver: c_long = 236; +pub const SYS_mbind: c_long = 237; +pub const SYS_set_mempolicy: c_long = 238; +pub const SYS_get_mempolicy: c_long = 239; +pub const SYS_mq_open: c_long = 240; +pub const SYS_mq_unlink: c_long = 241; +pub const SYS_mq_timedsend: c_long = 242; +pub const SYS_mq_timedreceive: c_long = 243; +pub const SYS_mq_notify: c_long = 244; +pub const SYS_mq_getsetattr: c_long = 245; +pub const SYS_kexec_load: c_long = 246; +pub const SYS_waitid: c_long = 247; +pub const SYS_add_key: c_long = 248; +pub const SYS_request_key: c_long = 249; +pub const SYS_keyctl: c_long = 250; +pub const SYS_ioprio_set: c_long = 251; +pub const SYS_ioprio_get: c_long = 252; +pub const SYS_inotify_init: c_long = 253; +pub const SYS_inotify_add_watch: c_long = 254; +pub const SYS_inotify_rm_watch: c_long = 255; +pub const SYS_migrate_pages: c_long = 256; +pub const SYS_openat: c_long = 257; +pub const SYS_mkdirat: c_long = 258; +pub const SYS_mknodat: c_long = 259; +pub const SYS_fchownat: c_long = 260; +pub const SYS_futimesat: c_long = 261; +pub const SYS_newfstatat: c_long = 262; +pub const SYS_unlinkat: c_long = 263; +pub const SYS_renameat: c_long = 264; +pub const SYS_linkat: c_long = 265; +pub const SYS_symlinkat: c_long = 266; +pub const SYS_readlinkat: c_long = 267; +pub const SYS_fchmodat: c_long = 268; +pub const SYS_faccessat: c_long = 269; +pub const SYS_pselect6: c_long = 270; +pub const SYS_ppoll: c_long = 271; +pub const SYS_unshare: c_long = 272; +pub const SYS_set_robust_list: c_long = 273; +pub const SYS_get_robust_list: c_long = 274; +pub const SYS_splice: c_long = 275; +pub const SYS_tee: c_long = 276; +pub const SYS_sync_file_range: c_long = 277; +pub const SYS_vmsplice: c_long = 278; +pub const SYS_move_pages: c_long = 279; +pub const SYS_utimensat: c_long = 280; +pub const SYS_epoll_pwait: c_long = 281; +pub const SYS_signalfd: c_long = 282; +pub const SYS_timerfd_create: c_long = 283; +pub const SYS_eventfd: c_long = 284; +pub const SYS_fallocate: c_long = 285; +pub const SYS_timerfd_settime: c_long = 286; +pub const SYS_timerfd_gettime: c_long = 287; +pub const SYS_accept4: c_long = 288; +pub const SYS_signalfd4: c_long = 289; +pub const SYS_eventfd2: c_long = 290; +pub const SYS_epoll_create1: c_long = 291; +pub const SYS_dup3: c_long = 292; +pub const SYS_pipe2: c_long = 293; +pub const SYS_inotify_init1: c_long = 294; +pub const SYS_preadv: c_long = 295; +pub const SYS_pwritev: c_long = 296; +pub const SYS_rt_tgsigqueueinfo: c_long = 297; +pub const SYS_perf_event_open: c_long = 298; +pub const SYS_recvmmsg: c_long = 299; +pub const SYS_fanotify_init: c_long = 300; 
+pub const SYS_fanotify_mark: c_long = 301; +pub const SYS_prlimit64: c_long = 302; +pub const SYS_name_to_handle_at: c_long = 303; +pub const SYS_open_by_handle_at: c_long = 304; +pub const SYS_clock_adjtime: c_long = 305; +pub const SYS_syncfs: c_long = 306; +pub const SYS_sendmmsg: c_long = 307; +pub const SYS_setns: c_long = 308; +pub const SYS_getcpu: c_long = 309; +pub const SYS_process_vm_readv: c_long = 310; +pub const SYS_process_vm_writev: c_long = 311; +pub const SYS_kcmp: c_long = 312; +pub const SYS_finit_module: c_long = 313; +pub const SYS_sched_setattr: c_long = 314; +pub const SYS_sched_getattr: c_long = 315; +pub const SYS_renameat2: c_long = 316; +pub const SYS_seccomp: c_long = 317; +pub const SYS_getrandom: c_long = 318; +pub const SYS_memfd_create: c_long = 319; +pub const SYS_kexec_file_load: c_long = 320; +pub const SYS_bpf: c_long = 321; +pub const SYS_execveat: c_long = 322; +pub const SYS_userfaultfd: c_long = 323; +pub const SYS_membarrier: c_long = 324; +pub const SYS_mlock2: c_long = 325; +pub const SYS_copy_file_range: c_long = 326; +pub const SYS_preadv2: c_long = 327; +pub const SYS_pwritev2: c_long = 328; +pub const SYS_pkey_mprotect: c_long = 329; +pub const SYS_pkey_alloc: c_long = 330; +pub const SYS_pkey_free: c_long = 331; +pub const SYS_statx: c_long = 332; +pub const SYS_io_pgetevents: c_long = 333; +pub const SYS_rseq: c_long = 334; +pub const SYS_pidfd_send_signal: c_long = 424; +pub const SYS_io_uring_setup: c_long = 425; +pub const SYS_io_uring_enter: c_long = 426; +pub const SYS_io_uring_register: c_long = 427; +pub const SYS_open_tree: c_long = 428; +pub const SYS_move_mount: c_long = 429; +pub const SYS_fsopen: c_long = 430; +pub const SYS_fsconfig: c_long = 431; +pub const SYS_fsmount: c_long = 432; +pub const SYS_fspick: c_long = 433; +pub const SYS_pidfd_open: c_long = 434; +pub const SYS_clone3: c_long = 435; +pub const SYS_close_range: c_long = 436; +pub const SYS_openat2: c_long = 437; +pub const SYS_pidfd_getfd: c_long = 438; +pub const SYS_faccessat2: c_long = 439; +pub const SYS_process_madvise: c_long = 440; +pub const SYS_epoll_pwait2: c_long = 441; +pub const SYS_mount_setattr: c_long = 442; +pub const SYS_quotactl_fd: c_long = 443; +pub const SYS_landlock_create_ruleset: c_long = 444; +pub const SYS_landlock_add_rule: c_long = 445; +pub const SYS_landlock_restrict_self: c_long = 446; +pub const SYS_memfd_secret: c_long = 447; +pub const SYS_process_mrelease: c_long = 448; +pub const SYS_futex_waitv: c_long = 449; +pub const SYS_set_mempolicy_home_node: c_long = 450; +pub const SYS_fchmodat2: c_long = 452; +pub const SYS_mseal: c_long = 462; + +// offsets in user_regs_structs, from sys/reg.h +pub const R15: c_int = 0; +pub const R14: c_int = 1; +pub const R13: c_int = 2; +pub const R12: c_int = 3; +pub const RBP: c_int = 4; +pub const RBX: c_int = 5; +pub const R11: c_int = 6; +pub const R10: c_int = 7; +pub const R9: c_int = 8; +pub const R8: c_int = 9; +pub const RAX: c_int = 10; +pub const RCX: c_int = 11; +pub const RDX: c_int = 12; +pub const RSI: c_int = 13; +pub const RDI: c_int = 14; +pub const ORIG_RAX: c_int = 15; +pub const RIP: c_int = 16; +pub const CS: c_int = 17; +pub const EFLAGS: c_int = 18; +pub const RSP: c_int = 19; +pub const SS: c_int = 20; +pub const FS_BASE: c_int = 21; +pub const GS_BASE: c_int = 22; +pub const DS: c_int = 23; +pub const ES: c_int = 24; +pub const FS: c_int = 25; +pub const GS: c_int = 26; + +// offsets in mcontext_t.gregs from bits/signal.h +// GitHub repo: ifduyue/musl/ +// commit: 
b4b1e10364c8737a632be61582e05a8d3acf5690 +// file: arch/x86_64/bits/signal.h#L9-L56 +pub const REG_R8: c_int = 0; +pub const REG_R9: c_int = 1; +pub const REG_R10: c_int = 2; +pub const REG_R11: c_int = 3; +pub const REG_R12: c_int = 4; +pub const REG_R13: c_int = 5; +pub const REG_R14: c_int = 6; +pub const REG_R15: c_int = 7; +pub const REG_RDI: c_int = 8; +pub const REG_RSI: c_int = 9; +pub const REG_RBP: c_int = 10; +pub const REG_RBX: c_int = 11; +pub const REG_RDX: c_int = 12; +pub const REG_RAX: c_int = 13; +pub const REG_RCX: c_int = 14; +pub const REG_RSP: c_int = 15; +pub const REG_RIP: c_int = 16; +pub const REG_EFL: c_int = 17; +pub const REG_CSGSFS: c_int = 18; +pub const REG_ERR: c_int = 19; +pub const REG_TRAPNO: c_int = 20; +pub const REG_OLDMASK: c_int = 21; +pub const REG_CR2: c_int = 22; + +pub const MADV_SOFT_OFFLINE: c_int = 101; +pub const MAP_32BIT: c_int = 0x0040; +pub const O_APPEND: c_int = 1024; +pub const O_DIRECT: c_int = 0x4000; +pub const O_DIRECTORY: c_int = 0x10000; +pub const O_LARGEFILE: c_int = 0o0100000; +pub const O_NOFOLLOW: c_int = 0x20000; +pub const O_CREAT: c_int = 64; +pub const O_EXCL: c_int = 128; +pub const O_NOCTTY: c_int = 256; +pub const O_NONBLOCK: c_int = 2048; +pub const O_SYNC: c_int = 1052672; +pub const O_RSYNC: c_int = 1052672; +pub const O_DSYNC: c_int = 4096; +pub const O_ASYNC: c_int = 0x2000; + +pub const PTRACE_SYSEMU: c_int = 31; +pub const PTRACE_SYSEMU_SINGLESTEP: c_int = 32; + +pub const SIGSTKSZ: size_t = 8192; +pub const MINSIGSTKSZ: size_t = 2048; + +pub const ENAMETOOLONG: c_int = 36; +pub const ENOLCK: c_int = 37; +pub const ENOSYS: c_int = 38; +pub const ENOTEMPTY: c_int = 39; +pub const ELOOP: c_int = 40; +pub const ENOMSG: c_int = 42; +pub const EIDRM: c_int = 43; +pub const ECHRNG: c_int = 44; +pub const EL2NSYNC: c_int = 45; +pub const EL3HLT: c_int = 46; +pub const EL3RST: c_int = 47; +pub const ELNRNG: c_int = 48; +pub const EUNATCH: c_int = 49; +pub const ENOCSI: c_int = 50; +pub const EL2HLT: c_int = 51; +pub const EBADE: c_int = 52; +pub const EBADR: c_int = 53; +pub const EXFULL: c_int = 54; +pub const ENOANO: c_int = 55; +pub const EBADRQC: c_int = 56; +pub const EBADSLT: c_int = 57; +pub const EMULTIHOP: c_int = 72; +pub const EBADMSG: c_int = 74; +pub const EOVERFLOW: c_int = 75; +pub const ENOTUNIQ: c_int = 76; +pub const EBADFD: c_int = 77; +pub const EREMCHG: c_int = 78; +pub const ELIBACC: c_int = 79; +pub const ELIBBAD: c_int = 80; +pub const ELIBSCN: c_int = 81; +pub const ELIBMAX: c_int = 82; +pub const ELIBEXEC: c_int = 83; +pub const EILSEQ: c_int = 84; +pub const ERESTART: c_int = 85; +pub const ESTRPIPE: c_int = 86; +pub const EUSERS: c_int = 87; +pub const ENOTSOCK: c_int = 88; +pub const EDESTADDRREQ: c_int = 89; +pub const EMSGSIZE: c_int = 90; +pub const EPROTOTYPE: c_int = 91; +pub const ENOPROTOOPT: c_int = 92; +pub const EPROTONOSUPPORT: c_int = 93; +pub const ESOCKTNOSUPPORT: c_int = 94; +pub const EOPNOTSUPP: c_int = 95; +pub const ENOTSUP: c_int = EOPNOTSUPP; +pub const EPFNOSUPPORT: c_int = 96; +pub const EAFNOSUPPORT: c_int = 97; +pub const EADDRINUSE: c_int = 98; +pub const EADDRNOTAVAIL: c_int = 99; +pub const ENETDOWN: c_int = 100; +pub const ENETUNREACH: c_int = 101; +pub const ENETRESET: c_int = 102; +pub const ECONNABORTED: c_int = 103; +pub const ECONNRESET: c_int = 104; +pub const ENOBUFS: c_int = 105; +pub const EISCONN: c_int = 106; +pub const ENOTCONN: c_int = 107; +pub const ESHUTDOWN: c_int = 108; +pub const ETOOMANYREFS: c_int = 109; +pub const ETIMEDOUT: c_int = 110; 
+pub const ECONNREFUSED: c_int = 111; +pub const EHOSTDOWN: c_int = 112; +pub const EHOSTUNREACH: c_int = 113; +pub const EALREADY: c_int = 114; +pub const EINPROGRESS: c_int = 115; +pub const ESTALE: c_int = 116; +pub const EUCLEAN: c_int = 117; +pub const ENOTNAM: c_int = 118; +pub const ENAVAIL: c_int = 119; +pub const EISNAM: c_int = 120; +pub const EREMOTEIO: c_int = 121; +pub const EDQUOT: c_int = 122; +pub const ENOMEDIUM: c_int = 123; +pub const EMEDIUMTYPE: c_int = 124; +pub const ECANCELED: c_int = 125; +pub const ENOKEY: c_int = 126; +pub const EKEYEXPIRED: c_int = 127; +pub const EKEYREVOKED: c_int = 128; +pub const EKEYREJECTED: c_int = 129; +pub const EOWNERDEAD: c_int = 130; +pub const ENOTRECOVERABLE: c_int = 131; +pub const ERFKILL: c_int = 132; +pub const EHWPOISON: c_int = 133; + +pub const SA_ONSTACK: c_int = 0x08000000; +pub const SA_SIGINFO: c_int = 0x00000004; +pub const SA_NOCLDWAIT: c_int = 0x00000002; + +pub const SIGCHLD: c_int = 17; +pub const SIGBUS: c_int = 7; +pub const SIGTTIN: c_int = 21; +pub const SIGTTOU: c_int = 22; +pub const SIGXCPU: c_int = 24; +pub const SIGXFSZ: c_int = 25; +pub const SIGVTALRM: c_int = 26; +pub const SIGPROF: c_int = 27; +pub const SIGWINCH: c_int = 28; +pub const SIGUSR1: c_int = 10; +pub const SIGUSR2: c_int = 12; +pub const SIGCONT: c_int = 18; +pub const SIGSTOP: c_int = 19; +pub const SIGTSTP: c_int = 20; +pub const SIGURG: c_int = 23; +pub const SIGIO: c_int = 29; +pub const SIGSYS: c_int = 31; +pub const SIGSTKFLT: c_int = 16; +pub const SIGPOLL: c_int = 29; +pub const SIGPWR: c_int = 30; +pub const SIG_SETMASK: c_int = 2; +pub const SIG_BLOCK: c_int = 0x000000; +pub const SIG_UNBLOCK: c_int = 0x01; + +pub const F_GETLK: c_int = 5; +pub const F_GETOWN: c_int = 9; +pub const F_SETLK: c_int = 6; +pub const F_SETLKW: c_int = 7; +pub const F_SETOWN: c_int = 8; + +pub const VEOF: usize = 4; + +pub const POLLWRNORM: c_short = 0x100; +pub const POLLWRBAND: c_short = 0x200; + +pub const SOCK_STREAM: c_int = 1; +pub const SOCK_DGRAM: c_int = 2; + +pub const MAP_ANON: c_int = 0x0020; +pub const MAP_GROWSDOWN: c_int = 0x0100; +pub const MAP_DENYWRITE: c_int = 0x0800; +pub const MAP_EXECUTABLE: c_int = 0x01000; +pub const MAP_LOCKED: c_int = 0x02000; +pub const MAP_NORESERVE: c_int = 0x04000; +pub const MAP_POPULATE: c_int = 0x08000; +pub const MAP_NONBLOCK: c_int = 0x010000; +pub const MAP_STACK: c_int = 0x020000; +pub const MAP_HUGETLB: c_int = 0x040000; +pub const MAP_SYNC: c_int = 0x080000; + +pub const MCL_CURRENT: c_int = 0x0001; +pub const MCL_FUTURE: c_int = 0x0002; +pub const MCL_ONFAULT: c_int = 0x0004; +pub const CBAUD: crate::tcflag_t = 0o0010017; +pub const TAB1: c_int = 0x00000800; +pub const TAB2: c_int = 0x00001000; +pub const TAB3: c_int = 0x00001800; +pub const CR1: c_int = 0x00000200; +pub const CR2: c_int = 0x00000400; +pub const CR3: c_int = 0x00000600; +pub const FF1: c_int = 0x00008000; +pub const BS1: c_int = 0x00002000; +pub const VT1: c_int = 0x00004000; +pub const VWERASE: usize = 14; +pub const VREPRINT: usize = 12; +pub const VSUSP: usize = 10; +pub const VSTART: usize = 8; +pub const VSTOP: usize = 9; +pub const VDISCARD: usize = 13; +pub const VTIME: usize = 5; +pub const IXON: crate::tcflag_t = 0x00000400; +pub const IXOFF: crate::tcflag_t = 0x00001000; +pub const ONLCR: crate::tcflag_t = 0x4; +pub const CSIZE: crate::tcflag_t = 0x00000030; +pub const CS6: crate::tcflag_t = 0x00000010; +pub const CS7: crate::tcflag_t = 0x00000020; +pub const CS8: crate::tcflag_t = 0x00000030; +pub const CSTOPB: 
crate::tcflag_t = 0x00000040; +pub const CREAD: crate::tcflag_t = 0x00000080; +pub const PARENB: crate::tcflag_t = 0x00000100; +pub const PARODD: crate::tcflag_t = 0x00000200; +pub const HUPCL: crate::tcflag_t = 0x00000400; +pub const CLOCAL: crate::tcflag_t = 0x00000800; +pub const ECHOKE: crate::tcflag_t = 0x00000800; +pub const ECHOE: crate::tcflag_t = 0x00000010; +pub const ECHOK: crate::tcflag_t = 0x00000020; +pub const ECHONL: crate::tcflag_t = 0x00000040; +pub const ECHOPRT: crate::tcflag_t = 0x00000400; +pub const ECHOCTL: crate::tcflag_t = 0x00000200; +pub const ISIG: crate::tcflag_t = 0x00000001; +pub const ICANON: crate::tcflag_t = 0x00000002; +pub const PENDIN: crate::tcflag_t = 0x00004000; +pub const NOFLSH: crate::tcflag_t = 0x00000080; +pub const CIBAUD: crate::tcflag_t = 0o02003600000; +pub const CBAUDEX: crate::tcflag_t = 0o010000; +pub const VSWTC: usize = 7; +pub const OLCUC: crate::tcflag_t = 0o000002; +pub const NLDLY: crate::tcflag_t = 0o000400; +pub const CRDLY: crate::tcflag_t = 0o003000; +pub const TABDLY: crate::tcflag_t = 0o014000; +pub const BSDLY: crate::tcflag_t = 0o020000; +pub const FFDLY: crate::tcflag_t = 0o100000; +pub const VTDLY: crate::tcflag_t = 0o040000; +pub const XTABS: crate::tcflag_t = 0o014000; +pub const B57600: crate::speed_t = 0o010001; +pub const B115200: crate::speed_t = 0o010002; +pub const B230400: crate::speed_t = 0o010003; +pub const B460800: crate::speed_t = 0o010004; +pub const B500000: crate::speed_t = 0o010005; +pub const B576000: crate::speed_t = 0o010006; +pub const B921600: crate::speed_t = 0o010007; +pub const B1000000: crate::speed_t = 0o010010; +pub const B1152000: crate::speed_t = 0o010011; +pub const B1500000: crate::speed_t = 0o010012; +pub const B2000000: crate::speed_t = 0o010013; +pub const B2500000: crate::speed_t = 0o010014; +pub const B3000000: crate::speed_t = 0o010015; +pub const B3500000: crate::speed_t = 0o010016; +pub const B4000000: crate::speed_t = 0o010017; + +pub const EDEADLK: c_int = 35; +pub const EDEADLOCK: c_int = EDEADLK; + +pub const EXTPROC: crate::tcflag_t = 0x00010000; +pub const VEOL: usize = 11; +pub const VEOL2: usize = 16; +pub const VMIN: usize = 6; +pub const IEXTEN: crate::tcflag_t = 0x00008000; +pub const TOSTOP: crate::tcflag_t = 0x00000100; +pub const FLUSHO: crate::tcflag_t = 0x00001000; diff --git a/vendor/libc/src/unix/linux_like/linux/musl/lfs64.rs b/vendor/libc/src/unix/linux_like/linux/musl/lfs64.rs new file mode 100644 index 00000000000000..e6506fd3d385de --- /dev/null +++ b/vendor/libc/src/unix/linux_like/linux/musl/lfs64.rs @@ -0,0 +1,239 @@ +use crate::off64_t; +use crate::prelude::*; + +#[inline] +pub unsafe extern "C" fn creat64(path: *const c_char, mode: crate::mode_t) -> c_int { + crate::creat(path, mode) +} + +#[inline] +pub unsafe extern "C" fn fallocate64( + fd: c_int, + mode: c_int, + offset: off64_t, + len: off64_t, +) -> c_int { + crate::fallocate(fd, mode, offset, len) +} + +#[inline] +pub unsafe extern "C" fn fgetpos64(stream: *mut crate::FILE, pos: *mut crate::fpos64_t) -> c_int { + crate::fgetpos(stream, pos as *mut _) +} + +#[inline] +pub unsafe extern "C" fn fopen64(pathname: *const c_char, mode: *const c_char) -> *mut crate::FILE { + crate::fopen(pathname, mode) +} + +#[inline] +pub unsafe extern "C" fn freopen64( + pathname: *const c_char, + mode: *const c_char, + stream: *mut crate::FILE, +) -> *mut crate::FILE { + crate::freopen(pathname, mode, stream) +} + +#[inline] +pub unsafe extern "C" fn fseeko64( + stream: *mut crate::FILE, + offset: off64_t, + whence: 
c_int, +) -> c_int { + crate::fseeko(stream, offset, whence) +} + +#[inline] +pub unsafe extern "C" fn fsetpos64(stream: *mut crate::FILE, pos: *const crate::fpos64_t) -> c_int { + crate::fsetpos(stream, pos as *mut _) +} + +#[inline] +pub unsafe extern "C" fn fstat64(fildes: c_int, buf: *mut crate::stat64) -> c_int { + crate::fstat(fildes, buf as *mut _) +} + +#[inline] +pub unsafe extern "C" fn fstatat64( + fd: c_int, + path: *const c_char, + buf: *mut crate::stat64, + flag: c_int, +) -> c_int { + crate::fstatat(fd, path, buf as *mut _, flag) +} + +#[inline] +pub unsafe extern "C" fn fstatfs64(fd: c_int, buf: *mut crate::statfs64) -> c_int { + crate::fstatfs(fd, buf as *mut _) +} + +#[inline] +pub unsafe extern "C" fn fstatvfs64(fd: c_int, buf: *mut crate::statvfs64) -> c_int { + crate::fstatvfs(fd, buf as *mut _) +} + +#[inline] +pub unsafe extern "C" fn ftello64(stream: *mut crate::FILE) -> off64_t { + crate::ftello(stream) +} + +#[inline] +pub unsafe extern "C" fn ftruncate64(fd: c_int, length: off64_t) -> c_int { + crate::ftruncate(fd, length) +} + +#[inline] +pub unsafe extern "C" fn getrlimit64(resource: c_int, rlim: *mut crate::rlimit64) -> c_int { + crate::getrlimit(resource, rlim as *mut _) +} + +#[inline] +pub unsafe extern "C" fn lseek64(fd: c_int, offset: off64_t, whence: c_int) -> off64_t { + crate::lseek(fd, offset, whence) +} + +#[inline] +pub unsafe extern "C" fn lstat64(path: *const c_char, buf: *mut crate::stat64) -> c_int { + crate::lstat(path, buf as *mut _) +} + +#[inline] +pub unsafe extern "C" fn mmap64( + addr: *mut c_void, + length: size_t, + prot: c_int, + flags: c_int, + fd: c_int, + offset: off64_t, +) -> *mut c_void { + crate::mmap(addr, length, prot, flags, fd, offset) +} + +// These functions are variadic in the C ABI since the `mode` argument is "optional". Variadic +// `extern "C"` functions are unstable in Rust so we cannot write a shim function for these +// entrypoints. See https://github.com/rust-lang/rust/issues/44930. +// +// These aliases are mostly fine though, neither function takes a LFS64-namespaced type as an +// argument, nor do their names clash with any declared types. 
+pub use crate::{open as open64, openat as openat64}; + +#[inline] +pub unsafe extern "C" fn posix_fadvise64( + fd: c_int, + offset: off64_t, + len: off64_t, + advice: c_int, +) -> c_int { + crate::posix_fadvise(fd, offset, len, advice) +} + +#[inline] +pub unsafe extern "C" fn posix_fallocate64(fd: c_int, offset: off64_t, len: off64_t) -> c_int { + crate::posix_fallocate(fd, offset, len) +} + +#[inline] +pub unsafe extern "C" fn pread64( + fd: c_int, + buf: *mut c_void, + count: size_t, + offset: off64_t, +) -> ssize_t { + crate::pread(fd, buf, count, offset) +} + +#[inline] +pub unsafe extern "C" fn preadv64( + fd: c_int, + iov: *const crate::iovec, + iovcnt: c_int, + offset: off64_t, +) -> ssize_t { + crate::preadv(fd, iov, iovcnt, offset) +} + +#[inline] +pub unsafe extern "C" fn prlimit64( + pid: crate::pid_t, + resource: c_int, + new_limit: *const crate::rlimit64, + old_limit: *mut crate::rlimit64, +) -> c_int { + crate::prlimit(pid, resource, new_limit as *mut _, old_limit as *mut _) +} + +#[inline] +pub unsafe extern "C" fn pwrite64( + fd: c_int, + buf: *const c_void, + count: size_t, + offset: off64_t, +) -> ssize_t { + crate::pwrite(fd, buf, count, offset) +} + +#[inline] +pub unsafe extern "C" fn pwritev64( + fd: c_int, + iov: *const crate::iovec, + iovcnt: c_int, + offset: off64_t, +) -> ssize_t { + crate::pwritev(fd, iov, iovcnt, offset) +} + +#[inline] +pub unsafe extern "C" fn readdir64(dirp: *mut crate::DIR) -> *mut crate::dirent64 { + crate::readdir(dirp) as *mut _ +} + +#[inline] +pub unsafe extern "C" fn readdir64_r( + dirp: *mut crate::DIR, + entry: *mut crate::dirent64, + result: *mut *mut crate::dirent64, +) -> c_int { + crate::readdir_r(dirp, entry as *mut _, result as *mut _) +} + +#[inline] +pub unsafe extern "C" fn sendfile64( + out_fd: c_int, + in_fd: c_int, + offset: *mut off64_t, + count: size_t, +) -> ssize_t { + crate::sendfile(out_fd, in_fd, offset, count) +} + +#[inline] +pub unsafe extern "C" fn setrlimit64(resource: c_int, rlim: *const crate::rlimit64) -> c_int { + crate::setrlimit(resource, rlim as *mut _) +} + +#[inline] +pub unsafe extern "C" fn stat64(pathname: *const c_char, statbuf: *mut crate::stat64) -> c_int { + crate::stat(pathname, statbuf as *mut _) +} + +#[inline] +pub unsafe extern "C" fn statfs64(pathname: *const c_char, buf: *mut crate::statfs64) -> c_int { + crate::statfs(pathname, buf as *mut _) +} + +#[inline] +pub unsafe extern "C" fn statvfs64(path: *const c_char, buf: *mut crate::statvfs64) -> c_int { + crate::statvfs(path, buf as *mut _) +} + +#[inline] +pub unsafe extern "C" fn tmpfile64() -> *mut crate::FILE { + crate::tmpfile() +} + +#[inline] +pub unsafe extern "C" fn truncate64(path: *const c_char, length: off64_t) -> c_int { + crate::truncate(path, length) +} diff --git a/vendor/libc/src/unix/linux_like/linux/musl/mod.rs b/vendor/libc/src/unix/linux_like/linux/musl/mod.rs new file mode 100644 index 00000000000000..4bc11449145c7a --- /dev/null +++ b/vendor/libc/src/unix/linux_like/linux/musl/mod.rs @@ -0,0 +1,1006 @@ +use crate::off64_t; +use crate::prelude::*; + +pub type pthread_t = *mut c_void; +pub type clock_t = c_long; +#[cfg_attr( + not(feature = "rustc-dep-of-std"), + deprecated( + since = "0.2.80", + note = "This type is changed to 64-bit in musl 1.2.0, \ + we'll follow that change in the future release. \ + See #1848 for more info." 
+ ) +)] +pub type time_t = c_long; +pub type suseconds_t = c_long; +pub type ino_t = u64; +pub type off_t = i64; +pub type blkcnt_t = i64; + +pub type shmatt_t = c_ulong; +pub type msgqnum_t = c_ulong; +pub type msglen_t = c_ulong; +pub type fsblkcnt_t = c_ulonglong; +pub type fsblkcnt64_t = c_ulonglong; +pub type fsfilcnt_t = c_ulonglong; +pub type fsfilcnt64_t = c_ulonglong; +pub type rlim_t = c_ulonglong; + +cfg_if! { + if #[cfg(doc)] { + // Used in `linux::arch` to define ioctl constants. + pub(crate) type Ioctl = c_int; + } else { + #[doc(hidden)] + pub type Ioctl = c_int; + } +} + +impl siginfo_t { + pub unsafe fn si_addr(&self) -> *mut c_void { + #[repr(C)] + struct siginfo_sigfault { + _si_signo: c_int, + _si_errno: c_int, + _si_code: c_int, + si_addr: *mut c_void, + } + (*(self as *const siginfo_t as *const siginfo_sigfault)).si_addr + } + + pub unsafe fn si_value(&self) -> crate::sigval { + #[repr(C)] + struct siginfo_si_value { + _si_signo: c_int, + _si_errno: c_int, + _si_code: c_int, + _si_timerid: c_int, + _si_overrun: c_int, + si_value: crate::sigval, + } + (*(self as *const siginfo_t as *const siginfo_si_value)).si_value + } +} + +// Internal, for casts to access union fields +#[repr(C)] +struct sifields_sigchld { + si_pid: crate::pid_t, + si_uid: crate::uid_t, + si_status: c_int, + si_utime: c_long, + si_stime: c_long, +} +impl Copy for sifields_sigchld {} +impl Clone for sifields_sigchld { + fn clone(&self) -> sifields_sigchld { + *self + } +} + +// Internal, for casts to access union fields +#[repr(C)] +union sifields { + _align_pointer: *mut c_void, + sigchld: sifields_sigchld, +} + +// Internal, for casts to access union fields. Note that some variants +// of sifields start with a pointer, which makes the alignment of +// sifields vary on 32-bit and 64-bit architectures. +#[repr(C)] +struct siginfo_f { + _siginfo_base: [c_int; 3], + sifields: sifields, +} + +impl siginfo_t { + unsafe fn sifields(&self) -> &sifields { + &(*(self as *const siginfo_t as *const siginfo_f)).sifields + } + + pub unsafe fn si_pid(&self) -> crate::pid_t { + self.sifields().sigchld.si_pid + } + + pub unsafe fn si_uid(&self) -> crate::uid_t { + self.sifields().sigchld.si_uid + } + + pub unsafe fn si_status(&self) -> c_int { + self.sifields().sigchld.si_status + } + + pub unsafe fn si_utime(&self) -> c_long { + self.sifields().sigchld.si_utime + } + + pub unsafe fn si_stime(&self) -> c_long { + self.sifields().sigchld.si_stime + } +} + +s! 
{
+ pub struct aiocb {
+ pub aio_fildes: c_int,
+ pub aio_lio_opcode: c_int,
+ pub aio_reqprio: c_int,
+ pub aio_buf: *mut c_void,
+ pub aio_nbytes: size_t,
+ pub aio_sigevent: crate::sigevent,
+ __td: *mut c_void,
+ __lock: [c_int; 2],
+ __err: c_int,
+ __ret: ssize_t,
+ pub aio_offset: off_t,
+ __next: *mut c_void,
+ __prev: *mut c_void,
+ __dummy4: [c_char; 32 - 2 * size_of::<*const ()>()],
+ }
+
+ #[repr(align(8))]
+ pub struct fanotify_event_metadata {
+ pub event_len: c_uint,
+ pub vers: c_uchar,
+ pub reserved: c_uchar,
+ pub metadata_len: c_ushort,
+ pub mask: c_ulonglong,
+ pub fd: c_int,
+ pub pid: c_int,
+ }
+
+ // FIXME(1.0): This should not implement `PartialEq`
+ #[allow(unpredictable_function_pointer_comparisons)]
+ pub struct sigaction {
+ pub sa_sigaction: crate::sighandler_t,
+ pub sa_mask: crate::sigset_t,
+ pub sa_flags: c_int,
+ pub sa_restorer: Option<extern "C" fn()>,
+ }
+
+ // `mips*` targets swap the `s_errno` and `s_code` fields otherwise this struct is
+ // target-agnostic (see https://www.openwall.com/lists/musl/2016/01/27/1/2)
+ //
+ // FIXME(union): C implementation uses unions
+ pub struct siginfo_t {
+ pub si_signo: c_int,
+ #[cfg(not(any(target_arch = "mips", target_arch = "mips64")))]
+ pub si_errno: c_int,
+ pub si_code: c_int,
+ #[cfg(any(target_arch = "mips", target_arch = "mips64"))]
+ pub si_errno: c_int,
+ #[doc(hidden)]
+ #[deprecated(
+ since = "0.2.54",
+ note = "Please leave a comment on https://github.com/rust-lang/libc/pull/1316 \
+ if you're using this field"
+ )]
+ pub _pad: [c_int; 29],
+ _align: [usize; 0],
+ }
+
+ pub struct statvfs {
+ pub f_bsize: c_ulong,
+ pub f_frsize: c_ulong,
+ pub f_blocks: crate::fsblkcnt_t,
+ pub f_bfree: crate::fsblkcnt_t,
+ pub f_bavail: crate::fsblkcnt_t,
+ pub f_files: crate::fsfilcnt_t,
+ pub f_ffree: crate::fsfilcnt_t,
+ pub f_favail: crate::fsfilcnt_t,
+ #[cfg(target_endian = "little")]
+ pub f_fsid: c_ulong,
+ #[cfg(target_pointer_width = "32")]
+ __pad: c_int,
+ #[cfg(target_endian = "big")]
+ pub f_fsid: c_ulong,
+ pub f_flag: c_ulong,
+ pub f_namemax: c_ulong,
+ __f_reserved: [c_int; 6],
+ }
+
+ pub struct statvfs64 {
+ pub f_bsize: c_ulong,
+ pub f_frsize: c_ulong,
+ pub f_blocks: crate::fsblkcnt64_t,
+ pub f_bfree: crate::fsblkcnt64_t,
+ pub f_bavail: crate::fsblkcnt64_t,
+ pub f_files: crate::fsfilcnt64_t,
+ pub f_ffree: crate::fsfilcnt64_t,
+ pub f_favail: crate::fsfilcnt64_t,
+ #[cfg(target_endian = "little")]
+ pub f_fsid: c_ulong,
+ #[cfg(target_pointer_width = "32")]
+ __pad: c_int,
+ #[cfg(target_endian = "big")]
+ pub f_fsid: c_ulong,
+ pub f_flag: c_ulong,
+ pub f_namemax: c_ulong,
+ __f_reserved: [c_int; 6],
+ }
+
+ // PowerPC implementations are special, see the subfolders
+ #[cfg(not(any(target_arch = "powerpc", target_arch = "powerpc64")))]
+ pub struct termios {
+ pub c_iflag: crate::tcflag_t,
+ pub c_oflag: crate::tcflag_t,
+ pub c_cflag: crate::tcflag_t,
+ pub c_lflag: crate::tcflag_t,
+ pub c_line: crate::cc_t,
+ pub c_cc: [crate::cc_t; crate::NCCS],
+ pub __c_ispeed: crate::speed_t,
+ pub __c_ospeed: crate::speed_t,
+ }
+
+ pub struct flock {
+ pub l_type: c_short,
+ pub l_whence: c_short,
+ pub l_start: off_t,
+ pub l_len: off_t,
+ pub l_pid: crate::pid_t,
+ }
+
+ pub struct flock64 {
+ pub l_type: c_short,
+ pub l_whence: c_short,
+ pub l_start: off64_t,
+ pub l_len: off64_t,
+ pub l_pid: crate::pid_t,
+ }
+
+ pub struct regex_t {
+ __re_nsub: size_t,
+ __opaque: *mut c_void,
+ __padding: [*mut c_void; 4usize],
+ __nsub2: size_t,
+ __padding2: c_char,
+ }
+
+ pub struct rtentry {
+ pub rt_pad1:
c_ulong, + pub rt_dst: crate::sockaddr, + pub rt_gateway: crate::sockaddr, + pub rt_genmask: crate::sockaddr, + pub rt_flags: c_ushort, + pub rt_pad2: c_short, + pub rt_pad3: c_ulong, + pub rt_tos: c_uchar, + pub rt_class: c_uchar, + #[cfg(target_pointer_width = "64")] + pub rt_pad4: [c_short; 3usize], + #[cfg(not(target_pointer_width = "64"))] + pub rt_pad4: [c_short; 1usize], + pub rt_metric: c_short, + pub rt_dev: *mut c_char, + pub rt_mtu: c_ulong, + pub rt_window: c_ulong, + pub rt_irtt: c_ushort, + } + + pub struct __exit_status { + pub e_termination: c_short, + pub e_exit: c_short, + } + + pub struct Elf64_Chdr { + pub ch_type: crate::Elf64_Word, + pub ch_reserved: crate::Elf64_Word, + pub ch_size: crate::Elf64_Xword, + pub ch_addralign: crate::Elf64_Xword, + } + + pub struct Elf32_Chdr { + pub ch_type: crate::Elf32_Word, + pub ch_size: crate::Elf32_Word, + pub ch_addralign: crate::Elf32_Word, + } + + pub struct timex { + pub modes: c_uint, + pub offset: c_long, + pub freq: c_long, + pub maxerror: c_long, + pub esterror: c_long, + pub status: c_int, + pub constant: c_long, + pub precision: c_long, + pub tolerance: c_long, + pub time: crate::timeval, + pub tick: c_long, + pub ppsfreq: c_long, + pub jitter: c_long, + pub shift: c_int, + pub stabil: c_long, + pub jitcnt: c_long, + pub calcnt: c_long, + pub errcnt: c_long, + pub stbcnt: c_long, + pub tai: c_int, + pub __padding: [c_int; 11], + } + + pub struct ntptimeval { + pub time: crate::timeval, + pub maxerror: c_long, + pub esterror: c_long, + } + + // netinet/tcp.h + + pub struct tcp_info { + pub tcpi_state: u8, + pub tcpi_ca_state: u8, + pub tcpi_retransmits: u8, + pub tcpi_probes: u8, + pub tcpi_backoff: u8, + pub tcpi_options: u8, + /// This contains the bitfields `tcpi_snd_wscale` and `tcpi_rcv_wscale`. + /// Each is 4 bits. + pub tcpi_snd_rcv_wscale: u8, + /// This contains the bitfields `tcpi_delivery_rate_app_limited` (1 bit) and + /// `tcpi_fastopen_client_fail` (2 bits). 
+ pub tcpi_delivery_fastopen_bitfields: u8, + pub tcpi_rto: u32, + pub tcpi_ato: u32, + pub tcpi_snd_mss: u32, + pub tcpi_rcv_mss: u32, + pub tcpi_unacked: u32, + pub tcpi_sacked: u32, + pub tcpi_lost: u32, + pub tcpi_retrans: u32, + pub tcpi_fackets: u32, + pub tcpi_last_data_sent: u32, + pub tcpi_last_ack_sent: u32, + pub tcpi_last_data_recv: u32, + pub tcpi_last_ack_recv: u32, + pub tcpi_pmtu: u32, + pub tcpi_rcv_ssthresh: u32, + pub tcpi_rtt: u32, + pub tcpi_rttvar: u32, + pub tcpi_snd_ssthresh: u32, + pub tcpi_snd_cwnd: u32, + pub tcpi_advmss: u32, + pub tcpi_reordering: u32, + pub tcpi_rcv_rtt: u32, + pub tcpi_rcv_space: u32, + pub tcpi_total_retrans: u32, + pub tcpi_pacing_rate: u64, + pub tcpi_max_pacing_rate: u64, + pub tcpi_bytes_acked: u64, + pub tcpi_bytes_received: u64, + pub tcpi_segs_out: u32, + pub tcpi_segs_in: u32, + pub tcpi_notsent_bytes: u32, + pub tcpi_min_rtt: u32, + pub tcpi_data_segs_in: u32, + pub tcpi_data_segs_out: u32, + pub tcpi_delivery_rate: u64, + pub tcpi_busy_time: u64, + pub tcpi_rwnd_limited: u64, + pub tcpi_sndbuf_limited: u64, + pub tcpi_delivered: u32, + pub tcpi_delivered_ce: u32, + pub tcpi_bytes_sent: u64, + pub tcpi_bytes_retrans: u64, + pub tcpi_dsack_dups: u32, + pub tcpi_reord_seen: u32, + pub tcpi_rcv_ooopack: u32, + pub tcpi_snd_wnd: u32, + } + + // MIPS/s390x implementation is special (see arch folders) + #[cfg(not(any(target_arch = "mips", target_arch = "mips64", target_arch = "s390x")))] + pub struct statfs { + pub f_type: c_ulong, + pub f_bsize: c_ulong, + pub f_blocks: crate::fsblkcnt_t, + pub f_bfree: crate::fsblkcnt_t, + pub f_bavail: crate::fsblkcnt_t, + pub f_files: crate::fsfilcnt_t, + pub f_ffree: crate::fsfilcnt_t, + pub f_fsid: crate::fsid_t, + pub f_namelen: c_ulong, + pub f_frsize: c_ulong, + pub f_flags: c_ulong, + pub f_spare: [c_ulong; 4], + } + + // MIPS/s390x implementation is special (see arch folders) + #[cfg(not(any(target_arch = "mips", target_arch = "mips64", target_arch = "s390x")))] + pub struct statfs64 { + pub f_type: c_ulong, + pub f_bsize: c_ulong, + pub f_blocks: crate::fsblkcnt64_t, + pub f_bfree: crate::fsblkcnt64_t, + pub f_bavail: crate::fsblkcnt64_t, + pub f_files: crate::fsfilcnt64_t, + pub f_ffree: crate::fsfilcnt64_t, + pub f_fsid: crate::fsid_t, + pub f_namelen: c_ulong, + pub f_frsize: c_ulong, + pub f_flags: c_ulong, + pub f_spare: [c_ulong; 4], + } +} + +s_no_extra_traits! { + pub struct sysinfo { + pub uptime: c_ulong, + pub loads: [c_ulong; 3], + pub totalram: c_ulong, + pub freeram: c_ulong, + pub sharedram: c_ulong, + pub bufferram: c_ulong, + pub totalswap: c_ulong, + pub freeswap: c_ulong, + pub procs: c_ushort, + pub pad: c_ushort, + pub totalhigh: c_ulong, + pub freehigh: c_ulong, + pub mem_unit: c_uint, + pub __reserved: [c_char; 256], + } + + pub struct utmpx { + pub ut_type: c_short, + __ut_pad1: c_short, + pub ut_pid: crate::pid_t, + pub ut_line: [c_char; 32], + pub ut_id: [c_char; 4], + pub ut_user: [c_char; 32], + pub ut_host: [c_char; 256], + pub ut_exit: __exit_status, + + #[cfg(not(musl_v1_2_3))] + #[deprecated( + since = "0.2.173", + note = "The ABI of this field has changed from c_long to c_int with padding, \ + we'll follow that change in the future release. See #4443 for more info." 
+ )]
+ pub ut_session: c_long,
+
+ #[cfg(musl_v1_2_3)]
+ #[cfg(not(target_endian = "little"))]
+ __ut_pad2: c_int,
+
+ #[cfg(musl_v1_2_3)]
+ pub ut_session: c_int,
+
+ #[cfg(musl_v1_2_3)]
+ #[cfg(target_endian = "little")]
+ __ut_pad2: c_int,
+
+ pub ut_tv: crate::timeval,
+ pub ut_addr_v6: [c_uint; 4],
+ __unused: [c_char; 20],
+ }
+}
+
+cfg_if! {
+ if #[cfg(feature = "extra_traits")] {
+ impl PartialEq for sysinfo {
+ fn eq(&self, other: &sysinfo) -> bool {
+ self.uptime == other.uptime
+ && self.loads == other.loads
+ && self.totalram == other.totalram
+ && self.freeram == other.freeram
+ && self.sharedram == other.sharedram
+ && self.bufferram == other.bufferram
+ && self.totalswap == other.totalswap
+ && self.freeswap == other.freeswap
+ && self.procs == other.procs
+ && self.pad == other.pad
+ && self.totalhigh == other.totalhigh
+ && self.freehigh == other.freehigh
+ && self.mem_unit == other.mem_unit
+ && self
+ .__reserved
+ .iter()
+ .zip(other.__reserved.iter())
+ .all(|(a, b)| a == b)
+ }
+ }
+
+ impl Eq for sysinfo {}
+
+ impl hash::Hash for sysinfo {
+ fn hash<H: hash::Hasher>(&self, state: &mut H) {
+ self.uptime.hash(state);
+ self.loads.hash(state);
+ self.totalram.hash(state);
+ self.freeram.hash(state);
+ self.sharedram.hash(state);
+ self.bufferram.hash(state);
+ self.totalswap.hash(state);
+ self.freeswap.hash(state);
+ self.procs.hash(state);
+ self.pad.hash(state);
+ self.totalhigh.hash(state);
+ self.freehigh.hash(state);
+ self.mem_unit.hash(state);
+ self.__reserved.hash(state);
+ }
+ }
+
+ impl PartialEq for utmpx {
+ #[allow(deprecated)]
+ fn eq(&self, other: &utmpx) -> bool {
+ self.ut_type == other.ut_type
+ //&& self.__ut_pad1 == other.__ut_pad1
+ && self.ut_pid == other.ut_pid
+ && self.ut_line == other.ut_line
+ && self.ut_id == other.ut_id
+ && self.ut_user == other.ut_user
+ && self
+ .ut_host
+ .iter()
+ .zip(other.ut_host.iter())
+ .all(|(a,b)| a == b)
+ && self.ut_exit == other.ut_exit
+ && self.ut_session == other.ut_session
+ //&& self.__ut_pad2 == other.__ut_pad2
+ && self.ut_tv == other.ut_tv
+ && self.ut_addr_v6 == other.ut_addr_v6
+ && self.__unused == other.__unused
+ }
+ }
+
+ impl Eq for utmpx {}
+
+ impl hash::Hash for utmpx {
+ #[allow(deprecated)]
+ fn hash<H: hash::Hasher>(&self, state: &mut H) {
+ self.ut_type.hash(state);
+ //self.__ut_pad1.hash(state);
+ self.ut_pid.hash(state);
+ self.ut_line.hash(state);
+ self.ut_id.hash(state);
+ self.ut_user.hash(state);
+ self.ut_host.hash(state);
+ self.ut_exit.hash(state);
+ self.ut_session.hash(state);
+ //self.__ut_pad2.hash(state);
+ self.ut_tv.hash(state);
+ self.ut_addr_v6.hash(state);
+ self.__unused.hash(state);
+ }
+ }
+ }
+}
+
+// include/sys/mman.h
+/*
+ * Huge page size encoding when MAP_HUGETLB is specified, and a huge page
+ * size other than the default is desired. See hugetlb_encode.h.
+ * All known huge page size encodings are provided here. It is the
+ * responsibility of the application to know which sizes are supported on
+ * the running system. See mmap(2) man page for details.
+ */ +pub const MAP_HUGE_SHIFT: c_int = 26; +pub const MAP_HUGE_MASK: c_int = 0x3f; + +pub const MAP_HUGE_64KB: c_int = 16 << MAP_HUGE_SHIFT; +pub const MAP_HUGE_512KB: c_int = 19 << MAP_HUGE_SHIFT; +pub const MAP_HUGE_1MB: c_int = 20 << MAP_HUGE_SHIFT; +pub const MAP_HUGE_2MB: c_int = 21 << MAP_HUGE_SHIFT; +pub const MAP_HUGE_8MB: c_int = 23 << MAP_HUGE_SHIFT; +pub const MAP_HUGE_16MB: c_int = 24 << MAP_HUGE_SHIFT; +pub const MAP_HUGE_32MB: c_int = 25 << MAP_HUGE_SHIFT; +pub const MAP_HUGE_256MB: c_int = 28 << MAP_HUGE_SHIFT; +pub const MAP_HUGE_512MB: c_int = 29 << MAP_HUGE_SHIFT; +pub const MAP_HUGE_1GB: c_int = 30 << MAP_HUGE_SHIFT; +pub const MAP_HUGE_2GB: c_int = 31 << MAP_HUGE_SHIFT; +pub const MAP_HUGE_16GB: c_int = 34 << MAP_HUGE_SHIFT; + +pub const MS_RMT_MASK: c_ulong = 0x02800051; + +// include/utmpx.h +pub const EMPTY: c_short = 0; +pub const RUN_LVL: c_short = 1; +pub const BOOT_TIME: c_short = 2; +pub const NEW_TIME: c_short = 3; +pub const OLD_TIME: c_short = 4; +pub const INIT_PROCESS: c_short = 5; +pub const LOGIN_PROCESS: c_short = 6; +pub const USER_PROCESS: c_short = 7; +pub const DEAD_PROCESS: c_short = 8; +pub const ACCOUNTING: c_short = 9; + +pub const SFD_CLOEXEC: c_int = 0x080000; + +#[cfg(not(any(target_arch = "powerpc", target_arch = "powerpc64")))] +pub const NCCS: usize = 32; +#[cfg(any(target_arch = "powerpc", target_arch = "powerpc64"))] +pub const NCCS: usize = 19; + +pub const O_TRUNC: c_int = 512; +pub const O_NOATIME: c_int = 0o1000000; +pub const O_CLOEXEC: c_int = 0x80000; +pub const O_TMPFILE: c_int = 0o20000000 | O_DIRECTORY; + +pub const EBFONT: c_int = 59; +pub const ENOSTR: c_int = 60; +pub const ENODATA: c_int = 61; +pub const ETIME: c_int = 62; +pub const ENOSR: c_int = 63; +pub const ENONET: c_int = 64; +pub const ENOPKG: c_int = 65; +pub const EREMOTE: c_int = 66; +pub const ENOLINK: c_int = 67; +pub const EADV: c_int = 68; +pub const ESRMNT: c_int = 69; +pub const ECOMM: c_int = 70; +pub const EPROTO: c_int = 71; +pub const EDOTDOT: c_int = 73; + +pub const F_OFD_GETLK: c_int = 36; +pub const F_OFD_SETLK: c_int = 37; +pub const F_OFD_SETLKW: c_int = 38; + +pub const F_RDLCK: c_int = 0; +pub const F_WRLCK: c_int = 1; +pub const F_UNLCK: c_int = 2; + +pub const SA_NODEFER: c_int = 0x40000000; +pub const SA_RESETHAND: c_int = 0x80000000; +pub const SA_RESTART: c_int = 0x10000000; +pub const SA_NOCLDSTOP: c_int = 0x00000001; + +pub const EPOLL_CLOEXEC: c_int = 0x80000; + +pub const EFD_CLOEXEC: c_int = 0x80000; + +pub const BUFSIZ: c_uint = 1024; +pub const TMP_MAX: c_uint = 10000; +pub const FOPEN_MAX: c_uint = 1000; +pub const FILENAME_MAX: c_uint = 4096; +pub const O_PATH: c_int = 0o10000000; +pub const O_EXEC: c_int = 0o10000000; +pub const O_SEARCH: c_int = 0o10000000; +pub const O_ACCMODE: c_int = 0o10000003; +pub const O_NDELAY: c_int = O_NONBLOCK; +pub const NI_MAXHOST: crate::socklen_t = 255; +pub const PTHREAD_STACK_MIN: size_t = 2048; + +pub const POSIX_MADV_DONTNEED: c_int = 4; + +pub const MAP_ANONYMOUS: c_int = MAP_ANON; + +pub const SOCK_SEQPACKET: c_int = 5; +pub const SOCK_DCCP: c_int = 6; +pub const SOCK_NONBLOCK: c_int = O_NONBLOCK; +#[deprecated(since = "0.2.70", note = "AF_PACKET must be used instead")] +pub const SOCK_PACKET: c_int = 10; + +pub const SOMAXCONN: c_int = 128; + +#[deprecated(since = "0.2.55", note = "Use SIGSYS instead")] +pub const SIGUNUSED: c_int = crate::SIGSYS; + +pub const __SIZEOF_PTHREAD_CONDATTR_T: usize = 4; +pub const __SIZEOF_PTHREAD_MUTEXATTR_T: usize = 4; +pub const __SIZEOF_PTHREAD_RWLOCKATTR_T: 
usize = 8; +pub const __SIZEOF_PTHREAD_BARRIERATTR_T: usize = 4; + +// FIXME(musl): Value is 1024 for all architectures since 1.2.4 +#[cfg(not(target_arch = "loongarch64"))] +pub const CPU_SETSIZE: c_int = 128; +#[cfg(target_arch = "loongarch64")] +pub const CPU_SETSIZE: c_int = 1024; + +pub const PTRACE_TRACEME: c_int = 0; +pub const PTRACE_PEEKTEXT: c_int = 1; +pub const PTRACE_PEEKDATA: c_int = 2; +pub const PTRACE_PEEKUSER: c_int = 3; +pub const PTRACE_POKETEXT: c_int = 4; +pub const PTRACE_POKEDATA: c_int = 5; +pub const PTRACE_POKEUSER: c_int = 6; +pub const PTRACE_CONT: c_int = 7; +pub const PTRACE_KILL: c_int = 8; +pub const PTRACE_SINGLESTEP: c_int = 9; +pub const PTRACE_GETREGS: c_int = 12; +pub const PTRACE_SETREGS: c_int = 13; +pub const PTRACE_GETFPREGS: c_int = 14; +pub const PTRACE_SETFPREGS: c_int = 15; +pub const PTRACE_ATTACH: c_int = 16; +pub const PTRACE_DETACH: c_int = 17; +pub const PTRACE_GETFPXREGS: c_int = 18; +pub const PTRACE_SETFPXREGS: c_int = 19; +pub const PTRACE_SYSCALL: c_int = 24; +pub const PTRACE_SETOPTIONS: c_int = 0x4200; +pub const PTRACE_GETEVENTMSG: c_int = 0x4201; +pub const PTRACE_GETSIGINFO: c_int = 0x4202; +pub const PTRACE_SETSIGINFO: c_int = 0x4203; +pub const PTRACE_GETREGSET: c_int = 0x4204; +pub const PTRACE_SETREGSET: c_int = 0x4205; +pub const PTRACE_SEIZE: c_int = 0x4206; +pub const PTRACE_INTERRUPT: c_int = 0x4207; +pub const PTRACE_LISTEN: c_int = 0x4208; +pub const PTRACE_PEEKSIGINFO: c_int = 0x4209; +pub const PTRACE_GETSIGMASK: c_uint = 0x420a; +pub const PTRACE_SETSIGMASK: c_uint = 0x420b; + +pub const AF_IB: c_int = 27; +pub const AF_MPLS: c_int = 28; +pub const AF_NFC: c_int = 39; +pub const AF_VSOCK: c_int = 40; +pub const AF_XDP: c_int = 44; +pub const PF_IB: c_int = AF_IB; +pub const PF_MPLS: c_int = AF_MPLS; +pub const PF_NFC: c_int = AF_NFC; +pub const PF_VSOCK: c_int = AF_VSOCK; +pub const PF_XDP: c_int = AF_XDP; + +pub const EFD_NONBLOCK: c_int = crate::O_NONBLOCK; + +pub const SFD_NONBLOCK: c_int = crate::O_NONBLOCK; + +pub const TCSANOW: c_int = 0; +pub const TCSADRAIN: c_int = 1; +pub const TCSAFLUSH: c_int = 2; + +pub const RTLD_GLOBAL: c_int = 0x100; +pub const RTLD_NOLOAD: c_int = 0x4; + +pub const CLOCK_SGI_CYCLE: crate::clockid_t = 10; + +pub const B0: crate::speed_t = 0o000000; +pub const B50: crate::speed_t = 0o000001; +pub const B75: crate::speed_t = 0o000002; +pub const B110: crate::speed_t = 0o000003; +pub const B134: crate::speed_t = 0o000004; +pub const B150: crate::speed_t = 0o000005; +pub const B200: crate::speed_t = 0o000006; +pub const B300: crate::speed_t = 0o000007; +pub const B600: crate::speed_t = 0o000010; +pub const B1200: crate::speed_t = 0o000011; +pub const B1800: crate::speed_t = 0o000012; +pub const B2400: crate::speed_t = 0o000013; +pub const B4800: crate::speed_t = 0o000014; +pub const B9600: crate::speed_t = 0o000015; +pub const B19200: crate::speed_t = 0o000016; +pub const B38400: crate::speed_t = 0o000017; +pub const EXTA: crate::speed_t = B19200; +pub const EXTB: crate::speed_t = B38400; + +pub const REG_OK: c_int = 0; + +pub const PRIO_PROCESS: c_int = 0; +pub const PRIO_PGRP: c_int = 1; +pub const PRIO_USER: c_int = 2; + +pub const ADJ_OFFSET: c_uint = 0x0001; +pub const ADJ_FREQUENCY: c_uint = 0x0002; +pub const ADJ_MAXERROR: c_uint = 0x0004; +pub const ADJ_ESTERROR: c_uint = 0x0008; +pub const ADJ_STATUS: c_uint = 0x0010; +pub const ADJ_TIMECONST: c_uint = 0x0020; +pub const ADJ_TAI: c_uint = 0x0080; +pub const ADJ_SETOFFSET: c_uint = 0x0100; +pub const ADJ_MICRO: c_uint = 0x1000; 
+pub const ADJ_NANO: c_uint = 0x2000; +pub const ADJ_TICK: c_uint = 0x4000; +pub const ADJ_OFFSET_SINGLESHOT: c_uint = 0x8001; +pub const ADJ_OFFSET_SS_READ: c_uint = 0xa001; +pub const MOD_OFFSET: c_uint = ADJ_OFFSET; +pub const MOD_FREQUENCY: c_uint = ADJ_FREQUENCY; +pub const MOD_MAXERROR: c_uint = ADJ_MAXERROR; +pub const MOD_ESTERROR: c_uint = ADJ_ESTERROR; +pub const MOD_STATUS: c_uint = ADJ_STATUS; +pub const MOD_TIMECONST: c_uint = ADJ_TIMECONST; +pub const MOD_CLKB: c_uint = ADJ_TICK; +pub const MOD_CLKA: c_uint = ADJ_OFFSET_SINGLESHOT; +pub const MOD_TAI: c_uint = ADJ_TAI; +pub const MOD_MICRO: c_uint = ADJ_MICRO; +pub const MOD_NANO: c_uint = ADJ_NANO; +pub const STA_PLL: c_int = 0x0001; +pub const STA_PPSFREQ: c_int = 0x0002; +pub const STA_PPSTIME: c_int = 0x0004; +pub const STA_FLL: c_int = 0x0008; +pub const STA_INS: c_int = 0x0010; +pub const STA_DEL: c_int = 0x0020; +pub const STA_UNSYNC: c_int = 0x0040; +pub const STA_FREQHOLD: c_int = 0x0080; +pub const STA_PPSSIGNAL: c_int = 0x0100; +pub const STA_PPSJITTER: c_int = 0x0200; +pub const STA_PPSWANDER: c_int = 0x0400; +pub const STA_PPSERROR: c_int = 0x0800; +pub const STA_CLOCKERR: c_int = 0x1000; +pub const STA_NANO: c_int = 0x2000; +pub const STA_MODE: c_int = 0x4000; +pub const STA_CLK: c_int = 0x8000; +pub const STA_RONLY: c_int = STA_PPSSIGNAL + | STA_PPSJITTER + | STA_PPSWANDER + | STA_PPSERROR + | STA_CLOCKERR + | STA_NANO + | STA_MODE + | STA_CLK; + +pub const TIME_OK: c_int = 0; +pub const TIME_INS: c_int = 1; +pub const TIME_DEL: c_int = 2; +pub const TIME_OOP: c_int = 3; +pub const TIME_WAIT: c_int = 4; +pub const TIME_ERROR: c_int = 5; +pub const TIME_BAD: c_int = TIME_ERROR; +pub const MAXTC: c_long = 6; + +pub const _CS_V6_ENV: c_int = 1148; +pub const _CS_V7_ENV: c_int = 1149; + +pub const CLONE_NEWTIME: c_int = 0x80; + +pub const UT_HOSTSIZE: usize = 256; +pub const UT_LINESIZE: usize = 32; +pub const UT_NAMESIZE: usize = 32; + +cfg_if! { + if #[cfg(target_arch = "s390x")] { + pub const POSIX_FADV_DONTNEED: c_int = 6; + pub const POSIX_FADV_NOREUSE: c_int = 7; + } else { + pub const POSIX_FADV_DONTNEED: c_int = 4; + pub const POSIX_FADV_NOREUSE: c_int = 5; + } +} + +extern "C" { + pub fn sendmmsg( + sockfd: c_int, + msgvec: *mut crate::mmsghdr, + vlen: c_uint, + flags: c_uint, + ) -> c_int; + pub fn recvmmsg( + sockfd: c_int, + msgvec: *mut crate::mmsghdr, + vlen: c_uint, + flags: c_uint, + timeout: *mut crate::timespec, + ) -> c_int; + + pub fn getrlimit(resource: c_int, rlim: *mut crate::rlimit) -> c_int; + pub fn setrlimit(resource: c_int, rlim: *const crate::rlimit) -> c_int; + pub fn prlimit( + pid: crate::pid_t, + resource: c_int, + new_limit: *const crate::rlimit, + old_limit: *mut crate::rlimit, + ) -> c_int; + pub fn gettimeofday(tp: *mut crate::timeval, tz: *mut c_void) -> c_int; + pub fn ptrace(request: c_int, ...) -> c_long; + pub fn getpriority(which: c_int, who: crate::id_t) -> c_int; + pub fn setpriority(which: c_int, who: crate::id_t, prio: c_int) -> c_int; + // Musl targets need the `mask` argument of `fanotify_mark` be specified + // `c_ulonglong` instead of `u64` or there will be a type mismatch between + // `long long unsigned int` and the expected `uint64_t`. 
+ pub fn fanotify_mark( + fd: c_int, + flags: c_uint, + mask: c_ulonglong, + dirfd: c_int, + path: *const c_char, + ) -> c_int; + pub fn preadv2( + fd: c_int, + iov: *const crate::iovec, + iovcnt: c_int, + offset: off_t, + flags: c_int, + ) -> ssize_t; + pub fn pwritev2( + fd: c_int, + iov: *const crate::iovec, + iovcnt: c_int, + offset: off_t, + flags: c_int, + ) -> ssize_t; + pub fn getauxval(type_: c_ulong) -> c_ulong; + + // Added in `musl` 1.1.20 + pub fn explicit_bzero(s: *mut c_void, len: size_t); + // Added in `musl` 1.2.2 + pub fn reallocarray(ptr: *mut c_void, nmemb: size_t, size: size_t) -> *mut c_void; + + pub fn adjtimex(buf: *mut crate::timex) -> c_int; + pub fn clock_adjtime(clk_id: crate::clockid_t, buf: *mut crate::timex) -> c_int; + + pub fn ctermid(s: *mut c_char) -> *mut c_char; + + pub fn memfd_create(name: *const c_char, flags: c_uint) -> c_int; + pub fn mlock2(addr: *const c_void, len: size_t, flags: c_uint) -> c_int; + pub fn malloc_usable_size(ptr: *mut c_void) -> size_t; + + pub fn euidaccess(pathname: *const c_char, mode: c_int) -> c_int; + pub fn eaccess(pathname: *const c_char, mode: c_int) -> c_int; + + pub fn asctime_r(tm: *const crate::tm, buf: *mut c_char) -> *mut c_char; + + pub fn dirname(path: *mut c_char) -> *mut c_char; + pub fn basename(path: *mut c_char) -> *mut c_char; + + // Added in `musl` 1.1.20 + pub fn getrandom(buf: *mut c_void, buflen: size_t, flags: c_uint) -> ssize_t; + + // Added in `musl` 1.1.24 + pub fn posix_spawn_file_actions_addchdir_np( + actions: *mut crate::posix_spawn_file_actions_t, + path: *const c_char, + ) -> c_int; + // Added in `musl` 1.1.24 + pub fn posix_spawn_file_actions_addfchdir_np( + actions: *mut crate::posix_spawn_file_actions_t, + fd: c_int, + ) -> c_int; + + #[deprecated( + since = "0.2.172", + note = "musl provides `utmp` as stubs and an alternative should be preferred; see https://wiki.musl-libc.org/faq.html" + )] + pub fn getutxent() -> *mut utmpx; + #[deprecated( + since = "0.2.172", + note = "musl provides `utmp` as stubs and an alternative should be preferred; see https://wiki.musl-libc.org/faq.html" + )] + pub fn getutxid(ut: *const utmpx) -> *mut utmpx; + #[deprecated( + since = "0.2.172", + note = "musl provides `utmp` as stubs and an alternative should be preferred; see https://wiki.musl-libc.org/faq.html" + )] + pub fn getutxline(ut: *const utmpx) -> *mut utmpx; + #[deprecated( + since = "0.2.172", + note = "musl provides `utmp` as stubs and an alternative should be preferred; see https://wiki.musl-libc.org/faq.html" + )] + pub fn pututxline(ut: *const utmpx) -> *mut utmpx; + #[deprecated( + since = "0.2.172", + note = "musl provides `utmp` as stubs and an alternative should be preferred; see https://wiki.musl-libc.org/faq.html" + )] + pub fn setutxent(); + #[deprecated( + since = "0.2.172", + note = "musl provides `utmp` as stubs and an alternative should be preferred; see https://wiki.musl-libc.org/faq.html" + )] + pub fn endutxent(); + #[deprecated( + since = "0.2.172", + note = "musl provides `utmp` as stubs and an alternative should be preferred; see https://wiki.musl-libc.org/faq.html" + )] + pub fn utmpxname(file: *const c_char) -> c_int; +} + +// Alias to 64 to mimic glibc's LFS64 support +mod lfs64; +pub use self::lfs64::*; + +cfg_if! 
{ + if #[cfg(any( + target_arch = "x86_64", + target_arch = "aarch64", + target_arch = "mips64", + target_arch = "powerpc64", + target_arch = "s390x", + target_arch = "riscv64", + target_arch = "loongarch64", + // musl-linux ABI for wasm32 follows b64 convention + target_arch = "wasm32", + ))] { + mod b64; + pub use self::b64::*; + } else if #[cfg(any( + target_arch = "x86", + target_arch = "mips", + target_arch = "powerpc", + target_arch = "hexagon", + target_arch = "riscv32", + target_arch = "arm" + ))] { + mod b32; + pub use self::b32::*; + } else { + } +} diff --git a/vendor/libc/src/unix/linux_like/linux/uclibc/arm/mod.rs b/vendor/libc/src/unix/linux_like/linux/uclibc/arm/mod.rs new file mode 100644 index 00000000000000..c54d77b194c48f --- /dev/null +++ b/vendor/libc/src/unix/linux_like/linux/uclibc/arm/mod.rs @@ -0,0 +1,925 @@ +use crate::off64_t; +use crate::prelude::*; + +pub type wchar_t = c_uint; +pub type time_t = c_long; + +pub type clock_t = c_long; +pub type fsblkcnt_t = c_ulong; +pub type fsfilcnt_t = c_ulong; +pub type ino_t = c_ulong; +pub type off_t = c_long; +pub type pthread_t = c_ulong; +pub type suseconds_t = c_long; + +pub type nlink_t = c_uint; +pub type blksize_t = c_long; +pub type blkcnt_t = c_long; + +pub type fsblkcnt64_t = u64; +pub type fsfilcnt64_t = u64; +pub type __u64 = c_ulonglong; +pub type __s64 = c_longlong; + +s! { + pub struct cmsghdr { + pub cmsg_len: size_t, + pub cmsg_level: c_int, + pub cmsg_type: c_int, + } + + pub struct msghdr { + pub msg_name: *mut c_void, + pub msg_namelen: crate::socklen_t, + pub msg_iov: *mut crate::iovec, + pub msg_iovlen: c_int, + pub msg_control: *mut c_void, + pub msg_controllen: crate::socklen_t, + pub msg_flags: c_int, + } + + pub struct pthread_attr_t { + __size: [c_long; 9], + } + + pub struct stat { + pub st_dev: c_ulonglong, + __pad1: c_ushort, + pub st_ino: crate::ino_t, + pub st_mode: crate::mode_t, + pub st_nlink: crate::nlink_t, + pub st_uid: crate::uid_t, + pub st_gid: crate::gid_t, + pub st_rdev: c_ulonglong, + __pad2: c_ushort, + pub st_size: off_t, + pub st_blksize: crate::blksize_t, + pub st_blocks: crate::blkcnt_t, + pub st_atime: crate::time_t, + pub st_atime_nsec: c_long, + pub st_mtime: crate::time_t, + pub st_mtime_nsec: c_long, + pub st_ctime: crate::time_t, + pub st_ctime_nsec: c_long, + __unused4: c_ulong, + __unused5: c_ulong, + } + + pub struct stat64 { + pub st_dev: c_ulonglong, + pub __pad1: c_uint, + pub __st_ino: crate::ino_t, + pub st_mode: crate::mode_t, + pub st_nlink: crate::nlink_t, + pub st_uid: crate::uid_t, + pub st_gid: crate::gid_t, + pub st_rdev: c_ulonglong, + pub __pad2: c_uint, + pub st_size: off64_t, + pub st_blksize: crate::blksize_t, + pub st_blocks: crate::blkcnt64_t, + pub st_atime: crate::time_t, + pub st_atime_nsec: c_long, + pub st_mtime: crate::time_t, + pub st_mtime_nsec: c_long, + pub st_ctime: crate::time_t, + pub st_ctime_nsec: c_long, + pub st_ino: crate::ino64_t, + } + + pub struct flock { + pub l_type: c_short, + pub l_whence: c_short, + pub l_start: off_t, + pub l_len: off_t, + pub l_pid: crate::pid_t, + } + + pub struct sysinfo { + pub uptime: c_long, + pub loads: [c_ulong; 3], + pub totalram: c_ulong, + pub freeram: c_ulong, + pub sharedram: c_ulong, + pub bufferram: c_ulong, + pub totalswap: c_ulong, + pub freeswap: c_ulong, + pub procs: c_ushort, + pub pad: c_ushort, + pub totalhigh: c_ulong, + pub freehigh: c_ulong, + pub mem_unit: c_uint, + pub _f: [c_char; 8], + } + + pub struct statfs { + pub f_type: c_int, + pub f_bsize: c_int, + pub f_blocks: 
crate::fsblkcnt_t,
+ pub f_bfree: crate::fsblkcnt_t,
+ pub f_bavail: crate::fsblkcnt_t,
+ pub f_files: crate::fsfilcnt_t,
+ pub f_ffree: crate::fsfilcnt_t,
+
+ pub f_fsid: crate::fsid_t,
+ pub f_namelen: c_int,
+ pub f_frsize: c_int,
+ pub f_flags: c_int,
+ pub f_spare: [c_int; 4],
+ }
+
+ pub struct statfs64 {
+ pub f_type: c_int,
+ pub f_bsize: c_int,
+ pub f_blocks: crate::fsblkcnt64_t,
+ pub f_bfree: crate::fsblkcnt64_t,
+ pub f_bavail: crate::fsblkcnt64_t,
+ pub f_files: crate::fsfilcnt64_t,
+ pub f_ffree: crate::fsfilcnt64_t,
+ pub f_fsid: crate::fsid_t,
+ pub f_namelen: c_int,
+ pub f_frsize: c_int,
+ pub f_flags: c_int,
+ pub f_spare: [c_int; 4],
+ }
+
+ pub struct statvfs64 {
+ pub f_bsize: c_ulong,
+ pub f_frsize: c_ulong,
+ pub f_blocks: u64,
+ pub f_bfree: u64,
+ pub f_bavail: u64,
+ pub f_files: u64,
+ pub f_ffree: u64,
+ pub f_favail: u64,
+ pub f_fsid: c_ulong,
+ __f_unused: c_int,
+ pub f_flag: c_ulong,
+ pub f_namemax: c_ulong,
+ __f_spare: [c_int; 6],
+ }
+
+ pub struct sigset_t {
+ __val: [c_ulong; 2],
+ }
+
+ // FIXME(1.0): This should not implement `PartialEq`
+ #[allow(unpredictable_function_pointer_comparisons)]
+ pub struct sigaction {
+ pub sa_sigaction: crate::sighandler_t,
+ pub sa_flags: c_ulong,
+ pub sa_restorer: Option<extern "C" fn()>,
+ pub sa_mask: sigset_t,
+ }
+
+ pub struct termios {
+ pub c_iflag: crate::tcflag_t,
+ pub c_oflag: crate::tcflag_t,
+ pub c_cflag: crate::tcflag_t,
+ pub c_lflag: crate::tcflag_t,
+ pub c_line: crate::cc_t,
+ pub c_cc: [crate::cc_t; crate::NCCS],
+ pub c_ispeed: crate::speed_t,
+ pub c_ospeed: crate::speed_t,
+ }
+
+ pub struct siginfo_t {
+ pub si_signo: c_int,
+ pub si_errno: c_int,
+ pub si_code: c_int,
+ pub _pad: [c_int; 29],
+ }
+
+ pub struct stack_t {
+ pub ss_sp: *mut c_void,
+ pub ss_flags: c_int,
+ pub ss_size: size_t,
+ }
+
+ pub struct ipc_perm {
+ pub __key: crate::key_t,
+ pub uid: crate::uid_t,
+ pub gid: crate::gid_t,
+ pub cuid: crate::uid_t,
+ pub cgid: crate::gid_t,
+ pub mode: c_ushort,
+ __pad1: c_ushort,
+ pub __seq: c_ushort,
+ __pad2: c_ushort,
+ __unused1: c_ulong,
+ __unused2: c_ulong,
+ }
+
+ pub struct msqid_ds {
+ pub msg_perm: crate::ipc_perm,
+ pub msg_stime: crate::time_t,
+ __unused1: c_ulong,
+ pub msg_rtime: crate::time_t,
+ __unused2: c_ulong,
+ pub msg_ctime: crate::time_t,
+ __unused3: c_ulong,
+ pub __msg_cbytes: c_ulong,
+ pub msg_qnum: crate::msgqnum_t,
+ pub msg_qbytes: crate::msglen_t,
+ pub msg_lspid: crate::pid_t,
+ pub msg_lrpid: crate::pid_t,
+ __unused4: c_ulong,
+ __unused5: c_ulong,
+ }
+
+ pub struct shmid_ds {
+ pub shm_perm: crate::ipc_perm,
+ pub shm_segsz: size_t,
+ pub shm_atime: crate::time_t,
+ __unused1: c_ulong,
+ pub shm_dtime: crate::time_t,
+ __unused2: c_ulong,
+ pub shm_ctime: crate::time_t,
+ __unused3: c_ulong,
+ pub shm_cpid: crate::pid_t,
+ pub shm_lpid: crate::pid_t,
+ pub shm_nattch: crate::shmatt_t,
+ __unused4: c_ulong,
+ __unused5: c_ulong,
+ }
+
+ // FIXME(1.0) this is actually a union
+ #[cfg_attr(target_pointer_width = "32", repr(align(4)))]
+ #[cfg_attr(target_pointer_width = "64", repr(align(8)))]
+ pub struct sem_t {
+ #[cfg(target_pointer_width = "32")]
+ __size: [c_char; 16],
+ #[cfg(target_pointer_width = "64")]
+ __size: [c_char; 32],
+ }
+}
+
+pub const O_CLOEXEC: c_int = 0o2000000;
+pub const __SIZEOF_PTHREAD_ATTR_T: usize = 36;
+pub const __SIZEOF_PTHREAD_MUTEX_T: usize = 24;
+pub const __SIZEOF_PTHREAD_MUTEXATTR_T: usize = 4;
+pub const __SIZEOF_PTHREAD_COND_COMPAT_T: usize = 12;
+pub const __SIZEOF_PTHREAD_CONDATTR_T: usize = 4;
+pub const
__SIZEOF_PTHREAD_RWLOCK_T: usize = 32; +pub const __SIZEOF_PTHREAD_RWLOCKATTR_T: usize = 8; +pub const __SIZEOF_PTHREAD_BARRIER_T: usize = 20; +pub const __SIZEOF_PTHREAD_BARRIERATTR_T: usize = 4; +pub const NCCS: usize = 32; + +// I wasn't able to find those constants +// in uclibc build environment for armv7 +pub const MAP_HUGETLB: c_int = 0x040000; // from linux/other/mod.rs + +// autogenerated constants with hand tuned types +pub const B0: crate::speed_t = 0; +pub const B1000000: crate::speed_t = 0x1008; +pub const B110: crate::speed_t = 0x3; +pub const B115200: crate::speed_t = 0x1002; +pub const B1152000: crate::speed_t = 0x1009; +pub const B1200: crate::speed_t = 0x9; +pub const B134: crate::speed_t = 0x4; +pub const B150: crate::speed_t = 0x5; +pub const B1500000: crate::speed_t = 0x100a; +pub const B1800: crate::speed_t = 0xa; +pub const B19200: crate::speed_t = 0xe; +pub const B200: crate::speed_t = 0x6; +pub const B2000000: crate::speed_t = 0x100b; +pub const B230400: crate::speed_t = 0x1003; +pub const B2400: crate::speed_t = 0xb; +pub const B2500000: crate::speed_t = 0x100c; +pub const B300: crate::speed_t = 0x7; +pub const B3000000: crate::speed_t = 0x100d; +pub const B3500000: crate::speed_t = 0x100e; +pub const B38400: crate::speed_t = 0xf; +pub const B4000000: crate::speed_t = 0x100f; +pub const B460800: crate::speed_t = 0x1004; +pub const B4800: crate::speed_t = 0xc; +pub const B50: crate::speed_t = 0x1; +pub const B500000: crate::speed_t = 0x1005; +pub const B57600: crate::speed_t = 0x1001; +pub const B576000: crate::speed_t = 0x1006; +pub const B600: crate::speed_t = 0x8; +pub const B75: crate::speed_t = 0x2; +pub const B921600: crate::speed_t = 0x1007; +pub const B9600: crate::speed_t = 0xd; +pub const BS1: c_int = 0x2000; +pub const BSDLY: c_int = 0x2000; +pub const CBAUD: crate::tcflag_t = 0x100f; +pub const CBAUDEX: crate::tcflag_t = 0x1000; +pub const CIBAUD: crate::tcflag_t = 0x100f0000; +pub const CLOCAL: crate::tcflag_t = 0x800; +pub const CPU_SETSIZE: c_int = 0x400; +pub const CR1: c_int = 0x200; +pub const CR2: c_int = 0x400; +pub const CR3: c_int = 0x600; +pub const CRDLY: c_int = 0x600; +pub const CREAD: crate::tcflag_t = 0x80; +pub const CS6: crate::tcflag_t = 0x10; +pub const CS7: crate::tcflag_t = 0x20; +pub const CS8: crate::tcflag_t = 0x30; +pub const CSIZE: crate::tcflag_t = 0x30; +pub const CSTOPB: crate::tcflag_t = 0x40; +pub const EADDRINUSE: c_int = 0x62; +pub const EADDRNOTAVAIL: c_int = 0x63; +pub const EADV: c_int = 0x44; +pub const EAFNOSUPPORT: c_int = 0x61; +pub const EALREADY: c_int = 0x72; +pub const EBADE: c_int = 0x34; +pub const EBADFD: c_int = 0x4d; +pub const EBADMSG: c_int = 0x4a; +pub const EBADR: c_int = 0x35; +pub const EBADRQC: c_int = 0x38; +pub const EBADSLT: c_int = 0x39; +pub const EBFONT: c_int = 0x3b; +pub const ECANCELED: c_int = 0x7d; +pub const ECHOCTL: crate::tcflag_t = 0x200; +pub const ECHOE: crate::tcflag_t = 0x10; +pub const ECHOK: crate::tcflag_t = 0x20; +pub const ECHOKE: crate::tcflag_t = 0x800; +pub const ECHONL: crate::tcflag_t = 0x40; +pub const ECHOPRT: crate::tcflag_t = 0x400; +pub const ECHRNG: c_int = 0x2c; +pub const ECOMM: c_int = 0x46; +pub const ECONNABORTED: c_int = 0x67; +pub const ECONNREFUSED: c_int = 0x6f; +pub const ECONNRESET: c_int = 0x68; +pub const EDEADLK: c_int = 0x23; +pub const EDESTADDRREQ: c_int = 0x59; +pub const EDOTDOT: c_int = 0x49; +pub const EDQUOT: c_int = 0x7a; +pub const EFD_CLOEXEC: c_int = 0x80000; +pub const EFD_NONBLOCK: c_int = 0x800; +pub const EHOSTDOWN: c_int = 0x70; 
+pub const EHOSTUNREACH: c_int = 0x71; +pub const EHWPOISON: c_int = 0x85; +pub const EIDRM: c_int = 0x2b; +pub const EILSEQ: c_int = 0x54; +pub const EINPROGRESS: c_int = 0x73; +pub const EISCONN: c_int = 0x6a; +pub const EISNAM: c_int = 0x78; +pub const EKEYEXPIRED: c_int = 0x7f; +pub const EKEYREJECTED: c_int = 0x81; +pub const EKEYREVOKED: c_int = 0x80; +pub const EL2HLT: c_int = 0x33; +pub const EL2NSYNC: c_int = 0x2d; +pub const EL3HLT: c_int = 0x2e; +pub const EL3RST: c_int = 0x2f; +pub const ELIBACC: c_int = 0x4f; +pub const ELIBBAD: c_int = 0x50; +pub const ELIBEXEC: c_int = 0x53; +pub const ELIBMAX: c_int = 0x52; +pub const ELIBSCN: c_int = 0x51; +pub const ELNRNG: c_int = 0x30; +pub const ELOOP: c_int = 0x28; +pub const EMEDIUMTYPE: c_int = 0x7c; +pub const EMSGSIZE: c_int = 0x5a; +pub const EMULTIHOP: c_int = 0x48; +pub const ENAMETOOLONG: c_int = 0x24; +pub const ENAVAIL: c_int = 0x77; +pub const ENETDOWN: c_int = 0x64; +pub const ENETRESET: c_int = 0x66; +pub const ENETUNREACH: c_int = 0x65; +pub const ENOANO: c_int = 0x37; +pub const ENOBUFS: c_int = 0x69; +pub const ENOCSI: c_int = 0x32; +pub const ENODATA: c_int = 0x3d; +pub const ENOKEY: c_int = 0x7e; +pub const ENOLCK: c_int = 0x25; +pub const ENOLINK: c_int = 0x43; +pub const ENOMEDIUM: c_int = 0x7b; +pub const ENOMSG: c_int = 0x2a; +pub const ENONET: c_int = 0x40; +pub const ENOPKG: c_int = 0x41; +pub const ENOPROTOOPT: c_int = 0x5c; +pub const ENOSR: c_int = 0x3f; +pub const ENOSTR: c_int = 0x3c; +pub const ENOSYS: c_int = 0x26; +pub const ENOTCONN: c_int = 0x6b; +pub const ENOTEMPTY: c_int = 0x27; +pub const ENOTNAM: c_int = 0x76; +pub const ENOTRECOVERABLE: c_int = 0x83; +pub const ENOTSOCK: c_int = 0x58; +pub const ENOTUNIQ: c_int = 0x4c; +pub const EOPNOTSUPP: c_int = 0x5f; +pub const EOVERFLOW: c_int = 0x4b; +pub const EOWNERDEAD: c_int = 0x82; +pub const EPFNOSUPPORT: c_int = 0x60; +pub const EPOLL_CLOEXEC: c_int = 0x80000; +pub const EPROTO: c_int = 0x47; +pub const EPROTONOSUPPORT: c_int = 0x5d; +pub const EPROTOTYPE: c_int = 0x5b; +pub const EREMCHG: c_int = 0x4e; +pub const EREMOTE: c_int = 0x42; +pub const EREMOTEIO: c_int = 0x79; +pub const ERESTART: c_int = 0x55; +pub const ERFKILL: c_int = 0x84; +pub const ESHUTDOWN: c_int = 0x6c; +pub const ESOCKTNOSUPPORT: c_int = 0x5e; +pub const ESRMNT: c_int = 0x45; +pub const ESTALE: c_int = 0x74; +pub const ESTRPIPE: c_int = 0x56; +pub const ETIME: c_int = 0x3e; +pub const ETIMEDOUT: c_int = 0x6e; +pub const ETOOMANYREFS: c_int = 0x6d; +pub const EUCLEAN: c_int = 0x75; +pub const EUNATCH: c_int = 0x31; +pub const EUSERS: c_int = 0x57; +pub const EXFULL: c_int = 0x36; +pub const FF1: c_int = 0x8000; +pub const FFDLY: c_int = 0x8000; +pub const FLUSHO: crate::tcflag_t = 0x1000; +pub const F_GETLK: c_int = 0x5; +pub const F_SETLK: c_int = 0x6; +pub const F_SETLKW: c_int = 0x7; +pub const HUPCL: crate::tcflag_t = 0x400; +pub const ICANON: crate::tcflag_t = 0x2; +pub const IEXTEN: crate::tcflag_t = 0x8000; +pub const ISIG: crate::tcflag_t = 0x1; +pub const IXOFF: crate::tcflag_t = 0x1000; +pub const IXON: crate::tcflag_t = 0x400; +pub const MAP_ANON: c_int = 0x20; +pub const MAP_ANONYMOUS: c_int = 0x20; +pub const MAP_DENYWRITE: c_int = 0x800; +pub const MAP_EXECUTABLE: c_int = 0x1000; +pub const MAP_GROWSDOWN: c_int = 0x100; +pub const MAP_LOCKED: c_int = 0x2000; +pub const MAP_NONBLOCK: c_int = 0x10000; +pub const MAP_NORESERVE: c_int = 0x4000; +pub const MAP_POPULATE: c_int = 0x8000; +pub const MAP_STACK: c_int = 0x20000; +pub const NLDLY: crate::tcflag_t = 0x100; 
+pub const NOFLSH: crate::tcflag_t = 0x80; +pub const OLCUC: crate::tcflag_t = 0x2; +pub const ONLCR: crate::tcflag_t = 0x4; +pub const O_ACCMODE: c_int = 0x3; +pub const O_APPEND: c_int = 0x400; +pub const O_ASYNC: c_int = 0o20000; +pub const O_CREAT: c_int = 0x40; +pub const O_DIRECT: c_int = 0x10000; +pub const O_DIRECTORY: c_int = 0x4000; +pub const O_DSYNC: c_int = O_SYNC; +pub const O_EXCL: c_int = 0x80; +pub const O_FSYNC: c_int = O_SYNC; +pub const O_LARGEFILE: c_int = 0o400000; +pub const O_NDELAY: c_int = O_NONBLOCK; +pub const O_NOATIME: c_int = 0o1000000; +pub const O_NOCTTY: c_int = 0x100; +pub const O_NOFOLLOW: c_int = 0x8000; +pub const O_NONBLOCK: c_int = 0x800; +pub const O_PATH: c_int = 0o10000000; +pub const O_RSYNC: c_int = O_SYNC; +pub const O_SYNC: c_int = 0o10000; +pub const O_TRUNC: c_int = 0x200; +pub const PARENB: crate::tcflag_t = 0x100; +pub const PARODD: crate::tcflag_t = 0x200; +pub const PENDIN: crate::tcflag_t = 0x4000; +pub const POLLWRBAND: c_short = 0x200; +pub const POLLWRNORM: c_short = 0x100; +pub const PTHREAD_STACK_MIN: size_t = 16384; +pub const RTLD_GLOBAL: c_int = 0x00100; + +// These are typed unsigned to match sigaction +pub const SA_NOCLDSTOP: c_ulong = 0x1; +pub const SA_NOCLDWAIT: c_ulong = 0x2; +pub const SA_SIGINFO: c_ulong = 0x4; +pub const SA_NODEFER: c_ulong = 0x40000000; +pub const SA_ONSTACK: c_ulong = 0x8000000; +pub const SA_RESETHAND: c_ulong = 0x80000000; +pub const SA_RESTART: c_ulong = 0x10000000; + +pub const SFD_CLOEXEC: c_int = 0x80000; +pub const SFD_NONBLOCK: c_int = 0x800; +pub const SIGBUS: c_int = 0x7; +pub const SIGCHLD: c_int = 0x11; +pub const SIGCONT: c_int = 0x12; +pub const SIGIO: c_int = 0x1d; +pub const SIGPROF: c_int = 0x1b; +pub const SIGPWR: c_int = 0x1e; +pub const SIGSTKFLT: c_int = 0x10; +pub const SIGSTKSZ: size_t = 8192; +pub const SIGSTOP: c_int = 0x13; +pub const SIGSYS: c_int = 0x1f; +pub const SIGTSTP: c_int = 0x14; +pub const SIGTTIN: c_int = 0x15; +pub const SIGTTOU: c_int = 0x16; +pub const SIGURG: c_int = 0x17; +pub const SIGUSR1: c_int = 0xa; +pub const SIGUSR2: c_int = 0xc; +pub const SIGVTALRM: c_int = 0x1a; +pub const SIGWINCH: c_int = 0x1c; +pub const SIGXCPU: c_int = 0x18; +pub const SIGXFSZ: c_int = 0x19; +pub const SIG_BLOCK: c_int = 0; +pub const SIG_SETMASK: c_int = 0x2; +pub const SIG_UNBLOCK: c_int = 0x1; +pub const SOCK_DGRAM: c_int = 0x2; +pub const SOCK_NONBLOCK: c_int = 0o0004000; +pub const SOCK_SEQPACKET: c_int = 0x5; +pub const SOCK_STREAM: c_int = 0x1; + +pub const TAB1: c_int = 0x800; +pub const TAB2: c_int = 0x1000; +pub const TAB3: c_int = 0x1800; +pub const TABDLY: c_int = 0x1800; +pub const TCSADRAIN: c_int = 0x1; +pub const TCSAFLUSH: c_int = 0x2; +pub const TCSANOW: c_int = 0; +pub const TOSTOP: crate::tcflag_t = 0x100; +pub const VDISCARD: usize = 0xd; +pub const VEOF: usize = 0x4; +pub const VEOL: usize = 0xb; +pub const VEOL2: usize = 0x10; +pub const VMIN: usize = 0x6; +pub const VREPRINT: usize = 0xc; +pub const VSTART: usize = 0x8; +pub const VSTOP: usize = 0x9; +pub const VSUSP: usize = 0xa; +pub const VSWTC: usize = 0x7; +pub const VT1: c_int = 0x4000; +pub const VTDLY: c_int = 0x4000; +pub const VTIME: usize = 0x5; +pub const VWERASE: usize = 0xe; +pub const XTABS: crate::tcflag_t = 0x1800; + +pub const MADV_SOFT_OFFLINE: c_int = 101; + +// Syscall table is copied from src/unix/notbsd/linux/musl/b32/arm.rs +pub const SYS_restart_syscall: c_long = 0; +pub const SYS_exit: c_long = 1; +pub const SYS_fork: c_long = 2; +pub const SYS_read: c_long = 3; +pub const 
SYS_write: c_long = 4; +pub const SYS_open: c_long = 5; +pub const SYS_close: c_long = 6; +pub const SYS_creat: c_long = 8; +pub const SYS_link: c_long = 9; +pub const SYS_unlink: c_long = 10; +pub const SYS_execve: c_long = 11; +pub const SYS_chdir: c_long = 12; +pub const SYS_mknod: c_long = 14; +pub const SYS_chmod: c_long = 15; +pub const SYS_lchown: c_long = 16; +pub const SYS_lseek: c_long = 19; +pub const SYS_getpid: c_long = 20; +pub const SYS_mount: c_long = 21; +pub const SYS_setuid: c_long = 23; +pub const SYS_getuid: c_long = 24; +pub const SYS_ptrace: c_long = 26; +pub const SYS_pause: c_long = 29; +pub const SYS_access: c_long = 33; +pub const SYS_nice: c_long = 34; +pub const SYS_sync: c_long = 36; +pub const SYS_kill: c_long = 37; +pub const SYS_rename: c_long = 38; +pub const SYS_mkdir: c_long = 39; +pub const SYS_rmdir: c_long = 40; +pub const SYS_dup: c_long = 41; +pub const SYS_pipe: c_long = 42; +pub const SYS_times: c_long = 43; +pub const SYS_brk: c_long = 45; +pub const SYS_setgid: c_long = 46; +pub const SYS_getgid: c_long = 47; +pub const SYS_geteuid: c_long = 49; +pub const SYS_getegid: c_long = 50; +pub const SYS_acct: c_long = 51; +pub const SYS_umount2: c_long = 52; +pub const SYS_ioctl: c_long = 54; +pub const SYS_fcntl: c_long = 55; +pub const SYS_setpgid: c_long = 57; +pub const SYS_umask: c_long = 60; +pub const SYS_chroot: c_long = 61; +pub const SYS_ustat: c_long = 62; +pub const SYS_dup2: c_long = 63; +pub const SYS_getppid: c_long = 64; +pub const SYS_getpgrp: c_long = 65; +pub const SYS_setsid: c_long = 66; +pub const SYS_sigaction: c_long = 67; +pub const SYS_setreuid: c_long = 70; +pub const SYS_setregid: c_long = 71; +pub const SYS_sigsuspend: c_long = 72; +pub const SYS_sigpending: c_long = 73; +pub const SYS_sethostname: c_long = 74; +pub const SYS_setrlimit: c_long = 75; +pub const SYS_getrusage: c_long = 77; +pub const SYS_gettimeofday: c_long = 78; +pub const SYS_settimeofday: c_long = 79; +pub const SYS_getgroups: c_long = 80; +pub const SYS_setgroups: c_long = 81; +pub const SYS_symlink: c_long = 83; +pub const SYS_readlink: c_long = 85; +pub const SYS_uselib: c_long = 86; +pub const SYS_swapon: c_long = 87; +pub const SYS_reboot: c_long = 88; +pub const SYS_munmap: c_long = 91; +pub const SYS_truncate: c_long = 92; +pub const SYS_ftruncate: c_long = 93; +pub const SYS_fchmod: c_long = 94; +pub const SYS_fchown: c_long = 95; +pub const SYS_getpriority: c_long = 96; +pub const SYS_setpriority: c_long = 97; +pub const SYS_statfs: c_long = 99; +pub const SYS_fstatfs: c_long = 100; +pub const SYS_syslog: c_long = 103; +pub const SYS_setitimer: c_long = 104; +pub const SYS_getitimer: c_long = 105; +pub const SYS_stat: c_long = 106; +pub const SYS_lstat: c_long = 107; +pub const SYS_fstat: c_long = 108; +pub const SYS_vhangup: c_long = 111; +pub const SYS_wait4: c_long = 114; +pub const SYS_swapoff: c_long = 115; +pub const SYS_sysinfo: c_long = 116; +pub const SYS_fsync: c_long = 118; +pub const SYS_sigreturn: c_long = 119; +pub const SYS_clone: c_long = 120; +pub const SYS_setdomainname: c_long = 121; +pub const SYS_uname: c_long = 122; +pub const SYS_adjtimex: c_long = 124; +pub const SYS_mprotect: c_long = 125; +pub const SYS_sigprocmask: c_long = 126; +pub const SYS_init_module: c_long = 128; +pub const SYS_delete_module: c_long = 129; +pub const SYS_quotactl: c_long = 131; +pub const SYS_getpgid: c_long = 132; +pub const SYS_fchdir: c_long = 133; +pub const SYS_bdflush: c_long = 134; +pub const SYS_sysfs: c_long = 135; +pub const 
SYS_personality: c_long = 136; +pub const SYS_setfsuid: c_long = 138; +pub const SYS_setfsgid: c_long = 139; +pub const SYS__llseek: c_long = 140; +pub const SYS_getdents: c_long = 141; +pub const SYS__newselect: c_long = 142; +pub const SYS_flock: c_long = 143; +pub const SYS_msync: c_long = 144; +pub const SYS_readv: c_long = 145; +pub const SYS_writev: c_long = 146; +pub const SYS_getsid: c_long = 147; +pub const SYS_fdatasync: c_long = 148; +pub const SYS__sysctl: c_long = 149; +pub const SYS_mlock: c_long = 150; +pub const SYS_munlock: c_long = 151; +pub const SYS_mlockall: c_long = 152; +pub const SYS_munlockall: c_long = 153; +pub const SYS_sched_setparam: c_long = 154; +pub const SYS_sched_getparam: c_long = 155; +pub const SYS_sched_setscheduler: c_long = 156; +pub const SYS_sched_getscheduler: c_long = 157; +pub const SYS_sched_yield: c_long = 158; +pub const SYS_sched_get_priority_max: c_long = 159; +pub const SYS_sched_get_priority_min: c_long = 160; +pub const SYS_sched_rr_get_interval: c_long = 161; +pub const SYS_nanosleep: c_long = 162; +pub const SYS_mremap: c_long = 163; +pub const SYS_setresuid: c_long = 164; +pub const SYS_getresuid: c_long = 165; +pub const SYS_poll: c_long = 168; +pub const SYS_nfsservctl: c_long = 169; +pub const SYS_setresgid: c_long = 170; +pub const SYS_getresgid: c_long = 171; +pub const SYS_prctl: c_long = 172; +pub const SYS_rt_sigreturn: c_long = 173; +pub const SYS_rt_sigaction: c_long = 174; +pub const SYS_rt_sigprocmask: c_long = 175; +pub const SYS_rt_sigpending: c_long = 176; +pub const SYS_rt_sigtimedwait: c_long = 177; +pub const SYS_rt_sigqueueinfo: c_long = 178; +pub const SYS_rt_sigsuspend: c_long = 179; +pub const SYS_pread64: c_long = 180; +pub const SYS_pwrite64: c_long = 181; +pub const SYS_chown: c_long = 182; +pub const SYS_getcwd: c_long = 183; +pub const SYS_capget: c_long = 184; +pub const SYS_capset: c_long = 185; +pub const SYS_sigaltstack: c_long = 186; +pub const SYS_sendfile: c_long = 187; +pub const SYS_vfork: c_long = 190; +pub const SYS_ugetrlimit: c_long = 191; +pub const SYS_mmap2: c_long = 192; +pub const SYS_truncate64: c_long = 193; +pub const SYS_ftruncate64: c_long = 194; +pub const SYS_stat64: c_long = 195; +pub const SYS_lstat64: c_long = 196; +pub const SYS_fstat64: c_long = 197; +pub const SYS_lchown32: c_long = 198; +pub const SYS_getuid32: c_long = 199; +pub const SYS_getgid32: c_long = 200; +pub const SYS_geteuid32: c_long = 201; +pub const SYS_getegid32: c_long = 202; +pub const SYS_setreuid32: c_long = 203; +pub const SYS_setregid32: c_long = 204; +pub const SYS_getgroups32: c_long = 205; +pub const SYS_setgroups32: c_long = 206; +pub const SYS_fchown32: c_long = 207; +pub const SYS_setresuid32: c_long = 208; +pub const SYS_getresuid32: c_long = 209; +pub const SYS_setresgid32: c_long = 210; +pub const SYS_getresgid32: c_long = 211; +pub const SYS_chown32: c_long = 212; +pub const SYS_setuid32: c_long = 213; +pub const SYS_setgid32: c_long = 214; +pub const SYS_setfsuid32: c_long = 215; +pub const SYS_setfsgid32: c_long = 216; +pub const SYS_getdents64: c_long = 217; +pub const SYS_pivot_root: c_long = 218; +pub const SYS_mincore: c_long = 219; +pub const SYS_madvise: c_long = 220; +pub const SYS_fcntl64: c_long = 221; +pub const SYS_gettid: c_long = 224; +pub const SYS_readahead: c_long = 225; +pub const SYS_setxattr: c_long = 226; +pub const SYS_lsetxattr: c_long = 227; +pub const SYS_fsetxattr: c_long = 228; +pub const SYS_getxattr: c_long = 229; +pub const SYS_lgetxattr: c_long = 230; +pub const 
SYS_fgetxattr: c_long = 231; +pub const SYS_listxattr: c_long = 232; +pub const SYS_llistxattr: c_long = 233; +pub const SYS_flistxattr: c_long = 234; +pub const SYS_removexattr: c_long = 235; +pub const SYS_lremovexattr: c_long = 236; +pub const SYS_fremovexattr: c_long = 237; +pub const SYS_tkill: c_long = 238; +pub const SYS_sendfile64: c_long = 239; +pub const SYS_futex: c_long = 240; +pub const SYS_sched_setaffinity: c_long = 241; +pub const SYS_sched_getaffinity: c_long = 242; +pub const SYS_io_setup: c_long = 243; +pub const SYS_io_destroy: c_long = 244; +pub const SYS_io_getevents: c_long = 245; +pub const SYS_io_submit: c_long = 246; +pub const SYS_io_cancel: c_long = 247; +pub const SYS_exit_group: c_long = 248; +pub const SYS_lookup_dcookie: c_long = 249; +pub const SYS_epoll_create: c_long = 250; +pub const SYS_epoll_ctl: c_long = 251; +pub const SYS_epoll_wait: c_long = 252; +pub const SYS_remap_file_pages: c_long = 253; +pub const SYS_set_tid_address: c_long = 256; +pub const SYS_timer_create: c_long = 257; +pub const SYS_timer_settime: c_long = 258; +pub const SYS_timer_gettime: c_long = 259; +pub const SYS_timer_getoverrun: c_long = 260; +pub const SYS_timer_delete: c_long = 261; +pub const SYS_clock_settime: c_long = 262; +pub const SYS_clock_gettime: c_long = 263; +pub const SYS_clock_getres: c_long = 264; +pub const SYS_clock_nanosleep: c_long = 265; +pub const SYS_statfs64: c_long = 266; +pub const SYS_fstatfs64: c_long = 267; +pub const SYS_tgkill: c_long = 268; +pub const SYS_utimes: c_long = 269; +pub const SYS_pciconfig_iobase: c_long = 271; +pub const SYS_pciconfig_read: c_long = 272; +pub const SYS_pciconfig_write: c_long = 273; +pub const SYS_mq_open: c_long = 274; +pub const SYS_mq_unlink: c_long = 275; +pub const SYS_mq_timedsend: c_long = 276; +pub const SYS_mq_timedreceive: c_long = 277; +pub const SYS_mq_notify: c_long = 278; +pub const SYS_mq_getsetattr: c_long = 279; +pub const SYS_waitid: c_long = 280; +pub const SYS_socket: c_long = 281; +pub const SYS_bind: c_long = 282; +pub const SYS_connect: c_long = 283; +pub const SYS_listen: c_long = 284; +pub const SYS_accept: c_long = 285; +pub const SYS_getsockname: c_long = 286; +pub const SYS_getpeername: c_long = 287; +pub const SYS_socketpair: c_long = 288; +pub const SYS_send: c_long = 289; +pub const SYS_sendto: c_long = 290; +pub const SYS_recv: c_long = 291; +pub const SYS_recvfrom: c_long = 292; +pub const SYS_shutdown: c_long = 293; +pub const SYS_setsockopt: c_long = 294; +pub const SYS_getsockopt: c_long = 295; +pub const SYS_sendmsg: c_long = 296; +pub const SYS_recvmsg: c_long = 297; +pub const SYS_semop: c_long = 298; +pub const SYS_semget: c_long = 299; +pub const SYS_semctl: c_long = 300; +pub const SYS_msgsnd: c_long = 301; +pub const SYS_msgrcv: c_long = 302; +pub const SYS_msgget: c_long = 303; +pub const SYS_msgctl: c_long = 304; +pub const SYS_shmat: c_long = 305; +pub const SYS_shmdt: c_long = 306; +pub const SYS_shmget: c_long = 307; +pub const SYS_shmctl: c_long = 308; +pub const SYS_add_key: c_long = 309; +pub const SYS_request_key: c_long = 310; +pub const SYS_keyctl: c_long = 311; +pub const SYS_semtimedop: c_long = 312; +pub const SYS_vserver: c_long = 313; +pub const SYS_ioprio_set: c_long = 314; +pub const SYS_ioprio_get: c_long = 315; +pub const SYS_inotify_init: c_long = 316; +pub const SYS_inotify_add_watch: c_long = 317; +pub const SYS_inotify_rm_watch: c_long = 318; +pub const SYS_mbind: c_long = 319; +pub const SYS_get_mempolicy: c_long = 320; +pub const SYS_set_mempolicy: 
c_long = 321; +pub const SYS_openat: c_long = 322; +pub const SYS_mkdirat: c_long = 323; +pub const SYS_mknodat: c_long = 324; +pub const SYS_fchownat: c_long = 325; +pub const SYS_futimesat: c_long = 326; +pub const SYS_fstatat64: c_long = 327; +pub const SYS_unlinkat: c_long = 328; +pub const SYS_renameat: c_long = 329; +pub const SYS_linkat: c_long = 330; +pub const SYS_symlinkat: c_long = 331; +pub const SYS_readlinkat: c_long = 332; +pub const SYS_fchmodat: c_long = 333; +pub const SYS_faccessat: c_long = 334; +pub const SYS_pselect6: c_long = 335; +pub const SYS_ppoll: c_long = 336; +pub const SYS_unshare: c_long = 337; +pub const SYS_set_robust_list: c_long = 338; +pub const SYS_get_robust_list: c_long = 339; +pub const SYS_splice: c_long = 340; +pub const SYS_tee: c_long = 342; +pub const SYS_vmsplice: c_long = 343; +pub const SYS_move_pages: c_long = 344; +pub const SYS_getcpu: c_long = 345; +pub const SYS_epoll_pwait: c_long = 346; +pub const SYS_kexec_load: c_long = 347; +pub const SYS_utimensat: c_long = 348; +pub const SYS_signalfd: c_long = 349; +pub const SYS_timerfd_create: c_long = 350; +pub const SYS_eventfd: c_long = 351; +pub const SYS_fallocate: c_long = 352; +pub const SYS_timerfd_settime: c_long = 353; +pub const SYS_timerfd_gettime: c_long = 354; +pub const SYS_signalfd4: c_long = 355; +pub const SYS_eventfd2: c_long = 356; +pub const SYS_epoll_create1: c_long = 357; +pub const SYS_dup3: c_long = 358; +pub const SYS_pipe2: c_long = 359; +pub const SYS_inotify_init1: c_long = 360; +pub const SYS_preadv: c_long = 361; +pub const SYS_pwritev: c_long = 362; +pub const SYS_rt_tgsigqueueinfo: c_long = 363; +pub const SYS_perf_event_open: c_long = 364; +pub const SYS_recvmmsg: c_long = 365; +pub const SYS_accept4: c_long = 366; +pub const SYS_fanotify_init: c_long = 367; +pub const SYS_fanotify_mark: c_long = 368; +pub const SYS_prlimit64: c_long = 369; +pub const SYS_name_to_handle_at: c_long = 370; +pub const SYS_open_by_handle_at: c_long = 371; +pub const SYS_clock_adjtime: c_long = 372; +pub const SYS_syncfs: c_long = 373; +pub const SYS_sendmmsg: c_long = 374; +pub const SYS_setns: c_long = 375; +pub const SYS_process_vm_readv: c_long = 376; +pub const SYS_process_vm_writev: c_long = 377; +pub const SYS_kcmp: c_long = 378; +pub const SYS_finit_module: c_long = 379; +pub const SYS_sched_setattr: c_long = 380; +pub const SYS_sched_getattr: c_long = 381; +pub const SYS_renameat2: c_long = 382; +pub const SYS_seccomp: c_long = 383; +pub const SYS_getrandom: c_long = 384; +pub const SYS_memfd_create: c_long = 385; +pub const SYS_bpf: c_long = 386; +pub const SYS_execveat: c_long = 387; +pub const SYS_userfaultfd: c_long = 388; +pub const SYS_membarrier: c_long = 389; +pub const SYS_mlock2: c_long = 390; +pub const SYS_copy_file_range: c_long = 391; +pub const SYS_preadv2: c_long = 392; +pub const SYS_pwritev2: c_long = 393; +pub const SYS_pkey_mprotect: c_long = 394; +pub const SYS_pkey_alloc: c_long = 395; +pub const SYS_pkey_free: c_long = 396; +// FIXME(linux): should be a `c_long` too, but a bug slipped in. 
+pub const SYS_statx: c_int = 397; +pub const SYS_pidfd_send_signal: c_long = 424; +pub const SYS_io_uring_setup: c_long = 425; +pub const SYS_io_uring_enter: c_long = 426; +pub const SYS_io_uring_register: c_long = 427; +pub const SYS_open_tree: c_long = 428; +pub const SYS_move_mount: c_long = 429; +pub const SYS_fsopen: c_long = 430; +pub const SYS_fsconfig: c_long = 431; +pub const SYS_fsmount: c_long = 432; +pub const SYS_fspick: c_long = 433; +pub const SYS_pidfd_open: c_long = 434; +pub const SYS_clone3: c_long = 435; +pub const SYS_close_range: c_long = 436; +pub const SYS_openat2: c_long = 437; +pub const SYS_pidfd_getfd: c_long = 438; +pub const SYS_faccessat2: c_long = 439; +pub const SYS_process_madvise: c_long = 440; +pub const SYS_epoll_pwait2: c_long = 441; +pub const SYS_mount_setattr: c_long = 442; +pub const SYS_quotactl_fd: c_long = 443; +pub const SYS_landlock_create_ruleset: c_long = 444; +pub const SYS_landlock_add_rule: c_long = 445; +pub const SYS_landlock_restrict_self: c_long = 446; +pub const SYS_memfd_secret: c_long = 447; +pub const SYS_process_mrelease: c_long = 448; +pub const SYS_futex_waitv: c_long = 449; +pub const SYS_set_mempolicy_home_node: c_long = 450; diff --git a/vendor/libc/src/unix/linux_like/linux/uclibc/mips/mips32/mod.rs b/vendor/libc/src/unix/linux_like/linux/uclibc/mips/mips32/mod.rs new file mode 100644 index 00000000000000..7dd04409078555 --- /dev/null +++ b/vendor/libc/src/unix/linux_like/linux/uclibc/mips/mips32/mod.rs @@ -0,0 +1,695 @@ +use crate::off64_t; +use crate::prelude::*; + +pub type clock_t = i32; +pub type time_t = i32; +pub type suseconds_t = i32; +pub type wchar_t = i32; +pub type off_t = i32; +pub type ino_t = u32; +pub type blkcnt_t = i32; +pub type blksize_t = i32; +pub type nlink_t = u32; +pub type fsblkcnt_t = c_ulong; +pub type fsfilcnt_t = c_ulong; +pub type __u64 = c_ulonglong; +pub type __s64 = c_longlong; +pub type fsblkcnt64_t = u64; +pub type fsfilcnt64_t = u64; + +s! 
{ + pub struct stat { + pub st_dev: crate::dev_t, + st_pad1: [c_long; 2], + pub st_ino: crate::ino_t, + pub st_mode: crate::mode_t, + pub st_nlink: crate::nlink_t, + pub st_uid: crate::uid_t, + pub st_gid: crate::gid_t, + pub st_rdev: crate::dev_t, + pub st_pad2: [c_long; 1], + pub st_size: off_t, + st_pad3: c_long, + pub st_atime: crate::time_t, + pub st_atime_nsec: c_long, + pub st_mtime: crate::time_t, + pub st_mtime_nsec: c_long, + pub st_ctime: crate::time_t, + pub st_ctime_nsec: c_long, + pub st_blksize: crate::blksize_t, + pub st_blocks: crate::blkcnt_t, + st_pad5: [c_long; 14], + } + + pub struct stat64 { + pub st_dev: crate::dev_t, + st_pad1: [c_long; 2], + pub st_ino: crate::ino64_t, + pub st_mode: crate::mode_t, + pub st_nlink: crate::nlink_t, + pub st_uid: crate::uid_t, + pub st_gid: crate::gid_t, + pub st_rdev: crate::dev_t, + st_pad2: [c_long; 2], + pub st_size: off64_t, + pub st_atime: crate::time_t, + pub st_atime_nsec: c_long, + pub st_mtime: crate::time_t, + pub st_mtime_nsec: c_long, + pub st_ctime: crate::time_t, + pub st_ctime_nsec: c_long, + pub st_blksize: crate::blksize_t, + st_pad3: c_long, + pub st_blocks: crate::blkcnt64_t, + st_pad5: [c_long; 14], + } + + pub struct statvfs64 { + pub f_bsize: c_ulong, + pub f_frsize: c_ulong, + pub f_blocks: crate::fsblkcnt64_t, + pub f_bfree: crate::fsblkcnt64_t, + pub f_bavail: crate::fsblkcnt64_t, + pub f_files: crate::fsfilcnt64_t, + pub f_ffree: crate::fsfilcnt64_t, + pub f_favail: crate::fsfilcnt64_t, + pub f_fsid: c_ulong, + pub __f_unused: c_int, + pub f_flag: c_ulong, + pub f_namemax: c_ulong, + pub __f_spare: [c_int; 6], + } + + pub struct pthread_attr_t { + __size: [u32; 9], + } + + pub struct sigaction { + pub sa_flags: c_uint, + pub sa_sigaction: crate::sighandler_t, + pub sa_mask: sigset_t, + _restorer: *mut c_void, + } + + pub struct stack_t { + pub ss_sp: *mut c_void, + pub ss_size: size_t, + pub ss_flags: c_int, + } + + pub struct sigset_t { + __val: [c_ulong; 4], + } + + pub struct siginfo_t { + pub si_signo: c_int, + pub si_code: c_int, + pub si_errno: c_int, + pub _pad: [c_int; 29], + } + + pub struct glob64_t { + pub gl_pathc: size_t, + pub gl_pathv: *mut *mut c_char, + pub gl_offs: size_t, + pub gl_flags: c_int, + + __unused1: *mut c_void, + __unused2: *mut c_void, + __unused3: *mut c_void, + __unused4: *mut c_void, + __unused5: *mut c_void, + } + + pub struct ipc_perm { + pub __key: crate::key_t, + pub uid: crate::uid_t, + pub gid: crate::gid_t, + pub cuid: crate::uid_t, + pub cgid: crate::gid_t, + pub mode: c_uint, + pub __seq: c_ushort, + __pad1: c_ushort, + __unused1: c_ulong, + __unused2: c_ulong, + } + + pub struct shmid_ds { + pub shm_perm: crate::ipc_perm, + pub shm_segsz: size_t, + pub shm_atime: crate::time_t, + pub shm_dtime: crate::time_t, + pub shm_ctime: crate::time_t, + pub shm_cpid: crate::pid_t, + pub shm_lpid: crate::pid_t, + pub shm_nattch: crate::shmatt_t, + __unused4: c_ulong, + __unused5: c_ulong, + } + + pub struct msqid_ds { + pub msg_perm: crate::ipc_perm, + #[cfg(target_endian = "big")] + __glibc_reserved1: c_ulong, + pub msg_stime: crate::time_t, + #[cfg(target_endian = "little")] + __glibc_reserved1: c_ulong, + #[cfg(target_endian = "big")] + __glibc_reserved2: c_ulong, + pub msg_rtime: crate::time_t, + #[cfg(target_endian = "little")] + __glibc_reserved2: c_ulong, + #[cfg(target_endian = "big")] + __glibc_reserved3: c_ulong, + pub msg_ctime: crate::time_t, + #[cfg(target_endian = "little")] + __glibc_reserved3: c_ulong, + pub __msg_cbytes: c_ulong, + pub msg_qnum: 
crate::msgqnum_t, + pub msg_qbytes: crate::msglen_t, + pub msg_lspid: crate::pid_t, + pub msg_lrpid: crate::pid_t, + __glibc_reserved4: c_ulong, + __glibc_reserved5: c_ulong, + } + + pub struct statfs { + pub f_type: c_long, + pub f_bsize: c_long, + pub f_frsize: c_long, + pub f_blocks: crate::fsblkcnt_t, + pub f_bfree: crate::fsblkcnt_t, + pub f_files: crate::fsblkcnt_t, + pub f_ffree: crate::fsblkcnt_t, + pub f_bavail: crate::fsblkcnt_t, + pub f_fsid: crate::fsid_t, + + pub f_namelen: c_long, + f_spare: [c_long; 6], + } + + pub struct statfs64 { + pub f_type: c_long, + pub f_bsize: c_long, + pub f_frsize: c_long, + pub f_blocks: crate::fsblkcnt64_t, + pub f_bfree: crate::fsblkcnt64_t, + pub f_files: crate::fsblkcnt64_t, + pub f_ffree: crate::fsblkcnt64_t, + pub f_bavail: crate::fsblkcnt64_t, + pub f_fsid: crate::fsid_t, + pub f_namelen: c_long, + pub f_flags: c_long, + pub f_spare: [c_long; 5], + } + + pub struct msghdr { + pub msg_name: *mut c_void, + pub msg_namelen: crate::socklen_t, + pub msg_iov: *mut crate::iovec, + pub msg_iovlen: c_int, + pub msg_control: *mut c_void, + pub msg_controllen: size_t, + pub msg_flags: c_int, + } + + pub struct cmsghdr { + pub cmsg_len: size_t, + pub cmsg_level: c_int, + pub cmsg_type: c_int, + } + + pub struct termios { + pub c_iflag: crate::tcflag_t, + pub c_oflag: crate::tcflag_t, + pub c_cflag: crate::tcflag_t, + pub c_lflag: crate::tcflag_t, + pub c_line: crate::cc_t, + pub c_cc: [crate::cc_t; crate::NCCS], + } + + pub struct flock { + pub l_type: c_short, + pub l_whence: c_short, + pub l_start: off_t, + pub l_len: off_t, + pub l_sysid: c_long, + pub l_pid: crate::pid_t, + pad: [c_long; 4], + } + + pub struct sysinfo { + pub uptime: c_long, + pub loads: [c_ulong; 3], + pub totalram: c_ulong, + pub freeram: c_ulong, + pub sharedram: c_ulong, + pub bufferram: c_ulong, + pub totalswap: c_ulong, + pub freeswap: c_ulong, + pub procs: c_ushort, + pub pad: c_ushort, + pub totalhigh: c_ulong, + pub freehigh: c_ulong, + pub mem_unit: c_uint, + pub _f: [c_char; 8], + } + + // FIXME(1.0): this is actually a union + #[cfg_attr(target_pointer_width = "32", repr(align(4)))] + #[cfg_attr(target_pointer_width = "64", repr(align(8)))] + pub struct sem_t { + #[cfg(target_pointer_width = "32")] + __size: [c_char; 16], + #[cfg(target_pointer_width = "64")] + __size: [c_char; 32], + } +} + +pub const __SIZEOF_PTHREAD_ATTR_T: usize = 36; +pub const __SIZEOF_PTHREAD_MUTEX_T: usize = 24; +pub const __SIZEOF_PTHREAD_MUTEXATTR_T: usize = 4; +pub const __SIZEOF_PTHREAD_CONDATTR_T: usize = 4; +pub const __SIZEOF_PTHREAD_RWLOCK_T: usize = 32; +pub const __SIZEOF_PTHREAD_RWLOCKATTR_T: usize = 8; +pub const __SIZEOF_PTHREAD_BARRIER_T: usize = 20; +pub const __SIZEOF_PTHREAD_BARRIERATTR_T: usize = 4; + +pub const SYS_syscall: c_long = 4000 + 0; +pub const SYS_exit: c_long = 4000 + 1; +pub const SYS_fork: c_long = 4000 + 2; +pub const SYS_read: c_long = 4000 + 3; +pub const SYS_write: c_long = 4000 + 4; +pub const SYS_open: c_long = 4000 + 5; +pub const SYS_close: c_long = 4000 + 6; +pub const SYS_waitpid: c_long = 4000 + 7; +pub const SYS_creat: c_long = 4000 + 8; +pub const SYS_link: c_long = 4000 + 9; +pub const SYS_unlink: c_long = 4000 + 10; +pub const SYS_execve: c_long = 4000 + 11; +pub const SYS_chdir: c_long = 4000 + 12; +pub const SYS_time: c_long = 4000 + 13; +pub const SYS_mknod: c_long = 4000 + 14; +pub const SYS_chmod: c_long = 4000 + 15; +pub const SYS_lchown: c_long = 4000 + 16; +pub const SYS_break: c_long = 4000 + 17; +pub const SYS_lseek: c_long = 4000 + 19; 
+pub const SYS_getpid: c_long = 4000 + 20; +pub const SYS_mount: c_long = 4000 + 21; +pub const SYS_umount: c_long = 4000 + 22; +pub const SYS_setuid: c_long = 4000 + 23; +pub const SYS_getuid: c_long = 4000 + 24; +pub const SYS_stime: c_long = 4000 + 25; +pub const SYS_ptrace: c_long = 4000 + 26; +pub const SYS_alarm: c_long = 4000 + 27; +pub const SYS_pause: c_long = 4000 + 29; +pub const SYS_utime: c_long = 4000 + 30; +pub const SYS_stty: c_long = 4000 + 31; +pub const SYS_gtty: c_long = 4000 + 32; +pub const SYS_access: c_long = 4000 + 33; +pub const SYS_nice: c_long = 4000 + 34; +pub const SYS_ftime: c_long = 4000 + 35; +pub const SYS_sync: c_long = 4000 + 36; +pub const SYS_kill: c_long = 4000 + 37; +pub const SYS_rename: c_long = 4000 + 38; +pub const SYS_mkdir: c_long = 4000 + 39; +pub const SYS_rmdir: c_long = 4000 + 40; +pub const SYS_dup: c_long = 4000 + 41; +pub const SYS_pipe: c_long = 4000 + 42; +pub const SYS_times: c_long = 4000 + 43; +pub const SYS_prof: c_long = 4000 + 44; +pub const SYS_brk: c_long = 4000 + 45; +pub const SYS_setgid: c_long = 4000 + 46; +pub const SYS_getgid: c_long = 4000 + 47; +pub const SYS_signal: c_long = 4000 + 48; +pub const SYS_geteuid: c_long = 4000 + 49; +pub const SYS_getegid: c_long = 4000 + 50; +pub const SYS_acct: c_long = 4000 + 51; +pub const SYS_umount2: c_long = 4000 + 52; +pub const SYS_lock: c_long = 4000 + 53; +pub const SYS_ioctl: c_long = 4000 + 54; +pub const SYS_fcntl: c_long = 4000 + 55; +pub const SYS_mpx: c_long = 4000 + 56; +pub const SYS_setpgid: c_long = 4000 + 57; +pub const SYS_ulimit: c_long = 4000 + 58; +pub const SYS_umask: c_long = 4000 + 60; +pub const SYS_chroot: c_long = 4000 + 61; +pub const SYS_ustat: c_long = 4000 + 62; +pub const SYS_dup2: c_long = 4000 + 63; +pub const SYS_getppid: c_long = 4000 + 64; +pub const SYS_getpgrp: c_long = 4000 + 65; +pub const SYS_setsid: c_long = 4000 + 66; +pub const SYS_sigaction: c_long = 4000 + 67; +pub const SYS_sgetmask: c_long = 4000 + 68; +pub const SYS_ssetmask: c_long = 4000 + 69; +pub const SYS_setreuid: c_long = 4000 + 70; +pub const SYS_setregid: c_long = 4000 + 71; +pub const SYS_sigsuspend: c_long = 4000 + 72; +pub const SYS_sigpending: c_long = 4000 + 73; +pub const SYS_sethostname: c_long = 4000 + 74; +pub const SYS_setrlimit: c_long = 4000 + 75; +pub const SYS_getrlimit: c_long = 4000 + 76; +pub const SYS_getrusage: c_long = 4000 + 77; +pub const SYS_gettimeofday: c_long = 4000 + 78; +pub const SYS_settimeofday: c_long = 4000 + 79; +pub const SYS_getgroups: c_long = 4000 + 80; +pub const SYS_setgroups: c_long = 4000 + 81; +pub const SYS_symlink: c_long = 4000 + 83; +pub const SYS_readlink: c_long = 4000 + 85; +pub const SYS_uselib: c_long = 4000 + 86; +pub const SYS_swapon: c_long = 4000 + 87; +pub const SYS_reboot: c_long = 4000 + 88; +pub const SYS_readdir: c_long = 4000 + 89; +pub const SYS_mmap: c_long = 4000 + 90; +pub const SYS_munmap: c_long = 4000 + 91; +pub const SYS_truncate: c_long = 4000 + 92; +pub const SYS_ftruncate: c_long = 4000 + 93; +pub const SYS_fchmod: c_long = 4000 + 94; +pub const SYS_fchown: c_long = 4000 + 95; +pub const SYS_getpriority: c_long = 4000 + 96; +pub const SYS_setpriority: c_long = 4000 + 97; +pub const SYS_profil: c_long = 4000 + 98; +pub const SYS_statfs: c_long = 4000 + 99; +pub const SYS_fstatfs: c_long = 4000 + 100; +pub const SYS_ioperm: c_long = 4000 + 101; +pub const SYS_socketcall: c_long = 4000 + 102; +pub const SYS_syslog: c_long = 4000 + 103; +pub const SYS_setitimer: c_long = 4000 + 104; +pub const SYS_getitimer: 
c_long = 4000 + 105; +pub const SYS_stat: c_long = 4000 + 106; +pub const SYS_lstat: c_long = 4000 + 107; +pub const SYS_fstat: c_long = 4000 + 108; +pub const SYS_iopl: c_long = 4000 + 110; +pub const SYS_vhangup: c_long = 4000 + 111; +pub const SYS_idle: c_long = 4000 + 112; +pub const SYS_vm86: c_long = 4000 + 113; +pub const SYS_wait4: c_long = 4000 + 114; +pub const SYS_swapoff: c_long = 4000 + 115; +pub const SYS_sysinfo: c_long = 4000 + 116; +pub const SYS_ipc: c_long = 4000 + 117; +pub const SYS_fsync: c_long = 4000 + 118; +pub const SYS_sigreturn: c_long = 4000 + 119; +pub const SYS_clone: c_long = 4000 + 120; +pub const SYS_setdomainname: c_long = 4000 + 121; +pub const SYS_uname: c_long = 4000 + 122; +pub const SYS_modify_ldt: c_long = 4000 + 123; +pub const SYS_adjtimex: c_long = 4000 + 124; +pub const SYS_mprotect: c_long = 4000 + 125; +pub const SYS_sigprocmask: c_long = 4000 + 126; +#[deprecated(since = "0.2.70", note = "Functional up to 2.6 kernel")] +pub const SYS_create_module: c_long = 4000 + 127; +pub const SYS_init_module: c_long = 4000 + 128; +pub const SYS_delete_module: c_long = 4000 + 129; +#[deprecated(since = "0.2.70", note = "Functional up to 2.6 kernel")] +pub const SYS_get_kernel_syms: c_long = 4000 + 130; +pub const SYS_quotactl: c_long = 4000 + 131; +pub const SYS_getpgid: c_long = 4000 + 132; +pub const SYS_fchdir: c_long = 4000 + 133; +pub const SYS_bdflush: c_long = 4000 + 134; +pub const SYS_sysfs: c_long = 4000 + 135; +pub const SYS_personality: c_long = 4000 + 136; +pub const SYS_afs_syscall: c_long = 4000 + 137; +pub const SYS_setfsuid: c_long = 4000 + 138; +pub const SYS_setfsgid: c_long = 4000 + 139; +pub const SYS__llseek: c_long = 4000 + 140; +pub const SYS_getdents: c_long = 4000 + 141; +pub const SYS__newselect: c_long = 4000 + 142; +pub const SYS_flock: c_long = 4000 + 143; +pub const SYS_msync: c_long = 4000 + 144; +pub const SYS_readv: c_long = 4000 + 145; +pub const SYS_writev: c_long = 4000 + 146; +pub const SYS_cacheflush: c_long = 4000 + 147; +pub const SYS_cachectl: c_long = 4000 + 148; +pub const SYS_sysmips: c_long = 4000 + 149; +pub const SYS_getsid: c_long = 4000 + 151; +pub const SYS_fdatasync: c_long = 4000 + 152; +pub const SYS__sysctl: c_long = 4000 + 153; +pub const SYS_mlock: c_long = 4000 + 154; +pub const SYS_munlock: c_long = 4000 + 155; +pub const SYS_mlockall: c_long = 4000 + 156; +pub const SYS_munlockall: c_long = 4000 + 157; +pub const SYS_sched_setparam: c_long = 4000 + 158; +pub const SYS_sched_getparam: c_long = 4000 + 159; +pub const SYS_sched_setscheduler: c_long = 4000 + 160; +pub const SYS_sched_getscheduler: c_long = 4000 + 161; +pub const SYS_sched_yield: c_long = 4000 + 162; +pub const SYS_sched_get_priority_max: c_long = 4000 + 163; +pub const SYS_sched_get_priority_min: c_long = 4000 + 164; +pub const SYS_sched_rr_get_interval: c_long = 4000 + 165; +pub const SYS_nanosleep: c_long = 4000 + 166; +pub const SYS_mremap: c_long = 4000 + 167; +pub const SYS_accept: c_long = 4000 + 168; +pub const SYS_bind: c_long = 4000 + 169; +pub const SYS_connect: c_long = 4000 + 170; +pub const SYS_getpeername: c_long = 4000 + 171; +pub const SYS_getsockname: c_long = 4000 + 172; +pub const SYS_getsockopt: c_long = 4000 + 173; +pub const SYS_listen: c_long = 4000 + 174; +pub const SYS_recv: c_long = 4000 + 175; +pub const SYS_recvfrom: c_long = 4000 + 176; +pub const SYS_recvmsg: c_long = 4000 + 177; +pub const SYS_send: c_long = 4000 + 178; +pub const SYS_sendmsg: c_long = 4000 + 179; +pub const SYS_sendto: c_long = 4000 + 
180; +pub const SYS_setsockopt: c_long = 4000 + 181; +pub const SYS_shutdown: c_long = 4000 + 182; +pub const SYS_socket: c_long = 4000 + 183; +pub const SYS_socketpair: c_long = 4000 + 184; +pub const SYS_setresuid: c_long = 4000 + 185; +pub const SYS_getresuid: c_long = 4000 + 186; +#[deprecated(since = "0.2.70", note = "Functional up to 2.6 kernel")] +pub const SYS_query_module: c_long = 4000 + 187; +pub const SYS_poll: c_long = 4000 + 188; +pub const SYS_nfsservctl: c_long = 4000 + 189; +pub const SYS_setresgid: c_long = 4000 + 190; +pub const SYS_getresgid: c_long = 4000 + 191; +pub const SYS_prctl: c_long = 4000 + 192; +pub const SYS_rt_sigreturn: c_long = 4000 + 193; +pub const SYS_rt_sigaction: c_long = 4000 + 194; +pub const SYS_rt_sigprocmask: c_long = 4000 + 195; +pub const SYS_rt_sigpending: c_long = 4000 + 196; +pub const SYS_rt_sigtimedwait: c_long = 4000 + 197; +pub const SYS_rt_sigqueueinfo: c_long = 4000 + 198; +pub const SYS_rt_sigsuspend: c_long = 4000 + 199; +pub const SYS_pread64: c_long = 4000 + 200; +pub const SYS_pwrite64: c_long = 4000 + 201; +pub const SYS_chown: c_long = 4000 + 202; +pub const SYS_getcwd: c_long = 4000 + 203; +pub const SYS_capget: c_long = 4000 + 204; +pub const SYS_capset: c_long = 4000 + 205; +pub const SYS_sigaltstack: c_long = 4000 + 206; +pub const SYS_sendfile: c_long = 4000 + 207; +pub const SYS_getpmsg: c_long = 4000 + 208; +pub const SYS_putpmsg: c_long = 4000 + 209; +pub const SYS_mmap2: c_long = 4000 + 210; +pub const SYS_truncate64: c_long = 4000 + 211; +pub const SYS_ftruncate64: c_long = 4000 + 212; +pub const SYS_stat64: c_long = 4000 + 213; +pub const SYS_lstat64: c_long = 4000 + 214; +pub const SYS_fstat64: c_long = 4000 + 215; +pub const SYS_pivot_root: c_long = 4000 + 216; +pub const SYS_mincore: c_long = 4000 + 217; +pub const SYS_madvise: c_long = 4000 + 218; +pub const SYS_getdents64: c_long = 4000 + 219; +pub const SYS_fcntl64: c_long = 4000 + 220; +pub const SYS_gettid: c_long = 4000 + 222; +pub const SYS_readahead: c_long = 4000 + 223; +pub const SYS_setxattr: c_long = 4000 + 224; +pub const SYS_lsetxattr: c_long = 4000 + 225; +pub const SYS_fsetxattr: c_long = 4000 + 226; +pub const SYS_getxattr: c_long = 4000 + 227; +pub const SYS_lgetxattr: c_long = 4000 + 228; +pub const SYS_fgetxattr: c_long = 4000 + 229; +pub const SYS_listxattr: c_long = 4000 + 230; +pub const SYS_llistxattr: c_long = 4000 + 231; +pub const SYS_flistxattr: c_long = 4000 + 232; +pub const SYS_removexattr: c_long = 4000 + 233; +pub const SYS_lremovexattr: c_long = 4000 + 234; +pub const SYS_fremovexattr: c_long = 4000 + 235; +pub const SYS_tkill: c_long = 4000 + 236; +pub const SYS_sendfile64: c_long = 4000 + 237; +pub const SYS_futex: c_long = 4000 + 238; +pub const SYS_sched_setaffinity: c_long = 4000 + 239; +pub const SYS_sched_getaffinity: c_long = 4000 + 240; +pub const SYS_io_setup: c_long = 4000 + 241; +pub const SYS_io_destroy: c_long = 4000 + 242; +pub const SYS_io_getevents: c_long = 4000 + 243; +pub const SYS_io_submit: c_long = 4000 + 244; +pub const SYS_io_cancel: c_long = 4000 + 245; +pub const SYS_exit_group: c_long = 4000 + 246; +pub const SYS_lookup_dcookie: c_long = 4000 + 247; +pub const SYS_epoll_create: c_long = 4000 + 248; +pub const SYS_epoll_ctl: c_long = 4000 + 249; +pub const SYS_epoll_wait: c_long = 4000 + 250; +pub const SYS_remap_file_pages: c_long = 4000 + 251; +pub const SYS_set_tid_address: c_long = 4000 + 252; +pub const SYS_restart_syscall: c_long = 4000 + 253; +pub const SYS_fadvise64: c_long = 4000 + 254; +pub 
const SYS_statfs64: c_long = 4000 + 255; +pub const SYS_fstatfs64: c_long = 4000 + 256; +pub const SYS_timer_create: c_long = 4000 + 257; +pub const SYS_timer_settime: c_long = 4000 + 258; +pub const SYS_timer_gettime: c_long = 4000 + 259; +pub const SYS_timer_getoverrun: c_long = 4000 + 260; +pub const SYS_timer_delete: c_long = 4000 + 261; +pub const SYS_clock_settime: c_long = 4000 + 262; +pub const SYS_clock_gettime: c_long = 4000 + 263; +pub const SYS_clock_getres: c_long = 4000 + 264; +pub const SYS_clock_nanosleep: c_long = 4000 + 265; +pub const SYS_tgkill: c_long = 4000 + 266; +pub const SYS_utimes: c_long = 4000 + 267; +pub const SYS_mbind: c_long = 4000 + 268; +pub const SYS_get_mempolicy: c_long = 4000 + 269; +pub const SYS_set_mempolicy: c_long = 4000 + 270; +pub const SYS_mq_open: c_long = 4000 + 271; +pub const SYS_mq_unlink: c_long = 4000 + 272; +pub const SYS_mq_timedsend: c_long = 4000 + 273; +pub const SYS_mq_timedreceive: c_long = 4000 + 274; +pub const SYS_mq_notify: c_long = 4000 + 275; +pub const SYS_mq_getsetattr: c_long = 4000 + 276; +pub const SYS_vserver: c_long = 4000 + 277; +pub const SYS_waitid: c_long = 4000 + 278; +/* pub const SYS_sys_setaltroot: c_long = 4000 + 279; */ +pub const SYS_add_key: c_long = 4000 + 280; +pub const SYS_request_key: c_long = 4000 + 281; +pub const SYS_keyctl: c_long = 4000 + 282; +pub const SYS_set_thread_area: c_long = 4000 + 283; +pub const SYS_inotify_init: c_long = 4000 + 284; +pub const SYS_inotify_add_watch: c_long = 4000 + 285; +pub const SYS_inotify_rm_watch: c_long = 4000 + 286; +pub const SYS_migrate_pages: c_long = 4000 + 287; +pub const SYS_openat: c_long = 4000 + 288; +pub const SYS_mkdirat: c_long = 4000 + 289; +pub const SYS_mknodat: c_long = 4000 + 290; +pub const SYS_fchownat: c_long = 4000 + 291; +pub const SYS_futimesat: c_long = 4000 + 292; +pub const SYS_fstatat64: c_long = 4000 + 293; +pub const SYS_unlinkat: c_long = 4000 + 294; +pub const SYS_renameat: c_long = 4000 + 295; +pub const SYS_linkat: c_long = 4000 + 296; +pub const SYS_symlinkat: c_long = 4000 + 297; +pub const SYS_readlinkat: c_long = 4000 + 298; +pub const SYS_fchmodat: c_long = 4000 + 299; +pub const SYS_faccessat: c_long = 4000 + 300; +pub const SYS_pselect6: c_long = 4000 + 301; +pub const SYS_ppoll: c_long = 4000 + 302; +pub const SYS_unshare: c_long = 4000 + 303; +pub const SYS_splice: c_long = 4000 + 304; +pub const SYS_sync_file_range: c_long = 4000 + 305; +pub const SYS_tee: c_long = 4000 + 306; +pub const SYS_vmsplice: c_long = 4000 + 307; +pub const SYS_move_pages: c_long = 4000 + 308; +pub const SYS_set_robust_list: c_long = 4000 + 309; +pub const SYS_get_robust_list: c_long = 4000 + 310; +pub const SYS_kexec_load: c_long = 4000 + 311; +pub const SYS_getcpu: c_long = 4000 + 312; +pub const SYS_epoll_pwait: c_long = 4000 + 313; +pub const SYS_ioprio_set: c_long = 4000 + 314; +pub const SYS_ioprio_get: c_long = 4000 + 315; +pub const SYS_utimensat: c_long = 4000 + 316; +pub const SYS_signalfd: c_long = 4000 + 317; +pub const SYS_timerfd: c_long = 4000 + 318; +pub const SYS_eventfd: c_long = 4000 + 319; +pub const SYS_fallocate: c_long = 4000 + 320; +pub const SYS_timerfd_create: c_long = 4000 + 321; +pub const SYS_timerfd_gettime: c_long = 4000 + 322; +pub const SYS_timerfd_settime: c_long = 4000 + 323; +pub const SYS_signalfd4: c_long = 4000 + 324; +pub const SYS_eventfd2: c_long = 4000 + 325; +pub const SYS_epoll_create1: c_long = 4000 + 326; +pub const SYS_dup3: c_long = 4000 + 327; +pub const SYS_pipe2: c_long = 4000 + 328; +pub 
const SYS_inotify_init1: c_long = 4000 + 329; +pub const SYS_preadv: c_long = 4000 + 330; +pub const SYS_pwritev: c_long = 4000 + 331; +pub const SYS_rt_tgsigqueueinfo: c_long = 4000 + 332; +pub const SYS_perf_event_open: c_long = 4000 + 333; +pub const SYS_accept4: c_long = 4000 + 334; +pub const SYS_recvmmsg: c_long = 4000 + 335; +pub const SYS_fanotify_init: c_long = 4000 + 336; +pub const SYS_fanotify_mark: c_long = 4000 + 337; +pub const SYS_prlimit64: c_long = 4000 + 338; +pub const SYS_name_to_handle_at: c_long = 4000 + 339; +pub const SYS_open_by_handle_at: c_long = 4000 + 340; +pub const SYS_clock_adjtime: c_long = 4000 + 341; +pub const SYS_syncfs: c_long = 4000 + 342; +pub const SYS_sendmmsg: c_long = 4000 + 343; +pub const SYS_setns: c_long = 4000 + 344; +pub const SYS_process_vm_readv: c_long = 4000 + 345; +pub const SYS_process_vm_writev: c_long = 4000 + 346; +pub const SYS_kcmp: c_long = 4000 + 347; +pub const SYS_finit_module: c_long = 4000 + 348; +pub const SYS_sched_setattr: c_long = 4000 + 349; +pub const SYS_sched_getattr: c_long = 4000 + 350; +pub const SYS_renameat2: c_long = 4000 + 351; +pub const SYS_seccomp: c_long = 4000 + 352; +pub const SYS_getrandom: c_long = 4000 + 353; +pub const SYS_memfd_create: c_long = 4000 + 354; +pub const SYS_bpf: c_long = 4000 + 355; +pub const SYS_execveat: c_long = 4000 + 356; +pub const SYS_userfaultfd: c_long = 4000 + 357; +pub const SYS_membarrier: c_long = 4000 + 358; +pub const SYS_mlock2: c_long = 4000 + 359; +pub const SYS_copy_file_range: c_long = 4000 + 360; +pub const SYS_preadv2: c_long = 4000 + 361; +pub const SYS_pwritev2: c_long = 4000 + 362; +pub const SYS_pkey_mprotect: c_long = 4000 + 363; +pub const SYS_pkey_alloc: c_long = 4000 + 364; +pub const SYS_pkey_free: c_long = 4000 + 365; +pub const SYS_statx: c_long = 4000 + 366; +pub const SYS_pidfd_send_signal: c_long = 4000 + 424; +pub const SYS_io_uring_setup: c_long = 4000 + 425; +pub const SYS_io_uring_enter: c_long = 4000 + 426; +pub const SYS_io_uring_register: c_long = 4000 + 427; +pub const SYS_open_tree: c_long = 4000 + 428; +pub const SYS_move_mount: c_long = 4000 + 429; +pub const SYS_fsopen: c_long = 4000 + 430; +pub const SYS_fsconfig: c_long = 4000 + 431; +pub const SYS_fsmount: c_long = 4000 + 432; +pub const SYS_fspick: c_long = 4000 + 433; +pub const SYS_pidfd_open: c_long = 4000 + 434; +pub const SYS_clone3: c_long = 4000 + 435; +pub const SYS_close_range: c_long = 4000 + 436; +pub const SYS_openat2: c_long = 4000 + 437; +pub const SYS_pidfd_getfd: c_long = 4000 + 438; +pub const SYS_faccessat2: c_long = 4000 + 439; +pub const SYS_process_madvise: c_long = 4000 + 440; +pub const SYS_epoll_pwait2: c_long = 4000 + 441; +pub const SYS_mount_setattr: c_long = 4000 + 442; +pub const SYS_quotactl_fd: c_long = 4000 + 443; +pub const SYS_landlock_create_ruleset: c_long = 4000 + 444; +pub const SYS_landlock_add_rule: c_long = 4000 + 445; +pub const SYS_landlock_restrict_self: c_long = 4000 + 446; +pub const SYS_memfd_secret: c_long = 4000 + 447; +pub const SYS_process_mrelease: c_long = 4000 + 448; +pub const SYS_futex_waitv: c_long = 4000 + 449; +pub const SYS_set_mempolicy_home_node: c_long = 4000 + 450; + +#[link(name = "util")] +extern "C" { + pub fn sysctl( + name: *mut c_int, + namelen: c_int, + oldp: *mut c_void, + oldlenp: *mut size_t, + newp: *mut c_void, + newlen: size_t, + ) -> c_int; + pub fn glob64( + pattern: *const c_char, + flags: c_int, + errfunc: Option<extern "C" fn(epath: *const c_char, errno: c_int) -> c_int>, + pglob: *mut glob64_t, + ) -> c_int; + pub fn globfree64(pglob: *mut glob64_t); +
pub fn pthread_attr_getaffinity_np( + attr: *const crate::pthread_attr_t, + cpusetsize: size_t, + cpuset: *mut crate::cpu_set_t, + ) -> c_int; + pub fn pthread_attr_setaffinity_np( + attr: *mut crate::pthread_attr_t, + cpusetsize: size_t, + cpuset: *const crate::cpu_set_t, + ) -> c_int; +} diff --git a/vendor/libc/src/unix/linux_like/linux/uclibc/mips/mips64/mod.rs b/vendor/libc/src/unix/linux_like/linux/uclibc/mips/mips64/mod.rs new file mode 100644 index 00000000000000..39eb0242730d84 --- /dev/null +++ b/vendor/libc/src/unix/linux_like/linux/uclibc/mips/mips64/mod.rs @@ -0,0 +1,204 @@ +use crate::off64_t; +use crate::prelude::*; + +pub type blkcnt_t = i64; +pub type blksize_t = i64; +pub type fsblkcnt_t = c_ulong; +pub type fsfilcnt_t = c_ulong; +pub type ino_t = u64; +pub type nlink_t = u64; +pub type off_t = i64; +pub type suseconds_t = i64; +pub type time_t = i64; +pub type wchar_t = i32; + +s! { + pub struct stat { + pub st_dev: c_ulong, + st_pad1: [c_long; 2], + pub st_ino: crate::ino_t, + pub st_mode: crate::mode_t, + pub st_nlink: crate::nlink_t, + pub st_uid: crate::uid_t, + pub st_gid: crate::gid_t, + pub st_rdev: c_ulong, + st_pad2: [c_ulong; 1], + pub st_size: off_t, + st_pad3: c_long, + pub st_atime: crate::time_t, + pub st_atime_nsec: c_long, + pub st_mtime: crate::time_t, + pub st_mtime_nsec: c_long, + pub st_ctime: crate::time_t, + pub st_ctime_nsec: c_long, + pub st_blksize: crate::blksize_t, + st_pad4: c_long, + pub st_blocks: crate::blkcnt_t, + st_pad5: [c_long; 7], + } + + pub struct stat64 { + pub st_dev: c_ulong, + st_pad1: [c_long; 2], + pub st_ino: crate::ino64_t, + pub st_mode: crate::mode_t, + pub st_nlink: crate::nlink_t, + pub st_uid: crate::uid_t, + pub st_gid: crate::gid_t, + pub st_rdev: c_ulong, + st_pad2: [c_long; 2], + pub st_size: off64_t, + pub st_atime: crate::time_t, + pub st_atime_nsec: c_long, + pub st_mtime: crate::time_t, + pub st_mtime_nsec: c_long, + pub st_ctime: crate::time_t, + pub st_ctime_nsec: c_long, + pub st_blksize: crate::blksize_t, + st_pad3: c_long, + pub st_blocks: crate::blkcnt64_t, + st_pad5: [c_long; 7], + } + + pub struct pthread_attr_t { + __size: [c_ulong; 7], + } + + pub struct sigaction { + pub sa_flags: c_int, + pub sa_sigaction: crate::sighandler_t, + pub sa_mask: sigset_t, + _restorer: *mut c_void, + } + + pub struct stack_t { + pub ss_sp: *mut c_void, + pub ss_size: size_t, + pub ss_flags: c_int, + } + + pub struct sigset_t { + __size: [c_ulong; 16], + } + + pub struct siginfo_t { + pub si_signo: c_int, + pub si_code: c_int, + pub si_errno: c_int, + _pad: c_int, + _pad2: [c_long; 14], + } + + pub struct ipc_perm { + pub __key: crate::key_t, + pub uid: crate::uid_t, + pub gid: crate::gid_t, + pub cuid: crate::uid_t, + pub cgid: crate::gid_t, + pub mode: c_uint, + pub __seq: c_ushort, + __pad1: c_ushort, + __unused1: c_ulong, + __unused2: c_ulong, + } + + pub struct shmid_ds { + pub shm_perm: crate::ipc_perm, + pub shm_segsz: size_t, + pub shm_atime: crate::time_t, + pub shm_dtime: crate::time_t, + pub shm_ctime: crate::time_t, + pub shm_cpid: crate::pid_t, + pub shm_lpid: crate::pid_t, + pub shm_nattch: crate::shmatt_t, + __unused4: c_ulong, + __unused5: c_ulong, + } + + pub struct msqid_ds { + pub msg_perm: crate::ipc_perm, + pub msg_stime: crate::time_t, + pub msg_rtime: crate::time_t, + pub msg_ctime: crate::time_t, + pub __msg_cbytes: c_ulong, + pub msg_qnum: crate::msgqnum_t, + pub msg_qbytes: crate::msglen_t, + pub msg_lspid: crate::pid_t, + pub msg_lrpid: crate::pid_t, + __glibc_reserved4: c_ulong, + 
__glibc_reserved5: c_ulong, + } + + pub struct statfs { + pub f_type: c_long, + pub f_bsize: c_long, + pub f_frsize: c_long, + pub f_blocks: crate::fsblkcnt_t, + pub f_bfree: crate::fsblkcnt_t, + pub f_files: crate::fsblkcnt_t, + pub f_ffree: crate::fsblkcnt_t, + pub f_bavail: crate::fsblkcnt_t, + pub f_fsid: crate::fsid_t, + + pub f_namelen: c_long, + f_spare: [c_long; 6], + } + + pub struct msghdr { + pub msg_name: *mut c_void, + pub msg_namelen: crate::socklen_t, + pub msg_iov: *mut crate::iovec, + pub msg_iovlen: size_t, + pub msg_control: *mut c_void, + pub msg_controllen: size_t, + pub msg_flags: c_int, + } + + pub struct cmsghdr { + pub cmsg_len: size_t, + pub cmsg_level: c_int, + pub cmsg_type: c_int, + } + + pub struct termios { + pub c_iflag: crate::tcflag_t, + pub c_oflag: crate::tcflag_t, + pub c_cflag: crate::tcflag_t, + pub c_lflag: crate::tcflag_t, + pub c_line: crate::cc_t, + pub c_cc: [crate::cc_t; crate::NCCS], + } + + pub struct sysinfo { + pub uptime: c_long, + pub loads: [c_ulong; 3], + pub totalram: c_ulong, + pub freeram: c_ulong, + pub sharedram: c_ulong, + pub bufferram: c_ulong, + pub totalswap: c_ulong, + pub freeswap: c_ulong, + pub procs: c_ushort, + pub pad: c_ushort, + pub totalhigh: c_ulong, + pub freehigh: c_ulong, + pub mem_unit: c_uint, + pub _f: [c_char; 0], + } + + // FIXME(1.0): this is actually a union + #[cfg_attr(target_pointer_width = "32", repr(align(4)))] + #[cfg_attr(target_pointer_width = "64", repr(align(8)))] + pub struct sem_t { + __size: [c_char; 32], + } +} + +pub const __SIZEOF_PTHREAD_CONDATTR_T: usize = 4; +pub const __SIZEOF_PTHREAD_MUTEXATTR_T: usize = 4; +pub const __SIZEOF_PTHREAD_BARRIERATTR_T: usize = 4; +pub const __SIZEOF_PTHREAD_MUTEX_T: usize = 40; +pub const __SIZEOF_PTHREAD_RWLOCK_T: usize = 56; +pub const __SIZEOF_PTHREAD_BARRIER_T: usize = 32; + +pub const SYS_gettid: c_long = 5178; // Valid for n64 diff --git a/vendor/libc/src/unix/linux_like/linux/uclibc/mips/mod.rs b/vendor/libc/src/unix/linux_like/linux/uclibc/mips/mod.rs new file mode 100644 index 00000000000000..8d17aa8e98e9aa --- /dev/null +++ b/vendor/libc/src/unix/linux_like/linux/uclibc/mips/mod.rs @@ -0,0 +1,312 @@ +use crate::prelude::*; + +pub type pthread_t = c_ulong; + +pub const SFD_CLOEXEC: c_int = 0x080000; + +pub const NCCS: usize = 32; + +pub const O_TRUNC: c_int = 512; + +pub const O_CLOEXEC: c_int = 0x80000; + +pub const EBFONT: c_int = 59; +pub const ENOSTR: c_int = 60; +pub const ENODATA: c_int = 61; +pub const ETIME: c_int = 62; +pub const ENOSR: c_int = 63; +pub const ENONET: c_int = 64; +pub const ENOPKG: c_int = 65; +pub const EREMOTE: c_int = 66; +pub const ENOLINK: c_int = 67; +pub const EADV: c_int = 68; +pub const ESRMNT: c_int = 69; +pub const ECOMM: c_int = 70; +pub const EPROTO: c_int = 71; +pub const EDOTDOT: c_int = 73; + +pub const SA_NODEFER: c_uint = 0x40000000; +pub const SA_RESETHAND: c_uint = 0x80000000; +pub const SA_RESTART: c_uint = 0x10000000; +pub const SA_NOCLDSTOP: c_uint = 0x00000001; + +pub const EPOLL_CLOEXEC: c_int = 0x80000; + +pub const EFD_CLOEXEC: c_int = 0x80000; + +pub const TMP_MAX: c_uint = 238328; +pub const _SC_2_C_VERSION: c_int = 96; +pub const O_ACCMODE: c_int = 3; +pub const O_DIRECT: c_int = 0x8000; +pub const O_DIRECTORY: c_int = 0x10000; +pub const O_NOFOLLOW: c_int = 0x20000; +pub const O_NOATIME: c_int = 0x40000; +pub const O_PATH: c_int = 0o010000000; + +pub const O_APPEND: c_int = 8; +pub const O_CREAT: c_int = 256; +pub const O_EXCL: c_int = 1024; +pub const O_NOCTTY: c_int = 2048; +pub const 
O_NONBLOCK: c_int = 128; +pub const O_SYNC: c_int = 0x10; +pub const O_RSYNC: c_int = 0x10; +pub const O_DSYNC: c_int = 0x10; +pub const O_FSYNC: c_int = 0x10; +pub const O_ASYNC: c_int = 0x1000; +pub const O_LARGEFILE: c_int = 0x2000; +pub const O_NDELAY: c_int = 0x80; + +pub const SOCK_NONBLOCK: c_int = 128; + +pub const EDEADLK: c_int = 45; +pub const ENAMETOOLONG: c_int = 78; +pub const ENOLCK: c_int = 46; +pub const ENOSYS: c_int = 89; +pub const ENOTEMPTY: c_int = 93; +pub const ELOOP: c_int = 90; +pub const ENOMSG: c_int = 35; +pub const EIDRM: c_int = 36; +pub const ECHRNG: c_int = 37; +pub const EL2NSYNC: c_int = 38; +pub const EL3HLT: c_int = 39; +pub const EL3RST: c_int = 40; +pub const ELNRNG: c_int = 41; +pub const EUNATCH: c_int = 42; +pub const ENOCSI: c_int = 43; +pub const EL2HLT: c_int = 44; +pub const EBADE: c_int = 50; +pub const EBADR: c_int = 51; +pub const EXFULL: c_int = 52; +pub const FFDLY: c_int = 0o0100000; +pub const ENOANO: c_int = 53; +pub const EBADRQC: c_int = 54; +pub const EBADSLT: c_int = 55; +pub const EMULTIHOP: c_int = 74; +pub const EOVERFLOW: c_int = 79; +pub const ENOTUNIQ: c_int = 80; +pub const EBADFD: c_int = 81; +pub const EBADMSG: c_int = 77; +pub const EREMCHG: c_int = 82; +pub const ELIBACC: c_int = 83; +pub const ELIBBAD: c_int = 84; +pub const ELIBSCN: c_int = 85; +pub const ELIBMAX: c_int = 86; +pub const ELIBEXEC: c_int = 87; +pub const EILSEQ: c_int = 88; +pub const ERESTART: c_int = 91; +pub const ESTRPIPE: c_int = 92; +pub const EUSERS: c_int = 94; +pub const ENOTSOCK: c_int = 95; +pub const EDESTADDRREQ: c_int = 96; +pub const EMSGSIZE: c_int = 97; +pub const EPROTOTYPE: c_int = 98; +pub const ENOPROTOOPT: c_int = 99; +pub const EPROTONOSUPPORT: c_int = 120; +pub const ESOCKTNOSUPPORT: c_int = 121; +pub const EOPNOTSUPP: c_int = 122; +pub const EPFNOSUPPORT: c_int = 123; +pub const EAFNOSUPPORT: c_int = 124; +pub const EADDRINUSE: c_int = 125; +pub const EADDRNOTAVAIL: c_int = 126; +pub const ENETDOWN: c_int = 127; +pub const ENETUNREACH: c_int = 128; +pub const ENETRESET: c_int = 129; +pub const ECONNABORTED: c_int = 130; +pub const ECONNRESET: c_int = 131; +pub const ENOBUFS: c_int = 132; +pub const EISCONN: c_int = 133; +pub const ENOTCONN: c_int = 134; +pub const ESHUTDOWN: c_int = 143; +pub const ETOOMANYREFS: c_int = 144; +pub const ETIMEDOUT: c_int = 145; +pub const ECONNREFUSED: c_int = 146; +pub const EHOSTDOWN: c_int = 147; +pub const EHOSTUNREACH: c_int = 148; +pub const EALREADY: c_int = 149; +pub const EINPROGRESS: c_int = 150; +pub const ESTALE: c_int = 151; +pub const EUCLEAN: c_int = 135; +pub const ENOTNAM: c_int = 137; +pub const ENAVAIL: c_int = 138; +pub const EISNAM: c_int = 139; +pub const EREMOTEIO: c_int = 140; +pub const EDQUOT: c_int = 1133; +pub const ENOMEDIUM: c_int = 159; +pub const EMEDIUMTYPE: c_int = 160; +pub const ECANCELED: c_int = 158; +pub const ENOKEY: c_int = 161; +pub const EKEYEXPIRED: c_int = 162; +pub const EKEYREVOKED: c_int = 163; +pub const EKEYREJECTED: c_int = 164; +pub const EOWNERDEAD: c_int = 165; +pub const ENOTRECOVERABLE: c_int = 166; +pub const ERFKILL: c_int = 167; + +pub const MAP_NORESERVE: c_int = 0x400; +pub const MAP_ANON: c_int = 0x800; +pub const MAP_ANONYMOUS: c_int = 0x800; +pub const MAP_GROWSDOWN: c_int = 0x1000; +pub const MAP_DENYWRITE: c_int = 0x2000; +pub const MAP_EXECUTABLE: c_int = 0x4000; +pub const MAP_LOCKED: c_int = 0x8000; +pub const MAP_POPULATE: c_int = 0x10000; +pub const MAP_NONBLOCK: c_int = 0x20000; +pub const MAP_STACK: c_int = 0x40000; + +pub 
const NLDLY: crate::tcflag_t = 0o0000400; + +pub const SOCK_STREAM: c_int = 2; +pub const SOCK_DGRAM: c_int = 1; +pub const SOCK_SEQPACKET: c_int = 5; + +pub const SA_ONSTACK: c_uint = 0x08000000; +pub const SA_SIGINFO: c_uint = 0x00000008; +pub const SA_NOCLDWAIT: c_int = 0x00010000; + +pub const SIGEMT: c_int = 7; +pub const SIGCHLD: c_int = 18; +pub const SIGBUS: c_int = 10; +pub const SIGTTIN: c_int = 26; +pub const SIGTTOU: c_int = 27; +pub const SIGXCPU: c_int = 30; +pub const SIGXFSZ: c_int = 31; +pub const SIGVTALRM: c_int = 28; +pub const SIGPROF: c_int = 29; +pub const SIGWINCH: c_int = 20; +pub const SIGUSR1: c_int = 16; +pub const SIGUSR2: c_int = 17; +pub const SIGCONT: c_int = 25; +pub const SIGSTOP: c_int = 23; +pub const SIGTSTP: c_int = 24; +pub const SIGURG: c_int = 21; +pub const SIGIO: c_int = 22; +pub const SIGSYS: c_int = 12; +pub const SIGPWR: c_int = 19; +pub const SIG_SETMASK: c_int = 3; +pub const SIG_BLOCK: c_int = 0x1; +pub const SIG_UNBLOCK: c_int = 0x2; + +pub const POLLWRNORM: c_short = 0x004; +pub const POLLWRBAND: c_short = 0x100; + +pub const PTHREAD_STACK_MIN: size_t = 16384; + +pub const VEOF: usize = 16; +pub const VEOL: usize = 17; +pub const VEOL2: usize = 6; +pub const VMIN: usize = 4; +pub const IEXTEN: crate::tcflag_t = 0x00000100; +pub const TOSTOP: crate::tcflag_t = 0x00008000; +pub const FLUSHO: crate::tcflag_t = 0x00002000; +pub const TCSANOW: c_int = 0x540e; +pub const TCSADRAIN: c_int = 0x540f; +pub const TCSAFLUSH: c_int = 0x5410; + +pub const CPU_SETSIZE: c_int = 0x400; + +pub const EFD_NONBLOCK: c_int = 0x80; + +pub const F_GETLK: c_int = 14; +pub const F_SETLK: c_int = 6; +pub const F_SETLKW: c_int = 7; + +pub const SFD_NONBLOCK: c_int = 0x80; + +pub const RTLD_GLOBAL: c_int = 0x4; + +pub const SIGSTKSZ: size_t = 8192; +pub const CBAUD: crate::tcflag_t = 0o0010017; +pub const CBAUDEX: crate::tcflag_t = 0o0010000; +pub const CIBAUD: crate::tcflag_t = 0o002003600000; +pub const TAB1: crate::tcflag_t = 0x00000800; +pub const TAB2: crate::tcflag_t = 0x00001000; +pub const TAB3: crate::tcflag_t = 0x00001800; +pub const TABDLY: crate::tcflag_t = 0o0014000; +pub const CR1: crate::tcflag_t = 0x00000200; +pub const CR2: crate::tcflag_t = 0x00000400; +pub const CR3: crate::tcflag_t = 0x00000600; +pub const FF1: crate::tcflag_t = 0x00008000; +pub const BS1: crate::tcflag_t = 0x00002000; +pub const BSDLY: crate::tcflag_t = 0o0020000; +pub const VT1: crate::tcflag_t = 0x00004000; +pub const VWERASE: usize = 14; +pub const XTABS: crate::tcflag_t = 0o0014000; +pub const VREPRINT: usize = 12; +pub const VSUSP: usize = 10; +pub const VSWTC: usize = 7; +pub const VTDLY: c_int = 0o0040000; +pub const VSTART: usize = 8; +pub const VSTOP: usize = 9; +pub const VDISCARD: usize = 13; +pub const VTIME: usize = 5; +pub const IXON: crate::tcflag_t = 0x00000400; +pub const IXOFF: crate::tcflag_t = 0x00001000; +pub const OLCUC: crate::tcflag_t = 0o0000002; +pub const ONLCR: crate::tcflag_t = 0x4; +pub const CSIZE: crate::tcflag_t = 0x00000030; +pub const CS6: crate::tcflag_t = 0x00000010; +pub const CS7: crate::tcflag_t = 0x00000020; +pub const CS8: crate::tcflag_t = 0x00000030; +pub const CSTOPB: crate::tcflag_t = 0x00000040; +pub const CRDLY: c_int = 0o0003000; +pub const CREAD: crate::tcflag_t = 0x00000080; +pub const PARENB: crate::tcflag_t = 0x00000100; +pub const PARODD: crate::tcflag_t = 0x00000200; +pub const HUPCL: crate::tcflag_t = 0x00000400; +pub const CLOCAL: crate::tcflag_t = 0x00000800; +pub const ECHOKE: crate::tcflag_t = 0x00000800; +pub const 
ECHOE: crate::tcflag_t = 0x00000010; +pub const ECHOK: crate::tcflag_t = 0x00000020; +pub const ECHONL: crate::tcflag_t = 0x00000040; +pub const ECHOPRT: crate::tcflag_t = 0x00000400; +pub const ECHOCTL: crate::tcflag_t = 0x00000200; +pub const ISIG: crate::tcflag_t = 0x00000001; +pub const ICANON: crate::tcflag_t = 0x00000002; +pub const PENDIN: crate::tcflag_t = 0x00004000; +pub const NOFLSH: crate::tcflag_t = 0x00000080; + +pub const MAP_HUGETLB: c_int = 0x80000; + +pub const B0: crate::speed_t = 0o000000; +pub const B50: crate::speed_t = 0o000001; +pub const B75: crate::speed_t = 0o000002; +pub const B110: crate::speed_t = 0o000003; +pub const B134: crate::speed_t = 0o000004; +pub const B150: crate::speed_t = 0o000005; +pub const B200: crate::speed_t = 0o000006; +pub const B300: crate::speed_t = 0o000007; +pub const B600: crate::speed_t = 0o000010; +pub const B1200: crate::speed_t = 0o000011; +pub const B1800: crate::speed_t = 0o000012; +pub const B2400: crate::speed_t = 0o000013; +pub const B4800: crate::speed_t = 0o000014; +pub const B9600: crate::speed_t = 0o000015; +pub const B19200: crate::speed_t = 0o000016; +pub const B38400: crate::speed_t = 0o000017; +pub const B57600: crate::speed_t = 0o010001; +pub const B115200: crate::speed_t = 0o010002; +pub const B230400: crate::speed_t = 0o010003; +pub const B460800: crate::speed_t = 0o010004; +pub const B500000: crate::speed_t = 0o010005; +pub const B576000: crate::speed_t = 0o010006; +pub const B921600: crate::speed_t = 0o010007; +pub const B1000000: crate::speed_t = 0o010010; +pub const B1152000: crate::speed_t = 0o010011; +pub const B1500000: crate::speed_t = 0o010012; +pub const B2000000: crate::speed_t = 0o010013; +pub const B2500000: crate::speed_t = 0o010014; +pub const B3000000: crate::speed_t = 0o010015; +pub const B3500000: crate::speed_t = 0o010016; +pub const B4000000: crate::speed_t = 0o010017; + +cfg_if! { + if #[cfg(target_arch = "mips")] { + mod mips32; + pub use self::mips32::*; + } else if #[cfg(target_arch = "mips64")] { + mod mips64; + pub use self::mips64::*; + } else { + // Unknown target_arch + } +} diff --git a/vendor/libc/src/unix/linux_like/linux/uclibc/mod.rs b/vendor/libc/src/unix/linux_like/linux/uclibc/mod.rs new file mode 100644 index 00000000000000..4fef82ed8e1671 --- /dev/null +++ b/vendor/libc/src/unix/linux_like/linux/uclibc/mod.rs @@ -0,0 +1,517 @@ +// FIXME(ulibc): this module has definitions that are redundant with the parent +#![allow(dead_code)] + +use crate::off64_t; +use crate::prelude::*; + +pub type shmatt_t = c_ulong; +pub type msgqnum_t = c_ulong; +pub type msglen_t = c_ulong; +pub type regoff_t = c_int; +pub type rlim_t = c_ulong; +pub type __rlimit_resource_t = c_ulong; +pub type __priority_which_t = c_uint; + +cfg_if! { + if #[cfg(doc)] { + // Used in `linux::arch` to define ioctl constants. + pub(crate) type Ioctl = c_ulong; + } else { + #[doc(hidden)] + pub type Ioctl = c_ulong; + } +} + +s! { + pub struct statvfs { + // Different than GNU! 
+ pub f_bsize: c_ulong, + pub f_frsize: c_ulong, + pub f_blocks: crate::fsblkcnt_t, + pub f_bfree: crate::fsblkcnt_t, + pub f_bavail: crate::fsblkcnt_t, + pub f_files: crate::fsfilcnt_t, + pub f_ffree: crate::fsfilcnt_t, + pub f_favail: crate::fsfilcnt_t, + #[cfg(target_endian = "little")] + pub f_fsid: c_ulong, + #[cfg(target_pointer_width = "32")] + __f_unused: c_int, + #[cfg(target_endian = "big")] + pub f_fsid: c_ulong, + pub f_flag: c_ulong, + pub f_namemax: c_ulong, + __f_spare: [c_int; 6], + } + + pub struct regex_t { + __buffer: *mut c_void, + __allocated: size_t, + __used: size_t, + __syntax: c_ulong, + __fastmap: *mut c_char, + __translate: *mut c_char, + __re_nsub: size_t, + __bitfield: u8, + } + + pub struct rtentry { + pub rt_pad1: c_ulong, + pub rt_dst: crate::sockaddr, + pub rt_gateway: crate::sockaddr, + pub rt_genmask: crate::sockaddr, + pub rt_flags: c_ushort, + pub rt_pad2: c_short, + pub rt_pad3: c_ulong, + pub rt_tos: c_uchar, + pub rt_class: c_uchar, + #[cfg(target_pointer_width = "64")] + pub rt_pad4: [c_short; 3usize], + #[cfg(not(target_pointer_width = "64"))] + pub rt_pad4: c_short, + pub rt_metric: c_short, + pub rt_dev: *mut c_char, + pub rt_mtu: c_ulong, + pub rt_window: c_ulong, + pub rt_irtt: c_ushort, + } + + pub struct __exit_status { + pub e_termination: c_short, + pub e_exit: c_short, + } + + pub struct ptrace_peeksiginfo_args { + pub off: crate::__u64, + pub flags: crate::__u32, + pub nr: crate::__s32, + } + + #[cfg_attr( + any( + target_pointer_width = "32", + target_arch = "x86_64", + target_arch = "powerpc64", + target_arch = "mips64", + target_arch = "s390x", + target_arch = "sparc64" + ), + repr(align(4)) + )] + #[cfg_attr( + not(any( + target_pointer_width = "32", + target_arch = "x86_64", + target_arch = "powerpc64", + target_arch = "mips64", + target_arch = "s390x", + target_arch = "sparc64" + )), + repr(align(8)) + )] + pub struct pthread_mutexattr_t { + size: [u8; crate::__SIZEOF_PTHREAD_MUTEXATTR_T], + } + + #[repr(align(4))] + pub struct pthread_condattr_t { + size: [u8; crate::__SIZEOF_PTHREAD_CONDATTR_T], + } + + pub struct tcp_info { + pub tcpi_state: u8, + pub tcpi_ca_state: u8, + pub tcpi_retransmits: u8, + pub tcpi_probes: u8, + pub tcpi_backoff: u8, + pub tcpi_options: u8, + /// This contains the bitfields `tcpi_snd_wscale` and `tcpi_rcv_wscale`. + /// Each is 4 bits. 
+ pub tcpi_snd_rcv_wscale: u8, + pub tcpi_rto: u32, + pub tcpi_ato: u32, + pub tcpi_snd_mss: u32, + pub tcpi_rcv_mss: u32, + pub tcpi_unacked: u32, + pub tcpi_sacked: u32, + pub tcpi_lost: u32, + pub tcpi_retrans: u32, + pub tcpi_fackets: u32, + pub tcpi_last_data_sent: u32, + pub tcpi_last_ack_sent: u32, + pub tcpi_last_data_recv: u32, + pub tcpi_last_ack_recv: u32, + pub tcpi_pmtu: u32, + pub tcpi_rcv_ssthresh: u32, + pub tcpi_rtt: u32, + pub tcpi_rttvar: u32, + pub tcpi_snd_ssthresh: u32, + pub tcpi_snd_cwnd: u32, + pub tcpi_advmss: u32, + pub tcpi_reordering: u32, + pub tcpi_rcv_rtt: u32, + pub tcpi_rcv_space: u32, + pub tcpi_total_retrans: u32, + } +} + +impl siginfo_t { + pub unsafe fn si_addr(&self) -> *mut c_void { + #[repr(C)] + struct siginfo_sigfault { + _si_signo: c_int, + _si_errno: c_int, + _si_code: c_int, + si_addr: *mut c_void, + } + (*(self as *const siginfo_t as *const siginfo_sigfault)).si_addr + } + + pub unsafe fn si_value(&self) -> crate::sigval { + #[repr(C)] + struct siginfo_si_value { + _si_signo: c_int, + _si_errno: c_int, + _si_code: c_int, + _si_timerid: c_int, + _si_overrun: c_int, + si_value: crate::sigval, + } + (*(self as *const siginfo_t as *const siginfo_si_value)).si_value + } +} + +// Internal, for casts to access union fields +#[repr(C)] +struct sifields_sigchld { + si_pid: crate::pid_t, + si_uid: crate::uid_t, + si_status: c_int, + si_utime: c_long, + si_stime: c_long, +} +impl Copy for sifields_sigchld {} +impl Clone for sifields_sigchld { + fn clone(&self) -> sifields_sigchld { + *self + } +} + +// Internal, for casts to access union fields +#[repr(C)] +union sifields { + _align_pointer: *mut c_void, + sigchld: sifields_sigchld, +} + +// Internal, for casts to access union fields. Note that some variants +// of sifields start with a pointer, which makes the alignment of +// sifields vary on 32-bit and 64-bit architectures. +#[repr(C)] +struct siginfo_f { + _siginfo_base: [c_int; 3], + sifields: sifields, +} + +impl siginfo_t { + unsafe fn sifields(&self) -> &sifields { + &(*(self as *const siginfo_t as *const siginfo_f)).sifields + } + + pub unsafe fn si_pid(&self) -> crate::pid_t { + self.sifields().sigchld.si_pid + } + + pub unsafe fn si_uid(&self) -> crate::uid_t { + self.sifields().sigchld.si_uid + } + + pub unsafe fn si_status(&self) -> c_int { + self.sifields().sigchld.si_status + } + + pub unsafe fn si_utime(&self) -> c_long { + self.sifields().sigchld.si_utime + } + + pub unsafe fn si_stime(&self) -> c_long { + self.sifields().sigchld.si_stime + } +} + +pub const MCL_CURRENT: c_int = 0x0001; +pub const MCL_FUTURE: c_int = 0x0002; +pub const MCL_ONFAULT: c_int = 0x0004; + +pub const SIGEV_THREAD_ID: c_int = 4; + +pub const AF_VSOCK: c_int = 40; + +// Most `*_SUPER_MAGIC` constants are defined at the `linux_like` level; the +// following are only available on newer Linux versions than the versions +// currently used in CI in some configurations, so we define them here. 
+pub const BINDERFS_SUPER_MAGIC: c_long = 0x6c6f6f70; +pub const XFS_SUPER_MAGIC: c_long = 0x58465342; + +pub const PTRACE_TRACEME: c_int = 0; +pub const PTRACE_PEEKTEXT: c_int = 1; +pub const PTRACE_PEEKDATA: c_int = 2; +pub const PTRACE_PEEKUSER: c_int = 3; +pub const PTRACE_POKETEXT: c_int = 4; +pub const PTRACE_POKEDATA: c_int = 5; +pub const PTRACE_POKEUSER: c_int = 6; +pub const PTRACE_CONT: c_int = 7; +pub const PTRACE_KILL: c_int = 8; +pub const PTRACE_SINGLESTEP: c_int = 9; +pub const PTRACE_GETREGS: c_int = 12; +pub const PTRACE_SETREGS: c_int = 13; +pub const PTRACE_GETFPREGS: c_int = 14; +pub const PTRACE_SETFPREGS: c_int = 15; +pub const PTRACE_ATTACH: c_int = 16; +pub const PTRACE_DETACH: c_int = 17; +pub const PTRACE_GETFPXREGS: c_int = 18; +pub const PTRACE_SETFPXREGS: c_int = 19; +pub const PTRACE_SYSCALL: c_int = 24; +pub const PTRACE_SETOPTIONS: c_int = 0x4200; +pub const PTRACE_GETEVENTMSG: c_int = 0x4201; +pub const PTRACE_GETSIGINFO: c_int = 0x4202; +pub const PTRACE_SETSIGINFO: c_int = 0x4203; +pub const PTRACE_GETREGSET: c_int = 0x4204; +pub const PTRACE_SETREGSET: c_int = 0x4205; +pub const PTRACE_SEIZE: c_int = 0x4206; +pub const PTRACE_INTERRUPT: c_int = 0x4207; +pub const PTRACE_LISTEN: c_int = 0x4208; + +pub const POSIX_FADV_DONTNEED: c_int = 4; +pub const POSIX_FADV_NOREUSE: c_int = 5; + +// These are different than GNU! +pub const LC_CTYPE: c_int = 0; +pub const LC_NUMERIC: c_int = 1; +pub const LC_TIME: c_int = 3; +pub const LC_COLLATE: c_int = 4; +pub const LC_MONETARY: c_int = 2; +pub const LC_MESSAGES: c_int = 5; +pub const LC_ALL: c_int = 6; +// end different section + +// MS_ flags for mount(2) +pub const MS_RMT_MASK: c_ulong = + crate::MS_RDONLY | crate::MS_SYNCHRONOUS | crate::MS_MANDLOCK | crate::MS_I_VERSION; + +pub const ENOTSUP: c_int = EOPNOTSUPP; + +pub const IPV6_JOIN_GROUP: c_int = 20; +pub const IPV6_LEAVE_GROUP: c_int = 21; + +// These are different from GNU +pub const ABDAY_1: crate::nl_item = 0x300; +pub const ABDAY_2: crate::nl_item = 0x301; +pub const ABDAY_3: crate::nl_item = 0x302; +pub const ABDAY_4: crate::nl_item = 0x303; +pub const ABDAY_5: crate::nl_item = 0x304; +pub const ABDAY_6: crate::nl_item = 0x305; +pub const ABDAY_7: crate::nl_item = 0x306; +pub const DAY_1: crate::nl_item = 0x307; +pub const DAY_2: crate::nl_item = 0x308; +pub const DAY_3: crate::nl_item = 0x309; +pub const DAY_4: crate::nl_item = 0x30A; +pub const DAY_5: crate::nl_item = 0x30B; +pub const DAY_6: crate::nl_item = 0x30C; +pub const DAY_7: crate::nl_item = 0x30D; +pub const ABMON_1: crate::nl_item = 0x30E; +pub const ABMON_2: crate::nl_item = 0x30F; +pub const ABMON_3: crate::nl_item = 0x310; +pub const ABMON_4: crate::nl_item = 0x311; +pub const ABMON_5: crate::nl_item = 0x312; +pub const ABMON_6: crate::nl_item = 0x313; +pub const ABMON_7: crate::nl_item = 0x314; +pub const ABMON_8: crate::nl_item = 0x315; +pub const ABMON_9: crate::nl_item = 0x316; +pub const ABMON_10: crate::nl_item = 0x317; +pub const ABMON_11: crate::nl_item = 0x318; +pub const ABMON_12: crate::nl_item = 0x319; +pub const MON_1: crate::nl_item = 0x31A; +pub const MON_2: crate::nl_item = 0x31B; +pub const MON_3: crate::nl_item = 0x31C; +pub const MON_4: crate::nl_item = 0x31D; +pub const MON_5: crate::nl_item = 0x31E; +pub const MON_6: crate::nl_item = 0x31F; +pub const MON_7: crate::nl_item = 0x320; +pub const MON_8: crate::nl_item = 0x321; +pub const MON_9: crate::nl_item = 0x322; +pub const MON_10: crate::nl_item = 0x323; +pub const MON_11: crate::nl_item = 0x324; +pub const 
MON_12: crate::nl_item = 0x325; +pub const AM_STR: crate::nl_item = 0x326; +pub const PM_STR: crate::nl_item = 0x327; +pub const D_T_FMT: crate::nl_item = 0x328; +pub const D_FMT: crate::nl_item = 0x329; +pub const T_FMT: crate::nl_item = 0x32A; +pub const T_FMT_AMPM: crate::nl_item = 0x32B; +pub const ERA: crate::nl_item = 0x32C; +pub const ERA_D_FMT: crate::nl_item = 0x32E; +pub const ALT_DIGITS: crate::nl_item = 0x32F; +pub const ERA_D_T_FMT: crate::nl_item = 0x330; +pub const ERA_T_FMT: crate::nl_item = 0x331; +pub const CODESET: crate::nl_item = 10; +pub const CRNCYSTR: crate::nl_item = 0x215; +pub const RADIXCHAR: crate::nl_item = 0x100; +pub const THOUSEP: crate::nl_item = 0x101; +pub const NOEXPR: crate::nl_item = 0x501; +pub const YESSTR: crate::nl_item = 0x502; +pub const NOSTR: crate::nl_item = 0x503; + +// Different than Gnu. +pub const FILENAME_MAX: c_uint = 4095; + +pub const PRIO_PROCESS: c_int = 0; +pub const PRIO_PGRP: c_int = 1; +pub const PRIO_USER: c_int = 2; + +pub const SOMAXCONN: c_int = 128; + +pub const ST_RELATIME: c_ulong = 4096; + +pub const AF_NFC: c_int = PF_NFC; +pub const BUFSIZ: c_int = 4096; +pub const EDEADLOCK: c_int = EDEADLK; +pub const EXTA: c_uint = B19200; +pub const EXTB: c_uint = B38400; +pub const EXTPROC: crate::tcflag_t = 0o200000; +pub const FOPEN_MAX: c_int = 16; +pub const F_GETOWN: c_int = 9; +pub const F_OFD_GETLK: c_int = 36; +pub const F_OFD_SETLK: c_int = 37; +pub const F_OFD_SETLKW: c_int = 38; +pub const F_RDLCK: c_int = 0; +pub const F_SETOWN: c_int = 8; +pub const F_UNLCK: c_int = 2; +pub const F_WRLCK: c_int = 1; +pub const IPV6_MULTICAST_ALL: c_int = 29; +pub const IPV6_ROUTER_ALERT_ISOLATE: c_int = 30; +pub const MAP_HUGE_SHIFT: c_int = 26; +pub const MAP_HUGE_MASK: c_int = 0x3f; +pub const MAP_HUGE_64KB: c_int = 16 << MAP_HUGE_SHIFT; +pub const MAP_HUGE_512KB: c_int = 19 << MAP_HUGE_SHIFT; +pub const MAP_HUGE_1MB: c_int = 20 << MAP_HUGE_SHIFT; +pub const MAP_HUGE_2MB: c_int = 21 << MAP_HUGE_SHIFT; +pub const MAP_HUGE_8MB: c_int = 23 << MAP_HUGE_SHIFT; +pub const MAP_HUGE_16MB: c_int = 24 << MAP_HUGE_SHIFT; +pub const MAP_HUGE_32MB: c_int = 25 << MAP_HUGE_SHIFT; +pub const MAP_HUGE_256MB: c_int = 28 << MAP_HUGE_SHIFT; +pub const MAP_HUGE_512MB: c_int = 29 << MAP_HUGE_SHIFT; +pub const MAP_HUGE_1GB: c_int = 30 << MAP_HUGE_SHIFT; +pub const MAP_HUGE_2GB: c_int = 31 << MAP_HUGE_SHIFT; +pub const MAP_HUGE_16GB: c_int = 34 << MAP_HUGE_SHIFT; +pub const MINSIGSTKSZ: c_int = 2048; +pub const MSG_COPY: c_int = 0o40000; +pub const NI_MAXHOST: crate::socklen_t = 1025; +pub const O_TMPFILE: c_int = 0o20000000 | O_DIRECTORY; +pub const PACKET_MR_UNICAST: c_int = 3; +pub const PF_NFC: c_int = 39; +pub const PF_VSOCK: c_int = 40; +pub const POSIX_MADV_DONTNEED: c_int = 4; +pub const PTRACE_EVENT_STOP: c_int = 128; +pub const PTRACE_GETSIGMASK: c_uint = 0x420a; +pub const PTRACE_PEEKSIGINFO: c_int = 0x4209; +pub const PTRACE_SETSIGMASK: c_uint = 0x420b; +pub const RTLD_NOLOAD: c_int = 0x00004; +pub const RUSAGE_THREAD: c_int = 1; +pub const SHM_EXEC: c_int = 0o100000; +pub const SIGPOLL: c_int = SIGIO; +pub const SOCK_DCCP: c_int = 6; +#[deprecated(since = "0.2.70", note = "AF_PACKET must be used instead")] +pub const SOCK_PACKET: c_int = 10; +pub const TCP_COOKIE_TRANSACTIONS: c_int = 15; +pub const UDP_GRO: c_int = 104; +pub const UDP_SEGMENT: c_int = 103; +pub const YESEXPR: c_int = ((5) << 8) | (0); + +extern "C" { + pub fn gettimeofday(tp: *mut crate::timeval, tz: *mut crate::timezone) -> c_int; + + pub fn pthread_rwlockattr_getkind_np( + 
attr: *const crate::pthread_rwlockattr_t, + val: *mut c_int, + ) -> c_int; + pub fn pthread_rwlockattr_setkind_np( + attr: *mut crate::pthread_rwlockattr_t, + val: c_int, + ) -> c_int; + + pub fn ptrace(request: c_uint, ...) -> c_long; + + pub fn sendmmsg( + sockfd: c_int, + msgvec: *mut crate::mmsghdr, + vlen: c_uint, + flags: c_int, + ) -> c_int; + pub fn recvmmsg( + sockfd: c_int, + msgvec: *mut crate::mmsghdr, + vlen: c_uint, + flags: c_int, + timeout: *mut crate::timespec, + ) -> c_int; + + pub fn openpty( + amaster: *mut c_int, + aslave: *mut c_int, + name: *mut c_char, + termp: *mut termios, + winp: *mut crate::winsize, + ) -> c_int; + pub fn forkpty( + amaster: *mut c_int, + name: *mut c_char, + termp: *mut termios, + winp: *mut crate::winsize, + ) -> crate::pid_t; + + pub fn getnameinfo( + sa: *const crate::sockaddr, + salen: crate::socklen_t, + host: *mut c_char, + hostlen: crate::socklen_t, + serv: *mut c_char, + servlen: crate::socklen_t, + flags: c_int, + ) -> c_int; + + pub fn pwritev(fd: c_int, iov: *const crate::iovec, iovcnt: c_int, offset: off64_t) -> ssize_t; + pub fn preadv(fd: c_int, iov: *const crate::iovec, iovcnt: c_int, offset: off64_t) -> ssize_t; + + pub fn sethostid(hostid: c_long) -> c_int; + pub fn fanotify_mark( + fd: c_int, + flags: c_uint, + mask: u64, + dirfd: c_int, + path: *const c_char, + ) -> c_int; + pub fn getrlimit64(resource: crate::__rlimit_resource_t, rlim: *mut crate::rlimit64) -> c_int; + pub fn setrlimit64(resource: crate::__rlimit_resource_t, rlim: *const crate::rlimit64) + -> c_int; + pub fn getrlimit(resource: crate::__rlimit_resource_t, rlim: *mut crate::rlimit) -> c_int; + pub fn setrlimit(resource: crate::__rlimit_resource_t, rlim: *const crate::rlimit) -> c_int; + pub fn getpriority(which: crate::__priority_which_t, who: crate::id_t) -> c_int; + pub fn setpriority(which: crate::__priority_which_t, who: crate::id_t, prio: c_int) -> c_int; + pub fn getauxval(type_: c_ulong) -> c_ulong; +} + +cfg_if! { + if #[cfg(any(target_arch = "mips", target_arch = "mips64"))] { + mod mips; + pub use self::mips::*; + } else if #[cfg(target_arch = "x86_64")] { + mod x86_64; + pub use self::x86_64::*; + } else if #[cfg(target_arch = "arm")] { + mod arm; + pub use self::arm::*; + } else { + pub use unsupported_target; + } +} diff --git a/vendor/libc/src/unix/linux_like/linux/uclibc/x86_64/l4re.rs b/vendor/libc/src/unix/linux_like/linux/uclibc/x86_64/l4re.rs new file mode 100644 index 00000000000000..536c716ca48682 --- /dev/null +++ b/vendor/libc/src/unix/linux_like/linux/uclibc/x86_64/l4re.rs @@ -0,0 +1,53 @@ +use crate::prelude::*; + +/// L4Re specifics +/// This module contains definitions required by various L4Re libc backends. +/// Some of them are formally not part of the libc, but are a dependency of the +/// libc and hence we should provide them here. + +pub type l4_umword_t = c_ulong; // Unsigned machine word. +pub type pthread_t = *mut c_void; + +s! { + /// CPU sets. + pub struct l4_sched_cpu_set_t { + // from the L4Re docs + /// Combination of granularity and offset. + /// + /// The granularity defines how many CPUs each bit in map describes. + /// The offset is the number of the first CPU described by the first + /// bit in the bitmap. + /// offset must be a multiple of 2^graularity. + /// + /// | MSB | LSB | + /// | ---------------- | ------------------- | + /// | 8bit granularity | 24bit offset .. | + gran_offset: l4_umword_t, + /// Bitmap of CPUs. 
+ map: l4_umword_t, + } + + pub struct pthread_attr_t { + pub __detachstate: c_int, + pub __schedpolicy: c_int, + pub __schedparam: super::__sched_param, + pub __inheritsched: c_int, + pub __scope: c_int, + pub __guardsize: size_t, + pub __stackaddr_set: c_int, + pub __stackaddr: *mut c_void, // better don't use it + pub __stacksize: size_t, + // L4Re specifics + pub affinity: l4_sched_cpu_set_t, + pub create_flags: c_uint, + } +} + +// L4Re requires a min stack size of 64k; that isn't defined in uClibc, but +// somewhere in the core libraries. uClibc wants 16k, but that's not enough. +pub const PTHREAD_STACK_MIN: usize = 65536; + +// Misc other constants required for building. +pub const SIGIO: c_int = 29; +pub const B19200: crate::speed_t = 0o000016; +pub const B38400: crate::speed_t = 0o000017; diff --git a/vendor/libc/src/unix/linux_like/linux/uclibc/x86_64/mod.rs b/vendor/libc/src/unix/linux_like/linux/uclibc/x86_64/mod.rs new file mode 100644 index 00000000000000..1a2e4bcc1a897f --- /dev/null +++ b/vendor/libc/src/unix/linux_like/linux/uclibc/x86_64/mod.rs @@ -0,0 +1,355 @@ +//! Definitions for uclibc on 64bit systems + +use crate::off64_t; +use crate::prelude::*; + +pub type blkcnt_t = i64; +pub type blksize_t = i64; +pub type clock_t = i64; +pub type fsblkcnt_t = c_ulong; +pub type fsfilcnt_t = c_ulong; +pub type fsword_t = c_long; +pub type ino_t = c_ulong; +pub type nlink_t = c_uint; +pub type off_t = c_long; +// [uClibc docs] Note stat64 has the same shape as stat for x86-64. +pub type stat64 = stat; +pub type suseconds_t = c_long; +pub type time_t = c_int; +pub type wchar_t = c_int; + +pub type fsblkcnt64_t = u64; +pub type fsfilcnt64_t = u64; +pub type __u64 = c_ulong; +pub type __s64 = c_long; + +s! { + pub struct ipc_perm { + pub __key: crate::key_t, + pub uid: crate::uid_t, + pub gid: crate::gid_t, + pub cuid: crate::uid_t, + pub cgid: crate::gid_t, + pub mode: c_ushort, // read / write + __pad1: c_ushort, + pub __seq: c_ushort, + __pad2: c_ushort, + __unused1: c_ulong, + __unused2: c_ulong, + } + + #[cfg(not(target_os = "l4re"))] + pub struct pthread_attr_t { + __detachstate: c_int, + __schedpolicy: c_int, + __schedparam: __sched_param, + __inheritsched: c_int, + __scope: c_int, + __guardsize: size_t, + __stackaddr_set: c_int, + __stackaddr: *mut c_void, // better don't use it + __stacksize: size_t, + } + + pub struct __sched_param { + __sched_priority: c_int, + } + + pub struct siginfo_t { + si_signo: c_int, // signal number + si_errno: c_int, // if not zero: error value of signal, see errno.h + si_code: c_int, // signal code + pub _pad: [c_int; 28], // unported union + _align: [usize; 0], + } + + pub struct shmid_ds { + pub shm_perm: crate::ipc_perm, + pub shm_segsz: size_t, // segment size in bytes + pub shm_atime: crate::time_t, // time of last shmat() + pub shm_dtime: crate::time_t, + pub shm_ctime: crate::time_t, + pub shm_cpid: crate::pid_t, + pub shm_lpid: crate::pid_t, + pub shm_nattch: crate::shmatt_t, + __unused1: c_ulong, + __unused2: c_ulong, + } + + pub struct msqid_ds { + pub msg_perm: crate::ipc_perm, + pub msg_stime: crate::time_t, + pub msg_rtime: crate::time_t, + pub msg_ctime: crate::time_t, + pub __msg_cbytes: c_ulong, + pub msg_qnum: crate::msgqnum_t, + pub msg_qbytes: crate::msglen_t, + pub msg_lspid: crate::pid_t, + pub msg_lrpid: crate::pid_t, + __ignored1: c_ulong, + __ignored2: c_ulong, + } + + pub struct sockaddr { + pub sa_family: crate::sa_family_t, + pub sa_data: [c_char; 14], + } + + pub struct sockaddr_in { + pub sin_family: 
crate::sa_family_t, + pub sin_port: crate::in_port_t, + pub sin_addr: crate::in_addr, + pub sin_zero: [u8; 8], + } + + pub struct sockaddr_in6 { + pub sin6_family: crate::sa_family_t, + pub sin6_port: crate::in_port_t, + pub sin6_flowinfo: u32, + pub sin6_addr: crate::in6_addr, + pub sin6_scope_id: u32, + } + + /* ------------------------------------------------------------ + * definitions below are *unverified* and might **break** the software + */ + + // pub struct in_addr { + // pub s_addr: in_addr_t, + // } + // + // pub struct in6_addr { + // pub s6_addr: [u8; 16], + // } + + pub struct stat { + pub st_dev: c_ulong, + pub st_ino: crate::ino_t, + // According to uclibc/libc/sysdeps/linux/x86_64/bits/stat.h, order of + // nlink and mode are swapped on 64 bit systems. + pub st_nlink: crate::nlink_t, + pub st_mode: crate::mode_t, + pub st_uid: crate::uid_t, + pub st_gid: crate::gid_t, + pub st_rdev: c_ulong, // dev_t + pub st_size: off_t, // file size + pub st_blksize: crate::blksize_t, + pub st_blocks: crate::blkcnt_t, + pub st_atime: crate::time_t, + pub st_atime_nsec: c_ulong, + pub st_mtime: crate::time_t, + pub st_mtime_nsec: c_ulong, + pub st_ctime: crate::time_t, + pub st_ctime_nsec: c_ulong, + st_pad4: [c_long; 3], + } + + // FIXME(1.0): This should not implement `PartialEq` + #[allow(unpredictable_function_pointer_comparisons)] + pub struct sigaction { + pub sa_handler: crate::sighandler_t, + pub sa_flags: c_ulong, + pub sa_restorer: Option<extern "C" fn()>, + pub sa_mask: crate::sigset_t, + } + + pub struct stack_t { + // FIXME(ulibc) + pub ss_sp: *mut c_void, + pub ss_flags: c_int, + pub ss_size: size_t, + } + + pub struct statfs { + // FIXME(ulibc) + pub f_type: fsword_t, + pub f_bsize: fsword_t, + pub f_blocks: crate::fsblkcnt_t, + pub f_bfree: crate::fsblkcnt_t, + pub f_bavail: crate::fsblkcnt_t, + pub f_files: crate::fsfilcnt_t, + pub f_ffree: crate::fsfilcnt_t, + pub f_fsid: crate::fsid_t, + pub f_namelen: fsword_t, + pub f_frsize: fsword_t, + f_spare: [fsword_t; 5], + } + + pub struct statfs64 { + pub f_type: c_int, + pub f_bsize: c_int, + pub f_blocks: crate::fsblkcnt64_t, + pub f_bfree: crate::fsblkcnt64_t, + pub f_bavail: crate::fsblkcnt64_t, + pub f_files: crate::fsfilcnt64_t, + pub f_ffree: crate::fsfilcnt64_t, + pub f_fsid: crate::fsid_t, + pub f_namelen: c_int, + pub f_frsize: c_int, + pub f_flags: c_int, + pub f_spare: [c_int; 4], + } + + pub struct statvfs64 { + pub f_bsize: c_ulong, + pub f_frsize: c_ulong, + pub f_blocks: u64, + pub f_bfree: u64, + pub f_bavail: u64, + pub f_files: u64, + pub f_ffree: u64, + pub f_favail: u64, + pub f_fsid: c_ulong, + __f_unused: c_int, + pub f_flag: c_ulong, + pub f_namemax: c_ulong, + __f_spare: [c_int; 6], + } + + pub struct msghdr { + // FIXME(ulibc) + pub msg_name: *mut c_void, + pub msg_namelen: crate::socklen_t, + pub msg_iov: *mut crate::iovec, + pub msg_iovlen: size_t, + pub msg_control: *mut c_void, + pub msg_controllen: size_t, + pub msg_flags: c_int, + } + + pub struct termios { + // FIXME(ulibc) + pub c_iflag: crate::tcflag_t, + pub c_oflag: crate::tcflag_t, + pub c_cflag: crate::tcflag_t, + pub c_lflag: crate::tcflag_t, + pub c_line: crate::cc_t, + pub c_cc: [crate::cc_t; crate::NCCS], + } + + pub struct sigset_t { + // FIXME(ulibc) + __val: [c_ulong; 16], + } + + pub struct sysinfo { + // FIXME(ulibc) + pub uptime: c_long, + pub loads: [c_ulong; 3], + pub totalram: c_ulong, + pub freeram: c_ulong, + pub sharedram: c_ulong, + pub bufferram: c_ulong, + pub totalswap: c_ulong, + pub freeswap: c_ulong, + pub procs: c_ushort, + pub
pad: c_ushort, + pub totalhigh: c_ulong, + pub freehigh: c_ulong, + pub mem_unit: c_uint, + pub _f: [c_char; 0], + } + + pub struct glob_t { + // FIXME(ulibc) + pub gl_pathc: size_t, + pub gl_pathv: *mut *mut c_char, + pub gl_offs: size_t, + pub gl_flags: c_int, + __unused1: *mut c_void, + __unused2: *mut c_void, + __unused3: *mut c_void, + __unused4: *mut c_void, + __unused5: *mut c_void, + } + + pub struct cpu_set_t { + // FIXME(ulibc) + #[cfg(target_pointer_width = "32")] + bits: [u32; 32], + #[cfg(target_pointer_width = "64")] + bits: [u64; 16], + } + + pub struct fsid_t { + // FIXME(ulibc) + __val: [c_int; 2], + } + + // FIXME(1.0): this is actually a union + pub struct sem_t { + #[cfg(target_pointer_width = "32")] + __size: [c_char; 16], + #[cfg(target_pointer_width = "64")] + __size: [c_char; 32], + __align: [c_long; 0], + } + + pub struct cmsghdr { + pub cmsg_len: size_t, + pub cmsg_level: c_int, + pub cmsg_type: c_int, + } +} + +s_no_extra_traits! { + pub struct dirent { + pub d_ino: crate::ino64_t, + pub d_off: off64_t, + pub d_reclen: u16, + pub d_type: u8, + pub d_name: [c_char; 256], + } +} + +// constants +pub const ENAMETOOLONG: c_int = 36; // File name too long +pub const ENOTEMPTY: c_int = 39; // Directory not empty +pub const ELOOP: c_int = 40; // Too many symbolic links encountered +pub const EADDRINUSE: c_int = 98; // Address already in use +pub const EADDRNOTAVAIL: c_int = 99; // Cannot assign requested address +pub const ENETDOWN: c_int = 100; // Network is down +pub const ENETUNREACH: c_int = 101; // Network is unreachable +pub const ECONNABORTED: c_int = 103; // Software caused connection abort +pub const ECONNREFUSED: c_int = 111; // Connection refused +pub const ECONNRESET: c_int = 104; // Connection reset by peer +pub const EDEADLK: c_int = 35; // Resource deadlock would occur +pub const ENOSYS: c_int = 38; // Function not implemented +pub const ENOTCONN: c_int = 107; // Transport endpoint is not connected +pub const ETIMEDOUT: c_int = 110; // connection timed out +pub const ESTALE: c_int = 116; // Stale file handle +pub const EHOSTUNREACH: c_int = 113; // No route to host +pub const EDQUOT: c_int = 122; // Quota exceeded +pub const EOPNOTSUPP: c_int = 0x5f; +pub const ENODATA: c_int = 0x3d; +pub const O_APPEND: c_int = 0o2000; +pub const O_ACCMODE: c_int = 0o003; +pub const O_CLOEXEC: c_int = 0x80000; +pub const O_CREAT: c_int = 0100; +pub const O_DIRECTORY: c_int = 0o200000; +pub const O_EXCL: c_int = 0o200; +pub const O_NOFOLLOW: c_int = 0x20000; +pub const O_NONBLOCK: c_int = 0o4000; +pub const O_TRUNC: c_int = 0o1000; +pub const NCCS: usize = 32; +pub const SIG_SETMASK: c_int = 2; // Set the set of blocked signals +pub const __SIZEOF_PTHREAD_MUTEX_T: usize = 40; +pub const __SIZEOF_PTHREAD_MUTEXATTR_T: usize = 4; +pub const SOCK_DGRAM: c_int = 2; // connectionless, unreliable datagrams +pub const SOCK_STREAM: c_int = 1; // …/common/bits/socket_type.h +pub const __SIZEOF_PTHREAD_COND_T: usize = 48; +pub const __SIZEOF_PTHREAD_CONDATTR_T: usize = 4; +pub const __SIZEOF_PTHREAD_RWLOCK_T: usize = 56; +pub const __SIZEOF_PTHREAD_RWLOCKATTR_T: usize = 8; +pub const __SIZEOF_PTHREAD_BARRIER_T: usize = 32; +pub const __SIZEOF_PTHREAD_BARRIERATTR_T: usize = 4; + +cfg_if! 
{ + if #[cfg(target_os = "l4re")] { + mod l4re; + pub use self::l4re::*; + } else { + mod other; + pub use other::*; + } +} diff --git a/vendor/libc/src/unix/linux_like/linux/uclibc/x86_64/other.rs b/vendor/libc/src/unix/linux_like/linux/uclibc/x86_64/other.rs new file mode 100644 index 00000000000000..dc16d02c87977a --- /dev/null +++ b/vendor/libc/src/unix/linux_like/linux/uclibc/x86_64/other.rs @@ -0,0 +1,7 @@ +use crate::prelude::*; + +// Thestyle checker discourages the use of #[cfg], so this has to go into a +// separate module +pub type pthread_t = c_ulong; + +pub const PTHREAD_STACK_MIN: usize = 16384; diff --git a/vendor/libc/src/unix/linux_like/mod.rs b/vendor/libc/src/unix/linux_like/mod.rs new file mode 100644 index 00000000000000..fd3fa996caad4b --- /dev/null +++ b/vendor/libc/src/unix/linux_like/mod.rs @@ -0,0 +1,2214 @@ +use crate::prelude::*; + +pub type sa_family_t = u16; +pub type speed_t = c_uint; +pub type tcflag_t = c_uint; +pub type clockid_t = c_int; +pub type timer_t = *mut c_void; +pub type key_t = c_int; +pub type id_t = c_uint; + +missing! { + #[derive(Debug)] + pub enum timezone {} +} + +s! { + pub struct in_addr { + pub s_addr: crate::in_addr_t, + } + + pub struct ip_mreq { + pub imr_multiaddr: in_addr, + pub imr_interface: in_addr, + } + + pub struct ip_mreqn { + pub imr_multiaddr: in_addr, + pub imr_address: in_addr, + pub imr_ifindex: c_int, + } + + pub struct ip_mreq_source { + pub imr_multiaddr: in_addr, + pub imr_interface: in_addr, + pub imr_sourceaddr: in_addr, + } + + pub struct sockaddr { + pub sa_family: sa_family_t, + pub sa_data: [c_char; 14], + } + + pub struct sockaddr_in { + pub sin_family: sa_family_t, + pub sin_port: crate::in_port_t, + pub sin_addr: crate::in_addr, + pub sin_zero: [u8; 8], + } + + pub struct sockaddr_in6 { + pub sin6_family: sa_family_t, + pub sin6_port: crate::in_port_t, + pub sin6_flowinfo: u32, + pub sin6_addr: crate::in6_addr, + pub sin6_scope_id: u32, + } + + // The order of the `ai_addr` field in this struct is crucial + // for converting between the Rust and C types. 
+ pub struct addrinfo { + pub ai_flags: c_int, + pub ai_family: c_int, + pub ai_socktype: c_int, + pub ai_protocol: c_int, + pub ai_addrlen: socklen_t, + + #[cfg(any(target_os = "linux", target_os = "emscripten"))] + pub ai_addr: *mut crate::sockaddr, + + pub ai_canonname: *mut c_char, + + #[cfg(target_os = "android")] + pub ai_addr: *mut crate::sockaddr, + + pub ai_next: *mut addrinfo, + } + + pub struct sockaddr_ll { + pub sll_family: c_ushort, + pub sll_protocol: c_ushort, + pub sll_ifindex: c_int, + pub sll_hatype: c_ushort, + pub sll_pkttype: c_uchar, + pub sll_halen: c_uchar, + pub sll_addr: [c_uchar; 8], + } + + pub struct fd_set { + fds_bits: [c_ulong; FD_SETSIZE as usize / ULONG_SIZE], + } + + pub struct tm { + pub tm_sec: c_int, + pub tm_min: c_int, + pub tm_hour: c_int, + pub tm_mday: c_int, + pub tm_mon: c_int, + pub tm_year: c_int, + pub tm_wday: c_int, + pub tm_yday: c_int, + pub tm_isdst: c_int, + pub tm_gmtoff: c_long, + pub tm_zone: *const c_char, + } + + pub struct sched_param { + pub sched_priority: c_int, + #[cfg(any(target_env = "musl", target_os = "emscripten", target_env = "ohos"))] + pub sched_ss_low_priority: c_int, + #[cfg(any(target_env = "musl", target_os = "emscripten", target_env = "ohos"))] + pub sched_ss_repl_period: crate::timespec, + #[cfg(any(target_env = "musl", target_os = "emscripten", target_env = "ohos"))] + pub sched_ss_init_budget: crate::timespec, + #[cfg(any(target_env = "musl", target_os = "emscripten", target_env = "ohos"))] + pub sched_ss_max_repl: c_int, + } + + pub struct Dl_info { + pub dli_fname: *const c_char, + pub dli_fbase: *mut c_void, + pub dli_sname: *const c_char, + pub dli_saddr: *mut c_void, + } + + pub struct lconv { + pub decimal_point: *mut c_char, + pub thousands_sep: *mut c_char, + pub grouping: *mut c_char, + pub int_curr_symbol: *mut c_char, + pub currency_symbol: *mut c_char, + pub mon_decimal_point: *mut c_char, + pub mon_thousands_sep: *mut c_char, + pub mon_grouping: *mut c_char, + pub positive_sign: *mut c_char, + pub negative_sign: *mut c_char, + pub int_frac_digits: c_char, + pub frac_digits: c_char, + pub p_cs_precedes: c_char, + pub p_sep_by_space: c_char, + pub n_cs_precedes: c_char, + pub n_sep_by_space: c_char, + pub p_sign_posn: c_char, + pub n_sign_posn: c_char, + pub int_p_cs_precedes: c_char, + pub int_p_sep_by_space: c_char, + pub int_n_cs_precedes: c_char, + pub int_n_sep_by_space: c_char, + pub int_p_sign_posn: c_char, + pub int_n_sign_posn: c_char, + } + + pub struct in_pktinfo { + pub ipi_ifindex: c_int, + pub ipi_spec_dst: crate::in_addr, + pub ipi_addr: crate::in_addr, + } + + pub struct ifaddrs { + pub ifa_next: *mut ifaddrs, + pub ifa_name: *mut c_char, + pub ifa_flags: c_uint, + pub ifa_addr: *mut crate::sockaddr, + pub ifa_netmask: *mut crate::sockaddr, + pub ifa_ifu: *mut crate::sockaddr, // FIXME(union) This should be a union + pub ifa_data: *mut c_void, + } + + pub struct in6_rtmsg { + rtmsg_dst: crate::in6_addr, + rtmsg_src: crate::in6_addr, + rtmsg_gateway: crate::in6_addr, + rtmsg_type: u32, + rtmsg_dst_len: u16, + rtmsg_src_len: u16, + rtmsg_metric: u32, + rtmsg_info: c_ulong, + rtmsg_flags: u32, + rtmsg_ifindex: c_int, + } + + pub struct arpreq { + pub arp_pa: crate::sockaddr, + pub arp_ha: crate::sockaddr, + pub arp_flags: c_int, + pub arp_netmask: crate::sockaddr, + pub arp_dev: [c_char; 16], + } + + pub struct arpreq_old { + pub arp_pa: crate::sockaddr, + pub arp_ha: crate::sockaddr, + pub arp_flags: c_int, + pub arp_netmask: crate::sockaddr, + } + + pub struct arphdr { + pub ar_hrd: 
u16, + pub ar_pro: u16, + pub ar_hln: u8, + pub ar_pln: u8, + pub ar_op: u16, + } + + pub struct mmsghdr { + pub msg_hdr: crate::msghdr, + pub msg_len: c_uint, + } +} + +cfg_if! { + if #[cfg(not(target_os = "emscripten"))] { + s! { + pub struct file_clone_range { + pub src_fd: crate::__s64, + pub src_offset: crate::__u64, + pub src_length: crate::__u64, + pub dest_offset: crate::__u64, + } + + // linux/filter.h + pub struct sock_filter { + pub code: __u16, + pub jt: __u8, + pub jf: __u8, + pub k: __u32, + } + + pub struct sock_fprog { + pub len: c_ushort, + pub filter: *mut sock_filter, + } + } + } +} + +cfg_if! { + if #[cfg(any( + target_env = "gnu", + target_os = "android", + all(target_env = "musl", musl_v1_2_3) + ))] { + s! { + pub struct statx { + pub stx_mask: crate::__u32, + pub stx_blksize: crate::__u32, + pub stx_attributes: crate::__u64, + pub stx_nlink: crate::__u32, + pub stx_uid: crate::__u32, + pub stx_gid: crate::__u32, + pub stx_mode: crate::__u16, + __statx_pad1: [crate::__u16; 1], + pub stx_ino: crate::__u64, + pub stx_size: crate::__u64, + pub stx_blocks: crate::__u64, + pub stx_attributes_mask: crate::__u64, + pub stx_atime: statx_timestamp, + pub stx_btime: statx_timestamp, + pub stx_ctime: statx_timestamp, + pub stx_mtime: statx_timestamp, + pub stx_rdev_major: crate::__u32, + pub stx_rdev_minor: crate::__u32, + pub stx_dev_major: crate::__u32, + pub stx_dev_minor: crate::__u32, + pub stx_mnt_id: crate::__u64, + pub stx_dio_mem_align: crate::__u32, + pub stx_dio_offset_align: crate::__u32, + __statx_pad3: [crate::__u64; 12], + } + + pub struct statx_timestamp { + pub tv_sec: crate::__s64, + pub tv_nsec: crate::__u32, + __statx_timestamp_pad1: [crate::__s32; 1], + } + } + } +} + +s_no_extra_traits! { + #[cfg_attr( + any(target_arch = "x86_64", all(target_arch = "x86", target_env = "gnu")), + repr(packed) + )] + pub struct epoll_event { + pub events: u32, + pub u64: u64, + } + + pub struct sockaddr_un { + pub sun_family: sa_family_t, + pub sun_path: [c_char; 108], + } + + pub struct sockaddr_storage { + pub ss_family: sa_family_t, + #[cfg(target_pointer_width = "32")] + __ss_pad2: [u8; 128 - 2 - 4], + #[cfg(target_pointer_width = "64")] + __ss_pad2: [u8; 128 - 2 - 8], + __ss_align: size_t, + } + + pub struct utsname { + pub sysname: [c_char; 65], + pub nodename: [c_char; 65], + pub release: [c_char; 65], + pub version: [c_char; 65], + pub machine: [c_char; 65], + pub domainname: [c_char; 65], + } + + pub struct sigevent { + pub sigev_value: crate::sigval, + pub sigev_signo: c_int, + pub sigev_notify: c_int, + // Actually a union. We only expose sigev_notify_thread_id because it's + // the most useful member + pub sigev_notify_thread_id: c_int, + #[cfg(target_pointer_width = "64")] + __unused1: [c_int; 11], + #[cfg(target_pointer_width = "32")] + __unused1: [c_int; 12], + } +} + +cfg_if! 
{ + if #[cfg(feature = "extra_traits")] { + impl PartialEq for epoll_event { + fn eq(&self, other: &epoll_event) -> bool { + self.events == other.events && self.u64 == other.u64 + } + } + impl Eq for epoll_event {} + impl hash::Hash for epoll_event { + fn hash<H: hash::Hasher>(&self, state: &mut H) { + let events = self.events; + let u64 = self.u64; + events.hash(state); + u64.hash(state); + } + } + + impl PartialEq for sockaddr_un { + fn eq(&self, other: &sockaddr_un) -> bool { + self.sun_family == other.sun_family + && self + .sun_path + .iter() + .zip(other.sun_path.iter()) + .all(|(a, b)| a == b) + } + } + impl Eq for sockaddr_un {} + impl hash::Hash for sockaddr_un { + fn hash<H: hash::Hasher>(&self, state: &mut H) { + self.sun_family.hash(state); + self.sun_path.hash(state); + } + } + + impl PartialEq for sockaddr_storage { + fn eq(&self, other: &sockaddr_storage) -> bool { + self.ss_family == other.ss_family + && self + .__ss_pad2 + .iter() + .zip(other.__ss_pad2.iter()) + .all(|(a, b)| a == b) + } + } + + impl Eq for sockaddr_storage {} + + impl hash::Hash for sockaddr_storage { + fn hash<H: hash::Hasher>(&self, state: &mut H) { + self.ss_family.hash(state); + self.__ss_pad2.hash(state); + } + } + + impl PartialEq for utsname { + fn eq(&self, other: &utsname) -> bool { + self.sysname + .iter() + .zip(other.sysname.iter()) + .all(|(a, b)| a == b) + && self + .nodename + .iter() + .zip(other.nodename.iter()) + .all(|(a, b)| a == b) + && self + .release + .iter() + .zip(other.release.iter()) + .all(|(a, b)| a == b) + && self + .version + .iter() + .zip(other.version.iter()) + .all(|(a, b)| a == b) + && self + .machine + .iter() + .zip(other.machine.iter()) + .all(|(a, b)| a == b) + && self + .domainname + .iter() + .zip(other.domainname.iter()) + .all(|(a, b)| a == b) + } + } + + impl Eq for utsname {} + + impl hash::Hash for utsname { + fn hash<H: hash::Hasher>(&self, state: &mut H) { + self.sysname.hash(state); + self.nodename.hash(state); + self.release.hash(state); + self.version.hash(state); + self.machine.hash(state); + self.domainname.hash(state); + } + } + + impl PartialEq for sigevent { + fn eq(&self, other: &sigevent) -> bool { + self.sigev_value == other.sigev_value + && self.sigev_signo == other.sigev_signo + && self.sigev_notify == other.sigev_notify + && self.sigev_notify_thread_id == other.sigev_notify_thread_id + } + } + impl Eq for sigevent {} + impl hash::Hash for sigevent { + fn hash<H: hash::Hasher>(&self, state: &mut H) { + self.sigev_value.hash(state); + self.sigev_signo.hash(state); + self.sigev_notify.hash(state); + self.sigev_notify_thread_id.hash(state); + } + } + } +} + +// intentionally not public, only used for fd_set +cfg_if!
{ + if #[cfg(target_pointer_width = "32")] { + const ULONG_SIZE: usize = 32; + } else if #[cfg(target_pointer_width = "64")] { + const ULONG_SIZE: usize = 64; + } else { + // Unknown target_pointer_width + } +} + +pub const EXIT_FAILURE: c_int = 1; +pub const EXIT_SUCCESS: c_int = 0; +pub const RAND_MAX: c_int = 2147483647; +pub const EOF: c_int = -1; +pub const SEEK_SET: c_int = 0; +pub const SEEK_CUR: c_int = 1; +pub const SEEK_END: c_int = 2; +pub const _IOFBF: c_int = 0; +pub const _IONBF: c_int = 2; +pub const _IOLBF: c_int = 1; + +pub const F_DUPFD: c_int = 0; +pub const F_GETFD: c_int = 1; +pub const F_SETFD: c_int = 2; +pub const F_GETFL: c_int = 3; +pub const F_SETFL: c_int = 4; + +// Linux-specific fcntls +pub const F_SETLEASE: c_int = 1024; +pub const F_GETLEASE: c_int = 1025; +pub const F_NOTIFY: c_int = 1026; +pub const F_CANCELLK: c_int = 1029; +pub const F_DUPFD_CLOEXEC: c_int = 1030; +pub const F_SETPIPE_SZ: c_int = 1031; +pub const F_GETPIPE_SZ: c_int = 1032; +pub const F_ADD_SEALS: c_int = 1033; +pub const F_GET_SEALS: c_int = 1034; + +pub const F_SEAL_SEAL: c_int = 0x0001; +pub const F_SEAL_SHRINK: c_int = 0x0002; +pub const F_SEAL_GROW: c_int = 0x0004; +pub const F_SEAL_WRITE: c_int = 0x0008; + +// FIXME(#235): Include file sealing fcntls once we have a way to verify them. + +pub const SIGTRAP: c_int = 5; + +pub const PTHREAD_CREATE_JOINABLE: c_int = 0; +pub const PTHREAD_CREATE_DETACHED: c_int = 1; + +pub const CLOCK_REALTIME: crate::clockid_t = 0; +pub const CLOCK_MONOTONIC: crate::clockid_t = 1; +pub const CLOCK_PROCESS_CPUTIME_ID: crate::clockid_t = 2; +pub const CLOCK_THREAD_CPUTIME_ID: crate::clockid_t = 3; +pub const CLOCK_MONOTONIC_RAW: crate::clockid_t = 4; +pub const CLOCK_REALTIME_COARSE: crate::clockid_t = 5; +pub const CLOCK_MONOTONIC_COARSE: crate::clockid_t = 6; +pub const CLOCK_BOOTTIME: crate::clockid_t = 7; +pub const CLOCK_REALTIME_ALARM: crate::clockid_t = 8; +pub const CLOCK_BOOTTIME_ALARM: crate::clockid_t = 9; +pub const CLOCK_TAI: crate::clockid_t = 11; +pub const TIMER_ABSTIME: c_int = 1; + +pub const RUSAGE_SELF: c_int = 0; + +pub const O_RDONLY: c_int = 0; +pub const O_WRONLY: c_int = 1; +pub const O_RDWR: c_int = 2; + +pub const SOCK_CLOEXEC: c_int = O_CLOEXEC; + +pub const S_IFIFO: mode_t = 0o1_0000; +pub const S_IFCHR: mode_t = 0o2_0000; +pub const S_IFBLK: mode_t = 0o6_0000; +pub const S_IFDIR: mode_t = 0o4_0000; +pub const S_IFREG: mode_t = 0o10_0000; +pub const S_IFLNK: mode_t = 0o12_0000; +pub const S_IFSOCK: mode_t = 0o14_0000; +pub const S_IFMT: mode_t = 0o17_0000; +pub const S_IRWXU: mode_t = 0o0700; +pub const S_IXUSR: mode_t = 0o0100; +pub const S_IWUSR: mode_t = 0o0200; +pub const S_IRUSR: mode_t = 0o0400; +pub const S_IRWXG: mode_t = 0o0070; +pub const S_IXGRP: mode_t = 0o0010; +pub const S_IWGRP: mode_t = 0o0020; +pub const S_IRGRP: mode_t = 0o0040; +pub const S_IRWXO: mode_t = 0o0007; +pub const S_IXOTH: mode_t = 0o0001; +pub const S_IWOTH: mode_t = 0o0002; +pub const S_IROTH: mode_t = 0o0004; +pub const F_OK: c_int = 0; +pub const R_OK: c_int = 4; +pub const W_OK: c_int = 2; +pub const X_OK: c_int = 1; +pub const STDIN_FILENO: c_int = 0; +pub const STDOUT_FILENO: c_int = 1; +pub const STDERR_FILENO: c_int = 2; +pub const SIGHUP: c_int = 1; +pub const SIGINT: c_int = 2; +pub const SIGQUIT: c_int = 3; +pub const SIGILL: c_int = 4; +pub const SIGABRT: c_int = 6; +pub const SIGFPE: c_int = 8; +pub const SIGKILL: c_int = 9; +pub const SIGSEGV: c_int = 11; +pub const SIGPIPE: c_int = 13; +pub const SIGALRM: c_int = 14; +pub const 
SIGTERM: c_int = 15; + +pub const PROT_NONE: c_int = 0; +pub const PROT_READ: c_int = 1; +pub const PROT_WRITE: c_int = 2; +pub const PROT_EXEC: c_int = 4; + +pub const XATTR_CREATE: c_int = 0x1; +pub const XATTR_REPLACE: c_int = 0x2; + +cfg_if! { + if #[cfg(target_os = "android")] { + pub const RLIM64_INFINITY: c_ulonglong = !0; + } else { + pub const RLIM64_INFINITY: crate::rlim64_t = !0; + } +} + +cfg_if! { + if #[cfg(target_env = "ohos")] { + pub const LC_CTYPE: c_int = 0; + pub const LC_NUMERIC: c_int = 1; + pub const LC_TIME: c_int = 2; + pub const LC_COLLATE: c_int = 3; + pub const LC_MONETARY: c_int = 4; + pub const LC_MESSAGES: c_int = 5; + pub const LC_PAPER: c_int = 6; + pub const LC_NAME: c_int = 7; + pub const LC_ADDRESS: c_int = 8; + pub const LC_TELEPHONE: c_int = 9; + pub const LC_MEASUREMENT: c_int = 10; + pub const LC_IDENTIFICATION: c_int = 11; + pub const LC_ALL: c_int = 12; + } else if #[cfg(not(target_env = "uclibc"))] { + pub const LC_CTYPE: c_int = 0; + pub const LC_NUMERIC: c_int = 1; + pub const LC_TIME: c_int = 2; + pub const LC_COLLATE: c_int = 3; + pub const LC_MONETARY: c_int = 4; + pub const LC_MESSAGES: c_int = 5; + pub const LC_ALL: c_int = 6; + } +} + +pub const LC_CTYPE_MASK: c_int = 1 << LC_CTYPE; +pub const LC_NUMERIC_MASK: c_int = 1 << LC_NUMERIC; +pub const LC_TIME_MASK: c_int = 1 << LC_TIME; +pub const LC_COLLATE_MASK: c_int = 1 << LC_COLLATE; +pub const LC_MONETARY_MASK: c_int = 1 << LC_MONETARY; +pub const LC_MESSAGES_MASK: c_int = 1 << LC_MESSAGES; +// LC_ALL_MASK defined per platform + +pub const MAP_FILE: c_int = 0x0000; +pub const MAP_SHARED: c_int = 0x0001; +pub const MAP_PRIVATE: c_int = 0x0002; +pub const MAP_FIXED: c_int = 0x0010; + +pub const MAP_FAILED: *mut c_void = !0 as *mut c_void; + +// MS_ flags for msync(2) +pub const MS_ASYNC: c_int = 0x0001; +pub const MS_INVALIDATE: c_int = 0x0002; +pub const MS_SYNC: c_int = 0x0004; + +// MS_ flags for mount(2) +pub const MS_RDONLY: c_ulong = 0x01; +pub const MS_NOSUID: c_ulong = 0x02; +pub const MS_NODEV: c_ulong = 0x04; +pub const MS_NOEXEC: c_ulong = 0x08; +pub const MS_SYNCHRONOUS: c_ulong = 0x10; +pub const MS_REMOUNT: c_ulong = 0x20; +pub const MS_MANDLOCK: c_ulong = 0x40; +pub const MS_DIRSYNC: c_ulong = 0x80; +pub const MS_NOSYMFOLLOW: c_ulong = 0x100; +pub const MS_NOATIME: c_ulong = 0x0400; +pub const MS_NODIRATIME: c_ulong = 0x0800; +pub const MS_BIND: c_ulong = 0x1000; +pub const MS_MOVE: c_ulong = 0x2000; +pub const MS_REC: c_ulong = 0x4000; +pub const MS_SILENT: c_ulong = 0x8000; +pub const MS_POSIXACL: c_ulong = 0x010000; +pub const MS_UNBINDABLE: c_ulong = 0x020000; +pub const MS_PRIVATE: c_ulong = 0x040000; +pub const MS_SLAVE: c_ulong = 0x080000; +pub const MS_SHARED: c_ulong = 0x100000; +pub const MS_RELATIME: c_ulong = 0x200000; +pub const MS_KERNMOUNT: c_ulong = 0x400000; +pub const MS_I_VERSION: c_ulong = 0x800000; +pub const MS_STRICTATIME: c_ulong = 0x1000000; +pub const MS_LAZYTIME: c_ulong = 0x2000000; +pub const MS_ACTIVE: c_ulong = 0x40000000; +pub const MS_MGC_VAL: c_ulong = 0xc0ed0000; +pub const MS_MGC_MSK: c_ulong = 0xffff0000; + +pub const SCM_RIGHTS: c_int = 0x01; +pub const SCM_CREDENTIALS: c_int = 0x02; + +pub const PROT_GROWSDOWN: c_int = 0x1000000; +pub const PROT_GROWSUP: c_int = 0x2000000; + +pub const MAP_TYPE: c_int = 0x000f; + +pub const MADV_NORMAL: c_int = 0; +pub const MADV_RANDOM: c_int = 1; +pub const MADV_SEQUENTIAL: c_int = 2; +pub const MADV_WILLNEED: c_int = 3; +pub const MADV_DONTNEED: c_int = 4; +pub const MADV_FREE: c_int = 8; +pub const 
MADV_REMOVE: c_int = 9; +pub const MADV_DONTFORK: c_int = 10; +pub const MADV_DOFORK: c_int = 11; +pub const MADV_MERGEABLE: c_int = 12; +pub const MADV_UNMERGEABLE: c_int = 13; +pub const MADV_HUGEPAGE: c_int = 14; +pub const MADV_NOHUGEPAGE: c_int = 15; +pub const MADV_DONTDUMP: c_int = 16; +pub const MADV_DODUMP: c_int = 17; +pub const MADV_WIPEONFORK: c_int = 18; +pub const MADV_KEEPONFORK: c_int = 19; +pub const MADV_COLD: c_int = 20; +pub const MADV_PAGEOUT: c_int = 21; +pub const MADV_HWPOISON: c_int = 100; +cfg_if! { + if #[cfg(not(target_os = "emscripten"))] { + pub const MADV_POPULATE_READ: c_int = 22; + pub const MADV_POPULATE_WRITE: c_int = 23; + pub const MADV_DONTNEED_LOCKED: c_int = 24; + } +} + +pub const IFF_UP: c_int = 0x1; +pub const IFF_BROADCAST: c_int = 0x2; +pub const IFF_DEBUG: c_int = 0x4; +pub const IFF_LOOPBACK: c_int = 0x8; +pub const IFF_POINTOPOINT: c_int = 0x10; +pub const IFF_NOTRAILERS: c_int = 0x20; +pub const IFF_RUNNING: c_int = 0x40; +pub const IFF_NOARP: c_int = 0x80; +pub const IFF_PROMISC: c_int = 0x100; +pub const IFF_ALLMULTI: c_int = 0x200; +pub const IFF_MASTER: c_int = 0x400; +pub const IFF_SLAVE: c_int = 0x800; +pub const IFF_MULTICAST: c_int = 0x1000; +pub const IFF_PORTSEL: c_int = 0x2000; +pub const IFF_AUTOMEDIA: c_int = 0x4000; +pub const IFF_DYNAMIC: c_int = 0x8000; + +pub const SOL_IP: c_int = 0; +pub const SOL_TCP: c_int = 6; +pub const SOL_UDP: c_int = 17; +pub const SOL_IPV6: c_int = 41; +pub const SOL_ICMPV6: c_int = 58; +pub const SOL_RAW: c_int = 255; +pub const SOL_DECNET: c_int = 261; +pub const SOL_X25: c_int = 262; +pub const SOL_PACKET: c_int = 263; +pub const SOL_ATM: c_int = 264; +pub const SOL_AAL: c_int = 265; +pub const SOL_IRDA: c_int = 266; +pub const SOL_NETBEUI: c_int = 267; +pub const SOL_LLC: c_int = 268; +pub const SOL_DCCP: c_int = 269; +pub const SOL_NETLINK: c_int = 270; +pub const SOL_TIPC: c_int = 271; +pub const SOL_BLUETOOTH: c_int = 274; +pub const SOL_ALG: c_int = 279; + +pub const AF_UNSPEC: c_int = 0; +pub const AF_UNIX: c_int = 1; +pub const AF_LOCAL: c_int = 1; +pub const AF_INET: c_int = 2; +pub const AF_AX25: c_int = 3; +pub const AF_IPX: c_int = 4; +pub const AF_APPLETALK: c_int = 5; +pub const AF_NETROM: c_int = 6; +pub const AF_BRIDGE: c_int = 7; +pub const AF_ATMPVC: c_int = 8; +pub const AF_X25: c_int = 9; +pub const AF_INET6: c_int = 10; +pub const AF_ROSE: c_int = 11; +pub const AF_DECnet: c_int = 12; +pub const AF_NETBEUI: c_int = 13; +pub const AF_SECURITY: c_int = 14; +pub const AF_KEY: c_int = 15; +pub const AF_NETLINK: c_int = 16; +pub const AF_ROUTE: c_int = AF_NETLINK; +pub const AF_PACKET: c_int = 17; +pub const AF_ASH: c_int = 18; +pub const AF_ECONET: c_int = 19; +pub const AF_ATMSVC: c_int = 20; +pub const AF_RDS: c_int = 21; +pub const AF_SNA: c_int = 22; +pub const AF_IRDA: c_int = 23; +pub const AF_PPPOX: c_int = 24; +pub const AF_WANPIPE: c_int = 25; +pub const AF_LLC: c_int = 26; +pub const AF_CAN: c_int = 29; +pub const AF_TIPC: c_int = 30; +pub const AF_BLUETOOTH: c_int = 31; +pub const AF_IUCV: c_int = 32; +pub const AF_RXRPC: c_int = 33; +pub const AF_ISDN: c_int = 34; +pub const AF_PHONET: c_int = 35; +pub const AF_IEEE802154: c_int = 36; +pub const AF_CAIF: c_int = 37; +pub const AF_ALG: c_int = 38; + +pub const PF_UNSPEC: c_int = AF_UNSPEC; +pub const PF_UNIX: c_int = AF_UNIX; +pub const PF_LOCAL: c_int = AF_LOCAL; +pub const PF_INET: c_int = AF_INET; +pub const PF_AX25: c_int = AF_AX25; +pub const PF_IPX: c_int = AF_IPX; +pub const PF_APPLETALK: c_int = AF_APPLETALK; 
+pub const PF_NETROM: c_int = AF_NETROM; +pub const PF_BRIDGE: c_int = AF_BRIDGE; +pub const PF_ATMPVC: c_int = AF_ATMPVC; +pub const PF_X25: c_int = AF_X25; +pub const PF_INET6: c_int = AF_INET6; +pub const PF_ROSE: c_int = AF_ROSE; +pub const PF_DECnet: c_int = AF_DECnet; +pub const PF_NETBEUI: c_int = AF_NETBEUI; +pub const PF_SECURITY: c_int = AF_SECURITY; +pub const PF_KEY: c_int = AF_KEY; +pub const PF_NETLINK: c_int = AF_NETLINK; +pub const PF_ROUTE: c_int = AF_ROUTE; +pub const PF_PACKET: c_int = AF_PACKET; +pub const PF_ASH: c_int = AF_ASH; +pub const PF_ECONET: c_int = AF_ECONET; +pub const PF_ATMSVC: c_int = AF_ATMSVC; +pub const PF_RDS: c_int = AF_RDS; +pub const PF_SNA: c_int = AF_SNA; +pub const PF_IRDA: c_int = AF_IRDA; +pub const PF_PPPOX: c_int = AF_PPPOX; +pub const PF_WANPIPE: c_int = AF_WANPIPE; +pub const PF_LLC: c_int = AF_LLC; +pub const PF_CAN: c_int = AF_CAN; +pub const PF_TIPC: c_int = AF_TIPC; +pub const PF_BLUETOOTH: c_int = AF_BLUETOOTH; +pub const PF_IUCV: c_int = AF_IUCV; +pub const PF_RXRPC: c_int = AF_RXRPC; +pub const PF_ISDN: c_int = AF_ISDN; +pub const PF_PHONET: c_int = AF_PHONET; +pub const PF_IEEE802154: c_int = AF_IEEE802154; +pub const PF_CAIF: c_int = AF_CAIF; +pub const PF_ALG: c_int = AF_ALG; + +pub const MSG_OOB: c_int = 1; +pub const MSG_PEEK: c_int = 2; +pub const MSG_DONTROUTE: c_int = 4; +pub const MSG_CTRUNC: c_int = 8; +pub const MSG_TRUNC: c_int = 0x20; +pub const MSG_DONTWAIT: c_int = 0x40; +pub const MSG_EOR: c_int = 0x80; +pub const MSG_WAITALL: c_int = 0x100; +pub const MSG_FIN: c_int = 0x200; +pub const MSG_SYN: c_int = 0x400; +pub const MSG_CONFIRM: c_int = 0x800; +pub const MSG_RST: c_int = 0x1000; +pub const MSG_ERRQUEUE: c_int = 0x2000; +pub const MSG_NOSIGNAL: c_int = 0x4000; +pub const MSG_MORE: c_int = 0x8000; +pub const MSG_WAITFORONE: c_int = 0x10000; +pub const MSG_FASTOPEN: c_int = 0x20000000; +pub const MSG_CMSG_CLOEXEC: c_int = 0x40000000; + +pub const SCM_TIMESTAMP: c_int = SO_TIMESTAMP; + +pub const SOCK_RAW: c_int = 3; +pub const SOCK_RDM: c_int = 4; +pub const IP_TOS: c_int = 1; +pub const IP_TTL: c_int = 2; +pub const IP_HDRINCL: c_int = 3; +pub const IP_OPTIONS: c_int = 4; +pub const IP_ROUTER_ALERT: c_int = 5; +pub const IP_RECVOPTS: c_int = 6; +pub const IP_RETOPTS: c_int = 7; +pub const IP_PKTINFO: c_int = 8; +pub const IP_PKTOPTIONS: c_int = 9; +pub const IP_MTU_DISCOVER: c_int = 10; +pub const IP_RECVERR: c_int = 11; +pub const IP_RECVTTL: c_int = 12; +pub const IP_RECVTOS: c_int = 13; +pub const IP_MTU: c_int = 14; +pub const IP_FREEBIND: c_int = 15; +pub const IP_IPSEC_POLICY: c_int = 16; +pub const IP_XFRM_POLICY: c_int = 17; +pub const IP_PASSSEC: c_int = 18; +pub const IP_TRANSPARENT: c_int = 19; +pub const IP_ORIGDSTADDR: c_int = 20; +pub const IP_RECVORIGDSTADDR: c_int = IP_ORIGDSTADDR; +pub const IP_MINTTL: c_int = 21; +pub const IP_NODEFRAG: c_int = 22; +pub const IP_CHECKSUM: c_int = 23; +pub const IP_BIND_ADDRESS_NO_PORT: c_int = 24; +pub const IP_MULTICAST_IF: c_int = 32; +pub const IP_MULTICAST_TTL: c_int = 33; +pub const IP_MULTICAST_LOOP: c_int = 34; +pub const IP_ADD_MEMBERSHIP: c_int = 35; +pub const IP_DROP_MEMBERSHIP: c_int = 36; +pub const IP_UNBLOCK_SOURCE: c_int = 37; +pub const IP_BLOCK_SOURCE: c_int = 38; +pub const IP_ADD_SOURCE_MEMBERSHIP: c_int = 39; +pub const IP_DROP_SOURCE_MEMBERSHIP: c_int = 40; +pub const IP_MSFILTER: c_int = 41; +pub const IP_MULTICAST_ALL: c_int = 49; +pub const IP_UNICAST_IF: c_int = 50; + +pub const IP_DEFAULT_MULTICAST_TTL: c_int = 1; +pub const 
IP_DEFAULT_MULTICAST_LOOP: c_int = 1; + +pub const IP_PMTUDISC_DONT: c_int = 0; +pub const IP_PMTUDISC_WANT: c_int = 1; +pub const IP_PMTUDISC_DO: c_int = 2; +pub const IP_PMTUDISC_PROBE: c_int = 3; +pub const IP_PMTUDISC_INTERFACE: c_int = 4; +pub const IP_PMTUDISC_OMIT: c_int = 5; + +// IPPROTO_IP defined in src/unix/mod.rs +/// Hop-by-hop option header +pub const IPPROTO_HOPOPTS: c_int = 0; +// IPPROTO_ICMP defined in src/unix/mod.rs +/// group mgmt protocol +pub const IPPROTO_IGMP: c_int = 2; +/// for compatibility +pub const IPPROTO_IPIP: c_int = 4; +// IPPROTO_TCP defined in src/unix/mod.rs +/// exterior gateway protocol +pub const IPPROTO_EGP: c_int = 8; +/// pup +pub const IPPROTO_PUP: c_int = 12; +// IPPROTO_UDP defined in src/unix/mod.rs +/// xns idp +pub const IPPROTO_IDP: c_int = 22; +/// tp-4 w/ class negotiation +pub const IPPROTO_TP: c_int = 29; +/// DCCP +pub const IPPROTO_DCCP: c_int = 33; +// IPPROTO_IPV6 defined in src/unix/mod.rs +/// IP6 routing header +pub const IPPROTO_ROUTING: c_int = 43; +/// IP6 fragmentation header +pub const IPPROTO_FRAGMENT: c_int = 44; +/// resource reservation +pub const IPPROTO_RSVP: c_int = 46; +/// General Routing Encap. +pub const IPPROTO_GRE: c_int = 47; +/// IP6 Encap Sec. Payload +pub const IPPROTO_ESP: c_int = 50; +/// IP6 Auth Header +pub const IPPROTO_AH: c_int = 51; +// IPPROTO_ICMPV6 defined in src/unix/mod.rs +/// IP6 no next header +pub const IPPROTO_NONE: c_int = 59; +/// IP6 destination option +pub const IPPROTO_DSTOPTS: c_int = 60; +pub const IPPROTO_MTP: c_int = 92; +/// encapsulation header +pub const IPPROTO_ENCAP: c_int = 98; +/// Protocol indep. multicast +pub const IPPROTO_PIM: c_int = 103; +/// IP Payload Comp. Protocol +pub const IPPROTO_COMP: c_int = 108; +/// SCTP +pub const IPPROTO_SCTP: c_int = 132; +pub const IPPROTO_MH: c_int = 135; +pub const IPPROTO_UDPLITE: c_int = 136; +/// raw IP packet +pub const IPPROTO_RAW: c_int = 255; +pub const IPPROTO_BEETPH: c_int = 94; +pub const IPPROTO_MPLS: c_int = 137; +/// Multipath TCP +pub const IPPROTO_MPTCP: c_int = 262; +/// Ethernet-within-IPv6 encapsulation. 
+pub const IPPROTO_ETHERNET: c_int = 143; + +pub const MCAST_EXCLUDE: c_int = 0; +pub const MCAST_INCLUDE: c_int = 1; +pub const MCAST_JOIN_GROUP: c_int = 42; +pub const MCAST_BLOCK_SOURCE: c_int = 43; +pub const MCAST_UNBLOCK_SOURCE: c_int = 44; +pub const MCAST_LEAVE_GROUP: c_int = 45; +pub const MCAST_JOIN_SOURCE_GROUP: c_int = 46; +pub const MCAST_LEAVE_SOURCE_GROUP: c_int = 47; +pub const MCAST_MSFILTER: c_int = 48; + +pub const IPV6_ADDRFORM: c_int = 1; +pub const IPV6_2292PKTINFO: c_int = 2; +pub const IPV6_2292HOPOPTS: c_int = 3; +pub const IPV6_2292DSTOPTS: c_int = 4; +pub const IPV6_2292RTHDR: c_int = 5; +pub const IPV6_2292PKTOPTIONS: c_int = 6; +pub const IPV6_CHECKSUM: c_int = 7; +pub const IPV6_2292HOPLIMIT: c_int = 8; +pub const IPV6_NEXTHOP: c_int = 9; +pub const IPV6_AUTHHDR: c_int = 10; +pub const IPV6_UNICAST_HOPS: c_int = 16; +pub const IPV6_MULTICAST_IF: c_int = 17; +pub const IPV6_MULTICAST_HOPS: c_int = 18; +pub const IPV6_MULTICAST_LOOP: c_int = 19; +pub const IPV6_ADD_MEMBERSHIP: c_int = 20; +pub const IPV6_DROP_MEMBERSHIP: c_int = 21; +pub const IPV6_ROUTER_ALERT: c_int = 22; +pub const IPV6_MTU_DISCOVER: c_int = 23; +pub const IPV6_MTU: c_int = 24; +pub const IPV6_RECVERR: c_int = 25; +pub const IPV6_V6ONLY: c_int = 26; +pub const IPV6_JOIN_ANYCAST: c_int = 27; +pub const IPV6_LEAVE_ANYCAST: c_int = 28; +pub const IPV6_IPSEC_POLICY: c_int = 34; +pub const IPV6_XFRM_POLICY: c_int = 35; +pub const IPV6_HDRINCL: c_int = 36; +pub const IPV6_RECVPKTINFO: c_int = 49; +pub const IPV6_PKTINFO: c_int = 50; +pub const IPV6_RECVHOPLIMIT: c_int = 51; +pub const IPV6_HOPLIMIT: c_int = 52; +pub const IPV6_RECVHOPOPTS: c_int = 53; +pub const IPV6_HOPOPTS: c_int = 54; +pub const IPV6_RTHDRDSTOPTS: c_int = 55; +pub const IPV6_RECVRTHDR: c_int = 56; +pub const IPV6_RTHDR: c_int = 57; +pub const IPV6_RECVDSTOPTS: c_int = 58; +pub const IPV6_DSTOPTS: c_int = 59; +pub const IPV6_RECVPATHMTU: c_int = 60; +pub const IPV6_PATHMTU: c_int = 61; +pub const IPV6_DONTFRAG: c_int = 62; +pub const IPV6_RECVTCLASS: c_int = 66; +pub const IPV6_TCLASS: c_int = 67; +pub const IPV6_AUTOFLOWLABEL: c_int = 70; +pub const IPV6_ADDR_PREFERENCES: c_int = 72; +pub const IPV6_MINHOPCOUNT: c_int = 73; +pub const IPV6_ORIGDSTADDR: c_int = 74; +pub const IPV6_RECVORIGDSTADDR: c_int = IPV6_ORIGDSTADDR; +pub const IPV6_TRANSPARENT: c_int = 75; +pub const IPV6_UNICAST_IF: c_int = 76; +pub const IPV6_PREFER_SRC_TMP: c_int = 0x0001; +pub const IPV6_PREFER_SRC_PUBLIC: c_int = 0x0002; +pub const IPV6_PREFER_SRC_PUBTMP_DEFAULT: c_int = 0x0100; +pub const IPV6_PREFER_SRC_COA: c_int = 0x0004; +pub const IPV6_PREFER_SRC_HOME: c_int = 0x0400; +pub const IPV6_PREFER_SRC_CGA: c_int = 0x0008; +pub const IPV6_PREFER_SRC_NONCGA: c_int = 0x0800; + +pub const IPV6_PMTUDISC_DONT: c_int = 0; +pub const IPV6_PMTUDISC_WANT: c_int = 1; +pub const IPV6_PMTUDISC_DO: c_int = 2; +pub const IPV6_PMTUDISC_PROBE: c_int = 3; +pub const IPV6_PMTUDISC_INTERFACE: c_int = 4; +pub const IPV6_PMTUDISC_OMIT: c_int = 5; + +pub const TCP_NODELAY: c_int = 1; +pub const TCP_MAXSEG: c_int = 2; +pub const TCP_CORK: c_int = 3; +pub const TCP_KEEPIDLE: c_int = 4; +pub const TCP_KEEPINTVL: c_int = 5; +pub const TCP_KEEPCNT: c_int = 6; +pub const TCP_SYNCNT: c_int = 7; +pub const TCP_LINGER2: c_int = 8; +pub const TCP_DEFER_ACCEPT: c_int = 9; +pub const TCP_WINDOW_CLAMP: c_int = 10; +pub const TCP_INFO: c_int = 11; +pub const TCP_QUICKACK: c_int = 12; +pub const TCP_CONGESTION: c_int = 13; +pub const TCP_MD5SIG: c_int = 14; +cfg_if! 
{ + if #[cfg(all( + target_os = "linux", + any(target_env = "gnu", target_env = "musl", target_env = "ohos") + ))] { + // WARN: deprecated + pub const TCP_COOKIE_TRANSACTIONS: c_int = 15; + } +} +pub const TCP_THIN_LINEAR_TIMEOUTS: c_int = 16; +pub const TCP_THIN_DUPACK: c_int = 17; +pub const TCP_USER_TIMEOUT: c_int = 18; +pub const TCP_REPAIR: c_int = 19; +pub const TCP_REPAIR_QUEUE: c_int = 20; +pub const TCP_QUEUE_SEQ: c_int = 21; +pub const TCP_REPAIR_OPTIONS: c_int = 22; +pub const TCP_FASTOPEN: c_int = 23; +pub const TCP_TIMESTAMP: c_int = 24; +pub const TCP_NOTSENT_LOWAT: c_int = 25; +pub const TCP_CC_INFO: c_int = 26; +pub const TCP_SAVE_SYN: c_int = 27; +pub const TCP_SAVED_SYN: c_int = 28; +cfg_if! { + if #[cfg(not(target_os = "emscripten"))] { + // NOTE: emscripten doesn't support these options yet. + + pub const TCP_REPAIR_WINDOW: c_int = 29; + pub const TCP_FASTOPEN_CONNECT: c_int = 30; + pub const TCP_ULP: c_int = 31; + pub const TCP_MD5SIG_EXT: c_int = 32; + pub const TCP_FASTOPEN_KEY: c_int = 33; + pub const TCP_FASTOPEN_NO_COOKIE: c_int = 34; + pub const TCP_ZEROCOPY_RECEIVE: c_int = 35; + pub const TCP_INQ: c_int = 36; + pub const TCP_CM_INQ: c_int = TCP_INQ; + // NOTE: Some CI images doesn't have this option yet. + // pub const TCP_TX_DELAY: c_int = 37; + pub const TCP_MD5SIG_MAXKEYLEN: usize = 80; + } +} + +pub const SO_DEBUG: c_int = 1; + +pub const SHUT_RD: c_int = 0; +pub const SHUT_WR: c_int = 1; +pub const SHUT_RDWR: c_int = 2; + +pub const LOCK_SH: c_int = 1; +pub const LOCK_EX: c_int = 2; +pub const LOCK_NB: c_int = 4; +pub const LOCK_UN: c_int = 8; + +pub const SS_ONSTACK: c_int = 1; +pub const SS_DISABLE: c_int = 2; + +pub const PATH_MAX: c_int = 4096; + +pub const UIO_MAXIOV: c_int = 1024; + +pub const FD_SETSIZE: usize = 1024; + +pub const EPOLLIN: c_int = 0x1; +pub const EPOLLPRI: c_int = 0x2; +pub const EPOLLOUT: c_int = 0x4; +pub const EPOLLERR: c_int = 0x8; +pub const EPOLLHUP: c_int = 0x10; +pub const EPOLLRDNORM: c_int = 0x40; +pub const EPOLLRDBAND: c_int = 0x80; +pub const EPOLLWRNORM: c_int = 0x100; +pub const EPOLLWRBAND: c_int = 0x200; +pub const EPOLLMSG: c_int = 0x400; +pub const EPOLLRDHUP: c_int = 0x2000; +pub const EPOLLEXCLUSIVE: c_int = 0x10000000; +pub const EPOLLWAKEUP: c_int = 0x20000000; +pub const EPOLLONESHOT: c_int = 0x40000000; +pub const EPOLLET: c_int = 0x80000000; + +pub const EPOLL_CTL_ADD: c_int = 1; +pub const EPOLL_CTL_MOD: c_int = 3; +pub const EPOLL_CTL_DEL: c_int = 2; + +pub const MNT_FORCE: c_int = 0x1; +pub const MNT_DETACH: c_int = 0x2; +pub const MNT_EXPIRE: c_int = 0x4; +pub const UMOUNT_NOFOLLOW: c_int = 0x8; + +pub const Q_GETFMT: c_int = 0x800004; +pub const Q_GETINFO: c_int = 0x800005; +pub const Q_SETINFO: c_int = 0x800006; +pub const QIF_BLIMITS: u32 = 1; +pub const QIF_SPACE: u32 = 2; +pub const QIF_ILIMITS: u32 = 4; +pub const QIF_INODES: u32 = 8; +pub const QIF_BTIME: u32 = 16; +pub const QIF_ITIME: u32 = 32; +pub const QIF_LIMITS: u32 = 5; +pub const QIF_USAGE: u32 = 10; +pub const QIF_TIMES: u32 = 48; +pub const QIF_ALL: u32 = 63; + +pub const Q_SYNC: c_int = 0x800001; +pub const Q_QUOTAON: c_int = 0x800002; +pub const Q_QUOTAOFF: c_int = 0x800003; +pub const Q_GETQUOTA: c_int = 0x800007; +pub const Q_SETQUOTA: c_int = 0x800008; + +pub const TCIOFF: c_int = 2; +pub const TCION: c_int = 3; +pub const TCOOFF: c_int = 0; +pub const TCOON: c_int = 1; +pub const TCIFLUSH: c_int = 0; +pub const TCOFLUSH: c_int = 1; +pub const TCIOFLUSH: c_int = 2; +pub const NL0: crate::tcflag_t = 0x00000000; +pub const NL1: 
crate::tcflag_t = 0x00000100; +pub const TAB0: crate::tcflag_t = 0x00000000; +pub const CR0: crate::tcflag_t = 0x00000000; +pub const FF0: crate::tcflag_t = 0x00000000; +pub const BS0: crate::tcflag_t = 0x00000000; +pub const VT0: crate::tcflag_t = 0x00000000; +pub const VERASE: usize = 2; +pub const VKILL: usize = 3; +pub const VINTR: usize = 0; +pub const VQUIT: usize = 1; +pub const VLNEXT: usize = 15; +pub const IGNBRK: crate::tcflag_t = 0x00000001; +pub const BRKINT: crate::tcflag_t = 0x00000002; +pub const IGNPAR: crate::tcflag_t = 0x00000004; +pub const PARMRK: crate::tcflag_t = 0x00000008; +pub const INPCK: crate::tcflag_t = 0x00000010; +pub const ISTRIP: crate::tcflag_t = 0x00000020; +pub const INLCR: crate::tcflag_t = 0x00000040; +pub const IGNCR: crate::tcflag_t = 0x00000080; +pub const ICRNL: crate::tcflag_t = 0x00000100; +pub const IXANY: crate::tcflag_t = 0x00000800; +pub const IMAXBEL: crate::tcflag_t = 0x00002000; +pub const OPOST: crate::tcflag_t = 0x1; +pub const CS5: crate::tcflag_t = 0x00000000; +pub const CRTSCTS: crate::tcflag_t = 0x80000000; +pub const ECHO: crate::tcflag_t = 0x00000008; +pub const OCRNL: crate::tcflag_t = 0o000010; +pub const ONOCR: crate::tcflag_t = 0o000020; +pub const ONLRET: crate::tcflag_t = 0o000040; +pub const OFILL: crate::tcflag_t = 0o000100; +pub const OFDEL: crate::tcflag_t = 0o000200; + +pub const CLONE_VM: c_int = 0x100; +pub const CLONE_FS: c_int = 0x200; +pub const CLONE_FILES: c_int = 0x400; +pub const CLONE_SIGHAND: c_int = 0x800; +pub const CLONE_PTRACE: c_int = 0x2000; +pub const CLONE_VFORK: c_int = 0x4000; +pub const CLONE_PARENT: c_int = 0x8000; +pub const CLONE_THREAD: c_int = 0x10000; +pub const CLONE_NEWNS: c_int = 0x20000; +pub const CLONE_SYSVSEM: c_int = 0x40000; +pub const CLONE_SETTLS: c_int = 0x80000; +pub const CLONE_PARENT_SETTID: c_int = 0x100000; +pub const CLONE_CHILD_CLEARTID: c_int = 0x200000; +pub const CLONE_DETACHED: c_int = 0x400000; +pub const CLONE_UNTRACED: c_int = 0x800000; +pub const CLONE_CHILD_SETTID: c_int = 0x01000000; +pub const CLONE_NEWCGROUP: c_int = 0x02000000; +pub const CLONE_NEWUTS: c_int = 0x04000000; +pub const CLONE_NEWIPC: c_int = 0x08000000; +pub const CLONE_NEWUSER: c_int = 0x10000000; +pub const CLONE_NEWPID: c_int = 0x20000000; +pub const CLONE_NEWNET: c_int = 0x40000000; +pub const CLONE_IO: c_int = 0x80000000; + +pub const WNOHANG: c_int = 0x00000001; +pub const WUNTRACED: c_int = 0x00000002; +pub const WSTOPPED: c_int = WUNTRACED; +pub const WEXITED: c_int = 0x00000004; +pub const WCONTINUED: c_int = 0x00000008; +pub const WNOWAIT: c_int = 0x01000000; + +// Options for personality(2). +pub const ADDR_NO_RANDOMIZE: c_int = 0x0040000; +pub const MMAP_PAGE_ZERO: c_int = 0x0100000; +pub const ADDR_COMPAT_LAYOUT: c_int = 0x0200000; +pub const READ_IMPLIES_EXEC: c_int = 0x0400000; +pub const ADDR_LIMIT_32BIT: c_int = 0x0800000; +pub const SHORT_INODE: c_int = 0x1000000; +pub const WHOLE_SECONDS: c_int = 0x2000000; +pub const STICKY_TIMEOUTS: c_int = 0x4000000; +pub const ADDR_LIMIT_3GB: c_int = 0x8000000; + +// Options set using PTRACE_SETOPTIONS. 
+pub const PTRACE_O_TRACESYSGOOD: c_int = 0x00000001; +pub const PTRACE_O_TRACEFORK: c_int = 0x00000002; +pub const PTRACE_O_TRACEVFORK: c_int = 0x00000004; +pub const PTRACE_O_TRACECLONE: c_int = 0x00000008; +pub const PTRACE_O_TRACEEXEC: c_int = 0x00000010; +pub const PTRACE_O_TRACEVFORKDONE: c_int = 0x00000020; +pub const PTRACE_O_TRACEEXIT: c_int = 0x00000040; +pub const PTRACE_O_TRACESECCOMP: c_int = 0x00000080; +pub const PTRACE_O_SUSPEND_SECCOMP: c_int = 0x00200000; +pub const PTRACE_O_EXITKILL: c_int = 0x00100000; +pub const PTRACE_O_MASK: c_int = 0x003000ff; + +// Wait extended result codes for the above trace options. +pub const PTRACE_EVENT_FORK: c_int = 1; +pub const PTRACE_EVENT_VFORK: c_int = 2; +pub const PTRACE_EVENT_CLONE: c_int = 3; +pub const PTRACE_EVENT_EXEC: c_int = 4; +pub const PTRACE_EVENT_VFORK_DONE: c_int = 5; +pub const PTRACE_EVENT_EXIT: c_int = 6; +pub const PTRACE_EVENT_SECCOMP: c_int = 7; + +pub const __WNOTHREAD: c_int = 0x20000000; +pub const __WALL: c_int = 0x40000000; +pub const __WCLONE: c_int = 0x80000000; + +pub const SPLICE_F_MOVE: c_uint = 0x01; +pub const SPLICE_F_NONBLOCK: c_uint = 0x02; +pub const SPLICE_F_MORE: c_uint = 0x04; +pub const SPLICE_F_GIFT: c_uint = 0x08; + +pub const RTLD_LOCAL: c_int = 0; +pub const RTLD_LAZY: c_int = 1; + +pub const POSIX_FADV_NORMAL: c_int = 0; +pub const POSIX_FADV_RANDOM: c_int = 1; +pub const POSIX_FADV_SEQUENTIAL: c_int = 2; +pub const POSIX_FADV_WILLNEED: c_int = 3; + +pub const AT_FDCWD: c_int = -100; +pub const AT_SYMLINK_NOFOLLOW: c_int = 0x100; +pub const AT_REMOVEDIR: c_int = 0x200; +pub const AT_SYMLINK_FOLLOW: c_int = 0x400; +pub const AT_NO_AUTOMOUNT: c_int = 0x800; +pub const AT_EMPTY_PATH: c_int = 0x1000; +pub const AT_RECURSIVE: c_int = 0x8000; + +pub const LOG_CRON: c_int = 9 << 3; +pub const LOG_AUTHPRIV: c_int = 10 << 3; +pub const LOG_FTP: c_int = 11 << 3; +pub const LOG_PERROR: c_int = 0x20; + +pub const PIPE_BUF: usize = 4096; + +pub const SI_LOAD_SHIFT: c_uint = 16; + +// si_code values +pub const SI_USER: c_int = 0; +pub const SI_KERNEL: c_int = 0x80; +pub const SI_QUEUE: c_int = -1; +cfg_if! { + if #[cfg(not(any( + target_arch = "mips", + target_arch = "mips32r6", + target_arch = "mips64" + )))] { + pub const SI_TIMER: c_int = -2; + pub const SI_MESGQ: c_int = -3; + pub const SI_ASYNCIO: c_int = -4; + } else { + pub const SI_TIMER: c_int = -3; + pub const SI_MESGQ: c_int = -4; + pub const SI_ASYNCIO: c_int = -2; + } +} +pub const SI_SIGIO: c_int = -5; +pub const SI_TKILL: c_int = -6; +pub const SI_ASYNCNL: c_int = -60; + +// si_code values for SIGBUS signal +pub const BUS_ADRALN: c_int = 1; +pub const BUS_ADRERR: c_int = 2; +pub const BUS_OBJERR: c_int = 3; +// Linux-specific si_code values for SIGBUS signal +pub const BUS_MCEERR_AR: c_int = 4; +pub const BUS_MCEERR_AO: c_int = 5; + +// si_code values for SIGTRAP +pub const TRAP_BRKPT: c_int = 1; +pub const TRAP_TRACE: c_int = 2; +pub const TRAP_BRANCH: c_int = 3; +pub const TRAP_HWBKPT: c_int = 4; +pub const TRAP_UNK: c_int = 5; + +// si_code values for SIGCHLD signal +pub const CLD_EXITED: c_int = 1; +pub const CLD_KILLED: c_int = 2; +pub const CLD_DUMPED: c_int = 3; +pub const CLD_TRAPPED: c_int = 4; +pub const CLD_STOPPED: c_int = 5; +pub const CLD_CONTINUED: c_int = 6; + +pub const SIGEV_SIGNAL: c_int = 0; +pub const SIGEV_NONE: c_int = 1; +pub const SIGEV_THREAD: c_int = 2; + +pub const P_ALL: idtype_t = 0; +pub const P_PID: idtype_t = 1; +pub const P_PGID: idtype_t = 2; +cfg_if! 
{ + if #[cfg(not(target_os = "emscripten"))] { + pub const P_PIDFD: idtype_t = 3; + } +} + +pub const UTIME_OMIT: c_long = 1073741822; +pub const UTIME_NOW: c_long = 1073741823; + +pub const POLLIN: c_short = 0x1; +pub const POLLPRI: c_short = 0x2; +pub const POLLOUT: c_short = 0x4; +pub const POLLERR: c_short = 0x8; +pub const POLLHUP: c_short = 0x10; +pub const POLLNVAL: c_short = 0x20; +pub const POLLRDNORM: c_short = 0x040; +pub const POLLRDBAND: c_short = 0x080; +#[cfg(not(any(target_arch = "sparc", target_arch = "sparc64")))] +pub const POLLRDHUP: c_short = 0x2000; +#[cfg(any(target_arch = "sparc", target_arch = "sparc64"))] +pub const POLLRDHUP: c_short = 0x800; + +pub const IPTOS_LOWDELAY: u8 = 0x10; +pub const IPTOS_THROUGHPUT: u8 = 0x08; +pub const IPTOS_RELIABILITY: u8 = 0x04; +pub const IPTOS_MINCOST: u8 = 0x02; + +pub const IPTOS_PREC_NETCONTROL: u8 = 0xe0; +pub const IPTOS_PREC_INTERNETCONTROL: u8 = 0xc0; +pub const IPTOS_PREC_CRITIC_ECP: u8 = 0xa0; +pub const IPTOS_PREC_FLASHOVERRIDE: u8 = 0x80; +pub const IPTOS_PREC_FLASH: u8 = 0x60; +pub const IPTOS_PREC_IMMEDIATE: u8 = 0x40; +pub const IPTOS_PREC_PRIORITY: u8 = 0x20; +pub const IPTOS_PREC_ROUTINE: u8 = 0x00; + +pub const IPTOS_ECN_MASK: u8 = 0x03; +pub const IPTOS_ECN_ECT1: u8 = 0x01; +pub const IPTOS_ECN_ECT0: u8 = 0x02; +pub const IPTOS_ECN_CE: u8 = 0x03; + +pub const IPOPT_COPY: u8 = 0x80; +pub const IPOPT_CLASS_MASK: u8 = 0x60; +pub const IPOPT_NUMBER_MASK: u8 = 0x1f; + +pub const IPOPT_CONTROL: u8 = 0x00; +pub const IPOPT_RESERVED1: u8 = 0x20; +pub const IPOPT_MEASUREMENT: u8 = 0x40; +pub const IPOPT_RESERVED2: u8 = 0x60; +pub const IPOPT_END: u8 = 0 | IPOPT_CONTROL; +pub const IPOPT_NOOP: u8 = 1 | IPOPT_CONTROL; +pub const IPOPT_SEC: u8 = 2 | IPOPT_CONTROL | IPOPT_COPY; +pub const IPOPT_LSRR: u8 = 3 | IPOPT_CONTROL | IPOPT_COPY; +pub const IPOPT_TIMESTAMP: u8 = 4 | IPOPT_MEASUREMENT; +pub const IPOPT_RR: u8 = 7 | IPOPT_CONTROL; +pub const IPOPT_SID: u8 = 8 | IPOPT_CONTROL | IPOPT_COPY; +pub const IPOPT_SSRR: u8 = 9 | IPOPT_CONTROL | IPOPT_COPY; +pub const IPOPT_RA: u8 = 20 | IPOPT_CONTROL | IPOPT_COPY; +pub const IPVERSION: u8 = 4; +pub const MAXTTL: u8 = 255; +pub const IPDEFTTL: u8 = 64; +pub const IPOPT_OPTVAL: u8 = 0; +pub const IPOPT_OLEN: u8 = 1; +pub const IPOPT_OFFSET: u8 = 2; +pub const IPOPT_MINOFF: u8 = 4; +pub const MAX_IPOPTLEN: u8 = 40; +pub const IPOPT_NOP: u8 = IPOPT_NOOP; +pub const IPOPT_EOL: u8 = IPOPT_END; +pub const IPOPT_TS: u8 = IPOPT_TIMESTAMP; +pub const IPOPT_TS_TSONLY: u8 = 0; +pub const IPOPT_TS_TSANDADDR: u8 = 1; +pub const IPOPT_TS_PRESPEC: u8 = 3; + +pub const ARPOP_RREQUEST: u16 = 3; +pub const ARPOP_RREPLY: u16 = 4; +pub const ARPOP_InREQUEST: u16 = 8; +pub const ARPOP_InREPLY: u16 = 9; +pub const ARPOP_NAK: u16 = 10; + +pub const ATF_NETMASK: c_int = 0x20; +pub const ATF_DONTPUB: c_int = 0x40; + +pub const ARPHRD_NETROM: u16 = 0; +pub const ARPHRD_ETHER: u16 = 1; +pub const ARPHRD_EETHER: u16 = 2; +pub const ARPHRD_AX25: u16 = 3; +pub const ARPHRD_PRONET: u16 = 4; +pub const ARPHRD_CHAOS: u16 = 5; +pub const ARPHRD_IEEE802: u16 = 6; +pub const ARPHRD_ARCNET: u16 = 7; +pub const ARPHRD_APPLETLK: u16 = 8; +pub const ARPHRD_DLCI: u16 = 15; +pub const ARPHRD_ATM: u16 = 19; +pub const ARPHRD_METRICOM: u16 = 23; +pub const ARPHRD_IEEE1394: u16 = 24; +pub const ARPHRD_EUI64: u16 = 27; +pub const ARPHRD_INFINIBAND: u16 = 32; + +pub const ARPHRD_SLIP: u16 = 256; +pub const ARPHRD_CSLIP: u16 = 257; +pub const ARPHRD_SLIP6: u16 = 258; +pub const ARPHRD_CSLIP6: u16 = 259; +pub const 
ARPHRD_RSRVD: u16 = 260; +pub const ARPHRD_ADAPT: u16 = 264; +pub const ARPHRD_ROSE: u16 = 270; +pub const ARPHRD_X25: u16 = 271; +pub const ARPHRD_HWX25: u16 = 272; +pub const ARPHRD_CAN: u16 = 280; +pub const ARPHRD_PPP: u16 = 512; +pub const ARPHRD_CISCO: u16 = 513; +pub const ARPHRD_HDLC: u16 = ARPHRD_CISCO; +pub const ARPHRD_LAPB: u16 = 516; +pub const ARPHRD_DDCMP: u16 = 517; +pub const ARPHRD_RAWHDLC: u16 = 518; + +pub const ARPHRD_TUNNEL: u16 = 768; +pub const ARPHRD_TUNNEL6: u16 = 769; +pub const ARPHRD_FRAD: u16 = 770; +pub const ARPHRD_SKIP: u16 = 771; +pub const ARPHRD_LOOPBACK: u16 = 772; +pub const ARPHRD_LOCALTLK: u16 = 773; +pub const ARPHRD_FDDI: u16 = 774; +pub const ARPHRD_BIF: u16 = 775; +pub const ARPHRD_SIT: u16 = 776; +pub const ARPHRD_IPDDP: u16 = 777; +pub const ARPHRD_IPGRE: u16 = 778; +pub const ARPHRD_PIMREG: u16 = 779; +pub const ARPHRD_HIPPI: u16 = 780; +pub const ARPHRD_ASH: u16 = 781; +pub const ARPHRD_ECONET: u16 = 782; +pub const ARPHRD_IRDA: u16 = 783; +pub const ARPHRD_FCPP: u16 = 784; +pub const ARPHRD_FCAL: u16 = 785; +pub const ARPHRD_FCPL: u16 = 786; +pub const ARPHRD_FCFABRIC: u16 = 787; +pub const ARPHRD_IEEE802_TR: u16 = 800; +pub const ARPHRD_IEEE80211: u16 = 801; +pub const ARPHRD_IEEE80211_PRISM: u16 = 802; +pub const ARPHRD_IEEE80211_RADIOTAP: u16 = 803; +pub const ARPHRD_IEEE802154: u16 = 804; + +pub const ARPHRD_VOID: u16 = 0xFFFF; +pub const ARPHRD_NONE: u16 = 0xFFFE; + +cfg_if! { + if #[cfg(not(target_os = "emscripten"))] { + // linux/if_tun.h + /* TUNSETIFF ifr flags */ + pub const IFF_TUN: c_int = 0x0001; + pub const IFF_TAP: c_int = 0x0002; + pub const IFF_NAPI: c_int = 0x0010; + pub const IFF_NAPI_FRAGS: c_int = 0x0020; + // Used in TUNSETIFF to bring up tun/tap without carrier + pub const IFF_NO_CARRIER: c_int = 0x0040; + pub const IFF_NO_PI: c_int = 0x1000; + // Read queue size + pub const TUN_READQ_SIZE: c_short = 500; + // TUN device type flags: deprecated. Use IFF_TUN/IFF_TAP instead. 
+ pub const TUN_TUN_DEV: c_short = crate::IFF_TUN as c_short; + pub const TUN_TAP_DEV: c_short = crate::IFF_TAP as c_short; + pub const TUN_TYPE_MASK: c_short = 0x000f; + // This flag has no real effect + pub const IFF_ONE_QUEUE: c_int = 0x2000; + pub const IFF_VNET_HDR: c_int = 0x4000; + pub const IFF_TUN_EXCL: c_int = 0x8000; + pub const IFF_MULTI_QUEUE: c_int = 0x0100; + pub const IFF_ATTACH_QUEUE: c_int = 0x0200; + pub const IFF_DETACH_QUEUE: c_int = 0x0400; + // read-only flag + pub const IFF_PERSIST: c_int = 0x0800; + pub const IFF_NOFILTER: c_int = 0x1000; + // Socket options + pub const TUN_TX_TIMESTAMP: c_int = 1; + // Features for GSO (TUNSETOFFLOAD) + pub const TUN_F_CSUM: c_uint = 0x01; + pub const TUN_F_TSO4: c_uint = 0x02; + pub const TUN_F_TSO6: c_uint = 0x04; + pub const TUN_F_TSO_ECN: c_uint = 0x08; + pub const TUN_F_UFO: c_uint = 0x10; + pub const TUN_F_USO4: c_uint = 0x20; + pub const TUN_F_USO6: c_uint = 0x40; + // Protocol info prepended to the packets (when IFF_NO_PI is not set) + pub const TUN_PKT_STRIP: c_int = 0x0001; + // Accept all multicast packets + pub const TUN_FLT_ALLMULTI: c_int = 0x0001; + // Ioctl operation codes + const T_TYPE: u32 = b'T' as u32; + pub const TUNSETNOCSUM: Ioctl = _IOW::(T_TYPE, 200); + pub const TUNSETDEBUG: Ioctl = _IOW::(T_TYPE, 201); + pub const TUNSETIFF: Ioctl = _IOW::(T_TYPE, 202); + pub const TUNSETPERSIST: Ioctl = _IOW::(T_TYPE, 203); + pub const TUNSETOWNER: Ioctl = _IOW::(T_TYPE, 204); + pub const TUNSETLINK: Ioctl = _IOW::(T_TYPE, 205); + pub const TUNSETGROUP: Ioctl = _IOW::(T_TYPE, 206); + pub const TUNGETFEATURES: Ioctl = _IOR::(T_TYPE, 207); + pub const TUNSETOFFLOAD: Ioctl = _IOW::(T_TYPE, 208); + pub const TUNSETTXFILTER: Ioctl = _IOW::(T_TYPE, 209); + pub const TUNGETIFF: Ioctl = _IOR::(T_TYPE, 210); + pub const TUNGETSNDBUF: Ioctl = _IOR::(T_TYPE, 211); + pub const TUNSETSNDBUF: Ioctl = _IOW::(T_TYPE, 212); + pub const TUNATTACHFILTER: Ioctl = _IOW::(T_TYPE, 213); + pub const TUNDETACHFILTER: Ioctl = _IOW::(T_TYPE, 214); + pub const TUNGETVNETHDRSZ: Ioctl = _IOR::(T_TYPE, 215); + pub const TUNSETVNETHDRSZ: Ioctl = _IOW::(T_TYPE, 216); + pub const TUNSETQUEUE: Ioctl = _IOW::(T_TYPE, 217); + pub const TUNSETIFINDEX: Ioctl = _IOW::(T_TYPE, 218); + pub const TUNGETFILTER: Ioctl = _IOR::(T_TYPE, 219); + pub const TUNSETVNETLE: Ioctl = _IOW::(T_TYPE, 220); + pub const TUNGETVNETLE: Ioctl = _IOR::(T_TYPE, 221); + pub const TUNSETVNETBE: Ioctl = _IOW::(T_TYPE, 222); + pub const TUNGETVNETBE: Ioctl = _IOR::(T_TYPE, 223); + pub const TUNSETSTEERINGEBPF: Ioctl = _IOR::(T_TYPE, 224); + pub const TUNSETFILTEREBPF: Ioctl = _IOR::(T_TYPE, 225); + pub const TUNSETCARRIER: Ioctl = _IOW::(T_TYPE, 226); + pub const TUNGETDEVNETNS: Ioctl = _IO(T_TYPE, 227); + + // linux/fs.h + pub const FS_IOC_GETFLAGS: Ioctl = _IOR::('f' as u32, 1); + pub const FS_IOC_SETFLAGS: Ioctl = _IOW::('f' as u32, 2); + pub const FS_IOC_GETVERSION: Ioctl = _IOR::('v' as u32, 1); + pub const FS_IOC_SETVERSION: Ioctl = _IOW::('v' as u32, 2); + pub const FS_IOC32_GETFLAGS: Ioctl = _IOR::('f' as u32, 1); + pub const FS_IOC32_SETFLAGS: Ioctl = _IOW::('f' as u32, 2); + pub const FS_IOC32_GETVERSION: Ioctl = _IOR::('v' as u32, 1); + pub const FS_IOC32_SETVERSION: Ioctl = _IOW::('v' as u32, 2); + + pub const FICLONE: Ioctl = _IOW::(0x94, 9); + pub const FICLONERANGE: Ioctl = _IOW::(0x94, 13); + } +} + +cfg_if! { + if #[cfg(target_os = "emscripten")] { + // Emscripten does not define any `*_SUPER_MAGIC` constants. 
+ } else if #[cfg(not(target_arch = "s390x"))] { + pub const ADFS_SUPER_MAGIC: c_long = 0x0000adf5; + pub const AFFS_SUPER_MAGIC: c_long = 0x0000adff; + pub const AFS_SUPER_MAGIC: c_long = 0x5346414f; + pub const AUTOFS_SUPER_MAGIC: c_long = 0x0187; + pub const BPF_FS_MAGIC: c_long = 0xcafe4a11; + pub const BTRFS_SUPER_MAGIC: c_long = 0x9123683e; + pub const CGROUP2_SUPER_MAGIC: c_long = 0x63677270; + pub const CGROUP_SUPER_MAGIC: c_long = 0x27e0eb; + pub const CODA_SUPER_MAGIC: c_long = 0x73757245; + pub const CRAMFS_MAGIC: c_long = 0x28cd3d45; + pub const DEBUGFS_MAGIC: c_long = 0x64626720; + pub const DEVPTS_SUPER_MAGIC: c_long = 0x1cd1; + pub const ECRYPTFS_SUPER_MAGIC: c_long = 0xf15f; + pub const EFS_SUPER_MAGIC: c_long = 0x00414a53; + pub const EXT2_SUPER_MAGIC: c_long = 0x0000ef53; + pub const EXT3_SUPER_MAGIC: c_long = 0x0000ef53; + pub const EXT4_SUPER_MAGIC: c_long = 0x0000ef53; + pub const F2FS_SUPER_MAGIC: c_long = 0xf2f52010; + pub const FUSE_SUPER_MAGIC: c_long = 0x65735546; + pub const FUTEXFS_SUPER_MAGIC: c_long = 0xbad1dea; + pub const HOSTFS_SUPER_MAGIC: c_long = 0x00c0ffee; + pub const HPFS_SUPER_MAGIC: c_long = 0xf995e849; + pub const HUGETLBFS_MAGIC: c_long = 0x958458f6; + pub const ISOFS_SUPER_MAGIC: c_long = 0x00009660; + pub const JFFS2_SUPER_MAGIC: c_long = 0x000072b6; + pub const MINIX2_SUPER_MAGIC2: c_long = 0x00002478; + pub const MINIX2_SUPER_MAGIC: c_long = 0x00002468; + pub const MINIX3_SUPER_MAGIC: c_long = 0x4d5a; + pub const MINIX_SUPER_MAGIC2: c_long = 0x0000138f; + pub const MINIX_SUPER_MAGIC: c_long = 0x0000137f; + pub const MSDOS_SUPER_MAGIC: c_long = 0x00004d44; + pub const NCP_SUPER_MAGIC: c_long = 0x0000564c; + pub const NFS_SUPER_MAGIC: c_long = 0x00006969; + pub const NILFS_SUPER_MAGIC: c_long = 0x3434; + pub const OCFS2_SUPER_MAGIC: c_long = 0x7461636f; + pub const OPENPROM_SUPER_MAGIC: c_long = 0x00009fa1; + pub const OVERLAYFS_SUPER_MAGIC: c_long = 0x794c7630; + pub const PROC_SUPER_MAGIC: c_long = 0x00009fa0; + pub const QNX4_SUPER_MAGIC: c_long = 0x0000002f; + pub const QNX6_SUPER_MAGIC: c_long = 0x68191122; + pub const RDTGROUP_SUPER_MAGIC: c_long = 0x7655821; + pub const REISERFS_SUPER_MAGIC: c_long = 0x52654973; + pub const SECURITYFS_MAGIC: c_long = 0x73636673; + pub const SELINUX_MAGIC: c_long = 0xf97cff8c; + pub const SMACK_MAGIC: c_long = 0x43415d53; + pub const SMB_SUPER_MAGIC: c_long = 0x0000517b; + pub const SYSFS_MAGIC: c_long = 0x62656572; + pub const TMPFS_MAGIC: c_long = 0x01021994; + pub const TRACEFS_MAGIC: c_long = 0x74726163; + pub const UDF_SUPER_MAGIC: c_long = 0x15013346; + pub const USBDEVICE_SUPER_MAGIC: c_long = 0x00009fa2; + pub const XENFS_SUPER_MAGIC: c_long = 0xabba1974; + pub const NSFS_MAGIC: c_long = 0x6e736673; + } else if #[cfg(target_arch = "s390x")] { + pub const ADFS_SUPER_MAGIC: c_uint = 0x0000adf5; + pub const AFFS_SUPER_MAGIC: c_uint = 0x0000adff; + pub const AFS_SUPER_MAGIC: c_uint = 0x5346414f; + pub const AUTOFS_SUPER_MAGIC: c_uint = 0x0187; + pub const BPF_FS_MAGIC: c_uint = 0xcafe4a11; + pub const BTRFS_SUPER_MAGIC: c_uint = 0x9123683e; + pub const CGROUP2_SUPER_MAGIC: c_uint = 0x63677270; + pub const CGROUP_SUPER_MAGIC: c_uint = 0x27e0eb; + pub const CODA_SUPER_MAGIC: c_uint = 0x73757245; + pub const CRAMFS_MAGIC: c_uint = 0x28cd3d45; + pub const DEBUGFS_MAGIC: c_uint = 0x64626720; + pub const DEVPTS_SUPER_MAGIC: c_uint = 0x1cd1; + pub const ECRYPTFS_SUPER_MAGIC: c_uint = 0xf15f; + pub const EFS_SUPER_MAGIC: c_uint = 0x00414a53; + pub const EXT2_SUPER_MAGIC: c_uint = 0x0000ef53; + pub const 
EXT3_SUPER_MAGIC: c_uint = 0x0000ef53; + pub const EXT4_SUPER_MAGIC: c_uint = 0x0000ef53; + pub const F2FS_SUPER_MAGIC: c_uint = 0xf2f52010; + pub const FUSE_SUPER_MAGIC: c_uint = 0x65735546; + pub const FUTEXFS_SUPER_MAGIC: c_uint = 0xbad1dea; + pub const HOSTFS_SUPER_MAGIC: c_uint = 0x00c0ffee; + pub const HPFS_SUPER_MAGIC: c_uint = 0xf995e849; + pub const HUGETLBFS_MAGIC: c_uint = 0x958458f6; + pub const ISOFS_SUPER_MAGIC: c_uint = 0x00009660; + pub const JFFS2_SUPER_MAGIC: c_uint = 0x000072b6; + pub const MINIX2_SUPER_MAGIC2: c_uint = 0x00002478; + pub const MINIX2_SUPER_MAGIC: c_uint = 0x00002468; + pub const MINIX3_SUPER_MAGIC: c_uint = 0x4d5a; + pub const MINIX_SUPER_MAGIC2: c_uint = 0x0000138f; + pub const MINIX_SUPER_MAGIC: c_uint = 0x0000137f; + pub const MSDOS_SUPER_MAGIC: c_uint = 0x00004d44; + pub const NCP_SUPER_MAGIC: c_uint = 0x0000564c; + pub const NFS_SUPER_MAGIC: c_uint = 0x00006969; + pub const NILFS_SUPER_MAGIC: c_uint = 0x3434; + pub const OCFS2_SUPER_MAGIC: c_uint = 0x7461636f; + pub const OPENPROM_SUPER_MAGIC: c_uint = 0x00009fa1; + pub const OVERLAYFS_SUPER_MAGIC: c_uint = 0x794c7630; + pub const PROC_SUPER_MAGIC: c_uint = 0x00009fa0; + pub const QNX4_SUPER_MAGIC: c_uint = 0x0000002f; + pub const QNX6_SUPER_MAGIC: c_uint = 0x68191122; + pub const RDTGROUP_SUPER_MAGIC: c_uint = 0x7655821; + pub const REISERFS_SUPER_MAGIC: c_uint = 0x52654973; + pub const SECURITYFS_MAGIC: c_uint = 0x73636673; + pub const SELINUX_MAGIC: c_uint = 0xf97cff8c; + pub const SMACK_MAGIC: c_uint = 0x43415d53; + pub const SMB_SUPER_MAGIC: c_uint = 0x0000517b; + pub const SYSFS_MAGIC: c_uint = 0x62656572; + pub const TMPFS_MAGIC: c_uint = 0x01021994; + pub const TRACEFS_MAGIC: c_uint = 0x74726163; + pub const UDF_SUPER_MAGIC: c_uint = 0x15013346; + pub const USBDEVICE_SUPER_MAGIC: c_uint = 0x00009fa2; + pub const XENFS_SUPER_MAGIC: c_uint = 0xabba1974; + pub const NSFS_MAGIC: c_uint = 0x6e736673; + } +} + +cfg_if! { + if #[cfg(any( + target_env = "gnu", + target_os = "android", + all(target_env = "musl", musl_v1_2_3) + ))] { + pub const AT_STATX_SYNC_TYPE: c_int = 0x6000; + pub const AT_STATX_SYNC_AS_STAT: c_int = 0x0000; + pub const AT_STATX_FORCE_SYNC: c_int = 0x2000; + pub const AT_STATX_DONT_SYNC: c_int = 0x4000; + pub const STATX_TYPE: c_uint = 0x0001; + pub const STATX_MODE: c_uint = 0x0002; + pub const STATX_NLINK: c_uint = 0x0004; + pub const STATX_UID: c_uint = 0x0008; + pub const STATX_GID: c_uint = 0x0010; + pub const STATX_ATIME: c_uint = 0x0020; + pub const STATX_MTIME: c_uint = 0x0040; + pub const STATX_CTIME: c_uint = 0x0080; + pub const STATX_INO: c_uint = 0x0100; + pub const STATX_SIZE: c_uint = 0x0200; + pub const STATX_BLOCKS: c_uint = 0x0400; + pub const STATX_BASIC_STATS: c_uint = 0x07ff; + pub const STATX_BTIME: c_uint = 0x0800; + pub const STATX_ALL: c_uint = 0x0fff; + pub const STATX_MNT_ID: c_uint = 0x1000; + pub const STATX_DIOALIGN: c_uint = 0x2000; + pub const STATX__RESERVED: c_int = 0x80000000; + pub const STATX_ATTR_COMPRESSED: c_int = 0x0004; + pub const STATX_ATTR_IMMUTABLE: c_int = 0x0010; + pub const STATX_ATTR_APPEND: c_int = 0x0020; + pub const STATX_ATTR_NODUMP: c_int = 0x0040; + pub const STATX_ATTR_ENCRYPTED: c_int = 0x0800; + pub const STATX_ATTR_AUTOMOUNT: c_int = 0x1000; + pub const STATX_ATTR_MOUNT_ROOT: c_int = 0x2000; + pub const STATX_ATTR_VERITY: c_int = 0x100000; + pub const STATX_ATTR_DAX: c_int = 0x200000; + } +} + +// https://github.com/search?q=repo%3Atorvalds%2Flinux+%22%23define+_IOC_NONE%22&type=code +cfg_if! 
{
+    if #[cfg(not(target_os = "emscripten"))] {
+        const _IOC_NRBITS: u32 = 8;
+        const _IOC_TYPEBITS: u32 = 8;
+
+        cfg_if! {
+            if #[cfg(any(
+                any(target_arch = "powerpc", target_arch = "powerpc64"),
+                any(target_arch = "sparc", target_arch = "sparc64"),
+                any(target_arch = "mips", target_arch = "mips64"),
+            ))] {
+                // https://github.com/torvalds/linux/blob/b311c1b497e51a628aa89e7cb954481e5f9dced2/arch/powerpc/include/uapi/asm/ioctl.h
+                // https://github.com/torvalds/linux/blob/b311c1b497e51a628aa89e7cb954481e5f9dced2/arch/sparc/include/uapi/asm/ioctl.h
+                // https://github.com/torvalds/linux/blob/b311c1b497e51a628aa89e7cb954481e5f9dced2/arch/mips/include/uapi/asm/ioctl.h
+
+                const _IOC_SIZEBITS: u32 = 13;
+                const _IOC_DIRBITS: u32 = 3;
+
+                const _IOC_NONE: u32 = 1;
+                const _IOC_READ: u32 = 2;
+                const _IOC_WRITE: u32 = 4;
+            } else {
+                // https://github.com/torvalds/linux/blob/b311c1b497e51a628aa89e7cb954481e5f9dced2/include/uapi/asm-generic/ioctl.h
+
+                const _IOC_SIZEBITS: u32 = 14;
+                const _IOC_DIRBITS: u32 = 2;
+
+                const _IOC_NONE: u32 = 0;
+                const _IOC_WRITE: u32 = 1;
+                const _IOC_READ: u32 = 2;
+            }
+        }
+        const _IOC_NRMASK: u32 = (1 << _IOC_NRBITS) - 1;
+        const _IOC_TYPEMASK: u32 = (1 << _IOC_TYPEBITS) - 1;
+        const _IOC_SIZEMASK: u32 = (1 << _IOC_SIZEBITS) - 1;
+        const _IOC_DIRMASK: u32 = (1 << _IOC_DIRBITS) - 1;
+
+        const _IOC_NRSHIFT: u32 = 0;
+        const _IOC_TYPESHIFT: u32 = _IOC_NRSHIFT + _IOC_NRBITS;
+        const _IOC_SIZESHIFT: u32 = _IOC_TYPESHIFT + _IOC_TYPEBITS;
+        const _IOC_DIRSHIFT: u32 = _IOC_SIZESHIFT + _IOC_SIZEBITS;
+
+        // adapted from https://github.com/torvalds/linux/blob/8a696a29c6905594e4abf78eaafcb62165ac61f1/rust/kernel/ioctl.rs
+
+        /// Build an ioctl number, analogous to the C macro of the same name.
+        const fn _IOC(dir: u32, ty: u32, nr: u32, size: usize) -> Ioctl {
+            core::debug_assert!(dir <= _IOC_DIRMASK);
+            core::debug_assert!(ty <= _IOC_TYPEMASK);
+            core::debug_assert!(nr <= _IOC_NRMASK);
+            core::debug_assert!(size <= (_IOC_SIZEMASK as usize));
+
+            ((dir << _IOC_DIRSHIFT)
+                | (ty << _IOC_TYPESHIFT)
+                | (nr << _IOC_NRSHIFT)
+                | ((size as u32) << _IOC_SIZESHIFT)) as Ioctl
+        }
+
+        /// Build an ioctl number for an argumentless ioctl.
+        pub const fn _IO(ty: u32, nr: u32) -> Ioctl {
+            _IOC(_IOC_NONE, ty, nr, 0)
+        }
+
+        /// Build an ioctl number for a read-only ioctl.
+        pub const fn _IOR<T>(ty: u32, nr: u32) -> Ioctl {
+            _IOC(_IOC_READ, ty, nr, size_of::<T>())
+        }
+
+        /// Build an ioctl number for a write-only ioctl.
+        pub const fn _IOW<T>(ty: u32, nr: u32) -> Ioctl {
+            _IOC(_IOC_WRITE, ty, nr, size_of::<T>())
+        }
+
+        /// Build an ioctl number for a read-write ioctl.
+        pub const fn _IOWR<T>(ty: u32, nr: u32) -> Ioctl {
+            _IOC(_IOC_READ | _IOC_WRITE, ty, nr, size_of::<T>())
+        }
+
+        extern "C" {
+            #[cfg_attr(gnu_time_bits64, link_name = "__ioctl_time64")]
+            pub fn ioctl(fd: c_int, request: Ioctl, ...) -> c_int;
+        }
+    }
+}
+
+const fn CMSG_ALIGN(len: usize) -> usize {
+    (len + size_of::<usize>() - 1) & !(size_of::<usize>() - 1)
+}
+
+f!
{ + pub fn CMSG_FIRSTHDR(mhdr: *const crate::msghdr) -> *mut crate::cmsghdr { + if (*mhdr).msg_controllen as usize >= size_of::() { + (*mhdr).msg_control.cast::() + } else { + core::ptr::null_mut::() + } + } + + pub fn CMSG_DATA(cmsg: *const crate::cmsghdr) -> *mut c_uchar { + cmsg.offset(1) as *mut c_uchar + } + + pub const fn CMSG_SPACE(length: c_uint) -> c_uint { + (CMSG_ALIGN(length as usize) + CMSG_ALIGN(size_of::())) as c_uint + } + + pub const fn CMSG_LEN(length: c_uint) -> c_uint { + CMSG_ALIGN(size_of::()) as c_uint + length + } + + pub fn FD_CLR(fd: c_int, set: *mut fd_set) -> () { + let fd = fd as usize; + let size = size_of_val(&(*set).fds_bits[0]) * 8; + (*set).fds_bits[fd / size] &= !(1 << (fd % size)); + return; + } + + pub fn FD_ISSET(fd: c_int, set: *const fd_set) -> bool { + let fd = fd as usize; + let size = size_of_val(&(*set).fds_bits[0]) * 8; + return ((*set).fds_bits[fd / size] & (1 << (fd % size))) != 0; + } + + pub fn FD_SET(fd: c_int, set: *mut fd_set) -> () { + let fd = fd as usize; + let size = size_of_val(&(*set).fds_bits[0]) * 8; + (*set).fds_bits[fd / size] |= 1 << (fd % size); + return; + } + + pub fn FD_ZERO(set: *mut fd_set) -> () { + for slot in &mut (*set).fds_bits { + *slot = 0; + } + } +} + +safe_f! { + pub fn SIGRTMAX() -> c_int { + unsafe { __libc_current_sigrtmax() } + } + + pub fn SIGRTMIN() -> c_int { + unsafe { __libc_current_sigrtmin() } + } + + pub const fn WIFSTOPPED(status: c_int) -> bool { + (status & 0xff) == 0x7f + } + + pub const fn WSTOPSIG(status: c_int) -> c_int { + (status >> 8) & 0xff + } + + pub const fn WIFCONTINUED(status: c_int) -> bool { + status == 0xffff + } + + pub const fn WIFSIGNALED(status: c_int) -> bool { + ((status & 0x7f) + 1) as i8 >= 2 + } + + pub const fn WTERMSIG(status: c_int) -> c_int { + status & 0x7f + } + + pub const fn WIFEXITED(status: c_int) -> bool { + (status & 0x7f) == 0 + } + + pub const fn WEXITSTATUS(status: c_int) -> c_int { + (status >> 8) & 0xff + } + + pub const fn WCOREDUMP(status: c_int) -> bool { + (status & 0x80) != 0 + } + + pub const fn W_EXITCODE(ret: c_int, sig: c_int) -> c_int { + (ret << 8) | sig + } + + pub const fn W_STOPCODE(sig: c_int) -> c_int { + (sig << 8) | 0x7f + } + + pub const fn QCMD(cmd: c_int, type_: c_int) -> c_int { + (cmd << 8) | (type_ & 0x00ff) + } + + pub const fn IPOPT_COPIED(o: u8) -> u8 { + o & IPOPT_COPY + } + + pub const fn IPOPT_CLASS(o: u8) -> u8 { + o & IPOPT_CLASS_MASK + } + + pub const fn IPOPT_NUMBER(o: u8) -> u8 { + o & IPOPT_NUMBER_MASK + } + + pub const fn IPTOS_ECN(x: u8) -> u8 { + x & crate::IPTOS_ECN_MASK + } + + #[allow(ellipsis_inclusive_range_patterns)] + pub const fn KERNEL_VERSION(a: u32, b: u32, c: u32) -> u32 { + ((a << 16) + (b << 8)) + if c > 255 { 255 } else { c } + } +} + +extern "C" { + #[doc(hidden)] + pub fn __libc_current_sigrtmax() -> c_int; + #[doc(hidden)] + pub fn __libc_current_sigrtmin() -> c_int; + + pub fn sem_destroy(sem: *mut sem_t) -> c_int; + pub fn sem_init(sem: *mut sem_t, pshared: c_int, value: c_uint) -> c_int; + pub fn fdatasync(fd: c_int) -> c_int; + pub fn mincore(addr: *mut c_void, len: size_t, vec: *mut c_uchar) -> c_int; + + #[cfg_attr(gnu_time_bits64, link_name = "__clock_getres64")] + pub fn clock_getres(clk_id: crate::clockid_t, tp: *mut crate::timespec) -> c_int; + #[cfg_attr(gnu_time_bits64, link_name = "__clock_gettime64")] + pub fn clock_gettime(clk_id: crate::clockid_t, tp: *mut crate::timespec) -> c_int; + #[cfg_attr(gnu_time_bits64, link_name = "__clock_settime64")] + pub fn clock_settime(clk_id: 
crate::clockid_t, tp: *const crate::timespec) -> c_int; + pub fn clock_getcpuclockid(pid: crate::pid_t, clk_id: *mut crate::clockid_t) -> c_int; + + pub fn dirfd(dirp: *mut crate::DIR) -> c_int; + + pub fn pthread_getattr_np(native: crate::pthread_t, attr: *mut crate::pthread_attr_t) -> c_int; + pub fn pthread_attr_getstack( + attr: *const crate::pthread_attr_t, + stackaddr: *mut *mut c_void, + stacksize: *mut size_t, + ) -> c_int; + pub fn pthread_attr_setstack( + attr: *mut crate::pthread_attr_t, + stackaddr: *mut c_void, + stacksize: size_t, + ) -> c_int; + pub fn memalign(align: size_t, size: size_t) -> *mut c_void; + pub fn setgroups(ngroups: size_t, ptr: *const crate::gid_t) -> c_int; + pub fn pipe2(fds: *mut c_int, flags: c_int) -> c_int; + #[cfg_attr(gnu_file_offset_bits64, link_name = "statfs64")] + pub fn statfs(path: *const c_char, buf: *mut statfs) -> c_int; + #[cfg_attr(gnu_file_offset_bits64, link_name = "fstatfs64")] + pub fn fstatfs(fd: c_int, buf: *mut statfs) -> c_int; + pub fn memrchr(cx: *const c_void, c: c_int, n: size_t) -> *mut c_void; + #[cfg_attr(gnu_file_offset_bits64, link_name = "posix_fadvise64")] + pub fn posix_fadvise(fd: c_int, offset: off_t, len: off_t, advise: c_int) -> c_int; + #[cfg_attr(gnu_time_bits64, link_name = "__futimens64")] + pub fn futimens(fd: c_int, times: *const crate::timespec) -> c_int; + #[cfg_attr(gnu_time_bits64, link_name = "__utimensat64")] + pub fn utimensat( + dirfd: c_int, + path: *const c_char, + times: *const crate::timespec, + flag: c_int, + ) -> c_int; + pub fn duplocale(base: crate::locale_t) -> crate::locale_t; + pub fn freelocale(loc: crate::locale_t); + pub fn newlocale(mask: c_int, locale: *const c_char, base: crate::locale_t) -> crate::locale_t; + pub fn uselocale(loc: crate::locale_t) -> crate::locale_t; + pub fn mknodat(dirfd: c_int, pathname: *const c_char, mode: mode_t, dev: dev_t) -> c_int; + pub fn pthread_condattr_getclock( + attr: *const pthread_condattr_t, + clock_id: *mut clockid_t, + ) -> c_int; + pub fn pthread_condattr_setclock( + attr: *mut pthread_condattr_t, + clock_id: crate::clockid_t, + ) -> c_int; + pub fn pthread_condattr_setpshared(attr: *mut pthread_condattr_t, pshared: c_int) -> c_int; + pub fn pthread_mutexattr_setpshared(attr: *mut pthread_mutexattr_t, pshared: c_int) -> c_int; + pub fn pthread_rwlockattr_getpshared( + attr: *const pthread_rwlockattr_t, + val: *mut c_int, + ) -> c_int; + pub fn pthread_rwlockattr_setpshared(attr: *mut pthread_rwlockattr_t, val: c_int) -> c_int; + pub fn ptsname_r(fd: c_int, buf: *mut c_char, buflen: size_t) -> c_int; + pub fn clearenv() -> c_int; + pub fn waitid( + idtype: idtype_t, + id: id_t, + infop: *mut crate::siginfo_t, + options: c_int, + ) -> c_int; + pub fn getresuid( + ruid: *mut crate::uid_t, + euid: *mut crate::uid_t, + suid: *mut crate::uid_t, + ) -> c_int; + pub fn getresgid( + rgid: *mut crate::gid_t, + egid: *mut crate::gid_t, + sgid: *mut crate::gid_t, + ) -> c_int; + pub fn acct(filename: *const c_char) -> c_int; + pub fn brk(addr: *mut c_void) -> c_int; + pub fn sbrk(increment: intptr_t) -> *mut c_void; + #[deprecated( + since = "0.2.66", + note = "causes memory corruption, see rust-lang/libc#1596" + )] + pub fn vfork() -> crate::pid_t; + pub fn setresgid(rgid: crate::gid_t, egid: crate::gid_t, sgid: crate::gid_t) -> c_int; + pub fn setresuid(ruid: crate::uid_t, euid: crate::uid_t, suid: crate::uid_t) -> c_int; + #[cfg_attr(gnu_time_bits64, link_name = "__wait4_time64")] + pub fn wait4( + pid: crate::pid_t, + status: *mut c_int, + options: 
c_int, + rusage: *mut crate::rusage, + ) -> crate::pid_t; + pub fn login_tty(fd: c_int) -> c_int; + + // DIFF(main): changed to `*const *mut` in e77f551de9 + pub fn execvpe( + file: *const c_char, + argv: *const *const c_char, + envp: *const *const c_char, + ) -> c_int; + pub fn fexecve(fd: c_int, argv: *const *const c_char, envp: *const *const c_char) -> c_int; + + pub fn getifaddrs(ifap: *mut *mut crate::ifaddrs) -> c_int; + pub fn freeifaddrs(ifa: *mut crate::ifaddrs); + pub fn bind( + socket: c_int, + address: *const crate::sockaddr, + address_len: crate::socklen_t, + ) -> c_int; + + pub fn writev(fd: c_int, iov: *const crate::iovec, iovcnt: c_int) -> ssize_t; + pub fn readv(fd: c_int, iov: *const crate::iovec, iovcnt: c_int) -> ssize_t; + + #[cfg_attr(gnu_time_bits64, link_name = "__sendmsg64")] + pub fn sendmsg(fd: c_int, msg: *const crate::msghdr, flags: c_int) -> ssize_t; + #[cfg_attr(gnu_time_bits64, link_name = "__recvmsg64")] + pub fn recvmsg(fd: c_int, msg: *mut crate::msghdr, flags: c_int) -> ssize_t; + pub fn uname(buf: *mut crate::utsname) -> c_int; + + pub fn strchrnul(s: *const c_char, c: c_int) -> *mut c_char; + + pub fn strftime( + s: *mut c_char, + max: size_t, + format: *const c_char, + tm: *const crate::tm, + ) -> size_t; + pub fn strftime_l( + s: *mut c_char, + max: size_t, + format: *const c_char, + tm: *const crate::tm, + locale: crate::locale_t, + ) -> size_t; + pub fn strptime(s: *const c_char, format: *const c_char, tm: *mut crate::tm) -> *mut c_char; + + #[cfg_attr(gnu_file_offset_bits64, link_name = "mkostemp64")] + pub fn mkostemp(template: *mut c_char, flags: c_int) -> c_int; + #[cfg_attr(gnu_file_offset_bits64, link_name = "mkostemps64")] + pub fn mkostemps(template: *mut c_char, suffixlen: c_int, flags: c_int) -> c_int; + + pub fn getdomainname(name: *mut c_char, len: size_t) -> c_int; + pub fn setdomainname(name: *const c_char, len: size_t) -> c_int; +} + +// LFS64 extensions +// +// * musl and Emscripten has 64-bit versions only so aliases the LFS64 symbols to the standard ones +// * ulibc doesn't have preadv64/pwritev64 +cfg_if! { + if #[cfg(not(any(target_env = "musl", target_os = "emscripten")))] { + extern "C" { + pub fn fstatfs64(fd: c_int, buf: *mut statfs64) -> c_int; + pub fn statvfs64(path: *const c_char, buf: *mut statvfs64) -> c_int; + pub fn fstatvfs64(fd: c_int, buf: *mut statvfs64) -> c_int; + pub fn statfs64(path: *const c_char, buf: *mut statfs64) -> c_int; + pub fn creat64(path: *const c_char, mode: mode_t) -> c_int; + #[cfg_attr(gnu_time_bits64, link_name = "__fstat64_time64")] + pub fn fstat64(fildes: c_int, buf: *mut stat64) -> c_int; + #[cfg_attr(gnu_time_bits64, link_name = "__fstatat64_time64")] + pub fn fstatat64( + dirfd: c_int, + pathname: *const c_char, + buf: *mut stat64, + flags: c_int, + ) -> c_int; + pub fn ftruncate64(fd: c_int, length: off64_t) -> c_int; + pub fn lseek64(fd: c_int, offset: off64_t, whence: c_int) -> off64_t; + #[cfg_attr(gnu_time_bits64, link_name = "__lstat64_time64")] + pub fn lstat64(path: *const c_char, buf: *mut stat64) -> c_int; + pub fn mmap64( + addr: *mut c_void, + len: size_t, + prot: c_int, + flags: c_int, + fd: c_int, + offset: off64_t, + ) -> *mut c_void; + pub fn open64(path: *const c_char, oflag: c_int, ...) -> c_int; + pub fn openat64(fd: c_int, path: *const c_char, oflag: c_int, ...) 
-> c_int; + pub fn posix_fadvise64( + fd: c_int, + offset: off64_t, + len: off64_t, + advise: c_int, + ) -> c_int; + pub fn pread64(fd: c_int, buf: *mut c_void, count: size_t, offset: off64_t) -> ssize_t; + pub fn pwrite64( + fd: c_int, + buf: *const c_void, + count: size_t, + offset: off64_t, + ) -> ssize_t; + pub fn readdir64(dirp: *mut crate::DIR) -> *mut crate::dirent64; + pub fn readdir64_r( + dirp: *mut crate::DIR, + entry: *mut crate::dirent64, + result: *mut *mut crate::dirent64, + ) -> c_int; + #[cfg_attr(gnu_time_bits64, link_name = "__stat64_time64")] + pub fn stat64(path: *const c_char, buf: *mut stat64) -> c_int; + pub fn truncate64(path: *const c_char, length: off64_t) -> c_int; + } + } +} + +cfg_if! { + if #[cfg(not(any( + target_env = "uclibc", + target_env = "musl", + target_os = "emscripten" + )))] { + extern "C" { + pub fn preadv64( + fd: c_int, + iov: *const crate::iovec, + iovcnt: c_int, + offset: off64_t, + ) -> ssize_t; + pub fn pwritev64( + fd: c_int, + iov: *const crate::iovec, + iovcnt: c_int, + offset: off64_t, + ) -> ssize_t; + } + } +} + +cfg_if! { + if #[cfg(not(target_env = "uclibc"))] { + extern "C" { + // uclibc has separate non-const version of this function + pub fn forkpty( + amaster: *mut c_int, + name: *mut c_char, + termp: *const termios, + winp: *const crate::winsize, + ) -> crate::pid_t; + // uclibc has separate non-const version of this function + pub fn openpty( + amaster: *mut c_int, + aslave: *mut c_int, + name: *mut c_char, + termp: *const termios, + winp: *const crate::winsize, + ) -> c_int; + } + } +} + +// The statx syscall, available on some libcs. +cfg_if! { + if #[cfg(any( + target_env = "gnu", + target_os = "android", + all(target_env = "musl", musl_v1_2_3) + ))] { + extern "C" { + pub fn statx( + dirfd: c_int, + pathname: *const c_char, + flags: c_int, + mask: c_uint, + statxbuf: *mut statx, + ) -> c_int; + } + } +} + +cfg_if! { + if #[cfg(target_os = "emscripten")] { + mod emscripten; + pub use self::emscripten::*; + } else if #[cfg(target_os = "linux")] { + mod linux; + pub use self::linux::*; + } else if #[cfg(target_os = "l4re")] { + mod linux; + pub use self::linux::*; + } else if #[cfg(target_os = "android")] { + mod android; + pub use self::android::*; + } else { + // Unknown target_os + } +} diff --git a/vendor/libc/src/unix/mod.rs b/vendor/libc/src/unix/mod.rs new file mode 100644 index 00000000000000..6ba5d87de7ca09 --- /dev/null +++ b/vendor/libc/src/unix/mod.rs @@ -0,0 +1,1901 @@ +//! Definitions found commonly among almost all Unix derivatives +//! +//! More functions and definitions can be found in the more specific modules +//! according to the platform in question. + +use crate::prelude::*; + +pub type intmax_t = i64; +pub type uintmax_t = u64; + +pub type size_t = usize; +pub type ptrdiff_t = isize; +pub type intptr_t = isize; +pub type uintptr_t = usize; +pub type ssize_t = isize; + +pub type pid_t = i32; +pub type in_addr_t = u32; +pub type in_port_t = u16; +pub type sighandler_t = size_t; +pub type cc_t = c_uchar; + +cfg_if! { + if #[cfg(any( + target_os = "espidf", + target_os = "horizon", + target_os = "vita" + ))] { + pub type uid_t = c_ushort; + pub type gid_t = c_ushort; + } else if #[cfg(target_os = "nto")] { + pub type uid_t = i32; + pub type gid_t = i32; + } else { + pub type uid_t = u32; + pub type gid_t = u32; + } +} + +missing! { + #[derive(Debug)] + pub enum DIR {} +} +pub type locale_t = *mut c_void; + +s! 
{ + pub struct group { + pub gr_name: *mut c_char, + pub gr_passwd: *mut c_char, + pub gr_gid: crate::gid_t, + pub gr_mem: *mut *mut c_char, + } + + pub struct utimbuf { + pub actime: time_t, + pub modtime: time_t, + } + + pub struct timeval { + pub tv_sec: time_t, + #[cfg(not(gnu_time_bits64))] + pub tv_usec: suseconds_t, + // For 64 bit time on 32 bit linux glibc, suseconds_t is still + // a 32 bit type. Use __suseconds64_t instead + #[cfg(gnu_time_bits64)] + pub tv_usec: __suseconds64_t, + } + + // linux x32 compatibility + // See https://sourceware.org/bugzilla/show_bug.cgi?id=16437 + #[cfg(not(target_env = "gnu"))] + pub struct timespec { + pub tv_sec: time_t, + #[cfg(all(target_arch = "x86_64", target_pointer_width = "32"))] + pub tv_nsec: i64, + #[cfg(not(all(target_arch = "x86_64", target_pointer_width = "32")))] + pub tv_nsec: c_long, + } + + pub struct rlimit { + pub rlim_cur: rlim_t, + pub rlim_max: rlim_t, + } + + pub struct rusage { + pub ru_utime: timeval, + pub ru_stime: timeval, + pub ru_maxrss: c_long, + #[cfg(all(target_arch = "x86_64", target_pointer_width = "32"))] + __pad1: u32, + pub ru_ixrss: c_long, + #[cfg(all(target_arch = "x86_64", target_pointer_width = "32"))] + __pad2: u32, + pub ru_idrss: c_long, + #[cfg(all(target_arch = "x86_64", target_pointer_width = "32"))] + __pad3: u32, + pub ru_isrss: c_long, + #[cfg(all(target_arch = "x86_64", target_pointer_width = "32"))] + __pad4: u32, + pub ru_minflt: c_long, + #[cfg(all(target_arch = "x86_64", target_pointer_width = "32"))] + __pad5: u32, + pub ru_majflt: c_long, + #[cfg(all(target_arch = "x86_64", target_pointer_width = "32"))] + __pad6: u32, + pub ru_nswap: c_long, + #[cfg(all(target_arch = "x86_64", target_pointer_width = "32"))] + __pad7: u32, + pub ru_inblock: c_long, + #[cfg(all(target_arch = "x86_64", target_pointer_width = "32"))] + __pad8: u32, + pub ru_oublock: c_long, + #[cfg(all(target_arch = "x86_64", target_pointer_width = "32"))] + __pad9: u32, + pub ru_msgsnd: c_long, + #[cfg(all(target_arch = "x86_64", target_pointer_width = "32"))] + __pad10: u32, + pub ru_msgrcv: c_long, + #[cfg(all(target_arch = "x86_64", target_pointer_width = "32"))] + __pad11: u32, + pub ru_nsignals: c_long, + #[cfg(all(target_arch = "x86_64", target_pointer_width = "32"))] + __pad12: u32, + pub ru_nvcsw: c_long, + #[cfg(all(target_arch = "x86_64", target_pointer_width = "32"))] + __pad13: u32, + pub ru_nivcsw: c_long, + #[cfg(all(target_arch = "x86_64", target_pointer_width = "32"))] + __pad14: u32, + + #[cfg(any(target_env = "musl", target_env = "ohos", target_os = "emscripten"))] + __reserved: [c_long; 16], + } + + pub struct ipv6_mreq { + pub ipv6mr_multiaddr: in6_addr, + #[cfg(target_os = "android")] + pub ipv6mr_interface: c_int, + #[cfg(not(target_os = "android"))] + pub ipv6mr_interface: c_uint, + } + + #[cfg(not(target_os = "cygwin"))] + pub struct hostent { + pub h_name: *mut c_char, + pub h_aliases: *mut *mut c_char, + pub h_addrtype: c_int, + pub h_length: c_int, + pub h_addr_list: *mut *mut c_char, + } + + pub struct iovec { + pub iov_base: *mut c_void, + pub iov_len: size_t, + } + + pub struct pollfd { + pub fd: c_int, + pub events: c_short, + pub revents: c_short, + } + + pub struct winsize { + pub ws_row: c_ushort, + pub ws_col: c_ushort, + pub ws_xpixel: c_ushort, + pub ws_ypixel: c_ushort, + } + + #[cfg(not(target_os = "cygwin"))] + pub struct linger { + pub l_onoff: c_int, + pub l_linger: c_int, + } + + pub struct sigval { + // Actually a union of an int and a void* + pub sival_ptr: *mut c_void, + } + + 
// + pub struct itimerval { + pub it_interval: crate::timeval, + pub it_value: crate::timeval, + } + + // + pub struct tms { + pub tms_utime: crate::clock_t, + pub tms_stime: crate::clock_t, + pub tms_cutime: crate::clock_t, + pub tms_cstime: crate::clock_t, + } + + pub struct servent { + pub s_name: *mut c_char, + pub s_aliases: *mut *mut c_char, + #[cfg(target_os = "cygwin")] + pub s_port: c_short, + #[cfg(not(target_os = "cygwin"))] + pub s_port: c_int, + pub s_proto: *mut c_char, + } + + pub struct protoent { + pub p_name: *mut c_char, + pub p_aliases: *mut *mut c_char, + #[cfg(not(target_os = "cygwin"))] + pub p_proto: c_int, + #[cfg(target_os = "cygwin")] + pub p_proto: c_short, + } + + #[repr(align(4))] + pub struct in6_addr { + pub s6_addr: [u8; 16], + } +} + +pub const INT_MIN: c_int = -2147483648; +pub const INT_MAX: c_int = 2147483647; + +pub const SIG_DFL: sighandler_t = 0 as sighandler_t; +pub const SIG_IGN: sighandler_t = 1 as sighandler_t; +pub const SIG_ERR: sighandler_t = !0 as sighandler_t; + +cfg_if! { + if #[cfg(all(not(target_os = "nto"), not(target_os = "aix")))] { + pub const DT_UNKNOWN: u8 = 0; + pub const DT_FIFO: u8 = 1; + pub const DT_CHR: u8 = 2; + pub const DT_DIR: u8 = 4; + pub const DT_BLK: u8 = 6; + pub const DT_REG: u8 = 8; + pub const DT_LNK: u8 = 10; + pub const DT_SOCK: u8 = 12; + } +} +cfg_if! { + if #[cfg(not(target_os = "redox"))] { + pub const FD_CLOEXEC: c_int = 0x1; + } +} + +cfg_if! { + if #[cfg(not(target_os = "nto"))] { + pub const USRQUOTA: c_int = 0; + pub const GRPQUOTA: c_int = 1; + } +} +pub const SIGIOT: c_int = 6; + +pub const S_ISUID: mode_t = 0o4000; +pub const S_ISGID: mode_t = 0o2000; +pub const S_ISVTX: mode_t = 0o1000; + +cfg_if! { + if #[cfg(not(any( + target_os = "haiku", + target_os = "illumos", + target_os = "solaris", + target_os = "cygwin" + )))] { + pub const IF_NAMESIZE: size_t = 16; + pub const IFNAMSIZ: size_t = IF_NAMESIZE; + } +} + +pub const LOG_EMERG: c_int = 0; +pub const LOG_ALERT: c_int = 1; +pub const LOG_CRIT: c_int = 2; +pub const LOG_ERR: c_int = 3; +pub const LOG_WARNING: c_int = 4; +pub const LOG_NOTICE: c_int = 5; +pub const LOG_INFO: c_int = 6; +pub const LOG_DEBUG: c_int = 7; + +pub const LOG_KERN: c_int = 0; +pub const LOG_USER: c_int = 1 << 3; +pub const LOG_MAIL: c_int = 2 << 3; +pub const LOG_DAEMON: c_int = 3 << 3; +pub const LOG_AUTH: c_int = 4 << 3; +pub const LOG_SYSLOG: c_int = 5 << 3; +pub const LOG_LPR: c_int = 6 << 3; +pub const LOG_NEWS: c_int = 7 << 3; +pub const LOG_UUCP: c_int = 8 << 3; +pub const LOG_LOCAL0: c_int = 16 << 3; +pub const LOG_LOCAL1: c_int = 17 << 3; +pub const LOG_LOCAL2: c_int = 18 << 3; +pub const LOG_LOCAL3: c_int = 19 << 3; +pub const LOG_LOCAL4: c_int = 20 << 3; +pub const LOG_LOCAL5: c_int = 21 << 3; +pub const LOG_LOCAL6: c_int = 22 << 3; +pub const LOG_LOCAL7: c_int = 23 << 3; + +cfg_if! { + if #[cfg(not(target_os = "haiku"))] { + pub const LOG_PID: c_int = 0x01; + pub const LOG_CONS: c_int = 0x02; + pub const LOG_ODELAY: c_int = 0x04; + pub const LOG_NDELAY: c_int = 0x08; + pub const LOG_NOWAIT: c_int = 0x10; + } +} +pub const LOG_PRIMASK: c_int = 7; +pub const LOG_FACMASK: c_int = 0x3f8; + +cfg_if! 
{ + if #[cfg(not(target_os = "nto"))] { + pub const PRIO_MIN: c_int = -20; + pub const PRIO_MAX: c_int = 20; + } +} +pub const IPPROTO_ICMP: c_int = 1; +pub const IPPROTO_ICMPV6: c_int = 58; +pub const IPPROTO_TCP: c_int = 6; +pub const IPPROTO_UDP: c_int = 17; +pub const IPPROTO_IP: c_int = 0; +pub const IPPROTO_IPV6: c_int = 41; + +pub const INADDR_LOOPBACK: in_addr_t = 2130706433; +pub const INADDR_ANY: in_addr_t = 0; +pub const INADDR_BROADCAST: in_addr_t = 4294967295; +pub const INADDR_NONE: in_addr_t = 4294967295; + +pub const IN6ADDR_LOOPBACK_INIT: in6_addr = in6_addr { + s6_addr: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1], +}; + +pub const IN6ADDR_ANY_INIT: in6_addr = in6_addr { + s6_addr: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], +}; + +pub const ARPOP_REQUEST: u16 = 1; +pub const ARPOP_REPLY: u16 = 2; + +pub const ATF_COM: c_int = 0x02; +pub const ATF_PERM: c_int = 0x04; +pub const ATF_PUBL: c_int = 0x08; +pub const ATF_USETRAILERS: c_int = 0x10; + +cfg_if! { + if #[cfg(any(target_os = "nto", target_os = "aix"))] { + pub const FNM_PERIOD: c_int = 1 << 1; + } else { + pub const FNM_PERIOD: c_int = 1 << 2; + } +} +pub const FNM_NOMATCH: c_int = 1; + +cfg_if! { + if #[cfg(any(target_os = "illumos", target_os = "solaris",))] { + pub const FNM_CASEFOLD: c_int = 1 << 3; + } else if #[cfg(not(target_os = "aix"))] { + pub const FNM_CASEFOLD: c_int = 1 << 4; + } +} + +cfg_if! { + if #[cfg(any( + target_os = "macos", + target_os = "freebsd", + target_os = "android", + target_os = "openbsd", + target_os = "cygwin", + ))] { + pub const FNM_PATHNAME: c_int = 1 << 1; + } else { + pub const FNM_PATHNAME: c_int = 1 << 0; + } +} + +cfg_if! { + if #[cfg(any( + target_os = "macos", + target_os = "freebsd", + target_os = "android", + target_os = "openbsd", + ))] { + pub const FNM_NOESCAPE: c_int = 1 << 0; + } else if #[cfg(target_os = "nto")] { + pub const FNM_NOESCAPE: c_int = 1 << 2; + } else if #[cfg(target_os = "aix")] { + pub const FNM_NOESCAPE: c_int = 1 << 3; + } else { + pub const FNM_NOESCAPE: c_int = 1 << 1; + } +} + +extern "C" { + pub static in6addr_loopback: in6_addr; + pub static in6addr_any: in6_addr; +} + +cfg_if! { + if #[cfg(any( + target_os = "l4re", + target_os = "espidf", + target_os = "nuttx" + ))] { + // required libraries are linked externally for these platforms: + // * L4Re + // * ESP-IDF + // * NuttX + } else if #[cfg(feature = "std")] { + // cargo build, don't pull in anything extra as the std dep + // already pulls in all libs. 
+ } else if #[cfg(all( + any( + all( + target_os = "linux", + any(target_env = "gnu", target_env = "uclibc") + ), + target_os = "cygwin" + ), + feature = "rustc-dep-of-std" + ))] { + #[link( + name = "util", + kind = "static", + modifiers = "-bundle", + cfg(target_feature = "crt-static") + )] + #[link( + name = "rt", + kind = "static", + modifiers = "-bundle", + cfg(target_feature = "crt-static") + )] + #[link( + name = "pthread", + kind = "static", + modifiers = "-bundle", + cfg(target_feature = "crt-static") + )] + #[link( + name = "m", + kind = "static", + modifiers = "-bundle", + cfg(target_feature = "crt-static") + )] + #[link( + name = "dl", + kind = "static", + modifiers = "-bundle", + cfg(target_feature = "crt-static") + )] + #[link( + name = "c", + kind = "static", + modifiers = "-bundle", + cfg(target_feature = "crt-static") + )] + #[link( + name = "gcc_eh", + kind = "static", + modifiers = "-bundle", + cfg(target_feature = "crt-static") + )] + #[link( + name = "gcc", + kind = "static", + modifiers = "-bundle", + cfg(target_feature = "crt-static") + )] + #[link( + name = "c", + kind = "static", + modifiers = "-bundle", + cfg(target_feature = "crt-static") + )] + #[link(name = "util", cfg(not(target_feature = "crt-static")))] + #[link(name = "rt", cfg(not(target_feature = "crt-static")))] + #[link(name = "pthread", cfg(not(target_feature = "crt-static")))] + #[link(name = "m", cfg(not(target_feature = "crt-static")))] + #[link(name = "dl", cfg(not(target_feature = "crt-static")))] + #[link(name = "c", cfg(not(target_feature = "crt-static")))] + extern "C" {} + } else if #[cfg(any(target_env = "musl", target_env = "ohos"))] { + #[cfg_attr( + feature = "rustc-dep-of-std", + link( + name = "c", + kind = "static", + modifiers = "-bundle", + cfg(target_feature = "crt-static") + ) + )] + #[cfg_attr( + feature = "rustc-dep-of-std", + link(name = "c", cfg(not(target_feature = "crt-static"))) + )] + extern "C" {} + } else if #[cfg(target_os = "emscripten")] { + // Don't pass -lc to Emscripten, it breaks. 
See: + // https://github.com/emscripten-core/emscripten/issues/22758 + } else if #[cfg(all(target_os = "android", feature = "rustc-dep-of-std"))] { + #[link( + name = "c", + kind = "static", + modifiers = "-bundle", + cfg(target_feature = "crt-static") + )] + #[link( + name = "m", + kind = "static", + modifiers = "-bundle", + cfg(target_feature = "crt-static") + )] + #[link(name = "m", cfg(not(target_feature = "crt-static")))] + #[link(name = "c", cfg(not(target_feature = "crt-static")))] + extern "C" {} + } else if #[cfg(any( + target_os = "macos", + target_os = "ios", + target_os = "tvos", + target_os = "watchos", + target_os = "visionos", + target_os = "android", + target_os = "openbsd", + target_os = "nto", + ))] { + #[link(name = "c")] + #[link(name = "m")] + extern "C" {} + } else if #[cfg(target_os = "haiku")] { + #[link(name = "root")] + #[link(name = "network")] + extern "C" {} + } else if #[cfg(target_env = "newlib")] { + #[link(name = "c")] + #[link(name = "m")] + extern "C" {} + } else if #[cfg(target_env = "illumos")] { + #[link(name = "c")] + #[link(name = "m")] + extern "C" {} + } else if #[cfg(target_os = "redox")] { + #[cfg_attr( + feature = "rustc-dep-of-std", + link( + name = "c", + kind = "static", + modifiers = "-bundle", + cfg(target_feature = "crt-static") + ) + )] + #[cfg_attr( + feature = "rustc-dep-of-std", + link(name = "c", cfg(not(target_feature = "crt-static"))) + )] + extern "C" {} + } else if #[cfg(target_os = "aix")] { + #[link(name = "c")] + #[link(name = "m")] + #[link(name = "bsd")] + #[link(name = "pthread")] + extern "C" {} + } else { + #[link(name = "c")] + #[link(name = "m")] + #[link(name = "rt")] + #[link(name = "pthread")] + extern "C" {} + } +} + +cfg_if! { + if #[cfg(not(all(target_os = "linux", target_env = "gnu")))] { + missing! { + #[derive(Debug)] + pub enum fpos_t {} // FIXME(unix): fill this out with a struct + } + } +} + +missing! 
{ + #[derive(Debug)] + pub enum FILE {} +} + +extern "C" { + pub fn isalnum(c: c_int) -> c_int; + pub fn isalpha(c: c_int) -> c_int; + pub fn iscntrl(c: c_int) -> c_int; + pub fn isdigit(c: c_int) -> c_int; + pub fn isgraph(c: c_int) -> c_int; + pub fn islower(c: c_int) -> c_int; + pub fn isprint(c: c_int) -> c_int; + pub fn ispunct(c: c_int) -> c_int; + pub fn isspace(c: c_int) -> c_int; + pub fn isupper(c: c_int) -> c_int; + pub fn isxdigit(c: c_int) -> c_int; + pub fn isblank(c: c_int) -> c_int; + pub fn tolower(c: c_int) -> c_int; + pub fn toupper(c: c_int) -> c_int; + pub fn qsort( + base: *mut c_void, + num: size_t, + size: size_t, + compar: Option c_int>, + ); + pub fn bsearch( + key: *const c_void, + base: *const c_void, + num: size_t, + size: size_t, + compar: Option c_int>, + ) -> *mut c_void; + #[cfg_attr( + all(target_os = "macos", target_arch = "x86"), + link_name = "fopen$UNIX2003" + )] + #[cfg_attr(gnu_file_offset_bits64, link_name = "fopen64")] + pub fn fopen(filename: *const c_char, mode: *const c_char) -> *mut FILE; + #[cfg_attr( + all(target_os = "macos", target_arch = "x86"), + link_name = "freopen$UNIX2003" + )] + #[cfg_attr(gnu_file_offset_bits64, link_name = "freopen64")] + pub fn freopen(filename: *const c_char, mode: *const c_char, file: *mut FILE) -> *mut FILE; + + pub fn fflush(file: *mut FILE) -> c_int; + pub fn fclose(file: *mut FILE) -> c_int; + pub fn remove(filename: *const c_char) -> c_int; + pub fn rename(oldname: *const c_char, newname: *const c_char) -> c_int; + #[cfg_attr(gnu_file_offset_bits64, link_name = "tmpfile64")] + pub fn tmpfile() -> *mut FILE; + pub fn setvbuf(stream: *mut FILE, buffer: *mut c_char, mode: c_int, size: size_t) -> c_int; + pub fn setbuf(stream: *mut FILE, buf: *mut c_char); + pub fn getchar() -> c_int; + pub fn putchar(c: c_int) -> c_int; + pub fn fgetc(stream: *mut FILE) -> c_int; + pub fn fgets(buf: *mut c_char, n: c_int, stream: *mut FILE) -> *mut c_char; + pub fn fputc(c: c_int, stream: *mut FILE) -> c_int; + #[cfg_attr( + all(target_os = "macos", target_arch = "x86"), + link_name = "fputs$UNIX2003" + )] + pub fn fputs(s: *const c_char, stream: *mut FILE) -> c_int; + pub fn puts(s: *const c_char) -> c_int; + pub fn ungetc(c: c_int, stream: *mut FILE) -> c_int; + pub fn fread(ptr: *mut c_void, size: size_t, nobj: size_t, stream: *mut FILE) -> size_t; + #[cfg_attr( + all(target_os = "macos", target_arch = "x86"), + link_name = "fwrite$UNIX2003" + )] + pub fn fwrite(ptr: *const c_void, size: size_t, nobj: size_t, stream: *mut FILE) -> size_t; + pub fn fseek(stream: *mut FILE, offset: c_long, whence: c_int) -> c_int; + pub fn ftell(stream: *mut FILE) -> c_long; + pub fn rewind(stream: *mut FILE); + #[cfg_attr(target_os = "netbsd", link_name = "__fgetpos50")] + #[cfg_attr(gnu_file_offset_bits64, link_name = "fgetpos64")] + pub fn fgetpos(stream: *mut FILE, ptr: *mut fpos_t) -> c_int; + #[cfg_attr(target_os = "netbsd", link_name = "__fsetpos50")] + #[cfg_attr(gnu_file_offset_bits64, link_name = "fsetpos64")] + pub fn fsetpos(stream: *mut FILE, ptr: *const fpos_t) -> c_int; + pub fn feof(stream: *mut FILE) -> c_int; + pub fn ferror(stream: *mut FILE) -> c_int; + pub fn clearerr(stream: *mut FILE); + pub fn perror(s: *const c_char); + pub fn atof(s: *const c_char) -> c_double; + pub fn atoi(s: *const c_char) -> c_int; + pub fn atol(s: *const c_char) -> c_long; + pub fn atoll(s: *const c_char) -> c_longlong; + #[cfg_attr( + all(target_os = "macos", target_arch = "x86"), + link_name = "strtod$UNIX2003" + )] + pub fn strtod(s: *const 
c_char, endp: *mut *mut c_char) -> c_double; + pub fn strtof(s: *const c_char, endp: *mut *mut c_char) -> c_float; + pub fn strtol(s: *const c_char, endp: *mut *mut c_char, base: c_int) -> c_long; + pub fn strtoll(s: *const c_char, endp: *mut *mut c_char, base: c_int) -> c_longlong; + pub fn strtoul(s: *const c_char, endp: *mut *mut c_char, base: c_int) -> c_ulong; + pub fn strtoull(s: *const c_char, endp: *mut *mut c_char, base: c_int) -> c_ulonglong; + #[cfg_attr(target_os = "aix", link_name = "vec_calloc")] + pub fn calloc(nobj: size_t, size: size_t) -> *mut c_void; + #[cfg_attr(target_os = "aix", link_name = "vec_malloc")] + pub fn malloc(size: size_t) -> *mut c_void; + pub fn realloc(p: *mut c_void, size: size_t) -> *mut c_void; + pub fn free(p: *mut c_void); + pub fn abort() -> !; + pub fn exit(status: c_int) -> !; + pub fn _exit(status: c_int) -> !; + #[cfg_attr( + all(target_os = "macos", target_arch = "x86"), + link_name = "system$UNIX2003" + )] + pub fn system(s: *const c_char) -> c_int; + pub fn getenv(s: *const c_char) -> *mut c_char; + + pub fn strcpy(dst: *mut c_char, src: *const c_char) -> *mut c_char; + pub fn strncpy(dst: *mut c_char, src: *const c_char, n: size_t) -> *mut c_char; + pub fn stpcpy(dst: *mut c_char, src: *const c_char) -> *mut c_char; + pub fn strcat(s: *mut c_char, ct: *const c_char) -> *mut c_char; + pub fn strncat(s: *mut c_char, ct: *const c_char, n: size_t) -> *mut c_char; + pub fn strcmp(cs: *const c_char, ct: *const c_char) -> c_int; + pub fn strncmp(cs: *const c_char, ct: *const c_char, n: size_t) -> c_int; + pub fn strcoll(cs: *const c_char, ct: *const c_char) -> c_int; + pub fn strchr(cs: *const c_char, c: c_int) -> *mut c_char; + pub fn strrchr(cs: *const c_char, c: c_int) -> *mut c_char; + pub fn strspn(cs: *const c_char, ct: *const c_char) -> size_t; + pub fn strcspn(cs: *const c_char, ct: *const c_char) -> size_t; + pub fn strdup(cs: *const c_char) -> *mut c_char; + pub fn strndup(cs: *const c_char, n: size_t) -> *mut c_char; + pub fn strpbrk(cs: *const c_char, ct: *const c_char) -> *mut c_char; + pub fn strstr(cs: *const c_char, ct: *const c_char) -> *mut c_char; + pub fn strcasecmp(s1: *const c_char, s2: *const c_char) -> c_int; + pub fn strncasecmp(s1: *const c_char, s2: *const c_char, n: size_t) -> c_int; + pub fn strlen(cs: *const c_char) -> size_t; + pub fn strnlen(cs: *const c_char, maxlen: size_t) -> size_t; + #[cfg_attr( + all(target_os = "macos", target_arch = "x86"), + link_name = "strerror$UNIX2003" + )] + pub fn strerror(n: c_int) -> *mut c_char; + pub fn strtok(s: *mut c_char, t: *const c_char) -> *mut c_char; + pub fn strtok_r(s: *mut c_char, t: *const c_char, p: *mut *mut c_char) -> *mut c_char; + pub fn strxfrm(s: *mut c_char, ct: *const c_char, n: size_t) -> size_t; + pub fn strsignal(sig: c_int) -> *mut c_char; + pub fn wcslen(buf: *const wchar_t) -> size_t; + pub fn wcstombs(dest: *mut c_char, src: *const wchar_t, n: size_t) -> size_t; + + pub fn memchr(cx: *const c_void, c: c_int, n: size_t) -> *mut c_void; + pub fn wmemchr(cx: *const wchar_t, c: wchar_t, n: size_t) -> *mut wchar_t; + pub fn memcmp(cx: *const c_void, ct: *const c_void, n: size_t) -> c_int; + pub fn memcpy(dest: *mut c_void, src: *const c_void, n: size_t) -> *mut c_void; + pub fn memmove(dest: *mut c_void, src: *const c_void, n: size_t) -> *mut c_void; + pub fn memset(dest: *mut c_void, c: c_int, n: size_t) -> *mut c_void; + pub fn memccpy(dest: *mut c_void, src: *const c_void, c: c_int, n: size_t) -> *mut c_void; +} + +extern "C" { + #[cfg_attr(target_os = 
"netbsd", link_name = "__getpwnam50")] + pub fn getpwnam(name: *const c_char) -> *mut passwd; + #[cfg_attr(target_os = "netbsd", link_name = "__getpwuid50")] + pub fn getpwuid(uid: crate::uid_t) -> *mut passwd; + + pub fn fprintf(stream: *mut crate::FILE, format: *const c_char, ...) -> c_int; + pub fn printf(format: *const c_char, ...) -> c_int; + pub fn snprintf(s: *mut c_char, n: size_t, format: *const c_char, ...) -> c_int; + pub fn sprintf(s: *mut c_char, format: *const c_char, ...) -> c_int; + #[cfg_attr( + all(target_os = "linux", not(target_env = "uclibc")), + link_name = "__isoc99_fscanf" + )] + pub fn fscanf(stream: *mut crate::FILE, format: *const c_char, ...) -> c_int; + #[cfg_attr( + all(target_os = "linux", not(target_env = "uclibc")), + link_name = "__isoc99_scanf" + )] + pub fn scanf(format: *const c_char, ...) -> c_int; + #[cfg_attr( + all(target_os = "linux", not(target_env = "uclibc")), + link_name = "__isoc99_sscanf" + )] + pub fn sscanf(s: *const c_char, format: *const c_char, ...) -> c_int; + pub fn getchar_unlocked() -> c_int; + pub fn putchar_unlocked(c: c_int) -> c_int; + + #[cfg(not(all(target_arch = "powerpc", target_vendor = "nintendo")))] + #[cfg_attr(target_os = "netbsd", link_name = "__socket30")] + #[cfg_attr(target_os = "illumos", link_name = "__xnet_socket")] + #[cfg_attr(target_os = "solaris", link_name = "__xnet7_socket")] + #[cfg_attr(target_os = "espidf", link_name = "lwip_socket")] + pub fn socket(domain: c_int, ty: c_int, protocol: c_int) -> c_int; + #[cfg(not(all(target_arch = "powerpc", target_vendor = "nintendo")))] + #[cfg_attr( + all(target_os = "macos", target_arch = "x86"), + link_name = "connect$UNIX2003" + )] + #[cfg_attr( + any(target_os = "illumos", target_os = "solaris"), + link_name = "__xnet_connect" + )] + #[cfg_attr(target_os = "espidf", link_name = "lwip_connect")] + pub fn connect(socket: c_int, address: *const sockaddr, len: socklen_t) -> c_int; + #[cfg_attr( + all(target_os = "macos", target_arch = "x86"), + link_name = "listen$UNIX2003" + )] + #[cfg_attr(target_os = "espidf", link_name = "lwip_listen")] + pub fn listen(socket: c_int, backlog: c_int) -> c_int; + #[cfg(not(all(target_arch = "powerpc", target_vendor = "nintendo")))] + #[cfg_attr( + all(target_os = "macos", target_arch = "x86"), + link_name = "accept$UNIX2003" + )] + #[cfg_attr(target_os = "espidf", link_name = "lwip_accept")] + #[cfg_attr(target_os = "aix", link_name = "naccept")] + pub fn accept(socket: c_int, address: *mut sockaddr, address_len: *mut socklen_t) -> c_int; + #[cfg(not(all(target_arch = "powerpc", target_vendor = "nintendo")))] + #[cfg_attr( + all(target_os = "macos", target_arch = "x86"), + link_name = "getpeername$UNIX2003" + )] + #[cfg_attr(target_os = "espidf", link_name = "lwip_getpeername")] + #[cfg_attr(target_os = "aix", link_name = "ngetpeername")] + pub fn getpeername(socket: c_int, address: *mut sockaddr, address_len: *mut socklen_t) + -> c_int; + #[cfg(not(all(target_arch = "powerpc", target_vendor = "nintendo")))] + #[cfg_attr( + all(target_os = "macos", target_arch = "x86"), + link_name = "getsockname$UNIX2003" + )] + #[cfg_attr(target_os = "espidf", link_name = "lwip_getsockname")] + #[cfg_attr(target_os = "aix", link_name = "ngetsockname")] + pub fn getsockname(socket: c_int, address: *mut sockaddr, address_len: *mut socklen_t) + -> c_int; + #[cfg_attr(target_os = "espidf", link_name = "lwip_setsockopt")] + #[cfg_attr(gnu_time_bits64, link_name = "__setsockopt64")] + pub fn setsockopt( + socket: c_int, + level: c_int, + name: c_int, + 
value: *const c_void, + option_len: socklen_t, + ) -> c_int; + #[cfg_attr( + all(target_os = "macos", target_arch = "x86"), + link_name = "socketpair$UNIX2003" + )] + #[cfg_attr( + any(target_os = "illumos", target_os = "solaris"), + link_name = "__xnet_socketpair" + )] + pub fn socketpair( + domain: c_int, + type_: c_int, + protocol: c_int, + socket_vector: *mut c_int, + ) -> c_int; + #[cfg(not(all(target_arch = "powerpc", target_vendor = "nintendo")))] + #[cfg_attr( + all(target_os = "macos", target_arch = "x86"), + link_name = "sendto$UNIX2003" + )] + #[cfg_attr( + any(target_os = "illumos", target_os = "solaris"), + link_name = "__xnet_sendto" + )] + #[cfg_attr(target_os = "espidf", link_name = "lwip_sendto")] + pub fn sendto( + socket: c_int, + buf: *const c_void, + len: size_t, + flags: c_int, + addr: *const sockaddr, + addrlen: socklen_t, + ) -> ssize_t; + #[cfg_attr(target_os = "espidf", link_name = "lwip_shutdown")] + pub fn shutdown(socket: c_int, how: c_int) -> c_int; + + #[cfg_attr( + all(target_os = "macos", target_arch = "x86"), + link_name = "chmod$UNIX2003" + )] + pub fn chmod(path: *const c_char, mode: mode_t) -> c_int; + #[cfg_attr( + all(target_os = "macos", target_arch = "x86"), + link_name = "fchmod$UNIX2003" + )] + pub fn fchmod(fd: c_int, mode: mode_t) -> c_int; + + #[cfg_attr( + all(target_os = "macos", not(target_arch = "aarch64")), + link_name = "fstat$INODE64" + )] + #[cfg_attr(target_os = "netbsd", link_name = "__fstat50")] + #[cfg_attr( + all(target_os = "freebsd", any(freebsd11, freebsd10)), + link_name = "fstat@FBSD_1.0" + )] + #[cfg_attr(gnu_time_bits64, link_name = "__fstat64_time64")] + #[cfg_attr( + all(not(gnu_time_bits64), gnu_file_offset_bits64), + link_name = "fstat64" + )] + pub fn fstat(fildes: c_int, buf: *mut stat) -> c_int; + + pub fn mkdir(path: *const c_char, mode: mode_t) -> c_int; + + #[cfg_attr( + all(target_os = "macos", not(target_arch = "aarch64")), + link_name = "stat$INODE64" + )] + #[cfg_attr(target_os = "netbsd", link_name = "__stat50")] + #[cfg_attr( + all(target_os = "freebsd", any(freebsd11, freebsd10)), + link_name = "stat@FBSD_1.0" + )] + #[cfg_attr(gnu_time_bits64, link_name = "__stat64_time64")] + #[cfg_attr( + all(not(gnu_time_bits64), gnu_file_offset_bits64), + link_name = "stat64" + )] + pub fn stat(path: *const c_char, buf: *mut stat) -> c_int; + + pub fn pclose(stream: *mut crate::FILE) -> c_int; + #[cfg_attr( + all(target_os = "macos", target_arch = "x86"), + link_name = "fdopen$UNIX2003" + )] + pub fn fdopen(fd: c_int, mode: *const c_char) -> *mut crate::FILE; + pub fn fileno(stream: *mut crate::FILE) -> c_int; + + #[cfg_attr( + all(target_os = "macos", target_arch = "x86"), + link_name = "open$UNIX2003" + )] + #[cfg_attr(gnu_file_offset_bits64, link_name = "open64")] + pub fn open(path: *const c_char, oflag: c_int, ...) -> c_int; + #[cfg_attr( + all(target_os = "macos", target_arch = "x86"), + link_name = "creat$UNIX2003" + )] + #[cfg_attr(gnu_file_offset_bits64, link_name = "creat64")] + pub fn creat(path: *const c_char, mode: mode_t) -> c_int; + #[cfg_attr( + all(target_os = "macos", target_arch = "x86"), + link_name = "fcntl$UNIX2003" + )] + #[cfg_attr(gnu_time_bits64, link_name = "__fcntl_time64")] + #[cfg_attr( + all(not(gnu_time_bits64), gnu_file_offset_bits64), + link_name = "__fcntl_time64" + )] + pub fn fcntl(fd: c_int, cmd: c_int, ...) 
-> c_int; + + #[cfg_attr( + all(target_os = "macos", target_arch = "x86_64"), + link_name = "opendir$INODE64" + )] + #[cfg_attr( + all(target_os = "macos", target_arch = "x86"), + link_name = "opendir$INODE64$UNIX2003" + )] + #[cfg_attr(target_os = "netbsd", link_name = "__opendir30")] + pub fn opendir(dirname: *const c_char) -> *mut crate::DIR; + + #[cfg_attr( + all(target_os = "macos", not(target_arch = "aarch64")), + link_name = "readdir$INODE64" + )] + #[cfg_attr(target_os = "netbsd", link_name = "__readdir30")] + #[cfg_attr( + all(target_os = "freebsd", any(freebsd11, freebsd10)), + link_name = "readdir@FBSD_1.0" + )] + #[cfg_attr(gnu_file_offset_bits64, link_name = "readdir64")] + pub fn readdir(dirp: *mut crate::DIR) -> *mut crate::dirent; + #[cfg_attr( + all(target_os = "macos", target_arch = "x86"), + link_name = "closedir$UNIX2003" + )] + pub fn closedir(dirp: *mut crate::DIR) -> c_int; + #[cfg_attr( + all(target_os = "macos", target_arch = "x86_64"), + link_name = "rewinddir$INODE64" + )] + #[cfg_attr( + all(target_os = "macos", target_arch = "x86"), + link_name = "rewinddir$INODE64$UNIX2003" + )] + pub fn rewinddir(dirp: *mut crate::DIR); + + pub fn fchmodat(dirfd: c_int, pathname: *const c_char, mode: mode_t, flags: c_int) -> c_int; + pub fn fchown(fd: c_int, owner: crate::uid_t, group: crate::gid_t) -> c_int; + pub fn fchownat( + dirfd: c_int, + pathname: *const c_char, + owner: crate::uid_t, + group: crate::gid_t, + flags: c_int, + ) -> c_int; + #[cfg_attr( + all(target_os = "macos", not(target_arch = "aarch64")), + link_name = "fstatat$INODE64" + )] + #[cfg_attr( + all(target_os = "freebsd", any(freebsd11, freebsd10)), + link_name = "fstatat@FBSD_1.1" + )] + #[cfg_attr(gnu_time_bits64, link_name = "__fstatat64_time64")] + #[cfg_attr( + all(not(gnu_time_bits64), gnu_file_offset_bits64), + link_name = "fstatat64" + )] + pub fn fstatat(dirfd: c_int, pathname: *const c_char, buf: *mut stat, flags: c_int) -> c_int; + pub fn linkat( + olddirfd: c_int, + oldpath: *const c_char, + newdirfd: c_int, + newpath: *const c_char, + flags: c_int, + ) -> c_int; + pub fn renameat( + olddirfd: c_int, + oldpath: *const c_char, + newdirfd: c_int, + newpath: *const c_char, + ) -> c_int; + pub fn symlinkat(target: *const c_char, newdirfd: c_int, linkpath: *const c_char) -> c_int; + pub fn unlinkat(dirfd: c_int, pathname: *const c_char, flags: c_int) -> c_int; + + pub fn access(path: *const c_char, amode: c_int) -> c_int; + pub fn alarm(seconds: c_uint) -> c_uint; + pub fn chdir(dir: *const c_char) -> c_int; + pub fn fchdir(dirfd: c_int) -> c_int; + pub fn chown(path: *const c_char, uid: uid_t, gid: gid_t) -> c_int; + #[cfg_attr( + all(target_os = "macos", target_arch = "x86"), + link_name = "lchown$UNIX2003" + )] + pub fn lchown(path: *const c_char, uid: uid_t, gid: gid_t) -> c_int; + #[cfg_attr( + all(target_os = "macos", target_arch = "x86"), + link_name = "close$NOCANCEL$UNIX2003" + )] + #[cfg_attr( + all(target_os = "macos", target_arch = "x86_64"), + link_name = "close$NOCANCEL" + )] + pub fn close(fd: c_int) -> c_int; + pub fn dup(fd: c_int) -> c_int; + pub fn dup2(src: c_int, dst: c_int) -> c_int; + + pub fn execl(path: *const c_char, arg0: *const c_char, ...) -> c_int; + pub fn execle(path: *const c_char, arg0: *const c_char, ...) -> c_int; + pub fn execlp(file: *const c_char, arg0: *const c_char, ...) 
-> c_int; + + // DIFF(main): changed to `*const *mut` in e77f551de9 + pub fn execv(prog: *const c_char, argv: *const *const c_char) -> c_int; + pub fn execve( + prog: *const c_char, + argv: *const *const c_char, + envp: *const *const c_char, + ) -> c_int; + pub fn execvp(c: *const c_char, argv: *const *const c_char) -> c_int; + + pub fn fork() -> pid_t; + pub fn fpathconf(filedes: c_int, name: c_int) -> c_long; + pub fn getcwd(buf: *mut c_char, size: size_t) -> *mut c_char; + pub fn getegid() -> gid_t; + pub fn geteuid() -> uid_t; + pub fn getgid() -> gid_t; + pub fn getgroups(ngroups_max: c_int, groups: *mut gid_t) -> c_int; + #[cfg_attr(target_os = "illumos", link_name = "getloginx")] + pub fn getlogin() -> *mut c_char; + #[cfg_attr( + all(target_os = "macos", target_arch = "x86"), + link_name = "getopt$UNIX2003" + )] + pub fn getopt(argc: c_int, argv: *const *mut c_char, optstr: *const c_char) -> c_int; + pub fn getpgid(pid: pid_t) -> pid_t; + pub fn getpgrp() -> pid_t; + pub fn getpid() -> pid_t; + pub fn getppid() -> pid_t; + pub fn getuid() -> uid_t; + pub fn isatty(fd: c_int) -> c_int; + #[cfg_attr(target_os = "solaris", link_name = "__link_xpg4")] + pub fn link(src: *const c_char, dst: *const c_char) -> c_int; + #[cfg_attr(gnu_file_offset_bits64, link_name = "lseek64")] + pub fn lseek(fd: c_int, offset: off_t, whence: c_int) -> off_t; + pub fn pathconf(path: *const c_char, name: c_int) -> c_long; + pub fn pipe(fds: *mut c_int) -> c_int; + pub fn posix_memalign(memptr: *mut *mut c_void, align: size_t, size: size_t) -> c_int; + pub fn aligned_alloc(alignment: size_t, size: size_t) -> *mut c_void; + #[cfg_attr( + all(target_os = "macos", target_arch = "x86"), + link_name = "read$UNIX2003" + )] + pub fn read(fd: c_int, buf: *mut c_void, count: size_t) -> ssize_t; + pub fn rmdir(path: *const c_char) -> c_int; + pub fn seteuid(uid: uid_t) -> c_int; + pub fn setegid(gid: gid_t) -> c_int; + pub fn setgid(gid: gid_t) -> c_int; + pub fn setpgid(pid: pid_t, pgid: pid_t) -> c_int; + pub fn setsid() -> pid_t; + pub fn setuid(uid: uid_t) -> c_int; + pub fn setreuid(ruid: uid_t, euid: uid_t) -> c_int; + pub fn setregid(rgid: gid_t, egid: gid_t) -> c_int; + #[cfg_attr( + all(target_os = "macos", target_arch = "x86"), + link_name = "sleep$UNIX2003" + )] + pub fn sleep(secs: c_uint) -> c_uint; + #[cfg_attr( + all(target_os = "macos", target_arch = "x86"), + link_name = "nanosleep$UNIX2003" + )] + #[cfg_attr(target_os = "netbsd", link_name = "__nanosleep50")] + #[cfg_attr(gnu_time_bits64, link_name = "__nanosleep64")] + pub fn nanosleep(rqtp: *const timespec, rmtp: *mut timespec) -> c_int; + pub fn tcgetpgrp(fd: c_int) -> pid_t; + pub fn tcsetpgrp(fd: c_int, pgrp: crate::pid_t) -> c_int; + pub fn ttyname(fd: c_int) -> *mut c_char; + #[cfg_attr( + all(target_os = "macos", target_arch = "x86"), + link_name = "ttyname_r$UNIX2003" + )] + #[cfg_attr( + any(target_os = "illumos", target_os = "solaris"), + link_name = "__posix_ttyname_r" + )] + pub fn ttyname_r(fd: c_int, buf: *mut c_char, buflen: size_t) -> c_int; + pub fn unlink(c: *const c_char) -> c_int; + #[cfg_attr( + all(target_os = "macos", target_arch = "x86"), + link_name = "wait$UNIX2003" + )] + pub fn wait(status: *mut c_int) -> pid_t; + #[cfg_attr( + all(target_os = "macos", target_arch = "x86"), + link_name = "waitpid$UNIX2003" + )] + pub fn waitpid(pid: pid_t, status: *mut c_int, options: c_int) -> pid_t; + #[cfg_attr( + all(target_os = "macos", target_arch = "x86"), + link_name = "write$UNIX2003" + )] + pub fn write(fd: c_int, buf: *const 
c_void, count: size_t) -> ssize_t; + #[cfg_attr( + all(target_os = "macos", target_arch = "x86"), + link_name = "pread$UNIX2003" + )] + #[cfg_attr(gnu_file_offset_bits64, link_name = "pread64")] + pub fn pread(fd: c_int, buf: *mut c_void, count: size_t, offset: off_t) -> ssize_t; + #[cfg_attr( + all(target_os = "macos", target_arch = "x86"), + link_name = "pwrite$UNIX2003" + )] + #[cfg_attr(gnu_file_offset_bits64, link_name = "pwrite64")] + pub fn pwrite(fd: c_int, buf: *const c_void, count: size_t, offset: off_t) -> ssize_t; + pub fn umask(mask: mode_t) -> mode_t; + + #[cfg_attr(target_os = "netbsd", link_name = "__utime50")] + #[cfg_attr(gnu_time_bits64, link_name = "__utime64")] + pub fn utime(file: *const c_char, buf: *const utimbuf) -> c_int; + + #[cfg_attr( + all(target_os = "macos", target_arch = "x86"), + link_name = "kill$UNIX2003" + )] + pub fn kill(pid: pid_t, sig: c_int) -> c_int; + #[cfg_attr( + all(target_os = "macos", target_arch = "x86"), + link_name = "killpg$UNIX2003" + )] + pub fn killpg(pgrp: pid_t, sig: c_int) -> c_int; + + pub fn mlock(addr: *const c_void, len: size_t) -> c_int; + pub fn munlock(addr: *const c_void, len: size_t) -> c_int; + pub fn mlockall(flags: c_int) -> c_int; + pub fn munlockall() -> c_int; + + #[cfg_attr( + all(target_os = "macos", target_arch = "x86"), + link_name = "mmap$UNIX2003" + )] + #[cfg_attr(gnu_file_offset_bits64, link_name = "mmap64")] + pub fn mmap( + addr: *mut c_void, + len: size_t, + prot: c_int, + flags: c_int, + fd: c_int, + offset: off_t, + ) -> *mut c_void; + #[cfg_attr( + all(target_os = "macos", target_arch = "x86"), + link_name = "munmap$UNIX2003" + )] + pub fn munmap(addr: *mut c_void, len: size_t) -> c_int; + + pub fn if_nametoindex(ifname: *const c_char) -> c_uint; + pub fn if_indextoname(ifindex: c_uint, ifname: *mut c_char) -> *mut c_char; + + #[cfg_attr( + all(target_os = "macos", not(target_arch = "aarch64")), + link_name = "lstat$INODE64" + )] + #[cfg_attr(target_os = "netbsd", link_name = "__lstat50")] + #[cfg_attr( + all(target_os = "freebsd", any(freebsd11, freebsd10)), + link_name = "lstat@FBSD_1.0" + )] + #[cfg_attr(gnu_time_bits64, link_name = "__lstat64_time64")] + #[cfg_attr( + all(not(gnu_time_bits64), gnu_file_offset_bits64), + link_name = "lstat64" + )] + pub fn lstat(path: *const c_char, buf: *mut stat) -> c_int; + + #[cfg_attr( + all(target_os = "macos", target_arch = "x86"), + link_name = "fsync$UNIX2003" + )] + pub fn fsync(fd: c_int) -> c_int; + + #[cfg_attr( + all(target_os = "macos", target_arch = "x86"), + link_name = "setenv$UNIX2003" + )] + pub fn setenv(name: *const c_char, val: *const c_char, overwrite: c_int) -> c_int; + #[cfg_attr( + all(target_os = "macos", target_arch = "x86"), + link_name = "unsetenv$UNIX2003" + )] + #[cfg_attr(target_os = "netbsd", link_name = "__unsetenv13")] + pub fn unsetenv(name: *const c_char) -> c_int; + + pub fn symlink(path1: *const c_char, path2: *const c_char) -> c_int; + + #[cfg_attr(gnu_file_offset_bits64, link_name = "truncate64")] + pub fn truncate(path: *const c_char, length: off_t) -> c_int; + #[cfg_attr(gnu_file_offset_bits64, link_name = "ftruncate64")] + pub fn ftruncate(fd: c_int, length: off_t) -> c_int; + + pub fn signal(signum: c_int, handler: sighandler_t) -> sighandler_t; + + #[cfg_attr(target_os = "netbsd", link_name = "__getrusage50")] + #[cfg_attr(gnu_time_bits64, link_name = "__getrusage64")] + pub fn getrusage(resource: c_int, usage: *mut rusage) -> c_int; + + #[cfg_attr( + any( + target_os = "macos", + target_os = "ios", + target_os = 
"tvos", + target_os = "watchos", + target_os = "visionos" + ), + link_name = "realpath$DARWIN_EXTSN" + )] + pub fn realpath(pathname: *const c_char, resolved: *mut c_char) -> *mut c_char; + + #[cfg_attr(target_os = "netbsd", link_name = "__times13")] + pub fn times(buf: *mut crate::tms) -> crate::clock_t; + + pub fn pthread_self() -> crate::pthread_t; + pub fn pthread_equal(t1: crate::pthread_t, t2: crate::pthread_t) -> c_int; + #[cfg_attr( + all(target_os = "macos", target_arch = "x86"), + link_name = "pthread_join$UNIX2003" + )] + pub fn pthread_join(native: crate::pthread_t, value: *mut *mut c_void) -> c_int; + pub fn pthread_exit(value: *mut c_void) -> !; + pub fn pthread_attr_init(attr: *mut crate::pthread_attr_t) -> c_int; + pub fn pthread_attr_destroy(attr: *mut crate::pthread_attr_t) -> c_int; + pub fn pthread_attr_getstacksize( + attr: *const crate::pthread_attr_t, + stacksize: *mut size_t, + ) -> c_int; + pub fn pthread_attr_setstacksize(attr: *mut crate::pthread_attr_t, stack_size: size_t) + -> c_int; + pub fn pthread_attr_setdetachstate(attr: *mut crate::pthread_attr_t, state: c_int) -> c_int; + pub fn pthread_detach(thread: crate::pthread_t) -> c_int; + #[cfg_attr(target_os = "netbsd", link_name = "__libc_thr_yield")] + pub fn sched_yield() -> c_int; + pub fn pthread_key_create( + key: *mut pthread_key_t, + dtor: Option, + ) -> c_int; + pub fn pthread_key_delete(key: pthread_key_t) -> c_int; + pub fn pthread_getspecific(key: pthread_key_t) -> *mut c_void; + pub fn pthread_setspecific(key: pthread_key_t, value: *const c_void) -> c_int; + pub fn pthread_mutex_init( + lock: *mut pthread_mutex_t, + attr: *const pthread_mutexattr_t, + ) -> c_int; + pub fn pthread_mutex_destroy(lock: *mut pthread_mutex_t) -> c_int; + pub fn pthread_mutex_lock(lock: *mut pthread_mutex_t) -> c_int; + pub fn pthread_mutex_trylock(lock: *mut pthread_mutex_t) -> c_int; + pub fn pthread_mutex_unlock(lock: *mut pthread_mutex_t) -> c_int; + + pub fn pthread_mutexattr_init(attr: *mut pthread_mutexattr_t) -> c_int; + #[cfg_attr( + all(target_os = "macos", target_arch = "x86"), + link_name = "pthread_mutexattr_destroy$UNIX2003" + )] + pub fn pthread_mutexattr_destroy(attr: *mut pthread_mutexattr_t) -> c_int; + pub fn pthread_mutexattr_settype(attr: *mut pthread_mutexattr_t, _type: c_int) -> c_int; + + #[cfg_attr( + all(target_os = "macos", target_arch = "x86"), + link_name = "pthread_cond_init$UNIX2003" + )] + pub fn pthread_cond_init(cond: *mut pthread_cond_t, attr: *const pthread_condattr_t) -> c_int; + #[cfg_attr( + all(target_os = "macos", target_arch = "x86"), + link_name = "pthread_cond_wait$UNIX2003" + )] + pub fn pthread_cond_wait(cond: *mut pthread_cond_t, lock: *mut pthread_mutex_t) -> c_int; + #[cfg_attr( + all(target_os = "macos", target_arch = "x86"), + link_name = "pthread_cond_timedwait$UNIX2003" + )] + #[cfg_attr(gnu_time_bits64, link_name = "__pthread_cond_timedwait64")] + pub fn pthread_cond_timedwait( + cond: *mut pthread_cond_t, + lock: *mut pthread_mutex_t, + abstime: *const crate::timespec, + ) -> c_int; + pub fn pthread_cond_signal(cond: *mut pthread_cond_t) -> c_int; + pub fn pthread_cond_broadcast(cond: *mut pthread_cond_t) -> c_int; + pub fn pthread_cond_destroy(cond: *mut pthread_cond_t) -> c_int; + pub fn pthread_condattr_init(attr: *mut pthread_condattr_t) -> c_int; + pub fn pthread_condattr_destroy(attr: *mut pthread_condattr_t) -> c_int; + #[cfg_attr( + all(target_os = "macos", target_arch = "x86"), + link_name = "pthread_rwlock_init$UNIX2003" + )] + pub fn pthread_rwlock_init( + 
lock: *mut pthread_rwlock_t, + attr: *const pthread_rwlockattr_t, + ) -> c_int; + #[cfg_attr( + all(target_os = "macos", target_arch = "x86"), + link_name = "pthread_rwlock_destroy$UNIX2003" + )] + pub fn pthread_rwlock_destroy(lock: *mut pthread_rwlock_t) -> c_int; + #[cfg_attr( + all(target_os = "macos", target_arch = "x86"), + link_name = "pthread_rwlock_rdlock$UNIX2003" + )] + pub fn pthread_rwlock_rdlock(lock: *mut pthread_rwlock_t) -> c_int; + #[cfg_attr( + all(target_os = "macos", target_arch = "x86"), + link_name = "pthread_rwlock_tryrdlock$UNIX2003" + )] + pub fn pthread_rwlock_tryrdlock(lock: *mut pthread_rwlock_t) -> c_int; + #[cfg_attr( + all(target_os = "macos", target_arch = "x86"), + link_name = "pthread_rwlock_wrlock$UNIX2003" + )] + pub fn pthread_rwlock_wrlock(lock: *mut pthread_rwlock_t) -> c_int; + #[cfg_attr( + all(target_os = "macos", target_arch = "x86"), + link_name = "pthread_rwlock_trywrlock$UNIX2003" + )] + pub fn pthread_rwlock_trywrlock(lock: *mut pthread_rwlock_t) -> c_int; + #[cfg_attr( + all(target_os = "macos", target_arch = "x86"), + link_name = "pthread_rwlock_unlock$UNIX2003" + )] + pub fn pthread_rwlock_unlock(lock: *mut pthread_rwlock_t) -> c_int; + pub fn pthread_rwlockattr_init(attr: *mut pthread_rwlockattr_t) -> c_int; + pub fn pthread_rwlockattr_destroy(attr: *mut pthread_rwlockattr_t) -> c_int; + + #[cfg_attr( + any(target_os = "illumos", target_os = "solaris"), + link_name = "__xnet_getsockopt" + )] + #[cfg_attr(target_os = "espidf", link_name = "lwip_getsockopt")] + #[cfg_attr(gnu_time_bits64, link_name = "__getsockopt64")] + pub fn getsockopt( + sockfd: c_int, + level: c_int, + optname: c_int, + optval: *mut c_void, + optlen: *mut crate::socklen_t, + ) -> c_int; + pub fn raise(signum: c_int) -> c_int; + + #[cfg_attr(target_os = "netbsd", link_name = "__utimes50")] + #[cfg_attr(gnu_time_bits64, link_name = "__utimes64")] + pub fn utimes(filename: *const c_char, times: *const crate::timeval) -> c_int; + pub fn dlopen(filename: *const c_char, flag: c_int) -> *mut c_void; + pub fn dlerror() -> *mut c_char; + pub fn dlsym(handle: *mut c_void, symbol: *const c_char) -> *mut c_void; + pub fn dlclose(handle: *mut c_void) -> c_int; + + #[cfg(not(all(target_arch = "powerpc", target_vendor = "nintendo")))] + #[cfg_attr( + any(target_os = "illumos", target_os = "solaris"), + link_name = "__xnet_getaddrinfo" + )] + #[cfg_attr(target_os = "espidf", link_name = "lwip_getaddrinfo")] + pub fn getaddrinfo( + node: *const c_char, + service: *const c_char, + hints: *const addrinfo, + res: *mut *mut addrinfo, + ) -> c_int; + #[cfg(not(all(target_arch = "powerpc", target_vendor = "nintendo")))] + #[cfg_attr(target_os = "espidf", link_name = "lwip_freeaddrinfo")] + pub fn freeaddrinfo(res: *mut addrinfo); + pub fn hstrerror(errcode: c_int) -> *const c_char; + pub fn gai_strerror(errcode: c_int) -> *const c_char; + #[cfg_attr( + any( + all( + target_os = "linux", + not(any(target_env = "musl", target_env = "ohos")) + ), + target_os = "freebsd", + target_os = "cygwin", + target_os = "dragonfly", + target_os = "haiku" + ), + link_name = "__res_init" + )] + #[cfg_attr( + any( + target_os = "macos", + target_os = "ios", + target_os = "tvos", + target_os = "watchos", + target_os = "visionos" + ), + link_name = "res_9_init" + )] + #[cfg_attr(target_os = "aix", link_name = "_res_init")] + pub fn res_init() -> c_int; + + #[cfg_attr(target_os = "netbsd", link_name = "__gmtime_r50")] + #[cfg_attr(any(target_env = "musl", target_env = "ohos"), allow(deprecated))] + // 
FIXME(time): for `time_t` + #[cfg_attr(gnu_time_bits64, link_name = "__gmtime64_r")] + pub fn gmtime_r(time_p: *const time_t, result: *mut tm) -> *mut tm; + #[cfg_attr(target_os = "netbsd", link_name = "__localtime_r50")] + #[cfg_attr(any(target_env = "musl", target_env = "ohos"), allow(deprecated))] + // FIXME(time): for `time_t` + #[cfg_attr(gnu_time_bits64, link_name = "__localtime64_r")] + pub fn localtime_r(time_p: *const time_t, result: *mut tm) -> *mut tm; + #[cfg_attr( + all(target_os = "macos", target_arch = "x86"), + link_name = "mktime$UNIX2003" + )] + #[cfg_attr(target_os = "netbsd", link_name = "__mktime50")] + #[cfg_attr(any(target_env = "musl", target_env = "ohos"), allow(deprecated))] + // FIXME: for `time_t` + #[cfg_attr(gnu_time_bits64, link_name = "__mktime64")] + pub fn mktime(tm: *mut tm) -> time_t; + #[cfg_attr(target_os = "netbsd", link_name = "__time50")] + #[cfg_attr(any(target_env = "musl", target_env = "ohos"), allow(deprecated))] + // FIXME: for `time_t` + #[cfg_attr(gnu_time_bits64, link_name = "__time64")] + pub fn time(time: *mut time_t) -> time_t; + #[cfg_attr(target_os = "netbsd", link_name = "__gmtime50")] + #[cfg_attr(any(target_env = "musl", target_env = "ohos"), allow(deprecated))] + // FIXME(time): for `time_t` + #[cfg_attr(gnu_time_bits64, link_name = "__gmtime64")] + pub fn gmtime(time_p: *const time_t) -> *mut tm; + #[cfg_attr(target_os = "netbsd", link_name = "__locatime50")] + #[cfg_attr(any(target_env = "musl", target_env = "ohos"), allow(deprecated))] + // FIXME(time): for `time_t` + #[cfg_attr(gnu_time_bits64, link_name = "__localtime64")] + pub fn localtime(time_p: *const time_t) -> *mut tm; + #[cfg_attr(target_os = "netbsd", link_name = "__difftime50")] + #[cfg_attr(any(target_env = "musl", target_env = "ohos"), allow(deprecated))] + // FIXME(time): for `time_t` + #[cfg_attr(gnu_time_bits64, link_name = "__difftime64")] + pub fn difftime(time1: time_t, time0: time_t) -> c_double; + #[cfg(not(target_os = "aix"))] + #[cfg_attr(target_os = "netbsd", link_name = "__timegm50")] + #[cfg_attr(any(target_env = "musl", target_env = "ohos"), allow(deprecated))] + // FIXME(time): for `time_t` + #[cfg_attr(gnu_time_bits64, link_name = "__timegm64")] + pub fn timegm(tm: *mut crate::tm) -> time_t; + + #[cfg_attr(target_os = "netbsd", link_name = "__mknod50")] + #[cfg_attr( + all(target_os = "freebsd", any(freebsd11, freebsd10)), + link_name = "mknod@FBSD_1.0" + )] + pub fn mknod(pathname: *const c_char, mode: mode_t, dev: crate::dev_t) -> c_int; + pub fn gethostname(name: *mut c_char, len: size_t) -> c_int; + pub fn endservent(); + pub fn getservbyname(name: *const c_char, proto: *const c_char) -> *mut servent; + pub fn getservbyport(port: c_int, proto: *const c_char) -> *mut servent; + pub fn getservent() -> *mut servent; + pub fn setservent(stayopen: c_int); + pub fn getprotobyname(name: *const c_char) -> *mut protoent; + pub fn getprotobynumber(proto: c_int) -> *mut protoent; + pub fn chroot(name: *const c_char) -> c_int; + #[cfg(target_os = "cygwin")] + pub fn usleep(secs: useconds_t) -> c_int; + #[cfg_attr( + all(target_os = "macos", target_arch = "x86"), + link_name = "usleep$UNIX2003" + )] + #[cfg(not(target_os = "cygwin"))] + pub fn usleep(secs: c_uint) -> c_int; + #[cfg_attr( + all(target_os = "macos", target_arch = "x86"), + link_name = "send$UNIX2003" + )] + #[cfg_attr(target_os = "espidf", link_name = "lwip_send")] + pub fn send(socket: c_int, buf: *const c_void, len: size_t, flags: c_int) -> ssize_t; + #[cfg_attr( + all(target_os = "macos", 
target_arch = "x86"), + link_name = "recv$UNIX2003" + )] + #[cfg_attr(target_os = "espidf", link_name = "lwip_recv")] + pub fn recv(socket: c_int, buf: *mut c_void, len: size_t, flags: c_int) -> ssize_t; + #[cfg_attr( + all(target_os = "macos", target_arch = "x86"), + link_name = "putenv$UNIX2003" + )] + #[cfg_attr(target_os = "netbsd", link_name = "__putenv50")] + pub fn putenv(string: *mut c_char) -> c_int; + #[cfg_attr( + all(target_os = "macos", target_arch = "x86"), + link_name = "poll$UNIX2003" + )] + pub fn poll(fds: *mut pollfd, nfds: nfds_t, timeout: c_int) -> c_int; + #[cfg_attr( + all(target_os = "macos", target_arch = "x86_64"), + link_name = "select$1050" + )] + #[cfg_attr( + all(target_os = "macos", target_arch = "x86"), + link_name = "select$UNIX2003" + )] + #[cfg_attr(target_os = "netbsd", link_name = "__select50")] + #[cfg_attr(target_os = "aix", link_name = "__fd_select")] + #[cfg_attr(gnu_time_bits64, link_name = "__select64")] + pub fn select( + nfds: c_int, + readfds: *mut fd_set, + writefds: *mut fd_set, + errorfds: *mut fd_set, + timeout: *mut timeval, + ) -> c_int; + #[cfg_attr(target_os = "netbsd", link_name = "__setlocale50")] + pub fn setlocale(category: c_int, locale: *const c_char) -> *mut c_char; + pub fn localeconv() -> *mut lconv; + + #[cfg_attr( + all(target_os = "macos", target_arch = "x86"), + link_name = "sem_wait$UNIX2003" + )] + pub fn sem_wait(sem: *mut sem_t) -> c_int; + pub fn sem_trywait(sem: *mut sem_t) -> c_int; + pub fn sem_post(sem: *mut sem_t) -> c_int; + #[cfg_attr(gnu_file_offset_bits64, link_name = "statvfs64")] + pub fn statvfs(path: *const c_char, buf: *mut statvfs) -> c_int; + #[cfg_attr(gnu_file_offset_bits64, link_name = "fstatvfs64")] + pub fn fstatvfs(fd: c_int, buf: *mut statvfs) -> c_int; + + #[cfg_attr(target_os = "netbsd", link_name = "__sigemptyset14")] + pub fn sigemptyset(set: *mut sigset_t) -> c_int; + #[cfg_attr(target_os = "netbsd", link_name = "__sigaddset14")] + pub fn sigaddset(set: *mut sigset_t, signum: c_int) -> c_int; + #[cfg_attr(target_os = "netbsd", link_name = "__sigfillset14")] + pub fn sigfillset(set: *mut sigset_t) -> c_int; + #[cfg_attr(target_os = "netbsd", link_name = "__sigdelset14")] + pub fn sigdelset(set: *mut sigset_t, signum: c_int) -> c_int; + #[cfg_attr(target_os = "netbsd", link_name = "__sigismember14")] + pub fn sigismember(set: *const sigset_t, signum: c_int) -> c_int; + + #[cfg_attr(target_os = "netbsd", link_name = "__sigprocmask14")] + pub fn sigprocmask(how: c_int, set: *const sigset_t, oldset: *mut sigset_t) -> c_int; + #[cfg_attr(target_os = "netbsd", link_name = "__sigpending14")] + pub fn sigpending(set: *mut sigset_t) -> c_int; + + #[cfg_attr(target_os = "solaris", link_name = "__sysconf_xpg7")] + pub fn sysconf(name: c_int) -> c_long; + + pub fn mkfifo(path: *const c_char, mode: mode_t) -> c_int; + + #[cfg_attr(gnu_file_offset_bits64, link_name = "fseeko64")] + pub fn fseeko(stream: *mut crate::FILE, offset: off_t, whence: c_int) -> c_int; + #[cfg_attr(gnu_file_offset_bits64, link_name = "ftello64")] + pub fn ftello(stream: *mut crate::FILE) -> off_t; + #[cfg_attr( + all(target_os = "macos", target_arch = "x86"), + link_name = "tcdrain$UNIX2003" + )] + pub fn tcdrain(fd: c_int) -> c_int; + pub fn cfgetispeed(termios: *const crate::termios) -> crate::speed_t; + pub fn cfgetospeed(termios: *const crate::termios) -> crate::speed_t; + pub fn cfsetispeed(termios: *mut crate::termios, speed: crate::speed_t) -> c_int; + pub fn cfsetospeed(termios: *mut crate::termios, speed: crate::speed_t) 
-> c_int; + pub fn tcgetattr(fd: c_int, termios: *mut crate::termios) -> c_int; + pub fn tcsetattr(fd: c_int, optional_actions: c_int, termios: *const crate::termios) -> c_int; + pub fn tcflow(fd: c_int, action: c_int) -> c_int; + pub fn tcflush(fd: c_int, action: c_int) -> c_int; + pub fn tcgetsid(fd: c_int) -> crate::pid_t; + pub fn tcsendbreak(fd: c_int, duration: c_int) -> c_int; + #[cfg_attr(gnu_file_offset_bits64, link_name = "mkstemp64")] + pub fn mkstemp(template: *mut c_char) -> c_int; + pub fn mkdtemp(template: *mut c_char) -> *mut c_char; + + pub fn tmpnam(ptr: *mut c_char) -> *mut c_char; + + pub fn openlog(ident: *const c_char, logopt: c_int, facility: c_int); + pub fn closelog(); + pub fn setlogmask(maskpri: c_int) -> c_int; + #[cfg_attr(target_os = "macos", link_name = "syslog$DARWIN_EXTSN")] + pub fn syslog(priority: c_int, message: *const c_char, ...); + #[cfg_attr( + all(target_os = "macos", target_arch = "x86"), + link_name = "nice$UNIX2003" + )] + pub fn nice(incr: c_int) -> c_int; + + pub fn grantpt(fd: c_int) -> c_int; + pub fn posix_openpt(flags: c_int) -> c_int; + pub fn ptsname(fd: c_int) -> *mut c_char; + pub fn unlockpt(fd: c_int) -> c_int; + + #[cfg(not(target_os = "aix"))] + pub fn strcasestr(cs: *const c_char, ct: *const c_char) -> *mut c_char; + pub fn getline(lineptr: *mut *mut c_char, n: *mut size_t, stream: *mut FILE) -> ssize_t; + + #[cfg_attr(gnu_file_offset_bits64, link_name = "lockf64")] + pub fn lockf(fd: c_int, cmd: c_int, len: off_t) -> c_int; + +} + +safe_f! { + // It seems htonl, etc are macros on macOS. So we have to reimplement them. So let's + // reimplement them for all UNIX platforms + pub const fn htonl(hostlong: u32) -> u32 { + u32::to_be(hostlong) + } + pub const fn htons(hostshort: u16) -> u16 { + u16::to_be(hostshort) + } + pub const fn ntohl(netlong: u32) -> u32 { + u32::from_be(netlong) + } + pub const fn ntohs(netshort: u16) -> u16 { + u16::from_be(netshort) + } +} + +cfg_if! { + if #[cfg(not(any( + target_os = "emscripten", + target_os = "android", + target_os = "haiku", + target_os = "nto", + target_os = "solaris", + target_os = "cygwin", + target_os = "aix", + )))] { + extern "C" { + #[cfg_attr(gnu_time_bits64, link_name = "__adjtime64")] + pub fn adjtime(delta: *const timeval, olddelta: *mut timeval) -> c_int; + } + } else if #[cfg(target_os = "solaris")] { + extern "C" { + pub fn adjtime(delta: *mut timeval, olddelta: *mut timeval) -> c_int; + } + } +} + +cfg_if! { + if #[cfg(not(any( + target_os = "emscripten", + target_os = "android", + target_os = "nto" + )))] { + extern "C" { + pub fn stpncpy(dst: *mut c_char, src: *const c_char, n: size_t) -> *mut c_char; + } + } +} + +cfg_if! { + if #[cfg(not(any( + target_os = "dragonfly", + target_os = "emscripten", + target_os = "hurd", + target_os = "macos", + target_os = "openbsd", + )))] { + extern "C" { + pub fn sigqueue(pid: pid_t, sig: c_int, value: crate::sigval) -> c_int; + } + } +} + +cfg_if! { + if #[cfg(not(target_os = "android"))] { + extern "C" { + #[cfg_attr( + all(target_os = "macos", target_arch = "x86"), + link_name = "confstr$UNIX2003" + )] + #[cfg_attr(target_os = "solaris", link_name = "__confstr_xpg7")] + pub fn confstr(name: c_int, buf: *mut c_char, len: size_t) -> size_t; + } + } +} + +cfg_if! { + if #[cfg(not(target_os = "aix"))] { + extern "C" { + pub fn dladdr(addr: *const c_void, info: *mut Dl_info) -> c_int; + } + } +} + +cfg_if! 
{ + if #[cfg(not(target_os = "solaris"))] { + extern "C" { + pub fn flock(fd: c_int, operation: c_int) -> c_int; + } + } +} + +cfg_if! { + if #[cfg(not(any(target_env = "uclibc", target_os = "nto")))] { + extern "C" { + pub fn open_wmemstream(ptr: *mut *mut wchar_t, sizeloc: *mut size_t) -> *mut FILE; + } + } +} + +cfg_if! { + if #[cfg(not(target_os = "redox"))] { + extern "C" { + pub fn getsid(pid: pid_t) -> pid_t; + #[cfg_attr( + all(target_os = "macos", target_arch = "x86"), + link_name = "pause$UNIX2003" + )] + pub fn pause() -> c_int; + + pub fn mkdirat(dirfd: c_int, pathname: *const c_char, mode: mode_t) -> c_int; + #[cfg_attr(gnu_file_offset_bits64, link_name = "openat64")] + pub fn openat(dirfd: c_int, pathname: *const c_char, flags: c_int, ...) -> c_int; + + #[cfg_attr( + all(target_os = "macos", target_arch = "x86_64"), + link_name = "fdopendir$INODE64" + )] + #[cfg_attr( + all(target_os = "macos", target_arch = "x86"), + link_name = "fdopendir$INODE64$UNIX2003" + )] + pub fn fdopendir(fd: c_int) -> *mut crate::DIR; + + #[cfg_attr( + all(target_os = "macos", not(target_arch = "aarch64")), + link_name = "readdir_r$INODE64" + )] + #[cfg_attr(target_os = "netbsd", link_name = "__readdir_r30")] + #[cfg_attr( + all(target_os = "freebsd", any(freebsd11, freebsd10)), + link_name = "readdir_r@FBSD_1.0" + )] + #[cfg_attr( + all(target_os = "freebsd", not(any(freebsd11, freebsd10))), + link_name = "readdir_r@FBSD_1.5" + )] + #[allow(non_autolinks)] // FIXME(docs): `<>` breaks line length limit. + /// The 64-bit libc on Solaris and illumos only has readdir_r. If a + /// 32-bit Solaris or illumos target is ever created, it should use + /// __posix_readdir_r. See libc(3LIB) on Solaris or illumos: + /// https://illumos.org/man/3lib/libc + /// https://docs.oracle.com/cd/E36784_01/html/E36873/libc-3lib.html + /// https://www.unix.com/man-page/opensolaris/3LIB/libc/ + #[cfg_attr(gnu_file_offset_bits64, link_name = "readdir64_r")] + pub fn readdir_r( + dirp: *mut crate::DIR, + entry: *mut crate::dirent, + result: *mut *mut crate::dirent, + ) -> c_int; + } + } +} + +cfg_if! 
{ + if #[cfg(target_os = "nto")] { + extern "C" { + pub fn readlinkat( + dirfd: c_int, + pathname: *const c_char, + buf: *mut c_char, + bufsiz: size_t, + ) -> c_int; + pub fn readlink(path: *const c_char, buf: *mut c_char, bufsz: size_t) -> c_int; + pub fn pselect( + nfds: c_int, + readfds: *mut fd_set, + writefds: *mut fd_set, + errorfds: *mut fd_set, + timeout: *mut timespec, + sigmask: *const sigset_t, + ) -> c_int; + pub fn sigaction(signum: c_int, act: *const sigaction, oldact: *mut sigaction) + -> c_int; + } + } else { + extern "C" { + pub fn readlinkat( + dirfd: c_int, + pathname: *const c_char, + buf: *mut c_char, + bufsiz: size_t, + ) -> ssize_t; + pub fn fmemopen(buf: *mut c_void, size: size_t, mode: *const c_char) -> *mut FILE; + pub fn open_memstream(ptr: *mut *mut c_char, sizeloc: *mut size_t) -> *mut FILE; + pub fn atexit(cb: extern "C" fn()) -> c_int; + #[cfg_attr(target_os = "netbsd", link_name = "__sigaction14")] + pub fn sigaction(signum: c_int, act: *const sigaction, oldact: *mut sigaction) + -> c_int; + pub fn readlink(path: *const c_char, buf: *mut c_char, bufsz: size_t) -> ssize_t; + #[cfg_attr( + all(target_os = "macos", target_arch = "x86_64"), + link_name = "pselect$1050" + )] + #[cfg_attr( + all(target_os = "macos", target_arch = "x86"), + link_name = "pselect$UNIX2003" + )] + #[cfg_attr(target_os = "netbsd", link_name = "__pselect50")] + #[cfg_attr(gnu_time_bits64, link_name = "__pselect64")] + pub fn pselect( + nfds: c_int, + readfds: *mut fd_set, + writefds: *mut fd_set, + errorfds: *mut fd_set, + timeout: *const timespec, + sigmask: *const sigset_t, + ) -> c_int; + } + } +} + +cfg_if! { + if #[cfg(target_os = "aix")] { + extern "C" { + pub fn cfmakeraw(termios: *mut crate::termios) -> c_int; + pub fn cfsetspeed(termios: *mut crate::termios, speed: crate::speed_t) -> c_int; + } + } else if #[cfg(not(any( + target_os = "solaris", + target_os = "illumos", + target_os = "nto", + )))] { + extern "C" { + pub fn cfmakeraw(termios: *mut crate::termios); + pub fn cfsetspeed(termios: *mut crate::termios, speed: crate::speed_t) -> c_int; + } + } +} + +extern "C" { + pub fn fnmatch(pattern: *const c_char, name: *const c_char, flags: c_int) -> c_int; +} + +cfg_if! 
{ + if #[cfg(target_env = "newlib")] { + mod newlib; + pub use self::newlib::*; + } else if #[cfg(any( + target_os = "linux", + target_os = "l4re", + target_os = "android", + target_os = "emscripten" + ))] { + mod linux_like; + pub use self::linux_like::*; + } else if #[cfg(any( + target_os = "macos", + target_os = "ios", + target_os = "tvos", + target_os = "watchos", + target_os = "visionos", + target_os = "freebsd", + target_os = "dragonfly", + target_os = "openbsd", + target_os = "netbsd" + ))] { + mod bsd; + pub use self::bsd::*; + } else if #[cfg(any(target_os = "solaris", target_os = "illumos"))] { + mod solarish; + pub use self::solarish::*; + } else if #[cfg(target_os = "haiku")] { + mod haiku; + pub use self::haiku::*; + } else if #[cfg(target_os = "redox")] { + mod redox; + pub use self::redox::*; + } else if #[cfg(target_os = "cygwin")] { + mod cygwin; + pub use self::cygwin::*; + } else if #[cfg(target_os = "nto")] { + mod nto; + pub use self::nto::*; + } else if #[cfg(target_os = "aix")] { + mod aix; + pub use self::aix::*; + } else if #[cfg(target_os = "hurd")] { + mod hurd; + pub use self::hurd::*; + } else if #[cfg(target_os = "nuttx")] { + mod nuttx; + pub use self::nuttx::*; + } else { + // Unknown target_os + } +} diff --git a/vendor/libc/src/unix/newlib/aarch64/mod.rs b/vendor/libc/src/unix/newlib/aarch64/mod.rs new file mode 100644 index 00000000000000..e4640580e2478b --- /dev/null +++ b/vendor/libc/src/unix/newlib/aarch64/mod.rs @@ -0,0 +1,52 @@ +use crate::prelude::*; + +pub type clock_t = c_long; +pub type wchar_t = u32; + +s! { + pub struct sockaddr { + pub sa_len: u8, + pub sa_family: crate::sa_family_t, + pub sa_data: [c_char; 14], + } + + pub struct sockaddr_in6 { + pub sin6_len: u8, + pub sin6_family: crate::sa_family_t, + pub sin6_port: crate::in_port_t, + pub sin6_flowinfo: u32, + pub sin6_addr: crate::in6_addr, + pub sin6_scope_id: u32, + } + + pub struct sockaddr_in { + pub sin_len: u8, + pub sin_family: crate::sa_family_t, + pub sin_port: crate::in_port_t, + pub sin_addr: crate::in_addr, + pub sin_zero: [c_char; 8], + } +} + +pub const AF_INET6: c_int = 23; + +pub const FIONBIO: c_ulong = 1; + +pub const POLLIN: c_short = 0x1; +pub const POLLPRI: c_short = 0x2; +pub const POLLOUT: c_short = 0x4; +pub const POLLERR: c_short = 0x8; +pub const POLLHUP: c_short = 0x10; +pub const POLLNVAL: c_short = 0x20; + +pub const SOL_SOCKET: c_int = 65535; + +pub const MSG_OOB: c_int = 1; +pub const MSG_PEEK: c_int = 2; +pub const MSG_DONTWAIT: c_int = 4; +pub const MSG_DONTROUTE: c_int = 0; +pub const MSG_WAITALL: c_int = 0; +pub const MSG_MORE: c_int = 0; +pub const MSG_NOSIGNAL: c_int = 0; + +pub use crate::unix::newlib::generic::{dirent, sigset_t, stat}; diff --git a/vendor/libc/src/unix/newlib/arm/mod.rs b/vendor/libc/src/unix/newlib/arm/mod.rs new file mode 100644 index 00000000000000..aea4ed764b03c0 --- /dev/null +++ b/vendor/libc/src/unix/newlib/arm/mod.rs @@ -0,0 +1,54 @@ +use crate::prelude::*; + +pub type clock_t = c_long; +pub type wchar_t = u32; + +s! 
{ + pub struct sockaddr { + pub sa_family: crate::sa_family_t, + pub sa_data: [c_char; 14], + } + + pub struct sockaddr_in6 { + pub sin6_family: crate::sa_family_t, + pub sin6_port: crate::in_port_t, + pub sin6_flowinfo: u32, + pub sin6_addr: crate::in6_addr, + pub sin6_scope_id: u32, + } + + pub struct sockaddr_in { + pub sin_family: crate::sa_family_t, + pub sin_port: crate::in_port_t, + pub sin_addr: crate::in_addr, + pub sin_zero: [u8; 8], + } + + pub struct sockaddr_storage { + pub ss_family: crate::sa_family_t, + pub __ss_padding: [u8; 26], + } +} + +pub const AF_INET6: c_int = 23; + +pub const FIONBIO: c_ulong = 1; + +pub const POLLIN: c_short = 0x1; +pub const POLLPRI: c_short = 0x2; +pub const POLLHUP: c_short = 0x4; +pub const POLLERR: c_short = 0x8; +pub const POLLOUT: c_short = 0x10; +pub const POLLNVAL: c_short = 0x20; + +pub const SOL_SOCKET: c_int = 65535; + +pub const MSG_OOB: c_int = 1; +pub const MSG_PEEK: c_int = 2; +pub const MSG_DONTWAIT: c_int = 4; +pub const MSG_DONTROUTE: c_int = 0; +pub const MSG_WAITALL: c_int = 0; +pub const MSG_MORE: c_int = 0; +pub const MSG_NOSIGNAL: c_int = 0; + +pub use crate::unix::newlib::generic::{dirent, sigset_t, stat}; diff --git a/vendor/libc/src/unix/newlib/espidf/mod.rs b/vendor/libc/src/unix/newlib/espidf/mod.rs new file mode 100644 index 00000000000000..57a033fcaf2637 --- /dev/null +++ b/vendor/libc/src/unix/newlib/espidf/mod.rs @@ -0,0 +1,120 @@ +use crate::prelude::*; + +pub type clock_t = c_ulong; +pub type wchar_t = u32; + +s! { + pub struct cmsghdr { + pub cmsg_len: crate::socklen_t, + pub cmsg_level: c_int, + pub cmsg_type: c_int, + } + + pub struct msghdr { + pub msg_name: *mut c_void, + pub msg_namelen: crate::socklen_t, + pub msg_iov: *mut crate::iovec, + pub msg_iovlen: c_int, + pub msg_control: *mut c_void, + pub msg_controllen: crate::socklen_t, + pub msg_flags: c_int, + } + + pub struct sockaddr_un { + pub sun_family: crate::sa_family_t, + pub sun_path: [c_char; 108], + } + + pub struct sockaddr { + pub sa_len: u8, + pub sa_family: crate::sa_family_t, + pub sa_data: [c_char; 14], + } + + pub struct sockaddr_in6 { + pub sin6_len: u8, + pub sin6_family: crate::sa_family_t, + pub sin6_port: crate::in_port_t, + pub sin6_flowinfo: u32, + pub sin6_addr: crate::in6_addr, + pub sin6_scope_id: u32, + } + + pub struct sockaddr_in { + pub sin_len: u8, + pub sin_family: crate::sa_family_t, + pub sin_port: crate::in_port_t, + pub sin_addr: crate::in_addr, + pub sin_zero: [c_char; 8], + } + + pub struct sockaddr_storage { + pub s2_len: u8, + pub ss_family: crate::sa_family_t, + pub s2_data1: [c_char; 2], + pub s2_data2: [u32; 3], + pub s2_data3: [u32; 3], + } +} + +pub const AF_UNIX: c_int = 1; +pub const AF_INET6: c_int = 10; + +pub const FIONBIO: c_ulong = 2147772030; + +pub const POLLIN: c_short = 1 << 0; +pub const POLLRDNORM: c_short = 1 << 1; +pub const POLLRDBAND: c_short = 1 << 2; +pub const POLLPRI: c_short = POLLRDBAND; +pub const POLLOUT: c_short = 1 << 3; +pub const POLLWRNORM: c_short = POLLOUT; +pub const POLLWRBAND: c_short = 1 << 4; +pub const POLLERR: c_short = 1 << 5; +pub const POLLHUP: c_short = 1 << 6; + +pub const SOL_SOCKET: c_int = 0xfff; + +pub const MSG_OOB: c_int = 0x04; +pub const MSG_PEEK: c_int = 0x01; +pub const MSG_DONTWAIT: c_int = 0x08; +pub const MSG_DONTROUTE: c_int = 0x4; +pub const MSG_WAITALL: c_int = 0x02; +pub const MSG_MORE: c_int = 0x10; +pub const MSG_NOSIGNAL: c_int = 0x20; +pub const MSG_TRUNC: c_int = 0x04; +pub const MSG_CTRUNC: c_int = 0x08; +pub const MSG_EOR: c_int = 0x08; + +pub 
const PTHREAD_STACK_MIN: size_t = 768; + +pub const SIGABRT: c_int = 6; +pub const SIGFPE: c_int = 8; +pub const SIGILL: c_int = 4; +pub const SIGINT: c_int = 2; +pub const SIGSEGV: c_int = 11; +pub const SIGTERM: c_int = 15; +pub const SIGHUP: c_int = 1; +pub const SIGQUIT: c_int = 3; +pub const NSIG: size_t = 32; + +extern "C" { + pub fn pthread_create( + native: *mut crate::pthread_t, + attr: *const crate::pthread_attr_t, + f: extern "C" fn(_: *mut c_void) -> *mut c_void, + value: *mut c_void, + ) -> c_int; + + pub fn getrandom(buf: *mut c_void, buflen: size_t, flags: c_uint) -> ssize_t; + + pub fn gethostname(name: *mut c_char, namelen: ssize_t); + + #[link_name = "lwip_sendmsg"] + pub fn sendmsg(s: c_int, msg: *const crate::msghdr, flags: c_int) -> ssize_t; + #[link_name = "lwip_recvmsg"] + pub fn recvmsg(s: c_int, msg: *mut crate::msghdr, flags: c_int) -> ssize_t; + + pub fn eventfd(initval: c_uint, flags: c_int) -> c_int; +} + +pub use crate::unix::newlib::generic::{dirent, sigset_t, stat}; diff --git a/vendor/libc/src/unix/newlib/generic.rs b/vendor/libc/src/unix/newlib/generic.rs new file mode 100644 index 00000000000000..ba4dfbe528b69d --- /dev/null +++ b/vendor/libc/src/unix/newlib/generic.rs @@ -0,0 +1,39 @@ +//! Common types used by most newlib platforms + +use crate::off_t; +use crate::prelude::*; + +s! { + pub struct sigset_t { + #[cfg(target_os = "horizon")] + __val: [c_ulong; 16], + #[cfg(not(target_os = "horizon"))] + __val: u32, + } + + pub struct stat { + pub st_dev: crate::dev_t, + pub st_ino: crate::ino_t, + pub st_mode: crate::mode_t, + pub st_nlink: crate::nlink_t, + pub st_uid: crate::uid_t, + pub st_gid: crate::gid_t, + pub st_rdev: crate::dev_t, + pub st_size: off_t, + pub st_atime: crate::time_t, + pub st_spare1: c_long, + pub st_mtime: crate::time_t, + pub st_spare2: c_long, + pub st_ctime: crate::time_t, + pub st_spare3: c_long, + pub st_blksize: crate::blksize_t, + pub st_blocks: crate::blkcnt_t, + pub st_spare4: [c_long; 2usize], + } + + pub struct dirent { + pub d_ino: crate::ino_t, + pub d_type: c_uchar, + pub d_name: [c_char; 256usize], + } +} diff --git a/vendor/libc/src/unix/newlib/horizon/mod.rs b/vendor/libc/src/unix/newlib/horizon/mod.rs new file mode 100644 index 00000000000000..3958e02734adaf --- /dev/null +++ b/vendor/libc/src/unix/newlib/horizon/mod.rs @@ -0,0 +1,278 @@ +//! ARMv6K Nintendo 3DS C Newlib definitions + +use crate::off_t; +use crate::prelude::*; + +pub type wchar_t = c_uint; + +pub type u_register_t = c_uint; +pub type u_char = c_uchar; +pub type u_short = c_ushort; +pub type u_int = c_uint; +pub type u_long = c_ulong; +pub type ushort = c_ushort; +pub type uint = c_uint; +pub type ulong = c_ulong; +pub type clock_t = c_ulong; +pub type daddr_t = c_long; +pub type caddr_t = *mut c_char; +pub type sbintime_t = c_longlong; +pub type sigset_t = c_ulong; + +s! 
{ + pub struct hostent { + pub h_name: *mut c_char, + pub h_aliases: *mut *mut c_char, + pub h_addrtype: u16, + pub h_length: u16, + pub h_addr_list: *mut *mut c_char, + } + + pub struct sockaddr { + pub sa_family: crate::sa_family_t, + pub sa_data: [c_char; 26usize], + } + + pub struct sockaddr_storage { + pub ss_family: crate::sa_family_t, + pub __ss_padding: [c_char; 26usize], + } + + pub struct sockaddr_in { + pub sin_family: crate::sa_family_t, + pub sin_port: crate::in_port_t, + pub sin_addr: crate::in_addr, + pub sin_zero: [c_char; 8], + } + + pub struct sockaddr_in6 { + pub sin6_family: crate::sa_family_t, + pub sin6_port: crate::in_port_t, + pub sin6_flowinfo: u32, + pub sin6_addr: crate::in6_addr, + pub sin6_scope_id: u32, + } + + pub struct sockaddr_un { + pub sun_len: c_uchar, + pub sun_family: crate::sa_family_t, + pub sun_path: [c_char; 104usize], + } + + pub struct sched_param { + pub sched_priority: c_int, + } + + pub struct stat { + pub st_dev: crate::dev_t, + pub st_ino: crate::ino_t, + pub st_mode: crate::mode_t, + pub st_nlink: crate::nlink_t, + pub st_uid: crate::uid_t, + pub st_gid: crate::gid_t, + pub st_rdev: crate::dev_t, + pub st_size: off_t, + pub st_atim: crate::timespec, + pub st_mtim: crate::timespec, + pub st_ctim: crate::timespec, + pub st_blksize: crate::blksize_t, + pub st_blocks: crate::blkcnt_t, + pub st_spare4: [c_long; 2usize], + } +} + +pub const SIGEV_NONE: c_int = 1; +pub const SIGEV_SIGNAL: c_int = 2; +pub const SIGEV_THREAD: c_int = 3; +pub const SA_NOCLDSTOP: c_int = 1; +pub const MINSIGSTKSZ: c_int = 2048; +pub const SIGSTKSZ: c_int = 8192; +pub const SS_ONSTACK: c_int = 1; +pub const SS_DISABLE: c_int = 2; +pub const SIG_SETMASK: c_int = 0; +pub const SIG_BLOCK: c_int = 1; +pub const SIG_UNBLOCK: c_int = 2; +pub const SIGHUP: c_int = 1; +pub const SIGINT: c_int = 2; +pub const SIGQUIT: c_int = 3; +pub const SIGILL: c_int = 4; +pub const SIGTRAP: c_int = 5; +pub const SIGABRT: c_int = 6; +pub const SIGEMT: c_int = 7; +pub const SIGFPE: c_int = 8; +pub const SIGKILL: c_int = 9; +pub const SIGBUS: c_int = 10; +pub const SIGSEGV: c_int = 11; +pub const SIGSYS: c_int = 12; +pub const SIGPIPE: c_int = 13; +pub const SIGALRM: c_int = 14; +pub const SIGTERM: c_int = 15; +pub const SIGURG: c_int = 16; +pub const SIGSTOP: c_int = 17; +pub const SIGTSTP: c_int = 18; +pub const SIGCONT: c_int = 19; +pub const SIGCHLD: c_int = 20; +pub const SIGCLD: c_int = 20; +pub const SIGTTIN: c_int = 21; +pub const SIGTTOU: c_int = 22; +pub const SIGIO: c_int = 23; +pub const SIGPOLL: c_int = 23; +pub const SIGXCPU: c_int = 24; +pub const SIGXFSZ: c_int = 25; +pub const SIGVTALRM: c_int = 26; +pub const SIGPROF: c_int = 27; +pub const SIGWINCH: c_int = 28; +pub const SIGLOST: c_int = 29; +pub const SIGUSR1: c_int = 30; +pub const SIGUSR2: c_int = 31; +pub const NSIG: c_int = 32; +pub const CLOCK_ENABLED: c_uint = 1; +pub const CLOCK_DISABLED: c_uint = 0; +pub const CLOCK_ALLOWED: c_uint = 1; +pub const CLOCK_DISALLOWED: c_uint = 0; +pub const TIMER_ABSTIME: c_uint = 4; +pub const SOL_SOCKET: c_int = 65535; +pub const MSG_OOB: c_int = 1; +pub const MSG_PEEK: c_int = 2; +pub const MSG_DONTWAIT: c_int = 4; +pub const MSG_DONTROUTE: c_int = 0; +pub const MSG_WAITALL: c_int = 0; +pub const MSG_MORE: c_int = 0; +pub const MSG_NOSIGNAL: c_int = 0; +pub const SOL_CONFIG: c_uint = 65534; + +pub const _SC_PAGESIZE: c_int = 8; +pub const _SC_GETPW_R_SIZE_MAX: c_int = 51; + +pub const PTHREAD_STACK_MIN: size_t = 4096; +pub const WNOHANG: c_int = 1; + +pub const POLLIN: c_short = 
0x0001; +pub const POLLPRI: c_short = 0x0002; +pub const POLLOUT: c_short = 0x0004; +pub const POLLRDNORM: c_short = 0x0040; +pub const POLLWRNORM: c_short = POLLOUT; +pub const POLLRDBAND: c_short = 0x0080; +pub const POLLWRBAND: c_short = 0x0100; +pub const POLLERR: c_short = 0x0008; +pub const POLLHUP: c_short = 0x0010; +pub const POLLNVAL: c_short = 0x0020; + +pub const EAI_AGAIN: c_int = 2; +pub const EAI_BADFLAGS: c_int = 3; +pub const EAI_FAIL: c_int = 4; +pub const EAI_SERVICE: c_int = 9; +pub const EAI_SYSTEM: c_int = 11; +pub const EAI_BADHINTS: c_int = 12; +pub const EAI_PROTOCOL: c_int = 13; +pub const EAI_OVERFLOW: c_int = 14; +pub const EAI_MAX: c_int = 15; + +pub const AF_UNIX: c_int = 1; +pub const AF_INET6: c_int = 23; + +pub const FIONBIO: c_ulong = 1; + +pub const RTLD_DEFAULT: *mut c_void = ptr::null_mut(); + +// For pthread get/setschedparam +pub const SCHED_FIFO: c_int = 1; +pub const SCHED_RR: c_int = 2; + +// For getrandom() +pub const GRND_NONBLOCK: c_uint = 0x1; +pub const GRND_RANDOM: c_uint = 0x2; + +// Horizon OS works doesn't or can't hold any of this information +safe_f! { + pub const fn WIFSTOPPED(_status: c_int) -> bool { + false + } + + pub const fn WSTOPSIG(_status: c_int) -> c_int { + 0 + } + + pub const fn WIFCONTINUED(_status: c_int) -> bool { + true + } + + pub const fn WIFSIGNALED(_status: c_int) -> bool { + false + } + + pub const fn WTERMSIG(_status: c_int) -> c_int { + 0 + } + + pub const fn WIFEXITED(_status: c_int) -> bool { + true + } + + pub const fn WEXITSTATUS(_status: c_int) -> c_int { + 0 + } + + pub const fn WCOREDUMP(_status: c_int) -> bool { + false + } +} + +extern "C" { + pub fn pthread_create( + native: *mut crate::pthread_t, + attr: *const crate::pthread_attr_t, + f: extern "C" fn(_: *mut c_void) -> *mut c_void, + value: *mut c_void, + ) -> c_int; + + pub fn pthread_attr_getschedparam( + attr: *const crate::pthread_attr_t, + param: *mut sched_param, + ) -> c_int; + + pub fn pthread_attr_setschedparam( + attr: *mut crate::pthread_attr_t, + param: *const sched_param, + ) -> c_int; + + pub fn pthread_attr_getprocessorid_np( + attr: *const crate::pthread_attr_t, + processor_id: *mut c_int, + ) -> c_int; + + pub fn pthread_attr_setprocessorid_np( + attr: *mut crate::pthread_attr_t, + processor_id: c_int, + ) -> c_int; + + pub fn pthread_getschedparam( + native: crate::pthread_t, + policy: *mut c_int, + param: *mut crate::sched_param, + ) -> c_int; + + pub fn pthread_setschedparam( + native: crate::pthread_t, + policy: c_int, + param: *const crate::sched_param, + ) -> c_int; + + pub fn pthread_condattr_getclock( + attr: *const crate::pthread_condattr_t, + clock_id: *mut crate::clockid_t, + ) -> c_int; + + pub fn pthread_condattr_setclock( + attr: *mut crate::pthread_condattr_t, + clock_id: crate::clockid_t, + ) -> c_int; + + pub fn pthread_getprocessorid_np() -> c_int; + + pub fn getrandom(buf: *mut c_void, buflen: size_t, flags: c_uint) -> ssize_t; + + pub fn gethostid() -> c_long; +} + +pub use crate::unix::newlib::generic::dirent; diff --git a/vendor/libc/src/unix/newlib/mod.rs b/vendor/libc/src/unix/newlib/mod.rs new file mode 100644 index 00000000000000..0193083f4e63b5 --- /dev/null +++ b/vendor/libc/src/unix/newlib/mod.rs @@ -0,0 +1,997 @@ +use crate::prelude::*; + +pub type blkcnt_t = i32; +pub type blksize_t = i32; + +pub type clockid_t = c_ulong; + +cfg_if! 
{ + if #[cfg(any(target_os = "espidf"))] { + pub type dev_t = c_short; + pub type ino_t = c_ushort; + pub type off_t = c_long; + } else if #[cfg(any(target_os = "vita"))] { + pub type dev_t = c_short; + pub type ino_t = c_ushort; + pub type off_t = c_int; + } else { + pub type dev_t = u32; + pub type ino_t = u32; + pub type off_t = i64; + } +} + +pub type fsblkcnt_t = u64; +pub type fsfilcnt_t = u32; +pub type id_t = u32; +pub type key_t = c_int; +pub type loff_t = c_longlong; +pub type mode_t = c_uint; +pub type nfds_t = u32; +pub type nlink_t = c_ushort; +pub type pthread_t = c_ulong; +pub type pthread_key_t = c_uint; +pub type rlim_t = u32; + +cfg_if! { + if #[cfg(target_os = "horizon")] { + pub type sa_family_t = u16; + } else { + pub type sa_family_t = u8; + } +} + +pub type socklen_t = u32; +pub type speed_t = u32; +pub type suseconds_t = i32; +cfg_if! { + if #[cfg(target_os = "espidf")] { + pub type tcflag_t = u16; + } else { + pub type tcflag_t = c_uint; + } +} +pub type useconds_t = u32; + +cfg_if! { + if #[cfg(any( + target_os = "horizon", + all(target_os = "espidf", not(espidf_time32)) + ))] { + pub type time_t = c_longlong; + } else { + pub type time_t = i32; + } +} + +cfg_if! { + if #[cfg(not(target_os = "horizon"))] { + s! { + pub struct hostent { + pub h_name: *mut c_char, + pub h_aliases: *mut *mut c_char, + pub h_addrtype: c_int, + pub h_length: c_int, + pub h_addr_list: *mut *mut c_char, + pub h_addr: *mut c_char, + } + } + } +} + +s! { + // The order of the `ai_addr` field in this struct is crucial + // for converting between the Rust and C types. + pub struct addrinfo { + pub ai_flags: c_int, + pub ai_family: c_int, + pub ai_socktype: c_int, + pub ai_protocol: c_int, + pub ai_addrlen: socklen_t, + + #[cfg(target_os = "espidf")] + pub ai_addr: *mut sockaddr, + + pub ai_canonname: *mut c_char, + + #[cfg(not(any( + target_os = "espidf", + all(target_arch = "powerpc", target_vendor = "nintendo") + )))] + pub ai_addr: *mut sockaddr, + + pub ai_next: *mut addrinfo, + } + + pub struct ip_mreq { + pub imr_multiaddr: in_addr, + pub imr_interface: in_addr, + } + + pub struct linger { + pub l_onoff: c_int, + pub l_linger: c_int, + } + + pub struct in_addr { + pub s_addr: crate::in_addr_t, + } + + pub struct pollfd { + pub fd: c_int, + pub events: c_int, + pub revents: c_int, + } + + pub struct lconv { + pub decimal_point: *mut c_char, + pub thousands_sep: *mut c_char, + pub grouping: *mut c_char, + pub int_curr_symbol: *mut c_char, + pub currency_symbol: *mut c_char, + pub mon_decimal_point: *mut c_char, + pub mon_thousands_sep: *mut c_char, + pub mon_grouping: *mut c_char, + pub positive_sign: *mut c_char, + pub negative_sign: *mut c_char, + pub int_frac_digits: c_char, + pub frac_digits: c_char, + pub p_cs_precedes: c_char, + pub p_sep_by_space: c_char, + pub n_cs_precedes: c_char, + pub n_sep_by_space: c_char, + pub p_sign_posn: c_char, + pub n_sign_posn: c_char, + pub int_n_cs_precedes: c_char, + pub int_n_sep_by_space: c_char, + pub int_n_sign_posn: c_char, + pub int_p_cs_precedes: c_char, + pub int_p_sep_by_space: c_char, + pub int_p_sign_posn: c_char, + } + + pub struct tm { + pub tm_sec: c_int, + pub tm_min: c_int, + pub tm_hour: c_int, + pub tm_mday: c_int, + pub tm_mon: c_int, + pub tm_year: c_int, + pub tm_wday: c_int, + pub tm_yday: c_int, + pub tm_isdst: c_int, + } + + pub struct statvfs { + pub f_bsize: c_ulong, + pub f_frsize: c_ulong, + pub f_blocks: fsblkcnt_t, + pub f_bfree: fsblkcnt_t, + pub f_bavail: fsblkcnt_t, + pub f_files: fsfilcnt_t, + pub f_ffree: 
fsfilcnt_t, + pub f_favail: fsfilcnt_t, + pub f_fsid: c_ulong, + pub f_flag: c_ulong, + pub f_namemax: c_ulong, + } + + pub struct sigaction { + pub sa_handler: extern "C" fn(arg1: c_int), + pub sa_mask: sigset_t, + pub sa_flags: c_int, + } + + pub struct stack_t { + pub ss_sp: *mut c_void, + pub ss_flags: c_int, + pub ss_size: usize, + } + + pub struct fd_set { + // Unverified + fds_bits: [c_ulong; FD_SETSIZE as usize / ULONG_SIZE], + } + + pub struct passwd { + // Unverified + pub pw_name: *mut c_char, + pub pw_passwd: *mut c_char, + pub pw_uid: crate::uid_t, + pub pw_gid: crate::gid_t, + pub pw_gecos: *mut c_char, + pub pw_dir: *mut c_char, + pub pw_shell: *mut c_char, + } + + pub struct termios { + // Unverified + pub c_iflag: crate::tcflag_t, + pub c_oflag: crate::tcflag_t, + pub c_cflag: crate::tcflag_t, + pub c_lflag: crate::tcflag_t, + pub c_line: crate::cc_t, + pub c_cc: [crate::cc_t; crate::NCCS], + #[cfg(target_os = "espidf")] + pub c_ispeed: u32, + #[cfg(target_os = "espidf")] + pub c_ospeed: u32, + } + + pub struct sem_t { + // Unverified + __size: [c_char; 16], + } + + pub struct Dl_info { + // Unverified + pub dli_fname: *const c_char, + pub dli_fbase: *mut c_void, + pub dli_sname: *const c_char, + pub dli_saddr: *mut c_void, + } + + pub struct utsname { + // Unverified + pub sysname: [c_char; 65], + pub nodename: [c_char; 65], + pub release: [c_char; 65], + pub version: [c_char; 65], + pub machine: [c_char; 65], + pub domainname: [c_char; 65], + } + + pub struct cpu_set_t { + // Unverified + bits: [u32; 32], + } + + pub struct pthread_attr_t { + // Unverified + #[cfg(not(target_os = "espidf"))] + __size: [u8; __SIZEOF_PTHREAD_ATTR_T], + #[cfg(target_os = "espidf")] + pub is_initialized: i32, + #[cfg(target_os = "espidf")] + pub stackaddr: *mut c_void, + #[cfg(target_os = "espidf")] + pub stacksize: i32, + #[cfg(target_os = "espidf")] + pub contentionscope: i32, + #[cfg(target_os = "espidf")] + pub inheritsched: i32, + #[cfg(target_os = "espidf")] + pub schedpolicy: i32, + #[cfg(target_os = "espidf")] + pub schedparam: i32, + #[cfg(target_os = "espidf")] + pub detachstate: i32, + } + + pub struct pthread_rwlockattr_t { + // Unverified + __size: [u8; __SIZEOF_PTHREAD_RWLOCKATTR_T], + } + + #[cfg_attr( + all( + target_pointer_width = "32", + any(target_arch = "mips", target_arch = "arm", target_arch = "powerpc") + ), + repr(align(4)) + )] + #[cfg_attr( + any( + target_pointer_width = "64", + not(any(target_arch = "mips", target_arch = "arm", target_arch = "powerpc")) + ), + repr(align(8)) + )] + pub struct pthread_mutex_t { + // Unverified + size: [u8; crate::__SIZEOF_PTHREAD_MUTEX_T], + } + + #[cfg_attr( + all( + target_pointer_width = "32", + any(target_arch = "mips", target_arch = "arm", target_arch = "powerpc") + ), + repr(align(4)) + )] + #[cfg_attr( + any( + target_pointer_width = "64", + not(any(target_arch = "mips", target_arch = "arm", target_arch = "powerpc")) + ), + repr(align(8)) + )] + pub struct pthread_rwlock_t { + // Unverified + size: [u8; crate::__SIZEOF_PTHREAD_RWLOCK_T], + } + + #[cfg_attr( + any( + target_pointer_width = "32", + target_arch = "x86_64", + target_arch = "powerpc64", + target_arch = "mips64", + target_arch = "s390x", + target_arch = "sparc64" + ), + repr(align(4)) + )] + #[cfg_attr( + not(any( + target_pointer_width = "32", + target_arch = "x86_64", + target_arch = "powerpc64", + target_arch = "mips64", + target_arch = "s390x", + target_arch = "sparc64" + )), + repr(align(8)) + )] + pub struct pthread_mutexattr_t { + // Unverified + size: 
[u8; crate::__SIZEOF_PTHREAD_MUTEXATTR_T], + } + + #[repr(align(8))] + pub struct pthread_cond_t { + // Unverified + size: [u8; crate::__SIZEOF_PTHREAD_COND_T], + } + + #[repr(align(4))] + pub struct pthread_condattr_t { + // Unverified + size: [u8; crate::__SIZEOF_PTHREAD_CONDATTR_T], + } +} + +// unverified constants +pub const PTHREAD_MUTEX_INITIALIZER: pthread_mutex_t = pthread_mutex_t { + size: [__PTHREAD_INITIALIZER_BYTE; __SIZEOF_PTHREAD_MUTEX_T], +}; +pub const PTHREAD_COND_INITIALIZER: pthread_cond_t = pthread_cond_t { + size: [__PTHREAD_INITIALIZER_BYTE; __SIZEOF_PTHREAD_COND_T], +}; +pub const PTHREAD_RWLOCK_INITIALIZER: pthread_rwlock_t = pthread_rwlock_t { + size: [__PTHREAD_INITIALIZER_BYTE; __SIZEOF_PTHREAD_RWLOCK_T], +}; + +cfg_if! { + if #[cfg(target_os = "espidf")] { + pub const NCCS: usize = 11; + } else { + pub const NCCS: usize = 32; + } +} + +cfg_if! { + if #[cfg(target_os = "espidf")] { + const __PTHREAD_INITIALIZER_BYTE: u8 = 0xff; + pub const __SIZEOF_PTHREAD_ATTR_T: usize = 32; + pub const __SIZEOF_PTHREAD_MUTEX_T: usize = 4; + pub const __SIZEOF_PTHREAD_MUTEXATTR_T: usize = 12; + pub const __SIZEOF_PTHREAD_COND_T: usize = 4; + pub const __SIZEOF_PTHREAD_CONDATTR_T: usize = 8; + pub const __SIZEOF_PTHREAD_RWLOCK_T: usize = 4; + pub const __SIZEOF_PTHREAD_RWLOCKATTR_T: usize = 12; + pub const __SIZEOF_PTHREAD_BARRIER_T: usize = 32; + } else if #[cfg(target_os = "vita")] { + const __PTHREAD_INITIALIZER_BYTE: u8 = 0xff; + pub const __SIZEOF_PTHREAD_ATTR_T: usize = 4; + pub const __SIZEOF_PTHREAD_MUTEX_T: usize = 4; + pub const __SIZEOF_PTHREAD_MUTEXATTR_T: usize = 4; + pub const __SIZEOF_PTHREAD_COND_T: usize = 4; + pub const __SIZEOF_PTHREAD_CONDATTR_T: usize = 4; + pub const __SIZEOF_PTHREAD_RWLOCK_T: usize = 4; + pub const __SIZEOF_PTHREAD_RWLOCKATTR_T: usize = 4; + pub const __SIZEOF_PTHREAD_BARRIER_T: usize = 4; + } else if #[cfg(target_os = "rtems")] { + const __PTHREAD_INITIALIZER_BYTE: u8 = 0x00; + pub const __SIZEOF_PTHREAD_ATTR_T: usize = 96; + pub const __SIZEOF_PTHREAD_MUTEX_T: usize = 64; + pub const __SIZEOF_PTHREAD_MUTEXATTR_T: usize = 24; + pub const __SIZEOF_PTHREAD_COND_T: usize = 28; + pub const __SIZEOF_PTHREAD_CONDATTR_T: usize = 24; + pub const __SIZEOF_PTHREAD_RWLOCK_T: usize = 32; + pub const __SIZEOF_PTHREAD_RWLOCKATTR_T: usize = 8; + pub const __SIZEOF_PTHREAD_BARRIER_T: usize = 32; + } else { + const __PTHREAD_INITIALIZER_BYTE: u8 = 0; + pub const __SIZEOF_PTHREAD_ATTR_T: usize = 56; + pub const __SIZEOF_PTHREAD_MUTEX_T: usize = 40; + pub const __SIZEOF_PTHREAD_MUTEXATTR_T: usize = 4; + pub const __SIZEOF_PTHREAD_COND_T: usize = 48; + pub const __SIZEOF_PTHREAD_CONDATTR_T: usize = 4; + pub const __SIZEOF_PTHREAD_RWLOCK_T: usize = 56; + pub const __SIZEOF_PTHREAD_RWLOCKATTR_T: usize = 8; + pub const __SIZEOF_PTHREAD_BARRIER_T: usize = 32; + } +} + +pub const __SIZEOF_PTHREAD_BARRIERATTR_T: usize = 4; +pub const __PTHREAD_MUTEX_HAVE_PREV: usize = 1; +pub const __PTHREAD_RWLOCK_INT_FLAGS_SHARED: usize = 1; +pub const PTHREAD_MUTEX_NORMAL: c_int = 0; +pub const PTHREAD_MUTEX_RECURSIVE: c_int = 1; +pub const PTHREAD_MUTEX_ERRORCHECK: c_int = 2; + +cfg_if! 
{ + if #[cfg(any(target_os = "horizon", target_os = "espidf"))] { + pub const FD_SETSIZE: usize = 64; + } else if #[cfg(target_os = "vita")] { + pub const FD_SETSIZE: usize = 256; + } else { + pub const FD_SETSIZE: usize = 1024; + } +} +// intentionally not public, only used for fd_set +const ULONG_SIZE: usize = 32; + +// Other constants +pub const EPERM: c_int = 1; +pub const ENOENT: c_int = 2; +pub const ESRCH: c_int = 3; +pub const EINTR: c_int = 4; +pub const EIO: c_int = 5; +pub const ENXIO: c_int = 6; +pub const E2BIG: c_int = 7; +pub const ENOEXEC: c_int = 8; +pub const EBADF: c_int = 9; +pub const ECHILD: c_int = 10; +pub const EAGAIN: c_int = 11; +pub const ENOMEM: c_int = 12; +pub const EACCES: c_int = 13; +pub const EFAULT: c_int = 14; +pub const EBUSY: c_int = 16; +pub const EEXIST: c_int = 17; +pub const EXDEV: c_int = 18; +pub const ENODEV: c_int = 19; +pub const ENOTDIR: c_int = 20; +pub const EISDIR: c_int = 21; +pub const EINVAL: c_int = 22; +pub const ENFILE: c_int = 23; +pub const EMFILE: c_int = 24; +pub const ENOTTY: c_int = 25; +pub const ETXTBSY: c_int = 26; +pub const EFBIG: c_int = 27; +pub const ENOSPC: c_int = 28; +pub const ESPIPE: c_int = 29; +pub const EROFS: c_int = 30; +pub const EMLINK: c_int = 31; +pub const EPIPE: c_int = 32; +pub const EDOM: c_int = 33; +pub const ERANGE: c_int = 34; +pub const ENOMSG: c_int = 35; +pub const EIDRM: c_int = 36; +pub const EDEADLK: c_int = 45; +pub const ENOLCK: c_int = 46; +pub const ENOSTR: c_int = 60; +pub const ENODATA: c_int = 61; +pub const ETIME: c_int = 62; +pub const ENOSR: c_int = 63; +pub const ENOLINK: c_int = 67; +pub const EPROTO: c_int = 71; +pub const EMULTIHOP: c_int = 74; +pub const EBADMSG: c_int = 77; +pub const EFTYPE: c_int = 79; +pub const ENOSYS: c_int = 88; +pub const ENOTEMPTY: c_int = 90; +pub const ENAMETOOLONG: c_int = 91; +pub const ELOOP: c_int = 92; +pub const EOPNOTSUPP: c_int = 95; +pub const EPFNOSUPPORT: c_int = 96; +pub const ECONNRESET: c_int = 104; +pub const ENOBUFS: c_int = 105; +pub const EAFNOSUPPORT: c_int = 106; +pub const EPROTOTYPE: c_int = 107; +pub const ENOTSOCK: c_int = 108; +pub const ENOPROTOOPT: c_int = 109; +pub const ECONNREFUSED: c_int = 111; +pub const EADDRINUSE: c_int = 112; +pub const ECONNABORTED: c_int = 113; +pub const ENETUNREACH: c_int = 114; +pub const ENETDOWN: c_int = 115; +pub const ETIMEDOUT: c_int = 116; +pub const EHOSTDOWN: c_int = 117; +pub const EHOSTUNREACH: c_int = 118; +pub const EINPROGRESS: c_int = 119; +pub const EALREADY: c_int = 120; +pub const EDESTADDRREQ: c_int = 121; +pub const EMSGSIZE: c_int = 122; +pub const EPROTONOSUPPORT: c_int = 123; +pub const EADDRNOTAVAIL: c_int = 125; +pub const ENETRESET: c_int = 126; +pub const EISCONN: c_int = 127; +pub const ENOTCONN: c_int = 128; +pub const ETOOMANYREFS: c_int = 129; +pub const EDQUOT: c_int = 132; +pub const ESTALE: c_int = 133; +pub const ENOTSUP: c_int = 134; +pub const EILSEQ: c_int = 138; +pub const EOVERFLOW: c_int = 139; +pub const ECANCELED: c_int = 140; +pub const ENOTRECOVERABLE: c_int = 141; +pub const EOWNERDEAD: c_int = 142; +pub const EWOULDBLOCK: c_int = 11; + +pub const F_DUPFD: c_int = 0; +pub const F_GETFD: c_int = 1; +pub const F_SETFD: c_int = 2; +pub const F_GETFL: c_int = 3; +pub const F_SETFL: c_int = 4; +pub const F_GETOWN: c_int = 5; +pub const F_SETOWN: c_int = 6; +pub const F_GETLK: c_int = 7; +pub const F_SETLK: c_int = 8; +pub const F_SETLKW: c_int = 9; +pub const F_RGETLK: c_int = 10; +pub const F_RSETLK: c_int = 11; +pub const F_CNVT: c_int = 12; +pub const 
F_RSETLKW: c_int = 13; +pub const F_DUPFD_CLOEXEC: c_int = 14; + +pub const O_RDONLY: c_int = 0; +pub const O_WRONLY: c_int = 1; +pub const O_RDWR: c_int = 2; +pub const O_APPEND: c_int = 8; +pub const O_CREAT: c_int = 512; +pub const O_TRUNC: c_int = 1024; +pub const O_EXCL: c_int = 2048; +pub const O_SYNC: c_int = 8192; +pub const O_NONBLOCK: c_int = 16384; + +pub const O_ACCMODE: c_int = 3; +cfg_if! { + if #[cfg(target_os = "espidf")] { + pub const O_CLOEXEC: c_int = 0x40000; + } else { + pub const O_CLOEXEC: c_int = 0x80000; + } +} + +pub const RTLD_LAZY: c_int = 0x1; + +pub const STDIN_FILENO: c_int = 0; +pub const STDOUT_FILENO: c_int = 1; +pub const STDERR_FILENO: c_int = 2; + +pub const SEEK_SET: c_int = 0; +pub const SEEK_CUR: c_int = 1; +pub const SEEK_END: c_int = 2; + +pub const FIOCLEX: c_ulong = 0x20006601; +pub const FIONCLEX: c_ulong = 0x20006602; + +pub const S_BLKSIZE: mode_t = 1024; +pub const S_IREAD: mode_t = 0o0400; +pub const S_IWRITE: mode_t = 0o0200; +pub const S_IEXEC: mode_t = 0o0100; +pub const S_ENFMT: mode_t = 0o2000; +pub const S_IFMT: mode_t = 0o17_0000; +pub const S_IFDIR: mode_t = 0o4_0000; +pub const S_IFCHR: mode_t = 0o2_0000; +pub const S_IFBLK: mode_t = 0o6_0000; +pub const S_IFREG: mode_t = 0o10_0000; +pub const S_IFLNK: mode_t = 0o12_0000; +pub const S_IFSOCK: mode_t = 0o14_0000; +pub const S_IFIFO: mode_t = 0o1_0000; +pub const S_IRUSR: mode_t = 0o0400; +pub const S_IWUSR: mode_t = 0o0200; +pub const S_IXUSR: mode_t = 0o0100; +pub const S_IRGRP: mode_t = 0o0040; +pub const S_IWGRP: mode_t = 0o0020; +pub const S_IXGRP: mode_t = 0o0010; +pub const S_IROTH: mode_t = 0o0004; +pub const S_IWOTH: mode_t = 0o0002; +pub const S_IXOTH: mode_t = 0o0001; + +pub const SOL_TCP: c_int = 6; + +pub const PF_UNSPEC: c_int = 0; +pub const PF_INET: c_int = 2; +cfg_if! 
{ + if #[cfg(target_os = "espidf")] { + pub const PF_INET6: c_int = 10; + } else { + pub const PF_INET6: c_int = 23; + } +} + +pub const AF_UNSPEC: c_int = 0; +pub const AF_INET: c_int = 2; + +pub const CLOCK_REALTIME: crate::clockid_t = 1; +pub const CLOCK_MONOTONIC: crate::clockid_t = 4; +pub const CLOCK_BOOTTIME: crate::clockid_t = 4; + +pub const SOCK_STREAM: c_int = 1; +pub const SOCK_DGRAM: c_int = 2; + +pub const SHUT_RD: c_int = 0; +pub const SHUT_WR: c_int = 1; +pub const SHUT_RDWR: c_int = 2; + +pub const SO_BINTIME: c_int = 0x2000; +pub const SO_NO_OFFLOAD: c_int = 0x4000; +pub const SO_NO_DDP: c_int = 0x8000; +pub const SO_REUSEPORT_LB: c_int = 0x10000; +pub const SO_LABEL: c_int = 0x1009; +pub const SO_PEERLABEL: c_int = 0x1010; +pub const SO_LISTENQLIMIT: c_int = 0x1011; +pub const SO_LISTENQLEN: c_int = 0x1012; +pub const SO_LISTENINCQLEN: c_int = 0x1013; +pub const SO_SETFIB: c_int = 0x1014; +pub const SO_USER_COOKIE: c_int = 0x1015; +pub const SO_PROTOCOL: c_int = 0x1016; +pub const SO_PROTOTYPE: c_int = SO_PROTOCOL; +pub const SO_VENDOR: c_int = 0x80000000; +pub const SO_DEBUG: c_int = 0x01; +pub const SO_ACCEPTCONN: c_int = 0x0002; +pub const SO_REUSEADDR: c_int = 0x0004; +pub const SO_KEEPALIVE: c_int = 0x0008; +pub const SO_DONTROUTE: c_int = 0x0010; +pub const SO_BROADCAST: c_int = 0x0020; +pub const SO_USELOOPBACK: c_int = 0x0040; +pub const SO_LINGER: c_int = 0x0080; +pub const SO_OOBINLINE: c_int = 0x0100; +pub const SO_REUSEPORT: c_int = 0x0200; +pub const SO_TIMESTAMP: c_int = 0x0400; +pub const SO_NOSIGPIPE: c_int = 0x0800; +pub const SO_ACCEPTFILTER: c_int = 0x1000; +pub const SO_SNDBUF: c_int = 0x1001; +pub const SO_RCVBUF: c_int = 0x1002; +pub const SO_SNDLOWAT: c_int = 0x1003; +pub const SO_RCVLOWAT: c_int = 0x1004; +pub const SO_SNDTIMEO: c_int = 0x1005; +pub const SO_RCVTIMEO: c_int = 0x1006; +cfg_if! { + if #[cfg(target_os = "horizon")] { + pub const SO_ERROR: c_int = 0x1009; + } else { + pub const SO_ERROR: c_int = 0x1007; + } +} +pub const SO_TYPE: c_int = 0x1008; + +pub const SOCK_CLOEXEC: c_int = O_CLOEXEC; + +pub const INET_ADDRSTRLEN: c_int = 16; + +// https://github.com/bminor/newlib/blob/HEAD/newlib/libc/sys/linux/include/net/if.h#L121 +pub const IFF_UP: c_int = 0x1; // interface is up +pub const IFF_BROADCAST: c_int = 0x2; // broadcast address valid +pub const IFF_DEBUG: c_int = 0x4; // turn on debugging +pub const IFF_LOOPBACK: c_int = 0x8; // is a loopback net +pub const IFF_POINTOPOINT: c_int = 0x10; // interface is point-to-point link +pub const IFF_NOTRAILERS: c_int = 0x20; // avoid use of trailers +pub const IFF_RUNNING: c_int = 0x40; // resources allocated +pub const IFF_NOARP: c_int = 0x80; // no address resolution protocol +pub const IFF_PROMISC: c_int = 0x100; // receive all packets +pub const IFF_ALLMULTI: c_int = 0x200; // receive all multicast packets +pub const IFF_OACTIVE: c_int = 0x400; // transmission in progress +pub const IFF_SIMPLEX: c_int = 0x800; // can't hear own transmissions +pub const IFF_LINK0: c_int = 0x1000; // per link layer defined bit +pub const IFF_LINK1: c_int = 0x2000; // per link layer defined bit +pub const IFF_LINK2: c_int = 0x4000; // per link layer defined bit +pub const IFF_ALTPHYS: c_int = IFF_LINK2; // use alternate physical connection +pub const IFF_MULTICAST: c_int = 0x8000; // supports multicast + +cfg_if! 
{ + if #[cfg(target_os = "vita")] { + pub const TCP_NODELAY: c_int = 1; + pub const TCP_MAXSEG: c_int = 2; + } else if #[cfg(target_os = "espidf")] { + pub const TCP_NODELAY: c_int = 1; + pub const TCP_MAXSEG: c_int = 8194; + } else { + pub const TCP_NODELAY: c_int = 8193; + pub const TCP_MAXSEG: c_int = 8194; + } +} + +pub const TCP_NOPUSH: c_int = 4; +pub const TCP_NOOPT: c_int = 8; +cfg_if! { + if #[cfg(target_os = "espidf")] { + pub const TCP_KEEPIDLE: c_int = 3; + pub const TCP_KEEPINTVL: c_int = 4; + pub const TCP_KEEPCNT: c_int = 5; + } else { + pub const TCP_KEEPIDLE: c_int = 256; + pub const TCP_KEEPINTVL: c_int = 512; + pub const TCP_KEEPCNT: c_int = 1024; + } +} + +cfg_if! { + if #[cfg(target_os = "horizon")] { + pub const IP_TOS: c_int = 7; + } else if #[cfg(target_os = "espidf")] { + pub const IP_TOS: c_int = 1; + } else { + pub const IP_TOS: c_int = 3; + } +} +cfg_if! { + if #[cfg(target_os = "vita")] { + pub const IP_TTL: c_int = 4; + } else if #[cfg(target_os = "espidf")] { + pub const IP_TTL: c_int = 2; + } else { + pub const IP_TTL: c_int = 8; + } +} + +cfg_if! { + if #[cfg(target_os = "espidf")] { + pub const IP_MULTICAST_IF: c_int = 6; + pub const IP_MULTICAST_TTL: c_int = 5; + pub const IP_MULTICAST_LOOP: c_int = 7; + } else { + pub const IP_MULTICAST_IF: c_int = 9; + pub const IP_MULTICAST_TTL: c_int = 10; + pub const IP_MULTICAST_LOOP: c_int = 11; + } +} + +cfg_if! { + if #[cfg(target_os = "vita")] { + pub const IP_ADD_MEMBERSHIP: c_int = 12; + pub const IP_DROP_MEMBERSHIP: c_int = 13; + } else if #[cfg(target_os = "espidf")] { + pub const IP_ADD_MEMBERSHIP: c_int = 3; + pub const IP_DROP_MEMBERSHIP: c_int = 4; + } else { + pub const IP_ADD_MEMBERSHIP: c_int = 11; + pub const IP_DROP_MEMBERSHIP: c_int = 12; + } +} +pub const IPV6_UNICAST_HOPS: c_int = 4; +cfg_if! { + if #[cfg(target_os = "espidf")] { + pub const IPV6_MULTICAST_IF: c_int = 768; + pub const IPV6_MULTICAST_HOPS: c_int = 769; + pub const IPV6_MULTICAST_LOOP: c_int = 770; + } else { + pub const IPV6_MULTICAST_IF: c_int = 9; + pub const IPV6_MULTICAST_HOPS: c_int = 10; + pub const IPV6_MULTICAST_LOOP: c_int = 11; + } +} +pub const IPV6_V6ONLY: c_int = 27; +pub const IPV6_JOIN_GROUP: c_int = 12; +pub const IPV6_LEAVE_GROUP: c_int = 13; +pub const IPV6_ADD_MEMBERSHIP: c_int = 12; +pub const IPV6_DROP_MEMBERSHIP: c_int = 13; + +cfg_if! { + if #[cfg(target_os = "espidf")] { + pub const HOST_NOT_FOUND: c_int = 210; + pub const NO_DATA: c_int = 211; + pub const NO_RECOVERY: c_int = 212; + pub const TRY_AGAIN: c_int = 213; + } else { + pub const HOST_NOT_FOUND: c_int = 1; + pub const NO_DATA: c_int = 2; + pub const NO_RECOVERY: c_int = 3; + pub const TRY_AGAIN: c_int = 4; + } +} +pub const NO_ADDRESS: c_int = 2; + +pub const AI_PASSIVE: c_int = 1; +pub const AI_CANONNAME: c_int = 2; +pub const AI_NUMERICHOST: c_int = 4; +cfg_if! { + if #[cfg(target_os = "espidf")] { + pub const AI_NUMERICSERV: c_int = 8; + pub const AI_ADDRCONFIG: c_int = 64; + } else { + pub const AI_NUMERICSERV: c_int = 0; + pub const AI_ADDRCONFIG: c_int = 0; + } +} + +pub const NI_MAXHOST: c_int = 1025; +pub const NI_MAXSERV: c_int = 32; +pub const NI_NOFQDN: c_int = 1; +pub const NI_NUMERICHOST: c_int = 2; +pub const NI_NAMEREQD: c_int = 4; +cfg_if! { + if #[cfg(target_os = "espidf")] { + pub const NI_NUMERICSERV: c_int = 8; + pub const NI_DGRAM: c_int = 16; + } else { + pub const NI_NUMERICSERV: c_int = 0; + pub const NI_DGRAM: c_int = 0; + } +} + +cfg_if! 
{ + // Defined in vita/mod.rs for "vita" + if #[cfg(target_os = "espidf")] { + pub const EAI_FAMILY: c_int = 204; + pub const EAI_MEMORY: c_int = 203; + pub const EAI_NONAME: c_int = 200; + pub const EAI_SOCKTYPE: c_int = 10; + } else if #[cfg(not(target_os = "vita"))] { + pub const EAI_FAMILY: c_int = -303; + pub const EAI_MEMORY: c_int = -304; + pub const EAI_NONAME: c_int = -305; + pub const EAI_SOCKTYPE: c_int = -307; + } +} + +pub const EXIT_SUCCESS: c_int = 0; +pub const EXIT_FAILURE: c_int = 1; + +pub const PRIO_PROCESS: c_int = 0; +pub const PRIO_PGRP: c_int = 1; +pub const PRIO_USER: c_int = 2; + +f! { + pub fn FD_CLR(fd: c_int, set: *mut fd_set) -> () { + let bits = size_of_val(&(*set).fds_bits[0]) * 8; + let fd = fd as usize; + (*set).fds_bits[fd / bits] &= !(1 << (fd % bits)); + return; + } + + pub fn FD_ISSET(fd: c_int, set: *const fd_set) -> bool { + let bits = size_of_val(&(*set).fds_bits[0]) * 8; + let fd = fd as usize; + return ((*set).fds_bits[fd / bits] & (1 << (fd % bits))) != 0; + } + + pub fn FD_SET(fd: c_int, set: *mut fd_set) -> () { + let bits = size_of_val(&(*set).fds_bits[0]) * 8; + let fd = fd as usize; + (*set).fds_bits[fd / bits] |= 1 << (fd % bits); + return; + } + + pub fn FD_ZERO(set: *mut fd_set) -> () { + for slot in (*set).fds_bits.iter_mut() { + *slot = 0; + } + } +} + +extern "C" { + pub fn getrlimit(resource: c_int, rlim: *mut crate::rlimit) -> c_int; + pub fn setrlimit(resource: c_int, rlim: *const crate::rlimit) -> c_int; + + #[cfg_attr(target_os = "linux", link_name = "__xpg_strerror_r")] + pub fn strerror_r(errnum: c_int, buf: *mut c_char, buflen: size_t) -> c_int; + + pub fn sem_destroy(sem: *mut sem_t) -> c_int; + pub fn sem_init(sem: *mut sem_t, pshared: c_int, value: c_uint) -> c_int; + + pub fn abs(i: c_int) -> c_int; + pub fn labs(i: c_long) -> c_long; + pub fn rand() -> c_int; + pub fn srand(seed: c_uint); + + #[cfg(not(all(target_arch = "powerpc", target_vendor = "nintendo")))] + #[cfg_attr(target_os = "espidf", link_name = "lwip_bind")] + pub fn bind(fd: c_int, addr: *const sockaddr, len: socklen_t) -> c_int; + pub fn clock_settime(clock_id: crate::clockid_t, tp: *const crate::timespec) -> c_int; + pub fn clock_gettime(clock_id: crate::clockid_t, tp: *mut crate::timespec) -> c_int; + pub fn clock_getres(clock_id: crate::clockid_t, res: *mut crate::timespec) -> c_int; + #[cfg_attr(target_os = "espidf", link_name = "lwip_close")] + pub fn closesocket(sockfd: c_int) -> c_int; + pub fn ioctl(fd: c_int, request: c_ulong, ...) 
-> c_int;
+    #[cfg(not(all(target_arch = "powerpc", target_vendor = "nintendo")))]
+    #[cfg_attr(target_os = "espidf", link_name = "lwip_recvfrom")]
+    pub fn recvfrom(
+        fd: c_int,
+        buf: *mut c_void,
+        n: usize,
+        flags: c_int,
+        addr: *mut sockaddr,
+        addr_len: *mut socklen_t,
+    ) -> isize;
+    #[cfg(not(all(target_arch = "powerpc", target_vendor = "nintendo")))]
+    pub fn getnameinfo(
+        sa: *const sockaddr,
+        salen: socklen_t,
+        host: *mut c_char,
+        hostlen: socklen_t,
+        serv: *mut c_char,
+        servlen: socklen_t,
+        flags: c_int,
+    ) -> c_int;
+    pub fn memalign(align: size_t, size: size_t) -> *mut c_void;
+
+    // DIFF(main): changed to `*const *mut` in e77f551de9
+    pub fn fexecve(fd: c_int, argv: *const *const c_char, envp: *const *const c_char) -> c_int;
+
+    pub fn gettimeofday(tp: *mut crate::timeval, tz: *mut c_void) -> c_int;
+    pub fn getgrgid_r(
+        gid: crate::gid_t,
+        grp: *mut crate::group,
+        buf: *mut c_char,
+        buflen: size_t,
+        result: *mut *mut crate::group,
+    ) -> c_int;
+    pub fn sigaltstack(ss: *const stack_t, oss: *mut stack_t) -> c_int;
+    pub fn sem_close(sem: *mut sem_t) -> c_int;
+    pub fn getdtablesize() -> c_int;
+    pub fn getgrnam_r(
+        name: *const c_char,
+        grp: *mut crate::group,
+        buf: *mut c_char,
+        buflen: size_t,
+        result: *mut *mut crate::group,
+    ) -> c_int;
+    pub fn pthread_sigmask(how: c_int, set: *const sigset_t, oldset: *mut sigset_t) -> c_int;
+    pub fn sem_open(name: *const c_char, oflag: c_int, ...) -> *mut sem_t;
+    pub fn getgrnam(name: *const c_char) -> *mut crate::group;
+    pub fn pthread_kill(thread: crate::pthread_t, sig: c_int) -> c_int;
+    pub fn sem_unlink(name: *const c_char) -> c_int;
+    pub fn daemon(nochdir: c_int, noclose: c_int) -> c_int;
+    pub fn getpwnam_r(
+        name: *const c_char,
+        pwd: *mut passwd,
+        buf: *mut c_char,
+        buflen: size_t,
+        result: *mut *mut passwd,
+    ) -> c_int;
+    pub fn getpwuid_r(
+        uid: crate::uid_t,
+        pwd: *mut passwd,
+        buf: *mut c_char,
+        buflen: size_t,
+        result: *mut *mut passwd,
+    ) -> c_int;
+    pub fn sigwait(set: *const sigset_t, sig: *mut c_int) -> c_int;
+    pub fn pthread_atfork(
+        prepare: Option<unsafe extern "C" fn()>,
+        parent: Option<unsafe extern "C" fn()>,
+        child: Option<unsafe extern "C" fn()>,
+    ) -> c_int;
+    pub fn getgrgid(gid: crate::gid_t) -> *mut crate::group;
+    pub fn popen(command: *const c_char, mode: *const c_char) -> *mut crate::FILE;
+    pub fn uname(buf: *mut crate::utsname) -> c_int;
+}
+
+mod generic;
+
+cfg_if! {
+    if #[cfg(target_os = "espidf")] {
+        mod espidf;
+        pub use self::espidf::*;
+    } else if #[cfg(target_os = "horizon")] {
+        mod horizon;
+        pub use self::horizon::*;
+    } else if #[cfg(target_os = "vita")] {
+        mod vita;
+        pub use self::vita::*;
+    } else if #[cfg(target_arch = "arm")] {
+        mod arm;
+        pub use self::arm::*;
+    } else if #[cfg(target_arch = "aarch64")] {
+        mod aarch64;
+        pub use self::aarch64::*;
+    } else if #[cfg(target_arch = "powerpc")] {
+        mod powerpc;
+        pub use self::powerpc::*;
+    } else {
+        // Only tested on ARM so far. Other platforms might have different
+        // definitions for types and constants.
+        pub use target_arch_not_implemented;
+    }
+}
+
+cfg_if!
{ + if #[cfg(target_os = "rtems")] { + mod rtems; + pub use self::rtems::*; + } +} diff --git a/vendor/libc/src/unix/newlib/powerpc/mod.rs b/vendor/libc/src/unix/newlib/powerpc/mod.rs new file mode 100644 index 00000000000000..c4d4a2ed07c5ee --- /dev/null +++ b/vendor/libc/src/unix/newlib/powerpc/mod.rs @@ -0,0 +1,14 @@ +use crate::prelude::*; + +pub type clock_t = c_ulong; +pub type wchar_t = c_int; + +pub use crate::unix::newlib::generic::{dirent, sigset_t, stat}; + +// the newlib shipped with devkitPPC does not support the following components: +// - sockaddr +// - AF_INET6 +// - FIONBIO +// - POLL* +// - SOL_SOCKET +// - MSG_* diff --git a/vendor/libc/src/unix/newlib/rtems/mod.rs b/vendor/libc/src/unix/newlib/rtems/mod.rs new file mode 100644 index 00000000000000..0e23352744149b --- /dev/null +++ b/vendor/libc/src/unix/newlib/rtems/mod.rs @@ -0,0 +1,146 @@ +// defined in architecture specific module + +use crate::prelude::*; + +s! { + pub struct sockaddr_un { + pub sun_family: crate::sa_family_t, + pub sun_path: [c_char; 108usize], + } +} + +pub const AF_UNIX: c_int = 1; + +pub const RTLD_DEFAULT: *mut c_void = -2isize as *mut c_void; + +pub const UTIME_OMIT: c_long = -1; +pub const AT_FDCWD: c_int = -2; + +pub const O_DIRECTORY: c_int = 0x200000; +pub const O_NOFOLLOW: c_int = 0x100000; + +pub const AT_EACCESS: c_int = 1; +pub const AT_SYMLINK_NOFOLLOW: c_int = 2; +pub const AT_SYMLINK_FOLLOW: c_int = 4; +pub const AT_REMOVEDIR: c_int = 8; + +// signal.h +pub const SIG_BLOCK: c_int = 1; +pub const SIG_UNBLOCK: c_int = 2; +pub const SIG_SETMASK: c_int = 0; +pub const SIGHUP: c_int = 1; +pub const SIGINT: c_int = 2; +pub const SIGQUIT: c_int = 3; +pub const SIGILL: c_int = 4; +pub const SIGTRAP: c_int = 5; +pub const SIGABRT: c_int = 6; +pub const SIGEMT: c_int = 7; +pub const SIGFPE: c_int = 8; +pub const SIGKILL: c_int = 9; +pub const SIGBUS: c_int = 10; +pub const SIGSEGV: c_int = 11; +pub const SIGSYS: c_int = 12; +pub const SIGPIPE: c_int = 13; +pub const SIGALRM: c_int = 14; +pub const SIGTERM: c_int = 15; +pub const SIGURG: c_int = 16; +pub const SIGSTOP: c_int = 17; +pub const SIGTSTP: c_int = 18; +pub const SIGCONT: c_int = 19; +pub const SIGCHLD: c_int = 20; +pub const SIGCLD: c_int = 20; +pub const SIGTTIN: c_int = 21; +pub const SIGTTOU: c_int = 22; +pub const SIGIO: c_int = 23; +pub const SIGWINCH: c_int = 24; +pub const SIGUSR1: c_int = 25; +pub const SIGUSR2: c_int = 26; +pub const SIGRTMIN: c_int = 27; +pub const SIGRTMAX: c_int = 31; +pub const SIGXCPU: c_int = 24; +pub const SIGXFSZ: c_int = 25; +pub const SIGVTALRM: c_int = 26; +pub const SIGPROF: c_int = 27; + +pub const SA_NOCLDSTOP: c_ulong = 0x00000001; +pub const SA_SIGINFO: c_ulong = 0x00000002; +pub const SA_ONSTACK: c_ulong = 0x00000004; + +pub const EAI_AGAIN: c_int = 2; +pub const EAI_BADFLAGS: c_int = 3; +pub const EAI_FAIL: c_int = 4; +pub const EAI_SERVICE: c_int = 9; +pub const EAI_SYSTEM: c_int = 11; +pub const EAI_OVERFLOW: c_int = 14; + +pub const _SC_PAGESIZE: c_int = 8; +pub const _SC_GETPW_R_SIZE_MAX: c_int = 51; +pub const PTHREAD_STACK_MIN: size_t = 0; + +// sys/wait.h +pub const WNOHANG: c_int = 1; +pub const WUNTRACED: c_int = 2; + +// sys/socket.h +pub const SOMAXCONN: c_int = 128; + +safe_f! 
{ + pub const fn WIFSTOPPED(status: c_int) -> bool { + (status & 0xff) == 0x7f + } + + pub const fn WSTOPSIG(status: c_int) -> c_int { + // (status >> 8) & 0xff + WEXITSTATUS(status) + } + + pub const fn WIFSIGNALED(status: c_int) -> bool { + ((status & 0x7f) > 0) && ((status & 0x7f) < 0x7f) + } + + pub const fn WTERMSIG(status: c_int) -> c_int { + status & 0x7f + } + + pub const fn WIFEXITED(status: c_int) -> bool { + (status & 0xff) == 0 + } + + pub const fn WEXITSTATUS(status: c_int) -> c_int { + (status >> 8) & 0xff + } + + // RTEMS doesn't have native WIFCONTINUED. + pub const fn WIFCONTINUED(_status: c_int) -> bool { + true + } + + // RTEMS doesn't have native WCOREDUMP. + pub const fn WCOREDUMP(_status: c_int) -> bool { + false + } +} + +extern "C" { + pub fn futimens(fd: c_int, times: *const crate::timespec) -> c_int; + pub fn writev(fd: c_int, iov: *const crate::iovec, iovcnt: c_int) -> ssize_t; + pub fn readv(fd: c_int, iov: *const crate::iovec, iovcnt: c_int) -> ssize_t; + + pub fn pthread_create( + native: *mut crate::pthread_t, + attr: *const crate::pthread_attr_t, + f: extern "C" fn(_: *mut c_void) -> *mut c_void, + value: *mut c_void, + ) -> c_int; + + pub fn pthread_condattr_setclock( + attr: *mut crate::pthread_condattr_t, + clock_id: crate::clockid_t, + ) -> c_int; + + pub fn getentropy(buf: *mut c_void, buflen: size_t) -> c_int; + + pub fn arc4random_buf(buf: *mut core::ffi::c_void, nbytes: size_t); + + pub fn setgroups(ngroups: c_int, grouplist: *const crate::gid_t) -> c_int; +} diff --git a/vendor/libc/src/unix/newlib/vita/mod.rs b/vendor/libc/src/unix/newlib/vita/mod.rs new file mode 100644 index 00000000000000..62cd300e1d6f0f --- /dev/null +++ b/vendor/libc/src/unix/newlib/vita/mod.rs @@ -0,0 +1,235 @@ +use crate::off_t; +use crate::prelude::*; + +pub type clock_t = c_long; + +pub type wchar_t = u32; + +pub type sigset_t = c_ulong; + +s! 
{ + pub struct msghdr { + pub msg_name: *mut c_void, + pub msg_namelen: crate::socklen_t, + pub msg_iov: *mut crate::iovec, + pub msg_iovlen: c_int, + pub msg_control: *mut c_void, + pub msg_controllen: crate::socklen_t, + pub msg_flags: c_int, + } + + pub struct sockaddr { + pub sa_len: u8, + pub sa_family: crate::sa_family_t, + pub sa_data: [c_char; 14], + } + + pub struct sockaddr_in6 { + pub sin6_len: u8, + pub sin6_family: crate::sa_family_t, + pub sin6_port: crate::in_port_t, + pub sin6_flowinfo: u32, + pub sin6_addr: crate::in6_addr, + pub sin6_vport: crate::in_port_t, + pub sin6_scope_id: u32, + } + + pub struct sockaddr_in { + pub sin_len: u8, + pub sin_family: crate::sa_family_t, + pub sin_port: crate::in_port_t, + pub sin_addr: crate::in_addr, + pub sin_vport: crate::in_port_t, + pub sin_zero: [u8; 6], + } + + pub struct sockaddr_un { + pub ss_len: u8, + pub sun_family: crate::sa_family_t, + pub sun_path: [c_char; 108usize], + } + + pub struct sockaddr_storage { + pub ss_len: u8, + pub ss_family: crate::sa_family_t, + pub __ss_pad1: [u8; 2], + pub __ss_align: i64, + pub __ss_pad2: [u8; 116], + } + + pub struct sched_param { + pub sched_priority: c_int, + } + + pub struct stat { + pub st_dev: crate::dev_t, + pub st_ino: crate::ino_t, + pub st_mode: crate::mode_t, + pub st_nlink: crate::nlink_t, + pub st_uid: crate::uid_t, + pub st_gid: crate::gid_t, + pub st_rdev: crate::dev_t, + pub st_size: off_t, + pub st_atime: crate::time_t, + pub st_mtime: crate::time_t, + pub st_ctime: crate::time_t, + pub st_blksize: crate::blksize_t, + pub st_blocks: crate::blkcnt_t, + pub st_spare4: [c_long; 2usize], + } + + #[repr(align(8))] + pub struct dirent { + __offset: [u8; 88], + pub d_name: [c_char; 256usize], + __pad: [u8; 8], + } +} + +pub const AF_UNIX: c_int = 1; +pub const AF_INET6: c_int = 24; + +pub const SOCK_RAW: c_int = 3; +pub const SOCK_RDM: c_int = 4; +pub const SOCK_SEQPACKET: c_int = 5; + +pub const SOMAXCONN: c_int = 128; + +pub const FIONBIO: c_ulong = 1; + +pub const POLLIN: c_short = 0x0001; +pub const POLLPRI: c_short = POLLIN; +pub const POLLOUT: c_short = 0x0004; +pub const POLLRDNORM: c_short = POLLIN; +pub const POLLRDBAND: c_short = POLLIN; +pub const POLLWRNORM: c_short = POLLOUT; +pub const POLLWRBAND: c_short = POLLOUT; +pub const POLLERR: c_short = 0x0008; +pub const POLLHUP: c_short = 0x0010; +pub const POLLNVAL: c_short = 0x0020; + +pub const RTLD_DEFAULT: *mut c_void = ptr::null_mut(); + +pub const SOL_SOCKET: c_int = 0xffff; +pub const SO_NONBLOCK: c_int = 0x1100; + +pub const MSG_OOB: c_int = 0x1; +pub const MSG_PEEK: c_int = 0x2; +pub const MSG_DONTROUTE: c_int = 0x4; +pub const MSG_EOR: c_int = 0x8; +pub const MSG_TRUNC: c_int = 0x10; +pub const MSG_CTRUNC: c_int = 0x20; +pub const MSG_WAITALL: c_int = 0x40; +pub const MSG_DONTWAIT: c_int = 0x80; +pub const MSG_BCAST: c_int = 0x100; +pub const MSG_MCAST: c_int = 0x200; + +pub const UTIME_OMIT: c_long = -1; +pub const AT_FDCWD: c_int = -2; + +pub const O_DIRECTORY: c_int = 0x200000; +pub const O_NOFOLLOW: c_int = 0x100000; + +pub const AT_EACCESS: c_int = 1; +pub const AT_SYMLINK_NOFOLLOW: c_int = 2; +pub const AT_SYMLINK_FOLLOW: c_int = 4; +pub const AT_REMOVEDIR: c_int = 8; + +pub const SIGHUP: c_int = 1; +pub const SIGINT: c_int = 2; +pub const SIGQUIT: c_int = 3; +pub const SIGILL: c_int = 4; +pub const SIGTRAP: c_int = 5; +pub const SIGABRT: c_int = 6; +pub const SIGEMT: c_int = 7; +pub const SIGFPE: c_int = 8; +pub const SIGKILL: c_int = 9; +pub const SIGBUS: c_int = 10; +pub const SIGSEGV: c_int = 11; 
+pub const SIGSYS: c_int = 12; +pub const SIGPIPE: c_int = 13; +pub const SIGALRM: c_int = 14; +pub const SIGTERM: c_int = 15; + +pub const EAI_BADFLAGS: c_int = -1; +pub const EAI_NONAME: c_int = -2; +pub const EAI_AGAIN: c_int = -3; +pub const EAI_FAIL: c_int = -4; +pub const EAI_NODATA: c_int = -5; +pub const EAI_FAMILY: c_int = -6; +pub const EAI_SOCKTYPE: c_int = -7; +pub const EAI_SERVICE: c_int = -8; +pub const EAI_ADDRFAMILY: c_int = -9; +pub const EAI_MEMORY: c_int = -10; +pub const EAI_SYSTEM: c_int = -11; +pub const EAI_OVERFLOW: c_int = -12; + +pub const _SC_PAGESIZE: c_int = 8; +pub const _SC_GETPW_R_SIZE_MAX: c_int = 51; +pub const PTHREAD_STACK_MIN: size_t = 32 * 1024; + +pub const IP_HDRINCL: c_int = 2; + +extern "C" { + pub fn futimens(fd: c_int, times: *const crate::timespec) -> c_int; + pub fn writev(fd: c_int, iov: *const crate::iovec, iovcnt: c_int) -> ssize_t; + pub fn readv(fd: c_int, iov: *const crate::iovec, iovcnt: c_int) -> ssize_t; + + pub fn sendmsg(s: c_int, msg: *const crate::msghdr, flags: c_int) -> ssize_t; + pub fn recvmsg(s: c_int, msg: *mut crate::msghdr, flags: c_int) -> ssize_t; + + pub fn pthread_create( + native: *mut crate::pthread_t, + attr: *const crate::pthread_attr_t, + f: extern "C" fn(_: *mut c_void) -> *mut c_void, + value: *mut c_void, + ) -> c_int; + + pub fn pthread_attr_getschedparam( + attr: *const crate::pthread_attr_t, + param: *mut sched_param, + ) -> c_int; + + pub fn pthread_attr_setschedparam( + attr: *mut crate::pthread_attr_t, + param: *const sched_param, + ) -> c_int; + + pub fn pthread_attr_getprocessorid_np( + attr: *const crate::pthread_attr_t, + processor_id: *mut c_int, + ) -> c_int; + + pub fn pthread_attr_setprocessorid_np( + attr: *mut crate::pthread_attr_t, + processor_id: c_int, + ) -> c_int; + + pub fn pthread_getschedparam( + native: crate::pthread_t, + policy: *mut c_int, + param: *mut crate::sched_param, + ) -> c_int; + + pub fn pthread_setschedparam( + native: crate::pthread_t, + policy: c_int, + param: *const crate::sched_param, + ) -> c_int; + + pub fn pthread_condattr_getclock( + attr: *const crate::pthread_condattr_t, + clock_id: *mut crate::clockid_t, + ) -> c_int; + + pub fn pthread_condattr_setclock( + attr: *mut crate::pthread_condattr_t, + clock_id: crate::clockid_t, + ) -> c_int; + + pub fn pthread_getprocessorid_np() -> c_int; + + pub fn getentropy(buf: *mut c_void, buflen: size_t) -> c_int; + + pub fn pipe2(fds: *mut c_int, flags: c_int) -> c_int; +} diff --git a/vendor/libc/src/unix/nto/aarch64.rs b/vendor/libc/src/unix/nto/aarch64.rs new file mode 100644 index 00000000000000..559ab6e49a45dc --- /dev/null +++ b/vendor/libc/src/unix/nto/aarch64.rs @@ -0,0 +1,35 @@ +use crate::prelude::*; + +pub type wchar_t = u32; +pub type time_t = i64; + +s! 
{ + pub struct aarch64_qreg_t { + pub qlo: u64, + pub qhi: u64, + } + + pub struct aarch64_fpu_registers { + pub reg: [crate::aarch64_qreg_t; 32], + pub fpsr: u32, + pub fpcr: u32, + } + + pub struct aarch64_cpu_registers { + pub gpr: [u64; 32], + pub elr: u64, + pub pstate: u64, + } + + #[repr(align(16))] + pub struct mcontext_t { + pub cpu: crate::aarch64_cpu_registers, + pub fpu: crate::aarch64_fpu_registers, + } + + pub struct stack_t { + pub ss_sp: *mut c_void, + pub ss_size: size_t, + pub ss_flags: c_int, + } +} diff --git a/vendor/libc/src/unix/nto/mod.rs b/vendor/libc/src/unix/nto/mod.rs new file mode 100644 index 00000000000000..75f5b56902f7f2 --- /dev/null +++ b/vendor/libc/src/unix/nto/mod.rs @@ -0,0 +1,3406 @@ +use crate::prelude::*; + +pub type clock_t = u32; + +pub type sa_family_t = u8; +pub type speed_t = c_uint; +pub type tcflag_t = c_uint; +pub type clockid_t = c_int; +pub type timer_t = c_int; +pub type key_t = c_uint; +pub type id_t = c_int; + +pub type useconds_t = u32; +pub type dev_t = u32; +pub type socklen_t = u32; +pub type mode_t = u32; +pub type rlim64_t = u64; +pub type mqd_t = c_int; +pub type nfds_t = c_uint; +pub type idtype_t = c_uint; +pub type errno_t = c_int; +pub type rsize_t = c_ulong; + +pub type Elf32_Half = u16; +pub type Elf32_Word = u32; +pub type Elf32_Off = u32; +pub type Elf32_Addr = u32; +pub type Elf32_Lword = u64; +pub type Elf32_Sword = i32; + +pub type Elf64_Half = u16; +pub type Elf64_Word = u32; +pub type Elf64_Off = u64; +pub type Elf64_Addr = u64; +pub type Elf64_Xword = u64; +pub type Elf64_Sxword = i64; +pub type Elf64_Lword = u64; +pub type Elf64_Sword = i32; + +pub type Elf32_Section = u16; +pub type Elf64_Section = u16; + +pub type _Time32t = u32; + +pub type pthread_t = c_int; +pub type regoff_t = ssize_t; + +pub type nlink_t = u32; +pub type blksize_t = u32; +pub type suseconds_t = i32; + +pub type ino_t = u64; +pub type off_t = i64; +pub type blkcnt_t = u64; +pub type msgqnum_t = u64; +pub type msglen_t = u64; +pub type fsblkcnt_t = u64; +pub type fsfilcnt_t = u64; +pub type rlim_t = u64; +pub type posix_spawn_file_actions_t = *mut c_void; +pub type posix_spawnattr_t = crate::uintptr_t; + +pub type pthread_mutex_t = crate::sync_t; +pub type pthread_mutexattr_t = crate::_sync_attr; +pub type pthread_cond_t = crate::sync_t; +pub type pthread_condattr_t = crate::_sync_attr; +pub type pthread_rwlockattr_t = crate::_sync_attr; +pub type pthread_key_t = c_int; +pub type pthread_spinlock_t = sync_t; +pub type pthread_barrierattr_t = _sync_attr; +pub type sem_t = sync_t; + +pub type nl_item = c_int; + +#[derive(Debug)] +pub enum timezone {} +impl Copy for timezone {} +impl Clone for timezone { + fn clone(&self) -> timezone { + *self + } +} + +s! 
{ + pub struct dirent_extra { + pub d_datalen: u16, + pub d_type: u16, + pub d_reserved: u32, + } + + pub struct stat { + pub st_ino: crate::ino_t, + pub st_size: off_t, + pub st_dev: crate::dev_t, + pub st_rdev: crate::dev_t, + pub st_uid: crate::uid_t, + pub st_gid: crate::gid_t, + pub __old_st_mtime: crate::_Time32t, + pub __old_st_atime: crate::_Time32t, + pub __old_st_ctime: crate::_Time32t, + pub st_mode: mode_t, + pub st_nlink: crate::nlink_t, + pub st_blocksize: crate::blksize_t, + pub st_nblocks: i32, + pub st_blksize: crate::blksize_t, + pub st_blocks: crate::blkcnt_t, + pub st_mtim: crate::timespec, + pub st_atim: crate::timespec, + pub st_ctim: crate::timespec, + } + + pub struct ip_mreq { + pub imr_multiaddr: in_addr, + pub imr_interface: in_addr, + } + + #[cfg_attr(any(target_env = "nto71", target_env = "nto70"), repr(packed))] + pub struct in_addr { + pub s_addr: crate::in_addr_t, + } + + pub struct sockaddr { + pub sa_len: u8, + pub sa_family: sa_family_t, + pub sa_data: [c_char; 14], + } + + #[cfg(not(target_env = "nto71_iosock"))] + pub struct sockaddr_in { + pub sin_len: u8, + pub sin_family: sa_family_t, + pub sin_port: crate::in_port_t, + pub sin_addr: crate::in_addr, + pub sin_zero: [i8; 8], + } + + #[cfg(target_env = "nto71_iosock")] + pub struct sockaddr_in { + pub sin_len: u8, + pub sin_family: sa_family_t, + pub sin_port: crate::in_port_t, + pub sin_addr: crate::in_addr, + pub sin_zero: [c_char; 8], + } + + pub struct sockaddr_in6 { + pub sin6_len: u8, + pub sin6_family: sa_family_t, + pub sin6_port: crate::in_port_t, + pub sin6_flowinfo: u32, + pub sin6_addr: crate::in6_addr, + pub sin6_scope_id: u32, + } + + // The order of the `ai_addr` field in this struct is crucial + // for converting between the Rust and C types. + pub struct addrinfo { + pub ai_flags: c_int, + pub ai_family: c_int, + pub ai_socktype: c_int, + pub ai_protocol: c_int, + pub ai_addrlen: socklen_t, + pub ai_canonname: *mut c_char, + pub ai_addr: *mut crate::sockaddr, + pub ai_next: *mut addrinfo, + } + + pub struct fd_set { + fds_bits: [c_uint; 2 * FD_SETSIZE as usize / ULONG_SIZE], + } + + pub struct tm { + pub tm_sec: c_int, + pub tm_min: c_int, + pub tm_hour: c_int, + pub tm_mday: c_int, + pub tm_mon: c_int, + pub tm_year: c_int, + pub tm_wday: c_int, + pub tm_yday: c_int, + pub tm_isdst: c_int, + pub tm_gmtoff: c_long, + pub tm_zone: *const c_char, + } + + #[repr(align(8))] + pub struct sched_param { + pub sched_priority: c_int, + pub sched_curpriority: c_int, + pub reserved: [c_int; 10], + } + + #[repr(align(8))] + pub struct __sched_param { + pub __sched_priority: c_int, + pub __sched_curpriority: c_int, + pub reserved: [c_int; 10], + } + + pub struct Dl_info { + pub dli_fname: *const c_char, + pub dli_fbase: *mut c_void, + pub dli_sname: *const c_char, + pub dli_saddr: *mut c_void, + } + + pub struct lconv { + pub currency_symbol: *mut c_char, + pub int_curr_symbol: *mut c_char, + pub mon_decimal_point: *mut c_char, + pub mon_grouping: *mut c_char, + pub mon_thousands_sep: *mut c_char, + pub negative_sign: *mut c_char, + pub positive_sign: *mut c_char, + pub frac_digits: c_char, + pub int_frac_digits: c_char, + pub n_cs_precedes: c_char, + pub n_sep_by_space: c_char, + pub n_sign_posn: c_char, + pub p_cs_precedes: c_char, + pub p_sep_by_space: c_char, + pub p_sign_posn: c_char, + + pub int_n_cs_precedes: c_char, + pub int_n_sep_by_space: c_char, + pub int_n_sign_posn: c_char, + pub int_p_cs_precedes: c_char, + pub int_p_sep_by_space: c_char, + pub int_p_sign_posn: c_char, + + pub 
decimal_point: *mut c_char, + pub grouping: *mut c_char, + pub thousands_sep: *mut c_char, + + pub _Frac_grouping: *mut c_char, + pub _Frac_sep: *mut c_char, + pub _False: *mut c_char, + pub _True: *mut c_char, + + pub _No: *mut c_char, + pub _Yes: *mut c_char, + pub _Nostr: *mut c_char, + pub _Yesstr: *mut c_char, + pub _Reserved: [*mut c_char; 8], + } + + // Does not exist in io-sock + #[cfg(not(target_env = "nto71_iosock"))] + pub struct in_pktinfo { + pub ipi_addr: crate::in_addr, + pub ipi_ifindex: c_uint, + } + + pub struct ifaddrs { + pub ifa_next: *mut ifaddrs, + pub ifa_name: *mut c_char, + pub ifa_flags: c_uint, + pub ifa_addr: *mut crate::sockaddr, + pub ifa_netmask: *mut crate::sockaddr, + pub ifa_dstaddr: *mut crate::sockaddr, + pub ifa_data: *mut c_void, + } + + pub struct arpreq { + pub arp_pa: crate::sockaddr, + pub arp_ha: crate::sockaddr, + pub arp_flags: c_int, + } + + #[cfg_attr(any(target_env = "nto71", target_env = "nto70"), repr(packed))] + pub struct arphdr { + pub ar_hrd: u16, + pub ar_pro: u16, + pub ar_hln: u8, + pub ar_pln: u8, + pub ar_op: u16, + } + + #[cfg(not(target_env = "nto71_iosock"))] + pub struct mmsghdr { + pub msg_hdr: crate::msghdr, + pub msg_len: c_uint, + } + + #[cfg(target_env = "nto71_iosock")] + pub struct mmsghdr { + pub msg_hdr: crate::msghdr, + pub msg_len: ssize_t, + } + + #[repr(align(8))] + pub struct siginfo_t { + pub si_signo: c_int, + pub si_code: c_int, + pub si_errno: c_int, + __data: [u8; 36], // union + } + + pub struct sigaction { + pub sa_sigaction: crate::sighandler_t, + pub sa_flags: c_int, + pub sa_mask: crate::sigset_t, + } + + pub struct _sync { + _union: c_uint, + __owner: c_uint, + } + pub struct rlimit64 { + pub rlim_cur: rlim64_t, + pub rlim_max: rlim64_t, + } + + pub struct glob_t { + pub gl_pathc: size_t, + pub gl_matchc: c_int, + pub gl_pathv: *mut *mut c_char, + pub gl_offs: size_t, + pub gl_flags: c_int, + pub gl_errfunc: extern "C" fn(*const c_char, c_int) -> c_int, + + __unused1: *mut c_void, + __unused2: *mut c_void, + __unused3: *mut c_void, + __unused4: *mut c_void, + __unused5: *mut c_void, + } + + pub struct passwd { + pub pw_name: *mut c_char, + pub pw_passwd: *mut c_char, + pub pw_uid: crate::uid_t, + pub pw_gid: crate::gid_t, + pub pw_age: *mut c_char, + pub pw_comment: *mut c_char, + pub pw_gecos: *mut c_char, + pub pw_dir: *mut c_char, + pub pw_shell: *mut c_char, + } + + pub struct if_nameindex { + pub if_index: c_uint, + pub if_name: *mut c_char, + } + + pub struct sembuf { + pub sem_num: c_ushort, + pub sem_op: c_short, + pub sem_flg: c_short, + } + + pub struct Elf32_Ehdr { + pub e_ident: [c_uchar; 16], + pub e_type: Elf32_Half, + pub e_machine: Elf32_Half, + pub e_version: Elf32_Word, + pub e_entry: Elf32_Addr, + pub e_phoff: Elf32_Off, + pub e_shoff: Elf32_Off, + pub e_flags: Elf32_Word, + pub e_ehsize: Elf32_Half, + pub e_phentsize: Elf32_Half, + pub e_phnum: Elf32_Half, + pub e_shentsize: Elf32_Half, + pub e_shnum: Elf32_Half, + pub e_shstrndx: Elf32_Half, + } + + pub struct Elf64_Ehdr { + pub e_ident: [c_uchar; 16], + pub e_type: Elf64_Half, + pub e_machine: Elf64_Half, + pub e_version: Elf64_Word, + pub e_entry: Elf64_Addr, + pub e_phoff: Elf64_Off, + pub e_shoff: Elf64_Off, + pub e_flags: Elf64_Word, + pub e_ehsize: Elf64_Half, + pub e_phentsize: Elf64_Half, + pub e_phnum: Elf64_Half, + pub e_shentsize: Elf64_Half, + pub e_shnum: Elf64_Half, + pub e_shstrndx: Elf64_Half, + } + + pub struct Elf32_Sym { + pub st_name: Elf32_Word, + pub st_value: Elf32_Addr, + pub st_size: Elf32_Word, + pub 
st_info: c_uchar, + pub st_other: c_uchar, + pub st_shndx: Elf32_Section, + } + + pub struct Elf64_Sym { + pub st_name: Elf64_Word, + pub st_info: c_uchar, + pub st_other: c_uchar, + pub st_shndx: Elf64_Section, + pub st_value: Elf64_Addr, + pub st_size: Elf64_Xword, + } + + pub struct Elf32_Phdr { + pub p_type: Elf32_Word, + pub p_offset: Elf32_Off, + pub p_vaddr: Elf32_Addr, + pub p_paddr: Elf32_Addr, + pub p_filesz: Elf32_Word, + pub p_memsz: Elf32_Word, + pub p_flags: Elf32_Word, + pub p_align: Elf32_Word, + } + + pub struct Elf64_Phdr { + pub p_type: Elf64_Word, + pub p_flags: Elf64_Word, + pub p_offset: Elf64_Off, + pub p_vaddr: Elf64_Addr, + pub p_paddr: Elf64_Addr, + pub p_filesz: Elf64_Xword, + pub p_memsz: Elf64_Xword, + pub p_align: Elf64_Xword, + } + + pub struct Elf32_Shdr { + pub sh_name: Elf32_Word, + pub sh_type: Elf32_Word, + pub sh_flags: Elf32_Word, + pub sh_addr: Elf32_Addr, + pub sh_offset: Elf32_Off, + pub sh_size: Elf32_Word, + pub sh_link: Elf32_Word, + pub sh_info: Elf32_Word, + pub sh_addralign: Elf32_Word, + pub sh_entsize: Elf32_Word, + } + + pub struct Elf64_Shdr { + pub sh_name: Elf64_Word, + pub sh_type: Elf64_Word, + pub sh_flags: Elf64_Xword, + pub sh_addr: Elf64_Addr, + pub sh_offset: Elf64_Off, + pub sh_size: Elf64_Xword, + pub sh_link: Elf64_Word, + pub sh_info: Elf64_Word, + pub sh_addralign: Elf64_Xword, + pub sh_entsize: Elf64_Xword, + } + + pub struct in6_pktinfo { + pub ipi6_addr: crate::in6_addr, + pub ipi6_ifindex: c_uint, + } + + pub struct inotify_event { + pub wd: c_int, + pub mask: u32, + pub cookie: u32, + pub len: u32, + } + + pub struct regmatch_t { + pub rm_so: regoff_t, + pub rm_eo: regoff_t, + } + + pub struct msghdr { + pub msg_name: *mut c_void, + pub msg_namelen: crate::socklen_t, + pub msg_iov: *mut crate::iovec, + pub msg_iovlen: c_int, + pub msg_control: *mut c_void, + pub msg_controllen: crate::socklen_t, + pub msg_flags: c_int, + } + + pub struct cmsghdr { + pub cmsg_len: crate::socklen_t, + pub cmsg_level: c_int, + pub cmsg_type: c_int, + } + + pub struct termios { + pub c_iflag: crate::tcflag_t, + pub c_oflag: crate::tcflag_t, + pub c_cflag: crate::tcflag_t, + pub c_lflag: crate::tcflag_t, + pub c_cc: [crate::cc_t; crate::NCCS], + __reserved: [c_uint; 3], + pub c_ispeed: crate::speed_t, + pub c_ospeed: crate::speed_t, + } + + pub struct mallinfo { + pub arena: c_int, + pub ordblks: c_int, + pub smblks: c_int, + pub hblks: c_int, + pub hblkhd: c_int, + pub usmblks: c_int, + pub fsmblks: c_int, + pub uordblks: c_int, + pub fordblks: c_int, + pub keepcost: c_int, + } + + pub struct flock { + pub l_type: i16, + pub l_whence: i16, + pub l_zero1: i32, + pub l_start: off_t, + pub l_len: off_t, + pub l_pid: crate::pid_t, + pub l_sysid: u32, + } + + pub struct statvfs { + pub f_bsize: c_ulong, + pub f_frsize: c_ulong, + pub f_blocks: crate::fsblkcnt_t, + pub f_bfree: crate::fsblkcnt_t, + pub f_bavail: crate::fsblkcnt_t, + pub f_files: crate::fsfilcnt_t, + pub f_ffree: crate::fsfilcnt_t, + pub f_favail: crate::fsfilcnt_t, + pub f_fsid: c_ulong, + pub f_basetype: [c_char; 16], + pub f_flag: c_ulong, + pub f_namemax: c_ulong, + f_filler: [c_uint; 21], + } + + pub struct aiocb { + pub aio_fildes: c_int, + pub aio_reqprio: c_int, + pub aio_offset: off_t, + pub aio_buf: *mut c_void, + pub aio_nbytes: size_t, + pub aio_sigevent: crate::sigevent, + pub aio_lio_opcode: c_int, + pub _aio_lio_state: *mut c_void, + _aio_pad: [c_int; 3], + pub _aio_next: *mut crate::aiocb, + pub _aio_flag: c_uint, + pub _aio_iotype: c_uint, + pub _aio_result: 
ssize_t,
+        pub _aio_error: c_uint,
+        pub _aio_suspend: *mut c_void,
+        pub _aio_plist: *mut c_void,
+        pub _aio_policy: c_int,
+        pub _aio_param: crate::__sched_param,
+    }
+
+    pub struct pthread_attr_t {
+        __data1: c_long,
+        __data2: [u8; 96],
+    }
+
+    pub struct ipc_perm {
+        pub uid: crate::uid_t,
+        pub gid: crate::gid_t,
+        pub cuid: crate::uid_t,
+        pub cgid: crate::gid_t,
+        pub mode: mode_t,
+        pub seq: c_uint,
+        pub key: crate::key_t,
+        _reserved: [c_int; 4],
+    }
+
+    pub struct regex_t {
+        re_magic: c_int,
+        re_nsub: size_t,
+        re_endp: *const c_char,
+        re_g: *mut c_void,
+    }
+
+    // FIXME(1.0): This should not implement `PartialEq`
+    #[allow(unpredictable_function_pointer_comparisons)]
+    pub struct _thread_attr {
+        pub __flags: c_int,
+        pub __stacksize: size_t,
+        pub __stackaddr: *mut c_void,
+        pub __exitfunc: Option<unsafe extern "C" fn(*mut c_void)>,
+        pub __policy: c_int,
+        pub __param: crate::__sched_param,
+        pub __guardsize: c_uint,
+        pub __prealloc: c_uint,
+        __spare: [c_int; 2],
+    }
+
+    pub struct _sync_attr {
+        pub __protocol: c_int,
+        pub __flags: c_int,
+        pub __prioceiling: c_int,
+        pub __clockid: c_int,
+        pub __count: c_int,
+        __reserved: [c_int; 3],
+    }
+
+    pub struct sockcred {
+        pub sc_uid: crate::uid_t,
+        pub sc_euid: crate::uid_t,
+        pub sc_gid: crate::gid_t,
+        pub sc_egid: crate::gid_t,
+        pub sc_ngroups: c_int,
+        pub sc_groups: [crate::gid_t; 1],
+    }
+
+    pub struct bpf_program {
+        pub bf_len: c_uint,
+        pub bf_insns: *mut crate::bpf_insn,
+    }
+
+    #[cfg(not(target_env = "nto71_iosock"))]
+    pub struct bpf_stat {
+        pub bs_recv: u64,
+        pub bs_drop: u64,
+        pub bs_capt: u64,
+        bs_padding: [u64; 13],
+    }
+
+    #[cfg(target_env = "nto71_iosock")]
+    pub struct bpf_stat {
+        pub bs_recv: c_uint,
+        pub bs_drop: c_uint,
+    }
+
+    pub struct bpf_version {
+        pub bv_major: c_ushort,
+        pub bv_minor: c_ushort,
+    }
+
+    pub struct bpf_hdr {
+        pub bh_tstamp: crate::timeval,
+        pub bh_caplen: u32,
+        pub bh_datalen: u32,
+        pub bh_hdrlen: u16,
+    }
+
+    pub struct bpf_insn {
+        pub code: u16,
+        pub jt: c_uchar,
+        pub jf: c_uchar,
+        pub k: u32,
+    }
+
+    pub struct bpf_dltlist {
+        pub bfl_len: c_uint,
+        pub bfl_list: *mut c_uint,
+    }
+
+    // Does not exist in io-sock
+    #[cfg(not(target_env = "nto71_iosock"))]
+    pub struct unpcbid {
+        pub unp_pid: crate::pid_t,
+        pub unp_euid: crate::uid_t,
+        pub unp_egid: crate::gid_t,
+    }
+
+    pub struct dl_phdr_info {
+        pub dlpi_addr: crate::Elf64_Addr,
+        pub dlpi_name: *const c_char,
+        pub dlpi_phdr: *const crate::Elf64_Phdr,
+        pub dlpi_phnum: crate::Elf64_Half,
+    }
+
+    #[repr(align(8))]
+    pub struct ucontext_t {
+        pub uc_link: *mut ucontext_t,
+        pub uc_sigmask: crate::sigset_t,
+        pub uc_stack: stack_t,
+        pub uc_mcontext: mcontext_t,
+    }
+}
+
+s_no_extra_traits!
{ + pub struct sockaddr_un { + pub sun_len: u8, + pub sun_family: sa_family_t, + pub sun_path: [c_char; 104], + } + + pub struct sockaddr_storage { + pub ss_len: u8, + pub ss_family: sa_family_t, + __ss_pad1: [c_char; 6], + __ss_align: i64, + __ss_pad2: [c_char; 112], + } + + pub struct utsname { + pub sysname: [c_char; _SYSNAME_SIZE], + pub nodename: [c_char; _SYSNAME_SIZE], + pub release: [c_char; _SYSNAME_SIZE], + pub version: [c_char; _SYSNAME_SIZE], + pub machine: [c_char; _SYSNAME_SIZE], + } + + pub struct sigevent { + pub sigev_notify: c_int, + pub __padding1: c_int, + pub sigev_signo: c_int, // union + pub __padding2: c_int, + pub sigev_value: crate::sigval, + __sigev_un2: usize, // union + } + pub struct dirent { + pub d_ino: crate::ino_t, + pub d_offset: off_t, + pub d_reclen: c_short, + pub d_namelen: c_short, + pub d_name: [c_char; 1], // flex array + } + + pub struct sigset_t { + __val: [u32; 2], + } + + pub struct mq_attr { + pub mq_maxmsg: c_long, + pub mq_msgsize: c_long, + pub mq_flags: c_long, + pub mq_curmsgs: c_long, + pub mq_sendwait: c_long, + pub mq_recvwait: c_long, + } + + pub struct msg { + pub msg_next: *mut crate::msg, + pub msg_type: c_long, + pub msg_ts: c_ushort, + pub msg_spot: c_short, + _pad: [u8; 4], + } + + pub struct msqid_ds { + pub msg_perm: crate::ipc_perm, + pub msg_first: *mut crate::msg, + pub msg_last: *mut crate::msg, + pub msg_cbytes: crate::msglen_t, + pub msg_qnum: crate::msgqnum_t, + pub msg_qbytes: crate::msglen_t, + pub msg_lspid: crate::pid_t, + pub msg_lrpid: crate::pid_t, + pub msg_stime: crate::time_t, + msg_pad1: c_long, + pub msg_rtime: crate::time_t, + msg_pad2: c_long, + pub msg_ctime: crate::time_t, + msg_pad3: c_long, + msg_pad4: [c_long; 4], + } + + #[cfg(not(target_env = "nto71_iosock"))] + pub struct sockaddr_dl { + pub sdl_len: c_uchar, + pub sdl_family: crate::sa_family_t, + pub sdl_index: u16, + pub sdl_type: c_uchar, + pub sdl_nlen: c_uchar, + pub sdl_alen: c_uchar, + pub sdl_slen: c_uchar, + pub sdl_data: [c_char; 12], + } + + #[cfg(target_env = "nto71_iosock")] + pub struct sockaddr_dl { + pub sdl_len: c_uchar, + pub sdl_family: c_uchar, + pub sdl_index: c_ushort, + pub sdl_type: c_uchar, + pub sdl_nlen: c_uchar, + pub sdl_alen: c_uchar, + pub sdl_slen: c_uchar, + pub sdl_data: [c_char; 46], + } + + pub struct sync_t { + __u: c_uint, // union + pub __owner: c_uint, + } + + #[repr(align(4))] + pub struct pthread_barrier_t { + // union + __pad: [u8; 28], // union + } + + pub struct pthread_rwlock_t { + pub __active: c_int, + pub __blockedwriters: c_int, + pub __blockedreaders: c_int, + pub __heavy: c_int, + pub __lock: crate::pthread_mutex_t, // union + pub __rcond: crate::pthread_cond_t, // union + pub __wcond: crate::pthread_cond_t, // union + pub __owner: c_uint, + pub __spare: c_uint, + } +} + +cfg_if! 
{
+    if #[cfg(feature = "extra_traits")] {
+        // sigevent
+        impl PartialEq for sigevent {
+            fn eq(&self, other: &sigevent) -> bool {
+                self.sigev_notify == other.sigev_notify
+                    && self.sigev_signo == other.sigev_signo
+                    && self.sigev_value == other.sigev_value
+                    && self.__sigev_un2 == other.__sigev_un2
+            }
+        }
+        impl Eq for sigevent {}
+        impl hash::Hash for sigevent {
+            fn hash<H: hash::Hasher>(&self, state: &mut H) {
+                self.sigev_notify.hash(state);
+                self.sigev_signo.hash(state);
+                self.sigev_value.hash(state);
+                self.__sigev_un2.hash(state);
+            }
+        }
+
+        impl PartialEq for sockaddr_un {
+            fn eq(&self, other: &sockaddr_un) -> bool {
+                self.sun_len == other.sun_len
+                    && self.sun_family == other.sun_family
+                    && self
+                        .sun_path
+                        .iter()
+                        .zip(other.sun_path.iter())
+                        .all(|(a, b)| a == b)
+            }
+        }
+        impl Eq for sockaddr_un {}
+
+        impl hash::Hash for sockaddr_un {
+            fn hash<H: hash::Hasher>(&self, state: &mut H) {
+                self.sun_len.hash(state);
+                self.sun_family.hash(state);
+                self.sun_path.hash(state);
+            }
+        }
+
+        // sigset_t
+        impl PartialEq for sigset_t {
+            fn eq(&self, other: &sigset_t) -> bool {
+                self.__val == other.__val
+            }
+        }
+        impl Eq for sigset_t {}
+        impl hash::Hash for sigset_t {
+            fn hash<H: hash::Hasher>(&self, state: &mut H) {
+                self.__val.hash(state);
+            }
+        }
+
+        // msg
+
+        // msqid_ds
+
+        // sockaddr_dl
+        impl PartialEq for sockaddr_dl {
+            fn eq(&self, other: &sockaddr_dl) -> bool {
+                self.sdl_len == other.sdl_len
+                    && self.sdl_family == other.sdl_family
+                    && self.sdl_index == other.sdl_index
+                    && self.sdl_type == other.sdl_type
+                    && self.sdl_nlen == other.sdl_nlen
+                    && self.sdl_alen == other.sdl_alen
+                    && self.sdl_slen == other.sdl_slen
+                    && self
+                        .sdl_data
+                        .iter()
+                        .zip(other.sdl_data.iter())
+                        .all(|(a, b)| a == b)
+            }
+        }
+        impl Eq for sockaddr_dl {}
+        impl hash::Hash for sockaddr_dl {
+            fn hash<H: hash::Hasher>(&self, state: &mut H) {
+                self.sdl_len.hash(state);
+                self.sdl_family.hash(state);
+                self.sdl_index.hash(state);
+                self.sdl_type.hash(state);
+                self.sdl_nlen.hash(state);
+                self.sdl_alen.hash(state);
+                self.sdl_slen.hash(state);
+                self.sdl_data.hash(state);
+            }
+        }
+
+        impl PartialEq for utsname {
+            fn eq(&self, other: &utsname) -> bool {
+                self.sysname
+                    .iter()
+                    .zip(other.sysname.iter())
+                    .all(|(a, b)| a == b)
+                    && self
+                        .nodename
+                        .iter()
+                        .zip(other.nodename.iter())
+                        .all(|(a, b)| a == b)
+                    && self
+                        .release
+                        .iter()
+                        .zip(other.release.iter())
+                        .all(|(a, b)| a == b)
+                    && self
+                        .version
+                        .iter()
+                        .zip(other.version.iter())
+                        .all(|(a, b)| a == b)
+                    && self
+                        .machine
+                        .iter()
+                        .zip(other.machine.iter())
+                        .all(|(a, b)| a == b)
+            }
+        }
+
+        impl Eq for utsname {}
+
+        impl hash::Hash for utsname {
+            fn hash<H: hash::Hasher>(&self, state: &mut H) {
+                self.sysname.hash(state);
+                self.nodename.hash(state);
+                self.release.hash(state);
+                self.version.hash(state);
+                self.machine.hash(state);
+            }
+        }
+
+        impl PartialEq for mq_attr {
+            fn eq(&self, other: &mq_attr) -> bool {
+                self.mq_maxmsg == other.mq_maxmsg
+                    && self.mq_msgsize == other.mq_msgsize
+                    && self.mq_flags == other.mq_flags
+                    && self.mq_curmsgs == other.mq_curmsgs
+                    && self.mq_msgsize == other.mq_msgsize
+                    && self.mq_sendwait == other.mq_sendwait
+                    && self.mq_recvwait == other.mq_recvwait
+            }
+        }
+
+        impl Eq for mq_attr {}
+
+        impl hash::Hash for mq_attr {
+            fn hash<H: hash::Hasher>(&self, state: &mut H) {
+                self.mq_maxmsg.hash(state);
+                self.mq_msgsize.hash(state);
+                self.mq_flags.hash(state);
+                self.mq_curmsgs.hash(state);
+                self.mq_sendwait.hash(state);
+                self.mq_recvwait.hash(state);
+            }
+        }
+
+        impl PartialEq for sockaddr_storage {
+            fn eq(&self, other: &sockaddr_storage) -> bool {
+                self.ss_len == other.ss_len
+                    && self.ss_family == other.ss_family
+                    && self.__ss_pad1 == other.__ss_pad1
+                    && self.__ss_align == other.__ss_align
+                    && self
+                        .__ss_pad2
+                        .iter()
+                        .zip(other.__ss_pad2.iter())
+                        .all(|(a, b)| a == b)
+            }
+        }
+
+        impl Eq for sockaddr_storage {}
+
+        impl hash::Hash for sockaddr_storage {
+            fn hash<H: hash::Hasher>(&self, state: &mut H) {
+                self.ss_len.hash(state);
+                self.ss_family.hash(state);
+                self.__ss_pad1.hash(state);
+                self.__ss_align.hash(state);
+                self.__ss_pad2.hash(state);
+            }
+        }
+
+        impl PartialEq for dirent {
+            fn eq(&self, other: &dirent) -> bool {
+                self.d_ino == other.d_ino
+                    && self.d_offset == other.d_offset
+                    && self.d_reclen == other.d_reclen
+                    && self.d_namelen == other.d_namelen
+                    && self.d_name[..self.d_namelen as _]
+                        .iter()
+                        .zip(other.d_name.iter())
+                        .all(|(a, b)| a == b)
+            }
+        }
+
+        impl Eq for dirent {}
+
+        impl hash::Hash for dirent {
+            fn hash<H: hash::Hasher>(&self, state: &mut H) {
+                self.d_ino.hash(state);
+                self.d_offset.hash(state);
+                self.d_reclen.hash(state);
+                self.d_namelen.hash(state);
+                self.d_name[..self.d_namelen as _].hash(state);
+            }
+        }
+    }
+}
+
+pub const _SYSNAME_SIZE: usize = 256 + 1;
+pub const RLIM_INFINITY: crate::rlim_t = 0xfffffffffffffffd;
+pub const O_LARGEFILE: c_int = 0o0100000;
+
+// intentionally not public, only used for fd_set
+cfg_if! {
+    if #[cfg(target_pointer_width = "32")] {
+        const ULONG_SIZE: usize = 32;
+    } else if #[cfg(target_pointer_width = "64")] {
+        const ULONG_SIZE: usize = 64;
+    } else {
+        // Unknown target_pointer_width
+    }
+}
+
+pub const EXIT_FAILURE: c_int = 1;
+pub const EXIT_SUCCESS: c_int = 0;
+pub const RAND_MAX: c_int = 32767;
+pub const EOF: c_int = -1;
+pub const SEEK_SET: c_int = 0;
+pub const SEEK_CUR: c_int = 1;
+pub const SEEK_END: c_int = 2;
+pub const _IOFBF: c_int = 0;
+pub const _IONBF: c_int = 2;
+pub const _IOLBF: c_int = 1;
+
+pub const F_DUPFD: c_int = 0;
+pub const F_GETFD: c_int = 1;
+pub const F_SETFD: c_int = 2;
+pub const F_GETFL: c_int = 3;
+pub const F_SETFL: c_int = 4;
+
+pub const F_DUPFD_CLOEXEC: c_int = 5;
+
+pub const SIGTRAP: c_int = 5;
+
+pub const CLOCK_REALTIME: crate::clockid_t = 0;
+pub const CLOCK_MONOTONIC: crate::clockid_t = 2;
+pub const CLOCK_PROCESS_CPUTIME_ID: crate::clockid_t = 3;
+pub const CLOCK_THREAD_CPUTIME_ID: crate::clockid_t = 4;
+pub const TIMER_ABSTIME: c_uint = 0x80000000;
+
+pub const RUSAGE_SELF: c_int = 0;
+
+pub const F_OK: c_int = 0;
+pub const X_OK: c_int = 1;
+pub const W_OK: c_int = 2;
+pub const R_OK: c_int = 4;
+
+pub const STDIN_FILENO: c_int = 0;
+pub const STDOUT_FILENO: c_int = 1;
+pub const STDERR_FILENO: c_int = 2;
+
+pub const SIGHUP: c_int = 1;
+pub const SIGINT: c_int = 2;
+pub const SIGQUIT: c_int = 3;
+pub const SIGILL: c_int = 4;
+pub const SIGABRT: c_int = 6;
+pub const SIGFPE: c_int = 8;
+pub const SIGKILL: c_int = 9;
+pub const SIGSEGV: c_int = 11;
+pub const SIGPIPE: c_int = 13;
+pub const SIGALRM: c_int = 14;
+pub const SIGTERM: c_int = 15;
+
+pub const PROT_NONE: c_int = 0x00000000;
+pub const PROT_READ: c_int = 0x00000100;
+pub const PROT_WRITE: c_int = 0x00000200;
+pub const PROT_EXEC: c_int = 0x00000400;
+
+pub const MAP_FILE: c_int = 0;
+pub const MAP_SHARED: c_int = 1;
+pub const MAP_PRIVATE: c_int = 2;
+pub const MAP_FIXED: c_int = 0x10;
+
+pub const MAP_FAILED: *mut c_void = !0 as *mut c_void;
+
+pub const MS_ASYNC: c_int = 1;
+pub const MS_INVALIDATE: c_int = 4;
+pub const MS_SYNC: c_int = 2;
+
+pub const SCM_RIGHTS: c_int = 0x01;
+pub const SCM_TIMESTAMP: c_int = 0x02;
+cfg_if!
{ + if #[cfg(not(target_env = "nto71_iosock"))] { + pub const SCM_CREDS: c_int = 0x04; + pub const IFF_NOTRAILERS: c_int = 0x00000020; + pub const AF_INET6: c_int = 24; + pub const AF_BLUETOOTH: c_int = 31; + pub const pseudo_AF_KEY: c_int = 29; + pub const MSG_NOSIGNAL: c_int = 0x0800; + pub const MSG_WAITFORONE: c_int = 0x2000; + pub const IP_IPSEC_POLICY_COMPAT: c_int = 22; + pub const IP_PKTINFO: c_int = 25; + pub const IPPROTO_DIVERT: c_int = 259; + pub const IPV6_IPSEC_POLICY_COMPAT: c_int = 28; + pub const TCP_KEEPALIVE: c_int = 0x04; + pub const ARPHRD_ARCNET: u16 = 7; + pub const SO_BINDTODEVICE: c_int = 0x0800; + pub const EAI_NODATA: c_int = 7; + pub const IPTOS_ECN_NOT_ECT: u8 = 0x00; + pub const RTF_BROADCAST: u32 = 0x80000; + pub const UDP_ENCAP: c_int = 100; + pub const HW_IOSTATS: c_int = 9; + pub const HW_MACHINE_ARCH: c_int = 10; + pub const HW_ALIGNBYTES: c_int = 11; + pub const HW_CNMAGIC: c_int = 12; + pub const HW_PHYSMEM64: c_int = 13; + pub const HW_USERMEM64: c_int = 14; + pub const HW_IOSTATNAMES: c_int = 15; + pub const HW_MAXID: c_int = 15; + pub const CTL_UNSPEC: c_int = 0; + pub const CTL_QNX: c_int = 9; + pub const CTL_PROC: c_int = 10; + pub const CTL_VENDOR: c_int = 11; + pub const CTL_EMUL: c_int = 12; + pub const CTL_SECURITY: c_int = 13; + pub const CTL_MAXID: c_int = 14; + pub const AF_ARP: c_int = 28; + pub const AF_IEEE80211: c_int = 32; + pub const AF_NATM: c_int = 27; + pub const AF_NS: c_int = 6; + pub const BIOCGDLTLIST: c_int = -1072676233; + pub const BIOCGETIF: c_int = 1083196011; + pub const BIOCGSEESENT: c_int = 1074020984; + pub const BIOCGSTATS: c_int = 1082147439; + pub const BIOCSDLT: c_int = -2147204490; + pub const BIOCSETIF: c_int = -2138029460; + pub const BIOCSSEESENT: c_int = -2147204487; + pub const FIONSPACE: c_int = 1074030200; + pub const FIONWRITE: c_int = 1074030201; + pub const IFF_ACCEPTRTADV: c_int = 0x40000000; + pub const IFF_IP6FORWARDING: c_int = 0x20000000; + pub const IFF_SHIM: c_int = 0x80000000; + pub const KERN_ARND: c_int = 81; + pub const KERN_IOV_MAX: c_int = 38; + pub const KERN_LOGSIGEXIT: c_int = 46; + pub const KERN_MAXID: c_int = 83; + pub const KERN_PROC_ARGS: c_int = 48; + pub const KERN_PROC_ENV: c_int = 3; + pub const KERN_PROC_GID: c_int = 7; + pub const KERN_PROC_RGID: c_int = 8; + pub const LOCAL_CONNWAIT: c_int = 0x0002; + pub const LOCAL_CREDS: c_int = 0x0001; + pub const LOCAL_PEEREID: c_int = 0x0003; + pub const MSG_NOTIFICATION: c_int = 0x0400; + pub const NET_RT_IFLIST: c_int = 4; + pub const NI_NUMERICSCOPE: c_int = 0x00000040; + pub const PF_ARP: c_int = 28; + pub const PF_NATM: c_int = 27; + pub const pseudo_AF_HDRCMPLT: c_int = 30; + pub const SIOCGIFADDR: c_int = -1064277727; + pub const SO_FIB: c_int = 0x100a; + pub const SO_TXPRIO: c_int = 0x100b; + pub const SO_SETFIB: c_int = 0x100a; + pub const SO_VLANPRIO: c_int = 0x100c; + pub const USER_ATEXIT_MAX: c_int = 21; + pub const USER_MAXID: c_int = 22; + pub const SO_OVERFLOWED: c_int = 0x1009; + } else { + pub const SCM_CREDS: c_int = 0x03; + pub const AF_INET6: c_int = 28; + pub const AF_BLUETOOTH: c_int = 36; + pub const pseudo_AF_KEY: c_int = 27; + pub const MSG_NOSIGNAL: c_int = 0x20000; + pub const MSG_WAITFORONE: c_int = 0x00080000; + pub const IPPROTO_DIVERT: c_int = 258; + pub const RTF_BROADCAST: u32 = 0x400000; + pub const UDP_ENCAP: c_int = 1; + pub const HW_MACHINE_ARCH: c_int = 11; + pub const AF_ARP: c_int = 35; + pub const AF_IEEE80211: c_int = 37; + pub const AF_NATM: c_int = 29; + pub const BIOCGDLTLIST: c_ulong = 
0xffffffffc0104279; + pub const BIOCGETIF: c_int = 0x4020426b; + pub const BIOCGSEESENT: c_int = 0x40044276; + pub const BIOCGSTATS: c_int = 0x4008426f; + pub const BIOCSDLT: c_int = 0x80044278; + pub const BIOCSETIF: c_int = 0x8020426c; + pub const BIOCSSEESENT: c_int = 0x80044277; + pub const KERN_ARND: c_int = 37; + pub const KERN_IOV_MAX: c_int = 35; + pub const KERN_LOGSIGEXIT: c_int = 34; + pub const KERN_PROC_ARGS: c_int = 7; + pub const KERN_PROC_ENV: c_int = 35; + pub const KERN_PROC_GID: c_int = 11; + pub const KERN_PROC_RGID: c_int = 10; + pub const LOCAL_CONNWAIT: c_int = 4; + pub const LOCAL_CREDS: c_int = 2; + pub const MSG_NOTIFICATION: c_int = 0x00002000; + pub const NET_RT_IFLIST: c_int = 3; + pub const NI_NUMERICSCOPE: c_int = 0x00000020; + pub const PF_ARP: c_int = AF_ARP; + pub const PF_NATM: c_int = AF_NATM; + pub const pseudo_AF_HDRCMPLT: c_int = 31; + pub const SIOCGIFADDR: c_int = 0xc0206921; + pub const SO_SETFIB: c_int = 0x1014; + } +} + +pub const MAP_TYPE: c_int = 0x3; + +pub const IFF_UP: c_int = 0x00000001; +pub const IFF_BROADCAST: c_int = 0x00000002; +pub const IFF_DEBUG: c_int = 0x00000004; +pub const IFF_LOOPBACK: c_int = 0x00000008; +pub const IFF_POINTOPOINT: c_int = 0x00000010; +pub const IFF_RUNNING: c_int = 0x00000040; +pub const IFF_NOARP: c_int = 0x00000080; +pub const IFF_PROMISC: c_int = 0x00000100; +pub const IFF_ALLMULTI: c_int = 0x00000200; +pub const IFF_MULTICAST: c_int = 0x00008000; + +pub const AF_UNSPEC: c_int = 0; +pub const AF_UNIX: c_int = AF_LOCAL; +pub const AF_LOCAL: c_int = 1; +pub const AF_INET: c_int = 2; +pub const AF_IPX: c_int = 23; +pub const AF_APPLETALK: c_int = 16; +pub const AF_ROUTE: c_int = 17; +pub const AF_SNA: c_int = 11; + +pub const AF_ISDN: c_int = 26; + +pub const PF_UNSPEC: c_int = AF_UNSPEC; +pub const PF_UNIX: c_int = PF_LOCAL; +pub const PF_LOCAL: c_int = AF_LOCAL; +pub const PF_INET: c_int = AF_INET; +pub const PF_IPX: c_int = AF_IPX; +pub const PF_APPLETALK: c_int = AF_APPLETALK; +pub const PF_INET6: c_int = AF_INET6; +pub const PF_KEY: c_int = pseudo_AF_KEY; +pub const PF_ROUTE: c_int = AF_ROUTE; +pub const PF_SNA: c_int = AF_SNA; + +pub const PF_BLUETOOTH: c_int = AF_BLUETOOTH; +pub const PF_ISDN: c_int = AF_ISDN; + +pub const SOMAXCONN: c_int = 128; + +pub const MSG_OOB: c_int = 0x0001; +pub const MSG_PEEK: c_int = 0x0002; +pub const MSG_DONTROUTE: c_int = 0x0004; +pub const MSG_CTRUNC: c_int = 0x0020; +pub const MSG_TRUNC: c_int = 0x0010; +pub const MSG_DONTWAIT: c_int = 0x0080; +pub const MSG_EOR: c_int = 0x0008; +pub const MSG_WAITALL: c_int = 0x0040; + +pub const IP_TOS: c_int = 3; +pub const IP_TTL: c_int = 4; +pub const IP_HDRINCL: c_int = 2; +pub const IP_OPTIONS: c_int = 1; +pub const IP_RECVOPTS: c_int = 5; +pub const IP_RETOPTS: c_int = 8; +pub const IP_MULTICAST_IF: c_int = 9; +pub const IP_MULTICAST_TTL: c_int = 10; +pub const IP_MULTICAST_LOOP: c_int = 11; +pub const IP_ADD_MEMBERSHIP: c_int = 12; +pub const IP_DROP_MEMBERSHIP: c_int = 13; +pub const IP_DEFAULT_MULTICAST_TTL: c_int = 1; +pub const IP_DEFAULT_MULTICAST_LOOP: c_int = 1; + +pub const IPPROTO_HOPOPTS: c_int = 0; +pub const IPPROTO_IGMP: c_int = 2; +pub const IPPROTO_IPIP: c_int = 4; +pub const IPPROTO_EGP: c_int = 8; +pub const IPPROTO_PUP: c_int = 12; +pub const IPPROTO_IDP: c_int = 22; +pub const IPPROTO_TP: c_int = 29; +pub const IPPROTO_ROUTING: c_int = 43; +pub const IPPROTO_FRAGMENT: c_int = 44; +pub const IPPROTO_RSVP: c_int = 46; +pub const IPPROTO_GRE: c_int = 47; +pub const IPPROTO_ESP: c_int = 50; +pub const 
IPPROTO_AH: c_int = 51; +pub const IPPROTO_NONE: c_int = 59; +pub const IPPROTO_DSTOPTS: c_int = 60; +pub const IPPROTO_ENCAP: c_int = 98; +pub const IPPROTO_PIM: c_int = 103; +pub const IPPROTO_SCTP: c_int = 132; +pub const IPPROTO_RAW: c_int = 255; +pub const IPPROTO_MAX: c_int = 256; +pub const IPPROTO_CARP: c_int = 112; +pub const IPPROTO_DONE: c_int = 257; +pub const IPPROTO_EON: c_int = 80; +pub const IPPROTO_ETHERIP: c_int = 97; +pub const IPPROTO_GGP: c_int = 3; +pub const IPPROTO_IPCOMP: c_int = 108; +pub const IPPROTO_MOBILE: c_int = 55; + +pub const IPV6_RTHDR_LOOSE: c_int = 0; +pub const IPV6_RTHDR_STRICT: c_int = 1; +pub const IPV6_UNICAST_HOPS: c_int = 4; +pub const IPV6_MULTICAST_IF: c_int = 9; +pub const IPV6_MULTICAST_HOPS: c_int = 10; +pub const IPV6_MULTICAST_LOOP: c_int = 11; +pub const IPV6_JOIN_GROUP: c_int = 12; +pub const IPV6_LEAVE_GROUP: c_int = 13; +pub const IPV6_CHECKSUM: c_int = 26; +pub const IPV6_V6ONLY: c_int = 27; +pub const IPV6_RTHDRDSTOPTS: c_int = 35; +pub const IPV6_RECVPKTINFO: c_int = 36; +pub const IPV6_RECVHOPLIMIT: c_int = 37; +pub const IPV6_RECVRTHDR: c_int = 38; +pub const IPV6_RECVHOPOPTS: c_int = 39; +pub const IPV6_RECVDSTOPTS: c_int = 40; +pub const IPV6_RECVPATHMTU: c_int = 43; +pub const IPV6_PATHMTU: c_int = 44; +pub const IPV6_PKTINFO: c_int = 46; +pub const IPV6_HOPLIMIT: c_int = 47; +pub const IPV6_NEXTHOP: c_int = 48; +pub const IPV6_HOPOPTS: c_int = 49; +pub const IPV6_DSTOPTS: c_int = 50; +pub const IPV6_RECVTCLASS: c_int = 57; +pub const IPV6_TCLASS: c_int = 61; +pub const IPV6_DONTFRAG: c_int = 62; + +pub const TCP_NODELAY: c_int = 0x01; +pub const TCP_MAXSEG: c_int = 0x02; +pub const TCP_MD5SIG: c_int = 0x10; + +pub const SHUT_RD: c_int = 0; +pub const SHUT_WR: c_int = 1; +pub const SHUT_RDWR: c_int = 2; + +pub const LOCK_SH: c_int = 0x1; +pub const LOCK_EX: c_int = 0x2; +pub const LOCK_NB: c_int = 0x4; +pub const LOCK_UN: c_int = 0x8; + +pub const SS_ONSTACK: c_int = 1; +pub const SS_DISABLE: c_int = 2; + +pub const PATH_MAX: c_int = 1024; + +pub const UIO_MAXIOV: c_int = 1024; + +pub const FD_SETSIZE: usize = 256; + +pub const TCIOFF: c_int = 0x0002; +pub const TCION: c_int = 0x0003; +pub const TCOOFF: c_int = 0x0000; +pub const TCOON: c_int = 0x0001; +pub const TCIFLUSH: c_int = 0; +pub const TCOFLUSH: c_int = 1; +pub const TCIOFLUSH: c_int = 2; +pub const NL0: crate::tcflag_t = 0x000; +pub const NL1: crate::tcflag_t = 0x100; +pub const TAB0: crate::tcflag_t = 0x0000; +pub const CR0: crate::tcflag_t = 0x000; +pub const FF0: crate::tcflag_t = 0x0000; +pub const BS0: crate::tcflag_t = 0x0000; +pub const VT0: crate::tcflag_t = 0x0000; +pub const VERASE: usize = 2; +pub const VKILL: usize = 3; +pub const VINTR: usize = 0; +pub const VQUIT: usize = 1; +pub const VLNEXT: usize = 15; +pub const IGNBRK: crate::tcflag_t = 0x00000001; +pub const BRKINT: crate::tcflag_t = 0x00000002; +pub const IGNPAR: crate::tcflag_t = 0x00000004; +pub const PARMRK: crate::tcflag_t = 0x00000008; +pub const INPCK: crate::tcflag_t = 0x00000010; +pub const ISTRIP: crate::tcflag_t = 0x00000020; +pub const INLCR: crate::tcflag_t = 0x00000040; +pub const IGNCR: crate::tcflag_t = 0x00000080; +pub const ICRNL: crate::tcflag_t = 0x00000100; +pub const IXANY: crate::tcflag_t = 0x00000800; +pub const IMAXBEL: crate::tcflag_t = 0x00002000; +pub const OPOST: crate::tcflag_t = 0x00000001; +pub const CS5: crate::tcflag_t = 0x00; +pub const ECHO: crate::tcflag_t = 0x00000008; +pub const OCRNL: crate::tcflag_t = 0x00000008; +pub const ONOCR: crate::tcflag_t = 
0x00000010; +pub const ONLRET: crate::tcflag_t = 0x00000020; +pub const OFILL: crate::tcflag_t = 0x00000040; +pub const OFDEL: crate::tcflag_t = 0x00000080; + +pub const WNOHANG: c_int = 0x0040; +pub const WUNTRACED: c_int = 0x0004; +pub const WSTOPPED: c_int = WUNTRACED; +pub const WEXITED: c_int = 0x0001; +pub const WCONTINUED: c_int = 0x0008; +pub const WNOWAIT: c_int = 0x0080; +pub const WTRAPPED: c_int = 0x0002; + +pub const RTLD_LOCAL: c_int = 0x0200; +pub const RTLD_LAZY: c_int = 0x0001; + +pub const POSIX_FADV_NORMAL: c_int = 0; +pub const POSIX_FADV_RANDOM: c_int = 2; +pub const POSIX_FADV_SEQUENTIAL: c_int = 1; +pub const POSIX_FADV_WILLNEED: c_int = 3; + +pub const AT_FDCWD: c_int = -100; +pub const AT_EACCESS: c_int = 0x0001; +pub const AT_SYMLINK_NOFOLLOW: c_int = 0x0002; +pub const AT_SYMLINK_FOLLOW: c_int = 0x0004; +pub const AT_REMOVEDIR: c_int = 0x0008; + +pub const LOG_CRON: c_int = 9 << 3; +pub const LOG_AUTHPRIV: c_int = 10 << 3; +pub const LOG_FTP: c_int = 11 << 3; +pub const LOG_PERROR: c_int = 0x20; + +pub const PIPE_BUF: usize = 5120; + +pub const CLD_EXITED: c_int = 1; +pub const CLD_KILLED: c_int = 2; +pub const CLD_DUMPED: c_int = 3; +pub const CLD_TRAPPED: c_int = 4; +pub const CLD_STOPPED: c_int = 5; +pub const CLD_CONTINUED: c_int = 6; + +pub const UTIME_OMIT: c_long = 0x40000002; +pub const UTIME_NOW: c_long = 0x40000001; + +pub const POLLIN: c_short = POLLRDNORM | POLLRDBAND; +pub const POLLPRI: c_short = 0x0008; +pub const POLLOUT: c_short = 0x0002; +pub const POLLERR: c_short = 0x0020; +pub const POLLHUP: c_short = 0x0040; +pub const POLLNVAL: c_short = 0x1000; +pub const POLLRDNORM: c_short = 0x0001; +pub const POLLRDBAND: c_short = 0x0004; + +pub const IPTOS_LOWDELAY: u8 = 0x10; +pub const IPTOS_THROUGHPUT: u8 = 0x08; +pub const IPTOS_RELIABILITY: u8 = 0x04; +pub const IPTOS_MINCOST: u8 = 0x02; + +pub const IPTOS_PREC_NETCONTROL: u8 = 0xe0; +pub const IPTOS_PREC_INTERNETCONTROL: u8 = 0xc0; +pub const IPTOS_PREC_CRITIC_ECP: u8 = 0xa0; +pub const IPTOS_PREC_FLASHOVERRIDE: u8 = 0x80; +pub const IPTOS_PREC_FLASH: u8 = 0x60; +pub const IPTOS_PREC_IMMEDIATE: u8 = 0x40; +pub const IPTOS_PREC_PRIORITY: u8 = 0x20; +pub const IPTOS_PREC_ROUTINE: u8 = 0x00; + +pub const IPTOS_ECN_MASK: u8 = 0x03; +pub const IPTOS_ECN_ECT1: u8 = 0x01; +pub const IPTOS_ECN_ECT0: u8 = 0x02; +pub const IPTOS_ECN_CE: u8 = 0x03; + +pub const IPOPT_CONTROL: u8 = 0x00; +pub const IPOPT_RESERVED1: u8 = 0x20; +pub const IPOPT_RESERVED2: u8 = 0x60; +pub const IPOPT_LSRR: u8 = 131; +pub const IPOPT_RR: u8 = 7; +pub const IPOPT_SSRR: u8 = 137; +pub const IPDEFTTL: u8 = 64; +pub const IPOPT_OPTVAL: u8 = 0; +pub const IPOPT_OLEN: u8 = 1; +pub const IPOPT_OFFSET: u8 = 2; +pub const IPOPT_MINOFF: u8 = 4; +pub const IPOPT_NOP: u8 = 1; +pub const IPOPT_EOL: u8 = 0; +pub const IPOPT_TS: u8 = 68; +pub const IPOPT_TS_TSONLY: u8 = 0; +pub const IPOPT_TS_TSANDADDR: u8 = 1; +pub const IPOPT_TS_PRESPEC: u8 = 3; + +pub const MAX_IPOPTLEN: u8 = 40; +pub const IPVERSION: u8 = 4; +pub const MAXTTL: u8 = 255; + +pub const ARPHRD_ETHER: u16 = 1; +pub const ARPHRD_IEEE802: u16 = 6; +pub const ARPHRD_IEEE1394: u16 = 24; + +pub const SOL_SOCKET: c_int = 0xffff; + +pub const SO_DEBUG: c_int = 0x0001; +pub const SO_REUSEADDR: c_int = 0x0004; +pub const SO_TYPE: c_int = 0x1008; +pub const SO_ERROR: c_int = 0x1007; +pub const SO_DONTROUTE: c_int = 0x0010; +pub const SO_BROADCAST: c_int = 0x0020; +pub const SO_SNDBUF: c_int = 0x1001; +pub const SO_RCVBUF: c_int = 0x1002; +pub const SO_KEEPALIVE: c_int = 0x0008; +pub 
const SO_OOBINLINE: c_int = 0x0100; +pub const SO_LINGER: c_int = 0x0080; +pub const SO_REUSEPORT: c_int = 0x0200; +pub const SO_RCVLOWAT: c_int = 0x1004; +pub const SO_SNDLOWAT: c_int = 0x1003; +pub const SO_RCVTIMEO: c_int = 0x1006; +pub const SO_SNDTIMEO: c_int = 0x1005; +pub const SO_TIMESTAMP: c_int = 0x0400; +pub const SO_ACCEPTCONN: c_int = 0x0002; + +pub const TIOCM_LE: c_int = 0x0100; +pub const TIOCM_DTR: c_int = 0x0001; +pub const TIOCM_RTS: c_int = 0x0002; +pub const TIOCM_ST: c_int = 0x0200; +pub const TIOCM_SR: c_int = 0x0400; +pub const TIOCM_CTS: c_int = 0x1000; +pub const TIOCM_CAR: c_int = TIOCM_CD; +pub const TIOCM_CD: c_int = 0x8000; +pub const TIOCM_RNG: c_int = TIOCM_RI; +pub const TIOCM_RI: c_int = 0x4000; +pub const TIOCM_DSR: c_int = 0x2000; + +pub const SCHED_OTHER: c_int = 3; +pub const SCHED_FIFO: c_int = 1; +pub const SCHED_RR: c_int = 2; + +pub const IPC_PRIVATE: crate::key_t = 0; + +pub const IPC_CREAT: c_int = 0o001000; +pub const IPC_EXCL: c_int = 0o002000; +pub const IPC_NOWAIT: c_int = 0o004000; + +pub const IPC_RMID: c_int = 0; +pub const IPC_SET: c_int = 1; +pub const IPC_STAT: c_int = 2; + +pub const MSG_NOERROR: c_int = 0o010000; + +pub const LOG_NFACILITIES: c_int = 24; + +pub const SEM_FAILED: *mut crate::sem_t = 0xFFFFFFFFFFFFFFFF as *mut sem_t; + +pub const AI_PASSIVE: c_int = 0x00000001; +pub const AI_CANONNAME: c_int = 0x00000002; +pub const AI_NUMERICHOST: c_int = 0x00000004; + +pub const AI_NUMERICSERV: c_int = 0x00000008; + +pub const EAI_BADFLAGS: c_int = 3; +pub const EAI_NONAME: c_int = 8; +pub const EAI_AGAIN: c_int = 2; +pub const EAI_FAIL: c_int = 4; +pub const EAI_FAMILY: c_int = 5; +pub const EAI_SOCKTYPE: c_int = 10; +pub const EAI_SERVICE: c_int = 9; +pub const EAI_MEMORY: c_int = 6; +pub const EAI_SYSTEM: c_int = 11; +pub const EAI_OVERFLOW: c_int = 14; + +pub const NI_NUMERICHOST: c_int = 0x00000002; +pub const NI_NUMERICSERV: c_int = 0x00000008; +pub const NI_NOFQDN: c_int = 0x00000001; +pub const NI_NAMEREQD: c_int = 0x00000004; +pub const NI_DGRAM: c_int = 0x00000010; + +pub const AIO_CANCELED: c_int = 0; +pub const AIO_NOTCANCELED: c_int = 2; +pub const AIO_ALLDONE: c_int = 1; +pub const LIO_READ: c_int = 1; +pub const LIO_WRITE: c_int = 2; +pub const LIO_NOP: c_int = 0; +pub const LIO_WAIT: c_int = 1; +pub const LIO_NOWAIT: c_int = 0; + +pub const ITIMER_REAL: c_int = 0; +pub const ITIMER_VIRTUAL: c_int = 1; +pub const ITIMER_PROF: c_int = 2; + +// DIFF(main): changed to `c_short` in f62eb023ab +pub const POSIX_SPAWN_RESETIDS: c_int = 0x00000010; +pub const POSIX_SPAWN_SETPGROUP: c_int = 0x00000001; +pub const POSIX_SPAWN_SETSIGDEF: c_int = 0x00000004; +pub const POSIX_SPAWN_SETSIGMASK: c_int = 0x00000002; +pub const POSIX_SPAWN_SETSCHEDPARAM: c_int = 0x00000400; +pub const POSIX_SPAWN_SETSCHEDULER: c_int = 0x00000040; + +pub const RTF_UP: c_ushort = 0x0001; +pub const RTF_GATEWAY: c_ushort = 0x0002; + +pub const RTF_HOST: c_ushort = 0x0004; +pub const RTF_DYNAMIC: c_ushort = 0x0010; +pub const RTF_MODIFIED: c_ushort = 0x0020; +pub const RTF_REJECT: c_ushort = 0x0008; +pub const RTF_STATIC: c_ushort = 0x0800; +pub const RTF_XRESOLVE: c_ushort = 0x0200; +pub const RTM_NEWADDR: u16 = 0xc; +pub const RTM_DELADDR: u16 = 0xd; +pub const RTA_DST: c_ushort = 0x1; +pub const RTA_GATEWAY: c_ushort = 0x2; + +pub const IN_ACCESS: u32 = 0x00000001; +pub const IN_MODIFY: u32 = 0x00000002; +pub const IN_ATTRIB: u32 = 0x00000004; +pub const IN_CLOSE_WRITE: u32 = 0x00000008; +pub const IN_CLOSE_NOWRITE: u32 = 0x00000010; +pub const IN_CLOSE: 
u32 = IN_CLOSE_WRITE | IN_CLOSE_NOWRITE; +pub const IN_OPEN: u32 = 0x00000020; +pub const IN_MOVED_FROM: u32 = 0x00000040; +pub const IN_MOVED_TO: u32 = 0x00000080; +pub const IN_MOVE: u32 = IN_MOVED_FROM | IN_MOVED_TO; +pub const IN_CREATE: u32 = 0x00000100; +pub const IN_DELETE: u32 = 0x00000200; +pub const IN_DELETE_SELF: u32 = 0x00000400; +pub const IN_MOVE_SELF: u32 = 0x00000800; +pub const IN_UNMOUNT: u32 = 0x00002000; +pub const IN_Q_OVERFLOW: u32 = 0x00004000; +pub const IN_IGNORED: u32 = 0x00008000; +pub const IN_ONLYDIR: u32 = 0x01000000; +pub const IN_DONT_FOLLOW: u32 = 0x02000000; + +pub const IN_ISDIR: u32 = 0x40000000; +pub const IN_ONESHOT: u32 = 0x80000000; + +pub const REG_EXTENDED: c_int = 0o0001; +pub const REG_ICASE: c_int = 0o0002; +pub const REG_NEWLINE: c_int = 0o0010; +pub const REG_NOSUB: c_int = 0o0004; + +pub const REG_NOTBOL: c_int = 0o00001; +pub const REG_NOTEOL: c_int = 0o00002; + +pub const REG_ENOSYS: c_int = 17; +pub const REG_NOMATCH: c_int = 1; +pub const REG_BADPAT: c_int = 2; +pub const REG_ECOLLATE: c_int = 3; +pub const REG_ECTYPE: c_int = 4; +pub const REG_EESCAPE: c_int = 5; +pub const REG_ESUBREG: c_int = 6; +pub const REG_EBRACK: c_int = 7; +pub const REG_EPAREN: c_int = 8; +pub const REG_EBRACE: c_int = 9; +pub const REG_BADBR: c_int = 10; +pub const REG_ERANGE: c_int = 11; +pub const REG_ESPACE: c_int = 12; +pub const REG_BADRPT: c_int = 13; + +// errno.h +pub const EOK: c_int = 0; +pub const EWOULDBLOCK: c_int = EAGAIN; +pub const EPERM: c_int = 1; +pub const ENOENT: c_int = 2; +pub const ESRCH: c_int = 3; +pub const EINTR: c_int = 4; +pub const EIO: c_int = 5; +pub const ENXIO: c_int = 6; +pub const E2BIG: c_int = 7; +pub const ENOEXEC: c_int = 8; +pub const EBADF: c_int = 9; +pub const ECHILD: c_int = 10; +pub const EAGAIN: c_int = 11; +pub const ENOMEM: c_int = 12; +pub const EACCES: c_int = 13; +pub const EFAULT: c_int = 14; +pub const ENOTBLK: c_int = 15; +pub const EBUSY: c_int = 16; +pub const EEXIST: c_int = 17; +pub const EXDEV: c_int = 18; +pub const ENODEV: c_int = 19; +pub const ENOTDIR: c_int = 20; +pub const EISDIR: c_int = 21; +pub const EINVAL: c_int = 22; +pub const ENFILE: c_int = 23; +pub const EMFILE: c_int = 24; +pub const ENOTTY: c_int = 25; +pub const ETXTBSY: c_int = 26; +pub const EFBIG: c_int = 27; +pub const ENOSPC: c_int = 28; +pub const ESPIPE: c_int = 29; +pub const EROFS: c_int = 30; +pub const EMLINK: c_int = 31; +pub const EPIPE: c_int = 32; +pub const EDOM: c_int = 33; +pub const ERANGE: c_int = 34; +pub const ENOMSG: c_int = 35; +pub const EIDRM: c_int = 36; +pub const ECHRNG: c_int = 37; +pub const EL2NSYNC: c_int = 38; +pub const EL3HLT: c_int = 39; +pub const EL3RST: c_int = 40; +pub const ELNRNG: c_int = 41; +pub const EUNATCH: c_int = 42; +pub const ENOCSI: c_int = 43; +pub const EL2HLT: c_int = 44; +pub const EDEADLK: c_int = 45; +pub const ENOLCK: c_int = 46; +pub const ECANCELED: c_int = 47; +pub const EDQUOT: c_int = 49; +pub const EBADE: c_int = 50; +pub const EBADR: c_int = 51; +pub const EXFULL: c_int = 52; +pub const ENOANO: c_int = 53; +pub const EBADRQC: c_int = 54; +pub const EBADSLT: c_int = 55; +pub const EDEADLOCK: c_int = 56; +pub const EBFONT: c_int = 57; +pub const EOWNERDEAD: c_int = 58; +pub const ENOSTR: c_int = 60; +pub const ENODATA: c_int = 61; +pub const ETIME: c_int = 62; +pub const ENOSR: c_int = 63; +pub const ENONET: c_int = 64; +pub const ENOPKG: c_int = 65; +pub const EREMOTE: c_int = 66; +pub const ENOLINK: c_int = 67; +pub const EADV: c_int = 68; +pub const ESRMNT: c_int = 
69; +pub const ECOMM: c_int = 70; +pub const EPROTO: c_int = 71; +pub const EMULTIHOP: c_int = 74; +pub const EBADMSG: c_int = 77; +pub const ENAMETOOLONG: c_int = 78; +pub const EOVERFLOW: c_int = 79; +pub const ENOTUNIQ: c_int = 80; +pub const EBADFD: c_int = 81; +pub const EREMCHG: c_int = 82; +pub const ELIBACC: c_int = 83; +pub const ELIBBAD: c_int = 84; +pub const ELIBSCN: c_int = 85; +pub const ELIBMAX: c_int = 86; +pub const ELIBEXEC: c_int = 87; +pub const EILSEQ: c_int = 88; +pub const ENOSYS: c_int = 89; +pub const ELOOP: c_int = 90; +pub const ERESTART: c_int = 91; +pub const ESTRPIPE: c_int = 92; +pub const ENOTEMPTY: c_int = 93; +pub const EUSERS: c_int = 94; +pub const ENOTRECOVERABLE: c_int = 95; +pub const EOPNOTSUPP: c_int = 103; +pub const EFPOS: c_int = 110; +pub const ESTALE: c_int = 122; +pub const EINPROGRESS: c_int = 236; +pub const EALREADY: c_int = 237; +pub const ENOTSOCK: c_int = 238; +pub const EDESTADDRREQ: c_int = 239; +pub const EMSGSIZE: c_int = 240; +pub const EPROTOTYPE: c_int = 241; +pub const ENOPROTOOPT: c_int = 242; +pub const EPROTONOSUPPORT: c_int = 243; +pub const ESOCKTNOSUPPORT: c_int = 244; +pub const EPFNOSUPPORT: c_int = 246; +pub const EAFNOSUPPORT: c_int = 247; +pub const EADDRINUSE: c_int = 248; +pub const EADDRNOTAVAIL: c_int = 249; +pub const ENETDOWN: c_int = 250; +pub const ENETUNREACH: c_int = 251; +pub const ENETRESET: c_int = 252; +pub const ECONNABORTED: c_int = 253; +pub const ECONNRESET: c_int = 254; +pub const ENOBUFS: c_int = 255; +pub const EISCONN: c_int = 256; +pub const ENOTCONN: c_int = 257; +pub const ESHUTDOWN: c_int = 258; +pub const ETOOMANYREFS: c_int = 259; +pub const ETIMEDOUT: c_int = 260; +pub const ECONNREFUSED: c_int = 261; +pub const EHOSTDOWN: c_int = 264; +pub const EHOSTUNREACH: c_int = 265; +pub const EBADRPC: c_int = 272; +pub const ERPCMISMATCH: c_int = 273; +pub const EPROGUNAVAIL: c_int = 274; +pub const EPROGMISMATCH: c_int = 275; +pub const EPROCUNAVAIL: c_int = 276; +pub const ENOREMOTE: c_int = 300; +pub const ENONDP: c_int = 301; +pub const EBADFSYS: c_int = 302; +pub const EMORE: c_int = 309; +pub const ECTRLTERM: c_int = 310; +pub const ENOLIC: c_int = 311; +pub const ESRVRFAULT: c_int = 312; +pub const EENDIAN: c_int = 313; +pub const ESECTYPEINVAL: c_int = 314; + +pub const RUSAGE_CHILDREN: c_int = -1; +pub const L_tmpnam: c_uint = 255; + +pub const _PC_LINK_MAX: c_int = 1; +pub const _PC_MAX_CANON: c_int = 2; +pub const _PC_MAX_INPUT: c_int = 3; +pub const _PC_NAME_MAX: c_int = 4; +pub const _PC_PATH_MAX: c_int = 5; +pub const _PC_PIPE_BUF: c_int = 6; +pub const _PC_CHOWN_RESTRICTED: c_int = 9; +pub const _PC_NO_TRUNC: c_int = 7; +pub const _PC_VDISABLE: c_int = 8; +pub const _PC_SYNC_IO: c_int = 14; +pub const _PC_ASYNC_IO: c_int = 12; +pub const _PC_PRIO_IO: c_int = 13; +pub const _PC_SOCK_MAXBUF: c_int = 15; +pub const _PC_FILESIZEBITS: c_int = 16; +pub const _PC_REC_INCR_XFER_SIZE: c_int = 22; +pub const _PC_REC_MAX_XFER_SIZE: c_int = 23; +pub const _PC_REC_MIN_XFER_SIZE: c_int = 24; +pub const _PC_REC_XFER_ALIGN: c_int = 25; +pub const _PC_ALLOC_SIZE_MIN: c_int = 21; +pub const _PC_SYMLINK_MAX: c_int = 17; +pub const _PC_2_SYMLINKS: c_int = 20; + +pub const _SC_PAGE_SIZE: c_int = _SC_PAGESIZE; +pub const _SC_ARG_MAX: c_int = 1; +pub const _SC_CHILD_MAX: c_int = 2; +pub const _SC_CLK_TCK: c_int = 3; +pub const _SC_NGROUPS_MAX: c_int = 4; +pub const _SC_OPEN_MAX: c_int = 5; +pub const _SC_JOB_CONTROL: c_int = 6; +pub const _SC_SAVED_IDS: c_int = 7; +pub const _SC_VERSION: c_int = 8; +pub 
const _SC_PASS_MAX: c_int = 9; +pub const _SC_PAGESIZE: c_int = 11; +pub const _SC_XOPEN_VERSION: c_int = 12; +pub const _SC_STREAM_MAX: c_int = 13; +pub const _SC_TZNAME_MAX: c_int = 14; +pub const _SC_AIO_LISTIO_MAX: c_int = 15; +pub const _SC_AIO_MAX: c_int = 16; +pub const _SC_AIO_PRIO_DELTA_MAX: c_int = 17; +pub const _SC_DELAYTIMER_MAX: c_int = 18; +pub const _SC_MQ_OPEN_MAX: c_int = 19; +pub const _SC_MQ_PRIO_MAX: c_int = 20; +pub const _SC_RTSIG_MAX: c_int = 21; +pub const _SC_SEM_NSEMS_MAX: c_int = 22; +pub const _SC_SEM_VALUE_MAX: c_int = 23; +pub const _SC_SIGQUEUE_MAX: c_int = 24; +pub const _SC_TIMER_MAX: c_int = 25; +pub const _SC_ASYNCHRONOUS_IO: c_int = 26; +pub const _SC_FSYNC: c_int = 27; +pub const _SC_MAPPED_FILES: c_int = 28; +pub const _SC_MEMLOCK: c_int = 29; +pub const _SC_MEMLOCK_RANGE: c_int = 30; +pub const _SC_MEMORY_PROTECTION: c_int = 31; +pub const _SC_MESSAGE_PASSING: c_int = 32; +pub const _SC_PRIORITIZED_IO: c_int = 33; +pub const _SC_PRIORITY_SCHEDULING: c_int = 34; +pub const _SC_REALTIME_SIGNALS: c_int = 35; +pub const _SC_SEMAPHORES: c_int = 36; +pub const _SC_SHARED_MEMORY_OBJECTS: c_int = 37; +pub const _SC_SYNCHRONIZED_IO: c_int = 38; +pub const _SC_TIMERS: c_int = 39; +pub const _SC_GETGR_R_SIZE_MAX: c_int = 40; +pub const _SC_GETPW_R_SIZE_MAX: c_int = 41; +pub const _SC_LOGIN_NAME_MAX: c_int = 42; +pub const _SC_THREAD_DESTRUCTOR_ITERATIONS: c_int = 43; +pub const _SC_THREAD_KEYS_MAX: c_int = 44; +pub const _SC_THREAD_STACK_MIN: c_int = 45; +pub const _SC_THREAD_THREADS_MAX: c_int = 46; +pub const _SC_TTY_NAME_MAX: c_int = 47; +pub const _SC_THREADS: c_int = 48; +pub const _SC_THREAD_ATTR_STACKADDR: c_int = 49; +pub const _SC_THREAD_ATTR_STACKSIZE: c_int = 50; +pub const _SC_THREAD_PRIORITY_SCHEDULING: c_int = 51; +pub const _SC_THREAD_PRIO_INHERIT: c_int = 52; +pub const _SC_THREAD_PRIO_PROTECT: c_int = 53; +pub const _SC_THREAD_PROCESS_SHARED: c_int = 54; +pub const _SC_THREAD_SAFE_FUNCTIONS: c_int = 55; +pub const _SC_2_CHAR_TERM: c_int = 56; +pub const _SC_2_C_BIND: c_int = 57; +pub const _SC_2_C_DEV: c_int = 58; +pub const _SC_2_C_VERSION: c_int = 59; +pub const _SC_2_FORT_DEV: c_int = 60; +pub const _SC_2_FORT_RUN: c_int = 61; +pub const _SC_2_LOCALEDEF: c_int = 62; +pub const _SC_2_SW_DEV: c_int = 63; +pub const _SC_2_UPE: c_int = 64; +pub const _SC_2_VERSION: c_int = 65; +pub const _SC_ATEXIT_MAX: c_int = 66; +pub const _SC_AVPHYS_PAGES: c_int = 67; +pub const _SC_BC_BASE_MAX: c_int = 68; +pub const _SC_BC_DIM_MAX: c_int = 69; +pub const _SC_BC_SCALE_MAX: c_int = 70; +pub const _SC_BC_STRING_MAX: c_int = 71; +pub const _SC_CHARCLASS_NAME_MAX: c_int = 72; +pub const _SC_CHAR_BIT: c_int = 73; +pub const _SC_CHAR_MAX: c_int = 74; +pub const _SC_CHAR_MIN: c_int = 75; +pub const _SC_COLL_WEIGHTS_MAX: c_int = 76; +pub const _SC_EQUIV_CLASS_MAX: c_int = 77; +pub const _SC_EXPR_NEST_MAX: c_int = 78; +pub const _SC_INT_MAX: c_int = 79; +pub const _SC_INT_MIN: c_int = 80; +pub const _SC_LINE_MAX: c_int = 81; +pub const _SC_LONG_BIT: c_int = 82; +pub const _SC_MB_LEN_MAX: c_int = 83; +pub const _SC_NL_ARGMAX: c_int = 84; +pub const _SC_NL_LANGMAX: c_int = 85; +pub const _SC_NL_MSGMAX: c_int = 86; +pub const _SC_NL_NMAX: c_int = 87; +pub const _SC_NL_SETMAX: c_int = 88; +pub const _SC_NL_TEXTMAX: c_int = 89; +pub const _SC_NPROCESSORS_CONF: c_int = 90; +pub const _SC_NPROCESSORS_ONLN: c_int = 91; +pub const _SC_NZERO: c_int = 92; +pub const _SC_PHYS_PAGES: c_int = 93; +pub const _SC_PII: c_int = 94; +pub const _SC_PII_INTERNET: c_int = 95; +pub const 
_SC_PII_INTERNET_DGRAM: c_int = 96; +pub const _SC_PII_INTERNET_STREAM: c_int = 97; +pub const _SC_PII_OSI: c_int = 98; +pub const _SC_PII_OSI_CLTS: c_int = 99; +pub const _SC_PII_OSI_COTS: c_int = 100; +pub const _SC_PII_OSI_M: c_int = 101; +pub const _SC_PII_SOCKET: c_int = 102; +pub const _SC_PII_XTI: c_int = 103; +pub const _SC_POLL: c_int = 104; +pub const _SC_RE_DUP_MAX: c_int = 105; +pub const _SC_SCHAR_MAX: c_int = 106; +pub const _SC_SCHAR_MIN: c_int = 107; +pub const _SC_SELECT: c_int = 108; +pub const _SC_SHRT_MAX: c_int = 109; +pub const _SC_SHRT_MIN: c_int = 110; +pub const _SC_SSIZE_MAX: c_int = 111; +pub const _SC_T_IOV_MAX: c_int = 112; +pub const _SC_UCHAR_MAX: c_int = 113; +pub const _SC_UINT_MAX: c_int = 114; +pub const _SC_UIO_MAXIOV: c_int = 115; +pub const _SC_ULONG_MAX: c_int = 116; +pub const _SC_USHRT_MAX: c_int = 117; +pub const _SC_WORD_BIT: c_int = 118; +pub const _SC_XOPEN_CRYPT: c_int = 119; +pub const _SC_XOPEN_ENH_I18N: c_int = 120; +pub const _SC_XOPEN_SHM: c_int = 121; +pub const _SC_XOPEN_UNIX: c_int = 122; +pub const _SC_XOPEN_XCU_VERSION: c_int = 123; +pub const _SC_XOPEN_XPG2: c_int = 124; +pub const _SC_XOPEN_XPG3: c_int = 125; +pub const _SC_XOPEN_XPG4: c_int = 126; +pub const _SC_XBS5_ILP32_OFF32: c_int = 127; +pub const _SC_XBS5_ILP32_OFFBIG: c_int = 128; +pub const _SC_XBS5_LP64_OFF64: c_int = 129; +pub const _SC_XBS5_LPBIG_OFFBIG: c_int = 130; +pub const _SC_ADVISORY_INFO: c_int = 131; +pub const _SC_CPUTIME: c_int = 132; +pub const _SC_SPAWN: c_int = 133; +pub const _SC_SPORADIC_SERVER: c_int = 134; +pub const _SC_THREAD_CPUTIME: c_int = 135; +pub const _SC_THREAD_SPORADIC_SERVER: c_int = 136; +pub const _SC_TIMEOUTS: c_int = 137; +pub const _SC_BARRIERS: c_int = 138; +pub const _SC_CLOCK_SELECTION: c_int = 139; +pub const _SC_MONOTONIC_CLOCK: c_int = 140; +pub const _SC_READER_WRITER_LOCKS: c_int = 141; +pub const _SC_SPIN_LOCKS: c_int = 142; +pub const _SC_TYPED_MEMORY_OBJECTS: c_int = 143; +pub const _SC_TRACE_EVENT_FILTER: c_int = 144; +pub const _SC_TRACE: c_int = 145; +pub const _SC_TRACE_INHERIT: c_int = 146; +pub const _SC_TRACE_LOG: c_int = 147; +pub const _SC_2_PBS: c_int = 148; +pub const _SC_2_PBS_ACCOUNTING: c_int = 149; +pub const _SC_2_PBS_CHECKPOINT: c_int = 150; +pub const _SC_2_PBS_LOCATE: c_int = 151; +pub const _SC_2_PBS_MESSAGE: c_int = 152; +pub const _SC_2_PBS_TRACK: c_int = 153; +pub const _SC_HOST_NAME_MAX: c_int = 154; +pub const _SC_IOV_MAX: c_int = 155; +pub const _SC_IPV6: c_int = 156; +pub const _SC_RAW_SOCKETS: c_int = 157; +pub const _SC_REGEXP: c_int = 158; +pub const _SC_SHELL: c_int = 159; +pub const _SC_SS_REPL_MAX: c_int = 160; +pub const _SC_SYMLOOP_MAX: c_int = 161; +pub const _SC_TRACE_EVENT_NAME_MAX: c_int = 162; +pub const _SC_TRACE_NAME_MAX: c_int = 163; +pub const _SC_TRACE_SYS_MAX: c_int = 164; +pub const _SC_TRACE_USER_EVENT_MAX: c_int = 165; +pub const _SC_V6_ILP32_OFF32: c_int = 166; +pub const _SC_V6_ILP32_OFFBIG: c_int = 167; +pub const _SC_V6_LP64_OFF64: c_int = 168; +pub const _SC_V6_LPBIG_OFFBIG: c_int = 169; +pub const _SC_XOPEN_REALTIME: c_int = 170; +pub const _SC_XOPEN_REALTIME_THREADS: c_int = 171; +pub const _SC_XOPEN_LEGACY: c_int = 172; +pub const _SC_XOPEN_STREAMS: c_int = 173; +pub const _SC_V7_ILP32_OFF32: c_int = 176; +pub const _SC_V7_ILP32_OFFBIG: c_int = 177; +pub const _SC_V7_LP64_OFF64: c_int = 178; +pub const _SC_V7_LPBIG_OFFBIG: c_int = 179; + +pub const GLOB_ERR: c_int = 0x0001; +pub const GLOB_MARK: c_int = 0x0002; +pub const GLOB_NOSORT: c_int = 0x0004; +pub const 
GLOB_DOOFFS: c_int = 0x0008; +pub const GLOB_NOCHECK: c_int = 0x0010; +pub const GLOB_APPEND: c_int = 0x0020; +pub const GLOB_NOESCAPE: c_int = 0x0040; + +pub const GLOB_NOSPACE: c_int = 1; +pub const GLOB_ABORTED: c_int = 2; +pub const GLOB_NOMATCH: c_int = 3; + +pub const S_IEXEC: mode_t = crate::S_IXUSR; +pub const S_IWRITE: mode_t = crate::S_IWUSR; +pub const S_IREAD: mode_t = crate::S_IRUSR; + +pub const S_IFIFO: mode_t = 0o1_0000; +pub const S_IFCHR: mode_t = 0o2_0000; +pub const S_IFDIR: mode_t = 0o4_0000; +pub const S_IFBLK: mode_t = 0o6_0000; +pub const S_IFREG: mode_t = 0o10_0000; +pub const S_IFLNK: mode_t = 0o12_0000; +pub const S_IFSOCK: mode_t = 0o14_0000; +pub const S_IFMT: mode_t = 0o17_0000; + +pub const S_IXOTH: mode_t = 0o0001; +pub const S_IWOTH: mode_t = 0o0002; +pub const S_IROTH: mode_t = 0o0004; +pub const S_IRWXO: mode_t = 0o0007; +pub const S_IXGRP: mode_t = 0o0010; +pub const S_IWGRP: mode_t = 0o0020; +pub const S_IRGRP: mode_t = 0o0040; +pub const S_IRWXG: mode_t = 0o0070; +pub const S_IXUSR: mode_t = 0o0100; +pub const S_IWUSR: mode_t = 0o0200; +pub const S_IRUSR: mode_t = 0o0400; +pub const S_IRWXU: mode_t = 0o0700; + +pub const F_LOCK: c_int = 1; +pub const F_TEST: c_int = 3; +pub const F_TLOCK: c_int = 2; +pub const F_ULOCK: c_int = 0; + +pub const ST_RDONLY: c_ulong = 0x01; +pub const ST_NOSUID: c_ulong = 0x04; +pub const ST_NOEXEC: c_ulong = 0x02; +pub const ST_NOATIME: c_ulong = 0x20; + +pub const RTLD_NEXT: *mut c_void = -3i64 as *mut c_void; +pub const RTLD_DEFAULT: *mut c_void = -2i64 as *mut c_void; +pub const RTLD_NODELETE: c_int = 0x1000; +pub const RTLD_NOW: c_int = 0x0002; + +pub const EMPTY: c_short = 0; +pub const RUN_LVL: c_short = 1; +pub const BOOT_TIME: c_short = 2; +pub const NEW_TIME: c_short = 4; +pub const OLD_TIME: c_short = 3; +pub const INIT_PROCESS: c_short = 5; +pub const LOGIN_PROCESS: c_short = 6; +pub const USER_PROCESS: c_short = 7; +pub const DEAD_PROCESS: c_short = 8; +pub const ACCOUNTING: c_short = 9; + +pub const ENOTSUP: c_int = 48; + +pub const BUFSIZ: c_uint = 1024; +pub const TMP_MAX: c_uint = 26 * 26 * 26; +pub const FOPEN_MAX: c_uint = 16; +pub const FILENAME_MAX: c_uint = 255; + +pub const NI_MAXHOST: crate::socklen_t = 1025; +pub const M_KEEP: c_int = 4; +pub const REG_STARTEND: c_int = 0o00004; +pub const VEOF: usize = 4; + +pub const RTLD_GLOBAL: c_int = 0x0100; +pub const RTLD_NOLOAD: c_int = 0x0004; + +pub const O_RDONLY: c_int = 0o000000; +pub const O_WRONLY: c_int = 0o000001; +pub const O_RDWR: c_int = 0o000002; + +pub const O_EXEC: c_int = 0o00003; +pub const O_ASYNC: c_int = 0o0200000; +pub const O_NDELAY: c_int = O_NONBLOCK; +pub const O_TRUNC: c_int = 0o001000; +pub const O_CLOEXEC: c_int = 0o020000; +pub const O_DIRECTORY: c_int = 0o4000000; +pub const O_ACCMODE: c_int = 0o000007; +pub const O_APPEND: c_int = 0o000010; +pub const O_CREAT: c_int = 0o000400; +pub const O_EXCL: c_int = 0o002000; +pub const O_NOCTTY: c_int = 0o004000; +pub const O_NONBLOCK: c_int = 0o000200; +pub const O_SYNC: c_int = 0o000040; +pub const O_RSYNC: c_int = 0o000100; +pub const O_DSYNC: c_int = 0o000020; +pub const O_NOFOLLOW: c_int = 0o010000; + +pub const POSIX_FADV_DONTNEED: c_int = 4; +pub const POSIX_FADV_NOREUSE: c_int = 5; + +pub const SOCK_SEQPACKET: c_int = 5; +pub const SOCK_STREAM: c_int = 1; +pub const SOCK_DGRAM: c_int = 2; +pub const SOCK_RAW: c_int = 3; +pub const SOCK_RDM: c_int = 4; +pub const SOCK_CLOEXEC: c_int = 0x10000000; + +pub const SA_SIGINFO: c_int = 0x0002; +pub const SA_NOCLDWAIT: c_int = 0x0020; 
+pub const SA_NODEFER: c_int = 0x0010; +pub const SA_RESETHAND: c_int = 0x0004; +pub const SA_NOCLDSTOP: c_int = 0x0001; + +pub const SIGTTIN: c_int = 26; +pub const SIGTTOU: c_int = 27; +pub const SIGXCPU: c_int = 30; +pub const SIGXFSZ: c_int = 31; +pub const SIGVTALRM: c_int = 28; +pub const SIGPROF: c_int = 29; +pub const SIGWINCH: c_int = 20; +pub const SIGCHLD: c_int = 18; +pub const SIGBUS: c_int = 10; +pub const SIGUSR1: c_int = 16; +pub const SIGUSR2: c_int = 17; +pub const SIGCONT: c_int = 25; +pub const SIGSTOP: c_int = 23; +pub const SIGTSTP: c_int = 24; +pub const SIGURG: c_int = 21; +pub const SIGIO: c_int = SIGPOLL; +pub const SIGSYS: c_int = 12; +pub const SIGPOLL: c_int = 22; +pub const SIGPWR: c_int = 19; +pub const SIG_SETMASK: c_int = 2; +pub const SIG_BLOCK: c_int = 0; +pub const SIG_UNBLOCK: c_int = 1; + +pub const POLLWRNORM: c_short = crate::POLLOUT; +pub const POLLWRBAND: c_short = 0x0010; + +pub const F_SETLK: c_int = 106; +pub const F_SETLKW: c_int = 107; +pub const F_ALLOCSP: c_int = 110; +pub const F_FREESP: c_int = 111; +pub const F_GETLK: c_int = 114; + +pub const F_RDLCK: c_int = 1; +pub const F_WRLCK: c_int = 2; +pub const F_UNLCK: c_int = 3; + +pub const NCCS: usize = 40; + +pub const MAP_ANON: c_int = MAP_ANONYMOUS; +pub const MAP_ANONYMOUS: c_int = 0x00080000; + +pub const MCL_CURRENT: c_int = 0x000000001; +pub const MCL_FUTURE: c_int = 0x000000002; + +pub const _TIO_CBAUD: crate::tcflag_t = 15; +pub const CBAUD: crate::tcflag_t = _TIO_CBAUD; +pub const TAB1: crate::tcflag_t = 0x0800; +pub const TAB2: crate::tcflag_t = 0x1000; +pub const TAB3: crate::tcflag_t = 0x1800; +pub const CR1: crate::tcflag_t = 0x200; +pub const CR2: crate::tcflag_t = 0x400; +pub const CR3: crate::tcflag_t = 0x600; +pub const FF1: crate::tcflag_t = 0x8000; +pub const BS1: crate::tcflag_t = 0x2000; +pub const VT1: crate::tcflag_t = 0x4000; +pub const VWERASE: usize = 14; +pub const VREPRINT: usize = 12; +pub const VSUSP: usize = 10; +pub const VSTART: usize = 8; +pub const VSTOP: usize = 9; +pub const VDISCARD: usize = 13; +pub const VTIME: usize = 17; +pub const IXON: crate::tcflag_t = 0x00000400; +pub const IXOFF: crate::tcflag_t = 0x00001000; +pub const ONLCR: crate::tcflag_t = 0x00000004; +pub const CSIZE: crate::tcflag_t = 0x00000030; +pub const CS6: crate::tcflag_t = 0x10; +pub const CS7: crate::tcflag_t = 0x20; +pub const CS8: crate::tcflag_t = 0x30; +pub const CSTOPB: crate::tcflag_t = 0x00000040; +pub const CREAD: crate::tcflag_t = 0x00000080; +pub const PARENB: crate::tcflag_t = 0x00000100; +pub const PARODD: crate::tcflag_t = 0x00000200; +pub const HUPCL: crate::tcflag_t = 0x00000400; +pub const CLOCAL: crate::tcflag_t = 0x00000800; +pub const ECHOKE: crate::tcflag_t = 0x00000800; +pub const ECHOE: crate::tcflag_t = 0x00000010; +pub const ECHOK: crate::tcflag_t = 0x00000020; +pub const ECHONL: crate::tcflag_t = 0x00000040; +pub const ECHOCTL: crate::tcflag_t = 0x00000200; +pub const ISIG: crate::tcflag_t = 0x00000001; +pub const ICANON: crate::tcflag_t = 0x00000002; +pub const NOFLSH: crate::tcflag_t = 0x00000080; +pub const OLCUC: crate::tcflag_t = 0x00000002; +pub const NLDLY: crate::tcflag_t = 0x00000100; +pub const CRDLY: crate::tcflag_t = 0x00000600; +pub const TABDLY: crate::tcflag_t = 0x00001800; +pub const BSDLY: crate::tcflag_t = 0x00002000; +pub const FFDLY: crate::tcflag_t = 0x00008000; +pub const VTDLY: crate::tcflag_t = 0x00004000; +pub const XTABS: crate::tcflag_t = 0x1800; + +pub const B0: crate::speed_t = 0; +pub const B50: crate::speed_t = 1; +pub const 
B75: crate::speed_t = 2; +pub const B110: crate::speed_t = 3; +pub const B134: crate::speed_t = 4; +pub const B150: crate::speed_t = 5; +pub const B200: crate::speed_t = 6; +pub const B300: crate::speed_t = 7; +pub const B600: crate::speed_t = 8; +pub const B1200: crate::speed_t = 9; +pub const B1800: crate::speed_t = 10; +pub const B2400: crate::speed_t = 11; +pub const B4800: crate::speed_t = 12; +pub const B9600: crate::speed_t = 13; +pub const B19200: crate::speed_t = 14; +pub const B38400: crate::speed_t = 15; +pub const EXTA: crate::speed_t = 14; +pub const EXTB: crate::speed_t = 15; +pub const B57600: crate::speed_t = 57600; +pub const B115200: crate::speed_t = 115200; + +pub const VEOL: usize = 5; +pub const VEOL2: usize = 6; +pub const VMIN: usize = 16; +pub const IEXTEN: crate::tcflag_t = 0x00008000; +pub const TOSTOP: crate::tcflag_t = 0x00000100; + +pub const TCSANOW: c_int = 0x0001; +pub const TCSADRAIN: c_int = 0x0002; +pub const TCSAFLUSH: c_int = 0x0004; + +pub const HW_MACHINE: c_int = 1; +pub const HW_MODEL: c_int = 2; +pub const HW_NCPU: c_int = 3; +pub const HW_BYTEORDER: c_int = 4; +pub const HW_PHYSMEM: c_int = 5; +pub const HW_USERMEM: c_int = 6; +pub const HW_PAGESIZE: c_int = 7; +pub const HW_DISKNAMES: c_int = 8; +pub const CTL_KERN: c_int = 1; +pub const CTL_VM: c_int = 2; +pub const CTL_VFS: c_int = 3; +pub const CTL_NET: c_int = 4; +pub const CTL_DEBUG: c_int = 5; +pub const CTL_HW: c_int = 6; +pub const CTL_MACHDEP: c_int = 7; +pub const CTL_USER: c_int = 8; + +pub const DAY_1: crate::nl_item = 8; +pub const DAY_2: crate::nl_item = 9; +pub const DAY_3: crate::nl_item = 10; +pub const DAY_4: crate::nl_item = 11; +pub const DAY_5: crate::nl_item = 12; +pub const DAY_6: crate::nl_item = 13; +pub const DAY_7: crate::nl_item = 14; + +pub const MON_1: crate::nl_item = 22; +pub const MON_2: crate::nl_item = 23; +pub const MON_3: crate::nl_item = 24; +pub const MON_4: crate::nl_item = 25; +pub const MON_5: crate::nl_item = 26; +pub const MON_6: crate::nl_item = 27; +pub const MON_7: crate::nl_item = 28; +pub const MON_8: crate::nl_item = 29; +pub const MON_9: crate::nl_item = 30; +pub const MON_10: crate::nl_item = 31; +pub const MON_11: crate::nl_item = 32; +pub const MON_12: crate::nl_item = 33; + +pub const ABDAY_1: crate::nl_item = 15; +pub const ABDAY_2: crate::nl_item = 16; +pub const ABDAY_3: crate::nl_item = 17; +pub const ABDAY_4: crate::nl_item = 18; +pub const ABDAY_5: crate::nl_item = 19; +pub const ABDAY_6: crate::nl_item = 20; +pub const ABDAY_7: crate::nl_item = 21; + +pub const ABMON_1: crate::nl_item = 34; +pub const ABMON_2: crate::nl_item = 35; +pub const ABMON_3: crate::nl_item = 36; +pub const ABMON_4: crate::nl_item = 37; +pub const ABMON_5: crate::nl_item = 38; +pub const ABMON_6: crate::nl_item = 39; +pub const ABMON_7: crate::nl_item = 40; +pub const ABMON_8: crate::nl_item = 41; +pub const ABMON_9: crate::nl_item = 42; +pub const ABMON_10: crate::nl_item = 43; +pub const ABMON_11: crate::nl_item = 44; +pub const ABMON_12: crate::nl_item = 45; + +pub const AF_CCITT: c_int = 10; +pub const AF_CHAOS: c_int = 5; +pub const AF_CNT: c_int = 21; +pub const AF_COIP: c_int = 20; +pub const AF_DATAKIT: c_int = 9; +pub const AF_DECnet: c_int = 12; +pub const AF_DLI: c_int = 13; +pub const AF_E164: c_int = 26; +pub const AF_ECMA: c_int = 8; +pub const AF_HYLINK: c_int = 15; +pub const AF_IMPLINK: c_int = 3; +pub const AF_ISO: c_int = 7; +pub const AF_LAT: c_int = 14; +pub const AF_LINK: c_int = 18; +pub const AF_OSI: c_int = 7; +pub const AF_PUP: c_int = 
4; +pub const ALT_DIGITS: crate::nl_item = 50; +pub const AM_STR: crate::nl_item = 6; +pub const B76800: crate::speed_t = 76800; + +pub const BIOCFLUSH: c_int = 17000; +pub const BIOCGBLEN: c_int = 1074020966; +pub const BIOCGDLT: c_int = 1074020970; +pub const BIOCGHDRCMPLT: c_int = 1074020980; +pub const BIOCGRTIMEOUT: c_int = 1074807406; +pub const BIOCIMMEDIATE: c_int = -2147204496; +pub const BIOCPROMISC: c_int = 17001; +pub const BIOCSBLEN: c_int = -1073462682; +pub const BIOCSETF: c_int = -2146418073; +pub const BIOCSHDRCMPLT: c_int = -2147204491; +pub const BIOCSRTIMEOUT: c_int = -2146418067; +pub const BIOCVERSION: c_int = 1074020977; + +pub const BPF_ALIGNMENT: usize = size_of::<c_long>(); +pub const CHAR_BIT: usize = 8; +pub const CODESET: crate::nl_item = 1; +pub const CRNCYSTR: crate::nl_item = 55; + +pub const D_FLAG_FILTER: c_int = 0x00000001; +pub const D_FLAG_STAT: c_int = 0x00000002; +pub const D_FLAG_STAT_FORM_MASK: c_int = 0x000000f0; +pub const D_FLAG_STAT_FORM_T32_2001: c_int = 0x00000010; +pub const D_FLAG_STAT_FORM_T32_2008: c_int = 0x00000020; +pub const D_FLAG_STAT_FORM_T64_2008: c_int = 0x00000030; +pub const D_FLAG_STAT_FORM_UNSET: c_int = 0x00000000; + +pub const D_FMT: crate::nl_item = 3; +pub const D_GETFLAG: c_int = 1; +pub const D_SETFLAG: c_int = 2; +pub const D_T_FMT: crate::nl_item = 2; +pub const ERA: crate::nl_item = 46; +pub const ERA_D_FMT: crate::nl_item = 47; +pub const ERA_D_T_FMT: crate::nl_item = 48; +pub const ERA_T_FMT: crate::nl_item = 49; +pub const RADIXCHAR: crate::nl_item = 51; +pub const THOUSEP: crate::nl_item = 52; +pub const YESEXPR: crate::nl_item = 53; +pub const NOEXPR: crate::nl_item = 54; +pub const F_GETOWN: c_int = 35; + +pub const FIONBIO: c_int = -2147195266; +pub const FIOASYNC: c_int = -2147195267; +pub const FIOCLEX: c_int = 26113; +pub const FIOGETOWN: c_int = 1074030203; +pub const FIONCLEX: c_int = 26114; +pub const FIONREAD: c_int = 1074030207; +pub const FIOSETOWN: c_int = -2147195268; + +pub const F_SETOWN: c_int = 36; +pub const IFF_LINK0: c_int = 0x00001000; +pub const IFF_LINK1: c_int = 0x00002000; +pub const IFF_LINK2: c_int = 0x00004000; +pub const IFF_OACTIVE: c_int = 0x00000400; +pub const IFF_SIMPLEX: c_int = 0x00000800; +pub const IHFLOW: tcflag_t = 0x00000001; +pub const IIDLE: tcflag_t = 0x00000008; +pub const IP_RECVDSTADDR: c_int = 7; +pub const IP_RECVIF: c_int = 20; +pub const IPTOS_ECN_NOTECT: u8 = 0x00; +pub const IUCLC: tcflag_t = 0x00000200; +pub const IUTF8: tcflag_t = 0x0004000; + +pub const KERN_ARGMAX: c_int = 8; +pub const KERN_BOOTTIME: c_int = 21; +pub const KERN_CLOCKRATE: c_int = 12; +pub const KERN_FILE: c_int = 15; +pub const KERN_HOSTID: c_int = 11; +pub const KERN_HOSTNAME: c_int = 10; +pub const KERN_JOB_CONTROL: c_int = 19; +pub const KERN_MAXFILES: c_int = 7; +pub const KERN_MAXPROC: c_int = 6; +pub const KERN_MAXVNODES: c_int = 5; +pub const KERN_NGROUPS: c_int = 18; +pub const KERN_OSRELEASE: c_int = 2; +pub const KERN_OSREV: c_int = 3; +pub const KERN_OSTYPE: c_int = 1; +pub const KERN_POSIX1: c_int = 17; +pub const KERN_PROC: c_int = 14; +pub const KERN_PROC_ALL: c_int = 0; +pub const KERN_PROC_PGRP: c_int = 2; +pub const KERN_PROC_PID: c_int = 1; +pub const KERN_PROC_RUID: c_int = 6; +pub const KERN_PROC_SESSION: c_int = 3; +pub const KERN_PROC_TTY: c_int = 4; +pub const KERN_PROC_UID: c_int = 5; +pub const KERN_PROF: c_int = 16; +pub const KERN_SAVED_IDS: c_int = 20; +pub const KERN_SECURELVL: c_int = 9; +pub const KERN_VERSION: c_int = 4; +pub const KERN_VNODE: c_int = 13; + +pub
const LC_ALL: c_int = 63; +pub const LC_COLLATE: c_int = 1; +pub const LC_CTYPE: c_int = 2; +pub const LC_MESSAGES: c_int = 32; +pub const LC_MONETARY: c_int = 4; +pub const LC_NUMERIC: c_int = 8; +pub const LC_TIME: c_int = 16; + +pub const MAP_STACK: c_int = 0x00001000; +pub const MNT_NOEXEC: c_int = 0x02; +pub const MNT_NOSUID: c_int = 0x04; +pub const MNT_RDONLY: c_int = 0x01; + +pub const NET_RT_DUMP: c_int = 1; +pub const NET_RT_FLAGS: c_int = 2; +pub const OHFLOW: tcflag_t = 0x00000002; +pub const P_ALL: idtype_t = 0; +pub const PARSTK: tcflag_t = 0x00000004; +pub const PF_CCITT: c_int = 10; +pub const PF_CHAOS: c_int = 5; +pub const PF_CNT: c_int = 21; +pub const PF_COIP: c_int = 20; +pub const PF_DATAKIT: c_int = 9; +pub const PF_DECnet: c_int = 12; +pub const PF_DLI: c_int = 13; +pub const PF_ECMA: c_int = 8; +pub const PF_HYLINK: c_int = 15; +pub const PF_IMPLINK: c_int = 3; +pub const PF_ISO: c_int = 7; +pub const PF_LAT: c_int = 14; +pub const PF_LINK: c_int = 18; +pub const PF_OSI: c_int = 7; +pub const PF_PIP: c_int = 25; +pub const PF_PUP: c_int = 4; +pub const PF_RTIP: c_int = 22; +pub const PF_XTP: c_int = 19; +pub const PM_STR: crate::nl_item = 7; +pub const POSIX_MADV_DONTNEED: c_int = 4; +pub const POSIX_MADV_NORMAL: c_int = 0; +pub const POSIX_MADV_RANDOM: c_int = 2; +pub const POSIX_MADV_SEQUENTIAL: c_int = 1; +pub const POSIX_MADV_WILLNEED: c_int = 3; +pub const _POSIX_VDISABLE: c_int = 0; +pub const P_PGID: idtype_t = 2; +pub const P_PID: idtype_t = 1; +pub const PRIO_PGRP: c_int = 1; +pub const PRIO_PROCESS: c_int = 0; +pub const PRIO_USER: c_int = 2; +pub const pseudo_AF_PIP: c_int = 25; +pub const pseudo_AF_RTIP: c_int = 22; +pub const pseudo_AF_XTP: c_int = 19; +pub const REG_ASSERT: c_int = 15; +pub const REG_ATOI: c_int = 255; +pub const REG_BACKR: c_int = 0x400; +pub const REG_BASIC: c_int = 0x00; +pub const REG_DUMP: c_int = 0x80; +pub const REG_EMPTY: c_int = 14; +pub const REG_INVARG: c_int = 16; +pub const REG_ITOA: c_int = 0o400; +pub const REG_LARGE: c_int = 0x200; +pub const REG_NOSPEC: c_int = 0x10; +pub const REG_OK: c_int = 0; +pub const REG_PEND: c_int = 0x20; +pub const REG_TRACE: c_int = 0x100; + +pub const RLIMIT_AS: c_int = 6; +pub const RLIMIT_CORE: c_int = 4; +pub const RLIMIT_CPU: c_int = 0; +pub const RLIMIT_DATA: c_int = 2; +pub const RLIMIT_FSIZE: c_int = 1; +pub const RLIMIT_MEMLOCK: c_int = 7; +pub const RLIMIT_NOFILE: c_int = 5; +pub const RLIMIT_NPROC: c_int = 8; +pub const RLIMIT_RSS: c_int = 6; +pub const RLIMIT_STACK: c_int = 3; +pub const RLIMIT_VMEM: c_int = 6; +#[deprecated(since = "0.2.64", note = "Not stable across OS versions")] +pub const RLIM_NLIMITS: c_int = 14; + +pub const SCHED_ADJTOHEAD: c_int = 5; +pub const SCHED_ADJTOTAIL: c_int = 6; +pub const SCHED_MAXPOLICY: c_int = 7; +pub const SCHED_SETPRIO: c_int = 7; +pub const SCHED_SPORADIC: c_int = 4; + +pub const SHM_ANON: *mut c_char = -1isize as *mut c_char; +pub const SIGCLD: c_int = SIGCHLD; +pub const SIGDEADLK: c_int = 7; +pub const SIGEMT: c_int = 7; +pub const SIGEV_NONE: c_int = 0; +pub const SIGEV_SIGNAL: c_int = 129; +pub const SIGEV_THREAD: c_int = 135; +pub const SO_USELOOPBACK: c_int = 0x0040; +pub const _SS_ALIGNSIZE: usize = size_of::<i64>(); +pub const _SS_MAXSIZE: usize = 128; +pub const _SS_PAD1SIZE: usize = _SS_ALIGNSIZE - 2; +pub const _SS_PAD2SIZE: usize = _SS_MAXSIZE - 2 - _SS_PAD1SIZE - _SS_ALIGNSIZE; +pub const TC_CPOSIX: tcflag_t = CLOCAL | CREAD | CSIZE | CSTOPB | HUPCL | PARENB | PARODD; +pub const TCGETS: c_int = 0x404c540d; +pub const TC_IPOSIX:
tcflag_t = + BRKINT | ICRNL | IGNBRK | IGNPAR | INLCR | INPCK | ISTRIP | IXOFF | IXON | PARMRK; +pub const TC_LPOSIX: tcflag_t = + ECHO | ECHOE | ECHOK | ECHONL | ICANON | IEXTEN | ISIG | NOFLSH | TOSTOP; +pub const TC_OPOSIX: tcflag_t = OPOST; +pub const T_FMT_AMPM: crate::nl_item = 5; + +pub const TIOCCBRK: c_int = 29818; +pub const TIOCCDTR: c_int = 29816; +pub const TIOCDRAIN: c_int = 29790; +pub const TIOCEXCL: c_int = 29709; +pub const TIOCFLUSH: c_int = -2147191792; +pub const TIOCGETA: c_int = 1078752275; +pub const TIOCGPGRP: c_int = 1074033783; +pub const TIOCGWINSZ: c_int = 1074295912; +pub const TIOCMBIC: c_int = -2147191701; +pub const TIOCMBIS: c_int = -2147191700; +pub const TIOCMGET: c_int = 1074033770; +pub const TIOCMSET: c_int = -2147191699; +pub const TIOCNOTTY: c_int = 29809; +pub const TIOCNXCL: c_int = 29710; +pub const TIOCOUTQ: c_int = 1074033779; +pub const TIOCPKT: c_int = -2147191696; +pub const TIOCPKT_DATA: c_int = 0x00; +pub const TIOCPKT_DOSTOP: c_int = 0x20; +pub const TIOCPKT_FLUSHREAD: c_int = 0x01; +pub const TIOCPKT_FLUSHWRITE: c_int = 0x02; +pub const TIOCPKT_IOCTL: c_int = 0x40; +pub const TIOCPKT_NOSTOP: c_int = 0x10; +pub const TIOCPKT_START: c_int = 0x08; +pub const TIOCPKT_STOP: c_int = 0x04; +pub const TIOCSBRK: c_int = 29819; +pub const TIOCSCTTY: c_int = 29793; +pub const TIOCSDTR: c_int = 29817; +pub const TIOCSETA: c_int = -2142473196; +pub const TIOCSETAF: c_int = -2142473194; +pub const TIOCSETAW: c_int = -2142473195; +pub const TIOCSPGRP: c_int = -2147191690; +pub const TIOCSTART: c_int = 29806; +pub const TIOCSTI: c_int = -2147388302; +pub const TIOCSTOP: c_int = 29807; +pub const TIOCSWINSZ: c_int = -2146929561; + +pub const USER_CS_PATH: c_int = 1; +pub const USER_BC_BASE_MAX: c_int = 2; +pub const USER_BC_DIM_MAX: c_int = 3; +pub const USER_BC_SCALE_MAX: c_int = 4; +pub const USER_BC_STRING_MAX: c_int = 5; +pub const USER_COLL_WEIGHTS_MAX: c_int = 6; +pub const USER_EXPR_NEST_MAX: c_int = 7; +pub const USER_LINE_MAX: c_int = 8; +pub const USER_RE_DUP_MAX: c_int = 9; +pub const USER_POSIX2_VERSION: c_int = 10; +pub const USER_POSIX2_C_BIND: c_int = 11; +pub const USER_POSIX2_C_DEV: c_int = 12; +pub const USER_POSIX2_CHAR_TERM: c_int = 13; +pub const USER_POSIX2_FORT_DEV: c_int = 14; +pub const USER_POSIX2_FORT_RUN: c_int = 15; +pub const USER_POSIX2_LOCALEDEF: c_int = 16; +pub const USER_POSIX2_SW_DEV: c_int = 17; +pub const USER_POSIX2_UPE: c_int = 18; +pub const USER_STREAM_MAX: c_int = 19; +pub const USER_TZNAME_MAX: c_int = 20; + +pub const VDOWN: usize = 31; +pub const VINS: usize = 32; +pub const VDEL: usize = 33; +pub const VRUB: usize = 34; +pub const VCAN: usize = 35; +pub const VHOME: usize = 36; +pub const VEND: usize = 37; +pub const VSPARE3: usize = 38; +pub const VSPARE4: usize = 39; +pub const VSWTCH: usize = 7; +pub const VDSUSP: usize = 11; +pub const VFWD: usize = 18; +pub const VLOGIN: usize = 19; +pub const VPREFIX: usize = 20; +pub const VSUFFIX: usize = 24; +pub const VLEFT: usize = 28; +pub const VRIGHT: usize = 29; +pub const VUP: usize = 30; +pub const XCASE: tcflag_t = 0x00000004; + +pub const PTHREAD_BARRIER_SERIAL_THREAD: c_int = -1; +pub const PTHREAD_CREATE_JOINABLE: c_int = 0x00; +pub const PTHREAD_CREATE_DETACHED: c_int = 0x01; + +pub const PTHREAD_MUTEX_ERRORCHECK: c_int = 1; +pub const PTHREAD_MUTEX_RECURSIVE: c_int = 2; +pub const PTHREAD_MUTEX_NORMAL: c_int = 3; +pub const PTHREAD_STACK_MIN: size_t = 256; +pub const PTHREAD_MUTEX_DEFAULT: c_int = 0; +pub const PTHREAD_MUTEX_STALLED: c_int = 0x00; +pub 
const PTHREAD_MUTEX_ROBUST: c_int = 0x10; +pub const PTHREAD_PROCESS_PRIVATE: c_int = 0x00; +pub const PTHREAD_PROCESS_SHARED: c_int = 0x01; + +pub const PTHREAD_KEYS_MAX: usize = 128; + +pub const PTHREAD_MUTEX_INITIALIZER: pthread_mutex_t = pthread_mutex_t { + __u: 0x80000000, + __owner: 0xffffffff, +}; +pub const PTHREAD_COND_INITIALIZER: pthread_cond_t = pthread_cond_t { + __u: CLOCK_REALTIME as u32, + __owner: 0xfffffffb, +}; +pub const PTHREAD_RWLOCK_INITIALIZER: pthread_rwlock_t = pthread_rwlock_t { + __active: 0, + __blockedwriters: 0, + __blockedreaders: 0, + __heavy: 0, + __lock: PTHREAD_MUTEX_INITIALIZER, + __rcond: PTHREAD_COND_INITIALIZER, + __wcond: PTHREAD_COND_INITIALIZER, + __owner: -2i32 as c_uint, + __spare: 0, +}; + +const fn _CMSG_ALIGN(len: usize) -> usize { + len + size_of::<usize>() - 1 & !(size_of::<usize>() - 1) +} + +const fn _ALIGN(p: usize, b: usize) -> usize { + (p + b - 1) & !(b - 1) +} + +f! { + pub fn CMSG_FIRSTHDR(mhdr: *const msghdr) -> *mut cmsghdr { + if (*mhdr).msg_controllen as usize >= size_of::<cmsghdr>() { + (*mhdr).msg_control as *mut cmsghdr + } else { + core::ptr::null_mut::<cmsghdr>() + } + } + + pub fn CMSG_NXTHDR(mhdr: *const crate::msghdr, cmsg: *const cmsghdr) -> *mut cmsghdr { + let msg = _CMSG_ALIGN((*cmsg).cmsg_len as usize); + let next = cmsg as usize + msg + _CMSG_ALIGN(size_of::<cmsghdr>()); + if next > (*mhdr).msg_control as usize + (*mhdr).msg_controllen as usize { + core::ptr::null_mut::<cmsghdr>() + } else { + (cmsg as usize + msg) as *mut cmsghdr + } + } + + pub fn CMSG_DATA(cmsg: *const cmsghdr) -> *mut c_uchar { + (cmsg as *mut c_uchar).offset(_CMSG_ALIGN(size_of::<cmsghdr>()) as isize) + } + + pub const fn CMSG_LEN(length: c_uint) -> c_uint { + _CMSG_ALIGN(size_of::<cmsghdr>()) as c_uint + length + } + + pub const fn CMSG_SPACE(length: c_uint) -> c_uint { + (_CMSG_ALIGN(size_of::<cmsghdr>()) + _CMSG_ALIGN(length as usize)) as c_uint + } + + pub fn FD_CLR(fd: c_int, set: *mut fd_set) -> () { + let fd = fd as usize; + let size = size_of_val(&(*set).fds_bits[0]) * 8; + (*set).fds_bits[fd / size] &= !(1 << (fd % size)); + return; + } + + pub fn FD_ISSET(fd: c_int, set: *const fd_set) -> bool { + let fd = fd as usize; + let size = size_of_val(&(*set).fds_bits[0]) * 8; + return ((*set).fds_bits[fd / size] & (1 << (fd % size))) != 0; + } + + pub fn FD_SET(fd: c_int, set: *mut fd_set) -> () { + let fd = fd as usize; + let size = size_of_val(&(*set).fds_bits[0]) * 8; + (*set).fds_bits[fd / size] |= 1 << (fd % size); + return; + } + + pub fn FD_ZERO(set: *mut fd_set) -> () { + for slot in (*set).fds_bits.iter_mut() { + *slot = 0; + } + } + + pub fn _DEXTRA_FIRST(_d: *const dirent) -> *mut crate::dirent_extra { + let _f = &((*(_d)).d_name) as *const _; + let _s = _d as usize; + + _ALIGN(_s + _f as usize - _s + (*_d).d_namelen as usize + 1, 8) as *mut crate::dirent_extra + } + + pub fn _DEXTRA_VALID(_x: *const crate::dirent_extra, _d: *const dirent) -> bool { + let sz = _x as usize - _d as usize + size_of::<crate::dirent_extra>(); + let rsz = (*_d).d_reclen as usize; + + if sz > rsz || sz + (*_x).d_datalen as usize > rsz { + false + } else { + true + } + } + + pub fn _DEXTRA_NEXT(_x: *const crate::dirent_extra) -> *mut crate::dirent_extra { + _ALIGN( + _x as usize + size_of::<crate::dirent_extra>() + (*_x).d_datalen as usize, + 8, + ) as *mut crate::dirent_extra + } + + pub fn SOCKCREDSIZE(ngrps: usize) -> usize { + let ngrps = if ngrps > 0 { ngrps - 1 } else { 0 }; + size_of::<sockcred>() + size_of::<crate::gid_t>() * ngrps + } +} + +safe_f!
{ + pub const fn WIFSTOPPED(status: c_int) -> bool { + (status & 0xff) == 0x7f + } + + pub const fn WSTOPSIG(status: c_int) -> c_int { + (status >> 8) & 0xff + } + + pub const fn WIFCONTINUED(status: c_int) -> bool { + status == 0xffff + } + + pub const fn WIFSIGNALED(status: c_int) -> bool { + ((status & 0x7f) + 1) as i8 >= 2 + } + + pub const fn WTERMSIG(status: c_int) -> c_int { + status & 0x7f + } + + pub const fn WIFEXITED(status: c_int) -> bool { + (status & 0x7f) == 0 + } + + pub const fn WEXITSTATUS(status: c_int) -> c_int { + (status >> 8) & 0xff + } + + pub const fn WCOREDUMP(status: c_int) -> bool { + (status & 0x80) != 0 + } + + pub const fn IPTOS_ECN(x: u8) -> u8 { + x & crate::IPTOS_ECN_MASK + } + + pub const fn makedev(major: c_uint, minor: c_uint) -> crate::dev_t { + ((major << 10) | (minor)) as crate::dev_t + } + + pub const fn major(dev: crate::dev_t) -> c_uint { + ((dev as c_uint) >> 10) & 0x3f + } + + pub const fn minor(dev: crate::dev_t) -> c_uint { + (dev as c_uint) & 0x3ff + } +} + +cfg_if! { + if #[cfg(not(target_env = "nto71_iosock"))] { + extern "C" { + pub fn sendmmsg( + sockfd: c_int, + msgvec: *mut crate::mmsghdr, + vlen: c_uint, + flags: c_uint, + ) -> c_int; + pub fn recvmmsg( + sockfd: c_int, + msgvec: *mut crate::mmsghdr, + vlen: c_uint, + flags: c_uint, + timeout: *mut crate::timespec, + ) -> c_int; + } + } else { + extern "C" { + pub fn sendmmsg( + sockfd: c_int, + msgvec: *mut crate::mmsghdr, + vlen: size_t, + flags: c_int, + ) -> ssize_t; + pub fn recvmmsg( + sockfd: c_int, + msgvec: *mut crate::mmsghdr, + vlen: size_t, + flags: c_int, + timeout: *const crate::timespec, + ) -> ssize_t; + } + } +} + +// Network related functions are provided by libsocket and regex +// functions are provided by libregex. +// In QNX <=7.0, libregex functions were included in libc itself. 
+#[link(name = "socket")] +#[cfg_attr(not(target_env = "nto70"), link(name = "regex"))] +extern "C" { + pub fn sem_destroy(sem: *mut sem_t) -> c_int; + pub fn sem_init(sem: *mut sem_t, pshared: c_int, value: c_uint) -> c_int; + pub fn fdatasync(fd: c_int) -> c_int; + pub fn getpriority(which: c_int, who: crate::id_t) -> c_int; + pub fn setpriority(which: c_int, who: crate::id_t, prio: c_int) -> c_int; + pub fn mkfifoat(dirfd: c_int, pathname: *const c_char, mode: mode_t) -> c_int; + pub fn mknodat(__fd: c_int, pathname: *const c_char, mode: mode_t, dev: crate::dev_t) -> c_int; + + pub fn clock_getres(clk_id: crate::clockid_t, tp: *mut crate::timespec) -> c_int; + pub fn clock_gettime(clk_id: crate::clockid_t, tp: *mut crate::timespec) -> c_int; + pub fn clock_settime(clk_id: crate::clockid_t, tp: *const crate::timespec) -> c_int; + pub fn clock_getcpuclockid(pid: crate::pid_t, clk_id: *mut crate::clockid_t) -> c_int; + + pub fn pthread_attr_getstack( + attr: *const crate::pthread_attr_t, + stackaddr: *mut *mut c_void, + stacksize: *mut size_t, + ) -> c_int; + pub fn memalign(align: size_t, size: size_t) -> *mut c_void; + pub fn setgroups(ngroups: c_int, ptr: *const crate::gid_t) -> c_int; + + pub fn posix_fadvise(fd: c_int, offset: off_t, len: off_t, advise: c_int) -> c_int; + pub fn futimens(fd: c_int, times: *const crate::timespec) -> c_int; + pub fn nl_langinfo(item: crate::nl_item) -> *mut c_char; + + pub fn utimensat( + dirfd: c_int, + path: *const c_char, + times: *const crate::timespec, + flag: c_int, + ) -> c_int; + + pub fn pthread_condattr_getclock( + attr: *const pthread_condattr_t, + clock_id: *mut clockid_t, + ) -> c_int; + pub fn pthread_condattr_setclock( + attr: *mut pthread_condattr_t, + clock_id: crate::clockid_t, + ) -> c_int; + pub fn pthread_condattr_setpshared(attr: *mut pthread_condattr_t, pshared: c_int) -> c_int; + pub fn pthread_mutexattr_setpshared(attr: *mut pthread_mutexattr_t, pshared: c_int) -> c_int; + pub fn pthread_rwlockattr_getpshared( + attr: *const pthread_rwlockattr_t, + val: *mut c_int, + ) -> c_int; + pub fn pthread_rwlockattr_setpshared(attr: *mut pthread_rwlockattr_t, val: c_int) -> c_int; + pub fn ptsname_r(fd: c_int, buf: *mut c_char, buflen: size_t) -> *mut c_char; + pub fn clearenv() -> c_int; + pub fn waitid( + idtype: idtype_t, + id: id_t, + infop: *mut crate::siginfo_t, + options: c_int, + ) -> c_int; + pub fn wait4( + pid: crate::pid_t, + status: *mut c_int, + options: c_int, + rusage: *mut crate::rusage, + ) -> crate::pid_t; + + // DIFF(main): changed to `*const *mut` in e77f551de9 + pub fn execvpe( + file: *const c_char, + argv: *const *const c_char, + envp: *const *const c_char, + ) -> c_int; + + pub fn getifaddrs(ifap: *mut *mut crate::ifaddrs) -> c_int; + pub fn freeifaddrs(ifa: *mut crate::ifaddrs); + pub fn bind( + socket: c_int, + address: *const crate::sockaddr, + address_len: crate::socklen_t, + ) -> c_int; + + pub fn writev(fd: c_int, iov: *const crate::iovec, iovcnt: c_int) -> ssize_t; + pub fn readv(fd: c_int, iov: *const crate::iovec, iovcnt: c_int) -> ssize_t; + + pub fn sendmsg(fd: c_int, msg: *const crate::msghdr, flags: c_int) -> ssize_t; + pub fn recvmsg(fd: c_int, msg: *mut crate::msghdr, flags: c_int) -> ssize_t; + pub fn openpty( + amaster: *mut c_int, + aslave: *mut c_int, + name: *mut c_char, + termp: *mut termios, + winp: *mut crate::winsize, + ) -> c_int; + pub fn forkpty( + amaster: *mut c_int, + name: *mut c_char, + termp: *mut termios, + winp: *mut crate::winsize, + ) -> crate::pid_t; + pub fn login_tty(fd: 
c_int) -> c_int; + + pub fn uname(buf: *mut crate::utsname) -> c_int; + + pub fn getpeereid(socket: c_int, euid: *mut crate::uid_t, egid: *mut crate::gid_t) -> c_int; + + pub fn strerror_r(errnum: c_int, buf: *mut c_char, buflen: size_t) -> c_int; + + pub fn abs(i: c_int) -> c_int; + pub fn labs(i: c_long) -> c_long; + pub fn rand() -> c_int; + pub fn srand(seed: c_uint); + + pub fn setpwent(); + pub fn endpwent(); + pub fn getpwent() -> *mut passwd; + pub fn setgrent(); + pub fn endgrent(); + pub fn getgrent() -> *mut crate::group; + pub fn setspent(); + pub fn endspent(); + + pub fn shm_open(name: *const c_char, oflag: c_int, mode: mode_t) -> c_int; + + pub fn ftok(pathname: *const c_char, proj_id: c_int) -> crate::key_t; + pub fn mprotect(addr: *mut c_void, len: size_t, prot: c_int) -> c_int; + + pub fn posix_fallocate(fd: c_int, offset: off_t, len: off_t) -> c_int; + pub fn mkostemp(template: *mut c_char, flags: c_int) -> c_int; + pub fn mkostemps(template: *mut c_char, suffixlen: c_int, flags: c_int) -> c_int; + pub fn sigtimedwait( + set: *const sigset_t, + info: *mut siginfo_t, + timeout: *const crate::timespec, + ) -> c_int; + pub fn sigwaitinfo(set: *const sigset_t, info: *mut siginfo_t) -> c_int; + pub fn pthread_setschedprio(native: crate::pthread_t, priority: c_int) -> c_int; + + pub fn if_nameindex() -> *mut if_nameindex; + pub fn if_freenameindex(ptr: *mut if_nameindex); + + pub fn glob( + pattern: *const c_char, + flags: c_int, + errfunc: Option<extern "C" fn(epath: *const c_char, errno: c_int) -> c_int>, + pglob: *mut crate::glob_t, + ) -> c_int; + pub fn globfree(pglob: *mut crate::glob_t); + + pub fn posix_madvise(addr: *mut c_void, len: size_t, advice: c_int) -> c_int; + + pub fn shm_unlink(name: *const c_char) -> c_int; + + pub fn seekdir(dirp: *mut crate::DIR, loc: c_long); + + pub fn telldir(dirp: *mut crate::DIR) -> c_long; + + pub fn msync(addr: *mut c_void, len: size_t, flags: c_int) -> c_int; + + pub fn recvfrom( + socket: c_int, + buf: *mut c_void, + len: size_t, + flags: c_int, + addr: *mut crate::sockaddr, + addrlen: *mut crate::socklen_t, + ) -> ssize_t; + pub fn mkstemps(template: *mut c_char, suffixlen: c_int) -> c_int; + + pub fn getdomainname(name: *mut c_char, len: size_t) -> c_int; + pub fn setdomainname(name: *const c_char, len: size_t) -> c_int; + pub fn sync(); + pub fn pthread_getschedparam( + native: crate::pthread_t, + policy: *mut c_int, + param: *mut crate::sched_param, + ) -> c_int; + pub fn umount(target: *const c_char, flags: c_int) -> c_int; + pub fn sched_get_priority_max(policy: c_int) -> c_int; + pub fn settimeofday(tv: *const crate::timeval, tz: *const c_void) -> c_int; + pub fn sched_rr_get_interval(pid: crate::pid_t, tp: *mut crate::timespec) -> c_int; + pub fn sem_timedwait(sem: *mut sem_t, abstime: *const crate::timespec) -> c_int; + pub fn sem_getvalue(sem: *mut sem_t, sval: *mut c_int) -> c_int; + pub fn sched_setparam(pid: crate::pid_t, param: *const crate::sched_param) -> c_int; + pub fn mount( + special_device: *const c_char, + mount_directory: *const c_char, + flags: c_int, + mount_type: *const c_char, + mount_data: *const c_void, + mount_datalen: c_int, + ) -> c_int; + pub fn sched_getparam(pid: crate::pid_t, param: *mut crate::sched_param) -> c_int; + pub fn pthread_mutex_consistent(mutex: *mut pthread_mutex_t) -> c_int; + pub fn pthread_mutex_timedlock( + lock: *mut pthread_mutex_t, + abstime: *const crate::timespec, + ) -> c_int; + pub fn pthread_spin_init(lock: *mut crate::pthread_spinlock_t, pshared: c_int) -> c_int; + pub fn pthread_spin_destroy(lock: *mut
crate::pthread_spinlock_t) -> c_int; + pub fn pthread_spin_lock(lock: *mut crate::pthread_spinlock_t) -> c_int; + pub fn pthread_spin_trylock(lock: *mut crate::pthread_spinlock_t) -> c_int; + pub fn pthread_spin_unlock(lock: *mut crate::pthread_spinlock_t) -> c_int; + pub fn pthread_barrierattr_init(__attr: *mut crate::pthread_barrierattr_t) -> c_int; + pub fn pthread_barrierattr_destroy(__attr: *mut crate::pthread_barrierattr_t) -> c_int; + pub fn pthread_barrierattr_getpshared( + __attr: *const crate::pthread_barrierattr_t, + __pshared: *mut c_int, + ) -> c_int; + pub fn pthread_barrierattr_setpshared( + __attr: *mut crate::pthread_barrierattr_t, + __pshared: c_int, + ) -> c_int; + pub fn pthread_barrier_init( + __barrier: *mut crate::pthread_barrier_t, + __attr: *const crate::pthread_barrierattr_t, + __count: c_uint, + ) -> c_int; + pub fn pthread_barrier_destroy(__barrier: *mut crate::pthread_barrier_t) -> c_int; + pub fn pthread_barrier_wait(__barrier: *mut crate::pthread_barrier_t) -> c_int; + + pub fn sched_getscheduler(pid: crate::pid_t) -> c_int; + pub fn clock_nanosleep( + clk_id: crate::clockid_t, + flags: c_int, + rqtp: *const crate::timespec, + rmtp: *mut crate::timespec, + ) -> c_int; + pub fn pthread_attr_getguardsize( + attr: *const crate::pthread_attr_t, + guardsize: *mut size_t, + ) -> c_int; + pub fn pthread_attr_setguardsize(attr: *mut crate::pthread_attr_t, guardsize: size_t) -> c_int; + pub fn sethostname(name: *const c_char, len: size_t) -> c_int; + pub fn sched_get_priority_min(policy: c_int) -> c_int; + pub fn pthread_condattr_getpshared( + attr: *const pthread_condattr_t, + pshared: *mut c_int, + ) -> c_int; + pub fn pthread_setschedparam( + native: crate::pthread_t, + policy: c_int, + param: *const crate::sched_param, + ) -> c_int; + pub fn sched_setscheduler( + pid: crate::pid_t, + policy: c_int, + param: *const crate::sched_param, + ) -> c_int; + pub fn sigsuspend(mask: *const crate::sigset_t) -> c_int; + pub fn getgrgid_r( + gid: crate::gid_t, + grp: *mut crate::group, + buf: *mut c_char, + buflen: size_t, + result: *mut *mut crate::group, + ) -> c_int; + pub fn sem_close(sem: *mut sem_t) -> c_int; + pub fn getdtablesize() -> c_int; + pub fn getgrnam_r( + name: *const c_char, + grp: *mut crate::group, + buf: *mut c_char, + buflen: size_t, + result: *mut *mut crate::group, + ) -> c_int; + pub fn initgroups(user: *const c_char, group: crate::gid_t) -> c_int; + pub fn pthread_sigmask(how: c_int, set: *const sigset_t, oldset: *mut sigset_t) -> c_int; + pub fn sem_open(name: *const c_char, oflag: c_int, ...) 
-> *mut sem_t; + pub fn getgrnam(name: *const c_char) -> *mut crate::group; + pub fn pthread_cancel(thread: crate::pthread_t) -> c_int; + pub fn pthread_kill(thread: crate::pthread_t, sig: c_int) -> c_int; + pub fn sem_unlink(name: *const c_char) -> c_int; + pub fn daemon(nochdir: c_int, noclose: c_int) -> c_int; + pub fn getpwnam_r( + name: *const c_char, + pwd: *mut passwd, + buf: *mut c_char, + buflen: size_t, + result: *mut *mut passwd, + ) -> c_int; + pub fn getpwuid_r( + uid: crate::uid_t, + pwd: *mut passwd, + buf: *mut c_char, + buflen: size_t, + result: *mut *mut passwd, + ) -> c_int; + pub fn sigwait(set: *const sigset_t, sig: *mut c_int) -> c_int; + pub fn pthread_atfork( + prepare: Option<unsafe extern "C" fn()>, + parent: Option<unsafe extern "C" fn()>, + child: Option<unsafe extern "C" fn()>, + ) -> c_int; + pub fn getgrgid(gid: crate::gid_t) -> *mut crate::group; + pub fn getgrouplist( + user: *const c_char, + group: crate::gid_t, + groups: *mut crate::gid_t, + ngroups: *mut c_int, + ) -> c_int; + pub fn pthread_mutexattr_getpshared( + attr: *const pthread_mutexattr_t, + pshared: *mut c_int, + ) -> c_int; + pub fn pthread_mutexattr_getrobust( + attr: *const pthread_mutexattr_t, + robustness: *mut c_int, + ) -> c_int; + pub fn pthread_mutexattr_setrobust(attr: *mut pthread_mutexattr_t, robustness: c_int) -> c_int; + pub fn pthread_create( + native: *mut crate::pthread_t, + attr: *const crate::pthread_attr_t, + f: extern "C" fn(*mut c_void) -> *mut c_void, + value: *mut c_void, + ) -> c_int; + pub fn getitimer(which: c_int, curr_value: *mut crate::itimerval) -> c_int; + pub fn setitimer( + which: c_int, + value: *const crate::itimerval, + ovalue: *mut crate::itimerval, + ) -> c_int; + pub fn posix_spawn( + pid: *mut crate::pid_t, + path: *const c_char, + file_actions: *const crate::posix_spawn_file_actions_t, + attrp: *const crate::posix_spawnattr_t, + argv: *const *mut c_char, + envp: *const *mut c_char, + ) -> c_int; + pub fn posix_spawnp( + pid: *mut crate::pid_t, + file: *const c_char, + file_actions: *const crate::posix_spawn_file_actions_t, + attrp: *const crate::posix_spawnattr_t, + argv: *const *mut c_char, + envp: *const *mut c_char, + ) -> c_int; + pub fn posix_spawnattr_init(attr: *mut posix_spawnattr_t) -> c_int; + pub fn posix_spawnattr_destroy(attr: *mut posix_spawnattr_t) -> c_int; + pub fn posix_spawnattr_getsigdefault( + attr: *const posix_spawnattr_t, + default: *mut crate::sigset_t, + ) -> c_int; + pub fn posix_spawnattr_setsigdefault( + attr: *mut posix_spawnattr_t, + default: *const crate::sigset_t, + ) -> c_int; + pub fn posix_spawnattr_getsigmask( + attr: *const posix_spawnattr_t, + default: *mut crate::sigset_t, + ) -> c_int; + pub fn posix_spawnattr_setsigmask( + attr: *mut posix_spawnattr_t, + default: *const crate::sigset_t, + ) -> c_int; + pub fn posix_spawnattr_getflags(attr: *const posix_spawnattr_t, flags: *mut c_short) -> c_int; + pub fn posix_spawnattr_setflags(attr: *mut posix_spawnattr_t, flags: c_short) -> c_int; + pub fn posix_spawnattr_getpgroup( + attr: *const posix_spawnattr_t, + flags: *mut crate::pid_t, + ) -> c_int; + pub fn posix_spawnattr_setpgroup(attr: *mut posix_spawnattr_t, flags: crate::pid_t) -> c_int; + pub fn posix_spawnattr_getschedpolicy( + attr: *const posix_spawnattr_t, + flags: *mut c_int, + ) -> c_int; + pub fn posix_spawnattr_setschedpolicy(attr: *mut posix_spawnattr_t, flags: c_int) -> c_int; + pub fn posix_spawnattr_getschedparam( + attr: *const posix_spawnattr_t, + param: *mut crate::sched_param, + ) -> c_int; + pub fn posix_spawnattr_setschedparam( + attr: *mut posix_spawnattr_t, +
param: *const crate::sched_param, + ) -> c_int; + + pub fn posix_spawn_file_actions_init(actions: *mut posix_spawn_file_actions_t) -> c_int; + pub fn posix_spawn_file_actions_destroy(actions: *mut posix_spawn_file_actions_t) -> c_int; + pub fn posix_spawn_file_actions_addopen( + actions: *mut posix_spawn_file_actions_t, + fd: c_int, + path: *const c_char, + oflag: c_int, + mode: mode_t, + ) -> c_int; + pub fn posix_spawn_file_actions_addclose( + actions: *mut posix_spawn_file_actions_t, + fd: c_int, + ) -> c_int; + pub fn posix_spawn_file_actions_adddup2( + actions: *mut posix_spawn_file_actions_t, + fd: c_int, + newfd: c_int, + ) -> c_int; + pub fn popen(command: *const c_char, mode: *const c_char) -> *mut crate::FILE; + pub fn faccessat(dirfd: c_int, pathname: *const c_char, mode: c_int, flags: c_int) -> c_int; + pub fn inotify_rm_watch(fd: c_int, wd: c_int) -> c_int; + pub fn inotify_init() -> c_int; + pub fn inotify_add_watch(fd: c_int, path: *const c_char, mask: u32) -> c_int; + + pub fn gettid() -> crate::pid_t; + + pub fn pthread_getcpuclockid(thread: crate::pthread_t, clk_id: *mut crate::clockid_t) -> c_int; + + pub fn getnameinfo( + sa: *const crate::sockaddr, + salen: crate::socklen_t, + host: *mut c_char, + hostlen: crate::socklen_t, + serv: *mut c_char, + servlen: crate::socklen_t, + flags: c_int, + ) -> c_int; + + pub fn mallopt(param: c_int, value: i64) -> c_int; + pub fn gettimeofday(tp: *mut crate::timeval, tz: *mut c_void) -> c_int; + + pub fn ctermid(s: *mut c_char) -> *mut c_char; + pub fn ioctl(fd: c_int, request: c_int, ...) -> c_int; + + pub fn mallinfo() -> crate::mallinfo; + pub fn getpwent_r( + pwd: *mut crate::passwd, + buf: *mut c_char, + __bufsize: c_int, + __result: *mut *mut crate::passwd, + ) -> c_int; + pub fn pthread_getname_np(thread: crate::pthread_t, name: *mut c_char, len: c_int) -> c_int; + pub fn pthread_setname_np(thread: crate::pthread_t, name: *const c_char) -> c_int; + + pub fn sysctl( + _: *const c_int, + _: c_uint, + _: *mut c_void, + _: *mut size_t, + _: *const c_void, + _: size_t, + ) -> c_int; + + pub fn getrlimit(resource: c_int, rlim: *mut crate::rlimit) -> c_int; + pub fn setrlimit(resource: c_int, rlp: *const crate::rlimit) -> c_int; + + pub fn lio_listio( + __mode: c_int, + __list: *const *mut aiocb, + __nent: c_int, + __sig: *mut sigevent, + ) -> c_int; + + pub fn dl_iterate_phdr( + callback: Option< + unsafe extern "C" fn( + // The original .h file declares this as *const, but for consistency with other platforms, + // changing this to *mut to make it easier to use. + // Maybe in v0.3 all platforms should use this as a *const. + info: *mut dl_phdr_info, + size: size_t, + data: *mut c_void, + ) -> c_int, + >, + data: *mut c_void, + ) -> c_int; + + pub fn memset_s(s: *mut c_void, smax: size_t, c: c_int, n: size_t) -> c_int; + + pub fn regcomp(__preg: *mut crate::regex_t, __pattern: *const c_char, __cflags: c_int) + -> c_int; + pub fn regexec( + __preg: *const crate::regex_t, + __str: *const c_char, + __nmatch: size_t, + __pmatch: *mut crate::regmatch_t, + __eflags: c_int, + ) -> c_int; + pub fn regerror( + __errcode: c_int, + __preg: *const crate::regex_t, + __errbuf: *mut c_char, + __errbuf_size: size_t, + ) -> size_t; + pub fn regfree(__preg: *mut crate::regex_t); + pub fn dirfd(__dirp: *mut crate::DIR) -> c_int; + pub fn dircntl(dir: *mut crate::DIR, cmd: c_int, ...) 
-> c_int; + + pub fn aio_cancel(__fd: c_int, __aiocbp: *mut crate::aiocb) -> c_int; + pub fn aio_error(__aiocbp: *const crate::aiocb) -> c_int; + pub fn aio_fsync(__operation: c_int, __aiocbp: *mut crate::aiocb) -> c_int; + pub fn aio_read(__aiocbp: *mut crate::aiocb) -> c_int; + pub fn aio_return(__aiocpb: *mut crate::aiocb) -> ssize_t; + pub fn aio_suspend( + __list: *const *const crate::aiocb, + __nent: c_int, + __timeout: *const crate::timespec, + ) -> c_int; + pub fn aio_write(__aiocpb: *mut crate::aiocb) -> c_int; + + pub fn mq_close(__mqdes: crate::mqd_t) -> c_int; + pub fn mq_getattr(__mqdes: crate::mqd_t, __mqstat: *mut crate::mq_attr) -> c_int; + pub fn mq_notify(__mqdes: crate::mqd_t, __notification: *const crate::sigevent) -> c_int; + pub fn mq_open(__name: *const c_char, __oflag: c_int, ...) -> crate::mqd_t; + pub fn mq_receive( + __mqdes: crate::mqd_t, + __msg_ptr: *mut c_char, + __msg_len: size_t, + __msg_prio: *mut c_uint, + ) -> ssize_t; + pub fn mq_send( + __mqdes: crate::mqd_t, + __msg_ptr: *const c_char, + __msg_len: size_t, + __msg_prio: c_uint, + ) -> c_int; + pub fn mq_setattr( + __mqdes: crate::mqd_t, + __mqstat: *const mq_attr, + __omqstat: *mut mq_attr, + ) -> c_int; + pub fn mq_timedreceive( + __mqdes: crate::mqd_t, + __msg_ptr: *mut c_char, + __msg_len: size_t, + __msg_prio: *mut c_uint, + __abs_timeout: *const crate::timespec, + ) -> ssize_t; + pub fn mq_timedsend( + __mqdes: crate::mqd_t, + __msg_ptr: *const c_char, + __msg_len: size_t, + __msg_prio: c_uint, + __abs_timeout: *const crate::timespec, + ) -> c_int; + pub fn mq_unlink(__name: *const c_char) -> c_int; + pub fn __get_errno_ptr() -> *mut c_int; + + // System page, see https://www.qnx.com/developers/docs/7.1#com.qnx.doc.neutrino.building/topic/syspage/syspage_about.html + pub static mut _syspage_ptr: *mut syspage_entry; + + // Function on the stack after a call to pthread_create(). This is used + // as a sentinel to work around an infinite loop in the unwinding code. + pub fn __my_thread_exit(value_ptr: *mut *const c_void); +} + +// Models the implementation in stdlib.h. Ctest will fail if trying to use the +// default symbol from libc +pub unsafe fn atexit(cb: extern "C" fn()) -> c_int { + extern "C" { + static __dso_handle: *mut c_void; + pub fn __cxa_atexit(cb: extern "C" fn(), __arg: *mut c_void, __dso: *mut c_void) -> c_int; + } + __cxa_atexit(cb, 0 as *mut c_void, __dso_handle) +} + +impl siginfo_t { + pub unsafe fn si_addr(&self) -> *mut c_void { + #[repr(C)] + struct siginfo_si_addr { + _pad: [u8; 32], + si_addr: *mut c_void, + } + (*(self as *const siginfo_t as *const siginfo_si_addr)).si_addr + } + + pub unsafe fn si_value(&self) -> crate::sigval { + #[repr(C)] + struct siginfo_si_value { + _pad: [u8; 32], + si_value: crate::sigval, + } + (*(self as *const siginfo_t as *const siginfo_si_value)).si_value + } + + pub unsafe fn si_pid(&self) -> crate::pid_t { + #[repr(C)] + struct siginfo_si_pid { + _pad: [u8; 16], + si_pid: crate::pid_t, + } + (*(self as *const siginfo_t as *const siginfo_si_pid)).si_pid + } + + pub unsafe fn si_uid(&self) -> crate::uid_t { + #[repr(C)] + struct siginfo_si_uid { + _pad: [u8; 24], + si_uid: crate::uid_t, + } + (*(self as *const siginfo_t as *const siginfo_si_uid)).si_uid + } + + pub unsafe fn si_status(&self) -> c_int { + #[repr(C)] + struct siginfo_si_status { + _pad: [u8; 28], + si_status: c_int, + } + (*(self as *const siginfo_t as *const siginfo_si_status)).si_status + } +} + +cfg_if!
{ + if #[cfg(target_arch = "x86_64")] { + mod x86_64; + pub use self::x86_64::*; + } else if #[cfg(target_arch = "aarch64")] { + mod aarch64; + pub use self::aarch64::*; + } else { + panic!("Unsupported arch"); + } +} + +mod neutrino; +pub use self::neutrino::*; diff --git a/vendor/libc/src/unix/nto/neutrino.rs b/vendor/libc/src/unix/nto/neutrino.rs new file mode 100644 index 00000000000000..8aac4680097850 --- /dev/null +++ b/vendor/libc/src/unix/nto/neutrino.rs @@ -0,0 +1,1270 @@ +use crate::prelude::*; + +pub type nto_job_t = crate::sync_t; + +s! { + pub struct syspage_entry_info { + pub entry_off: u16, + pub entry_size: u16, + } + pub struct syspage_array_info { + entry_off: u16, + entry_size: u16, + element_size: u16, + } + + pub struct intrspin { + pub value: c_uint, // volatile + } + + pub struct iov_t { + pub iov_base: *mut c_void, // union + pub iov_len: size_t, + } + + pub struct _itimer { + pub nsec: u64, + pub interval_nsec: u64, + } + + pub struct _msg_info64 { + pub nd: u32, + pub srcnd: u32, + pub pid: crate::pid_t, + pub tid: i32, + pub chid: i32, + pub scoid: i32, + pub coid: i32, + pub priority: i16, + pub flags: i16, + pub msglen: isize, + pub srcmsglen: isize, + pub dstmsglen: isize, + pub type_id: u32, + reserved: u32, + } + + pub struct _cred_info { + pub ruid: crate::uid_t, + pub euid: crate::uid_t, + pub suid: crate::uid_t, + pub rgid: crate::gid_t, + pub egid: crate::gid_t, + pub sgid: crate::gid_t, + pub ngroups: u32, + pub grouplist: [crate::gid_t; 8], + } + + pub struct _client_info { + pub nd: u32, + pub pid: crate::pid_t, + pub sid: crate::pid_t, + pub flags: u32, + pub cred: crate::_cred_info, + } + + pub struct _client_able { + pub ability: u32, + pub flags: u32, + pub range_lo: u64, + pub range_hi: u64, + } + + pub struct nto_channel_config { + pub event: crate::sigevent, + pub num_pulses: c_uint, + pub rearm_threshold: c_uint, + pub options: c_uint, + reserved: [c_uint; 3], + } + + // TODO: The following structures are defined in a header file which doesn't + // appear as part of the default headers found in a standard installation + // of Neutrino 7.1 SDP. Commented out for now. 
+ //pub struct _asyncmsg_put_header { + // pub err: c_int, + // pub iov: *mut crate::iov_t, + // pub parts: c_int, + // pub handle: c_uint, + // pub cb: Option< + // unsafe extern "C" fn( + // err: c_int, + // buf: *mut c_void, + // handle: c_uint, + // ) -> c_int>, + // pub put_hdr_flags: c_uint, + //} + + //pub struct _asyncmsg_connection_attr { + // pub call_back: Option< + // unsafe extern "C" fn( + // err: c_int, + // buff: *mut c_void, + // handle: c_uint, + // ) -> c_int>, + // pub buffer_size: size_t, + // pub max_num_buffer: c_uint, + // pub trigger_num_msg: c_uint, + // pub trigger_time: crate::_itimer, + // reserve: c_uint, + //} + + //pub struct _asyncmsg_connection_descriptor { + // pub flags: c_uint, + // pub sendq_size: c_uint, + // pub sendq_head: c_uint, + // pub sendq_tail: c_uint, + // pub sendq_free: c_uint, + // pub err: c_int, + // pub ev: crate::sigevent, + // pub num_curmsg: c_uint, + // pub ttimer: crate::timer_t, + // pub block_con: crate::pthread_cond_t, + // pub mu: crate::pthread_mutex_t, + // reserved: c_uint, + // pub attr: crate::_asyncmsg_connection_attr, + // pub reserves: [c_uint; 3], + // pub sendq: [crate::_asyncmsg_put_header; 1], // flexarray + //} + + pub struct __c_anonymous_struct_ev { + pub event: crate::sigevent, + pub coid: c_int, + } + + pub struct _channel_connect_attr { + // union + pub ev: crate::__c_anonymous_struct_ev, + } + + // FIXME(1.0): This should not implement `PartialEq` + #[allow(unpredictable_function_pointer_comparisons)] + pub struct _sighandler_info { + pub siginfo: crate::siginfo_t, + pub handler: Option, + pub context: *mut c_void, + } + + pub struct __c_anonymous_struct_time { + pub length: c_uint, + pub scale: c_uint, + } + + pub struct _idle_hook { + pub hook_size: c_uint, + pub cmd: c_uint, + pub mode: c_uint, + pub latency: c_uint, + pub next_fire: u64, + pub curr_time: u64, + pub tod_adjust: u64, + pub resp: c_uint, + pub time: __c_anonymous_struct_time, + pub trigger: crate::sigevent, + pub intrs: *mut c_uint, + pub block_stack_size: c_uint, + } + + pub struct _clockadjust { + pub tick_count: u32, + pub tick_nsec_inc: i32, + } + + pub struct qtime_entry { + pub cycles_per_sec: u64, + pub nsec_tod_adjust: u64, // volatile + pub nsec: u64, // volatile + pub nsec_inc: u32, + pub boot_time: u32, + pub adjust: _clockadjust, + pub timer_rate: u32, + pub timer_scale: i32, + pub timer_load: u32, + pub intr: i32, + pub epoch: u32, + pub flags: u32, + pub rr_interval_mul: u32, + pub timer_load_hi: u32, + pub nsec_stable: u64, // volatile + pub timer_load_max: u64, + pub timer_prog_time: u32, + spare: [u32; 7], + } + + pub struct _sched_info { + pub priority_min: c_int, + pub priority_max: c_int, + pub interval: u64, + pub priority_priv: c_int, + reserved: [c_int; 11], + } + + pub struct _timer_info { + pub itime: crate::_itimer, + pub otime: crate::_itimer, + pub flags: u32, + pub tid: i32, + pub notify: i32, + pub clockid: crate::clockid_t, + pub overruns: u32, + pub event: crate::sigevent, // union + } + + pub struct _clockperiod { + pub nsec: u32, + pub fract: i32, + } +} + +s_no_extra_traits! 
{ + #[repr(align(8))] + pub struct syspage_entry { + pub size: u16, + pub total_size: u16, + pub type_: u16, + pub num_cpu: u16, + pub system_private: syspage_entry_info, + pub old_asinfo: syspage_entry_info, + pub __mangle_name_to_cause_compilation_errs_meminfo: syspage_entry_info, + pub hwinfo: syspage_entry_info, + pub old_cpuinfo: syspage_entry_info, + pub old_cacheattr: syspage_entry_info, + pub qtime: syspage_entry_info, + pub callout: syspage_entry_info, + pub callin: syspage_entry_info, + pub typed_strings: syspage_entry_info, + pub strings: syspage_entry_info, + pub old_intrinfo: syspage_entry_info, + pub smp: syspage_entry_info, + pub pminfo: syspage_entry_info, + pub old_mdriver: syspage_entry_info, + spare0: [u32; 1], + __reserved: [u8; 160], // anonymous union with architecture dependent structs + pub new_asinfo: syspage_array_info, + pub new_cpuinfo: syspage_array_info, + pub new_cacheattr: syspage_array_info, + pub new_intrinfo: syspage_array_info, + pub new_mdriver: syspage_array_info, + } +} + +pub const SYSMGR_PID: u32 = 1; +pub const SYSMGR_CHID: u32 = 1; +pub const SYSMGR_COID: u32 = _NTO_SIDE_CHANNEL; +pub const SYSMGR_HANDLE: u32 = 0; + +pub const STATE_DEAD: c_int = 0x00; +pub const STATE_RUNNING: c_int = 0x01; +pub const STATE_READY: c_int = 0x02; +pub const STATE_STOPPED: c_int = 0x03; +pub const STATE_SEND: c_int = 0x04; +pub const STATE_RECEIVE: c_int = 0x05; +pub const STATE_REPLY: c_int = 0x06; +pub const STATE_STACK: c_int = 0x07; +pub const STATE_WAITTHREAD: c_int = 0x08; +pub const STATE_WAITPAGE: c_int = 0x09; +pub const STATE_SIGSUSPEND: c_int = 0x0a; +pub const STATE_SIGWAITINFO: c_int = 0x0b; +pub const STATE_NANOSLEEP: c_int = 0x0c; +pub const STATE_MUTEX: c_int = 0x0d; +pub const STATE_CONDVAR: c_int = 0x0e; +pub const STATE_JOIN: c_int = 0x0f; +pub const STATE_INTR: c_int = 0x10; +pub const STATE_SEM: c_int = 0x11; +pub const STATE_WAITCTX: c_int = 0x12; +pub const STATE_NET_SEND: c_int = 0x13; +pub const STATE_NET_REPLY: c_int = 0x14; +pub const STATE_MAX: c_int = 0x18; + +pub const _NTO_TIMEOUT_RECEIVE: i32 = 1 << STATE_RECEIVE; +pub const _NTO_TIMEOUT_SEND: i32 = 1 << STATE_SEND; +pub const _NTO_TIMEOUT_REPLY: i32 = 1 << STATE_REPLY; +pub const _NTO_TIMEOUT_SIGSUSPEND: i32 = 1 << STATE_SIGSUSPEND; +pub const _NTO_TIMEOUT_SIGWAITINFO: i32 = 1 << STATE_SIGWAITINFO; +pub const _NTO_TIMEOUT_NANOSLEEP: i32 = 1 << STATE_NANOSLEEP; +pub const _NTO_TIMEOUT_MUTEX: i32 = 1 << STATE_MUTEX; +pub const _NTO_TIMEOUT_CONDVAR: i32 = 1 << STATE_CONDVAR; +pub const _NTO_TIMEOUT_JOIN: i32 = 1 << STATE_JOIN; +pub const _NTO_TIMEOUT_INTR: i32 = 1 << STATE_INTR; +pub const _NTO_TIMEOUT_SEM: i32 = 1 << STATE_SEM; + +pub const _NTO_MI_ENDIAN_BIG: u32 = 1; +pub const _NTO_MI_ENDIAN_DIFF: u32 = 2; +pub const _NTO_MI_UNBLOCK_REQ: u32 = 256; +pub const _NTO_MI_NET_CRED_DIRTY: u32 = 512; +pub const _NTO_MI_CONSTRAINED: u32 = 1024; +pub const _NTO_MI_CHROOT: u32 = 2048; +pub const _NTO_MI_BITS_64: u32 = 4096; +pub const _NTO_MI_BITS_DIFF: u32 = 8192; +pub const _NTO_MI_SANDBOX: u32 = 16384; + +pub const _NTO_CI_ENDIAN_BIG: u32 = 1; +pub const _NTO_CI_BKGND_PGRP: u32 = 4; +pub const _NTO_CI_ORPHAN_PGRP: u32 = 8; +pub const _NTO_CI_STOPPED: u32 = 128; +pub const _NTO_CI_UNABLE: u32 = 256; +pub const _NTO_CI_TYPE_ID: u32 = 512; +pub const _NTO_CI_CHROOT: u32 = 2048; +pub const _NTO_CI_BITS_64: u32 = 4096; +pub const _NTO_CI_SANDBOX: u32 = 16384; +pub const _NTO_CI_LOADER: u32 = 32768; +pub const _NTO_CI_FULL_GROUPS: u32 = 2147483648; + +pub const _NTO_TI_ACTIVE: u32 = 1; +pub 
const _NTO_TI_ABSOLUTE: u32 = 2; +pub const _NTO_TI_EXPIRED: u32 = 4; +pub const _NTO_TI_TOD_BASED: u32 = 8; +pub const _NTO_TI_TARGET_PROCESS: u32 = 16; +pub const _NTO_TI_REPORT_TOLERANCE: u32 = 32; +pub const _NTO_TI_PRECISE: u32 = 64; +pub const _NTO_TI_TOLERANT: u32 = 128; +pub const _NTO_TI_WAKEUP: u32 = 256; +pub const _NTO_TI_PROCESS_TOLERANT: u32 = 512; +pub const _NTO_TI_HIGH_RESOLUTION: u32 = 1024; + +pub const _PULSE_TYPE: u32 = 0; +pub const _PULSE_SUBTYPE: u32 = 0; +pub const _PULSE_CODE_UNBLOCK: i32 = -32; +pub const _PULSE_CODE_DISCONNECT: i32 = -33; +pub const _PULSE_CODE_THREADDEATH: i32 = -34; +pub const _PULSE_CODE_COIDDEATH: i32 = -35; +pub const _PULSE_CODE_NET_ACK: i32 = -36; +pub const _PULSE_CODE_NET_UNBLOCK: i32 = -37; +pub const _PULSE_CODE_NET_DETACH: i32 = -38; +pub const _PULSE_CODE_RESTART: i32 = -39; +pub const _PULSE_CODE_NORESTART: i32 = -40; +pub const _PULSE_CODE_UNBLOCK_RESTART: i32 = -41; +pub const _PULSE_CODE_UNBLOCK_TIMER: i32 = -42; +pub const _PULSE_CODE_MINAVAIL: u32 = 0; +pub const _PULSE_CODE_MAXAVAIL: u32 = 127; + +pub const _NTO_HARD_FLAGS_END: u32 = 1; + +pub const _NTO_PULSE_IF_UNIQUE: u32 = 4096; +pub const _NTO_PULSE_REPLACE: u32 = 8192; + +pub const _NTO_PF_NOCLDSTOP: u32 = 1; +pub const _NTO_PF_LOADING: u32 = 2; +pub const _NTO_PF_TERMING: u32 = 4; +pub const _NTO_PF_ZOMBIE: u32 = 8; +pub const _NTO_PF_NOZOMBIE: u32 = 16; +pub const _NTO_PF_FORKED: u32 = 32; +pub const _NTO_PF_ORPHAN_PGRP: u32 = 64; +pub const _NTO_PF_STOPPED: u32 = 128; +pub const _NTO_PF_DEBUG_STOPPED: u32 = 256; +pub const _NTO_PF_BKGND_PGRP: u32 = 512; +pub const _NTO_PF_NOISYNC: u32 = 1024; +pub const _NTO_PF_CONTINUED: u32 = 2048; +pub const _NTO_PF_CHECK_INTR: u32 = 4096; +pub const _NTO_PF_COREDUMP: u32 = 8192; +pub const _NTO_PF_RING0: u32 = 32768; +pub const _NTO_PF_SLEADER: u32 = 65536; +pub const _NTO_PF_WAITINFO: u32 = 131072; +pub const _NTO_PF_DESTROYALL: u32 = 524288; +pub const _NTO_PF_NOCOREDUMP: u32 = 1048576; +pub const _NTO_PF_WAITDONE: u32 = 4194304; +pub const _NTO_PF_TERM_WAITING: u32 = 8388608; +pub const _NTO_PF_ASLR: u32 = 16777216; +pub const _NTO_PF_EXECED: u32 = 33554432; +pub const _NTO_PF_APP_STOPPED: u32 = 67108864; +pub const _NTO_PF_64BIT: u32 = 134217728; +pub const _NTO_PF_NET: u32 = 268435456; +pub const _NTO_PF_NOLAZYSTACK: u32 = 536870912; +pub const _NTO_PF_NOEXEC_STACK: u32 = 1073741824; +pub const _NTO_PF_LOADER_PERMS: u32 = 2147483648; + +pub const _NTO_TF_INTR_PENDING: u32 = 65536; +pub const _NTO_TF_DETACHED: u32 = 131072; +pub const _NTO_TF_SHR_MUTEX: u32 = 262144; +pub const _NTO_TF_SHR_MUTEX_EUID: u32 = 524288; +pub const _NTO_TF_THREADS_HOLD: u32 = 1048576; +pub const _NTO_TF_UNBLOCK_REQ: u32 = 4194304; +pub const _NTO_TF_ALIGN_FAULT: u32 = 16777216; +pub const _NTO_TF_SSTEP: u32 = 33554432; +pub const _NTO_TF_ALLOCED_STACK: u32 = 67108864; +pub const _NTO_TF_NOMULTISIG: u32 = 134217728; +pub const _NTO_TF_LOW_LATENCY: u32 = 268435456; +pub const _NTO_TF_IOPRIV: u32 = 2147483648; + +pub const _NTO_TCTL_IO_PRIV: u32 = 1; +pub const _NTO_TCTL_THREADS_HOLD: u32 = 2; +pub const _NTO_TCTL_THREADS_CONT: u32 = 3; +pub const _NTO_TCTL_RUNMASK: u32 = 4; +pub const _NTO_TCTL_ALIGN_FAULT: u32 = 5; +pub const _NTO_TCTL_RUNMASK_GET_AND_SET: u32 = 6; +pub const _NTO_TCTL_PERFCOUNT: u32 = 7; +pub const _NTO_TCTL_ONE_THREAD_HOLD: u32 = 8; +pub const _NTO_TCTL_ONE_THREAD_CONT: u32 = 9; +pub const _NTO_TCTL_RUNMASK_GET_AND_SET_INHERIT: u32 = 10; +pub const _NTO_TCTL_NAME: u32 = 11; +pub const _NTO_TCTL_RCM_GET_AND_SET: u32 = 12; +pub 
const _NTO_TCTL_SHR_MUTEX: u32 = 13; +pub const _NTO_TCTL_IO: u32 = 14; +pub const _NTO_TCTL_NET_KIF_GET_AND_SET: u32 = 15; +pub const _NTO_TCTL_LOW_LATENCY: u32 = 16; +pub const _NTO_TCTL_ADD_EXIT_EVENT: u32 = 17; +pub const _NTO_TCTL_DEL_EXIT_EVENT: u32 = 18; +pub const _NTO_TCTL_IO_LEVEL: u32 = 19; +pub const _NTO_TCTL_RESERVED: u32 = 2147483648; +pub const _NTO_TCTL_IO_LEVEL_INHERIT: u32 = 1073741824; +pub const _NTO_IO_LEVEL_NONE: u32 = 1; +pub const _NTO_IO_LEVEL_1: u32 = 2; +pub const _NTO_IO_LEVEL_2: u32 = 3; + +pub const _NTO_THREAD_NAME_MAX: u32 = 100; + +pub const _NTO_CHF_FIXED_PRIORITY: u32 = 1; +pub const _NTO_CHF_UNBLOCK: u32 = 2; +pub const _NTO_CHF_THREAD_DEATH: u32 = 4; +pub const _NTO_CHF_DISCONNECT: u32 = 8; +pub const _NTO_CHF_NET_MSG: u32 = 16; +pub const _NTO_CHF_SENDER_LEN: u32 = 32; +pub const _NTO_CHF_COID_DISCONNECT: u32 = 64; +pub const _NTO_CHF_REPLY_LEN: u32 = 128; +pub const _NTO_CHF_PULSE_POOL: u32 = 256; +pub const _NTO_CHF_ASYNC_NONBLOCK: u32 = 512; +pub const _NTO_CHF_ASYNC: u32 = 1024; +pub const _NTO_CHF_GLOBAL: u32 = 2048; +pub const _NTO_CHF_PRIVATE: u32 = 4096; +pub const _NTO_CHF_MSG_PAUSING: u32 = 8192; +pub const _NTO_CHF_INHERIT_RUNMASK: u32 = 16384; +pub const _NTO_CHF_UNBLOCK_TIMER: u32 = 32768; + +pub const _NTO_CHO_CUSTOM_EVENT: u32 = 1; + +pub const _NTO_COF_CLOEXEC: u32 = 1; +pub const _NTO_COF_DEAD: u32 = 2; +pub const _NTO_COF_NOSHARE: u32 = 64; +pub const _NTO_COF_NETCON: u32 = 128; +pub const _NTO_COF_NONBLOCK: u32 = 256; +pub const _NTO_COF_ASYNC: u32 = 512; +pub const _NTO_COF_GLOBAL: u32 = 1024; +pub const _NTO_COF_NOEVENT: u32 = 2048; +pub const _NTO_COF_INSECURE: u32 = 4096; +pub const _NTO_COF_REG_EVENTS: u32 = 8192; +pub const _NTO_COF_UNREG_EVENTS: u32 = 16384; +pub const _NTO_COF_MASK: u32 = 65535; + +pub const _NTO_SIDE_CHANNEL: u32 = 1073741824; + +pub const _NTO_CONNECTION_SCOID: u32 = 65536; +pub const _NTO_GLOBAL_CHANNEL: u32 = 1073741824; + +pub const _NTO_TIMEOUT_MASK: u32 = (1 << STATE_MAX) - 1; +pub const _NTO_TIMEOUT_ACTIVE: u32 = 1 << STATE_MAX; +pub const _NTO_TIMEOUT_IMMEDIATE: u32 = 1 << (STATE_MAX + 1); + +pub const _NTO_IC_LATENCY: u32 = 0; + +pub const _NTO_INTR_FLAGS_END: u32 = 1; +pub const _NTO_INTR_FLAGS_NO_UNMASK: u32 = 2; +pub const _NTO_INTR_FLAGS_PROCESS: u32 = 4; +pub const _NTO_INTR_FLAGS_TRK_MSK: u32 = 8; +pub const _NTO_INTR_FLAGS_ARRAY: u32 = 16; +pub const _NTO_INTR_FLAGS_EXCLUSIVE: u32 = 32; +pub const _NTO_INTR_FLAGS_FPU: u32 = 64; + +pub const _NTO_INTR_CLASS_EXTERNAL: u32 = 0; +pub const _NTO_INTR_CLASS_SYNTHETIC: u32 = 2147418112; + +pub const _NTO_INTR_SPARE: u32 = 2147483647; + +pub const _NTO_HOOK_IDLE: u32 = 2147418113; +pub const _NTO_HOOK_OVERDRIVE: u32 = 2147418114; +pub const _NTO_HOOK_LAST: u32 = 2147418114; +pub const _NTO_HOOK_IDLE2_FLAG: u32 = 32768; + +pub const _NTO_IH_CMD_SLEEP_SETUP: u32 = 1; +pub const _NTO_IH_CMD_SLEEP_BLOCK: u32 = 2; +pub const _NTO_IH_CMD_SLEEP_WAKEUP: u32 = 4; +pub const _NTO_IH_CMD_SLEEP_ONLINE: u32 = 8; +pub const _NTO_IH_RESP_NEEDS_BLOCK: u32 = 1; +pub const _NTO_IH_RESP_NEEDS_WAKEUP: u32 = 2; +pub const _NTO_IH_RESP_NEEDS_ONLINE: u32 = 4; +pub const _NTO_IH_RESP_SYNC_TIME: u32 = 16; +pub const _NTO_IH_RESP_SYNC_TLB: u32 = 32; +pub const _NTO_IH_RESP_SUGGEST_OFFLINE: u32 = 256; +pub const _NTO_IH_RESP_SLEEP_MODE_REACHED: u32 = 512; +pub const _NTO_IH_RESP_DELIVER_INTRS: u32 = 1024; + +pub const _NTO_READIOV_SEND: u32 = 0; +pub const _NTO_READIOV_REPLY: u32 = 1; + +pub const _NTO_KEYDATA_VTID: u32 = 2147483648; + +pub const _NTO_KEYDATA_PATHSIGN: u32 = 
32768; +pub const _NTO_KEYDATA_OP_MASK: u32 = 255; +pub const _NTO_KEYDATA_VERIFY: u32 = 0; +pub const _NTO_KEYDATA_CALCULATE: u32 = 1; +pub const _NTO_KEYDATA_CALCULATE_REUSE: u32 = 2; +pub const _NTO_KEYDATA_PATHSIGN_VERIFY: u32 = 32768; +pub const _NTO_KEYDATA_PATHSIGN_CALCULATE: u32 = 32769; +pub const _NTO_KEYDATA_PATHSIGN_CALCULATE_REUSE: u32 = 32770; + +pub const _NTO_SCTL_SETPRIOCEILING: u32 = 1; +pub const _NTO_SCTL_GETPRIOCEILING: u32 = 2; +pub const _NTO_SCTL_SETEVENT: u32 = 3; +pub const _NTO_SCTL_MUTEX_WAKEUP: u32 = 4; +pub const _NTO_SCTL_MUTEX_CONSISTENT: u32 = 5; +pub const _NTO_SCTL_SEM_VALUE: u32 = 6; + +pub const _NTO_CLIENTINFO_GETGROUPS: u32 = 1; +pub const _NTO_CLIENTINFO_GETTYPEID: u32 = 2; + +extern "C" { + pub fn ChannelCreate(__flags: c_uint) -> c_int; + pub fn ChannelCreate_r(__flags: c_uint) -> c_int; + pub fn ChannelCreatePulsePool(__flags: c_uint, __config: *const nto_channel_config) -> c_int; + pub fn ChannelCreateExt( + __flags: c_uint, + __mode: crate::mode_t, + __bufsize: usize, + __maxnumbuf: c_uint, + __ev: *const crate::sigevent, + __cred: *mut _cred_info, + ) -> c_int; + pub fn ChannelDestroy(__chid: c_int) -> c_int; + pub fn ChannelDestroy_r(__chid: c_int) -> c_int; + pub fn ConnectAttach( + __nd: u32, + __pid: crate::pid_t, + __chid: c_int, + __index: c_uint, + __flags: c_int, + ) -> c_int; + pub fn ConnectAttach_r( + __nd: u32, + __pid: crate::pid_t, + __chid: c_int, + __index: c_uint, + __flags: c_int, + ) -> c_int; + + // TODO: The following function uses a structure defined in a header file + // which doesn't appear as part of the default headers found in a + // standard installation of Neutrino 7.1 SDP. Commented out for now. + //pub fn ConnectAttachExt( + // __nd: u32, + // __pid: crate::pid_t, + // __chid: c_int, + // __index: c_uint, + // __flags: c_int, + // __cd: *mut _asyncmsg_connection_descriptor, + //) -> c_int; + pub fn ConnectDetach(__coid: c_int) -> c_int; + pub fn ConnectDetach_r(__coid: c_int) -> c_int; + pub fn ConnectServerInfo(__pid: crate::pid_t, __coid: c_int, __info: *mut _msg_info64) + -> c_int; + pub fn ConnectServerInfo_r( + __pid: crate::pid_t, + __coid: c_int, + __info: *mut _msg_info64, + ) -> c_int; + pub fn ConnectClientInfoExtraArgs( + __scoid: c_int, + __info_pp: *mut _client_info, + __ngroups: c_int, + __abilities: *mut _client_able, + __nable: c_int, + __type_id: *mut c_uint, + ) -> c_int; + pub fn ConnectClientInfoExtraArgs_r( + __scoid: c_int, + __info_pp: *mut _client_info, + __ngroups: c_int, + __abilities: *mut _client_able, + __nable: c_int, + __type_id: *mut c_uint, + ) -> c_int; + pub fn ConnectClientInfo(__scoid: c_int, __info: *mut _client_info, __ngroups: c_int) -> c_int; + pub fn ConnectClientInfo_r( + __scoid: c_int, + __info: *mut _client_info, + __ngroups: c_int, + ) -> c_int; + pub fn ConnectClientInfoExt( + __scoid: c_int, + __info_pp: *mut *mut _client_info, + flags: c_int, + ) -> c_int; + pub fn ClientInfoExtFree(__info_pp: *mut *mut _client_info) -> c_int; + pub fn ConnectClientInfoAble( + __scoid: c_int, + __info_pp: *mut *mut _client_info, + flags: c_int, + abilities: *mut _client_able, + nable: c_int, + ) -> c_int; + pub fn ConnectFlags( + __pid: crate::pid_t, + __coid: c_int, + __mask: c_uint, + __bits: c_uint, + ) -> c_int; + pub fn ConnectFlags_r( + __pid: crate::pid_t, + __coid: c_int, + __mask: c_uint, + __bits: c_uint, + ) -> c_int; + pub fn ChannelConnectAttr( + __id: c_uint, + __old_attr: *mut _channel_connect_attr, + __new_attr: *mut _channel_connect_attr, + __flags: c_uint, + ) -> 
c_int; + pub fn MsgSend( + __coid: c_int, + __smsg: *const c_void, + __sbytes: usize, + __rmsg: *mut c_void, + __rbytes: usize, + ) -> c_long; + pub fn MsgSend_r( + __coid: c_int, + __smsg: *const c_void, + __sbytes: usize, + __rmsg: *mut c_void, + __rbytes: usize, + ) -> c_long; + pub fn MsgSendnc( + __coid: c_int, + __smsg: *const c_void, + __sbytes: usize, + __rmsg: *mut c_void, + __rbytes: usize, + ) -> c_long; + pub fn MsgSendnc_r( + __coid: c_int, + __smsg: *const c_void, + __sbytes: usize, + __rmsg: *mut c_void, + __rbytes: usize, + ) -> c_long; + pub fn MsgSendsv( + __coid: c_int, + __smsg: *const c_void, + __sbytes: usize, + __riov: *const crate::iovec, + __rparts: usize, + ) -> c_long; + pub fn MsgSendsv_r( + __coid: c_int, + __smsg: *const c_void, + __sbytes: usize, + __riov: *const crate::iovec, + __rparts: usize, + ) -> c_long; + pub fn MsgSendsvnc( + __coid: c_int, + __smsg: *const c_void, + __sbytes: usize, + __riov: *const crate::iovec, + __rparts: usize, + ) -> c_long; + pub fn MsgSendsvnc_r( + __coid: c_int, + __smsg: *const c_void, + __sbytes: usize, + __riov: *const crate::iovec, + __rparts: usize, + ) -> c_long; + pub fn MsgSendvs( + __coid: c_int, + __siov: *const crate::iovec, + __sparts: usize, + __rmsg: *mut c_void, + __rbytes: usize, + ) -> c_long; + pub fn MsgSendvs_r( + __coid: c_int, + __siov: *const crate::iovec, + __sparts: usize, + __rmsg: *mut c_void, + __rbytes: usize, + ) -> c_long; + pub fn MsgSendvsnc( + __coid: c_int, + __siov: *const crate::iovec, + __sparts: usize, + __rmsg: *mut c_void, + __rbytes: usize, + ) -> c_long; + pub fn MsgSendvsnc_r( + __coid: c_int, + __siov: *const crate::iovec, + __sparts: usize, + __rmsg: *mut c_void, + __rbytes: usize, + ) -> c_long; + pub fn MsgSendv( + __coid: c_int, + __siov: *const crate::iovec, + __sparts: usize, + __riov: *const crate::iovec, + __rparts: usize, + ) -> c_long; + pub fn MsgSendv_r( + __coid: c_int, + __siov: *const crate::iovec, + __sparts: usize, + __riov: *const crate::iovec, + __rparts: usize, + ) -> c_long; + pub fn MsgSendvnc( + __coid: c_int, + __siov: *const crate::iovec, + __sparts: usize, + __riov: *const crate::iovec, + __rparts: usize, + ) -> c_long; + pub fn MsgSendvnc_r( + __coid: c_int, + __siov: *const crate::iovec, + __sparts: usize, + __riov: *const crate::iovec, + __rparts: usize, + ) -> c_long; + pub fn MsgReceive( + __chid: c_int, + __msg: *mut c_void, + __bytes: usize, + __info: *mut _msg_info64, + ) -> c_int; + pub fn MsgReceive_r( + __chid: c_int, + __msg: *mut c_void, + __bytes: usize, + __info: *mut _msg_info64, + ) -> c_int; + pub fn MsgReceivev( + __chid: c_int, + __iov: *const crate::iovec, + __parts: usize, + __info: *mut _msg_info64, + ) -> c_int; + pub fn MsgReceivev_r( + __chid: c_int, + __iov: *const crate::iovec, + __parts: usize, + __info: *mut _msg_info64, + ) -> c_int; + pub fn MsgReceivePulse( + __chid: c_int, + __pulse: *mut c_void, + __bytes: usize, + __info: *mut _msg_info64, + ) -> c_int; + pub fn MsgReceivePulse_r( + __chid: c_int, + __pulse: *mut c_void, + __bytes: usize, + __info: *mut _msg_info64, + ) -> c_int; + pub fn MsgReceivePulsev( + __chid: c_int, + __iov: *const crate::iovec, + __parts: usize, + __info: *mut _msg_info64, + ) -> c_int; + pub fn MsgReceivePulsev_r( + __chid: c_int, + __iov: *const crate::iovec, + __parts: usize, + __info: *mut _msg_info64, + ) -> c_int; + pub fn MsgReply( + __rcvid: c_int, + __status: c_long, + __msg: *const c_void, + __bytes: usize, + ) -> c_int; + pub fn MsgReply_r( + __rcvid: c_int, + __status: c_long, + 
__msg: *const c_void, + __bytes: usize, + ) -> c_int; + pub fn MsgReplyv( + __rcvid: c_int, + __status: c_long, + __iov: *const crate::iovec, + __parts: usize, + ) -> c_int; + pub fn MsgReplyv_r( + __rcvid: c_int, + __status: c_long, + __iov: *const crate::iovec, + __parts: usize, + ) -> c_int; + pub fn MsgReadiov( + __rcvid: c_int, + __iov: *const crate::iovec, + __parts: usize, + __offset: usize, + __flags: c_int, + ) -> isize; + pub fn MsgReadiov_r( + __rcvid: c_int, + __iov: *const crate::iovec, + __parts: usize, + __offset: usize, + __flags: c_int, + ) -> isize; + pub fn MsgRead(__rcvid: c_int, __msg: *mut c_void, __bytes: usize, __offset: usize) -> isize; + pub fn MsgRead_r(__rcvid: c_int, __msg: *mut c_void, __bytes: usize, __offset: usize) -> isize; + pub fn MsgReadv( + __rcvid: c_int, + __iov: *const crate::iovec, + __parts: usize, + __offset: usize, + ) -> isize; + pub fn MsgReadv_r( + __rcvid: c_int, + __iov: *const crate::iovec, + __parts: usize, + __offset: usize, + ) -> isize; + pub fn MsgWrite(__rcvid: c_int, __msg: *const c_void, __bytes: usize, __offset: usize) + -> isize; + pub fn MsgWrite_r( + __rcvid: c_int, + __msg: *const c_void, + __bytes: usize, + __offset: usize, + ) -> isize; + pub fn MsgWritev( + __rcvid: c_int, + __iov: *const crate::iovec, + __parts: usize, + __offset: usize, + ) -> isize; + pub fn MsgWritev_r( + __rcvid: c_int, + __iov: *const crate::iovec, + __parts: usize, + __offset: usize, + ) -> isize; + pub fn MsgSendPulse(__coid: c_int, __priority: c_int, __code: c_int, __value: c_int) -> c_int; + pub fn MsgSendPulse_r(__coid: c_int, __priority: c_int, __code: c_int, __value: c_int) + -> c_int; + pub fn MsgSendPulsePtr( + __coid: c_int, + __priority: c_int, + __code: c_int, + __value: *mut c_void, + ) -> c_int; + pub fn MsgSendPulsePtr_r( + __coid: c_int, + __priority: c_int, + __code: c_int, + __value: *mut c_void, + ) -> c_int; + pub fn MsgDeliverEvent(__rcvid: c_int, __event: *const crate::sigevent) -> c_int; + pub fn MsgDeliverEvent_r(__rcvid: c_int, __event: *const crate::sigevent) -> c_int; + pub fn MsgVerifyEvent(__rcvid: c_int, __event: *const crate::sigevent) -> c_int; + pub fn MsgVerifyEvent_r(__rcvid: c_int, __event: *const crate::sigevent) -> c_int; + pub fn MsgRegisterEvent(__event: *mut crate::sigevent, __coid: c_int) -> c_int; + pub fn MsgRegisterEvent_r(__event: *mut crate::sigevent, __coid: c_int) -> c_int; + pub fn MsgUnregisterEvent(__event: *const crate::sigevent) -> c_int; + pub fn MsgUnregisterEvent_r(__event: *const crate::sigevent) -> c_int; + pub fn MsgInfo(__rcvid: c_int, __info: *mut _msg_info64) -> c_int; + pub fn MsgInfo_r(__rcvid: c_int, __info: *mut _msg_info64) -> c_int; + pub fn MsgKeyData( + __rcvid: c_int, + __oper: c_int, + __key: u32, + __newkey: *mut u32, + __iov: *const crate::iovec, + __parts: c_int, + ) -> c_int; + pub fn MsgKeyData_r( + __rcvid: c_int, + __oper: c_int, + __key: u32, + __newkey: *mut u32, + __iov: *const crate::iovec, + __parts: c_int, + ) -> c_int; + pub fn MsgError(__rcvid: c_int, __err: c_int) -> c_int; + pub fn MsgError_r(__rcvid: c_int, __err: c_int) -> c_int; + pub fn MsgCurrent(__rcvid: c_int) -> c_int; + pub fn MsgCurrent_r(__rcvid: c_int) -> c_int; + pub fn MsgSendAsyncGbl( + __coid: c_int, + __smsg: *const c_void, + __sbytes: usize, + __msg_prio: c_uint, + ) -> c_int; + pub fn MsgSendAsync(__coid: c_int) -> c_int; + pub fn MsgReceiveAsyncGbl( + __chid: c_int, + __rmsg: *mut c_void, + __rbytes: usize, + __info: *mut _msg_info64, + __coid: c_int, + ) -> c_int; + pub fn 
MsgReceiveAsync(__chid: c_int, __iov: *const crate::iovec, __parts: c_uint) -> c_int; + pub fn MsgPause(__rcvid: c_int, __cookie: c_uint) -> c_int; + pub fn MsgPause_r(__rcvid: c_int, __cookie: c_uint) -> c_int; + + pub fn SignalKill( + __nd: u32, + __pid: crate::pid_t, + __tid: c_int, + __signo: c_int, + __code: c_int, + __value: c_int, + ) -> c_int; + pub fn SignalKill_r( + __nd: u32, + __pid: crate::pid_t, + __tid: c_int, + __signo: c_int, + __code: c_int, + __value: c_int, + ) -> c_int; + pub fn SignalKillSigval( + __nd: u32, + __pid: crate::pid_t, + __tid: c_int, + __signo: c_int, + __code: c_int, + __value: *const crate::sigval, + ) -> c_int; + pub fn SignalKillSigval_r( + __nd: u32, + __pid: crate::pid_t, + __tid: c_int, + __signo: c_int, + __code: c_int, + __value: *const crate::sigval, + ) -> c_int; + pub fn SignalReturn(__info: *mut _sighandler_info) -> c_int; + pub fn SignalFault(__sigcode: c_uint, __regs: *mut c_void, __refaddr: usize) -> c_int; + pub fn SignalAction( + __pid: crate::pid_t, + __sigstub: unsafe extern "C" fn(), + __signo: c_int, + __act: *const crate::sigaction, + __oact: *mut crate::sigaction, + ) -> c_int; + pub fn SignalAction_r( + __pid: crate::pid_t, + __sigstub: unsafe extern "C" fn(), + __signo: c_int, + __act: *const crate::sigaction, + __oact: *mut crate::sigaction, + ) -> c_int; + pub fn SignalProcmask( + __pid: crate::pid_t, + __tid: c_int, + __how: c_int, + __set: *const crate::sigset_t, + __oldset: *mut crate::sigset_t, + ) -> c_int; + pub fn SignalProcmask_r( + __pid: crate::pid_t, + __tid: c_int, + __how: c_int, + __set: *const crate::sigset_t, + __oldset: *mut crate::sigset_t, + ) -> c_int; + pub fn SignalSuspend(__set: *const crate::sigset_t) -> c_int; + pub fn SignalSuspend_r(__set: *const crate::sigset_t) -> c_int; + pub fn SignalWaitinfo(__set: *const crate::sigset_t, __info: *mut crate::siginfo_t) -> c_int; + pub fn SignalWaitinfo_r(__set: *const crate::sigset_t, __info: *mut crate::siginfo_t) -> c_int; + pub fn SignalWaitinfoMask( + __set: *const crate::sigset_t, + __info: *mut crate::siginfo_t, + __mask: *const crate::sigset_t, + ) -> c_int; + pub fn SignalWaitinfoMask_r( + __set: *const crate::sigset_t, + __info: *mut crate::siginfo_t, + __mask: *const crate::sigset_t, + ) -> c_int; + pub fn ThreadCreate( + __pid: crate::pid_t, + __func: unsafe extern "C" fn(__arg: *mut c_void) -> *mut c_void, + __arg: *mut c_void, + __attr: *const crate::_thread_attr, + ) -> c_int; + pub fn ThreadCreate_r( + __pid: crate::pid_t, + __func: unsafe extern "C" fn(__arg: *mut c_void) -> *mut c_void, + __arg: *mut c_void, + __attr: *const crate::_thread_attr, + ) -> c_int; + + pub fn ThreadDestroy(__tid: c_int, __priority: c_int, __status: *mut c_void) -> c_int; + pub fn ThreadDestroy_r(__tid: c_int, __priority: c_int, __status: *mut c_void) -> c_int; + pub fn ThreadDetach(__tid: c_int) -> c_int; + pub fn ThreadDetach_r(__tid: c_int) -> c_int; + pub fn ThreadJoin(__tid: c_int, __status: *mut *mut c_void) -> c_int; + pub fn ThreadJoin_r(__tid: c_int, __status: *mut *mut c_void) -> c_int; + pub fn ThreadCancel(__tid: c_int, __canstub: unsafe extern "C" fn()) -> c_int; + pub fn ThreadCancel_r(__tid: c_int, __canstub: unsafe extern "C" fn()) -> c_int; + pub fn ThreadCtl(__cmd: c_int, __data: *mut c_void) -> c_int; + pub fn ThreadCtl_r(__cmd: c_int, __data: *mut c_void) -> c_int; + pub fn ThreadCtlExt( + __pid: crate::pid_t, + __tid: c_int, + __cmd: c_int, + __data: *mut c_void, + ) -> c_int; + pub fn ThreadCtlExt_r( + __pid: crate::pid_t, + __tid: c_int, + __cmd: 
c_int, + __data: *mut c_void, + ) -> c_int; + + pub fn InterruptHookTrace( + __handler: Option<unsafe extern "C" fn(arg1: c_int) -> *const crate::sigevent>, + __flags: c_uint, + ) -> c_int; + pub fn InterruptHookIdle( + __handler: Option<unsafe extern "C" fn(arg1: *mut u64, arg2: *mut qtime_entry)>, + __flags: c_uint, + ) -> c_int; + pub fn InterruptHookIdle2( + __handler: Option< + unsafe extern "C" fn(arg1: c_uint, arg2: *mut syspage_entry, arg3: *mut _idle_hook), + >, + __flags: c_uint, + ) -> c_int; + pub fn InterruptHookOverdriveEvent(__event: *const crate::sigevent, __flags: c_uint) -> c_int; + pub fn InterruptAttachEvent( + __intr: c_int, + __event: *const crate::sigevent, + __flags: c_uint, + ) -> c_int; + pub fn InterruptAttachEvent_r( + __intr: c_int, + __event: *const crate::sigevent, + __flags: c_uint, + ) -> c_int; + pub fn InterruptAttach( + __intr: c_int, + __handler: Option< + unsafe extern "C" fn(__area: *mut c_void, __id: c_int) -> *const crate::sigevent, + >, + __area: *const c_void, + __size: c_int, + __flags: c_uint, + ) -> c_int; + pub fn InterruptAttach_r( + __intr: c_int, + __handler: Option< + unsafe extern "C" fn(__area: *mut c_void, __id: c_int) -> *const crate::sigevent, + >, + __area: *const c_void, + __size: c_int, + __flags: c_uint, + ) -> c_int; + pub fn InterruptAttachArray( + __intr: c_int, + __handler: Option< + unsafe extern "C" fn(__area: *mut c_void, __id: c_int) -> *const *const crate::sigevent, + >, + __area: *const c_void, + __size: c_int, + __flags: c_uint, + ) -> c_int; + pub fn InterruptAttachArray_r( + __intr: c_int, + __handler: Option< + unsafe extern "C" fn(__area: *mut c_void, __id: c_int) -> *const *const crate::sigevent, + >, + __area: *const c_void, + __size: c_int, + __flags: c_uint, + ) -> c_int; + pub fn InterruptDetach(__id: c_int) -> c_int; + pub fn InterruptDetach_r(__id: c_int) -> c_int; + pub fn InterruptWait(__flags: c_int, __timeout: *const u64) -> c_int; + pub fn InterruptWait_r(__flags: c_int, __timeout: *const u64) -> c_int; + pub fn InterruptCharacteristic( + __type: c_int, + __id: c_int, + __new: *mut c_uint, + __old: *mut c_uint, + ) -> c_int; + pub fn InterruptCharacteristic_r( + __type: c_int, + __id: c_int, + __new: *mut c_uint, + __old: *mut c_uint, + ) -> c_int; + + pub fn SchedGet(__pid: crate::pid_t, __tid: c_int, __param: *mut crate::sched_param) -> c_int; + pub fn SchedGet_r(__pid: crate::pid_t, __tid: c_int, __param: *mut crate::sched_param) + -> c_int; + pub fn SchedGetCpuNum() -> c_uint; + pub fn SchedSet( + __pid: crate::pid_t, + __tid: c_int, + __algorithm: c_int, + __param: *const crate::sched_param, + ) -> c_int; + pub fn SchedSet_r( + __pid: crate::pid_t, + __tid: c_int, + __algorithm: c_int, + __param: *const crate::sched_param, + ) -> c_int; + pub fn SchedInfo( + __pid: crate::pid_t, + __algorithm: c_int, + __info: *mut crate::_sched_info, + ) -> c_int; + pub fn SchedInfo_r( + __pid: crate::pid_t, + __algorithm: c_int, + __info: *mut crate::_sched_info, + ) -> c_int; + pub fn SchedYield() -> c_int; + pub fn SchedYield_r() -> c_int; + pub fn SchedCtl(__cmd: c_int, __data: *mut c_void, __length: usize) -> c_int; + pub fn SchedCtl_r(__cmd: c_int, __data: *mut c_void, __length: usize) -> c_int; + pub fn SchedJobCreate(__job: *mut nto_job_t) -> c_int; + pub fn SchedJobCreate_r(__job: *mut nto_job_t) -> c_int; + pub fn SchedJobDestroy(__job: *mut nto_job_t) -> c_int; + pub fn SchedJobDestroy_r(__job: *mut nto_job_t) -> c_int; + pub fn SchedWaypoint( + __job: *mut nto_job_t, + __new: *const i64, + __max: *const i64, + __old: *mut i64, + ) -> c_int; + pub fn SchedWaypoint_r( + __job: *mut nto_job_t, + __new:
*const i64, + __max: *const i64, + __old: *mut i64, + ) -> c_int; + + pub fn TimerCreate(__id: crate::clockid_t, __notify: *const crate::sigevent) -> c_int; + pub fn TimerCreate_r(__id: crate::clockid_t, __notify: *const crate::sigevent) -> c_int; + pub fn TimerDestroy(__id: crate::timer_t) -> c_int; + pub fn TimerDestroy_r(__id: crate::timer_t) -> c_int; + pub fn TimerSettime( + __id: crate::timer_t, + __flags: c_int, + __itime: *const crate::_itimer, + __oitime: *mut crate::_itimer, + ) -> c_int; + pub fn TimerSettime_r( + __id: crate::timer_t, + __flags: c_int, + __itime: *const crate::_itimer, + __oitime: *mut crate::_itimer, + ) -> c_int; + pub fn TimerInfo( + __pid: crate::pid_t, + __id: crate::timer_t, + __flags: c_int, + __info: *mut crate::_timer_info, + ) -> c_int; + pub fn TimerInfo_r( + __pid: crate::pid_t, + __id: crate::timer_t, + __flags: c_int, + __info: *mut crate::_timer_info, + ) -> c_int; + pub fn TimerAlarm( + __id: crate::clockid_t, + __itime: *const crate::_itimer, + __otime: *mut crate::_itimer, + ) -> c_int; + pub fn TimerAlarm_r( + __id: crate::clockid_t, + __itime: *const crate::_itimer, + __otime: *mut crate::_itimer, + ) -> c_int; + pub fn TimerTimeout( + __id: crate::clockid_t, + __flags: c_int, + __notify: *const crate::sigevent, + __ntime: *const u64, + __otime: *mut u64, + ) -> c_int; + pub fn TimerTimeout_r( + __id: crate::clockid_t, + __flags: c_int, + __notify: *const crate::sigevent, + __ntime: *const u64, + __otime: *mut u64, + ) -> c_int; + + pub fn SyncTypeCreate( + __type: c_uint, + __sync: *mut crate::sync_t, + __attr: *const crate::_sync_attr, + ) -> c_int; + pub fn SyncTypeCreate_r( + __type: c_uint, + __sync: *mut crate::sync_t, + __attr: *const crate::_sync_attr, + ) -> c_int; + pub fn SyncDestroy(__sync: *mut crate::sync_t) -> c_int; + pub fn SyncDestroy_r(__sync: *mut crate::sync_t) -> c_int; + pub fn SyncCtl(__cmd: c_int, __sync: *mut crate::sync_t, __data: *mut c_void) -> c_int; + pub fn SyncCtl_r(__cmd: c_int, __sync: *mut crate::sync_t, __data: *mut c_void) -> c_int; + pub fn SyncMutexEvent(__sync: *mut crate::sync_t, event: *const crate::sigevent) -> c_int; + pub fn SyncMutexEvent_r(__sync: *mut crate::sync_t, event: *const crate::sigevent) -> c_int; + pub fn SyncMutexLock(__sync: *mut crate::sync_t) -> c_int; + pub fn SyncMutexLock_r(__sync: *mut crate::sync_t) -> c_int; + pub fn SyncMutexUnlock(__sync: *mut crate::sync_t) -> c_int; + pub fn SyncMutexUnlock_r(__sync: *mut crate::sync_t) -> c_int; + pub fn SyncMutexRevive(__sync: *mut crate::sync_t) -> c_int; + pub fn SyncMutexRevive_r(__sync: *mut crate::sync_t) -> c_int; + pub fn SyncCondvarWait(__sync: *mut crate::sync_t, __mutex: *mut crate::sync_t) -> c_int; + pub fn SyncCondvarWait_r(__sync: *mut crate::sync_t, __mutex: *mut crate::sync_t) -> c_int; + pub fn SyncCondvarSignal(__sync: *mut crate::sync_t, __all: c_int) -> c_int; + pub fn SyncCondvarSignal_r(__sync: *mut crate::sync_t, __all: c_int) -> c_int; + pub fn SyncSemPost(__sync: *mut crate::sync_t) -> c_int; + pub fn SyncSemPost_r(__sync: *mut crate::sync_t) -> c_int; + pub fn SyncSemWait(__sync: *mut crate::sync_t, __tryto: c_int) -> c_int; + pub fn SyncSemWait_r(__sync: *mut crate::sync_t, __tryto: c_int) -> c_int; + + pub fn ClockTime(__id: crate::clockid_t, _new: *const u64, __old: *mut u64) -> c_int; + pub fn ClockTime_r(__id: crate::clockid_t, _new: *const u64, __old: *mut u64) -> c_int; + pub fn ClockAdjust( + __id: crate::clockid_t, + _new: *const crate::_clockadjust, + __old: *mut crate::_clockadjust, + ) -> c_int; + 
pub fn ClockAdjust_r( + __id: crate::clockid_t, + _new: *const crate::_clockadjust, + __old: *mut crate::_clockadjust, + ) -> c_int; + pub fn ClockPeriod( + __id: crate::clockid_t, + _new: *const crate::_clockperiod, + __old: *mut crate::_clockperiod, + __reserved: c_int, + ) -> c_int; + pub fn ClockPeriod_r( + __id: crate::clockid_t, + _new: *const crate::_clockperiod, + __old: *mut crate::_clockperiod, + __reserved: c_int, + ) -> c_int; + pub fn ClockId(__pid: crate::pid_t, __tid: c_int) -> c_int; + pub fn ClockId_r(__pid: crate::pid_t, __tid: c_int) -> c_int; + + // + //TODO: The following commented-out functions are implemented in assembly. + // We can implement them either via a C stub or Rust's inline assembly. + // + //pub fn InterruptEnable(); + //pub fn InterruptDisable(); + pub fn InterruptMask(__intr: c_int, __id: c_int) -> c_int; + pub fn InterruptUnmask(__intr: c_int, __id: c_int) -> c_int; + //pub fn InterruptLock(__spin: *mut intrspin); + //pub fn InterruptUnlock(__spin: *mut intrspin); + //pub fn InterruptStatus() -> c_uint; +} diff --git a/vendor/libc/src/unix/nto/x86_64.rs b/vendor/libc/src/unix/nto/x86_64.rs new file mode 100644 index 00000000000000..521b5d4ab78796 --- /dev/null +++ b/vendor/libc/src/unix/nto/x86_64.rs @@ -0,0 +1,111 @@ +use crate::prelude::*; + +pub type wchar_t = u32; +pub type time_t = i64; + +s! { + #[repr(align(8))] + pub struct x86_64_cpu_registers { + pub rdi: u64, + pub rsi: u64, + pub rdx: u64, + pub r10: u64, + pub r8: u64, + pub r9: u64, + pub rax: u64, + pub rbx: u64, + pub rbp: u64, + pub rcx: u64, + pub r11: u64, + pub r12: u64, + pub r13: u64, + pub r14: u64, + pub r15: u64, + pub rip: u64, + pub cs: u32, + rsvd1: u32, + pub rflags: u64, + pub rsp: u64, + pub ss: u32, + rsvd2: u32, + } + + #[repr(align(8))] + pub struct mcontext_t { + pub cpu: x86_64_cpu_registers, + pub fpu: x86_64_fpu_registers, + } + + pub struct stack_t { + pub ss_sp: *mut c_void, + pub ss_size: size_t, + pub ss_flags: c_int, + } + + pub struct fsave_area_64 { + pub fpu_control_word: u32, + pub fpu_status_word: u32, + pub fpu_tag_word: u32, + pub fpu_ip: u32, + pub fpu_cs: u32, + pub fpu_op: u32, + pub fpu_ds: u32, + pub st_regs: [u8; 80], + } + + pub struct fxsave_area_64 { + pub fpu_control_word: u16, + pub fpu_status_word: u16, + pub fpu_tag_word: u16, + pub fpu_operand: u16, + pub fpu_rip: u64, + pub fpu_rdp: u64, + pub mxcsr: u32, + pub mxcsr_mask: u32, + pub st_regs: [u8; 128], + pub xmm_regs: [u8; 128], + reserved2: [u8; 224], + } + + pub struct fpu_extention_savearea_64 { + pub other: [u8; 512], + pub xstate_bv: u64, + pub xstate_undef: [u64; 7], + pub xstate_info: [u8; 224], + } +} + +s_no_extra_traits! { + pub union x86_64_fpu_registers { + pub fsave_area: fsave_area_64, + pub fxsave_area: fxsave_area_64, + pub xsave_area: fpu_extention_savearea_64, + pub data: [u8; 1024], + } +} + +cfg_if!
{ + if #[cfg(feature = "extra_traits")] { + impl Eq for x86_64_fpu_registers {} + + impl PartialEq for x86_64_fpu_registers { + fn eq(&self, other: &x86_64_fpu_registers) -> bool { + unsafe { + self.fsave_area == other.fsave_area + || self.fxsave_area == other.fxsave_area + || self.xsave_area == other.xsave_area + } + } + } + + impl hash::Hash for x86_64_fpu_registers { + fn hash<H: hash::Hasher>(&self, state: &mut H) { + unsafe { + self.fsave_area.hash(state); + self.fxsave_area.hash(state); + self.xsave_area.hash(state); + } + } + } + } +} diff --git a/vendor/libc/src/unix/nuttx/mod.rs b/vendor/libc/src/unix/nuttx/mod.rs new file mode 100644 index 00000000000000..3d3e2c3448841d --- /dev/null +++ b/vendor/libc/src/unix/nuttx/mod.rs @@ -0,0 +1,597 @@ +use crate::prelude::*; +use crate::{in6_addr, in_addr_t, timespec, DIR}; + +pub type nlink_t = u16; +pub type ino_t = u16; +pub type blkcnt_t = u64; +pub type blksize_t = i16; +pub type cc_t = u8; +pub type clock_t = i64; +pub type dev_t = i32; +pub type fsblkcnt_t = u64; +pub type locale_t = *mut i8; +pub type mode_t = u32; +pub type nfds_t = u32; +pub type off_t = i64; +pub type pthread_key_t = i32; +pub type pthread_mutexattr_t = u8; +pub type pthread_rwlockattr_t = i32; +pub type pthread_t = i32; +pub type rlim_t = i64; +pub type sa_family_t = u16; +pub type socklen_t = u32; +pub type speed_t = usize; +pub type suseconds_t = i32; +pub type tcflag_t = u32; +pub type clockid_t = i32; +pub type time_t = i64; +pub type wchar_t = i32; + +s! { + pub struct stat { + pub st_dev: dev_t, + pub st_ino: ino_t, + pub st_mode: mode_t, + pub st_nlink: nlink_t, + pub st_uid: u32, + pub st_gid: u32, + pub st_rdev: dev_t, + pub st_size: off_t, + pub st_atim: timespec, + pub st_mtim: timespec, + pub st_ctim: timespec, + pub st_blksize: blksize_t, + pub st_blocks: i64, + __reserved: [usize; __DEFAULT_RESERVED_SIZE__], + } + + pub struct sockaddr { + pub sa_family: sa_family_t, + pub sa_data: [u8; 14], + } + + pub struct passwd { + pub pw_name: *const c_char, + pub pw_passwd: *const c_char, + pub pw_uid: u32, + pub pw_gid: u32, + pub pw_gecos: *const c_char, + pub pw_dir: *const c_char, + pub pw_shell: *const c_char, + __reserved: [usize; __DEFAULT_RESERVED_SIZE__], + } + + pub struct sem_t { + __val: [usize; __SEM_SIZE__], + } + + pub struct pthread_attr_t { + __val: [usize; __PTHREAD_ATTR_SIZE__], + } + + pub struct pthread_mutex_t { + __val: [usize; __PTHREAD_MUTEX_SIZE__], + } + + pub struct pthread_cond_t { + __val: [usize; __PTHREAD_COND_SIZE__], + } + + pub struct pthread_condattr_t { + __val: [usize; __PTHREAD_CONDATTR_SIZE__], + } + + pub struct Dl_info { + pub dli_fname: *const c_char, + pub dli_fbase: *mut c_void, + pub dli_sname: *const c_char, + pub dli_saddr: *mut c_void, + } + + pub struct lconv { + pub decimal_point: *const c_char, + pub thousands_sep: *const c_char, + pub grouping: *const c_char, + pub int_curr_symbol: *const c_char, + pub currency_symbol: *const c_char, + pub mon_decimal_point: *const c_char, + pub mon_thousands_sep: *const c_char, + pub mon_grouping: *const c_char, + pub positive_sign: *const c_char, + pub negative_sign: *const c_char, + pub int_frac_digits: i8, + pub frac_digits: i8, + pub p_cs_precedes: i8, + pub p_sep_by_space: i8, + pub n_cs_precedes: i8, + pub n_sep_by_space: i8, + pub p_sign_posn: i8, + pub n_sign_posn: i8, + pub int_n_cs_precedes: i8, + pub int_n_sep_by_space: i8, + pub int_n_sign_posn: i8, + pub int_p_cs_precedes: i8, + pub int_p_sep_by_space: i8, + pub int_p_sign_posn: i8, + __reserved: [usize;
__DEFAULT_RESERVED_SIZE__], + } + + pub struct tm { + pub tm_sec: i32, + pub tm_min: i32, + pub tm_hour: i32, + pub tm_mday: i32, + pub tm_mon: i32, + pub tm_year: i32, + pub tm_wday: i32, + pub tm_yday: i32, + pub tm_isdst: i32, + pub tm_gmtoff: isize, + pub tm_zone: *const c_char, + __reserved: [usize; __DEFAULT_RESERVED_SIZE__], + } + + pub struct addrinfo { + pub ai_flags: i32, + pub ai_family: i32, + pub ai_socktype: i32, + pub ai_protocol: i32, + pub ai_addrlen: socklen_t, + pub ai_addr: *mut sockaddr, + pub ai_canonname: *mut c_char, + pub ai_next: *mut addrinfo, + __reserved: [usize; __DEFAULT_RESERVED_SIZE__], + } + + pub struct pthread_rwlock_t { + __val: [usize; __PTHREAD_RWLOCK_SIZE__], + } + + pub struct statvfs { + pub f_bsize: usize, + pub f_frsize: usize, + pub f_blocks: fsblkcnt_t, + pub f_bfree: fsblkcnt_t, + pub f_bavail: fsblkcnt_t, + pub f_files: fsblkcnt_t, + pub f_ffree: fsblkcnt_t, + pub f_favail: fsblkcnt_t, + pub f_fsid: usize, + pub f_flag: usize, + pub f_namemax: usize, + __reserved: [usize; __DEFAULT_RESERVED_SIZE__], + } + + pub struct dirent { + pub d_type: u8, + pub d_name: [c_char; __NAME_MAX__ + 1], + } + + pub struct fd_set { + __val: [u32; __FDSET_SIZE__], + } + + pub struct sigset_t { + __val: [u32; __SIGSET_SIZE__], + } + + pub struct sigaction { + pub sa_handler: usize, + pub sa_mask: sigset_t, + pub sa_flags: i32, + pub sa_user: usize, + __reserved: [usize; __DEFAULT_RESERVED_SIZE__], + } + + pub struct termios { + pub c_iflag: tcflag_t, + pub c_oflag: tcflag_t, + pub c_cflag: tcflag_t, + pub c_lflag: tcflag_t, + pub c_cc: [cc_t; 12], + pub c_speed: speed_t, + __reserved: [usize; __DEFAULT_RESERVED_SIZE__], + } + + pub struct in_addr { + pub s_addr: in_addr_t, + } + + pub struct sockaddr_in { + pub sin_family: sa_family_t, + pub sin_port: crate::in_port_t, + pub sin_addr: crate::in_addr, + pub sin_zero: [u8; 8], + } + + pub struct sockaddr_in6 { + pub sin6_family: sa_family_t, + pub sin6_port: crate::in_port_t, + pub sin6_flowinfo: u32, + pub sin6_addr: crate::in6_addr, + pub sin6_scope_id: u32, + } + + pub struct sockaddr_un { + pub sun_family: sa_family_t, + pub sun_path: [c_char; 108], + } + + pub struct sockaddr_storage { + pub ss_family: sa_family_t, + ss_data: [u32; __SOCKADDR_STORAGE_SIZE__], + } + + pub struct ip_mreq { + pub imr_multiaddr: in_addr, + pub imr_interface: in_addr, + } + + pub struct ipv6_mreq { + pub ipv6mr_multiaddr: in6_addr, + pub ipv6mr_interface: u32, + } + + pub struct timeval { + pub tv_sec: time_t, + pub tv_usec: suseconds_t, + } +} + +// Reserved two pointer size for reserved area for some structures. +// This ensures that the size of these structures is large enough +// if more fields are added in the NuttX side. +// +// These structures are that defined by POSIX but only necessary fields are included, +// for example, struct passwd, https://pubs.opengroup.org/onlinepubs/009695399/basedefs/pwd.h.html, +// POSIX only defines following fields in struct passwd: +// char *pw_name User's login name. +// char *pw_passwd Encrypted password. +// uid_t pw_uid Numerical user ID. +// gid_t pw_gid Numerical group ID. +// char *pw_dir Initial working directory. +// char *pw_shell Program to use as shell. +// Other fields can be different depending on the implementation. 
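+//
+// Illustrative sketch only (not from the vendored libc source; `example_t`,
+// `field_a` and `field_b` are hypothetical names): the pattern described
+// above is simply a reserved tail on each binding, so the Rust-side size
+// stays large enough even if NuttX later appends up to two pointer-sized
+// fields to the C struct:
+//
+//     #[repr(C)]
+//     struct example_t {
+//         field_a: u32,
+//         field_b: u32,
+//         // spare room for fields added later on the C side
+//         __reserved: [usize; __DEFAULT_RESERVED_SIZE__],
+//     }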
+ +const __DEFAULT_RESERVED_SIZE__: usize = 2; + +const __SOCKADDR_STORAGE_SIZE__: usize = 36; +const __PTHREAD_ATTR_SIZE__: usize = 5; +const __PTHREAD_MUTEX_SIZE__: usize = 9; +const __PTHREAD_COND_SIZE__: usize = 7; +const __PTHREAD_CONDATTR_SIZE__: usize = 5; +const __PTHREAD_RWLOCK_SIZE__: usize = 17; +const __SEM_SIZE__: usize = 6; +const __NAME_MAX__: usize = 64; +const __FDSET_SIZE__: usize = 10; +const __SIGSET_SIZE__: usize = 8; + +pub const PTHREAD_COND_INITIALIZER: pthread_cond_t = pthread_cond_t { + __val: [0; __PTHREAD_COND_SIZE__], +}; +pub const PTHREAD_MUTEX_INITIALIZER: pthread_mutex_t = pthread_mutex_t { + __val: [0; __PTHREAD_MUTEX_SIZE__], +}; + +// dlfcn.h +pub const RTLD_DEFAULT: *mut c_void = ptr::null_mut(); + +// stdlib.h +pub const EXIT_SUCCESS: i32 = 0; +pub const EXIT_FAILURE: i32 = 1; + +// time.h +pub const CLOCK_REALTIME: i32 = 0; +pub const CLOCK_MONOTONIC: i32 = 1; + +// errno.h +pub const EPERM: i32 = 1; +pub const ENOENT: i32 = 2; +pub const ESRCH: i32 = 3; +pub const EINTR: i32 = 4; +pub const EIO: i32 = 5; +pub const ENXIO: i32 = 6; +pub const E2BIG: i32 = 7; +pub const ENOEXEC: i32 = 8; +pub const EBADF: i32 = 9; +pub const ECHILD: i32 = 10; +pub const EAGAIN: i32 = 11; +pub const ENOMEM: i32 = 12; +pub const EACCES: i32 = 13; +pub const EFAULT: i32 = 14; +pub const ENOTBLK: i32 = 15; +pub const EBUSY: i32 = 16; +pub const EEXIST: i32 = 17; +pub const EXDEV: i32 = 18; +pub const ENODEV: i32 = 19; +pub const ENOTDIR: i32 = 20; +pub const EISDIR: i32 = 21; +pub const EINVAL: i32 = 22; +pub const ENFILE: i32 = 23; +pub const EMFILE: i32 = 24; +pub const ENOTTY: i32 = 25; +pub const ETXTBSY: i32 = 26; +pub const EFBIG: i32 = 27; +pub const ENOSPC: i32 = 28; +pub const ESPIPE: i32 = 29; +pub const EROFS: i32 = 30; +pub const EMLINK: i32 = 31; +pub const EPIPE: i32 = 32; +pub const EDOM: i32 = 33; +pub const ERANGE: i32 = 34; +pub const EDEADLK: i32 = 35; +pub const ENAMETOOLONG: i32 = 36; +pub const ENOLCK: i32 = 37; +pub const ENOSYS: i32 = 38; +pub const ENOTEMPTY: i32 = 39; +pub const ELOOP: i32 = 40; +pub const EWOULDBLOCK: i32 = EAGAIN; +pub const ENOMSG: i32 = 42; +pub const EIDRM: i32 = 43; +pub const ECHRNG: i32 = 44; +pub const EL2NSYNC: i32 = 45; +pub const EL3HLT: i32 = 46; +pub const EL3RST: i32 = 47; +pub const ELNRNG: i32 = 48; +pub const EUNATCH: i32 = 49; +pub const ENOCSI: i32 = 50; +pub const EL2HLT: i32 = 51; +pub const EBADE: i32 = 52; +pub const EBADR: i32 = 53; +pub const EXFULL: i32 = 54; +pub const ENOANO: i32 = 55; +pub const EBADRQC: i32 = 56; +pub const EBADSLT: i32 = 57; +pub const EDEADLOCK: i32 = EDEADLK; +pub const EBFONT: i32 = 59; +pub const ENOSTR: i32 = 60; +pub const ENODATA: i32 = 61; +pub const ETIME: i32 = 62; +pub const ENOSR: i32 = 63; +pub const ENONET: i32 = 64; +pub const ENOPKG: i32 = 65; +pub const EREMOTE: i32 = 66; +pub const ENOLINK: i32 = 67; +pub const EADV: i32 = 68; +pub const ESRMNT: i32 = 69; +pub const ECOMM: i32 = 70; +pub const EPROTO: i32 = 71; +pub const EMULTIHOP: i32 = 72; +pub const EDOTDOT: i32 = 73; +pub const EBADMSG: i32 = 74; +pub const EOVERFLOW: i32 = 75; +pub const ENOTUNIQ: i32 = 76; +pub const EBADFD: i32 = 77; +pub const EREMCHG: i32 = 78; +pub const ELIBACC: i32 = 79; +pub const ELIBBAD: i32 = 80; +pub const ELIBSCN: i32 = 81; +pub const ELIBMAX: i32 = 82; +pub const ELIBEXEC: i32 = 83; +pub const EILSEQ: i32 = 84; +pub const ERESTART: i32 = 85; +pub const ESTRPIPE: i32 = 86; +pub const EUSERS: i32 = 87; +pub const ENOTSOCK: i32 = 88; +pub const EDESTADDRREQ: i32 = 89; +pub const 
EMSGSIZE: i32 = 90; +pub const EPROTOTYPE: i32 = 91; +pub const ENOPROTOOPT: i32 = 92; +pub const EPROTONOSUPPORT: i32 = 93; +pub const ESOCKTNOSUPPORT: i32 = 94; +pub const EOPNOTSUPP: i32 = 95; +pub const EPFNOSUPPORT: i32 = 96; +pub const EAFNOSUPPORT: i32 = 97; +pub const EADDRINUSE: i32 = 98; +pub const EADDRNOTAVAIL: i32 = 99; +pub const ENETDOWN: i32 = 100; +pub const ENETUNREACH: i32 = 101; +pub const ENETRESET: i32 = 102; +pub const ECONNABORTED: i32 = 103; +pub const ECONNRESET: i32 = 104; +pub const ENOBUFS: i32 = 105; +pub const EISCONN: i32 = 106; +pub const ENOTCONN: i32 = 107; +pub const ESHUTDOWN: i32 = 108; +pub const ETOOMANYREFS: i32 = 109; +pub const ETIMEDOUT: i32 = 110; +pub const ECONNREFUSED: i32 = 111; +pub const EHOSTDOWN: i32 = 112; +pub const EHOSTUNREACH: i32 = 113; +pub const EALREADY: i32 = 114; +pub const EINPROGRESS: i32 = 115; +pub const ESTALE: i32 = 116; +pub const EUCLEAN: i32 = 117; +pub const ENOTNAM: i32 = 118; +pub const ENAVAIL: i32 = 119; +pub const EISNAM: i32 = 120; +pub const EREMOTEIO: i32 = 121; +pub const EDQUOT: i32 = 122; +pub const ENOMEDIUM: i32 = 123; +pub const EMEDIUMTYPE: i32 = 124; +pub const ECANCELED: i32 = 125; +pub const ENOKEY: i32 = 126; +pub const EKEYEXPIRED: i32 = 127; +pub const EKEYREVOKED: i32 = 128; +pub const EKEYREJECTED: i32 = 129; +pub const EOWNERDEAD: i32 = 130; +pub const ENOTRECOVERABLE: i32 = 131; +pub const ERFKILL: i32 = 132; +pub const EHWPOISON: i32 = 133; +pub const ELBIN: i32 = 134; +pub const EFTYPE: i32 = 135; +pub const ENMFILE: i32 = 136; +pub const EPROCLIM: i32 = 137; +pub const ENOTSUP: i32 = 138; +pub const ENOSHARE: i32 = 139; +pub const ECASECLASH: i32 = 140; + +// fcntl.h +pub const FIOCLEX: i32 = 0x30b; +pub const F_SETFL: i32 = 0x9; +pub const F_DUPFD_CLOEXEC: i32 = 0x12; +pub const F_GETFD: i32 = 0x1; +pub const F_GETFL: i32 = 0x2; +pub const O_RDONLY: i32 = 0x1; +pub const O_WRONLY: i32 = 0x2; +pub const O_RDWR: i32 = 0x3; +pub const O_CREAT: i32 = 0x4; +pub const O_EXCL: i32 = 0x8; +pub const O_NOCTTY: i32 = 0x0; +pub const O_TRUNC: i32 = 0x20; +pub const O_APPEND: i32 = 0x10; +pub const O_NONBLOCK: i32 = 0x40; +pub const O_DSYNC: i32 = 0x80; +pub const O_DIRECT: i32 = 0x200; +pub const O_LARGEFILE: i32 = 0x2000; +pub const O_DIRECTORY: i32 = 0x800; +pub const O_NOFOLLOW: i32 = 0x1000; +pub const O_NOATIME: i32 = 0x40000; +pub const O_CLOEXEC: i32 = 0x400; +pub const O_ACCMODE: i32 = 0x0003; +pub const AT_FDCWD: i32 = -100; +pub const AT_REMOVEDIR: i32 = 0x200; + +// sys/types.h +pub const SEEK_SET: i32 = 0; +pub const SEEK_CUR: i32 = 1; +pub const SEEK_END: i32 = 2; + +// sys/stat.h +pub const S_IFDIR: u32 = 0x4000; +pub const S_IFLNK: u32 = 0xA000; +pub const S_IFREG: u32 = 0x8000; +pub const S_IFMT: u32 = 0xF000; +pub const S_IFIFO: u32 = 0x1000; +pub const S_IFSOCK: u32 = 0xc000; +pub const S_IFBLK: u32 = 0x6000; +pub const S_IFCHR: u32 = 0x2000; +pub const S_IRUSR: u32 = 0x100; +pub const S_IWUSR: u32 = 0x80; +pub const S_IXUSR: u32 = 0x40; +pub const S_IRGRP: u32 = 0x20; +pub const S_IWGRP: u32 = 0x10; +pub const S_IXGRP: u32 = 0x8; +pub const S_IROTH: u32 = 0x004; +pub const S_IWOTH: u32 = 0x002; +pub const S_IXOTH: u32 = 0x001; + +// sys/poll.h +pub const POLLIN: i16 = 0x01; +pub const POLLOUT: i16 = 0x04; +pub const POLLHUP: i16 = 0x10; +pub const POLLERR: i16 = 0x08; +pub const POLLNVAL: i16 = 0x20; + +// sys/socket.h +pub const AF_UNIX: i32 = 1; +pub const SOCK_DGRAM: i32 = 2; +pub const SOCK_STREAM: i32 = 1; +pub const AF_INET: i32 = 2; +pub const AF_INET6: i32 = 10; +pub const 
MSG_PEEK: i32 = 0x02; +pub const SOL_SOCKET: i32 = 1; +pub const SHUT_WR: i32 = 2; +pub const SHUT_RD: i32 = 1; +pub const SHUT_RDWR: i32 = 3; +pub const SO_ERROR: i32 = 4; +pub const SO_REUSEADDR: i32 = 11; +pub const SOMAXCONN: i32 = 8; +pub const SO_LINGER: i32 = 6; +pub const SO_RCVTIMEO: i32 = 0xa; +pub const SO_SNDTIMEO: i32 = 0xe; +pub const SO_BROADCAST: i32 = 1; + +// netinet/tcp.h +pub const TCP_NODELAY: i32 = 0x10; + +// nuttx/fs/ioctl.h +pub const FIONBIO: i32 = 0x30a; + +// unistd.h +pub const STDIN_FILENO: i32 = 0; +pub const STDOUT_FILENO: i32 = 1; +pub const STDERR_FILENO: i32 = 2; +pub const _SC_PAGESIZE: i32 = 0x36; +pub const _SC_THREAD_STACK_MIN: i32 = 0x58; +pub const _SC_GETPW_R_SIZE_MAX: i32 = 0x25; + +// signal.h +pub const SIGHUP: c_int = 1; +pub const SIGINT: c_int = 2; +pub const SIGQUIT: c_int = 3; +pub const SIGILL: c_int = 4; +pub const SIGTRAP: c_int = 5; +pub const SIGABRT: c_int = 6; +pub const SIGIOT: c_int = 6; +pub const SIGBUS: c_int = 7; +pub const SIGFPE: c_int = 8; +pub const SIGKILL: c_int = 9; +pub const SIGUSR1: c_int = 10; +pub const SIGSEGV: c_int = 11; +pub const SIGUSR2: c_int = 12; +pub const SIGPIPE: c_int = 13; +pub const SIGALRM: c_int = 14; +pub const SIGTERM: c_int = 15; +pub const SIGSTKFLT: c_int = 16; +pub const SIGCHLD: c_int = 17; +pub const SIGCONT: c_int = 18; +pub const SIGSTOP: c_int = 19; +pub const SIGTSTP: c_int = 20; +pub const SIGTTIN: c_int = 21; +pub const SIGTTOU: c_int = 22; +pub const SIGURG: c_int = 23; +pub const SIGXCPU: c_int = 24; +pub const SIGXFSZ: c_int = 25; +pub const SIGVTALRM: c_int = 26; +pub const SIGPROF: c_int = 27; +pub const SIGWINCH: c_int = 28; +pub const SIGIO: c_int = 29; +pub const SIGPOLL: c_int = SIGIO; +pub const SIGPWR: c_int = 30; +pub const SIGSYS: c_int = 31; + +// pthread.h +pub const PTHREAD_MUTEX_NORMAL: i32 = 0; + +// netinet/in.h +pub const IP_TTL: i32 = 0x1e; +pub const IPV6_V6ONLY: i32 = 0x17; +pub const IPV6_JOIN_GROUP: i32 = 0x11; +pub const IPV6_LEAVE_GROUP: i32 = 0x12; +pub const IP_MULTICAST_LOOP: i32 = 0x13; +pub const IPV6_MULTICAST_LOOP: i32 = 0x15; +pub const IP_MULTICAST_TTL: i32 = 0x12; +pub const IP_ADD_MEMBERSHIP: i32 = 0x14; +pub const IP_DROP_MEMBERSHIP: i32 = 0x15; + +extern "C" { + pub fn __errno() -> *mut c_int; + pub fn bind(sockfd: i32, addr: *const sockaddr, addrlen: socklen_t) -> i32; + pub fn ioctl(fd: i32, request: i32, ...) 
-> i32; + pub fn dirfd(dirp: *mut DIR) -> i32; + pub fn recvfrom( + sockfd: i32, + buf: *mut c_void, + len: usize, + flags: i32, + src_addr: *mut sockaddr, + addrlen: *mut socklen_t, + ) -> i32; + + pub fn pthread_create( + thread: *mut pthread_t, + attr: *const pthread_attr_t, + start_routine: extern "C" fn(*mut c_void) -> *mut c_void, + arg: *mut c_void, + ) -> i32; + + pub fn clock_gettime(clockid: clockid_t, tp: *mut timespec) -> i32; + pub fn futimens(fd: i32, times: *const timespec) -> i32; + pub fn pthread_condattr_setclock(attr: *mut pthread_condattr_t, clock_id: clockid_t) -> i32; + pub fn pthread_setname_np(thread: pthread_t, name: *const c_char) -> i32; + pub fn pthread_getname_np(thread: pthread_t, name: *mut c_char, len: usize) -> i32; + pub fn getrandom(buf: *mut c_void, buflen: usize, flags: u32) -> isize; + pub fn arc4random() -> u32; + pub fn arc4random_buf(bytes: *mut c_void, nbytes: usize); +} diff --git a/vendor/libc/src/unix/redox/mod.rs b/vendor/libc/src/unix/redox/mod.rs new file mode 100644 index 00000000000000..50bdaf4d4f06ba --- /dev/null +++ b/vendor/libc/src/unix/redox/mod.rs @@ -0,0 +1,1496 @@ +use crate::prelude::*; + +pub type wchar_t = i32; + +pub type blkcnt_t = c_ulong; +pub type blksize_t = c_long; +pub type clock_t = c_long; +pub type clockid_t = c_int; +pub type dev_t = c_long; +pub type fsblkcnt_t = c_ulong; +pub type fsfilcnt_t = c_ulong; +pub type ino_t = c_ulonglong; +pub type mode_t = c_int; +pub type nfds_t = c_ulong; +pub type nlink_t = c_ulong; +pub type off_t = c_longlong; +pub type pthread_t = *mut c_void; +// Must be usize due to library/std/sys_common/thread_local.rs, +// should technically be *mut c_void +pub type pthread_key_t = usize; +pub type rlim_t = c_ulonglong; +pub type sa_family_t = u16; +pub type sem_t = *mut c_void; +pub type sigset_t = c_ulonglong; +pub type socklen_t = u32; +pub type speed_t = u32; +pub type suseconds_t = c_int; +pub type tcflag_t = u32; +pub type time_t = c_longlong; +pub type id_t = c_uint; +pub type pid_t = usize; +pub type uid_t = c_int; +pub type gid_t = c_int; + +#[derive(Debug)] +pub enum timezone {} +impl Copy for timezone {} +impl Clone for timezone { + fn clone(&self) -> timezone { + *self + } +} + +s_no_extra_traits! { + #[repr(C)] + pub struct utsname { + pub sysname: [c_char; UTSLENGTH], + pub nodename: [c_char; UTSLENGTH], + pub release: [c_char; UTSLENGTH], + pub version: [c_char; UTSLENGTH], + pub machine: [c_char; UTSLENGTH], + pub domainname: [c_char; UTSLENGTH], + } + + pub struct dirent { + pub d_ino: crate::ino_t, + pub d_off: off_t, + pub d_reclen: c_ushort, + pub d_type: c_uchar, + pub d_name: [c_char; 256], + } + + pub struct sockaddr_un { + pub sun_family: crate::sa_family_t, + pub sun_path: [c_char; 108], + } + + pub struct sockaddr_storage { + pub ss_family: crate::sa_family_t, + __ss_padding: [u8; 128 - size_of::() - size_of::()], + __ss_align: c_ulong, + } +} + +s! 
{ + pub struct addrinfo { + pub ai_flags: c_int, + pub ai_family: c_int, + pub ai_socktype: c_int, + pub ai_protocol: c_int, + pub ai_addrlen: size_t, + pub ai_canonname: *mut c_char, + pub ai_addr: *mut crate::sockaddr, + pub ai_next: *mut crate::addrinfo, + } + + pub struct Dl_info { + pub dli_fname: *const c_char, + pub dli_fbase: *mut c_void, + pub dli_sname: *const c_char, + pub dli_saddr: *mut c_void, + } + + pub struct epoll_event { + pub events: u32, + pub u64: u64, + pub _pad: u64, + } + + pub struct fd_set { + fds_bits: [c_ulong; crate::FD_SETSIZE as usize / ULONG_SIZE], + } + + pub struct in_addr { + pub s_addr: crate::in_addr_t, + } + + pub struct ip_mreq { + pub imr_multiaddr: crate::in_addr, + pub imr_interface: crate::in_addr, + } + + pub struct lconv { + pub currency_symbol: *const c_char, + pub decimal_point: *const c_char, + pub frac_digits: c_char, + pub grouping: *const c_char, + pub int_curr_symbol: *const c_char, + pub int_frac_digits: c_char, + pub mon_decimal_point: *const c_char, + pub mon_grouping: *const c_char, + pub mon_thousands_sep: *const c_char, + pub negative_sign: *const c_char, + pub n_cs_precedes: c_char, + pub n_sep_by_space: c_char, + pub n_sign_posn: c_char, + pub positive_sign: *const c_char, + pub p_cs_precedes: c_char, + pub p_sep_by_space: c_char, + pub p_sign_posn: c_char, + pub thousands_sep: *const c_char, + } + + pub struct msghdr { + pub msg_name: *mut c_void, + pub msg_namelen: crate::socklen_t, + pub msg_iov: *mut crate::iovec, + pub msg_iovlen: size_t, + pub msg_control: *mut c_void, + pub msg_controllen: size_t, + pub msg_flags: c_int, + } + + pub struct cmsghdr { + pub cmsg_len: size_t, + pub cmsg_level: c_int, + pub cmsg_type: c_int, + } + + pub struct passwd { + pub pw_name: *mut c_char, + pub pw_passwd: *mut c_char, + pub pw_uid: crate::uid_t, + pub pw_gid: crate::gid_t, + pub pw_gecos: *mut c_char, + pub pw_dir: *mut c_char, + pub pw_shell: *mut c_char, + } + + // FIXME(1.0): This should not implement `PartialEq` + #[allow(unpredictable_function_pointer_comparisons)] + pub struct sigaction { + pub sa_sigaction: crate::sighandler_t, + pub sa_flags: c_ulong, + pub sa_restorer: Option, + pub sa_mask: crate::sigset_t, + } + + pub struct siginfo_t { + pub si_signo: c_int, + pub si_errno: c_int, + pub si_code: c_int, + _pad: [c_int; 29], + _align: [usize; 0], + } + + pub struct sockaddr { + pub sa_family: crate::sa_family_t, + pub sa_data: [c_char; 14], + } + + pub struct sockaddr_in { + pub sin_family: crate::sa_family_t, + pub sin_port: crate::in_port_t, + pub sin_addr: crate::in_addr, + pub sin_zero: [c_char; 8], + } + + pub struct sockaddr_in6 { + pub sin6_family: crate::sa_family_t, + pub sin6_port: crate::in_port_t, + pub sin6_flowinfo: u32, + pub sin6_addr: crate::in6_addr, + pub sin6_scope_id: u32, + } + + pub struct stat { + pub st_dev: crate::dev_t, + pub st_ino: crate::ino_t, + pub st_nlink: crate::nlink_t, + pub st_mode: mode_t, + pub st_uid: crate::uid_t, + pub st_gid: crate::gid_t, + pub st_rdev: crate::dev_t, + pub st_size: off_t, + pub st_blksize: crate::blksize_t, + pub st_blocks: crate::blkcnt_t, + pub st_atime: crate::time_t, + pub st_atime_nsec: c_long, + pub st_mtime: crate::time_t, + pub st_mtime_nsec: c_long, + pub st_ctime: crate::time_t, + pub st_ctime_nsec: c_long, + _pad: [c_char; 24], + } + + pub struct statvfs { + pub f_bsize: c_ulong, + pub f_frsize: c_ulong, + pub f_blocks: crate::fsblkcnt_t, + pub f_bfree: crate::fsblkcnt_t, + pub f_bavail: crate::fsblkcnt_t, + pub f_files: crate::fsfilcnt_t, + pub f_ffree: 
crate::fsfilcnt_t, + pub f_favail: crate::fsfilcnt_t, + pub f_fsid: c_ulong, + pub f_flag: c_ulong, + pub f_namemax: c_ulong, + } + + pub struct termios { + pub c_iflag: crate::tcflag_t, + pub c_oflag: crate::tcflag_t, + pub c_cflag: crate::tcflag_t, + pub c_lflag: crate::tcflag_t, + pub c_line: crate::cc_t, + pub c_cc: [crate::cc_t; crate::NCCS], + pub c_ispeed: crate::speed_t, + pub c_ospeed: crate::speed_t, + } + + pub struct tm { + pub tm_sec: c_int, + pub tm_min: c_int, + pub tm_hour: c_int, + pub tm_mday: c_int, + pub tm_mon: c_int, + pub tm_year: c_int, + pub tm_wday: c_int, + pub tm_yday: c_int, + pub tm_isdst: c_int, + pub tm_gmtoff: c_long, + pub tm_zone: *const c_char, + } + + pub struct ucred { + pub pid: pid_t, + pub uid: uid_t, + pub gid: gid_t, + } + + #[cfg_attr(target_pointer_width = "32", repr(C, align(4)))] + #[cfg_attr(target_pointer_width = "64", repr(C, align(8)))] + pub struct pthread_attr_t { + bytes: [u8; _PTHREAD_ATTR_SIZE], + } + #[repr(C)] + #[repr(align(4))] + pub struct pthread_barrier_t { + bytes: [u8; _PTHREAD_BARRIER_SIZE], + } + #[repr(C)] + #[repr(align(4))] + pub struct pthread_barrierattr_t { + bytes: [u8; _PTHREAD_BARRIERATTR_SIZE], + } + #[repr(C)] + #[repr(align(4))] + pub struct pthread_mutex_t { + bytes: [u8; _PTHREAD_MUTEX_SIZE], + } + #[repr(C)] + #[repr(align(4))] + pub struct pthread_rwlock_t { + bytes: [u8; _PTHREAD_RWLOCK_SIZE], + } + #[repr(C)] + #[repr(align(4))] + pub struct pthread_mutexattr_t { + bytes: [u8; _PTHREAD_MUTEXATTR_SIZE], + } + #[repr(C)] + #[repr(align(1))] + pub struct pthread_rwlockattr_t { + bytes: [u8; _PTHREAD_RWLOCKATTR_SIZE], + } + #[repr(C)] + #[repr(align(4))] + pub struct pthread_cond_t { + bytes: [u8; _PTHREAD_COND_SIZE], + } + #[repr(C)] + #[repr(align(4))] + pub struct pthread_condattr_t { + bytes: [u8; _PTHREAD_CONDATTR_SIZE], + } + #[repr(C)] + #[repr(align(4))] + pub struct pthread_once_t { + bytes: [u8; _PTHREAD_ONCE_SIZE], + } + #[repr(C)] + #[repr(align(4))] + pub struct pthread_spinlock_t { + bytes: [u8; _PTHREAD_SPINLOCK_SIZE], + } +} +const _PTHREAD_ATTR_SIZE: usize = 32; +const _PTHREAD_RWLOCKATTR_SIZE: usize = 1; +const _PTHREAD_RWLOCK_SIZE: usize = 4; +const _PTHREAD_BARRIER_SIZE: usize = 24; +const _PTHREAD_BARRIERATTR_SIZE: usize = 4; +const _PTHREAD_CONDATTR_SIZE: usize = 8; +const _PTHREAD_COND_SIZE: usize = 8; +const _PTHREAD_MUTEX_SIZE: usize = 12; +const _PTHREAD_MUTEXATTR_SIZE: usize = 20; +const _PTHREAD_ONCE_SIZE: usize = 4; +const _PTHREAD_SPINLOCK_SIZE: usize = 4; + +pub const UTSLENGTH: usize = 65; + +// intentionally not public, only used for fd_set +cfg_if! 
{ + if #[cfg(target_pointer_width = "32")] { + const ULONG_SIZE: usize = 32; + } else if #[cfg(target_pointer_width = "64")] { + const ULONG_SIZE: usize = 64; + } else { + // Unknown target_pointer_width + } +} + +// limits.h +pub const PATH_MAX: c_int = 4096; + +// fcntl.h +pub const F_GETLK: c_int = 5; +pub const F_SETLK: c_int = 6; +pub const F_SETLKW: c_int = 7; +pub const F_ULOCK: c_int = 0; +pub const F_LOCK: c_int = 1; +pub const F_TLOCK: c_int = 2; +pub const F_TEST: c_int = 3; + +pub const AT_FDCWD: c_int = -100; + +// FIXME(redox): relibc { +pub const RTLD_DEFAULT: *mut c_void = ptr::null_mut(); +// } + +// dlfcn.h +pub const RTLD_LAZY: c_int = 0x0001; +pub const RTLD_NOW: c_int = 0x0002; +pub const RTLD_GLOBAL: c_int = 0x0100; +pub const RTLD_LOCAL: c_int = 0x0000; + +// errno.h +pub const EPERM: c_int = 1; /* Operation not permitted */ +pub const ENOENT: c_int = 2; /* No such file or directory */ +pub const ESRCH: c_int = 3; /* No such process */ +pub const EINTR: c_int = 4; /* Interrupted system call */ +pub const EIO: c_int = 5; /* I/O error */ +pub const ENXIO: c_int = 6; /* No such device or address */ +pub const E2BIG: c_int = 7; /* Argument list too long */ +pub const ENOEXEC: c_int = 8; /* Exec format error */ +pub const EBADF: c_int = 9; /* Bad file number */ +pub const ECHILD: c_int = 10; /* No child processes */ +pub const EAGAIN: c_int = 11; /* Try again */ +pub const ENOMEM: c_int = 12; /* Out of memory */ +pub const EACCES: c_int = 13; /* Permission denied */ +pub const EFAULT: c_int = 14; /* Bad address */ +pub const ENOTBLK: c_int = 15; /* Block device required */ +pub const EBUSY: c_int = 16; /* Device or resource busy */ +pub const EEXIST: c_int = 17; /* File exists */ +pub const EXDEV: c_int = 18; /* Cross-device link */ +pub const ENODEV: c_int = 19; /* No such device */ +pub const ENOTDIR: c_int = 20; /* Not a directory */ +pub const EISDIR: c_int = 21; /* Is a directory */ +pub const EINVAL: c_int = 22; /* Invalid argument */ +pub const ENFILE: c_int = 23; /* File table overflow */ +pub const EMFILE: c_int = 24; /* Too many open files */ +pub const ENOTTY: c_int = 25; /* Not a typewriter */ +pub const ETXTBSY: c_int = 26; /* Text file busy */ +pub const EFBIG: c_int = 27; /* File too large */ +pub const ENOSPC: c_int = 28; /* No space left on device */ +pub const ESPIPE: c_int = 29; /* Illegal seek */ +pub const EROFS: c_int = 30; /* Read-only file system */ +pub const EMLINK: c_int = 31; /* Too many links */ +pub const EPIPE: c_int = 32; /* Broken pipe */ +pub const EDOM: c_int = 33; /* Math argument out of domain of func */ +pub const ERANGE: c_int = 34; /* Math result not representable */ +pub const EDEADLK: c_int = 35; /* Resource deadlock would occur */ +pub const ENAMETOOLONG: c_int = 36; /* File name too long */ +pub const ENOLCK: c_int = 37; /* No record locks available */ +pub const ENOSYS: c_int = 38; /* Function not implemented */ +pub const ENOTEMPTY: c_int = 39; /* Directory not empty */ +pub const ELOOP: c_int = 40; /* Too many symbolic links encountered */ +pub const EWOULDBLOCK: c_int = 41; /* Operation would block */ +pub const ENOMSG: c_int = 42; /* No message of desired type */ +pub const EIDRM: c_int = 43; /* Identifier removed */ +pub const ECHRNG: c_int = 44; /* Channel number out of range */ +pub const EL2NSYNC: c_int = 45; /* Level 2 not synchronized */ +pub const EL3HLT: c_int = 46; /* Level 3 halted */ +pub const EL3RST: c_int = 47; /* Level 3 reset */ +pub const ELNRNG: c_int = 48; /* Link number out of range */ +pub const 
EUNATCH: c_int = 49; /* Protocol driver not attached */ +pub const ENOCSI: c_int = 50; /* No CSI structure available */ +pub const EL2HLT: c_int = 51; /* Level 2 halted */ +pub const EBADE: c_int = 52; /* Invalid exchange */ +pub const EBADR: c_int = 53; /* Invalid request descriptor */ +pub const EXFULL: c_int = 54; /* Exchange full */ +pub const ENOANO: c_int = 55; /* No anode */ +pub const EBADRQC: c_int = 56; /* Invalid request code */ +pub const EBADSLT: c_int = 57; /* Invalid slot */ +pub const EDEADLOCK: c_int = 58; /* Resource deadlock would occur */ +pub const EBFONT: c_int = 59; /* Bad font file format */ +pub const ENOSTR: c_int = 60; /* Device not a stream */ +pub const ENODATA: c_int = 61; /* No data available */ +pub const ETIME: c_int = 62; /* Timer expired */ +pub const ENOSR: c_int = 63; /* Out of streams resources */ +pub const ENONET: c_int = 64; /* Machine is not on the network */ +pub const ENOPKG: c_int = 65; /* Package not installed */ +pub const EREMOTE: c_int = 66; /* Object is remote */ +pub const ENOLINK: c_int = 67; /* Link has been severed */ +pub const EADV: c_int = 68; /* Advertise error */ +pub const ESRMNT: c_int = 69; /* Srmount error */ +pub const ECOMM: c_int = 70; /* Communication error on send */ +pub const EPROTO: c_int = 71; /* Protocol error */ +pub const EMULTIHOP: c_int = 72; /* Multihop attempted */ +pub const EDOTDOT: c_int = 73; /* RFS specific error */ +pub const EBADMSG: c_int = 74; /* Not a data message */ +pub const EOVERFLOW: c_int = 75; /* Value too large for defined data type */ +pub const ENOTUNIQ: c_int = 76; /* Name not unique on network */ +pub const EBADFD: c_int = 77; /* File descriptor in bad state */ +pub const EREMCHG: c_int = 78; /* Remote address changed */ +pub const ELIBACC: c_int = 79; /* Can not access a needed shared library */ +pub const ELIBBAD: c_int = 80; /* Accessing a corrupted shared library */ +pub const ELIBSCN: c_int = 81; /* .lib section in a.out corrupted */ +/* Attempting to link in too many shared libraries */ +pub const ELIBMAX: c_int = 82; +pub const ELIBEXEC: c_int = 83; /* Cannot exec a shared library directly */ +pub const EILSEQ: c_int = 84; /* Illegal byte sequence */ +/* Interrupted system call should be restarted */ +pub const ERESTART: c_int = 85; +pub const ESTRPIPE: c_int = 86; /* Streams pipe error */ +pub const EUSERS: c_int = 87; /* Too many users */ +pub const ENOTSOCK: c_int = 88; /* Socket operation on non-socket */ +pub const EDESTADDRREQ: c_int = 89; /* Destination address required */ +pub const EMSGSIZE: c_int = 90; /* Message too long */ +pub const EPROTOTYPE: c_int = 91; /* Protocol wrong type for socket */ +pub const ENOPROTOOPT: c_int = 92; /* Protocol not available */ +pub const EPROTONOSUPPORT: c_int = 93; /* Protocol not supported */ +pub const ESOCKTNOSUPPORT: c_int = 94; /* Socket type not supported */ +/* Operation not supported on transport endpoint */ +pub const EOPNOTSUPP: c_int = 95; +pub const ENOTSUP: c_int = EOPNOTSUPP; +pub const EPFNOSUPPORT: c_int = 96; /* Protocol family not supported */ +/* Address family not supported by protocol */ +pub const EAFNOSUPPORT: c_int = 97; +pub const EADDRINUSE: c_int = 98; /* Address already in use */ +pub const EADDRNOTAVAIL: c_int = 99; /* Cannot assign requested address */ +pub const ENETDOWN: c_int = 100; /* Network is down */ +pub const ENETUNREACH: c_int = 101; /* Network is unreachable */ +/* Network dropped connection because of reset */ +pub const ENETRESET: c_int = 102; +pub const ECONNABORTED: c_int = 103; /* Software 
caused connection abort */ +pub const ECONNRESET: c_int = 104; /* Connection reset by peer */ +pub const ENOBUFS: c_int = 105; /* No buffer space available */ +pub const EISCONN: c_int = 106; /* Transport endpoint is already connected */ +pub const ENOTCONN: c_int = 107; /* Transport endpoint is not connected */ +/* Cannot send after transport endpoint shutdown */ +pub const ESHUTDOWN: c_int = 108; +pub const ETOOMANYREFS: c_int = 109; /* Too many references: cannot splice */ +pub const ETIMEDOUT: c_int = 110; /* Connection timed out */ +pub const ECONNREFUSED: c_int = 111; /* Connection refused */ +pub const EHOSTDOWN: c_int = 112; /* Host is down */ +pub const EHOSTUNREACH: c_int = 113; /* No route to host */ +pub const EALREADY: c_int = 114; /* Operation already in progress */ +pub const EINPROGRESS: c_int = 115; /* Operation now in progress */ +pub const ESTALE: c_int = 116; /* Stale NFS file handle */ +pub const EUCLEAN: c_int = 117; /* Structure needs cleaning */ +pub const ENOTNAM: c_int = 118; /* Not a XENIX named type file */ +pub const ENAVAIL: c_int = 119; /* No XENIX semaphores available */ +pub const EISNAM: c_int = 120; /* Is a named type file */ +pub const EREMOTEIO: c_int = 121; /* Remote I/O error */ +pub const EDQUOT: c_int = 122; /* Quota exceeded */ +pub const ENOMEDIUM: c_int = 123; /* No medium found */ +pub const EMEDIUMTYPE: c_int = 124; /* Wrong medium type */ +pub const ECANCELED: c_int = 125; /* Operation Canceled */ +pub const ENOKEY: c_int = 126; /* Required key not available */ +pub const EKEYEXPIRED: c_int = 127; /* Key has expired */ +pub const EKEYREVOKED: c_int = 128; /* Key has been revoked */ +pub const EKEYREJECTED: c_int = 129; /* Key was rejected by service */ +pub const EOWNERDEAD: c_int = 130; /* Owner died */ +pub const ENOTRECOVERABLE: c_int = 131; /* State not recoverable */ + +// fcntl.h +pub const F_DUPFD: c_int = 0; +pub const F_GETFD: c_int = 1; +pub const F_SETFD: c_int = 2; +pub const F_GETFL: c_int = 3; +pub const F_SETFL: c_int = 4; +// FIXME(redox): relibc { +pub const F_DUPFD_CLOEXEC: c_int = crate::F_DUPFD; +// } +pub const FD_CLOEXEC: c_int = 0x0100_0000; +pub const O_RDONLY: c_int = 0x0001_0000; +pub const O_WRONLY: c_int = 0x0002_0000; +pub const O_RDWR: c_int = 0x0003_0000; +pub const O_ACCMODE: c_int = 0x0003_0000; +pub const O_NONBLOCK: c_int = 0x0004_0000; +pub const O_NDELAY: c_int = O_NONBLOCK; +pub const O_APPEND: c_int = 0x0008_0000; +pub const O_SHLOCK: c_int = 0x0010_0000; +pub const O_EXLOCK: c_int = 0x0020_0000; +pub const O_ASYNC: c_int = 0x0040_0000; +pub const O_FSYNC: c_int = 0x0080_0000; +pub const O_CLOEXEC: c_int = 0x0100_0000; +pub const O_CREAT: c_int = 0x0200_0000; +pub const O_TRUNC: c_int = 0x0400_0000; +pub const O_EXCL: c_int = 0x0800_0000; +pub const O_DIRECTORY: c_int = 0x1000_0000; +pub const O_PATH: c_int = 0x2000_0000; +pub const O_SYMLINK: c_int = 0x4000_0000; +// Negative to allow it to be used as int +// FIXME(redox): Fix negative values missing from includes +pub const O_NOFOLLOW: c_int = -0x8000_0000; +pub const O_NOCTTY: c_int = 0x00000200; + +// locale.h +pub const LC_ALL: c_int = 0; +pub const LC_COLLATE: c_int = 1; +pub const LC_CTYPE: c_int = 2; +pub const LC_MESSAGES: c_int = 3; +pub const LC_MONETARY: c_int = 4; +pub const LC_NUMERIC: c_int = 5; +pub const LC_TIME: c_int = 6; + +// netdb.h +pub const AI_PASSIVE: c_int = 0x0001; +pub const AI_CANONNAME: c_int = 0x0002; +pub const AI_NUMERICHOST: c_int = 0x0004; +pub const AI_V4MAPPED: c_int = 0x0008; +pub const AI_ALL: c_int = 0x0010; +pub 
const AI_ADDRCONFIG: c_int = 0x0020; +pub const AI_NUMERICSERV: c_int = 0x0400; +pub const EAI_BADFLAGS: c_int = -1; +pub const EAI_NONAME: c_int = -2; +pub const EAI_AGAIN: c_int = -3; +pub const EAI_FAIL: c_int = -4; +pub const EAI_NODATA: c_int = -5; +pub const EAI_FAMILY: c_int = -6; +pub const EAI_SOCKTYPE: c_int = -7; +pub const EAI_SERVICE: c_int = -8; +pub const EAI_ADDRFAMILY: c_int = -9; +pub const EAI_MEMORY: c_int = -10; +pub const EAI_SYSTEM: c_int = -11; +pub const EAI_OVERFLOW: c_int = -12; +pub const NI_MAXHOST: c_int = 1025; +pub const NI_MAXSERV: c_int = 32; +pub const NI_NUMERICHOST: c_int = 0x0001; +pub const NI_NUMERICSERV: c_int = 0x0002; +pub const NI_NOFQDN: c_int = 0x0004; +pub const NI_NAMEREQD: c_int = 0x0008; +pub const NI_DGRAM: c_int = 0x0010; + +// netinet/in.h +// FIXME(redox): relibc { +pub const IP_TTL: c_int = 2; +pub const IPV6_UNICAST_HOPS: c_int = 16; +pub const IPV6_MULTICAST_IF: c_int = 17; +pub const IPV6_MULTICAST_HOPS: c_int = 18; +pub const IPV6_MULTICAST_LOOP: c_int = 19; +pub const IPV6_ADD_MEMBERSHIP: c_int = 20; +pub const IPV6_DROP_MEMBERSHIP: c_int = 21; +pub const IPV6_V6ONLY: c_int = 26; +pub const IP_MULTICAST_IF: c_int = 32; +pub const IP_MULTICAST_TTL: c_int = 33; +pub const IP_MULTICAST_LOOP: c_int = 34; +pub const IP_ADD_MEMBERSHIP: c_int = 35; +pub const IP_DROP_MEMBERSHIP: c_int = 36; +pub const IP_TOS: c_int = 1; +pub const IP_RECVTOS: c_int = 2; +pub const IPPROTO_IGMP: c_int = 2; +pub const IPPROTO_PUP: c_int = 12; +pub const IPPROTO_IDP: c_int = 22; +pub const IPPROTO_RAW: c_int = 255; +pub const IPPROTO_MAX: c_int = 255; +// } + +// netinet/tcp.h +pub const TCP_NODELAY: c_int = 1; +// FIXME(redox): relibc { +pub const TCP_KEEPIDLE: c_int = 1; +// } + +// poll.h +pub const POLLIN: c_short = 0x001; +pub const POLLPRI: c_short = 0x002; +pub const POLLOUT: c_short = 0x004; +pub const POLLERR: c_short = 0x008; +pub const POLLHUP: c_short = 0x010; +pub const POLLNVAL: c_short = 0x020; +pub const POLLRDNORM: c_short = 0x040; +pub const POLLRDBAND: c_short = 0x080; +pub const POLLWRNORM: c_short = 0x100; +pub const POLLWRBAND: c_short = 0x200; + +// pthread.h +pub const PTHREAD_MUTEX_NORMAL: c_int = 0; +pub const PTHREAD_MUTEX_RECURSIVE: c_int = 1; +pub const PTHREAD_MUTEX_INITIALIZER: crate::pthread_mutex_t = crate::pthread_mutex_t { + bytes: [0; _PTHREAD_MUTEX_SIZE], +}; +pub const PTHREAD_COND_INITIALIZER: crate::pthread_cond_t = crate::pthread_cond_t { + bytes: [0; _PTHREAD_COND_SIZE], +}; +pub const PTHREAD_RWLOCK_INITIALIZER: crate::pthread_rwlock_t = crate::pthread_rwlock_t { + bytes: [0; _PTHREAD_RWLOCK_SIZE], +}; +pub const PTHREAD_STACK_MIN: size_t = 4096; + +// signal.h +pub const SIG_BLOCK: c_int = 0; +pub const SIG_UNBLOCK: c_int = 1; +pub const SIG_SETMASK: c_int = 2; +pub const SIGHUP: c_int = 1; +pub const SIGINT: c_int = 2; +pub const SIGQUIT: c_int = 3; +pub const SIGILL: c_int = 4; +pub const SIGTRAP: c_int = 5; +pub const SIGABRT: c_int = 6; +pub const SIGBUS: c_int = 7; +pub const SIGFPE: c_int = 8; +pub const SIGKILL: c_int = 9; +pub const SIGUSR1: c_int = 10; +pub const SIGSEGV: c_int = 11; +pub const SIGUSR2: c_int = 12; +pub const SIGPIPE: c_int = 13; +pub const SIGALRM: c_int = 14; +pub const SIGTERM: c_int = 15; +pub const SIGSTKFLT: c_int = 16; +pub const SIGCHLD: c_int = 17; +pub const SIGCONT: c_int = 18; +pub const SIGSTOP: c_int = 19; +pub const SIGTSTP: c_int = 20; +pub const SIGTTIN: c_int = 21; +pub const SIGTTOU: c_int = 22; +pub const SIGURG: c_int = 23; +pub const SIGXCPU: c_int = 24; +pub const 
SIGXFSZ: c_int = 25; +pub const SIGVTALRM: c_int = 26; +pub const SIGPROF: c_int = 27; +pub const SIGWINCH: c_int = 28; +pub const SIGIO: c_int = 29; +pub const SIGPWR: c_int = 30; +pub const SIGSYS: c_int = 31; +pub const NSIG: c_int = 32; + +pub const SA_NOCLDWAIT: c_ulong = 0x0000_0002; +pub const SA_RESTORER: c_ulong = 0x0000_0004; // FIXME(redox): remove after relibc removes it +pub const SA_SIGINFO: c_ulong = 0x0200_0000; +pub const SA_ONSTACK: c_ulong = 0x0400_0000; +pub const SA_RESTART: c_ulong = 0x0800_0000; +pub const SA_NODEFER: c_ulong = 0x1000_0000; +pub const SA_RESETHAND: c_ulong = 0x2000_0000; +pub const SA_NOCLDSTOP: c_ulong = 0x4000_0000; + +// sys/file.h +pub const LOCK_SH: c_int = 1; +pub const LOCK_EX: c_int = 2; +pub const LOCK_NB: c_int = 4; +pub const LOCK_UN: c_int = 8; + +// sys/epoll.h +pub const EPOLL_CLOEXEC: c_int = 0x0100_0000; +pub const EPOLL_CTL_ADD: c_int = 1; +pub const EPOLL_CTL_DEL: c_int = 2; +pub const EPOLL_CTL_MOD: c_int = 3; +pub const EPOLLIN: c_int = 0x001; +pub const EPOLLPRI: c_int = 0x002; +pub const EPOLLOUT: c_int = 0x004; +pub const EPOLLERR: c_int = 0x008; +pub const EPOLLHUP: c_int = 0x010; +pub const EPOLLNVAL: c_int = 0x020; +pub const EPOLLRDNORM: c_int = 0x040; +pub const EPOLLRDBAND: c_int = 0x080; +pub const EPOLLWRNORM: c_int = 0x100; +pub const EPOLLWRBAND: c_int = 0x200; +pub const EPOLLMSG: c_int = 0x400; +pub const EPOLLRDHUP: c_int = 0x2000; +pub const EPOLLEXCLUSIVE: c_int = 1 << 28; +pub const EPOLLWAKEUP: c_int = 1 << 29; +pub const EPOLLONESHOT: c_int = 1 << 30; +pub const EPOLLET: c_int = 1 << 31; + +// sys/stat.h +pub const S_IFMT: c_int = 0o17_0000; +pub const S_IFDIR: c_int = 0o4_0000; +pub const S_IFCHR: c_int = 0o2_0000; +pub const S_IFBLK: c_int = 0o6_0000; +pub const S_IFREG: c_int = 0o10_0000; +pub const S_IFIFO: c_int = 0o1_0000; +pub const S_IFLNK: c_int = 0o12_0000; +pub const S_IFSOCK: c_int = 0o14_0000; +pub const S_IRWXU: c_int = 0o0700; +pub const S_IRUSR: c_int = 0o0400; +pub const S_IWUSR: c_int = 0o0200; +pub const S_IXUSR: c_int = 0o0100; +pub const S_IRWXG: c_int = 0o0070; +pub const S_IRGRP: c_int = 0o0040; +pub const S_IWGRP: c_int = 0o0020; +pub const S_IXGRP: c_int = 0o0010; +pub const S_IRWXO: c_int = 0o0007; +pub const S_IROTH: c_int = 0o0004; +pub const S_IWOTH: c_int = 0o0002; +pub const S_IXOTH: c_int = 0o0001; + +// stdlib.h +pub const EXIT_SUCCESS: c_int = 0; +pub const EXIT_FAILURE: c_int = 1; + +// sys/ioctl.h +// FIXME(redox): relibc { +pub const FIONREAD: c_ulong = 0x541B; +pub const FIONBIO: c_ulong = 0x5421; +pub const FIOCLEX: c_ulong = 0x5451; +// } +pub const TCGETS: c_ulong = 0x5401; +pub const TCSETS: c_ulong = 0x5402; +pub const TCFLSH: c_ulong = 0x540B; +pub const TIOCSCTTY: c_ulong = 0x540E; +pub const TIOCGPGRP: c_ulong = 0x540F; +pub const TIOCSPGRP: c_ulong = 0x5410; +pub const TIOCGWINSZ: c_ulong = 0x5413; +pub const TIOCSWINSZ: c_ulong = 0x5414; + +// sys/mman.h +pub const PROT_NONE: c_int = 0x0000; +pub const PROT_READ: c_int = 0x0004; +pub const PROT_WRITE: c_int = 0x0002; +pub const PROT_EXEC: c_int = 0x0001; + +pub const MADV_NORMAL: c_int = 0; +pub const MADV_RANDOM: c_int = 1; +pub const MADV_SEQUENTIAL: c_int = 2; +pub const MADV_WILLNEED: c_int = 3; +pub const MADV_DONTNEED: c_int = 4; + +pub const MAP_SHARED: c_int = 0x0001; +pub const MAP_PRIVATE: c_int = 0x0002; +pub const MAP_ANON: c_int = 0x0020; +pub const MAP_ANONYMOUS: c_int = MAP_ANON; +pub const MAP_FIXED: c_int = 0x0004; +pub const MAP_FAILED: *mut c_void = !0 as _; + +pub const MS_ASYNC: c_int = 
0x0001; +pub const MS_INVALIDATE: c_int = 0x0002; +pub const MS_SYNC: c_int = 0x0004; + +// sys/resource.h +pub const RLIM_INFINITY: rlim_t = !0; +pub const RLIM_SAVED_CUR: rlim_t = RLIM_INFINITY; +pub const RLIM_SAVED_MAX: rlim_t = RLIM_INFINITY; +pub const RLIMIT_CPU: c_int = 0; +pub const RLIMIT_FSIZE: c_int = 1; +pub const RLIMIT_DATA: c_int = 2; +pub const RLIMIT_STACK: c_int = 3; +pub const RLIMIT_CORE: c_int = 4; +pub const RLIMIT_RSS: c_int = 5; +pub const RLIMIT_NPROC: c_int = 6; +pub const RLIMIT_NOFILE: c_int = 7; +pub const RLIMIT_MEMLOCK: c_int = 8; +pub const RLIMIT_AS: c_int = 9; +pub const RLIMIT_LOCKS: c_int = 10; +pub const RLIMIT_SIGPENDING: c_int = 11; +pub const RLIMIT_MSGQUEUE: c_int = 12; +pub const RLIMIT_NICE: c_int = 13; +pub const RLIMIT_RTPRIO: c_int = 14; +pub const RLIMIT_NLIMITS: c_int = 15; + +pub const RUSAGE_SELF: c_int = 0; +pub const RUSAGE_CHILDREN: c_int = -1; +pub const RUSAGE_BOTH: c_int = -2; +pub const RUSAGE_THREAD: c_int = 1; + +// sys/select.h +pub const FD_SETSIZE: usize = 1024; + +// sys/socket.h +pub const AF_INET: c_int = 2; +pub const AF_INET6: c_int = 10; +pub const AF_UNIX: c_int = 1; +pub const AF_UNSPEC: c_int = 0; +pub const PF_INET: c_int = 2; +pub const PF_INET6: c_int = 10; +pub const PF_UNIX: c_int = 1; +pub const PF_UNSPEC: c_int = 0; +pub const MSG_CTRUNC: c_int = 8; +pub const MSG_DONTROUTE: c_int = 4; +pub const MSG_EOR: c_int = 128; +pub const MSG_OOB: c_int = 1; +pub const MSG_PEEK: c_int = 2; +pub const MSG_TRUNC: c_int = 32; +pub const MSG_DONTWAIT: c_int = 64; +pub const MSG_WAITALL: c_int = 256; +pub const SCM_RIGHTS: c_int = 1; +pub const SHUT_RD: c_int = 0; +pub const SHUT_WR: c_int = 1; +pub const SHUT_RDWR: c_int = 2; +pub const SO_DEBUG: c_int = 1; +pub const SO_REUSEADDR: c_int = 2; +pub const SO_TYPE: c_int = 3; +pub const SO_ERROR: c_int = 4; +pub const SO_DONTROUTE: c_int = 5; +pub const SO_BROADCAST: c_int = 6; +pub const SO_SNDBUF: c_int = 7; +pub const SO_RCVBUF: c_int = 8; +pub const SO_KEEPALIVE: c_int = 9; +pub const SO_OOBINLINE: c_int = 10; +pub const SO_NO_CHECK: c_int = 11; +pub const SO_PRIORITY: c_int = 12; +pub const SO_LINGER: c_int = 13; +pub const SO_BSDCOMPAT: c_int = 14; +pub const SO_REUSEPORT: c_int = 15; +pub const SO_PASSCRED: c_int = 16; +pub const SO_PEERCRED: c_int = 17; +pub const SO_RCVLOWAT: c_int = 18; +pub const SO_SNDLOWAT: c_int = 19; +pub const SO_RCVTIMEO: c_int = 20; +pub const SO_SNDTIMEO: c_int = 21; +pub const SO_ACCEPTCONN: c_int = 30; +pub const SO_PEERSEC: c_int = 31; +pub const SO_SNDBUFFORCE: c_int = 32; +pub const SO_RCVBUFFORCE: c_int = 33; +pub const SO_PROTOCOL: c_int = 38; +pub const SO_DOMAIN: c_int = 39; +pub const SOCK_STREAM: c_int = 1; +pub const SOCK_DGRAM: c_int = 2; +pub const SOCK_RAW: c_int = 3; +pub const SOCK_NONBLOCK: c_int = 0o4_000; +pub const SOCK_CLOEXEC: c_int = 0o2_000_000; +pub const SOCK_SEQPACKET: c_int = 5; +pub const SOL_SOCKET: c_int = 1; +pub const SOMAXCONN: c_int = 128; + +// sys/termios.h +pub const VEOF: usize = 0; +pub const VEOL: usize = 1; +pub const VEOL2: usize = 2; +pub const VERASE: usize = 3; +pub const VWERASE: usize = 4; +pub const VKILL: usize = 5; +pub const VREPRINT: usize = 6; +pub const VSWTC: usize = 7; +pub const VINTR: usize = 8; +pub const VQUIT: usize = 9; +pub const VSUSP: usize = 10; +pub const VSTART: usize = 12; +pub const VSTOP: usize = 13; +pub const VLNEXT: usize = 14; +pub const VDISCARD: usize = 15; +pub const VMIN: usize = 16; +pub const VTIME: usize = 17; +pub const NCCS: usize = 32; + +pub const IGNBRK: 
crate::tcflag_t = 0o000_001; +pub const BRKINT: crate::tcflag_t = 0o000_002; +pub const IGNPAR: crate::tcflag_t = 0o000_004; +pub const PARMRK: crate::tcflag_t = 0o000_010; +pub const INPCK: crate::tcflag_t = 0o000_020; +pub const ISTRIP: crate::tcflag_t = 0o000_040; +pub const INLCR: crate::tcflag_t = 0o000_100; +pub const IGNCR: crate::tcflag_t = 0o000_200; +pub const ICRNL: crate::tcflag_t = 0o000_400; +pub const IXON: crate::tcflag_t = 0o001_000; +pub const IXOFF: crate::tcflag_t = 0o002_000; + +pub const OPOST: crate::tcflag_t = 0o000_001; +pub const ONLCR: crate::tcflag_t = 0o000_002; +pub const OLCUC: crate::tcflag_t = 0o000_004; +pub const OCRNL: crate::tcflag_t = 0o000_010; +pub const ONOCR: crate::tcflag_t = 0o000_020; +pub const ONLRET: crate::tcflag_t = 0o000_040; +pub const OFILL: crate::tcflag_t = 0o0000_100; +pub const OFDEL: crate::tcflag_t = 0o0000_200; + +pub const B0: speed_t = 0o000_000; +pub const B50: speed_t = 0o000_001; +pub const B75: speed_t = 0o000_002; +pub const B110: speed_t = 0o000_003; +pub const B134: speed_t = 0o000_004; +pub const B150: speed_t = 0o000_005; +pub const B200: speed_t = 0o000_006; +pub const B300: speed_t = 0o000_007; +pub const B600: speed_t = 0o000_010; +pub const B1200: speed_t = 0o000_011; +pub const B1800: speed_t = 0o000_012; +pub const B2400: speed_t = 0o000_013; +pub const B4800: speed_t = 0o000_014; +pub const B9600: speed_t = 0o000_015; +pub const B19200: speed_t = 0o000_016; +pub const B38400: speed_t = 0o000_017; + +pub const B57600: speed_t = 0o0_020; +pub const B115200: speed_t = 0o0_021; +pub const B230400: speed_t = 0o0_022; +pub const B460800: speed_t = 0o0_023; +pub const B500000: speed_t = 0o0_024; +pub const B576000: speed_t = 0o0_025; +pub const B921600: speed_t = 0o0_026; +pub const B1000000: speed_t = 0o0_027; +pub const B1152000: speed_t = 0o0_030; +pub const B1500000: speed_t = 0o0_031; +pub const B2000000: speed_t = 0o0_032; +pub const B2500000: speed_t = 0o0_033; +pub const B3000000: speed_t = 0o0_034; +pub const B3500000: speed_t = 0o0_035; +pub const B4000000: speed_t = 0o0_036; + +pub const CSIZE: crate::tcflag_t = 0o001_400; +pub const CS5: crate::tcflag_t = 0o000_000; +pub const CS6: crate::tcflag_t = 0o000_400; +pub const CS7: crate::tcflag_t = 0o001_000; +pub const CS8: crate::tcflag_t = 0o001_400; + +pub const CSTOPB: crate::tcflag_t = 0o002_000; +pub const CREAD: crate::tcflag_t = 0o004_000; +pub const PARENB: crate::tcflag_t = 0o010_000; +pub const PARODD: crate::tcflag_t = 0o020_000; +pub const HUPCL: crate::tcflag_t = 0o040_000; + +pub const CLOCAL: crate::tcflag_t = 0o0100000; + +pub const ISIG: crate::tcflag_t = 0x0000_0080; +pub const ICANON: crate::tcflag_t = 0x0000_0100; +pub const ECHO: crate::tcflag_t = 0x0000_0008; +pub const ECHOE: crate::tcflag_t = 0x0000_0002; +pub const ECHOK: crate::tcflag_t = 0x0000_0004; +pub const ECHONL: crate::tcflag_t = 0x0000_0010; +pub const NOFLSH: crate::tcflag_t = 0x8000_0000; +pub const TOSTOP: crate::tcflag_t = 0x0040_0000; +pub const IEXTEN: crate::tcflag_t = 0x0000_0400; + +pub const TCOOFF: c_int = 0; +pub const TCOON: c_int = 1; +pub const TCIOFF: c_int = 2; +pub const TCION: c_int = 3; + +pub const TCIFLUSH: c_int = 0; +pub const TCOFLUSH: c_int = 1; +pub const TCIOFLUSH: c_int = 2; + +pub const TCSANOW: c_int = 0; +pub const TCSADRAIN: c_int = 1; +pub const TCSAFLUSH: c_int = 2; + +pub const _POSIX_VDISABLE: crate::cc_t = 0; + +// sys/wait.h +pub const WNOHANG: c_int = 1; +pub const WUNTRACED: c_int = 2; + +pub const WSTOPPED: c_int = 2; +pub const 
WEXITED: c_int = 4; +pub const WCONTINUED: c_int = 8; +pub const WNOWAIT: c_int = 0x0100_0000; + +pub const __WNOTHREAD: c_int = 0x2000_0000; +pub const __WALL: c_int = 0x4000_0000; +#[allow(overflowing_literals)] +pub const __WCLONE: c_int = 0x8000_0000; + +// time.h +pub const CLOCK_REALTIME: c_int = 1; +pub const CLOCK_MONOTONIC: c_int = 4; +pub const CLOCK_PROCESS_CPUTIME_ID: crate::clockid_t = 2; +pub const CLOCKS_PER_SEC: crate::clock_t = 1_000_000; + +// unistd.h +// POSIX.1 { +pub const _SC_ARG_MAX: c_int = 0; +pub const _SC_CHILD_MAX: c_int = 1; +pub const _SC_CLK_TCK: c_int = 2; +pub const _SC_NGROUPS_MAX: c_int = 3; +pub const _SC_OPEN_MAX: c_int = 4; +pub const _SC_STREAM_MAX: c_int = 5; +pub const _SC_TZNAME_MAX: c_int = 6; +// ... +pub const _SC_VERSION: c_int = 29; +pub const _SC_PAGESIZE: c_int = 30; +pub const _SC_PAGE_SIZE: c_int = 30; +// ... +pub const _SC_RE_DUP_MAX: c_int = 44; + +pub const _SC_NPROCESSORS_CONF: c_int = 57; +pub const _SC_NPROCESSORS_ONLN: c_int = 58; + +// ... +pub const _SC_GETGR_R_SIZE_MAX: c_int = 69; +pub const _SC_GETPW_R_SIZE_MAX: c_int = 70; +pub const _SC_LOGIN_NAME_MAX: c_int = 71; +pub const _SC_TTY_NAME_MAX: c_int = 72; +// ... +pub const _SC_SYMLOOP_MAX: c_int = 173; +// ... +pub const _SC_HOST_NAME_MAX: c_int = 180; +// ... +pub const _SC_SIGQUEUE_MAX: c_int = 190; +pub const _SC_REALTIME_SIGNALS: c_int = 191; +// } POSIX.1 + +// confstr +pub const _CS_PATH: c_int = 0; +pub const _CS_POSIX_V6_WIDTH_RESTRICTED_ENVS: c_int = 1; +pub const _CS_POSIX_V5_WIDTH_RESTRICTED_ENVS: c_int = 4; +pub const _CS_POSIX_V7_WIDTH_RESTRICTED_ENVS: c_int = 5; +pub const _CS_POSIX_V6_ILP32_OFF32_CFLAGS: c_int = 1116; +pub const _CS_POSIX_V6_ILP32_OFF32_LDFLAGS: c_int = 1117; +pub const _CS_POSIX_V6_ILP32_OFF32_LIBS: c_int = 1118; +pub const _CS_POSIX_V6_ILP32_OFF32_LINTFLAGS: c_int = 1119; +pub const _CS_POSIX_V6_ILP32_OFFBIG_CFLAGS: c_int = 1120; +pub const _CS_POSIX_V6_ILP32_OFFBIG_LDFLAGS: c_int = 1121; +pub const _CS_POSIX_V6_ILP32_OFFBIG_LIBS: c_int = 1122; +pub const _CS_POSIX_V6_ILP32_OFFBIG_LINTFLAGS: c_int = 1123; +pub const _CS_POSIX_V6_LP64_OFF64_CFLAGS: c_int = 1124; +pub const _CS_POSIX_V6_LP64_OFF64_LDFLAGS: c_int = 1125; +pub const _CS_POSIX_V6_LP64_OFF64_LIBS: c_int = 1126; +pub const _CS_POSIX_V6_LP64_OFF64_LINTFLAGS: c_int = 1127; +pub const _CS_POSIX_V6_LPBIG_OFFBIG_CFLAGS: c_int = 1128; +pub const _CS_POSIX_V6_LPBIG_OFFBIG_LDFLAGS: c_int = 1129; +pub const _CS_POSIX_V6_LPBIG_OFFBIG_LIBS: c_int = 1130; +pub const _CS_POSIX_V6_LPBIG_OFFBIG_LINTFLAGS: c_int = 1131; +pub const _CS_POSIX_V7_ILP32_OFF32_CFLAGS: c_int = 1132; +pub const _CS_POSIX_V7_ILP32_OFF32_LDFLAGS: c_int = 1133; +pub const _CS_POSIX_V7_ILP32_OFF32_LIBS: c_int = 1134; +pub const _CS_POSIX_V7_ILP32_OFF32_LINTFLAGS: c_int = 1135; +pub const _CS_POSIX_V7_ILP32_OFFBIG_CFLAGS: c_int = 1136; +pub const _CS_POSIX_V7_ILP32_OFFBIG_LDFLAGS: c_int = 1137; +pub const _CS_POSIX_V7_ILP32_OFFBIG_LIBS: c_int = 1138; +pub const _CS_POSIX_V7_ILP32_OFFBIG_LINTFLAGS: c_int = 1139; +pub const _CS_POSIX_V7_LP64_OFF64_CFLAGS: c_int = 1140; +pub const _CS_POSIX_V7_LP64_OFF64_LDFLAGS: c_int = 1141; +pub const _CS_POSIX_V7_LP64_OFF64_LIBS: c_int = 1142; +pub const _CS_POSIX_V7_LP64_OFF64_LINTFLAGS: c_int = 1143; +pub const _CS_POSIX_V7_LPBIG_OFFBIG_CFLAGS: c_int = 1144; +pub const _CS_POSIX_V7_LPBIG_OFFBIG_LDFLAGS: c_int = 1145; +pub const _CS_POSIX_V7_LPBIG_OFFBIG_LIBS: c_int = 1146; +pub const _CS_POSIX_V7_LPBIG_OFFBIG_LINTFLAGS: c_int = 1147; + +pub const F_OK: c_int = 0; +pub const R_OK: c_int = 
4; +pub const W_OK: c_int = 2; +pub const X_OK: c_int = 1; + +// stdio.h +pub const BUFSIZ: c_uint = 1024; +pub const _IOFBF: c_int = 0; +pub const _IOLBF: c_int = 1; +pub const _IONBF: c_int = 2; +pub const SEEK_SET: c_int = 0; +pub const SEEK_CUR: c_int = 1; +pub const SEEK_END: c_int = 2; +pub const STDIN_FILENO: c_int = 0; +pub const STDOUT_FILENO: c_int = 1; +pub const STDERR_FILENO: c_int = 2; + +pub const _PC_LINK_MAX: c_int = 0; +pub const _PC_MAX_CANON: c_int = 1; +pub const _PC_MAX_INPUT: c_int = 2; +pub const _PC_NAME_MAX: c_int = 3; +pub const _PC_PATH_MAX: c_int = 4; +pub const _PC_PIPE_BUF: c_int = 5; +pub const _PC_CHOWN_RESTRICTED: c_int = 6; +pub const _PC_NO_TRUNC: c_int = 7; +pub const _PC_VDISABLE: c_int = 8; +pub const _PC_SYNC_IO: c_int = 9; +pub const _PC_ASYNC_IO: c_int = 10; +pub const _PC_PRIO_IO: c_int = 11; +pub const _PC_SOCK_MAXBUF: c_int = 12; +pub const _PC_FILESIZEBITS: c_int = 13; +pub const _PC_REC_INCR_XFER_SIZE: c_int = 14; +pub const _PC_REC_MAX_XFER_SIZE: c_int = 15; +pub const _PC_REC_MIN_XFER_SIZE: c_int = 16; +pub const _PC_REC_XFER_ALIGN: c_int = 17; +pub const _PC_ALLOC_SIZE_MIN: c_int = 18; +pub const _PC_SYMLINK_MAX: c_int = 19; +pub const _PC_2_SYMLINKS: c_int = 20; + +pub const PRIO_PROCESS: c_int = 0; +pub const PRIO_PGRP: c_int = 1; +pub const PRIO_USER: c_int = 2; + +f! { + //sys/socket.h + pub const fn CMSG_ALIGN(len: size_t) -> size_t { + (len + size_of::() - 1) & !(size_of::() - 1) + } + pub const fn CMSG_LEN(length: c_uint) -> c_uint { + (CMSG_ALIGN(size_of::()) + length as usize) as c_uint + } + pub const fn CMSG_SPACE(len: c_uint) -> c_uint { + (CMSG_ALIGN(len as size_t) + CMSG_ALIGN(size_of::())) as c_uint + } + + // wait.h + pub fn FD_CLR(fd: c_int, set: *mut fd_set) -> () { + let fd = fd as usize; + let size = size_of_val(&(*set).fds_bits[0]) * 8; + (*set).fds_bits[fd / size] &= !(1 << (fd % size)); + return; + } + + pub fn FD_ISSET(fd: c_int, set: *const fd_set) -> bool { + let fd = fd as usize; + let size = size_of_val(&(*set).fds_bits[0]) * 8; + return ((*set).fds_bits[fd / size] & (1 << (fd % size))) != 0; + } + + pub fn FD_SET(fd: c_int, set: *mut fd_set) -> () { + let fd = fd as usize; + let size = size_of_val(&(*set).fds_bits[0]) * 8; + (*set).fds_bits[fd / size] |= 1 << (fd % size); + return; + } + + pub fn FD_ZERO(set: *mut fd_set) -> () { + for slot in (*set).fds_bits.iter_mut() { + *slot = 0; + } + } +} + +safe_f! 
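+// Worked example (illustrative only, not from the vendored source): with a
+// 64-bit c_ulong, `size` in the helpers above is 64, so FD_SET(70, set)
+// updates fds_bits[70 / 64] == fds_bits[1] with `|= 1 << (70 % 64)`, i.e.
+// bit 6, and FD_ISSET(70, set) reads that same bit back:
+//
+//     let mut set: fd_set = unsafe { core::mem::zeroed() };
+//     unsafe { FD_SET(70, &mut set) }; // fds_bits[1] |= 1 << 6
+//     assert!(unsafe { FD_ISSET(70, &set) });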
{ + pub const fn WIFSTOPPED(status: c_int) -> bool { + (status & 0xff) == 0x7f + } + + pub const fn WSTOPSIG(status: c_int) -> c_int { + (status >> 8) & 0xff + } + + pub const fn WIFCONTINUED(status: c_int) -> bool { + status == 0xffff + } + + pub const fn WIFSIGNALED(status: c_int) -> bool { + ((status & 0x7f) + 1) as i8 >= 2 + } + + pub const fn WTERMSIG(status: c_int) -> c_int { + status & 0x7f + } + + pub const fn WIFEXITED(status: c_int) -> bool { + (status & 0x7f) == 0 + } + + pub const fn WEXITSTATUS(status: c_int) -> c_int { + (status >> 8) & 0xff + } + + pub const fn WCOREDUMP(status: c_int) -> bool { + (status & 0x80) != 0 + } +} + +extern "C" { + // errno.h + pub fn __errno_location() -> *mut c_int; + pub fn strerror_r(errnum: c_int, buf: *mut c_char, buflen: size_t) -> c_int; + + // dirent.h + pub fn dirfd(dirp: *mut crate::DIR) -> c_int; + + // unistd.h + pub fn pipe2(fds: *mut c_int, flags: c_int) -> c_int; + pub fn getdtablesize() -> c_int; + + // grp.h + pub fn getgrent() -> *mut crate::group; + pub fn setgrent(); + pub fn endgrent(); + pub fn getgrgid(gid: crate::gid_t) -> *mut crate::group; + pub fn getgrgid_r( + gid: crate::gid_t, + grp: *mut crate::group, + buf: *mut c_char, + buflen: size_t, + result: *mut *mut crate::group, + ) -> c_int; + pub fn getgrnam(name: *const c_char) -> *mut crate::group; + pub fn getgrnam_r( + name: *const c_char, + grp: *mut crate::group, + buf: *mut c_char, + buflen: size_t, + result: *mut *mut crate::group, + ) -> c_int; + pub fn getgrouplist( + user: *const c_char, + group: crate::gid_t, + groups: *mut crate::gid_t, + ngroups: *mut c_int, + ) -> c_int; + + // malloc.h + pub fn memalign(align: size_t, size: size_t) -> *mut c_void; + + // netdb.h + pub fn getnameinfo( + addr: *const crate::sockaddr, + addrlen: crate::socklen_t, + host: *mut c_char, + hostlen: crate::socklen_t, + serv: *mut c_char, + servlen: crate::socklen_t, + flags: c_int, + ) -> c_int; + + // pthread.h + pub fn pthread_atfork( + prepare: Option, + parent: Option, + child: Option, + ) -> c_int; + pub fn pthread_create( + tid: *mut crate::pthread_t, + attr: *const crate::pthread_attr_t, + start: extern "C" fn(*mut c_void) -> *mut c_void, + arg: *mut c_void, + ) -> c_int; + pub fn pthread_condattr_setclock( + attr: *mut pthread_condattr_t, + clock_id: crate::clockid_t, + ) -> c_int; + + //pty.h + pub fn openpty( + amaster: *mut c_int, + aslave: *mut c_int, + name: *mut c_char, + termp: *const termios, + winp: *const crate::winsize, + ) -> c_int; + + // pwd.h + pub fn getpwent() -> *mut passwd; + pub fn setpwent(); + pub fn endpwent(); + pub fn getpwnam_r( + name: *const c_char, + pwd: *mut passwd, + buf: *mut c_char, + buflen: size_t, + result: *mut *mut passwd, + ) -> c_int; + pub fn getpwuid_r( + uid: crate::uid_t, + pwd: *mut passwd, + buf: *mut c_char, + buflen: size_t, + result: *mut *mut passwd, + ) -> c_int; + + // signal.h + pub fn pthread_sigmask( + how: c_int, + set: *const crate::sigset_t, + oldset: *mut crate::sigset_t, + ) -> c_int; + pub fn pthread_cancel(thread: crate::pthread_t) -> c_int; + pub fn pthread_kill(thread: crate::pthread_t, sig: c_int) -> c_int; + pub fn sigtimedwait( + set: *const sigset_t, + sig: *mut siginfo_t, + timeout: *const crate::timespec, + ) -> c_int; + pub fn sigwait(set: *const sigset_t, sig: *mut c_int) -> c_int; + + // stdlib.h + pub fn getsubopt( + optionp: *mut *mut c_char, + tokens: *const *mut c_char, + valuep: *mut *mut c_char, + ) -> c_int; + pub fn mkostemp(template: *mut c_char, flags: c_int) -> c_int; + pub fn 
mkostemps(template: *mut c_char, suffixlen: c_int, flags: c_int) -> c_int; + pub fn reallocarray(ptr: *mut c_void, nmemb: size_t, size: size_t) -> *mut c_void; + + // string.h + pub fn explicit_bzero(p: *mut c_void, len: size_t); + pub fn strlcat(dst: *mut c_char, src: *const c_char, siz: size_t) -> size_t; + pub fn strlcpy(dst: *mut c_char, src: *const c_char, siz: size_t) -> size_t; + + // sys/epoll.h + pub fn epoll_create(size: c_int) -> c_int; + pub fn epoll_create1(flags: c_int) -> c_int; + pub fn epoll_wait( + epfd: c_int, + events: *mut crate::epoll_event, + maxevents: c_int, + timeout: c_int, + ) -> c_int; + pub fn epoll_ctl(epfd: c_int, op: c_int, fd: c_int, event: *mut crate::epoll_event) -> c_int; + + // sys/ioctl.h + pub fn ioctl(fd: c_int, request: c_ulong, ...) -> c_int; + + // sys/mman.h + pub fn madvise(addr: *mut c_void, len: size_t, advice: c_int) -> c_int; + pub fn msync(addr: *mut c_void, len: size_t, flags: c_int) -> c_int; + pub fn mprotect(addr: *mut c_void, len: size_t, prot: c_int) -> c_int; + pub fn shm_open(name: *const c_char, oflag: c_int, mode: mode_t) -> c_int; + pub fn shm_unlink(name: *const c_char) -> c_int; + + // sys/resource.h + pub fn getpriority(which: c_int, who: crate::id_t) -> c_int; + pub fn setpriority(which: c_int, who: crate::id_t, prio: c_int) -> c_int; + pub fn getrlimit(resource: c_int, rlim: *mut crate::rlimit) -> c_int; + pub fn setrlimit(resource: c_int, rlim: *const crate::rlimit) -> c_int; + + // sys/socket.h + pub fn CMSG_DATA(cmsg: *const cmsghdr) -> *mut c_uchar; + pub fn CMSG_FIRSTHDR(mhdr: *const msghdr) -> *mut cmsghdr; + pub fn CMSG_NXTHDR(mhdr: *const msghdr, cmsg: *const cmsghdr) -> *mut cmsghdr; + pub fn bind( + socket: c_int, + address: *const crate::sockaddr, + address_len: crate::socklen_t, + ) -> c_int; + pub fn recvfrom( + socket: c_int, + buf: *mut c_void, + len: size_t, + flags: c_int, + addr: *mut crate::sockaddr, + addrlen: *mut crate::socklen_t, + ) -> ssize_t; + pub fn recvmsg(socket: c_int, msg: *mut msghdr, flags: c_int) -> ssize_t; + pub fn sendmsg(socket: c_int, msg: *const msghdr, flags: c_int) -> ssize_t; + + // sys/stat.h + pub fn futimens(fd: c_int, times: *const crate::timespec) -> c_int; + + // sys/uio.h + pub fn preadv(fd: c_int, iov: *const crate::iovec, iovcnt: c_int, offset: off_t) -> ssize_t; + pub fn pwritev(fd: c_int, iov: *const crate::iovec, iovcnt: c_int, offset: off_t) -> ssize_t; + pub fn readv(fd: c_int, iov: *const crate::iovec, iovcnt: c_int) -> ssize_t; + pub fn writev(fd: c_int, iov: *const crate::iovec, iovcnt: c_int) -> ssize_t; + + // sys/utsname.h + pub fn uname(utsname: *mut utsname) -> c_int; + + // time.h + pub fn gettimeofday(tp: *mut crate::timeval, tz: *mut crate::timezone) -> c_int; + pub fn clock_gettime(clk_id: crate::clockid_t, tp: *mut crate::timespec) -> c_int; + pub fn strftime( + s: *mut c_char, + max: size_t, + format: *const c_char, + tm: *const crate::tm, + ) -> size_t; + + // utmp.h + pub fn login_tty(fd: c_int) -> c_int; +} + +cfg_if! 
{ + if #[cfg(feature = "extra_traits")] { + impl PartialEq for dirent { + fn eq(&self, other: &dirent) -> bool { + self.d_ino == other.d_ino + && self.d_off == other.d_off + && self.d_reclen == other.d_reclen + && self.d_type == other.d_type + && self + .d_name + .iter() + .zip(other.d_name.iter()) + .all(|(a, b)| a == b) + } + } + + impl Eq for dirent {} + + impl hash::Hash for dirent { + fn hash<H: hash::Hasher>(&self, state: &mut H) { + self.d_ino.hash(state); + self.d_off.hash(state); + self.d_reclen.hash(state); + self.d_type.hash(state); + self.d_name.hash(state); + } + } + + impl PartialEq for sockaddr_un { + fn eq(&self, other: &sockaddr_un) -> bool { + self.sun_family == other.sun_family + && self + .sun_path + .iter() + .zip(other.sun_path.iter()) + .all(|(a, b)| a == b) + } + } + + impl Eq for sockaddr_un {} + + impl hash::Hash for sockaddr_un { + fn hash<H: hash::Hasher>(&self, state: &mut H) { + self.sun_family.hash(state); + self.sun_path.hash(state); + } + } + + impl PartialEq for sockaddr_storage { + fn eq(&self, other: &sockaddr_storage) -> bool { + self.ss_family == other.ss_family + && self.__ss_align == other.__ss_align + && self + .__ss_padding + .iter() + .zip(other.__ss_padding.iter()) + .all(|(a, b)| a == b) + } + } + + impl Eq for sockaddr_storage {} + + impl hash::Hash for sockaddr_storage { + fn hash<H: hash::Hasher>(&self, state: &mut H) { + self.ss_family.hash(state); + self.__ss_padding.hash(state); + self.__ss_align.hash(state); + } + } + + impl PartialEq for utsname { + fn eq(&self, other: &utsname) -> bool { + self.sysname + .iter() + .zip(other.sysname.iter()) + .all(|(a, b)| a == b) + && self + .nodename + .iter() + .zip(other.nodename.iter()) + .all(|(a, b)| a == b) + && self + .release + .iter() + .zip(other.release.iter()) + .all(|(a, b)| a == b) + && self + .version + .iter() + .zip(other.version.iter()) + .all(|(a, b)| a == b) + && self + .machine + .iter() + .zip(other.machine.iter()) + .all(|(a, b)| a == b) + && self + .domainname + .iter() + .zip(other.domainname.iter()) + .all(|(a, b)| a == b) + } + } + + impl Eq for utsname {} + + impl hash::Hash for utsname { + fn hash<H: hash::Hasher>(&self, state: &mut H) { + self.sysname.hash(state); + self.nodename.hash(state); + self.release.hash(state); + self.version.hash(state); + self.machine.hash(state); + self.domainname.hash(state); + } + } + } +} diff --git a/vendor/libc/src/unix/solarish/compat.rs b/vendor/libc/src/unix/solarish/compat.rs new file mode 100644 index 00000000000000..22bcf12edcc822 --- /dev/null +++ b/vendor/libc/src/unix/solarish/compat.rs @@ -0,0 +1,218 @@ +// Common functions that are unfortunately missing on illumos and +// Solaris, but often needed by other crates. +use core::cmp::min; + +use crate::unix::solarish::*; +use crate::{c_char, c_int, size_t}; + +pub unsafe fn cfmakeraw(termios: *mut crate::termios) { + (*termios).c_iflag &= + !(IMAXBEL | IGNBRK | BRKINT | PARMRK | ISTRIP | INLCR | IGNCR | ICRNL | IXON); + (*termios).c_oflag &= !OPOST; + (*termios).c_lflag &= !(ECHO | ECHONL | ICANON | ISIG | IEXTEN); + (*termios).c_cflag &= !(CSIZE | PARENB); + (*termios).c_cflag |= CS8; + + // By default, most software expects a pending read to block until at + // least one byte becomes available. As per termio(7I), this requires + // setting the MIN and TIME parameters appropriately. + // + // As a somewhat unfortunate artefact of history, the MIN and TIME slots + // in the control character array overlap with the EOF and EOL slots used + // for canonical mode processing.
Because the EOF character needs to be + // the ASCII EOT value (aka Control-D), it has the byte value 4. When + // switching to raw mode, this is interpreted as a MIN value of 4; i.e., + // reads will block until at least four bytes have been input. + // + // Other platforms with a distinct MIN slot like Linux and FreeBSD appear + // to default to a MIN value of 1, so we'll force that value here: + (*termios).c_cc[VMIN] = 1; + (*termios).c_cc[VTIME] = 0; +} + +pub unsafe fn cfsetspeed(termios: *mut crate::termios, speed: crate::speed_t) -> c_int { + // Neither of these functions on illumos or Solaris actually ever + // return an error + crate::cfsetispeed(termios, speed); + crate::cfsetospeed(termios, speed); + 0 +} + +#[cfg(target_os = "illumos")] +unsafe fn bail(fdm: c_int, fds: c_int) -> c_int { + let e = *___errno(); + if fds >= 0 { + crate::close(fds); + } + if fdm >= 0 { + crate::close(fdm); + } + *___errno() = e; + -1 +} + +#[cfg(target_os = "illumos")] +pub unsafe fn openpty( + amain: *mut c_int, + asubord: *mut c_int, + name: *mut c_char, + termp: *const termios, + winp: *const crate::winsize, +) -> c_int { + const PTEM: &[u8] = b"ptem\0"; + const LDTERM: &[u8] = b"ldterm\0"; + + // Open the main pseudo-terminal device, making sure not to set it as the + // controlling terminal for this process: + let fdm = crate::posix_openpt(O_RDWR | O_NOCTTY); + if fdm < 0 { + return -1; + } + + // Set permissions and ownership on the subordinate device and unlock it: + if crate::grantpt(fdm) < 0 || crate::unlockpt(fdm) < 0 { + return bail(fdm, -1); + } + + // Get the path name of the subordinate device: + let subordpath = crate::ptsname(fdm); + if subordpath.is_null() { + return bail(fdm, -1); + } + + // Open the subordinate device without setting it as the controlling + // terminal for this process: + let fds = crate::open(subordpath, O_RDWR | O_NOCTTY); + if fds < 0 { + return bail(fdm, -1); + } + + // Check if the STREAMS modules are already pushed: + let setup = crate::ioctl(fds, I_FIND, LDTERM.as_ptr()); + if setup < 0 { + return bail(fdm, fds); + } else if setup == 0 { + // The line discipline is not present, so push the appropriate STREAMS + // modules for the subordinate device: + if crate::ioctl(fds, I_PUSH, PTEM.as_ptr()) < 0 + || crate::ioctl(fds, I_PUSH, LDTERM.as_ptr()) < 0 + { + return bail(fdm, fds); + } + } + + // If provided, set the terminal parameters: + if !termp.is_null() && crate::tcsetattr(fds, TCSAFLUSH, termp) != 0 { + return bail(fdm, fds); + } + + // If provided, set the window size: + if !winp.is_null() && crate::ioctl(fds, TIOCSWINSZ, winp) < 0 { + return bail(fdm, fds); + } + + // If the caller wants the name of the subordinate device, copy it out. + // + // Note that this is a terrible interface: there appears to be no standard + // upper bound on the copy length for this pointer. Nobody should pass + // anything but NULL here, preferring instead to use ptsname(3C) directly. 
+ if !name.is_null() { + crate::strcpy(name, subordpath); + } + + *amain = fdm; + *asubord = fds; + 0 +} + +#[cfg(target_os = "illumos")] +pub unsafe fn forkpty( + amain: *mut c_int, + name: *mut c_char, + termp: *const termios, + winp: *const crate::winsize, +) -> crate::pid_t { + let mut fds = -1; + + if openpty(amain, &mut fds, name, termp, winp) != 0 { + return -1; + } + + let pid = crate::fork(); + if pid < 0 { + return bail(*amain, fds); + } else if pid > 0 { + // In the parent process, we close the subordinate device and return the + // process ID of the new child: + crate::close(fds); + return pid; + } + + // The rest of this function executes in the child process. + + // Close the main side of the pseudo-terminal pair: + crate::close(*amain); + + // Use TIOCSCTTY to set the subordinate device as our controlling + // terminal. This will fail (with ENOTTY) if we are not the leader in + // our own session, so we call setsid() first. Finally, arrange for + // the pseudo-terminal to occupy the standard I/O descriptors. + if crate::setsid() < 0 + || crate::ioctl(fds, TIOCSCTTY, 0) < 0 + || crate::dup2(fds, 0) < 0 + || crate::dup2(fds, 1) < 0 + || crate::dup2(fds, 2) < 0 + { + // At this stage there are no particularly good ways to handle failure. + // Exit as abruptly as possible, using _exit() to avoid messing with any + // state still shared with the parent process. + crate::_exit(EXIT_FAILURE); + } + // Close the inherited descriptor, taking care to avoid closing the standard + // descriptors by mistake: + if fds > 2 { + crate::close(fds); + } + + 0 +} + +pub unsafe fn getpwent_r( + pwd: *mut passwd, + buf: *mut c_char, + buflen: size_t, + result: *mut *mut passwd, +) -> c_int { + let old_errno = *crate::___errno(); + *crate::___errno() = 0; + *result = native_getpwent_r(pwd, buf, min(buflen, c_int::MAX as size_t) as c_int); + + let ret = if (*result).is_null() { + *crate::___errno() + } else { + 0 + }; + *crate::___errno() = old_errno; + + ret +} + +pub unsafe fn getgrent_r( + grp: *mut crate::group, + buf: *mut c_char, + buflen: size_t, + result: *mut *mut crate::group, +) -> c_int { + let old_errno = *crate::___errno(); + *crate::___errno() = 0; + *result = native_getgrent_r(grp, buf, min(buflen, c_int::MAX as size_t) as c_int); + + let ret = if (*result).is_null() { + *crate::___errno() + } else { + 0 + }; + *crate::___errno() = old_errno; + + ret +} diff --git a/vendor/libc/src/unix/solarish/illumos.rs b/vendor/libc/src/unix/solarish/illumos.rs new file mode 100644 index 00000000000000..fbeadaf344fa0c --- /dev/null +++ b/vendor/libc/src/unix/solarish/illumos.rs @@ -0,0 +1,343 @@ +use crate::prelude::*; +use crate::{ + exit_status, off_t, NET_MAC_AWARE, NET_MAC_AWARE_INHERIT, PRIV_AWARE_RESET, PRIV_DEBUG, + PRIV_PFEXEC, PRIV_XPOLICY, +}; + +pub type lgrp_rsrc_t = c_int; +pub type lgrp_affinity_t = c_int; + +s! 
{ + pub struct aiocb { + pub aio_fildes: c_int, + pub aio_buf: *mut c_void, + pub aio_nbytes: size_t, + pub aio_offset: off_t, + pub aio_reqprio: c_int, + pub aio_sigevent: crate::sigevent, + pub aio_lio_opcode: c_int, + pub aio_resultp: crate::aio_result_t, + pub aio_state: c_int, + pub aio__pad: [c_int; 1], + } + + pub struct shmid_ds { + pub shm_perm: crate::ipc_perm, + pub shm_segsz: size_t, + pub shm_amp: *mut c_void, + pub shm_lkcnt: c_ushort, + pub shm_lpid: crate::pid_t, + pub shm_cpid: crate::pid_t, + pub shm_nattch: crate::shmatt_t, + pub shm_cnattch: c_ulong, + pub shm_atime: crate::time_t, + pub shm_dtime: crate::time_t, + pub shm_ctime: crate::time_t, + pub shm_pad4: [i64; 4], + } + + pub struct fil_info { + pub fi_flags: c_int, + pub fi_pos: c_int, + pub fi_name: [c_char; crate::FILNAME_MAX as usize], + } +} + +s_no_extra_traits! { + #[cfg_attr(any(target_arch = "x86", target_arch = "x86_64"), repr(packed(4)))] + pub struct epoll_event { + pub events: u32, + pub u64: u64, + } + + pub struct utmpx { + pub ut_user: [c_char; _UTX_USERSIZE], + pub ut_id: [c_char; _UTX_IDSIZE], + pub ut_line: [c_char; _UTX_LINESIZE], + pub ut_pid: crate::pid_t, + pub ut_type: c_short, + pub ut_exit: exit_status, + pub ut_tv: crate::timeval, + pub ut_session: c_int, + pub ut_pad: [c_int; _UTX_PADSIZE], + pub ut_syslen: c_short, + pub ut_host: [c_char; _UTX_HOSTSIZE], + } +} + +cfg_if! { + if #[cfg(feature = "extra_traits")] { + impl PartialEq for utmpx { + fn eq(&self, other: &utmpx) -> bool { + self.ut_type == other.ut_type + && self.ut_pid == other.ut_pid + && self.ut_user == other.ut_user + && self.ut_line == other.ut_line + && self.ut_id == other.ut_id + && self.ut_exit == other.ut_exit + && self.ut_session == other.ut_session + && self.ut_tv == other.ut_tv + && self.ut_syslen == other.ut_syslen + && self.ut_pad == other.ut_pad + && self + .ut_host + .iter() + .zip(other.ut_host.iter()) + .all(|(a, b)| a == b) + } + } + + impl Eq for utmpx {} + + impl hash::Hash for utmpx { + fn hash(&self, state: &mut H) { + self.ut_user.hash(state); + self.ut_type.hash(state); + self.ut_pid.hash(state); + self.ut_line.hash(state); + self.ut_id.hash(state); + self.ut_host.hash(state); + self.ut_exit.hash(state); + self.ut_session.hash(state); + self.ut_tv.hash(state); + self.ut_syslen.hash(state); + self.ut_pad.hash(state); + } + } + + impl PartialEq for epoll_event { + fn eq(&self, other: &epoll_event) -> bool { + self.events == other.events && self.u64 == other.u64 + } + } + impl Eq for epoll_event {} + impl hash::Hash for epoll_event { + fn hash(&self, state: &mut H) { + let events = self.events; + let u64 = self.u64; + events.hash(state); + u64.hash(state); + } + } + } +} + +pub const _UTX_USERSIZE: usize = 32; +pub const _UTX_LINESIZE: usize = 32; +pub const _UTX_PADSIZE: usize = 5; +pub const _UTX_IDSIZE: usize = 4; +pub const _UTX_HOSTSIZE: usize = 257; + +pub const AF_LOCAL: c_int = 1; // AF_UNIX +pub const AF_FILE: c_int = 1; // AF_UNIX + +pub const EFD_SEMAPHORE: c_int = 0x1; +pub const EFD_NONBLOCK: c_int = 0x800; +pub const EFD_CLOEXEC: c_int = 0x80000; + +pub const POLLRDHUP: c_short = 0x4000; + +pub const TCP_KEEPIDLE: c_int = 34; +pub const TCP_KEEPCNT: c_int = 35; +pub const TCP_KEEPINTVL: c_int = 36; +pub const TCP_CONGESTION: c_int = 37; + +// These constants are correct for 64-bit programs or 32-bit programs that are +// not using large-file mode. 
If Rust ever supports anything other than 64-bit +// compilation on illumos, this may require adjustment: +pub const F_OFD_GETLK: c_int = 47; +pub const F_OFD_SETLK: c_int = 48; +pub const F_OFD_SETLKW: c_int = 49; +pub const F_FLOCK: c_int = 53; +pub const F_FLOCKW: c_int = 54; + +pub const F_DUPFD_CLOEXEC: c_int = 37; +pub const F_DUPFD_CLOFORK: c_int = 58; +pub const F_DUP2FD_CLOEXEC: c_int = 36; +pub const F_DUP2FD_CLOFORK: c_int = 57; +pub const F_DUP3FD: c_int = 59; + +pub const FD_CLOFORK: c_int = 2; + +pub const FIL_ATTACH: c_int = 0x1; +pub const FIL_DETACH: c_int = 0x2; +pub const FIL_LIST: c_int = 0x3; +pub const FILNAME_MAX: c_int = 32; +pub const FILF_PROG: c_int = 0x1; +pub const FILF_AUTO: c_int = 0x2; +pub const FILF_BYPASS: c_int = 0x4; +pub const SOL_FILTER: c_int = 0xfffc; + +pub const MADV_PURGE: c_int = 9; + +pub const POSIX_FADV_NORMAL: c_int = 0; +pub const POSIX_FADV_RANDOM: c_int = 1; +pub const POSIX_FADV_SEQUENTIAL: c_int = 2; +pub const POSIX_FADV_WILLNEED: c_int = 3; +pub const POSIX_FADV_DONTNEED: c_int = 4; +pub const POSIX_FADV_NOREUSE: c_int = 5; + +pub const POSIX_SPAWN_SETSID: c_short = 0x40; + +pub const SIGINFO: c_int = 41; + +pub const O_DIRECT: c_int = 0x2000000; +pub const O_CLOFORK: c_int = 0x4000000; + +pub const MSG_CMSG_CLOEXEC: c_int = 0x1000; +pub const MSG_CMSG_CLOFORK: c_int = 0x2000; + +pub const PBIND_HARD: crate::processorid_t = -3; +pub const PBIND_SOFT: crate::processorid_t = -4; + +pub const PS_SYSTEM: c_int = 1; + +pub const MAP_FILE: c_int = 0; + +pub const MAP_32BIT: c_int = 0x80; + +pub const AF_NCA: c_int = 28; + +pub const PF_NCA: c_int = AF_NCA; + +pub const LOCK_SH: c_int = 1; +pub const LOCK_EX: c_int = 2; +pub const LOCK_NB: c_int = 4; +pub const LOCK_UN: c_int = 8; + +pub const _PC_LAST: c_int = 101; + +pub const VSTATUS: usize = 16; +pub const VERASE2: usize = 17; + +pub const EPOLLIN: c_int = 0x1; +pub const EPOLLPRI: c_int = 0x2; +pub const EPOLLOUT: c_int = 0x4; +pub const EPOLLRDNORM: c_int = 0x40; +pub const EPOLLRDBAND: c_int = 0x80; +pub const EPOLLWRNORM: c_int = 0x100; +pub const EPOLLWRBAND: c_int = 0x200; +pub const EPOLLMSG: c_int = 0x400; +pub const EPOLLERR: c_int = 0x8; +pub const EPOLLHUP: c_int = 0x10; +pub const EPOLLET: c_int = 0x80000000; +pub const EPOLLRDHUP: c_int = 0x2000; +pub const EPOLLONESHOT: c_int = 0x40000000; +pub const EPOLLWAKEUP: c_int = 0x20000000; +pub const EPOLLEXCLUSIVE: c_int = 0x10000000; +pub const EPOLL_CLOEXEC: c_int = 0x80000; +pub const EPOLL_CTL_ADD: c_int = 1; +pub const EPOLL_CTL_MOD: c_int = 3; +pub const EPOLL_CTL_DEL: c_int = 2; + +pub const PRIV_USER: c_uint = PRIV_DEBUG + | NET_MAC_AWARE + | NET_MAC_AWARE_INHERIT + | PRIV_XPOLICY + | PRIV_AWARE_RESET + | PRIV_PFEXEC; + +pub const LGRP_RSRC_COUNT: crate::lgrp_rsrc_t = 2; +pub const LGRP_RSRC_CPU: crate::lgrp_rsrc_t = 0; +pub const LGRP_RSRC_MEM: crate::lgrp_rsrc_t = 1; + +pub const P_DISABLED: c_int = 0x008; + +pub const AT_SUN_HWCAP2: c_uint = 2023; +pub const AT_SUN_FPTYPE: c_uint = 2027; + +pub const B1000000: crate::speed_t = 24; +pub const B1152000: crate::speed_t = 25; +pub const B1500000: crate::speed_t = 26; +pub const B2000000: crate::speed_t = 27; +pub const B2500000: crate::speed_t = 28; +pub const B3000000: crate::speed_t = 29; +pub const B3500000: crate::speed_t = 30; +pub const B4000000: crate::speed_t = 31; + +// sys/systeminfo.h +pub const SI_ADDRESS_WIDTH: c_int = 520; + +// sys/timerfd.h +pub const TFD_CLOEXEC: i32 = 0o2000000; +pub const TFD_NONBLOCK: i32 = 0o4000; +pub const TFD_TIMER_ABSTIME: i32 = 1 
<< 0; +pub const TFD_TIMER_CANCEL_ON_SET: i32 = 1 << 1; + +extern "C" { + pub fn eventfd(init: c_uint, flags: c_int) -> c_int; + + pub fn epoll_pwait( + epfd: c_int, + events: *mut crate::epoll_event, + maxevents: c_int, + timeout: c_int, + sigmask: *const crate::sigset_t, + ) -> c_int; + pub fn epoll_create(size: c_int) -> c_int; + pub fn epoll_create1(flags: c_int) -> c_int; + pub fn epoll_wait( + epfd: c_int, + events: *mut crate::epoll_event, + maxevents: c_int, + timeout: c_int, + ) -> c_int; + pub fn epoll_ctl(epfd: c_int, op: c_int, fd: c_int, event: *mut crate::epoll_event) -> c_int; + + pub fn mincore(addr: crate::caddr_t, len: size_t, vec: *mut c_char) -> c_int; + + pub fn pset_bind_lwp( + pset: crate::psetid_t, + id: crate::id_t, + pid: crate::pid_t, + opset: *mut crate::psetid_t, + ) -> c_int; + pub fn pset_getloadavg(pset: crate::psetid_t, load: *mut c_double, num: c_int) -> c_int; + + pub fn pthread_attr_get_np(thread: crate::pthread_t, attr: *mut crate::pthread_attr_t) + -> c_int; + pub fn pthread_attr_getstackaddr( + attr: *const crate::pthread_attr_t, + stackaddr: *mut *mut c_void, + ) -> c_int; + pub fn pthread_attr_setstack( + attr: *mut crate::pthread_attr_t, + stackaddr: *mut c_void, + stacksize: size_t, + ) -> c_int; + pub fn pthread_attr_setstackaddr( + attr: *mut crate::pthread_attr_t, + stackaddr: *mut c_void, + ) -> c_int; + + pub fn posix_fadvise(fd: c_int, offset: off_t, len: off_t, advice: c_int) -> c_int; + pub fn preadv(fd: c_int, iov: *const crate::iovec, iovcnt: c_int, offset: off_t) -> ssize_t; + pub fn pwritev(fd: c_int, iov: *const crate::iovec, iovcnt: c_int, offset: off_t) -> ssize_t; + pub fn getpagesizes2(pagesize: *mut size_t, nelem: c_int) -> c_int; + + pub fn posix_spawn_file_actions_addfchdir_np( + file_actions: *mut crate::posix_spawn_file_actions_t, + fd: c_int, + ) -> c_int; + + pub fn ptsname_r(fildes: c_int, name: *mut c_char, namelen: size_t) -> c_int; + + pub fn syncfs(fd: c_int) -> c_int; + + pub fn strcasecmp_l(s1: *const c_char, s2: *const c_char, loc: crate::locale_t) -> c_int; + pub fn strncasecmp_l( + s1: *const c_char, + s2: *const c_char, + n: size_t, + loc: crate::locale_t, + ) -> c_int; + + pub fn timerfd_create(clockid: c_int, flags: c_int) -> c_int; + pub fn timerfd_gettime(fd: c_int, curr_value: *mut crate::itimerspec) -> c_int; + pub fn timerfd_settime( + fd: c_int, + flags: c_int, + new_value: *const crate::itimerspec, + old_value: *mut crate::itimerspec, + ) -> c_int; +} diff --git a/vendor/libc/src/unix/solarish/mod.rs b/vendor/libc/src/unix/solarish/mod.rs new file mode 100644 index 00000000000000..d8b32dfc0aae9c --- /dev/null +++ b/vendor/libc/src/unix/solarish/mod.rs @@ -0,0 +1,3240 @@ +use crate::prelude::*; + +pub type caddr_t = *mut c_char; + +pub type clockid_t = c_int; +pub type blkcnt_t = c_long; +pub type clock_t = c_long; +pub type daddr_t = c_long; +pub type dev_t = c_ulong; +pub type fsblkcnt_t = c_ulong; +pub type fsfilcnt_t = c_ulong; +pub type ino_t = c_ulong; +pub type key_t = c_int; +pub type major_t = c_uint; +pub type minor_t = c_uint; +pub type mode_t = c_uint; +pub type nlink_t = c_uint; +pub type rlim_t = c_ulong; +pub type speed_t = c_uint; +pub type tcflag_t = c_uint; +pub type time_t = c_long; +pub type timer_t = c_int; +pub type wchar_t = c_int; +pub type nfds_t = c_ulong; +pub type projid_t = c_int; +pub type zoneid_t = c_int; +pub type psetid_t = c_int; +pub type processorid_t = c_int; +pub type chipid_t = c_int; +pub type ctid_t = crate::id_t; + +pub type suseconds_t = c_long; +pub type off_t 
= c_long; +pub type useconds_t = c_uint; +pub type socklen_t = c_uint; +pub type sa_family_t = u16; +pub type pthread_t = c_uint; +pub type pthread_key_t = c_uint; +pub type thread_t = c_uint; +pub type blksize_t = c_int; +pub type nl_item = c_int; +pub type mqd_t = *mut c_void; +pub type id_t = c_int; +pub type idtype_t = c_uint; +pub type shmatt_t = c_ulong; + +pub type lgrp_id_t = crate::id_t; +pub type lgrp_mem_size_t = c_longlong; +pub type lgrp_cookie_t = crate::uintptr_t; +pub type lgrp_content_t = c_uint; +pub type lgrp_lat_between_t = c_uint; +pub type lgrp_mem_size_flag_t = c_uint; +pub type lgrp_view_t = c_uint; + +pub type posix_spawnattr_t = *mut c_void; +pub type posix_spawn_file_actions_t = *mut c_void; + +#[derive(Debug)] +pub enum timezone {} +impl Copy for timezone {} +impl Clone for timezone { + fn clone(&self) -> timezone { + *self + } +} + +#[derive(Debug)] +pub enum ucred_t {} +impl Copy for ucred_t {} +impl Clone for ucred_t { + fn clone(&self) -> ucred_t { + *self + } +} + +s! { + pub struct in_addr { + pub s_addr: crate::in_addr_t, + } + + pub struct ip_mreq { + pub imr_multiaddr: in_addr, + pub imr_interface: in_addr, + } + + pub struct ip_mreq_source { + pub imr_multiaddr: in_addr, + pub imr_sourceaddr: in_addr, + pub imr_interface: in_addr, + } + + pub struct ipc_perm { + pub uid: crate::uid_t, + pub gid: crate::gid_t, + pub cuid: crate::uid_t, + pub cgid: crate::gid_t, + pub mode: mode_t, + pub seq: c_uint, + pub key: crate::key_t, + } + + pub struct sockaddr { + pub sa_family: sa_family_t, + pub sa_data: [c_char; 14], + } + + pub struct sockaddr_in { + pub sin_family: sa_family_t, + pub sin_port: crate::in_port_t, + pub sin_addr: crate::in_addr, + pub sin_zero: [c_char; 8], + } + + pub struct sockaddr_in6 { + pub sin6_family: sa_family_t, + pub sin6_port: crate::in_port_t, + pub sin6_flowinfo: u32, + pub sin6_addr: crate::in6_addr, + pub sin6_scope_id: u32, + pub __sin6_src_id: u32, + } + + pub struct in_pktinfo { + pub ipi_ifindex: c_uint, + pub ipi_spec_dst: crate::in_addr, + pub ipi_addr: crate::in_addr, + } + + pub struct in6_pktinfo { + pub ipi6_addr: crate::in6_addr, + pub ipi6_ifindex: c_uint, + } + + pub struct passwd { + pub pw_name: *mut c_char, + pub pw_passwd: *mut c_char, + pub pw_uid: crate::uid_t, + pub pw_gid: crate::gid_t, + pub pw_age: *mut c_char, + pub pw_comment: *mut c_char, + pub pw_gecos: *mut c_char, + pub pw_dir: *mut c_char, + pub pw_shell: *mut c_char, + } + + pub struct ifaddrs { + pub ifa_next: *mut ifaddrs, + pub ifa_name: *mut c_char, + pub ifa_flags: u64, + pub ifa_addr: *mut crate::sockaddr, + pub ifa_netmask: *mut crate::sockaddr, + pub ifa_dstaddr: *mut crate::sockaddr, + pub ifa_data: *mut c_void, + } + + pub struct itimerspec { + pub it_interval: crate::timespec, + pub it_value: crate::timespec, + } + + pub struct tm { + pub tm_sec: c_int, + pub tm_min: c_int, + pub tm_hour: c_int, + pub tm_mday: c_int, + pub tm_mon: c_int, + pub tm_year: c_int, + pub tm_wday: c_int, + pub tm_yday: c_int, + pub tm_isdst: c_int, + } + + pub struct msghdr { + pub msg_name: *mut c_void, + pub msg_namelen: crate::socklen_t, + pub msg_iov: *mut crate::iovec, + pub msg_iovlen: c_int, + pub msg_control: *mut c_void, + pub msg_controllen: crate::socklen_t, + pub msg_flags: c_int, + } + + pub struct cmsghdr { + pub cmsg_len: crate::socklen_t, + pub cmsg_level: c_int, + pub cmsg_type: c_int, + } + + pub struct pthread_attr_t { + __pthread_attrp: *mut c_void, + } + + pub struct pthread_mutex_t { + __pthread_mutex_flag1: u16, + __pthread_mutex_flag2: 
u8, + __pthread_mutex_ceiling: u8, + __pthread_mutex_type: u16, + __pthread_mutex_magic: u16, + __pthread_mutex_lock: u64, + __pthread_mutex_data: u64, + } + + pub struct pthread_mutexattr_t { + __pthread_mutexattrp: *mut c_void, + } + + pub struct pthread_cond_t { + __pthread_cond_flag: [u8; 4], + __pthread_cond_type: u16, + __pthread_cond_magic: u16, + __pthread_cond_data: u64, + } + + pub struct pthread_condattr_t { + __pthread_condattrp: *mut c_void, + } + + pub struct pthread_rwlock_t { + __pthread_rwlock_readers: i32, + __pthread_rwlock_type: u16, + __pthread_rwlock_magic: u16, + __pthread_rwlock_mutex: crate::pthread_mutex_t, + __pthread_rwlock_readercv: crate::pthread_cond_t, + __pthread_rwlock_writercv: crate::pthread_cond_t, + } + + pub struct pthread_rwlockattr_t { + __pthread_rwlockattrp: *mut c_void, + } + + pub struct dirent { + pub d_ino: crate::ino_t, + pub d_off: off_t, + pub d_reclen: u16, + pub d_name: [c_char; 3], + } + + pub struct glob_t { + pub gl_pathc: size_t, + pub gl_pathv: *mut *mut c_char, + pub gl_offs: size_t, + __unused1: *mut c_void, + __unused2: c_int, + #[cfg(target_os = "illumos")] + __unused3: c_int, + #[cfg(target_os = "illumos")] + __unused4: c_int, + #[cfg(target_os = "illumos")] + __unused5: *mut c_void, + #[cfg(target_os = "illumos")] + __unused6: *mut c_void, + #[cfg(target_os = "illumos")] + __unused7: *mut c_void, + #[cfg(target_os = "illumos")] + __unused8: *mut c_void, + #[cfg(target_os = "illumos")] + __unused9: *mut c_void, + #[cfg(target_os = "illumos")] + __unused10: *mut c_void, + } + + pub struct addrinfo { + pub ai_flags: c_int, + pub ai_family: c_int, + pub ai_socktype: c_int, + pub ai_protocol: c_int, + #[cfg(target_arch = "sparc64")] + __sparcv9_pad: c_int, + pub ai_addrlen: crate::socklen_t, + pub ai_canonname: *mut c_char, + pub ai_addr: *mut crate::sockaddr, + pub ai_next: *mut addrinfo, + } + + pub struct sigset_t { + bits: [u32; 4], + } + + pub struct sigaction { + pub sa_flags: c_int, + pub sa_sigaction: crate::sighandler_t, + pub sa_mask: sigset_t, + } + + pub struct stack_t { + pub ss_sp: *mut c_void, + pub ss_size: size_t, + pub ss_flags: c_int, + } + + pub struct statvfs { + pub f_bsize: c_ulong, + pub f_frsize: c_ulong, + pub f_blocks: crate::fsblkcnt_t, + pub f_bfree: crate::fsblkcnt_t, + pub f_bavail: crate::fsblkcnt_t, + pub f_files: crate::fsfilcnt_t, + pub f_ffree: crate::fsfilcnt_t, + pub f_favail: crate::fsfilcnt_t, + pub f_fsid: c_ulong, + pub f_basetype: [c_char; 16], + pub f_flag: c_ulong, + pub f_namemax: c_ulong, + pub f_fstr: [c_char; 32], + } + + pub struct sendfilevec_t { + pub sfv_fd: c_int, + pub sfv_flag: c_uint, + pub sfv_off: off_t, + pub sfv_len: size_t, + } + + pub struct sched_param { + pub sched_priority: c_int, + sched_pad: [c_int; 8], + } + + pub struct Dl_info { + pub dli_fname: *const c_char, + pub dli_fbase: *mut c_void, + pub dli_sname: *const c_char, + pub dli_saddr: *mut c_void, + } + + pub struct stat { + pub st_dev: crate::dev_t, + pub st_ino: crate::ino_t, + pub st_mode: mode_t, + pub st_nlink: crate::nlink_t, + pub st_uid: crate::uid_t, + pub st_gid: crate::gid_t, + pub st_rdev: crate::dev_t, + pub st_size: off_t, + pub st_atime: crate::time_t, + pub st_atime_nsec: c_long, + pub st_mtime: crate::time_t, + pub st_mtime_nsec: c_long, + pub st_ctime: crate::time_t, + pub st_ctime_nsec: c_long, + pub st_blksize: crate::blksize_t, + pub st_blocks: crate::blkcnt_t, + pub st_fstype: [c_char; _ST_FSTYPSZ as usize], + } + + pub struct termios { + pub c_iflag: crate::tcflag_t, + pub c_oflag: 
crate::tcflag_t, + pub c_cflag: crate::tcflag_t, + pub c_lflag: crate::tcflag_t, + pub c_cc: [crate::cc_t; crate::NCCS], + } + + pub struct lconv { + pub decimal_point: *mut c_char, + pub thousands_sep: *mut c_char, + pub grouping: *mut c_char, + pub int_curr_symbol: *mut c_char, + pub currency_symbol: *mut c_char, + pub mon_decimal_point: *mut c_char, + pub mon_thousands_sep: *mut c_char, + pub mon_grouping: *mut c_char, + pub positive_sign: *mut c_char, + pub negative_sign: *mut c_char, + pub int_frac_digits: c_char, + pub frac_digits: c_char, + pub p_cs_precedes: c_char, + pub p_sep_by_space: c_char, + pub n_cs_precedes: c_char, + pub n_sep_by_space: c_char, + pub p_sign_posn: c_char, + pub n_sign_posn: c_char, + pub int_p_cs_precedes: c_char, + pub int_p_sep_by_space: c_char, + pub int_n_cs_precedes: c_char, + pub int_n_sep_by_space: c_char, + pub int_p_sign_posn: c_char, + pub int_n_sign_posn: c_char, + } + + pub struct sem_t { + pub sem_count: u32, + pub sem_type: u16, + pub sem_magic: u16, + pub sem_pad1: [u64; 3], + pub sem_pad2: [u64; 2], + } + + pub struct flock { + pub l_type: c_short, + pub l_whence: c_short, + pub l_start: off_t, + pub l_len: off_t, + pub l_sysid: c_int, + pub l_pid: crate::pid_t, + pub l_pad: [c_long; 4], + } + + pub struct if_nameindex { + pub if_index: c_uint, + pub if_name: *mut c_char, + } + + pub struct mq_attr { + pub mq_flags: c_long, + pub mq_maxmsg: c_long, + pub mq_msgsize: c_long, + pub mq_curmsgs: c_long, + _pad: [c_int; 12], + } + + pub struct port_event { + pub portev_events: c_int, + pub portev_source: c_ushort, + pub portev_pad: c_ushort, + pub portev_object: crate::uintptr_t, + pub portev_user: *mut c_void, + } + + pub struct port_notify { + pub portnfy_port: c_int, + pub portnfy_user: *mut c_void, + } + + pub struct aio_result_t { + pub aio_return: ssize_t, + pub aio_errno: c_int, + } + + pub struct exit_status { + e_termination: c_short, + e_exit: c_short, + } + + pub struct utmp { + pub ut_user: [c_char; 8], + pub ut_id: [c_char; 4], + pub ut_line: [c_char; 12], + pub ut_pid: c_short, + pub ut_type: c_short, + pub ut_exit: exit_status, + pub ut_time: crate::time_t, + } + + pub struct timex { + pub modes: u32, + pub offset: i32, + pub freq: i32, + pub maxerror: i32, + pub esterror: i32, + pub status: i32, + pub constant: i32, + pub precision: i32, + pub tolerance: i32, + pub ppsfreq: i32, + pub jitter: i32, + pub shift: i32, + pub stabil: i32, + pub jitcnt: i32, + pub calcnt: i32, + pub errcnt: i32, + pub stbcnt: i32, + } + + pub struct ntptimeval { + pub time: crate::timeval, + pub maxerror: i32, + pub esterror: i32, + } + + pub struct mmapobj_result_t { + pub mr_addr: crate::caddr_t, + pub mr_msize: size_t, + pub mr_fsize: size_t, + pub mr_offset: size_t, + pub mr_prot: c_uint, + pub mr_flags: c_uint, + } + + pub struct lgrp_affinity_args_t { + pub idtype: crate::idtype_t, + pub id: crate::id_t, + pub lgrp: crate::lgrp_id_t, + pub aff: crate::lgrp_affinity_t, + } + + pub struct processor_info_t { + pub pi_state: c_int, + pub pi_processor_type: [c_char; PI_TYPELEN as usize], + pub pi_fputypes: [c_char; PI_FPUTYPE as usize], + pub pi_clock: c_int, + } + + pub struct option { + pub name: *const c_char, + pub has_arg: c_int, + pub flag: *mut c_int, + pub val: c_int, + } +} + +s_no_extra_traits! 
{ + pub struct sockaddr_un { + pub sun_family: sa_family_t, + pub sun_path: [c_char; 108], + } + + pub struct utsname { + pub sysname: [c_char; 257], + pub nodename: [c_char; 257], + pub release: [c_char; 257], + pub version: [c_char; 257], + pub machine: [c_char; 257], + } + + pub struct fd_set { + #[cfg(target_pointer_width = "64")] + fds_bits: [i64; FD_SETSIZE as usize / 64], + #[cfg(target_pointer_width = "32")] + fds_bits: [i32; FD_SETSIZE as usize / 32], + } + + pub struct sockaddr_storage { + pub ss_family: crate::sa_family_t, + __ss_pad1: [u8; 6], + __ss_align: i64, + __ss_pad2: [u8; 240], + } + + #[cfg_attr(target_pointer_width = "64", repr(align(8)))] + pub struct siginfo_t { + pub si_signo: c_int, + pub si_code: c_int, + pub si_errno: c_int, + #[cfg(target_pointer_width = "64")] + pub si_pad: c_int, + + __data_pad: [c_int; SIGINFO_DATA_SIZE], + } + + pub struct sockaddr_dl { + pub sdl_family: c_ushort, + pub sdl_index: c_ushort, + pub sdl_type: c_uchar, + pub sdl_nlen: c_uchar, + pub sdl_alen: c_uchar, + pub sdl_slen: c_uchar, + pub sdl_data: [c_char; 244], + } + + pub struct sigevent { + pub sigev_notify: c_int, + pub sigev_signo: c_int, + pub sigev_value: crate::sigval, + pub ss_sp: *mut c_void, + pub sigev_notify_attributes: *const crate::pthread_attr_t, + __sigev_pad2: c_int, + } + + #[repr(align(16))] + pub union pad128_t { + // pub _q in this structure would be a "long double", of 16 bytes + pub _l: [i32; 4], + } + + #[repr(align(16))] + pub union upad128_t { + // pub _q in this structure would be a "long double", of 16 bytes + pub _l: [u32; 4], + } +} + +cfg_if! { + if #[cfg(feature = "extra_traits")] { + impl PartialEq for sockaddr_un { + fn eq(&self, other: &sockaddr_un) -> bool { + self.sun_family == other.sun_family + && self + .sun_path + .iter() + .zip(other.sun_path.iter()) + .all(|(a, b)| a == b) + } + } + impl Eq for sockaddr_un {} + impl hash::Hash for sockaddr_un { + fn hash(&self, state: &mut H) { + self.sun_family.hash(state); + self.sun_path.hash(state); + } + } + + impl PartialEq for utsname { + fn eq(&self, other: &utsname) -> bool { + self.sysname + .iter() + .zip(other.sysname.iter()) + .all(|(a, b)| a == b) + && self + .nodename + .iter() + .zip(other.nodename.iter()) + .all(|(a, b)| a == b) + && self + .release + .iter() + .zip(other.release.iter()) + .all(|(a, b)| a == b) + && self + .version + .iter() + .zip(other.version.iter()) + .all(|(a, b)| a == b) + && self + .machine + .iter() + .zip(other.machine.iter()) + .all(|(a, b)| a == b) + } + } + impl Eq for utsname {} + impl hash::Hash for utsname { + fn hash(&self, state: &mut H) { + self.sysname.hash(state); + self.nodename.hash(state); + self.release.hash(state); + self.version.hash(state); + self.machine.hash(state); + } + } + + impl PartialEq for fd_set { + fn eq(&self, other: &fd_set) -> bool { + self.fds_bits + .iter() + .zip(other.fds_bits.iter()) + .all(|(a, b)| a == b) + } + } + impl Eq for fd_set {} + impl hash::Hash for fd_set { + fn hash(&self, state: &mut H) { + self.fds_bits.hash(state); + } + } + + impl PartialEq for sockaddr_storage { + fn eq(&self, other: &sockaddr_storage) -> bool { + self.ss_family == other.ss_family + && self.__ss_pad1 == other.__ss_pad1 + && self.__ss_align == other.__ss_align + && self + .__ss_pad2 + .iter() + .zip(other.__ss_pad2.iter()) + .all(|(a, b)| a == b) + } + } + impl Eq for sockaddr_storage {} + impl hash::Hash for sockaddr_storage { + fn hash(&self, state: &mut H) { + self.ss_family.hash(state); + self.__ss_pad1.hash(state); + 
self.__ss_align.hash(state); + self.__ss_pad2.hash(state); + } + } + + impl siginfo_t { + /// The siginfo_t will have differing contents based on the delivered signal. Based on + /// `si_signo`, this determines how many of the `c_int` pad fields contain valid data + /// exposed by the C unions. + /// + /// It is not yet exhaustive for the OS-defined types, and defaults to assuming the + /// entire data pad area is "valid" for otherwise unrecognized signal numbers. + fn data_field_count(&self) -> usize { + match self.si_signo { + SIGSEGV | SIGBUS | SIGILL | SIGTRAP | SIGFPE => { + size_of::<siginfo_fault>() / size_of::<c_int>() + } + SIGCLD => size_of::<siginfo_sigcld>() / size_of::<c_int>(), + SIGHUP + | SIGINT + | SIGQUIT + | SIGABRT + | SIGSYS + | SIGPIPE + | SIGALRM + | SIGTERM + | crate::SIGUSR1 + | crate::SIGUSR2 + | SIGPWR + | SIGWINCH + | SIGURG => size_of::<siginfo_kill>() / size_of::<c_int>(), + _ => SIGINFO_DATA_SIZE, + } + } + } + impl PartialEq for siginfo_t { + fn eq(&self, other: &siginfo_t) -> bool { + if self.si_signo == other.si_signo + && self.si_code == other.si_code + && self.si_errno == other.si_errno + { + // FIXME(solarish): The `si_pad` field in the 64-bit version of the struct is ignored + // (for now) when doing comparisons. + + let field_count = self.data_field_count(); + self.__data_pad[..field_count] + .iter() + .zip(other.__data_pad[..field_count].iter()) + .all(|(a, b)| a == b) + } else { + false + } + } + } + impl Eq for siginfo_t {} + impl hash::Hash for siginfo_t { + fn hash<H: hash::Hasher>(&self, state: &mut H) { + self.si_signo.hash(state); + self.si_code.hash(state); + self.si_errno.hash(state); + + // FIXME(solarish): The `si_pad` field in the 64-bit version of the struct is ignored + // (for now) when doing hashing. + + let field_count = self.data_field_count(); + self.__data_pad[..field_count].hash(state) + } + } + + impl PartialEq for sockaddr_dl { + fn eq(&self, other: &sockaddr_dl) -> bool { + self.sdl_family == other.sdl_family + && self.sdl_index == other.sdl_index + && self.sdl_type == other.sdl_type + && self.sdl_nlen == other.sdl_nlen + && self.sdl_alen == other.sdl_alen + && self.sdl_slen == other.sdl_slen + && self + .sdl_data + .iter() + .zip(other.sdl_data.iter()) + .all(|(a, b)| a == b) + } + } + impl Eq for sockaddr_dl {} + impl hash::Hash for sockaddr_dl { + fn hash<H: hash::Hasher>(&self, state: &mut H) { + self.sdl_family.hash(state); + self.sdl_index.hash(state); + self.sdl_type.hash(state); + self.sdl_nlen.hash(state); + self.sdl_alen.hash(state); + self.sdl_slen.hash(state); + self.sdl_data.hash(state); + } + } + + impl PartialEq for sigevent { + fn eq(&self, other: &sigevent) -> bool { + self.sigev_notify == other.sigev_notify + && self.sigev_signo == other.sigev_signo + && self.sigev_value == other.sigev_value + && self.ss_sp == other.ss_sp + && self.sigev_notify_attributes == other.sigev_notify_attributes + } + } + impl Eq for sigevent {} + impl hash::Hash for sigevent { + fn hash<H: hash::Hasher>(&self, state: &mut H) { + self.sigev_notify.hash(state); + self.sigev_signo.hash(state); + self.sigev_value.hash(state); + self.ss_sp.hash(state); + self.sigev_notify_attributes.hash(state); + } + } + + impl PartialEq for pad128_t { + fn eq(&self, other: &pad128_t) -> bool { + unsafe { + // FIXME(solarish): self._q == other._q || + self._l == other._l + } + } + } + impl Eq for pad128_t {} + impl hash::Hash for pad128_t { + fn hash<H: hash::Hasher>(&self, state: &mut H) { + unsafe { + // FIXME(solarish): state.write_i64(self._q as i64); + self._l.hash(state); + } + } + } + impl PartialEq for upad128_t { + fn eq(&self, other: &upad128_t) -> bool { + unsafe { + //
FIXME(solarish): self._q == other._q || + self._l == other._l + } + } + } + impl Eq for upad128_t {} + impl hash::Hash for upad128_t { + fn hash(&self, state: &mut H) { + unsafe { + // FIXME(solarish): state.write_i64(self._q as i64); + self._l.hash(state); + } + } + } + } +} + +cfg_if! { + if #[cfg(target_pointer_width = "64")] { + const SIGINFO_DATA_SIZE: usize = 60; + } else { + const SIGINFO_DATA_SIZE: usize = 29; + } +} + +#[repr(C)] +struct siginfo_fault { + addr: *mut c_void, + trapno: c_int, + pc: *mut crate::caddr_t, +} +impl Copy for siginfo_fault {} +impl Clone for siginfo_fault { + fn clone(&self) -> Self { + *self + } +} + +#[repr(C)] +struct siginfo_cldval { + utime: crate::clock_t, + status: c_int, + stime: crate::clock_t, +} +impl Copy for siginfo_cldval {} +impl Clone for siginfo_cldval { + fn clone(&self) -> Self { + *self + } +} + +#[repr(C)] +struct siginfo_killval { + uid: crate::uid_t, + value: crate::sigval, + // Pad out to match the SIGCLD value size + _pad: *mut c_void, +} +impl Copy for siginfo_killval {} +impl Clone for siginfo_killval { + fn clone(&self) -> Self { + *self + } +} + +#[repr(C)] +struct siginfo_sigcld { + pid: crate::pid_t, + val: siginfo_cldval, + ctid: crate::ctid_t, + zoneid: crate::zoneid_t, +} +impl Copy for siginfo_sigcld {} +impl Clone for siginfo_sigcld { + fn clone(&self) -> Self { + *self + } +} + +#[repr(C)] +struct siginfo_kill { + pid: crate::pid_t, + val: siginfo_killval, + ctid: crate::ctid_t, + zoneid: crate::zoneid_t, +} +impl Copy for siginfo_kill {} +impl Clone for siginfo_kill { + fn clone(&self) -> Self { + *self + } +} + +impl siginfo_t { + unsafe fn sidata(&self) -> T { + *((&self.__data_pad) as *const c_int as *const T) + } + pub unsafe fn si_addr(&self) -> *mut c_void { + let sifault: siginfo_fault = self.sidata(); + sifault.addr + } + pub unsafe fn si_uid(&self) -> crate::uid_t { + let kill: siginfo_kill = self.sidata(); + kill.val.uid + } + pub unsafe fn si_value(&self) -> crate::sigval { + let kill: siginfo_kill = self.sidata(); + kill.val.value + } + pub unsafe fn si_pid(&self) -> crate::pid_t { + let sigcld: siginfo_sigcld = self.sidata(); + sigcld.pid + } + pub unsafe fn si_status(&self) -> c_int { + let sigcld: siginfo_sigcld = self.sidata(); + sigcld.val.status + } + pub unsafe fn si_utime(&self) -> c_long { + let sigcld: siginfo_sigcld = self.sidata(); + sigcld.val.utime + } + pub unsafe fn si_stime(&self) -> c_long { + let sigcld: siginfo_sigcld = self.sidata(); + sigcld.val.stime + } +} + +pub const LC_CTYPE: c_int = 0; +pub const LC_NUMERIC: c_int = 1; +pub const LC_TIME: c_int = 2; +pub const LC_COLLATE: c_int = 3; +pub const LC_MONETARY: c_int = 4; +pub const LC_MESSAGES: c_int = 5; +pub const LC_ALL: c_int = 6; +pub const LC_CTYPE_MASK: c_int = 1 << LC_CTYPE; +pub const LC_NUMERIC_MASK: c_int = 1 << LC_NUMERIC; +pub const LC_TIME_MASK: c_int = 1 << LC_TIME; +pub const LC_COLLATE_MASK: c_int = 1 << LC_COLLATE; +pub const LC_MONETARY_MASK: c_int = 1 << LC_MONETARY; +pub const LC_MESSAGES_MASK: c_int = 1 << LC_MESSAGES; +pub const LC_ALL_MASK: c_int = LC_CTYPE_MASK + | LC_NUMERIC_MASK + | LC_TIME_MASK + | LC_COLLATE_MASK + | LC_MONETARY_MASK + | LC_MESSAGES_MASK; + +pub const DAY_1: crate::nl_item = 1; +pub const DAY_2: crate::nl_item = 2; +pub const DAY_3: crate::nl_item = 3; +pub const DAY_4: crate::nl_item = 4; +pub const DAY_5: crate::nl_item = 5; +pub const DAY_6: crate::nl_item = 6; +pub const DAY_7: crate::nl_item = 7; + +pub const ABDAY_1: crate::nl_item = 8; +pub const ABDAY_2: crate::nl_item = 9; +pub 
const ABDAY_3: crate::nl_item = 10; +pub const ABDAY_4: crate::nl_item = 11; +pub const ABDAY_5: crate::nl_item = 12; +pub const ABDAY_6: crate::nl_item = 13; +pub const ABDAY_7: crate::nl_item = 14; + +pub const MON_1: crate::nl_item = 15; +pub const MON_2: crate::nl_item = 16; +pub const MON_3: crate::nl_item = 17; +pub const MON_4: crate::nl_item = 18; +pub const MON_5: crate::nl_item = 19; +pub const MON_6: crate::nl_item = 20; +pub const MON_7: crate::nl_item = 21; +pub const MON_8: crate::nl_item = 22; +pub const MON_9: crate::nl_item = 23; +pub const MON_10: crate::nl_item = 24; +pub const MON_11: crate::nl_item = 25; +pub const MON_12: crate::nl_item = 26; + +pub const ABMON_1: crate::nl_item = 27; +pub const ABMON_2: crate::nl_item = 28; +pub const ABMON_3: crate::nl_item = 29; +pub const ABMON_4: crate::nl_item = 30; +pub const ABMON_5: crate::nl_item = 31; +pub const ABMON_6: crate::nl_item = 32; +pub const ABMON_7: crate::nl_item = 33; +pub const ABMON_8: crate::nl_item = 34; +pub const ABMON_9: crate::nl_item = 35; +pub const ABMON_10: crate::nl_item = 36; +pub const ABMON_11: crate::nl_item = 37; +pub const ABMON_12: crate::nl_item = 38; + +pub const RADIXCHAR: crate::nl_item = 39; +pub const THOUSEP: crate::nl_item = 40; +pub const YESSTR: crate::nl_item = 41; +pub const NOSTR: crate::nl_item = 42; +pub const CRNCYSTR: crate::nl_item = 43; + +pub const D_T_FMT: crate::nl_item = 44; +pub const D_FMT: crate::nl_item = 45; +pub const T_FMT: crate::nl_item = 46; +pub const AM_STR: crate::nl_item = 47; +pub const PM_STR: crate::nl_item = 48; + +pub const CODESET: crate::nl_item = 49; +pub const T_FMT_AMPM: crate::nl_item = 50; +pub const ERA: crate::nl_item = 51; +pub const ERA_D_FMT: crate::nl_item = 52; +pub const ERA_D_T_FMT: crate::nl_item = 53; +pub const ERA_T_FMT: crate::nl_item = 54; +pub const ALT_DIGITS: crate::nl_item = 55; +pub const YESEXPR: crate::nl_item = 56; +pub const NOEXPR: crate::nl_item = 57; +pub const _DATE_FMT: crate::nl_item = 58; +pub const MAXSTRMSG: crate::nl_item = 58; + +pub const PATH_MAX: c_int = 1024; + +pub const SA_ONSTACK: c_int = 0x00000001; +pub const SA_RESETHAND: c_int = 0x00000002; +pub const SA_RESTART: c_int = 0x00000004; +pub const SA_SIGINFO: c_int = 0x00000008; +pub const SA_NODEFER: c_int = 0x00000010; +pub const SA_NOCLDWAIT: c_int = 0x00010000; +pub const SA_NOCLDSTOP: c_int = 0x00020000; + +pub const SS_ONSTACK: c_int = 1; +pub const SS_DISABLE: c_int = 2; + +pub const FIOCLEX: c_int = 0x20006601; +pub const FIONCLEX: c_int = 0x20006602; +pub const FIONREAD: c_int = 0x4004667f; +pub const FIONBIO: c_int = 0x8004667e; +pub const FIOASYNC: c_int = 0x8004667d; +pub const FIOSETOWN: c_int = 0x8004667c; +pub const FIOGETOWN: c_int = 0x4004667b; + +pub const SIGCHLD: c_int = 18; +pub const SIGCLD: c_int = SIGCHLD; +pub const SIGBUS: c_int = 10; +pub const SIG_BLOCK: c_int = 1; +pub const SIG_UNBLOCK: c_int = 2; +pub const SIG_SETMASK: c_int = 3; + +pub const AIO_CANCELED: c_int = 0; +pub const AIO_ALLDONE: c_int = 1; +pub const AIO_NOTCANCELED: c_int = 2; +pub const LIO_NOP: c_int = 0; +pub const LIO_READ: c_int = 1; +pub const LIO_WRITE: c_int = 2; +pub const LIO_NOWAIT: c_int = 0; +pub const LIO_WAIT: c_int = 1; + +pub const SIGEV_NONE: c_int = 1; +pub const SIGEV_SIGNAL: c_int = 2; +pub const SIGEV_THREAD: c_int = 3; +pub const SIGEV_PORT: c_int = 4; + +pub const CLD_EXITED: c_int = 1; +pub const CLD_KILLED: c_int = 2; +pub const CLD_DUMPED: c_int = 3; +pub const CLD_TRAPPED: c_int = 4; +pub const CLD_STOPPED: c_int = 5; +pub const 
CLD_CONTINUED: c_int = 6; + +pub const IP_RECVDSTADDR: c_int = 0x7; +pub const IP_PKTINFO: c_int = 0x1a; +pub const IP_DONTFRAG: c_int = 0x1b; +pub const IP_SEC_OPT: c_int = 0x22; + +pub const IPV6_UNICAST_HOPS: c_int = 0x5; +pub const IPV6_MULTICAST_IF: c_int = 0x6; +pub const IPV6_MULTICAST_HOPS: c_int = 0x7; +pub const IPV6_MULTICAST_LOOP: c_int = 0x8; +pub const IPV6_PKTINFO: c_int = 0xb; +pub const IPV6_RECVPKTINFO: c_int = 0x12; +pub const IPV6_RECVTCLASS: c_int = 0x19; +pub const IPV6_DONTFRAG: c_int = 0x21; +pub const IPV6_SEC_OPT: c_int = 0x22; +pub const IPV6_TCLASS: c_int = 0x26; +pub const IPV6_V6ONLY: c_int = 0x27; +pub const IPV6_BOUND_IF: c_int = 0x41; + +cfg_if! { + if #[cfg(target_pointer_width = "64")] { + pub const FD_SETSIZE: usize = 65536; + } else { + pub const FD_SETSIZE: usize = 1024; + } +} + +pub const ST_RDONLY: c_ulong = 1; +pub const ST_NOSUID: c_ulong = 2; + +pub const NI_MAXHOST: crate::socklen_t = 1025; +pub const NI_MAXSERV: crate::socklen_t = 32; + +pub const EXIT_FAILURE: c_int = 1; +pub const EXIT_SUCCESS: c_int = 0; +pub const RAND_MAX: c_int = 32767; +pub const EOF: c_int = -1; +pub const SEEK_SET: c_int = 0; +pub const SEEK_CUR: c_int = 1; +pub const SEEK_END: c_int = 2; +pub const SEEK_DATA: c_int = 3; +pub const SEEK_HOLE: c_int = 4; +pub const _IOFBF: c_int = 0; +pub const _IONBF: c_int = 4; +pub const _IOLBF: c_int = 64; +pub const BUFSIZ: c_uint = 1024; +pub const FOPEN_MAX: c_uint = 20; +pub const FILENAME_MAX: c_uint = 1024; +pub const L_tmpnam: c_uint = 25; +pub const TMP_MAX: c_uint = 17576; +pub const PIPE_BUF: c_int = 5120; + +pub const GRND_NONBLOCK: c_uint = 0x0001; +pub const GRND_RANDOM: c_uint = 0x0002; + +pub const O_RDONLY: c_int = 0; +pub const O_WRONLY: c_int = 1; +pub const O_RDWR: c_int = 2; +pub const O_NDELAY: c_int = 0x04; +pub const O_APPEND: c_int = 8; +pub const O_DSYNC: c_int = 0x40; +pub const O_RSYNC: c_int = 0x8000; +pub const O_CREAT: c_int = 256; +pub const O_EXCL: c_int = 1024; +pub const O_NOCTTY: c_int = 2048; +pub const O_TRUNC: c_int = 512; +pub const O_NOFOLLOW: c_int = 0x20000; +pub const O_SEARCH: c_int = 0x200000; +pub const O_EXEC: c_int = 0x400000; +pub const O_CLOEXEC: c_int = 0x800000; +pub const O_ACCMODE: c_int = 0x600003; +pub const O_XATTR: c_int = 0x4000; +pub const O_DIRECTORY: c_int = 0x1000000; +pub const S_IFIFO: mode_t = 0o1_0000; +pub const S_IFCHR: mode_t = 0o2_0000; +pub const S_IFBLK: mode_t = 0o6_0000; +pub const S_IFDIR: mode_t = 0o4_0000; +pub const S_IFREG: mode_t = 0o10_0000; +pub const S_IFLNK: mode_t = 0o12_0000; +pub const S_IFSOCK: mode_t = 0o14_0000; +pub const S_IFMT: mode_t = 0o17_0000; +pub const S_IEXEC: mode_t = 0o0100; +pub const S_IWRITE: mode_t = 0o0200; +pub const S_IREAD: mode_t = 0o0400; +pub const S_IRWXU: mode_t = 0o0700; +pub const S_IXUSR: mode_t = 0o0100; +pub const S_IWUSR: mode_t = 0o0200; +pub const S_IRUSR: mode_t = 0o0400; +pub const S_IRWXG: mode_t = 0o0070; +pub const S_IXGRP: mode_t = 0o0010; +pub const S_IWGRP: mode_t = 0o0020; +pub const S_IRGRP: mode_t = 0o0040; +pub const S_IRWXO: mode_t = 0o0007; +pub const S_IXOTH: mode_t = 0o0001; +pub const S_IWOTH: mode_t = 0o0002; +pub const S_IROTH: mode_t = 0o0004; +pub const F_OK: c_int = 0; +pub const R_OK: c_int = 4; +pub const W_OK: c_int = 2; +pub const X_OK: c_int = 1; +pub const STDIN_FILENO: c_int = 0; +pub const STDOUT_FILENO: c_int = 1; +pub const STDERR_FILENO: c_int = 2; +pub const F_LOCK: c_int = 1; +pub const F_TEST: c_int = 3; +pub const F_TLOCK: c_int = 2; +pub const F_ULOCK: c_int = 0; +pub const 
F_SETLK: c_int = 6; +pub const F_SETLKW: c_int = 7; +pub const F_GETLK: c_int = 14; +pub const F_ALLOCSP: c_int = 10; +pub const F_FREESP: c_int = 11; +pub const F_BLOCKS: c_int = 18; +pub const F_BLKSIZE: c_int = 19; +pub const F_SHARE: c_int = 40; +pub const F_UNSHARE: c_int = 41; +pub const F_ISSTREAM: c_int = 13; +pub const F_PRIV: c_int = 15; +pub const F_NPRIV: c_int = 16; +pub const F_QUOTACTL: c_int = 17; +pub const F_GETOWN: c_int = 23; +pub const F_SETOWN: c_int = 24; +pub const F_REVOKE: c_int = 25; +pub const F_HASREMOTELOCKS: c_int = 26; +pub const SIGHUP: c_int = 1; +pub const SIGINT: c_int = 2; +pub const SIGQUIT: c_int = 3; +pub const SIGILL: c_int = 4; +pub const SIGABRT: c_int = 6; +pub const SIGEMT: c_int = 7; +pub const SIGFPE: c_int = 8; +pub const SIGKILL: c_int = 9; +pub const SIGSEGV: c_int = 11; +pub const SIGSYS: c_int = 12; +pub const SIGPIPE: c_int = 13; +pub const SIGALRM: c_int = 14; +pub const SIGTERM: c_int = 15; +pub const SIGUSR1: c_int = 16; +pub const SIGUSR2: c_int = 17; +pub const SIGPWR: c_int = 19; +pub const SIGWINCH: c_int = 20; +pub const SIGURG: c_int = 21; +pub const SIGPOLL: c_int = 22; +pub const SIGIO: c_int = SIGPOLL; +pub const SIGSTOP: c_int = 23; +pub const SIGTSTP: c_int = 24; +pub const SIGCONT: c_int = 25; +pub const SIGTTIN: c_int = 26; +pub const SIGTTOU: c_int = 27; +pub const SIGVTALRM: c_int = 28; +pub const SIGPROF: c_int = 29; +pub const SIGXCPU: c_int = 30; +pub const SIGXFSZ: c_int = 31; + +pub const WNOHANG: c_int = 0x40; +pub const WUNTRACED: c_int = 0x04; + +pub const WEXITED: c_int = 0x01; +pub const WTRAPPED: c_int = 0x02; +pub const WSTOPPED: c_int = WUNTRACED; +pub const WCONTINUED: c_int = 0x08; +pub const WNOWAIT: c_int = 0x80; + +pub const AT_FDCWD: c_int = 0xffd19553; +pub const AT_SYMLINK_NOFOLLOW: c_int = 0x1000; +pub const AT_SYMLINK_FOLLOW: c_int = 0x2000; +pub const AT_REMOVEDIR: c_int = 0x1; +pub const _AT_TRIGGER: c_int = 0x2; +pub const AT_EACCESS: c_int = 0x4; + +pub const P_PID: idtype_t = 0; +pub const P_PPID: idtype_t = 1; +pub const P_PGID: idtype_t = 2; +pub const P_SID: idtype_t = 3; +pub const P_CID: idtype_t = 4; +pub const P_UID: idtype_t = 5; +pub const P_GID: idtype_t = 6; +pub const P_ALL: idtype_t = 7; +pub const P_LWPID: idtype_t = 8; +pub const P_TASKID: idtype_t = 9; +pub const P_PROJID: idtype_t = 10; +pub const P_POOLID: idtype_t = 11; +pub const P_ZONEID: idtype_t = 12; +pub const P_CTID: idtype_t = 13; +pub const P_CPUID: idtype_t = 14; +pub const P_PSETID: idtype_t = 15; + +pub const PBIND_NONE: crate::processorid_t = -1; +pub const PBIND_QUERY: crate::processorid_t = -2; + +pub const PS_NONE: c_int = -1; +pub const PS_QUERY: c_int = -2; +pub const PS_MYID: c_int = -3; +pub const PS_SOFT: c_int = -4; +pub const PS_HARD: c_int = -5; +pub const PS_QUERY_TYPE: c_int = -6; +pub const PS_PRIVATE: c_int = 2; + +pub const UTIME_OMIT: c_long = -2; +pub const UTIME_NOW: c_long = -1; + +pub const PROT_NONE: c_int = 0; +pub const PROT_READ: c_int = 1; +pub const PROT_WRITE: c_int = 2; +pub const PROT_EXEC: c_int = 4; + +pub const MAP_SHARED: c_int = 0x0001; +pub const MAP_PRIVATE: c_int = 0x0002; +pub const MAP_FIXED: c_int = 0x0010; +pub const MAP_NORESERVE: c_int = 0x40; +pub const MAP_ANON: c_int = 0x0100; +pub const MAP_ANONYMOUS: c_int = 0x0100; +pub const MAP_RENAME: c_int = 0x20; +pub const MAP_ALIGN: c_int = 0x200; +pub const MAP_TEXT: c_int = 0x400; +pub const MAP_INITDATA: c_int = 0x800; +pub const MAP_FAILED: *mut c_void = !0 as *mut c_void; + +pub const MCL_CURRENT: c_int = 0x0001; 
+pub const MCL_FUTURE: c_int = 0x0002; + +pub const MS_SYNC: c_int = 0x0004; +pub const MS_ASYNC: c_int = 0x0001; +pub const MS_INVALIDATE: c_int = 0x0002; + +pub const MMOBJ_PADDING: c_uint = 0x10000; +pub const MMOBJ_INTERPRET: c_uint = 0x20000; +pub const MR_PADDING: c_uint = 0x1; +pub const MR_HDR_ELF: c_uint = 0x2; + +pub const EPERM: c_int = 1; +pub const ENOENT: c_int = 2; +pub const ESRCH: c_int = 3; +pub const EINTR: c_int = 4; +pub const EIO: c_int = 5; +pub const ENXIO: c_int = 6; +pub const E2BIG: c_int = 7; +pub const ENOEXEC: c_int = 8; +pub const EBADF: c_int = 9; +pub const ECHILD: c_int = 10; +pub const EAGAIN: c_int = 11; +pub const ENOMEM: c_int = 12; +pub const EACCES: c_int = 13; +pub const EFAULT: c_int = 14; +pub const ENOTBLK: c_int = 15; +pub const EBUSY: c_int = 16; +pub const EEXIST: c_int = 17; +pub const EXDEV: c_int = 18; +pub const ENODEV: c_int = 19; +pub const ENOTDIR: c_int = 20; +pub const EISDIR: c_int = 21; +pub const EINVAL: c_int = 22; +pub const ENFILE: c_int = 23; +pub const EMFILE: c_int = 24; +pub const ENOTTY: c_int = 25; +pub const ETXTBSY: c_int = 26; +pub const EFBIG: c_int = 27; +pub const ENOSPC: c_int = 28; +pub const ESPIPE: c_int = 29; +pub const EROFS: c_int = 30; +pub const EMLINK: c_int = 31; +pub const EPIPE: c_int = 32; +pub const EDOM: c_int = 33; +pub const ERANGE: c_int = 34; +pub const ENOMSG: c_int = 35; +pub const EIDRM: c_int = 36; +pub const ECHRNG: c_int = 37; +pub const EL2NSYNC: c_int = 38; +pub const EL3HLT: c_int = 39; +pub const EL3RST: c_int = 40; +pub const ELNRNG: c_int = 41; +pub const EUNATCH: c_int = 42; +pub const ENOCSI: c_int = 43; +pub const EL2HLT: c_int = 44; +pub const EDEADLK: c_int = 45; +pub const ENOLCK: c_int = 46; +pub const ECANCELED: c_int = 47; +pub const ENOTSUP: c_int = 48; +pub const EDQUOT: c_int = 49; +pub const EBADE: c_int = 50; +pub const EBADR: c_int = 51; +pub const EXFULL: c_int = 52; +pub const ENOANO: c_int = 53; +pub const EBADRQC: c_int = 54; +pub const EBADSLT: c_int = 55; +pub const EDEADLOCK: c_int = 56; +pub const EBFONT: c_int = 57; +pub const EOWNERDEAD: c_int = 58; +pub const ENOTRECOVERABLE: c_int = 59; +pub const ENOSTR: c_int = 60; +pub const ENODATA: c_int = 61; +pub const ETIME: c_int = 62; +pub const ENOSR: c_int = 63; +pub const ENONET: c_int = 64; +pub const ENOPKG: c_int = 65; +pub const EREMOTE: c_int = 66; +pub const ENOLINK: c_int = 67; +pub const EADV: c_int = 68; +pub const ESRMNT: c_int = 69; +pub const ECOMM: c_int = 70; +pub const EPROTO: c_int = 71; +pub const ELOCKUNMAPPED: c_int = 72; +pub const ENOTACTIVE: c_int = 73; +pub const EMULTIHOP: c_int = 74; +pub const EADI: c_int = 75; +pub const EBADMSG: c_int = 77; +pub const ENAMETOOLONG: c_int = 78; +pub const EOVERFLOW: c_int = 79; +pub const ENOTUNIQ: c_int = 80; +pub const EBADFD: c_int = 81; +pub const EREMCHG: c_int = 82; +pub const ELIBACC: c_int = 83; +pub const ELIBBAD: c_int = 84; +pub const ELIBSCN: c_int = 85; +pub const ELIBMAX: c_int = 86; +pub const ELIBEXEC: c_int = 87; +pub const EILSEQ: c_int = 88; +pub const ENOSYS: c_int = 89; +pub const ELOOP: c_int = 90; +pub const ERESTART: c_int = 91; +pub const ESTRPIPE: c_int = 92; +pub const ENOTEMPTY: c_int = 93; +pub const EUSERS: c_int = 94; +pub const ENOTSOCK: c_int = 95; +pub const EDESTADDRREQ: c_int = 96; +pub const EMSGSIZE: c_int = 97; +pub const EPROTOTYPE: c_int = 98; +pub const ENOPROTOOPT: c_int = 99; +pub const EPROTONOSUPPORT: c_int = 120; +pub const ESOCKTNOSUPPORT: c_int = 121; +pub const EOPNOTSUPP: c_int = 122; +pub const 
EPFNOSUPPORT: c_int = 123; +pub const EAFNOSUPPORT: c_int = 124; +pub const EADDRINUSE: c_int = 125; +pub const EADDRNOTAVAIL: c_int = 126; +pub const ENETDOWN: c_int = 127; +pub const ENETUNREACH: c_int = 128; +pub const ENETRESET: c_int = 129; +pub const ECONNABORTED: c_int = 130; +pub const ECONNRESET: c_int = 131; +pub const ENOBUFS: c_int = 132; +pub const EISCONN: c_int = 133; +pub const ENOTCONN: c_int = 134; +pub const ESHUTDOWN: c_int = 143; +pub const ETOOMANYREFS: c_int = 144; +pub const ETIMEDOUT: c_int = 145; +pub const ECONNREFUSED: c_int = 146; +pub const EHOSTDOWN: c_int = 147; +pub const EHOSTUNREACH: c_int = 148; +pub const EWOULDBLOCK: c_int = EAGAIN; +pub const EALREADY: c_int = 149; +pub const EINPROGRESS: c_int = 150; +pub const ESTALE: c_int = 151; + +pub const EAI_AGAIN: c_int = 2; +pub const EAI_BADFLAGS: c_int = 3; +pub const EAI_FAIL: c_int = 4; +pub const EAI_FAMILY: c_int = 5; +pub const EAI_MEMORY: c_int = 6; +pub const EAI_NODATA: c_int = 7; +pub const EAI_NONAME: c_int = 8; +pub const EAI_SERVICE: c_int = 9; +pub const EAI_SOCKTYPE: c_int = 10; +pub const EAI_SYSTEM: c_int = 11; +pub const EAI_OVERFLOW: c_int = 12; + +pub const NI_NOFQDN: c_uint = 0x0001; +pub const NI_NUMERICHOST: c_uint = 0x0002; +pub const NI_NAMEREQD: c_uint = 0x0004; +pub const NI_NUMERICSERV: c_uint = 0x0008; +pub const NI_DGRAM: c_uint = 0x0010; +pub const NI_WITHSCOPEID: c_uint = 0x0020; +pub const NI_NUMERICSCOPE: c_uint = 0x0040; + +pub const F_DUPFD: c_int = 0; +pub const F_DUP2FD: c_int = 9; +pub const F_GETFD: c_int = 1; +pub const F_SETFD: c_int = 2; +pub const F_GETFL: c_int = 3; +pub const F_SETFL: c_int = 4; +pub const F_GETXFL: c_int = 45; + +pub const SIGTRAP: c_int = 5; + +pub const GLOB_APPEND: c_int = 32; +pub const GLOB_DOOFFS: c_int = 16; +pub const GLOB_ERR: c_int = 1; +pub const GLOB_MARK: c_int = 2; +pub const GLOB_NOCHECK: c_int = 8; +pub const GLOB_NOSORT: c_int = 4; +pub const GLOB_NOESCAPE: c_int = 64; + +pub const GLOB_NOSPACE: c_int = -2; +pub const GLOB_ABORTED: c_int = -1; +pub const GLOB_NOMATCH: c_int = -3; + +pub const POLLIN: c_short = 0x1; +pub const POLLPRI: c_short = 0x2; +pub const POLLOUT: c_short = 0x4; +pub const POLLERR: c_short = 0x8; +pub const POLLHUP: c_short = 0x10; +pub const POLLNVAL: c_short = 0x20; +pub const POLLNORM: c_short = 0x0040; +pub const POLLRDNORM: c_short = 0x0040; +pub const POLLWRNORM: c_short = 0x4; /* POLLOUT */ +pub const POLLRDBAND: c_short = 0x0080; +pub const POLLWRBAND: c_short = 0x0100; + +pub const POSIX_MADV_NORMAL: c_int = 0; +pub const POSIX_MADV_RANDOM: c_int = 1; +pub const POSIX_MADV_SEQUENTIAL: c_int = 2; +pub const POSIX_MADV_WILLNEED: c_int = 3; +pub const POSIX_MADV_DONTNEED: c_int = 4; + +pub const POSIX_SPAWN_RESETIDS: c_short = 0x1; +pub const POSIX_SPAWN_SETPGROUP: c_short = 0x2; +pub const POSIX_SPAWN_SETSIGDEF: c_short = 0x4; +pub const POSIX_SPAWN_SETSIGMASK: c_short = 0x8; +pub const POSIX_SPAWN_SETSCHEDPARAM: c_short = 0x10; +pub const POSIX_SPAWN_SETSCHEDULER: c_short = 0x20; +pub const POSIX_SPAWN_SETSIGIGN_NP: c_short = 0x800; +pub const POSIX_SPAWN_NOSIGCHLD_NP: c_short = 0x1000; +pub const POSIX_SPAWN_WAITPID_NP: c_short = 0x2000; +pub const POSIX_SPAWN_NOEXECERR_NP: c_short = 0x4000; + +pub const PTHREAD_CREATE_JOINABLE: c_int = 0; +pub const PTHREAD_CREATE_DETACHED: c_int = 0x40; +pub const PTHREAD_PROCESS_SHARED: c_int = 1; +pub const PTHREAD_PROCESS_PRIVATE: c_ushort = 0; +pub const PTHREAD_STACK_MIN: size_t = 4096; + +pub const SIGSTKSZ: size_t = 8192; + +// 
https://illumos.org/man/3c/clock_gettime +// https://github.com/illumos/illumos-gate/ +// blob/HEAD/usr/src/lib/libc/amd64/sys/__clock_gettime.s +// clock_gettime(3c) doesn't seem to accept anything other than CLOCK_REALTIME +// or __CLOCK_REALTIME0 +// +// https://github.com/illumos/illumos-gate/ +// blob/HEAD/usr/src/uts/common/sys/time_impl.h +// Confusing! CLOCK_HIGHRES==CLOCK_MONOTONIC==4 +// __CLOCK_REALTIME0==0 is an obsoleted version of CLOCK_REALTIME==3 +pub const CLOCK_REALTIME: crate::clockid_t = 3; +pub const CLOCK_MONOTONIC: crate::clockid_t = 4; +pub const TIMER_RELTIME: c_int = 0; +pub const TIMER_ABSTIME: c_int = 1; + +pub const RLIMIT_CPU: c_int = 0; +pub const RLIMIT_FSIZE: c_int = 1; +pub const RLIMIT_DATA: c_int = 2; +pub const RLIMIT_STACK: c_int = 3; +pub const RLIMIT_CORE: c_int = 4; +pub const RLIMIT_NOFILE: c_int = 5; +pub const RLIMIT_VMEM: c_int = 6; +pub const RLIMIT_AS: c_int = RLIMIT_VMEM; + +#[deprecated(since = "0.2.64", note = "Not stable across OS versions")] +pub const RLIM_NLIMITS: rlim_t = 7; +pub const RLIM_INFINITY: rlim_t = 0xfffffffffffffffd; + +pub const RUSAGE_SELF: c_int = 0; +pub const RUSAGE_CHILDREN: c_int = -1; + +pub const MADV_NORMAL: c_int = 0; +pub const MADV_RANDOM: c_int = 1; +pub const MADV_SEQUENTIAL: c_int = 2; +pub const MADV_WILLNEED: c_int = 3; +pub const MADV_DONTNEED: c_int = 4; +pub const MADV_FREE: c_int = 5; +pub const MADV_ACCESS_DEFAULT: c_int = 6; +pub const MADV_ACCESS_LWP: c_int = 7; +pub const MADV_ACCESS_MANY: c_int = 8; + +pub const AF_UNSPEC: c_int = 0; +pub const AF_UNIX: c_int = 1; +pub const AF_INET: c_int = 2; +pub const AF_IMPLINK: c_int = 3; +pub const AF_PUP: c_int = 4; +pub const AF_CHAOS: c_int = 5; +pub const AF_NS: c_int = 6; +pub const AF_NBS: c_int = 7; +pub const AF_ECMA: c_int = 8; +pub const AF_DATAKIT: c_int = 9; +pub const AF_CCITT: c_int = 10; +pub const AF_SNA: c_int = 11; +pub const AF_DECnet: c_int = 12; +pub const AF_DLI: c_int = 13; +pub const AF_LAT: c_int = 14; +pub const AF_HYLINK: c_int = 15; +pub const AF_APPLETALK: c_int = 16; +pub const AF_NIT: c_int = 17; +pub const AF_802: c_int = 18; +pub const AF_OSI: c_int = 19; +pub const AF_X25: c_int = 20; +pub const AF_OSINET: c_int = 21; +pub const AF_GOSIP: c_int = 22; +pub const AF_IPX: c_int = 23; +pub const AF_ROUTE: c_int = 24; +pub const AF_LINK: c_int = 25; +pub const AF_INET6: c_int = 26; +pub const AF_KEY: c_int = 27; +pub const AF_POLICY: c_int = 29; +pub const AF_INET_OFFLOAD: c_int = 30; +pub const AF_TRILL: c_int = 31; +pub const AF_PACKET: c_int = 32; + +pub const PF_UNSPEC: c_int = AF_UNSPEC; +pub const PF_UNIX: c_int = AF_UNIX; +pub const PF_LOCAL: c_int = PF_UNIX; +pub const PF_FILE: c_int = PF_UNIX; +pub const PF_INET: c_int = AF_INET; +pub const PF_IMPLINK: c_int = AF_IMPLINK; +pub const PF_PUP: c_int = AF_PUP; +pub const PF_CHAOS: c_int = AF_CHAOS; +pub const PF_NS: c_int = AF_NS; +pub const PF_NBS: c_int = AF_NBS; +pub const PF_ECMA: c_int = AF_ECMA; +pub const PF_DATAKIT: c_int = AF_DATAKIT; +pub const PF_CCITT: c_int = AF_CCITT; +pub const PF_SNA: c_int = AF_SNA; +pub const PF_DECnet: c_int = AF_DECnet; +pub const PF_DLI: c_int = AF_DLI; +pub const PF_LAT: c_int = AF_LAT; +pub const PF_HYLINK: c_int = AF_HYLINK; +pub const PF_APPLETALK: c_int = AF_APPLETALK; +pub const PF_NIT: c_int = AF_NIT; +pub const PF_802: c_int = AF_802; +pub const PF_OSI: c_int = AF_OSI; +pub const PF_X25: c_int = AF_X25; +pub const PF_OSINET: c_int = AF_OSINET; +pub const PF_GOSIP: c_int = AF_GOSIP; +pub const PF_IPX: c_int = AF_IPX; +pub const 
PF_ROUTE: c_int = AF_ROUTE; +pub const PF_LINK: c_int = AF_LINK; +pub const PF_INET6: c_int = AF_INET6; +pub const PF_KEY: c_int = AF_KEY; +pub const PF_POLICY: c_int = AF_POLICY; +pub const PF_INET_OFFLOAD: c_int = AF_INET_OFFLOAD; +pub const PF_TRILL: c_int = AF_TRILL; +pub const PF_PACKET: c_int = AF_PACKET; + +pub const SOCK_DGRAM: c_int = 1; +pub const SOCK_STREAM: c_int = 2; +pub const SOCK_RAW: c_int = 4; +pub const SOCK_RDM: c_int = 5; +pub const SOCK_SEQPACKET: c_int = 6; +pub const IP_MULTICAST_IF: c_int = 16; +pub const IP_MULTICAST_TTL: c_int = 17; +pub const IP_MULTICAST_LOOP: c_int = 18; +pub const IP_HDRINCL: c_int = 2; +pub const IP_TOS: c_int = 3; +pub const IP_TTL: c_int = 4; +pub const IP_ADD_MEMBERSHIP: c_int = 19; +pub const IP_DROP_MEMBERSHIP: c_int = 20; +pub const IPV6_JOIN_GROUP: c_int = 9; +pub const IPV6_LEAVE_GROUP: c_int = 10; +pub const IP_ADD_SOURCE_MEMBERSHIP: c_int = 23; +pub const IP_DROP_SOURCE_MEMBERSHIP: c_int = 24; +pub const IP_BLOCK_SOURCE: c_int = 21; +pub const IP_UNBLOCK_SOURCE: c_int = 22; +pub const IP_BOUND_IF: c_int = 0x41; + +// These TCP socket options are common between illumos and Solaris, while higher +// numbers have generally diverged: +pub const TCP_NODELAY: c_int = 0x1; +pub const TCP_MAXSEG: c_int = 0x2; +pub const TCP_KEEPALIVE: c_int = 0x8; +pub const TCP_NOTIFY_THRESHOLD: c_int = 0x10; +pub const TCP_ABORT_THRESHOLD: c_int = 0x11; +pub const TCP_CONN_NOTIFY_THRESHOLD: c_int = 0x12; +pub const TCP_CONN_ABORT_THRESHOLD: c_int = 0x13; +pub const TCP_RECVDSTADDR: c_int = 0x14; +pub const TCP_INIT_CWND: c_int = 0x15; +pub const TCP_KEEPALIVE_THRESHOLD: c_int = 0x16; +pub const TCP_KEEPALIVE_ABORT_THRESHOLD: c_int = 0x17; +pub const TCP_CORK: c_int = 0x18; +pub const TCP_RTO_INITIAL: c_int = 0x19; +pub const TCP_RTO_MIN: c_int = 0x1a; +pub const TCP_RTO_MAX: c_int = 0x1b; +pub const TCP_LINGER2: c_int = 0x1c; + +pub const UDP_NAT_T_ENDPOINT: c_int = 0x0103; + +pub const SOMAXCONN: c_int = 128; + +pub const SOL_SOCKET: c_int = 0xffff; +pub const SO_DEBUG: c_int = 0x01; +pub const SO_ACCEPTCONN: c_int = 0x0002; +pub const SO_REUSEADDR: c_int = 0x0004; +pub const SO_KEEPALIVE: c_int = 0x0008; +pub const SO_DONTROUTE: c_int = 0x0010; +pub const SO_BROADCAST: c_int = 0x0020; +pub const SO_USELOOPBACK: c_int = 0x0040; +pub const SO_LINGER: c_int = 0x0080; +pub const SO_OOBINLINE: c_int = 0x0100; +pub const SO_SNDBUF: c_int = 0x1001; +pub const SO_RCVBUF: c_int = 0x1002; +pub const SO_SNDLOWAT: c_int = 0x1003; +pub const SO_RCVLOWAT: c_int = 0x1004; +pub const SO_SNDTIMEO: c_int = 0x1005; +pub const SO_RCVTIMEO: c_int = 0x1006; +pub const SO_ERROR: c_int = 0x1007; +pub const SO_TYPE: c_int = 0x1008; +pub const SO_PROTOTYPE: c_int = 0x1009; +pub const SO_DOMAIN: c_int = 0x100c; +pub const SO_TIMESTAMP: c_int = 0x1013; +pub const SO_EXCLBIND: c_int = 0x1015; + +pub const SCM_RIGHTS: c_int = 0x1010; +pub const SCM_UCRED: c_int = 0x1012; +pub const SCM_TIMESTAMP: c_int = SO_TIMESTAMP; + +pub const MSG_OOB: c_int = 0x1; +pub const MSG_PEEK: c_int = 0x2; +pub const MSG_DONTROUTE: c_int = 0x4; +pub const MSG_EOR: c_int = 0x8; +pub const MSG_CTRUNC: c_int = 0x10; +pub const MSG_TRUNC: c_int = 0x20; +pub const MSG_WAITALL: c_int = 0x40; +pub const MSG_DONTWAIT: c_int = 0x80; +pub const MSG_NOTIFICATION: c_int = 0x100; +pub const MSG_NOSIGNAL: c_int = 0x200; +pub const MSG_DUPCTRL: c_int = 0x800; +pub const MSG_XPG4_2: c_int = 0x8000; +pub const MSG_MAXIOVLEN: c_int = 16; + +pub const IF_NAMESIZE: size_t = 32; +pub const IFNAMSIZ: size_t = 16; + +// 
https://docs.oracle.com/cd/E23824_01/html/821-1475/if-7p.html +pub const IFF_UP: c_int = 0x0000000001; // Address is up +pub const IFF_BROADCAST: c_int = 0x0000000002; // Broadcast address valid +pub const IFF_DEBUG: c_int = 0x0000000004; // Turn on debugging +pub const IFF_LOOPBACK: c_int = 0x0000000008; // Loopback net +pub const IFF_POINTOPOINT: c_int = 0x0000000010; // Interface is p-to-p +pub const IFF_NOTRAILERS: c_int = 0x0000000020; // Avoid use of trailers +pub const IFF_RUNNING: c_int = 0x0000000040; // Resources allocated +pub const IFF_NOARP: c_int = 0x0000000080; // No address res. protocol +pub const IFF_PROMISC: c_int = 0x0000000100; // Receive all packets +pub const IFF_ALLMULTI: c_int = 0x0000000200; // Receive all multicast pkts +pub const IFF_INTELLIGENT: c_int = 0x0000000400; // Protocol code on board +pub const IFF_MULTICAST: c_int = 0x0000000800; // Supports multicast + +// Multicast using broadcst. add. +pub const IFF_MULTI_BCAST: c_int = 0x0000001000; +pub const IFF_UNNUMBERED: c_int = 0x0000002000; // Non-unique address +pub const IFF_DHCPRUNNING: c_int = 0x0000004000; // DHCP controls interface +pub const IFF_PRIVATE: c_int = 0x0000008000; // Do not advertise +pub const IFF_NOXMIT: c_int = 0x0000010000; // Do not transmit pkts + +// No address - just on-link subnet +pub const IFF_NOLOCAL: c_int = 0x0000020000; +pub const IFF_DEPRECATED: c_int = 0x0000040000; // Address is deprecated +pub const IFF_ADDRCONF: c_int = 0x0000080000; // Addr. from stateless addrconf +pub const IFF_ROUTER: c_int = 0x0000100000; // Router on interface +pub const IFF_NONUD: c_int = 0x0000200000; // No NUD on interface +pub const IFF_ANYCAST: c_int = 0x0000400000; // Anycast address +pub const IFF_NORTEXCH: c_int = 0x0000800000; // Don't xchange rout. info +pub const IFF_IPV4: c_int = 0x0001000000; // IPv4 interface +pub const IFF_IPV6: c_int = 0x0002000000; // IPv6 interface +pub const IFF_NOFAILOVER: c_int = 0x0008000000; // in.mpathd test address +pub const IFF_FAILED: c_int = 0x0010000000; // Interface has failed +pub const IFF_STANDBY: c_int = 0x0020000000; // Interface is a hot-spare +pub const IFF_INACTIVE: c_int = 0x0040000000; // Functioning but not used +pub const IFF_OFFLINE: c_int = 0x0080000000; // Interface is offline + // If CoS marking is supported +pub const IFF_COS_ENABLED: c_longlong = 0x0200000000; +pub const IFF_PREFERRED: c_longlong = 0x0400000000; // Prefer as source addr. 
+pub const IFF_TEMPORARY: c_longlong = 0x0800000000; // RFC3041
+pub const IFF_FIXEDMTU: c_longlong = 0x1000000000; // MTU set with SIOCSLIFMTU
+pub const IFF_VIRTUAL: c_longlong = 0x2000000000; // Cannot send/receive pkts
+pub const IFF_DUPLICATE: c_longlong = 0x4000000000; // Local address in use
+pub const IFF_IPMP: c_longlong = 0x8000000000; // IPMP IP interface
+
+// sys/ipc.h:
+pub const IPC_ALLOC: c_int = 0x8000;
+pub const IPC_CREAT: c_int = 0x200;
+pub const IPC_EXCL: c_int = 0x400;
+pub const IPC_NOWAIT: c_int = 0x800;
+pub const IPC_PRIVATE: key_t = 0;
+pub const IPC_RMID: c_int = 10;
+pub const IPC_SET: c_int = 11;
+pub const IPC_STAT: c_int = 12;
+
+// sys/shm.h
+pub const SHM_R: c_int = 0o400;
+pub const SHM_W: c_int = 0o200;
+pub const SHM_RDONLY: c_int = 0o10000;
+pub const SHM_RND: c_int = 0o20000;
+pub const SHM_SHARE_MMU: c_int = 0o40000;
+pub const SHM_PAGEABLE: c_int = 0o100000;
+
+pub const SHUT_RD: c_int = 0;
+pub const SHUT_WR: c_int = 1;
+pub const SHUT_RDWR: c_int = 2;
+
+pub const F_RDLCK: c_short = 1;
+pub const F_WRLCK: c_short = 2;
+pub const F_UNLCK: c_short = 3;
+
+pub const O_SYNC: c_int = 16;
+pub const O_NONBLOCK: c_int = 128;
+
+pub const IPPROTO_RAW: c_int = 255;
+
+pub const _PC_LINK_MAX: c_int = 1;
+pub const _PC_MAX_CANON: c_int = 2;
+pub const _PC_MAX_INPUT: c_int = 3;
+pub const _PC_NAME_MAX: c_int = 4;
+pub const _PC_PATH_MAX: c_int = 5;
+pub const _PC_PIPE_BUF: c_int = 6;
+pub const _PC_NO_TRUNC: c_int = 7;
+pub const _PC_VDISABLE: c_int = 8;
+pub const _PC_CHOWN_RESTRICTED: c_int = 9;
+pub const _PC_ASYNC_IO: c_int = 10;
+pub const _PC_PRIO_IO: c_int = 11;
+pub const _PC_SYNC_IO: c_int = 12;
+pub const _PC_ALLOC_SIZE_MIN: c_int = 13;
+pub const _PC_REC_INCR_XFER_SIZE: c_int = 14;
+pub const _PC_REC_MAX_XFER_SIZE: c_int = 15;
+pub const _PC_REC_MIN_XFER_SIZE: c_int = 16;
+pub const _PC_REC_XFER_ALIGN: c_int = 17;
+pub const _PC_SYMLINK_MAX: c_int = 18;
+pub const _PC_2_SYMLINKS: c_int = 19;
+pub const _PC_ACL_ENABLED: c_int = 20;
+pub const _PC_MIN_HOLE_SIZE: c_int = 21;
+pub const _PC_CASE_BEHAVIOR: c_int = 22;
+pub const _PC_SATTR_ENABLED: c_int = 23;
+pub const _PC_SATTR_EXISTS: c_int = 24;
+pub const _PC_ACCESS_FILTERING: c_int = 25;
+pub const _PC_TIMESTAMP_RESOLUTION: c_int = 26;
+pub const _PC_FILESIZEBITS: c_int = 67;
+pub const _PC_XATTR_ENABLED: c_int = 100;
+pub const _PC_XATTR_EXISTS: c_int = 101;
+
+pub const _POSIX_VDISABLE: crate::cc_t = 0;
+
+pub const _SC_ARG_MAX: c_int = 1;
+pub const _SC_CHILD_MAX: c_int = 2;
+pub const _SC_CLK_TCK: c_int = 3;
+pub const _SC_NGROUPS_MAX: c_int = 4;
+pub const _SC_OPEN_MAX: c_int = 5;
+pub const _SC_JOB_CONTROL: c_int = 6;
+pub const _SC_SAVED_IDS: c_int = 7;
+pub const _SC_VERSION: c_int = 8;
+pub const _SC_PASS_MAX: c_int = 9;
+pub const _SC_LOGNAME_MAX: c_int = 10;
+pub const _SC_PAGESIZE: c_int = 11;
+pub const _SC_PAGE_SIZE: c_int = _SC_PAGESIZE;
+pub const _SC_XOPEN_VERSION: c_int = 12;
+pub const _SC_NPROCESSORS_CONF: c_int = 14;
+pub const _SC_NPROCESSORS_ONLN: c_int = 15;
+pub const _SC_STREAM_MAX: c_int = 16;
+pub const _SC_TZNAME_MAX: c_int = 17;
+pub const _SC_AIO_LISTIO_MAX: c_int = 18;
+pub const _SC_AIO_MAX: c_int = 19;
+pub const _SC_AIO_PRIO_DELTA_MAX: c_int = 20;
+pub const _SC_ASYNCHRONOUS_IO: c_int = 21;
+pub const _SC_DELAYTIMER_MAX: c_int = 22;
+pub const _SC_FSYNC: c_int = 23;
+pub const _SC_MAPPED_FILES: c_int = 24;
+pub const _SC_MEMLOCK: c_int = 25;
+pub const _SC_MEMLOCK_RANGE: c_int = 26;
+pub const _SC_MEMORY_PROTECTION: c_int = 27;
+pub const _SC_MESSAGE_PASSING: 
c_int = 28; +pub const _SC_MQ_OPEN_MAX: c_int = 29; +pub const _SC_MQ_PRIO_MAX: c_int = 30; +pub const _SC_PRIORITIZED_IO: c_int = 31; +pub const _SC_PRIORITY_SCHEDULING: c_int = 32; +pub const _SC_REALTIME_SIGNALS: c_int = 33; +pub const _SC_RTSIG_MAX: c_int = 34; +pub const _SC_SEMAPHORES: c_int = 35; +pub const _SC_SEM_NSEMS_MAX: c_int = 36; +pub const _SC_SEM_VALUE_MAX: c_int = 37; +pub const _SC_SHARED_MEMORY_OBJECTS: c_int = 38; +pub const _SC_SIGQUEUE_MAX: c_int = 39; +pub const _SC_SIGRT_MIN: c_int = 40; +pub const _SC_SIGRT_MAX: c_int = 41; +pub const _SC_SYNCHRONIZED_IO: c_int = 42; +pub const _SC_TIMERS: c_int = 43; +pub const _SC_TIMER_MAX: c_int = 44; +pub const _SC_2_C_BIND: c_int = 45; +pub const _SC_2_C_DEV: c_int = 46; +pub const _SC_2_C_VERSION: c_int = 47; +pub const _SC_2_FORT_DEV: c_int = 48; +pub const _SC_2_FORT_RUN: c_int = 49; +pub const _SC_2_LOCALEDEF: c_int = 50; +pub const _SC_2_SW_DEV: c_int = 51; +pub const _SC_2_UPE: c_int = 52; +pub const _SC_2_VERSION: c_int = 53; +pub const _SC_BC_BASE_MAX: c_int = 54; +pub const _SC_BC_DIM_MAX: c_int = 55; +pub const _SC_BC_SCALE_MAX: c_int = 56; +pub const _SC_BC_STRING_MAX: c_int = 57; +pub const _SC_COLL_WEIGHTS_MAX: c_int = 58; +pub const _SC_EXPR_NEST_MAX: c_int = 59; +pub const _SC_LINE_MAX: c_int = 60; +pub const _SC_RE_DUP_MAX: c_int = 61; +pub const _SC_XOPEN_CRYPT: c_int = 62; +pub const _SC_XOPEN_ENH_I18N: c_int = 63; +pub const _SC_XOPEN_SHM: c_int = 64; +pub const _SC_2_CHAR_TERM: c_int = 66; +pub const _SC_XOPEN_XCU_VERSION: c_int = 67; +pub const _SC_ATEXIT_MAX: c_int = 76; +pub const _SC_IOV_MAX: c_int = 77; +pub const _SC_XOPEN_UNIX: c_int = 78; +pub const _SC_T_IOV_MAX: c_int = 79; +pub const _SC_PHYS_PAGES: c_int = 500; +pub const _SC_AVPHYS_PAGES: c_int = 501; +pub const _SC_COHER_BLKSZ: c_int = 503; +pub const _SC_SPLIT_CACHE: c_int = 504; +pub const _SC_ICACHE_SZ: c_int = 505; +pub const _SC_DCACHE_SZ: c_int = 506; +pub const _SC_ICACHE_LINESZ: c_int = 507; +pub const _SC_DCACHE_LINESZ: c_int = 508; +pub const _SC_ICACHE_BLKSZ: c_int = 509; +pub const _SC_DCACHE_BLKSZ: c_int = 510; +pub const _SC_DCACHE_TBLKSZ: c_int = 511; +pub const _SC_ICACHE_ASSOC: c_int = 512; +pub const _SC_DCACHE_ASSOC: c_int = 513; +pub const _SC_MAXPID: c_int = 514; +pub const _SC_STACK_PROT: c_int = 515; +pub const _SC_NPROCESSORS_MAX: c_int = 516; +pub const _SC_CPUID_MAX: c_int = 517; +pub const _SC_EPHID_MAX: c_int = 518; +pub const _SC_THREAD_DESTRUCTOR_ITERATIONS: c_int = 568; +pub const _SC_GETGR_R_SIZE_MAX: c_int = 569; +pub const _SC_GETPW_R_SIZE_MAX: c_int = 570; +pub const _SC_LOGIN_NAME_MAX: c_int = 571; +pub const _SC_THREAD_KEYS_MAX: c_int = 572; +pub const _SC_THREAD_STACK_MIN: c_int = 573; +pub const _SC_THREAD_THREADS_MAX: c_int = 574; +pub const _SC_TTY_NAME_MAX: c_int = 575; +pub const _SC_THREADS: c_int = 576; +pub const _SC_THREAD_ATTR_STACKADDR: c_int = 577; +pub const _SC_THREAD_ATTR_STACKSIZE: c_int = 578; +pub const _SC_THREAD_PRIORITY_SCHEDULING: c_int = 579; +pub const _SC_THREAD_PRIO_INHERIT: c_int = 580; +pub const _SC_THREAD_PRIO_PROTECT: c_int = 581; +pub const _SC_THREAD_PROCESS_SHARED: c_int = 582; +pub const _SC_THREAD_SAFE_FUNCTIONS: c_int = 583; +pub const _SC_XOPEN_LEGACY: c_int = 717; +pub const _SC_XOPEN_REALTIME: c_int = 718; +pub const _SC_XOPEN_REALTIME_THREADS: c_int = 719; +pub const _SC_XBS5_ILP32_OFF32: c_int = 720; +pub const _SC_XBS5_ILP32_OFFBIG: c_int = 721; +pub const _SC_XBS5_LP64_OFF64: c_int = 722; +pub const _SC_XBS5_LPBIG_OFFBIG: c_int = 723; +pub const _SC_2_PBS: 
c_int = 724; +pub const _SC_2_PBS_ACCOUNTING: c_int = 725; +pub const _SC_2_PBS_CHECKPOINT: c_int = 726; +pub const _SC_2_PBS_LOCATE: c_int = 728; +pub const _SC_2_PBS_MESSAGE: c_int = 729; +pub const _SC_2_PBS_TRACK: c_int = 730; +pub const _SC_ADVISORY_INFO: c_int = 731; +pub const _SC_BARRIERS: c_int = 732; +pub const _SC_CLOCK_SELECTION: c_int = 733; +pub const _SC_CPUTIME: c_int = 734; +pub const _SC_HOST_NAME_MAX: c_int = 735; +pub const _SC_MONOTONIC_CLOCK: c_int = 736; +pub const _SC_READER_WRITER_LOCKS: c_int = 737; +pub const _SC_REGEXP: c_int = 738; +pub const _SC_SHELL: c_int = 739; +pub const _SC_SPAWN: c_int = 740; +pub const _SC_SPIN_LOCKS: c_int = 741; +pub const _SC_SPORADIC_SERVER: c_int = 742; +pub const _SC_SS_REPL_MAX: c_int = 743; +pub const _SC_SYMLOOP_MAX: c_int = 744; +pub const _SC_THREAD_CPUTIME: c_int = 745; +pub const _SC_THREAD_SPORADIC_SERVER: c_int = 746; +pub const _SC_TIMEOUTS: c_int = 747; +pub const _SC_TRACE: c_int = 748; +pub const _SC_TRACE_EVENT_FILTER: c_int = 749; +pub const _SC_TRACE_EVENT_NAME_MAX: c_int = 750; +pub const _SC_TRACE_INHERIT: c_int = 751; +pub const _SC_TRACE_LOG: c_int = 752; +pub const _SC_TRACE_NAME_MAX: c_int = 753; +pub const _SC_TRACE_SYS_MAX: c_int = 754; +pub const _SC_TRACE_USER_EVENT_MAX: c_int = 755; +pub const _SC_TYPED_MEMORY_OBJECTS: c_int = 756; +pub const _SC_V6_ILP32_OFF32: c_int = 757; +pub const _SC_V6_ILP32_OFFBIG: c_int = 758; +pub const _SC_V6_LP64_OFF64: c_int = 759; +pub const _SC_V6_LPBIG_OFFBIG: c_int = 760; +pub const _SC_XOPEN_STREAMS: c_int = 761; +pub const _SC_IPV6: c_int = 762; +pub const _SC_RAW_SOCKETS: c_int = 763; + +pub const _ST_FSTYPSZ: c_int = 16; + +pub const _MUTEX_MAGIC: u16 = 0x4d58; // MX +pub const _COND_MAGIC: u16 = 0x4356; // CV +pub const _RWL_MAGIC: u16 = 0x5257; // RW + +pub const NCCS: usize = 19; + +pub const LOG_CRON: c_int = 15 << 3; + +pub const PTHREAD_MUTEX_INITIALIZER: pthread_mutex_t = pthread_mutex_t { + __pthread_mutex_flag1: 0, + __pthread_mutex_flag2: 0, + __pthread_mutex_ceiling: 0, + __pthread_mutex_type: PTHREAD_PROCESS_PRIVATE, + __pthread_mutex_magic: _MUTEX_MAGIC, + __pthread_mutex_lock: 0, + __pthread_mutex_data: 0, +}; +pub const PTHREAD_COND_INITIALIZER: pthread_cond_t = pthread_cond_t { + __pthread_cond_flag: [0; 4], + __pthread_cond_type: PTHREAD_PROCESS_PRIVATE, + __pthread_cond_magic: _COND_MAGIC, + __pthread_cond_data: 0, +}; +pub const PTHREAD_RWLOCK_INITIALIZER: pthread_rwlock_t = pthread_rwlock_t { + __pthread_rwlock_readers: 0, + __pthread_rwlock_type: PTHREAD_PROCESS_PRIVATE, + __pthread_rwlock_magic: _RWL_MAGIC, + __pthread_rwlock_mutex: PTHREAD_MUTEX_INITIALIZER, + __pthread_rwlock_readercv: PTHREAD_COND_INITIALIZER, + __pthread_rwlock_writercv: PTHREAD_COND_INITIALIZER, +}; +pub const PTHREAD_MUTEX_NORMAL: c_int = 0; +pub const PTHREAD_MUTEX_ERRORCHECK: c_int = 2; +pub const PTHREAD_MUTEX_RECURSIVE: c_int = 4; +pub const PTHREAD_MUTEX_DEFAULT: c_int = crate::PTHREAD_MUTEX_NORMAL; + +pub const RTLD_NEXT: *mut c_void = -1isize as *mut c_void; +pub const RTLD_DEFAULT: *mut c_void = -2isize as *mut c_void; +pub const RTLD_SELF: *mut c_void = -3isize as *mut c_void; +pub const RTLD_PROBE: *mut c_void = -4isize as *mut c_void; + +pub const RTLD_LAZY: c_int = 0x1; +pub const RTLD_NOW: c_int = 0x2; +pub const RTLD_NOLOAD: c_int = 0x4; +pub const RTLD_GLOBAL: c_int = 0x100; +pub const RTLD_LOCAL: c_int = 0x0; +pub const RTLD_PARENT: c_int = 0x200; +pub const RTLD_GROUP: c_int = 0x400; +pub const RTLD_WORLD: c_int = 0x800; +pub const RTLD_NODELETE: c_int = 
0x1000; +pub const RTLD_FIRST: c_int = 0x2000; +pub const RTLD_CONFGEN: c_int = 0x10000; + +pub const PORT_SOURCE_AIO: c_int = 1; +pub const PORT_SOURCE_TIMER: c_int = 2; +pub const PORT_SOURCE_USER: c_int = 3; +pub const PORT_SOURCE_FD: c_int = 4; +pub const PORT_SOURCE_ALERT: c_int = 5; +pub const PORT_SOURCE_MQ: c_int = 6; +pub const PORT_SOURCE_FILE: c_int = 7; + +pub const NONROOT_USR: c_short = 2; + +pub const EMPTY: c_short = 0; +pub const RUN_LVL: c_short = 1; +pub const BOOT_TIME: c_short = 2; +pub const OLD_TIME: c_short = 3; +pub const NEW_TIME: c_short = 4; +pub const INIT_PROCESS: c_short = 5; +pub const LOGIN_PROCESS: c_short = 6; +pub const USER_PROCESS: c_short = 7; +pub const DEAD_PROCESS: c_short = 8; +pub const ACCOUNTING: c_short = 9; +pub const DOWN_TIME: c_short = 10; + +const _TIOC: c_int = ('T' as i32) << 8; +const tIOC: c_int = ('t' as i32) << 8; +pub const TCGETA: c_int = _TIOC | 1; +pub const TCSETA: c_int = _TIOC | 2; +pub const TCSETAW: c_int = _TIOC | 3; +pub const TCSETAF: c_int = _TIOC | 4; +pub const TCSBRK: c_int = _TIOC | 5; +pub const TCXONC: c_int = _TIOC | 6; +pub const TCFLSH: c_int = _TIOC | 7; +pub const TCDSET: c_int = _TIOC | 32; +pub const TCGETS: c_int = _TIOC | 13; +pub const TCSETS: c_int = _TIOC | 14; +pub const TCSANOW: c_int = _TIOC | 14; +pub const TCSETSW: c_int = _TIOC | 15; +pub const TCSADRAIN: c_int = _TIOC | 15; +pub const TCSETSF: c_int = _TIOC | 16; +pub const TCSAFLUSH: c_int = _TIOC | 16; +pub const TCIFLUSH: c_int = 0; +pub const TCOFLUSH: c_int = 1; +pub const TCIOFLUSH: c_int = 2; +pub const TCOOFF: c_int = 0; +pub const TCOON: c_int = 1; +pub const TCIOFF: c_int = 2; +pub const TCION: c_int = 3; +pub const TIOC: c_int = _TIOC; +pub const TIOCKBON: c_int = _TIOC | 8; +pub const TIOCKBOF: c_int = _TIOC | 9; +pub const TIOCGWINSZ: c_int = _TIOC | 104; +pub const TIOCSWINSZ: c_int = _TIOC | 103; +pub const TIOCGSOFTCAR: c_int = _TIOC | 105; +pub const TIOCSSOFTCAR: c_int = _TIOC | 106; +pub const TIOCGPPS: c_int = _TIOC | 125; +pub const TIOCSPPS: c_int = _TIOC | 126; +pub const TIOCGPPSEV: c_int = _TIOC | 127; +pub const TIOCGETD: c_int = tIOC | 0; +pub const TIOCSETD: c_int = tIOC | 1; +pub const TIOCHPCL: c_int = tIOC | 2; +pub const TIOCGETP: c_int = tIOC | 8; +pub const TIOCSETP: c_int = tIOC | 9; +pub const TIOCSETN: c_int = tIOC | 10; +pub const TIOCEXCL: c_int = tIOC | 13; +pub const TIOCNXCL: c_int = tIOC | 14; +pub const TIOCFLUSH: c_int = tIOC | 16; +pub const TIOCSETC: c_int = tIOC | 17; +pub const TIOCGETC: c_int = tIOC | 18; +pub const TIOCLBIS: c_int = tIOC | 127; +pub const TIOCLBIC: c_int = tIOC | 126; +pub const TIOCLSET: c_int = tIOC | 125; +pub const TIOCLGET: c_int = tIOC | 124; +pub const TIOCSBRK: c_int = tIOC | 123; +pub const TIOCCBRK: c_int = tIOC | 122; +pub const TIOCSDTR: c_int = tIOC | 121; +pub const TIOCCDTR: c_int = tIOC | 120; +pub const TIOCSLTC: c_int = tIOC | 117; +pub const TIOCGLTC: c_int = tIOC | 116; +pub const TIOCOUTQ: c_int = tIOC | 115; +pub const TIOCNOTTY: c_int = tIOC | 113; +pub const TIOCSCTTY: c_int = tIOC | 132; +pub const TIOCSTOP: c_int = tIOC | 111; +pub const TIOCSTART: c_int = tIOC | 110; +pub const TIOCSILOOP: c_int = tIOC | 109; +pub const TIOCCILOOP: c_int = tIOC | 108; +pub const TIOCGPGRP: c_int = tIOC | 20; +pub const TIOCSPGRP: c_int = tIOC | 21; +pub const TIOCGSID: c_int = tIOC | 22; +pub const TIOCSTI: c_int = tIOC | 23; +pub const TIOCMSET: c_int = tIOC | 26; +pub const TIOCMBIS: c_int = tIOC | 27; +pub const TIOCMBIC: c_int = tIOC | 28; +pub const TIOCMGET: 
c_int = tIOC | 29; +pub const TIOCREMOTE: c_int = tIOC | 30; +pub const TIOCSIGNAL: c_int = tIOC | 31; + +pub const TIOCM_LE: c_int = 0o0001; +pub const TIOCM_DTR: c_int = 0o0002; +pub const TIOCM_RTS: c_int = 0o0004; +pub const TIOCM_ST: c_int = 0o0010; +pub const TIOCM_SR: c_int = 0o0020; +pub const TIOCM_CTS: c_int = 0o0040; +pub const TIOCM_CAR: c_int = 0o0100; +pub const TIOCM_CD: c_int = TIOCM_CAR; +pub const TIOCM_RNG: c_int = 0o0200; +pub const TIOCM_RI: c_int = TIOCM_RNG; +pub const TIOCM_DSR: c_int = 0o0400; + +/* termios */ +pub const B0: speed_t = 0; +pub const B50: speed_t = 1; +pub const B75: speed_t = 2; +pub const B110: speed_t = 3; +pub const B134: speed_t = 4; +pub const B150: speed_t = 5; +pub const B200: speed_t = 6; +pub const B300: speed_t = 7; +pub const B600: speed_t = 8; +pub const B1200: speed_t = 9; +pub const B1800: speed_t = 10; +pub const B2400: speed_t = 11; +pub const B4800: speed_t = 12; +pub const B9600: speed_t = 13; +pub const B19200: speed_t = 14; +pub const B38400: speed_t = 15; +pub const B57600: speed_t = 16; +pub const B76800: speed_t = 17; +pub const B115200: speed_t = 18; +pub const B153600: speed_t = 19; +pub const B230400: speed_t = 20; +pub const B307200: speed_t = 21; +pub const B460800: speed_t = 22; +pub const B921600: speed_t = 23; +pub const CSTART: crate::tcflag_t = 0o21; +pub const CSTOP: crate::tcflag_t = 0o23; +pub const CSWTCH: crate::tcflag_t = 0o32; +pub const CBAUD: crate::tcflag_t = 0o17; +pub const CIBAUD: crate::tcflag_t = 0o3600000; +pub const CBAUDEXT: crate::tcflag_t = 0o10000000; +pub const CIBAUDEXT: crate::tcflag_t = 0o20000000; +pub const CSIZE: crate::tcflag_t = 0o000060; +pub const CS5: crate::tcflag_t = 0; +pub const CS6: crate::tcflag_t = 0o000020; +pub const CS7: crate::tcflag_t = 0o000040; +pub const CS8: crate::tcflag_t = 0o000060; +pub const CSTOPB: crate::tcflag_t = 0o000100; +pub const ECHO: crate::tcflag_t = 0o000010; +pub const ECHOE: crate::tcflag_t = 0o000020; +pub const ECHOK: crate::tcflag_t = 0o000040; +pub const ECHONL: crate::tcflag_t = 0o000100; +pub const ECHOCTL: crate::tcflag_t = 0o001000; +pub const ECHOPRT: crate::tcflag_t = 0o002000; +pub const ECHOKE: crate::tcflag_t = 0o004000; +pub const EXTPROC: crate::tcflag_t = 0o200000; +pub const IGNBRK: crate::tcflag_t = 0o000001; +pub const BRKINT: crate::tcflag_t = 0o000002; +pub const IGNPAR: crate::tcflag_t = 0o000004; +pub const PARMRK: crate::tcflag_t = 0o000010; +pub const INPCK: crate::tcflag_t = 0o000020; +pub const ISTRIP: crate::tcflag_t = 0o000040; +pub const INLCR: crate::tcflag_t = 0o000100; +pub const IGNCR: crate::tcflag_t = 0o000200; +pub const ICRNL: crate::tcflag_t = 0o000400; +pub const IUCLC: crate::tcflag_t = 0o001000; +pub const IXON: crate::tcflag_t = 0o002000; +pub const IXOFF: crate::tcflag_t = 0o010000; +pub const IXANY: crate::tcflag_t = 0o004000; +pub const IMAXBEL: crate::tcflag_t = 0o020000; +pub const DOSMODE: crate::tcflag_t = 0o100000; +pub const OPOST: crate::tcflag_t = 0o000001; +pub const OLCUC: crate::tcflag_t = 0o000002; +pub const ONLCR: crate::tcflag_t = 0o000004; +pub const OCRNL: crate::tcflag_t = 0o000010; +pub const ONOCR: crate::tcflag_t = 0o000020; +pub const ONLRET: crate::tcflag_t = 0o000040; +pub const OFILL: crate::tcflag_t = 0o0000100; +pub const OFDEL: crate::tcflag_t = 0o0000200; +pub const CREAD: crate::tcflag_t = 0o000200; +pub const PARENB: crate::tcflag_t = 0o000400; +pub const PARODD: crate::tcflag_t = 0o001000; +pub const HUPCL: crate::tcflag_t = 0o002000; +pub const CLOCAL: crate::tcflag_t = 
0o004000; +pub const CRTSXOFF: crate::tcflag_t = 0o10000000000; +pub const CRTSCTS: crate::tcflag_t = 0o20000000000; +pub const ISIG: crate::tcflag_t = 0o000001; +pub const ICANON: crate::tcflag_t = 0o000002; +pub const IEXTEN: crate::tcflag_t = 0o100000; +pub const TOSTOP: crate::tcflag_t = 0o000400; +pub const FLUSHO: crate::tcflag_t = 0o020000; +pub const PENDIN: crate::tcflag_t = 0o040000; +pub const NOFLSH: crate::tcflag_t = 0o000200; +pub const VINTR: usize = 0; +pub const VQUIT: usize = 1; +pub const VERASE: usize = 2; +pub const VKILL: usize = 3; +pub const VEOF: usize = 4; +pub const VEOL: usize = 5; +pub const VEOL2: usize = 6; +pub const VMIN: usize = 4; +pub const VTIME: usize = 5; +pub const VSWTCH: usize = 7; +pub const VSTART: usize = 8; +pub const VSTOP: usize = 9; +pub const VSUSP: usize = 10; +pub const VDSUSP: usize = 11; +pub const VREPRINT: usize = 12; +pub const VDISCARD: usize = 13; +pub const VWERASE: usize = 14; +pub const VLNEXT: usize = 15; + +// +const STR: c_int = (b'S' as c_int) << 8; +pub const I_NREAD: c_int = STR | 0o1; +pub const I_PUSH: c_int = STR | 0o2; +pub const I_POP: c_int = STR | 0o3; +pub const I_LOOK: c_int = STR | 0o4; +pub const I_FLUSH: c_int = STR | 0o5; +pub const I_SRDOPT: c_int = STR | 0o6; +pub const I_GRDOPT: c_int = STR | 0o7; +pub const I_STR: c_int = STR | 0o10; +pub const I_SETSIG: c_int = STR | 0o11; +pub const I_GETSIG: c_int = STR | 0o12; +pub const I_FIND: c_int = STR | 0o13; +pub const I_LINK: c_int = STR | 0o14; +pub const I_UNLINK: c_int = STR | 0o15; +pub const I_PEEK: c_int = STR | 0o17; +pub const I_FDINSERT: c_int = STR | 0o20; +pub const I_SENDFD: c_int = STR | 0o21; +pub const I_RECVFD: c_int = STR | 0o16; +pub const I_SWROPT: c_int = STR | 0o23; +pub const I_GWROPT: c_int = STR | 0o24; +pub const I_LIST: c_int = STR | 0o25; +pub const I_PLINK: c_int = STR | 0o26; +pub const I_PUNLINK: c_int = STR | 0o27; +pub const I_ANCHOR: c_int = STR | 0o30; +pub const I_FLUSHBAND: c_int = STR | 0o34; +pub const I_CKBAND: c_int = STR | 0o35; +pub const I_GETBAND: c_int = STR | 0o36; +pub const I_ATMARK: c_int = STR | 0o37; +pub const I_SETCLTIME: c_int = STR | 0o40; +pub const I_GETCLTIME: c_int = STR | 0o41; +pub const I_CANPUT: c_int = STR | 0o42; +pub const I_SERROPT: c_int = STR | 0o43; +pub const I_GERROPT: c_int = STR | 0o44; +pub const I_ESETSIG: c_int = STR | 0o45; +pub const I_EGETSIG: c_int = STR | 0o46; +pub const __I_PUSH_NOCTTY: c_int = STR | 0o47; + +// 3SOCKET flags +pub const SOCK_CLOEXEC: c_int = 0x080000; +pub const SOCK_NONBLOCK: c_int = 0x100000; +pub const SOCK_NDELAY: c_int = 0x200000; + +// +pub const SCALE_KG: c_int = 1 << 6; +pub const SCALE_KF: c_int = 1 << 16; +pub const SCALE_KH: c_int = 1 << 2; +pub const MAXTC: c_int = 1 << 6; +pub const SCALE_PHASE: c_int = 1 << 22; +pub const SCALE_USEC: c_int = 1 << 16; +pub const SCALE_UPDATE: c_int = SCALE_KG * MAXTC; +pub const FINEUSEC: c_int = 1 << 22; +pub const MAXPHASE: c_int = 512000; +pub const MAXFREQ: c_int = 512 * SCALE_USEC; +pub const MAXTIME: c_int = 200 << PPS_AVG; +pub const MINSEC: c_int = 16; +pub const MAXSEC: c_int = 1200; +pub const PPS_AVG: c_int = 2; +pub const PPS_SHIFT: c_int = 2; +pub const PPS_SHIFTMAX: c_int = 8; +pub const PPS_VALID: c_int = 120; +pub const MAXGLITCH: c_int = 30; +pub const MOD_OFFSET: u32 = 0x0001; +pub const MOD_FREQUENCY: u32 = 0x0002; +pub const MOD_MAXERROR: u32 = 0x0004; +pub const MOD_ESTERROR: u32 = 0x0008; +pub const MOD_STATUS: u32 = 0x0010; +pub const MOD_TIMECONST: u32 = 0x0020; +pub const MOD_CLKB: u32 = 
0x4000; +pub const MOD_CLKA: u32 = 0x8000; +pub const STA_PLL: u32 = 0x0001; +pub const STA_PPSFREQ: i32 = 0x0002; +pub const STA_PPSTIME: i32 = 0x0004; +pub const STA_FLL: i32 = 0x0008; +pub const STA_INS: i32 = 0x0010; +pub const STA_DEL: i32 = 0x0020; +pub const STA_UNSYNC: i32 = 0x0040; +pub const STA_FREQHOLD: i32 = 0x0080; +pub const STA_PPSSIGNAL: i32 = 0x0100; +pub const STA_PPSJITTER: i32 = 0x0200; +pub const STA_PPSWANDER: i32 = 0x0400; +pub const STA_PPSERROR: i32 = 0x0800; +pub const STA_CLOCKERR: i32 = 0x1000; +pub const STA_RONLY: i32 = + STA_PPSSIGNAL | STA_PPSJITTER | STA_PPSWANDER | STA_PPSERROR | STA_CLOCKERR; +pub const TIME_OK: i32 = 0; +pub const TIME_INS: i32 = 1; +pub const TIME_DEL: i32 = 2; +pub const TIME_OOP: i32 = 3; +pub const TIME_WAIT: i32 = 4; +pub const TIME_ERROR: i32 = 5; + +pub const PRIO_PROCESS: c_int = 0; +pub const PRIO_PGRP: c_int = 1; +pub const PRIO_USER: c_int = 2; + +pub const SCHED_OTHER: c_int = 0; +pub const SCHED_FIFO: c_int = 1; +pub const SCHED_RR: c_int = 2; +pub const SCHED_SYS: c_int = 3; +pub const SCHED_IA: c_int = 4; +pub const SCHED_FSS: c_int = 5; +pub const SCHED_FX: c_int = 6; + +// sys/priv.h +pub const PRIV_DEBUG: c_uint = 0x0001; +pub const PRIV_AWARE: c_uint = 0x0002; +pub const PRIV_AWARE_INHERIT: c_uint = 0x0004; +pub const __PROC_PROTECT: c_uint = 0x0008; +pub const NET_MAC_AWARE: c_uint = 0x0010; +pub const NET_MAC_AWARE_INHERIT: c_uint = 0x0020; +pub const PRIV_AWARE_RESET: c_uint = 0x0040; +pub const PRIV_XPOLICY: c_uint = 0x0080; +pub const PRIV_PFEXEC: c_uint = 0x0100; + +// sys/systeminfo.h +pub const SI_SYSNAME: c_int = 1; +pub const SI_HOSTNAME: c_int = 2; +pub const SI_RELEASE: c_int = 3; +pub const SI_VERSION: c_int = 4; +pub const SI_MACHINE: c_int = 5; +pub const SI_ARCHITECTURE: c_int = 6; +pub const SI_HW_SERIAL: c_int = 7; +pub const SI_HW_PROVIDER: c_int = 8; +pub const SI_SET_HOSTNAME: c_int = 258; +pub const SI_SET_SRPC_DOMAIN: c_int = 265; +pub const SI_PLATFORM: c_int = 513; +pub const SI_ISALIST: c_int = 514; +pub const SI_DHCP_CACHE: c_int = 515; +pub const SI_ARCHITECTURE_32: c_int = 516; +pub const SI_ARCHITECTURE_64: c_int = 517; +pub const SI_ARCHITECTURE_K: c_int = 518; +pub const SI_ARCHITECTURE_NATIVE: c_int = 519; + +// sys/lgrp_user.h +pub const LGRP_COOKIE_NONE: crate::lgrp_cookie_t = 0; +pub const LGRP_AFF_NONE: crate::lgrp_affinity_t = 0x0; +pub const LGRP_AFF_WEAK: crate::lgrp_affinity_t = 0x10; +pub const LGRP_AFF_STRONG: crate::lgrp_affinity_t = 0x100; +pub const LGRP_CONTENT_ALL: crate::lgrp_content_t = 0; +pub const LGRP_CONTENT_HIERARCHY: crate::lgrp_content_t = LGRP_CONTENT_ALL; +pub const LGRP_CONTENT_DIRECT: crate::lgrp_content_t = 1; +pub const LGRP_LAT_CPU_TO_MEM: crate::lgrp_lat_between_t = 0; +pub const LGRP_MEM_SZ_FREE: crate::lgrp_mem_size_flag_t = 0; +pub const LGRP_MEM_SZ_INSTALLED: crate::lgrp_mem_size_flag_t = 1; +pub const LGRP_VIEW_CALLER: crate::lgrp_view_t = 0; +pub const LGRP_VIEW_OS: crate::lgrp_view_t = 1; + +// sys/processor.h + +pub const P_OFFLINE: c_int = 0x001; +pub const P_ONLINE: c_int = 0x002; +pub const P_STATUS: c_int = 0x003; +pub const P_FAULTED: c_int = 0x004; +pub const P_POWEROFF: c_int = 0x005; +pub const P_NOINTR: c_int = 0x006; +pub const P_SPARE: c_int = 0x007; +pub const P_FORCED: c_int = 0x10000000; +pub const PI_TYPELEN: c_int = 16; +pub const PI_FPUTYPE: c_int = 32; + +// sys/auxv.h +pub const AT_SUN_HWCAP: c_uint = 2009; + +// As per sys/socket.h, header alignment must be 8 bytes on SPARC +// and 4 bytes everywhere else: +#[cfg(target_arch 
= "sparc64")]
+const _CMSG_HDR_ALIGNMENT: usize = 8;
+#[cfg(not(target_arch = "sparc64"))]
+const _CMSG_HDR_ALIGNMENT: usize = 4;
+
+const _CMSG_DATA_ALIGNMENT: usize = size_of::<c_int>();
+
+const NEWDEV: c_int = 1;
+
+// sys/sendfile.h
+pub const SFV_FD_SELF: c_int = -2;
+
+const fn _CMSG_HDR_ALIGN(p: usize) -> usize {
+    (p + _CMSG_HDR_ALIGNMENT - 1) & !(_CMSG_HDR_ALIGNMENT - 1)
+}
+
+const fn _CMSG_DATA_ALIGN(p: usize) -> usize {
+    (p + _CMSG_DATA_ALIGNMENT - 1) & !(_CMSG_DATA_ALIGNMENT - 1)
+}
+
+f! {
+    pub fn CMSG_DATA(cmsg: *const cmsghdr) -> *mut c_uchar {
+        _CMSG_DATA_ALIGN(cmsg.offset(1) as usize) as *mut c_uchar
+    }
+
+    pub const fn CMSG_LEN(length: c_uint) -> c_uint {
+        _CMSG_DATA_ALIGN(size_of::<cmsghdr>()) as c_uint + length
+    }
+
+    pub fn CMSG_FIRSTHDR(mhdr: *const crate::msghdr) -> *mut cmsghdr {
+        if ((*mhdr).msg_controllen as usize) < size_of::<cmsghdr>() {
+            core::ptr::null_mut::<cmsghdr>()
+        } else {
+            (*mhdr).msg_control as *mut cmsghdr
+        }
+    }
+
+    pub fn CMSG_NXTHDR(mhdr: *const crate::msghdr, cmsg: *const cmsghdr) -> *mut cmsghdr {
+        if cmsg.is_null() {
+            return crate::CMSG_FIRSTHDR(mhdr);
+        }
+        let next =
+            _CMSG_HDR_ALIGN(cmsg as usize + (*cmsg).cmsg_len as usize + size_of::<cmsghdr>());
+        let max = (*mhdr).msg_control as usize + (*mhdr).msg_controllen as usize;
+        if next > max {
+            core::ptr::null_mut::<cmsghdr>()
+        } else {
+            _CMSG_HDR_ALIGN(cmsg as usize + (*cmsg).cmsg_len as usize) as *mut cmsghdr
+        }
+    }
+
+    pub const fn CMSG_SPACE(length: c_uint) -> c_uint {
+        _CMSG_HDR_ALIGN(size_of::<cmsghdr>() as usize + length as usize) as c_uint
+    }
+
+    pub fn FD_CLR(fd: c_int, set: *mut fd_set) -> () {
+        let bits = size_of_val(&(*set).fds_bits[0]) * 8;
+        let fd = fd as usize;
+        (*set).fds_bits[fd / bits] &= !(1 << (fd % bits));
+        return;
+    }
+
+    pub fn FD_ISSET(fd: c_int, set: *const fd_set) -> bool {
+        let bits = size_of_val(&(*set).fds_bits[0]) * 8;
+        let fd = fd as usize;
+        return ((*set).fds_bits[fd / bits] & (1 << (fd % bits))) != 0;
+    }
+
+    pub fn FD_SET(fd: c_int, set: *mut fd_set) -> () {
+        let bits = size_of_val(&(*set).fds_bits[0]) * 8;
+        let fd = fd as usize;
+        (*set).fds_bits[fd / bits] |= 1 << (fd % bits);
+        return;
+    }
+
+    pub fn FD_ZERO(set: *mut fd_set) -> () {
+        for slot in (*set).fds_bits.iter_mut() {
+            *slot = 0;
+        }
+    }
+}
+
+safe_f! 
{ + pub fn SIGRTMAX() -> c_int { + unsafe { crate::sysconf(_SC_SIGRT_MAX) as c_int } + } + + pub fn SIGRTMIN() -> c_int { + unsafe { crate::sysconf(_SC_SIGRT_MIN) as c_int } + } + + pub const fn WIFEXITED(status: c_int) -> bool { + (status & 0xFF) == 0 + } + + pub const fn WEXITSTATUS(status: c_int) -> c_int { + (status >> 8) & 0xFF + } + + pub const fn WTERMSIG(status: c_int) -> c_int { + status & 0x7F + } + + pub const fn WIFCONTINUED(status: c_int) -> bool { + (status & 0xffff) == 0xffff + } + + pub const fn WSTOPSIG(status: c_int) -> c_int { + (status & 0xff00) >> 8 + } + + pub const fn WIFSIGNALED(status: c_int) -> bool { + ((status & 0xff) > 0) && (status & 0xff00 == 0) + } + + pub const fn WIFSTOPPED(status: c_int) -> bool { + ((status & 0xff) == 0x7f) && ((status & 0xff00) != 0) + } + + pub const fn WCOREDUMP(status: c_int) -> bool { + (status & 0x80) != 0 + } + + pub const fn MR_GET_TYPE(flags: c_uint) -> c_uint { + flags & 0x0000ffff + } +} + +extern "C" { + pub fn getrlimit(resource: c_int, rlim: *mut crate::rlimit) -> c_int; + pub fn setrlimit(resource: c_int, rlim: *const crate::rlimit) -> c_int; + + pub fn strerror_r(errnum: c_int, buf: *mut c_char, buflen: size_t) -> c_int; + + pub fn sem_destroy(sem: *mut sem_t) -> c_int; + pub fn sem_init(sem: *mut sem_t, pshared: c_int, value: c_uint) -> c_int; + + pub fn abs(i: c_int) -> c_int; + pub fn acct(filename: *const c_char) -> c_int; + pub fn dirfd(dirp: *mut crate::DIR) -> c_int; + pub fn labs(i: c_long) -> c_long; + pub fn rand() -> c_int; + pub fn srand(seed: c_uint); + pub fn getentropy(buf: *mut c_void, buflen: size_t) -> c_int; + pub fn getrandom(bbuf: *mut c_void, buflen: size_t, flags: c_uint) -> ssize_t; + + pub fn gettimeofday(tp: *mut crate::timeval, tz: *mut c_void) -> c_int; + pub fn settimeofday(tp: *const crate::timeval, tz: *const c_void) -> c_int; + pub fn getifaddrs(ifap: *mut *mut crate::ifaddrs) -> c_int; + pub fn freeifaddrs(ifa: *mut crate::ifaddrs); + + pub fn stack_getbounds(sp: *mut crate::stack_t) -> c_int; + pub fn getgrouplist( + name: *const c_char, + basegid: crate::gid_t, + groups: *mut crate::gid_t, + ngroups: *mut c_int, + ) -> c_int; + pub fn initgroups(name: *const c_char, basegid: crate::gid_t) -> c_int; + pub fn setgroups(ngroups: c_int, ptr: *const crate::gid_t) -> c_int; + pub fn ioctl(fildes: c_int, request: c_int, ...) 
-> c_int;
+    pub fn mprotect(addr: *mut c_void, len: size_t, prot: c_int) -> c_int;
+    pub fn ___errno() -> *mut c_int;
+    pub fn clock_getres(clk_id: crate::clockid_t, tp: *mut crate::timespec) -> c_int;
+    pub fn clock_gettime(clk_id: crate::clockid_t, tp: *mut crate::timespec) -> c_int;
+    pub fn clock_nanosleep(
+        clk_id: crate::clockid_t,
+        flags: c_int,
+        rqtp: *const crate::timespec,
+        rmtp: *mut crate::timespec,
+    ) -> c_int;
+    pub fn clock_settime(clk_id: crate::clockid_t, tp: *const crate::timespec) -> c_int;
+    pub fn getnameinfo(
+        sa: *const crate::sockaddr,
+        salen: crate::socklen_t,
+        host: *mut c_char,
+        hostlen: crate::socklen_t,
+        serv: *mut c_char,
+        servlen: crate::socklen_t,
+        flags: c_int,
+    ) -> c_int;
+    pub fn setpwent();
+    pub fn endpwent();
+    pub fn getpwent() -> *mut passwd;
+    pub fn fdatasync(fd: c_int) -> c_int;
+    pub fn nl_langinfo_l(item: crate::nl_item, locale: crate::locale_t) -> *mut c_char;
+    pub fn duplocale(base: crate::locale_t) -> crate::locale_t;
+    pub fn freelocale(loc: crate::locale_t);
+    pub fn newlocale(mask: c_int, locale: *const c_char, base: crate::locale_t) -> crate::locale_t;
+    pub fn uselocale(loc: crate::locale_t) -> crate::locale_t;
+    pub fn getprogname() -> *const c_char;
+    pub fn setprogname(name: *const c_char);
+    pub fn getloadavg(loadavg: *mut c_double, nelem: c_int) -> c_int;
+    pub fn getpriority(which: c_int, who: c_int) -> c_int;
+    pub fn setpriority(which: c_int, who: c_int, prio: c_int) -> c_int;
+
+    pub fn mknodat(dirfd: c_int, pathname: *const c_char, mode: mode_t, dev: dev_t) -> c_int;
+    pub fn mkfifoat(dirfd: c_int, pathname: *const c_char, mode: mode_t) -> c_int;
+    pub fn sethostname(name: *const c_char, len: c_int) -> c_int;
+    pub fn if_nameindex() -> *mut if_nameindex;
+    pub fn if_freenameindex(ptr: *mut if_nameindex);
+    pub fn pthread_create(
+        native: *mut crate::pthread_t,
+        attr: *const crate::pthread_attr_t,
+        f: extern "C" fn(*mut c_void) -> *mut c_void,
+        value: *mut c_void,
+    ) -> c_int;
+    pub fn pthread_attr_getstack(
+        attr: *const crate::pthread_attr_t,
+        stackaddr: *mut *mut c_void,
+        stacksize: *mut size_t,
+    ) -> c_int;
+    pub fn pthread_condattr_getclock(
+        attr: *const pthread_condattr_t,
+        clock_id: *mut clockid_t,
+    ) -> c_int;
+    pub fn pthread_condattr_setclock(
+        attr: *mut pthread_condattr_t,
+        clock_id: crate::clockid_t,
+    ) -> c_int;
+    pub fn sem_timedwait(sem: *mut sem_t, abstime: *const crate::timespec) -> c_int;
+    pub fn sem_getvalue(sem: *mut sem_t, sval: *mut c_int) -> c_int;
+    pub fn pthread_mutex_timedlock(
+        lock: *mut pthread_mutex_t,
+        abstime: *const crate::timespec,
+    ) -> c_int;
+    pub fn pthread_getname_np(tid: crate::pthread_t, name: *mut c_char, len: size_t) -> c_int;
+    pub fn pthread_setname_np(tid: crate::pthread_t, name: *const c_char) -> c_int;
+    pub fn waitid(
+        idtype: idtype_t,
+        id: id_t,
+        infop: *mut crate::siginfo_t,
+        options: c_int,
+    ) -> c_int;
+
+    #[cfg_attr(target_os = "illumos", link_name = "_glob_ext")]
+    pub fn glob(
+        pattern: *const c_char,
+        flags: c_int,
+        errfunc: Option<extern "C" fn(epath: *const c_char, errno: c_int) -> c_int>,
+        pglob: *mut crate::glob_t,
+    ) -> c_int;
+
+    #[cfg_attr(target_os = "illumos", link_name = "_globfree_ext")]
+    pub fn globfree(pglob: *mut crate::glob_t);
+
+    pub fn posix_fallocate(fd: c_int, offset: off_t, len: off_t) -> c_int;
+    pub fn posix_madvise(addr: *mut c_void, len: size_t, advice: c_int) -> c_int;
+
+    pub fn posix_spawn(
+        pid: *mut crate::pid_t,
+        path: *const c_char,
+        file_actions: *const posix_spawn_file_actions_t,
+        attrp: *const posix_spawnattr_t,
+        argv: *const *mut 
c_char, + envp: *const *mut c_char, + ) -> c_int; + pub fn posix_spawnp( + pid: *mut crate::pid_t, + file: *const c_char, + file_actions: *const posix_spawn_file_actions_t, + attrp: *const posix_spawnattr_t, + argv: *const *mut c_char, + envp: *const *mut c_char, + ) -> c_int; + + pub fn posix_spawn_file_actions_init(file_actions: *mut posix_spawn_file_actions_t) -> c_int; + pub fn posix_spawn_file_actions_destroy(file_actions: *mut posix_spawn_file_actions_t) + -> c_int; + pub fn posix_spawn_file_actions_addopen( + file_actions: *mut posix_spawn_file_actions_t, + fildes: c_int, + path: *const c_char, + oflag: c_int, + mode: mode_t, + ) -> c_int; + pub fn posix_spawn_file_actions_addclose( + file_actions: *mut posix_spawn_file_actions_t, + fildes: c_int, + ) -> c_int; + pub fn posix_spawn_file_actions_adddup2( + file_actions: *mut posix_spawn_file_actions_t, + fildes: c_int, + newfildes: c_int, + ) -> c_int; + pub fn posix_spawn_file_actions_addclosefrom_np( + file_actions: *mut posix_spawn_file_actions_t, + lowfiledes: c_int, + ) -> c_int; + pub fn posix_spawn_file_actions_addchdir( + file_actions: *mut posix_spawn_file_actions_t, + path: *const c_char, + ) -> c_int; + pub fn posix_spawn_file_actions_addchdir_np( + file_actions: *mut posix_spawn_file_actions_t, + path: *const c_char, + ) -> c_int; + pub fn posix_spawn_file_actions_addfchdir( + file_actions: *mut posix_spawn_file_actions_t, + fd: c_int, + ) -> c_int; + + pub fn posix_spawnattr_init(attr: *mut posix_spawnattr_t) -> c_int; + pub fn posix_spawnattr_destroy(attr: *mut posix_spawnattr_t) -> c_int; + pub fn posix_spawnattr_setflags(attr: *mut posix_spawnattr_t, flags: c_short) -> c_int; + pub fn posix_spawnattr_getflags(attr: *const posix_spawnattr_t, flags: *mut c_short) -> c_int; + pub fn posix_spawnattr_setpgroup(attr: *mut posix_spawnattr_t, pgroup: crate::pid_t) -> c_int; + pub fn posix_spawnattr_getpgroup( + attr: *const posix_spawnattr_t, + _pgroup: *mut crate::pid_t, + ) -> c_int; + pub fn posix_spawnattr_setschedparam( + attr: *mut posix_spawnattr_t, + param: *const crate::sched_param, + ) -> c_int; + pub fn posix_spawnattr_getschedparam( + attr: *const posix_spawnattr_t, + param: *mut crate::sched_param, + ) -> c_int; + pub fn posix_spawnattr_setschedpolicy(attr: *mut posix_spawnattr_t, policy: c_int) -> c_int; + pub fn posix_spawnattr_getschedpolicy( + attr: *const posix_spawnattr_t, + _policy: *mut c_int, + ) -> c_int; + pub fn posix_spawnattr_setsigdefault( + attr: *mut posix_spawnattr_t, + sigdefault: *const sigset_t, + ) -> c_int; + pub fn posix_spawnattr_getsigdefault( + attr: *const posix_spawnattr_t, + sigdefault: *mut sigset_t, + ) -> c_int; + pub fn posix_spawnattr_setsigignore_np( + attr: *mut posix_spawnattr_t, + sigignore: *const sigset_t, + ) -> c_int; + pub fn posix_spawnattr_getsigignore_np( + attr: *const posix_spawnattr_t, + sigignore: *mut sigset_t, + ) -> c_int; + pub fn posix_spawnattr_setsigmask( + attr: *mut posix_spawnattr_t, + sigmask: *const sigset_t, + ) -> c_int; + pub fn posix_spawnattr_getsigmask( + attr: *const posix_spawnattr_t, + sigmask: *mut sigset_t, + ) -> c_int; + + pub fn shmat(shmid: c_int, shmaddr: *const c_void, shmflg: c_int) -> *mut c_void; + + pub fn shmctl(shmid: c_int, cmd: c_int, buf: *mut crate::shmid_ds) -> c_int; + + pub fn shmdt(shmaddr: *const c_void) -> c_int; + + pub fn shmget(key: key_t, size: size_t, shmflg: c_int) -> c_int; + + pub fn shm_open(name: *const c_char, oflag: c_int, mode: mode_t) -> c_int; + pub fn shm_unlink(name: *const c_char) -> c_int; + + pub fn 
seekdir(dirp: *mut crate::DIR, loc: c_long); + + pub fn telldir(dirp: *mut crate::DIR) -> c_long; + pub fn madvise(addr: *mut c_void, len: size_t, advice: c_int) -> c_int; + + pub fn msync(addr: *mut c_void, len: size_t, flags: c_int) -> c_int; + + pub fn memalign(align: size_t, size: size_t) -> *mut c_void; + + pub fn recvfrom( + socket: c_int, + buf: *mut c_void, + len: size_t, + flags: c_int, + addr: *mut crate::sockaddr, + addrlen: *mut crate::socklen_t, + ) -> ssize_t; + pub fn mkstemps(template: *mut c_char, suffixlen: c_int) -> c_int; + pub fn futimesat(fd: c_int, path: *const c_char, times: *const crate::timeval) -> c_int; + pub fn futimens(dirfd: c_int, times: *const crate::timespec) -> c_int; + pub fn utimensat( + dirfd: c_int, + path: *const c_char, + times: *const crate::timespec, + flag: c_int, + ) -> c_int; + pub fn nl_langinfo(item: crate::nl_item) -> *mut c_char; + + #[link_name = "__xnet_bind"] + pub fn bind( + socket: c_int, + address: *const crate::sockaddr, + address_len: crate::socklen_t, + ) -> c_int; + + pub fn writev(fd: c_int, iov: *const crate::iovec, iovcnt: c_int) -> ssize_t; + pub fn readv(fd: c_int, iov: *const crate::iovec, iovcnt: c_int) -> ssize_t; + + #[link_name = "__xnet_sendmsg"] + pub fn sendmsg(fd: c_int, msg: *const crate::msghdr, flags: c_int) -> ssize_t; + #[link_name = "__xnet_recvmsg"] + pub fn recvmsg(fd: c_int, msg: *mut crate::msghdr, flags: c_int) -> ssize_t; + pub fn accept4( + fd: c_int, + address: *mut sockaddr, + address_len: *mut socklen_t, + flags: c_int, + ) -> c_int; + + pub fn mq_open(name: *const c_char, oflag: c_int, ...) -> crate::mqd_t; + pub fn mq_close(mqd: crate::mqd_t) -> c_int; + pub fn mq_unlink(name: *const c_char) -> c_int; + pub fn mq_receive( + mqd: crate::mqd_t, + msg_ptr: *mut c_char, + msg_len: size_t, + msg_prio: *mut c_uint, + ) -> ssize_t; + pub fn mq_timedreceive( + mqd: crate::mqd_t, + msg_ptr: *mut c_char, + msg_len: size_t, + msg_prio: *mut c_uint, + abs_timeout: *const crate::timespec, + ) -> ssize_t; + pub fn mq_send( + mqd: crate::mqd_t, + msg_ptr: *const c_char, + msg_len: size_t, + msg_prio: c_uint, + ) -> c_int; + pub fn mq_timedsend( + mqd: crate::mqd_t, + msg_ptr: *const c_char, + msg_len: size_t, + msg_prio: c_uint, + abs_timeout: *const crate::timespec, + ) -> c_int; + pub fn mq_getattr(mqd: crate::mqd_t, attr: *mut crate::mq_attr) -> c_int; + pub fn mq_setattr( + mqd: crate::mqd_t, + newattr: *const crate::mq_attr, + oldattr: *mut crate::mq_attr, + ) -> c_int; + pub fn port_create() -> c_int; + pub fn port_associate( + port: c_int, + source: c_int, + object: crate::uintptr_t, + events: c_int, + user: *mut c_void, + ) -> c_int; + pub fn port_dissociate(port: c_int, source: c_int, object: crate::uintptr_t) -> c_int; + pub fn port_get(port: c_int, pe: *mut port_event, timeout: *mut crate::timespec) -> c_int; + pub fn port_getn( + port: c_int, + pe_list: *mut port_event, + max: c_uint, + nget: *mut c_uint, + timeout: *mut crate::timespec, + ) -> c_int; + pub fn port_send(port: c_int, events: c_int, user: *mut c_void) -> c_int; + pub fn port_sendn( + port_list: *mut c_int, + error_list: *mut c_int, + nent: c_uint, + events: c_int, + user: *mut c_void, + ) -> c_int; + #[cfg_attr( + any(target_os = "solaris", target_os = "illumos"), + link_name = "__posix_getgrgid_r" + )] + pub fn getgrgid_r( + gid: crate::gid_t, + grp: *mut crate::group, + buf: *mut c_char, + buflen: size_t, + result: *mut *mut crate::group, + ) -> c_int; + pub fn sigaltstack(ss: *const stack_t, oss: *mut stack_t) -> c_int; + pub fn 
sigsuspend(mask: *const crate::sigset_t) -> c_int;
+    pub fn sem_close(sem: *mut sem_t) -> c_int;
+    pub fn getdtablesize() -> c_int;
+
+    #[cfg_attr(
+        any(target_os = "solaris", target_os = "illumos"),
+        link_name = "__posix_getgrnam_r"
+    )]
+    pub fn getgrnam_r(
+        name: *const c_char,
+        grp: *mut crate::group,
+        buf: *mut c_char,
+        buflen: size_t,
+        result: *mut *mut crate::group,
+    ) -> c_int;
+    pub fn thr_self() -> crate::thread_t;
+    pub fn pthread_sigmask(how: c_int, set: *const sigset_t, oldset: *mut sigset_t) -> c_int;
+    pub fn sem_open(name: *const c_char, oflag: c_int, ...) -> *mut sem_t;
+    pub fn getgrnam(name: *const c_char) -> *mut crate::group;
+    #[cfg_attr(target_os = "solaris", link_name = "__pthread_kill_xpg7")]
+    pub fn pthread_kill(thread: crate::pthread_t, sig: c_int) -> c_int;
+    pub fn sched_get_priority_min(policy: c_int) -> c_int;
+    pub fn sched_get_priority_max(policy: c_int) -> c_int;
+    pub fn sched_getparam(pid: crate::pid_t, param: *mut sched_param) -> c_int;
+    pub fn sched_setparam(pid: crate::pid_t, param: *const sched_param) -> c_int;
+    pub fn sched_getscheduler(pid: crate::pid_t) -> c_int;
+    pub fn sched_setscheduler(
+        pid: crate::pid_t,
+        policy: c_int,
+        param: *const crate::sched_param,
+    ) -> c_int;
+    pub fn sem_unlink(name: *const c_char) -> c_int;
+    pub fn daemon(nochdir: c_int, noclose: c_int) -> c_int;
+    #[cfg_attr(
+        any(target_os = "solaris", target_os = "illumos"),
+        link_name = "__posix_getpwnam_r"
+    )]
+    pub fn getpwnam_r(
+        name: *const c_char,
+        pwd: *mut passwd,
+        buf: *mut c_char,
+        buflen: size_t,
+        result: *mut *mut passwd,
+    ) -> c_int;
+    #[cfg_attr(
+        any(target_os = "solaris", target_os = "illumos"),
+        link_name = "__posix_getpwuid_r"
+    )]
+    pub fn getpwuid_r(
+        uid: crate::uid_t,
+        pwd: *mut passwd,
+        buf: *mut c_char,
+        buflen: size_t,
+        result: *mut *mut passwd,
+    ) -> c_int;
+    #[cfg_attr(
+        any(target_os = "solaris", target_os = "illumos"),
+        link_name = "getpwent_r"
+    )]
+    fn native_getpwent_r(pwd: *mut passwd, buf: *mut c_char, buflen: c_int) -> *mut passwd;
+    #[cfg_attr(
+        any(target_os = "solaris", target_os = "illumos"),
+        link_name = "getgrent_r"
+    )]
+    fn native_getgrent_r(
+        grp: *mut crate::group,
+        buf: *mut c_char,
+        buflen: c_int,
+    ) -> *mut crate::group;
+    #[cfg_attr(
+        any(target_os = "solaris", target_os = "illumos"),
+        link_name = "__posix_sigwait"
+    )]
+    pub fn sigwait(set: *const sigset_t, sig: *mut c_int) -> c_int;
+    pub fn pthread_atfork(
+        prepare: Option<unsafe extern "C" fn()>,
+        parent: Option<unsafe extern "C" fn()>,
+        child: Option<unsafe extern "C" fn()>,
+    ) -> c_int;
+    pub fn getgrgid(gid: crate::gid_t) -> *mut crate::group;
+    pub fn setgrent();
+    pub fn endgrent();
+    pub fn getgrent() -> *mut crate::group;
+    pub fn popen(command: *const c_char, mode: *const c_char) -> *mut crate::FILE;
+
+    pub fn dup3(src: c_int, dst: c_int, flags: c_int) -> c_int;
+    pub fn uname(buf: *mut crate::utsname) -> c_int;
+    pub fn pipe2(fds: *mut c_int, flags: c_int) -> c_int;
+
+    pub fn makeutx(ux: *const utmpx) -> *mut utmpx;
+    pub fn modutx(ux: *const utmpx) -> *mut utmpx;
+    pub fn updwtmpx(file: *const c_char, ut: *mut utmpx);
+    pub fn utmpxname(file: *const c_char) -> c_int;
+    pub fn getutxent() -> *mut utmpx;
+    pub fn getutxid(ut: *const utmpx) -> *mut utmpx;
+    pub fn getutxline(ut: *const utmpx) -> *mut utmpx;
+    pub fn pututxline(ut: *const utmpx) -> *mut utmpx;
+    pub fn setutxent();
+    pub fn endutxent();
+
+    pub fn endutent();
+    pub fn getutent() -> *mut utmp;
+    pub fn getutid(u: *const utmp) -> *mut utmp;
+    pub fn getutline(u: *const utmp) -> *mut utmp;
+    pub fn pututline(u: *const utmp) -> 
*mut utmp; + pub fn setutent(); + pub fn utmpname(file: *const c_char) -> c_int; + + pub fn getutmp(ux: *const utmpx, u: *mut utmp); + pub fn getutmpx(u: *const utmp, ux: *mut utmpx); + pub fn updwtmp(file: *const c_char, u: *mut utmp); + + pub fn ntp_adjtime(buf: *mut timex) -> c_int; + pub fn ntp_gettime(buf: *mut ntptimeval) -> c_int; + + pub fn timer_create(clock_id: clockid_t, evp: *mut sigevent, timerid: *mut timer_t) -> c_int; + pub fn timer_delete(timerid: timer_t) -> c_int; + pub fn timer_getoverrun(timerid: timer_t) -> c_int; + pub fn timer_gettime(timerid: timer_t, value: *mut itimerspec) -> c_int; + pub fn timer_settime( + timerid: timer_t, + flags: c_int, + value: *const itimerspec, + ovalue: *mut itimerspec, + ) -> c_int; + + pub fn ucred_get(pid: crate::pid_t) -> *mut ucred_t; + pub fn getpeerucred(fd: c_int, ucred: *mut *mut ucred_t) -> c_int; + + pub fn ucred_free(ucred: *mut ucred_t); + + pub fn ucred_geteuid(ucred: *const ucred_t) -> crate::uid_t; + pub fn ucred_getruid(ucred: *const ucred_t) -> crate::uid_t; + pub fn ucred_getsuid(ucred: *const ucred_t) -> crate::uid_t; + pub fn ucred_getegid(ucred: *const ucred_t) -> crate::gid_t; + pub fn ucred_getrgid(ucred: *const ucred_t) -> crate::gid_t; + pub fn ucred_getsgid(ucred: *const ucred_t) -> crate::gid_t; + pub fn ucred_getgroups(ucred: *const ucred_t, groups: *mut *const crate::gid_t) -> c_int; + pub fn ucred_getpid(ucred: *const ucred_t) -> crate::pid_t; + pub fn ucred_getprojid(ucred: *const ucred_t) -> projid_t; + pub fn ucred_getzoneid(ucred: *const ucred_t) -> zoneid_t; + pub fn ucred_getpflags(ucred: *const ucred_t, flags: c_uint) -> c_uint; + + pub fn ucred_size() -> size_t; + + pub fn pset_create(newpset: *mut crate::psetid_t) -> c_int; + pub fn pset_destroy(pset: crate::psetid_t) -> c_int; + pub fn pset_assign( + pset: crate::psetid_t, + cpu: crate::processorid_t, + opset: *mut psetid_t, + ) -> c_int; + pub fn pset_info( + pset: crate::psetid_t, + tpe: *mut c_int, + numcpus: *mut c_uint, + cpulist: *mut processorid_t, + ) -> c_int; + pub fn pset_bind( + pset: crate::psetid_t, + idtype: crate::idtype_t, + id: crate::id_t, + opset: *mut psetid_t, + ) -> c_int; + pub fn pset_list(pset: *mut psetid_t, numpsets: *mut c_uint) -> c_int; + pub fn pset_setattr(pset: psetid_t, attr: c_uint) -> c_int; + pub fn pset_getattr(pset: psetid_t, attr: *mut c_uint) -> c_int; + pub fn processor_bind( + idtype: crate::idtype_t, + id: crate::id_t, + new_binding: crate::processorid_t, + old_binding: *mut processorid_t, + ) -> c_int; + pub fn p_online(processorid: crate::processorid_t, flag: c_int) -> c_int; + pub fn processor_info(processorid: crate::processorid_t, infop: *mut processor_info_t) + -> c_int; + + pub fn getexecname() -> *const c_char; + + pub fn gethostid() -> c_long; + + pub fn getpflags(flags: c_uint) -> c_uint; + pub fn setpflags(flags: c_uint, value: c_uint) -> c_int; + + pub fn sysinfo(command: c_int, buf: *mut c_char, count: c_long) -> c_int; + + pub fn faccessat(fd: c_int, path: *const c_char, amode: c_int, flag: c_int) -> c_int; + + // #include + #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] + pub fn dl_iterate_phdr( + callback: Option< + unsafe extern "C" fn(info: *mut dl_phdr_info, size: usize, data: *mut c_void) -> c_int, + >, + data: *mut c_void, + ) -> c_int; + pub fn getpagesize() -> c_int; + pub fn getpagesizes(pagesize: *mut size_t, nelem: c_int) -> c_int; + pub fn mmapobj( + fd: c_int, + flags: c_uint, + storage: *mut mmapobj_result_t, + elements: *mut c_uint, + arg: *mut c_void, + ) -> 
c_int; + pub fn meminfo( + inaddr: *const u64, + addr_count: c_int, + info_req: *const c_uint, + info_count: c_int, + outdata: *mut u64, + validity: *mut c_uint, + ) -> c_int; + + pub fn strsep(string: *mut *mut c_char, delim: *const c_char) -> *mut c_char; + + pub fn getisax(array: *mut u32, n: c_uint) -> c_uint; + + pub fn backtrace(buffer: *mut *mut c_void, size: c_int) -> c_int; + pub fn backtrace_symbols(buffer: *const *mut c_void, size: c_int) -> *mut *mut c_char; + pub fn backtrace_symbols_fd(buffer: *const *mut c_void, size: c_int, fd: c_int); + + pub fn getopt_long( + argc: c_int, + argv: *const *mut c_char, + optstring: *const c_char, + longopts: *const option, + longindex: *mut c_int, + ) -> c_int; + + pub fn sync(); + + pub fn aio_cancel(fd: c_int, aiocbp: *mut aiocb) -> c_int; + pub fn aio_error(aiocbp: *const aiocb) -> c_int; + pub fn aio_fsync(op: c_int, aiocbp: *mut aiocb) -> c_int; + pub fn aio_read(aiocbp: *mut aiocb) -> c_int; + pub fn aio_return(aiocbp: *mut aiocb) -> ssize_t; + pub fn aio_suspend( + aiocb_list: *const *const aiocb, + nitems: c_int, + timeout: *const crate::timespec, + ) -> c_int; + pub fn aio_waitn( + aiocb_list: *mut *mut aiocb, + nent: c_uint, + nwait: *mut c_uint, + timeout: *const crate::timespec, + ) -> c_int; + pub fn aio_write(aiocbp: *mut aiocb) -> c_int; + pub fn lio_listio( + mode: c_int, + aiocb_list: *const *mut aiocb, + nitems: c_int, + sevp: *mut sigevent, + ) -> c_int; + + pub fn __major(version: c_int, devnum: crate::dev_t) -> crate::major_t; + pub fn __minor(version: c_int, devnum: crate::dev_t) -> crate::minor_t; + pub fn __makedev( + version: c_int, + majdev: crate::major_t, + mindev: crate::minor_t, + ) -> crate::dev_t; + + pub fn arc4random() -> u32; + pub fn arc4random_buf(buf: *mut c_void, nbytes: size_t); + pub fn arc4random_uniform(upper_bound: u32) -> u32; + + pub fn secure_getenv(name: *const c_char) -> *mut c_char; + + #[cfg_attr(target_os = "solaris", link_name = "__strftime_xpg7")] + pub fn strftime( + s: *mut c_char, + maxsize: size_t, + format: *const c_char, + timeptr: *const crate::tm, + ) -> size_t; + pub fn strftime_l( + s: *mut c_char, + maxsize: size_t, + format: *const c_char, + timeptr: *const crate::tm, + loc: crate::locale_t, + ) -> size_t; +} + +#[link(name = "sendfile")] +extern "C" { + pub fn sendfile(out_fd: c_int, in_fd: c_int, off: *mut off_t, len: size_t) -> ssize_t; + pub fn sendfilev( + fildes: c_int, + vec: *const sendfilevec_t, + sfvcnt: c_int, + xferred: *mut size_t, + ) -> ssize_t; +} + +#[link(name = "lgrp")] +extern "C" { + pub fn lgrp_init(view: lgrp_view_t) -> lgrp_cookie_t; + pub fn lgrp_fini(cookie: lgrp_cookie_t) -> c_int; + pub fn lgrp_affinity_get( + idtype: crate::idtype_t, + id: crate::id_t, + lgrp: crate::lgrp_id_t, + ) -> crate::lgrp_affinity_t; + pub fn lgrp_affinity_set( + idtype: crate::idtype_t, + id: crate::id_t, + lgrp: crate::lgrp_id_t, + aff: lgrp_affinity_t, + ) -> c_int; + pub fn lgrp_cpus( + cookie: crate::lgrp_cookie_t, + lgrp: crate::lgrp_id_t, + cpuids: *mut crate::processorid_t, + count: c_uint, + content: crate::lgrp_content_t, + ) -> c_int; + pub fn lgrp_mem_size( + cookie: crate::lgrp_cookie_t, + lgrp: crate::lgrp_id_t, + tpe: crate::lgrp_mem_size_flag_t, + content: crate::lgrp_content_t, + ) -> crate::lgrp_mem_size_t; + pub fn lgrp_nlgrps(cookie: crate::lgrp_cookie_t) -> c_int; + pub fn lgrp_view(cookie: crate::lgrp_cookie_t) -> crate::lgrp_view_t; + pub fn lgrp_home(idtype: crate::idtype_t, id: crate::id_t) -> crate::lgrp_id_t; + pub fn lgrp_version(version: c_int) 
-> c_int; + pub fn lgrp_resources( + cookie: crate::lgrp_cookie_t, + lgrp: crate::lgrp_id_t, + lgrps: *mut crate::lgrp_id_t, + count: c_uint, + tpe: crate::lgrp_rsrc_t, + ) -> c_int; + pub fn lgrp_root(cookie: crate::lgrp_cookie_t) -> crate::lgrp_id_t; +} + +pub unsafe fn major(device: crate::dev_t) -> crate::major_t { + __major(NEWDEV, device) +} + +pub unsafe fn minor(device: crate::dev_t) -> crate::minor_t { + __minor(NEWDEV, device) +} + +pub unsafe fn makedev(maj: crate::major_t, min: crate::minor_t) -> crate::dev_t { + __makedev(NEWDEV, maj, min) +} + +mod compat; +pub use self::compat::*; + +cfg_if! { + if #[cfg(target_os = "illumos")] { + mod illumos; + pub use self::illumos::*; + } else if #[cfg(target_os = "solaris")] { + mod solaris; + pub use self::solaris::*; + } else { + // Unknown target_os + } +} + +cfg_if! { + if #[cfg(target_arch = "x86_64")] { + mod x86_64; + mod x86_common; + pub use self::x86_64::*; + pub use self::x86_common::*; + } else if #[cfg(target_arch = "x86")] { + mod x86; + mod x86_common; + pub use self::x86::*; + pub use self::x86_common::*; + } +} diff --git a/vendor/libc/src/unix/solarish/solaris.rs b/vendor/libc/src/unix/solarish/solaris.rs new file mode 100644 index 00000000000000..58b097a16269b9 --- /dev/null +++ b/vendor/libc/src/unix/solarish/solaris.rs @@ -0,0 +1,239 @@ +use crate::prelude::*; +use crate::{ + exit_status, off_t, termios, NET_MAC_AWARE, NET_MAC_AWARE_INHERIT, PRIV_AWARE_RESET, + PRIV_DEBUG, PRIV_PFEXEC, PRIV_XPOLICY, +}; + +pub type door_attr_t = c_uint; +pub type door_id_t = c_ulonglong; +pub type lgrp_affinity_t = c_uint; + +e! { + #[repr(u32)] + pub enum lgrp_rsrc_t { + LGRP_RSRC_CPU = 0, + LGRP_RSRC_MEM = 1, + LGRP_RSRC_TYPES = 2, + } +} + +s! { + pub struct aiocb { + pub aio_fildes: c_int, + pub aio_buf: *mut c_void, + pub aio_nbytes: size_t, + pub aio_offset: off_t, + pub aio_reqprio: c_int, + pub aio_sigevent: crate::sigevent, + pub aio_lio_opcode: c_int, + pub aio_resultp: crate::aio_result_t, + pub aio_state: c_char, + pub aio_returned: c_char, + pub aio__pad1: [c_char; 2], + pub aio_flags: c_int, + } + + pub struct shmid_ds { + pub shm_perm: crate::ipc_perm, + pub shm_segsz: size_t, + pub shm_flags: crate::uintptr_t, + pub shm_lkcnt: c_ushort, + pub shm_lpid: crate::pid_t, + pub shm_cpid: crate::pid_t, + pub shm_nattch: crate::shmatt_t, + pub shm_cnattch: c_ulong, + pub shm_atime: crate::time_t, + pub shm_dtime: crate::time_t, + pub shm_ctime: crate::time_t, + pub shm_amp: *mut c_void, + pub shm_gransize: u64, + pub shm_allocated: u64, + pub shm_pad4: [i64; 1], + } + + pub struct xrs_t { + pub xrs_id: c_ulong, + pub xrs_ptr: *mut c_char, + } +} + +s_no_extra_traits! 
{ + #[repr(packed)] + pub struct door_desc_t__d_data__d_desc { + pub d_descriptor: c_int, + pub d_id: crate::door_id_t, + } + + pub union door_desc_t__d_data { + pub d_desc: door_desc_t__d_data__d_desc, + d_resv: [c_int; 5], /* Check out /usr/include/sys/door.h */ + } + + pub struct door_desc_t { + pub d_attributes: door_attr_t, + pub d_data: door_desc_t__d_data, + } + + pub struct door_arg_t { + pub data_ptr: *const c_char, + pub data_size: size_t, + pub desc_ptr: *const door_desc_t, + pub dec_num: c_uint, + pub rbuf: *const c_char, + pub rsize: size_t, + } + + pub struct utmpx { + pub ut_user: [c_char; _UTMP_USER_LEN], + pub ut_id: [c_char; _UTMP_ID_LEN], + pub ut_line: [c_char; _UTMP_LINE_LEN], + pub ut_pid: crate::pid_t, + pub ut_type: c_short, + pub ut_exit: exit_status, + pub ut_tv: crate::timeval, + pub ut_session: c_int, + pub pad: [c_int; 5], + pub ut_syslen: c_short, + pub ut_host: [c_char; 257], + } +} + +cfg_if! { + if #[cfg(feature = "extra_traits")] { + impl PartialEq for utmpx { + fn eq(&self, other: &utmpx) -> bool { + self.ut_type == other.ut_type + && self.ut_pid == other.ut_pid + && self.ut_user == other.ut_user + && self.ut_line == other.ut_line + && self.ut_id == other.ut_id + && self.ut_exit == other.ut_exit + && self.ut_session == other.ut_session + && self.ut_tv == other.ut_tv + && self.ut_syslen == other.ut_syslen + && self.pad == other.pad + && self + .ut_host + .iter() + .zip(other.ut_host.iter()) + .all(|(a, b)| a == b) + } + } + + impl Eq for utmpx {} + + impl hash::Hash for utmpx { + fn hash(&self, state: &mut H) { + self.ut_user.hash(state); + self.ut_type.hash(state); + self.ut_pid.hash(state); + self.ut_line.hash(state); + self.ut_id.hash(state); + self.ut_host.hash(state); + self.ut_exit.hash(state); + self.ut_session.hash(state); + self.ut_tv.hash(state); + self.ut_syslen.hash(state); + self.pad.hash(state); + } + } + } +} + +// FIXME(solaris): O_DIRECT and SIGINFO are NOT available on Solaris. +// But in past they were defined here and thus other crates expected them. +// Latest version v0.29.0 of Nix crate still expects this. Since last +// version of Nix crate is almost one year ago let's define these two +// temporarily before new Nix version is released. 
+pub const O_DIRECT: c_int = 0x2000000; +pub const SIGINFO: c_int = 41; + +pub const _UTMP_USER_LEN: usize = 32; +pub const _UTMP_LINE_LEN: usize = 32; +pub const _UTMP_ID_LEN: usize = 4; + +pub const PORT_SOURCE_POSTWAIT: c_int = 8; +pub const PORT_SOURCE_SIGNAL: c_int = 9; + +pub const AF_LOCAL: c_int = 1; // AF_UNIX +pub const AF_FILE: c_int = 1; // AF_UNIX + +pub const TCP_KEEPIDLE: c_int = 0x1d; +pub const TCP_KEEPINTVL: c_int = 0x1e; +pub const TCP_KEEPCNT: c_int = 0x1f; + +pub const F_DUPFD_CLOEXEC: c_int = 47; +pub const F_DUPFD_CLOFORK: c_int = 49; +pub const F_DUP2FD_CLOEXEC: c_int = 48; +pub const F_DUP2FD_CLOFORK: c_int = 50; + +pub const _PC_LAST: c_int = 102; + +pub const PRIV_PROC_SENSITIVE: c_uint = 0x0008; +pub const PRIV_PFEXEC_AUTH: c_uint = 0x0200; +pub const PRIV_PROC_TPD: c_uint = 0x0400; +pub const PRIV_TPD_UNSAFE: c_uint = 0x0800; +pub const PRIV_PROC_TPD_RESET: c_uint = 0x1000; +pub const PRIV_TPD_KILLABLE: c_uint = 0x2000; + +pub const POSIX_SPAWN_SETSID: c_short = 0x400; + +pub const PRIV_USER: c_uint = PRIV_DEBUG + | PRIV_PROC_SENSITIVE + | NET_MAC_AWARE + | NET_MAC_AWARE_INHERIT + | PRIV_XPOLICY + | PRIV_AWARE_RESET + | PRIV_PFEXEC + | PRIV_PFEXEC_AUTH + | PRIV_PROC_TPD + | PRIV_TPD_UNSAFE + | PRIV_TPD_KILLABLE + | PRIV_PROC_TPD_RESET; + +extern "C" { + // DIFF(main): changed to `*const *mut` in e77f551de9 + pub fn fexecve(fd: c_int, argv: *const *const c_char, envp: *const *const c_char) -> c_int; + + pub fn mincore(addr: *mut c_void, len: size_t, vec: *mut c_char) -> c_int; + + pub fn door_call(d: c_int, params: *mut door_arg_t) -> c_int; + pub fn door_return( + data_ptr: *mut c_char, + data_size: size_t, + desc_ptr: *mut door_desc_t, + num_desc: c_uint, + ) -> c_int; + pub fn door_create( + server_procedure: extern "C" fn( + cookie: *mut c_void, + argp: *mut c_char, + arg_size: size_t, + dp: *mut door_desc_t, + n_desc: c_uint, + ), + cookie: *mut c_void, + attributes: door_attr_t, + ) -> c_int; + + pub fn fattach(fildes: c_int, path: *const c_char) -> c_int; + + pub fn pthread_getattr_np(thread: crate::pthread_t, attr: *mut crate::pthread_attr_t) -> c_int; + + pub fn euidaccess(path: *const c_char, amode: c_int) -> c_int; + + pub fn openpty( + amain: *mut c_int, + asubord: *mut c_int, + name: *mut c_char, + termp: *mut termios, + winp: *mut crate::winsize, + ) -> c_int; + + pub fn forkpty( + amain: *mut c_int, + name: *mut c_char, + termp: *mut termios, + winp: *mut crate::winsize, + ) -> crate::pid_t; +} diff --git a/vendor/libc/src/unix/solarish/x86.rs b/vendor/libc/src/unix/solarish/x86.rs new file mode 100644 index 00000000000000..a37ed3d74e978c --- /dev/null +++ b/vendor/libc/src/unix/solarish/x86.rs @@ -0,0 +1,31 @@ +use crate::prelude::*; + +pub type Elf32_Addr = c_ulong; +pub type Elf32_Half = c_ushort; +pub type Elf32_Off = c_ulong; +pub type Elf32_Sword = c_long; +pub type Elf32_Word = c_ulong; +pub type Elf32_Lword = c_ulonglong; +pub type Elf32_Phdr = __c_anonymous_Elf32_Phdr; + +s! 
{ + pub struct __c_anonymous_Elf32_Phdr { + pub p_type: Elf32_Word, + pub p_offset: Elf32_Off, + pub p_vaddr: Elf32_Addr, + pub p_paddr: Elf32_Addr, + pub p_filesz: Elf32_Word, + pub p_memsz: Elf32_Word, + pub p_flags: Elf32_Word, + pub p_align: Elf32_Word, + } + + pub struct dl_phdr_info { + pub dlpi_addr: Elf32_Addr, + pub dlpi_name: *const c_char, + pub dlpi_phdr: *const Elf32_Phdr, + pub dlpi_phnum: Elf32_Half, + pub dlpi_adds: c_ulonglong, + pub dlpi_subs: c_ulonglong, + } +} diff --git a/vendor/libc/src/unix/solarish/x86_64.rs b/vendor/libc/src/unix/solarish/x86_64.rs new file mode 100644 index 00000000000000..a45ca4b7d09761 --- /dev/null +++ b/vendor/libc/src/unix/solarish/x86_64.rs @@ -0,0 +1,170 @@ +use crate::prelude::*; + +cfg_if! { + if #[cfg(target_os = "solaris")] { + use crate::unix::solarish::solaris; + } +} + +pub type greg_t = c_long; + +pub type Elf64_Addr = c_ulong; +pub type Elf64_Half = c_ushort; +pub type Elf64_Off = c_ulong; +pub type Elf64_Sword = c_int; +pub type Elf64_Sxword = c_long; +pub type Elf64_Word = c_uint; +pub type Elf64_Xword = c_ulong; +pub type Elf64_Lword = c_ulong; +pub type Elf64_Phdr = __c_anonymous_Elf64_Phdr; + +s! { + pub struct __c_anonymous_fpchip_state { + pub cw: u16, + pub sw: u16, + pub fctw: u8, + pub __fx_rsvd: u8, + pub fop: u16, + pub rip: u64, + pub rdp: u64, + pub mxcsr: u32, + pub mxcsr_mask: u32, + pub st: [crate::upad128_t; 8], + pub xmm: [crate::upad128_t; 16], + pub __fx_ign: [crate::upad128_t; 6], + pub status: u32, + pub xstatus: u32, + } + + pub struct __c_anonymous_Elf64_Phdr { + pub p_type: crate::Elf64_Word, + pub p_flags: crate::Elf64_Word, + pub p_offset: crate::Elf64_Off, + pub p_vaddr: crate::Elf64_Addr, + pub p_paddr: crate::Elf64_Addr, + pub p_filesz: crate::Elf64_Xword, + pub p_memsz: crate::Elf64_Xword, + pub p_align: crate::Elf64_Xword, + } + + pub struct dl_phdr_info { + pub dlpi_addr: crate::Elf64_Addr, + pub dlpi_name: *const c_char, + pub dlpi_phdr: *const crate::Elf64_Phdr, + pub dlpi_phnum: crate::Elf64_Half, + pub dlpi_adds: c_ulonglong, + pub dlpi_subs: c_ulonglong, + #[cfg(target_os = "solaris")] + pub dlpi_tls_modid: c_ulong, + #[cfg(target_os = "solaris")] + pub dlpi_tls_data: *mut c_void, + } +} + +s_no_extra_traits! { + pub union __c_anonymous_fp_reg_set { + pub fpchip_state: __c_anonymous_fpchip_state, + pub f_fpregs: [[u32; 13]; 10], + } + + pub struct fpregset_t { + pub fp_reg_set: __c_anonymous_fp_reg_set, + } + + pub struct mcontext_t { + pub gregs: [crate::greg_t; 28], + pub fpregs: fpregset_t, + } + + pub struct ucontext_t { + pub uc_flags: c_ulong, + pub uc_link: *mut ucontext_t, + pub uc_sigmask: crate::sigset_t, + pub uc_stack: crate::stack_t, + pub uc_mcontext: mcontext_t, + #[cfg(target_os = "illumos")] + pub uc_brand_data: [*mut c_void; 3], + #[cfg(target_os = "illumos")] + pub uc_xsave: c_long, + #[cfg(target_os = "illumos")] + pub uc_filler: c_long, + #[cfg(target_os = "solaris")] + pub uc_xrs: solaris::xrs_t, + #[cfg(target_os = "solaris")] + pub uc_lwpid: c_uint, + #[cfg(target_os = "solaris")] + pub uc_filler: [c_long; 2], + } +} + +cfg_if! 
{ + if #[cfg(feature = "extra_traits")] { + impl PartialEq for __c_anonymous_fp_reg_set { + fn eq(&self, other: &__c_anonymous_fp_reg_set) -> bool { + unsafe { + self.fpchip_state == other.fpchip_state + || self + .f_fpregs + .iter() + .zip(other.f_fpregs.iter()) + .all(|(a, b)| a == b) + } + } + } + impl Eq for __c_anonymous_fp_reg_set {} + impl PartialEq for fpregset_t { + fn eq(&self, other: &fpregset_t) -> bool { + self.fp_reg_set == other.fp_reg_set + } + } + impl Eq for fpregset_t {} + impl PartialEq for mcontext_t { + fn eq(&self, other: &mcontext_t) -> bool { + self.gregs == other.gregs && self.fpregs == other.fpregs + } + } + impl Eq for mcontext_t {} + impl PartialEq for ucontext_t { + fn eq(&self, other: &ucontext_t) -> bool { + self.uc_flags == other.uc_flags + && self.uc_link == other.uc_link + && self.uc_sigmask == other.uc_sigmask + && self.uc_stack == other.uc_stack + && self.uc_mcontext == other.uc_mcontext + && self.uc_filler == other.uc_filler + } + } + impl Eq for ucontext_t {} + } +} + +// sys/regset.h + +pub const REG_GSBASE: c_int = 27; +pub const REG_FSBASE: c_int = 26; +pub const REG_DS: c_int = 25; +pub const REG_ES: c_int = 24; +pub const REG_GS: c_int = 23; +pub const REG_FS: c_int = 22; +pub const REG_SS: c_int = 21; +pub const REG_RSP: c_int = 20; +pub const REG_RFL: c_int = 19; +pub const REG_CS: c_int = 18; +pub const REG_RIP: c_int = 17; +pub const REG_ERR: c_int = 16; +pub const REG_TRAPNO: c_int = 15; +pub const REG_RAX: c_int = 14; +pub const REG_RCX: c_int = 13; +pub const REG_RDX: c_int = 12; +pub const REG_RBX: c_int = 11; +pub const REG_RBP: c_int = 10; +pub const REG_RSI: c_int = 9; +pub const REG_RDI: c_int = 8; +pub const REG_R8: c_int = 7; +pub const REG_R9: c_int = 6; +pub const REG_R10: c_int = 5; +pub const REG_R11: c_int = 4; +pub const REG_R12: c_int = 3; +pub const REG_R13: c_int = 2; +pub const REG_R14: c_int = 1; +pub const REG_R15: c_int = 0; diff --git a/vendor/libc/src/unix/solarish/x86_common.rs b/vendor/libc/src/unix/solarish/x86_common.rs new file mode 100644 index 00000000000000..e72a22a83b4178 --- /dev/null +++ b/vendor/libc/src/unix/solarish/x86_common.rs @@ -0,0 +1,69 @@ +// AT_SUN_HWCAP +pub const AV_386_FPU: u32 = 0x00001; +pub const AV_386_TSC: u32 = 0x00002; +pub const AV_386_CX8: u32 = 0x00004; +pub const AV_386_SEP: u32 = 0x00008; +pub const AV_386_AMD_SYSC: u32 = 0x00010; +pub const AV_386_CMOV: u32 = 0x00020; +pub const AV_386_MMX: u32 = 0x00040; +pub const AV_386_AMD_MMX: u32 = 0x00080; +pub const AV_386_AMD_3DNow: u32 = 0x00100; +pub const AV_386_AMD_3DNowx: u32 = 0x00200; +pub const AV_386_FXSR: u32 = 0x00400; +pub const AV_386_SSE: u32 = 0x00800; +pub const AV_386_SSE2: u32 = 0x01000; +pub const AV_386_CX16: u32 = 0x10000; +pub const AV_386_AHF: u32 = 0x20000; +pub const AV_386_TSCP: u32 = 0x40000; +pub const AV_386_AMD_SSE4A: u32 = 0x80000; +pub const AV_386_POPCNT: u32 = 0x100000; +pub const AV_386_AMD_LZCNT: u32 = 0x200000; +pub const AV_386_SSSE3: u32 = 0x400000; +pub const AV_386_SSE4_1: u32 = 0x800000; +pub const AV_386_SSE4_2: u32 = 0x1000000; +pub const AV_386_MOVBE: u32 = 0x2000000; +pub const AV_386_AES: u32 = 0x4000000; +pub const AV_386_PCLMULQDQ: u32 = 0x8000000; +pub const AV_386_XSAVE: u32 = 0x10000000; +pub const AV_386_AVX: u32 = 0x20000000; +cfg_if! 
{ + if #[cfg(target_os = "illumos")] { + pub const AV_386_VMX: u32 = 0x40000000; + pub const AV_386_AMD_SVM: u32 = 0x80000000; + // AT_SUN_HWCAP2 + pub const AV_386_2_F16C: u32 = 0x00000001; + pub const AV_386_2_RDRAND: u32 = 0x00000002; + pub const AV_386_2_BMI1: u32 = 0x00000004; + pub const AV_386_2_BMI2: u32 = 0x00000008; + pub const AV_386_2_FMA: u32 = 0x00000010; + pub const AV_386_2_AVX2: u32 = 0x00000020; + pub const AV_386_2_ADX: u32 = 0x00000040; + pub const AV_386_2_RDSEED: u32 = 0x00000080; + pub const AV_386_2_AVX512F: u32 = 0x00000100; + pub const AV_386_2_AVX512DQ: u32 = 0x00000200; + pub const AV_386_2_AVX512IFMA: u32 = 0x00000400; + pub const AV_386_2_AVX512PF: u32 = 0x00000800; + pub const AV_386_2_AVX512ER: u32 = 0x00001000; + pub const AV_386_2_AVX512CD: u32 = 0x00002000; + pub const AV_386_2_AVX512BW: u32 = 0x00004000; + pub const AV_386_2_AVX512VL: u32 = 0x00008000; + pub const AV_386_2_AVX512VBMI: u32 = 0x00010000; + pub const AV_386_2_AVX512VPOPCDQ: u32 = 0x00020000; + pub const AV_386_2_AVX512_4NNIW: u32 = 0x00040000; + pub const AV_386_2_AVX512_4FMAPS: u32 = 0x00080000; + pub const AV_386_2_SHA: u32 = 0x00100000; + pub const AV_386_2_FSGSBASE: u32 = 0x00200000; + pub const AV_386_2_CLFLUSHOPT: u32 = 0x00400000; + pub const AV_386_2_CLWB: u32 = 0x00800000; + pub const AV_386_2_MONITORX: u32 = 0x01000000; + pub const AV_386_2_CLZERO: u32 = 0x02000000; + pub const AV_386_2_AVX512_VNNI: u32 = 0x04000000; + pub const AV_386_2_VPCLMULQDQ: u32 = 0x08000000; + pub const AV_386_2_VAES: u32 = 0x10000000; + // AT_SUN_FPTYPE + pub const AT_386_FPINFO_NONE: u32 = 0; + pub const AT_386_FPINFO_FXSAVE: u32 = 1; + pub const AT_386_FPINFO_XSAVE: u32 = 2; + pub const AT_386_FPINFO_XSAVE_AMD: u32 = 3; + } +} diff --git a/vendor/libc/src/vxworks/aarch64.rs b/vendor/libc/src/vxworks/aarch64.rs new file mode 100644 index 00000000000000..376783c8234baf --- /dev/null +++ b/vendor/libc/src/vxworks/aarch64.rs @@ -0,0 +1 @@ +pub type wchar_t = u32; diff --git a/vendor/libc/src/vxworks/arm.rs b/vendor/libc/src/vxworks/arm.rs new file mode 100644 index 00000000000000..376783c8234baf --- /dev/null +++ b/vendor/libc/src/vxworks/arm.rs @@ -0,0 +1 @@ +pub type wchar_t = u32; diff --git a/vendor/libc/src/vxworks/mod.rs b/vendor/libc/src/vxworks/mod.rs new file mode 100644 index 00000000000000..809640d1122216 --- /dev/null +++ b/vendor/libc/src/vxworks/mod.rs @@ -0,0 +1,2018 @@ +//! 
Interface to VxWorks C library + +use core::ptr::null_mut; + +use crate::prelude::*; + +#[derive(Debug)] +pub enum DIR {} +impl Copy for DIR {} +impl Clone for DIR { + fn clone(&self) -> DIR { + *self + } +} + +pub type intmax_t = i64; +pub type uintmax_t = u64; + +pub type uintptr_t = usize; +pub type intptr_t = isize; +pub type ptrdiff_t = isize; +pub type size_t = crate::uintptr_t; +pub type ssize_t = intptr_t; + +pub type pid_t = c_int; +pub type in_addr_t = u32; +pub type sighandler_t = size_t; +pub type cpuset_t = u32; + +pub type blkcnt_t = c_long; +pub type blksize_t = c_long; +pub type ino_t = c_ulong; + +pub type rlim_t = c_ulong; +pub type suseconds_t = c_long; +pub type time_t = c_longlong; + +pub type errno_t = c_int; + +pub type useconds_t = c_ulong; + +pub type socklen_t = c_uint; + +pub type pthread_t = c_ulong; + +pub type clockid_t = c_int; + +//defined for the structs +pub type dev_t = c_ulong; +pub type mode_t = c_int; +pub type nlink_t = c_ulong; +pub type uid_t = c_ushort; +pub type gid_t = c_ushort; +pub type sigset_t = c_ulonglong; +pub type key_t = c_long; + +pub type nfds_t = c_uint; +pub type stat64 = crate::stat; + +pub type pthread_key_t = c_ulong; + +// From b_off_t.h +pub type off_t = c_longlong; +pub type off64_t = off_t; + +// From b_BOOL.h +pub type BOOL = c_int; + +// From vxWind.h .. +pub type _Vx_OBJ_HANDLE = c_int; +pub type _Vx_TASK_ID = crate::_Vx_OBJ_HANDLE; +pub type _Vx_MSG_Q_ID = crate::_Vx_OBJ_HANDLE; +pub type _Vx_SEM_ID_KERNEL = crate::_Vx_OBJ_HANDLE; +pub type _Vx_RTP_ID = crate::_Vx_OBJ_HANDLE; +pub type _Vx_SD_ID = crate::_Vx_OBJ_HANDLE; +pub type _Vx_CONDVAR_ID = crate::_Vx_OBJ_HANDLE; +pub type _Vx_SEM_ID = *mut crate::_Vx_semaphore; +pub type OBJ_HANDLE = crate::_Vx_OBJ_HANDLE; +pub type TASK_ID = crate::OBJ_HANDLE; +pub type MSG_Q_ID = crate::OBJ_HANDLE; +pub type SEM_ID_KERNEL = crate::OBJ_HANDLE; +pub type RTP_ID = crate::OBJ_HANDLE; +pub type SD_ID = crate::OBJ_HANDLE; +pub type CONDVAR_ID = crate::OBJ_HANDLE; +pub type STATUS = crate::OBJ_HANDLE; + +// From vxTypes.h +pub type _Vx_usr_arg_t = isize; +pub type _Vx_exit_code_t = isize; +pub type _Vx_ticks_t = c_uint; +pub type _Vx_ticks64_t = c_ulonglong; + +pub type sa_family_t = c_uchar; + +// mqueue.h +pub type mqd_t = c_int; + +#[derive(Debug)] +pub enum _Vx_semaphore {} +impl Copy for _Vx_semaphore {} +impl Clone for _Vx_semaphore { + fn clone(&self) -> _Vx_semaphore { + *self + } +} + +impl siginfo_t { + pub unsafe fn si_addr(&self) -> *mut c_void { + self.si_addr + } + + pub unsafe fn si_value(&self) -> crate::sigval { + self.si_value + } + + pub unsafe fn si_pid(&self) -> crate::pid_t { + self.si_pid + } + + pub unsafe fn si_uid(&self) -> crate::uid_t { + self.si_uid + } + + pub unsafe fn si_status(&self) -> c_int { + self.si_status + } +} + +s! 
{ + // b_pthread_condattr_t.h + pub struct pthread_condattr_t { + pub condAttrStatus: c_int, + pub condAttrPshared: c_int, + pub condAttrClockId: crate::clockid_t, + } + + // b_pthread_cond_t.h + pub struct pthread_cond_t { + pub condSemId: crate::_Vx_SEM_ID, + pub condValid: c_int, + pub condInitted: c_int, + pub condRefCount: c_int, + pub condMutex: *mut crate::pthread_mutex_t, + pub condAttr: crate::pthread_condattr_t, + pub condSemName: [c_char; _PTHREAD_SHARED_SEM_NAME_MAX], + } + + // b_pthread_rwlockattr_t.h + pub struct pthread_rwlockattr_t { + pub rwlockAttrStatus: c_int, + pub rwlockAttrPshared: c_int, + pub rwlockAttrMaxReaders: c_uint, + pub rwlockAttrConformOpt: c_uint, + } + + // b_pthread_rwlock_t.h + pub struct pthread_rwlock_t { + pub rwlockSemId: crate::_Vx_SEM_ID, + pub rwlockReadersRefCount: c_uint, + pub rwlockValid: c_int, + pub rwlockInitted: c_int, + pub rwlockAttr: crate::pthread_rwlockattr_t, + pub rwlockSemName: [c_char; _PTHREAD_SHARED_SEM_NAME_MAX], + } + + // b_struct_timeval.h + pub struct timeval { + pub tv_sec: crate::time_t, + pub tv_usec: crate::suseconds_t, + } + + // socket.h + pub struct linger { + pub l_onoff: c_int, + pub l_linger: c_int, + } + + pub struct sockaddr { + pub sa_len: c_uchar, + pub sa_family: sa_family_t, + pub sa_data: [c_char; 14], + } + + pub struct iovec { + pub iov_base: *mut c_void, + pub iov_len: size_t, + } + + pub struct msghdr { + pub msg_name: *mut c_void, + pub msg_namelen: socklen_t, + pub msg_iov: *mut iovec, + pub msg_iovlen: c_int, + pub msg_control: *mut c_void, + pub msg_controllen: socklen_t, + pub msg_flags: c_int, + } + + pub struct cmsghdr { + pub cmsg_len: socklen_t, + pub cmsg_level: c_int, + pub cmsg_type: c_int, + } + + // poll.h + pub struct pollfd { + pub fd: c_int, + pub events: c_short, + pub revents: c_short, + } + + // resource.h + pub struct rlimit { + pub rlim_cur: crate::rlim_t, + pub rlim_max: crate::rlim_t, + } + + // stat.h + pub struct stat { + pub st_dev: crate::dev_t, + pub st_ino: crate::ino_t, + pub st_mode: mode_t, + pub st_nlink: crate::nlink_t, + pub st_uid: crate::uid_t, + pub st_gid: crate::gid_t, + pub st_rdev: crate::dev_t, + pub st_size: off_t, + pub st_atime: crate::time_t, + pub st_mtime: crate::time_t, + pub st_ctime: crate::time_t, + pub st_blksize: crate::blksize_t, + pub st_blocks: crate::blkcnt_t, + pub st_attrib: c_uchar, + pub st_reserved1: c_int, + pub st_reserved2: c_int, + pub st_reserved3: c_int, + pub st_reserved4: c_int, + } + + //b_struct__Timespec.h + pub struct _Timespec { + pub tv_sec: crate::time_t, + pub tv_nsec: c_long, + } + + // b_struct__Sched_param.h + pub struct sched_param { + pub sched_priority: c_int, /* scheduling priority */ + pub sched_ss_low_priority: c_int, /* low scheduling priority */ + pub sched_ss_repl_period: crate::_Timespec, /* replenishment period */ + pub sched_ss_init_budget: crate::_Timespec, /* initial budget */ + pub sched_ss_max_repl: c_int, /* max pending replenishment */ + } + + // b_pthread_attr_t.h + pub struct pthread_attr_t { + pub threadAttrStatus: c_int, + pub threadAttrStacksize: size_t, + pub threadAttrStackaddr: *mut c_void, + pub threadAttrGuardsize: size_t, + pub threadAttrDetachstate: c_int, + pub threadAttrContentionscope: c_int, + pub threadAttrInheritsched: c_int, + pub threadAttrSchedpolicy: c_int, + pub threadAttrName: *mut c_char, + pub threadAttrOptions: c_int, + pub threadAttrSchedparam: crate::sched_param, + } + + // signal.h + + pub struct sigaction { + pub sa_u: crate::sa_u_t, + pub sa_mask: crate::sigset_t, + 
pub sa_flags: c_int, + } + + // b_stack_t.h + pub struct stack_t { + pub ss_sp: *mut c_void, + pub ss_size: size_t, + pub ss_flags: c_int, + } + + // signal.h + pub struct siginfo_t { + pub si_signo: c_int, + pub si_code: c_int, + pub si_value: crate::sigval, + pub si_errno: c_int, + pub si_status: c_int, + pub si_addr: *mut c_void, + pub si_uid: crate::uid_t, + pub si_pid: crate::pid_t, + } + + // pthread.h (krnl) + // b_pthread_mutexattr_t.h (usr) + pub struct pthread_mutexattr_t { + mutexAttrStatus: c_int, + mutexAttrPshared: c_int, + mutexAttrProtocol: c_int, + mutexAttrPrioceiling: c_int, + mutexAttrType: c_int, + } + + // pthread.h (krnl) + // b_pthread_mutex_t.h (usr) + pub struct pthread_mutex_t { + pub mutexSemId: crate::_Vx_SEM_ID, /*_Vx_SEM_ID ..*/ + pub mutexValid: c_int, + pub mutexInitted: c_int, + pub mutexCondRefCount: c_int, + pub mutexSavPriority: c_int, + pub mutexAttr: crate::pthread_mutexattr_t, + pub mutexSemName: [c_char; _PTHREAD_SHARED_SEM_NAME_MAX], + } + + // b_struct_timespec.h + pub struct timespec { + pub tv_sec: crate::time_t, + pub tv_nsec: c_long, + } + + // time.h + pub struct tm { + pub tm_sec: c_int, + pub tm_min: c_int, + pub tm_hour: c_int, + pub tm_mday: c_int, + pub tm_mon: c_int, + pub tm_year: c_int, + pub tm_wday: c_int, + pub tm_yday: c_int, + pub tm_isdst: c_int, + } + + // in.h + pub struct in_addr { + pub s_addr: in_addr_t, + } + + // in.h + pub struct ip_mreq { + pub imr_multiaddr: in_addr, + pub imr_interface: in_addr, + } + + // in6.h + #[repr(align(4))] + pub struct in6_addr { + pub s6_addr: [u8; 16], + } + + // in6.h + pub struct ipv6_mreq { + pub ipv6mr_multiaddr: in6_addr, + pub ipv6mr_interface: c_uint, + } + + // netdb.h + pub struct addrinfo { + pub ai_flags: c_int, + pub ai_family: c_int, + pub ai_socktype: c_int, + pub ai_protocol: c_int, + pub ai_addrlen: size_t, + pub ai_canonname: *mut c_char, + pub ai_addr: *mut crate::sockaddr, + pub ai_next: *mut crate::addrinfo, + } + + // in.h + pub struct sockaddr_in { + pub sin_len: u8, + pub sin_family: u8, + pub sin_port: u16, + pub sin_addr: crate::in_addr, + pub sin_zero: [c_char; 8], + } + + // in6.h + pub struct sockaddr_in6 { + pub sin6_len: u8, + pub sin6_family: u8, + pub sin6_port: u16, + pub sin6_flowinfo: u32, + pub sin6_addr: crate::in6_addr, + pub sin6_scope_id: u32, + } + + pub struct Dl_info { + pub dli_fname: *const c_char, + pub dli_fbase: *mut c_void, + pub dli_sname: *const c_char, + pub dli_saddr: *mut c_void, + } + + pub struct mq_attr { + pub mq_maxmsg: c_long, + pub mq_msgsize: c_long, + pub mq_flags: c_long, + pub mq_curmsgs: c_long, + } +} + +s_no_extra_traits! 
{
+    // dirent.h
+    pub struct dirent {
+        pub d_ino: crate::ino_t,
+        pub d_name: [c_char; _PARM_NAME_MAX as usize + 1],
+        pub d_type: c_uchar,
+    }
+
+    pub struct sockaddr_un {
+        pub sun_len: u8,
+        pub sun_family: sa_family_t,
+        pub sun_path: [c_char; 104],
+    }
+
+    // rtpLibCommon.h
+    pub struct RTP_DESC {
+        pub status: c_int,
+        pub options: u32,
+        pub entrAddr: *mut c_void,
+        pub initTaskId: crate::TASK_ID,
+        pub parentId: crate::RTP_ID,
+        pub pathName: [c_char; VX_RTP_NAME_LENGTH as usize + 1],
+        pub taskCnt: c_int,
+        pub textStart: *mut c_void,
+        pub textEnd: *mut c_void,
+    }
+    // socket.h
+    pub struct sockaddr_storage {
+        pub ss_len: c_uchar,
+        pub ss_family: crate::sa_family_t,
+        pub __ss_pad1: [c_char; _SS_PAD1SIZE],
+        pub __ss_align: i32,
+        pub __ss_pad2: [c_char; _SS_PAD2SIZE],
+    }
+
+    pub union sa_u_t {
+        pub sa_handler: Option<unsafe extern "C" fn(c_int) -> !>,
+        pub sa_sigaction:
+            Option<unsafe extern "C" fn(c_int, *mut siginfo_t, *mut c_void) -> !>,
+    }
+
+    pub union sigval {
+        pub sival_int: c_int,
+        pub sival_ptr: *mut c_void,
+    }
+}
+
+cfg_if! {
+    if #[cfg(feature = "extra_traits")] {
+        impl PartialEq for sa_u_t {
+            fn eq(&self, other: &sa_u_t) -> bool {
+                unsafe {
+                    let h1 = match self.sa_handler {
+                        Some(handler) => handler as usize,
+                        None => 0 as usize,
+                    };
+                    let h2 = match other.sa_handler {
+                        Some(handler) => handler as usize,
+                        None => 0 as usize,
+                    };
+                    h1 == h2
+                }
+            }
+        }
+        impl Eq for sa_u_t {}
+        impl hash::Hash for sa_u_t {
+            fn hash<H: hash::Hasher>(&self, state: &mut H) {
+                unsafe {
+                    let h = match self.sa_handler {
+                        Some(handler) => handler as usize,
+                        None => 0 as usize,
+                    };
+                    h.hash(state)
+                }
+            }
+        }
+
+        impl PartialEq for sigval {
+            fn eq(&self, other: &sigval) -> bool {
+                unsafe { self.sival_ptr as usize == other.sival_ptr as usize }
+            }
+        }
+        impl Eq for sigval {}
+        impl hash::Hash for sigval {
+            fn hash<H: hash::Hasher>(&self, state: &mut H) {
+                unsafe { (self.sival_ptr as usize).hash(state) };
+            }
+        }
+    }
+}
+
+pub const STDIN_FILENO: c_int = 0;
+pub const STDOUT_FILENO: c_int = 1;
+pub const STDERR_FILENO: c_int = 2;
+
+pub const EXIT_SUCCESS: c_int = 0;
+pub const EXIT_FAILURE: c_int = 1;
+
+pub const EAI_SERVICE: c_int = 9;
+pub const EAI_SOCKTYPE: c_int = 10;
+pub const EAI_SYSTEM: c_int = 11;
+
+// FIXME(vxworks): This is not defined in vxWorks, but we have to define it here
+// to make the building pass for getrandom and std
+pub const RTLD_DEFAULT: *mut c_void = ptr::null_mut();
+
+//Clock Lib Stuff
+pub const CLOCK_REALTIME: c_int = 0x0;
+pub const CLOCK_MONOTONIC: c_int = 0x1;
+pub const CLOCK_PROCESS_CPUTIME_ID: c_int = 0x2;
+pub const CLOCK_THREAD_CPUTIME_ID: c_int = 0x3;
+pub const TIMER_ABSTIME: c_int = 0x1;
+pub const TIMER_RELTIME: c_int = 0x0;
+
+// PTHREAD STUFF
+pub const PTHREAD_INITIALIZED_OBJ: c_int = 0xF70990EF;
+pub const PTHREAD_DESTROYED_OBJ: c_int = -1;
+pub const PTHREAD_VALID_OBJ: c_int = 0xEC542A37;
+pub const PTHREAD_INVALID_OBJ: c_int = -1;
+pub const PTHREAD_UNUSED_YET_OBJ: c_int = -1;
+
+pub const PTHREAD_PRIO_NONE: c_int = 0;
+pub const PTHREAD_PRIO_INHERIT: c_int = 1;
+pub const PTHREAD_PRIO_PROTECT: c_int = 2;
+
+pub const PTHREAD_MUTEX_NORMAL: c_int = 0;
+pub const PTHREAD_MUTEX_ERRORCHECK: c_int = 1;
+pub const PTHREAD_MUTEX_RECURSIVE: c_int = 2;
+pub const PTHREAD_MUTEX_DEFAULT: c_int = PTHREAD_MUTEX_NORMAL;
+pub const PTHREAD_STACK_MIN: usize = 4096;
+pub const _PTHREAD_SHARED_SEM_NAME_MAX: usize = 30;
+
+//sched.h
+pub const SCHED_FIFO: c_int = 0x01;
+pub const SCHED_RR: c_int = 0x02;
+pub const SCHED_OTHER: c_int = 0x04;
+pub const SCHED_SPORADIC: c_int = 0x08;
+pub const PRIO_PROCESS: c_uint = 0;
+pub const SCHED_FIFO_HIGH_PRI:
c_int = 255; +pub const SCHED_FIFO_LOW_PRI: c_int = 0; +pub const SCHED_RR_HIGH_PRI: c_int = 255; +pub const SCHED_RR_LOW_PRI: c_int = 0; +pub const SCHED_SPORADIC_HIGH_PRI: c_int = 255; +pub const SCHED_SPORADIC_LOW_PRI: c_int = 0; + +// ERRNO STUFF +pub const ERROR: c_int = -1; +pub const OK: c_int = 0; +pub const EPERM: c_int = 1; /* Not owner */ +pub const ENOENT: c_int = 2; /* No such file or directory */ +pub const ESRCH: c_int = 3; /* No such process */ +pub const EINTR: c_int = 4; /* Interrupted system call */ +pub const EIO: c_int = 5; /* I/O error */ +pub const ENXIO: c_int = 6; /* No such device or address */ +pub const E2BIG: c_int = 7; /* Arg list too long */ +pub const ENOEXEC: c_int = 8; /* Exec format error */ +pub const EBADF: c_int = 9; /* Bad file number */ +pub const ECHILD: c_int = 10; /* No children */ +pub const EAGAIN: c_int = 11; /* No more processes */ +pub const ENOMEM: c_int = 12; /* Not enough core */ +pub const EACCES: c_int = 13; /* Permission denied */ +pub const EFAULT: c_int = 14; +pub const ENOTEMPTY: c_int = 15; +pub const EBUSY: c_int = 16; +pub const EEXIST: c_int = 17; +pub const EXDEV: c_int = 18; +pub const ENODEV: c_int = 19; +pub const ENOTDIR: c_int = 20; +pub const EISDIR: c_int = 21; +pub const EINVAL: c_int = 22; +pub const ENFILE: c_int = 23; +pub const EMFILE: c_int = 24; +pub const ENOTTY: c_int = 25; +pub const ENAMETOOLONG: c_int = 26; +pub const EFBIG: c_int = 27; +pub const ENOSPC: c_int = 28; +pub const ESPIPE: c_int = 29; +pub const EROFS: c_int = 30; +pub const EMLINK: c_int = 31; +pub const EPIPE: c_int = 32; +pub const EDEADLK: c_int = 33; +pub const ENOLCK: c_int = 34; +pub const ENOTSUP: c_int = 35; +pub const EMSGSIZE: c_int = 36; +pub const EDOM: c_int = 37; +pub const ERANGE: c_int = 38; +pub const EDOOM: c_int = 39; +pub const EDESTADDRREQ: c_int = 40; +pub const EPROTOTYPE: c_int = 41; +pub const ENOPROTOOPT: c_int = 42; +pub const EPROTONOSUPPORT: c_int = 43; +pub const ESOCKTNOSUPPORT: c_int = 44; +pub const EOPNOTSUPP: c_int = 45; +pub const EPFNOSUPPORT: c_int = 46; +pub const EAFNOSUPPORT: c_int = 47; +pub const EADDRINUSE: c_int = 48; +pub const EADDRNOTAVAIL: c_int = 49; +pub const ENOTSOCK: c_int = 50; +pub const ENETUNREACH: c_int = 51; +pub const ENETRESET: c_int = 52; +pub const ECONNABORTED: c_int = 53; +pub const ECONNRESET: c_int = 54; +pub const ENOBUFS: c_int = 55; +pub const EISCONN: c_int = 56; +pub const ENOTCONN: c_int = 57; +pub const ESHUTDOWN: c_int = 58; +pub const ETOOMANYREFS: c_int = 59; +pub const ETIMEDOUT: c_int = 60; +pub const ECONNREFUSED: c_int = 61; +pub const ENETDOWN: c_int = 62; +pub const ETXTBSY: c_int = 63; +pub const ELOOP: c_int = 64; +pub const EHOSTUNREACH: c_int = 65; +pub const ENOTBLK: c_int = 66; +pub const EHOSTDOWN: c_int = 67; +pub const EINPROGRESS: c_int = 68; +pub const EALREADY: c_int = 69; +pub const EWOULDBLOCK: c_int = 70; +pub const ENOSYS: c_int = 71; +pub const ECANCELED: c_int = 72; +pub const ENOSR: c_int = 74; +pub const ENOSTR: c_int = 75; +pub const EPROTO: c_int = 76; +pub const EBADMSG: c_int = 77; +pub const ENODATA: c_int = 78; +pub const ETIME: c_int = 79; +pub const ENOMSG: c_int = 80; +pub const EFPOS: c_int = 81; +pub const EILSEQ: c_int = 82; +pub const EDQUOT: c_int = 83; +pub const EIDRM: c_int = 84; +pub const EOVERFLOW: c_int = 85; +pub const EMULTIHOP: c_int = 86; +pub const ENOLINK: c_int = 87; +pub const ESTALE: c_int = 88; +pub const EOWNERDEAD: c_int = 89; +pub const ENOTRECOVERABLE: c_int = 90; + +// NFS errnos: Refer to 
pkgs_v2/storage/fs/nfs/h/nfs/nfsCommon.h +const M_nfsStat: c_int = 48 << 16; +enum nfsstat { + NFSERR_REMOTE = 71, + NFSERR_WFLUSH = 99, + NFSERR_BADHANDLE = 10001, + NFSERR_NOT_SYNC = 10002, + NFSERR_BAD_COOKIE = 10003, + NFSERR_TOOSMALL = 10005, + NFSERR_BADTYPE = 10007, + NFSERR_JUKEBOX = 10008, +} + +pub const S_nfsLib_NFS_OK: c_int = OK; +pub const S_nfsLib_NFSERR_PERM: c_int = EPERM; +pub const S_nfsLib_NFSERR_NOENT: c_int = ENOENT; +pub const S_nfsLib_NFSERR_IO: c_int = EIO; +pub const S_nfsLib_NFSERR_NXIO: c_int = ENXIO; +pub const S_nfsLib_NFSERR_ACCESS: c_int = EACCES; +pub const S_nfsLib_NFSERR_EXIST: c_int = EEXIST; +pub const S_nfsLib_NFSERR_ENODEV: c_int = ENODEV; +pub const S_nfsLib_NFSERR_NOTDIR: c_int = ENOTDIR; +pub const S_nfsLib_NFSERR_ISDIR: c_int = EISDIR; +pub const S_nfsLib_NFSERR_INVAL: c_int = EINVAL; +pub const S_nfsLib_NFSERR_FBIG: c_int = EFBIG; +pub const S_nfsLib_NFSERR_NOSPC: c_int = ENOSPC; +pub const S_nfsLib_NFSERR_ROFS: c_int = EROFS; +pub const S_nfsLib_NFSERR_NAMETOOLONG: c_int = ENAMETOOLONG; +pub const S_nfsLib_NFSERR_NOTEMPTY: c_int = ENOTEMPTY; +pub const S_nfsLib_NFSERR_DQUOT: c_int = EDQUOT; +pub const S_nfsLib_NFSERR_STALE: c_int = ESTALE; +pub const S_nfsLib_NFSERR_WFLUSH: c_int = M_nfsStat | nfsstat::NFSERR_WFLUSH as c_int; +pub const S_nfsLib_NFSERR_REMOTE: c_int = M_nfsStat | nfsstat::NFSERR_REMOTE as c_int; +pub const S_nfsLib_NFSERR_BADHANDLE: c_int = M_nfsStat | nfsstat::NFSERR_BADHANDLE as c_int; +pub const S_nfsLib_NFSERR_NOT_SYNC: c_int = M_nfsStat | nfsstat::NFSERR_NOT_SYNC as c_int; +pub const S_nfsLib_NFSERR_BAD_COOKIE: c_int = M_nfsStat | nfsstat::NFSERR_BAD_COOKIE as c_int; +pub const S_nfsLib_NFSERR_NOTSUPP: c_int = EOPNOTSUPP; +pub const S_nfsLib_NFSERR_TOOSMALL: c_int = M_nfsStat | nfsstat::NFSERR_TOOSMALL as c_int; +pub const S_nfsLib_NFSERR_SERVERFAULT: c_int = EIO; +pub const S_nfsLib_NFSERR_BADTYPE: c_int = M_nfsStat | nfsstat::NFSERR_BADTYPE as c_int; +pub const S_nfsLib_NFSERR_JUKEBOX: c_int = M_nfsStat | nfsstat::NFSERR_JUKEBOX as c_int; + +// internal offset values for below constants +const taskErrorBase: c_int = 0x00030000; +const semErrorBase: c_int = 0x00160000; +const objErrorBase: c_int = 0x003d0000; + +// taskLibCommon.h +pub const S_taskLib_NAME_NOT_FOUND: c_int = taskErrorBase + 0x0065; +pub const S_taskLib_TASK_HOOK_TABLE_FULL: c_int = taskErrorBase + 0x0066; +pub const S_taskLib_TASK_HOOK_NOT_FOUND: c_int = taskErrorBase + 0x0067; +pub const S_taskLib_ILLEGAL_PRIORITY: c_int = taskErrorBase + 0x0068; + +// FIXME(vxworks): could also be useful for TASK_DESC type +pub const VX_TASK_NAME_LENGTH: c_int = 31; +pub const VX_TASK_RENAME_LENGTH: c_int = 16; + +// semLibCommon.h +pub const S_semLib_INVALID_STATE: c_int = semErrorBase + 0x0065; +pub const S_semLib_INVALID_OPTION: c_int = semErrorBase + 0x0066; +pub const S_semLib_INVALID_QUEUE_TYPE: c_int = semErrorBase + 0x0067; +pub const S_semLib_INVALID_OPERATION: c_int = semErrorBase + 0x0068; + +// objLibCommon.h +pub const S_objLib_OBJ_ID_ERROR: c_int = objErrorBase + 0x0001; +pub const S_objLib_OBJ_UNAVAILABLE: c_int = objErrorBase + 0x0002; +pub const S_objLib_OBJ_DELETED: c_int = objErrorBase + 0x0003; +pub const S_objLib_OBJ_TIMEOUT: c_int = objErrorBase + 0x0004; +pub const S_objLib_OBJ_NO_METHOD: c_int = objErrorBase + 0x0005; + +// in.h +pub const IPPROTO_IP: c_int = 0; +pub const IPPROTO_IPV6: c_int = 41; + +pub const IP_TTL: c_int = 4; +pub const IP_MULTICAST_IF: c_int = 9; +pub const IP_MULTICAST_TTL: c_int = 10; +pub const IP_MULTICAST_LOOP: c_int = 
11; +pub const IP_ADD_MEMBERSHIP: c_int = 12; +pub const IP_DROP_MEMBERSHIP: c_int = 13; + +// in6.h +pub const IPV6_V6ONLY: c_int = 1; +pub const IPV6_UNICAST_HOPS: c_int = 4; +pub const IPV6_MULTICAST_IF: c_int = 9; +pub const IPV6_MULTICAST_HOPS: c_int = 10; +pub const IPV6_MULTICAST_LOOP: c_int = 11; +pub const IPV6_ADD_MEMBERSHIP: c_int = 12; +pub const IPV6_DROP_MEMBERSHIP: c_int = 13; + +// STAT Stuff +pub const S_IFMT: c_int = 0o17_0000; +pub const S_IFIFO: c_int = 0o1_0000; +pub const S_IFCHR: c_int = 0o2_0000; +pub const S_IFDIR: c_int = 0o4_0000; +pub const S_IFBLK: c_int = 0o6_0000; +pub const S_IFREG: c_int = 0o10_0000; +pub const S_IFLNK: c_int = 0o12_0000; +pub const S_IFSHM: c_int = 0o13_0000; +pub const S_IFSOCK: c_int = 0o14_0000; +pub const S_ISUID: c_int = 0o4000; +pub const S_ISGID: c_int = 0o2000; +pub const S_ISTXT: c_int = 0o1000; +pub const S_ISVTX: c_int = 0o1000; +pub const S_IRUSR: c_int = 0o0400; +pub const S_IWUSR: c_int = 0o0200; +pub const S_IXUSR: c_int = 0o0100; +pub const S_IRWXU: c_int = 0o0700; +pub const S_IRGRP: c_int = 0o0040; +pub const S_IWGRP: c_int = 0o0020; +pub const S_IXGRP: c_int = 0o0010; +pub const S_IRWXG: c_int = 0o0070; +pub const S_IROTH: c_int = 0o0004; +pub const S_IWOTH: c_int = 0o0002; +pub const S_IXOTH: c_int = 0o0001; +pub const S_IRWXO: c_int = 0o0007; + +pub const UTIME_OMIT: c_long = 0x3ffffffe; +pub const UTIME_NOW: c_long = 0x3fffffff; + +// socket.h +pub const SOL_SOCKET: c_int = 0xffff; +pub const SOMAXCONN: c_int = 128; + +pub const SO_DEBUG: c_int = 0x0001; +pub const SO_REUSEADDR: c_int = 0x0004; +pub const SO_KEEPALIVE: c_int = 0x0008; +pub const SO_DONTROUTE: c_int = 0x0010; +pub const SO_RCVLOWAT: c_int = 0x0012; +pub const SO_SNDLOWAT: c_int = 0x0013; +pub const SO_SNDTIMEO: c_int = 0x1005; +pub const SO_ACCEPTCONN: c_int = 0x001e; +pub const SO_BROADCAST: c_int = 0x0020; +pub const SO_USELOOPBACK: c_int = 0x0040; +pub const SO_LINGER: c_int = 0x0080; +pub const SO_REUSEPORT: c_int = 0x0200; + +pub const SO_VLAN: c_int = 0x8000; + +pub const SO_SNDBUF: c_int = 0x1001; +pub const SO_RCVBUF: c_int = 0x1002; +pub const SO_RCVTIMEO: c_int = 0x1006; +pub const SO_ERROR: c_int = 0x1007; +pub const SO_TYPE: c_int = 0x1008; +pub const SO_BINDTODEVICE: c_int = 0x1010; +pub const SO_OOBINLINE: c_int = 0x1011; +pub const SO_CONNTIMEO: c_int = 0x100a; + +pub const SOCK_STREAM: c_int = 1; +pub const SOCK_DGRAM: c_int = 2; +pub const SOCK_RAW: c_int = 3; +pub const SOCK_RDM: c_int = 4; +pub const SOCK_SEQPACKET: c_int = 5; +pub const SOCK_PACKET: c_int = 10; + +pub const _SS_MAXSIZE: usize = 128; +pub const _SS_ALIGNSIZE: usize = size_of::(); +pub const _SS_PAD1SIZE: usize = + _SS_ALIGNSIZE - size_of::() - size_of::(); +pub const _SS_PAD2SIZE: usize = _SS_MAXSIZE + - size_of::() + - size_of::() + - _SS_PAD1SIZE + - _SS_ALIGNSIZE; + +pub const MSG_OOB: c_int = 0x0001; +pub const MSG_PEEK: c_int = 0x0002; +pub const MSG_DONTROUTE: c_int = 0x0004; +pub const MSG_EOR: c_int = 0x0008; +pub const MSG_TRUNC: c_int = 0x0010; +pub const MSG_CTRUNC: c_int = 0x0020; +pub const MSG_WAITALL: c_int = 0x0040; +pub const MSG_DONTWAIT: c_int = 0x0080; +pub const MSG_EOF: c_int = 0x0100; +pub const MSG_EXP: c_int = 0x0200; +pub const MSG_MBUF: c_int = 0x0400; +pub const MSG_NOTIFICATION: c_int = 0x0800; +pub const MSG_COMPAT: c_int = 0x8000; + +pub const AF_UNSPEC: c_int = 0; +pub const AF_LOCAL: c_int = 1; +pub const AF_UNIX: c_int = AF_LOCAL; +pub const AF_INET: c_int = 2; +pub const AF_NETLINK: c_int = 16; +pub const AF_ROUTE: c_int = 17; +pub 
const AF_LINK: c_int = 18; +pub const AF_PACKET: c_int = 19; +pub const pseudo_AF_KEY: c_int = 27; +pub const AF_KEY: c_int = pseudo_AF_KEY; +pub const AF_INET6: c_int = 28; +pub const AF_SOCKDEV: c_int = 31; +pub const AF_TIPC: c_int = 33; +pub const AF_MIPC: c_int = 34; +pub const AF_MIPC_SAFE: c_int = 35; +pub const AF_MAX: c_int = 37; + +pub const SHUT_RD: c_int = 0; +pub const SHUT_WR: c_int = 1; +pub const SHUT_RDWR: c_int = 2; + +pub const IPPROTO_TCP: c_int = 6; +pub const TCP_NODELAY: c_int = 1; +pub const TCP_MAXSEG: c_int = 2; +pub const TCP_NOPUSH: c_int = 3; +pub const TCP_KEEPIDLE: c_int = 4; +pub const TCP_KEEPINTVL: c_int = 5; +pub const TCP_KEEPCNT: c_int = 6; + +// ioLib.h +pub const FIONREAD: c_int = 0x40040001; +pub const FIOFLUSH: c_int = 2; +pub const FIOOPTIONS: c_int = 3; +pub const FIOBAUDRATE: c_int = 4; +pub const FIODISKFORMAT: c_int = 5; +pub const FIODISKINIT: c_int = 6; +pub const FIOSEEK: c_int = 7; +pub const FIOWHERE: c_int = 8; +pub const FIODIRENTRY: c_int = 9; +pub const FIORENAME: c_int = 10; +pub const FIOREADYCHANGE: c_int = 11; +pub const FIODISKCHANGE: c_int = 13; +pub const FIOCANCEL: c_int = 14; +pub const FIOSQUEEZE: c_int = 15; +pub const FIOGETNAME: c_int = 18; +pub const FIONBIO: c_int = 0x90040010; + +// limits.h +pub const PATH_MAX: c_int = _PARM_PATH_MAX; +pub const _POSIX_PATH_MAX: c_int = 256; + +// Some poll stuff +pub const POLLIN: c_short = 0x0001; +pub const POLLPRI: c_short = 0x0002; +pub const POLLOUT: c_short = 0x0004; +pub const POLLRDNORM: c_short = 0x0040; +pub const POLLWRNORM: c_short = POLLOUT; +pub const POLLRDBAND: c_short = 0x0080; +pub const POLLWRBAND: c_short = 0x0100; +pub const POLLERR: c_short = 0x0008; +pub const POLLHUP: c_short = 0x0010; +pub const POLLNVAL: c_short = 0x0020; + +// fnctlcom.h +pub const FD_CLOEXEC: c_int = 1; +pub const F_DUPFD: c_int = 0; +pub const F_GETFD: c_int = 1; +pub const F_SETFD: c_int = 2; +pub const F_GETFL: c_int = 3; +pub const F_SETFL: c_int = 4; +pub const F_GETOWN: c_int = 5; +pub const F_SETOWN: c_int = 6; +pub const F_GETLK: c_int = 7; +pub const F_SETLK: c_int = 8; +pub const F_SETLKW: c_int = 9; +pub const F_DUPFD_CLOEXEC: c_int = 14; + +// signal.h +pub const SIG_DFL: sighandler_t = 0 as sighandler_t; +pub const SIG_IGN: sighandler_t = 1 as sighandler_t; +pub const SIG_ERR: sighandler_t = -1 as isize as sighandler_t; + +pub const SIGHUP: c_int = 1; +pub const SIGINT: c_int = 2; +pub const SIGQUIT: c_int = 3; +pub const SIGILL: c_int = 4; +pub const SIGTRAP: c_int = 5; +pub const SIGABRT: c_int = 6; +pub const SIGEMT: c_int = 7; +pub const SIGFPE: c_int = 8; +pub const SIGKILL: c_int = 9; +pub const SIGBUS: c_int = 10; +pub const SIGSEGV: c_int = 11; +pub const SIGFMT: c_int = 12; +pub const SIGPIPE: c_int = 13; +pub const SIGALRM: c_int = 14; +pub const SIGTERM: c_int = 15; +pub const SIGCNCL: c_int = 16; +pub const SIGSTOP: c_int = 17; +pub const SIGTSTP: c_int = 18; +pub const SIGCONT: c_int = 19; +pub const SIGCHLD: c_int = 20; +pub const SIGTTIN: c_int = 21; +pub const SIGTTOU: c_int = 22; +pub const SIGUSR1: c_int = 30; +pub const SIGUSR2: c_int = 31; +pub const SIGPOLL: c_int = 32; +pub const SIGPROF: c_int = 33; +pub const SIGSYS: c_int = 34; +pub const SIGURG: c_int = 35; +pub const SIGVTALRM: c_int = 36; +pub const SIGXCPU: c_int = 37; +pub const SIGXFSZ: c_int = 38; +pub const SIGRTMIN: c_int = 48; + +pub const SIGIO: c_int = SIGRTMIN; +pub const SIGWINCH: c_int = SIGRTMIN + 5; +pub const SIGLOST: c_int = SIGRTMIN + 6; + +pub const SIG_BLOCK: c_int = 1; +pub const 
SIG_UNBLOCK: c_int = 2; +pub const SIG_SETMASK: c_int = 3; + +pub const SA_NOCLDSTOP: c_int = 0x0001; +pub const SA_SIGINFO: c_int = 0x0002; +pub const SA_ONSTACK: c_int = 0x0004; +pub const SA_INTERRUPT: c_int = 0x0008; +pub const SA_RESETHAND: c_int = 0x0010; +pub const SA_RESTART: c_int = 0x0020; +pub const SA_NODEFER: c_int = 0x0040; +pub const SA_NOCLDWAIT: c_int = 0x0080; + +pub const SI_SYNC: c_int = 0; +pub const SI_USER: c_int = -1; +pub const SI_QUEUE: c_int = -2; +pub const SI_TIMER: c_int = -3; +pub const SI_ASYNCIO: c_int = -4; +pub const SI_MESGQ: c_int = -5; +pub const SI_CHILD: c_int = -6; +pub const SI_KILL: c_int = SI_USER; + +// vxParams.h definitions +pub const _PARM_NAME_MAX: c_int = 255; +pub const _PARM_PATH_MAX: c_int = 1024; + +// WAIT STUFF +pub const WNOHANG: c_int = 0x01; +pub const WUNTRACED: c_int = 0x02; + +const PTHREAD_MUTEXATTR_INITIALIZER: pthread_mutexattr_t = pthread_mutexattr_t { + mutexAttrStatus: PTHREAD_INITIALIZED_OBJ, + mutexAttrProtocol: PTHREAD_PRIO_NONE, + mutexAttrPrioceiling: 0, + mutexAttrType: PTHREAD_MUTEX_DEFAULT, + mutexAttrPshared: 1, +}; +pub const PTHREAD_MUTEX_INITIALIZER: pthread_mutex_t = pthread_mutex_t { + mutexSemId: null_mut(), + mutexValid: PTHREAD_VALID_OBJ, + mutexInitted: PTHREAD_UNUSED_YET_OBJ, + mutexCondRefCount: 0, + mutexSavPriority: -1, + mutexAttr: PTHREAD_MUTEXATTR_INITIALIZER, + mutexSemName: [0; _PTHREAD_SHARED_SEM_NAME_MAX], +}; + +const PTHREAD_CONDATTR_INITIALIZER: pthread_condattr_t = pthread_condattr_t { + condAttrStatus: 0xf70990ef, + condAttrPshared: 1, + condAttrClockId: CLOCK_REALTIME, +}; +pub const PTHREAD_COND_INITIALIZER: pthread_cond_t = pthread_cond_t { + condSemId: null_mut(), + condValid: PTHREAD_VALID_OBJ, + condInitted: PTHREAD_UNUSED_YET_OBJ, + condRefCount: 0, + condMutex: null_mut(), + condAttr: PTHREAD_CONDATTR_INITIALIZER, + condSemName: [0; _PTHREAD_SHARED_SEM_NAME_MAX], +}; + +const PTHREAD_RWLOCKATTR_INITIALIZER: pthread_rwlockattr_t = pthread_rwlockattr_t { + rwlockAttrStatus: PTHREAD_INITIALIZED_OBJ, + rwlockAttrPshared: 1, + rwlockAttrMaxReaders: 0, + rwlockAttrConformOpt: 1, +}; +pub const PTHREAD_RWLOCK_INITIALIZER: pthread_rwlock_t = pthread_rwlock_t { + rwlockSemId: null_mut(), + rwlockReadersRefCount: 0, + rwlockValid: PTHREAD_VALID_OBJ, + rwlockInitted: PTHREAD_UNUSED_YET_OBJ, + rwlockAttr: PTHREAD_RWLOCKATTR_INITIALIZER, + rwlockSemName: [0; _PTHREAD_SHARED_SEM_NAME_MAX], +}; + +pub const SEEK_SET: c_int = 0; +pub const SEEK_CUR: c_int = 1; +pub const SEEK_END: c_int = 2; + +// rtpLibCommon.h +pub const VX_RTP_NAME_LENGTH: c_int = 255; +pub const RTP_ID_ERROR: crate::RTP_ID = -1; + +// h/public/unistd.h +pub const _SC_GETPW_R_SIZE_MAX: c_int = 21; // Via unistd.h +pub const _SC_PAGESIZE: c_int = 39; +pub const O_ACCMODE: c_int = 3; +pub const O_CLOEXEC: c_int = 0x100000; // fcntlcom +pub const O_EXCL: c_int = 0x0800; +pub const O_CREAT: c_int = 0x0200; +pub const O_TRUNC: c_int = 0x0400; +pub const O_APPEND: c_int = 0x0008; +pub const O_RDWR: c_int = 0x0002; +pub const O_WRONLY: c_int = 0x0001; +pub const O_RDONLY: c_int = 0; +pub const O_NONBLOCK: c_int = 0x4000; + +// mman.h +pub const PROT_NONE: c_int = 0x0000; +pub const PROT_READ: c_int = 0x0001; +pub const PROT_WRITE: c_int = 0x0002; +pub const PROT_EXEC: c_int = 0x0004; + +pub const MAP_SHARED: c_int = 0x0001; +pub const MAP_PRIVATE: c_int = 0x0002; +pub const MAP_ANON: c_int = 0x0004; +pub const MAP_ANONYMOUS: c_int = MAP_ANON; +pub const MAP_FIXED: c_int = 0x0010; +pub const MAP_CONTIG: c_int = 0x0020; + +pub const 
MAP_FAILED: *mut c_void = !0 as *mut c_void;
+
+#[derive(Debug)]
+pub enum FILE {}
+impl Copy for FILE {}
+impl Clone for FILE {
+    fn clone(&self) -> FILE {
+        *self
+    }
+}
+#[derive(Debug)]
+pub enum fpos_t {} // FIXME(vxworks): fill this out with a struct
+impl Copy for fpos_t {}
+impl Clone for fpos_t {
+    fn clone(&self) -> fpos_t {
+        *self
+    }
+}
+
+f! {
+    pub const fn CMSG_ALIGN(len: usize) -> usize {
+        len + size_of::<usize>() - 1 & !(size_of::<usize>() - 1)
+    }
+
+    pub fn CMSG_NXTHDR(mhdr: *const msghdr, cmsg: *const cmsghdr) -> *mut cmsghdr {
+        let next = cmsg as usize
+            + CMSG_ALIGN((*cmsg).cmsg_len as usize)
+            + CMSG_ALIGN(size_of::<cmsghdr>());
+        let max = (*mhdr).msg_control as usize + (*mhdr).msg_controllen as usize;
+        if next <= max {
+            (cmsg as usize + CMSG_ALIGN((*cmsg).cmsg_len as usize)) as *mut cmsghdr
+        } else {
+            core::ptr::null_mut::<cmsghdr>()
+        }
+    }
+
+    pub fn CMSG_FIRSTHDR(mhdr: *const msghdr) -> *mut cmsghdr {
+        if (*mhdr).msg_controllen as usize > 0 {
+            (*mhdr).msg_control as *mut cmsghdr
+        } else {
+            core::ptr::null_mut::<cmsghdr>()
+        }
+    }
+
+    pub fn CMSG_DATA(cmsg: *const cmsghdr) -> *mut c_uchar {
+        (cmsg as *mut c_uchar).offset(CMSG_ALIGN(size_of::<cmsghdr>()) as isize)
+    }
+
+    pub const fn CMSG_SPACE(length: c_uint) -> c_uint {
+        (CMSG_ALIGN(length as usize) + CMSG_ALIGN(size_of::<cmsghdr>())) as c_uint
+    }
+
+    pub const fn CMSG_LEN(length: c_uint) -> c_uint {
+        CMSG_ALIGN(size_of::<cmsghdr>()) as c_uint + length
+    }
+}
+
+extern "C" {
+    pub fn isalnum(c: c_int) -> c_int;
+    pub fn isalpha(c: c_int) -> c_int;
+    pub fn iscntrl(c: c_int) -> c_int;
+    pub fn isdigit(c: c_int) -> c_int;
+    pub fn isgraph(c: c_int) -> c_int;
+    pub fn islower(c: c_int) -> c_int;
+    pub fn isprint(c: c_int) -> c_int;
+    pub fn ispunct(c: c_int) -> c_int;
+    pub fn isspace(c: c_int) -> c_int;
+    pub fn isupper(c: c_int) -> c_int;
+    pub fn isxdigit(c: c_int) -> c_int;
+    pub fn isblank(c: c_int) -> c_int;
+    pub fn tolower(c: c_int) -> c_int;
+    pub fn toupper(c: c_int) -> c_int;
+    pub fn fopen(filename: *const c_char, mode: *const c_char) -> *mut FILE;
+    pub fn freopen(filename: *const c_char, mode: *const c_char, file: *mut FILE) -> *mut FILE;
+    pub fn fflush(file: *mut FILE) -> c_int;
+    pub fn fclose(file: *mut FILE) -> c_int;
+    pub fn remove(filename: *const c_char) -> c_int;
+    pub fn rename(oldname: *const c_char, newname: *const c_char) -> c_int;
+    pub fn tmpfile() -> *mut FILE;
+    pub fn setvbuf(stream: *mut FILE, buffer: *mut c_char, mode: c_int, size: size_t) -> c_int;
+    pub fn setbuf(stream: *mut FILE, buf: *mut c_char);
+    pub fn getchar() -> c_int;
+    pub fn putchar(c: c_int) -> c_int;
+    pub fn fgetc(stream: *mut FILE) -> c_int;
+    pub fn fgets(buf: *mut c_char, n: c_int, stream: *mut FILE) -> *mut c_char;
+    pub fn fputc(c: c_int, stream: *mut FILE) -> c_int;
+    pub fn fputs(s: *const c_char, stream: *mut FILE) -> c_int;
+    pub fn puts(s: *const c_char) -> c_int;
+    pub fn ungetc(c: c_int, stream: *mut FILE) -> c_int;
+    pub fn fread(ptr: *mut c_void, size: size_t, nobj: size_t, stream: *mut FILE) -> size_t;
+    pub fn fwrite(ptr: *const c_void, size: size_t, nobj: size_t, stream: *mut FILE) -> size_t;
+    pub fn fseek(stream: *mut FILE, offset: c_long, whence: c_int) -> c_int;
+    pub fn ftell(stream: *mut FILE) -> c_long;
+    pub fn rewind(stream: *mut FILE);
+    pub fn fgetpos(stream: *mut FILE, ptr: *mut fpos_t) -> c_int;
+    pub fn fsetpos(stream: *mut FILE, ptr: *const fpos_t) -> c_int;
+    pub fn feof(stream: *mut FILE) -> c_int;
+    pub fn ferror(stream: *mut FILE) -> c_int;
+    pub fn perror(s: *const c_char);
+    pub fn atof(s: *const c_char) -> c_double;
+    pub fn
atoi(s: *const c_char) -> c_int; + pub fn atol(s: *const c_char) -> c_long; + pub fn atoll(s: *const c_char) -> c_longlong; + pub fn strtod(s: *const c_char, endp: *mut *mut c_char) -> c_double; + pub fn strtof(s: *const c_char, endp: *mut *mut c_char) -> c_float; + pub fn strtol(s: *const c_char, endp: *mut *mut c_char, base: c_int) -> c_long; + pub fn strtoll(s: *const c_char, endp: *mut *mut c_char, base: c_int) -> c_longlong; + pub fn strtoul(s: *const c_char, endp: *mut *mut c_char, base: c_int) -> c_ulong; + pub fn strtoull(s: *const c_char, endp: *mut *mut c_char, base: c_int) -> c_ulonglong; + pub fn calloc(nobj: size_t, size: size_t) -> *mut c_void; + pub fn malloc(size: size_t) -> *mut c_void; + pub fn realloc(p: *mut c_void, size: size_t) -> *mut c_void; + pub fn free(p: *mut c_void); + pub fn abort() -> !; + pub fn exit(status: c_int) -> !; + pub fn atexit(cb: extern "C" fn()) -> c_int; + pub fn system(s: *const c_char) -> c_int; + pub fn getenv(s: *const c_char) -> *mut c_char; + + pub fn strcpy(dst: *mut c_char, src: *const c_char) -> *mut c_char; + pub fn strncpy(dst: *mut c_char, src: *const c_char, n: size_t) -> *mut c_char; + pub fn strcat(s: *mut c_char, ct: *const c_char) -> *mut c_char; + pub fn strncat(s: *mut c_char, ct: *const c_char, n: size_t) -> *mut c_char; + pub fn strcmp(cs: *const c_char, ct: *const c_char) -> c_int; + pub fn strncmp(cs: *const c_char, ct: *const c_char, n: size_t) -> c_int; + pub fn strcoll(cs: *const c_char, ct: *const c_char) -> c_int; + pub fn strchr(cs: *const c_char, c: c_int) -> *mut c_char; + pub fn strrchr(cs: *const c_char, c: c_int) -> *mut c_char; + pub fn strspn(cs: *const c_char, ct: *const c_char) -> size_t; + pub fn strcspn(cs: *const c_char, ct: *const c_char) -> size_t; + pub fn strdup(cs: *const c_char) -> *mut c_char; + pub fn strpbrk(cs: *const c_char, ct: *const c_char) -> *mut c_char; + pub fn strstr(cs: *const c_char, ct: *const c_char) -> *mut c_char; + pub fn strcasecmp(s1: *const c_char, s2: *const c_char) -> c_int; + pub fn strncasecmp(s1: *const c_char, s2: *const c_char, n: size_t) -> c_int; + pub fn strlen(cs: *const c_char) -> size_t; + pub fn strerror(n: c_int) -> *mut c_char; + pub fn strtok(s: *mut c_char, t: *const c_char) -> *mut c_char; + pub fn strxfrm(s: *mut c_char, ct: *const c_char, n: size_t) -> size_t; + pub fn wcslen(buf: *const wchar_t) -> size_t; + pub fn wcstombs(dest: *mut c_char, src: *const wchar_t, n: size_t) -> size_t; + + pub fn memchr(cx: *const c_void, c: c_int, n: size_t) -> *mut c_void; + pub fn wmemchr(cx: *const wchar_t, c: wchar_t, n: size_t) -> *mut wchar_t; + pub fn memcmp(cx: *const c_void, ct: *const c_void, n: size_t) -> c_int; + pub fn memcpy(dest: *mut c_void, src: *const c_void, n: size_t) -> *mut c_void; + pub fn memmove(dest: *mut c_void, src: *const c_void, n: size_t) -> *mut c_void; + pub fn memset(dest: *mut c_void, c: c_int, n: size_t) -> *mut c_void; +} + +extern "C" { + pub fn fprintf(stream: *mut crate::FILE, format: *const c_char, ...) -> c_int; + pub fn printf(format: *const c_char, ...) -> c_int; + pub fn snprintf(s: *mut c_char, n: size_t, format: *const c_char, ...) -> c_int; + pub fn sprintf(s: *mut c_char, format: *const c_char, ...) -> c_int; + pub fn fscanf(stream: *mut crate::FILE, format: *const c_char, ...) -> c_int; + pub fn scanf(format: *const c_char, ...) -> c_int; + pub fn sscanf(s: *const c_char, format: *const c_char, ...) 
-> c_int; + pub fn getchar_unlocked() -> c_int; + pub fn putchar_unlocked(c: c_int) -> c_int; + pub fn stat(path: *const c_char, buf: *mut stat) -> c_int; + pub fn fdopen(fd: c_int, mode: *const c_char) -> *mut crate::FILE; + pub fn fileno(stream: *mut crate::FILE) -> c_int; + pub fn creat(path: *const c_char, mode: mode_t) -> c_int; + pub fn rewinddir(dirp: *mut crate::DIR); + pub fn fchown(fd: c_int, owner: crate::uid_t, group: crate::gid_t) -> c_int; + pub fn access(path: *const c_char, amode: c_int) -> c_int; + pub fn alarm(seconds: c_uint) -> c_uint; + pub fn fchdir(dirfd: c_int) -> c_int; + pub fn chown(path: *const c_char, uid: uid_t, gid: gid_t) -> c_int; + pub fn fpathconf(filedes: c_int, name: c_int) -> c_long; + pub fn getegid() -> gid_t; + pub fn geteuid() -> uid_t; + pub fn getgroups(ngroups_max: c_int, groups: *mut gid_t) -> c_int; + pub fn getlogin() -> *mut c_char; + pub fn getopt(argc: c_int, argv: *const *mut c_char, optstr: *const c_char) -> c_int; + pub fn pathconf(path: *const c_char, name: c_int) -> c_long; + pub fn pause() -> c_int; + pub fn seteuid(uid: uid_t) -> c_int; + pub fn setegid(gid: gid_t) -> c_int; + pub fn sleep(secs: c_uint) -> c_uint; + pub fn ttyname(fd: c_int) -> *mut c_char; + pub fn wait(status: *mut c_int) -> pid_t; + pub fn umask(mask: mode_t) -> mode_t; + pub fn mlock(addr: *const c_void, len: size_t) -> c_int; + pub fn mlockall(flags: c_int) -> c_int; + pub fn munlock(addr: *const c_void, len: size_t) -> c_int; + pub fn munlockall() -> c_int; + + pub fn mmap( + addr: *mut c_void, + len: size_t, + prot: c_int, + flags: c_int, + fd: c_int, + offset: off_t, + ) -> *mut c_void; + pub fn munmap(addr: *mut c_void, len: size_t) -> c_int; + + pub fn mprotect(addr: *mut c_void, len: size_t, prot: c_int) -> c_int; + pub fn msync(addr: *mut c_void, len: size_t, flags: c_int) -> c_int; + + pub fn truncate(path: *const c_char, length: off_t) -> c_int; + pub fn shm_open(name: *const c_char, oflag: c_int, mode: mode_t) -> c_int; + pub fn shm_unlink(name: *const c_char) -> c_int; + + pub fn gettimeofday(tp: *mut crate::timeval, tz: *mut c_void) -> c_int; + pub fn pthread_exit(value: *mut c_void) -> !; + pub fn pthread_attr_setdetachstate(attr: *mut crate::pthread_attr_t, state: c_int) -> c_int; + + pub fn strerror_r(errnum: c_int, buf: *mut c_char, buflen: size_t) -> c_int; + + pub fn sigaddset(set: *mut sigset_t, signum: c_int) -> c_int; + + pub fn sigaction(signum: c_int, act: *const sigaction, oldact: *mut sigaction) -> c_int; + + pub fn utimes(filename: *const c_char, times: *const crate::timeval) -> c_int; + + pub fn futimens(fd: c_int, times: *const crate::timespec) -> c_int; + + #[link_name = "_rtld_dlopen"] + pub fn dlopen(filename: *const c_char, flag: c_int) -> *mut c_void; + + #[link_name = "_rtld_dlerror"] + pub fn dlerror() -> *mut c_char; + + #[link_name = "_rtld_dlsym"] + pub fn dlsym(handle: *mut c_void, symbol: *const c_char) -> *mut c_void; + + #[link_name = "_rtld_dlclose"] + pub fn dlclose(handle: *mut c_void) -> c_int; + + #[link_name = "_rtld_dladdr"] + pub fn dladdr(addr: *mut c_void, info: *mut Dl_info) -> c_int; + + // time.h + pub fn gmtime_r(time_p: *const time_t, result: *mut tm) -> *mut tm; + pub fn localtime_r(time_p: *const time_t, result: *mut tm) -> *mut tm; + pub fn mktime(tm: *mut tm) -> time_t; + pub fn time(time: *mut time_t) -> time_t; + pub fn gmtime(time_p: *const time_t) -> *mut tm; + pub fn localtime(time_p: *const time_t) -> *mut tm; + pub fn timegm(tm: *mut tm) -> time_t; + pub fn difftime(time1: time_t, time0: 
time_t) -> c_double; + pub fn gethostname(name: *mut c_char, len: size_t) -> c_int; + pub fn usleep(secs: crate::useconds_t) -> c_int; + pub fn putenv(string: *mut c_char) -> c_int; + pub fn setlocale(category: c_int, locale: *const c_char) -> *mut c_char; + + pub fn sigprocmask(how: c_int, set: *const sigset_t, oldset: *mut sigset_t) -> c_int; + pub fn sigpending(set: *mut sigset_t) -> c_int; + + pub fn mkfifo(path: *const c_char, mode: mode_t) -> c_int; + + pub fn fseeko(stream: *mut crate::FILE, offset: off_t, whence: c_int) -> c_int; + pub fn ftello(stream: *mut crate::FILE) -> off_t; + pub fn mkstemp(template: *mut c_char) -> c_int; + + pub fn tmpnam(ptr: *mut c_char) -> *mut c_char; + + pub fn openlog(ident: *const c_char, logopt: c_int, facility: c_int); + pub fn closelog(); + pub fn setlogmask(maskpri: c_int) -> c_int; + pub fn syslog(priority: c_int, message: *const c_char, ...); + pub fn getline(lineptr: *mut *mut c_char, n: *mut size_t, stream: *mut FILE) -> ssize_t; + +} + +extern "C" { + // stdlib.h + pub fn memalign(block_size: size_t, size_arg: size_t) -> *mut c_void; + + // ioLib.h + pub fn getcwd(buf: *mut c_char, size: size_t) -> *mut c_char; + + // ioLib.h + pub fn chdir(attr: *const c_char) -> c_int; + + // pthread.h + pub fn pthread_mutexattr_init(attr: *mut pthread_mutexattr_t) -> c_int; + + // pthread.h + pub fn pthread_mutexattr_destroy(attr: *mut pthread_mutexattr_t) -> c_int; + + // pthread.h + pub fn pthread_mutexattr_settype(pAttr: *mut crate::pthread_mutexattr_t, pType: c_int) + -> c_int; + + // pthread.h + pub fn pthread_mutex_init( + mutex: *mut pthread_mutex_t, + attr: *const pthread_mutexattr_t, + ) -> c_int; + + // pthread.h + pub fn pthread_mutex_destroy(mutex: *mut pthread_mutex_t) -> c_int; + + // pthread.h + pub fn pthread_mutex_lock(mutex: *mut pthread_mutex_t) -> c_int; + + // pthread.h + pub fn pthread_mutex_trylock(mutex: *mut pthread_mutex_t) -> c_int; + + // pthread.h + pub fn pthread_mutex_timedlock(attr: *mut pthread_mutex_t, spec: *const timespec) -> c_int; + + // pthread.h + pub fn pthread_mutex_unlock(mutex: *mut pthread_mutex_t) -> c_int; + + // pthread.h + pub fn pthread_attr_setname(pAttr: *mut crate::pthread_attr_t, name: *mut c_char) -> c_int; + + // pthread.h + pub fn pthread_attr_setstacksize(attr: *mut crate::pthread_attr_t, stacksize: size_t) -> c_int; + + // pthread.h + pub fn pthread_attr_getstacksize( + attr: *const crate::pthread_attr_t, + size: *mut size_t, + ) -> c_int; + + // pthread.h + pub fn pthread_attr_init(attr: *mut crate::pthread_attr_t) -> c_int; + + // pthread.h + pub fn pthread_create( + pThread: *mut crate::pthread_t, + pAttr: *const crate::pthread_attr_t, + start_routine: extern "C" fn(*mut c_void) -> *mut c_void, + value: *mut c_void, + ) -> c_int; + + //pthread.h + pub fn pthread_setschedparam( + native: crate::pthread_t, + policy: c_int, + param: *const crate::sched_param, + ) -> c_int; + + //pthread.h + pub fn pthread_getschedparam( + native: crate::pthread_t, + policy: *mut c_int, + param: *mut crate::sched_param, + ) -> c_int; + + //pthread.h + pub fn pthread_attr_setinheritsched( + attr: *mut crate::pthread_attr_t, + inheritsched: c_int, + ) -> c_int; + + //pthread.h + pub fn pthread_attr_setschedpolicy(attr: *mut crate::pthread_attr_t, policy: c_int) -> c_int; + + // pthread.h + pub fn pthread_attr_destroy(thread: *mut crate::pthread_attr_t) -> c_int; + + // pthread.h + pub fn pthread_detach(thread: crate::pthread_t) -> c_int; + + // int pthread_atfork (void (*)(void), void (*)(void), void (*)(void)); + 
pub fn pthread_atfork( + prepare: Option<unsafe extern "C" fn()>, + parent: Option<unsafe extern "C" fn()>, + child: Option<unsafe extern "C" fn()>, + ) -> c_int; + + // stat.h + pub fn fstat(fildes: c_int, buf: *mut stat) -> c_int; + + // stat.h + pub fn lstat(path: *const c_char, buf: *mut stat) -> c_int; + + // unistd.h + pub fn ftruncate(fd: c_int, length: off_t) -> c_int; + + // dirent.h + pub fn readdir_r( + pDir: *mut crate::DIR, + entry: *mut crate::dirent, + result: *mut *mut crate::dirent, + ) -> c_int; + + // dirent.h + pub fn readdir(pDir: *mut crate::DIR) -> *mut crate::dirent; + + // fcntl.h or + // ioLib.h + pub fn open(path: *const c_char, oflag: c_int, ...) -> c_int; + + // poll.h + pub fn poll(fds: *mut pollfd, nfds: nfds_t, timeout: c_int) -> c_int; + + // pthread.h + pub fn pthread_condattr_init(attr: *mut crate::pthread_condattr_t) -> c_int; + + // pthread.h + pub fn pthread_condattr_destroy(attr: *mut crate::pthread_condattr_t) -> c_int; + + // pthread.h + pub fn pthread_condattr_getclock( + pAttr: *const crate::pthread_condattr_t, + pClockId: *mut crate::clockid_t, + ) -> c_int; + + // pthread.h + pub fn pthread_condattr_setclock( + pAttr: *mut crate::pthread_condattr_t, + clockId: crate::clockid_t, + ) -> c_int; + + // pthread.h + pub fn pthread_cond_init( + cond: *mut crate::pthread_cond_t, + attr: *const crate::pthread_condattr_t, + ) -> c_int; + + // pthread.h + pub fn pthread_cond_destroy(cond: *mut pthread_cond_t) -> c_int; + + // pthread.h + pub fn pthread_cond_signal(cond: *mut crate::pthread_cond_t) -> c_int; + + // pthread.h + pub fn pthread_cond_broadcast(cond: *mut crate::pthread_cond_t) -> c_int; + + // pthread.h + pub fn pthread_cond_wait( + cond: *mut crate::pthread_cond_t, + mutex: *mut crate::pthread_mutex_t, + ) -> c_int; + + // pthread.h + pub fn pthread_rwlockattr_init(attr: *mut crate::pthread_rwlockattr_t) -> c_int; + + // pthread.h + pub fn pthread_rwlockattr_destroy(attr: *mut crate::pthread_rwlockattr_t) -> c_int; + + // pthread.h + pub fn pthread_rwlockattr_setmaxreaders( + attr: *mut crate::pthread_rwlockattr_t, + attr2: c_uint, + ) -> c_int; + + // pthread.h + pub fn pthread_rwlock_init( + attr: *mut crate::pthread_rwlock_t, + host: *const crate::pthread_rwlockattr_t, + ) -> c_int; + + // pthread.h + pub fn pthread_rwlock_destroy(attr: *mut crate::pthread_rwlock_t) -> c_int; + + // pthread.h + pub fn pthread_rwlock_rdlock(attr: *mut crate::pthread_rwlock_t) -> c_int; + + // pthread.h + pub fn pthread_rwlock_tryrdlock(attr: *mut crate::pthread_rwlock_t) -> c_int; + + // pthread.h + pub fn pthread_rwlock_timedrdlock( + attr: *mut crate::pthread_rwlock_t, + host: *const crate::timespec, + ) -> c_int; + + // pthread.h + pub fn pthread_rwlock_wrlock(attr: *mut crate::pthread_rwlock_t) -> c_int; + + // pthread.h + pub fn pthread_rwlock_trywrlock(attr: *mut crate::pthread_rwlock_t) -> c_int; + + // pthread.h + pub fn pthread_rwlock_timedwrlock( + attr: *mut crate::pthread_rwlock_t, + host: *const crate::timespec, + ) -> c_int; + + // pthread.h + pub fn pthread_rwlock_unlock(attr: *mut crate::pthread_rwlock_t) -> c_int; + + // pthread.h + pub fn pthread_key_create( + key: *mut crate::pthread_key_t, + dtor: Option<unsafe extern "C" fn(*mut c_void)>, + ) -> c_int; + + // pthread.h + pub fn pthread_key_delete(key: crate::pthread_key_t) -> c_int; + + // pthread.h + pub fn pthread_setspecific(key: crate::pthread_key_t, value: *const c_void) -> c_int; + + // pthread.h + pub fn pthread_getspecific(key: crate::pthread_key_t) -> *mut c_void; + + // pthread.h + pub fn pthread_cond_timedwait( + cond: *mut crate::pthread_cond_t, + mutex: *mut 
crate::pthread_mutex_t, + abstime: *const crate::timespec, + ) -> c_int; + + // pthread.h + pub fn pthread_attr_getname(attr: *mut crate::pthread_attr_t, name: *mut *mut c_char) -> c_int; + + // pthread.h + pub fn pthread_join(thread: crate::pthread_t, status: *mut *mut c_void) -> c_int; + + // pthread.h + pub fn pthread_self() -> crate::pthread_t; + + // clockLib.h + pub fn clock_gettime(clock_id: crate::clockid_t, tp: *mut crate::timespec) -> c_int; + + // clockLib.h + pub fn clock_settime(clock_id: crate::clockid_t, tp: *const crate::timespec) -> c_int; + + // clockLib.h + pub fn clock_getres(clock_id: crate::clockid_t, res: *mut crate::timespec) -> c_int; + + // clockLib.h + pub fn clock_nanosleep( + clock_id: crate::clockid_t, + flags: c_int, + rqtp: *const crate::timespec, + rmtp: *mut crate::timespec, + ) -> c_int; + + // timerLib.h + pub fn nanosleep(rqtp: *const crate::timespec, rmtp: *mut crate::timespec) -> c_int; + + // socket.h + pub fn accept(s: c_int, addr: *mut crate::sockaddr, addrlen: *mut crate::socklen_t) -> c_int; + + // socket.h + pub fn bind(fd: c_int, addr: *const sockaddr, len: socklen_t) -> c_int; + + // socket.h + pub fn connect(s: c_int, name: *const crate::sockaddr, namelen: crate::socklen_t) -> c_int; + + // socket.h + pub fn getpeername( + s: c_int, + name: *mut crate::sockaddr, + namelen: *mut crate::socklen_t, + ) -> c_int; + + // socket.h + pub fn getsockname(socket: c_int, address: *mut sockaddr, address_len: *mut socklen_t) + -> c_int; + + // socket.h + pub fn getsockopt( + sockfd: c_int, + level: c_int, + optname: c_int, + optval: *mut c_void, + optlen: *mut crate::socklen_t, + ) -> c_int; + + // socket.h + pub fn listen(socket: c_int, backlog: c_int) -> c_int; + + // socket.h + pub fn recv(s: c_int, buf: *mut c_void, bufLen: size_t, flags: c_int) -> ssize_t; + + // socket.h + pub fn recvfrom( + s: c_int, + buf: *mut c_void, + bufLen: size_t, + flags: c_int, + from: *mut crate::sockaddr, + pFromLen: *mut crate::socklen_t, + ) -> ssize_t; + + pub fn recvmsg(socket: c_int, mp: *mut crate::msghdr, flags: c_int) -> ssize_t; + + // socket.h + pub fn send(socket: c_int, buf: *const c_void, len: size_t, flags: c_int) -> ssize_t; + + pub fn sendmsg(socket: c_int, mp: *const crate::msghdr, flags: c_int) -> ssize_t; + + // socket.h + pub fn sendto( + socket: c_int, + buf: *const c_void, + len: size_t, + flags: c_int, + addr: *const sockaddr, + addrlen: socklen_t, + ) -> ssize_t; + + // socket.h + pub fn setsockopt( + socket: c_int, + level: c_int, + name: c_int, + value: *const c_void, + option_len: socklen_t, + ) -> c_int; + + // socket.h + pub fn shutdown(s: c_int, how: c_int) -> c_int; + + // socket.h + pub fn socket(domain: c_int, _type: c_int, protocol: c_int) -> c_int; + + // icotl.h + pub fn ioctl(fd: c_int, request: c_int, ...) -> c_int; + + // fcntl.h + pub fn fcntl(fd: c_int, cmd: c_int, ...) 
-> c_int; + + // ntp_rfc2553.h for kernel + // netdb.h for user + pub fn gai_strerror(errcode: c_int) -> *mut c_char; + + // ioLib.h or + // unistd.h + pub fn close(fd: c_int) -> c_int; + + // ioLib.h or + // unistd.h + pub fn read(fd: c_int, buf: *mut c_void, count: size_t) -> ssize_t; + + // ioLib.h or + // unistd.h + pub fn write(fd: c_int, buf: *const c_void, count: size_t) -> ssize_t; + + // ioLib.h or + // unistd.h + pub fn isatty(fd: c_int) -> c_int; + + // ioLib.h or + // unistd.h + pub fn dup(src: c_int) -> c_int; + + // ioLib.h or + // unistd.h + pub fn dup2(src: c_int, dst: c_int) -> c_int; + + // ioLib.h or + // unistd.h + pub fn pipe(fds: *mut c_int) -> c_int; + + // ioLib.h or + // unistd.h + pub fn unlink(pathname: *const c_char) -> c_int; + + // unistd.h and + // ioLib.h + pub fn lseek(fd: c_int, offset: off_t, whence: c_int) -> off_t; + + // netdb.h + pub fn getaddrinfo( + node: *const c_char, + service: *const c_char, + hints: *const addrinfo, + res: *mut *mut addrinfo, + ) -> c_int; + + // netdb.h + pub fn freeaddrinfo(res: *mut addrinfo); + + // signal.h + pub fn signal(signum: c_int, handler: sighandler_t) -> sighandler_t; + + // unistd.h + pub fn getpid() -> pid_t; + + // unistd.h + pub fn getppid() -> pid_t; + + // wait.h + pub fn waitpid(pid: pid_t, status: *mut c_int, options: c_int) -> pid_t; + + // unistd.h + pub fn sysconf(attr: c_int) -> c_long; + + // stdlib.h + pub fn setenv( + // setenv.c + envVarName: *const c_char, + envVarValue: *const c_char, + overwrite: c_int, + ) -> c_int; + + // stdlib.h + pub fn unsetenv( + // setenv.c + envVarName: *const c_char, + ) -> c_int; + + // stdlib.h + pub fn realpath(fileName: *const c_char, resolvedName: *mut c_char) -> *mut c_char; + + // unistd.h + pub fn link(src: *const c_char, dst: *const c_char) -> c_int; + + // unistd.h + pub fn readlink(path: *const c_char, buf: *mut c_char, bufsize: size_t) -> ssize_t; + + // unistd.h + pub fn symlink(path1: *const c_char, path2: *const c_char) -> c_int; + + // dirent.h + pub fn opendir(name: *const c_char) -> *mut crate::DIR; + + // unistd.h + pub fn rmdir(path: *const c_char) -> c_int; + + // stat.h + pub fn mkdir(dirName: *const c_char, mode: mode_t) -> c_int; + + // stat.h + pub fn chmod(path: *const c_char, mode: mode_t) -> c_int; + + // stat.h + pub fn fchmod(attr1: c_int, attr2: mode_t) -> c_int; + + // unistd.h + pub fn fsync(fd: c_int) -> c_int; + + // dirent.h + pub fn closedir(ptr: *mut crate::DIR) -> c_int; + + //sched.h + pub fn sched_get_priority_max(policy: c_int) -> c_int; + + //sched.h + pub fn sched_get_priority_min(policy: c_int) -> c_int; + + //sched.h + pub fn sched_setparam(pid: crate::pid_t, param: *const crate::sched_param) -> c_int; + + //sched.h + pub fn sched_getparam(pid: crate::pid_t, param: *mut crate::sched_param) -> c_int; + + //sched.h + pub fn sched_setscheduler( + pid: crate::pid_t, + policy: c_int, + param: *const crate::sched_param, + ) -> c_int; + + //sched.h + pub fn sched_getscheduler(pid: crate::pid_t) -> c_int; + + //sched.h + pub fn sched_rr_get_interval(pid: crate::pid_t, tp: *mut crate::timespec) -> c_int; + + // sched.h + pub fn sched_yield() -> c_int; + + // errnoLib.h + pub fn errnoSet(err: c_int) -> c_int; + + // errnoLib.h + pub fn errnoGet() -> c_int; + + // unistd.h + pub fn _exit(status: c_int) -> !; + + // unistd.h + pub fn setgid(gid: crate::gid_t) -> c_int; + + // unistd.h + pub fn getgid() -> crate::gid_t; + + // unistd.h + pub fn setuid(uid: crate::uid_t) -> c_int; + + // unistd.h + pub fn getuid() -> crate::uid_t; + + // 
signal.h + pub fn sigemptyset(__set: *mut sigset_t) -> c_int; + + // pthread.h for kernel + // signal.h for user + pub fn pthread_sigmask(__how: c_int, __set: *const sigset_t, __oset: *mut sigset_t) -> c_int; + + // signal.h for user + pub fn kill(__pid: pid_t, __signo: c_int) -> c_int; + + // signal.h for user + pub fn sigqueue(__pid: pid_t, __signo: c_int, __value: crate::sigval) -> c_int; + + // signal.h for user + pub fn _sigqueue( + rtpId: crate::RTP_ID, + signo: c_int, + pValue: *const crate::sigval, + sigCode: c_int, + ) -> c_int; + + // signal.h + pub fn taskKill(taskId: crate::TASK_ID, signo: c_int) -> c_int; + + // signal.h + pub fn raise(__signo: c_int) -> c_int; + + // taskLibCommon.h + pub fn taskIdSelf() -> crate::TASK_ID; + pub fn taskDelay(ticks: crate::_Vx_ticks_t) -> c_int; + + // taskLib.h + pub fn taskNameSet(task_id: crate::TASK_ID, task_name: *mut c_char) -> c_int; + pub fn taskNameGet(task_id: crate::TASK_ID, buf_name: *mut c_char, bufsize: size_t) -> c_int; + + // rtpLibCommon.h + pub fn rtpInfoGet(rtpId: crate::RTP_ID, rtpStruct: *mut crate::RTP_DESC) -> c_int; + pub fn rtpSpawn( + pubrtpFileName: *const c_char, + argv: *mut *const c_char, + envp: *mut *const c_char, + priority: c_int, + uStackSize: size_t, + options: c_int, + taskOptions: c_int, + ) -> RTP_ID; + + // ioLib.h + pub fn _realpath(fileName: *const c_char, resolvedName: *mut c_char) -> *mut c_char; + + // pathLib.h + pub fn _pathIsAbsolute(filepath: *const c_char, pNameTail: *mut *const c_char) -> BOOL; + + pub fn writev(fd: c_int, iov: *const crate::iovec, iovcnt: c_int) -> ssize_t; + pub fn readv(fd: c_int, iov: *const crate::iovec, iovcnt: c_int) -> ssize_t; + + // randomNumGen.h + pub fn randBytes(buf: *mut c_uchar, length: c_int) -> c_int; + pub fn randABytes(buf: *mut c_uchar, length: c_int) -> c_int; + pub fn randUBytes(buf: *mut c_uchar, length: c_int) -> c_int; + pub fn randSecure() -> c_int; + + // mqueue.h + pub fn mq_open(name: *const c_char, oflag: c_int, ...) -> crate::mqd_t; + pub fn mq_close(mqd: crate::mqd_t) -> c_int; + pub fn mq_unlink(name: *const c_char) -> c_int; + pub fn mq_receive( + mqd: crate::mqd_t, + msg_ptr: *mut c_char, + msg_len: size_t, + msg_prio: *mut c_uint, + ) -> ssize_t; + pub fn mq_timedreceive( + mqd: crate::mqd_t, + msg_ptr: *mut c_char, + msg_len: size_t, + msg_prio: *mut c_uint, + abs_timeout: *const crate::timespec, + ) -> ssize_t; + pub fn mq_send( + mqd: crate::mqd_t, + msg_ptr: *const c_char, + msg_len: size_t, + msg_prio: c_uint, + ) -> c_int; + pub fn mq_timedsend( + mqd: crate::mqd_t, + msg_ptr: *const c_char, + msg_len: size_t, + msg_prio: c_uint, + abs_timeout: *const crate::timespec, + ) -> c_int; + pub fn mq_getattr(mqd: crate::mqd_t, attr: *mut crate::mq_attr) -> c_int; + pub fn mq_setattr( + mqd: crate::mqd_t, + newattr: *const crate::mq_attr, + oldattr: *mut crate::mq_attr, + ) -> c_int; + + // vxCpuLib.h + pub fn vxCpuEnabledGet() -> crate::cpuset_t; // Get set of running CPU's in the system + pub fn vxCpuConfiguredGet() -> crate::cpuset_t; // Get set of Configured CPU's in the system +} + +//Dummy functions, these don't really exist in VxWorks. + +// wait.h macros +safe_f! 
{ + pub const fn WIFEXITED(status: c_int) -> bool { + (status & 0xFF00) == 0 + } + pub const fn WIFSIGNALED(status: c_int) -> bool { + (status & 0xFF00) != 0 + } + pub const fn WIFSTOPPED(status: c_int) -> bool { + (status & 0xFF0000) != 0 + } + pub const fn WEXITSTATUS(status: c_int) -> c_int { + status & 0xFF + } + pub const fn WTERMSIG(status: c_int) -> c_int { + (status >> 8) & 0xFF + } + pub const fn WSTOPSIG(status: c_int) -> c_int { + (status >> 16) & 0xFF + } +} + +pub fn pread(_fd: c_int, _buf: *mut c_void, _count: size_t, _offset: off64_t) -> ssize_t { + -1 +} + +pub fn pwrite(_fd: c_int, _buf: *const c_void, _count: size_t, _offset: off64_t) -> ssize_t { + -1 +} +pub fn posix_memalign(memptr: *mut *mut c_void, align: size_t, size: size_t) -> c_int { + // check to see if align is a power of 2 and if align is a multiple + // of sizeof(void *) + if (align & align - 1 != 0) || (align as usize % size_of::<*const c_void>() != 0) { + return crate::EINVAL; + } + + unsafe { + // posix_memalign should not set errno + let e = crate::errnoGet(); + + let temp = memalign(align, size); + crate::errnoSet(e as c_int); + + if temp.is_null() { + crate::ENOMEM + } else { + *memptr = temp; + 0 + } + } +} + +cfg_if! { + if #[cfg(target_arch = "aarch64")] { + mod aarch64; + pub use self::aarch64::*; + } else if #[cfg(target_arch = "arm")] { + mod arm; + pub use self::arm::*; + } else if #[cfg(target_arch = "x86")] { + mod x86; + pub use self::x86::*; + } else if #[cfg(target_arch = "x86_64")] { + mod x86_64; + pub use self::x86_64::*; + } else if #[cfg(target_arch = "powerpc")] { + mod powerpc; + pub use self::powerpc::*; + } else if #[cfg(target_arch = "powerpc64")] { + mod powerpc64; + pub use self::powerpc64::*; + } else if #[cfg(target_arch = "riscv32")] { + mod riscv32; + pub use self::riscv32::*; + } else if #[cfg(target_arch = "riscv64")] { + mod riscv64; + pub use self::riscv64::*; + } else { + // Unknown target_arch + } +} diff --git a/vendor/libc/src/vxworks/powerpc.rs b/vendor/libc/src/vxworks/powerpc.rs new file mode 100644 index 00000000000000..376783c8234baf --- /dev/null +++ b/vendor/libc/src/vxworks/powerpc.rs @@ -0,0 +1 @@ +pub type wchar_t = u32; diff --git a/vendor/libc/src/vxworks/powerpc64.rs b/vendor/libc/src/vxworks/powerpc64.rs new file mode 100644 index 00000000000000..376783c8234baf --- /dev/null +++ b/vendor/libc/src/vxworks/powerpc64.rs @@ -0,0 +1 @@ +pub type wchar_t = u32; diff --git a/vendor/libc/src/vxworks/riscv32.rs b/vendor/libc/src/vxworks/riscv32.rs new file mode 100644 index 00000000000000..f562626f7fb2b7 --- /dev/null +++ b/vendor/libc/src/vxworks/riscv32.rs @@ -0,0 +1 @@ +pub type wchar_t = i32; diff --git a/vendor/libc/src/vxworks/riscv64.rs b/vendor/libc/src/vxworks/riscv64.rs new file mode 100644 index 00000000000000..f562626f7fb2b7 --- /dev/null +++ b/vendor/libc/src/vxworks/riscv64.rs @@ -0,0 +1 @@ +pub type wchar_t = i32; diff --git a/vendor/libc/src/vxworks/x86.rs b/vendor/libc/src/vxworks/x86.rs new file mode 100644 index 00000000000000..f562626f7fb2b7 --- /dev/null +++ b/vendor/libc/src/vxworks/x86.rs @@ -0,0 +1 @@ +pub type wchar_t = i32; diff --git a/vendor/libc/src/vxworks/x86_64.rs b/vendor/libc/src/vxworks/x86_64.rs new file mode 100644 index 00000000000000..f562626f7fb2b7 --- /dev/null +++ b/vendor/libc/src/vxworks/x86_64.rs @@ -0,0 +1 @@ +pub type wchar_t = i32; diff --git a/vendor/libc/src/wasi/mod.rs b/vendor/libc/src/wasi/mod.rs new file mode 100644 index 00000000000000..bb3b7295487801 --- /dev/null +++ b/vendor/libc/src/wasi/mod.rs @@ -0,0 +1,853 @@ +//! 
[wasi-libc](https://github.com/WebAssembly/wasi-libc) definitions. +//! +//! `wasi-libc` project provides multiple libraries including emulated features, but we list only +//! basic features with `libc.a` here. + +use core::iter::Iterator; + +use crate::prelude::*; + +pub type intmax_t = i64; +pub type uintmax_t = u64; +pub type size_t = usize; +pub type ssize_t = isize; +pub type ptrdiff_t = isize; +pub type intptr_t = isize; +pub type uintptr_t = usize; +pub type off_t = i64; +pub type pid_t = i32; +pub type clock_t = c_longlong; +pub type time_t = c_longlong; +pub type ino_t = u64; +pub type sigset_t = c_uchar; +pub type suseconds_t = c_longlong; +pub type mode_t = u32; +pub type dev_t = u64; +pub type uid_t = u32; +pub type gid_t = u32; +pub type nlink_t = u64; +pub type blksize_t = c_long; +pub type blkcnt_t = i64; +pub type nfds_t = c_ulong; +pub type wchar_t = i32; +pub type nl_item = c_int; +pub type __wasi_rights_t = u64; +pub type locale_t = *mut __locale_struct; + +s_no_extra_traits! { + #[repr(align(16))] + pub struct max_align_t { + priv_: [f64; 4], + } +} + +#[allow(missing_copy_implementations)] +#[derive(Debug)] +pub enum FILE {} +#[allow(missing_copy_implementations)] +#[derive(Debug)] +pub enum DIR {} +#[allow(missing_copy_implementations)] +#[derive(Debug)] +pub enum __locale_struct {} + +s_paren! { + // in wasi-libc clockid_t is const struct __clockid* (where __clockid is an opaque struct), + // but that's an implementation detail that we don't want to have to deal with + #[repr(transparent)] + #[allow(dead_code)] + pub struct clockid_t(*const u8); +} + +unsafe impl Send for clockid_t {} +unsafe impl Sync for clockid_t {} + +s! { + #[repr(align(8))] + pub struct fpos_t { + data: [u8; 16], + } + + pub struct tm { + pub tm_sec: c_int, + pub tm_min: c_int, + pub tm_hour: c_int, + pub tm_mday: c_int, + pub tm_mon: c_int, + pub tm_year: c_int, + pub tm_wday: c_int, + pub tm_yday: c_int, + pub tm_isdst: c_int, + pub __tm_gmtoff: c_int, + pub __tm_zone: *const c_char, + pub __tm_nsec: c_int, + } + + pub struct timeval { + pub tv_sec: time_t, + pub tv_usec: suseconds_t, + } + + pub struct timespec { + pub tv_sec: time_t, + pub tv_nsec: c_long, + } + + pub struct tms { + pub tms_utime: clock_t, + pub tms_stime: clock_t, + pub tms_cutime: clock_t, + pub tms_cstime: clock_t, + } + + pub struct itimerspec { + pub it_interval: timespec, + pub it_value: timespec, + } + + pub struct iovec { + pub iov_base: *mut c_void, + pub iov_len: size_t, + } + + pub struct lconv { + pub decimal_point: *mut c_char, + pub thousands_sep: *mut c_char, + pub grouping: *mut c_char, + pub int_curr_symbol: *mut c_char, + pub currency_symbol: *mut c_char, + pub mon_decimal_point: *mut c_char, + pub mon_thousands_sep: *mut c_char, + pub mon_grouping: *mut c_char, + pub positive_sign: *mut c_char, + pub negative_sign: *mut c_char, + pub int_frac_digits: c_char, + pub frac_digits: c_char, + pub p_cs_precedes: c_char, + pub p_sep_by_space: c_char, + pub n_cs_precedes: c_char, + pub n_sep_by_space: c_char, + pub p_sign_posn: c_char, + pub n_sign_posn: c_char, + pub int_p_cs_precedes: c_char, + pub int_p_sep_by_space: c_char, + pub int_n_cs_precedes: c_char, + pub int_n_sep_by_space: c_char, + pub int_p_sign_posn: c_char, + pub int_n_sign_posn: c_char, + } + + pub struct pollfd { + pub fd: c_int, + pub events: c_short, + pub revents: c_short, + } + + pub struct rusage { + pub ru_utime: timeval, + pub ru_stime: timeval, + } + + pub struct stat { + pub st_dev: dev_t, + pub st_ino: ino_t, + pub st_nlink: nlink_t, + 
pub st_mode: mode_t, + pub st_uid: uid_t, + pub st_gid: gid_t, + __pad0: c_uint, + pub st_rdev: dev_t, + pub st_size: off_t, + pub st_blksize: blksize_t, + pub st_blocks: blkcnt_t, + pub st_atim: timespec, + pub st_mtim: timespec, + pub st_ctim: timespec, + __reserved: [c_longlong; 3], + } + + pub struct fd_set { + __nfds: usize, + __fds: [c_int; FD_SETSIZE as usize], + } +} + +// Declare dirent outside of s! so that it doesn't implement Copy, Eq, Hash, +// etc., since it contains a flexible array member with a dynamic size. +#[repr(C)] +#[allow(missing_copy_implementations)] +#[derive(Debug)] +pub struct dirent { + pub d_ino: ino_t, + pub d_type: c_uchar, + /// d_name is declared in WASI libc as a flexible array member, which + /// can't be directly expressed in Rust. As an imperfect workaround, + /// declare it as a zero-length array instead. + pub d_name: [c_char; 0], +} + +pub const EXIT_SUCCESS: c_int = 0; +pub const EXIT_FAILURE: c_int = 1; +pub const STDIN_FILENO: c_int = 0; +pub const STDOUT_FILENO: c_int = 1; +pub const STDERR_FILENO: c_int = 2; +pub const SEEK_SET: c_int = 0; +pub const SEEK_CUR: c_int = 1; +pub const SEEK_END: c_int = 2; +pub const _IOFBF: c_int = 0; +pub const _IONBF: c_int = 2; +pub const _IOLBF: c_int = 1; +pub const F_GETFD: c_int = 1; +pub const F_SETFD: c_int = 2; +pub const F_GETFL: c_int = 3; +pub const F_SETFL: c_int = 4; +pub const FD_CLOEXEC: c_int = 1; +pub const FD_SETSIZE: size_t = 1024; +pub const O_APPEND: c_int = 0x0001; +pub const O_DSYNC: c_int = 0x0002; +pub const O_NONBLOCK: c_int = 0x0004; +pub const O_RSYNC: c_int = 0x0008; +pub const O_SYNC: c_int = 0x0010; +pub const O_CREAT: c_int = 0x0001 << 12; +pub const O_DIRECTORY: c_int = 0x0002 << 12; +pub const O_EXCL: c_int = 0x0004 << 12; +pub const O_TRUNC: c_int = 0x0008 << 12; +pub const O_NOFOLLOW: c_int = 0x01000000; +pub const O_EXEC: c_int = 0x02000000; +pub const O_RDONLY: c_int = 0x04000000; +pub const O_SEARCH: c_int = 0x08000000; +pub const O_WRONLY: c_int = 0x10000000; +pub const O_CLOEXEC: c_int = 0x0; +pub const O_RDWR: c_int = O_WRONLY | O_RDONLY; +pub const O_ACCMODE: c_int = O_EXEC | O_RDWR | O_SEARCH; +pub const O_NOCTTY: c_int = 0x0; +pub const POSIX_FADV_DONTNEED: c_int = 4; +pub const POSIX_FADV_NOREUSE: c_int = 5; +pub const POSIX_FADV_NORMAL: c_int = 0; +pub const POSIX_FADV_RANDOM: c_int = 2; +pub const POSIX_FADV_SEQUENTIAL: c_int = 1; +pub const POSIX_FADV_WILLNEED: c_int = 3; +pub const AT_FDCWD: c_int = -2; +pub const AT_EACCESS: c_int = 0x0; +pub const AT_SYMLINK_NOFOLLOW: c_int = 0x1; +pub const AT_SYMLINK_FOLLOW: c_int = 0x2; +pub const AT_REMOVEDIR: c_int = 0x4; +pub const UTIME_OMIT: c_long = 0xfffffffe; +pub const UTIME_NOW: c_long = 0xffffffff; +pub const S_IFIFO: mode_t = 0o1_0000; +pub const S_IFCHR: mode_t = 0o2_0000; +pub const S_IFBLK: mode_t = 0o6_0000; +pub const S_IFDIR: mode_t = 0o4_0000; +pub const S_IFREG: mode_t = 0o10_0000; +pub const S_IFLNK: mode_t = 0o12_0000; +pub const S_IFSOCK: mode_t = 0o14_0000; +pub const S_IFMT: mode_t = 0o17_0000; +pub const S_IRWXO: mode_t = 0o0007; +pub const S_IXOTH: mode_t = 0o0001; +pub const S_IWOTH: mode_t = 0o0002; +pub const S_IROTH: mode_t = 0o0004; +pub const S_IRWXG: mode_t = 0o0070; +pub const S_IXGRP: mode_t = 0o0010; +pub const S_IWGRP: mode_t = 0o0020; +pub const S_IRGRP: mode_t = 0o0040; +pub const S_IRWXU: mode_t = 0o0700; +pub const S_IXUSR: mode_t = 0o0100; +pub const S_IWUSR: mode_t = 0o0200; +pub const S_IRUSR: mode_t = 0o0400; +pub const S_ISVTX: mode_t = 0o1000; +pub const S_ISGID: mode_t = 
0o2000; +pub const S_ISUID: mode_t = 0o4000; +pub const DT_UNKNOWN: u8 = 0; +pub const DT_BLK: u8 = 1; +pub const DT_CHR: u8 = 2; +pub const DT_DIR: u8 = 3; +pub const DT_REG: u8 = 4; +pub const DT_LNK: u8 = 7; +pub const FIONREAD: c_int = 1; +pub const FIONBIO: c_int = 2; +pub const F_OK: c_int = 0; +pub const R_OK: c_int = 4; +pub const W_OK: c_int = 2; +pub const X_OK: c_int = 1; +pub const POLLIN: c_short = 0x1; +pub const POLLOUT: c_short = 0x2; +pub const POLLERR: c_short = 0x1000; +pub const POLLHUP: c_short = 0x2000; +pub const POLLNVAL: c_short = 0x4000; +pub const POLLRDNORM: c_short = 0x1; +pub const POLLWRNORM: c_short = 0x2; + +pub const E2BIG: c_int = 1; +pub const EACCES: c_int = 2; +pub const EADDRINUSE: c_int = 3; +pub const EADDRNOTAVAIL: c_int = 4; +pub const EAFNOSUPPORT: c_int = 5; +pub const EAGAIN: c_int = 6; +pub const EALREADY: c_int = 7; +pub const EBADF: c_int = 8; +pub const EBADMSG: c_int = 9; +pub const EBUSY: c_int = 10; +pub const ECANCELED: c_int = 11; +pub const ECHILD: c_int = 12; +pub const ECONNABORTED: c_int = 13; +pub const ECONNREFUSED: c_int = 14; +pub const ECONNRESET: c_int = 15; +pub const EDEADLK: c_int = 16; +pub const EDESTADDRREQ: c_int = 17; +pub const EDOM: c_int = 18; +pub const EDQUOT: c_int = 19; +pub const EEXIST: c_int = 20; +pub const EFAULT: c_int = 21; +pub const EFBIG: c_int = 22; +pub const EHOSTUNREACH: c_int = 23; +pub const EIDRM: c_int = 24; +pub const EILSEQ: c_int = 25; +pub const EINPROGRESS: c_int = 26; +pub const EINTR: c_int = 27; +pub const EINVAL: c_int = 28; +pub const EIO: c_int = 29; +pub const EISCONN: c_int = 30; +pub const EISDIR: c_int = 31; +pub const ELOOP: c_int = 32; +pub const EMFILE: c_int = 33; +pub const EMLINK: c_int = 34; +pub const EMSGSIZE: c_int = 35; +pub const EMULTIHOP: c_int = 36; +pub const ENAMETOOLONG: c_int = 37; +pub const ENETDOWN: c_int = 38; +pub const ENETRESET: c_int = 39; +pub const ENETUNREACH: c_int = 40; +pub const ENFILE: c_int = 41; +pub const ENOBUFS: c_int = 42; +pub const ENODEV: c_int = 43; +pub const ENOENT: c_int = 44; +pub const ENOEXEC: c_int = 45; +pub const ENOLCK: c_int = 46; +pub const ENOLINK: c_int = 47; +pub const ENOMEM: c_int = 48; +pub const ENOMSG: c_int = 49; +pub const ENOPROTOOPT: c_int = 50; +pub const ENOSPC: c_int = 51; +pub const ENOSYS: c_int = 52; +pub const ENOTCONN: c_int = 53; +pub const ENOTDIR: c_int = 54; +pub const ENOTEMPTY: c_int = 55; +pub const ENOTRECOVERABLE: c_int = 56; +pub const ENOTSOCK: c_int = 57; +pub const ENOTSUP: c_int = 58; +pub const ENOTTY: c_int = 59; +pub const ENXIO: c_int = 60; +pub const EOVERFLOW: c_int = 61; +pub const EOWNERDEAD: c_int = 62; +pub const EPERM: c_int = 63; +pub const EPIPE: c_int = 64; +pub const EPROTO: c_int = 65; +pub const EPROTONOSUPPORT: c_int = 66; +pub const EPROTOTYPE: c_int = 67; +pub const ERANGE: c_int = 68; +pub const EROFS: c_int = 69; +pub const ESPIPE: c_int = 70; +pub const ESRCH: c_int = 71; +pub const ESTALE: c_int = 72; +pub const ETIMEDOUT: c_int = 73; +pub const ETXTBSY: c_int = 74; +pub const EXDEV: c_int = 75; +pub const ENOTCAPABLE: c_int = 76; +pub const EOPNOTSUPP: c_int = ENOTSUP; +pub const EWOULDBLOCK: c_int = EAGAIN; + +pub const _SC_PAGESIZE: c_int = 30; +pub const _SC_PAGE_SIZE: c_int = _SC_PAGESIZE; +pub const _SC_IOV_MAX: c_int = 60; +pub const _SC_SYMLOOP_MAX: c_int = 173; + +// FIXME(msrv): `addr_of!(EXTERN_STATIC)` is now safe; remove `unsafe` when MSRV >= 1.82 +#[allow(unused_unsafe)] +pub static CLOCK_MONOTONIC: clockid_t = unsafe { 
clockid_t(core::ptr::addr_of!(_CLOCK_MONOTONIC)) }; +#[allow(unused_unsafe)] +pub static CLOCK_PROCESS_CPUTIME_ID: clockid_t = + unsafe { clockid_t(core::ptr::addr_of!(_CLOCK_PROCESS_CPUTIME_ID)) }; +#[allow(unused_unsafe)] +pub static CLOCK_REALTIME: clockid_t = unsafe { clockid_t(core::ptr::addr_of!(_CLOCK_REALTIME)) }; +#[allow(unused_unsafe)] +pub static CLOCK_THREAD_CPUTIME_ID: clockid_t = + unsafe { clockid_t(core::ptr::addr_of!(_CLOCK_THREAD_CPUTIME_ID)) }; + +pub const ABDAY_1: crate::nl_item = 0x20000; +pub const ABDAY_2: crate::nl_item = 0x20001; +pub const ABDAY_3: crate::nl_item = 0x20002; +pub const ABDAY_4: crate::nl_item = 0x20003; +pub const ABDAY_5: crate::nl_item = 0x20004; +pub const ABDAY_6: crate::nl_item = 0x20005; +pub const ABDAY_7: crate::nl_item = 0x20006; + +pub const DAY_1: crate::nl_item = 0x20007; +pub const DAY_2: crate::nl_item = 0x20008; +pub const DAY_3: crate::nl_item = 0x20009; +pub const DAY_4: crate::nl_item = 0x2000A; +pub const DAY_5: crate::nl_item = 0x2000B; +pub const DAY_6: crate::nl_item = 0x2000C; +pub const DAY_7: crate::nl_item = 0x2000D; + +pub const ABMON_1: crate::nl_item = 0x2000E; +pub const ABMON_2: crate::nl_item = 0x2000F; +pub const ABMON_3: crate::nl_item = 0x20010; +pub const ABMON_4: crate::nl_item = 0x20011; +pub const ABMON_5: crate::nl_item = 0x20012; +pub const ABMON_6: crate::nl_item = 0x20013; +pub const ABMON_7: crate::nl_item = 0x20014; +pub const ABMON_8: crate::nl_item = 0x20015; +pub const ABMON_9: crate::nl_item = 0x20016; +pub const ABMON_10: crate::nl_item = 0x20017; +pub const ABMON_11: crate::nl_item = 0x20018; +pub const ABMON_12: crate::nl_item = 0x20019; + +pub const MON_1: crate::nl_item = 0x2001A; +pub const MON_2: crate::nl_item = 0x2001B; +pub const MON_3: crate::nl_item = 0x2001C; +pub const MON_4: crate::nl_item = 0x2001D; +pub const MON_5: crate::nl_item = 0x2001E; +pub const MON_6: crate::nl_item = 0x2001F; +pub const MON_7: crate::nl_item = 0x20020; +pub const MON_8: crate::nl_item = 0x20021; +pub const MON_9: crate::nl_item = 0x20022; +pub const MON_10: crate::nl_item = 0x20023; +pub const MON_11: crate::nl_item = 0x20024; +pub const MON_12: crate::nl_item = 0x20025; + +pub const AM_STR: crate::nl_item = 0x20026; +pub const PM_STR: crate::nl_item = 0x20027; + +pub const D_T_FMT: crate::nl_item = 0x20028; +pub const D_FMT: crate::nl_item = 0x20029; +pub const T_FMT: crate::nl_item = 0x2002A; +pub const T_FMT_AMPM: crate::nl_item = 0x2002B; + +pub const ERA: crate::nl_item = 0x2002C; +pub const ERA_D_FMT: crate::nl_item = 0x2002E; +pub const ALT_DIGITS: crate::nl_item = 0x2002F; +pub const ERA_D_T_FMT: crate::nl_item = 0x20030; +pub const ERA_T_FMT: crate::nl_item = 0x20031; + +pub const CODESET: crate::nl_item = 14; +pub const CRNCYSTR: crate::nl_item = 0x4000F; +pub const RADIXCHAR: crate::nl_item = 0x10000; +pub const THOUSEP: crate::nl_item = 0x10001; +pub const YESEXPR: crate::nl_item = 0x50000; +pub const NOEXPR: crate::nl_item = 0x50001; +pub const YESSTR: crate::nl_item = 0x50002; +pub const NOSTR: crate::nl_item = 0x50003; + +f! 
{ + pub fn FD_ISSET(fd: c_int, set: *const fd_set) -> bool { + let set = &*set; + let n = set.__nfds; + return set.__fds[..n].iter().any(|p| *p == fd); + } + + pub fn FD_SET(fd: c_int, set: *mut fd_set) -> () { + let set = &mut *set; + let n = set.__nfds; + if !set.__fds[..n].iter().any(|p| *p == fd) { + set.__nfds = n + 1; + set.__fds[n] = fd; + } + } + + pub fn FD_ZERO(set: *mut fd_set) -> () { + (*set).__nfds = 0; + return; + } +} + +#[cfg_attr( + feature = "rustc-dep-of-std", + link( + name = "c", + kind = "static", + modifiers = "-bundle", + cfg(target_feature = "crt-static") + ) +)] +#[cfg_attr( + feature = "rustc-dep-of-std", + link(name = "c", cfg(not(target_feature = "crt-static"))) +)] +extern "C" { + pub fn _Exit(code: c_int) -> !; + pub fn _exit(code: c_int) -> !; + pub fn abort() -> !; + pub fn aligned_alloc(a: size_t, b: size_t) -> *mut c_void; + pub fn calloc(amt: size_t, amt2: size_t) -> *mut c_void; + pub fn exit(code: c_int) -> !; + pub fn free(ptr: *mut c_void); + pub fn getenv(s: *const c_char) -> *mut c_char; + pub fn malloc(amt: size_t) -> *mut c_void; + pub fn malloc_usable_size(ptr: *mut c_void) -> size_t; + pub fn sbrk(increment: intptr_t) -> *mut c_void; + pub fn rand() -> c_int; + pub fn read(fd: c_int, ptr: *mut c_void, size: size_t) -> ssize_t; + pub fn realloc(ptr: *mut c_void, amt: size_t) -> *mut c_void; + pub fn setenv(k: *const c_char, v: *const c_char, a: c_int) -> c_int; + pub fn unsetenv(k: *const c_char) -> c_int; + pub fn clearenv() -> c_int; + pub fn write(fd: c_int, ptr: *const c_void, size: size_t) -> ssize_t; + pub static mut environ: *mut *mut c_char; + pub fn fopen(a: *const c_char, b: *const c_char) -> *mut FILE; + pub fn freopen(a: *const c_char, b: *const c_char, f: *mut FILE) -> *mut FILE; + pub fn fclose(f: *mut FILE) -> c_int; + pub fn remove(a: *const c_char) -> c_int; + pub fn rename(a: *const c_char, b: *const c_char) -> c_int; + pub fn feof(f: *mut FILE) -> c_int; + pub fn ferror(f: *mut FILE) -> c_int; + pub fn fflush(f: *mut FILE) -> c_int; + pub fn clearerr(f: *mut FILE); + pub fn fseek(f: *mut FILE, b: c_long, c: c_int) -> c_int; + pub fn ftell(f: *mut FILE) -> c_long; + pub fn rewind(f: *mut FILE); + pub fn fgetpos(f: *mut FILE, pos: *mut fpos_t) -> c_int; + pub fn fsetpos(f: *mut FILE, pos: *const fpos_t) -> c_int; + pub fn fread(buf: *mut c_void, a: size_t, b: size_t, f: *mut FILE) -> size_t; + pub fn fwrite(buf: *const c_void, a: size_t, b: size_t, f: *mut FILE) -> size_t; + pub fn fgetc(f: *mut FILE) -> c_int; + pub fn getc(f: *mut FILE) -> c_int; + pub fn getchar() -> c_int; + pub fn ungetc(a: c_int, f: *mut FILE) -> c_int; + pub fn fputc(a: c_int, f: *mut FILE) -> c_int; + pub fn putc(a: c_int, f: *mut FILE) -> c_int; + pub fn putchar(a: c_int) -> c_int; + pub fn fputs(a: *const c_char, f: *mut FILE) -> c_int; + pub fn puts(a: *const c_char) -> c_int; + pub fn perror(a: *const c_char); + pub fn srand(a: c_uint); + pub fn atexit(a: extern "C" fn()) -> c_int; + pub fn at_quick_exit(a: extern "C" fn()) -> c_int; + pub fn quick_exit(a: c_int) -> !; + pub fn posix_memalign(a: *mut *mut c_void, b: size_t, c: size_t) -> c_int; + pub fn rand_r(a: *mut c_uint) -> c_int; + pub fn random() -> c_long; + pub fn srandom(a: c_uint); + pub fn putenv(a: *mut c_char) -> c_int; + pub fn clock() -> clock_t; + pub fn time(a: *mut time_t) -> time_t; + pub fn difftime(a: time_t, b: time_t) -> c_double; + pub fn mktime(a: *mut tm) -> time_t; + pub fn strftime(a: *mut c_char, b: size_t, c: *const c_char, d: *const tm) -> size_t; + pub fn gmtime(a: 
*const time_t) -> *mut tm; + pub fn gmtime_r(a: *const time_t, b: *mut tm) -> *mut tm; + pub fn localtime(a: *const time_t) -> *mut tm; + pub fn localtime_r(a: *const time_t, b: *mut tm) -> *mut tm; + pub fn asctime_r(a: *const tm, b: *mut c_char) -> *mut c_char; + pub fn ctime_r(a: *const time_t, b: *mut c_char) -> *mut c_char; + + static _CLOCK_MONOTONIC: u8; + static _CLOCK_PROCESS_CPUTIME_ID: u8; + static _CLOCK_REALTIME: u8; + static _CLOCK_THREAD_CPUTIME_ID: u8; + pub fn nanosleep(a: *const timespec, b: *mut timespec) -> c_int; + pub fn clock_getres(a: clockid_t, b: *mut timespec) -> c_int; + pub fn clock_gettime(a: clockid_t, b: *mut timespec) -> c_int; + pub fn clock_nanosleep(a: clockid_t, a2: c_int, b: *const timespec, c: *mut timespec) -> c_int; + + pub fn isalnum(c: c_int) -> c_int; + pub fn isalpha(c: c_int) -> c_int; + pub fn iscntrl(c: c_int) -> c_int; + pub fn isdigit(c: c_int) -> c_int; + pub fn isgraph(c: c_int) -> c_int; + pub fn islower(c: c_int) -> c_int; + pub fn isprint(c: c_int) -> c_int; + pub fn ispunct(c: c_int) -> c_int; + pub fn isspace(c: c_int) -> c_int; + pub fn isupper(c: c_int) -> c_int; + pub fn isxdigit(c: c_int) -> c_int; + pub fn isblank(c: c_int) -> c_int; + pub fn tolower(c: c_int) -> c_int; + pub fn toupper(c: c_int) -> c_int; + pub fn setvbuf(stream: *mut FILE, buffer: *mut c_char, mode: c_int, size: size_t) -> c_int; + pub fn setbuf(stream: *mut FILE, buf: *mut c_char); + pub fn fgets(buf: *mut c_char, n: c_int, stream: *mut FILE) -> *mut c_char; + pub fn atof(s: *const c_char) -> c_double; + pub fn atoi(s: *const c_char) -> c_int; + pub fn atol(s: *const c_char) -> c_long; + pub fn atoll(s: *const c_char) -> c_longlong; + pub fn strtod(s: *const c_char, endp: *mut *mut c_char) -> c_double; + pub fn strtof(s: *const c_char, endp: *mut *mut c_char) -> c_float; + pub fn strtol(s: *const c_char, endp: *mut *mut c_char, base: c_int) -> c_long; + pub fn strtoll(s: *const c_char, endp: *mut *mut c_char, base: c_int) -> c_longlong; + pub fn strtoul(s: *const c_char, endp: *mut *mut c_char, base: c_int) -> c_ulong; + pub fn strtoull(s: *const c_char, endp: *mut *mut c_char, base: c_int) -> c_ulonglong; + + pub fn strcpy(dst: *mut c_char, src: *const c_char) -> *mut c_char; + pub fn strncpy(dst: *mut c_char, src: *const c_char, n: size_t) -> *mut c_char; + pub fn strcat(s: *mut c_char, ct: *const c_char) -> *mut c_char; + pub fn strncat(s: *mut c_char, ct: *const c_char, n: size_t) -> *mut c_char; + pub fn strcmp(cs: *const c_char, ct: *const c_char) -> c_int; + pub fn strncmp(cs: *const c_char, ct: *const c_char, n: size_t) -> c_int; + pub fn strcoll(cs: *const c_char, ct: *const c_char) -> c_int; + pub fn strchr(cs: *const c_char, c: c_int) -> *mut c_char; + pub fn strrchr(cs: *const c_char, c: c_int) -> *mut c_char; + pub fn strspn(cs: *const c_char, ct: *const c_char) -> size_t; + pub fn strcspn(cs: *const c_char, ct: *const c_char) -> size_t; + pub fn strdup(cs: *const c_char) -> *mut c_char; + pub fn strndup(cs: *const c_char, n: size_t) -> *mut c_char; + pub fn strpbrk(cs: *const c_char, ct: *const c_char) -> *mut c_char; + pub fn strstr(cs: *const c_char, ct: *const c_char) -> *mut c_char; + pub fn strcasecmp(s1: *const c_char, s2: *const c_char) -> c_int; + pub fn strncasecmp(s1: *const c_char, s2: *const c_char, n: size_t) -> c_int; + pub fn strlen(cs: *const c_char) -> size_t; + pub fn strnlen(cs: *const c_char, maxlen: size_t) -> size_t; + pub fn strerror(n: c_int) -> *mut c_char; + pub fn strtok(s: *mut c_char, t: *const c_char) -> *mut 
c_char; + pub fn strxfrm(s: *mut c_char, ct: *const c_char, n: size_t) -> size_t; + + pub fn memchr(cx: *const c_void, c: c_int, n: size_t) -> *mut c_void; + pub fn memcmp(cx: *const c_void, ct: *const c_void, n: size_t) -> c_int; + pub fn memcpy(dest: *mut c_void, src: *const c_void, n: size_t) -> *mut c_void; + pub fn memmove(dest: *mut c_void, src: *const c_void, n: size_t) -> *mut c_void; + pub fn memset(dest: *mut c_void, c: c_int, n: size_t) -> *mut c_void; + + pub fn fprintf(stream: *mut crate::FILE, format: *const c_char, ...) -> c_int; + pub fn printf(format: *const c_char, ...) -> c_int; + pub fn snprintf(s: *mut c_char, n: size_t, format: *const c_char, ...) -> c_int; + pub fn sprintf(s: *mut c_char, format: *const c_char, ...) -> c_int; + pub fn fscanf(stream: *mut crate::FILE, format: *const c_char, ...) -> c_int; + pub fn scanf(format: *const c_char, ...) -> c_int; + pub fn sscanf(s: *const c_char, format: *const c_char, ...) -> c_int; + pub fn getchar_unlocked() -> c_int; + pub fn putchar_unlocked(c: c_int) -> c_int; + + pub fn shutdown(socket: c_int, how: c_int) -> c_int; + pub fn fstat(fildes: c_int, buf: *mut stat) -> c_int; + pub fn mkdir(path: *const c_char, mode: mode_t) -> c_int; + pub fn stat(path: *const c_char, buf: *mut stat) -> c_int; + pub fn fdopen(fd: c_int, mode: *const c_char) -> *mut crate::FILE; + pub fn fileno(stream: *mut crate::FILE) -> c_int; + pub fn open(path: *const c_char, oflag: c_int, ...) -> c_int; + pub fn creat(path: *const c_char, mode: mode_t) -> c_int; + pub fn fcntl(fd: c_int, cmd: c_int, ...) -> c_int; + pub fn opendir(dirname: *const c_char) -> *mut crate::DIR; + pub fn fdopendir(fd: c_int) -> *mut crate::DIR; + pub fn readdir(dirp: *mut crate::DIR) -> *mut crate::dirent; + pub fn closedir(dirp: *mut crate::DIR) -> c_int; + pub fn rewinddir(dirp: *mut crate::DIR); + pub fn dirfd(dirp: *mut crate::DIR) -> c_int; + pub fn seekdir(dirp: *mut crate::DIR, loc: c_long); + pub fn telldir(dirp: *mut crate::DIR) -> c_long; + + pub fn openat(dirfd: c_int, pathname: *const c_char, flags: c_int, ...) 
-> c_int; + pub fn fstatat(dirfd: c_int, pathname: *const c_char, buf: *mut stat, flags: c_int) -> c_int; + pub fn linkat( + olddirfd: c_int, + oldpath: *const c_char, + newdirfd: c_int, + newpath: *const c_char, + flags: c_int, + ) -> c_int; + pub fn mkdirat(dirfd: c_int, pathname: *const c_char, mode: mode_t) -> c_int; + pub fn readlinkat( + dirfd: c_int, + pathname: *const c_char, + buf: *mut c_char, + bufsiz: size_t, + ) -> ssize_t; + pub fn renameat( + olddirfd: c_int, + oldpath: *const c_char, + newdirfd: c_int, + newpath: *const c_char, + ) -> c_int; + pub fn symlinkat(target: *const c_char, newdirfd: c_int, linkpath: *const c_char) -> c_int; + pub fn unlinkat(dirfd: c_int, pathname: *const c_char, flags: c_int) -> c_int; + + pub fn access(path: *const c_char, amode: c_int) -> c_int; + pub fn close(fd: c_int) -> c_int; + pub fn fpathconf(filedes: c_int, name: c_int) -> c_long; + pub fn getopt(argc: c_int, argv: *const *mut c_char, optstr: *const c_char) -> c_int; + pub fn isatty(fd: c_int) -> c_int; + pub fn link(src: *const c_char, dst: *const c_char) -> c_int; + pub fn lseek(fd: c_int, offset: off_t, whence: c_int) -> off_t; + pub fn pathconf(path: *const c_char, name: c_int) -> c_long; + pub fn rmdir(path: *const c_char) -> c_int; + pub fn sleep(secs: c_uint) -> c_uint; + pub fn unlink(c: *const c_char) -> c_int; + pub fn pread(fd: c_int, buf: *mut c_void, count: size_t, offset: off_t) -> ssize_t; + pub fn pwrite(fd: c_int, buf: *const c_void, count: size_t, offset: off_t) -> ssize_t; + + pub fn lstat(path: *const c_char, buf: *mut stat) -> c_int; + + pub fn fsync(fd: c_int) -> c_int; + pub fn fdatasync(fd: c_int) -> c_int; + + pub fn symlink(path1: *const c_char, path2: *const c_char) -> c_int; + + pub fn truncate(path: *const c_char, length: off_t) -> c_int; + pub fn ftruncate(fd: c_int, length: off_t) -> c_int; + + pub fn getrusage(resource: c_int, usage: *mut rusage) -> c_int; + + pub fn gettimeofday(tp: *mut crate::timeval, tz: *mut c_void) -> c_int; + pub fn times(buf: *mut crate::tms) -> crate::clock_t; + + pub fn strerror_r(errnum: c_int, buf: *mut c_char, buflen: size_t) -> c_int; + + pub fn usleep(secs: c_uint) -> c_int; + pub fn send(socket: c_int, buf: *const c_void, len: size_t, flags: c_int) -> ssize_t; + pub fn recv(socket: c_int, buf: *mut c_void, len: size_t, flags: c_int) -> ssize_t; + pub fn poll(fds: *mut pollfd, nfds: nfds_t, timeout: c_int) -> c_int; + pub fn setlocale(category: c_int, locale: *const c_char) -> *mut c_char; + pub fn localeconv() -> *mut lconv; + + pub fn readlink(path: *const c_char, buf: *mut c_char, bufsz: size_t) -> ssize_t; + + pub fn timegm(tm: *mut crate::tm) -> time_t; + + pub fn sysconf(name: c_int) -> c_long; + + pub fn ioctl(fd: c_int, request: c_int, ...) 
-> c_int; + + pub fn fseeko(stream: *mut crate::FILE, offset: off_t, whence: c_int) -> c_int; + pub fn ftello(stream: *mut crate::FILE) -> off_t; + pub fn posix_fallocate(fd: c_int, offset: off_t, len: off_t) -> c_int; + + pub fn strcasestr(cs: *const c_char, ct: *const c_char) -> *mut c_char; + pub fn getline(lineptr: *mut *mut c_char, n: *mut size_t, stream: *mut FILE) -> ssize_t; + + pub fn faccessat(dirfd: c_int, pathname: *const c_char, mode: c_int, flags: c_int) -> c_int; + pub fn writev(fd: c_int, iov: *const crate::iovec, iovcnt: c_int) -> ssize_t; + pub fn readv(fd: c_int, iov: *const crate::iovec, iovcnt: c_int) -> ssize_t; + pub fn pwritev(fd: c_int, iov: *const crate::iovec, iovcnt: c_int, offset: off_t) -> ssize_t; + pub fn preadv(fd: c_int, iov: *const crate::iovec, iovcnt: c_int, offset: off_t) -> ssize_t; + pub fn posix_fadvise(fd: c_int, offset: off_t, len: off_t, advise: c_int) -> c_int; + pub fn futimens(fd: c_int, times: *const crate::timespec) -> c_int; + pub fn utimensat( + dirfd: c_int, + path: *const c_char, + times: *const crate::timespec, + flag: c_int, + ) -> c_int; + pub fn getentropy(buf: *mut c_void, buflen: size_t) -> c_int; + pub fn memrchr(cx: *const c_void, c: c_int, n: size_t) -> *mut c_void; + pub fn abs(i: c_int) -> c_int; + pub fn labs(i: c_long) -> c_long; + pub fn duplocale(base: crate::locale_t) -> crate::locale_t; + pub fn freelocale(loc: crate::locale_t); + pub fn newlocale(mask: c_int, locale: *const c_char, base: crate::locale_t) -> crate::locale_t; + pub fn uselocale(loc: crate::locale_t) -> crate::locale_t; + pub fn sched_yield() -> c_int; + pub fn getcwd(buf: *mut c_char, size: size_t) -> *mut c_char; + pub fn chdir(dir: *const c_char) -> c_int; + + pub fn nl_langinfo(item: crate::nl_item) -> *mut c_char; + pub fn nl_langinfo_l(item: crate::nl_item, loc: crate::locale_t) -> *mut c_char; + + pub fn select( + nfds: c_int, + readfds: *mut fd_set, + writefds: *mut fd_set, + errorfds: *mut fd_set, + timeout: *const timeval, + ) -> c_int; + + pub fn __wasilibc_register_preopened_fd(fd: c_int, path: *const c_char) -> c_int; + pub fn __wasilibc_fd_renumber(fd: c_int, newfd: c_int) -> c_int; + pub fn __wasilibc_unlinkat(fd: c_int, path: *const c_char) -> c_int; + pub fn __wasilibc_rmdirat(fd: c_int, path: *const c_char) -> c_int; + pub fn __wasilibc_find_relpath( + path: *const c_char, + abs_prefix: *mut *const c_char, + relative_path: *mut *mut c_char, + relative_path_len: usize, + ) -> c_int; + pub fn __wasilibc_tell(fd: c_int) -> off_t; + pub fn __wasilibc_nocwd___wasilibc_unlinkat(dirfd: c_int, path: *const c_char) -> c_int; + pub fn __wasilibc_nocwd___wasilibc_rmdirat(dirfd: c_int, path: *const c_char) -> c_int; + pub fn __wasilibc_nocwd_linkat( + olddirfd: c_int, + oldpath: *const c_char, + newdirfd: c_int, + newpath: *const c_char, + flags: c_int, + ) -> c_int; + pub fn __wasilibc_nocwd_symlinkat( + target: *const c_char, + dirfd: c_int, + path: *const c_char, + ) -> c_int; + pub fn __wasilibc_nocwd_readlinkat( + dirfd: c_int, + path: *const c_char, + buf: *mut c_char, + bufsize: usize, + ) -> isize; + pub fn __wasilibc_nocwd_faccessat( + dirfd: c_int, + path: *const c_char, + mode: c_int, + flags: c_int, + ) -> c_int; + pub fn __wasilibc_nocwd_renameat( + olddirfd: c_int, + oldpath: *const c_char, + newdirfd: c_int, + newpath: *const c_char, + ) -> c_int; + pub fn __wasilibc_nocwd_openat_nomode(dirfd: c_int, path: *const c_char, flags: c_int) + -> c_int; + pub fn __wasilibc_nocwd_fstatat( + dirfd: c_int, + path: *const c_char, + buf: *mut 
stat, + flags: c_int, + ) -> c_int; + pub fn __wasilibc_nocwd_mkdirat_nomode(dirfd: c_int, path: *const c_char) -> c_int; + pub fn __wasilibc_nocwd_utimensat( + dirfd: c_int, + path: *const c_char, + times: *const crate::timespec, + flags: c_int, + ) -> c_int; + pub fn __wasilibc_nocwd_opendirat(dirfd: c_int, path: *const c_char) -> *mut crate::DIR; + pub fn __wasilibc_access(pathname: *const c_char, mode: c_int, flags: c_int) -> c_int; + pub fn __wasilibc_stat(pathname: *const c_char, buf: *mut stat, flags: c_int) -> c_int; + pub fn __wasilibc_utimens( + pathname: *const c_char, + times: *const crate::timespec, + flags: c_int, + ) -> c_int; + pub fn __wasilibc_link(oldpath: *const c_char, newpath: *const c_char, flags: c_int) -> c_int; + pub fn __wasilibc_link_oldat( + olddirfd: c_int, + oldpath: *const c_char, + newpath: *const c_char, + flags: c_int, + ) -> c_int; + pub fn __wasilibc_link_newat( + oldpath: *const c_char, + newdirfd: c_int, + newpath: *const c_char, + flags: c_int, + ) -> c_int; + pub fn __wasilibc_rename_oldat( + olddirfd: c_int, + oldpath: *const c_char, + newpath: *const c_char, + ) -> c_int; + pub fn __wasilibc_rename_newat( + oldpath: *const c_char, + newdirfd: c_int, + newpath: *const c_char, + ) -> c_int; + + pub fn arc4random() -> u32; + pub fn arc4random_buf(a: *mut c_void, b: size_t); + pub fn arc4random_uniform(a: u32) -> u32; + + pub fn __errno_location() -> *mut c_int; +} + +cfg_if! { + if #[cfg(not(target_env = "p1"))] { + mod p2; + pub use self::p2::*; + } +} diff --git a/vendor/libc/src/wasi/p2.rs b/vendor/libc/src/wasi/p2.rs new file mode 100644 index 00000000000000..7332a779396d37 --- /dev/null +++ b/vendor/libc/src/wasi/p2.rs @@ -0,0 +1,188 @@ +use crate::prelude::*; + +pub type sa_family_t = c_ushort; +pub type in_port_t = c_ushort; +pub type in_addr_t = c_uint; + +pub type socklen_t = c_uint; + +s! 
{ + #[repr(align(16))] + pub struct sockaddr { + pub sa_family: sa_family_t, + pub sa_data: [c_char; 0], + } + + pub struct in_addr { + pub s_addr: in_addr_t, + } + + #[repr(align(16))] + pub struct sockaddr_in { + pub sin_family: sa_family_t, + pub sin_port: in_port_t, + pub sin_addr: in_addr, + } + + #[repr(align(4))] + pub struct in6_addr { + pub s6_addr: [c_uchar; 16], + } + + #[repr(align(16))] + pub struct sockaddr_in6 { + pub sin6_family: sa_family_t, + pub sin6_port: in_port_t, + pub sin6_flowinfo: c_uint, + pub sin6_addr: in6_addr, + pub sin6_scope_id: c_uint, + } + + #[repr(align(16))] + pub struct sockaddr_storage { + pub ss_family: sa_family_t, + pub __ss_data: [c_char; 32], + } + + pub struct addrinfo { + pub ai_flags: c_int, + pub ai_family: c_int, + pub ai_socktype: c_int, + pub ai_protocol: c_int, + pub ai_addrlen: socklen_t, + pub ai_addr: *mut sockaddr, + pub ai_canonname: *mut c_char, + pub ai_next: *mut addrinfo, + } + + pub struct ip_mreq { + pub imr_multiaddr: in_addr, + pub imr_interface: in_addr, + } + + pub struct ipv6_mreq { + pub ipv6mr_multiaddr: in6_addr, + pub ipv6mr_interface: c_uint, + } + + pub struct linger { + pub l_onoff: c_int, + pub l_linger: c_int, + } +} + +pub const SHUT_RD: c_int = 1 << 0; +pub const SHUT_WR: c_int = 1 << 1; +pub const SHUT_RDWR: c_int = SHUT_RD | SHUT_WR; + +pub const MSG_NOSIGNAL: c_int = 0x4000; +pub const MSG_PEEK: c_int = 0x0002; + +pub const SO_REUSEADDR: c_int = 2; +pub const SO_TYPE: c_int = 3; +pub const SO_ERROR: c_int = 4; +pub const SO_BROADCAST: c_int = 6; +pub const SO_SNDBUF: c_int = 7; +pub const SO_RCVBUF: c_int = 8; +pub const SO_KEEPALIVE: c_int = 9; +pub const SO_LINGER: c_int = 13; +pub const SO_ACCEPTCONN: c_int = 30; +pub const SO_PROTOCOL: c_int = 38; +pub const SO_DOMAIN: c_int = 39; +pub const SO_RCVTIMEO: c_int = 66; +pub const SO_SNDTIMEO: c_int = 67; + +pub const SOCK_DGRAM: c_int = 5; +pub const SOCK_STREAM: c_int = 6; +pub const SOCK_NONBLOCK: c_int = 0x00004000; + +pub const SOL_SOCKET: c_int = 0x7fffffff; + +pub const AF_UNSPEC: c_int = 0; +pub const AF_INET: c_int = 1; +pub const AF_INET6: c_int = 2; + +pub const IPPROTO_IP: c_int = 0; +pub const IPPROTO_TCP: c_int = 6; +pub const IPPROTO_UDP: c_int = 17; +pub const IPPROTO_IPV6: c_int = 41; + +pub const IP_TTL: c_int = 2; +pub const IP_MULTICAST_TTL: c_int = 33; +pub const IP_MULTICAST_LOOP: c_int = 34; +pub const IP_ADD_MEMBERSHIP: c_int = 35; +pub const IP_DROP_MEMBERSHIP: c_int = 36; + +pub const IPV6_UNICAST_HOPS: c_int = 16; +pub const IPV6_MULTICAST_LOOP: c_int = 19; +pub const IPV6_JOIN_GROUP: c_int = 20; +pub const IPV6_LEAVE_GROUP: c_int = 21; +pub const IPV6_V6ONLY: c_int = 26; + +pub const IPV6_ADD_MEMBERSHIP: c_int = IPV6_JOIN_GROUP; +pub const IPV6_DROP_MEMBERSHIP: c_int = IPV6_LEAVE_GROUP; + +pub const TCP_NODELAY: c_int = 1; +pub const TCP_KEEPIDLE: c_int = 4; +pub const TCP_KEEPINTVL: c_int = 5; +pub const TCP_KEEPCNT: c_int = 6; + +pub const EAI_SYSTEM: c_int = -11; + +extern "C" { + pub fn socket(domain: c_int, type_: c_int, protocol: c_int) -> c_int; + pub fn connect(fd: c_int, name: *const sockaddr, addrlen: socklen_t) -> c_int; + pub fn bind(socket: c_int, addr: *const sockaddr, addrlen: socklen_t) -> c_int; + pub fn listen(socket: c_int, backlog: c_int) -> c_int; + pub fn accept(socket: c_int, addr: *mut sockaddr, addrlen: *mut socklen_t) -> c_int; + pub fn accept4( + socket: c_int, + addr: *mut sockaddr, + addrlen: *mut socklen_t, + flags: c_int, + ) -> c_int; + + pub fn getsockname(socket: c_int, addr: *mut sockaddr, 
addrlen: *mut socklen_t) -> c_int; + pub fn getpeername(socket: c_int, addr: *mut sockaddr, addrlen: *mut socklen_t) -> c_int; + + pub fn sendto( + socket: c_int, + buffer: *const c_void, + length: size_t, + flags: c_int, + addr: *const sockaddr, + addrlen: socklen_t, + ) -> ssize_t; + pub fn recvfrom( + socket: c_int, + buffer: *mut c_void, + length: size_t, + flags: c_int, + addr: *mut sockaddr, + addrlen: *mut socklen_t, + ) -> ssize_t; + + pub fn getsockopt( + sockfd: c_int, + level: c_int, + optname: c_int, + optval: *mut c_void, + optlen: *mut socklen_t, + ) -> c_int; + pub fn setsockopt( + sockfd: c_int, + level: c_int, + optname: c_int, + optval: *const c_void, + optlen: socklen_t, + ) -> c_int; + + pub fn getaddrinfo( + host: *const c_char, + serv: *const c_char, + hint: *const addrinfo, + res: *mut *mut addrinfo, + ) -> c_int; + pub fn freeaddrinfo(p: *mut addrinfo); + pub fn gai_strerror(ecode: c_int) -> *const c_char; +} diff --git a/vendor/libc/src/windows/gnu/mod.rs b/vendor/libc/src/windows/gnu/mod.rs new file mode 100644 index 00000000000000..aee2c1efed1081 --- /dev/null +++ b/vendor/libc/src/windows/gnu/mod.rs @@ -0,0 +1,36 @@ +use crate::prelude::*; + +cfg_if! { + if #[cfg(target_pointer_width = "64")] { + s_no_extra_traits! { + #[repr(align(16))] + pub struct max_align_t { + priv_: [f64; 4], + } + } + } else if #[cfg(target_pointer_width = "32")] { + s_no_extra_traits! { + #[repr(align(16))] + pub struct max_align_t { + priv_: [i64; 6], + } + } + } +} + +pub const L_tmpnam: c_uint = 14; +pub const TMP_MAX: c_uint = 0x7fff; + +// stdio file descriptor numbers +pub const STDIN_FILENO: c_int = 0; +pub const STDOUT_FILENO: c_int = 1; +pub const STDERR_FILENO: c_int = 2; + +extern "C" { + pub fn strcasecmp(s1: *const c_char, s2: *const c_char) -> c_int; + pub fn strncasecmp(s1: *const c_char, s2: *const c_char, n: size_t) -> c_int; + + // NOTE: For MSVC target, `wmemchr` is only a inline function in `<wchar.h>` + // header file. We cannot find a way to link to that symbol from Rust. + pub fn wmemchr(cx: *const crate::wchar_t, c: crate::wchar_t, n: size_t) -> *mut crate::wchar_t; +} diff --git a/vendor/libc/src/windows/mod.rs b/vendor/libc/src/windows/mod.rs new file mode 100644 index 00000000000000..2f35af84c7493d --- /dev/null +++ b/vendor/libc/src/windows/mod.rs @@ -0,0 +1,611 @@ +//! Windows CRT definitions + +use crate::prelude::*; + +pub type intmax_t = i64; +pub type uintmax_t = u64; + +pub type size_t = usize; +pub type ptrdiff_t = isize; +pub type intptr_t = isize; +pub type uintptr_t = usize; +pub type ssize_t = isize; +pub type sighandler_t = usize; + +pub type wchar_t = u16; + +pub type clock_t = i32; + +pub type errno_t = c_int; + +cfg_if! { + if #[cfg(all(target_arch = "x86", target_env = "gnu"))] { + pub type time_t = i32; + } else { + pub type time_t = i64; + } +} + +pub type off_t = i32; +pub type dev_t = u32; +pub type ino_t = u16; +#[derive(Debug)] +pub enum timezone {} +impl Copy for timezone {} +impl Clone for timezone { + fn clone(&self) -> timezone { + *self + } +} +pub type time64_t = i64; + +pub type SOCKET = crate::uintptr_t; + +s! { + // note this is the struct called stat64 in Windows. Not stat, nor stati64. 
+ pub struct stat { + pub st_dev: dev_t, + pub st_ino: ino_t, + pub st_mode: u16, + pub st_nlink: c_short, + pub st_uid: c_short, + pub st_gid: c_short, + pub st_rdev: dev_t, + pub st_size: i64, + pub st_atime: time64_t, + pub st_mtime: time64_t, + pub st_ctime: time64_t, + } + + // note that this is called utimbuf64 in Windows + pub struct utimbuf { + pub actime: time64_t, + pub modtime: time64_t, + } + + pub struct tm { + pub tm_sec: c_int, + pub tm_min: c_int, + pub tm_hour: c_int, + pub tm_mday: c_int, + pub tm_mon: c_int, + pub tm_year: c_int, + pub tm_wday: c_int, + pub tm_yday: c_int, + pub tm_isdst: c_int, + } + + pub struct timeval { + pub tv_sec: c_long, + pub tv_usec: c_long, + } + + pub struct timespec { + pub tv_sec: time_t, + pub tv_nsec: c_long, + } + + pub struct sockaddr { + pub sa_family: c_ushort, + pub sa_data: [c_char; 14], + } +} + +pub const INT_MIN: c_int = -2147483648; +pub const INT_MAX: c_int = 2147483647; + +pub const EXIT_FAILURE: c_int = 1; +pub const EXIT_SUCCESS: c_int = 0; +pub const RAND_MAX: c_int = 32767; +pub const EOF: c_int = -1; +pub const SEEK_SET: c_int = 0; +pub const SEEK_CUR: c_int = 1; +pub const SEEK_END: c_int = 2; +pub const _IOFBF: c_int = 0; +pub const _IONBF: c_int = 4; +pub const _IOLBF: c_int = 64; +pub const BUFSIZ: c_uint = 512; +pub const FOPEN_MAX: c_uint = 20; +pub const FILENAME_MAX: c_uint = 260; + +// fcntl.h +pub const O_RDONLY: c_int = 0x0000; +pub const O_WRONLY: c_int = 0x0001; +pub const O_RDWR: c_int = 0x0002; +pub const O_APPEND: c_int = 0x0008; +pub const O_CREAT: c_int = 0x0100; +pub const O_TRUNC: c_int = 0x0200; +pub const O_EXCL: c_int = 0x0400; +pub const O_TEXT: c_int = 0x4000; +pub const O_BINARY: c_int = 0x8000; +pub const _O_WTEXT: c_int = 0x10000; +pub const _O_U16TEXT: c_int = 0x20000; +pub const _O_U8TEXT: c_int = 0x40000; +pub const O_RAW: c_int = O_BINARY; +pub const O_NOINHERIT: c_int = 0x0080; +pub const O_TEMPORARY: c_int = 0x0040; +pub const _O_SHORT_LIVED: c_int = 0x1000; +pub const _O_OBTAIN_DIR: c_int = 0x2000; +pub const O_SEQUENTIAL: c_int = 0x0020; +pub const O_RANDOM: c_int = 0x0010; + +pub const S_IFCHR: c_int = 0o2_0000; +pub const S_IFDIR: c_int = 0o4_0000; +pub const S_IFREG: c_int = 0o10_0000; +pub const S_IFMT: c_int = 0o17_0000; +pub const S_IEXEC: c_int = 0o0100; +pub const S_IWRITE: c_int = 0o0200; +pub const S_IREAD: c_int = 0o0400; + +pub const LC_ALL: c_int = 0; +pub const LC_COLLATE: c_int = 1; +pub const LC_CTYPE: c_int = 2; +pub const LC_MONETARY: c_int = 3; +pub const LC_NUMERIC: c_int = 4; +pub const LC_TIME: c_int = 5; + +pub const EPERM: c_int = 1; +pub const ENOENT: c_int = 2; +pub const ESRCH: c_int = 3; +pub const EINTR: c_int = 4; +pub const EIO: c_int = 5; +pub const ENXIO: c_int = 6; +pub const E2BIG: c_int = 7; +pub const ENOEXEC: c_int = 8; +pub const EBADF: c_int = 9; +pub const ECHILD: c_int = 10; +pub const EAGAIN: c_int = 11; +pub const ENOMEM: c_int = 12; +pub const EACCES: c_int = 13; +pub const EFAULT: c_int = 14; +pub const EBUSY: c_int = 16; +pub const EEXIST: c_int = 17; +pub const EXDEV: c_int = 18; +pub const ENODEV: c_int = 19; +pub const ENOTDIR: c_int = 20; +pub const EISDIR: c_int = 21; +pub const EINVAL: c_int = 22; +pub const ENFILE: c_int = 23; +pub const EMFILE: c_int = 24; +pub const ENOTTY: c_int = 25; +pub const EFBIG: c_int = 27; +pub const ENOSPC: c_int = 28; +pub const ESPIPE: c_int = 29; +pub const EROFS: c_int = 30; +pub const EMLINK: c_int = 31; +pub const EPIPE: c_int = 32; +pub const EDOM: c_int = 33; +pub const ERANGE: c_int = 34; +pub 
const EDEADLK: c_int = 36; +pub const EDEADLOCK: c_int = 36; +pub const ENAMETOOLONG: c_int = 38; +pub const ENOLCK: c_int = 39; +pub const ENOSYS: c_int = 40; +pub const ENOTEMPTY: c_int = 41; +pub const EILSEQ: c_int = 42; +pub const STRUNCATE: c_int = 80; + +// POSIX Supplement (from errno.h) +pub const EADDRINUSE: c_int = 100; +pub const EADDRNOTAVAIL: c_int = 101; +pub const EAFNOSUPPORT: c_int = 102; +pub const EALREADY: c_int = 103; +pub const EBADMSG: c_int = 104; +pub const ECANCELED: c_int = 105; +pub const ECONNABORTED: c_int = 106; +pub const ECONNREFUSED: c_int = 107; +pub const ECONNRESET: c_int = 108; +pub const EDESTADDRREQ: c_int = 109; +pub const EHOSTUNREACH: c_int = 110; +pub const EIDRM: c_int = 111; +pub const EINPROGRESS: c_int = 112; +pub const EISCONN: c_int = 113; +pub const ELOOP: c_int = 114; +pub const EMSGSIZE: c_int = 115; +pub const ENETDOWN: c_int = 116; +pub const ENETRESET: c_int = 117; +pub const ENETUNREACH: c_int = 118; +pub const ENOBUFS: c_int = 119; +pub const ENODATA: c_int = 120; +pub const ENOLINK: c_int = 121; +pub const ENOMSG: c_int = 122; +pub const ENOPROTOOPT: c_int = 123; +pub const ENOSR: c_int = 124; +pub const ENOSTR: c_int = 125; +pub const ENOTCONN: c_int = 126; +pub const ENOTRECOVERABLE: c_int = 127; +pub const ENOTSOCK: c_int = 128; +pub const ENOTSUP: c_int = 129; +pub const EOPNOTSUPP: c_int = 130; +pub const EOVERFLOW: c_int = 132; +pub const EOWNERDEAD: c_int = 133; +pub const EPROTO: c_int = 134; +pub const EPROTONOSUPPORT: c_int = 135; +pub const EPROTOTYPE: c_int = 136; +pub const ETIME: c_int = 137; +pub const ETIMEDOUT: c_int = 138; +pub const ETXTBSY: c_int = 139; +pub const EWOULDBLOCK: c_int = 140; + +// signal codes +pub const SIGINT: c_int = 2; +pub const SIGILL: c_int = 4; +pub const SIGFPE: c_int = 8; +pub const SIGSEGV: c_int = 11; +pub const SIGTERM: c_int = 15; +pub const SIGABRT: c_int = 22; +pub const NSIG: c_int = 23; + +pub const SIG_ERR: c_int = -1; +pub const SIG_DFL: crate::sighandler_t = 0; +pub const SIG_IGN: crate::sighandler_t = 1; +pub const SIG_GET: crate::sighandler_t = 2; +pub const SIG_SGE: crate::sighandler_t = 3; +pub const SIG_ACK: crate::sighandler_t = 4; + +// DIFF(main): removed in 458c58f409 +// FIXME(msrv): done by `std` starting in 1.79.0 +// inline comment below appeases style checker +#[cfg(all(target_env = "msvc", feature = "rustc-dep-of-std"))] // " if " +#[link(name = "msvcrt", cfg(not(target_feature = "crt-static")))] +#[link(name = "libcmt", cfg(target_feature = "crt-static"))] +extern "C" {} + +#[derive(Debug)] +pub enum FILE {} +impl Copy for FILE {} +impl Clone for FILE { + fn clone(&self) -> FILE { + *self + } +} +#[derive(Debug)] +pub enum fpos_t {} // FIXME(windows): fill this out with a struct +impl Copy for fpos_t {} +impl Clone for fpos_t { + fn clone(&self) -> fpos_t { + *self + } +} + +// Special handling for all print and scan type functions because of https://github.com/rust-lang/libc/issues/2860 +cfg_if! { + if #[cfg(not(feature = "rustc-dep-of-std"))] { + #[cfg_attr( + all(windows, target_env = "msvc"), + link(name = "legacy_stdio_definitions") + )] + extern "C" { + pub fn printf(format: *const c_char, ...) -> c_int; + pub fn fprintf(stream: *mut FILE, format: *const c_char, ...) 
-> c_int; + } + } +} + +extern "C" { + pub fn isalnum(c: c_int) -> c_int; + pub fn isalpha(c: c_int) -> c_int; + pub fn iscntrl(c: c_int) -> c_int; + pub fn isdigit(c: c_int) -> c_int; + pub fn isgraph(c: c_int) -> c_int; + pub fn islower(c: c_int) -> c_int; + pub fn isprint(c: c_int) -> c_int; + pub fn ispunct(c: c_int) -> c_int; + pub fn isspace(c: c_int) -> c_int; + pub fn isupper(c: c_int) -> c_int; + pub fn isxdigit(c: c_int) -> c_int; + pub fn isblank(c: c_int) -> c_int; + pub fn tolower(c: c_int) -> c_int; + pub fn toupper(c: c_int) -> c_int; + pub fn qsort( + base: *mut c_void, + num: size_t, + size: size_t, + compar: Option c_int>, + ); + pub fn qsort_s( + base: *mut c_void, + num: size_t, + size: size_t, + compar: Option c_int>, + arg: *mut c_void, + ); + pub fn fopen(filename: *const c_char, mode: *const c_char) -> *mut FILE; + pub fn freopen(filename: *const c_char, mode: *const c_char, file: *mut FILE) -> *mut FILE; + pub fn fflush(file: *mut FILE) -> c_int; + pub fn fclose(file: *mut FILE) -> c_int; + pub fn remove(filename: *const c_char) -> c_int; + pub fn rename(oldname: *const c_char, newname: *const c_char) -> c_int; + pub fn tmpfile() -> *mut FILE; + pub fn setvbuf(stream: *mut FILE, buffer: *mut c_char, mode: c_int, size: size_t) -> c_int; + pub fn setbuf(stream: *mut FILE, buf: *mut c_char); + pub fn getchar() -> c_int; + pub fn putchar(c: c_int) -> c_int; + pub fn fgetc(stream: *mut FILE) -> c_int; + pub fn fgets(buf: *mut c_char, n: c_int, stream: *mut FILE) -> *mut c_char; + pub fn fputc(c: c_int, stream: *mut FILE) -> c_int; + pub fn fputs(s: *const c_char, stream: *mut FILE) -> c_int; + pub fn puts(s: *const c_char) -> c_int; + pub fn ungetc(c: c_int, stream: *mut FILE) -> c_int; + pub fn fread(ptr: *mut c_void, size: size_t, nobj: size_t, stream: *mut FILE) -> size_t; + pub fn fwrite(ptr: *const c_void, size: size_t, nobj: size_t, stream: *mut FILE) -> size_t; + pub fn fseek(stream: *mut FILE, offset: c_long, whence: c_int) -> c_int; + pub fn ftell(stream: *mut FILE) -> c_long; + pub fn rewind(stream: *mut FILE); + pub fn fgetpos(stream: *mut FILE, ptr: *mut fpos_t) -> c_int; + pub fn fsetpos(stream: *mut FILE, ptr: *const fpos_t) -> c_int; + pub fn feof(stream: *mut FILE) -> c_int; + pub fn ferror(stream: *mut FILE) -> c_int; + pub fn perror(s: *const c_char); + pub fn atof(s: *const c_char) -> c_double; + pub fn atoi(s: *const c_char) -> c_int; + pub fn atol(s: *const c_char) -> c_long; + pub fn atoll(s: *const c_char) -> c_longlong; + pub fn strtod(s: *const c_char, endp: *mut *mut c_char) -> c_double; + pub fn strtof(s: *const c_char, endp: *mut *mut c_char) -> c_float; + pub fn strtol(s: *const c_char, endp: *mut *mut c_char, base: c_int) -> c_long; + pub fn strtoll(s: *const c_char, endp: *mut *mut c_char, base: c_int) -> c_longlong; + pub fn strtoul(s: *const c_char, endp: *mut *mut c_char, base: c_int) -> c_ulong; + pub fn strtoull(s: *const c_char, endp: *mut *mut c_char, base: c_int) -> c_ulonglong; + pub fn calloc(nobj: size_t, size: size_t) -> *mut c_void; + pub fn malloc(size: size_t) -> *mut c_void; + pub fn _msize(p: *mut c_void) -> size_t; + pub fn realloc(p: *mut c_void, size: size_t) -> *mut c_void; + pub fn free(p: *mut c_void); + pub fn abort() -> !; + pub fn exit(status: c_int) -> !; + pub fn _exit(status: c_int) -> !; + pub fn atexit(cb: extern "C" fn()) -> c_int; + pub fn system(s: *const c_char) -> c_int; + pub fn getenv(s: *const c_char) -> *mut c_char; + + pub fn strcpy(dst: *mut c_char, src: *const c_char) -> *mut c_char; + pub fn 
strncpy(dst: *mut c_char, src: *const c_char, n: size_t) -> *mut c_char; + pub fn strcat(s: *mut c_char, ct: *const c_char) -> *mut c_char; + pub fn strncat(s: *mut c_char, ct: *const c_char, n: size_t) -> *mut c_char; + pub fn strcmp(cs: *const c_char, ct: *const c_char) -> c_int; + pub fn strncmp(cs: *const c_char, ct: *const c_char, n: size_t) -> c_int; + pub fn strcoll(cs: *const c_char, ct: *const c_char) -> c_int; + pub fn strchr(cs: *const c_char, c: c_int) -> *mut c_char; + pub fn strrchr(cs: *const c_char, c: c_int) -> *mut c_char; + pub fn strspn(cs: *const c_char, ct: *const c_char) -> size_t; + pub fn strcspn(cs: *const c_char, ct: *const c_char) -> size_t; + pub fn strdup(cs: *const c_char) -> *mut c_char; + pub fn strpbrk(cs: *const c_char, ct: *const c_char) -> *mut c_char; + pub fn strstr(cs: *const c_char, ct: *const c_char) -> *mut c_char; + pub fn strlen(cs: *const c_char) -> size_t; + pub fn strnlen(cs: *const c_char, maxlen: size_t) -> size_t; + pub fn strerror(n: c_int) -> *mut c_char; + pub fn strtok(s: *mut c_char, t: *const c_char) -> *mut c_char; + pub fn strxfrm(s: *mut c_char, ct: *const c_char, n: size_t) -> size_t; + pub fn wcslen(buf: *const wchar_t) -> size_t; + pub fn wcsnlen(str: *const wchar_t, numberOfElements: size_t) -> size_t; + pub fn wcstombs(dest: *mut c_char, src: *const wchar_t, n: size_t) -> size_t; + + pub fn memchr(cx: *const c_void, c: c_int, n: size_t) -> *mut c_void; + pub fn memcmp(cx: *const c_void, ct: *const c_void, n: size_t) -> c_int; + pub fn memcpy(dest: *mut c_void, src: *const c_void, n: size_t) -> *mut c_void; + pub fn memmove(dest: *mut c_void, src: *const c_void, n: size_t) -> *mut c_void; + pub fn memset(dest: *mut c_void, c: c_int, n: size_t) -> *mut c_void; + + pub fn abs(i: c_int) -> c_int; + pub fn labs(i: c_long) -> c_long; + pub fn rand() -> c_int; + pub fn srand(seed: c_uint); + + pub fn signal(signum: c_int, handler: sighandler_t) -> sighandler_t; + pub fn raise(signum: c_int) -> c_int; + + pub fn clock() -> clock_t; + pub fn ctime(sourceTime: *const time_t) -> *mut c_char; + pub fn difftime(timeEnd: time_t, timeStart: time_t) -> c_double; + #[link_name = "_gmtime64_s"] + pub fn gmtime_s(destTime: *mut tm, srcTime: *const time_t) -> c_int; + #[link_name = "_get_daylight"] + pub fn get_daylight(hours: *mut c_int) -> errno_t; + #[link_name = "_get_dstbias"] + pub fn get_dstbias(seconds: *mut c_long) -> errno_t; + #[link_name = "_get_timezone"] + pub fn get_timezone(seconds: *mut c_long) -> errno_t; + #[link_name = "_get_tzname"] + pub fn get_tzname( + p_return_value: *mut size_t, + time_zone_name: *mut c_char, + size_in_bytes: size_t, + index: c_int, + ) -> errno_t; + #[link_name = "_localtime64_s"] + pub fn localtime_s(tmDest: *mut tm, sourceTime: *const time_t) -> crate::errno_t; + #[link_name = "_time64"] + pub fn time(destTime: *mut time_t) -> time_t; + #[link_name = "_tzset"] + pub fn tzset(); + #[link_name = "_chmod"] + pub fn chmod(path: *const c_char, mode: c_int) -> c_int; + #[link_name = "_wchmod"] + pub fn wchmod(path: *const wchar_t, mode: c_int) -> c_int; + #[link_name = "_mkdir"] + pub fn mkdir(path: *const c_char) -> c_int; + #[link_name = "_wrmdir"] + pub fn wrmdir(path: *const wchar_t) -> c_int; + #[link_name = "_fstat64"] + pub fn fstat(fildes: c_int, buf: *mut stat) -> c_int; + #[link_name = "_stat64"] + pub fn stat(path: *const c_char, buf: *mut stat) -> c_int; + #[link_name = "_wstat64"] + pub fn wstat(path: *const wchar_t, buf: *mut stat) -> c_int; + #[link_name = "_wutime64"] + pub fn wutime(file: 
*const wchar_t, buf: *mut utimbuf) -> c_int; + #[link_name = "_popen"] + pub fn popen(command: *const c_char, mode: *const c_char) -> *mut crate::FILE; + #[link_name = "_pclose"] + pub fn pclose(stream: *mut crate::FILE) -> c_int; + #[link_name = "_fdopen"] + pub fn fdopen(fd: c_int, mode: *const c_char) -> *mut crate::FILE; + #[link_name = "_fileno"] + pub fn fileno(stream: *mut crate::FILE) -> c_int; + #[link_name = "_open"] + pub fn open(path: *const c_char, oflag: c_int, ...) -> c_int; + #[link_name = "_wopen"] + pub fn wopen(path: *const wchar_t, oflag: c_int, ...) -> c_int; + #[link_name = "_creat"] + pub fn creat(path: *const c_char, mode: c_int) -> c_int; + #[link_name = "_access"] + pub fn access(path: *const c_char, amode: c_int) -> c_int; + #[link_name = "_chdir"] + pub fn chdir(dir: *const c_char) -> c_int; + #[link_name = "_close"] + pub fn close(fd: c_int) -> c_int; + #[link_name = "_dup"] + pub fn dup(fd: c_int) -> c_int; + #[link_name = "_dup2"] + pub fn dup2(src: c_int, dst: c_int) -> c_int; + #[link_name = "_execl"] + pub fn execl(path: *const c_char, arg0: *const c_char, ...) -> intptr_t; + #[link_name = "_wexecl"] + pub fn wexecl(path: *const wchar_t, arg0: *const wchar_t, ...) -> intptr_t; + #[link_name = "_execle"] + pub fn execle(path: *const c_char, arg0: *const c_char, ...) -> intptr_t; + #[link_name = "_wexecle"] + pub fn wexecle(path: *const wchar_t, arg0: *const wchar_t, ...) -> intptr_t; + #[link_name = "_execlp"] + pub fn execlp(path: *const c_char, arg0: *const c_char, ...) -> intptr_t; + #[link_name = "_wexeclp"] + pub fn wexeclp(path: *const wchar_t, arg0: *const wchar_t, ...) -> intptr_t; + #[link_name = "_execlpe"] + pub fn execlpe(path: *const c_char, arg0: *const c_char, ...) -> intptr_t; + #[link_name = "_wexeclpe"] + pub fn wexeclpe(path: *const wchar_t, arg0: *const wchar_t, ...) 
-> intptr_t; + #[link_name = "_execv"] + // DIFF(main): changed to `intptr_t` in e77f551de9 + pub fn execv(prog: *const c_char, argv: *const *const c_char) -> intptr_t; + #[link_name = "_execve"] + pub fn execve( + prog: *const c_char, + argv: *const *const c_char, + envp: *const *const c_char, + ) -> c_int; + #[link_name = "_execvp"] + pub fn execvp(c: *const c_char, argv: *const *const c_char) -> c_int; + #[link_name = "_execvpe"] + pub fn execvpe( + c: *const c_char, + argv: *const *const c_char, + envp: *const *const c_char, + ) -> c_int; + + #[link_name = "_wexecv"] + pub fn wexecv(prog: *const wchar_t, argv: *const *const wchar_t) -> intptr_t; + #[link_name = "_wexecve"] + pub fn wexecve( + prog: *const wchar_t, + argv: *const *const wchar_t, + envp: *const *const wchar_t, + ) -> intptr_t; + #[link_name = "_wexecvp"] + pub fn wexecvp(c: *const wchar_t, argv: *const *const wchar_t) -> intptr_t; + #[link_name = "_wexecvpe"] + pub fn wexecvpe( + c: *const wchar_t, + argv: *const *const wchar_t, + envp: *const *const wchar_t, + ) -> intptr_t; + #[link_name = "_getcwd"] + pub fn getcwd(buf: *mut c_char, size: c_int) -> *mut c_char; + #[link_name = "_getpid"] + pub fn getpid() -> c_int; + #[link_name = "_isatty"] + pub fn isatty(fd: c_int) -> c_int; + #[link_name = "_lseek"] + pub fn lseek(fd: c_int, offset: c_long, origin: c_int) -> c_long; + #[link_name = "_lseeki64"] + pub fn lseek64(fd: c_int, offset: c_longlong, origin: c_int) -> c_longlong; + #[link_name = "_pipe"] + pub fn pipe(fds: *mut c_int, psize: c_uint, textmode: c_int) -> c_int; + #[link_name = "_read"] + pub fn read(fd: c_int, buf: *mut c_void, count: c_uint) -> c_int; + #[link_name = "_rmdir"] + pub fn rmdir(path: *const c_char) -> c_int; + #[link_name = "_unlink"] + pub fn unlink(c: *const c_char) -> c_int; + #[link_name = "_write"] + pub fn write(fd: c_int, buf: *const c_void, count: c_uint) -> c_int; + #[link_name = "_commit"] + pub fn commit(fd: c_int) -> c_int; + #[link_name = "_get_osfhandle"] + pub fn get_osfhandle(fd: c_int) -> intptr_t; + #[link_name = "_open_osfhandle"] + pub fn open_osfhandle(osfhandle: intptr_t, flags: c_int) -> c_int; + pub fn setlocale(category: c_int, locale: *const c_char) -> *mut c_char; + #[link_name = "_wsetlocale"] + pub fn wsetlocale(category: c_int, locale: *const wchar_t) -> *mut wchar_t; + #[link_name = "_aligned_malloc"] + pub fn aligned_malloc(size: size_t, alignment: size_t) -> *mut c_void; + #[link_name = "_aligned_free"] + pub fn aligned_free(ptr: *mut c_void); + #[link_name = "_aligned_realloc"] + pub fn aligned_realloc(memblock: *mut c_void, size: size_t, alignment: size_t) -> *mut c_void; + #[link_name = "_putenv"] + pub fn putenv(envstring: *const c_char) -> c_int; + #[link_name = "_wputenv"] + pub fn wputenv(envstring: *const crate::wchar_t) -> c_int; + #[link_name = "_putenv_s"] + pub fn putenv_s(envstring: *const c_char, value_string: *const c_char) -> crate::errno_t; + #[link_name = "_wputenv_s"] + pub fn wputenv_s( + envstring: *const crate::wchar_t, + value_string: *const crate::wchar_t, + ) -> crate::errno_t; +} + +extern "system" { + pub fn listen(s: SOCKET, backlog: c_int) -> c_int; + pub fn accept(s: SOCKET, addr: *mut crate::sockaddr, addrlen: *mut c_int) -> SOCKET; + pub fn bind(s: SOCKET, name: *const crate::sockaddr, namelen: c_int) -> c_int; + pub fn connect(s: SOCKET, name: *const crate::sockaddr, namelen: c_int) -> c_int; + pub fn getpeername(s: SOCKET, name: *mut crate::sockaddr, nameln: *mut c_int) -> c_int; + pub fn getsockname(s: SOCKET, name: *mut 
crate::sockaddr, nameln: *mut c_int) -> c_int; + pub fn getsockopt( + s: SOCKET, + level: c_int, + optname: c_int, + optval: *mut c_char, + optlen: *mut c_int, + ) -> c_int; + pub fn recvfrom( + s: SOCKET, + buf: *mut c_char, + len: c_int, + flags: c_int, + from: *mut crate::sockaddr, + fromlen: *mut c_int, + ) -> c_int; + pub fn sendto( + s: SOCKET, + buf: *const c_char, + len: c_int, + flags: c_int, + to: *const crate::sockaddr, + tolen: c_int, + ) -> c_int; + pub fn setsockopt( + s: SOCKET, + level: c_int, + optname: c_int, + optval: *const c_char, + optlen: c_int, + ) -> c_int; + pub fn socket(af: c_int, socket_type: c_int, protocol: c_int) -> SOCKET; +} + +cfg_if! { + if #[cfg(all(target_env = "gnu"))] { + mod gnu; + pub use self::gnu::*; + } else if #[cfg(all(target_env = "msvc"))] { + mod msvc; + pub use self::msvc::*; + } else { + // Unknown target_env + } +} diff --git a/vendor/libc/src/windows/msvc/mod.rs b/vendor/libc/src/windows/msvc/mod.rs new file mode 100644 index 00000000000000..5b620bc6c1afa1 --- /dev/null +++ b/vendor/libc/src/windows/msvc/mod.rs @@ -0,0 +1,17 @@ +use crate::prelude::*; + +pub const L_tmpnam: c_uint = 260; +pub const TMP_MAX: c_uint = 0x7fff_ffff; + +// POSIX Supplement (from errno.h) +// This particular error code is only currently available in msvc toolchain +pub const EOTHER: c_int = 131; + +extern "C" { + #[link_name = "_stricmp"] + pub fn stricmp(s1: *const c_char, s2: *const c_char) -> c_int; + #[link_name = "_strnicmp"] + pub fn strnicmp(s1: *const c_char, s2: *const c_char, n: size_t) -> c_int; + #[link_name = "_memccpy"] + pub fn memccpy(dest: *mut c_void, src: *const c_void, c: c_int, count: size_t) -> *mut c_void; +} diff --git a/vendor/libc/src/xous.rs b/vendor/libc/src/xous.rs new file mode 100644 index 00000000000000..2415fd42824e1c --- /dev/null +++ b/vendor/libc/src/xous.rs @@ -0,0 +1,18 @@ +//! 
Xous C type definitions + +use crate::prelude::*; + +pub type intmax_t = i64; +pub type uintmax_t = u64; + +pub type size_t = usize; +pub type ptrdiff_t = isize; +pub type intptr_t = isize; +pub type uintptr_t = usize; +pub type ssize_t = isize; + +pub type off_t = i64; +pub type wchar_t = u32; + +pub const INT_MIN: c_int = -2147483648; +pub const INT_MAX: c_int = 2147483647; diff --git a/vendor/libc/tests/const_fn.rs b/vendor/libc/tests/const_fn.rs new file mode 100644 index 00000000000000..d9b41b8073c70d --- /dev/null +++ b/vendor/libc/tests/const_fn.rs @@ -0,0 +1,3 @@ +#[cfg(target_os = "linux")] +const _FOO: libc::c_uint = unsafe { libc::CMSG_SPACE(1) }; +//^ if CMSG_SPACE is not const, this will fail to compile diff --git a/vendor/libloading/.cargo-checksum.json b/vendor/libloading/.cargo-checksum.json new file mode 100644 index 00000000000000..49d35f1ffa9e92 --- /dev/null +++ b/vendor/libloading/.cargo-checksum.json @@ -0,0 +1 @@ +{"files":{".cargo_vcs_info.json":"2c2a09896cf605d801de7c91dc9c99904e92c46aa4ec8d7963f67a12bd33f93b",".github/workflows/libloading.yml":"46df13bc0dc7c70e42bd818084fd4a3f5b8ecb74cb9b45cc585542952f96ac54","Cargo.lock":"516b49980abf75390492d18d669219adfce3b87dbabf5186b68d260ae2225c8c","Cargo.toml":"c70a979f3e8987bcf85b377d310b895b4bbeae29f6b0a28735cf48230087d5e2","Cargo.toml.orig":"a3ba7155546bbf086806c8afe512a63af09288d2afd3b26e781d4f999534eb03","LICENSE":"b29f8b01452350c20dd1af16ef83b598fea3053578ccc1c7a0ef40e57be2620f","README.mkd":"707e1cae9fa4b691ce5cb8a3976573158fc60b67cb89948f8f5d51c5908bd0a8","src/changelog.rs":"e3683b87d485ac6369349b9bbd7b04957664b8fdbc2fee845a4c5e56ce226036","src/error.rs":"24dbe0edbe6e0c3635168cc8548a32ef8c9eb939a3f6b976d48e7b7c29d752de","src/lib.rs":"f54281f105189a23f88464b1ad02c5d5073a873e5b5736d59a03d94cb485a861","src/os/mod.rs":"6c59ef8c1120953ae6b6c32f27766c643ca90d85075c49c3545d2fe1ed82cedd","src/os/unix/consts.rs":"61a73d876c19ec0542c1ca32d43eddb3b9991761d05d79351ac831dc88900b2e","src/os/unix/mod.rs":"d080b693c0a235917d6fb462ff7ef39c344883b00d8f741b18dee184538e3530","src/os/windows/mod.rs":"bce75443921d24734fe6ebc38f4b0c5ffb6303db643c88dd54779c93014f2b38","src/safe.rs":"c91c743162488495b28a9735c20c5b9fb6ea3f06fe936cd3d19ba4d1ddb2707c","src/test_helpers.rs":"201403e143e5b3204864124cd38067cf8813d5273dc1a9099288a9dc4bdd15b6","src/util.rs":"0b6dcfb9eafff2d87966460ef6b1b99980f888813037e787ed92deee602f8c2b","tests/constants.rs":"4778c062605ed22238c1bed16de4c076d0857282f090f36e6d985dafb7b4544d","tests/functions.rs":"bfe07fc286693235b12e9e04d8d079722b320871722baf3867bcf20dfb69cc43","tests/library_filename.rs":"5f43ce556e7631a63fd5c1466c82afa8bc3fbc5613210ce185227b40431b81db","tests/markers.rs":"0ebc8f807b92e39452d35732988012cdca7ce96231c57eaac9c3f4217225ad39","tests/nagisa32.dll":"5c69b2bd9c8a6ad04165c221075fc9fade1dd66ca697399ace528a5a62328e36","tests/nagisa64.dll":"e20b95e3036f3289421abd100760874d4f455afd33c3b5b64fec56b191f7d477","tests/windows.rs":"d47752de5ce18b304697a957f430351a91b0188a80a881d6558d601df69f4036"},"package":"d7c4b02199fee7c5d21a5ae7d8cfa79a6ef5bb2fc834d6e9058e89c825efdc55"} \ No newline at end of file diff --git a/vendor/libloading/.cargo_vcs_info.json b/vendor/libloading/.cargo_vcs_info.json new file mode 100644 index 00000000000000..a5ed37147c280d --- /dev/null +++ b/vendor/libloading/.cargo_vcs_info.json @@ -0,0 +1,6 @@ +{ + "git": { + "sha1": "f4ec9e702de2d0778bccff8525dc44e4cacac2d1" + }, + "path_in_vcs": "" +} \ No newline at end of file diff --git a/vendor/libloading/.github/workflows/libloading.yml 
b/vendor/libloading/.github/workflows/libloading.yml new file mode 100644 index 00000000000000..817d1df19e6eba --- /dev/null +++ b/vendor/libloading/.github/workflows/libloading.yml @@ -0,0 +1,126 @@ +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +on: + push: + branches: [master] + paths-ignore: ['*.mkd', 'LICENSE'] + pull_request: + types: [opened, reopened, synchronize] + +jobs: + native-test: + runs-on: ${{ matrix.os }} + strategy: + fail-fast: false + matrix: + rust_toolchain: [nightly, stable, 1.71.0] + os: [ubuntu-latest, windows-latest, macOS-latest] + timeout-minutes: 20 + steps: + - uses: actions/checkout@v2 + - run: rustup install ${{ matrix.rust_toolchain }} --profile=minimal + - run: rustup default ${{ matrix.rust_toolchain }} + - run: rustup component add clippy + - run: cargo update -p libc --precise 0.2.155 + if: ${{ matrix.rust_toolchain == '1.71.0' }} + - run: cargo clippy + - run: cargo test -- --nocapture + - run: cargo test --release -- --nocapture + - run: cargo rustdoc -Zunstable-options --config 'build.rustdocflags=["--cfg", "libloading_docs", "-D", "rustdoc::broken_intra_doc_links"]' + if: ${{ matrix.rust_toolchain == 'nightly' }} + # pwsh.exe drops quotes kekw. https://stackoverflow.com/a/59036879 + shell: bash + + windows-test: + runs-on: windows-latest + strategy: + fail-fast: false + matrix: + rust_toolchain: [nightly, stable] + rust_target: + - x86_64-pc-windows-gnullvm + - i686-pc-windows-gnu + include: + - rust_target: x86_64-pc-windows-gnullvm + mingw_path: C:/msys64/clang64/bin + package: mingw-w64-clang-x86_64-clang + - rust_target: i686-pc-windows-gnu + mingw_path: C:/msys64/mingw32/bin + package: mingw-w64-i686-gcc + steps: + - uses: actions/checkout@v2 + - run: rustup install ${{ matrix.rust_toolchain }} --profile=minimal + - run: rustup default ${{ matrix.rust_toolchain }} + - run: rustup target add ${{ matrix.rust_target }} + - uses: msys2/setup-msys2@v2 + with: + release: false + install: ${{ matrix.package }} + - run: echo "${{ matrix.mingw_path }}" | Out-File -FilePath $env:GITHUB_PATH -Append + if: ${{ matrix.mingw_path }}" + - run: cargo test --target ${{ matrix.rust_target }} + env: + TARGET: ${{ matrix.rust_target}} + + msys2-test: + runs-on: windows-latest + strategy: + fail-fast: false + steps: + - uses: actions/checkout@v2 + - run: rustup install nightly --profile=minimal + - run: rustup default nightly + - run: rustup component add rust-src + - uses: msys2/setup-msys2@v2 + with: + release: false + install: gcc + - run: echo "INPUT(libmsys-2.0.a)" | Out-File -FilePath "C:\msys64\usr\lib\libcygwin.a" + - run: | + $env:PATH = "C:\msys64\usr\bin\;$env:PATH" + cargo test --target x86_64-pc-cygwin -Zbuild-std + env: + CARGO_TARGET_X86_64_PC_CYGWIN_LINKER: x86_64-pc-cygwin-gcc.exe + + bare-cross-build: + runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + rust_toolchain: [nightly] + rust_target: + # BSDs: could be tested with full system emulation + # - x86_64-unknown-dragonfly + # - x86_64-unknown-freebsd + - x86_64-unknown-haiku + # - x86_64-unknown-netbsd + - x86_64-unknown-openbsd + - x86_64-unknown-redox + - x86_64-unknown-fuchsia + - wasm32-unknown-unknown + timeout-minutes: 20 + steps: + - uses: actions/checkout@v2 + - run: rustup install ${{ matrix.rust_toolchain }} --profile=minimal + - run: rustup default ${{ matrix.rust_toolchain }} + - run: rustup component add rust-src --toolchain nightly --target ${{ matrix.rust_target }} + - run: cargo build --target ${{ matrix.rust_target 
}} -Zbuild-std + + cross-ios-build: + runs-on: macos-latest + strategy: + fail-fast: false + matrix: + rust_toolchain: [nightly, stable] + rust_target: + - aarch64-apple-ios + - x86_64-apple-ios + timeout-minutes: 20 + steps: + - uses: actions/checkout@v2 + - run: rustup install ${{ matrix.rust_toolchain }} --profile=minimal + - run: rustup default ${{ matrix.rust_toolchain }} + - run: rustup target add ${{ matrix.rust_target }} + - run: cargo build --target=${{ matrix.rust_target }} diff --git a/vendor/libloading/Cargo.lock b/vendor/libloading/Cargo.lock new file mode 100644 index 00000000000000..935af143d5477f --- /dev/null +++ b/vendor/libloading/Cargo.lock @@ -0,0 +1,47 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. +version = 3 + +[[package]] +name = "cfg-if" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2fd1289c04a9ea8cb22300a459a72a385d7c73d3259e2ed7dcb2af674838cfa9" + +[[package]] +name = "libc" +version = "0.2.175" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6a82ae493e598baaea5209805c49bbf2ea7de956d50d7da0da1164f9c6d28543" + +[[package]] +name = "libloading" +version = "0.8.9" +dependencies = [ + "cfg-if", + "libc", + "static_assertions", + "windows-link", + "windows-sys", +] + +[[package]] +name = "static_assertions" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" + +[[package]] +name = "windows-link" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "45e46c0661abb7180e7b9c281db115305d49ca1709ab8242adf09666d2173c65" + +[[package]] +name = "windows-sys" +version = "0.61.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e201184e40b2ede64bc2ea34968b28e33622acdbbf37104f0e4a33f7abe657aa" +dependencies = [ + "windows-link", +] diff --git a/vendor/libloading/Cargo.toml b/vendor/libloading/Cargo.toml new file mode 100644 index 00000000000000..2c8eb67f592a94 --- /dev/null +++ b/vendor/libloading/Cargo.toml @@ -0,0 +1,90 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies. +# +# If you are reading this file be aware that the original Cargo.toml +# will likely look very different (and much more reasonable). +# See Cargo.toml.orig for the original contents. + +[package] +edition = "2015" +rust-version = "1.71.0" +name = "libloading" +version = "0.8.9" +authors = ["Simonas Kazlauskas "] +build = false +autolib = false +autobins = false +autoexamples = false +autotests = false +autobenches = false +description = "Bindings around the platform's dynamic library loading primitives with greatly improved memory safety." 
+documentation = "https://docs.rs/libloading/" +readme = "README.mkd" +keywords = [ + "dlopen", + "load", + "shared", + "dylib", +] +categories = ["api-bindings"] +license = "ISC" +repository = "https://github.com/nagisa/rust_libloading/" + +[package.metadata.docs.rs] +all-features = true +rustdoc-args = [ + "--cfg", + "libloading_docs", +] + +[lib] +name = "libloading" +path = "src/lib.rs" + +[[test]] +name = "constants" +path = "tests/constants.rs" + +[[test]] +name = "functions" +path = "tests/functions.rs" + +[[test]] +name = "library_filename" +path = "tests/library_filename.rs" + +[[test]] +name = "markers" +path = "tests/markers.rs" + +[[test]] +name = "windows" +path = "tests/windows.rs" + +[dev-dependencies.libc] +version = "0.2" + +[dev-dependencies.static_assertions] +version = "1.1" + +[target."cfg(unix)".dependencies.cfg-if] +version = "1" + +[target."cfg(windows)".dependencies.windows-link] +version = "0.2" + +[target."cfg(windows)".dev-dependencies.windows-sys] +version = "0.61" +features = ["Win32_Foundation"] + +[lints.rust.unexpected_cfgs] +level = "warn" +priority = 0 +check-cfg = [ + "cfg(libloading_docs)", + 'cfg(target_os, values("cygwin"))', +] diff --git a/vendor/libloading/LICENSE b/vendor/libloading/LICENSE new file mode 100644 index 00000000000000..9137d5607a4284 --- /dev/null +++ b/vendor/libloading/LICENSE @@ -0,0 +1,12 @@ +Copyright © 2015, Simonas Kazlauskas + +Permission to use, copy, modify, and/or distribute this software for any purpose with or without +fee is hereby granted, provided that the above copyright notice and this permission notice appear +in all copies. + +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS +SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE +AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, +NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF +THIS SOFTWARE. diff --git a/vendor/libloading/README.mkd b/vendor/libloading/README.mkd new file mode 100644 index 00000000000000..7ff55b04b2dfa4 --- /dev/null +++ b/vendor/libloading/README.mkd @@ -0,0 +1,16 @@ +# libloading + +Bindings around the platform's dynamic library loading primitives with greatly improved memory +safety. The most important safety guarantee of this library is the prevention of dangling `Symbol`s +that may occur after a `Library` is unloaded. + +Using this library allows the loading of dynamic libraries, also known as shared libraries, as well +as the use of the functions and static variables that these libraries may contain. + +* [Documentation][docs] +* [Changelog][changelog] + +[docs]: https://docs.rs/libloading/ +[changelog]: https://docs.rs/libloading/*/libloading/changelog/index.html + +libloading is available to use under ISC (MIT-like) license. diff --git a/vendor/libloading/src/changelog.rs b/vendor/libloading/src/changelog.rs new file mode 100644 index 00000000000000..181915e985dceb --- /dev/null +++ b/vendor/libloading/src/changelog.rs @@ -0,0 +1,405 @@ +//! The change log. + +/// Release 0.8.9 (2025-09-17) +/// +/// ## Non-breaking changes +/// +/// Migrate from windows-targets to windows-link for linking Windows API functions. +pub mod r0_8_9 {} + +/// Release 0.8.8 (2025-05-27) +/// +/// ## Non-breaking changes +/// +/// Add `os::window::Library::pin`. 
+pub mod r0_8_8 {} + +/// Release 0.8.7 (2025-04-26) +/// +/// ## Non-breaking changes +/// +/// Add support for the `*-pc-cygwin` target. +pub mod r0_8_7 {} + +/// Release 0.8.4 (2024-06-23) +/// +/// ## Non-breaking changes +/// +/// Compilation when targeting Apple's visionos, watchos and tvos targets has been fixed. +pub mod r0_8_4 {} + +/// Release 0.8.3 (2024-03-05) +/// +/// ## Non-breaking changes +/// +/// A `dev-dependency` on `windows-sys` that was unconditionally introduced in +/// [0.8.2](r0_8_2) has been made conditional. +pub mod r0_8_3 {} + +/// Release 0.8.2 (2024-03-01) +/// +/// ## (Potentially) breaking changes +/// +/// MSRV has been increased to 1.56.0. Since both rustc versions are ancient, this has been deemed +/// to not be breaking enough to warrant a semver-breaking release of libloading. If you're stick +/// with a version of rustc older than 1.56.0, lock `libloading` dependency to `0.8.1`. +/// +/// ## Non-breaking changes +/// +/// * The crate switches the dependency on `windows-sys` to a `windows-target` one for Windows +/// bindings. In order to enable this `libloading` defines any bindings necessary for its operation +/// internally, just like has been done for `unix` targets. This should result in leaner dependency +/// trees. +/// * `os::unix::with_dlerror` has been exposed for the users who need to invoke `dl*` family of +/// functions manually. +pub mod r0_8_2 {} + +/// Release 0.8.1 (2023-09-30) +/// +/// ## Non-breaking changes +/// +/// * Support for GNU Hurd. +pub mod r0_8_1 {} + +/// Release 0.8.0 (2023-04-11) +/// +/// ## (Potentially) breaking changes +/// +/// * `winapi` dependency has been replaced with `windows-sys`. +/// * As a result the MSRV has been increased to 1.48. +/// +/// ## Non-breaking changes +/// +/// * Support for the QNX Neutrino target has been added. +pub mod r0_8_0 {} + +/// Release 0.7.4 (2022-11-07) +/// +/// This release has no functional changes. +/// +/// `RTLD_LAZY`, `RTLD_GLOBAL` and `RTLD_LOCAL` constants have been implemented for AIX platforms. +pub mod r0_7_4 {} + +/// Release 0.7.3 (2022-01-15) +/// +/// This release has no functional changes. +/// +/// In this release the `docsrs` `cfg` has been renamed to `libloading_docs` to better reflect that +/// this `cfg` is intended to be only used by `libloading` and only specifically for the invocation +/// of `rustdoc` when documenting `libloading`. Setting this `cfg` in any other situation is +/// unsupported and will not work. +pub mod r0_7_3 {} + +/// Release 0.7.2 (2021-11-14) +/// +/// Cargo.toml now specifies the MSRV bounds, which enables tooling to report an early failure when +/// the version of the toolchain is insufficient. Refer to the [min-rust-version RFC] and its +/// [tracking issue]. +/// +/// [min-rust-version RFC]: https://rust-lang.github.io/rfcs/2495-min-rust-version.html +/// [tracking issue]: https://github.com/rust-lang/rust/issues/65262 +/// +/// Additionally, on platforms `libloading` has no support (today: `not(any(unix, windows))`), we +/// will no longer attempt to implement the cross-platform `Library` and `Symbol` types. This makes +/// `libloading` compile on targets such as `wasm32-unknown-unknown` and gives ability to the +/// downstream consumers of this library to decide how they want to handle the absence of the +/// library loading implementation in their code. 
One of such approaches could be depending on +/// `libloading` itself optionally as such: +/// +/// ```toml +/// [target.'cfg(any(unix, windows))'.dependencies.libloading] +/// version = "0.7" +/// ``` +pub mod r0_7_2 {} + +/// Release 0.7.1 (2021-10-09) +/// +/// Significantly improved the consistency and style of the documentation. +pub mod r0_7_1 {} + +/// Release 0.7.0 (2021-02-06) +/// +/// ## Breaking changes +/// +/// ### Loading functions are now `unsafe` +/// +/// A number of associated methods involved in loading a library were changed to +/// be `unsafe`. The affected functions are: [`Library::new`], [`os::unix::Library::new`], +/// [`os::unix::Library::open`], [`os::windows::Library::new`], +/// [`os::windows::Library::load_with_flags`]. This is the most prominent breaking change in this +/// release and affects majority of the users of `libloading`. +/// +/// In order to see why it was necessary, consider the following snippet of C++ code: +/// +/// ```c++ +/// #include +/// #include +/// +/// static std::vector UNSHUU = { 1, 2, 3 }; +/// +/// int main() { +/// std::cout << UNSHUU[0] << UNSHUU[1] << UNSHUU[2] << std::endl; // Prints 123 +/// return 0; +/// } +/// ``` +/// +/// The `std::vector` type, much like in Rust's `Vec`, stores its contents in a buffer allocated on +/// the heap. In this example the vector object itself is stored and initialized as a static +/// variable – a compile time construct. The heap, on the other hand, is a runtime construct. And +/// yet the code works exactly as you'd expect – the vector contains numbers 1, 2 and 3 stored in +/// a buffer on heap. So, _what_ makes it work out, exactly? +/// +/// Various executable and shared library formats define conventions and machinery to execute +/// arbitrary code when a program or a shared library is loaded. On systems using the PE format +/// (e.g. Windows) this is available via the optional `DllMain` initializer. Various systems +/// utilizing the ELF format take a slightly different approach of maintaining an array of function +/// pointers in the `.init_array` section. A very similar mechanism exists on systems that utilize +/// the Mach-O format. +/// +/// For the C++ program above, the object stored in the `UNSHUU` global variable is constructed +/// by code run as part of such an initializer routine. This initializer is run before the entry +/// point (the `main` function) is executed, allowing for this magical behaviour to be possible. +/// Were the C++ code built as a shared library instead, the initialization routines would run as +/// the resulting shared library is loaded. In case of `libloading` – during the call to +/// `Library::new` and other methods affected by this change. +/// +/// These initialization (and very closely related termination) routines can be utilized outside of +/// C++ too. Anybody can build a shared library in variety of different programming languages and +/// set up the initializers to execute arbitrary code. Potentially code that does all sorts of +/// wildly unsound stuff. +/// +/// The routines are executed by components that are an integral part of the operating system. +/// Changing or controlling the operation of these components is infeasible. With that in +/// mind, the initializer and termination routines are something anybody loading a library must +/// carefully evaluate the libraries loaded for soundness. 
+/// +/// In practice, a vast majority of the libraries can be considered a good citizen and their +/// initialization and termination routines, if they have any at all, can be trusted to be sound. +/// +/// Also see: [issue #86]. +/// +/// ### Better & more consistent default behaviour on UNIX systems +/// +/// On UNIX systems the [`Library::new`], [`os::unix::Library::new`] and +/// [`os::unix::Library::this`] methods have been changed to use +/// [RTLD_LAZY] | [RTLD_LOCAL] as the default set of loader options (previously: +/// [`RTLD_NOW`]). This has a couple benefits. Namely: +/// +/// * Lazy binding is generally quicker to execute when only a subset of symbols from a library are +/// used and is typically the default when neither `RTLD_LAZY` nor `RTLD_NOW` are specified when +/// calling the underlying `dlopen` API; +/// * On most UNIX systems (macOS being a notable exception) `RTLD_LOCAL` is the default when +/// neither `RTLD_LOCAL` nor [`RTLD_GLOBAL`] are specified. The explicit setting of the +/// `RTLD_LOCAL` flag makes this behaviour consistent across platforms. +/// +/// ### Dropped support for Windows XP/Vista +/// +/// The (broken) support for Windows XP and Windows Vista environments was removed. This was +/// prompted primarily by a similar policy change in the [Rust +/// project](https://github.com/rust-lang/compiler-team/issues/378) but also as an acknowledgement +/// to the fact that `libloading` never worked in these environments anyway. +/// +/// ### More accurate error variant names +/// +/// Finally, the `Error::LoadLibraryW` renamed to [`Error::LoadLibraryExW`] to more accurately +/// represent the underlying API that's failing. No functional changes as part of this rename +/// intended. +/// +/// [issue #86]: https://github.com/nagisa/rust_libloading/issues/86 +/// [`Library::new`]: crate::Library::new +/// [`Error::LoadLibraryExW`]: crate::Error::LoadLibraryExW +/// [`os::unix::Library::this`]: crate::os::unix::Library::this +/// [`os::unix::Library::new`]: crate::os::unix::Library::new +/// [`os::unix::Library::open`]: crate::os::unix::Library::new +/// [`os::windows::Library::new`]: crate::os::windows::Library::new +/// [`os::windows::Library::load_with_flags`]: crate::os::windows::Library::load_with_flags +/// [`RTLD_NOW`]: crate::os::unix::RTLD_NOW +/// [RTLD_LAZY]: crate::os::unix::RTLD_LAZY +/// [RTLD_LOCAL]: crate::os::unix::RTLD_LOCAL +/// [`RTLD_GLOBAL`]: crate::os::unix::RTLD_GLOBAL +pub mod r0_7_0 {} + +/// Release 0.6.7 (2021-01-14) +/// +/// * Added a [`os::windows::Library::open_already_loaded`] to obtain a handle to a library that +/// must already be loaded. There is no portable equivalent for all UNIX targets. Users who do +/// not care about portability across UNIX platforms may use [`os::unix::Library::open`] with +/// `libc::RTLD_NOLOAD`; +/// +/// [`os::windows::Library::open_already_loaded`]: crate::os::windows::Library::open_already_loaded +/// [`os::unix::Library::open`]: crate::os::unix::Library::open +pub mod r0_6_7 {} + +/// Release 0.6.6 (2020-12-03) +/// +/// * Fix a double-release of resources when [`Library::close`] or [`os::windows::Library::close`] +/// is used on Windows. +/// +/// [`Library::close`]: crate::Library::close +/// [`os::windows::Library::close`]: crate::os::windows::Library::close +pub mod r0_6_6 {} + +/// Release 0.6.5 (2020-10-23) +/// +/// * Upgrade cfg-if 0.1 to 1.0 +pub mod r0_6_5 {} + +/// Release 0.6.4 (2020-10-10) +/// +/// * Remove use of `build.rs` making it easier to build `libloading` without cargo. 
It also +/// almost halves the build time of this crate. +pub mod r0_6_4 {} + +/// Release 0.6.3 (2020-08-22) +/// +/// * Improve documentation, allowing to view all of the os-specific functionality from +/// documentation generated for any target; +/// * Add [`os::windows::Library::this`]; +/// * Added constants to use with OS-specific `Library::open`; +/// * Add [`library_filename`]. +/// +/// [`os::windows::Library::this`]: crate::os::windows::Library::this +/// [`library_filename`]: crate::library_filename +pub mod r0_6_3 {} + +/// Release 0.6.2 (2020-05-06) +/// +/// * Fixed building of this library on Illumos. +pub mod r0_6_2 {} + +/// Release 0.6.1 (2020-04-15) +/// +/// * Introduced a new method [`os::windows::Library::load_with_flags`]; +/// * Added support for the Illumos triple. +/// +/// [`os::windows::Library::load_with_flags`]: crate::os::windows::Library::load_with_flags +pub mod r0_6_1 {} + +/// Release 0.6.0 (2020-04-05) +/// +/// * Introduced a new method [`os::unix::Library::get_singlethreaded`]; +/// * Added (untested) support for building when targeting Redox and Fuchsia; +/// * The APIs exposed by this library no longer panic and instead return an `Err` when it used +/// to panic. +/// +/// ## Breaking changes +/// +/// * Minimum required (stable) version of Rust to build this library is now 1.40.0; +/// * This crate now implements a custom [`Error`] type and all APIs now return this type rather +/// than returning the `std::io::Error`; +/// * `libloading::Result` has been removed; +/// * Removed the dependency on the C compiler to build this library on UNIX-like platforms. +/// `libloading` used to utilize a snippet written in C to work-around the unlikely possibility +/// of the target having a thread-unsafe implementation of the `dlerror` function. The effect of +/// the work-around was very opportunistic: it would not work if the function was called by +/// forgoing `libloading`. +/// +/// Starting with 0.6.0, [`Library::get`] on platforms where `dlerror` is not MT-safe (such as +/// FreeBSD, DragonflyBSD or NetBSD) will unconditionally return an error when the underlying +/// `dlsym` returns a null pointer. For the use-cases where loading null pointers is necessary +/// consider using [`os::unix::Library::get_singlethreaded`] instead. +/// +/// [`Library::get`]: crate::Library::get +/// [`os::unix::Library::get_singlethreaded`]: crate::os::unix::Library::get_singlethreaded +/// [`Error`]: crate::Error +pub mod r0_6_0 {} + +/// Release 0.5.2 (2019-07-07) +/// +/// * Added API to convert OS-specific `Library` and `Symbol` conversion to underlying resources. +pub mod r0_5_2 {} + +/// Release 0.5.1 (2019-06-01) +/// +/// * Build on Haiku targets. +pub mod r0_5_1 {} + +/// Release 0.5.0 (2018-01-11) +/// +/// * Update to `winapi = ^0.3`; +/// +/// ## Breaking changes +/// +/// * libloading now requires a C compiler to build on UNIX; +/// * This is a temporary measure until the [`linkage`] attribute is stabilised; +/// * Necessary to resolve [#32]. +/// +/// [`linkage`]: https://github.com/rust-lang/rust/issues/29603 +/// [#32]: https://github.com/nagisa/rust_libloading/issues/32 +pub mod r0_5_0 {} + +/// Release 0.4.3 (2017-12-07) +/// +/// * Bump lazy-static dependency to `^1.0`; +/// * `cargo test --release` now works when testing libloading. 
+pub mod r0_4_3 {} + +/// Release 0.4.2 (2017-09-24) +/// +/// * Improved error and race-condition handling on Windows; +/// * Improved documentation about thread-safety of Library; +/// * Added `Symbol::::lift_option() -> Option>` convenience method. +pub mod r0_4_2 {} + +/// Release 0.4.1 (2017-08-29) +/// +/// * Solaris support +pub mod r0_4_1 {} + +/// Release 0.4.0 (2017-05-01) +/// +/// * Remove build-time dependency on target_build_utils (and by extension serde/phf); +/// * Require at least version 1.14.0 of rustc to build; +/// * Actually, it is cargo which has to be more recent here. The one shipped with rustc 1.14.0 +/// is what’s being required from now on. +pub mod r0_4_0 {} + +/// Release 0.3.4 (2017-03-25) +/// +/// * Remove rogue println! +pub mod r0_3_4 {} + +/// Release 0.3.3 (2017-03-25) +/// +/// * Panics when `Library::get` is called for incompatibly sized type such as named function +/// types (which are zero-sized). +pub mod r0_3_3 {} + +/// Release 0.3.2 (2017-02-10) +/// +/// * Minimum version required is now rustc 1.12.0; +/// * Updated dependency versions (most notably target_build_utils to 0.3.0) +pub mod r0_3_2 {} + +/// Release 0.3.1 (2016-10-01) +/// +/// * `Symbol` and `os::*::Symbol` now implement `Send` where `T: Send`; +/// * `Symbol` and `os::*::Symbol` now implement `Sync` where `T: Sync`; +/// * `Library` and `os::*::Library` now implement `Sync` (they were `Send` in 0.3.0 already). +pub mod r0_3_1 {} + +/// Release 0.3.0 (2016-07-27) +/// +/// * Greatly improved documentation, especially around platform-specific behaviours; +/// * Improved test suite by building our own library to test against; +/// * All `Library`-ies now implement `Send`. +/// * Added `impl From for Library` and `impl From for +/// os::platform::Library` allowing wrapping and extracting the platform-specific library handle; +/// * Added methods to wrap (`Symbol::from_raw`) and unwrap (`Symbol::into_raw`) the safe `Symbol` +/// wrapper into unsafe `os::platform::Symbol`. +/// +/// The last two additions focus on not restricting potential usecases of this library, allowing +/// users of the library to circumvent safety checks if need be. +/// +/// ## Breaking Changes +/// +/// `Library::new` defaults to `RTLD_NOW` instead of `RTLD_LAZY` on UNIX for more consistent +/// cross-platform behaviour. If a library loaded with `Library::new` had any linking errors, but +/// unresolved references weren’t forced to be resolved, the library would’ve “just worked”, +/// whereas now the call to `Library::new` will return an error signifying presence of such error. +/// +/// ## os::platform +/// * Added `os::unix::Library::open` which allows specifying arbitrary flags (e.g. `RTLD_LAZY`); +/// * Added `os::windows::Library::get_ordinal` which allows finding a function or variable by its +/// ordinal number; +pub mod r0_3_0 {} diff --git a/vendor/libloading/src/error.rs b/vendor/libloading/src/error.rs new file mode 100644 index 00000000000000..43cf320b1bcc63 --- /dev/null +++ b/vendor/libloading/src/error.rs @@ -0,0 +1,146 @@ +use std::ffi::{CStr, CString}; + +/// A `dlerror` error. +pub struct DlDescription(pub(crate) CString); + +impl std::fmt::Debug for DlDescription { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + std::fmt::Debug::fmt(&self.0, f) + } +} + +impl From<&CStr> for DlDescription { + fn from(value: &CStr) -> Self { + Self(value.into()) + } +} + +/// A Windows API error. 
+pub struct WindowsError(pub(crate) std::io::Error); + +impl std::fmt::Debug for WindowsError { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + std::fmt::Debug::fmt(&self.0, f) + } +} + +/// Errors. +#[derive(Debug)] +#[non_exhaustive] +pub enum Error { + /// The `dlopen` call failed. + DlOpen { + /// The source error. + desc: DlDescription, + }, + /// The `dlopen` call failed and system did not report an error. + DlOpenUnknown, + /// The `dlsym` call failed. + DlSym { + /// The source error. + desc: DlDescription, + }, + /// The `dlsym` call failed and system did not report an error. + DlSymUnknown, + /// The `dlclose` call failed. + DlClose { + /// The source error. + desc: DlDescription, + }, + /// The `dlclose` call failed and system did not report an error. + DlCloseUnknown, + /// The `LoadLibraryW` call failed. + LoadLibraryExW { + /// The source error. + source: WindowsError, + }, + /// The `LoadLibraryW` call failed and system did not report an error. + LoadLibraryExWUnknown, + /// The `GetModuleHandleExW` call failed. + GetModuleHandleExW { + /// The source error. + source: WindowsError, + }, + /// The `GetModuleHandleExW` call failed and system did not report an error. + GetModuleHandleExWUnknown, + /// The `GetProcAddress` call failed. + GetProcAddress { + /// The source error. + source: WindowsError, + }, + /// The `GetProcAddressUnknown` call failed and system did not report an error. + GetProcAddressUnknown, + /// The `FreeLibrary` call failed. + FreeLibrary { + /// The source error. + source: WindowsError, + }, + /// The `FreeLibrary` call failed and system did not report an error. + FreeLibraryUnknown, + /// The requested type cannot possibly work. + IncompatibleSize, + /// Could not create a new CString. + CreateCString { + /// The source error. + source: std::ffi::NulError, + }, + /// Could not create a new CString from bytes with trailing null. + CreateCStringWithTrailing { + /// The source error. + source: std::ffi::FromBytesWithNulError, + }, +} + +impl std::error::Error for Error { + fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { + use Error::*; + match *self { + CreateCString { ref source } => Some(source), + CreateCStringWithTrailing { ref source } => Some(source), + LoadLibraryExW { ref source } => Some(&source.0), + GetModuleHandleExW { ref source } => Some(&source.0), + GetProcAddress { ref source } => Some(&source.0), + FreeLibrary { ref source } => Some(&source.0), + _ => None, + } + } +} + +impl std::fmt::Display for Error { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + use Error::*; + match *self { + DlOpen { ref desc } => write!(f, "{}", desc.0.to_string_lossy()), + DlOpenUnknown => write!(f, "dlopen failed, but system did not report the error"), + DlSym { ref desc } => write!(f, "{}", desc.0.to_string_lossy()), + DlSymUnknown => write!(f, "dlsym failed, but system did not report the error"), + DlClose { ref desc } => write!(f, "{}", desc.0.to_string_lossy()), + DlCloseUnknown => write!(f, "dlclose failed, but system did not report the error"), + LoadLibraryExW { .. } => write!(f, "LoadLibraryExW failed"), + LoadLibraryExWUnknown => write!( + f, + "LoadLibraryExW failed, but system did not report the error" + ), + GetModuleHandleExW { .. } => write!(f, "GetModuleHandleExW failed"), + GetModuleHandleExWUnknown => write!( + f, + "GetModuleHandleExWUnknown failed, but system did not report the error" + ), + GetProcAddress { .. 
} => write!(f, "GetProcAddress failed"), + GetProcAddressUnknown => write!( + f, + "GetProcAddress failed, but system did not report the error" + ), + FreeLibrary { .. } => write!(f, "FreeLibrary failed"), + FreeLibraryUnknown => { + write!(f, "FreeLibrary failed, but system did not report the error") + } + CreateCString { .. } => write!(f, "could not create a C string from bytes"), + CreateCStringWithTrailing { .. } => write!( + f, + "could not create a C string from bytes with trailing null" + ), + IncompatibleSize => write!(f, "requested type cannot possibly work"), + } + } +} diff --git a/vendor/libloading/src/lib.rs b/vendor/libloading/src/lib.rs new file mode 100644 index 00000000000000..d1e2ced62f180d --- /dev/null +++ b/vendor/libloading/src/lib.rs @@ -0,0 +1,81 @@ +//! Bindings around the platform's dynamic library loading primitives with greatly improved memory safety. +//! +//! Using this library allows the loading of [dynamic libraries](struct.Library.html), also known as +//! shared libraries, and the use of the functions and static variables they contain. +//! +//! The `libloading` crate exposes a cross-platform interface to load a library and make use of its +//! contents, but little is done to hide the differences in behaviour between platforms. +//! The API documentation strives to document such differences as much as possible. +//! +//! Platform-specific APIs are also available in the [`os`](crate::os) module. These APIs are more +//! flexible, but less safe. +//! +//! # Installation +//! +//! Add the `libloading` library to your dependencies in `Cargo.toml`: +//! +//! ```toml +//! [dependencies] +//! libloading = "0.8" +//! ``` +//! +//! # Usage +//! +//! In your code, run the following: +//! +//! ```no_run +//! fn call_dynamic() -> Result<u32, Box<dyn std::error::Error>> { +//! unsafe { +//! let lib = libloading::Library::new("/path/to/liblibrary.so")?; +//! let func: libloading::Symbol<unsafe extern fn() -> u32> = lib.get(b"my_func")?; +//! Ok(func()) +//! } +//! } +//! ``` +//! +//! The compiler will ensure that the loaded function will not outlive the `Library` from which it comes, +//! preventing the most common memory-safety issues. +#![cfg_attr( + any(unix, windows), + deny(missing_docs, clippy::all, unreachable_pub, unused) +)] +#![cfg_attr(libloading_docs, feature(doc_cfg))] + +pub mod changelog; +mod error; +pub mod os; +#[cfg(any(unix, windows, libloading_docs))] +mod safe; +mod util; + +pub use self::error::Error; +#[cfg(any(unix, windows, libloading_docs))] +pub use self::safe::{Library, Symbol}; +use std::env::consts::{DLL_PREFIX, DLL_SUFFIX}; +use std::ffi::{OsStr, OsString}; + +/// Converts a library name to a filename generally appropriate for use on the system. +/// +/// This function will prepend prefixes (such as `lib`) and suffixes (such as `.so`) to the library +/// `name` to construct the filename. +/// +/// # Examples +/// +/// It can be used to load global libraries in a platform independent manner: +/// +/// ``` +/// use libloading::{Library, library_filename}; +/// // Will attempt to load `libLLVM.so` on Linux, `libLLVM.dylib` on macOS and `LLVM.dll` on +/// // Windows.
+/// let library = unsafe { +/// Library::new(library_filename("LLVM")) +/// }; +/// ``` +pub fn library_filename>(name: S) -> OsString { + let name = name.as_ref(); + let mut string = OsString::with_capacity(name.len() + DLL_PREFIX.len() + DLL_SUFFIX.len()); + string.push(DLL_PREFIX); + string.push(name); + string.push(DLL_SUFFIX); + string +} diff --git a/vendor/libloading/src/os/mod.rs b/vendor/libloading/src/os/mod.rs new file mode 100644 index 00000000000000..710353f5ef3852 --- /dev/null +++ b/vendor/libloading/src/os/mod.rs @@ -0,0 +1,27 @@ +//! Unsafe but flexible platform-specific bindings to dynamic library loading facilities. +//! +//! These modules expose more extensive and powerful bindings to the dynamic +//! library loading facilities. Use of these bindings come at the cost of less (in most cases, +//! none at all) safety guarantees, which are provided by the top-level bindings. +//! +//! # Examples +//! +//! Using these modules will likely involve conditional compilation: +//! +//! ```ignore +//! # extern crate libloading; +//! #[cfg(unix)] +//! use libloading::os::unix::*; +//! #[cfg(windows)] +//! use libloading::os::windows::*; +//! ``` + +/// UNIX implementation of dynamic library loading. +#[cfg(any(unix, libloading_docs))] +#[cfg_attr(libloading_docs, doc(cfg(unix)))] +pub mod unix; + +/// Windows implementation of dynamic library loading. +#[cfg(any(windows, libloading_docs))] +#[cfg_attr(libloading_docs, doc(cfg(windows)))] +pub mod windows; diff --git a/vendor/libloading/src/os/unix/consts.rs b/vendor/libloading/src/os/unix/consts.rs new file mode 100644 index 00000000000000..4ae00592dad5a1 --- /dev/null +++ b/vendor/libloading/src/os/unix/consts.rs @@ -0,0 +1,265 @@ +use std::os::raw::c_int; + +/// Perform lazy binding. +/// +/// Relocations shall be performed at an implementation-defined time, ranging from the time +/// of the [`Library::open`] call until the first reference to a given symbol occurs. +/// Specifying `RTLD_LAZY` should improve performance on implementations supporting dynamic +/// symbol binding since a process might not reference all of the symbols in an executable +/// object file. And, for systems supporting dynamic symbol resolution for normal process +/// execution, this behaviour mimics the normal handling of process execution. +/// +/// Conflicts with [`RTLD_NOW`]. +/// +/// [`Library::open`]: crate::os::unix::Library::open +pub const RTLD_LAZY: c_int = posix::RTLD_LAZY; + +/// Perform eager binding. +/// +/// All necessary relocations shall be performed when the executable object file is first +/// loaded. This may waste some processing if relocations are performed for symbols +/// that are never referenced. This behaviour may be useful for applications that need to +/// know that all symbols referenced during execution will be available before +/// [`Library::open`] returns. +/// +/// Conflicts with [`RTLD_LAZY`]. +/// +/// [`Library::open`]: crate::os::unix::Library::open +pub const RTLD_NOW: c_int = posix::RTLD_NOW; + +/// Make loaded symbols available for resolution globally. +/// +/// The executable object file's symbols shall be made available for relocation processing of any +/// other executable object file. In addition, calls to [`Library::get`] on `Library` obtained from +/// [`Library::this`] allows executable object files loaded with this mode to be searched. 
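+///
+/// An illustrative sketch (not from the upstream libloading documentation; `libplugin.so` is a
+/// placeholder name): a library whose symbols should be visible to everything loaded after it
+/// could be opened like this.
+///
+/// ```no_run
+/// use libloading::os::unix::{Library, RTLD_GLOBAL, RTLD_NOW};
+///
+/// // Resolve all relocations eagerly and publish the symbols globally.
+/// let lib = unsafe {
+///     Library::open(Some("libplugin.so"), RTLD_NOW | RTLD_GLOBAL)
+///         .expect("failed to load plugin")
+/// };
+/// ```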
+/// +/// [`Library::this`]: crate::os::unix::Library::this +/// [`Library::get`]: crate::os::unix::Library::get +pub const RTLD_GLOBAL: c_int = posix::RTLD_GLOBAL; + +/// Load symbols into an isolated namespace. +/// +/// The executable object file's symbols shall not be made available for relocation processing of +/// any other executable object file. This mode of operation is most appropriate for e.g. plugins. +pub const RTLD_LOCAL: c_int = posix::RTLD_LOCAL; + +#[cfg(all(libloading_docs, not(unix)))] +mod posix { + use super::c_int; + pub(super) const RTLD_LAZY: c_int = !0; + pub(super) const RTLD_NOW: c_int = !0; + pub(super) const RTLD_GLOBAL: c_int = !0; + pub(super) const RTLD_LOCAL: c_int = !0; +} + +#[cfg(any(not(libloading_docs), unix))] +mod posix { + extern crate cfg_if; + use self::cfg_if::cfg_if; + use super::c_int; + cfg_if! { + if #[cfg(target_os = "haiku")] { + pub(super) const RTLD_LAZY: c_int = 0; + } else if #[cfg(target_os = "aix")] { + pub(super) const RTLD_LAZY: c_int = 4; + } else if #[cfg(any( + target_os = "linux", + target_os = "android", + target_os = "emscripten", + + target_os = "macos", + target_os = "ios", + target_os = "tvos", + target_os = "visionos", + target_os = "watchos", + + target_os = "freebsd", + target_os = "dragonfly", + target_os = "openbsd", + target_os = "netbsd", + + target_os = "solaris", + target_os = "illumos", + + target_env = "uclibc", + target_env = "newlib", + + target_os = "fuchsia", + target_os = "redox", + target_os = "nto", + target_os = "hurd", + target_os = "cygwin", + ))] { + pub(super) const RTLD_LAZY: c_int = 1; + } else { + compile_error!( + "Target has no known `RTLD_LAZY` value. Please submit an issue or PR adding it." + ); + } + } + + cfg_if! { + if #[cfg(target_os = "haiku")] { + pub(super) const RTLD_NOW: c_int = 1; + } else if #[cfg(any( + target_os = "linux", + all(target_os = "android", target_pointer_width = "64"), + target_os = "emscripten", + + target_os = "macos", + target_os = "ios", + target_os = "tvos", + target_os = "visionos", + target_os = "watchos", + + target_os = "freebsd", + target_os = "dragonfly", + target_os = "openbsd", + target_os = "netbsd", + + target_os = "aix", + target_os = "solaris", + target_os = "illumos", + + target_env = "uclibc", + target_env = "newlib", + + target_os = "fuchsia", + target_os = "redox", + target_os = "nto", + target_os = "hurd", + target_os = "cygwin", + ))] { + pub(super) const RTLD_NOW: c_int = 2; + } else if #[cfg(all(target_os = "android",target_pointer_width = "32"))] { + pub(super) const RTLD_NOW: c_int = 0; + } else { + compile_error!( + "Target has no known `RTLD_NOW` value. Please submit an issue or PR adding it." + ); + } + } + + cfg_if! 
{ + if #[cfg(any( + target_os = "haiku", + all(target_os = "android",target_pointer_width = "32"), + ))] { + pub(super) const RTLD_GLOBAL: c_int = 2; + } else if #[cfg(target_os = "aix")] { + pub(super) const RTLD_GLOBAL: c_int = 0x10000; + } else if #[cfg(any( + target_env = "uclibc", + all(target_os = "linux", target_arch = "mips"), + all(target_os = "linux", target_arch = "mips64"), + target_os = "cygwin", + ))] { + pub(super) const RTLD_GLOBAL: c_int = 4; + } else if #[cfg(any( + target_os = "macos", + target_os = "ios", + target_os = "tvos", + target_os = "visionos", + target_os = "watchos", + ))] { + pub(super) const RTLD_GLOBAL: c_int = 8; + } else if #[cfg(any( + target_os = "linux", + all(target_os = "android", target_pointer_width = "64"), + target_os = "emscripten", + + target_os = "freebsd", + target_os = "dragonfly", + target_os = "openbsd", + target_os = "netbsd", + + target_os = "solaris", + target_os = "illumos", + + target_env = "newlib", + + target_os = "fuchsia", + target_os = "redox", + target_os = "nto", + target_os = "hurd", + ))] { + pub(super) const RTLD_GLOBAL: c_int = 0x100; + } else { + compile_error!( + "Target has no known `RTLD_GLOBAL` value. Please submit an issue or PR adding it." + ); + } + } + + cfg_if! { + if #[cfg(any( + target_os = "netbsd", + target_os = "nto", + ))] { + pub(super) const RTLD_LOCAL: c_int = 0x200; + } else if #[cfg(target_os = "aix")] { + pub(super) const RTLD_LOCAL: c_int = 0x80000; + } else if #[cfg(any( + target_os = "macos", + target_os = "ios", + target_os = "tvos", + target_os = "visionos", + target_os = "watchos", + ))] { + pub(super) const RTLD_LOCAL: c_int = 4; + } else if #[cfg(any( + target_os = "linux", + target_os = "android", + target_os = "emscripten", + + target_os = "freebsd", + target_os = "dragonfly", + target_os = "openbsd", + + target_os = "haiku", + + target_os = "solaris", + target_os = "illumos", + + target_env = "uclibc", + target_env = "newlib", + + target_os = "fuchsia", + target_os = "redox", + target_os = "hurd", + target_os = "cygwin", + ))] { + pub(super) const RTLD_LOCAL: c_int = 0; + } else { + compile_error!( + "Target has no known `RTLD_LOCAL` value. Please submit an issue or PR adding it." + ); + } + } +} + +// Other constants that exist but are not bound because they are platform-specific (non-posix) +// extensions. Some of these constants are only relevant to `dlsym` or `dlmopen` calls. +// +// RTLD_CONFGEN +// RTLD_DEFAULT +// RTLD_DI_CONFIGADDR +// RTLD_DI_LINKMAP +// RTLD_DI_LMID +// RTLD_DI_ORIGIN +// RTLD_DI_PROFILENAME +// RTLD_DI_PROFILEOUT +// RTLD_DI_SERINFO +// RTLD_DI_SERINFOSIZE +// RTLD_DI_TLS_DATA +// RTLD_DI_TLS_MODID +// RTLD_FIRST +// RTLD_GROUP +// RTLD_NEXT +// RTLD_PARENT +// RTLD_PROBE +// RTLD_SELF +// RTLD_WORLD +// RTLD_NODELETE +// RTLD_NOLOAD +// RTLD_DEEPBIND diff --git a/vendor/libloading/src/os/unix/mod.rs b/vendor/libloading/src/os/unix/mod.rs new file mode 100644 index 00000000000000..0e42c50d9b19dd --- /dev/null +++ b/vendor/libloading/src/os/unix/mod.rs @@ -0,0 +1,485 @@ +// A hack for docs.rs to build documentation that has both windows and linux documentation in the +// same rustdoc build visible. 
+#[cfg(all(libloading_docs, not(unix)))] +mod unix_imports {} +#[cfg(any(not(libloading_docs), unix))] +mod unix_imports { + pub(super) use std::os::unix::ffi::OsStrExt; +} + +pub use self::consts::*; +use self::unix_imports::*; +use std::ffi::{CStr, OsStr}; +use std::os::raw; +use std::{fmt, marker, mem, ptr}; +use util::{cstr_cow_from_bytes, ensure_compatible_types}; + +mod consts; + +/// Run code and handle errors reported by `dlerror`. +/// +/// This function first executes the `closure` function containing calls to the functions that +/// report their errors via `dlerror`. This closure may return either `None` or `Some(*)` to +/// further affect operation of this function. +/// +/// In case the `closure` returns `None`, `with_dlerror` inspects the `dlerror`. `dlerror` may +/// decide to not provide any error description, in which case `Err(None)` is returned to the +/// caller. Otherwise the `error` callback is invoked to allow inspection and conversion of the +/// error message. The conversion result is returned as `Err(Some(Error))`. +/// +/// If the operations that report their errors via `dlerror` were all successful, `closure` should +/// return `Some(T)` instead. In this case `dlerror` is not inspected at all. +/// +/// # Notes +/// +/// The whole `dlerror` handling scheme is done via setting and querying some global state. For +/// that reason it is not safe to use dynamic library loading in MT-capable environment at all. +/// Only in POSIX 2008+TC1 a thread-local state was allowed for `dlerror`, making the dl* family of +/// functions possibly MT-safe, depending on the implementation of `dlerror`. +/// +/// In practice (as of 2020-04-01) most of the widely used targets use a thread-local for error +/// state and have been doing so for a long time. +pub fn with_dlerror(closure: F, error: fn(&CStr) -> Error) -> Result> +where + F: FnOnce() -> Option, +{ + // We used to guard all uses of dl* functions with our own mutex. This made them safe to use in + // MT programs provided the only way a program used dl* was via this library. However, it also + // had a number of downsides or cases where it failed to handle the problems. For instance, + // if any other library called `dlerror` internally concurrently with `libloading` things would + // still go awry. + // + // On platforms where `dlerror` is still MT-unsafe, `dlsym` (`Library::get`) can spuriously + // succeed and return a null pointer for a symbol when the actual symbol look-up operation + // fails. Instances where the actual symbol _could_ be `NULL` are platform specific. For + // instance on GNU glibc based-systems (an excerpt from dlsym(3)): + // + // > The value of a symbol returned by dlsym() will never be NULL if the shared object is the + // > result of normal compilation, since a global symbol is never placed at the NULL + // > address. There are nevertheless cases where a lookup using dlsym() may return NULL as the + // > value of a symbol. For example, the symbol value may be the result of a GNU indirect + // > function (IFUNC) resolver function that returns NULL as the resolved value. + + // While we could could call `dlerror` here to clear the previous error value, only the `dlsym` + // call depends on it being cleared beforehand and only in some cases too. We will instead + // clear the error inside the dlsym binding instead. + // + // In all the other cases, clearing the error here will only be hiding misuse of these bindings + // or a bug in implementation of dl* family of functions. 
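+ // In short: run the caller's closure first; only if it returns `None` do we consult
+ // `dlerror` below to decide between returning `Err(None)` and `Err(Some(error(..)))`.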
+ closure().ok_or_else(|| unsafe { + // This code will only get executed if the `closure` returns `None`. + let dlerror_str = dlerror(); + if dlerror_str.is_null() { + // In non-dlsym case this may happen when there’re bugs in our bindings or there’s + // non-libloading user of libdl; possibly in another thread. + None + } else { + // You can’t even rely on error string being static here; call to subsequent dlerror + // may invalidate or overwrite the error message. Why couldn’t they simply give up the + // ownership over the message? + // TODO: should do locale-aware conversion here. OTOH Rust doesn’t seem to work well in + // any system that uses non-utf8 locale, so I doubt there’s a problem here. + Some(error(CStr::from_ptr(dlerror_str))) + // Since we do a copy of the error string above, maybe we should call dlerror again to + // let libdl know it may free its copy of the string now? + } + }) +} + +/// A platform-specific counterpart of the cross-platform [`Library`](crate::Library). +pub struct Library { + handle: *mut raw::c_void, +} + +unsafe impl Send for Library {} + +// That being said... this section in the volume 2 of POSIX.1-2008 states: +// +// > All functions defined by this volume of POSIX.1-2008 shall be thread-safe, except that the +// > following functions need not be thread-safe. +// +// With notable absence of any dl* function other than dlerror in the list. By “this volume” +// I suppose they refer precisely to the “volume 2”. dl* family of functions are specified +// by this same volume, so the conclusion is indeed that dl* functions are required by POSIX +// to be thread-safe. Great! +// +// See for more details: +// +// * https://github.com/nagisa/rust_libloading/pull/17 +// * http://pubs.opengroup.org/onlinepubs/9699919799/functions/V2_chap02.html#tag_15_09_01 +unsafe impl Sync for Library {} + +impl Library { + /// Find and eagerly load a shared library (module). + /// + /// If the `filename` contains a [path separator], the `filename` is interpreted as a `path` to + /// a file. Otherwise, platform-specific algorithms are employed to find a library with a + /// matching file name. + /// + /// This is equivalent to [Library::open](filename, [RTLD_LAZY] | [RTLD_LOCAL]). + /// + /// [path separator]: std::path::MAIN_SEPARATOR + /// + /// # Safety + /// + /// When a library is loaded, initialisation routines contained within the library are executed. + /// For the purposes of safety, the execution of these routines is conceptually the same calling an + /// unknown foreign function and may impose arbitrary requirements on the caller for the call + /// to be sound. + /// + /// Additionally, the callers of this function must also ensure that execution of the + /// termination routines contained within the library is safe as well. These routines may be + /// executed when the library is unloaded. + #[inline] + pub unsafe fn new>(filename: P) -> Result { + Library::open(Some(filename), RTLD_LAZY | RTLD_LOCAL) + } + + /// Load the `Library` representing the current executable. + /// + /// [`Library::get`] calls of the returned `Library` will look for symbols in following + /// locations in order: + /// + /// 1. The original program image; + /// 2. Any executable object files (e.g. shared libraries) loaded at program startup; + /// 3. Any executable object files loaded at runtime (e.g. via other `Library::new` calls or via + /// calls to the `dlopen` function). 
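+ ///
+ /// As an illustrative sketch (not from the upstream libloading documentation; `getpid` is just
+ /// a well-known libc symbol used as the example), a lookup against the current process image
+ /// could look like this:
+ ///
+ /// ```no_run
+ /// use libloading::os::unix::{Library, Symbol};
+ ///
+ /// let this = Library::this();
+ /// // Searches the program image and everything loaded so far, in the order listed above.
+ /// let getpid: Symbol<unsafe extern "C" fn() -> i32> =
+ ///     unsafe { this.get(b"getpid\0").expect("getpid should always be present") };
+ /// let _pid = unsafe { getpid() };
+ /// ```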
+ /// + /// Note that the behaviour of a `Library` loaded with this method is different from that of + /// Libraries loaded with [`os::windows::Library::this`]. + /// + /// This is equivalent to [Library::open](None, [RTLD_LAZY] | [RTLD_LOCAL]). + /// + /// [`os::windows::Library::this`]: crate::os::windows::Library::this + #[inline] + pub fn this() -> Library { + unsafe { + // SAFE: this does not load any new shared library images, no danger in it executing + // initialiser routines. + Library::open(None::<&OsStr>, RTLD_LAZY | RTLD_LOCAL).expect("this should never fail") + } + } + + /// Find and load an executable object file (shared library). + /// + /// See documentation for [`Library::this`] for further description of the behaviour + /// when the `filename` is `None`. Otherwise see [`Library::new`]. + /// + /// Corresponds to `dlopen(filename, flags)`. + /// + /// # Safety + /// + /// When a library is loaded, initialisation routines contained within the library are executed. + /// For the purposes of safety, the execution of these routines is conceptually the same calling an + /// unknown foreign function and may impose arbitrary requirements on the caller for the call + /// to be sound. + /// + /// Additionally, the callers of this function must also ensure that execution of the + /// termination routines contained within the library is safe as well. These routines may be + /// executed when the library is unloaded. + pub unsafe fn open

<P>(filename: Option<P>, flags: raw::c_int) -> Result<Library, crate::Error>

(&self, predicate: P) -> Option + where + P: Fn(Self::Item) -> bool; + /// Get the byte offset from the element's position in the stream + fn slice_index(&self, count: usize) -> Result; +} + +/// Abstracts slicing operations +pub trait InputTake: Sized { + /// Returns a slice of `count` bytes. panics if count > length + fn take(&self, count: usize) -> Self; + /// Split the stream at the `count` byte offset. panics if count > length + fn take_split(&self, count: usize) -> (Self, Self); +} + +impl<'a> InputIter for &'a [u8] { + type Item = u8; + type Iter = Enumerate; + type IterElem = Copied>; + + #[inline] + fn iter_indices(&self) -> Self::Iter { + self.iter_elements().enumerate() + } + #[inline] + fn iter_elements(&self) -> Self::IterElem { + self.iter().copied() + } + #[inline] + fn position

(&self, predicate: P) -> Option + where + P: Fn(Self::Item) -> bool, + { + self.iter().position(|b| predicate(*b)) + } + #[inline] + fn slice_index(&self, count: usize) -> Result { + if self.len() >= count { + Ok(count) + } else { + Err(Needed::new(count - self.len())) + } + } +} + +impl<'a> InputTake for &'a [u8] { + #[inline] + fn take(&self, count: usize) -> Self { + &self[0..count] + } + #[inline] + fn take_split(&self, count: usize) -> (Self, Self) { + let (prefix, suffix) = self.split_at(count); + (suffix, prefix) + } +} + +impl<'a> InputIter for &'a str { + type Item = char; + type Iter = CharIndices<'a>; + type IterElem = Chars<'a>; + #[inline] + fn iter_indices(&self) -> Self::Iter { + self.char_indices() + } + #[inline] + fn iter_elements(&self) -> Self::IterElem { + self.chars() + } + fn position

(&self, predicate: P) -> Option + where + P: Fn(Self::Item) -> bool, + { + for (o, c) in self.char_indices() { + if predicate(c) { + return Some(o); + } + } + None + } + #[inline] + fn slice_index(&self, count: usize) -> Result { + let mut cnt = 0; + for (index, _) in self.char_indices() { + if cnt == count { + return Ok(index); + } + cnt += 1; + } + if cnt == count { + return Ok(self.len()); + } + Err(Needed::Unknown) + } +} + +impl<'a> InputTake for &'a str { + #[inline] + fn take(&self, count: usize) -> Self { + &self[..count] + } + + // return byte index + #[inline] + fn take_split(&self, count: usize) -> (Self, Self) { + let (prefix, suffix) = self.split_at(count); + (suffix, prefix) + } +} + +/// Dummy trait used for default implementations (currently only used for `InputTakeAtPosition` and `Compare`). +/// +/// When implementing a custom input type, it is possible to use directly the +/// default implementation: If the input type implements `InputLength`, `InputIter`, +/// `InputTake` and `Clone`, you can implement `UnspecializedInput` and get +/// a default version of `InputTakeAtPosition` and `Compare`. +/// +/// For performance reasons, you might want to write a custom implementation of +/// `InputTakeAtPosition` (like the one for `&[u8]`). +pub trait UnspecializedInput {} + +/// Methods to take as much input as possible until the provided function returns true for the current element. +/// +/// A large part of nom's basic parsers are built using this trait. +pub trait InputTakeAtPosition: Sized { + /// The current input type is a sequence of that `Item` type. + /// + /// Example: `u8` for `&[u8]` or `char` for `&str` + type Item; + + /// Looks for the first element of the input type for which the condition returns true, + /// and returns the input up to this position. + /// + /// *streaming version*: If no element is found matching the condition, this will return `Incomplete` + fn split_at_position>(&self, predicate: P) -> IResult + where + P: Fn(Self::Item) -> bool; + + /// Looks for the first element of the input type for which the condition returns true + /// and returns the input up to this position. + /// + /// Fails if the produced slice is empty. + /// + /// *streaming version*: If no element is found matching the condition, this will return `Incomplete` + fn split_at_position1>( + &self, + predicate: P, + e: ErrorKind, + ) -> IResult + where + P: Fn(Self::Item) -> bool; + + /// Looks for the first element of the input type for which the condition returns true, + /// and returns the input up to this position. + /// + /// *complete version*: If no element is found matching the condition, this will return the whole input + fn split_at_position_complete>( + &self, + predicate: P, + ) -> IResult + where + P: Fn(Self::Item) -> bool; + + /// Looks for the first element of the input type for which the condition returns true + /// and returns the input up to this position. + /// + /// Fails if the produced slice is empty. 
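+ ///
+ /// As an illustrative sketch (not part of the upstream nom documentation), this is roughly how
+ /// a complete `alpha1`-style parser can be built on top of this method:
+ ///
+ /// ```
+ /// use nom::{error::ErrorKind, IResult, InputTakeAtPosition};
+ ///
+ /// fn alpha(input: &str) -> IResult<&str, &str> {
+ ///     input.split_at_position1_complete(|c| !c.is_alphabetic(), ErrorKind::Alpha)
+ /// }
+ ///
+ /// assert_eq!(alpha("abc123"), Ok(("123", "abc")));
+ /// ```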
+ /// + /// *complete version*: If no element is found matching the condition, this will return the whole input + fn split_at_position1_complete>( + &self, + predicate: P, + e: ErrorKind, + ) -> IResult + where + P: Fn(Self::Item) -> bool; +} + +impl InputTakeAtPosition + for T +{ + type Item = ::Item; + + fn split_at_position>(&self, predicate: P) -> IResult + where + P: Fn(Self::Item) -> bool, + { + match self.position(predicate) { + Some(n) => Ok(self.take_split(n)), + None => Err(Err::Incomplete(Needed::new(1))), + } + } + + fn split_at_position1>( + &self, + predicate: P, + e: ErrorKind, + ) -> IResult + where + P: Fn(Self::Item) -> bool, + { + match self.position(predicate) { + Some(0) => Err(Err::Error(E::from_error_kind(self.clone(), e))), + Some(n) => Ok(self.take_split(n)), + None => Err(Err::Incomplete(Needed::new(1))), + } + } + + fn split_at_position_complete>( + &self, + predicate: P, + ) -> IResult + where + P: Fn(Self::Item) -> bool, + { + match self.split_at_position(predicate) { + Err(Err::Incomplete(_)) => Ok(self.take_split(self.input_len())), + res => res, + } + } + + fn split_at_position1_complete>( + &self, + predicate: P, + e: ErrorKind, + ) -> IResult + where + P: Fn(Self::Item) -> bool, + { + match self.split_at_position1(predicate, e) { + Err(Err::Incomplete(_)) => { + if self.input_len() == 0 { + Err(Err::Error(E::from_error_kind(self.clone(), e))) + } else { + Ok(self.take_split(self.input_len())) + } + } + res => res, + } + } +} + +impl<'a> InputTakeAtPosition for &'a [u8] { + type Item = u8; + + fn split_at_position>(&self, predicate: P) -> IResult + where + P: Fn(Self::Item) -> bool, + { + match self.iter().position(|c| predicate(*c)) { + Some(i) => Ok(self.take_split(i)), + None => Err(Err::Incomplete(Needed::new(1))), + } + } + + fn split_at_position1>( + &self, + predicate: P, + e: ErrorKind, + ) -> IResult + where + P: Fn(Self::Item) -> bool, + { + match self.iter().position(|c| predicate(*c)) { + Some(0) => Err(Err::Error(E::from_error_kind(self, e))), + Some(i) => Ok(self.take_split(i)), + None => Err(Err::Incomplete(Needed::new(1))), + } + } + + fn split_at_position_complete>( + &self, + predicate: P, + ) -> IResult + where + P: Fn(Self::Item) -> bool, + { + match self.iter().position(|c| predicate(*c)) { + Some(i) => Ok(self.take_split(i)), + None => Ok(self.take_split(self.input_len())), + } + } + + fn split_at_position1_complete>( + &self, + predicate: P, + e: ErrorKind, + ) -> IResult + where + P: Fn(Self::Item) -> bool, + { + match self.iter().position(|c| predicate(*c)) { + Some(0) => Err(Err::Error(E::from_error_kind(self, e))), + Some(i) => Ok(self.take_split(i)), + None => { + if self.is_empty() { + Err(Err::Error(E::from_error_kind(self, e))) + } else { + Ok(self.take_split(self.input_len())) + } + } + } + } +} + +impl<'a> InputTakeAtPosition for &'a str { + type Item = char; + + fn split_at_position>(&self, predicate: P) -> IResult + where + P: Fn(Self::Item) -> bool, + { + match self.find(predicate) { + // find() returns a byte index that is already in the slice at a char boundary + Some(i) => unsafe { Ok((self.get_unchecked(i..), self.get_unchecked(..i))) }, + None => Err(Err::Incomplete(Needed::new(1))), + } + } + + fn split_at_position1>( + &self, + predicate: P, + e: ErrorKind, + ) -> IResult + where + P: Fn(Self::Item) -> bool, + { + match self.find(predicate) { + Some(0) => Err(Err::Error(E::from_error_kind(self, e))), + // find() returns a byte index that is already in the slice at a char boundary + Some(i) => unsafe { 
Ok((self.get_unchecked(i..), self.get_unchecked(..i))) }, + None => Err(Err::Incomplete(Needed::new(1))), + } + } + + fn split_at_position_complete>( + &self, + predicate: P, + ) -> IResult + where + P: Fn(Self::Item) -> bool, + { + match self.find(predicate) { + // find() returns a byte index that is already in the slice at a char boundary + Some(i) => unsafe { Ok((self.get_unchecked(i..), self.get_unchecked(..i))) }, + // the end of slice is a char boundary + None => unsafe { + Ok(( + self.get_unchecked(self.len()..), + self.get_unchecked(..self.len()), + )) + }, + } + } + + fn split_at_position1_complete>( + &self, + predicate: P, + e: ErrorKind, + ) -> IResult + where + P: Fn(Self::Item) -> bool, + { + match self.find(predicate) { + Some(0) => Err(Err::Error(E::from_error_kind(self, e))), + // find() returns a byte index that is already in the slice at a char boundary + Some(i) => unsafe { Ok((self.get_unchecked(i..), self.get_unchecked(..i))) }, + None => { + if self.is_empty() { + Err(Err::Error(E::from_error_kind(self, e))) + } else { + // the end of slice is a char boundary + unsafe { + Ok(( + self.get_unchecked(self.len()..), + self.get_unchecked(..self.len()), + )) + } + } + } + } + } +} + +/// Indicates whether a comparison was successful, an error, or +/// if more data was needed +#[derive(Debug, PartialEq)] +pub enum CompareResult { + /// Comparison was successful + Ok, + /// We need more data to be sure + Incomplete, + /// Comparison failed + Error, +} + +/// Abstracts comparison operations +pub trait Compare { + /// Compares self to another value for equality + fn compare(&self, t: T) -> CompareResult; + /// Compares self to another value for equality + /// independently of the case. + /// + /// Warning: for `&str`, the comparison is done + /// by lowercasing both strings and comparing + /// the result. 
This is a temporary solution until + /// a better one appears + fn compare_no_case(&self, t: T) -> CompareResult; +} + +fn lowercase_byte(c: u8) -> u8 { + match c { + b'A'..=b'Z' => c - b'A' + b'a', + _ => c, + } +} + +impl<'a, 'b> Compare<&'b [u8]> for &'a [u8] { + #[inline(always)] + fn compare(&self, t: &'b [u8]) -> CompareResult { + let pos = self.iter().zip(t.iter()).position(|(a, b)| a != b); + + match pos { + Some(_) => CompareResult::Error, + None => { + if self.len() >= t.len() { + CompareResult::Ok + } else { + CompareResult::Incomplete + } + } + } + + /* + let len = self.len(); + let blen = t.len(); + let m = if len < blen { len } else { blen }; + let reduced = &self[..m]; + let b = &t[..m]; + + if reduced != b { + CompareResult::Error + } else if m < blen { + CompareResult::Incomplete + } else { + CompareResult::Ok + } + */ + } + + #[inline(always)] + fn compare_no_case(&self, t: &'b [u8]) -> CompareResult { + if self + .iter() + .zip(t) + .any(|(a, b)| lowercase_byte(*a) != lowercase_byte(*b)) + { + CompareResult::Error + } else if self.len() < t.len() { + CompareResult::Incomplete + } else { + CompareResult::Ok + } + } +} + +impl< + T: InputLength + InputIter + InputTake + UnspecializedInput, + O: InputLength + InputIter + InputTake, + > Compare for T +{ + #[inline(always)] + fn compare(&self, t: O) -> CompareResult { + let pos = self + .iter_elements() + .zip(t.iter_elements()) + .position(|(a, b)| a != b); + + match pos { + Some(_) => CompareResult::Error, + None => { + if self.input_len() >= t.input_len() { + CompareResult::Ok + } else { + CompareResult::Incomplete + } + } + } + } + + #[inline(always)] + fn compare_no_case(&self, t: O) -> CompareResult { + if self + .iter_elements() + .zip(t.iter_elements()) + .any(|(a, b)| lowercase_byte(a) != lowercase_byte(b)) + { + CompareResult::Error + } else if self.input_len() < t.input_len() { + CompareResult::Incomplete + } else { + CompareResult::Ok + } + } +} + +impl<'a, 'b> Compare<&'b str> for &'a [u8] { + #[inline(always)] + fn compare(&self, t: &'b str) -> CompareResult { + self.compare(AsBytes::as_bytes(t)) + } + #[inline(always)] + fn compare_no_case(&self, t: &'b str) -> CompareResult { + self.compare_no_case(AsBytes::as_bytes(t)) + } +} + +impl<'a, 'b> Compare<&'b str> for &'a str { + #[inline(always)] + fn compare(&self, t: &'b str) -> CompareResult { + self.as_bytes().compare(t.as_bytes()) + } + + //FIXME: this version is too simple and does not use the current locale + #[inline(always)] + fn compare_no_case(&self, t: &'b str) -> CompareResult { + let pos = self + .chars() + .zip(t.chars()) + .position(|(a, b)| a.to_lowercase().ne(b.to_lowercase())); + + match pos { + Some(_) => CompareResult::Error, + None => { + if self.len() >= t.len() { + CompareResult::Ok + } else { + CompareResult::Incomplete + } + } + } + } +} + +impl<'a, 'b> Compare<&'b [u8]> for &'a str { + #[inline(always)] + fn compare(&self, t: &'b [u8]) -> CompareResult { + AsBytes::as_bytes(self).compare(t) + } + #[inline(always)] + fn compare_no_case(&self, t: &'b [u8]) -> CompareResult { + AsBytes::as_bytes(self).compare_no_case(t) + } +} + +/// Look for a token in self +pub trait FindToken { + /// Returns true if self contains the token + fn find_token(&self, token: T) -> bool; +} + +impl<'a> FindToken for &'a [u8] { + fn find_token(&self, token: u8) -> bool { + memchr::memchr(token, self).is_some() + } +} + +impl<'a> FindToken for &'a str { + fn find_token(&self, token: u8) -> bool { + self.as_bytes().find_token(token) + } +} + +impl<'a, 'b> 
FindToken<&'a u8> for &'b [u8] { + fn find_token(&self, token: &u8) -> bool { + self.find_token(*token) + } +} + +impl<'a, 'b> FindToken<&'a u8> for &'b str { + fn find_token(&self, token: &u8) -> bool { + self.as_bytes().find_token(token) + } +} + +impl<'a> FindToken for &'a [u8] { + fn find_token(&self, token: char) -> bool { + self.iter().any(|i| *i == token as u8) + } +} + +impl<'a> FindToken for &'a str { + fn find_token(&self, token: char) -> bool { + self.chars().any(|i| i == token) + } +} + +impl<'a> FindToken for &'a [char] { + fn find_token(&self, token: char) -> bool { + self.iter().any(|i| *i == token) + } +} + +impl<'a, 'b> FindToken<&'a char> for &'b [char] { + fn find_token(&self, token: &char) -> bool { + self.find_token(*token) + } +} + +/// Look for a substring in self +pub trait FindSubstring { + /// Returns the byte position of the substring if it is found + fn find_substring(&self, substr: T) -> Option; +} + +impl<'a, 'b> FindSubstring<&'b [u8]> for &'a [u8] { + fn find_substring(&self, substr: &'b [u8]) -> Option { + if substr.len() > self.len() { + return None; + } + + let (&substr_first, substr_rest) = match substr.split_first() { + Some(split) => split, + // an empty substring is found at position 0 + // This matches the behavior of str.find(""). + None => return Some(0), + }; + + if substr_rest.is_empty() { + return memchr::memchr(substr_first, self); + } + + let mut offset = 0; + let haystack = &self[..self.len() - substr_rest.len()]; + + while let Some(position) = memchr::memchr(substr_first, &haystack[offset..]) { + offset += position; + let next_offset = offset + 1; + if &self[next_offset..][..substr_rest.len()] == substr_rest { + return Some(offset); + } + + offset = next_offset; + } + + None + } +} + +impl<'a, 'b> FindSubstring<&'b str> for &'a [u8] { + fn find_substring(&self, substr: &'b str) -> Option { + self.find_substring(AsBytes::as_bytes(substr)) + } +} + +impl<'a, 'b> FindSubstring<&'b str> for &'a str { + //returns byte index + fn find_substring(&self, substr: &'b str) -> Option { + self.find(substr) + } +} + +/// Used to integrate `str`'s `parse()` method +pub trait ParseTo { + /// Succeeds if `parse()` succeeded. The byte slice implementation + /// will first convert it to a `&str`, then apply the `parse()` function + fn parse_to(&self) -> Option; +} + +impl<'a, R: FromStr> ParseTo for &'a [u8] { + fn parse_to(&self) -> Option { + from_utf8(self).ok().and_then(|s| s.parse().ok()) + } +} + +impl<'a, R: FromStr> ParseTo for &'a str { + fn parse_to(&self) -> Option { + self.parse().ok() + } +} + +/// Slicing operations using ranges. +/// +/// This trait is loosely based on +/// `Index`, but can actually return +/// something else than a `&[T]` or `&str` +pub trait Slice { + /// Slices self according to the range argument + fn slice(&self, range: R) -> Self; +} + +macro_rules! impl_fn_slice { + ( $ty:ty ) => { + fn slice(&self, range: $ty) -> Self { + &self[range] + } + }; +} + +macro_rules! slice_range_impl { + ( [ $for_type:ident ], $ty:ty ) => { + impl<'a, $for_type> Slice<$ty> for &'a [$for_type] { + impl_fn_slice!($ty); + } + }; + ( $for_type:ty, $ty:ty ) => { + impl<'a> Slice<$ty> for &'a $for_type { + impl_fn_slice!($ty); + } + }; +} + +macro_rules! slice_ranges_impl { + ( [ $for_type:ident ] ) => { + slice_range_impl! {[$for_type], Range} + slice_range_impl! {[$for_type], RangeTo} + slice_range_impl! {[$for_type], RangeFrom} + slice_range_impl! {[$for_type], RangeFull} + }; + ( $for_type:ty ) => { + slice_range_impl! 
{$for_type, Range} + slice_range_impl! {$for_type, RangeTo} + slice_range_impl! {$for_type, RangeFrom} + slice_range_impl! {$for_type, RangeFull} + }; +} + +slice_ranges_impl! {str} +slice_ranges_impl! {[T]} + +macro_rules! array_impls { + ($($N:expr)+) => { + $( + impl InputLength for [u8; $N] { + #[inline] + fn input_len(&self) -> usize { + self.len() + } + } + + impl<'a> InputLength for &'a [u8; $N] { + #[inline] + fn input_len(&self) -> usize { + self.len() + } + } + + impl<'a> InputIter for &'a [u8; $N] { + type Item = u8; + type Iter = Enumerate; + type IterElem = Copied>; + + fn iter_indices(&self) -> Self::Iter { + (&self[..]).iter_indices() + } + + fn iter_elements(&self) -> Self::IterElem { + (&self[..]).iter_elements() + } + + fn position

(&self, predicate: P) -> Option + where P: Fn(Self::Item) -> bool { + (&self[..]).position(predicate) + } + + fn slice_index(&self, count: usize) -> Result { + (&self[..]).slice_index(count) + } + } + + impl<'a> Compare<[u8; $N]> for &'a [u8] { + #[inline(always)] + fn compare(&self, t: [u8; $N]) -> CompareResult { + self.compare(&t[..]) + } + + #[inline(always)] + fn compare_no_case(&self, t: [u8;$N]) -> CompareResult { + self.compare_no_case(&t[..]) + } + } + + impl<'a,'b> Compare<&'b [u8; $N]> for &'a [u8] { + #[inline(always)] + fn compare(&self, t: &'b [u8; $N]) -> CompareResult { + self.compare(&t[..]) + } + + #[inline(always)] + fn compare_no_case(&self, t: &'b [u8;$N]) -> CompareResult { + self.compare_no_case(&t[..]) + } + } + + impl FindToken for [u8; $N] { + fn find_token(&self, token: u8) -> bool { + memchr::memchr(token, &self[..]).is_some() + } + } + + impl<'a> FindToken<&'a u8> for [u8; $N] { + fn find_token(&self, token: &u8) -> bool { + self.find_token(*token) + } + } + )+ + }; +} + +array_impls! { + 0 1 2 3 4 5 6 7 8 9 + 10 11 12 13 14 15 16 17 18 19 + 20 21 22 23 24 25 26 27 28 29 + 30 31 32 +} + +/// Abstracts something which can extend an `Extend`. +/// Used to build modified input slices in `escaped_transform` +pub trait ExtendInto { + /// The current input type is a sequence of that `Item` type. + /// + /// Example: `u8` for `&[u8]` or `char` for `&str` + type Item; + + /// The type that will be produced + type Extender; + + /// Create a new `Extend` of the correct type + fn new_builder(&self) -> Self::Extender; + /// Accumulate the input into an accumulator + fn extend_into(&self, acc: &mut Self::Extender); +} + +#[cfg(feature = "alloc")] +impl ExtendInto for [u8] { + type Item = u8; + type Extender = Vec; + + #[inline] + fn new_builder(&self) -> Vec { + Vec::new() + } + #[inline] + fn extend_into(&self, acc: &mut Vec) { + acc.extend(self.iter().cloned()); + } +} + +#[cfg(feature = "alloc")] +impl ExtendInto for &[u8] { + type Item = u8; + type Extender = Vec; + + #[inline] + fn new_builder(&self) -> Vec { + Vec::new() + } + #[inline] + fn extend_into(&self, acc: &mut Vec) { + acc.extend_from_slice(self); + } +} + +#[cfg(feature = "alloc")] +impl ExtendInto for str { + type Item = char; + type Extender = String; + + #[inline] + fn new_builder(&self) -> String { + String::new() + } + #[inline] + fn extend_into(&self, acc: &mut String) { + acc.push_str(self); + } +} + +#[cfg(feature = "alloc")] +impl ExtendInto for &str { + type Item = char; + type Extender = String; + + #[inline] + fn new_builder(&self) -> String { + String::new() + } + #[inline] + fn extend_into(&self, acc: &mut String) { + acc.push_str(self); + } +} + +#[cfg(feature = "alloc")] +impl ExtendInto for char { + type Item = char; + type Extender = String; + + #[inline] + fn new_builder(&self) -> String { + String::new() + } + #[inline] + fn extend_into(&self, acc: &mut String) { + acc.push(*self); + } +} + +/// Helper trait to convert numbers to usize. +/// +/// By default, usize implements `From` and `From` but not +/// `From` and `From` because that would be invalid on some +/// platforms. 
This trait implements the conversion for platforms +/// with 32 and 64 bits pointer platforms +pub trait ToUsize { + /// converts self to usize + fn to_usize(&self) -> usize; +} + +impl ToUsize for u8 { + #[inline] + fn to_usize(&self) -> usize { + *self as usize + } +} + +impl ToUsize for u16 { + #[inline] + fn to_usize(&self) -> usize { + *self as usize + } +} + +impl ToUsize for usize { + #[inline] + fn to_usize(&self) -> usize { + *self + } +} + +#[cfg(any(target_pointer_width = "32", target_pointer_width = "64"))] +impl ToUsize for u32 { + #[inline] + fn to_usize(&self) -> usize { + *self as usize + } +} + +#[cfg(target_pointer_width = "64")] +impl ToUsize for u64 { + #[inline] + fn to_usize(&self) -> usize { + *self as usize + } +} + +/// Equivalent From implementation to avoid orphan rules in bits parsers +pub trait ErrorConvert { + /// Transform to another error type + fn convert(self) -> E; +} + +impl ErrorConvert<(I, ErrorKind)> for ((I, usize), ErrorKind) { + fn convert(self) -> (I, ErrorKind) { + ((self.0).0, self.1) + } +} + +impl ErrorConvert<((I, usize), ErrorKind)> for (I, ErrorKind) { + fn convert(self) -> ((I, usize), ErrorKind) { + ((self.0, 0), self.1) + } +} + +use crate::error; +impl ErrorConvert> for error::Error<(I, usize)> { + fn convert(self) -> error::Error { + error::Error { + input: self.input.0, + code: self.code, + } + } +} + +impl ErrorConvert> for error::Error { + fn convert(self) -> error::Error<(I, usize)> { + error::Error { + input: (self.input, 0), + code: self.code, + } + } +} + +#[cfg(feature = "alloc")] +#[cfg_attr(feature = "docsrs", doc(cfg(feature = "alloc")))] +impl ErrorConvert> for error::VerboseError<(I, usize)> { + fn convert(self) -> error::VerboseError { + error::VerboseError { + errors: self.errors.into_iter().map(|(i, e)| (i.0, e)).collect(), + } + } +} + +#[cfg(feature = "alloc")] +#[cfg_attr(feature = "docsrs", doc(cfg(feature = "alloc")))] +impl ErrorConvert> for error::VerboseError { + fn convert(self) -> error::VerboseError<(I, usize)> { + error::VerboseError { + errors: self.errors.into_iter().map(|(i, e)| ((i, 0), e)).collect(), + } + } +} + +impl ErrorConvert<()> for () { + fn convert(self) {} +} + +#[cfg(feature = "std")] +#[cfg_attr(feature = "docsrs", doc(cfg(feature = "std")))] +/// Helper trait to show a byte slice as a hex dump +pub trait HexDisplay { + /// Converts the value of `self` to a hex dump, returning the owned + /// `String`. + fn to_hex(&self, chunk_size: usize) -> String; + + /// Converts the value of `self` to a hex dump beginning at `from` address, returning the owned + /// `String`. 
+ fn to_hex_from(&self, chunk_size: usize, from: usize) -> String; +} + +#[cfg(feature = "std")] +static CHARS: &[u8] = b"0123456789abcdef"; + +#[cfg(feature = "std")] +impl HexDisplay for [u8] { + #[allow(unused_variables)] + fn to_hex(&self, chunk_size: usize) -> String { + self.to_hex_from(chunk_size, 0) + } + + #[allow(unused_variables)] + fn to_hex_from(&self, chunk_size: usize, from: usize) -> String { + let mut v = Vec::with_capacity(self.len() * 3); + let mut i = from; + for chunk in self.chunks(chunk_size) { + let s = format!("{:08x}", i); + for &ch in s.as_bytes().iter() { + v.push(ch); + } + v.push(b'\t'); + + i += chunk_size; + + for &byte in chunk { + v.push(CHARS[(byte >> 4) as usize]); + v.push(CHARS[(byte & 0xf) as usize]); + v.push(b' '); + } + if chunk_size > chunk.len() { + for j in 0..(chunk_size - chunk.len()) { + v.push(b' '); + v.push(b' '); + v.push(b' '); + } + } + v.push(b'\t'); + + for &byte in chunk { + if (byte >= 32 && byte <= 126) || byte >= 128 { + v.push(byte); + } else { + v.push(b'.'); + } + } + v.push(b'\n'); + } + + String::from_utf8_lossy(&v[..]).into_owned() + } +} + +#[cfg(feature = "std")] +impl HexDisplay for str { + #[allow(unused_variables)] + fn to_hex(&self, chunk_size: usize) -> String { + self.to_hex_from(chunk_size, 0) + } + + #[allow(unused_variables)] + fn to_hex_from(&self, chunk_size: usize, from: usize) -> String { + self.as_bytes().to_hex_from(chunk_size, from) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_offset_u8() { + let s = b"abcd123"; + let a = &s[..]; + let b = &a[2..]; + let c = &a[..4]; + let d = &a[3..5]; + assert_eq!(a.offset(b), 2); + assert_eq!(a.offset(c), 0); + assert_eq!(a.offset(d), 3); + } + + #[test] + fn test_offset_str() { + let s = "abcřèÂßÇd123"; + let a = &s[..]; + let b = &a[7..]; + let c = &a[..5]; + let d = &a[5..9]; + assert_eq!(a.offset(b), 7); + assert_eq!(a.offset(c), 0); + assert_eq!(a.offset(d), 5); + } +} diff --git a/vendor/nom/tests/arithmetic.rs b/vendor/nom/tests/arithmetic.rs new file mode 100644 index 00000000000000..5b627a97a5c47c --- /dev/null +++ b/vendor/nom/tests/arithmetic.rs @@ -0,0 +1,94 @@ +use nom::{ + branch::alt, + bytes::complete::tag, + character::complete::char, + character::complete::{digit1 as digit, space0 as space}, + combinator::map_res, + multi::fold_many0, + sequence::{delimited, pair}, + IResult, +}; + +// Parser definition + +use std::str::FromStr; + +// We parse any expr surrounded by parens, ignoring all whitespaces around those +fn parens(i: &str) -> IResult<&str, i64> { + delimited(space, delimited(tag("("), expr, tag(")")), space)(i) +} + +// We transform an integer string into a i64, ignoring surrounding whitespaces +// We look for a digit suite, and try to convert it. 
+// If either str::from_utf8 or FromStr::from_str fail, +// we fallback to the parens parser defined above +fn factor(i: &str) -> IResult<&str, i64> { + alt(( + map_res(delimited(space, digit, space), FromStr::from_str), + parens, + ))(i) +} + +// We read an initial factor and for each time we find +// a * or / operator followed by another factor, we do +// the math by folding everything +fn term(i: &str) -> IResult<&str, i64> { + let (i, init) = factor(i)?; + + fold_many0( + pair(alt((char('*'), char('/'))), factor), + move || init, + |acc, (op, val): (char, i64)| { + if op == '*' { + acc * val + } else { + acc / val + } + }, + )(i) +} + +fn expr(i: &str) -> IResult<&str, i64> { + let (i, init) = term(i)?; + + fold_many0( + pair(alt((char('+'), char('-'))), term), + move || init, + |acc, (op, val): (char, i64)| { + if op == '+' { + acc + val + } else { + acc - val + } + }, + )(i) +} + +#[test] +fn factor_test() { + assert_eq!(factor("3"), Ok(("", 3))); + assert_eq!(factor(" 12"), Ok(("", 12))); + assert_eq!(factor("537 "), Ok(("", 537))); + assert_eq!(factor(" 24 "), Ok(("", 24))); +} + +#[test] +fn term_test() { + assert_eq!(term(" 12 *2 / 3"), Ok(("", 8))); + assert_eq!(term(" 2* 3 *2 *2 / 3"), Ok(("", 8))); + assert_eq!(term(" 48 / 3/2"), Ok(("", 8))); +} + +#[test] +fn expr_test() { + assert_eq!(expr(" 1 + 2 "), Ok(("", 3))); + assert_eq!(expr(" 12 + 6 - 4+ 3"), Ok(("", 17))); + assert_eq!(expr(" 1 + 2*3 + 4"), Ok(("", 11))); +} + +#[test] +fn parens_test() { + assert_eq!(expr(" ( 2 )"), Ok(("", 2))); + assert_eq!(expr(" 2* ( 3 + 4 ) "), Ok(("", 14))); + assert_eq!(expr(" 2*2 / ( 5 - 1) + 3"), Ok(("", 4))); +} diff --git a/vendor/nom/tests/arithmetic_ast.rs b/vendor/nom/tests/arithmetic_ast.rs new file mode 100644 index 00000000000000..ca1511096099a7 --- /dev/null +++ b/vendor/nom/tests/arithmetic_ast.rs @@ -0,0 +1,161 @@ +use std::fmt; +use std::fmt::{Debug, Display, Formatter}; + +use std::str::FromStr; + +use nom::{ + branch::alt, + bytes::complete::tag, + character::complete::{digit1 as digit, multispace0 as multispace}, + combinator::{map, map_res}, + multi::many0, + sequence::{delimited, preceded}, + IResult, +}; + +pub enum Expr { + Value(i64), + Add(Box, Box), + Sub(Box, Box), + Mul(Box, Box), + Div(Box, Box), + Paren(Box), +} + +#[derive(Debug)] +pub enum Oper { + Add, + Sub, + Mul, + Div, +} + +impl Display for Expr { + fn fmt(&self, format: &mut Formatter<'_>) -> fmt::Result { + use self::Expr::*; + match *self { + Value(val) => write!(format, "{}", val), + Add(ref left, ref right) => write!(format, "{} + {}", left, right), + Sub(ref left, ref right) => write!(format, "{} - {}", left, right), + Mul(ref left, ref right) => write!(format, "{} * {}", left, right), + Div(ref left, ref right) => write!(format, "{} / {}", left, right), + Paren(ref expr) => write!(format, "({})", expr), + } + } +} + +impl Debug for Expr { + fn fmt(&self, format: &mut Formatter<'_>) -> fmt::Result { + use self::Expr::*; + match *self { + Value(val) => write!(format, "{}", val), + Add(ref left, ref right) => write!(format, "({:?} + {:?})", left, right), + Sub(ref left, ref right) => write!(format, "({:?} - {:?})", left, right), + Mul(ref left, ref right) => write!(format, "({:?} * {:?})", left, right), + Div(ref left, ref right) => write!(format, "({:?} / {:?})", left, right), + Paren(ref expr) => write!(format, "[{:?}]", expr), + } + } +} + +fn parens(i: &str) -> IResult<&str, Expr> { + delimited( + multispace, + delimited(tag("("), map(expr, |e| Expr::Paren(Box::new(e))), tag(")")), + multispace, 
+ )(i) +} + +fn factor(i: &str) -> IResult<&str, Expr> { + alt(( + map( + map_res(delimited(multispace, digit, multispace), FromStr::from_str), + Expr::Value, + ), + parens, + ))(i) +} + +fn fold_exprs(initial: Expr, remainder: Vec<(Oper, Expr)>) -> Expr { + remainder.into_iter().fold(initial, |acc, pair| { + let (oper, expr) = pair; + match oper { + Oper::Add => Expr::Add(Box::new(acc), Box::new(expr)), + Oper::Sub => Expr::Sub(Box::new(acc), Box::new(expr)), + Oper::Mul => Expr::Mul(Box::new(acc), Box::new(expr)), + Oper::Div => Expr::Div(Box::new(acc), Box::new(expr)), + } + }) +} + +fn term(i: &str) -> IResult<&str, Expr> { + let (i, initial) = factor(i)?; + let (i, remainder) = many0(alt(( + |i| { + let (i, mul) = preceded(tag("*"), factor)(i)?; + Ok((i, (Oper::Mul, mul))) + }, + |i| { + let (i, div) = preceded(tag("/"), factor)(i)?; + Ok((i, (Oper::Div, div))) + }, + )))(i)?; + + Ok((i, fold_exprs(initial, remainder))) +} + +fn expr(i: &str) -> IResult<&str, Expr> { + let (i, initial) = term(i)?; + let (i, remainder) = many0(alt(( + |i| { + let (i, add) = preceded(tag("+"), term)(i)?; + Ok((i, (Oper::Add, add))) + }, + |i| { + let (i, sub) = preceded(tag("-"), term)(i)?; + Ok((i, (Oper::Sub, sub))) + }, + )))(i)?; + + Ok((i, fold_exprs(initial, remainder))) +} + +#[test] +fn factor_test() { + assert_eq!( + factor(" 3 ").map(|(i, x)| (i, format!("{:?}", x))), + Ok(("", String::from("3"))) + ); +} + +#[test] +fn term_test() { + assert_eq!( + term(" 3 * 5 ").map(|(i, x)| (i, format!("{:?}", x))), + Ok(("", String::from("(3 * 5)"))) + ); +} + +#[test] +fn expr_test() { + assert_eq!( + expr(" 1 + 2 * 3 ").map(|(i, x)| (i, format!("{:?}", x))), + Ok(("", String::from("(1 + (2 * 3))"))) + ); + assert_eq!( + expr(" 1 + 2 * 3 / 4 - 5 ").map(|(i, x)| (i, format!("{:?}", x))), + Ok(("", String::from("((1 + ((2 * 3) / 4)) - 5)"))) + ); + assert_eq!( + expr(" 72 / 2 / 3 ").map(|(i, x)| (i, format!("{:?}", x))), + Ok(("", String::from("((72 / 2) / 3)"))) + ); +} + +#[test] +fn parens_test() { + assert_eq!( + expr(" ( 1 + 2 ) * 3 ").map(|(i, x)| (i, format!("{:?}", x))), + Ok(("", String::from("([(1 + 2)] * 3)"))) + ); +} diff --git a/vendor/nom/tests/css.rs b/vendor/nom/tests/css.rs new file mode 100644 index 00000000000000..ad3d72b8fae22a --- /dev/null +++ b/vendor/nom/tests/css.rs @@ -0,0 +1,45 @@ +use nom::bytes::complete::{tag, take_while_m_n}; +use nom::combinator::map_res; +use nom::sequence::tuple; +use nom::IResult; + +#[derive(Debug, PartialEq)] +pub struct Color { + pub red: u8, + pub green: u8, + pub blue: u8, +} + +fn from_hex(input: &str) -> Result { + u8::from_str_radix(input, 16) +} + +fn is_hex_digit(c: char) -> bool { + c.is_digit(16) +} + +fn hex_primary(input: &str) -> IResult<&str, u8> { + map_res(take_while_m_n(2, 2, is_hex_digit), from_hex)(input) +} + +fn hex_color(input: &str) -> IResult<&str, Color> { + let (input, _) = tag("#")(input)?; + let (input, (red, green, blue)) = tuple((hex_primary, hex_primary, hex_primary))(input)?; + + Ok((input, Color { red, green, blue })) +} + +#[test] +fn parse_color() { + assert_eq!( + hex_color("#2F14DF"), + Ok(( + "", + Color { + red: 47, + green: 20, + blue: 223, + } + )) + ); +} diff --git a/vendor/nom/tests/custom_errors.rs b/vendor/nom/tests/custom_errors.rs new file mode 100644 index 00000000000000..2021713341a205 --- /dev/null +++ b/vendor/nom/tests/custom_errors.rs @@ -0,0 +1,48 @@ +#![allow(dead_code)] + +use nom::bytes::streaming::tag; +use nom::character::streaming::digit1 as digit; +use nom::combinator::verify; +use 
nom::error::{ErrorKind, ParseError}; +#[cfg(feature = "alloc")] +use nom::multi::count; +use nom::sequence::terminated; +use nom::IResult; + +#[derive(Debug)] +pub struct CustomError(String); + +impl<'a> From<(&'a str, ErrorKind)> for CustomError { + fn from(error: (&'a str, ErrorKind)) -> Self { + CustomError(format!("error code was: {:?}", error)) + } +} + +impl<'a> ParseError<&'a str> for CustomError { + fn from_error_kind(_: &'a str, kind: ErrorKind) -> Self { + CustomError(format!("error code was: {:?}", kind)) + } + + fn append(_: &'a str, kind: ErrorKind, other: CustomError) -> Self { + CustomError(format!("{:?}\nerror code was: {:?}", other, kind)) + } +} + +fn test1(input: &str) -> IResult<&str, &str, CustomError> { + //fix_error!(input, CustomError, tag!("abcd")) + tag("abcd")(input) +} + +fn test2(input: &str) -> IResult<&str, &str, CustomError> { + //terminated!(input, test1, fix_error!(CustomError, digit)) + terminated(test1, digit)(input) +} + +fn test3(input: &str) -> IResult<&str, &str, CustomError> { + verify(test1, |s: &str| s.starts_with("abcd"))(input) +} + +#[cfg(feature = "alloc")] +fn test4(input: &str) -> IResult<&str, Vec<&str>, CustomError> { + count(test1, 4)(input) +} diff --git a/vendor/nom/tests/escaped.rs b/vendor/nom/tests/escaped.rs new file mode 100644 index 00000000000000..47c6a71e52613b --- /dev/null +++ b/vendor/nom/tests/escaped.rs @@ -0,0 +1,28 @@ +use nom::bytes::complete::escaped; +use nom::character::complete::digit1; +use nom::character::complete::one_of; +use nom::{error::ErrorKind, Err, IResult}; + +fn esc(s: &str) -> IResult<&str, &str, (&str, ErrorKind)> { + escaped(digit1, '\\', one_of("\"n\\"))(s) +} + +#[cfg(feature = "alloc")] +fn esc_trans(s: &str) -> IResult<&str, String, (&str, ErrorKind)> { + use nom::bytes::complete::{escaped_transform, tag}; + escaped_transform(digit1, '\\', tag("n"))(s) +} + +#[test] +fn test_escaped() { + assert_eq!(esc("abcd"), Err(Err::Error(("abcd", ErrorKind::Escaped)))); +} + +#[test] +#[cfg(feature = "alloc")] +fn test_escaped_transform() { + assert_eq!( + esc_trans("abcd"), + Err(Err::Error(("abcd", ErrorKind::EscapedTransform))) + ); +} diff --git a/vendor/nom/tests/float.rs b/vendor/nom/tests/float.rs new file mode 100644 index 00000000000000..634b189899bfba --- /dev/null +++ b/vendor/nom/tests/float.rs @@ -0,0 +1,46 @@ +use nom::branch::alt; +use nom::bytes::complete::tag; +use nom::character::streaming::digit1 as digit; +use nom::combinator::{map, map_res, opt, recognize}; +use nom::sequence::{delimited, pair}; +use nom::IResult; + +use std::str; +use std::str::FromStr; + +fn unsigned_float(i: &[u8]) -> IResult<&[u8], f32> { + let float_bytes = recognize(alt(( + delimited(digit, tag("."), opt(digit)), + delimited(opt(digit), tag("."), digit), + ))); + let float_str = map_res(float_bytes, str::from_utf8); + map_res(float_str, FromStr::from_str)(i) +} + +fn float(i: &[u8]) -> IResult<&[u8], f32> { + map( + pair(opt(alt((tag("+"), tag("-")))), unsigned_float), + |(sign, value)| { + sign + .and_then(|s| if s[0] == b'-' { Some(-1f32) } else { None }) + .unwrap_or(1f32) + * value + }, + )(i) +} + +#[test] +fn unsigned_float_test() { + assert_eq!(unsigned_float(&b"123.456;"[..]), Ok((&b";"[..], 123.456))); + assert_eq!(unsigned_float(&b"0.123;"[..]), Ok((&b";"[..], 0.123))); + assert_eq!(unsigned_float(&b"123.0;"[..]), Ok((&b";"[..], 123.0))); + assert_eq!(unsigned_float(&b"123.;"[..]), Ok((&b";"[..], 123.0))); + assert_eq!(unsigned_float(&b".123;"[..]), Ok((&b";"[..], 0.123))); +} + +#[test] +fn float_test() { 
+ assert_eq!(float(&b"123.456;"[..]), Ok((&b";"[..], 123.456))); + assert_eq!(float(&b"+123.456;"[..]), Ok((&b";"[..], 123.456))); + assert_eq!(float(&b"-123.456;"[..]), Ok((&b";"[..], -123.456))); +} diff --git a/vendor/nom/tests/fnmut.rs b/vendor/nom/tests/fnmut.rs new file mode 100644 index 00000000000000..b1486cbe636c62 --- /dev/null +++ b/vendor/nom/tests/fnmut.rs @@ -0,0 +1,39 @@ +use nom::{ + bytes::complete::tag, + multi::{many0, many0_count}, +}; + +#[test] +fn parse() { + let mut counter = 0; + + let res = { + let mut parser = many0::<_, _, (), _>(|i| { + counter += 1; + tag("abc")(i) + }); + + parser("abcabcabcabc").unwrap() + }; + + println!("res: {:?}", res); + assert_eq!(counter, 5); +} + +#[test] +fn accumulate() { + let mut v = Vec::new(); + + let (_, count) = { + let mut parser = many0_count::<_, _, (), _>(|i| { + let (i, o) = tag("abc")(i)?; + v.push(o); + Ok((i, ())) + }); + parser("abcabcabcabc").unwrap() + }; + + println!("v: {:?}", v); + assert_eq!(count, 4); + assert_eq!(v.len(), 4); +} diff --git a/vendor/nom/tests/ini.rs b/vendor/nom/tests/ini.rs new file mode 100644 index 00000000000000..e556f44a3c073d --- /dev/null +++ b/vendor/nom/tests/ini.rs @@ -0,0 +1,207 @@ +use nom::{ + bytes::complete::take_while, + character::complete::{ + alphanumeric1 as alphanumeric, char, multispace0 as multispace, space0 as space, + }, + combinator::{map, map_res, opt}, + multi::many0, + sequence::{delimited, pair, separated_pair, terminated, tuple}, + IResult, +}; + +use std::collections::HashMap; +use std::str; + +fn category(i: &[u8]) -> IResult<&[u8], &str> { + map_res( + delimited(char('['), take_while(|c| c != b']'), char(']')), + str::from_utf8, + )(i) +} + +fn key_value(i: &[u8]) -> IResult<&[u8], (&str, &str)> { + let (i, key) = map_res(alphanumeric, str::from_utf8)(i)?; + let (i, _) = tuple((opt(space), char('='), opt(space)))(i)?; + let (i, val) = map_res(take_while(|c| c != b'\n' && c != b';'), str::from_utf8)(i)?; + let (i, _) = opt(pair(char(';'), take_while(|c| c != b'\n')))(i)?; + Ok((i, (key, val))) +} + +fn keys_and_values(i: &[u8]) -> IResult<&[u8], HashMap<&str, &str>> { + map(many0(terminated(key_value, opt(multispace))), |vec| { + vec.into_iter().collect() + })(i) +} + +fn category_and_keys(i: &[u8]) -> IResult<&[u8], (&str, HashMap<&str, &str>)> { + let (i, category) = terminated(category, opt(multispace))(i)?; + let (i, keys) = keys_and_values(i)?; + Ok((i, (category, keys))) +} + +fn categories(i: &[u8]) -> IResult<&[u8], HashMap<&str, HashMap<&str, &str>>> { + map( + many0(separated_pair( + category, + opt(multispace), + map( + many0(terminated(key_value, opt(multispace))), + |vec: Vec<_>| vec.into_iter().collect(), + ), + )), + |vec: Vec<_>| vec.into_iter().collect(), + )(i) +} + +#[test] +fn parse_category_test() { + let ini_file = &b"[category] + +parameter=value +key = value2"[..]; + + let ini_without_category = &b"\n\nparameter=value +key = value2"[..]; + + let res = category(ini_file); + println!("{:?}", res); + match res { + Ok((i, o)) => println!("i: {:?} | o: {:?}", str::from_utf8(i), o), + _ => println!("error"), + } + + assert_eq!(res, Ok((ini_without_category, "category"))); +} + +#[test] +fn parse_key_value_test() { + let ini_file = &b"parameter=value +key = value2"[..]; + + let ini_without_key_value = &b"\nkey = value2"[..]; + + let res = key_value(ini_file); + println!("{:?}", res); + match res { + Ok((i, (o1, o2))) => println!("i: {:?} | o: ({:?},{:?})", str::from_utf8(i), o1, o2), + _ => println!("error"), + } + + assert_eq!(res, 
Ok((ini_without_key_value, ("parameter", "value")))); +} + +#[test] +fn parse_key_value_with_space_test() { + let ini_file = &b"parameter = value +key = value2"[..]; + + let ini_without_key_value = &b"\nkey = value2"[..]; + + let res = key_value(ini_file); + println!("{:?}", res); + match res { + Ok((i, (o1, o2))) => println!("i: {:?} | o: ({:?},{:?})", str::from_utf8(i), o1, o2), + _ => println!("error"), + } + + assert_eq!(res, Ok((ini_without_key_value, ("parameter", "value")))); +} + +#[test] +fn parse_key_value_with_comment_test() { + let ini_file = &b"parameter=value;abc +key = value2"[..]; + + let ini_without_key_value = &b"\nkey = value2"[..]; + + let res = key_value(ini_file); + println!("{:?}", res); + match res { + Ok((i, (o1, o2))) => println!("i: {:?} | o: ({:?},{:?})", str::from_utf8(i), o1, o2), + _ => println!("error"), + } + + assert_eq!(res, Ok((ini_without_key_value, ("parameter", "value")))); +} + +#[test] +fn parse_multiple_keys_and_values_test() { + let ini_file = &b"parameter=value;abc + +key = value2 + +[category]"[..]; + + let ini_without_key_value = &b"[category]"[..]; + + let res = keys_and_values(ini_file); + println!("{:?}", res); + match res { + Ok((i, ref o)) => println!("i: {:?} | o: {:?}", str::from_utf8(i), o), + _ => println!("error"), + } + + let mut expected: HashMap<&str, &str> = HashMap::new(); + expected.insert("parameter", "value"); + expected.insert("key", "value2"); + assert_eq!(res, Ok((ini_without_key_value, expected))); +} + +#[test] +fn parse_category_then_multiple_keys_and_values_test() { + //FIXME: there can be an empty line or a comment line after a category + let ini_file = &b"[abcd] +parameter=value;abc + +key = value2 + +[category]"[..]; + + let ini_after_parser = &b"[category]"[..]; + + let res = category_and_keys(ini_file); + println!("{:?}", res); + match res { + Ok((i, ref o)) => println!("i: {:?} | o: {:?}", str::from_utf8(i), o), + _ => println!("error"), + } + + let mut expected_h: HashMap<&str, &str> = HashMap::new(); + expected_h.insert("parameter", "value"); + expected_h.insert("key", "value2"); + assert_eq!(res, Ok((ini_after_parser, ("abcd", expected_h)))); +} + +#[test] +fn parse_multiple_categories_test() { + let ini_file = &b"[abcd] + +parameter=value;abc + +key = value2 + +[category] +parameter3=value3 +key4 = value4 +"[..]; + + let ini_after_parser = &b""[..]; + + let res = categories(ini_file); + //println!("{:?}", res); + match res { + Ok((i, ref o)) => println!("i: {:?} | o: {:?}", str::from_utf8(i), o), + _ => println!("error"), + } + + let mut expected_1: HashMap<&str, &str> = HashMap::new(); + expected_1.insert("parameter", "value"); + expected_1.insert("key", "value2"); + let mut expected_2: HashMap<&str, &str> = HashMap::new(); + expected_2.insert("parameter3", "value3"); + expected_2.insert("key4", "value4"); + let mut expected_h: HashMap<&str, HashMap<&str, &str>> = HashMap::new(); + expected_h.insert("abcd", expected_1); + expected_h.insert("category", expected_2); + assert_eq!(res, Ok((ini_after_parser, expected_h))); +} diff --git a/vendor/nom/tests/ini_str.rs b/vendor/nom/tests/ini_str.rs new file mode 100644 index 00000000000000..3702303527615e --- /dev/null +++ b/vendor/nom/tests/ini_str.rs @@ -0,0 +1,217 @@ +use nom::{ + bytes::complete::{is_a, tag, take_till, take_while}, + character::complete::{alphanumeric1 as alphanumeric, char, space0 as space}, + combinator::opt, + multi::many0, + sequence::{delimited, pair, terminated, tuple}, + IResult, +}; + +use std::collections::HashMap; + +fn 
is_line_ending_or_comment(chr: char) -> bool { + chr == ';' || chr == '\n' +} + +fn not_line_ending(i: &str) -> IResult<&str, &str> { + take_while(|c| c != '\r' && c != '\n')(i) +} + +fn space_or_line_ending(i: &str) -> IResult<&str, &str> { + is_a(" \r\n")(i) +} + +fn category(i: &str) -> IResult<&str, &str> { + terminated( + delimited(char('['), take_while(|c| c != ']'), char(']')), + opt(is_a(" \r\n")), + )(i) +} + +fn key_value(i: &str) -> IResult<&str, (&str, &str)> { + let (i, key) = alphanumeric(i)?; + let (i, _) = tuple((opt(space), tag("="), opt(space)))(i)?; + let (i, val) = take_till(is_line_ending_or_comment)(i)?; + let (i, _) = opt(space)(i)?; + let (i, _) = opt(pair(tag(";"), not_line_ending))(i)?; + let (i, _) = opt(space_or_line_ending)(i)?; + + Ok((i, (key, val))) +} + +fn keys_and_values_aggregator(i: &str) -> IResult<&str, Vec<(&str, &str)>> { + many0(key_value)(i) +} + +fn keys_and_values(input: &str) -> IResult<&str, HashMap<&str, &str>> { + match keys_and_values_aggregator(input) { + Ok((i, tuple_vec)) => Ok((i, tuple_vec.into_iter().collect())), + Err(e) => Err(e), + } +} + +fn category_and_keys(i: &str) -> IResult<&str, (&str, HashMap<&str, &str>)> { + pair(category, keys_and_values)(i) +} + +fn categories_aggregator(i: &str) -> IResult<&str, Vec<(&str, HashMap<&str, &str>)>> { + many0(category_and_keys)(i) +} + +fn categories(input: &str) -> IResult<&str, HashMap<&str, HashMap<&str, &str>>> { + match categories_aggregator(input) { + Ok((i, tuple_vec)) => Ok((i, tuple_vec.into_iter().collect())), + Err(e) => Err(e), + } +} + +#[test] +fn parse_category_test() { + let ini_file = "[category] + +parameter=value +key = value2"; + + let ini_without_category = "parameter=value +key = value2"; + + let res = category(ini_file); + println!("{:?}", res); + match res { + Ok((i, o)) => println!("i: {} | o: {:?}", i, o), + _ => println!("error"), + } + + assert_eq!(res, Ok((ini_without_category, "category"))); +} + +#[test] +fn parse_key_value_test() { + let ini_file = "parameter=value +key = value2"; + + let ini_without_key_value = "key = value2"; + + let res = key_value(ini_file); + println!("{:?}", res); + match res { + Ok((i, (o1, o2))) => println!("i: {} | o: ({:?},{:?})", i, o1, o2), + _ => println!("error"), + } + + assert_eq!(res, Ok((ini_without_key_value, ("parameter", "value")))); +} + +#[test] +fn parse_key_value_with_space_test() { + let ini_file = "parameter = value +key = value2"; + + let ini_without_key_value = "key = value2"; + + let res = key_value(ini_file); + println!("{:?}", res); + match res { + Ok((i, (o1, o2))) => println!("i: {} | o: ({:?},{:?})", i, o1, o2), + _ => println!("error"), + } + + assert_eq!(res, Ok((ini_without_key_value, ("parameter", "value")))); +} + +#[test] +fn parse_key_value_with_comment_test() { + let ini_file = "parameter=value;abc +key = value2"; + + let ini_without_key_value = "key = value2"; + + let res = key_value(ini_file); + println!("{:?}", res); + match res { + Ok((i, (o1, o2))) => println!("i: {} | o: ({:?},{:?})", i, o1, o2), + _ => println!("error"), + } + + assert_eq!(res, Ok((ini_without_key_value, ("parameter", "value")))); +} + +#[test] +fn parse_multiple_keys_and_values_test() { + let ini_file = "parameter=value;abc + +key = value2 + +[category]"; + + let ini_without_key_value = "[category]"; + + let res = keys_and_values(ini_file); + println!("{:?}", res); + match res { + Ok((i, ref o)) => println!("i: {} | o: {:?}", i, o), + _ => println!("error"), + } + + let mut expected: HashMap<&str, &str> = HashMap::new(); + 
 expected.insert("parameter", "value");
+  expected.insert("key", "value2");
+  assert_eq!(res, Ok((ini_without_key_value, expected)));
+}
+
+#[test]
+fn parse_category_then_multiple_keys_and_values_test() {
+  //FIXME: there can be an empty line or a comment line after a category
+  let ini_file = "[abcd]
+parameter=value;abc
+
+key = value2
+
+[category]";
+
+  let ini_after_parser = "[category]";
+
+  let res = category_and_keys(ini_file);
+  println!("{:?}", res);
+  match res {
+    Ok((i, ref o)) => println!("i: {} | o: {:?}", i, o),
+    _ => println!("error"),
+  }
+
+  let mut expected_h: HashMap<&str, &str> = HashMap::new();
+  expected_h.insert("parameter", "value");
+  expected_h.insert("key", "value2");
+  assert_eq!(res, Ok((ini_after_parser, ("abcd", expected_h))));
+}
+
+#[test]
+fn parse_multiple_categories_test() {
+  let ini_file = "[abcd]
+
+parameter=value;abc
+
+key = value2
+
+[category]
+parameter3=value3
+key4 = value4
+";
+
+  let res = categories(ini_file);
+  //println!("{:?}", res);
+  match res {
+    Ok((i, ref o)) => println!("i: {} | o: {:?}", i, o),
+    _ => println!("error"),
+  }
+
+  let mut expected_1: HashMap<&str, &str> = HashMap::new();
+  expected_1.insert("parameter", "value");
+  expected_1.insert("key", "value2");
+  let mut expected_2: HashMap<&str, &str> = HashMap::new();
+  expected_2.insert("parameter3", "value3");
+  expected_2.insert("key4", "value4");
+  let mut expected_h: HashMap<&str, HashMap<&str, &str>> = HashMap::new();
+  expected_h.insert("abcd", expected_1);
+  expected_h.insert("category", expected_2);
+  assert_eq!(res, Ok(("", expected_h)));
+}
diff --git a/vendor/nom/tests/issues.rs b/vendor/nom/tests/issues.rs
new file mode 100644
index 00000000000000..7985702f678530
--- /dev/null
+++ b/vendor/nom/tests/issues.rs
@@ -0,0 +1,242 @@
+//#![feature(trace_macros)]
+#![allow(dead_code)]
+#![cfg_attr(feature = "cargo-clippy", allow(redundant_closure))]
+
+use nom::{error::ErrorKind, Err, IResult, Needed};
+
+#[allow(dead_code)]
+struct Range {
+  start: char,
+  end: char,
+}
+
+pub fn take_char(input: &[u8]) -> IResult<&[u8], char> {
+  if !input.is_empty() {
+    Ok((&input[1..], input[0] as char))
+  } else {
+    Err(Err::Incomplete(Needed::new(1)))
+  }
+}
+
+#[cfg(feature = "std")]
+mod parse_int {
+  use nom::HexDisplay;
+  use nom::{
+    character::streaming::{digit1 as digit, space1 as space},
+    combinator::{complete, map, opt},
+    multi::many0,
+    IResult,
+  };
+  use std::str;
+
+  fn parse_ints(input: &[u8]) -> IResult<&[u8], Vec<i32>> {
+    many0(spaces_or_int)(input)
+  }
+
+  fn spaces_or_int(input: &[u8]) -> IResult<&[u8], i32> {
+    println!("{}", input.to_hex(8));
+    let (i, _) = opt(complete(space))(input)?;
+    let (i, res) = map(complete(digit), |x| {
+      println!("x: {:?}", x);
+      let result = str::from_utf8(x).unwrap();
+      println!("Result: {}", result);
+      println!("int is empty?: {}", x.is_empty());
+      match result.parse() {
+        Ok(i) => i,
+        Err(e) => panic!("UH OH! NOT A DIGIT! {:?}", e),
+      }
+    })(i)?;
+
+    Ok((i, res))
+  }
+
+  #[test]
+  fn issue_142() {
+    let subject = parse_ints(&b"12 34 5689a"[..]);
+    let expected = Ok((&b"a"[..], vec![12, 34, 5689]));
+    assert_eq!(subject, expected);
+
+    let subject = parse_ints(&b"12 34 5689 "[..]);
+    let expected = Ok((&b" "[..], vec![12, 34, 5689]));
+    assert_eq!(subject, expected)
+  }
+}
+
+#[test]
+fn usize_length_bytes_issue() {
+  use nom::multi::length_data;
+  use nom::number::streaming::be_u16;
+  let _: IResult<&[u8], &[u8], (&[u8], ErrorKind)> = length_data(be_u16)(b"012346");
+}
+
+#[test]
+fn take_till_issue() {
+  use nom::bytes::streaming::take_till;
+
+  fn nothing(i: &[u8]) -> IResult<&[u8], &[u8]> {
+    take_till(|_| true)(i)
+  }
+
+  assert_eq!(nothing(b""), Err(Err::Incomplete(Needed::new(1))));
+  assert_eq!(nothing(b"abc"), Ok((&b"abc"[..], &b""[..])));
+}
+
+#[test]
+fn issue_655() {
+  use nom::character::streaming::{line_ending, not_line_ending};
+  fn twolines(i: &str) -> IResult<&str, (&str, &str)> {
+    let (i, l1) = not_line_ending(i)?;
+    let (i, _) = line_ending(i)?;
+    let (i, l2) = not_line_ending(i)?;
+    let (i, _) = line_ending(i)?;
+
+    Ok((i, (l1, l2)))
+  }
+
+  assert_eq!(twolines("foo\nbar\n"), Ok(("", ("foo", "bar"))));
+  assert_eq!(twolines("féo\nbar\n"), Ok(("", ("féo", "bar"))));
+  assert_eq!(twolines("foé\nbar\n"), Ok(("", ("foé", "bar"))));
+  assert_eq!(twolines("foé\r\nbar\n"), Ok(("", ("foé", "bar"))));
+}
+
+#[cfg(feature = "alloc")]
+fn issue_717(i: &[u8]) -> IResult<&[u8], Vec<&[u8]>> {
+  use nom::bytes::complete::{is_not, tag};
+  use nom::multi::separated_list0;
+
+  separated_list0(tag([0x0]), is_not([0x0u8]))(i)
+}
+
+mod issue_647 {
+  use nom::bytes::streaming::tag;
+  use nom::combinator::complete;
+  use nom::multi::separated_list0;
+  use nom::{error::Error, number::streaming::be_f64, Err, IResult};
+  pub type Input<'a> = &'a [u8];
+
+  #[derive(PartialEq, Debug, Clone)]
+  struct Data {
+    c: f64,
+    v: Vec<f64>,
+  }
+
+  fn list<'a, 'b>(
+    input: Input<'a>,
+    _cs: &'b f64,
+  ) -> Result<(Input<'a>, Vec<f64>), Err<Error<Input<'a>>>> {
+    separated_list0(complete(tag(",")), complete(be_f64))(input)
+  }
+
+  fn data(input: Input<'_>) -> IResult<Input<'_>, Data> {
+    let (i, c) = be_f64(input)?;
+    let (i, _) = tag("\n")(i)?;
+    let (i, v) = list(i, &c)?;
+    Ok((i, Data { c, v }))
+  }
+}
+
+#[test]
+fn issue_848_overflow_incomplete_bits_to_bytes() {
+  fn take(i: &[u8]) -> IResult<&[u8], &[u8]> {
+    use nom::bytes::streaming::take;
+    take(0x2000000000000000_usize)(i)
+  }
+  fn parser(i: &[u8]) -> IResult<&[u8], &[u8]> {
+    use nom::bits::{bits, bytes};
+
+    bits(bytes(take))(i)
+  }
+  assert_eq!(
+    parser(&b""[..]),
+    Err(Err::Failure(nom::error_position!(
+      &b""[..],
+      ErrorKind::TooLarge
+    )))
+  );
+}
+
+#[test]
+fn issue_942() {
+  use nom::error::{ContextError, ParseError};
+  pub fn parser<'a, E: ParseError<&'a str> + ContextError<&'a str>>(
+    i: &'a str,
+  ) -> IResult<&'a str, usize, E> {
+    use nom::{character::complete::char, error::context, multi::many0_count};
+    many0_count(context("char_a", char('a')))(i)
+  }
+  assert_eq!(parser::<()>("aaa"), Ok(("", 3)));
+}
+
+#[test]
+fn issue_many_m_n_with_zeros() {
+  use nom::character::complete::char;
+  use nom::multi::many_m_n;
+  let mut parser = many_m_n::<_, _, (), _>(0, 0, char('a'));
+  assert_eq!(parser("aaa"), Ok(("aaa", vec![])));
+}
+
+#[test]
+fn issue_1027_convert_error_panic_nonempty() {
+  use nom::character::complete::char;
+  use nom::error::{convert_error, VerboseError};
+  use nom::sequence::pair;
+
+  let input = "a";
+
+  let result: IResult<_, _, VerboseError<&str>> = pair(char('a'), char('b'))(input);
+  let err = match result.unwrap_err() {
+    Err::Error(e) => e,
+    _ => unreachable!(),
+  };
+
+  let msg = convert_error(input, err);
+  assert_eq!(
+    msg,
+    "0: at line 1:\na\n ^\nexpected \'b\', got end of input\n\n"
+  );
+}
+
+#[test]
+fn issue_1231_bits_expect_fn_closure() {
+  use nom::bits::{bits, complete::take};
+  use nom::error::Error;
+  use nom::sequence::tuple;
+  pub fn example(input: &[u8]) -> IResult<&[u8], (u8, u8)> {
+    bits::<_, _, Error<_>, _, _>(tuple((take(1usize), take(1usize))))(input)
+  }
+  assert_eq!(example(&[0xff]), Ok((&b""[..], (1, 1))));
+}
+
+#[test]
+fn issue_1282_findtoken_char() {
+  use nom::character::complete::one_of;
+  use nom::error::Error;
+  let parser = one_of::<_, _, Error<_>>(&['a', 'b', 'c'][..]);
+  assert_eq!(parser("aaa"), Ok(("aa", 'a')));
+}
+
+#[test]
+fn issue_1459_clamp_capacity() {
+  use nom::character::complete::char;
+
+  // shouldn't panic
+  use nom::multi::many_m_n;
+  let mut parser = many_m_n::<_, _, (), _>(usize::MAX, usize::MAX, char('a'));
+  assert_eq!(parser("a"), Err(nom::Err::Error(())));
+
+  // shouldn't panic
+  use nom::multi::count;
+  let mut parser = count::<_, _, (), _>(char('a'), usize::MAX);
+  assert_eq!(parser("a"), Err(nom::Err::Error(())));
+}
+
+#[test]
+fn issue_1617_count_parser_returning_zero_size() {
+  use nom::{bytes::complete::tag, combinator::map, error::Error, multi::count};
+
+  // previously, `count()` panicked if the parser had type `O = ()`
+  let parser = map(tag::<_, _, Error<&str>>("abc"), |_| ());
+  // shouldn't panic
+  let result = count(parser, 3)("abcabcabcdef").expect("parsing should succeed");
+  assert_eq!(result, ("def", vec![(), (), ()]));
+}
diff --git a/vendor/nom/tests/json.rs b/vendor/nom/tests/json.rs
new file mode 100644
index 00000000000000..e8a06fd778a137
--- /dev/null
+++ b/vendor/nom/tests/json.rs
@@ -0,0 +1,236 @@
+#![cfg(feature = "alloc")]
+
+use nom::{
+  branch::alt,
+  bytes::complete::{tag, take},
+  character::complete::{anychar, char, multispace0, none_of},
+  combinator::{map, map_opt, map_res, value, verify},
+  error::ParseError,
+  multi::{fold_many0, separated_list0},
+  number::complete::double,
+  sequence::{delimited, preceded, separated_pair},
+  IResult, Parser,
+};
+
+use std::collections::HashMap;
+
+#[derive(Debug, PartialEq, Clone)]
+pub enum JsonValue {
+  Null,
+  Bool(bool),
+  Str(String),
+  Num(f64),
+  Array(Vec<JsonValue>),
+  Object(HashMap<String, JsonValue>),
+}
+
+fn boolean(input: &str) -> IResult<&str, bool> {
+  alt((value(false, tag("false")), value(true, tag("true"))))(input)
+}
+
+fn u16_hex(input: &str) -> IResult<&str, u16> {
+  map_res(take(4usize), |s| u16::from_str_radix(s, 16))(input)
+}
+
+fn unicode_escape(input: &str) -> IResult<&str, char> {
+  map_opt(
+    alt((
+      // Not a surrogate
+      map(verify(u16_hex, |cp| !(0xD800..0xE000).contains(cp)), |cp| {
+        cp as u32
+      }),
+      // See https://en.wikipedia.org/wiki/UTF-16#Code_points_from_U+010000_to_U+10FFFF for details
+      map(
+        verify(
+          separated_pair(u16_hex, tag("\\u"), u16_hex),
+          |(high, low)| (0xD800..0xDC00).contains(high) && (0xDC00..0xE000).contains(low),
+        ),
+        |(high, low)| {
+          let high_ten = (high as u32) - 0xD800;
+          let low_ten = (low as u32) - 0xDC00;
+          (high_ten << 10) + low_ten + 0x10000
+        },
+      ),
+    )),
+    // Could be probably replaced with .unwrap() or _unchecked due to the verify checks
+    std::char::from_u32,
+  )(input)
+}
+
+fn character(input: &str) -> IResult<&str, char> {
+  let (input, c) = none_of("\"")(input)?;
+  if c == '\\' {
+    alt((
+      map_res(anychar, |c| {
+        Ok(match c {
+          '"' | '\\' | '/' => c,
+          'b' => '\x08',
+          'f' => '\x0C',
+          'n' => '\n',
+          'r' => '\r',
+          't' => '\t',
+          _ => return Err(()),
+        })
+      }),
+      preceded(char('u'), unicode_escape),
+    ))(input)
+  } else {
+    Ok((input, c))
+  }
+}
+
+fn string(input: &str) -> IResult<&str, String> {
+  delimited(
+    char('"'),
+    fold_many0(character, String::new, |mut string, c| {
+      string.push(c);
+      string
+    }),
+    char('"'),
+  )(input)
+}
+
+fn ws<'a, O, E: ParseError<&'a str>, F: Parser<&'a str, O, E>>(f: F) -> impl Parser<&'a str, O, E> {
+  delimited(multispace0, f, multispace0)
+}
+
+fn array(input: &str) -> IResult<&str, Vec<JsonValue>> {
+  delimited(
+    char('['),
+    ws(separated_list0(ws(char(',')), json_value)),
+    char(']'),
+  )(input)
+}
+
+fn object(input: &str) -> IResult<&str, HashMap<String, JsonValue>> {
+  map(
+    delimited(
+      char('{'),
+      ws(separated_list0(
+        ws(char(',')),
+        separated_pair(string, ws(char(':')), json_value),
+      )),
+      char('}'),
+    ),
+    |key_values| key_values.into_iter().collect(),
+  )(input)
+}
+
+fn json_value(input: &str) -> IResult<&str, JsonValue> {
+  use JsonValue::*;
+
+  alt((
+    value(Null, tag("null")),
+    map(boolean, Bool),
+    map(string, Str),
+    map(double, Num),
+    map(array, Array),
+    map(object, Object),
+  ))(input)
+}
+
+fn json(input: &str) -> IResult<&str, JsonValue> {
+  ws(json_value).parse(input)
+}
+
+#[test]
+fn json_string() {
+  assert_eq!(string("\"\""), Ok(("", "".to_string())));
+  assert_eq!(string("\"abc\""), Ok(("", "abc".to_string())));
+  assert_eq!(
+    string("\"abc\\\"\\\\\\/\\b\\f\\n\\r\\t\\u0001\\u2014\u{2014}def\""),
+    Ok(("", "abc\"\\/\x08\x0C\n\r\t\x01——def".to_string())),
+  );
+  assert_eq!(string("\"\\uD83D\\uDE10\""), Ok(("", "😐".to_string())));
+
+  assert!(string("\"").is_err());
+  assert!(string("\"abc").is_err());
+  assert!(string("\"\\\"").is_err());
+  assert!(string("\"\\u123\"").is_err());
+  assert!(string("\"\\uD800\"").is_err());
+  assert!(string("\"\\uD800\\uD800\"").is_err());
+  assert!(string("\"\\uDC00\"").is_err());
+}
+
+#[test]
+fn json_object() {
+  use JsonValue::*;
+
+  let input = r#"{"a":42,"b":"x"}"#;
+
+  let expected = Object(
+    vec![
+      ("a".to_string(), Num(42.0)),
+      ("b".to_string(), Str("x".to_string())),
+    ]
+    .into_iter()
+    .collect(),
+  );
+
+  assert_eq!(json(input), Ok(("", expected)));
+}
+
+#[test]
+fn json_array() {
+  use JsonValue::*;
+
+  let input = r#"[42,"x"]"#;
+
+  let expected = Array(vec![Num(42.0), Str("x".to_string())]);
+
+  assert_eq!(json(input), Ok(("", expected)));
+}
+
+#[test]
+fn json_whitespace() {
+  use JsonValue::*;
+
+  let input = r#"
+  {
+    "null" : null,
+    "true" :true ,
+    "false": false ,
+    "number" : 123e4 ,
+    "string" : " abc 123 " ,
+    "array" : [ false , 1 , "two" ] ,
+    "object" : { "a" : 1.0 , "b" : "c" } ,
+    "empty_array" : [ ] ,
+    "empty_object" : { }
+  }
+  "#;
+
+  assert_eq!(
+    json(input),
+    Ok((
+      "",
+      Object(
+        vec![
+          ("null".to_string(), Null),
+          ("true".to_string(), Bool(true)),
+          ("false".to_string(), Bool(false)),
+          ("number".to_string(), Num(123e4)),
+          ("string".to_string(), Str(" abc 123 ".to_string())),
+          (
+            "array".to_string(),
+            Array(vec![Bool(false), Num(1.0), Str("two".to_string())])
+          ),
+          (
+            "object".to_string(),
+            Object(
+              vec![
+                ("a".to_string(), Num(1.0)),
+                ("b".to_string(), Str("c".to_string())),
+              ]
+              .into_iter()
+              .collect()
+            )
+          ),
+          ("empty_array".to_string(), Array(vec![]),),
+          ("empty_object".to_string(), Object(HashMap::new()),),
+        ]
+        .into_iter()
+        .collect()
+      )
+    ))
+  );
+}
diff --git a/vendor/nom/tests/mp4.rs b/vendor/nom/tests/mp4.rs
new file mode 100644
index 00000000000000..852bf29555f6ea
--- /dev/null
+++ 
b/vendor/nom/tests/mp4.rs @@ -0,0 +1,320 @@ +#![allow(dead_code)] + +use nom::{ + branch::alt, + bytes::streaming::{tag, take}, + combinator::{map, map_res}, + error::ErrorKind, + multi::many0, + number::streaming::{be_f32, be_u16, be_u32, be_u64}, + Err, IResult, Needed, +}; + +use std::str; + +fn mp4_box(input: &[u8]) -> IResult<&[u8], &[u8]> { + match be_u32(input) { + Ok((i, offset)) => { + let sz: usize = offset as usize; + if i.len() >= sz - 4 { + Ok((&i[(sz - 4)..], &i[0..(sz - 4)])) + } else { + Err(Err::Incomplete(Needed::new(offset as usize + 4))) + } + } + Err(e) => Err(e), + } +} + +#[cfg_attr(rustfmt, rustfmt_skip)] +#[derive(PartialEq,Eq,Debug)] +struct FileType<'a> { + major_brand: &'a str, + major_brand_version: &'a [u8], + compatible_brands: Vec<&'a str> +} + +#[cfg_attr(rustfmt, rustfmt_skip)] +#[allow(non_snake_case)] +#[derive(Debug,Clone)] +pub struct Mvhd32 { + version_flags: u32, // actually: + // version: u8, + // flags: u24 // 3 bytes + created_date: u32, + modified_date: u32, + scale: u32, + duration: u32, + speed: f32, + volume: u16, // actually a 2 bytes decimal + /* 10 bytes reserved */ + scaleA: f32, + rotateB: f32, + angleU: f32, + rotateC: f32, + scaleD: f32, + angleV: f32, + positionX: f32, + positionY: f32, + scaleW: f32, + preview: u64, + poster: u32, + selection: u64, + current_time: u32, + track_id: u32 +} + +#[cfg_attr(rustfmt, rustfmt_skip)] +#[allow(non_snake_case)] +#[derive(Debug,Clone)] +pub struct Mvhd64 { + version_flags: u32, // actually: + // version: u8, + // flags: u24 // 3 bytes + created_date: u64, + modified_date: u64, + scale: u32, + duration: u64, + speed: f32, + volume: u16, // actually a 2 bytes decimal + /* 10 bytes reserved */ + scaleA: f32, + rotateB: f32, + angleU: f32, + rotateC: f32, + scaleD: f32, + angleV: f32, + positionX: f32, + positionY: f32, + scaleW: f32, + preview: u64, + poster: u32, + selection: u64, + current_time: u32, + track_id: u32 +} + +#[cfg_attr(rustfmt, rustfmt_skip)] +fn mvhd32(i: &[u8]) -> IResult<&[u8], MvhdBox> { + let (i, version_flags) = be_u32(i)?; + let (i, created_date) = be_u32(i)?; + let (i, modified_date) = be_u32(i)?; + let (i, scale) = be_u32(i)?; + let (i, duration) = be_u32(i)?; + let (i, speed) = be_f32(i)?; + let (i, volume) = be_u16(i)?; // actually a 2 bytes decimal + let (i, _) = take(10_usize)(i)?; + let (i, scale_a) = be_f32(i)?; + let (i, rotate_b) = be_f32(i)?; + let (i, angle_u) = be_f32(i)?; + let (i, rotate_c) = be_f32(i)?; + let (i, scale_d) = be_f32(i)?; + let (i, angle_v) = be_f32(i)?; + let (i, position_x) = be_f32(i)?; + let (i, position_y) = be_f32(i)?; + let (i, scale_w) = be_f32(i)?; + let (i, preview) = be_u64(i)?; + let (i, poster) = be_u32(i)?; + let (i, selection) = be_u64(i)?; + let (i, current_time) = be_u32(i)?; + let (i, track_id) = be_u32(i)?; + + let mvhd_box = MvhdBox::M32(Mvhd32 { + version_flags, + created_date, + modified_date, + scale, + duration, + speed, + volume, + scaleA: scale_a, + rotateB: rotate_b, + angleU: angle_u, + rotateC: rotate_c, + scaleD: scale_d, + angleV: angle_v, + positionX: position_x, + positionY: position_y, + scaleW: scale_w, + preview, + poster, + selection, + current_time, + track_id, + }); + + Ok((i, mvhd_box)) +} + +#[cfg_attr(rustfmt, rustfmt_skip)] +fn mvhd64(i: &[u8]) -> IResult<&[u8], MvhdBox> { + let (i, version_flags) = be_u32(i)?; + let (i, created_date) = be_u64(i)?; + let (i, modified_date) = be_u64(i)?; + let (i, scale) = be_u32(i)?; + let (i, duration) = be_u64(i)?; + let (i, speed) = be_f32(i)?; + let (i, volume) = 
be_u16(i)?; // actually a 2 bytes decimal + let (i, _) = take(10_usize)(i)?; + let (i, scale_a) = be_f32(i)?; + let (i, rotate_b) = be_f32(i)?; + let (i, angle_u) = be_f32(i)?; + let (i, rotate_c) = be_f32(i)?; + let (i, scale_d) = be_f32(i)?; + let (i, angle_v) = be_f32(i)?; + let (i, position_x) = be_f32(i)?; + let (i, position_y) = be_f32(i)?; + let (i, scale_w) = be_f32(i)?; + let (i, preview) = be_u64(i)?; + let (i, poster) = be_u32(i)?; + let (i, selection) = be_u64(i)?; + let (i, current_time) = be_u32(i)?; + let (i, track_id) = be_u32(i)?; + + let mvhd_box = MvhdBox::M64(Mvhd64 { + version_flags, + created_date, + modified_date, + scale, + duration, + speed, + volume, + scaleA: scale_a, + rotateB: rotate_b, + angleU: angle_u, + rotateC: rotate_c, + scaleD: scale_d, + angleV: angle_v, + positionX: position_x, + positionY: position_y, + scaleW: scale_w, + preview, + poster, + selection, + current_time, + track_id, + }); + + Ok((i, mvhd_box)) +} + +#[derive(Debug, Clone)] +pub enum MvhdBox { + M32(Mvhd32), + M64(Mvhd64), +} + +#[derive(Debug, Clone)] +pub enum MoovBox { + Mdra, + Dref, + Cmov, + Rmra, + Iods, + Mvhd(MvhdBox), + Clip, + Trak, + Udta, +} + +#[derive(Debug)] +enum MP4BoxType { + Ftyp, + Moov, + Mdat, + Free, + Skip, + Wide, + Mdra, + Dref, + Cmov, + Rmra, + Iods, + Mvhd, + Clip, + Trak, + Udta, + Unknown, +} + +#[derive(Debug)] +struct MP4BoxHeader { + length: u32, + tag: MP4BoxType, +} + +fn brand_name(input: &[u8]) -> IResult<&[u8], &str> { + map_res(take(4_usize), str::from_utf8)(input) +} + +fn filetype_parser(input: &[u8]) -> IResult<&[u8], FileType<'_>> { + let (i, name) = brand_name(input)?; + let (i, version) = take(4_usize)(i)?; + let (i, brands) = many0(brand_name)(i)?; + + let ft = FileType { + major_brand: name, + major_brand_version: version, + compatible_brands: brands, + }; + Ok((i, ft)) +} + +fn mvhd_box(input: &[u8]) -> IResult<&[u8], MvhdBox> { + let res = if input.len() < 100 { + Err(Err::Incomplete(Needed::new(100))) + } else if input.len() == 100 { + mvhd32(input) + } else if input.len() == 112 { + mvhd64(input) + } else { + Err(Err::Error(nom::error_position!(input, ErrorKind::TooLarge))) + }; + println!("res: {:?}", res); + res +} + +fn unknown_box_type(input: &[u8]) -> IResult<&[u8], MP4BoxType> { + Ok((input, MP4BoxType::Unknown)) +} + +fn box_type(input: &[u8]) -> IResult<&[u8], MP4BoxType> { + alt(( + map(tag("ftyp"), |_| MP4BoxType::Ftyp), + map(tag("moov"), |_| MP4BoxType::Moov), + map(tag("mdat"), |_| MP4BoxType::Mdat), + map(tag("free"), |_| MP4BoxType::Free), + map(tag("skip"), |_| MP4BoxType::Skip), + map(tag("wide"), |_| MP4BoxType::Wide), + unknown_box_type, + ))(input) +} + +// warning, an alt combinator with 9 branches containing a tag combinator +// can make the compilation very slow. 
Use functions as sub parsers, +// or split into multiple alt parsers if it gets slow +fn moov_type(input: &[u8]) -> IResult<&[u8], MP4BoxType> { + alt(( + map(tag("mdra"), |_| MP4BoxType::Mdra), + map(tag("dref"), |_| MP4BoxType::Dref), + map(tag("cmov"), |_| MP4BoxType::Cmov), + map(tag("rmra"), |_| MP4BoxType::Rmra), + map(tag("iods"), |_| MP4BoxType::Iods), + map(tag("mvhd"), |_| MP4BoxType::Mvhd), + map(tag("clip"), |_| MP4BoxType::Clip), + map(tag("trak"), |_| MP4BoxType::Trak), + map(tag("udta"), |_| MP4BoxType::Udta), + ))(input) +} + +fn box_header(input: &[u8]) -> IResult<&[u8], MP4BoxHeader> { + let (i, length) = be_u32(input)?; + let (i, tag) = box_type(i)?; + Ok((i, MP4BoxHeader { length, tag })) +} + +fn moov_header(input: &[u8]) -> IResult<&[u8], MP4BoxHeader> { + let (i, length) = be_u32(input)?; + let (i, tag) = moov_type(i)?; + Ok((i, MP4BoxHeader { length, tag })) +} diff --git a/vendor/nom/tests/multiline.rs b/vendor/nom/tests/multiline.rs new file mode 100644 index 00000000000000..7378b9e3b4ddf4 --- /dev/null +++ b/vendor/nom/tests/multiline.rs @@ -0,0 +1,31 @@ +use nom::{ + character::complete::{alphanumeric1 as alphanumeric, line_ending as eol}, + multi::many0, + sequence::terminated, + IResult, +}; + +pub fn end_of_line(input: &str) -> IResult<&str, &str> { + if input.is_empty() { + Ok((input, input)) + } else { + eol(input) + } +} + +pub fn read_line(input: &str) -> IResult<&str, &str> { + terminated(alphanumeric, end_of_line)(input) +} + +pub fn read_lines(input: &str) -> IResult<&str, Vec<&str>> { + many0(read_line)(input) +} + +#[cfg(feature = "alloc")] +#[test] +fn read_lines_test() { + let res = Ok(("", vec!["Duck", "Dog", "Cow"])); + + assert_eq!(read_lines("Duck\nDog\nCow\n"), res); + assert_eq!(read_lines("Duck\nDog\nCow"), res); +} diff --git a/vendor/nom/tests/overflow.rs b/vendor/nom/tests/overflow.rs new file mode 100644 index 00000000000000..ea513bb395bffe --- /dev/null +++ b/vendor/nom/tests/overflow.rs @@ -0,0 +1,145 @@ +#![cfg_attr(feature = "cargo-clippy", allow(unreadable_literal))] +#![cfg(target_pointer_width = "64")] + +use nom::bytes::streaming::take; +#[cfg(feature = "alloc")] +use nom::multi::{length_data, many0}; +#[cfg(feature = "alloc")] +use nom::number::streaming::be_u64; +use nom::sequence::tuple; +use nom::{Err, IResult, Needed}; + +// Parser definition + +// We request a length that would trigger an overflow if computing consumed + requested +fn parser02(i: &[u8]) -> IResult<&[u8], (&[u8], &[u8])> { + tuple((take(1_usize), take(18446744073709551615_usize)))(i) +} + +#[test] +fn overflow_incomplete_tuple() { + assert_eq!( + parser02(&b"3"[..]), + Err(Err::Incomplete(Needed::new(18446744073709551615))) + ); +} + +#[test] +#[cfg(feature = "alloc")] +fn overflow_incomplete_length_bytes() { + fn multi(i: &[u8]) -> IResult<&[u8], Vec<&[u8]>> { + many0(length_data(be_u64))(i) + } + + // Trigger an overflow in length_data + assert_eq!( + multi(&b"\x00\x00\x00\x00\x00\x00\x00\x01\xaa\xff\xff\xff\xff\xff\xff\xff\xff"[..]), + Err(Err::Incomplete(Needed::new(18446744073709551615))) + ); +} + +#[test] +#[cfg(feature = "alloc")] +fn overflow_incomplete_many0() { + fn multi(i: &[u8]) -> IResult<&[u8], Vec<&[u8]>> { + many0(length_data(be_u64))(i) + } + + // Trigger an overflow in many0 + assert_eq!( + multi(&b"\x00\x00\x00\x00\x00\x00\x00\x01\xaa\xff\xff\xff\xff\xff\xff\xff\xef"[..]), + Err(Err::Incomplete(Needed::new(18446744073709551599))) + ); +} + +#[test] +#[cfg(feature = "alloc")] +fn overflow_incomplete_many1() { + use nom::multi::many1; + + 
fn multi(i: &[u8]) -> IResult<&[u8], Vec<&[u8]>> { + many1(length_data(be_u64))(i) + } + + // Trigger an overflow in many1 + assert_eq!( + multi(&b"\x00\x00\x00\x00\x00\x00\x00\x01\xaa\xff\xff\xff\xff\xff\xff\xff\xef"[..]), + Err(Err::Incomplete(Needed::new(18446744073709551599))) + ); +} + +#[test] +#[cfg(feature = "alloc")] +fn overflow_incomplete_many_till() { + use nom::{bytes::complete::tag, multi::many_till}; + + fn multi(i: &[u8]) -> IResult<&[u8], (Vec<&[u8]>, &[u8])> { + many_till(length_data(be_u64), tag("abc"))(i) + } + + // Trigger an overflow in many_till + assert_eq!( + multi(&b"\x00\x00\x00\x00\x00\x00\x00\x01\xaa\xff\xff\xff\xff\xff\xff\xff\xef"[..]), + Err(Err::Incomplete(Needed::new(18446744073709551599))) + ); +} + +#[test] +#[cfg(feature = "alloc")] +fn overflow_incomplete_many_m_n() { + use nom::multi::many_m_n; + + fn multi(i: &[u8]) -> IResult<&[u8], Vec<&[u8]>> { + many_m_n(2, 4, length_data(be_u64))(i) + } + + // Trigger an overflow in many_m_n + assert_eq!( + multi(&b"\x00\x00\x00\x00\x00\x00\x00\x01\xaa\xff\xff\xff\xff\xff\xff\xff\xef"[..]), + Err(Err::Incomplete(Needed::new(18446744073709551599))) + ); +} + +#[test] +#[cfg(feature = "alloc")] +fn overflow_incomplete_count() { + use nom::multi::count; + + fn counter(i: &[u8]) -> IResult<&[u8], Vec<&[u8]>> { + count(length_data(be_u64), 2)(i) + } + + assert_eq!( + counter(&b"\x00\x00\x00\x00\x00\x00\x00\x01\xaa\xff\xff\xff\xff\xff\xff\xff\xef"[..]), + Err(Err::Incomplete(Needed::new(18446744073709551599))) + ); +} + +#[test] +#[cfg(feature = "alloc")] +fn overflow_incomplete_length_count() { + use nom::multi::length_count; + use nom::number::streaming::be_u8; + + fn multi(i: &[u8]) -> IResult<&[u8], Vec<&[u8]>> { + length_count(be_u8, length_data(be_u64))(i) + } + + assert_eq!( + multi(&b"\x04\x00\x00\x00\x00\x00\x00\x00\x01\xaa\xff\xff\xff\xff\xff\xff\xff\xee"[..]), + Err(Err::Incomplete(Needed::new(18446744073709551598))) + ); +} + +#[test] +#[cfg(feature = "alloc")] +fn overflow_incomplete_length_data() { + fn multi(i: &[u8]) -> IResult<&[u8], Vec<&[u8]>> { + many0(length_data(be_u64))(i) + } + + assert_eq!( + multi(&b"\x00\x00\x00\x00\x00\x00\x00\x01\xaa\xff\xff\xff\xff\xff\xff\xff\xff"[..]), + Err(Err::Incomplete(Needed::new(18446744073709551615))) + ); +} diff --git a/vendor/nom/tests/reborrow_fold.rs b/vendor/nom/tests/reborrow_fold.rs new file mode 100644 index 00000000000000..486617e427268d --- /dev/null +++ b/vendor/nom/tests/reborrow_fold.rs @@ -0,0 +1,31 @@ +#![allow(dead_code)] +// #![allow(unused_variables)] + +use std::str; + +use nom::bytes::complete::is_not; +use nom::character::complete::char; +use nom::combinator::{map, map_res}; +use nom::multi::fold_many0; +use nom::sequence::delimited; +use nom::IResult; + +fn atom<'a>(_tomb: &'a mut ()) -> impl FnMut(&'a [u8]) -> IResult<&'a [u8], String> { + move |input| { + map( + map_res(is_not(" \t\r\n"), str::from_utf8), + ToString::to_string, + )(input) + } +} + +// FIXME: should we support the use case of borrowing data mutably in a parser? 
+fn list<'a>(i: &'a [u8], tomb: &'a mut ()) -> IResult<&'a [u8], String> { + delimited( + char('('), + fold_many0(atom(tomb), String::new, |acc: String, next: String| { + acc + next.as_str() + }), + char(')'), + )(i) +} diff --git a/vendor/prettyplease/.cargo-checksum.json b/vendor/prettyplease/.cargo-checksum.json new file mode 100644 index 00000000000000..16a3962905f02a --- /dev/null +++ b/vendor/prettyplease/.cargo-checksum.json @@ -0,0 +1 @@ +{"files":{".cargo_vcs_info.json":"a42290dfff809a03b196211cde4dccabf08a93b4957620c610ba293bfed0f910",".github/FUNDING.yml":"b017158736b3c9751a2d21edfce7fe61c8954e2fced8da8dd3013c2f3e295bd9",".github/workflows/ci.yml":"d39fda7f894021a5e8f3906e835be0d0320afd3e458f3756c2e1587cbbf051ee","Cargo.lock":"9a073091a7cdc7f92ae7002d1ad6fc6c0ca919aac280322318096f269abfe629","Cargo.toml":"fadc7182205c8cebaea0545496490a549f3d102d17d7b13ae26b2f6c24828a0b","Cargo.toml.orig":"1163b5012ba2567947bc3f662645c7769428f57f8ed033352579d46acfbe7b3f","LICENSE-APACHE":"62c7a1e35f56406896d7aa7ca52d0cc0d272ac022b5d2796e7d6905db8a3636a","LICENSE-MIT":"23f18e03dc49df91622fe2a76176497404e46ced8a715d9d2b67a7446571cca3","README.md":"a7e6d152cdc6ea603077e50b8d55af374d9d21fd9f62d08a008588b17d785e6e","build.rs":"79a5b2d260aa97aeac7105fbfa00774982f825cd708c100ea96d01c39974bb88","examples/.tokeignore":"23ab7a9f335a33747ebbc74f39c81b145cb7928e7fefe8055ca4db0a4fac7557","examples/input.rs":"53350088f12a346a99034af41ef432dedcc9e5d581c5592d9aae3807c42656c1","examples/output.prettyplease.rs":"fa63c118daadb64c456ec5b8d5e46e5d7fabbbeb6a6e61a08eabc23360a18fbd","examples/output.rustc.rs":"04647e9b01f2aa85982f849c2d897acf3b6931121c1ef953de5fb6a67b80e05a","examples/output.rustfmt.rs":"914a9aea1c51e097bfd80c9af4011811e6126c9df5fb0eac3d40b1203fba7c58","src/algorithm.rs":"901c91416d7526038bfad30e0066295a03d2bb995830016ace49a41540079010","src/attr.rs":"0a5c64b1c1f6fe4944c1d805c528ee4a9a8c6223e875afe9f48371cba66732ee","src/classify.rs":"2ce2d63ad9071aac10b1037e6382703736e0147d96b3ccf32a53182d12883f1b","src/convenience.rs":"dd392b009b691d3587c7d8e3caeaacf450303c4223792b5f89c336358e371c39","src/data.rs":"5bc2dce1cfa1aa5c1324ccdc2d76a6bd5df2382530c7e863d2bb50dea60cc4bc","src/expr.rs":"c73157238a80b0fb9a1949c6250cbb01f2df9217770f263dffcf5e5fbb570296","src/file.rs":"5689efa3c5959a6a0d8cfc2c13bf8a37ab0669e2b81dbded3f3c28884a88fca0","src/fixup.rs":"ecf87543c342fffc79bae54e4fa174cbfd5c341817315caa3b95cce0d49ebf7c","src/generics.rs":"1d33884399edf9ebb26afb998c5257b6d5238a77956b646f8e9fd728a6decee8","src/item.rs":"731732e0084c29ed77aa52ccc5e9cb970ccbfb3652035e329e4590f9b9274e8d","src/iter.rs":"38b2cd3b38719c6024fb6b3aa739f6f8736c83193fd21e2365d4f6c27bc41666","src/lib.rs":"ce24c5d146c17b70241b2b14a0b47a745af943fa22753f40975abd0e608dc01a","src/lifetime.rs":"6d420430168185b2da3409bc38a45f63cced9443915f04e6aec71367fc070dcf","src/lit.rs":"9ea6d25533e64df4ff01c084fa1c31ddf64fb3b159409eec7d80dbf281e5171e","src/mac.rs":"c1f8f9d60a6d116a63a7aa86d3dafdc5279c030b7f6a3e9bf119df109a913c8e","src/pat.rs":"8e53fd1b5382bb068210162bfab9921246093cfdd80dd93cd8627fcfdae39940","src/path.rs":"e73d83dc38f5c6c0c82f824da7eb090a16027f32fc40446b185580ee5e99be58","src/precedence.rs":"a8ce97ba0a25f442b5f238c64f078d70f4114b4b0f9df82764d533dd39a47abb","src/ring.rs":"517b1a02f8e0a9c1316830117daad1e30d17e1fcf6428c6b438c626aa43286ae","src/stmt.rs":"e17ab9647fed9daa4f5b2fbd007015128f2a7fc65686a988593444a37242f885","src/token.rs":"c288b1d81f2a35673d4ca1dd10d3386670b067460121df3038303e1ed73b41a7","src/ty.rs":"6fdae0aeb40d3cfb67a98f8806d0be29ad7517bf91fb
2387cda0bcdbf3075ffe","tests/test.rs":"c6f8c7830b7491fca1d56e41aa4acc6256b683a3556a48982f57ae62d38aaaa2","tests/test_precedence.rs":"de0c770b9a72e5eba8a52dcac0614d6db8ff5041ba601e1e67f113d68c9afd50"},"package":"479ca8adacdd7ce8f1fb39ce9ecccbfe93a3f1344b3d0d97f20bc0196208f62b"} \ No newline at end of file diff --git a/vendor/prettyplease/.cargo_vcs_info.json b/vendor/prettyplease/.cargo_vcs_info.json new file mode 100644 index 00000000000000..f99293c21f941e --- /dev/null +++ b/vendor/prettyplease/.cargo_vcs_info.json @@ -0,0 +1,6 @@ +{ + "git": { + "sha1": "c971184fa8c5ef5a2828196e35bd99469455b46b" + }, + "path_in_vcs": "" +} \ No newline at end of file diff --git a/vendor/prettyplease/.github/FUNDING.yml b/vendor/prettyplease/.github/FUNDING.yml new file mode 100644 index 00000000000000..750707701cdae9 --- /dev/null +++ b/vendor/prettyplease/.github/FUNDING.yml @@ -0,0 +1 @@ +github: dtolnay diff --git a/vendor/prettyplease/.github/workflows/ci.yml b/vendor/prettyplease/.github/workflows/ci.yml new file mode 100644 index 00000000000000..e45d52e90582e7 --- /dev/null +++ b/vendor/prettyplease/.github/workflows/ci.yml @@ -0,0 +1,123 @@ +name: CI + +on: + push: + pull_request: + workflow_dispatch: + schedule: [cron: "40 1 * * *"] + +permissions: + contents: read + +env: + RUSTFLAGS: -Dwarnings + +jobs: + pre_ci: + uses: dtolnay/.github/.github/workflows/pre_ci.yml@master + + test: + name: Rust ${{matrix.rust}} + needs: pre_ci + if: needs.pre_ci.outputs.continue + runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + rust: [nightly, beta, stable, 1.62.0] + timeout-minutes: 45 + steps: + - uses: actions/checkout@v4 + - uses: dtolnay/rust-toolchain@master + with: + toolchain: ${{matrix.rust}} + - run: cargo check + - run: cargo check --features verbatim + - run: cargo test + env: + RUSTFLAGS: ${{env.RUSTFLAGS}} ${{matrix.rust == 'nightly' && '--cfg exhaustive' || ''}} + - run: cargo test --release --test test_precedence + env: + RUSTFLAGS: ${{env.RUSTFLAGS}} ${{matrix.rust == 'nightly' && '--cfg exhaustive' || ''}} + if: matrix.rust != '1.62.0' + - uses: actions/upload-artifact@v4 + if: matrix.rust == 'nightly' && always() + with: + name: Cargo.lock + path: Cargo.lock + continue-on-error: true + + examples: + name: Examples + needs: pre_ci + if: needs.pre_ci.outputs.continue + runs-on: ubuntu-latest + timeout-minutes: 45 + steps: + - uses: actions/checkout@v4 + - uses: dtolnay/rust-toolchain@nightly + with: + components: llvm-tools, rustc-dev, rustfmt + - run: cargo run --manifest-path examples/update/Cargo.toml + - run: git diff --exit-code + - run: cargo run --manifest-path cargo-expand/update/Cargo.toml + - run: git diff --exit-code + + fuzz: + name: Fuzz + needs: pre_ci + if: needs.pre_ci.outputs.continue + runs-on: ubuntu-latest + timeout-minutes: 45 + steps: + - uses: actions/checkout@v4 + - uses: dtolnay/rust-toolchain@nightly + - uses: dtolnay/install@cargo-fuzz + - run: cargo fuzz check + + minimal: + name: Minimal versions + needs: pre_ci + if: needs.pre_ci.outputs.continue + runs-on: ubuntu-latest + timeout-minutes: 45 + steps: + - uses: actions/checkout@v4 + - uses: dtolnay/rust-toolchain@nightly + - run: cargo generate-lockfile -Z minimal-versions + - run: cargo check --locked + + doc: + name: Documentation + needs: pre_ci + if: needs.pre_ci.outputs.continue + runs-on: ubuntu-latest + timeout-minutes: 45 + env: + RUSTDOCFLAGS: -Dwarnings + steps: + - uses: actions/checkout@v4 + - uses: dtolnay/rust-toolchain@nightly + - uses: dtolnay/install@cargo-docs-rs + - run: cargo 
docs-rs + + clippy: + name: Clippy + runs-on: ubuntu-latest + if: github.event_name != 'pull_request' + timeout-minutes: 45 + steps: + - uses: actions/checkout@v4 + - uses: dtolnay/rust-toolchain@clippy + - run: cargo clippy --features verbatim -- -Dclippy::all -Dclippy::pedantic + + outdated: + name: Outdated + runs-on: ubuntu-latest + if: github.event_name != 'pull_request' + timeout-minutes: 45 + steps: + - uses: actions/checkout@v4 + - uses: dtolnay/rust-toolchain@stable + - uses: dtolnay/install@cargo-outdated + - run: cargo outdated --workspace --exit-code 1 diff --git a/vendor/prettyplease/Cargo.lock b/vendor/prettyplease/Cargo.lock new file mode 100644 index 00000000000000..8eb36c84d7020e --- /dev/null +++ b/vendor/prettyplease/Cargo.lock @@ -0,0 +1,54 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. +version = 3 + +[[package]] +name = "indoc" +version = "2.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f4c7245a08504955605670dbf141fceab975f15ca21570696aebe9d2e71576bd" + +[[package]] +name = "prettyplease" +version = "0.2.37" +dependencies = [ + "indoc", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "proc-macro2" +version = "1.0.101" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "89ae43fd86e4158d6db51ad8e2b80f313af9cc74f5c0e03ccb87de09998732de" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "quote" +version = "1.0.40" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1885c039570dc00dcb4ff087a89e185fd56bae234ddc7f056a945bf36467248d" +dependencies = [ + "proc-macro2", +] + +[[package]] +name = "syn" +version = "2.0.106" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ede7c438028d4436d71104916910f5bb611972c5cfd7f89b8300a8186e6fada6" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "unicode-ident" +version = "1.0.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a5f39404a5da50712a4c1eecf25e90dd62b613502b7e925fd4e4d19b5c96512" diff --git a/vendor/prettyplease/Cargo.toml b/vendor/prettyplease/Cargo.toml new file mode 100644 index 00000000000000..ae2438cf4d2655 --- /dev/null +++ b/vendor/prettyplease/Cargo.toml @@ -0,0 +1,90 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies. +# +# If you are reading this file be aware that the original Cargo.toml +# will likely look very different (and much more reasonable). +# See Cargo.toml.orig for the original contents. 
+ +[package] +edition = "2021" +rust-version = "1.62" +name = "prettyplease" +version = "0.2.37" +authors = ["David Tolnay "] +build = "build.rs" +links = "prettyplease02" +exclude = ["cargo-expand"] +autolib = false +autobins = false +autoexamples = false +autotests = false +autobenches = false +description = "A minimal `syn` syntax tree pretty-printer" +documentation = "https://docs.rs/prettyplease" +readme = "README.md" +keywords = ["rustfmt"] +categories = ["development-tools"] +license = "MIT OR Apache-2.0" +repository = "https://github.com/dtolnay/prettyplease" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] +rustdoc-args = [ + "--generate-link-to-definition", + "--extern-html-root-url=core=https://doc.rust-lang.org", + "--extern-html-root-url=alloc=https://doc.rust-lang.org", + "--extern-html-root-url=std=https://doc.rust-lang.org", +] + +[package.metadata.playground] +features = ["verbatim"] + +[features] +verbatim = ["syn/parsing"] + +[lib] +name = "prettyplease" +path = "src/lib.rs" + +[[test]] +name = "test" +path = "tests/test.rs" + +[[test]] +name = "test_precedence" +path = "tests/test_precedence.rs" + +[dependencies.proc-macro2] +version = "1.0.80" +default-features = false + +[dependencies.syn] +version = "2.0.105" +features = ["full"] +default-features = false + +[dev-dependencies.indoc] +version = "2" + +[dev-dependencies.proc-macro2] +version = "1.0.80" +default-features = false + +[dev-dependencies.quote] +version = "1.0.35" +default-features = false + +[dev-dependencies.syn] +version = "2.0.105" +features = [ + "clone-impls", + "extra-traits", + "parsing", + "printing", + "visit-mut", +] +default-features = false diff --git a/vendor/prettyplease/LICENSE-APACHE b/vendor/prettyplease/LICENSE-APACHE new file mode 100644 index 00000000000000..1b5ec8b78e237b --- /dev/null +++ b/vendor/prettyplease/LICENSE-APACHE @@ -0,0 +1,176 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS diff --git a/vendor/prettyplease/LICENSE-MIT b/vendor/prettyplease/LICENSE-MIT new file mode 100644 index 00000000000000..31aa79387f27e7 --- /dev/null +++ b/vendor/prettyplease/LICENSE-MIT @@ -0,0 +1,23 @@ +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. diff --git a/vendor/prettyplease/README.md b/vendor/prettyplease/README.md new file mode 100644 index 00000000000000..4584c48587644b --- /dev/null +++ b/vendor/prettyplease/README.md @@ -0,0 +1,312 @@ +prettyplease::unparse +===================== + +[github](https://github.com/dtolnay/prettyplease) +[crates.io](https://crates.io/crates/prettyplease) +[docs.rs](https://docs.rs/prettyplease) +[build status](https://github.com/dtolnay/prettyplease/actions?query=branch%3Amaster) + +A minimal `syn` syntax tree pretty-printer. + +
+ +## Overview + +This is a pretty-printer to turn a `syn` syntax tree into a `String` of +well-formatted source code. In contrast to rustfmt, this library is intended to +be suitable for arbitrary generated code. + +Rustfmt prioritizes high-quality output that is impeccable enough that you'd be +comfortable spending your career staring at its output — but that means +some heavyweight algorithms, and it has a tendency to bail out on code that is +hard to format (for example [rustfmt#3697], and there are dozens more issues +like it). That's not necessarily a big deal for human-generated code because +when code gets highly nested, the human will naturally be inclined to refactor +into more easily formattable code. But for generated code, having the formatter +just give up leaves it totally unreadable. + +[rustfmt#3697]: https://github.com/rust-lang/rustfmt/issues/3697 + +This library is designed using the simplest possible algorithm and data +structures that can deliver about 95% of the quality of rustfmt-formatted +output. In my experience testing real-world code, approximately 97-98% of output +lines come out identical between rustfmt's formatting and this crate's. The rest +have slightly different linebreak decisions, but still clearly follow the +dominant modern Rust style. + +The tradeoffs made by this crate are a good fit for generated code that you will +*not* spend your career staring at. For example, the output of `bindgen`, or the +output of `cargo-expand`. In those cases it's more important that the whole +thing be formattable without the formatter giving up, than that it be flawless. + +
+ +## Feature matrix + +Here are a few superficial comparisons of this crate against the AST +pretty-printer built into rustc, and rustfmt. The sections below go into more +detail comparing the output of each of these libraries. + +| | prettyplease | rustc | rustfmt | +|:---|:---:|:---:|:---:| +| non-pathological behavior on big or generated code | 💚 | ❌ | ❌ | +| idiomatic modern formatting ("locally indistinguishable from rustfmt") | 💚 | ❌ | 💚 | +| throughput | 60 MB/s | 39 MB/s | 2.8 MB/s | +| number of dependencies | 3 | 72 | 66 | +| compile time including dependencies | 2.4 sec | 23.1 sec | 29.8 sec | +| buildable using a stable Rust compiler | 💚 | ❌ | ❌ | +| published to crates.io | 💚 | ❌ | ❌ | +| extensively configurable output | ❌ | ❌ | 💚 | +| intended to accommodate hand-maintained source code | ❌ | ❌ | 💚 | + +
+ +## Comparison to rustfmt + +- [input.rs](https://github.com/dtolnay/prettyplease/blob/0.1.0/examples/input.rs) +- [output.prettyplease.rs](https://github.com/dtolnay/prettyplease/blob/0.1.0/examples/output.prettyplease.rs) +- [output.rustfmt.rs](https://github.com/dtolnay/prettyplease/blob/0.1.0/examples/output.rustfmt.rs) + +If you weren't told which output file is which, it would be practically +impossible to tell — **except** for line 435 in the rustfmt output, which +is more than 1000 characters long because rustfmt just gave up formatting that +part of the file: + +```rust + match segments[5] { + 0 => write!(f, "::{}", ipv4), + 0xffff => write!(f, "::ffff:{}", ipv4), + _ => unreachable!(), + } + } else { # [derive (Copy , Clone , Default)] struct Span { start : usize , len : usize , } let zeroes = { let mut longest = Span :: default () ; let mut current = Span :: default () ; for (i , & segment) in segments . iter () . enumerate () { if segment == 0 { if current . len == 0 { current . start = i ; } current . len += 1 ; if current . len > longest . len { longest = current ; } } else { current = Span :: default () ; } } longest } ; # [doc = " Write a colon-separated part of the address"] # [inline] fn fmt_subslice (f : & mut fmt :: Formatter < '_ > , chunk : & [u16]) -> fmt :: Result { if let Some ((first , tail)) = chunk . split_first () { write ! (f , "{:x}" , first) ? ; for segment in tail { f . write_char (':') ? ; write ! (f , "{:x}" , segment) ? ; } } Ok (()) } if zeroes . len > 1 { fmt_subslice (f , & segments [.. zeroes . start]) ? ; f . write_str ("::") ? ; fmt_subslice (f , & segments [zeroes . start + zeroes . len ..]) } else { fmt_subslice (f , & segments) } } + } else { + const IPV6_BUF_LEN: usize = (4 * 8) + 7; + let mut buf = [0u8; IPV6_BUF_LEN]; + let mut buf_slice = &mut buf[..]; +``` + +This is a pretty typical manifestation of rustfmt bailing out in generated code +— a chunk of the input ends up on one line. The other manifestation is +that you're working on some code, running rustfmt on save like a conscientious +developer, but after a while notice it isn't doing anything. You introduce an +intentional formatting issue, like a stray indent or semicolon, and run rustfmt +to check your suspicion. Nope, it doesn't get cleaned up — rustfmt is just +not formatting the part of the file you are working on. + +The prettyplease library is designed to have no pathological cases that force a +bail out; the entire input you give it will get formatted in some "good enough" +form. + +Separately, rustfmt can be problematic to integrate into projects. It's written +using rustc's internal syntax tree, so it can't be built by a stable compiler. +Its releases are not regularly published to crates.io, so in Cargo builds you'd +need to depend on it as a git dependency, which precludes publishing your crate +to crates.io also. You can shell out to a `rustfmt` binary, but that'll be +whatever rustfmt version is installed on each developer's system (if any), which +can lead to spurious diffs in checked-in generated code formatted by different +versions. In contrast prettyplease is designed to be easy to pull in as a +library, and compiles fast. + +
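+For the library route, the entire integration surface is a single call. The
+sketch below is only an illustration (the helper name is made up; the only
+real API used is `syn::parse_file` and `prettyplease::unparse`), but it shows
+how a code generator can format its output in-process, so the result depends
+on the prettyplease version in your lockfile rather than on whichever
+`rustfmt`, if any, is installed on a given machine:
+
+```rust
+// Minimal sketch: format a string of generated Rust source in-process.
+// Because prettyplease is an ordinary dependency, the output is the same
+// on every machine building with the same lockfile.
+fn format_generated(source: &str) -> String {
+    // Requires syn with the "full" and "parsing" features enabled.
+    let file = syn::parse_file(source).expect("generated code should parse");
+    prettyplease::unparse(&file)
+}
+
+fn main() {
+    let generated = "fn add(a:i32,b:i32)->i32{a+b}";
+    print!("{}", format_generated(generated));
+}
+```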
+ +## Comparison to rustc_ast_pretty + +- [input.rs](https://github.com/dtolnay/prettyplease/blob/0.1.0/examples/input.rs) +- [output.prettyplease.rs](https://github.com/dtolnay/prettyplease/blob/0.1.0/examples/output.prettyplease.rs) +- [output.rustc.rs](https://github.com/dtolnay/prettyplease/blob/0.1.0/examples/output.rustc.rs) + +This is the pretty-printer that gets used when rustc prints source code, such as +`rustc -Zunpretty=expanded`. It's used also by the standard library's +`stringify!` when stringifying an interpolated macro_rules AST fragment, like an +$:expr, and transitively by `dbg!` and many macros in the ecosystem. + +Rustc's formatting is mostly okay, but does not hew closely to the dominant +contemporary style of Rust formatting. Some things wouldn't ever be written on +one line, like this `match` expression, and certainly not with a comma in front +of the closing brace: + +```rust +fn eq(&self, other: &IpAddr) -> bool { + match other { IpAddr::V4(v4) => self == v4, IpAddr::V6(_) => false, } +} +``` + +Some places use non-multiple-of-4 indentation, which is definitely not the norm: + +```rust +pub const fn to_ipv6_mapped(&self) -> Ipv6Addr { + let [a, b, c, d] = self.octets(); + Ipv6Addr{inner: + c::in6_addr{s6_addr: + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xFF, + 0xFF, a, b, c, d],},} +} +``` + +And although there isn't an egregious example of it in the link because the +input code is pretty tame, in general rustc_ast_pretty has pathological behavior +on generated code. It has a tendency to use excessive horizontal indentation and +rapidly run out of width: + +```rust +::std::io::_print(::core::fmt::Arguments::new_v1(&[""], + &match (&msg,) { + _args => + [::core::fmt::ArgumentV1::new(_args.0, + ::core::fmt::Display::fmt)], + })); +``` + +The snippets above are clearly different from modern rustfmt style. In contrast, +prettyplease is designed to have output that is practically indistinguishable +from rustfmt-formatted code. + +
+
+## Example
+
+```rust
+// [dependencies]
+// prettyplease = "0.2"
+// syn = { version = "2", default-features = false, features = ["full", "parsing"] }
+
+const INPUT: &str = stringify! {
+    use crate::{
+        lazy::{Lazy, SyncLazy, SyncOnceCell}, panic,
+        sync::{ atomic::{AtomicUsize, Ordering::SeqCst},
+            mpsc::channel, Mutex, },
+        thread,
+    };
+    impl<T, U> Into<U> for T where U: From<T> {
+        fn into(self) -> U { U::from(self) }
+    }
+};
+
+fn main() {
+    let syntax_tree = syn::parse_file(INPUT).unwrap();
+    let formatted = prettyplease::unparse(&syntax_tree);
+    print!("{}", formatted);
+}
+```
+
+
+ +## Algorithm notes + +The approach and terminology used in the implementation are derived from [*Derek +C. Oppen, "Pretty Printing" (1979)*][paper], on which rustc_ast_pretty is also +based, and from rustc_ast_pretty's implementation written by Graydon Hoare in +2011 (and modernized over the years by dozens of volunteer maintainers). + +[paper]: http://i.stanford.edu/pub/cstr/reports/cs/tr/79/770/CS-TR-79-770.pdf + +The paper describes two language-agnostic interacting procedures `Scan()` and +`Print()`. Language-specific code decomposes an input data structure into a +stream of `string` and `break` tokens, and `begin` and `end` tokens for +grouping. Each `begin`–`end` range may be identified as either "consistent +breaking" or "inconsistent breaking". If a group is consistently breaking, then +if the whole contents do not fit on the line, *every* `break` token in the group +will receive a linebreak. This is appropriate, for example, for Rust struct +literals, or arguments of a function call. If a group is inconsistently +breaking, then the `string` tokens in the group are greedily placed on the line +until out of space, and linebroken only at those `break` tokens for which the +next string would not fit. For example, this is appropriate for the contents of +a braced `use` statement in Rust. + +Scan's job is to efficiently accumulate sizing information about groups and +breaks. For every `begin` token we compute the distance to the matched `end` +token, and for every `break` we compute the distance to the next `break`. The +algorithm uses a ringbuffer to hold tokens whose size is not yet ascertained. +The maximum size of the ringbuffer is bounded by the target line length and does +not grow indefinitely, regardless of deep nesting in the input stream. That's +because once a group is sufficiently big, the precise size can no longer make a +difference to linebreak decisions and we can effectively treat it as "infinity". + +Print's job is to use the sizing information to efficiently assign a "broken" or +"not broken" status to every `begin` token. At that point the output is easily +constructed by concatenating `string` tokens and breaking at `break` tokens +contained within a broken group. + +Leveraging these primitives (i.e. cleverly placing the all-or-nothing consistent +breaks and greedy inconsistent breaks) to yield rustfmt-compatible formatting +for all of Rust's syntax tree nodes is a fun challenge. + +Here is a visualization of some Rust tokens fed into the pretty printing +algorithm. Consistently breaking `begin`—`end` pairs are represented by +`«`⁠`»`, inconsistently breaking by `‹`⁠`›`, `break` by `·`, and the +rest of the non-whitespace are `string`. + +```text +use crate::«{· +‹ lazy::«{·‹Lazy,· SyncLazy,· SyncOnceCell›·}»,· + panic,· + sync::«{· +‹ atomic::«{·‹AtomicUsize,· Ordering::SeqCst›·}»,· + mpsc::channel,· Mutex›,· + }»,· + thread›,· +}»;· +«‹«impl<«·T‹›,· U‹›·»>» Into<«·U·»>· for T›· +where· + U:‹ From<«·T·»>›,· +{· +« fn into(·«·self·») -> U {· +‹ U::from(«·self·»)›· +» }· +»}· +``` + +The algorithm described in the paper is not quite sufficient for producing +well-formatted Rust code that is locally indistinguishable from rustfmt's style. +The reason is that in the paper, the complete non-whitespace contents are +assumed to be independent of linebreak decisions, with Scan and Print being only +in control of the whitespace (spaces and line breaks). In Rust as idiomatically +formatted by rustfmt, that is not the case. 
Trailing commas are one example; the +punctuation is only known *after* the broken vs non-broken status of the +surrounding group is known: + +```rust +let _ = Struct { x: 0, y: true }; + +let _ = Struct { + x: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx, + y: yyyyyyyyyyyyyyyyyyyyyyyyyyyyyy, //<- trailing comma if the expression wrapped +}; +``` + +The formatting of `match` expressions is another case; we want small arms on the +same line as the pattern, and big arms wrapped in a brace. The presence of the +brace punctuation, comma, and semicolon are all dependent on whether the arm +fits on the line: + +```rust +match total_nanos.checked_add(entry.nanos as u64) { + Some(n) => tmp = n, //<- small arm, inline with comma + None => { + total_secs = total_secs + .checked_add(total_nanos / NANOS_PER_SEC as u64) + .expect("overflow in iter::sum over durations"); + } //<- big arm, needs brace added, and also semicolon^ +} +``` + +The printing algorithm implementation in this crate accommodates all of these +situations with conditional punctuation tokens whose selection can be deferred +and populated after it's known that the group is or is not broken. + +
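+The deferred-punctuation idea is small enough to sketch in isolation. The
+following is not this crate's internal representation (it drops the
+`begin`/`end` grouping and the Scan/Print width measurement entirely, and the
+broken/flat decision is passed in by hand), but it shows how a conditional
+token lets the trailing comma be chosen only after the layout decision is
+known:
+
+```rust
+enum Token {
+    Str(&'static str),
+    // A space if the group stays flat; a newline plus this indent if broken.
+    Break(usize),
+    // (text to emit if the group broke, text to emit if it stayed flat)
+    IfBroken(&'static str, &'static str),
+}
+
+fn render(tokens: &[Token], broken: bool) -> String {
+    let mut out = String::new();
+    for t in tokens {
+        match t {
+            Token::Str(s) => out.push_str(s),
+            Token::Break(indent) => {
+                if broken {
+                    out.push('\n');
+                    out.push_str(&" ".repeat(*indent));
+                } else {
+                    out.push(' ');
+                }
+            }
+            Token::IfBroken(if_broken, if_flat) => {
+                out.push_str(if broken { if_broken } else { if_flat });
+            }
+        }
+    }
+    out
+}
+
+fn main() {
+    // One consistently-breaking group for a struct literal. In the real
+    // algorithm the broken/flat choice comes from the measured width; here
+    // both outcomes are rendered to show the conditional trailing comma.
+    let group = [
+        Token::Str("Struct {"),
+        Token::Break(4),
+        Token::Str("x: 0,"),
+        Token::Break(4),
+        Token::Str("y: true"),
+        Token::IfBroken(",", ""), // trailing comma only when the group wrapped
+        Token::Break(0),
+        Token::Str("}"),
+    ];
+    println!("{}", render(&group, false)); // Struct { x: 0, y: true }
+    println!("{}", render(&group, true));
+}
+```
+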
+ +#### License + + +Licensed under either of Apache License, Version +2.0 or MIT license at your option. + + +
+ + +Unless you explicitly state otherwise, any contribution intentionally submitted +for inclusion in this crate by you, as defined in the Apache-2.0 license, shall +be dual licensed as above, without any additional terms or conditions. + diff --git a/vendor/prettyplease/build.rs b/vendor/prettyplease/build.rs new file mode 100644 index 00000000000000..4fc36f7468cc27 --- /dev/null +++ b/vendor/prettyplease/build.rs @@ -0,0 +1,21 @@ +use std::env; +use std::ffi::OsString; +use std::process; + +fn main() { + println!("cargo:rerun-if-changed=build.rs"); + + println!("cargo:rustc-check-cfg=cfg(exhaustive)"); + println!("cargo:rustc-check-cfg=cfg(prettyplease_debug)"); + println!("cargo:rustc-check-cfg=cfg(prettyplease_debug_indent)"); + + let pkg_version = cargo_env_var("CARGO_PKG_VERSION"); + println!("cargo:VERSION={}", pkg_version.to_str().unwrap()); +} + +fn cargo_env_var(key: &str) -> OsString { + env::var_os(key).unwrap_or_else(|| { + eprintln!("Environment variable ${key} is not set during execution of build script"); + process::exit(1); + }) +} diff --git a/vendor/prettyplease/examples/.tokeignore b/vendor/prettyplease/examples/.tokeignore new file mode 100644 index 00000000000000..6f5f3d11d3ed50 --- /dev/null +++ b/vendor/prettyplease/examples/.tokeignore @@ -0,0 +1 @@ +*.rs diff --git a/vendor/prettyplease/examples/input.rs b/vendor/prettyplease/examples/input.rs new file mode 100644 index 00000000000000..ca3d9803a82aa6 --- /dev/null +++ b/vendor/prettyplease/examples/input.rs @@ -0,0 +1 @@ +use crate :: cmp :: Ordering ; use crate :: fmt :: { self , Write as FmtWrite } ; use crate :: hash ; use crate :: io :: Write as IoWrite ; use crate :: mem :: transmute ; use crate :: sys :: net :: netc as c ; use crate :: sys_common :: { AsInner , FromInner , IntoInner } ; # [derive (Copy , Clone , Eq , PartialEq , Hash , PartialOrd , Ord)] pub enum IpAddr { V4 (Ipv4Addr) , V6 (Ipv6Addr) , } # [derive (Copy)] pub struct Ipv4Addr { inner : c :: in_addr , } # [derive (Copy)] pub struct Ipv6Addr { inner : c :: in6_addr , } # [derive (Copy , PartialEq , Eq , Clone , Hash , Debug)] # [non_exhaustive] pub enum Ipv6MulticastScope { InterfaceLocal , LinkLocal , RealmLocal , AdminLocal , SiteLocal , OrganizationLocal , Global , } impl IpAddr { pub const fn is_unspecified (& self) -> bool { match self { IpAddr :: V4 (ip) => ip . is_unspecified () , IpAddr :: V6 (ip) => ip . is_unspecified () , } } pub const fn is_loopback (& self) -> bool { match self { IpAddr :: V4 (ip) => ip . is_loopback () , IpAddr :: V6 (ip) => ip . is_loopback () , } } pub const fn is_global (& self) -> bool { match self { IpAddr :: V4 (ip) => ip . is_global () , IpAddr :: V6 (ip) => ip . is_global () , } } pub const fn is_multicast (& self) -> bool { match self { IpAddr :: V4 (ip) => ip . is_multicast () , IpAddr :: V6 (ip) => ip . is_multicast () , } } pub const fn is_documentation (& self) -> bool { match self { IpAddr :: V4 (ip) => ip . is_documentation () , IpAddr :: V6 (ip) => ip . is_documentation () , } } pub const fn is_benchmarking (& self) -> bool { match self { IpAddr :: V4 (ip) => ip . is_benchmarking () , IpAddr :: V6 (ip) => ip . is_benchmarking () , } } pub const fn is_ipv4 (& self) -> bool { matches ! (self , IpAddr :: V4 (_)) } pub const fn is_ipv6 (& self) -> bool { matches ! (self , IpAddr :: V6 (_)) } pub const fn to_canonical (& self) -> IpAddr { match self { & v4 @ IpAddr :: V4 (_) => v4 , IpAddr :: V6 (v6) => v6 . 
to_canonical () , } } } impl Ipv4Addr { pub const fn new (a : u8 , b : u8 , c : u8 , d : u8) -> Ipv4Addr { Ipv4Addr { inner : c :: in_addr { s_addr : u32 :: from_ne_bytes ([a , b , c , d]) } } } pub const LOCALHOST : Self = Ipv4Addr :: new (127 , 0 , 0 , 1) ; # [doc (alias = "INADDR_ANY")] pub const UNSPECIFIED : Self = Ipv4Addr :: new (0 , 0 , 0 , 0) ; pub const BROADCAST : Self = Ipv4Addr :: new (255 , 255 , 255 , 255) ; pub const fn octets (& self) -> [u8 ; 4] { self . inner . s_addr . to_ne_bytes () } pub const fn is_unspecified (& self) -> bool { self . inner . s_addr == 0 } pub const fn is_loopback (& self) -> bool { self . octets () [0] == 127 } pub const fn is_private (& self) -> bool { match self . octets () { [10 , ..] => true , [172 , b , ..] if b >= 16 && b <= 31 => true , [192 , 168 , ..] => true , _ => false , } } pub const fn is_link_local (& self) -> bool { matches ! (self . octets () , [169 , 254 , ..]) } pub const fn is_global (& self) -> bool { if u32 :: from_be_bytes (self . octets ()) == 0xc0000009 || u32 :: from_be_bytes (self . octets ()) == 0xc000000a { return true ; } ! self . is_private () && ! self . is_loopback () && ! self . is_link_local () && ! self . is_broadcast () && ! self . is_documentation () && ! self . is_shared () && ! (self . octets () [0] == 192 && self . octets () [1] == 0 && self . octets () [2] == 0) && ! self . is_reserved () && ! self . is_benchmarking () && self . octets () [0] != 0 } pub const fn is_shared (& self) -> bool { self . octets () [0] == 100 && (self . octets () [1] & 0b1100_0000 == 0b0100_0000) } pub const fn is_benchmarking (& self) -> bool { self . octets () [0] == 198 && (self . octets () [1] & 0xfe) == 18 } pub const fn is_reserved (& self) -> bool { self . octets () [0] & 240 == 240 && ! self . is_broadcast () } pub const fn is_multicast (& self) -> bool { self . octets () [0] >= 224 && self . octets () [0] <= 239 } pub const fn is_broadcast (& self) -> bool { u32 :: from_be_bytes (self . octets ()) == u32 :: from_be_bytes (Self :: BROADCAST . octets ()) } pub const fn is_documentation (& self) -> bool { matches ! (self . octets () , [192 , 0 , 2 , _] | [198 , 51 , 100 , _] | [203 , 0 , 113 , _]) } pub const fn to_ipv6_compatible (& self) -> Ipv6Addr { let [a , b , c , d] = self . octets () ; Ipv6Addr { inner : c :: in6_addr { s6_addr : [0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , a , b , c , d] } , } } pub const fn to_ipv6_mapped (& self) -> Ipv6Addr { let [a , b , c , d] = self . octets () ; Ipv6Addr { inner : c :: in6_addr { s6_addr : [0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0xFF , 0xFF , a , b , c , d] } , } } } impl fmt :: Display for IpAddr { fn fmt (& self , fmt : & mut fmt :: Formatter < '_ >) -> fmt :: Result { match self { IpAddr :: V4 (ip) => ip . fmt (fmt) , IpAddr :: V6 (ip) => ip . fmt (fmt) , } } } impl fmt :: Debug for IpAddr { fn fmt (& self , fmt : & mut fmt :: Formatter < '_ >) -> fmt :: Result { fmt :: Display :: fmt (self , fmt) } } impl From < Ipv4Addr > for IpAddr { fn from (ipv4 : Ipv4Addr) -> IpAddr { IpAddr :: V4 (ipv4) } } impl From < Ipv6Addr > for IpAddr { fn from (ipv6 : Ipv6Addr) -> IpAddr { IpAddr :: V6 (ipv6) } } impl fmt :: Display for Ipv4Addr { fn fmt (& self , fmt : & mut fmt :: Formatter < '_ >) -> fmt :: Result { let octets = self . octets () ; if fmt . precision () . is_none () && fmt . width () . is_none () { write ! 
(fmt , "{}.{}.{}.{}" , octets [0] , octets [1] , octets [2] , octets [3]) } else { const IPV4_BUF_LEN : usize = 15 ; let mut buf = [0u8 ; IPV4_BUF_LEN] ; let mut buf_slice = & mut buf [..] ; write ! (buf_slice , "{}.{}.{}.{}" , octets [0] , octets [1] , octets [2] , octets [3]) . unwrap () ; let len = IPV4_BUF_LEN - buf_slice . len () ; let buf = unsafe { crate :: str :: from_utf8_unchecked (& buf [.. len]) } ; fmt . pad (buf) } } } impl fmt :: Debug for Ipv4Addr { fn fmt (& self , fmt : & mut fmt :: Formatter < '_ >) -> fmt :: Result { fmt :: Display :: fmt (self , fmt) } } impl Clone for Ipv4Addr { fn clone (& self) -> Ipv4Addr { * self } } impl PartialEq for Ipv4Addr { fn eq (& self , other : & Ipv4Addr) -> bool { self . inner . s_addr == other . inner . s_addr } } impl PartialEq < Ipv4Addr > for IpAddr { fn eq (& self , other : & Ipv4Addr) -> bool { match self { IpAddr :: V4 (v4) => v4 == other , IpAddr :: V6 (_) => false , } } } impl PartialEq < IpAddr > for Ipv4Addr { fn eq (& self , other : & IpAddr) -> bool { match other { IpAddr :: V4 (v4) => self == v4 , IpAddr :: V6 (_) => false , } } } impl Eq for Ipv4Addr { } impl hash :: Hash for Ipv4Addr { fn hash < H : hash :: Hasher > (& self , s : & mut H) { { self . inner . s_addr } . hash (s) } } impl PartialOrd for Ipv4Addr { fn partial_cmp (& self , other : & Ipv4Addr) -> Option < Ordering > { Some (self . cmp (other)) } } impl PartialOrd < Ipv4Addr > for IpAddr { fn partial_cmp (& self , other : & Ipv4Addr) -> Option < Ordering > { match self { IpAddr :: V4 (v4) => v4 . partial_cmp (other) , IpAddr :: V6 (_) => Some (Ordering :: Greater) , } } } impl PartialOrd < IpAddr > for Ipv4Addr { fn partial_cmp (& self , other : & IpAddr) -> Option < Ordering > { match other { IpAddr :: V4 (v4) => self . partial_cmp (v4) , IpAddr :: V6 (_) => Some (Ordering :: Less) , } } } impl Ord for Ipv4Addr { fn cmp (& self , other : & Ipv4Addr) -> Ordering { u32 :: from_be (self . inner . s_addr) . cmp (& u32 :: from_be (other . inner . s_addr)) } } impl IntoInner < c :: in_addr > for Ipv4Addr { fn into_inner (self) -> c :: in_addr { self . inner } } impl From < Ipv4Addr > for u32 { fn from (ip : Ipv4Addr) -> u32 { let ip = ip . octets () ; u32 :: from_be_bytes (ip) } } impl From < u32 > for Ipv4Addr { fn from (ip : u32) -> Ipv4Addr { Ipv4Addr :: from (ip . to_be_bytes ()) } } impl From < [u8 ; 4] > for Ipv4Addr { fn from (octets : [u8 ; 4]) -> Ipv4Addr { Ipv4Addr :: new (octets [0] , octets [1] , octets [2] , octets [3]) } } impl From < [u8 ; 4] > for IpAddr { fn from (octets : [u8 ; 4]) -> IpAddr { IpAddr :: V4 (Ipv4Addr :: from (octets)) } } impl Ipv6Addr { pub const fn new (a : u16 , b : u16 , c : u16 , d : u16 , e : u16 , f : u16 , g : u16 , h : u16) -> Ipv6Addr { let addr16 = [a . to_be () , b . to_be () , c . to_be () , d . to_be () , e . to_be () , f . to_be () , g . to_be () , h . to_be () ,] ; Ipv6Addr { inner : c :: in6_addr { s6_addr : unsafe { transmute :: < _ , [u8 ; 16] > (addr16) } , } , } } pub const LOCALHOST : Self = Ipv6Addr :: new (0 , 0 , 0 , 0 , 0 , 0 , 0 , 1) ; pub const UNSPECIFIED : Self = Ipv6Addr :: new (0 , 0 , 0 , 0 , 0 , 0 , 0 , 0) ; pub const fn segments (& self) -> [u16 ; 8] { let [a , b , c , d , e , f , g , h] = unsafe { transmute :: < _ , [u16 ; 8] > (self . inner . 
s6_addr) } ; [u16 :: from_be (a) , u16 :: from_be (b) , u16 :: from_be (c) , u16 :: from_be (d) , u16 :: from_be (e) , u16 :: from_be (f) , u16 :: from_be (g) , u16 :: from_be (h) ,] } pub const fn is_unspecified (& self) -> bool { u128 :: from_be_bytes (self . octets ()) == u128 :: from_be_bytes (Ipv6Addr :: UNSPECIFIED . octets ()) } pub const fn is_loopback (& self) -> bool { u128 :: from_be_bytes (self . octets ()) == u128 :: from_be_bytes (Ipv6Addr :: LOCALHOST . octets ()) } pub const fn is_global (& self) -> bool { match self . multicast_scope () { Some (Ipv6MulticastScope :: Global) => true , None => self . is_unicast_global () , _ => false , } } pub const fn is_unique_local (& self) -> bool { (self . segments () [0] & 0xfe00) == 0xfc00 } pub const fn is_unicast (& self) -> bool { ! self . is_multicast () } pub const fn is_unicast_link_local (& self) -> bool { (self . segments () [0] & 0xffc0) == 0xfe80 } pub const fn is_documentation (& self) -> bool { (self . segments () [0] == 0x2001) && (self . segments () [1] == 0xdb8) } pub const fn is_benchmarking (& self) -> bool { (self . segments () [0] == 0x2001) && (self . segments () [1] == 0x2) && (self . segments () [2] == 0) } pub const fn is_unicast_global (& self) -> bool { self . is_unicast () && ! self . is_loopback () && ! self . is_unicast_link_local () && ! self . is_unique_local () && ! self . is_unspecified () && ! self . is_documentation () } pub const fn multicast_scope (& self) -> Option < Ipv6MulticastScope > { if self . is_multicast () { match self . segments () [0] & 0x000f { 1 => Some (Ipv6MulticastScope :: InterfaceLocal) , 2 => Some (Ipv6MulticastScope :: LinkLocal) , 3 => Some (Ipv6MulticastScope :: RealmLocal) , 4 => Some (Ipv6MulticastScope :: AdminLocal) , 5 => Some (Ipv6MulticastScope :: SiteLocal) , 8 => Some (Ipv6MulticastScope :: OrganizationLocal) , 14 => Some (Ipv6MulticastScope :: Global) , _ => None , } } else { None } } pub const fn is_multicast (& self) -> bool { (self . segments () [0] & 0xff00) == 0xff00 } pub const fn to_ipv4_mapped (& self) -> Option < Ipv4Addr > { match self . octets () { [0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0xff , 0xff , a , b , c , d] => { Some (Ipv4Addr :: new (a , b , c , d)) } _ => None , } } pub const fn to_ipv4 (& self) -> Option < Ipv4Addr > { if let [0 , 0 , 0 , 0 , 0 , 0 | 0xffff , ab , cd] = self . segments () { let [a , b] = ab . to_be_bytes () ; let [c , d] = cd . to_be_bytes () ; Some (Ipv4Addr :: new (a , b , c , d)) } else { None } } pub const fn to_canonical (& self) -> IpAddr { if let Some (mapped) = self . to_ipv4_mapped () { return IpAddr :: V4 (mapped) ; } IpAddr :: V6 (* self) } pub const fn octets (& self) -> [u8 ; 16] { self . inner . s6_addr } } impl fmt :: Display for Ipv6Addr { fn fmt (& self , f : & mut fmt :: Formatter < '_ >) -> fmt :: Result { if f . precision () . is_none () && f . width () . is_none () { let segments = self . segments () ; if self . is_unspecified () { f . write_str ("::") } else if self . is_loopback () { f . write_str ("::1") } else if let Some (ipv4) = self . to_ipv4 () { match segments [5] { 0 => write ! (f , "::{}" , ipv4) , 0xffff => write ! (f , "::ffff:{}" , ipv4) , _ => unreachable ! () , } } else { # [derive (Copy , Clone , Default)] struct Span { start : usize , len : usize , } let zeroes = { let mut longest = Span :: default () ; let mut current = Span :: default () ; for (i , & segment) in segments . iter () . enumerate () { if segment == 0 { if current . len == 0 { current . start = i ; } current . 
len += 1 ; if current . len > longest . len { longest = current ; } } else { current = Span :: default () ; } } longest } ; # [doc = " Write a colon-separated part of the address"] # [inline] fn fmt_subslice (f : & mut fmt :: Formatter < '_ > , chunk : & [u16]) -> fmt :: Result { if let Some ((first , tail)) = chunk . split_first () { write ! (f , "{:x}" , first) ? ; for segment in tail { f . write_char (':') ? ; write ! (f , "{:x}" , segment) ? ; } } Ok (()) } if zeroes . len > 1 { fmt_subslice (f , & segments [.. zeroes . start]) ? ; f . write_str ("::") ? ; fmt_subslice (f , & segments [zeroes . start + zeroes . len ..]) } else { fmt_subslice (f , & segments) } } } else { const IPV6_BUF_LEN : usize = (4 * 8) + 7 ; let mut buf = [0u8 ; IPV6_BUF_LEN] ; let mut buf_slice = & mut buf [..] ; write ! (buf_slice , "{}" , self) . unwrap () ; let len = IPV6_BUF_LEN - buf_slice . len () ; let buf = unsafe { crate :: str :: from_utf8_unchecked (& buf [.. len]) } ; f . pad (buf) } } } impl fmt :: Debug for Ipv6Addr { fn fmt (& self , fmt : & mut fmt :: Formatter < '_ >) -> fmt :: Result { fmt :: Display :: fmt (self , fmt) } } impl Clone for Ipv6Addr { fn clone (& self) -> Ipv6Addr { * self } } impl PartialEq for Ipv6Addr { fn eq (& self , other : & Ipv6Addr) -> bool { self . inner . s6_addr == other . inner . s6_addr } } impl PartialEq < IpAddr > for Ipv6Addr { fn eq (& self , other : & IpAddr) -> bool { match other { IpAddr :: V4 (_) => false , IpAddr :: V6 (v6) => self == v6 , } } } impl PartialEq < Ipv6Addr > for IpAddr { fn eq (& self , other : & Ipv6Addr) -> bool { match self { IpAddr :: V4 (_) => false , IpAddr :: V6 (v6) => v6 == other , } } } impl Eq for Ipv6Addr { } impl hash :: Hash for Ipv6Addr { fn hash < H : hash :: Hasher > (& self , s : & mut H) { self . inner . s6_addr . hash (s) } } impl PartialOrd for Ipv6Addr { fn partial_cmp (& self , other : & Ipv6Addr) -> Option < Ordering > { Some (self . cmp (other)) } } impl PartialOrd < Ipv6Addr > for IpAddr { fn partial_cmp (& self , other : & Ipv6Addr) -> Option < Ordering > { match self { IpAddr :: V4 (_) => Some (Ordering :: Less) , IpAddr :: V6 (v6) => v6 . partial_cmp (other) , } } } impl PartialOrd < IpAddr > for Ipv6Addr { fn partial_cmp (& self , other : & IpAddr) -> Option < Ordering > { match other { IpAddr :: V4 (_) => Some (Ordering :: Greater) , IpAddr :: V6 (v6) => self . partial_cmp (v6) , } } } impl Ord for Ipv6Addr { fn cmp (& self , other : & Ipv6Addr) -> Ordering { self . segments () . cmp (& other . segments ()) } } impl AsInner < c :: in6_addr > for Ipv6Addr { fn as_inner (& self) -> & c :: in6_addr { & self . inner } } impl FromInner < c :: in6_addr > for Ipv6Addr { fn from_inner (addr : c :: in6_addr) -> Ipv6Addr { Ipv6Addr { inner : addr } } } impl From < Ipv6Addr > for u128 { fn from (ip : Ipv6Addr) -> u128 { let ip = ip . octets () ; u128 :: from_be_bytes (ip) } } impl From < u128 > for Ipv6Addr { fn from (ip : u128) -> Ipv6Addr { Ipv6Addr :: from (ip . 
to_be_bytes ()) } } impl From < [u8 ; 16] > for Ipv6Addr { fn from (octets : [u8 ; 16]) -> Ipv6Addr { let inner = c :: in6_addr { s6_addr : octets } ; Ipv6Addr :: from_inner (inner) } } impl From < [u16 ; 8] > for Ipv6Addr { fn from (segments : [u16 ; 8]) -> Ipv6Addr { let [a , b , c , d , e , f , g , h] = segments ; Ipv6Addr :: new (a , b , c , d , e , f , g , h) } } impl From < [u8 ; 16] > for IpAddr { fn from (octets : [u8 ; 16]) -> IpAddr { IpAddr :: V6 (Ipv6Addr :: from (octets)) } } impl From < [u16 ; 8] > for IpAddr { fn from (segments : [u16 ; 8]) -> IpAddr { IpAddr :: V6 (Ipv6Addr :: from (segments)) } } diff --git a/vendor/prettyplease/examples/output.prettyplease.rs b/vendor/prettyplease/examples/output.prettyplease.rs new file mode 100644 index 00000000000000..45b65d00f103c7 --- /dev/null +++ b/vendor/prettyplease/examples/output.prettyplease.rs @@ -0,0 +1,593 @@ +use crate::cmp::Ordering; +use crate::fmt::{self, Write as FmtWrite}; +use crate::hash; +use crate::io::Write as IoWrite; +use crate::mem::transmute; +use crate::sys::net::netc as c; +use crate::sys_common::{AsInner, FromInner, IntoInner}; +#[derive(Copy, Clone, Eq, PartialEq, Hash, PartialOrd, Ord)] +pub enum IpAddr { + V4(Ipv4Addr), + V6(Ipv6Addr), +} +#[derive(Copy)] +pub struct Ipv4Addr { + inner: c::in_addr, +} +#[derive(Copy)] +pub struct Ipv6Addr { + inner: c::in6_addr, +} +#[derive(Copy, PartialEq, Eq, Clone, Hash, Debug)] +#[non_exhaustive] +pub enum Ipv6MulticastScope { + InterfaceLocal, + LinkLocal, + RealmLocal, + AdminLocal, + SiteLocal, + OrganizationLocal, + Global, +} +impl IpAddr { + pub const fn is_unspecified(&self) -> bool { + match self { + IpAddr::V4(ip) => ip.is_unspecified(), + IpAddr::V6(ip) => ip.is_unspecified(), + } + } + pub const fn is_loopback(&self) -> bool { + match self { + IpAddr::V4(ip) => ip.is_loopback(), + IpAddr::V6(ip) => ip.is_loopback(), + } + } + pub const fn is_global(&self) -> bool { + match self { + IpAddr::V4(ip) => ip.is_global(), + IpAddr::V6(ip) => ip.is_global(), + } + } + pub const fn is_multicast(&self) -> bool { + match self { + IpAddr::V4(ip) => ip.is_multicast(), + IpAddr::V6(ip) => ip.is_multicast(), + } + } + pub const fn is_documentation(&self) -> bool { + match self { + IpAddr::V4(ip) => ip.is_documentation(), + IpAddr::V6(ip) => ip.is_documentation(), + } + } + pub const fn is_benchmarking(&self) -> bool { + match self { + IpAddr::V4(ip) => ip.is_benchmarking(), + IpAddr::V6(ip) => ip.is_benchmarking(), + } + } + pub const fn is_ipv4(&self) -> bool { + matches!(self, IpAddr::V4(_)) + } + pub const fn is_ipv6(&self) -> bool { + matches!(self, IpAddr::V6(_)) + } + pub const fn to_canonical(&self) -> IpAddr { + match self { + &v4 @ IpAddr::V4(_) => v4, + IpAddr::V6(v6) => v6.to_canonical(), + } + } +} +impl Ipv4Addr { + pub const fn new(a: u8, b: u8, c: u8, d: u8) -> Ipv4Addr { + Ipv4Addr { + inner: c::in_addr { + s_addr: u32::from_ne_bytes([a, b, c, d]), + }, + } + } + pub const LOCALHOST: Self = Ipv4Addr::new(127, 0, 0, 1); + #[doc(alias = "INADDR_ANY")] + pub const UNSPECIFIED: Self = Ipv4Addr::new(0, 0, 0, 0); + pub const BROADCAST: Self = Ipv4Addr::new(255, 255, 255, 255); + pub const fn octets(&self) -> [u8; 4] { + self.inner.s_addr.to_ne_bytes() + } + pub const fn is_unspecified(&self) -> bool { + self.inner.s_addr == 0 + } + pub const fn is_loopback(&self) -> bool { + self.octets()[0] == 127 + } + pub const fn is_private(&self) -> bool { + match self.octets() { + [10, ..] => true, + [172, b, ..] if b >= 16 && b <= 31 => true, + [192, 168, ..] 
=> true, + _ => false, + } + } + pub const fn is_link_local(&self) -> bool { + matches!(self.octets(), [169, 254, ..]) + } + pub const fn is_global(&self) -> bool { + if u32::from_be_bytes(self.octets()) == 0xc0000009 + || u32::from_be_bytes(self.octets()) == 0xc000000a + { + return true; + } + !self.is_private() && !self.is_loopback() && !self.is_link_local() + && !self.is_broadcast() && !self.is_documentation() && !self.is_shared() + && !(self.octets()[0] == 192 && self.octets()[1] == 0 + && self.octets()[2] == 0) && !self.is_reserved() + && !self.is_benchmarking() && self.octets()[0] != 0 + } + pub const fn is_shared(&self) -> bool { + self.octets()[0] == 100 && (self.octets()[1] & 0b1100_0000 == 0b0100_0000) + } + pub const fn is_benchmarking(&self) -> bool { + self.octets()[0] == 198 && (self.octets()[1] & 0xfe) == 18 + } + pub const fn is_reserved(&self) -> bool { + self.octets()[0] & 240 == 240 && !self.is_broadcast() + } + pub const fn is_multicast(&self) -> bool { + self.octets()[0] >= 224 && self.octets()[0] <= 239 + } + pub const fn is_broadcast(&self) -> bool { + u32::from_be_bytes(self.octets()) == u32::from_be_bytes(Self::BROADCAST.octets()) + } + pub const fn is_documentation(&self) -> bool { + matches!(self.octets(), [192, 0, 2, _] | [198, 51, 100, _] | [203, 0, 113, _]) + } + pub const fn to_ipv6_compatible(&self) -> Ipv6Addr { + let [a, b, c, d] = self.octets(); + Ipv6Addr { + inner: c::in6_addr { + s6_addr: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, a, b, c, d], + }, + } + } + pub const fn to_ipv6_mapped(&self) -> Ipv6Addr { + let [a, b, c, d] = self.octets(); + Ipv6Addr { + inner: c::in6_addr { + s6_addr: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xFF, 0xFF, a, b, c, d], + }, + } + } +} +impl fmt::Display for IpAddr { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + IpAddr::V4(ip) => ip.fmt(fmt), + IpAddr::V6(ip) => ip.fmt(fmt), + } + } +} +impl fmt::Debug for IpAddr { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt::Display::fmt(self, fmt) + } +} +impl From for IpAddr { + fn from(ipv4: Ipv4Addr) -> IpAddr { + IpAddr::V4(ipv4) + } +} +impl From for IpAddr { + fn from(ipv6: Ipv6Addr) -> IpAddr { + IpAddr::V6(ipv6) + } +} +impl fmt::Display for Ipv4Addr { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + let octets = self.octets(); + if fmt.precision().is_none() && fmt.width().is_none() { + write!(fmt, "{}.{}.{}.{}", octets[0], octets[1], octets[2], octets[3]) + } else { + const IPV4_BUF_LEN: usize = 15; + let mut buf = [0u8; IPV4_BUF_LEN]; + let mut buf_slice = &mut buf[..]; + write!(buf_slice, "{}.{}.{}.{}", octets[0], octets[1], octets[2], octets[3]) + .unwrap(); + let len = IPV4_BUF_LEN - buf_slice.len(); + let buf = unsafe { crate::str::from_utf8_unchecked(&buf[..len]) }; + fmt.pad(buf) + } + } +} +impl fmt::Debug for Ipv4Addr { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt::Display::fmt(self, fmt) + } +} +impl Clone for Ipv4Addr { + fn clone(&self) -> Ipv4Addr { + *self + } +} +impl PartialEq for Ipv4Addr { + fn eq(&self, other: &Ipv4Addr) -> bool { + self.inner.s_addr == other.inner.s_addr + } +} +impl PartialEq for IpAddr { + fn eq(&self, other: &Ipv4Addr) -> bool { + match self { + IpAddr::V4(v4) => v4 == other, + IpAddr::V6(_) => false, + } + } +} +impl PartialEq for Ipv4Addr { + fn eq(&self, other: &IpAddr) -> bool { + match other { + IpAddr::V4(v4) => self == v4, + IpAddr::V6(_) => false, + } + } +} +impl Eq for Ipv4Addr {} +impl hash::Hash for Ipv4Addr { + fn hash(&self, s: &mut H) { + { 
self.inner.s_addr }.hash(s) + } +} +impl PartialOrd for Ipv4Addr { + fn partial_cmp(&self, other: &Ipv4Addr) -> Option { + Some(self.cmp(other)) + } +} +impl PartialOrd for IpAddr { + fn partial_cmp(&self, other: &Ipv4Addr) -> Option { + match self { + IpAddr::V4(v4) => v4.partial_cmp(other), + IpAddr::V6(_) => Some(Ordering::Greater), + } + } +} +impl PartialOrd for Ipv4Addr { + fn partial_cmp(&self, other: &IpAddr) -> Option { + match other { + IpAddr::V4(v4) => self.partial_cmp(v4), + IpAddr::V6(_) => Some(Ordering::Less), + } + } +} +impl Ord for Ipv4Addr { + fn cmp(&self, other: &Ipv4Addr) -> Ordering { + u32::from_be(self.inner.s_addr).cmp(&u32::from_be(other.inner.s_addr)) + } +} +impl IntoInner for Ipv4Addr { + fn into_inner(self) -> c::in_addr { + self.inner + } +} +impl From for u32 { + fn from(ip: Ipv4Addr) -> u32 { + let ip = ip.octets(); + u32::from_be_bytes(ip) + } +} +impl From for Ipv4Addr { + fn from(ip: u32) -> Ipv4Addr { + Ipv4Addr::from(ip.to_be_bytes()) + } +} +impl From<[u8; 4]> for Ipv4Addr { + fn from(octets: [u8; 4]) -> Ipv4Addr { + Ipv4Addr::new(octets[0], octets[1], octets[2], octets[3]) + } +} +impl From<[u8; 4]> for IpAddr { + fn from(octets: [u8; 4]) -> IpAddr { + IpAddr::V4(Ipv4Addr::from(octets)) + } +} +impl Ipv6Addr { + pub const fn new( + a: u16, + b: u16, + c: u16, + d: u16, + e: u16, + f: u16, + g: u16, + h: u16, + ) -> Ipv6Addr { + let addr16 = [ + a.to_be(), + b.to_be(), + c.to_be(), + d.to_be(), + e.to_be(), + f.to_be(), + g.to_be(), + h.to_be(), + ]; + Ipv6Addr { + inner: c::in6_addr { + s6_addr: unsafe { transmute::<_, [u8; 16]>(addr16) }, + }, + } + } + pub const LOCALHOST: Self = Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1); + pub const UNSPECIFIED: Self = Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 0); + pub const fn segments(&self) -> [u16; 8] { + let [a, b, c, d, e, f, g, h] = unsafe { + transmute::<_, [u16; 8]>(self.inner.s6_addr) + }; + [ + u16::from_be(a), + u16::from_be(b), + u16::from_be(c), + u16::from_be(d), + u16::from_be(e), + u16::from_be(f), + u16::from_be(g), + u16::from_be(h), + ] + } + pub const fn is_unspecified(&self) -> bool { + u128::from_be_bytes(self.octets()) + == u128::from_be_bytes(Ipv6Addr::UNSPECIFIED.octets()) + } + pub const fn is_loopback(&self) -> bool { + u128::from_be_bytes(self.octets()) + == u128::from_be_bytes(Ipv6Addr::LOCALHOST.octets()) + } + pub const fn is_global(&self) -> bool { + match self.multicast_scope() { + Some(Ipv6MulticastScope::Global) => true, + None => self.is_unicast_global(), + _ => false, + } + } + pub const fn is_unique_local(&self) -> bool { + (self.segments()[0] & 0xfe00) == 0xfc00 + } + pub const fn is_unicast(&self) -> bool { + !self.is_multicast() + } + pub const fn is_unicast_link_local(&self) -> bool { + (self.segments()[0] & 0xffc0) == 0xfe80 + } + pub const fn is_documentation(&self) -> bool { + (self.segments()[0] == 0x2001) && (self.segments()[1] == 0xdb8) + } + pub const fn is_benchmarking(&self) -> bool { + (self.segments()[0] == 0x2001) && (self.segments()[1] == 0x2) + && (self.segments()[2] == 0) + } + pub const fn is_unicast_global(&self) -> bool { + self.is_unicast() && !self.is_loopback() && !self.is_unicast_link_local() + && !self.is_unique_local() && !self.is_unspecified() + && !self.is_documentation() + } + pub const fn multicast_scope(&self) -> Option { + if self.is_multicast() { + match self.segments()[0] & 0x000f { + 1 => Some(Ipv6MulticastScope::InterfaceLocal), + 2 => Some(Ipv6MulticastScope::LinkLocal), + 3 => Some(Ipv6MulticastScope::RealmLocal), + 4 => 
Some(Ipv6MulticastScope::AdminLocal), + 5 => Some(Ipv6MulticastScope::SiteLocal), + 8 => Some(Ipv6MulticastScope::OrganizationLocal), + 14 => Some(Ipv6MulticastScope::Global), + _ => None, + } + } else { + None + } + } + pub const fn is_multicast(&self) -> bool { + (self.segments()[0] & 0xff00) == 0xff00 + } + pub const fn to_ipv4_mapped(&self) -> Option { + match self.octets() { + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, a, b, c, d] => { + Some(Ipv4Addr::new(a, b, c, d)) + } + _ => None, + } + } + pub const fn to_ipv4(&self) -> Option { + if let [0, 0, 0, 0, 0, 0 | 0xffff, ab, cd] = self.segments() { + let [a, b] = ab.to_be_bytes(); + let [c, d] = cd.to_be_bytes(); + Some(Ipv4Addr::new(a, b, c, d)) + } else { + None + } + } + pub const fn to_canonical(&self) -> IpAddr { + if let Some(mapped) = self.to_ipv4_mapped() { + return IpAddr::V4(mapped); + } + IpAddr::V6(*self) + } + pub const fn octets(&self) -> [u8; 16] { + self.inner.s6_addr + } +} +impl fmt::Display for Ipv6Addr { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + if f.precision().is_none() && f.width().is_none() { + let segments = self.segments(); + if self.is_unspecified() { + f.write_str("::") + } else if self.is_loopback() { + f.write_str("::1") + } else if let Some(ipv4) = self.to_ipv4() { + match segments[5] { + 0 => write!(f, "::{}", ipv4), + 0xffff => write!(f, "::ffff:{}", ipv4), + _ => unreachable!(), + } + } else { + #[derive(Copy, Clone, Default)] + struct Span { + start: usize, + len: usize, + } + let zeroes = { + let mut longest = Span::default(); + let mut current = Span::default(); + for (i, &segment) in segments.iter().enumerate() { + if segment == 0 { + if current.len == 0 { + current.start = i; + } + current.len += 1; + if current.len > longest.len { + longest = current; + } + } else { + current = Span::default(); + } + } + longest + }; + /// Write a colon-separated part of the address + #[inline] + fn fmt_subslice( + f: &mut fmt::Formatter<'_>, + chunk: &[u16], + ) -> fmt::Result { + if let Some((first, tail)) = chunk.split_first() { + write!(f, "{:x}", first)?; + for segment in tail { + f.write_char(':')?; + write!(f, "{:x}", segment)?; + } + } + Ok(()) + } + if zeroes.len > 1 { + fmt_subslice(f, &segments[..zeroes.start])?; + f.write_str("::")?; + fmt_subslice(f, &segments[zeroes.start + zeroes.len..]) + } else { + fmt_subslice(f, &segments) + } + } + } else { + const IPV6_BUF_LEN: usize = (4 * 8) + 7; + let mut buf = [0u8; IPV6_BUF_LEN]; + let mut buf_slice = &mut buf[..]; + write!(buf_slice, "{}", self).unwrap(); + let len = IPV6_BUF_LEN - buf_slice.len(); + let buf = unsafe { crate::str::from_utf8_unchecked(&buf[..len]) }; + f.pad(buf) + } + } +} +impl fmt::Debug for Ipv6Addr { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt::Display::fmt(self, fmt) + } +} +impl Clone for Ipv6Addr { + fn clone(&self) -> Ipv6Addr { + *self + } +} +impl PartialEq for Ipv6Addr { + fn eq(&self, other: &Ipv6Addr) -> bool { + self.inner.s6_addr == other.inner.s6_addr + } +} +impl PartialEq for Ipv6Addr { + fn eq(&self, other: &IpAddr) -> bool { + match other { + IpAddr::V4(_) => false, + IpAddr::V6(v6) => self == v6, + } + } +} +impl PartialEq for IpAddr { + fn eq(&self, other: &Ipv6Addr) -> bool { + match self { + IpAddr::V4(_) => false, + IpAddr::V6(v6) => v6 == other, + } + } +} +impl Eq for Ipv6Addr {} +impl hash::Hash for Ipv6Addr { + fn hash(&self, s: &mut H) { + self.inner.s6_addr.hash(s) + } +} +impl PartialOrd for Ipv6Addr { + fn partial_cmp(&self, other: &Ipv6Addr) -> Option { + 
Some(self.cmp(other)) + } +} +impl PartialOrd for IpAddr { + fn partial_cmp(&self, other: &Ipv6Addr) -> Option { + match self { + IpAddr::V4(_) => Some(Ordering::Less), + IpAddr::V6(v6) => v6.partial_cmp(other), + } + } +} +impl PartialOrd for Ipv6Addr { + fn partial_cmp(&self, other: &IpAddr) -> Option { + match other { + IpAddr::V4(_) => Some(Ordering::Greater), + IpAddr::V6(v6) => self.partial_cmp(v6), + } + } +} +impl Ord for Ipv6Addr { + fn cmp(&self, other: &Ipv6Addr) -> Ordering { + self.segments().cmp(&other.segments()) + } +} +impl AsInner for Ipv6Addr { + fn as_inner(&self) -> &c::in6_addr { + &self.inner + } +} +impl FromInner for Ipv6Addr { + fn from_inner(addr: c::in6_addr) -> Ipv6Addr { + Ipv6Addr { inner: addr } + } +} +impl From for u128 { + fn from(ip: Ipv6Addr) -> u128 { + let ip = ip.octets(); + u128::from_be_bytes(ip) + } +} +impl From for Ipv6Addr { + fn from(ip: u128) -> Ipv6Addr { + Ipv6Addr::from(ip.to_be_bytes()) + } +} +impl From<[u8; 16]> for Ipv6Addr { + fn from(octets: [u8; 16]) -> Ipv6Addr { + let inner = c::in6_addr { s6_addr: octets }; + Ipv6Addr::from_inner(inner) + } +} +impl From<[u16; 8]> for Ipv6Addr { + fn from(segments: [u16; 8]) -> Ipv6Addr { + let [a, b, c, d, e, f, g, h] = segments; + Ipv6Addr::new(a, b, c, d, e, f, g, h) + } +} +impl From<[u8; 16]> for IpAddr { + fn from(octets: [u8; 16]) -> IpAddr { + IpAddr::V6(Ipv6Addr::from(octets)) + } +} +impl From<[u16; 8]> for IpAddr { + fn from(segments: [u16; 8]) -> IpAddr { + IpAddr::V6(Ipv6Addr::from(segments)) + } +} diff --git a/vendor/prettyplease/examples/output.rustc.rs b/vendor/prettyplease/examples/output.rustc.rs new file mode 100644 index 00000000000000..a77a14a8ec077c --- /dev/null +++ b/vendor/prettyplease/examples/output.rustc.rs @@ -0,0 +1,506 @@ +use crate::cmp::Ordering;use crate::fmt::{self, Write as FmtWrite}; +use crate::hash; +use crate::io::Write as IoWrite; +use crate::mem::transmute; +use crate::sys::net::netc as c; +use crate::sys_common::{AsInner, FromInner, IntoInner}; +#[derive(Copy, Clone, Eq, PartialEq, Hash, PartialOrd, Ord)] +pub enum IpAddr { V4(Ipv4Addr), V6(Ipv6Addr), } +#[derive(Copy)] +pub struct Ipv4Addr { + inner: c::in_addr, +} +#[derive(Copy)] +pub struct Ipv6Addr { + inner: c::in6_addr, +} +#[derive(Copy, PartialEq, Eq, Clone, Hash, Debug)] +#[non_exhaustive] +pub enum Ipv6MulticastScope { + InterfaceLocal, + LinkLocal, + RealmLocal, + AdminLocal, + SiteLocal, + OrganizationLocal, + Global, +} +impl IpAddr { + pub const fn is_unspecified(&self) -> bool { + match self { + IpAddr::V4(ip) => ip.is_unspecified(), + IpAddr::V6(ip) => ip.is_unspecified(), + } + } + pub const fn is_loopback(&self) -> bool { + match self { + IpAddr::V4(ip) => ip.is_loopback(), + IpAddr::V6(ip) => ip.is_loopback(), + } + } + pub const fn is_global(&self) -> bool { + match self { + IpAddr::V4(ip) => ip.is_global(), + IpAddr::V6(ip) => ip.is_global(), + } + } + pub const fn is_multicast(&self) -> bool { + match self { + IpAddr::V4(ip) => ip.is_multicast(), + IpAddr::V6(ip) => ip.is_multicast(), + } + } + pub const fn is_documentation(&self) -> bool { + match self { + IpAddr::V4(ip) => ip.is_documentation(), + IpAddr::V6(ip) => ip.is_documentation(), + } + } + pub const fn is_benchmarking(&self) -> bool { + match self { + IpAddr::V4(ip) => ip.is_benchmarking(), + IpAddr::V6(ip) => ip.is_benchmarking(), + } + } + pub const fn is_ipv4(&self) -> bool { matches!(self, IpAddr :: V4(_)) } + pub const fn is_ipv6(&self) -> bool { matches!(self, IpAddr :: V6(_)) } + pub const fn to_canonical(&self) 
-> IpAddr { + match self { + &v4 @ IpAddr::V4(_) => v4, + IpAddr::V6(v6) => v6.to_canonical(), + } + } +} +impl Ipv4Addr { + pub const fn new(a: u8, b: u8, c: u8, d: u8) -> Ipv4Addr { + Ipv4Addr { + inner: c::in_addr { s_addr: u32::from_ne_bytes([a, b, c, d]) }, + } + } + pub const LOCALHOST: Self = Ipv4Addr::new(127, 0, 0, 1); + #[doc(alias = "INADDR_ANY")] + pub const UNSPECIFIED: Self = Ipv4Addr::new(0, 0, 0, 0); + pub const BROADCAST: Self = Ipv4Addr::new(255, 255, 255, 255); + pub const fn octets(&self) -> [u8; 4] { self.inner.s_addr.to_ne_bytes() } + pub const fn is_unspecified(&self) -> bool { self.inner.s_addr == 0 } + pub const fn is_loopback(&self) -> bool { self.octets()[0] == 127 } + pub const fn is_private(&self) -> bool { + match self.octets() { + [10, ..] => true, + [172, b, ..] if b >= 16 && b <= 31 => true, + [192, 168, ..] => true, + _ => false, + } + } + pub const fn is_link_local(&self) -> bool { + matches!(self.octets(), [169, 254, ..]) + } + pub const fn is_global(&self) -> bool { + if u32::from_be_bytes(self.octets()) == 0xc0000009 || + u32::from_be_bytes(self.octets()) == 0xc000000a { + return true; + } + !self.is_private() && !self.is_loopback() && !self.is_link_local() && + !self.is_broadcast() && !self.is_documentation() && + !self.is_shared() && + !(self.octets()[0] == 192 && self.octets()[1] == 0 && + self.octets()[2] == 0) && !self.is_reserved() && + !self.is_benchmarking() && self.octets()[0] != 0 + } + pub const fn is_shared(&self) -> bool { + self.octets()[0] == 100 && + (self.octets()[1] & 0b1100_0000 == 0b0100_0000) + } + pub const fn is_benchmarking(&self) -> bool { + self.octets()[0] == 198 && (self.octets()[1] & 0xfe) == 18 + } + pub const fn is_reserved(&self) -> bool { + self.octets()[0] & 240 == 240 && !self.is_broadcast() + } + pub const fn is_multicast(&self) -> bool { + self.octets()[0] >= 224 && self.octets()[0] <= 239 + } + pub const fn is_broadcast(&self) -> bool { + u32::from_be_bytes(self.octets()) == + u32::from_be_bytes(Self::BROADCAST.octets()) + } + pub const fn is_documentation(&self) -> bool { + matches!(self.octets(), [192, 0, 2, _] | [198, 51, 100, _] | + [203, 0, 113, _]) + } + pub const fn to_ipv6_compatible(&self) -> Ipv6Addr { + let [a, b, c, d] = self.octets(); + Ipv6Addr { + inner: c::in6_addr { + s6_addr: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, a, b, c, d], + }, + } + } + pub const fn to_ipv6_mapped(&self) -> Ipv6Addr { + let [a, b, c, d] = self.octets(); + Ipv6Addr { + inner: c::in6_addr { + s6_addr: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xFF, 0xFF, a, b, c, + d], + }, + } + } +} +impl fmt::Display for IpAddr { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + IpAddr::V4(ip) => ip.fmt(fmt), + IpAddr::V6(ip) => ip.fmt(fmt), + } + } +} +impl fmt::Debug for IpAddr { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt::Display::fmt(self, fmt) + } +} +impl From for IpAddr { + fn from(ipv4: Ipv4Addr) -> IpAddr { IpAddr::V4(ipv4) } +} +impl From for IpAddr { + fn from(ipv6: Ipv6Addr) -> IpAddr { IpAddr::V6(ipv6) } +} +impl fmt::Display for Ipv4Addr { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + let octets = self.octets(); + if fmt.precision().is_none() && fmt.width().is_none() { + write!(fmt, "{}.{}.{}.{}", octets [0], octets [1], octets [2], + octets [3]) + } else { + const IPV4_BUF_LEN: usize = 15; + let mut buf = [0u8; IPV4_BUF_LEN]; + let mut buf_slice = &mut buf[..]; + write!(buf_slice, "{}.{}.{}.{}", octets [0], octets [1], octets + [2], octets [3]).unwrap(); + let len = 
IPV4_BUF_LEN - buf_slice.len(); + let buf = unsafe { crate::str::from_utf8_unchecked(&buf[..len]) }; + fmt.pad(buf) + } + } +} +impl fmt::Debug for Ipv4Addr { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt::Display::fmt(self, fmt) + } +} +impl Clone for Ipv4Addr { + fn clone(&self) -> Ipv4Addr { *self } +} +impl PartialEq for Ipv4Addr { + fn eq(&self, other: &Ipv4Addr) -> bool { + self.inner.s_addr == other.inner.s_addr + } +} +impl PartialEq for IpAddr { + fn eq(&self, other: &Ipv4Addr) -> bool { + match self { IpAddr::V4(v4) => v4 == other, IpAddr::V6(_) => false, } + } +} +impl PartialEq for Ipv4Addr { + fn eq(&self, other: &IpAddr) -> bool { + match other { IpAddr::V4(v4) => self == v4, IpAddr::V6(_) => false, } + } +} +impl Eq for Ipv4Addr {} +impl hash::Hash for Ipv4Addr { + fn hash(&self, s: &mut H) { + { self.inner.s_addr }.hash(s) + } +} +impl PartialOrd for Ipv4Addr { + fn partial_cmp(&self, other: &Ipv4Addr) -> Option { + Some(self.cmp(other)) + } +} +impl PartialOrd for IpAddr { + fn partial_cmp(&self, other: &Ipv4Addr) -> Option { + match self { + IpAddr::V4(v4) => v4.partial_cmp(other), + IpAddr::V6(_) => Some(Ordering::Greater), + } + } +} +impl PartialOrd for Ipv4Addr { + fn partial_cmp(&self, other: &IpAddr) -> Option { + match other { + IpAddr::V4(v4) => self.partial_cmp(v4), + IpAddr::V6(_) => Some(Ordering::Less), + } + } +} +impl Ord for Ipv4Addr { + fn cmp(&self, other: &Ipv4Addr) -> Ordering { + u32::from_be(self.inner.s_addr).cmp(&u32::from_be(other.inner.s_addr)) + } +} +impl IntoInner for Ipv4Addr { + fn into_inner(self) -> c::in_addr { self.inner } +} +impl From for u32 { + fn from(ip: Ipv4Addr) -> u32 { + let ip = ip.octets(); + u32::from_be_bytes(ip) + } +} +impl From for Ipv4Addr { + fn from(ip: u32) -> Ipv4Addr { Ipv4Addr::from(ip.to_be_bytes()) } +} +impl From<[u8; 4]> for Ipv4Addr { + fn from(octets: [u8; 4]) -> Ipv4Addr { + Ipv4Addr::new(octets[0], octets[1], octets[2], octets[3]) + } +} +impl From<[u8; 4]> for IpAddr { + fn from(octets: [u8; 4]) -> IpAddr { IpAddr::V4(Ipv4Addr::from(octets)) } +} +impl Ipv6Addr { + pub const fn new(a: u16, b: u16, c: u16, d: u16, e: u16, f: u16, g: u16, + h: u16) -> Ipv6Addr { + let addr16 = + [a.to_be(), b.to_be(), c.to_be(), d.to_be(), e.to_be(), f.to_be(), + g.to_be(), h.to_be()]; + Ipv6Addr { + inner: c::in6_addr { + s6_addr: unsafe { transmute::<_, [u8; 16]>(addr16) }, + }, + } + } + pub const LOCALHOST: Self = Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1); + pub const UNSPECIFIED: Self = Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 0); + pub const fn segments(&self) -> [u16; 8] { + let [a, b, c, d, e, f, g, h] = + unsafe { transmute::<_, [u16; 8]>(self.inner.s6_addr) }; + [u16::from_be(a), u16::from_be(b), u16::from_be(c), u16::from_be(d), + u16::from_be(e), u16::from_be(f), u16::from_be(g), + u16::from_be(h)] + } + pub const fn is_unspecified(&self) -> bool { + u128::from_be_bytes(self.octets()) == + u128::from_be_bytes(Ipv6Addr::UNSPECIFIED.octets()) + } + pub const fn is_loopback(&self) -> bool { + u128::from_be_bytes(self.octets()) == + u128::from_be_bytes(Ipv6Addr::LOCALHOST.octets()) + } + pub const fn is_global(&self) -> bool { + match self.multicast_scope() { + Some(Ipv6MulticastScope::Global) => true, + None => self.is_unicast_global(), + _ => false, + } + } + pub const fn is_unique_local(&self) -> bool { + (self.segments()[0] & 0xfe00) == 0xfc00 + } + pub const fn is_unicast(&self) -> bool { !self.is_multicast() } + pub const fn is_unicast_link_local(&self) -> bool { + (self.segments()[0] & 0xffc0) == 
0xfe80 + } + pub const fn is_documentation(&self) -> bool { + (self.segments()[0] == 0x2001) && (self.segments()[1] == 0xdb8) + } + pub const fn is_benchmarking(&self) -> bool { + (self.segments()[0] == 0x2001) && (self.segments()[1] == 0x2) && + (self.segments()[2] == 0) + } + pub const fn is_unicast_global(&self) -> bool { + self.is_unicast() && !self.is_loopback() && + !self.is_unicast_link_local() && !self.is_unique_local() && + !self.is_unspecified() && !self.is_documentation() + } + pub const fn multicast_scope(&self) -> Option { + if self.is_multicast() { + match self.segments()[0] & 0x000f { + 1 => Some(Ipv6MulticastScope::InterfaceLocal), + 2 => Some(Ipv6MulticastScope::LinkLocal), + 3 => Some(Ipv6MulticastScope::RealmLocal), + 4 => Some(Ipv6MulticastScope::AdminLocal), + 5 => Some(Ipv6MulticastScope::SiteLocal), + 8 => Some(Ipv6MulticastScope::OrganizationLocal), + 14 => Some(Ipv6MulticastScope::Global), + _ => None, + } + } else { None } + } + pub const fn is_multicast(&self) -> bool { + (self.segments()[0] & 0xff00) == 0xff00 + } + pub const fn to_ipv4_mapped(&self) -> Option { + match self.octets() { + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, a, b, c, d] => { + Some(Ipv4Addr::new(a, b, c, d)) + } + _ => None, + } + } + pub const fn to_ipv4(&self) -> Option { + if let [0, 0, 0, 0, 0, 0 | 0xffff, ab, cd] = self.segments() { + let [a, b] = ab.to_be_bytes(); + let [c, d] = cd.to_be_bytes(); + Some(Ipv4Addr::new(a, b, c, d)) + } else { None } + } + pub const fn to_canonical(&self) -> IpAddr { + if let Some(mapped) = self.to_ipv4_mapped() { + return IpAddr::V4(mapped); + } + IpAddr::V6(*self) + } + pub const fn octets(&self) -> [u8; 16] { self.inner.s6_addr } +} +impl fmt::Display for Ipv6Addr { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + if f.precision().is_none() && f.width().is_none() { + let segments = self.segments(); + if self.is_unspecified() { + f.write_str("::") + } else if self.is_loopback() { + f.write_str("::1") + } else if let Some(ipv4) = self.to_ipv4() { + match segments[5] { + 0 => write!(f, "::{}", ipv4), + 0xffff => write!(f, "::ffff:{}", ipv4), + _ => unreachable!(), + } + } else { + #[derive(Copy, Clone, Default)] + struct Span { + start: usize, + len: usize, + } + let zeroes = + { + let mut longest = Span::default(); + let mut current = Span::default(); + for (i, &segment) in segments.iter().enumerate() { + if segment == 0 { + if current.len == 0 { current.start = i; } + current.len += 1; + if current.len > longest.len { longest = current; } + } else { current = Span::default(); } + } + longest + }; + #[doc = " Write a colon-separated part of the address"] + #[inline] + fn fmt_subslice(f: &mut fmt::Formatter<'_>, chunk: &[u16]) + -> fmt::Result { + if let Some((first, tail)) = chunk.split_first() { + write!(f, "{:x}", first)?; + for segment in tail { + f.write_char(':')?; + write!(f, "{:x}", segment)?; + } + } + Ok(()) + } + if zeroes.len > 1 { + fmt_subslice(f, &segments[..zeroes.start])?; + f.write_str("::")?; + fmt_subslice(f, &segments[zeroes.start + zeroes.len..]) + } else { fmt_subslice(f, &segments) } + } + } else { + const IPV6_BUF_LEN: usize = (4 * 8) + 7; + let mut buf = [0u8; IPV6_BUF_LEN]; + let mut buf_slice = &mut buf[..]; + write!(buf_slice, "{}", self).unwrap(); + let len = IPV6_BUF_LEN - buf_slice.len(); + let buf = unsafe { crate::str::from_utf8_unchecked(&buf[..len]) }; + f.pad(buf) + } + } +} +impl fmt::Debug for Ipv6Addr { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt::Display::fmt(self, fmt) + } 
+} +impl Clone for Ipv6Addr { + fn clone(&self) -> Ipv6Addr { *self } +} +impl PartialEq for Ipv6Addr { + fn eq(&self, other: &Ipv6Addr) -> bool { + self.inner.s6_addr == other.inner.s6_addr + } +} +impl PartialEq for Ipv6Addr { + fn eq(&self, other: &IpAddr) -> bool { + match other { IpAddr::V4(_) => false, IpAddr::V6(v6) => self == v6, } + } +} +impl PartialEq for IpAddr { + fn eq(&self, other: &Ipv6Addr) -> bool { + match self { IpAddr::V4(_) => false, IpAddr::V6(v6) => v6 == other, } + } +} +impl Eq for Ipv6Addr {} +impl hash::Hash for Ipv6Addr { + fn hash(&self, s: &mut H) { self.inner.s6_addr.hash(s) } +} +impl PartialOrd for Ipv6Addr { + fn partial_cmp(&self, other: &Ipv6Addr) -> Option { + Some(self.cmp(other)) + } +} +impl PartialOrd for IpAddr { + fn partial_cmp(&self, other: &Ipv6Addr) -> Option { + match self { + IpAddr::V4(_) => Some(Ordering::Less), + IpAddr::V6(v6) => v6.partial_cmp(other), + } + } +} +impl PartialOrd for Ipv6Addr { + fn partial_cmp(&self, other: &IpAddr) -> Option { + match other { + IpAddr::V4(_) => Some(Ordering::Greater), + IpAddr::V6(v6) => self.partial_cmp(v6), + } + } +} +impl Ord for Ipv6Addr { + fn cmp(&self, other: &Ipv6Addr) -> Ordering { + self.segments().cmp(&other.segments()) + } +} +impl AsInner for Ipv6Addr { + fn as_inner(&self) -> &c::in6_addr { &self.inner } +} +impl FromInner for Ipv6Addr { + fn from_inner(addr: c::in6_addr) -> Ipv6Addr { Ipv6Addr { inner: addr } } +} +impl From for u128 { + fn from(ip: Ipv6Addr) -> u128 { + let ip = ip.octets(); + u128::from_be_bytes(ip) + } +} +impl From for Ipv6Addr { + fn from(ip: u128) -> Ipv6Addr { Ipv6Addr::from(ip.to_be_bytes()) } +} +impl From<[u8; 16]> for Ipv6Addr { + fn from(octets: [u8; 16]) -> Ipv6Addr { + let inner = c::in6_addr { s6_addr: octets }; + Ipv6Addr::from_inner(inner) + } +} +impl From<[u16; 8]> for Ipv6Addr { + fn from(segments: [u16; 8]) -> Ipv6Addr { + let [a, b, c, d, e, f, g, h] = segments; + Ipv6Addr::new(a, b, c, d, e, f, g, h) + } +} +impl From<[u8; 16]> for IpAddr { + fn from(octets: [u8; 16]) -> IpAddr { IpAddr::V6(Ipv6Addr::from(octets)) } +} +impl From<[u16; 8]> for IpAddr { + fn from(segments: [u16; 8]) -> IpAddr { + IpAddr::V6(Ipv6Addr::from(segments)) + } +} diff --git a/vendor/prettyplease/examples/output.rustfmt.rs b/vendor/prettyplease/examples/output.rustfmt.rs new file mode 100644 index 00000000000000..3c7181d8efda6f --- /dev/null +++ b/vendor/prettyplease/examples/output.rustfmt.rs @@ -0,0 +1,552 @@ +use crate::cmp::Ordering; +use crate::fmt::{self, Write as FmtWrite}; +use crate::hash; +use crate::io::Write as IoWrite; +use crate::mem::transmute; +use crate::sys::net::netc as c; +use crate::sys_common::{AsInner, FromInner, IntoInner}; +#[derive(Copy, Clone, Eq, PartialEq, Hash, PartialOrd, Ord)] +pub enum IpAddr { + V4(Ipv4Addr), + V6(Ipv6Addr), +} +#[derive(Copy)] +pub struct Ipv4Addr { + inner: c::in_addr, +} +#[derive(Copy)] +pub struct Ipv6Addr { + inner: c::in6_addr, +} +#[derive(Copy, PartialEq, Eq, Clone, Hash, Debug)] +#[non_exhaustive] +pub enum Ipv6MulticastScope { + InterfaceLocal, + LinkLocal, + RealmLocal, + AdminLocal, + SiteLocal, + OrganizationLocal, + Global, +} +impl IpAddr { + pub const fn is_unspecified(&self) -> bool { + match self { + IpAddr::V4(ip) => ip.is_unspecified(), + IpAddr::V6(ip) => ip.is_unspecified(), + } + } + pub const fn is_loopback(&self) -> bool { + match self { + IpAddr::V4(ip) => ip.is_loopback(), + IpAddr::V6(ip) => ip.is_loopback(), + } + } + pub const fn is_global(&self) -> bool { + match self { + IpAddr::V4(ip) => 
ip.is_global(), + IpAddr::V6(ip) => ip.is_global(), + } + } + pub const fn is_multicast(&self) -> bool { + match self { + IpAddr::V4(ip) => ip.is_multicast(), + IpAddr::V6(ip) => ip.is_multicast(), + } + } + pub const fn is_documentation(&self) -> bool { + match self { + IpAddr::V4(ip) => ip.is_documentation(), + IpAddr::V6(ip) => ip.is_documentation(), + } + } + pub const fn is_benchmarking(&self) -> bool { + match self { + IpAddr::V4(ip) => ip.is_benchmarking(), + IpAddr::V6(ip) => ip.is_benchmarking(), + } + } + pub const fn is_ipv4(&self) -> bool { + matches!(self, IpAddr::V4(_)) + } + pub const fn is_ipv6(&self) -> bool { + matches!(self, IpAddr::V6(_)) + } + pub const fn to_canonical(&self) -> IpAddr { + match self { + &v4 @ IpAddr::V4(_) => v4, + IpAddr::V6(v6) => v6.to_canonical(), + } + } +} +impl Ipv4Addr { + pub const fn new(a: u8, b: u8, c: u8, d: u8) -> Ipv4Addr { + Ipv4Addr { + inner: c::in_addr { + s_addr: u32::from_ne_bytes([a, b, c, d]), + }, + } + } + pub const LOCALHOST: Self = Ipv4Addr::new(127, 0, 0, 1); + #[doc(alias = "INADDR_ANY")] + pub const UNSPECIFIED: Self = Ipv4Addr::new(0, 0, 0, 0); + pub const BROADCAST: Self = Ipv4Addr::new(255, 255, 255, 255); + pub const fn octets(&self) -> [u8; 4] { + self.inner.s_addr.to_ne_bytes() + } + pub const fn is_unspecified(&self) -> bool { + self.inner.s_addr == 0 + } + pub const fn is_loopback(&self) -> bool { + self.octets()[0] == 127 + } + pub const fn is_private(&self) -> bool { + match self.octets() { + [10, ..] => true, + [172, b, ..] if b >= 16 && b <= 31 => true, + [192, 168, ..] => true, + _ => false, + } + } + pub const fn is_link_local(&self) -> bool { + matches!(self.octets(), [169, 254, ..]) + } + pub const fn is_global(&self) -> bool { + if u32::from_be_bytes(self.octets()) == 0xc0000009 + || u32::from_be_bytes(self.octets()) == 0xc000000a + { + return true; + } + !self.is_private() + && !self.is_loopback() + && !self.is_link_local() + && !self.is_broadcast() + && !self.is_documentation() + && !self.is_shared() + && !(self.octets()[0] == 192 && self.octets()[1] == 0 && self.octets()[2] == 0) + && !self.is_reserved() + && !self.is_benchmarking() + && self.octets()[0] != 0 + } + pub const fn is_shared(&self) -> bool { + self.octets()[0] == 100 && (self.octets()[1] & 0b1100_0000 == 0b0100_0000) + } + pub const fn is_benchmarking(&self) -> bool { + self.octets()[0] == 198 && (self.octets()[1] & 0xfe) == 18 + } + pub const fn is_reserved(&self) -> bool { + self.octets()[0] & 240 == 240 && !self.is_broadcast() + } + pub const fn is_multicast(&self) -> bool { + self.octets()[0] >= 224 && self.octets()[0] <= 239 + } + pub const fn is_broadcast(&self) -> bool { + u32::from_be_bytes(self.octets()) == u32::from_be_bytes(Self::BROADCAST.octets()) + } + pub const fn is_documentation(&self) -> bool { + matches!( + self.octets(), + [192, 0, 2, _] | [198, 51, 100, _] | [203, 0, 113, _] + ) + } + pub const fn to_ipv6_compatible(&self) -> Ipv6Addr { + let [a, b, c, d] = self.octets(); + Ipv6Addr { + inner: c::in6_addr { + s6_addr: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, a, b, c, d], + }, + } + } + pub const fn to_ipv6_mapped(&self) -> Ipv6Addr { + let [a, b, c, d] = self.octets(); + Ipv6Addr { + inner: c::in6_addr { + s6_addr: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xFF, 0xFF, a, b, c, d], + }, + } + } +} +impl fmt::Display for IpAddr { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + IpAddr::V4(ip) => ip.fmt(fmt), + IpAddr::V6(ip) => ip.fmt(fmt), + } + } +} +impl fmt::Debug for IpAddr { + fn fmt(&self, fmt: 
&mut fmt::Formatter<'_>) -> fmt::Result { + fmt::Display::fmt(self, fmt) + } +} +impl From for IpAddr { + fn from(ipv4: Ipv4Addr) -> IpAddr { + IpAddr::V4(ipv4) + } +} +impl From for IpAddr { + fn from(ipv6: Ipv6Addr) -> IpAddr { + IpAddr::V6(ipv6) + } +} +impl fmt::Display for Ipv4Addr { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + let octets = self.octets(); + if fmt.precision().is_none() && fmt.width().is_none() { + write!( + fmt, + "{}.{}.{}.{}", + octets[0], octets[1], octets[2], octets[3] + ) + } else { + const IPV4_BUF_LEN: usize = 15; + let mut buf = [0u8; IPV4_BUF_LEN]; + let mut buf_slice = &mut buf[..]; + write!( + buf_slice, + "{}.{}.{}.{}", + octets[0], octets[1], octets[2], octets[3] + ) + .unwrap(); + let len = IPV4_BUF_LEN - buf_slice.len(); + let buf = unsafe { crate::str::from_utf8_unchecked(&buf[..len]) }; + fmt.pad(buf) + } + } +} +impl fmt::Debug for Ipv4Addr { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt::Display::fmt(self, fmt) + } +} +impl Clone for Ipv4Addr { + fn clone(&self) -> Ipv4Addr { + *self + } +} +impl PartialEq for Ipv4Addr { + fn eq(&self, other: &Ipv4Addr) -> bool { + self.inner.s_addr == other.inner.s_addr + } +} +impl PartialEq for IpAddr { + fn eq(&self, other: &Ipv4Addr) -> bool { + match self { + IpAddr::V4(v4) => v4 == other, + IpAddr::V6(_) => false, + } + } +} +impl PartialEq for Ipv4Addr { + fn eq(&self, other: &IpAddr) -> bool { + match other { + IpAddr::V4(v4) => self == v4, + IpAddr::V6(_) => false, + } + } +} +impl Eq for Ipv4Addr {} +impl hash::Hash for Ipv4Addr { + fn hash(&self, s: &mut H) { + { self.inner.s_addr }.hash(s) + } +} +impl PartialOrd for Ipv4Addr { + fn partial_cmp(&self, other: &Ipv4Addr) -> Option { + Some(self.cmp(other)) + } +} +impl PartialOrd for IpAddr { + fn partial_cmp(&self, other: &Ipv4Addr) -> Option { + match self { + IpAddr::V4(v4) => v4.partial_cmp(other), + IpAddr::V6(_) => Some(Ordering::Greater), + } + } +} +impl PartialOrd for Ipv4Addr { + fn partial_cmp(&self, other: &IpAddr) -> Option { + match other { + IpAddr::V4(v4) => self.partial_cmp(v4), + IpAddr::V6(_) => Some(Ordering::Less), + } + } +} +impl Ord for Ipv4Addr { + fn cmp(&self, other: &Ipv4Addr) -> Ordering { + u32::from_be(self.inner.s_addr).cmp(&u32::from_be(other.inner.s_addr)) + } +} +impl IntoInner for Ipv4Addr { + fn into_inner(self) -> c::in_addr { + self.inner + } +} +impl From for u32 { + fn from(ip: Ipv4Addr) -> u32 { + let ip = ip.octets(); + u32::from_be_bytes(ip) + } +} +impl From for Ipv4Addr { + fn from(ip: u32) -> Ipv4Addr { + Ipv4Addr::from(ip.to_be_bytes()) + } +} +impl From<[u8; 4]> for Ipv4Addr { + fn from(octets: [u8; 4]) -> Ipv4Addr { + Ipv4Addr::new(octets[0], octets[1], octets[2], octets[3]) + } +} +impl From<[u8; 4]> for IpAddr { + fn from(octets: [u8; 4]) -> IpAddr { + IpAddr::V4(Ipv4Addr::from(octets)) + } +} +impl Ipv6Addr { + pub const fn new(a: u16, b: u16, c: u16, d: u16, e: u16, f: u16, g: u16, h: u16) -> Ipv6Addr { + let addr16 = [ + a.to_be(), + b.to_be(), + c.to_be(), + d.to_be(), + e.to_be(), + f.to_be(), + g.to_be(), + h.to_be(), + ]; + Ipv6Addr { + inner: c::in6_addr { + s6_addr: unsafe { transmute::<_, [u8; 16]>(addr16) }, + }, + } + } + pub const LOCALHOST: Self = Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1); + pub const UNSPECIFIED: Self = Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 0); + pub const fn segments(&self) -> [u16; 8] { + let [a, b, c, d, e, f, g, h] = unsafe { transmute::<_, [u16; 8]>(self.inner.s6_addr) }; + [ + u16::from_be(a), + u16::from_be(b), + u16::from_be(c), 
+ u16::from_be(d), + u16::from_be(e), + u16::from_be(f), + u16::from_be(g), + u16::from_be(h), + ] + } + pub const fn is_unspecified(&self) -> bool { + u128::from_be_bytes(self.octets()) == u128::from_be_bytes(Ipv6Addr::UNSPECIFIED.octets()) + } + pub const fn is_loopback(&self) -> bool { + u128::from_be_bytes(self.octets()) == u128::from_be_bytes(Ipv6Addr::LOCALHOST.octets()) + } + pub const fn is_global(&self) -> bool { + match self.multicast_scope() { + Some(Ipv6MulticastScope::Global) => true, + None => self.is_unicast_global(), + _ => false, + } + } + pub const fn is_unique_local(&self) -> bool { + (self.segments()[0] & 0xfe00) == 0xfc00 + } + pub const fn is_unicast(&self) -> bool { + !self.is_multicast() + } + pub const fn is_unicast_link_local(&self) -> bool { + (self.segments()[0] & 0xffc0) == 0xfe80 + } + pub const fn is_documentation(&self) -> bool { + (self.segments()[0] == 0x2001) && (self.segments()[1] == 0xdb8) + } + pub const fn is_benchmarking(&self) -> bool { + (self.segments()[0] == 0x2001) && (self.segments()[1] == 0x2) && (self.segments()[2] == 0) + } + pub const fn is_unicast_global(&self) -> bool { + self.is_unicast() + && !self.is_loopback() + && !self.is_unicast_link_local() + && !self.is_unique_local() + && !self.is_unspecified() + && !self.is_documentation() + } + pub const fn multicast_scope(&self) -> Option { + if self.is_multicast() { + match self.segments()[0] & 0x000f { + 1 => Some(Ipv6MulticastScope::InterfaceLocal), + 2 => Some(Ipv6MulticastScope::LinkLocal), + 3 => Some(Ipv6MulticastScope::RealmLocal), + 4 => Some(Ipv6MulticastScope::AdminLocal), + 5 => Some(Ipv6MulticastScope::SiteLocal), + 8 => Some(Ipv6MulticastScope::OrganizationLocal), + 14 => Some(Ipv6MulticastScope::Global), + _ => None, + } + } else { + None + } + } + pub const fn is_multicast(&self) -> bool { + (self.segments()[0] & 0xff00) == 0xff00 + } + pub const fn to_ipv4_mapped(&self) -> Option { + match self.octets() { + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, a, b, c, d] => { + Some(Ipv4Addr::new(a, b, c, d)) + } + _ => None, + } + } + pub const fn to_ipv4(&self) -> Option { + if let [0, 0, 0, 0, 0, 0 | 0xffff, ab, cd] = self.segments() { + let [a, b] = ab.to_be_bytes(); + let [c, d] = cd.to_be_bytes(); + Some(Ipv4Addr::new(a, b, c, d)) + } else { + None + } + } + pub const fn to_canonical(&self) -> IpAddr { + if let Some(mapped) = self.to_ipv4_mapped() { + return IpAddr::V4(mapped); + } + IpAddr::V6(*self) + } + pub const fn octets(&self) -> [u8; 16] { + self.inner.s6_addr + } +} +impl fmt::Display for Ipv6Addr { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + if f.precision().is_none() && f.width().is_none() { + let segments = self.segments(); + if self.is_unspecified() { + f.write_str("::") + } else if self.is_loopback() { + f.write_str("::1") + } else if let Some(ipv4) = self.to_ipv4() { + match segments[5] { + 0 => write!(f, "::{}", ipv4), + 0xffff => write!(f, "::ffff:{}", ipv4), + _ => unreachable!(), + } + } else { # [derive (Copy , Clone , Default)] struct Span { start : usize , len : usize , } let zeroes = { let mut longest = Span :: default () ; let mut current = Span :: default () ; for (i , & segment) in segments . iter () . enumerate () { if segment == 0 { if current . len == 0 { current . start = i ; } current . len += 1 ; if current . len > longest . 
len { longest = current ; } } else { current = Span :: default () ; } } longest } ; # [doc = " Write a colon-separated part of the address"] # [inline] fn fmt_subslice (f : & mut fmt :: Formatter < '_ > , chunk : & [u16]) -> fmt :: Result { if let Some ((first , tail)) = chunk . split_first () { write ! (f , "{:x}" , first) ? ; for segment in tail { f . write_char (':') ? ; write ! (f , "{:x}" , segment) ? ; } } Ok (()) } if zeroes . len > 1 { fmt_subslice (f , & segments [.. zeroes . start]) ? ; f . write_str ("::") ? ; fmt_subslice (f , & segments [zeroes . start + zeroes . len ..]) } else { fmt_subslice (f , & segments) } } + } else { + const IPV6_BUF_LEN: usize = (4 * 8) + 7; + let mut buf = [0u8; IPV6_BUF_LEN]; + let mut buf_slice = &mut buf[..]; + write!(buf_slice, "{}", self).unwrap(); + let len = IPV6_BUF_LEN - buf_slice.len(); + let buf = unsafe { crate::str::from_utf8_unchecked(&buf[..len]) }; + f.pad(buf) + } + } +} +impl fmt::Debug for Ipv6Addr { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt::Display::fmt(self, fmt) + } +} +impl Clone for Ipv6Addr { + fn clone(&self) -> Ipv6Addr { + *self + } +} +impl PartialEq for Ipv6Addr { + fn eq(&self, other: &Ipv6Addr) -> bool { + self.inner.s6_addr == other.inner.s6_addr + } +} +impl PartialEq for Ipv6Addr { + fn eq(&self, other: &IpAddr) -> bool { + match other { + IpAddr::V4(_) => false, + IpAddr::V6(v6) => self == v6, + } + } +} +impl PartialEq for IpAddr { + fn eq(&self, other: &Ipv6Addr) -> bool { + match self { + IpAddr::V4(_) => false, + IpAddr::V6(v6) => v6 == other, + } + } +} +impl Eq for Ipv6Addr {} +impl hash::Hash for Ipv6Addr { + fn hash(&self, s: &mut H) { + self.inner.s6_addr.hash(s) + } +} +impl PartialOrd for Ipv6Addr { + fn partial_cmp(&self, other: &Ipv6Addr) -> Option { + Some(self.cmp(other)) + } +} +impl PartialOrd for IpAddr { + fn partial_cmp(&self, other: &Ipv6Addr) -> Option { + match self { + IpAddr::V4(_) => Some(Ordering::Less), + IpAddr::V6(v6) => v6.partial_cmp(other), + } + } +} +impl PartialOrd for Ipv6Addr { + fn partial_cmp(&self, other: &IpAddr) -> Option { + match other { + IpAddr::V4(_) => Some(Ordering::Greater), + IpAddr::V6(v6) => self.partial_cmp(v6), + } + } +} +impl Ord for Ipv6Addr { + fn cmp(&self, other: &Ipv6Addr) -> Ordering { + self.segments().cmp(&other.segments()) + } +} +impl AsInner for Ipv6Addr { + fn as_inner(&self) -> &c::in6_addr { + &self.inner + } +} +impl FromInner for Ipv6Addr { + fn from_inner(addr: c::in6_addr) -> Ipv6Addr { + Ipv6Addr { inner: addr } + } +} +impl From for u128 { + fn from(ip: Ipv6Addr) -> u128 { + let ip = ip.octets(); + u128::from_be_bytes(ip) + } +} +impl From for Ipv6Addr { + fn from(ip: u128) -> Ipv6Addr { + Ipv6Addr::from(ip.to_be_bytes()) + } +} +impl From<[u8; 16]> for Ipv6Addr { + fn from(octets: [u8; 16]) -> Ipv6Addr { + let inner = c::in6_addr { s6_addr: octets }; + Ipv6Addr::from_inner(inner) + } +} +impl From<[u16; 8]> for Ipv6Addr { + fn from(segments: [u16; 8]) -> Ipv6Addr { + let [a, b, c, d, e, f, g, h] = segments; + Ipv6Addr::new(a, b, c, d, e, f, g, h) + } +} +impl From<[u8; 16]> for IpAddr { + fn from(octets: [u8; 16]) -> IpAddr { + IpAddr::V6(Ipv6Addr::from(octets)) + } +} +impl From<[u16; 8]> for IpAddr { + fn from(segments: [u16; 8]) -> IpAddr { + IpAddr::V6(Ipv6Addr::from(segments)) + } +} diff --git a/vendor/prettyplease/src/algorithm.rs b/vendor/prettyplease/src/algorithm.rs new file mode 100644 index 00000000000000..482b3ad7d1e21c --- /dev/null +++ b/vendor/prettyplease/src/algorithm.rs @@ -0,0 +1,386 @@ +// 
Adapted from https://github.com/rust-lang/rust/blob/1.57.0/compiler/rustc_ast_pretty/src/pp.rs. +// See "Algorithm notes" in the crate-level rustdoc. + +use crate::ring::RingBuffer; +use crate::{MARGIN, MIN_SPACE}; +use std::borrow::Cow; +use std::cmp; +use std::collections::VecDeque; +use std::iter; + +#[derive(Clone, Copy, PartialEq)] +pub enum Breaks { + Consistent, + Inconsistent, +} + +#[derive(Clone, Copy, Default)] +pub struct BreakToken { + pub offset: isize, + pub blank_space: usize, + pub pre_break: Option, + pub post_break: &'static str, + pub no_break: Option, + pub if_nonempty: bool, + pub never_break: bool, +} + +#[derive(Clone, Copy)] +pub struct BeginToken { + pub offset: isize, + pub breaks: Breaks, +} + +#[derive(Clone)] +pub enum Token { + String(Cow<'static, str>), + Break(BreakToken), + Begin(BeginToken), + End, +} + +#[derive(Copy, Clone)] +enum PrintFrame { + Fits(Breaks), + Broken(usize, Breaks), +} + +pub const SIZE_INFINITY: isize = 0xffff; + +pub struct Printer { + out: String, + // Number of spaces left on line + space: isize, + // Ring-buffer of tokens and calculated sizes + buf: RingBuffer, + // Total size of tokens already printed + left_total: isize, + // Total size of tokens enqueued, including printed and not yet printed + right_total: isize, + // Holds the ring-buffer index of the Begin that started the current block, + // possibly with the most recent Break after that Begin (if there is any) on + // top of it. Values are pushed and popped on the back of the queue using it + // like stack, and elsewhere old values are popped from the front of the + // queue as they become irrelevant due to the primary ring-buffer advancing. + scan_stack: VecDeque, + // Stack of blocks-in-progress being flushed by print + print_stack: Vec, + // Level of indentation of current line + indent: usize, + // Buffered indentation to avoid writing trailing whitespace + pending_indentation: usize, +} + +#[derive(Clone)] +struct BufEntry { + token: Token, + size: isize, +} + +impl Printer { + pub fn new() -> Self { + Printer { + out: String::new(), + space: MARGIN, + buf: RingBuffer::new(), + left_total: 0, + right_total: 0, + scan_stack: VecDeque::new(), + print_stack: Vec::new(), + indent: 0, + pending_indentation: 0, + } + } + + pub fn eof(mut self) -> String { + if !self.scan_stack.is_empty() { + self.check_stack(0); + self.advance_left(); + } + self.out + } + + pub fn scan_begin(&mut self, token: BeginToken) { + if self.scan_stack.is_empty() { + self.left_total = 1; + self.right_total = 1; + self.buf.clear(); + } + let right = self.buf.push(BufEntry { + token: Token::Begin(token), + size: -self.right_total, + }); + self.scan_stack.push_back(right); + } + + pub fn scan_end(&mut self) { + if self.scan_stack.is_empty() { + self.print_end(); + } else { + if !self.buf.is_empty() { + if let Token::Break(break_token) = self.buf.last().token { + if self.buf.len() >= 2 { + if let Token::Begin(_) = self.buf.second_last().token { + self.buf.pop_last(); + self.buf.pop_last(); + self.scan_stack.pop_back(); + self.scan_stack.pop_back(); + self.right_total -= break_token.blank_space as isize; + return; + } + } + if break_token.if_nonempty { + self.buf.pop_last(); + self.scan_stack.pop_back(); + self.right_total -= break_token.blank_space as isize; + } + } + } + let right = self.buf.push(BufEntry { + token: Token::End, + size: -1, + }); + self.scan_stack.push_back(right); + } + } + + pub fn scan_break(&mut self, token: BreakToken) { + if self.scan_stack.is_empty() { + self.left_total = 1; + 
self.right_total = 1; + self.buf.clear(); + } else { + self.check_stack(0); + } + let right = self.buf.push(BufEntry { + token: Token::Break(token), + size: -self.right_total, + }); + self.scan_stack.push_back(right); + self.right_total += token.blank_space as isize; + } + + pub fn scan_string(&mut self, string: Cow<'static, str>) { + if self.scan_stack.is_empty() { + self.print_string(string); + } else { + let len = string.len() as isize; + self.buf.push(BufEntry { + token: Token::String(string), + size: len, + }); + self.right_total += len; + self.check_stream(); + } + } + + #[track_caller] + pub fn offset(&mut self, offset: isize) { + match &mut self.buf.last_mut().token { + Token::Break(token) => token.offset += offset, + Token::Begin(_) => {} + Token::String(_) | Token::End => unreachable!(), + } + } + + pub fn end_with_max_width(&mut self, max: isize) { + let mut depth = 1; + for &index in self.scan_stack.iter().rev() { + let entry = &self.buf[index]; + match entry.token { + Token::Begin(_) => { + depth -= 1; + if depth == 0 { + if entry.size < 0 { + let actual_width = entry.size + self.right_total; + if actual_width > max { + self.buf.push(BufEntry { + token: Token::String(Cow::Borrowed("")), + size: SIZE_INFINITY, + }); + self.right_total += SIZE_INFINITY; + } + } + break; + } + } + Token::End => depth += 1, + Token::Break(_) => {} + Token::String(_) => unreachable!(), + } + } + self.scan_end(); + } + + pub fn ends_with(&self, ch: char) -> bool { + for i in self.buf.index_range().rev() { + if let Token::String(token) = &self.buf[i].token { + return token.ends_with(ch); + } + } + self.out.ends_with(ch) + } + + fn check_stream(&mut self) { + while self.right_total - self.left_total > self.space { + if *self.scan_stack.front().unwrap() == self.buf.index_range().start { + self.scan_stack.pop_front().unwrap(); + self.buf.first_mut().size = SIZE_INFINITY; + } + + self.advance_left(); + + if self.buf.is_empty() { + break; + } + } + } + + fn advance_left(&mut self) { + while self.buf.first().size >= 0 { + let left = self.buf.pop_first(); + + match left.token { + Token::String(string) => { + self.left_total += left.size; + self.print_string(string); + } + Token::Break(token) => { + self.left_total += token.blank_space as isize; + self.print_break(token, left.size); + } + Token::Begin(token) => self.print_begin(token, left.size), + Token::End => self.print_end(), + } + + if self.buf.is_empty() { + break; + } + } + } + + fn check_stack(&mut self, mut depth: usize) { + while let Some(&index) = self.scan_stack.back() { + let entry = &mut self.buf[index]; + match entry.token { + Token::Begin(_) => { + if depth == 0 { + break; + } + self.scan_stack.pop_back().unwrap(); + entry.size += self.right_total; + depth -= 1; + } + Token::End => { + self.scan_stack.pop_back().unwrap(); + entry.size = 1; + depth += 1; + } + Token::Break(_) => { + self.scan_stack.pop_back().unwrap(); + entry.size += self.right_total; + if depth == 0 { + break; + } + } + Token::String(_) => unreachable!(), + } + } + } + + fn get_top(&self) -> PrintFrame { + const OUTER: PrintFrame = PrintFrame::Broken(0, Breaks::Inconsistent); + self.print_stack.last().map_or(OUTER, PrintFrame::clone) + } + + fn print_begin(&mut self, token: BeginToken, size: isize) { + if cfg!(prettyplease_debug) { + self.out.push(match token.breaks { + Breaks::Consistent => '«', + Breaks::Inconsistent => '‹', + }); + if cfg!(prettyplease_debug_indent) { + self.out + .extend(token.offset.to_string().chars().map(|ch| match ch { + '0'..='9' => ['₀', '₁', '₂', 
'₃', '₄', '₅', '₆', '₇', '₈', '₉'] + [(ch as u8 - b'0') as usize], + '-' => '₋', + _ => unreachable!(), + })); + } + } + if size > self.space { + self.print_stack + .push(PrintFrame::Broken(self.indent, token.breaks)); + self.indent = usize::try_from(self.indent as isize + token.offset).unwrap(); + } else { + self.print_stack.push(PrintFrame::Fits(token.breaks)); + } + } + + fn print_end(&mut self) { + let breaks = match self.print_stack.pop().unwrap() { + PrintFrame::Broken(indent, breaks) => { + self.indent = indent; + breaks + } + PrintFrame::Fits(breaks) => breaks, + }; + if cfg!(prettyplease_debug) { + self.out.push(match breaks { + Breaks::Consistent => '»', + Breaks::Inconsistent => '›', + }); + } + } + + fn print_break(&mut self, token: BreakToken, size: isize) { + let fits = token.never_break + || match self.get_top() { + PrintFrame::Fits(..) => true, + PrintFrame::Broken(.., Breaks::Consistent) => false, + PrintFrame::Broken(.., Breaks::Inconsistent) => size <= self.space, + }; + if fits { + self.pending_indentation += token.blank_space; + self.space -= token.blank_space as isize; + if let Some(no_break) = token.no_break { + self.out.push(no_break); + self.space -= no_break.len_utf8() as isize; + } + if cfg!(prettyplease_debug) { + self.out.push('·'); + } + } else { + if let Some(pre_break) = token.pre_break { + self.print_indent(); + self.out.push(pre_break); + } + if cfg!(prettyplease_debug) { + self.out.push('·'); + } + self.out.push('\n'); + let indent = self.indent as isize + token.offset; + self.pending_indentation = usize::try_from(indent).unwrap(); + self.space = cmp::max(MARGIN - indent, MIN_SPACE); + if !token.post_break.is_empty() { + self.print_indent(); + self.out.push_str(token.post_break); + self.space -= token.post_break.len() as isize; + } + } + } + + fn print_string(&mut self, string: Cow<'static, str>) { + self.print_indent(); + self.out.push_str(&string); + self.space -= string.len() as isize; + } + + fn print_indent(&mut self) { + self.out.reserve(self.pending_indentation); + self.out + .extend(iter::repeat(' ').take(self.pending_indentation)); + self.pending_indentation = 0; + } +} diff --git a/vendor/prettyplease/src/attr.rs b/vendor/prettyplease/src/attr.rs new file mode 100644 index 00000000000000..b436283f3c6969 --- /dev/null +++ b/vendor/prettyplease/src/attr.rs @@ -0,0 +1,288 @@ +use crate::algorithm::Printer; +use crate::fixup::FixupContext; +use crate::path::PathKind; +use crate::INDENT; +use proc_macro2::{Delimiter, Group, TokenStream, TokenTree}; +use syn::{AttrStyle, Attribute, Expr, Lit, MacroDelimiter, Meta, MetaList, MetaNameValue}; + +impl Printer { + pub fn outer_attrs(&mut self, attrs: &[Attribute]) { + for attr in attrs { + if let AttrStyle::Outer = attr.style { + self.attr(attr); + } + } + } + + pub fn inner_attrs(&mut self, attrs: &[Attribute]) { + for attr in attrs { + if let AttrStyle::Inner(_) = attr.style { + self.attr(attr); + } + } + } + + fn attr(&mut self, attr: &Attribute) { + if let Some(mut doc) = value_of_attribute("doc", attr) { + if !doc.contains('\n') + && match attr.style { + AttrStyle::Outer => !doc.starts_with('/'), + AttrStyle::Inner(_) => true, + } + { + trim_trailing_spaces(&mut doc); + self.word(match attr.style { + AttrStyle::Outer => "///", + AttrStyle::Inner(_) => "//!", + }); + self.word(doc); + self.hardbreak(); + return; + } else if can_be_block_comment(&doc) + && match attr.style { + AttrStyle::Outer => !doc.starts_with(&['*', '/'][..]), + AttrStyle::Inner(_) => true, + } + { + 
trim_interior_trailing_spaces(&mut doc); + self.word(match attr.style { + AttrStyle::Outer => "/**", + AttrStyle::Inner(_) => "/*!", + }); + self.word(doc); + self.word("*/"); + self.hardbreak(); + return; + } + } else if let Some(mut comment) = value_of_attribute("comment", attr) { + if !comment.contains('\n') { + trim_trailing_spaces(&mut comment); + self.word("//"); + self.word(comment); + self.hardbreak(); + return; + } else if can_be_block_comment(&comment) && !comment.starts_with(&['*', '!'][..]) { + trim_interior_trailing_spaces(&mut comment); + self.word("/*"); + self.word(comment); + self.word("*/"); + self.hardbreak(); + return; + } + } + + self.word(match attr.style { + AttrStyle::Outer => "#", + AttrStyle::Inner(_) => "#!", + }); + self.word("["); + self.meta(&attr.meta); + self.word("]"); + self.space(); + } + + fn meta(&mut self, meta: &Meta) { + match meta { + Meta::Path(path) => self.path(path, PathKind::Simple), + Meta::List(meta) => self.meta_list(meta), + Meta::NameValue(meta) => self.meta_name_value(meta), + } + } + + fn meta_list(&mut self, meta: &MetaList) { + self.path(&meta.path, PathKind::Simple); + let delimiter = match meta.delimiter { + MacroDelimiter::Paren(_) => Delimiter::Parenthesis, + MacroDelimiter::Brace(_) => Delimiter::Brace, + MacroDelimiter::Bracket(_) => Delimiter::Bracket, + }; + let group = Group::new(delimiter, meta.tokens.clone()); + self.attr_tokens(TokenStream::from(TokenTree::Group(group))); + } + + fn meta_name_value(&mut self, meta: &MetaNameValue) { + self.path(&meta.path, PathKind::Simple); + self.word(" = "); + self.expr(&meta.value, FixupContext::NONE); + } + + fn attr_tokens(&mut self, tokens: TokenStream) { + let mut stack = Vec::new(); + stack.push((tokens.into_iter().peekable(), Delimiter::None)); + let mut space = Self::nbsp as fn(&mut Self); + + #[derive(PartialEq)] + enum State { + Word, + Punct, + TrailingComma, + } + + use State::*; + let mut state = Word; + + while let Some((tokens, delimiter)) = stack.last_mut() { + match tokens.next() { + Some(TokenTree::Ident(ident)) => { + if let Word = state { + space(self); + } + self.ident(&ident); + state = Word; + } + Some(TokenTree::Punct(punct)) => { + let ch = punct.as_char(); + if let (Word, '=') = (state, ch) { + self.nbsp(); + } + if ch == ',' && tokens.peek().is_none() { + self.trailing_comma(true); + state = TrailingComma; + } else { + self.token_punct(ch); + if ch == '=' { + self.nbsp(); + } else if ch == ',' { + space(self); + } + state = Punct; + } + } + Some(TokenTree::Literal(literal)) => { + if let Word = state { + space(self); + } + self.token_literal(&literal); + state = Word; + } + Some(TokenTree::Group(group)) => { + let delimiter = group.delimiter(); + let stream = group.stream(); + match delimiter { + Delimiter::Parenthesis => { + self.word("("); + self.cbox(INDENT); + self.zerobreak(); + state = Punct; + } + Delimiter::Brace => { + self.word("{"); + state = Punct; + } + Delimiter::Bracket => { + self.word("["); + state = Punct; + } + Delimiter::None => {} + } + stack.push((stream.into_iter().peekable(), delimiter)); + space = Self::space; + } + None => { + match delimiter { + Delimiter::Parenthesis => { + if state != TrailingComma { + self.zerobreak(); + } + self.offset(-INDENT); + self.end(); + self.word(")"); + state = Punct; + } + Delimiter::Brace => { + self.word("}"); + state = Punct; + } + Delimiter::Bracket => { + self.word("]"); + state = Punct; + } + Delimiter::None => {} + } + stack.pop(); + if stack.is_empty() { + space = Self::nbsp; + } + } + } + } + } 
+} + +fn value_of_attribute(requested: &str, attr: &Attribute) -> Option { + let value = match &attr.meta { + Meta::NameValue(meta) if meta.path.is_ident(requested) => &meta.value, + _ => return None, + }; + let lit = match value { + Expr::Lit(expr) if expr.attrs.is_empty() => &expr.lit, + _ => return None, + }; + match lit { + Lit::Str(string) => Some(string.value()), + _ => None, + } +} + +pub fn has_outer(attrs: &[Attribute]) -> bool { + for attr in attrs { + if let AttrStyle::Outer = attr.style { + return true; + } + } + false +} + +pub fn has_inner(attrs: &[Attribute]) -> bool { + for attr in attrs { + if let AttrStyle::Inner(_) = attr.style { + return true; + } + } + false +} + +fn trim_trailing_spaces(doc: &mut String) { + doc.truncate(doc.trim_end_matches(' ').len()); +} + +fn trim_interior_trailing_spaces(doc: &mut String) { + if !doc.contains(" \n") { + return; + } + let mut trimmed = String::with_capacity(doc.len()); + let mut lines = doc.split('\n').peekable(); + while let Some(line) = lines.next() { + if lines.peek().is_some() { + trimmed.push_str(line.trim_end_matches(' ')); + trimmed.push('\n'); + } else { + trimmed.push_str(line); + } + } + *doc = trimmed; +} + +fn can_be_block_comment(value: &str) -> bool { + let mut depth = 0usize; + let bytes = value.as_bytes(); + let mut i = 0usize; + let upper = bytes.len() - 1; + + while i < upper { + if bytes[i] == b'/' && bytes[i + 1] == b'*' { + depth += 1; + i += 2; + } else if bytes[i] == b'*' && bytes[i + 1] == b'/' { + if depth == 0 { + return false; + } + depth -= 1; + i += 2; + } else { + i += 1; + } + } + + depth == 0 && !value.ends_with('/') +} diff --git a/vendor/prettyplease/src/classify.rs b/vendor/prettyplease/src/classify.rs new file mode 100644 index 00000000000000..17648f6c8b7af8 --- /dev/null +++ b/vendor/prettyplease/src/classify.rs @@ -0,0 +1,324 @@ +use proc_macro2::{Delimiter, TokenStream, TokenTree}; +use std::ops::ControlFlow; +use syn::punctuated::Punctuated; +use syn::{Expr, MacroDelimiter, Path, PathArguments, ReturnType, Token, Type, TypeParamBound}; + +pub(crate) fn requires_semi_to_be_stmt(expr: &Expr) -> bool { + match expr { + Expr::Macro(expr) => !matches!(expr.mac.delimiter, MacroDelimiter::Brace(_)), + _ => requires_comma_to_be_match_arm(expr), + } +} + +pub(crate) fn requires_comma_to_be_match_arm(mut expr: &Expr) -> bool { + loop { + match expr { + #![cfg_attr(all(test, exhaustive), deny(non_exhaustive_omitted_patterns))] + Expr::If(_) + | Expr::Match(_) + | Expr::Block(_) | Expr::Unsafe(_) // both under ExprKind::Block in rustc + | Expr::While(_) + | Expr::Loop(_) + | Expr::ForLoop(_) + | Expr::TryBlock(_) + | Expr::Const(_) => return false, + + Expr::Array(_) + | Expr::Assign(_) + | Expr::Async(_) + | Expr::Await(_) + | Expr::Binary(_) + | Expr::Break(_) + | Expr::Call(_) + | Expr::Cast(_) + | Expr::Closure(_) + | Expr::Continue(_) + | Expr::Field(_) + | Expr::Index(_) + | Expr::Infer(_) + | Expr::Let(_) + | Expr::Lit(_) + | Expr::Macro(_) + | Expr::MethodCall(_) + | Expr::Paren(_) + | Expr::Path(_) + | Expr::Range(_) + | Expr::RawAddr(_) + | Expr::Reference(_) + | Expr::Repeat(_) + | Expr::Return(_) + | Expr::Struct(_) + | Expr::Try(_) + | Expr::Tuple(_) + | Expr::Unary(_) + | Expr::Yield(_) + | Expr::Verbatim(_) => return true, + + Expr::Group(group) => expr = &group.expr, + + _ => return true, + } + } +} + +pub(crate) fn trailing_unparameterized_path(mut ty: &Type) -> bool { + loop { + match ty { + #![cfg_attr(all(test, exhaustive), deny(non_exhaustive_omitted_patterns))] + Type::BareFn(t) => 
match &t.output { + ReturnType::Default => return false, + ReturnType::Type(_, ret) => ty = ret, + }, + Type::ImplTrait(t) => match last_type_in_bounds(&t.bounds) { + ControlFlow::Break(trailing_path) => return trailing_path, + ControlFlow::Continue(t) => ty = t, + }, + Type::Path(t) => match last_type_in_path(&t.path) { + ControlFlow::Break(trailing_path) => return trailing_path, + ControlFlow::Continue(t) => ty = t, + }, + Type::Ptr(t) => ty = &t.elem, + Type::Reference(t) => ty = &t.elem, + Type::TraitObject(t) => match last_type_in_bounds(&t.bounds) { + ControlFlow::Break(trailing_path) => return trailing_path, + ControlFlow::Continue(t) => ty = t, + }, + + Type::Array(_) + | Type::Group(_) + | Type::Infer(_) + | Type::Macro(_) + | Type::Never(_) + | Type::Paren(_) + | Type::Slice(_) + | Type::Tuple(_) + | Type::Verbatim(_) => return false, + + _ => return false, + } + } + + fn last_type_in_path(path: &Path) -> ControlFlow { + match &path.segments.last().unwrap().arguments { + PathArguments::None => ControlFlow::Break(true), + PathArguments::AngleBracketed(_) => ControlFlow::Break(false), + PathArguments::Parenthesized(arg) => match &arg.output { + ReturnType::Default => ControlFlow::Break(false), + ReturnType::Type(_, ret) => ControlFlow::Continue(ret), + }, + } + } + + fn last_type_in_bounds( + bounds: &Punctuated, + ) -> ControlFlow { + match bounds.last().unwrap() { + #![cfg_attr(all(test, exhaustive), deny(non_exhaustive_omitted_patterns))] + TypeParamBound::Trait(t) => last_type_in_path(&t.path), + TypeParamBound::Lifetime(_) + | TypeParamBound::PreciseCapture(_) + | TypeParamBound::Verbatim(_) => ControlFlow::Break(false), + _ => ControlFlow::Break(false), + } + } +} + +/// Whether the expression's first token is the label of a loop/block. +pub(crate) fn expr_leading_label(mut expr: &Expr) -> bool { + loop { + match expr { + #![cfg_attr(all(test, exhaustive), deny(non_exhaustive_omitted_patterns))] + Expr::Block(e) => return e.label.is_some(), + Expr::ForLoop(e) => return e.label.is_some(), + Expr::Loop(e) => return e.label.is_some(), + Expr::While(e) => return e.label.is_some(), + + Expr::Assign(e) => expr = &e.left, + Expr::Await(e) => expr = &e.base, + Expr::Binary(e) => expr = &e.left, + Expr::Call(e) => expr = &e.func, + Expr::Cast(e) => expr = &e.expr, + Expr::Field(e) => expr = &e.base, + Expr::Index(e) => expr = &e.expr, + Expr::MethodCall(e) => expr = &e.receiver, + Expr::Range(e) => match &e.start { + Some(start) => expr = start, + None => return false, + }, + Expr::Try(e) => expr = &e.expr, + + Expr::Array(_) + | Expr::Async(_) + | Expr::Break(_) + | Expr::Closure(_) + | Expr::Const(_) + | Expr::Continue(_) + | Expr::If(_) + | Expr::Infer(_) + | Expr::Let(_) + | Expr::Lit(_) + | Expr::Macro(_) + | Expr::Match(_) + | Expr::Paren(_) + | Expr::Path(_) + | Expr::RawAddr(_) + | Expr::Reference(_) + | Expr::Repeat(_) + | Expr::Return(_) + | Expr::Struct(_) + | Expr::TryBlock(_) + | Expr::Tuple(_) + | Expr::Unary(_) + | Expr::Unsafe(_) + | Expr::Verbatim(_) + | Expr::Yield(_) => return false, + + Expr::Group(e) => { + if !e.attrs.is_empty() { + return false; + } + expr = &e.expr; + } + + _ => return false, + } + } +} + +/// Whether the expression's last token is `}`. 
+pub(crate) fn expr_trailing_brace(mut expr: &Expr) -> bool { + loop { + match expr { + #![cfg_attr(all(test, exhaustive), deny(non_exhaustive_omitted_patterns))] + Expr::Async(_) + | Expr::Block(_) + | Expr::Const(_) + | Expr::ForLoop(_) + | Expr::If(_) + | Expr::Loop(_) + | Expr::Match(_) + | Expr::Struct(_) + | Expr::TryBlock(_) + | Expr::Unsafe(_) + | Expr::While(_) => return true, + + Expr::Assign(e) => expr = &e.right, + Expr::Binary(e) => expr = &e.right, + Expr::Break(e) => match &e.expr { + Some(e) => expr = e, + None => return false, + }, + Expr::Cast(e) => return type_trailing_brace(&e.ty), + Expr::Closure(e) => expr = &e.body, + Expr::Group(e) => expr = &e.expr, + Expr::Let(e) => expr = &e.expr, + Expr::Macro(e) => return matches!(e.mac.delimiter, MacroDelimiter::Brace(_)), + Expr::Range(e) => match &e.end { + Some(end) => expr = end, + None => return false, + }, + Expr::RawAddr(e) => expr = &e.expr, + Expr::Reference(e) => expr = &e.expr, + Expr::Return(e) => match &e.expr { + Some(e) => expr = e, + None => return false, + }, + Expr::Unary(e) => expr = &e.expr, + Expr::Verbatim(e) => return tokens_trailing_brace(e), + Expr::Yield(e) => match &e.expr { + Some(e) => expr = e, + None => return false, + }, + + Expr::Array(_) + | Expr::Await(_) + | Expr::Call(_) + | Expr::Continue(_) + | Expr::Field(_) + | Expr::Index(_) + | Expr::Infer(_) + | Expr::Lit(_) + | Expr::MethodCall(_) + | Expr::Paren(_) + | Expr::Path(_) + | Expr::Repeat(_) + | Expr::Try(_) + | Expr::Tuple(_) => return false, + + _ => return false, + } + } + + fn type_trailing_brace(mut ty: &Type) -> bool { + loop { + match ty { + #![cfg_attr(all(test, exhaustive), deny(non_exhaustive_omitted_patterns))] + Type::BareFn(t) => match &t.output { + ReturnType::Default => return false, + ReturnType::Type(_, ret) => ty = ret, + }, + Type::ImplTrait(t) => match last_type_in_bounds(&t.bounds) { + ControlFlow::Break(trailing_brace) => return trailing_brace, + ControlFlow::Continue(t) => ty = t, + }, + Type::Macro(t) => return matches!(t.mac.delimiter, MacroDelimiter::Brace(_)), + Type::Path(t) => match last_type_in_path(&t.path) { + Some(t) => ty = t, + None => return false, + }, + Type::Ptr(t) => ty = &t.elem, + Type::Reference(t) => ty = &t.elem, + Type::TraitObject(t) => match last_type_in_bounds(&t.bounds) { + ControlFlow::Break(trailing_brace) => return trailing_brace, + ControlFlow::Continue(t) => ty = t, + }, + Type::Verbatim(t) => return tokens_trailing_brace(t), + + Type::Array(_) + | Type::Group(_) + | Type::Infer(_) + | Type::Never(_) + | Type::Paren(_) + | Type::Slice(_) + | Type::Tuple(_) => return false, + + _ => return false, + } + } + } + + fn last_type_in_path(path: &Path) -> Option<&Type> { + match &path.segments.last().unwrap().arguments { + PathArguments::None | PathArguments::AngleBracketed(_) => None, + PathArguments::Parenthesized(arg) => match &arg.output { + ReturnType::Default => None, + ReturnType::Type(_, ret) => Some(ret), + }, + } + } + + fn last_type_in_bounds( + bounds: &Punctuated, + ) -> ControlFlow { + match bounds.last().unwrap() { + #![cfg_attr(all(test, exhaustive), deny(non_exhaustive_omitted_patterns))] + TypeParamBound::Trait(t) => match last_type_in_path(&t.path) { + Some(t) => ControlFlow::Continue(t), + None => ControlFlow::Break(false), + }, + TypeParamBound::Lifetime(_) | TypeParamBound::PreciseCapture(_) => { + ControlFlow::Break(false) + } + TypeParamBound::Verbatim(t) => ControlFlow::Break(tokens_trailing_brace(t)), + _ => ControlFlow::Break(false), + } + } + + fn 
tokens_trailing_brace(tokens: &TokenStream) -> bool { + if let Some(TokenTree::Group(last)) = tokens.clone().into_iter().last() { + last.delimiter() == Delimiter::Brace + } else { + false + } + } +} diff --git a/vendor/prettyplease/src/convenience.rs b/vendor/prettyplease/src/convenience.rs new file mode 100644 index 00000000000000..bc4add6e08be24 --- /dev/null +++ b/vendor/prettyplease/src/convenience.rs @@ -0,0 +1,98 @@ +use crate::algorithm::{self, BeginToken, BreakToken, Breaks, Printer}; +use std::borrow::Cow; + +impl Printer { + pub fn ibox(&mut self, indent: isize) { + self.scan_begin(BeginToken { + offset: indent, + breaks: Breaks::Inconsistent, + }); + } + + pub fn cbox(&mut self, indent: isize) { + self.scan_begin(BeginToken { + offset: indent, + breaks: Breaks::Consistent, + }); + } + + pub fn end(&mut self) { + self.scan_end(); + } + + pub fn word>>(&mut self, wrd: S) { + let s = wrd.into(); + self.scan_string(s); + } + + fn spaces(&mut self, n: usize) { + self.scan_break(BreakToken { + blank_space: n, + ..BreakToken::default() + }); + } + + pub fn zerobreak(&mut self) { + self.spaces(0); + } + + pub fn space(&mut self) { + self.spaces(1); + } + + pub fn nbsp(&mut self) { + self.word(" "); + } + + pub fn hardbreak(&mut self) { + self.spaces(algorithm::SIZE_INFINITY as usize); + } + + pub fn space_if_nonempty(&mut self) { + self.scan_break(BreakToken { + blank_space: 1, + if_nonempty: true, + ..BreakToken::default() + }); + } + + pub fn hardbreak_if_nonempty(&mut self) { + self.scan_break(BreakToken { + blank_space: algorithm::SIZE_INFINITY as usize, + if_nonempty: true, + ..BreakToken::default() + }); + } + + pub fn trailing_comma(&mut self, is_last: bool) { + if is_last { + self.scan_break(BreakToken { + pre_break: Some(','), + ..BreakToken::default() + }); + } else { + self.word(","); + self.space(); + } + } + + pub fn trailing_comma_or_space(&mut self, is_last: bool) { + if is_last { + self.scan_break(BreakToken { + blank_space: 1, + pre_break: Some(','), + ..BreakToken::default() + }); + } else { + self.word(","); + self.space(); + } + } + + pub fn neverbreak(&mut self) { + self.scan_break(BreakToken { + never_break: true, + ..BreakToken::default() + }); + } +} diff --git a/vendor/prettyplease/src/data.rs b/vendor/prettyplease/src/data.rs new file mode 100644 index 00000000000000..3561a49b4a1cdc --- /dev/null +++ b/vendor/prettyplease/src/data.rs @@ -0,0 +1,79 @@ +use crate::algorithm::Printer; +use crate::fixup::FixupContext; +use crate::iter::IterDelimited; +use crate::path::PathKind; +use crate::INDENT; +use syn::{Field, Fields, FieldsUnnamed, Variant, VisRestricted, Visibility}; + +impl Printer { + pub fn variant(&mut self, variant: &Variant) { + self.outer_attrs(&variant.attrs); + self.ident(&variant.ident); + match &variant.fields { + Fields::Named(fields) => { + self.nbsp(); + self.word("{"); + self.cbox(INDENT); + self.space(); + for field in fields.named.iter().delimited() { + self.field(&field); + self.trailing_comma_or_space(field.is_last); + } + self.offset(-INDENT); + self.end(); + self.word("}"); + } + Fields::Unnamed(fields) => { + self.cbox(INDENT); + self.fields_unnamed(fields); + self.end(); + } + Fields::Unit => {} + } + if let Some((_eq_token, discriminant)) = &variant.discriminant { + self.word(" = "); + self.expr(discriminant, FixupContext::NONE); + } + } + + pub fn fields_unnamed(&mut self, fields: &FieldsUnnamed) { + self.word("("); + self.zerobreak(); + for field in fields.unnamed.iter().delimited() { + self.field(&field); + 
self.trailing_comma(field.is_last); + } + self.offset(-INDENT); + self.word(")"); + } + + pub fn field(&mut self, field: &Field) { + self.outer_attrs(&field.attrs); + self.visibility(&field.vis); + if let Some(ident) = &field.ident { + self.ident(ident); + self.word(": "); + } + self.ty(&field.ty); + } + + pub fn visibility(&mut self, vis: &Visibility) { + match vis { + Visibility::Public(_) => self.word("pub "), + Visibility::Restricted(vis) => self.vis_restricted(vis), + Visibility::Inherited => {} + } + } + + fn vis_restricted(&mut self, vis: &VisRestricted) { + self.word("pub("); + let omit_in = vis.path.get_ident().map_or(false, |ident| { + matches!(ident.to_string().as_str(), "self" | "super" | "crate") + }); + if !omit_in { + self.word("in "); + } + self.path(&vis.path, PathKind::Simple); + self.word(") "); + } +} diff --git a/vendor/prettyplease/src/expr.rs b/vendor/prettyplease/src/expr.rs new file mode 100644 index 00000000000000..55b1b605531123 --- /dev/null +++ b/vendor/prettyplease/src/expr.rs @@ -0,0 +1,1533 @@ +use crate::algorithm::{BreakToken, Printer}; +use crate::attr; +use crate::classify; +use crate::fixup::FixupContext; +use crate::iter::IterDelimited; +use crate::path::PathKind; +use crate::precedence::Precedence; +use crate::stmt; +use crate::INDENT; +use proc_macro2::TokenStream; +use syn::punctuated::Punctuated; +use syn::{ + token, Arm, Attribute, BinOp, Block, Expr, ExprArray, ExprAssign, ExprAsync, ExprAwait, + ExprBinary, ExprBlock, ExprBreak, ExprCall, ExprCast, ExprClosure, ExprConst, ExprContinue, + ExprField, ExprForLoop, ExprGroup, ExprIf, ExprIndex, ExprInfer, ExprLet, ExprLit, ExprLoop, + ExprMacro, ExprMatch, ExprMethodCall, ExprParen, ExprPath, ExprRange, ExprRawAddr, + ExprReference, ExprRepeat, ExprReturn, ExprStruct, ExprTry, ExprTryBlock, ExprTuple, ExprUnary, + ExprUnsafe, ExprWhile, ExprYield, FieldValue, Index, Label, Lit, Member, PointerMutability, + RangeLimits, ReturnType, Stmt, Token, UnOp, +}; + +impl Printer { + pub fn expr(&mut self, expr: &Expr, mut fixup: FixupContext) { + let needs_paren = fixup.parenthesize(expr); + if needs_paren { + self.word("("); + fixup = FixupContext::NONE; + } + + let beginning_of_line = false; + + match expr { + #![cfg_attr(all(test, exhaustive), deny(non_exhaustive_omitted_patterns))] + Expr::Array(expr) => self.expr_array(expr), + Expr::Assign(expr) => self.expr_assign(expr, fixup), + Expr::Async(expr) => self.expr_async(expr), + Expr::Await(expr) => self.expr_await(expr, beginning_of_line, fixup), + Expr::Binary(expr) => self.expr_binary(expr, fixup), + Expr::Block(expr) => self.expr_block(expr), + Expr::Break(expr) => self.expr_break(expr, fixup), + Expr::Call(expr) => self.expr_call(expr, beginning_of_line, fixup), + Expr::Cast(expr) => self.expr_cast(expr, fixup), + Expr::Closure(expr) => self.expr_closure(expr, fixup), + Expr::Const(expr) => self.expr_const(expr), + Expr::Continue(expr) => self.expr_continue(expr), + Expr::Field(expr) => self.expr_field(expr, beginning_of_line, fixup), + Expr::ForLoop(expr) => self.expr_for_loop(expr), + Expr::Group(expr) => self.expr_group(expr, fixup), + Expr::If(expr) => self.expr_if(expr), + Expr::Index(expr) => self.expr_index(expr, beginning_of_line, fixup), + Expr::Infer(expr) => self.expr_infer(expr), + Expr::Let(expr) => self.expr_let(expr, fixup), + Expr::Lit(expr) => self.expr_lit(expr), + Expr::Loop(expr) => self.expr_loop(expr), + Expr::Macro(expr) => self.expr_macro(expr), + Expr::Match(expr) => self.expr_match(expr), + Expr::MethodCall(expr) => 
self.expr_method_call(expr, beginning_of_line, fixup), + Expr::Paren(expr) => self.expr_paren(expr), + Expr::Path(expr) => self.expr_path(expr), + Expr::Range(expr) => self.expr_range(expr, fixup), + Expr::RawAddr(expr) => self.expr_raw_addr(expr, fixup), + Expr::Reference(expr) => self.expr_reference(expr, fixup), + Expr::Repeat(expr) => self.expr_repeat(expr), + Expr::Return(expr) => self.expr_return(expr, fixup), + Expr::Struct(expr) => self.expr_struct(expr), + Expr::Try(expr) => self.expr_try(expr, beginning_of_line, fixup), + Expr::TryBlock(expr) => self.expr_try_block(expr), + Expr::Tuple(expr) => self.expr_tuple(expr), + Expr::Unary(expr) => self.expr_unary(expr, fixup), + Expr::Unsafe(expr) => self.expr_unsafe(expr), + Expr::Verbatim(expr) => self.expr_verbatim(expr, fixup), + Expr::While(expr) => self.expr_while(expr), + Expr::Yield(expr) => self.expr_yield(expr, fixup), + _ => unimplemented!("unknown Expr"), + } + + if needs_paren { + self.word(")"); + } + } + + pub fn expr_beginning_of_line( + &mut self, + expr: &Expr, + mut needs_paren: bool, + beginning_of_line: bool, + mut fixup: FixupContext, + ) { + needs_paren |= fixup.parenthesize(expr); + if needs_paren { + self.word("("); + fixup = FixupContext::NONE; + } + + match expr { + Expr::Await(expr) => self.expr_await(expr, beginning_of_line, fixup), + Expr::Field(expr) => self.expr_field(expr, beginning_of_line, fixup), + Expr::Index(expr) => self.expr_index(expr, beginning_of_line, fixup), + Expr::MethodCall(expr) => self.expr_method_call(expr, beginning_of_line, fixup), + Expr::Try(expr) => self.expr_try(expr, beginning_of_line, fixup), + _ => self.expr(expr, fixup), + } + + if needs_paren { + self.word(")"); + } + } + + fn prefix_subexpr( + &mut self, + expr: &Expr, + mut needs_paren: bool, + beginning_of_line: bool, + mut fixup: FixupContext, + ) { + needs_paren |= fixup.parenthesize(expr); + if needs_paren { + self.word("("); + fixup = FixupContext::NONE; + } + + match expr { + Expr::Await(expr) => self.prefix_subexpr_await(expr, beginning_of_line, fixup), + Expr::Call(expr) => self.prefix_subexpr_call(expr, fixup), + Expr::Field(expr) => self.prefix_subexpr_field(expr, beginning_of_line, fixup), + Expr::Index(expr) => self.prefix_subexpr_index(expr, beginning_of_line, fixup), + Expr::MethodCall(expr) => { + let unindent_call_args = false; + self.prefix_subexpr_method_call(expr, beginning_of_line, unindent_call_args, fixup); + } + Expr::Try(expr) => self.prefix_subexpr_try(expr, beginning_of_line, fixup), + _ => { + self.cbox(-INDENT); + self.expr(expr, fixup); + self.end(); + } + } + + if needs_paren { + self.word(")"); + } + } + + fn expr_condition(&mut self, expr: &Expr) { + self.cbox(0); + self.expr(expr, FixupContext::new_condition()); + if needs_newline_if_wrap(expr) { + self.space(); + } else { + self.nbsp(); + } + self.end(); + } + + pub fn subexpr(&mut self, expr: &Expr, needs_paren: bool, mut fixup: FixupContext) { + if needs_paren { + self.word("("); + fixup = FixupContext::NONE; + } + + self.expr(expr, fixup); + + if needs_paren { + self.word(")"); + } + } + + fn expr_array(&mut self, expr: &ExprArray) { + self.outer_attrs(&expr.attrs); + if expr.elems.is_empty() { + self.word("[]"); + } else if simple_array(&expr.elems) { + self.cbox(INDENT); + self.word("["); + self.zerobreak(); + self.ibox(0); + for elem in expr.elems.iter().delimited() { + self.expr(&elem, FixupContext::NONE); + if !elem.is_last { + self.word(","); + self.space(); + } + } + self.end(); + self.trailing_comma(true); + self.offset(-INDENT); 
+ self.word("]"); + self.end(); + } else { + self.word("["); + self.cbox(INDENT); + self.zerobreak(); + for elem in expr.elems.iter().delimited() { + self.expr(&elem, FixupContext::NONE); + self.trailing_comma(elem.is_last); + } + self.offset(-INDENT); + self.end(); + self.word("]"); + } + } + + fn expr_assign(&mut self, expr: &ExprAssign, fixup: FixupContext) { + let (left_prec, left_fixup) = fixup.leftmost_subexpression_with_operator( + &expr.left, + false, + false, + Precedence::Assign, + ); + let right_fixup = fixup.rightmost_subexpression_fixup(false, false, Precedence::Assign); + + self.outer_attrs(&expr.attrs); + self.ibox(0); + if !expr.attrs.is_empty() { + self.word("("); + } + self.subexpr(&expr.left, left_prec <= Precedence::Range, left_fixup); + self.word(" = "); + self.neverbreak(); + self.expr(&expr.right, right_fixup); + if !expr.attrs.is_empty() { + self.word(")"); + } + self.end(); + } + + fn expr_async(&mut self, expr: &ExprAsync) { + self.outer_attrs(&expr.attrs); + self.word("async "); + if expr.capture.is_some() { + self.word("move "); + } + self.cbox(INDENT); + self.small_block(&expr.block, &expr.attrs); + self.end(); + } + + fn expr_await(&mut self, expr: &ExprAwait, beginning_of_line: bool, fixup: FixupContext) { + self.outer_attrs(&expr.attrs); + self.cbox(INDENT); + self.prefix_subexpr_await(expr, beginning_of_line, fixup); + self.end(); + } + + fn prefix_subexpr_await( + &mut self, + expr: &ExprAwait, + beginning_of_line: bool, + fixup: FixupContext, + ) { + let (left_prec, left_fixup) = fixup.leftmost_subexpression_with_dot(&expr.base); + + self.prefix_subexpr( + &expr.base, + left_prec < Precedence::Unambiguous, + beginning_of_line, + left_fixup, + ); + if !(beginning_of_line && is_short_ident(&expr.base)) { + self.scan_break(BreakToken { + no_break: self.ends_with('.').then_some(' '), + ..BreakToken::default() + }); + } + self.word(".await"); + } + + fn expr_binary(&mut self, expr: &ExprBinary, fixup: FixupContext) { + let binop_prec = Precedence::of_binop(&expr.op); + let (left_prec, left_fixup) = fixup.leftmost_subexpression_with_operator( + &expr.left, + match &expr.op { + BinOp::Sub(_) + | BinOp::Mul(_) + | BinOp::And(_) + | BinOp::Or(_) + | BinOp::BitAnd(_) + | BinOp::BitOr(_) + | BinOp::Shl(_) + | BinOp::Lt(_) => true, + _ => false, + }, + match &expr.op { + BinOp::Shl(_) | BinOp::Lt(_) => true, + _ => false, + }, + binop_prec, + ); + let left_needs_group = match binop_prec { + Precedence::Assign => left_prec <= Precedence::Range, + Precedence::Compare => left_prec <= binop_prec, + _ => left_prec < binop_prec, + }; + let right_fixup = fixup.rightmost_subexpression_fixup(false, false, binop_prec); + let right_needs_group = binop_prec != Precedence::Assign + && right_fixup.rightmost_subexpression_precedence(&expr.right) <= binop_prec; + + self.outer_attrs(&expr.attrs); + self.ibox(INDENT); + self.ibox(-INDENT); + if !expr.attrs.is_empty() { + self.word("("); + } + self.subexpr(&expr.left, left_needs_group, left_fixup); + self.end(); + self.space(); + self.binary_operator(&expr.op); + self.nbsp(); + self.subexpr(&expr.right, right_needs_group, right_fixup); + if !expr.attrs.is_empty() { + self.word(")"); + } + self.end(); + } + + pub fn expr_block(&mut self, expr: &ExprBlock) { + self.outer_attrs(&expr.attrs); + if let Some(label) = &expr.label { + self.label(label); + } + self.cbox(INDENT); + self.small_block(&expr.block, &expr.attrs); + self.end(); + } + + fn expr_break(&mut self, expr: &ExprBreak, fixup: FixupContext) { + self.outer_attrs(&expr.attrs); + 
self.word("break"); + if let Some(lifetime) = &expr.label { + self.nbsp(); + self.lifetime(lifetime); + } + if let Some(value) = &expr.expr { + self.nbsp(); + self.subexpr( + value, + expr.label.is_none() && classify::expr_leading_label(value), + fixup.rightmost_subexpression_fixup(true, true, Precedence::Jump), + ); + } + } + + fn expr_call(&mut self, expr: &ExprCall, beginning_of_line: bool, fixup: FixupContext) { + let (left_prec, left_fixup) = fixup.leftmost_subexpression_with_operator( + &expr.func, + true, + false, + Precedence::Unambiguous, + ); + let needs_paren = if let Expr::Field(func) = &*expr.func { + matches!(func.member, Member::Named(_)) + } else { + left_prec < Precedence::Unambiguous + }; + + self.outer_attrs(&expr.attrs); + self.expr_beginning_of_line(&expr.func, needs_paren, beginning_of_line, left_fixup); + self.word("("); + self.call_args(&expr.args); + self.word(")"); + } + + fn prefix_subexpr_call(&mut self, expr: &ExprCall, fixup: FixupContext) { + let (left_prec, left_fixup) = fixup.leftmost_subexpression_with_operator( + &expr.func, + true, + false, + Precedence::Unambiguous, + ); + let needs_paren = if let Expr::Field(func) = &*expr.func { + matches!(func.member, Member::Named(_)) + } else { + left_prec < Precedence::Unambiguous + }; + + let beginning_of_line = false; + self.prefix_subexpr(&expr.func, needs_paren, beginning_of_line, left_fixup); + self.word("("); + self.call_args(&expr.args); + self.word(")"); + } + + fn expr_cast(&mut self, expr: &ExprCast, fixup: FixupContext) { + let (left_prec, left_fixup) = + fixup.leftmost_subexpression_with_operator(&expr.expr, false, false, Precedence::Cast); + + self.outer_attrs(&expr.attrs); + self.ibox(INDENT); + self.ibox(-INDENT); + if !expr.attrs.is_empty() { + self.word("("); + } + self.subexpr(&expr.expr, left_prec < Precedence::Cast, left_fixup); + self.end(); + self.space(); + self.word("as "); + self.ty(&expr.ty); + if !expr.attrs.is_empty() { + self.word(")"); + } + self.end(); + } + + fn expr_closure(&mut self, expr: &ExprClosure, fixup: FixupContext) { + self.outer_attrs(&expr.attrs); + self.ibox(0); + if let Some(bound_lifetimes) = &expr.lifetimes { + self.bound_lifetimes(bound_lifetimes); + } + if expr.constness.is_some() { + self.word("const "); + } + if expr.movability.is_some() { + self.word("static "); + } + if expr.asyncness.is_some() { + self.word("async "); + } + if expr.capture.is_some() { + self.word("move "); + } + self.cbox(INDENT); + self.word("|"); + for pat in expr.inputs.iter().delimited() { + if pat.is_first { + self.zerobreak(); + } + self.pat(&pat); + if !pat.is_last { + self.word(","); + self.space(); + } + } + match &expr.output { + ReturnType::Default => { + self.word("|"); + self.space(); + self.offset(-INDENT); + self.end(); + self.neverbreak(); + let wrap_in_brace = match &*expr.body { + Expr::Match(ExprMatch { attrs, .. }) | Expr::Call(ExprCall { attrs, .. 
}) => { + attr::has_outer(attrs) + } + body => !is_blocklike(body), + }; + if wrap_in_brace { + self.cbox(INDENT); + let okay_to_brace = parseable_as_stmt(&expr.body); + self.scan_break(BreakToken { + pre_break: Some(if okay_to_brace { '{' } else { '(' }), + ..BreakToken::default() + }); + self.expr( + &expr.body, + fixup.rightmost_subexpression_fixup(false, false, Precedence::Jump), + ); + self.scan_break(BreakToken { + offset: -INDENT, + pre_break: (okay_to_brace && stmt::add_semi(&expr.body)).then_some(';'), + post_break: if okay_to_brace { "}" } else { ")" }, + ..BreakToken::default() + }); + self.end(); + } else { + self.expr( + &expr.body, + fixup.rightmost_subexpression_fixup(false, false, Precedence::Jump), + ); + } + } + ReturnType::Type(_arrow, ty) => { + if !expr.inputs.is_empty() { + self.trailing_comma(true); + self.offset(-INDENT); + } + self.word("|"); + self.end(); + self.word(" -> "); + self.ty(ty); + self.nbsp(); + self.neverbreak(); + if matches!(&*expr.body, Expr::Block(body) if body.attrs.is_empty() && body.label.is_none()) + { + self.expr( + &expr.body, + fixup.rightmost_subexpression_fixup(false, false, Precedence::Jump), + ); + } else { + self.cbox(INDENT); + self.expr_as_small_block(&expr.body, 0); + self.end(); + } + } + } + self.end(); + } + + pub fn expr_const(&mut self, expr: &ExprConst) { + self.outer_attrs(&expr.attrs); + self.word("const "); + self.cbox(INDENT); + self.small_block(&expr.block, &expr.attrs); + self.end(); + } + + fn expr_continue(&mut self, expr: &ExprContinue) { + self.outer_attrs(&expr.attrs); + self.word("continue"); + if let Some(lifetime) = &expr.label { + self.nbsp(); + self.lifetime(lifetime); + } + } + + fn expr_field(&mut self, expr: &ExprField, beginning_of_line: bool, fixup: FixupContext) { + self.outer_attrs(&expr.attrs); + self.cbox(INDENT); + self.prefix_subexpr_field(expr, beginning_of_line, fixup); + self.end(); + } + + fn prefix_subexpr_field( + &mut self, + expr: &ExprField, + beginning_of_line: bool, + fixup: FixupContext, + ) { + let (left_prec, left_fixup) = fixup.leftmost_subexpression_with_dot(&expr.base); + + self.prefix_subexpr( + &expr.base, + left_prec < Precedence::Unambiguous, + beginning_of_line, + left_fixup, + ); + if !(beginning_of_line && is_short_ident(&expr.base)) { + self.scan_break(BreakToken { + no_break: self.ends_with('.').then_some(' '), + ..BreakToken::default() + }); + } + self.word("."); + self.member(&expr.member); + } + + fn expr_for_loop(&mut self, expr: &ExprForLoop) { + self.outer_attrs(&expr.attrs); + self.ibox(0); + if let Some(label) = &expr.label { + self.label(label); + } + self.word("for "); + self.pat(&expr.pat); + self.word(" in "); + self.neverbreak(); + self.expr_condition(&expr.expr); + self.word("{"); + self.neverbreak(); + self.cbox(INDENT); + self.hardbreak_if_nonempty(); + self.inner_attrs(&expr.attrs); + for stmt in expr.body.stmts.iter().delimited() { + self.stmt(&stmt, stmt.is_last); + } + self.offset(-INDENT); + self.end(); + self.word("}"); + self.end(); + } + + fn expr_group(&mut self, expr: &ExprGroup, fixup: FixupContext) { + self.outer_attrs(&expr.attrs); + self.expr(&expr.expr, fixup); + } + + fn expr_if(&mut self, expr: &ExprIf) { + self.outer_attrs(&expr.attrs); + self.cbox(INDENT); + self.word("if "); + self.cbox(-INDENT); + self.expr_condition(&expr.cond); + self.end(); + if let Some((_else_token, else_branch)) = &expr.else_branch { + let mut else_branch = &**else_branch; + self.small_block(&expr.then_branch, &[]); + loop { + self.word(" else "); + match 
else_branch { + Expr::If(expr) => { + self.word("if "); + self.cbox(-INDENT); + self.expr_condition(&expr.cond); + self.end(); + self.small_block(&expr.then_branch, &[]); + if let Some((_else_token, next)) = &expr.else_branch { + else_branch = next; + continue; + } + } + Expr::Block(expr) => { + self.small_block(&expr.block, &[]); + } + // If not one of the valid expressions to exist in an else + // clause, wrap in a block. + other => self.expr_as_small_block(other, INDENT), + } + break; + } + } else if expr.then_branch.stmts.is_empty() { + self.word("{}"); + } else { + self.word("{"); + self.hardbreak(); + for stmt in expr.then_branch.stmts.iter().delimited() { + self.stmt(&stmt, stmt.is_last); + } + self.offset(-INDENT); + self.word("}"); + } + self.end(); + } + + fn expr_index(&mut self, expr: &ExprIndex, beginning_of_line: bool, fixup: FixupContext) { + let (left_prec, left_fixup) = fixup.leftmost_subexpression_with_operator( + &expr.expr, + true, + false, + Precedence::Unambiguous, + ); + + self.outer_attrs(&expr.attrs); + self.expr_beginning_of_line( + &expr.expr, + left_prec < Precedence::Unambiguous, + beginning_of_line, + left_fixup, + ); + self.word("["); + self.expr(&expr.index, FixupContext::NONE); + self.word("]"); + } + + fn prefix_subexpr_index( + &mut self, + expr: &ExprIndex, + beginning_of_line: bool, + fixup: FixupContext, + ) { + let (left_prec, left_fixup) = fixup.leftmost_subexpression_with_operator( + &expr.expr, + true, + false, + Precedence::Unambiguous, + ); + + self.prefix_subexpr( + &expr.expr, + left_prec < Precedence::Unambiguous, + beginning_of_line, + left_fixup, + ); + self.word("["); + self.expr(&expr.index, FixupContext::NONE); + self.word("]"); + } + + fn expr_infer(&mut self, expr: &ExprInfer) { + self.outer_attrs(&expr.attrs); + self.word("_"); + } + + fn expr_let(&mut self, expr: &ExprLet, fixup: FixupContext) { + let (right_prec, right_fixup) = fixup.rightmost_subexpression(&expr.expr, Precedence::Let); + + self.outer_attrs(&expr.attrs); + self.ibox(0); + self.word("let "); + self.ibox(0); + self.pat(&expr.pat); + self.end(); + self.word(" = "); + self.neverbreak(); + self.ibox(0); + self.subexpr(&expr.expr, right_prec < Precedence::Let, right_fixup); + self.end(); + self.end(); + } + + pub fn expr_lit(&mut self, expr: &ExprLit) { + self.outer_attrs(&expr.attrs); + self.lit(&expr.lit); + } + + fn expr_loop(&mut self, expr: &ExprLoop) { + self.outer_attrs(&expr.attrs); + if let Some(label) = &expr.label { + self.label(label); + } + self.word("loop {"); + self.cbox(INDENT); + self.hardbreak_if_nonempty(); + self.inner_attrs(&expr.attrs); + for stmt in expr.body.stmts.iter().delimited() { + self.stmt(&stmt, stmt.is_last); + } + self.offset(-INDENT); + self.end(); + self.word("}"); + } + + pub fn expr_macro(&mut self, expr: &ExprMacro) { + self.outer_attrs(&expr.attrs); + let semicolon = false; + self.mac(&expr.mac, None, semicolon); + } + + fn expr_match(&mut self, expr: &ExprMatch) { + self.outer_attrs(&expr.attrs); + self.ibox(0); + self.word("match "); + self.expr_condition(&expr.expr); + self.word("{"); + self.neverbreak(); + self.cbox(INDENT); + self.hardbreak_if_nonempty(); + self.inner_attrs(&expr.attrs); + for arm in &expr.arms { + self.arm(arm); + self.hardbreak(); + } + self.offset(-INDENT); + self.end(); + self.word("}"); + self.end(); + } + + fn expr_method_call( + &mut self, + expr: &ExprMethodCall, + beginning_of_line: bool, + fixup: FixupContext, + ) { + self.outer_attrs(&expr.attrs); + self.cbox(INDENT); + let unindent_call_args = 
beginning_of_line && is_short_ident(&expr.receiver); + self.prefix_subexpr_method_call(expr, beginning_of_line, unindent_call_args, fixup); + self.end(); + } + + fn prefix_subexpr_method_call( + &mut self, + expr: &ExprMethodCall, + beginning_of_line: bool, + unindent_call_args: bool, + fixup: FixupContext, + ) { + let (left_prec, left_fixup) = fixup.leftmost_subexpression_with_dot(&expr.receiver); + + self.prefix_subexpr( + &expr.receiver, + left_prec < Precedence::Unambiguous, + beginning_of_line, + left_fixup, + ); + if !(beginning_of_line && is_short_ident(&expr.receiver)) { + self.scan_break(BreakToken { + no_break: self.ends_with('.').then_some(' '), + ..BreakToken::default() + }); + } + self.word("."); + self.ident(&expr.method); + if let Some(turbofish) = &expr.turbofish { + self.angle_bracketed_generic_arguments(turbofish, PathKind::Expr); + } + self.cbox(if unindent_call_args { -INDENT } else { 0 }); + self.word("("); + self.call_args(&expr.args); + self.word(")"); + self.end(); + } + + fn expr_paren(&mut self, expr: &ExprParen) { + self.outer_attrs(&expr.attrs); + self.word("("); + self.expr(&expr.expr, FixupContext::NONE); + self.word(")"); + } + + pub fn expr_path(&mut self, expr: &ExprPath) { + self.outer_attrs(&expr.attrs); + self.qpath(&expr.qself, &expr.path, PathKind::Expr); + } + + pub fn expr_range(&mut self, expr: &ExprRange, fixup: FixupContext) { + self.outer_attrs(&expr.attrs); + if !expr.attrs.is_empty() { + self.word("("); + } + if let Some(start) = &expr.start { + let (left_prec, left_fixup) = + fixup.leftmost_subexpression_with_operator(start, true, false, Precedence::Range); + self.subexpr(start, left_prec <= Precedence::Range, left_fixup); + } else if self.ends_with('.') { + self.nbsp(); + } + self.word(match expr.limits { + RangeLimits::HalfOpen(_) => "..", + RangeLimits::Closed(_) => "..=", + }); + if let Some(end) = &expr.end { + let right_fixup = fixup.rightmost_subexpression_fixup(false, true, Precedence::Range); + let right_prec = right_fixup.rightmost_subexpression_precedence(end); + self.subexpr(end, right_prec <= Precedence::Range, right_fixup); + } + if !expr.attrs.is_empty() { + self.word(")"); + } + } + + fn expr_raw_addr(&mut self, expr: &ExprRawAddr, fixup: FixupContext) { + let (right_prec, right_fixup) = + fixup.rightmost_subexpression(&expr.expr, Precedence::Prefix); + + self.outer_attrs(&expr.attrs); + self.word("&raw "); + self.pointer_mutability(&expr.mutability); + self.nbsp(); + self.subexpr(&expr.expr, right_prec < Precedence::Prefix, right_fixup); + } + + fn expr_reference(&mut self, expr: &ExprReference, fixup: FixupContext) { + let (right_prec, right_fixup) = + fixup.rightmost_subexpression(&expr.expr, Precedence::Prefix); + + self.outer_attrs(&expr.attrs); + self.word("&"); + if expr.mutability.is_some() { + self.word("mut "); + } + self.subexpr(&expr.expr, right_prec < Precedence::Prefix, right_fixup); + } + + fn expr_repeat(&mut self, expr: &ExprRepeat) { + self.outer_attrs(&expr.attrs); + self.word("["); + self.expr(&expr.expr, FixupContext::NONE); + self.word("; "); + self.expr(&expr.len, FixupContext::NONE); + self.word("]"); + } + + fn expr_return(&mut self, expr: &ExprReturn, fixup: FixupContext) { + self.outer_attrs(&expr.attrs); + self.word("return"); + if let Some(value) = &expr.expr { + self.nbsp(); + self.expr( + value, + fixup.rightmost_subexpression_fixup(true, false, Precedence::Jump), + ); + } + } + + fn expr_struct(&mut self, expr: &ExprStruct) { + self.outer_attrs(&expr.attrs); + self.cbox(INDENT); + 
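+        // The struct path printed next sits in an inconsistent box whose
+        // offset cancels the surrounding box's indent, so a path long enough
+        // to wrap is not indented along with the field list that follows.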
self.ibox(-INDENT); + self.qpath(&expr.qself, &expr.path, PathKind::Expr); + self.end(); + self.word(" {"); + self.space_if_nonempty(); + for field_value in expr.fields.iter().delimited() { + self.field_value(&field_value); + self.trailing_comma_or_space(field_value.is_last && expr.rest.is_none()); + } + if let Some(rest) = &expr.rest { + self.word(".."); + self.expr(rest, FixupContext::NONE); + self.space(); + } + self.offset(-INDENT); + self.end_with_max_width(34); + self.word("}"); + } + + fn expr_try(&mut self, expr: &ExprTry, beginning_of_line: bool, fixup: FixupContext) { + let (left_prec, left_fixup) = fixup.leftmost_subexpression_with_dot(&expr.expr); + + self.outer_attrs(&expr.attrs); + self.expr_beginning_of_line( + &expr.expr, + left_prec < Precedence::Unambiguous, + beginning_of_line, + left_fixup, + ); + self.word("?"); + } + + fn prefix_subexpr_try(&mut self, expr: &ExprTry, beginning_of_line: bool, fixup: FixupContext) { + let (left_prec, left_fixup) = fixup.leftmost_subexpression_with_dot(&expr.expr); + + self.prefix_subexpr( + &expr.expr, + left_prec < Precedence::Unambiguous, + beginning_of_line, + left_fixup, + ); + self.word("?"); + } + + fn expr_try_block(&mut self, expr: &ExprTryBlock) { + self.outer_attrs(&expr.attrs); + self.word("try "); + self.cbox(INDENT); + self.small_block(&expr.block, &expr.attrs); + self.end(); + } + + fn expr_tuple(&mut self, expr: &ExprTuple) { + self.outer_attrs(&expr.attrs); + self.word("("); + self.cbox(INDENT); + self.zerobreak(); + for elem in expr.elems.iter().delimited() { + self.expr(&elem, FixupContext::NONE); + if expr.elems.len() == 1 { + self.word(","); + self.zerobreak(); + } else { + self.trailing_comma(elem.is_last); + } + } + self.offset(-INDENT); + self.end(); + self.word(")"); + } + + fn expr_unary(&mut self, expr: &ExprUnary, fixup: FixupContext) { + let (right_prec, right_fixup) = + fixup.rightmost_subexpression(&expr.expr, Precedence::Prefix); + + self.outer_attrs(&expr.attrs); + self.unary_operator(&expr.op); + self.subexpr(&expr.expr, right_prec < Precedence::Prefix, right_fixup); + } + + fn expr_unsafe(&mut self, expr: &ExprUnsafe) { + self.outer_attrs(&expr.attrs); + self.word("unsafe "); + self.cbox(INDENT); + self.small_block(&expr.block, &expr.attrs); + self.end(); + } + + #[cfg(not(feature = "verbatim"))] + fn expr_verbatim(&mut self, expr: &TokenStream, _fixup: FixupContext) { + if !expr.is_empty() { + unimplemented!("Expr::Verbatim `{}`", expr); + } + } + + #[cfg(feature = "verbatim")] + fn expr_verbatim(&mut self, tokens: &TokenStream, fixup: FixupContext) { + use syn::parse::discouraged::Speculative; + use syn::parse::{Parse, ParseStream, Result}; + use syn::{parenthesized, Ident}; + + enum ExprVerbatim { + Empty, + Ellipsis, + Become(Become), + Builtin(Builtin), + } + + struct Become { + attrs: Vec, + tail_call: Expr, + } + + struct Builtin { + attrs: Vec, + name: Ident, + args: TokenStream, + } + + mod kw { + syn::custom_keyword!(builtin); + syn::custom_keyword!(raw); + } + + impl Parse for ExprVerbatim { + fn parse(input: ParseStream) -> Result { + let ahead = input.fork(); + let attrs = ahead.call(Attribute::parse_outer)?; + let lookahead = ahead.lookahead1(); + if input.is_empty() { + Ok(ExprVerbatim::Empty) + } else if lookahead.peek(Token![become]) { + input.advance_to(&ahead); + input.parse::()?; + let tail_call: Expr = input.parse()?; + Ok(ExprVerbatim::Become(Become { attrs, tail_call })) + } else if lookahead.peek(kw::builtin) { + input.advance_to(&ahead); + input.parse::()?; + input.parse::()?; + 
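+                    // Remainder of `builtin # name(args)`: parse the builtin's
+                    // identifier, then capture the parenthesized arguments as
+                    // a raw token stream.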
let name: Ident = input.parse()?; + let args; + parenthesized!(args in input); + let args: TokenStream = args.parse()?; + Ok(ExprVerbatim::Builtin(Builtin { attrs, name, args })) + } else if lookahead.peek(Token![...]) { + input.parse::()?; + Ok(ExprVerbatim::Ellipsis) + } else { + Err(lookahead.error()) + } + } + } + + let expr: ExprVerbatim = match syn::parse2(tokens.clone()) { + Ok(expr) => expr, + Err(_) => unimplemented!("Expr::Verbatim `{}`", tokens), + }; + + match expr { + ExprVerbatim::Empty => {} + ExprVerbatim::Ellipsis => { + self.word("..."); + } + ExprVerbatim::Become(expr) => { + self.outer_attrs(&expr.attrs); + self.word("become"); + self.nbsp(); + self.expr( + &expr.tail_call, + fixup.rightmost_subexpression_fixup(true, false, Precedence::Jump), + ); + } + ExprVerbatim::Builtin(expr) => { + self.outer_attrs(&expr.attrs); + self.word("builtin # "); + self.ident(&expr.name); + self.word("("); + if !expr.args.is_empty() { + self.cbox(INDENT); + self.zerobreak(); + self.ibox(0); + self.macro_rules_tokens(expr.args, false); + self.end(); + self.zerobreak(); + self.offset(-INDENT); + self.end(); + } + self.word(")"); + } + } + } + + fn expr_while(&mut self, expr: &ExprWhile) { + self.outer_attrs(&expr.attrs); + if let Some(label) = &expr.label { + self.label(label); + } + self.word("while "); + self.expr_condition(&expr.cond); + self.word("{"); + self.neverbreak(); + self.cbox(INDENT); + self.hardbreak_if_nonempty(); + self.inner_attrs(&expr.attrs); + for stmt in expr.body.stmts.iter().delimited() { + self.stmt(&stmt, stmt.is_last); + } + self.offset(-INDENT); + self.end(); + self.word("}"); + } + + fn expr_yield(&mut self, expr: &ExprYield, fixup: FixupContext) { + self.outer_attrs(&expr.attrs); + self.word("yield"); + if let Some(value) = &expr.expr { + self.nbsp(); + self.expr( + value, + fixup.rightmost_subexpression_fixup(true, false, Precedence::Jump), + ); + } + } + + fn label(&mut self, label: &Label) { + self.lifetime(&label.name); + self.word(": "); + } + + fn field_value(&mut self, field_value: &FieldValue) { + self.outer_attrs(&field_value.attrs); + self.member(&field_value.member); + if field_value.colon_token.is_some() { + self.word(": "); + self.ibox(0); + self.expr(&field_value.expr, FixupContext::NONE); + self.end(); + } + } + + fn arm(&mut self, arm: &Arm) { + self.outer_attrs(&arm.attrs); + self.ibox(0); + self.pat(&arm.pat); + if let Some((_if_token, guard)) = &arm.guard { + self.word(" if "); + self.expr(guard, FixupContext::NONE); + } + self.word(" => "); + let empty_block; + let mut body = &*arm.body; + while let Expr::Block(expr) = body { + if expr.attrs.is_empty() && expr.label.is_none() { + let mut stmts = expr.block.stmts.iter(); + if let (Some(Stmt::Expr(inner, None)), None) = (stmts.next(), stmts.next()) { + body = inner; + continue; + } + } + break; + } + if let Expr::Tuple(expr) = body { + if expr.elems.is_empty() && expr.attrs.is_empty() { + empty_block = Expr::Block(ExprBlock { + attrs: Vec::new(), + label: None, + block: Block { + brace_token: token::Brace::default(), + stmts: Vec::new(), + }, + }); + body = &empty_block; + } + } + if let Expr::Block(body) = body { + if let Some(label) = &body.label { + self.label(label); + } + self.word("{"); + self.neverbreak(); + self.cbox(INDENT); + self.hardbreak_if_nonempty(); + self.inner_attrs(&body.attrs); + for stmt in body.block.stmts.iter().delimited() { + self.stmt(&stmt, stmt.is_last); + } + self.offset(-INDENT); + self.end(); + self.word("}"); + } else { + self.neverbreak(); + self.cbox(INDENT); + 
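+            // If this non-block arm body has to wrap, the break tokens below
+            // enclose it in `{ ... }` when it can stand alone as a statement,
+            // or in `( ... )` otherwise, so the wrapped arm still parses with
+            // its original meaning.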
let okay_to_brace = parseable_as_stmt(body); + self.scan_break(BreakToken { + pre_break: Some(if okay_to_brace { '{' } else { '(' }), + ..BreakToken::default() + }); + self.expr_beginning_of_line(body, false, true, FixupContext::new_match_arm()); + self.scan_break(BreakToken { + offset: -INDENT, + pre_break: (okay_to_brace && stmt::add_semi(body)).then_some(';'), + post_break: if okay_to_brace { "}" } else { ")," }, + no_break: classify::requires_comma_to_be_match_arm(body).then_some(','), + ..BreakToken::default() + }); + self.end(); + } + self.end(); + } + + fn call_args(&mut self, args: &Punctuated) { + let mut iter = args.iter(); + match (iter.next(), iter.next()) { + (Some(expr), None) if is_blocklike(expr) => { + self.expr(expr, FixupContext::NONE); + } + _ => { + self.cbox(INDENT); + self.zerobreak(); + for arg in args.iter().delimited() { + self.expr(&arg, FixupContext::NONE); + self.trailing_comma(arg.is_last); + } + self.offset(-INDENT); + self.end(); + } + } + } + + pub fn small_block(&mut self, block: &Block, attrs: &[Attribute]) { + self.word("{"); + if attr::has_inner(attrs) || !block.stmts.is_empty() { + self.space(); + self.inner_attrs(attrs); + match block.stmts.as_slice() { + [Stmt::Expr(expr, None)] if stmt::break_after(expr) => { + self.ibox(0); + self.expr_beginning_of_line(expr, false, true, FixupContext::new_stmt()); + self.end(); + self.space(); + } + _ => { + for stmt in block.stmts.iter().delimited() { + self.stmt(&stmt, stmt.is_last); + } + } + } + self.offset(-INDENT); + } + self.word("}"); + } + + pub fn expr_as_small_block(&mut self, expr: &Expr, indent: isize) { + self.word("{"); + self.space(); + self.ibox(indent); + self.expr_beginning_of_line(expr, false, true, FixupContext::new_stmt()); + self.end(); + self.space(); + self.offset(-INDENT); + self.word("}"); + } + + pub fn member(&mut self, member: &Member) { + match member { + Member::Named(ident) => self.ident(ident), + Member::Unnamed(index) => self.index(index), + } + } + + fn index(&mut self, member: &Index) { + self.word(member.index.to_string()); + } + + fn binary_operator(&mut self, op: &BinOp) { + self.word( + match op { + #![cfg_attr(all(test, exhaustive), deny(non_exhaustive_omitted_patterns))] + BinOp::Add(_) => "+", + BinOp::Sub(_) => "-", + BinOp::Mul(_) => "*", + BinOp::Div(_) => "/", + BinOp::Rem(_) => "%", + BinOp::And(_) => "&&", + BinOp::Or(_) => "||", + BinOp::BitXor(_) => "^", + BinOp::BitAnd(_) => "&", + BinOp::BitOr(_) => "|", + BinOp::Shl(_) => "<<", + BinOp::Shr(_) => ">>", + BinOp::Eq(_) => "==", + BinOp::Lt(_) => "<", + BinOp::Le(_) => "<=", + BinOp::Ne(_) => "!=", + BinOp::Ge(_) => ">=", + BinOp::Gt(_) => ">", + BinOp::AddAssign(_) => "+=", + BinOp::SubAssign(_) => "-=", + BinOp::MulAssign(_) => "*=", + BinOp::DivAssign(_) => "/=", + BinOp::RemAssign(_) => "%=", + BinOp::BitXorAssign(_) => "^=", + BinOp::BitAndAssign(_) => "&=", + BinOp::BitOrAssign(_) => "|=", + BinOp::ShlAssign(_) => "<<=", + BinOp::ShrAssign(_) => ">>=", + _ => unimplemented!("unknown BinOp"), + }, + ); + } + + fn unary_operator(&mut self, op: &UnOp) { + self.word( + match op { + #![cfg_attr(all(test, exhaustive), deny(non_exhaustive_omitted_patterns))] + UnOp::Deref(_) => "*", + UnOp::Not(_) => "!", + UnOp::Neg(_) => "-", + _ => unimplemented!("unknown UnOp"), + }, + ); + } + + fn pointer_mutability(&mut self, mutability: &PointerMutability) { + match mutability { + PointerMutability::Const(_) => self.word("const"), + PointerMutability::Mut(_) => self.word("mut"), + } + } +} + +fn needs_newline_if_wrap(expr: 
&Expr) -> bool { + match expr { + #![cfg_attr(all(test, exhaustive), deny(non_exhaustive_omitted_patterns))] + Expr::Array(_) + | Expr::Async(_) + | Expr::Block(_) + | Expr::Break(ExprBreak { expr: None, .. }) + | Expr::Closure(_) + | Expr::Const(_) + | Expr::Continue(_) + | Expr::ForLoop(_) + | Expr::If(_) + | Expr::Infer(_) + | Expr::Lit(_) + | Expr::Loop(_) + | Expr::Macro(_) + | Expr::Match(_) + | Expr::Path(_) + | Expr::Range(ExprRange { end: None, .. }) + | Expr::Repeat(_) + | Expr::Return(ExprReturn { expr: None, .. }) + | Expr::Struct(_) + | Expr::TryBlock(_) + | Expr::Tuple(_) + | Expr::Unsafe(_) + | Expr::Verbatim(_) + | Expr::While(_) + | Expr::Yield(ExprYield { expr: None, .. }) => false, + + Expr::Assign(_) + | Expr::Await(_) + | Expr::Binary(_) + | Expr::Cast(_) + | Expr::Field(_) + | Expr::Index(_) + | Expr::MethodCall(_) => true, + + Expr::Break(ExprBreak { expr: Some(e), .. }) + | Expr::Call(ExprCall { func: e, .. }) + | Expr::Group(ExprGroup { expr: e, .. }) + | Expr::Let(ExprLet { expr: e, .. }) + | Expr::Paren(ExprParen { expr: e, .. }) + | Expr::Range(ExprRange { end: Some(e), .. }) + | Expr::RawAddr(ExprRawAddr { expr: e, .. }) + | Expr::Reference(ExprReference { expr: e, .. }) + | Expr::Return(ExprReturn { expr: Some(e), .. }) + | Expr::Try(ExprTry { expr: e, .. }) + | Expr::Unary(ExprUnary { expr: e, .. }) + | Expr::Yield(ExprYield { expr: Some(e), .. }) => needs_newline_if_wrap(e), + + _ => false, + } +} + +fn is_short_ident(expr: &Expr) -> bool { + if let Expr::Path(expr) = expr { + return expr.attrs.is_empty() + && expr.qself.is_none() + && expr + .path + .get_ident() + .map_or(false, |ident| ident.to_string().len() as isize <= INDENT); + } + false +} + +fn is_blocklike(expr: &Expr) -> bool { + match expr { + #![cfg_attr(all(test, exhaustive), deny(non_exhaustive_omitted_patterns))] + Expr::Array(ExprArray { attrs, .. }) + | Expr::Async(ExprAsync { attrs, .. }) + | Expr::Block(ExprBlock { attrs, .. }) + | Expr::Closure(ExprClosure { attrs, .. }) + | Expr::Const(ExprConst { attrs, .. }) + | Expr::Struct(ExprStruct { attrs, .. }) + | Expr::TryBlock(ExprTryBlock { attrs, .. }) + | Expr::Tuple(ExprTuple { attrs, .. }) + | Expr::Unsafe(ExprUnsafe { attrs, .. 
}) => !attr::has_outer(attrs), + + Expr::Assign(_) + | Expr::Await(_) + | Expr::Binary(_) + | Expr::Break(_) + | Expr::Call(_) + | Expr::Cast(_) + | Expr::Continue(_) + | Expr::Field(_) + | Expr::ForLoop(_) + | Expr::If(_) + | Expr::Index(_) + | Expr::Infer(_) + | Expr::Let(_) + | Expr::Lit(_) + | Expr::Loop(_) + | Expr::Macro(_) + | Expr::Match(_) + | Expr::MethodCall(_) + | Expr::Paren(_) + | Expr::Path(_) + | Expr::Range(_) + | Expr::RawAddr(_) + | Expr::Reference(_) + | Expr::Repeat(_) + | Expr::Return(_) + | Expr::Try(_) + | Expr::Unary(_) + | Expr::Verbatim(_) + | Expr::While(_) + | Expr::Yield(_) => false, + + Expr::Group(e) => is_blocklike(&e.expr), + + _ => false, + } +} + +pub fn simple_block(expr: &Expr) -> Option<&ExprBlock> { + if let Expr::Block(expr) = expr { + if expr.attrs.is_empty() && expr.label.is_none() { + return Some(expr); + } + } + None +} + +pub fn simple_array(elements: &Punctuated) -> bool { + for expr in elements { + if let Expr::Lit(expr) = expr { + match expr.lit { + #![cfg_attr(all(test, exhaustive), deny(non_exhaustive_omitted_patterns))] + Lit::Byte(_) | Lit::Char(_) | Lit::Int(_) | Lit::Bool(_) => {} + + Lit::Str(_) | Lit::ByteStr(_) | Lit::CStr(_) | Lit::Float(_) | Lit::Verbatim(_) => { + return false; + } + + _ => return false, + } + } else { + return false; + } + } + true +} + +// Expressions for which `$expr` and `{ $expr }` mean the same thing. +// +// This is not the case for all expressions. For example `{} | x | x` has some +// bitwise OR operators while `{ {} |x| x }` has a block followed by a closure. +fn parseable_as_stmt(mut expr: &Expr) -> bool { + loop { + match expr { + #![cfg_attr(all(test, exhaustive), deny(non_exhaustive_omitted_patterns))] + Expr::Array(_) + | Expr::Async(_) + | Expr::Block(_) + | Expr::Break(_) + | Expr::Closure(_) + | Expr::Const(_) + | Expr::Continue(_) + | Expr::ForLoop(_) + | Expr::If(_) + | Expr::Infer(_) + | Expr::Lit(_) + | Expr::Loop(_) + | Expr::Macro(_) + | Expr::Match(_) + | Expr::Paren(_) + | Expr::Path(_) + | Expr::RawAddr(_) + | Expr::Reference(_) + | Expr::Repeat(_) + | Expr::Return(_) + | Expr::Struct(_) + | Expr::TryBlock(_) + | Expr::Tuple(_) + | Expr::Unary(_) + | Expr::Unsafe(_) + | Expr::Verbatim(_) + | Expr::While(_) + | Expr::Yield(_) => return true, + + Expr::Let(_) => return false, + + Expr::Assign(e) => { + if !classify::requires_semi_to_be_stmt(&e.left) { + return false; + } + expr = &e.left; + } + Expr::Await(e) => expr = &e.base, + Expr::Binary(e) => { + if !classify::requires_semi_to_be_stmt(&e.left) { + return false; + } + expr = &e.left; + } + Expr::Call(e) => { + if !classify::requires_semi_to_be_stmt(&e.func) { + return false; + } + expr = &e.func; + } + Expr::Cast(e) => { + if !classify::requires_semi_to_be_stmt(&e.expr) { + return false; + } + expr = &e.expr; + } + Expr::Field(e) => expr = &e.base, + Expr::Group(e) => expr = &e.expr, + Expr::Index(e) => { + if !classify::requires_semi_to_be_stmt(&e.expr) { + return false; + } + expr = &e.expr; + } + Expr::MethodCall(e) => expr = &e.receiver, + Expr::Range(e) => match &e.start { + None => return true, + Some(start) => { + if !classify::requires_semi_to_be_stmt(start) { + return false; + } + expr = start; + } + }, + Expr::Try(e) => expr = &e.expr, + + _ => return false, + } + } +} diff --git a/vendor/prettyplease/src/file.rs b/vendor/prettyplease/src/file.rs new file mode 100644 index 00000000000000..e23bd120feba58 --- /dev/null +++ b/vendor/prettyplease/src/file.rs @@ -0,0 +1,17 @@ +use crate::algorithm::Printer; +use syn::File; + 
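+// A minimal usage sketch (not from upstream prettyplease): `Printer::file`
+// below is the entry point that the crate-level `unparse` function drives.
+// This illustrative test assumes the vendored copy keeps upstream's public
+// signature `pub fn unparse(file: &syn::File) -> String`; the module and test
+// names here are made up for the example.
+#[cfg(test)]
+mod file_printing_sketch {
+    use syn::File;
+
+    #[test]
+    fn unparse_empty_file_is_blank() {
+        // Construct the AST directly so no syn parsing support is required.
+        let file = File {
+            shebang: None,
+            attrs: Vec::new(),
+            items: Vec::new(),
+        };
+        // Formatting an empty file should produce nothing but whitespace.
+        assert!(crate::unparse(&file).trim().is_empty());
+    }
+}
+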
+impl Printer { + pub fn file(&mut self, file: &File) { + self.cbox(0); + if let Some(shebang) = &file.shebang { + self.word(shebang.clone()); + self.hardbreak(); + } + self.inner_attrs(&file.attrs); + for item in &file.items { + self.item(item); + } + self.end(); + } +} diff --git a/vendor/prettyplease/src/fixup.rs b/vendor/prettyplease/src/fixup.rs new file mode 100644 index 00000000000000..2355d4905f9567 --- /dev/null +++ b/vendor/prettyplease/src/fixup.rs @@ -0,0 +1,676 @@ +use crate::classify; +use crate::precedence::Precedence; +use syn::{ + Expr, ExprBreak, ExprRange, ExprRawAddr, ExprReference, ExprReturn, ExprUnary, ExprYield, +}; + +#[derive(Copy, Clone)] +pub struct FixupContext { + previous_operator: Precedence, + next_operator: Precedence, + + // Print expression such that it can be parsed back as a statement + // consisting of the original expression. + // + // The effect of this is for binary operators in statement position to set + // `leftmost_subexpression_in_stmt` when printing their left-hand operand. + // + // (match x {}) - 1; // match needs parens when LHS of binary operator + // + // match x {}; // not when its own statement + // + stmt: bool, + + // This is the difference between: + // + // (match x {}) - 1; // subexpression needs parens + // + // let _ = match x {} - 1; // no parens + // + // There are 3 distinguishable contexts in which `print_expr` might be + // called with the expression `$match` as its argument, where `$match` + // represents an expression of kind `ExprKind::Match`: + // + // - stmt=false leftmost_subexpression_in_stmt=false + // + // Example: `let _ = $match - 1;` + // + // No parentheses required. + // + // - stmt=false leftmost_subexpression_in_stmt=true + // + // Example: `$match - 1;` + // + // Must parenthesize `($match)`, otherwise parsing back the output as a + // statement would terminate the statement after the closing brace of + // the match, parsing `-1;` as a separate statement. + // + // - stmt=true leftmost_subexpression_in_stmt=false + // + // Example: `$match;` + // + // No parentheses required. + leftmost_subexpression_in_stmt: bool, + + // Print expression such that it can be parsed as a match arm. + // + // This is almost equivalent to `stmt`, but the grammar diverges a tiny bit + // between statements and match arms when it comes to braced macro calls. + // Macro calls with brace delimiter terminate a statement without a + // semicolon, but do not terminate a match-arm without comma. + // + // m! {} - 1; // two statements: a macro call followed by -1 literal + // + // match () { + // _ => m! {} - 1, // binary subtraction operator + // } + // + match_arm: bool, + + // This is almost equivalent to `leftmost_subexpression_in_stmt`, other than + // for braced macro calls. + // + // If we have `m! {} - 1` as an expression, the leftmost subexpression + // `m! {}` will need to be parenthesized in the statement case but not the + // match-arm case. + // + // (m! {}) - 1; // subexpression needs parens + // + // match () { + // _ => m! 
{} - 1, // no parens
+    //     }
+    //
+    leftmost_subexpression_in_match_arm: bool,
+
+    // This is the difference between:
+    //
+    //     if let _ = (Struct {}) {} // needs parens
+    //
+    //     match () {
+    //         () if let _ = Struct {} => {} // no parens
+    //     }
+    //
+    condition: bool,
+
+    // This is the difference between:
+    //
+    //     if break Struct {} == (break) {} // needs parens
+    //
+    //     if break break == Struct {} {} // no parens
+    //
+    rightmost_subexpression_in_condition: bool,
+
+    // This is the difference between:
+    //
+    //     if break ({ x }).field + 1 {} // needs parens
+    //
+    //     if break 1 + { x }.field {} // no parens
+    //
+    leftmost_subexpression_in_optional_operand: bool,
+
+    // This is the difference between:
+    //
+    //     let _ = (return) - 1; // without paren, this would return -1
+    //
+    //     let _ = return + 1; // no paren because '+' cannot begin expr
+    //
+    next_operator_can_begin_expr: bool,
+
+    // This is the difference between:
+    //
+    //     let _ = 1 + return 1; // no parens if rightmost subexpression
+    //
+    //     let _ = 1 + (return 1) + 1; // needs parens
+    //
+    next_operator_can_continue_expr: bool,
+
+    // This is the difference between:
+    //
+    //     let _ = x as u8 + T;
+    //
+    //     let _ = (x as u8) < T;
+    //
+    // Without parens, the latter would want to parse `u8<T>` as a type.
+    //
+    next_operator_can_begin_generics: bool,
+}
+
+impl FixupContext {
+    /// The default amount of fixing is minimal fixing. Fixups should be
+    /// turned on in a targeted fashion where needed.
+    pub const NONE: Self = FixupContext {
+        previous_operator: Precedence::MIN,
+        next_operator: Precedence::MIN,
+        stmt: false,
+        leftmost_subexpression_in_stmt: false,
+        match_arm: false,
+        leftmost_subexpression_in_match_arm: false,
+        condition: false,
+        rightmost_subexpression_in_condition: false,
+        leftmost_subexpression_in_optional_operand: false,
+        next_operator_can_begin_expr: false,
+        next_operator_can_continue_expr: false,
+        next_operator_can_begin_generics: false,
+    };
+
+    /// Create the initial fixup for printing an expression in statement
+    /// position.
+    pub fn new_stmt() -> Self {
+        FixupContext {
+            stmt: true,
+            ..FixupContext::NONE
+        }
+    }
+
+    /// Create the initial fixup for printing an expression as the right-hand
+    /// side of a match arm.
+    pub fn new_match_arm() -> Self {
+        FixupContext {
+            match_arm: true,
+            ..FixupContext::NONE
+        }
+    }
+
+    /// Create the initial fixup for printing an expression as the "condition"
+    /// of an `if` or `while`. There are a few other positions which are
+    /// grammatically equivalent and also use this, such as the iterator
+    /// expression in `for` and the scrutinee in `match`.
+    pub fn new_condition() -> Self {
+        FixupContext {
+            condition: true,
+            rightmost_subexpression_in_condition: true,
+            ..FixupContext::NONE
+        }
+    }
+
+    /// Transform this fixup into the one that should apply when printing the
+    /// leftmost subexpression of the current expression.
+    ///
+    /// The leftmost subexpression is any subexpression that has the same first
+    /// token as the current expression, but has a different last token.
+    ///
+    /// For example in `$a + $b` and `$a.method()`, the subexpression `$a` is a
+    /// leftmost subexpression.
+    ///
+    /// Not every expression has a leftmost subexpression. For example neither
+    /// `-$a` nor `[$a]` have one.
+    pub fn leftmost_subexpression_with_operator(
+        self,
+        expr: &Expr,
+        next_operator_can_begin_expr: bool,
+        next_operator_can_begin_generics: bool,
+        precedence: Precedence,
+    ) -> (Precedence, Self) {
+        let fixup = FixupContext {
+            next_operator: precedence,
+            stmt: false,
+            leftmost_subexpression_in_stmt: self.stmt || self.leftmost_subexpression_in_stmt,
+            match_arm: false,
+            leftmost_subexpression_in_match_arm: self.match_arm
+                || self.leftmost_subexpression_in_match_arm,
+            rightmost_subexpression_in_condition: false,
+            next_operator_can_begin_expr,
+            next_operator_can_continue_expr: true,
+            next_operator_can_begin_generics,
+            ..self
+        };
+
+        (fixup.leftmost_subexpression_precedence(expr), fixup)
+    }
+
+    /// Transform this fixup into the one that should apply when printing a
+    /// leftmost subexpression followed by a `.` or `?` token, which confer
+    /// different statement boundary rules compared to other leftmost
+    /// subexpressions.
+ pub fn leftmost_subexpression_with_dot(self, expr: &Expr) -> (Precedence, Self) { + let fixup = FixupContext { + next_operator: Precedence::Unambiguous, + stmt: self.stmt || self.leftmost_subexpression_in_stmt, + leftmost_subexpression_in_stmt: false, + match_arm: self.match_arm || self.leftmost_subexpression_in_match_arm, + leftmost_subexpression_in_match_arm: false, + rightmost_subexpression_in_condition: false, + next_operator_can_begin_expr: false, + next_operator_can_continue_expr: true, + next_operator_can_begin_generics: false, + ..self + }; + + (fixup.leftmost_subexpression_precedence(expr), fixup) + } + + fn leftmost_subexpression_precedence(self, expr: &Expr) -> Precedence { + if !self.next_operator_can_begin_expr || self.next_operator == Precedence::Range { + if let Scan::Bailout = scan_right(expr, self, Precedence::MIN, 0, 0) { + if scan_left(expr, self) { + return Precedence::Unambiguous; + } + } + } + + self.precedence(expr) + } + + /// Transform this fixup into the one that should apply when printing the + /// rightmost subexpression of the current expression. + /// + /// The rightmost subexpression is any subexpression that has a different + /// first token than the current expression, but has the same last token. + /// + /// For example in `$a + $b` and `-$b`, the subexpression `$b` is a + /// rightmost subexpression. + /// + /// Not every expression has a rightmost subexpression. For example neither + /// `[$b]` nor `$a.f($b)` have one. + pub fn rightmost_subexpression( + self, + expr: &Expr, + precedence: Precedence, + ) -> (Precedence, Self) { + let fixup = self.rightmost_subexpression_fixup(false, false, precedence); + (fixup.rightmost_subexpression_precedence(expr), fixup) + } + + pub fn rightmost_subexpression_fixup( + self, + reset_allow_struct: bool, + optional_operand: bool, + precedence: Precedence, + ) -> Self { + FixupContext { + previous_operator: precedence, + stmt: false, + leftmost_subexpression_in_stmt: false, + match_arm: false, + leftmost_subexpression_in_match_arm: false, + condition: self.condition && !reset_allow_struct, + leftmost_subexpression_in_optional_operand: self.condition && optional_operand, + ..self + } + } + + pub fn rightmost_subexpression_precedence(self, expr: &Expr) -> Precedence { + let default_prec = self.precedence(expr); + + if match self.previous_operator { + Precedence::Assign | Precedence::Let | Precedence::Prefix => { + default_prec < self.previous_operator + } + _ => default_prec <= self.previous_operator, + } && match self.next_operator { + Precedence::Range | Precedence::Or | Precedence::And => true, + _ => !self.next_operator_can_begin_expr, + } { + if let Scan::Bailout | Scan::Fail = scan_right(expr, self, self.previous_operator, 1, 0) + { + if scan_left(expr, self) { + return Precedence::Prefix; + } + } + } + + default_prec + } + + /// Determine whether parentheses are needed around the given expression to + /// head off the early termination of a statement or condition. + pub fn parenthesize(self, expr: &Expr) -> bool { + (self.leftmost_subexpression_in_stmt && !classify::requires_semi_to_be_stmt(expr)) + || ((self.stmt || self.leftmost_subexpression_in_stmt) && matches!(expr, Expr::Let(_))) + || (self.leftmost_subexpression_in_match_arm + && !classify::requires_comma_to_be_match_arm(expr)) + || (self.condition && matches!(expr, Expr::Struct(_))) + || (self.rightmost_subexpression_in_condition + && matches!( + expr, + Expr::Return(ExprReturn { expr: None, .. }) + | Expr::Yield(ExprYield { expr: None, .. 
}) + )) + || (self.rightmost_subexpression_in_condition + && !self.condition + && matches!( + expr, + Expr::Break(ExprBreak { expr: None, .. }) + | Expr::Path(_) + | Expr::Range(ExprRange { end: None, .. }) + )) + || (self.leftmost_subexpression_in_optional_operand + && matches!(expr, Expr::Block(expr) if expr.attrs.is_empty() && expr.label.is_none())) + } + + /// Determines the effective precedence of a subexpression. Some expressions + /// have higher or lower precedence when adjacent to particular operators. + fn precedence(self, expr: &Expr) -> Precedence { + if self.next_operator_can_begin_expr { + // Decrease precedence of value-less jumps when followed by an + // operator that would otherwise get interpreted as beginning a + // value for the jump. + if let Expr::Break(ExprBreak { expr: None, .. }) + | Expr::Return(ExprReturn { expr: None, .. }) + | Expr::Yield(ExprYield { expr: None, .. }) = expr + { + return Precedence::Jump; + } + } + + if !self.next_operator_can_continue_expr { + match expr { + // Increase precedence of expressions that extend to the end of + // current statement or group. + Expr::Break(_) + | Expr::Closure(_) + | Expr::Let(_) + | Expr::Return(_) + | Expr::Yield(_) => { + return Precedence::Prefix; + } + Expr::Range(e) if e.start.is_none() => return Precedence::Prefix, + _ => {} + } + } + + if self.next_operator_can_begin_generics { + if let Expr::Cast(cast) = expr { + if classify::trailing_unparameterized_path(&cast.ty) { + return Precedence::MIN; + } + } + } + + Precedence::of(expr) + } +} + +#[derive(Copy, Clone, PartialEq)] +enum Scan { + Fail, + Bailout, + Consume, +} + +fn scan_left(expr: &Expr, fixup: FixupContext) -> bool { + match expr { + Expr::Assign(_) => fixup.previous_operator <= Precedence::Assign, + Expr::Binary(e) => match Precedence::of_binop(&e.op) { + Precedence::Assign => fixup.previous_operator <= Precedence::Assign, + binop_prec => fixup.previous_operator < binop_prec, + }, + Expr::Cast(_) => fixup.previous_operator < Precedence::Cast, + Expr::Range(e) => e.start.is_none() || fixup.previous_operator < Precedence::Assign, + _ => true, + } +} + +fn scan_right( + expr: &Expr, + fixup: FixupContext, + precedence: Precedence, + fail_offset: u8, + bailout_offset: u8, +) -> Scan { + let consume_by_precedence = if match precedence { + Precedence::Assign | Precedence::Compare => precedence <= fixup.next_operator, + _ => precedence < fixup.next_operator, + } || fixup.next_operator == Precedence::MIN + { + Scan::Consume + } else { + Scan::Bailout + }; + if fixup.parenthesize(expr) { + return consume_by_precedence; + } + match expr { + #![cfg_attr(all(test, exhaustive), deny(non_exhaustive_omitted_patterns))] + Expr::Assign(e) if e.attrs.is_empty() => { + if match fixup.next_operator { + Precedence::Unambiguous => fail_offset >= 2, + _ => bailout_offset >= 1, + } { + return Scan::Consume; + } + let right_fixup = fixup.rightmost_subexpression_fixup(false, false, Precedence::Assign); + let scan = scan_right( + &e.right, + right_fixup, + Precedence::Assign, + match fixup.next_operator { + Precedence::Unambiguous => fail_offset, + _ => 1, + }, + 1, + ); + if let Scan::Bailout | Scan::Consume = scan { + Scan::Consume + } else if let Precedence::Unambiguous = fixup.next_operator { + Scan::Fail + } else { + Scan::Bailout + } + } + Expr::Binary(e) if e.attrs.is_empty() => { + if match fixup.next_operator { + Precedence::Unambiguous => { + fail_offset >= 2 + && (consume_by_precedence == Scan::Consume || bailout_offset >= 1) + } + _ => bailout_offset >= 1, + } { 
+ return Scan::Consume; + } + let binop_prec = Precedence::of_binop(&e.op); + if binop_prec == Precedence::Compare && fixup.next_operator == Precedence::Compare { + return Scan::Consume; + } + let right_fixup = fixup.rightmost_subexpression_fixup(false, false, binop_prec); + let scan = scan_right( + &e.right, + right_fixup, + binop_prec, + match fixup.next_operator { + Precedence::Unambiguous => fail_offset, + _ => 1, + }, + consume_by_precedence as u8 - Scan::Bailout as u8, + ); + match scan { + Scan::Fail => {} + Scan::Bailout => return consume_by_precedence, + Scan::Consume => return Scan::Consume, + } + let right_needs_group = binop_prec != Precedence::Assign + && right_fixup.rightmost_subexpression_precedence(&e.right) <= binop_prec; + if right_needs_group { + consume_by_precedence + } else if let (Scan::Fail, Precedence::Unambiguous) = (scan, fixup.next_operator) { + Scan::Fail + } else { + Scan::Bailout + } + } + Expr::RawAddr(ExprRawAddr { expr, .. }) + | Expr::Reference(ExprReference { expr, .. }) + | Expr::Unary(ExprUnary { expr, .. }) => { + if match fixup.next_operator { + Precedence::Unambiguous => { + fail_offset >= 2 + && (consume_by_precedence == Scan::Consume || bailout_offset >= 1) + } + _ => bailout_offset >= 1, + } { + return Scan::Consume; + } + let right_fixup = fixup.rightmost_subexpression_fixup(false, false, Precedence::Prefix); + let scan = scan_right( + expr, + right_fixup, + precedence, + match fixup.next_operator { + Precedence::Unambiguous => fail_offset, + _ => 1, + }, + consume_by_precedence as u8 - Scan::Bailout as u8, + ); + match scan { + Scan::Fail => {} + Scan::Bailout => return consume_by_precedence, + Scan::Consume => return Scan::Consume, + } + if right_fixup.rightmost_subexpression_precedence(expr) < Precedence::Prefix { + consume_by_precedence + } else if let (Scan::Fail, Precedence::Unambiguous) = (scan, fixup.next_operator) { + Scan::Fail + } else { + Scan::Bailout + } + } + Expr::Range(e) if e.attrs.is_empty() => match &e.end { + Some(end) => { + if fail_offset >= 2 { + return Scan::Consume; + } + let right_fixup = + fixup.rightmost_subexpression_fixup(false, true, Precedence::Range); + let scan = scan_right( + end, + right_fixup, + Precedence::Range, + fail_offset, + match fixup.next_operator { + Precedence::Assign | Precedence::Range => 0, + _ => 1, + }, + ); + if match (scan, fixup.next_operator) { + (Scan::Fail, _) => false, + (Scan::Bailout, Precedence::Assign | Precedence::Range) => false, + (Scan::Bailout | Scan::Consume, _) => true, + } { + return Scan::Consume; + } + if right_fixup.rightmost_subexpression_precedence(end) <= Precedence::Range { + Scan::Consume + } else { + Scan::Fail + } + } + None => { + if fixup.next_operator_can_begin_expr { + Scan::Consume + } else { + Scan::Fail + } + } + }, + Expr::Break(e) => match &e.expr { + Some(value) => { + if bailout_offset >= 1 || e.label.is_none() && classify::expr_leading_label(value) { + return Scan::Consume; + } + let right_fixup = fixup.rightmost_subexpression_fixup(true, true, Precedence::Jump); + match scan_right(value, right_fixup, Precedence::Jump, 1, 1) { + Scan::Fail => Scan::Bailout, + Scan::Bailout | Scan::Consume => Scan::Consume, + } + } + None => match fixup.next_operator { + Precedence::Assign if precedence > Precedence::Assign => Scan::Fail, + _ => Scan::Consume, + }, + }, + Expr::Return(ExprReturn { expr, .. }) | Expr::Yield(ExprYield { expr, .. 
}) => match expr { + Some(e) => { + if bailout_offset >= 1 { + return Scan::Consume; + } + let right_fixup = + fixup.rightmost_subexpression_fixup(true, false, Precedence::Jump); + match scan_right(e, right_fixup, Precedence::Jump, 1, 1) { + Scan::Fail => Scan::Bailout, + Scan::Bailout | Scan::Consume => Scan::Consume, + } + } + None => match fixup.next_operator { + Precedence::Assign if precedence > Precedence::Assign => Scan::Fail, + _ => Scan::Consume, + }, + }, + Expr::Closure(_) => Scan::Consume, + Expr::Let(e) => { + if bailout_offset >= 1 { + return Scan::Consume; + } + let right_fixup = fixup.rightmost_subexpression_fixup(false, false, Precedence::Let); + let scan = scan_right( + &e.expr, + right_fixup, + Precedence::Let, + 1, + if fixup.next_operator < Precedence::Let { + 0 + } else { + 1 + }, + ); + match scan { + Scan::Fail | Scan::Bailout if fixup.next_operator < Precedence::Let => { + return Scan::Bailout; + } + Scan::Consume => return Scan::Consume, + _ => {} + } + if right_fixup.rightmost_subexpression_precedence(&e.expr) < Precedence::Let { + Scan::Consume + } else if let Scan::Fail = scan { + Scan::Bailout + } else { + Scan::Consume + } + } + Expr::Group(e) => scan_right(&e.expr, fixup, precedence, fail_offset, bailout_offset), + Expr::Array(_) + | Expr::Assign(_) + | Expr::Async(_) + | Expr::Await(_) + | Expr::Binary(_) + | Expr::Block(_) + | Expr::Call(_) + | Expr::Cast(_) + | Expr::Const(_) + | Expr::Continue(_) + | Expr::Field(_) + | Expr::ForLoop(_) + | Expr::If(_) + | Expr::Index(_) + | Expr::Infer(_) + | Expr::Lit(_) + | Expr::Loop(_) + | Expr::Macro(_) + | Expr::Match(_) + | Expr::MethodCall(_) + | Expr::Paren(_) + | Expr::Path(_) + | Expr::Range(_) + | Expr::Repeat(_) + | Expr::Struct(_) + | Expr::Try(_) + | Expr::TryBlock(_) + | Expr::Tuple(_) + | Expr::Unsafe(_) + | Expr::Verbatim(_) + | Expr::While(_) => match fixup.next_operator { + Precedence::Assign | Precedence::Range if precedence == Precedence::Range => Scan::Fail, + _ if precedence == Precedence::Let && fixup.next_operator < Precedence::Let => { + Scan::Fail + } + _ => consume_by_precedence, + }, + + _ => match fixup.next_operator { + Precedence::Assign | Precedence::Range if precedence == Precedence::Range => Scan::Fail, + _ if precedence == Precedence::Let && fixup.next_operator < Precedence::Let => { + Scan::Fail + } + _ => consume_by_precedence, + }, + } +} diff --git a/vendor/prettyplease/src/generics.rs b/vendor/prettyplease/src/generics.rs new file mode 100644 index 00000000000000..6c9688b147064d --- /dev/null +++ b/vendor/prettyplease/src/generics.rs @@ -0,0 +1,426 @@ +use crate::algorithm::Printer; +use crate::iter::IterDelimited; +use crate::path::PathKind; +use crate::INDENT; +use proc_macro2::TokenStream; +use std::ptr; +use syn::{ + BoundLifetimes, CapturedParam, ConstParam, Expr, GenericParam, Generics, LifetimeParam, + PreciseCapture, PredicateLifetime, PredicateType, TraitBound, TraitBoundModifier, TypeParam, + TypeParamBound, WhereClause, WherePredicate, +}; + +impl Printer { + pub fn generics(&mut self, generics: &Generics) { + if generics.params.is_empty() { + return; + } + + self.word("<"); + self.cbox(0); + self.zerobreak(); + + // Print lifetimes before types and consts, regardless of their + // order in self.params. 
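+        // For example, a programmatically constructed `Generics` holding its
+        // parameters in source order `<T, 'a, const N: usize>` is printed as
+        // `<'a, T, const N: usize>`, since rustc requires lifetime parameters
+        // to be declared before type and const parameters.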
+ #[derive(Ord, PartialOrd, Eq, PartialEq)] + enum Group { + First, + Second, + } + fn group(param: &GenericParam) -> Group { + match param { + GenericParam::Lifetime(_) => Group::First, + GenericParam::Type(_) | GenericParam::Const(_) => Group::Second, + } + } + let last = generics.params.iter().max_by_key(|param| group(param)); + for current_group in [Group::First, Group::Second] { + for param in &generics.params { + if group(param) == current_group { + self.generic_param(param); + self.trailing_comma(ptr::eq(param, last.unwrap())); + } + } + } + + self.offset(-INDENT); + self.end(); + self.word(">"); + } + + fn generic_param(&mut self, generic_param: &GenericParam) { + match generic_param { + GenericParam::Type(type_param) => self.type_param(type_param), + GenericParam::Lifetime(lifetime_param) => self.lifetime_param(lifetime_param), + GenericParam::Const(const_param) => self.const_param(const_param), + } + } + + pub fn bound_lifetimes(&mut self, bound_lifetimes: &BoundLifetimes) { + self.word("for<"); + for param in bound_lifetimes.lifetimes.iter().delimited() { + self.generic_param(¶m); + if !param.is_last { + self.word(", "); + } + } + self.word("> "); + } + + fn lifetime_param(&mut self, lifetime_param: &LifetimeParam) { + self.outer_attrs(&lifetime_param.attrs); + self.lifetime(&lifetime_param.lifetime); + for lifetime in lifetime_param.bounds.iter().delimited() { + if lifetime.is_first { + self.word(": "); + } else { + self.word(" + "); + } + self.lifetime(&lifetime); + } + } + + fn type_param(&mut self, type_param: &TypeParam) { + self.outer_attrs(&type_param.attrs); + self.ident(&type_param.ident); + self.ibox(INDENT); + for type_param_bound in type_param.bounds.iter().delimited() { + if type_param_bound.is_first { + self.word(": "); + } else { + self.space(); + self.word("+ "); + } + self.type_param_bound(&type_param_bound); + } + if let Some(default) = &type_param.default { + self.space(); + self.word("= "); + self.ty(default); + } + self.end(); + } + + pub fn type_param_bound(&mut self, type_param_bound: &TypeParamBound) { + match type_param_bound { + #![cfg_attr(all(test, exhaustive), deny(non_exhaustive_omitted_patterns))] + TypeParamBound::Trait(trait_bound) => { + self.trait_bound(trait_bound, TraitBoundConst::None); + } + TypeParamBound::Lifetime(lifetime) => self.lifetime(lifetime), + TypeParamBound::PreciseCapture(precise_capture) => { + self.precise_capture(precise_capture); + } + TypeParamBound::Verbatim(bound) => self.type_param_bound_verbatim(bound), + _ => unimplemented!("unknown TypeParamBound"), + } + } + + fn trait_bound(&mut self, trait_bound: &TraitBound, constness: TraitBoundConst) { + if trait_bound.paren_token.is_some() { + self.word("("); + } + if let Some(bound_lifetimes) = &trait_bound.lifetimes { + self.bound_lifetimes(bound_lifetimes); + } + match constness { + TraitBoundConst::None => {} + #[cfg(feature = "verbatim")] + TraitBoundConst::Conditional => self.word("[const] "), + #[cfg(feature = "verbatim")] + TraitBoundConst::Unconditional => self.word("const "), + } + self.trait_bound_modifier(&trait_bound.modifier); + for segment in trait_bound.path.segments.iter().delimited() { + if !segment.is_first || trait_bound.path.leading_colon.is_some() { + self.word("::"); + } + self.path_segment(&segment, PathKind::Type); + } + if trait_bound.paren_token.is_some() { + self.word(")"); + } + } + + fn trait_bound_modifier(&mut self, trait_bound_modifier: &TraitBoundModifier) { + match trait_bound_modifier { + TraitBoundModifier::None => {} + 
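+            // A maybe bound such as `?Sized` prints as a leading question
+            // mark in front of the trait path.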
TraitBoundModifier::Maybe(_question_mark) => self.word("?"), + } + } + + #[cfg(not(feature = "verbatim"))] + fn type_param_bound_verbatim(&mut self, bound: &TokenStream) { + unimplemented!("TypeParamBound::Verbatim `{}`", bound); + } + + #[cfg(feature = "verbatim")] + fn type_param_bound_verbatim(&mut self, tokens: &TokenStream) { + use syn::parse::{Parse, ParseStream, Result}; + use syn::{ + bracketed, parenthesized, token, ParenthesizedGenericArguments, Path, PathArguments, + Token, + }; + + enum TypeParamBoundVerbatim { + Ellipsis, + Const(TraitBound, TraitBoundConst), + } + + impl Parse for TypeParamBoundVerbatim { + fn parse(input: ParseStream) -> Result { + if input.peek(Token![...]) { + input.parse::()?; + return Ok(TypeParamBoundVerbatim::Ellipsis); + } + + let content; + let content = if input.peek(token::Paren) { + parenthesized!(content in input); + &content + } else { + input + }; + + let lifetimes: Option = content.parse()?; + + let constness = if content.peek(token::Bracket) { + let conditionally_const; + bracketed!(conditionally_const in content); + conditionally_const.parse::()?; + TraitBoundConst::Conditional + } else if content.peek(Token![const]) { + content.parse::()?; + TraitBoundConst::Unconditional + } else { + TraitBoundConst::None + }; + + let modifier: TraitBoundModifier = content.parse()?; + + let mut path: Path = content.parse()?; + if path.segments.last().unwrap().arguments.is_empty() + && (content.peek(token::Paren) + || content.peek(Token![::]) && content.peek3(token::Paren)) + { + content.parse::>()?; + let args: ParenthesizedGenericArguments = content.parse()?; + let parenthesized = PathArguments::Parenthesized(args); + path.segments.last_mut().unwrap().arguments = parenthesized; + } + + Ok(TypeParamBoundVerbatim::Const( + TraitBound { + paren_token: None, + modifier, + lifetimes, + path, + }, + constness, + )) + } + } + + let bound: TypeParamBoundVerbatim = match syn::parse2(tokens.clone()) { + Ok(bound) => bound, + Err(_) => unimplemented!("TypeParamBound::Verbatim `{}`", tokens), + }; + + match bound { + TypeParamBoundVerbatim::Ellipsis => { + self.word("..."); + } + TypeParamBoundVerbatim::Const(trait_bound, constness) => { + self.trait_bound(&trait_bound, constness); + } + } + } + + fn const_param(&mut self, const_param: &ConstParam) { + self.outer_attrs(&const_param.attrs); + self.word("const "); + self.ident(&const_param.ident); + self.word(": "); + self.ty(&const_param.ty); + if let Some(default) = &const_param.default { + self.word(" = "); + self.const_argument(default); + } + } + + pub fn where_clause_for_body(&mut self, where_clause: &Option) { + let hardbreaks = true; + let semi = false; + self.where_clause_impl(where_clause, hardbreaks, semi); + } + + pub fn where_clause_semi(&mut self, where_clause: &Option) { + let hardbreaks = true; + let semi = true; + self.where_clause_impl(where_clause, hardbreaks, semi); + } + + pub fn where_clause_oneline(&mut self, where_clause: &Option) { + let hardbreaks = false; + let semi = false; + self.where_clause_impl(where_clause, hardbreaks, semi); + } + + pub fn where_clause_oneline_semi(&mut self, where_clause: &Option) { + let hardbreaks = false; + let semi = true; + self.where_clause_impl(where_clause, hardbreaks, semi); + } + + fn where_clause_impl( + &mut self, + where_clause: &Option, + hardbreaks: bool, + semi: bool, + ) { + let where_clause = match where_clause { + Some(where_clause) if !where_clause.predicates.is_empty() => where_clause, + _ => { + if semi { + self.word(";"); + } else { + 
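+                    // Nothing to print: fall back to the single space that
+                    // separates the item header from the `{` the caller emits
+                    // next.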
self.nbsp(); + } + return; + } + }; + if hardbreaks { + self.hardbreak(); + self.offset(-INDENT); + self.word("where"); + self.hardbreak(); + for predicate in where_clause.predicates.iter().delimited() { + self.where_predicate(&predicate); + if predicate.is_last && semi { + self.word(";"); + } else { + self.word(","); + self.hardbreak(); + } + } + if !semi { + self.offset(-INDENT); + } + } else { + self.space(); + self.offset(-INDENT); + self.word("where"); + self.space(); + for predicate in where_clause.predicates.iter().delimited() { + self.where_predicate(&predicate); + if predicate.is_last && semi { + self.word(";"); + } else { + self.trailing_comma_or_space(predicate.is_last); + } + } + if !semi { + self.offset(-INDENT); + } + } + } + + fn where_predicate(&mut self, predicate: &WherePredicate) { + match predicate { + #![cfg_attr(all(test, exhaustive), deny(non_exhaustive_omitted_patterns))] + WherePredicate::Type(predicate) => self.predicate_type(predicate), + WherePredicate::Lifetime(predicate) => self.predicate_lifetime(predicate), + _ => unimplemented!("unknown WherePredicate"), + } + } + + fn predicate_type(&mut self, predicate: &PredicateType) { + if let Some(bound_lifetimes) = &predicate.lifetimes { + self.bound_lifetimes(bound_lifetimes); + } + self.ty(&predicate.bounded_ty); + self.word(":"); + if predicate.bounds.len() == 1 { + self.ibox(0); + } else { + self.ibox(INDENT); + } + for type_param_bound in predicate.bounds.iter().delimited() { + if type_param_bound.is_first { + self.nbsp(); + } else { + self.space(); + self.word("+ "); + } + self.type_param_bound(&type_param_bound); + } + self.end(); + } + + fn predicate_lifetime(&mut self, predicate: &PredicateLifetime) { + self.lifetime(&predicate.lifetime); + self.word(":"); + self.ibox(INDENT); + for lifetime in predicate.bounds.iter().delimited() { + if lifetime.is_first { + self.nbsp(); + } else { + self.space(); + self.word("+ "); + } + self.lifetime(&lifetime); + } + self.end(); + } + + fn precise_capture(&mut self, precise_capture: &PreciseCapture) { + self.word("use<"); + for capture in precise_capture.params.iter().delimited() { + self.captured_param(&capture); + if !capture.is_last { + self.word(", "); + } + } + self.word(">"); + } + + fn captured_param(&mut self, capture: &CapturedParam) { + match capture { + #![cfg_attr(all(test, exhaustive), deny(non_exhaustive_omitted_patterns))] + CapturedParam::Lifetime(lifetime) => self.lifetime(lifetime), + CapturedParam::Ident(ident) => self.ident(ident), + _ => unimplemented!("unknown CapturedParam"), + } + } + + pub fn const_argument(&mut self, expr: &Expr) { + match expr { + #![cfg_attr(all(test, exhaustive), allow(non_exhaustive_omitted_patterns))] + Expr::Lit(expr) => self.expr_lit(expr), + + Expr::Path(expr) + if expr.attrs.is_empty() + && expr.qself.is_none() + && expr.path.get_ident().is_some() => + { + self.expr_path(expr); + } + + Expr::Block(expr) => self.expr_block(expr), + + _ => { + self.cbox(INDENT); + self.expr_as_small_block(expr, 0); + self.end(); + } + } + } +} + +enum TraitBoundConst { + None, + #[cfg(feature = "verbatim")] + Conditional, + #[cfg(feature = "verbatim")] + Unconditional, +} diff --git a/vendor/prettyplease/src/item.rs b/vendor/prettyplease/src/item.rs new file mode 100644 index 00000000000000..40623479ab23dd --- /dev/null +++ b/vendor/prettyplease/src/item.rs @@ -0,0 +1,1813 @@ +use crate::algorithm::Printer; +use crate::fixup::FixupContext; +use crate::iter::IterDelimited; +use crate::mac; +use crate::path::PathKind; +use crate::INDENT; 
+use proc_macro2::TokenStream; +use syn::{ + Fields, FnArg, ForeignItem, ForeignItemFn, ForeignItemMacro, ForeignItemStatic, + ForeignItemType, ImplItem, ImplItemConst, ImplItemFn, ImplItemMacro, ImplItemType, Item, + ItemConst, ItemEnum, ItemExternCrate, ItemFn, ItemForeignMod, ItemImpl, ItemMacro, ItemMod, + ItemStatic, ItemStruct, ItemTrait, ItemTraitAlias, ItemType, ItemUnion, ItemUse, Receiver, + Signature, StaticMutability, TraitItem, TraitItemConst, TraitItemFn, TraitItemMacro, + TraitItemType, Type, UseGlob, UseGroup, UseName, UsePath, UseRename, UseTree, Variadic, +}; + +impl Printer { + pub fn item(&mut self, item: &Item) { + match item { + #![cfg_attr(all(test, exhaustive), deny(non_exhaustive_omitted_patterns))] + Item::Const(item) => self.item_const(item), + Item::Enum(item) => self.item_enum(item), + Item::ExternCrate(item) => self.item_extern_crate(item), + Item::Fn(item) => self.item_fn(item), + Item::ForeignMod(item) => self.item_foreign_mod(item), + Item::Impl(item) => self.item_impl(item), + Item::Macro(item) => self.item_macro(item), + Item::Mod(item) => self.item_mod(item), + Item::Static(item) => self.item_static(item), + Item::Struct(item) => self.item_struct(item), + Item::Trait(item) => self.item_trait(item), + Item::TraitAlias(item) => self.item_trait_alias(item), + Item::Type(item) => self.item_type(item), + Item::Union(item) => self.item_union(item), + Item::Use(item) => self.item_use(item), + Item::Verbatim(item) => self.item_verbatim(item), + _ => unimplemented!("unknown Item"), + } + } + + fn item_const(&mut self, item: &ItemConst) { + self.outer_attrs(&item.attrs); + self.cbox(0); + self.visibility(&item.vis); + self.word("const "); + self.ident(&item.ident); + self.generics(&item.generics); + self.word(": "); + self.ty(&item.ty); + self.word(" = "); + self.neverbreak(); + self.expr(&item.expr, FixupContext::NONE); + self.word(";"); + self.end(); + self.hardbreak(); + } + + fn item_enum(&mut self, item: &ItemEnum) { + self.outer_attrs(&item.attrs); + self.cbox(INDENT); + self.visibility(&item.vis); + self.word("enum "); + self.ident(&item.ident); + self.generics(&item.generics); + self.where_clause_for_body(&item.generics.where_clause); + self.word("{"); + self.hardbreak_if_nonempty(); + for variant in &item.variants { + self.variant(variant); + self.word(","); + self.hardbreak(); + } + self.offset(-INDENT); + self.end(); + self.word("}"); + self.hardbreak(); + } + + fn item_extern_crate(&mut self, item: &ItemExternCrate) { + self.outer_attrs(&item.attrs); + self.visibility(&item.vis); + self.word("extern crate "); + self.ident(&item.ident); + if let Some((_as_token, rename)) = &item.rename { + self.word(" as "); + self.ident(rename); + } + self.word(";"); + self.hardbreak(); + } + + fn item_fn(&mut self, item: &ItemFn) { + self.outer_attrs(&item.attrs); + self.cbox(INDENT); + self.visibility(&item.vis); + self.signature( + &item.sig, + #[cfg(feature = "verbatim")] + &verbatim::Safety::Disallowed, + ); + self.where_clause_for_body(&item.sig.generics.where_clause); + self.word("{"); + self.hardbreak_if_nonempty(); + self.inner_attrs(&item.attrs); + for stmt in item.block.stmts.iter().delimited() { + self.stmt(&stmt, stmt.is_last); + } + self.offset(-INDENT); + self.end(); + self.word("}"); + self.hardbreak(); + } + + fn item_foreign_mod(&mut self, item: &ItemForeignMod) { + self.outer_attrs(&item.attrs); + self.cbox(INDENT); + if item.unsafety.is_some() { + self.word("unsafe "); + } + self.abi(&item.abi); + self.word("{"); + self.hardbreak_if_nonempty(); + 
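+        // The break above is only emitted if the block turns out to be
+        // non-empty, so an empty `extern "C" {}` stays on one line.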
self.inner_attrs(&item.attrs); + for foreign_item in &item.items { + self.foreign_item(foreign_item); + } + self.offset(-INDENT); + self.end(); + self.word("}"); + self.hardbreak(); + } + + fn item_impl(&mut self, item: &ItemImpl) { + self.outer_attrs(&item.attrs); + self.cbox(INDENT); + self.ibox(-INDENT); + self.cbox(INDENT); + if item.defaultness.is_some() { + self.word("default "); + } + if item.unsafety.is_some() { + self.word("unsafe "); + } + self.word("impl"); + self.generics(&item.generics); + self.end(); + self.nbsp(); + if let Some((negative_polarity, path, _for_token)) = &item.trait_ { + if negative_polarity.is_some() { + self.word("!"); + } + self.path(path, PathKind::Type); + self.space(); + self.word("for "); + } + self.ty(&item.self_ty); + self.end(); + self.where_clause_for_body(&item.generics.where_clause); + self.word("{"); + self.hardbreak_if_nonempty(); + self.inner_attrs(&item.attrs); + for impl_item in &item.items { + self.impl_item(impl_item); + } + self.offset(-INDENT); + self.end(); + self.word("}"); + self.hardbreak(); + } + + fn item_macro(&mut self, item: &ItemMacro) { + self.outer_attrs(&item.attrs); + let semicolon = mac::requires_semi(&item.mac.delimiter); + self.mac(&item.mac, item.ident.as_ref(), semicolon); + self.hardbreak(); + } + + fn item_mod(&mut self, item: &ItemMod) { + self.outer_attrs(&item.attrs); + self.cbox(INDENT); + self.visibility(&item.vis); + if item.unsafety.is_some() { + self.word("unsafe "); + } + self.word("mod "); + self.ident(&item.ident); + if let Some((_brace, items)) = &item.content { + self.word(" {"); + self.hardbreak_if_nonempty(); + self.inner_attrs(&item.attrs); + for item in items { + self.item(item); + } + self.offset(-INDENT); + self.end(); + self.word("}"); + } else { + self.word(";"); + self.end(); + } + self.hardbreak(); + } + + fn item_static(&mut self, item: &ItemStatic) { + self.outer_attrs(&item.attrs); + self.cbox(0); + self.visibility(&item.vis); + self.word("static "); + self.static_mutability(&item.mutability); + self.ident(&item.ident); + self.word(": "); + self.ty(&item.ty); + self.word(" = "); + self.neverbreak(); + self.expr(&item.expr, FixupContext::NONE); + self.word(";"); + self.end(); + self.hardbreak(); + } + + fn item_struct(&mut self, item: &ItemStruct) { + self.outer_attrs(&item.attrs); + self.cbox(INDENT); + self.visibility(&item.vis); + self.word("struct "); + self.ident(&item.ident); + self.generics(&item.generics); + match &item.fields { + Fields::Named(fields) => { + self.where_clause_for_body(&item.generics.where_clause); + self.word("{"); + self.hardbreak_if_nonempty(); + for field in &fields.named { + self.field(field); + self.word(","); + self.hardbreak(); + } + self.offset(-INDENT); + self.end(); + self.word("}"); + } + Fields::Unnamed(fields) => { + self.fields_unnamed(fields); + self.where_clause_semi(&item.generics.where_clause); + self.end(); + } + Fields::Unit => { + self.where_clause_semi(&item.generics.where_clause); + self.end(); + } + } + self.hardbreak(); + } + + fn item_trait(&mut self, item: &ItemTrait) { + self.outer_attrs(&item.attrs); + self.cbox(INDENT); + self.visibility(&item.vis); + if item.unsafety.is_some() { + self.word("unsafe "); + } + if item.auto_token.is_some() { + self.word("auto "); + } + self.word("trait "); + self.ident(&item.ident); + self.generics(&item.generics); + for supertrait in item.supertraits.iter().delimited() { + if supertrait.is_first { + self.word(": "); + } else { + self.word(" + "); + } + self.type_param_bound(&supertrait); + } + 
self.where_clause_for_body(&item.generics.where_clause); + self.word("{"); + self.hardbreak_if_nonempty(); + self.inner_attrs(&item.attrs); + for trait_item in &item.items { + self.trait_item(trait_item); + } + self.offset(-INDENT); + self.end(); + self.word("}"); + self.hardbreak(); + } + + fn item_trait_alias(&mut self, item: &ItemTraitAlias) { + self.outer_attrs(&item.attrs); + self.cbox(INDENT); + self.visibility(&item.vis); + self.word("trait "); + self.ident(&item.ident); + self.generics(&item.generics); + self.word(" = "); + self.neverbreak(); + for bound in item.bounds.iter().delimited() { + if !bound.is_first { + self.space(); + self.word("+ "); + } + self.type_param_bound(&bound); + } + self.where_clause_semi(&item.generics.where_clause); + self.end(); + self.hardbreak(); + } + + fn item_type(&mut self, item: &ItemType) { + self.outer_attrs(&item.attrs); + self.cbox(INDENT); + self.visibility(&item.vis); + self.word("type "); + self.ident(&item.ident); + self.generics(&item.generics); + self.where_clause_oneline(&item.generics.where_clause); + self.word("= "); + self.neverbreak(); + self.ibox(-INDENT); + self.ty(&item.ty); + self.end(); + self.word(";"); + self.end(); + self.hardbreak(); + } + + fn item_union(&mut self, item: &ItemUnion) { + self.outer_attrs(&item.attrs); + self.cbox(INDENT); + self.visibility(&item.vis); + self.word("union "); + self.ident(&item.ident); + self.generics(&item.generics); + self.where_clause_for_body(&item.generics.where_clause); + self.word("{"); + self.hardbreak_if_nonempty(); + for field in &item.fields.named { + self.field(field); + self.word(","); + self.hardbreak(); + } + self.offset(-INDENT); + self.end(); + self.word("}"); + self.hardbreak(); + } + + fn item_use(&mut self, item: &ItemUse) { + self.outer_attrs(&item.attrs); + self.visibility(&item.vis); + self.word("use "); + if item.leading_colon.is_some() { + self.word("::"); + } + self.use_tree(&item.tree); + self.word(";"); + self.hardbreak(); + } + + #[cfg(not(feature = "verbatim"))] + fn item_verbatim(&mut self, item: &TokenStream) { + if !item.is_empty() { + unimplemented!("Item::Verbatim `{}`", item); + } + self.hardbreak(); + } + + #[cfg(feature = "verbatim")] + fn item_verbatim(&mut self, tokens: &TokenStream) { + use syn::parse::{Parse, ParseStream, Result}; + use syn::punctuated::Punctuated; + use syn::{ + braced, parenthesized, token, Attribute, Generics, Ident, Lifetime, Token, Visibility, + }; + use verbatim::{ + FlexibleItemConst, FlexibleItemFn, FlexibleItemStatic, FlexibleItemType, + WhereClauseLocation, + }; + + enum ItemVerbatim { + Empty, + Ellipsis, + ConstFlexible(FlexibleItemConst), + FnFlexible(FlexibleItemFn), + ImplFlexible(ImplFlexible), + Macro2(Macro2), + StaticFlexible(FlexibleItemStatic), + TypeFlexible(FlexibleItemType), + UseBrace(UseBrace), + } + + struct ImplFlexible { + attrs: Vec, + vis: Visibility, + defaultness: bool, + unsafety: bool, + generics: Generics, + constness: ImplConstness, + negative_impl: bool, + trait_: Option, + self_ty: Type, + items: Vec, + } + + enum ImplConstness { + None, + MaybeConst, + Const, + } + + struct Macro2 { + attrs: Vec, + vis: Visibility, + ident: Ident, + args: Option, + body: TokenStream, + } + + struct UseBrace { + attrs: Vec, + vis: Visibility, + trees: Punctuated, + } + + struct RootUseTree { + leading_colon: Option, + inner: UseTree, + } + + impl Parse for ImplConstness { + fn parse(input: ParseStream) -> Result { + if input.parse::>()?.is_some() { + input.parse::()?; + Ok(ImplConstness::MaybeConst) + } else if 
input.parse::>()?.is_some() { + Ok(ImplConstness::Const) + } else { + Ok(ImplConstness::None) + } + } + } + + impl Parse for RootUseTree { + fn parse(input: ParseStream) -> Result { + Ok(RootUseTree { + leading_colon: input.parse()?, + inner: input.parse()?, + }) + } + } + + impl Parse for ItemVerbatim { + fn parse(input: ParseStream) -> Result { + if input.is_empty() { + return Ok(ItemVerbatim::Empty); + } else if input.peek(Token![...]) { + input.parse::()?; + return Ok(ItemVerbatim::Ellipsis); + } + + let mut attrs = input.call(Attribute::parse_outer)?; + let vis: Visibility = input.parse()?; + + let lookahead = input.lookahead1(); + if lookahead.peek(Token![const]) && (input.peek2(Ident) || input.peek2(Token![_])) { + let defaultness = false; + let flexible_item = FlexibleItemConst::parse(attrs, vis, defaultness, input)?; + Ok(ItemVerbatim::ConstFlexible(flexible_item)) + } else if input.peek(Token![const]) + || lookahead.peek(Token![async]) + || lookahead.peek(Token![unsafe]) && !input.peek2(Token![impl]) + || lookahead.peek(Token![extern]) + || lookahead.peek(Token![fn]) + { + let defaultness = false; + let flexible_item = FlexibleItemFn::parse(attrs, vis, defaultness, input)?; + Ok(ItemVerbatim::FnFlexible(flexible_item)) + } else if lookahead.peek(Token![default]) + || input.peek(Token![unsafe]) + || lookahead.peek(Token![impl]) + { + let defaultness = input.parse::>()?.is_some(); + let unsafety = input.parse::>()?.is_some(); + input.parse::()?; + let has_generics = input.peek(Token![<]) + && (input.peek2(Token![>]) + || input.peek2(Token![#]) + || (input.peek2(Ident) || input.peek2(Lifetime)) + && (input.peek3(Token![:]) + || input.peek3(Token![,]) + || input.peek3(Token![>]) + || input.peek3(Token![=])) + || input.peek2(Token![const])); + let mut generics: Generics = if has_generics { + input.parse()? + } else { + Generics::default() + }; + let constness: ImplConstness = input.parse()?; + let negative_impl = + !input.peek2(token::Brace) && input.parse::>()?.is_some(); + let first_ty: Type = input.parse()?; + let (trait_, self_ty) = if input.parse::>()?.is_some() { + (Some(first_ty), input.parse()?) + } else { + (None, first_ty) + }; + generics.where_clause = input.parse()?; + let content; + braced!(content in input); + let inner_attrs = content.call(Attribute::parse_inner)?; + attrs.extend(inner_attrs); + let mut items = Vec::new(); + while !content.is_empty() { + items.push(content.parse()?); + } + Ok(ItemVerbatim::ImplFlexible(ImplFlexible { + attrs, + vis, + defaultness, + unsafety, + generics, + constness, + negative_impl, + trait_, + self_ty, + items, + })) + } else if lookahead.peek(Token![macro]) { + input.parse::()?; + let ident: Ident = input.parse()?; + let args = if input.peek(token::Paren) { + let paren_content; + parenthesized!(paren_content in input); + Some(paren_content.parse::()?) 
+ } else { + None + }; + let brace_content; + braced!(brace_content in input); + let body: TokenStream = brace_content.parse()?; + Ok(ItemVerbatim::Macro2(Macro2 { + attrs, + vis, + ident, + args, + body, + })) + } else if lookahead.peek(Token![static]) { + let flexible_item = FlexibleItemStatic::parse(attrs, vis, input)?; + Ok(ItemVerbatim::StaticFlexible(flexible_item)) + } else if lookahead.peek(Token![type]) { + let defaultness = false; + let flexible_item = FlexibleItemType::parse( + attrs, + vis, + defaultness, + input, + WhereClauseLocation::BeforeEq, + )?; + Ok(ItemVerbatim::TypeFlexible(flexible_item)) + } else if lookahead.peek(Token![use]) { + input.parse::()?; + let content; + braced!(content in input); + let trees = content.parse_terminated(RootUseTree::parse, Token![,])?; + input.parse::()?; + Ok(ItemVerbatim::UseBrace(UseBrace { attrs, vis, trees })) + } else { + Err(lookahead.error()) + } + } + } + + let item: ItemVerbatim = match syn::parse2(tokens.clone()) { + Ok(item) => item, + Err(_) => unimplemented!("Item::Verbatim `{}`", tokens), + }; + + match item { + ItemVerbatim::Empty => { + self.hardbreak(); + } + ItemVerbatim::Ellipsis => { + self.word("..."); + self.hardbreak(); + } + ItemVerbatim::ConstFlexible(item) => { + self.flexible_item_const(&item); + } + ItemVerbatim::FnFlexible(item) => { + self.flexible_item_fn(&item); + } + ItemVerbatim::ImplFlexible(item) => { + self.outer_attrs(&item.attrs); + self.cbox(INDENT); + self.ibox(-INDENT); + self.cbox(INDENT); + self.visibility(&item.vis); + if item.defaultness { + self.word("default "); + } + if item.unsafety { + self.word("unsafe "); + } + self.word("impl"); + self.generics(&item.generics); + self.end(); + self.nbsp(); + match item.constness { + ImplConstness::None => {} + ImplConstness::MaybeConst => self.word("?const "), + ImplConstness::Const => self.word("const "), + } + if item.negative_impl { + self.word("!"); + } + if let Some(trait_) = &item.trait_ { + self.ty(trait_); + self.space(); + self.word("for "); + } + self.ty(&item.self_ty); + self.end(); + self.where_clause_for_body(&item.generics.where_clause); + self.word("{"); + self.hardbreak_if_nonempty(); + self.inner_attrs(&item.attrs); + for impl_item in &item.items { + self.impl_item(impl_item); + } + self.offset(-INDENT); + self.end(); + self.word("}"); + self.hardbreak(); + } + ItemVerbatim::Macro2(item) => { + self.outer_attrs(&item.attrs); + self.visibility(&item.vis); + self.word("macro "); + self.ident(&item.ident); + if let Some(args) = &item.args { + self.word("("); + self.cbox(INDENT); + self.zerobreak(); + self.ibox(0); + self.macro_rules_tokens(args.clone(), true); + self.end(); + self.zerobreak(); + self.offset(-INDENT); + self.end(); + self.word(")"); + } + self.word(" {"); + if !item.body.is_empty() { + self.neverbreak(); + self.cbox(INDENT); + self.hardbreak(); + self.ibox(0); + self.macro_rules_tokens(item.body.clone(), false); + self.end(); + self.hardbreak(); + self.offset(-INDENT); + self.end(); + } + self.word("}"); + self.hardbreak(); + } + ItemVerbatim::StaticFlexible(item) => { + self.flexible_item_static(&item); + } + ItemVerbatim::TypeFlexible(item) => { + self.flexible_item_type(&item); + } + ItemVerbatim::UseBrace(item) => { + self.outer_attrs(&item.attrs); + self.visibility(&item.vis); + self.word("use "); + if item.trees.len() == 1 { + self.word("::"); + self.use_tree(&item.trees[0].inner); + } else { + self.cbox(INDENT); + self.word("{"); + self.zerobreak(); + self.ibox(0); + for use_tree in item.trees.iter().delimited() { + 
if use_tree.leading_colon.is_some() { + self.word("::"); + } + self.use_tree(&use_tree.inner); + if !use_tree.is_last { + self.word(","); + let mut use_tree = &use_tree.inner; + while let UseTree::Path(use_path) = use_tree { + use_tree = &use_path.tree; + } + if let UseTree::Group(_) = use_tree { + self.hardbreak(); + } else { + self.space(); + } + } + } + self.end(); + self.trailing_comma(true); + self.offset(-INDENT); + self.word("}"); + self.end(); + } + self.word(";"); + self.hardbreak(); + } + } + } + + fn use_tree(&mut self, use_tree: &UseTree) { + match use_tree { + UseTree::Path(use_path) => self.use_path(use_path), + UseTree::Name(use_name) => self.use_name(use_name), + UseTree::Rename(use_rename) => self.use_rename(use_rename), + UseTree::Glob(use_glob) => self.use_glob(use_glob), + UseTree::Group(use_group) => self.use_group(use_group), + } + } + + fn use_path(&mut self, use_path: &UsePath) { + self.ident(&use_path.ident); + self.word("::"); + self.use_tree(&use_path.tree); + } + + fn use_name(&mut self, use_name: &UseName) { + self.ident(&use_name.ident); + } + + fn use_rename(&mut self, use_rename: &UseRename) { + self.ident(&use_rename.ident); + self.word(" as "); + self.ident(&use_rename.rename); + } + + fn use_glob(&mut self, use_glob: &UseGlob) { + let _ = use_glob; + self.word("*"); + } + + fn use_group(&mut self, use_group: &UseGroup) { + if use_group.items.is_empty() { + self.word("{}"); + } else if use_group.items.len() == 1 + && match &use_group.items[0] { + UseTree::Name(use_name) => use_name.ident != "self", + UseTree::Rename(use_rename) => use_rename.ident != "self", + _ => true, + } + { + self.use_tree(&use_group.items[0]); + } else { + self.cbox(INDENT); + self.word("{"); + self.zerobreak(); + self.ibox(0); + for use_tree in use_group.items.iter().delimited() { + self.use_tree(&use_tree); + if !use_tree.is_last { + self.word(","); + let mut use_tree = *use_tree; + while let UseTree::Path(use_path) = use_tree { + use_tree = &use_path.tree; + } + if let UseTree::Group(_) = use_tree { + self.hardbreak(); + } else { + self.space(); + } + } + } + self.end(); + self.trailing_comma(true); + self.offset(-INDENT); + self.word("}"); + self.end(); + } + } + + fn foreign_item(&mut self, foreign_item: &ForeignItem) { + match foreign_item { + #![cfg_attr(all(test, exhaustive), deny(non_exhaustive_omitted_patterns))] + ForeignItem::Fn(item) => self.foreign_item_fn(item), + ForeignItem::Static(item) => self.foreign_item_static(item), + ForeignItem::Type(item) => self.foreign_item_type(item), + ForeignItem::Macro(item) => self.foreign_item_macro(item), + ForeignItem::Verbatim(item) => self.foreign_item_verbatim(item), + _ => unimplemented!("unknown ForeignItem"), + } + } + + fn foreign_item_fn(&mut self, foreign_item: &ForeignItemFn) { + self.outer_attrs(&foreign_item.attrs); + self.cbox(INDENT); + self.visibility(&foreign_item.vis); + self.signature( + &foreign_item.sig, + #[cfg(feature = "verbatim")] + &verbatim::Safety::Disallowed, + ); + self.where_clause_semi(&foreign_item.sig.generics.where_clause); + self.end(); + self.hardbreak(); + } + + fn foreign_item_static(&mut self, foreign_item: &ForeignItemStatic) { + self.outer_attrs(&foreign_item.attrs); + self.cbox(0); + self.visibility(&foreign_item.vis); + self.word("static "); + self.static_mutability(&foreign_item.mutability); + self.ident(&foreign_item.ident); + self.word(": "); + self.ty(&foreign_item.ty); + self.word(";"); + self.end(); + self.hardbreak(); + } + + fn foreign_item_type(&mut self, foreign_item: 
&ForeignItemType) { + self.outer_attrs(&foreign_item.attrs); + self.cbox(0); + self.visibility(&foreign_item.vis); + self.word("type "); + self.ident(&foreign_item.ident); + self.generics(&foreign_item.generics); + self.word(";"); + self.end(); + self.hardbreak(); + } + + fn foreign_item_macro(&mut self, foreign_item: &ForeignItemMacro) { + self.outer_attrs(&foreign_item.attrs); + let semicolon = mac::requires_semi(&foreign_item.mac.delimiter); + self.mac(&foreign_item.mac, None, semicolon); + self.hardbreak(); + } + + #[cfg(not(feature = "verbatim"))] + fn foreign_item_verbatim(&mut self, foreign_item: &TokenStream) { + if !foreign_item.is_empty() { + unimplemented!("ForeignItem::Verbatim `{}`", foreign_item); + } + self.hardbreak(); + } + + #[cfg(feature = "verbatim")] + fn foreign_item_verbatim(&mut self, tokens: &TokenStream) { + use syn::parse::{Parse, ParseStream, Result}; + use syn::{Abi, Attribute, Token, Visibility}; + use verbatim::{ + kw, FlexibleItemFn, FlexibleItemStatic, FlexibleItemType, WhereClauseLocation, + }; + + enum ForeignItemVerbatim { + Empty, + Ellipsis, + FnFlexible(FlexibleItemFn), + StaticFlexible(FlexibleItemStatic), + TypeFlexible(FlexibleItemType), + } + + fn peek_signature(input: ParseStream) -> bool { + let fork = input.fork(); + fork.parse::>().is_ok() + && fork.parse::>().is_ok() + && ((fork.peek(kw::safe) && fork.parse::().is_ok()) + || fork.parse::>().is_ok()) + && fork.parse::>().is_ok() + && fork.peek(Token![fn]) + } + + impl Parse for ForeignItemVerbatim { + fn parse(input: ParseStream) -> Result { + if input.is_empty() { + return Ok(ForeignItemVerbatim::Empty); + } else if input.peek(Token![...]) { + input.parse::()?; + return Ok(ForeignItemVerbatim::Ellipsis); + } + + let attrs = input.call(Attribute::parse_outer)?; + let vis: Visibility = input.parse()?; + let defaultness = false; + + let lookahead = input.lookahead1(); + if lookahead.peek(Token![fn]) || peek_signature(input) { + let flexible_item = FlexibleItemFn::parse(attrs, vis, defaultness, input)?; + Ok(ForeignItemVerbatim::FnFlexible(flexible_item)) + } else if lookahead.peek(Token![static]) + || ((input.peek(Token![unsafe]) || input.peek(kw::safe)) + && input.peek2(Token![static])) + { + let flexible_item = FlexibleItemStatic::parse(attrs, vis, input)?; + Ok(ForeignItemVerbatim::StaticFlexible(flexible_item)) + } else if lookahead.peek(Token![type]) { + let flexible_item = FlexibleItemType::parse( + attrs, + vis, + defaultness, + input, + WhereClauseLocation::Both, + )?; + Ok(ForeignItemVerbatim::TypeFlexible(flexible_item)) + } else { + Err(lookahead.error()) + } + } + } + + let foreign_item: ForeignItemVerbatim = match syn::parse2(tokens.clone()) { + Ok(foreign_item) => foreign_item, + Err(_) => unimplemented!("ForeignItem::Verbatim `{}`", tokens), + }; + + match foreign_item { + ForeignItemVerbatim::Empty => { + self.hardbreak(); + } + ForeignItemVerbatim::Ellipsis => { + self.word("..."); + self.hardbreak(); + } + ForeignItemVerbatim::FnFlexible(foreign_item) => { + self.flexible_item_fn(&foreign_item); + } + ForeignItemVerbatim::StaticFlexible(foreign_item) => { + self.flexible_item_static(&foreign_item); + } + ForeignItemVerbatim::TypeFlexible(foreign_item) => { + self.flexible_item_type(&foreign_item); + } + } + } + + fn trait_item(&mut self, trait_item: &TraitItem) { + match trait_item { + #![cfg_attr(all(test, exhaustive), deny(non_exhaustive_omitted_patterns))] + TraitItem::Const(item) => self.trait_item_const(item), + TraitItem::Fn(item) => self.trait_item_fn(item), + 
TraitItem::Type(item) => self.trait_item_type(item), + TraitItem::Macro(item) => self.trait_item_macro(item), + TraitItem::Verbatim(item) => self.trait_item_verbatim(item), + _ => unimplemented!("unknown TraitItem"), + } + } + + fn trait_item_const(&mut self, trait_item: &TraitItemConst) { + self.outer_attrs(&trait_item.attrs); + self.cbox(0); + self.word("const "); + self.ident(&trait_item.ident); + self.generics(&trait_item.generics); + self.word(": "); + self.ty(&trait_item.ty); + if let Some((_eq_token, default)) = &trait_item.default { + self.word(" = "); + self.neverbreak(); + self.expr(default, FixupContext::NONE); + } + self.word(";"); + self.end(); + self.hardbreak(); + } + + fn trait_item_fn(&mut self, trait_item: &TraitItemFn) { + self.outer_attrs(&trait_item.attrs); + self.cbox(INDENT); + self.signature( + &trait_item.sig, + #[cfg(feature = "verbatim")] + &verbatim::Safety::Disallowed, + ); + if let Some(block) = &trait_item.default { + self.where_clause_for_body(&trait_item.sig.generics.where_clause); + self.word("{"); + self.hardbreak_if_nonempty(); + self.inner_attrs(&trait_item.attrs); + for stmt in block.stmts.iter().delimited() { + self.stmt(&stmt, stmt.is_last); + } + self.offset(-INDENT); + self.end(); + self.word("}"); + } else { + self.where_clause_semi(&trait_item.sig.generics.where_clause); + self.end(); + } + self.hardbreak(); + } + + fn trait_item_type(&mut self, trait_item: &TraitItemType) { + self.outer_attrs(&trait_item.attrs); + self.cbox(INDENT); + self.word("type "); + self.ident(&trait_item.ident); + self.generics(&trait_item.generics); + for bound in trait_item.bounds.iter().delimited() { + if bound.is_first { + self.word(": "); + } else { + self.space(); + self.word("+ "); + } + self.type_param_bound(&bound); + } + if let Some((_eq_token, default)) = &trait_item.default { + self.word(" = "); + self.neverbreak(); + self.ibox(-INDENT); + self.ty(default); + self.end(); + } + self.where_clause_oneline_semi(&trait_item.generics.where_clause); + self.end(); + self.hardbreak(); + } + + fn trait_item_macro(&mut self, trait_item: &TraitItemMacro) { + self.outer_attrs(&trait_item.attrs); + let semicolon = mac::requires_semi(&trait_item.mac.delimiter); + self.mac(&trait_item.mac, None, semicolon); + self.hardbreak(); + } + + #[cfg(not(feature = "verbatim"))] + fn trait_item_verbatim(&mut self, trait_item: &TokenStream) { + if !trait_item.is_empty() { + unimplemented!("TraitItem::Verbatim `{}`", trait_item); + } + self.hardbreak(); + } + + #[cfg(feature = "verbatim")] + fn trait_item_verbatim(&mut self, tokens: &TokenStream) { + use syn::parse::{Parse, ParseStream, Result}; + use syn::{Attribute, Ident, Token, Visibility}; + use verbatim::{FlexibleItemConst, FlexibleItemType, WhereClauseLocation}; + + enum TraitItemVerbatim { + Empty, + Ellipsis, + ConstFlexible(FlexibleItemConst), + TypeFlexible(FlexibleItemType), + PubOrDefault(PubOrDefaultTraitItem), + } + + struct PubOrDefaultTraitItem { + attrs: Vec, + vis: Visibility, + defaultness: bool, + trait_item: TraitItem, + } + + impl Parse for TraitItemVerbatim { + fn parse(input: ParseStream) -> Result { + if input.is_empty() { + return Ok(TraitItemVerbatim::Empty); + } else if input.peek(Token![...]) { + input.parse::()?; + return Ok(TraitItemVerbatim::Ellipsis); + } + + let attrs = input.call(Attribute::parse_outer)?; + let vis: Visibility = input.parse()?; + let defaultness = input.parse::>()?.is_some(); + + let lookahead = input.lookahead1(); + if lookahead.peek(Token![const]) && (input.peek2(Ident) || 
input.peek2(Token![_])) { + let flexible_item = FlexibleItemConst::parse(attrs, vis, defaultness, input)?; + Ok(TraitItemVerbatim::ConstFlexible(flexible_item)) + } else if lookahead.peek(Token![type]) { + let flexible_item = FlexibleItemType::parse( + attrs, + vis, + defaultness, + input, + WhereClauseLocation::AfterEq, + )?; + Ok(TraitItemVerbatim::TypeFlexible(flexible_item)) + } else if (input.peek(Token![const]) + || lookahead.peek(Token![async]) + || lookahead.peek(Token![unsafe]) + || lookahead.peek(Token![extern]) + || lookahead.peek(Token![fn])) + && (!matches!(vis, Visibility::Inherited) || defaultness) + { + Ok(TraitItemVerbatim::PubOrDefault(PubOrDefaultTraitItem { + attrs, + vis, + defaultness, + trait_item: input.parse()?, + })) + } else { + Err(lookahead.error()) + } + } + } + + let impl_item: TraitItemVerbatim = match syn::parse2(tokens.clone()) { + Ok(impl_item) => impl_item, + Err(_) => unimplemented!("TraitItem::Verbatim `{}`", tokens), + }; + + match impl_item { + TraitItemVerbatim::Empty => { + self.hardbreak(); + } + TraitItemVerbatim::Ellipsis => { + self.word("..."); + self.hardbreak(); + } + TraitItemVerbatim::ConstFlexible(trait_item) => { + self.flexible_item_const(&trait_item); + } + TraitItemVerbatim::TypeFlexible(trait_item) => { + self.flexible_item_type(&trait_item); + } + TraitItemVerbatim::PubOrDefault(trait_item) => { + self.outer_attrs(&trait_item.attrs); + self.visibility(&trait_item.vis); + if trait_item.defaultness { + self.word("default "); + } + self.trait_item(&trait_item.trait_item); + } + } + } + + fn impl_item(&mut self, impl_item: &ImplItem) { + match impl_item { + #![cfg_attr(all(test, exhaustive), deny(non_exhaustive_omitted_patterns))] + ImplItem::Const(item) => self.impl_item_const(item), + ImplItem::Fn(item) => self.impl_item_fn(item), + ImplItem::Type(item) => self.impl_item_type(item), + ImplItem::Macro(item) => self.impl_item_macro(item), + ImplItem::Verbatim(item) => self.impl_item_verbatim(item), + _ => unimplemented!("unknown ImplItem"), + } + } + + fn impl_item_const(&mut self, impl_item: &ImplItemConst) { + self.outer_attrs(&impl_item.attrs); + self.cbox(0); + self.visibility(&impl_item.vis); + if impl_item.defaultness.is_some() { + self.word("default "); + } + self.word("const "); + self.ident(&impl_item.ident); + self.generics(&impl_item.generics); + self.word(": "); + self.ty(&impl_item.ty); + self.word(" = "); + self.neverbreak(); + self.expr(&impl_item.expr, FixupContext::NONE); + self.word(";"); + self.end(); + self.hardbreak(); + } + + fn impl_item_fn(&mut self, impl_item: &ImplItemFn) { + self.outer_attrs(&impl_item.attrs); + self.cbox(INDENT); + self.visibility(&impl_item.vis); + if impl_item.defaultness.is_some() { + self.word("default "); + } + self.signature( + &impl_item.sig, + #[cfg(feature = "verbatim")] + &verbatim::Safety::Disallowed, + ); + self.where_clause_for_body(&impl_item.sig.generics.where_clause); + self.word("{"); + self.hardbreak_if_nonempty(); + self.inner_attrs(&impl_item.attrs); + for stmt in impl_item.block.stmts.iter().delimited() { + self.stmt(&stmt, stmt.is_last); + } + self.offset(-INDENT); + self.end(); + self.word("}"); + self.hardbreak(); + } + + fn impl_item_type(&mut self, impl_item: &ImplItemType) { + self.outer_attrs(&impl_item.attrs); + self.cbox(INDENT); + self.visibility(&impl_item.vis); + if impl_item.defaultness.is_some() { + self.word("default "); + } + self.word("type "); + self.ident(&impl_item.ident); + self.generics(&impl_item.generics); + self.word(" = "); + self.neverbreak(); + 
self.ibox(-INDENT); + self.ty(&impl_item.ty); + self.end(); + self.where_clause_oneline_semi(&impl_item.generics.where_clause); + self.end(); + self.hardbreak(); + } + + fn impl_item_macro(&mut self, impl_item: &ImplItemMacro) { + self.outer_attrs(&impl_item.attrs); + let semicolon = mac::requires_semi(&impl_item.mac.delimiter); + self.mac(&impl_item.mac, None, semicolon); + self.hardbreak(); + } + + #[cfg(not(feature = "verbatim"))] + fn impl_item_verbatim(&mut self, impl_item: &TokenStream) { + if !impl_item.is_empty() { + unimplemented!("ImplItem::Verbatim `{}`", impl_item); + } + self.hardbreak(); + } + + #[cfg(feature = "verbatim")] + fn impl_item_verbatim(&mut self, tokens: &TokenStream) { + use syn::parse::{Parse, ParseStream, Result}; + use syn::{Attribute, Ident, Token, Visibility}; + use verbatim::{FlexibleItemConst, FlexibleItemFn, FlexibleItemType, WhereClauseLocation}; + + enum ImplItemVerbatim { + Empty, + Ellipsis, + ConstFlexible(FlexibleItemConst), + FnFlexible(FlexibleItemFn), + TypeFlexible(FlexibleItemType), + } + + impl Parse for ImplItemVerbatim { + fn parse(input: ParseStream) -> Result { + if input.is_empty() { + return Ok(ImplItemVerbatim::Empty); + } else if input.peek(Token![...]) { + input.parse::()?; + return Ok(ImplItemVerbatim::Ellipsis); + } + + let attrs = input.call(Attribute::parse_outer)?; + let vis: Visibility = input.parse()?; + let defaultness = input.parse::>()?.is_some(); + + let lookahead = input.lookahead1(); + if lookahead.peek(Token![const]) && (input.peek2(Ident) || input.peek2(Token![_])) { + let flexible_item = FlexibleItemConst::parse(attrs, vis, defaultness, input)?; + Ok(ImplItemVerbatim::ConstFlexible(flexible_item)) + } else if input.peek(Token![const]) + || lookahead.peek(Token![async]) + || lookahead.peek(Token![unsafe]) + || lookahead.peek(Token![extern]) + || lookahead.peek(Token![fn]) + { + let flexible_item = FlexibleItemFn::parse(attrs, vis, defaultness, input)?; + Ok(ImplItemVerbatim::FnFlexible(flexible_item)) + } else if lookahead.peek(Token![type]) { + let flexible_item = FlexibleItemType::parse( + attrs, + vis, + defaultness, + input, + WhereClauseLocation::AfterEq, + )?; + Ok(ImplItemVerbatim::TypeFlexible(flexible_item)) + } else { + Err(lookahead.error()) + } + } + } + + let impl_item: ImplItemVerbatim = match syn::parse2(tokens.clone()) { + Ok(impl_item) => impl_item, + Err(_) => unimplemented!("ImplItem::Verbatim `{}`", tokens), + }; + + match impl_item { + ImplItemVerbatim::Empty => { + self.hardbreak(); + } + ImplItemVerbatim::Ellipsis => { + self.word("..."); + self.hardbreak(); + } + ImplItemVerbatim::ConstFlexible(impl_item) => { + self.flexible_item_const(&impl_item); + } + ImplItemVerbatim::FnFlexible(impl_item) => { + self.flexible_item_fn(&impl_item); + } + ImplItemVerbatim::TypeFlexible(impl_item) => { + self.flexible_item_type(&impl_item); + } + } + } + + fn signature( + &mut self, + signature: &Signature, + #[cfg(feature = "verbatim")] safety: &verbatim::Safety, + ) { + if signature.constness.is_some() { + self.word("const "); + } + if signature.asyncness.is_some() { + self.word("async "); + } + #[cfg(feature = "verbatim")] + { + if let verbatim::Safety::Disallowed = safety { + if signature.unsafety.is_some() { + self.word("unsafe "); + } + } else { + self.safety(safety); + } + } + #[cfg(not(feature = "verbatim"))] + { + if signature.unsafety.is_some() { + self.word("unsafe "); + } + } + if let Some(abi) = &signature.abi { + self.abi(abi); + } + self.word("fn "); + self.ident(&signature.ident); + 
self.generics(&signature.generics); + self.word("("); + self.neverbreak(); + self.cbox(0); + self.zerobreak(); + for input in signature.inputs.iter().delimited() { + self.fn_arg(&input); + let is_last = input.is_last && signature.variadic.is_none(); + self.trailing_comma(is_last); + } + if let Some(variadic) = &signature.variadic { + self.variadic(variadic); + self.zerobreak(); + } + self.offset(-INDENT); + self.end(); + self.word(")"); + self.cbox(-INDENT); + self.return_type(&signature.output); + self.end(); + } + + fn fn_arg(&mut self, fn_arg: &FnArg) { + match fn_arg { + FnArg::Receiver(receiver) => self.receiver(receiver), + FnArg::Typed(pat_type) => self.pat_type(pat_type), + } + } + + fn receiver(&mut self, receiver: &Receiver) { + self.outer_attrs(&receiver.attrs); + if let Some((_ampersand, lifetime)) = &receiver.reference { + self.word("&"); + if let Some(lifetime) = lifetime { + self.lifetime(lifetime); + self.nbsp(); + } + } + if receiver.mutability.is_some() { + self.word("mut "); + } + self.word("self"); + if receiver.colon_token.is_some() { + self.word(": "); + self.ty(&receiver.ty); + } else { + let consistent = match (&receiver.reference, &receiver.mutability, &*receiver.ty) { + (Some(_), mutability, Type::Reference(ty)) => { + mutability.is_some() == ty.mutability.is_some() + && match &*ty.elem { + Type::Path(ty) => ty.qself.is_none() && ty.path.is_ident("Self"), + _ => false, + } + } + (None, _, Type::Path(ty)) => ty.qself.is_none() && ty.path.is_ident("Self"), + _ => false, + }; + if !consistent { + self.word(": "); + self.ty(&receiver.ty); + } + } + } + + fn variadic(&mut self, variadic: &Variadic) { + self.outer_attrs(&variadic.attrs); + if let Some((pat, _colon)) = &variadic.pat { + self.pat(pat); + self.word(": "); + } + self.word("..."); + } + + fn static_mutability(&mut self, mutability: &StaticMutability) { + match mutability { + #![cfg_attr(all(test, exhaustive), deny(non_exhaustive_omitted_patterns))] + StaticMutability::Mut(_) => self.word("mut "), + StaticMutability::None => {} + _ => unimplemented!("unknown StaticMutability"), + } + } +} + +#[cfg(feature = "verbatim")] +mod verbatim { + use crate::algorithm::Printer; + use crate::fixup::FixupContext; + use crate::iter::IterDelimited; + use crate::INDENT; + use syn::ext::IdentExt; + use syn::parse::{Parse, ParseStream, Result}; + use syn::{ + braced, token, Attribute, Block, Expr, Generics, Ident, Signature, StaticMutability, Stmt, + Token, Type, TypeParamBound, Visibility, WhereClause, + }; + + pub mod kw { + syn::custom_keyword!(safe); + } + + pub struct FlexibleItemConst { + pub attrs: Vec, + pub vis: Visibility, + pub defaultness: bool, + pub ident: Ident, + pub generics: Generics, + pub ty: Type, + pub value: Option, + } + + pub struct FlexibleItemFn { + pub attrs: Vec, + pub vis: Visibility, + pub defaultness: bool, + pub safety: Safety, + pub sig: Signature, + pub body: Option>, + } + + pub struct FlexibleItemStatic { + pub attrs: Vec, + pub vis: Visibility, + pub safety: Safety, + pub mutability: StaticMutability, + pub ident: Ident, + pub ty: Option, + pub expr: Option, + } + + pub struct FlexibleItemType { + pub attrs: Vec, + pub vis: Visibility, + pub defaultness: bool, + pub ident: Ident, + pub generics: Generics, + pub bounds: Vec, + pub definition: Option, + pub where_clause_after_eq: Option, + } + + pub enum Safety { + Unsafe, + Safe, + Default, + Disallowed, + } + + pub enum WhereClauseLocation { + // type Ty where T: 'static = T; + BeforeEq, + // type Ty = T where T: 'static; + AfterEq, + // 
TODO: goes away once the migration period on rust-lang/rust#89122 is over + Both, + } + + impl FlexibleItemConst { + pub fn parse( + attrs: Vec, + vis: Visibility, + defaultness: bool, + input: ParseStream, + ) -> Result { + input.parse::()?; + let ident = input.call(Ident::parse_any)?; + let mut generics: Generics = input.parse()?; + input.parse::()?; + let ty: Type = input.parse()?; + let value = if input.parse::>()?.is_some() { + let expr: Expr = input.parse()?; + Some(expr) + } else { + None + }; + generics.where_clause = input.parse()?; + input.parse::()?; + + Ok(FlexibleItemConst { + attrs, + vis, + defaultness, + ident, + generics, + ty, + value, + }) + } + } + + impl FlexibleItemFn { + pub fn parse( + mut attrs: Vec, + vis: Visibility, + defaultness: bool, + input: ParseStream, + ) -> Result { + let constness: Option = input.parse()?; + let asyncness: Option = input.parse()?; + let safety: Safety = input.parse()?; + + let lookahead = input.lookahead1(); + let sig: Signature = if lookahead.peek(Token![extern]) || lookahead.peek(Token![fn]) { + input.parse()? + } else { + return Err(lookahead.error()); + }; + + let lookahead = input.lookahead1(); + let body = if lookahead.peek(Token![;]) { + input.parse::()?; + None + } else if lookahead.peek(token::Brace) { + let content; + braced!(content in input); + attrs.extend(content.call(Attribute::parse_inner)?); + Some(content.call(Block::parse_within)?) + } else { + return Err(lookahead.error()); + }; + + Ok(FlexibleItemFn { + attrs, + vis, + defaultness, + safety, + sig: Signature { + constness, + asyncness, + unsafety: None, + ..sig + }, + body, + }) + } + } + + impl FlexibleItemStatic { + pub fn parse(attrs: Vec, vis: Visibility, input: ParseStream) -> Result { + let safety: Safety = input.parse()?; + input.parse::()?; + let mutability: StaticMutability = input.parse()?; + let ident = input.parse()?; + + let lookahead = input.lookahead1(); + let has_type = lookahead.peek(Token![:]); + let has_expr = lookahead.peek(Token![=]); + if !has_type && !has_expr { + return Err(lookahead.error()); + } + + let ty: Option = if has_type { + input.parse::()?; + input.parse().map(Some)? + } else { + None + }; + + let expr: Option = if input.parse::>()?.is_some() { + input.parse().map(Some)? + } else { + None + }; + + input.parse::()?; + + Ok(FlexibleItemStatic { + attrs, + vis, + safety, + mutability, + ident, + ty, + expr, + }) + } + } + + impl FlexibleItemType { + pub fn parse( + attrs: Vec, + vis: Visibility, + defaultness: bool, + input: ParseStream, + where_clause_location: WhereClauseLocation, + ) -> Result { + input.parse::()?; + let ident: Ident = input.parse()?; + let mut generics: Generics = input.parse()?; + + let mut bounds = Vec::new(); + if input.parse::>()?.is_some() { + loop { + if input.peek(Token![where]) || input.peek(Token![=]) || input.peek(Token![;]) { + break; + } + bounds.push(input.parse::()?); + if input.peek(Token![where]) || input.peek(Token![=]) || input.peek(Token![;]) { + break; + } + input.parse::()?; + } + } + + match where_clause_location { + WhereClauseLocation::BeforeEq | WhereClauseLocation::Both => { + generics.where_clause = input.parse()?; + } + WhereClauseLocation::AfterEq => {} + } + + let definition = if input.parse::>()?.is_some() { + Some(input.parse()?) + } else { + None + }; + + let where_clause_after_eq = match where_clause_location { + WhereClauseLocation::AfterEq | WhereClauseLocation::Both + if generics.where_clause.is_none() => + { + input.parse()? 
+ } + _ => None, + }; + + input.parse::()?; + + Ok(FlexibleItemType { + attrs, + vis, + defaultness, + ident, + generics, + bounds, + definition, + where_clause_after_eq, + }) + } + } + + impl Parse for Safety { + fn parse(input: ParseStream) -> Result { + if input.peek(Token![unsafe]) { + input.parse::()?; + Ok(Safety::Unsafe) + } else if input.peek(kw::safe) { + input.parse::()?; + Ok(Safety::Safe) + } else { + Ok(Safety::Default) + } + } + } + + impl Printer { + pub fn flexible_item_const(&mut self, item: &FlexibleItemConst) { + self.outer_attrs(&item.attrs); + self.cbox(INDENT); + self.visibility(&item.vis); + if item.defaultness { + self.word("default "); + } + self.word("const "); + self.ident(&item.ident); + self.generics(&item.generics); + self.word(": "); + self.cbox(-INDENT); + self.ty(&item.ty); + self.end(); + if let Some(value) = &item.value { + self.word(" = "); + self.neverbreak(); + self.ibox(-INDENT); + self.expr(value, FixupContext::NONE); + self.end(); + } + self.where_clause_oneline_semi(&item.generics.where_clause); + self.end(); + self.hardbreak(); + } + + pub fn flexible_item_fn(&mut self, item: &FlexibleItemFn) { + self.outer_attrs(&item.attrs); + self.cbox(INDENT); + self.visibility(&item.vis); + if item.defaultness { + self.word("default "); + } + self.signature(&item.sig, &item.safety); + if let Some(body) = &item.body { + self.where_clause_for_body(&item.sig.generics.where_clause); + self.word("{"); + self.hardbreak_if_nonempty(); + self.inner_attrs(&item.attrs); + for stmt in body.iter().delimited() { + self.stmt(&stmt, stmt.is_last); + } + self.offset(-INDENT); + self.end(); + self.word("}"); + } else { + self.where_clause_semi(&item.sig.generics.where_clause); + self.end(); + } + self.hardbreak(); + } + + pub fn flexible_item_static(&mut self, item: &FlexibleItemStatic) { + self.outer_attrs(&item.attrs); + self.cbox(0); + self.visibility(&item.vis); + self.safety(&item.safety); + self.word("static "); + self.static_mutability(&item.mutability); + self.ident(&item.ident); + if let Some(ty) = &item.ty { + self.word(": "); + self.ty(ty); + } + if let Some(expr) = &item.expr { + self.word(" = "); + self.neverbreak(); + self.expr(expr, FixupContext::NONE); + } + self.word(";"); + self.end(); + self.hardbreak(); + } + + pub fn flexible_item_type(&mut self, item: &FlexibleItemType) { + self.outer_attrs(&item.attrs); + self.cbox(INDENT); + self.visibility(&item.vis); + if item.defaultness { + self.word("default "); + } + self.word("type "); + self.ident(&item.ident); + self.generics(&item.generics); + for bound in item.bounds.iter().delimited() { + if bound.is_first { + self.word(": "); + } else { + self.space(); + self.word("+ "); + } + self.type_param_bound(&bound); + } + if let Some(definition) = &item.definition { + self.where_clause_oneline(&item.generics.where_clause); + self.word("= "); + self.neverbreak(); + self.ibox(-INDENT); + self.ty(definition); + self.end(); + self.where_clause_oneline_semi(&item.where_clause_after_eq); + } else { + self.where_clause_oneline_semi(&item.generics.where_clause); + } + self.end(); + self.hardbreak(); + } + + pub fn safety(&mut self, safety: &Safety) { + match safety { + Safety::Unsafe => self.word("unsafe "), + Safety::Safe => self.word("safe "), + Safety::Default => {} + Safety::Disallowed => unreachable!(), + } + } + } +} diff --git a/vendor/prettyplease/src/iter.rs b/vendor/prettyplease/src/iter.rs new file mode 100644 index 00000000000000..702c653f52364d --- /dev/null +++ b/vendor/prettyplease/src/iter.rs @@ -0,0 +1,46 
@@ +use std::iter::Peekable; +use std::ops::Deref; + +pub struct Delimited { + is_first: bool, + iter: Peekable, +} + +pub trait IterDelimited: Iterator + Sized { + fn delimited(self) -> Delimited { + Delimited { + is_first: true, + iter: self.peekable(), + } + } +} + +impl IterDelimited for I {} + +pub struct IteratorItem { + value: T, + pub is_first: bool, + pub is_last: bool, +} + +impl Iterator for Delimited { + type Item = IteratorItem; + + fn next(&mut self) -> Option { + let item = IteratorItem { + value: self.iter.next()?, + is_first: self.is_first, + is_last: self.iter.peek().is_none(), + }; + self.is_first = false; + Some(item) + } +} + +impl Deref for IteratorItem { + type Target = T; + + fn deref(&self) -> &Self::Target { + &self.value + } +} diff --git a/vendor/prettyplease/src/lib.rs b/vendor/prettyplease/src/lib.rs new file mode 100644 index 00000000000000..2fc8846ecfa849 --- /dev/null +++ b/vendor/prettyplease/src/lib.rs @@ -0,0 +1,385 @@ +//! [![github]](https://github.com/dtolnay/prettyplease) [![crates-io]](https://crates.io/crates/prettyplease) [![docs-rs]](https://docs.rs/prettyplease) +//! +//! [github]: https://img.shields.io/badge/github-8da0cb?style=for-the-badge&labelColor=555555&logo=github +//! [crates-io]: https://img.shields.io/badge/crates.io-fc8d62?style=for-the-badge&labelColor=555555&logo=rust +//! [docs-rs]: https://img.shields.io/badge/docs.rs-66c2a5?style=for-the-badge&labelColor=555555&logo=docs.rs +//! +//!
+//! +//! **prettyplease::unparse** — a minimal `syn` syntax tree pretty-printer +//! +//! <br>
+//! +//! # Overview +//! +//! This is a pretty-printer to turn a `syn` syntax tree into a `String` of +//! well-formatted source code. In contrast to rustfmt, this library is intended +//! to be suitable for arbitrary generated code. +//! +//! Rustfmt prioritizes high-quality output that is impeccable enough that you'd +//! be comfortable spending your career staring at its output — but that +//! means some heavyweight algorithms, and it has a tendency to bail out on code +//! that is hard to format (for example [rustfmt#3697], and there are dozens +//! more issues like it). That's not necessarily a big deal for human-generated +//! code because when code gets highly nested, the human will naturally be +//! inclined to refactor into more easily formattable code. But for generated +//! code, having the formatter just give up leaves it totally unreadable. +//! +//! [rustfmt#3697]: https://github.com/rust-lang/rustfmt/issues/3697 +//! +//! This library is designed using the simplest possible algorithm and data +//! structures that can deliver about 95% of the quality of rustfmt-formatted +//! output. In my experience testing real-world code, approximately 97-98% of +//! output lines come out identical between rustfmt's formatting and this +//! crate's. The rest have slightly different linebreak decisions, but still +//! clearly follow the dominant modern Rust style. +//! +//! The tradeoffs made by this crate are a good fit for generated code that you +//! will *not* spend your career staring at. For example, the output of +//! `bindgen`, or the output of `cargo-expand`. In those cases it's more +//! important that the whole thing be formattable without the formatter giving +//! up, than that it be flawless. +//! +//!
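Since prettyplease exists to format machine-generated code, the typical caller never has source text on disk at all: it builds a token stream, parses it into a `syn::File`, and hands that to `unparse`. A minimal sketch of that flow (illustrative only; it assumes the caller also depends on the `quote` crate and on `syn` with the `full` and `parsing` features, which are not part of this patch):

```
use quote::quote;

fn main() {
    // Build a syntax tree programmatically, the way a code generator such as
    // bindgen or a macro expander would, instead of reading a file from disk.
    let tokens = quote! {
        pub fn add(a: u64, b: u64) -> u64 {
            a + b
        }
    };
    let file: syn::File = syn::parse2(tokens).expect("generated tokens form a valid file");
    // unparse() returns rustfmt-style formatted source as a String.
    print!("{}", prettyplease::unparse(&file));
}
```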
+//! +//! # Feature matrix +//! +//! Here are a few superficial comparisons of this crate against the AST +//! pretty-printer built into rustc, and rustfmt. The sections below go into +//! more detail comparing the output of each of these libraries. +//! +//! | | prettyplease | rustc | rustfmt | +//! |:---|:---:|:---:|:---:| +//! | non-pathological behavior on big or generated code | 💚 | ❌ | ❌ | +//! | idiomatic modern formatting ("locally indistinguishable from rustfmt") | 💚 | ❌ | 💚 | +//! | throughput | 60 MB/s | 39 MB/s | 2.8 MB/s | +//! | number of dependencies | 3 | 72 | 66 | +//! | compile time including dependencies | 2.4 sec | 23.1 sec | 29.8 sec | +//! | buildable using a stable Rust compiler | 💚 | ❌ | ❌ | +//! | published to crates.io | 💚 | ❌ | ❌ | +//! | extensively configurable output | ❌ | ❌ | 💚 | +//! | intended to accommodate hand-maintained source code | ❌ | ❌ | 💚 | +//! +//!
+//! +//! # Comparison to rustfmt +//! +//! - [input.rs](https://github.com/dtolnay/prettyplease/blob/0.1.0/examples/input.rs) +//! - [output.prettyplease.rs](https://github.com/dtolnay/prettyplease/blob/0.1.0/examples/output.prettyplease.rs) +//! - [output.rustfmt.rs](https://github.com/dtolnay/prettyplease/blob/0.1.0/examples/output.rustfmt.rs) +//! +//! If you weren't told which output file is which, it would be practically +//! impossible to tell — **except** for line 435 in the rustfmt output, +//! which is more than 1000 characters long because rustfmt just gave up +//! formatting that part of the file: +//! +//! ``` +//! # const _: &str = stringify! {{{ +//! match segments[5] { +//! 0 => write!(f, "::{}", ipv4), +//! 0xffff => write!(f, "::ffff:{}", ipv4), +//! _ => unreachable!(), +//! } +//! } else { # [derive (Copy , Clone , Default)] struct Span { start : usize , len : usize , } let zeroes = { let mut longest = Span :: default () ; let mut current = Span :: default () ; for (i , & segment) in segments . iter () . enumerate () { if segment == 0 { if current . len == 0 { current . start = i ; } current . len += 1 ; if current . len > longest . len { longest = current ; } } else { current = Span :: default () ; } } longest } ; # [doc = " Write a colon-separated part of the address"] # [inline] fn fmt_subslice (f : & mut fmt :: Formatter < '_ > , chunk : & [u16]) -> fmt :: Result { if let Some ((first , tail)) = chunk . split_first () { write ! (f , "{:x}" , first) ? ; for segment in tail { f . write_char (':') ? ; write ! (f , "{:x}" , segment) ? ; } } Ok (()) } if zeroes . len > 1 { fmt_subslice (f , & segments [.. zeroes . start]) ? ; f . write_str ("::") ? ; fmt_subslice (f , & segments [zeroes . start + zeroes . len ..]) } else { fmt_subslice (f , & segments) } } +//! } else { +//! const IPV6_BUF_LEN: usize = (4 * 8) + 7; +//! let mut buf = [0u8; IPV6_BUF_LEN]; +//! let mut buf_slice = &mut buf[..]; +//! # }}; +//! ``` +//! +//! This is a pretty typical manifestation of rustfmt bailing out in generated +//! code — a chunk of the input ends up on one line. The other +//! manifestation is that you're working on some code, running rustfmt on save +//! like a conscientious developer, but after a while notice it isn't doing +//! anything. You introduce an intentional formatting issue, like a stray indent +//! or semicolon, and run rustfmt to check your suspicion. Nope, it doesn't get +//! cleaned up — rustfmt is just not formatting the part of the file you +//! are working on. +//! +//! The prettyplease library is designed to have no pathological cases that +//! force a bail out; the entire input you give it will get formatted in some +//! "good enough" form. +//! +//! Separately, rustfmt can be problematic to integrate into projects. It's +//! written using rustc's internal syntax tree, so it can't be built by a stable +//! compiler. Its releases are not regularly published to crates.io, so in Cargo +//! builds you'd need to depend on it as a git dependency, which precludes +//! publishing your crate to crates.io also. You can shell out to a `rustfmt` +//! binary, but that'll be whatever rustfmt version is installed on each +//! developer's system (if any), which can lead to spurious diffs in checked-in +//! generated code formatted by different versions. In contrast prettyplease is +//! designed to be easy to pull in as a library, and compiles fast. +//! +//!
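The shell-out alternative mentioned above looks roughly like the sketch below (illustrative only; it assumes some `rustfmt` binary is on `PATH`, which is exactly the per-developer variable the paragraph warns about):

```
use std::io::Write;
use std::process::{Command, Stdio};

// Format generated source by piping it through whatever rustfmt is installed.
// The result depends on the locally installed rustfmt version, which is what
// leads to spurious diffs in checked-in generated code.
fn rustfmt_via_binary(source: &str) -> std::io::Result<String> {
    let mut child = Command::new("rustfmt")
        .arg("--edition=2021")
        .stdin(Stdio::piped())
        .stdout(Stdio::piped())
        .spawn()?;
    child.stdin.take().expect("stdin was piped").write_all(source.as_bytes())?;
    let output = child.wait_with_output()?;
    Ok(String::from_utf8_lossy(&output.stdout).into_owned())
}
```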
+//! +//! # Comparison to rustc_ast_pretty +//! +//! - [input.rs](https://github.com/dtolnay/prettyplease/blob/0.1.0/examples/input.rs) +//! - [output.prettyplease.rs](https://github.com/dtolnay/prettyplease/blob/0.1.0/examples/output.prettyplease.rs) +//! - [output.rustc.rs](https://github.com/dtolnay/prettyplease/blob/0.1.0/examples/output.rustc.rs) +//! +//! This is the pretty-printer that gets used when rustc prints source code, +//! such as `rustc -Zunpretty=expanded`. It's used also by the standard +//! library's `stringify!` when stringifying an interpolated macro_rules AST +//! fragment, like an $:expr, and transitively by `dbg!` and many macros in the +//! ecosystem. +//! +//! Rustc's formatting is mostly okay, but does not hew closely to the dominant +//! contemporary style of Rust formatting. Some things wouldn't ever be written +//! on one line, like this `match` expression, and certainly not with a comma in +//! front of the closing brace: +//! +//! ``` +//! # const _: &str = stringify! { +//! fn eq(&self, other: &IpAddr) -> bool { +//! match other { IpAddr::V4(v4) => self == v4, IpAddr::V6(_) => false, } +//! } +//! # }; +//! ``` +//! +//! Some places use non-multiple-of-4 indentation, which is definitely not the +//! norm: +//! +//! ``` +//! # const _: &str = stringify! { +//! pub const fn to_ipv6_mapped(&self) -> Ipv6Addr { +//! let [a, b, c, d] = self.octets(); +//! Ipv6Addr{inner: +//! c::in6_addr{s6_addr: +//! [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xFF, +//! 0xFF, a, b, c, d],},} +//! } +//! # }; +//! ``` +//! +//! And although there isn't an egregious example of it in the link because the +//! input code is pretty tame, in general rustc_ast_pretty has pathological +//! behavior on generated code. It has a tendency to use excessive horizontal +//! indentation and rapidly run out of width: +//! +//! ``` +//! # const _: &str = stringify! { +//! ::std::io::_print(::core::fmt::Arguments::new_v1(&[""], +//! &match (&msg,) { +//! _args => +//! [::core::fmt::ArgumentV1::new(_args.0, +//! ::core::fmt::Display::fmt)], +//! })); +//! # }; +//! ``` +//! +//! The snippets above are clearly different from modern rustfmt style. In +//! contrast, prettyplease is designed to have output that is practically +//! indistinguishable from rustfmt-formatted code. +//! +//!
+//! +//! # Example +//! +//! ``` +//! // [dependencies] +//! // prettyplease = "0.2" +//! // syn = { version = "2", default-features = false, features = ["full", "parsing"] } +//! +//! const INPUT: &str = stringify! { +//! use crate::{ +//! lazy::{Lazy, SyncLazy, SyncOnceCell}, panic, +//! sync::{ atomic::{AtomicUsize, Ordering::SeqCst}, +//! mpsc::channel, Mutex, }, +//! thread, +//! }; +//! impl Into for T where U: From { +//! fn into(self) -> U { U::from(self) } +//! } +//! }; +//! +//! fn main() { +//! let syntax_tree = syn::parse_file(INPUT).unwrap(); +//! let formatted = prettyplease::unparse(&syntax_tree); +//! print!("{}", formatted); +//! } +//! ``` +//! +//!
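Internally, every printer module in this patch (item.rs, expr.rs, generics.rs, and so on) drives the same small set of `Printer` primitives: `word`, the break variants (`space`, `zerobreak`, `hardbreak`), the group openers `cbox`/`ibox` paired with `end` and `offset`, plus the `delimited()` adapter from iter.rs that tags each element with `is_first`/`is_last`. A minimal crate-internal sketch in the same style, printing `name(arg, arg, ...)` as one consistently breaking group (this helper is hypothetical and does not exist in the vendored sources; it only illustrates the grouping model described in the Algorithm notes that follow):

```
use crate::algorithm::Printer;
use crate::iter::IterDelimited;
use crate::INDENT;

impl Printer {
    // Hypothetical helper: either the whole argument list fits on one line,
    // or the consistent group breaks and every argument gets its own line.
    fn example_call(&mut self, name: &str, args: &[String]) {
        self.word(name.to_owned());
        self.word("(");
        self.cbox(INDENT); // consistent-breaking group, indented one level
        self.zerobreak();  // zero-width break right after the '('
        for arg in args.iter().delimited() {
            self.word(arg.to_string());
            // Non-final elements get "," plus a space-or-newline; for the
            // final element the comma only materializes if the group breaks,
            // the deferred trailing comma discussed in the Algorithm notes.
            self.trailing_comma(arg.is_last);
        }
        self.offset(-INDENT); // outdent the closing parenthesis
        self.end();           // close the group opened by cbox()
        self.word(")");
    }
}
```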
+//! +//! # Algorithm notes +//! +//! The approach and terminology used in the implementation are derived from +//! [*Derek C. Oppen, "Pretty Printing" (1979)*][paper], on which +//! rustc_ast_pretty is also based, and from rustc_ast_pretty's implementation +//! written by Graydon Hoare in 2011 (and modernized over the years by dozens of +//! volunteer maintainers). +//! +//! [paper]: http://i.stanford.edu/pub/cstr/reports/cs/tr/79/770/CS-TR-79-770.pdf +//! +//! The paper describes two language-agnostic interacting procedures `Scan()` +//! and `Print()`. Language-specific code decomposes an input data structure +//! into a stream of `string` and `break` tokens, and `begin` and `end` tokens +//! for grouping. Each `begin`–`end` range may be identified as either +//! "consistent breaking" or "inconsistent breaking". If a group is consistently +//! breaking, then if the whole contents do not fit on the line, *every* `break` +//! token in the group will receive a linebreak. This is appropriate, for +//! example, for Rust struct literals, or arguments of a function call. If a +//! group is inconsistently breaking, then the `string` tokens in the group are +//! greedily placed on the line until out of space, and linebroken only at those +//! `break` tokens for which the next string would not fit. For example, this is +//! appropriate for the contents of a braced `use` statement in Rust. +//! +//! Scan's job is to efficiently accumulate sizing information about groups and +//! breaks. For every `begin` token we compute the distance to the matched `end` +//! token, and for every `break` we compute the distance to the next `break`. +//! The algorithm uses a ringbuffer to hold tokens whose size is not yet +//! ascertained. The maximum size of the ringbuffer is bounded by the target +//! line length and does not grow indefinitely, regardless of deep nesting in +//! the input stream. That's because once a group is sufficiently big, the +//! precise size can no longer make a difference to linebreak decisions and we +//! can effectively treat it as "infinity". +//! +//! Print's job is to use the sizing information to efficiently assign a +//! "broken" or "not broken" status to every `begin` token. At that point the +//! output is easily constructed by concatenating `string` tokens and breaking +//! at `break` tokens contained within a broken group. +//! +//! Leveraging these primitives (i.e. cleverly placing the all-or-nothing +//! consistent breaks and greedy inconsistent breaks) to yield +//! rustfmt-compatible formatting for all of Rust's syntax tree nodes is a fun +//! challenge. +//! +//! Here is a visualization of some Rust tokens fed into the pretty printing +//! algorithm. Consistently breaking `begin`—`end` pairs are represented +//! by `«`⁠`»`, inconsistently breaking by `‹`⁠`›`, `break` by `·`, +//! and the rest of the non-whitespace are `string`. +//! +//! ```text +//! use crate::«{· +//! ‹ lazy::«{·‹Lazy,· SyncLazy,· SyncOnceCell›·}»,· +//! panic,· +//! sync::«{· +//! ‹ atomic::«{·‹AtomicUsize,· Ordering::SeqCst›·}»,· +//! mpsc::channel,· Mutex›,· +//! }»,· +//! thread›,· +//! }»;· +//! «‹«impl<«·T‹›,· U‹›·»>» Into<«·U·»>· for T›· +//! where· +//! U:‹ From<«·T·»>›,· +//! {· +//! « fn into(·«·self·») -> U {· +//! ‹ U::from(«·self·»)›· +//! » }· +//! »}· +//! ``` +//! +//! The algorithm described in the paper is not quite sufficient for producing +//! well-formatted Rust code that is locally indistinguishable from rustfmt's +//! style. 
The reason is that in the paper, the complete non-whitespace contents +//! are assumed to be independent of linebreak decisions, with Scan and Print +//! being only in control of the whitespace (spaces and line breaks). In Rust as +//! idiomatically formatted by rustfmt, that is not the case. Trailing commas +//! are one example; the punctuation is only known *after* the broken vs +//! non-broken status of the surrounding group is known: +//! +//! ``` +//! # struct Struct { x: u64, y: bool } +//! # let xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx = 0; +//! # let yyyyyyyyyyyyyyyyyyyyyyyyyyyyyy = true; +//! # +//! let _ = Struct { x: 0, y: true }; +//! +//! let _ = Struct { +//! x: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx, +//! y: yyyyyyyyyyyyyyyyyyyyyyyyyyyyyy, //<- trailing comma if the expression wrapped +//! }; +//! ``` +//! +//! The formatting of `match` expressions is another case; we want small arms on +//! the same line as the pattern, and big arms wrapped in a brace. The presence +//! of the brace punctuation, comma, and semicolon are all dependent on whether +//! the arm fits on the line: +//! +//! ``` +//! # struct Entry { nanos: u32 } +//! # let total_nanos = 0u64; +//! # let mut total_secs = 0u64; +//! # let tmp; +//! # let entry = Entry { nanos: 0 }; +//! # const NANOS_PER_SEC: u32 = 1_000_000_000; +//! # +//! match total_nanos.checked_add(entry.nanos as u64) { +//! Some(n) => tmp = n, //<- small arm, inline with comma +//! None => { +//! total_secs = total_secs +//! .checked_add(total_nanos / NANOS_PER_SEC as u64) +//! .expect("overflow in iter::sum over durations"); +//! } //<- big arm, needs brace added, and also semicolon^ +//! } +//! ``` +//! +//! The printing algorithm implementation in this crate accommodates all of +//! these situations with conditional punctuation tokens whose selection can be +//! deferred and populated after it's known that the group is or is not broken. + +#![doc(html_root_url = "https://docs.rs/prettyplease/0.2.37")] +#![allow( + clippy::bool_to_int_with_if, + clippy::cast_possible_wrap, + clippy::cast_sign_loss, + clippy::derive_partial_eq_without_eq, + clippy::doc_markdown, + clippy::enum_glob_use, + clippy::items_after_statements, + clippy::let_underscore_untyped, + clippy::match_like_matches_macro, + clippy::match_same_arms, + clippy::module_name_repetitions, + clippy::must_use_candidate, + clippy::needless_pass_by_value, + clippy::ref_option, + clippy::similar_names, + clippy::struct_excessive_bools, + clippy::too_many_lines, + clippy::unused_self, + clippy::vec_init_then_push +)] +#![cfg_attr(all(test, exhaustive), feature(non_exhaustive_omitted_patterns_lint))] + +mod algorithm; +mod attr; +mod classify; +mod convenience; +mod data; +mod expr; +mod file; +mod fixup; +mod generics; +mod item; +mod iter; +mod lifetime; +mod lit; +mod mac; +mod pat; +mod path; +mod precedence; +mod ring; +mod stmt; +mod token; +mod ty; + +use crate::algorithm::Printer; +use syn::File; + +// Target line width. +const MARGIN: isize = 89; + +// Number of spaces increment at each level of block indentation. +const INDENT: isize = 4; + +// Every line is allowed at least this much space, even if highly indented. 
+const MIN_SPACE: isize = 60; + +pub fn unparse(file: &File) -> String { + let mut p = Printer::new(); + p.file(file); + p.eof() +} diff --git a/vendor/prettyplease/src/lifetime.rs b/vendor/prettyplease/src/lifetime.rs new file mode 100644 index 00000000000000..665caa324c6e1f --- /dev/null +++ b/vendor/prettyplease/src/lifetime.rs @@ -0,0 +1,9 @@ +use crate::algorithm::Printer; +use syn::Lifetime; + +impl Printer { + pub fn lifetime(&mut self, lifetime: &Lifetime) { + self.word("'"); + self.ident(&lifetime.ident); + } +} diff --git a/vendor/prettyplease/src/lit.rs b/vendor/prettyplease/src/lit.rs new file mode 100644 index 00000000000000..10a86e4b03e1fe --- /dev/null +++ b/vendor/prettyplease/src/lit.rs @@ -0,0 +1,57 @@ +use crate::algorithm::Printer; +use proc_macro2::Literal; +use syn::{Lit, LitBool, LitByte, LitByteStr, LitCStr, LitChar, LitFloat, LitInt, LitStr}; + +impl Printer { + pub fn lit(&mut self, lit: &Lit) { + match lit { + #![cfg_attr(all(test, exhaustive), deny(non_exhaustive_omitted_patterns))] + Lit::Str(lit) => self.lit_str(lit), + Lit::ByteStr(lit) => self.lit_byte_str(lit), + Lit::CStr(lit) => self.lit_c_str(lit), + Lit::Byte(lit) => self.lit_byte(lit), + Lit::Char(lit) => self.lit_char(lit), + Lit::Int(lit) => self.lit_int(lit), + Lit::Float(lit) => self.lit_float(lit), + Lit::Bool(lit) => self.lit_bool(lit), + Lit::Verbatim(lit) => self.lit_verbatim(lit), + _ => unimplemented!("unknown Lit"), + } + } + + pub fn lit_str(&mut self, lit: &LitStr) { + self.word(lit.token().to_string()); + } + + fn lit_byte_str(&mut self, lit: &LitByteStr) { + self.word(lit.token().to_string()); + } + + fn lit_c_str(&mut self, lit: &LitCStr) { + self.word(lit.token().to_string()); + } + + fn lit_byte(&mut self, lit: &LitByte) { + self.word(lit.token().to_string()); + } + + fn lit_char(&mut self, lit: &LitChar) { + self.word(lit.token().to_string()); + } + + fn lit_int(&mut self, lit: &LitInt) { + self.word(lit.token().to_string()); + } + + fn lit_float(&mut self, lit: &LitFloat) { + self.word(lit.token().to_string()); + } + + fn lit_bool(&mut self, lit: &LitBool) { + self.word(if lit.value { "true" } else { "false" }); + } + + fn lit_verbatim(&mut self, token: &Literal) { + self.word(token.to_string()); + } +} diff --git a/vendor/prettyplease/src/mac.rs b/vendor/prettyplease/src/mac.rs new file mode 100644 index 00000000000000..ddb2b5feebaa6e --- /dev/null +++ b/vendor/prettyplease/src/mac.rs @@ -0,0 +1,706 @@ +use crate::algorithm::Printer; +use crate::path::PathKind; +use crate::token::Token; +use crate::INDENT; +use proc_macro2::{Delimiter, Spacing, TokenStream}; +use syn::{Ident, Macro, MacroDelimiter}; + +impl Printer { + pub fn mac(&mut self, mac: &Macro, ident: Option<&Ident>, semicolon: bool) { + if mac.path.is_ident("macro_rules") { + if let Some(ident) = ident { + self.macro_rules(ident, &mac.tokens); + return; + } + } + #[cfg(feature = "verbatim")] + if ident.is_none() && self.standard_library_macro(mac, semicolon) { + return; + } + self.path(&mac.path, PathKind::Simple); + self.word("!"); + if let Some(ident) = ident { + self.nbsp(); + self.ident(ident); + } + let (open, close, delimiter_break) = match mac.delimiter { + MacroDelimiter::Paren(_) => ("(", ")", Self::zerobreak as fn(&mut Self)), + MacroDelimiter::Brace(_) => (" {", "}", Self::hardbreak as fn(&mut Self)), + MacroDelimiter::Bracket(_) => ("[", "]", Self::zerobreak as fn(&mut Self)), + }; + self.word(open); + if !mac.tokens.is_empty() { + self.cbox(INDENT); + delimiter_break(self); + self.ibox(0); + 
self.macro_rules_tokens(mac.tokens.clone(), false); + self.end(); + delimiter_break(self); + self.offset(-INDENT); + self.end(); + } + self.word(close); + if semicolon { + self.word(";"); + } + } + + fn macro_rules(&mut self, name: &Ident, rules: &TokenStream) { + enum State { + Start, + Matcher, + Equal, + Greater, + Expander, + } + + use State::*; + + self.word("macro_rules! "); + self.ident(name); + self.word(" {"); + self.cbox(INDENT); + self.hardbreak_if_nonempty(); + let mut state = State::Start; + for tt in rules.clone() { + let token = Token::from(tt); + match (state, token) { + (Start, Token::Group(delimiter, stream)) => { + self.delimiter_open(delimiter); + if !stream.is_empty() { + self.cbox(INDENT); + self.zerobreak(); + self.ibox(0); + self.macro_rules_tokens(stream, true); + self.end(); + self.zerobreak(); + self.offset(-INDENT); + self.end(); + } + self.delimiter_close(delimiter); + state = Matcher; + } + (Matcher, Token::Punct('=', Spacing::Joint)) => { + self.word(" ="); + state = Equal; + } + (Equal, Token::Punct('>', Spacing::Alone)) => { + self.word(">"); + state = Greater; + } + (Greater, Token::Group(_delimiter, stream)) => { + self.word(" {"); + self.neverbreak(); + if !stream.is_empty() { + self.cbox(INDENT); + self.hardbreak(); + self.ibox(0); + self.macro_rules_tokens(stream, false); + self.end(); + self.hardbreak(); + self.offset(-INDENT); + self.end(); + } + self.word("}"); + state = Expander; + } + (Expander, Token::Punct(';', Spacing::Alone)) => { + self.word(";"); + self.hardbreak(); + state = Start; + } + _ => unimplemented!("bad macro_rules syntax"), + } + } + match state { + Start => {} + Expander => { + self.word(";"); + self.hardbreak(); + } + _ => self.hardbreak(), + } + self.offset(-INDENT); + self.end(); + self.word("}"); + } + + pub fn macro_rules_tokens(&mut self, stream: TokenStream, matcher: bool) { + #[derive(PartialEq)] + enum State { + Start, + Dollar, + DollarCrate, + DollarIdent, + DollarIdentColon, + DollarParen, + DollarParenSep, + Pound, + PoundBang, + Dot, + Colon, + Colon2, + Ident, + IdentBang, + Delim, + Other, + } + + use State::*; + + let mut state = Start; + let mut previous_is_joint = true; + for tt in stream { + let token = Token::from(tt); + let (needs_space, next_state) = match (&state, &token) { + (Dollar, Token::Ident(_)) if matcher => (false, DollarIdent), + (Dollar, Token::Ident(ident)) if ident == "crate" => (false, DollarCrate), + (Dollar, Token::Ident(_)) => (false, Other), + (DollarIdent, Token::Punct(':', Spacing::Alone)) => (false, DollarIdentColon), + (DollarIdentColon, Token::Ident(_)) => (false, Other), + (DollarParen, Token::Punct('+' | '*' | '?', Spacing::Alone)) => (false, Other), + (DollarParen, Token::Ident(_) | Token::Literal(_)) => (false, DollarParenSep), + (DollarParen, Token::Punct(_, Spacing::Joint)) => (false, DollarParen), + (DollarParen, Token::Punct(_, Spacing::Alone)) => (false, DollarParenSep), + (DollarParenSep, Token::Punct('+' | '*', _)) => (false, Other), + (Pound, Token::Punct('!', _)) => (false, PoundBang), + (Dollar, Token::Group(Delimiter::Parenthesis, _)) => (false, DollarParen), + (Pound | PoundBang, Token::Group(Delimiter::Bracket, _)) => (false, Other), + (Ident, Token::Group(Delimiter::Parenthesis | Delimiter::Bracket, _)) => { + (false, Delim) + } + (Ident, Token::Punct('!', Spacing::Alone)) => (false, IdentBang), + (IdentBang, Token::Group(Delimiter::Parenthesis | Delimiter::Bracket, _)) => { + (false, Other) + } + (Colon, Token::Punct(':', _)) => (false, Colon2), + (_, 
Token::Group(Delimiter::Parenthesis | Delimiter::Bracket, _)) => (true, Delim),
+                (_, Token::Group(Delimiter::Brace | Delimiter::None, _)) => (true, Other),
+                (_, Token::Ident(ident)) if !is_keyword(ident) => {
+                    (state != Dot && state != Colon2, Ident)
+                }
+                (_, Token::Literal(lit)) if lit.to_string().ends_with('.') => (state != Dot, Other),
+                (_, Token::Literal(_)) => (state != Dot, Ident),
+                (_, Token::Punct(',' | ';', _)) => (false, Other),
+                (_, Token::Punct('.', _)) if !matcher => (state != Ident && state != Delim, Dot),
+                (_, Token::Punct(':', Spacing::Joint)) => {
+                    (state != Ident && state != DollarCrate, Colon)
+                }
+                (_, Token::Punct('$', _)) => (true, Dollar),
+                (_, Token::Punct('#', _)) => (true, Pound),
+                (_, _) => (true, Other),
+            };
+            if !previous_is_joint {
+                if needs_space {
+                    self.space();
+                } else if let Token::Punct('.', _) = token {
+                    self.zerobreak();
+                }
+            }
+            previous_is_joint = match token {
+                Token::Punct(_, Spacing::Joint) | Token::Punct('$', _) => true,
+                _ => false,
+            };
+            self.single_token(
+                token,
+                if matcher {
+                    |printer, stream| printer.macro_rules_tokens(stream, true)
+                } else {
+                    |printer, stream| printer.macro_rules_tokens(stream, false)
+                },
+            );
+            state = next_state;
+        }
+    }
+}
+
+pub(crate) fn requires_semi(delimiter: &MacroDelimiter) -> bool {
+    match delimiter {
+        MacroDelimiter::Paren(_) | MacroDelimiter::Bracket(_) => true,
+        MacroDelimiter::Brace(_) => false,
+    }
+}
+
+fn is_keyword(ident: &Ident) -> bool {
+    match ident.to_string().as_str() {
+        "as" | "async" | "await" | "box" | "break" | "const" | "continue" | "crate" | "dyn"
+        | "else" | "enum" | "extern" | "fn" | "for" | "if" | "impl" | "in" | "let" | "loop"
+        | "macro" | "match" | "mod" | "move" | "mut" | "pub" | "ref" | "return" | "static"
+        | "struct" | "trait" | "type" | "unsafe" | "use" | "where" | "while" | "yield" => true,
+        _ => false,
+    }
+}
+
+#[cfg(feature = "verbatim")]
+mod standard_library {
+    use crate::algorithm::Printer;
+    use crate::expr;
+    use crate::fixup::FixupContext;
+    use crate::iter::IterDelimited;
+    use crate::path::PathKind;
+    use crate::INDENT;
+    use syn::ext::IdentExt;
+    use syn::parse::{Parse, ParseStream, Parser, Result};
+    use syn::punctuated::Punctuated;
+    use syn::{
+        parenthesized, token, Attribute, Expr, ExprAssign, ExprPath, Ident, Lit, Macro, Pat, Path,
+        Token, Type, Visibility,
+    };
+
+    enum KnownMacro {
+        Expr(Expr),
+        Exprs(Vec<Expr>),
+        Cfg(Cfg),
+        Matches(Matches),
+        ThreadLocal(Vec<ThreadLocal>),
+        VecArray(Punctuated<Expr, Token![,]>),
+        VecRepeat { elem: Expr, n: Expr },
+    }
+
+    enum Cfg {
+        Eq(Ident, Option<Lit>),
+        Call(Ident, Vec<Cfg>),
+    }
+
+    struct Matches {
+        expression: Expr,
+        pattern: Pat,
+        guard: Option<Expr>,
+    }
+
+    struct ThreadLocal {
+        attrs: Vec<Attribute>,
+        vis: Visibility,
+        name: Ident,
+        ty: Type,
+        init: Expr,
+    }
+
+    struct FormatArgs {
+        format_string: Expr,
+        args: Vec<Expr>,
+    }
+
+    impl Parse for FormatArgs {
+        fn parse(input: ParseStream) -> Result<Self> {
+            let format_string: Expr = input.parse()?;
+
+            let mut args = Vec::new();
+            while !input.is_empty() {
+                input.parse::<Token![,]>()?;
+                if input.is_empty() {
+                    break;
+                }
+                let arg = if input.peek(Ident::peek_any)
+                    && input.peek2(Token![=])
+                    && !input.peek2(Token![==])
+                {
+                    let key = input.call(Ident::parse_any)?;
+                    let eq_token: Token![=] = input.parse()?;
+                    let value: Expr = input.parse()?;
+                    Expr::Assign(ExprAssign {
+                        attrs: Vec::new(),
+                        left: Box::new(Expr::Path(ExprPath {
+                            attrs: Vec::new(),
+                            qself: None,
+                            path: Path::from(key),
+                        })),
+                        eq_token,
+                        right: Box::new(value),
+                    })
+                } else {
+                    input.parse()?
+                };
+                args.push(arg);
+            }
+
+            Ok(FormatArgs {
+                format_string,
+                args,
+            })
+        }
+    }
+
+    impl KnownMacro {
+        fn parse_expr(input: ParseStream) -> Result<Self> {
+            let expr: Expr = input.parse()?;
+            Ok(KnownMacro::Expr(expr))
+        }
+
+        fn parse_expr_comma(input: ParseStream) -> Result<Self> {
+            let expr: Expr = input.parse()?;
+            input.parse::<Option<Token![,]>>()?;
+            Ok(KnownMacro::Exprs(vec![expr]))
+        }
+
+        fn parse_exprs(input: ParseStream) -> Result<Self> {
+            let exprs = input.parse_terminated(Expr::parse, Token![,])?;
+            Ok(KnownMacro::Exprs(Vec::from_iter(exprs)))
+        }
+
+        fn parse_assert(input: ParseStream) -> Result<Self> {
+            let mut exprs = Vec::new();
+            let cond: Expr = input.parse()?;
+            exprs.push(cond);
+            if input.parse::<Option<Token![,]>>()?.is_some() && !input.is_empty() {
+                let format_args: FormatArgs = input.parse()?;
+                exprs.push(format_args.format_string);
+                exprs.extend(format_args.args);
+            }
+            Ok(KnownMacro::Exprs(exprs))
+        }
+
+        fn parse_assert_cmp(input: ParseStream) -> Result<Self> {
+            let mut exprs = Vec::new();
+            let left: Expr = input.parse()?;
+            exprs.push(left);
+            input.parse::<Token![,]>()?;
+            let right: Expr = input.parse()?;
+            exprs.push(right);
+            if input.parse::<Option<Token![,]>>()?.is_some() && !input.is_empty() {
+                let format_args: FormatArgs = input.parse()?;
+                exprs.push(format_args.format_string);
+                exprs.extend(format_args.args);
+            }
+            Ok(KnownMacro::Exprs(exprs))
+        }
+
+        fn parse_cfg(input: ParseStream) -> Result<Self> {
+            fn parse_single(input: ParseStream) -> Result<Cfg> {
+                let ident: Ident = input.parse()?;
+                if input.peek(token::Paren) && (ident == "all" || ident == "any") {
+                    let content;
+                    parenthesized!(content in input);
+                    let list = content.call(parse_multiple)?;
+                    Ok(Cfg::Call(ident, list))
+                } else if input.peek(token::Paren) && ident == "not" {
+                    let content;
+                    parenthesized!(content in input);
+                    let cfg = content.call(parse_single)?;
+                    content.parse::<Option<Token![,]>>()?;
+                    Ok(Cfg::Call(ident, vec![cfg]))
+                } else if input.peek(Token![=]) {
+                    input.parse::<Token![=]>()?;
+                    let string: Lit = input.parse()?;
+                    Ok(Cfg::Eq(ident, Some(string)))
+                } else {
+                    Ok(Cfg::Eq(ident, None))
+                }
+            }
+
+            fn parse_multiple(input: ParseStream) -> Result<Vec<Cfg>> {
+                let mut vec = Vec::new();
+                while !input.is_empty() {
+                    let cfg = input.call(parse_single)?;
+                    vec.push(cfg);
+                    if input.is_empty() {
+                        break;
+                    }
+                    input.parse::<Token![,]>()?;
+                }
+                Ok(vec)
+            }
+
+            let cfg = input.call(parse_single)?;
+            input.parse::<Option<Token![,]>>()?;
+            Ok(KnownMacro::Cfg(cfg))
+        }
+
+        fn parse_env(input: ParseStream) -> Result<Self> {
+            let mut exprs = Vec::new();
+            let name: Expr = input.parse()?;
+            exprs.push(name);
+            if input.parse::<Option<Token![,]>>()?.is_some() && !input.is_empty() {
+                let error_msg: Expr = input.parse()?;
+                exprs.push(error_msg);
+                input.parse::<Option<Token![,]>>()?;
+            }
+            Ok(KnownMacro::Exprs(exprs))
+        }
+
+        fn parse_format_args(input: ParseStream) -> Result<Self> {
+            let format_args: FormatArgs = input.parse()?;
+            let mut exprs = format_args.args;
+            exprs.insert(0, format_args.format_string);
+            Ok(KnownMacro::Exprs(exprs))
+        }
+
+        fn parse_matches(input: ParseStream) -> Result<Self> {
+            let expression: Expr = input.parse()?;
+            input.parse::<Token![,]>()?;
+            let pattern = input.call(Pat::parse_multi_with_leading_vert)?;
+            let guard = if input.parse::<Option<Token![if]>>()?.is_some() {
+                Some(input.parse()?)
+            } else {
+                None
+            };
+            input.parse::<Option<Token![,]>>()?;
+            Ok(KnownMacro::Matches(Matches {
+                expression,
+                pattern,
+                guard,
+            }))
+        }
+
+        fn parse_thread_local(input: ParseStream) -> Result<Self> {
+            let mut items = Vec::new();
+            while !input.is_empty() {
+                let attrs = input.call(Attribute::parse_outer)?;
+                let vis: Visibility = input.parse()?;
+                input.parse::<Token![static]>()?;
+                let name: Ident = input.parse()?;
+                input.parse::<Token![:]>()?;
+                let ty: Type = input.parse()?;
+                input.parse::<Token![=]>()?;
+                let init: Expr = input.parse()?;
+                if input.is_empty() {
+                    break;
+                }
+                input.parse::<Token![;]>()?;
+                items.push(ThreadLocal {
+                    attrs,
+                    vis,
+                    name,
+                    ty,
+                    init,
+                });
+            }
+            Ok(KnownMacro::ThreadLocal(items))
+        }
+
+        fn parse_vec(input: ParseStream) -> Result<Self> {
+            if input.is_empty() {
+                return Ok(KnownMacro::VecArray(Punctuated::new()));
+            }
+            let first: Expr = input.parse()?;
+            if input.parse::<Option<Token![;]>>()?.is_some() {
+                let len: Expr = input.parse()?;
+                Ok(KnownMacro::VecRepeat {
+                    elem: first,
+                    n: len,
+                })
+            } else {
+                let mut vec = Punctuated::new();
+                vec.push_value(first);
+                while !input.is_empty() {
+                    let comma: Token![,] = input.parse()?;
+                    vec.push_punct(comma);
+                    if input.is_empty() {
+                        break;
+                    }
+                    let next: Expr = input.parse()?;
+                    vec.push_value(next);
+                }
+                Ok(KnownMacro::VecArray(vec))
+            }
+        }
+
+        fn parse_write(input: ParseStream) -> Result<Self> {
+            let mut exprs = Vec::new();
+            let dst: Expr = input.parse()?;
+            exprs.push(dst);
+            input.parse::<Token![,]>()?;
+            let format_args: FormatArgs = input.parse()?;
+            exprs.push(format_args.format_string);
+            exprs.extend(format_args.args);
+            Ok(KnownMacro::Exprs(exprs))
+        }
+
+        fn parse_writeln(input: ParseStream) -> Result<Self> {
+            let mut exprs = Vec::new();
+            let dst: Expr = input.parse()?;
+            exprs.push(dst);
+            if input.parse::<Option<Token![,]>>()?.is_some() && !input.is_empty() {
+                let format_args: FormatArgs = input.parse()?;
+                exprs.push(format_args.format_string);
+                exprs.extend(format_args.args);
+            }
+            Ok(KnownMacro::Exprs(exprs))
+        }
+    }
+
+    impl Printer {
+        pub fn standard_library_macro(&mut self, mac: &Macro, mut semicolon: bool) -> bool {
+            let name = mac.path.segments.last().unwrap().ident.to_string();
+            let parser = match name.as_str() {
+                "addr_of" | "addr_of_mut" => KnownMacro::parse_expr,
+                "assert" | "debug_assert" => KnownMacro::parse_assert,
+                "assert_eq" | "assert_ne" | "debug_assert_eq" | "debug_assert_ne" => {
+                    KnownMacro::parse_assert_cmp
+                }
+                "cfg" => KnownMacro::parse_cfg,
+                "compile_error" | "include" | "include_bytes" | "include_str" | "option_env" => {
+                    KnownMacro::parse_expr_comma
+                }
+                "concat" | "concat_bytes" | "dbg" => KnownMacro::parse_exprs,
+                "const_format_args" | "eprint" | "eprintln" | "format" | "format_args"
+                | "format_args_nl" | "panic" | "print" | "println" | "todo" | "unimplemented"
+                | "unreachable" => KnownMacro::parse_format_args,
+                "env" => KnownMacro::parse_env,
+                "matches" => KnownMacro::parse_matches,
+                "thread_local" => KnownMacro::parse_thread_local,
+                "vec" => KnownMacro::parse_vec,
+                "write" => KnownMacro::parse_write,
+                "writeln" => KnownMacro::parse_writeln,
+                _ => return false,
+            };
+
+            let known_macro = match parser.parse2(mac.tokens.clone()) {
+                Ok(known_macro) => known_macro,
+                Err(_) => return false,
+            };
+
+            self.path(&mac.path, PathKind::Simple);
+            self.word("!");
+
+            match &known_macro {
+                KnownMacro::Expr(expr) => {
+                    self.word("(");
+                    self.cbox(INDENT);
+                    self.zerobreak();
+                    self.expr(expr, FixupContext::NONE);
+                    self.zerobreak();
+                    self.offset(-INDENT);
+                    self.end();
+                    self.word(")");
+                }
+                KnownMacro::Exprs(exprs) => {
+                    self.word("(");
+                    self.cbox(INDENT);
+
self.zerobreak(); + for elem in exprs.iter().delimited() { + self.expr(&elem, FixupContext::NONE); + self.trailing_comma(elem.is_last); + } + self.offset(-INDENT); + self.end(); + self.word(")"); + } + KnownMacro::Cfg(cfg) => { + self.word("("); + self.cfg(cfg); + self.word(")"); + } + KnownMacro::Matches(matches) => { + self.word("("); + self.cbox(INDENT); + self.zerobreak(); + self.expr(&matches.expression, FixupContext::NONE); + self.word(","); + self.space(); + self.pat(&matches.pattern); + if let Some(guard) = &matches.guard { + self.space(); + self.word("if "); + self.expr(guard, FixupContext::NONE); + } + self.zerobreak(); + self.offset(-INDENT); + self.end(); + self.word(")"); + } + KnownMacro::ThreadLocal(items) => { + self.word(" {"); + self.cbox(INDENT); + self.hardbreak_if_nonempty(); + for item in items { + self.outer_attrs(&item.attrs); + self.cbox(0); + self.visibility(&item.vis); + self.word("static "); + self.ident(&item.name); + self.word(": "); + self.ty(&item.ty); + self.word(" = "); + self.neverbreak(); + self.expr(&item.init, FixupContext::NONE); + self.word(";"); + self.end(); + self.hardbreak(); + } + self.offset(-INDENT); + self.end(); + self.word("}"); + semicolon = false; + } + KnownMacro::VecArray(vec) => { + if vec.is_empty() { + self.word("[]"); + } else if expr::simple_array(vec) { + self.cbox(INDENT); + self.word("["); + self.zerobreak(); + self.ibox(0); + for elem in vec.iter().delimited() { + self.expr(&elem, FixupContext::NONE); + if !elem.is_last { + self.word(","); + self.space(); + } + } + self.end(); + self.trailing_comma(true); + self.offset(-INDENT); + self.word("]"); + self.end(); + } else { + self.word("["); + self.cbox(INDENT); + self.zerobreak(); + for elem in vec.iter().delimited() { + self.expr(&elem, FixupContext::NONE); + self.trailing_comma(elem.is_last); + } + self.offset(-INDENT); + self.end(); + self.word("]"); + } + } + KnownMacro::VecRepeat { elem, n } => { + self.word("["); + self.cbox(INDENT); + self.zerobreak(); + self.expr(elem, FixupContext::NONE); + self.word(";"); + self.space(); + self.expr(n, FixupContext::NONE); + self.zerobreak(); + self.offset(-INDENT); + self.end(); + self.word("]"); + } + } + + if semicolon { + self.word(";"); + } + + true + } + + fn cfg(&mut self, cfg: &Cfg) { + match cfg { + Cfg::Eq(ident, value) => { + self.ident(ident); + if let Some(value) = value { + self.word(" = "); + self.lit(value); + } + } + Cfg::Call(ident, args) => { + self.ident(ident); + self.word("("); + self.cbox(INDENT); + self.zerobreak(); + for arg in args.iter().delimited() { + self.cfg(&arg); + self.trailing_comma(arg.is_last); + } + self.offset(-INDENT); + self.end(); + self.word(")"); + } + } + } + } +} diff --git a/vendor/prettyplease/src/pat.rs b/vendor/prettyplease/src/pat.rs new file mode 100644 index 00000000000000..23a38cbb3396f1 --- /dev/null +++ b/vendor/prettyplease/src/pat.rs @@ -0,0 +1,254 @@ +use crate::algorithm::Printer; +use crate::fixup::FixupContext; +use crate::iter::IterDelimited; +use crate::path::PathKind; +use crate::INDENT; +use proc_macro2::TokenStream; +use syn::{ + FieldPat, Pat, PatIdent, PatOr, PatParen, PatReference, PatRest, PatSlice, PatStruct, PatTuple, + PatTupleStruct, PatType, PatWild, +}; + +impl Printer { + pub fn pat(&mut self, pat: &Pat) { + match pat { + #![cfg_attr(all(test, exhaustive), deny(non_exhaustive_omitted_patterns))] + Pat::Const(pat) => self.expr_const(pat), + Pat::Ident(pat) => self.pat_ident(pat), + Pat::Lit(pat) => self.expr_lit(pat), + Pat::Macro(pat) => self.expr_macro(pat), 
+ Pat::Or(pat) => self.pat_or(pat), + Pat::Paren(pat) => self.pat_paren(pat), + Pat::Path(pat) => self.expr_path(pat), + Pat::Range(pat) => self.expr_range(pat, FixupContext::NONE), + Pat::Reference(pat) => self.pat_reference(pat), + Pat::Rest(pat) => self.pat_rest(pat), + Pat::Slice(pat) => self.pat_slice(pat), + Pat::Struct(pat) => self.pat_struct(pat), + Pat::Tuple(pat) => self.pat_tuple(pat), + Pat::TupleStruct(pat) => self.pat_tuple_struct(pat), + Pat::Type(pat) => self.pat_type(pat), + Pat::Verbatim(pat) => self.pat_verbatim(pat), + Pat::Wild(pat) => self.pat_wild(pat), + _ => unimplemented!("unknown Pat"), + } + } + + fn pat_ident(&mut self, pat: &PatIdent) { + self.outer_attrs(&pat.attrs); + if pat.by_ref.is_some() { + self.word("ref "); + } + if pat.mutability.is_some() { + self.word("mut "); + } + self.ident(&pat.ident); + if let Some((_at_token, subpat)) = &pat.subpat { + self.word(" @ "); + self.pat(subpat); + } + } + + fn pat_or(&mut self, pat: &PatOr) { + self.outer_attrs(&pat.attrs); + let mut consistent_break = false; + for case in &pat.cases { + match case { + Pat::Lit(_) | Pat::Wild(_) => {} + _ => { + consistent_break = true; + break; + } + } + } + if consistent_break { + self.cbox(0); + } else { + self.ibox(0); + } + for case in pat.cases.iter().delimited() { + if !case.is_first { + self.space(); + self.word("| "); + } + self.pat(&case); + } + self.end(); + } + + fn pat_paren(&mut self, pat: &PatParen) { + self.outer_attrs(&pat.attrs); + self.word("("); + self.pat(&pat.pat); + self.word(")"); + } + + fn pat_reference(&mut self, pat: &PatReference) { + self.outer_attrs(&pat.attrs); + self.word("&"); + if pat.mutability.is_some() { + self.word("mut "); + } + self.pat(&pat.pat); + } + + fn pat_rest(&mut self, pat: &PatRest) { + self.outer_attrs(&pat.attrs); + self.word(".."); + } + + fn pat_slice(&mut self, pat: &PatSlice) { + self.outer_attrs(&pat.attrs); + self.word("["); + for elem in pat.elems.iter().delimited() { + self.pat(&elem); + self.trailing_comma(elem.is_last); + } + self.word("]"); + } + + fn pat_struct(&mut self, pat: &PatStruct) { + self.outer_attrs(&pat.attrs); + self.cbox(INDENT); + self.path(&pat.path, PathKind::Expr); + self.word(" {"); + self.space_if_nonempty(); + for field in pat.fields.iter().delimited() { + self.field_pat(&field); + self.trailing_comma_or_space(field.is_last && pat.rest.is_none()); + } + if let Some(rest) = &pat.rest { + self.pat_rest(rest); + self.space(); + } + self.offset(-INDENT); + self.end(); + self.word("}"); + } + + fn pat_tuple(&mut self, pat: &PatTuple) { + self.outer_attrs(&pat.attrs); + self.word("("); + self.cbox(INDENT); + self.zerobreak(); + for elem in pat.elems.iter().delimited() { + self.pat(&elem); + if pat.elems.len() == 1 { + if pat.elems.trailing_punct() { + self.word(","); + } + self.zerobreak(); + } else { + self.trailing_comma(elem.is_last); + } + } + self.offset(-INDENT); + self.end(); + self.word(")"); + } + + fn pat_tuple_struct(&mut self, pat: &PatTupleStruct) { + self.outer_attrs(&pat.attrs); + self.path(&pat.path, PathKind::Expr); + self.word("("); + self.cbox(INDENT); + self.zerobreak(); + for elem in pat.elems.iter().delimited() { + self.pat(&elem); + self.trailing_comma(elem.is_last); + } + self.offset(-INDENT); + self.end(); + self.word(")"); + } + + pub fn pat_type(&mut self, pat: &PatType) { + self.outer_attrs(&pat.attrs); + self.pat(&pat.pat); + self.word(": "); + self.ty(&pat.ty); + } + + #[cfg(not(feature = "verbatim"))] + fn pat_verbatim(&mut self, pat: &TokenStream) { + 
unimplemented!("Pat::Verbatim `{}`", pat); + } + + #[cfg(feature = "verbatim")] + fn pat_verbatim(&mut self, tokens: &TokenStream) { + use syn::parse::{Parse, ParseStream, Result}; + use syn::{braced, Attribute, Block, Token}; + + enum PatVerbatim { + Ellipsis, + Box(Pat), + Const(PatConst), + } + + struct PatConst { + attrs: Vec, + block: Block, + } + + impl Parse for PatVerbatim { + fn parse(input: ParseStream) -> Result { + let lookahead = input.lookahead1(); + if lookahead.peek(Token![box]) { + input.parse::()?; + let inner = Pat::parse_single(input)?; + Ok(PatVerbatim::Box(inner)) + } else if lookahead.peek(Token![const]) { + input.parse::()?; + let content; + let brace_token = braced!(content in input); + let attrs = content.call(Attribute::parse_inner)?; + let stmts = content.call(Block::parse_within)?; + Ok(PatVerbatim::Const(PatConst { + attrs, + block: Block { brace_token, stmts }, + })) + } else if lookahead.peek(Token![...]) { + input.parse::()?; + Ok(PatVerbatim::Ellipsis) + } else { + Err(lookahead.error()) + } + } + } + + let pat: PatVerbatim = match syn::parse2(tokens.clone()) { + Ok(pat) => pat, + Err(_) => unimplemented!("Pat::Verbatim `{}`", tokens), + }; + + match pat { + PatVerbatim::Ellipsis => { + self.word("..."); + } + PatVerbatim::Box(pat) => { + self.word("box "); + self.pat(&pat); + } + PatVerbatim::Const(pat) => { + self.word("const "); + self.cbox(INDENT); + self.small_block(&pat.block, &pat.attrs); + self.end(); + } + } + } + + fn pat_wild(&mut self, pat: &PatWild) { + self.outer_attrs(&pat.attrs); + self.word("_"); + } + + fn field_pat(&mut self, field_pat: &FieldPat) { + self.outer_attrs(&field_pat.attrs); + if field_pat.colon_token.is_some() { + self.member(&field_pat.member); + self.word(": "); + } + self.pat(&field_pat.pat); + } +} diff --git a/vendor/prettyplease/src/path.rs b/vendor/prettyplease/src/path.rs new file mode 100644 index 00000000000000..44428cc60f78ab --- /dev/null +++ b/vendor/prettyplease/src/path.rs @@ -0,0 +1,194 @@ +use crate::algorithm::Printer; +use crate::iter::IterDelimited; +use crate::INDENT; +use std::ptr; +use syn::{ + AngleBracketedGenericArguments, AssocConst, AssocType, Constraint, GenericArgument, + ParenthesizedGenericArguments, Path, PathArguments, PathSegment, QSelf, +}; + +#[derive(Copy, Clone, PartialEq)] +pub enum PathKind { + // a::B + Simple, + // a::B + Type, + // a::B:: + Expr, +} + +impl Printer { + pub fn path(&mut self, path: &Path, kind: PathKind) { + assert!(!path.segments.is_empty()); + for segment in path.segments.iter().delimited() { + if !segment.is_first || path.leading_colon.is_some() { + self.word("::"); + } + self.path_segment(&segment, kind); + } + } + + pub fn path_segment(&mut self, segment: &PathSegment, kind: PathKind) { + self.ident(&segment.ident); + self.path_arguments(&segment.arguments, kind); + } + + fn path_arguments(&mut self, arguments: &PathArguments, kind: PathKind) { + match arguments { + PathArguments::None => {} + PathArguments::AngleBracketed(arguments) => { + self.angle_bracketed_generic_arguments(arguments, kind); + } + PathArguments::Parenthesized(arguments) => { + self.parenthesized_generic_arguments(arguments); + } + } + } + + fn generic_argument(&mut self, arg: &GenericArgument) { + match arg { + #![cfg_attr(all(test, exhaustive), deny(non_exhaustive_omitted_patterns))] + GenericArgument::Lifetime(lifetime) => self.lifetime(lifetime), + GenericArgument::Type(ty) => self.ty(ty), + GenericArgument::Const(expr) => self.const_argument(expr), + GenericArgument::AssocType(assoc) 
=> self.assoc_type(assoc), + GenericArgument::AssocConst(assoc) => self.assoc_const(assoc), + GenericArgument::Constraint(constraint) => self.constraint(constraint), + _ => unimplemented!("unknown GenericArgument"), + } + } + + pub fn angle_bracketed_generic_arguments( + &mut self, + generic: &AngleBracketedGenericArguments, + path_kind: PathKind, + ) { + if generic.args.is_empty() || path_kind == PathKind::Simple { + return; + } + + if path_kind == PathKind::Expr { + self.word("::"); + } + self.word("<"); + self.cbox(INDENT); + self.zerobreak(); + + // Print lifetimes before types/consts/bindings, regardless of their + // order in self.args. + #[derive(Ord, PartialOrd, Eq, PartialEq)] + enum Group { + First, + Second, + } + fn group(arg: &GenericArgument) -> Group { + match arg { + #![cfg_attr(all(test, exhaustive), deny(non_exhaustive_omitted_patterns))] + GenericArgument::Lifetime(_) => Group::First, + GenericArgument::Type(_) + | GenericArgument::Const(_) + | GenericArgument::AssocType(_) + | GenericArgument::AssocConst(_) + | GenericArgument::Constraint(_) => Group::Second, + _ => Group::Second, + } + } + let last = generic.args.iter().max_by_key(|param| group(param)); + for current_group in [Group::First, Group::Second] { + for arg in &generic.args { + if group(arg) == current_group { + self.generic_argument(arg); + self.trailing_comma(ptr::eq(arg, last.unwrap())); + } + } + } + + self.offset(-INDENT); + self.end(); + self.word(">"); + } + + fn assoc_type(&mut self, assoc: &AssocType) { + self.ident(&assoc.ident); + if let Some(generics) = &assoc.generics { + self.angle_bracketed_generic_arguments(generics, PathKind::Type); + } + self.word(" = "); + self.ty(&assoc.ty); + } + + fn assoc_const(&mut self, assoc: &AssocConst) { + self.ident(&assoc.ident); + if let Some(generics) = &assoc.generics { + self.angle_bracketed_generic_arguments(generics, PathKind::Type); + } + self.word(" = "); + self.const_argument(&assoc.value); + } + + fn constraint(&mut self, constraint: &Constraint) { + self.ident(&constraint.ident); + if let Some(generics) = &constraint.generics { + self.angle_bracketed_generic_arguments(generics, PathKind::Type); + } + self.ibox(INDENT); + for bound in constraint.bounds.iter().delimited() { + if bound.is_first { + self.word(": "); + } else { + self.space(); + self.word("+ "); + } + self.type_param_bound(&bound); + } + self.end(); + } + + fn parenthesized_generic_arguments(&mut self, arguments: &ParenthesizedGenericArguments) { + self.cbox(INDENT); + self.word("("); + self.zerobreak(); + for ty in arguments.inputs.iter().delimited() { + self.ty(&ty); + self.trailing_comma(ty.is_last); + } + self.offset(-INDENT); + self.word(")"); + self.return_type(&arguments.output); + self.end(); + } + + pub fn qpath(&mut self, qself: &Option, path: &Path, kind: PathKind) { + let qself = if let Some(qself) = qself { + qself + } else { + self.path(path, kind); + return; + }; + + assert!(qself.position < path.segments.len()); + + self.word("<"); + self.ty(&qself.ty); + + let mut segments = path.segments.iter(); + if qself.position > 0 { + self.word(" as "); + for segment in segments.by_ref().take(qself.position).delimited() { + if !segment.is_first || path.leading_colon.is_some() { + self.word("::"); + } + self.path_segment(&segment, PathKind::Type); + if segment.is_last { + self.word(">"); + } + } + } else { + self.word(">"); + } + for segment in segments { + self.word("::"); + self.path_segment(segment, kind); + } + } +} diff --git a/vendor/prettyplease/src/precedence.rs 
b/vendor/prettyplease/src/precedence.rs new file mode 100644 index 00000000000000..03117d56de7537 --- /dev/null +++ b/vendor/prettyplease/src/precedence.rs @@ -0,0 +1,148 @@ +use syn::{ + AttrStyle, Attribute, BinOp, Expr, ExprArray, ExprAsync, ExprAwait, ExprBlock, ExprBreak, + ExprCall, ExprConst, ExprContinue, ExprField, ExprForLoop, ExprIf, ExprIndex, ExprInfer, + ExprLit, ExprLoop, ExprMacro, ExprMatch, ExprMethodCall, ExprParen, ExprPath, ExprRepeat, + ExprReturn, ExprStruct, ExprTry, ExprTryBlock, ExprTuple, ExprUnsafe, ExprWhile, ExprYield, + ReturnType, +}; + +// Reference: https://doc.rust-lang.org/reference/expressions.html#expression-precedence +#[derive(Copy, Clone, PartialEq, PartialOrd)] +pub enum Precedence { + // return, break, closures + Jump, + // = += -= *= /= %= &= |= ^= <<= >>= + Assign, + // .. ..= + Range, + // || + Or, + // && + And, + // let + Let, + // == != < > <= >= + Compare, + // | + BitOr, + // ^ + BitXor, + // & + BitAnd, + // << >> + Shift, + // + - + Sum, + // * / % + Product, + // as + Cast, + // unary - * ! & &mut + Prefix, + // paths, loops, function calls, array indexing, field expressions, method calls + Unambiguous, +} + +impl Precedence { + pub(crate) const MIN: Self = Precedence::Jump; + + pub(crate) fn of_binop(op: &BinOp) -> Self { + match op { + #![cfg_attr(all(test, exhaustive), deny(non_exhaustive_omitted_patterns))] + BinOp::Add(_) | BinOp::Sub(_) => Precedence::Sum, + BinOp::Mul(_) | BinOp::Div(_) | BinOp::Rem(_) => Precedence::Product, + BinOp::And(_) => Precedence::And, + BinOp::Or(_) => Precedence::Or, + BinOp::BitXor(_) => Precedence::BitXor, + BinOp::BitAnd(_) => Precedence::BitAnd, + BinOp::BitOr(_) => Precedence::BitOr, + BinOp::Shl(_) | BinOp::Shr(_) => Precedence::Shift, + + BinOp::Eq(_) + | BinOp::Lt(_) + | BinOp::Le(_) + | BinOp::Ne(_) + | BinOp::Ge(_) + | BinOp::Gt(_) => Precedence::Compare, + + BinOp::AddAssign(_) + | BinOp::SubAssign(_) + | BinOp::MulAssign(_) + | BinOp::DivAssign(_) + | BinOp::RemAssign(_) + | BinOp::BitXorAssign(_) + | BinOp::BitAndAssign(_) + | BinOp::BitOrAssign(_) + | BinOp::ShlAssign(_) + | BinOp::ShrAssign(_) => Precedence::Assign, + + _ => Precedence::MIN, + } + } + + pub(crate) fn of(e: &Expr) -> Self { + fn prefix_attrs(attrs: &[Attribute]) -> Precedence { + for attr in attrs { + if let AttrStyle::Outer = attr.style { + return Precedence::Prefix; + } + } + Precedence::Unambiguous + } + + match e { + #![cfg_attr(all(test, exhaustive), deny(non_exhaustive_omitted_patterns))] + Expr::Closure(e) => match e.output { + ReturnType::Default => Precedence::Jump, + ReturnType::Type(..) => prefix_attrs(&e.attrs), + }, + + Expr::Break(ExprBreak { expr, .. }) + | Expr::Return(ExprReturn { expr, .. }) + | Expr::Yield(ExprYield { expr, .. }) => match expr { + Some(_) => Precedence::Jump, + None => Precedence::Unambiguous, + }, + + Expr::Assign(_) => Precedence::Assign, + Expr::Range(_) => Precedence::Range, + Expr::Binary(e) => Precedence::of_binop(&e.op), + Expr::Let(_) => Precedence::Let, + Expr::Cast(_) => Precedence::Cast, + Expr::RawAddr(_) | Expr::Reference(_) | Expr::Unary(_) => Precedence::Prefix, + + Expr::Array(ExprArray { attrs, .. }) + | Expr::Async(ExprAsync { attrs, .. }) + | Expr::Await(ExprAwait { attrs, .. }) + | Expr::Block(ExprBlock { attrs, .. }) + | Expr::Call(ExprCall { attrs, .. }) + | Expr::Const(ExprConst { attrs, .. }) + | Expr::Continue(ExprContinue { attrs, .. }) + | Expr::Field(ExprField { attrs, .. }) + | Expr::ForLoop(ExprForLoop { attrs, .. }) + | Expr::If(ExprIf { attrs, .. 
})
+            | Expr::Index(ExprIndex { attrs, .. })
+            | Expr::Infer(ExprInfer { attrs, .. })
+            | Expr::Lit(ExprLit { attrs, .. })
+            | Expr::Loop(ExprLoop { attrs, .. })
+            | Expr::Macro(ExprMacro { attrs, .. })
+            | Expr::Match(ExprMatch { attrs, .. })
+            | Expr::MethodCall(ExprMethodCall { attrs, .. })
+            | Expr::Paren(ExprParen { attrs, .. })
+            | Expr::Path(ExprPath { attrs, .. })
+            | Expr::Repeat(ExprRepeat { attrs, .. })
+            | Expr::Struct(ExprStruct { attrs, .. })
+            | Expr::Try(ExprTry { attrs, .. })
+            | Expr::TryBlock(ExprTryBlock { attrs, .. })
+            | Expr::Tuple(ExprTuple { attrs, .. })
+            | Expr::Unsafe(ExprUnsafe { attrs, .. })
+            | Expr::While(ExprWhile { attrs, .. }) => prefix_attrs(attrs),
+
+            Expr::Group(e) => Precedence::of(&e.expr),
+
+            Expr::Verbatim(_) => Precedence::Unambiguous,
+
+            _ => Precedence::Unambiguous,
+        }
+    }
+}
diff --git a/vendor/prettyplease/src/ring.rs b/vendor/prettyplease/src/ring.rs
new file mode 100644
index 00000000000000..882a988ecd3ad7
--- /dev/null
+++ b/vendor/prettyplease/src/ring.rs
@@ -0,0 +1,81 @@
+use std::collections::VecDeque;
+use std::ops::{Index, IndexMut, Range};
+
+pub struct RingBuffer<T> {
+    data: VecDeque<T>,
+    // Abstract index of data[0] in infinitely sized queue
+    offset: usize,
+}
+
+impl<T> RingBuffer<T> {
+    pub fn new() -> Self {
+        RingBuffer {
+            data: VecDeque::new(),
+            offset: 0,
+        }
+    }
+
+    pub fn is_empty(&self) -> bool {
+        self.data.is_empty()
+    }
+
+    pub fn len(&self) -> usize {
+        self.data.len()
+    }
+
+    pub fn push(&mut self, value: T) -> usize {
+        let index = self.offset + self.data.len();
+        self.data.push_back(value);
+        index
+    }
+
+    pub fn clear(&mut self) {
+        self.data.clear();
+    }
+
+    pub fn index_range(&self) -> Range<usize> {
+        self.offset..self.offset + self.data.len()
+    }
+
+    pub fn first(&self) -> &T {
+        &self.data[0]
+    }
+
+    pub fn first_mut(&mut self) -> &mut T {
+        &mut self.data[0]
+    }
+
+    pub fn pop_first(&mut self) -> T {
+        self.offset += 1;
+        self.data.pop_front().unwrap()
+    }
+
+    pub fn last(&self) -> &T {
+        self.data.back().unwrap()
+    }
+
+    pub fn last_mut(&mut self) -> &mut T {
+        self.data.back_mut().unwrap()
+    }
+
+    pub fn second_last(&self) -> &T {
+        &self.data[self.data.len() - 2]
+    }
+
+    pub fn pop_last(&mut self) {
+        self.data.pop_back().unwrap();
+    }
+}
+
+impl<T> Index<usize> for RingBuffer<T> {
+    type Output = T;
+    fn index(&self, index: usize) -> &Self::Output {
+        &self.data[index.checked_sub(self.offset).unwrap()]
+    }
+}
+
+impl<T> IndexMut<usize> for RingBuffer<T> {
+    fn index_mut(&mut self, index: usize) -> &mut Self::Output {
+        &mut self.data[index.checked_sub(self.offset).unwrap()]
+    }
+}
diff --git a/vendor/prettyplease/src/stmt.rs b/vendor/prettyplease/src/stmt.rs
new file mode 100644
index 00000000000000..ce58200e1977e8
--- /dev/null
+++ b/vendor/prettyplease/src/stmt.rs
@@ -0,0 +1,221 @@
+use crate::algorithm::Printer;
+use crate::classify;
+use crate::expr;
+use crate::fixup::FixupContext;
+use crate::mac;
+use crate::INDENT;
+use syn::{BinOp, Expr, Stmt};
+
+impl Printer {
+    pub fn stmt(&mut self, stmt: &Stmt, is_last: bool) {
+        match stmt {
+            Stmt::Local(local) => {
+                self.outer_attrs(&local.attrs);
+                self.ibox(0);
+                self.word("let ");
+                self.pat(&local.pat);
+                if let Some(local_init) = &local.init {
+                    self.word(" = ");
+                    self.neverbreak();
+                    self.subexpr(
+                        &local_init.expr,
+                        local_init.diverge.is_some()
+                            && classify::expr_trailing_brace(&local_init.expr),
+                        FixupContext::NONE,
+                    );
+                    if let Some((_else, diverge)) = &local_init.diverge {
+                        self.space();
+                        self.word("else ");
+                        self.end();
+                        self.neverbreak();
+                        self.cbox(INDENT);
+                        if let 
Some(expr) = expr::simple_block(diverge) { + self.small_block(&expr.block, &[]); + } else { + self.expr_as_small_block(diverge, INDENT); + } + } + } + self.end(); + self.word(";"); + self.hardbreak(); + } + Stmt::Item(item) => self.item(item), + Stmt::Expr(expr, None) => { + if break_after(expr) { + self.ibox(0); + self.expr_beginning_of_line(expr, false, true, FixupContext::new_stmt()); + if add_semi(expr) { + self.word(";"); + } + self.end(); + self.hardbreak(); + } else { + self.expr_beginning_of_line(expr, false, true, FixupContext::new_stmt()); + } + } + Stmt::Expr(expr, Some(_semi)) => { + if let Expr::Verbatim(tokens) = expr { + if tokens.is_empty() { + return; + } + } + self.ibox(0); + self.expr_beginning_of_line(expr, false, true, FixupContext::new_stmt()); + if !remove_semi(expr) { + self.word(";"); + } + self.end(); + self.hardbreak(); + } + Stmt::Macro(stmt) => { + self.outer_attrs(&stmt.attrs); + let semicolon = stmt.semi_token.is_some() + || !is_last && mac::requires_semi(&stmt.mac.delimiter); + self.mac(&stmt.mac, None, semicolon); + self.hardbreak(); + } + } + } +} + +pub fn add_semi(expr: &Expr) -> bool { + match expr { + #![cfg_attr(all(test, exhaustive), deny(non_exhaustive_omitted_patterns))] + Expr::Assign(_) | Expr::Break(_) | Expr::Continue(_) | Expr::Return(_) | Expr::Yield(_) => { + true + } + Expr::Binary(expr) => + { + match expr.op { + #![cfg_attr(all(test, exhaustive), deny(non_exhaustive_omitted_patterns))] + BinOp::AddAssign(_) + | BinOp::SubAssign(_) + | BinOp::MulAssign(_) + | BinOp::DivAssign(_) + | BinOp::RemAssign(_) + | BinOp::BitXorAssign(_) + | BinOp::BitAndAssign(_) + | BinOp::BitOrAssign(_) + | BinOp::ShlAssign(_) + | BinOp::ShrAssign(_) => true, + BinOp::Add(_) + | BinOp::Sub(_) + | BinOp::Mul(_) + | BinOp::Div(_) + | BinOp::Rem(_) + | BinOp::And(_) + | BinOp::Or(_) + | BinOp::BitXor(_) + | BinOp::BitAnd(_) + | BinOp::BitOr(_) + | BinOp::Shl(_) + | BinOp::Shr(_) + | BinOp::Eq(_) + | BinOp::Lt(_) + | BinOp::Le(_) + | BinOp::Ne(_) + | BinOp::Ge(_) + | BinOp::Gt(_) => false, + _ => unimplemented!("unknown BinOp"), + } + } + Expr::Group(group) => add_semi(&group.expr), + + Expr::Array(_) + | Expr::Async(_) + | Expr::Await(_) + | Expr::Block(_) + | Expr::Call(_) + | Expr::Cast(_) + | Expr::Closure(_) + | Expr::Const(_) + | Expr::Field(_) + | Expr::ForLoop(_) + | Expr::If(_) + | Expr::Index(_) + | Expr::Infer(_) + | Expr::Let(_) + | Expr::Lit(_) + | Expr::Loop(_) + | Expr::Macro(_) + | Expr::Match(_) + | Expr::MethodCall(_) + | Expr::Paren(_) + | Expr::Path(_) + | Expr::Range(_) + | Expr::RawAddr(_) + | Expr::Reference(_) + | Expr::Repeat(_) + | Expr::Struct(_) + | Expr::Try(_) + | Expr::TryBlock(_) + | Expr::Tuple(_) + | Expr::Unary(_) + | Expr::Unsafe(_) + | Expr::Verbatim(_) + | Expr::While(_) => false, + + _ => false, + } +} + +pub fn break_after(expr: &Expr) -> bool { + if let Expr::Group(group) = expr { + if let Expr::Verbatim(verbatim) = group.expr.as_ref() { + return !verbatim.is_empty(); + } + } + true +} + +fn remove_semi(expr: &Expr) -> bool { + match expr { + #![cfg_attr(all(test, exhaustive), deny(non_exhaustive_omitted_patterns))] + Expr::ForLoop(_) | Expr::While(_) => true, + Expr::Group(group) => remove_semi(&group.expr), + Expr::If(expr) => match &expr.else_branch { + Some((_else_token, else_branch)) => remove_semi(else_branch), + None => true, + }, + + Expr::Array(_) + | Expr::Assign(_) + | Expr::Async(_) + | Expr::Await(_) + | Expr::Binary(_) + | Expr::Block(_) + | Expr::Break(_) + | Expr::Call(_) + | Expr::Cast(_) + | Expr::Closure(_) 
+ | Expr::Continue(_) + | Expr::Const(_) + | Expr::Field(_) + | Expr::Index(_) + | Expr::Infer(_) + | Expr::Let(_) + | Expr::Lit(_) + | Expr::Loop(_) + | Expr::Macro(_) + | Expr::Match(_) + | Expr::MethodCall(_) + | Expr::Paren(_) + | Expr::Path(_) + | Expr::Range(_) + | Expr::RawAddr(_) + | Expr::Reference(_) + | Expr::Repeat(_) + | Expr::Return(_) + | Expr::Struct(_) + | Expr::Try(_) + | Expr::TryBlock(_) + | Expr::Tuple(_) + | Expr::Unary(_) + | Expr::Unsafe(_) + | Expr::Verbatim(_) + | Expr::Yield(_) => false, + + _ => false, + } +} diff --git a/vendor/prettyplease/src/token.rs b/vendor/prettyplease/src/token.rs new file mode 100644 index 00000000000000..e41fd728a6f536 --- /dev/null +++ b/vendor/prettyplease/src/token.rs @@ -0,0 +1,80 @@ +use crate::algorithm::Printer; +use proc_macro2::{Delimiter, Ident, Literal, Spacing, TokenStream, TokenTree}; + +impl Printer { + pub fn single_token(&mut self, token: Token, group_contents: fn(&mut Self, TokenStream)) { + match token { + Token::Group(delimiter, stream) => self.token_group(delimiter, stream, group_contents), + Token::Ident(ident) => self.ident(&ident), + Token::Punct(ch, _spacing) => self.token_punct(ch), + Token::Literal(literal) => self.token_literal(&literal), + } + } + + fn token_group( + &mut self, + delimiter: Delimiter, + stream: TokenStream, + group_contents: fn(&mut Self, TokenStream), + ) { + self.delimiter_open(delimiter); + if !stream.is_empty() { + if delimiter == Delimiter::Brace { + self.space(); + } + group_contents(self, stream); + if delimiter == Delimiter::Brace { + self.space(); + } + } + self.delimiter_close(delimiter); + } + + pub fn ident(&mut self, ident: &Ident) { + self.word(ident.to_string()); + } + + pub fn token_punct(&mut self, ch: char) { + self.word(ch.to_string()); + } + + pub fn token_literal(&mut self, literal: &Literal) { + self.word(literal.to_string()); + } + + pub fn delimiter_open(&mut self, delimiter: Delimiter) { + self.word(match delimiter { + Delimiter::Parenthesis => "(", + Delimiter::Brace => "{", + Delimiter::Bracket => "[", + Delimiter::None => return, + }); + } + + pub fn delimiter_close(&mut self, delimiter: Delimiter) { + self.word(match delimiter { + Delimiter::Parenthesis => ")", + Delimiter::Brace => "}", + Delimiter::Bracket => "]", + Delimiter::None => return, + }); + } +} + +pub enum Token { + Group(Delimiter, TokenStream), + Ident(Ident), + Punct(char, Spacing), + Literal(Literal), +} + +impl From for Token { + fn from(tt: TokenTree) -> Self { + match tt { + TokenTree::Group(group) => Token::Group(group.delimiter(), group.stream()), + TokenTree::Ident(ident) => Token::Ident(ident), + TokenTree::Punct(punct) => Token::Punct(punct.as_char(), punct.spacing()), + TokenTree::Literal(literal) => Token::Literal(literal), + } + } +} diff --git a/vendor/prettyplease/src/ty.rs b/vendor/prettyplease/src/ty.rs new file mode 100644 index 00000000000000..36cd56879def61 --- /dev/null +++ b/vendor/prettyplease/src/ty.rs @@ -0,0 +1,326 @@ +use crate::algorithm::Printer; +use crate::fixup::FixupContext; +use crate::iter::IterDelimited; +use crate::path::PathKind; +use crate::INDENT; +use proc_macro2::TokenStream; +use syn::{ + Abi, BareFnArg, BareVariadic, ReturnType, Type, TypeArray, TypeBareFn, TypeGroup, + TypeImplTrait, TypeInfer, TypeMacro, TypeNever, TypeParen, TypePath, TypePtr, TypeReference, + TypeSlice, TypeTraitObject, TypeTuple, +}; + +impl Printer { + pub fn ty(&mut self, ty: &Type) { + match ty { + #![cfg_attr(all(test, exhaustive), deny(non_exhaustive_omitted_patterns))] + 
Type::Array(ty) => self.type_array(ty), + Type::BareFn(ty) => self.type_bare_fn(ty), + Type::Group(ty) => self.type_group(ty), + Type::ImplTrait(ty) => self.type_impl_trait(ty), + Type::Infer(ty) => self.type_infer(ty), + Type::Macro(ty) => self.type_macro(ty), + Type::Never(ty) => self.type_never(ty), + Type::Paren(ty) => self.type_paren(ty), + Type::Path(ty) => self.type_path(ty), + Type::Ptr(ty) => self.type_ptr(ty), + Type::Reference(ty) => self.type_reference(ty), + Type::Slice(ty) => self.type_slice(ty), + Type::TraitObject(ty) => self.type_trait_object(ty), + Type::Tuple(ty) => self.type_tuple(ty), + Type::Verbatim(ty) => self.type_verbatim(ty), + _ => unimplemented!("unknown Type"), + } + } + + fn type_array(&mut self, ty: &TypeArray) { + self.word("["); + self.ty(&ty.elem); + self.word("; "); + self.expr(&ty.len, FixupContext::NONE); + self.word("]"); + } + + fn type_bare_fn(&mut self, ty: &TypeBareFn) { + if let Some(bound_lifetimes) = &ty.lifetimes { + self.bound_lifetimes(bound_lifetimes); + } + if ty.unsafety.is_some() { + self.word("unsafe "); + } + if let Some(abi) = &ty.abi { + self.abi(abi); + } + self.word("fn("); + self.cbox(INDENT); + self.zerobreak(); + for bare_fn_arg in ty.inputs.iter().delimited() { + self.bare_fn_arg(&bare_fn_arg); + self.trailing_comma(bare_fn_arg.is_last && ty.variadic.is_none()); + } + if let Some(variadic) = &ty.variadic { + self.bare_variadic(variadic); + self.zerobreak(); + } + self.offset(-INDENT); + self.end(); + self.word(")"); + self.return_type(&ty.output); + } + + fn type_group(&mut self, ty: &TypeGroup) { + self.ty(&ty.elem); + } + + fn type_impl_trait(&mut self, ty: &TypeImplTrait) { + self.word("impl "); + for type_param_bound in ty.bounds.iter().delimited() { + if !type_param_bound.is_first { + self.word(" + "); + } + self.type_param_bound(&type_param_bound); + } + } + + fn type_infer(&mut self, ty: &TypeInfer) { + let _ = ty; + self.word("_"); + } + + fn type_macro(&mut self, ty: &TypeMacro) { + let semicolon = false; + self.mac(&ty.mac, None, semicolon); + } + + fn type_never(&mut self, ty: &TypeNever) { + let _ = ty; + self.word("!"); + } + + fn type_paren(&mut self, ty: &TypeParen) { + self.word("("); + self.ty(&ty.elem); + self.word(")"); + } + + fn type_path(&mut self, ty: &TypePath) { + self.qpath(&ty.qself, &ty.path, PathKind::Type); + } + + fn type_ptr(&mut self, ty: &TypePtr) { + self.word("*"); + if ty.mutability.is_some() { + self.word("mut "); + } else { + self.word("const "); + } + self.ty(&ty.elem); + } + + fn type_reference(&mut self, ty: &TypeReference) { + self.word("&"); + if let Some(lifetime) = &ty.lifetime { + self.lifetime(lifetime); + self.nbsp(); + } + if ty.mutability.is_some() { + self.word("mut "); + } + self.ty(&ty.elem); + } + + fn type_slice(&mut self, ty: &TypeSlice) { + self.word("["); + self.ty(&ty.elem); + self.word("]"); + } + + fn type_trait_object(&mut self, ty: &TypeTraitObject) { + self.word("dyn "); + for type_param_bound in ty.bounds.iter().delimited() { + if !type_param_bound.is_first { + self.word(" + "); + } + self.type_param_bound(&type_param_bound); + } + } + + fn type_tuple(&mut self, ty: &TypeTuple) { + self.word("("); + self.cbox(INDENT); + self.zerobreak(); + for elem in ty.elems.iter().delimited() { + self.ty(&elem); + if ty.elems.len() == 1 { + self.word(","); + self.zerobreak(); + } else { + self.trailing_comma(elem.is_last); + } + } + self.offset(-INDENT); + self.end(); + self.word(")"); + } + + #[cfg(not(feature = "verbatim"))] + fn type_verbatim(&mut self, ty: &TokenStream) { 
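+        // Note: without the "verbatim" cargo feature, Type::Verbatim token streams are
+        // not supported, so this stub simply panics via unimplemented!().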
+ unimplemented!("Type::Verbatim `{}`", ty); + } + + #[cfg(feature = "verbatim")] + fn type_verbatim(&mut self, tokens: &TokenStream) { + use syn::parse::{Parse, ParseStream, Result}; + use syn::punctuated::Punctuated; + use syn::{token, FieldsNamed, Token, TypeParamBound}; + + enum TypeVerbatim { + Ellipsis, + AnonStruct(AnonStruct), + AnonUnion(AnonUnion), + DynStar(DynStar), + MutSelf(MutSelf), + } + + struct AnonStruct { + fields: FieldsNamed, + } + + struct AnonUnion { + fields: FieldsNamed, + } + + struct DynStar { + bounds: Punctuated, + } + + struct MutSelf { + ty: Option, + } + + impl Parse for TypeVerbatim { + fn parse(input: ParseStream) -> Result { + let lookahead = input.lookahead1(); + if lookahead.peek(Token![struct]) { + input.parse::()?; + let fields: FieldsNamed = input.parse()?; + Ok(TypeVerbatim::AnonStruct(AnonStruct { fields })) + } else if lookahead.peek(Token![union]) && input.peek2(token::Brace) { + input.parse::()?; + let fields: FieldsNamed = input.parse()?; + Ok(TypeVerbatim::AnonUnion(AnonUnion { fields })) + } else if lookahead.peek(Token![dyn]) { + input.parse::()?; + input.parse::()?; + let bounds = input.parse_terminated(TypeParamBound::parse, Token![+])?; + Ok(TypeVerbatim::DynStar(DynStar { bounds })) + } else if lookahead.peek(Token![mut]) { + input.parse::()?; + input.parse::()?; + let ty = if input.is_empty() { + None + } else { + input.parse::()?; + let ty: Type = input.parse()?; + Some(ty) + }; + Ok(TypeVerbatim::MutSelf(MutSelf { ty })) + } else if lookahead.peek(Token![...]) { + input.parse::()?; + Ok(TypeVerbatim::Ellipsis) + } else { + Err(lookahead.error()) + } + } + } + + let ty: TypeVerbatim = match syn::parse2(tokens.clone()) { + Ok(ty) => ty, + Err(_) => unimplemented!("Type::Verbatim `{}`", tokens), + }; + + match ty { + TypeVerbatim::Ellipsis => { + self.word("..."); + } + TypeVerbatim::AnonStruct(ty) => { + self.cbox(INDENT); + self.word("struct {"); + self.hardbreak_if_nonempty(); + for field in &ty.fields.named { + self.field(field); + self.word(","); + self.hardbreak(); + } + self.offset(-INDENT); + self.end(); + self.word("}"); + } + TypeVerbatim::AnonUnion(ty) => { + self.cbox(INDENT); + self.word("union {"); + self.hardbreak_if_nonempty(); + for field in &ty.fields.named { + self.field(field); + self.word(","); + self.hardbreak(); + } + self.offset(-INDENT); + self.end(); + self.word("}"); + } + TypeVerbatim::DynStar(ty) => { + self.word("dyn* "); + for type_param_bound in ty.bounds.iter().delimited() { + if !type_param_bound.is_first { + self.word(" + "); + } + self.type_param_bound(&type_param_bound); + } + } + TypeVerbatim::MutSelf(bare_fn_arg) => { + self.word("mut self"); + if let Some(ty) = &bare_fn_arg.ty { + self.word(": "); + self.ty(ty); + } + } + } + } + + pub fn return_type(&mut self, ty: &ReturnType) { + match ty { + ReturnType::Default => {} + ReturnType::Type(_arrow, ty) => { + self.word(" -> "); + self.ty(ty); + } + } + } + + fn bare_fn_arg(&mut self, bare_fn_arg: &BareFnArg) { + self.outer_attrs(&bare_fn_arg.attrs); + if let Some((name, _colon)) = &bare_fn_arg.name { + self.ident(name); + self.word(": "); + } + self.ty(&bare_fn_arg.ty); + } + + fn bare_variadic(&mut self, variadic: &BareVariadic) { + self.outer_attrs(&variadic.attrs); + if let Some((name, _colon)) = &variadic.name { + self.ident(name); + self.word(": "); + } + self.word("..."); + } + + pub fn abi(&mut self, abi: &Abi) { + self.word("extern "); + if let Some(name) = &abi.name { + self.lit_str(name); + self.nbsp(); + } + } +} diff --git 
a/vendor/prettyplease/tests/test.rs b/vendor/prettyplease/tests/test.rs new file mode 100644 index 00000000000000..aa6b849fcfdc15 --- /dev/null +++ b/vendor/prettyplease/tests/test.rs @@ -0,0 +1,51 @@ +use indoc::indoc; +use proc_macro2::{Delimiter, Group, TokenStream}; +use quote::quote; + +#[track_caller] +fn test(tokens: TokenStream, expected: &str) { + let syntax_tree: syn::File = syn::parse2(tokens).unwrap(); + let pretty = prettyplease::unparse(&syntax_tree); + assert_eq!(pretty, expected); +} + +#[test] +fn test_parenthesize_cond() { + let s = Group::new(Delimiter::None, quote!(Struct {})); + test( + quote! { + fn main() { + if #s == #s {} + } + }, + indoc! {" + fn main() { + if (Struct {}) == (Struct {}) {} + } + "}, + ); +} + +#[test] +fn test_parenthesize_match_guard() { + let expr_struct = Group::new(Delimiter::None, quote!(Struct {})); + let expr_binary = Group::new(Delimiter::None, quote!(true && false)); + test( + quote! { + fn main() { + match () { + () if let _ = #expr_struct => {} + () if let _ = #expr_binary => {} + } + } + }, + indoc! {" + fn main() { + match () { + () if let _ = Struct {} => {} + () if let _ = (true && false) => {} + } + } + "}, + ); +} diff --git a/vendor/prettyplease/tests/test_precedence.rs b/vendor/prettyplease/tests/test_precedence.rs new file mode 100644 index 00000000000000..f1eec232ccbe09 --- /dev/null +++ b/vendor/prettyplease/tests/test_precedence.rs @@ -0,0 +1,900 @@ +use proc_macro2::{Ident, Span, TokenStream}; +use quote::ToTokens as _; +use std::mem; +use std::process::ExitCode; +use syn::punctuated::Punctuated; +use syn::visit_mut::{self, VisitMut}; +use syn::{ + token, AngleBracketedGenericArguments, Arm, BinOp, Block, Expr, ExprArray, ExprAssign, + ExprAsync, ExprAwait, ExprBinary, ExprBlock, ExprBreak, ExprCall, ExprCast, ExprClosure, + ExprConst, ExprContinue, ExprField, ExprForLoop, ExprIf, ExprIndex, ExprLet, ExprLit, ExprLoop, + ExprMacro, ExprMatch, ExprMethodCall, ExprPath, ExprRange, ExprRawAddr, ExprReference, + ExprReturn, ExprStruct, ExprTry, ExprTryBlock, ExprUnary, ExprUnsafe, ExprWhile, ExprYield, + File, GenericArgument, Generics, Item, ItemConst, Label, Lifetime, LifetimeParam, Lit, LitInt, + Macro, MacroDelimiter, Member, Pat, PatWild, Path, PathArguments, PathSegment, + PointerMutability, QSelf, RangeLimits, ReturnType, Stmt, StmtMacro, Token, Type, TypeInfer, + TypeParam, TypePath, UnOp, Visibility, +}; + +struct FlattenParens; + +impl VisitMut for FlattenParens { + fn visit_expr_mut(&mut self, e: &mut Expr) { + while let Expr::Paren(paren) = e { + *e = mem::replace(&mut *paren.expr, Expr::PLACEHOLDER); + } + visit_mut::visit_expr_mut(self, e); + } +} + +struct AsIfPrinted; + +impl VisitMut for AsIfPrinted { + fn visit_generics_mut(&mut self, generics: &mut Generics) { + if generics.params.is_empty() { + generics.lt_token = None; + generics.gt_token = None; + } + if let Some(where_clause) = &generics.where_clause { + if where_clause.predicates.is_empty() { + generics.where_clause = None; + } + } + visit_mut::visit_generics_mut(self, generics); + } + + fn visit_lifetime_param_mut(&mut self, param: &mut LifetimeParam) { + if param.bounds.is_empty() { + param.colon_token = None; + } + visit_mut::visit_lifetime_param_mut(self, param); + } + + fn visit_stmt_mut(&mut self, stmt: &mut Stmt) { + if let Stmt::Expr(expr, semi) = stmt { + if let Expr::Macro(e) = expr { + if match e.mac.delimiter { + MacroDelimiter::Brace(_) => true, + MacroDelimiter::Paren(_) | MacroDelimiter::Bracket(_) => semi.is_some(), + } { + let expr = 
match mem::replace(expr, Expr::PLACEHOLDER) { + Expr::Macro(expr) => expr, + _ => unreachable!(), + }; + *stmt = Stmt::Macro(StmtMacro { + attrs: expr.attrs, + mac: expr.mac, + semi_token: *semi, + }); + } + } + } + visit_mut::visit_stmt_mut(self, stmt); + } + + fn visit_type_param_mut(&mut self, param: &mut TypeParam) { + if param.bounds.is_empty() { + param.colon_token = None; + } + visit_mut::visit_type_param_mut(self, param); + } +} + +#[test] +fn test_permutations() -> ExitCode { + fn iter(depth: usize, f: &mut dyn FnMut(Expr)) { + let span = Span::call_site(); + + // Expr::Path + f(Expr::Path(ExprPath { + // `x` + attrs: Vec::new(), + qself: None, + path: Path::from(Ident::new("x", span)), + })); + if false { + f(Expr::Path(ExprPath { + // `x::` + attrs: Vec::new(), + qself: None, + path: Path { + leading_colon: None, + segments: Punctuated::from_iter([PathSegment { + ident: Ident::new("x", span), + arguments: PathArguments::AngleBracketed(AngleBracketedGenericArguments { + colon2_token: Some(Token![::](span)), + lt_token: Token![<](span), + args: Punctuated::from_iter([GenericArgument::Type(Type::Path( + TypePath { + qself: None, + path: Path::from(Ident::new("T", span)), + }, + ))]), + gt_token: Token![>](span), + }), + }]), + }, + })); + f(Expr::Path(ExprPath { + // `::CONST` + attrs: Vec::new(), + qself: Some(QSelf { + lt_token: Token![<](span), + ty: Box::new(Type::Path(TypePath { + qself: None, + path: Path::from(Ident::new("T", span)), + })), + position: 1, + as_token: Some(Token![as](span)), + gt_token: Token![>](span), + }), + path: Path { + leading_colon: None, + segments: Punctuated::from_iter([ + PathSegment::from(Ident::new("Trait", span)), + PathSegment::from(Ident::new("CONST", span)), + ]), + }, + })); + } + + let depth = match depth.checked_sub(1) { + Some(depth) => depth, + None => return, + }; + + // Expr::Assign + iter(depth, &mut |expr| { + iter(0, &mut |simple| { + f(Expr::Assign(ExprAssign { + // `x = $expr` + attrs: Vec::new(), + left: Box::new(simple.clone()), + eq_token: Token![=](span), + right: Box::new(expr.clone()), + })); + f(Expr::Assign(ExprAssign { + // `$expr = x` + attrs: Vec::new(), + left: Box::new(expr.clone()), + eq_token: Token![=](span), + right: Box::new(simple), + })); + }); + }); + + // Expr::Binary + iter(depth, &mut |expr| { + iter(0, &mut |simple| { + for op in [ + BinOp::Add(Token![+](span)), + //BinOp::Sub(Token![-](span)), + //BinOp::Mul(Token![*](span)), + //BinOp::Div(Token![/](span)), + //BinOp::Rem(Token![%](span)), + //BinOp::And(Token![&&](span)), + //BinOp::Or(Token![||](span)), + //BinOp::BitXor(Token![^](span)), + //BinOp::BitAnd(Token![&](span)), + //BinOp::BitOr(Token![|](span)), + //BinOp::Shl(Token![<<](span)), + //BinOp::Shr(Token![>>](span)), + //BinOp::Eq(Token![==](span)), + BinOp::Lt(Token![<](span)), + //BinOp::Le(Token![<=](span)), + //BinOp::Ne(Token![!=](span)), + //BinOp::Ge(Token![>=](span)), + //BinOp::Gt(Token![>](span)), + BinOp::ShlAssign(Token![<<=](span)), + ] { + f(Expr::Binary(ExprBinary { + // `x + $expr` + attrs: Vec::new(), + left: Box::new(simple.clone()), + op, + right: Box::new(expr.clone()), + })); + f(Expr::Binary(ExprBinary { + // `$expr + x` + attrs: Vec::new(), + left: Box::new(expr.clone()), + op, + right: Box::new(simple.clone()), + })); + } + }); + }); + + // Expr::Block + f(Expr::Block(ExprBlock { + // `{}` + attrs: Vec::new(), + label: None, + block: Block { + brace_token: token::Brace(span), + stmts: Vec::new(), + }, + })); + + // Expr::Break + f(Expr::Break(ExprBreak { + // `break` + 
attrs: Vec::new(), + break_token: Token![break](span), + label: None, + expr: None, + })); + iter(depth, &mut |expr| { + f(Expr::Break(ExprBreak { + // `break $expr` + attrs: Vec::new(), + break_token: Token![break](span), + label: None, + expr: Some(Box::new(expr)), + })); + }); + + // Expr::Call + iter(depth, &mut |expr| { + f(Expr::Call(ExprCall { + // `$expr()` + attrs: Vec::new(), + func: Box::new(expr), + paren_token: token::Paren(span), + args: Punctuated::new(), + })); + }); + + // Expr::Cast + iter(depth, &mut |expr| { + f(Expr::Cast(ExprCast { + // `$expr as T` + attrs: Vec::new(), + expr: Box::new(expr), + as_token: Token![as](span), + ty: Box::new(Type::Path(TypePath { + qself: None, + path: Path::from(Ident::new("T", span)), + })), + })); + }); + + // Expr::Closure + iter(depth, &mut |expr| { + f(Expr::Closure(ExprClosure { + // `|| $expr` + attrs: Vec::new(), + lifetimes: None, + constness: None, + movability: None, + asyncness: None, + capture: None, + or1_token: Token![|](span), + inputs: Punctuated::new(), + or2_token: Token![|](span), + output: ReturnType::Default, + body: Box::new(expr), + })); + }); + + // Expr::Field + iter(depth, &mut |expr| { + f(Expr::Field(ExprField { + // `$expr.field` + attrs: Vec::new(), + base: Box::new(expr), + dot_token: Token![.](span), + member: Member::Named(Ident::new("field", span)), + })); + }); + + // Expr::If + iter(depth, &mut |expr| { + f(Expr::If(ExprIf { + // `if $expr {}` + attrs: Vec::new(), + if_token: Token![if](span), + cond: Box::new(expr), + then_branch: Block { + brace_token: token::Brace(span), + stmts: Vec::new(), + }, + else_branch: None, + })); + }); + + // Expr::Let + iter(depth, &mut |expr| { + f(Expr::Let(ExprLet { + attrs: Vec::new(), + let_token: Token![let](span), + pat: Box::new(Pat::Wild(PatWild { + attrs: Vec::new(), + underscore_token: Token![_](span), + })), + eq_token: Token![=](span), + expr: Box::new(expr), + })); + }); + + // Expr::Range + f(Expr::Range(ExprRange { + // `..` + attrs: Vec::new(), + start: None, + limits: RangeLimits::HalfOpen(Token![..](span)), + end: None, + })); + iter(depth, &mut |expr| { + f(Expr::Range(ExprRange { + // `..$expr` + attrs: Vec::new(), + start: None, + limits: RangeLimits::HalfOpen(Token![..](span)), + end: Some(Box::new(expr.clone())), + })); + f(Expr::Range(ExprRange { + // `$expr..` + attrs: Vec::new(), + start: Some(Box::new(expr)), + limits: RangeLimits::HalfOpen(Token![..](span)), + end: None, + })); + }); + + // Expr::Reference + iter(depth, &mut |expr| { + f(Expr::Reference(ExprReference { + // `&$expr` + attrs: Vec::new(), + and_token: Token![&](span), + mutability: None, + expr: Box::new(expr), + })); + }); + + // Expr::Return + f(Expr::Return(ExprReturn { + // `return` + attrs: Vec::new(), + return_token: Token![return](span), + expr: None, + })); + iter(depth, &mut |expr| { + f(Expr::Return(ExprReturn { + // `return $expr` + attrs: Vec::new(), + return_token: Token![return](span), + expr: Some(Box::new(expr)), + })); + }); + + // Expr::Try + iter(depth, &mut |expr| { + f(Expr::Try(ExprTry { + // `$expr?` + attrs: Vec::new(), + expr: Box::new(expr), + question_token: Token![?](span), + })); + }); + + // Expr::Unary + iter(depth, &mut |expr| { + for op in [ + UnOp::Deref(Token![*](span)), + //UnOp::Not(Token![!](span)), + //UnOp::Neg(Token![-](span)), + ] { + f(Expr::Unary(ExprUnary { + // `*$expr` + attrs: Vec::new(), + op, + expr: Box::new(expr.clone()), + })); + } + }); + + if false { + // Expr::Array + f(Expr::Array(ExprArray { + // `[]` + attrs: 
Vec::new(), + bracket_token: token::Bracket(span), + elems: Punctuated::new(), + })); + + // Expr::Async + f(Expr::Async(ExprAsync { + // `async {}` + attrs: Vec::new(), + async_token: Token![async](span), + capture: None, + block: Block { + brace_token: token::Brace(span), + stmts: Vec::new(), + }, + })); + + // Expr::Await + iter(depth, &mut |expr| { + f(Expr::Await(ExprAwait { + // `$expr.await` + attrs: Vec::new(), + base: Box::new(expr), + dot_token: Token![.](span), + await_token: Token![await](span), + })); + }); + + // Expr::Block + f(Expr::Block(ExprBlock { + // `'a: {}` + attrs: Vec::new(), + label: Some(Label { + name: Lifetime::new("'a", span), + colon_token: Token![:](span), + }), + block: Block { + brace_token: token::Brace(span), + stmts: Vec::new(), + }, + })); + iter(depth, &mut |expr| { + f(Expr::Block(ExprBlock { + // `{ $expr }` + attrs: Vec::new(), + label: None, + block: Block { + brace_token: token::Brace(span), + stmts: Vec::from([Stmt::Expr(expr.clone(), None)]), + }, + })); + f(Expr::Block(ExprBlock { + // `{ $expr; }` + attrs: Vec::new(), + label: None, + block: Block { + brace_token: token::Brace(span), + stmts: Vec::from([Stmt::Expr(expr, Some(Token![;](span)))]), + }, + })); + }); + + // Expr::Break + f(Expr::Break(ExprBreak { + // `break 'a` + attrs: Vec::new(), + break_token: Token![break](span), + label: Some(Lifetime::new("'a", span)), + expr: None, + })); + iter(depth, &mut |expr| { + f(Expr::Break(ExprBreak { + // `break 'a $expr` + attrs: Vec::new(), + break_token: Token![break](span), + label: Some(Lifetime::new("'a", span)), + expr: Some(Box::new(expr)), + })); + }); + + // Expr::Closure + f(Expr::Closure(ExprClosure { + // `|| -> T {}` + attrs: Vec::new(), + lifetimes: None, + constness: None, + movability: None, + asyncness: None, + capture: None, + or1_token: Token![|](span), + inputs: Punctuated::new(), + or2_token: Token![|](span), + output: ReturnType::Type( + Token![->](span), + Box::new(Type::Path(TypePath { + qself: None, + path: Path::from(Ident::new("T", span)), + })), + ), + body: Box::new(Expr::Block(ExprBlock { + attrs: Vec::new(), + label: None, + block: Block { + brace_token: token::Brace(span), + stmts: Vec::new(), + }, + })), + })); + + // Expr::Const + f(Expr::Const(ExprConst { + // `const {}` + attrs: Vec::new(), + const_token: Token![const](span), + block: Block { + brace_token: token::Brace(span), + stmts: Vec::new(), + }, + })); + + // Expr::Continue + f(Expr::Continue(ExprContinue { + // `continue` + attrs: Vec::new(), + continue_token: Token![continue](span), + label: None, + })); + f(Expr::Continue(ExprContinue { + // `continue 'a` + attrs: Vec::new(), + continue_token: Token![continue](span), + label: Some(Lifetime::new("'a", span)), + })); + + // Expr::ForLoop + iter(depth, &mut |expr| { + f(Expr::ForLoop(ExprForLoop { + // `for _ in $expr {}` + attrs: Vec::new(), + label: None, + for_token: Token![for](span), + pat: Box::new(Pat::Wild(PatWild { + attrs: Vec::new(), + underscore_token: Token![_](span), + })), + in_token: Token![in](span), + expr: Box::new(expr.clone()), + body: Block { + brace_token: token::Brace(span), + stmts: Vec::new(), + }, + })); + f(Expr::ForLoop(ExprForLoop { + // `'a: for _ in $expr {}` + attrs: Vec::new(), + label: Some(Label { + name: Lifetime::new("'a", span), + colon_token: Token![:](span), + }), + for_token: Token![for](span), + pat: Box::new(Pat::Wild(PatWild { + attrs: Vec::new(), + underscore_token: Token![_](span), + })), + in_token: Token![in](span), + expr: Box::new(expr), + body: Block 
{ + brace_token: token::Brace(span), + stmts: Vec::new(), + }, + })); + }); + + // Expr::Index + iter(depth, &mut |expr| { + f(Expr::Index(ExprIndex { + // `$expr[0]` + attrs: Vec::new(), + expr: Box::new(expr), + bracket_token: token::Bracket(span), + index: Box::new(Expr::Lit(ExprLit { + attrs: Vec::new(), + lit: Lit::Int(LitInt::new("0", span)), + })), + })); + }); + + // Expr::Loop + f(Expr::Loop(ExprLoop { + // `loop {}` + attrs: Vec::new(), + label: None, + loop_token: Token![loop](span), + body: Block { + brace_token: token::Brace(span), + stmts: Vec::new(), + }, + })); + f(Expr::Loop(ExprLoop { + // `'a: loop {}` + attrs: Vec::new(), + label: Some(Label { + name: Lifetime::new("'a", span), + colon_token: Token![:](span), + }), + loop_token: Token![loop](span), + body: Block { + brace_token: token::Brace(span), + stmts: Vec::new(), + }, + })); + + // Expr::Macro + f(Expr::Macro(ExprMacro { + // `m!()` + attrs: Vec::new(), + mac: Macro { + path: Path::from(Ident::new("m", span)), + bang_token: Token![!](span), + delimiter: MacroDelimiter::Paren(token::Paren(span)), + tokens: TokenStream::new(), + }, + })); + f(Expr::Macro(ExprMacro { + // `m! {}` + attrs: Vec::new(), + mac: Macro { + path: Path::from(Ident::new("m", span)), + bang_token: Token![!](span), + delimiter: MacroDelimiter::Brace(token::Brace(span)), + tokens: TokenStream::new(), + }, + })); + + // Expr::Match + iter(depth, &mut |expr| { + f(Expr::Match(ExprMatch { + // `match $expr {}` + attrs: Vec::new(), + match_token: Token![match](span), + expr: Box::new(expr.clone()), + brace_token: token::Brace(span), + arms: Vec::new(), + })); + f(Expr::Match(ExprMatch { + // `match x { _ => $expr }` + attrs: Vec::new(), + match_token: Token![match](span), + expr: Box::new(Expr::Path(ExprPath { + attrs: Vec::new(), + qself: None, + path: Path::from(Ident::new("x", span)), + })), + brace_token: token::Brace(span), + arms: Vec::from([Arm { + attrs: Vec::new(), + pat: Pat::Wild(PatWild { + attrs: Vec::new(), + underscore_token: Token![_](span), + }), + guard: None, + fat_arrow_token: Token![=>](span), + body: Box::new(expr.clone()), + comma: None, + }]), + })); + f(Expr::Match(ExprMatch { + // `match x { _ if $expr => {} }` + attrs: Vec::new(), + match_token: Token![match](span), + expr: Box::new(Expr::Path(ExprPath { + attrs: Vec::new(), + qself: None, + path: Path::from(Ident::new("x", span)), + })), + brace_token: token::Brace(span), + arms: Vec::from([Arm { + attrs: Vec::new(), + pat: Pat::Wild(PatWild { + attrs: Vec::new(), + underscore_token: Token![_](span), + }), + guard: Some((Token![if](span), Box::new(expr))), + fat_arrow_token: Token![=>](span), + body: Box::new(Expr::Block(ExprBlock { + attrs: Vec::new(), + label: None, + block: Block { + brace_token: token::Brace(span), + stmts: Vec::new(), + }, + })), + comma: None, + }]), + })); + }); + + // Expr::MethodCall + iter(depth, &mut |expr| { + f(Expr::MethodCall(ExprMethodCall { + // `$expr.method()` + attrs: Vec::new(), + receiver: Box::new(expr.clone()), + dot_token: Token![.](span), + method: Ident::new("method", span), + turbofish: None, + paren_token: token::Paren(span), + args: Punctuated::new(), + })); + f(Expr::MethodCall(ExprMethodCall { + // `$expr.method::()` + attrs: Vec::new(), + receiver: Box::new(expr), + dot_token: Token![.](span), + method: Ident::new("method", span), + turbofish: Some(AngleBracketedGenericArguments { + colon2_token: Some(Token![::](span)), + lt_token: Token![<](span), + args: Punctuated::from_iter([GenericArgument::Type(Type::Path( + TypePath 
{ + qself: None, + path: Path::from(Ident::new("T", span)), + }, + ))]), + gt_token: Token![>](span), + }), + paren_token: token::Paren(span), + args: Punctuated::new(), + })); + }); + + // Expr::RawAddr + iter(depth, &mut |expr| { + f(Expr::RawAddr(ExprRawAddr { + // `&raw const $expr` + attrs: Vec::new(), + and_token: Token![&](span), + raw: Token![raw](span), + mutability: PointerMutability::Const(Token![const](span)), + expr: Box::new(expr), + })); + }); + + // Expr::Struct + f(Expr::Struct(ExprStruct { + // `Struct {}` + attrs: Vec::new(), + qself: None, + path: Path::from(Ident::new("Struct", span)), + brace_token: token::Brace(span), + fields: Punctuated::new(), + dot2_token: None, + rest: None, + })); + + // Expr::TryBlock + f(Expr::TryBlock(ExprTryBlock { + // `try {}` + attrs: Vec::new(), + try_token: Token![try](span), + block: Block { + brace_token: token::Brace(span), + stmts: Vec::new(), + }, + })); + + // Expr::Unsafe + f(Expr::Unsafe(ExprUnsafe { + // `unsafe {}` + attrs: Vec::new(), + unsafe_token: Token![unsafe](span), + block: Block { + brace_token: token::Brace(span), + stmts: Vec::new(), + }, + })); + + // Expr::While + iter(depth, &mut |expr| { + f(Expr::While(ExprWhile { + // `while $expr {}` + attrs: Vec::new(), + label: None, + while_token: Token![while](span), + cond: Box::new(expr.clone()), + body: Block { + brace_token: token::Brace(span), + stmts: Vec::new(), + }, + })); + f(Expr::While(ExprWhile { + // `'a: while $expr {}` + attrs: Vec::new(), + label: Some(Label { + name: Lifetime::new("'a", span), + colon_token: Token![:](span), + }), + while_token: Token![while](span), + cond: Box::new(expr), + body: Block { + brace_token: token::Brace(span), + stmts: Vec::new(), + }, + })); + }); + + // Expr::Yield + f(Expr::Yield(ExprYield { + // `yield` + attrs: Vec::new(), + yield_token: Token![yield](span), + expr: None, + })); + iter(depth, &mut |expr| { + f(Expr::Yield(ExprYield { + // `yield $expr` + attrs: Vec::new(), + yield_token: Token![yield](span), + expr: Some(Box::new(expr)), + })); + }); + } + } + + let mut failures = 0; + macro_rules! 
fail { + ($($message:tt)*) => {{ + eprintln!($($message)*); + failures += 1; + return; + }}; + } + let mut assert = |mut original: Expr| { + let span = Span::call_site(); + // `const _: () = $expr;` + let pretty = prettyplease::unparse(&File { + shebang: None, + attrs: Vec::new(), + items: Vec::from([Item::Const(ItemConst { + attrs: Vec::new(), + vis: Visibility::Inherited, + const_token: Token![const](span), + ident: Ident::from(Token![_](span)), + generics: Generics::default(), + colon_token: Token![:](span), + ty: Box::new(Type::Infer(TypeInfer { + underscore_token: Token![_](span), + })), + eq_token: Token![=](span), + expr: Box::new(original.clone()), + semi_token: Token![;](span), + })]), + }); + let mut parsed = match syn::parse_file(&pretty) { + Ok(parsed) => parsed, + _ => fail!("failed to parse: {pretty}{original:#?}"), + }; + let item = match parsed.items.as_mut_slice() { + [Item::Const(item)] => item, + _ => unreachable!(), + }; + let mut parsed = mem::replace(&mut *item.expr, Expr::PLACEHOLDER); + AsIfPrinted.visit_expr_mut(&mut original); + FlattenParens.visit_expr_mut(&mut parsed); + if original != parsed { + fail!( + "before: {}\n{:#?}\nafter: {}\n{:#?}", + original.to_token_stream(), + original, + parsed.to_token_stream(), + parsed, + ); + } + if pretty.contains("(||") { + // https://github.com/dtolnay/prettyplease/issues/99 + return; + } + let no_paren = pretty.replace(['(', ')'], ""); + if pretty != no_paren { + if let Ok(mut parsed2) = syn::parse_file(&no_paren) { + let item = match parsed2.items.as_mut_slice() { + [Item::Const(item)] => item, + _ => unreachable!(), + }; + if original == *item.expr { + fail!("redundant parens: {}", pretty); + } + } + } + }; + + iter(if cfg!(debug_assertions) { 3 } else { 4 }, &mut assert); + if failures > 0 { + eprintln!("FAILURES: {failures}"); + ExitCode::FAILURE + } else { + ExitCode::SUCCESS + } +} diff --git a/vendor/proc-macro2/.cargo-checksum.json b/vendor/proc-macro2/.cargo-checksum.json new file mode 100644 index 00000000000000..9d997fa6ddedf1 --- /dev/null +++ b/vendor/proc-macro2/.cargo-checksum.json @@ -0,0 +1 @@ 
+{"files":{".cargo_vcs_info.json":"0c0e9279875a3f985b09df73eefab42d9e6f5566f26485c9e3a057e887d219b7",".github/FUNDING.yml":"b017158736b3c9751a2d21edfce7fe61c8954e2fced8da8dd3013c2f3e295bd9",".github/workflows/ci.yml":"b2004c92e8985c58c1338202b2ebef0f25fa50de01e9101fe46a17000ca59962","Cargo.lock":"4afb839b0f3299f791ccdda8213faddff1ee208d64a14e883b4e24ee48957aea","Cargo.toml":"8c059fba2000e51a2d88025b8ebdc7a0b0e26b3f67cb3baa96c222dafb9c31e4","Cargo.toml.orig":"42bf3a4709d2fcc1027a9c68f525054ea542683cedf626ef2c76b6b2ac63a5dc","LICENSE-APACHE":"62c7a1e35f56406896d7aa7ca52d0cc0d272ac022b5d2796e7d6905db8a3636a","LICENSE-MIT":"23f18e03dc49df91622fe2a76176497404e46ced8a715d9d2b67a7446571cca3","README.md":"e60d0a33eb3bfc8583866bb84ca53fbae5e5cb39b67acfbb3c1f35dae41e19a9","build.rs":"baeb20b52f6b536be8657a566591a507bb2e34a45cf8baa42b135510a0c3c729","rust-toolchain.toml":"6bbb61302978c736b2da03e4fb40e3beab908f85d533ab46fd541e637b5f3e0f","src/detection.rs":"ed9a5f9a979ab01247d7a68eeb1afa3c13209334c5bfff0f9289cb07e5bb4e8b","src/extra.rs":"29f094473279a29b71c3cc9f5fa27c2e2c30c670390cf7e4b7cf451486cc857e","src/fallback.rs":"962e1897fefb138101ae3f6fda9c46cecff787550cdfb9133066326379464d90","src/lib.rs":"c07a2ad1ccbda629d0f2018d6d7762f4dcb955e8d0714ffcf9c7f3d5cd0020f2","src/location.rs":"9225c5a55f03b56cce42bc55ceb509e8216a5e0b24c94aa1cd071b04e3d6c15f","src/marker.rs":"c11c5a1be8bdf18be3fcd224393f350a9aae7ce282e19ce583c84910c6903a8f","src/num.rs":"82d625cbcd255965e46231ac3af1b74ab8bff9787c799e8ed1f978de146cb0b5","src/parse.rs":"0c380fdbe8795d41e08a40e3a1e67e505e9aa9398277a6a794af7d96fab06ac6","src/probe.rs":"2b57e8ebf46a7c60ee2762f23f16d24ee9ddb8f1acd0a7faf7a99cf2e4187151","src/probe/proc_macro_span.rs":"f3f9c728438060c9450d4568621beca9125f559eb65faab9574d2e43e9b49643","src/probe/proc_macro_span_file.rs":"a20a1920d121b153ce33c8e2ea203b9370606744847b62e8ffd0c657d2545778","src/probe/proc_macro_span_location.rs":"71a4768f65f8a87e5a3c2bc2e05fb84d2562a0f4733780e9f919563f25ee07dc","src/rcvec.rs":"a159d246cac59aae2d51b899471ce34766f51f3c11c376ac36ee501ee3f12a7a","src/rustc_literal_escaper.rs":"188cbe8fffe7af3899977530cbb1b6c0b1dff51623db0ec115db1e082159e7b6","src/wrapper.rs":"057b7baa778e8205c0af47405c1af077d4fd19318ed4b79bd195ddceb4da0b15","tests/comments.rs":"11520f6baee23b9258db904f4c256fd3877493f754e2b99041f73a330e74a911","tests/features.rs":"7e52c0c801019b271bf11a994c2e1799a1429b0c1a3a34e551a23971797fe412","tests/marker.rs":"f16299460587d6c65603ed809f1a3b81853e4b99d6cb44d0b68bb07259d7e9f8","tests/test.rs":"c590a13e38c2b5d92a6181433652925dd9d19dd404c6839290abc7acbc3cb5a3","tests/test_fmt.rs":"b7743b612af65f2c88cbe109d50a093db7aa7e87f9e37bf45b7bbaeb240aa020","tests/test_size.rs":"62d8373ea46b669b87bc90a9c49b6d02f90ff4c21f9a25acebf60c9926e01fb7"},"package":"5ee95bc4ef87b8d5ba32e8b7714ccc834865276eab0aed5c9958d00ec45f49e8"} \ No newline at end of file diff --git a/vendor/proc-macro2/.cargo_vcs_info.json b/vendor/proc-macro2/.cargo_vcs_info.json new file mode 100644 index 00000000000000..bdeb94e109b454 --- /dev/null +++ b/vendor/proc-macro2/.cargo_vcs_info.json @@ -0,0 +1,6 @@ +{ + "git": { + "sha1": "d1bf13ac1d90c3b65c1b7fc131a26f37a8e2d0db" + }, + "path_in_vcs": "" +} \ No newline at end of file diff --git a/vendor/proc-macro2/.github/FUNDING.yml b/vendor/proc-macro2/.github/FUNDING.yml new file mode 100644 index 00000000000000..750707701cdae9 --- /dev/null +++ b/vendor/proc-macro2/.github/FUNDING.yml @@ -0,0 +1 @@ +github: dtolnay diff --git a/vendor/proc-macro2/.github/workflows/ci.yml 
b/vendor/proc-macro2/.github/workflows/ci.yml new file mode 100644 index 00000000000000..669a88c9c4f747 --- /dev/null +++ b/vendor/proc-macro2/.github/workflows/ci.yml @@ -0,0 +1,232 @@ +name: CI + +on: + push: + pull_request: + workflow_dispatch: + schedule: [cron: "40 1 * * *"] + +permissions: + contents: read + +env: + RUSTFLAGS: -Dwarnings + +jobs: + pre_ci: + uses: dtolnay/.github/.github/workflows/pre_ci.yml@master + + test: + name: Rust ${{matrix.rust}} + needs: pre_ci + if: needs.pre_ci.outputs.continue + runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + rust: [1.80.0, stable, beta] + timeout-minutes: 45 + steps: + - uses: actions/checkout@v5 + - uses: dtolnay/rust-toolchain@master + with: + toolchain: ${{matrix.rust}} + components: rust-src + - run: cargo test + - run: cargo test --no-default-features + - run: cargo test --features span-locations + - name: RUSTFLAGS='--cfg procmacro2_semver_exempt' cargo test + run: cargo test + env: + RUSTFLAGS: --cfg procmacro2_semver_exempt ${{env.RUSTFLAGS}} + - name: RUSTFLAGS='--cfg procmacro2_semver_exempt' cargo test --no-default-features + run: cargo test --no-default-features + env: + RUSTFLAGS: --cfg procmacro2_semver_exempt ${{env.RUSTFLAGS}} + + nightly: + name: Rust nightly + needs: pre_ci + if: needs.pre_ci.outputs.continue + runs-on: ubuntu-latest + timeout-minutes: 45 + steps: + - uses: actions/checkout@v5 + - uses: dtolnay/rust-toolchain@nightly + with: + components: rust-src + - name: Enable type layout randomization + run: echo RUSTFLAGS=${RUSTFLAGS}\ -Zrandomize-layout\ --cfg=randomize_layout >> $GITHUB_ENV + - run: cargo check + env: + RUSTFLAGS: --cfg procmacro2_nightly_testing ${{env.RUSTFLAGS}} + - run: cargo test + - run: cargo test --no-default-features + - run: cargo test --no-default-features --test features -- --ignored make_sure_no_proc_macro # run the ignored test to make sure the `proc-macro` feature is disabled + - run: cargo test --features span-locations + - run: cargo test --manifest-path tests/ui/Cargo.toml + - name: RUSTFLAGS='--cfg procmacro2_semver_exempt' cargo test + run: cargo test + env: + RUSTFLAGS: --cfg procmacro2_semver_exempt ${{env.RUSTFLAGS}} + - name: RUSTFLAGS='--cfg procmacro2_semver_exempt' cargo test --no-default-features + run: cargo test --no-default-features + env: + RUSTFLAGS: --cfg procmacro2_semver_exempt ${{env.RUSTFLAGS}} + - name: RUSTFLAGS='-Z allow-features=' cargo test + run: cargo test + env: + RUSTFLAGS: -Z allow-features= --cfg procmacro2_backtrace ${{env.RUSTFLAGS}} + - uses: actions/upload-artifact@v4 + if: always() + with: + name: Cargo.lock + path: Cargo.lock + continue-on-error: true + + layout: + name: Layout + needs: pre_ci + if: needs.pre_ci.outputs.continue + runs-on: ubuntu-latest + timeout-minutes: 45 + steps: + - uses: actions/checkout@v5 + - uses: dtolnay/rust-toolchain@nightly + with: + components: rust-src + - run: cargo test --test test_size + - run: cargo test --test test_size --features span-locations + - run: cargo test --test test_size --no-default-features + - run: cargo test --test test_size --no-default-features --features span-locations + + msrv: + name: Rust 1.60.0 + needs: pre_ci + if: needs.pre_ci.outputs.continue + runs-on: ubuntu-latest + timeout-minutes: 45 + steps: + - uses: actions/checkout@v5 + - uses: dtolnay/rust-toolchain@1.60.0 + with: + components: rust-src + - run: cargo check + - run: cargo check --no-default-features + - run: cargo check --features span-locations + - name: RUSTFLAGS='--cfg procmacro2_semver_exempt' cargo 
check + run: cargo check + env: + RUSTFLAGS: --cfg procmacro2_semver_exempt ${{env.RUSTFLAGS}} + - name: RUSTFLAGS='--cfg procmacro2_semver_exempt' cargo check --no-default-features + run: cargo check --no-default-features + env: + RUSTFLAGS: --cfg procmacro2_semver_exempt ${{env.RUSTFLAGS}} + + minimal: + name: Minimal versions + needs: pre_ci + if: needs.pre_ci.outputs.continue + runs-on: ubuntu-latest + timeout-minutes: 45 + steps: + - uses: actions/checkout@v5 + - uses: dtolnay/rust-toolchain@nightly + - run: cargo generate-lockfile -Z minimal-versions + - run: cargo check --locked + + webassembly: + name: WebAssembly + needs: pre_ci + if: needs.pre_ci.outputs.continue + runs-on: ubuntu-latest + timeout-minutes: 45 + steps: + - uses: actions/checkout@v5 + - uses: dtolnay/rust-toolchain@nightly + with: + target: wasm32-unknown-unknown + components: rust-src + - name: Ignore WebAssembly linker warning + run: echo RUSTFLAGS=${RUSTFLAGS}\ -Alinker_messages >> $GITHUB_ENV + - run: cargo test --target wasm32-unknown-unknown --no-run + + fuzz: + name: Fuzz + needs: pre_ci + if: needs.pre_ci.outputs.continue + runs-on: ubuntu-latest + timeout-minutes: 45 + steps: + - uses: actions/checkout@v5 + - uses: dtolnay/rust-toolchain@nightly + with: + components: rust-src + - uses: dtolnay/install@cargo-fuzz + - run: cargo fuzz check + - run: cargo check --no-default-features --features afl + working-directory: fuzz + - uses: dtolnay/install@honggfuzz + - name: Run apt install binutils-dev libunwind-dev + run: | + sudo sed -i 's/^update_initramfs=yes$/update_initramfs=no/' /etc/initramfs-tools/update-initramfs.conf + sudo rm -f /var/lib/man-db/auto-update + sudo apt-get update + sudo apt-get install binutils-dev libunwind-dev + - run: cargo hfuzz build --no-default-features --features honggfuzz + working-directory: fuzz + + doc: + name: Documentation + needs: pre_ci + if: needs.pre_ci.outputs.continue + runs-on: ubuntu-latest + timeout-minutes: 45 + env: + RUSTDOCFLAGS: -Dwarnings + steps: + - uses: actions/checkout@v5 + - uses: dtolnay/rust-toolchain@nightly + with: + components: rust-src + - uses: dtolnay/install@cargo-docs-rs + - run: cargo docs-rs + + clippy: + name: Clippy + runs-on: ubuntu-latest + if: github.event_name != 'pull_request' + timeout-minutes: 45 + steps: + - uses: actions/checkout@v5 + - uses: dtolnay/rust-toolchain@nightly + with: + components: clippy, rust-src + - run: cargo clippy --tests -- -Dclippy::all -Dclippy::pedantic + - run: cargo clippy --tests --all-features -- -Dclippy::all -Dclippy::pedantic + + miri: + name: Miri + needs: pre_ci + if: needs.pre_ci.outputs.continue + runs-on: ubuntu-latest + timeout-minutes: 45 + steps: + - uses: actions/checkout@v5 + - uses: dtolnay/rust-toolchain@miri + - run: cargo miri setup + - run: cargo miri test + env: + MIRIFLAGS: -Zmiri-strict-provenance + + outdated: + name: Outdated + runs-on: ubuntu-latest + if: github.event_name != 'pull_request' + timeout-minutes: 45 + steps: + - uses: actions/checkout@v5 + - uses: dtolnay/rust-toolchain@stable + - uses: dtolnay/install@cargo-outdated + - run: cargo outdated --workspace --exit-code 1 + - run: cargo outdated --manifest-path fuzz/Cargo.toml --exit-code 1 diff --git a/vendor/proc-macro2/Cargo.lock b/vendor/proc-macro2/Cargo.lock new file mode 100644 index 00000000000000..e37ffdd48fc7d3 --- /dev/null +++ b/vendor/proc-macro2/Cargo.lock @@ -0,0 +1,326 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. 
+version = 3 + +[[package]] +name = "adler2" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "320119579fcad9c21884f5c4861d16174d0e06250625266f50fe6898340abefa" + +[[package]] +name = "bitflags" +version = "2.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "812e12b5285cc515a9c72a5c1d3b6d46a19dac5acfef5265968c166106e31dd3" + +[[package]] +name = "cfg-if" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9330f8b2ff13f34540b44e946ef35111825727b38d33286ef986142615121801" + +[[package]] +name = "crc32fast" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9481c1c90cbf2ac953f07c8d4a58aa3945c425b7185c9154d67a65e4230da511" +dependencies = [ + "cfg-if", +] + +[[package]] +name = "crossbeam-deque" +version = "0.8.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9dd111b7b7f7d55b72c0a6ae361660ee5853c9af73f70c3c2ef6858b950e2e51" +dependencies = [ + "crossbeam-epoch", + "crossbeam-utils", +] + +[[package]] +name = "crossbeam-epoch" +version = "0.9.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b82ac4a3c2ca9c3460964f020e1402edd5753411d7737aa39c3714ad1b5420e" +dependencies = [ + "crossbeam-utils", +] + +[[package]] +name = "crossbeam-utils" +version = "0.8.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d0a5c400df2834b80a4c3327b3aad3a4c4cd4de0629063962b03235697506a28" + +[[package]] +name = "either" +version = "1.15.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "48c757948c5ede0e46177b7add2e67155f70e33c07fea8284df6576da70b3719" + +[[package]] +name = "errno" +version = "0.3.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "39cab71617ae0d63f51a36d69f866391735b51691dbda63cf6f96d042b63efeb" +dependencies = [ + "libc", + "windows-sys 0.61.2", +] + +[[package]] +name = "filetime" +version = "0.2.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bc0505cd1b6fa6580283f6bdf70a73fcf4aba1184038c90902b92b3dd0df63ed" +dependencies = [ + "cfg-if", + "libc", + "libredox", + "windows-sys 0.60.2", +] + +[[package]] +name = "flate2" +version = "1.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc5a4e564e38c699f2880d3fda590bedc2e69f3f84cd48b457bd892ce61d0aa9" +dependencies = [ + "crc32fast", + "miniz_oxide", +] + +[[package]] +name = "libc" +version = "0.2.177" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2874a2af47a2325c2001a6e6fad9b16a53b802102b528163885171cf92b15976" + +[[package]] +name = "libredox" +version = "0.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "416f7e718bdb06000964960ffa43b4335ad4012ae8b99060261aa4a8088d5ccb" +dependencies = [ + "bitflags", + "libc", + "redox_syscall", +] + +[[package]] +name = "linux-raw-sys" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "df1d3c3b53da64cf5760482273a98e575c651a67eec7f77df96b5b642de8f039" + +[[package]] +name = "miniz_oxide" +version = "0.8.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1fa76a2c86f704bdb222d66965fb3d63269ce38518b83cb0575fca855ebb6316" +dependencies = [ + "adler2", + "simd-adler32", +] + +[[package]] +name = "proc-macro2" +version = "1.0.102" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e0f6df8eaa422d97d72edcd152e1451618fed47fabbdbd5a8864167b1d4aff7" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "proc-macro2" +version = "1.0.103" +dependencies = [ + "flate2", + "quote", + "rayon", + "rustversion", + "tar", + "unicode-ident", +] + +[[package]] +name = "quote" +version = "1.0.41" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ce25767e7b499d1b604768e7cde645d14cc8584231ea6b295e9c9eb22c02e1d1" +dependencies = [ + "proc-macro2 1.0.102", +] + +[[package]] +name = "rayon" +version = "1.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "368f01d005bf8fd9b1206fb6fa653e6c4a81ceb1466406b81792d87c5677a58f" +dependencies = [ + "either", + "rayon-core", +] + +[[package]] +name = "rayon-core" +version = "1.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "22e18b0f0062d30d4230b2e85ff77fdfe4326feb054b9783a3460d8435c8ab91" +dependencies = [ + "crossbeam-deque", + "crossbeam-utils", +] + +[[package]] +name = "redox_syscall" +version = "0.5.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ed2bf2547551a7053d6fdfafda3f938979645c44812fbfcda098faae3f1a362d" +dependencies = [ + "bitflags", +] + +[[package]] +name = "rustix" +version = "1.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cd15f8a2c5551a84d56efdc1cd049089e409ac19a3072d5037a17fd70719ff3e" +dependencies = [ + "bitflags", + "errno", + "libc", + "linux-raw-sys", + "windows-sys 0.61.2", +] + +[[package]] +name = "rustversion" +version = "1.0.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b39cdef0fa800fc44525c84ccb54a029961a8215f9619753635a9c0d2538d46d" + +[[package]] +name = "simd-adler32" +version = "0.3.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d66dc143e6b11c1eddc06d5c423cfc97062865baf299914ab64caa38182078fe" + +[[package]] +name = "tar" +version = "0.4.44" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d863878d212c87a19c1a610eb53bb01fe12951c0501cf5a0d65f724914a667a" +dependencies = [ + "filetime", + "libc", + "xattr", +] + +[[package]] +name = "unicode-ident" +version = "1.0.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "462eeb75aeb73aea900253ce739c8e18a67423fadf006037cd3ff27e82748a06" + +[[package]] +name = "windows-link" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0805222e57f7521d6a62e36fa9163bc891acd422f971defe97d64e70d0a4fe5" + +[[package]] +name = "windows-sys" +version = "0.60.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f2f500e4d28234f72040990ec9d39e3a6b950f9f22d3dba18416c35882612bcb" +dependencies = [ + "windows-targets", +] + +[[package]] +name = "windows-sys" +version = "0.61.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ae137229bcbd6cdf0f7b80a31df61766145077ddf49416a728b02cb3921ff3fc" +dependencies = [ + "windows-link", +] + +[[package]] +name = "windows-targets" +version = "0.53.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4945f9f551b88e0d65f3db0bc25c33b8acea4d9e41163edf90dcd0b19f9069f3" +dependencies = [ + "windows-link", + "windows_aarch64_gnullvm", + "windows_aarch64_msvc", + "windows_i686_gnu", + "windows_i686_gnullvm", + "windows_i686_msvc", + "windows_x86_64_gnu", 
+ "windows_x86_64_gnullvm", + "windows_x86_64_msvc", +] + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a9d8416fa8b42f5c947f8482c43e7d89e73a173cead56d044f6a56104a6d1b53" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b9d782e804c2f632e395708e99a94275910eb9100b2114651e04744e9b125006" + +[[package]] +name = "windows_i686_gnu" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "960e6da069d81e09becb0ca57a65220ddff016ff2d6af6a223cf372a506593a3" + +[[package]] +name = "windows_i686_gnullvm" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fa7359d10048f68ab8b09fa71c3daccfb0e9b559aed648a8f95469c27057180c" + +[[package]] +name = "windows_i686_msvc" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e7ac75179f18232fe9c285163565a57ef8d3c89254a30685b57d83a38d326c2" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c3842cdd74a865a8066ab39c8a7a473c0778a3f29370b5fd6b4b9aa7df4a499" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ffa179e2d07eee8ad8f57493436566c7cc30ac536a3379fdf008f47f6bb7ae1" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d6bbff5f0aada427a1e5a6da5f1f98158182f26556f345ac9e04d36d0ebed650" + +[[package]] +name = "xattr" +version = "1.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32e45ad4206f6d2479085147f02bc2ef834ac85886624a23575ae137c8aa8156" +dependencies = [ + "libc", + "rustix", +] diff --git a/vendor/proc-macro2/Cargo.toml b/vendor/proc-macro2/Cargo.toml new file mode 100644 index 00000000000000..3f0173c7d1b4aa --- /dev/null +++ b/vendor/proc-macro2/Cargo.toml @@ -0,0 +1,105 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies. +# +# If you are reading this file be aware that the original Cargo.toml +# will likely look very different (and much more reasonable). +# See Cargo.toml.orig for the original contents. + +[package] +edition = "2021" +rust-version = "1.60" +name = "proc-macro2" +version = "1.0.103" +authors = [ + "David Tolnay ", + "Alex Crichton ", +] +build = "build.rs" +autolib = false +autobins = false +autoexamples = false +autotests = false +autobenches = false +description = "A substitute implementation of the compiler's `proc_macro` API to decouple token-based libraries from the procedural macro use case." 
+documentation = "https://docs.rs/proc-macro2" +readme = "README.md" +keywords = [ + "macros", + "syn", +] +categories = ["development-tools::procedural-macro-helpers"] +license = "MIT OR Apache-2.0" +repository = "https://github.com/dtolnay/proc-macro2" + +[package.metadata.docs.rs] +rustc-args = ["--cfg=procmacro2_semver_exempt"] +targets = ["x86_64-unknown-linux-gnu"] +rustdoc-args = [ + "--cfg=procmacro2_semver_exempt", + "--generate-link-to-definition", + "--generate-macro-expansion", + "--extern-html-root-url=core=https://doc.rust-lang.org", + "--extern-html-root-url=alloc=https://doc.rust-lang.org", + "--extern-html-root-url=std=https://doc.rust-lang.org", + "--extern-html-root-url=proc_macro=https://doc.rust-lang.org", +] + +[package.metadata.playground] +features = ["span-locations"] + +[features] +default = ["proc-macro"] +nightly = [] +proc-macro = [] +span-locations = [] + +[lib] +name = "proc_macro2" +path = "src/lib.rs" + +[[test]] +name = "comments" +path = "tests/comments.rs" + +[[test]] +name = "features" +path = "tests/features.rs" + +[[test]] +name = "marker" +path = "tests/marker.rs" + +[[test]] +name = "test" +path = "tests/test.rs" + +[[test]] +name = "test_fmt" +path = "tests/test_fmt.rs" + +[[test]] +name = "test_size" +path = "tests/test_size.rs" + +[dependencies.unicode-ident] +version = "1.0" + +[dev-dependencies.flate2] +version = "1.0" + +[dev-dependencies.quote] +version = "1.0" +default-features = false + +[dev-dependencies.rayon] +version = "1.0" + +[dev-dependencies.rustversion] +version = "1" + +[dev-dependencies.tar] +version = "0.4" diff --git a/vendor/proc-macro2/LICENSE-APACHE b/vendor/proc-macro2/LICENSE-APACHE new file mode 100644 index 00000000000000..1b5ec8b78e237b --- /dev/null +++ b/vendor/proc-macro2/LICENSE-APACHE @@ -0,0 +1,176 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS diff --git a/vendor/proc-macro2/LICENSE-MIT b/vendor/proc-macro2/LICENSE-MIT new file mode 100644 index 00000000000000..31aa79387f27e7 --- /dev/null +++ b/vendor/proc-macro2/LICENSE-MIT @@ -0,0 +1,23 @@ +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. diff --git a/vendor/proc-macro2/README.md b/vendor/proc-macro2/README.md new file mode 100644 index 00000000000000..0b6b490fa9cc6f --- /dev/null +++ b/vendor/proc-macro2/README.md @@ -0,0 +1,94 @@ +# proc-macro2 + +[github](https://github.com/dtolnay/proc-macro2) +[crates.io](https://crates.io/crates/proc-macro2) +[docs.rs](https://docs.rs/proc-macro2) +[build status](https://github.com/dtolnay/proc-macro2/actions?query=branch%3Amaster) + +A wrapper around the procedural macro API of the compiler's `proc_macro` crate. +This library serves two purposes: + +- **Bring proc-macro-like functionality to other contexts like build.rs and + main.rs.** Types from `proc_macro` are entirely specific to procedural macros + and cannot ever exist in code outside of a procedural macro. Meanwhile + `proc_macro2` types may exist anywhere including non-macro code. 
By developing + foundational libraries like [syn] and [quote] against `proc_macro2` rather + than `proc_macro`, the procedural macro ecosystem becomes easily applicable to + many other use cases and we avoid reimplementing non-macro equivalents of + those libraries. + +- **Make procedural macros unit testable.** As a consequence of being specific + to procedural macros, nothing that uses `proc_macro` can be executed from a + unit test. In order for helper libraries or components of a macro to be + testable in isolation, they must be implemented using `proc_macro2`. + +[syn]: https://github.com/dtolnay/syn +[quote]: https://github.com/dtolnay/quote + +## Usage + +```toml +[dependencies] +proc-macro2 = "1.0" +``` + +The skeleton of a typical procedural macro typically looks like this: + +```rust +extern crate proc_macro; + +#[proc_macro_derive(MyDerive)] +pub fn my_derive(input: proc_macro::TokenStream) -> proc_macro::TokenStream { + let input = proc_macro2::TokenStream::from(input); + + let output: proc_macro2::TokenStream = { + /* transform input */ + }; + + proc_macro::TokenStream::from(output) +} +``` + +If parsing with [Syn], you'll use [`parse_macro_input!`] instead to propagate +parse errors correctly back to the compiler when parsing fails. + +[`parse_macro_input!`]: https://docs.rs/syn/2.0/syn/macro.parse_macro_input.html + +## Unstable features + +The default feature set of proc-macro2 tracks the most recent stable compiler +API. Functionality in `proc_macro` that is not yet stable is not exposed by +proc-macro2 by default. + +To opt into the additional APIs available in the most recent nightly compiler, +the `procmacro2_semver_exempt` config flag must be passed to rustc. We will +polyfill those nightly-only APIs back to Rust 1.60.0. As these are unstable APIs +that track the nightly compiler, minor versions of proc-macro2 may make breaking +changes to them at any time. + +``` +RUSTFLAGS='--cfg procmacro2_semver_exempt' cargo build +``` + +Note that this must not only be done for your crate, but for any crate that +depends on your crate. This infectious nature is intentional, as it serves as a +reminder that you are outside of the normal semver guarantees. + +Semver exempt methods are marked as such in the proc-macro2 documentation. + +
+
+#### License
+
+Licensed under either of [Apache License, Version 2.0](LICENSE-APACHE) or
+[MIT license](LICENSE-MIT) at your option.
+
+ + +Unless you explicitly state otherwise, any contribution intentionally submitted +for inclusion in this crate by you, as defined in the Apache-2.0 license, shall +be dual licensed as above, without any additional terms or conditions. + diff --git a/vendor/proc-macro2/build.rs b/vendor/proc-macro2/build.rs new file mode 100644 index 00000000000000..26c3ed1bdcc519 --- /dev/null +++ b/vendor/proc-macro2/build.rs @@ -0,0 +1,267 @@ +#![allow(unknown_lints)] +#![allow(unexpected_cfgs)] +#![allow(clippy::uninlined_format_args)] + +use std::env; +use std::ffi::OsString; +use std::fs; +use std::io::ErrorKind; +use std::iter; +use std::path::Path; +use std::process::{self, Command, Stdio}; +use std::str; + +fn main() { + let rustc = rustc_minor_version().unwrap_or(u32::MAX); + + if rustc >= 80 { + println!("cargo:rustc-check-cfg=cfg(fuzzing)"); + println!("cargo:rustc-check-cfg=cfg(no_is_available)"); + println!("cargo:rustc-check-cfg=cfg(no_literal_byte_character)"); + println!("cargo:rustc-check-cfg=cfg(no_literal_c_string)"); + println!("cargo:rustc-check-cfg=cfg(no_source_text)"); + println!("cargo:rustc-check-cfg=cfg(proc_macro_span)"); + println!("cargo:rustc-check-cfg=cfg(proc_macro_span_file)"); + println!("cargo:rustc-check-cfg=cfg(proc_macro_span_location)"); + println!("cargo:rustc-check-cfg=cfg(procmacro2_backtrace)"); + println!("cargo:rustc-check-cfg=cfg(procmacro2_build_probe)"); + println!("cargo:rustc-check-cfg=cfg(procmacro2_nightly_testing)"); + println!("cargo:rustc-check-cfg=cfg(procmacro2_semver_exempt)"); + println!("cargo:rustc-check-cfg=cfg(randomize_layout)"); + println!("cargo:rustc-check-cfg=cfg(span_locations)"); + println!("cargo:rustc-check-cfg=cfg(super_unstable)"); + println!("cargo:rustc-check-cfg=cfg(wrap_proc_macro)"); + } + + let semver_exempt = cfg!(procmacro2_semver_exempt); + if semver_exempt { + // https://github.com/dtolnay/proc-macro2/issues/147 + println!("cargo:rustc-cfg=procmacro2_semver_exempt"); + } + + if semver_exempt || cfg!(feature = "span-locations") { + // Provide methods Span::start and Span::end which give the line/column + // location of a token. This is behind a cfg because tracking location + // inside spans is a performance hit. + println!("cargo:rustc-cfg=span_locations"); + } + + if rustc < 57 { + // Do not use proc_macro::is_available() to detect whether the proc + // macro API is available vs needs to be polyfilled. Instead, use the + // proc macro API unconditionally and catch the panic that occurs if it + // isn't available. + println!("cargo:rustc-cfg=no_is_available"); + } + + if rustc < 66 { + // Do not call libproc_macro's Span::source_text. Always return None. + println!("cargo:rustc-cfg=no_source_text"); + } + + if rustc < 79 { + // Do not call Literal::byte_character nor Literal::c_string. They can + // be emulated by way of Literal::from_str. + println!("cargo:rustc-cfg=no_literal_byte_character"); + println!("cargo:rustc-cfg=no_literal_c_string"); + } + + if !cfg!(feature = "proc-macro") { + println!("cargo:rerun-if-changed=build.rs"); + return; + } + + let proc_macro_span; + let consider_rustc_bootstrap; + if compile_probe_unstable("proc_macro_span", false) { + // This is a nightly or dev compiler, so it supports unstable features + // regardless of RUSTC_BOOTSTRAP. No need to rerun build script if + // RUSTC_BOOTSTRAP is changed. 
+ proc_macro_span = true; + consider_rustc_bootstrap = false; + } else if let Some(rustc_bootstrap) = env::var_os("RUSTC_BOOTSTRAP") { + if compile_probe_unstable("proc_macro_span", true) { + // This is a stable or beta compiler for which the user has set + // RUSTC_BOOTSTRAP to turn on unstable features. Rerun build script + // if they change it. + proc_macro_span = true; + consider_rustc_bootstrap = true; + } else if rustc_bootstrap == "1" { + // This compiler does not support the proc macro Span API in the + // form that proc-macro2 expects. No need to pay attention to + // RUSTC_BOOTSTRAP. + proc_macro_span = false; + consider_rustc_bootstrap = false; + } else { + // This is a stable or beta compiler for which RUSTC_BOOTSTRAP is + // set to restrict the use of unstable features by this crate. + proc_macro_span = false; + consider_rustc_bootstrap = true; + } + } else { + // Without RUSTC_BOOTSTRAP, this compiler does not support the proc + // macro Span API in the form that proc-macro2 expects, but try again if + // the user turns on unstable features. + proc_macro_span = false; + consider_rustc_bootstrap = true; + } + + if proc_macro_span || !semver_exempt { + // Wrap types from libproc_macro rather than polyfilling the whole API. + // Enabled as long as procmacro2_semver_exempt is not set, because we + // can't emulate the unstable API without emulating everything else. + // Also enabled unconditionally on nightly, in which case the + // procmacro2_semver_exempt surface area is implemented by using the + // nightly-only proc_macro API. + println!("cargo:rustc-cfg=wrap_proc_macro"); + } + + if proc_macro_span { + // Enable non-dummy behavior of Span::byte_range and Span::join methods + // which requires an unstable compiler feature. Enabled when building + // with nightly, unless `-Z allow-feature` in RUSTFLAGS disallows + // unstable features. + println!("cargo:rustc-cfg=proc_macro_span"); + } + + if proc_macro_span || (rustc >= 88 && compile_probe_stable("proc_macro_span_location")) { + // Enable non-dummy behavior of Span::start and Span::end methods on + // Rust 1.88+. + println!("cargo:rustc-cfg=proc_macro_span_location"); + } + + if proc_macro_span || (rustc >= 88 && compile_probe_stable("proc_macro_span_file")) { + // Enable non-dummy behavior of Span::file and Span::local_file methods + // on Rust 1.88+. + println!("cargo:rustc-cfg=proc_macro_span_file"); + } + + if semver_exempt && proc_macro_span { + // Implement the semver exempt API in terms of the nightly-only + // proc_macro API. + println!("cargo:rustc-cfg=super_unstable"); + } + + if consider_rustc_bootstrap { + println!("cargo:rerun-if-env-changed=RUSTC_BOOTSTRAP"); + } +} + +fn compile_probe_unstable(feature: &str, rustc_bootstrap: bool) -> bool { + // RUSTC_STAGE indicates that this crate is being compiled as a dependency + // of a multistage rustc bootstrap. 
This environment uses Cargo in a highly + // non-standard way with issues such as: + // + // https://github.com/rust-lang/cargo/issues/11138 + // https://github.com/rust-lang/rust/issues/114839 + // + env::var_os("RUSTC_STAGE").is_none() && do_compile_probe(feature, rustc_bootstrap) +} + +fn compile_probe_stable(feature: &str) -> bool { + env::var_os("RUSTC_STAGE").is_some() || do_compile_probe(feature, true) +} + +fn do_compile_probe(feature: &str, rustc_bootstrap: bool) -> bool { + println!("cargo:rerun-if-changed=src/probe/{}.rs", feature); + + let rustc = cargo_env_var("RUSTC"); + let out_dir = cargo_env_var("OUT_DIR"); + let out_subdir = Path::new(&out_dir).join("probe"); + let probefile = Path::new("src") + .join("probe") + .join(feature) + .with_extension("rs"); + + if let Err(err) = fs::create_dir(&out_subdir) { + if err.kind() != ErrorKind::AlreadyExists { + eprintln!("Failed to create {}: {}", out_subdir.display(), err); + process::exit(1); + } + } + + let rustc_wrapper = env::var_os("RUSTC_WRAPPER").filter(|wrapper| !wrapper.is_empty()); + let rustc_workspace_wrapper = + env::var_os("RUSTC_WORKSPACE_WRAPPER").filter(|wrapper| !wrapper.is_empty()); + let mut rustc = rustc_wrapper + .into_iter() + .chain(rustc_workspace_wrapper) + .chain(iter::once(rustc)); + let mut cmd = Command::new(rustc.next().unwrap()); + cmd.args(rustc); + + if !rustc_bootstrap { + cmd.env_remove("RUSTC_BOOTSTRAP"); + } + + cmd.stderr(Stdio::null()) + .arg("--cfg=procmacro2_build_probe") + .arg("--edition=2021") + .arg("--crate-name=proc_macro2") + .arg("--crate-type=lib") + .arg("--cap-lints=allow") + .arg("--emit=dep-info,metadata") + .arg("--out-dir") + .arg(&out_subdir) + .arg(probefile); + + if let Some(target) = env::var_os("TARGET") { + cmd.arg("--target").arg(target); + } + + // If Cargo wants to set RUSTFLAGS, use that. + if let Ok(rustflags) = env::var("CARGO_ENCODED_RUSTFLAGS") { + if !rustflags.is_empty() { + for arg in rustflags.split('\x1f') { + cmd.arg(arg); + } + } + } + + let success = match cmd.status() { + Ok(status) => status.success(), + Err(_) => false, + }; + + // Clean up to avoid leaving nondeterministic absolute paths in the dep-info + // file in OUT_DIR, which causes nonreproducible builds in build systems + // that treat the entire OUT_DIR as an artifact. + if let Err(err) = fs::remove_dir_all(&out_subdir) { + // libc::ENOTEMPTY + // Some filesystems (NFSv3) have timing issues under load where '.nfs*' + // dummy files can continue to get created for a short period after the + // probe command completes, breaking remove_dir_all. + // To be replaced with ErrorKind::DirectoryNotEmpty (Rust 1.83+). 
+ const ENOTEMPTY: i32 = 39; + + if !(err.kind() == ErrorKind::NotFound + || (cfg!(target_os = "linux") && err.raw_os_error() == Some(ENOTEMPTY))) + { + eprintln!("Failed to clean up {}: {}", out_subdir.display(), err); + process::exit(1); + } + } + + success +} + +fn rustc_minor_version() -> Option { + let rustc = cargo_env_var("RUSTC"); + let output = Command::new(rustc).arg("--version").output().ok()?; + let version = str::from_utf8(&output.stdout).ok()?; + let mut pieces = version.split('.'); + if pieces.next() != Some("rustc 1") { + return None; + } + pieces.next()?.parse().ok() +} + +fn cargo_env_var(key: &str) -> OsString { + env::var_os(key).unwrap_or_else(|| { + eprintln!( + "Environment variable ${} is not set during execution of build script", + key, + ); + process::exit(1); + }) +} diff --git a/vendor/proc-macro2/rust-toolchain.toml b/vendor/proc-macro2/rust-toolchain.toml new file mode 100644 index 00000000000000..20fe888c30ab44 --- /dev/null +++ b/vendor/proc-macro2/rust-toolchain.toml @@ -0,0 +1,2 @@ +[toolchain] +components = ["rust-src"] diff --git a/vendor/proc-macro2/src/detection.rs b/vendor/proc-macro2/src/detection.rs new file mode 100644 index 00000000000000..beba7b23739569 --- /dev/null +++ b/vendor/proc-macro2/src/detection.rs @@ -0,0 +1,75 @@ +use core::sync::atomic::{AtomicUsize, Ordering}; +use std::sync::Once; + +static WORKS: AtomicUsize = AtomicUsize::new(0); +static INIT: Once = Once::new(); + +pub(crate) fn inside_proc_macro() -> bool { + match WORKS.load(Ordering::Relaxed) { + 1 => return false, + 2 => return true, + _ => {} + } + + INIT.call_once(initialize); + inside_proc_macro() +} + +pub(crate) fn force_fallback() { + WORKS.store(1, Ordering::Relaxed); +} + +pub(crate) fn unforce_fallback() { + initialize(); +} + +#[cfg(not(no_is_available))] +fn initialize() { + let available = proc_macro::is_available(); + WORKS.store(available as usize + 1, Ordering::Relaxed); +} + +// Swap in a null panic hook to avoid printing "thread panicked" to stderr, +// then use catch_unwind to determine whether the compiler's proc_macro is +// working. When proc-macro2 is used from outside of a procedural macro all +// of the proc_macro crate's APIs currently panic. +// +// The Once is to prevent the possibility of this ordering: +// +// thread 1 calls take_hook, gets the user's original hook +// thread 1 calls set_hook with the null hook +// thread 2 calls take_hook, thinks null hook is the original hook +// thread 2 calls set_hook with the null hook +// thread 1 calls set_hook with the actual original hook +// thread 2 calls set_hook with what it thinks is the original hook +// +// in which the user's hook has been lost. +// +// There is still a race condition where a panic in a different thread can +// happen during the interval that the user's original panic hook is +// unregistered such that their hook is incorrectly not called. This is +// sufficiently unlikely and less bad than printing panic messages to stderr +// on correct use of this crate. Maybe there is a libstd feature request +// here. For now, if a user needs to guarantee that this failure mode does +// not occur, they need to call e.g. `proc_macro2::Span::call_site()` from +// the main thread before launching any other threads. 
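The comment block above ends with the recommended workaround for the panic-hook race on pre-1.57 toolchains. A minimal sketch of that recommendation (illustrative only, not part of the vendored file; assumes the published proc-macro2 crate as a dependency):

```rust
// Illustrative sketch only: touch the proc-macro2 API once on the main thread
// so the one-time detection (and its panic-hook swap) cannot race with workers.
fn main() {
    let _ = proc_macro2::Span::call_site();

    std::thread::scope(|s| {
        s.spawn(|| {
            // Safe to use the API from worker threads after detection has run.
            let tokens: proc_macro2::TokenStream = "a + b".parse().unwrap();
            assert_eq!(tokens.to_string(), "a + b");
        });
    });
}
```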
+#[cfg(no_is_available)] +fn initialize() { + use std::panic::{self, PanicInfo}; + + type PanicHook = dyn Fn(&PanicInfo) + Sync + Send + 'static; + + let null_hook: Box = Box::new(|_panic_info| { /* ignore */ }); + let sanity_check = &*null_hook as *const PanicHook; + let original_hook = panic::take_hook(); + panic::set_hook(null_hook); + + let works = panic::catch_unwind(proc_macro::Span::call_site).is_ok(); + WORKS.store(works as usize + 1, Ordering::Relaxed); + + let hopefully_null_hook = panic::take_hook(); + panic::set_hook(original_hook); + if sanity_check != &*hopefully_null_hook { + panic!("observed race condition in proc_macro2::inside_proc_macro"); + } +} diff --git a/vendor/proc-macro2/src/extra.rs b/vendor/proc-macro2/src/extra.rs new file mode 100644 index 00000000000000..522a90e136bea4 --- /dev/null +++ b/vendor/proc-macro2/src/extra.rs @@ -0,0 +1,151 @@ +//! Items which do not have a correspondence to any API in the proc_macro crate, +//! but are necessary to include in proc-macro2. + +use crate::fallback; +use crate::imp; +use crate::marker::{ProcMacroAutoTraits, MARKER}; +use crate::Span; +use core::fmt::{self, Debug}; + +/// Invalidate any `proc_macro2::Span` that exist on the current thread. +/// +/// The implementation of `Span` uses thread-local data structures and this +/// function clears them. Calling any method on a `Span` on the current thread +/// created prior to the invalidation will return incorrect values or crash. +/// +/// This function is useful for programs that process more than 232 +/// bytes of Rust source code on the same thread. Just like rustc, proc-macro2 +/// uses 32-bit source locations, and these wrap around when the total source +/// code processed by the same thread exceeds 232 bytes (4 +/// gigabytes). After a wraparound, `Span` methods such as `source_text()` can +/// return wrong data. +/// +/// # Example +/// +/// As of late 2023, there is 200 GB of Rust code published on crates.io. +/// Looking at just the newest version of every crate, it is 16 GB of code. So a +/// workload that involves parsing it all would overflow a 32-bit source +/// location unless spans are being invalidated. +/// +/// ``` +/// use flate2::read::GzDecoder; +/// use std::ffi::OsStr; +/// use std::io::{BufReader, Read}; +/// use std::str::FromStr; +/// use tar::Archive; +/// +/// rayon::scope(|s| { +/// for krate in every_version_of_every_crate() { +/// s.spawn(move |_| { +/// proc_macro2::extra::invalidate_current_thread_spans(); +/// +/// let reader = BufReader::new(krate); +/// let tar = GzDecoder::new(reader); +/// let mut archive = Archive::new(tar); +/// for entry in archive.entries().unwrap() { +/// let mut entry = entry.unwrap(); +/// let path = entry.path().unwrap(); +/// if path.extension() != Some(OsStr::new("rs")) { +/// continue; +/// } +/// let mut content = String::new(); +/// entry.read_to_string(&mut content).unwrap(); +/// match proc_macro2::TokenStream::from_str(&content) { +/// Ok(tokens) => {/* ... */}, +/// Err(_) => continue, +/// } +/// } +/// }); +/// } +/// }); +/// # +/// # fn every_version_of_every_crate() -> Vec { +/// # Vec::new() +/// # } +/// ``` +/// +/// # Panics +/// +/// This function is not applicable to and will panic if called from a +/// procedural macro. 
+#[cfg(span_locations)] +#[cfg_attr(docsrs, doc(cfg(feature = "span-locations")))] +pub fn invalidate_current_thread_spans() { + crate::imp::invalidate_current_thread_spans(); +} + +/// An object that holds a [`Group`]'s `span_open()` and `span_close()` together +/// in a more compact representation than holding those 2 spans individually. +/// +/// [`Group`]: crate::Group +#[derive(Copy, Clone)] +pub struct DelimSpan { + inner: DelimSpanEnum, + _marker: ProcMacroAutoTraits, +} + +#[derive(Copy, Clone)] +enum DelimSpanEnum { + #[cfg(wrap_proc_macro)] + Compiler { + join: proc_macro::Span, + open: proc_macro::Span, + close: proc_macro::Span, + }, + Fallback(fallback::Span), +} + +impl DelimSpan { + pub(crate) fn new(group: &imp::Group) -> Self { + #[cfg(wrap_proc_macro)] + let inner = match group { + imp::Group::Compiler(group) => DelimSpanEnum::Compiler { + join: group.span(), + open: group.span_open(), + close: group.span_close(), + }, + imp::Group::Fallback(group) => DelimSpanEnum::Fallback(group.span()), + }; + + #[cfg(not(wrap_proc_macro))] + let inner = DelimSpanEnum::Fallback(group.span()); + + DelimSpan { + inner, + _marker: MARKER, + } + } + + /// Returns a span covering the entire delimited group. + pub fn join(&self) -> Span { + match &self.inner { + #[cfg(wrap_proc_macro)] + DelimSpanEnum::Compiler { join, .. } => Span::_new(imp::Span::Compiler(*join)), + DelimSpanEnum::Fallback(span) => Span::_new_fallback(*span), + } + } + + /// Returns a span for the opening punctuation of the group only. + pub fn open(&self) -> Span { + match &self.inner { + #[cfg(wrap_proc_macro)] + DelimSpanEnum::Compiler { open, .. } => Span::_new(imp::Span::Compiler(*open)), + DelimSpanEnum::Fallback(span) => Span::_new_fallback(span.first_byte()), + } + } + + /// Returns a span for the closing punctuation of the group only. + pub fn close(&self) -> Span { + match &self.inner { + #[cfg(wrap_proc_macro)] + DelimSpanEnum::Compiler { close, .. } => Span::_new(imp::Span::Compiler(*close)), + DelimSpanEnum::Fallback(span) => Span::_new_fallback(span.last_byte()), + } + } +} + +impl Debug for DelimSpan { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + Debug::fmt(&self.join(), f) + } +} diff --git a/vendor/proc-macro2/src/fallback.rs b/vendor/proc-macro2/src/fallback.rs new file mode 100644 index 00000000000000..61b7b91b08883e --- /dev/null +++ b/vendor/proc-macro2/src/fallback.rs @@ -0,0 +1,1256 @@ +#[cfg(wrap_proc_macro)] +use crate::imp; +#[cfg(span_locations)] +use crate::location::LineColumn; +use crate::parse::{self, Cursor}; +use crate::rcvec::{RcVec, RcVecBuilder, RcVecIntoIter, RcVecMut}; +use crate::{Delimiter, Spacing, TokenTree}; +#[cfg(all(span_locations, not(fuzzing)))] +use alloc::collections::BTreeMap; +#[cfg(all(span_locations, not(fuzzing)))] +use core::cell::RefCell; +#[cfg(span_locations)] +use core::cmp; +#[cfg(all(span_locations, not(fuzzing)))] +use core::cmp::Ordering; +use core::fmt::{self, Debug, Display, Write}; +use core::mem::ManuallyDrop; +#[cfg(span_locations)] +use core::ops::Range; +use core::ops::RangeBounds; +use core::ptr; +use core::str; +#[cfg(feature = "proc-macro")] +use core::str::FromStr; +use std::ffi::CStr; +#[cfg(wrap_proc_macro)] +use std::panic; +#[cfg(span_locations)] +use std::path::PathBuf; + +/// Force use of proc-macro2's fallback implementation of the API for now, even +/// if the compiler's implementation is available. 
+pub fn force() { + #[cfg(wrap_proc_macro)] + crate::detection::force_fallback(); +} + +/// Resume using the compiler's implementation of the proc macro API if it is +/// available. +pub fn unforce() { + #[cfg(wrap_proc_macro)] + crate::detection::unforce_fallback(); +} + +#[derive(Clone)] +pub(crate) struct TokenStream { + inner: RcVec, +} + +#[derive(Debug)] +pub(crate) struct LexError { + pub(crate) span: Span, +} + +impl LexError { + pub(crate) fn span(&self) -> Span { + self.span + } + + pub(crate) fn call_site() -> Self { + LexError { + span: Span::call_site(), + } + } +} + +impl TokenStream { + pub(crate) fn new() -> Self { + TokenStream { + inner: RcVecBuilder::new().build(), + } + } + + pub(crate) fn from_str_checked(src: &str) -> Result { + // Create a dummy file & add it to the source map + let mut cursor = get_cursor(src); + + // Strip a byte order mark if present + const BYTE_ORDER_MARK: &str = "\u{feff}"; + if cursor.starts_with(BYTE_ORDER_MARK) { + cursor = cursor.advance(BYTE_ORDER_MARK.len()); + } + + parse::token_stream(cursor) + } + + #[cfg(feature = "proc-macro")] + pub(crate) fn from_str_unchecked(src: &str) -> Self { + Self::from_str_checked(src).unwrap() + } + + pub(crate) fn is_empty(&self) -> bool { + self.inner.len() == 0 + } + + fn take_inner(self) -> RcVecBuilder { + let nodrop = ManuallyDrop::new(self); + unsafe { ptr::read(&nodrop.inner) }.make_owned() + } +} + +fn push_token_from_proc_macro(mut vec: RcVecMut, token: TokenTree) { + // https://github.com/dtolnay/proc-macro2/issues/235 + match token { + TokenTree::Literal(crate::Literal { + #[cfg(wrap_proc_macro)] + inner: crate::imp::Literal::Fallback(literal), + #[cfg(not(wrap_proc_macro))] + inner: literal, + .. + }) if literal.repr.starts_with('-') => { + push_negative_literal(vec, literal); + } + _ => vec.push(token), + } + + #[cold] + fn push_negative_literal(mut vec: RcVecMut, mut literal: Literal) { + literal.repr.remove(0); + let mut punct = crate::Punct::new('-', Spacing::Alone); + punct.set_span(crate::Span::_new_fallback(literal.span)); + vec.push(TokenTree::Punct(punct)); + vec.push(TokenTree::Literal(crate::Literal::_new_fallback(literal))); + } +} + +// Nonrecursive to prevent stack overflow. 
+impl Drop for TokenStream { + fn drop(&mut self) { + let mut stack = Vec::new(); + let mut current = match self.inner.get_mut() { + Some(inner) => inner.take().into_iter(), + None => return, + }; + loop { + while let Some(token) = current.next() { + let group = match token { + TokenTree::Group(group) => group.inner, + _ => continue, + }; + #[cfg(wrap_proc_macro)] + let group = match group { + crate::imp::Group::Fallback(group) => group, + crate::imp::Group::Compiler(_) => continue, + }; + let mut group = group; + if let Some(inner) = group.stream.inner.get_mut() { + stack.push(current); + current = inner.take().into_iter(); + } + } + match stack.pop() { + Some(next) => current = next, + None => return, + } + } + } +} + +pub(crate) struct TokenStreamBuilder { + inner: RcVecBuilder, +} + +impl TokenStreamBuilder { + pub(crate) fn new() -> Self { + TokenStreamBuilder { + inner: RcVecBuilder::new(), + } + } + + pub(crate) fn with_capacity(cap: usize) -> Self { + TokenStreamBuilder { + inner: RcVecBuilder::with_capacity(cap), + } + } + + pub(crate) fn push_token_from_parser(&mut self, tt: TokenTree) { + self.inner.push(tt); + } + + pub(crate) fn build(self) -> TokenStream { + TokenStream { + inner: self.inner.build(), + } + } +} + +#[cfg(span_locations)] +fn get_cursor(src: &str) -> Cursor { + #[cfg(fuzzing)] + return Cursor { rest: src, off: 1 }; + + // Create a dummy file & add it to the source map + #[cfg(not(fuzzing))] + SOURCE_MAP.with(|sm| { + let mut sm = sm.borrow_mut(); + let span = sm.add_file(src); + Cursor { + rest: src, + off: span.lo, + } + }) +} + +#[cfg(not(span_locations))] +fn get_cursor(src: &str) -> Cursor { + Cursor { rest: src } +} + +impl Display for LexError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.write_str("cannot parse string into token stream") + } +} + +impl Display for TokenStream { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let mut joint = false; + for (i, tt) in self.inner.iter().enumerate() { + if i != 0 && !joint { + write!(f, " ")?; + } + joint = false; + match tt { + TokenTree::Group(tt) => write!(f, "{}", tt), + TokenTree::Ident(tt) => write!(f, "{}", tt), + TokenTree::Punct(tt) => { + joint = tt.spacing() == Spacing::Joint; + write!(f, "{}", tt) + } + TokenTree::Literal(tt) => write!(f, "{}", tt), + }?; + } + + Ok(()) + } +} + +impl Debug for TokenStream { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.write_str("TokenStream ")?; + f.debug_list().entries(self.clone()).finish() + } +} + +#[cfg(feature = "proc-macro")] +impl From for TokenStream { + fn from(inner: proc_macro::TokenStream) -> Self { + TokenStream::from_str_unchecked(&inner.to_string()) + } +} + +#[cfg(feature = "proc-macro")] +impl From for proc_macro::TokenStream { + fn from(inner: TokenStream) -> Self { + proc_macro::TokenStream::from_str_unchecked(&inner.to_string()) + } +} + +impl From for TokenStream { + fn from(tree: TokenTree) -> Self { + let mut stream = RcVecBuilder::new(); + push_token_from_proc_macro(stream.as_mut(), tree); + TokenStream { + inner: stream.build(), + } + } +} + +impl FromIterator for TokenStream { + fn from_iter>(tokens: I) -> Self { + let mut stream = TokenStream::new(); + stream.extend(tokens); + stream + } +} + +impl FromIterator for TokenStream { + fn from_iter>(streams: I) -> Self { + let mut v = RcVecBuilder::new(); + + for stream in streams { + v.extend(stream.take_inner()); + } + + TokenStream { inner: v.build() } + } +} + +impl Extend for TokenStream { + fn extend>(&mut self, tokens: I) { + let mut vec = 
self.inner.make_mut(); + tokens + .into_iter() + .for_each(|token| push_token_from_proc_macro(vec.as_mut(), token)); + } +} + +impl Extend for TokenStream { + fn extend>(&mut self, streams: I) { + self.inner.make_mut().extend(streams.into_iter().flatten()); + } +} + +pub(crate) type TokenTreeIter = RcVecIntoIter; + +impl IntoIterator for TokenStream { + type Item = TokenTree; + type IntoIter = TokenTreeIter; + + fn into_iter(self) -> TokenTreeIter { + self.take_inner().into_iter() + } +} + +#[cfg(all(span_locations, not(fuzzing)))] +thread_local! { + static SOURCE_MAP: RefCell = RefCell::new(SourceMap { + // Start with a single dummy file which all call_site() and def_site() + // spans reference. + files: vec![FileInfo { + source_text: String::new(), + span: Span { lo: 0, hi: 0 }, + lines: vec![0], + char_index_to_byte_offset: BTreeMap::new(), + }], + }); +} + +#[cfg(span_locations)] +pub(crate) fn invalidate_current_thread_spans() { + #[cfg(not(fuzzing))] + SOURCE_MAP.with(|sm| sm.borrow_mut().files.truncate(1)); +} + +#[cfg(all(span_locations, not(fuzzing)))] +struct FileInfo { + source_text: String, + span: Span, + lines: Vec, + char_index_to_byte_offset: BTreeMap, +} + +#[cfg(all(span_locations, not(fuzzing)))] +impl FileInfo { + fn offset_line_column(&self, offset: usize) -> LineColumn { + assert!(self.span_within(Span { + lo: offset as u32, + hi: offset as u32, + })); + let offset = offset - self.span.lo as usize; + match self.lines.binary_search(&offset) { + Ok(found) => LineColumn { + line: found + 1, + column: 0, + }, + Err(idx) => LineColumn { + line: idx, + column: offset - self.lines[idx - 1], + }, + } + } + + fn span_within(&self, span: Span) -> bool { + span.lo >= self.span.lo && span.hi <= self.span.hi + } + + fn byte_range(&mut self, span: Span) -> Range { + let lo_char = (span.lo - self.span.lo) as usize; + + // Look up offset of the largest already-computed char index that is + // less than or equal to the current requested one. We resume counting + // chars from that point. + let (&last_char_index, &last_byte_offset) = self + .char_index_to_byte_offset + .range(..=lo_char) + .next_back() + .unwrap_or((&0, &0)); + + let lo_byte = if last_char_index == lo_char { + last_byte_offset + } else { + let total_byte_offset = match self.source_text[last_byte_offset..] + .char_indices() + .nth(lo_char - last_char_index) + { + Some((additional_offset, _ch)) => last_byte_offset + additional_offset, + None => self.source_text.len(), + }; + self.char_index_to_byte_offset + .insert(lo_char, total_byte_offset); + total_byte_offset + }; + + let trunc_lo = &self.source_text[lo_byte..]; + let char_len = (span.hi - span.lo) as usize; + lo_byte..match trunc_lo.char_indices().nth(char_len) { + Some((offset, _ch)) => lo_byte + offset, + None => self.source_text.len(), + } + } + + fn source_text(&mut self, span: Span) -> String { + let byte_range = self.byte_range(span); + self.source_text[byte_range].to_owned() + } +} + +/// Computes the offsets of each line in the given source string +/// and the total number of characters +#[cfg(all(span_locations, not(fuzzing)))] +fn lines_offsets(s: &str) -> (usize, Vec) { + let mut lines = vec![0]; + let mut total = 0; + + for ch in s.chars() { + total += 1; + if ch == '\n' { + lines.push(total); + } + } + + (total, lines) +} + +#[cfg(all(span_locations, not(fuzzing)))] +struct SourceMap { + files: Vec, +} + +#[cfg(all(span_locations, not(fuzzing)))] +impl SourceMap { + fn next_start_pos(&self) -> u32 { + // Add 1 so there's always space between files. 
+ // + // We'll always have at least 1 file, as we initialize our files list + // with a dummy file. + self.files.last().unwrap().span.hi + 1 + } + + fn add_file(&mut self, src: &str) -> Span { + let (len, lines) = lines_offsets(src); + let lo = self.next_start_pos(); + let span = Span { + lo, + hi: lo + (len as u32), + }; + + self.files.push(FileInfo { + source_text: src.to_owned(), + span, + lines, + // Populated lazily by source_text(). + char_index_to_byte_offset: BTreeMap::new(), + }); + + span + } + + fn find(&self, span: Span) -> usize { + match self.files.binary_search_by(|file| { + if file.span.hi < span.lo { + Ordering::Less + } else if file.span.lo > span.hi { + Ordering::Greater + } else { + assert!(file.span_within(span)); + Ordering::Equal + } + }) { + Ok(i) => i, + Err(_) => unreachable!("Invalid span with no related FileInfo!"), + } + } + + fn filepath(&self, span: Span) -> String { + let i = self.find(span); + if i == 0 { + "".to_owned() + } else { + format!("", i) + } + } + + fn fileinfo(&self, span: Span) -> &FileInfo { + let i = self.find(span); + &self.files[i] + } + + fn fileinfo_mut(&mut self, span: Span) -> &mut FileInfo { + let i = self.find(span); + &mut self.files[i] + } +} + +#[derive(Clone, Copy, PartialEq, Eq)] +pub(crate) struct Span { + #[cfg(span_locations)] + pub(crate) lo: u32, + #[cfg(span_locations)] + pub(crate) hi: u32, +} + +impl Span { + #[cfg(not(span_locations))] + pub(crate) fn call_site() -> Self { + Span {} + } + + #[cfg(span_locations)] + pub(crate) fn call_site() -> Self { + Span { lo: 0, hi: 0 } + } + + pub(crate) fn mixed_site() -> Self { + Span::call_site() + } + + #[cfg(procmacro2_semver_exempt)] + pub(crate) fn def_site() -> Self { + Span::call_site() + } + + pub(crate) fn resolved_at(&self, _other: Span) -> Span { + // Stable spans consist only of line/column information, so + // `resolved_at` and `located_at` only select which span the + // caller wants line/column information from. 
+ *self + } + + pub(crate) fn located_at(&self, other: Span) -> Span { + other + } + + #[cfg(span_locations)] + pub(crate) fn byte_range(&self) -> Range { + #[cfg(fuzzing)] + return 0..0; + + #[cfg(not(fuzzing))] + { + if self.is_call_site() { + 0..0 + } else { + SOURCE_MAP.with(|sm| sm.borrow_mut().fileinfo_mut(*self).byte_range(*self)) + } + } + } + + #[cfg(span_locations)] + pub(crate) fn start(&self) -> LineColumn { + #[cfg(fuzzing)] + return LineColumn { line: 0, column: 0 }; + + #[cfg(not(fuzzing))] + SOURCE_MAP.with(|sm| { + let sm = sm.borrow(); + let fi = sm.fileinfo(*self); + fi.offset_line_column(self.lo as usize) + }) + } + + #[cfg(span_locations)] + pub(crate) fn end(&self) -> LineColumn { + #[cfg(fuzzing)] + return LineColumn { line: 0, column: 0 }; + + #[cfg(not(fuzzing))] + SOURCE_MAP.with(|sm| { + let sm = sm.borrow(); + let fi = sm.fileinfo(*self); + fi.offset_line_column(self.hi as usize) + }) + } + + #[cfg(span_locations)] + pub(crate) fn file(&self) -> String { + #[cfg(fuzzing)] + return "".to_owned(); + + #[cfg(not(fuzzing))] + SOURCE_MAP.with(|sm| { + let sm = sm.borrow(); + sm.filepath(*self) + }) + } + + #[cfg(span_locations)] + pub(crate) fn local_file(&self) -> Option { + None + } + + #[cfg(not(span_locations))] + pub(crate) fn join(&self, _other: Span) -> Option { + Some(Span {}) + } + + #[cfg(span_locations)] + pub(crate) fn join(&self, other: Span) -> Option { + #[cfg(fuzzing)] + return { + let _ = other; + None + }; + + #[cfg(not(fuzzing))] + SOURCE_MAP.with(|sm| { + let sm = sm.borrow(); + // If `other` is not within the same FileInfo as us, return None. + if !sm.fileinfo(*self).span_within(other) { + return None; + } + Some(Span { + lo: cmp::min(self.lo, other.lo), + hi: cmp::max(self.hi, other.hi), + }) + }) + } + + #[cfg(not(span_locations))] + pub(crate) fn source_text(&self) -> Option { + None + } + + #[cfg(span_locations)] + pub(crate) fn source_text(&self) -> Option { + #[cfg(fuzzing)] + return None; + + #[cfg(not(fuzzing))] + { + if self.is_call_site() { + None + } else { + Some(SOURCE_MAP.with(|sm| sm.borrow_mut().fileinfo_mut(*self).source_text(*self))) + } + } + } + + #[cfg(not(span_locations))] + pub(crate) fn first_byte(self) -> Self { + self + } + + #[cfg(span_locations)] + pub(crate) fn first_byte(self) -> Self { + Span { + lo: self.lo, + hi: cmp::min(self.lo.saturating_add(1), self.hi), + } + } + + #[cfg(not(span_locations))] + pub(crate) fn last_byte(self) -> Self { + self + } + + #[cfg(span_locations)] + pub(crate) fn last_byte(self) -> Self { + Span { + lo: cmp::max(self.hi.saturating_sub(1), self.lo), + hi: self.hi, + } + } + + #[cfg(span_locations)] + fn is_call_site(&self) -> bool { + self.lo == 0 && self.hi == 0 + } +} + +impl Debug for Span { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + #[cfg(span_locations)] + return write!(f, "bytes({}..{})", self.lo, self.hi); + + #[cfg(not(span_locations))] + write!(f, "Span") + } +} + +pub(crate) fn debug_span_field_if_nontrivial(debug: &mut fmt::DebugStruct, span: Span) { + #[cfg(span_locations)] + { + if span.is_call_site() { + return; + } + } + + if cfg!(span_locations) { + debug.field("span", &span); + } +} + +#[derive(Clone)] +pub(crate) struct Group { + delimiter: Delimiter, + stream: TokenStream, + span: Span, +} + +impl Group { + pub(crate) fn new(delimiter: Delimiter, stream: TokenStream) -> Self { + Group { + delimiter, + stream, + span: Span::call_site(), + } + } + + pub(crate) fn delimiter(&self) -> Delimiter { + self.delimiter + } + + pub(crate) fn stream(&self) -> 
TokenStream { + self.stream.clone() + } + + pub(crate) fn span(&self) -> Span { + self.span + } + + pub(crate) fn span_open(&self) -> Span { + self.span.first_byte() + } + + pub(crate) fn span_close(&self) -> Span { + self.span.last_byte() + } + + pub(crate) fn set_span(&mut self, span: Span) { + self.span = span; + } +} + +impl Display for Group { + // We attempt to match libproc_macro's formatting. + // Empty parens: () + // Nonempty parens: (...) + // Empty brackets: [] + // Nonempty brackets: [...] + // Empty braces: { } + // Nonempty braces: { ... } + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let (open, close) = match self.delimiter { + Delimiter::Parenthesis => ("(", ")"), + Delimiter::Brace => ("{ ", "}"), + Delimiter::Bracket => ("[", "]"), + Delimiter::None => ("", ""), + }; + + f.write_str(open)?; + Display::fmt(&self.stream, f)?; + if self.delimiter == Delimiter::Brace && !self.stream.inner.is_empty() { + f.write_str(" ")?; + } + f.write_str(close)?; + + Ok(()) + } +} + +impl Debug for Group { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + let mut debug = fmt.debug_struct("Group"); + debug.field("delimiter", &self.delimiter); + debug.field("stream", &self.stream); + debug_span_field_if_nontrivial(&mut debug, self.span); + debug.finish() + } +} + +#[derive(Clone)] +pub(crate) struct Ident { + sym: Box, + span: Span, + raw: bool, +} + +impl Ident { + #[track_caller] + pub(crate) fn new_checked(string: &str, span: Span) -> Self { + validate_ident(string); + Ident::new_unchecked(string, span) + } + + pub(crate) fn new_unchecked(string: &str, span: Span) -> Self { + Ident { + sym: Box::from(string), + span, + raw: false, + } + } + + #[track_caller] + pub(crate) fn new_raw_checked(string: &str, span: Span) -> Self { + validate_ident_raw(string); + Ident::new_raw_unchecked(string, span) + } + + pub(crate) fn new_raw_unchecked(string: &str, span: Span) -> Self { + Ident { + sym: Box::from(string), + span, + raw: true, + } + } + + pub(crate) fn span(&self) -> Span { + self.span + } + + pub(crate) fn set_span(&mut self, span: Span) { + self.span = span; + } +} + +pub(crate) fn is_ident_start(c: char) -> bool { + c == '_' || unicode_ident::is_xid_start(c) +} + +pub(crate) fn is_ident_continue(c: char) -> bool { + unicode_ident::is_xid_continue(c) +} + +#[track_caller] +fn validate_ident(string: &str) { + if string.is_empty() { + panic!("Ident is not allowed to be empty; use Option"); + } + + if string.bytes().all(|digit| b'0' <= digit && digit <= b'9') { + panic!("Ident cannot be a number; use Literal instead"); + } + + fn ident_ok(string: &str) -> bool { + let mut chars = string.chars(); + let first = chars.next().unwrap(); + if !is_ident_start(first) { + return false; + } + for ch in chars { + if !is_ident_continue(ch) { + return false; + } + } + true + } + + if !ident_ok(string) { + panic!("{:?} is not a valid Ident", string); + } +} + +#[track_caller] +fn validate_ident_raw(string: &str) { + validate_ident(string); + + match string { + "_" | "super" | "self" | "Self" | "crate" => { + panic!("`r#{}` cannot be a raw identifier", string); + } + _ => {} + } +} + +impl PartialEq for Ident { + fn eq(&self, other: &Ident) -> bool { + self.sym == other.sym && self.raw == other.raw + } +} + +impl PartialEq for Ident +where + T: ?Sized + AsRef, +{ + fn eq(&self, other: &T) -> bool { + let other = other.as_ref(); + if self.raw { + other.starts_with("r#") && *self.sym == other[2..] 
+ } else { + *self.sym == *other + } + } +} + +impl Display for Ident { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + if self.raw { + f.write_str("r#")?; + } + f.write_str(&self.sym) + } +} + +#[allow(clippy::missing_fields_in_debug)] +impl Debug for Ident { + // Ident(proc_macro), Ident(r#union) + #[cfg(not(span_locations))] + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let mut debug = f.debug_tuple("Ident"); + debug.field(&format_args!("{}", self)); + debug.finish() + } + + // Ident { + // sym: proc_macro, + // span: bytes(128..138) + // } + #[cfg(span_locations)] + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let mut debug = f.debug_struct("Ident"); + debug.field("sym", &format_args!("{}", self)); + debug_span_field_if_nontrivial(&mut debug, self.span); + debug.finish() + } +} + +#[derive(Clone)] +pub(crate) struct Literal { + pub(crate) repr: String, + span: Span, +} + +macro_rules! suffixed_numbers { + ($($name:ident => $kind:ident,)*) => ($( + pub(crate) fn $name(n: $kind) -> Literal { + Literal::_new(format!(concat!("{}", stringify!($kind)), n)) + } + )*) +} + +macro_rules! unsuffixed_numbers { + ($($name:ident => $kind:ident,)*) => ($( + pub(crate) fn $name(n: $kind) -> Literal { + Literal::_new(n.to_string()) + } + )*) +} + +impl Literal { + pub(crate) fn _new(repr: String) -> Self { + Literal { + repr, + span: Span::call_site(), + } + } + + pub(crate) fn from_str_checked(repr: &str) -> Result { + let mut cursor = get_cursor(repr); + #[cfg(span_locations)] + let lo = cursor.off; + + let negative = cursor.starts_with_char('-'); + if negative { + cursor = cursor.advance(1); + if !cursor.starts_with_fn(|ch| ch.is_ascii_digit()) { + return Err(LexError::call_site()); + } + } + + if let Ok((rest, mut literal)) = parse::literal(cursor) { + if rest.is_empty() { + if negative { + literal.repr.insert(0, '-'); + } + literal.span = Span { + #[cfg(span_locations)] + lo, + #[cfg(span_locations)] + hi: rest.off, + }; + return Ok(literal); + } + } + Err(LexError::call_site()) + } + + pub(crate) unsafe fn from_str_unchecked(repr: &str) -> Self { + Literal::_new(repr.to_owned()) + } + + suffixed_numbers! { + u8_suffixed => u8, + u16_suffixed => u16, + u32_suffixed => u32, + u64_suffixed => u64, + u128_suffixed => u128, + usize_suffixed => usize, + i8_suffixed => i8, + i16_suffixed => i16, + i32_suffixed => i32, + i64_suffixed => i64, + i128_suffixed => i128, + isize_suffixed => isize, + + f32_suffixed => f32, + f64_suffixed => f64, + } + + unsuffixed_numbers! { + u8_unsuffixed => u8, + u16_unsuffixed => u16, + u32_unsuffixed => u32, + u64_unsuffixed => u64, + u128_unsuffixed => u128, + usize_unsuffixed => usize, + i8_unsuffixed => i8, + i16_unsuffixed => i16, + i32_unsuffixed => i32, + i64_unsuffixed => i64, + i128_unsuffixed => i128, + isize_unsuffixed => isize, + } + + pub(crate) fn f32_unsuffixed(f: f32) -> Literal { + let mut s = f.to_string(); + if !s.contains('.') { + s.push_str(".0"); + } + Literal::_new(s) + } + + pub(crate) fn f64_unsuffixed(f: f64) -> Literal { + let mut s = f.to_string(); + if !s.contains('.') { + s.push_str(".0"); + } + Literal::_new(s) + } + + pub(crate) fn string(string: &str) -> Literal { + let mut repr = String::with_capacity(string.len() + 2); + repr.push('"'); + escape_utf8(string, &mut repr); + repr.push('"'); + Literal::_new(repr) + } + + pub(crate) fn character(ch: char) -> Literal { + let mut repr = String::new(); + repr.push('\''); + if ch == '"' { + // escape_debug turns this into '\"' which is unnecessary. 
+ repr.push(ch); + } else { + repr.extend(ch.escape_debug()); + } + repr.push('\''); + Literal::_new(repr) + } + + pub(crate) fn byte_character(byte: u8) -> Literal { + let mut repr = "b'".to_string(); + #[allow(clippy::match_overlapping_arm)] + match byte { + b'\0' => repr.push_str(r"\0"), + b'\t' => repr.push_str(r"\t"), + b'\n' => repr.push_str(r"\n"), + b'\r' => repr.push_str(r"\r"), + b'\'' => repr.push_str(r"\'"), + b'\\' => repr.push_str(r"\\"), + b'\x20'..=b'\x7E' => repr.push(byte as char), + _ => { + let _ = write!(repr, r"\x{:02X}", byte); + } + } + repr.push('\''); + Literal::_new(repr) + } + + pub(crate) fn byte_string(bytes: &[u8]) -> Literal { + let mut repr = "b\"".to_string(); + let mut bytes = bytes.iter(); + while let Some(&b) = bytes.next() { + #[allow(clippy::match_overlapping_arm)] + match b { + b'\0' => repr.push_str(match bytes.as_slice().first() { + // circumvent clippy::octal_escapes lint + Some(b'0'..=b'7') => r"\x00", + _ => r"\0", + }), + b'\t' => repr.push_str(r"\t"), + b'\n' => repr.push_str(r"\n"), + b'\r' => repr.push_str(r"\r"), + b'"' => repr.push_str("\\\""), + b'\\' => repr.push_str(r"\\"), + b'\x20'..=b'\x7E' => repr.push(b as char), + _ => { + let _ = write!(repr, r"\x{:02X}", b); + } + } + } + repr.push('"'); + Literal::_new(repr) + } + + pub(crate) fn c_string(string: &CStr) -> Literal { + let mut repr = "c\"".to_string(); + let mut bytes = string.to_bytes(); + while !bytes.is_empty() { + let (valid, invalid) = match str::from_utf8(bytes) { + Ok(all_valid) => { + bytes = b""; + (all_valid, bytes) + } + Err(utf8_error) => { + let (valid, rest) = bytes.split_at(utf8_error.valid_up_to()); + let valid = str::from_utf8(valid).unwrap(); + let invalid = utf8_error + .error_len() + .map_or(rest, |error_len| &rest[..error_len]); + bytes = &bytes[valid.len() + invalid.len()..]; + (valid, invalid) + } + }; + escape_utf8(valid, &mut repr); + for &byte in invalid { + let _ = write!(repr, r"\x{:02X}", byte); + } + } + repr.push('"'); + Literal::_new(repr) + } + + pub(crate) fn span(&self) -> Span { + self.span + } + + pub(crate) fn set_span(&mut self, span: Span) { + self.span = span; + } + + pub(crate) fn subspan>(&self, range: R) -> Option { + #[cfg(not(span_locations))] + { + let _ = range; + None + } + + #[cfg(span_locations)] + { + use core::ops::Bound; + + let lo = match range.start_bound() { + Bound::Included(start) => { + let start = u32::try_from(*start).ok()?; + self.span.lo.checked_add(start)? + } + Bound::Excluded(start) => { + let start = u32::try_from(*start).ok()?; + self.span.lo.checked_add(start)?.checked_add(1)? + } + Bound::Unbounded => self.span.lo, + }; + let hi = match range.end_bound() { + Bound::Included(end) => { + let end = u32::try_from(*end).ok()?; + self.span.lo.checked_add(end)?.checked_add(1)? + } + Bound::Excluded(end) => { + let end = u32::try_from(*end).ok()?; + self.span.lo.checked_add(end)? 
+ } + Bound::Unbounded => self.span.hi, + }; + if lo <= hi && hi <= self.span.hi { + Some(Span { lo, hi }) + } else { + None + } + } + } +} + +impl Display for Literal { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + Display::fmt(&self.repr, f) + } +} + +impl Debug for Literal { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + let mut debug = fmt.debug_struct("Literal"); + debug.field("lit", &format_args!("{}", self.repr)); + debug_span_field_if_nontrivial(&mut debug, self.span); + debug.finish() + } +} + +fn escape_utf8(string: &str, repr: &mut String) { + let mut chars = string.chars(); + while let Some(ch) = chars.next() { + if ch == '\0' { + repr.push_str( + if chars + .as_str() + .starts_with(|next| '0' <= next && next <= '7') + { + // circumvent clippy::octal_escapes lint + r"\x00" + } else { + r"\0" + }, + ); + } else if ch == '\'' { + // escape_debug turns this into "\'" which is unnecessary. + repr.push(ch); + } else { + repr.extend(ch.escape_debug()); + } + } +} + +#[cfg(feature = "proc-macro")] +pub(crate) trait FromStr2: FromStr { + #[cfg(wrap_proc_macro)] + fn valid(src: &str) -> bool; + + #[cfg(wrap_proc_macro)] + fn from_str_checked(src: &str) -> Result { + // Validate using fallback parser, because rustc is incapable of + // returning a recoverable Err for certain invalid token streams, and + // will instead permanently poison the compilation. + if !Self::valid(src) { + return Err(imp::LexError::CompilerPanic); + } + + // Catch panic to work around https://github.com/rust-lang/rust/issues/58736. + match panic::catch_unwind(|| Self::from_str(src)) { + Ok(Ok(ok)) => Ok(ok), + Ok(Err(lex)) => Err(imp::LexError::Compiler(lex)), + Err(_panic) => Err(imp::LexError::CompilerPanic), + } + } + + fn from_str_unchecked(src: &str) -> Self { + Self::from_str(src).unwrap() + } +} + +#[cfg(feature = "proc-macro")] +impl FromStr2 for proc_macro::TokenStream { + #[cfg(wrap_proc_macro)] + fn valid(src: &str) -> bool { + TokenStream::from_str_checked(src).is_ok() + } +} + +#[cfg(feature = "proc-macro")] +impl FromStr2 for proc_macro::Literal { + #[cfg(wrap_proc_macro)] + fn valid(src: &str) -> bool { + Literal::from_str_checked(src).is_ok() + } +} diff --git a/vendor/proc-macro2/src/lib.rs b/vendor/proc-macro2/src/lib.rs new file mode 100644 index 00000000000000..7952afaa491bdc --- /dev/null +++ b/vendor/proc-macro2/src/lib.rs @@ -0,0 +1,1495 @@ +//! [![github]](https://github.com/dtolnay/proc-macro2) [![crates-io]](https://crates.io/crates/proc-macro2) [![docs-rs]](crate) +//! +//! [github]: https://img.shields.io/badge/github-8da0cb?style=for-the-badge&labelColor=555555&logo=github +//! [crates-io]: https://img.shields.io/badge/crates.io-fc8d62?style=for-the-badge&labelColor=555555&logo=rust +//! [docs-rs]: https://img.shields.io/badge/docs.rs-66c2a5?style=for-the-badge&labelColor=555555&logo=docs.rs +//! +//!
+//! +//! A wrapper around the procedural macro API of the compiler's [`proc_macro`] +//! crate. This library serves two purposes: +//! +//! - **Bring proc-macro-like functionality to other contexts like build.rs and +//! main.rs.** Types from `proc_macro` are entirely specific to procedural +//! macros and cannot ever exist in code outside of a procedural macro. +//! Meanwhile `proc_macro2` types may exist anywhere including non-macro code. +//! By developing foundational libraries like [syn] and [quote] against +//! `proc_macro2` rather than `proc_macro`, the procedural macro ecosystem +//! becomes easily applicable to many other use cases and we avoid +//! reimplementing non-macro equivalents of those libraries. +//! +//! - **Make procedural macros unit testable.** As a consequence of being +//! specific to procedural macros, nothing that uses `proc_macro` can be +//! executed from a unit test. In order for helper libraries or components of +//! a macro to be testable in isolation, they must be implemented using +//! `proc_macro2`. +//! +//! [syn]: https://github.com/dtolnay/syn +//! [quote]: https://github.com/dtolnay/quote +//! +//! # Usage +//! +//! The skeleton of a typical procedural macro typically looks like this: +//! +//! ``` +//! extern crate proc_macro; +//! +//! # const IGNORE: &str = stringify! { +//! #[proc_macro_derive(MyDerive)] +//! # }; +//! # #[cfg(wrap_proc_macro)] +//! pub fn my_derive(input: proc_macro::TokenStream) -> proc_macro::TokenStream { +//! let input = proc_macro2::TokenStream::from(input); +//! +//! let output: proc_macro2::TokenStream = { +//! /* transform input */ +//! # input +//! }; +//! +//! proc_macro::TokenStream::from(output) +//! } +//! ``` +//! +//! If parsing with [Syn], you'll use [`parse_macro_input!`] instead to +//! propagate parse errors correctly back to the compiler when parsing fails. +//! +//! [`parse_macro_input!`]: https://docs.rs/syn/2.0/syn/macro.parse_macro_input.html +//! +//! # Unstable features +//! +//! The default feature set of proc-macro2 tracks the most recent stable +//! compiler API. Functionality in `proc_macro` that is not yet stable is not +//! exposed by proc-macro2 by default. +//! +//! To opt into the additional APIs available in the most recent nightly +//! compiler, the `procmacro2_semver_exempt` config flag must be passed to +//! rustc. We will polyfill those nightly-only APIs back to Rust 1.60.0. As +//! these are unstable APIs that track the nightly compiler, minor versions of +//! proc-macro2 may make breaking changes to them at any time. +//! +//! ```sh +//! RUSTFLAGS='--cfg procmacro2_semver_exempt' cargo build +//! ``` +//! +//! Note that this must not only be done for your crate, but for any crate that +//! depends on your crate. This infectious nature is intentional, as it serves +//! as a reminder that you are outside of the normal semver guarantees. +//! +//! Semver exempt methods are marked as such in the proc-macro2 documentation. +//! +//! # Thread-Safety +//! +//! Most types in this crate are `!Sync` because the underlying compiler +//! types make use of thread-local memory, meaning they cannot be accessed from +//! a different thread. + +// Proc-macro2 types in rustdoc of other crates get linked to here. 
+#![doc(html_root_url = "https://docs.rs/proc-macro2/1.0.103")] +#![cfg_attr(any(proc_macro_span, super_unstable), feature(proc_macro_span))] +#![cfg_attr(super_unstable, feature(proc_macro_def_site))] +#![cfg_attr(docsrs, feature(doc_cfg))] +#![deny(unsafe_op_in_unsafe_fn)] +#![allow( + clippy::cast_lossless, + clippy::cast_possible_truncation, + clippy::checked_conversions, + clippy::doc_markdown, + clippy::elidable_lifetime_names, + clippy::incompatible_msrv, + clippy::items_after_statements, + clippy::iter_without_into_iter, + clippy::let_underscore_untyped, + clippy::manual_assert, + clippy::manual_range_contains, + clippy::missing_panics_doc, + clippy::missing_safety_doc, + clippy::must_use_candidate, + clippy::needless_doctest_main, + clippy::needless_lifetimes, + clippy::new_without_default, + clippy::return_self_not_must_use, + clippy::shadow_unrelated, + clippy::trivially_copy_pass_by_ref, + clippy::uninlined_format_args, + clippy::unnecessary_wraps, + clippy::unused_self, + clippy::used_underscore_binding, + clippy::vec_init_then_push +)] +#![allow(unknown_lints, mismatched_lifetime_syntaxes)] + +#[cfg(all(procmacro2_semver_exempt, wrap_proc_macro, not(super_unstable)))] +compile_error! {"\ + Something is not right. If you've tried to turn on \ + procmacro2_semver_exempt, you need to ensure that it \ + is turned on for the compilation of the proc-macro2 \ + build script as well. +"} + +#[cfg(all( + procmacro2_nightly_testing, + feature = "proc-macro", + not(proc_macro_span) +))] +compile_error! {"\ + Build script probe failed to compile. +"} + +extern crate alloc; + +#[cfg(feature = "proc-macro")] +extern crate proc_macro; + +mod marker; +mod parse; +mod probe; +mod rcvec; + +#[cfg(wrap_proc_macro)] +mod detection; + +// Public for proc_macro2::fallback::force() and unforce(), but those are quite +// a niche use case so we omit it from rustdoc. +#[doc(hidden)] +pub mod fallback; + +pub mod extra; + +#[cfg(not(wrap_proc_macro))] +use crate::fallback as imp; +#[path = "wrapper.rs"] +#[cfg(wrap_proc_macro)] +mod imp; + +#[cfg(span_locations)] +mod location; + +#[cfg(procmacro2_semver_exempt)] +mod num; +#[cfg(procmacro2_semver_exempt)] +#[allow(dead_code)] +mod rustc_literal_escaper; + +use crate::extra::DelimSpan; +use crate::marker::{ProcMacroAutoTraits, MARKER}; +#[cfg(procmacro2_semver_exempt)] +use crate::rustc_literal_escaper::MixedUnit; +use core::cmp::Ordering; +use core::fmt::{self, Debug, Display}; +use core::hash::{Hash, Hasher}; +#[cfg(span_locations)] +use core::ops::Range; +use core::ops::RangeBounds; +use core::str::FromStr; +use std::error::Error; +use std::ffi::CStr; +#[cfg(span_locations)] +use std::path::PathBuf; + +#[cfg(span_locations)] +#[cfg_attr(docsrs, doc(cfg(feature = "span-locations")))] +pub use crate::location::LineColumn; + +#[cfg(procmacro2_semver_exempt)] +#[cfg_attr(docsrs, doc(cfg(procmacro2_semver_exempt)))] +pub use crate::rustc_literal_escaper::EscapeError; + +/// An abstract stream of tokens, or more concretely a sequence of token trees. +/// +/// This type provides interfaces for iterating over token trees and for +/// collecting token trees into one stream. +/// +/// Token stream is both the input and output of `#[proc_macro]`, +/// `#[proc_macro_attribute]` and `#[proc_macro_derive]` definitions. +#[derive(Clone)] +pub struct TokenStream { + inner: imp::TokenStream, + _marker: ProcMacroAutoTraits, +} + +/// Error returned from `TokenStream::from_str`. 
+pub struct LexError { + inner: imp::LexError, + _marker: ProcMacroAutoTraits, +} + +impl TokenStream { + fn _new(inner: imp::TokenStream) -> Self { + TokenStream { + inner, + _marker: MARKER, + } + } + + fn _new_fallback(inner: fallback::TokenStream) -> Self { + TokenStream { + inner: imp::TokenStream::from(inner), + _marker: MARKER, + } + } + + /// Returns an empty `TokenStream` containing no token trees. + pub fn new() -> Self { + TokenStream::_new(imp::TokenStream::new()) + } + + /// Checks if this `TokenStream` is empty. + pub fn is_empty(&self) -> bool { + self.inner.is_empty() + } +} + +/// `TokenStream::default()` returns an empty stream, +/// i.e. this is equivalent with `TokenStream::new()`. +impl Default for TokenStream { + fn default() -> Self { + TokenStream::new() + } +} + +/// Attempts to break the string into tokens and parse those tokens into a token +/// stream. +/// +/// May fail for a number of reasons, for example, if the string contains +/// unbalanced delimiters or characters not existing in the language. +/// +/// NOTE: Some errors may cause panics instead of returning `LexError`. We +/// reserve the right to change these errors into `LexError`s later. +impl FromStr for TokenStream { + type Err = LexError; + + fn from_str(src: &str) -> Result { + match imp::TokenStream::from_str_checked(src) { + Ok(tokens) => Ok(TokenStream::_new(tokens)), + Err(lex) => Err(LexError { + inner: lex, + _marker: MARKER, + }), + } + } +} + +#[cfg(feature = "proc-macro")] +#[cfg_attr(docsrs, doc(cfg(feature = "proc-macro")))] +impl From for TokenStream { + fn from(inner: proc_macro::TokenStream) -> Self { + TokenStream::_new(imp::TokenStream::from(inner)) + } +} + +#[cfg(feature = "proc-macro")] +#[cfg_attr(docsrs, doc(cfg(feature = "proc-macro")))] +impl From for proc_macro::TokenStream { + fn from(inner: TokenStream) -> Self { + proc_macro::TokenStream::from(inner.inner) + } +} + +impl From for TokenStream { + fn from(token: TokenTree) -> Self { + TokenStream::_new(imp::TokenStream::from(token)) + } +} + +impl Extend for TokenStream { + fn extend>(&mut self, streams: I) { + self.inner.extend(streams); + } +} + +impl Extend for TokenStream { + fn extend>(&mut self, streams: I) { + self.inner + .extend(streams.into_iter().map(|stream| stream.inner)); + } +} + +/// Collects a number of token trees into a single stream. +impl FromIterator for TokenStream { + fn from_iter>(streams: I) -> Self { + TokenStream::_new(streams.into_iter().collect()) + } +} +impl FromIterator for TokenStream { + fn from_iter>(streams: I) -> Self { + TokenStream::_new(streams.into_iter().map(|i| i.inner).collect()) + } +} + +/// Prints the token stream as a string that is supposed to be losslessly +/// convertible back into the same token stream (modulo spans), except for +/// possibly `TokenTree::Group`s with `Delimiter::None` delimiters and negative +/// numeric literals. +impl Display for TokenStream { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + Display::fmt(&self.inner, f) + } +} + +/// Prints token in a form convenient for debugging. 
+impl Debug for TokenStream { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + Debug::fmt(&self.inner, f) + } +} + +impl LexError { + pub fn span(&self) -> Span { + Span::_new(self.inner.span()) + } +} + +impl Debug for LexError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + Debug::fmt(&self.inner, f) + } +} + +impl Display for LexError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + Display::fmt(&self.inner, f) + } +} + +impl Error for LexError {} + +/// A region of source code, along with macro expansion information. +#[derive(Copy, Clone)] +pub struct Span { + inner: imp::Span, + _marker: ProcMacroAutoTraits, +} + +impl Span { + fn _new(inner: imp::Span) -> Self { + Span { + inner, + _marker: MARKER, + } + } + + fn _new_fallback(inner: fallback::Span) -> Self { + Span { + inner: imp::Span::from(inner), + _marker: MARKER, + } + } + + /// The span of the invocation of the current procedural macro. + /// + /// Identifiers created with this span will be resolved as if they were + /// written directly at the macro call location (call-site hygiene) and + /// other code at the macro call site will be able to refer to them as well. + pub fn call_site() -> Self { + Span::_new(imp::Span::call_site()) + } + + /// The span located at the invocation of the procedural macro, but with + /// local variables, labels, and `$crate` resolved at the definition site + /// of the macro. This is the same hygiene behavior as `macro_rules`. + pub fn mixed_site() -> Self { + Span::_new(imp::Span::mixed_site()) + } + + /// A span that resolves at the macro definition site. + /// + /// This method is semver exempt and not exposed by default. + #[cfg(procmacro2_semver_exempt)] + #[cfg_attr(docsrs, doc(cfg(procmacro2_semver_exempt)))] + pub fn def_site() -> Self { + Span::_new(imp::Span::def_site()) + } + + /// Creates a new span with the same line/column information as `self` but + /// that resolves symbols as though it were at `other`. + pub fn resolved_at(&self, other: Span) -> Span { + Span::_new(self.inner.resolved_at(other.inner)) + } + + /// Creates a new span with the same name resolution behavior as `self` but + /// with the line/column information of `other`. + pub fn located_at(&self, other: Span) -> Span { + Span::_new(self.inner.located_at(other.inner)) + } + + /// Convert `proc_macro2::Span` to `proc_macro::Span`. + /// + /// This method is available when building with a nightly compiler, or when + /// building with rustc 1.29+ *without* semver exempt features. + /// + /// # Panics + /// + /// Panics if called from outside of a procedural macro. Unlike + /// `proc_macro2::Span`, the `proc_macro::Span` type can only exist within + /// the context of a procedural macro invocation. + #[cfg(wrap_proc_macro)] + pub fn unwrap(self) -> proc_macro::Span { + self.inner.unwrap() + } + + // Soft deprecated. Please use Span::unwrap. + #[cfg(wrap_proc_macro)] + #[doc(hidden)] + pub fn unstable(self) -> proc_macro::Span { + self.unwrap() + } + + /// Returns the span's byte position range in the source file. + /// + /// This method requires the `"span-locations"` feature to be enabled. + /// + /// When executing in a procedural macro context, the returned range is only + /// accurate if compiled with a nightly toolchain. The stable toolchain does + /// not have this information available. When executing outside of a + /// procedural macro, such as main.rs or build.rs, the byte range is always + /// accurate regardless of toolchain. 
+ #[cfg(span_locations)] + #[cfg_attr(docsrs, doc(cfg(feature = "span-locations")))] + pub fn byte_range(&self) -> Range { + self.inner.byte_range() + } + + /// Get the starting line/column in the source file for this span. + /// + /// This method requires the `"span-locations"` feature to be enabled. + /// + /// When executing in a procedural macro context, the returned line/column + /// are only meaningful if compiled with a nightly toolchain. The stable + /// toolchain does not have this information available. When executing + /// outside of a procedural macro, such as main.rs or build.rs, the + /// line/column are always meaningful regardless of toolchain. + #[cfg(span_locations)] + #[cfg_attr(docsrs, doc(cfg(feature = "span-locations")))] + pub fn start(&self) -> LineColumn { + self.inner.start() + } + + /// Get the ending line/column in the source file for this span. + /// + /// This method requires the `"span-locations"` feature to be enabled. + /// + /// When executing in a procedural macro context, the returned line/column + /// are only meaningful if compiled with a nightly toolchain. The stable + /// toolchain does not have this information available. When executing + /// outside of a procedural macro, such as main.rs or build.rs, the + /// line/column are always meaningful regardless of toolchain. + #[cfg(span_locations)] + #[cfg_attr(docsrs, doc(cfg(feature = "span-locations")))] + pub fn end(&self) -> LineColumn { + self.inner.end() + } + + /// The path to the source file in which this span occurs, for display + /// purposes. + /// + /// This might not correspond to a valid file system path. It might be + /// remapped, or might be an artificial path such as `""`. + #[cfg(span_locations)] + #[cfg_attr(docsrs, doc(cfg(feature = "span-locations")))] + pub fn file(&self) -> String { + self.inner.file() + } + + /// The path to the source file in which this span occurs on disk. + /// + /// This is the actual path on disk. It is unaffected by path remapping. + /// + /// This path should not be embedded in the output of the macro; prefer + /// `file()` instead. + #[cfg(span_locations)] + #[cfg_attr(docsrs, doc(cfg(feature = "span-locations")))] + pub fn local_file(&self) -> Option { + self.inner.local_file() + } + + /// Create a new span encompassing `self` and `other`. + /// + /// Returns `None` if `self` and `other` are from different files. + /// + /// Warning: the underlying [`proc_macro::Span::join`] method is + /// nightly-only. When called from within a procedural macro not using a + /// nightly compiler, this method will always return `None`. + pub fn join(&self, other: Span) -> Option { + self.inner.join(other.inner).map(Span::_new) + } + + /// Compares two spans to see if they're equal. + /// + /// This method is semver exempt and not exposed by default. + #[cfg(procmacro2_semver_exempt)] + #[cfg_attr(docsrs, doc(cfg(procmacro2_semver_exempt)))] + pub fn eq(&self, other: &Span) -> bool { + self.inner.eq(&other.inner) + } + + /// Returns the source text behind a span. This preserves the original + /// source code, including spaces and comments. It only returns a result if + /// the span corresponds to real source code. + /// + /// Note: The observable result of a macro should only rely on the tokens + /// and not on this source text. The result of this function is a best + /// effort to be used for diagnostics only. + pub fn source_text(&self) -> Option { + self.inner.source_text() + } +} + +/// Prints a span in a form convenient for debugging. 
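A minimal sketch of the non-macro case described above (illustrative only, not part of the vendored file), assuming the crate is built with the `span-locations` feature so that the location methods return real positions:

```rust
// Illustrative sketch only; requires proc-macro2 with the "span-locations"
// feature so that start()/byte_range() are available and accurate here.
use proc_macro2::{TokenStream, TokenTree};

fn main() {
    let tokens: TokenStream = "let answer = 42;".parse().unwrap();
    for tree in tokens {
        if let TokenTree::Literal(lit) = tree {
            let span = lit.span();
            println!(
                "literal `{}` at line {} col {} (bytes {:?})",
                lit,
                span.start().line,
                span.start().column,
                span.byte_range(),
            );
        }
    }
}
```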
+impl Debug for Span { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + Debug::fmt(&self.inner, f) + } +} + +/// A single token or a delimited sequence of token trees (e.g. `[1, (), ..]`). +#[derive(Clone)] +pub enum TokenTree { + /// A token stream surrounded by bracket delimiters. + Group(Group), + /// An identifier. + Ident(Ident), + /// A single punctuation character (`+`, `,`, `$`, etc.). + Punct(Punct), + /// A literal character (`'a'`), string (`"hello"`), number (`2.3`), etc. + Literal(Literal), +} + +impl TokenTree { + /// Returns the span of this tree, delegating to the `span` method of + /// the contained token or a delimited stream. + pub fn span(&self) -> Span { + match self { + TokenTree::Group(t) => t.span(), + TokenTree::Ident(t) => t.span(), + TokenTree::Punct(t) => t.span(), + TokenTree::Literal(t) => t.span(), + } + } + + /// Configures the span for *only this token*. + /// + /// Note that if this token is a `Group` then this method will not configure + /// the span of each of the internal tokens, this will simply delegate to + /// the `set_span` method of each variant. + pub fn set_span(&mut self, span: Span) { + match self { + TokenTree::Group(t) => t.set_span(span), + TokenTree::Ident(t) => t.set_span(span), + TokenTree::Punct(t) => t.set_span(span), + TokenTree::Literal(t) => t.set_span(span), + } + } +} + +impl From for TokenTree { + fn from(g: Group) -> Self { + TokenTree::Group(g) + } +} + +impl From for TokenTree { + fn from(g: Ident) -> Self { + TokenTree::Ident(g) + } +} + +impl From for TokenTree { + fn from(g: Punct) -> Self { + TokenTree::Punct(g) + } +} + +impl From for TokenTree { + fn from(g: Literal) -> Self { + TokenTree::Literal(g) + } +} + +/// Prints the token tree as a string that is supposed to be losslessly +/// convertible back into the same token tree (modulo spans), except for +/// possibly `TokenTree::Group`s with `Delimiter::None` delimiters and negative +/// numeric literals. +impl Display for TokenTree { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + TokenTree::Group(t) => Display::fmt(t, f), + TokenTree::Ident(t) => Display::fmt(t, f), + TokenTree::Punct(t) => Display::fmt(t, f), + TokenTree::Literal(t) => Display::fmt(t, f), + } + } +} + +/// Prints token tree in a form convenient for debugging. +impl Debug for TokenTree { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + // Each of these has the name in the struct type in the derived debug, + // so don't bother with an extra layer of indirection + match self { + TokenTree::Group(t) => Debug::fmt(t, f), + TokenTree::Ident(t) => { + let mut debug = f.debug_struct("Ident"); + debug.field("sym", &format_args!("{}", t)); + imp::debug_span_field_if_nontrivial(&mut debug, t.span().inner); + debug.finish() + } + TokenTree::Punct(t) => Debug::fmt(t, f), + TokenTree::Literal(t) => Debug::fmt(t, f), + } + } +} + +/// A delimited token stream. +/// +/// A `Group` internally contains a `TokenStream` which is surrounded by +/// `Delimiter`s. +#[derive(Clone)] +pub struct Group { + inner: imp::Group, +} + +/// Describes how a sequence of token trees is delimited. +#[derive(Copy, Clone, Debug, Eq, PartialEq)] +pub enum Delimiter { + /// `( ... )` + Parenthesis, + /// `{ ... }` + Brace, + /// `[ ... ]` + Bracket, + /// `∅ ... ∅` + /// + /// An invisible delimiter, that may, for example, appear around tokens + /// coming from a "macro variable" `$var`. It is important to preserve + /// operator priorities in cases like `$var * 3` where `$var` is `1 + 2`. 
+ /// Invisible delimiters may not survive roundtrip of a token stream through
+ /// a string.
+ ///
+ /// <div class="warning">
+ ///
+ /// Note: rustc currently can ignore the grouping of tokens delimited by `None` in the output
+ /// of a proc_macro. Only `None`-delimited groups created by a macro_rules macro in the input
+ /// of a proc_macro macro are preserved, and only in very specific circumstances.
+ /// Any `None`-delimited groups (re)created by a proc_macro will therefore not preserve
+ /// operator priorities as indicated above. The other `Delimiter` variants should be used
+ /// instead in this context. This is a rustc bug. For details, see
+ /// [rust-lang/rust#67062](https://github.com/rust-lang/rust/issues/67062).
+ ///
+ /// </div>
+ None, +} + +impl Group { + fn _new(inner: imp::Group) -> Self { + Group { inner } + } + + fn _new_fallback(inner: fallback::Group) -> Self { + Group { + inner: imp::Group::from(inner), + } + } + + /// Creates a new `Group` with the given delimiter and token stream. + /// + /// This constructor will set the span for this group to + /// `Span::call_site()`. To change the span you can use the `set_span` + /// method below. + pub fn new(delimiter: Delimiter, stream: TokenStream) -> Self { + Group { + inner: imp::Group::new(delimiter, stream.inner), + } + } + + /// Returns the punctuation used as the delimiter for this group: a set of + /// parentheses, square brackets, or curly braces. + pub fn delimiter(&self) -> Delimiter { + self.inner.delimiter() + } + + /// Returns the `TokenStream` of tokens that are delimited in this `Group`. + /// + /// Note that the returned token stream does not include the delimiter + /// returned above. + pub fn stream(&self) -> TokenStream { + TokenStream::_new(self.inner.stream()) + } + + /// Returns the span for the delimiters of this token stream, spanning the + /// entire `Group`. + /// + /// ```text + /// pub fn span(&self) -> Span { + /// ^^^^^^^ + /// ``` + pub fn span(&self) -> Span { + Span::_new(self.inner.span()) + } + + /// Returns the span pointing to the opening delimiter of this group. + /// + /// ```text + /// pub fn span_open(&self) -> Span { + /// ^ + /// ``` + pub fn span_open(&self) -> Span { + Span::_new(self.inner.span_open()) + } + + /// Returns the span pointing to the closing delimiter of this group. + /// + /// ```text + /// pub fn span_close(&self) -> Span { + /// ^ + /// ``` + pub fn span_close(&self) -> Span { + Span::_new(self.inner.span_close()) + } + + /// Returns an object that holds this group's `span_open()` and + /// `span_close()` together (in a more compact representation than holding + /// those 2 spans individually). + pub fn delim_span(&self) -> DelimSpan { + DelimSpan::new(&self.inner) + } + + /// Configures the span for this `Group`'s delimiters, but not its internal + /// tokens. + /// + /// This method will **not** set the span of all the internal tokens spanned + /// by this group, but rather it will only set the span of the delimiter + /// tokens at the level of the `Group`. + pub fn set_span(&mut self, span: Span) { + self.inner.set_span(span.inner); + } +} + +/// Prints the group as a string that should be losslessly convertible back +/// into the same group (modulo spans), except for possibly `TokenTree::Group`s +/// with `Delimiter::None` delimiters. +impl Display for Group { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + Display::fmt(&self.inner, formatter) + } +} + +impl Debug for Group { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + Debug::fmt(&self.inner, formatter) + } +} + +/// A `Punct` is a single punctuation character like `+`, `-` or `#`. +/// +/// Multicharacter operators like `+=` are represented as two instances of +/// `Punct` with different forms of `Spacing` returned. +#[derive(Clone)] +pub struct Punct { + ch: char, + spacing: Spacing, + span: Span, +} + +/// Whether a `Punct` is followed immediately by another `Punct` or followed by +/// another token or whitespace. +#[derive(Copy, Clone, Debug, Eq, PartialEq)] +pub enum Spacing { + /// E.g. `+` is `Alone` in `+ =`, `+ident` or `+()`. + Alone, + /// E.g. `+` is `Joint` in `+=` or `'` is `Joint` in `'#`. + /// + /// Additionally, single quote `'` can join with identifiers to form + /// lifetimes `'ident`. 
+ Joint, +} + +impl Punct { + /// Creates a new `Punct` from the given character and spacing. + /// + /// The `ch` argument must be a valid punctuation character permitted by the + /// language, otherwise the function will panic. + /// + /// The returned `Punct` will have the default span of `Span::call_site()` + /// which can be further configured with the `set_span` method below. + pub fn new(ch: char, spacing: Spacing) -> Self { + if let '!' | '#' | '$' | '%' | '&' | '\'' | '*' | '+' | ',' | '-' | '.' | '/' | ':' | ';' + | '<' | '=' | '>' | '?' | '@' | '^' | '|' | '~' = ch + { + Punct { + ch, + spacing, + span: Span::call_site(), + } + } else { + panic!("unsupported proc macro punctuation character {:?}", ch); + } + } + + /// Returns the value of this punctuation character as `char`. + pub fn as_char(&self) -> char { + self.ch + } + + /// Returns the spacing of this punctuation character, indicating whether + /// it's immediately followed by another `Punct` in the token stream, so + /// they can potentially be combined into a multicharacter operator + /// (`Joint`), or it's followed by some other token or whitespace (`Alone`) + /// so the operator has certainly ended. + pub fn spacing(&self) -> Spacing { + self.spacing + } + + /// Returns the span for this punctuation character. + pub fn span(&self) -> Span { + self.span + } + + /// Configure the span for this punctuation character. + pub fn set_span(&mut self, span: Span) { + self.span = span; + } +} + +/// Prints the punctuation character as a string that should be losslessly +/// convertible back into the same character. +impl Display for Punct { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + Display::fmt(&self.ch, f) + } +} + +impl Debug for Punct { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + let mut debug = fmt.debug_struct("Punct"); + debug.field("char", &self.ch); + debug.field("spacing", &self.spacing); + imp::debug_span_field_if_nontrivial(&mut debug, self.span.inner); + debug.finish() + } +} + +/// A word of Rust code, which may be a keyword or legal variable name. +/// +/// An identifier consists of at least one Unicode code point, the first of +/// which has the XID_Start property and the rest of which have the XID_Continue +/// property. +/// +/// - The empty string is not an identifier. Use `Option`. +/// - A lifetime is not an identifier. Use `syn::Lifetime` instead. +/// +/// An identifier constructed with `Ident::new` is permitted to be a Rust +/// keyword, though parsing one through its [`Parse`] implementation rejects +/// Rust keywords. Use `input.call(Ident::parse_any)` when parsing to match the +/// behaviour of `Ident::new`. +/// +/// [`Parse`]: https://docs.rs/syn/2.0/syn/parse/trait.Parse.html +/// +/// # Examples +/// +/// A new ident can be created from a string using the `Ident::new` function. +/// A span must be provided explicitly which governs the name resolution +/// behavior of the resulting identifier. +/// +/// ``` +/// use proc_macro2::{Ident, Span}; +/// +/// fn main() { +/// let call_ident = Ident::new("calligraphy", Span::call_site()); +/// +/// println!("{}", call_ident); +/// } +/// ``` +/// +/// An ident can be interpolated into a token stream using the `quote!` macro. +/// +/// ``` +/// use proc_macro2::{Ident, Span}; +/// use quote::quote; +/// +/// fn main() { +/// let ident = Ident::new("demo", Span::call_site()); +/// +/// // Create a variable binding whose name is this ident. +/// let expanded = quote! 
{ let #ident = 10; }; +/// +/// // Create a variable binding with a slightly different name. +/// let temp_ident = Ident::new(&format!("new_{}", ident), Span::call_site()); +/// let expanded = quote! { let #temp_ident = 10; }; +/// } +/// ``` +/// +/// A string representation of the ident is available through the `to_string()` +/// method. +/// +/// ``` +/// # use proc_macro2::{Ident, Span}; +/// # +/// # let ident = Ident::new("another_identifier", Span::call_site()); +/// # +/// // Examine the ident as a string. +/// let ident_string = ident.to_string(); +/// if ident_string.len() > 60 { +/// println!("Very long identifier: {}", ident_string) +/// } +/// ``` +#[derive(Clone)] +pub struct Ident { + inner: imp::Ident, + _marker: ProcMacroAutoTraits, +} + +impl Ident { + fn _new(inner: imp::Ident) -> Self { + Ident { + inner, + _marker: MARKER, + } + } + + fn _new_fallback(inner: fallback::Ident) -> Self { + Ident { + inner: imp::Ident::from(inner), + _marker: MARKER, + } + } + + /// Creates a new `Ident` with the given `string` as well as the specified + /// `span`. + /// + /// The `string` argument must be a valid identifier permitted by the + /// language, otherwise the function will panic. + /// + /// Note that `span`, currently in rustc, configures the hygiene information + /// for this identifier. + /// + /// As of this time `Span::call_site()` explicitly opts-in to "call-site" + /// hygiene meaning that identifiers created with this span will be resolved + /// as if they were written directly at the location of the macro call, and + /// other code at the macro call site will be able to refer to them as well. + /// + /// Later spans like `Span::def_site()` will allow to opt-in to + /// "definition-site" hygiene meaning that identifiers created with this + /// span will be resolved at the location of the macro definition and other + /// code at the macro call site will not be able to refer to them. + /// + /// Due to the current importance of hygiene this constructor, unlike other + /// tokens, requires a `Span` to be specified at construction. + /// + /// # Panics + /// + /// Panics if the input string is neither a keyword nor a legal variable + /// name. If you are not sure whether the string contains an identifier and + /// need to handle an error case, use + /// syn::parse_str::<Ident> + /// rather than `Ident::new`. + #[track_caller] + pub fn new(string: &str, span: Span) -> Self { + Ident::_new(imp::Ident::new_checked(string, span.inner)) + } + + /// Same as `Ident::new`, but creates a raw identifier (`r#ident`). The + /// `string` argument must be a valid identifier permitted by the language + /// (including keywords, e.g. `fn`). Keywords which are usable in path + /// segments (e.g. `self`, `super`) are not supported, and will cause a + /// panic. + #[track_caller] + pub fn new_raw(string: &str, span: Span) -> Self { + Ident::_new(imp::Ident::new_raw_checked(string, span.inner)) + } + + /// Returns the span of this `Ident`. + pub fn span(&self) -> Span { + Span::_new(self.inner.span()) + } + + /// Configures the span of this `Ident`, possibly changing its hygiene + /// context. 
+ pub fn set_span(&mut self, span: Span) { + self.inner.set_span(span.inner); + } +} + +impl PartialEq for Ident { + fn eq(&self, other: &Ident) -> bool { + self.inner == other.inner + } +} + +impl PartialEq for Ident +where + T: ?Sized + AsRef, +{ + fn eq(&self, other: &T) -> bool { + self.inner == other + } +} + +impl Eq for Ident {} + +impl PartialOrd for Ident { + fn partial_cmp(&self, other: &Ident) -> Option { + Some(self.cmp(other)) + } +} + +impl Ord for Ident { + fn cmp(&self, other: &Ident) -> Ordering { + self.to_string().cmp(&other.to_string()) + } +} + +impl Hash for Ident { + fn hash(&self, hasher: &mut H) { + self.to_string().hash(hasher); + } +} + +/// Prints the identifier as a string that should be losslessly convertible back +/// into the same identifier. +impl Display for Ident { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + Display::fmt(&self.inner, f) + } +} + +impl Debug for Ident { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + Debug::fmt(&self.inner, f) + } +} + +/// A literal string (`"hello"`), byte string (`b"hello"`), character (`'a'`), +/// byte character (`b'a'`), an integer or floating point number with or without +/// a suffix (`1`, `1u8`, `2.3`, `2.3f32`). +/// +/// Boolean literals like `true` and `false` do not belong here, they are +/// `Ident`s. +#[derive(Clone)] +pub struct Literal { + inner: imp::Literal, + _marker: ProcMacroAutoTraits, +} + +macro_rules! suffixed_int_literals { + ($($name:ident => $kind:ident,)*) => ($( + /// Creates a new suffixed integer literal with the specified value. + /// + /// This function will create an integer like `1u32` where the integer + /// value specified is the first part of the token and the integral is + /// also suffixed at the end. Literals created from negative numbers may + /// not survive roundtrips through `TokenStream` or strings and may be + /// broken into two tokens (`-` and positive literal). + /// + /// Literals created through this method have the `Span::call_site()` + /// span by default, which can be configured with the `set_span` method + /// below. + pub fn $name(n: $kind) -> Literal { + Literal::_new(imp::Literal::$name(n)) + } + )*) +} + +macro_rules! unsuffixed_int_literals { + ($($name:ident => $kind:ident,)*) => ($( + /// Creates a new unsuffixed integer literal with the specified value. + /// + /// This function will create an integer like `1` where the integer + /// value specified is the first part of the token. No suffix is + /// specified on this token, meaning that invocations like + /// `Literal::i8_unsuffixed(1)` are equivalent to + /// `Literal::u32_unsuffixed(1)`. Literals created from negative numbers + /// may not survive roundtrips through `TokenStream` or strings and may + /// be broken into two tokens (`-` and positive literal). + /// + /// Literals created through this method have the `Span::call_site()` + /// span by default, which can be configured with the `set_span` method + /// below. + pub fn $name(n: $kind) -> Literal { + Literal::_new(imp::Literal::$name(n)) + } + )*) +} + +impl Literal { + fn _new(inner: imp::Literal) -> Self { + Literal { + inner, + _marker: MARKER, + } + } + + fn _new_fallback(inner: fallback::Literal) -> Self { + Literal { + inner: imp::Literal::from(inner), + _marker: MARKER, + } + } + + suffixed_int_literals! 
{ + u8_suffixed => u8, + u16_suffixed => u16, + u32_suffixed => u32, + u64_suffixed => u64, + u128_suffixed => u128, + usize_suffixed => usize, + i8_suffixed => i8, + i16_suffixed => i16, + i32_suffixed => i32, + i64_suffixed => i64, + i128_suffixed => i128, + isize_suffixed => isize, + } + + unsuffixed_int_literals! { + u8_unsuffixed => u8, + u16_unsuffixed => u16, + u32_unsuffixed => u32, + u64_unsuffixed => u64, + u128_unsuffixed => u128, + usize_unsuffixed => usize, + i8_unsuffixed => i8, + i16_unsuffixed => i16, + i32_unsuffixed => i32, + i64_unsuffixed => i64, + i128_unsuffixed => i128, + isize_unsuffixed => isize, + } + + /// Creates a new unsuffixed floating-point literal. + /// + /// This constructor is similar to those like `Literal::i8_unsuffixed` where + /// the float's value is emitted directly into the token but no suffix is + /// used, so it may be inferred to be a `f64` later in the compiler. + /// Literals created from negative numbers may not survive round-trips + /// through `TokenStream` or strings and may be broken into two tokens (`-` + /// and positive literal). + /// + /// # Panics + /// + /// This function requires that the specified float is finite, for example + /// if it is infinity or NaN this function will panic. + pub fn f64_unsuffixed(f: f64) -> Literal { + assert!(f.is_finite()); + Literal::_new(imp::Literal::f64_unsuffixed(f)) + } + + /// Creates a new suffixed floating-point literal. + /// + /// This constructor will create a literal like `1.0f64` where the value + /// specified is the preceding part of the token and `f64` is the suffix of + /// the token. This token will always be inferred to be an `f64` in the + /// compiler. Literals created from negative numbers may not survive + /// round-trips through `TokenStream` or strings and may be broken into two + /// tokens (`-` and positive literal). + /// + /// # Panics + /// + /// This function requires that the specified float is finite, for example + /// if it is infinity or NaN this function will panic. + pub fn f64_suffixed(f: f64) -> Literal { + assert!(f.is_finite()); + Literal::_new(imp::Literal::f64_suffixed(f)) + } + + /// Creates a new unsuffixed floating-point literal. + /// + /// This constructor is similar to those like `Literal::i8_unsuffixed` where + /// the float's value is emitted directly into the token but no suffix is + /// used, so it may be inferred to be a `f64` later in the compiler. + /// Literals created from negative numbers may not survive round-trips + /// through `TokenStream` or strings and may be broken into two tokens (`-` + /// and positive literal). + /// + /// # Panics + /// + /// This function requires that the specified float is finite, for example + /// if it is infinity or NaN this function will panic. + pub fn f32_unsuffixed(f: f32) -> Literal { + assert!(f.is_finite()); + Literal::_new(imp::Literal::f32_unsuffixed(f)) + } + + /// Creates a new suffixed floating-point literal. + /// + /// This constructor will create a literal like `1.0f32` where the value + /// specified is the preceding part of the token and `f32` is the suffix of + /// the token. This token will always be inferred to be an `f32` in the + /// compiler. Literals created from negative numbers may not survive + /// round-trips through `TokenStream` or strings and may be broken into two + /// tokens (`-` and positive literal). + /// + /// # Panics + /// + /// This function requires that the specified float is finite, for example + /// if it is infinity or NaN this function will panic. 
+ pub fn f32_suffixed(f: f32) -> Literal { + assert!(f.is_finite()); + Literal::_new(imp::Literal::f32_suffixed(f)) + } + + /// String literal. + pub fn string(string: &str) -> Literal { + Literal::_new(imp::Literal::string(string)) + } + + /// Character literal. + pub fn character(ch: char) -> Literal { + Literal::_new(imp::Literal::character(ch)) + } + + /// Byte character literal. + pub fn byte_character(byte: u8) -> Literal { + Literal::_new(imp::Literal::byte_character(byte)) + } + + /// Byte string literal. + pub fn byte_string(bytes: &[u8]) -> Literal { + Literal::_new(imp::Literal::byte_string(bytes)) + } + + /// C string literal. + pub fn c_string(string: &CStr) -> Literal { + Literal::_new(imp::Literal::c_string(string)) + } + + /// Returns the span encompassing this literal. + pub fn span(&self) -> Span { + Span::_new(self.inner.span()) + } + + /// Configures the span associated for this literal. + pub fn set_span(&mut self, span: Span) { + self.inner.set_span(span.inner); + } + + /// Returns a `Span` that is a subset of `self.span()` containing only + /// the source bytes in range `range`. Returns `None` if the would-be + /// trimmed span is outside the bounds of `self`. + /// + /// Warning: the underlying [`proc_macro::Literal::subspan`] method is + /// nightly-only. When called from within a procedural macro not using a + /// nightly compiler, this method will always return `None`. + pub fn subspan>(&self, range: R) -> Option { + self.inner.subspan(range).map(Span::_new) + } + + /// Returns the unescaped string value if this is a string literal. + #[cfg(procmacro2_semver_exempt)] + pub fn str_value(&self) -> Result { + let repr = self.to_string(); + + if repr.starts_with('"') && repr[1..].ends_with('"') { + let quoted = &repr[1..repr.len() - 1]; + let mut value = String::with_capacity(quoted.len()); + let mut error = None; + rustc_literal_escaper::unescape_str(quoted, |_range, res| match res { + Ok(ch) => value.push(ch), + Err(err) => { + if err.is_fatal() { + error = Some(ConversionErrorKind::FailedToUnescape(err)); + } + } + }); + return match error { + Some(error) => Err(error), + None => Ok(value), + }; + } + + if repr.starts_with('r') { + if let Some(raw) = get_raw(&repr[1..]) { + return Ok(raw.to_owned()); + } + } + + Err(ConversionErrorKind::InvalidLiteralKind) + } + + /// Returns the unescaped string value (including nul terminator) if this is + /// a c-string literal. + #[cfg(procmacro2_semver_exempt)] + pub fn cstr_value(&self) -> Result, ConversionErrorKind> { + let repr = self.to_string(); + + if repr.starts_with("c\"") && repr[2..].ends_with('"') { + let quoted = &repr[2..repr.len() - 1]; + let mut value = Vec::with_capacity(quoted.len()); + let mut error = None; + rustc_literal_escaper::unescape_c_str(quoted, |_range, res| match res { + Ok(MixedUnit::Char(ch)) => { + value.extend_from_slice(ch.get().encode_utf8(&mut [0; 4]).as_bytes()); + } + Ok(MixedUnit::HighByte(byte)) => value.push(byte.get()), + Err(err) => { + if err.is_fatal() { + error = Some(ConversionErrorKind::FailedToUnescape(err)); + } + } + }); + return match error { + Some(error) => Err(error), + None => { + value.push(b'\0'); + Ok(value) + } + }; + } + + if repr.starts_with("cr") { + if let Some(raw) = get_raw(&repr[2..]) { + let mut value = Vec::with_capacity(raw.len() + 1); + value.extend_from_slice(raw.as_bytes()); + value.push(b'\0'); + return Ok(value); + } + } + + Err(ConversionErrorKind::InvalidLiteralKind) + } + + /// Returns the unescaped string value if this is a byte string literal. 
+ #[cfg(procmacro2_semver_exempt)] + pub fn byte_str_value(&self) -> Result, ConversionErrorKind> { + let repr = self.to_string(); + + if repr.starts_with("b\"") && repr[2..].ends_with('"') { + let quoted = &repr[2..repr.len() - 1]; + let mut value = Vec::with_capacity(quoted.len()); + let mut error = None; + rustc_literal_escaper::unescape_byte_str(quoted, |_range, res| match res { + Ok(byte) => value.push(byte), + Err(err) => { + if err.is_fatal() { + error = Some(ConversionErrorKind::FailedToUnescape(err)); + } + } + }); + return match error { + Some(error) => Err(error), + None => Ok(value), + }; + } + + if repr.starts_with("br") { + if let Some(raw) = get_raw(&repr[2..]) { + return Ok(raw.as_bytes().to_owned()); + } + } + + Err(ConversionErrorKind::InvalidLiteralKind) + } + + // Intended for the `quote!` macro to use when constructing a proc-macro2 + // token out of a macro_rules $:literal token, which is already known to be + // a valid literal. This avoids reparsing/validating the literal's string + // representation. This is not public API other than for quote. + #[doc(hidden)] + pub unsafe fn from_str_unchecked(repr: &str) -> Self { + Literal::_new(unsafe { imp::Literal::from_str_unchecked(repr) }) + } +} + +impl FromStr for Literal { + type Err = LexError; + + fn from_str(repr: &str) -> Result { + match imp::Literal::from_str_checked(repr) { + Ok(lit) => Ok(Literal::_new(lit)), + Err(lex) => Err(LexError { + inner: lex, + _marker: MARKER, + }), + } + } +} + +impl Debug for Literal { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + Debug::fmt(&self.inner, f) + } +} + +impl Display for Literal { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + Display::fmt(&self.inner, f) + } +} + +/// Error when retrieving a string literal's unescaped value. +#[cfg(procmacro2_semver_exempt)] +#[derive(Debug, PartialEq, Eq)] +pub enum ConversionErrorKind { + /// The literal is of the right string kind, but its contents are malformed + /// in a way that cannot be unescaped to a value. + FailedToUnescape(EscapeError), + /// The literal is not of the string kind whose value was requested, for + /// example byte string vs UTF-8 string. + InvalidLiteralKind, +} + +// ###"..."### -> ... +#[cfg(procmacro2_semver_exempt)] +fn get_raw(repr: &str) -> Option<&str> { + let pounds = repr.len() - repr.trim_start_matches('#').len(); + if repr.len() >= pounds + 1 + 1 + pounds + && repr[pounds..].starts_with('"') + && repr.trim_end_matches('#').len() + pounds == repr.len() + && repr[..repr.len() - pounds].ends_with('"') + { + Some(&repr[pounds + 1..repr.len() - pounds - 1]) + } else { + None + } +} + +/// Public implementation details for the `TokenStream` type, such as iterators. +pub mod token_stream { + use crate::marker::{ProcMacroAutoTraits, MARKER}; + use crate::{imp, TokenTree}; + use core::fmt::{self, Debug}; + + pub use crate::TokenStream; + + /// An iterator over `TokenStream`'s `TokenTree`s. + /// + /// The iteration is "shallow", e.g. the iterator doesn't recurse into + /// delimited groups, and returns whole groups as token trees. 
+ #[derive(Clone)] + pub struct IntoIter { + inner: imp::TokenTreeIter, + _marker: ProcMacroAutoTraits, + } + + impl Iterator for IntoIter { + type Item = TokenTree; + + fn next(&mut self) -> Option { + self.inner.next() + } + + fn size_hint(&self) -> (usize, Option) { + self.inner.size_hint() + } + } + + impl Debug for IntoIter { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.write_str("TokenStream ")?; + f.debug_list().entries(self.clone()).finish() + } + } + + impl IntoIterator for TokenStream { + type Item = TokenTree; + type IntoIter = IntoIter; + + fn into_iter(self) -> IntoIter { + IntoIter { + inner: self.inner.into_iter(), + _marker: MARKER, + } + } + } +} diff --git a/vendor/proc-macro2/src/location.rs b/vendor/proc-macro2/src/location.rs new file mode 100644 index 00000000000000..7190e2d05255e0 --- /dev/null +++ b/vendor/proc-macro2/src/location.rs @@ -0,0 +1,29 @@ +use core::cmp::Ordering; + +/// A line-column pair representing the start or end of a `Span`. +/// +/// This type is semver exempt and not exposed by default. +#[cfg_attr(docsrs, doc(cfg(feature = "span-locations")))] +#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)] +pub struct LineColumn { + /// The 1-indexed line in the source file on which the span starts or ends + /// (inclusive). + pub line: usize, + /// The 0-indexed column (in UTF-8 characters) in the source file on which + /// the span starts or ends (inclusive). + pub column: usize, +} + +impl Ord for LineColumn { + fn cmp(&self, other: &Self) -> Ordering { + self.line + .cmp(&other.line) + .then(self.column.cmp(&other.column)) + } +} + +impl PartialOrd for LineColumn { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} diff --git a/vendor/proc-macro2/src/marker.rs b/vendor/proc-macro2/src/marker.rs new file mode 100644 index 00000000000000..23b94ce6fa853e --- /dev/null +++ b/vendor/proc-macro2/src/marker.rs @@ -0,0 +1,17 @@ +use alloc::rc::Rc; +use core::marker::PhantomData; +use core::panic::{RefUnwindSafe, UnwindSafe}; + +// Zero sized marker with the correct set of autotrait impls we want all proc +// macro types to have. 
+#[derive(Copy, Clone)] +#[cfg_attr( + all(procmacro2_semver_exempt, any(not(wrap_proc_macro), super_unstable)), + derive(PartialEq, Eq) +)] +pub(crate) struct ProcMacroAutoTraits(PhantomData>); + +pub(crate) const MARKER: ProcMacroAutoTraits = ProcMacroAutoTraits(PhantomData); + +impl UnwindSafe for ProcMacroAutoTraits {} +impl RefUnwindSafe for ProcMacroAutoTraits {} diff --git a/vendor/proc-macro2/src/num.rs b/vendor/proc-macro2/src/num.rs new file mode 100644 index 00000000000000..3ac82c8608df9a --- /dev/null +++ b/vendor/proc-macro2/src/num.rs @@ -0,0 +1,17 @@ +// TODO: use NonZero in Rust 1.89+ +#[derive(Copy, Clone, Debug, PartialEq, Eq)] +pub struct NonZeroChar(char); + +impl NonZeroChar { + pub fn new(ch: char) -> Option { + if ch == '\0' { + None + } else { + Some(NonZeroChar(ch)) + } + } + + pub fn get(self) -> char { + self.0 + } +} diff --git a/vendor/proc-macro2/src/parse.rs b/vendor/proc-macro2/src/parse.rs new file mode 100644 index 00000000000000..b8be403f842f2c --- /dev/null +++ b/vendor/proc-macro2/src/parse.rs @@ -0,0 +1,995 @@ +use crate::fallback::{ + self, is_ident_continue, is_ident_start, Group, Ident, LexError, Literal, Span, TokenStream, + TokenStreamBuilder, +}; +use crate::{Delimiter, Punct, Spacing, TokenTree}; +use core::char; +use core::str::{Bytes, CharIndices, Chars}; + +#[derive(Copy, Clone, Eq, PartialEq)] +pub(crate) struct Cursor<'a> { + pub(crate) rest: &'a str, + #[cfg(span_locations)] + pub(crate) off: u32, +} + +impl<'a> Cursor<'a> { + pub(crate) fn advance(&self, bytes: usize) -> Cursor<'a> { + let (_front, rest) = self.rest.split_at(bytes); + Cursor { + rest, + #[cfg(span_locations)] + off: self.off + _front.chars().count() as u32, + } + } + + pub(crate) fn starts_with(&self, s: &str) -> bool { + self.rest.starts_with(s) + } + + pub(crate) fn starts_with_char(&self, ch: char) -> bool { + self.rest.starts_with(ch) + } + + pub(crate) fn starts_with_fn(&self, f: Pattern) -> bool + where + Pattern: FnMut(char) -> bool, + { + self.rest.starts_with(f) + } + + pub(crate) fn is_empty(&self) -> bool { + self.rest.is_empty() + } + + fn len(&self) -> usize { + self.rest.len() + } + + fn as_bytes(&self) -> &'a [u8] { + self.rest.as_bytes() + } + + fn bytes(&self) -> Bytes<'a> { + self.rest.bytes() + } + + fn chars(&self) -> Chars<'a> { + self.rest.chars() + } + + fn char_indices(&self) -> CharIndices<'a> { + self.rest.char_indices() + } + + fn parse(&self, tag: &str) -> Result, Reject> { + if self.starts_with(tag) { + Ok(self.advance(tag.len())) + } else { + Err(Reject) + } + } +} + +pub(crate) struct Reject; +type PResult<'a, O> = Result<(Cursor<'a>, O), Reject>; + +fn skip_whitespace(input: Cursor) -> Cursor { + let mut s = input; + + while !s.is_empty() { + let byte = s.as_bytes()[0]; + if byte == b'/' { + if s.starts_with("//") + && (!s.starts_with("///") || s.starts_with("////")) + && !s.starts_with("//!") + { + let (cursor, _) = take_until_newline_or_eof(s); + s = cursor; + continue; + } else if s.starts_with("/**/") { + s = s.advance(4); + continue; + } else if s.starts_with("/*") + && (!s.starts_with("/**") || s.starts_with("/***")) + && !s.starts_with("/*!") + { + match block_comment(s) { + Ok((rest, _)) => { + s = rest; + continue; + } + Err(Reject) => return s, + } + } + } + match byte { + b' ' | 0x09..=0x0d => { + s = s.advance(1); + continue; + } + b if b.is_ascii() => {} + _ => { + let ch = s.chars().next().unwrap(); + if is_whitespace(ch) { + s = s.advance(ch.len_utf8()); + continue; + } + } + } + return s; + } + s +} + +fn 
block_comment(input: Cursor) -> PResult<&str> { + if !input.starts_with("/*") { + return Err(Reject); + } + + let mut depth = 0usize; + let bytes = input.as_bytes(); + let mut i = 0usize; + let upper = bytes.len() - 1; + + while i < upper { + if bytes[i] == b'/' && bytes[i + 1] == b'*' { + depth += 1; + i += 1; // eat '*' + } else if bytes[i] == b'*' && bytes[i + 1] == b'/' { + depth -= 1; + if depth == 0 { + return Ok((input.advance(i + 2), &input.rest[..i + 2])); + } + i += 1; // eat '/' + } + i += 1; + } + + Err(Reject) +} + +fn is_whitespace(ch: char) -> bool { + // Rust treats left-to-right mark and right-to-left mark as whitespace + ch.is_whitespace() || ch == '\u{200e}' || ch == '\u{200f}' +} + +fn word_break(input: Cursor) -> Result { + match input.chars().next() { + Some(ch) if is_ident_continue(ch) => Err(Reject), + Some(_) | None => Ok(input), + } +} + +// Rustc's representation of a macro expansion error in expression position or +// type position. +const ERROR: &str = "(/*ERROR*/)"; + +pub(crate) fn token_stream(mut input: Cursor) -> Result { + let mut trees = TokenStreamBuilder::new(); + let mut stack = Vec::new(); + + loop { + input = skip_whitespace(input); + + if let Ok((rest, ())) = doc_comment(input, &mut trees) { + input = rest; + continue; + } + + #[cfg(span_locations)] + let lo = input.off; + + let first = match input.bytes().next() { + Some(first) => first, + None => match stack.last() { + None => return Ok(trees.build()), + #[cfg(span_locations)] + Some((lo, _frame)) => { + return Err(LexError { + span: Span { lo: *lo, hi: *lo }, + }) + } + #[cfg(not(span_locations))] + Some(_frame) => return Err(LexError { span: Span {} }), + }, + }; + + if let Some(open_delimiter) = match first { + b'(' if !input.starts_with(ERROR) => Some(Delimiter::Parenthesis), + b'[' => Some(Delimiter::Bracket), + b'{' => Some(Delimiter::Brace), + _ => None, + } { + input = input.advance(1); + let frame = (open_delimiter, trees); + #[cfg(span_locations)] + let frame = (lo, frame); + stack.push(frame); + trees = TokenStreamBuilder::new(); + } else if let Some(close_delimiter) = match first { + b')' => Some(Delimiter::Parenthesis), + b']' => Some(Delimiter::Bracket), + b'}' => Some(Delimiter::Brace), + _ => None, + } { + let frame = match stack.pop() { + Some(frame) => frame, + None => return Err(lex_error(input)), + }; + #[cfg(span_locations)] + let (lo, frame) = frame; + let (open_delimiter, outer) = frame; + if open_delimiter != close_delimiter { + return Err(lex_error(input)); + } + input = input.advance(1); + let mut g = Group::new(open_delimiter, trees.build()); + g.set_span(Span { + #[cfg(span_locations)] + lo, + #[cfg(span_locations)] + hi: input.off, + }); + trees = outer; + trees.push_token_from_parser(TokenTree::Group(crate::Group::_new_fallback(g))); + } else { + let (rest, mut tt) = match leaf_token(input) { + Ok((rest, tt)) => (rest, tt), + Err(Reject) => return Err(lex_error(input)), + }; + tt.set_span(crate::Span::_new_fallback(Span { + #[cfg(span_locations)] + lo, + #[cfg(span_locations)] + hi: rest.off, + })); + trees.push_token_from_parser(tt); + input = rest; + } + } +} + +fn lex_error(cursor: Cursor) -> LexError { + #[cfg(not(span_locations))] + let _ = cursor; + LexError { + span: Span { + #[cfg(span_locations)] + lo: cursor.off, + #[cfg(span_locations)] + hi: cursor.off, + }, + } +} + +fn leaf_token(input: Cursor) -> PResult { + if let Ok((input, l)) = literal(input) { + // must be parsed before ident + Ok((input, TokenTree::Literal(crate::Literal::_new_fallback(l)))) + } 
else if let Ok((input, p)) = punct(input) { + Ok((input, TokenTree::Punct(p))) + } else if let Ok((input, i)) = ident(input) { + Ok((input, TokenTree::Ident(i))) + } else if input.starts_with(ERROR) { + let rest = input.advance(ERROR.len()); + let repr = crate::Literal::_new_fallback(Literal::_new(ERROR.to_owned())); + Ok((rest, TokenTree::Literal(repr))) + } else { + Err(Reject) + } +} + +fn ident(input: Cursor) -> PResult { + if [ + "r\"", "r#\"", "r##", "b\"", "b\'", "br\"", "br#", "c\"", "cr\"", "cr#", + ] + .iter() + .any(|prefix| input.starts_with(prefix)) + { + Err(Reject) + } else { + ident_any(input) + } +} + +fn ident_any(input: Cursor) -> PResult { + let raw = input.starts_with("r#"); + let rest = input.advance((raw as usize) << 1); + + let (rest, sym) = ident_not_raw(rest)?; + + if !raw { + let ident = + crate::Ident::_new_fallback(Ident::new_unchecked(sym, fallback::Span::call_site())); + return Ok((rest, ident)); + } + + match sym { + "_" | "super" | "self" | "Self" | "crate" => return Err(Reject), + _ => {} + } + + let ident = + crate::Ident::_new_fallback(Ident::new_raw_unchecked(sym, fallback::Span::call_site())); + Ok((rest, ident)) +} + +fn ident_not_raw(input: Cursor) -> PResult<&str> { + let mut chars = input.char_indices(); + + match chars.next() { + Some((_, ch)) if is_ident_start(ch) => {} + _ => return Err(Reject), + } + + let mut end = input.len(); + for (i, ch) in chars { + if !is_ident_continue(ch) { + end = i; + break; + } + } + + Ok((input.advance(end), &input.rest[..end])) +} + +pub(crate) fn literal(input: Cursor) -> PResult { + let rest = literal_nocapture(input)?; + let end = input.len() - rest.len(); + Ok((rest, Literal::_new(input.rest[..end].to_string()))) +} + +fn literal_nocapture(input: Cursor) -> Result { + if let Ok(ok) = string(input) { + Ok(ok) + } else if let Ok(ok) = byte_string(input) { + Ok(ok) + } else if let Ok(ok) = c_string(input) { + Ok(ok) + } else if let Ok(ok) = byte(input) { + Ok(ok) + } else if let Ok(ok) = character(input) { + Ok(ok) + } else if let Ok(ok) = float(input) { + Ok(ok) + } else if let Ok(ok) = int(input) { + Ok(ok) + } else { + Err(Reject) + } +} + +fn literal_suffix(input: Cursor) -> Cursor { + match ident_not_raw(input) { + Ok((input, _)) => input, + Err(Reject) => input, + } +} + +fn string(input: Cursor) -> Result { + if let Ok(input) = input.parse("\"") { + cooked_string(input) + } else if let Ok(input) = input.parse("r") { + raw_string(input) + } else { + Err(Reject) + } +} + +fn cooked_string(mut input: Cursor) -> Result { + let mut chars = input.char_indices(); + + while let Some((i, ch)) = chars.next() { + match ch { + '"' => { + let input = input.advance(i + 1); + return Ok(literal_suffix(input)); + } + '\r' => match chars.next() { + Some((_, '\n')) => {} + _ => break, + }, + '\\' => match chars.next() { + Some((_, 'x')) => { + backslash_x_char(&mut chars)?; + } + Some((_, 'n' | 'r' | 't' | '\\' | '\'' | '"' | '0')) => {} + Some((_, 'u')) => { + backslash_u(&mut chars)?; + } + Some((newline, ch @ ('\n' | '\r'))) => { + input = input.advance(newline + 1); + trailing_backslash(&mut input, ch as u8)?; + chars = input.char_indices(); + } + _ => break, + }, + _ch => {} + } + } + Err(Reject) +} + +fn raw_string(input: Cursor) -> Result { + let (input, delimiter) = delimiter_of_raw_string(input)?; + let mut bytes = input.bytes().enumerate(); + while let Some((i, byte)) = bytes.next() { + match byte { + b'"' if input.rest[i + 1..].starts_with(delimiter) => { + let rest = input.advance(i + 1 + delimiter.len()); + 
return Ok(literal_suffix(rest)); + } + b'\r' => match bytes.next() { + Some((_, b'\n')) => {} + _ => break, + }, + _ => {} + } + } + Err(Reject) +} + +fn byte_string(input: Cursor) -> Result { + if let Ok(input) = input.parse("b\"") { + cooked_byte_string(input) + } else if let Ok(input) = input.parse("br") { + raw_byte_string(input) + } else { + Err(Reject) + } +} + +fn cooked_byte_string(mut input: Cursor) -> Result { + let mut bytes = input.bytes().enumerate(); + while let Some((offset, b)) = bytes.next() { + match b { + b'"' => { + let input = input.advance(offset + 1); + return Ok(literal_suffix(input)); + } + b'\r' => match bytes.next() { + Some((_, b'\n')) => {} + _ => break, + }, + b'\\' => match bytes.next() { + Some((_, b'x')) => { + backslash_x_byte(&mut bytes)?; + } + Some((_, b'n' | b'r' | b't' | b'\\' | b'0' | b'\'' | b'"')) => {} + Some((newline, b @ (b'\n' | b'\r'))) => { + input = input.advance(newline + 1); + trailing_backslash(&mut input, b)?; + bytes = input.bytes().enumerate(); + } + _ => break, + }, + b if b.is_ascii() => {} + _ => break, + } + } + Err(Reject) +} + +fn delimiter_of_raw_string(input: Cursor) -> PResult<&str> { + for (i, byte) in input.bytes().enumerate() { + match byte { + b'"' => { + if i > 255 { + // https://github.com/rust-lang/rust/pull/95251 + return Err(Reject); + } + return Ok((input.advance(i + 1), &input.rest[..i])); + } + b'#' => {} + _ => break, + } + } + Err(Reject) +} + +fn raw_byte_string(input: Cursor) -> Result { + let (input, delimiter) = delimiter_of_raw_string(input)?; + let mut bytes = input.bytes().enumerate(); + while let Some((i, byte)) = bytes.next() { + match byte { + b'"' if input.rest[i + 1..].starts_with(delimiter) => { + let rest = input.advance(i + 1 + delimiter.len()); + return Ok(literal_suffix(rest)); + } + b'\r' => match bytes.next() { + Some((_, b'\n')) => {} + _ => break, + }, + other => { + if !other.is_ascii() { + break; + } + } + } + } + Err(Reject) +} + +fn c_string(input: Cursor) -> Result { + if let Ok(input) = input.parse("c\"") { + cooked_c_string(input) + } else if let Ok(input) = input.parse("cr") { + raw_c_string(input) + } else { + Err(Reject) + } +} + +fn raw_c_string(input: Cursor) -> Result { + let (input, delimiter) = delimiter_of_raw_string(input)?; + let mut bytes = input.bytes().enumerate(); + while let Some((i, byte)) = bytes.next() { + match byte { + b'"' if input.rest[i + 1..].starts_with(delimiter) => { + let rest = input.advance(i + 1 + delimiter.len()); + return Ok(literal_suffix(rest)); + } + b'\r' => match bytes.next() { + Some((_, b'\n')) => {} + _ => break, + }, + b'\0' => break, + _ => {} + } + } + Err(Reject) +} + +fn cooked_c_string(mut input: Cursor) -> Result { + let mut chars = input.char_indices(); + + while let Some((i, ch)) = chars.next() { + match ch { + '"' => { + let input = input.advance(i + 1); + return Ok(literal_suffix(input)); + } + '\r' => match chars.next() { + Some((_, '\n')) => {} + _ => break, + }, + '\\' => match chars.next() { + Some((_, 'x')) => { + backslash_x_nonzero(&mut chars)?; + } + Some((_, 'n' | 'r' | 't' | '\\' | '\'' | '"')) => {} + Some((_, 'u')) => { + if backslash_u(&mut chars)? 
== '\0' { + break; + } + } + Some((newline, ch @ ('\n' | '\r'))) => { + input = input.advance(newline + 1); + trailing_backslash(&mut input, ch as u8)?; + chars = input.char_indices(); + } + _ => break, + }, + '\0' => break, + _ch => {} + } + } + Err(Reject) +} + +fn byte(input: Cursor) -> Result { + let input = input.parse("b'")?; + let mut bytes = input.bytes().enumerate(); + let ok = match bytes.next().map(|(_, b)| b) { + Some(b'\\') => match bytes.next().map(|(_, b)| b) { + Some(b'x') => backslash_x_byte(&mut bytes).is_ok(), + Some(b'n' | b'r' | b't' | b'\\' | b'0' | b'\'' | b'"') => true, + _ => false, + }, + b => b.is_some(), + }; + if !ok { + return Err(Reject); + } + let (offset, _) = bytes.next().ok_or(Reject)?; + if !input.chars().as_str().is_char_boundary(offset) { + return Err(Reject); + } + let input = input.advance(offset).parse("'")?; + Ok(literal_suffix(input)) +} + +fn character(input: Cursor) -> Result { + let input = input.parse("'")?; + let mut chars = input.char_indices(); + let ok = match chars.next().map(|(_, ch)| ch) { + Some('\\') => match chars.next().map(|(_, ch)| ch) { + Some('x') => backslash_x_char(&mut chars).is_ok(), + Some('u') => backslash_u(&mut chars).is_ok(), + Some('n' | 'r' | 't' | '\\' | '0' | '\'' | '"') => true, + _ => false, + }, + ch => ch.is_some(), + }; + if !ok { + return Err(Reject); + } + let (idx, _) = chars.next().ok_or(Reject)?; + let input = input.advance(idx).parse("'")?; + Ok(literal_suffix(input)) +} + +macro_rules! next_ch { + ($chars:ident @ $pat:pat) => { + match $chars.next() { + Some((_, ch)) => match ch { + $pat => ch, + _ => return Err(Reject), + }, + None => return Err(Reject), + } + }; +} + +fn backslash_x_char(chars: &mut I) -> Result<(), Reject> +where + I: Iterator, +{ + next_ch!(chars @ '0'..='7'); + next_ch!(chars @ '0'..='9' | 'a'..='f' | 'A'..='F'); + Ok(()) +} + +fn backslash_x_byte(chars: &mut I) -> Result<(), Reject> +where + I: Iterator, +{ + next_ch!(chars @ b'0'..=b'9' | b'a'..=b'f' | b'A'..=b'F'); + next_ch!(chars @ b'0'..=b'9' | b'a'..=b'f' | b'A'..=b'F'); + Ok(()) +} + +fn backslash_x_nonzero(chars: &mut I) -> Result<(), Reject> +where + I: Iterator, +{ + let first = next_ch!(chars @ '0'..='9' | 'a'..='f' | 'A'..='F'); + let second = next_ch!(chars @ '0'..='9' | 'a'..='f' | 'A'..='F'); + if first == '0' && second == '0' { + Err(Reject) + } else { + Ok(()) + } +} + +fn backslash_u(chars: &mut I) -> Result +where + I: Iterator, +{ + next_ch!(chars @ '{'); + let mut value = 0; + let mut len = 0; + for (_, ch) in chars { + let digit = match ch { + '0'..='9' => ch as u8 - b'0', + 'a'..='f' => 10 + ch as u8 - b'a', + 'A'..='F' => 10 + ch as u8 - b'A', + '_' if len > 0 => continue, + '}' if len > 0 => return char::from_u32(value).ok_or(Reject), + _ => break, + }; + if len == 6 { + break; + } + value *= 0x10; + value += u32::from(digit); + len += 1; + } + Err(Reject) +} + +fn trailing_backslash(input: &mut Cursor, mut last: u8) -> Result<(), Reject> { + let mut whitespace = input.bytes().enumerate(); + loop { + if last == b'\r' && whitespace.next().map_or(true, |(_, b)| b != b'\n') { + return Err(Reject); + } + match whitespace.next() { + Some((_, b @ (b' ' | b'\t' | b'\n' | b'\r'))) => { + last = b; + } + Some((offset, _)) => { + *input = input.advance(offset); + return Ok(()); + } + None => return Err(Reject), + } + } +} + +fn float(input: Cursor) -> Result { + let mut rest = float_digits(input)?; + if let Some(ch) = rest.chars().next() { + if is_ident_start(ch) { + rest = ident_not_raw(rest)?.0; + } + } + 
word_break(rest) +} + +fn float_digits(input: Cursor) -> Result { + let mut chars = input.chars().peekable(); + match chars.next() { + Some(ch) if '0' <= ch && ch <= '9' => {} + _ => return Err(Reject), + } + + let mut len = 1; + let mut has_dot = false; + let mut has_exp = false; + while let Some(&ch) = chars.peek() { + match ch { + '0'..='9' | '_' => { + chars.next(); + len += 1; + } + '.' => { + if has_dot { + break; + } + chars.next(); + if chars + .peek() + .map_or(false, |&ch| ch == '.' || is_ident_start(ch)) + { + return Err(Reject); + } + len += 1; + has_dot = true; + } + 'e' | 'E' => { + chars.next(); + len += 1; + has_exp = true; + break; + } + _ => break, + } + } + + if !(has_dot || has_exp) { + return Err(Reject); + } + + if has_exp { + let token_before_exp = if has_dot { + Ok(input.advance(len - 1)) + } else { + Err(Reject) + }; + let mut has_sign = false; + let mut has_exp_value = false; + while let Some(&ch) = chars.peek() { + match ch { + '+' | '-' => { + if has_exp_value { + break; + } + if has_sign { + return token_before_exp; + } + chars.next(); + len += 1; + has_sign = true; + } + '0'..='9' => { + chars.next(); + len += 1; + has_exp_value = true; + } + '_' => { + chars.next(); + len += 1; + } + _ => break, + } + } + if !has_exp_value { + return token_before_exp; + } + } + + Ok(input.advance(len)) +} + +fn int(input: Cursor) -> Result { + let mut rest = digits(input)?; + if let Some(ch) = rest.chars().next() { + if is_ident_start(ch) { + rest = ident_not_raw(rest)?.0; + } + } + word_break(rest) +} + +fn digits(mut input: Cursor) -> Result { + let base = if input.starts_with("0x") { + input = input.advance(2); + 16 + } else if input.starts_with("0o") { + input = input.advance(2); + 8 + } else if input.starts_with("0b") { + input = input.advance(2); + 2 + } else { + 10 + }; + + let mut len = 0; + let mut empty = true; + for b in input.bytes() { + match b { + b'0'..=b'9' => { + let digit = (b - b'0') as u64; + if digit >= base { + return Err(Reject); + } + } + b'a'..=b'f' => { + let digit = 10 + (b - b'a') as u64; + if digit >= base { + break; + } + } + b'A'..=b'F' => { + let digit = 10 + (b - b'A') as u64; + if digit >= base { + break; + } + } + b'_' => { + if empty && base == 10 { + return Err(Reject); + } + len += 1; + continue; + } + _ => break, + } + len += 1; + empty = false; + } + if empty { + Err(Reject) + } else { + Ok(input.advance(len)) + } +} + +fn punct(input: Cursor) -> PResult { + let (rest, ch) = punct_char(input)?; + if ch == '\'' { + let (after_lifetime, _ident) = ident_any(rest)?; + if after_lifetime.starts_with_char('\'') + || (after_lifetime.starts_with_char('#') && !rest.starts_with("r#")) + { + Err(Reject) + } else { + Ok((rest, Punct::new('\'', Spacing::Joint))) + } + } else { + let kind = match punct_char(rest) { + Ok(_) => Spacing::Joint, + Err(Reject) => Spacing::Alone, + }; + Ok((rest, Punct::new(ch, kind))) + } +} + +fn punct_char(input: Cursor) -> PResult { + if input.starts_with("//") || input.starts_with("/*") { + // Do not accept `/` of a comment as a punct. 
+ return Err(Reject); + } + + let mut chars = input.chars(); + let first = match chars.next() { + Some(ch) => ch, + None => { + return Err(Reject); + } + }; + let recognized = "~!@#$%^&*-=+|;:,<.>/?'"; + if recognized.contains(first) { + Ok((input.advance(first.len_utf8()), first)) + } else { + Err(Reject) + } +} + +fn doc_comment<'a>(input: Cursor<'a>, trees: &mut TokenStreamBuilder) -> PResult<'a, ()> { + #[cfg(span_locations)] + let lo = input.off; + let (rest, (comment, inner)) = doc_comment_contents(input)?; + let fallback_span = Span { + #[cfg(span_locations)] + lo, + #[cfg(span_locations)] + hi: rest.off, + }; + let span = crate::Span::_new_fallback(fallback_span); + + let mut scan_for_bare_cr = comment; + while let Some(cr) = scan_for_bare_cr.find('\r') { + let rest = &scan_for_bare_cr[cr + 1..]; + if !rest.starts_with('\n') { + return Err(Reject); + } + scan_for_bare_cr = rest; + } + + let mut pound = Punct::new('#', Spacing::Alone); + pound.set_span(span); + trees.push_token_from_parser(TokenTree::Punct(pound)); + + if inner { + let mut bang = Punct::new('!', Spacing::Alone); + bang.set_span(span); + trees.push_token_from_parser(TokenTree::Punct(bang)); + } + + let doc_ident = crate::Ident::_new_fallback(Ident::new_unchecked("doc", fallback_span)); + let mut equal = Punct::new('=', Spacing::Alone); + equal.set_span(span); + let mut literal = crate::Literal::_new_fallback(Literal::string(comment)); + literal.set_span(span); + let mut bracketed = TokenStreamBuilder::with_capacity(3); + bracketed.push_token_from_parser(TokenTree::Ident(doc_ident)); + bracketed.push_token_from_parser(TokenTree::Punct(equal)); + bracketed.push_token_from_parser(TokenTree::Literal(literal)); + let group = Group::new(Delimiter::Bracket, bracketed.build()); + let mut group = crate::Group::_new_fallback(group); + group.set_span(span); + trees.push_token_from_parser(TokenTree::Group(group)); + + Ok((rest, ())) +} + +fn doc_comment_contents(input: Cursor) -> PResult<(&str, bool)> { + if input.starts_with("//!") { + let input = input.advance(3); + let (input, s) = take_until_newline_or_eof(input); + Ok((input, (s, true))) + } else if input.starts_with("/*!") { + let (input, s) = block_comment(input)?; + Ok((input, (&s[3..s.len() - 2], true))) + } else if input.starts_with("///") { + let input = input.advance(3); + if input.starts_with_char('/') { + return Err(Reject); + } + let (input, s) = take_until_newline_or_eof(input); + Ok((input, (s, false))) + } else if input.starts_with("/**") && !input.rest[3..].starts_with('*') { + let (input, s) = block_comment(input)?; + Ok((input, (&s[3..s.len() - 2], false))) + } else { + Err(Reject) + } +} + +fn take_until_newline_or_eof(input: Cursor) -> (Cursor, &str) { + let chars = input.char_indices(); + + for (i, ch) in chars { + if ch == '\n' { + return (input.advance(i), &input.rest[..i]); + } else if ch == '\r' && input.rest[i + 1..].starts_with('\n') { + return (input.advance(i + 1), &input.rest[..i]); + } + } + + (input.advance(input.len()), input.rest) +} diff --git a/vendor/proc-macro2/src/probe.rs b/vendor/proc-macro2/src/probe.rs new file mode 100644 index 00000000000000..b67f52036218de --- /dev/null +++ b/vendor/proc-macro2/src/probe.rs @@ -0,0 +1,10 @@ +#![allow(dead_code)] + +#[cfg(proc_macro_span)] +pub(crate) mod proc_macro_span; + +#[cfg(proc_macro_span_file)] +pub(crate) mod proc_macro_span_file; + +#[cfg(proc_macro_span_location)] +pub(crate) mod proc_macro_span_location; diff --git a/vendor/proc-macro2/src/probe/proc_macro_span.rs 
b/vendor/proc-macro2/src/probe/proc_macro_span.rs
new file mode 100644
index 00000000000000..2d7d44e07708b7
--- /dev/null
+++ b/vendor/proc-macro2/src/probe/proc_macro_span.rs
@@ -0,0 +1,51 @@
+// This code exercises the surface area that we expect of Span's unstable API.
+// If the current toolchain is able to compile it, then proc-macro2 is able to
+// offer these APIs too.
+
+#![cfg_attr(procmacro2_build_probe, feature(proc_macro_span))]
+
+extern crate proc_macro;
+
+use core::ops::{Range, RangeBounds};
+use proc_macro::{Literal, Span};
+use std::path::PathBuf;
+
+pub fn byte_range(this: &Span) -> Range<usize> {
+    this.byte_range()
+}
+
+pub fn start(this: &Span) -> Span {
+    this.start()
+}
+
+pub fn end(this: &Span) -> Span {
+    this.end()
+}
+
+pub fn line(this: &Span) -> usize {
+    this.line()
+}
+
+pub fn column(this: &Span) -> usize {
+    this.column()
+}
+
+pub fn file(this: &Span) -> String {
+    this.file()
+}
+
+pub fn local_file(this: &Span) -> Option<PathBuf> {
+    this.local_file()
+}
+
+pub fn join(this: &Span, other: Span) -> Option<Span> {
+    this.join(other)
+}
+
+pub fn subspan<R: RangeBounds<usize>>(this: &Literal, range: R) -> Option<Span> {
+    this.subspan(range)
+}
+
+// Include in sccache cache key.
+#[cfg(procmacro2_build_probe)]
+const _: Option<&str> = option_env!("RUSTC_BOOTSTRAP");
diff --git a/vendor/proc-macro2/src/probe/proc_macro_span_file.rs b/vendor/proc-macro2/src/probe/proc_macro_span_file.rs
new file mode 100644
index 00000000000000..8b76bdf5007b91
--- /dev/null
+++ b/vendor/proc-macro2/src/probe/proc_macro_span_file.rs
@@ -0,0 +1,14 @@
+// The subset of Span's API stabilized in Rust 1.88.
+
+extern crate proc_macro;
+
+use proc_macro::Span;
+use std::path::PathBuf;
+
+pub fn file(this: &Span) -> String {
+    this.file()
+}
+
+pub fn local_file(this: &Span) -> Option<PathBuf> {
+    this.local_file()
+}
diff --git a/vendor/proc-macro2/src/probe/proc_macro_span_location.rs b/vendor/proc-macro2/src/probe/proc_macro_span_location.rs
new file mode 100644
index 00000000000000..79da34af54afea
--- /dev/null
+++ b/vendor/proc-macro2/src/probe/proc_macro_span_location.rs
@@ -0,0 +1,21 @@
+// The subset of Span's API stabilized in Rust 1.88.
+ +extern crate proc_macro; + +use proc_macro::Span; + +pub fn start(this: &Span) -> Span { + this.start() +} + +pub fn end(this: &Span) -> Span { + this.end() +} + +pub fn line(this: &Span) -> usize { + this.line() +} + +pub fn column(this: &Span) -> usize { + this.column() +} diff --git a/vendor/proc-macro2/src/rcvec.rs b/vendor/proc-macro2/src/rcvec.rs new file mode 100644 index 00000000000000..23edc77d597f91 --- /dev/null +++ b/vendor/proc-macro2/src/rcvec.rs @@ -0,0 +1,146 @@ +use alloc::rc::Rc; +use alloc::vec; +use core::mem; +use core::panic::RefUnwindSafe; +use core::slice; + +pub(crate) struct RcVec { + inner: Rc>, +} + +pub(crate) struct RcVecBuilder { + inner: Vec, +} + +pub(crate) struct RcVecMut<'a, T> { + inner: &'a mut Vec, +} + +#[derive(Clone)] +pub(crate) struct RcVecIntoIter { + inner: vec::IntoIter, +} + +impl RcVec { + pub(crate) fn is_empty(&self) -> bool { + self.inner.is_empty() + } + + pub(crate) fn len(&self) -> usize { + self.inner.len() + } + + pub(crate) fn iter(&self) -> slice::Iter { + self.inner.iter() + } + + pub(crate) fn make_mut(&mut self) -> RcVecMut + where + T: Clone, + { + RcVecMut { + inner: Rc::make_mut(&mut self.inner), + } + } + + pub(crate) fn get_mut(&mut self) -> Option> { + let inner = Rc::get_mut(&mut self.inner)?; + Some(RcVecMut { inner }) + } + + pub(crate) fn make_owned(mut self) -> RcVecBuilder + where + T: Clone, + { + let vec = if let Some(owned) = Rc::get_mut(&mut self.inner) { + mem::take(owned) + } else { + Vec::clone(&self.inner) + }; + RcVecBuilder { inner: vec } + } +} + +impl RcVecBuilder { + pub(crate) fn new() -> Self { + RcVecBuilder { inner: Vec::new() } + } + + pub(crate) fn with_capacity(cap: usize) -> Self { + RcVecBuilder { + inner: Vec::with_capacity(cap), + } + } + + pub(crate) fn push(&mut self, element: T) { + self.inner.push(element); + } + + pub(crate) fn extend(&mut self, iter: impl IntoIterator) { + self.inner.extend(iter); + } + + pub(crate) fn as_mut(&mut self) -> RcVecMut { + RcVecMut { + inner: &mut self.inner, + } + } + + pub(crate) fn build(self) -> RcVec { + RcVec { + inner: Rc::new(self.inner), + } + } +} + +impl<'a, T> RcVecMut<'a, T> { + pub(crate) fn push(&mut self, element: T) { + self.inner.push(element); + } + + pub(crate) fn extend(&mut self, iter: impl IntoIterator) { + self.inner.extend(iter); + } + + pub(crate) fn as_mut(&mut self) -> RcVecMut { + RcVecMut { inner: self.inner } + } + + pub(crate) fn take(self) -> RcVecBuilder { + let vec = mem::take(self.inner); + RcVecBuilder { inner: vec } + } +} + +impl Clone for RcVec { + fn clone(&self) -> Self { + RcVec { + inner: Rc::clone(&self.inner), + } + } +} + +impl IntoIterator for RcVecBuilder { + type Item = T; + type IntoIter = RcVecIntoIter; + + fn into_iter(self) -> Self::IntoIter { + RcVecIntoIter { + inner: self.inner.into_iter(), + } + } +} + +impl Iterator for RcVecIntoIter { + type Item = T; + + fn next(&mut self) -> Option { + self.inner.next() + } + + fn size_hint(&self) -> (usize, Option) { + self.inner.size_hint() + } +} + +impl RefUnwindSafe for RcVec where T: RefUnwindSafe {} diff --git a/vendor/proc-macro2/src/rustc_literal_escaper.rs b/vendor/proc-macro2/src/rustc_literal_escaper.rs new file mode 100644 index 00000000000000..8233e5d6565db8 --- /dev/null +++ b/vendor/proc-macro2/src/rustc_literal_escaper.rs @@ -0,0 +1,701 @@ +// Vendored from rustc-literal-escaper v0.0.5 +// https://github.com/rust-lang/literal-escaper/tree/v0.0.5 + +//! Utilities for validating (raw) string, char, and byte literals and +//! 
turning escape sequences into the values they represent. + +use crate::num::NonZeroChar; +use std::ffi::CStr; +use std::num::NonZeroU8; +use std::ops::Range; +use std::str::Chars; + +/// Errors and warnings that can occur during string, char, and byte unescaping. +/// +/// Mostly relating to malformed escape sequences, but also a few other problems. +#[derive(Debug, PartialEq, Eq)] +pub enum EscapeError { + /// Expected 1 char, but 0 were found. + ZeroChars, + /// Expected 1 char, but more than 1 were found. + MoreThanOneChar, + + /// Escaped '\' character without continuation. + LoneSlash, + /// Invalid escape character (e.g. '\z'). + InvalidEscape, + /// Raw '\r' encountered. + BareCarriageReturn, + /// Raw '\r' encountered in raw string. + BareCarriageReturnInRawString, + /// Unescaped character that was expected to be escaped (e.g. raw '\t'). + EscapeOnlyChar, + + /// Numeric character escape is too short (e.g. '\x1'). + TooShortHexEscape, + /// Invalid character in numeric escape (e.g. '\xz') + InvalidCharInHexEscape, + /// Character code in numeric escape is non-ascii (e.g. '\xFF'). + OutOfRangeHexEscape, + + /// '\u' not followed by '{'. + NoBraceInUnicodeEscape, + /// Non-hexadecimal value in '\u{..}'. + InvalidCharInUnicodeEscape, + /// '\u{}' + EmptyUnicodeEscape, + /// No closing brace in '\u{..}', e.g. '\u{12'. + UnclosedUnicodeEscape, + /// '\u{_12}' + LeadingUnderscoreUnicodeEscape, + /// More than 6 characters in '\u{..}', e.g. '\u{10FFFF_FF}' + OverlongUnicodeEscape, + /// Invalid in-bound unicode character code, e.g. '\u{DFFF}'. + LoneSurrogateUnicodeEscape, + /// Out of bounds unicode character code, e.g. '\u{FFFFFF}'. + OutOfRangeUnicodeEscape, + + /// Unicode escape code in byte literal. + UnicodeEscapeInByte, + /// Non-ascii character in byte literal, byte string literal, or raw byte string literal. + NonAsciiCharInByte, + + /// `\0` in a C string literal. + NulInCStr, + + /// After a line ending with '\', the next line contains whitespace + /// characters that are not skipped. + UnskippedWhitespaceWarning, + + /// After a line ending with '\', multiple lines are skipped. + MultipleSkippedLinesWarning, +} + +impl EscapeError { + /// Returns true for actual errors, as opposed to warnings. + pub fn is_fatal(&self) -> bool { + !matches!( + self, + EscapeError::UnskippedWhitespaceWarning | EscapeError::MultipleSkippedLinesWarning + ) + } +} + +/// Check a raw string literal for validity +/// +/// Takes the contents of a raw string literal (without quotes) +/// and produces a sequence of characters or errors, +/// which are returned by invoking `callback`. +/// NOTE: Does no escaping, but produces errors for bare carriage return ('\r'). +pub fn check_raw_str(src: &str, callback: impl FnMut(Range, Result)) { + str::check_raw(src, callback); +} + +/// Check a raw byte string literal for validity +/// +/// Takes the contents of a raw byte string literal (without quotes) +/// and produces a sequence of bytes or errors, +/// which are returned by invoking `callback`. +/// NOTE: Does no escaping, but produces errors for bare carriage return ('\r'). +pub fn check_raw_byte_str(src: &str, callback: impl FnMut(Range, Result)) { + <[u8]>::check_raw(src, callback); +} + +/// Check a raw C string literal for validity +/// +/// Takes the contents of a raw C string literal (without quotes) +/// and produces a sequence of characters or errors, +/// which are returned by invoking `callback`. +/// NOTE: Does no escaping, but produces errors for bare carriage return ('\r'). 
+pub fn check_raw_c_str( + src: &str, + callback: impl FnMut(Range, Result), +) { + CStr::check_raw(src, callback); +} + +/// Trait for checking raw string literals for validity +trait CheckRaw { + /// Unit type of the implementing string type (`char` for string, `u8` for byte string) + type RawUnit; + + /// Converts chars to the unit type of the literal type + fn char2raw_unit(c: char) -> Result; + + /// Takes the contents of a raw literal (without quotes) + /// and produces a sequence of `Result` + /// which are returned via `callback`. + /// + /// NOTE: Does no escaping, but produces errors for bare carriage return ('\r'). + fn check_raw( + src: &str, + mut callback: impl FnMut(Range, Result), + ) { + let mut chars = src.chars(); + while let Some(c) = chars.next() { + let start = src.len() - chars.as_str().len() - c.len_utf8(); + let res = match c { + '\r' => Err(EscapeError::BareCarriageReturnInRawString), + _ => Self::char2raw_unit(c), + }; + let end = src.len() - chars.as_str().len(); + callback(start..end, res); + } + + // Unfortunately, it is a bit unclear whether the following equivalent code is slower or faster: bug 141855 + // src.char_indices().for_each(|(pos, c)| { + // callback( + // pos..pos + c.len_utf8(), + // if c == '\r' { + // Err(EscapeError::BareCarriageReturnInRawString) + // } else { + // Self::char2raw_unit(c) + // }, + // ); + // }); + } +} + +impl CheckRaw for str { + type RawUnit = char; + + #[inline] + fn char2raw_unit(c: char) -> Result { + Ok(c) + } +} + +impl CheckRaw for [u8] { + type RawUnit = u8; + + #[inline] + fn char2raw_unit(c: char) -> Result { + char2byte(c) + } +} + +/// Turn an ascii char into a byte +#[inline] +fn char2byte(c: char) -> Result { + // do NOT do: c.try_into().ok_or(EscapeError::NonAsciiCharInByte) + if c.is_ascii() { + Ok(c as u8) + } else { + Err(EscapeError::NonAsciiCharInByte) + } +} + +impl CheckRaw for CStr { + type RawUnit = NonZeroChar; + + #[inline] + fn char2raw_unit(c: char) -> Result { + NonZeroChar::new(c).ok_or(EscapeError::NulInCStr) + } +} + +/// Unescape a char literal +/// +/// Takes the contents of a char literal (without quotes), +/// and returns an unescaped char or an error. +#[inline] +pub fn unescape_char(src: &str) -> Result { + str::unescape_single(&mut src.chars()) +} + +/// Unescape a byte literal +/// +/// Takes the contents of a byte literal (without quotes), +/// and returns an unescaped byte or an error. +#[inline] +pub fn unescape_byte(src: &str) -> Result { + <[u8]>::unescape_single(&mut src.chars()) +} + +/// Unescape a string literal +/// +/// Takes the contents of a string literal (without quotes) +/// and produces a sequence of escaped characters or errors, +/// which are returned by invoking `callback`. +pub fn unescape_str(src: &str, callback: impl FnMut(Range, Result)) { + str::unescape(src, callback) +} + +/// Unescape a byte string literal +/// +/// Takes the contents of a byte string literal (without quotes) +/// and produces a sequence of escaped bytes or errors, +/// which are returned by invoking `callback`. +pub fn unescape_byte_str(src: &str, callback: impl FnMut(Range, Result)) { + <[u8]>::unescape(src, callback) +} + +/// Unescape a C string literal +/// +/// Takes the contents of a C string literal (without quotes) +/// and produces a sequence of escaped MixedUnits or errors, +/// which are returned by invoking `callback`. 
+pub fn unescape_c_str( + src: &str, + callback: impl FnMut(Range, Result), +) { + CStr::unescape(src, callback) +} + +/// Enum representing either a char or a byte +/// +/// Used for mixed utf8 string literals, i.e. those that allow both unicode +/// chars and high bytes. +#[derive(Copy, Clone, Debug, PartialEq, Eq)] +pub enum MixedUnit { + /// Used for ASCII chars (written directly or via `\x00`..`\x7f` escapes) + /// and Unicode chars (written directly or via `\u` escapes). + /// + /// For example, if '¥' appears in a string it is represented here as + /// `MixedUnit::Char('¥')`, and it will be appended to the relevant byte + /// string as the two-byte UTF-8 sequence `[0xc2, 0xa5]` + Char(NonZeroChar), + + /// Used for high bytes (`\x80`..`\xff`). + /// + /// For example, if `\xa5` appears in a string it is represented here as + /// `MixedUnit::HighByte(0xa5)`, and it will be appended to the relevant + /// byte string as the single byte `0xa5`. + HighByte(NonZeroU8), +} + +impl From for MixedUnit { + #[inline] + fn from(c: NonZeroChar) -> Self { + MixedUnit::Char(c) + } +} + +impl From for MixedUnit { + #[inline] + fn from(byte: NonZeroU8) -> Self { + if byte.get().is_ascii() { + MixedUnit::Char(NonZeroChar::new(byte.get() as char).unwrap()) + } else { + MixedUnit::HighByte(byte) + } + } +} + +impl TryFrom for MixedUnit { + type Error = EscapeError; + + #[inline] + fn try_from(c: char) -> Result { + NonZeroChar::new(c) + .map(MixedUnit::Char) + .ok_or(EscapeError::NulInCStr) + } +} + +impl TryFrom for MixedUnit { + type Error = EscapeError; + + #[inline] + fn try_from(byte: u8) -> Result { + NonZeroU8::new(byte) + .map(From::from) + .ok_or(EscapeError::NulInCStr) + } +} + +/// Trait for unescaping escape sequences in strings +trait Unescape { + /// Unit type of the implementing string type (`char` for string, `u8` for byte string) + type Unit; + + /// Result of unescaping the zero char ('\0') + const ZERO_RESULT: Result; + + /// Converts non-zero bytes to the unit type + fn nonzero_byte2unit(b: NonZeroU8) -> Self::Unit; + + /// Converts chars to the unit type + fn char2unit(c: char) -> Result; + + /// Converts the byte of a hex escape to the unit type + fn hex2unit(b: u8) -> Result; + + /// Converts the result of a unicode escape to the unit type + fn unicode2unit(r: Result) -> Result; + + /// Unescape a single unit (single quote syntax) + fn unescape_single(chars: &mut Chars<'_>) -> Result { + let res = match chars.next().ok_or(EscapeError::ZeroChars)? { + '\\' => Self::unescape_1(chars), + '\n' | '\t' | '\'' => Err(EscapeError::EscapeOnlyChar), + '\r' => Err(EscapeError::BareCarriageReturn), + c => Self::char2unit(c), + }?; + if chars.next().is_some() { + return Err(EscapeError::MoreThanOneChar); + } + Ok(res) + } + + /// Unescape the first unit of a string (double quoted syntax) + fn unescape_1(chars: &mut Chars<'_>) -> Result { + // Previous character was '\\', unescape what follows. 
+ let c = chars.next().ok_or(EscapeError::LoneSlash)?; + if c == '0' { + Self::ZERO_RESULT + } else { + simple_escape(c) + .map(|b| Self::nonzero_byte2unit(b)) + .or_else(|c| match c { + 'x' => Self::hex2unit(hex_escape(chars)?), + 'u' => Self::unicode2unit({ + let value = unicode_escape(chars)?; + if value > char::MAX as u32 { + Err(EscapeError::OutOfRangeUnicodeEscape) + } else { + char::from_u32(value).ok_or(EscapeError::LoneSurrogateUnicodeEscape) + } + }), + _ => Err(EscapeError::InvalidEscape), + }) + } + } + + /// Unescape a string literal + /// + /// Takes the contents of a raw string literal (without quotes) + /// and produces a sequence of `Result` + /// which are returned via `callback`. + fn unescape( + src: &str, + mut callback: impl FnMut(Range, Result), + ) { + let mut chars = src.chars(); + while let Some(c) = chars.next() { + let start = src.len() - chars.as_str().len() - c.len_utf8(); + let res = match c { + '\\' => { + if let Some(b'\n') = chars.as_str().as_bytes().first() { + let _ = chars.next(); + // skip whitespace for backslash newline, see [Rust language reference] + // (https://doc.rust-lang.org/reference/tokens.html#string-literals). + let callback_err = |range, err| callback(range, Err(err)); + skip_ascii_whitespace(&mut chars, start, callback_err); + continue; + } else { + Self::unescape_1(&mut chars) + } + } + '"' => Err(EscapeError::EscapeOnlyChar), + '\r' => Err(EscapeError::BareCarriageReturn), + c => Self::char2unit(c), + }; + let end = src.len() - chars.as_str().len(); + callback(start..end, res); + } + } +} + +/// Interpret a non-nul ASCII escape +/// +/// Parses the character of an ASCII escape (except nul) without the leading backslash. +#[inline] // single use in Unescape::unescape_1 +fn simple_escape(c: char) -> Result { + // Previous character was '\\', unescape what follows. + Ok(NonZeroU8::new(match c { + '"' => b'"', + 'n' => b'\n', + 'r' => b'\r', + 't' => b'\t', + '\\' => b'\\', + '\'' => b'\'', + _ => Err(c)?, + }) + .unwrap()) +} + +/// Interpret a hexadecimal escape +/// +/// Parses the two hexadecimal characters of a hexadecimal escape without the leading r"\x". +#[inline] // single use in Unescape::unescape_1 +fn hex_escape(chars: &mut impl Iterator) -> Result { + let hi = chars.next().ok_or(EscapeError::TooShortHexEscape)?; + let hi = hi.to_digit(16).ok_or(EscapeError::InvalidCharInHexEscape)?; + + let lo = chars.next().ok_or(EscapeError::TooShortHexEscape)?; + let lo = lo.to_digit(16).ok_or(EscapeError::InvalidCharInHexEscape)?; + + Ok((hi * 16 + lo) as u8) +} + +/// Interpret a unicode escape +/// +/// Parse the braces with hexadecimal characters (and underscores) part of a unicode escape. +/// This r"{...}" normally comes after r"\u" and cannot start with an underscore. +#[inline] // single use in Unescape::unescape_1 +fn unicode_escape(chars: &mut impl Iterator) -> Result { + if chars.next() != Some('{') { + return Err(EscapeError::NoBraceInUnicodeEscape); + } + + // First character must be a hexadecimal digit. + let mut value: u32 = match chars.next().ok_or(EscapeError::UnclosedUnicodeEscape)? { + '_' => return Err(EscapeError::LeadingUnderscoreUnicodeEscape), + '}' => return Err(EscapeError::EmptyUnicodeEscape), + c => c + .to_digit(16) + .ok_or(EscapeError::InvalidCharInUnicodeEscape)?, + }; + + // First character is valid, now parse the rest of the number + // and closing brace. 
+ let mut n_digits = 1; + loop { + match chars.next() { + None => return Err(EscapeError::UnclosedUnicodeEscape), + Some('_') => continue, + Some('}') => { + // Incorrect syntax has higher priority for error reporting + // than unallowed value for a literal. + return if n_digits > 6 { + Err(EscapeError::OverlongUnicodeEscape) + } else { + Ok(value) + }; + } + Some(c) => { + let digit: u32 = c + .to_digit(16) + .ok_or(EscapeError::InvalidCharInUnicodeEscape)?; + n_digits += 1; + if n_digits > 6 { + // Stop updating value since we're sure that it's incorrect already. + continue; + } + value = value * 16 + digit; + } + }; + } +} + +/// Interpret a string continuation escape (https://doc.rust-lang.org/reference/expressions/literal-expr.html#string-continuation-escapes) +/// +/// Skip ASCII whitespace, except for the formfeed character +/// (see [this issue](https://github.com/rust-lang/rust/issues/136600)). +/// Warns on unescaped newline and following non-ASCII whitespace. +#[inline] // single use in Unescape::unescape +fn skip_ascii_whitespace( + chars: &mut Chars<'_>, + start: usize, + mut callback: impl FnMut(Range, EscapeError), +) { + let rest = chars.as_str(); + let first_non_space = rest + .bytes() + .position(|b| b != b' ' && b != b'\t' && b != b'\n' && b != b'\r') + .unwrap_or(rest.len()); + let (space, rest) = rest.split_at(first_non_space); + // backslash newline adds 2 bytes + let end = start + 2 + first_non_space; + if space.contains('\n') { + callback(start..end, EscapeError::MultipleSkippedLinesWarning); + } + *chars = rest.chars(); + if let Some(c) = chars.clone().next() { + if c.is_whitespace() { + // for error reporting, include the character that was not skipped in the span + callback( + start..end + c.len_utf8(), + EscapeError::UnskippedWhitespaceWarning, + ); + } + } +} + +impl Unescape for str { + type Unit = char; + + const ZERO_RESULT: Result = Ok('\0'); + + #[inline] + fn nonzero_byte2unit(b: NonZeroU8) -> Self::Unit { + b.get().into() + } + + #[inline] + fn char2unit(c: char) -> Result { + Ok(c) + } + + #[inline] + fn hex2unit(b: u8) -> Result { + if b.is_ascii() { + Ok(b as char) + } else { + Err(EscapeError::OutOfRangeHexEscape) + } + } + + #[inline] + fn unicode2unit(r: Result) -> Result { + r + } +} + +impl Unescape for [u8] { + type Unit = u8; + + const ZERO_RESULT: Result = Ok(b'\0'); + + #[inline] + fn nonzero_byte2unit(b: NonZeroU8) -> Self::Unit { + b.get() + } + + #[inline] + fn char2unit(c: char) -> Result { + char2byte(c) + } + + #[inline] + fn hex2unit(b: u8) -> Result { + Ok(b) + } + + #[inline] + fn unicode2unit(_r: Result) -> Result { + Err(EscapeError::UnicodeEscapeInByte) + } +} + +impl Unescape for CStr { + type Unit = MixedUnit; + + const ZERO_RESULT: Result = Err(EscapeError::NulInCStr); + + #[inline] + fn nonzero_byte2unit(b: NonZeroU8) -> Self::Unit { + b.into() + } + + #[inline] + fn char2unit(c: char) -> Result { + c.try_into() + } + + #[inline] + fn hex2unit(byte: u8) -> Result { + byte.try_into() + } + + #[inline] + fn unicode2unit(r: Result) -> Result { + Self::char2unit(r?) 
+ } +} + +/// Enum of the different kinds of literal +#[derive(Debug, Clone, Copy, PartialEq)] +pub enum Mode { + /// `'a'` + Char, + + /// `b'a'` + Byte, + + /// `"hello"` + Str, + /// `r"hello"` + RawStr, + + /// `b"hello"` + ByteStr, + /// `br"hello"` + RawByteStr, + + /// `c"hello"` + CStr, + /// `cr"hello"` + RawCStr, +} + +impl Mode { + pub fn in_double_quotes(self) -> bool { + match self { + Mode::Str + | Mode::RawStr + | Mode::ByteStr + | Mode::RawByteStr + | Mode::CStr + | Mode::RawCStr => true, + Mode::Char | Mode::Byte => false, + } + } + + pub fn prefix_noraw(self) -> &'static str { + match self { + Mode::Char | Mode::Str | Mode::RawStr => "", + Mode::Byte | Mode::ByteStr | Mode::RawByteStr => "b", + Mode::CStr | Mode::RawCStr => "c", + } + } +} + +/// Check a literal only for errors +/// +/// Takes the contents of a literal (without quotes) +/// and produces a sequence of only errors, +/// which are returned by invoking `error_callback`. +/// +/// NB Does not produce any output other than errors +pub fn check_for_errors( + src: &str, + mode: Mode, + mut error_callback: impl FnMut(Range, EscapeError), +) { + match mode { + Mode::Char => { + let mut chars = src.chars(); + if let Err(e) = str::unescape_single(&mut chars) { + error_callback(0..(src.len() - chars.as_str().len()), e); + } + } + Mode::Byte => { + let mut chars = src.chars(); + if let Err(e) = <[u8]>::unescape_single(&mut chars) { + error_callback(0..(src.len() - chars.as_str().len()), e); + } + } + Mode::Str => unescape_str(src, |range, res| { + if let Err(e) = res { + error_callback(range, e); + } + }), + Mode::ByteStr => unescape_byte_str(src, |range, res| { + if let Err(e) = res { + error_callback(range, e); + } + }), + Mode::CStr => unescape_c_str(src, |range, res| { + if let Err(e) = res { + error_callback(range, e); + } + }), + Mode::RawStr => check_raw_str(src, |range, res| { + if let Err(e) = res { + error_callback(range, e); + } + }), + Mode::RawByteStr => check_raw_byte_str(src, |range, res| { + if let Err(e) = res { + error_callback(range, e); + } + }), + Mode::RawCStr => check_raw_c_str(src, |range, res| { + if let Err(e) = res { + error_callback(range, e); + } + }), + } +} diff --git a/vendor/proc-macro2/src/wrapper.rs b/vendor/proc-macro2/src/wrapper.rs new file mode 100644 index 00000000000000..2e3eb5b4d04e28 --- /dev/null +++ b/vendor/proc-macro2/src/wrapper.rs @@ -0,0 +1,984 @@ +use crate::detection::inside_proc_macro; +use crate::fallback::{self, FromStr2 as _}; +#[cfg(span_locations)] +use crate::location::LineColumn; +#[cfg(proc_macro_span)] +use crate::probe::proc_macro_span; +#[cfg(all(span_locations, proc_macro_span_file))] +use crate::probe::proc_macro_span_file; +#[cfg(all(span_locations, proc_macro_span_location))] +use crate::probe::proc_macro_span_location; +use crate::{Delimiter, Punct, Spacing, TokenTree}; +use core::fmt::{self, Debug, Display}; +#[cfg(span_locations)] +use core::ops::Range; +use core::ops::RangeBounds; +use std::ffi::CStr; +#[cfg(span_locations)] +use std::path::PathBuf; + +#[derive(Clone)] +pub(crate) enum TokenStream { + Compiler(DeferredTokenStream), + Fallback(fallback::TokenStream), +} + +// Work around https://github.com/rust-lang/rust/issues/65080. +// In `impl Extend for TokenStream` which is used heavily by quote, +// we hold on to the appended tokens and do proc_macro::TokenStream::extend as +// late as possible to batch together consecutive uses of the Extend impl. 
+#[derive(Clone)] +pub(crate) struct DeferredTokenStream { + stream: proc_macro::TokenStream, + extra: Vec, +} + +pub(crate) enum LexError { + Compiler(proc_macro::LexError), + Fallback(fallback::LexError), + + // Rustc was supposed to return a LexError, but it panicked instead. + // https://github.com/rust-lang/rust/issues/58736 + CompilerPanic, +} + +#[cold] +fn mismatch(line: u32) -> ! { + #[cfg(procmacro2_backtrace)] + { + let backtrace = std::backtrace::Backtrace::force_capture(); + panic!("compiler/fallback mismatch L{}\n\n{}", line, backtrace) + } + #[cfg(not(procmacro2_backtrace))] + { + panic!("compiler/fallback mismatch L{}", line) + } +} + +impl DeferredTokenStream { + fn new(stream: proc_macro::TokenStream) -> Self { + DeferredTokenStream { + stream, + extra: Vec::new(), + } + } + + fn is_empty(&self) -> bool { + self.stream.is_empty() && self.extra.is_empty() + } + + fn evaluate_now(&mut self) { + // If-check provides a fast short circuit for the common case of `extra` + // being empty, which saves a round trip over the proc macro bridge. + // Improves macro expansion time in winrt by 6% in debug mode. + if !self.extra.is_empty() { + self.stream.extend(self.extra.drain(..)); + } + } + + fn into_token_stream(mut self) -> proc_macro::TokenStream { + self.evaluate_now(); + self.stream + } +} + +impl TokenStream { + pub(crate) fn new() -> Self { + if inside_proc_macro() { + TokenStream::Compiler(DeferredTokenStream::new(proc_macro::TokenStream::new())) + } else { + TokenStream::Fallback(fallback::TokenStream::new()) + } + } + + pub(crate) fn from_str_checked(src: &str) -> Result { + if inside_proc_macro() { + Ok(TokenStream::Compiler(DeferredTokenStream::new( + proc_macro::TokenStream::from_str_checked(src)?, + ))) + } else { + Ok(TokenStream::Fallback( + fallback::TokenStream::from_str_checked(src)?, + )) + } + } + + pub(crate) fn is_empty(&self) -> bool { + match self { + TokenStream::Compiler(tts) => tts.is_empty(), + TokenStream::Fallback(tts) => tts.is_empty(), + } + } + + fn unwrap_nightly(self) -> proc_macro::TokenStream { + match self { + TokenStream::Compiler(s) => s.into_token_stream(), + TokenStream::Fallback(_) => mismatch(line!()), + } + } + + fn unwrap_stable(self) -> fallback::TokenStream { + match self { + TokenStream::Compiler(_) => mismatch(line!()), + TokenStream::Fallback(s) => s, + } + } +} + +impl Display for TokenStream { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + TokenStream::Compiler(tts) => Display::fmt(&tts.clone().into_token_stream(), f), + TokenStream::Fallback(tts) => Display::fmt(tts, f), + } + } +} + +impl From for TokenStream { + fn from(inner: proc_macro::TokenStream) -> Self { + TokenStream::Compiler(DeferredTokenStream::new(inner)) + } +} + +impl From for proc_macro::TokenStream { + fn from(inner: TokenStream) -> Self { + match inner { + TokenStream::Compiler(inner) => inner.into_token_stream(), + TokenStream::Fallback(inner) => { + proc_macro::TokenStream::from_str_unchecked(&inner.to_string()) + } + } + } +} + +impl From for TokenStream { + fn from(inner: fallback::TokenStream) -> Self { + TokenStream::Fallback(inner) + } +} + +// Assumes inside_proc_macro(). 
+fn into_compiler_token(token: TokenTree) -> proc_macro::TokenTree { + match token { + TokenTree::Group(tt) => proc_macro::TokenTree::Group(tt.inner.unwrap_nightly()), + TokenTree::Punct(tt) => { + let spacing = match tt.spacing() { + Spacing::Joint => proc_macro::Spacing::Joint, + Spacing::Alone => proc_macro::Spacing::Alone, + }; + let mut punct = proc_macro::Punct::new(tt.as_char(), spacing); + punct.set_span(tt.span().inner.unwrap_nightly()); + proc_macro::TokenTree::Punct(punct) + } + TokenTree::Ident(tt) => proc_macro::TokenTree::Ident(tt.inner.unwrap_nightly()), + TokenTree::Literal(tt) => proc_macro::TokenTree::Literal(tt.inner.unwrap_nightly()), + } +} + +impl From for TokenStream { + fn from(token: TokenTree) -> Self { + if inside_proc_macro() { + TokenStream::Compiler(DeferredTokenStream::new(proc_macro::TokenStream::from( + into_compiler_token(token), + ))) + } else { + TokenStream::Fallback(fallback::TokenStream::from(token)) + } + } +} + +impl FromIterator for TokenStream { + fn from_iter>(trees: I) -> Self { + if inside_proc_macro() { + TokenStream::Compiler(DeferredTokenStream::new( + trees.into_iter().map(into_compiler_token).collect(), + )) + } else { + TokenStream::Fallback(trees.into_iter().collect()) + } + } +} + +impl FromIterator for TokenStream { + fn from_iter>(streams: I) -> Self { + let mut streams = streams.into_iter(); + match streams.next() { + Some(TokenStream::Compiler(mut first)) => { + first.evaluate_now(); + first.stream.extend(streams.map(|s| match s { + TokenStream::Compiler(s) => s.into_token_stream(), + TokenStream::Fallback(_) => mismatch(line!()), + })); + TokenStream::Compiler(first) + } + Some(TokenStream::Fallback(mut first)) => { + first.extend(streams.map(|s| match s { + TokenStream::Fallback(s) => s, + TokenStream::Compiler(_) => mismatch(line!()), + })); + TokenStream::Fallback(first) + } + None => TokenStream::new(), + } + } +} + +impl Extend for TokenStream { + fn extend>(&mut self, stream: I) { + match self { + TokenStream::Compiler(tts) => { + // Here is the reason for DeferredTokenStream. 
+ for token in stream { + tts.extra.push(into_compiler_token(token)); + } + } + TokenStream::Fallback(tts) => tts.extend(stream), + } + } +} + +impl Extend for TokenStream { + fn extend>(&mut self, streams: I) { + match self { + TokenStream::Compiler(tts) => { + tts.evaluate_now(); + tts.stream + .extend(streams.into_iter().map(TokenStream::unwrap_nightly)); + } + TokenStream::Fallback(tts) => { + tts.extend(streams.into_iter().map(TokenStream::unwrap_stable)); + } + } + } +} + +impl Debug for TokenStream { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + TokenStream::Compiler(tts) => Debug::fmt(&tts.clone().into_token_stream(), f), + TokenStream::Fallback(tts) => Debug::fmt(tts, f), + } + } +} + +impl LexError { + pub(crate) fn span(&self) -> Span { + match self { + LexError::Compiler(_) | LexError::CompilerPanic => Span::call_site(), + LexError::Fallback(e) => Span::Fallback(e.span()), + } + } +} + +impl From for LexError { + fn from(e: proc_macro::LexError) -> Self { + LexError::Compiler(e) + } +} + +impl From for LexError { + fn from(e: fallback::LexError) -> Self { + LexError::Fallback(e) + } +} + +impl Debug for LexError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + LexError::Compiler(e) => Debug::fmt(e, f), + LexError::Fallback(e) => Debug::fmt(e, f), + LexError::CompilerPanic => { + let fallback = fallback::LexError::call_site(); + Debug::fmt(&fallback, f) + } + } + } +} + +impl Display for LexError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + LexError::Compiler(e) => Display::fmt(e, f), + LexError::Fallback(e) => Display::fmt(e, f), + LexError::CompilerPanic => { + let fallback = fallback::LexError::call_site(); + Display::fmt(&fallback, f) + } + } + } +} + +#[derive(Clone)] +pub(crate) enum TokenTreeIter { + Compiler(proc_macro::token_stream::IntoIter), + Fallback(fallback::TokenTreeIter), +} + +impl IntoIterator for TokenStream { + type Item = TokenTree; + type IntoIter = TokenTreeIter; + + fn into_iter(self) -> TokenTreeIter { + match self { + TokenStream::Compiler(tts) => { + TokenTreeIter::Compiler(tts.into_token_stream().into_iter()) + } + TokenStream::Fallback(tts) => TokenTreeIter::Fallback(tts.into_iter()), + } + } +} + +impl Iterator for TokenTreeIter { + type Item = TokenTree; + + fn next(&mut self) -> Option { + let token = match self { + TokenTreeIter::Compiler(iter) => iter.next()?, + TokenTreeIter::Fallback(iter) => return iter.next(), + }; + Some(match token { + proc_macro::TokenTree::Group(tt) => { + TokenTree::Group(crate::Group::_new(Group::Compiler(tt))) + } + proc_macro::TokenTree::Punct(tt) => { + let spacing = match tt.spacing() { + proc_macro::Spacing::Joint => Spacing::Joint, + proc_macro::Spacing::Alone => Spacing::Alone, + }; + let mut o = Punct::new(tt.as_char(), spacing); + o.set_span(crate::Span::_new(Span::Compiler(tt.span()))); + TokenTree::Punct(o) + } + proc_macro::TokenTree::Ident(s) => { + TokenTree::Ident(crate::Ident::_new(Ident::Compiler(s))) + } + proc_macro::TokenTree::Literal(l) => { + TokenTree::Literal(crate::Literal::_new(Literal::Compiler(l))) + } + }) + } + + fn size_hint(&self) -> (usize, Option) { + match self { + TokenTreeIter::Compiler(tts) => tts.size_hint(), + TokenTreeIter::Fallback(tts) => tts.size_hint(), + } + } +} + +#[derive(Copy, Clone)] +pub(crate) enum Span { + Compiler(proc_macro::Span), + Fallback(fallback::Span), +} + +impl Span { + pub(crate) fn call_site() -> Self { + if inside_proc_macro() { + 
Span::Compiler(proc_macro::Span::call_site()) + } else { + Span::Fallback(fallback::Span::call_site()) + } + } + + pub(crate) fn mixed_site() -> Self { + if inside_proc_macro() { + Span::Compiler(proc_macro::Span::mixed_site()) + } else { + Span::Fallback(fallback::Span::mixed_site()) + } + } + + #[cfg(super_unstable)] + pub(crate) fn def_site() -> Self { + if inside_proc_macro() { + Span::Compiler(proc_macro::Span::def_site()) + } else { + Span::Fallback(fallback::Span::def_site()) + } + } + + pub(crate) fn resolved_at(&self, other: Span) -> Span { + match (self, other) { + (Span::Compiler(a), Span::Compiler(b)) => Span::Compiler(a.resolved_at(b)), + (Span::Fallback(a), Span::Fallback(b)) => Span::Fallback(a.resolved_at(b)), + (Span::Compiler(_), Span::Fallback(_)) => mismatch(line!()), + (Span::Fallback(_), Span::Compiler(_)) => mismatch(line!()), + } + } + + pub(crate) fn located_at(&self, other: Span) -> Span { + match (self, other) { + (Span::Compiler(a), Span::Compiler(b)) => Span::Compiler(a.located_at(b)), + (Span::Fallback(a), Span::Fallback(b)) => Span::Fallback(a.located_at(b)), + (Span::Compiler(_), Span::Fallback(_)) => mismatch(line!()), + (Span::Fallback(_), Span::Compiler(_)) => mismatch(line!()), + } + } + + pub(crate) fn unwrap(self) -> proc_macro::Span { + match self { + Span::Compiler(s) => s, + Span::Fallback(_) => panic!("proc_macro::Span is only available in procedural macros"), + } + } + + #[cfg(span_locations)] + pub(crate) fn byte_range(&self) -> Range { + match self { + #[cfg(proc_macro_span)] + Span::Compiler(s) => proc_macro_span::byte_range(s), + #[cfg(not(proc_macro_span))] + Span::Compiler(_) => 0..0, + Span::Fallback(s) => s.byte_range(), + } + } + + #[cfg(span_locations)] + pub(crate) fn start(&self) -> LineColumn { + match self { + #[cfg(proc_macro_span_location)] + Span::Compiler(s) => LineColumn { + line: proc_macro_span_location::line(s), + column: proc_macro_span_location::column(s).saturating_sub(1), + }, + #[cfg(not(proc_macro_span_location))] + Span::Compiler(_) => LineColumn { line: 0, column: 0 }, + Span::Fallback(s) => s.start(), + } + } + + #[cfg(span_locations)] + pub(crate) fn end(&self) -> LineColumn { + match self { + #[cfg(proc_macro_span_location)] + Span::Compiler(s) => { + let end = proc_macro_span_location::end(s); + LineColumn { + line: proc_macro_span_location::line(&end), + column: proc_macro_span_location::column(&end).saturating_sub(1), + } + } + #[cfg(not(proc_macro_span_location))] + Span::Compiler(_) => LineColumn { line: 0, column: 0 }, + Span::Fallback(s) => s.end(), + } + } + + #[cfg(span_locations)] + pub(crate) fn file(&self) -> String { + match self { + #[cfg(proc_macro_span_file)] + Span::Compiler(s) => proc_macro_span_file::file(s), + #[cfg(not(proc_macro_span_file))] + Span::Compiler(_) => "".to_owned(), + Span::Fallback(s) => s.file(), + } + } + + #[cfg(span_locations)] + pub(crate) fn local_file(&self) -> Option { + match self { + #[cfg(proc_macro_span_file)] + Span::Compiler(s) => proc_macro_span_file::local_file(s), + #[cfg(not(proc_macro_span_file))] + Span::Compiler(_) => None, + Span::Fallback(s) => s.local_file(), + } + } + + pub(crate) fn join(&self, other: Span) -> Option { + let ret = match (self, other) { + #[cfg(proc_macro_span)] + (Span::Compiler(a), Span::Compiler(b)) => Span::Compiler(proc_macro_span::join(a, b)?), + (Span::Fallback(a), Span::Fallback(b)) => Span::Fallback(a.join(b)?), + _ => return None, + }; + Some(ret) + } + + #[cfg(super_unstable)] + pub(crate) fn eq(&self, other: &Span) -> bool { + 
match (self, other) { + (Span::Compiler(a), Span::Compiler(b)) => a.eq(b), + (Span::Fallback(a), Span::Fallback(b)) => a.eq(b), + _ => false, + } + } + + pub(crate) fn source_text(&self) -> Option { + match self { + #[cfg(not(no_source_text))] + Span::Compiler(s) => s.source_text(), + #[cfg(no_source_text)] + Span::Compiler(_) => None, + Span::Fallback(s) => s.source_text(), + } + } + + fn unwrap_nightly(self) -> proc_macro::Span { + match self { + Span::Compiler(s) => s, + Span::Fallback(_) => mismatch(line!()), + } + } +} + +impl From for crate::Span { + fn from(proc_span: proc_macro::Span) -> Self { + crate::Span::_new(Span::Compiler(proc_span)) + } +} + +impl From for Span { + fn from(inner: fallback::Span) -> Self { + Span::Fallback(inner) + } +} + +impl Debug for Span { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + Span::Compiler(s) => Debug::fmt(s, f), + Span::Fallback(s) => Debug::fmt(s, f), + } + } +} + +pub(crate) fn debug_span_field_if_nontrivial(debug: &mut fmt::DebugStruct, span: Span) { + match span { + Span::Compiler(s) => { + debug.field("span", &s); + } + Span::Fallback(s) => fallback::debug_span_field_if_nontrivial(debug, s), + } +} + +#[derive(Clone)] +pub(crate) enum Group { + Compiler(proc_macro::Group), + Fallback(fallback::Group), +} + +impl Group { + pub(crate) fn new(delimiter: Delimiter, stream: TokenStream) -> Self { + match stream { + TokenStream::Compiler(tts) => { + let delimiter = match delimiter { + Delimiter::Parenthesis => proc_macro::Delimiter::Parenthesis, + Delimiter::Bracket => proc_macro::Delimiter::Bracket, + Delimiter::Brace => proc_macro::Delimiter::Brace, + Delimiter::None => proc_macro::Delimiter::None, + }; + Group::Compiler(proc_macro::Group::new(delimiter, tts.into_token_stream())) + } + TokenStream::Fallback(stream) => { + Group::Fallback(fallback::Group::new(delimiter, stream)) + } + } + } + + pub(crate) fn delimiter(&self) -> Delimiter { + match self { + Group::Compiler(g) => match g.delimiter() { + proc_macro::Delimiter::Parenthesis => Delimiter::Parenthesis, + proc_macro::Delimiter::Bracket => Delimiter::Bracket, + proc_macro::Delimiter::Brace => Delimiter::Brace, + proc_macro::Delimiter::None => Delimiter::None, + }, + Group::Fallback(g) => g.delimiter(), + } + } + + pub(crate) fn stream(&self) -> TokenStream { + match self { + Group::Compiler(g) => TokenStream::Compiler(DeferredTokenStream::new(g.stream())), + Group::Fallback(g) => TokenStream::Fallback(g.stream()), + } + } + + pub(crate) fn span(&self) -> Span { + match self { + Group::Compiler(g) => Span::Compiler(g.span()), + Group::Fallback(g) => Span::Fallback(g.span()), + } + } + + pub(crate) fn span_open(&self) -> Span { + match self { + Group::Compiler(g) => Span::Compiler(g.span_open()), + Group::Fallback(g) => Span::Fallback(g.span_open()), + } + } + + pub(crate) fn span_close(&self) -> Span { + match self { + Group::Compiler(g) => Span::Compiler(g.span_close()), + Group::Fallback(g) => Span::Fallback(g.span_close()), + } + } + + pub(crate) fn set_span(&mut self, span: Span) { + match (self, span) { + (Group::Compiler(g), Span::Compiler(s)) => g.set_span(s), + (Group::Fallback(g), Span::Fallback(s)) => g.set_span(s), + (Group::Compiler(_), Span::Fallback(_)) => mismatch(line!()), + (Group::Fallback(_), Span::Compiler(_)) => mismatch(line!()), + } + } + + fn unwrap_nightly(self) -> proc_macro::Group { + match self { + Group::Compiler(g) => g, + Group::Fallback(_) => mismatch(line!()), + } + } +} + +impl From for Group { + fn from(g: fallback::Group) 
-> Self { + Group::Fallback(g) + } +} + +impl Display for Group { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + match self { + Group::Compiler(group) => Display::fmt(group, formatter), + Group::Fallback(group) => Display::fmt(group, formatter), + } + } +} + +impl Debug for Group { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + match self { + Group::Compiler(group) => Debug::fmt(group, formatter), + Group::Fallback(group) => Debug::fmt(group, formatter), + } + } +} + +#[derive(Clone)] +pub(crate) enum Ident { + Compiler(proc_macro::Ident), + Fallback(fallback::Ident), +} + +impl Ident { + #[track_caller] + pub(crate) fn new_checked(string: &str, span: Span) -> Self { + match span { + Span::Compiler(s) => Ident::Compiler(proc_macro::Ident::new(string, s)), + Span::Fallback(s) => Ident::Fallback(fallback::Ident::new_checked(string, s)), + } + } + + #[track_caller] + pub(crate) fn new_raw_checked(string: &str, span: Span) -> Self { + match span { + Span::Compiler(s) => Ident::Compiler(proc_macro::Ident::new_raw(string, s)), + Span::Fallback(s) => Ident::Fallback(fallback::Ident::new_raw_checked(string, s)), + } + } + + pub(crate) fn span(&self) -> Span { + match self { + Ident::Compiler(t) => Span::Compiler(t.span()), + Ident::Fallback(t) => Span::Fallback(t.span()), + } + } + + pub(crate) fn set_span(&mut self, span: Span) { + match (self, span) { + (Ident::Compiler(t), Span::Compiler(s)) => t.set_span(s), + (Ident::Fallback(t), Span::Fallback(s)) => t.set_span(s), + (Ident::Compiler(_), Span::Fallback(_)) => mismatch(line!()), + (Ident::Fallback(_), Span::Compiler(_)) => mismatch(line!()), + } + } + + fn unwrap_nightly(self) -> proc_macro::Ident { + match self { + Ident::Compiler(s) => s, + Ident::Fallback(_) => mismatch(line!()), + } + } +} + +impl From for Ident { + fn from(inner: fallback::Ident) -> Self { + Ident::Fallback(inner) + } +} + +impl PartialEq for Ident { + fn eq(&self, other: &Ident) -> bool { + match (self, other) { + (Ident::Compiler(t), Ident::Compiler(o)) => t.to_string() == o.to_string(), + (Ident::Fallback(t), Ident::Fallback(o)) => t == o, + (Ident::Compiler(_), Ident::Fallback(_)) => mismatch(line!()), + (Ident::Fallback(_), Ident::Compiler(_)) => mismatch(line!()), + } + } +} + +impl PartialEq for Ident +where + T: ?Sized + AsRef, +{ + fn eq(&self, other: &T) -> bool { + let other = other.as_ref(); + match self { + Ident::Compiler(t) => t.to_string() == other, + Ident::Fallback(t) => t == other, + } + } +} + +impl Display for Ident { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + Ident::Compiler(t) => Display::fmt(t, f), + Ident::Fallback(t) => Display::fmt(t, f), + } + } +} + +impl Debug for Ident { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + Ident::Compiler(t) => Debug::fmt(t, f), + Ident::Fallback(t) => Debug::fmt(t, f), + } + } +} + +#[derive(Clone)] +pub(crate) enum Literal { + Compiler(proc_macro::Literal), + Fallback(fallback::Literal), +} + +macro_rules! suffixed_numbers { + ($($name:ident => $kind:ident,)*) => ($( + pub(crate) fn $name(n: $kind) -> Literal { + if inside_proc_macro() { + Literal::Compiler(proc_macro::Literal::$name(n)) + } else { + Literal::Fallback(fallback::Literal::$name(n)) + } + } + )*) +} + +macro_rules! 
unsuffixed_integers { + ($($name:ident => $kind:ident,)*) => ($( + pub(crate) fn $name(n: $kind) -> Literal { + if inside_proc_macro() { + Literal::Compiler(proc_macro::Literal::$name(n)) + } else { + Literal::Fallback(fallback::Literal::$name(n)) + } + } + )*) +} + +impl Literal { + pub(crate) fn from_str_checked(repr: &str) -> Result { + if inside_proc_macro() { + let literal = proc_macro::Literal::from_str_checked(repr)?; + Ok(Literal::Compiler(literal)) + } else { + let literal = fallback::Literal::from_str_checked(repr)?; + Ok(Literal::Fallback(literal)) + } + } + + pub(crate) unsafe fn from_str_unchecked(repr: &str) -> Self { + if inside_proc_macro() { + Literal::Compiler(proc_macro::Literal::from_str_unchecked(repr)) + } else { + Literal::Fallback(unsafe { fallback::Literal::from_str_unchecked(repr) }) + } + } + + suffixed_numbers! { + u8_suffixed => u8, + u16_suffixed => u16, + u32_suffixed => u32, + u64_suffixed => u64, + u128_suffixed => u128, + usize_suffixed => usize, + i8_suffixed => i8, + i16_suffixed => i16, + i32_suffixed => i32, + i64_suffixed => i64, + i128_suffixed => i128, + isize_suffixed => isize, + + f32_suffixed => f32, + f64_suffixed => f64, + } + + unsuffixed_integers! { + u8_unsuffixed => u8, + u16_unsuffixed => u16, + u32_unsuffixed => u32, + u64_unsuffixed => u64, + u128_unsuffixed => u128, + usize_unsuffixed => usize, + i8_unsuffixed => i8, + i16_unsuffixed => i16, + i32_unsuffixed => i32, + i64_unsuffixed => i64, + i128_unsuffixed => i128, + isize_unsuffixed => isize, + } + + pub(crate) fn f32_unsuffixed(f: f32) -> Literal { + if inside_proc_macro() { + Literal::Compiler(proc_macro::Literal::f32_unsuffixed(f)) + } else { + Literal::Fallback(fallback::Literal::f32_unsuffixed(f)) + } + } + + pub(crate) fn f64_unsuffixed(f: f64) -> Literal { + if inside_proc_macro() { + Literal::Compiler(proc_macro::Literal::f64_unsuffixed(f)) + } else { + Literal::Fallback(fallback::Literal::f64_unsuffixed(f)) + } + } + + pub(crate) fn string(string: &str) -> Literal { + if inside_proc_macro() { + Literal::Compiler(proc_macro::Literal::string(string)) + } else { + Literal::Fallback(fallback::Literal::string(string)) + } + } + + pub(crate) fn character(ch: char) -> Literal { + if inside_proc_macro() { + Literal::Compiler(proc_macro::Literal::character(ch)) + } else { + Literal::Fallback(fallback::Literal::character(ch)) + } + } + + pub(crate) fn byte_character(byte: u8) -> Literal { + if inside_proc_macro() { + Literal::Compiler({ + #[cfg(not(no_literal_byte_character))] + { + proc_macro::Literal::byte_character(byte) + } + + #[cfg(no_literal_byte_character)] + { + let fallback = fallback::Literal::byte_character(byte); + proc_macro::Literal::from_str_unchecked(&fallback.repr) + } + }) + } else { + Literal::Fallback(fallback::Literal::byte_character(byte)) + } + } + + pub(crate) fn byte_string(bytes: &[u8]) -> Literal { + if inside_proc_macro() { + Literal::Compiler(proc_macro::Literal::byte_string(bytes)) + } else { + Literal::Fallback(fallback::Literal::byte_string(bytes)) + } + } + + pub(crate) fn c_string(string: &CStr) -> Literal { + if inside_proc_macro() { + Literal::Compiler({ + #[cfg(not(no_literal_c_string))] + { + proc_macro::Literal::c_string(string) + } + + #[cfg(no_literal_c_string)] + { + let fallback = fallback::Literal::c_string(string); + proc_macro::Literal::from_str_unchecked(&fallback.repr) + } + }) + } else { + Literal::Fallback(fallback::Literal::c_string(string)) + } + } + + pub(crate) fn span(&self) -> Span { + match self { + Literal::Compiler(lit) => 
Span::Compiler(lit.span()), + Literal::Fallback(lit) => Span::Fallback(lit.span()), + } + } + + pub(crate) fn set_span(&mut self, span: Span) { + match (self, span) { + (Literal::Compiler(lit), Span::Compiler(s)) => lit.set_span(s), + (Literal::Fallback(lit), Span::Fallback(s)) => lit.set_span(s), + (Literal::Compiler(_), Span::Fallback(_)) => mismatch(line!()), + (Literal::Fallback(_), Span::Compiler(_)) => mismatch(line!()), + } + } + + pub(crate) fn subspan>(&self, range: R) -> Option { + match self { + #[cfg(proc_macro_span)] + Literal::Compiler(lit) => proc_macro_span::subspan(lit, range).map(Span::Compiler), + #[cfg(not(proc_macro_span))] + Literal::Compiler(_lit) => None, + Literal::Fallback(lit) => lit.subspan(range).map(Span::Fallback), + } + } + + fn unwrap_nightly(self) -> proc_macro::Literal { + match self { + Literal::Compiler(s) => s, + Literal::Fallback(_) => mismatch(line!()), + } + } +} + +impl From for Literal { + fn from(s: fallback::Literal) -> Self { + Literal::Fallback(s) + } +} + +impl Display for Literal { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + Literal::Compiler(t) => Display::fmt(t, f), + Literal::Fallback(t) => Display::fmt(t, f), + } + } +} + +impl Debug for Literal { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + Literal::Compiler(t) => Debug::fmt(t, f), + Literal::Fallback(t) => Debug::fmt(t, f), + } + } +} + +#[cfg(span_locations)] +pub(crate) fn invalidate_current_thread_spans() { + if inside_proc_macro() { + panic!( + "proc_macro2::extra::invalidate_current_thread_spans is not available in procedural macros" + ); + } else { + crate::fallback::invalidate_current_thread_spans(); + } +} diff --git a/vendor/proc-macro2/tests/comments.rs b/vendor/proc-macro2/tests/comments.rs new file mode 100644 index 00000000000000..34951f7f396ef7 --- /dev/null +++ b/vendor/proc-macro2/tests/comments.rs @@ -0,0 +1,105 @@ +#![allow(clippy::assertions_on_result_states, clippy::uninlined_format_args)] + +use proc_macro2::{Delimiter, Literal, Spacing, TokenStream, TokenTree}; + +// #[doc = "..."] -> "..." +fn lit_of_outer_doc_comment(tokens: &TokenStream) -> Literal { + lit_of_doc_comment(tokens, false) +} + +// #![doc = "..."] -> "..." 
+fn lit_of_inner_doc_comment(tokens: &TokenStream) -> Literal { + lit_of_doc_comment(tokens, true) +} + +fn lit_of_doc_comment(tokens: &TokenStream, inner: bool) -> Literal { + let mut iter = tokens.clone().into_iter(); + match iter.next().unwrap() { + TokenTree::Punct(punct) => { + assert_eq!(punct.as_char(), '#'); + assert_eq!(punct.spacing(), Spacing::Alone); + } + _ => panic!("wrong token {:?}", tokens), + } + if inner { + match iter.next().unwrap() { + TokenTree::Punct(punct) => { + assert_eq!(punct.as_char(), '!'); + assert_eq!(punct.spacing(), Spacing::Alone); + } + _ => panic!("wrong token {:?}", tokens), + } + } + iter = match iter.next().unwrap() { + TokenTree::Group(group) => { + assert_eq!(group.delimiter(), Delimiter::Bracket); + assert!(iter.next().is_none(), "unexpected token {:?}", tokens); + group.stream().into_iter() + } + _ => panic!("wrong token {:?}", tokens), + }; + match iter.next().unwrap() { + TokenTree::Ident(ident) => assert_eq!(ident.to_string(), "doc"), + _ => panic!("wrong token {:?}", tokens), + } + match iter.next().unwrap() { + TokenTree::Punct(punct) => { + assert_eq!(punct.as_char(), '='); + assert_eq!(punct.spacing(), Spacing::Alone); + } + _ => panic!("wrong token {:?}", tokens), + } + match iter.next().unwrap() { + TokenTree::Literal(literal) => { + assert!(iter.next().is_none(), "unexpected token {:?}", tokens); + literal + } + _ => panic!("wrong token {:?}", tokens), + } +} + +#[test] +fn closed_immediately() { + let stream = "/**/".parse::().unwrap(); + let tokens = stream.into_iter().collect::>(); + assert!(tokens.is_empty(), "not empty -- {:?}", tokens); +} + +#[test] +fn incomplete() { + assert!("/*/".parse::().is_err()); +} + +#[test] +fn lit() { + let stream = "/// doc".parse::().unwrap(); + let lit = lit_of_outer_doc_comment(&stream); + assert_eq!(lit.to_string(), "\" doc\""); + + let stream = "//! doc".parse::().unwrap(); + let lit = lit_of_inner_doc_comment(&stream); + assert_eq!(lit.to_string(), "\" doc\""); + + let stream = "/** doc */".parse::().unwrap(); + let lit = lit_of_outer_doc_comment(&stream); + assert_eq!(lit.to_string(), "\" doc \""); + + let stream = "/*! doc */".parse::().unwrap(); + let lit = lit_of_inner_doc_comment(&stream); + assert_eq!(lit.to_string(), "\" doc \""); +} + +#[test] +fn carriage_return() { + let stream = "///\r\n".parse::().unwrap(); + let lit = lit_of_outer_doc_comment(&stream); + assert_eq!(lit.to_string(), "\"\""); + + let stream = "/**\r\n*/".parse::().unwrap(); + let lit = lit_of_outer_doc_comment(&stream); + assert_eq!(lit.to_string(), "\"\\r\\n\""); + + "///\r".parse::().unwrap_err(); + "///\r \n".parse::().unwrap_err(); + "/**\r \n*/".parse::().unwrap_err(); +} diff --git a/vendor/proc-macro2/tests/features.rs b/vendor/proc-macro2/tests/features.rs new file mode 100644 index 00000000000000..ea1704d992f3be --- /dev/null +++ b/vendor/proc-macro2/tests/features.rs @@ -0,0 +1,10 @@ +#![allow(clippy::assertions_on_constants, clippy::ignore_without_reason)] + +#[test] +#[ignore] +fn make_sure_no_proc_macro() { + assert!( + !cfg!(feature = "proc-macro"), + "still compiled with proc_macro?" + ); +} diff --git a/vendor/proc-macro2/tests/marker.rs b/vendor/proc-macro2/tests/marker.rs new file mode 100644 index 00000000000000..af8932a1fef523 --- /dev/null +++ b/vendor/proc-macro2/tests/marker.rs @@ -0,0 +1,97 @@ +#![allow(clippy::extra_unused_type_parameters)] + +use proc_macro2::{ + Delimiter, Group, Ident, LexError, Literal, Punct, Spacing, Span, TokenStream, TokenTree, +}; + +macro_rules! 
assert_impl { + ($ty:ident is $($marker:ident) and +) => { + #[test] + #[allow(non_snake_case)] + fn $ty() { + fn assert_implemented() {} + assert_implemented::<$ty>(); + } + }; + + ($ty:ident is not $($marker:ident) or +) => { + #[test] + #[allow(non_snake_case)] + fn $ty() { + $( + { + // Implemented for types that implement $marker. + #[allow(dead_code)] + trait IsNotImplemented { + fn assert_not_implemented() {} + } + impl IsNotImplemented for T {} + + // Implemented for the type being tested. + trait IsImplemented { + fn assert_not_implemented() {} + } + impl IsImplemented for $ty {} + + // If $ty does not implement $marker, there is no ambiguity + // in the following trait method call. + <$ty>::assert_not_implemented(); + } + )+ + } + }; +} + +assert_impl!(Delimiter is Send and Sync); +assert_impl!(Spacing is Send and Sync); + +assert_impl!(Group is not Send or Sync); +assert_impl!(Ident is not Send or Sync); +assert_impl!(LexError is not Send or Sync); +assert_impl!(Literal is not Send or Sync); +assert_impl!(Punct is not Send or Sync); +assert_impl!(Span is not Send or Sync); +assert_impl!(TokenStream is not Send or Sync); +assert_impl!(TokenTree is not Send or Sync); + +#[cfg(procmacro2_semver_exempt)] +mod semver_exempt { + use proc_macro2::LineColumn; + + assert_impl!(LineColumn is Send and Sync); +} + +mod unwind_safe { + #[cfg(procmacro2_semver_exempt)] + use proc_macro2::LineColumn; + use proc_macro2::{ + Delimiter, Group, Ident, LexError, Literal, Punct, Spacing, Span, TokenStream, TokenTree, + }; + use std::panic::{RefUnwindSafe, UnwindSafe}; + + macro_rules! assert_unwind_safe { + ($($types:ident)*) => { + $( + assert_impl!($types is UnwindSafe and RefUnwindSafe); + )* + }; + } + + assert_unwind_safe! { + Delimiter + Group + Ident + LexError + Literal + Punct + Spacing + Span + TokenStream + TokenTree + } + + #[cfg(procmacro2_semver_exempt)] + assert_unwind_safe! 
{ + LineColumn + } +} diff --git a/vendor/proc-macro2/tests/test.rs b/vendor/proc-macro2/tests/test.rs new file mode 100644 index 00000000000000..a9272716647014 --- /dev/null +++ b/vendor/proc-macro2/tests/test.rs @@ -0,0 +1,1094 @@ +#![allow( + clippy::assertions_on_result_states, + clippy::items_after_statements, + clippy::needless_pass_by_value, + clippy::needless_raw_string_hashes, + clippy::non_ascii_literal, + clippy::octal_escapes, + clippy::uninlined_format_args +)] + +use proc_macro2::{Ident, Literal, Punct, Spacing, Span, TokenStream, TokenTree}; +use std::ffi::CStr; +use std::iter; +use std::str::{self, FromStr}; + +#[test] +fn idents() { + assert_eq!( + Ident::new("String", Span::call_site()).to_string(), + "String" + ); + assert_eq!(Ident::new("fn", Span::call_site()).to_string(), "fn"); + assert_eq!(Ident::new("_", Span::call_site()).to_string(), "_"); +} + +#[test] +fn raw_idents() { + assert_eq!( + Ident::new_raw("String", Span::call_site()).to_string(), + "r#String" + ); + assert_eq!(Ident::new_raw("fn", Span::call_site()).to_string(), "r#fn"); +} + +#[test] +#[should_panic(expected = "`r#_` cannot be a raw identifier")] +fn ident_raw_underscore() { + Ident::new_raw("_", Span::call_site()); +} + +#[test] +#[should_panic(expected = "`r#super` cannot be a raw identifier")] +fn ident_raw_reserved() { + Ident::new_raw("super", Span::call_site()); +} + +#[test] +#[should_panic(expected = "Ident is not allowed to be empty; use Option")] +fn ident_empty() { + Ident::new("", Span::call_site()); +} + +#[test] +#[should_panic(expected = "Ident cannot be a number; use Literal instead")] +fn ident_number() { + Ident::new("255", Span::call_site()); +} + +#[test] +#[should_panic(expected = "\"a#\" is not a valid Ident")] +fn ident_invalid() { + Ident::new("a#", Span::call_site()); +} + +#[test] +#[should_panic(expected = "not a valid Ident")] +fn raw_ident_empty() { + Ident::new("r#", Span::call_site()); +} + +#[test] +#[should_panic(expected = "not a valid Ident")] +fn raw_ident_number() { + Ident::new("r#255", Span::call_site()); +} + +#[test] +#[should_panic(expected = "\"r#a#\" is not a valid Ident")] +fn raw_ident_invalid() { + Ident::new("r#a#", Span::call_site()); +} + +#[test] +#[should_panic(expected = "not a valid Ident")] +fn lifetime_empty() { + Ident::new("'", Span::call_site()); +} + +#[test] +#[should_panic(expected = "not a valid Ident")] +fn lifetime_number() { + Ident::new("'255", Span::call_site()); +} + +#[test] +#[should_panic(expected = r#""'a#" is not a valid Ident"#)] +fn lifetime_invalid() { + Ident::new("'a#", Span::call_site()); +} + +#[test] +fn literal_string() { + #[track_caller] + fn assert(literal: Literal, expected: &str) { + assert_eq!(literal.to_string(), expected.trim()); + } + + assert(Literal::string(""), r#" "" "#); + assert(Literal::string("aA"), r#" "aA" "#); + assert(Literal::string("\t"), r#" "\t" "#); + assert(Literal::string("❤"), r#" "❤" "#); + assert(Literal::string("'"), r#" "'" "#); + assert(Literal::string("\""), r#" "\"" "#); + assert(Literal::string("\0"), r#" "\0" "#); + assert(Literal::string("\u{1}"), r#" "\u{1}" "#); + assert( + Literal::string("a\00b\07c\08d\0e\0"), + r#" "a\x000b\x007c\08d\0e\0" "#, + ); + + "\"\\\r\n x\"".parse::().unwrap(); + "\"\\\r\n \rx\"".parse::().unwrap_err(); +} + +#[test] +fn literal_raw_string() { + "r\"\r\n\"".parse::().unwrap(); + + fn raw_string_literal_with_hashes(n: usize) -> String { + let mut literal = String::new(); + literal.push('r'); + literal.extend(iter::repeat('#').take(n)); + 
literal.push('"'); + literal.push('"'); + literal.extend(iter::repeat('#').take(n)); + literal + } + + raw_string_literal_with_hashes(255) + .parse::() + .unwrap(); + + // https://github.com/rust-lang/rust/pull/95251 + raw_string_literal_with_hashes(256) + .parse::() + .unwrap_err(); +} + +#[cfg(procmacro2_semver_exempt)] +#[test] +fn literal_string_value() { + for string in ["", "...", "...\t...", "...\\...", "...\0...", "...\u{1}..."] { + assert_eq!(string, Literal::string(string).str_value().unwrap()); + assert_eq!( + string, + format!("r\"{string}\"") + .parse::() + .unwrap() + .str_value() + .unwrap(), + ); + assert_eq!( + string, + format!("r##\"{string}\"##") + .parse::() + .unwrap() + .str_value() + .unwrap(), + ); + } +} + +#[test] +fn literal_byte_character() { + #[track_caller] + fn assert(literal: Literal, expected: &str) { + assert_eq!(literal.to_string(), expected.trim()); + } + + assert(Literal::byte_character(b'a'), r#" b'a' "#); + assert(Literal::byte_character(b'\0'), r#" b'\0' "#); + assert(Literal::byte_character(b'\t'), r#" b'\t' "#); + assert(Literal::byte_character(b'\n'), r#" b'\n' "#); + assert(Literal::byte_character(b'\r'), r#" b'\r' "#); + assert(Literal::byte_character(b'\''), r#" b'\'' "#); + assert(Literal::byte_character(b'\\'), r#" b'\\' "#); + assert(Literal::byte_character(b'\x1f'), r#" b'\x1F' "#); + assert(Literal::byte_character(b'"'), r#" b'"' "#); +} + +#[test] +fn literal_byte_string() { + #[track_caller] + fn assert(literal: Literal, expected: &str) { + assert_eq!(literal.to_string(), expected.trim()); + } + + assert(Literal::byte_string(b""), r#" b"" "#); + assert(Literal::byte_string(b"\0"), r#" b"\0" "#); + assert(Literal::byte_string(b"\t"), r#" b"\t" "#); + assert(Literal::byte_string(b"\n"), r#" b"\n" "#); + assert(Literal::byte_string(b"\r"), r#" b"\r" "#); + assert(Literal::byte_string(b"\""), r#" b"\"" "#); + assert(Literal::byte_string(b"\\"), r#" b"\\" "#); + assert(Literal::byte_string(b"\x1f"), r#" b"\x1F" "#); + assert(Literal::byte_string(b"'"), r#" b"'" "#); + assert( + Literal::byte_string(b"a\00b\07c\08d\0e\0"), + r#" b"a\x000b\x007c\08d\0e\0" "#, + ); + + "b\"\\\r\n x\"".parse::().unwrap(); + "b\"\\\r\n \rx\"".parse::().unwrap_err(); + "b\"\\\r\n \u{a0}x\"".parse::().unwrap_err(); + "br\"\u{a0}\"".parse::().unwrap_err(); +} + +#[cfg(procmacro2_semver_exempt)] +#[test] +fn literal_byte_string_value() { + for bytestr in [ + &b""[..], + b"...", + b"...\t...", + b"...\\...", + b"...\0...", + b"...\xF0...", + ] { + assert_eq!( + bytestr, + Literal::byte_string(bytestr).byte_str_value().unwrap(), + ); + if let Ok(string) = str::from_utf8(bytestr) { + assert_eq!( + bytestr, + format!("br\"{string}\"") + .parse::() + .unwrap() + .byte_str_value() + .unwrap(), + ); + assert_eq!( + bytestr, + format!("br##\"{string}\"##") + .parse::() + .unwrap() + .byte_str_value() + .unwrap(), + ); + } + } +} + +#[test] +fn literal_c_string() { + #[track_caller] + fn assert(literal: Literal, expected: &str) { + assert_eq!(literal.to_string(), expected.trim()); + } + + assert(Literal::c_string(<&CStr>::default()), r#" c"" "#); + assert( + Literal::c_string(CStr::from_bytes_with_nul(b"aA\0").unwrap()), + r#" c"aA" "#, + ); + assert( + Literal::c_string(CStr::from_bytes_with_nul(b"aA\0").unwrap()), + r#" c"aA" "#, + ); + assert( + Literal::c_string(CStr::from_bytes_with_nul(b"\t\0").unwrap()), + r#" c"\t" "#, + ); + assert( + Literal::c_string(CStr::from_bytes_with_nul(b"\xE2\x9D\xA4\0").unwrap()), + r#" c"❤" "#, + ); + assert( + 
Literal::c_string(CStr::from_bytes_with_nul(b"'\0").unwrap()), + r#" c"'" "#, + ); + assert( + Literal::c_string(CStr::from_bytes_with_nul(b"\"\0").unwrap()), + r#" c"\"" "#, + ); + assert( + Literal::c_string(CStr::from_bytes_with_nul(b"\x7F\xFF\xFE\xCC\xB3\0").unwrap()), + r#" c"\u{7f}\xFF\xFE\u{333}" "#, + ); + + let strings = r###" + c"hello\x80我叫\u{1F980}" // from the RFC + cr"\" + cr##"Hello "world"!"## + c"\t\n\r\"\\" + "###; + + let mut tokens = strings.parse::().unwrap().into_iter(); + + for expected in &[ + r#"c"hello\x80我叫\u{1F980}""#, + r#"cr"\""#, + r###"cr##"Hello "world"!"##"###, + r#"c"\t\n\r\"\\""#, + ] { + match tokens.next().unwrap() { + TokenTree::Literal(literal) => { + assert_eq!(literal.to_string(), *expected); + } + unexpected => panic!("unexpected token: {:?}", unexpected), + } + } + + if let Some(unexpected) = tokens.next() { + panic!("unexpected token: {:?}", unexpected); + } + + for invalid in &[r#"c"\0""#, r#"c"\x00""#, r#"c"\u{0}""#, "c\"\0\""] { + if let Ok(unexpected) = invalid.parse::() { + panic!("unexpected token: {:?}", unexpected); + } + } +} + +#[cfg(procmacro2_semver_exempt)] +#[test] +fn literal_c_string_value() { + for cstr in [ + c"", + c"...", + c"...\t...", + c"...\\...", + c"...\u{1}...", + c"...\xF0...", + ] { + assert_eq!( + cstr.to_bytes_with_nul(), + Literal::c_string(cstr).cstr_value().unwrap(), + ); + if let Ok(string) = cstr.to_str() { + assert_eq!( + cstr.to_bytes_with_nul(), + format!("cr\"{string}\"") + .parse::() + .unwrap() + .cstr_value() + .unwrap(), + ); + assert_eq!( + cstr.to_bytes_with_nul(), + format!("cr##\"{string}\"##") + .parse::() + .unwrap() + .cstr_value() + .unwrap(), + ); + } + } +} + +#[test] +fn literal_character() { + #[track_caller] + fn assert(literal: Literal, expected: &str) { + assert_eq!(literal.to_string(), expected.trim()); + } + + assert(Literal::character('a'), r#" 'a' "#); + assert(Literal::character('\t'), r#" '\t' "#); + assert(Literal::character('❤'), r#" '❤' "#); + assert(Literal::character('\''), r#" '\'' "#); + assert(Literal::character('"'), r#" '"' "#); + assert(Literal::character('\0'), r#" '\0' "#); + assert(Literal::character('\u{1}'), r#" '\u{1}' "#); +} + +#[test] +fn literal_integer() { + #[track_caller] + fn assert(literal: Literal, expected: &str) { + assert_eq!(literal.to_string(), expected); + } + + assert(Literal::u8_suffixed(10), "10u8"); + assert(Literal::u16_suffixed(10), "10u16"); + assert(Literal::u32_suffixed(10), "10u32"); + assert(Literal::u64_suffixed(10), "10u64"); + assert(Literal::u128_suffixed(10), "10u128"); + assert(Literal::usize_suffixed(10), "10usize"); + + assert(Literal::i8_suffixed(10), "10i8"); + assert(Literal::i16_suffixed(10), "10i16"); + assert(Literal::i32_suffixed(10), "10i32"); + assert(Literal::i64_suffixed(10), "10i64"); + assert(Literal::i128_suffixed(10), "10i128"); + assert(Literal::isize_suffixed(10), "10isize"); + + assert(Literal::u8_unsuffixed(10), "10"); + assert(Literal::u16_unsuffixed(10), "10"); + assert(Literal::u32_unsuffixed(10), "10"); + assert(Literal::u64_unsuffixed(10), "10"); + assert(Literal::u128_unsuffixed(10), "10"); + assert(Literal::usize_unsuffixed(10), "10"); + + assert(Literal::i8_unsuffixed(10), "10"); + assert(Literal::i16_unsuffixed(10), "10"); + assert(Literal::i32_unsuffixed(10), "10"); + assert(Literal::i64_unsuffixed(10), "10"); + assert(Literal::i128_unsuffixed(10), "10"); + assert(Literal::isize_unsuffixed(10), "10"); + + assert(Literal::i32_suffixed(-10), "-10i32"); + assert(Literal::i32_unsuffixed(-10), "-10"); +} + 
+#[test] +fn literal_float() { + #[track_caller] + fn assert(literal: Literal, expected: &str) { + assert_eq!(literal.to_string(), expected); + } + + assert(Literal::f32_suffixed(10.0), "10f32"); + assert(Literal::f32_suffixed(-10.0), "-10f32"); + assert(Literal::f64_suffixed(10.0), "10f64"); + assert(Literal::f64_suffixed(-10.0), "-10f64"); + + assert(Literal::f32_unsuffixed(10.0), "10.0"); + assert(Literal::f32_unsuffixed(-10.0), "-10.0"); + assert(Literal::f64_unsuffixed(10.0), "10.0"); + assert(Literal::f64_unsuffixed(-10.0), "-10.0"); + + assert( + Literal::f64_unsuffixed(1e100), + "10000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000.0", + ); +} + +#[test] +fn literal_suffix() { + fn token_count(p: &str) -> usize { + p.parse::().unwrap().into_iter().count() + } + + assert_eq!(token_count("999u256"), 1); + assert_eq!(token_count("999r#u256"), 3); + assert_eq!(token_count("1."), 1); + assert_eq!(token_count("1.f32"), 3); + assert_eq!(token_count("1.0_0"), 1); + assert_eq!(token_count("1._0"), 3); + assert_eq!(token_count("1._m"), 3); + assert_eq!(token_count("\"\"s"), 1); + assert_eq!(token_count("r\"\"r"), 1); + assert_eq!(token_count("r#\"\"#r"), 1); + assert_eq!(token_count("b\"\"b"), 1); + assert_eq!(token_count("br\"\"br"), 1); + assert_eq!(token_count("br#\"\"#br"), 1); + assert_eq!(token_count("c\"\"c"), 1); + assert_eq!(token_count("cr\"\"cr"), 1); + assert_eq!(token_count("cr#\"\"#cr"), 1); + assert_eq!(token_count("'c'c"), 1); + assert_eq!(token_count("b'b'b"), 1); + assert_eq!(token_count("0E"), 1); + assert_eq!(token_count("0o0A"), 1); + assert_eq!(token_count("0E--0"), 4); + assert_eq!(token_count("0.0ECMA"), 1); +} + +#[test] +fn literal_iter_negative() { + let negative_literal = Literal::i32_suffixed(-3); + let tokens = TokenStream::from(TokenTree::Literal(negative_literal)); + let mut iter = tokens.into_iter(); + match iter.next().unwrap() { + TokenTree::Punct(punct) => { + assert_eq!(punct.as_char(), '-'); + assert_eq!(punct.spacing(), Spacing::Alone); + } + unexpected => panic!("unexpected token {:?}", unexpected), + } + match iter.next().unwrap() { + TokenTree::Literal(literal) => { + assert_eq!(literal.to_string(), "3i32"); + } + unexpected => panic!("unexpected token {:?}", unexpected), + } + assert!(iter.next().is_none()); +} + +#[test] +fn literal_parse() { + assert!("1".parse::().is_ok()); + assert!("-1".parse::().is_ok()); + assert!("-1u12".parse::().is_ok()); + assert!("1.0".parse::().is_ok()); + assert!("-1.0".parse::().is_ok()); + assert!("-1.0f12".parse::().is_ok()); + assert!("'a'".parse::().is_ok()); + assert!("\"\n\"".parse::().is_ok()); + assert!("0 1".parse::().is_err()); + assert!(" 0".parse::().is_err()); + assert!("0 ".parse::().is_err()); + assert!("/* comment */0".parse::().is_err()); + assert!("0/* comment */".parse::().is_err()); + assert!("0// comment".parse::().is_err()); + assert!("- 1".parse::().is_err()); + assert!("- 1.0".parse::().is_err()); + assert!("-\"\"".parse::().is_err()); +} + +#[test] +fn literal_span() { + let positive = "0.1".parse::().unwrap(); + let negative = "-0.1".parse::().unwrap(); + let subspan = positive.subspan(1..2); + + #[cfg(not(span_locations))] + { + let _ = negative; + assert!(subspan.is_none()); + } + + #[cfg(span_locations)] + { + assert_eq!(positive.span().start().column, 0); + assert_eq!(positive.span().end().column, 3); + assert_eq!(negative.span().start().column, 0); + assert_eq!(negative.span().end().column, 4); + 
assert_eq!(subspan.unwrap().source_text().unwrap(), "."); + } + + assert!(positive.subspan(1..4).is_none()); +} + +#[cfg(span_locations)] +#[test] +fn source_text() { + let input = " 𓀕 a z "; + let mut tokens = input + .parse::() + .unwrap() + .into_iter(); + + let first = tokens.next().unwrap(); + assert_eq!("𓀕", first.span().source_text().unwrap()); + + let second = tokens.next().unwrap(); + let third = tokens.next().unwrap(); + assert_eq!("z", third.span().source_text().unwrap()); + assert_eq!("a", second.span().source_text().unwrap()); +} + +#[test] +fn lifetimes() { + let mut tokens = "'a 'static 'struct 'r#gen 'r#prefix#lifetime" + .parse::() + .unwrap() + .into_iter(); + assert!(match tokens.next() { + Some(TokenTree::Punct(punct)) => { + punct.as_char() == '\'' && punct.spacing() == Spacing::Joint + } + _ => false, + }); + assert!(match tokens.next() { + Some(TokenTree::Ident(ident)) => ident == "a", + _ => false, + }); + assert!(match tokens.next() { + Some(TokenTree::Punct(punct)) => { + punct.as_char() == '\'' && punct.spacing() == Spacing::Joint + } + _ => false, + }); + assert!(match tokens.next() { + Some(TokenTree::Ident(ident)) => ident == "static", + _ => false, + }); + assert!(match tokens.next() { + Some(TokenTree::Punct(punct)) => { + punct.as_char() == '\'' && punct.spacing() == Spacing::Joint + } + _ => false, + }); + assert!(match tokens.next() { + Some(TokenTree::Ident(ident)) => ident == "struct", + _ => false, + }); + assert!(match tokens.next() { + Some(TokenTree::Punct(punct)) => { + punct.as_char() == '\'' && punct.spacing() == Spacing::Joint + } + _ => false, + }); + assert!(match tokens.next() { + Some(TokenTree::Ident(ident)) => ident == "r#gen", + _ => false, + }); + assert!(match tokens.next() { + Some(TokenTree::Punct(punct)) => { + punct.as_char() == '\'' && punct.spacing() == Spacing::Joint + } + _ => false, + }); + assert!(match tokens.next() { + Some(TokenTree::Ident(ident)) => ident == "r#prefix", + _ => false, + }); + assert!(match tokens.next() { + Some(TokenTree::Punct(punct)) => { + punct.as_char() == '#' && punct.spacing() == Spacing::Alone + } + _ => false, + }); + assert!(match tokens.next() { + Some(TokenTree::Ident(ident)) => ident == "lifetime", + _ => false, + }); + + "' a".parse::().unwrap_err(); + "' r#gen".parse::().unwrap_err(); + "' prefix#lifetime".parse::().unwrap_err(); + "'prefix#lifetime".parse::().unwrap_err(); + "'aa'bb".parse::().unwrap_err(); + "'r#gen'a".parse::().unwrap_err(); +} + +#[test] +fn roundtrip() { + fn roundtrip(p: &str) { + println!("parse: {}", p); + let s = p.parse::().unwrap().to_string(); + println!("first: {}", s); + let s2 = s.parse::().unwrap().to_string(); + assert_eq!(s, s2); + } + roundtrip("a"); + roundtrip("<<"); + roundtrip("<<="); + roundtrip( + " + 1 + 1.0 + 1f32 + 2f64 + 1usize + 4isize + 4e10 + 1_000 + 1_0i32 + 8u8 + 9 + 0 + 0xffffffffffffffffffffffffffffffff + 1x + 1u80 + 1f320 + ", + ); + roundtrip("'a"); + roundtrip("'_"); + roundtrip("'static"); + roundtrip(r"'\u{10__FFFF}'"); + roundtrip("\"\\u{10_F0FF__}foo\\u{1_0_0_0__}\""); +} + +#[test] +fn fail() { + fn fail(p: &str) { + if let Ok(s) = p.parse::() { + panic!("should have failed to parse: {}\n{:#?}", p, s); + } + } + fail("' static"); + fail("r#1"); + fail("r#_"); + fail("\"\\u{0000000}\""); // overlong unicode escape (rust allows at most 6 hex digits) + fail("\"\\u{999999}\""); // outside of valid range of char + fail("\"\\u{_0}\""); // leading underscore + fail("\"\\u{}\""); // empty + fail("b\"\r\""); // bare carriage return in byte 
string + fail("r\"\r\""); // bare carriage return in raw string + fail("\"\\\r \""); // backslash carriage return + fail("'aa'aa"); + fail("br##\"\"#"); + fail("cr##\"\"#"); + fail("\"\\\n\u{85}\r\""); +} + +#[cfg(span_locations)] +#[test] +fn span_test() { + check_spans( + "\ +/// This is a document comment +testing 123 +{ + testing 234 +}", + &[ + (1, 0, 1, 30), // # + (1, 0, 1, 30), // [ ... ] + (1, 0, 1, 30), // doc + (1, 0, 1, 30), // = + (1, 0, 1, 30), // "This is..." + (2, 0, 2, 7), // testing + (2, 8, 2, 11), // 123 + (3, 0, 5, 1), // { ... } + (4, 2, 4, 9), // testing + (4, 10, 4, 13), // 234 + ], + ); +} + +#[cfg(procmacro2_semver_exempt)] +#[test] +fn default_span() { + let start = Span::call_site().start(); + assert_eq!(start.line, 1); + assert_eq!(start.column, 0); + let end = Span::call_site().end(); + assert_eq!(end.line, 1); + assert_eq!(end.column, 0); + assert_eq!(Span::call_site().file(), ""); + assert!(Span::call_site().local_file().is_none()); +} + +#[cfg(procmacro2_semver_exempt)] +#[test] +fn span_join() { + let source1 = "aaa\nbbb" + .parse::() + .unwrap() + .into_iter() + .collect::>(); + let source2 = "ccc\nddd" + .parse::() + .unwrap() + .into_iter() + .collect::>(); + + assert!(source1[0].span().file() != source2[0].span().file()); + assert_eq!(source1[0].span().file(), source1[1].span().file()); + + let joined1 = source1[0].span().join(source1[1].span()); + let joined2 = source1[0].span().join(source2[0].span()); + assert!(joined1.is_some()); + assert!(joined2.is_none()); + + let start = joined1.unwrap().start(); + let end = joined1.unwrap().end(); + assert_eq!(start.line, 1); + assert_eq!(start.column, 0); + assert_eq!(end.line, 2); + assert_eq!(end.column, 3); + + assert_eq!(joined1.unwrap().file(), source1[0].span().file()); +} + +#[test] +fn no_panic() { + let s = str::from_utf8(b"b\'\xc2\x86 \x00\x00\x00^\"").unwrap(); + assert!(s.parse::().is_err()); +} + +#[test] +fn punct_before_comment() { + let mut tts = TokenStream::from_str("~// comment").unwrap().into_iter(); + match tts.next().unwrap() { + TokenTree::Punct(tt) => { + assert_eq!(tt.as_char(), '~'); + assert_eq!(tt.spacing(), Spacing::Alone); + } + wrong => panic!("wrong token {:?}", wrong), + } +} + +#[test] +fn joint_last_token() { + // This test verifies that we match the behavior of libproc_macro *not* in + // the range nightly-2020-09-06 through nightly-2020-09-10, in which this + // behavior was temporarily broken. 
+ // See https://github.com/rust-lang/rust/issues/76399 + + let joint_punct = Punct::new(':', Spacing::Joint); + let stream = TokenStream::from(TokenTree::Punct(joint_punct)); + let punct = match stream.into_iter().next().unwrap() { + TokenTree::Punct(punct) => punct, + _ => unreachable!(), + }; + assert_eq!(punct.spacing(), Spacing::Joint); +} + +#[test] +fn raw_identifier() { + let mut tts = TokenStream::from_str("r#dyn").unwrap().into_iter(); + match tts.next().unwrap() { + TokenTree::Ident(raw) => assert_eq!("r#dyn", raw.to_string()), + wrong => panic!("wrong token {:?}", wrong), + } + assert!(tts.next().is_none()); +} + +#[test] +fn test_display_ident() { + let ident = Ident::new("proc_macro", Span::call_site()); + assert_eq!(format!("{ident}"), "proc_macro"); + assert_eq!(format!("{ident:-^14}"), "proc_macro"); + + let ident = Ident::new_raw("proc_macro", Span::call_site()); + assert_eq!(format!("{ident}"), "r#proc_macro"); + assert_eq!(format!("{ident:-^14}"), "r#proc_macro"); +} + +#[test] +fn test_debug_ident() { + let ident = Ident::new("proc_macro", Span::call_site()); + let expected = if cfg!(span_locations) { + "Ident { sym: proc_macro }" + } else { + "Ident(proc_macro)" + }; + assert_eq!(expected, format!("{:?}", ident)); + + let ident = Ident::new_raw("proc_macro", Span::call_site()); + let expected = if cfg!(span_locations) { + "Ident { sym: r#proc_macro }" + } else { + "Ident(r#proc_macro)" + }; + assert_eq!(expected, format!("{:?}", ident)); +} + +#[test] +fn test_display_tokenstream() { + let tts = TokenStream::from_str("[a + 1]").unwrap(); + assert_eq!(format!("{tts}"), "[a + 1]"); + assert_eq!(format!("{tts:-^5}"), "[a + 1]"); +} + +#[test] +fn test_debug_tokenstream() { + let tts = TokenStream::from_str("[a + 1]").unwrap(); + + #[cfg(not(span_locations))] + let expected = "\ +TokenStream [ + Group { + delimiter: Bracket, + stream: TokenStream [ + Ident { + sym: a, + }, + Punct { + char: '+', + spacing: Alone, + }, + Literal { + lit: 1, + }, + ], + }, +]\ + "; + + #[cfg(not(span_locations))] + let expected_before_trailing_commas = "\ +TokenStream [ + Group { + delimiter: Bracket, + stream: TokenStream [ + Ident { + sym: a + }, + Punct { + char: '+', + spacing: Alone + }, + Literal { + lit: 1 + } + ] + } +]\ + "; + + #[cfg(span_locations)] + let expected = "\ +TokenStream [ + Group { + delimiter: Bracket, + stream: TokenStream [ + Ident { + sym: a, + span: bytes(2..3), + }, + Punct { + char: '+', + spacing: Alone, + span: bytes(4..5), + }, + Literal { + lit: 1, + span: bytes(6..7), + }, + ], + span: bytes(1..8), + }, +]\ + "; + + #[cfg(span_locations)] + let expected_before_trailing_commas = "\ +TokenStream [ + Group { + delimiter: Bracket, + stream: TokenStream [ + Ident { + sym: a, + span: bytes(2..3) + }, + Punct { + char: '+', + spacing: Alone, + span: bytes(4..5) + }, + Literal { + lit: 1, + span: bytes(6..7) + } + ], + span: bytes(1..8) + } +]\ + "; + + let actual = format!("{:#?}", tts); + if actual.ends_with(",\n]") { + assert_eq!(expected, actual); + } else { + assert_eq!(expected_before_trailing_commas, actual); + } +} + +#[test] +fn default_tokenstream_is_empty() { + let default_token_stream = ::default(); + + assert!(default_token_stream.is_empty()); +} + +#[test] +fn tokenstream_size_hint() { + let tokens = "a b (c d) e".parse::().unwrap(); + + assert_eq!(tokens.into_iter().size_hint(), (4, Some(4))); +} + +#[test] +fn tuple_indexing() { + // This behavior may change depending on https://github.com/rust-lang/rust/pull/71322 + let mut tokens = 
"tuple.0.0".parse::().unwrap().into_iter(); + assert_eq!("tuple", tokens.next().unwrap().to_string()); + assert_eq!(".", tokens.next().unwrap().to_string()); + assert_eq!("0.0", tokens.next().unwrap().to_string()); + assert!(tokens.next().is_none()); +} + +#[cfg(span_locations)] +#[test] +fn non_ascii_tokens() { + check_spans("// abc", &[]); + check_spans("// ábc", &[]); + check_spans("// abc x", &[]); + check_spans("// ábc x", &[]); + check_spans("/* abc */ x", &[(1, 10, 1, 11)]); + check_spans("/* ábc */ x", &[(1, 10, 1, 11)]); + check_spans("/* ab\nc */ x", &[(2, 5, 2, 6)]); + check_spans("/* áb\nc */ x", &[(2, 5, 2, 6)]); + check_spans("/*** abc */ x", &[(1, 12, 1, 13)]); + check_spans("/*** ábc */ x", &[(1, 12, 1, 13)]); + check_spans(r#""abc""#, &[(1, 0, 1, 5)]); + check_spans(r#""ábc""#, &[(1, 0, 1, 5)]); + check_spans(r##"r#"abc"#"##, &[(1, 0, 1, 8)]); + check_spans(r##"r#"ábc"#"##, &[(1, 0, 1, 8)]); + check_spans("r#\"a\nc\"#", &[(1, 0, 2, 3)]); + check_spans("r#\"á\nc\"#", &[(1, 0, 2, 3)]); + check_spans("'a'", &[(1, 0, 1, 3)]); + check_spans("'á'", &[(1, 0, 1, 3)]); + check_spans("//! abc", &[(1, 0, 1, 7), (1, 0, 1, 7), (1, 0, 1, 7)]); + check_spans("//! ábc", &[(1, 0, 1, 7), (1, 0, 1, 7), (1, 0, 1, 7)]); + check_spans("//! abc\n", &[(1, 0, 1, 7), (1, 0, 1, 7), (1, 0, 1, 7)]); + check_spans("//! ábc\n", &[(1, 0, 1, 7), (1, 0, 1, 7), (1, 0, 1, 7)]); + check_spans("/*! abc */", &[(1, 0, 1, 10), (1, 0, 1, 10), (1, 0, 1, 10)]); + check_spans("/*! ábc */", &[(1, 0, 1, 10), (1, 0, 1, 10), (1, 0, 1, 10)]); + check_spans("/*! a\nc */", &[(1, 0, 2, 4), (1, 0, 2, 4), (1, 0, 2, 4)]); + check_spans("/*! á\nc */", &[(1, 0, 2, 4), (1, 0, 2, 4), (1, 0, 2, 4)]); + check_spans("abc", &[(1, 0, 1, 3)]); + check_spans("ábc", &[(1, 0, 1, 3)]); + check_spans("ábć", &[(1, 0, 1, 3)]); + check_spans("abc// foo", &[(1, 0, 1, 3)]); + check_spans("ábc// foo", &[(1, 0, 1, 3)]); + check_spans("ábć// foo", &[(1, 0, 1, 3)]); + check_spans("b\"a\\\n c\"", &[(1, 0, 2, 3)]); +} + +#[cfg(span_locations)] +fn check_spans(p: &str, mut lines: &[(usize, usize, usize, usize)]) { + let ts = p.parse::().unwrap(); + check_spans_internal(ts, &mut lines); + assert!(lines.is_empty(), "leftover ranges: {:?}", lines); +} + +#[cfg(span_locations)] +fn check_spans_internal(ts: TokenStream, lines: &mut &[(usize, usize, usize, usize)]) { + for i in ts { + if let Some((&(sline, scol, eline, ecol), rest)) = lines.split_first() { + *lines = rest; + + let start = i.span().start(); + assert_eq!(start.line, sline, "sline did not match for {}", i); + assert_eq!(start.column, scol, "scol did not match for {}", i); + + let end = i.span().end(); + assert_eq!(end.line, eline, "eline did not match for {}", i); + assert_eq!(end.column, ecol, "ecol did not match for {}", i); + + if let TokenTree::Group(g) = i { + check_spans_internal(g.stream().clone(), lines); + } + } + } +} + +#[test] +fn whitespace() { + // space, horizontal tab, vertical tab, form feed, carriage return, line + // feed, non-breaking space, left-to-right mark, right-to-left mark + let various_spaces = " \t\u{b}\u{c}\r\n\u{a0}\u{200e}\u{200f}"; + let tokens = various_spaces.parse::().unwrap(); + assert_eq!(tokens.into_iter().count(), 0); + + let lone_carriage_returns = " \r \r\r\n "; + lone_carriage_returns.parse::().unwrap(); +} + +#[test] +fn byte_order_mark() { + let string = "\u{feff}foo"; + let tokens = string.parse::().unwrap(); + match tokens.into_iter().next().unwrap() { + TokenTree::Ident(ident) => assert_eq!(ident, "foo"), + _ => unreachable!(), + } + + let string = 
"foo\u{feff}"; + string.parse::().unwrap_err(); +} + +#[cfg(span_locations)] +fn create_span() -> proc_macro2::Span { + let tts: TokenStream = "1".parse().unwrap(); + match tts.into_iter().next().unwrap() { + TokenTree::Literal(literal) => literal.span(), + _ => unreachable!(), + } +} + +#[cfg(span_locations)] +#[test] +fn test_invalidate_current_thread_spans() { + let actual = format!("{:#?}", create_span()); + assert_eq!(actual, "bytes(1..2)"); + let actual = format!("{:#?}", create_span()); + assert_eq!(actual, "bytes(3..4)"); + + proc_macro2::extra::invalidate_current_thread_spans(); + + let actual = format!("{:#?}", create_span()); + // Test that span offsets have been reset after the call + // to invalidate_current_thread_spans() + assert_eq!(actual, "bytes(1..2)"); +} + +#[cfg(span_locations)] +#[test] +#[should_panic(expected = "Invalid span with no related FileInfo!")] +fn test_use_span_after_invalidation() { + let span = create_span(); + + proc_macro2::extra::invalidate_current_thread_spans(); + + span.source_text(); +} diff --git a/vendor/proc-macro2/tests/test_fmt.rs b/vendor/proc-macro2/tests/test_fmt.rs new file mode 100644 index 00000000000000..86a4c387634b33 --- /dev/null +++ b/vendor/proc-macro2/tests/test_fmt.rs @@ -0,0 +1,28 @@ +#![allow(clippy::from_iter_instead_of_collect)] + +use proc_macro2::{Delimiter, Group, Ident, Span, TokenStream, TokenTree}; +use std::iter; + +#[test] +fn test_fmt_group() { + let ident = Ident::new("x", Span::call_site()); + let inner = TokenStream::from_iter(iter::once(TokenTree::Ident(ident))); + let parens_empty = Group::new(Delimiter::Parenthesis, TokenStream::new()); + let parens_nonempty = Group::new(Delimiter::Parenthesis, inner.clone()); + let brackets_empty = Group::new(Delimiter::Bracket, TokenStream::new()); + let brackets_nonempty = Group::new(Delimiter::Bracket, inner.clone()); + let braces_empty = Group::new(Delimiter::Brace, TokenStream::new()); + let braces_nonempty = Group::new(Delimiter::Brace, inner.clone()); + let none_empty = Group::new(Delimiter::None, TokenStream::new()); + let none_nonempty = Group::new(Delimiter::None, inner); + + // Matches libproc_macro. 
+ assert_eq!("()", parens_empty.to_string()); + assert_eq!("(x)", parens_nonempty.to_string()); + assert_eq!("[]", brackets_empty.to_string()); + assert_eq!("[x]", brackets_nonempty.to_string()); + assert_eq!("{ }", braces_empty.to_string()); + assert_eq!("{ x }", braces_nonempty.to_string()); + assert_eq!("", none_empty.to_string()); + assert_eq!("x", none_nonempty.to_string()); +} diff --git a/vendor/proc-macro2/tests/test_size.rs b/vendor/proc-macro2/tests/test_size.rs new file mode 100644 index 00000000000000..8b6791518fe854 --- /dev/null +++ b/vendor/proc-macro2/tests/test_size.rs @@ -0,0 +1,81 @@ +#![allow(unused_attributes)] + +extern crate proc_macro; + +use std::mem; + +#[rustversion::attr(before(1.64), ignore = "requires Rust 1.64+")] +#[cfg_attr(not(target_pointer_width = "64"), ignore = "only applicable to 64-bit")] +#[cfg_attr(randomize_layout, ignore = "disabled due to randomized layout")] +#[test] +fn test_proc_macro_size() { + assert_eq!(mem::size_of::(), 4); + assert_eq!(mem::size_of::>(), 4); + assert_eq!(mem::size_of::(), 20); + assert_eq!(mem::size_of::(), 12); + assert_eq!(mem::size_of::(), 8); + assert_eq!(mem::size_of::(), 16); + assert_eq!(mem::size_of::(), 4); +} + +#[cfg_attr(not(target_pointer_width = "64"), ignore = "only applicable to 64-bit")] +#[cfg_attr(randomize_layout, ignore = "disabled due to randomized layout")] +#[cfg_attr(wrap_proc_macro, ignore = "wrapper mode")] +#[cfg_attr(span_locations, ignore = "span locations are on")] +#[test] +fn test_proc_macro2_fallback_size_without_locations() { + assert_eq!(mem::size_of::(), 0); + assert_eq!(mem::size_of::>(), 1); + assert_eq!(mem::size_of::(), 16); + assert_eq!(mem::size_of::(), 24); + assert_eq!(mem::size_of::(), 8); + assert_eq!(mem::size_of::(), 24); + assert_eq!(mem::size_of::(), 8); +} + +#[cfg_attr(not(target_pointer_width = "64"), ignore = "only applicable to 64-bit")] +#[cfg_attr(randomize_layout, ignore = "disabled due to randomized layout")] +#[cfg_attr(wrap_proc_macro, ignore = "wrapper mode")] +#[cfg_attr(not(span_locations), ignore = "span locations are off")] +#[test] +fn test_proc_macro2_fallback_size_with_locations() { + assert_eq!(mem::size_of::(), 8); + assert_eq!(mem::size_of::>(), 12); + assert_eq!(mem::size_of::(), 24); + assert_eq!(mem::size_of::(), 32); + assert_eq!(mem::size_of::(), 16); + assert_eq!(mem::size_of::(), 32); + assert_eq!(mem::size_of::(), 8); +} + +#[rustversion::attr(before(1.71), ignore = "requires Rust 1.71+")] +#[cfg_attr(not(target_pointer_width = "64"), ignore = "only applicable to 64-bit")] +#[cfg_attr(randomize_layout, ignore = "disabled due to randomized layout")] +#[cfg_attr(not(wrap_proc_macro), ignore = "fallback mode")] +#[cfg_attr(span_locations, ignore = "span locations are on")] +#[test] +fn test_proc_macro2_wrapper_size_without_locations() { + assert_eq!(mem::size_of::(), 4); + assert_eq!(mem::size_of::>(), 8); + assert_eq!(mem::size_of::(), 24); + assert_eq!(mem::size_of::(), 24); + assert_eq!(mem::size_of::(), 12); + assert_eq!(mem::size_of::(), 24); + assert_eq!(mem::size_of::(), 32); +} + +#[rustversion::attr(before(1.65), ignore = "requires Rust 1.65+")] +#[cfg_attr(not(target_pointer_width = "64"), ignore = "only applicable to 64-bit")] +#[cfg_attr(randomize_layout, ignore = "disabled due to randomized layout")] +#[cfg_attr(not(wrap_proc_macro), ignore = "fallback mode")] +#[cfg_attr(not(span_locations), ignore = "span locations are off")] +#[test] +fn test_proc_macro2_wrapper_size_with_locations() { + assert_eq!(mem::size_of::(), 12); + 
assert_eq!(mem::size_of::>(), 12); + assert_eq!(mem::size_of::(), 32); + assert_eq!(mem::size_of::(), 32); + assert_eq!(mem::size_of::(), 20); + assert_eq!(mem::size_of::(), 32); + assert_eq!(mem::size_of::(), 32); +} diff --git a/vendor/quote/.cargo-checksum.json b/vendor/quote/.cargo-checksum.json new file mode 100644 index 00000000000000..0559d43caf7594 --- /dev/null +++ b/vendor/quote/.cargo-checksum.json @@ -0,0 +1 @@ +{"files":{".cargo_vcs_info.json":"31f077cccc677667ae9dbd3ca2a97807c645307199ec9dd6c2620fbf1b80015e",".github/FUNDING.yml":"b017158736b3c9751a2d21edfce7fe61c8954e2fced8da8dd3013c2f3e295bd9",".github/workflows/ci.yml":"a74a11b884e49e64e0af70d7b66a497dfe19f61d1e7375798fb7dcf46d074e30","Cargo.lock":"7f9f3eb56475b19bf94e20384421c6485c217ef1ab136867aa678b2dec7922b3","Cargo.toml":"f98585795e8fb0a2798c24fd5bc39d6de078f96cbe1c4be6532dee2f10ade5ae","Cargo.toml.orig":"8e7c7edea1aa52e0854b58bc77d5da20fb01a76138675757b162f03d2243c1c3","LICENSE-APACHE":"62c7a1e35f56406896d7aa7ca52d0cc0d272ac022b5d2796e7d6905db8a3636a","LICENSE-MIT":"23f18e03dc49df91622fe2a76176497404e46ced8a715d9d2b67a7446571cca3","README.md":"5bc59a97099fbdc7f9f8b69d3f9910e27629184647412b5009b274b5b8bfb6d1","build.rs":"cd6808c02e476b09a520105e2c6f6d325cccb1ecd542cbbcc836a0ae6f6fb0f1","rust-toolchain.toml":"6bbb61302978c736b2da03e4fb40e3beab908f85d533ab46fd541e637b5f3e0f","src/ext.rs":"33e41c8a11743de714c1cab1db37b242ce6df9cdb1dda43927c1f015b33701b3","src/format.rs":"141ee1049cfbe363f0d6e9210996dabc997bd3d1c67eb9695fab1c2a0b100e80","src/ident_fragment.rs":"0b3e6c2129e55910fd2d240e1e7efba6f1796801d24352d1c0bfbceb0e8b678f","src/lib.rs":"1f852ff55a08bc73e37ec76faf862bdd8769a8b825c2f49e5ca97e9b905b28c7","src/runtime.rs":"905008e29cb70a13845c2b334e531569121699b2a23be2acc7ab6070c45221e4","src/spanned.rs":"713678bf5cb3b4bf2f119dcf64d188a63dc59455a724c3d2567ceab83b734d73","src/to_tokens.rs":"5bd52437ed5764ae2b5d84843b23f29497ad0361f3ee3cfda621a4b91c70ef1c","tests/compiletest.rs":"4e381aa8ca3eabb7ac14d1e0c3700b3223e47640547a6988cfa13ad68255f60f","tests/test.rs":"c746974d738a6922b9a25eacb55416d0ef513cc418de3aa5ce5e12cacb7ee94d","tests/ui/does-not-have-iter-interpolated-dup.rs":"ad13eea21d4cdd2ab6c082f633392e1ff20fb0d1af5f2177041e0bf7f30da695","tests/ui/does-not-have-iter-interpolated-dup.stderr":"e5966b716290266591f97f1ab04107a47748d493e10ca99f19675fa76524f205","tests/ui/does-not-have-iter-interpolated.rs":"83a5b3f240651adcbe4b6e51076d76d653ad439b37442cf4054f1fd3c073f3b7","tests/ui/does-not-have-iter-interpolated.stderr":"a20403a06f36b54d45a195e455a11543cca7259e1c9f1bc78f0ce65cc0226347","tests/ui/does-not-have-iter-separated.rs":"fe413c48331d5e3a7ae5fef6a5892a90c72f610d54595879eb49d0a94154ba3f","tests/ui/does-not-have-iter-separated.stderr":"29718da7187e2da98c98bea9bfa405305a6df60af6c2f3c70cc27b7e13deead7","tests/ui/does-not-have-iter.rs":"09dc9499d861b63cebb0848b855b78e2dc9497bfde37ba6339f3625ae009a62f","tests/ui/does-not-have-iter.stderr":"691c985934330d5ba063fd4b172f89702673c710e610e8381e39ab78d729b0f1","tests/ui/not-quotable.rs":"5759d0884943417609f28faadc70254a3e2fd3d9bd6ff7297a3fb70a77fafd8a","tests/ui/not-quotable.stderr":"433a290bd53070d5cce6d623f9ef6f991756a78de109d3e486b46b699c2ce764","tests/ui/not-repeatable.rs":"a4b115c04e4e41049a05f5b69450503fbffeba031218b4189cb931839f7f9a9c","tests/ui/not-repeatable.stderr":"501ea5e47492b55bea457b02e991e0c624cd0c12601e0b759fff54a731370caf","tests/ui/wrong-type-span.rs":"6195e35ea844c0c52ba1cff5d790c3a371af6915d137d377834ad984229ef9ea","tests/ui/wrong-type-span.stderr":"cad072e40e0
ecc04f375122ae41aede2f0da2a9244492b3fcf70249e59d1b128"},"package":"a338cc41d27e6cc6dce6cefc13a0729dfbb81c262b1f519331575dd80ef3067f"} \ No newline at end of file diff --git a/vendor/quote/.cargo_vcs_info.json b/vendor/quote/.cargo_vcs_info.json new file mode 100644 index 00000000000000..43e8425a26858b --- /dev/null +++ b/vendor/quote/.cargo_vcs_info.json @@ -0,0 +1,6 @@ +{ + "git": { + "sha1": "bb9e7a46b3105e11c73416bd59b4455a71068949" + }, + "path_in_vcs": "" +} \ No newline at end of file diff --git a/vendor/quote/.github/FUNDING.yml b/vendor/quote/.github/FUNDING.yml new file mode 100644 index 00000000000000..750707701cdae9 --- /dev/null +++ b/vendor/quote/.github/FUNDING.yml @@ -0,0 +1 @@ +github: dtolnay diff --git a/vendor/quote/.github/workflows/ci.yml b/vendor/quote/.github/workflows/ci.yml new file mode 100644 index 00000000000000..9e25479aa14023 --- /dev/null +++ b/vendor/quote/.github/workflows/ci.yml @@ -0,0 +1,112 @@ +name: CI + +on: + push: + pull_request: + workflow_dispatch: + schedule: [cron: "40 1 * * *"] + +permissions: + contents: read + +env: + RUSTFLAGS: -Dwarnings + +jobs: + pre_ci: + uses: dtolnay/.github/.github/workflows/pre_ci.yml@master + + test: + name: Rust ${{matrix.rust}} + needs: pre_ci + if: needs.pre_ci.outputs.continue + runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + rust: [nightly, stable, beta, 1.76.0, 1.68.0] + timeout-minutes: 45 + steps: + - uses: actions/checkout@v5 + - uses: dtolnay/rust-toolchain@master + with: + toolchain: ${{matrix.rust}} + components: rust-src + - name: Enable type layout randomization + run: echo RUSTFLAGS=${RUSTFLAGS}\ -Zrandomize-layout >> $GITHUB_ENV + if: matrix.rust == 'nightly' + - run: cargo check + - run: cargo test + if: matrix.rust != '1.68.0' + - run: cargo run --manifest-path benches/Cargo.toml + - uses: actions/upload-artifact@v4 + if: matrix.rust == 'nightly' && always() + with: + name: Cargo.lock + path: Cargo.lock + continue-on-error: true + + minimal: + name: Minimal versions + needs: pre_ci + if: needs.pre_ci.outputs.continue + runs-on: ubuntu-latest + timeout-minutes: 45 + steps: + - uses: actions/checkout@v5 + - uses: dtolnay/rust-toolchain@nightly + - run: cargo generate-lockfile -Z minimal-versions + - run: cargo check --locked + + doc: + name: Documentation + needs: pre_ci + if: needs.pre_ci.outputs.continue + runs-on: ubuntu-latest + timeout-minutes: 45 + env: + RUSTDOCFLAGS: -Dwarnings + steps: + - uses: actions/checkout@v5 + - uses: dtolnay/rust-toolchain@nightly + with: + components: rust-src + - uses: dtolnay/install@cargo-docs-rs + - run: cargo docs-rs + + clippy: + name: Clippy + runs-on: ubuntu-latest + if: github.event_name != 'pull_request' + timeout-minutes: 45 + steps: + - uses: actions/checkout@v5 + - uses: dtolnay/rust-toolchain@nightly + with: + components: clippy, rust-src + - run: cargo clippy --tests --workspace -- -Dclippy::all -Dclippy::pedantic + + miri: + name: Miri + needs: pre_ci + if: needs.pre_ci.outputs.continue + runs-on: ubuntu-latest + timeout-minutes: 45 + steps: + - uses: actions/checkout@v5 + - uses: dtolnay/rust-toolchain@miri + - run: cargo miri setup + - run: cargo miri test + env: + MIRIFLAGS: -Zmiri-strict-provenance + + outdated: + name: Outdated + runs-on: ubuntu-latest + if: github.event_name != 'pull_request' + timeout-minutes: 45 + steps: + - uses: actions/checkout@v5 + - uses: dtolnay/rust-toolchain@stable + - uses: dtolnay/install@cargo-outdated + - run: cargo outdated --workspace --exit-code 1 diff --git a/vendor/quote/Cargo.lock 
b/vendor/quote/Cargo.lock new file mode 100644 index 00000000000000..038bc1b211a1e5 --- /dev/null +++ b/vendor/quote/Cargo.lock @@ -0,0 +1,256 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. +version = 3 + +[[package]] +name = "dissimilar" +version = "1.0.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8975ffdaa0ef3661bfe02dbdcc06c9f829dfafe6a3c474de366a8d5e44276921" + +[[package]] +name = "equivalent" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "877a4ace8713b0bcf2a4e7eec82529c029f1d0619886d18145fea96c3ffe5c0f" + +[[package]] +name = "glob" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0cc23270f6e1808e30a928bdc84dea0b9b4136a8bc82338574f23baf47bbd280" + +[[package]] +name = "hashbrown" +version = "0.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5419bdc4f6a9207fbeba6d11b604d481addf78ecd10c11ad51e76c2f6482748d" + +[[package]] +name = "indexmap" +version = "2.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6717a8d2a5a929a1a2eb43a12812498ed141a0bcfb7e8f7844fbdbe4303bba9f" +dependencies = [ + "equivalent", + "hashbrown", +] + +[[package]] +name = "itoa" +version = "1.0.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c" + +[[package]] +name = "memchr" +version = "2.7.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f52b00d39961fc5b2736ea853c9cc86238e165017a493d1d5c8eac6bdc4cc273" + +[[package]] +name = "proc-macro2" +version = "1.0.103" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5ee95bc4ef87b8d5ba32e8b7714ccc834865276eab0aed5c9958d00ec45f49e8" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "quote" +version = "1.0.41" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ce25767e7b499d1b604768e7cde645d14cc8584231ea6b295e9c9eb22c02e1d1" +dependencies = [ + "proc-macro2", +] + +[[package]] +name = "quote" +version = "1.0.42" +dependencies = [ + "proc-macro2", + "rustversion", + "trybuild", +] + +[[package]] +name = "rustversion" +version = "1.0.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b39cdef0fa800fc44525c84ccb54a029961a8215f9619753635a9c0d2538d46d" + +[[package]] +name = "ryu" +version = "1.0.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "28d3b2b1366ec20994f1fd18c3c594f05c5dd4bc44d8bb0c1c632c8d6829481f" + +[[package]] +name = "serde" +version = "1.0.228" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a8e94ea7f378bd32cbbd37198a4a91436180c5bb472411e48b5ec2e2124ae9e" +dependencies = [ + "serde_core", +] + +[[package]] +name = "serde_core" +version = "1.0.228" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41d385c7d4ca58e59fc732af25c3983b67ac852c1a25000afe1175de458b67ad" +dependencies = [ + "serde_derive", +] + +[[package]] +name = "serde_derive" +version = "1.0.228" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d540f220d3187173da220f885ab66608367b6574e925011a9353e4badda91d79" +dependencies = [ + "proc-macro2", + "quote 1.0.41", + "syn", +] + +[[package]] +name = "serde_json" +version = "1.0.145" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "402a6f66d8c709116cf22f558eab210f5a50187f702eb4d7e5ef38d9a7f1c79c" +dependencies = [ + "itoa", + "memchr", + "ryu", + "serde", + "serde_core", +] + +[[package]] +name = "serde_spanned" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e24345aa0fe688594e73770a5f6d1b216508b4f93484c0026d521acd30134392" +dependencies = [ + "serde_core", +] + +[[package]] +name = "syn" +version = "2.0.109" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2f17c7e013e88258aa9543dcbe81aca68a667a9ac37cd69c9fbc07858bfe0e2f" +dependencies = [ + "proc-macro2", + "quote 1.0.41", + "unicode-ident", +] + +[[package]] +name = "target-triple" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "591ef38edfb78ca4771ee32cf494cb8771944bee237a9b91fc9c1424ac4b777b" + +[[package]] +name = "termcolor" +version = "1.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "06794f8f6c5c898b3275aebefa6b8a1cb24cd2c6c79397ab15774837a0bc5755" +dependencies = [ + "winapi-util", +] + +[[package]] +name = "toml" +version = "0.9.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0dc8b1fb61449e27716ec0e1bdf0f6b8f3e8f6b05391e8497b8b6d7804ea6d8" +dependencies = [ + "indexmap", + "serde_core", + "serde_spanned", + "toml_datetime", + "toml_parser", + "toml_writer", + "winnow", +] + +[[package]] +name = "toml_datetime" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f2cdb639ebbc97961c51720f858597f7f24c4fc295327923af55b74c3c724533" +dependencies = [ + "serde_core", +] + +[[package]] +name = "toml_parser" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c0cbe268d35bdb4bb5a56a2de88d0ad0eb70af5384a99d648cd4b3d04039800e" +dependencies = [ + "winnow", +] + +[[package]] +name = "toml_writer" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "df8b2b54733674ad286d16267dcfc7a71ed5c776e4ac7aa3c3e2561f7c637bf2" + +[[package]] +name = "trybuild" +version = "1.0.113" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "559b6a626c0815c942ac98d434746138b4f89ddd6a1b8cbb168c6845fb3376c5" +dependencies = [ + "dissimilar", + "glob", + "serde", + "serde_derive", + "serde_json", + "target-triple", + "termcolor", + "toml", +] + +[[package]] +name = "unicode-ident" +version = "1.0.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9312f7c4f6ff9069b165498234ce8be658059c6728633667c526e27dc2cf1df5" + +[[package]] +name = "winapi-util" +version = "0.1.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c2a7b1c03c876122aa43f3020e6c3c3ee5c05081c9a00739faf7503aeba10d22" +dependencies = [ + "windows-sys", +] + +[[package]] +name = "windows-link" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0805222e57f7521d6a62e36fa9163bc891acd422f971defe97d64e70d0a4fe5" + +[[package]] +name = "windows-sys" +version = "0.61.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ae137229bcbd6cdf0f7b80a31df61766145077ddf49416a728b02cb3921ff3fc" +dependencies = [ + "windows-link", +] + +[[package]] +name = "winnow" +version = "0.7.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "21a0236b59786fed61e2a80582dd500fe61f18b5dca67a4a067d0bc9039339cf" diff --git 
a/vendor/quote/Cargo.toml b/vendor/quote/Cargo.toml new file mode 100644 index 00000000000000..1b6fed14eafe7b --- /dev/null +++ b/vendor/quote/Cargo.toml @@ -0,0 +1,70 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies. +# +# If you are reading this file be aware that the original Cargo.toml +# will likely look very different (and much more reasonable). +# See Cargo.toml.orig for the original contents. + +[package] +edition = "2018" +rust-version = "1.68" +name = "quote" +version = "1.0.42" +authors = ["David Tolnay "] +build = "build.rs" +autolib = false +autobins = false +autoexamples = false +autotests = false +autobenches = false +description = "Quasi-quoting macro quote!(...)" +documentation = "https://docs.rs/quote/" +readme = "README.md" +keywords = [ + "macros", + "syn", +] +categories = ["development-tools::procedural-macro-helpers"] +license = "MIT OR Apache-2.0" +repository = "https://github.com/dtolnay/quote" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] +rustdoc-args = [ + "--generate-link-to-definition", + "--generate-macro-expansion", + "--extern-html-root-url=core=https://doc.rust-lang.org", + "--extern-html-root-url=alloc=https://doc.rust-lang.org", + "--extern-html-root-url=std=https://doc.rust-lang.org", +] + +[features] +default = ["proc-macro"] +proc-macro = ["proc-macro2/proc-macro"] + +[lib] +name = "quote" +path = "src/lib.rs" + +[[test]] +name = "compiletest" +path = "tests/compiletest.rs" + +[[test]] +name = "test" +path = "tests/test.rs" + +[dependencies.proc-macro2] +version = "1.0.80" +default-features = false + +[dev-dependencies.rustversion] +version = "1.0" + +[dev-dependencies.trybuild] +version = "1.0.108" +features = ["diff"] diff --git a/vendor/quote/LICENSE-APACHE b/vendor/quote/LICENSE-APACHE new file mode 100644 index 00000000000000..1b5ec8b78e237b --- /dev/null +++ b/vendor/quote/LICENSE-APACHE @@ -0,0 +1,176 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS diff --git a/vendor/quote/LICENSE-MIT b/vendor/quote/LICENSE-MIT new file mode 100644 index 00000000000000..31aa79387f27e7 --- /dev/null +++ b/vendor/quote/LICENSE-MIT @@ -0,0 +1,23 @@ +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. diff --git a/vendor/quote/README.md b/vendor/quote/README.md new file mode 100644 index 00000000000000..c4316be3b48219 --- /dev/null +++ b/vendor/quote/README.md @@ -0,0 +1,271 @@ +Rust Quasi-Quoting +================== + +[github](https://github.com/dtolnay/quote) +[crates.io](https://crates.io/crates/quote) +[docs.rs](https://docs.rs/quote) +[build status](https://github.com/dtolnay/quote/actions?query=branch%3Amaster) + +This crate provides the [`quote!`] macro for turning Rust syntax tree data +structures into tokens of source code. + +[`quote!`]: https://docs.rs/quote/1.0/quote/macro.quote.html + +Procedural macros in Rust receive a stream of tokens as input, execute arbitrary +Rust code to determine how to manipulate those tokens, and produce a stream of +tokens to hand back to the compiler to compile into the caller's crate. 
+Quasi-quoting is a solution to one piece of that — producing tokens to +return to the compiler. + +The idea of quasi-quoting is that we write *code* that we treat as *data*. +Within the `quote!` macro, we can write what looks like code to our text editor +or IDE. We get all the benefits of the editor's brace matching, syntax +highlighting, indentation, and maybe autocompletion. But rather than compiling +that as code into the current crate, we can treat it as data, pass it around, +mutate it, and eventually hand it back to the compiler as tokens to compile into +the macro caller's crate. + +This crate is motivated by the procedural macro use case, but is a +general-purpose Rust quasi-quoting library and is not specific to procedural +macros. + +```toml +[dependencies] +quote = "1.0" +``` + +*Version requirement: Quote supports rustc 1.68 and up.*
+[*Release notes*](https://github.com/dtolnay/quote/releases) + +
+
+
+## Syntax
+
+The quote crate provides a [`quote!`] macro within which you can write Rust code
+that gets packaged into a [`TokenStream`] and can be treated as data. You should
+think of `TokenStream` as representing a fragment of Rust source code.
+
+[`TokenStream`]: https://docs.rs/proc-macro2/1.0/proc_macro2/struct.TokenStream.html
+
+Within the `quote!` macro, interpolation is done with `#var`. Any type
+implementing the [`quote::ToTokens`] trait can be interpolated. This includes
+most Rust primitive types as well as most of the syntax tree types from [`syn`].
+
+[`quote::ToTokens`]: https://docs.rs/quote/1.0/quote/trait.ToTokens.html
+[`syn`]: https://github.com/dtolnay/syn
+
+```rust
+let tokens = quote! {
+    struct SerializeWith #generics #where_clause {
+        value: &'a #field_ty,
+        phantom: core::marker::PhantomData<#item_ty>,
+    }
+
+    impl #generics serde::Serialize for SerializeWith #generics #where_clause {
+        fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
+        where
+            S: serde::Serializer,
+        {
+            #path(self.value, serializer)
+        }
+    }
+
+    SerializeWith {
+        value: #value,
+        phantom: core::marker::PhantomData::<#item_ty>,
+    }
+};
+```
+
+
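+A minimal sketch of interpolation outside of any macro, assuming only the
+`quote` crate as a dependency; the names `Point` and `FIELD_COUNT` below are
+placeholders, and any value whose type implements `ToTokens` can be
+interpolated the same way.
+
+```rust
+use quote::{format_ident, quote};
+
+fn main() {
+    let name = format_ident!("Point"); // an `Ident`, which implements `ToTokens`
+    let count = 2usize;                // primitives implement `ToTokens` too
+
+    let tokens = quote! {
+        struct #name;
+        const FIELD_COUNT: usize = #count;
+    };
+
+    // `TokenStream` implements `Display`, so the generated code can be printed.
+    println!("{}", tokens);
+}
+```
+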
+ +## Repetition + +Repetition is done using `#(...)*` or `#(...),*` similar to `macro_rules!`. This +iterates through the elements of any variable interpolated within the repetition +and inserts a copy of the repetition body for each one. The variables in an +interpolation may be a `Vec`, slice, `BTreeSet`, or any `Iterator`. + +- `#(#var)*` — no separators +- `#(#var),*` — the character before the asterisk is used as a separator +- `#( struct #var; )*` — the repetition can contain other things +- `#( #k => println!("{}", #v), )*` — even multiple interpolations + +Note that there is a difference between `#(#var ,)*` and `#(#var),*`—the latter +does not produce a trailing comma. This matches the behavior of delimiters in +`macro_rules!`. + +
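+A minimal sketch of repetition, assuming only the `quote` crate as a
+dependency; the `Point` struct and its field names are placeholders.
+
+```rust
+use quote::{format_ident, quote};
+
+fn main() {
+    // Any iterator of `ToTokens` values can drive a repetition.
+    let fields = ["x", "y", "z"].iter().map(|f| format_ident!("{}", f));
+
+    let tokens = quote! {
+        struct Point {
+            // One copy of the body is emitted per element of `fields`.
+            #( #fields: f64, )*
+        }
+    };
+
+    println!("{}", tokens);
+}
+```
+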
+ +## Returning tokens to the compiler + +The `quote!` macro evaluates to an expression of type +`proc_macro2::TokenStream`. Meanwhile Rust procedural macros are expected to +return the type `proc_macro::TokenStream`. + +The difference between the two types is that `proc_macro` types are entirely +specific to procedural macros and cannot ever exist in code outside of a +procedural macro, while `proc_macro2` types may exist anywhere including tests +and non-macro code like main.rs and build.rs. This is why even the procedural +macro ecosystem is largely built around `proc_macro2`, because that ensures the +libraries are unit testable and accessible in non-macro contexts. + +There is a [`From`]-conversion in both directions so returning the output of +`quote!` from a procedural macro usually looks like `tokens.into()` or +`proc_macro::TokenStream::from(tokens)`. + +[`From`]: https://doc.rust-lang.org/std/convert/trait.From.html + +
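+A minimal sketch of that conversion in a derive macro, assuming a proc-macro
+crate with `syn` (with the `derive` feature) and `quote` as dependencies; the
+`HelloWorld` trait and its method are placeholders.
+
+```rust
+use proc_macro::TokenStream;
+use quote::quote;
+
+#[proc_macro_derive(HelloWorld)]
+pub fn hello_world_derive(input: TokenStream) -> TokenStream {
+    // Parse the item the derive is attached to.
+    let input = syn::parse_macro_input!(input as syn::DeriveInput);
+    let name = input.ident;
+
+    // `quote!` yields a `proc_macro2::TokenStream` ...
+    let expanded = quote! {
+        impl HelloWorld for #name {
+            fn hello() -> &'static str {
+                stringify!(#name)
+            }
+        }
+    };
+
+    // ... which is converted back into `proc_macro::TokenStream` for the compiler.
+    expanded.into()
+}
+```
+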
+
+## Examples
+
+### Combining quoted fragments
+
+Usually you don't end up constructing an entire final `TokenStream` in one
+piece. Different parts may come from different helper functions. The tokens
+produced by `quote!` themselves implement `ToTokens` and so can be interpolated
+into later `quote!` invocations to build up a final result.
+
+```rust
+let type_definition = quote! {...};
+let methods = quote! {...};
+
+let tokens = quote! {
+    #type_definition
+    #methods
+};
+```
+
+### Constructing identifiers
+
+Suppose we have an identifier `ident` which came from somewhere in a macro
+input and we need to modify it in some way for the macro output. Let's consider
+prepending the identifier with an underscore.
+
+Simply interpolating the identifier next to an underscore will not have the
+behavior of concatenating them. The underscore and the identifier will continue
+to be two separate tokens as if you had written `_ x`.
+
+```rust
+// incorrect
+quote! {
+    let mut _#ident = 0;
+}
+```
+
+The solution is to build a new identifier token with the correct value. As this
+is such a common case, the `format_ident!` macro provides a convenient utility
+for doing so correctly.
+
+```rust
+let varname = format_ident!("_{}", ident);
+quote! {
+    let mut #varname = 0;
+}
+```
+
+Alternatively, the APIs provided by Syn and proc-macro2 can be used to directly
+build the identifier. This is roughly equivalent to the above, but will not
+handle `ident` being a raw identifier.
+
+```rust
+let concatenated = format!("_{}", ident);
+let varname = syn::Ident::new(&concatenated, ident.span());
+quote! {
+    let mut #varname = 0;
+}
+```
+
+### Making method calls
+
+Let's say our macro requires some type specified in the macro input to have a
+constructor called `new`. We have the type in a variable called `field_type` of
+type `syn::Type` and want to invoke the constructor.
+
+```rust
+// incorrect
+quote! {
+    let value = #field_type::new();
+}
+```
+
+This works only sometimes. If `field_type` is `String`, the expanded code
+contains `String::new()` which is fine. But if `field_type` is something like
+`Vec<i32>` then the expanded code is `Vec<i32>::new()` which is invalid syntax.
+Ordinarily in handwritten Rust we would write `Vec::<i32>::new()` but for
+macros often the following is more convenient.
+
+```rust
+quote! {
+    let value = <#field_type>::new();
+}
+```
+
+This expands to `<Vec<i32>>::new()` which behaves correctly.
+
+A similar pattern is appropriate for trait methods.
+
+```rust
+quote! {
+    let value = <#field_type as core::default::Default>::default();
+}
+```
+
+ +## Hygiene + +Any interpolated tokens preserve the `Span` information provided by their +`ToTokens` implementation. Tokens that originate within a `quote!` invocation +are spanned with [`Span::call_site()`]. + +[`Span::call_site()`]: https://docs.rs/proc-macro2/1.0/proc_macro2/struct.Span.html#method.call_site + +A different span can be provided explicitly through the [`quote_spanned!`] +macro. + +[`quote_spanned!`]: https://docs.rs/quote/1.0/quote/macro.quote_spanned.html + +
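+
+For example (a sketch, not from the upstream README), `quote_spanned!` takes an
+explicit span and applies it to every token produced by the invocation; the
+type name and the assertion struct below are made up for illustration.
+
+```rust
+use proc_macro2::Span;
+use quote::{quote, quote_spanned};
+
+fn main() {
+    // In a real macro this span would come from the user's input tokens;
+    // call_site() is just a stand-in here.
+    let span = Span::call_site();
+
+    let ty = quote!(MyType);
+    let assertion = quote_spanned! {span=>
+        struct _AssertSync where #ty: Sync;
+    };
+
+    println!("{}", assertion);
+}
+```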
+ +## Non-macro code generators + +When using `quote` in a build.rs or main.rs and writing the output out to a +file, consider having the code generator pass the tokens through [prettyplease] +before writing. This way if an error occurs in the generated code it is +convenient for a human to read and debug. + +Be aware that no kind of hygiene or span information is retained when tokens are +written to a file; the conversion from tokens to source code is lossy. + +Example usage in build.rs: + +```rust +let output = quote! { ... }; +let syntax_tree = syn::parse2(output).unwrap(); +let formatted = prettyplease::unparse(&syntax_tree); + +let out_dir = env::var_os("OUT_DIR").unwrap(); +let dest_path = Path::new(&out_dir).join("out.rs"); +fs::write(dest_path, formatted).unwrap(); +``` + +[prettyplease]: https://github.com/dtolnay/prettyplease + +
+
+#### License
+
+Licensed under either of Apache License, Version 2.0 or MIT license at your
+option.
+
+ + +Unless you explicitly state otherwise, any contribution intentionally submitted +for inclusion in this crate by you, as defined in the Apache-2.0 license, shall +be dual licensed as above, without any additional terms or conditions. + diff --git a/vendor/quote/build.rs b/vendor/quote/build.rs new file mode 100644 index 00000000000000..50f98cb3bda604 --- /dev/null +++ b/vendor/quote/build.rs @@ -0,0 +1,32 @@ +use std::env; +use std::process::Command; +use std::str; + +fn main() { + println!("cargo:rerun-if-changed=build.rs"); + + let Some(minor) = rustc_minor_version() else { + return; + }; + + if minor >= 77 { + println!("cargo:rustc-check-cfg=cfg(no_diagnostic_namespace)"); + } + + // Support for the `#[diagnostic]` tool attribute namespace + // https://blog.rust-lang.org/2024/05/02/Rust-1.78.0.html#diagnostic-attributes + if minor < 78 { + println!("cargo:rustc-cfg=no_diagnostic_namespace"); + } +} + +fn rustc_minor_version() -> Option { + let rustc = env::var_os("RUSTC")?; + let output = Command::new(rustc).arg("--version").output().ok()?; + let version = str::from_utf8(&output.stdout).ok()?; + let mut pieces = version.split('.'); + if pieces.next() != Some("rustc 1") { + return None; + } + pieces.next()?.parse().ok() +} diff --git a/vendor/quote/rust-toolchain.toml b/vendor/quote/rust-toolchain.toml new file mode 100644 index 00000000000000..20fe888c30ab44 --- /dev/null +++ b/vendor/quote/rust-toolchain.toml @@ -0,0 +1,2 @@ +[toolchain] +components = ["rust-src"] diff --git a/vendor/quote/src/ext.rs b/vendor/quote/src/ext.rs new file mode 100644 index 00000000000000..bc983a5d7d9166 --- /dev/null +++ b/vendor/quote/src/ext.rs @@ -0,0 +1,136 @@ +use super::ToTokens; +use core::iter; +use proc_macro2::{TokenStream, TokenTree}; + +/// TokenStream extension trait with methods for appending tokens. +/// +/// This trait is sealed and cannot be implemented outside of the `quote` crate. +pub trait TokenStreamExt: private::Sealed { + /// For use by `ToTokens` implementations. + /// + /// Appends the token specified to this list of tokens. + fn append(&mut self, token: U) + where + U: Into; + + /// For use by `ToTokens` implementations. + /// + /// ``` + /// # use quote::{quote, TokenStreamExt, ToTokens}; + /// # use proc_macro2::TokenStream; + /// # + /// struct X; + /// + /// impl ToTokens for X { + /// fn to_tokens(&self, tokens: &mut TokenStream) { + /// tokens.append_all(&[true, false]); + /// } + /// } + /// + /// let tokens = quote!(#X); + /// assert_eq!(tokens.to_string(), "true false"); + /// ``` + fn append_all(&mut self, iter: I) + where + I: IntoIterator, + I::Item: ToTokens; + + /// For use by `ToTokens` implementations. + /// + /// Appends all of the items in the iterator `I`, separated by the tokens + /// `U`. + fn append_separated(&mut self, iter: I, op: U) + where + I: IntoIterator, + I::Item: ToTokens, + U: ToTokens; + + /// For use by `ToTokens` implementations. + /// + /// Appends all tokens in the iterator `I`, appending `U` after each + /// element, including after the last element of the iterator. 
+ fn append_terminated(&mut self, iter: I, term: U) + where + I: IntoIterator, + I::Item: ToTokens, + U: ToTokens; +} + +impl TokenStreamExt for TokenStream { + fn append(&mut self, token: U) + where + U: Into, + { + self.extend(iter::once(token.into())); + } + + fn append_all(&mut self, iter: I) + where + I: IntoIterator, + I::Item: ToTokens, + { + do_append_all(self, iter.into_iter()); + + fn do_append_all(stream: &mut TokenStream, iter: I) + where + I: Iterator, + I::Item: ToTokens, + { + for token in iter { + token.to_tokens(stream); + } + } + } + + fn append_separated(&mut self, iter: I, op: U) + where + I: IntoIterator, + I::Item: ToTokens, + U: ToTokens, + { + do_append_separated(self, iter.into_iter(), op); + + fn do_append_separated(stream: &mut TokenStream, iter: I, op: U) + where + I: Iterator, + I::Item: ToTokens, + U: ToTokens, + { + for (i, token) in iter.into_iter().enumerate() { + if i > 0 { + op.to_tokens(stream); + } + token.to_tokens(stream); + } + } + } + + fn append_terminated(&mut self, iter: I, term: U) + where + I: IntoIterator, + I::Item: ToTokens, + U: ToTokens, + { + do_append_terminated(self, iter.into_iter(), term); + + fn do_append_terminated(stream: &mut TokenStream, iter: I, term: U) + where + I: Iterator, + I::Item: ToTokens, + U: ToTokens, + { + for token in iter { + token.to_tokens(stream); + term.to_tokens(stream); + } + } + } +} + +mod private { + use proc_macro2::TokenStream; + + pub trait Sealed {} + + impl Sealed for TokenStream {} +} diff --git a/vendor/quote/src/format.rs b/vendor/quote/src/format.rs new file mode 100644 index 00000000000000..ec0bbf38ba3776 --- /dev/null +++ b/vendor/quote/src/format.rs @@ -0,0 +1,168 @@ +/// Formatting macro for constructing `Ident`s. +/// +///
+/// +/// # Syntax +/// +/// Syntax is copied from the [`format!`] macro, supporting both positional and +/// named arguments. +/// +/// Only a limited set of formatting traits are supported. The current mapping +/// of format types to traits is: +/// +/// * `{}` ⇒ [`IdentFragment`] +/// * `{:o}` ⇒ [`Octal`](std::fmt::Octal) +/// * `{:x}` ⇒ [`LowerHex`](std::fmt::LowerHex) +/// * `{:X}` ⇒ [`UpperHex`](std::fmt::UpperHex) +/// * `{:b}` ⇒ [`Binary`](std::fmt::Binary) +/// +/// See [`std::fmt`] for more information. +/// +///
+/// +/// # IdentFragment +/// +/// Unlike `format!`, this macro uses the [`IdentFragment`] formatting trait by +/// default. This trait is like `Display`, with a few differences: +/// +/// * `IdentFragment` is only implemented for a limited set of types, such as +/// unsigned integers and strings. +/// * [`Ident`] arguments will have their `r#` prefixes stripped, if present. +/// +/// [`IdentFragment`]: crate::IdentFragment +/// [`Ident`]: proc_macro2::Ident +/// +///
+/// +/// # Hygiene +/// +/// The [`Span`] of the first `Ident` argument is used as the span of the final +/// identifier, falling back to [`Span::call_site`] when no identifiers are +/// provided. +/// +/// ``` +/// # use quote::format_ident; +/// # let ident = format_ident!("Ident"); +/// // If `ident` is an Ident, the span of `my_ident` will be inherited from it. +/// let my_ident = format_ident!("My{}{}", ident, "IsCool"); +/// assert_eq!(my_ident, "MyIdentIsCool"); +/// ``` +/// +/// Alternatively, the span can be overridden by passing the `span` named +/// argument. +/// +/// ``` +/// # use quote::format_ident; +/// # const IGNORE_TOKENS: &'static str = stringify! { +/// let my_span = /* ... */; +/// # }; +/// # let my_span = proc_macro2::Span::call_site(); +/// format_ident!("MyIdent", span = my_span); +/// ``` +/// +/// [`Span`]: proc_macro2::Span +/// [`Span::call_site`]: proc_macro2::Span::call_site +/// +///


+/// +/// # Panics +/// +/// This method will panic if the resulting formatted string is not a valid +/// identifier. +/// +///
+/// +/// # Examples +/// +/// Composing raw and non-raw identifiers: +/// ``` +/// # use quote::format_ident; +/// let my_ident = format_ident!("My{}", "Ident"); +/// assert_eq!(my_ident, "MyIdent"); +/// +/// let raw = format_ident!("r#Raw"); +/// assert_eq!(raw, "r#Raw"); +/// +/// let my_ident_raw = format_ident!("{}Is{}", my_ident, raw); +/// assert_eq!(my_ident_raw, "MyIdentIsRaw"); +/// ``` +/// +/// Integer formatting options: +/// ``` +/// # use quote::format_ident; +/// let num: u32 = 10; +/// +/// let decimal = format_ident!("Id_{}", num); +/// assert_eq!(decimal, "Id_10"); +/// +/// let octal = format_ident!("Id_{:o}", num); +/// assert_eq!(octal, "Id_12"); +/// +/// let binary = format_ident!("Id_{:b}", num); +/// assert_eq!(binary, "Id_1010"); +/// +/// let lower_hex = format_ident!("Id_{:x}", num); +/// assert_eq!(lower_hex, "Id_a"); +/// +/// let upper_hex = format_ident!("Id_{:X}", num); +/// assert_eq!(upper_hex, "Id_A"); +/// ``` +#[macro_export] +macro_rules! format_ident { + ($fmt:expr) => { + $crate::format_ident_impl!([ + $crate::__private::Option::None, + $fmt + ]) + }; + + ($fmt:expr, $($rest:tt)*) => { + $crate::format_ident_impl!([ + $crate::__private::Option::None, + $fmt + ] $($rest)*) + }; +} + +#[macro_export] +#[doc(hidden)] +macro_rules! format_ident_impl { + // Final state + ([$span:expr, $($fmt:tt)*]) => { + $crate::__private::mk_ident( + &$crate::__private::format!($($fmt)*), + $span, + ) + }; + + // Span argument + ([$old:expr, $($fmt:tt)*] span = $span:expr) => { + $crate::format_ident_impl!([$old, $($fmt)*] span = $span,) + }; + ([$old:expr, $($fmt:tt)*] span = $span:expr, $($rest:tt)*) => { + $crate::format_ident_impl!([ + $crate::__private::Option::Some::<$crate::__private::Span>($span), + $($fmt)* + ] $($rest)*) + }; + + // Named argument + ([$span:expr, $($fmt:tt)*] $name:ident = $arg:expr) => { + $crate::format_ident_impl!([$span, $($fmt)*] $name = $arg,) + }; + ([$span:expr, $($fmt:tt)*] $name:ident = $arg:expr, $($rest:tt)*) => { + match $crate::__private::IdentFragmentAdapter(&$arg) { + arg => $crate::format_ident_impl!([$span.or(arg.span()), $($fmt)*, $name = arg] $($rest)*), + } + }; + + // Positional argument + ([$span:expr, $($fmt:tt)*] $arg:expr) => { + $crate::format_ident_impl!([$span, $($fmt)*] $arg,) + }; + ([$span:expr, $($fmt:tt)*] $arg:expr, $($rest:tt)*) => { + match $crate::__private::IdentFragmentAdapter(&$arg) { + arg => $crate::format_ident_impl!([$span.or(arg.span()), $($fmt)*, arg] $($rest)*), + } + }; +} diff --git a/vendor/quote/src/ident_fragment.rs b/vendor/quote/src/ident_fragment.rs new file mode 100644 index 00000000000000..6c2a9a87acb411 --- /dev/null +++ b/vendor/quote/src/ident_fragment.rs @@ -0,0 +1,88 @@ +use alloc::borrow::Cow; +use core::fmt; +use proc_macro2::{Ident, Span}; + +/// Specialized formatting trait used by `format_ident!`. +/// +/// [`Ident`] arguments formatted using this trait will have their `r#` prefix +/// stripped, if present. +/// +/// See [`format_ident!`] for more information. +/// +/// [`format_ident!`]: crate::format_ident +pub trait IdentFragment { + /// Format this value as an identifier fragment. + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result; + + /// Span associated with this `IdentFragment`. + /// + /// If non-`None`, may be inherited by formatted identifiers. 
+ fn span(&self) -> Option { + None + } +} + +impl IdentFragment for &T { + fn span(&self) -> Option { + ::span(*self) + } + + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + IdentFragment::fmt(*self, f) + } +} + +impl IdentFragment for &mut T { + fn span(&self) -> Option { + ::span(*self) + } + + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + IdentFragment::fmt(*self, f) + } +} + +impl IdentFragment for Ident { + fn span(&self) -> Option { + Some(self.span()) + } + + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let id = self.to_string(); + if let Some(id) = id.strip_prefix("r#") { + fmt::Display::fmt(id, f) + } else { + fmt::Display::fmt(&id[..], f) + } + } +} + +impl IdentFragment for Cow<'_, T> +where + T: IdentFragment + ToOwned + ?Sized, +{ + fn span(&self) -> Option { + T::span(self) + } + + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + T::fmt(self, f) + } +} + +// Limited set of types which this is implemented for, as we want to avoid types +// which will often include non-identifier characters in their `Display` impl. +macro_rules! ident_fragment_display { + ($($T:ty),*) => { + $( + impl IdentFragment for $T { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fmt::Display::fmt(self, f) + } + } + )* + }; +} + +ident_fragment_display!(bool, str, String, char); +ident_fragment_display!(u8, u16, u32, u64, u128, usize); diff --git a/vendor/quote/src/lib.rs b/vendor/quote/src/lib.rs new file mode 100644 index 00000000000000..dd2f5b7cab62ed --- /dev/null +++ b/vendor/quote/src/lib.rs @@ -0,0 +1,1455 @@ +//! [![github]](https://github.com/dtolnay/quote) [![crates-io]](https://crates.io/crates/quote) [![docs-rs]](https://docs.rs/quote) +//! +//! [github]: https://img.shields.io/badge/github-8da0cb?style=for-the-badge&labelColor=555555&logo=github +//! [crates-io]: https://img.shields.io/badge/crates.io-fc8d62?style=for-the-badge&labelColor=555555&logo=rust +//! [docs-rs]: https://img.shields.io/badge/docs.rs-66c2a5?style=for-the-badge&labelColor=555555&logo=docs.rs +//! +//!
+//! +//! This crate provides the [`quote!`] macro for turning Rust syntax tree data +//! structures into tokens of source code. +//! +//! Procedural macros in Rust receive a stream of tokens as input, execute +//! arbitrary Rust code to determine how to manipulate those tokens, and produce +//! a stream of tokens to hand back to the compiler to compile into the caller's +//! crate. Quasi-quoting is a solution to one piece of that — producing +//! tokens to return to the compiler. +//! +//! The idea of quasi-quoting is that we write *code* that we treat as *data*. +//! Within the `quote!` macro, we can write what looks like code to our text +//! editor or IDE. We get all the benefits of the editor's brace matching, +//! syntax highlighting, indentation, and maybe autocompletion. But rather than +//! compiling that as code into the current crate, we can treat it as data, pass +//! it around, mutate it, and eventually hand it back to the compiler as tokens +//! to compile into the macro caller's crate. +//! +//! This crate is motivated by the procedural macro use case, but is a +//! general-purpose Rust quasi-quoting library and is not specific to procedural +//! macros. +//! +//! ```toml +//! [dependencies] +//! quote = "1.0" +//! ``` +//! +//!
+//! +//! # Example +//! +//! The following quasi-quoted block of code is something you might find in [a] +//! procedural macro having to do with data structure serialization. The `#var` +//! syntax performs interpolation of runtime variables into the quoted tokens. +//! Check out the documentation of the [`quote!`] macro for more detail about +//! the syntax. See also the [`quote_spanned!`] macro which is important for +//! implementing hygienic procedural macros. +//! +//! [a]: https://serde.rs/ +//! +//! ``` +//! # use quote::quote; +//! # +//! # let generics = ""; +//! # let where_clause = ""; +//! # let field_ty = ""; +//! # let item_ty = ""; +//! # let path = ""; +//! # let value = ""; +//! # +//! let tokens = quote! { +//! struct SerializeWith #generics #where_clause { +//! value: &'a #field_ty, +//! phantom: core::marker::PhantomData<#item_ty>, +//! } +//! +//! impl #generics serde::Serialize for SerializeWith #generics #where_clause { +//! fn serialize(&self, serializer: S) -> Result +//! where +//! S: serde::Serializer, +//! { +//! #path(self.value, serializer) +//! } +//! } +//! +//! SerializeWith { +//! value: #value, +//! phantom: core::marker::PhantomData::<#item_ty>, +//! } +//! }; +//! ``` +//! +//!
+//! +//! # Non-macro code generators +//! +//! When using `quote` in a build.rs or main.rs and writing the output out to a +//! file, consider having the code generator pass the tokens through +//! [prettyplease] before writing. This way if an error occurs in the generated +//! code it is convenient for a human to read and debug. +//! +//! [prettyplease]: https://github.com/dtolnay/prettyplease + +// Quote types in rustdoc of other crates get linked to here. +#![doc(html_root_url = "https://docs.rs/quote/1.0.42")] +#![allow( + clippy::doc_markdown, + clippy::elidable_lifetime_names, + clippy::items_after_statements, + clippy::missing_errors_doc, + clippy::missing_panics_doc, + clippy::module_name_repetitions, + clippy::needless_lifetimes, + // false positive https://github.com/rust-lang/rust-clippy/issues/6983 + clippy::wrong_self_convention, +)] + +extern crate alloc; + +#[cfg(feature = "proc-macro")] +extern crate proc_macro; + +mod ext; +mod format; +mod ident_fragment; +mod to_tokens; + +// Not public API. +#[doc(hidden)] +#[path = "runtime.rs"] +pub mod __private; + +pub use crate::ext::TokenStreamExt; +pub use crate::ident_fragment::IdentFragment; +pub use crate::to_tokens::ToTokens; + +// Not public API. +#[doc(hidden)] +pub mod spanned; + +macro_rules! __quote { + ($quote:item) => { + /// The whole point. + /// + /// Performs variable interpolation against the input and produces it as + /// [`proc_macro2::TokenStream`]. + /// + /// Note: for returning tokens to the compiler in a procedural macro, use + /// `.into()` on the result to convert to [`proc_macro::TokenStream`]. + /// + ///
+ /// + /// # Interpolation + /// + /// Variable interpolation is done with `#var` (similar to `$var` in + /// `macro_rules!` macros). This grabs the `var` variable that is currently in + /// scope and inserts it in that location in the output tokens. Any type + /// implementing the [`ToTokens`] trait can be interpolated. This includes most + /// Rust primitive types as well as most of the syntax tree types from the [Syn] + /// crate. + /// + /// [Syn]: https://github.com/dtolnay/syn + /// + /// Repetition is done using `#(...)*` or `#(...),*` again similar to + /// `macro_rules!`. This iterates through the elements of any variable + /// interpolated within the repetition and inserts a copy of the repetition body + /// for each one. The variables in an interpolation may be a `Vec`, slice, + /// `BTreeSet`, or any `Iterator`. + /// + /// - `#(#var)*` — no separators + /// - `#(#var),*` — the character before the asterisk is used as a separator + /// - `#( struct #var; )*` — the repetition can contain other tokens + /// - `#( #k => println!("{}", #v), )*` — even multiple interpolations + /// + ///
+ /// + /// # Hygiene + /// + /// Any interpolated tokens preserve the `Span` information provided by their + /// `ToTokens` implementation. Tokens that originate within the `quote!` + /// invocation are spanned with [`Span::call_site()`]. + /// + /// [`Span::call_site()`]: proc_macro2::Span::call_site + /// + /// A different span can be provided through the [`quote_spanned!`] macro. + /// + ///
+ /// + /// # Return type + /// + /// The macro evaluates to an expression of type `proc_macro2::TokenStream`. + /// Meanwhile Rust procedural macros are expected to return the type + /// `proc_macro::TokenStream`. + /// + /// The difference between the two types is that `proc_macro` types are entirely + /// specific to procedural macros and cannot ever exist in code outside of a + /// procedural macro, while `proc_macro2` types may exist anywhere including + /// tests and non-macro code like main.rs and build.rs. This is why even the + /// procedural macro ecosystem is largely built around `proc_macro2`, because + /// that ensures the libraries are unit testable and accessible in non-macro + /// contexts. + /// + /// There is a [`From`]-conversion in both directions so returning the output of + /// `quote!` from a procedural macro usually looks like `tokens.into()` or + /// `proc_macro::TokenStream::from(tokens)`. + /// + ///
+ /// + /// # Examples + /// + /// ### Procedural macro + /// + /// The structure of a basic procedural macro is as follows. Refer to the [Syn] + /// crate for further useful guidance on using `quote!` as part of a procedural + /// macro. + /// + /// [Syn]: https://github.com/dtolnay/syn + /// + /// ``` + /// # #[cfg(any())] + /// extern crate proc_macro; + /// # extern crate proc_macro2; + /// + /// # #[cfg(any())] + /// use proc_macro::TokenStream; + /// # use proc_macro2::TokenStream; + /// use quote::quote; + /// + /// # const IGNORE_TOKENS: &'static str = stringify! { + /// #[proc_macro_derive(HeapSize)] + /// # }; + /// pub fn derive_heap_size(input: TokenStream) -> TokenStream { + /// // Parse the input and figure out what implementation to generate... + /// # const IGNORE_TOKENS: &'static str = stringify! { + /// let name = /* ... */; + /// let expr = /* ... */; + /// # }; + /// # + /// # let name = 0; + /// # let expr = 0; + /// + /// let expanded = quote! { + /// // The generated impl. + /// impl heapsize::HeapSize for #name { + /// fn heap_size_of_children(&self) -> usize { + /// #expr + /// } + /// } + /// }; + /// + /// // Hand the output tokens back to the compiler. + /// TokenStream::from(expanded) + /// } + /// ``` + /// + ///


+ /// + /// ### Combining quoted fragments + /// + /// Usually you don't end up constructing an entire final `TokenStream` in one + /// piece. Different parts may come from different helper functions. The tokens + /// produced by `quote!` themselves implement `ToTokens` and so can be + /// interpolated into later `quote!` invocations to build up a final result. + /// + /// ``` + /// # use quote::quote; + /// # + /// let type_definition = quote! {...}; + /// let methods = quote! {...}; + /// + /// let tokens = quote! { + /// #type_definition + /// #methods + /// }; + /// ``` + /// + ///


+ /// + /// ### Constructing identifiers + /// + /// Suppose we have an identifier `ident` which came from somewhere in a macro + /// input and we need to modify it in some way for the macro output. Let's + /// consider prepending the identifier with an underscore. + /// + /// Simply interpolating the identifier next to an underscore will not have the + /// behavior of concatenating them. The underscore and the identifier will + /// continue to be two separate tokens as if you had written `_ x`. + /// + /// ``` + /// # use proc_macro2::{self as syn, Span}; + /// # use quote::quote; + /// # + /// # let ident = syn::Ident::new("i", Span::call_site()); + /// # + /// // incorrect + /// quote! { + /// let mut _#ident = 0; + /// } + /// # ; + /// ``` + /// + /// The solution is to build a new identifier token with the correct value. As + /// this is such a common case, the [`format_ident!`] macro provides a + /// convenient utility for doing so correctly. + /// + /// ``` + /// # use proc_macro2::{Ident, Span}; + /// # use quote::{format_ident, quote}; + /// # + /// # let ident = Ident::new("i", Span::call_site()); + /// # + /// let varname = format_ident!("_{}", ident); + /// quote! { + /// let mut #varname = 0; + /// } + /// # ; + /// ``` + /// + /// Alternatively, the APIs provided by Syn and proc-macro2 can be used to + /// directly build the identifier. This is roughly equivalent to the above, but + /// will not handle `ident` being a raw identifier. + /// + /// ``` + /// # use proc_macro2::{self as syn, Span}; + /// # use quote::quote; + /// # + /// # let ident = syn::Ident::new("i", Span::call_site()); + /// # + /// let concatenated = format!("_{}", ident); + /// let varname = syn::Ident::new(&concatenated, ident.span()); + /// quote! { + /// let mut #varname = 0; + /// } + /// # ; + /// ``` + /// + ///


+ /// + /// ### Making method calls + /// + /// Let's say our macro requires some type specified in the macro input to have + /// a constructor called `new`. We have the type in a variable called + /// `field_type` of type `syn::Type` and want to invoke the constructor. + /// + /// ``` + /// # use quote::quote; + /// # + /// # let field_type = quote!(...); + /// # + /// // incorrect + /// quote! { + /// let value = #field_type::new(); + /// } + /// # ; + /// ``` + /// + /// This works only sometimes. If `field_type` is `String`, the expanded code + /// contains `String::new()` which is fine. But if `field_type` is something + /// like `Vec` then the expanded code is `Vec::new()` which is invalid + /// syntax. Ordinarily in handwritten Rust we would write `Vec::::new()` + /// but for macros often the following is more convenient. + /// + /// ``` + /// # use quote::quote; + /// # + /// # let field_type = quote!(...); + /// # + /// quote! { + /// let value = <#field_type>::new(); + /// } + /// # ; + /// ``` + /// + /// This expands to `>::new()` which behaves correctly. + /// + /// A similar pattern is appropriate for trait methods. + /// + /// ``` + /// # use quote::quote; + /// # + /// # let field_type = quote!(...); + /// # + /// quote! { + /// let value = <#field_type as core::default::Default>::default(); + /// } + /// # ; + /// ``` + /// + ///


+ /// + /// ### Interpolating text inside of doc comments + /// + /// Neither doc comments nor string literals get interpolation behavior in + /// quote: + /// + /// ```compile_fail + /// quote! { + /// /// try to interpolate: #ident + /// /// + /// /// ... + /// } + /// ``` + /// + /// ```compile_fail + /// quote! { + /// #[doc = "try to interpolate: #ident"] + /// } + /// ``` + /// + /// Instead the best way to build doc comments that involve variables is by + /// formatting the doc string literal outside of quote. + /// + /// ```rust + /// # use proc_macro2::{Ident, Span}; + /// # use quote::quote; + /// # + /// # const IGNORE: &str = stringify! { + /// let msg = format!(...); + /// # }; + /// # + /// # let ident = Ident::new("var", Span::call_site()); + /// # let msg = format!("try to interpolate: {}", ident); + /// quote! { + /// #[doc = #msg] + /// /// + /// /// ... + /// } + /// # ; + /// ``` + /// + ///


+ /// + /// ### Indexing into a tuple struct + /// + /// When interpolating indices of a tuple or tuple struct, we need them not to + /// appears suffixed as integer literals by interpolating them as [`syn::Index`] + /// instead. + /// + /// [`syn::Index`]: https://docs.rs/syn/2.0/syn/struct.Index.html + /// + /// ```compile_fail + /// let i = 0usize..self.fields.len(); + /// + /// // expands to 0 + self.0usize.heap_size() + self.1usize.heap_size() + ... + /// // which is not valid syntax + /// quote! { + /// 0 #( + self.#i.heap_size() )* + /// } + /// ``` + /// + /// ``` + /// # use proc_macro2::{Ident, TokenStream}; + /// # use quote::quote; + /// # + /// # mod syn { + /// # use proc_macro2::{Literal, TokenStream}; + /// # use quote::{ToTokens, TokenStreamExt}; + /// # + /// # pub struct Index(usize); + /// # + /// # impl From for Index { + /// # fn from(i: usize) -> Self { + /// # Index(i) + /// # } + /// # } + /// # + /// # impl ToTokens for Index { + /// # fn to_tokens(&self, tokens: &mut TokenStream) { + /// # tokens.append(Literal::usize_unsuffixed(self.0)); + /// # } + /// # } + /// # } + /// # + /// # struct Struct { + /// # fields: Vec, + /// # } + /// # + /// # impl Struct { + /// # fn example(&self) -> TokenStream { + /// let i = (0..self.fields.len()).map(syn::Index::from); + /// + /// // expands to 0 + self.0.heap_size() + self.1.heap_size() + ... + /// quote! { + /// 0 #( + self.#i.heap_size() )* + /// } + /// # } + /// # } + /// ``` + $quote + }; +} + +#[cfg(doc)] +__quote![ + #[macro_export] + macro_rules! quote { + ($($tt:tt)*) => { + ... + }; + } +]; + +#[cfg(not(doc))] +__quote![ + #[macro_export] + macro_rules! quote { + () => { + $crate::__private::TokenStream::new() + }; + + // Special case rule for a single tt, for performance. + ($tt:tt) => {{ + let mut _s = $crate::__private::TokenStream::new(); + $crate::quote_token!{$tt _s} + _s + }}; + + // Special case rules for two tts, for performance. + (# $var:ident) => {{ + let mut _s = $crate::__private::TokenStream::new(); + $crate::ToTokens::to_tokens(&$var, &mut _s); + _s + }}; + ($tt1:tt $tt2:tt) => {{ + let mut _s = $crate::__private::TokenStream::new(); + $crate::quote_token!{$tt1 _s} + $crate::quote_token!{$tt2 _s} + _s + }}; + + // Rule for any other number of tokens. + ($($tt:tt)*) => {{ + let mut _s = $crate::__private::TokenStream::new(); + $crate::quote_each_token!{_s $($tt)*} + _s + }}; + } +]; + +macro_rules! __quote_spanned { + ($quote_spanned:item) => { + /// Same as `quote!`, but applies a given span to all tokens originating within + /// the macro invocation. + /// + ///
+ /// + /// # Syntax + /// + /// A span expression of type [`Span`], followed by `=>`, followed by the tokens + /// to quote. The span expression should be brief — use a variable for + /// anything more than a few characters. There should be no space before the + /// `=>` token. + /// + /// [`Span`]: proc_macro2::Span + /// + /// ``` + /// # use proc_macro2::Span; + /// # use quote::quote_spanned; + /// # + /// # const IGNORE_TOKENS: &'static str = stringify! { + /// let span = /* ... */; + /// # }; + /// # let span = Span::call_site(); + /// # let init = 0; + /// + /// // On one line, use parentheses. + /// let tokens = quote_spanned!(span=> Box::into_raw(Box::new(#init))); + /// + /// // On multiple lines, place the span at the top and use braces. + /// let tokens = quote_spanned! {span=> + /// Box::into_raw(Box::new(#init)) + /// }; + /// ``` + /// + /// The lack of space before the `=>` should look jarring to Rust programmers + /// and this is intentional. The formatting is designed to be visibly + /// off-balance and draw the eye a particular way, due to the span expression + /// being evaluated in the context of the procedural macro and the remaining + /// tokens being evaluated in the generated code. + /// + ///
+ /// + /// # Hygiene + /// + /// Any interpolated tokens preserve the `Span` information provided by their + /// `ToTokens` implementation. Tokens that originate within the `quote_spanned!` + /// invocation are spanned with the given span argument. + /// + ///
+ /// + /// # Example + /// + /// The following procedural macro code uses `quote_spanned!` to assert that a + /// particular Rust type implements the [`Sync`] trait so that references can be + /// safely shared between threads. + /// + /// ``` + /// # use quote::{quote_spanned, TokenStreamExt, ToTokens}; + /// # use proc_macro2::{Span, TokenStream}; + /// # + /// # struct Type; + /// # + /// # impl Type { + /// # fn span(&self) -> Span { + /// # Span::call_site() + /// # } + /// # } + /// # + /// # impl ToTokens for Type { + /// # fn to_tokens(&self, _tokens: &mut TokenStream) {} + /// # } + /// # + /// # let ty = Type; + /// # let call_site = Span::call_site(); + /// # + /// let ty_span = ty.span(); + /// let assert_sync = quote_spanned! {ty_span=> + /// struct _AssertSync where #ty: Sync; + /// }; + /// ``` + /// + /// If the assertion fails, the user will see an error like the following. The + /// input span of their type is highlighted in the error. + /// + /// ```text + /// error[E0277]: the trait bound `*const (): std::marker::Sync` is not satisfied + /// --> src/main.rs:10:21 + /// | + /// 10 | static ref PTR: *const () = &(); + /// | ^^^^^^^^^ `*const ()` cannot be shared between threads safely + /// ``` + /// + /// In this example it is important for the where-clause to be spanned with the + /// line/column information of the user's input type so that error messages are + /// placed appropriately by the compiler. + $quote_spanned + }; +} + +#[cfg(doc)] +__quote_spanned![ + #[macro_export] + macro_rules! quote_spanned { + ($span:expr=> $($tt:tt)*) => { + ... + }; + } +]; + +#[cfg(not(doc))] +__quote_spanned![ + #[macro_export] + macro_rules! quote_spanned { + ($span:expr=>) => {{ + let _: $crate::__private::Span = $crate::__private::get_span($span).__into_span(); + $crate::__private::TokenStream::new() + }}; + + // Special case rule for a single tt, for performance. + ($span:expr=> $tt:tt) => {{ + let mut _s = $crate::__private::TokenStream::new(); + let _span: $crate::__private::Span = $crate::__private::get_span($span).__into_span(); + $crate::quote_token_spanned!{$tt _s _span} + _s + }}; + + // Special case rules for two tts, for performance. + ($span:expr=> # $var:ident) => {{ + let mut _s = $crate::__private::TokenStream::new(); + let _: $crate::__private::Span = $crate::__private::get_span($span).__into_span(); + $crate::ToTokens::to_tokens(&$var, &mut _s); + _s + }}; + ($span:expr=> $tt1:tt $tt2:tt) => {{ + let mut _s = $crate::__private::TokenStream::new(); + let _span: $crate::__private::Span = $crate::__private::get_span($span).__into_span(); + $crate::quote_token_spanned!{$tt1 _s _span} + $crate::quote_token_spanned!{$tt2 _s _span} + _s + }}; + + // Rule for any other number of tokens. + ($span:expr=> $($tt:tt)*) => {{ + let mut _s = $crate::__private::TokenStream::new(); + let _span: $crate::__private::Span = $crate::__private::get_span($span).__into_span(); + $crate::quote_each_token_spanned!{_s _span $($tt)*} + _s + }}; + } +]; + +// Extract the names of all #metavariables and pass them to the $call macro. +// +// in: pounded_var_names!(then!(...) a #b c #( #d )* #e) +// out: then!(... b); +// then!(... d); +// then!(... e); +#[macro_export] +#[doc(hidden)] +macro_rules! pounded_var_names { + ($call:ident! $extra:tt $($tts:tt)*) => { + $crate::pounded_var_names_with_context!{$call! $extra + (@ $($tts)*) + ($($tts)* @) + } + }; +} + +#[macro_export] +#[doc(hidden)] +macro_rules! pounded_var_names_with_context { + ($call:ident! 
$extra:tt ($($b1:tt)*) ($($curr:tt)*)) => { + $( + $crate::pounded_var_with_context!{$call! $extra $b1 $curr} + )* + }; +} + +#[macro_export] +#[doc(hidden)] +macro_rules! pounded_var_with_context { + ($call:ident! $extra:tt $b1:tt ( $($inner:tt)* )) => { + $crate::pounded_var_names!{$call! $extra $($inner)*} + }; + + ($call:ident! $extra:tt $b1:tt [ $($inner:tt)* ]) => { + $crate::pounded_var_names!{$call! $extra $($inner)*} + }; + + ($call:ident! $extra:tt $b1:tt { $($inner:tt)* }) => { + $crate::pounded_var_names!{$call! $extra $($inner)*} + }; + + ($call:ident!($($extra:tt)*) # $var:ident) => { + $crate::$call!($($extra)* $var); + }; + + ($call:ident! $extra:tt $b1:tt $curr:tt) => {}; +} + +#[macro_export] +#[doc(hidden)] +macro_rules! quote_bind_into_iter { + ($has_iter:ident $var:ident) => { + // `mut` may be unused if $var occurs multiple times in the list. + #[allow(unused_mut)] + let (mut $var, i) = $var.quote_into_iter(); + let $has_iter = $has_iter | i; + }; +} + +#[macro_export] +#[doc(hidden)] +macro_rules! quote_bind_next_or_break { + ($var:ident) => { + let $var = match $var.next() { + Some(_x) => $crate::__private::RepInterp(_x), + None => break, + }; + }; +} + +// The obvious way to write this macro is as a tt muncher. This implementation +// does something more complex for two reasons. +// +// - With a tt muncher it's easy to hit Rust's built-in recursion_limit, which +// this implementation avoids because it isn't tail recursive. +// +// - Compile times for a tt muncher are quadratic relative to the length of +// the input. This implementation is linear, so it will be faster +// (potentially much faster) for big inputs. However, the constant factors +// of this implementation are higher than that of a tt muncher, so it is +// somewhat slower than a tt muncher if there are many invocations with +// short inputs. +// +// An invocation like this: +// +// quote_each_token!(_s a b c d e f g h i j); +// +// expands to this: +// +// quote_tokens_with_context!(_s +// (@ @ @ @ @ @ a b c d e f g h i j) +// (@ @ @ @ @ a b c d e f g h i j @) +// (@ @ @ @ a b c d e f g h i j @ @) +// (@ @ @ (a) (b) (c) (d) (e) (f) (g) (h) (i) (j) @ @ @) +// (@ @ a b c d e f g h i j @ @ @ @) +// (@ a b c d e f g h i j @ @ @ @ @) +// (a b c d e f g h i j @ @ @ @ @ @) +// ); +// +// which gets transposed and expanded to this: +// +// quote_token_with_context!(_s @ @ @ @ @ @ a); +// quote_token_with_context!(_s @ @ @ @ @ a b); +// quote_token_with_context!(_s @ @ @ @ a b c); +// quote_token_with_context!(_s @ @ @ (a) b c d); +// quote_token_with_context!(_s @ @ a (b) c d e); +// quote_token_with_context!(_s @ a b (c) d e f); +// quote_token_with_context!(_s a b c (d) e f g); +// quote_token_with_context!(_s b c d (e) f g h); +// quote_token_with_context!(_s c d e (f) g h i); +// quote_token_with_context!(_s d e f (g) h i j); +// quote_token_with_context!(_s e f g (h) i j @); +// quote_token_with_context!(_s f g h (i) j @ @); +// quote_token_with_context!(_s g h i (j) @ @ @); +// quote_token_with_context!(_s h i j @ @ @ @); +// quote_token_with_context!(_s i j @ @ @ @ @); +// quote_token_with_context!(_s j @ @ @ @ @ @); +// +// Without having used muncher-style recursion, we get one invocation of +// quote_token_with_context for each original tt, with three tts of context on +// either side. This is enough for the longest possible interpolation form (a +// repetition with separator, as in `# (#var) , *`) to be fully represented with +// the first or last tt in the middle. 
+// +// The middle tt (surrounded by parentheses) is the tt being processed. +// +// - When it is a `#`, quote_token_with_context can do an interpolation. The +// interpolation kind will depend on the three subsequent tts. +// +// - When it is within a later part of an interpolation, it can be ignored +// because the interpolation has already been done. +// +// - When it is not part of an interpolation it can be pushed as a single +// token into the output. +// +// - When the middle token is an unparenthesized `@`, that call is one of the +// first 3 or last 3 calls of quote_token_with_context and does not +// correspond to one of the original input tokens, so turns into nothing. +#[macro_export] +#[doc(hidden)] +macro_rules! quote_each_token { + ($tokens:ident $($tts:tt)*) => { + $crate::quote_tokens_with_context!{$tokens + (@ @ @ @ @ @ $($tts)*) + (@ @ @ @ @ $($tts)* @) + (@ @ @ @ $($tts)* @ @) + (@ @ @ $(($tts))* @ @ @) + (@ @ $($tts)* @ @ @ @) + (@ $($tts)* @ @ @ @ @) + ($($tts)* @ @ @ @ @ @) + } + }; +} + +// See the explanation on quote_each_token. +#[macro_export] +#[doc(hidden)] +macro_rules! quote_each_token_spanned { + ($tokens:ident $span:ident $($tts:tt)*) => { + $crate::quote_tokens_with_context_spanned!{$tokens $span + (@ @ @ @ @ @ $($tts)*) + (@ @ @ @ @ $($tts)* @) + (@ @ @ @ $($tts)* @ @) + (@ @ @ $(($tts))* @ @ @) + (@ @ $($tts)* @ @ @ @) + (@ $($tts)* @ @ @ @ @) + ($($tts)* @ @ @ @ @ @) + } + }; +} + +// See the explanation on quote_each_token. +#[macro_export] +#[doc(hidden)] +macro_rules! quote_tokens_with_context { + ($tokens:ident + ($($b3:tt)*) ($($b2:tt)*) ($($b1:tt)*) + ($($curr:tt)*) + ($($a1:tt)*) ($($a2:tt)*) ($($a3:tt)*) + ) => { + $( + $crate::quote_token_with_context!{$tokens $b3 $b2 $b1 $curr $a1 $a2 $a3} + )* + }; +} + +// See the explanation on quote_each_token. +#[macro_export] +#[doc(hidden)] +macro_rules! quote_tokens_with_context_spanned { + ($tokens:ident $span:ident + ($($b3:tt)*) ($($b2:tt)*) ($($b1:tt)*) + ($($curr:tt)*) + ($($a1:tt)*) ($($a2:tt)*) ($($a3:tt)*) + ) => { + $( + $crate::quote_token_with_context_spanned!{$tokens $span $b3 $b2 $b1 $curr $a1 $a2 $a3} + )* + }; +} + +// See the explanation on quote_each_token. +#[macro_export] +#[doc(hidden)] +macro_rules! quote_token_with_context { + // Unparenthesized `@` indicates this call does not correspond to one of the + // original input tokens. Ignore it. + ($tokens:ident $b3:tt $b2:tt $b1:tt @ $a1:tt $a2:tt $a3:tt) => {}; + + // A repetition with no separator. + ($tokens:ident $b3:tt $b2:tt $b1:tt (#) ( $($inner:tt)* ) * $a3:tt) => {{ + use $crate::__private::ext::*; + let has_iter = $crate::__private::HasIterator::; + $crate::pounded_var_names!{quote_bind_into_iter!(has_iter) () $($inner)*} + <_ as $crate::__private::CheckHasIterator>::check(has_iter); + // This is `while true` instead of `loop` because if there are no + // iterators used inside of this repetition then the body would not + // contain any `break`, so the compiler would emit unreachable code + // warnings on anything below the loop. We use has_iter to detect and + // fail to compile when there are no iterators, so here we just work + // around the unneeded extra warning. + while true { + $crate::pounded_var_names!{quote_bind_next_or_break!() () $($inner)*} + $crate::quote_each_token!{$tokens $($inner)*} + } + }}; + // ... and one step later. + ($tokens:ident $b3:tt $b2:tt # (( $($inner:tt)* )) * $a2:tt $a3:tt) => {}; + // ... and one step later. 
+ ($tokens:ident $b3:tt # ( $($inner:tt)* ) (*) $a1:tt $a2:tt $a3:tt) => {}; + + // A repetition with separator. + ($tokens:ident $b3:tt $b2:tt $b1:tt (#) ( $($inner:tt)* ) $sep:tt *) => {{ + use $crate::__private::ext::*; + let mut _i = 0usize; + let has_iter = $crate::__private::HasIterator::; + $crate::pounded_var_names!{quote_bind_into_iter!(has_iter) () $($inner)*} + <_ as $crate::__private::CheckHasIterator>::check(has_iter); + while true { + $crate::pounded_var_names!{quote_bind_next_or_break!() () $($inner)*} + if _i > 0 { + $crate::quote_token!{$sep $tokens} + } + _i += 1; + $crate::quote_each_token!{$tokens $($inner)*} + } + }}; + // ... and one step later. + ($tokens:ident $b3:tt $b2:tt # (( $($inner:tt)* )) $sep:tt * $a3:tt) => {}; + // ... and one step later. + ($tokens:ident $b3:tt # ( $($inner:tt)* ) ($sep:tt) * $a2:tt $a3:tt) => {}; + // (A special case for `#(var)**`, where the first `*` is treated as the + // repetition symbol and the second `*` is treated as an ordinary token.) + ($tokens:ident # ( $($inner:tt)* ) * (*) $a1:tt $a2:tt $a3:tt) => { + // https://github.com/dtolnay/quote/issues/130 + $crate::quote_token!{* $tokens} + }; + // ... and one step later. + ($tokens:ident # ( $($inner:tt)* ) $sep:tt (*) $a1:tt $a2:tt $a3:tt) => {}; + + // A non-repetition interpolation. + ($tokens:ident $b3:tt $b2:tt $b1:tt (#) $var:ident $a2:tt $a3:tt) => { + $crate::ToTokens::to_tokens(&$var, &mut $tokens); + }; + // ... and one step later. + ($tokens:ident $b3:tt $b2:tt # ($var:ident) $a1:tt $a2:tt $a3:tt) => {}; + + // An ordinary token, not part of any interpolation. + ($tokens:ident $b3:tt $b2:tt $b1:tt ($curr:tt) $a1:tt $a2:tt $a3:tt) => { + $crate::quote_token!{$curr $tokens} + }; +} + +// See the explanation on quote_each_token, and on the individual rules of +// quote_token_with_context. +#[macro_export] +#[doc(hidden)] +macro_rules! 
quote_token_with_context_spanned { + ($tokens:ident $span:ident $b3:tt $b2:tt $b1:tt @ $a1:tt $a2:tt $a3:tt) => {}; + + ($tokens:ident $span:ident $b3:tt $b2:tt $b1:tt (#) ( $($inner:tt)* ) * $a3:tt) => {{ + use $crate::__private::ext::*; + let has_iter = $crate::__private::HasIterator::; + $crate::pounded_var_names!{quote_bind_into_iter!(has_iter) () $($inner)*} + <_ as $crate::__private::CheckHasIterator>::check(has_iter); + while true { + $crate::pounded_var_names!{quote_bind_next_or_break!() () $($inner)*} + $crate::quote_each_token_spanned!{$tokens $span $($inner)*} + } + }}; + ($tokens:ident $span:ident $b3:tt $b2:tt # (( $($inner:tt)* )) * $a2:tt $a3:tt) => {}; + ($tokens:ident $span:ident $b3:tt # ( $($inner:tt)* ) (*) $a1:tt $a2:tt $a3:tt) => {}; + + ($tokens:ident $span:ident $b3:tt $b2:tt $b1:tt (#) ( $($inner:tt)* ) $sep:tt *) => {{ + use $crate::__private::ext::*; + let mut _i = 0usize; + let has_iter = $crate::__private::HasIterator::; + $crate::pounded_var_names!{quote_bind_into_iter!(has_iter) () $($inner)*} + <_ as $crate::__private::CheckHasIterator>::check(has_iter); + while true { + $crate::pounded_var_names!{quote_bind_next_or_break!() () $($inner)*} + if _i > 0 { + $crate::quote_token_spanned!{$sep $tokens $span} + } + _i += 1; + $crate::quote_each_token_spanned!{$tokens $span $($inner)*} + } + }}; + ($tokens:ident $span:ident $b3:tt $b2:tt # (( $($inner:tt)* )) $sep:tt * $a3:tt) => {}; + ($tokens:ident $span:ident $b3:tt # ( $($inner:tt)* ) ($sep:tt) * $a2:tt $a3:tt) => {}; + ($tokens:ident $span:ident # ( $($inner:tt)* ) * (*) $a1:tt $a2:tt $a3:tt) => { + // https://github.com/dtolnay/quote/issues/130 + $crate::quote_token_spanned!{* $tokens $span} + }; + ($tokens:ident $span:ident # ( $($inner:tt)* ) $sep:tt (*) $a1:tt $a2:tt $a3:tt) => {}; + + ($tokens:ident $span:ident $b3:tt $b2:tt $b1:tt (#) $var:ident $a2:tt $a3:tt) => { + $crate::ToTokens::to_tokens(&$var, &mut $tokens); + }; + ($tokens:ident $span:ident $b3:tt $b2:tt # ($var:ident) $a1:tt $a2:tt $a3:tt) => {}; + + ($tokens:ident $span:ident $b3:tt $b2:tt $b1:tt ($curr:tt) $a1:tt $a2:tt $a3:tt) => { + $crate::quote_token_spanned!{$curr $tokens $span} + }; +} + +// These rules are ordered by approximate token frequency, at least for the +// first 10 or so, to improve compile times. Having `ident` first is by far the +// most important because it's typically 2-3x more common than the next most +// common token. +// +// Separately, we put the token being matched in the very front so that failing +// rules may fail to match as quickly as possible. +#[macro_export] +#[doc(hidden)] +macro_rules! quote_token { + ($ident:ident $tokens:ident) => { + $crate::__private::push_ident(&mut $tokens, stringify!($ident)); + }; + + (:: $tokens:ident) => { + $crate::__private::push_colon2(&mut $tokens); + }; + + (( $($inner:tt)* ) $tokens:ident) => { + $crate::__private::push_group( + &mut $tokens, + $crate::__private::Delimiter::Parenthesis, + $crate::quote!($($inner)*), + ); + }; + + ([ $($inner:tt)* ] $tokens:ident) => { + $crate::__private::push_group( + &mut $tokens, + $crate::__private::Delimiter::Bracket, + $crate::quote!($($inner)*), + ); + }; + + ({ $($inner:tt)* } $tokens:ident) => { + $crate::__private::push_group( + &mut $tokens, + $crate::__private::Delimiter::Brace, + $crate::quote!($($inner)*), + ); + }; + + (# $tokens:ident) => { + $crate::__private::push_pound(&mut $tokens); + }; + + (, $tokens:ident) => { + $crate::__private::push_comma(&mut $tokens); + }; + + (. 
$tokens:ident) => { + $crate::__private::push_dot(&mut $tokens); + }; + + (; $tokens:ident) => { + $crate::__private::push_semi(&mut $tokens); + }; + + (: $tokens:ident) => { + $crate::__private::push_colon(&mut $tokens); + }; + + (+ $tokens:ident) => { + $crate::__private::push_add(&mut $tokens); + }; + + (+= $tokens:ident) => { + $crate::__private::push_add_eq(&mut $tokens); + }; + + (& $tokens:ident) => { + $crate::__private::push_and(&mut $tokens); + }; + + (&& $tokens:ident) => { + $crate::__private::push_and_and(&mut $tokens); + }; + + (&= $tokens:ident) => { + $crate::__private::push_and_eq(&mut $tokens); + }; + + (@ $tokens:ident) => { + $crate::__private::push_at(&mut $tokens); + }; + + (! $tokens:ident) => { + $crate::__private::push_bang(&mut $tokens); + }; + + (^ $tokens:ident) => { + $crate::__private::push_caret(&mut $tokens); + }; + + (^= $tokens:ident) => { + $crate::__private::push_caret_eq(&mut $tokens); + }; + + (/ $tokens:ident) => { + $crate::__private::push_div(&mut $tokens); + }; + + (/= $tokens:ident) => { + $crate::__private::push_div_eq(&mut $tokens); + }; + + (.. $tokens:ident) => { + $crate::__private::push_dot2(&mut $tokens); + }; + + (... $tokens:ident) => { + $crate::__private::push_dot3(&mut $tokens); + }; + + (..= $tokens:ident) => { + $crate::__private::push_dot_dot_eq(&mut $tokens); + }; + + (= $tokens:ident) => { + $crate::__private::push_eq(&mut $tokens); + }; + + (== $tokens:ident) => { + $crate::__private::push_eq_eq(&mut $tokens); + }; + + (>= $tokens:ident) => { + $crate::__private::push_ge(&mut $tokens); + }; + + (> $tokens:ident) => { + $crate::__private::push_gt(&mut $tokens); + }; + + (<= $tokens:ident) => { + $crate::__private::push_le(&mut $tokens); + }; + + (< $tokens:ident) => { + $crate::__private::push_lt(&mut $tokens); + }; + + (*= $tokens:ident) => { + $crate::__private::push_mul_eq(&mut $tokens); + }; + + (!= $tokens:ident) => { + $crate::__private::push_ne(&mut $tokens); + }; + + (| $tokens:ident) => { + $crate::__private::push_or(&mut $tokens); + }; + + (|= $tokens:ident) => { + $crate::__private::push_or_eq(&mut $tokens); + }; + + (|| $tokens:ident) => { + $crate::__private::push_or_or(&mut $tokens); + }; + + (? 
$tokens:ident) => { + $crate::__private::push_question(&mut $tokens); + }; + + (-> $tokens:ident) => { + $crate::__private::push_rarrow(&mut $tokens); + }; + + (<- $tokens:ident) => { + $crate::__private::push_larrow(&mut $tokens); + }; + + (% $tokens:ident) => { + $crate::__private::push_rem(&mut $tokens); + }; + + (%= $tokens:ident) => { + $crate::__private::push_rem_eq(&mut $tokens); + }; + + (=> $tokens:ident) => { + $crate::__private::push_fat_arrow(&mut $tokens); + }; + + (<< $tokens:ident) => { + $crate::__private::push_shl(&mut $tokens); + }; + + (<<= $tokens:ident) => { + $crate::__private::push_shl_eq(&mut $tokens); + }; + + (>> $tokens:ident) => { + $crate::__private::push_shr(&mut $tokens); + }; + + (>>= $tokens:ident) => { + $crate::__private::push_shr_eq(&mut $tokens); + }; + + (* $tokens:ident) => { + $crate::__private::push_star(&mut $tokens); + }; + + (- $tokens:ident) => { + $crate::__private::push_sub(&mut $tokens); + }; + + (-= $tokens:ident) => { + $crate::__private::push_sub_eq(&mut $tokens); + }; + + ($lifetime:lifetime $tokens:ident) => { + $crate::__private::push_lifetime(&mut $tokens, stringify!($lifetime)); + }; + + (_ $tokens:ident) => { + $crate::__private::push_underscore(&mut $tokens); + }; + + ($other:tt $tokens:ident) => { + $crate::__private::parse(&mut $tokens, stringify!($other)); + }; +} + +// See the comment above `quote_token!` about the rule ordering. +#[macro_export] +#[doc(hidden)] +macro_rules! quote_token_spanned { + ($ident:ident $tokens:ident $span:ident) => { + $crate::__private::push_ident_spanned(&mut $tokens, $span, stringify!($ident)); + }; + + (:: $tokens:ident $span:ident) => { + $crate::__private::push_colon2_spanned(&mut $tokens, $span); + }; + + (( $($inner:tt)* ) $tokens:ident $span:ident) => { + $crate::__private::push_group_spanned( + &mut $tokens, + $span, + $crate::__private::Delimiter::Parenthesis, + $crate::quote_spanned!($span=> $($inner)*), + ); + }; + + ([ $($inner:tt)* ] $tokens:ident $span:ident) => { + $crate::__private::push_group_spanned( + &mut $tokens, + $span, + $crate::__private::Delimiter::Bracket, + $crate::quote_spanned!($span=> $($inner)*), + ); + }; + + ({ $($inner:tt)* } $tokens:ident $span:ident) => { + $crate::__private::push_group_spanned( + &mut $tokens, + $span, + $crate::__private::Delimiter::Brace, + $crate::quote_spanned!($span=> $($inner)*), + ); + }; + + (# $tokens:ident $span:ident) => { + $crate::__private::push_pound_spanned(&mut $tokens, $span); + }; + + (, $tokens:ident $span:ident) => { + $crate::__private::push_comma_spanned(&mut $tokens, $span); + }; + + (. $tokens:ident $span:ident) => { + $crate::__private::push_dot_spanned(&mut $tokens, $span); + }; + + (; $tokens:ident $span:ident) => { + $crate::__private::push_semi_spanned(&mut $tokens, $span); + }; + + (: $tokens:ident $span:ident) => { + $crate::__private::push_colon_spanned(&mut $tokens, $span); + }; + + (+ $tokens:ident $span:ident) => { + $crate::__private::push_add_spanned(&mut $tokens, $span); + }; + + (+= $tokens:ident $span:ident) => { + $crate::__private::push_add_eq_spanned(&mut $tokens, $span); + }; + + (& $tokens:ident $span:ident) => { + $crate::__private::push_and_spanned(&mut $tokens, $span); + }; + + (&& $tokens:ident $span:ident) => { + $crate::__private::push_and_and_spanned(&mut $tokens, $span); + }; + + (&= $tokens:ident $span:ident) => { + $crate::__private::push_and_eq_spanned(&mut $tokens, $span); + }; + + (@ $tokens:ident $span:ident) => { + $crate::__private::push_at_spanned(&mut $tokens, $span); + }; + + (! 
$tokens:ident $span:ident) => { + $crate::__private::push_bang_spanned(&mut $tokens, $span); + }; + + (^ $tokens:ident $span:ident) => { + $crate::__private::push_caret_spanned(&mut $tokens, $span); + }; + + (^= $tokens:ident $span:ident) => { + $crate::__private::push_caret_eq_spanned(&mut $tokens, $span); + }; + + (/ $tokens:ident $span:ident) => { + $crate::__private::push_div_spanned(&mut $tokens, $span); + }; + + (/= $tokens:ident $span:ident) => { + $crate::__private::push_div_eq_spanned(&mut $tokens, $span); + }; + + (.. $tokens:ident $span:ident) => { + $crate::__private::push_dot2_spanned(&mut $tokens, $span); + }; + + (... $tokens:ident $span:ident) => { + $crate::__private::push_dot3_spanned(&mut $tokens, $span); + }; + + (..= $tokens:ident $span:ident) => { + $crate::__private::push_dot_dot_eq_spanned(&mut $tokens, $span); + }; + + (= $tokens:ident $span:ident) => { + $crate::__private::push_eq_spanned(&mut $tokens, $span); + }; + + (== $tokens:ident $span:ident) => { + $crate::__private::push_eq_eq_spanned(&mut $tokens, $span); + }; + + (>= $tokens:ident $span:ident) => { + $crate::__private::push_ge_spanned(&mut $tokens, $span); + }; + + (> $tokens:ident $span:ident) => { + $crate::__private::push_gt_spanned(&mut $tokens, $span); + }; + + (<= $tokens:ident $span:ident) => { + $crate::__private::push_le_spanned(&mut $tokens, $span); + }; + + (< $tokens:ident $span:ident) => { + $crate::__private::push_lt_spanned(&mut $tokens, $span); + }; + + (*= $tokens:ident $span:ident) => { + $crate::__private::push_mul_eq_spanned(&mut $tokens, $span); + }; + + (!= $tokens:ident $span:ident) => { + $crate::__private::push_ne_spanned(&mut $tokens, $span); + }; + + (| $tokens:ident $span:ident) => { + $crate::__private::push_or_spanned(&mut $tokens, $span); + }; + + (|= $tokens:ident $span:ident) => { + $crate::__private::push_or_eq_spanned(&mut $tokens, $span); + }; + + (|| $tokens:ident $span:ident) => { + $crate::__private::push_or_or_spanned(&mut $tokens, $span); + }; + + (? 
$tokens:ident $span:ident) => { + $crate::__private::push_question_spanned(&mut $tokens, $span); + }; + + (-> $tokens:ident $span:ident) => { + $crate::__private::push_rarrow_spanned(&mut $tokens, $span); + }; + + (<- $tokens:ident $span:ident) => { + $crate::__private::push_larrow_spanned(&mut $tokens, $span); + }; + + (% $tokens:ident $span:ident) => { + $crate::__private::push_rem_spanned(&mut $tokens, $span); + }; + + (%= $tokens:ident $span:ident) => { + $crate::__private::push_rem_eq_spanned(&mut $tokens, $span); + }; + + (=> $tokens:ident $span:ident) => { + $crate::__private::push_fat_arrow_spanned(&mut $tokens, $span); + }; + + (<< $tokens:ident $span:ident) => { + $crate::__private::push_shl_spanned(&mut $tokens, $span); + }; + + (<<= $tokens:ident $span:ident) => { + $crate::__private::push_shl_eq_spanned(&mut $tokens, $span); + }; + + (>> $tokens:ident $span:ident) => { + $crate::__private::push_shr_spanned(&mut $tokens, $span); + }; + + (>>= $tokens:ident $span:ident) => { + $crate::__private::push_shr_eq_spanned(&mut $tokens, $span); + }; + + (* $tokens:ident $span:ident) => { + $crate::__private::push_star_spanned(&mut $tokens, $span); + }; + + (- $tokens:ident $span:ident) => { + $crate::__private::push_sub_spanned(&mut $tokens, $span); + }; + + (-= $tokens:ident $span:ident) => { + $crate::__private::push_sub_eq_spanned(&mut $tokens, $span); + }; + + ($lifetime:lifetime $tokens:ident $span:ident) => { + $crate::__private::push_lifetime_spanned(&mut $tokens, $span, stringify!($lifetime)); + }; + + (_ $tokens:ident $span:ident) => { + $crate::__private::push_underscore_spanned(&mut $tokens, $span); + }; + + ($other:tt $tokens:ident $span:ident) => { + $crate::__private::parse_spanned(&mut $tokens, $span, stringify!($other)); + }; +} diff --git a/vendor/quote/src/runtime.rs b/vendor/quote/src/runtime.rs new file mode 100644 index 00000000000000..28fb60c7a5fca9 --- /dev/null +++ b/vendor/quote/src/runtime.rs @@ -0,0 +1,503 @@ +use self::get_span::{GetSpan, GetSpanBase, GetSpanInner}; +use crate::{IdentFragment, ToTokens, TokenStreamExt}; +use core::fmt; +use core::iter; +use core::ops::BitOr; +use proc_macro2::{Group, Ident, Punct, Spacing, TokenTree}; + +#[doc(hidden)] +pub use alloc::format; +#[doc(hidden)] +pub use core::option::Option; + +#[doc(hidden)] +pub type Delimiter = proc_macro2::Delimiter; +#[doc(hidden)] +pub type Span = proc_macro2::Span; +#[doc(hidden)] +pub type TokenStream = proc_macro2::TokenStream; + +#[doc(hidden)] +pub struct HasIterator; + +impl BitOr> for HasIterator { + type Output = HasIterator; + fn bitor(self, _rhs: HasIterator) -> HasIterator { + HasIterator:: + } +} + +impl BitOr> for HasIterator { + type Output = HasIterator; + fn bitor(self, _rhs: HasIterator) -> HasIterator { + HasIterator:: + } +} + +impl BitOr> for HasIterator { + type Output = HasIterator; + fn bitor(self, _rhs: HasIterator) -> HasIterator { + HasIterator:: + } +} + +impl BitOr> for HasIterator { + type Output = HasIterator; + fn bitor(self, _rhs: HasIterator) -> HasIterator { + HasIterator:: + } +} + +#[doc(hidden)] +#[cfg_attr( + not(no_diagnostic_namespace), + diagnostic::on_unimplemented( + message = "repetition contains no interpolated value that is an iterator", + label = "none of the values interpolated inside this repetition are iterable" + ) +)] +pub trait CheckHasIterator: Sized { + fn check(self) {} +} + +impl CheckHasIterator for HasIterator {} + +/// Extension traits used by the implementation of `quote!`. 
These are defined +/// in separate traits, rather than as a single trait due to ambiguity issues. +/// +/// These traits expose a `quote_into_iter` method which should allow calling +/// whichever impl happens to be applicable. Calling that method repeatedly on +/// the returned value should be idempotent. +#[doc(hidden)] +pub mod ext { + use super::{HasIterator, RepInterp}; + use crate::ToTokens; + use alloc::collections::btree_set::{self, BTreeSet}; + use core::slice; + + /// Extension trait providing the `quote_into_iter` method on iterators. + #[doc(hidden)] + pub trait RepIteratorExt: Iterator + Sized { + fn quote_into_iter(self) -> (Self, HasIterator) { + (self, HasIterator::) + } + } + + impl RepIteratorExt for T {} + + /// Extension trait providing the `quote_into_iter` method for + /// non-iterable types. These types interpolate the same value in each + /// iteration of the repetition. + #[doc(hidden)] + pub trait RepToTokensExt { + /// Pretend to be an iterator for the purposes of `quote_into_iter`. + /// This allows repeated calls to `quote_into_iter` to continue + /// correctly returning HasIterator. + fn next(&self) -> Option<&Self> { + Some(self) + } + + fn quote_into_iter(&self) -> (&Self, HasIterator) { + (self, HasIterator::) + } + } + + impl RepToTokensExt for T {} + + /// Extension trait providing the `quote_into_iter` method for types that + /// can be referenced as an iterator. + #[doc(hidden)] + pub trait RepAsIteratorExt<'q> { + type Iter: Iterator; + + fn quote_into_iter(&'q self) -> (Self::Iter, HasIterator); + } + + impl<'q, T: RepAsIteratorExt<'q> + ?Sized> RepAsIteratorExt<'q> for &T { + type Iter = T::Iter; + + fn quote_into_iter(&'q self) -> (Self::Iter, HasIterator) { + ::quote_into_iter(*self) + } + } + + impl<'q, T: RepAsIteratorExt<'q> + ?Sized> RepAsIteratorExt<'q> for &mut T { + type Iter = T::Iter; + + fn quote_into_iter(&'q self) -> (Self::Iter, HasIterator) { + ::quote_into_iter(*self) + } + } + + impl<'q, T: 'q> RepAsIteratorExt<'q> for [T] { + type Iter = slice::Iter<'q, T>; + + fn quote_into_iter(&'q self) -> (Self::Iter, HasIterator) { + (self.iter(), HasIterator::) + } + } + + impl<'q, T: 'q, const N: usize> RepAsIteratorExt<'q> for [T; N] { + type Iter = slice::Iter<'q, T>; + + fn quote_into_iter(&'q self) -> (Self::Iter, HasIterator) { + (self.iter(), HasIterator::) + } + } + + impl<'q, T: 'q> RepAsIteratorExt<'q> for Vec { + type Iter = slice::Iter<'q, T>; + + fn quote_into_iter(&'q self) -> (Self::Iter, HasIterator) { + (self.iter(), HasIterator::) + } + } + + impl<'q, T: 'q> RepAsIteratorExt<'q> for BTreeSet { + type Iter = btree_set::Iter<'q, T>; + + fn quote_into_iter(&'q self) -> (Self::Iter, HasIterator) { + (self.iter(), HasIterator::) + } + } + + impl<'q, T: RepAsIteratorExt<'q>> RepAsIteratorExt<'q> for RepInterp { + type Iter = T::Iter; + + fn quote_into_iter(&'q self) -> (Self::Iter, HasIterator) { + self.0.quote_into_iter() + } + } +} + +// Helper type used within interpolations to allow for repeated binding names. +// Implements the relevant traits, and exports a dummy `next()` method. +#[derive(Copy, Clone)] +#[doc(hidden)] +pub struct RepInterp(pub T); + +impl RepInterp { + // This method is intended to look like `Iterator::next`, and is called when + // a name is bound multiple times, as the previous binding will shadow the + // original `Iterator` object. This allows us to avoid advancing the + // iterator multiple times per iteration. 
+    pub fn next(self) -> Option<T> {
+        Some(self.0)
+    }
+}
+
+impl<T: Iterator> Iterator for RepInterp<T> {
+    type Item = T::Item;
+
+    fn next(&mut self) -> Option<Self::Item> {
+        self.0.next()
+    }
+}
+
+impl<T: ToTokens> ToTokens for RepInterp<T> {
+    fn to_tokens(&self, tokens: &mut TokenStream) {
+        self.0.to_tokens(tokens);
+    }
+}
+
+#[doc(hidden)]
+#[inline]
+pub fn get_span<T>(span: T) -> GetSpan<T> {
+    GetSpan(GetSpanInner(GetSpanBase(span)))
+}
+
+mod get_span {
+    use core::ops::Deref;
+    use proc_macro2::extra::DelimSpan;
+    use proc_macro2::Span;
+
+    pub struct GetSpan<T>(pub(crate) GetSpanInner<T>);
+
+    pub struct GetSpanInner<T>(pub(crate) GetSpanBase<T>);
+
+    pub struct GetSpanBase<T>(pub(crate) T);
+
+    impl GetSpan<Span> {
+        #[inline]
+        pub fn __into_span(self) -> Span {
+            ((self.0).0).0
+        }
+    }
+
+    impl GetSpanInner<DelimSpan> {
+        #[inline]
+        pub fn __into_span(&self) -> Span {
+            (self.0).0.join()
+        }
+    }
+
+    impl<T> GetSpanBase<T> {
+        #[allow(clippy::unused_self)]
+        pub fn __into_span(&self) -> T {
+            unreachable!()
+        }
+    }
+
+    impl<T> Deref for GetSpan<T> {
+        type Target = GetSpanInner<T>;
+
+        #[inline]
+        fn deref(&self) -> &Self::Target {
+            &self.0
+        }
+    }
+
+    impl<T> Deref for GetSpanInner<T> {
+        type Target = GetSpanBase<T>;
+
+        #[inline]
+        fn deref(&self) -> &Self::Target {
+            &self.0
+        }
+    }
+}
+
+#[doc(hidden)]
+pub fn push_group(tokens: &mut TokenStream, delimiter: Delimiter, inner: TokenStream) {
+    tokens.append(Group::new(delimiter, inner));
+}
+
+#[doc(hidden)]
+pub fn push_group_spanned(
+    tokens: &mut TokenStream,
+    span: Span,
+    delimiter: Delimiter,
+    inner: TokenStream,
+) {
+    let mut g = Group::new(delimiter, inner);
+    g.set_span(span);
+    tokens.append(g);
+}
+
+#[doc(hidden)]
+pub fn parse(tokens: &mut TokenStream, s: &str) {
+    let s: TokenStream = s.parse().expect("invalid token stream");
+    tokens.extend(iter::once(s));
+}
+
+#[doc(hidden)]
+pub fn parse_spanned(tokens: &mut TokenStream, span: Span, s: &str) {
+    let s: TokenStream = s.parse().expect("invalid token stream");
+    for token in s {
+        tokens.append(respan_token_tree(token, span));
+    }
+}
+
+// Token tree with every span replaced by the given one.
+fn respan_token_tree(mut token: TokenTree, span: Span) -> TokenTree {
+    match &mut token {
+        TokenTree::Group(g) => {
+            let mut tokens = TokenStream::new();
+            for token in g.stream() {
+                tokens.append(respan_token_tree(token, span));
+            }
+            *g = Group::new(g.delimiter(), tokens);
+            g.set_span(span);
+        }
+        other => other.set_span(span),
+    }
+    token
+}
+
+#[doc(hidden)]
+pub fn push_ident(tokens: &mut TokenStream, s: &str) {
+    let span = Span::call_site();
+    push_ident_spanned(tokens, span, s);
+}
+
+#[doc(hidden)]
+pub fn push_ident_spanned(tokens: &mut TokenStream, span: Span, s: &str) {
+    tokens.append(ident_maybe_raw(s, span));
+}
+
+#[doc(hidden)]
+pub fn push_lifetime(tokens: &mut TokenStream, lifetime: &str) {
+    tokens.append(TokenTree::Punct(Punct::new('\'', Spacing::Joint)));
+    tokens.append(TokenTree::Ident(Ident::new(
+        &lifetime[1..],
+        Span::call_site(),
+    )));
+}
+
+#[doc(hidden)]
+pub fn push_lifetime_spanned(tokens: &mut TokenStream, span: Span, lifetime: &str) {
+    tokens.append(TokenTree::Punct({
+        let mut apostrophe = Punct::new('\'', Spacing::Joint);
+        apostrophe.set_span(span);
+        apostrophe
+    }));
+    tokens.append(TokenTree::Ident(Ident::new(&lifetime[1..], span)));
+}
+
+macro_rules!
push_punct { + ($name:ident $spanned:ident $char1:tt) => { + #[doc(hidden)] + pub fn $name(tokens: &mut TokenStream) { + tokens.append(Punct::new($char1, Spacing::Alone)); + } + #[doc(hidden)] + pub fn $spanned(tokens: &mut TokenStream, span: Span) { + let mut punct = Punct::new($char1, Spacing::Alone); + punct.set_span(span); + tokens.append(punct); + } + }; + ($name:ident $spanned:ident $char1:tt $char2:tt) => { + #[doc(hidden)] + pub fn $name(tokens: &mut TokenStream) { + tokens.append(Punct::new($char1, Spacing::Joint)); + tokens.append(Punct::new($char2, Spacing::Alone)); + } + #[doc(hidden)] + pub fn $spanned(tokens: &mut TokenStream, span: Span) { + let mut punct = Punct::new($char1, Spacing::Joint); + punct.set_span(span); + tokens.append(punct); + let mut punct = Punct::new($char2, Spacing::Alone); + punct.set_span(span); + tokens.append(punct); + } + }; + ($name:ident $spanned:ident $char1:tt $char2:tt $char3:tt) => { + #[doc(hidden)] + pub fn $name(tokens: &mut TokenStream) { + tokens.append(Punct::new($char1, Spacing::Joint)); + tokens.append(Punct::new($char2, Spacing::Joint)); + tokens.append(Punct::new($char3, Spacing::Alone)); + } + #[doc(hidden)] + pub fn $spanned(tokens: &mut TokenStream, span: Span) { + let mut punct = Punct::new($char1, Spacing::Joint); + punct.set_span(span); + tokens.append(punct); + let mut punct = Punct::new($char2, Spacing::Joint); + punct.set_span(span); + tokens.append(punct); + let mut punct = Punct::new($char3, Spacing::Alone); + punct.set_span(span); + tokens.append(punct); + } + }; +} + +push_punct!(push_add push_add_spanned '+'); +push_punct!(push_add_eq push_add_eq_spanned '+' '='); +push_punct!(push_and push_and_spanned '&'); +push_punct!(push_and_and push_and_and_spanned '&' '&'); +push_punct!(push_and_eq push_and_eq_spanned '&' '='); +push_punct!(push_at push_at_spanned '@'); +push_punct!(push_bang push_bang_spanned '!'); +push_punct!(push_caret push_caret_spanned '^'); +push_punct!(push_caret_eq push_caret_eq_spanned '^' '='); +push_punct!(push_colon push_colon_spanned ':'); +push_punct!(push_colon2 push_colon2_spanned ':' ':'); +push_punct!(push_comma push_comma_spanned ','); +push_punct!(push_div push_div_spanned '/'); +push_punct!(push_div_eq push_div_eq_spanned '/' '='); +push_punct!(push_dot push_dot_spanned '.'); +push_punct!(push_dot2 push_dot2_spanned '.' '.'); +push_punct!(push_dot3 push_dot3_spanned '.' '.' '.'); +push_punct!(push_dot_dot_eq push_dot_dot_eq_spanned '.' '.' '='); +push_punct!(push_eq push_eq_spanned '='); +push_punct!(push_eq_eq push_eq_eq_spanned '=' '='); +push_punct!(push_ge push_ge_spanned '>' '='); +push_punct!(push_gt push_gt_spanned '>'); +push_punct!(push_le push_le_spanned '<' '='); +push_punct!(push_lt push_lt_spanned '<'); +push_punct!(push_mul_eq push_mul_eq_spanned '*' '='); +push_punct!(push_ne push_ne_spanned '!' 
'=');
+push_punct!(push_or push_or_spanned '|');
+push_punct!(push_or_eq push_or_eq_spanned '|' '=');
+push_punct!(push_or_or push_or_or_spanned '|' '|');
+push_punct!(push_pound push_pound_spanned '#');
+push_punct!(push_question push_question_spanned '?');
+push_punct!(push_rarrow push_rarrow_spanned '-' '>');
+push_punct!(push_larrow push_larrow_spanned '<' '-');
+push_punct!(push_rem push_rem_spanned '%');
+push_punct!(push_rem_eq push_rem_eq_spanned '%' '=');
+push_punct!(push_fat_arrow push_fat_arrow_spanned '=' '>');
+push_punct!(push_semi push_semi_spanned ';');
+push_punct!(push_shl push_shl_spanned '<' '<');
+push_punct!(push_shl_eq push_shl_eq_spanned '<' '<' '=');
+push_punct!(push_shr push_shr_spanned '>' '>');
+push_punct!(push_shr_eq push_shr_eq_spanned '>' '>' '=');
+push_punct!(push_star push_star_spanned '*');
+push_punct!(push_sub push_sub_spanned '-');
+push_punct!(push_sub_eq push_sub_eq_spanned '-' '=');
+
+#[doc(hidden)]
+pub fn push_underscore(tokens: &mut TokenStream) {
+    push_underscore_spanned(tokens, Span::call_site());
+}
+
+#[doc(hidden)]
+pub fn push_underscore_spanned(tokens: &mut TokenStream, span: Span) {
+    tokens.append(Ident::new("_", span));
+}
+
+// Helper method for constructing identifiers from the `format_ident!` macro,
+// handling `r#` prefixes.
+#[doc(hidden)]
+pub fn mk_ident(id: &str, span: Option<Span>) -> Ident {
+    let span = span.unwrap_or_else(Span::call_site);
+    ident_maybe_raw(id, span)
+}
+
+fn ident_maybe_raw(id: &str, span: Span) -> Ident {
+    if let Some(id) = id.strip_prefix("r#") {
+        Ident::new_raw(id, span)
+    } else {
+        Ident::new(id, span)
+    }
+}
+
+// Adapts from `IdentFragment` to `fmt::Display` for use by the `format_ident!`
+// macro, and exposes span information from these fragments.
+//
+// This struct also has forwarding implementations of the formatting traits
+// `Octal`, `LowerHex`, `UpperHex`, and `Binary` to allow for their use within
+// `format_ident!`.
+#[derive(Copy, Clone)]
+#[doc(hidden)]
+pub struct IdentFragmentAdapter<T: IdentFragment>(pub T);
+
+impl<T: IdentFragment> IdentFragmentAdapter<T> {
+    pub fn span(&self) -> Option<Span> {
+        self.0.span()
+    }
+}
+
+impl<T: IdentFragment> fmt::Display for IdentFragmentAdapter<T> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        IdentFragment::fmt(&self.0, f)
+    }
+}
+
+impl<T: IdentFragment + fmt::Octal> fmt::Octal for IdentFragmentAdapter<T> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        fmt::Octal::fmt(&self.0, f)
+    }
+}
+
+impl<T: IdentFragment + fmt::LowerHex> fmt::LowerHex for IdentFragmentAdapter<T> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        fmt::LowerHex::fmt(&self.0, f)
+    }
+}
+
+impl<T: IdentFragment + fmt::UpperHex> fmt::UpperHex for IdentFragmentAdapter<T> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        fmt::UpperHex::fmt(&self.0, f)
+    }
+}
+
+impl<T: IdentFragment + fmt::Binary> fmt::Binary for IdentFragmentAdapter<T> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        fmt::Binary::fmt(&self.0, f)
+    }
+}
diff --git a/vendor/quote/src/spanned.rs b/vendor/quote/src/spanned.rs
new file mode 100644
index 00000000000000..6afc6b30355977
--- /dev/null
+++ b/vendor/quote/src/spanned.rs
@@ -0,0 +1,49 @@
+use crate::ToTokens;
+use proc_macro2::extra::DelimSpan;
+use proc_macro2::{Span, TokenStream};
+
+// Not public API other than via the syn crate. Use syn::spanned::Spanned.
+pub trait Spanned: private::Sealed {
+    fn __span(&self) -> Span;
+}
+
+impl Spanned for Span {
+    fn __span(&self) -> Span {
+        *self
+    }
+}
+
+impl Spanned for DelimSpan {
+    fn __span(&self) -> Span {
+        self.join()
+    }
+}
+
+impl<T: ?Sized + ToTokens> Spanned for T {
+    fn __span(&self) -> Span {
+        join_spans(self.into_token_stream())
+    }
+}
+
+fn join_spans(tokens: TokenStream) -> Span {
+    let mut iter = tokens.into_iter().map(|tt| tt.span());
+
+    let Some(first) = iter.next() else {
+        return Span::call_site();
+    };
+
+    iter.fold(None, |_prev, next| Some(next))
+        .and_then(|last| first.join(last))
+        .unwrap_or(first)
+}
+
+mod private {
+    use crate::ToTokens;
+    use proc_macro2::extra::DelimSpan;
+    use proc_macro2::Span;
+
+    pub trait Sealed {}
+    impl Sealed for Span {}
+    impl Sealed for DelimSpan {}
+    impl<T: ?Sized + ToTokens> Sealed for T {}
+}
diff --git a/vendor/quote/src/to_tokens.rs b/vendor/quote/src/to_tokens.rs
new file mode 100644
index 00000000000000..f373092b650fcb
--- /dev/null
+++ b/vendor/quote/src/to_tokens.rs
@@ -0,0 +1,271 @@
+use super::TokenStreamExt;
+use alloc::borrow::Cow;
+use alloc::rc::Rc;
+use core::iter;
+use proc_macro2::{Group, Ident, Literal, Punct, Span, TokenStream, TokenTree};
+use std::ffi::{CStr, CString};
+
+/// Types that can be interpolated inside a `quote!` invocation.
+pub trait ToTokens {
+    /// Write `self` to the given `TokenStream`.
+    ///
+    /// The token append methods provided by the [`TokenStreamExt`] extension
+    /// trait may be useful for implementing `ToTokens`.
+    ///
+    /// # Example
+    ///
+    /// Example implementation for a struct representing Rust paths like
+    /// `std::cmp::PartialEq`:
+    ///
+    /// ```
+    /// use proc_macro2::{TokenTree, Spacing, Span, Punct, TokenStream};
+    /// use quote::{TokenStreamExt, ToTokens};
+    ///
+    /// pub struct Path {
+    ///     pub global: bool,
+    ///     pub segments: Vec<PathSegment>,
+    /// }
+    ///
+    /// impl ToTokens for Path {
+    ///     fn to_tokens(&self, tokens: &mut TokenStream) {
+    ///         for (i, segment) in self.segments.iter().enumerate() {
+    ///             if i > 0 || self.global {
+    ///                 // Double colon `::`
+    ///                 tokens.append(Punct::new(':', Spacing::Joint));
+    ///                 tokens.append(Punct::new(':', Spacing::Alone));
+    ///             }
+    ///             segment.to_tokens(tokens);
+    ///         }
+    ///     }
+    /// }
+    /// #
+    /// # pub struct PathSegment;
+    /// #
+    /// # impl ToTokens for PathSegment {
+    /// #     fn to_tokens(&self, tokens: &mut TokenStream) {
+    /// #         unimplemented!()
+    /// #     }
+    /// # }
+    /// ```
+    fn to_tokens(&self, tokens: &mut TokenStream);
+
+    /// Convert `self` directly into a `TokenStream` object.
+    ///
+    /// This method is implicitly implemented using `to_tokens`, and acts as a
+    /// convenience method for consumers of the `ToTokens` trait.
+    fn to_token_stream(&self) -> TokenStream {
+        let mut tokens = TokenStream::new();
+        self.to_tokens(&mut tokens);
+        tokens
+    }
+
+    /// Convert `self` directly into a `TokenStream` object.
+    ///
+    /// This method is implicitly implemented using `to_tokens`, and acts as a
+    /// convenience method for consumers of the `ToTokens` trait.
+ fn into_token_stream(self) -> TokenStream + where + Self: Sized, + { + self.to_token_stream() + } +} + +impl ToTokens for &T { + fn to_tokens(&self, tokens: &mut TokenStream) { + (**self).to_tokens(tokens); + } +} + +impl ToTokens for &mut T { + fn to_tokens(&self, tokens: &mut TokenStream) { + (**self).to_tokens(tokens); + } +} + +impl<'a, T: ?Sized + ToOwned + ToTokens> ToTokens for Cow<'a, T> { + fn to_tokens(&self, tokens: &mut TokenStream) { + (**self).to_tokens(tokens); + } +} + +impl ToTokens for Box { + fn to_tokens(&self, tokens: &mut TokenStream) { + (**self).to_tokens(tokens); + } +} + +impl ToTokens for Rc { + fn to_tokens(&self, tokens: &mut TokenStream) { + (**self).to_tokens(tokens); + } +} + +impl ToTokens for Option { + fn to_tokens(&self, tokens: &mut TokenStream) { + if let Some(t) = self { + t.to_tokens(tokens); + } + } +} + +impl ToTokens for str { + fn to_tokens(&self, tokens: &mut TokenStream) { + tokens.append(Literal::string(self)); + } +} + +impl ToTokens for String { + fn to_tokens(&self, tokens: &mut TokenStream) { + self.as_str().to_tokens(tokens); + } +} + +impl ToTokens for i8 { + fn to_tokens(&self, tokens: &mut TokenStream) { + tokens.append(Literal::i8_suffixed(*self)); + } +} + +impl ToTokens for i16 { + fn to_tokens(&self, tokens: &mut TokenStream) { + tokens.append(Literal::i16_suffixed(*self)); + } +} + +impl ToTokens for i32 { + fn to_tokens(&self, tokens: &mut TokenStream) { + tokens.append(Literal::i32_suffixed(*self)); + } +} + +impl ToTokens for i64 { + fn to_tokens(&self, tokens: &mut TokenStream) { + tokens.append(Literal::i64_suffixed(*self)); + } +} + +impl ToTokens for i128 { + fn to_tokens(&self, tokens: &mut TokenStream) { + tokens.append(Literal::i128_suffixed(*self)); + } +} + +impl ToTokens for isize { + fn to_tokens(&self, tokens: &mut TokenStream) { + tokens.append(Literal::isize_suffixed(*self)); + } +} + +impl ToTokens for u8 { + fn to_tokens(&self, tokens: &mut TokenStream) { + tokens.append(Literal::u8_suffixed(*self)); + } +} + +impl ToTokens for u16 { + fn to_tokens(&self, tokens: &mut TokenStream) { + tokens.append(Literal::u16_suffixed(*self)); + } +} + +impl ToTokens for u32 { + fn to_tokens(&self, tokens: &mut TokenStream) { + tokens.append(Literal::u32_suffixed(*self)); + } +} + +impl ToTokens for u64 { + fn to_tokens(&self, tokens: &mut TokenStream) { + tokens.append(Literal::u64_suffixed(*self)); + } +} + +impl ToTokens for u128 { + fn to_tokens(&self, tokens: &mut TokenStream) { + tokens.append(Literal::u128_suffixed(*self)); + } +} + +impl ToTokens for usize { + fn to_tokens(&self, tokens: &mut TokenStream) { + tokens.append(Literal::usize_suffixed(*self)); + } +} + +impl ToTokens for f32 { + fn to_tokens(&self, tokens: &mut TokenStream) { + tokens.append(Literal::f32_suffixed(*self)); + } +} + +impl ToTokens for f64 { + fn to_tokens(&self, tokens: &mut TokenStream) { + tokens.append(Literal::f64_suffixed(*self)); + } +} + +impl ToTokens for char { + fn to_tokens(&self, tokens: &mut TokenStream) { + tokens.append(Literal::character(*self)); + } +} + +impl ToTokens for bool { + fn to_tokens(&self, tokens: &mut TokenStream) { + let word = if *self { "true" } else { "false" }; + tokens.append(Ident::new(word, Span::call_site())); + } +} + +impl ToTokens for CStr { + fn to_tokens(&self, tokens: &mut TokenStream) { + tokens.append(Literal::c_string(self)); + } +} + +impl ToTokens for CString { + fn to_tokens(&self, tokens: &mut TokenStream) { + tokens.append(Literal::c_string(self)); + } +} + +impl ToTokens for Group { + fn 
to_tokens(&self, tokens: &mut TokenStream) { + tokens.append(self.clone()); + } +} + +impl ToTokens for Ident { + fn to_tokens(&self, tokens: &mut TokenStream) { + tokens.append(self.clone()); + } +} + +impl ToTokens for Punct { + fn to_tokens(&self, tokens: &mut TokenStream) { + tokens.append(self.clone()); + } +} + +impl ToTokens for Literal { + fn to_tokens(&self, tokens: &mut TokenStream) { + tokens.append(self.clone()); + } +} + +impl ToTokens for TokenTree { + fn to_tokens(&self, tokens: &mut TokenStream) { + tokens.append(self.clone()); + } +} + +impl ToTokens for TokenStream { + fn to_tokens(&self, tokens: &mut TokenStream) { + tokens.extend(iter::once(self.clone())); + } + + fn into_token_stream(self) -> TokenStream { + self + } +} diff --git a/vendor/quote/tests/compiletest.rs b/vendor/quote/tests/compiletest.rs new file mode 100644 index 00000000000000..23a6a065ec960a --- /dev/null +++ b/vendor/quote/tests/compiletest.rs @@ -0,0 +1,7 @@ +#[rustversion::attr(not(nightly), ignore = "requires nightly")] +#[cfg_attr(miri, ignore = "incompatible with miri")] +#[test] +fn ui() { + let t = trybuild::TestCases::new(); + t.compile_fail("tests/ui/*.rs"); +} diff --git a/vendor/quote/tests/test.rs b/vendor/quote/tests/test.rs new file mode 100644 index 00000000000000..e096780e1fee05 --- /dev/null +++ b/vendor/quote/tests/test.rs @@ -0,0 +1,568 @@ +#![allow( + clippy::disallowed_names, + clippy::let_underscore_untyped, + clippy::shadow_unrelated, + clippy::unseparated_literal_suffix, + clippy::used_underscore_binding +)] + +extern crate proc_macro; + +use proc_macro2::{Delimiter, Group, Ident, Span, TokenStream}; +use quote::{format_ident, quote, quote_spanned, TokenStreamExt}; +use std::borrow::Cow; +use std::collections::BTreeSet; +use std::ffi::{CStr, CString}; + +struct X; + +impl quote::ToTokens for X { + fn to_tokens(&self, tokens: &mut TokenStream) { + tokens.append(Ident::new("X", Span::call_site())); + } +} + +#[test] +fn test_quote_impl() { + let tokens = quote! { + impl<'a, T: ToTokens> ToTokens for &'a T { + fn to_tokens(&self, tokens: &mut TokenStream) { + (**self).to_tokens(tokens) + } + } + }; + + let expected = concat!( + "impl < 'a , T : ToTokens > ToTokens for & 'a T { ", + "fn to_tokens (& self , tokens : & mut TokenStream) { ", + "(* * self) . to_tokens (tokens) ", + "} ", + "}" + ); + + assert_eq!(expected, tokens.to_string()); +} + +#[test] +fn test_quote_spanned_impl() { + let span = Span::call_site(); + let tokens = quote_spanned! {span=> + impl<'a, T: ToTokens> ToTokens for &'a T { + fn to_tokens(&self, tokens: &mut TokenStream) { + (**self).to_tokens(tokens) + } + } + }; + + let expected = concat!( + "impl < 'a , T : ToTokens > ToTokens for & 'a T { ", + "fn to_tokens (& self , tokens : & mut TokenStream) { ", + "(* * self) . 
to_tokens (tokens) ", + "} ", + "}" + ); + + assert_eq!(expected, tokens.to_string()); +} + +#[test] +fn test_substitution() { + let x = X; + let tokens = quote!(#x <#x> (#x) [#x] {#x}); + + let expected = "X < X > (X) [X] { X }"; + + assert_eq!(expected, tokens.to_string()); +} + +#[test] +fn test_iter() { + let primes = &[X, X, X, X]; + + assert_eq!("X X X X", quote!(#(#primes)*).to_string()); + + assert_eq!("X , X , X , X ,", quote!(#(#primes,)*).to_string()); + + assert_eq!("X , X , X , X", quote!(#(#primes),*).to_string()); +} + +#[test] +fn test_array() { + let array: [u8; 40] = [0; 40]; + let _ = quote!(#(#array #array)*); + + let ref_array: &[u8; 40] = &[0; 40]; + let _ = quote!(#(#ref_array #ref_array)*); + + let ref_slice: &[u8] = &[0; 40]; + let _ = quote!(#(#ref_slice #ref_slice)*); + + let array: [X; 2] = [X, X]; // !Copy + let _ = quote!(#(#array #array)*); + + let ref_array: &[X; 2] = &[X, X]; + let _ = quote!(#(#ref_array #ref_array)*); + + let ref_slice: &[X] = &[X, X]; + let _ = quote!(#(#ref_slice #ref_slice)*); + + let array_of_array: [[u8; 2]; 2] = [[0; 2]; 2]; + let _ = quote!(#(#(#array_of_array)*)*); +} + +#[test] +fn test_advanced() { + let generics = quote!( <'a, T> ); + + let where_clause = quote!( where T: Serialize ); + + let field_ty = quote!(String); + + let item_ty = quote!(Cow<'a, str>); + + let path = quote!(SomeTrait::serialize_with); + + let value = quote!(self.x); + + let tokens = quote! { + struct SerializeWith #generics #where_clause { + value: &'a #field_ty, + phantom: ::std::marker::PhantomData<#item_ty>, + } + + impl #generics ::serde::Serialize for SerializeWith #generics #where_clause { + fn serialize(&self, s: &mut S) -> Result<(), S::Error> + where S: ::serde::Serializer + { + #path(self.value, s) + } + } + + SerializeWith { + value: #value, + phantom: ::std::marker::PhantomData::<#item_ty>, + } + }; + + let expected = concat!( + "struct SerializeWith < 'a , T > where T : Serialize { ", + "value : & 'a String , ", + "phantom : :: std :: marker :: PhantomData < Cow < 'a , str > > , ", + "} ", + "impl < 'a , T > :: serde :: Serialize for SerializeWith < 'a , T > where T : Serialize { ", + "fn serialize < S > (& self , s : & mut S) -> Result < () , S :: Error > ", + "where S : :: serde :: Serializer ", + "{ ", + "SomeTrait :: serialize_with (self . value , s) ", + "} ", + "} ", + "SerializeWith { ", + "value : self . x , ", + "phantom : :: std :: marker :: PhantomData :: < Cow < 'a , str > > , ", + "}" + ); + + assert_eq!(expected, tokens.to_string()); +} + +#[test] +fn test_integer() { + let ii8 = -1i8; + let ii16 = -1i16; + let ii32 = -1i32; + let ii64 = -1i64; + let ii128 = -1i128; + let iisize = -1isize; + let uu8 = 1u8; + let uu16 = 1u16; + let uu32 = 1u32; + let uu64 = 1u64; + let uu128 = 1u128; + let uusize = 1usize; + + let tokens = quote! { + 1 1i32 1u256 + #ii8 #ii16 #ii32 #ii64 #ii128 #iisize + #uu8 #uu16 #uu32 #uu64 #uu128 #uusize + }; + let expected = + "1 1i32 1u256 - 1i8 - 1i16 - 1i32 - 1i64 - 1i128 - 1isize 1u8 1u16 1u32 1u64 1u128 1usize"; + assert_eq!(expected, tokens.to_string()); +} + +#[test] +fn test_floating() { + let e32 = 2.345f32; + + let e64 = 2.345f64; + + let tokens = quote! { + #e32 + #e64 + }; + let expected = "2.345f32 2.345f64"; + assert_eq!(expected, tokens.to_string()); +} + +#[test] +fn test_char() { + let zero = '\u{1}'; + let pound = '#'; + let quote = '"'; + let apost = '\''; + let newline = '\n'; + let heart = '\u{2764}'; + + let tokens = quote! 
{ + #zero #pound #quote #apost #newline #heart + }; + let expected = "'\\u{1}' '#' '\"' '\\'' '\\n' '\u{2764}'"; + assert_eq!(expected, tokens.to_string()); +} + +#[test] +fn test_str() { + let s = "\u{1} a 'b \" c"; + let tokens = quote!(#s); + let expected = "\"\\u{1} a 'b \\\" c\""; + assert_eq!(expected, tokens.to_string()); +} + +#[test] +fn test_string() { + let s = "\u{1} a 'b \" c".to_string(); + let tokens = quote!(#s); + let expected = "\"\\u{1} a 'b \\\" c\""; + assert_eq!(expected, tokens.to_string()); +} + +#[test] +fn test_c_str() { + let s = CStr::from_bytes_with_nul(b"\x01 a 'b \" c\0").unwrap(); + let tokens = quote!(#s); + let expected = "c\"\\u{1} a 'b \\\" c\""; + assert_eq!(expected, tokens.to_string()); +} + +#[test] +fn test_c_string() { + let s = CString::new(&b"\x01 a 'b \" c"[..]).unwrap(); + let tokens = quote!(#s); + let expected = "c\"\\u{1} a 'b \\\" c\""; + assert_eq!(expected, tokens.to_string()); +} + +#[test] +fn test_interpolated_literal() { + macro_rules! m { + ($literal:literal) => { + quote!($literal) + }; + } + + let tokens = m!(1); + let expected = "1"; + assert_eq!(expected, tokens.to_string()); + + let tokens = m!(-1); + let expected = "- 1"; + assert_eq!(expected, tokens.to_string()); + + let tokens = m!(true); + let expected = "true"; + assert_eq!(expected, tokens.to_string()); + + let tokens = m!(-true); + let expected = "- true"; + assert_eq!(expected, tokens.to_string()); +} + +#[test] +fn test_ident() { + let foo = Ident::new("Foo", Span::call_site()); + let bar = Ident::new(&format!("Bar{}", 7), Span::call_site()); + let tokens = quote!(struct #foo; enum #bar {}); + let expected = "struct Foo ; enum Bar7 { }"; + assert_eq!(expected, tokens.to_string()); +} + +#[test] +fn test_underscore() { + let tokens = quote!(let _;); + let expected = "let _ ;"; + assert_eq!(expected, tokens.to_string()); +} + +#[test] +fn test_duplicate() { + let ch = 'x'; + + let tokens = quote!(#ch #ch); + + let expected = "'x' 'x'"; + assert_eq!(expected, tokens.to_string()); +} + +#[test] +fn test_fancy_repetition() { + let foo = vec!["a", "b"]; + let bar = vec![true, false]; + + let tokens = quote! { + #(#foo: #bar),* + }; + + let expected = r#""a" : true , "b" : false"#; + assert_eq!(expected, tokens.to_string()); +} + +#[test] +fn test_nested_fancy_repetition() { + let nested = vec![vec!['a', 'b', 'c'], vec!['x', 'y', 'z']]; + + let tokens = quote! { + #( + #(#nested)* + ),* + }; + + let expected = "'a' 'b' 'c' , 'x' 'y' 'z'"; + assert_eq!(expected, tokens.to_string()); +} + +#[test] +fn test_duplicate_name_repetition() { + let foo = &["a", "b"]; + + let tokens = quote! { + #(#foo: #foo),* + #(#foo: #foo),* + }; + + let expected = r#""a" : "a" , "b" : "b" "a" : "a" , "b" : "b""#; + assert_eq!(expected, tokens.to_string()); +} + +#[test] +fn test_duplicate_name_repetition_no_copy() { + let foo = vec!["a".to_owned(), "b".to_owned()]; + + let tokens = quote! { + #(#foo: #foo),* + }; + + let expected = r#""a" : "a" , "b" : "b""#; + assert_eq!(expected, tokens.to_string()); +} + +#[test] +fn test_btreeset_repetition() { + let mut set = BTreeSet::new(); + set.insert("a".to_owned()); + set.insert("b".to_owned()); + + let tokens = quote! { + #(#set: #set),* + }; + + let expected = r#""a" : "a" , "b" : "b""#; + assert_eq!(expected, tokens.to_string()); +} + +#[test] +fn test_variable_name_conflict() { + // The implementation of `#(...),*` uses the variable `_i` but it should be + // fine, if a little confusing when debugging. 
+ let _i = vec!['a', 'b']; + let tokens = quote! { #(#_i),* }; + let expected = "'a' , 'b'"; + assert_eq!(expected, tokens.to_string()); +} + +#[test] +fn test_nonrep_in_repetition() { + let rep = vec!["a", "b"]; + let nonrep = "c"; + + let tokens = quote! { + #(#rep #rep : #nonrep #nonrep),* + }; + + let expected = r#""a" "a" : "c" "c" , "b" "b" : "c" "c""#; + assert_eq!(expected, tokens.to_string()); +} + +#[test] +fn test_empty_quote() { + let tokens = quote!(); + assert_eq!("", tokens.to_string()); +} + +#[test] +fn test_box_str() { + let b = "str".to_owned().into_boxed_str(); + let tokens = quote! { #b }; + assert_eq!("\"str\"", tokens.to_string()); +} + +#[test] +fn test_cow() { + let owned: Cow = Cow::Owned(Ident::new("owned", Span::call_site())); + + let ident = Ident::new("borrowed", Span::call_site()); + let borrowed = Cow::Borrowed(&ident); + + let tokens = quote! { #owned #borrowed }; + assert_eq!("owned borrowed", tokens.to_string()); +} + +#[test] +fn test_closure() { + fn field_i(i: usize) -> Ident { + format_ident!("__field{}", i) + } + + let fields = (0usize..3) + .map(field_i as fn(_) -> _) + .map(|var| quote! { #var }); + + let tokens = quote! { #(#fields)* }; + assert_eq!("__field0 __field1 __field2", tokens.to_string()); +} + +#[test] +fn test_append_tokens() { + let mut a = quote!(a); + let b = quote!(b); + a.append_all(b); + assert_eq!("a b", a.to_string()); +} + +#[test] +fn test_format_ident() { + let id0 = format_ident!("Aa"); + let id1 = format_ident!("Hello{x}", x = id0); + let id2 = format_ident!("Hello{x}", x = 5usize); + let id3 = format_ident!("Hello{}_{x}", id0, x = 10usize); + let id4 = format_ident!("Aa", span = Span::call_site()); + let id5 = format_ident!("Hello{}", Cow::Borrowed("World")); + + assert_eq!(id0, "Aa"); + assert_eq!(id1, "HelloAa"); + assert_eq!(id2, "Hello5"); + assert_eq!(id3, "HelloAa_10"); + assert_eq!(id4, "Aa"); + assert_eq!(id5, "HelloWorld"); +} + +#[test] +fn test_format_ident_strip_raw() { + let id = format_ident!("r#struct"); + let my_id = format_ident!("MyId{}", id); + let raw_my_id = format_ident!("r#MyId{}", id); + + assert_eq!(id, "r#struct"); + assert_eq!(my_id, "MyIdstruct"); + assert_eq!(raw_my_id, "r#MyIdstruct"); +} + +#[test] +fn test_outer_line_comment() { + let tokens = quote! { + /// doc + }; + let expected = "# [doc = r\" doc\"]"; + assert_eq!(expected, tokens.to_string()); +} + +#[test] +fn test_inner_line_comment() { + let tokens = quote! { + //! doc + }; + let expected = "# ! [doc = r\" doc\"]"; + assert_eq!(expected, tokens.to_string()); +} + +#[test] +fn test_outer_block_comment() { + let tokens = quote! { + /** doc */ + }; + let expected = "# [doc = r\" doc \"]"; + assert_eq!(expected, tokens.to_string()); +} + +#[test] +fn test_inner_block_comment() { + let tokens = quote! { + /*! doc */ + }; + let expected = "# ! [doc = r\" doc \"]"; + assert_eq!(expected, tokens.to_string()); +} + +#[test] +fn test_outer_attr() { + let tokens = quote! { + #[inline] + }; + let expected = "# [inline]"; + assert_eq!(expected, tokens.to_string()); +} + +#[test] +fn test_inner_attr() { + let tokens = quote! { + #![no_std] + }; + let expected = "# ! [no_std]"; + assert_eq!(expected, tokens.to_string()); +} + +// https://github.com/dtolnay/quote/issues/130 +#[test] +fn test_star_after_repetition() { + let c = vec!['0', '1']; + let tokens = quote! 
{ + #( + f(#c); + )* + *out = None; + }; + let expected = "f ('0') ; f ('1') ; * out = None ;"; + assert_eq!(expected, tokens.to_string()); +} + +#[test] +fn test_quote_raw_id() { + let id = quote!(r#raw_id); + assert_eq!(id.to_string(), "r#raw_id"); +} + +#[test] +fn test_type_inference_for_span() { + trait CallSite { + fn get() -> Self; + } + + impl CallSite for Span { + fn get() -> Self { + Span::call_site() + } + } + + let span = Span::call_site(); + let _ = quote_spanned!(span=> ...); + + let delim_span = Group::new(Delimiter::Parenthesis, TokenStream::new()).delim_span(); + let _ = quote_spanned!(delim_span=> ...); + + let inferred = CallSite::get(); + let _ = quote_spanned!(inferred=> ...); + + if false { + let proc_macro_span = proc_macro::Span::call_site(); + let _ = quote_spanned!(proc_macro_span.into()=> ...); + } +} diff --git a/vendor/quote/tests/ui/does-not-have-iter-interpolated-dup.rs b/vendor/quote/tests/ui/does-not-have-iter-interpolated-dup.rs new file mode 100644 index 00000000000000..0a39f4150704fb --- /dev/null +++ b/vendor/quote/tests/ui/does-not-have-iter-interpolated-dup.rs @@ -0,0 +1,9 @@ +use quote::quote; + +fn main() { + let nonrep = ""; + + // Without some protection against repetitions with no iterator somewhere + // inside, this would loop infinitely. + quote!(#(#nonrep #nonrep)*); +} diff --git a/vendor/quote/tests/ui/does-not-have-iter-interpolated-dup.stderr b/vendor/quote/tests/ui/does-not-have-iter-interpolated-dup.stderr new file mode 100644 index 00000000000000..96af816336d04e --- /dev/null +++ b/vendor/quote/tests/ui/does-not-have-iter-interpolated-dup.stderr @@ -0,0 +1,13 @@ +error[E0277]: repetition contains no interpolated value that is an iterator + --> tests/ui/does-not-have-iter-interpolated-dup.rs:8:5 + | +8 | quote!(#(#nonrep #nonrep)*); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^ none of the values interpolated inside this repetition are iterable + | +help: the trait `CheckHasIterator` is not implemented for `HasIterator` + but it is implemented for `HasIterator` + --> src/runtime.rs + | + | impl CheckHasIterator for HasIterator {} + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + = note: this error originates in the macro `$crate::quote_token_with_context` which comes from the expansion of the macro `quote` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/vendor/quote/tests/ui/does-not-have-iter-interpolated.rs b/vendor/quote/tests/ui/does-not-have-iter-interpolated.rs new file mode 100644 index 00000000000000..2c740cc0830fd8 --- /dev/null +++ b/vendor/quote/tests/ui/does-not-have-iter-interpolated.rs @@ -0,0 +1,9 @@ +use quote::quote; + +fn main() { + let nonrep = ""; + + // Without some protection against repetitions with no iterator somewhere + // inside, this would loop infinitely. 
+ quote!(#(#nonrep)*); +} diff --git a/vendor/quote/tests/ui/does-not-have-iter-interpolated.stderr b/vendor/quote/tests/ui/does-not-have-iter-interpolated.stderr new file mode 100644 index 00000000000000..0c0572c90887a0 --- /dev/null +++ b/vendor/quote/tests/ui/does-not-have-iter-interpolated.stderr @@ -0,0 +1,13 @@ +error[E0277]: repetition contains no interpolated value that is an iterator + --> tests/ui/does-not-have-iter-interpolated.rs:8:5 + | +8 | quote!(#(#nonrep)*); + | ^^^^^^^^^^^^^^^^^^^ none of the values interpolated inside this repetition are iterable + | +help: the trait `CheckHasIterator` is not implemented for `HasIterator` + but it is implemented for `HasIterator` + --> src/runtime.rs + | + | impl CheckHasIterator for HasIterator {} + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + = note: this error originates in the macro `$crate::quote_token_with_context` which comes from the expansion of the macro `quote` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/vendor/quote/tests/ui/does-not-have-iter-separated.rs b/vendor/quote/tests/ui/does-not-have-iter-separated.rs new file mode 100644 index 00000000000000..c027243ddac68f --- /dev/null +++ b/vendor/quote/tests/ui/does-not-have-iter-separated.rs @@ -0,0 +1,5 @@ +use quote::quote; + +fn main() { + quote!(#(a b),*); +} diff --git a/vendor/quote/tests/ui/does-not-have-iter-separated.stderr b/vendor/quote/tests/ui/does-not-have-iter-separated.stderr new file mode 100644 index 00000000000000..e899fb483052aa --- /dev/null +++ b/vendor/quote/tests/ui/does-not-have-iter-separated.stderr @@ -0,0 +1,13 @@ +error[E0277]: repetition contains no interpolated value that is an iterator + --> tests/ui/does-not-have-iter-separated.rs:4:5 + | +4 | quote!(#(a b),*); + | ^^^^^^^^^^^^^^^^ none of the values interpolated inside this repetition are iterable + | +help: the trait `CheckHasIterator` is not implemented for `HasIterator` + but it is implemented for `HasIterator` + --> src/runtime.rs + | + | impl CheckHasIterator for HasIterator {} + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + = note: this error originates in the macro `$crate::quote_token_with_context` which comes from the expansion of the macro `quote` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/vendor/quote/tests/ui/does-not-have-iter.rs b/vendor/quote/tests/ui/does-not-have-iter.rs new file mode 100644 index 00000000000000..8908353b57d738 --- /dev/null +++ b/vendor/quote/tests/ui/does-not-have-iter.rs @@ -0,0 +1,5 @@ +use quote::quote; + +fn main() { + quote!(#(a b)*); +} diff --git a/vendor/quote/tests/ui/does-not-have-iter.stderr b/vendor/quote/tests/ui/does-not-have-iter.stderr new file mode 100644 index 00000000000000..348071cc42f263 --- /dev/null +++ b/vendor/quote/tests/ui/does-not-have-iter.stderr @@ -0,0 +1,13 @@ +error[E0277]: repetition contains no interpolated value that is an iterator + --> tests/ui/does-not-have-iter.rs:4:5 + | +4 | quote!(#(a b)*); + | ^^^^^^^^^^^^^^^ none of the values interpolated inside this repetition are iterable + | +help: the trait `CheckHasIterator` is not implemented for `HasIterator` + but it is implemented for `HasIterator` + --> src/runtime.rs + | + | impl CheckHasIterator for HasIterator {} + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + = note: this error originates in the macro `$crate::quote_token_with_context` which comes from the expansion of the macro `quote` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git 
a/vendor/quote/tests/ui/not-quotable.rs b/vendor/quote/tests/ui/not-quotable.rs new file mode 100644 index 00000000000000..f991c1883d6d34 --- /dev/null +++ b/vendor/quote/tests/ui/not-quotable.rs @@ -0,0 +1,7 @@ +use quote::quote; +use std::net::Ipv4Addr; + +fn main() { + let ip = Ipv4Addr::LOCALHOST; + let _ = quote! { #ip }; +} diff --git a/vendor/quote/tests/ui/not-quotable.stderr b/vendor/quote/tests/ui/not-quotable.stderr new file mode 100644 index 00000000000000..15492463b6de6e --- /dev/null +++ b/vendor/quote/tests/ui/not-quotable.stderr @@ -0,0 +1,20 @@ +error[E0277]: the trait bound `Ipv4Addr: ToTokens` is not satisfied + --> tests/ui/not-quotable.rs:6:13 + | +6 | let _ = quote! { #ip }; + | ^^^^^^^^^^^^^^ + | | + | the trait `ToTokens` is not implemented for `Ipv4Addr` + | required by a bound introduced by this call + | + = help: the following other types implement trait `ToTokens`: + &T + &mut T + Box + CStr + CString + Cow<'a, T> + Option + Rc + and $N others + = note: this error originates in the macro `quote` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/vendor/quote/tests/ui/not-repeatable.rs b/vendor/quote/tests/ui/not-repeatable.rs new file mode 100644 index 00000000000000..a8f0fe773c5d17 --- /dev/null +++ b/vendor/quote/tests/ui/not-repeatable.rs @@ -0,0 +1,8 @@ +use quote::quote; + +struct Ipv4Addr; + +fn main() { + let ip = Ipv4Addr; + let _ = quote! { #(#ip)* }; +} diff --git a/vendor/quote/tests/ui/not-repeatable.stderr b/vendor/quote/tests/ui/not-repeatable.stderr new file mode 100644 index 00000000000000..d5e13b040b483e --- /dev/null +++ b/vendor/quote/tests/ui/not-repeatable.stderr @@ -0,0 +1,42 @@ +error[E0599]: the method `quote_into_iter` exists for struct `Ipv4Addr`, but its trait bounds were not satisfied + --> tests/ui/not-repeatable.rs:7:13 + | +3 | struct Ipv4Addr; + | --------------- method `quote_into_iter` not found for this struct because it doesn't satisfy `Ipv4Addr: Iterator`, `Ipv4Addr: ToTokens`, `Ipv4Addr: ext::RepIteratorExt` or `Ipv4Addr: ext::RepToTokensExt` +... +7 | let _ = quote! { #(#ip)* }; + | ^^^^^^^^^^^^^^^^^^ method cannot be called on `Ipv4Addr` due to unsatisfied trait bounds + | + = note: the following trait bounds were not satisfied: + `Ipv4Addr: Iterator` + which is required by `Ipv4Addr: ext::RepIteratorExt` + `&Ipv4Addr: Iterator` + which is required by `&Ipv4Addr: ext::RepIteratorExt` + `Ipv4Addr: ToTokens` + which is required by `Ipv4Addr: ext::RepToTokensExt` + `&mut Ipv4Addr: Iterator` + which is required by `&mut Ipv4Addr: ext::RepIteratorExt` +note: the traits `Iterator` and `ToTokens` must be implemented + --> $RUST/core/src/iter/traits/iterator.rs + | + | pub trait Iterator { + | ^^^^^^^^^^^^^^^^^^ + | + ::: src/to_tokens.rs + | + | pub trait ToTokens { + | ^^^^^^^^^^^^^^^^^^ + = help: items from traits can only be used if the trait is implemented and in scope + = note: the following traits define an item `quote_into_iter`, perhaps you need to implement one of them: + candidate #1: `ext::RepAsIteratorExt` + candidate #2: `ext::RepIteratorExt` + candidate #3: `ext::RepToTokensExt` + = note: this error originates in the macro `$crate::quote_bind_into_iter` which comes from the expansion of the macro `quote` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0282]: type annotations needed + --> tests/ui/not-repeatable.rs:7:13 + | +7 | let _ = quote! 
{ #(#ip)* }; + | ^^^^^^^^^^^^^^^^^^ cannot infer type + | + = note: this error originates in the macro `$crate::quote_bind_next_or_break` which comes from the expansion of the macro `quote` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/vendor/quote/tests/ui/wrong-type-span.rs b/vendor/quote/tests/ui/wrong-type-span.rs new file mode 100644 index 00000000000000..d5601c8a06f278 --- /dev/null +++ b/vendor/quote/tests/ui/wrong-type-span.rs @@ -0,0 +1,7 @@ +use quote::quote_spanned; + +fn main() { + let span = ""; + let x = 0i32; + quote_spanned!(span=> #x); +} diff --git a/vendor/quote/tests/ui/wrong-type-span.stderr b/vendor/quote/tests/ui/wrong-type-span.stderr new file mode 100644 index 00000000000000..12ad3077036572 --- /dev/null +++ b/vendor/quote/tests/ui/wrong-type-span.stderr @@ -0,0 +1,10 @@ +error[E0308]: mismatched types + --> tests/ui/wrong-type-span.rs:6:5 + | +6 | quote_spanned!(span=> #x); + | ^^^^^^^^^^^^^^^^^^^^^^^^^ + | | + | expected `Span`, found `&str` + | expected due to this + | + = note: this error originates in the macro `quote_spanned` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/vendor/regex-automata/.cargo-checksum.json b/vendor/regex-automata/.cargo-checksum.json new file mode 100644 index 00000000000000..84c43cc7be1227 --- /dev/null +++ b/vendor/regex-automata/.cargo-checksum.json @@ -0,0 +1 @@ +{"files":{".cargo_vcs_info.json":"813e732fc5802cefc31ee0cc60fea807d4f208a6f21997ab4352e0d9bd6cfbc6","Cargo.lock":"ec00a0a78cc268058c0df851b46025cee60832179b5cbd7f81479611ada4485a","Cargo.toml":"01dd259ddf18d6b99e84f799b4709fdaca8fbcbd30cb2ac2fbabc6309e2db06f","Cargo.toml.orig":"62d643cae321c8f8b42ac9a05fcad92609900ef6974f504ccdca54e287e915c6","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"6485b8ed310d3f0340bf1ad1f47645069ce4069dcc6bb46c7d5c6faf41de1fdb","README.md":"a7cfb89cd6d4de6b6e8b18e872227e5f1e47d91345e377aa1a75affc18c53aa1","src/dfa/accel.rs":"73f8e0c492a6c669fafbb872020091d6bfa5236503c9c0922aa94fd939fb2b1e","src/dfa/automaton.rs":"a2af61cdfb7f16a8419a25ccb3ae250afe736ff397c7a3101c8a77781d096a9b","src/dfa/dense.rs":"ec34a23a36464fa1b57cac01411ab2bdf6f2df5e1a497c7779ab10fd55d2515b","src/dfa/determinize.rs":"d72dc41a7e93b9289370d2a4e7d8524612be1870283504c703f9c08f9f3b316c","src/dfa/minimize.rs":"b5cadb462b9f24cd4aa7a665e75fb813cd06858a92b8986c9c5ae7fd9a60dfab","src/dfa/mod.rs":"530a1025d516a6df949eee46009acc5ef58c9e6788ec3d76702811734d76212f","src/dfa/onepass.rs":"b59ef139772cd2378f112ebaf3f88d75cf62f1592246449be02e47dd5300eb70","src/dfa/regex.rs":"567c7a59ca194117986f1818c092b31f825e860fb1b2c55c7de87de97eebb787","src/dfa/remapper.rs":"ca096abc0f8e45c43a2adf3a7743b8857714ae7411a623edea41cc3ce906a169","src/dfa/search.rs":"79b9ab2b0636177bc26d1ad6f0059ca033decf74824cb5a36f1ac19f020d2713","src/dfa/sparse.rs":"a5fde187faaf88f4f5b5dfeb69a1387e7dc7803f00b7cb0335dc30b7331c2f7e","src/dfa/special.rs":"1b939ad4addf2efb87fcd1ae67d7818b72540c017d895846ab7968cec267aee1","src/dfa/start.rs":"46b1dbaf8e4518ddddda6bbe596621aae36f8ba694390483a22355d9d799be8e","src/hybrid/dfa.rs":"cd0e71ad86161c9a49c6023d9dde8e07895ad03a9586723c3fb1f1c14bdb7faa","src/hybrid/error.rs":"2bca7eb9ff3859d2b2f5afcf00e618f5671c43e32f5ce8e7ab9de44b906a9422","src/hybrid/id.rs":"a529d45c5a7dd5ed64a471d3fab5a8c6a7aa2bd64bb3a81d5d6f1fcca424d41d","src/hybrid/mod.rs":"ca21e89062bdb5a0998d5cd1bc78609af1f6b795533e5982be969c383ac0463a","src/hybrid/regex.rs":"47815d025526330291f4cd749b4dd79b1122ef208fe6f0a49715c
70fc1ea47c8","src/hybrid/search.rs":"76067f3f8675013dcdf7e9c9cc4d9d33d1107fb2cbcd7adcc05cfd42177d90cc","src/lib.rs":"47d562a98f5f50f7cbbffb1f103e277871c7419da05dbb2b0db78dee6e7b4c2e","src/macros.rs":"3e4b39252bfa471fad384160a43f113ebfec7bec46a85d16f006622881dd2081","src/meta/error.rs":"729ec5e2474ed2449fb47f3e0eeb65586ceeed0a6a67f00678f09eb5a46da931","src/meta/limited.rs":"182fb1b012a539cd091f0fa2f9c7806308c04293edcd4bae91a2a65904ea0f3e","src/meta/literal.rs":"2a4e71c5ffdd7b31f7f624a6a8bba3be0cddac1883ddbba6a01a48034a077978","src/meta/mod.rs":"f3b10b96fa08efaba3e4c9b81883cf40aac6e4c1f6ae55a497a534cf5805b46d","src/meta/regex.rs":"92295ff6a6b1e0e6d19fc1fe29679fa5681973160ee61e043d29bf29f44a65b5","src/meta/reverse_inner.rs":"945d6c2d4c7538e1609dbd430a096784d22abd33db58b1ba65c9c9af45a7d3c0","src/meta/stopat.rs":"acb6122e17d10a9b1b5e72d6030e6d95748227975bad0ff5cbbcc2587edfa6df","src/meta/strategy.rs":"3a59ea004755e34e3aeafd7e8c357e643b08554f5fb89e5cb3411a1de3637f26","src/meta/wrappers.rs":"d169ad27f3e5294fb4b2dcd6b179f72f741837ed6cb96d9d5cc654f40b9f43ae","src/nfa/mod.rs":"32a0ed46f4a0a9b4b81b53abf4aa7138e2fd456435897495fce74820b980d4d2","src/nfa/thompson/backtrack.rs":"041015ea153c1e485e9cf39ec60d1e51c7ab9e400ecd77cad2078af45775339b","src/nfa/thompson/builder.rs":"7adf6aba69171f6acd47fea0fec85ba589154fead83f2042a1c6fe9486aa4dbd","src/nfa/thompson/compiler.rs":"cc1fbc44f0106049f6c0020ee8beb879415bc2951bb53c9efdf76c8b6c2af816","src/nfa/thompson/error.rs":"12208c44486575f3ac505754e6559e0b93cac09351a9720ff63cd6fd548ba63d","src/nfa/thompson/literal_trie.rs":"3b5cf36842a31f8b50d820835e3959f878c0dedce0f17932bca449e1b6198651","src/nfa/thompson/map.rs":"fcd17ce7359b5179ef2e809fc9152dfa0b6c61d3d849d8c502497e1d0d8b0fa9","src/nfa/thompson/mod.rs":"dddbbd6f0e7076f369dd12a21aea4eb7e81e9c037d115201871e278cceed0a76","src/nfa/thompson/nfa.rs":"3ab46b912ece5218ba95e29e0c169b23a869796c7bcb138385c562a57c62a2a3","src/nfa/thompson/pikevm.rs":"230f879f05d7d9f868344407064d1e7a05131a13c0c90ab59d5a20f15af0ec56","src/nfa/thompson/range_trie.rs":"2304cab5cd580ca10961fbb14c75c163c2b7fcd29040622190d36f7935b446d3","src/util/alphabet.rs":"4f94d317459b43c7748e7a963935d61632fb70bd7c09dd9e536e354586d21df6","src/util/captures.rs":"81e48d060fc9bea41f98157676a0b262da54f6b1358be41e413c9e3e960f0155","src/util/determinize/mod.rs":"82f34f4e408aaf06f8e04a38a6c9ce0abdcc20b61581e29283ef099f84bc67a1","src/util/determinize/state.rs":"a850af545b7d0bd706f0bf72fdba504b6efdeea181763657109e10fef53aa88d","src/util/empty.rs":"13ec7d6cbd1520db5b4c1dae294f4419fa88d39d2bfc16f4ef258473d609f91c","src/util/escape.rs":"2c8275c56b75018a0d8f8363b5827eb620f2cb52e2e919d8dace2846e7e0cf3c","src/util/int.rs":"b863a62f8ba1edf24858416fc01f15b38bee7af2494ebeb037e1acbf0319415e","src/util/interpolate.rs":"1d716d26ed80beb0ba6526e1fb75fdb009b95122bf0907045237c7e9e4bfbe88","src/util/iter.rs":"d61335dc6b99b134d75c1b75e01f88e2dfe1174d48a36ac5a5e5efbc4c6114e2","src/util/lazy.rs":"116ff2eed0bb6d2aa574c812c74f732fb06c91beb1667e0e5d2a3210023d7db5","src/util/look.rs":"fca6dac7bf7b3b975f177db91e122af89e1510b3664d04210ca8b84738a08305","src/util/memchr.rs":"573109ce4983907083ae0b29a084a324b9b53da369b4d96f7f3a21fd5c8eb5c9","src/util/mod.rs":"6c828a493f0f88c8b515aee4f8faf91ba653eb07e8fc3c23c0524553410803f9","src/util/pool.rs":"22cd6f1a6fcabe6e1cb2759f6f7b87e64dfab8245fcf97b2ab2d3a6424015126","src/util/prefilter/aho_corasick.rs":"b5a56f0709ce718125256706234e1ff1bfa1c3bae2a7ccb72f679ca3d301bab6","src/util/prefilter/byteset.rs":"1c80fa432acc23223a75a5181e37c40034764dffe42410e4b77af6f2
4f48bd5c","src/util/prefilter/memchr.rs":"36c6fe6354b2e729db6830166dd4862e439bc48c9e59258d88e4b6c5654e20ef","src/util/prefilter/memmem.rs":"6f6ed9450b14abf3e4a33d395337e51fbaa9743a0a16aac0009f7680aa60c500","src/util/prefilter/mod.rs":"345787d5329a1712697700385979e6ee87925dd3447b1d5a0127c4fc222f0417","src/util/prefilter/teddy.rs":"ed54d26858b56e1c8c87e44afae5f63d81ab930787d79e671f3a3513f576e9cd","src/util/primitives.rs":"8a9cc19ef2e1ab183943cdc2d2f095b02252476e32b7e9fff4a06a251749b068","src/util/search.rs":"432720d85ede0fd4eac84069268bd4bb9c52c9680ddafa710ad01ee423e9d7fc","src/util/sparse_set.rs":"acbe7197f5e5fc95b45f54ba1e4e24f21226714af44a391e507d52b7b23cbaf6","src/util/start.rs":"1ab2dec7c452ae943118cd1c3b6becc84afba1fbb8b6894d81ef7d65141d95ab","src/util/syntax.rs":"cff4712c95fc5f94063e6e11a51b42d7678d5f5b82b492f11fcbb3928c3d6c8d","src/util/unicode_data/mod.rs":"54c3e10bbc393e9881bfac3295815b160f59e69e2056bc29ee7cf0addd8e3cf7","src/util/unicode_data/perl_word.rs":"30f073baae28ea34c373c7778c00f20c1621c3e644404eff031f7d1cc8e9c9e2","src/util/utf8.rs":"a02a559f0ec4013aa8bbc1a2d8717cfa0c82ed81577a4366fbbd8ef8660264fc","src/util/wire.rs":"f4eb8517d6d7ff165b39e829ce7ec7075ee40550878334456887abde486c5885","test":"39d79ce3532c31a51c0be89a2939816fad0e4868d2b03992c202cbe64dce9f6c","tests/dfa/api.rs":"c6ddbca1177c377a42bac1e19e79dc8c840a7a0af2042e6c3c08e46ba1a288fe","tests/dfa/mod.rs":"924d8fff500b9b7b140082623023e78007058a87323151cd8e361462945e4f16","tests/dfa/onepass/mod.rs":"d08f4ecb8ec243be584944c9602af1ed3a48a8732dd11cd573b0d1d182171303","tests/dfa/onepass/suite.rs":"f6a9cba40773db81fcd82ab605ba18ca415908f9857845e7621d47888cb67c91","tests/dfa/regression.rs":"ebcf2645290286aa7531eb2b7951385e5ed8167532437aeca2ad2049768fd796","tests/dfa/suite.rs":"26cfc5a89a2ceda338d15e9cde0aeb6a050ec4f751fb29b017eac54a9d9a0074","tests/fuzz/dense.rs":"3e1099a0cce61e85abc0ad81bc592e85f497f159ef0e5d1d32bac1936aa6f20c","tests/fuzz/mod.rs":"043773510e02f51def43ee0c2b8b867c53ecc8638c8a9233b2ac098de9c3ac1e","tests/fuzz/sparse.rs":"ba61db4927ab28953037a4b20317399c86d01b4d774e46c020ade19029215e25","tests/fuzz/testdata/deserialize_dense_crash-9486fb7c8a93b12c12a62166b43d31640c0208a9":"8961279a8237c3e318452024dd971b1d5a26b058260c297382a74daca1b7f0d1","tests/fuzz/testdata/deserialize_dense_minimized-from-9486fb7c8a93b12c12a62166b43d31640c0208a9":"c2d52e3dea78d3f159b5b521d433358a7fee45ce20ed1545067d461f45ef66b8","tests/fuzz/testdata/deserialize_sparse_crash-0da59c0434eaf35e5a6b470fa9244bb79c72b000":"5b2d273023de3fb04037eaf2e6b4f51cced4c5a08d2e6b44e4be540774f939b9","tests/fuzz/testdata/deserialize_sparse_crash-18cfc246f2ddfc3dfc92b0c7893178c7cf65efa9":"e2e22e2f46a9a75b5c876476442276cf675fe244c5cf918789e4f6b14078fbd9","tests/fuzz/testdata/deserialize_sparse_crash-61fd8e3003bf9d99f6c1e5a8488727eefd234b98":"24a12712e1f2ba0a40b5782707908a74dd19941dc372ef525d65a7134f91988c","tests/fuzz/testdata/deserialize_sparse_crash-a1b839d899ced76d5d7d0f78f9edb7a421505838":"a97f39b2febf9c73535681f7a86201e4b06d5a1ffcf135299c96c1cabfa9f6c4","tests/fuzz/testdata/deserialize_sparse_crash-c383ae07ec5e191422eadc492117439011816570":"44fe3ef878d35e2d51c2c17ff89bbbe3a4650e09d0cbbd48625c0f5e4dd0848b","tests/fuzz/testdata/deserialize_sparse_crash-d07703ceb94b10dcd9e4acb809f2051420449e2b":"d5534be36653b4af6cb94a7c63be58869bb8c204c5c63d67a4d6c986b44bb2e1","tests/fuzz/testdata/deserialize_sparse_crash-dbb8172d3984e7e7d03f4b5f8bb86ecd1460eff9":"77b844898610560afa09f2b8de73a85a0ba9a3b8cee4ff1bbf26b8c97ad4e8a2","tests/gen/README.md":"c36d7a7a0b8301234f861b6a94c6
8b4c6a8a8a5ac2a7a762acc241a96c9a8d46","tests/gen/dense/mod.rs":"5ae1cfb46212a674118ada2f66f37b25188e84643d406b95eb4665d722344262","tests/gen/dense/multi_pattern_v2.rs":"29b1e9a799adecbdbe7cd05e9748f664c2b915b10b1d2f5d36cfb6453826d1d2","tests/gen/dense/multi_pattern_v2_fwd.bigendian.dfa":"8421d5a1bfc0b6c3bdc8fc90dff591a046b0aaf8e06ef7de7cc293004a35d061","tests/gen/dense/multi_pattern_v2_fwd.littleendian.dfa":"dcf2fd5fd49f5f53cf1ec66f61623402f39401cb3aea30d6677b98bb1e9541bf","tests/gen/dense/multi_pattern_v2_rev.bigendian.dfa":"73c4f20d984e544dfa4cf05f3009d0a9b52fa84bc97b501ea0ccd179e2def4bc","tests/gen/dense/multi_pattern_v2_rev.littleendian.dfa":"74471209f05754e8e20c8a0222a5877b1b15b8b8f33cd8cac89ea65f708b4aff","tests/gen/mod.rs":"043773510e02f51def43ee0c2b8b867c53ecc8638c8a9233b2ac098de9c3ac1e","tests/gen/sparse/mod.rs":"5ae1cfb46212a674118ada2f66f37b25188e84643d406b95eb4665d722344262","tests/gen/sparse/multi_pattern_v2.rs":"e00fb2a510a215460aab84573196b1f51bb65884ff494c2382534c04f6fdbfe9","tests/gen/sparse/multi_pattern_v2_fwd.bigendian.dfa":"3287956bd2003cd69653b125f82aade95d99adbb20229bfdbb4958b8877c0a0b","tests/gen/sparse/multi_pattern_v2_fwd.littleendian.dfa":"bdf285901eaaac4596380115c5bbb20ab2f42f593d8d9e9238a00ed69863f9c9","tests/gen/sparse/multi_pattern_v2_rev.bigendian.dfa":"e466dc085dd68b2d2220932a0e4d28759edd161c1fdad652240aa3825fd85268","tests/gen/sparse/multi_pattern_v2_rev.littleendian.dfa":"80358d0c26c1cc7284065b0075f5b8804d83e673a8a8c8327f93a1c1ff455399","tests/hybrid/api.rs":"bd4862275c52f94c6f6737bf174c97e3de30f8075ca23f43c129c72a0d0afed7","tests/hybrid/mod.rs":"4856a49a4d9b5e9e079c2719a5e75c32408b37e9b76cbdea057b388a3537af6d","tests/hybrid/suite.rs":"d49081a07b13e923c9d31c211942439c015b970b2b9d2f38fd49935803e22bb1","tests/lib.rs":"9775b3c62fb338ea5c1bd3513a6589eff4b5c8d35c599439d9363dbf98c6f8d4","tests/meta/mod.rs":"d08f4ecb8ec243be584944c9602af1ed3a48a8732dd11cd573b0d1d182171303","tests/meta/suite.rs":"7cafd709c61481f2267de671768f880f8bbd4740f4cb523a449481abc80aa08a","tests/nfa/mod.rs":"49055c358e38d97e42acb1602c671f97dddf24cafe089490f0e79ed208d74d9b","tests/nfa/thompson/backtrack/mod.rs":"d08f4ecb8ec243be584944c9602af1ed3a48a8732dd11cd573b0d1d182171303","tests/nfa/thompson/backtrack/suite.rs":"c14b12ad3292b103d7f5be69c297c737adbeea65379cee12a75596601312c430","tests/nfa/thompson/mod.rs":"de9f5bcea1a8d1f03c85c55ad8c0747877d69e344fcd6c6886b0a402f0661291","tests/nfa/thompson/pikevm/mod.rs":"d08f4ecb8ec243be584944c9602af1ed3a48a8732dd11cd573b0d1d182171303","tests/nfa/thompson/pikevm/suite.rs":"cf21a58532f3dc8fd76df715093d1a9333b0c4072261b63c48ac8c86ca31fe25"},"package":"5276caf25ac86c8d810222b3dbb938e512c55c6831a10f3e6ed1c93b84041f1c"} \ No newline at end of file diff --git a/vendor/regex-automata/.cargo_vcs_info.json b/vendor/regex-automata/.cargo_vcs_info.json new file mode 100644 index 00000000000000..a8433855ae044e --- /dev/null +++ b/vendor/regex-automata/.cargo_vcs_info.json @@ -0,0 +1,6 @@ +{ + "git": { + "sha1": "ab0b07171b82d1d4fdc8359505d12b2e818514d4" + }, + "path_in_vcs": "regex-automata" +} \ No newline at end of file diff --git a/vendor/regex-automata/Cargo.lock b/vendor/regex-automata/Cargo.lock new file mode 100644 index 00000000000000..36522cdf2ff0d1 --- /dev/null +++ b/vendor/regex-automata/Cargo.lock @@ -0,0 +1,372 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. 
+version = 3 + +[[package]] +name = "aho-corasick" +version = "1.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916" +dependencies = [ + "log", + "memchr", +] + +[[package]] +name = "anyhow" +version = "1.0.100" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a23eb6b1614318a8071c9b2521f36b424b2c83db5eb3a0fead4a6c0809af6e61" + +[[package]] +name = "atty" +version = "0.2.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8" +dependencies = [ + "hermit-abi", + "libc", + "winapi", +] + +[[package]] +name = "bstr" +version = "1.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "234113d19d0d7d613b40e86fb654acf958910802bcceab913a4f9e7cda03b1a4" +dependencies = [ + "memchr", + "serde", +] + +[[package]] +name = "cfg-if" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2fd1289c04a9ea8cb22300a459a72a385d7c73d3259e2ed7dcb2af674838cfa9" + +[[package]] +name = "doc-comment" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fea41bba32d969b513997752735605054bc0dfa92b4c56bf1189f2e174be7a10" + +[[package]] +name = "env_logger" +version = "0.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a12e6657c4c97ebab115a42dcee77225f7f482cdd841cf7088c657a42e9e00e7" +dependencies = [ + "atty", + "humantime", + "log", + "termcolor", +] + +[[package]] +name = "equivalent" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "877a4ace8713b0bcf2a4e7eec82529c029f1d0619886d18145fea96c3ffe5c0f" + +[[package]] +name = "getrandom" +version = "0.2.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "335ff9f135e4384c8150d6f27c6daed433577f86b4750418338c01a1a2528592" +dependencies = [ + "cfg-if", + "libc", + "wasi", +] + +[[package]] +name = "hashbrown" +version = "0.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5419bdc4f6a9207fbeba6d11b604d481addf78ecd10c11ad51e76c2f6482748d" + +[[package]] +name = "hermit-abi" +version = "0.1.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33" +dependencies = [ + "libc", +] + +[[package]] +name = "humantime" +version = "2.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "135b12329e5e3ce057a9f972339ea52bc954fe1e9358ef27f95e89716fbc5424" + +[[package]] +name = "indexmap" +version = "2.11.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4b0f83760fb341a774ed326568e19f5a863af4a952def8c39f9ab92fd95b88e5" +dependencies = [ + "equivalent", + "hashbrown", +] + +[[package]] +name = "libc" +version = "0.2.177" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2874a2af47a2325c2001a6e6fad9b16a53b802102b528163885171cf92b15976" + +[[package]] +name = "log" +version = "0.4.28" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "34080505efa8e45a4b816c349525ebe327ceaa8559756f0356cba97ef3bf7432" + +[[package]] +name = "memchr" +version = "2.7.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f52b00d39961fc5b2736ea853c9cc86238e165017a493d1d5c8eac6bdc4cc273" +dependencies = [ 
+ "log", +] + +[[package]] +name = "proc-macro2" +version = "1.0.101" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "89ae43fd86e4158d6db51ad8e2b80f313af9cc74f5c0e03ccb87de09998732de" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "quickcheck" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "588f6378e4dd99458b60ec275b4477add41ce4fa9f64dcba6f15adccb19b50d6" +dependencies = [ + "rand", +] + +[[package]] +name = "quote" +version = "1.0.41" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ce25767e7b499d1b604768e7cde645d14cc8584231ea6b295e9c9eb22c02e1d1" +dependencies = [ + "proc-macro2", +] + +[[package]] +name = "rand" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" +dependencies = [ + "rand_core", +] + +[[package]] +name = "rand_core" +version = "0.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" +dependencies = [ + "getrandom", +] + +[[package]] +name = "regex-automata" +version = "0.4.13" +dependencies = [ + "aho-corasick", + "anyhow", + "bstr", + "doc-comment", + "env_logger", + "log", + "memchr", + "quickcheck", + "regex-syntax", + "regex-test", +] + +[[package]] +name = "regex-syntax" +version = "0.8.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a2d987857b319362043e95f5353c0535c1f58eec5336fdfcf626430af7def58" + +[[package]] +name = "regex-test" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "da40f0939bc4c598b4326abdbb363a8987aa43d0526e5624aefcf3ed90344e62" +dependencies = [ + "anyhow", + "bstr", + "serde", + "toml", +] + +[[package]] +name = "serde" +version = "1.0.228" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a8e94ea7f378bd32cbbd37198a4a91436180c5bb472411e48b5ec2e2124ae9e" +dependencies = [ + "serde_core", + "serde_derive", +] + +[[package]] +name = "serde_core" +version = "1.0.228" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41d385c7d4ca58e59fc732af25c3983b67ac852c1a25000afe1175de458b67ad" +dependencies = [ + "serde_derive", +] + +[[package]] +name = "serde_derive" +version = "1.0.228" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d540f220d3187173da220f885ab66608367b6574e925011a9353e4badda91d79" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "serde_spanned" +version = "0.6.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bf41e0cfaf7226dca15e8197172c295a782857fcb97fad1808a166870dee75a3" +dependencies = [ + "serde", +] + +[[package]] +name = "syn" +version = "2.0.106" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ede7c438028d4436d71104916910f5bb611972c5cfd7f89b8300a8186e6fada6" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "termcolor" +version = "1.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "06794f8f6c5c898b3275aebefa6b8a1cb24cd2c6c79397ab15774837a0bc5755" +dependencies = [ + "winapi-util", +] + +[[package]] +name = "toml" +version = "0.8.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"dc1beb996b9d83529a9e75c17a1686767d148d70663143c7854d8b4a09ced362" +dependencies = [ + "serde", + "serde_spanned", + "toml_datetime", + "toml_edit", +] + +[[package]] +name = "toml_datetime" +version = "0.6.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "22cddaf88f4fbc13c51aebbf5f8eceb5c7c5a9da2ac40a13519eb5b0a0e8f11c" +dependencies = [ + "serde", +] + +[[package]] +name = "toml_edit" +version = "0.22.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41fe8c660ae4257887cf66394862d21dbca4a6ddd26f04a3560410406a2f819a" +dependencies = [ + "indexmap", + "serde", + "serde_spanned", + "toml_datetime", + "winnow", +] + +[[package]] +name = "unicode-ident" +version = "1.0.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f63a545481291138910575129486daeaf8ac54aee4387fe7906919f7830c7d9d" + +[[package]] +name = "wasi" +version = "0.11.1+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ccf3ec651a847eb01de73ccad15eb7d99f80485de043efb2f370cd654f4ea44b" + +[[package]] +name = "winapi" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" +dependencies = [ + "winapi-i686-pc-windows-gnu", + "winapi-x86_64-pc-windows-gnu", +] + +[[package]] +name = "winapi-i686-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" + +[[package]] +name = "winapi-util" +version = "0.1.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c2a7b1c03c876122aa43f3020e6c3c3ee5c05081c9a00739faf7503aeba10d22" +dependencies = [ + "windows-sys", +] + +[[package]] +name = "winapi-x86_64-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" + +[[package]] +name = "windows-link" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0805222e57f7521d6a62e36fa9163bc891acd422f971defe97d64e70d0a4fe5" + +[[package]] +name = "windows-sys" +version = "0.61.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ae137229bcbd6cdf0f7b80a31df61766145077ddf49416a728b02cb3921ff3fc" +dependencies = [ + "windows-link", +] + +[[package]] +name = "winnow" +version = "0.7.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "21a0236b59786fed61e2a80582dd500fe61f18b5dca67a4a067d0bc9039339cf" +dependencies = [ + "memchr", +] diff --git a/vendor/regex-automata/Cargo.toml b/vendor/regex-automata/Cargo.toml new file mode 100644 index 00000000000000..ac58e53a2dde98 --- /dev/null +++ b/vendor/regex-automata/Cargo.toml @@ -0,0 +1,200 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies. +# +# If you are reading this file be aware that the original Cargo.toml +# will likely look very different (and much more reasonable). +# See Cargo.toml.orig for the original contents. 
+ +[package] +edition = "2021" +rust-version = "1.65" +name = "regex-automata" +version = "0.4.13" +authors = [ + "The Rust Project Developers", + "Andrew Gallant ", +] +build = false +autolib = false +autobins = false +autoexamples = false +autotests = false +autobenches = false +description = "Automata construction and matching using regular expressions." +homepage = "https://github.com/rust-lang/regex/tree/master/regex-automata" +documentation = "https://docs.rs/regex-automata" +readme = "README.md" +keywords = [ + "regex", + "dfa", + "automata", + "automaton", + "nfa", +] +categories = ["text-processing"] +license = "MIT OR Apache-2.0" +repository = "https://github.com/rust-lang/regex" + +[package.metadata.docs.rs] +all-features = true +rustdoc-args = [ + "--cfg", + "docsrs_regex", +] + +[features] +alloc = [] +default = [ + "std", + "syntax", + "perf", + "unicode", + "meta", + "nfa", + "dfa", + "hybrid", +] +dfa = [ + "dfa-build", + "dfa-search", + "dfa-onepass", +] +dfa-build = [ + "nfa-thompson", + "dfa-search", +] +dfa-onepass = ["nfa-thompson"] +dfa-search = [] +hybrid = [ + "alloc", + "nfa-thompson", +] +internal-instrument = ["internal-instrument-pikevm"] +internal-instrument-pikevm = [ + "logging", + "std", +] +logging = [ + "dep:log", + "aho-corasick?/logging", + "memchr?/logging", +] +meta = [ + "syntax", + "nfa-pikevm", +] +nfa = [ + "nfa-thompson", + "nfa-pikevm", + "nfa-backtrack", +] +nfa-backtrack = ["nfa-thompson"] +nfa-pikevm = ["nfa-thompson"] +nfa-thompson = ["alloc"] +perf = [ + "perf-inline", + "perf-literal", +] +perf-inline = [] +perf-literal = [ + "perf-literal-substring", + "perf-literal-multisubstring", +] +perf-literal-multisubstring = ["dep:aho-corasick"] +perf-literal-substring = [ + "aho-corasick?/perf-literal", + "dep:memchr", +] +std = [ + "regex-syntax?/std", + "memchr?/std", + "aho-corasick?/std", + "alloc", +] +syntax = [ + "dep:regex-syntax", + "alloc", +] +unicode = [ + "unicode-age", + "unicode-bool", + "unicode-case", + "unicode-gencat", + "unicode-perl", + "unicode-script", + "unicode-segment", + "unicode-word-boundary", + "regex-syntax?/unicode", +] +unicode-age = ["regex-syntax?/unicode-age"] +unicode-bool = ["regex-syntax?/unicode-bool"] +unicode-case = ["regex-syntax?/unicode-case"] +unicode-gencat = ["regex-syntax?/unicode-gencat"] +unicode-perl = ["regex-syntax?/unicode-perl"] +unicode-script = ["regex-syntax?/unicode-script"] +unicode-segment = ["regex-syntax?/unicode-segment"] +unicode-word-boundary = [] + +[lib] +name = "regex_automata" +path = "src/lib.rs" +bench = false + +[[test]] +name = "integration" +path = "tests/lib.rs" + +[dependencies.aho-corasick] +version = "1.0.0" +optional = true +default-features = false + +[dependencies.log] +version = "0.4.14" +optional = true + +[dependencies.memchr] +version = "2.6.0" +optional = true +default-features = false + +[dependencies.regex-syntax] +version = "0.8.5" +optional = true +default-features = false + +[dev-dependencies.anyhow] +version = "1.0.69" + +[dev-dependencies.bstr] +version = "1.3.0" +features = ["std"] +default-features = false + +[dev-dependencies.doc-comment] +version = "0.3.3" + +[dev-dependencies.env_logger] +version = "0.9.3" +features = [ + "atty", + "humantime", + "termcolor", +] +default-features = false + +[dev-dependencies.quickcheck] +version = "1.0.3" +default-features = false + +[dev-dependencies.regex-test] +version = "0.1.0" + +[lints.rust.unexpected_cfgs] +level = "allow" +priority = 0 +check-cfg = ["cfg(docsrs_regex)"] diff --git 
a/vendor/regex-automata/LICENSE-APACHE b/vendor/regex-automata/LICENSE-APACHE new file mode 100644 index 00000000000000..16fe87b06e802f --- /dev/null +++ b/vendor/regex-automata/LICENSE-APACHE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/vendor/regex-automata/LICENSE-MIT b/vendor/regex-automata/LICENSE-MIT new file mode 100644 index 00000000000000..39d4bdb5acd313 --- /dev/null +++ b/vendor/regex-automata/LICENSE-MIT @@ -0,0 +1,25 @@ +Copyright (c) 2014 The Rust Project Developers + +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. diff --git a/vendor/regex-automata/README.md b/vendor/regex-automata/README.md new file mode 100644 index 00000000000000..cb6e86c9f97cb5 --- /dev/null +++ b/vendor/regex-automata/README.md @@ -0,0 +1,117 @@ +regex-automata +============== +This crate exposes a variety of regex engines used by the `regex` crate. +It provides a vast, sprawling and "expert" level API to each regex engine. +The regex engines provided by this crate focus heavily on finite automata +implementations and specifically guarantee worst case `O(m * n)` time +complexity for all searches. (Where `m ~ len(regex)` and `n ~ len(haystack)`.) + +[![Build status](https://github.com/rust-lang/regex/workflows/ci/badge.svg)](https://github.com/rust-lang/regex/actions) +[![Crates.io](https://img.shields.io/crates/v/regex-automata.svg)](https://crates.io/crates/regex-automata) + + +### Documentation + +https://docs.rs/regex-automata + + +### Example + +This example shows how to search for matches of multiple regexes, where each +regex uses the same capture group names to parse different key-value formats. + +```rust +use regex_automata::{meta::Regex, PatternID}; + +let re = Regex::new_many(&[ + r#"(?m)^(?[[:word:]]+)=(?[[:word:]]+)$"#, + r#"(?m)^(?[[:word:]]+)="(?[^"]+)"$"#, + r#"(?m)^(?[[:word:]]+)='(?[^']+)'$"#, + r#"(?m)^(?[[:word:]]+):\s*(?[[:word:]]+)$"#, +]).unwrap(); +let hay = r#" +best_album="Blow Your Face Out" +best_quote='"then as it was, then again it will be"' +best_year=1973 +best_simpsons_episode: HOMR +"#; +let mut kvs = vec![]; +for caps in re.captures_iter(hay) { + // N.B. One could use capture indices '1' and '2' here + // as well. Capture indices are local to each pattern. + // (Just like names are.) 
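+    // Each named group's span is then used to slice the haystack and
+    // recover the matched text for that key/value pair.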
+ let key = &hay[caps.get_group_by_name("key").unwrap()]; + let val = &hay[caps.get_group_by_name("val").unwrap()]; + kvs.push((key, val)); +} +assert_eq!(kvs, vec![ + ("best_album", "Blow Your Face Out"), + ("best_quote", "\"then as it was, then again it will be\""), + ("best_year", "1973"), + ("best_simpsons_episode", "HOMR"), +]); +``` + + +### Safety + +**I welcome audits of `unsafe` code.** + +This crate tries to be extremely conservative in its use of `unsafe`, but does +use it in a few spots. In general, I am very open to removing uses of `unsafe` +if it doesn't result in measurable performance regressions and doesn't result +in significantly more complex code. + +Below is an outline of how `unsafe` is used in this crate. + +* `util::pool::Pool` makes use of `unsafe` to implement a fast path for +accessing an element of the pool. The fast path applies to the first thread +that uses the pool. In effect, the fast path is fast because it avoids a mutex +lock. `unsafe` is also used in the no-std version of `Pool` to implement a spin +lock for synchronization. +* `util::lazy::Lazy` uses `unsafe` to implement a variant of +`once_cell::sync::Lazy` that works in no-std environments. A no-std no-alloc +implementation is also provided that requires use of `unsafe`. +* The `dfa` module makes extensive use of `unsafe` to support zero-copy +deserialization of DFAs. The high level problem is that you need to get from +`&[u8]` to the internal representation of a DFA without doing any copies. +This is required for support in no-std no-alloc environments. It also makes +deserialization extremely cheap. +* The `dfa` and `hybrid` modules use `unsafe` to explicitly elide bounds checks +in the core search loops. This makes the codegen tighter and typically leads to +consistent 5-10% performance improvements on some workloads. + +In general, the above reflect the only uses of `unsafe` throughout the entire +`regex` crate. At present, there are no plans to meaningfully expand the use +of `unsafe`. With that said, one thing folks have been asking for is cheap +deserialization of a `regex::Regex`. My sense is that this feature will require +a lot more `unsafe` in places to support zero-copy deserialization. It is +unclear at this point whether this will be pursued. + + +### Motivation + +I started out building this crate because I wanted to re-work the `regex` +crate internals to make it more amenable to optimizations. It turns out that +there are a lot of different ways to build regex engines and even more ways to +compose them. Moreover, heuristic literal optimizations are often tricky to +get correct, but the fruit they bear is attractive. All of these things were +difficult to expand upon without risking the introduction of more bugs. So I +decided to tear things down and start fresh. + +In the course of doing so, I ended up designing strong boundaries between each +component so that each component could be reasoned and tested independently. +This also made it somewhat natural to expose the components as a library unto +itself. Namely, folks have been asking for more capabilities in the regex +crate for a long time, but these capabilities usually come with additional API +complexity that I didn't want to introduce in the `regex` crate proper. But +exposing them in an "expert" level crate like `regex-automata` seemed quite +fine. + +In the end, I do still somewhat consider this crate an experiment. 
It is +unclear whether the strong boundaries between components will be an impediment +to ongoing development or not. De-coupling tends to lead to slower development +in my experience, and when you mix in the added cost of not introducing +breaking changes all the time, things can get quite complicated. But, I +don't think anyone has ever release the internals of a regex engine as a +library before. So it will be interesting to see how it plays out! diff --git a/vendor/regex-automata/src/dfa/accel.rs b/vendor/regex-automata/src/dfa/accel.rs new file mode 100644 index 00000000000000..47c84604808913 --- /dev/null +++ b/vendor/regex-automata/src/dfa/accel.rs @@ -0,0 +1,517 @@ +// This module defines some core types for dealing with accelerated DFA states. +// Briefly, a DFA state can be "accelerated" if all of its transitions except +// for a few loop back to itself. This directly implies that the only way out +// of such a state is if a byte corresponding to one of those non-loopback +// transitions is found. Such states are often found in simple repetitions in +// non-Unicode regexes. For example, consider '(?-u)[^a]+a'. We can look at its +// DFA with regex-cli: +// +// $ regex-cli debug dense dfa -p '(?-u)[^a]+a' -BbC --no-table +// D 000000: +// Q 000001: +// *000002: +// A 000003: \x00-` => 3, a => 8, b-\xFF => 3 +// A 000004: \x00-` => 4, a => 7, b-\xFF => 4 +// 000005: \x00-` => 4, b-\xFF => 4 +// 000006: \x00-` => 3, a => 6, b-\xFF => 3 +// 000007: \x00-\xFF => 2, EOI => 2 +// 000008: \x00-\xFF => 2, EOI => 2 +// +// In particular, state 3 is accelerated (shown via the 'A' indicator) since +// the only way to leave that state once entered is to see an 'a' byte. If +// there is a long run of non-'a' bytes, then using something like 'memchr' +// to find the next 'a' byte can be significantly faster than just using the +// standard byte-at-a-time state machine. +// +// Unfortunately, this optimization rarely applies when Unicode is enabled. +// For example, patterns like '[^a]' don't actually match any byte that isn't +// 'a', but rather, any UTF-8 encoding of a Unicode scalar value that isn't +// 'a'. This makes the state machine much more complex---far beyond a single +// state---and removes the ability to easily accelerate it. (Because if the +// machine sees a non-UTF-8 sequence, then the machine won't match through it.) +// +// In practice, we only consider accelerating states that have 3 or fewer +// non-loop transitions. At a certain point, you get diminishing returns, but +// also because that's what the memchr crate supports. The structures below +// hard-code this assumption and provide (de)serialization APIs for use inside +// a DFA. +// +// And finally, note that there is some trickery involved in making it very +// fast to not only check whether a state is accelerated at search time, but +// also to access the bytes to search for to implement the acceleration itself. +// dfa/special.rs provides more detail, but the short story is that all +// accelerated states appear contiguously in a DFA. This means we can represent +// the ID space of all accelerated DFA states with a single range. 
So given
+// a state ID, we can determine whether it's accelerated via
+//
+//    min_accel_id <= id <= max_accel_id
+//
+// And find its corresponding accelerator with:
+//
+//    accels.get((id - min_accel_id) / dfa_stride)
+
+#[cfg(feature = "dfa-build")]
+use alloc::{vec, vec::Vec};
+
+use crate::util::{
+    int::Pointer,
+    memchr,
+    wire::{self, DeserializeError, Endian, SerializeError},
+};
+
+/// The base type used to represent a collection of accelerators.
+///
+/// While an `Accel` is represented as a fixed size array of bytes, a
+/// *collection* of `Accel`s (called `Accels`) is represented internally as a
+/// slice of u32. While it's a bit unnatural to do this and costs us a bit of
+/// fairly low-risk not-safe code, it lets us remove the need for a second type
+/// parameter in the definition of dense::DFA. (Which really wants everything
+/// to be a slice of u32.)
+type AccelTy = u32;
+
+/// The size of the unit of representation for accelerators.
+///
+/// ACCEL_CAP *must* be a multiple of this size.
+const ACCEL_TY_SIZE: usize = core::mem::size_of::<AccelTy>();
+
+/// The maximum length in bytes that a single Accel can be. This is distinct
+/// from the capacity of an accelerator in that the length represents only the
+/// bytes that should be read.
+const ACCEL_LEN: usize = 4;
+
+/// The capacity of each accelerator, in bytes. We set this to 8 since it's a
+/// multiple of 4 (our ID size) and because it gives us a little wiggle room
+/// if we want to support more accel bytes in the future without a breaking
+/// change.
+///
+/// This MUST be a multiple of ACCEL_TY_SIZE.
+const ACCEL_CAP: usize = 8;
+
+/// Search for between 1 and 3 needle bytes in the given haystack, starting the
+/// search at the given position. If `needles` has a length other than 1-3,
+/// then this panics.
+#[cfg_attr(feature = "perf-inline", inline(always))]
+pub(crate) fn find_fwd(
+    needles: &[u8],
+    haystack: &[u8],
+    at: usize,
+) -> Option<usize> {
+    let bs = needles;
+    let i = match needles.len() {
+        1 => memchr::memchr(bs[0], &haystack[at..])?,
+        2 => memchr::memchr2(bs[0], bs[1], &haystack[at..])?,
+        3 => memchr::memchr3(bs[0], bs[1], bs[2], &haystack[at..])?,
+        0 => panic!("cannot find with empty needles"),
+        n => panic!("invalid needles length: {n}"),
+    };
+    Some(at + i)
+}
+
+/// Search for between 1 and 3 needle bytes in the given haystack in reverse,
+/// starting the search at the given position. If `needles` has a length other
+/// than 1-3, then this panics.
+#[cfg_attr(feature = "perf-inline", inline(always))]
+pub(crate) fn find_rev(
+    needles: &[u8],
+    haystack: &[u8],
+    at: usize,
+) -> Option<usize> {
+    let bs = needles;
+    match needles.len() {
+        1 => memchr::memrchr(bs[0], &haystack[..at]),
+        2 => memchr::memrchr2(bs[0], bs[1], &haystack[..at]),
+        3 => memchr::memrchr3(bs[0], bs[1], bs[2], &haystack[..at]),
+        0 => panic!("cannot find with empty needles"),
+        n => panic!("invalid needles length: {n}"),
+    }
+}
+
+/// Represents the accelerators for all accelerated states in a dense DFA.
+///
+/// The `A` type parameter represents the type of the underlying bytes.
+/// Generally, this is either `&[AccelTy]` or `Vec<AccelTy>`.
+#[derive(Clone)]
+pub(crate) struct Accels<A> {
+    /// A length prefixed slice of contiguous accelerators. See the top comment
+    /// in this module for more details on how we can jump from a DFA's state
+    /// ID to an accelerator in this list.
+    ///
+    /// The first 4 bytes always correspond to the number of accelerators
+    /// that follow.
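+    /// Each accelerator then occupies `ACCEL_CAP` (8) bytes: a one-byte
+    /// length, up to three needle bytes, and zero padding.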
+ accels: A, +} + +#[cfg(feature = "dfa-build")] +impl Accels> { + /// Create an empty sequence of accelerators for a DFA. + pub fn empty() -> Accels> { + Accels { accels: vec![0] } + } + + /// Add an accelerator to this sequence. + /// + /// This adds to the accelerator to the end of the sequence and therefore + /// should be done in correspondence with its state in the DFA. + /// + /// This panics if this results in more accelerators than AccelTy::MAX. + pub fn add(&mut self, accel: Accel) { + self.accels.extend_from_slice(&accel.as_accel_tys()); + let len = self.len(); + self.set_len(len + 1); + } + + /// Set the number of accelerators in this sequence, which is encoded in + /// the first 4 bytes of the underlying bytes. + fn set_len(&mut self, new_len: usize) { + // The only way an accelerator gets added is if a state exists for + // it, and if a state exists, then its index is guaranteed to be + // representable by a AccelTy by virtue of the guarantees provided by + // StateID. + let new_len = AccelTy::try_from(new_len).unwrap(); + self.accels[0] = new_len; + } +} + +impl<'a> Accels<&'a [AccelTy]> { + /// Deserialize a sequence of accelerators from the given bytes. If there + /// was a problem deserializing, then an error is returned. + /// + /// This is guaranteed to run in constant time. This does not guarantee + /// that every accelerator in the returned collection is valid. Thus, + /// accessing one may panic, or not-safe code that relies on accelerators + /// being correct my result in UB. + /// + /// Callers may check the validity of every accelerator with the `validate` + /// method. + pub fn from_bytes_unchecked( + mut slice: &'a [u8], + ) -> Result<(Accels<&'a [AccelTy]>, usize), DeserializeError> { + let slice_start = slice.as_ptr().as_usize(); + + let (accel_len, _) = + wire::try_read_u32_as_usize(slice, "accelerators length")?; + // The accelerator length is part of the accel_tys slice that + // we deserialize. This is perhaps a bit idiosyncratic. It would + // probably be better to split out the length into a real field. + + let accel_tys_len = wire::add( + wire::mul(accel_len, 2, "total number of accelerator accel_tys")?, + 1, + "total number of accel_tys", + )?; + let accel_tys_bytes_len = wire::mul( + ACCEL_TY_SIZE, + accel_tys_len, + "total number of bytes in accelerators", + )?; + wire::check_slice_len(slice, accel_tys_bytes_len, "accelerators")?; + wire::check_alignment::(slice)?; + let accel_tys = &slice[..accel_tys_bytes_len]; + slice = &slice[accel_tys_bytes_len..]; + // SAFETY: We've checked the length and alignment above, and since + // slice is just bytes and AccelTy is just a u32, we can safely cast to + // a slice of &[AccelTy]. + let accels = unsafe { + core::slice::from_raw_parts( + accel_tys.as_ptr().cast::(), + accel_tys_len, + ) + }; + Ok((Accels { accels }, slice.as_ptr().as_usize() - slice_start)) + } +} + +impl> Accels { + /// Return an owned version of the accelerators. + #[cfg(feature = "alloc")] + pub fn to_owned(&self) -> Accels> { + Accels { accels: self.accels.as_ref().to_vec() } + } + + /// Return a borrowed version of the accelerators. + pub fn as_ref(&self) -> Accels<&[AccelTy]> { + Accels { accels: self.accels.as_ref() } + } + + /// Return the bytes representing the serialization of the accelerators. + pub fn as_bytes(&self) -> &[u8] { + let accels = self.accels.as_ref(); + // SAFETY: This is safe because accels is a just a slice of AccelTy, + // and u8 always has a smaller alignment. 
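+        // The element count is scaled from `AccelTy` units to bytes below.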
+ unsafe { + core::slice::from_raw_parts( + accels.as_ptr().cast::(), + accels.len() * ACCEL_TY_SIZE, + ) + } + } + + /// Returns the memory usage, in bytes, of these accelerators. + /// + /// The memory usage is computed based on the number of bytes used to + /// represent all of the accelerators. + /// + /// This does **not** include the stack size used by this value. + pub fn memory_usage(&self) -> usize { + self.as_bytes().len() + } + + /// Return the bytes to search for corresponding to the accelerator in this + /// sequence at index `i`. If no such accelerator exists, then this panics. + /// + /// The significance of the index is that it should be in correspondence + /// with the index of the corresponding DFA. That is, accelerated DFA + /// states are stored contiguously in the DFA and have an ordering implied + /// by their respective state IDs. The state's index in that sequence + /// corresponds to the index of its corresponding accelerator. + #[cfg_attr(feature = "perf-inline", inline(always))] + pub fn needles(&self, i: usize) -> &[u8] { + if i >= self.len() { + panic!("invalid accelerator index {i}"); + } + let bytes = self.as_bytes(); + let offset = ACCEL_TY_SIZE + i * ACCEL_CAP; + let len = usize::from(bytes[offset]); + &bytes[offset + 1..offset + 1 + len] + } + + /// Return the total number of accelerators in this sequence. + pub fn len(&self) -> usize { + // This should never panic since deserialization checks that the + // length can fit into a usize. + usize::try_from(self.accels.as_ref()[0]).unwrap() + } + + /// Return the accelerator in this sequence at index `i`. If no such + /// accelerator exists, then this returns None. + /// + /// See the docs for `needles` on the significance of the index. + fn get(&self, i: usize) -> Option { + if i >= self.len() { + return None; + } + let offset = ACCEL_TY_SIZE + i * ACCEL_CAP; + let accel = Accel::from_slice(&self.as_bytes()[offset..]) + .expect("Accels must contain valid accelerators"); + Some(accel) + } + + /// Returns an iterator of accelerators in this sequence. + fn iter(&self) -> IterAccels<'_, A> { + IterAccels { accels: self, i: 0 } + } + + /// Writes these accelerators to the given byte buffer using the indicated + /// endianness. If the given buffer is too small, then an error is + /// returned. Upon success, the total number of bytes written is returned. + /// The number of bytes written is guaranteed to be a multiple of 8. + pub fn write_to( + &self, + dst: &mut [u8], + ) -> Result { + let nwrite = self.write_to_len(); + assert_eq!( + nwrite % ACCEL_TY_SIZE, + 0, + "expected accelerator bytes written to be a multiple \ + of {ACCEL_TY_SIZE}", + ); + if dst.len() < nwrite { + return Err(SerializeError::buffer_too_small("accelerators")); + } + + // The number of accelerators can never exceed AccelTy::MAX. + E::write_u32(AccelTy::try_from(self.len()).unwrap(), dst); + // The actual accelerators are just raw bytes and thus their endianness + // is irrelevant. So we can copy them as bytes. + dst[ACCEL_TY_SIZE..nwrite] + .copy_from_slice(&self.as_bytes()[ACCEL_TY_SIZE..nwrite]); + Ok(nwrite) + } + + /// Validates that every accelerator in this collection can be successfully + /// deserialized as a valid accelerator. + pub fn validate(&self) -> Result<(), DeserializeError> { + for chunk in self.as_bytes()[ACCEL_TY_SIZE..].chunks(ACCEL_CAP) { + let _ = Accel::from_slice(chunk)?; + } + Ok(()) + } + + /// Returns the total number of bytes written by `write_to`. 
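+    /// (This is simply the length of `as_bytes`.)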
+ pub fn write_to_len(&self) -> usize { + self.as_bytes().len() + } +} + +impl> core::fmt::Debug for Accels { + fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { + write!(f, "Accels(")?; + let mut list = f.debug_list(); + for a in self.iter() { + list.entry(&a); + } + list.finish()?; + write!(f, ")") + } +} + +#[derive(Debug)] +struct IterAccels<'a, A: AsRef<[AccelTy]>> { + accels: &'a Accels, + i: usize, +} + +impl<'a, A: AsRef<[AccelTy]>> Iterator for IterAccels<'a, A> { + type Item = Accel; + + fn next(&mut self) -> Option { + let accel = self.accels.get(self.i)?; + self.i += 1; + Some(accel) + } +} + +/// Accel represents a structure for determining how to "accelerate" a DFA +/// state. +/// +/// Namely, it contains zero or more bytes that must be seen in order for the +/// DFA to leave the state it is associated with. In practice, the actual range +/// is 1 to 3 bytes. +/// +/// The purpose of acceleration is to identify states whose vast majority +/// of transitions are just loops back to the same state. For example, +/// in the regex `(?-u)^[^a]+b`, the corresponding DFA will have a state +/// (corresponding to `[^a]+`) where all transitions *except* for `a` and +/// `b` loop back to itself. Thus, this state can be "accelerated" by simply +/// looking for the next occurrence of either `a` or `b` instead of explicitly +/// following transitions. (In this case, `b` transitions to the next state +/// where as `a` would transition to the dead state.) +#[derive(Clone)] +pub(crate) struct Accel { + /// The first byte is the length. Subsequent bytes are the accelerated + /// bytes. + /// + /// Note that we make every accelerator 8 bytes as a slightly wasteful + /// way of making sure alignment is always correct for state ID sizes of + /// 1, 2, 4 and 8. This should be okay since accelerated states aren't + /// particularly common, especially when Unicode is enabled. + bytes: [u8; ACCEL_CAP], +} + +impl Accel { + /// Returns an empty accel, where no bytes are accelerated. + #[cfg(feature = "dfa-build")] + pub fn new() -> Accel { + Accel { bytes: [0; ACCEL_CAP] } + } + + /// Returns a verified accelerator derived from the beginning of the given + /// slice. + /// + /// If the slice is not long enough or contains invalid bytes for an + /// accelerator, then this returns an error. + pub fn from_slice(mut slice: &[u8]) -> Result { + slice = &slice[..core::cmp::min(ACCEL_LEN, slice.len())]; + let bytes = slice + .try_into() + .map_err(|_| DeserializeError::buffer_too_small("accelerator"))?; + Accel::from_bytes(bytes) + } + + /// Returns a verified accelerator derived from raw bytes. + /// + /// If the given bytes are invalid, then this returns an error. + fn from_bytes(bytes: [u8; 4]) -> Result { + if usize::from(bytes[0]) >= ACCEL_LEN { + return Err(DeserializeError::generic( + "accelerator bytes cannot have length more than 3", + )); + } + Ok(Accel::from_bytes_unchecked(bytes)) + } + + /// Returns an accelerator derived from raw bytes. + /// + /// This does not check whether the given bytes are valid. Invalid bytes + /// cannot sacrifice memory safety, but may result in panics or silent + /// logic bugs. + fn from_bytes_unchecked(bytes: [u8; 4]) -> Accel { + Accel { bytes: [bytes[0], bytes[1], bytes[2], bytes[3], 0, 0, 0, 0] } + } + + /// Attempts to add the given byte to this accelerator. If the accelerator + /// is already full or thinks the byte is a poor accelerator, then this + /// returns false. Otherwise, returns true. 
+ /// + /// If the given byte is already in this accelerator, then it panics. + #[cfg(feature = "dfa-build")] + pub fn add(&mut self, byte: u8) -> bool { + if self.len() >= 3 { + return false; + } + // As a special case, we totally reject trying to accelerate a state + // with an ASCII space. In most cases, it occurs very frequently, and + // tends to result in worse overall performance. + if byte == b' ' { + return false; + } + assert!( + !self.contains(byte), + "accelerator already contains {:?}", + crate::util::escape::DebugByte(byte) + ); + self.bytes[self.len() + 1] = byte; + self.bytes[0] += 1; + true + } + + /// Return the number of bytes in this accelerator. + pub fn len(&self) -> usize { + usize::from(self.bytes[0]) + } + + /// Returns true if and only if there are no bytes in this accelerator. + #[cfg(feature = "dfa-build")] + pub fn is_empty(&self) -> bool { + self.len() == 0 + } + + /// Returns the slice of bytes to accelerate. + /// + /// If this accelerator is empty, then this returns an empty slice. + fn needles(&self) -> &[u8] { + &self.bytes[1..1 + self.len()] + } + + /// Returns true if and only if this accelerator will accelerate the given + /// byte. + #[cfg(feature = "dfa-build")] + fn contains(&self, byte: u8) -> bool { + self.needles().iter().position(|&b| b == byte).is_some() + } + + /// Returns the accelerator bytes as an array of AccelTys. + #[cfg(feature = "dfa-build")] + fn as_accel_tys(&self) -> [AccelTy; 2] { + assert_eq!(ACCEL_CAP, 8); + // These unwraps are OK since ACCEL_CAP is set to 8. + let first = + AccelTy::from_ne_bytes(self.bytes[0..4].try_into().unwrap()); + let second = + AccelTy::from_ne_bytes(self.bytes[4..8].try_into().unwrap()); + [first, second] + } +} + +impl core::fmt::Debug for Accel { + fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { + write!(f, "Accel(")?; + let mut set = f.debug_set(); + for &b in self.needles() { + set.entry(&crate::util::escape::DebugByte(b)); + } + set.finish()?; + write!(f, ")") + } +} diff --git a/vendor/regex-automata/src/dfa/automaton.rs b/vendor/regex-automata/src/dfa/automaton.rs new file mode 100644 index 00000000000000..189700d83f05b9 --- /dev/null +++ b/vendor/regex-automata/src/dfa/automaton.rs @@ -0,0 +1,2260 @@ +#[cfg(feature = "alloc")] +use crate::util::search::PatternSet; +use crate::{ + dfa::search, + util::{ + empty, + prefilter::Prefilter, + primitives::{PatternID, StateID}, + search::{Anchored, HalfMatch, Input, MatchError}, + start, + }, +}; + +/// A trait describing the interface of a deterministic finite automaton (DFA). +/// +/// The complexity of this trait probably means that it's unlikely for others +/// to implement it. The primary purpose of the trait is to provide for a way +/// of abstracting over different types of DFAs. In this crate, that means +/// dense DFAs and sparse DFAs. (Dense DFAs are fast but memory hungry, where +/// as sparse DFAs are slower but come with a smaller memory footprint. But +/// they otherwise provide exactly equivalent expressive power.) For example, a +/// [`dfa::regex::Regex`](crate::dfa::regex::Regex) is generic over this trait. +/// +/// Normally, a DFA's execution model is very simple. You might have a single +/// start state, zero or more final or "match" states and a function that +/// transitions from one state to the next given the next byte of input. +/// Unfortunately, the interface described by this trait is significantly +/// more complicated than this. 
The complexity has a number of different +/// reasons, mostly motivated by performance, functionality or space savings: +/// +/// * A DFA can search for multiple patterns simultaneously. This +/// means extra information is returned when a match occurs. Namely, +/// a match is not just an offset, but an offset plus a pattern ID. +/// [`Automaton::pattern_len`] returns the number of patterns compiled into +/// the DFA, [`Automaton::match_len`] returns the total number of patterns +/// that match in a particular state and [`Automaton::match_pattern`] permits +/// iterating over the patterns that match in a particular state. +/// * A DFA can have multiple start states, and the choice of which start +/// state to use depends on the content of the string being searched and +/// position of the search, as well as whether the search is an anchored +/// search for a specific pattern in the DFA. Moreover, computing the start +/// state also depends on whether you're doing a forward or a reverse search. +/// [`Automaton::start_state_forward`] and [`Automaton::start_state_reverse`] +/// are used to compute the start state for forward and reverse searches, +/// respectively. +/// * All matches are delayed by one byte to support things like `$` and `\b` +/// at the end of a pattern. Therefore, every use of a DFA is required to use +/// [`Automaton::next_eoi_state`] +/// at the end of the search to compute the final transition. +/// * For optimization reasons, some states are treated specially. Every +/// state is either special or not, which can be determined via the +/// [`Automaton::is_special_state`] method. If it's special, then the state +/// must be at least one of a few possible types of states. (Note that some +/// types can overlap, for example, a match state can also be an accel state. +/// But some types can't. If a state is a dead state, then it can never be any +/// other type of state.) Those types are: +/// * A dead state. A dead state means the DFA will never enter a match +/// state. This can be queried via the [`Automaton::is_dead_state`] method. +/// * A quit state. A quit state occurs if the DFA had to stop the search +/// prematurely for some reason. This can be queried via the +/// [`Automaton::is_quit_state`] method. +/// * A match state. A match state occurs when a match is found. When a DFA +/// enters a match state, the search may stop immediately (when looking +/// for the earliest match), or it may continue to find the leftmost-first +/// match. This can be queried via the [`Automaton::is_match_state`] +/// method. +/// * A start state. A start state is where a search begins. For every +/// search, there is exactly one start state that is used, however, a +/// DFA may contain many start states. When the search is in a start +/// state, it may use a prefilter to quickly skip to candidate matches +/// without executing the DFA on every byte. This can be queried via the +/// [`Automaton::is_start_state`] method. +/// * An accel state. An accel state is a state that is accelerated. +/// That is, it is a state where _most_ of its transitions loop back to +/// itself and only a small number of transitions lead to other states. +/// This kind of state is said to be accelerated because a search routine +/// can quickly look for the bytes leading out of the state instead of +/// continuing to execute the DFA on each byte. This can be queried via the +/// [`Automaton::is_accel_state`] method. 
And the bytes that lead out of +/// the state can be queried via the [`Automaton::accelerator`] method. +/// +/// There are a number of provided methods on this trait that implement +/// efficient searching (for forwards and backwards) with a DFA using +/// all of the above features of this trait. In particular, given the +/// complexity of all these features, implementing a search routine in +/// this trait can be a little subtle. With that said, it is possible to +/// somewhat simplify the search routine. For example, handling accelerated +/// states is strictly optional, since it is always correct to assume that +/// `Automaton::is_accel_state` returns false. However, one complex part of +/// writing a search routine using this trait is handling the 1-byte delay of a +/// match. That is not optional. +/// +/// # Safety +/// +/// This trait is not safe to implement so that code may rely on the +/// correctness of implementations of this trait to avoid undefined behavior. +/// The primary correctness guarantees are: +/// +/// * `Automaton::start_state` always returns a valid state ID or an error or +/// panics. +/// * `Automaton::next_state`, when given a valid state ID, always returns +/// a valid state ID for all values of `anchored` and `byte`, or otherwise +/// panics. +/// +/// In general, the rest of the methods on `Automaton` need to uphold their +/// contracts as well. For example, `Automaton::is_dead` should only returns +/// true if the given state ID is actually a dead state. +pub unsafe trait Automaton { + /// Transitions from the current state to the next state, given the next + /// byte of input. + /// + /// Implementations must guarantee that the returned ID is always a valid + /// ID when `current` refers to a valid ID. Moreover, the transition + /// function must be defined for all possible values of `input`. + /// + /// # Panics + /// + /// If the given ID does not refer to a valid state, then this routine + /// may panic but it also may not panic and instead return an invalid ID. + /// However, if the caller provides an invalid ID then this must never + /// sacrifice memory safety. + /// + /// # Example + /// + /// This shows a simplistic example for walking a DFA for a given haystack + /// by using the `next_state` method. + /// + /// ``` + /// use regex_automata::{dfa::{Automaton, dense}, Input}; + /// + /// let dfa = dense::DFA::new(r"[a-z]+r")?; + /// let haystack = "bar".as_bytes(); + /// + /// // The start state is determined by inspecting the position and the + /// // initial bytes of the haystack. + /// let mut state = dfa.start_state_forward(&Input::new(haystack))?; + /// // Walk all the bytes in the haystack. + /// for &b in haystack { + /// state = dfa.next_state(state, b); + /// } + /// // Matches are always delayed by 1 byte, so we must explicitly walk the + /// // special "EOI" transition at the end of the search. + /// state = dfa.next_eoi_state(state); + /// assert!(dfa.is_match_state(state)); + /// + /// # Ok::<(), Box>(()) + /// ``` + fn next_state(&self, current: StateID, input: u8) -> StateID; + + /// Transitions from the current state to the next state, given the next + /// byte of input. + /// + /// Unlike [`Automaton::next_state`], implementations may implement this + /// more efficiently by assuming that the `current` state ID is valid. + /// Typically, this manifests by eliding bounds checks. + /// + /// # Safety + /// + /// Callers of this method must guarantee that `current` refers to a valid + /// state ID. 
If `current` is not a valid state ID for this automaton, then + /// calling this routine may result in undefined behavior. + /// + /// If `current` is valid, then implementations must guarantee that the ID + /// returned is valid for all possible values of `input`. + unsafe fn next_state_unchecked( + &self, + current: StateID, + input: u8, + ) -> StateID; + + /// Transitions from the current state to the next state for the special + /// EOI symbol. + /// + /// Implementations must guarantee that the returned ID is always a valid + /// ID when `current` refers to a valid ID. + /// + /// This routine must be called at the end of every search in a correct + /// implementation of search. Namely, DFAs in this crate delay matches + /// by one byte in order to support look-around operators. Thus, after + /// reaching the end of a haystack, a search implementation must follow one + /// last EOI transition. + /// + /// It is best to think of EOI as an additional symbol in the alphabet of + /// a DFA that is distinct from every other symbol. That is, the alphabet + /// of DFAs in this crate has a logical size of 257 instead of 256, where + /// 256 corresponds to every possible inhabitant of `u8`. (In practice, the + /// physical alphabet size may be smaller because of alphabet compression + /// via equivalence classes, but EOI is always represented somehow in the + /// alphabet.) + /// + /// # Panics + /// + /// If the given ID does not refer to a valid state, then this routine + /// may panic but it also may not panic and instead return an invalid ID. + /// However, if the caller provides an invalid ID then this must never + /// sacrifice memory safety. + /// + /// # Example + /// + /// This shows a simplistic example for walking a DFA for a given haystack, + /// and then finishing the search with the final EOI transition. + /// + /// ``` + /// use regex_automata::{dfa::{Automaton, dense}, Input}; + /// + /// let dfa = dense::DFA::new(r"[a-z]+r")?; + /// let haystack = "bar".as_bytes(); + /// + /// // The start state is determined by inspecting the position and the + /// // initial bytes of the haystack. + /// // + /// // The unwrap is OK because we aren't requesting a start state for a + /// // specific pattern. + /// let mut state = dfa.start_state_forward(&Input::new(haystack))?; + /// // Walk all the bytes in the haystack. + /// for &b in haystack { + /// state = dfa.next_state(state, b); + /// } + /// // Matches are always delayed by 1 byte, so we must explicitly walk + /// // the special "EOI" transition at the end of the search. Without this + /// // final transition, the assert below will fail since the DFA will not + /// // have entered a match state yet! + /// state = dfa.next_eoi_state(state); + /// assert!(dfa.is_match_state(state)); + /// + /// # Ok::<(), Box>(()) + /// ``` + fn next_eoi_state(&self, current: StateID) -> StateID; + + /// Return the ID of the start state for this DFA for the given starting + /// configuration. + /// + /// Unlike typical DFA implementations, the start state for DFAs in this + /// crate is dependent on a few different factors: + /// + /// * The [`Anchored`] mode of the search. Unanchored, anchored and + /// anchored searches for a specific [`PatternID`] all use different start + /// states. + /// * Whether a "look-behind" byte exists. For example, the `^` anchor + /// matches if and only if there is no look-behind byte. + /// * The specific value of that look-behind byte. 
For example, a `(?m:^)`
+ /// assertion only matches when there is either no look-behind byte, or
+ /// when the look-behind byte is a line terminator.
+ ///
+ /// The [starting configuration](start::Config) provides the above
+ /// information.
+ ///
+ /// This routine can be used for either forward or reverse searches.
+ /// Although, as a convenience, if you have an [`Input`], then it may
+ /// be more succinct to use [`Automaton::start_state_forward`] or
+ /// [`Automaton::start_state_reverse`]. Note, for example, that the
+ /// convenience routines return a [`MatchError`] on failure whereas this
+ /// routine returns a [`StartError`].
+ ///
+ /// # Errors
+ ///
+ /// This may return a [`StartError`] if the search needs to give up when
+ /// determining the start state (for example, if it sees a "quit" byte).
+ /// This can also return an error if the given configuration contains an
+ /// unsupported [`Anchored`] configuration.
+ fn start_state(
+ &self,
+ config: &start::Config,
+ ) -> Result<StateID, StartError>;
+
+ /// Return the ID of the start state for this DFA when executing a forward
+ /// search.
+ ///
+ /// This is a convenience routine for calling [`Automaton::start_state`]
+ /// that converts the given [`Input`] to a [start
+ /// configuration](start::Config). Additionally, if an error occurs, it is
+ /// converted from a [`StartError`] to a [`MatchError`] using the offset
+ /// information in the given [`Input`].
+ ///
+ /// # Errors
+ ///
+ /// This may return a [`MatchError`] if the search needs to give up
+ /// when determining the start state (for example, if it sees a "quit"
+ /// byte). This can also return an error if the given `Input` contains an
+ /// unsupported [`Anchored`] configuration.
+ fn start_state_forward(
+ &self,
+ input: &Input<'_>,
+ ) -> Result<StateID, MatchError> {
+ let config = start::Config::from_input_forward(input);
+ self.start_state(&config).map_err(|err| match err {
+ StartError::Quit { byte } => {
+ let offset = input
+ .start()
+ .checked_sub(1)
+ .expect("no quit in start without look-behind");
+ MatchError::quit(byte, offset)
+ }
+ StartError::UnsupportedAnchored { mode } => {
+ MatchError::unsupported_anchored(mode)
+ }
+ })
+ }
+
+ /// Return the ID of the start state for this DFA when executing a reverse
+ /// search.
+ ///
+ /// This is a convenience routine for calling [`Automaton::start_state`]
+ /// that converts the given [`Input`] to a [start
+ /// configuration](start::Config). Additionally, if an error occurs, it is
+ /// converted from a [`StartError`] to a [`MatchError`] using the offset
+ /// information in the given [`Input`].
+ ///
+ /// # Errors
+ ///
+ /// This may return a [`MatchError`] if the search needs to give up
+ /// when determining the start state (for example, if it sees a "quit"
+ /// byte). This can also return an error if the given `Input` contains an
+ /// unsupported [`Anchored`] configuration.
+ fn start_state_reverse(
+ &self,
+ input: &Input<'_>,
+ ) -> Result<StateID, MatchError> {
+ let config = start::Config::from_input_reverse(input);
+ self.start_state(&config).map_err(|err| match err {
+ StartError::Quit { byte } => {
+ let offset = input.end();
+ MatchError::quit(byte, offset)
+ }
+ StartError::UnsupportedAnchored { mode } => {
+ MatchError::unsupported_anchored(mode)
+ }
+ })
+ }
+
+ /// If this DFA has a universal starting state for the given anchor mode
+ /// and the DFA supports universal starting states, then this returns that
+ /// state's identifier.
+ ///
+ /// A DFA is said to have a universal starting state when the starting
+ /// state is invariant with respect to the haystack. Usually, the starting
+ /// state is chosen depending on the bytes immediately surrounding the
+ /// starting position of a search. However, the starting state only differs
+ /// when one or more of the patterns in the DFA have look-around assertions
+ /// in their prefix.
+ ///
+ /// Stated differently, if none of the patterns in a DFA have look-around
+ /// assertions in their prefix, then the DFA has a universal starting state
+ /// that _may_ be returned by this method.
+ ///
+ /// It is always correct for implementations to return `None`, and indeed,
+ /// this is what the default implementation does. When this returns `None`,
+ /// callers must use either `start_state_forward` or `start_state_reverse`
+ /// to get the starting state.
+ ///
+ /// # Use case
+ ///
+ /// There are a few reasons why one might want to use this:
+ ///
+ /// * If you know your regex patterns have no look-around assertions in
+ /// their prefix, then calling this routine is likely cheaper and perhaps
+ /// more semantically meaningful.
+ /// * When implementing prefilter support in a DFA regex implementation,
+ /// it is necessary to re-compute the start state after a candidate
+ /// is returned from the prefilter. However, this is only needed when
+ /// there isn't a universal start state. When one exists, one can avoid
+ /// re-computing the start state.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// use regex_automata::{
+ /// dfa::{Automaton, dense::DFA},
+ /// Anchored,
+ /// };
+ ///
+ /// // There are no look-around assertions in the prefixes of any of the
+ /// // patterns, so we get a universal start state.
+ /// let dfa = DFA::new_many(&["[0-9]+", "[a-z]+$", "[A-Z]+"])?;
+ /// assert!(dfa.universal_start_state(Anchored::No).is_some());
+ /// assert!(dfa.universal_start_state(Anchored::Yes).is_some());
+ ///
+ /// // One of the patterns has a look-around assertion in its prefix,
+ /// // so this means there is no longer a universal start state.
+ /// let dfa = DFA::new_many(&["[0-9]+", "^[a-z]+$", "[A-Z]+"])?;
+ /// assert!(!dfa.universal_start_state(Anchored::No).is_some());
+ /// assert!(!dfa.universal_start_state(Anchored::Yes).is_some());
+ /// # Ok::<(), Box<dyn std::error::Error>>(())
+ /// ```
+ #[inline]
+ fn universal_start_state(&self, _mode: Anchored) -> Option<StateID> {
+ None
+ }
+
+ /// Returns true if and only if the given identifier corresponds to a
+ /// "special" state. A special state is one or more of the following:
+ /// a dead state, a quit state, a match state, a start state or an
+ /// accelerated state.
+ ///
+ /// A correct implementation _may_ always return false for states that
+ /// are either start states or accelerated states, since that information
+ /// is only intended to be used for optimization purposes. Correct
+ /// implementations must return true if the state is a dead, quit or match
+ /// state. This is because search routines using this trait must be able
+ /// to rely on `is_special_state` as an indicator that a state may need
+ /// special treatment. (For example, when a search routine sees a dead
+ /// state, it must terminate.)
+ ///
+ /// This routine permits search implementations to use a single branch to
+ /// check whether a state needs special attention before executing the next
+ /// transition. The example below shows how to do this.
+ /// + /// # Example + /// + /// This example shows how `is_special_state` can be used to implement a + /// correct search routine with minimal branching. In particular, this + /// search routine implements "leftmost" matching, which means that it + /// doesn't immediately stop once a match is found. Instead, it continues + /// until it reaches a dead state. + /// + /// ``` + /// use regex_automata::{ + /// dfa::{Automaton, dense}, + /// HalfMatch, MatchError, Input, + /// }; + /// + /// fn find( + /// dfa: &A, + /// haystack: &[u8], + /// ) -> Result, MatchError> { + /// // The start state is determined by inspecting the position and the + /// // initial bytes of the haystack. Note that start states can never + /// // be match states (since DFAs in this crate delay matches by 1 + /// // byte), so we don't need to check if the start state is a match. + /// let mut state = dfa.start_state_forward(&Input::new(haystack))?; + /// let mut last_match = None; + /// // Walk all the bytes in the haystack. We can quit early if we see + /// // a dead or a quit state. The former means the automaton will + /// // never transition to any other state. The latter means that the + /// // automaton entered a condition in which its search failed. + /// for (i, &b) in haystack.iter().enumerate() { + /// state = dfa.next_state(state, b); + /// if dfa.is_special_state(state) { + /// if dfa.is_match_state(state) { + /// last_match = Some(HalfMatch::new( + /// dfa.match_pattern(state, 0), + /// i, + /// )); + /// } else if dfa.is_dead_state(state) { + /// return Ok(last_match); + /// } else if dfa.is_quit_state(state) { + /// // It is possible to enter into a quit state after + /// // observing a match has occurred. In that case, we + /// // should return the match instead of an error. + /// if last_match.is_some() { + /// return Ok(last_match); + /// } + /// return Err(MatchError::quit(b, i)); + /// } + /// // Implementors may also want to check for start or accel + /// // states and handle them differently for performance + /// // reasons. But it is not necessary for correctness. + /// } + /// } + /// // Matches are always delayed by 1 byte, so we must explicitly walk + /// // the special "EOI" transition at the end of the search. + /// state = dfa.next_eoi_state(state); + /// if dfa.is_match_state(state) { + /// last_match = Some(HalfMatch::new( + /// dfa.match_pattern(state, 0), + /// haystack.len(), + /// )); + /// } + /// Ok(last_match) + /// } + /// + /// // We use a greedy '+' operator to show how the search doesn't just + /// // stop once a match is detected. It continues extending the match. + /// // Using '[a-z]+?' would also work as expected and stop the search + /// // early. Greediness is built into the automaton. + /// let dfa = dense::DFA::new(r"[a-z]+")?; + /// let haystack = "123 foobar 4567".as_bytes(); + /// let mat = find(&dfa, haystack)?.unwrap(); + /// assert_eq!(mat.pattern().as_usize(), 0); + /// assert_eq!(mat.offset(), 10); + /// + /// // Here's another example that tests our handling of the special EOI + /// // transition. This will fail to find a match if we don't call + /// // 'next_eoi_state' at the end of the search since the match isn't + /// // found until the final byte in the haystack. 
+ /// let dfa = dense::DFA::new(r"[0-9]{4}")?; + /// let haystack = "123 foobar 4567".as_bytes(); + /// let mat = find(&dfa, haystack)?.unwrap(); + /// assert_eq!(mat.pattern().as_usize(), 0); + /// assert_eq!(mat.offset(), 15); + /// + /// // And note that our search implementation above automatically works + /// // with multi-DFAs. Namely, `dfa.match_pattern(match_state, 0)` selects + /// // the appropriate pattern ID for us. + /// let dfa = dense::DFA::new_many(&[r"[a-z]+", r"[0-9]+"])?; + /// let haystack = "123 foobar 4567".as_bytes(); + /// let mat = find(&dfa, haystack)?.unwrap(); + /// assert_eq!(mat.pattern().as_usize(), 1); + /// assert_eq!(mat.offset(), 3); + /// let mat = find(&dfa, &haystack[3..])?.unwrap(); + /// assert_eq!(mat.pattern().as_usize(), 0); + /// assert_eq!(mat.offset(), 7); + /// let mat = find(&dfa, &haystack[10..])?.unwrap(); + /// assert_eq!(mat.pattern().as_usize(), 1); + /// assert_eq!(mat.offset(), 5); + /// + /// # Ok::<(), Box>(()) + /// ``` + fn is_special_state(&self, id: StateID) -> bool; + + /// Returns true if and only if the given identifier corresponds to a dead + /// state. When a DFA enters a dead state, it is impossible to leave. That + /// is, every transition on a dead state by definition leads back to the + /// same dead state. + /// + /// In practice, the dead state always corresponds to the identifier `0`. + /// Moreover, in practice, there is only one dead state. + /// + /// The existence of a dead state is not strictly required in the classical + /// model of finite state machines, where one generally only cares about + /// the question of whether an input sequence matches or not. Dead states + /// are not needed to answer that question, since one can immediately quit + /// as soon as one enters a final or "match" state. However, we don't just + /// care about matches but also care about the location of matches, and + /// more specifically, care about semantics like "greedy" matching. + /// + /// For example, given the pattern `a+` and the input `aaaz`, the dead + /// state won't be entered until the state machine reaches `z` in the + /// input, at which point, the search routine can quit. But without the + /// dead state, the search routine wouldn't know when to quit. In a + /// classical representation, the search routine would stop after seeing + /// the first `a` (which is when the search would enter a match state). But + /// this wouldn't implement "greedy" matching where `a+` matches as many + /// `a`'s as possible. + /// + /// # Example + /// + /// See the example for [`Automaton::is_special_state`] for how to use this + /// method correctly. + fn is_dead_state(&self, id: StateID) -> bool; + + /// Returns true if and only if the given identifier corresponds to a quit + /// state. A quit state is like a dead state (it has no transitions other + /// than to itself), except it indicates that the DFA failed to complete + /// the search. When this occurs, callers can neither accept or reject that + /// a match occurred. + /// + /// In practice, the quit state always corresponds to the state immediately + /// following the dead state. (Which is not usually represented by `1`, + /// since state identifiers are pre-multiplied by the state machine's + /// alphabet stride, and the alphabet stride varies between DFAs.) 
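+ ///
+ /// As a rough sketch of how a quit state can surface in practice (this
+ /// relies on the heuristic Unicode word boundary option discussed just
+ /// below; the specific pattern and haystack are only for illustration),
+ /// searching a haystack whose first byte is non-ASCII makes the DFA give
+ /// up before any match can be found:
+ ///
+ /// ```
+ /// use regex_automata::{dfa::{Automaton, dense}, Input};
+ ///
+ /// let dfa = dense::Builder::new()
+ /// .configure(dense::Config::new().unicode_word_boundary(true))
+ /// .build(r"\b[0-9]+\b")?;
+ /// // The snowman's first byte (0xE2) is in the quit set, so the DFA
+ /// // enters a quit state and the search reports an error.
+ /// assert!(dfa.try_search_fwd(&Input::new("☃ 123")).is_err());
+ /// # Ok::<(), Box<dyn std::error::Error>>(())
+ /// ```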
+ /// + /// The typical way in which a quit state can occur is when heuristic + /// support for Unicode word boundaries is enabled via the + /// [`dense::Config::unicode_word_boundary`](crate::dfa::dense::Config::unicode_word_boundary) + /// option. But other options, like the lower level + /// [`dense::Config::quit`](crate::dfa::dense::Config::quit) + /// configuration, can also result in a quit state being entered. The + /// purpose of the quit state is to provide a way to execute a fast DFA + /// in common cases while delegating to slower routines when the DFA quits. + /// + /// The default search implementations provided by this crate will return a + /// [`MatchError::quit`] error when a quit state is entered. + /// + /// # Example + /// + /// See the example for [`Automaton::is_special_state`] for how to use this + /// method correctly. + fn is_quit_state(&self, id: StateID) -> bool; + + /// Returns true if and only if the given identifier corresponds to a + /// match state. A match state is also referred to as a "final" state and + /// indicates that a match has been found. + /// + /// If all you care about is whether a particular pattern matches in the + /// input sequence, then a search routine can quit early as soon as the + /// machine enters a match state. However, if you're looking for the + /// standard "leftmost-first" match location, then search _must_ continue + /// until either the end of the input or until the machine enters a dead + /// state. (Since either condition implies that no other useful work can + /// be done.) Namely, when looking for the location of a match, then + /// search implementations should record the most recent location in + /// which a match state was entered, but otherwise continue executing the + /// search as normal. (The search may even leave the match state.) Once + /// the termination condition is reached, the most recently recorded match + /// location should be returned. + /// + /// Finally, one additional power given to match states in this crate + /// is that they are always associated with a specific pattern in order + /// to support multi-DFAs. See [`Automaton::match_pattern`] for more + /// details and an example for how to query the pattern associated with a + /// particular match state. + /// + /// # Example + /// + /// See the example for [`Automaton::is_special_state`] for how to use this + /// method correctly. + fn is_match_state(&self, id: StateID) -> bool; + + /// Returns true only if the given identifier corresponds to a start + /// state + /// + /// A start state is a state in which a DFA begins a search. + /// All searches begin in a start state. Moreover, since all matches are + /// delayed by one byte, a start state can never be a match state. + /// + /// The main role of a start state is, as mentioned, to be a starting + /// point for a DFA. This starting point is determined via one of + /// [`Automaton::start_state_forward`] or + /// [`Automaton::start_state_reverse`], depending on whether one is doing + /// a forward or a reverse search, respectively. + /// + /// A secondary use of start states is for prefix acceleration. Namely, + /// while executing a search, if one detects that you're in a start state, + /// then it may be faster to look for the next match of a prefix of the + /// pattern, if one exists. If a prefix exists and since all matches must + /// begin with that prefix, then skipping ahead to occurrences of that + /// prefix may be much faster than executing the DFA. 
+ /// + /// As mentioned in the documentation for + /// [`is_special_state`](Automaton::is_special_state) implementations + /// _may_ always return false, even if the given identifier is a start + /// state. This is because knowing whether a state is a start state or not + /// is not necessary for correctness and is only treated as a potential + /// performance optimization. (For example, the implementations of this + /// trait in this crate will only return true when the given identifier + /// corresponds to a start state and when [specialization of start + /// states](crate::dfa::dense::Config::specialize_start_states) was enabled + /// during DFA construction. If start state specialization is disabled + /// (which is the default), then this method will always return false.) + /// + /// # Example + /// + /// This example shows how to implement your own search routine that does + /// a prefix search whenever the search enters a start state. + /// + /// Note that you do not need to implement your own search routine + /// to make use of prefilters like this. The search routines + /// provided by this crate already implement prefilter support via + /// the [`Prefilter`](crate::util::prefilter::Prefilter) trait. + /// A prefilter can be added to your search configuration with + /// [`dense::Config::prefilter`](crate::dfa::dense::Config::prefilter) for + /// dense and sparse DFAs in this crate. + /// + /// This example is meant to show how you might deal with prefilters in a + /// simplified case if you are implementing your own search routine. + /// + /// ``` + /// use regex_automata::{ + /// dfa::{Automaton, dense}, + /// HalfMatch, MatchError, Input, + /// }; + /// + /// fn find_byte(slice: &[u8], at: usize, byte: u8) -> Option { + /// // Would be faster to use the memchr crate, but this is still + /// // faster than running through the DFA. + /// slice[at..].iter().position(|&b| b == byte).map(|i| at + i) + /// } + /// + /// fn find( + /// dfa: &A, + /// haystack: &[u8], + /// prefix_byte: Option, + /// ) -> Result, MatchError> { + /// // See the Automaton::is_special_state example for similar code + /// // with more comments. + /// + /// let mut state = dfa.start_state_forward(&Input::new(haystack))?; + /// let mut last_match = None; + /// let mut pos = 0; + /// while pos < haystack.len() { + /// let b = haystack[pos]; + /// state = dfa.next_state(state, b); + /// pos += 1; + /// if dfa.is_special_state(state) { + /// if dfa.is_match_state(state) { + /// last_match = Some(HalfMatch::new( + /// dfa.match_pattern(state, 0), + /// pos - 1, + /// )); + /// } else if dfa.is_dead_state(state) { + /// return Ok(last_match); + /// } else if dfa.is_quit_state(state) { + /// // It is possible to enter into a quit state after + /// // observing a match has occurred. In that case, we + /// // should return the match instead of an error. + /// if last_match.is_some() { + /// return Ok(last_match); + /// } + /// return Err(MatchError::quit(b, pos - 1)); + /// } else if dfa.is_start_state(state) { + /// // If we're in a start state and know all matches begin + /// // with a particular byte, then we can quickly skip to + /// // candidate matches without running the DFA through + /// // every byte inbetween. 
+ /// if let Some(prefix_byte) = prefix_byte { + /// pos = match find_byte(haystack, pos, prefix_byte) { + /// Some(pos) => pos, + /// None => break, + /// }; + /// } + /// } + /// } + /// } + /// // Matches are always delayed by 1 byte, so we must explicitly walk + /// // the special "EOI" transition at the end of the search. + /// state = dfa.next_eoi_state(state); + /// if dfa.is_match_state(state) { + /// last_match = Some(HalfMatch::new( + /// dfa.match_pattern(state, 0), + /// haystack.len(), + /// )); + /// } + /// Ok(last_match) + /// } + /// + /// // In this example, it's obvious that all occurrences of our pattern + /// // begin with 'Z', so we pass in 'Z'. Note also that we need to + /// // enable start state specialization, or else it won't be possible to + /// // detect start states during a search. ('is_start_state' would always + /// // return false.) + /// let dfa = dense::DFA::builder() + /// .configure(dense::DFA::config().specialize_start_states(true)) + /// .build(r"Z[a-z]+")?; + /// let haystack = "123 foobar Zbaz quux".as_bytes(); + /// let mat = find(&dfa, haystack, Some(b'Z'))?.unwrap(); + /// assert_eq!(mat.pattern().as_usize(), 0); + /// assert_eq!(mat.offset(), 15); + /// + /// // But note that we don't need to pass in a prefix byte. If we don't, + /// // then the search routine does no acceleration. + /// let mat = find(&dfa, haystack, None)?.unwrap(); + /// assert_eq!(mat.pattern().as_usize(), 0); + /// assert_eq!(mat.offset(), 15); + /// + /// // However, if we pass an incorrect byte, then the prefix search will + /// // result in incorrect results. + /// assert_eq!(find(&dfa, haystack, Some(b'X'))?, None); + /// + /// # Ok::<(), Box>(()) + /// ``` + fn is_start_state(&self, id: StateID) -> bool; + + /// Returns true if and only if the given identifier corresponds to an + /// accelerated state. + /// + /// An accelerated state is a special optimization + /// trick implemented by this crate. Namely, if + /// [`dense::Config::accelerate`](crate::dfa::dense::Config::accelerate) is + /// enabled (and it is by default), then DFAs generated by this crate will + /// tag states meeting certain characteristics as accelerated. States meet + /// this criteria whenever most of their transitions are self-transitions. + /// That is, transitions that loop back to the same state. When a small + /// number of transitions aren't self-transitions, then it follows that + /// there are only a small number of bytes that can cause the DFA to leave + /// that state. Thus, there is an opportunity to look for those bytes + /// using more optimized routines rather than continuing to run through + /// the DFA. This trick is similar to the prefilter idea described in + /// the documentation of [`Automaton::is_start_state`] with two main + /// differences: + /// + /// 1. It is more limited since acceleration only applies to single bytes. + /// This means states are rarely accelerated when Unicode mode is enabled + /// (which is enabled by default). + /// 2. It can occur anywhere in the DFA, which increases optimization + /// opportunities. + /// + /// Like the prefilter idea, the main downside (and a possible reason to + /// disable it) is that it can lead to worse performance in some cases. + /// Namely, if a state is accelerated for very common bytes, then the + /// overhead of checking for acceleration and using the more optimized + /// routines to look for those bytes can cause overall performance to be + /// worse than if acceleration wasn't enabled at all. 
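+ ///
+ /// If profiling shows that acceleration hurts more than it helps for a
+ /// particular workload, it can simply be disabled when the DFA is built.
+ /// A minimal sketch (the pattern and syntax configuration below are only
+ /// illustrative):
+ ///
+ /// ```
+ /// use regex_automata::{dfa::{dense, Automaton}, util::syntax};
+ ///
+ /// // Build a DFA with state acceleration turned off. The automaton is
+ /// // still correct; it simply never takes the accelerated fast paths.
+ /// let dfa = dense::Builder::new()
+ /// .configure(dense::Config::new().accelerate(false))
+ /// .syntax(syntax::Config::new().unicode(false).utf8(false))
+ /// .build(r"[^a]+a")?;
+ /// assert_eq!(dfa.pattern_len(), 1);
+ /// # Ok::<(), Box<dyn std::error::Error>>(())
+ /// ```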
+ /// + /// A simple example of a regex that has an accelerated state is + /// `(?-u)[^a]+a`. Namely, the `[^a]+` sub-expression gets compiled down + /// into a single state where all transitions except for `a` loop back to + /// itself, and where `a` is the only transition (other than the special + /// EOI transition) that goes to some other state. Thus, this state can + /// be accelerated and implemented more efficiently by calling an + /// optimized routine like `memchr` with `a` as the needle. Notice that + /// the `(?-u)` to disable Unicode is necessary here, as without it, + /// `[^a]` will match any UTF-8 encoding of any Unicode scalar value other + /// than `a`. This more complicated expression compiles down to many DFA + /// states and the simple acceleration optimization is no longer available. + /// + /// Typically, this routine is used to guard calls to + /// [`Automaton::accelerator`], which returns the accelerated bytes for + /// the specified state. + fn is_accel_state(&self, id: StateID) -> bool; + + /// Returns the total number of patterns compiled into this DFA. + /// + /// In the case of a DFA that contains no patterns, this must return `0`. + /// + /// # Example + /// + /// This example shows the pattern length for a DFA that never matches: + /// + /// ``` + /// use regex_automata::dfa::{Automaton, dense::DFA}; + /// + /// let dfa: DFA> = DFA::never_match()?; + /// assert_eq!(dfa.pattern_len(), 0); + /// # Ok::<(), Box>(()) + /// ``` + /// + /// And another example for a DFA that matches at every position: + /// + /// ``` + /// use regex_automata::dfa::{Automaton, dense::DFA}; + /// + /// let dfa: DFA> = DFA::always_match()?; + /// assert_eq!(dfa.pattern_len(), 1); + /// # Ok::<(), Box>(()) + /// ``` + /// + /// And finally, a DFA that was constructed from multiple patterns: + /// + /// ``` + /// use regex_automata::dfa::{Automaton, dense::DFA}; + /// + /// let dfa = DFA::new_many(&["[0-9]+", "[a-z]+", "[A-Z]+"])?; + /// assert_eq!(dfa.pattern_len(), 3); + /// # Ok::<(), Box>(()) + /// ``` + fn pattern_len(&self) -> usize; + + /// Returns the total number of patterns that match in this state. + /// + /// If the given state is not a match state, then implementations may + /// panic. + /// + /// If the DFA was compiled with one pattern, then this must necessarily + /// always return `1` for all match states. + /// + /// Implementations must guarantee that [`Automaton::match_pattern`] can be + /// called with indices up to (but not including) the length returned by + /// this routine without panicking. + /// + /// # Panics + /// + /// Implementations are permitted to panic if the provided state ID does + /// not correspond to a match state. + /// + /// # Example + /// + /// This example shows a simple instance of implementing overlapping + /// matches. In particular, it shows not only how to determine how many + /// patterns have matched in a particular state, but also how to access + /// which specific patterns have matched. + /// + /// Notice that we must use + /// [`MatchKind::All`](crate::MatchKind::All) + /// when building the DFA. If we used + /// [`MatchKind::LeftmostFirst`](crate::MatchKind::LeftmostFirst) + /// instead, then the DFA would not be constructed in a way that + /// supports overlapping matches. (It would only report a single pattern + /// that matches at any particular point in time.) + /// + /// Another thing to take note of is the patterns used and the order in + /// which the pattern IDs are reported. 
In the example below, pattern `3` + /// is yielded first. Why? Because it corresponds to the match that + /// appears first. Namely, the `@` symbol is part of `\S+` but not part + /// of any of the other patterns. Since the `\S+` pattern has a match that + /// starts to the left of any other pattern, its ID is returned before any + /// other. + /// + /// ``` + /// # if cfg!(miri) { return Ok(()); } // miri takes too long + /// use regex_automata::{dfa::{Automaton, dense}, Input, MatchKind}; + /// + /// let dfa = dense::Builder::new() + /// .configure(dense::Config::new().match_kind(MatchKind::All)) + /// .build_many(&[ + /// r"[[:word:]]+", r"[a-z]+", r"[A-Z]+", r"[[:^space:]]+", + /// ])?; + /// let haystack = "@bar".as_bytes(); + /// + /// // The start state is determined by inspecting the position and the + /// // initial bytes of the haystack. + /// let mut state = dfa.start_state_forward(&Input::new(haystack))?; + /// // Walk all the bytes in the haystack. + /// for &b in haystack { + /// state = dfa.next_state(state, b); + /// } + /// state = dfa.next_eoi_state(state); + /// + /// assert!(dfa.is_match_state(state)); + /// assert_eq!(dfa.match_len(state), 3); + /// // The following calls are guaranteed to not panic since `match_len` + /// // returned `3` above. + /// assert_eq!(dfa.match_pattern(state, 0).as_usize(), 3); + /// assert_eq!(dfa.match_pattern(state, 1).as_usize(), 0); + /// assert_eq!(dfa.match_pattern(state, 2).as_usize(), 1); + /// + /// # Ok::<(), Box>(()) + /// ``` + fn match_len(&self, id: StateID) -> usize; + + /// Returns the pattern ID corresponding to the given match index in the + /// given state. + /// + /// See [`Automaton::match_len`] for an example of how to use this + /// method correctly. Note that if you know your DFA is compiled with a + /// single pattern, then this routine is never necessary since it will + /// always return a pattern ID of `0` for an index of `0` when `id` + /// corresponds to a match state. + /// + /// Typically, this routine is used when implementing an overlapping + /// search, as the example for `Automaton::match_len` does. + /// + /// # Panics + /// + /// If the state ID is not a match state or if the match index is out + /// of bounds for the given state, then this routine may either panic + /// or produce an incorrect result. If the state ID is correct and the + /// match index is correct, then this routine must always produce a valid + /// `PatternID`. + fn match_pattern(&self, id: StateID, index: usize) -> PatternID; + + /// Returns true if and only if this automaton can match the empty string. + /// When it returns false, all possible matches are guaranteed to have a + /// non-zero length. + /// + /// This is useful as cheap way to know whether code needs to handle the + /// case of a zero length match. This is particularly important when UTF-8 + /// modes are enabled, as when UTF-8 mode is enabled, empty matches that + /// split a codepoint must never be reported. This extra handling can + /// sometimes be costly, and since regexes matching an empty string are + /// somewhat rare, it can be beneficial to treat such regexes specially. + /// + /// # Example + /// + /// This example shows a few different DFAs and whether they match the + /// empty string or not. Notice the empty string isn't merely a matter + /// of a string of length literally `0`, but rather, whether a match can + /// occur between specific pairs of bytes. 
+ /// + /// ``` + /// use regex_automata::{dfa::{dense::DFA, Automaton}, util::syntax}; + /// + /// // The empty regex matches the empty string. + /// let dfa = DFA::new("")?; + /// assert!(dfa.has_empty(), "empty matches empty"); + /// // The '+' repetition operator requires at least one match, and so + /// // does not match the empty string. + /// let dfa = DFA::new("a+")?; + /// assert!(!dfa.has_empty(), "+ does not match empty"); + /// // But the '*' repetition operator does. + /// let dfa = DFA::new("a*")?; + /// assert!(dfa.has_empty(), "* does match empty"); + /// // And wrapping '+' in an operator that can match an empty string also + /// // causes it to match the empty string too. + /// let dfa = DFA::new("(a+)*")?; + /// assert!(dfa.has_empty(), "+ inside of * matches empty"); + /// + /// // If a regex is just made of a look-around assertion, even if the + /// // assertion requires some kind of non-empty string around it (such as + /// // \b), then it is still treated as if it matches the empty string. + /// // Namely, if a match occurs of just a look-around assertion, then the + /// // match returned is empty. + /// let dfa = DFA::builder() + /// .configure(DFA::config().unicode_word_boundary(true)) + /// .syntax(syntax::Config::new().utf8(false)) + /// .build(r"^$\A\z\b\B(?-u:\b\B)")?; + /// assert!(dfa.has_empty(), "assertions match empty"); + /// // Even when an assertion is wrapped in a '+', it still matches the + /// // empty string. + /// let dfa = DFA::new(r"^+")?; + /// assert!(dfa.has_empty(), "+ of an assertion matches empty"); + /// + /// // An alternation with even one branch that can match the empty string + /// // is also said to match the empty string overall. + /// let dfa = DFA::new("foo|(bar)?|quux")?; + /// assert!(dfa.has_empty(), "alternations can match empty"); + /// + /// // An NFA that matches nothing does not match the empty string. + /// let dfa = DFA::new("[a&&b]")?; + /// assert!(!dfa.has_empty(), "never matching means not matching empty"); + /// // But if it's wrapped in something that doesn't require a match at + /// // all, then it can match the empty string! + /// let dfa = DFA::new("[a&&b]*")?; + /// assert!(dfa.has_empty(), "* on never-match still matches empty"); + /// // Since a '+' requires a match, using it on something that can never + /// // match will itself produce a regex that can never match anything, + /// // and thus does not match the empty string. + /// let dfa = DFA::new("[a&&b]+")?; + /// assert!(!dfa.has_empty(), "+ on never-match still matches nothing"); + /// + /// # Ok::<(), Box>(()) + /// ``` + fn has_empty(&self) -> bool; + + /// Whether UTF-8 mode is enabled for this DFA or not. + /// + /// When UTF-8 mode is enabled, all matches reported by a DFA are + /// guaranteed to correspond to spans of valid UTF-8. This includes + /// zero-width matches. For example, the DFA must guarantee that the empty + /// regex will not match at the positions between code units in the UTF-8 + /// encoding of a single codepoint. + /// + /// See [`thompson::Config::utf8`](crate::nfa::thompson::Config::utf8) for + /// more information. + /// + /// # Example + /// + /// This example shows how UTF-8 mode can impact the match spans that may + /// be reported in certain cases. + /// + /// ``` + /// use regex_automata::{ + /// dfa::{dense::DFA, Automaton}, + /// nfa::thompson, + /// HalfMatch, Input, + /// }; + /// + /// // UTF-8 mode is enabled by default. 
+ /// let re = DFA::new("")?; + /// assert!(re.is_utf8()); + /// let mut input = Input::new("☃"); + /// let got = re.try_search_fwd(&input)?; + /// assert_eq!(Some(HalfMatch::must(0, 0)), got); + /// + /// // Even though an empty regex matches at 1..1, our next match is + /// // 3..3 because 1..1 and 2..2 split the snowman codepoint (which is + /// // three bytes long). + /// input.set_start(1); + /// let got = re.try_search_fwd(&input)?; + /// assert_eq!(Some(HalfMatch::must(0, 3)), got); + /// + /// // But if we disable UTF-8, then we'll get matches at 1..1 and 2..2: + /// let re = DFA::builder() + /// .thompson(thompson::Config::new().utf8(false)) + /// .build("")?; + /// assert!(!re.is_utf8()); + /// let got = re.try_search_fwd(&input)?; + /// assert_eq!(Some(HalfMatch::must(0, 1)), got); + /// + /// input.set_start(2); + /// let got = re.try_search_fwd(&input)?; + /// assert_eq!(Some(HalfMatch::must(0, 2)), got); + /// + /// input.set_start(3); + /// let got = re.try_search_fwd(&input)?; + /// assert_eq!(Some(HalfMatch::must(0, 3)), got); + /// + /// input.set_start(4); + /// let got = re.try_search_fwd(&input)?; + /// assert_eq!(None, got); + /// + /// # Ok::<(), Box>(()) + /// ``` + fn is_utf8(&self) -> bool; + + /// Returns true if and only if this DFA is limited to returning matches + /// whose start position is `0`. + /// + /// Note that if you're using DFAs provided by + /// this crate, then this is _orthogonal_ to + /// [`Config::start_kind`](crate::dfa::dense::Config::start_kind). + /// + /// This is useful in some cases because if a DFA is limited to producing + /// matches that start at offset `0`, then a reverse search is never + /// required for finding the start of a match. + /// + /// # Example + /// + /// ``` + /// use regex_automata::dfa::{dense::DFA, Automaton}; + /// + /// // The empty regex matches anywhere + /// let dfa = DFA::new("")?; + /// assert!(!dfa.is_always_start_anchored(), "empty matches anywhere"); + /// // 'a' matches anywhere. + /// let dfa = DFA::new("a")?; + /// assert!(!dfa.is_always_start_anchored(), "'a' matches anywhere"); + /// // '^' only matches at offset 0! + /// let dfa = DFA::new("^a")?; + /// assert!(dfa.is_always_start_anchored(), "'^a' matches only at 0"); + /// // But '(?m:^)' matches at 0 but at other offsets too. + /// let dfa = DFA::new("(?m:^)a")?; + /// assert!(!dfa.is_always_start_anchored(), "'(?m:^)a' matches anywhere"); + /// + /// # Ok::<(), Box>(()) + /// ``` + fn is_always_start_anchored(&self) -> bool; + + /// Return a slice of bytes to accelerate for the given state, if possible. + /// + /// If the given state has no accelerator, then an empty slice must be + /// returned. If `Automaton::is_accel_state` returns true for the given ID, + /// then this routine _must_ return a non-empty slice. But note that it is + /// not required for an implementation of this trait to ever return `true` + /// for `is_accel_state`, even if the state _could_ be accelerated. That + /// is, acceleration is an optional optimization. But the return values of + /// `is_accel_state` and `accelerator` must be in sync. + /// + /// If the given ID is not a valid state ID for this automaton, then + /// implementations may panic or produce incorrect results. + /// + /// See [`Automaton::is_accel_state`] for more details on state + /// acceleration. + /// + /// By default, this method will always return an empty slice. 
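+ ///
+ /// The contract between [`Automaton::is_accel_state`] and this method can
+ /// be sketched as a small helper that a hand-written search loop might
+ /// use. This helper is hypothetical and not part of this crate:
+ ///
+ /// ```
+ /// use regex_automata::{dfa::Automaton, util::primitives::StateID};
+ ///
+ /// // Returns the bytes to accelerate on, if the state is accelerated.
+ /// // By contract, the slice is non-empty whenever `is_accel_state`
+ /// // returns true for a valid state ID.
+ /// fn accel_bytes<A: Automaton>(dfa: &A, id: StateID) -> Option<&[u8]> {
+ /// if dfa.is_accel_state(id) {
+ /// Some(dfa.accelerator(id))
+ /// } else {
+ /// None
+ /// }
+ /// }
+ /// ```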
+ /// + /// # Example + /// + /// This example shows a contrived case in which we build a regex that we + /// know is accelerated and extract the accelerator from a state. + /// + /// ``` + /// use regex_automata::{ + /// dfa::{Automaton, dense}, + /// util::{primitives::StateID, syntax}, + /// }; + /// + /// let dfa = dense::Builder::new() + /// // We disable Unicode everywhere and permit the regex to match + /// // invalid UTF-8. e.g., [^abc] matches \xFF, which is not valid + /// // UTF-8. If we left Unicode enabled, [^abc] would match any UTF-8 + /// // encoding of any Unicode scalar value except for 'a', 'b' or 'c'. + /// // That translates to a much more complicated DFA, and also + /// // inhibits the 'accelerator' optimization that we are trying to + /// // demonstrate in this example. + /// .syntax(syntax::Config::new().unicode(false).utf8(false)) + /// .build("[^abc]+a")?; + /// + /// // Here we just pluck out the state that we know is accelerated. + /// // While the stride calculations are something that can be relied + /// // on by callers, the specific position of the accelerated state is + /// // implementation defined. + /// // + /// // N.B. We get '3' by inspecting the state machine using 'regex-cli'. + /// // e.g., try `regex-cli debug dense dfa -p '[^abc]+a' -BbUC`. + /// let id = StateID::new(3 * dfa.stride()).unwrap(); + /// let accelerator = dfa.accelerator(id); + /// // The `[^abc]+` sub-expression permits [a, b, c] to be accelerated. + /// assert_eq!(accelerator, &[b'a', b'b', b'c']); + /// # Ok::<(), Box>(()) + /// ``` + #[inline] + fn accelerator(&self, _id: StateID) -> &[u8] { + &[] + } + + /// Returns the prefilter associated with a DFA, if one exists. + /// + /// The default implementation of this trait always returns `None`. And + /// indeed, it is always correct to return `None`. + /// + /// For DFAs in this crate, a prefilter can be attached to a DFA via + /// [`dense::Config::prefilter`](crate::dfa::dense::Config::prefilter). + /// + /// Do note that prefilters are not serialized by DFAs in this crate. + /// So if you deserialize a DFA that had a prefilter attached to it + /// at serialization time, then it will not have a prefilter after + /// deserialization. + #[inline] + fn get_prefilter(&self) -> Option<&Prefilter> { + None + } + + /// Executes a forward search and returns the end position of the leftmost + /// match that is found. If no match exists, then `None` is returned. + /// + /// In particular, this method continues searching even after it enters + /// a match state. The search only terminates once it has reached the + /// end of the input or when it has entered a dead or quit state. Upon + /// termination, the position of the last byte seen while still in a match + /// state is returned. + /// + /// # Errors + /// + /// This routine errors if the search could not complete. This can occur + /// in a number of circumstances: + /// + /// * The configuration of the DFA may permit it to "quit" the search. + /// For example, setting quit bytes or enabling heuristic support for + /// Unicode word boundaries. The default configuration does not enable any + /// option that could result in the DFA quitting. + /// * When the provided `Input` configuration is not supported. For + /// example, by providing an unsupported anchor mode. + /// + /// When a search returns an error, callers cannot know whether a match + /// exists or not. 
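+ ///
+ /// As a rough sketch of the first error case above (the quit byte and
+ /// pattern are arbitrary choices for illustration):
+ ///
+ /// ```
+ /// use regex_automata::{dfa::{Automaton, dense}, Input};
+ ///
+ /// // Make the DFA give up whenever it sees a NUL byte.
+ /// let dfa = dense::Builder::new()
+ /// .configure(dense::Config::new().quit(b'\x00', true))
+ /// .build(r"foo[0-9]+")?;
+ /// // The quit byte appears before any match can be completed, so the
+ /// // search returns an error rather than a match.
+ /// assert!(dfa.try_search_fwd(&Input::new(b"foo\x00123")).is_err());
+ /// # Ok::<(), Box<dyn std::error::Error>>(())
+ /// ```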
+ /// + /// # Notes for implementors + /// + /// Implementors of this trait are not required to implement any particular + /// match semantics (such as leftmost-first), which are instead manifest in + /// the DFA's transitions. But this search routine should behave as a + /// general "leftmost" search. + /// + /// In particular, this method must continue searching even after it enters + /// a match state. The search should only terminate once it has reached + /// the end of the input or when it has entered a dead or quit state. Upon + /// termination, the position of the last byte seen while still in a match + /// state is returned. + /// + /// Since this trait provides an implementation for this method by default, + /// it's unlikely that one will need to implement this. + /// + /// # Example + /// + /// This example shows how to use this method with a + /// [`dense::DFA`](crate::dfa::dense::DFA). + /// + /// ``` + /// use regex_automata::{dfa::{Automaton, dense}, HalfMatch, Input}; + /// + /// let dfa = dense::DFA::new("foo[0-9]+")?; + /// let expected = Some(HalfMatch::must(0, 8)); + /// assert_eq!(expected, dfa.try_search_fwd(&Input::new(b"foo12345"))?); + /// + /// // Even though a match is found after reading the first byte (`a`), + /// // the leftmost first match semantics demand that we find the earliest + /// // match that prefers earlier parts of the pattern over latter parts. + /// let dfa = dense::DFA::new("abc|a")?; + /// let expected = Some(HalfMatch::must(0, 3)); + /// assert_eq!(expected, dfa.try_search_fwd(&Input::new(b"abc"))?); + /// + /// # Ok::<(), Box>(()) + /// ``` + /// + /// # Example: specific pattern search + /// + /// This example shows how to build a multi-DFA that permits searching for + /// specific patterns. + /// + /// ``` + /// # if cfg!(miri) { return Ok(()); } // miri takes too long + /// use regex_automata::{ + /// dfa::{Automaton, dense}, + /// Anchored, HalfMatch, PatternID, Input, + /// }; + /// + /// let dfa = dense::Builder::new() + /// .configure(dense::Config::new().starts_for_each_pattern(true)) + /// .build_many(&["[a-z0-9]{6}", "[a-z][a-z0-9]{5}"])?; + /// let haystack = "foo123".as_bytes(); + /// + /// // Since we are using the default leftmost-first match and both + /// // patterns match at the same starting position, only the first pattern + /// // will be returned in this case when doing a search for any of the + /// // patterns. + /// let expected = Some(HalfMatch::must(0, 6)); + /// let got = dfa.try_search_fwd(&Input::new(haystack))?; + /// assert_eq!(expected, got); + /// + /// // But if we want to check whether some other pattern matches, then we + /// // can provide its pattern ID. + /// let input = Input::new(haystack) + /// .anchored(Anchored::Pattern(PatternID::must(1))); + /// let expected = Some(HalfMatch::must(1, 6)); + /// let got = dfa.try_search_fwd(&input)?; + /// assert_eq!(expected, got); + /// + /// # Ok::<(), Box>(()) + /// ``` + /// + /// # Example: specifying the bounds of a search + /// + /// This example shows how providing the bounds of a search can produce + /// different results than simply sub-slicing the haystack. + /// + /// ``` + /// use regex_automata::{dfa::{Automaton, dense}, HalfMatch, Input}; + /// + /// // N.B. We disable Unicode here so that we use a simple ASCII word + /// // boundary. Alternatively, we could enable heuristic support for + /// // Unicode word boundaries. 
+ /// let dfa = dense::DFA::new(r"(?-u)\b[0-9]{3}\b")?; + /// let haystack = "foo123bar".as_bytes(); + /// + /// // Since we sub-slice the haystack, the search doesn't know about the + /// // larger context and assumes that `123` is surrounded by word + /// // boundaries. And of course, the match position is reported relative + /// // to the sub-slice as well, which means we get `3` instead of `6`. + /// let input = Input::new(&haystack[3..6]); + /// let expected = Some(HalfMatch::must(0, 3)); + /// let got = dfa.try_search_fwd(&input)?; + /// assert_eq!(expected, got); + /// + /// // But if we provide the bounds of the search within the context of the + /// // entire haystack, then the search can take the surrounding context + /// // into account. (And if we did find a match, it would be reported + /// // as a valid offset into `haystack` instead of its sub-slice.) + /// let input = Input::new(haystack).range(3..6); + /// let expected = None; + /// let got = dfa.try_search_fwd(&input)?; + /// assert_eq!(expected, got); + /// + /// # Ok::<(), Box>(()) + /// ``` + #[inline] + fn try_search_fwd( + &self, + input: &Input<'_>, + ) -> Result, MatchError> { + let utf8empty = self.has_empty() && self.is_utf8(); + let hm = match search::find_fwd(&self, input)? { + None => return Ok(None), + Some(hm) if !utf8empty => return Ok(Some(hm)), + Some(hm) => hm, + }; + // We get to this point when we know our DFA can match the empty string + // AND when UTF-8 mode is enabled. In this case, we skip any matches + // whose offset splits a codepoint. Such a match is necessarily a + // zero-width match, because UTF-8 mode requires the underlying NFA + // to be built such that all non-empty matches span valid UTF-8. + // Therefore, any match that ends in the middle of a codepoint cannot + // be part of a span of valid UTF-8 and thus must be an empty match. + // In such cases, we skip it, so as not to report matches that split a + // codepoint. + // + // Note that this is not a checked assumption. Callers *can* provide an + // NFA with UTF-8 mode enabled but produces non-empty matches that span + // invalid UTF-8. But doing so is documented to result in unspecified + // behavior. + empty::skip_splits_fwd(input, hm, hm.offset(), |input| { + let got = search::find_fwd(&self, input)?; + Ok(got.map(|hm| (hm, hm.offset()))) + }) + } + + /// Executes a reverse search and returns the start of the position of the + /// leftmost match that is found. If no match exists, then `None` is + /// returned. + /// + /// # Errors + /// + /// This routine errors if the search could not complete. This can occur + /// in a number of circumstances: + /// + /// * The configuration of the DFA may permit it to "quit" the search. + /// For example, setting quit bytes or enabling heuristic support for + /// Unicode word boundaries. The default configuration does not enable any + /// option that could result in the DFA quitting. + /// * When the provided `Input` configuration is not supported. For + /// example, by providing an unsupported anchor mode. + /// + /// When a search returns an error, callers cannot know whether a match + /// exists or not. + /// + /// # Example + /// + /// This example shows how to use this method with a + /// [`dense::DFA`](crate::dfa::dense::DFA). In particular, this + /// routine is principally useful when used in conjunction with the + /// [`nfa::thompson::Config::reverse`](crate::nfa::thompson::Config::reverse) + /// configuration. 
In general, it's unlikely to be correct to use + /// both `try_search_fwd` and `try_search_rev` with the same DFA since + /// any particular DFA will only support searching in one direction with + /// respect to the pattern. + /// + /// ``` + /// use regex_automata::{ + /// nfa::thompson, + /// dfa::{Automaton, dense}, + /// HalfMatch, Input, + /// }; + /// + /// let dfa = dense::Builder::new() + /// .thompson(thompson::Config::new().reverse(true)) + /// .build("foo[0-9]+")?; + /// let expected = Some(HalfMatch::must(0, 0)); + /// assert_eq!(expected, dfa.try_search_rev(&Input::new(b"foo12345"))?); + /// + /// // Even though a match is found after reading the last byte (`c`), + /// // the leftmost first match semantics demand that we find the earliest + /// // match that prefers earlier parts of the pattern over latter parts. + /// let dfa = dense::Builder::new() + /// .thompson(thompson::Config::new().reverse(true)) + /// .build("abc|c")?; + /// let expected = Some(HalfMatch::must(0, 0)); + /// assert_eq!(expected, dfa.try_search_rev(&Input::new(b"abc"))?); + /// + /// # Ok::<(), Box>(()) + /// ``` + /// + /// # Example: UTF-8 mode + /// + /// This examples demonstrates that UTF-8 mode applies to reverse + /// DFAs. When UTF-8 mode is enabled in the underlying NFA, then all + /// matches reported must correspond to valid UTF-8 spans. This includes + /// prohibiting zero-width matches that split a codepoint. + /// + /// UTF-8 mode is enabled by default. Notice below how the only zero-width + /// matches reported are those at UTF-8 boundaries: + /// + /// ``` + /// use regex_automata::{ + /// dfa::{dense::DFA, Automaton}, + /// nfa::thompson, + /// HalfMatch, Input, MatchKind, + /// }; + /// + /// let dfa = DFA::builder() + /// .thompson(thompson::Config::new().reverse(true)) + /// .build(r"")?; + /// + /// // Run the reverse DFA to collect all matches. + /// let mut input = Input::new("☃"); + /// let mut matches = vec![]; + /// loop { + /// match dfa.try_search_rev(&input)? { + /// None => break, + /// Some(hm) => { + /// matches.push(hm); + /// if hm.offset() == 0 || input.end() == 0 { + /// break; + /// } else if hm.offset() < input.end() { + /// input.set_end(hm.offset()); + /// } else { + /// // This is only necessary to handle zero-width + /// // matches, which of course occur in this example. + /// // Without this, the search would never advance + /// // backwards beyond the initial match. + /// input.set_end(input.end() - 1); + /// } + /// } + /// } + /// } + /// + /// // No matches split a codepoint. + /// let expected = vec![ + /// HalfMatch::must(0, 3), + /// HalfMatch::must(0, 0), + /// ]; + /// assert_eq!(expected, matches); + /// + /// # Ok::<(), Box>(()) + /// ``` + /// + /// Now let's look at the same example, but with UTF-8 mode on the + /// original NFA disabled (which results in disabling UTF-8 mode on the + /// DFA): + /// + /// ``` + /// use regex_automata::{ + /// dfa::{dense::DFA, Automaton}, + /// nfa::thompson, + /// HalfMatch, Input, MatchKind, + /// }; + /// + /// let dfa = DFA::builder() + /// .thompson(thompson::Config::new().reverse(true).utf8(false)) + /// .build(r"")?; + /// + /// // Run the reverse DFA to collect all matches. + /// let mut input = Input::new("☃"); + /// let mut matches = vec![]; + /// loop { + /// match dfa.try_search_rev(&input)? 
{ + /// None => break, + /// Some(hm) => { + /// matches.push(hm); + /// if hm.offset() == 0 || input.end() == 0 { + /// break; + /// } else if hm.offset() < input.end() { + /// input.set_end(hm.offset()); + /// } else { + /// // This is only necessary to handle zero-width + /// // matches, which of course occur in this example. + /// // Without this, the search would never advance + /// // backwards beyond the initial match. + /// input.set_end(input.end() - 1); + /// } + /// } + /// } + /// } + /// + /// // No matches split a codepoint. + /// let expected = vec![ + /// HalfMatch::must(0, 3), + /// HalfMatch::must(0, 2), + /// HalfMatch::must(0, 1), + /// HalfMatch::must(0, 0), + /// ]; + /// assert_eq!(expected, matches); + /// + /// # Ok::<(), Box>(()) + /// ``` + #[inline] + fn try_search_rev( + &self, + input: &Input<'_>, + ) -> Result, MatchError> { + let utf8empty = self.has_empty() && self.is_utf8(); + let hm = match search::find_rev(self, input)? { + None => return Ok(None), + Some(hm) if !utf8empty => return Ok(Some(hm)), + Some(hm) => hm, + }; + empty::skip_splits_rev(input, hm, hm.offset(), |input| { + let got = search::find_rev(self, input)?; + Ok(got.map(|hm| (hm, hm.offset()))) + }) + } + + /// Executes an overlapping forward search. Matches, if one exists, can be + /// obtained via the [`OverlappingState::get_match`] method. + /// + /// This routine is principally only useful when searching for multiple + /// patterns on inputs where multiple patterns may match the same regions + /// of text. In particular, callers must preserve the automaton's search + /// state from prior calls so that the implementation knows where the last + /// match occurred. + /// + /// When using this routine to implement an iterator of overlapping + /// matches, the `start` of the search should always be set to the end + /// of the last match. If more patterns match at the previous location, + /// then they will be immediately returned. (This is tracked by the given + /// overlapping state.) Otherwise, the search continues at the starting + /// position given. + /// + /// If for some reason you want the search to forget about its previous + /// state and restart the search at a particular position, then setting the + /// state to [`OverlappingState::start`] will accomplish that. + /// + /// # Errors + /// + /// This routine errors if the search could not complete. This can occur + /// in a number of circumstances: + /// + /// * The configuration of the DFA may permit it to "quit" the search. + /// For example, setting quit bytes or enabling heuristic support for + /// Unicode word boundaries. The default configuration does not enable any + /// option that could result in the DFA quitting. + /// * When the provided `Input` configuration is not supported. For + /// example, by providing an unsupported anchor mode. + /// + /// When a search returns an error, callers cannot know whether a match + /// exists or not. + /// + /// # Example + /// + /// This example shows how to run a basic overlapping search with a + /// [`dense::DFA`](crate::dfa::dense::DFA). Notice that we build the + /// automaton with a `MatchKind::All` configuration. Overlapping searches + /// are unlikely to work as one would expect when using the default + /// `MatchKind::LeftmostFirst` match semantics, since leftmost-first + /// matching is fundamentally incompatible with overlapping searches. 
+ /// Namely, overlapping searches need to report matches as they are seen, + /// where as leftmost-first searches will continue searching even after a + /// match has been observed in order to find the conventional end position + /// of the match. More concretely, leftmost-first searches use dead states + /// to terminate a search after a specific match can no longer be extended. + /// Overlapping searches instead do the opposite by continuing the search + /// to find totally new matches (potentially of other patterns). + /// + /// ``` + /// # if cfg!(miri) { return Ok(()); } // miri takes too long + /// use regex_automata::{ + /// dfa::{Automaton, OverlappingState, dense}, + /// HalfMatch, Input, MatchKind, + /// }; + /// + /// let dfa = dense::Builder::new() + /// .configure(dense::Config::new().match_kind(MatchKind::All)) + /// .build_many(&[r"[[:word:]]+$", r"[[:^space:]]+$"])?; + /// let haystack = "@foo"; + /// let mut state = OverlappingState::start(); + /// + /// let expected = Some(HalfMatch::must(1, 4)); + /// dfa.try_search_overlapping_fwd(&Input::new(haystack), &mut state)?; + /// assert_eq!(expected, state.get_match()); + /// + /// // The first pattern also matches at the same position, so re-running + /// // the search will yield another match. Notice also that the first + /// // pattern is returned after the second. This is because the second + /// // pattern begins its match before the first, is therefore an earlier + /// // match and is thus reported first. + /// let expected = Some(HalfMatch::must(0, 4)); + /// dfa.try_search_overlapping_fwd(&Input::new(haystack), &mut state)?; + /// assert_eq!(expected, state.get_match()); + /// + /// # Ok::<(), Box>(()) + /// ``` + #[inline] + fn try_search_overlapping_fwd( + &self, + input: &Input<'_>, + state: &mut OverlappingState, + ) -> Result<(), MatchError> { + let utf8empty = self.has_empty() && self.is_utf8(); + search::find_overlapping_fwd(self, input, state)?; + match state.get_match() { + None => Ok(()), + Some(_) if !utf8empty => Ok(()), + Some(_) => skip_empty_utf8_splits_overlapping( + input, + state, + |input, state| { + search::find_overlapping_fwd(self, input, state) + }, + ), + } + } + + /// Executes a reverse overlapping forward search. Matches, if one exists, + /// can be obtained via the [`OverlappingState::get_match`] method. + /// + /// When using this routine to implement an iterator of overlapping + /// matches, the `start` of the search should remain invariant throughout + /// iteration. The `OverlappingState` given to the search will keep track + /// of the current position of the search. (This is because multiple + /// matches may be reported at the same position, so only the search + /// implementation itself knows when to advance the position.) + /// + /// If for some reason you want the search to forget about its previous + /// state and restart the search at a particular position, then setting the + /// state to [`OverlappingState::start`] will accomplish that. + /// + /// # Errors + /// + /// This routine errors if the search could not complete. This can occur + /// in a number of circumstances: + /// + /// * The configuration of the DFA may permit it to "quit" the search. + /// For example, setting quit bytes or enabling heuristic support for + /// Unicode word boundaries. The default configuration does not enable any + /// option that could result in the DFA quitting. + /// * When the provided `Input` configuration is not supported. For + /// example, by providing an unsupported anchor mode. 
+ /// + /// When a search returns an error, callers cannot know whether a match + /// exists or not. + /// + /// # Example: UTF-8 mode + /// + /// This examples demonstrates that UTF-8 mode applies to reverse + /// DFAs. When UTF-8 mode is enabled in the underlying NFA, then all + /// matches reported must correspond to valid UTF-8 spans. This includes + /// prohibiting zero-width matches that split a codepoint. + /// + /// UTF-8 mode is enabled by default. Notice below how the only zero-width + /// matches reported are those at UTF-8 boundaries: + /// + /// ``` + /// use regex_automata::{ + /// dfa::{dense::DFA, Automaton, OverlappingState}, + /// nfa::thompson, + /// HalfMatch, Input, MatchKind, + /// }; + /// + /// let dfa = DFA::builder() + /// .configure(DFA::config().match_kind(MatchKind::All)) + /// .thompson(thompson::Config::new().reverse(true)) + /// .build_many(&[r"", r"☃"])?; + /// + /// // Run the reverse DFA to collect all matches. + /// let input = Input::new("☃"); + /// let mut state = OverlappingState::start(); + /// let mut matches = vec![]; + /// loop { + /// dfa.try_search_overlapping_rev(&input, &mut state)?; + /// match state.get_match() { + /// None => break, + /// Some(hm) => matches.push(hm), + /// } + /// } + /// + /// // No matches split a codepoint. + /// let expected = vec![ + /// HalfMatch::must(0, 3), + /// HalfMatch::must(1, 0), + /// HalfMatch::must(0, 0), + /// ]; + /// assert_eq!(expected, matches); + /// + /// # Ok::<(), Box>(()) + /// ``` + /// + /// Now let's look at the same example, but with UTF-8 mode on the + /// original NFA disabled (which results in disabling UTF-8 mode on the + /// DFA): + /// + /// ``` + /// use regex_automata::{ + /// dfa::{dense::DFA, Automaton, OverlappingState}, + /// nfa::thompson, + /// HalfMatch, Input, MatchKind, + /// }; + /// + /// let dfa = DFA::builder() + /// .configure(DFA::config().match_kind(MatchKind::All)) + /// .thompson(thompson::Config::new().reverse(true).utf8(false)) + /// .build_many(&[r"", r"☃"])?; + /// + /// // Run the reverse DFA to collect all matches. + /// let input = Input::new("☃"); + /// let mut state = OverlappingState::start(); + /// let mut matches = vec![]; + /// loop { + /// dfa.try_search_overlapping_rev(&input, &mut state)?; + /// match state.get_match() { + /// None => break, + /// Some(hm) => matches.push(hm), + /// } + /// } + /// + /// // Now *all* positions match, even within a codepoint, + /// // because we lifted the requirement that matches + /// // correspond to valid UTF-8 spans. + /// let expected = vec![ + /// HalfMatch::must(0, 3), + /// HalfMatch::must(0, 2), + /// HalfMatch::must(0, 1), + /// HalfMatch::must(1, 0), + /// HalfMatch::must(0, 0), + /// ]; + /// assert_eq!(expected, matches); + /// + /// # Ok::<(), Box>(()) + /// ``` + #[inline] + fn try_search_overlapping_rev( + &self, + input: &Input<'_>, + state: &mut OverlappingState, + ) -> Result<(), MatchError> { + let utf8empty = self.has_empty() && self.is_utf8(); + search::find_overlapping_rev(self, input, state)?; + match state.get_match() { + None => Ok(()), + Some(_) if !utf8empty => Ok(()), + Some(_) => skip_empty_utf8_splits_overlapping( + input, + state, + |input, state| { + search::find_overlapping_rev(self, input, state) + }, + ), + } + } + + /// Writes the set of patterns that match anywhere in the given search + /// configuration to `patset`. 
If multiple patterns match at the same + /// position and the underlying DFA supports overlapping matches, then all + /// matching patterns are written to the given set. + /// + /// Unless all of the patterns in this DFA are anchored, then generally + /// speaking, this will visit every byte in the haystack. + /// + /// This search routine *does not* clear the pattern set. This gives some + /// flexibility to the caller (e.g., running multiple searches with the + /// same pattern set), but does make the API bug-prone if you're reusing + /// the same pattern set for multiple searches but intended them to be + /// independent. + /// + /// If a pattern ID matched but the given `PatternSet` does not have + /// sufficient capacity to store it, then it is not inserted and silently + /// dropped. + /// + /// # Errors + /// + /// This routine errors if the search could not complete. This can occur + /// in a number of circumstances: + /// + /// * The configuration of the DFA may permit it to "quit" the search. + /// For example, setting quit bytes or enabling heuristic support for + /// Unicode word boundaries. The default configuration does not enable any + /// option that could result in the DFA quitting. + /// * When the provided `Input` configuration is not supported. For + /// example, by providing an unsupported anchor mode. + /// + /// When a search returns an error, callers cannot know whether a match + /// exists or not. + /// + /// # Example + /// + /// This example shows how to find all matching patterns in a haystack, + /// even when some patterns match at the same position as other patterns. + /// + /// ``` + /// # if cfg!(miri) { return Ok(()); } // miri takes too long + /// use regex_automata::{ + /// dfa::{Automaton, dense::DFA}, + /// Input, MatchKind, PatternSet, + /// }; + /// + /// let patterns = &[ + /// r"[[:word:]]+", + /// r"[0-9]+", + /// r"[[:alpha:]]+", + /// r"foo", + /// r"bar", + /// r"barfoo", + /// r"foobar", + /// ]; + /// let dfa = DFA::builder() + /// .configure(DFA::config().match_kind(MatchKind::All)) + /// .build_many(patterns)?; + /// + /// let input = Input::new("foobar"); + /// let mut patset = PatternSet::new(dfa.pattern_len()); + /// dfa.try_which_overlapping_matches(&input, &mut patset)?; + /// let expected = vec![0, 2, 3, 4, 6]; + /// let got: Vec = patset.iter().map(|p| p.as_usize()).collect(); + /// assert_eq!(expected, got); + /// + /// # Ok::<(), Box>(()) + /// ``` + #[cfg(feature = "alloc")] + #[inline] + fn try_which_overlapping_matches( + &self, + input: &Input<'_>, + patset: &mut PatternSet, + ) -> Result<(), MatchError> { + let mut state = OverlappingState::start(); + while let Some(m) = { + self.try_search_overlapping_fwd(input, &mut state)?; + state.get_match() + } { + let _ = patset.insert(m.pattern()); + // There's nothing left to find, so we can stop. Or the caller + // asked us to. 
+ if patset.is_full() || input.get_earliest() {
+ break;
+ }
+ }
+ Ok(())
+ }
+}
+
+unsafe impl<'a, A: Automaton + ?Sized> Automaton for &'a A {
+ #[inline]
+ fn next_state(&self, current: StateID, input: u8) -> StateID {
+ (**self).next_state(current, input)
+ }
+
+ #[inline]
+ unsafe fn next_state_unchecked(
+ &self,
+ current: StateID,
+ input: u8,
+ ) -> StateID {
+ (**self).next_state_unchecked(current, input)
+ }
+
+ #[inline]
+ fn next_eoi_state(&self, current: StateID) -> StateID {
+ (**self).next_eoi_state(current)
+ }
+
+ #[inline]
+ fn start_state(
+ &self,
+ config: &start::Config,
+ ) -> Result<StateID, StartError> {
+ (**self).start_state(config)
+ }
+
+ #[inline]
+ fn start_state_forward(
+ &self,
+ input: &Input<'_>,
+ ) -> Result<StateID, MatchError> {
+ (**self).start_state_forward(input)
+ }
+
+ #[inline]
+ fn start_state_reverse(
+ &self,
+ input: &Input<'_>,
+ ) -> Result<StateID, MatchError> {
+ (**self).start_state_reverse(input)
+ }
+
+ #[inline]
+ fn universal_start_state(&self, mode: Anchored) -> Option<StateID> {
+ (**self).universal_start_state(mode)
+ }
+
+ #[inline]
+ fn is_special_state(&self, id: StateID) -> bool {
+ (**self).is_special_state(id)
+ }
+
+ #[inline]
+ fn is_dead_state(&self, id: StateID) -> bool {
+ (**self).is_dead_state(id)
+ }
+
+ #[inline]
+ fn is_quit_state(&self, id: StateID) -> bool {
+ (**self).is_quit_state(id)
+ }
+
+ #[inline]
+ fn is_match_state(&self, id: StateID) -> bool {
+ (**self).is_match_state(id)
+ }
+
+ #[inline]
+ fn is_start_state(&self, id: StateID) -> bool {
+ (**self).is_start_state(id)
+ }
+
+ #[inline]
+ fn is_accel_state(&self, id: StateID) -> bool {
+ (**self).is_accel_state(id)
+ }
+
+ #[inline]
+ fn pattern_len(&self) -> usize {
+ (**self).pattern_len()
+ }
+
+ #[inline]
+ fn match_len(&self, id: StateID) -> usize {
+ (**self).match_len(id)
+ }
+
+ #[inline]
+ fn match_pattern(&self, id: StateID, index: usize) -> PatternID {
+ (**self).match_pattern(id, index)
+ }
+
+ #[inline]
+ fn has_empty(&self) -> bool {
+ (**self).has_empty()
+ }
+
+ #[inline]
+ fn is_utf8(&self) -> bool {
+ (**self).is_utf8()
+ }
+
+ #[inline]
+ fn is_always_start_anchored(&self) -> bool {
+ (**self).is_always_start_anchored()
+ }
+
+ #[inline]
+ fn accelerator(&self, id: StateID) -> &[u8] {
+ (**self).accelerator(id)
+ }
+
+ #[inline]
+ fn get_prefilter(&self) -> Option<&Prefilter> {
+ (**self).get_prefilter()
+ }
+
+ #[inline]
+ fn try_search_fwd(
+ &self,
+ input: &Input<'_>,
+ ) -> Result<Option<HalfMatch>, MatchError> {
+ (**self).try_search_fwd(input)
+ }
+
+ #[inline]
+ fn try_search_rev(
+ &self,
+ input: &Input<'_>,
+ ) -> Result<Option<HalfMatch>, MatchError> {
+ (**self).try_search_rev(input)
+ }
+
+ #[inline]
+ fn try_search_overlapping_fwd(
+ &self,
+ input: &Input<'_>,
+ state: &mut OverlappingState,
+ ) -> Result<(), MatchError> {
+ (**self).try_search_overlapping_fwd(input, state)
+ }
+
+ #[inline]
+ fn try_search_overlapping_rev(
+ &self,
+ input: &Input<'_>,
+ state: &mut OverlappingState,
+ ) -> Result<(), MatchError> {
+ (**self).try_search_overlapping_rev(input, state)
+ }
+
+ #[cfg(feature = "alloc")]
+ #[inline]
+ fn try_which_overlapping_matches(
+ &self,
+ input: &Input<'_>,
+ patset: &mut PatternSet,
+ ) -> Result<(), MatchError> {
+ (**self).try_which_overlapping_matches(input, patset)
+ }
+}
+
+/// Represents the current state of an overlapping search.
+///
+/// This is used for overlapping searches since they need to know something
+/// about the previous search. For example, when multiple patterns match at the
+/// same position, this state tracks the last reported pattern so that the next
+/// search knows whether to report another matching pattern or continue with
+/// the search at the next position. Additionally, it also tracks which state
+/// the last search call terminated in.
+///
+/// This type provides few introspection capabilities. The only thing a
+/// caller can do is construct it and pass it around to permit search routines
+/// to use it to track state, and also ask whether a match has been found.
+///
+/// Callers should always provide a fresh state constructed via
+/// [`OverlappingState::start`] when starting a new search. Reusing state from
+/// a previous search may result in incorrect results.
+#[derive(Clone, Debug, Eq, PartialEq)]
+pub struct OverlappingState {
+ /// The match reported by the most recent overlapping search to use this
+ /// state.
+ ///
+ /// If a search does not find any matches, then it is expected to clear
+ /// this value.
+ pub(crate) mat: Option<HalfMatch>,
+ /// The state ID of the state at which the search was in when the call
+ /// terminated. When this is a match state, `last_match` must be set to a
+ /// non-None value.
+ ///
+ /// A `None` value indicates the start state of the corresponding
+ /// automaton. We cannot use the actual ID, since any one automaton may
+ /// have many start states, and which one is in use depends on several
+ /// search-time factors.
+ pub(crate) id: Option<StateID>,
+ /// The position of the search.
+ ///
+ /// When `id` is None (i.e., we are starting a search), this is set to
+ /// the beginning of the search as given by the caller regardless of its
+ /// current value. Subsequent calls to an overlapping search pick up at
+ /// this offset.
+ pub(crate) at: usize,
+ /// The index into the matching patterns of the next match to report if the
+ /// current state is a match state. Note that this may be 1 greater than
+ /// the total number of matches to report for the current match state. (In
+ /// which case, no more matches should be reported at the current position
+ /// and the search should advance to the next position.)
+ pub(crate) next_match_index: Option<usize>,
+ /// This is set to true when a reverse overlapping search has entered its
+ /// EOI transitions.
+ ///
+ /// This isn't used in a forward search because it knows to stop once the
+ /// position exceeds the end of the search range. In a reverse search,
+ /// since we use unsigned offsets, we don't "know" once we've gone past
+ /// `0`. So the only way to detect it is with this extra flag. The reverse
+ /// overlapping search knows to terminate specifically after it has
+ /// reported all matches after following the EOI transition.
+ pub(crate) rev_eoi: bool,
+}
+
+impl OverlappingState {
+ /// Create a new overlapping state that begins at the start state of any
+ /// automaton.
+ pub fn start() -> OverlappingState {
+ OverlappingState {
+ mat: None,
+ id: None,
+ at: 0,
+ next_match_index: None,
+ rev_eoi: false,
+ }
+ }
+
+ /// Return the match result of the most recent search to execute with this
+ /// state.
+ ///
+ /// A search will clear this result automatically, such that if no
+ /// match is found, this will correctly report `None`.
+ pub fn get_match(&self) -> Option<HalfMatch> {
+ self.mat
+ }
+}
+
+/// An error that can occur when computing the start state for a search.
+/// +/// Computing a start state can fail for a few reasons, either based on +/// incorrect configuration or even based on whether the look-behind byte +/// triggers a quit state. Typically one does not need to handle this error +/// if you're using [`Automaton::start_state_forward`] (or its reverse +/// counterpart), as that routine automatically converts `StartError` to a +/// [`MatchError`] for you. +/// +/// This error may be returned by the [`Automaton::start_state`] routine. +/// +/// This error implements the `std::error::Error` trait when the `std` feature +/// is enabled. +/// +/// This error is marked as non-exhaustive. New variants may be added in a +/// semver compatible release. +#[non_exhaustive] +#[derive(Clone, Debug)] +pub enum StartError { + /// An error that occurs when a starting configuration's look-behind byte + /// is in this DFA's quit set. + Quit { + /// The quit byte that was found. + byte: u8, + }, + /// An error that occurs when the caller requests an anchored mode that + /// isn't supported by the DFA. + UnsupportedAnchored { + /// The anchored mode given that is unsupported. + mode: Anchored, + }, +} + +impl StartError { + pub(crate) fn quit(byte: u8) -> StartError { + StartError::Quit { byte } + } + + pub(crate) fn unsupported_anchored(mode: Anchored) -> StartError { + StartError::UnsupportedAnchored { mode } + } +} + +#[cfg(feature = "std")] +impl std::error::Error for StartError {} + +impl core::fmt::Display for StartError { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + match *self { + StartError::Quit { byte } => write!( + f, + "error computing start state because the look-behind byte \ + {:?} triggered a quit state", + crate::util::escape::DebugByte(byte), + ), + StartError::UnsupportedAnchored { mode: Anchored::Yes } => { + write!( + f, + "error computing start state because \ + anchored searches are not supported or enabled" + ) + } + StartError::UnsupportedAnchored { mode: Anchored::No } => { + write!( + f, + "error computing start state because \ + unanchored searches are not supported or enabled" + ) + } + StartError::UnsupportedAnchored { + mode: Anchored::Pattern(pid), + } => { + write!( + f, + "error computing start state because \ + anchored searches for a specific pattern ({}) \ + are not supported or enabled", + pid.as_usize(), + ) + } + } + } +} + +/// Runs the given overlapping `search` function (forwards or backwards) until +/// a match is found whose offset does not split a codepoint. +/// +/// This is *not* always correct to call. It should only be called when the DFA +/// has UTF-8 mode enabled *and* it can produce zero-width matches. Calling +/// this when both of those things aren't true might result in legitimate +/// matches getting skipped. +#[cold] +#[inline(never)] +fn skip_empty_utf8_splits_overlapping( + input: &Input<'_>, + state: &mut OverlappingState, + mut search: F, +) -> Result<(), MatchError> +where + F: FnMut(&Input<'_>, &mut OverlappingState) -> Result<(), MatchError>, +{ + // Note that this routine works for forwards and reverse searches + // even though there's no code here to handle those cases. That's + // because overlapping searches drive themselves to completion via + // `OverlappingState`. So all we have to do is push it until no matches are + // found. 
+
+ let mut hm = match state.get_match() {
+ None => return Ok(()),
+ Some(hm) => hm,
+ };
+ if input.get_anchored().is_anchored() {
+ if !input.is_char_boundary(hm.offset()) {
+ state.mat = None;
+ }
+ return Ok(());
+ }
+ while !input.is_char_boundary(hm.offset()) {
+ search(input, state)?;
+ hm = match state.get_match() {
+ None => return Ok(()),
+ Some(hm) => hm,
+ };
+ }
+ Ok(())
+}
+
+/// Write a prefix "state" indicator for fmt::Debug impls.
+///
+/// Specifically, this tries to succinctly distinguish the different types of
+/// states: dead states, quit states, accelerated states, start states and
+/// match states. It even accounts for the possible overlapping of different
+/// state types.
+pub(crate) fn fmt_state_indicator<A: Automaton>(
+ f: &mut core::fmt::Formatter<'_>,
+ dfa: A,
+ id: StateID,
+) -> core::fmt::Result {
+ if dfa.is_dead_state(id) {
+ write!(f, "D")?;
+ if dfa.is_start_state(id) {
+ write!(f, ">")?;
+ } else {
+ write!(f, " ")?;
+ }
+ } else if dfa.is_quit_state(id) {
+ write!(f, "Q ")?;
+ } else if dfa.is_start_state(id) {
+ if dfa.is_accel_state(id) {
+ write!(f, "A>")?;
+ } else {
+ write!(f, " >")?;
+ }
+ } else if dfa.is_match_state(id) {
+ if dfa.is_accel_state(id) {
+ write!(f, "A*")?;
+ } else {
+ write!(f, " *")?;
+ }
+ } else if dfa.is_accel_state(id) {
+ write!(f, "A ")?;
+ } else {
+ write!(f, " ")?;
+ }
+ Ok(())
+}
+
+#[cfg(all(test, feature = "syntax", feature = "dfa-build"))]
+mod tests {
+ // A basic test ensuring that our Automaton trait is object safe. (This is
+ // the main reason why we don't define the search routines as generic over
+ // Into<Input>.)
+ #[test]
+ fn object_safe() {
+ use crate::{
+ dfa::{dense, Automaton},
+ HalfMatch, Input,
+ };
+
+ let dfa = dense::DFA::new("abc").unwrap();
+ let dfa: &dyn Automaton = &dfa;
+ assert_eq!(
+ Ok(Some(HalfMatch::must(0, 6))),
+ dfa.try_search_fwd(&Input::new(b"xyzabcxyz")),
+ );
+ }
+}
diff --git a/vendor/regex-automata/src/dfa/dense.rs b/vendor/regex-automata/src/dfa/dense.rs
new file mode 100644
index 00000000000000..d47163afa583bb
--- /dev/null
+++ b/vendor/regex-automata/src/dfa/dense.rs
@@ -0,0 +1,5260 @@
+/*!
+Types and routines specific to dense DFAs.
+
+This module is the home of [`dense::DFA`](DFA).
+
+This module also contains a [`dense::Builder`](Builder) and a
+[`dense::Config`](Config) for building and configuring a dense DFA.
+*/
+
+#[cfg(feature = "dfa-build")]
+use core::cmp;
+use core::{fmt, iter, mem::size_of, slice};
+
+#[cfg(feature = "dfa-build")]
+use alloc::{
+ collections::{BTreeMap, BTreeSet},
+ vec,
+ vec::Vec,
+};
+
+#[cfg(feature = "dfa-build")]
+use crate::{
+ dfa::{
+ accel::Accel, determinize, minimize::Minimizer, remapper::Remapper,
+ sparse,
+ },
+ nfa::thompson,
+ util::{look::LookMatcher, search::MatchKind},
+};
+use crate::{
+ dfa::{
+ accel::Accels,
+ automaton::{fmt_state_indicator, Automaton, StartError},
+ special::Special,
+ start::StartKind,
+ DEAD,
+ },
+ util::{
+ alphabet::{self, ByteClasses, ByteSet},
+ int::{Pointer, Usize},
+ prefilter::Prefilter,
+ primitives::{PatternID, StateID},
+ search::Anchored,
+ start::{self, Start, StartByteMap},
+ wire::{self, DeserializeError, Endian, SerializeError},
+ },
+};
+
+/// The label that is prepended to a serialized DFA.
+const LABEL: &str = "rust-regex-automata-dfa-dense";
+
+/// The format version of dense regexes. This version gets incremented when a
+/// change occurs. A change may not necessarily be a breaking change, but the
+/// version does permit good error messages in the case where a breaking change
+/// is made.
+const VERSION: u32 = 2;
+
+/// The configuration used for compiling a dense DFA.
+///
+/// As a convenience, [`DFA::config`] is an alias for [`Config::new`]. The
+/// advantage of the former is that it often lets you avoid importing the
+/// `Config` type directly.
+///
+/// A dense DFA configuration is a simple data object that is typically used
+/// with [`dense::Builder::configure`](self::Builder::configure).
+///
+/// The default configuration guarantees that a search will never return
+/// a "quit" error, although it is possible for a search to fail if
+/// [`Config::starts_for_each_pattern`] wasn't enabled (which it is
+/// not by default) and an [`Anchored::Pattern`] mode is requested via
+/// [`Input`](crate::Input).
+#[cfg(feature = "dfa-build")]
+#[derive(Clone, Debug, Default)]
+pub struct Config {
+ // As with other configuration types in this crate, we put all our knobs
+ // in options so that we can distinguish between "default" and "not set."
+ // This makes it possible to easily combine multiple configurations
+ // without default values overwriting explicitly specified values. See the
+ // 'overwrite' method.
+ //
+ // For docs on the fields below, see the corresponding method setters.
+ accelerate: Option<bool>,
+ pre: Option<Option<Prefilter>>,
+ minimize: Option<bool>,
+ match_kind: Option<MatchKind>,
+ start_kind: Option<StartKind>,
+ starts_for_each_pattern: Option<bool>,
+ byte_classes: Option<bool>,
+ unicode_word_boundary: Option<bool>,
+ quitset: Option<ByteSet>,
+ specialize_start_states: Option<bool>,
+ dfa_size_limit: Option<Option<usize>>,
+ determinize_size_limit: Option<Option<usize>>,
+}
+
+#[cfg(feature = "dfa-build")]
+impl Config {
+ /// Return a new default dense DFA compiler configuration.
+ pub fn new() -> Config {
+ Config::default()
+ }
+
+ /// Enable state acceleration.
+ ///
+ /// When enabled, DFA construction will analyze each state to determine
+ /// whether it is eligible for simple acceleration. Acceleration typically
+ /// occurs when most of a state's transitions loop back to itself, leaving
+ /// only a select few bytes that will exit the state. When this occurs,
+ /// other routines like `memchr` can be used to look for those bytes which
+ /// may be much faster than traversing the DFA.
+ ///
+ /// Callers may elect to disable this if consistent performance is more
+ /// desirable than variable performance. Namely, acceleration can sometimes
+ /// make searching slower than it otherwise would be if the transitions
+ /// that leave accelerated states are traversed frequently.
+ ///
+ /// See [`Automaton::accelerator`] for an example.
+ ///
+ /// This is enabled by default.
+ pub fn accelerate(mut self, yes: bool) -> Config {
+ self.accelerate = Some(yes);
+ self
+ }
+
+ /// Set a prefilter to be used whenever a start state is entered.
+ ///
+ /// A [`Prefilter`] in this context is meant to accelerate searches by
+ /// looking for literal prefixes that every match for the corresponding
+ /// pattern (or patterns) must start with. Once a prefilter produces a
+ /// match, the underlying search routine continues on to try and confirm
+ /// the match.
+ ///
+ /// Be warned that setting a prefilter does not guarantee that the search
+ /// will be faster.
While it's usually a good bet, if the prefilter + /// produces a lot of false positive candidates (i.e., positions matched + /// by the prefilter but not by the regex), then the overall result can + /// be slower than if you had just executed the regex engine without any + /// prefilters. + /// + /// Note that unless [`Config::specialize_start_states`] has been + /// explicitly set, then setting this will also enable (when `pre` is + /// `Some`) or disable (when `pre` is `None`) start state specialization. + /// This occurs because without start state specialization, a prefilter + /// is likely to be less effective. And without a prefilter, start state + /// specialization is usually pointless. + /// + /// **WARNING:** Note that prefilters are not preserved as part of + /// serialization. Serializing a DFA will drop its prefilter. + /// + /// By default no prefilter is set. + /// + /// # Example + /// + /// ``` + /// use regex_automata::{ + /// dfa::{dense::DFA, Automaton}, + /// util::prefilter::Prefilter, + /// Input, HalfMatch, MatchKind, + /// }; + /// + /// let pre = Prefilter::new(MatchKind::LeftmostFirst, &["foo", "bar"]); + /// let re = DFA::builder() + /// .configure(DFA::config().prefilter(pre)) + /// .build(r"(foo|bar)[a-z]+")?; + /// let input = Input::new("foo1 barfox bar"); + /// assert_eq!( + /// Some(HalfMatch::must(0, 11)), + /// re.try_search_fwd(&input)?, + /// ); + /// + /// # Ok::<(), Box>(()) + /// ``` + /// + /// Be warned though that an incorrect prefilter can lead to incorrect + /// results! + /// + /// ``` + /// use regex_automata::{ + /// dfa::{dense::DFA, Automaton}, + /// util::prefilter::Prefilter, + /// Input, HalfMatch, MatchKind, + /// }; + /// + /// let pre = Prefilter::new(MatchKind::LeftmostFirst, &["foo", "car"]); + /// let re = DFA::builder() + /// .configure(DFA::config().prefilter(pre)) + /// .build(r"(foo|bar)[a-z]+")?; + /// let input = Input::new("foo1 barfox bar"); + /// assert_eq!( + /// // No match reported even though there clearly is one! + /// None, + /// re.try_search_fwd(&input)?, + /// ); + /// + /// # Ok::<(), Box>(()) + /// ``` + pub fn prefilter(mut self, pre: Option) -> Config { + self.pre = Some(pre); + if self.specialize_start_states.is_none() { + self.specialize_start_states = + Some(self.get_prefilter().is_some()); + } + self + } + + /// Minimize the DFA. + /// + /// When enabled, the DFA built will be minimized such that it is as small + /// as possible. + /// + /// Whether one enables minimization or not depends on the types of costs + /// you're willing to pay and how much you care about its benefits. In + /// particular, minimization has worst case `O(n*k*logn)` time and `O(k*n)` + /// space, where `n` is the number of DFA states and `k` is the alphabet + /// size. In practice, minimization can be quite costly in terms of both + /// space and time, so it should only be done if you're willing to wait + /// longer to produce a DFA. In general, you might want a minimal DFA in + /// the following circumstances: + /// + /// 1. You would like to optimize for the size of the automaton. This can + /// manifest in one of two ways. Firstly, if you're converting the + /// DFA into Rust code (or a table embedded in the code), then a minimal + /// DFA will translate into a corresponding reduction in code size, and + /// thus, also the final compiled binary size. Secondly, if you are + /// building many DFAs and putting them on the heap, you'll be able to + /// fit more if they are smaller. 
Note though that building a minimal + /// DFA itself requires additional space; you only realize the space + /// savings once the minimal DFA is constructed (at which point, the + /// space used for minimization is freed). + /// 2. You've observed that a smaller DFA results in faster match + /// performance. Naively, this isn't guaranteed since there is no + /// inherent difference between matching with a bigger-than-minimal + /// DFA and a minimal DFA. However, a smaller DFA may make use of your + /// CPU's cache more efficiently. + /// 3. You are trying to establish an equivalence between regular + /// languages. The standard method for this is to build a minimal DFA + /// for each language and then compare them. If the DFAs are equivalent + /// (up to state renaming), then the languages are equivalent. + /// + /// Typically, minimization only makes sense as an offline process. That + /// is, one might minimize a DFA before serializing it to persistent + /// storage. In practical terms, minimization can take around an order of + /// magnitude more time than compiling the initial DFA via determinization. + /// + /// This option is disabled by default. + pub fn minimize(mut self, yes: bool) -> Config { + self.minimize = Some(yes); + self + } + + /// Set the desired match semantics. + /// + /// The default is [`MatchKind::LeftmostFirst`], which corresponds to the + /// match semantics of Perl-like regex engines. That is, when multiple + /// patterns would match at the same leftmost position, the pattern that + /// appears first in the concrete syntax is chosen. + /// + /// Currently, the only other kind of match semantics supported is + /// [`MatchKind::All`]. This corresponds to classical DFA construction + /// where all possible matches are added to the DFA. + /// + /// Typically, `All` is used when one wants to execute an overlapping + /// search and `LeftmostFirst` otherwise. In particular, it rarely makes + /// sense to use `All` with the various "leftmost" find routines, since the + /// leftmost routines depend on the `LeftmostFirst` automata construction + /// strategy. Specifically, `LeftmostFirst` adds dead states to the DFA + /// as a way to terminate the search and report a match. `LeftmostFirst` + /// also supports non-greedy matches using this strategy where as `All` + /// does not. + /// + /// # Example: overlapping search + /// + /// This example shows the typical use of `MatchKind::All`, which is to + /// report overlapping matches. + /// + /// ``` + /// # if cfg!(miri) { return Ok(()); } // miri takes too long + /// use regex_automata::{ + /// dfa::{Automaton, OverlappingState, dense}, + /// HalfMatch, Input, MatchKind, + /// }; + /// + /// let dfa = dense::Builder::new() + /// .configure(dense::Config::new().match_kind(MatchKind::All)) + /// .build_many(&[r"\w+$", r"\S+$"])?; + /// let input = Input::new("@foo"); + /// let mut state = OverlappingState::start(); + /// + /// let expected = Some(HalfMatch::must(1, 4)); + /// dfa.try_search_overlapping_fwd(&input, &mut state)?; + /// assert_eq!(expected, state.get_match()); + /// + /// // The first pattern also matches at the same position, so re-running + /// // the search will yield another match. Notice also that the first + /// // pattern is returned after the second. This is because the second + /// // pattern begins its match before the first, is therefore an earlier + /// // match and is thus reported first. 
+ /// let expected = Some(HalfMatch::must(0, 4)); + /// dfa.try_search_overlapping_fwd(&input, &mut state)?; + /// assert_eq!(expected, state.get_match()); + /// + /// # Ok::<(), Box>(()) + /// ``` + /// + /// # Example: reverse automaton to find start of match + /// + /// Another example for using `MatchKind::All` is for constructing a + /// reverse automaton to find the start of a match. `All` semantics are + /// used for this in order to find the longest possible match, which + /// corresponds to the leftmost starting position. + /// + /// Note that if you need the starting position then + /// [`dfa::regex::Regex`](crate::dfa::regex::Regex) will handle this for + /// you, so it's usually not necessary to do this yourself. + /// + /// ``` + /// use regex_automata::{ + /// dfa::{dense, Automaton, StartKind}, + /// nfa::thompson::NFA, + /// Anchored, HalfMatch, Input, MatchKind, + /// }; + /// + /// let haystack = "123foobar456".as_bytes(); + /// let pattern = r"[a-z]+r"; + /// + /// let dfa_fwd = dense::DFA::new(pattern)?; + /// let dfa_rev = dense::Builder::new() + /// .thompson(NFA::config().reverse(true)) + /// .configure(dense::Config::new() + /// // This isn't strictly necessary since both anchored and + /// // unanchored searches are supported by default. But since + /// // finding the start-of-match only requires anchored searches, + /// // we can get rid of the unanchored configuration and possibly + /// // slim down our DFA considerably. + /// .start_kind(StartKind::Anchored) + /// .match_kind(MatchKind::All) + /// ) + /// .build(pattern)?; + /// let expected_fwd = HalfMatch::must(0, 9); + /// let expected_rev = HalfMatch::must(0, 3); + /// let got_fwd = dfa_fwd.try_search_fwd(&Input::new(haystack))?.unwrap(); + /// // Here we don't specify the pattern to search for since there's only + /// // one pattern and we're doing a leftmost search. But if this were an + /// // overlapping search, you'd need to specify the pattern that matched + /// // in the forward direction. (Otherwise, you might wind up finding the + /// // starting position of a match of some other pattern.) That in turn + /// // requires building the reverse automaton with starts_for_each_pattern + /// // enabled. Indeed, this is what Regex does internally. + /// let input = Input::new(haystack) + /// .range(..got_fwd.offset()) + /// .anchored(Anchored::Yes); + /// let got_rev = dfa_rev.try_search_rev(&input)?.unwrap(); + /// assert_eq!(expected_fwd, got_fwd); + /// assert_eq!(expected_rev, got_rev); + /// + /// # Ok::<(), Box>(()) + /// ``` + pub fn match_kind(mut self, kind: MatchKind) -> Config { + self.match_kind = Some(kind); + self + } + + /// The type of starting state configuration to use for a DFA. + /// + /// By default, the starting state configuration is [`StartKind::Both`]. + /// + /// # Example + /// + /// ``` + /// use regex_automata::{ + /// dfa::{dense::DFA, Automaton, StartKind}, + /// Anchored, HalfMatch, Input, + /// }; + /// + /// let haystack = "quux foo123"; + /// let expected = HalfMatch::must(0, 11); + /// + /// // By default, DFAs support both anchored and unanchored searches. + /// let dfa = DFA::new(r"[0-9]+")?; + /// let input = Input::new(haystack); + /// assert_eq!(Some(expected), dfa.try_search_fwd(&input)?); + /// + /// // But if we only need anchored searches, then we can build a DFA + /// // that only supports anchored searches. 
This leads to a smaller DFA + /// // (potentially significantly smaller in some cases), but a DFA that + /// // will panic if you try to use it with an unanchored search. + /// let dfa = DFA::builder() + /// .configure(DFA::config().start_kind(StartKind::Anchored)) + /// .build(r"[0-9]+")?; + /// let input = Input::new(haystack) + /// .range(8..) + /// .anchored(Anchored::Yes); + /// assert_eq!(Some(expected), dfa.try_search_fwd(&input)?); + /// + /// # Ok::<(), Box>(()) + /// ``` + pub fn start_kind(mut self, kind: StartKind) -> Config { + self.start_kind = Some(kind); + self + } + + /// Whether to compile a separate start state for each pattern in the + /// automaton. + /// + /// When enabled, a separate **anchored** start state is added for each + /// pattern in the DFA. When this start state is used, then the DFA will + /// only search for matches for the pattern specified, even if there are + /// other patterns in the DFA. + /// + /// The main downside of this option is that it can potentially increase + /// the size of the DFA and/or increase the time it takes to build the DFA. + /// + /// There are a few reasons one might want to enable this (it's disabled + /// by default): + /// + /// 1. When looking for the start of an overlapping match (using a + /// reverse DFA), doing it correctly requires starting the reverse search + /// using the starting state of the pattern that matched in the forward + /// direction. Indeed, when building a [`Regex`](crate::dfa::regex::Regex), + /// it will automatically enable this option when building the reverse DFA + /// internally. + /// 2. When you want to use a DFA with multiple patterns to both search + /// for matches of any pattern or to search for anchored matches of one + /// particular pattern while using the same DFA. (Otherwise, you would need + /// to compile a new DFA for each pattern.) + /// 3. Since the start states added for each pattern are anchored, if you + /// compile an unanchored DFA with one pattern while also enabling this + /// option, then you can use the same DFA to perform anchored or unanchored + /// searches. The latter you get with the standard search APIs. The former + /// you get from the various `_at` search methods that allow you specify a + /// pattern ID to search for. + /// + /// By default this is disabled. + /// + /// # Example + /// + /// This example shows how to use this option to permit the same DFA to + /// run both anchored and unanchored searches for a single pattern. + /// + /// ``` + /// use regex_automata::{ + /// dfa::{dense, Automaton}, + /// Anchored, HalfMatch, PatternID, Input, + /// }; + /// + /// let dfa = dense::Builder::new() + /// .configure(dense::Config::new().starts_for_each_pattern(true)) + /// .build(r"foo[0-9]+")?; + /// let haystack = "quux foo123"; + /// + /// // Here's a normal unanchored search. Notice that we use 'None' for the + /// // pattern ID. Since the DFA was built as an unanchored machine, it + /// // use its default unanchored starting state. + /// let expected = HalfMatch::must(0, 11); + /// let input = Input::new(haystack); + /// assert_eq!(Some(expected), dfa.try_search_fwd(&input)?); + /// // But now if we explicitly specify the pattern to search ('0' being + /// // the only pattern in the DFA), then it will use the starting state + /// // for that specific pattern which is always anchored. Since the + /// // pattern doesn't have a match at the beginning of the haystack, we + /// // find nothing. 
+ /// let input = Input::new(haystack) + /// .anchored(Anchored::Pattern(PatternID::must(0))); + /// assert_eq!(None, dfa.try_search_fwd(&input)?); + /// // And finally, an anchored search is not the same as putting a '^' at + /// // beginning of the pattern. An anchored search can only match at the + /// // beginning of the *search*, which we can change: + /// let input = Input::new(haystack) + /// .anchored(Anchored::Pattern(PatternID::must(0))) + /// .range(5..); + /// assert_eq!(Some(expected), dfa.try_search_fwd(&input)?); + /// + /// # Ok::<(), Box>(()) + /// ``` + pub fn starts_for_each_pattern(mut self, yes: bool) -> Config { + self.starts_for_each_pattern = Some(yes); + self + } + + /// Whether to attempt to shrink the size of the DFA's alphabet or not. + /// + /// This option is enabled by default and should never be disabled unless + /// one is debugging a generated DFA. + /// + /// When enabled, the DFA will use a map from all possible bytes to their + /// corresponding equivalence class. Each equivalence class represents a + /// set of bytes that does not discriminate between a match and a non-match + /// in the DFA. For example, the pattern `[ab]+` has at least two + /// equivalence classes: a set containing `a` and `b` and a set containing + /// every byte except for `a` and `b`. `a` and `b` are in the same + /// equivalence class because they never discriminate between a match and a + /// non-match. + /// + /// The advantage of this map is that the size of the transition table + /// can be reduced drastically from `#states * 256 * sizeof(StateID)` to + /// `#states * k * sizeof(StateID)` where `k` is the number of equivalence + /// classes (rounded up to the nearest power of 2). As a result, total + /// space usage can decrease substantially. Moreover, since a smaller + /// alphabet is used, DFA compilation becomes faster as well. + /// + /// **WARNING:** This is only useful for debugging DFAs. Disabling this + /// does not yield any speed advantages. Namely, even when this is + /// disabled, a byte class map is still used while searching. The only + /// difference is that every byte will be forced into its own distinct + /// equivalence class. This is useful for debugging the actual generated + /// transitions because it lets one see the transitions defined on actual + /// bytes instead of the equivalence classes. + pub fn byte_classes(mut self, yes: bool) -> Config { + self.byte_classes = Some(yes); + self + } + + /// Heuristically enable Unicode word boundaries. + /// + /// When set, this will attempt to implement Unicode word boundaries as if + /// they were ASCII word boundaries. This only works when the search input + /// is ASCII only. If a non-ASCII byte is observed while searching, then a + /// [`MatchError::quit`](crate::MatchError::quit) error is returned. + /// + /// A possible alternative to enabling this option is to simply use an + /// ASCII word boundary, e.g., via `(?-u:\b)`. The main reason to use this + /// option is if you absolutely need Unicode support. This option lets one + /// use a fast search implementation (a DFA) for some potentially very + /// common cases, while providing the option to fall back to some other + /// regex engine to handle the general case when an error is returned. + /// + /// If the pattern provided has no Unicode word boundary in it, then this + /// option has no effect. (That is, quitting on a non-ASCII byte only + /// occurs when this option is enabled _and_ a Unicode word boundary is + /// present in the pattern.) 
+ /// + /// This is almost equivalent to setting all non-ASCII bytes to be quit + /// bytes. The only difference is that this will cause non-ASCII bytes to + /// be quit bytes _only_ when a Unicode word boundary is present in the + /// pattern. + /// + /// When enabling this option, callers _must_ be prepared to handle + /// a [`MatchError`](crate::MatchError) error during search. + /// When using a [`Regex`](crate::dfa::regex::Regex), this corresponds + /// to using the `try_` suite of methods. Alternatively, if + /// callers can guarantee that their input is ASCII only, then a + /// [`MatchError::quit`](crate::MatchError::quit) error will never be + /// returned while searching. + /// + /// This is disabled by default. + /// + /// # Example + /// + /// This example shows how to heuristically enable Unicode word boundaries + /// in a pattern. It also shows what happens when a search comes across a + /// non-ASCII byte. + /// + /// ``` + /// use regex_automata::{ + /// dfa::{Automaton, dense}, + /// HalfMatch, Input, MatchError, + /// }; + /// + /// let dfa = dense::Builder::new() + /// .configure(dense::Config::new().unicode_word_boundary(true)) + /// .build(r"\b[0-9]+\b")?; + /// + /// // The match occurs before the search ever observes the snowman + /// // character, so no error occurs. + /// let haystack = "foo 123 ☃".as_bytes(); + /// let expected = Some(HalfMatch::must(0, 7)); + /// let got = dfa.try_search_fwd(&Input::new(haystack))?; + /// assert_eq!(expected, got); + /// + /// // Notice that this search fails, even though the snowman character + /// // occurs after the ending match offset. This is because search + /// // routines read one byte past the end of the search to account for + /// // look-around, and indeed, this is required here to determine whether + /// // the trailing \b matches. + /// let haystack = "foo 123 ☃".as_bytes(); + /// let expected = MatchError::quit(0xE2, 8); + /// let got = dfa.try_search_fwd(&Input::new(haystack)); + /// assert_eq!(Err(expected), got); + /// + /// // Another example is executing a search where the span of the haystack + /// // we specify is all ASCII, but there is non-ASCII just before it. This + /// // correctly also reports an error. + /// let input = Input::new("β123").range(2..); + /// let expected = MatchError::quit(0xB2, 1); + /// let got = dfa.try_search_fwd(&input); + /// assert_eq!(Err(expected), got); + /// + /// // And similarly for the trailing word boundary. + /// let input = Input::new("123β").range(..3); + /// let expected = MatchError::quit(0xCE, 3); + /// let got = dfa.try_search_fwd(&input); + /// assert_eq!(Err(expected), got); + /// + /// # Ok::<(), Box>(()) + /// ``` + pub fn unicode_word_boundary(mut self, yes: bool) -> Config { + // We have a separate option for this instead of just setting the + // appropriate quit bytes here because we don't want to set quit bytes + // for every regex. We only want to set them when the regex contains a + // Unicode word boundary. + self.unicode_word_boundary = Some(yes); + self + } + + /// Add a "quit" byte to the DFA. + /// + /// When a quit byte is seen during search time, then search will return + /// a [`MatchError::quit`](crate::MatchError::quit) error indicating the + /// offset at which the search stopped. + /// + /// A quit byte will always overrule any other aspects of a regex. 
For + /// example, if the `x` byte is added as a quit byte and the regex `\w` is + /// used, then observing `x` will cause the search to quit immediately + /// despite the fact that `x` is in the `\w` class. + /// + /// This mechanism is primarily useful for heuristically enabling certain + /// features like Unicode word boundaries in a DFA. Namely, if the input + /// to search is ASCII, then a Unicode word boundary can be implemented + /// via an ASCII word boundary with no change in semantics. Thus, a DFA + /// can attempt to match a Unicode word boundary but give up as soon as it + /// observes a non-ASCII byte. Indeed, if callers set all non-ASCII bytes + /// to be quit bytes, then Unicode word boundaries will be permitted when + /// building DFAs. Of course, callers should enable + /// [`Config::unicode_word_boundary`] if they want this behavior instead. + /// (The advantage being that non-ASCII quit bytes will only be added if a + /// Unicode word boundary is in the pattern.) + /// + /// When enabling this option, callers _must_ be prepared to handle a + /// [`MatchError`](crate::MatchError) error during search. When using a + /// [`Regex`](crate::dfa::regex::Regex), this corresponds to using the + /// `try_` suite of methods. + /// + /// By default, there are no quit bytes set. + /// + /// # Panics + /// + /// This panics if heuristic Unicode word boundaries are enabled and any + /// non-ASCII byte is removed from the set of quit bytes. Namely, enabling + /// Unicode word boundaries requires setting every non-ASCII byte to a quit + /// byte. So if the caller attempts to undo any of that, then this will + /// panic. + /// + /// # Example + /// + /// This example shows how to cause a search to terminate if it sees a + /// `\n` byte. This could be useful if, for example, you wanted to prevent + /// a user supplied pattern from matching across a line boundary. + /// + /// ``` + /// # if cfg!(miri) { return Ok(()); } // miri takes too long + /// use regex_automata::{dfa::{Automaton, dense}, Input, MatchError}; + /// + /// let dfa = dense::Builder::new() + /// .configure(dense::Config::new().quit(b'\n', true)) + /// .build(r"foo\p{any}+bar")?; + /// + /// let haystack = "foo\nbar".as_bytes(); + /// // Normally this would produce a match, since \p{any} contains '\n'. + /// // But since we instructed the automaton to enter a quit state if a + /// // '\n' is observed, this produces a match error instead. + /// let expected = MatchError::quit(b'\n', 3); + /// let got = dfa.try_search_fwd(&Input::new(haystack)).unwrap_err(); + /// assert_eq!(expected, got); + /// + /// # Ok::<(), Box>(()) + /// ``` + pub fn quit(mut self, byte: u8, yes: bool) -> Config { + if self.get_unicode_word_boundary() && !byte.is_ascii() && !yes { + panic!( + "cannot set non-ASCII byte to be non-quit when \ + Unicode word boundaries are enabled" + ); + } + if self.quitset.is_none() { + self.quitset = Some(ByteSet::empty()); + } + if yes { + self.quitset.as_mut().unwrap().add(byte); + } else { + self.quitset.as_mut().unwrap().remove(byte); + } + self + } + + /// Enable specializing start states in the DFA. + /// + /// When start states are specialized, an implementor of a search routine + /// using a lazy DFA can tell when the search has entered a starting state. + /// When start states aren't specialized, then it is impossible to know + /// whether the search has entered a start state. + /// + /// Ideally, this option wouldn't need to exist and we could always + /// specialize start states. 
The problem is that start states can be quite + /// active. This in turn means that an efficient search routine is likely + /// to ping-pong between a heavily optimized hot loop that handles most + /// states and to a less optimized specialized handling of start states. + /// This causes branches to get heavily mispredicted and overall can + /// materially decrease throughput. Therefore, specializing start states + /// should only be enabled when it is needed. + /// + /// Knowing whether a search is in a start state is typically useful when a + /// prefilter is active for the search. A prefilter is typically only run + /// when in a start state and a prefilter can greatly accelerate a search. + /// Therefore, the possible cost of specializing start states is worth it + /// in this case. Otherwise, if you have no prefilter, there is likely no + /// reason to specialize start states. + /// + /// This is disabled by default, but note that it is automatically + /// enabled (or disabled) if [`Config::prefilter`] is set. Namely, unless + /// `specialize_start_states` has already been set, [`Config::prefilter`] + /// will automatically enable or disable it based on whether a prefilter + /// is present or not, respectively. This is done because a prefilter's + /// effectiveness is rooted in being executed whenever the DFA is in a + /// start state, and that's only possible to do when they are specialized. + /// + /// Note that it is plausibly reasonable to _disable_ this option + /// explicitly while _enabling_ a prefilter. In that case, a prefilter + /// will still be run at the beginning of a search, but never again. This + /// in theory could strike a good balance if you're in a situation where a + /// prefilter is likely to produce many false positive candidates. + /// + /// # Example + /// + /// This example shows how to enable start state specialization and then + /// shows how to check whether a state is a start state or not. + /// + /// ``` + /// use regex_automata::{dfa::{Automaton, dense::DFA}, Input}; + /// + /// let dfa = DFA::builder() + /// .configure(DFA::config().specialize_start_states(true)) + /// .build(r"[a-z]+")?; + /// + /// let haystack = "123 foobar 4567".as_bytes(); + /// let sid = dfa.start_state_forward(&Input::new(haystack))?; + /// // The ID returned by 'start_state_forward' will always be tagged as + /// // a start state when start state specialization is enabled. + /// assert!(dfa.is_special_state(sid)); + /// assert!(dfa.is_start_state(sid)); + /// + /// # Ok::<(), Box>(()) + /// ``` + /// + /// Compare the above with the default DFA configuration where start states + /// are _not_ specialized. In this case, the start state is not tagged at + /// all: + /// + /// ``` + /// use regex_automata::{dfa::{Automaton, dense::DFA}, Input}; + /// + /// let dfa = DFA::new(r"[a-z]+")?; + /// + /// let haystack = "123 foobar 4567"; + /// let sid = dfa.start_state_forward(&Input::new(haystack))?; + /// // Start states are not special in the default configuration! + /// assert!(!dfa.is_special_state(sid)); + /// assert!(!dfa.is_start_state(sid)); + /// + /// # Ok::<(), Box>(()) + /// ``` + pub fn specialize_start_states(mut self, yes: bool) -> Config { + self.specialize_start_states = Some(yes); + self + } + + /// Set a size limit on the total heap used by a DFA. + /// + /// This size limit is expressed in bytes and is applied during + /// determinization of an NFA into a DFA. 
If the DFA's heap usage, and only + /// the DFA, exceeds this configured limit, then determinization is stopped + /// and an error is returned. + /// + /// This limit does not apply to auxiliary storage used during + /// determinization that isn't part of the generated DFA. + /// + /// This limit is only applied during determinization. Currently, there is + /// no way to post-pone this check to after minimization if minimization + /// was enabled. + /// + /// The total limit on heap used during determinization is the sum of the + /// DFA and determinization size limits. + /// + /// The default is no limit. + /// + /// # Example + /// + /// This example shows a DFA that fails to build because of a configured + /// size limit. This particular example also serves as a cautionary tale + /// demonstrating just how big DFAs with large Unicode character classes + /// can get. + /// + /// ``` + /// # if cfg!(miri) { return Ok(()); } // miri takes too long + /// use regex_automata::{dfa::{dense, Automaton}, Input}; + /// + /// // 6MB isn't enough! + /// dense::Builder::new() + /// .configure(dense::Config::new().dfa_size_limit(Some(6_000_000))) + /// .build(r"\w{20}") + /// .unwrap_err(); + /// + /// // ... but 7MB probably is! + /// // (Note that DFA sizes aren't necessarily stable between releases.) + /// let dfa = dense::Builder::new() + /// .configure(dense::Config::new().dfa_size_limit(Some(7_000_000))) + /// .build(r"\w{20}")?; + /// let haystack = "A".repeat(20).into_bytes(); + /// assert!(dfa.try_search_fwd(&Input::new(&haystack))?.is_some()); + /// + /// # Ok::<(), Box>(()) + /// ``` + /// + /// While one needs a little more than 6MB to represent `\w{20}`, it + /// turns out that you only need a little more than 6KB to represent + /// `(?-u:\w{20})`. So only use Unicode if you need it! + /// + /// As with [`Config::determinize_size_limit`], the size of a DFA is + /// influenced by other factors, such as what start state configurations + /// to support. For example, if you only need unanchored searches and not + /// anchored searches, then configuring the DFA to only support unanchored + /// searches can reduce its size. By default, DFAs support both unanchored + /// and anchored searches. + /// + /// ``` + /// # if cfg!(miri) { return Ok(()); } // miri takes too long + /// use regex_automata::{dfa::{dense, Automaton, StartKind}, Input}; + /// + /// // 3MB isn't enough! + /// dense::Builder::new() + /// .configure(dense::Config::new() + /// .dfa_size_limit(Some(3_000_000)) + /// .start_kind(StartKind::Unanchored) + /// ) + /// .build(r"\w{20}") + /// .unwrap_err(); + /// + /// // ... but 4MB probably is! + /// // (Note that DFA sizes aren't necessarily stable between releases.) + /// let dfa = dense::Builder::new() + /// .configure(dense::Config::new() + /// .dfa_size_limit(Some(4_000_000)) + /// .start_kind(StartKind::Unanchored) + /// ) + /// .build(r"\w{20}")?; + /// let haystack = "A".repeat(20).into_bytes(); + /// assert!(dfa.try_search_fwd(&Input::new(&haystack))?.is_some()); + /// + /// # Ok::<(), Box>(()) + /// ``` + pub fn dfa_size_limit(mut self, bytes: Option) -> Config { + self.dfa_size_limit = Some(bytes); + self + } + + /// Set a size limit on the total heap used by determinization. + /// + /// This size limit is expressed in bytes and is applied during + /// determinization of an NFA into a DFA. 
If the heap used for auxiliary + /// storage during determinization (memory that is not in the DFA but + /// necessary for building the DFA) exceeds this configured limit, then + /// determinization is stopped and an error is returned. + /// + /// This limit does not apply to heap used by the DFA itself. + /// + /// The total limit on heap used during determinization is the sum of the + /// DFA and determinization size limits. + /// + /// The default is no limit. + /// + /// # Example + /// + /// This example shows a DFA that fails to build because of a + /// configured size limit on the amount of heap space used by + /// determinization. This particular example complements the example for + /// [`Config::dfa_size_limit`] by demonstrating that not only does Unicode + /// potentially make DFAs themselves big, but it also results in more + /// auxiliary storage during determinization. (Although, auxiliary storage + /// is still not as much as the DFA itself.) + /// + /// ``` + /// # if cfg!(miri) { return Ok(()); } // miri takes too long + /// # if !cfg!(target_pointer_width = "64") { return Ok(()); } // see #1039 + /// use regex_automata::{dfa::{dense, Automaton}, Input}; + /// + /// // 700KB isn't enough! + /// dense::Builder::new() + /// .configure(dense::Config::new() + /// .determinize_size_limit(Some(700_000)) + /// ) + /// .build(r"\w{20}") + /// .unwrap_err(); + /// + /// // ... but 800KB probably is! + /// // (Note that auxiliary storage sizes aren't necessarily stable between + /// // releases.) + /// let dfa = dense::Builder::new() + /// .configure(dense::Config::new() + /// .determinize_size_limit(Some(800_000)) + /// ) + /// .build(r"\w{20}")?; + /// let haystack = "A".repeat(20).into_bytes(); + /// assert!(dfa.try_search_fwd(&Input::new(&haystack))?.is_some()); + /// + /// # Ok::<(), Box>(()) + /// ``` + /// + /// Note that some parts of the configuration on a DFA can have a + /// big impact on how big the DFA is, and thus, how much memory is + /// used. For example, the default setting for [`Config::start_kind`] is + /// [`StartKind::Both`]. But if you only need an anchored search, for + /// example, then it can be much cheaper to build a DFA that only supports + /// anchored searches. (Running an unanchored search with it would panic.) + /// + /// ``` + /// # if cfg!(miri) { return Ok(()); } // miri takes too long + /// # if !cfg!(target_pointer_width = "64") { return Ok(()); } // see #1039 + /// use regex_automata::{ + /// dfa::{dense, Automaton, StartKind}, + /// Anchored, Input, + /// }; + /// + /// // 200KB isn't enough! + /// dense::Builder::new() + /// .configure(dense::Config::new() + /// .determinize_size_limit(Some(200_000)) + /// .start_kind(StartKind::Anchored) + /// ) + /// .build(r"\w{20}") + /// .unwrap_err(); + /// + /// // ... but 300KB probably is! + /// // (Note that auxiliary storage sizes aren't necessarily stable between + /// // releases.) + /// let dfa = dense::Builder::new() + /// .configure(dense::Config::new() + /// .determinize_size_limit(Some(300_000)) + /// .start_kind(StartKind::Anchored) + /// ) + /// .build(r"\w{20}")?; + /// let haystack = "A".repeat(20).into_bytes(); + /// let input = Input::new(&haystack).anchored(Anchored::Yes); + /// assert!(dfa.try_search_fwd(&input)?.is_some()); + /// + /// # Ok::<(), Box>(()) + /// ``` + pub fn determinize_size_limit(mut self, bytes: Option) -> Config { + self.determinize_size_limit = Some(bytes); + self + } + + /// Returns whether this configuration has enabled simple state + /// acceleration. 
+ pub fn get_accelerate(&self) -> bool { + self.accelerate.unwrap_or(true) + } + + /// Returns the prefilter attached to this configuration, if any. + pub fn get_prefilter(&self) -> Option<&Prefilter> { + self.pre.as_ref().unwrap_or(&None).as_ref() + } + + /// Returns whether this configuration has enabled the expensive process + /// of minimizing a DFA. + pub fn get_minimize(&self) -> bool { + self.minimize.unwrap_or(false) + } + + /// Returns the match semantics set in this configuration. + pub fn get_match_kind(&self) -> MatchKind { + self.match_kind.unwrap_or(MatchKind::LeftmostFirst) + } + + /// Returns the starting state configuration for a DFA. + pub fn get_starts(&self) -> StartKind { + self.start_kind.unwrap_or(StartKind::Both) + } + + /// Returns whether this configuration has enabled anchored starting states + /// for every pattern in the DFA. + pub fn get_starts_for_each_pattern(&self) -> bool { + self.starts_for_each_pattern.unwrap_or(false) + } + + /// Returns whether this configuration has enabled byte classes or not. + /// This is typically a debugging oriented option, as disabling it confers + /// no speed benefit. + pub fn get_byte_classes(&self) -> bool { + self.byte_classes.unwrap_or(true) + } + + /// Returns whether this configuration has enabled heuristic Unicode word + /// boundary support. When enabled, it is possible for a search to return + /// an error. + pub fn get_unicode_word_boundary(&self) -> bool { + self.unicode_word_boundary.unwrap_or(false) + } + + /// Returns whether this configuration will instruct the DFA to enter a + /// quit state whenever the given byte is seen during a search. When at + /// least one byte has this enabled, it is possible for a search to return + /// an error. + pub fn get_quit(&self, byte: u8) -> bool { + self.quitset.map_or(false, |q| q.contains(byte)) + } + + /// Returns whether this configuration will instruct the DFA to + /// "specialize" start states. When enabled, the DFA will mark start states + /// as "special" so that search routines using the DFA can detect when + /// it's in a start state and do some kind of optimization (like run a + /// prefilter). + pub fn get_specialize_start_states(&self) -> bool { + self.specialize_start_states.unwrap_or(false) + } + + /// Returns the DFA size limit of this configuration if one was set. + /// The size limit is total number of bytes on the heap that a DFA is + /// permitted to use. If the DFA exceeds this limit during construction, + /// then construction is stopped and an error is returned. + pub fn get_dfa_size_limit(&self) -> Option { + self.dfa_size_limit.unwrap_or(None) + } + + /// Returns the determinization size limit of this configuration if one + /// was set. The size limit is total number of bytes on the heap that + /// determinization is permitted to use. If determinization exceeds this + /// limit during construction, then construction is stopped and an error is + /// returned. + /// + /// This is different from the DFA size limit in that this only applies to + /// the auxiliary storage used during determinization. Once determinization + /// is complete, this memory is freed. + /// + /// The limit on the total heap memory used is the sum of the DFA and + /// determinization size limits. + pub fn get_determinize_size_limit(&self) -> Option { + self.determinize_size_limit.unwrap_or(None) + } + + /// Overwrite the default configuration such that the options in `o` are + /// always used. If an option in `o` is not set, then the corresponding + /// option in `self` is used. 
If it's not set in `self` either, then it + /// remains not set. + pub(crate) fn overwrite(&self, o: Config) -> Config { + Config { + accelerate: o.accelerate.or(self.accelerate), + pre: o.pre.or_else(|| self.pre.clone()), + minimize: o.minimize.or(self.minimize), + match_kind: o.match_kind.or(self.match_kind), + start_kind: o.start_kind.or(self.start_kind), + starts_for_each_pattern: o + .starts_for_each_pattern + .or(self.starts_for_each_pattern), + byte_classes: o.byte_classes.or(self.byte_classes), + unicode_word_boundary: o + .unicode_word_boundary + .or(self.unicode_word_boundary), + quitset: o.quitset.or(self.quitset), + specialize_start_states: o + .specialize_start_states + .or(self.specialize_start_states), + dfa_size_limit: o.dfa_size_limit.or(self.dfa_size_limit), + determinize_size_limit: o + .determinize_size_limit + .or(self.determinize_size_limit), + } + } +} + +/// A builder for constructing a deterministic finite automaton from regular +/// expressions. +/// +/// This builder provides two main things: +/// +/// 1. It provides a few different `build` routines for actually constructing +/// a DFA from different kinds of inputs. The most convenient is +/// [`Builder::build`], which builds a DFA directly from a pattern string. The +/// most flexible is [`Builder::build_from_nfa`], which builds a DFA straight +/// from an NFA. +/// 2. The builder permits configuring a number of things. +/// [`Builder::configure`] is used with [`Config`] to configure aspects of +/// the DFA and the construction process itself. [`Builder::syntax`] and +/// [`Builder::thompson`] permit configuring the regex parser and Thompson NFA +/// construction, respectively. The syntax and thompson configurations only +/// apply when building from a pattern string. +/// +/// This builder always constructs a *single* DFA. As such, this builder +/// can only be used to construct regexes that either detect the presence +/// of a match or find the end location of a match. A single DFA cannot +/// produce both the start and end of a match. For that information, use a +/// [`Regex`](crate::dfa::regex::Regex), which can be similarly configured +/// using [`regex::Builder`](crate::dfa::regex::Builder). The main reason to +/// use a DFA directly is if the end location of a match is enough for your use +/// case. Namely, a `Regex` will construct two DFAs instead of one, since a +/// second reverse DFA is needed to find the start of a match. +/// +/// Note that if one wants to build a sparse DFA, you must first build a dense +/// DFA and convert that to a sparse DFA. There is no way to build a sparse +/// DFA without first building a dense DFA. +/// +/// # Example +/// +/// This example shows how to build a minimized DFA that completely disables +/// Unicode. That is: +/// +/// * Things such as `\w`, `.` and `\b` are no longer Unicode-aware. `\w` +/// and `\b` are ASCII-only while `.` matches any byte except for `\n` +/// (instead of any UTF-8 encoding of a Unicode scalar value except for +/// `\n`). Things that are Unicode only, such as `\pL`, are not allowed. +/// * The pattern itself is permitted to match invalid UTF-8. For example, +/// things like `[^a]` that match any byte except for `a` are permitted. 
+/// +/// ``` +/// use regex_automata::{ +/// dfa::{Automaton, dense}, +/// util::syntax, +/// HalfMatch, Input, +/// }; +/// +/// let dfa = dense::Builder::new() +/// .configure(dense::Config::new().minimize(false)) +/// .syntax(syntax::Config::new().unicode(false).utf8(false)) +/// .build(r"foo[^b]ar.*")?; +/// +/// let haystack = b"\xFEfoo\xFFar\xE2\x98\xFF\n"; +/// let expected = Some(HalfMatch::must(0, 10)); +/// let got = dfa.try_search_fwd(&Input::new(haystack))?; +/// assert_eq!(expected, got); +/// +/// # Ok::<(), Box>(()) +/// ``` +#[cfg(feature = "dfa-build")] +#[derive(Clone, Debug)] +pub struct Builder { + config: Config, + #[cfg(feature = "syntax")] + thompson: thompson::Compiler, +} + +#[cfg(feature = "dfa-build")] +impl Builder { + /// Create a new dense DFA builder with the default configuration. + pub fn new() -> Builder { + Builder { + config: Config::default(), + #[cfg(feature = "syntax")] + thompson: thompson::Compiler::new(), + } + } + + /// Build a DFA from the given pattern. + /// + /// If there was a problem parsing or compiling the pattern, then an error + /// is returned. + #[cfg(feature = "syntax")] + pub fn build(&self, pattern: &str) -> Result { + self.build_many(&[pattern]) + } + + /// Build a DFA from the given patterns. + /// + /// When matches are returned, the pattern ID corresponds to the index of + /// the pattern in the slice given. + #[cfg(feature = "syntax")] + pub fn build_many>( + &self, + patterns: &[P], + ) -> Result { + let nfa = self + .thompson + .clone() + // We can always forcefully disable captures because DFAs do not + // support them. + .configure( + thompson::Config::new() + .which_captures(thompson::WhichCaptures::None), + ) + .build_many(patterns) + .map_err(BuildError::nfa)?; + self.build_from_nfa(&nfa) + } + + /// Build a DFA from the given NFA. + /// + /// # Example + /// + /// This example shows how to build a DFA if you already have an NFA in + /// hand. + /// + /// ``` + /// use regex_automata::{ + /// dfa::{Automaton, dense}, + /// nfa::thompson::NFA, + /// HalfMatch, Input, + /// }; + /// + /// let haystack = "foo123bar".as_bytes(); + /// + /// // This shows how to set non-default options for building an NFA. + /// let nfa = NFA::compiler() + /// .configure(NFA::config().shrink(true)) + /// .build(r"[0-9]+")?; + /// let dfa = dense::Builder::new().build_from_nfa(&nfa)?; + /// let expected = Some(HalfMatch::must(0, 6)); + /// let got = dfa.try_search_fwd(&Input::new(haystack))?; + /// assert_eq!(expected, got); + /// + /// # Ok::<(), Box>(()) + /// ``` + pub fn build_from_nfa( + &self, + nfa: &thompson::NFA, + ) -> Result { + let mut quitset = self.config.quitset.unwrap_or(ByteSet::empty()); + if self.config.get_unicode_word_boundary() + && nfa.look_set_any().contains_word_unicode() + { + for b in 0x80..=0xFF { + quitset.add(b); + } + } + let classes = if !self.config.get_byte_classes() { + // DFAs will always use the equivalence class map, but enabling + // this option is useful for debugging. Namely, this will cause all + // transitions to be defined over their actual bytes instead of an + // opaque equivalence class identifier. The former is much easier + // to grok as a human. + ByteClasses::singletons() + } else { + let mut set = nfa.byte_class_set().clone(); + // It is important to distinguish any "quit" bytes from all other + // bytes. Otherwise, a non-quit byte may end up in the same + // class as a quit byte, and thus cause the DFA to stop when it + // shouldn't. 
+ // + // Test case: + // + // regex-cli find match dense --unicode-word-boundary \ + // -p '^#' -p '\b10\.55\.182\.100\b' -y @conn.json.1000x.log + if !quitset.is_empty() { + set.add_set(&quitset); + } + set.byte_classes() + }; + + let mut dfa = DFA::initial( + classes, + nfa.pattern_len(), + self.config.get_starts(), + nfa.look_matcher(), + self.config.get_starts_for_each_pattern(), + self.config.get_prefilter().map(|p| p.clone()), + quitset, + Flags::from_nfa(&nfa), + )?; + determinize::Config::new() + .match_kind(self.config.get_match_kind()) + .quit(quitset) + .dfa_size_limit(self.config.get_dfa_size_limit()) + .determinize_size_limit(self.config.get_determinize_size_limit()) + .run(nfa, &mut dfa)?; + if self.config.get_minimize() { + dfa.minimize(); + } + if self.config.get_accelerate() { + dfa.accelerate(); + } + // The state shuffling done before this point always assumes that start + // states should be marked as "special," even though it isn't the + // default configuration. State shuffling is complex enough as it is, + // so it's simpler to just "fix" our special state ID ranges to not + // include starting states after-the-fact. + if !self.config.get_specialize_start_states() { + dfa.special.set_no_special_start_states(); + } + // Look for and set the universal starting states. + dfa.set_universal_starts(); + dfa.tt.table.shrink_to_fit(); + dfa.st.table.shrink_to_fit(); + dfa.ms.slices.shrink_to_fit(); + dfa.ms.pattern_ids.shrink_to_fit(); + Ok(dfa) + } + + /// Apply the given dense DFA configuration options to this builder. + pub fn configure(&mut self, config: Config) -> &mut Builder { + self.config = self.config.overwrite(config); + self + } + + /// Set the syntax configuration for this builder using + /// [`syntax::Config`](crate::util::syntax::Config). + /// + /// This permits setting things like case insensitivity, Unicode and multi + /// line mode. + /// + /// These settings only apply when constructing a DFA directly from a + /// pattern. + #[cfg(feature = "syntax")] + pub fn syntax( + &mut self, + config: crate::util::syntax::Config, + ) -> &mut Builder { + self.thompson.syntax(config); + self + } + + /// Set the Thompson NFA configuration for this builder using + /// [`nfa::thompson::Config`](crate::nfa::thompson::Config). + /// + /// This permits setting things like whether the DFA should match the regex + /// in reverse or if additional time should be spent shrinking the size of + /// the NFA. + /// + /// These settings only apply when constructing a DFA directly from a + /// pattern. + #[cfg(feature = "syntax")] + pub fn thompson(&mut self, config: thompson::Config) -> &mut Builder { + self.thompson.configure(config); + self + } +} + +#[cfg(feature = "dfa-build")] +impl Default for Builder { + fn default() -> Builder { + Builder::new() + } +} + +/// A convenience alias for an owned DFA. We use this particular instantiation +/// a lot in this crate, so it's worth giving it a name. This instantiation +/// is commonly used for mutable APIs on the DFA while building it. The main +/// reason for making DFAs generic is no_std support, and more generally, +/// making it possible to load a DFA from an arbitrary slice of bytes. +#[cfg(feature = "alloc")] +pub(crate) type OwnedDFA = DFA>; + +/// A dense table-based deterministic finite automaton (DFA). +/// +/// All dense DFAs have one or more start states, zero or more match states +/// and a transition table that maps the current state and the current byte +/// of input to the next state. 
A DFA can use this information to implement +/// fast searching. In particular, the use of a dense DFA generally makes the +/// trade off that match speed is the most valuable characteristic, even if +/// building the DFA may take significant time *and* space. (More concretely, +/// building a DFA takes time and space that is exponential in the size of the +/// pattern in the worst case.) As such, the processing of every byte of input +/// is done with a small constant number of operations that does not vary with +/// the pattern, its size or the size of the alphabet. If your needs don't line +/// up with this trade off, then a dense DFA may not be an adequate solution to +/// your problem. +/// +/// In contrast, a [`sparse::DFA`] makes the opposite +/// trade off: it uses less space but will execute a variable number of +/// instructions per byte at match time, which makes it slower for matching. +/// (Note that space usage is still exponential in the size of the pattern in +/// the worst case.) +/// +/// A DFA can be built using the default configuration via the +/// [`DFA::new`] constructor. Otherwise, one can +/// configure various aspects via [`dense::Builder`](Builder). +/// +/// A single DFA fundamentally supports the following operations: +/// +/// 1. Detection of a match. +/// 2. Location of the end of a match. +/// 3. In the case of a DFA with multiple patterns, which pattern matched is +/// reported as well. +/// +/// A notable absence from the above list of capabilities is the location of +/// the *start* of a match. In order to provide both the start and end of +/// a match, *two* DFAs are required. This functionality is provided by a +/// [`Regex`](crate::dfa::regex::Regex). +/// +/// # Type parameters +/// +/// A `DFA` has one type parameter, `T`, which is used to represent state IDs, +/// pattern IDs and accelerators. `T` is typically a `Vec` or a `&[u32]`. +/// +/// # The `Automaton` trait +/// +/// This type implements the [`Automaton`] trait, which means it can be used +/// for searching. For example: +/// +/// ``` +/// use regex_automata::{dfa::{Automaton, dense::DFA}, HalfMatch, Input}; +/// +/// let dfa = DFA::new("foo[0-9]+")?; +/// let expected = HalfMatch::must(0, 8); +/// assert_eq!(Some(expected), dfa.try_search_fwd(&Input::new("foo12345"))?); +/// # Ok::<(), Box>(()) +/// ``` +#[derive(Clone)] +pub struct DFA { + /// The transition table for this DFA. This includes the transitions + /// themselves, along with the stride, number of states and the equivalence + /// class mapping. + tt: TransitionTable, + /// The set of starting state identifiers for this DFA. The starting state + /// IDs act as pointers into the transition table. The specific starting + /// state chosen for each search is dependent on the context at which the + /// search begins. + st: StartTable, + /// The set of match states and the patterns that match for each + /// corresponding match state. + /// + /// This structure is technically only needed because of support for + /// multi-regexes. Namely, multi-regexes require answering not just whether + /// a match exists, but _which_ patterns match. So we need to store the + /// matching pattern IDs for each match state. We do this even when there + /// is only one pattern for the sake of simplicity. In practice, this uses + /// up very little space for the case of one pattern. + ms: MatchStates, + /// Information about which states are "special." Special states are states + /// that are dead, quit, matching, starting or accelerated. 
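+    /// As a rough sketch of why this matters (this is not the actual search
+    /// loop, and `sid`, `last_match` and `quit_error` are made-up names),
+    /// search routines ask the cheap "is it special?" question first and
+    /// only then pay for the finer-grained checks:
+    ///
+    /// ```ignore
+    /// if dfa.is_special_state(sid) {
+    ///     if dfa.is_match_state(sid) {
+    ///         // record a match and continue (or stop, depending on the
+    ///         // configured match semantics)
+    ///     } else if dfa.is_dead_state(sid) {
+    ///         // no future input can lead to a match
+    ///         return Ok(last_match);
+    ///     } else if dfa.is_quit_state(sid) {
+    ///         return Err(quit_error);
+    ///     }
+    /// }
+    /// ```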
For more info, + /// see the docs for `Special`. + special: Special, + /// The accelerators for this DFA. + /// + /// If a state is accelerated, then there exist only a small number of + /// bytes that can cause the DFA to leave the state. This permits searching + /// to use optimized routines to find those specific bytes instead of using + /// the transition table. + /// + /// All accelerated states exist in a contiguous range in the DFA's + /// transition table. See dfa/special.rs for more details on how states are + /// arranged. + accels: Accels, + /// Any prefilter attached to this DFA. + /// + /// Note that currently prefilters are not serialized. When deserializing + /// a DFA from bytes, this is always set to `None`. + pre: Option, + /// The set of "quit" bytes for this DFA. + /// + /// This is only used when computing the start state for a particular + /// position in a haystack. Namely, in the case where there is a quit + /// byte immediately before the start of the search, this set needs to be + /// explicitly consulted. In all other cases, quit bytes are detected by + /// the DFA itself, by transitioning all quit bytes to a special "quit + /// state." + quitset: ByteSet, + /// Various flags describing the behavior of this DFA. + flags: Flags, +} + +#[cfg(feature = "dfa-build")] +impl OwnedDFA { + /// Parse the given regular expression using a default configuration and + /// return the corresponding DFA. + /// + /// If you want a non-default configuration, then use the + /// [`dense::Builder`](Builder) to set your own configuration. + /// + /// # Example + /// + /// ``` + /// use regex_automata::{dfa::{Automaton, dense}, HalfMatch, Input}; + /// + /// let dfa = dense::DFA::new("foo[0-9]+bar")?; + /// let expected = Some(HalfMatch::must(0, 11)); + /// assert_eq!(expected, dfa.try_search_fwd(&Input::new("foo12345bar"))?); + /// # Ok::<(), Box>(()) + /// ``` + #[cfg(feature = "syntax")] + pub fn new(pattern: &str) -> Result { + Builder::new().build(pattern) + } + + /// Parse the given regular expressions using a default configuration and + /// return the corresponding multi-DFA. + /// + /// If you want a non-default configuration, then use the + /// [`dense::Builder`](Builder) to set your own configuration. + /// + /// # Example + /// + /// ``` + /// use regex_automata::{dfa::{Automaton, dense}, HalfMatch, Input}; + /// + /// let dfa = dense::DFA::new_many(&["[0-9]+", "[a-z]+"])?; + /// let expected = Some(HalfMatch::must(1, 3)); + /// assert_eq!(expected, dfa.try_search_fwd(&Input::new("foo12345bar"))?); + /// # Ok::<(), Box>(()) + /// ``` + #[cfg(feature = "syntax")] + pub fn new_many>( + patterns: &[P], + ) -> Result { + Builder::new().build_many(patterns) + } +} + +#[cfg(feature = "dfa-build")] +impl OwnedDFA { + /// Create a new DFA that matches every input. + /// + /// # Example + /// + /// ``` + /// use regex_automata::{dfa::{Automaton, dense}, HalfMatch, Input}; + /// + /// let dfa = dense::DFA::always_match()?; + /// + /// let expected = Some(HalfMatch::must(0, 0)); + /// assert_eq!(expected, dfa.try_search_fwd(&Input::new(""))?); + /// assert_eq!(expected, dfa.try_search_fwd(&Input::new("foo"))?); + /// # Ok::<(), Box>(()) + /// ``` + pub fn always_match() -> Result { + let nfa = thompson::NFA::always_match(); + Builder::new().build_from_nfa(&nfa) + } + + /// Create a new DFA that never matches any input. 
+ /// + /// # Example + /// + /// ``` + /// use regex_automata::{dfa::{Automaton, dense}, Input}; + /// + /// let dfa = dense::DFA::never_match()?; + /// assert_eq!(None, dfa.try_search_fwd(&Input::new(""))?); + /// assert_eq!(None, dfa.try_search_fwd(&Input::new("foo"))?); + /// # Ok::<(), Box>(()) + /// ``` + pub fn never_match() -> Result { + let nfa = thompson::NFA::never_match(); + Builder::new().build_from_nfa(&nfa) + } + + /// Create an initial DFA with the given equivalence classes, pattern + /// length and whether anchored starting states are enabled for each + /// pattern. An initial DFA can be further mutated via determinization. + fn initial( + classes: ByteClasses, + pattern_len: usize, + starts: StartKind, + lookm: &LookMatcher, + starts_for_each_pattern: bool, + pre: Option, + quitset: ByteSet, + flags: Flags, + ) -> Result { + let start_pattern_len = + if starts_for_each_pattern { Some(pattern_len) } else { None }; + Ok(DFA { + tt: TransitionTable::minimal(classes), + st: StartTable::dead(starts, lookm, start_pattern_len)?, + ms: MatchStates::empty(pattern_len), + special: Special::new(), + accels: Accels::empty(), + pre, + quitset, + flags, + }) + } +} + +#[cfg(feature = "dfa-build")] +impl DFA<&[u32]> { + /// Return a new default dense DFA compiler configuration. + /// + /// This is a convenience routine to avoid needing to import the [`Config`] + /// type when customizing the construction of a dense DFA. + pub fn config() -> Config { + Config::new() + } + + /// Create a new dense DFA builder with the default configuration. + /// + /// This is a convenience routine to avoid needing to import the + /// [`Builder`] type in common cases. + pub fn builder() -> Builder { + Builder::new() + } +} + +impl> DFA { + /// Cheaply return a borrowed version of this dense DFA. Specifically, + /// the DFA returned always uses `&[u32]` for its transition table. + pub fn as_ref(&self) -> DFA<&'_ [u32]> { + DFA { + tt: self.tt.as_ref(), + st: self.st.as_ref(), + ms: self.ms.as_ref(), + special: self.special, + accels: self.accels(), + pre: self.pre.clone(), + quitset: self.quitset, + flags: self.flags, + } + } + + /// Return an owned version of this sparse DFA. Specifically, the DFA + /// returned always uses `Vec` for its transition table. + /// + /// Effectively, this returns a dense DFA whose transition table lives on + /// the heap. + #[cfg(feature = "alloc")] + pub fn to_owned(&self) -> OwnedDFA { + DFA { + tt: self.tt.to_owned(), + st: self.st.to_owned(), + ms: self.ms.to_owned(), + special: self.special, + accels: self.accels().to_owned(), + pre: self.pre.clone(), + quitset: self.quitset, + flags: self.flags, + } + } + + /// Returns the starting state configuration for this DFA. + /// + /// The default is [`StartKind::Both`], which means the DFA supports both + /// unanchored and anchored searches. However, this can generally lead to + /// bigger DFAs. Therefore, a DFA might be compiled with support for just + /// unanchored or anchored searches. In that case, running a search with + /// an unsupported configuration will panic. + pub fn start_kind(&self) -> StartKind { + self.st.kind + } + + /// Returns the start byte map used for computing the `Start` configuration + /// at the beginning of a search. + pub(crate) fn start_map(&self) -> &StartByteMap { + &self.st.start_map + } + + /// Returns true only if this DFA has starting states for each pattern. 
+ /// + /// When a DFA has starting states for each pattern, then a search with the + /// DFA can be configured to only look for anchored matches of a specific + /// pattern. Specifically, APIs like [`Automaton::try_search_fwd`] can + /// accept a non-None `pattern_id` if and only if this method returns true. + /// Otherwise, calling `try_search_fwd` will panic. + /// + /// Note that if the DFA has no patterns, this always returns false. + pub fn starts_for_each_pattern(&self) -> bool { + self.st.pattern_len.is_some() + } + + /// Returns the equivalence classes that make up the alphabet for this DFA. + /// + /// Unless [`Config::byte_classes`] was disabled, it is possible that + /// multiple distinct bytes are grouped into the same equivalence class + /// if it is impossible for them to discriminate between a match and a + /// non-match. This has the effect of reducing the overall alphabet size + /// and in turn potentially substantially reducing the size of the DFA's + /// transition table. + /// + /// The downside of using equivalence classes like this is that every state + /// transition will automatically use this map to convert an arbitrary + /// byte to its corresponding equivalence class. In practice this has a + /// negligible impact on performance. + pub fn byte_classes(&self) -> &ByteClasses { + &self.tt.classes + } + + /// Returns the total number of elements in the alphabet for this DFA. + /// + /// That is, this returns the total number of transitions that each state + /// in this DFA must have. Typically, a normal byte oriented DFA would + /// always have an alphabet size of 256, corresponding to the number of + /// unique values in a single byte. However, this implementation has two + /// peculiarities that impact the alphabet length: + /// + /// * Every state has a special "EOI" transition that is only followed + /// after the end of some haystack is reached. This EOI transition is + /// necessary to account for one byte of look-ahead when implementing + /// things like `\b` and `$`. + /// * Bytes are grouped into equivalence classes such that no two bytes in + /// the same class can distinguish a match from a non-match. For example, + /// in the regex `^[a-z]+$`, the ASCII bytes `a-z` could all be in the + /// same equivalence class. This leads to a massive space savings. + /// + /// Note though that the alphabet length does _not_ necessarily equal the + /// total stride space taken up by a single DFA state in the transition + /// table. Namely, for performance reasons, the stride is always the + /// smallest power of two that is greater than or equal to the alphabet + /// length. For this reason, [`DFA::stride`] or [`DFA::stride2`] are + /// often more useful. The alphabet length is typically useful only for + /// informational purposes. + pub fn alphabet_len(&self) -> usize { + self.tt.alphabet_len() + } + + /// Returns the total stride for every state in this DFA, expressed as the + /// exponent of a power of 2. The stride is the amount of space each state + /// takes up in the transition table, expressed as a number of transitions. + /// (Unused transitions map to dead states.) + /// + /// The stride of a DFA is always equivalent to the smallest power of 2 + /// that is greater than or equal to the DFA's alphabet length. This + /// definition uses extra space, but permits faster translation between + /// premultiplied state identifiers and contiguous indices (by using shifts + /// instead of relying on integer division). 
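+    ///
+    /// One way to see the relationship concretely (the pattern below is
+    /// arbitrary):
+    ///
+    /// ```
+    /// use regex_automata::dfa::dense::DFA;
+    ///
+    /// let dfa = DFA::new("foo[0-9]+")?;
+    /// assert_eq!(dfa.stride(), 1 << dfa.stride2());
+    /// # Ok::<(), Box<dyn std::error::Error>>(())
+    /// ```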
+ /// + /// For example, if the DFA's stride is 16 transitions, then its `stride2` + /// is `4` since `2^4 = 16`. + /// + /// The minimum `stride2` value is `1` (corresponding to a stride of `2`) + /// while the maximum `stride2` value is `9` (corresponding to a stride of + /// `512`). The maximum is not `8` since the maximum alphabet size is `257` + /// when accounting for the special EOI transition. However, an alphabet + /// length of that size is exceptionally rare since the alphabet is shrunk + /// into equivalence classes. + pub fn stride2(&self) -> usize { + self.tt.stride2 + } + + /// Returns the total stride for every state in this DFA. This corresponds + /// to the total number of transitions used by each state in this DFA's + /// transition table. + /// + /// Please see [`DFA::stride2`] for more information. In particular, this + /// returns the stride as the number of transitions, where as `stride2` + /// returns it as the exponent of a power of 2. + pub fn stride(&self) -> usize { + self.tt.stride() + } + + /// Returns the memory usage, in bytes, of this DFA. + /// + /// The memory usage is computed based on the number of bytes used to + /// represent this DFA. + /// + /// This does **not** include the stack size used up by this DFA. To + /// compute that, use `std::mem::size_of::()`. + pub fn memory_usage(&self) -> usize { + self.tt.memory_usage() + + self.st.memory_usage() + + self.ms.memory_usage() + + self.accels.memory_usage() + } +} + +/// Routines for converting a dense DFA to other representations, such as +/// sparse DFAs or raw bytes suitable for persistent storage. +impl> DFA { + /// Convert this dense DFA to a sparse DFA. + /// + /// If a `StateID` is too small to represent all states in the sparse + /// DFA, then this returns an error. In most cases, if a dense DFA is + /// constructable with `StateID` then a sparse DFA will be as well. + /// However, it is not guaranteed. + /// + /// # Example + /// + /// ``` + /// use regex_automata::{dfa::{Automaton, dense}, HalfMatch, Input}; + /// + /// let dense = dense::DFA::new("foo[0-9]+")?; + /// let sparse = dense.to_sparse()?; + /// + /// let expected = Some(HalfMatch::must(0, 8)); + /// assert_eq!(expected, sparse.try_search_fwd(&Input::new("foo12345"))?); + /// # Ok::<(), Box>(()) + /// ``` + #[cfg(feature = "dfa-build")] + pub fn to_sparse(&self) -> Result>, BuildError> { + sparse::DFA::from_dense(self) + } + + /// Serialize this DFA as raw bytes to a `Vec` in little endian + /// format. Upon success, the `Vec` and the initial padding length are + /// returned. + /// + /// The written bytes are guaranteed to be deserialized correctly and + /// without errors in a semver compatible release of this crate by a + /// `DFA`'s deserialization APIs (assuming all other criteria for the + /// deserialization APIs has been satisfied): + /// + /// * [`DFA::from_bytes`] + /// * [`DFA::from_bytes_unchecked`] + /// + /// The padding returned is non-zero if the returned `Vec` starts at + /// an address that does not have the same alignment as `u32`. The padding + /// corresponds to the number of leading bytes written to the returned + /// `Vec`. + /// + /// # Example + /// + /// This example shows how to serialize and deserialize a DFA: + /// + /// ``` + /// use regex_automata::{dfa::{Automaton, dense::DFA}, HalfMatch, Input}; + /// + /// // Compile our original DFA. + /// let original_dfa = DFA::new("foo[0-9]+")?; + /// + /// // N.B. 
We use native endianness here to make the example work, but + /// // using to_bytes_little_endian would work on a little endian target. + /// let (buf, _) = original_dfa.to_bytes_native_endian(); + /// // Even if buf has initial padding, DFA::from_bytes will automatically + /// // ignore it. + /// let dfa: DFA<&[u32]> = DFA::from_bytes(&buf)?.0; + /// + /// let expected = Some(HalfMatch::must(0, 8)); + /// assert_eq!(expected, dfa.try_search_fwd(&Input::new("foo12345"))?); + /// # Ok::<(), Box>(()) + /// ``` + #[cfg(feature = "dfa-build")] + pub fn to_bytes_little_endian(&self) -> (Vec, usize) { + self.to_bytes::() + } + + /// Serialize this DFA as raw bytes to a `Vec` in big endian + /// format. Upon success, the `Vec` and the initial padding length are + /// returned. + /// + /// The written bytes are guaranteed to be deserialized correctly and + /// without errors in a semver compatible release of this crate by a + /// `DFA`'s deserialization APIs (assuming all other criteria for the + /// deserialization APIs has been satisfied): + /// + /// * [`DFA::from_bytes`] + /// * [`DFA::from_bytes_unchecked`] + /// + /// The padding returned is non-zero if the returned `Vec` starts at + /// an address that does not have the same alignment as `u32`. The padding + /// corresponds to the number of leading bytes written to the returned + /// `Vec`. + /// + /// # Example + /// + /// This example shows how to serialize and deserialize a DFA: + /// + /// ``` + /// use regex_automata::{dfa::{Automaton, dense::DFA}, HalfMatch, Input}; + /// + /// // Compile our original DFA. + /// let original_dfa = DFA::new("foo[0-9]+")?; + /// + /// // N.B. We use native endianness here to make the example work, but + /// // using to_bytes_big_endian would work on a big endian target. + /// let (buf, _) = original_dfa.to_bytes_native_endian(); + /// // Even if buf has initial padding, DFA::from_bytes will automatically + /// // ignore it. + /// let dfa: DFA<&[u32]> = DFA::from_bytes(&buf)?.0; + /// + /// let expected = Some(HalfMatch::must(0, 8)); + /// assert_eq!(expected, dfa.try_search_fwd(&Input::new("foo12345"))?); + /// # Ok::<(), Box>(()) + /// ``` + #[cfg(feature = "dfa-build")] + pub fn to_bytes_big_endian(&self) -> (Vec, usize) { + self.to_bytes::() + } + + /// Serialize this DFA as raw bytes to a `Vec` in native endian + /// format. Upon success, the `Vec` and the initial padding length are + /// returned. + /// + /// The written bytes are guaranteed to be deserialized correctly and + /// without errors in a semver compatible release of this crate by a + /// `DFA`'s deserialization APIs (assuming all other criteria for the + /// deserialization APIs has been satisfied): + /// + /// * [`DFA::from_bytes`] + /// * [`DFA::from_bytes_unchecked`] + /// + /// The padding returned is non-zero if the returned `Vec` starts at + /// an address that does not have the same alignment as `u32`. The padding + /// corresponds to the number of leading bytes written to the returned + /// `Vec`. + /// + /// Generally speaking, native endian format should only be used when + /// you know that the target you're compiling the DFA for matches the + /// endianness of the target on which you're compiling DFA. For example, + /// if serialization and deserialization happen in the same process or on + /// the same machine. 
Otherwise, when serializing a DFA for use in a + /// portable environment, you'll almost certainly want to serialize _both_ + /// a little endian and a big endian version and then load the correct one + /// based on the target's configuration. + /// + /// # Example + /// + /// This example shows how to serialize and deserialize a DFA: + /// + /// ``` + /// use regex_automata::{dfa::{Automaton, dense::DFA}, HalfMatch, Input}; + /// + /// // Compile our original DFA. + /// let original_dfa = DFA::new("foo[0-9]+")?; + /// + /// let (buf, _) = original_dfa.to_bytes_native_endian(); + /// // Even if buf has initial padding, DFA::from_bytes will automatically + /// // ignore it. + /// let dfa: DFA<&[u32]> = DFA::from_bytes(&buf)?.0; + /// + /// let expected = Some(HalfMatch::must(0, 8)); + /// assert_eq!(expected, dfa.try_search_fwd(&Input::new("foo12345"))?); + /// # Ok::<(), Box>(()) + /// ``` + #[cfg(feature = "dfa-build")] + pub fn to_bytes_native_endian(&self) -> (Vec, usize) { + self.to_bytes::() + } + + /// The implementation of the public `to_bytes` serialization methods, + /// which is generic over endianness. + #[cfg(feature = "dfa-build")] + fn to_bytes(&self) -> (Vec, usize) { + let len = self.write_to_len(); + let (mut buf, padding) = wire::alloc_aligned_buffer::(len); + // This should always succeed since the only possible serialization + // error is providing a buffer that's too small, but we've ensured that + // `buf` is big enough here. + self.as_ref().write_to::(&mut buf[padding..]).unwrap(); + (buf, padding) + } + + /// Serialize this DFA as raw bytes to the given slice, in little endian + /// format. Upon success, the total number of bytes written to `dst` is + /// returned. + /// + /// The written bytes are guaranteed to be deserialized correctly and + /// without errors in a semver compatible release of this crate by a + /// `DFA`'s deserialization APIs (assuming all other criteria for the + /// deserialization APIs has been satisfied): + /// + /// * [`DFA::from_bytes`] + /// * [`DFA::from_bytes_unchecked`] + /// + /// Note that unlike the various `to_byte_*` routines, this does not write + /// any padding. Callers are responsible for handling alignment correctly. + /// + /// # Errors + /// + /// This returns an error if the given destination slice is not big enough + /// to contain the full serialized DFA. If an error occurs, then nothing + /// is written to `dst`. + /// + /// # Example + /// + /// This example shows how to serialize and deserialize a DFA without + /// dynamic memory allocation. + /// + /// ``` + /// use regex_automata::{dfa::{Automaton, dense::DFA}, HalfMatch, Input}; + /// + /// // Compile our original DFA. + /// let original_dfa = DFA::new("foo[0-9]+")?; + /// + /// // Create a 4KB buffer on the stack to store our serialized DFA. We + /// // need to use a special type to force the alignment of our [u8; N] + /// // array to be aligned to a 4 byte boundary. Otherwise, deserializing + /// // the DFA may fail because of an alignment mismatch. + /// #[repr(C)] + /// struct Aligned { + /// _align: [u32; 0], + /// bytes: B, + /// } + /// let mut buf = Aligned { _align: [], bytes: [0u8; 4 * (1<<10)] }; + /// // N.B. We use native endianness here to make the example work, but + /// // using write_to_little_endian would work on a little endian target. 
+ /// let written = original_dfa.write_to_native_endian(&mut buf.bytes)?; + /// let dfa: DFA<&[u32]> = DFA::from_bytes(&buf.bytes[..written])?.0; + /// + /// let expected = Some(HalfMatch::must(0, 8)); + /// assert_eq!(expected, dfa.try_search_fwd(&Input::new("foo12345"))?); + /// # Ok::<(), Box>(()) + /// ``` + pub fn write_to_little_endian( + &self, + dst: &mut [u8], + ) -> Result { + self.as_ref().write_to::(dst) + } + + /// Serialize this DFA as raw bytes to the given slice, in big endian + /// format. Upon success, the total number of bytes written to `dst` is + /// returned. + /// + /// The written bytes are guaranteed to be deserialized correctly and + /// without errors in a semver compatible release of this crate by a + /// `DFA`'s deserialization APIs (assuming all other criteria for the + /// deserialization APIs has been satisfied): + /// + /// * [`DFA::from_bytes`] + /// * [`DFA::from_bytes_unchecked`] + /// + /// Note that unlike the various `to_byte_*` routines, this does not write + /// any padding. Callers are responsible for handling alignment correctly. + /// + /// # Errors + /// + /// This returns an error if the given destination slice is not big enough + /// to contain the full serialized DFA. If an error occurs, then nothing + /// is written to `dst`. + /// + /// # Example + /// + /// This example shows how to serialize and deserialize a DFA without + /// dynamic memory allocation. + /// + /// ``` + /// use regex_automata::{dfa::{Automaton, dense::DFA}, HalfMatch, Input}; + /// + /// // Compile our original DFA. + /// let original_dfa = DFA::new("foo[0-9]+")?; + /// + /// // Create a 4KB buffer on the stack to store our serialized DFA. We + /// // need to use a special type to force the alignment of our [u8; N] + /// // array to be aligned to a 4 byte boundary. Otherwise, deserializing + /// // the DFA may fail because of an alignment mismatch. + /// #[repr(C)] + /// struct Aligned { + /// _align: [u32; 0], + /// bytes: B, + /// } + /// let mut buf = Aligned { _align: [], bytes: [0u8; 4 * (1<<10)] }; + /// // N.B. We use native endianness here to make the example work, but + /// // using write_to_big_endian would work on a big endian target. + /// let written = original_dfa.write_to_native_endian(&mut buf.bytes)?; + /// let dfa: DFA<&[u32]> = DFA::from_bytes(&buf.bytes[..written])?.0; + /// + /// let expected = Some(HalfMatch::must(0, 8)); + /// assert_eq!(expected, dfa.try_search_fwd(&Input::new("foo12345"))?); + /// # Ok::<(), Box>(()) + /// ``` + pub fn write_to_big_endian( + &self, + dst: &mut [u8], + ) -> Result { + self.as_ref().write_to::(dst) + } + + /// Serialize this DFA as raw bytes to the given slice, in native endian + /// format. Upon success, the total number of bytes written to `dst` is + /// returned. + /// + /// The written bytes are guaranteed to be deserialized correctly and + /// without errors in a semver compatible release of this crate by a + /// `DFA`'s deserialization APIs (assuming all other criteria for the + /// deserialization APIs has been satisfied): + /// + /// * [`DFA::from_bytes`] + /// * [`DFA::from_bytes_unchecked`] + /// + /// Generally speaking, native endian format should only be used when + /// you know that the target you're compiling the DFA for matches the + /// endianness of the target on which you're compiling DFA. For example, + /// if serialization and deserialization happen in the same process or on + /// the same machine. 
Otherwise, when serializing a DFA for use in a + /// portable environment, you'll almost certainly want to serialize _both_ + /// a little endian and a big endian version and then load the correct one + /// based on the target's configuration. + /// + /// Note that unlike the various `to_byte_*` routines, this does not write + /// any padding. Callers are responsible for handling alignment correctly. + /// + /// # Errors + /// + /// This returns an error if the given destination slice is not big enough + /// to contain the full serialized DFA. If an error occurs, then nothing + /// is written to `dst`. + /// + /// # Example + /// + /// This example shows how to serialize and deserialize a DFA without + /// dynamic memory allocation. + /// + /// ``` + /// use regex_automata::{dfa::{Automaton, dense::DFA}, HalfMatch, Input}; + /// + /// // Compile our original DFA. + /// let original_dfa = DFA::new("foo[0-9]+")?; + /// + /// // Create a 4KB buffer on the stack to store our serialized DFA. We + /// // need to use a special type to force the alignment of our [u8; N] + /// // array to be aligned to a 4 byte boundary. Otherwise, deserializing + /// // the DFA may fail because of an alignment mismatch. + /// #[repr(C)] + /// struct Aligned { + /// _align: [u32; 0], + /// bytes: B, + /// } + /// let mut buf = Aligned { _align: [], bytes: [0u8; 4 * (1<<10)] }; + /// let written = original_dfa.write_to_native_endian(&mut buf.bytes)?; + /// let dfa: DFA<&[u32]> = DFA::from_bytes(&buf.bytes[..written])?.0; + /// + /// let expected = Some(HalfMatch::must(0, 8)); + /// assert_eq!(expected, dfa.try_search_fwd(&Input::new("foo12345"))?); + /// # Ok::<(), Box>(()) + /// ``` + pub fn write_to_native_endian( + &self, + dst: &mut [u8], + ) -> Result { + self.as_ref().write_to::(dst) + } + + /// Return the total number of bytes required to serialize this DFA. + /// + /// This is useful for determining the size of the buffer required to pass + /// to one of the serialization routines: + /// + /// * [`DFA::write_to_little_endian`] + /// * [`DFA::write_to_big_endian`] + /// * [`DFA::write_to_native_endian`] + /// + /// Passing a buffer smaller than the size returned by this method will + /// result in a serialization error. Serialization routines are guaranteed + /// to succeed when the buffer is big enough. + /// + /// # Example + /// + /// This example shows how to dynamically allocate enough room to serialize + /// a DFA. + /// + /// ``` + /// use regex_automata::{dfa::{Automaton, dense::DFA}, HalfMatch, Input}; + /// + /// let original_dfa = DFA::new("foo[0-9]+")?; + /// + /// let mut buf = vec![0; original_dfa.write_to_len()]; + /// // This is guaranteed to succeed, because the only serialization error + /// // that can occur is when the provided buffer is too small. But + /// // write_to_len guarantees a correct size. + /// let written = original_dfa.write_to_native_endian(&mut buf).unwrap(); + /// // But this is not guaranteed to succeed! In particular, + /// // deserialization requires proper alignment for &[u32], but our buffer + /// // was allocated as a &[u8] whose required alignment is smaller than + /// // &[u32]. However, it's likely to work in practice because of how most + /// // allocators work. So if you write code like this, make sure to either + /// // handle the error correctly and/or run it under Miri since Miri will + /// // likely provoke the error by returning Vec buffers with alignment + /// // less than &[u32]. 
+ /// let dfa: DFA<&[u32]> = match DFA::from_bytes(&buf[..written]) { + /// // As mentioned above, it is legal for an error to be returned + /// // here. It is quite difficult to get a Vec with a guaranteed + /// // alignment equivalent to Vec. + /// Err(_) => return Ok(()), + /// Ok((dfa, _)) => dfa, + /// }; + /// + /// let expected = Some(HalfMatch::must(0, 8)); + /// assert_eq!(expected, dfa.try_search_fwd(&Input::new("foo12345"))?); + /// # Ok::<(), Box>(()) + /// ``` + /// + /// Note that this example isn't actually guaranteed to work! In + /// particular, if `buf` is not aligned to a 4-byte boundary, then the + /// `DFA::from_bytes` call will fail. If you need this to work, then you + /// either need to deal with adding some initial padding yourself, or use + /// one of the `to_bytes` methods, which will do it for you. + pub fn write_to_len(&self) -> usize { + wire::write_label_len(LABEL) + + wire::write_endianness_check_len() + + wire::write_version_len() + + size_of::() // unused, intended for future flexibility + + self.flags.write_to_len() + + self.tt.write_to_len() + + self.st.write_to_len() + + self.ms.write_to_len() + + self.special.write_to_len() + + self.accels.write_to_len() + + self.quitset.write_to_len() + } +} + +impl<'a> DFA<&'a [u32]> { + /// Safely deserialize a DFA with a specific state identifier + /// representation. Upon success, this returns both the deserialized DFA + /// and the number of bytes read from the given slice. Namely, the contents + /// of the slice beyond the DFA are not read. + /// + /// Deserializing a DFA using this routine will never allocate heap memory. + /// For safety purposes, the DFA's transition table will be verified such + /// that every transition points to a valid state. If this verification is + /// too costly, then a [`DFA::from_bytes_unchecked`] API is provided, which + /// will always execute in constant time. + /// + /// The bytes given must be generated by one of the serialization APIs + /// of a `DFA` using a semver compatible release of this crate. Those + /// include: + /// + /// * [`DFA::to_bytes_little_endian`] + /// * [`DFA::to_bytes_big_endian`] + /// * [`DFA::to_bytes_native_endian`] + /// * [`DFA::write_to_little_endian`] + /// * [`DFA::write_to_big_endian`] + /// * [`DFA::write_to_native_endian`] + /// + /// The `to_bytes` methods allocate and return a `Vec` for you, along + /// with handling alignment correctly. The `write_to` methods do not + /// allocate and write to an existing slice (which may be on the stack). + /// Since deserialization always uses the native endianness of the target + /// platform, the serialization API you use should match the endianness of + /// the target platform. (It's often a good idea to generate serialized + /// DFAs for both forms of endianness and then load the correct one based + /// on endianness.) + /// + /// # Errors + /// + /// Generally speaking, it's easier to state the conditions in which an + /// error is _not_ returned. All of the following must be true: + /// + /// * The bytes given must be produced by one of the serialization APIs + /// on this DFA, as mentioned above. + /// * The endianness of the target platform matches the endianness used to + /// serialized the provided DFA. + /// * The slice given must have the same alignment as `u32`. + /// + /// If any of the above are not true, then an error will be returned. + /// + /// # Panics + /// + /// This routine will never panic for any input. 
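+    ///
+    /// # Example: endianness mismatch
+    ///
+    /// One of the error conditions above can be shown directly: serializing
+    /// in one endianness and deserializing on a target with the other is
+    /// rejected up front. (The pattern below is arbitrary.)
+    ///
+    /// ```
+    /// use regex_automata::dfa::dense::DFA;
+    ///
+    /// let (bytes, _) = DFA::new("foo[0-9]+")?.to_bytes_big_endian();
+    /// #[cfg(target_endian = "little")]
+    /// {
+    ///     let result: Result<(DFA<&[u32]>, usize), _> = DFA::from_bytes(&bytes);
+    ///     assert!(result.is_err());
+    /// }
+    /// # Ok::<(), Box<dyn std::error::Error>>(())
+    /// ```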
+ /// + /// # Example + /// + /// This example shows how to serialize a DFA to raw bytes, deserialize it + /// and then use it for searching. + /// + /// ``` + /// use regex_automata::{dfa::{Automaton, dense::DFA}, HalfMatch, Input}; + /// + /// let initial = DFA::new("foo[0-9]+")?; + /// let (bytes, _) = initial.to_bytes_native_endian(); + /// let dfa: DFA<&[u32]> = DFA::from_bytes(&bytes)?.0; + /// + /// let expected = Some(HalfMatch::must(0, 8)); + /// assert_eq!(expected, dfa.try_search_fwd(&Input::new("foo12345"))?); + /// # Ok::<(), Box>(()) + /// ``` + /// + /// # Example: dealing with alignment and padding + /// + /// In the above example, we used the `to_bytes_native_endian` method to + /// serialize a DFA, but we ignored part of its return value corresponding + /// to padding added to the beginning of the serialized DFA. This is OK + /// because deserialization will skip this initial padding. What matters + /// is that the address immediately following the padding has an alignment + /// that matches `u32`. That is, the following is an equivalent but + /// alternative way to write the above example: + /// + /// ``` + /// use regex_automata::{dfa::{Automaton, dense::DFA}, HalfMatch, Input}; + /// + /// let initial = DFA::new("foo[0-9]+")?; + /// // Serialization returns the number of leading padding bytes added to + /// // the returned Vec. + /// let (bytes, pad) = initial.to_bytes_native_endian(); + /// let dfa: DFA<&[u32]> = DFA::from_bytes(&bytes[pad..])?.0; + /// + /// let expected = Some(HalfMatch::must(0, 8)); + /// assert_eq!(expected, dfa.try_search_fwd(&Input::new("foo12345"))?); + /// # Ok::<(), Box>(()) + /// ``` + /// + /// This padding is necessary because Rust's standard library does + /// not expose any safe and robust way of creating a `Vec` with a + /// guaranteed alignment other than 1. Now, in practice, the underlying + /// allocator is likely to provide a `Vec` that meets our alignment + /// requirements, which means `pad` is zero in practice most of the time. + /// + /// The purpose of exposing the padding like this is flexibility for the + /// caller. For example, if one wants to embed a serialized DFA into a + /// compiled program, then it's important to guarantee that it starts at a + /// `u32`-aligned address. The simplest way to do this is to discard the + /// padding bytes and set it up so that the serialized DFA itself begins at + /// a properly aligned address. We can show this in two parts. The first + /// part is serializing the DFA to a file: + /// + /// ```no_run + /// use regex_automata::dfa::dense::DFA; + /// + /// let dfa = DFA::new("foo[0-9]+")?; + /// + /// let (bytes, pad) = dfa.to_bytes_big_endian(); + /// // Write the contents of the DFA *without* the initial padding. + /// std::fs::write("foo.bigendian.dfa", &bytes[pad..])?; + /// + /// // Do it again, but this time for little endian. + /// let (bytes, pad) = dfa.to_bytes_little_endian(); + /// std::fs::write("foo.littleendian.dfa", &bytes[pad..])?; + /// # Ok::<(), Box>(()) + /// ``` + /// + /// And now the second part is embedding the DFA into the compiled program + /// and deserializing it at runtime on first use. We use conditional + /// compilation to choose the correct endianness. + /// + /// ```no_run + /// use regex_automata::{ + /// dfa::{Automaton, dense::DFA}, + /// util::{lazy::Lazy, wire::AlignAs}, + /// HalfMatch, Input, + /// }; + /// + /// // This crate provides its own "lazy" type, kind of like + /// // lazy_static! or once_cell::sync::Lazy. 
But it works in no-alloc + /// // no-std environments and let's us write this using completely + /// // safe code. + /// static RE: Lazy> = Lazy::new(|| { + /// # const _: &str = stringify! { + /// // This assignment is made possible (implicitly) via the + /// // CoerceUnsized trait. This is what guarantees that our + /// // bytes are stored in memory on a 4 byte boundary. You + /// // *must* do this or something equivalent for correct + /// // deserialization. + /// static ALIGNED: &AlignAs<[u8], u32> = &AlignAs { + /// _align: [], + /// #[cfg(target_endian = "big")] + /// bytes: *include_bytes!("foo.bigendian.dfa"), + /// #[cfg(target_endian = "little")] + /// bytes: *include_bytes!("foo.littleendian.dfa"), + /// }; + /// # }; + /// # static ALIGNED: &AlignAs<[u8], u32> = &AlignAs { + /// # _align: [], + /// # bytes: [], + /// # }; + /// + /// let (dfa, _) = DFA::from_bytes(&ALIGNED.bytes) + /// .expect("serialized DFA should be valid"); + /// dfa + /// }); + /// + /// let expected = Ok(Some(HalfMatch::must(0, 8))); + /// assert_eq!(expected, RE.try_search_fwd(&Input::new("foo12345"))); + /// ``` + /// + /// An alternative to [`util::lazy::Lazy`](crate::util::lazy::Lazy) + /// is [`lazy_static`](https://crates.io/crates/lazy_static) or + /// [`once_cell`](https://crates.io/crates/once_cell), which provide + /// stronger guarantees (like the initialization function only being + /// executed once). And `once_cell` in particular provides a more + /// expressive API. But a `Lazy` value from this crate is likely just fine + /// in most circumstances. + /// + /// Note that regardless of which initialization method you use, you + /// will still need to use the [`AlignAs`](crate::util::wire::AlignAs) + /// trick above to force correct alignment, but this is safe to do and + /// `from_bytes` will return an error if you get it wrong. + pub fn from_bytes( + slice: &'a [u8], + ) -> Result<(DFA<&'a [u32]>, usize), DeserializeError> { + // SAFETY: This is safe because we validate the transition table, start + // table, match states and accelerators below. If any validation fails, + // then we return an error. + let (dfa, nread) = unsafe { DFA::from_bytes_unchecked(slice)? }; + // Note that validation order is important here: + // + // * `MatchState::validate` can be called with an untrusted DFA. + // * `TransistionTable::validate` uses `dfa.ms` through `match_len`. + // * `StartTable::validate` needs a valid transition table. + // + // So... validate the match states first. + dfa.accels.validate()?; + dfa.ms.validate(&dfa)?; + dfa.tt.validate(&dfa)?; + dfa.st.validate(&dfa)?; + // N.B. dfa.special doesn't have a way to do unchecked deserialization, + // so it has already been validated. + for state in dfa.states() { + // If the state is an accel state, then it must have a non-empty + // accelerator. + if dfa.is_accel_state(state.id()) { + let index = dfa.accelerator_index(state.id()); + if index >= dfa.accels.len() { + return Err(DeserializeError::generic( + "found DFA state with invalid accelerator index", + )); + } + let needles = dfa.accels.needles(index); + if !(1 <= needles.len() && needles.len() <= 3) { + return Err(DeserializeError::generic( + "accelerator needles has invalid length", + )); + } + } + } + Ok((dfa, nread)) + } + + /// Deserialize a DFA with a specific state identifier representation in + /// constant time by omitting the verification of the validity of the + /// transition table and other data inside the DFA. 
+ /// + /// This is just like [`DFA::from_bytes`], except it can potentially return + /// a DFA that exhibits undefined behavior if its transition table contains + /// invalid state identifiers. + /// + /// This routine is useful if you need to deserialize a DFA cheaply + /// and cannot afford the transition table validation performed by + /// `from_bytes`. + /// + /// # Example + /// + /// ``` + /// use regex_automata::{dfa::{Automaton, dense::DFA}, HalfMatch, Input}; + /// + /// let initial = DFA::new("foo[0-9]+")?; + /// let (bytes, _) = initial.to_bytes_native_endian(); + /// // SAFETY: This is guaranteed to be safe since the bytes given come + /// // directly from a compatible serialization routine. + /// let dfa: DFA<&[u32]> = unsafe { DFA::from_bytes_unchecked(&bytes)?.0 }; + /// + /// let expected = Some(HalfMatch::must(0, 8)); + /// assert_eq!(expected, dfa.try_search_fwd(&Input::new("foo12345"))?); + /// # Ok::<(), Box>(()) + /// ``` + pub unsafe fn from_bytes_unchecked( + slice: &'a [u8], + ) -> Result<(DFA<&'a [u32]>, usize), DeserializeError> { + let mut nr = 0; + + nr += wire::skip_initial_padding(slice); + wire::check_alignment::(&slice[nr..])?; + nr += wire::read_label(&slice[nr..], LABEL)?; + nr += wire::read_endianness_check(&slice[nr..])?; + nr += wire::read_version(&slice[nr..], VERSION)?; + + let _unused = wire::try_read_u32(&slice[nr..], "unused space")?; + nr += size_of::(); + + let (flags, nread) = Flags::from_bytes(&slice[nr..])?; + nr += nread; + + let (tt, nread) = TransitionTable::from_bytes_unchecked(&slice[nr..])?; + nr += nread; + + let (st, nread) = StartTable::from_bytes_unchecked(&slice[nr..])?; + nr += nread; + + let (ms, nread) = MatchStates::from_bytes_unchecked(&slice[nr..])?; + nr += nread; + + let (special, nread) = Special::from_bytes(&slice[nr..])?; + nr += nread; + special.validate_state_len(tt.len(), tt.stride2)?; + + let (accels, nread) = Accels::from_bytes_unchecked(&slice[nr..])?; + nr += nread; + + let (quitset, nread) = ByteSet::from_bytes(&slice[nr..])?; + nr += nread; + + // Prefilters don't support serialization, so they're always absent. + let pre = None; + Ok((DFA { tt, st, ms, special, accels, pre, quitset, flags }, nr)) + } + + /// The implementation of the public `write_to` serialization methods, + /// which is generic over endianness. + /// + /// This is defined only for &[u32] to reduce binary size/compilation time. + fn write_to( + &self, + mut dst: &mut [u8], + ) -> Result { + let nwrite = self.write_to_len(); + if dst.len() < nwrite { + return Err(SerializeError::buffer_too_small("dense DFA")); + } + dst = &mut dst[..nwrite]; + + let mut nw = 0; + nw += wire::write_label(LABEL, &mut dst[nw..])?; + nw += wire::write_endianness_check::(&mut dst[nw..])?; + nw += wire::write_version::(VERSION, &mut dst[nw..])?; + nw += { + // Currently unused, intended for future flexibility + E::write_u32(0, &mut dst[nw..]); + size_of::() + }; + nw += self.flags.write_to::(&mut dst[nw..])?; + nw += self.tt.write_to::(&mut dst[nw..])?; + nw += self.st.write_to::(&mut dst[nw..])?; + nw += self.ms.write_to::(&mut dst[nw..])?; + nw += self.special.write_to::(&mut dst[nw..])?; + nw += self.accels.write_to::(&mut dst[nw..])?; + nw += self.quitset.write_to::(&mut dst[nw..])?; + Ok(nw) + } +} + +/// Other routines that work for all `T`. +impl DFA { + /// Set or unset the prefilter attached to this DFA. + /// + /// This is useful when one has deserialized a DFA from `&[u8]`. 
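+    /// For instance (an illustrative sketch rather than a doctest; the
+    /// `"foo"` needle is a placeholder and `bytes` is assumed to hold a DFA
+    /// serialized elsewhere):
+    ///
+    /// ```ignore
+    /// use regex_automata::{
+    ///     dfa::dense::DFA, util::prefilter::Prefilter, MatchKind,
+    /// };
+    ///
+    /// let (mut dfa, _) = DFA::from_bytes(&bytes)?;
+    /// let pre = Prefilter::new(MatchKind::LeftmostFirst, &["foo"]);
+    /// dfa.set_prefilter(pre);
+    /// ```
+    ///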
+ /// Deserialization does not currently include prefilters, so if you + /// want prefilter acceleration, you'll need to rebuild it and attach + /// it here. + pub fn set_prefilter(&mut self, prefilter: Option) { + self.pre = prefilter + } +} + +// The following methods implement mutable routines on the internal +// representation of a DFA. As such, we must fix the first type parameter to a +// `Vec` since a generic `T: AsRef<[u32]>` does not permit mutation. We +// can get away with this because these methods are internal to the crate and +// are exclusively used during construction of the DFA. +#[cfg(feature = "dfa-build")] +impl OwnedDFA { + /// Add a start state of this DFA. + pub(crate) fn set_start_state( + &mut self, + anchored: Anchored, + start: Start, + id: StateID, + ) { + assert!(self.tt.is_valid(id), "invalid start state"); + self.st.set_start(anchored, start, id); + } + + /// Set the given transition to this DFA. Both the `from` and `to` states + /// must already exist. + pub(crate) fn set_transition( + &mut self, + from: StateID, + byte: alphabet::Unit, + to: StateID, + ) { + self.tt.set(from, byte, to); + } + + /// An empty state (a state where all transitions lead to a dead state) + /// and return its identifier. The identifier returned is guaranteed to + /// not point to any other existing state. + /// + /// If adding a state would exceed `StateID::LIMIT`, then this returns an + /// error. + pub(crate) fn add_empty_state(&mut self) -> Result { + self.tt.add_empty_state() + } + + /// Swap the two states given in the transition table. + /// + /// This routine does not do anything to check the correctness of this + /// swap. Callers must ensure that other states pointing to id1 and id2 are + /// updated appropriately. + pub(crate) fn swap_states(&mut self, id1: StateID, id2: StateID) { + self.tt.swap(id1, id2); + } + + /// Remap all of the state identifiers in this DFA according to the map + /// function given. This includes all transitions and all starting state + /// identifiers. + pub(crate) fn remap(&mut self, map: impl Fn(StateID) -> StateID) { + // We could loop over each state ID and call 'remap_state' here, but + // this is more direct: just map every transition directly. This + // technically might do a little extra work since the alphabet length + // is likely less than the stride, but if that is indeed an issue we + // should benchmark it and fix it. + for sid in self.tt.table_mut().iter_mut() { + *sid = map(*sid); + } + for sid in self.st.table_mut().iter_mut() { + *sid = map(*sid); + } + } + + /// Remap the transitions for the state given according to the function + /// given. This applies the given map function to every transition in the + /// given state and changes the transition in place to the result of the + /// map function for that transition. + pub(crate) fn remap_state( + &mut self, + id: StateID, + map: impl Fn(StateID) -> StateID, + ) { + self.tt.remap(id, map); + } + + /// Truncate the states in this DFA to the given length. + /// + /// This routine does not do anything to check the correctness of this + /// truncation. Callers must ensure that other states pointing to truncated + /// states are updated appropriately. + pub(crate) fn truncate_states(&mut self, len: usize) { + self.tt.truncate(len); + } + + /// Minimize this DFA in place using Hopcroft's algorithm. + pub(crate) fn minimize(&mut self) { + Minimizer::new(self).run(); + } + + /// Updates the match state pattern ID map to use the one provided. 
+ /// + /// This is useful when it's convenient to manipulate matching states + /// (and their corresponding pattern IDs) as a map. In particular, the + /// representation used by a DFA for this map is not amenable to mutation, + /// so if things need to be changed (like when shuffling states), it's + /// often easier to work with the map form. + pub(crate) fn set_pattern_map( + &mut self, + map: &BTreeMap>, + ) -> Result<(), BuildError> { + self.ms = self.ms.new_with_map(map)?; + Ok(()) + } + + /// Find states that have a small number of non-loop transitions and mark + /// them as candidates for acceleration during search. + pub(crate) fn accelerate(&mut self) { + // dead and quit states can never be accelerated. + if self.state_len() <= 2 { + return; + } + + // Go through every state and record their accelerator, if possible. + let mut accels = BTreeMap::new(); + // Count the number of accelerated match, start and non-match/start + // states. + let (mut cmatch, mut cstart, mut cnormal) = (0, 0, 0); + for state in self.states() { + if let Some(accel) = state.accelerate(self.byte_classes()) { + debug!( + "accelerating full DFA state {}: {:?}", + state.id().as_usize(), + accel, + ); + accels.insert(state.id(), accel); + if self.is_match_state(state.id()) { + cmatch += 1; + } else if self.is_start_state(state.id()) { + cstart += 1; + } else { + assert!(!self.is_dead_state(state.id())); + assert!(!self.is_quit_state(state.id())); + cnormal += 1; + } + } + } + // If no states were able to be accelerated, then we're done. + if accels.is_empty() { + return; + } + let original_accels_len = accels.len(); + + // A remapper keeps track of state ID changes. Once we're done + // shuffling, the remapper is used to rewrite all transitions in the + // DFA based on the new positions of states. + let mut remapper = Remapper::new(self); + + // As we swap states, if they are match states, we need to swap their + // pattern ID lists too (for multi-regexes). We do this by converting + // the lists to an easily swappable map, and then convert back to + // MatchStates once we're done. + let mut new_matches = self.ms.to_map(self); + + // There is at least one state that gets accelerated, so these are + // guaranteed to get set to sensible values below. + self.special.min_accel = StateID::MAX; + self.special.max_accel = StateID::ZERO; + let update_special_accel = + |special: &mut Special, accel_id: StateID| { + special.min_accel = cmp::min(special.min_accel, accel_id); + special.max_accel = cmp::max(special.max_accel, accel_id); + }; + + // Start by shuffling match states. Any match states that are + // accelerated get moved to the end of the match state range. + if cmatch > 0 && self.special.matches() { + // N.B. special.{min,max}_match do not need updating, since the + // range/number of match states does not change. Only the ordering + // of match states may change. + let mut next_id = self.special.max_match; + let mut cur_id = next_id; + while cur_id >= self.special.min_match { + if let Some(accel) = accels.remove(&cur_id) { + accels.insert(next_id, accel); + update_special_accel(&mut self.special, next_id); + + // No need to do any actual swapping for equivalent IDs. + if cur_id != next_id { + remapper.swap(self, cur_id, next_id); + + // Swap pattern IDs for match states. 
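+ // As an illustrative example (values not from any particular
+ // DFA): if cur_id maps to [p0] and next_id maps to [p1, p2]
+ // before the swap, then afterwards cur_id maps to [p1, p2] and
+ // next_id maps to [p0], mirroring the state swap done by the
+ // remapper above.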
+ let cur_pids = new_matches.remove(&cur_id).unwrap(); + let next_pids = new_matches.remove(&next_id).unwrap(); + new_matches.insert(cur_id, next_pids); + new_matches.insert(next_id, cur_pids); + } + next_id = self.tt.prev_state_id(next_id); + } + cur_id = self.tt.prev_state_id(cur_id); + } + } + + // This is where it gets tricky. Without acceleration, start states + // normally come right after match states. But we want accelerated + // states to be a single contiguous range (to make it very fast + // to determine whether a state *is* accelerated), while also keeping + // match and starting states as contiguous ranges for the same reason. + // So what we do here is shuffle states such that it looks like this: + // + // DQMMMMAAAAASSSSSSNNNNNNN + // | | + // |---------| + // accelerated states + // + // Where: + // D - dead state + // Q - quit state + // M - match state (may be accelerated) + // A - normal state that is accelerated + // S - start state (may be accelerated) + // N - normal state that is NOT accelerated + // + // We implement this by shuffling states, which is done by a sequence + // of pairwise swaps. We start by looking at all normal states to be + // accelerated. When we find one, we swap it with the earliest starting + // state, and then swap that with the earliest normal state. This + // preserves the contiguous property. + // + // Once we're done looking for accelerated normal states, now we look + // for accelerated starting states by moving them to the beginning + // of the starting state range (just like we moved accelerated match + // states to the end of the matching state range). + // + // For a more detailed/different perspective on this, see the docs + // in dfa/special.rs. + if cnormal > 0 { + // our next available starting and normal states for swapping. + let mut next_start_id = self.special.min_start; + let mut cur_id = self.to_state_id(self.state_len() - 1); + // This is guaranteed to exist since cnormal > 0. + let mut next_norm_id = + self.tt.next_state_id(self.special.max_start); + while cur_id >= next_norm_id { + if let Some(accel) = accels.remove(&cur_id) { + remapper.swap(self, next_start_id, cur_id); + remapper.swap(self, next_norm_id, cur_id); + // Keep our accelerator map updated with new IDs if the + // states we swapped were also accelerated. + if let Some(accel2) = accels.remove(&next_norm_id) { + accels.insert(cur_id, accel2); + } + if let Some(accel2) = accels.remove(&next_start_id) { + accels.insert(next_norm_id, accel2); + } + accels.insert(next_start_id, accel); + update_special_accel(&mut self.special, next_start_id); + // Our start range shifts one to the right now. + self.special.min_start = + self.tt.next_state_id(self.special.min_start); + self.special.max_start = + self.tt.next_state_id(self.special.max_start); + next_start_id = self.tt.next_state_id(next_start_id); + next_norm_id = self.tt.next_state_id(next_norm_id); + } + // This is pretty tricky, but if our 'next_norm_id' state also + // happened to be accelerated, then the result is that it is + // now in the position of cur_id, so we need to consider it + // again. This loop is still guaranteed to terminate though, + // because when accels contains cur_id, we're guaranteed to + // increment next_norm_id even if cur_id remains unchanged. + if !accels.contains_key(&cur_id) { + cur_id = self.tt.prev_state_id(cur_id); + } + } + } + // Just like we did for match states, but we want to move accelerated + // start states to the beginning of the range instead of the end. 
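+ // In terms of the diagram above, accelerated start states get
+ // packed at the front of the S range so that they sit directly
+ // after the accelerated normal states, keeping the accelerated
+ // region contiguous.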
+ if cstart > 0 { + // N.B. special.{min,max}_start do not need updating, since the + // range/number of start states does not change at this point. Only + // the ordering of start states may change. + let mut next_id = self.special.min_start; + let mut cur_id = next_id; + while cur_id <= self.special.max_start { + if let Some(accel) = accels.remove(&cur_id) { + remapper.swap(self, cur_id, next_id); + accels.insert(next_id, accel); + update_special_accel(&mut self.special, next_id); + next_id = self.tt.next_state_id(next_id); + } + cur_id = self.tt.next_state_id(cur_id); + } + } + + // Remap all transitions in our DFA and assert some things. + remapper.remap(self); + // This unwrap is OK because acceleration never changes the number of + // match states or patterns in those match states. Since acceleration + // runs after the pattern map has been set at least once, we know that + // our match states cannot error. + self.set_pattern_map(&new_matches).unwrap(); + self.special.set_max(); + self.special.validate().expect("special state ranges should validate"); + self.special + .validate_state_len(self.state_len(), self.stride2()) + .expect( + "special state ranges should be consistent with state length", + ); + assert_eq!( + self.special.accel_len(self.stride()), + // We record the number of accelerated states initially detected + // since the accels map is itself mutated in the process above. + // If mutated incorrectly, its size may change, and thus can't be + // trusted as a source of truth of how many accelerated states we + // expected there to be. + original_accels_len, + "mismatch with expected number of accelerated states", + ); + + // And finally record our accelerators. We kept our accels map updated + // as we shuffled states above, so the accelerators should now + // correspond to a contiguous range in the state ID space. (Which we + // assert.) + let mut prev: Option = None; + for (id, accel) in accels { + assert!(prev.map_or(true, |p| self.tt.next_state_id(p) == id)); + prev = Some(id); + self.accels.add(accel); + } + } + + /// Shuffle the states in this DFA so that starting states, match + /// states and accelerated states are all contiguous. + /// + /// See dfa/special.rs for more details. + pub(crate) fn shuffle( + &mut self, + mut matches: BTreeMap>, + ) -> Result<(), BuildError> { + // The determinizer always adds a quit state and it is always second. + self.special.quit_id = self.to_state_id(1); + // If all we have are the dead and quit states, then we're done and + // the DFA will never produce a match. + if self.state_len() <= 2 { + self.special.set_max(); + return Ok(()); + } + + // Collect all our non-DEAD start states into a convenient set and + // confirm there is no overlap with match states. In the classical DFA + // construction, start states can be match states. But because of + // look-around, we delay all matches by a byte, which prevents start + // states from being match states. + let mut is_start: BTreeSet = BTreeSet::new(); + for (start_id, _, _) in self.starts() { + // If a starting configuration points to a DEAD state, then we + // don't want to shuffle it. The DEAD state is always the first + // state with ID=0. So we can just leave it be. + if start_id == DEAD { + continue; + } + assert!( + !matches.contains_key(&start_id), + "{start_id:?} is both a start and a match state, \ + which is not allowed", + ); + is_start.insert(start_id); + } + + // We implement shuffling by a sequence of pairwise swaps of states. 
+ // Since we have a number of things referencing states via their + // IDs and swapping them changes their IDs, we need to record every + // swap we make so that we can remap IDs. The remapper handles this + // book-keeping for us. + let mut remapper = Remapper::new(self); + + // Shuffle matching states. + if matches.is_empty() { + self.special.min_match = DEAD; + self.special.max_match = DEAD; + } else { + // The determinizer guarantees that the first two states are the + // dead and quit states, respectively. We want our match states to + // come right after quit. + let mut next_id = self.to_state_id(2); + let mut new_matches = BTreeMap::new(); + self.special.min_match = next_id; + for (id, pids) in matches { + remapper.swap(self, next_id, id); + new_matches.insert(next_id, pids); + // If we swapped a start state, then update our set. + if is_start.contains(&next_id) { + is_start.remove(&next_id); + is_start.insert(id); + } + next_id = self.tt.next_state_id(next_id); + } + matches = new_matches; + self.special.max_match = cmp::max( + self.special.min_match, + self.tt.prev_state_id(next_id), + ); + } + + // Shuffle starting states. + { + let mut next_id = self.to_state_id(2); + if self.special.matches() { + next_id = self.tt.next_state_id(self.special.max_match); + } + self.special.min_start = next_id; + for id in is_start { + remapper.swap(self, next_id, id); + next_id = self.tt.next_state_id(next_id); + } + self.special.max_start = cmp::max( + self.special.min_start, + self.tt.prev_state_id(next_id), + ); + } + + // Finally remap all transitions in our DFA. + remapper.remap(self); + self.set_pattern_map(&matches)?; + self.special.set_max(); + self.special.validate().expect("special state ranges should validate"); + self.special + .validate_state_len(self.state_len(), self.stride2()) + .expect( + "special state ranges should be consistent with state length", + ); + Ok(()) + } + + /// Checks whether there are universal start states (both anchored and + /// unanchored), and if so, sets the relevant fields to the start state + /// IDs. + /// + /// Universal start states occur precisely when the all patterns in the + /// DFA have no look-around assertions in their prefix. + fn set_universal_starts(&mut self) { + assert_eq!(6, Start::len(), "expected 6 start configurations"); + + let start_id = |dfa: &mut OwnedDFA, + anchored: Anchored, + start: Start| { + // This OK because we only call 'start' under conditions + // in which we know it will succeed. + dfa.st.start(anchored, start).expect("valid Input configuration") + }; + if self.start_kind().has_unanchored() { + let anchor = Anchored::No; + let sid = start_id(self, anchor, Start::NonWordByte); + if sid == start_id(self, anchor, Start::WordByte) + && sid == start_id(self, anchor, Start::Text) + && sid == start_id(self, anchor, Start::LineLF) + && sid == start_id(self, anchor, Start::LineCR) + && sid == start_id(self, anchor, Start::CustomLineTerminator) + { + self.st.universal_start_unanchored = Some(sid); + } + } + if self.start_kind().has_anchored() { + let anchor = Anchored::Yes; + let sid = start_id(self, anchor, Start::NonWordByte); + if sid == start_id(self, anchor, Start::WordByte) + && sid == start_id(self, anchor, Start::Text) + && sid == start_id(self, anchor, Start::LineLF) + && sid == start_id(self, anchor, Start::LineCR) + && sid == start_id(self, anchor, Start::CustomLineTerminator) + { + self.st.universal_start_anchored = Some(sid); + } + } + } +} + +// A variety of generic internal methods for accessing DFA internals. 
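+// Much of what follows leans on premultiplied state IDs: a state's ID is its
+// index in the state list shifted left by `stride2`, so converting between
+// the two forms is just a shift in either direction. A minimal illustrative
+// sketch (the stride2 value of 6, i.e. a stride of 64, is assumed for the
+// example and is not taken from any particular DFA):
+//
+//     let stride2 = 6usize;
+//     let index = 3usize;            // position of the state in the table
+//     let id = index << stride2;     // premultiplied ID: 192
+//     assert_eq!(id >> stride2, index);
+//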
+impl> DFA { + /// Return the info about special states. + pub(crate) fn special(&self) -> &Special { + &self.special + } + + /// Return the info about special states as a mutable borrow. + #[cfg(feature = "dfa-build")] + pub(crate) fn special_mut(&mut self) -> &mut Special { + &mut self.special + } + + /// Returns the quit set (may be empty) used by this DFA. + pub(crate) fn quitset(&self) -> &ByteSet { + &self.quitset + } + + /// Returns the flags for this DFA. + pub(crate) fn flags(&self) -> &Flags { + &self.flags + } + + /// Returns an iterator over all states in this DFA. + /// + /// This iterator yields a tuple for each state. The first element of the + /// tuple corresponds to a state's identifier, and the second element + /// corresponds to the state itself (comprised of its transitions). + pub(crate) fn states(&self) -> StateIter<'_, T> { + self.tt.states() + } + + /// Return the total number of states in this DFA. Every DFA has at least + /// 1 state, even the empty DFA. + pub(crate) fn state_len(&self) -> usize { + self.tt.len() + } + + /// Return an iterator over all pattern IDs for the given match state. + /// + /// If the given state is not a match state, then this panics. + #[cfg(feature = "dfa-build")] + pub(crate) fn pattern_id_slice(&self, id: StateID) -> &[PatternID] { + assert!(self.is_match_state(id)); + self.ms.pattern_id_slice(self.match_state_index(id)) + } + + /// Return the total number of pattern IDs for the given match state. + /// + /// If the given state is not a match state, then this panics. + pub(crate) fn match_pattern_len(&self, id: StateID) -> usize { + assert!(self.is_match_state(id)); + self.ms.pattern_len(self.match_state_index(id)) + } + + /// Returns the total number of patterns matched by this DFA. + pub(crate) fn pattern_len(&self) -> usize { + self.ms.pattern_len + } + + /// Returns a map from match state ID to a list of pattern IDs that match + /// in that state. + #[cfg(feature = "dfa-build")] + pub(crate) fn pattern_map(&self) -> BTreeMap> { + self.ms.to_map(self) + } + + /// Returns the ID of the quit state for this DFA. + #[cfg(feature = "dfa-build")] + pub(crate) fn quit_id(&self) -> StateID { + self.to_state_id(1) + } + + /// Convert the given state identifier to the state's index. The state's + /// index corresponds to the position in which it appears in the transition + /// table. When a DFA is NOT premultiplied, then a state's identifier is + /// also its index. When a DFA is premultiplied, then a state's identifier + /// is equal to `index * alphabet_len`. This routine reverses that. + pub(crate) fn to_index(&self, id: StateID) -> usize { + self.tt.to_index(id) + } + + /// Convert an index to a state (in the range 0..self.state_len()) to an + /// actual state identifier. + /// + /// This is useful when using a `Vec` as an efficient map keyed by state + /// to some other information (such as a remapped state ID). + #[cfg(feature = "dfa-build")] + pub(crate) fn to_state_id(&self, index: usize) -> StateID { + self.tt.to_state_id(index) + } + + /// Return the table of state IDs for this DFA's start states. + pub(crate) fn starts(&self) -> StartStateIter<'_> { + self.st.iter() + } + + /// Returns the index of the match state for the given ID. If the + /// given ID does not correspond to a match state, then this may + /// panic or produce an incorrect result. 
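+ ///
+ /// Concretely, given the contiguous match state range described in the
+ /// body below, the index works out to `(id - min_match) >> stride2`.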
+ #[cfg_attr(feature = "perf-inline", inline(always))] + fn match_state_index(&self, id: StateID) -> usize { + debug_assert!(self.is_match_state(id)); + // This is one of the places where we rely on the fact that match + // states are contiguous in the transition table. Namely, that the + // first match state ID always corresponds to dfa.special.min_match. + // From there, since we know the stride, we can compute the overall + // index of any match state given the match state's ID. + let min = self.special().min_match.as_usize(); + // CORRECTNESS: We're allowed to produce an incorrect result or panic, + // so both the subtraction and the unchecked StateID construction is + // OK. + self.to_index(StateID::new_unchecked(id.as_usize() - min)) + } + + /// Returns the index of the accelerator state for the given ID. If the + /// given ID does not correspond to an accelerator state, then this may + /// panic or produce an incorrect result. + fn accelerator_index(&self, id: StateID) -> usize { + let min = self.special().min_accel.as_usize(); + // CORRECTNESS: We're allowed to produce an incorrect result or panic, + // so both the subtraction and the unchecked StateID construction is + // OK. + self.to_index(StateID::new_unchecked(id.as_usize() - min)) + } + + /// Return the accelerators for this DFA. + fn accels(&self) -> Accels<&[u32]> { + self.accels.as_ref() + } + + /// Return this DFA's transition table as a slice. + fn trans(&self) -> &[StateID] { + self.tt.table() + } +} + +impl> fmt::Debug for DFA { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + writeln!(f, "dense::DFA(")?; + for state in self.states() { + fmt_state_indicator(f, self, state.id())?; + let id = if f.alternate() { + state.id().as_usize() + } else { + self.to_index(state.id()) + }; + write!(f, "{id:06?}: ")?; + state.fmt(f)?; + write!(f, "\n")?; + } + writeln!(f, "")?; + for (i, (start_id, anchored, sty)) in self.starts().enumerate() { + let id = if f.alternate() { + start_id.as_usize() + } else { + self.to_index(start_id) + }; + if i % self.st.stride == 0 { + match anchored { + Anchored::No => writeln!(f, "START-GROUP(unanchored)")?, + Anchored::Yes => writeln!(f, "START-GROUP(anchored)")?, + Anchored::Pattern(pid) => { + writeln!(f, "START_GROUP(pattern: {pid:?})")? + } + } + } + writeln!(f, " {sty:?} => {id:06?}")?; + } + if self.pattern_len() > 1 { + writeln!(f, "")?; + for i in 0..self.ms.len() { + let id = self.ms.match_state_id(self, i); + let id = if f.alternate() { + id.as_usize() + } else { + self.to_index(id) + }; + write!(f, "MATCH({id:06?}): ")?; + for (i, &pid) in self.ms.pattern_id_slice(i).iter().enumerate() + { + if i > 0 { + write!(f, ", ")?; + } + write!(f, "{pid:?}")?; + } + writeln!(f, "")?; + } + } + writeln!(f, "state length: {:?}", self.state_len())?; + writeln!(f, "pattern length: {:?}", self.pattern_len())?; + writeln!(f, "flags: {:?}", self.flags)?; + writeln!(f, ")")?; + Ok(()) + } +} + +// SAFETY: We assert that our implementation of each method is correct. 
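+//
+// The core transition in the impl below is a single table lookup of the form
+// `trans[current + byte_class]`; this works only because state IDs are
+// premultiplied by the stride (see the comments in `add_empty_state`).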
+unsafe impl> Automaton for DFA { + #[cfg_attr(feature = "perf-inline", inline(always))] + fn is_special_state(&self, id: StateID) -> bool { + self.special.is_special_state(id) + } + + #[cfg_attr(feature = "perf-inline", inline(always))] + fn is_dead_state(&self, id: StateID) -> bool { + self.special.is_dead_state(id) + } + + #[cfg_attr(feature = "perf-inline", inline(always))] + fn is_quit_state(&self, id: StateID) -> bool { + self.special.is_quit_state(id) + } + + #[cfg_attr(feature = "perf-inline", inline(always))] + fn is_match_state(&self, id: StateID) -> bool { + self.special.is_match_state(id) + } + + #[cfg_attr(feature = "perf-inline", inline(always))] + fn is_start_state(&self, id: StateID) -> bool { + self.special.is_start_state(id) + } + + #[cfg_attr(feature = "perf-inline", inline(always))] + fn is_accel_state(&self, id: StateID) -> bool { + self.special.is_accel_state(id) + } + + #[cfg_attr(feature = "perf-inline", inline(always))] + fn next_state(&self, current: StateID, input: u8) -> StateID { + let input = self.byte_classes().get(input); + let o = current.as_usize() + usize::from(input); + self.trans()[o] + } + + #[cfg_attr(feature = "perf-inline", inline(always))] + unsafe fn next_state_unchecked( + &self, + current: StateID, + byte: u8, + ) -> StateID { + // We don't (or shouldn't) need an unchecked variant for the byte + // class mapping, since bound checks should be omitted automatically + // by virtue of its representation. If this ends up not being true as + // confirmed by codegen, please file an issue. ---AG + let class = self.byte_classes().get(byte); + let o = current.as_usize() + usize::from(class); + let next = *self.trans().get_unchecked(o); + next + } + + #[cfg_attr(feature = "perf-inline", inline(always))] + fn next_eoi_state(&self, current: StateID) -> StateID { + let eoi = self.byte_classes().eoi().as_usize(); + let o = current.as_usize() + eoi; + self.trans()[o] + } + + #[cfg_attr(feature = "perf-inline", inline(always))] + fn pattern_len(&self) -> usize { + self.ms.pattern_len + } + + #[cfg_attr(feature = "perf-inline", inline(always))] + fn match_len(&self, id: StateID) -> usize { + self.match_pattern_len(id) + } + + #[cfg_attr(feature = "perf-inline", inline(always))] + fn match_pattern(&self, id: StateID, match_index: usize) -> PatternID { + // This is an optimization for the very common case of a DFA with a + // single pattern. This conditional avoids a somewhat more costly path + // that finds the pattern ID from the state machine, which requires + // a bit of slicing/pointer-chasing. This optimization tends to only + // matter when matches are frequent. 
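+ // (With exactly one pattern, every match necessarily corresponds
+ // to pattern 0, so no lookup is needed at all.)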
+ if self.ms.pattern_len == 1 { + return PatternID::ZERO; + } + let state_index = self.match_state_index(id); + self.ms.pattern_id(state_index, match_index) + } + + #[cfg_attr(feature = "perf-inline", inline(always))] + fn has_empty(&self) -> bool { + self.flags.has_empty + } + + #[cfg_attr(feature = "perf-inline", inline(always))] + fn is_utf8(&self) -> bool { + self.flags.is_utf8 + } + + #[cfg_attr(feature = "perf-inline", inline(always))] + fn is_always_start_anchored(&self) -> bool { + self.flags.is_always_start_anchored + } + + #[cfg_attr(feature = "perf-inline", inline(always))] + fn start_state( + &self, + config: &start::Config, + ) -> Result { + let anchored = config.get_anchored(); + let start = match config.get_look_behind() { + None => Start::Text, + Some(byte) => { + if !self.quitset.is_empty() && self.quitset.contains(byte) { + return Err(StartError::quit(byte)); + } + self.st.start_map.get(byte) + } + }; + self.st.start(anchored, start) + } + + #[cfg_attr(feature = "perf-inline", inline(always))] + fn universal_start_state(&self, mode: Anchored) -> Option { + match mode { + Anchored::No => self.st.universal_start_unanchored, + Anchored::Yes => self.st.universal_start_anchored, + Anchored::Pattern(_) => None, + } + } + + #[cfg_attr(feature = "perf-inline", inline(always))] + fn accelerator(&self, id: StateID) -> &[u8] { + if !self.is_accel_state(id) { + return &[]; + } + self.accels.needles(self.accelerator_index(id)) + } + + #[cfg_attr(feature = "perf-inline", inline(always))] + fn get_prefilter(&self) -> Option<&Prefilter> { + self.pre.as_ref() + } +} + +/// The transition table portion of a dense DFA. +/// +/// The transition table is the core part of the DFA in that it describes how +/// to move from one state to another based on the input sequence observed. +#[derive(Clone)] +pub(crate) struct TransitionTable { + /// A contiguous region of memory representing the transition table in + /// row-major order. The representation is dense. That is, every state + /// has precisely the same number of transitions. The maximum number of + /// transitions per state is 257 (256 for each possible byte value, plus 1 + /// for the special EOI transition). If a DFA has been instructed to use + /// byte classes (the default), then the number of transitions is usually + /// substantially fewer. + /// + /// In practice, T is either `Vec` or `&[u32]`. + table: T, + /// A set of equivalence classes, where a single equivalence class + /// represents a set of bytes that never discriminate between a match + /// and a non-match in the DFA. Each equivalence class corresponds to a + /// single character in this DFA's alphabet, where the maximum number of + /// characters is 257 (each possible value of a byte plus the special + /// EOI transition). Consequently, the number of equivalence classes + /// corresponds to the number of transitions for each DFA state. Note + /// though that the *space* used by each DFA state in the transition table + /// may be larger. The total space used by each DFA state is known as the + /// stride. + /// + /// The only time the number of equivalence classes is fewer than 257 is if + /// the DFA's kind uses byte classes (which is the default). Equivalence + /// classes should generally only be disabled when debugging, so that + /// the transitions themselves aren't obscured. Disabling them has no + /// other benefit, since the equivalence class map is always used while + /// searching. 
In the vast majority of cases, the number of equivalence + /// classes is substantially smaller than 257, particularly when large + /// Unicode classes aren't used. + classes: ByteClasses, + /// The stride of each DFA state, expressed as a power-of-two exponent. + /// + /// The stride of a DFA corresponds to the total amount of space used by + /// each DFA state in the transition table. This may be bigger than the + /// size of a DFA's alphabet, since the stride is always the smallest + /// power of two greater than or equal to the alphabet size. + /// + /// While this wastes space, this avoids the need for integer division + /// to convert between premultiplied state IDs and their corresponding + /// indices. Instead, we can use simple bit-shifts. + /// + /// See the docs for the `stride2` method for more details. + /// + /// The minimum `stride2` value is `1` (corresponding to a stride of `2`) + /// while the maximum `stride2` value is `9` (corresponding to a stride of + /// `512`). The maximum is not `8` since the maximum alphabet size is `257` + /// when accounting for the special EOI transition. However, an alphabet + /// length of that size is exceptionally rare since the alphabet is shrunk + /// into equivalence classes. + stride2: usize, +} + +impl<'a> TransitionTable<&'a [u32]> { + /// Deserialize a transition table starting at the beginning of `slice`. + /// Upon success, return the total number of bytes read along with the + /// transition table. + /// + /// If there was a problem deserializing any part of the transition table, + /// then this returns an error. Notably, if the given slice does not have + /// the same alignment as `StateID`, then this will return an error (among + /// other possible errors). + /// + /// This is guaranteed to execute in constant time. + /// + /// # Safety + /// + /// This routine is not safe because it does not check the validity of the + /// transition table itself. In particular, the transition table can be + /// quite large, so checking its validity can be somewhat expensive. An + /// invalid transition table is not safe because other code may rely on the + /// transition table being correct (such as explicit bounds check elision). + /// Therefore, an invalid transition table can lead to undefined behavior. + /// + /// Callers that use this function must either pass on the safety invariant + /// or guarantee that the bytes given contain a valid transition table. + /// This guarantee is upheld by the bytes written by `write_to`. + unsafe fn from_bytes_unchecked( + mut slice: &'a [u8], + ) -> Result<(TransitionTable<&'a [u32]>, usize), DeserializeError> { + let slice_start = slice.as_ptr().as_usize(); + + let (state_len, nr) = + wire::try_read_u32_as_usize(slice, "state length")?; + slice = &slice[nr..]; + + let (stride2, nr) = wire::try_read_u32_as_usize(slice, "stride2")?; + slice = &slice[nr..]; + + let (classes, nr) = ByteClasses::from_bytes(slice)?; + slice = &slice[nr..]; + + // The alphabet length (determined by the byte class map) cannot be + // bigger than the stride (total space used by each DFA state). + if stride2 > 9 { + return Err(DeserializeError::generic( + "dense DFA has invalid stride2 (too big)", + )); + } + // It also cannot be zero, since even a DFA that never matches anything + // has a non-zero number of states with at least two equivalence + // classes: one for all 256 byte values and another for the EOI + // sentinel. 
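+ // (Two equivalence classes in turn imply a minimum stride of 2,
+ // i.e. a stride2 of at least 1, which is what's enforced here.)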
+ if stride2 < 1 { + return Err(DeserializeError::generic( + "dense DFA has invalid stride2 (too small)", + )); + } + // This is OK since 1 <= stride2 <= 9. + let stride = + 1usize.checked_shl(u32::try_from(stride2).unwrap()).unwrap(); + if classes.alphabet_len() > stride { + return Err(DeserializeError::generic( + "alphabet size cannot be bigger than transition table stride", + )); + } + + let trans_len = + wire::shl(state_len, stride2, "dense table transition length")?; + let table_bytes_len = wire::mul( + trans_len, + StateID::SIZE, + "dense table state byte length", + )?; + wire::check_slice_len(slice, table_bytes_len, "transition table")?; + wire::check_alignment::(slice)?; + let table_bytes = &slice[..table_bytes_len]; + slice = &slice[table_bytes_len..]; + // SAFETY: Since StateID is always representable as a u32, all we need + // to do is ensure that we have the proper length and alignment. We've + // checked both above, so the cast below is safe. + // + // N.B. This is the only not-safe code in this function. + let table = core::slice::from_raw_parts( + table_bytes.as_ptr().cast::(), + trans_len, + ); + let tt = TransitionTable { table, classes, stride2 }; + Ok((tt, slice.as_ptr().as_usize() - slice_start)) + } +} + +#[cfg(feature = "dfa-build")] +impl TransitionTable> { + /// Create a minimal transition table with just two states: a dead state + /// and a quit state. The alphabet length and stride of the transition + /// table is determined by the given set of equivalence classes. + fn minimal(classes: ByteClasses) -> TransitionTable> { + let mut tt = TransitionTable { + table: vec![], + classes, + stride2: classes.stride2(), + }; + // Two states, regardless of alphabet size, can always fit into u32. + tt.add_empty_state().unwrap(); // dead state + tt.add_empty_state().unwrap(); // quit state + tt + } + + /// Set a transition in this table. Both the `from` and `to` states must + /// already exist, otherwise this panics. `unit` should correspond to the + /// transition out of `from` to set to `to`. + fn set(&mut self, from: StateID, unit: alphabet::Unit, to: StateID) { + assert!(self.is_valid(from), "invalid 'from' state"); + assert!(self.is_valid(to), "invalid 'to' state"); + self.table[from.as_usize() + self.classes.get_by_unit(unit)] = + to.as_u32(); + } + + /// Add an empty state (a state where all transitions lead to a dead state) + /// and return its identifier. The identifier returned is guaranteed to + /// not point to any other existing state. + /// + /// If adding a state would exhaust the state identifier space, then this + /// returns an error. + fn add_empty_state(&mut self) -> Result { + // Normally, to get a fresh state identifier, we would just + // take the index of the next state added to the transition + // table. However, we actually perform an optimization here + // that pre-multiplies state IDs by the stride, such that they + // point immediately at the beginning of their transitions in + // the transition table. This avoids an extra multiplication + // instruction for state lookup at search time. 
+ // + // Premultiplied identifiers means that instead of your matching + // loop looking something like this: + // + // state = dfa.start + // for byte in haystack: + // next = dfa.transitions[state * stride + byte] + // if dfa.is_match(next): + // return true + // return false + // + // it can instead look like this: + // + // state = dfa.start + // for byte in haystack: + // next = dfa.transitions[state + byte] + // if dfa.is_match(next): + // return true + // return false + // + // In other words, we save a multiplication instruction in the + // critical path. This turns out to be a decent performance win. + // The cost of using premultiplied state ids is that they can + // require a bigger state id representation. (And they also make + // the code a bit more complex, especially during minimization and + // when reshuffling states, as one needs to convert back and forth + // between state IDs and state indices.) + // + // To do this, we simply take the index of the state into the + // entire transition table, rather than the index of the state + // itself. e.g., If the stride is 64, then the ID of the 3rd state + // is 192, not 2. + let next = self.table.len(); + let id = + StateID::new(next).map_err(|_| BuildError::too_many_states())?; + self.table.extend(iter::repeat(0).take(self.stride())); + Ok(id) + } + + /// Swap the two states given in this transition table. + /// + /// This routine does not do anything to check the correctness of this + /// swap. Callers must ensure that other states pointing to id1 and id2 are + /// updated appropriately. + /// + /// Both id1 and id2 must point to valid states, otherwise this panics. + fn swap(&mut self, id1: StateID, id2: StateID) { + assert!(self.is_valid(id1), "invalid 'id1' state: {id1:?}"); + assert!(self.is_valid(id2), "invalid 'id2' state: {id2:?}"); + // We only need to swap the parts of the state that are used. So if the + // stride is 64, but the alphabet length is only 33, then we save a lot + // of work. + for b in 0..self.classes.alphabet_len() { + self.table.swap(id1.as_usize() + b, id2.as_usize() + b); + } + } + + /// Remap the transitions for the state given according to the function + /// given. This applies the given map function to every transition in the + /// given state and changes the transition in place to the result of the + /// map function for that transition. + fn remap(&mut self, id: StateID, map: impl Fn(StateID) -> StateID) { + for byte in 0..self.alphabet_len() { + let i = id.as_usize() + byte; + let next = self.table()[i]; + self.table_mut()[id.as_usize() + byte] = map(next); + } + } + + /// Truncate the states in this transition table to the given length. + /// + /// This routine does not do anything to check the correctness of this + /// truncation. Callers must ensure that other states pointing to truncated + /// states are updated appropriately. + fn truncate(&mut self, len: usize) { + self.table.truncate(len << self.stride2); + } +} + +impl> TransitionTable { + /// Writes a serialized form of this transition table to the buffer given. + /// If the buffer is too small, then an error is returned. To determine + /// how big the buffer must be, use `write_to_len`. + fn write_to( + &self, + mut dst: &mut [u8], + ) -> Result { + let nwrite = self.write_to_len(); + if dst.len() < nwrite { + return Err(SerializeError::buffer_too_small("transition table")); + } + dst = &mut dst[..nwrite]; + + // write state length + // Unwrap is OK since number of states is guaranteed to fit in a u32. 
+ E::write_u32(u32::try_from(self.len()).unwrap(), dst); + dst = &mut dst[size_of::()..]; + + // write state stride (as power of 2) + // Unwrap is OK since stride2 is guaranteed to be <= 9. + E::write_u32(u32::try_from(self.stride2).unwrap(), dst); + dst = &mut dst[size_of::()..]; + + // write byte class map + let n = self.classes.write_to(dst)?; + dst = &mut dst[n..]; + + // write actual transitions + for &sid in self.table() { + let n = wire::write_state_id::(sid, &mut dst); + dst = &mut dst[n..]; + } + Ok(nwrite) + } + + /// Returns the number of bytes the serialized form of this transition + /// table will use. + fn write_to_len(&self) -> usize { + size_of::() // state length + + size_of::() // stride2 + + self.classes.write_to_len() + + (self.table().len() * StateID::SIZE) + } + + /// Validates that every state ID in this transition table is valid. + /// + /// That is, every state ID can be used to correctly index a state in this + /// table. + fn validate(&self, dfa: &DFA) -> Result<(), DeserializeError> { + let sp = &dfa.special; + for state in self.states() { + // We check that the ID itself is well formed. That is, if it's + // a special state then it must actually be a quit, dead, accel, + // match or start state. + if sp.is_special_state(state.id()) { + let is_actually_special = sp.is_dead_state(state.id()) + || sp.is_quit_state(state.id()) + || sp.is_match_state(state.id()) + || sp.is_start_state(state.id()) + || sp.is_accel_state(state.id()); + if !is_actually_special { + // This is kind of a cryptic error message... + return Err(DeserializeError::generic( + "found dense state tagged as special but \ + wasn't actually special", + )); + } + if sp.is_match_state(state.id()) + && dfa.match_len(state.id()) == 0 + { + return Err(DeserializeError::generic( + "found match state with zero pattern IDs", + )); + } + } + for (_, to) in state.transitions() { + if !self.is_valid(to) { + return Err(DeserializeError::generic( + "found invalid state ID in transition table", + )); + } + } + } + Ok(()) + } + + /// Converts this transition table to a borrowed value. + fn as_ref(&self) -> TransitionTable<&'_ [u32]> { + TransitionTable { + table: self.table.as_ref(), + classes: self.classes.clone(), + stride2: self.stride2, + } + } + + /// Converts this transition table to an owned value. + #[cfg(feature = "alloc")] + fn to_owned(&self) -> TransitionTable> { + TransitionTable { + table: self.table.as_ref().to_vec(), + classes: self.classes.clone(), + stride2: self.stride2, + } + } + + /// Return the state for the given ID. If the given ID is not valid, then + /// this panics. + fn state(&self, id: StateID) -> State<'_> { + assert!(self.is_valid(id)); + + let i = id.as_usize(); + State { + id, + stride2: self.stride2, + transitions: &self.table()[i..i + self.alphabet_len()], + } + } + + /// Returns an iterator over all states in this transition table. + /// + /// This iterator yields a tuple for each state. The first element of the + /// tuple corresponds to a state's identifier, and the second element + /// corresponds to the state itself (comprised of its transitions). + fn states(&self) -> StateIter<'_, T> { + StateIter { + tt: self, + it: self.table().chunks(self.stride()).enumerate(), + } + } + + /// Convert a state identifier to an index to a state (in the range + /// 0..self.len()). + /// + /// This is useful when using a `Vec` as an efficient map keyed by state + /// to some other information (such as a remapped state ID). 
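+ ///
+ /// This is the exact inverse of `to_state_id` below: this shifts the ID
+ /// right by `stride2`, while `to_state_id` shifts the index left.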
+ /// + /// If the given ID is not valid, then this may panic or produce an + /// incorrect index. + fn to_index(&self, id: StateID) -> usize { + id.as_usize() >> self.stride2 + } + + /// Convert an index to a state (in the range 0..self.len()) to an actual + /// state identifier. + /// + /// This is useful when using a `Vec` as an efficient map keyed by state + /// to some other information (such as a remapped state ID). + /// + /// If the given index is not in the specified range, then this may panic + /// or produce an incorrect state ID. + fn to_state_id(&self, index: usize) -> StateID { + // CORRECTNESS: If the given index is not valid, then it is not + // required for this to panic or return a valid state ID. + StateID::new_unchecked(index << self.stride2) + } + + /// Returns the state ID for the state immediately following the one given. + /// + /// This does not check whether the state ID returned is invalid. In fact, + /// if the state ID given is the last state in this DFA, then the state ID + /// returned is guaranteed to be invalid. + #[cfg(feature = "dfa-build")] + fn next_state_id(&self, id: StateID) -> StateID { + self.to_state_id(self.to_index(id).checked_add(1).unwrap()) + } + + /// Returns the state ID for the state immediately preceding the one given. + /// + /// If the dead ID given (which is zero), then this panics. + #[cfg(feature = "dfa-build")] + fn prev_state_id(&self, id: StateID) -> StateID { + self.to_state_id(self.to_index(id).checked_sub(1).unwrap()) + } + + /// Returns the table as a slice of state IDs. + fn table(&self) -> &[StateID] { + wire::u32s_to_state_ids(self.table.as_ref()) + } + + /// Returns the total number of states in this transition table. + /// + /// Note that a DFA always has at least two states: the dead and quit + /// states. In particular, the dead state always has ID 0 and is + /// correspondingly always the first state. The dead state is never a match + /// state. + fn len(&self) -> usize { + self.table().len() >> self.stride2 + } + + /// Returns the total stride for every state in this DFA. This corresponds + /// to the total number of transitions used by each state in this DFA's + /// transition table. + fn stride(&self) -> usize { + 1 << self.stride2 + } + + /// Returns the total number of elements in the alphabet for this + /// transition table. This is always less than or equal to `self.stride()`. + /// It is only equal when the alphabet length is a power of 2. Otherwise, + /// it is always strictly less. + fn alphabet_len(&self) -> usize { + self.classes.alphabet_len() + } + + /// Returns true if and only if the given state ID is valid for this + /// transition table. Validity in this context means that the given ID can + /// be used as a valid offset with `self.stride()` to index this transition + /// table. + fn is_valid(&self, id: StateID) -> bool { + let id = id.as_usize(); + id < self.table().len() && id % self.stride() == 0 + } + + /// Return the memory usage, in bytes, of this transition table. + /// + /// This does not include the size of a `TransitionTable` value itself. + fn memory_usage(&self) -> usize { + self.table().len() * StateID::SIZE + } +} + +#[cfg(feature = "dfa-build")] +impl> TransitionTable { + /// Returns the table as a slice of state IDs. + fn table_mut(&mut self) -> &mut [StateID] { + wire::u32s_to_state_ids_mut(self.table.as_mut()) + } +} + +/// The set of all possible starting states in a DFA. 
+/// +/// The set of starting states corresponds to the possible choices one can make +/// in terms of starting a DFA. That is, before following the first transition, +/// you first need to select the state that you start in. +/// +/// Normally, a DFA converted from an NFA that has a single starting state +/// would itself just have one starting state. However, our support for look +/// around generally requires more starting states. The correct starting state +/// is chosen based on certain properties of the position at which we begin +/// our search. +/// +/// Before listing those properties, we first must define two terms: +/// +/// * `haystack` - The bytes to execute the search. The search always starts +/// at the beginning of `haystack` and ends before or at the end of +/// `haystack`. +/// * `context` - The (possibly empty) bytes surrounding `haystack`. `haystack` +/// must be contained within `context` such that `context` is at least as big +/// as `haystack`. +/// +/// This split is crucial for dealing with look-around. For example, consider +/// the context `foobarbaz`, the haystack `bar` and the regex `^bar$`. This +/// regex should _not_ match the haystack since `bar` does not appear at the +/// beginning of the input. Similarly, the regex `\Bbar\B` should match the +/// haystack because `bar` is not surrounded by word boundaries. But a search +/// that does not take context into account would not permit `\B` to match +/// since the beginning of any string matches a word boundary. Similarly, a +/// search that does not take context into account when searching `^bar$` in +/// the haystack `bar` would produce a match when it shouldn't. +/// +/// Thus, it follows that the starting state is chosen based on the following +/// criteria, derived from the position at which the search starts in the +/// `context` (corresponding to the start of `haystack`): +/// +/// 1. If the search starts at the beginning of `context`, then the `Text` +/// start state is used. (Since `^` corresponds to +/// `hir::Anchor::Start`.) +/// 2. If the search starts at a position immediately following a line +/// terminator, then the `Line` start state is used. (Since `(?m:^)` +/// corresponds to `hir::Anchor::StartLF`.) +/// 3. If the search starts at a position immediately following a byte +/// classified as a "word" character (`[_0-9a-zA-Z]`), then the `WordByte` +/// start state is used. (Since `(?-u:\b)` corresponds to a word boundary.) +/// 4. Otherwise, if the search starts at a position immediately following +/// a byte that is not classified as a "word" character (`[^_0-9a-zA-Z]`), +/// then the `NonWordByte` start state is used. (Since `(?-u:\B)` +/// corresponds to a not-word-boundary.) +/// +/// (N.B. Unicode word boundaries are not supported by the DFA because they +/// require multi-byte look-around and this is difficult to support in a DFA.) +/// +/// To further complicate things, we also support constructing individual +/// anchored start states for each pattern in the DFA. (Which is required to +/// implement overlapping regexes correctly, but is also generally useful.) +/// Thus, when individual start states for each pattern are enabled, then the +/// total number of start states represented is `4 + (4 * #patterns)`, where +/// the 4 comes from each of the 4 possibilities above. The first 4 represents +/// the starting states for the entire DFA, which support searching for +/// multiple patterns simultaneously (possibly unanchored). 
+/// +/// If individual start states are disabled, then this will only store 4 +/// start states. Typically, individual start states are only enabled when +/// constructing the reverse DFA for regex matching. But they are also useful +/// for building DFAs that can search for a specific pattern or even to support +/// both anchored and unanchored searches with the same DFA. +/// +/// Note though that while the start table always has either `4` or +/// `4 + (4 * #patterns)` starting state *ids*, the total number of states +/// might be considerably smaller. That is, many of the IDs may be duplicative. +/// (For example, if a regex doesn't have a `\b` sub-pattern, then there's no +/// reason to generate a unique starting state for handling word boundaries. +/// Similarly for start/end anchors.) +#[derive(Clone)] +pub(crate) struct StartTable { + /// The initial start state IDs. + /// + /// In practice, T is either `Vec` or `&[u32]`. + /// + /// The first `2 * stride` (currently always 8) entries always correspond + /// to the starts states for the entire DFA, with the first 4 entries being + /// for unanchored searches and the second 4 entries being for anchored + /// searches. To keep things simple, we always use 8 entries even if the + /// `StartKind` is not both. + /// + /// After that, there are `stride * patterns` state IDs, where `patterns` + /// may be zero in the case of a DFA with no patterns or in the case where + /// the DFA was built without enabling starting states for each pattern. + table: T, + /// The starting state configuration supported. When 'both', both + /// unanchored and anchored searches work. When 'unanchored', anchored + /// searches panic. When 'anchored', unanchored searches panic. + kind: StartKind, + /// The start state configuration for every possible byte. + start_map: StartByteMap, + /// The number of starting state IDs per pattern. + stride: usize, + /// The total number of patterns for which starting states are encoded. + /// This is `None` for DFAs that were built without start states for each + /// pattern. Thus, one cannot use this field to say how many patterns + /// are in the DFA in all cases. It is specific to how many patterns are + /// represented in this start table. + pattern_len: Option, + /// The universal starting state for unanchored searches. This is only + /// present when the DFA supports unanchored searches and when all starting + /// state IDs for an unanchored search are equivalent. + universal_start_unanchored: Option, + /// The universal starting state for anchored searches. This is only + /// present when the DFA supports anchored searches and when all starting + /// state IDs for an anchored search are equivalent. + universal_start_anchored: Option, +} + +#[cfg(feature = "dfa-build")] +impl StartTable> { + /// Create a valid set of start states all pointing to the dead state. + /// + /// When the corresponding DFA is constructed with start states for each + /// pattern, then `patterns` should be the number of patterns. Otherwise, + /// it should be zero. + /// + /// If the total table size could exceed the allocatable limit, then this + /// returns an error. In practice, this is unlikely to be able to occur, + /// since it's likely that allocation would have failed long before it got + /// to this point. 
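+ ///
+ /// As a rough illustration (numbers chosen for the example, not
+ /// normative): with `stride = Start::len()` and `pattern_len = Some(3)`,
+ /// the table ends up holding `2 * stride` DFA-wide entries followed by
+ /// `3 * stride` per-pattern entries, all initialized to DEAD.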
+ fn dead( + kind: StartKind, + lookm: &LookMatcher, + pattern_len: Option, + ) -> Result>, BuildError> { + if let Some(len) = pattern_len { + assert!(len <= PatternID::LIMIT); + } + let stride = Start::len(); + // OK because 2*4 is never going to overflow anything. + let starts_len = stride.checked_mul(2).unwrap(); + let pattern_starts_len = + match stride.checked_mul(pattern_len.unwrap_or(0)) { + Some(x) => x, + None => return Err(BuildError::too_many_start_states()), + }; + let table_len = match starts_len.checked_add(pattern_starts_len) { + Some(x) => x, + None => return Err(BuildError::too_many_start_states()), + }; + if let Err(_) = isize::try_from(table_len) { + return Err(BuildError::too_many_start_states()); + } + let table = vec![DEAD.as_u32(); table_len]; + let start_map = StartByteMap::new(lookm); + Ok(StartTable { + table, + kind, + start_map, + stride, + pattern_len, + universal_start_unanchored: None, + universal_start_anchored: None, + }) + } +} + +impl<'a> StartTable<&'a [u32]> { + /// Deserialize a table of start state IDs starting at the beginning of + /// `slice`. Upon success, return the total number of bytes read along with + /// the table of starting state IDs. + /// + /// If there was a problem deserializing any part of the starting IDs, + /// then this returns an error. Notably, if the given slice does not have + /// the same alignment as `StateID`, then this will return an error (among + /// other possible errors). + /// + /// This is guaranteed to execute in constant time. + /// + /// # Safety + /// + /// This routine is not safe because it does not check the validity of the + /// starting state IDs themselves. In particular, the number of starting + /// IDs can be of variable length, so it's possible that checking their + /// validity cannot be done in constant time. An invalid starting state + /// ID is not safe because other code may rely on the starting IDs being + /// correct (such as explicit bounds check elision). Therefore, an invalid + /// start ID can lead to undefined behavior. + /// + /// Callers that use this function must either pass on the safety invariant + /// or guarantee that the bytes given contain valid starting state IDs. + /// This guarantee is upheld by the bytes written by `write_to`. 
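+ ///
+ /// The fields are read back in the same order that `write_to` emits
+ /// them: start kind, start byte map, stride, pattern count, the two
+ /// optional universal start state IDs and finally the table of start
+ /// state IDs itself.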
+ unsafe fn from_bytes_unchecked( + mut slice: &'a [u8], + ) -> Result<(StartTable<&'a [u32]>, usize), DeserializeError> { + let slice_start = slice.as_ptr().as_usize(); + + let (kind, nr) = StartKind::from_bytes(slice)?; + slice = &slice[nr..]; + + let (start_map, nr) = StartByteMap::from_bytes(slice)?; + slice = &slice[nr..]; + + let (stride, nr) = + wire::try_read_u32_as_usize(slice, "start table stride")?; + slice = &slice[nr..]; + if stride != Start::len() { + return Err(DeserializeError::generic( + "invalid starting table stride", + )); + } + + let (maybe_pattern_len, nr) = + wire::try_read_u32_as_usize(slice, "start table patterns")?; + slice = &slice[nr..]; + let pattern_len = if maybe_pattern_len.as_u32() == u32::MAX { + None + } else { + Some(maybe_pattern_len) + }; + if pattern_len.map_or(false, |len| len > PatternID::LIMIT) { + return Err(DeserializeError::generic( + "invalid number of patterns", + )); + } + + let (universal_unanchored, nr) = + wire::try_read_u32(slice, "universal unanchored start")?; + slice = &slice[nr..]; + let universal_start_unanchored = if universal_unanchored == u32::MAX { + None + } else { + Some(StateID::try_from(universal_unanchored).map_err(|e| { + DeserializeError::state_id_error( + e, + "universal unanchored start", + ) + })?) + }; + + let (universal_anchored, nr) = + wire::try_read_u32(slice, "universal anchored start")?; + slice = &slice[nr..]; + let universal_start_anchored = if universal_anchored == u32::MAX { + None + } else { + Some(StateID::try_from(universal_anchored).map_err(|e| { + DeserializeError::state_id_error(e, "universal anchored start") + })?) + }; + + let pattern_table_size = wire::mul( + stride, + pattern_len.unwrap_or(0), + "invalid pattern length", + )?; + // Our start states always start with a two stride of start states for + // the entire automaton. The first stride is for unanchored starting + // states and the second stride is for anchored starting states. What + // follows it are an optional set of start states for each pattern. + let start_state_len = wire::add( + wire::mul(2, stride, "start state stride too big")?, + pattern_table_size, + "invalid 'any' pattern starts size", + )?; + let table_bytes_len = wire::mul( + start_state_len, + StateID::SIZE, + "pattern table bytes length", + )?; + wire::check_slice_len(slice, table_bytes_len, "start ID table")?; + wire::check_alignment::(slice)?; + let table_bytes = &slice[..table_bytes_len]; + slice = &slice[table_bytes_len..]; + // SAFETY: Since StateID is always representable as a u32, all we need + // to do is ensure that we have the proper length and alignment. We've + // checked both above, so the cast below is safe. + // + // N.B. This is the only not-safe code in this function. + let table = core::slice::from_raw_parts( + table_bytes.as_ptr().cast::(), + start_state_len, + ); + let st = StartTable { + table, + kind, + start_map, + stride, + pattern_len, + universal_start_unanchored, + universal_start_anchored, + }; + Ok((st, slice.as_ptr().as_usize() - slice_start)) + } +} + +impl> StartTable { + /// Writes a serialized form of this start table to the buffer given. If + /// the buffer is too small, then an error is returned. To determine how + /// big the buffer must be, use `write_to_len`. 
+ fn write_to( + &self, + mut dst: &mut [u8], + ) -> Result { + let nwrite = self.write_to_len(); + if dst.len() < nwrite { + return Err(SerializeError::buffer_too_small( + "starting table ids", + )); + } + dst = &mut dst[..nwrite]; + + // write start kind + let nw = self.kind.write_to::(dst)?; + dst = &mut dst[nw..]; + // write start byte map + let nw = self.start_map.write_to(dst)?; + dst = &mut dst[nw..]; + // write stride + // Unwrap is OK since the stride is always 4 (currently). + E::write_u32(u32::try_from(self.stride).unwrap(), dst); + dst = &mut dst[size_of::()..]; + // write pattern length + // Unwrap is OK since number of patterns is guaranteed to fit in a u32. + E::write_u32( + u32::try_from(self.pattern_len.unwrap_or(0xFFFF_FFFF)).unwrap(), + dst, + ); + dst = &mut dst[size_of::()..]; + // write universal start unanchored state id, u32::MAX if absent + E::write_u32( + self.universal_start_unanchored + .map_or(u32::MAX, |sid| sid.as_u32()), + dst, + ); + dst = &mut dst[size_of::()..]; + // write universal start anchored state id, u32::MAX if absent + E::write_u32( + self.universal_start_anchored.map_or(u32::MAX, |sid| sid.as_u32()), + dst, + ); + dst = &mut dst[size_of::()..]; + // write start IDs + for &sid in self.table() { + let n = wire::write_state_id::(sid, &mut dst); + dst = &mut dst[n..]; + } + Ok(nwrite) + } + + /// Returns the number of bytes the serialized form of this start ID table + /// will use. + fn write_to_len(&self) -> usize { + self.kind.write_to_len() + + self.start_map.write_to_len() + + size_of::() // stride + + size_of::() // # patterns + + size_of::() // universal unanchored start + + size_of::() // universal anchored start + + (self.table().len() * StateID::SIZE) + } + + /// Validates that every state ID in this start table is valid by checking + /// it against the given transition table (which must be for the same DFA). + /// + /// That is, every state ID can be used to correctly index a state. + fn validate(&self, dfa: &DFA) -> Result<(), DeserializeError> { + let tt = &dfa.tt; + if !self.universal_start_unanchored.map_or(true, |s| tt.is_valid(s)) { + return Err(DeserializeError::generic( + "found invalid universal unanchored starting state ID", + )); + } + if !self.universal_start_anchored.map_or(true, |s| tt.is_valid(s)) { + return Err(DeserializeError::generic( + "found invalid universal anchored starting state ID", + )); + } + for &id in self.table() { + if !tt.is_valid(id) { + return Err(DeserializeError::generic( + "found invalid starting state ID", + )); + } + } + Ok(()) + } + + /// Converts this start list to a borrowed value. + fn as_ref(&self) -> StartTable<&'_ [u32]> { + StartTable { + table: self.table.as_ref(), + kind: self.kind, + start_map: self.start_map.clone(), + stride: self.stride, + pattern_len: self.pattern_len, + universal_start_unanchored: self.universal_start_unanchored, + universal_start_anchored: self.universal_start_anchored, + } + } + + /// Converts this start list to an owned value. + #[cfg(feature = "alloc")] + fn to_owned(&self) -> StartTable> { + StartTable { + table: self.table.as_ref().to_vec(), + kind: self.kind, + start_map: self.start_map.clone(), + stride: self.stride, + pattern_len: self.pattern_len, + universal_start_unanchored: self.universal_start_unanchored, + universal_start_anchored: self.universal_start_anchored, + } + } + + /// Return the start state for the given input and starting configuration. + /// This returns an error if the input configuration is not supported by + /// this DFA. 
For example, requesting an unanchored search when the DFA was + /// not built with unanchored starting states. Or asking for an anchored + /// pattern search with an invalid pattern ID or on a DFA that was not + /// built with start states for each pattern. + #[cfg_attr(feature = "perf-inline", inline(always))] + fn start( + &self, + anchored: Anchored, + start: Start, + ) -> Result { + let start_index = start.as_usize(); + let index = match anchored { + Anchored::No => { + if !self.kind.has_unanchored() { + return Err(StartError::unsupported_anchored(anchored)); + } + start_index + } + Anchored::Yes => { + if !self.kind.has_anchored() { + return Err(StartError::unsupported_anchored(anchored)); + } + self.stride + start_index + } + Anchored::Pattern(pid) => { + let len = match self.pattern_len { + None => { + return Err(StartError::unsupported_anchored(anchored)) + } + Some(len) => len, + }; + if pid.as_usize() >= len { + return Ok(DEAD); + } + (2 * self.stride) + + (self.stride * pid.as_usize()) + + start_index + } + }; + Ok(self.table()[index]) + } + + /// Returns an iterator over all start state IDs in this table. + /// + /// Each item is a triple of: start state ID, the start state type and the + /// pattern ID (if any). + fn iter(&self) -> StartStateIter<'_> { + StartStateIter { st: self.as_ref(), i: 0 } + } + + /// Returns the table as a slice of state IDs. + fn table(&self) -> &[StateID] { + wire::u32s_to_state_ids(self.table.as_ref()) + } + + /// Return the memory usage, in bytes, of this start list. + /// + /// This does not include the size of a `StartList` value itself. + fn memory_usage(&self) -> usize { + self.table().len() * StateID::SIZE + } +} + +#[cfg(feature = "dfa-build")] +impl> StartTable { + /// Set the start state for the given index and pattern. + /// + /// If the pattern ID or state ID are not valid, then this will panic. + fn set_start(&mut self, anchored: Anchored, start: Start, id: StateID) { + let start_index = start.as_usize(); + let index = match anchored { + Anchored::No => start_index, + Anchored::Yes => self.stride + start_index, + Anchored::Pattern(pid) => { + let pid = pid.as_usize(); + let len = self + .pattern_len + .expect("start states for each pattern enabled"); + assert!(pid < len, "invalid pattern ID {pid:?}"); + self.stride + .checked_mul(pid) + .unwrap() + .checked_add(self.stride.checked_mul(2).unwrap()) + .unwrap() + .checked_add(start_index) + .unwrap() + } + }; + self.table_mut()[index] = id; + } + + /// Returns the table as a mutable slice of state IDs. + fn table_mut(&mut self) -> &mut [StateID] { + wire::u32s_to_state_ids_mut(self.table.as_mut()) + } +} + +/// An iterator over start state IDs. +/// +/// This iterator yields a triple of start state ID, the anchored mode and the +/// start state type. If a pattern ID is relevant, then the anchored mode will +/// contain it. Start states with an anchored mode containing a pattern ID will +/// only occur when the DFA was compiled with start states for each pattern +/// (which is disabled by default). +pub(crate) struct StartStateIter<'a> { + st: StartTable<&'a [u32]>, + i: usize, +} + +impl<'a> Iterator for StartStateIter<'a> { + type Item = (StateID, Anchored, Start); + + fn next(&mut self) -> Option<(StateID, Anchored, Start)> { + let i = self.i; + let table = self.st.table(); + if i >= table.len() { + return None; + } + self.i += 1; + + // This unwrap is okay since the stride of the starting state table + // must always match the number of start state types. 
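+        // Illustrative decoding of the flat index `i` (editorial note; the
+        // concrete stride value below is hypothetical):
+        //
+        //     with stride = 4:
+        //       i in 0..4   -> unanchored starts,  start type = i % 4
+        //       i in 4..8   -> anchored starts,    start type = i % 4
+        //       i in 8..12  -> pattern 0's starts, 12..16 -> pattern 1's, ...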
+ let start_type = Start::from_usize(i % self.st.stride).unwrap(); + let anchored = if i < self.st.stride { + Anchored::No + } else if i < (2 * self.st.stride) { + Anchored::Yes + } else { + let pid = (i - (2 * self.st.stride)) / self.st.stride; + Anchored::Pattern(PatternID::new(pid).unwrap()) + }; + Some((table[i], anchored, start_type)) + } +} + +/// This type represents that patterns that should be reported whenever a DFA +/// enters a match state. This structure exists to support DFAs that search for +/// matches for multiple regexes. +/// +/// This structure relies on the fact that all match states in a DFA occur +/// contiguously in the DFA's transition table. (See dfa/special.rs for a more +/// detailed breakdown of the representation.) Namely, when a match occurs, we +/// know its state ID. Since we know the start and end of the contiguous region +/// of match states, we can use that to compute the position at which the match +/// state occurs. That in turn is used as an offset into this structure. +#[derive(Clone, Debug)] +struct MatchStates { + /// Slices is a flattened sequence of pairs, where each pair points to a + /// sub-slice of pattern_ids. The first element of the pair is an offset + /// into pattern_ids and the second element of the pair is the number + /// of 32-bit pattern IDs starting at that position. That is, each pair + /// corresponds to a single DFA match state and its corresponding match + /// IDs. The number of pairs always corresponds to the number of distinct + /// DFA match states. + /// + /// In practice, T is either Vec or &[u32]. + slices: T, + /// A flattened sequence of pattern IDs for each DFA match state. The only + /// way to correctly read this sequence is indirectly via `slices`. + /// + /// In practice, T is either Vec or &[u32]. + pattern_ids: T, + /// The total number of unique patterns represented by these match states. + pattern_len: usize, +} + +impl<'a> MatchStates<&'a [u32]> { + unsafe fn from_bytes_unchecked( + mut slice: &'a [u8], + ) -> Result<(MatchStates<&'a [u32]>, usize), DeserializeError> { + let slice_start = slice.as_ptr().as_usize(); + + // Read the total number of match states. + let (state_len, nr) = + wire::try_read_u32_as_usize(slice, "match state length")?; + slice = &slice[nr..]; + + // Read the slice start/length pairs. + let pair_len = wire::mul(2, state_len, "match state offset pairs")?; + let slices_bytes_len = wire::mul( + pair_len, + PatternID::SIZE, + "match state slice offset byte length", + )?; + wire::check_slice_len(slice, slices_bytes_len, "match state slices")?; + wire::check_alignment::(slice)?; + let slices_bytes = &slice[..slices_bytes_len]; + slice = &slice[slices_bytes_len..]; + // SAFETY: Since PatternID is always representable as a u32, all we + // need to do is ensure that we have the proper length and alignment. + // We've checked both above, so the cast below is safe. + // + // N.B. This is one of the few not-safe snippets in this function, + // so we mark it explicitly to call it out. + let slices = core::slice::from_raw_parts( + slices_bytes.as_ptr().cast::(), + pair_len, + ); + + // Read the total number of unique pattern IDs (which is always 1 more + // than the maximum pattern ID in this automaton, since pattern IDs are + // handed out contiguously starting at 0). + let (pattern_len, nr) = + wire::try_read_u32_as_usize(slice, "pattern length")?; + slice = &slice[nr..]; + + // Now read the pattern ID length. 
We don't need to store this + // explicitly, but we need it to know how many pattern IDs to read. + let (idlen, nr) = + wire::try_read_u32_as_usize(slice, "pattern ID length")?; + slice = &slice[nr..]; + + // Read the actual pattern IDs. + let pattern_ids_len = + wire::mul(idlen, PatternID::SIZE, "pattern ID byte length")?; + wire::check_slice_len(slice, pattern_ids_len, "match pattern IDs")?; + wire::check_alignment::(slice)?; + let pattern_ids_bytes = &slice[..pattern_ids_len]; + slice = &slice[pattern_ids_len..]; + // SAFETY: Since PatternID is always representable as a u32, all we + // need to do is ensure that we have the proper length and alignment. + // We've checked both above, so the cast below is safe. + // + // N.B. This is one of the few not-safe snippets in this function, + // so we mark it explicitly to call it out. + let pattern_ids = core::slice::from_raw_parts( + pattern_ids_bytes.as_ptr().cast::(), + idlen, + ); + + let ms = MatchStates { slices, pattern_ids, pattern_len }; + Ok((ms, slice.as_ptr().as_usize() - slice_start)) + } +} + +#[cfg(feature = "dfa-build")] +impl MatchStates> { + fn empty(pattern_len: usize) -> MatchStates> { + assert!(pattern_len <= PatternID::LIMIT); + MatchStates { slices: vec![], pattern_ids: vec![], pattern_len } + } + + fn new( + matches: &BTreeMap>, + pattern_len: usize, + ) -> Result>, BuildError> { + let mut m = MatchStates::empty(pattern_len); + for (_, pids) in matches.iter() { + let start = PatternID::new(m.pattern_ids.len()) + .map_err(|_| BuildError::too_many_match_pattern_ids())?; + m.slices.push(start.as_u32()); + // This is always correct since the number of patterns in a single + // match state can never exceed maximum number of allowable + // patterns. Why? Because a pattern can only appear once in a + // particular match state, by construction. (And since our pattern + // ID limit is one less than u32::MAX, we're guaranteed that the + // length fits in a u32.) + m.slices.push(u32::try_from(pids.len()).unwrap()); + for &pid in pids { + m.pattern_ids.push(pid.as_u32()); + } + } + m.pattern_len = pattern_len; + Ok(m) + } + + fn new_with_map( + &self, + matches: &BTreeMap>, + ) -> Result>, BuildError> { + MatchStates::new(matches, self.pattern_len) + } +} + +impl> MatchStates { + /// Writes a serialized form of these match states to the buffer given. If + /// the buffer is too small, then an error is returned. To determine how + /// big the buffer must be, use `write_to_len`. + fn write_to( + &self, + mut dst: &mut [u8], + ) -> Result { + let nwrite = self.write_to_len(); + if dst.len() < nwrite { + return Err(SerializeError::buffer_too_small("match states")); + } + dst = &mut dst[..nwrite]; + + // write state ID length + // Unwrap is OK since number of states is guaranteed to fit in a u32. + E::write_u32(u32::try_from(self.len()).unwrap(), dst); + dst = &mut dst[size_of::()..]; + + // write slice offset pairs + for &pid in self.slices() { + let n = wire::write_pattern_id::(pid, &mut dst); + dst = &mut dst[n..]; + } + + // write unique pattern ID length + // Unwrap is OK since number of patterns is guaranteed to fit in a u32. + E::write_u32(u32::try_from(self.pattern_len).unwrap(), dst); + dst = &mut dst[size_of::()..]; + + // write pattern ID length + // Unwrap is OK since we check at construction (and deserialization) + // that the number of patterns is representable as a u32. 
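+        // For reference, the full record written by this routine (and read
+        // back by `from_bytes_unchecked` above) is, in order:
+        //
+        //     [ match state count: u32 ]
+        //     [ 2 * count slice (offset, len) pairs, one u32 each ]
+        //     [ unique pattern count: u32 ]
+        //     [ total pattern ID count (idlen): u32 ]
+        //     [ idlen pattern IDs, one u32 each ]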
+ E::write_u32(u32::try_from(self.pattern_ids().len()).unwrap(), dst); + dst = &mut dst[size_of::()..]; + + // write pattern IDs + for &pid in self.pattern_ids() { + let n = wire::write_pattern_id::(pid, &mut dst); + dst = &mut dst[n..]; + } + + Ok(nwrite) + } + + /// Returns the number of bytes the serialized form of these match states + /// will use. + fn write_to_len(&self) -> usize { + size_of::() // match state length + + (self.slices().len() * PatternID::SIZE) + + size_of::() // unique pattern ID length + + size_of::() // pattern ID length + + (self.pattern_ids().len() * PatternID::SIZE) + } + + /// Validates that the match state info is itself internally consistent and + /// consistent with the recorded match state region in the given DFA. + fn validate(&self, dfa: &DFA) -> Result<(), DeserializeError> { + if self.len() != dfa.special.match_len(dfa.stride()) { + return Err(DeserializeError::generic( + "match state length mismatch", + )); + } + for si in 0..self.len() { + let start = self.slices()[si * 2].as_usize(); + let len = self.slices()[si * 2 + 1].as_usize(); + if start >= self.pattern_ids().len() { + return Err(DeserializeError::generic( + "invalid pattern ID start offset", + )); + } + if start + len > self.pattern_ids().len() { + return Err(DeserializeError::generic( + "invalid pattern ID length", + )); + } + for mi in 0..len { + let pid = self.pattern_id(si, mi); + if pid.as_usize() >= self.pattern_len { + return Err(DeserializeError::generic( + "invalid pattern ID", + )); + } + } + } + Ok(()) + } + + /// Converts these match states back into their map form. This is useful + /// when shuffling states, as the normal MatchStates representation is not + /// amenable to easy state swapping. But with this map, to swap id1 and + /// id2, all you need to do is: + /// + /// if let Some(pids) = map.remove(&id1) { + /// map.insert(id2, pids); + /// } + /// + /// Once shuffling is done, use MatchStates::new to convert back. + #[cfg(feature = "dfa-build")] + fn to_map(&self, dfa: &DFA) -> BTreeMap> { + let mut map = BTreeMap::new(); + for i in 0..self.len() { + let mut pids = vec![]; + for j in 0..self.pattern_len(i) { + pids.push(self.pattern_id(i, j)); + } + map.insert(self.match_state_id(dfa, i), pids); + } + map + } + + /// Converts these match states to a borrowed value. + fn as_ref(&self) -> MatchStates<&'_ [u32]> { + MatchStates { + slices: self.slices.as_ref(), + pattern_ids: self.pattern_ids.as_ref(), + pattern_len: self.pattern_len, + } + } + + /// Converts these match states to an owned value. + #[cfg(feature = "alloc")] + fn to_owned(&self) -> MatchStates> { + MatchStates { + slices: self.slices.as_ref().to_vec(), + pattern_ids: self.pattern_ids.as_ref().to_vec(), + pattern_len: self.pattern_len, + } + } + + /// Returns the match state ID given the match state index. (Where the + /// first match state corresponds to index 0.) + /// + /// This panics if there is no match state at the given index. + fn match_state_id(&self, dfa: &DFA, index: usize) -> StateID { + assert!(dfa.special.matches(), "no match states to index"); + // This is one of the places where we rely on the fact that match + // states are contiguous in the transition table. Namely, that the + // first match state ID always corresponds to dfa.special.min_start. + // From there, since we know the stride, we can compute the ID of any + // match state given its index. 
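+        // Worked example (editorial; the concrete numbers are hypothetical):
+        // with stride2 = 5 (32 u32s per state) and special.min_match = 256,
+        // the match state at index 3 gets ID 256 + (3 << 5) = 352.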
+ let stride2 = u32::try_from(dfa.stride2()).unwrap(); + let offset = index.checked_shl(stride2).unwrap(); + let id = dfa.special.min_match.as_usize().checked_add(offset).unwrap(); + let sid = StateID::new(id).unwrap(); + assert!(dfa.is_match_state(sid)); + sid + } + + /// Returns the pattern ID at the given match index for the given match + /// state. + /// + /// The match state index is the state index minus the state index of the + /// first match state in the DFA. + /// + /// The match index is the index of the pattern ID for the given state. + /// The index must be less than `self.pattern_len(state_index)`. + #[cfg_attr(feature = "perf-inline", inline(always))] + fn pattern_id(&self, state_index: usize, match_index: usize) -> PatternID { + self.pattern_id_slice(state_index)[match_index] + } + + /// Returns the number of patterns in the given match state. + /// + /// The match state index is the state index minus the state index of the + /// first match state in the DFA. + #[cfg_attr(feature = "perf-inline", inline(always))] + fn pattern_len(&self, state_index: usize) -> usize { + self.slices()[state_index * 2 + 1].as_usize() + } + + /// Returns all of the pattern IDs for the given match state index. + /// + /// The match state index is the state index minus the state index of the + /// first match state in the DFA. + #[cfg_attr(feature = "perf-inline", inline(always))] + fn pattern_id_slice(&self, state_index: usize) -> &[PatternID] { + let start = self.slices()[state_index * 2].as_usize(); + let len = self.pattern_len(state_index); + &self.pattern_ids()[start..start + len] + } + + /// Returns the pattern ID offset slice of u32 as a slice of PatternID. + #[cfg_attr(feature = "perf-inline", inline(always))] + fn slices(&self) -> &[PatternID] { + wire::u32s_to_pattern_ids(self.slices.as_ref()) + } + + /// Returns the total number of match states. + #[cfg_attr(feature = "perf-inline", inline(always))] + fn len(&self) -> usize { + assert_eq!(0, self.slices().len() % 2); + self.slices().len() / 2 + } + + /// Returns the pattern ID slice of u32 as a slice of PatternID. + #[cfg_attr(feature = "perf-inline", inline(always))] + fn pattern_ids(&self) -> &[PatternID] { + wire::u32s_to_pattern_ids(self.pattern_ids.as_ref()) + } + + /// Return the memory usage, in bytes, of these match pairs. + fn memory_usage(&self) -> usize { + (self.slices().len() + self.pattern_ids().len()) * PatternID::SIZE + } +} + +/// A common set of flags for both dense and sparse DFAs. This primarily +/// centralizes the serialization format of these flags at a bitset. +#[derive(Clone, Copy, Debug)] +pub(crate) struct Flags { + /// Whether the DFA can match the empty string. When this is false, all + /// matches returned by this DFA are guaranteed to have non-zero length. + pub(crate) has_empty: bool, + /// Whether the DFA should only produce matches with spans that correspond + /// to valid UTF-8. This also includes omitting any zero-width matches that + /// split the UTF-8 encoding of a codepoint. + pub(crate) is_utf8: bool, + /// Whether the DFA is always anchored or not, regardless of `Input` + /// configuration. This is useful for avoiding a reverse scan even when + /// executing unanchored searches. + pub(crate) is_always_start_anchored: bool, +} + +impl Flags { + /// Creates a set of flags for a DFA from an NFA. + /// + /// N.B. This constructor was defined at the time of writing because all + /// of the flags are derived directly from the NFA. 
If this changes in the + /// future, we might be more thoughtful about how the `Flags` value is + /// itself built. + #[cfg(feature = "dfa-build")] + fn from_nfa(nfa: &thompson::NFA) -> Flags { + Flags { + has_empty: nfa.has_empty(), + is_utf8: nfa.is_utf8(), + is_always_start_anchored: nfa.is_always_start_anchored(), + } + } + + /// Deserializes the flags from the given slice. On success, this also + /// returns the number of bytes read from the slice. + pub(crate) fn from_bytes( + slice: &[u8], + ) -> Result<(Flags, usize), DeserializeError> { + let (bits, nread) = wire::try_read_u32(slice, "flag bitset")?; + let flags = Flags { + has_empty: bits & (1 << 0) != 0, + is_utf8: bits & (1 << 1) != 0, + is_always_start_anchored: bits & (1 << 2) != 0, + }; + Ok((flags, nread)) + } + + /// Writes these flags to the given byte slice. If the buffer is too small, + /// then an error is returned. To determine how big the buffer must be, + /// use `write_to_len`. + pub(crate) fn write_to( + &self, + dst: &mut [u8], + ) -> Result { + fn bool_to_int(b: bool) -> u32 { + if b { + 1 + } else { + 0 + } + } + + let nwrite = self.write_to_len(); + if dst.len() < nwrite { + return Err(SerializeError::buffer_too_small("flag bitset")); + } + let bits = (bool_to_int(self.has_empty) << 0) + | (bool_to_int(self.is_utf8) << 1) + | (bool_to_int(self.is_always_start_anchored) << 2); + E::write_u32(bits, dst); + Ok(nwrite) + } + + /// Returns the number of bytes the serialized form of these flags + /// will use. + pub(crate) fn write_to_len(&self) -> usize { + size_of::() + } +} + +/// An iterator over all states in a DFA. +/// +/// This iterator yields a tuple for each state. The first element of the +/// tuple corresponds to a state's identifier, and the second element +/// corresponds to the state itself (comprised of its transitions). +/// +/// `'a` corresponding to the lifetime of original DFA, `T` corresponds to +/// the type of the transition table itself. +pub(crate) struct StateIter<'a, T> { + tt: &'a TransitionTable, + it: iter::Enumerate>, +} + +impl<'a, T: AsRef<[u32]>> Iterator for StateIter<'a, T> { + type Item = State<'a>; + + fn next(&mut self) -> Option> { + self.it.next().map(|(index, _)| { + let id = self.tt.to_state_id(index); + self.tt.state(id) + }) + } +} + +/// An immutable representation of a single DFA state. +/// +/// `'a` corresponding to the lifetime of a DFA's transition table. +pub(crate) struct State<'a> { + id: StateID, + stride2: usize, + transitions: &'a [StateID], +} + +impl<'a> State<'a> { + /// Return an iterator over all transitions in this state. This yields + /// a number of transitions equivalent to the alphabet length of the + /// corresponding DFA. + /// + /// Each transition is represented by a tuple. The first element is + /// the input byte for that transition and the second element is the + /// transitions itself. + pub(crate) fn transitions(&self) -> StateTransitionIter<'_> { + StateTransitionIter { + len: self.transitions.len(), + it: self.transitions.iter().enumerate(), + } + } + + /// Return an iterator over a sparse representation of the transitions in + /// this state. Only non-dead transitions are returned. + /// + /// The "sparse" representation in this case corresponds to a sequence of + /// triples. The first two elements of the triple comprise an inclusive + /// byte range while the last element corresponds to the transition taken + /// for all bytes in the range. 
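+    /// For example, a state whose transitions for every byte in `b'a'..=b'z'`
+    /// all lead to the same (non-dead) state `S` is yielded as the single
+    /// triple `('a', 'z', S)`, where the dense iterator above would yield 26
+    /// separate entries.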
+ /// + /// This is somewhat more condensed than the classical sparse + /// representation (where you have an element for every non-dead + /// transition), but in practice, checking if a byte is in a range is very + /// cheap and using ranges tends to conserve quite a bit more space. + pub(crate) fn sparse_transitions(&self) -> StateSparseTransitionIter<'_> { + StateSparseTransitionIter { dense: self.transitions(), cur: None } + } + + /// Returns the identifier for this state. + pub(crate) fn id(&self) -> StateID { + self.id + } + + /// Analyzes this state to determine whether it can be accelerated. If so, + /// it returns an accelerator that contains at least one byte. + #[cfg(feature = "dfa-build")] + fn accelerate(&self, classes: &ByteClasses) -> Option { + // We just try to add bytes to our accelerator. Once adding fails + // (because we've added too many bytes), then give up. + let mut accel = Accel::new(); + for (class, id) in self.transitions() { + if id == self.id() { + continue; + } + for unit in classes.elements(class) { + if let Some(byte) = unit.as_u8() { + if !accel.add(byte) { + return None; + } + } + } + } + if accel.is_empty() { + None + } else { + Some(accel) + } + } +} + +impl<'a> fmt::Debug for State<'a> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + for (i, (start, end, sid)) in self.sparse_transitions().enumerate() { + let id = if f.alternate() { + sid.as_usize() + } else { + sid.as_usize() >> self.stride2 + }; + if i > 0 { + write!(f, ", ")?; + } + if start == end { + write!(f, "{start:?} => {id:?}")?; + } else { + write!(f, "{start:?}-{end:?} => {id:?}")?; + } + } + Ok(()) + } +} + +/// An iterator over all transitions in a single DFA state. This yields +/// a number of transitions equivalent to the alphabet length of the +/// corresponding DFA. +/// +/// Each transition is represented by a tuple. The first element is the input +/// byte for that transition and the second element is the transition itself. +#[derive(Debug)] +pub(crate) struct StateTransitionIter<'a> { + len: usize, + it: iter::Enumerate>, +} + +impl<'a> Iterator for StateTransitionIter<'a> { + type Item = (alphabet::Unit, StateID); + + fn next(&mut self) -> Option<(alphabet::Unit, StateID)> { + self.it.next().map(|(i, &id)| { + let unit = if i + 1 == self.len { + alphabet::Unit::eoi(i) + } else { + let b = u8::try_from(i) + .expect("raw byte alphabet is never exceeded"); + alphabet::Unit::u8(b) + }; + (unit, id) + }) + } +} + +/// An iterator over all non-DEAD transitions in a single DFA state using a +/// sparse representation. +/// +/// Each transition is represented by a triple. The first two elements of the +/// triple comprise an inclusive byte range while the last element corresponds +/// to the transition taken for all bytes in the range. +/// +/// As a convenience, this always returns `alphabet::Unit` values of the same +/// type. That is, you'll never get a (byte, EOI) or a (EOI, byte). Only (byte, +/// byte) and (EOI, EOI) values are yielded. 
+#[derive(Debug)] +pub(crate) struct StateSparseTransitionIter<'a> { + dense: StateTransitionIter<'a>, + cur: Option<(alphabet::Unit, alphabet::Unit, StateID)>, +} + +impl<'a> Iterator for StateSparseTransitionIter<'a> { + type Item = (alphabet::Unit, alphabet::Unit, StateID); + + fn next(&mut self) -> Option<(alphabet::Unit, alphabet::Unit, StateID)> { + while let Some((unit, next)) = self.dense.next() { + let (prev_start, prev_end, prev_next) = match self.cur { + Some(t) => t, + None => { + self.cur = Some((unit, unit, next)); + continue; + } + }; + if prev_next == next && !unit.is_eoi() { + self.cur = Some((prev_start, unit, prev_next)); + } else { + self.cur = Some((unit, unit, next)); + if prev_next != DEAD { + return Some((prev_start, prev_end, prev_next)); + } + } + } + if let Some((start, end, next)) = self.cur.take() { + if next != DEAD { + return Some((start, end, next)); + } + } + None + } +} + +/// An error that occurred during the construction of a DFA. +/// +/// This error does not provide many introspection capabilities. There are +/// generally only two things you can do with it: +/// +/// * Obtain a human readable message via its `std::fmt::Display` impl. +/// * Access an underlying [`nfa::thompson::BuildError`](thompson::BuildError) +/// type from its `source` method via the `std::error::Error` trait. This error +/// only occurs when using convenience routines for building a DFA directly +/// from a pattern string. +/// +/// When the `std` feature is enabled, this implements the `std::error::Error` +/// trait. +#[cfg(feature = "dfa-build")] +#[derive(Clone, Debug)] +pub struct BuildError { + kind: BuildErrorKind, +} + +#[cfg(feature = "dfa-build")] +impl BuildError { + /// Returns true if and only if this error corresponds to an error with DFA + /// construction that occurred because of exceeding a size limit. + /// + /// While this can occur when size limits like [`Config::dfa_size_limit`] + /// or [`Config::determinize_size_limit`] are exceeded, this can also occur + /// when the number of states or patterns exceeds a hard-coded maximum. + /// (Where these maximums are derived based on the values representable by + /// [`StateID`] and [`PatternID`].) + /// + /// This predicate is useful in contexts where you want to distinguish + /// between errors related to something provided by an end user (for + /// example, an invalid regex pattern) and errors related to configured + /// heuristics. For example, building a DFA might be an optimization that + /// you want to skip if construction fails because of an exceeded size + /// limit, but where you want to bubble up an error if it fails for some + /// other reason. + /// + /// # Example + /// + /// ``` + /// # if cfg!(miri) { return Ok(()); } // miri takes too long + /// # if !cfg!(target_pointer_width = "64") { return Ok(()); } // see #1039 + /// use regex_automata::{dfa::{dense, Automaton}, Input}; + /// + /// let err = dense::Builder::new() + /// .configure(dense::Config::new() + /// .determinize_size_limit(Some(100_000)) + /// ) + /// .build(r"\w{20}") + /// .unwrap_err(); + /// // This error occurs because a size limit was exceeded. + /// // But things are otherwise valid. + /// assert!(err.is_size_limit_exceeded()); + /// + /// let err = dense::Builder::new() + /// .build(r"\bxyz\b") + /// .unwrap_err(); + /// // This error occurs because a Unicode word boundary + /// // was used without enabling heuristic support for it. + /// // So... not related to size limits. 
+ /// assert!(!err.is_size_limit_exceeded()); + /// + /// let err = dense::Builder::new() + /// .build(r"(xyz") + /// .unwrap_err(); + /// // This error occurs because the pattern is invalid. + /// // So... not related to size limits. + /// assert!(!err.is_size_limit_exceeded()); + /// + /// # Ok::<(), Box>(()) + /// ``` + #[inline] + pub fn is_size_limit_exceeded(&self) -> bool { + use self::BuildErrorKind::*; + + match self.kind { + NFA(_) | Unsupported(_) => false, + TooManyStates + | TooManyStartStates + | TooManyMatchPatternIDs + | DFAExceededSizeLimit { .. } + | DeterminizeExceededSizeLimit { .. } => true, + } + } +} + +/// The kind of error that occurred during the construction of a DFA. +/// +/// Note that this error is non-exhaustive. Adding new variants is not +/// considered a breaking change. +#[cfg(feature = "dfa-build")] +#[derive(Clone, Debug)] +enum BuildErrorKind { + /// An error that occurred while constructing an NFA as a precursor step + /// before a DFA is compiled. + NFA(thompson::BuildError), + /// An error that occurred because an unsupported regex feature was used. + /// The message string describes which unsupported feature was used. + /// + /// The primary regex feature that is unsupported by DFAs is the Unicode + /// word boundary look-around assertion (`\b`). This can be worked around + /// by either using an ASCII word boundary (`(?-u:\b)`) or by enabling + /// Unicode word boundaries when building a DFA. + Unsupported(&'static str), + /// An error that occurs if too many states are produced while building a + /// DFA. + TooManyStates, + /// An error that occurs if too many start states are needed while building + /// a DFA. + /// + /// This is a kind of oddball error that occurs when building a DFA with + /// start states enabled for each pattern and enough patterns to cause + /// the table of start states to overflow `usize`. + TooManyStartStates, + /// This is another oddball error that can occur if there are too many + /// patterns spread out across too many match states. + TooManyMatchPatternIDs, + /// An error that occurs if the DFA got too big during determinization. + DFAExceededSizeLimit { limit: usize }, + /// An error that occurs if auxiliary storage (not the DFA) used during + /// determinization got too big. + DeterminizeExceededSizeLimit { limit: usize }, +} + +#[cfg(feature = "dfa-build")] +impl BuildError { + /// Return the kind of this error. 
+ fn kind(&self) -> &BuildErrorKind { + &self.kind + } + + pub(crate) fn nfa(err: thompson::BuildError) -> BuildError { + BuildError { kind: BuildErrorKind::NFA(err) } + } + + pub(crate) fn unsupported_dfa_word_boundary_unicode() -> BuildError { + let msg = "cannot build DFAs for regexes with Unicode word \ + boundaries; switch to ASCII word boundaries, or \ + heuristically enable Unicode word boundaries or use a \ + different regex engine"; + BuildError { kind: BuildErrorKind::Unsupported(msg) } + } + + pub(crate) fn too_many_states() -> BuildError { + BuildError { kind: BuildErrorKind::TooManyStates } + } + + pub(crate) fn too_many_start_states() -> BuildError { + BuildError { kind: BuildErrorKind::TooManyStartStates } + } + + pub(crate) fn too_many_match_pattern_ids() -> BuildError { + BuildError { kind: BuildErrorKind::TooManyMatchPatternIDs } + } + + pub(crate) fn dfa_exceeded_size_limit(limit: usize) -> BuildError { + BuildError { kind: BuildErrorKind::DFAExceededSizeLimit { limit } } + } + + pub(crate) fn determinize_exceeded_size_limit(limit: usize) -> BuildError { + BuildError { + kind: BuildErrorKind::DeterminizeExceededSizeLimit { limit }, + } + } +} + +#[cfg(all(feature = "std", feature = "dfa-build"))] +impl std::error::Error for BuildError { + fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { + match self.kind() { + BuildErrorKind::NFA(ref err) => Some(err), + _ => None, + } + } +} + +#[cfg(feature = "dfa-build")] +impl core::fmt::Display for BuildError { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + match self.kind() { + BuildErrorKind::NFA(_) => write!(f, "error building NFA"), + BuildErrorKind::Unsupported(ref msg) => { + write!(f, "unsupported regex feature for DFAs: {msg}") + } + BuildErrorKind::TooManyStates => write!( + f, + "number of DFA states exceeds limit of {}", + StateID::LIMIT, + ), + BuildErrorKind::TooManyStartStates => { + let stride = Start::len(); + // The start table has `stride` entries for starting states for + // the entire DFA, and then `stride` entries for each pattern + // if start states for each pattern are enabled (which is the + // only way this error can occur). Thus, the total number of + // patterns that can fit in the table is `stride` less than + // what we can allocate. 
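+                // Editorial note: the expression below works out to
+                // (isize::MAX - stride) / stride patterns; with a
+                // hypothetical stride of 4 on a 64-bit target that is
+                // (2^63 - 5) / 4, i.e. roughly 2.3e18.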
+ let max = usize::try_from(core::isize::MAX).unwrap(); + let limit = (max - stride) / stride; + write!( + f, + "compiling DFA with start states exceeds pattern \ + pattern limit of {}", + limit, + ) + } + BuildErrorKind::TooManyMatchPatternIDs => write!( + f, + "compiling DFA with total patterns in all match states \ + exceeds limit of {}", + PatternID::LIMIT, + ), + BuildErrorKind::DFAExceededSizeLimit { limit } => write!( + f, + "DFA exceeded size limit of {limit:?} during determinization", + ), + BuildErrorKind::DeterminizeExceededSizeLimit { limit } => { + write!(f, "determinization exceeded size limit of {limit:?}") + } + } + } +} + +#[cfg(all(test, feature = "syntax", feature = "dfa-build"))] +mod tests { + use crate::{Input, MatchError}; + + use super::*; + + #[test] + fn errors_with_unicode_word_boundary() { + let pattern = r"\b"; + assert!(Builder::new().build(pattern).is_err()); + } + + #[test] + fn roundtrip_never_match() { + let dfa = DFA::never_match().unwrap(); + let (buf, _) = dfa.to_bytes_native_endian(); + let dfa: DFA<&[u32]> = DFA::from_bytes(&buf).unwrap().0; + + assert_eq!(None, dfa.try_search_fwd(&Input::new("foo12345")).unwrap()); + } + + #[test] + fn roundtrip_always_match() { + use crate::HalfMatch; + + let dfa = DFA::always_match().unwrap(); + let (buf, _) = dfa.to_bytes_native_endian(); + let dfa: DFA<&[u32]> = DFA::from_bytes(&buf).unwrap().0; + + assert_eq!( + Some(HalfMatch::must(0, 0)), + dfa.try_search_fwd(&Input::new("foo12345")).unwrap() + ); + } + + // See the analogous test in src/hybrid/dfa.rs. + #[test] + fn heuristic_unicode_reverse() { + let dfa = DFA::builder() + .configure(DFA::config().unicode_word_boundary(true)) + .thompson(thompson::Config::new().reverse(true)) + .build(r"\b[0-9]+\b") + .unwrap(); + + let input = Input::new("β123").range(2..); + let expected = MatchError::quit(0xB2, 1); + let got = dfa.try_search_rev(&input); + assert_eq!(Err(expected), got); + + let input = Input::new("123β").range(..3); + let expected = MatchError::quit(0xCE, 3); + let got = dfa.try_search_rev(&input); + assert_eq!(Err(expected), got); + } + + // This panics in `TransitionTable::validate` if the match states are not + // validated first. + // + // See: https://github.com/rust-lang/regex/pull/1295 + #[test] + fn regression_validation_order() { + let mut dfa = DFA::new("abc").unwrap(); + dfa.ms = MatchStates { + slices: vec![], + pattern_ids: vec![], + pattern_len: 1, + }; + let (buf, _) = dfa.to_bytes_native_endian(); + DFA::from_bytes(&buf).unwrap_err(); + } +} diff --git a/vendor/regex-automata/src/dfa/determinize.rs b/vendor/regex-automata/src/dfa/determinize.rs new file mode 100644 index 00000000000000..d53815cbde85cd --- /dev/null +++ b/vendor/regex-automata/src/dfa/determinize.rs @@ -0,0 +1,599 @@ +use alloc::{collections::BTreeMap, vec::Vec}; + +use crate::{ + dfa::{ + dense::{self, BuildError}, + DEAD, + }, + nfa::thompson, + util::{ + self, + alphabet::{self, ByteSet}, + determinize::{State, StateBuilderEmpty, StateBuilderNFA}, + primitives::{PatternID, StateID}, + search::{Anchored, MatchKind}, + sparse_set::SparseSets, + start::Start, + }, +}; + +/// A builder for configuring and running a DFA determinizer. +#[derive(Clone, Debug)] +pub(crate) struct Config { + match_kind: MatchKind, + quit: ByteSet, + dfa_size_limit: Option, + determinize_size_limit: Option, +} + +impl Config { + /// Create a new default config for a determinizer. The determinizer may be + /// configured before calling `run`. 
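+    // Illustrative usage sketch (editorial; `nfa` and `dfa` stand for a
+    // compiled thompson::NFA and an initialized dense::OwnedDFA):
+    //
+    //     let mut config = Config::new();
+    //     config
+    //         .match_kind(MatchKind::LeftmostFirst)
+    //         .quit(ByteSet::empty())
+    //         .dfa_size_limit(Some(10 * (1 << 20)))
+    //         .determinize_size_limit(Some(10 * (1 << 20)));
+    //     config.run(&nfa, &mut dfa)?;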
+ pub fn new() -> Config { + Config { + match_kind: MatchKind::LeftmostFirst, + quit: ByteSet::empty(), + dfa_size_limit: None, + determinize_size_limit: None, + } + } + + /// Run determinization on the given NFA and write the resulting DFA into + /// the one given. The DFA given should be initialized but otherwise empty. + /// "Initialized" means that it is setup to handle the NFA's byte classes, + /// number of patterns and whether to build start states for each pattern. + pub fn run( + &self, + nfa: &thompson::NFA, + dfa: &mut dense::OwnedDFA, + ) -> Result<(), BuildError> { + let dead = State::dead(); + let quit = State::dead(); + let mut cache = StateMap::default(); + // We only insert the dead state here since its representation is + // identical to the quit state. And we never want anything pointing + // to the quit state other than specific transitions derived from the + // determinizer's configured "quit" bytes. + // + // We do put the quit state into 'builder_states' below. This ensures + // that a proper DFA state ID is allocated for it, and that no other + // DFA state uses the "location after the DEAD state." That is, it + // is assumed that the quit state is always the state immediately + // following the DEAD state. + cache.insert(dead.clone(), DEAD); + + let runner = Runner { + config: self.clone(), + nfa, + dfa, + builder_states: alloc::vec![dead, quit], + cache, + memory_usage_state: 0, + sparses: SparseSets::new(nfa.states().len()), + stack: alloc::vec![], + scratch_state_builder: StateBuilderEmpty::new(), + }; + runner.run() + } + + /// The match semantics to use for determinization. + /// + /// MatchKind::All corresponds to the standard textbook construction. + /// All possible match states are represented in the DFA. + /// MatchKind::LeftmostFirst permits greediness and otherwise tries to + /// simulate the match semantics of backtracking regex engines. Namely, + /// only a subset of match states are built, and dead states are used to + /// stop searches with an unanchored prefix. + /// + /// The default is MatchKind::LeftmostFirst. + pub fn match_kind(&mut self, kind: MatchKind) -> &mut Config { + self.match_kind = kind; + self + } + + /// The set of bytes to use that will cause the DFA to enter a quit state, + /// stop searching and return an error. By default, this is empty. + pub fn quit(&mut self, set: ByteSet) -> &mut Config { + self.quit = set; + self + } + + /// The limit, in bytes of the heap, that the DFA is permitted to use. This + /// does not include the auxiliary heap storage used by determinization. + pub fn dfa_size_limit(&mut self, bytes: Option) -> &mut Config { + self.dfa_size_limit = bytes; + self + } + + /// The limit, in bytes of the heap, that determinization itself is allowed + /// to use. This does not include the size of the DFA being built. + pub fn determinize_size_limit( + &mut self, + bytes: Option, + ) -> &mut Config { + self.determinize_size_limit = bytes; + self + } +} + +/// The actual implementation of determinization that converts an NFA to a DFA +/// through powerset construction. +/// +/// This determinizer roughly follows the typical powerset construction, where +/// each DFA state is comprised of one or more NFA states. In the worst case, +/// there is one DFA state for every possible combination of NFA states. In +/// practice, this only happens in certain conditions, typically when there are +/// bounded repetitions. 
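+/// (A classic illustration: `[01]*1[01]{3}` has to remember the last four
+/// input symbols, so its minimal DFA needs at least 2^4 = 16 states even
+/// though the corresponding NFA stays small.)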
+/// +/// The main differences between this implementation and typical deteminization +/// are that this implementation delays matches by one state and hackily makes +/// look-around work. Comments below attempt to explain this. +/// +/// The lifetime variable `'a` refers to the lifetime of the NFA or DFA, +/// whichever is shorter. +#[derive(Debug)] +struct Runner<'a> { + /// The configuration used to initialize determinization. + config: Config, + /// The NFA we're converting into a DFA. + nfa: &'a thompson::NFA, + /// The DFA we're building. + dfa: &'a mut dense::OwnedDFA, + /// Each DFA state being built is defined as an *ordered* set of NFA + /// states, along with some meta facts about the ordered set of NFA states. + /// + /// This is never empty. The first state is always a dummy state such that + /// a state id == 0 corresponds to a dead state. The second state is always + /// the quit state. + /// + /// Why do we have states in both a `Vec` and in a cache map below? + /// Well, they serve two different roles based on access patterns. + /// `builder_states` is the canonical home of each state, and provides + /// constant random access by a DFA state's ID. The cache map below, on + /// the other hand, provides a quick way of searching for identical DFA + /// states by using the DFA state as a key in the map. Of course, we use + /// reference counting to avoid actually duplicating the state's data + /// itself. (Although this has never been benchmarked.) Note that the cache + /// map does not give us full minimization; it just lets us avoid some very + /// obvious redundant states. + /// + /// Note that the index into this Vec isn't quite the DFA's state ID. + /// Rather, it's just an index. To get the state ID, you have to multiply + /// it by the DFA's stride. That's done by self.dfa.from_index. And the + /// inverse is self.dfa.to_index. + /// + /// Moreover, DFA states don't usually retain the IDs assigned to them + /// by their position in this Vec. After determinization completes, + /// states are shuffled around to support other optimizations. See the + /// sibling 'special' module for more details on that. (The reason for + /// mentioning this is that if you print out the DFA for debugging during + /// determinization, and then print out the final DFA after it is fully + /// built, then the state IDs likely won't match up.) + builder_states: Vec, + /// A cache of DFA states that already exist and can be easily looked up + /// via ordered sets of NFA states. + /// + /// See `builder_states` docs for why we store states in two different + /// ways. + cache: StateMap, + /// The memory usage, in bytes, used by builder_states and cache. We track + /// this as new states are added since states use a variable amount of + /// heap. Tracking this as we add states makes it possible to compute the + /// total amount of memory used by the determinizer in constant time. + memory_usage_state: usize, + /// A pair of sparse sets for tracking ordered sets of NFA state IDs. + /// These are reused throughout determinization. A bounded sparse set + /// gives us constant time insertion, membership testing and clearing. + sparses: SparseSets, + /// Scratch space for a stack of NFA states to visit, for depth first + /// visiting without recursion. + stack: Vec, + /// Scratch space for storing an ordered sequence of NFA states, for + /// amortizing allocation. This is principally useful for when we avoid + /// adding a new DFA state since it already exists. 
In order to detect this + /// case though, we still need an ordered set of NFA state IDs. So we use + /// this space to stage that ordered set before we know whether we need to + /// create a new DFA state or not. + scratch_state_builder: StateBuilderEmpty, +} + +/// A map from states to state identifiers. When using std, we use a standard +/// hashmap, since it's a bit faster for this use case. (Other maps, like +/// one's based on FNV, have not yet been benchmarked.) +/// +/// The main purpose of this map is to reuse states where possible. This won't +/// fully minimize the DFA, but it works well in a lot of cases. +#[cfg(feature = "std")] +type StateMap = std::collections::HashMap; +#[cfg(not(feature = "std"))] +type StateMap = BTreeMap; + +impl<'a> Runner<'a> { + /// Build the DFA. If there was a problem constructing the DFA (e.g., if + /// the chosen state identifier representation is too small), then an error + /// is returned. + fn run(mut self) -> Result<(), BuildError> { + if self.nfa.look_set_any().contains_word_unicode() + && !self.config.quit.contains_range(0x80, 0xFF) + { + return Err(BuildError::unsupported_dfa_word_boundary_unicode()); + } + + // A sequence of "representative" bytes drawn from each equivalence + // class. These representative bytes are fed to the NFA to compute + // state transitions. This allows us to avoid re-computing state + // transitions for bytes that are guaranteed to produce identical + // results. Since computing the representatives needs to do a little + // work, we do it once here because we'll be iterating over them a lot. + let representatives: Vec = + self.dfa.byte_classes().representatives(..).collect(); + // The set of all DFA state IDs that still need to have their + // transitions set. We start by seeding this with all starting states. + let mut uncompiled = alloc::vec![]; + self.add_all_starts(&mut uncompiled)?; + while let Some(dfa_id) = uncompiled.pop() { + for &unit in &representatives { + if unit.as_u8().map_or(false, |b| self.config.quit.contains(b)) + { + continue; + } + // In many cases, the state we transition to has already been + // computed. 'cached_state' will do the minimal amount of work + // to check this, and if it exists, immediately return an + // already existing state ID. + let (next_dfa_id, is_new) = self.cached_state(dfa_id, unit)?; + self.dfa.set_transition(dfa_id, unit, next_dfa_id); + // If the state ID we got back is newly created, then we need + // to compile it, so add it to our uncompiled frontier. + if is_new { + uncompiled.push(next_dfa_id); + } + } + } + debug!( + "determinization complete, memory usage: {}, \ + dense DFA size: {}, \ + is reverse? {}", + self.memory_usage(), + self.dfa.memory_usage(), + self.nfa.is_reverse(), + ); + + // A map from DFA state ID to one or more NFA match IDs. Each NFA match + // ID corresponds to a distinct regex pattern that matches in the state + // corresponding to the key. + let mut matches: BTreeMap> = BTreeMap::new(); + self.cache.clear(); + #[cfg(feature = "logging")] + let mut total_pat_len = 0; + for (i, state) in self.builder_states.into_iter().enumerate() { + if let Some(pat_ids) = state.match_pattern_ids() { + let id = self.dfa.to_state_id(i); + log! { + total_pat_len += pat_ids.len(); + } + matches.insert(id, pat_ids); + } + } + log! 
{ + use core::mem::size_of; + let per_elem = size_of::() + size_of::>(); + let pats = total_pat_len * size_of::(); + let mem = (matches.len() * per_elem) + pats; + log::debug!("matches map built, memory usage: {mem}"); + } + // At this point, we shuffle the "special" states in the final DFA. + // This permits a DFA's match loop to detect a match condition (among + // other things) by merely inspecting the current state's identifier, + // and avoids the need for any additional auxiliary storage. + self.dfa.shuffle(matches)?; + Ok(()) + } + + /// Return the identifier for the next DFA state given an existing DFA + /// state and an input byte. If the next DFA state already exists, then + /// return its identifier from the cache. Otherwise, build the state, cache + /// it and return its identifier. + /// + /// This routine returns a boolean indicating whether a new state was + /// built. If a new state is built, then the caller needs to add it to its + /// frontier of uncompiled DFA states to compute transitions for. + fn cached_state( + &mut self, + dfa_id: StateID, + unit: alphabet::Unit, + ) -> Result<(StateID, bool), BuildError> { + // Compute the set of all reachable NFA states, including epsilons. + let empty_builder = self.get_state_builder(); + let builder = util::determinize::next( + self.nfa, + self.config.match_kind, + &mut self.sparses, + &mut self.stack, + &self.builder_states[self.dfa.to_index(dfa_id)], + unit, + empty_builder, + ); + self.maybe_add_state(builder) + } + + /// Compute the set of DFA start states and add their identifiers in + /// 'dfa_state_ids' (no duplicates are added). + fn add_all_starts( + &mut self, + dfa_state_ids: &mut Vec, + ) -> Result<(), BuildError> { + // These should be the first states added. + assert!(dfa_state_ids.is_empty()); + // We only want to add (un)anchored starting states that is consistent + // with our DFA's configuration. Unconditionally adding both (although + // it is the default) can make DFAs quite a bit bigger. + if self.dfa.start_kind().has_unanchored() { + self.add_start_group(Anchored::No, dfa_state_ids)?; + } + if self.dfa.start_kind().has_anchored() { + self.add_start_group(Anchored::Yes, dfa_state_ids)?; + } + // I previously has an 'assert' here checking that either + // 'dfa_state_ids' was non-empty, or the NFA had zero patterns. But it + // turns out this isn't always true. For example, the NFA might have + // one or more patterns but where all such patterns are just 'fail' + // states. These will ultimately just compile down to DFA dead states, + // and since the dead state was added earlier, no new DFA states are + // added. And thus, it is valid and okay for 'dfa_state_ids' to be + // empty even if there are a non-zero number of patterns in the NFA. + + // We only need to compute anchored start states for each pattern if it + // was requested to do so. + if self.dfa.starts_for_each_pattern() { + for pid in self.nfa.patterns() { + self.add_start_group(Anchored::Pattern(pid), dfa_state_ids)?; + } + } + Ok(()) + } + + /// Add a group of start states for the given match pattern ID. Any new + /// DFA states added are pushed on to 'dfa_state_ids'. (No duplicates are + /// pushed.) + /// + /// When pattern_id is None, then this will compile a group of unanchored + /// start states (if the DFA is unanchored). When the pattern_id is + /// present, then this will compile a group of anchored start states that + /// only match the given pattern. + /// + /// This panics if `anchored` corresponds to an invalid pattern ID. 
+ fn add_start_group( + &mut self, + anchored: Anchored, + dfa_state_ids: &mut Vec, + ) -> Result<(), BuildError> { + let nfa_start = match anchored { + Anchored::No => self.nfa.start_unanchored(), + Anchored::Yes => self.nfa.start_anchored(), + Anchored::Pattern(pid) => { + self.nfa.start_pattern(pid).expect("valid pattern ID") + } + }; + + // When compiling start states, we're careful not to build additional + // states that aren't necessary. For example, if the NFA has no word + // boundary assertion, then there's no reason to have distinct start + // states for 'NonWordByte' and 'WordByte' starting configurations. + // Instead, the 'WordByte' starting configuration can just point + // directly to the start state for the 'NonWordByte' config. + // + // Note though that we only need to care about assertions in the prefix + // of an NFA since this only concerns the starting states. (Actually, + // the most precisely thing we could do it is look at the prefix + // assertions of each pattern when 'anchored == Anchored::Pattern', + // and then only compile extra states if the prefix is non-empty.) But + // we settle for simplicity here instead of absolute minimalism. It is + // somewhat rare, after all, for multiple patterns in the same regex to + // have different prefix look-arounds. + + let (id, is_new) = + self.add_one_start(nfa_start, Start::NonWordByte)?; + self.dfa.set_start_state(anchored, Start::NonWordByte, id); + if is_new { + dfa_state_ids.push(id); + } + + if !self.nfa.look_set_prefix_any().contains_word() { + self.dfa.set_start_state(anchored, Start::WordByte, id); + } else { + let (id, is_new) = + self.add_one_start(nfa_start, Start::WordByte)?; + self.dfa.set_start_state(anchored, Start::WordByte, id); + if is_new { + dfa_state_ids.push(id); + } + } + if !self.nfa.look_set_prefix_any().contains_anchor() { + self.dfa.set_start_state(anchored, Start::Text, id); + self.dfa.set_start_state(anchored, Start::LineLF, id); + self.dfa.set_start_state(anchored, Start::LineCR, id); + self.dfa.set_start_state( + anchored, + Start::CustomLineTerminator, + id, + ); + } else { + let (id, is_new) = self.add_one_start(nfa_start, Start::Text)?; + self.dfa.set_start_state(anchored, Start::Text, id); + if is_new { + dfa_state_ids.push(id); + } + + let (id, is_new) = self.add_one_start(nfa_start, Start::LineLF)?; + self.dfa.set_start_state(anchored, Start::LineLF, id); + if is_new { + dfa_state_ids.push(id); + } + + let (id, is_new) = self.add_one_start(nfa_start, Start::LineCR)?; + self.dfa.set_start_state(anchored, Start::LineCR, id); + if is_new { + dfa_state_ids.push(id); + } + + let (id, is_new) = + self.add_one_start(nfa_start, Start::CustomLineTerminator)?; + self.dfa.set_start_state( + anchored, + Start::CustomLineTerminator, + id, + ); + if is_new { + dfa_state_ids.push(id); + } + } + + Ok(()) + } + + /// Add a new DFA start state corresponding to the given starting NFA + /// state, and the starting search configuration. (The starting search + /// configuration essentially tells us which look-behind assertions are + /// true for this particular state.) + /// + /// The boolean returned indicates whether the state ID returned is a newly + /// created state, or a previously cached state. + fn add_one_start( + &mut self, + nfa_start: StateID, + start: Start, + ) -> Result<(StateID, bool), BuildError> { + // Compute the look-behind assertions that are true in this starting + // configuration, and the determine the epsilon closure. 
While + // computing the epsilon closure, we only follow conditional epsilon + // transitions that satisfy the look-behind assertions in 'look_have'. + let mut builder_matches = self.get_state_builder().into_matches(); + util::determinize::set_lookbehind_from_start( + self.nfa, + &start, + &mut builder_matches, + ); + self.sparses.set1.clear(); + util::determinize::epsilon_closure( + self.nfa, + nfa_start, + builder_matches.look_have(), + &mut self.stack, + &mut self.sparses.set1, + ); + let mut builder = builder_matches.into_nfa(); + util::determinize::add_nfa_states( + &self.nfa, + &self.sparses.set1, + &mut builder, + ); + self.maybe_add_state(builder) + } + + /// Adds the given state to the DFA being built depending on whether it + /// already exists in this determinizer's cache. + /// + /// If it does exist, then the memory used by 'state' is put back into the + /// determinizer and the previously created state's ID is returned. (Along + /// with 'false', indicating that no new state was added.) + /// + /// If it does not exist, then the state is added to the DFA being built + /// and a fresh ID is allocated (if ID allocation fails, then an error is + /// returned) and returned. (Along with 'true', indicating that a new state + /// was added.) + fn maybe_add_state( + &mut self, + builder: StateBuilderNFA, + ) -> Result<(StateID, bool), BuildError> { + if let Some(&cached_id) = self.cache.get(builder.as_bytes()) { + // Since we have a cached state, put the constructed state's + // memory back into our scratch space, so that it can be reused. + self.put_state_builder(builder); + return Ok((cached_id, false)); + } + self.add_state(builder).map(|sid| (sid, true)) + } + + /// Add the given state to the DFA and make it available in the cache. + /// + /// The state initially has no transitions. That is, it transitions to the + /// dead state for all possible inputs, and transitions to the quit state + /// for all quit bytes. + /// + /// If adding the state would exceed the maximum value for StateID, then an + /// error is returned. + fn add_state( + &mut self, + builder: StateBuilderNFA, + ) -> Result { + let id = self.dfa.add_empty_state()?; + if !self.config.quit.is_empty() { + for b in self.config.quit.iter() { + self.dfa.set_transition( + id, + alphabet::Unit::u8(b), + self.dfa.quit_id(), + ); + } + } + let state = builder.to_state(); + // States use reference counting internally, so we only need to count + // their memory usage once. + self.memory_usage_state += state.memory_usage(); + self.builder_states.push(state.clone()); + self.cache.insert(state, id); + self.put_state_builder(builder); + if let Some(limit) = self.config.dfa_size_limit { + if self.dfa.memory_usage() > limit { + return Err(BuildError::dfa_exceeded_size_limit(limit)); + } + } + if let Some(limit) = self.config.determinize_size_limit { + if self.memory_usage() > limit { + return Err(BuildError::determinize_exceeded_size_limit( + limit, + )); + } + } + Ok(id) + } + + /// Returns a state builder from this determinizer that might have existing + /// capacity. This helps avoid allocs in cases where a state is built that + /// turns out to already be cached. + /// + /// Callers must put the state builder back with 'put_state_builder', + /// otherwise the allocation reuse won't work. + fn get_state_builder(&mut self) -> StateBuilderEmpty { + core::mem::replace( + &mut self.scratch_state_builder, + StateBuilderEmpty::new(), + ) + } + + /// Puts the given state builder back into this determinizer for reuse. 
+ /// + /// Note that building a 'State' from a builder always creates a new + /// alloc, so callers should always put the builder back. + fn put_state_builder(&mut self, builder: StateBuilderNFA) { + let _ = core::mem::replace( + &mut self.scratch_state_builder, + builder.clear(), + ); + } + + /// Return the memory usage, in bytes, of this determinizer at the current + /// point in time. This does not include memory used by the NFA or the + /// dense DFA itself. + fn memory_usage(&self) -> usize { + use core::mem::size_of; + + self.builder_states.len() * size_of::() + // Maps likely use more memory than this, but it's probably close. + + self.cache.len() * (size_of::() + size_of::()) + + self.memory_usage_state + + self.stack.capacity() * size_of::() + + self.scratch_state_builder.capacity() + } +} diff --git a/vendor/regex-automata/src/dfa/minimize.rs b/vendor/regex-automata/src/dfa/minimize.rs new file mode 100644 index 00000000000000..fea925bdc6cf72 --- /dev/null +++ b/vendor/regex-automata/src/dfa/minimize.rs @@ -0,0 +1,463 @@ +use core::{cell::RefCell, fmt, mem}; + +use alloc::{collections::BTreeMap, rc::Rc, vec, vec::Vec}; + +use crate::{ + dfa::{automaton::Automaton, dense, DEAD}, + util::{ + alphabet, + primitives::{PatternID, StateID}, + }, +}; + +/// An implementation of Hopcroft's algorithm for minimizing DFAs. +/// +/// The algorithm implemented here is mostly taken from Wikipedia: +/// https://en.wikipedia.org/wiki/DFA_minimization#Hopcroft's_algorithm +/// +/// This code has had some light optimization attention paid to it, +/// particularly in the form of reducing allocation as much as possible. +/// However, it is still generally slow. Future optimization work should +/// probably focus on the bigger picture rather than micro-optimizations. For +/// example: +/// +/// 1. Figure out how to more intelligently create initial partitions. That is, +/// Hopcroft's algorithm starts by creating two partitions of DFA states +/// that are known to NOT be equivalent: match states and non-match states. +/// The algorithm proceeds by progressively refining these partitions into +/// smaller partitions. If we could start with more partitions, then we +/// could reduce the amount of work that Hopcroft's algorithm needs to do. +/// 2. For every partition that we visit, we find all incoming transitions to +/// every state in the partition for *every* element in the alphabet. (This +/// is why using byte classes can significantly decrease minimization times, +/// since byte classes shrink the alphabet.) This is quite costly and there +/// is perhaps some redundant work being performed depending on the specific +/// states in the set. For example, we might be able to only visit some +/// elements of the alphabet based on the transitions. +/// 3. Move parts of minimization into determinization. If minimization has +/// fewer states to deal with, then it should run faster. A prime example +/// of this might be large Unicode classes, which are generated in way that +/// can create a lot of redundant states. (Some work has been done on this +/// point during NFA compilation via the algorithm described in the +/// "Incremental Construction of MinimalAcyclic Finite-State Automata" +/// paper.) 
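// Editor's note: the sketch below is illustrative only and is not part of the
// vendored file. It restates the partition-refinement loop described in the
// comment above on a toy DFA, using plain BTreeSet values instead of the
// crate's shared StateSet representation. All names are hypothetical; the
// real implementation follows in the Minimizer below.
fn hopcroft_toy<F: Fn(usize, u8) -> usize>(
    num_states: usize,
    alphabet: &[u8],
    delta: F,
    accepting: &[usize],
) -> Vec<Vec<usize>> {
    use std::collections::BTreeSet;

    // Initial partition: match (accepting) states vs. everything else.
    let acc: BTreeSet<usize> = accepting.iter().copied().collect();
    let non: BTreeSet<usize> =
        (0..num_states).filter(|s| !acc.contains(s)).collect();
    let mut partitions: Vec<BTreeSet<usize>> = vec![acc, non];
    let mut waiting = partitions.clone();

    while let Some(set) = waiting.pop() {
        for &b in alphabet {
            // All states with a transition on `b` into `set` (the analogue
            // of 'find_incoming_to' below).
            let incoming: BTreeSet<usize> = (0..num_states)
                .filter(|&s| set.contains(&delta(s, b)))
                .collect();
            let mut newparts = Vec::new();
            for part in partitions.drain(..) {
                let x: BTreeSet<usize> =
                    part.intersection(&incoming).copied().collect();
                let y: BTreeSet<usize> =
                    part.difference(&incoming).copied().collect();
                if x.is_empty() || y.is_empty() {
                    // `incoming` does not split this partition; keep it.
                    newparts.push(part);
                    continue;
                }
                // Refine, keeping the waiting list consistent: replace the
                // split set if it is waiting, otherwise queue the smaller
                // half (this choice is what gives Hopcroft its bound).
                if let Some(i) = waiting.iter().position(|w| *w == part) {
                    waiting[i] = x.clone();
                    waiting.push(y.clone());
                } else if x.len() <= y.len() {
                    waiting.push(x.clone());
                } else {
                    waiting.push(y.clone());
                }
                newparts.push(x);
                newparts.push(y);
            }
            partitions = newparts;
        }
    }
    partitions
        .into_iter()
        .filter(|p| !p.is_empty())
        .map(|p| p.into_iter().collect())
        .collect()
}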
+pub(crate) struct Minimizer<'a> { + dfa: &'a mut dense::OwnedDFA, + in_transitions: Vec>>, + partitions: Vec, + waiting: Vec, +} + +impl<'a> fmt::Debug for Minimizer<'a> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("Minimizer") + .field("dfa", &self.dfa) + .field("in_transitions", &self.in_transitions) + .field("partitions", &self.partitions) + .field("waiting", &self.waiting) + .finish() + } +} + +/// A set of states. A state set makes up a single partition in Hopcroft's +/// algorithm. +/// +/// It is represented by an ordered set of state identifiers. We use shared +/// ownership so that a single state set can be in both the set of partitions +/// and in the set of waiting sets simultaneously without an additional +/// allocation. Generally, once a state set is built, it becomes immutable. +/// +/// We use this representation because it avoids the overhead of more +/// traditional set data structures (HashSet/BTreeSet), and also because +/// computing intersection/subtraction on this representation is especially +/// fast. +#[derive(Clone, Debug, Eq, PartialEq, PartialOrd, Ord)] +struct StateSet { + ids: Rc>>, +} + +impl<'a> Minimizer<'a> { + pub fn new(dfa: &'a mut dense::OwnedDFA) -> Minimizer<'a> { + let in_transitions = Minimizer::incoming_transitions(dfa); + let partitions = Minimizer::initial_partitions(dfa); + let waiting = partitions.clone(); + Minimizer { dfa, in_transitions, partitions, waiting } + } + + pub fn run(mut self) { + let stride2 = self.dfa.stride2(); + let as_state_id = |index: usize| -> StateID { + StateID::new(index << stride2).unwrap() + }; + let as_index = |id: StateID| -> usize { id.as_usize() >> stride2 }; + + let mut incoming = StateSet::empty(); + let mut scratch1 = StateSet::empty(); + let mut scratch2 = StateSet::empty(); + let mut newparts = vec![]; + + // This loop is basically Hopcroft's algorithm. Everything else is just + // shuffling data around to fit our representation. + while let Some(set) = self.waiting.pop() { + for b in self.dfa.byte_classes().iter() { + self.find_incoming_to(b, &set, &mut incoming); + // If incoming is empty, then the intersection with any other + // set must also be empty. So 'newparts' just ends up being + // 'self.partitions'. So there's no need to go through the loop + // below. + // + // This actually turns out to be rather large optimization. On + // the order of making minimization 4-5x faster. It's likely + // that the vast majority of all states have very few incoming + // transitions. + if incoming.is_empty() { + continue; + } + + for p in 0..self.partitions.len() { + self.partitions[p].intersection(&incoming, &mut scratch1); + if scratch1.is_empty() { + newparts.push(self.partitions[p].clone()); + continue; + } + + self.partitions[p].subtract(&incoming, &mut scratch2); + if scratch2.is_empty() { + newparts.push(self.partitions[p].clone()); + continue; + } + + let (x, y) = + (scratch1.deep_clone(), scratch2.deep_clone()); + newparts.push(x.clone()); + newparts.push(y.clone()); + match self.find_waiting(&self.partitions[p]) { + Some(i) => { + self.waiting[i] = x; + self.waiting.push(y); + } + None => { + if x.len() <= y.len() { + self.waiting.push(x); + } else { + self.waiting.push(y); + } + } + } + } + newparts = mem::replace(&mut self.partitions, newparts); + newparts.clear(); + } + } + + // At this point, we now have a minimal partitioning of states, where + // each partition is an equivalence class of DFA states. 
Now we need to + // use this partitioning to update the DFA to only contain one state for + // each partition. + + // Create a map from DFA state ID to the representative ID of the + // equivalence class to which it belongs. The representative ID of an + // equivalence class of states is the minimum ID in that class. + let mut state_to_part = vec![DEAD; self.dfa.state_len()]; + for p in &self.partitions { + p.iter(|id| state_to_part[as_index(id)] = p.min()); + } + + // Generate a new contiguous sequence of IDs for minimal states, and + // create a map from equivalence IDs to the new IDs. Thus, the new + // minimal ID of *any* state in the unminimized DFA can be obtained + // with minimals_ids[state_to_part[old_id]]. + let mut minimal_ids = vec![DEAD; self.dfa.state_len()]; + let mut new_index = 0; + for state in self.dfa.states() { + if state_to_part[as_index(state.id())] == state.id() { + minimal_ids[as_index(state.id())] = as_state_id(new_index); + new_index += 1; + } + } + // The total number of states in the minimal DFA. + let minimal_count = new_index; + // Convenience function for remapping state IDs. This takes an old ID, + // looks up its Hopcroft partition and then maps that to the new ID + // range. + let remap = |old| minimal_ids[as_index(state_to_part[as_index(old)])]; + + // Re-map this DFA in place such that the only states remaining + // correspond to the representative states of every equivalence class. + for id in (0..self.dfa.state_len()).map(as_state_id) { + // If this state isn't a representative for an equivalence class, + // then we skip it since it won't appear in the minimal DFA. + if state_to_part[as_index(id)] != id { + continue; + } + self.dfa.remap_state(id, remap); + self.dfa.swap_states(id, minimal_ids[as_index(id)]); + } + // Trim off all unused states from the pre-minimized DFA. This + // represents all states that were merged into a non-singleton + // equivalence class of states, and appeared after the first state + // in each such class. (Because the state with the smallest ID in each + // equivalence class is its representative ID.) + self.dfa.truncate_states(minimal_count); + + // Update the new start states, which is now just the minimal ID of + // whatever state the old start state was collapsed into. Also, we + // collect everything before-hand to work around the borrow checker. + // We're already allocating so much that this is probably fine. If this + // turns out to be costly, then I guess add a `starts_mut` iterator. + let starts: Vec<_> = self.dfa.starts().collect(); + for (old_start_id, anchored, start_type) in starts { + self.dfa.set_start_state( + anchored, + start_type, + remap(old_start_id), + ); + } + + // Update the match state pattern ID list for multi-regexes. All we + // need to do is remap the match state IDs. The pattern ID lists are + // always the same as they were since match states with distinct + // pattern ID lists are always considered distinct states. + let mut pmap = BTreeMap::new(); + for (match_id, pattern_ids) in self.dfa.pattern_map() { + let new_id = remap(match_id); + pmap.insert(new_id, pattern_ids); + } + // This unwrap is OK because minimization never increases the number of + // match states or patterns in those match states. Since minimization + // runs after the pattern map has already been set at least once, we + // know that our match states cannot error. 
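// Editor's note: illustrative only, not part of the vendored file. The
// remapping used in this routine composes two lookups: old state ID -> the
// representative (minimum ID) of its equivalence class, then representative
// -> its new dense ID. A toy version with hypothetical data:
fn _toy_remap_example() {
    // Suppose states 1 and 3 were found equivalent; 1 is the representative.
    let state_to_part = [0usize, 1, 2, 1];
    // Non-representatives never receive a minimal ID (sentinel shown as MAX).
    let minimal_ids = [0usize, 1, 2, usize::MAX];
    let remap = |old: usize| minimal_ids[state_to_part[old]];
    assert_eq!(remap(3), 1); // old state 3 collapses onto representative 1
    assert_eq!(remap(2), 2); // representatives keep a contiguous new ID
}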
+ self.dfa.set_pattern_map(&pmap).unwrap(); + + // In order to update the ID of the maximum match state, we need to + // find the maximum ID among all of the match states in the minimized + // DFA. This is not necessarily the new ID of the unminimized maximum + // match state, since that could have been collapsed with a much + // earlier match state. Therefore, to find the new max match state, + // we iterate over all previous match states, find their corresponding + // new minimal ID, and take the maximum of those. + let old = self.dfa.special().clone(); + let new = self.dfa.special_mut(); + // ... but only remap if we had match states. + if old.matches() { + new.min_match = StateID::MAX; + new.max_match = StateID::ZERO; + for i in as_index(old.min_match)..=as_index(old.max_match) { + let new_id = remap(as_state_id(i)); + if new_id < new.min_match { + new.min_match = new_id; + } + if new_id > new.max_match { + new.max_match = new_id; + } + } + } + // ... same, but for start states. + if old.starts() { + new.min_start = StateID::MAX; + new.max_start = StateID::ZERO; + for i in as_index(old.min_start)..=as_index(old.max_start) { + let new_id = remap(as_state_id(i)); + if new_id == DEAD { + continue; + } + if new_id < new.min_start { + new.min_start = new_id; + } + if new_id > new.max_start { + new.max_start = new_id; + } + } + if new.max_start == DEAD { + new.min_start = DEAD; + } + } + new.quit_id = remap(new.quit_id); + new.set_max(); + } + + fn find_waiting(&self, set: &StateSet) -> Option { + self.waiting.iter().position(|s| s == set) + } + + fn find_incoming_to( + &self, + b: alphabet::Unit, + set: &StateSet, + incoming: &mut StateSet, + ) { + incoming.clear(); + set.iter(|id| { + for &inid in + &self.in_transitions[self.dfa.to_index(id)][b.as_usize()] + { + incoming.add(inid); + } + }); + incoming.canonicalize(); + } + + fn initial_partitions(dfa: &dense::OwnedDFA) -> Vec { + // For match states, we know that two match states with different + // pattern ID lists will *always* be distinct, so we can partition them + // initially based on that. 
+ let mut matching: BTreeMap, StateSet> = BTreeMap::new(); + let mut is_quit = StateSet::empty(); + let mut no_match = StateSet::empty(); + for state in dfa.states() { + if dfa.is_match_state(state.id()) { + let mut pids = vec![]; + for i in 0..dfa.match_len(state.id()) { + pids.push(dfa.match_pattern(state.id(), i)); + } + matching + .entry(pids) + .or_insert(StateSet::empty()) + .add(state.id()); + } else if dfa.is_quit_state(state.id()) { + is_quit.add(state.id()); + } else { + no_match.add(state.id()); + } + } + + let mut sets: Vec = + matching.into_iter().map(|(_, set)| set).collect(); + sets.push(no_match); + sets.push(is_quit); + sets + } + + fn incoming_transitions(dfa: &dense::OwnedDFA) -> Vec>> { + let mut incoming = vec![]; + for _ in dfa.states() { + incoming.push(vec![vec![]; dfa.alphabet_len()]); + } + for state in dfa.states() { + for (b, next) in state.transitions() { + incoming[dfa.to_index(next)][b.as_usize()].push(state.id()); + } + } + incoming + } +} + +impl StateSet { + fn empty() -> StateSet { + StateSet { ids: Rc::new(RefCell::new(vec![])) } + } + + fn add(&mut self, id: StateID) { + self.ids.borrow_mut().push(id); + } + + fn min(&self) -> StateID { + self.ids.borrow()[0] + } + + fn canonicalize(&mut self) { + self.ids.borrow_mut().sort(); + self.ids.borrow_mut().dedup(); + } + + fn clear(&mut self) { + self.ids.borrow_mut().clear(); + } + + fn len(&self) -> usize { + self.ids.borrow().len() + } + + fn is_empty(&self) -> bool { + self.len() == 0 + } + + fn deep_clone(&self) -> StateSet { + let ids = self.ids.borrow().iter().cloned().collect(); + StateSet { ids: Rc::new(RefCell::new(ids)) } + } + + fn iter(&self, mut f: F) { + for &id in self.ids.borrow().iter() { + f(id); + } + } + + fn intersection(&self, other: &StateSet, dest: &mut StateSet) { + dest.clear(); + if self.is_empty() || other.is_empty() { + return; + } + + let (seta, setb) = (self.ids.borrow(), other.ids.borrow()); + let (mut ita, mut itb) = (seta.iter().cloned(), setb.iter().cloned()); + let (mut a, mut b) = (ita.next().unwrap(), itb.next().unwrap()); + loop { + if a == b { + dest.add(a); + a = match ita.next() { + None => break, + Some(a) => a, + }; + b = match itb.next() { + None => break, + Some(b) => b, + }; + } else if a < b { + a = match ita.next() { + None => break, + Some(a) => a, + }; + } else { + b = match itb.next() { + None => break, + Some(b) => b, + }; + } + } + } + + fn subtract(&self, other: &StateSet, dest: &mut StateSet) { + dest.clear(); + if self.is_empty() || other.is_empty() { + self.iter(|s| dest.add(s)); + return; + } + + let (seta, setb) = (self.ids.borrow(), other.ids.borrow()); + let (mut ita, mut itb) = (seta.iter().cloned(), setb.iter().cloned()); + let (mut a, mut b) = (ita.next().unwrap(), itb.next().unwrap()); + loop { + if a == b { + a = match ita.next() { + None => break, + Some(a) => a, + }; + b = match itb.next() { + None => { + dest.add(a); + break; + } + Some(b) => b, + }; + } else if a < b { + dest.add(a); + a = match ita.next() { + None => break, + Some(a) => a, + }; + } else { + b = match itb.next() { + None => { + dest.add(a); + break; + } + Some(b) => b, + }; + } + } + for a in ita { + dest.add(a); + } + } +} diff --git a/vendor/regex-automata/src/dfa/mod.rs b/vendor/regex-automata/src/dfa/mod.rs new file mode 100644 index 00000000000000..ff718cc434790e --- /dev/null +++ b/vendor/regex-automata/src/dfa/mod.rs @@ -0,0 +1,360 @@ +/*! +A module for building and searching with deterministic finite automata (DFAs). 
+ +Like other modules in this crate, DFAs support a rich regex syntax with Unicode +features. DFAs also have extensive options for configuring the best space vs +time trade off for your use case and provides support for cheap deserialization +of automata for use in `no_std` environments. + +If you're looking for lazy DFAs that build themselves incrementally during +search, then please see the top-level [`hybrid` module](crate::hybrid). + +# Overview + +This section gives a brief overview of the primary types in this module: + +* A [`regex::Regex`] provides a way to search for matches of a regular +expression using DFAs. This includes iterating over matches with both the start +and end positions of each match. +* A [`dense::DFA`] provides low level access to a DFA that uses a dense +representation (uses lots of space, but fast searching). +* A [`sparse::DFA`] provides the same API as a `dense::DFA`, but uses a sparse +representation (uses less space, but slower searching). +* An [`Automaton`] trait that defines an interface that both dense and sparse +DFAs implement. (A `regex::Regex` is generic over this trait.) +* Both dense DFAs and sparse DFAs support serialization to raw bytes (e.g., +[`dense::DFA::to_bytes_little_endian`]) and cheap deserialization (e.g., +[`dense::DFA::from_bytes`]). + +There is also a [`onepass`] module that provides a [one-pass +DFA](onepass::DFA). The unique advantage of this DFA is that, for the class +of regexes it can be built with, it supports reporting the spans of matching +capturing groups. It is the only DFA in this crate capable of such a thing. + +# Example: basic regex searching + +This example shows how to compile a regex using the default configuration +and then use it to find matches in a byte string: + +``` +use regex_automata::{Match, dfa::regex::Regex}; + +let re = Regex::new(r"[0-9]{4}-[0-9]{2}-[0-9]{2}")?; +let text = b"2018-12-24 2016-10-08"; +let matches: Vec = re.find_iter(text).collect(); +assert_eq!(matches, vec![ + Match::must(0, 0..10), + Match::must(0, 11..21), +]); +# Ok::<(), Box>(()) +``` + +# Example: searching with regex sets + +The DFAs in this module all fully support searching with multiple regexes +simultaneously. You can use this support with standard leftmost-first style +searching to find non-overlapping matches: + +``` +# if cfg!(miri) { return Ok(()); } // miri takes too long +use regex_automata::{Match, dfa::regex::Regex}; + +let re = Regex::new_many(&[r"\w+", r"\S+"])?; +let text = b"@foo bar"; +let matches: Vec = re.find_iter(text).collect(); +assert_eq!(matches, vec![ + Match::must(1, 0..4), + Match::must(0, 5..8), +]); +# Ok::<(), Box>(()) +``` + +# Example: use sparse DFAs + +By default, compiling a regex will use dense DFAs internally. This uses more +memory, but executes searches more quickly. If you can abide slower searches +(somewhere around 3-5x), then sparse DFAs might make more sense since they can +use significantly less space. + +Using sparse DFAs is as easy as using `Regex::new_sparse` instead of +`Regex::new`: + +``` +use regex_automata::{Match, dfa::regex::Regex}; + +let re = Regex::new_sparse(r"[0-9]{4}-[0-9]{2}-[0-9]{2}").unwrap(); +let text = b"2018-12-24 2016-10-08"; +let matches: Vec = re.find_iter(text).collect(); +assert_eq!(matches, vec![ + Match::must(0, 0..10), + Match::must(0, 11..21), +]); +# Ok::<(), Box>(()) +``` + +If you already have dense DFAs for some reason, they can be converted to sparse +DFAs and used to build a new `Regex`. 
For example: + +``` +use regex_automata::{Match, dfa::regex::Regex}; + +let dense_re = Regex::new(r"[0-9]{4}-[0-9]{2}-[0-9]{2}").unwrap(); +let sparse_re = Regex::builder().build_from_dfas( + dense_re.forward().to_sparse()?, + dense_re.reverse().to_sparse()?, +); +let text = b"2018-12-24 2016-10-08"; +let matches: Vec = sparse_re.find_iter(text).collect(); +assert_eq!(matches, vec![ + Match::must(0, 0..10), + Match::must(0, 11..21), +]); +# Ok::<(), Box>(()) +``` + +# Example: deserialize a DFA + +This shows how to first serialize a DFA into raw bytes, and then deserialize +those raw bytes back into a DFA. While this particular example is a +bit contrived, this same technique can be used in your program to +deserialize a DFA at start up time or by memory mapping a file. + +``` +use regex_automata::{Match, dfa::{dense, regex::Regex}}; + +let re1 = Regex::new(r"[0-9]{4}-[0-9]{2}-[0-9]{2}").unwrap(); +// serialize both the forward and reverse DFAs, see note below +let (fwd_bytes, fwd_pad) = re1.forward().to_bytes_native_endian(); +let (rev_bytes, rev_pad) = re1.reverse().to_bytes_native_endian(); +// now deserialize both---we need to specify the correct type! +let fwd: dense::DFA<&[u32]> = dense::DFA::from_bytes(&fwd_bytes[fwd_pad..])?.0; +let rev: dense::DFA<&[u32]> = dense::DFA::from_bytes(&rev_bytes[rev_pad..])?.0; +// finally, reconstruct our regex +let re2 = Regex::builder().build_from_dfas(fwd, rev); + +// we can use it like normal +let text = b"2018-12-24 2016-10-08"; +let matches: Vec = re2.find_iter(text).collect(); +assert_eq!(matches, vec![ + Match::must(0, 0..10), + Match::must(0, 11..21), +]); +# Ok::<(), Box>(()) +``` + +There are a few points worth noting here: + +* We need to extract the raw DFAs used by the regex and serialize those. You +can build the DFAs manually yourself using [`dense::Builder`], but using +the DFAs from a `Regex` guarantees that the DFAs are built correctly. (In +particular, a `Regex` constructs a reverse DFA for finding the starting +location of matches.) +* To convert the DFA to raw bytes, we use the `to_bytes_native_endian` method. +In practice, you'll want to use either [`dense::DFA::to_bytes_little_endian`] +or [`dense::DFA::to_bytes_big_endian`], depending on which platform you're +deserializing your DFA from. If you intend to deserialize on either platform, +then you'll need to serialize both and deserialize the right one depending on +your target's endianness. +* Safely deserializing a DFA requires verifying the raw bytes, particularly if +they are untrusted, since an invalid DFA could cause logical errors, panics +or even undefined behavior. This verification step requires visiting all of +the transitions in the DFA, which can be costly. If cheaper verification is +desired, then [`dense::DFA::from_bytes_unchecked`] is available that only does +verification that can be performed in constant time. However, one can only use +this routine if the caller can guarantee that the bytes provided encoded a +valid DFA. + +The same process can be achieved with sparse DFAs as well: + +``` +use regex_automata::{Match, dfa::{sparse, regex::Regex}}; + +let re1 = Regex::new(r"[0-9]{4}-[0-9]{2}-[0-9]{2}").unwrap(); +// serialize both +let fwd_bytes = re1.forward().to_sparse()?.to_bytes_native_endian(); +let rev_bytes = re1.reverse().to_sparse()?.to_bytes_native_endian(); +// now deserialize both---we need to specify the correct type! 
+let fwd: sparse::DFA<&[u8]> = sparse::DFA::from_bytes(&fwd_bytes)?.0; +let rev: sparse::DFA<&[u8]> = sparse::DFA::from_bytes(&rev_bytes)?.0; +// finally, reconstruct our regex +let re2 = Regex::builder().build_from_dfas(fwd, rev); + +// we can use it like normal +let text = b"2018-12-24 2016-10-08"; +let matches: Vec = re2.find_iter(text).collect(); +assert_eq!(matches, vec![ + Match::must(0, 0..10), + Match::must(0, 11..21), +]); +# Ok::<(), Box>(()) +``` + +Note that unlike dense DFAs, sparse DFAs have no alignment requirements. +Conversely, dense DFAs must be aligned to the same alignment as a +[`StateID`](crate::util::primitives::StateID). + +# Support for `no_std` and `alloc`-only + +This crate comes with `alloc` and `std` features that are enabled by default. +When the `alloc` or `std` features are enabled, the API of this module will +include the facilities necessary for compiling, serializing, deserializing +and searching with DFAs. When only the `alloc` feature is enabled, then +implementations of the `std::error::Error` trait are dropped, but everything +else generally remains the same. When both the `alloc` and `std` features are +disabled, the API of this module will shrink such that it only includes the +facilities necessary for deserializing and searching with DFAs. + +The intended workflow for `no_std` environments is thus as follows: + +* Write a program with the `alloc` or `std` features that compiles and +serializes a regular expression. You may need to serialize both little and big +endian versions of each DFA. (So that's 4 DFAs in total for each regex.) +* In your `no_std` environment, follow the examples above for deserializing +your previously serialized DFAs into regexes. You can then search with them as +you would any regex. + +Deserialization can happen anywhere. For example, with bytes embedded into a +binary or with a file memory mapped at runtime. + +The `regex-cli` command (found in the same repository as this crate) can be +used to serialize DFAs to files and generate Rust code to read them. + +# Syntax + +This module supports the same syntax as the `regex` crate, since they share the +same parser. You can find an exhaustive list of supported syntax in the +[documentation for the `regex` crate](https://docs.rs/regex/1/regex/#syntax). + +There are two things that are not supported by the DFAs in this module: + +* Capturing groups. The DFAs (and [`Regex`](regex::Regex)es built on top +of them) can only find the offsets of an entire match, but cannot resolve +the offsets of each capturing group. This is because DFAs do not have the +expressive power necessary. +* Unicode word boundaries. These present particularly difficult challenges for +DFA construction and would result in an explosion in the number of states. +One can enable [`dense::Config::unicode_word_boundary`] though, which provides +heuristic support for Unicode word boundaries that only works on ASCII text. +Otherwise, one can use `(?-u:\b)` for an ASCII word boundary, which will work +on any input. + +There are no plans to lift either of these limitations. + +Note that these restrictions are identical to the restrictions on lazy DFAs. + +# Differences with general purpose regexes + +The main goal of the [`regex`](https://docs.rs/regex) crate is to serve as a +general purpose regular expression engine. It aims to automatically balance low +compile times, fast search times and low memory usage, while also providing +a convenient API for users. 
In contrast, this module provides a lower level +regular expression interface based exclusively on DFAs that is a bit less +convenient while providing more explicit control over memory usage and search +times. + +Here are some specific negative differences: + +* **Compilation can take an exponential amount of time and space** in the size +of the regex pattern. While most patterns do not exhibit worst case exponential +time, such patterns do exist. For example, `[01]*1[01]{N}` will build a DFA +with approximately `2^(N+2)` states. For this reason, untrusted patterns should +not be compiled with this module. (In the future, the API may expose an option +to return an error if the DFA gets too big.) +* This module does not support sub-match extraction via capturing groups, which +can be achieved with the regex crate's "captures" API. +* While the regex crate doesn't necessarily sport fast compilation times, +the regexes in this module are almost universally slow to compile, especially +when they contain large Unicode character classes. For example, on my system, +compiling `\w{50}` takes about 1 second and almost 15MB of memory! (Compiling +a sparse regex takes about the same time but only uses about 1.2MB of +memory.) Conversely, compiling the same regex without Unicode support, e.g., +`(?-u)\w{50}`, takes under 1 millisecond and about 15KB of memory. For this +reason, you should only use Unicode character classes if you absolutely need +them! (They are enabled by default though.) +* This module does not support Unicode word boundaries. ASCII word boundaries +may be used though by disabling Unicode or selectively doing so in the syntax, +e.g., `(?-u:\b)`. There is also an option to +[heuristically enable Unicode word boundaries](crate::dfa::dense::Config::unicode_word_boundary), +where the corresponding DFA will give up if any non-ASCII byte is seen. +* As a lower level API, this module does not do literal optimizations +automatically. Although it does provide hooks in its API to make use of the +[`Prefilter`](crate::util::prefilter::Prefilter) trait. Missing literal +optimizations means that searches may run much slower than what you're +accustomed to, although, it does provide more predictable and consistent +performance. +* There is no `&str` API like in the regex crate. In this module, all APIs +operate on `&[u8]`. By default, match indices are +guaranteed to fall on UTF-8 boundaries, unless either of +[`syntax::Config::utf8`](crate::util::syntax::Config::utf8) or +[`thompson::Config::utf8`](crate::nfa::thompson::Config::utf8) are disabled. + +With some of the downsides out of the way, here are some positive differences: + +* Both dense and sparse DFAs can be serialized to raw bytes, and then cheaply +deserialized. Deserialization can be done in constant time with the unchecked +APIs, since searching can be performed directly on the raw serialized bytes of +a DFA. +* This module was specifically designed so that the searching phase of a +DFA has minimal runtime requirements, and can therefore be used in `no_std` +environments. While `no_std` environments cannot compile regexes, they can +deserialize pre-compiled regexes. +* Since this module builds DFAs ahead of time, it will generally out-perform +the `regex` crate on equivalent tasks. The performance difference is likely +not large. However, because of a complex set of optimizations in the regex +crate (like literal optimizations), an accurate performance comparison may be +difficult to do. 
+* Sparse DFAs provide a way to build a DFA ahead of time that sacrifices search +performance a small amount, but uses much less storage space. Potentially even +less than what the regex crate uses. +* This module exposes DFAs directly, such as [`dense::DFA`] and +[`sparse::DFA`], which enables one to do less work in some cases. For example, +if you only need the end of a match and not the start of a match, then you can +use a DFA directly without building a `Regex`, which always requires a second +DFA to find the start of a match. +* This module provides more control over memory usage. Aside from choosing +between dense and sparse DFAs, one can also choose a smaller state identifier +representation to use less space. Also, one can enable DFA minimization +via [`dense::Config::minimize`], but it can increase compilation times +dramatically. +*/ + +#[cfg(feature = "dfa-search")] +pub use crate::dfa::{ + automaton::{Automaton, OverlappingState, StartError}, + start::StartKind, +}; + +/// This is an alias for a state ID of zero. It has special significance +/// because it always corresponds to the first state in a DFA, and the first +/// state in a DFA is always "dead." That is, the dead state always has all +/// of its transitions set to itself. Moreover, the dead state is used as a +/// sentinel for various things. e.g., In search, reaching a dead state means +/// that the search must stop. +const DEAD: crate::util::primitives::StateID = + crate::util::primitives::StateID::ZERO; + +#[cfg(feature = "dfa-search")] +pub mod dense; +#[cfg(feature = "dfa-onepass")] +pub mod onepass; +#[cfg(feature = "dfa-search")] +pub mod regex; +#[cfg(feature = "dfa-search")] +pub mod sparse; + +#[cfg(feature = "dfa-search")] +pub(crate) mod accel; +#[cfg(feature = "dfa-search")] +mod automaton; +#[cfg(feature = "dfa-build")] +mod determinize; +#[cfg(feature = "dfa-build")] +mod minimize; +#[cfg(any(feature = "dfa-build", feature = "dfa-onepass"))] +mod remapper; +#[cfg(feature = "dfa-search")] +mod search; +#[cfg(feature = "dfa-search")] +mod special; +#[cfg(feature = "dfa-search")] +mod start; diff --git a/vendor/regex-automata/src/dfa/onepass.rs b/vendor/regex-automata/src/dfa/onepass.rs new file mode 100644 index 00000000000000..85f820ef547395 --- /dev/null +++ b/vendor/regex-automata/src/dfa/onepass.rs @@ -0,0 +1,3192 @@ +/*! +A DFA that can return spans for matching capturing groups. + +This module is the home of a [one-pass DFA](DFA). + +This module also contains a [`Builder`] and a [`Config`] for building and +configuring a one-pass DFA. +*/ + +// A note on naming and credit: +// +// As far as I know, Russ Cox came up with the practical vision and +// implementation of a "one-pass regex engine." He mentions and describes it +// briefly in the third article of his regexp article series: +// https://swtch.com/~rsc/regexp/regexp3.html +// +// Cox's implementation is in RE2, and the implementation below is most +// heavily inspired by RE2's. The key thing they have in common is that +// their transitions are defined over an alphabet of bytes. In contrast, +// Go's regex engine also has a one-pass engine, but its transitions are +// more firmly rooted on Unicode codepoints. The ideas are the same, but the +// implementations are different. +// +// RE2 tends to call this a "one-pass NFA." Here, we call it a "one-pass DFA." +// They're both true in their own ways: +// +// * The "one-pass" criterion is generally a property of the NFA itself. 
In +// particular, it is said that an NFA is one-pass if, after each byte of input +// during a search, there is at most one "VM thread" remaining to take for the +// next byte of input. That is, there is never any ambiguity as to the path to +// take through the NFA during a search. +// +// * On the other hand, once a one-pass NFA has its representation converted +// to something where a constant number of instructions is used for each byte +// of input, the implementation looks a lot more like a DFA. It's technically +// more powerful than a DFA since it has side effects (storing offsets inside +// of slots activated by a transition), but it is far closer to a DFA than an +// NFA simulation. +// +// Thus, in this crate, we call it a one-pass DFA. + +use alloc::{vec, vec::Vec}; + +use crate::{ + dfa::{remapper::Remapper, DEAD}, + nfa::thompson::{self, NFA}, + util::{ + alphabet::ByteClasses, + captures::Captures, + escape::DebugByte, + int::{Usize, U32, U64, U8}, + look::{Look, LookSet, UnicodeWordBoundaryError}, + primitives::{NonMaxUsize, PatternID, StateID}, + search::{Anchored, Input, Match, MatchError, MatchKind, Span}, + sparse_set::SparseSet, + }, +}; + +/// The configuration used for building a [one-pass DFA](DFA). +/// +/// A one-pass DFA configuration is a simple data object that is typically used +/// with [`Builder::configure`]. It can be cheaply cloned. +/// +/// A default configuration can be created either with `Config::new`, or +/// perhaps more conveniently, with [`DFA::config`]. +#[derive(Clone, Debug, Default)] +pub struct Config { + match_kind: Option, + starts_for_each_pattern: Option, + byte_classes: Option, + size_limit: Option>, +} + +impl Config { + /// Return a new default one-pass DFA configuration. + pub fn new() -> Config { + Config::default() + } + + /// Set the desired match semantics. + /// + /// The default is [`MatchKind::LeftmostFirst`], which corresponds to the + /// match semantics of Perl-like regex engines. That is, when multiple + /// patterns would match at the same leftmost position, the pattern that + /// appears first in the concrete syntax is chosen. + /// + /// Currently, the only other kind of match semantics supported is + /// [`MatchKind::All`]. This corresponds to "classical DFA" construction + /// where all possible matches are visited. + /// + /// When it comes to the one-pass DFA, it is rarer for preference order and + /// "longest match" to actually disagree. Since if they did disagree, then + /// the regex typically isn't one-pass. For example, searching `Samwise` + /// for `Sam|Samwise` will report `Sam` for leftmost-first matching and + /// `Samwise` for "longest match" or "all" matching. However, this regex is + /// not one-pass if taken literally. The equivalent regex, `Sam(?:|wise)` + /// is one-pass and `Sam|Samwise` may be optimized to it. + /// + /// The other main difference is that "all" match semantics don't support + /// non-greedy matches. "All" match semantics always try to match as much + /// as possible. + pub fn match_kind(mut self, kind: MatchKind) -> Config { + self.match_kind = Some(kind); + self + } + + /// Whether to compile a separate start state for each pattern in the + /// one-pass DFA. + /// + /// When enabled, a separate **anchored** start state is added for each + /// pattern in the DFA. When this start state is used, then the DFA will + /// only search for matches for the pattern specified, even if there are + /// other patterns in the DFA. 
+ /// + /// The main downside of this option is that it can potentially increase + /// the size of the DFA and/or increase the time it takes to build the DFA. + /// + /// You might want to enable this option when you want to both search for + /// anchored matches of any pattern or to search for anchored matches of + /// one particular pattern while using the same DFA. (Otherwise, you would + /// need to compile a new DFA for each pattern.) + /// + /// By default this is disabled. + /// + /// # Example + /// + /// This example shows how to build a multi-regex and then search for + /// matches for a any of the patterns or matches for a specific pattern. + /// + /// ``` + /// use regex_automata::{ + /// dfa::onepass::DFA, Anchored, Input, Match, PatternID, + /// }; + /// + /// let re = DFA::builder() + /// .configure(DFA::config().starts_for_each_pattern(true)) + /// .build_many(&["[a-z]+", "[0-9]+"])?; + /// let (mut cache, mut caps) = (re.create_cache(), re.create_captures()); + /// let haystack = "123abc"; + /// let input = Input::new(haystack).anchored(Anchored::Yes); + /// + /// // A normal multi-pattern search will show pattern 1 matches. + /// re.try_search(&mut cache, &input, &mut caps)?; + /// assert_eq!(Some(Match::must(1, 0..3)), caps.get_match()); + /// + /// // If we only want to report pattern 0 matches, then we'll get no + /// // match here. + /// let input = input.anchored(Anchored::Pattern(PatternID::must(0))); + /// re.try_search(&mut cache, &input, &mut caps)?; + /// assert_eq!(None, caps.get_match()); + /// + /// # Ok::<(), Box>(()) + /// ``` + pub fn starts_for_each_pattern(mut self, yes: bool) -> Config { + self.starts_for_each_pattern = Some(yes); + self + } + + /// Whether to attempt to shrink the size of the DFA's alphabet or not. + /// + /// This option is enabled by default and should never be disabled unless + /// one is debugging a one-pass DFA. + /// + /// When enabled, the DFA will use a map from all possible bytes to their + /// corresponding equivalence class. Each equivalence class represents a + /// set of bytes that does not discriminate between a match and a non-match + /// in the DFA. For example, the pattern `[ab]+` has at least two + /// equivalence classes: a set containing `a` and `b` and a set containing + /// every byte except for `a` and `b`. `a` and `b` are in the same + /// equivalence class because they never discriminate between a match and a + /// non-match. + /// + /// The advantage of this map is that the size of the transition table + /// can be reduced drastically from (approximately) `#states * 256 * + /// sizeof(StateID)` to `#states * k * sizeof(StateID)` where `k` is the + /// number of equivalence classes (rounded up to the nearest power of 2). + /// As a result, total space usage can decrease substantially. Moreover, + /// since a smaller alphabet is used, DFA compilation becomes faster as + /// well. + /// + /// **WARNING:** This is only useful for debugging DFAs. Disabling this + /// does not yield any speed advantages. Namely, even when this is + /// disabled, a byte class map is still used while searching. The only + /// difference is that every byte will be forced into its own distinct + /// equivalence class. This is useful for debugging the actual generated + /// transitions because it lets one see the transitions defined on actual + /// bytes instead of the equivalence classes. 
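// Editor's note: illustrative only, not part of the vendored file. The space
// saving described above is easy to put numbers on. With a hypothetical DFA
// of 1,000 states and 4-byte state IDs, a full 256-column transition table
// needs 1,000 * 256 * 4 bytes, while an alphabet of, say, 5 equivalence
// classes (rounded up to a stride of 8, so row offsets can be computed with
// shifts) needs only 1,000 * 8 * 4 bytes.
fn _table_bytes(states: usize, alphabet_len: usize, id_size: usize) -> usize {
    let stride = alphabet_len.next_power_of_two();
    states * stride * id_size
}

fn _table_bytes_example() {
    assert_eq!(_table_bytes(1_000, 256, 4), 1_024_000);
    assert_eq!(_table_bytes(1_000, 5, 4), 32_000);
}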
+ pub fn byte_classes(mut self, yes: bool) -> Config { + self.byte_classes = Some(yes); + self + } + + /// Set a size limit on the total heap used by a one-pass DFA. + /// + /// This size limit is expressed in bytes and is applied during + /// construction of a one-pass DFA. If the DFA's heap usage exceeds + /// this configured limit, then construction is stopped and an error is + /// returned. + /// + /// The default is no limit. + /// + /// # Example + /// + /// This example shows a one-pass DFA that fails to build because of + /// a configured size limit. This particular example also serves as a + /// cautionary tale demonstrating just how big DFAs with large Unicode + /// character classes can get. + /// + /// ``` + /// # if cfg!(miri) { return Ok(()); } // miri takes too long + /// use regex_automata::{dfa::onepass::DFA, Match}; + /// + /// // 6MB isn't enough! + /// DFA::builder() + /// .configure(DFA::config().size_limit(Some(6_000_000))) + /// .build(r"\w{20}") + /// .unwrap_err(); + /// + /// // ... but 7MB probably is! + /// // (Note that DFA sizes aren't necessarily stable between releases.) + /// let re = DFA::builder() + /// .configure(DFA::config().size_limit(Some(7_000_000))) + /// .build(r"\w{20}")?; + /// let (mut cache, mut caps) = (re.create_cache(), re.create_captures()); + /// let haystack = "A".repeat(20); + /// re.captures(&mut cache, &haystack, &mut caps); + /// assert_eq!(Some(Match::must(0, 0..20)), caps.get_match()); + /// + /// # Ok::<(), Box>(()) + /// ``` + /// + /// While one needs a little more than 3MB to represent `\w{20}`, it + /// turns out that you only need a little more than 4KB to represent + /// `(?-u:\w{20})`. So only use Unicode if you need it! + pub fn size_limit(mut self, limit: Option) -> Config { + self.size_limit = Some(limit); + self + } + + /// Returns the match semantics set in this configuration. + pub fn get_match_kind(&self) -> MatchKind { + self.match_kind.unwrap_or(MatchKind::LeftmostFirst) + } + + /// Returns whether this configuration has enabled anchored starting states + /// for every pattern in the DFA. + pub fn get_starts_for_each_pattern(&self) -> bool { + self.starts_for_each_pattern.unwrap_or(false) + } + + /// Returns whether this configuration has enabled byte classes or not. + /// This is typically a debugging oriented option, as disabling it confers + /// no speed benefit. + pub fn get_byte_classes(&self) -> bool { + self.byte_classes.unwrap_or(true) + } + + /// Returns the DFA size limit of this configuration if one was set. + /// The size limit is total number of bytes on the heap that a DFA is + /// permitted to use. If the DFA exceeds this limit during construction, + /// then construction is stopped and an error is returned. + pub fn get_size_limit(&self) -> Option { + self.size_limit.unwrap_or(None) + } + + /// Overwrite the default configuration such that the options in `o` are + /// always used. If an option in `o` is not set, then the corresponding + /// option in `self` is used. If it's not set in `self` either, then it + /// remains not set. + pub(crate) fn overwrite(&self, o: Config) -> Config { + Config { + match_kind: o.match_kind.or(self.match_kind), + starts_for_each_pattern: o + .starts_for_each_pattern + .or(self.starts_for_each_pattern), + byte_classes: o.byte_classes.or(self.byte_classes), + size_limit: o.size_limit.or(self.size_limit), + } + } +} + +/// A builder for a [one-pass DFA](DFA). 
+/// +/// This builder permits configuring options for the syntax of a pattern, the +/// NFA construction and the DFA construction. This builder is different from a +/// general purpose regex builder in that it permits fine grain configuration +/// of the construction process. The trade off for this is complexity, and +/// the possibility of setting a configuration that might not make sense. For +/// example, there are two different UTF-8 modes: +/// +/// * [`syntax::Config::utf8`](crate::util::syntax::Config::utf8) controls +/// whether the pattern itself can contain sub-expressions that match invalid +/// UTF-8. +/// * [`thompson::Config::utf8`] controls whether empty matches that split a +/// Unicode codepoint are reported or not. +/// +/// Generally speaking, callers will want to either enable all of these or +/// disable all of these. +/// +/// # Example +/// +/// This example shows how to disable UTF-8 mode in the syntax and the NFA. +/// This is generally what you want for matching on arbitrary bytes. +/// +/// ``` +/// # if cfg!(miri) { return Ok(()); } // miri takes too long +/// use regex_automata::{ +/// dfa::onepass::DFA, +/// nfa::thompson, +/// util::syntax, +/// Match, +/// }; +/// +/// let re = DFA::builder() +/// .syntax(syntax::Config::new().utf8(false)) +/// .thompson(thompson::Config::new().utf8(false)) +/// .build(r"foo(?-u:[^b])ar.*")?; +/// let (mut cache, mut caps) = (re.create_cache(), re.create_captures()); +/// +/// let haystack = b"foo\xFFarzz\xE2\x98\xFF\n"; +/// re.captures(&mut cache, haystack, &mut caps); +/// // Notice that `(?-u:[^b])` matches invalid UTF-8, +/// // but the subsequent `.*` does not! Disabling UTF-8 +/// // on the syntax permits this. +/// // +/// // N.B. This example does not show the impact of +/// // disabling UTF-8 mode on a one-pass DFA Config, +/// // since that only impacts regexes that can +/// // produce matches of length 0. +/// assert_eq!(Some(Match::must(0, 0..8)), caps.get_match()); +/// +/// # Ok::<(), Box>(()) +/// ``` +#[derive(Clone, Debug)] +pub struct Builder { + config: Config, + #[cfg(feature = "syntax")] + thompson: thompson::Compiler, +} + +impl Builder { + /// Create a new one-pass DFA builder with the default configuration. + pub fn new() -> Builder { + Builder { + config: Config::default(), + #[cfg(feature = "syntax")] + thompson: thompson::Compiler::new(), + } + } + + /// Build a one-pass DFA from the given pattern. + /// + /// If there was a problem parsing or compiling the pattern, then an error + /// is returned. + #[cfg(feature = "syntax")] + pub fn build(&self, pattern: &str) -> Result { + self.build_many(&[pattern]) + } + + /// Build a one-pass DFA from the given patterns. + /// + /// When matches are returned, the pattern ID corresponds to the index of + /// the pattern in the slice given. + #[cfg(feature = "syntax")] + pub fn build_many>( + &self, + patterns: &[P], + ) -> Result { + let nfa = + self.thompson.build_many(patterns).map_err(BuildError::nfa)?; + self.build_from_nfa(nfa) + } + + /// Build a DFA from the given NFA. + /// + /// # Example + /// + /// This example shows how to build a DFA if you already have an NFA in + /// hand. + /// + /// ``` + /// use regex_automata::{dfa::onepass::DFA, nfa::thompson::NFA, Match}; + /// + /// // This shows how to set non-default options for building an NFA. 
+ /// let nfa = NFA::compiler() + /// .configure(NFA::config().shrink(true)) + /// .build(r"[a-z0-9]+")?; + /// let re = DFA::builder().build_from_nfa(nfa)?; + /// let (mut cache, mut caps) = (re.create_cache(), re.create_captures()); + /// re.captures(&mut cache, "foo123bar", &mut caps); + /// assert_eq!(Some(Match::must(0, 0..9)), caps.get_match()); + /// + /// # Ok::<(), Box>(()) + /// ``` + pub fn build_from_nfa(&self, nfa: NFA) -> Result { + // Why take ownership if we're just going to pass a reference to the + // NFA to our internal builder? Well, the first thing to note is that + // an NFA uses reference counting internally, so either choice is going + // to be cheap. So there isn't much cost either way. + // + // The real reason is that a one-pass DFA, semantically, shares + // ownership of an NFA. This is unlike other DFAs that don't share + // ownership of an NFA at all, primarily because they want to be + // self-contained in order to support cheap (de)serialization. + // + // But then why pass a '&nfa' below if we want to share ownership? + // Well, it turns out that using a '&NFA' in our internal builder + // separates its lifetime from the DFA we're building, and this turns + // out to make code a bit more composable. e.g., We can iterate over + // things inside the NFA while borrowing the builder as mutable because + // we know the NFA cannot be mutated. So TL;DR --- this weirdness is + // "because borrow checker." + InternalBuilder::new(self.config.clone(), &nfa).build() + } + + /// Apply the given one-pass DFA configuration options to this builder. + pub fn configure(&mut self, config: Config) -> &mut Builder { + self.config = self.config.overwrite(config); + self + } + + /// Set the syntax configuration for this builder using + /// [`syntax::Config`](crate::util::syntax::Config). + /// + /// This permits setting things like case insensitivity, Unicode and multi + /// line mode. + /// + /// These settings only apply when constructing a one-pass DFA directly + /// from a pattern. + #[cfg(feature = "syntax")] + pub fn syntax( + &mut self, + config: crate::util::syntax::Config, + ) -> &mut Builder { + self.thompson.syntax(config); + self + } + + /// Set the Thompson NFA configuration for this builder using + /// [`nfa::thompson::Config`](crate::nfa::thompson::Config). + /// + /// This permits setting things like whether additional time should be + /// spent shrinking the size of the NFA. + /// + /// These settings only apply when constructing a DFA directly from a + /// pattern. + #[cfg(feature = "syntax")] + pub fn thompson(&mut self, config: thompson::Config) -> &mut Builder { + self.thompson.configure(config); + self + } +} + +/// An internal builder for encapsulating the state necessary to build a +/// one-pass DFA. Typical use is just `InternalBuilder::new(..).build()`. +/// +/// There is no separate pass for determining whether the NFA is one-pass or +/// not. We just try to build the DFA. If during construction we discover that +/// it is not one-pass, we bail out. This is likely to lead to some undesirable +/// expense in some cases, so it might make sense to try an identify common +/// patterns in the NFA that make it definitively not one-pass. That way, we +/// can avoid ever trying to build a one-pass DFA in the first place. For +/// example, '\w*\s' is not one-pass, and since '\w' is Unicode-aware by +/// default, it's probably not a trivial cost to try and build a one-pass DFA +/// for it and then fail. 
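// Editor's note: illustrative only, not part of the vendored file. The
// "just try to build and bail out" behavior described above shows up
// directly in the public API: construction simply fails for a pattern that
// is not one-pass. The specific patterns below are assumptions based on the
// surrounding comments and doc examples.
fn _one_pass_or_not() {
    use regex_automata::dfa::onepass::DFA;
    // One-pass: at most one viable NFA path for each input byte.
    assert!(DFA::new(r"[a-z0-9]+").is_ok());
    // Not one-pass (per the comment above, since `\w` is Unicode-aware by
    // default): construction is attempted and then bails with an error.
    assert!(DFA::new(r"\w*\s").is_err());
}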
+/// +/// Note that some (immutable) fields are duplicated here. For example, the +/// 'nfa' and 'classes' fields are both in the 'DFA'. They are the same thing, +/// but we duplicate them because it makes composition easier below. Otherwise, +/// since the borrow checker can't see through method calls, the mutable borrow +/// we use to mutate the DFA winds up preventing borrowing from any other part +/// of the DFA, even though we aren't mutating those parts. We only do this +/// because the duplication is cheap. +#[derive(Debug)] +struct InternalBuilder<'a> { + /// The DFA we're building. + dfa: DFA, + /// An unordered collection of NFA state IDs that we haven't yet tried to + /// build into a DFA state yet. + /// + /// This collection does not ultimately wind up including every NFA state + /// ID. Instead, each ID represents a "start" state for a sub-graph of the + /// NFA. The set of NFA states we then use to build a DFA state consists + /// of that "start" state and all states reachable from it via epsilon + /// transitions. + uncompiled_nfa_ids: Vec, + /// A map from NFA state ID to DFA state ID. This is useful for easily + /// determining whether an NFA state has been used as a "starting" point + /// to build a DFA state yet. If it hasn't, then it is mapped to DEAD, + /// and since DEAD is specially added and never corresponds to any NFA + /// state, it follows that a mapping to DEAD implies the NFA state has + /// no corresponding DFA state yet. + nfa_to_dfa_id: Vec, + /// A stack used to traverse the NFA states that make up a single DFA + /// state. Traversal occurs until the stack is empty, and we only push to + /// the stack when the state ID isn't in 'seen'. Actually, even more than + /// that, if we try to push something on to this stack that is already in + /// 'seen', then we bail out on construction completely, since it implies + /// that the NFA is not one-pass. + stack: Vec<(StateID, Epsilons)>, + /// The set of NFA states that we've visited via 'stack'. + seen: SparseSet, + /// Whether a match NFA state has been observed while constructing a + /// one-pass DFA state. Once a match state is seen, assuming we are using + /// leftmost-first match semantics, then we don't add any more transitions + /// to the DFA state we're building. + matched: bool, + /// The config passed to the builder. + /// + /// This is duplicated in dfa.config. + config: Config, + /// The NFA we're building a one-pass DFA from. + /// + /// This is duplicated in dfa.nfa. + nfa: &'a NFA, + /// The equivalence classes that make up the alphabet for this DFA> + /// + /// This is duplicated in dfa.classes. + classes: ByteClasses, +} + +impl<'a> InternalBuilder<'a> { + /// Create a new builder with an initial empty DFA. + fn new(config: Config, nfa: &'a NFA) -> InternalBuilder<'a> { + let classes = if !config.get_byte_classes() { + // A one-pass DFA will always use the equivalence class map, but + // enabling this option is useful for debugging. Namely, this will + // cause all transitions to be defined over their actual bytes + // instead of an opaque equivalence class identifier. The former is + // much easier to grok as a human. + ByteClasses::singletons() + } else { + nfa.byte_classes().clone() + }; + // Normally a DFA alphabet includes the EOI symbol, but we don't need + // that in the one-pass DFA since we handle look-around explicitly + // without encoding it into the DFA. Thus, we don't need to delay + // matches by 1 byte. 
However, we reuse the space that *would* be used + // by the EOI transition by putting match information there (like which + // pattern matches and which look-around assertions need to hold). So + // this means our real alphabet length is 1 fewer than what the byte + // classes report, since we don't use EOI. + let alphabet_len = classes.alphabet_len().checked_sub(1).unwrap(); + let stride2 = classes.stride2(); + let dfa = DFA { + config: config.clone(), + nfa: nfa.clone(), + table: vec![], + starts: vec![], + // Since one-pass DFAs have a smaller state ID max than + // StateID::MAX, it follows that StateID::MAX is a valid initial + // value for min_match_id since no state ID can ever be greater + // than it. In the case of a one-pass DFA with no match states, the + // min_match_id will keep this sentinel value. + min_match_id: StateID::MAX, + classes: classes.clone(), + alphabet_len, + stride2, + pateps_offset: alphabet_len, + // OK because PatternID::MAX*2 is guaranteed not to overflow. + explicit_slot_start: nfa.pattern_len().checked_mul(2).unwrap(), + }; + InternalBuilder { + dfa, + uncompiled_nfa_ids: vec![], + nfa_to_dfa_id: vec![DEAD; nfa.states().len()], + stack: vec![], + seen: SparseSet::new(nfa.states().len()), + matched: false, + config, + nfa, + classes, + } + } + + /// Build the DFA from the NFA given to this builder. If the NFA is not + /// one-pass, then return an error. An error may also be returned if a + /// particular limit is exceeded. (Some limits, like the total heap memory + /// used, are configurable. Others, like the total patterns or slots, are + /// hard-coded based on representational limitations.) + fn build(mut self) -> Result { + self.nfa.look_set_any().available().map_err(BuildError::word)?; + for look in self.nfa.look_set_any().iter() { + // This is a future incompatibility check where if we add any + // more look-around assertions, then the one-pass DFA either + // needs to reject them (what we do here) or it needs to have its + // Transition representation modified to be capable of storing the + // new assertions. + if look.as_repr() > Look::WordUnicodeNegate.as_repr() { + return Err(BuildError::unsupported_look(look)); + } + } + if self.nfa.pattern_len().as_u64() > PatternEpsilons::PATTERN_ID_LIMIT + { + return Err(BuildError::too_many_patterns( + PatternEpsilons::PATTERN_ID_LIMIT, + )); + } + if self.nfa.group_info().explicit_slot_len() > Slots::LIMIT { + return Err(BuildError::not_one_pass( + "too many explicit capturing groups (max is 16)", + )); + } + assert_eq!(DEAD, self.add_empty_state()?); + + // This is where the explicit slots start. We care about this because + // we only need to track explicit slots. The implicit slots---two for + // each pattern---are tracked as part of the search routine itself. + let explicit_slot_start = self.nfa.pattern_len() * 2; + self.add_start_state(None, self.nfa.start_anchored())?; + if self.config.get_starts_for_each_pattern() { + for pid in self.nfa.patterns() { + self.add_start_state( + Some(pid), + self.nfa.start_pattern(pid).unwrap(), + )?; + } + } + // NOTE: One wonders what the effects of treating 'uncompiled_nfa_ids' + // as a stack are. It is really an unordered *set* of NFA state IDs. + // If it, for example, in practice led to discovering whether a regex + // was or wasn't one-pass later than if we processed NFA state IDs in + // ascending order, then that would make this routine more costly in + // the somewhat common case of a regex that isn't one-pass. 
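// Editor's note: illustrative only, not part of the vendored file. The slot
// arithmetic referred to above, in one place: for pattern `pid`, the two
// implicit slots (overall match start/end) are 2*pid and 2*pid + 1, and
// every explicit capturing-group slot is stored relative to
// `explicit_slot_start` so that it can start at index 0 inside the DFA.
fn _implicit_slots(pid: usize) -> (usize, usize) {
    (2 * pid, 2 * pid + 1)
}
fn _dfa_relative_slot(absolute_slot: usize, explicit_slot_start: usize) -> usize {
    // Mirrors the `slot - explicit_slot_start` offset applied when compiling
    // Capture states (see the loop below).
    absolute_slot - explicit_slot_start
}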
+ while let Some(nfa_id) = self.uncompiled_nfa_ids.pop() { + let dfa_id = self.nfa_to_dfa_id[nfa_id]; + // Once we see a match, we keep going, but don't add any new + // transitions. Normally we'd just stop, but we have to keep + // going in order to verify that our regex is actually one-pass. + self.matched = false; + // The NFA states we've already explored for this DFA state. + self.seen.clear(); + // The NFA states to explore via epsilon transitions. If we ever + // try to push an NFA state that we've already seen, then the NFA + // is not one-pass because it implies there are multiple epsilon + // transition paths that lead to the same NFA state. In other + // words, there is ambiguity. + self.stack_push(nfa_id, Epsilons::empty())?; + while let Some((id, epsilons)) = self.stack.pop() { + match *self.nfa.state(id) { + thompson::State::ByteRange { ref trans } => { + self.compile_transition(dfa_id, trans, epsilons)?; + } + thompson::State::Sparse(ref sparse) => { + for trans in sparse.transitions.iter() { + self.compile_transition(dfa_id, trans, epsilons)?; + } + } + thompson::State::Dense(ref dense) => { + for trans in dense.iter() { + self.compile_transition(dfa_id, &trans, epsilons)?; + } + } + thompson::State::Look { look, next } => { + let looks = epsilons.looks().insert(look); + self.stack_push(next, epsilons.set_looks(looks))?; + } + thompson::State::Union { ref alternates } => { + for &sid in alternates.iter().rev() { + self.stack_push(sid, epsilons)?; + } + } + thompson::State::BinaryUnion { alt1, alt2 } => { + self.stack_push(alt2, epsilons)?; + self.stack_push(alt1, epsilons)?; + } + thompson::State::Capture { next, slot, .. } => { + let slot = slot.as_usize(); + let epsilons = if slot < explicit_slot_start { + // If this is an implicit slot, we don't care + // about it, since we handle implicit slots in + // the search routine. We can get away with that + // because there are 2 implicit slots for every + // pattern. + epsilons + } else { + // Offset our explicit slots so that they start + // at index 0. + let offset = slot - explicit_slot_start; + epsilons.set_slots(epsilons.slots().insert(offset)) + }; + self.stack_push(next, epsilons)?; + } + thompson::State::Fail => { + continue; + } + thompson::State::Match { pattern_id } => { + // If we found two different paths to a match state + // for the same DFA state, then we have ambiguity. + // Thus, it's not one-pass. + if self.matched { + return Err(BuildError::not_one_pass( + "multiple epsilon transitions to match state", + )); + } + self.matched = true; + // Shove the matching pattern ID and the 'epsilons' + // into the current DFA state's pattern epsilons. The + // 'epsilons' includes the slots we need to capture + // before reporting the match and also the conditional + // epsilon transitions we need to check before we can + // report a match. + self.dfa.set_pattern_epsilons( + dfa_id, + PatternEpsilons::empty() + .set_pattern_id(pattern_id) + .set_epsilons(epsilons), + ); + // N.B. It is tempting to just bail out here when + // compiling a leftmost-first DFA, since we will never + // compile any more transitions in that case. But we + // actually need to keep going in order to verify that + // we actually have a one-pass regex. e.g., We might + // see more Match states (e.g., for other patterns) + // that imply that we don't have a one-pass regex. + // So instead, we mark that we've found a match and + // continue on. When we go to compile a new DFA state, + // we just skip that part. 
But otherwise check that the + // one-pass property is upheld. + } + } + } + } + self.shuffle_states(); + self.dfa.starts.shrink_to_fit(); + self.dfa.table.shrink_to_fit(); + Ok(self.dfa) + } + + /// Shuffle all match states to the end of the transition table and set + /// 'min_match_id' to the ID of the first such match state. + /// + /// The point of this is to make it extremely cheap to determine whether + /// a state is a match state or not. We need to check on this on every + /// transition during a search, so it being cheap is important. This + /// permits us to check it by simply comparing two state identifiers, as + /// opposed to looking for the pattern ID in the state's `PatternEpsilons`. + /// (Which requires a memory load and some light arithmetic.) + fn shuffle_states(&mut self) { + let mut remapper = Remapper::new(&self.dfa); + let mut next_dest = self.dfa.last_state_id(); + for i in (0..self.dfa.state_len()).rev() { + let id = StateID::must(i); + let is_match = + self.dfa.pattern_epsilons(id).pattern_id().is_some(); + if !is_match { + continue; + } + remapper.swap(&mut self.dfa, next_dest, id); + self.dfa.min_match_id = next_dest; + next_dest = self.dfa.prev_state_id(next_dest).expect( + "match states should be a proper subset of all states", + ); + } + remapper.remap(&mut self.dfa); + } + + /// Compile the given NFA transition into the DFA state given. + /// + /// 'Epsilons' corresponds to any conditional epsilon transitions that need + /// to be satisfied to follow this transition, and any slots that need to + /// be saved if the transition is followed. + /// + /// If this transition indicates that the NFA is not one-pass, then + /// this returns an error. (This occurs, for example, if the DFA state + /// already has a transition defined for the same input symbols as the + /// given transition, *and* the result of the old and new transitions is + /// different.) + fn compile_transition( + &mut self, + dfa_id: StateID, + trans: &thompson::Transition, + epsilons: Epsilons, + ) -> Result<(), BuildError> { + let next_dfa_id = self.add_dfa_state_for_nfa_state(trans.next)?; + for byte in self + .classes + .representatives(trans.start..=trans.end) + .filter_map(|r| r.as_u8()) + { + let oldtrans = self.dfa.transition(dfa_id, byte); + let newtrans = + Transition::new(self.matched, next_dfa_id, epsilons); + // If the old transition points to the DEAD state, then we know + // 'byte' has not been mapped to any transition for this DFA state + // yet. So set it unconditionally. Otherwise, we require that the + // old and new transitions are equivalent. Otherwise, there is + // ambiguity and thus the regex is not one-pass. + if oldtrans.state_id() == DEAD { + self.dfa.set_transition(dfa_id, byte, newtrans); + } else if oldtrans != newtrans { + return Err(BuildError::not_one_pass( + "conflicting transition", + )); + } + } + Ok(()) + } + + /// Add a start state to the DFA corresponding to the given NFA starting + /// state ID. + /// + /// If adding a state would blow any limits (configured or hard-coded), + /// then an error is returned. + /// + /// If the starting state is an anchored state for a particular pattern, + /// then callers must provide the pattern ID for that starting state. + /// Callers must also ensure that the first starting state added is the + /// start state for all patterns, and then each anchored starting state for + /// each pattern (if necessary) added in order. Otherwise, this panics. 
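+    ///
+    /// A rough sketch of the call order this implies for a two-pattern NFA
+    /// with 'starts_for_each_pattern' enabled (hypothetical 'builder' and
+    /// 'nfa' bindings):
+    ///
+    /// ```ignore
+    /// builder.add_start_state(None, nfa.start_anchored())?;
+    /// let pid0 = PatternID::must(0);
+    /// builder.add_start_state(Some(pid0), nfa.start_pattern(pid0).unwrap())?;
+    /// let pid1 = PatternID::must(1);
+    /// builder.add_start_state(Some(pid1), nfa.start_pattern(pid1).unwrap())?;
+    /// ```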
+    fn add_start_state(
+        &mut self,
+        pid: Option<PatternID>,
+        nfa_id: StateID,
+    ) -> Result<StateID, BuildError> {
+        match pid {
+            // With no pid, this should be the start state for all patterns
+            // and thus be the first one.
+            None => assert!(self.dfa.starts.is_empty()),
+            // With a pid, we want it to be at self.dfa.starts[pid+1].
+            Some(pid) => assert!(self.dfa.starts.len() == pid.one_more()),
+        }
+        let dfa_id = self.add_dfa_state_for_nfa_state(nfa_id)?;
+        self.dfa.starts.push(dfa_id);
+        Ok(dfa_id)
+    }
+
+    /// Add a new DFA state corresponding to the given NFA state. If adding a
+    /// state would blow any limits (configured or hard-coded), then an error
+    /// is returned. If a DFA state already exists for the given NFA state,
+    /// then that DFA state's ID is returned and no new states are added.
+    ///
+    /// It is not expected that this routine is called for every NFA state.
+    /// Instead, an NFA state ID will usually correspond to the "start" state
+    /// for a sub-graph of the NFA, where all states in the sub-graph are
+    /// reachable via epsilon transitions (conditional or unconditional). That
+    /// sub-graph of NFA states is ultimately what produces a single DFA state.
+    fn add_dfa_state_for_nfa_state(
+        &mut self,
+        nfa_id: StateID,
+    ) -> Result<StateID, BuildError> {
+        // If we've already built a DFA state for the given NFA state, then
+        // just return that. We definitely do not want to have more than one
+        // DFA state in existence for the same NFA state, since all but one of
+        // them will likely become unreachable. And at least some of them are
+        // likely to wind up being incomplete.
+        let existing_dfa_id = self.nfa_to_dfa_id[nfa_id];
+        if existing_dfa_id != DEAD {
+            return Ok(existing_dfa_id);
+        }
+        // If we don't have any DFA state yet, add it and then add the given
+        // NFA state to the list of states to explore.
+        let dfa_id = self.add_empty_state()?;
+        self.nfa_to_dfa_id[nfa_id] = dfa_id;
+        self.uncompiled_nfa_ids.push(nfa_id);
+        Ok(dfa_id)
+    }
+
+    /// Unconditionally add a new empty DFA state. If adding it would exceed
+    /// any limits (configured or hard-coded), then an error is returned. The
+    /// ID of the new state is returned on success.
+    ///
+    /// The added state is *not* a match state.
+    fn add_empty_state(&mut self) -> Result<StateID, BuildError> {
+        let state_limit = Transition::STATE_ID_LIMIT;
+        // Note that unlike dense and lazy DFAs, we specifically do NOT
+        // premultiply our state IDs here. The reason is that we want to pack
+        // our state IDs into 64-bit transitions with other info, so the fewer
+        // the bits we use for state IDs the better. If we premultiply, then
+        // our state ID space shrinks. We justify this by the assumption that
+        // a one-pass DFA is just already doing a fair bit more work than a
+        // normal DFA anyway, so an extra multiplication to compute a state
+        // transition doesn't seem like a huge deal.
+        let next_id = self.dfa.table.len() >> self.dfa.stride2();
+        let id = StateID::new(next_id)
+            .map_err(|_| BuildError::too_many_states(state_limit))?;
+        if id.as_u64() > Transition::STATE_ID_LIMIT {
+            return Err(BuildError::too_many_states(state_limit));
+        }
+        self.dfa
+            .table
+            .extend(core::iter::repeat(Transition(0)).take(self.dfa.stride()));
+        // The default empty value for 'PatternEpsilons' is sadly not all
+        // zeroes. Instead, a special sentinel is used to indicate that there
+        // is no pattern. So we need to explicitly set the pattern epsilons to
+        // the correct "empty" PatternEpsilons.
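+        // (The row just pushed spans 'stride()' entries, but only
+        // 'alphabet_len()' transitions plus the PatternEpsilons slot written
+        // below are ever read; the remainder is padding from rounding the
+        // stride up to a power of 2.)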
+ self.dfa.set_pattern_epsilons(id, PatternEpsilons::empty()); + if let Some(size_limit) = self.config.get_size_limit() { + if self.dfa.memory_usage() > size_limit { + return Err(BuildError::exceeded_size_limit(size_limit)); + } + } + Ok(id) + } + + /// Push the given NFA state ID and its corresponding epsilons (slots and + /// conditional epsilon transitions) on to a stack for use in a depth first + /// traversal of a sub-graph of the NFA. + /// + /// If the given NFA state ID has already been pushed on to the stack, then + /// it indicates the regex is not one-pass and this correspondingly returns + /// an error. + fn stack_push( + &mut self, + nfa_id: StateID, + epsilons: Epsilons, + ) -> Result<(), BuildError> { + // If we already have seen a match and we are compiling a leftmost + // first DFA, then we shouldn't add any more states to look at. This is + // effectively how preference order and non-greediness is implemented. + // if !self.config.get_match_kind().continue_past_first_match() + // && self.matched + // { + // return Ok(()); + // } + if !self.seen.insert(nfa_id) { + return Err(BuildError::not_one_pass( + "multiple epsilon transitions to same state", + )); + } + self.stack.push((nfa_id, epsilons)); + Ok(()) + } +} + +/// A one-pass DFA for executing a subset of anchored regex searches while +/// resolving capturing groups. +/// +/// A one-pass DFA can be built from an NFA that is one-pass. An NFA is +/// one-pass when there is never any ambiguity about how to continue a search. +/// For example, `a*a` is not one-pass because during a search, it's not +/// possible to know whether to continue matching the `a*` or to move on to +/// the single `a`. However, `a*b` is one-pass, because for every byte in the +/// input, it's always clear when to move on from `a*` to `b`. +/// +/// # Only anchored searches are supported +/// +/// In this crate, especially for DFAs, unanchored searches are implemented by +/// treating the pattern as if it had a `(?s-u:.)*?` prefix. While the prefix +/// is one-pass on its own, adding anything after it, e.g., `(?s-u:.)*?a` will +/// make the overall pattern not one-pass. Why? Because the `(?s-u:.)` matches +/// any byte, and there is therefore ambiguity as to when the prefix should +/// stop matching and something else should start matching. +/// +/// Therefore, one-pass DFAs do not support unanchored searches. In addition +/// to many regexes simply not being one-pass, it implies that one-pass DFAs +/// have limited utility. With that said, when a one-pass DFA can be used, it +/// can potentially provide a dramatic speed up over alternatives like the +/// [`BoundedBacktracker`](crate::nfa::thompson::backtrack::BoundedBacktracker) +/// and the [`PikeVM`](crate::nfa::thompson::pikevm::PikeVM). In particular, +/// a one-pass DFA is the only DFA capable of reporting the spans of matching +/// capturing groups. +/// +/// To clarify, when we say that unanchored searches are not supported, what +/// that actually means is: +/// +/// * The high level routines, [`DFA::is_match`] and [`DFA::captures`], always +/// do anchored searches. +/// * Since iterators are most useful in the context of unanchored searches, +/// there is no `DFA::captures_iter` method. +/// * For lower level routines like [`DFA::try_search`], an error will be +/// returned if the given [`Input`] is configured to do an unanchored search or +/// search for an invalid pattern ID. 
(Note that an [`Input`] is configured to +/// do an unanchored search by default, so just giving a `Input::new` is +/// guaranteed to return an error.) +/// +/// # Other limitations +/// +/// In addition to the [configurable heap limit](Config::size_limit) and +/// the requirement that a regex pattern be one-pass, there are some other +/// limitations: +/// +/// * There is an internal limit on the total number of explicit capturing +/// groups that appear across all patterns. It is somewhat small and there is +/// no way to configure it. If your pattern(s) exceed this limit, then building +/// a one-pass DFA will fail. +/// * If the number of patterns exceeds an internal unconfigurable limit, then +/// building a one-pass DFA will fail. This limit is quite large and you're +/// unlikely to hit it. +/// * If the total number of states exceeds an internal unconfigurable limit, +/// then building a one-pass DFA will fail. This limit is quite large and +/// you're unlikely to hit it. +/// +/// # Other examples of regexes that aren't one-pass +/// +/// One particularly unfortunate example is that enabling Unicode can cause +/// regexes that were one-pass to no longer be one-pass. Consider the regex +/// `(?-u)\w*\s` for example. It is one-pass because there is exactly no +/// overlap between the ASCII definitions of `\w` and `\s`. But `\w*\s` +/// (i.e., with Unicode enabled) is *not* one-pass because `\w` and `\s` get +/// translated to UTF-8 automatons. And while the *codepoints* in `\w` and `\s` +/// do not overlap, the underlying UTF-8 encodings do. Indeed, because of the +/// overlap between UTF-8 automata, the use of Unicode character classes will +/// tend to vastly increase the likelihood of a regex not being one-pass. +/// +/// # How does one know if a regex is one-pass or not? +/// +/// At the time of writing, the only way to know is to try and build a one-pass +/// DFA. The one-pass property is checked while constructing the DFA. +/// +/// This does mean that you might potentially waste some CPU cycles and memory +/// by optimistically trying to build a one-pass DFA. But this is currently the +/// only way. In the future, building a one-pass DFA might be able to use some +/// heuristics to detect common violations of the one-pass property and bail +/// more quickly. +/// +/// # Resource usage +/// +/// Unlike a general DFA, a one-pass DFA has stricter bounds on its resource +/// usage. Namely, construction of a one-pass DFA has a time and space +/// complexity of `O(n)`, where `n ~ nfa.states().len()`. (A general DFA's time +/// and space complexity is `O(2^n)`.) This smaller time bound is achieved +/// because there is at most one DFA state created for each NFA state. If +/// additional DFA states would be required, then the pattern is not one-pass +/// and construction will fail. +/// +/// Note though that currently, this DFA uses a fully dense representation. +/// This means that while its space complexity is no worse than an NFA, it may +/// in practice use more memory because of higher constant factors. The reason +/// for this trade off is two-fold. Firstly, a dense representation makes the +/// search faster. Secondly, the bigger an NFA, the more unlikely it is to be +/// one-pass. Therefore, most one-pass DFAs are usually pretty small. +/// +/// # Example +/// +/// This example shows that the one-pass DFA implements Unicode word boundaries +/// correctly while simultaneously reporting spans for capturing groups that +/// participate in a match. 
(This is the only DFA that implements full support
+/// for Unicode word boundaries.)
+///
+/// ```
+/// # if cfg!(miri) { return Ok(()); } // miri takes too long
+/// use regex_automata::{dfa::onepass::DFA, Match, Span};
+///
+/// let re = DFA::new(r"\b(?P<first>\w+)[[:space:]]+(?P<last>\w+)\b")?;
+/// let (mut cache, mut caps) = (re.create_cache(), re.create_captures());
+///
+/// re.captures(&mut cache, "Шерлок Холмс", &mut caps);
+/// assert_eq!(Some(Match::must(0, 0..23)), caps.get_match());
+/// assert_eq!(Some(Span::from(0..12)), caps.get_group_by_name("first"));
+/// assert_eq!(Some(Span::from(13..23)), caps.get_group_by_name("last"));
+/// # Ok::<(), Box<dyn std::error::Error>>(())
+/// ```
+///
+/// # Example: iteration
+///
+/// Unlike other regex engines in this crate, this one does not provide
+/// iterator search functions. This is because a one-pass DFA only supports
+/// anchored searches, and so iterator functions are generally not applicable.
+///
+/// However, if you know that all of your matches are
+/// directly adjacent, then an iterator can be used. The
+/// [`util::iter::Searcher`](crate::util::iter::Searcher) type can be used for
+/// this purpose:
+///
+/// ```
+/// # if cfg!(miri) { return Ok(()); } // miri takes too long
+/// use regex_automata::{
+///     dfa::onepass::DFA,
+///     util::iter::Searcher,
+///     Anchored, Input, Span,
+/// };
+///
+/// let re = DFA::new(r"\w(\d)\w")?;
+/// let (mut cache, caps) = (re.create_cache(), re.create_captures());
+/// let input = Input::new("a1zb2yc3x").anchored(Anchored::Yes);
+///
+/// let mut it = Searcher::new(input).into_captures_iter(caps, |input, caps| {
+///     Ok(re.try_search(&mut cache, input, caps)?)
+/// }).infallible();
+/// let caps0 = it.next().unwrap();
+/// assert_eq!(Some(Span::from(1..2)), caps0.get_group(1));
+///
+/// # Ok::<(), Box<dyn std::error::Error>>(())
+/// ```
+#[derive(Clone)]
+pub struct DFA {
+    /// The configuration provided by the caller.
+    config: Config,
+    /// The NFA used to build this DFA.
+    ///
+    /// NOTE: We probably don't need to store the NFA here, but we use enough
+    /// bits from it that it's convenient to do so. And there really isn't much
+    /// cost to doing so either, since an NFA is reference counted internally.
+    nfa: NFA,
+    /// The transition table. Given a state ID 's' and a byte of haystack 'b',
+    /// the next state is `table[sid + classes[byte]]`.
+    ///
+    /// The stride of this table (i.e., the number of columns) is always
+    /// a power of 2, even if the alphabet length is smaller. This makes
+    /// converting between state IDs and state indices very cheap.
+    ///
+    /// Note that the stride always includes room for one extra "transition"
+    /// that isn't actually a transition. It is a 'PatternEpsilons' that is
+    /// used for match states only. Because of this, the maximum number of
+    /// active columns in the transition table is 257, which means the maximum
+    /// stride is 512 (the next power of 2 greater than or equal to 257).
+    table: Vec<Transition>,
+    /// The DFA state IDs of the starting states.
+    ///
+    /// `starts[0]` is always present and corresponds to the starting state
+    /// when searching for matches of any pattern in the DFA.
+    ///
+    /// `starts[i]` where i>0 corresponds to the starting state for the pattern
+    /// ID 'i-1'. These starting states are optional.
+    starts: Vec<StateID>,
+    /// Every state ID >= this value corresponds to a match state.
+    ///
+    /// This is what a search uses to detect whether a state is a match state
+    /// or not. It requires only a simple comparison instead of bit-unpacking
+    /// the PatternEpsilons from every state.
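+    ///
+    /// (See 'shuffle_states' in the builder above, which moves every match
+    /// state to the end of the table precisely so that this single comparison
+    /// is sufficient.)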
+    min_match_id: StateID,
+    /// The alphabet of this DFA, split into equivalence classes. Bytes in the
+    /// same equivalence class can never discriminate between a match and a
+    /// non-match.
+    classes: ByteClasses,
+    /// The number of elements in each state in the transition table. This may
+    /// be less than the stride, since the stride is always a power of 2 and
+    /// the alphabet length can be anything up to and including 256.
+    alphabet_len: usize,
+    /// The number of columns in the transition table, expressed as a power of
+    /// 2.
+    stride2: usize,
+    /// The offset at which the PatternEpsilons for a match state is stored in
+    /// the transition table.
+    ///
+    /// PERF: One wonders whether it would be better to put this in a separate
+    /// allocation, since only match states have a non-empty PatternEpsilons
+    /// and the number of match states tends to be dwarfed by the number of
+    /// non-match states. So this would save '8*len(non_match_states)' for each
+    /// DFA. The question is whether moving this to a different allocation will
+    /// lead to a perf hit during searches. You might think dealing with match
+    /// states is rare, but some regexes spend a lot of time in match states
+    /// gobbling up input. But... match state handling is already somewhat
+    /// expensive, so maybe this wouldn't do much? Either way, it's worth
+    /// experimenting.
+    pateps_offset: usize,
+    /// The first explicit slot index. This refers to the first slot appearing
+    /// immediately after the last implicit slot. It is always 'patterns.len()
+    /// * 2'.
+    ///
+    /// We record this because we only store the explicit slots in our DFA
+    /// transition table that need to be saved. Implicit slots are handled
+    /// automatically as part of the search.
+    explicit_slot_start: usize,
+}
+
+impl DFA {
+    /// Parse the given regular expression using the default configuration and
+    /// return the corresponding one-pass DFA.
+    ///
+    /// If you want a non-default configuration, then use the [`Builder`] to
+    /// set your own configuration.
+    ///
+    /// # Example
+    ///
+    /// ```
+    /// use regex_automata::{dfa::onepass::DFA, Match};
+    ///
+    /// let re = DFA::new("foo[0-9]+bar")?;
+    /// let (mut cache, mut caps) = (re.create_cache(), re.create_captures());
+    ///
+    /// re.captures(&mut cache, "foo12345barzzz", &mut caps);
+    /// assert_eq!(Some(Match::must(0, 0..11)), caps.get_match());
+    /// # Ok::<(), Box<dyn std::error::Error>>(())
+    /// ```
+    #[cfg(feature = "syntax")]
+    #[inline]
+    pub fn new(pattern: &str) -> Result<DFA, BuildError> {
+        DFA::builder().build(pattern)
+    }
+
+    /// Like `new`, but parses multiple patterns into a single "multi regex."
+    /// This similarly uses the default regex configuration.
+    ///
+    /// # Example
+    ///
+    /// ```
+    /// use regex_automata::{dfa::onepass::DFA, Match};
+    ///
+    /// let re = DFA::new_many(&["[a-z]+", "[0-9]+"])?;
+    /// let (mut cache, mut caps) = (re.create_cache(), re.create_captures());
+    ///
+    /// re.captures(&mut cache, "abc123", &mut caps);
+    /// assert_eq!(Some(Match::must(0, 0..3)), caps.get_match());
+    ///
+    /// re.captures(&mut cache, "123abc", &mut caps);
+    /// assert_eq!(Some(Match::must(1, 0..3)), caps.get_match());
+    ///
+    /// # Ok::<(), Box<dyn std::error::Error>>(())
+    /// ```
+    #[cfg(feature = "syntax")]
+    #[inline]
+    pub fn new_many<P: AsRef<str>>(patterns: &[P]) -> Result<DFA, BuildError> {
+        DFA::builder().build_many(patterns)
+    }
+
+    /// Like `new`, but builds a one-pass DFA directly from an NFA. This is
+    /// useful if you already have an NFA, or even if you hand-assembled the
+    /// NFA.
+ /// + /// # Example + /// + /// This shows how to hand assemble a regular expression via its HIR, + /// compile an NFA from it and build a one-pass DFA from the NFA. + /// + /// ``` + /// use regex_automata::{ + /// dfa::onepass::DFA, + /// nfa::thompson::NFA, + /// Match, + /// }; + /// use regex_syntax::hir::{Hir, Class, ClassBytes, ClassBytesRange}; + /// + /// let hir = Hir::class(Class::Bytes(ClassBytes::new(vec![ + /// ClassBytesRange::new(b'0', b'9'), + /// ClassBytesRange::new(b'A', b'Z'), + /// ClassBytesRange::new(b'_', b'_'), + /// ClassBytesRange::new(b'a', b'z'), + /// ]))); + /// + /// let config = NFA::config().nfa_size_limit(Some(1_000)); + /// let nfa = NFA::compiler().configure(config).build_from_hir(&hir)?; + /// + /// let re = DFA::new_from_nfa(nfa)?; + /// let (mut cache, mut caps) = (re.create_cache(), re.create_captures()); + /// let expected = Some(Match::must(0, 0..1)); + /// re.captures(&mut cache, "A", &mut caps); + /// assert_eq!(expected, caps.get_match()); + /// + /// # Ok::<(), Box>(()) + /// ``` + pub fn new_from_nfa(nfa: NFA) -> Result { + DFA::builder().build_from_nfa(nfa) + } + + /// Create a new one-pass DFA that matches every input. + /// + /// # Example + /// + /// ``` + /// use regex_automata::{dfa::onepass::DFA, Match}; + /// + /// let dfa = DFA::always_match()?; + /// let mut cache = dfa.create_cache(); + /// let mut caps = dfa.create_captures(); + /// + /// let expected = Match::must(0, 0..0); + /// dfa.captures(&mut cache, "", &mut caps); + /// assert_eq!(Some(expected), caps.get_match()); + /// dfa.captures(&mut cache, "foo", &mut caps); + /// assert_eq!(Some(expected), caps.get_match()); + /// # Ok::<(), Box>(()) + /// ``` + pub fn always_match() -> Result { + let nfa = thompson::NFA::always_match(); + Builder::new().build_from_nfa(nfa) + } + + /// Create a new one-pass DFA that never matches any input. + /// + /// # Example + /// + /// ``` + /// use regex_automata::dfa::onepass::DFA; + /// + /// let dfa = DFA::never_match()?; + /// let mut cache = dfa.create_cache(); + /// let mut caps = dfa.create_captures(); + /// + /// dfa.captures(&mut cache, "", &mut caps); + /// assert_eq!(None, caps.get_match()); + /// dfa.captures(&mut cache, "foo", &mut caps); + /// assert_eq!(None, caps.get_match()); + /// # Ok::<(), Box>(()) + /// ``` + pub fn never_match() -> Result { + let nfa = thompson::NFA::never_match(); + Builder::new().build_from_nfa(nfa) + } + + /// Return a default configuration for a DFA. + /// + /// This is a convenience routine to avoid needing to import the `Config` + /// type when customizing the construction of a DFA. + /// + /// # Example + /// + /// This example shows how to change the match semantics of this DFA from + /// its default "leftmost first" to "all." When using "all," non-greediness + /// doesn't apply and neither does preference order matching. Instead, the + /// longest match possible is always returned. (Although, by construction, + /// it's impossible for a one-pass DFA to have a different answer for + /// "preference order" vs "longest match.") + /// + /// ``` + /// use regex_automata::{dfa::onepass::DFA, Match, MatchKind}; + /// + /// let re = DFA::builder() + /// .configure(DFA::config().match_kind(MatchKind::All)) + /// .build(r"(abc)+?")?; + /// let mut cache = re.create_cache(); + /// let mut caps = re.create_captures(); + /// + /// re.captures(&mut cache, "abcabc", &mut caps); + /// // Normally, the non-greedy repetition would give us a 0..3 match. 
+ /// assert_eq!(Some(Match::must(0, 0..6)), caps.get_match()); + /// # Ok::<(), Box>(()) + /// ``` + #[inline] + pub fn config() -> Config { + Config::new() + } + + /// Return a builder for configuring the construction of a DFA. + /// + /// This is a convenience routine to avoid needing to import the + /// [`Builder`] type in common cases. + /// + /// # Example + /// + /// This example shows how to use the builder to disable UTF-8 mode. + /// + /// ``` + /// # if cfg!(miri) { return Ok(()); } // miri takes too long + /// use regex_automata::{ + /// dfa::onepass::DFA, + /// nfa::thompson, + /// util::syntax, + /// Match, + /// }; + /// + /// let re = DFA::builder() + /// .syntax(syntax::Config::new().utf8(false)) + /// .thompson(thompson::Config::new().utf8(false)) + /// .build(r"foo(?-u:[^b])ar.*")?; + /// let (mut cache, mut caps) = (re.create_cache(), re.create_captures()); + /// + /// let haystack = b"foo\xFFarzz\xE2\x98\xFF\n"; + /// let expected = Some(Match::must(0, 0..8)); + /// re.captures(&mut cache, haystack, &mut caps); + /// assert_eq!(expected, caps.get_match()); + /// + /// # Ok::<(), Box>(()) + /// ``` + #[inline] + pub fn builder() -> Builder { + Builder::new() + } + + /// Create a new empty set of capturing groups that is guaranteed to be + /// valid for the search APIs on this DFA. + /// + /// A `Captures` value created for a specific DFA cannot be used with any + /// other DFA. + /// + /// This is a convenience function for [`Captures::all`]. See the + /// [`Captures`] documentation for an explanation of its alternative + /// constructors that permit the DFA to do less work during a search, and + /// thus might make it faster. + #[inline] + pub fn create_captures(&self) -> Captures { + Captures::all(self.nfa.group_info().clone()) + } + + /// Create a new cache for this DFA. + /// + /// The cache returned should only be used for searches for this + /// DFA. If you want to reuse the cache for another DFA, then you + /// must call [`Cache::reset`] with that DFA (or, equivalently, + /// [`DFA::reset_cache`]). + #[inline] + pub fn create_cache(&self) -> Cache { + Cache::new(self) + } + + /// Reset the given cache such that it can be used for searching with the + /// this DFA (and only this DFA). + /// + /// A cache reset permits reusing memory already allocated in this cache + /// with a different DFA. + /// + /// # Example + /// + /// This shows how to re-purpose a cache for use with a different DFA. + /// + /// ``` + /// # if cfg!(miri) { return Ok(()); } // miri takes too long + /// use regex_automata::{dfa::onepass::DFA, Match}; + /// + /// let re1 = DFA::new(r"\w")?; + /// let re2 = DFA::new(r"\W")?; + /// let mut caps1 = re1.create_captures(); + /// let mut caps2 = re2.create_captures(); + /// + /// let mut cache = re1.create_cache(); + /// assert_eq!( + /// Some(Match::must(0, 0..2)), + /// { re1.captures(&mut cache, "Δ", &mut caps1); caps1.get_match() }, + /// ); + /// + /// // Using 'cache' with re2 is not allowed. It may result in panics or + /// // incorrect results. In order to re-purpose the cache, we must reset + /// // it with the one-pass DFA we'd like to use it with. + /// // + /// // Similarly, after this reset, using the cache with 're1' is also not + /// // allowed. 
+ /// re2.reset_cache(&mut cache); + /// assert_eq!( + /// Some(Match::must(0, 0..3)), + /// { re2.captures(&mut cache, "☃", &mut caps2); caps2.get_match() }, + /// ); + /// + /// # Ok::<(), Box>(()) + /// ``` + #[inline] + pub fn reset_cache(&self, cache: &mut Cache) { + cache.reset(self); + } + + /// Return the config for this one-pass DFA. + #[inline] + pub fn get_config(&self) -> &Config { + &self.config + } + + /// Returns a reference to the underlying NFA. + #[inline] + pub fn get_nfa(&self) -> &NFA { + &self.nfa + } + + /// Returns the total number of patterns compiled into this DFA. + /// + /// In the case of a DFA that contains no patterns, this returns `0`. + #[inline] + pub fn pattern_len(&self) -> usize { + self.get_nfa().pattern_len() + } + + /// Returns the total number of states in this one-pass DFA. + /// + /// Note that unlike dense or sparse DFAs, a one-pass DFA does not expose + /// a low level DFA API. Therefore, this routine has little use other than + /// being informational. + #[inline] + pub fn state_len(&self) -> usize { + self.table.len() >> self.stride2() + } + + /// Returns the total number of elements in the alphabet for this DFA. + /// + /// That is, this returns the total number of transitions that each + /// state in this DFA must have. The maximum alphabet size is 256, which + /// corresponds to each possible byte value. + /// + /// The alphabet size may be less than 256 though, and unless + /// [`Config::byte_classes`] is disabled, it is typically must less than + /// 256. Namely, bytes are grouped into equivalence classes such that no + /// two bytes in the same class can distinguish a match from a non-match. + /// For example, in the regex `^[a-z]+$`, the ASCII bytes `a-z` could + /// all be in the same equivalence class. This leads to a massive space + /// savings. + /// + /// Note though that the alphabet length does _not_ necessarily equal the + /// total stride space taken up by a single DFA state in the transition + /// table. Namely, for performance reasons, the stride is always the + /// smallest power of two that is greater than or equal to the alphabet + /// length. For this reason, [`DFA::stride`] or [`DFA::stride2`] are + /// often more useful. The alphabet length is typically useful only for + /// informational purposes. + /// + /// Note also that unlike dense or sparse DFAs, a one-pass DFA does + /// not have a special end-of-input (EOI) transition. This is because + /// a one-pass DFA handles look-around assertions explicitly (like the + /// [`PikeVM`](crate::nfa::thompson::pikevm::PikeVM)) and does not build + /// them into the transitions of the DFA. + #[inline] + pub fn alphabet_len(&self) -> usize { + self.alphabet_len + } + + /// Returns the total stride for every state in this DFA, expressed as the + /// exponent of a power of 2. The stride is the amount of space each state + /// takes up in the transition table, expressed as a number of transitions. + /// (Unused transitions map to dead states.) + /// + /// The stride of a DFA is always equivalent to the smallest power of + /// 2 that is greater than or equal to the DFA's alphabet length. This + /// definition uses extra space, but possibly permits faster translation + /// between state identifiers and their corresponding offsets in this DFA's + /// transition table. + /// + /// For example, if the DFA's stride is 16 transitions, then its `stride2` + /// is `4` since `2^4 = 16`. 
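+    ///
+    /// A minimal sketch of the arithmetic this enables (plain integers here,
+    /// not this type's internals):
+    ///
+    /// ```
+    /// let stride2 = 4;
+    /// let stride = 1 << stride2; // 16 transitions per state
+    /// // Converting a state index to its offset in the transition table is
+    /// // then a shift rather than a multiplication.
+    /// assert_eq!(3 << stride2, 3 * stride);
+    /// ```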
+ /// + /// The minimum `stride2` value is `1` (corresponding to a stride of `2`) + /// while the maximum `stride2` value is `9` (corresponding to a stride + /// of `512`). The maximum in theory should be `8`, but because of some + /// implementation quirks that may be relaxed in the future, it is one more + /// than `8`. (Do note that a maximal stride is incredibly rare, as it + /// would imply that there is almost no redundant in the regex pattern.) + /// + /// Note that unlike dense or sparse DFAs, a one-pass DFA does not expose + /// a low level DFA API. Therefore, this routine has little use other than + /// being informational. + #[inline] + pub fn stride2(&self) -> usize { + self.stride2 + } + + /// Returns the total stride for every state in this DFA. This corresponds + /// to the total number of transitions used by each state in this DFA's + /// transition table. + /// + /// Please see [`DFA::stride2`] for more information. In particular, this + /// returns the stride as the number of transitions, where as `stride2` + /// returns it as the exponent of a power of 2. + /// + /// Note that unlike dense or sparse DFAs, a one-pass DFA does not expose + /// a low level DFA API. Therefore, this routine has little use other than + /// being informational. + #[inline] + pub fn stride(&self) -> usize { + 1 << self.stride2() + } + + /// Returns the memory usage, in bytes, of this DFA. + /// + /// The memory usage is computed based on the number of bytes used to + /// represent this DFA. + /// + /// This does **not** include the stack size used up by this DFA. To + /// compute that, use `std::mem::size_of::()`. + #[inline] + pub fn memory_usage(&self) -> usize { + use core::mem::size_of; + + self.table.len() * size_of::() + + self.starts.len() * size_of::() + } +} + +impl DFA { + /// Executes an anchored leftmost forward search, and returns true if and + /// only if this one-pass DFA matches the given haystack. + /// + /// This routine may short circuit if it knows that scanning future + /// input will never lead to a different result. In particular, if the + /// underlying DFA enters a match state, then this routine will return + /// `true` immediately without inspecting any future input. (Consider how + /// this might make a difference given the regex `a+` on the haystack + /// `aaaaaaaaaaaaaaa`. This routine can stop after it sees the first `a`, + /// but routines like `find` need to continue searching because `+` is + /// greedy by default.) + /// + /// The given `Input` is forcefully set to use [`Anchored::Yes`] if the + /// given configuration was [`Anchored::No`] (which is the default). + /// + /// # Panics + /// + /// This routine panics if the search could not complete. This can occur + /// in the following circumstances: + /// + /// * When the provided `Input` configuration is not supported. For + /// example, by providing an unsupported anchor mode. Concretely, + /// this occurs when using [`Anchored::Pattern`] without enabling + /// [`Config::starts_for_each_pattern`]. + /// + /// When a search panics, callers cannot know whether a match exists or + /// not. + /// + /// Use [`DFA::try_search`] if you want to handle these panics as error + /// values instead. 
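+    ///
+    /// As a sketch, the panic-free equivalent of an `is_match` call looks
+    /// roughly like this (see [`DFA::try_search`] for the details):
+    ///
+    /// ```
+    /// use regex_automata::{dfa::onepass::DFA, Anchored, Input};
+    ///
+    /// let re = DFA::new("foo[0-9]+bar")?;
+    /// let (mut cache, mut caps) = (re.create_cache(), re.create_captures());
+    /// let input = Input::new("foo12345bar").anchored(Anchored::Yes);
+    /// re.try_search(&mut cache, &input, &mut caps)?;
+    /// assert!(caps.is_match());
+    /// # Ok::<(), Box<dyn std::error::Error>>(())
+    /// ```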
+ /// + /// # Example + /// + /// This shows basic usage: + /// + /// ``` + /// use regex_automata::dfa::onepass::DFA; + /// + /// let re = DFA::new("foo[0-9]+bar")?; + /// let mut cache = re.create_cache(); + /// + /// assert!(re.is_match(&mut cache, "foo12345bar")); + /// assert!(!re.is_match(&mut cache, "foobar")); + /// # Ok::<(), Box>(()) + /// ``` + /// + /// # Example: consistency with search APIs + /// + /// `is_match` is guaranteed to return `true` whenever `captures` returns + /// a match. This includes searches that are executed entirely within a + /// codepoint: + /// + /// ``` + /// use regex_automata::{dfa::onepass::DFA, Input}; + /// + /// let re = DFA::new("a*")?; + /// let mut cache = re.create_cache(); + /// + /// assert!(!re.is_match(&mut cache, Input::new("☃").span(1..2))); + /// # Ok::<(), Box>(()) + /// ``` + /// + /// Notice that when UTF-8 mode is disabled, then the above reports a + /// match because the restriction against zero-width matches that split a + /// codepoint has been lifted: + /// + /// ``` + /// use regex_automata::{dfa::onepass::DFA, nfa::thompson::NFA, Input}; + /// + /// let re = DFA::builder() + /// .thompson(NFA::config().utf8(false)) + /// .build("a*")?; + /// let mut cache = re.create_cache(); + /// + /// assert!(re.is_match(&mut cache, Input::new("☃").span(1..2))); + /// # Ok::<(), Box>(()) + /// ``` + #[inline] + pub fn is_match<'h, I: Into>>( + &self, + cache: &mut Cache, + input: I, + ) -> bool { + let mut input = input.into().earliest(true); + if matches!(input.get_anchored(), Anchored::No) { + input.set_anchored(Anchored::Yes); + } + self.try_search_slots(cache, &input, &mut []).unwrap().is_some() + } + + /// Executes an anchored leftmost forward search, and returns a `Match` if + /// and only if this one-pass DFA matches the given haystack. + /// + /// This routine only includes the overall match span. To get access to the + /// individual spans of each capturing group, use [`DFA::captures`]. + /// + /// The given `Input` is forcefully set to use [`Anchored::Yes`] if the + /// given configuration was [`Anchored::No`] (which is the default). + /// + /// # Panics + /// + /// This routine panics if the search could not complete. This can occur + /// in the following circumstances: + /// + /// * When the provided `Input` configuration is not supported. For + /// example, by providing an unsupported anchor mode. Concretely, + /// this occurs when using [`Anchored::Pattern`] without enabling + /// [`Config::starts_for_each_pattern`]. + /// + /// When a search panics, callers cannot know whether a match exists or + /// not. + /// + /// Use [`DFA::try_search`] if you want to handle these panics as error + /// values instead. + /// + /// # Example + /// + /// Leftmost first match semantics corresponds to the match with the + /// smallest starting offset, but where the end offset is determined by + /// preferring earlier branches in the original regular expression. For + /// example, `Sam|Samwise` will match `Sam` in `Samwise`, but `Samwise|Sam` + /// will match `Samwise` in `Samwise`. + /// + /// Generally speaking, the "leftmost first" match is how most backtracking + /// regular expressions tend to work. This is in contrast to POSIX-style + /// regular expressions that yield "leftmost longest" matches. Namely, + /// both `Sam|Samwise` and `Samwise|Sam` match `Samwise` when using + /// leftmost longest semantics. (This crate does not currently support + /// leftmost longest semantics.) 
+ /// + /// ``` + /// use regex_automata::{dfa::onepass::DFA, Match}; + /// + /// let re = DFA::new("foo[0-9]+")?; + /// let mut cache = re.create_cache(); + /// let expected = Match::must(0, 0..8); + /// assert_eq!(Some(expected), re.find(&mut cache, "foo12345")); + /// + /// // Even though a match is found after reading the first byte (`a`), + /// // the leftmost first match semantics demand that we find the earliest + /// // match that prefers earlier parts of the pattern over later parts. + /// let re = DFA::new("abc|a")?; + /// let mut cache = re.create_cache(); + /// let expected = Match::must(0, 0..3); + /// assert_eq!(Some(expected), re.find(&mut cache, "abc")); + /// + /// # Ok::<(), Box>(()) + /// ``` + #[inline] + pub fn find<'h, I: Into>>( + &self, + cache: &mut Cache, + input: I, + ) -> Option { + let mut input = input.into(); + if matches!(input.get_anchored(), Anchored::No) { + input.set_anchored(Anchored::Yes); + } + if self.get_nfa().pattern_len() == 1 { + let mut slots = [None, None]; + let pid = + self.try_search_slots(cache, &input, &mut slots).unwrap()?; + let start = slots[0].unwrap().get(); + let end = slots[1].unwrap().get(); + return Some(Match::new(pid, Span { start, end })); + } + let ginfo = self.get_nfa().group_info(); + let slots_len = ginfo.implicit_slot_len(); + let mut slots = vec![None; slots_len]; + let pid = self.try_search_slots(cache, &input, &mut slots).unwrap()?; + let start = slots[pid.as_usize() * 2].unwrap().get(); + let end = slots[pid.as_usize() * 2 + 1].unwrap().get(); + Some(Match::new(pid, Span { start, end })) + } + + /// Executes an anchored leftmost forward search and writes the spans + /// of capturing groups that participated in a match into the provided + /// [`Captures`] value. If no match was found, then [`Captures::is_match`] + /// is guaranteed to return `false`. + /// + /// The given `Input` is forcefully set to use [`Anchored::Yes`] if the + /// given configuration was [`Anchored::No`] (which is the default). + /// + /// # Panics + /// + /// This routine panics if the search could not complete. This can occur + /// in the following circumstances: + /// + /// * When the provided `Input` configuration is not supported. For + /// example, by providing an unsupported anchor mode. Concretely, + /// this occurs when using [`Anchored::Pattern`] without enabling + /// [`Config::starts_for_each_pattern`]. + /// + /// When a search panics, callers cannot know whether a match exists or + /// not. + /// + /// Use [`DFA::try_search`] if you want to handle these panics as error + /// values instead. + /// + /// # Example + /// + /// This shows a simple example of a one-pass regex that extracts + /// capturing group spans. + /// + /// ``` + /// use regex_automata::{dfa::onepass::DFA, Match, Span}; + /// + /// let re = DFA::new( + /// // Notice that we use ASCII here. The corresponding Unicode regex + /// // is sadly not one-pass. 
+ /// "(?P[[:alpha:]]+)[[:space:]]+(?P[[:alpha:]]+)", + /// )?; + /// let (mut cache, mut caps) = (re.create_cache(), re.create_captures()); + /// + /// re.captures(&mut cache, "Bruce Springsteen", &mut caps); + /// assert_eq!(Some(Match::must(0, 0..17)), caps.get_match()); + /// assert_eq!(Some(Span::from(0..5)), caps.get_group(1)); + /// assert_eq!(Some(Span::from(6..17)), caps.get_group_by_name("last")); + /// + /// # Ok::<(), Box>(()) + /// ``` + #[inline] + pub fn captures<'h, I: Into>>( + &self, + cache: &mut Cache, + input: I, + caps: &mut Captures, + ) { + let mut input = input.into(); + if matches!(input.get_anchored(), Anchored::No) { + input.set_anchored(Anchored::Yes); + } + self.try_search(cache, &input, caps).unwrap(); + } + + /// Executes an anchored leftmost forward search and writes the spans + /// of capturing groups that participated in a match into the provided + /// [`Captures`] value. If no match was found, then [`Captures::is_match`] + /// is guaranteed to return `false`. + /// + /// The differences with [`DFA::captures`] are: + /// + /// 1. This returns an error instead of panicking if the search fails. + /// 2. Accepts an `&Input` instead of a `Into`. This permits reusing + /// the same input for multiple searches, which _may_ be important for + /// latency. + /// 3. This does not automatically change the [`Anchored`] mode from `No` + /// to `Yes`. Instead, if [`Input::anchored`] is `Anchored::No`, then an + /// error is returned. + /// + /// # Errors + /// + /// This routine errors if the search could not complete. This can occur + /// in the following circumstances: + /// + /// * When the provided `Input` configuration is not supported. For + /// example, by providing an unsupported anchor mode. Concretely, + /// this occurs when using [`Anchored::Pattern`] without enabling + /// [`Config::starts_for_each_pattern`]. + /// + /// When a search returns an error, callers cannot know whether a match + /// exists or not. + /// + /// # Example: specific pattern search + /// + /// This example shows how to build a multi-regex that permits searching + /// for specific patterns. Note that this is somewhat less useful than + /// in other regex engines, since a one-pass DFA by definition has no + /// ambiguity about which pattern can match at a position. That is, if it + /// were possible for two different patterns to match at the same starting + /// position, then the multi-regex would not be one-pass and construction + /// would have failed. + /// + /// Nevertheless, this can still be useful if you only care about matches + /// for a specific pattern, and want the DFA to report "no match" even if + /// some other pattern would have matched. + /// + /// Note that in order to make use of this functionality, + /// [`Config::starts_for_each_pattern`] must be enabled. It is disabled + /// by default since it may result in higher memory usage. + /// + /// ``` + /// use regex_automata::{ + /// dfa::onepass::DFA, Anchored, Input, Match, PatternID, + /// }; + /// + /// let re = DFA::builder() + /// .configure(DFA::config().starts_for_each_pattern(true)) + /// .build_many(&["[a-z]+", "[0-9]+"])?; + /// let (mut cache, mut caps) = (re.create_cache(), re.create_captures()); + /// let haystack = "123abc"; + /// let input = Input::new(haystack).anchored(Anchored::Yes); + /// + /// // A normal multi-pattern search will show pattern 1 matches. 
+ /// re.try_search(&mut cache, &input, &mut caps)?; + /// assert_eq!(Some(Match::must(1, 0..3)), caps.get_match()); + /// + /// // If we only want to report pattern 0 matches, then we'll get no + /// // match here. + /// let input = input.anchored(Anchored::Pattern(PatternID::must(0))); + /// re.try_search(&mut cache, &input, &mut caps)?; + /// assert_eq!(None, caps.get_match()); + /// + /// # Ok::<(), Box>(()) + /// ``` + /// + /// # Example: specifying the bounds of a search + /// + /// This example shows how providing the bounds of a search can produce + /// different results than simply sub-slicing the haystack. + /// + /// ``` + /// # if cfg!(miri) { return Ok(()); } // miri takes too long + /// use regex_automata::{dfa::onepass::DFA, Anchored, Input, Match}; + /// + /// // one-pass DFAs fully support Unicode word boundaries! + /// // A sad joke is that a Unicode aware regex like \w+\s is not one-pass. + /// // :-( + /// let re = DFA::new(r"\b[0-9]{3}\b")?; + /// let (mut cache, mut caps) = (re.create_cache(), re.create_captures()); + /// let haystack = "foo123bar"; + /// + /// // Since we sub-slice the haystack, the search doesn't know about + /// // the larger context and assumes that `123` is surrounded by word + /// // boundaries. And of course, the match position is reported relative + /// // to the sub-slice as well, which means we get `0..3` instead of + /// // `3..6`. + /// let expected = Some(Match::must(0, 0..3)); + /// let input = Input::new(&haystack[3..6]).anchored(Anchored::Yes); + /// re.try_search(&mut cache, &input, &mut caps)?; + /// assert_eq!(expected, caps.get_match()); + /// + /// // But if we provide the bounds of the search within the context of the + /// // entire haystack, then the search can take the surrounding context + /// // into account. (And if we did find a match, it would be reported + /// // as a valid offset into `haystack` instead of its sub-slice.) + /// let expected = None; + /// let input = Input::new(haystack).range(3..6).anchored(Anchored::Yes); + /// re.try_search(&mut cache, &input, &mut caps)?; + /// assert_eq!(expected, caps.get_match()); + /// + /// # Ok::<(), Box>(()) + /// ``` + #[inline] + pub fn try_search( + &self, + cache: &mut Cache, + input: &Input<'_>, + caps: &mut Captures, + ) -> Result<(), MatchError> { + let pid = self.try_search_slots(cache, input, caps.slots_mut())?; + caps.set_pattern(pid); + Ok(()) + } + + /// Executes an anchored leftmost forward search and writes the spans + /// of capturing groups that participated in a match into the provided + /// `slots`, and returns the matching pattern ID. The contents of the + /// slots for patterns other than the matching pattern are unspecified. If + /// no match was found, then `None` is returned and the contents of all + /// `slots` is unspecified. + /// + /// This is like [`DFA::try_search`], but it accepts a raw slots slice + /// instead of a `Captures` value. This is useful in contexts where you + /// don't want or need to allocate a `Captures`. + /// + /// It is legal to pass _any_ number of slots to this routine. If the regex + /// engine would otherwise write a slot offset that doesn't fit in the + /// provided slice, then it is simply skipped. In general though, there are + /// usually three slice lengths you might want to use: + /// + /// * An empty slice, if you only care about which pattern matched. 
+    /// * A slice with
+    /// [`pattern_len() * 2`](crate::dfa::onepass::DFA::pattern_len)
+    /// slots, if you only care about the overall match spans for each matching
+    /// pattern.
+    /// * A slice with
+    /// [`slot_len()`](crate::util::captures::GroupInfo::slot_len) slots, which
+    /// permits recording match offsets for every capturing group in every
+    /// pattern.
+    ///
+    /// # Errors
+    ///
+    /// This routine errors if the search could not complete. This can occur
+    /// in the following circumstances:
+    ///
+    /// * When the provided `Input` configuration is not supported. For
+    /// example, by providing an unsupported anchor mode. Concretely,
+    /// this occurs when using [`Anchored::Pattern`] without enabling
+    /// [`Config::starts_for_each_pattern`].
+    ///
+    /// When a search returns an error, callers cannot know whether a match
+    /// exists or not.
+    ///
+    /// # Example
+    ///
+    /// This example shows how to find the overall match offsets in a
+    /// multi-pattern search without allocating a `Captures` value. Indeed, we
+    /// can put our slots right on the stack.
+    ///
+    /// ```
+    /// use regex_automata::{dfa::onepass::DFA, Anchored, Input, PatternID};
+    ///
+    /// let re = DFA::new_many(&[
+    ///     r"[a-zA-Z]+",
+    ///     r"[0-9]+",
+    /// ])?;
+    /// let mut cache = re.create_cache();
+    /// let input = Input::new("123").anchored(Anchored::Yes);
+    ///
+    /// // We only care about the overall match offsets here, so we just
+    /// // allocate two slots for each pattern. Each slot records the start
+    /// // and end of the match.
+    /// let mut slots = [None; 4];
+    /// let pid = re.try_search_slots(&mut cache, &input, &mut slots)?;
+    /// assert_eq!(Some(PatternID::must(1)), pid);
+    ///
+    /// // The overall match offsets are always at 'pid * 2' and 'pid * 2 + 1'.
+    /// // See 'GroupInfo' for more details on the mapping between groups and
+    /// // slot indices.
+    /// let slot_start = pid.unwrap().as_usize() * 2;
+    /// let slot_end = slot_start + 1;
+    /// assert_eq!(Some(0), slots[slot_start].map(|s| s.get()));
+    /// assert_eq!(Some(3), slots[slot_end].map(|s| s.get()));
+    ///
+    /// # Ok::<(), Box<dyn std::error::Error>>(())
+    /// ```
+    #[inline]
+    pub fn try_search_slots(
+        &self,
+        cache: &mut Cache,
+        input: &Input<'_>,
+        slots: &mut [Option<NonMaxUsize>],
+    ) -> Result<Option<PatternID>, MatchError> {
+        let utf8empty = self.get_nfa().has_empty() && self.get_nfa().is_utf8();
+        if !utf8empty {
+            return self.try_search_slots_imp(cache, input, slots);
+        }
+        // See PikeVM::try_search_slots for why we do this.
+        let min = self.get_nfa().group_info().implicit_slot_len();
+        if slots.len() >= min {
+            return self.try_search_slots_imp(cache, input, slots);
+        }
+        if self.get_nfa().pattern_len() == 1 {
+            let mut enough = [None, None];
+            let got = self.try_search_slots_imp(cache, input, &mut enough)?;
+            // This is OK because we know `enough_slots` is strictly bigger
+            // than `slots`, otherwise this special case isn't reached.
+            slots.copy_from_slice(&enough[..slots.len()]);
+            return Ok(got);
+        }
+        let mut enough = vec![None; min];
+        let got = self.try_search_slots_imp(cache, input, &mut enough)?;
+        // This is OK because we know `enough_slots` is strictly bigger than
+        // `slots`, otherwise this special case isn't reached.
+        slots.copy_from_slice(&enough[..slots.len()]);
+        Ok(got)
+    }
+
+    #[inline(never)]
+    fn try_search_slots_imp(
+        &self,
+        cache: &mut Cache,
+        input: &Input<'_>,
+        slots: &mut [Option<NonMaxUsize>],
+    ) -> Result<Option<PatternID>, MatchError> {
+        let utf8empty = self.get_nfa().has_empty() && self.get_nfa().is_utf8();
+        match self.search_imp(cache, input, slots)?
{ + None => return Ok(None), + Some(pid) if !utf8empty => return Ok(Some(pid)), + Some(pid) => { + // These slot indices are always correct because we know our + // 'pid' is valid and thus we know that the slot indices for it + // are valid. + let slot_start = pid.as_usize().wrapping_mul(2); + let slot_end = slot_start.wrapping_add(1); + // OK because we know we have a match and we know our caller + // provided slots are big enough (which we make true above if + // the caller didn't). Namely, we're only here when 'utf8empty' + // is true, and when that's true, we require slots for every + // pattern. + let start = slots[slot_start].unwrap().get(); + let end = slots[slot_end].unwrap().get(); + // If our match splits a codepoint, then we cannot report is + // as a match. And since one-pass DFAs only support anchored + // searches, we don't try to skip ahead to find the next match. + // We can just quit with nothing. + if start == end && !input.is_char_boundary(start) { + return Ok(None); + } + Ok(Some(pid)) + } + } + } +} + +impl DFA { + fn search_imp( + &self, + cache: &mut Cache, + input: &Input<'_>, + slots: &mut [Option], + ) -> Result, MatchError> { + // PERF: Some ideas. I ran out of steam after my initial impl to try + // many of these. + // + // 1) Try doing more state shuffling. Right now, all we do is push + // match states to the end of the transition table so that we can do + // 'if sid >= self.min_match_id' to know whether we're in a match + // state or not. But what about doing something like dense DFAs and + // pushing dead, match and states with captures/looks all toward the + // beginning of the transition table. Then we could do 'if sid <= + // self.max_special_id', in which case, we need to do some special + // handling of some sort. Otherwise, we get the happy path, just + // like in a DFA search. The main argument against this is that the + // one-pass DFA is likely to be used most often with capturing groups + // and if capturing groups are common, then this might wind up being a + // pessimization. + // + // 2) Consider moving 'PatternEpsilons' out of the transition table. + // It is only needed for match states and usually a small minority of + // states are match states. Therefore, we're using an extra 'u64' for + // most states. + // + // 3) I played around with the match state handling and it seems like + // there is probably a lot left on the table for improvement. The + // key tension is that the 'find_match' routine is a giant mess, but + // splitting it out into a non-inlineable function is a non-starter + // because the match state might consume input, so 'find_match' COULD + // be called quite a lot, and a function call at that point would trash + // perf. In theory, we could detect whether a match state consumes + // input and then specialize our search routine based on that. In that + // case, maybe an extra function call is OK, but even then, it might be + // too much of a latency hit. Another idea is to just try and figure + // out how to reduce the code size of 'find_match'. RE2 has a trick + // here where the match handling isn't done if we know the next byte of + // input yields a match too. Maybe we adopt that? + // + // This just might be a tricky DFA to optimize. + + if input.is_done() { + return Ok(None); + } + // We unfortunately have a bit of book-keeping to do to set things + // up. We do have to setup our cache and clear all of our slots. 
In + // particular, clearing the slots is necessary for the case where we + // report a match, but one of the capturing groups didn't participate + // in the match but had a span set from a previous search. That would + // be bad. In theory, we could avoid all this slot clearing if we knew + // that every slot was always activated for every match. Then we would + // know they would always be overwritten when a match is found. + let explicit_slots_len = core::cmp::min( + Slots::LIMIT, + slots.len().saturating_sub(self.explicit_slot_start), + ); + cache.setup_search(explicit_slots_len); + for slot in cache.explicit_slots() { + *slot = None; + } + for slot in slots.iter_mut() { + *slot = None; + } + // We set the starting slots for every pattern up front. This does + // increase our latency somewhat, but it avoids having to do it every + // time we see a match state (which could be many times in a single + // search if the match state consumes input). + for pid in self.nfa.patterns() { + let i = pid.as_usize() * 2; + if i >= slots.len() { + break; + } + slots[i] = NonMaxUsize::new(input.start()); + } + let mut pid = None; + let mut next_sid = match input.get_anchored() { + Anchored::Yes => self.start(), + Anchored::Pattern(pid) => self.start_pattern(pid)?, + Anchored::No => { + // If the regex is itself always anchored, then we're fine, + // even if the search is configured to be unanchored. + if !self.nfa.is_always_start_anchored() { + return Err(MatchError::unsupported_anchored( + Anchored::No, + )); + } + self.start() + } + }; + let leftmost_first = + matches!(self.config.get_match_kind(), MatchKind::LeftmostFirst); + for at in input.start()..input.end() { + let sid = next_sid; + let trans = self.transition(sid, input.haystack()[at]); + next_sid = trans.state_id(); + let epsilons = trans.epsilons(); + if sid >= self.min_match_id { + if self.find_match(cache, input, at, sid, slots, &mut pid) { + if input.get_earliest() + || (leftmost_first && trans.match_wins()) + { + return Ok(pid); + } + } + } + if sid == DEAD + || (!epsilons.looks().is_empty() + && !self.nfa.look_matcher().matches_set_inline( + epsilons.looks(), + input.haystack(), + at, + )) + { + return Ok(pid); + } + epsilons.slots().apply(at, cache.explicit_slots()); + } + if next_sid >= self.min_match_id { + self.find_match( + cache, + input, + input.end(), + next_sid, + slots, + &mut pid, + ); + } + Ok(pid) + } + + /// Assumes 'sid' is a match state and looks for whether a match can + /// be reported. If so, appropriate offsets are written to 'slots' and + /// 'matched_pid' is set to the matching pattern ID. + /// + /// Even when 'sid' is a match state, it's possible that a match won't + /// be reported. For example, when the conditional epsilon transitions + /// leading to the match state aren't satisfied at the given position in + /// the haystack. + #[cfg_attr(feature = "perf-inline", inline(always))] + fn find_match( + &self, + cache: &mut Cache, + input: &Input<'_>, + at: usize, + sid: StateID, + slots: &mut [Option], + matched_pid: &mut Option, + ) -> bool { + debug_assert!(sid >= self.min_match_id); + let pateps = self.pattern_epsilons(sid); + let epsilons = pateps.epsilons(); + if !epsilons.looks().is_empty() + && !self.nfa.look_matcher().matches_set_inline( + epsilons.looks(), + input.haystack(), + at, + ) + { + return false; + } + let pid = pateps.pattern_id_unchecked(); + // This calculation is always correct because we know our 'pid' is + // valid and thus we know that the slot indices for it are valid. 
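+        // For example, a match for pattern 0 has its end offset written to
+        // slot index 1, and a match for pattern 2 to slot index 5, since the
+        // implicit slots for pattern 'pid' live at 'pid * 2' (start) and
+        // 'pid * 2 + 1' (end).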
+ let slot_end = pid.as_usize().wrapping_mul(2).wrapping_add(1); + // Set the implicit 'end' slot for the matching pattern. (The 'start' + // slot was set at the beginning of the search.) + if slot_end < slots.len() { + slots[slot_end] = NonMaxUsize::new(at); + } + // If the caller provided enough room, copy the previously recorded + // explicit slots from our scratch space to the caller provided slots. + // We *also* need to set any explicit slots that are active as part of + // the path to the match state. + if self.explicit_slot_start < slots.len() { + // NOTE: The 'cache.explicit_slots()' slice is setup at the + // beginning of every search such that it is guaranteed to return a + // slice of length equivalent to 'slots[explicit_slot_start..]'. + slots[self.explicit_slot_start..] + .copy_from_slice(cache.explicit_slots()); + epsilons.slots().apply(at, &mut slots[self.explicit_slot_start..]); + } + *matched_pid = Some(pid); + true + } +} + +impl DFA { + /// Returns the anchored start state for matching any pattern in this DFA. + fn start(&self) -> StateID { + self.starts[0] + } + + /// Returns the anchored start state for matching the given pattern. If + /// 'starts_for_each_pattern' + /// was not enabled, then this returns an error. If the given pattern is + /// not in this DFA, then `Ok(None)` is returned. + fn start_pattern(&self, pid: PatternID) -> Result { + if !self.config.get_starts_for_each_pattern() { + return Err(MatchError::unsupported_anchored(Anchored::Pattern( + pid, + ))); + } + // 'starts' always has non-zero length. The first entry is always the + // anchored starting state for all patterns, and the following entries + // are optional and correspond to the anchored starting states for + // patterns at pid+1. Thus, starts.len()-1 corresponds to the total + // number of patterns that one can explicitly search for. (And it may + // be zero.) + Ok(self.starts.get(pid.one_more()).copied().unwrap_or(DEAD)) + } + + /// Returns the transition from the given state ID and byte of input. The + /// transition includes the next state ID, the slots that should be saved + /// and any conditional epsilon transitions that must be satisfied in order + /// to take this transition. + fn transition(&self, sid: StateID, byte: u8) -> Transition { + let offset = sid.as_usize() << self.stride2(); + let class = self.classes.get(byte).as_usize(); + self.table[offset + class] + } + + /// Set the transition from the given state ID and byte of input to the + /// transition given. + fn set_transition(&mut self, sid: StateID, byte: u8, to: Transition) { + let offset = sid.as_usize() << self.stride2(); + let class = self.classes.get(byte).as_usize(); + self.table[offset + class] = to; + } + + /// Return an iterator of "sparse" transitions for the given state ID. + /// "sparse" in this context means that consecutive transitions that are + /// equivalent are returned as one group, and transitions to the DEAD state + /// are ignored. + /// + /// This winds up being useful for debug printing, since it's much terser + /// to display runs of equivalent transitions than the transition for every + /// possible byte value. Indeed, in practice, it's very common for runs + /// of equivalent transitions to appear. 
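+    ///
+    /// For example, a state in which every byte in the range `a-z` leads to
+    /// the same target state is reported as a single entry rather than as 26
+    /// separate transitions.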
+ fn sparse_transitions(&self, sid: StateID) -> SparseTransitionIter<'_> { + let start = sid.as_usize() << self.stride2(); + let end = start + self.alphabet_len(); + SparseTransitionIter { + it: self.table[start..end].iter().enumerate(), + cur: None, + } + } + + /// Return the pattern epsilons for the given state ID. + /// + /// If the given state ID does not correspond to a match state ID, then the + /// pattern epsilons returned is empty. + fn pattern_epsilons(&self, sid: StateID) -> PatternEpsilons { + let offset = sid.as_usize() << self.stride2(); + PatternEpsilons(self.table[offset + self.pateps_offset].0) + } + + /// Set the pattern epsilons for the given state ID. + fn set_pattern_epsilons(&mut self, sid: StateID, pateps: PatternEpsilons) { + let offset = sid.as_usize() << self.stride2(); + self.table[offset + self.pateps_offset] = Transition(pateps.0); + } + + /// Returns the state ID prior to the one given. This returns None if the + /// given ID is the first DFA state. + fn prev_state_id(&self, id: StateID) -> Option { + if id == DEAD { + None + } else { + // CORRECTNESS: Since 'id' is not the first state, subtracting 1 + // is always valid. + Some(StateID::new_unchecked(id.as_usize().checked_sub(1).unwrap())) + } + } + + /// Returns the state ID of the last state in this DFA's transition table. + /// "last" in this context means the last state to appear in memory, i.e., + /// the one with the greatest ID. + fn last_state_id(&self) -> StateID { + // CORRECTNESS: A DFA table is always non-empty since it always at + // least contains a DEAD state. Since every state has the same stride, + // we can just compute what the "next" state ID would have been and + // then subtract 1 from it. + StateID::new_unchecked( + (self.table.len() >> self.stride2()).checked_sub(1).unwrap(), + ) + } + + /// Move the transitions from 'id1' to 'id2' and vice versa. + /// + /// WARNING: This does not update the rest of the transition table to have + /// transitions to 'id1' changed to 'id2' and vice versa. This merely moves + /// the states in memory. + pub(super) fn swap_states(&mut self, id1: StateID, id2: StateID) { + let o1 = id1.as_usize() << self.stride2(); + let o2 = id2.as_usize() << self.stride2(); + for b in 0..self.stride() { + self.table.swap(o1 + b, o2 + b); + } + } + + /// Map all state IDs in this DFA (transition table + start states) + /// according to the closure given. 
+ pub(super) fn remap(&mut self, map: impl Fn(StateID) -> StateID) { + for i in 0..self.state_len() { + let offset = i << self.stride2(); + for b in 0..self.alphabet_len() { + let next = self.table[offset + b].state_id(); + self.table[offset + b].set_state_id(map(next)); + } + } + for i in 0..self.starts.len() { + self.starts[i] = map(self.starts[i]); + } + } +} + +impl core::fmt::Debug for DFA { + fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { + fn debug_state_transitions( + f: &mut core::fmt::Formatter, + dfa: &DFA, + sid: StateID, + ) -> core::fmt::Result { + for (i, (start, end, trans)) in + dfa.sparse_transitions(sid).enumerate() + { + let next = trans.state_id(); + if i > 0 { + write!(f, ", ")?; + } + if start == end { + write!( + f, + "{:?} => {:?}", + DebugByte(start), + next.as_usize(), + )?; + } else { + write!( + f, + "{:?}-{:?} => {:?}", + DebugByte(start), + DebugByte(end), + next.as_usize(), + )?; + } + if trans.match_wins() { + write!(f, " (MW)")?; + } + if !trans.epsilons().is_empty() { + write!(f, " ({:?})", trans.epsilons())?; + } + } + Ok(()) + } + + writeln!(f, "onepass::DFA(")?; + for index in 0..self.state_len() { + let sid = StateID::must(index); + let pateps = self.pattern_epsilons(sid); + if sid == DEAD { + write!(f, "D ")?; + } else if pateps.pattern_id().is_some() { + write!(f, "* ")?; + } else { + write!(f, " ")?; + } + write!(f, "{:06?}", sid.as_usize())?; + if !pateps.is_empty() { + write!(f, " ({pateps:?})")?; + } + write!(f, ": ")?; + debug_state_transitions(f, self, sid)?; + write!(f, "\n")?; + } + writeln!(f, "")?; + for (i, &sid) in self.starts.iter().enumerate() { + if i == 0 { + writeln!(f, "START(ALL): {:?}", sid.as_usize())?; + } else { + writeln!( + f, + "START(pattern: {:?}): {:?}", + i - 1, + sid.as_usize(), + )?; + } + } + writeln!(f, "state length: {:?}", self.state_len())?; + writeln!(f, "pattern length: {:?}", self.pattern_len())?; + writeln!(f, ")")?; + Ok(()) + } +} + +/// An iterator over groups of consecutive equivalent transitions in a single +/// state. +#[derive(Debug)] +struct SparseTransitionIter<'a> { + it: core::iter::Enumerate>, + cur: Option<(u8, u8, Transition)>, +} + +impl<'a> Iterator for SparseTransitionIter<'a> { + type Item = (u8, u8, Transition); + + fn next(&mut self) -> Option<(u8, u8, Transition)> { + while let Some((b, &trans)) = self.it.next() { + // Fine because we'll never have more than u8::MAX transitions in + // one state. + let b = b.as_u8(); + let (prev_start, prev_end, prev_trans) = match self.cur { + Some(t) => t, + None => { + self.cur = Some((b, b, trans)); + continue; + } + }; + if prev_trans == trans { + self.cur = Some((prev_start, b, prev_trans)); + } else { + self.cur = Some((b, b, trans)); + if prev_trans.state_id() != DEAD { + return Some((prev_start, prev_end, prev_trans)); + } + } + } + if let Some((start, end, trans)) = self.cur.take() { + if trans.state_id() != DEAD { + return Some((start, end, trans)); + } + } + None + } +} + +/// A cache represents mutable state that a one-pass [`DFA`] requires during a +/// search. +/// +/// For a given one-pass DFA, its corresponding cache may be created either via +/// [`DFA::create_cache`], or via [`Cache::new`]. They are equivalent in every +/// way, except the former does not require explicitly importing `Cache`. +/// +/// A particular `Cache` is coupled with the one-pass DFA from which it was +/// created. It may only be used with that one-pass DFA. 
A cache and its +/// allocations may be re-purposed via [`Cache::reset`], in which case, it can +/// only be used with the new one-pass DFA (and not the old one). +#[derive(Clone, Debug)] +pub struct Cache { + /// Scratch space used to store slots during a search. Basically, we use + /// the caller provided slots to store slots known when a match occurs. + /// But after a match occurs, we might continue a search but ultimately + /// fail to extend the match. When continuing the search, we need some + /// place to store candidate capture offsets without overwriting the slot + /// offsets recorded for the most recently seen match. + explicit_slots: Vec>, + /// The number of slots in the caller-provided 'Captures' value for the + /// current search. This is always at most 'explicit_slots.len()', but + /// might be less than it, if the caller provided fewer slots to fill. + explicit_slot_len: usize, +} + +impl Cache { + /// Create a new [`onepass::DFA`](DFA) cache. + /// + /// A potentially more convenient routine to create a cache is + /// [`DFA::create_cache`], as it does not require also importing the + /// `Cache` type. + /// + /// If you want to reuse the returned `Cache` with some other one-pass DFA, + /// then you must call [`Cache::reset`] with the desired one-pass DFA. + pub fn new(re: &DFA) -> Cache { + let mut cache = Cache { explicit_slots: vec![], explicit_slot_len: 0 }; + cache.reset(re); + cache + } + + /// Reset this cache such that it can be used for searching with a + /// different [`onepass::DFA`](DFA). + /// + /// A cache reset permits reusing memory already allocated in this cache + /// with a different one-pass DFA. + /// + /// # Example + /// + /// This shows how to re-purpose a cache for use with a different one-pass + /// DFA. + /// + /// ``` + /// # if cfg!(miri) { return Ok(()); } // miri takes too long + /// use regex_automata::{dfa::onepass::DFA, Match}; + /// + /// let re1 = DFA::new(r"\w")?; + /// let re2 = DFA::new(r"\W")?; + /// let mut caps1 = re1.create_captures(); + /// let mut caps2 = re2.create_captures(); + /// + /// let mut cache = re1.create_cache(); + /// assert_eq!( + /// Some(Match::must(0, 0..2)), + /// { re1.captures(&mut cache, "Δ", &mut caps1); caps1.get_match() }, + /// ); + /// + /// // Using 'cache' with re2 is not allowed. It may result in panics or + /// // incorrect results. In order to re-purpose the cache, we must reset + /// // it with the one-pass DFA we'd like to use it with. + /// // + /// // Similarly, after this reset, using the cache with 're1' is also not + /// // allowed. + /// re2.reset_cache(&mut cache); + /// assert_eq!( + /// Some(Match::must(0, 0..3)), + /// { re2.captures(&mut cache, "☃", &mut caps2); caps2.get_match() }, + /// ); + /// + /// # Ok::<(), Box>(()) + /// ``` + pub fn reset(&mut self, re: &DFA) { + let explicit_slot_len = re.get_nfa().group_info().explicit_slot_len(); + self.explicit_slots.resize(explicit_slot_len, None); + self.explicit_slot_len = explicit_slot_len; + } + + /// Returns the heap memory usage, in bytes, of this cache. + /// + /// This does **not** include the stack size used up by this cache. To + /// compute that, use `std::mem::size_of::()`. 
+ pub fn memory_usage(&self) -> usize { + self.explicit_slots.len() * core::mem::size_of::>() + } + + fn explicit_slots(&mut self) -> &mut [Option] { + &mut self.explicit_slots[..self.explicit_slot_len] + } + + fn setup_search(&mut self, explicit_slot_len: usize) { + self.explicit_slot_len = explicit_slot_len; + } +} + +/// Represents a single transition in a one-pass DFA. +/// +/// The high 21 bits corresponds to the state ID. The bit following corresponds +/// to the special "match wins" flag. The remaining low 42 bits corresponds to +/// the transition epsilons, which contains the slots that should be saved when +/// this transition is followed and the conditional epsilon transitions that +/// must be satisfied in order to follow this transition. +#[derive(Clone, Copy, Eq, PartialEq)] +struct Transition(u64); + +impl Transition { + const STATE_ID_BITS: u64 = 21; + const STATE_ID_SHIFT: u64 = 64 - Transition::STATE_ID_BITS; + const STATE_ID_LIMIT: u64 = 1 << Transition::STATE_ID_BITS; + const MATCH_WINS_SHIFT: u64 = 64 - (Transition::STATE_ID_BITS + 1); + const INFO_MASK: u64 = 0x000003FF_FFFFFFFF; + + /// Return a new transition to the given state ID with the given epsilons. + fn new(match_wins: bool, sid: StateID, epsilons: Epsilons) -> Transition { + let match_wins = + if match_wins { 1 << Transition::MATCH_WINS_SHIFT } else { 0 }; + let sid = sid.as_u64() << Transition::STATE_ID_SHIFT; + Transition(sid | match_wins | epsilons.0) + } + + /// Returns true if and only if this transition points to the DEAD state. + fn is_dead(self) -> bool { + self.state_id() == DEAD + } + + /// Return whether this transition has a "match wins" property. + /// + /// When a transition has this property, it means that if a match has been + /// found and the search uses leftmost-first semantics, then that match + /// should be returned immediately instead of continuing on. + /// + /// The "match wins" name comes from RE2, which uses a pretty much + /// identical mechanism for implementing leftmost-first semantics. + fn match_wins(&self) -> bool { + (self.0 >> Transition::MATCH_WINS_SHIFT & 1) == 1 + } + + /// Return the "next" state ID that this transition points to. + fn state_id(&self) -> StateID { + // OK because a Transition has a valid StateID in its upper bits by + // construction. The cast to usize is also correct, even on 16-bit + // targets because, again, we know the upper bits is a valid StateID, + // which can never overflow usize on any supported target. + StateID::new_unchecked( + (self.0 >> Transition::STATE_ID_SHIFT).as_usize(), + ) + } + + /// Set the "next" state ID in this transition. + fn set_state_id(&mut self, sid: StateID) { + *self = Transition::new(self.match_wins(), sid, self.epsilons()); + } + + /// Return the epsilons embedded in this transition. + fn epsilons(&self) -> Epsilons { + Epsilons(self.0 & Transition::INFO_MASK) + } +} + +impl core::fmt::Debug for Transition { + fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { + if self.is_dead() { + return write!(f, "0"); + } + write!(f, "{}", self.state_id().as_usize())?; + if self.match_wins() { + write!(f, "-MW")?; + } + if !self.epsilons().is_empty() { + write!(f, "-{:?}", self.epsilons())?; + } + Ok(()) + } +} + +/// A representation of a match state's pattern ID along with the epsilons for +/// when a match occurs. +/// +/// A match state in a one-pass DFA, unlike in a more general DFA, has exactly +/// one pattern ID. If it had more, then the original NFA would not have been +/// one-pass. 
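+/// Like a `Transition`, a `PatternEpsilons` packs its data into a single
+/// `u64`: the pattern ID occupies the high 22 bits and the epsilons occupy
+/// the low 42 bits.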
+/// +/// The "epsilons" part of this corresponds to what was found in the epsilon +/// transitions between the transition taken in the last byte of input and the +/// ultimate match state. This might include saving slots and/or conditional +/// epsilon transitions that must be satisfied before one can report the match. +/// +/// Technically, every state has room for a 'PatternEpsilons', but it is only +/// ever non-empty for match states. +#[derive(Clone, Copy)] +struct PatternEpsilons(u64); + +impl PatternEpsilons { + const PATTERN_ID_BITS: u64 = 22; + const PATTERN_ID_SHIFT: u64 = 64 - PatternEpsilons::PATTERN_ID_BITS; + // A sentinel value indicating that this is not a match state. We don't + // use 0 since 0 is a valid pattern ID. + const PATTERN_ID_NONE: u64 = 0x00000000_003FFFFF; + const PATTERN_ID_LIMIT: u64 = PatternEpsilons::PATTERN_ID_NONE; + const PATTERN_ID_MASK: u64 = 0xFFFFFC00_00000000; + const EPSILONS_MASK: u64 = 0x000003FF_FFFFFFFF; + + /// Return a new empty pattern epsilons that has no pattern ID and has no + /// epsilons. This is suitable for non-match states. + fn empty() -> PatternEpsilons { + PatternEpsilons( + PatternEpsilons::PATTERN_ID_NONE + << PatternEpsilons::PATTERN_ID_SHIFT, + ) + } + + /// Whether this pattern epsilons is empty or not. It's empty when it has + /// no pattern ID and an empty epsilons. + fn is_empty(self) -> bool { + self.pattern_id().is_none() && self.epsilons().is_empty() + } + + /// Return the pattern ID in this pattern epsilons if one exists. + fn pattern_id(self) -> Option { + let pid = self.0 >> PatternEpsilons::PATTERN_ID_SHIFT; + if pid == PatternEpsilons::PATTERN_ID_LIMIT { + None + } else { + Some(PatternID::new_unchecked(pid.as_usize())) + } + } + + /// Returns the pattern ID without checking whether it's valid. If this is + /// called and there is no pattern ID in this `PatternEpsilons`, then this + /// will likely produce an incorrect result or possibly even a panic or + /// an overflow. But safety will not be violated. + /// + /// This is useful when you know a particular state is a match state. If + /// it's a match state, then it must have a pattern ID. + fn pattern_id_unchecked(self) -> PatternID { + let pid = self.0 >> PatternEpsilons::PATTERN_ID_SHIFT; + PatternID::new_unchecked(pid.as_usize()) + } + + /// Return a new pattern epsilons with the given pattern ID, but the same + /// epsilons. + fn set_pattern_id(self, pid: PatternID) -> PatternEpsilons { + PatternEpsilons( + (pid.as_u64() << PatternEpsilons::PATTERN_ID_SHIFT) + | (self.0 & PatternEpsilons::EPSILONS_MASK), + ) + } + + /// Return the epsilons part of this pattern epsilons. + fn epsilons(self) -> Epsilons { + Epsilons(self.0 & PatternEpsilons::EPSILONS_MASK) + } + + /// Return a new pattern epsilons with the given epsilons, but the same + /// pattern ID. 
+ fn set_epsilons(self, epsilons: Epsilons) -> PatternEpsilons { + PatternEpsilons( + (self.0 & PatternEpsilons::PATTERN_ID_MASK) + | (u64::from(epsilons.0) & PatternEpsilons::EPSILONS_MASK), + ) + } +} + +impl core::fmt::Debug for PatternEpsilons { + fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { + if self.is_empty() { + return write!(f, "N/A"); + } + if let Some(pid) = self.pattern_id() { + write!(f, "{}", pid.as_usize())?; + } + if !self.epsilons().is_empty() { + if self.pattern_id().is_some() { + write!(f, "/")?; + } + write!(f, "{:?}", self.epsilons())?; + } + Ok(()) + } +} + +/// Epsilons represents all of the NFA epsilons transitions that went into a +/// single transition in a single DFA state. In this case, it only represents +/// the epsilon transitions that have some kind of non-consuming side effect: +/// either the transition requires storing the current position of the search +/// into a slot, or the transition is conditional and requires the current +/// position in the input to satisfy an assertion before the transition may be +/// taken. +/// +/// This folds the cumulative effect of a group of NFA states (all connected +/// by epsilon transitions) down into a single set of bits. While these bits +/// can represent all possible conditional epsilon transitions, it only permits +/// storing up to a somewhat small number of slots. +/// +/// Epsilons is represented as a 42-bit integer. For example, it is packed into +/// the lower 42 bits of a `Transition`. (Where the high 22 bits contains a +/// `StateID` and a special "match wins" property.) +#[derive(Clone, Copy)] +struct Epsilons(u64); + +impl Epsilons { + const SLOT_MASK: u64 = 0x000003FF_FFFFFC00; + const SLOT_SHIFT: u64 = 10; + const LOOK_MASK: u64 = 0x00000000_000003FF; + + /// Create a new empty epsilons. It has no slots and no assertions that + /// need to be satisfied. + fn empty() -> Epsilons { + Epsilons(0) + } + + /// Returns true if this epsilons contains no slots and no assertions. + fn is_empty(self) -> bool { + self.0 == 0 + } + + /// Returns the slot epsilon transitions. + fn slots(self) -> Slots { + Slots((self.0 >> Epsilons::SLOT_SHIFT).low_u32()) + } + + /// Set the slot epsilon transitions. + fn set_slots(self, slots: Slots) -> Epsilons { + Epsilons( + (u64::from(slots.0) << Epsilons::SLOT_SHIFT) + | (self.0 & Epsilons::LOOK_MASK), + ) + } + + /// Return the set of look-around assertions in these epsilon transitions. + fn looks(self) -> LookSet { + LookSet { bits: (self.0 & Epsilons::LOOK_MASK).low_u32() } + } + + /// Set the look-around assertions on these epsilon transitions. + fn set_looks(self, look_set: LookSet) -> Epsilons { + Epsilons( + (self.0 & Epsilons::SLOT_MASK) + | (u64::from(look_set.bits) & Epsilons::LOOK_MASK), + ) + } +} + +impl core::fmt::Debug for Epsilons { + fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { + let mut wrote = false; + if !self.slots().is_empty() { + write!(f, "{:?}", self.slots())?; + wrote = true; + } + if !self.looks().is_empty() { + if wrote { + write!(f, "/")?; + } + write!(f, "{:?}", self.looks())?; + wrote = true; + } + if !wrote { + write!(f, "N/A")?; + } + Ok(()) + } +} + +/// The set of epsilon transitions indicating that the current position in a +/// search should be saved to a slot. +/// +/// This *only* represents explicit slots. So for example, the pattern +/// `[a-z]+([0-9]+)([a-z]+)` has: +/// +/// * 3 capturing groups, thus 6 slots. +/// * 1 implicit capturing group, thus 2 implicit slots. 
+/// * 2 explicit capturing groups, thus 4 explicit slots. +/// +/// While implicit slots are represented by epsilon transitions in an NFA, we +/// do not explicitly represent them here. Instead, implicit slots are assumed +/// to be present and handled automatically in the search code. Therefore, +/// that means we only need to represent explicit slots in our epsilon +/// transitions. +/// +/// Its representation is a bit set. The bit 'i' is set if and only if there +/// exists an explicit slot at index 'c', where 'c = (#patterns * 2) + i'. That +/// is, the bit 'i' corresponds to the first explicit slot and the first +/// explicit slot appears immediately following the last implicit slot. (If +/// this is confusing, see `GroupInfo` for more details on how slots works.) +/// +/// A single `Slots` represents all the active slots in a sub-graph of an NFA, +/// where all the states are connected by epsilon transitions. In effect, when +/// traversing the one-pass DFA during a search, all slots set in a particular +/// transition must be captured by recording the current search position. +/// +/// The API of `Slots` requires the caller to handle the explicit slot offset. +/// That is, a `Slots` doesn't know where the explicit slots start for a +/// particular NFA. Thus, if the callers see's the bit 'i' is set, then they +/// need to do the arithmetic above to find 'c', which is the real actual slot +/// index in the corresponding NFA. +#[derive(Clone, Copy)] +struct Slots(u32); + +impl Slots { + const LIMIT: usize = 32; + + /// Insert the slot at the given bit index. + fn insert(self, slot: usize) -> Slots { + debug_assert!(slot < Slots::LIMIT); + Slots(self.0 | (1 << slot.as_u32())) + } + + /// Remove the slot at the given bit index. + fn remove(self, slot: usize) -> Slots { + debug_assert!(slot < Slots::LIMIT); + Slots(self.0 & !(1 << slot.as_u32())) + } + + /// Returns true if and only if this set contains no slots. + fn is_empty(self) -> bool { + self.0 == 0 + } + + /// Returns an iterator over all of the set bits in this set. + fn iter(self) -> SlotsIter { + SlotsIter { slots: self } + } + + /// For the position `at` in the current haystack, copy it to + /// `caller_explicit_slots` for all slots that are in this set. + /// + /// Callers may pass a slice of any length. Slots in this set bigger than + /// the length of the given explicit slots are simply skipped. + /// + /// The slice *must* correspond only to the explicit slots and the first + /// element of the slice must always correspond to the first explicit slot + /// in the corresponding NFA. + fn apply( + self, + at: usize, + caller_explicit_slots: &mut [Option], + ) { + if self.is_empty() { + return; + } + let at = NonMaxUsize::new(at); + for slot in self.iter() { + if slot >= caller_explicit_slots.len() { + break; + } + caller_explicit_slots[slot] = at; + } + } +} + +impl core::fmt::Debug for Slots { + fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { + write!(f, "S")?; + for slot in self.iter() { + write!(f, "-{slot:?}")?; + } + Ok(()) + } +} + +/// An iterator over all of the bits set in a slot set. +/// +/// This returns the bit index that is set, so callers may need to offset it +/// to get the actual NFA slot index. +#[derive(Debug)] +struct SlotsIter { + slots: Slots, +} + +impl Iterator for SlotsIter { + type Item = usize; + + fn next(&mut self) -> Option { + // Number of zeroes here is always <= u8::MAX, and so fits in a usize. 
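+        // For example, if the remaining bit set is 0b0101, this yields slot
+        // index 0, then slot index 2 on the next call, and then stops.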
+ let slot = self.slots.0.trailing_zeros().as_usize(); + if slot >= Slots::LIMIT { + return None; + } + self.slots = self.slots.remove(slot); + Some(slot) + } +} + +/// An error that occurred during the construction of a one-pass DFA. +/// +/// This error does not provide many introspection capabilities. There are +/// generally only two things you can do with it: +/// +/// * Obtain a human readable message via its `std::fmt::Display` impl. +/// * Access an underlying [`thompson::BuildError`] type from its `source` +/// method via the `std::error::Error` trait. This error only occurs when using +/// convenience routines for building a one-pass DFA directly from a pattern +/// string. +/// +/// When the `std` feature is enabled, this implements the `std::error::Error` +/// trait. +#[derive(Clone, Debug)] +pub struct BuildError { + kind: BuildErrorKind, +} + +/// The kind of error that occurred during the construction of a one-pass DFA. +#[derive(Clone, Debug)] +enum BuildErrorKind { + NFA(crate::nfa::thompson::BuildError), + Word(UnicodeWordBoundaryError), + TooManyStates { limit: u64 }, + TooManyPatterns { limit: u64 }, + UnsupportedLook { look: Look }, + ExceededSizeLimit { limit: usize }, + NotOnePass { msg: &'static str }, +} + +impl BuildError { + fn nfa(err: crate::nfa::thompson::BuildError) -> BuildError { + BuildError { kind: BuildErrorKind::NFA(err) } + } + + fn word(err: UnicodeWordBoundaryError) -> BuildError { + BuildError { kind: BuildErrorKind::Word(err) } + } + + fn too_many_states(limit: u64) -> BuildError { + BuildError { kind: BuildErrorKind::TooManyStates { limit } } + } + + fn too_many_patterns(limit: u64) -> BuildError { + BuildError { kind: BuildErrorKind::TooManyPatterns { limit } } + } + + fn unsupported_look(look: Look) -> BuildError { + BuildError { kind: BuildErrorKind::UnsupportedLook { look } } + } + + fn exceeded_size_limit(limit: usize) -> BuildError { + BuildError { kind: BuildErrorKind::ExceededSizeLimit { limit } } + } + + fn not_one_pass(msg: &'static str) -> BuildError { + BuildError { kind: BuildErrorKind::NotOnePass { msg } } + } +} + +#[cfg(feature = "std")] +impl std::error::Error for BuildError { + fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { + use self::BuildErrorKind::*; + + match self.kind { + NFA(ref err) => Some(err), + Word(ref err) => Some(err), + _ => None, + } + } +} + +impl core::fmt::Display for BuildError { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + use self::BuildErrorKind::*; + + match self.kind { + NFA(_) => write!(f, "error building NFA"), + Word(_) => write!(f, "NFA contains Unicode word boundary"), + TooManyStates { limit } => write!( + f, + "one-pass DFA exceeded a limit of {limit:?} \ + for number of states", + ), + TooManyPatterns { limit } => write!( + f, + "one-pass DFA exceeded a limit of {limit:?} \ + for number of patterns", + ), + UnsupportedLook { look } => write!( + f, + "one-pass DFA does not support the {look:?} assertion", + ), + ExceededSizeLimit { limit } => write!( + f, + "one-pass DFA exceeded size limit of {limit:?} during building", + ), + NotOnePass { msg } => write!( + f, + "one-pass DFA could not be built because \ + pattern is not one-pass: {}", + msg, + ), + } + } +} + +#[cfg(all(test, feature = "syntax"))] +mod tests { + use alloc::string::ToString; + + use super::*; + + #[test] + fn fail_conflicting_transition() { + let predicate = |err: &str| err.contains("conflicting transition"); + + let err = DFA::new(r"a*[ab]").unwrap_err().to_string(); + 
assert!(predicate(&err), "{err}"); + } + + #[test] + fn fail_multiple_epsilon() { + let predicate = |err: &str| { + err.contains("multiple epsilon transitions to same state") + }; + + let err = DFA::new(r"(^|$)a").unwrap_err().to_string(); + assert!(predicate(&err), "{err}"); + } + + #[test] + fn fail_multiple_match() { + let predicate = |err: &str| { + err.contains("multiple epsilon transitions to match state") + }; + + let err = DFA::new_many(&[r"^", r"$"]).unwrap_err().to_string(); + assert!(predicate(&err), "{err}"); + } + + // This test is meant to build a one-pass regex with the maximum number of + // possible slots. + // + // NOTE: Remember that the slot limit only applies to explicit capturing + // groups. Any number of implicit capturing groups is supported (up to the + // maximum number of supported patterns), since implicit groups are handled + // by the search loop itself. + #[test] + fn max_slots() { + // One too many... + let pat = r"(a)(b)(c)(d)(e)(f)(g)(h)(i)(j)(k)(l)(m)(n)(o)(p)(q)"; + assert!(DFA::new(pat).is_err()); + // Just right. + let pat = r"(a)(b)(c)(d)(e)(f)(g)(h)(i)(j)(k)(l)(m)(n)(o)(p)"; + assert!(DFA::new(pat).is_ok()); + } + + // This test ensures that the one-pass DFA works with all look-around + // assertions that we expect it to work with. + // + // The utility of this test is that each one-pass transition has a small + // amount of space to store look-around assertions. Currently, there is + // logic in the one-pass constructor to ensure there aren't more than ten + // possible assertions. And indeed, there are only ten possible assertions + // (at time of writing), so this is okay. But conceivably, more assertions + // could be added. So we check that things at least work with what we + // expect them to work with. + #[test] + fn assertions() { + // haystack anchors + assert!(DFA::new(r"^").is_ok()); + assert!(DFA::new(r"$").is_ok()); + + // line anchors + assert!(DFA::new(r"(?m)^").is_ok()); + assert!(DFA::new(r"(?m)$").is_ok()); + assert!(DFA::new(r"(?Rm)^").is_ok()); + assert!(DFA::new(r"(?Rm)$").is_ok()); + + // word boundaries + if cfg!(feature = "unicode-word-boundary") { + assert!(DFA::new(r"\b").is_ok()); + assert!(DFA::new(r"\B").is_ok()); + } + assert!(DFA::new(r"(?-u)\b").is_ok()); + assert!(DFA::new(r"(?-u)\B").is_ok()); + } + + #[cfg(not(miri))] // takes too long on miri + #[test] + fn is_one_pass() { + use crate::util::syntax; + + assert!(DFA::new(r"a*b").is_ok()); + if cfg!(feature = "unicode-perl") { + assert!(DFA::new(r"\w").is_ok()); + } + assert!(DFA::new(r"(?-u)\w*\s").is_ok()); + assert!(DFA::new(r"(?s:.)*?").is_ok()); + assert!(DFA::builder() + .syntax(syntax::Config::new().utf8(false)) + .build(r"(?s-u:.)*?") + .is_ok()); + } + + #[test] + fn is_not_one_pass() { + assert!(DFA::new(r"a*a").is_err()); + assert!(DFA::new(r"(?s-u:.)*?").is_err()); + assert!(DFA::new(r"(?s:.)*?a").is_err()); + } + + #[cfg(not(miri))] + #[test] + fn is_not_one_pass_bigger() { + assert!(DFA::new(r"\w*\s").is_err()); + } +} diff --git a/vendor/regex-automata/src/dfa/regex.rs b/vendor/regex-automata/src/dfa/regex.rs new file mode 100644 index 00000000000000..892c442c8b48a8 --- /dev/null +++ b/vendor/regex-automata/src/dfa/regex.rs @@ -0,0 +1,870 @@ +/*! +A DFA-backed `Regex`. + +This module provides [`Regex`], which is defined generically over the +[`Automaton`] trait. A `Regex` implements convenience routines you might have +come to expect, such as finding the start/end of a match and iterating over +all non-overlapping matches. 
This `Regex` type is limited in its capabilities
+to what a DFA can provide. Therefore, APIs involving capturing groups, for
+example, are not provided.
+
+Internally, a `Regex` is composed of two DFAs. One is a "forward" DFA that
+finds the end offset of a match, whereas the other is a "reverse" DFA that
+finds the start offset of a match.
+
+See the [parent module](crate::dfa) for examples.
+*/
+
+#[cfg(feature = "alloc")]
+use alloc::vec::Vec;
+
+#[cfg(feature = "dfa-build")]
+use crate::dfa::dense::BuildError;
+use crate::{
+    dfa::{automaton::Automaton, dense},
+    util::{iter, search::Input},
+    Anchored, Match, MatchError,
+};
+#[cfg(feature = "alloc")]
+use crate::{
+    dfa::{sparse, StartKind},
+    util::search::MatchKind,
+};
+
+// When the alloc feature is enabled, the regex type sets its A type parameter
+// to default to an owned dense DFA. But without alloc, we set no default. This
+// makes things a lot more convenient in the common case, since writing out the
+// DFA types is pretty annoying.
+//
+// Since we have two different definitions but only want to write one doc
+// string, we use a macro to capture the doc and other attributes once and then
+// repeat them for each definition.
+macro_rules! define_regex_type {
+    ($(#[$doc:meta])*) => {
+        #[cfg(feature = "alloc")]
+        $(#[$doc])*
+        pub struct Regex<A = dense::OwnedDFA> {
+            forward: A,
+            reverse: A,
+        }
+
+        #[cfg(not(feature = "alloc"))]
+        $(#[$doc])*
+        pub struct Regex<A> {
+            forward: A,
+            reverse: A,
+        }
+    };
+}
+
+define_regex_type!(
+    /// A regular expression that uses deterministic finite automata for fast
+    /// searching.
+    ///
+    /// A regular expression is comprised of two DFAs, a "forward" DFA and a
+    /// "reverse" DFA. The forward DFA is responsible for detecting the end of
+    /// a match while the reverse DFA is responsible for detecting the start
+    /// of a match. Thus, in order to find the bounds of any given match, a
+    /// forward search must first be run followed by a reverse search. A match
+    /// found by the forward DFA guarantees that the reverse DFA will also find
+    /// a match.
+    ///
+    /// The type of the DFA used by a `Regex` corresponds to the `A` type
+    /// parameter, which must satisfy the [`Automaton`] trait. Typically, `A`
+    /// is either a [`dense::DFA`] or a [`sparse::DFA`], where dense DFAs use
+    /// more memory but search faster, while sparse DFAs use less memory but
+    /// search more slowly.
+    ///
+    /// # Crate features
+    ///
+    /// Note that despite what the documentation auto-generates, the _only_
+    /// crate feature needed to use this type is `dfa-search`. You do _not_
+    /// need to enable the `alloc` feature.
+    ///
+    /// By default, a regex's automaton type parameter is set to
+    /// `dense::DFA<Vec<u32>>` when the `alloc` feature is enabled. For most
+    /// in-memory work loads, this is the most convenient type that gives the
+    /// best search performance. When the `alloc` feature is disabled, no
+    /// default type is used.
+    ///
+    /// # When should I use this?
+    ///
+    /// Generally speaking, if you can afford the overhead of building a full
+    /// DFA for your regex, and you don't need things like capturing groups,
+    /// then this is a good choice if you're looking to optimize for matching
+    /// speed. Note however that its speed may be worse than a general purpose
+    /// regex engine if you don't provide a [`dense::Config::prefilter`] to the
+    /// underlying DFA.
+    ///
+    /// # Sparse DFAs
+    ///
+    /// Since a `Regex` is generic over the [`Automaton`] trait, it can be
While this crate constructs dense DFAs by + /// default, it is easy enough to build corresponding sparse DFAs, and then + /// build a regex from them: + /// + /// ``` + /// use regex_automata::dfa::regex::Regex; + /// + /// // First, build a regex that uses dense DFAs. + /// let dense_re = Regex::new("foo[0-9]+")?; + /// + /// // Second, build sparse DFAs from the forward and reverse dense DFAs. + /// let fwd = dense_re.forward().to_sparse()?; + /// let rev = dense_re.reverse().to_sparse()?; + /// + /// // Third, build a new regex from the constituent sparse DFAs. + /// let sparse_re = Regex::builder().build_from_dfas(fwd, rev); + /// + /// // A regex that uses sparse DFAs can be used just like with dense DFAs. + /// assert_eq!(true, sparse_re.is_match(b"foo123")); + /// + /// # Ok::<(), Box>(()) + /// ``` + /// + /// Alternatively, one can use a [`Builder`] to construct a sparse DFA + /// more succinctly. (Note though that dense DFAs are still constructed + /// first internally, and then converted to sparse DFAs, as in the example + /// above.) + /// + /// ``` + /// use regex_automata::dfa::regex::Regex; + /// + /// let sparse_re = Regex::builder().build_sparse(r"foo[0-9]+")?; + /// // A regex that uses sparse DFAs can be used just like with dense DFAs. + /// assert!(sparse_re.is_match(b"foo123")); + /// + /// # Ok::<(), Box>(()) + /// ``` + /// + /// # Fallibility + /// + /// Most of the search routines defined on this type will _panic_ when the + /// underlying search fails. This might be because the DFA gave up because + /// it saw a quit byte, whether configured explicitly or via heuristic + /// Unicode word boundary support, although neither are enabled by default. + /// Or it might fail because an invalid `Input` configuration is given, + /// for example, with an unsupported [`Anchored`] mode. + /// + /// If you need to handle these error cases instead of allowing them to + /// trigger a panic, then the lower level [`Regex::try_search`] provides + /// a fallible API that never panics. + /// + /// # Example + /// + /// This example shows how to cause a search to terminate if it sees a + /// `\n` byte, and handle the error returned. This could be useful if, for + /// example, you wanted to prevent a user supplied pattern from matching + /// across a line boundary. + /// + /// ``` + /// # if cfg!(miri) { return Ok(()); } // miri takes too long + /// use regex_automata::{dfa::{self, regex::Regex}, Input, MatchError}; + /// + /// let re = Regex::builder() + /// .dense(dfa::dense::Config::new().quit(b'\n', true)) + /// .build(r"foo\p{any}+bar")?; + /// + /// let input = Input::new("foo\nbar"); + /// // Normally this would produce a match, since \p{any} contains '\n'. + /// // But since we instructed the automaton to enter a quit state if a + /// // '\n' is observed, this produces a match error instead. + /// let expected = MatchError::quit(b'\n', 3); + /// let got = re.try_search(&input).unwrap_err(); + /// assert_eq!(expected, got); + /// + /// # Ok::<(), Box>(()) + /// ``` + #[derive(Clone, Debug)] +); + +#[cfg(all(feature = "syntax", feature = "dfa-build"))] +impl Regex { + /// Parse the given regular expression using the default configuration and + /// return the corresponding regex. + /// + /// If you want a non-default configuration, then use the [`Builder`] to + /// set your own configuration. 
+    ///
+    /// # Example
+    ///
+    /// ```
+    /// use regex_automata::{Match, dfa::regex::Regex};
+    ///
+    /// let re = Regex::new("foo[0-9]+bar")?;
+    /// assert_eq!(
+    ///     Some(Match::must(0, 3..14)),
+    ///     re.find(b"zzzfoo12345barzzz"),
+    /// );
+    /// # Ok::<(), Box<dyn std::error::Error>>(())
+    /// ```
+    pub fn new(pattern: &str) -> Result<Regex, BuildError> {
+        Builder::new().build(pattern)
+    }
+
+    /// Like `new`, but parses multiple patterns into a single "regex set."
+    /// This similarly uses the default regex configuration.
+    ///
+    /// # Example
+    ///
+    /// ```
+    /// use regex_automata::{Match, dfa::regex::Regex};
+    ///
+    /// let re = Regex::new_many(&["[a-z]+", "[0-9]+"])?;
+    ///
+    /// let mut it = re.find_iter(b"abc 1 foo 4567 0 quux");
+    /// assert_eq!(Some(Match::must(0, 0..3)), it.next());
+    /// assert_eq!(Some(Match::must(1, 4..5)), it.next());
+    /// assert_eq!(Some(Match::must(0, 6..9)), it.next());
+    /// assert_eq!(Some(Match::must(1, 10..14)), it.next());
+    /// assert_eq!(Some(Match::must(1, 15..16)), it.next());
+    /// assert_eq!(Some(Match::must(0, 17..21)), it.next());
+    /// assert_eq!(None, it.next());
+    /// # Ok::<(), Box<dyn std::error::Error>>(())
+    /// ```
+    pub fn new_many<P: AsRef<str>>(
+        patterns: &[P],
+    ) -> Result<Regex, BuildError> {
+        Builder::new().build_many(patterns)
+    }
+}
+
+#[cfg(all(feature = "syntax", feature = "dfa-build"))]
+impl Regex<sparse::DFA<Vec<u8>>> {
+    /// Parse the given regular expression using the default configuration,
+    /// except using sparse DFAs, and return the corresponding regex.
+    ///
+    /// If you want a non-default configuration, then use the [`Builder`] to
+    /// set your own configuration.
+    ///
+    /// # Example
+    ///
+    /// ```
+    /// use regex_automata::{Match, dfa::regex::Regex};
+    ///
+    /// let re = Regex::new_sparse("foo[0-9]+bar")?;
+    /// assert_eq!(
+    ///     Some(Match::must(0, 3..14)),
+    ///     re.find(b"zzzfoo12345barzzz"),
+    /// );
+    /// # Ok::<(), Box<dyn std::error::Error>>(())
+    /// ```
+    pub fn new_sparse(
+        pattern: &str,
+    ) -> Result<Regex<sparse::DFA<Vec<u8>>>, BuildError> {
+        Builder::new().build_sparse(pattern)
+    }
+
+    /// Like `new`, but parses multiple patterns into a single "regex set"
+    /// using sparse DFAs. This otherwise similarly uses the default regex
+    /// configuration.
+    ///
+    /// # Example
+    ///
+    /// ```
+    /// use regex_automata::{Match, dfa::regex::Regex};
+    ///
+    /// let re = Regex::new_many_sparse(&["[a-z]+", "[0-9]+"])?;
+    ///
+    /// let mut it = re.find_iter(b"abc 1 foo 4567 0 quux");
+    /// assert_eq!(Some(Match::must(0, 0..3)), it.next());
+    /// assert_eq!(Some(Match::must(1, 4..5)), it.next());
+    /// assert_eq!(Some(Match::must(0, 6..9)), it.next());
+    /// assert_eq!(Some(Match::must(1, 10..14)), it.next());
+    /// assert_eq!(Some(Match::must(1, 15..16)), it.next());
+    /// assert_eq!(Some(Match::must(0, 17..21)), it.next());
+    /// assert_eq!(None, it.next());
+    /// # Ok::<(), Box<dyn std::error::Error>>(())
+    /// ```
+    pub fn new_many_sparse<P: AsRef<str>>(
+        patterns: &[P],
+    ) -> Result<Regex<sparse::DFA<Vec<u8>>>, BuildError> {
+        Builder::new().build_many_sparse(patterns)
+    }
+}
+
+/// Convenience routines for regex construction.
+impl Regex<dense::DFA<&'static [u32]>> {
+    /// Return a builder for configuring the construction of a `Regex`.
+    ///
+    /// This is a convenience routine to avoid needing to import the
+    /// [`Builder`] type in common cases.
+    ///
+    /// # Example
+    ///
+    /// This example shows how to use the builder to disable UTF-8 mode
+    /// everywhere.
+ /// + /// ``` + /// # if cfg!(miri) { return Ok(()); } // miri takes too long + /// use regex_automata::{ + /// dfa::regex::Regex, nfa::thompson, util::syntax, Match, + /// }; + /// + /// let re = Regex::builder() + /// .syntax(syntax::Config::new().utf8(false)) + /// .thompson(thompson::Config::new().utf8(false)) + /// .build(r"foo(?-u:[^b])ar.*")?; + /// let haystack = b"\xFEfoo\xFFarzz\xE2\x98\xFF\n"; + /// let expected = Some(Match::must(0, 1..9)); + /// let got = re.find(haystack); + /// assert_eq!(expected, got); + /// + /// # Ok::<(), Box>(()) + /// ``` + pub fn builder() -> Builder { + Builder::new() + } +} + +/// Standard search routines for finding and iterating over matches. +impl Regex { + /// Returns true if and only if this regex matches the given haystack. + /// + /// This routine may short circuit if it knows that scanning future input + /// will never lead to a different result. In particular, if the underlying + /// DFA enters a match state or a dead state, then this routine will return + /// `true` or `false`, respectively, without inspecting any future input. + /// + /// # Panics + /// + /// This routine panics if the search could not complete. This can occur + /// in a number of circumstances: + /// + /// * The configuration of the DFA may permit it to "quit" the search. + /// For example, setting quit bytes or enabling heuristic support for + /// Unicode word boundaries. The default configuration does not enable any + /// option that could result in the DFA quitting. + /// * When the provided `Input` configuration is not supported. For + /// example, by providing an unsupported anchor mode. + /// + /// When a search panics, callers cannot know whether a match exists or + /// not. + /// + /// Use [`Regex::try_search`] if you want to handle these error conditions. + /// + /// # Example + /// + /// ``` + /// use regex_automata::dfa::regex::Regex; + /// + /// let re = Regex::new("foo[0-9]+bar")?; + /// assert_eq!(true, re.is_match("foo12345bar")); + /// assert_eq!(false, re.is_match("foobar")); + /// # Ok::<(), Box>(()) + /// ``` + #[inline] + pub fn is_match<'h, I: Into>>(&self, input: I) -> bool { + // Not only can we do an "earliest" search, but we can avoid doing a + // reverse scan too. + let input = input.into().earliest(true); + self.forward().try_search_fwd(&input).map(|x| x.is_some()).unwrap() + } + + /// Returns the start and end offset of the leftmost match. If no match + /// exists, then `None` is returned. + /// + /// # Panics + /// + /// This routine panics if the search could not complete. This can occur + /// in a number of circumstances: + /// + /// * The configuration of the DFA may permit it to "quit" the search. + /// For example, setting quit bytes or enabling heuristic support for + /// Unicode word boundaries. The default configuration does not enable any + /// option that could result in the DFA quitting. + /// * When the provided `Input` configuration is not supported. For + /// example, by providing an unsupported anchor mode. + /// + /// When a search panics, callers cannot know whether a match exists or + /// not. + /// + /// Use [`Regex::try_search`] if you want to handle these error conditions. + /// + /// # Example + /// + /// ``` + /// use regex_automata::{Match, dfa::regex::Regex}; + /// + /// // Greediness is applied appropriately. 
+ /// let re = Regex::new("foo[0-9]+")?; + /// assert_eq!(Some(Match::must(0, 3..11)), re.find("zzzfoo12345zzz")); + /// + /// // Even though a match is found after reading the first byte (`a`), + /// // the default leftmost-first match semantics demand that we find the + /// // earliest match that prefers earlier parts of the pattern over latter + /// // parts. + /// let re = Regex::new("abc|a")?; + /// assert_eq!(Some(Match::must(0, 0..3)), re.find("abc")); + /// # Ok::<(), Box>(()) + /// ``` + #[inline] + pub fn find<'h, I: Into>>(&self, input: I) -> Option { + self.try_search(&input.into()).unwrap() + } + + /// Returns an iterator over all non-overlapping leftmost matches in the + /// given bytes. If no match exists, then the iterator yields no elements. + /// + /// This corresponds to the "standard" regex search iterator. + /// + /// # Panics + /// + /// If the search returns an error during iteration, then iteration + /// panics. See [`Regex::find`] for the panic conditions. + /// + /// Use [`Regex::try_search`] with + /// [`util::iter::Searcher`](crate::util::iter::Searcher) if you want to + /// handle these error conditions. + /// + /// # Example + /// + /// ``` + /// use regex_automata::{Match, dfa::regex::Regex}; + /// + /// let re = Regex::new("foo[0-9]+")?; + /// let text = "foo1 foo12 foo123"; + /// let matches: Vec = re.find_iter(text).collect(); + /// assert_eq!(matches, vec![ + /// Match::must(0, 0..4), + /// Match::must(0, 5..10), + /// Match::must(0, 11..17), + /// ]); + /// # Ok::<(), Box>(()) + /// ``` + #[inline] + pub fn find_iter<'r, 'h, I: Into>>( + &'r self, + input: I, + ) -> FindMatches<'r, 'h, A> { + let it = iter::Searcher::new(input.into()); + FindMatches { re: self, it } + } +} + +/// Lower level fallible search routines that permit controlling where the +/// search starts and ends in a particular sequence. +impl Regex { + /// Returns the start and end offset of the leftmost match. If no match + /// exists, then `None` is returned. + /// + /// This is like [`Regex::find`] but with two differences: + /// + /// 1. It is not generic over `Into` and instead accepts a + /// `&Input`. This permits reusing the same `Input` for multiple searches + /// without needing to create a new one. This _may_ help with latency. + /// 2. It returns an error if the search could not complete where as + /// [`Regex::find`] will panic. + /// + /// # Errors + /// + /// This routine errors if the search could not complete. This can occur + /// in the following circumstances: + /// + /// * The configuration of the DFA may permit it to "quit" the search. + /// For example, setting quit bytes or enabling heuristic support for + /// Unicode word boundaries. The default configuration does not enable any + /// option that could result in the DFA quitting. + /// * When the provided `Input` configuration is not supported. For + /// example, by providing an unsupported anchor mode. + /// + /// When a search returns an error, callers cannot know whether a match + /// exists or not. + #[inline] + pub fn try_search( + &self, + input: &Input<'_>, + ) -> Result, MatchError> { + let (fwd, rev) = (self.forward(), self.reverse()); + let end = match fwd.try_search_fwd(input)? { + None => return Ok(None), + Some(end) => end, + }; + // This special cases an empty match at the beginning of the search. If + // our end matches our start, then since a reverse DFA can't match past + // the start, it must follow that our starting position is also our end + // position. 
So short circuit and skip the reverse search. + if input.start() == end.offset() { + return Ok(Some(Match::new( + end.pattern(), + end.offset()..end.offset(), + ))); + } + // We can also skip the reverse search if we know our search was + // anchored. This occurs either when the input config is anchored or + // when we know the regex itself is anchored. In this case, we know the + // start of the match, if one is found, must be the start of the + // search. + if self.is_anchored(input) { + return Ok(Some(Match::new( + end.pattern(), + input.start()..end.offset(), + ))); + } + // N.B. I have tentatively convinced myself that it isn't necessary + // to specify the specific pattern for the reverse search since the + // reverse search will always find the same pattern to match as the + // forward search. But I lack a rigorous proof. Why not just provide + // the pattern anyway? Well, if it is needed, then leaving it out + // gives us a chance to find a witness. (Also, if we don't need to + // specify the pattern, then we don't need to build the reverse DFA + // with 'starts_for_each_pattern' enabled.) + // + // We also need to be careful to disable 'earliest' for the reverse + // search, since it could be enabled for the forward search. In the + // reverse case, to satisfy "leftmost" criteria, we need to match + // as much as we can. We also need to be careful to make the search + // anchored. We don't want the reverse search to report any matches + // other than the one beginning at the end of our forward search. + let revsearch = input + .clone() + .span(input.start()..end.offset()) + .anchored(Anchored::Yes) + .earliest(false); + let start = rev + .try_search_rev(&revsearch)? + .expect("reverse search must match if forward search does"); + assert_eq!( + start.pattern(), + end.pattern(), + "forward and reverse search must match same pattern", + ); + assert!(start.offset() <= end.offset()); + Ok(Some(Match::new(end.pattern(), start.offset()..end.offset()))) + } + + /// Returns true if either the given input specifies an anchored search + /// or if the underlying DFA is always anchored. + fn is_anchored(&self, input: &Input<'_>) -> bool { + match input.get_anchored() { + Anchored::No => self.forward().is_always_start_anchored(), + Anchored::Yes | Anchored::Pattern(_) => true, + } + } +} + +/// Non-search APIs for querying information about the regex and setting a +/// prefilter. +impl Regex { + /// Return the underlying DFA responsible for forward matching. + /// + /// This is useful for accessing the underlying DFA and converting it to + /// some other format or size. See the [`Builder::build_from_dfas`] docs + /// for an example of where this might be useful. + pub fn forward(&self) -> &A { + &self.forward + } + + /// Return the underlying DFA responsible for reverse matching. + /// + /// This is useful for accessing the underlying DFA and converting it to + /// some other format or size. See the [`Builder::build_from_dfas`] docs + /// for an example of where this might be useful. + pub fn reverse(&self) -> &A { + &self.reverse + } + + /// Returns the total number of patterns matched by this regex. 
+ /// + /// # Example + /// + /// ``` + /// # if cfg!(miri) { return Ok(()); } // miri takes too long + /// use regex_automata::dfa::regex::Regex; + /// + /// let re = Regex::new_many(&[r"[a-z]+", r"[0-9]+", r"\w+"])?; + /// assert_eq!(3, re.pattern_len()); + /// # Ok::<(), Box>(()) + /// ``` + pub fn pattern_len(&self) -> usize { + assert_eq!(self.forward().pattern_len(), self.reverse().pattern_len()); + self.forward().pattern_len() + } +} + +/// An iterator over all non-overlapping matches for an infallible search. +/// +/// The iterator yields a [`Match`] value until no more matches could be found. +/// If the underlying regex engine returns an error, then a panic occurs. +/// +/// The type parameters are as follows: +/// +/// * `A` represents the type of the underlying DFA that implements the +/// [`Automaton`] trait. +/// +/// The lifetime parameters are as follows: +/// +/// * `'h` represents the lifetime of the haystack being searched. +/// * `'r` represents the lifetime of the regex object itself. +/// +/// This iterator can be created with the [`Regex::find_iter`] method. +#[derive(Debug)] +pub struct FindMatches<'r, 'h, A> { + re: &'r Regex, + it: iter::Searcher<'h>, +} + +impl<'r, 'h, A: Automaton> Iterator for FindMatches<'r, 'h, A> { + type Item = Match; + + #[inline] + fn next(&mut self) -> Option { + let FindMatches { re, ref mut it } = *self; + it.advance(|input| re.try_search(input)) + } +} + +/// A builder for a regex based on deterministic finite automatons. +/// +/// This builder permits configuring options for the syntax of a pattern, the +/// NFA construction, the DFA construction and finally the regex searching +/// itself. This builder is different from a general purpose regex builder in +/// that it permits fine grain configuration of the construction process. The +/// trade off for this is complexity, and the possibility of setting a +/// configuration that might not make sense. For example, there are two +/// different UTF-8 modes: +/// +/// * [`syntax::Config::utf8`](crate::util::syntax::Config::utf8) controls +/// whether the pattern itself can contain sub-expressions that match invalid +/// UTF-8. +/// * [`thompson::Config::utf8`](crate::nfa::thompson::Config::utf8) controls +/// how the regex iterators themselves advance the starting position of the +/// next search when a match with zero length is found. +/// +/// Generally speaking, callers will want to either enable all of these or +/// disable all of these. +/// +/// Internally, building a regex requires building two DFAs, where one is +/// responsible for finding the end of a match and the other is responsible +/// for finding the start of a match. If you only need to detect whether +/// something matched, or only the end of a match, then you should use a +/// [`dense::Builder`] to construct a single DFA, which is cheaper than +/// building two DFAs. +/// +/// # Build methods +/// +/// This builder has a few "build" methods. In general, it's the result of +/// combining the following parameters: +/// +/// * Building one or many regexes. +/// * Building a regex with dense or sparse DFAs. +/// +/// The simplest "build" method is [`Builder::build`]. It accepts a single +/// pattern and builds a dense DFA using `usize` for the state identifier +/// representation. +/// +/// The most general "build" method is [`Builder::build_many`], which permits +/// building a regex that searches for multiple patterns simultaneously while +/// using a specific state identifier representation. 
+/// +/// The most flexible "build" method, but hardest to use, is +/// [`Builder::build_from_dfas`]. This exposes the fact that a [`Regex`] is +/// just a pair of DFAs, and this method allows you to specify those DFAs +/// exactly. +/// +/// # Example +/// +/// This example shows how to disable UTF-8 mode in the syntax and the regex +/// itself. This is generally what you want for matching on arbitrary bytes. +/// +/// ``` +/// # if cfg!(miri) { return Ok(()); } // miri takes too long +/// use regex_automata::{ +/// dfa::regex::Regex, nfa::thompson, util::syntax, Match, +/// }; +/// +/// let re = Regex::builder() +/// .syntax(syntax::Config::new().utf8(false)) +/// .thompson(thompson::Config::new().utf8(false)) +/// .build(r"foo(?-u:[^b])ar.*")?; +/// let haystack = b"\xFEfoo\xFFarzz\xE2\x98\xFF\n"; +/// let expected = Some(Match::must(0, 1..9)); +/// let got = re.find(haystack); +/// assert_eq!(expected, got); +/// // Notice that `(?-u:[^b])` matches invalid UTF-8, +/// // but the subsequent `.*` does not! Disabling UTF-8 +/// // on the syntax permits this. +/// assert_eq!(b"foo\xFFarzz", &haystack[got.unwrap().range()]); +/// +/// # Ok::<(), Box>(()) +/// ``` +#[derive(Clone, Debug)] +pub struct Builder { + #[cfg(feature = "dfa-build")] + dfa: dense::Builder, +} + +impl Builder { + /// Create a new regex builder with the default configuration. + pub fn new() -> Builder { + Builder { + #[cfg(feature = "dfa-build")] + dfa: dense::Builder::new(), + } + } + + /// Build a regex from the given pattern. + /// + /// If there was a problem parsing or compiling the pattern, then an error + /// is returned. + #[cfg(all(feature = "syntax", feature = "dfa-build"))] + pub fn build(&self, pattern: &str) -> Result { + self.build_many(&[pattern]) + } + + /// Build a regex from the given pattern using sparse DFAs. + /// + /// If there was a problem parsing or compiling the pattern, then an error + /// is returned. + #[cfg(all(feature = "syntax", feature = "dfa-build"))] + pub fn build_sparse( + &self, + pattern: &str, + ) -> Result>>, BuildError> { + self.build_many_sparse(&[pattern]) + } + + /// Build a regex from the given patterns. + #[cfg(all(feature = "syntax", feature = "dfa-build"))] + pub fn build_many>( + &self, + patterns: &[P], + ) -> Result { + let forward = self.dfa.build_many(patterns)?; + let reverse = self + .dfa + .clone() + .configure( + dense::Config::new() + .prefilter(None) + .specialize_start_states(false) + .start_kind(StartKind::Anchored) + .match_kind(MatchKind::All), + ) + .thompson(crate::nfa::thompson::Config::new().reverse(true)) + .build_many(patterns)?; + Ok(self.build_from_dfas(forward, reverse)) + } + + /// Build a sparse regex from the given patterns. + #[cfg(all(feature = "syntax", feature = "dfa-build"))] + pub fn build_many_sparse>( + &self, + patterns: &[P], + ) -> Result>>, BuildError> { + let re = self.build_many(patterns)?; + let forward = re.forward().to_sparse()?; + let reverse = re.reverse().to_sparse()?; + Ok(self.build_from_dfas(forward, reverse)) + } + + /// Build a regex from its component forward and reverse DFAs. + /// + /// This is useful when deserializing a regex from some arbitrary + /// memory region. This is also useful for building regexes from other + /// types of DFAs. + /// + /// If you're building the DFAs from scratch instead of building new DFAs + /// from other DFAs, then you'll need to make sure that the reverse DFA is + /// configured correctly to match the intended semantics. Namely: + /// + /// * It should be anchored. 
+ /// * It should use [`MatchKind::All`] semantics. + /// * It should match in reverse. + /// * Otherwise, its configuration should match the forward DFA. + /// + /// If these conditions aren't satisfied, then the behavior of searches is + /// unspecified. + /// + /// Note that when using this constructor, no configuration is applied. + /// Since this routine provides the DFAs to the builder, there is no + /// opportunity to apply other configuration options. + /// + /// # Example + /// + /// This example is a bit a contrived. The usual use of these methods + /// would involve serializing `initial_re` somewhere and then deserializing + /// it later to build a regex. But in this case, we do everything in + /// memory. + /// + /// ``` + /// use regex_automata::dfa::regex::Regex; + /// + /// let initial_re = Regex::new("foo[0-9]+")?; + /// assert_eq!(true, initial_re.is_match(b"foo123")); + /// + /// let (fwd, rev) = (initial_re.forward(), initial_re.reverse()); + /// let re = Regex::builder().build_from_dfas(fwd, rev); + /// assert_eq!(true, re.is_match(b"foo123")); + /// # Ok::<(), Box>(()) + /// ``` + /// + /// This example shows how to build a `Regex` that uses sparse DFAs instead + /// of dense DFAs without using one of the convenience `build_sparse` + /// routines: + /// + /// ``` + /// use regex_automata::dfa::regex::Regex; + /// + /// let initial_re = Regex::new("foo[0-9]+")?; + /// assert_eq!(true, initial_re.is_match(b"foo123")); + /// + /// let fwd = initial_re.forward().to_sparse()?; + /// let rev = initial_re.reverse().to_sparse()?; + /// let re = Regex::builder().build_from_dfas(fwd, rev); + /// assert_eq!(true, re.is_match(b"foo123")); + /// # Ok::<(), Box>(()) + /// ``` + pub fn build_from_dfas( + &self, + forward: A, + reverse: A, + ) -> Regex { + Regex { forward, reverse } + } + + /// Set the syntax configuration for this builder using + /// [`syntax::Config`](crate::util::syntax::Config). + /// + /// This permits setting things like case insensitivity, Unicode and multi + /// line mode. + #[cfg(all(feature = "syntax", feature = "dfa-build"))] + pub fn syntax( + &mut self, + config: crate::util::syntax::Config, + ) -> &mut Builder { + self.dfa.syntax(config); + self + } + + /// Set the Thompson NFA configuration for this builder using + /// [`nfa::thompson::Config`](crate::nfa::thompson::Config). + /// + /// This permits setting things like whether additional time should be + /// spent shrinking the size of the NFA. + #[cfg(all(feature = "syntax", feature = "dfa-build"))] + pub fn thompson( + &mut self, + config: crate::nfa::thompson::Config, + ) -> &mut Builder { + self.dfa.thompson(config); + self + } + + /// Set the dense DFA compilation configuration for this builder using + /// [`dense::Config`]. + /// + /// This permits setting things like whether the underlying DFAs should + /// be minimized. + #[cfg(feature = "dfa-build")] + pub fn dense(&mut self, config: dense::Config) -> &mut Builder { + self.dfa.configure(config); + self + } +} + +impl Default for Builder { + fn default() -> Builder { + Builder::new() + } +} diff --git a/vendor/regex-automata/src/dfa/remapper.rs b/vendor/regex-automata/src/dfa/remapper.rs new file mode 100644 index 00000000000000..6e496467210b94 --- /dev/null +++ b/vendor/regex-automata/src/dfa/remapper.rs @@ -0,0 +1,242 @@ +use alloc::vec::Vec; + +use crate::util::primitives::StateID; + +/// Remappable is a tightly coupled abstraction that facilitates remapping +/// state identifiers in DFAs. 
+///
+/// The main idea behind remapping state IDs is that DFAs often need to check
+/// if a certain state is a "special" state of some kind (like a match state)
+/// during a search. Since this is extremely perf critical code, we want this
+/// check to be as fast as possible. Partitioning state IDs into, for example,
+/// "non-match" and "match" states means one can tell if a state is a
+/// match state via a simple comparison of the state ID.
+///
+/// The issue is that during the DFA construction process, it's not
+/// particularly easy to partition the states. Instead, the simplest thing is
+/// to often just do a pass over all of the states and shuffle them into their
+/// desired partitionings. To do that, we need a mechanism for swapping states.
+/// Hence, this abstraction.
+///
+/// Normally, for such little code, I would just duplicate it. But this is a
+/// key optimization and the implementation is a bit subtle. So the abstraction
+/// is basically a ham-fisted attempt at DRY. The only place we use this is in
+/// the dense and one-pass DFAs.
+///
+/// See also src/dfa/special.rs for a more detailed explanation of how dense
+/// DFAs are partitioned.
+pub(super) trait Remappable: core::fmt::Debug {
+    /// Return the total number of states.
+    fn state_len(&self) -> usize;
+    /// Return the power-of-2 exponent that yields the stride. The pertinent
+    /// laws here are, where N=stride2: 2^N=stride and len(alphabet) <= stride.
+    fn stride2(&self) -> usize;
+    /// Swap the states pointed to by the given IDs. The underlying finite
+    /// state machine should be mutated such that all of the transitions in
+    /// `id1` are now in the memory region where the transitions for `id2`
+    /// were, and all of the transitions in `id2` are now in the memory region
+    /// where the transitions for `id1` were.
+    ///
+    /// Essentially, this "moves" `id1` to `id2` and `id2` to `id1`.
+    ///
+    /// It is expected that, after calling this, the underlying value will be
+    /// left in an inconsistent state, since any other transitions pointing to,
+    /// e.g., `id1` need to be updated to point to `id2`, since that's where
+    /// `id1` moved to.
+    ///
+    /// In order to "fix" the underlying inconsistent state, a `Remapper`
+    /// should be used to guarantee that `remap` is called at the appropriate
+    /// time.
+    fn swap_states(&mut self, id1: StateID, id2: StateID);
+    /// This must remap every single state ID in the underlying value according
+    /// to the function given. For example, in a DFA, this should remap every
+    /// transition and every starting state ID.
+    fn remap(&mut self, map: impl Fn(StateID) -> StateID);
+}
+
+/// Remapper is an abstraction that manages the remapping of state IDs in a
+/// finite state machine. This is useful when one wants to shuffle states into
+/// different positions in the machine.
+///
+/// One of the key complexities this manages is the ability to correctly move
+/// one state multiple times.
+///
+/// Once shuffling is complete, `remap` must be called, which will rewrite
+/// all pertinent transitions to updated state IDs. Neglecting to call `remap`
+/// will almost certainly result in a corrupt machine.
+#[derive(Debug)]
+pub(super) struct Remapper {
+    /// A map from the index of a state to its pre-multiplied identifier.
+    ///
+    /// When a state is swapped with another, then their corresponding
+    /// locations in this map are also swapped. Thus, its new position will
+    /// still point to its old pre-multiplied StateID.
+    ///
+    /// While there is a bit more to it, this then allows us to rewrite the
+    /// state IDs in a DFA's transition table in a single pass. This is done
+    /// by iterating over every ID in this map, then iterating over each
+    /// transition for the state at that ID and re-mapping the transition from
+    /// `old_id` to `map[dfa.to_index(old_id)]`. That is, we find the position
+    /// in this map where `old_id` *started*, and set it to where it ended up
+    /// after all swaps have been completed.
+    map: Vec<StateID>,
+    /// A mapper from state index to state ID (and back).
+    idxmap: IndexMapper,
+}
+
+impl Remapper {
+    /// Create a new remapper from the given remappable implementation. The
+    /// remapper can then be used to swap states. The remappable value given
+    /// here must be the same one given to `swap` and `remap`.
+    pub(super) fn new(r: &impl Remappable) -> Remapper {
+        let idxmap = IndexMapper { stride2: r.stride2() };
+        let map = (0..r.state_len()).map(|i| idxmap.to_state_id(i)).collect();
+        Remapper { map, idxmap }
+    }
+
+    /// Swap two states. Once this is called, callers must follow through to
+    /// call `remap`, or else it's possible for the underlying remappable
+    /// value to be in a corrupt state.
+    pub(super) fn swap(
+        &mut self,
+        r: &mut impl Remappable,
+        id1: StateID,
+        id2: StateID,
+    ) {
+        if id1 == id2 {
+            return;
+        }
+        r.swap_states(id1, id2);
+        self.map.swap(self.idxmap.to_index(id1), self.idxmap.to_index(id2));
+    }
+
+    /// Complete the remapping process by rewriting all state IDs in the
+    /// remappable value according to the swaps performed.
+    pub(super) fn remap(mut self, r: &mut impl Remappable) {
+        // Update the map to account for states that have been swapped
+        // multiple times. For example, if (A, C) and (C, G) are swapped, then
+        // transitions previously pointing to A should now point to G. But if
+        // we don't update our map, they will erroneously be set to C. All we
+        // do is follow the swaps in our map until we see our original state
+        // ID.
+        //
+        // The intuition here is to think about how changes are made to the
+        // map: only through pairwise swaps. That means that starting at any
+        // given state, it is always possible to find the loop back to that
+        // state by following the swaps represented in the map (which might be
+        // 0 swaps).
+        //
+        // We are also careful to clone the map before starting in order to
+        // freeze it. We use the frozen map to find our loops, since we need to
+        // update our map as well. Without freezing it, our updates could break
+        // the loops referenced above and produce incorrect results.
+        let oldmap = self.map.clone();
+        for i in 0..r.state_len() {
+            let cur_id = self.idxmap.to_state_id(i);
+            let mut new_id = oldmap[i];
+            if cur_id == new_id {
+                continue;
+            }
+            loop {
+                let id = oldmap[self.idxmap.to_index(new_id)];
+                if cur_id == id {
+                    self.map[i] = new_id;
+                    break;
+                }
+                new_id = id;
+            }
+        }
+        r.remap(|next| self.map[self.idxmap.to_index(next)]);
+    }
+}
+
+/// A simple type for mapping between state indices and state IDs.
+///
+/// The reason why this exists is because state IDs are "premultiplied." That
+/// is, in order to get to the transitions for a particular state, one need
+/// only use the state ID as-is, instead of having to multiply it by the
+/// transition table's stride.
+///
+/// The downside of this is that it's inconvenient to map between state IDs
+/// using a dense map, e.g., `Vec<T>`. That's because state IDs look like
+/// `0`, `0+stride`, `0+2*stride`, `0+3*stride`, etc., instead of `0`, `1`,
+/// `2`, `3`, etc.
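+///
+/// For example (an illustrative figure, not taken from any particular DFA):
+/// with `stride2 = 9` the stride is `1 << 9 = 512`, so the state at index
+/// `3` has the premultiplied ID `3 << 9 = 1536`, and `1536 >> 9 = 3` recovers
+/// the index.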
+/// +/// Since our state IDs are premultiplied, we can convert back-and-forth +/// between IDs and indices by simply unmultiplying the IDs and multiplying the +/// indices. +#[derive(Debug)] +struct IndexMapper { + /// The power of 2 corresponding to the stride of the corresponding + /// transition table. 'id >> stride2' de-multiplies an ID while 'index << + /// stride2' pre-multiplies an index to an ID. + stride2: usize, +} + +impl IndexMapper { + /// Convert a state ID to a state index. + fn to_index(&self, id: StateID) -> usize { + id.as_usize() >> self.stride2 + } + + /// Convert a state index to a state ID. + fn to_state_id(&self, index: usize) -> StateID { + // CORRECTNESS: If the given index is not valid, then it is not + // required for this to panic or return a valid state ID. We'll "just" + // wind up with panics or silent logic errors at some other point. + StateID::new_unchecked(index << self.stride2) + } +} + +#[cfg(feature = "dfa-build")] +mod dense { + use crate::{dfa::dense::OwnedDFA, util::primitives::StateID}; + + use super::Remappable; + + impl Remappable for OwnedDFA { + fn state_len(&self) -> usize { + OwnedDFA::state_len(self) + } + + fn stride2(&self) -> usize { + OwnedDFA::stride2(self) + } + + fn swap_states(&mut self, id1: StateID, id2: StateID) { + OwnedDFA::swap_states(self, id1, id2) + } + + fn remap(&mut self, map: impl Fn(StateID) -> StateID) { + OwnedDFA::remap(self, map) + } + } +} + +#[cfg(feature = "dfa-onepass")] +mod onepass { + use crate::{dfa::onepass::DFA, util::primitives::StateID}; + + use super::Remappable; + + impl Remappable for DFA { + fn state_len(&self) -> usize { + DFA::state_len(self) + } + + fn stride2(&self) -> usize { + // We don't do pre-multiplication for the one-pass DFA, so + // returning 0 has the effect of making state IDs and state indices + // equivalent. + 0 + } + + fn swap_states(&mut self, id1: StateID, id2: StateID) { + DFA::swap_states(self, id1, id2) + } + + fn remap(&mut self, map: impl Fn(StateID) -> StateID) { + DFA::remap(self, map) + } + } +} diff --git a/vendor/regex-automata/src/dfa/search.rs b/vendor/regex-automata/src/dfa/search.rs new file mode 100644 index 00000000000000..5a82261f970f11 --- /dev/null +++ b/vendor/regex-automata/src/dfa/search.rs @@ -0,0 +1,644 @@ +use crate::{ + dfa::{ + accel, + automaton::{Automaton, OverlappingState}, + }, + util::{ + prefilter::Prefilter, + primitives::StateID, + search::{Anchored, HalfMatch, Input, Span}, + }, + MatchError, +}; + +#[inline(never)] +pub fn find_fwd( + dfa: &A, + input: &Input<'_>, +) -> Result, MatchError> { + if input.is_done() { + return Ok(None); + } + let pre = if input.get_anchored().is_anchored() { + None + } else { + dfa.get_prefilter() + }; + // Searching with a pattern ID is always anchored, so we should never use + // a prefilter. + if pre.is_some() { + if input.get_earliest() { + find_fwd_imp(dfa, input, pre, true) + } else { + find_fwd_imp(dfa, input, pre, false) + } + } else { + if input.get_earliest() { + find_fwd_imp(dfa, input, None, true) + } else { + find_fwd_imp(dfa, input, None, false) + } + } +} + +#[cfg_attr(feature = "perf-inline", inline(always))] +fn find_fwd_imp( + dfa: &A, + input: &Input<'_>, + pre: Option<&'_ Prefilter>, + earliest: bool, +) -> Result, MatchError> { + // See 'prefilter_restart' docs for explanation. 
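+    // In short: when the DFA has a universal unanchored start state, the
+    // start state does not depend on the bytes preceding the search position,
+    // so there is no need to re-compute it after jumping ahead to a prefilter
+    // candidate.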
+ let universal_start = dfa.universal_start_state(Anchored::No).is_some(); + let mut mat = None; + let mut sid = init_fwd(dfa, input)?; + let mut at = input.start(); + // This could just be a closure, but then I think it would be unsound + // because it would need to be safe to invoke. This way, the lack of safety + // is clearer in the code below. + macro_rules! next_unchecked { + ($sid:expr, $at:expr) => {{ + let byte = *input.haystack().get_unchecked($at); + dfa.next_state_unchecked($sid, byte) + }}; + } + + if let Some(ref pre) = pre { + let span = Span::from(at..input.end()); + // If a prefilter doesn't report false positives, then we don't need to + // touch the DFA at all. However, since all matches include the pattern + // ID, and the prefilter infrastructure doesn't report pattern IDs, we + // limit this optimization to cases where there is exactly one pattern. + // In that case, any match must be the 0th pattern. + match pre.find(input.haystack(), span) { + None => return Ok(mat), + Some(ref span) => { + at = span.start; + if !universal_start { + sid = prefilter_restart(dfa, &input, at)?; + } + } + } + } + while at < input.end() { + // SAFETY: There are two safety invariants we need to uphold here in + // the loops below: that 'sid' and 'prev_sid' are valid state IDs + // for this DFA, and that 'at' is a valid index into 'haystack'. + // For the former, we rely on the invariant that next_state* and + // start_state_forward always returns a valid state ID (given a valid + // state ID in the former case). For the latter safety invariant, we + // always guard unchecked access with a check that 'at' is less than + // 'end', where 'end <= haystack.len()'. In the unrolled loop below, we + // ensure that 'at' is always in bounds. + // + // PERF: See a similar comment in src/hybrid/search.rs that justifies + // this extra work to make the search loop fast. The same reasoning and + // benchmarks apply here. + let mut prev_sid; + while at < input.end() { + prev_sid = unsafe { next_unchecked!(sid, at) }; + if dfa.is_special_state(prev_sid) || at + 3 >= input.end() { + core::mem::swap(&mut prev_sid, &mut sid); + break; + } + at += 1; + + sid = unsafe { next_unchecked!(prev_sid, at) }; + if dfa.is_special_state(sid) { + break; + } + at += 1; + + prev_sid = unsafe { next_unchecked!(sid, at) }; + if dfa.is_special_state(prev_sid) { + core::mem::swap(&mut prev_sid, &mut sid); + break; + } + at += 1; + + sid = unsafe { next_unchecked!(prev_sid, at) }; + if dfa.is_special_state(sid) { + break; + } + at += 1; + } + if dfa.is_special_state(sid) { + if dfa.is_start_state(sid) { + if let Some(ref pre) = pre { + let span = Span::from(at..input.end()); + match pre.find(input.haystack(), span) { + None => return Ok(mat), + Some(ref span) => { + // We want to skip any update to 'at' below + // at the end of this iteration and just + // jump immediately back to the next state + // transition at the leading position of the + // candidate match. + // + // ... but only if we actually made progress + // with our prefilter, otherwise if the start + // state has a self-loop, we can get stuck. 
+ if span.start > at { + at = span.start; + if !universal_start { + sid = prefilter_restart(dfa, &input, at)?; + } + continue; + } + } + } + } else if dfa.is_accel_state(sid) { + let needles = dfa.accelerator(sid); + at = accel::find_fwd(needles, input.haystack(), at + 1) + .unwrap_or(input.end()); + continue; + } + } else if dfa.is_match_state(sid) { + let pattern = dfa.match_pattern(sid, 0); + mat = Some(HalfMatch::new(pattern, at)); + if earliest { + return Ok(mat); + } + if dfa.is_accel_state(sid) { + let needles = dfa.accelerator(sid); + at = accel::find_fwd(needles, input.haystack(), at + 1) + .unwrap_or(input.end()); + continue; + } + } else if dfa.is_accel_state(sid) { + let needs = dfa.accelerator(sid); + at = accel::find_fwd(needs, input.haystack(), at + 1) + .unwrap_or(input.end()); + continue; + } else if dfa.is_dead_state(sid) { + return Ok(mat); + } else { + // It's important that this is a debug_assert, since this can + // actually be tripped even if DFA::from_bytes succeeds and + // returns a supposedly valid DFA. + return Err(MatchError::quit(input.haystack()[at], at)); + } + } + at += 1; + } + eoi_fwd(dfa, input, &mut sid, &mut mat)?; + Ok(mat) +} + +#[inline(never)] +pub fn find_rev( + dfa: &A, + input: &Input<'_>, +) -> Result, MatchError> { + if input.is_done() { + return Ok(None); + } + if input.get_earliest() { + find_rev_imp(dfa, input, true) + } else { + find_rev_imp(dfa, input, false) + } +} + +#[cfg_attr(feature = "perf-inline", inline(always))] +fn find_rev_imp( + dfa: &A, + input: &Input<'_>, + earliest: bool, +) -> Result, MatchError> { + let mut mat = None; + let mut sid = init_rev(dfa, input)?; + // In reverse search, the loop below can't handle the case of searching an + // empty slice. Ideally we could write something congruent to the forward + // search, i.e., 'while at >= start', but 'start' might be 0. Since we use + // an unsigned offset, 'at >= 0' is trivially always true. We could avoid + // this extra case handling by using a signed offset, but Rust makes it + // annoying to do. So... We just handle the empty case separately. + if input.start() == input.end() { + eoi_rev(dfa, input, &mut sid, &mut mat)?; + return Ok(mat); + } + + let mut at = input.end() - 1; + macro_rules! next_unchecked { + ($sid:expr, $at:expr) => {{ + let byte = *input.haystack().get_unchecked($at); + dfa.next_state_unchecked($sid, byte) + }}; + } + loop { + // SAFETY: See comments in 'find_fwd' for a safety argument. 
+ let mut prev_sid; + while at >= input.start() { + prev_sid = unsafe { next_unchecked!(sid, at) }; + if dfa.is_special_state(prev_sid) + || at <= input.start().saturating_add(3) + { + core::mem::swap(&mut prev_sid, &mut sid); + break; + } + at -= 1; + + sid = unsafe { next_unchecked!(prev_sid, at) }; + if dfa.is_special_state(sid) { + break; + } + at -= 1; + + prev_sid = unsafe { next_unchecked!(sid, at) }; + if dfa.is_special_state(prev_sid) { + core::mem::swap(&mut prev_sid, &mut sid); + break; + } + at -= 1; + + sid = unsafe { next_unchecked!(prev_sid, at) }; + if dfa.is_special_state(sid) { + break; + } + at -= 1; + } + if dfa.is_special_state(sid) { + if dfa.is_start_state(sid) { + if dfa.is_accel_state(sid) { + let needles = dfa.accelerator(sid); + at = accel::find_rev(needles, input.haystack(), at) + .map(|i| i + 1) + .unwrap_or(input.start()); + } + } else if dfa.is_match_state(sid) { + let pattern = dfa.match_pattern(sid, 0); + // Since reverse searches report the beginning of a match + // and the beginning is inclusive (not exclusive like the + // end of a match), we add 1 to make it inclusive. + mat = Some(HalfMatch::new(pattern, at + 1)); + if earliest { + return Ok(mat); + } + if dfa.is_accel_state(sid) { + let needles = dfa.accelerator(sid); + at = accel::find_rev(needles, input.haystack(), at) + .map(|i| i + 1) + .unwrap_or(input.start()); + } + } else if dfa.is_accel_state(sid) { + let needles = dfa.accelerator(sid); + // If the accelerator returns nothing, why don't we quit the + // search? Well, if the accelerator doesn't find anything, that + // doesn't mean we don't have a match. It just means that we + // can't leave the current state given one of the 255 possible + // byte values. However, there might be an EOI transition. So + // we set 'at' to the end of the haystack, which will cause + // this loop to stop and fall down into the EOI transition. + at = accel::find_rev(needles, input.haystack(), at) + .map(|i| i + 1) + .unwrap_or(input.start()); + } else if dfa.is_dead_state(sid) { + return Ok(mat); + } else { + return Err(MatchError::quit(input.haystack()[at], at)); + } + } + if at == input.start() { + break; + } + at -= 1; + } + eoi_rev(dfa, input, &mut sid, &mut mat)?; + Ok(mat) +} + +#[inline(never)] +pub fn find_overlapping_fwd( + dfa: &A, + input: &Input<'_>, + state: &mut OverlappingState, +) -> Result<(), MatchError> { + state.mat = None; + if input.is_done() { + return Ok(()); + } + let pre = if input.get_anchored().is_anchored() { + None + } else { + dfa.get_prefilter() + }; + if pre.is_some() { + find_overlapping_fwd_imp(dfa, input, pre, state) + } else { + find_overlapping_fwd_imp(dfa, input, None, state) + } +} + +#[cfg_attr(feature = "perf-inline", inline(always))] +fn find_overlapping_fwd_imp( + dfa: &A, + input: &Input<'_>, + pre: Option<&'_ Prefilter>, + state: &mut OverlappingState, +) -> Result<(), MatchError> { + // See 'prefilter_restart' docs for explanation. + let universal_start = dfa.universal_start_state(Anchored::No).is_some(); + let mut sid = match state.id { + None => { + state.at = input.start(); + init_fwd(dfa, input)? 
+ } + Some(sid) => { + if let Some(match_index) = state.next_match_index { + let match_len = dfa.match_len(sid); + if match_index < match_len { + state.next_match_index = Some(match_index + 1); + let pattern = dfa.match_pattern(sid, match_index); + state.mat = Some(HalfMatch::new(pattern, state.at)); + return Ok(()); + } + } + // Once we've reported all matches at a given position, we need to + // advance the search to the next position. + state.at += 1; + if state.at > input.end() { + return Ok(()); + } + sid + } + }; + + // NOTE: We don't optimize the crap out of this routine primarily because + // it seems like most find_overlapping searches will have higher match + // counts, and thus, throughput is perhaps not as important. But if you + // have a use case for something faster, feel free to file an issue. + while state.at < input.end() { + sid = dfa.next_state(sid, input.haystack()[state.at]); + if dfa.is_special_state(sid) { + state.id = Some(sid); + if dfa.is_start_state(sid) { + if let Some(ref pre) = pre { + let span = Span::from(state.at..input.end()); + match pre.find(input.haystack(), span) { + None => return Ok(()), + Some(ref span) => { + if span.start > state.at { + state.at = span.start; + if !universal_start { + sid = prefilter_restart( + dfa, &input, state.at, + )?; + } + continue; + } + } + } + } else if dfa.is_accel_state(sid) { + let needles = dfa.accelerator(sid); + state.at = accel::find_fwd( + needles, + input.haystack(), + state.at + 1, + ) + .unwrap_or(input.end()); + continue; + } + } else if dfa.is_match_state(sid) { + state.next_match_index = Some(1); + let pattern = dfa.match_pattern(sid, 0); + state.mat = Some(HalfMatch::new(pattern, state.at)); + return Ok(()); + } else if dfa.is_accel_state(sid) { + let needs = dfa.accelerator(sid); + // If the accelerator returns nothing, why don't we quit the + // search? Well, if the accelerator doesn't find anything, that + // doesn't mean we don't have a match. It just means that we + // can't leave the current state given one of the 255 possible + // byte values. However, there might be an EOI transition. So + // we set 'at' to the end of the haystack, which will cause + // this loop to stop and fall down into the EOI transition. + state.at = + accel::find_fwd(needs, input.haystack(), state.at + 1) + .unwrap_or(input.end()); + continue; + } else if dfa.is_dead_state(sid) { + return Ok(()); + } else { + return Err(MatchError::quit( + input.haystack()[state.at], + state.at, + )); + } + } + state.at += 1; + } + + let result = eoi_fwd(dfa, input, &mut sid, &mut state.mat); + state.id = Some(sid); + if state.mat.is_some() { + // '1' is always correct here since if we get to this point, this + // always corresponds to the first (index '0') match discovered at + // this position. So the next match to report at this position (if + // it exists) is at index '1'. 
+ state.next_match_index = Some(1); + } + result +} + +#[inline(never)] +pub(crate) fn find_overlapping_rev( + dfa: &A, + input: &Input<'_>, + state: &mut OverlappingState, +) -> Result<(), MatchError> { + state.mat = None; + if input.is_done() { + return Ok(()); + } + let mut sid = match state.id { + None => { + let sid = init_rev(dfa, input)?; + state.id = Some(sid); + if input.start() == input.end() { + state.rev_eoi = true; + } else { + state.at = input.end() - 1; + } + sid + } + Some(sid) => { + if let Some(match_index) = state.next_match_index { + let match_len = dfa.match_len(sid); + if match_index < match_len { + state.next_match_index = Some(match_index + 1); + let pattern = dfa.match_pattern(sid, match_index); + state.mat = Some(HalfMatch::new(pattern, state.at)); + return Ok(()); + } + } + // Once we've reported all matches at a given position, we need + // to advance the search to the next position. However, if we've + // already followed the EOI transition, then we know we're done + // with the search and there cannot be any more matches to report. + if state.rev_eoi { + return Ok(()); + } else if state.at == input.start() { + // At this point, we should follow the EOI transition. This + // will cause us the skip the main loop below and fall through + // to the final 'eoi_rev' transition. + state.rev_eoi = true; + } else { + // We haven't hit the end of the search yet, so move on. + state.at -= 1; + } + sid + } + }; + while !state.rev_eoi { + sid = dfa.next_state(sid, input.haystack()[state.at]); + if dfa.is_special_state(sid) { + state.id = Some(sid); + if dfa.is_start_state(sid) { + if dfa.is_accel_state(sid) { + let needles = dfa.accelerator(sid); + state.at = + accel::find_rev(needles, input.haystack(), state.at) + .map(|i| i + 1) + .unwrap_or(input.start()); + } + } else if dfa.is_match_state(sid) { + state.next_match_index = Some(1); + let pattern = dfa.match_pattern(sid, 0); + state.mat = Some(HalfMatch::new(pattern, state.at + 1)); + return Ok(()); + } else if dfa.is_accel_state(sid) { + let needles = dfa.accelerator(sid); + // If the accelerator returns nothing, why don't we quit the + // search? Well, if the accelerator doesn't find anything, that + // doesn't mean we don't have a match. It just means that we + // can't leave the current state given one of the 255 possible + // byte values. However, there might be an EOI transition. So + // we set 'at' to the end of the haystack, which will cause + // this loop to stop and fall down into the EOI transition. + state.at = + accel::find_rev(needles, input.haystack(), state.at) + .map(|i| i + 1) + .unwrap_or(input.start()); + } else if dfa.is_dead_state(sid) { + return Ok(()); + } else { + return Err(MatchError::quit( + input.haystack()[state.at], + state.at, + )); + } + } + if state.at == input.start() { + break; + } + state.at -= 1; + } + + let result = eoi_rev(dfa, input, &mut sid, &mut state.mat); + state.rev_eoi = true; + state.id = Some(sid); + if state.mat.is_some() { + // '1' is always correct here since if we get to this point, this + // always corresponds to the first (index '0') match discovered at + // this position. So the next match to report at this position (if + // it exists) is at index '1'. + state.next_match_index = Some(1); + } + result +} + +#[cfg_attr(feature = "perf-inline", inline(always))] +fn init_fwd( + dfa: &A, + input: &Input<'_>, +) -> Result { + let sid = dfa.start_state_forward(input)?; + // Start states can never be match states, since all matches are delayed + // by 1 byte. 
+ debug_assert!(!dfa.is_match_state(sid)); + Ok(sid) +} + +#[cfg_attr(feature = "perf-inline", inline(always))] +fn init_rev( + dfa: &A, + input: &Input<'_>, +) -> Result { + let sid = dfa.start_state_reverse(input)?; + // Start states can never be match states, since all matches are delayed + // by 1 byte. + debug_assert!(!dfa.is_match_state(sid)); + Ok(sid) +} + +#[cfg_attr(feature = "perf-inline", inline(always))] +fn eoi_fwd( + dfa: &A, + input: &Input<'_>, + sid: &mut StateID, + mat: &mut Option, +) -> Result<(), MatchError> { + let sp = input.get_span(); + match input.haystack().get(sp.end) { + Some(&b) => { + *sid = dfa.next_state(*sid, b); + if dfa.is_match_state(*sid) { + let pattern = dfa.match_pattern(*sid, 0); + *mat = Some(HalfMatch::new(pattern, sp.end)); + } else if dfa.is_quit_state(*sid) { + return Err(MatchError::quit(b, sp.end)); + } + } + None => { + *sid = dfa.next_eoi_state(*sid); + if dfa.is_match_state(*sid) { + let pattern = dfa.match_pattern(*sid, 0); + *mat = Some(HalfMatch::new(pattern, input.haystack().len())); + } + } + } + Ok(()) +} + +#[cfg_attr(feature = "perf-inline", inline(always))] +fn eoi_rev( + dfa: &A, + input: &Input<'_>, + sid: &mut StateID, + mat: &mut Option, +) -> Result<(), MatchError> { + let sp = input.get_span(); + if sp.start > 0 { + let byte = input.haystack()[sp.start - 1]; + *sid = dfa.next_state(*sid, byte); + if dfa.is_match_state(*sid) { + let pattern = dfa.match_pattern(*sid, 0); + *mat = Some(HalfMatch::new(pattern, sp.start)); + } else if dfa.is_quit_state(*sid) { + return Err(MatchError::quit(byte, sp.start - 1)); + } + } else { + *sid = dfa.next_eoi_state(*sid); + if dfa.is_match_state(*sid) { + let pattern = dfa.match_pattern(*sid, 0); + *mat = Some(HalfMatch::new(pattern, 0)); + } + } + Ok(()) +} + +/// Re-compute the starting state that a DFA should be in after finding a +/// prefilter candidate match at the position `at`. +/// +/// The function with the same name has a bit more docs in hybrid/search.rs. +#[cfg_attr(feature = "perf-inline", inline(always))] +fn prefilter_restart( + dfa: &A, + input: &Input<'_>, + at: usize, +) -> Result { + let mut input = input.clone(); + input.set_start(at); + init_fwd(dfa, &input) +} diff --git a/vendor/regex-automata/src/dfa/sparse.rs b/vendor/regex-automata/src/dfa/sparse.rs new file mode 100644 index 00000000000000..5de00aca401276 --- /dev/null +++ b/vendor/regex-automata/src/dfa/sparse.rs @@ -0,0 +1,2655 @@ +/*! +Types and routines specific to sparse DFAs. + +This module is the home of [`sparse::DFA`](DFA). + +Unlike the [`dense`] module, this module does not contain a builder or +configuration specific for sparse DFAs. Instead, the intended way to build a +sparse DFA is either by using a default configuration with its constructor +[`sparse::DFA::new`](DFA::new), or by first configuring the construction of a +dense DFA with [`dense::Builder`] and then calling [`dense::DFA::to_sparse`]. +For example, this configures a sparse DFA to do an overlapping search: + +``` +use regex_automata::{ + dfa::{Automaton, OverlappingState, dense}, + HalfMatch, Input, MatchKind, +}; + +let dense_re = dense::Builder::new() + .configure(dense::Config::new().match_kind(MatchKind::All)) + .build(r"Samwise|Sam")?; +let sparse_re = dense_re.to_sparse()?; + +// Setup our haystack and initial start state. +let input = Input::new("Samwise"); +let mut state = OverlappingState::start(); + +// First, 'Sam' will match. 
+sparse_re.try_search_overlapping_fwd(&input, &mut state)?; +assert_eq!(Some(HalfMatch::must(0, 3)), state.get_match()); + +// And now 'Samwise' will match. +sparse_re.try_search_overlapping_fwd(&input, &mut state)?; +assert_eq!(Some(HalfMatch::must(0, 7)), state.get_match()); +# Ok::<(), Box>(()) +``` +*/ + +#[cfg(feature = "dfa-build")] +use core::iter; +use core::{fmt, mem::size_of}; + +#[cfg(feature = "dfa-build")] +use alloc::{vec, vec::Vec}; + +#[cfg(feature = "dfa-build")] +use crate::dfa::dense::{self, BuildError}; +use crate::{ + dfa::{ + automaton::{fmt_state_indicator, Automaton, StartError}, + dense::Flags, + special::Special, + StartKind, DEAD, + }, + util::{ + alphabet::{ByteClasses, ByteSet}, + escape::DebugByte, + int::{Pointer, Usize, U16, U32}, + prefilter::Prefilter, + primitives::{PatternID, StateID}, + search::Anchored, + start::{self, Start, StartByteMap}, + wire::{self, DeserializeError, Endian, SerializeError}, + }, +}; + +const LABEL: &str = "rust-regex-automata-dfa-sparse"; +const VERSION: u32 = 2; + +/// A sparse deterministic finite automaton (DFA) with variable sized states. +/// +/// In contrast to a [dense::DFA], a sparse DFA uses a more space efficient +/// representation for its transitions. Consequently, sparse DFAs may use much +/// less memory than dense DFAs, but this comes at a price. In particular, +/// reading the more space efficient transitions takes more work, and +/// consequently, searching using a sparse DFA is typically slower than a dense +/// DFA. +/// +/// A sparse DFA can be built using the default configuration via the +/// [`DFA::new`] constructor. Otherwise, one can configure various aspects of a +/// dense DFA via [`dense::Builder`], and then convert a dense DFA to a sparse +/// DFA using [`dense::DFA::to_sparse`]. +/// +/// In general, a sparse DFA supports all the same search operations as a dense +/// DFA. +/// +/// Making the choice between a dense and sparse DFA depends on your specific +/// work load. If you can sacrifice a bit of search time performance, then a +/// sparse DFA might be the best choice. In particular, while sparse DFAs are +/// probably always slower than dense DFAs, you may find that they are easily +/// fast enough for your purposes! +/// +/// # Type parameters +/// +/// A `DFA` has one type parameter, `T`, which is used to represent the parts +/// of a sparse DFA. `T` is typically a `Vec` or a `&[u8]`. +/// +/// # The `Automaton` trait +/// +/// This type implements the [`Automaton`] trait, which means it can be used +/// for searching. For example: +/// +/// ``` +/// use regex_automata::{dfa::{Automaton, sparse::DFA}, HalfMatch, Input}; +/// +/// let dfa = DFA::new("foo[0-9]+")?; +/// let expected = Some(HalfMatch::must(0, 8)); +/// assert_eq!(expected, dfa.try_search_fwd(&Input::new("foo12345"))?); +/// # Ok::<(), Box>(()) +/// ``` +#[derive(Clone)] +pub struct DFA { + // When compared to a dense DFA, a sparse DFA *looks* a lot simpler + // representation-wise. In reality, it is perhaps more complicated. Namely, + // in a dense DFA, all information needs to be very cheaply accessible + // using only state IDs. In a sparse DFA however, each state uses a + // variable amount of space because each state encodes more information + // than just its transitions. Each state also includes an accelerator if + // one exists, along with the matching pattern IDs if the state is a match + // state. + // + // That is, a lot of the complexity is pushed down into how each state + // itself is represented. 
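+    //
+    // Roughly (see the encoding logic in `from_dense` below for the exact
+    // details): each state is stored as a 16-bit transition count whose high
+    // bit marks match states, followed by the input byte ranges, the
+    // corresponding next state IDs, then (for match states) a u32-length
+    // prefixed list of pattern IDs, and finally a length-prefixed accelerator
+    // of up to three bytes.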
+ tt: Transitions, + st: StartTable, + special: Special, + pre: Option, + quitset: ByteSet, + flags: Flags, +} + +#[cfg(feature = "dfa-build")] +impl DFA> { + /// Parse the given regular expression using a default configuration and + /// return the corresponding sparse DFA. + /// + /// If you want a non-default configuration, then use the + /// [`dense::Builder`] to set your own configuration, and then call + /// [`dense::DFA::to_sparse`] to create a sparse DFA. + /// + /// # Example + /// + /// ``` + /// use regex_automata::{dfa::{Automaton, sparse}, HalfMatch, Input}; + /// + /// let dfa = sparse::DFA::new("foo[0-9]+bar")?; + /// + /// let expected = Some(HalfMatch::must(0, 11)); + /// assert_eq!(expected, dfa.try_search_fwd(&Input::new("foo12345bar"))?); + /// # Ok::<(), Box>(()) + /// ``` + #[cfg(feature = "syntax")] + pub fn new(pattern: &str) -> Result>, BuildError> { + dense::Builder::new() + .build(pattern) + .and_then(|dense| dense.to_sparse()) + } + + /// Parse the given regular expressions using a default configuration and + /// return the corresponding multi-DFA. + /// + /// If you want a non-default configuration, then use the + /// [`dense::Builder`] to set your own configuration, and then call + /// [`dense::DFA::to_sparse`] to create a sparse DFA. + /// + /// # Example + /// + /// ``` + /// use regex_automata::{dfa::{Automaton, sparse}, HalfMatch, Input}; + /// + /// let dfa = sparse::DFA::new_many(&["[0-9]+", "[a-z]+"])?; + /// let expected = Some(HalfMatch::must(1, 3)); + /// assert_eq!(expected, dfa.try_search_fwd(&Input::new("foo12345bar"))?); + /// # Ok::<(), Box>(()) + /// ``` + #[cfg(feature = "syntax")] + pub fn new_many>( + patterns: &[P], + ) -> Result>, BuildError> { + dense::Builder::new() + .build_many(patterns) + .and_then(|dense| dense.to_sparse()) + } +} + +#[cfg(feature = "dfa-build")] +impl DFA> { + /// Create a new DFA that matches every input. + /// + /// # Example + /// + /// ``` + /// use regex_automata::{ + /// dfa::{Automaton, sparse}, + /// HalfMatch, Input, + /// }; + /// + /// let dfa = sparse::DFA::always_match()?; + /// + /// let expected = Some(HalfMatch::must(0, 0)); + /// assert_eq!(expected, dfa.try_search_fwd(&Input::new(""))?); + /// assert_eq!(expected, dfa.try_search_fwd(&Input::new("foo"))?); + /// # Ok::<(), Box>(()) + /// ``` + pub fn always_match() -> Result>, BuildError> { + dense::DFA::always_match()?.to_sparse() + } + + /// Create a new sparse DFA that never matches any input. + /// + /// # Example + /// + /// ``` + /// use regex_automata::{dfa::{Automaton, sparse}, Input}; + /// + /// let dfa = sparse::DFA::never_match()?; + /// assert_eq!(None, dfa.try_search_fwd(&Input::new(""))?); + /// assert_eq!(None, dfa.try_search_fwd(&Input::new("foo"))?); + /// # Ok::<(), Box>(()) + /// ``` + pub fn never_match() -> Result>, BuildError> { + dense::DFA::never_match()?.to_sparse() + } + + /// The implementation for constructing a sparse DFA from a dense DFA. + pub(crate) fn from_dense>( + dfa: &dense::DFA, + ) -> Result>, BuildError> { + // In order to build the transition table, we need to be able to write + // state identifiers for each of the "next" transitions in each state. + // Our state identifiers correspond to the byte offset in the + // transition table at which the state is encoded. Therefore, we do not + // actually know what the state identifiers are until we've allocated + // exactly as much space as we need for each state. Thus, construction + // of the transition table happens in two passes. 
+ // + // In the first pass, we fill out the shell of each state, which + // includes the transition length, the input byte ranges and + // zero-filled space for the transitions and accelerators, if present. + // In this first pass, we also build up a map from the state identifier + // index of the dense DFA to the state identifier in this sparse DFA. + // + // In the second pass, we fill in the transitions based on the map + // built in the first pass. + + // The capacity given here reflects a minimum. (Well, the true minimum + // is likely even bigger, but hopefully this saves a few reallocs.) + let mut sparse = Vec::with_capacity(StateID::SIZE * dfa.state_len()); + // This maps state indices from the dense DFA to StateIDs in the sparse + // DFA. We build out this map on the first pass, and then use it in the + // second pass to back-fill our transitions. + let mut remap: Vec = vec![DEAD; dfa.state_len()]; + for state in dfa.states() { + let pos = sparse.len(); + + remap[dfa.to_index(state.id())] = StateID::new(pos) + .map_err(|_| BuildError::too_many_states())?; + // zero-filled space for the transition length + sparse.push(0); + sparse.push(0); + + let mut transition_len = 0; + for (unit1, unit2, _) in state.sparse_transitions() { + match (unit1.as_u8(), unit2.as_u8()) { + (Some(b1), Some(b2)) => { + transition_len += 1; + sparse.push(b1); + sparse.push(b2); + } + (None, None) => {} + (Some(_), None) | (None, Some(_)) => { + // can never occur because sparse_transitions never + // groups EOI with any other transition. + unreachable!() + } + } + } + // Add dummy EOI transition. This is never actually read while + // searching, but having space equivalent to the total number + // of transitions is convenient. Otherwise, we'd need to track + // a different number of transitions for the byte ranges as for + // the 'next' states. + // + // N.B. The loop above is not guaranteed to yield the EOI + // transition, since it may point to a DEAD state. By putting + // it here, we always write the EOI transition, and thus + // guarantee that our transition length is >0. Why do we always + // need the EOI transition? Because in order to implement + // Automaton::next_eoi_state, this lets us just ask for the last + // transition. There are probably other/better ways to do this. + transition_len += 1; + sparse.push(0); + sparse.push(0); + + // Check some assumptions about transition length. + assert_ne!( + transition_len, 0, + "transition length should be non-zero", + ); + assert!( + transition_len <= 257, + "expected transition length {transition_len} to be <= 257", + ); + + // Fill in the transition length. + // Since transition length is always <= 257, we use the most + // significant bit to indicate whether this is a match state or + // not. + let ntrans = if dfa.is_match_state(state.id()) { + transition_len | (1 << 15) + } else { + transition_len + }; + wire::NE::write_u16(ntrans, &mut sparse[pos..]); + + // zero-fill the actual transitions. + // Unwraps are OK since transition_length <= 257 and our minimum + // support usize size is 16-bits. + let zeros = usize::try_from(transition_len) + .unwrap() + .checked_mul(StateID::SIZE) + .unwrap(); + sparse.extend(iter::repeat(0).take(zeros)); + + // If this is a match state, write the pattern IDs matched by this + // state. + if dfa.is_match_state(state.id()) { + let plen = dfa.match_pattern_len(state.id()); + // Write the actual pattern IDs with a u32 length prefix. + // First, zero-fill space. 
+ let mut pos = sparse.len(); + // Unwraps are OK since it's guaranteed that plen <= + // PatternID::LIMIT, which is in turn guaranteed to fit into a + // u32. + let zeros = size_of::() + .checked_mul(plen) + .unwrap() + .checked_add(size_of::()) + .unwrap(); + sparse.extend(iter::repeat(0).take(zeros)); + + // Now write the length prefix. + wire::NE::write_u32( + // Will never fail since u32::MAX is invalid pattern ID. + // Thus, the number of pattern IDs is representable by a + // u32. + plen.try_into().expect("pattern ID length fits in u32"), + &mut sparse[pos..], + ); + pos += size_of::(); + + // Now write the pattern IDs. + for &pid in dfa.pattern_id_slice(state.id()) { + pos += wire::write_pattern_id::( + pid, + &mut sparse[pos..], + ); + } + } + + // And now add the accelerator, if one exists. An accelerator is + // at most 4 bytes and at least 1 byte. The first byte is the + // length, N. N bytes follow the length. The set of bytes that + // follow correspond (exhaustively) to the bytes that must be seen + // to leave this state. + let accel = dfa.accelerator(state.id()); + sparse.push(accel.len().try_into().unwrap()); + sparse.extend_from_slice(accel); + } + + let mut new = DFA { + tt: Transitions { + sparse, + classes: dfa.byte_classes().clone(), + state_len: dfa.state_len(), + pattern_len: dfa.pattern_len(), + }, + st: StartTable::from_dense_dfa(dfa, &remap)?, + special: dfa.special().remap(|id| remap[dfa.to_index(id)]), + pre: dfa.get_prefilter().map(|p| p.clone()), + quitset: dfa.quitset().clone(), + flags: dfa.flags().clone(), + }; + // And here's our second pass. Iterate over all of the dense states + // again, and update the transitions in each of the states in the + // sparse DFA. + for old_state in dfa.states() { + let new_id = remap[dfa.to_index(old_state.id())]; + let mut new_state = new.tt.state_mut(new_id); + let sparse = old_state.sparse_transitions(); + for (i, (_, _, next)) in sparse.enumerate() { + let next = remap[dfa.to_index(next)]; + new_state.set_next_at(i, next); + } + } + new.tt.sparse.shrink_to_fit(); + new.st.table.shrink_to_fit(); + debug!( + "created sparse DFA, memory usage: {} (dense memory usage: {})", + new.memory_usage(), + dfa.memory_usage(), + ); + Ok(new) + } +} + +impl> DFA { + /// Cheaply return a borrowed version of this sparse DFA. Specifically, the + /// DFA returned always uses `&[u8]` for its transitions. + pub fn as_ref<'a>(&'a self) -> DFA<&'a [u8]> { + DFA { + tt: self.tt.as_ref(), + st: self.st.as_ref(), + special: self.special, + pre: self.pre.clone(), + quitset: self.quitset, + flags: self.flags, + } + } + + /// Return an owned version of this sparse DFA. Specifically, the DFA + /// returned always uses `Vec` for its transitions. + /// + /// Effectively, this returns a sparse DFA whose transitions live on the + /// heap. + #[cfg(feature = "alloc")] + pub fn to_owned(&self) -> DFA> { + DFA { + tt: self.tt.to_owned(), + st: self.st.to_owned(), + special: self.special, + pre: self.pre.clone(), + quitset: self.quitset, + flags: self.flags, + } + } + + /// Returns the starting state configuration for this DFA. + /// + /// The default is [`StartKind::Both`], which means the DFA supports both + /// unanchored and anchored searches. However, this can generally lead to + /// bigger DFAs. Therefore, a DFA might be compiled with support for just + /// unanchored or anchored searches. In that case, running a search with + /// an unsupported configuration will panic. 
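+    ///
+    /// # Example
+    ///
+    /// A small illustrative example (it assumes the crate's default
+    /// `dfa-build` and `syntax` features are enabled):
+    ///
+    /// ```
+    /// use regex_automata::dfa::{sparse::DFA, StartKind};
+    ///
+    /// let dfa = DFA::new("foo[0-9]+")?;
+    /// // By default, a DFA supports both unanchored and anchored searches.
+    /// assert_eq!(StartKind::Both, dfa.start_kind());
+    /// # Ok::<(), Box<dyn std::error::Error>>(())
+    /// ```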
+ pub fn start_kind(&self) -> StartKind { + self.st.kind + } + + /// Returns true only if this DFA has starting states for each pattern. + /// + /// When a DFA has starting states for each pattern, then a search with the + /// DFA can be configured to only look for anchored matches of a specific + /// pattern. Specifically, APIs like [`Automaton::try_search_fwd`] can + /// accept a [`Anchored::Pattern`] if and only if this method returns true. + /// Otherwise, an error will be returned. + /// + /// Note that if the DFA is empty, this always returns false. + pub fn starts_for_each_pattern(&self) -> bool { + self.st.pattern_len.is_some() + } + + /// Returns the equivalence classes that make up the alphabet for this DFA. + /// + /// Unless [`dense::Config::byte_classes`] was disabled, it is possible + /// that multiple distinct bytes are grouped into the same equivalence + /// class if it is impossible for them to discriminate between a match and + /// a non-match. This has the effect of reducing the overall alphabet size + /// and in turn potentially substantially reducing the size of the DFA's + /// transition table. + /// + /// The downside of using equivalence classes like this is that every state + /// transition will automatically use this map to convert an arbitrary + /// byte to its corresponding equivalence class. In practice this has a + /// negligible impact on performance. + pub fn byte_classes(&self) -> &ByteClasses { + &self.tt.classes + } + + /// Returns the memory usage, in bytes, of this DFA. + /// + /// The memory usage is computed based on the number of bytes used to + /// represent this DFA. + /// + /// This does **not** include the stack size used up by this DFA. To + /// compute that, use `std::mem::size_of::()`. + pub fn memory_usage(&self) -> usize { + self.tt.memory_usage() + self.st.memory_usage() + } +} + +/// Routines for converting a sparse DFA to other representations, such as raw +/// bytes suitable for persistent storage. +impl> DFA { + /// Serialize this DFA as raw bytes to a `Vec` in little endian + /// format. + /// + /// The written bytes are guaranteed to be deserialized correctly and + /// without errors in a semver compatible release of this crate by a + /// `DFA`'s deserialization APIs (assuming all other criteria for the + /// deserialization APIs has been satisfied): + /// + /// * [`DFA::from_bytes`] + /// * [`DFA::from_bytes_unchecked`] + /// + /// Note that unlike a [`dense::DFA`]'s serialization methods, this does + /// not add any initial padding to the returned bytes. Padding isn't + /// required for sparse DFAs since they have no alignment requirements. + /// + /// # Example + /// + /// This example shows how to serialize and deserialize a DFA: + /// + /// ``` + /// use regex_automata::{dfa::{Automaton, sparse::DFA}, HalfMatch, Input}; + /// + /// // Compile our original DFA. + /// let original_dfa = DFA::new("foo[0-9]+")?; + /// + /// // N.B. We use native endianness here to make the example work, but + /// // using to_bytes_little_endian would work on a little endian target. + /// let buf = original_dfa.to_bytes_native_endian(); + /// // Even if buf has initial padding, DFA::from_bytes will automatically + /// // ignore it. 
+ /// let dfa: DFA<&[u8]> = DFA::from_bytes(&buf)?.0; + /// + /// let expected = Some(HalfMatch::must(0, 8)); + /// assert_eq!(expected, dfa.try_search_fwd(&Input::new("foo12345"))?); + /// # Ok::<(), Box>(()) + /// ``` + #[cfg(feature = "dfa-build")] + pub fn to_bytes_little_endian(&self) -> Vec { + self.to_bytes::() + } + + /// Serialize this DFA as raw bytes to a `Vec` in big endian + /// format. + /// + /// The written bytes are guaranteed to be deserialized correctly and + /// without errors in a semver compatible release of this crate by a + /// `DFA`'s deserialization APIs (assuming all other criteria for the + /// deserialization APIs has been satisfied): + /// + /// * [`DFA::from_bytes`] + /// * [`DFA::from_bytes_unchecked`] + /// + /// Note that unlike a [`dense::DFA`]'s serialization methods, this does + /// not add any initial padding to the returned bytes. Padding isn't + /// required for sparse DFAs since they have no alignment requirements. + /// + /// # Example + /// + /// This example shows how to serialize and deserialize a DFA: + /// + /// ``` + /// use regex_automata::{dfa::{Automaton, sparse::DFA}, HalfMatch, Input}; + /// + /// // Compile our original DFA. + /// let original_dfa = DFA::new("foo[0-9]+")?; + /// + /// // N.B. We use native endianness here to make the example work, but + /// // using to_bytes_big_endian would work on a big endian target. + /// let buf = original_dfa.to_bytes_native_endian(); + /// // Even if buf has initial padding, DFA::from_bytes will automatically + /// // ignore it. + /// let dfa: DFA<&[u8]> = DFA::from_bytes(&buf)?.0; + /// + /// let expected = Some(HalfMatch::must(0, 8)); + /// assert_eq!(expected, dfa.try_search_fwd(&Input::new("foo12345"))?); + /// # Ok::<(), Box>(()) + /// ``` + #[cfg(feature = "dfa-build")] + pub fn to_bytes_big_endian(&self) -> Vec { + self.to_bytes::() + } + + /// Serialize this DFA as raw bytes to a `Vec` in native endian + /// format. + /// + /// The written bytes are guaranteed to be deserialized correctly and + /// without errors in a semver compatible release of this crate by a + /// `DFA`'s deserialization APIs (assuming all other criteria for the + /// deserialization APIs has been satisfied): + /// + /// * [`DFA::from_bytes`] + /// * [`DFA::from_bytes_unchecked`] + /// + /// Note that unlike a [`dense::DFA`]'s serialization methods, this does + /// not add any initial padding to the returned bytes. Padding isn't + /// required for sparse DFAs since they have no alignment requirements. + /// + /// Generally speaking, native endian format should only be used when + /// you know that the target you're compiling the DFA for matches the + /// endianness of the target on which you're compiling DFA. For example, + /// if serialization and deserialization happen in the same process or on + /// the same machine. Otherwise, when serializing a DFA for use in a + /// portable environment, you'll almost certainly want to serialize _both_ + /// a little endian and a big endian version and then load the correct one + /// based on the target's configuration. + /// + /// # Example + /// + /// This example shows how to serialize and deserialize a DFA: + /// + /// ``` + /// use regex_automata::{dfa::{Automaton, sparse::DFA}, HalfMatch, Input}; + /// + /// // Compile our original DFA. + /// let original_dfa = DFA::new("foo[0-9]+")?; + /// + /// let buf = original_dfa.to_bytes_native_endian(); + /// // Even if buf has initial padding, DFA::from_bytes will automatically + /// // ignore it. 
+ /// let dfa: DFA<&[u8]> = DFA::from_bytes(&buf)?.0; + /// + /// let expected = Some(HalfMatch::must(0, 8)); + /// assert_eq!(expected, dfa.try_search_fwd(&Input::new("foo12345"))?); + /// # Ok::<(), Box>(()) + /// ``` + #[cfg(feature = "dfa-build")] + pub fn to_bytes_native_endian(&self) -> Vec { + self.to_bytes::() + } + + /// The implementation of the public `to_bytes` serialization methods, + /// which is generic over endianness. + #[cfg(feature = "dfa-build")] + fn to_bytes(&self) -> Vec { + let mut buf = vec![0; self.write_to_len()]; + // This should always succeed since the only possible serialization + // error is providing a buffer that's too small, but we've ensured that + // `buf` is big enough here. + self.write_to::(&mut buf).unwrap(); + buf + } + + /// Serialize this DFA as raw bytes to the given slice, in little endian + /// format. Upon success, the total number of bytes written to `dst` is + /// returned. + /// + /// The written bytes are guaranteed to be deserialized correctly and + /// without errors in a semver compatible release of this crate by a + /// `DFA`'s deserialization APIs (assuming all other criteria for the + /// deserialization APIs has been satisfied): + /// + /// * [`DFA::from_bytes`] + /// * [`DFA::from_bytes_unchecked`] + /// + /// # Errors + /// + /// This returns an error if the given destination slice is not big enough + /// to contain the full serialized DFA. If an error occurs, then nothing + /// is written to `dst`. + /// + /// # Example + /// + /// This example shows how to serialize and deserialize a DFA without + /// dynamic memory allocation. + /// + /// ``` + /// use regex_automata::{dfa::{Automaton, sparse::DFA}, HalfMatch, Input}; + /// + /// // Compile our original DFA. + /// let original_dfa = DFA::new("foo[0-9]+")?; + /// + /// // Create a 4KB buffer on the stack to store our serialized DFA. + /// let mut buf = [0u8; 4 * (1<<10)]; + /// // N.B. We use native endianness here to make the example work, but + /// // using write_to_little_endian would work on a little endian target. + /// let written = original_dfa.write_to_native_endian(&mut buf)?; + /// let dfa: DFA<&[u8]> = DFA::from_bytes(&buf[..written])?.0; + /// + /// let expected = Some(HalfMatch::must(0, 8)); + /// assert_eq!(expected, dfa.try_search_fwd(&Input::new("foo12345"))?); + /// # Ok::<(), Box>(()) + /// ``` + pub fn write_to_little_endian( + &self, + dst: &mut [u8], + ) -> Result { + self.write_to::(dst) + } + + /// Serialize this DFA as raw bytes to the given slice, in big endian + /// format. Upon success, the total number of bytes written to `dst` is + /// returned. + /// + /// The written bytes are guaranteed to be deserialized correctly and + /// without errors in a semver compatible release of this crate by a + /// `DFA`'s deserialization APIs (assuming all other criteria for the + /// deserialization APIs has been satisfied): + /// + /// * [`DFA::from_bytes`] + /// * [`DFA::from_bytes_unchecked`] + /// + /// # Errors + /// + /// This returns an error if the given destination slice is not big enough + /// to contain the full serialized DFA. If an error occurs, then nothing + /// is written to `dst`. + /// + /// # Example + /// + /// This example shows how to serialize and deserialize a DFA without + /// dynamic memory allocation. + /// + /// ``` + /// use regex_automata::{dfa::{Automaton, sparse::DFA}, HalfMatch, Input}; + /// + /// // Compile our original DFA. 
+ /// let original_dfa = DFA::new("foo[0-9]+")?; + /// + /// // Create a 4KB buffer on the stack to store our serialized DFA. + /// let mut buf = [0u8; 4 * (1<<10)]; + /// // N.B. We use native endianness here to make the example work, but + /// // using write_to_big_endian would work on a big endian target. + /// let written = original_dfa.write_to_native_endian(&mut buf)?; + /// let dfa: DFA<&[u8]> = DFA::from_bytes(&buf[..written])?.0; + /// + /// let expected = Some(HalfMatch::must(0, 8)); + /// assert_eq!(expected, dfa.try_search_fwd(&Input::new("foo12345"))?); + /// # Ok::<(), Box>(()) + /// ``` + pub fn write_to_big_endian( + &self, + dst: &mut [u8], + ) -> Result { + self.write_to::(dst) + } + + /// Serialize this DFA as raw bytes to the given slice, in native endian + /// format. Upon success, the total number of bytes written to `dst` is + /// returned. + /// + /// The written bytes are guaranteed to be deserialized correctly and + /// without errors in a semver compatible release of this crate by a + /// `DFA`'s deserialization APIs (assuming all other criteria for the + /// deserialization APIs has been satisfied): + /// + /// * [`DFA::from_bytes`] + /// * [`DFA::from_bytes_unchecked`] + /// + /// Generally speaking, native endian format should only be used when + /// you know that the target you're compiling the DFA for matches the + /// endianness of the target on which you're compiling DFA. For example, + /// if serialization and deserialization happen in the same process or on + /// the same machine. Otherwise, when serializing a DFA for use in a + /// portable environment, you'll almost certainly want to serialize _both_ + /// a little endian and a big endian version and then load the correct one + /// based on the target's configuration. + /// + /// # Errors + /// + /// This returns an error if the given destination slice is not big enough + /// to contain the full serialized DFA. If an error occurs, then nothing + /// is written to `dst`. + /// + /// # Example + /// + /// This example shows how to serialize and deserialize a DFA without + /// dynamic memory allocation. + /// + /// ``` + /// use regex_automata::{dfa::{Automaton, sparse::DFA}, HalfMatch, Input}; + /// + /// // Compile our original DFA. + /// let original_dfa = DFA::new("foo[0-9]+")?; + /// + /// // Create a 4KB buffer on the stack to store our serialized DFA. + /// let mut buf = [0u8; 4 * (1<<10)]; + /// let written = original_dfa.write_to_native_endian(&mut buf)?; + /// let dfa: DFA<&[u8]> = DFA::from_bytes(&buf[..written])?.0; + /// + /// let expected = Some(HalfMatch::must(0, 8)); + /// assert_eq!(expected, dfa.try_search_fwd(&Input::new("foo12345"))?); + /// # Ok::<(), Box>(()) + /// ``` + pub fn write_to_native_endian( + &self, + dst: &mut [u8], + ) -> Result { + self.write_to::(dst) + } + + /// The implementation of the public `write_to` serialization methods, + /// which is generic over endianness. 
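The prose above recommends serializing both byte orders whenever the artifact has to be portable. A minimal standalone sketch of that workflow, using only the public APIs documented here (`to_bytes_little_endian`, `to_bytes_big_endian`, `from_bytes`); the file names follow the doc example further below, and `cfg!(target_endian = ...)` selects the matching artifact at load time:

```rust
use regex_automata::{dfa::{sparse::DFA, Automaton}, HalfMatch, Input};

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Build once (requires the crate's `syntax` and `dfa-build` features),
    // then serialize both byte orders so any target can load one of them.
    let dfa = DFA::new("foo[0-9]+")?;
    std::fs::write("foo.littleendian.dfa", dfa.to_bytes_little_endian())?;
    std::fs::write("foo.bigendian.dfa", dfa.to_bytes_big_endian())?;

    // At load time, pick the file matching the running target's byte order.
    let path = if cfg!(target_endian = "little") {
        "foo.littleendian.dfa"
    } else {
        "foo.bigendian.dfa"
    };
    let bytes = std::fs::read(path)?;
    // Sparse DFAs have no alignment requirement, so a plain Vec<u8> is fine.
    let (loaded, _) = DFA::from_bytes(&bytes)?;

    let expected = Some(HalfMatch::must(0, 8));
    assert_eq!(expected, loaded.try_search_fwd(&Input::new("foo12345"))?);
    Ok(())
}
```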
+ fn write_to( + &self, + dst: &mut [u8], + ) -> Result { + let mut nw = 0; + nw += wire::write_label(LABEL, &mut dst[nw..])?; + nw += wire::write_endianness_check::(&mut dst[nw..])?; + nw += wire::write_version::(VERSION, &mut dst[nw..])?; + nw += { + // Currently unused, intended for future flexibility + E::write_u32(0, &mut dst[nw..]); + size_of::() + }; + nw += self.flags.write_to::(&mut dst[nw..])?; + nw += self.tt.write_to::(&mut dst[nw..])?; + nw += self.st.write_to::(&mut dst[nw..])?; + nw += self.special.write_to::(&mut dst[nw..])?; + nw += self.quitset.write_to::(&mut dst[nw..])?; + Ok(nw) + } + + /// Return the total number of bytes required to serialize this DFA. + /// + /// This is useful for determining the size of the buffer required to pass + /// to one of the serialization routines: + /// + /// * [`DFA::write_to_little_endian`] + /// * [`DFA::write_to_big_endian`] + /// * [`DFA::write_to_native_endian`] + /// + /// Passing a buffer smaller than the size returned by this method will + /// result in a serialization error. + /// + /// # Example + /// + /// This example shows how to dynamically allocate enough room to serialize + /// a sparse DFA. + /// + /// ``` + /// use regex_automata::{dfa::{Automaton, sparse::DFA}, HalfMatch, Input}; + /// + /// // Compile our original DFA. + /// let original_dfa = DFA::new("foo[0-9]+")?; + /// + /// let mut buf = vec![0; original_dfa.write_to_len()]; + /// let written = original_dfa.write_to_native_endian(&mut buf)?; + /// let dfa: DFA<&[u8]> = DFA::from_bytes(&buf[..written])?.0; + /// + /// let expected = Some(HalfMatch::must(0, 8)); + /// assert_eq!(expected, dfa.try_search_fwd(&Input::new("foo12345"))?); + /// # Ok::<(), Box>(()) + /// ``` + pub fn write_to_len(&self) -> usize { + wire::write_label_len(LABEL) + + wire::write_endianness_check_len() + + wire::write_version_len() + + size_of::() // unused, intended for future flexibility + + self.flags.write_to_len() + + self.tt.write_to_len() + + self.st.write_to_len() + + self.special.write_to_len() + + self.quitset.write_to_len() + } +} + +impl<'a> DFA<&'a [u8]> { + /// Safely deserialize a sparse DFA with a specific state identifier + /// representation. Upon success, this returns both the deserialized DFA + /// and the number of bytes read from the given slice. Namely, the contents + /// of the slice beyond the DFA are not read. + /// + /// Deserializing a DFA using this routine will never allocate heap memory. + /// For safety purposes, the DFA's transitions will be verified such that + /// every transition points to a valid state. If this verification is too + /// costly, then a [`DFA::from_bytes_unchecked`] API is provided, which + /// will always execute in constant time. + /// + /// The bytes given must be generated by one of the serialization APIs + /// of a `DFA` using a semver compatible release of this crate. Those + /// include: + /// + /// * [`DFA::to_bytes_little_endian`] + /// * [`DFA::to_bytes_big_endian`] + /// * [`DFA::to_bytes_native_endian`] + /// * [`DFA::write_to_little_endian`] + /// * [`DFA::write_to_big_endian`] + /// * [`DFA::write_to_native_endian`] + /// + /// The `to_bytes` methods allocate and return a `Vec` for you. The + /// `write_to` methods do not allocate and write to an existing slice + /// (which may be on the stack). Since deserialization always uses the + /// native endianness of the target platform, the serialization API you use + /// should match the endianness of the target platform. 
(It's often a good + /// idea to generate serialized DFAs for both forms of endianness and then + /// load the correct one based on endianness.) + /// + /// # Errors + /// + /// Generally speaking, it's easier to state the conditions in which an + /// error is _not_ returned. All of the following must be true: + /// + /// * The bytes given must be produced by one of the serialization APIs + /// on this DFA, as mentioned above. + /// * The endianness of the target platform matches the endianness used to + /// serialized the provided DFA. + /// + /// If any of the above are not true, then an error will be returned. + /// + /// Note that unlike deserializing a [`dense::DFA`], deserializing a sparse + /// DFA has no alignment requirements. That is, an alignment of `1` is + /// valid. + /// + /// # Panics + /// + /// This routine will never panic for any input. + /// + /// # Example + /// + /// This example shows how to serialize a DFA to raw bytes, deserialize it + /// and then use it for searching. + /// + /// ``` + /// use regex_automata::{dfa::{Automaton, sparse::DFA}, HalfMatch, Input}; + /// + /// let initial = DFA::new("foo[0-9]+")?; + /// let bytes = initial.to_bytes_native_endian(); + /// let dfa: DFA<&[u8]> = DFA::from_bytes(&bytes)?.0; + /// + /// let expected = Some(HalfMatch::must(0, 8)); + /// assert_eq!(expected, dfa.try_search_fwd(&Input::new("foo12345"))?); + /// # Ok::<(), Box>(()) + /// ``` + /// + /// # Example: loading a DFA from static memory + /// + /// One use case this library supports is the ability to serialize a + /// DFA to disk and then use `include_bytes!` to store it in a compiled + /// Rust program. Those bytes can then be cheaply deserialized into a + /// `DFA` structure at runtime and used for searching without having to + /// re-compile the DFA (which can be quite costly). + /// + /// We can show this in two parts. The first part is serializing the DFA to + /// a file: + /// + /// ```no_run + /// use regex_automata::dfa::sparse::DFA; + /// + /// let dfa = DFA::new("foo[0-9]+")?; + /// + /// // Write a big endian serialized version of this DFA to a file. + /// let bytes = dfa.to_bytes_big_endian(); + /// std::fs::write("foo.bigendian.dfa", &bytes)?; + /// + /// // Do it again, but this time for little endian. + /// let bytes = dfa.to_bytes_little_endian(); + /// std::fs::write("foo.littleendian.dfa", &bytes)?; + /// # Ok::<(), Box>(()) + /// ``` + /// + /// And now the second part is embedding the DFA into the compiled program + /// and deserializing it at runtime on first use. We use conditional + /// compilation to choose the correct endianness. We do not need to employ + /// any special tricks to ensure a proper alignment, since a sparse DFA has + /// no alignment requirements. + /// + /// ```no_run + /// use regex_automata::{ + /// dfa::{Automaton, sparse::DFA}, + /// util::lazy::Lazy, + /// HalfMatch, Input, + /// }; + /// + /// // This crate provides its own "lazy" type, kind of like + /// // lazy_static! or once_cell::sync::Lazy. But it works in no-alloc + /// // no-std environments and let's us write this using completely + /// // safe code. + /// static RE: Lazy> = Lazy::new(|| { + /// # const _: &str = stringify! 
{ + /// #[cfg(target_endian = "big")] + /// static BYTES: &[u8] = include_bytes!("foo.bigendian.dfa"); + /// #[cfg(target_endian = "little")] + /// static BYTES: &[u8] = include_bytes!("foo.littleendian.dfa"); + /// # }; + /// # static BYTES: &[u8] = b""; + /// + /// let (dfa, _) = DFA::from_bytes(BYTES) + /// .expect("serialized DFA should be valid"); + /// dfa + /// }); + /// + /// let expected = Ok(Some(HalfMatch::must(0, 8))); + /// assert_eq!(expected, RE.try_search_fwd(&Input::new("foo12345"))); + /// ``` + /// + /// Alternatively, consider using + /// [`lazy_static`](https://crates.io/crates/lazy_static) + /// or + /// [`once_cell`](https://crates.io/crates/once_cell), + /// which will guarantee safety for you. + pub fn from_bytes( + slice: &'a [u8], + ) -> Result<(DFA<&'a [u8]>, usize), DeserializeError> { + // SAFETY: This is safe because we validate both the sparse transitions + // (by trying to decode every state) and start state ID list below. If + // either validation fails, then we return an error. + let (dfa, nread) = unsafe { DFA::from_bytes_unchecked(slice)? }; + let seen = dfa.tt.validate(&dfa.special)?; + dfa.st.validate(&dfa.special, &seen)?; + // N.B. dfa.special doesn't have a way to do unchecked deserialization, + // so it has already been validated. + Ok((dfa, nread)) + } + + /// Deserialize a DFA with a specific state identifier representation in + /// constant time by omitting the verification of the validity of the + /// sparse transitions. + /// + /// This is just like [`DFA::from_bytes`], except it can potentially return + /// a DFA that exhibits undefined behavior if its transitions contains + /// invalid state identifiers. + /// + /// This routine is useful if you need to deserialize a DFA cheaply and + /// cannot afford the transition validation performed by `from_bytes`. + /// + /// # Safety + /// + /// This routine is not safe because it permits callers to provide + /// arbitrary transitions with possibly incorrect state identifiers. While + /// the various serialization routines will never return an incorrect + /// DFA, there is no guarantee that the bytes provided here are correct. + /// While `from_bytes_unchecked` will still do several forms of basic + /// validation, this routine does not check that the transitions themselves + /// are correct. Given an incorrect transition table, it is possible for + /// the search routines to access out-of-bounds memory because of explicit + /// bounds check elision. + /// + /// # Example + /// + /// ``` + /// use regex_automata::{dfa::{Automaton, sparse::DFA}, HalfMatch, Input}; + /// + /// let initial = DFA::new("foo[0-9]+")?; + /// let bytes = initial.to_bytes_native_endian(); + /// // SAFETY: This is guaranteed to be safe since the bytes given come + /// // directly from a compatible serialization routine. 
+ /// let dfa: DFA<&[u8]> = unsafe { DFA::from_bytes_unchecked(&bytes)?.0 }; + /// + /// let expected = Some(HalfMatch::must(0, 8)); + /// assert_eq!(expected, dfa.try_search_fwd(&Input::new("foo12345"))?); + /// # Ok::<(), Box>(()) + /// ``` + pub unsafe fn from_bytes_unchecked( + slice: &'a [u8], + ) -> Result<(DFA<&'a [u8]>, usize), DeserializeError> { + let mut nr = 0; + + nr += wire::read_label(&slice[nr..], LABEL)?; + nr += wire::read_endianness_check(&slice[nr..])?; + nr += wire::read_version(&slice[nr..], VERSION)?; + + let _unused = wire::try_read_u32(&slice[nr..], "unused space")?; + nr += size_of::(); + + let (flags, nread) = Flags::from_bytes(&slice[nr..])?; + nr += nread; + + let (tt, nread) = Transitions::from_bytes_unchecked(&slice[nr..])?; + nr += nread; + + let (st, nread) = StartTable::from_bytes_unchecked(&slice[nr..])?; + nr += nread; + + let (special, nread) = Special::from_bytes(&slice[nr..])?; + nr += nread; + if special.max.as_usize() >= tt.sparse().len() { + return Err(DeserializeError::generic( + "max should not be greater than or equal to sparse bytes", + )); + } + + let (quitset, nread) = ByteSet::from_bytes(&slice[nr..])?; + nr += nread; + + // Prefilters don't support serialization, so they're always absent. + let pre = None; + Ok((DFA { tt, st, special, pre, quitset, flags }, nr)) + } +} + +/// Other routines that work for all `T`. +impl DFA { + /// Set or unset the prefilter attached to this DFA. + /// + /// This is useful when one has deserialized a DFA from `&[u8]`. + /// Deserialization does not currently include prefilters, so if you + /// want prefilter acceleration, you'll need to rebuild it and attach + /// it here. + pub fn set_prefilter(&mut self, prefilter: Option) { + self.pre = prefilter + } +} + +impl> fmt::Debug for DFA { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + writeln!(f, "sparse::DFA(")?; + for state in self.tt.states() { + fmt_state_indicator(f, self, state.id())?; + writeln!(f, "{:06?}: {:?}", state.id().as_usize(), state)?; + } + writeln!(f, "")?; + for (i, (start_id, anchored, sty)) in self.st.iter().enumerate() { + if i % self.st.stride == 0 { + match anchored { + Anchored::No => writeln!(f, "START-GROUP(unanchored)")?, + Anchored::Yes => writeln!(f, "START-GROUP(anchored)")?, + Anchored::Pattern(pid) => writeln!( + f, + "START_GROUP(pattern: {:?})", + pid.as_usize() + )?, + } + } + writeln!(f, " {:?} => {:06?}", sty, start_id.as_usize())?; + } + writeln!(f, "state length: {:?}", self.tt.state_len)?; + writeln!(f, "pattern length: {:?}", self.pattern_len())?; + writeln!(f, "flags: {:?}", self.flags)?; + writeln!(f, ")")?; + Ok(()) + } +} + +// SAFETY: We assert that our implementation of each method is correct. +unsafe impl> Automaton for DFA { + #[inline] + fn is_special_state(&self, id: StateID) -> bool { + self.special.is_special_state(id) + } + + #[inline] + fn is_dead_state(&self, id: StateID) -> bool { + self.special.is_dead_state(id) + } + + #[inline] + fn is_quit_state(&self, id: StateID) -> bool { + self.special.is_quit_state(id) + } + + #[inline] + fn is_match_state(&self, id: StateID) -> bool { + self.special.is_match_state(id) + } + + #[inline] + fn is_start_state(&self, id: StateID) -> bool { + self.special.is_start_state(id) + } + + #[inline] + fn is_accel_state(&self, id: StateID) -> bool { + self.special.is_accel_state(id) + } + + // This is marked as inline to help dramatically boost sparse searching, + // which decodes each state it enters to follow the next transition. 
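For readers new to this low-level interface, the callbacks above are all a search loop needs. A rough sketch (not the crate's optimized search routines) of a forward search built on them; it assumes the trait's provided `start_state_forward` helper and reports only the end offset of the leftmost match:

```rust
use regex_automata::{dfa::Automaton, Input, MatchError};

/// Returns the end offset of the leftmost match, if any.
fn find_fwd<A: Automaton>(
    dfa: &A,
    haystack: &[u8],
) -> Result<Option<usize>, MatchError> {
    // `start_state_forward` picks the appropriate start state based on
    // what (if anything) precedes the search span.
    let mut sid = dfa.start_state_forward(&Input::new(haystack))?;
    let mut last_match = None;
    for (at, &byte) in haystack.iter().enumerate() {
        sid = dfa.next_state(sid, byte);
        // One cheap check filters out ordinary states; the finer-grained
        // predicates are only consulted for special states.
        if dfa.is_special_state(sid) {
            if dfa.is_match_state(sid) {
                last_match = Some(at);
            } else if dfa.is_dead_state(sid) {
                return Ok(last_match);
            } else if dfa.is_quit_state(sid) {
                return Err(MatchError::quit(byte, at));
            }
        }
    }
    // Matches are delayed by one byte, so the special end-of-input
    // transition must be taken before concluding the search.
    sid = dfa.next_eoi_state(sid);
    if dfa.is_match_state(sid) {
        last_match = Some(haystack.len());
    }
    Ok(last_match)
}
```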
+ #[cfg_attr(feature = "perf-inline", inline(always))] + fn next_state(&self, current: StateID, input: u8) -> StateID { + let input = self.tt.classes.get(input); + self.tt.state(current).next(input) + } + + #[inline] + unsafe fn next_state_unchecked( + &self, + current: StateID, + input: u8, + ) -> StateID { + self.next_state(current, input) + } + + #[inline] + fn next_eoi_state(&self, current: StateID) -> StateID { + self.tt.state(current).next_eoi() + } + + #[inline] + fn pattern_len(&self) -> usize { + self.tt.pattern_len + } + + #[inline] + fn match_len(&self, id: StateID) -> usize { + self.tt.state(id).pattern_len() + } + + #[inline] + fn match_pattern(&self, id: StateID, match_index: usize) -> PatternID { + // This is an optimization for the very common case of a DFA with a + // single pattern. This conditional avoids a somewhat more costly path + // that finds the pattern ID from the state machine, which requires + // a bit of slicing/pointer-chasing. This optimization tends to only + // matter when matches are frequent. + if self.tt.pattern_len == 1 { + return PatternID::ZERO; + } + self.tt.state(id).pattern_id(match_index) + } + + #[inline] + fn has_empty(&self) -> bool { + self.flags.has_empty + } + + #[inline] + fn is_utf8(&self) -> bool { + self.flags.is_utf8 + } + + #[inline] + fn is_always_start_anchored(&self) -> bool { + self.flags.is_always_start_anchored + } + + #[inline] + fn start_state( + &self, + config: &start::Config, + ) -> Result { + let anchored = config.get_anchored(); + let start = match config.get_look_behind() { + None => Start::Text, + Some(byte) => { + if !self.quitset.is_empty() && self.quitset.contains(byte) { + return Err(StartError::quit(byte)); + } + self.st.start_map.get(byte) + } + }; + self.st.start(anchored, start) + } + + #[inline] + fn universal_start_state(&self, mode: Anchored) -> Option { + match mode { + Anchored::No => self.st.universal_start_unanchored, + Anchored::Yes => self.st.universal_start_anchored, + Anchored::Pattern(_) => None, + } + } + + #[inline] + fn accelerator(&self, id: StateID) -> &[u8] { + self.tt.state(id).accelerator() + } + + #[inline] + fn get_prefilter(&self) -> Option<&Prefilter> { + self.pre.as_ref() + } +} + +/// The transition table portion of a sparse DFA. +/// +/// The transition table is the core part of the DFA in that it describes how +/// to move from one state to another based on the input sequence observed. +/// +/// Unlike a typical dense table based DFA, states in a sparse transition +/// table have variable size. That is, states with more transitions use more +/// space than states with fewer transitions. This means that finding the next +/// transition takes more work than with a dense DFA, but also typically uses +/// much less space. +#[derive(Clone)] +struct Transitions { + /// The raw encoding of each state in this DFA. + /// + /// Each state has the following information: + /// + /// * A set of transitions to subsequent states. Transitions to the dead + /// state are omitted. + /// * If the state can be accelerated, then any additional accelerator + /// information. + /// * If the state is a match state, then the state contains all pattern + /// IDs that match when in that state. + /// + /// To decode a state, use Transitions::state. + /// + /// In practice, T is either Vec or &[u8]. + sparse: T, + /// A set of equivalence classes, where a single equivalence class + /// represents a set of bytes that never discriminate between a match + /// and a non-match in the DFA. 
Each equivalence class corresponds to a + /// single character in this DFA's alphabet, where the maximum number of + /// characters is 257 (each possible value of a byte plus the special + /// EOI transition). Consequently, the number of equivalence classes + /// corresponds to the number of transitions for each DFA state. Note + /// though that the *space* used by each DFA state in the transition table + /// may be larger. The total space used by each DFA state is known as the + /// stride and is documented above. + /// + /// The only time the number of equivalence classes is fewer than 257 is + /// if the DFA's kind uses byte classes which is the default. Equivalence + /// classes should generally only be disabled when debugging, so that + /// the transitions themselves aren't obscured. Disabling them has no + /// other benefit, since the equivalence class map is always used while + /// searching. In the vast majority of cases, the number of equivalence + /// classes is substantially smaller than 257, particularly when large + /// Unicode classes aren't used. + /// + /// N.B. Equivalence classes aren't particularly useful in a sparse DFA + /// in the current implementation, since equivalence classes generally tend + /// to correspond to continuous ranges of bytes that map to the same + /// transition. So in a sparse DFA, equivalence classes don't really lead + /// to a space savings. In the future, it would be good to try and remove + /// them from sparse DFAs entirely, but requires a bit of work since sparse + /// DFAs are built from dense DFAs, which are in turn built on top of + /// equivalence classes. + classes: ByteClasses, + /// The total number of states in this DFA. Note that a DFA always has at + /// least one state---the dead state---even the empty DFA. In particular, + /// the dead state always has ID 0 and is correspondingly always the first + /// state. The dead state is never a match state. + state_len: usize, + /// The total number of unique patterns represented by these match states. + pattern_len: usize, +} + +impl<'a> Transitions<&'a [u8]> { + unsafe fn from_bytes_unchecked( + mut slice: &'a [u8], + ) -> Result<(Transitions<&'a [u8]>, usize), DeserializeError> { + let slice_start = slice.as_ptr().as_usize(); + + let (state_len, nr) = + wire::try_read_u32_as_usize(&slice, "state length")?; + slice = &slice[nr..]; + + let (pattern_len, nr) = + wire::try_read_u32_as_usize(&slice, "pattern length")?; + slice = &slice[nr..]; + + let (classes, nr) = ByteClasses::from_bytes(&slice)?; + slice = &slice[nr..]; + + let (len, nr) = + wire::try_read_u32_as_usize(&slice, "sparse transitions length")?; + slice = &slice[nr..]; + + wire::check_slice_len(slice, len, "sparse states byte length")?; + let sparse = &slice[..len]; + slice = &slice[len..]; + + let trans = Transitions { sparse, classes, state_len, pattern_len }; + Ok((trans, slice.as_ptr().as_usize() - slice_start)) + } +} + +impl> Transitions { + /// Writes a serialized form of this transition table to the buffer given. + /// If the buffer is too small, then an error is returned. To determine + /// how big the buffer must be, use `write_to_len`. 
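The same size-then-write discipline applies to callers of the public API: ask for the exact length, provide a buffer at least that large, and deserialize from the written prefix. A minimal sketch, reusing the `foo[0-9]+` pattern from the surrounding examples:

```rust
use regex_automata::{dfa::{sparse::DFA, Automaton}, HalfMatch, Input};

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let dfa = DFA::new("foo[0-9]+")?;

    // A buffer that is too small fails cleanly with a serialization error.
    let mut tiny = [0u8; 8];
    assert!(dfa.write_to_native_endian(&mut tiny).is_err());

    // `write_to_len` reports exactly how many bytes are needed.
    let mut buf = vec![0u8; dfa.write_to_len()];
    let written = dfa.write_to_native_endian(&mut buf)?;
    let (loaded, _) = DFA::from_bytes(&buf[..written])?;

    let expected = Some(HalfMatch::must(0, 8));
    assert_eq!(expected, loaded.try_search_fwd(&Input::new("foo12345"))?);
    Ok(())
}
```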
+ fn write_to( + &self, + mut dst: &mut [u8], + ) -> Result { + let nwrite = self.write_to_len(); + if dst.len() < nwrite { + return Err(SerializeError::buffer_too_small( + "sparse transition table", + )); + } + dst = &mut dst[..nwrite]; + + // write state length + E::write_u32(u32::try_from(self.state_len).unwrap(), dst); + dst = &mut dst[size_of::()..]; + + // write pattern length + E::write_u32(u32::try_from(self.pattern_len).unwrap(), dst); + dst = &mut dst[size_of::()..]; + + // write byte class map + let n = self.classes.write_to(dst)?; + dst = &mut dst[n..]; + + // write number of bytes in sparse transitions + E::write_u32(u32::try_from(self.sparse().len()).unwrap(), dst); + dst = &mut dst[size_of::()..]; + + // write actual transitions + let mut id = DEAD; + while id.as_usize() < self.sparse().len() { + let state = self.state(id); + let n = state.write_to::(&mut dst)?; + dst = &mut dst[n..]; + // The next ID is the offset immediately following `state`. + id = StateID::new(id.as_usize() + state.write_to_len()).unwrap(); + } + Ok(nwrite) + } + + /// Returns the number of bytes the serialized form of this transition + /// table will use. + fn write_to_len(&self) -> usize { + size_of::() // state length + + size_of::() // pattern length + + self.classes.write_to_len() + + size_of::() // sparse transitions length + + self.sparse().len() + } + + /// Validates that every state ID in this transition table is valid. + /// + /// That is, every state ID can be used to correctly index a state in this + /// table. + fn validate(&self, sp: &Special) -> Result { + let mut verified = Seen::new(); + // We need to make sure that we decode the correct number of states. + // Otherwise, an empty set of transitions would validate even if the + // recorded state length is non-empty. + let mut len = 0; + // We can't use the self.states() iterator because it assumes the state + // encodings are valid. It could panic if they aren't. + let mut id = DEAD; + while id.as_usize() < self.sparse().len() { + // Before we even decode the state, we check that the ID itself + // is well formed. That is, if it's a special state then it must + // actually be a quit, dead, accel, match or start state. + if sp.is_special_state(id) { + let is_actually_special = sp.is_dead_state(id) + || sp.is_quit_state(id) + || sp.is_match_state(id) + || sp.is_start_state(id) + || sp.is_accel_state(id); + if !is_actually_special { + // This is kind of a cryptic error message... + return Err(DeserializeError::generic( + "found sparse state tagged as special but \ + wasn't actually special", + )); + } + } + let state = self.try_state(sp, id)?; + verified.insert(id); + // The next ID should be the offset immediately following `state`. + id = StateID::new(wire::add( + id.as_usize(), + state.write_to_len(), + "next state ID offset", + )?) + .map_err(|err| { + DeserializeError::state_id_error(err, "next state ID offset") + })?; + len += 1; + } + // Now that we've checked that all top-level states are correct and + // importantly, collected a set of valid state IDs, we have all the + // information we need to check that all transitions are correct too. + // + // Note that we can't use `valid_ids` to iterate because it will + // be empty in no-std no-alloc contexts. (And yes, that means our + // verification isn't quite as good.) We can use `self.states()` + // though at least, since we know that all states can at least be + // decoded and traversed correctly. 
+ for state in self.states() { + // Check that all transitions in this state are correct. + for i in 0..state.ntrans { + let to = state.next_at(i); + // For no-alloc, we just check that the state can decode. It is + // technically possible that the state ID could still point to + // a non-existent state even if it decodes (fuzzing proved this + // to be true), but it shouldn't result in any memory unsafety + // or panics in non-debug mode. + #[cfg(not(feature = "alloc"))] + { + let _ = self.try_state(sp, to)?; + } + #[cfg(feature = "alloc")] + { + if !verified.contains(&to) { + return Err(DeserializeError::generic( + "found transition that points to a \ + non-existent state", + )); + } + } + } + } + if len != self.state_len { + return Err(DeserializeError::generic( + "mismatching sparse state length", + )); + } + Ok(verified) + } + + /// Converts these transitions to a borrowed value. + fn as_ref(&self) -> Transitions<&'_ [u8]> { + Transitions { + sparse: self.sparse(), + classes: self.classes.clone(), + state_len: self.state_len, + pattern_len: self.pattern_len, + } + } + + /// Converts these transitions to an owned value. + #[cfg(feature = "alloc")] + fn to_owned(&self) -> Transitions> { + Transitions { + sparse: self.sparse().to_vec(), + classes: self.classes.clone(), + state_len: self.state_len, + pattern_len: self.pattern_len, + } + } + + /// Return a convenient representation of the given state. + /// + /// This panics if the state is invalid. + /// + /// This is marked as inline to help dramatically boost sparse searching, + /// which decodes each state it enters to follow the next transition. Other + /// functions involved are also inlined, which should hopefully eliminate + /// a lot of the extraneous decoding that is never needed just to follow + /// the next transition. + #[cfg_attr(feature = "perf-inline", inline(always))] + fn state(&self, id: StateID) -> State<'_> { + let mut state = &self.sparse()[id.as_usize()..]; + let mut ntrans = wire::read_u16(&state).as_usize(); + let is_match = (1 << 15) & ntrans != 0; + ntrans &= !(1 << 15); + state = &state[2..]; + + let (input_ranges, state) = state.split_at(ntrans * 2); + let (next, state) = state.split_at(ntrans * StateID::SIZE); + let (pattern_ids, state) = if is_match { + let npats = wire::read_u32(&state).as_usize(); + state[4..].split_at(npats * 4) + } else { + (&[][..], state) + }; + + let accel_len = usize::from(state[0]); + let accel = &state[1..accel_len + 1]; + State { id, is_match, ntrans, input_ranges, next, pattern_ids, accel } + } + + /// Like `state`, but will return an error if the state encoding is + /// invalid. This is useful for verifying states after deserialization, + /// which is required for a safe deserialization API. + /// + /// Note that this only verifies that this state is decodable and that + /// all of its data is consistent. It does not verify that its state ID + /// transitions point to valid states themselves, nor does it verify that + /// every pattern ID is valid. + fn try_state( + &self, + sp: &Special, + id: StateID, + ) -> Result, DeserializeError> { + if id.as_usize() > self.sparse().len() { + return Err(DeserializeError::generic( + "invalid caller provided sparse state ID", + )); + } + let mut state = &self.sparse()[id.as_usize()..]; + // Encoding format starts with a u16 that stores the total number of + // transitions in this state. 
+ let (mut ntrans, _) = + wire::try_read_u16_as_usize(state, "state transition length")?; + let is_match = ((1 << 15) & ntrans) != 0; + ntrans &= !(1 << 15); + state = &state[2..]; + if ntrans > 257 || ntrans == 0 { + return Err(DeserializeError::generic( + "invalid transition length", + )); + } + if is_match && !sp.is_match_state(id) { + return Err(DeserializeError::generic( + "state marked as match but not in match ID range", + )); + } else if !is_match && sp.is_match_state(id) { + return Err(DeserializeError::generic( + "state in match ID range but not marked as match state", + )); + } + + // Each transition has two pieces: an inclusive range of bytes on which + // it is defined, and the state ID that those bytes transition to. The + // pairs come first, followed by a corresponding sequence of state IDs. + let input_ranges_len = ntrans.checked_mul(2).unwrap(); + wire::check_slice_len(state, input_ranges_len, "sparse byte pairs")?; + let (input_ranges, state) = state.split_at(input_ranges_len); + // Every range should be of the form A-B, where A<=B. + for pair in input_ranges.chunks(2) { + let (start, end) = (pair[0], pair[1]); + if start > end { + return Err(DeserializeError::generic("invalid input range")); + } + } + + // And now extract the corresponding sequence of state IDs. We leave + // this sequence as a &[u8] instead of a &[S] because sparse DFAs do + // not have any alignment requirements. + let next_len = ntrans + .checked_mul(self.id_len()) + .expect("state size * #trans should always fit in a usize"); + wire::check_slice_len(state, next_len, "sparse trans state IDs")?; + let (next, state) = state.split_at(next_len); + // We can at least verify that every state ID is in bounds. + for idbytes in next.chunks(self.id_len()) { + let (id, _) = + wire::read_state_id(idbytes, "sparse state ID in try_state")?; + wire::check_slice_len( + self.sparse(), + id.as_usize(), + "invalid sparse state ID", + )?; + } + + // If this is a match state, then read the pattern IDs for this state. + // Pattern IDs is a u32-length prefixed sequence of native endian + // encoded 32-bit integers. + let (pattern_ids, state) = if is_match { + let (npats, nr) = + wire::try_read_u32_as_usize(state, "pattern ID length")?; + let state = &state[nr..]; + if npats == 0 { + return Err(DeserializeError::generic( + "state marked as a match, but pattern length is zero", + )); + } + + let pattern_ids_len = + wire::mul(npats, 4, "sparse pattern ID byte length")?; + wire::check_slice_len( + state, + pattern_ids_len, + "sparse pattern IDs", + )?; + let (pattern_ids, state) = state.split_at(pattern_ids_len); + for patbytes in pattern_ids.chunks(PatternID::SIZE) { + wire::read_pattern_id( + patbytes, + "sparse pattern ID in try_state", + )?; + } + (pattern_ids, state) + } else { + (&[][..], state) + }; + if is_match && pattern_ids.is_empty() { + return Err(DeserializeError::generic( + "state marked as a match, but has no pattern IDs", + )); + } + if sp.is_match_state(id) && pattern_ids.is_empty() { + return Err(DeserializeError::generic( + "state marked special as a match, but has no pattern IDs", + )); + } + if sp.is_match_state(id) != is_match { + return Err(DeserializeError::generic( + "whether state is a match or not is inconsistent", + )); + } + + // Now read this state's accelerator info. The first byte is the length + // of the accelerator, which is typically 0 (for no acceleration) but + // is no bigger than 3. 
The length indicates the number of bytes that + // follow, where each byte corresponds to a transition out of this + // state. + if state.is_empty() { + return Err(DeserializeError::generic("no accelerator length")); + } + let (accel_len, state) = (usize::from(state[0]), &state[1..]); + + if accel_len > 3 { + return Err(DeserializeError::generic( + "sparse invalid accelerator length", + )); + } else if accel_len == 0 && sp.is_accel_state(id) { + return Err(DeserializeError::generic( + "got no accelerators in state, but in accelerator ID range", + )); + } else if accel_len > 0 && !sp.is_accel_state(id) { + return Err(DeserializeError::generic( + "state in accelerator ID range, but has no accelerators", + )); + } + + wire::check_slice_len( + state, + accel_len, + "sparse corrupt accelerator length", + )?; + let (accel, _) = (&state[..accel_len], &state[accel_len..]); + + let state = State { + id, + is_match, + ntrans, + input_ranges, + next, + pattern_ids, + accel, + }; + if sp.is_quit_state(state.next_at(state.ntrans - 1)) { + return Err(DeserializeError::generic( + "state with EOI transition to quit state is illegal", + )); + } + Ok(state) + } + + /// Return an iterator over all of the states in this DFA. + /// + /// The iterator returned yields tuples, where the first element is the + /// state ID and the second element is the state itself. + fn states(&self) -> StateIter<'_, T> { + StateIter { trans: self, id: DEAD.as_usize() } + } + + /// Returns the sparse transitions as raw bytes. + fn sparse(&self) -> &[u8] { + self.sparse.as_ref() + } + + /// Returns the number of bytes represented by a single state ID. + fn id_len(&self) -> usize { + StateID::SIZE + } + + /// Return the memory usage, in bytes, of these transitions. + /// + /// This does not include the size of a `Transitions` value itself. + fn memory_usage(&self) -> usize { + self.sparse().len() + } +} + +#[cfg(feature = "dfa-build")] +impl> Transitions { + /// Return a convenient mutable representation of the given state. + /// This panics if the state is invalid. + fn state_mut(&mut self, id: StateID) -> StateMut<'_> { + let mut state = &mut self.sparse_mut()[id.as_usize()..]; + let mut ntrans = wire::read_u16(&state).as_usize(); + let is_match = (1 << 15) & ntrans != 0; + ntrans &= !(1 << 15); + state = &mut state[2..]; + + let (input_ranges, state) = state.split_at_mut(ntrans * 2); + let (next, state) = state.split_at_mut(ntrans * StateID::SIZE); + let (pattern_ids, state) = if is_match { + let npats = wire::read_u32(&state).as_usize(); + state[4..].split_at_mut(npats * 4) + } else { + (&mut [][..], state) + }; + + let accel_len = usize::from(state[0]); + let accel = &mut state[1..accel_len + 1]; + StateMut { + id, + is_match, + ntrans, + input_ranges, + next, + pattern_ids, + accel, + } + } + + /// Returns the sparse transitions as raw mutable bytes. + fn sparse_mut(&mut self) -> &mut [u8] { + self.sparse.as_mut() + } +} + +/// The set of all possible starting states in a DFA. +/// +/// See the eponymous type in the `dense` module for more details. This type +/// is very similar to `dense::StartTable`, except that its underlying +/// representation is `&[u8]` instead of `&[S]`. (The latter would require +/// sparse DFAs to be aligned, which is explicitly something we do not require +/// because we don't really need it.) +#[derive(Clone)] +struct StartTable { + /// The initial start state IDs as a contiguous table of native endian + /// encoded integers, represented by `S`. 
+ /// + /// In practice, T is either Vec or &[u8] and has no alignment + /// requirements. + /// + /// The first `2 * stride` (currently always 8) entries always correspond + /// to the starts states for the entire DFA, with the first 4 entries being + /// for unanchored searches and the second 4 entries being for anchored + /// searches. To keep things simple, we always use 8 entries even if the + /// `StartKind` is not both. + /// + /// After that, there are `stride * patterns` state IDs, where `patterns` + /// may be zero in the case of a DFA with no patterns or in the case where + /// the DFA was built without enabling starting states for each pattern. + table: T, + /// The starting state configuration supported. When 'both', both + /// unanchored and anchored searches work. When 'unanchored', anchored + /// searches panic. When 'anchored', unanchored searches panic. + kind: StartKind, + /// The start state configuration for every possible byte. + start_map: StartByteMap, + /// The number of starting state IDs per pattern. + stride: usize, + /// The total number of patterns for which starting states are encoded. + /// This is `None` for DFAs that were built without start states for each + /// pattern. Thus, one cannot use this field to say how many patterns + /// are in the DFA in all cases. It is specific to how many patterns are + /// represented in this start table. + pattern_len: Option, + /// The universal starting state for unanchored searches. This is only + /// present when the DFA supports unanchored searches and when all starting + /// state IDs for an unanchored search are equivalent. + universal_start_unanchored: Option, + /// The universal starting state for anchored searches. This is only + /// present when the DFA supports anchored searches and when all starting + /// state IDs for an anchored search are equivalent. + universal_start_anchored: Option, +} + +#[cfg(feature = "dfa-build")] +impl StartTable> { + fn new>( + dfa: &dense::DFA, + pattern_len: Option, + ) -> StartTable> { + let stride = Start::len(); + // This is OK since the only way we're here is if a dense DFA could be + // constructed successfully, which uses the same space. + let len = stride + .checked_mul(pattern_len.unwrap_or(0)) + .unwrap() + .checked_add(stride.checked_mul(2).unwrap()) + .unwrap() + .checked_mul(StateID::SIZE) + .unwrap(); + StartTable { + table: vec![0; len], + kind: dfa.start_kind(), + start_map: dfa.start_map().clone(), + stride, + pattern_len, + universal_start_unanchored: dfa + .universal_start_state(Anchored::No), + universal_start_anchored: dfa.universal_start_state(Anchored::Yes), + } + } + + fn from_dense_dfa>( + dfa: &dense::DFA, + remap: &[StateID], + ) -> Result>, BuildError> { + // Unless the DFA has start states compiled for each pattern, then + // as far as the starting state table is concerned, there are zero + // patterns to account for. It will instead only store starting states + // for the entire DFA. 
+ let start_pattern_len = if dfa.starts_for_each_pattern() { + Some(dfa.pattern_len()) + } else { + None + }; + let mut sl = StartTable::new(dfa, start_pattern_len); + for (old_start_id, anchored, sty) in dfa.starts() { + let new_start_id = remap[dfa.to_index(old_start_id)]; + sl.set_start(anchored, sty, new_start_id); + } + if let Some(ref mut id) = sl.universal_start_anchored { + *id = remap[dfa.to_index(*id)]; + } + if let Some(ref mut id) = sl.universal_start_unanchored { + *id = remap[dfa.to_index(*id)]; + } + Ok(sl) + } +} + +impl<'a> StartTable<&'a [u8]> { + unsafe fn from_bytes_unchecked( + mut slice: &'a [u8], + ) -> Result<(StartTable<&'a [u8]>, usize), DeserializeError> { + let slice_start = slice.as_ptr().as_usize(); + + let (kind, nr) = StartKind::from_bytes(slice)?; + slice = &slice[nr..]; + + let (start_map, nr) = StartByteMap::from_bytes(slice)?; + slice = &slice[nr..]; + + let (stride, nr) = + wire::try_read_u32_as_usize(slice, "sparse start table stride")?; + slice = &slice[nr..]; + if stride != Start::len() { + return Err(DeserializeError::generic( + "invalid sparse starting table stride", + )); + } + + let (maybe_pattern_len, nr) = + wire::try_read_u32_as_usize(slice, "sparse start table patterns")?; + slice = &slice[nr..]; + let pattern_len = if maybe_pattern_len.as_u32() == u32::MAX { + None + } else { + Some(maybe_pattern_len) + }; + if pattern_len.map_or(false, |len| len > PatternID::LIMIT) { + return Err(DeserializeError::generic( + "sparse invalid number of patterns", + )); + } + + let (universal_unanchored, nr) = + wire::try_read_u32(slice, "universal unanchored start")?; + slice = &slice[nr..]; + let universal_start_unanchored = if universal_unanchored == u32::MAX { + None + } else { + Some(StateID::try_from(universal_unanchored).map_err(|e| { + DeserializeError::state_id_error( + e, + "universal unanchored start", + ) + })?) + }; + + let (universal_anchored, nr) = + wire::try_read_u32(slice, "universal anchored start")?; + slice = &slice[nr..]; + let universal_start_anchored = if universal_anchored == u32::MAX { + None + } else { + Some(StateID::try_from(universal_anchored).map_err(|e| { + DeserializeError::state_id_error(e, "universal anchored start") + })?) + }; + + let pattern_table_size = wire::mul( + stride, + pattern_len.unwrap_or(0), + "sparse invalid pattern length", + )?; + // Our start states always start with a single stride of start states + // for the entire automaton which permit it to match any pattern. What + // follows it are an optional set of start states for each pattern. 
+ let start_state_len = wire::add( + wire::mul(2, stride, "start state stride too big")?, + pattern_table_size, + "sparse invalid 'any' pattern starts size", + )?; + let table_bytes_len = wire::mul( + start_state_len, + StateID::SIZE, + "sparse pattern table bytes length", + )?; + wire::check_slice_len( + slice, + table_bytes_len, + "sparse start ID table", + )?; + let table = &slice[..table_bytes_len]; + slice = &slice[table_bytes_len..]; + + let sl = StartTable { + table, + kind, + start_map, + stride, + pattern_len, + universal_start_unanchored, + universal_start_anchored, + }; + Ok((sl, slice.as_ptr().as_usize() - slice_start)) + } +} + +impl> StartTable { + fn write_to( + &self, + mut dst: &mut [u8], + ) -> Result { + let nwrite = self.write_to_len(); + if dst.len() < nwrite { + return Err(SerializeError::buffer_too_small( + "sparse starting table ids", + )); + } + dst = &mut dst[..nwrite]; + + // write start kind + let nw = self.kind.write_to::(dst)?; + dst = &mut dst[nw..]; + // write start byte map + let nw = self.start_map.write_to(dst)?; + dst = &mut dst[nw..]; + // write stride + E::write_u32(u32::try_from(self.stride).unwrap(), dst); + dst = &mut dst[size_of::()..]; + // write pattern length + E::write_u32( + u32::try_from(self.pattern_len.unwrap_or(0xFFFF_FFFF)).unwrap(), + dst, + ); + dst = &mut dst[size_of::()..]; + // write universal start unanchored state id, u32::MAX if absent + E::write_u32( + self.universal_start_unanchored + .map_or(u32::MAX, |sid| sid.as_u32()), + dst, + ); + dst = &mut dst[size_of::()..]; + // write universal start anchored state id, u32::MAX if absent + E::write_u32( + self.universal_start_anchored.map_or(u32::MAX, |sid| sid.as_u32()), + dst, + ); + dst = &mut dst[size_of::()..]; + // write start IDs + for (sid, _, _) in self.iter() { + E::write_u32(sid.as_u32(), dst); + dst = &mut dst[StateID::SIZE..]; + } + Ok(nwrite) + } + + /// Returns the number of bytes the serialized form of this transition + /// table will use. + fn write_to_len(&self) -> usize { + self.kind.write_to_len() + + self.start_map.write_to_len() + + size_of::() // stride + + size_of::() // # patterns + + size_of::() // universal unanchored start + + size_of::() // universal anchored start + + self.table().len() + } + + /// Validates that every starting state ID in this table is valid. + /// + /// That is, every starting state ID can be used to correctly decode a + /// state in the DFA's sparse transitions. + fn validate( + &self, + sp: &Special, + seen: &Seen, + ) -> Result<(), DeserializeError> { + for (id, _, _) in self.iter() { + if !seen.contains(&id) { + return Err(DeserializeError::generic( + "found invalid start state ID", + )); + } + if sp.is_match_state(id) { + return Err(DeserializeError::generic( + "start states cannot be match states", + )); + } + } + Ok(()) + } + + /// Converts this start list to a borrowed value. + fn as_ref(&self) -> StartTable<&'_ [u8]> { + StartTable { + table: self.table(), + kind: self.kind, + start_map: self.start_map.clone(), + stride: self.stride, + pattern_len: self.pattern_len, + universal_start_unanchored: self.universal_start_unanchored, + universal_start_anchored: self.universal_start_anchored, + } + } + + /// Converts this start list to an owned value. 
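The `start()` and `set_start()` methods further down compute offsets into this table using the layout described in the struct documentation above: a `stride`-sized block of unanchored starts, a block of anchored starts, then one block per pattern when per-pattern starts were compiled in. A standalone sketch of that arithmetic (entry indexes, not byte offsets; multiply by the 4-byte state ID size for a byte position; the `Block` enum is an illustrative stand-in, not a crate type):

```rust
/// Which block of the start table to index into. Mirrors the layout
/// documented above; an illustrative stand-in for the crate's `Anchored`.
enum Block {
    Unanchored,
    Anchored,
    Pattern(usize),
}

/// Offset (in state-ID entries) of a start state in the table. `start`
/// selects one of the `stride` per-configuration slots (currently 4),
/// chosen from the byte preceding the search's start position.
fn start_entry(stride: usize, block: Block, start: usize) -> usize {
    debug_assert!(start < stride);
    match block {
        Block::Unanchored => start,
        Block::Anchored => stride + start,
        Block::Pattern(pid) => 2 * stride + pid * stride + start,
    }
}

fn main() {
    let stride = 4;
    assert_eq!(start_entry(stride, Block::Unanchored, 0), 0);
    assert_eq!(start_entry(stride, Block::Anchored, 3), 7);
    // Per-pattern blocks begin after the two DFA-wide blocks.
    assert_eq!(start_entry(stride, Block::Pattern(2), 1), 17);
}
```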
+ #[cfg(feature = "alloc")] + fn to_owned(&self) -> StartTable> { + StartTable { + table: self.table().to_vec(), + kind: self.kind, + start_map: self.start_map.clone(), + stride: self.stride, + pattern_len: self.pattern_len, + universal_start_unanchored: self.universal_start_unanchored, + universal_start_anchored: self.universal_start_anchored, + } + } + + /// Return the start state for the given index and pattern ID. If the + /// pattern ID is None, then the corresponding start state for the entire + /// DFA is returned. If the pattern ID is not None, then the corresponding + /// starting state for the given pattern is returned. If this start table + /// does not have individual starting states for each pattern, then this + /// panics. + fn start( + &self, + anchored: Anchored, + start: Start, + ) -> Result { + let start_index = start.as_usize(); + let index = match anchored { + Anchored::No => { + if !self.kind.has_unanchored() { + return Err(StartError::unsupported_anchored(anchored)); + } + start_index + } + Anchored::Yes => { + if !self.kind.has_anchored() { + return Err(StartError::unsupported_anchored(anchored)); + } + self.stride + start_index + } + Anchored::Pattern(pid) => { + let len = match self.pattern_len { + None => { + return Err(StartError::unsupported_anchored(anchored)) + } + Some(len) => len, + }; + if pid.as_usize() >= len { + return Ok(DEAD); + } + (2 * self.stride) + + (self.stride * pid.as_usize()) + + start_index + } + }; + let start = index * StateID::SIZE; + // This OK since we're allowed to assume that the start table contains + // valid StateIDs. + Ok(wire::read_state_id_unchecked(&self.table()[start..]).0) + } + + /// Return an iterator over all start IDs in this table. + fn iter(&self) -> StartStateIter<'_, T> { + StartStateIter { st: self, i: 0 } + } + + /// Returns the total number of start state IDs in this table. + fn len(&self) -> usize { + self.table().len() / StateID::SIZE + } + + /// Returns the table as a raw slice of bytes. + fn table(&self) -> &[u8] { + self.table.as_ref() + } + + /// Return the memory usage, in bytes, of this start list. + /// + /// This does not include the size of a `StartTable` value itself. + fn memory_usage(&self) -> usize { + self.table().len() + } +} + +#[cfg(feature = "dfa-build")] +impl> StartTable { + /// Set the start state for the given index and pattern. + /// + /// If the pattern ID or state ID are not valid, then this will panic. + fn set_start(&mut self, anchored: Anchored, start: Start, id: StateID) { + let start_index = start.as_usize(); + let index = match anchored { + Anchored::No => start_index, + Anchored::Yes => self.stride + start_index, + Anchored::Pattern(pid) => { + let pid = pid.as_usize(); + let len = self + .pattern_len + .expect("start states for each pattern enabled"); + assert!(pid < len, "invalid pattern ID {pid:?}"); + self.stride + .checked_mul(pid) + .unwrap() + .checked_add(self.stride.checked_mul(2).unwrap()) + .unwrap() + .checked_add(start_index) + .unwrap() + } + }; + let start = index * StateID::SIZE; + let end = start + StateID::SIZE; + wire::write_state_id::( + id, + &mut self.table.as_mut()[start..end], + ); + } +} + +/// An iterator over all state state IDs in a sparse DFA. 
+struct StartStateIter<'a, T> { + st: &'a StartTable, + i: usize, +} + +impl<'a, T: AsRef<[u8]>> Iterator for StartStateIter<'a, T> { + type Item = (StateID, Anchored, Start); + + fn next(&mut self) -> Option<(StateID, Anchored, Start)> { + let i = self.i; + if i >= self.st.len() { + return None; + } + self.i += 1; + + // This unwrap is okay since the stride of any DFA must always match + // the number of start state types. + let start_type = Start::from_usize(i % self.st.stride).unwrap(); + let anchored = if i < self.st.stride { + Anchored::No + } else if i < (2 * self.st.stride) { + Anchored::Yes + } else { + let pid = (i - (2 * self.st.stride)) / self.st.stride; + Anchored::Pattern(PatternID::new(pid).unwrap()) + }; + let start = i * StateID::SIZE; + let end = start + StateID::SIZE; + let bytes = self.st.table()[start..end].try_into().unwrap(); + // This is OK since we're allowed to assume that any IDs in this start + // table are correct and valid for this DFA. + let id = StateID::from_ne_bytes_unchecked(bytes); + Some((id, anchored, start_type)) + } +} + +impl<'a, T> fmt::Debug for StartStateIter<'a, T> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("StartStateIter").field("i", &self.i).finish() + } +} + +/// An iterator over all states in a sparse DFA. +/// +/// This iterator yields tuples, where the first element is the state ID and +/// the second element is the state itself. +struct StateIter<'a, T> { + trans: &'a Transitions, + id: usize, +} + +impl<'a, T: AsRef<[u8]>> Iterator for StateIter<'a, T> { + type Item = State<'a>; + + fn next(&mut self) -> Option> { + if self.id >= self.trans.sparse().len() { + return None; + } + let state = self.trans.state(StateID::new_unchecked(self.id)); + self.id = self.id + state.write_to_len(); + Some(state) + } +} + +impl<'a, T> fmt::Debug for StateIter<'a, T> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("StateIter").field("id", &self.id).finish() + } +} + +/// A representation of a sparse DFA state that can be cheaply materialized +/// from a state identifier. +#[derive(Clone)] +struct State<'a> { + /// The identifier of this state. + id: StateID, + /// Whether this is a match state or not. + is_match: bool, + /// The number of transitions in this state. + ntrans: usize, + /// Pairs of input ranges, where there is one pair for each transition. + /// Each pair specifies an inclusive start and end byte range for the + /// corresponding transition. + input_ranges: &'a [u8], + /// Transitions to the next state. This slice contains native endian + /// encoded state identifiers, with `S` as the representation. Thus, there + /// are `ntrans * size_of::()` bytes in this slice. + next: &'a [u8], + /// If this is a match state, then this contains the pattern IDs that match + /// when the DFA is in this state. + /// + /// This is a contiguous sequence of 32-bit native endian encoded integers. + pattern_ids: &'a [u8], + /// An accelerator for this state, if present. If this state has no + /// accelerator, then this is an empty slice. When non-empty, this slice + /// has length at most 3 and corresponds to the exhaustive set of bytes + /// that must be seen in order to transition out of this state. + accel: &'a [u8], +} + +impl<'a> State<'a> { + /// Searches for the next transition given an input byte. If no such + /// transition could be found, then a dead state is returned. 
+ /// + /// This is marked as inline to help dramatically boost sparse searching, + /// which decodes each state it enters to follow the next transition. + #[cfg_attr(feature = "perf-inline", inline(always))] + fn next(&self, input: u8) -> StateID { + // This straight linear search was observed to be much better than + // binary search on ASCII haystacks, likely because a binary search + // visits the ASCII case last but a linear search sees it first. A + // binary search does do a little better on non-ASCII haystacks, but + // not by much. There might be a better trade off lurking here. + for i in 0..(self.ntrans - 1) { + let (start, end) = self.range(i); + if start <= input && input <= end { + return self.next_at(i); + } + // We could bail early with an extra branch: if input < b1, then + // we know we'll never find a matching transition. Interestingly, + // this extra branch seems to not help performance, or will even + // hurt it. It's likely very dependent on the DFA itself and what + // is being searched. + } + DEAD + } + + /// Returns the next state ID for the special EOI transition. + fn next_eoi(&self) -> StateID { + self.next_at(self.ntrans - 1) + } + + /// Returns the identifier for this state. + fn id(&self) -> StateID { + self.id + } + + /// Returns the inclusive input byte range for the ith transition in this + /// state. + fn range(&self, i: usize) -> (u8, u8) { + (self.input_ranges[i * 2], self.input_ranges[i * 2 + 1]) + } + + /// Returns the next state for the ith transition in this state. + fn next_at(&self, i: usize) -> StateID { + let start = i * StateID::SIZE; + let end = start + StateID::SIZE; + let bytes = self.next[start..end].try_into().unwrap(); + StateID::from_ne_bytes_unchecked(bytes) + } + + /// Returns the pattern ID for the given match index. If the match index + /// is invalid, then this panics. + fn pattern_id(&self, match_index: usize) -> PatternID { + let start = match_index * PatternID::SIZE; + wire::read_pattern_id_unchecked(&self.pattern_ids[start..]).0 + } + + /// Returns the total number of pattern IDs for this state. This is always + /// zero when `is_match` is false. + fn pattern_len(&self) -> usize { + assert_eq!(0, self.pattern_ids.len() % 4); + self.pattern_ids.len() / 4 + } + + /// Return an accelerator for this state. + fn accelerator(&self) -> &'a [u8] { + self.accel + } + + /// Write the raw representation of this state to the given buffer using + /// the given endianness. 
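As a companion to `write_to_len` just below, here is a sketch of the same size computation for one encoded state under the layout this file uses (4-byte state and pattern IDs). It is illustrative only and treats "has at least one pattern ID" as synonymous with "is a match state", which the validation code above also enforces:

```rust
/// Bytes consumed by one encoded sparse state: a u16 transition count
/// (high bit = match state), one (start, end) byte pair per transition,
/// one 4-byte state ID per transition, an optional pattern-ID block for
/// match states, and a 1-byte accelerator length plus accelerator bytes.
fn encoded_state_len(ntrans: usize, npatterns: usize, naccel: usize) -> usize {
    let mut len = 2          // transition count
        + ntrans * 2         // inclusive input ranges
        + ntrans * 4         // next-state IDs
        + 1 + naccel;        // accelerator length + bytes
    if npatterns > 0 {
        len += 4 + npatterns * 4; // pattern count + pattern IDs
    }
    len
}

fn main() {
    // A non-match state with 3 transitions and no accelerator:
    assert_eq!(encoded_state_len(3, 0, 0), 2 + 6 + 12 + 1);
    // A match state for one pattern with a single transition:
    assert_eq!(encoded_state_len(1, 1, 0), 2 + 2 + 4 + 1 + 4 + 4);
}
```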
+ fn write_to( + &self, + mut dst: &mut [u8], + ) -> Result { + let nwrite = self.write_to_len(); + if dst.len() < nwrite { + return Err(SerializeError::buffer_too_small( + "sparse state transitions", + )); + } + + let ntrans = + if self.is_match { self.ntrans | (1 << 15) } else { self.ntrans }; + E::write_u16(u16::try_from(ntrans).unwrap(), dst); + dst = &mut dst[size_of::()..]; + + dst[..self.input_ranges.len()].copy_from_slice(self.input_ranges); + dst = &mut dst[self.input_ranges.len()..]; + + for i in 0..self.ntrans { + E::write_u32(self.next_at(i).as_u32(), dst); + dst = &mut dst[StateID::SIZE..]; + } + + if self.is_match { + E::write_u32(u32::try_from(self.pattern_len()).unwrap(), dst); + dst = &mut dst[size_of::()..]; + for i in 0..self.pattern_len() { + let pid = self.pattern_id(i); + E::write_u32(pid.as_u32(), dst); + dst = &mut dst[PatternID::SIZE..]; + } + } + + dst[0] = u8::try_from(self.accel.len()).unwrap(); + dst[1..][..self.accel.len()].copy_from_slice(self.accel); + + Ok(nwrite) + } + + /// Return the total number of bytes that this state consumes in its + /// encoded form. + fn write_to_len(&self) -> usize { + let mut len = 2 + + (self.ntrans * 2) + + (self.ntrans * StateID::SIZE) + + (1 + self.accel.len()); + if self.is_match { + len += size_of::() + self.pattern_ids.len(); + } + len + } +} + +impl<'a> fmt::Debug for State<'a> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let mut printed = false; + for i in 0..(self.ntrans - 1) { + let next = self.next_at(i); + if next == DEAD { + continue; + } + + if printed { + write!(f, ", ")?; + } + let (start, end) = self.range(i); + if start == end { + write!(f, "{:?} => {:?}", DebugByte(start), next.as_usize())?; + } else { + write!( + f, + "{:?}-{:?} => {:?}", + DebugByte(start), + DebugByte(end), + next.as_usize(), + )?; + } + printed = true; + } + let eoi = self.next_at(self.ntrans - 1); + if eoi != DEAD { + if printed { + write!(f, ", ")?; + } + write!(f, "EOI => {:?}", eoi.as_usize())?; + } + Ok(()) + } +} + +/// A representation of a mutable sparse DFA state that can be cheaply +/// materialized from a state identifier. +#[cfg(feature = "dfa-build")] +struct StateMut<'a> { + /// The identifier of this state. + id: StateID, + /// Whether this is a match state or not. + is_match: bool, + /// The number of transitions in this state. + ntrans: usize, + /// Pairs of input ranges, where there is one pair for each transition. + /// Each pair specifies an inclusive start and end byte range for the + /// corresponding transition. + input_ranges: &'a mut [u8], + /// Transitions to the next state. This slice contains native endian + /// encoded state identifiers, with `S` as the representation. Thus, there + /// are `ntrans * size_of::()` bytes in this slice. + next: &'a mut [u8], + /// If this is a match state, then this contains the pattern IDs that match + /// when the DFA is in this state. + /// + /// This is a contiguous sequence of 32-bit native endian encoded integers. + pattern_ids: &'a [u8], + /// An accelerator for this state, if present. If this state has no + /// accelerator, then this is an empty slice. When non-empty, this slice + /// has length at most 3 and corresponds to the exhaustive set of bytes + /// that must be seen in order to transition out of this state. + accel: &'a mut [u8], +} + +#[cfg(feature = "dfa-build")] +impl<'a> StateMut<'a> { + /// Sets the ith transition to the given state. 
+ fn set_next_at(&mut self, i: usize, next: StateID) { + let start = i * StateID::SIZE; + let end = start + StateID::SIZE; + wire::write_state_id::(next, &mut self.next[start..end]); + } +} + +#[cfg(feature = "dfa-build")] +impl<'a> fmt::Debug for StateMut<'a> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let state = State { + id: self.id, + is_match: self.is_match, + ntrans: self.ntrans, + input_ranges: self.input_ranges, + next: self.next, + pattern_ids: self.pattern_ids, + accel: self.accel, + }; + fmt::Debug::fmt(&state, f) + } +} + +// In order to validate everything, we not only need to make sure we +// can decode every state, but that every transition in every state +// points to a valid state. There are many duplicative transitions, so +// we record state IDs that we've verified so that we don't redo the +// decoding work. +// +// Except, when in no_std mode, we don't have dynamic memory allocation +// available to us, so we skip this optimization. It's not clear +// whether doing something more clever is worth it just yet. If you're +// profiling this code and need it to run faster, please file an issue. +// +// OK, so we also use this to record the set of valid state IDs. Since +// it is possible for a transition to point to an invalid state ID that +// still (somehow) deserializes to a valid state. So we need to make +// sure our transitions are limited to actually correct state IDs. +// The problem is, I'm not sure how to do this verification step in +// no-std no-alloc mode. I think we'd *have* to store the set of valid +// state IDs in the DFA itself. For now, we don't do this verification +// in no-std no-alloc mode. The worst thing that can happen is an +// incorrect result. But no panics or memory safety problems should +// result. Because we still do validate that the state itself is +// "valid" in the sense that everything it points to actually exists. +// +// ---AG +#[derive(Debug)] +struct Seen { + #[cfg(feature = "alloc")] + set: alloc::collections::BTreeSet, + #[cfg(not(feature = "alloc"))] + set: core::marker::PhantomData, +} + +#[cfg(feature = "alloc")] +impl Seen { + fn new() -> Seen { + Seen { set: alloc::collections::BTreeSet::new() } + } + fn insert(&mut self, id: StateID) { + self.set.insert(id); + } + fn contains(&self, id: &StateID) -> bool { + self.set.contains(id) + } +} + +#[cfg(not(feature = "alloc"))] +impl Seen { + fn new() -> Seen { + Seen { set: core::marker::PhantomData } + } + fn insert(&mut self, _id: StateID) {} + fn contains(&self, _id: &StateID) -> bool { + true + } +} + +/* +/// A binary search routine specialized specifically to a sparse DFA state's +/// transitions. Specifically, the transitions are defined as a set of pairs +/// of input bytes that delineate an inclusive range of bytes. If the input +/// byte is in the range, then the corresponding transition is a match. +/// +/// This binary search accepts a slice of these pairs and returns the position +/// of the matching pair (the ith transition), or None if no matching pair +/// could be found. +/// +/// Note that this routine is not currently used since it was observed to +/// either decrease performance when searching ASCII, or did not provide enough +/// of a boost on non-ASCII haystacks to be worth it. However, we leave it here +/// for posterity in case we can find a way to use it. 
+/// +/// In theory, we could use the standard library's search routine if we could +/// cast a `&[u8]` to a `&[(u8, u8)]`, but I don't believe this is currently +/// guaranteed to be safe and is thus UB (since I don't think the in-memory +/// representation of `(u8, u8)` has been nailed down). One could define a +/// repr(C) type, but the casting doesn't seem justified. +#[cfg_attr(feature = "perf-inline", inline(always))] +fn binary_search_ranges(ranges: &[u8], needle: u8) -> Option { + debug_assert!(ranges.len() % 2 == 0, "ranges must have even length"); + debug_assert!(ranges.len() <= 512, "ranges should be short"); + + let (mut left, mut right) = (0, ranges.len() / 2); + while left < right { + let mid = (left + right) / 2; + let (b1, b2) = (ranges[mid * 2], ranges[mid * 2 + 1]); + if needle < b1 { + right = mid; + } else if needle > b2 { + left = mid + 1; + } else { + return Some(mid); + } + } + None +} +*/ + +#[cfg(all(test, feature = "syntax", feature = "dfa-build"))] +mod tests { + use crate::{ + dfa::{dense::DFA, Automaton}, + nfa::thompson, + Input, MatchError, + }; + + // See the analogous test in src/hybrid/dfa.rs and src/dfa/dense.rs. + #[test] + fn heuristic_unicode_forward() { + let dfa = DFA::builder() + .configure(DFA::config().unicode_word_boundary(true)) + .thompson(thompson::Config::new().reverse(true)) + .build(r"\b[0-9]+\b") + .unwrap() + .to_sparse() + .unwrap(); + + let input = Input::new("β123").range(2..); + let expected = MatchError::quit(0xB2, 1); + let got = dfa.try_search_fwd(&input); + assert_eq!(Err(expected), got); + + let input = Input::new("123β").range(..3); + let expected = MatchError::quit(0xCE, 3); + let got = dfa.try_search_fwd(&input); + assert_eq!(Err(expected), got); + } + + // See the analogous test in src/hybrid/dfa.rs and src/dfa/dense.rs. + #[test] + fn heuristic_unicode_reverse() { + let dfa = DFA::builder() + .configure(DFA::config().unicode_word_boundary(true)) + .thompson(thompson::Config::new().reverse(true)) + .build(r"\b[0-9]+\b") + .unwrap() + .to_sparse() + .unwrap(); + + let input = Input::new("β123").range(2..); + let expected = MatchError::quit(0xB2, 1); + let got = dfa.try_search_rev(&input); + assert_eq!(Err(expected), got); + + let input = Input::new("123β").range(..3); + let expected = MatchError::quit(0xCE, 3); + let got = dfa.try_search_rev(&input); + assert_eq!(Err(expected), got); + } +} diff --git a/vendor/regex-automata/src/dfa/special.rs b/vendor/regex-automata/src/dfa/special.rs new file mode 100644 index 00000000000000..197323116fb695 --- /dev/null +++ b/vendor/regex-automata/src/dfa/special.rs @@ -0,0 +1,494 @@ +use crate::{ + dfa::DEAD, + util::{ + primitives::StateID, + wire::{self, DeserializeError, Endian, SerializeError}, + }, +}; + +macro_rules! err { + ($msg:expr) => { + return Err(DeserializeError::generic($msg)); + }; +} + +// Special represents the identifiers in a DFA that correspond to "special" +// states. If a state is one or more of the following, then it is considered +// special: +// +// * dead - A non-matching state where all outgoing transitions lead back to +// itself. There is only one of these, regardless of whether minimization +// has run. The dead state always has an ID of 0. i.e., It is always the +// first state in a DFA. +// * quit - A state that is entered whenever a byte is seen that should cause +// a DFA to give up and stop searching. This results in a MatchError::quit +// error being returned at search time. 
The default configuration for a DFA +// has no quit bytes, which means this state is unreachable by default, +// although it is always present for reasons of implementation simplicity. +// This state is only reachable when the caller configures the DFA to quit +// on certain bytes. There is always exactly one of these states and it +// is always the second state. (Its actual ID depends on the size of the +// alphabet in dense DFAs, since state IDs are premultiplied in order to +// allow them to be used directly as indices into the transition table.) +// * match - An accepting state, i.e., indicative of a match. There may be +// zero or more of these states. +// * accelerated - A state where all of its outgoing transitions, except a +// few, loop back to itself. These states are candidates for acceleration +// via memchr during search. There may be zero or more of these states. +// * start - A non-matching state that indicates where the automaton should +// start during a search. There is always at least one starting state and +// all are guaranteed to be non-match states. (A start state cannot be a +// match state because the DFAs in this crate delay all matches by one byte. +// So every search that finds a match must move through one transition to +// some other match state, even when searching an empty string.) +// +// These are not mutually exclusive categories. Namely, the following +// overlapping can occur: +// +// * {dead, start} - If a DFA can never lead to a match and it is minimized, +// then it will typically compile to something where all starting IDs point +// to the DFA's dead state. +// * {match, accelerated} - It is possible for a match state to have the +// majority of its transitions loop back to itself, which means it's +// possible for a match state to be accelerated. +// * {start, accelerated} - Similarly, it is possible for a start state to be +// accelerated. Note that it is possible for an accelerated state to be +// neither a match or a start state. Also note that just because both match +// and start states overlap with accelerated states does not mean that +// match and start states overlap with each other. In fact, they are +// guaranteed not to overlap. +// +// As a special mention, every DFA always has a dead and a quit state, even +// though from the perspective of the DFA, they are equivalent. (Indeed, +// minimization special cases them to ensure they don't get merged.) The +// purpose of keeping them distinct is to use the quit state as a sentinel to +// distinguish between whether a search finished successfully without finding +// anything or whether it gave up before finishing. +// +// So the main problem we want to solve here is the *fast* detection of whether +// a state is special or not. And we also want to do this while storing as +// little extra data as possible. AND we want to be able to quickly determine +// which categories a state falls into above if it is special. +// +// We achieve this by essentially shuffling all special states to the beginning +// of a DFA. That is, all special states appear before every other non-special +// state. By representing special states this way, we can determine whether a +// state is special or not by a single comparison, where special.max is the +// identifier of the last special state in the DFA: +// +// if current_state <= special.max: +// ... do something with special state +// +// The only thing left to do is to determine what kind of special state +// it is. Because what we do next depends on that. 
Since special states +// are typically rare, we can afford to do a bit more extra work, but we'd +// still like this to be as fast as possible. The trick we employ here is to +// continue shuffling states even within the special state range. Such that +// one contiguous region corresponds to match states, another for start states +// and then an overlapping range for accelerated states. At a high level, our +// special state detection might look like this (for leftmost searching, where +// we continue searching even after seeing a match): +// +// byte = input[offset] +// current_state = next_state(current_state, byte) +// offset += 1 +// if current_state <= special.max: +// if current_state == 0: +// # We can never leave a dead state, so this always marks the +// # end of our search. +// return last_match +// if current_state == special.quit_id: +// # A quit state means we give up. If he DFA has no quit state, +// # then special.quit_id == 0 == dead, which is handled by the +// # conditional above. +// return Err(MatchError::quit { byte, offset: offset - 1 }) +// if special.min_match <= current_state <= special.max_match: +// last_match = Some(offset) +// if special.min_accel <= current_state <= special.max_accel: +// offset = accelerate(input, offset) +// last_match = Some(offset) +// elif special.min_start <= current_state <= special.max_start: +// offset = prefilter.find(input, offset) +// if special.min_accel <= current_state <= special.max_accel: +// offset = accelerate(input, offset) +// elif special.min_accel <= current_state <= special.max_accel: +// offset = accelerate(input, offset) +// +// There are some small details left out of the logic above. For example, +// in order to accelerate a state, we need to know which bytes to search for. +// This in turn implies some extra data we need to store in the DFA. To keep +// things compact, we would ideally only store +// +// N = special.max_accel - special.min_accel + 1 +// +// items. But state IDs are premultiplied, which means they are not contiguous. +// So in order to take a state ID and index an array of accelerated structures, +// we need to do: +// +// i = (state_id - special.min_accel) / stride +// +// (N.B. 'stride' is always a power of 2, so the above can be implemented via +// '(state_id - special.min_accel) >> stride2', where 'stride2' is x in +// 2^x=stride.) +// +// Moreover, some of these specialty categories may be empty. For example, +// DFAs are not required to have any match states or any accelerated states. +// In that case, the lower and upper bounds are both set to 0 (the dead state +// ID) and the first `current_state == 0` check subsumes cases where the +// ranges are empty. +// +// Loop unrolling, if applicable, has also been left out of the logic above. +// +// Graphically, the ranges look like this, where asterisks indicate ranges +// that can be empty. Each 'x' is a state. +// +// quit +// dead| +// || +// xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx +// | | | | start | | +// | |-------------| |-------| | +// | match* | | | | +// | | | | | +// | |----------| | | +// | accel* | | +// | | | +// | | | +// |----------------------------|------------------------ +// special non-special* +#[derive(Clone, Copy, Debug)] +pub(crate) struct Special { + /// The identifier of the last special state in a DFA. A state is special + /// if and only if its identifier is less than or equal to `max`. + pub(crate) max: StateID, + /// The identifier of the quit state in a DFA. 
(There is no analogous field + /// for the dead state since the dead state's ID is always zero, regardless + /// of state ID size.) + pub(crate) quit_id: StateID, + /// The identifier of the first match state. + pub(crate) min_match: StateID, + /// The identifier of the last match state. + pub(crate) max_match: StateID, + /// The identifier of the first accelerated state. + pub(crate) min_accel: StateID, + /// The identifier of the last accelerated state. + pub(crate) max_accel: StateID, + /// The identifier of the first start state. + pub(crate) min_start: StateID, + /// The identifier of the last start state. + pub(crate) max_start: StateID, +} + +impl Special { + /// Creates a new set of special ranges for a DFA. All ranges are initially + /// set to only contain the dead state. This is interpreted as an empty + /// range. + #[cfg(feature = "dfa-build")] + pub(crate) fn new() -> Special { + Special { + max: DEAD, + quit_id: DEAD, + min_match: DEAD, + max_match: DEAD, + min_accel: DEAD, + max_accel: DEAD, + min_start: DEAD, + max_start: DEAD, + } + } + + /// Remaps all of the special state identifiers using the function given. + #[cfg(feature = "dfa-build")] + pub(crate) fn remap(&self, map: impl Fn(StateID) -> StateID) -> Special { + Special { + max: map(self.max), + quit_id: map(self.quit_id), + min_match: map(self.min_match), + max_match: map(self.max_match), + min_accel: map(self.min_accel), + max_accel: map(self.max_accel), + min_start: map(self.min_start), + max_start: map(self.max_start), + } + } + + /// Deserialize the given bytes into special state ranges. If the slice + /// given is not big enough, then this returns an error. Similarly, if + /// any of the expected invariants around special state ranges aren't + /// upheld, an error is returned. Note that this does not guarantee that + /// the information returned is correct. + /// + /// Upon success, this returns the number of bytes read in addition to the + /// special state IDs themselves. + pub(crate) fn from_bytes( + mut slice: &[u8], + ) -> Result<(Special, usize), DeserializeError> { + wire::check_slice_len(slice, 8 * StateID::SIZE, "special states")?; + + let mut nread = 0; + let mut read_id = |what| -> Result { + let (id, nr) = wire::try_read_state_id(slice, what)?; + nread += nr; + slice = &slice[StateID::SIZE..]; + Ok(id) + }; + + let max = read_id("special max id")?; + let quit_id = read_id("special quit id")?; + let min_match = read_id("special min match id")?; + let max_match = read_id("special max match id")?; + let min_accel = read_id("special min accel id")?; + let max_accel = read_id("special max accel id")?; + let min_start = read_id("special min start id")?; + let max_start = read_id("special max start id")?; + + let special = Special { + max, + quit_id, + min_match, + max_match, + min_accel, + max_accel, + min_start, + max_start, + }; + special.validate()?; + assert_eq!(nread, special.write_to_len()); + Ok((special, nread)) + } + + /// Validate that the information describing special states satisfies + /// all known invariants. + pub(crate) fn validate(&self) -> Result<(), DeserializeError> { + // Check that both ends of the range are DEAD or neither are. 
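+        // (Recall from `Special::new` above that every range starts out as
+        // DEAD and a fully-DEAD range means "empty", so a half-DEAD range can
+        // only come from a corrupt or adversarial serialization.)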
+ if self.min_match == DEAD && self.max_match != DEAD { + err!("min_match is DEAD, but max_match is not"); + } + if self.min_match != DEAD && self.max_match == DEAD { + err!("max_match is DEAD, but min_match is not"); + } + if self.min_accel == DEAD && self.max_accel != DEAD { + err!("min_accel is DEAD, but max_accel is not"); + } + if self.min_accel != DEAD && self.max_accel == DEAD { + err!("max_accel is DEAD, but min_accel is not"); + } + if self.min_start == DEAD && self.max_start != DEAD { + err!("min_start is DEAD, but max_start is not"); + } + if self.min_start != DEAD && self.max_start == DEAD { + err!("max_start is DEAD, but min_start is not"); + } + + // Check that ranges are well formed. + if self.min_match > self.max_match { + err!("min_match should not be greater than max_match"); + } + if self.min_accel > self.max_accel { + err!("min_accel should not be greater than max_accel"); + } + if self.min_start > self.max_start { + err!("min_start should not be greater than max_start"); + } + + // Check that ranges are ordered with respect to one another. + if self.matches() && self.quit_id >= self.min_match { + err!("quit_id should not be greater than min_match"); + } + if self.accels() && self.quit_id >= self.min_accel { + err!("quit_id should not be greater than min_accel"); + } + if self.starts() && self.quit_id >= self.min_start { + err!("quit_id should not be greater than min_start"); + } + if self.matches() && self.accels() && self.min_accel < self.min_match { + err!("min_match should not be greater than min_accel"); + } + if self.matches() && self.starts() && self.min_start < self.min_match { + err!("min_match should not be greater than min_start"); + } + if self.accels() && self.starts() && self.min_start < self.min_accel { + err!("min_accel should not be greater than min_start"); + } + + // Check that max is at least as big as everything else. + if self.max < self.quit_id { + err!("quit_id should not be greater than max"); + } + if self.max < self.max_match { + err!("max_match should not be greater than max"); + } + if self.max < self.max_accel { + err!("max_accel should not be greater than max"); + } + if self.max < self.max_start { + err!("max_start should not be greater than max"); + } + + Ok(()) + } + + /// Validate that the special state information is compatible with the + /// given state len. + pub(crate) fn validate_state_len( + &self, + len: usize, + stride2: usize, + ) -> Result<(), DeserializeError> { + // We assume that 'validate' has already passed, so we know that 'max' + // is truly the max. So all we need to check is that the max state ID + // is less than the state ID len. The max legal value here is len-1, + // which occurs when there are no non-special states. + if (self.max.as_usize() >> stride2) >= len { + err!("max should not be greater than or equal to state length"); + } + Ok(()) + } + + /// Write the IDs and ranges for special states to the given byte buffer. + /// The buffer given must have enough room to store all data, otherwise + /// this will return an error. The number of bytes written is returned + /// on success. The number of bytes written is guaranteed to be a multiple + /// of 8. 
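+    ///
+    /// Concretely, this writes the eight `StateID` fields above in
+    /// declaration order, i.e., exactly `8 * StateID::SIZE` bytes.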
+    pub(crate) fn write_to<E: Endian>(
+        &self,
+        dst: &mut [u8],
+    ) -> Result<usize, SerializeError> {
+        use crate::util::wire::write_state_id as write;
+
+        if dst.len() < self.write_to_len() {
+            return Err(SerializeError::buffer_too_small("special state ids"));
+        }
+
+        let mut nwrite = 0;
+        nwrite += write::<E>(self.max, &mut dst[nwrite..]);
+        nwrite += write::<E>(self.quit_id, &mut dst[nwrite..]);
+        nwrite += write::<E>(self.min_match, &mut dst[nwrite..]);
+        nwrite += write::<E>(self.max_match, &mut dst[nwrite..]);
+        nwrite += write::<E>(self.min_accel, &mut dst[nwrite..]);
+        nwrite += write::<E>(self.max_accel, &mut dst[nwrite..]);
+        nwrite += write::<E>(self.min_start, &mut dst[nwrite..]);
+        nwrite += write::<E>(self.max_start, &mut dst[nwrite..]);
+
+        assert_eq!(
+            self.write_to_len(),
+            nwrite,
+            "expected to write certain number of bytes",
+        );
+        assert_eq!(
+            nwrite % 8,
+            0,
+            "expected to write multiple of 8 bytes for special states",
+        );
+        Ok(nwrite)
+    }
+
+    /// Returns the total number of bytes written by `write_to`.
+    pub(crate) fn write_to_len(&self) -> usize {
+        8 * StateID::SIZE
+    }
+
+    /// Sets the maximum special state ID based on the current values. This
+    /// should be used once all possible state IDs are set.
+    #[cfg(feature = "dfa-build")]
+    pub(crate) fn set_max(&mut self) {
+        use core::cmp::max;
+        self.max = max(
+            self.quit_id,
+            max(self.max_match, max(self.max_accel, self.max_start)),
+        );
+    }
+
+    /// Sets the maximum special state ID such that starting states are not
+    /// considered "special." This also marks the min/max starting states as
+    /// DEAD such that 'is_start_state' always returns false, even if the state
+    /// is actually a starting state.
+    ///
+    /// This is useful when there is no prefilter set. It will avoid
+    /// ping-ponging between the hot path in the DFA search code and the start
+    /// state handling code, which is typically only useful for executing a
+    /// prefilter.
+    #[cfg(feature = "dfa-build")]
+    pub(crate) fn set_no_special_start_states(&mut self) {
+        use core::cmp::max;
+        self.max = max(self.quit_id, max(self.max_match, self.max_accel));
+        self.min_start = DEAD;
+        self.max_start = DEAD;
+    }
+
+    /// Returns true if and only if the given state ID is a special state.
+    #[inline]
+    pub(crate) fn is_special_state(&self, id: StateID) -> bool {
+        id <= self.max
+    }
+
+    /// Returns true if and only if the given state ID is a dead state.
+    #[inline]
+    pub(crate) fn is_dead_state(&self, id: StateID) -> bool {
+        id == DEAD
+    }
+
+    /// Returns true if and only if the given state ID is a quit state.
+    #[inline]
+    pub(crate) fn is_quit_state(&self, id: StateID) -> bool {
+        !self.is_dead_state(id) && self.quit_id == id
+    }
+
+    /// Returns true if and only if the given state ID is a match state.
+    #[inline]
+    pub(crate) fn is_match_state(&self, id: StateID) -> bool {
+        !self.is_dead_state(id) && self.min_match <= id && id <= self.max_match
+    }
+
+    /// Returns true if and only if the given state ID is an accel state.
+    #[inline]
+    pub(crate) fn is_accel_state(&self, id: StateID) -> bool {
+        !self.is_dead_state(id) && self.min_accel <= id && id <= self.max_accel
+    }
+
+    /// Returns true if and only if the given state ID is a start state.
+    #[inline]
+    pub(crate) fn is_start_state(&self, id: StateID) -> bool {
+        !self.is_dead_state(id) && self.min_start <= id && id <= self.max_start
+    }
+
+    /// Returns the total number of match states for a dense table based DFA.
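+    ///
+    /// Since dense state IDs are premultiplied by the stride, the count is
+    /// recovered by dividing the inclusive ID range by the stride. For
+    /// example (with purely illustrative numbers), `stride = 512`,
+    /// `min_match = 1024` and `max_match = 2048` gives
+    /// `(2048 - 1024 + 512) / 512 = 3` match states.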
+ #[inline] + pub(crate) fn match_len(&self, stride: usize) -> usize { + if self.matches() { + (self.max_match.as_usize() - self.min_match.as_usize() + stride) + / stride + } else { + 0 + } + } + + /// Returns true if and only if there is at least one match state. + #[inline] + pub(crate) fn matches(&self) -> bool { + self.min_match != DEAD + } + + /// Returns the total number of accel states. + #[cfg(feature = "dfa-build")] + pub(crate) fn accel_len(&self, stride: usize) -> usize { + if self.accels() { + (self.max_accel.as_usize() - self.min_accel.as_usize() + stride) + / stride + } else { + 0 + } + } + + /// Returns true if and only if there is at least one accel state. + #[inline] + pub(crate) fn accels(&self) -> bool { + self.min_accel != DEAD + } + + /// Returns true if and only if there is at least one start state. + #[inline] + pub(crate) fn starts(&self) -> bool { + self.min_start != DEAD + } +} diff --git a/vendor/regex-automata/src/dfa/start.rs b/vendor/regex-automata/src/dfa/start.rs new file mode 100644 index 00000000000000..fddc702df5628e --- /dev/null +++ b/vendor/regex-automata/src/dfa/start.rs @@ -0,0 +1,74 @@ +use core::mem::size_of; + +use crate::util::wire::{self, DeserializeError, Endian, SerializeError}; + +/// The kind of anchored starting configurations to support in a DFA. +/// +/// Fully compiled DFAs need to be explicitly configured as to which anchored +/// starting configurations to support. The reason for not just supporting +/// everything unconditionally is that it can use more resources (such as +/// memory and build time). The downside of this is that if you try to execute +/// a search using an [`Anchored`](crate::Anchored) mode that is not supported +/// by the DFA, then the search will return an error. +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub enum StartKind { + /// Support both anchored and unanchored searches. + Both, + /// Support only unanchored searches. Requesting an anchored search will + /// panic. + /// + /// Note that even if an unanchored search is requested, the pattern itself + /// may still be anchored. For example, `^abc` will only match `abc` at the + /// start of a haystack. This will remain true, even if the regex engine + /// only supported unanchored searches. + Unanchored, + /// Support only anchored searches. Requesting an unanchored search will + /// panic. 
+    Anchored,
+}
+
+impl StartKind {
+    pub(crate) fn from_bytes(
+        slice: &[u8],
+    ) -> Result<(StartKind, usize), DeserializeError> {
+        wire::check_slice_len(slice, size_of::<u32>(), "start kind bytes")?;
+        let (n, nr) = wire::try_read_u32(slice, "start kind integer")?;
+        match n {
+            0 => Ok((StartKind::Both, nr)),
+            1 => Ok((StartKind::Unanchored, nr)),
+            2 => Ok((StartKind::Anchored, nr)),
+            _ => Err(DeserializeError::generic("unrecognized start kind")),
+        }
+    }
+
+    pub(crate) fn write_to<E: Endian>(
+        &self,
+        dst: &mut [u8],
+    ) -> Result<usize, SerializeError> {
+        let nwrite = self.write_to_len();
+        if dst.len() < nwrite {
+            return Err(SerializeError::buffer_too_small("start kind"));
+        }
+        let n = match *self {
+            StartKind::Both => 0,
+            StartKind::Unanchored => 1,
+            StartKind::Anchored => 2,
+        };
+        E::write_u32(n, dst);
+        Ok(nwrite)
+    }
+
+    pub(crate) fn write_to_len(&self) -> usize {
+        size_of::<u32>()
+    }
+
+    #[cfg_attr(feature = "perf-inline", inline(always))]
+    pub(crate) fn has_unanchored(&self) -> bool {
+        matches!(*self, StartKind::Both | StartKind::Unanchored)
+    }
+
+    #[cfg_attr(feature = "perf-inline", inline(always))]
+    pub(crate) fn has_anchored(&self) -> bool {
+        matches!(*self, StartKind::Both | StartKind::Anchored)
+    }
+}
diff --git a/vendor/regex-automata/src/hybrid/dfa.rs b/vendor/regex-automata/src/hybrid/dfa.rs
new file mode 100644
index 00000000000000..22893d7a328d1b
--- /dev/null
+++ b/vendor/regex-automata/src/hybrid/dfa.rs
@@ -0,0 +1,4434 @@
+/*!
+Types and routines specific to lazy DFAs.
+
+This module is the home of [`hybrid::dfa::DFA`](DFA).
+
+This module also contains a [`hybrid::dfa::Builder`](Builder) and a
+[`hybrid::dfa::Config`](Config) for configuring and building a lazy DFA.
+*/
+
+use core::{iter, mem::size_of};
+
+use alloc::vec::Vec;
+
+use crate::{
+    hybrid::{
+        error::{BuildError, CacheError, StartError},
+        id::{LazyStateID, LazyStateIDError},
+        search,
+    },
+    nfa::thompson,
+    util::{
+        alphabet::{self, ByteClasses, ByteSet},
+        determinize::{self, State, StateBuilderEmpty, StateBuilderNFA},
+        empty,
+        prefilter::Prefilter,
+        primitives::{PatternID, StateID as NFAStateID},
+        search::{
+            Anchored, HalfMatch, Input, MatchError, MatchKind, PatternSet,
+        },
+        sparse_set::SparseSets,
+        start::{self, Start, StartByteMap},
+    },
+};
+
+/// The minimum number of states that a lazy DFA's cache size must support.
+///
+/// This is checked at time of construction to ensure that at least some small
+/// number of states can fit in the given capacity allotment. If we can't fit
+/// at least this number of states, then the thinking is that it's pretty
+/// senseless to use the lazy DFA. More to the point, parts of the code do
+/// assume that the cache can fit at least some small number of states.
+const MIN_STATES: usize = SENTINEL_STATES + 2;
+
+/// The number of "sentinel" states that get added to every lazy DFA.
+///
+/// These are special states indicating status conditions of a search: unknown,
+/// dead and quit. These states in particular also use zero NFA states, so
+/// their memory usage is quite small. This is relevant for computing the
+/// minimum memory needed for a lazy DFA cache.
+const SENTINEL_STATES: usize = 3;
+
+/// A hybrid NFA/DFA (also called a "lazy DFA") for regex searching.
+///
+/// A lazy DFA is a DFA that builds itself at search time. It otherwise has
+/// very similar characteristics as a [`dense::DFA`](crate::dfa::dense::DFA).
+/// Indeed, both support precisely the same regex features with precisely the
+/// same semantics.
+/// +/// Where as a `dense::DFA` must be completely built to handle any input before +/// it may be used for search, a lazy DFA starts off effectively empty. During +/// a search, a lazy DFA will build itself depending on whether it has already +/// computed the next transition or not. If it has, then it looks a lot like +/// a `dense::DFA` internally: it does a very fast table based access to find +/// the next transition. Otherwise, if the state hasn't been computed, then it +/// does determinization _for that specific transition_ to compute the next DFA +/// state. +/// +/// The main selling point of a lazy DFA is that, in practice, it has +/// the performance profile of a `dense::DFA` without the weakness of it +/// taking worst case exponential time to build. Indeed, for each byte of +/// input, the lazy DFA will construct as most one new DFA state. Thus, a +/// lazy DFA achieves worst case `O(mn)` time for regex search (where `m ~ +/// pattern.len()` and `n ~ haystack.len()`). +/// +/// The main downsides of a lazy DFA are: +/// +/// 1. It requires mutable "cache" space during search. This is where the +/// transition table, among other things, is stored. +/// 2. In pathological cases (e.g., if the cache is too small), it will run +/// out of room and either require a bigger cache capacity or will repeatedly +/// clear the cache and thus repeatedly regenerate DFA states. Overall, this +/// will tend to be slower than a typical NFA simulation. +/// +/// # Capabilities +/// +/// Like a `dense::DFA`, a single lazy DFA fundamentally supports the following +/// operations: +/// +/// 1. Detection of a match. +/// 2. Location of the end of a match. +/// 3. In the case of a lazy DFA with multiple patterns, which pattern matched +/// is reported as well. +/// +/// A notable absence from the above list of capabilities is the location of +/// the *start* of a match. In order to provide both the start and end of +/// a match, *two* lazy DFAs are required. This functionality is provided by a +/// [`Regex`](crate::hybrid::regex::Regex). +/// +/// # Example +/// +/// This shows how to build a lazy DFA with the default configuration and +/// execute a search. Notice how, in contrast to a `dense::DFA`, we must create +/// a cache and pass it to our search routine. +/// +/// ``` +/// use regex_automata::{hybrid::dfa::DFA, HalfMatch, Input}; +/// +/// let dfa = DFA::new("foo[0-9]+")?; +/// let mut cache = dfa.create_cache(); +/// +/// let expected = Some(HalfMatch::must(0, 8)); +/// assert_eq!(expected, dfa.try_search_fwd( +/// &mut cache, &Input::new("foo12345"))?, +/// ); +/// # Ok::<(), Box>(()) +/// ``` +#[derive(Clone, Debug)] +pub struct DFA { + config: Config, + nfa: thompson::NFA, + stride2: usize, + start_map: StartByteMap, + classes: ByteClasses, + quitset: ByteSet, + cache_capacity: usize, +} + +impl DFA { + /// Parse the given regular expression using a default configuration and + /// return the corresponding lazy DFA. + /// + /// If you want a non-default configuration, then use the [`Builder`] to + /// set your own configuration. 
+ /// + /// # Example + /// + /// ``` + /// use regex_automata::{hybrid::dfa::DFA, HalfMatch, Input}; + /// + /// let dfa = DFA::new("foo[0-9]+bar")?; + /// let mut cache = dfa.create_cache(); + /// + /// let expected = HalfMatch::must(0, 11); + /// assert_eq!( + /// Some(expected), + /// dfa.try_search_fwd(&mut cache, &Input::new("foo12345bar"))?, + /// ); + /// # Ok::<(), Box>(()) + /// ``` + #[cfg(feature = "syntax")] + pub fn new(pattern: &str) -> Result { + DFA::builder().build(pattern) + } + + /// Parse the given regular expressions using a default configuration and + /// return the corresponding lazy multi-DFA. + /// + /// If you want a non-default configuration, then use the [`Builder`] to + /// set your own configuration. + /// + /// # Example + /// + /// ``` + /// use regex_automata::{hybrid::dfa::DFA, HalfMatch, Input}; + /// + /// let dfa = DFA::new_many(&["[0-9]+", "[a-z]+"])?; + /// let mut cache = dfa.create_cache(); + /// + /// let expected = HalfMatch::must(1, 3); + /// assert_eq!( + /// Some(expected), + /// dfa.try_search_fwd(&mut cache, &Input::new("foo12345bar"))?, + /// ); + /// # Ok::<(), Box>(()) + /// ``` + #[cfg(feature = "syntax")] + pub fn new_many>(patterns: &[P]) -> Result { + DFA::builder().build_many(patterns) + } + + /// Create a new lazy DFA that matches every input. + /// + /// # Example + /// + /// ``` + /// use regex_automata::{hybrid::dfa::DFA, HalfMatch, Input}; + /// + /// let dfa = DFA::always_match()?; + /// let mut cache = dfa.create_cache(); + /// + /// let expected = HalfMatch::must(0, 0); + /// assert_eq!(Some(expected), dfa.try_search_fwd( + /// &mut cache, &Input::new(""))?, + /// ); + /// assert_eq!(Some(expected), dfa.try_search_fwd( + /// &mut cache, &Input::new("foo"))?, + /// ); + /// # Ok::<(), Box>(()) + /// ``` + pub fn always_match() -> Result { + let nfa = thompson::NFA::always_match(); + Builder::new().build_from_nfa(nfa) + } + + /// Create a new lazy DFA that never matches any input. + /// + /// # Example + /// + /// ``` + /// use regex_automata::{hybrid::dfa::DFA, Input}; + /// + /// let dfa = DFA::never_match()?; + /// let mut cache = dfa.create_cache(); + /// + /// assert_eq!(None, dfa.try_search_fwd(&mut cache, &Input::new(""))?); + /// assert_eq!(None, dfa.try_search_fwd(&mut cache, &Input::new("foo"))?); + /// # Ok::<(), Box>(()) + /// ``` + pub fn never_match() -> Result { + let nfa = thompson::NFA::never_match(); + Builder::new().build_from_nfa(nfa) + } + + /// Return a default configuration for a `DFA`. + /// + /// This is a convenience routine to avoid needing to import the [`Config`] + /// type when customizing the construction of a lazy DFA. + /// + /// # Example + /// + /// This example shows how to build a lazy DFA that heuristically supports + /// Unicode word boundaries. + /// + /// ``` + /// # if cfg!(miri) { return Ok(()); } // miri takes too long + /// use regex_automata::{hybrid::dfa::DFA, HalfMatch, MatchError, Input}; + /// + /// let re = DFA::builder() + /// .configure(DFA::config().unicode_word_boundary(true)) + /// .build(r"\b\w+\b")?; + /// let mut cache = re.create_cache(); + /// + /// // Since our haystack is all ASCII, the DFA search sees then and knows + /// // it is legal to interpret Unicode word boundaries as ASCII word + /// // boundaries. 
+ /// let input = Input::new("!!foo!!"); + /// let expected = HalfMatch::must(0, 5); + /// assert_eq!(Some(expected), re.try_search_fwd(&mut cache, &input)?); + /// + /// // But if our haystack contains non-ASCII, then the search will fail + /// // with an error. + /// let input = Input::new("!!βββ!!"); + /// let expected = MatchError::quit(b'\xCE', 2); + /// assert_eq!(Err(expected), re.try_search_fwd(&mut cache, &input)); + /// + /// # Ok::<(), Box>(()) + /// ``` + pub fn config() -> Config { + Config::new() + } + + /// Return a builder for configuring the construction of a `Regex`. + /// + /// This is a convenience routine to avoid needing to import the + /// [`Builder`] type in common cases. + /// + /// # Example + /// + /// This example shows how to use the builder to disable UTF-8 mode + /// everywhere for lazy DFAs. + /// + /// ``` + /// # if cfg!(miri) { return Ok(()); } // miri takes too long + /// use regex_automata::{hybrid::dfa::DFA, util::syntax, HalfMatch, Input}; + /// + /// let re = DFA::builder() + /// .syntax(syntax::Config::new().utf8(false)) + /// .build(r"foo(?-u:[^b])ar.*")?; + /// let mut cache = re.create_cache(); + /// + /// let input = Input::new(b"\xFEfoo\xFFarzz\xE2\x98\xFF\n"); + /// let expected = Some(HalfMatch::must(0, 9)); + /// let got = re.try_search_fwd(&mut cache, &input)?; + /// assert_eq!(expected, got); + /// + /// # Ok::<(), Box>(()) + /// ``` + pub fn builder() -> Builder { + Builder::new() + } + + /// Create a new cache for this lazy DFA. + /// + /// The cache returned should only be used for searches for this + /// lazy DFA. If you want to reuse the cache for another DFA, then + /// you must call [`Cache::reset`] with that DFA (or, equivalently, + /// [`DFA::reset_cache`]). + pub fn create_cache(&self) -> Cache { + Cache::new(self) + } + + /// Reset the given cache such that it can be used for searching with the + /// this lazy DFA (and only this DFA). + /// + /// A cache reset permits reusing memory already allocated in this cache + /// with a different lazy DFA. + /// + /// Resetting a cache sets its "clear count" to 0. This is relevant if the + /// lazy DFA has been configured to "give up" after it has cleared the + /// cache a certain number of times. + /// + /// Any lazy state ID generated by the cache prior to resetting it is + /// invalid after the reset. + /// + /// # Example + /// + /// This shows how to re-purpose a cache for use with a different DFA. + /// + /// ``` + /// # if cfg!(miri) { return Ok(()); } // miri takes too long + /// use regex_automata::{hybrid::dfa::DFA, HalfMatch, Input}; + /// + /// let dfa1 = DFA::new(r"\w")?; + /// let dfa2 = DFA::new(r"\W")?; + /// + /// let mut cache = dfa1.create_cache(); + /// assert_eq!( + /// Some(HalfMatch::must(0, 2)), + /// dfa1.try_search_fwd(&mut cache, &Input::new("Δ"))?, + /// ); + /// + /// // Using 'cache' with dfa2 is not allowed. It may result in panics or + /// // incorrect results. In order to re-purpose the cache, we must reset + /// // it with the DFA we'd like to use it with. + /// // + /// // Similarly, after this reset, using the cache with 'dfa1' is also not + /// // allowed. + /// dfa2.reset_cache(&mut cache); + /// assert_eq!( + /// Some(HalfMatch::must(0, 3)), + /// dfa2.try_search_fwd(&mut cache, &Input::new("☃"))?, + /// ); + /// + /// # Ok::<(), Box>(()) + /// ``` + pub fn reset_cache(&self, cache: &mut Cache) { + Lazy::new(self, cache).reset_cache() + } + + /// Returns the total number of patterns compiled into this lazy DFA. 
+ /// + /// In the case of a DFA that contains no patterns, this returns `0`. + /// + /// # Example + /// + /// This example shows the pattern length for a DFA that never matches: + /// + /// ``` + /// use regex_automata::hybrid::dfa::DFA; + /// + /// let dfa = DFA::never_match()?; + /// assert_eq!(dfa.pattern_len(), 0); + /// # Ok::<(), Box>(()) + /// ``` + /// + /// And another example for a DFA that matches at every position: + /// + /// ``` + /// use regex_automata::hybrid::dfa::DFA; + /// + /// let dfa = DFA::always_match()?; + /// assert_eq!(dfa.pattern_len(), 1); + /// # Ok::<(), Box>(()) + /// ``` + /// + /// And finally, a DFA that was constructed from multiple patterns: + /// + /// ``` + /// use regex_automata::hybrid::dfa::DFA; + /// + /// let dfa = DFA::new_many(&["[0-9]+", "[a-z]+", "[A-Z]+"])?; + /// assert_eq!(dfa.pattern_len(), 3); + /// # Ok::<(), Box>(()) + /// ``` + pub fn pattern_len(&self) -> usize { + self.nfa.pattern_len() + } + + /// Returns the equivalence classes that make up the alphabet for this DFA. + /// + /// Unless [`Config::byte_classes`] was disabled, it is possible that + /// multiple distinct bytes are grouped into the same equivalence class + /// if it is impossible for them to discriminate between a match and a + /// non-match. This has the effect of reducing the overall alphabet size + /// and in turn potentially substantially reducing the size of the DFA's + /// transition table. + /// + /// The downside of using equivalence classes like this is that every state + /// transition will automatically use this map to convert an arbitrary + /// byte to its corresponding equivalence class. In practice this has a + /// negligible impact on performance. + pub fn byte_classes(&self) -> &ByteClasses { + &self.classes + } + + /// Returns this lazy DFA's configuration. + pub fn get_config(&self) -> &Config { + &self.config + } + + /// Returns a reference to the underlying NFA. + pub fn get_nfa(&self) -> &thompson::NFA { + &self.nfa + } + + /// Returns the stride, as a base-2 exponent, required for these + /// equivalence classes. + /// + /// The stride is always the smallest power of 2 that is greater than or + /// equal to the alphabet length. This is done so that converting between + /// state IDs and indices can be done with shifts alone, which is much + /// faster than integer division. + fn stride2(&self) -> usize { + self.stride2 + } + + /// Returns the total stride for every state in this lazy DFA. This + /// corresponds to the total number of transitions used by each state in + /// this DFA's transition table. + fn stride(&self) -> usize { + 1 << self.stride2() + } + + /// Returns the memory usage, in bytes, of this lazy DFA. + /// + /// This does **not** include the stack size used up by this lazy DFA. To + /// compute that, use `std::mem::size_of::()`. This also does not + /// include the size of the `Cache` used. + /// + /// This also does not include any heap memory used by the NFA inside of + /// this hybrid NFA/DFA. This is because the NFA's ownership is shared, and + /// thus not owned by this hybrid NFA/DFA. More practically, several regex + /// engines in this crate embed an NFA, and reporting the NFA's memory + /// usage in all of them would likely result in reporting higher heap + /// memory than is actually used. + pub fn memory_usage(&self) -> usize { + // The only thing that uses heap memory in a DFA is the NFA. 
But the + // NFA has shared ownership, so reporting its memory as part of the + // hybrid DFA is likely to lead to double-counting the NFA memory + // somehow. In particular, this DFA does not really own an NFA, so + // including it in the DFA's memory usage doesn't seem semantically + // correct. + 0 + } +} + +impl DFA { + /// Executes a forward search and returns the end position of the leftmost + /// match that is found. If no match exists, then `None` is returned. + /// + /// In particular, this method continues searching even after it enters + /// a match state. The search only terminates once it has reached the + /// end of the input or when it has entered a dead or quit state. Upon + /// termination, the position of the last byte seen while still in a match + /// state is returned. + /// + /// # Errors + /// + /// This routine errors if the search could not complete. This can occur + /// in a number of circumstances: + /// + /// * The configuration of the lazy DFA may permit it to "quit" the search. + /// For example, setting quit bytes or enabling heuristic support for + /// Unicode word boundaries. The default configuration does not enable any + /// option that could result in the lazy DFA quitting. + /// * The configuration of the lazy DFA may also permit it to "give up" + /// on a search if it makes ineffective use of its transition table + /// cache. The default configuration does not enable this by default, + /// although it is typically a good idea to. + /// * When the provided `Input` configuration is not supported. For + /// example, by providing an unsupported anchor mode. + /// + /// When a search returns an error, callers cannot know whether a match + /// exists or not. + /// + /// # Example + /// + /// This example shows how to run a basic search. + /// + /// ``` + /// use regex_automata::{hybrid::dfa::DFA, HalfMatch, Input}; + /// + /// let dfa = DFA::new("foo[0-9]+")?; + /// let mut cache = dfa.create_cache(); + /// let expected = HalfMatch::must(0, 8); + /// assert_eq!(Some(expected), dfa.try_search_fwd( + /// &mut cache, &Input::new("foo12345"))?, + /// ); + /// + /// // Even though a match is found after reading the first byte (`a`), + /// // the leftmost first match semantics demand that we find the earliest + /// // match that prefers earlier parts of the pattern over later parts. + /// let dfa = DFA::new("abc|a")?; + /// let mut cache = dfa.create_cache(); + /// let expected = HalfMatch::must(0, 3); + /// assert_eq!(Some(expected), dfa.try_search_fwd( + /// &mut cache, &Input::new("abc"))?, + /// ); + /// + /// # Ok::<(), Box>(()) + /// ``` + /// + /// # Example: specific pattern search + /// + /// This example shows how to build a lazy multi-DFA that permits searching + /// for specific patterns. + /// + /// ``` + /// use regex_automata::{ + /// hybrid::dfa::DFA, + /// Anchored, HalfMatch, PatternID, Input, + /// }; + /// + /// let dfa = DFA::builder() + /// .configure(DFA::config().starts_for_each_pattern(true)) + /// .build_many(&["[a-z0-9]{6}", "[a-z][a-z0-9]{5}"])?; + /// let mut cache = dfa.create_cache(); + /// let haystack = "foo123"; + /// + /// // Since we are using the default leftmost-first match and both + /// // patterns match at the same starting position, only the first pattern + /// // will be returned in this case when doing a search for any of the + /// // patterns. 
+ /// let expected = Some(HalfMatch::must(0, 6)); + /// let got = dfa.try_search_fwd(&mut cache, &Input::new(haystack))?; + /// assert_eq!(expected, got); + /// + /// // But if we want to check whether some other pattern matches, then we + /// // can provide its pattern ID. + /// let expected = Some(HalfMatch::must(1, 6)); + /// let input = Input::new(haystack) + /// .anchored(Anchored::Pattern(PatternID::must(1))); + /// let got = dfa.try_search_fwd(&mut cache, &input)?; + /// assert_eq!(expected, got); + /// + /// # Ok::<(), Box>(()) + /// ``` + /// + /// # Example: specifying the bounds of a search + /// + /// This example shows how providing the bounds of a search can produce + /// different results than simply sub-slicing the haystack. + /// + /// ``` + /// use regex_automata::{hybrid::dfa::DFA, HalfMatch, Input}; + /// + /// // N.B. We disable Unicode here so that we use a simple ASCII word + /// // boundary. Alternatively, we could enable heuristic support for + /// // Unicode word boundaries since our haystack is pure ASCII. + /// let dfa = DFA::new(r"(?-u)\b[0-9]{3}\b")?; + /// let mut cache = dfa.create_cache(); + /// let haystack = "foo123bar"; + /// + /// // Since we sub-slice the haystack, the search doesn't know about the + /// // larger context and assumes that `123` is surrounded by word + /// // boundaries. And of course, the match position is reported relative + /// // to the sub-slice as well, which means we get `3` instead of `6`. + /// let expected = Some(HalfMatch::must(0, 3)); + /// let got = dfa.try_search_fwd( + /// &mut cache, + /// &Input::new(&haystack[3..6]), + /// )?; + /// assert_eq!(expected, got); + /// + /// // But if we provide the bounds of the search within the context of the + /// // entire haystack, then the search can take the surrounding context + /// // into account. (And if we did find a match, it would be reported + /// // as a valid offset into `haystack` instead of its sub-slice.) + /// let expected = None; + /// let got = dfa.try_search_fwd( + /// &mut cache, + /// &Input::new(haystack).range(3..6), + /// )?; + /// assert_eq!(expected, got); + /// + /// # Ok::<(), Box>(()) + /// ``` + #[inline] + pub fn try_search_fwd( + &self, + cache: &mut Cache, + input: &Input<'_>, + ) -> Result, MatchError> { + let utf8empty = self.get_nfa().has_empty() && self.get_nfa().is_utf8(); + let hm = match search::find_fwd(self, cache, input)? { + None => return Ok(None), + Some(hm) if !utf8empty => return Ok(Some(hm)), + Some(hm) => hm, + }; + // We get to this point when we know our DFA can match the empty string + // AND when UTF-8 mode is enabled. In this case, we skip any matches + // whose offset splits a codepoint. Such a match is necessarily a + // zero-width match, because UTF-8 mode requires the underlying NFA + // to be built such that all non-empty matches span valid UTF-8. + // Therefore, any match that ends in the middle of a codepoint cannot + // be part of a span of valid UTF-8 and thus must be an empty match. + // In such cases, we skip it, so as not to report matches that split a + // codepoint. + // + // Note that this is not a checked assumption. Callers *can* provide an + // NFA with UTF-8 mode enabled but produces non-empty matches that span + // invalid UTF-8. But doing so is documented to result in unspecified + // behavior. 
+ empty::skip_splits_fwd(input, hm, hm.offset(), |input| { + let got = search::find_fwd(self, cache, input)?; + Ok(got.map(|hm| (hm, hm.offset()))) + }) + } + + /// Executes a reverse search and returns the start of the position of the + /// leftmost match that is found. If no match exists, then `None` is + /// returned. + /// + /// # Errors + /// + /// This routine errors if the search could not complete. This can occur + /// in a number of circumstances: + /// + /// * The configuration of the lazy DFA may permit it to "quit" the search. + /// For example, setting quit bytes or enabling heuristic support for + /// Unicode word boundaries. The default configuration does not enable any + /// option that could result in the lazy DFA quitting. + /// * The configuration of the lazy DFA may also permit it to "give up" + /// on a search if it makes ineffective use of its transition table + /// cache. The default configuration does not enable this by default, + /// although it is typically a good idea to. + /// * When the provided `Input` configuration is not supported. For + /// example, by providing an unsupported anchor mode. + /// + /// When a search returns an error, callers cannot know whether a match + /// exists or not. + /// + /// # Example + /// + /// This routine is principally useful when used in + /// conjunction with the + /// [`nfa::thompson::Config::reverse`](crate::nfa::thompson::Config::reverse) + /// configuration. In general, it's unlikely to be correct to use both + /// `try_search_fwd` and `try_search_rev` with the same DFA since any + /// particular DFA will only support searching in one direction with + /// respect to the pattern. + /// + /// ``` + /// use regex_automata::{ + /// nfa::thompson, + /// hybrid::dfa::DFA, + /// HalfMatch, Input, + /// }; + /// + /// let dfa = DFA::builder() + /// .thompson(thompson::Config::new().reverse(true)) + /// .build("foo[0-9]+")?; + /// let mut cache = dfa.create_cache(); + /// let expected = HalfMatch::must(0, 0); + /// assert_eq!( + /// Some(expected), + /// dfa.try_search_rev(&mut cache, &Input::new("foo12345"))?, + /// ); + /// + /// // Even though a match is found after reading the last byte (`c`), + /// // the leftmost first match semantics demand that we find the earliest + /// // match that prefers earlier parts of the pattern over latter parts. + /// let dfa = DFA::builder() + /// .thompson(thompson::Config::new().reverse(true)) + /// .build("abc|c")?; + /// let mut cache = dfa.create_cache(); + /// let expected = HalfMatch::must(0, 0); + /// assert_eq!(Some(expected), dfa.try_search_rev( + /// &mut cache, &Input::new("abc"))?, + /// ); + /// + /// # Ok::<(), Box>(()) + /// ``` + /// + /// # Example: UTF-8 mode + /// + /// This examples demonstrates that UTF-8 mode applies to reverse + /// DFAs. When UTF-8 mode is enabled in the underlying NFA, then all + /// matches reported must correspond to valid UTF-8 spans. This includes + /// prohibiting zero-width matches that split a codepoint. + /// + /// UTF-8 mode is enabled by default. Notice below how the only zero-width + /// matches reported are those at UTF-8 boundaries: + /// + /// ``` + /// use regex_automata::{ + /// hybrid::dfa::DFA, + /// nfa::thompson, + /// HalfMatch, Input, MatchKind, + /// }; + /// + /// let dfa = DFA::builder() + /// .thompson(thompson::Config::new().reverse(true)) + /// .build(r"")?; + /// let mut cache = dfa.create_cache(); + /// + /// // Run the reverse DFA to collect all matches. 
+ /// let mut input = Input::new("☃"); + /// let mut matches = vec![]; + /// loop { + /// match dfa.try_search_rev(&mut cache, &input)? { + /// None => break, + /// Some(hm) => { + /// matches.push(hm); + /// if hm.offset() == 0 || input.end() == 0 { + /// break; + /// } else if hm.offset() < input.end() { + /// input.set_end(hm.offset()); + /// } else { + /// // This is only necessary to handle zero-width + /// // matches, which of course occur in this example. + /// // Without this, the search would never advance + /// // backwards beyond the initial match. + /// input.set_end(input.end() - 1); + /// } + /// } + /// } + /// } + /// + /// // No matches split a codepoint. + /// let expected = vec![ + /// HalfMatch::must(0, 3), + /// HalfMatch::must(0, 0), + /// ]; + /// assert_eq!(expected, matches); + /// + /// # Ok::<(), Box>(()) + /// ``` + /// + /// Now let's look at the same example, but with UTF-8 mode on the + /// underlying NFA disabled: + /// + /// ``` + /// use regex_automata::{ + /// hybrid::dfa::DFA, + /// nfa::thompson, + /// HalfMatch, Input, MatchKind, + /// }; + /// + /// let dfa = DFA::builder() + /// .thompson(thompson::Config::new().reverse(true).utf8(false)) + /// .build(r"")?; + /// let mut cache = dfa.create_cache(); + /// + /// // Run the reverse DFA to collect all matches. + /// let mut input = Input::new("☃"); + /// let mut matches = vec![]; + /// loop { + /// match dfa.try_search_rev(&mut cache, &input)? { + /// None => break, + /// Some(hm) => { + /// matches.push(hm); + /// if hm.offset() == 0 || input.end() == 0 { + /// break; + /// } else if hm.offset() < input.end() { + /// input.set_end(hm.offset()); + /// } else { + /// // This is only necessary to handle zero-width + /// // matches, which of course occur in this example. + /// // Without this, the search would never advance + /// // backwards beyond the initial match. + /// input.set_end(input.end() - 1); + /// } + /// } + /// } + /// } + /// + /// // No matches split a codepoint. + /// let expected = vec![ + /// HalfMatch::must(0, 3), + /// HalfMatch::must(0, 2), + /// HalfMatch::must(0, 1), + /// HalfMatch::must(0, 0), + /// ]; + /// assert_eq!(expected, matches); + /// + /// # Ok::<(), Box>(()) + /// ``` + #[inline] + pub fn try_search_rev( + &self, + cache: &mut Cache, + input: &Input<'_>, + ) -> Result, MatchError> { + let utf8empty = self.get_nfa().has_empty() && self.get_nfa().is_utf8(); + let hm = match search::find_rev(self, cache, input)? { + None => return Ok(None), + Some(hm) if !utf8empty => return Ok(Some(hm)), + Some(hm) => hm, + }; + empty::skip_splits_rev(input, hm, hm.offset(), |input| { + let got = search::find_rev(self, cache, input)?; + Ok(got.map(|hm| (hm, hm.offset()))) + }) + } + + /// Executes an overlapping forward search and returns the end position of + /// matches as they are found. If no match exists, then `None` is returned. + /// + /// This routine is principally only useful when searching for multiple + /// patterns on inputs where multiple patterns may match the same regions + /// of text. In particular, callers must preserve the automaton's search + /// state from prior calls so that the implementation knows where the last + /// match occurred. + /// + /// When using this routine to implement an iterator of overlapping + /// matches, the `start` of the search should remain invariant throughout + /// iteration. The `OverlappingState` given to the search will keep track + /// of the current position of the search. 
(This is because multiple + /// matches may be reported at the same position, so only the search + /// implementation itself knows when to advance the position.) + /// + /// If for some reason you want the search to forget about its previous + /// state and restart the search at a particular position, then setting the + /// state to [`OverlappingState::start`] will accomplish that. + /// + /// # Errors + /// + /// This routine errors if the search could not complete. This can occur + /// in a number of circumstances: + /// + /// * The configuration of the lazy DFA may permit it to "quit" the search. + /// For example, setting quit bytes or enabling heuristic support for + /// Unicode word boundaries. The default configuration does not enable any + /// option that could result in the lazy DFA quitting. + /// * The configuration of the lazy DFA may also permit it to "give up" + /// on a search if it makes ineffective use of its transition table + /// cache. The default configuration does not enable this by default, + /// although it is typically a good idea to. + /// * When the provided `Input` configuration is not supported. For + /// example, by providing an unsupported anchor mode. + /// + /// When a search returns an error, callers cannot know whether a match + /// exists or not. + /// + /// # Example + /// + /// This example shows how to run a basic overlapping search. Notice + /// that we build the automaton with a `MatchKind::All` configuration. + /// Overlapping searches are unlikely to work as one would expect when + /// using the default `MatchKind::LeftmostFirst` match semantics, since + /// leftmost-first matching is fundamentally incompatible with overlapping + /// searches. Namely, overlapping searches need to report matches as they + /// are seen, where as leftmost-first searches will continue searching even + /// after a match has been observed in order to find the conventional end + /// position of the match. More concretely, leftmost-first searches use + /// dead states to terminate a search after a specific match can no longer + /// be extended. Overlapping searches instead do the opposite by continuing + /// the search to find totally new matches (potentially of other patterns). + /// + /// ``` + /// # if cfg!(miri) { return Ok(()); } // miri takes too long + /// use regex_automata::{ + /// hybrid::dfa::{DFA, OverlappingState}, + /// HalfMatch, Input, MatchKind, + /// }; + /// + /// let dfa = DFA::builder() + /// .configure(DFA::config().match_kind(MatchKind::All)) + /// .build_many(&[r"\w+$", r"\S+$"])?; + /// let mut cache = dfa.create_cache(); + /// + /// let haystack = "@foo"; + /// let mut state = OverlappingState::start(); + /// + /// let expected = Some(HalfMatch::must(1, 4)); + /// dfa.try_search_overlapping_fwd( + /// &mut cache, &Input::new(haystack), &mut state, + /// )?; + /// assert_eq!(expected, state.get_match()); + /// + /// // The first pattern also matches at the same position, so re-running + /// // the search will yield another match. Notice also that the first + /// // pattern is returned after the second. This is because the second + /// // pattern begins its match before the first, is therefore an earlier + /// // match and is thus reported first. 
+ /// let expected = Some(HalfMatch::must(0, 4)); + /// dfa.try_search_overlapping_fwd( + /// &mut cache, &Input::new(haystack), &mut state, + /// )?; + /// assert_eq!(expected, state.get_match()); + /// + /// # Ok::<(), Box>(()) + /// ``` + #[inline] + pub fn try_search_overlapping_fwd( + &self, + cache: &mut Cache, + input: &Input<'_>, + state: &mut OverlappingState, + ) -> Result<(), MatchError> { + let utf8empty = self.get_nfa().has_empty() && self.get_nfa().is_utf8(); + search::find_overlapping_fwd(self, cache, input, state)?; + match state.get_match() { + None => Ok(()), + Some(_) if !utf8empty => Ok(()), + Some(_) => skip_empty_utf8_splits_overlapping( + input, + state, + |input, state| { + search::find_overlapping_fwd(self, cache, input, state) + }, + ), + } + } + + /// Executes a reverse overlapping search and returns the start of the + /// position of the leftmost match that is found. If no match exists, then + /// `None` is returned. + /// + /// When using this routine to implement an iterator of overlapping + /// matches, the `start` of the search should remain invariant throughout + /// iteration. The `OverlappingState` given to the search will keep track + /// of the current position of the search. (This is because multiple + /// matches may be reported at the same position, so only the search + /// implementation itself knows when to advance the position.) + /// + /// If for some reason you want the search to forget about its previous + /// state and restart the search at a particular position, then setting the + /// state to [`OverlappingState::start`] will accomplish that. + /// + /// # Errors + /// + /// This routine errors if the search could not complete. This can occur + /// in a number of circumstances: + /// + /// * The configuration of the lazy DFA may permit it to "quit" the search. + /// For example, setting quit bytes or enabling heuristic support for + /// Unicode word boundaries. The default configuration does not enable any + /// option that could result in the lazy DFA quitting. + /// * The configuration of the lazy DFA may also permit it to "give up" + /// on a search if it makes ineffective use of its transition table + /// cache. The default configuration does not enable this by default, + /// although it is typically a good idea to. + /// * When the provided `Input` configuration is not supported. For + /// example, by providing an unsupported anchor mode. + /// + /// When a search returns an error, callers cannot know whether a match + /// exists or not. + /// + /// # Example: UTF-8 mode + /// + /// This examples demonstrates that UTF-8 mode applies to reverse + /// DFAs. When UTF-8 mode is enabled in the underlying NFA, then all + /// matches reported must correspond to valid UTF-8 spans. This includes + /// prohibiting zero-width matches that split a codepoint. + /// + /// UTF-8 mode is enabled by default. Notice below how the only zero-width + /// matches reported are those at UTF-8 boundaries: + /// + /// ``` + /// use regex_automata::{ + /// hybrid::dfa::{DFA, OverlappingState}, + /// nfa::thompson, + /// HalfMatch, Input, MatchKind, + /// }; + /// + /// let dfa = DFA::builder() + /// .configure(DFA::config().match_kind(MatchKind::All)) + /// .thompson(thompson::Config::new().reverse(true)) + /// .build_many(&[r"", r"☃"])?; + /// let mut cache = dfa.create_cache(); + /// + /// // Run the reverse DFA to collect all matches. 
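+ /// // The protocol here: keep calling the search with the same `Input`
+ /// // and `OverlappingState`. Each call reports at most one match via
+ /// // `state.get_match()`, and a `None` match means the search is done.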
+ /// let input = Input::new("☃"); + /// let mut state = OverlappingState::start(); + /// let mut matches = vec![]; + /// loop { + /// dfa.try_search_overlapping_rev(&mut cache, &input, &mut state)?; + /// match state.get_match() { + /// None => break, + /// Some(hm) => matches.push(hm), + /// } + /// } + /// + /// // No matches split a codepoint. + /// let expected = vec![ + /// HalfMatch::must(0, 3), + /// HalfMatch::must(1, 0), + /// HalfMatch::must(0, 0), + /// ]; + /// assert_eq!(expected, matches); + /// + /// # Ok::<(), Box>(()) + /// ``` + /// + /// Now let's look at the same example, but with UTF-8 mode on the + /// underlying NFA disabled: + /// + /// ``` + /// use regex_automata::{ + /// hybrid::dfa::{DFA, OverlappingState}, + /// nfa::thompson, + /// HalfMatch, Input, MatchKind, + /// }; + /// + /// let dfa = DFA::builder() + /// .configure(DFA::config().match_kind(MatchKind::All)) + /// .thompson(thompson::Config::new().reverse(true).utf8(false)) + /// .build_many(&[r"", r"☃"])?; + /// let mut cache = dfa.create_cache(); + /// + /// // Run the reverse DFA to collect all matches. + /// let input = Input::new("☃"); + /// let mut state = OverlappingState::start(); + /// let mut matches = vec![]; + /// loop { + /// dfa.try_search_overlapping_rev(&mut cache, &input, &mut state)?; + /// match state.get_match() { + /// None => break, + /// Some(hm) => matches.push(hm), + /// } + /// } + /// + /// // Now *all* positions match, even within a codepoint, + /// // because we lifted the requirement that matches + /// // correspond to valid UTF-8 spans. + /// let expected = vec![ + /// HalfMatch::must(0, 3), + /// HalfMatch::must(0, 2), + /// HalfMatch::must(0, 1), + /// HalfMatch::must(1, 0), + /// HalfMatch::must(0, 0), + /// ]; + /// assert_eq!(expected, matches); + /// + /// # Ok::<(), Box>(()) + /// ``` + #[inline] + pub fn try_search_overlapping_rev( + &self, + cache: &mut Cache, + input: &Input<'_>, + state: &mut OverlappingState, + ) -> Result<(), MatchError> { + let utf8empty = self.get_nfa().has_empty() && self.get_nfa().is_utf8(); + search::find_overlapping_rev(self, cache, input, state)?; + match state.get_match() { + None => Ok(()), + Some(_) if !utf8empty => Ok(()), + Some(_) => skip_empty_utf8_splits_overlapping( + input, + state, + |input, state| { + search::find_overlapping_rev(self, cache, input, state) + }, + ), + } + } + + /// Writes the set of patterns that match anywhere in the given search + /// configuration to `patset`. If multiple patterns match at the same + /// position and the underlying DFA supports overlapping matches, then all + /// matching patterns are written to the given set. + /// + /// Unless all of the patterns in this DFA are anchored, then generally + /// speaking, this will visit every byte in the haystack. + /// + /// This search routine *does not* clear the pattern set. This gives some + /// flexibility to the caller (e.g., running multiple searches with the + /// same pattern set), but does make the API bug-prone if you're reusing + /// the same pattern set for multiple searches but intended them to be + /// independent. + /// + /// If a pattern ID matched but the given `PatternSet` does not have + /// sufficient capacity to store it, then it is not inserted and silently + /// dropped. + /// + /// # Errors + /// + /// This routine errors if the search could not complete. This can occur + /// in a number of circumstances: + /// + /// * The configuration of the lazy DFA may permit it to "quit" the search. 
+ /// For example, setting quit bytes or enabling heuristic support for + /// Unicode word boundaries. The default configuration does not enable any + /// option that could result in the lazy DFA quitting. + /// * The configuration of the lazy DFA may also permit it to "give up" + /// on a search if it makes ineffective use of its transition table + /// cache. The default configuration does not enable this by default, + /// although it is typically a good idea to. + /// * When the provided `Input` configuration is not supported. For + /// example, by providing an unsupported anchor mode. + /// + /// When a search returns an error, callers cannot know whether a match + /// exists or not. + /// + /// # Example + /// + /// This example shows how to find all matching patterns in a haystack, + /// even when some patterns match at the same position as other patterns. + /// + /// ``` + /// # if cfg!(miri) { return Ok(()); } // miri takes too long + /// use regex_automata::{ + /// hybrid::dfa::DFA, + /// Input, MatchKind, PatternSet, + /// }; + /// + /// let patterns = &[ + /// r"\w+", r"\d+", r"\pL+", r"foo", r"bar", r"barfoo", r"foobar", + /// ]; + /// let dfa = DFA::builder() + /// .configure(DFA::config().match_kind(MatchKind::All)) + /// .build_many(patterns)?; + /// let mut cache = dfa.create_cache(); + /// + /// let input = Input::new("foobar"); + /// let mut patset = PatternSet::new(dfa.pattern_len()); + /// dfa.try_which_overlapping_matches(&mut cache, &input, &mut patset)?; + /// let expected = vec![0, 2, 3, 4, 6]; + /// let got: Vec = patset.iter().map(|p| p.as_usize()).collect(); + /// assert_eq!(expected, got); + /// + /// # Ok::<(), Box>(()) + /// ``` + #[inline] + pub fn try_which_overlapping_matches( + &self, + cache: &mut Cache, + input: &Input<'_>, + patset: &mut PatternSet, + ) -> Result<(), MatchError> { + let mut state = OverlappingState::start(); + while let Some(m) = { + self.try_search_overlapping_fwd(cache, input, &mut state)?; + state.get_match() + } { + let _ = patset.try_insert(m.pattern()); + // There's nothing left to find, so we can stop. Or the caller + // asked us to. + if patset.is_full() || input.get_earliest() { + break; + } + } + Ok(()) + } +} + +impl DFA { + /// Transitions from the current state to the next state, given the next + /// byte of input. + /// + /// The given cache is used to either reuse pre-computed state + /// transitions, or to store this newly computed transition for future + /// reuse. Thus, this routine guarantees that it will never return a state + /// ID that has an "unknown" tag. + /// + /// # State identifier validity + /// + /// The only valid value for `current` is the lazy state ID returned + /// by the most recent call to `next_state`, `next_state_untagged`, + /// `next_state_untagged_unchecked`, `start_state_forward` or + /// `state_state_reverse` for the given `cache`. Any state ID returned from + /// prior calls to these routines (with the same `cache`) is considered + /// invalid (even if it gives an appearance of working). State IDs returned + /// from _any_ prior call for different `cache` values are also always + /// invalid. + /// + /// The returned ID is always a valid ID when `current` refers to a valid + /// ID. Moreover, this routine is defined for all possible values of + /// `input`. + /// + /// These validity rules are not checked, even in debug mode. Callers are + /// required to uphold these rules themselves. 
+ ///
+ /// Violating these state ID validity rules will not sacrifice memory
+ /// safety, but _may_ produce an incorrect result or a panic.
+ ///
+ /// # Panics
+ ///
+ /// If the given ID does not refer to a valid state, then this routine
+ /// may panic but it also may not panic and instead return an invalid or
+ /// incorrect ID.
+ ///
+ /// # Example
+ ///
+ /// This shows a simplistic example for walking a lazy DFA for a given
+ /// haystack by using the `next_state` method.
+ ///
+ /// ```
+ /// use regex_automata::{hybrid::dfa::DFA, Input};
+ ///
+ /// let dfa = DFA::new(r"[a-z]+r")?;
+ /// let mut cache = dfa.create_cache();
+ /// let haystack = "bar".as_bytes();
+ ///
+ /// // The start state is determined by inspecting the position and the
+ /// // initial bytes of the haystack.
+ /// let mut sid = dfa.start_state_forward(
+ /// &mut cache, &Input::new(haystack),
+ /// )?;
+ /// // Walk all the bytes in the haystack.
+ /// for &b in haystack {
+ /// sid = dfa.next_state(&mut cache, sid, b)?;
+ /// }
+ /// // Matches are always delayed by 1 byte, so we must explicitly walk the
+ /// // special "EOI" transition at the end of the search.
+ /// sid = dfa.next_eoi_state(&mut cache, sid)?;
+ /// assert!(sid.is_match());
+ ///
+ /// # Ok::<(), Box<dyn std::error::Error>>(())
+ /// ```
+ #[inline]
+ pub fn next_state(
+ &self,
+ cache: &mut Cache,
+ current: LazyStateID,
+ input: u8,
+ ) -> Result<LazyStateID, CacheError> {
+ let class = usize::from(self.classes.get(input));
+ let offset = current.as_usize_untagged() + class;
+ let sid = cache.trans[offset];
+ if !sid.is_unknown() {
+ return Ok(sid);
+ }
+ let unit = alphabet::Unit::u8(input);
+ Lazy::new(self, cache).cache_next_state(current, unit)
+ }
+
+ /// Transitions from the current state to the next state, given the next
+ /// byte of input and a state ID that is not tagged.
+ ///
+ /// The only reason to use this routine is performance. In particular, the
+ /// `next_state` method needs to do some additional checks, among them
+ /// accounting for identifiers to states that are not yet computed. In
+ /// such a case, the transition is computed on the fly. However, if it is
+ /// known that the `current` state ID is untagged, then these checks can be
+ /// omitted.
+ ///
+ /// Since this routine does not compute states on the fly, it does not
+ /// modify the cache and thus cannot return an error. Consequently, `cache`
+ /// does not need to be mutable and it is possible for this routine to
+ /// return a state ID corresponding to the special "unknown" state. In
+ /// this case, it is the caller's responsibility to use the prior state
+ /// ID and `input` with `next_state` in order to force the computation of
+ /// the unknown transition. Otherwise, trying to use the "unknown" state
+ /// ID will just result in transitioning back to itself, and thus never
+ /// terminating. (This is technically a special exemption to the state ID
+ /// validity rules, but is permissible since this routine is guaranteed to
+ /// never mutate the given `cache`, and thus the identifier is guaranteed
+ /// to remain valid.)
+ ///
+ /// See [`LazyStateID`] for more details on what it means for a state ID
+ /// to be tagged. Also, see
+ /// [`next_state_untagged_unchecked`](DFA::next_state_untagged_unchecked)
+ /// for this same idea, but with bounds checks forcefully elided.
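+ ///
+ /// Internally, the lookup is a single slice index: state identifiers are
+ /// "premultiplied", so with `S` byte classes per state, the transitions
+ /// for an untagged ID `sid` occupy `trans[sid..sid + S]`. The sketch
+ /// below is illustrative only (the local names are assumptions, not a
+ /// public API) and shows the shape of that lookup:
+ ///
+ /// ```ignore
+ /// // `classes` maps a haystack byte to its equivalence class and
+ /// // `trans` is the premultiplied transition table, so no multiply is
+ /// // needed to find the next state.
+ /// let class = usize::from(classes.get(input));
+ /// let next = cache.trans[sid.as_usize_untagged() + class];
+ /// ```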
+ /// + /// # State identifier validity + /// + /// The only valid value for `current` is an **untagged** lazy + /// state ID returned by the most recent call to `next_state`, + /// `next_state_untagged`, `next_state_untagged_unchecked`, + /// `start_state_forward` or `state_state_reverse` for the given `cache`. + /// Any state ID returned from prior calls to these routines (with the + /// same `cache`) is considered invalid (even if it gives an appearance + /// of working). State IDs returned from _any_ prior call for different + /// `cache` values are also always invalid. + /// + /// The returned ID is always a valid ID when `current` refers to a valid + /// ID, although it may be tagged. Moreover, this routine is defined for + /// all possible values of `input`. + /// + /// Not all validity rules are checked, even in debug mode. Callers are + /// required to uphold these rules themselves. + /// + /// Violating these state ID validity rules will not sacrifice memory + /// safety, but _may_ produce an incorrect result or a panic. + /// + /// # Panics + /// + /// If the given ID does not refer to a valid state, then this routine + /// may panic but it also may not panic and instead return an invalid or + /// incorrect ID. + /// + /// # Example + /// + /// This shows a simplistic example for walking a lazy DFA for a given + /// haystack by using the `next_state_untagged` method where possible. + /// + /// ``` + /// use regex_automata::{hybrid::dfa::DFA, Input}; + /// + /// let dfa = DFA::new(r"[a-z]+r")?; + /// let mut cache = dfa.create_cache(); + /// let haystack = "bar".as_bytes(); + /// + /// // The start state is determined by inspecting the position and the + /// // initial bytes of the haystack. + /// let mut sid = dfa.start_state_forward( + /// &mut cache, &Input::new(haystack), + /// )?; + /// // Walk all the bytes in the haystack. + /// let mut at = 0; + /// while at < haystack.len() { + /// if sid.is_tagged() { + /// sid = dfa.next_state(&mut cache, sid, haystack[at])?; + /// } else { + /// let mut prev_sid = sid; + /// // We attempt to chew through as much as we can while moving + /// // through untagged state IDs. Thus, the transition function + /// // does less work on average per byte. (Unrolling this loop + /// // may help even more.) + /// while at < haystack.len() { + /// prev_sid = sid; + /// sid = dfa.next_state_untagged( + /// &mut cache, sid, haystack[at], + /// ); + /// at += 1; + /// if sid.is_tagged() { + /// break; + /// } + /// } + /// // We must ensure that we never proceed to the next iteration + /// // with an unknown state ID. If we don't account for this + /// // case, then search isn't guaranteed to terminate since all + /// // transitions on unknown states loop back to itself. + /// if sid.is_unknown() { + /// sid = dfa.next_state( + /// &mut cache, prev_sid, haystack[at - 1], + /// )?; + /// } + /// } + /// } + /// // Matches are always delayed by 1 byte, so we must explicitly walk the + /// // special "EOI" transition at the end of the search. 
+ /// sid = dfa.next_eoi_state(&mut cache, sid)?; + /// assert!(sid.is_match()); + /// + /// # Ok::<(), Box>(()) + /// ``` + #[inline] + pub fn next_state_untagged( + &self, + cache: &Cache, + current: LazyStateID, + input: u8, + ) -> LazyStateID { + debug_assert!(!current.is_tagged()); + let class = usize::from(self.classes.get(input)); + let offset = current.as_usize_unchecked() + class; + cache.trans[offset] + } + + /// Transitions from the current state to the next state, eliding bounds + /// checks, given the next byte of input and a state ID that is not tagged. + /// + /// The only reason to use this routine is performance. In particular, the + /// `next_state` method needs to do some additional checks, among them is + /// to account for identifiers to states that are not yet computed. In + /// such a case, the transition is computed on the fly. However, if it is + /// known that the `current` state ID is untagged, then these checks can be + /// omitted. + /// + /// Since this routine does not compute states on the fly, it does not + /// modify the cache and thus cannot return an error. Consequently, `cache` + /// does not need to be mutable and it is possible for this routine to + /// return a state ID corresponding to the special "unknown" state. In + /// this case, it is the caller's responsibility to use the prior state + /// ID and `input` with `next_state` in order to force the computation of + /// the unknown transition. Otherwise, trying to use the "unknown" state + /// ID will just result in transitioning back to itself, and thus never + /// terminating. (This is technically a special exemption to the state ID + /// validity rules, but is permissible since this routine is guaranteed to + /// never mutate the given `cache`, and thus the identifier is guaranteed + /// to remain valid.) + /// + /// See [`LazyStateID`] for more details on what it means for a state ID + /// to be tagged. Also, see + /// [`next_state_untagged`](DFA::next_state_untagged) + /// for this same idea, but with memory safety guaranteed by retaining + /// bounds checks. + /// + /// # State identifier validity + /// + /// The only valid value for `current` is an **untagged** lazy + /// state ID returned by the most recent call to `next_state`, + /// `next_state_untagged`, `next_state_untagged_unchecked`, + /// `start_state_forward` or `state_state_reverse` for the given `cache`. + /// Any state ID returned from prior calls to these routines (with the + /// same `cache`) is considered invalid (even if it gives an appearance + /// of working). State IDs returned from _any_ prior call for different + /// `cache` values are also always invalid. + /// + /// The returned ID is always a valid ID when `current` refers to a valid + /// ID, although it may be tagged. Moreover, this routine is defined for + /// all possible values of `input`. + /// + /// Not all validity rules are checked, even in debug mode. Callers are + /// required to uphold these rules themselves. + /// + /// Violating these state ID validity rules will not sacrifice memory + /// safety, but _may_ produce an incorrect result or a panic. + /// + /// # Safety + /// + /// Callers of this method must guarantee that `current` refers to a valid + /// state ID according to the rules described above. If `current` is not a + /// valid state ID for this automaton, then calling this routine may result + /// in undefined behavior. + /// + /// If `current` is valid, then the ID returned is valid for all possible + /// values of `input`. 
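+ ///
+ /// # Example
+ ///
+ /// A minimal sketch of how this routine might be folded into a search
+ /// loop. It is illustrative only (the surrounding loop and the `at`
+ /// position are assumed, not shown), and it is only sound when `sid` is
+ /// untagged and was produced by this DFA with this same `cache`:
+ ///
+ /// ```ignore
+ /// // SAFETY: `sid` is untagged and is the most recent ID returned by a
+ /// // transition or start-state routine for this same `cache`.
+ /// let next = unsafe {
+ ///     dfa.next_state_untagged_unchecked(&cache, sid, haystack[at])
+ /// };
+ /// if next.is_unknown() {
+ ///     // Fall back to the checked routine to force the missing
+ ///     // transition to be computed and cached.
+ ///     sid = dfa.next_state(&mut cache, sid, haystack[at])?;
+ /// } else {
+ ///     sid = next;
+ /// }
+ /// ```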
+ #[inline] + pub unsafe fn next_state_untagged_unchecked( + &self, + cache: &Cache, + current: LazyStateID, + input: u8, + ) -> LazyStateID { + debug_assert!(!current.is_tagged()); + let class = usize::from(self.classes.get(input)); + let offset = current.as_usize_unchecked() + class; + *cache.trans.get_unchecked(offset) + } + + /// Transitions from the current state to the next state for the special + /// EOI symbol. + /// + /// The given cache is used to either reuse pre-computed state + /// transitions, or to store this newly computed transition for future + /// reuse. Thus, this routine guarantees that it will never return a state + /// ID that has an "unknown" tag. + /// + /// This routine must be called at the end of every search in a correct + /// implementation of search. Namely, lazy DFAs in this crate delay matches + /// by one byte in order to support look-around operators. Thus, after + /// reaching the end of a haystack, a search implementation must follow one + /// last EOI transition. + /// + /// It is best to think of EOI as an additional symbol in the alphabet of a + /// DFA that is distinct from every other symbol. That is, the alphabet of + /// lazy DFAs in this crate has a logical size of 257 instead of 256, where + /// 256 corresponds to every possible inhabitant of `u8`. (In practice, the + /// physical alphabet size may be smaller because of alphabet compression + /// via equivalence classes, but EOI is always represented somehow in the + /// alphabet.) + /// + /// # State identifier validity + /// + /// The only valid value for `current` is the lazy state ID returned + /// by the most recent call to `next_state`, `next_state_untagged`, + /// `next_state_untagged_unchecked`, `start_state_forward` or + /// `state_state_reverse` for the given `cache`. Any state ID returned from + /// prior calls to these routines (with the same `cache`) is considered + /// invalid (even if it gives an appearance of working). State IDs returned + /// from _any_ prior call for different `cache` values are also always + /// invalid. + /// + /// The returned ID is always a valid ID when `current` refers to a valid + /// ID. + /// + /// These validity rules are not checked, even in debug mode. Callers are + /// required to uphold these rules themselves. + /// + /// Violating these state ID validity rules will not sacrifice memory + /// safety, but _may_ produce an incorrect result or a panic. + /// + /// # Panics + /// + /// If the given ID does not refer to a valid state, then this routine + /// may panic but it also may not panic and instead return an invalid or + /// incorrect ID. + /// + /// # Example + /// + /// This shows a simplistic example for walking a DFA for a given haystack, + /// and then finishing the search with the final EOI transition. + /// + /// ``` + /// use regex_automata::{hybrid::dfa::DFA, Input}; + /// + /// let dfa = DFA::new(r"[a-z]+r")?; + /// let mut cache = dfa.create_cache(); + /// let haystack = "bar".as_bytes(); + /// + /// // The start state is determined by inspecting the position and the + /// // initial bytes of the haystack. + /// let mut sid = dfa.start_state_forward( + /// &mut cache, &Input::new(haystack), + /// )?; + /// // Walk all the bytes in the haystack. + /// for &b in haystack { + /// sid = dfa.next_state(&mut cache, sid, b)?; + /// } + /// // Matches are always delayed by 1 byte, so we must explicitly walk + /// // the special "EOI" transition at the end of the search. 
Without this
+ /// // final transition, the assert below will fail since the DFA will not
+ /// // have entered a match state yet!
+ /// sid = dfa.next_eoi_state(&mut cache, sid)?;
+ /// assert!(sid.is_match());
+ ///
+ /// # Ok::<(), Box<dyn std::error::Error>>(())
+ /// ```
+ #[inline]
+ pub fn next_eoi_state(
+ &self,
+ cache: &mut Cache,
+ current: LazyStateID,
+ ) -> Result<LazyStateID, CacheError> {
+ let eoi = self.classes.eoi().as_usize();
+ let offset = current.as_usize_untagged() + eoi;
+ let sid = cache.trans[offset];
+ if !sid.is_unknown() {
+ return Ok(sid);
+ }
+ let unit = self.classes.eoi();
+ Lazy::new(self, cache).cache_next_state(current, unit)
+ }
+
+ /// Return the ID of the start state for this lazy DFA for the given
+ /// starting configuration.
+ ///
+ /// Unlike typical DFA implementations, the start state for DFAs in this
+ /// crate is dependent on a few different factors:
+ ///
+ /// * The [`Anchored`] mode of the search. Unanchored, anchored and
+ /// anchored searches for a specific [`PatternID`] all use different start
+ /// states.
+ /// * Whether a "look-behind" byte exists. For example, the `^` anchor
+ /// matches if and only if there is no look-behind byte.
+ /// * The specific value of that look-behind byte. For example, a `(?m:^)`
+ /// assertion only matches when there is either no look-behind byte, or
+ /// when the look-behind byte is a line terminator.
+ ///
+ /// The [starting configuration](start::Config) provides the above
+ /// information.
+ ///
+ /// This routine can be used for either forward or reverse searches,
+ /// although, as a convenience, if you have an [`Input`], it
+ /// may be more succinct to use [`DFA::start_state_forward`] or
+ /// [`DFA::start_state_reverse`]. Note, for example, that the convenience
+ /// routines return a [`MatchError`] on failure whereas this routine
+ /// returns a [`StartError`].
+ ///
+ /// # Errors
+ ///
+ /// This may return a [`StartError`] if the search needs to give up when
+ /// determining the start state (for example, if it sees a "quit" byte
+ /// or if the cache has become inefficient). This can also return an
+ /// error if the given configuration contains an unsupported [`Anchored`]
+ /// configuration.
+ #[cfg_attr(feature = "perf-inline", inline(always))]
+ pub fn start_state(
+ &self,
+ cache: &mut Cache,
+ config: &start::Config,
+ ) -> Result<LazyStateID, StartError> {
+ let lazy = LazyRef::new(self, cache);
+ let anchored = config.get_anchored();
+ let start = match config.get_look_behind() {
+ None => Start::Text,
+ Some(byte) => {
+ if !self.quitset.is_empty() && self.quitset.contains(byte) {
+ return Err(StartError::quit(byte));
+ }
+ self.start_map.get(byte)
+ }
+ };
+ let start_id = lazy.get_cached_start_id(anchored, start)?;
+ if !start_id.is_unknown() {
+ return Ok(start_id);
+ }
+ Lazy::new(self, cache).cache_start_group(anchored, start)
+ }
+
+ /// Return the ID of the start state for this lazy DFA when executing a
+ /// forward search.
+ ///
+ /// This is a convenience routine for calling [`DFA::start_state`] that
+ /// converts the given [`Input`] to a [start configuration](start::Config).
+ /// Additionally, if an error occurs, it is converted from a [`StartError`]
+ /// to a [`MatchError`] using the offset information in the given
+ /// [`Input`].
+ ///
+ /// # Errors
+ ///
+ /// This may return a [`MatchError`] if the search needs to give up when
+ /// determining the start state (for example, if it sees a "quit" byte or
+ /// if the cache has become inefficient). This can also return an error if
+ /// the given `Input` contains an unsupported [`Anchored`] configuration.
+ #[cfg_attr(feature = "perf-inline", inline(always))]
+ pub fn start_state_forward(
+ &self,
+ cache: &mut Cache,
+ input: &Input<'_>,
+ ) -> Result<LazyStateID, MatchError> {
+ let config = start::Config::from_input_forward(input);
+ self.start_state(cache, &config).map_err(|err| match err {
+ StartError::Cache { .. } => MatchError::gave_up(input.start()),
+ StartError::Quit { byte } => {
+ let offset = input
+ .start()
+ .checked_sub(1)
+ .expect("no quit in start without look-behind");
+ MatchError::quit(byte, offset)
+ }
+ StartError::UnsupportedAnchored { mode } => {
+ MatchError::unsupported_anchored(mode)
+ }
+ })
+ }
+
+ /// Return the ID of the start state for this lazy DFA when executing a
+ /// reverse search.
+ ///
+ /// This is a convenience routine for calling [`DFA::start_state`] that
+ /// converts the given [`Input`] to a [start configuration](start::Config).
+ /// Additionally, if an error occurs, it is converted from a [`StartError`]
+ /// to a [`MatchError`] using the offset information in the given
+ /// [`Input`].
+ ///
+ /// # Errors
+ ///
+ /// This may return a [`MatchError`] if the search needs to give up when
+ /// determining the start state (for example, if it sees a "quit" byte or
+ /// if the cache has become inefficient). This can also return an error if
+ /// the given `Input` contains an unsupported [`Anchored`] configuration.
+ #[cfg_attr(feature = "perf-inline", inline(always))]
+ pub fn start_state_reverse(
+ &self,
+ cache: &mut Cache,
+ input: &Input<'_>,
+ ) -> Result<LazyStateID, MatchError> {
+ let config = start::Config::from_input_reverse(input);
+ self.start_state(cache, &config).map_err(|err| match err {
+ StartError::Cache { .. } => MatchError::gave_up(input.end()),
+ StartError::Quit { byte } => {
+ let offset = input.end();
+ MatchError::quit(byte, offset)
+ }
+ StartError::UnsupportedAnchored { mode } => {
+ MatchError::unsupported_anchored(mode)
+ }
+ })
+ }
+
+ /// Returns the total number of patterns that match in this state.
+ ///
+ /// If the lazy DFA was compiled with one pattern, then this must
+ /// necessarily always return `1` for all match states.
+ ///
+ /// A lazy DFA guarantees that [`DFA::match_pattern`] can be called with
+ /// indices up to (but not including) the length returned by this routine
+ /// without panicking.
+ ///
+ /// # Panics
+ ///
+ /// If the given state is not a match state, then this may either panic
+ /// or return an incorrect result.
+ ///
+ /// # Example
+ ///
+ /// This example shows a simple instance of implementing overlapping
+ /// matches. In particular, it shows not only how to determine how many
+ /// patterns have matched in a particular state, but also how to access
+ /// which specific patterns have matched.
+ ///
+ /// Notice that we must use [`MatchKind::All`] when building the DFA. If we
+ /// used [`MatchKind::LeftmostFirst`] instead, then the DFA would not be
+ /// constructed in a way that supports overlapping matches. (It would only
+ /// report a single pattern that matches at any particular point in time.)
+ ///
+ /// Another thing to take note of is the patterns used and the order in
+ /// which the pattern IDs are reported. In the example below, pattern `3`
+ /// is yielded first. Why? Because it corresponds to the match that
+ /// appears first. Namely, the `@` symbol is part of `\S+` but not part
+ /// of any of the other patterns.
Since the `\S+` pattern has a match that + /// starts to the left of any other pattern, its ID is returned before any + /// other. + /// + /// ``` + /// # if cfg!(miri) { return Ok(()); } // miri takes too long + /// use regex_automata::{hybrid::dfa::DFA, Input, MatchKind}; + /// + /// let dfa = DFA::builder() + /// .configure(DFA::config().match_kind(MatchKind::All)) + /// .build_many(&[ + /// r"\w+", r"[a-z]+", r"[A-Z]+", r"\S+", + /// ])?; + /// let mut cache = dfa.create_cache(); + /// let haystack = "@bar".as_bytes(); + /// + /// // The start state is determined by inspecting the position and the + /// // initial bytes of the haystack. + /// let mut sid = dfa.start_state_forward( + /// &mut cache, &Input::new(haystack), + /// )?; + /// // Walk all the bytes in the haystack. + /// for &b in haystack { + /// sid = dfa.next_state(&mut cache, sid, b)?; + /// } + /// sid = dfa.next_eoi_state(&mut cache, sid)?; + /// + /// assert!(sid.is_match()); + /// assert_eq!(dfa.match_len(&mut cache, sid), 3); + /// // The following calls are guaranteed to not panic since `match_len` + /// // returned `3` above. + /// assert_eq!(dfa.match_pattern(&mut cache, sid, 0).as_usize(), 3); + /// assert_eq!(dfa.match_pattern(&mut cache, sid, 1).as_usize(), 0); + /// assert_eq!(dfa.match_pattern(&mut cache, sid, 2).as_usize(), 1); + /// + /// # Ok::<(), Box>(()) + /// ``` + #[inline] + pub fn match_len(&self, cache: &Cache, id: LazyStateID) -> usize { + assert!(id.is_match()); + LazyRef::new(self, cache).get_cached_state(id).match_len() + } + + /// Returns the pattern ID corresponding to the given match index in the + /// given state. + /// + /// See [`DFA::match_len`] for an example of how to use this method + /// correctly. Note that if you know your lazy DFA is configured with a + /// single pattern, then this routine is never necessary since it will + /// always return a pattern ID of `0` for an index of `0` when `id` + /// corresponds to a match state. + /// + /// Typically, this routine is used when implementing an overlapping + /// search, as the example for `DFA::match_len` does. + /// + /// # Panics + /// + /// If the state ID is not a match state or if the match index is out + /// of bounds for the given state, then this routine may either panic + /// or produce an incorrect result. If the state ID is correct and the + /// match index is correct, then this routine always produces a valid + /// `PatternID`. + #[inline] + pub fn match_pattern( + &self, + cache: &Cache, + id: LazyStateID, + match_index: usize, + ) -> PatternID { + // This is an optimization for the very common case of a DFA with a + // single pattern. This conditional avoids a somewhat more costly path + // that finds the pattern ID from the corresponding `State`, which + // requires a bit of slicing/pointer-chasing. This optimization tends + // to only matter when matches are frequent. + if self.pattern_len() == 1 { + return PatternID::ZERO; + } + LazyRef::new(self, cache) + .get_cached_state(id) + .match_pattern(match_index) + } +} + +/// A cache represents a partially computed DFA. +/// +/// A cache is the key component that differentiates a classical DFA and a +/// hybrid NFA/DFA (also called a "lazy DFA"). Where a classical DFA builds a +/// complete transition table that can handle all possible inputs, a hybrid +/// NFA/DFA starts with an empty transition table and builds only the parts +/// required during search. The parts that are built are stored in a cache. 
For
+/// this reason, a cache is a required parameter for nearly every operation on
+/// a [`DFA`].
+///
+/// Caches can be created from their corresponding DFA via
+/// [`DFA::create_cache`]. A cache can only be used with either the DFA that
+/// created it, or the DFA that was most recently used to reset it with
+/// [`Cache::reset`]. Using a cache with any other DFA may result in panics
+/// or incorrect results.
+#[derive(Clone, Debug)]
+pub struct Cache {
+ // N.B. If you're looking to understand how determinization works, it
+ // is probably simpler to first grok src/dfa/determinize.rs, since that
+ // doesn't have the "laziness" component.
+ /// The transition table.
+ ///
+ /// Given a `current` LazyStateID and an `input` byte, the next state can
+ /// be computed via `trans[untagged(current) + equiv_class(input)]`. Notice
+ /// that no multiplication is used. That's because state identifiers are
+ /// "premultiplied."
+ ///
+ /// Note that the next state may be the "unknown" state. In this case, the
+ /// next state is not known and determinization for `current` on `input`
+ /// must be performed.
+ trans: Vec<LazyStateID>,
+ /// The starting states for this DFA.
+ ///
+ /// These are computed lazily. Initially, these are all set to "unknown"
+ /// lazy state IDs.
+ ///
+ /// When 'starts_for_each_pattern' is disabled (the default), then the size
+ /// of this is constrained to the possible starting configurations based
+ /// on the search parameters. (At time of writing, that's 4.) However,
+ /// when starting states for each pattern is enabled, then there are N
+ /// additional groups of starting states, where each group reflects the
+ /// different possible configurations and N is the number of patterns.
+ starts: Vec<LazyStateID>,
+ /// A sequence of NFA/DFA powerset states that have been computed for this
+ /// lazy DFA. This sequence is indexable by untagged LazyStateIDs. (Every
+ /// tagged LazyStateID can be used to index this sequence by converting it
+ /// to its untagged form.)
+ states: Vec<State>,
+ /// A map from states to their corresponding IDs. This map may be accessed
+ /// via the raw byte representation of a state, which means that a `State`
+ /// does not need to be allocated to determine whether it already exists
+ /// in this map. Indeed, the existence of such a state is what determines
+ /// whether we allocate a new `State` or not.
+ ///
+ /// The higher level idea here is that we do just enough determinization
+ /// for a state to check whether we've already computed it. If we have,
+ /// then we can save a little (albeit not much) work. The real savings is
+ /// in memory usage. If we never checked for trivially duplicate states,
+ /// then our memory usage would explode to unreasonable levels.
+ states_to_id: StateMap,
+ /// Sparse sets used to track which NFA states have been visited during
+ /// various traversals.
+ sparses: SparseSets,
+ /// Scratch space for traversing the NFA graph. (We use space on the heap
+ /// instead of the call stack.)
+ stack: Vec<NFAStateID>,
+ /// Scratch space for building a NFA/DFA powerset state. This is used to
+ /// help amortize allocation since not every powerset state generated is
+ /// added to the cache. In particular, if it already exists in the cache,
+ /// then there is no need to allocate a new `State` for it.
+ scratch_state_builder: StateBuilderEmpty,
+ /// A simple abstraction for handling the saving of at most a single state
+ /// across a cache clearing. This is required for correctness. Namely, if
+ /// adding a new state after clearing the cache fails, then the caller
+ /// must retain the ability to continue using the state ID given. The
+ /// state corresponding to the state ID is what we preserve across cache
+ /// clearings.
+ state_saver: StateSaver,
+ /// The memory usage, in bytes, used by 'states' and 'states_to_id'. We
+ /// track this as new states are added since states use a variable amount
+ /// of heap. Tracking this as we add states makes it possible to compute
+ /// the total amount of memory used by the determinizer in constant time.
+ memory_usage_state: usize,
+ /// The number of times the cache has been cleared. When a minimum cache
+ /// clear count is set, then the cache will return an error instead of
+ /// clearing the cache if the count has been exceeded.
+ clear_count: usize,
+ /// The total number of bytes searched since the last time this cache was
+ /// cleared, not including the current search.
+ ///
+ /// This can be added to the length of the current search to get the true
+ /// total number of bytes searched.
+ ///
+ /// This is generally only non-zero when the
+ /// `Cache::search_{start,update,finish}` APIs are used to track search
+ /// progress.
+ bytes_searched: usize,
+ /// The progress of the current search.
+ ///
+ /// This is only non-`None` when callers utilize the `Cache::search_start`,
+ /// `Cache::search_update` and `Cache::search_finish` APIs.
+ ///
+ /// The purpose of recording search progress is to be able to make a
+ /// determination about the efficiency of the cache. Namely, by keeping
+ /// track of the number of bytes searched relative to the number of states
+ /// created, it is possible to estimate how many bytes are searched per
+ /// state and thus whether the cache is being used effectively.
+ progress: Option<SearchProgress>,
+}
+
+impl Cache {
+ /// Create a new cache for the given lazy DFA.
+ ///
+ /// The cache returned should only be used for searches for the given DFA.
+ /// If you want to reuse the cache for another DFA, then you must call
+ /// [`Cache::reset`] with that DFA.
+ pub fn new(dfa: &DFA) -> Cache {
+ let mut cache = Cache {
+ trans: alloc::vec![],
+ starts: alloc::vec![],
+ states: alloc::vec![],
+ states_to_id: StateMap::new(),
+ sparses: SparseSets::new(dfa.get_nfa().states().len()),
+ stack: alloc::vec![],
+ scratch_state_builder: StateBuilderEmpty::new(),
+ state_saver: StateSaver::none(),
+ memory_usage_state: 0,
+ clear_count: 0,
+ bytes_searched: 0,
+ progress: None,
+ };
+ debug!("pre-init lazy DFA cache size: {}", cache.memory_usage());
+ Lazy { dfa, cache: &mut cache }.init_cache();
+ debug!("post-init lazy DFA cache size: {}", cache.memory_usage());
+ cache
+ }
+
+ /// Reset this cache such that it can be used for searching with the given
+ /// lazy DFA (and only that DFA).
+ ///
+ /// A cache reset permits reusing memory already allocated in this cache
+ /// with a different lazy DFA.
+ ///
+ /// Resetting a cache sets its "clear count" to 0. This is relevant if the
+ /// lazy DFA has been configured to "give up" after it has cleared the
+ /// cache a certain number of times.
+ ///
+ /// Any lazy state ID generated by the cache prior to resetting it is
+ /// invalid after the reset.
+ ///
+ /// # Example
+ ///
+ /// This shows how to re-purpose a cache for use with a different DFA.
+ /// + /// ``` + /// # if cfg!(miri) { return Ok(()); } // miri takes too long + /// use regex_automata::{hybrid::dfa::DFA, HalfMatch, Input}; + /// + /// let dfa1 = DFA::new(r"\w")?; + /// let dfa2 = DFA::new(r"\W")?; + /// + /// let mut cache = dfa1.create_cache(); + /// assert_eq!( + /// Some(HalfMatch::must(0, 2)), + /// dfa1.try_search_fwd(&mut cache, &Input::new("Δ"))?, + /// ); + /// + /// // Using 'cache' with dfa2 is not allowed. It may result in panics or + /// // incorrect results. In order to re-purpose the cache, we must reset + /// // it with the DFA we'd like to use it with. + /// // + /// // Similarly, after this reset, using the cache with 'dfa1' is also not + /// // allowed. + /// cache.reset(&dfa2); + /// assert_eq!( + /// Some(HalfMatch::must(0, 3)), + /// dfa2.try_search_fwd(&mut cache, &Input::new("☃"))?, + /// ); + /// + /// # Ok::<(), Box>(()) + /// ``` + pub fn reset(&mut self, dfa: &DFA) { + Lazy::new(dfa, self).reset_cache() + } + + /// Initializes a new search starting at the given position. + /// + /// If a previous search was unfinished, then it is finished automatically + /// and a new search is begun. + /// + /// Note that keeping track of search progress is _not necessary_ + /// for correct implementations of search using a lazy DFA. Keeping + /// track of search progress is only necessary if you want the + /// [`Config::minimum_bytes_per_state`] configuration knob to work. + #[inline] + pub fn search_start(&mut self, at: usize) { + // If a previous search wasn't marked as finished, then finish it + // now automatically. + if let Some(p) = self.progress.take() { + self.bytes_searched += p.len(); + } + self.progress = Some(SearchProgress { start: at, at }); + } + + /// Updates the current search to indicate that it has search to the + /// current position. + /// + /// No special care needs to be taken for reverse searches. Namely, the + /// position given may be _less than_ the starting position of the search. + /// + /// # Panics + /// + /// This panics if no search has been started by [`Cache::search_start`]. + #[inline] + pub fn search_update(&mut self, at: usize) { + let p = + self.progress.as_mut().expect("no in-progress search to update"); + p.at = at; + } + + /// Indicates that a search has finished at the given position. + /// + /// # Panics + /// + /// This panics if no search has been started by [`Cache::search_start`]. + #[inline] + pub fn search_finish(&mut self, at: usize) { + let mut p = + self.progress.take().expect("no in-progress search to finish"); + p.at = at; + self.bytes_searched += p.len(); + } + + /// Returns the total number of bytes that have been searched since this + /// cache was last cleared. + /// + /// This is useful for determining the efficiency of the cache. For + /// example, the lazy DFA uses this value in conjunction with the + /// [`Config::minimum_bytes_per_state`] knob to help determine whether it + /// should quit searching. + /// + /// This always returns `0` if search progress isn't being tracked. Note + /// that the lazy DFA search routines in this crate always track search + /// progress. + pub fn search_total_len(&self) -> usize { + self.bytes_searched + self.progress.as_ref().map_or(0, |p| p.len()) + } + + /// Returns the total number of times this cache has been cleared since it + /// was either created or last reset. + /// + /// This is useful for informational purposes or if you want to change + /// search strategies based on the number of times the cache has been + /// cleared. 
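+ ///
+ /// # Example
+ ///
+ /// A small illustrative example (using only the APIs shown above): a
+ /// freshly created cache has never been cleared, so its count starts at
+ /// `0`.
+ ///
+ /// ```
+ /// use regex_automata::hybrid::dfa::DFA;
+ ///
+ /// let dfa = DFA::new(r"\w+")?;
+ /// let cache = dfa.create_cache();
+ /// assert_eq!(0, cache.clear_count());
+ /// # Ok::<(), Box<dyn std::error::Error>>(())
+ /// ```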
+ pub fn clear_count(&self) -> usize {
+ self.clear_count
+ }
+
+ /// Returns the heap memory usage, in bytes, of this cache.
+ ///
+ /// This does **not** include the stack size used up by this cache. To
+ /// compute that, use `std::mem::size_of::<Cache>()`.
+ pub fn memory_usage(&self) -> usize {
+ const ID_SIZE: usize = size_of::<LazyStateID>();
+ const STATE_SIZE: usize = size_of::<State>();
+
+ // NOTE: If you make changes to the below, then
+ // 'minimum_cache_capacity' should be updated correspondingly.
+
+ self.trans.len() * ID_SIZE
+ + self.starts.len() * ID_SIZE
+ + self.states.len() * STATE_SIZE
+ // Maps likely use more memory than this, but it's probably close.
+ + self.states_to_id.len() * (STATE_SIZE + ID_SIZE)
+ + self.sparses.memory_usage()
+ + self.stack.capacity() * ID_SIZE
+ + self.scratch_state_builder.capacity()
+ // Heap memory used by 'State' in both 'states' and 'states_to_id'.
+ + self.memory_usage_state
+ }
+}
+
+/// Keeps track of the progress of the current search.
+///
+/// This is updated via the `Cache::search_{start,update,finish}` APIs to
+/// record how many bytes have been searched. This permits computing a
+/// heuristic that represents the efficiency of a cache, and thus helps inform
+/// whether the lazy DFA should give up or not.
+#[derive(Clone, Debug)]
+struct SearchProgress {
+ start: usize,
+ at: usize,
+}
+
+impl SearchProgress {
+ /// Returns the length, in bytes, of this search so far.
+ ///
+ /// This automatically handles the case of a reverse search, where `at`
+ /// is likely to be less than `start`.
+ fn len(&self) -> usize {
+ if self.start <= self.at {
+ self.at - self.start
+ } else {
+ self.start - self.at
+ }
+ }
+}
+
+/// A map from states to state identifiers. When using std, we use a standard
+/// hashmap, since it's a bit faster for this use case. (Other maps, like
+/// ones based on FNV, have not yet been benchmarked.)
+///
+/// The main purpose of this map is to reuse states where possible. This won't
+/// fully minimize the DFA, but it works well in a lot of cases.
+#[cfg(feature = "std")]
+type StateMap = std::collections::HashMap<State, LazyStateID>;
+#[cfg(not(feature = "std"))]
+type StateMap = alloc::collections::BTreeMap<State, LazyStateID>;
+
+/// A type that groups methods that require the base NFA/DFA and writable
+/// access to the cache.
+#[derive(Debug)]
+struct Lazy<'i, 'c> {
+ dfa: &'i DFA,
+ cache: &'c mut Cache,
+}
+
+impl<'i, 'c> Lazy<'i, 'c> {
+ /// Creates a new 'Lazy' wrapper for a DFA and its corresponding cache.
+ fn new(dfa: &'i DFA, cache: &'c mut Cache) -> Lazy<'i, 'c> {
+ Lazy { dfa, cache }
+ }
+
+ /// Return an immutable view by downgrading a writable cache to a read-only
+ /// cache.
+ fn as_ref<'a>(&'a self) -> LazyRef<'i, 'a> {
+ LazyRef::new(self.dfa, self.cache)
+ }
+
+ /// This is marked as 'inline(never)' to avoid bloating methods on 'DFA'
+ /// like 'next_state' and 'next_eoi_state' that are called in critical
+ /// areas. The idea is to let the optimizer focus on the other areas of
+ /// those methods as the hot path.
+ ///
+ /// Here's an example that justifies 'inline(never)':
+ ///
+ /// ```ignore
+ /// regex-cli find match hybrid \
+ /// --cache-capacity 100000000 \
+ /// -p '\pL{100}'
+ /// all-codepoints-utf8-100x
+ /// ```
+ ///
+ /// Where 'all-codepoints-utf8-100x' is the UTF-8 encoding of every
+ /// codepoint, in sequence, repeated 100 times.
+ ///
+ /// With 'inline(never)' hyperfine reports 1.1s per run. With
+ /// 'inline(always)', hyperfine reports 1.23s. So that's a 10% improvement.
+ #[cold]
+ #[inline(never)]
+ fn cache_next_state(
+ &mut self,
+ mut current: LazyStateID,
+ unit: alphabet::Unit,
+ ) -> Result<LazyStateID, CacheError> {
+ let stride2 = self.dfa.stride2();
+ let empty_builder = self.get_state_builder();
+ let builder = determinize::next(
+ self.dfa.get_nfa(),
+ self.dfa.get_config().get_match_kind(),
+ &mut self.cache.sparses,
+ &mut self.cache.stack,
+ &self.cache.states[current.as_usize_untagged() >> stride2],
+ unit,
+ empty_builder,
+ );
+ // This is subtle, but if we *might* clear the cache, then we should
+ // try to save the current state so that we can re-map its ID after
+ // cache clearing. We *might* clear the cache when either the new
+ // state can't fit in the cache or when the number of transitions has
+ // reached the maximum. Even if either of these conditions is true,
+ // the cache might not be cleared if we can reuse an existing state.
+ // But we don't know that at this point. Moreover, we don't save the
+ // current state every time because it is costly.
+ //
+ // TODO: We should try to find a way to make this less subtle and error
+ // prone. ---AG
+ let save_state = !self.as_ref().state_builder_fits_in_cache(&builder)
+ || self.cache.trans.len() >= LazyStateID::MAX;
+ if save_state {
+ self.save_state(current);
+ }
+ let next = self.add_builder_state(builder, |sid| sid)?;
+ if save_state {
+ current = self.saved_state_id();
+ }
+ // This is the payoff. The next time 'next_state' is called with this
+ // state and alphabet unit, it will find this transition and avoid
+ // having to re-determinize this transition.
+ self.set_transition(current, unit, next);
+ Ok(next)
+ }
+
+ /// Compute and cache the starting state for the given pattern ID (if
+ /// present) and the starting configuration.
+ ///
+ /// This panics if a pattern ID is given and the DFA isn't configured to
+ /// build anchored start states for each pattern.
+ ///
+ /// This will never return an unknown lazy state ID.
+ ///
+ /// If caching this state would otherwise result in a cache that has been
+ /// cleared too many times, then an error is returned.
+ #[cold]
+ #[inline(never)]
+ fn cache_start_group(
+ &mut self,
+ anchored: Anchored,
+ start: Start,
+ ) -> Result<LazyStateID, StartError> {
+ let nfa_start_id = match anchored {
+ Anchored::No => self.dfa.get_nfa().start_unanchored(),
+ Anchored::Yes => self.dfa.get_nfa().start_anchored(),
+ Anchored::Pattern(pid) => {
+ if !self.dfa.get_config().get_starts_for_each_pattern() {
+ return Err(StartError::unsupported_anchored(anchored));
+ }
+ match self.dfa.get_nfa().start_pattern(pid) {
+ None => return Ok(self.as_ref().dead_id()),
+ Some(sid) => sid,
+ }
+ }
+ };
+
+ let id = self
+ .cache_start_one(nfa_start_id, start)
+ .map_err(StartError::cache)?;
+ self.set_start_state(anchored, start, id);
+ Ok(id)
+ }
+
+ /// Compute and cache the starting state for the given NFA state ID and the
+ /// starting configuration. The NFA state ID might be one of the following:
+ ///
+ /// 1) An unanchored start state to match any pattern.
+ /// 2) An anchored start state to match any pattern.
+ /// 3) An anchored start state for a particular pattern.
+ ///
+ /// This will never return an unknown lazy state ID.
+ ///
+ /// If caching this state would otherwise result in a cache that has been
+ /// cleared too many times, then an error is returned.
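+ ///
+ /// In other words: seed the look-behind assertions from the starting
+ /// configuration, take the epsilon closure of the given NFA start state,
+ /// and then add the resulting powerset state to the cache (specializing
+ /// it as a start state if the DFA is configured to do so).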
+ fn cache_start_one(
+ &mut self,
+ nfa_start_id: NFAStateID,
+ start: Start,
+ ) -> Result<LazyStateID, CacheError> {
+ let mut builder_matches = self.get_state_builder().into_matches();
+ determinize::set_lookbehind_from_start(
+ self.dfa.get_nfa(),
+ &start,
+ &mut builder_matches,
+ );
+ self.cache.sparses.set1.clear();
+ determinize::epsilon_closure(
+ self.dfa.get_nfa(),
+ nfa_start_id,
+ builder_matches.look_have(),
+ &mut self.cache.stack,
+ &mut self.cache.sparses.set1,
+ );
+ let mut builder = builder_matches.into_nfa();
+ determinize::add_nfa_states(
+ &self.dfa.get_nfa(),
+ &self.cache.sparses.set1,
+ &mut builder,
+ );
+ let tag_starts = self.dfa.get_config().get_specialize_start_states();
+ self.add_builder_state(builder, |id| {
+ if tag_starts {
+ id.to_start()
+ } else {
+ id
+ }
+ })
+ }
+
+ /// Either add the given builder state to this cache, or return an ID to an
+ /// equivalent state already in this cache.
+ ///
+ /// In the case where no equivalent state exists, the idmap function given
+ /// may be used to transform the identifier allocated. This is useful if
+ /// the caller needs to tag the ID with additional information.
+ ///
+ /// This will never return an unknown lazy state ID.
+ ///
+ /// If caching this state would otherwise result in a cache that has been
+ /// cleared too many times, then an error is returned.
+ fn add_builder_state(
+ &mut self,
+ builder: StateBuilderNFA,
+ idmap: impl Fn(LazyStateID) -> LazyStateID,
+ ) -> Result<LazyStateID, CacheError> {
+ if let Some(&cached_id) =
+ self.cache.states_to_id.get(builder.as_bytes())
+ {
+ // Since we have a cached state, put the constructed state's
+ // memory back into our scratch space, so that it can be reused.
+ self.put_state_builder(builder);
+ return Ok(cached_id);
+ }
+ let result = self.add_state(builder.to_state(), idmap);
+ self.put_state_builder(builder);
+ result
+ }
+
+ /// Allocate a new state ID and add the given state to this cache.
+ ///
+ /// The idmap function given may be used to transform the identifier
+ /// allocated. This is useful if the caller needs to tag the ID with
+ /// additional information.
+ ///
+ /// This will never return an unknown lazy state ID.
+ ///
+ /// If caching this state would otherwise result in a cache that has been
+ /// cleared too many times, then an error is returned.
+ fn add_state(
+ &mut self,
+ state: State,
+ idmap: impl Fn(LazyStateID) -> LazyStateID,
+ ) -> Result<LazyStateID, CacheError> {
+ if !self.as_ref().state_fits_in_cache(&state) {
+ self.try_clear_cache()?;
+ }
+ // It's important for this to come second, since the above may clear
+ // the cache. If we clear the cache after ID generation, then the ID
+ // is likely bunk since it would have been generated based on a larger
+ // transition table.
+ let mut id = idmap(self.next_state_id()?);
+ if state.is_match() {
+ id = id.to_match();
+ }
+ // Add room in the transition table. Since this is a fresh state, all
+ // of its transitions are unknown.
+ self.cache.trans.extend(
+ iter::repeat(self.as_ref().unknown_id()).take(self.dfa.stride()),
+ );
+ // When we add a sentinel state, we never want to set any quit
+ // transitions. Technically, this is harmless, since sentinel states
+ // have all of their transitions set to loop back to themselves. But
+ // when creating sentinel states before the quit sentinel state,
+ // this will try to call 'set_transition' on a state ID that doesn't
+ // actually exist yet, which isn't allowed. So we just skip doing so
+ // entirely.
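+ // For a freshly added non-sentinel state, wire every configured quit
+ // byte directly to the canonical quit sentinel so that a search can
+ // observe it and stop.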
+ if !self.dfa.quitset.is_empty() && !self.as_ref().is_sentinel(id) { + let quit_id = self.as_ref().quit_id(); + for b in self.dfa.quitset.iter() { + self.set_transition(id, alphabet::Unit::u8(b), quit_id); + } + } + self.cache.memory_usage_state += state.memory_usage(); + self.cache.states.push(state.clone()); + self.cache.states_to_id.insert(state, id); + Ok(id) + } + + /// Allocate a new state ID. + /// + /// This will never return an unknown lazy state ID. + /// + /// If caching this state would otherwise result in a cache that has been + /// cleared too many times, then an error is returned. + fn next_state_id(&mut self) -> Result { + let sid = match LazyStateID::new(self.cache.trans.len()) { + Ok(sid) => sid, + Err(_) => { + self.try_clear_cache()?; + // This has to pass since we check that ID capacity at + // construction time can fit at least MIN_STATES states. + LazyStateID::new(self.cache.trans.len()).unwrap() + } + }; + Ok(sid) + } + + /// Attempt to clear the cache used by this lazy DFA. + /// + /// If clearing the cache exceeds the minimum number of required cache + /// clearings, then this will return a cache error. In this case, + /// callers should bubble this up as the cache can't be used until it is + /// reset. Implementations of search should convert this error into a + /// [`MatchError::gave_up`]. + /// + /// If 'self.state_saver' is set to save a state, then this state is + /// persisted through cache clearing. Otherwise, the cache is returned to + /// its state after initialization with two exceptions: its clear count + /// is incremented and some of its memory likely has additional capacity. + /// That is, clearing a cache does _not_ release memory. + /// + /// Otherwise, any lazy state ID generated by the cache prior to resetting + /// it is invalid after the reset. + fn try_clear_cache(&mut self) -> Result<(), CacheError> { + let c = self.dfa.get_config(); + if let Some(min_count) = c.get_minimum_cache_clear_count() { + if self.cache.clear_count >= min_count { + if let Some(min_bytes_per) = c.get_minimum_bytes_per_state() { + let len = self.cache.search_total_len(); + let min_bytes = + min_bytes_per.saturating_mul(self.cache.states.len()); + // If we've searched 0 bytes then probably something has + // gone wrong and the lazy DFA search implementation isn't + // correctly updating the search progress state. + if len == 0 { + trace!( + "number of bytes searched is 0, but \ + a minimum bytes per state searched ({}) is \ + enabled, maybe Cache::search_update \ + is not being used?", + min_bytes_per, + ); + } + if len < min_bytes { + trace!( + "lazy DFA cache has been cleared {} times, \ + which exceeds the limit of {}, \ + AND its bytes searched per state is less \ + than the configured minimum of {}, \ + therefore lazy DFA is giving up \ + (bytes searched since cache clear = {}, \ + number of states = {})", + self.cache.clear_count, + min_count, + min_bytes_per, + len, + self.cache.states.len(), + ); + return Err(CacheError::bad_efficiency()); + } else { + trace!( + "lazy DFA cache has been cleared {} times, \ + which exceeds the limit of {}, \ + AND its bytes searched per state is greater \ + than the configured minimum of {}, \ + therefore lazy DFA is continuing! 
\ + (bytes searched since cache clear = {}, \ + number of states = {})", + self.cache.clear_count, + min_count, + min_bytes_per, + len, + self.cache.states.len(), + ); + } + } else { + trace!( + "lazy DFA cache has been cleared {} times, \ + which exceeds the limit of {}, \ + since there is no configured bytes per state \ + minimum, lazy DFA is giving up", + self.cache.clear_count, + min_count, + ); + return Err(CacheError::too_many_cache_clears()); + } + } + } + self.clear_cache(); + Ok(()) + } + + /// Clears _and_ resets the cache. Resetting the cache means that no + /// states are persisted and the clear count is reset to 0. No heap memory + /// is released. + /// + /// Note that the caller may reset a cache with a different DFA than what + /// it was created from. In which case, the cache can now be used with the + /// new DFA (and not the old DFA). + fn reset_cache(&mut self) { + self.cache.state_saver = StateSaver::none(); + self.clear_cache(); + // If a new DFA is used, it might have a different number of NFA + // states, so we need to make sure our sparse sets have the appropriate + // size. + self.cache.sparses.resize(self.dfa.get_nfa().states().len()); + self.cache.clear_count = 0; + self.cache.progress = None; + } + + /// Clear the cache used by this lazy DFA. + /// + /// If 'self.state_saver' is set to save a state, then this state is + /// persisted through cache clearing. Otherwise, the cache is returned to + /// its state after initialization with two exceptions: its clear count + /// is incremented and some of its memory likely has additional capacity. + /// That is, clearing a cache does _not_ release memory. + /// + /// Otherwise, any lazy state ID generated by the cache prior to resetting + /// it is invalid after the reset. + fn clear_cache(&mut self) { + self.cache.trans.clear(); + self.cache.starts.clear(); + self.cache.states.clear(); + self.cache.states_to_id.clear(); + self.cache.memory_usage_state = 0; + self.cache.clear_count += 1; + self.cache.bytes_searched = 0; + if let Some(ref mut progress) = self.cache.progress { + progress.start = progress.at; + } + trace!( + "lazy DFA cache has been cleared (count: {})", + self.cache.clear_count + ); + self.init_cache(); + // If the state we want to save is one of the sentinel + // (unknown/dead/quit) states, then 'init_cache' adds those back, and + // their identifier values remains invariant. So there's no need to add + // it again. (And indeed, doing so would be incorrect!) + if let Some((old_id, state)) = self.cache.state_saver.take_to_save() { + // If the state is one of the special sentinel states, then it is + // automatically added by cache initialization and its ID always + // remains the same. With that said, this should never occur since + // the sentinel states are all loop states back to themselves. So + // we should never be in a position where we're attempting to save + // a sentinel state since we never compute transitions out of a + // sentinel state. + assert!( + !self.as_ref().is_sentinel(old_id), + "cannot save sentinel state" + ); + let new_id = self + .add_state(state, |id| { + if old_id.is_start() { + // We don't need to consult the + // 'specialize_start_states' config knob here, because + // if it's disabled, old_id.is_start() will never + // return true. + id.to_start() + } else { + id + } + }) + // The unwrap here is OK because lazy DFA creation ensures that + // we have room in the cache to add MIN_STATES states. Since + // 'init_cache' above adds 3, this adds a 4th. 
+ .expect("adding one state after cache clear must work"); + self.cache.state_saver = StateSaver::Saved(new_id); + } + } + + /// Initialize this cache from emptiness to a place where it can be used + /// for search. + /// + /// This is called both at cache creation time and after the cache has been + /// cleared. + /// + /// Primarily, this adds the three sentinel states and allocates some + /// initial memory. + fn init_cache(&mut self) { + // Why multiply by 2 here? Because we make room for both the unanchored + // and anchored start states. Unanchored is first and then anchored. + let mut starts_len = Start::len().checked_mul(2).unwrap(); + // ... but if we also want start states for every pattern, we make room + // for that too. + if self.dfa.get_config().get_starts_for_each_pattern() { + starts_len += Start::len() * self.dfa.pattern_len(); + } + self.cache + .starts + .extend(iter::repeat(self.as_ref().unknown_id()).take(starts_len)); + // This is the set of NFA states that corresponds to each of our three + // sentinel states: the empty set. + let dead = State::dead(); + // This sets up some states that we use as sentinels that are present + // in every DFA. While it would be technically possible to implement + // this DFA without explicitly putting these states in the transition + // table, this is convenient to do to make `next_state` correct for all + // valid state IDs without needing explicit conditionals to special + // case these sentinel states. + // + // All three of these states are "dead" states. That is, all of + // them transition only to themselves. So once you enter one of + // these states, it's impossible to leave them. Thus, any correct + // search routine must explicitly check for these state types. (Sans + // `unknown`, since that is only used internally to represent missing + // states.) + let unk_id = + self.add_state(dead.clone(), |id| id.to_unknown()).unwrap(); + let dead_id = self.add_state(dead.clone(), |id| id.to_dead()).unwrap(); + let quit_id = self.add_state(dead.clone(), |id| id.to_quit()).unwrap(); + assert_eq!(unk_id, self.as_ref().unknown_id()); + assert_eq!(dead_id, self.as_ref().dead_id()); + assert_eq!(quit_id, self.as_ref().quit_id()); + // The idea here is that if you start in an unknown/dead/quit state and + // try to transition on them, then you should end up where you started. + self.set_all_transitions(unk_id, unk_id); + self.set_all_transitions(dead_id, dead_id); + self.set_all_transitions(quit_id, quit_id); + // All of these states are technically equivalent from the FSM + // perspective, so putting all three of them in the cache isn't + // possible. (They are distinct merely because we use their + // identifiers as sentinels to mean something, as indicated by the + // names.) Moreover, we wouldn't want to do that. Unknown and quit + // states are special in that they are artificial constructions + // this implementation. But dead states are a natural part of + // determinization. When you reach a point in the NFA where you cannot + // go anywhere else, a dead state will naturally arise and we MUST + // reuse the canonical dead state that we've created here. Why? Because + // it is the state ID that tells the search routine whether a state is + // dead or not, and thus, whether to stop the search. Having a bunch of + // distinct dead states would be quite wasteful! + self.cache.states_to_id.insert(dead, dead_id); + } + + /// Save the state corresponding to the ID given such that the state + /// persists through a cache clearing. 
+ /// + /// While the state may persist, the ID may not. In order to discover the + /// new state ID, one must call 'saved_state_id' after a cache clearing. + fn save_state(&mut self, id: LazyStateID) { + let state = self.as_ref().get_cached_state(id).clone(); + self.cache.state_saver = StateSaver::ToSave { id, state }; + } + + /// Returns the updated lazy state ID for a state that was persisted + /// through a cache clearing. + /// + /// It is only correct to call this routine when both a state has been + /// saved and the cache has just been cleared. Otherwise, this panics. + fn saved_state_id(&mut self) -> LazyStateID { + self.cache + .state_saver + .take_saved() + .expect("state saver does not have saved state ID") + } + + /// Set all transitions on the state 'from' to 'to'. + fn set_all_transitions(&mut self, from: LazyStateID, to: LazyStateID) { + for unit in self.dfa.classes.representatives(..) { + self.set_transition(from, unit, to); + } + } + + /// Set the transition on 'from' for 'unit' to 'to'. + /// + /// This panics if either 'from' or 'to' is invalid. + /// + /// All unit values are OK. + fn set_transition( + &mut self, + from: LazyStateID, + unit: alphabet::Unit, + to: LazyStateID, + ) { + assert!(self.as_ref().is_valid(from), "invalid 'from' id: {from:?}"); + assert!(self.as_ref().is_valid(to), "invalid 'to' id: {to:?}"); + let offset = + from.as_usize_untagged() + self.dfa.classes.get_by_unit(unit); + self.cache.trans[offset] = to; + } + + /// Set the start ID for the given pattern ID (if given) and starting + /// configuration to the ID given. + /// + /// This panics if 'id' is not valid or if a pattern ID is given and + /// 'starts_for_each_pattern' is not enabled. + fn set_start_state( + &mut self, + anchored: Anchored, + start: Start, + id: LazyStateID, + ) { + assert!(self.as_ref().is_valid(id)); + let start_index = start.as_usize(); + let index = match anchored { + Anchored::No => start_index, + Anchored::Yes => Start::len() + start_index, + Anchored::Pattern(pid) => { + assert!( + self.dfa.get_config().get_starts_for_each_pattern(), + "attempted to search for a specific pattern \ + without enabling starts_for_each_pattern", + ); + let pid = pid.as_usize(); + (2 * Start::len()) + (Start::len() * pid) + start_index + } + }; + self.cache.starts[index] = id; + } + + /// Returns a state builder from this DFA that might have existing + /// capacity. This helps avoid allocs in cases where a state is built that + /// turns out to already be cached. + /// + /// Callers must put the state builder back with 'put_state_builder', + /// otherwise the allocation reuse won't work. + fn get_state_builder(&mut self) -> StateBuilderEmpty { + core::mem::replace( + &mut self.cache.scratch_state_builder, + StateBuilderEmpty::new(), + ) + } + + /// Puts the given state builder back into this DFA for reuse. + /// + /// Note that building a 'State' from a builder always creates a new alloc, + /// so callers should always put the builder back. + fn put_state_builder(&mut self, builder: StateBuilderNFA) { + let _ = core::mem::replace( + &mut self.cache.scratch_state_builder, + builder.clear(), + ); + } +} + +/// A type that groups methods that require the base NFA/DFA and read-only +/// access to the cache. +#[derive(Debug)] +struct LazyRef<'i, 'c> { + dfa: &'i DFA, + cache: &'c Cache, +} + +impl<'i, 'c> LazyRef<'i, 'c> { + /// Creates a new 'Lazy' wrapper for a DFA and its corresponding cache. 
+ fn new(dfa: &'i DFA, cache: &'c Cache) -> LazyRef<'i, 'c> { + LazyRef { dfa, cache } + } + + /// Return the ID of the start state for the given configuration. + /// + /// If the start state has not yet been computed, then this returns an + /// unknown lazy state ID. + #[cfg_attr(feature = "perf-inline", inline(always))] + fn get_cached_start_id( + &self, + anchored: Anchored, + start: Start, + ) -> Result { + let start_index = start.as_usize(); + let index = match anchored { + Anchored::No => start_index, + Anchored::Yes => Start::len() + start_index, + Anchored::Pattern(pid) => { + if !self.dfa.get_config().get_starts_for_each_pattern() { + return Err(StartError::unsupported_anchored(anchored)); + } + if pid.as_usize() >= self.dfa.pattern_len() { + return Ok(self.dead_id()); + } + (2 * Start::len()) + + (Start::len() * pid.as_usize()) + + start_index + } + }; + Ok(self.cache.starts[index]) + } + + /// Return the cached NFA/DFA powerset state for the given ID. + /// + /// This panics if the given ID does not address a valid state. + fn get_cached_state(&self, sid: LazyStateID) -> &State { + let index = sid.as_usize_untagged() >> self.dfa.stride2(); + &self.cache.states[index] + } + + /// Returns true if and only if the given ID corresponds to a "sentinel" + /// state. + /// + /// A sentinel state is a state that signifies a special condition of + /// search, and where every transition maps back to itself. See LazyStateID + /// for more details. Note that start and match states are _not_ sentinels + /// since they may otherwise be real states with non-trivial transitions. + /// The purposes of sentinel states is purely to indicate something. Their + /// transitions are not meant to be followed. + fn is_sentinel(&self, id: LazyStateID) -> bool { + id == self.unknown_id() || id == self.dead_id() || id == self.quit_id() + } + + /// Returns the ID of the unknown state for this lazy DFA. + fn unknown_id(&self) -> LazyStateID { + // This unwrap is OK since 0 is always a valid state ID. + LazyStateID::new(0).unwrap().to_unknown() + } + + /// Returns the ID of the dead state for this lazy DFA. + fn dead_id(&self) -> LazyStateID { + // This unwrap is OK since the maximum value here is 1 * 512 = 512, + // which is <= 2047 (the maximum state ID on 16-bit systems). Where + // 512 is the worst case for our equivalence classes (every byte is a + // distinct class). + LazyStateID::new(1 << self.dfa.stride2()).unwrap().to_dead() + } + + /// Returns the ID of the quit state for this lazy DFA. + fn quit_id(&self) -> LazyStateID { + // This unwrap is OK since the maximum value here is 2 * 512 = 1024, + // which is <= 2047 (the maximum state ID on 16-bit systems). Where + // 512 is the worst case for our equivalence classes (every byte is a + // distinct class). + LazyStateID::new(2 << self.dfa.stride2()).unwrap().to_quit() + } + + /// Returns true if and only if the given ID is valid. + /// + /// An ID is valid if it is both a valid index into the transition table + /// and is a multiple of the DFA's stride. + fn is_valid(&self, id: LazyStateID) -> bool { + let id = id.as_usize_untagged(); + id < self.cache.trans.len() && id % self.dfa.stride() == 0 + } + + /// Returns true if adding the state given would fit in this cache. + fn state_fits_in_cache(&self, state: &State) -> bool { + let needed = self.cache.memory_usage() + + self.memory_usage_for_one_more_state(state.memory_usage()); + trace!( + "lazy DFA cache capacity state check: {:?} ?<=? 
{:?}", + needed, + self.dfa.cache_capacity + ); + needed <= self.dfa.cache_capacity + } + + /// Returns true if adding the state to be built by the given builder would + /// fit in this cache. + fn state_builder_fits_in_cache(&self, state: &StateBuilderNFA) -> bool { + let needed = self.cache.memory_usage() + + self.memory_usage_for_one_more_state(state.as_bytes().len()); + trace!( + "lazy DFA cache capacity state builder check: {:?} ?<=? {:?}", + needed, + self.dfa.cache_capacity + ); + needed <= self.dfa.cache_capacity + } + + /// Returns the additional memory usage, in bytes, required to add one more + /// state to this cache. The given size should be the heap size, in bytes, + /// that would be used by the new state being added. + fn memory_usage_for_one_more_state( + &self, + state_heap_size: usize, + ) -> usize { + const ID_SIZE: usize = size_of::(); + const STATE_SIZE: usize = size_of::(); + + self.dfa.stride() * ID_SIZE // additional space needed in trans table + + STATE_SIZE // space in cache.states + + (STATE_SIZE + ID_SIZE) // space in cache.states_to_id + + state_heap_size // heap memory used by state itself + } +} + +/// A simple type that encapsulates the saving of a state ID through a cache +/// clearing. +/// +/// A state ID can be marked for saving with ToSave, while a state ID can be +/// saved itself with Saved. +#[derive(Clone, Debug)] +enum StateSaver { + /// An empty state saver. In this case, no states (other than the special + /// sentinel states) are preserved after clearing the cache. + None, + /// An ID of a state (and the state itself) that should be preserved after + /// the lazy DFA's cache has been cleared. After clearing, the updated ID + /// is stored in 'Saved' since it may have changed. + ToSave { id: LazyStateID, state: State }, + /// An ID that of a state that has been persisted through a lazy DFA + /// cache clearing. The ID recorded here corresponds to an ID that was + /// once marked as ToSave. The IDs are likely not equivalent even though + /// the states they point to are. + Saved(LazyStateID), +} + +impl StateSaver { + /// Create an empty state saver. + fn none() -> StateSaver { + StateSaver::None + } + + /// Replace this state saver with an empty saver, and if this saver is a + /// request to save a state, return that request. + fn take_to_save(&mut self) -> Option<(LazyStateID, State)> { + match core::mem::replace(self, StateSaver::None) { + StateSaver::None | StateSaver::Saved(_) => None, + StateSaver::ToSave { id, state } => Some((id, state)), + } + } + + /// Replace this state saver with an empty saver, and if this saver is a + /// saved state (or a request to save a state), return that state's ID. + /// + /// The idea here is that a request to save a state isn't necessarily + /// honored because it might not be needed. e.g., Some higher level code + /// might request a state to be saved on the off chance that the cache gets + /// cleared when a new state is added at a lower level. But if that new + /// state is never added, then the cache is never cleared and the state and + /// its ID remain unchanged. + fn take_saved(&mut self) -> Option { + match core::mem::replace(self, StateSaver::None) { + StateSaver::None => None, + StateSaver::Saved(id) | StateSaver::ToSave { id, .. } => Some(id), + } + } +} + +/// The configuration used for building a lazy DFA. +/// +/// As a convenience, [`DFA::config`] is an alias for [`Config::new`]. The +/// advantage of the former is that it often lets you avoid importing the +/// `Config` type directly. 
+/// +/// A lazy DFA configuration is a simple data object that is typically used +/// with [`Builder::configure`]. +/// +/// The default configuration guarantees that a search will never return a +/// "gave up" or "quit" error, although it is possible for a search to fail +/// if [`Config::starts_for_each_pattern`] wasn't enabled (which it is not by +/// default) and an [`Anchored::Pattern`] mode is requested via [`Input`]. +#[derive(Clone, Debug, Default)] +pub struct Config { + // As with other configuration types in this crate, we put all our knobs + // in options so that we can distinguish between "default" and "not set." + // This makes it possible to easily combine multiple configurations + // without default values overwriting explicitly specified values. See the + // 'overwrite' method. + // + // For docs on the fields below, see the corresponding method setters. + match_kind: Option, + pre: Option>, + starts_for_each_pattern: Option, + byte_classes: Option, + unicode_word_boundary: Option, + quitset: Option, + specialize_start_states: Option, + cache_capacity: Option, + skip_cache_capacity_check: Option, + minimum_cache_clear_count: Option>, + minimum_bytes_per_state: Option>, +} + +impl Config { + /// Return a new default lazy DFA builder configuration. + pub fn new() -> Config { + Config::default() + } + + /// Set the desired match semantics. + /// + /// The default is [`MatchKind::LeftmostFirst`], which corresponds to the + /// match semantics of Perl-like regex engines. That is, when multiple + /// patterns would match at the same leftmost position, the pattern that + /// appears first in the concrete syntax is chosen. + /// + /// Currently, the only other kind of match semantics supported is + /// [`MatchKind::All`]. This corresponds to classical DFA construction + /// where all possible matches are added to the lazy DFA. + /// + /// Typically, `All` is used when one wants to execute an overlapping + /// search and `LeftmostFirst` otherwise. In particular, it rarely makes + /// sense to use `All` with the various "leftmost" find routines, since the + /// leftmost routines depend on the `LeftmostFirst` automata construction + /// strategy. Specifically, `LeftmostFirst` adds dead states to the + /// lazy DFA as a way to terminate the search and report a match. + /// `LeftmostFirst` also supports non-greedy matches using this strategy + /// where as `All` does not. + /// + /// # Example: overlapping search + /// + /// This example shows the typical use of `MatchKind::All`, which is to + /// report overlapping matches. + /// + /// ``` + /// # if cfg!(miri) { return Ok(()); } // miri takes too long + /// use regex_automata::{ + /// hybrid::dfa::{DFA, OverlappingState}, + /// HalfMatch, Input, MatchKind, + /// }; + /// + /// let dfa = DFA::builder() + /// .configure(DFA::config().match_kind(MatchKind::All)) + /// .build_many(&[r"\w+$", r"\S+$"])?; + /// let mut cache = dfa.create_cache(); + /// let haystack = "@foo"; + /// let mut state = OverlappingState::start(); + /// + /// let expected = Some(HalfMatch::must(1, 4)); + /// dfa.try_search_overlapping_fwd( + /// &mut cache, &Input::new(haystack), &mut state, + /// )?; + /// assert_eq!(expected, state.get_match()); + /// + /// // The first pattern also matches at the same position, so re-running + /// // the search will yield another match. Notice also that the first + /// // pattern is returned after the second. 
This is because the second + /// // pattern begins its match before the first, is therefore an earlier + /// // match and is thus reported first. + /// let expected = Some(HalfMatch::must(0, 4)); + /// dfa.try_search_overlapping_fwd( + /// &mut cache, &Input::new(haystack), &mut state, + /// )?; + /// assert_eq!(expected, state.get_match()); + /// + /// # Ok::<(), Box>(()) + /// ``` + /// + /// # Example: reverse automaton to find start of match + /// + /// Another example for using `MatchKind::All` is for constructing a + /// reverse automaton to find the start of a match. `All` semantics are + /// used for this in order to find the longest possible match, which + /// corresponds to the leftmost starting position. + /// + /// Note that if you need the starting position then + /// [`hybrid::regex::Regex`](crate::hybrid::regex::Regex) will handle this + /// for you, so it's usually not necessary to do this yourself. + /// + /// ``` + /// use regex_automata::{ + /// hybrid::dfa::DFA, + /// nfa::thompson::NFA, + /// Anchored, HalfMatch, Input, MatchKind, + /// }; + /// + /// let input = Input::new("123foobar456"); + /// let pattern = r"[a-z]+r"; + /// + /// let dfa_fwd = DFA::new(pattern)?; + /// let dfa_rev = DFA::builder() + /// .thompson(NFA::config().reverse(true)) + /// .configure(DFA::config().match_kind(MatchKind::All)) + /// .build(pattern)?; + /// let mut cache_fwd = dfa_fwd.create_cache(); + /// let mut cache_rev = dfa_rev.create_cache(); + /// + /// let expected_fwd = HalfMatch::must(0, 9); + /// let expected_rev = HalfMatch::must(0, 3); + /// let got_fwd = dfa_fwd.try_search_fwd(&mut cache_fwd, &input)?.unwrap(); + /// // Here we don't specify the pattern to search for since there's only + /// // one pattern and we're doing a leftmost search. But if this were an + /// // overlapping search, you'd need to specify the pattern that matched + /// // in the forward direction. (Otherwise, you might wind up finding the + /// // starting position of a match of some other pattern.) That in turn + /// // requires building the reverse automaton with starts_for_each_pattern + /// // enabled. + /// let input = input + /// .clone() + /// .range(..got_fwd.offset()) + /// .anchored(Anchored::Yes); + /// let got_rev = dfa_rev.try_search_rev(&mut cache_rev, &input)?.unwrap(); + /// assert_eq!(expected_fwd, got_fwd); + /// assert_eq!(expected_rev, got_rev); + /// + /// # Ok::<(), Box>(()) + /// ``` + pub fn match_kind(mut self, kind: MatchKind) -> Config { + self.match_kind = Some(kind); + self + } + + /// Set a prefilter to be used whenever a start state is entered. + /// + /// A [`Prefilter`] in this context is meant to accelerate searches by + /// looking for literal prefixes that every match for the corresponding + /// pattern (or patterns) must start with. Once a prefilter produces a + /// match, the underlying search routine continues on to try and confirm + /// the match. + /// + /// Be warned that setting a prefilter does not guarantee that the search + /// will be faster. While it's usually a good bet, if the prefilter + /// produces a lot of false positive candidates (i.e., positions matched + /// by the prefilter but not by the regex), then the overall result can + /// be slower than if you had just executed the regex engine without any + /// prefilters. + /// + /// Note that unless [`Config::specialize_start_states`] has been + /// explicitly set, then setting this will also enable (when `pre` is + /// `Some`) or disable (when `pre` is `None`) start state specialization. 
+ /// This occurs because without start state specialization, a prefilter + /// is likely to be less effective. And without a prefilter, start state + /// specialization is usually pointless. + /// + /// By default no prefilter is set. + /// + /// # Example + /// + /// ``` + /// use regex_automata::{ + /// hybrid::dfa::DFA, + /// util::prefilter::Prefilter, + /// Input, HalfMatch, MatchKind, + /// }; + /// + /// let pre = Prefilter::new(MatchKind::LeftmostFirst, &["foo", "bar"]); + /// let re = DFA::builder() + /// .configure(DFA::config().prefilter(pre)) + /// .build(r"(foo|bar)[a-z]+")?; + /// let mut cache = re.create_cache(); + /// let input = Input::new("foo1 barfox bar"); + /// assert_eq!( + /// Some(HalfMatch::must(0, 11)), + /// re.try_search_fwd(&mut cache, &input)?, + /// ); + /// + /// # Ok::<(), Box>(()) + /// ``` + /// + /// Be warned though that an incorrect prefilter can lead to incorrect + /// results! + /// + /// ``` + /// use regex_automata::{ + /// hybrid::dfa::DFA, + /// util::prefilter::Prefilter, + /// Input, HalfMatch, MatchKind, + /// }; + /// + /// let pre = Prefilter::new(MatchKind::LeftmostFirst, &["foo", "car"]); + /// let re = DFA::builder() + /// .configure(DFA::config().prefilter(pre)) + /// .build(r"(foo|bar)[a-z]+")?; + /// let mut cache = re.create_cache(); + /// let input = Input::new("foo1 barfox bar"); + /// assert_eq!( + /// // No match reported even though there clearly is one! + /// None, + /// re.try_search_fwd(&mut cache, &input)?, + /// ); + /// + /// # Ok::<(), Box>(()) + /// ``` + pub fn prefilter(mut self, pre: Option) -> Config { + self.pre = Some(pre); + if self.specialize_start_states.is_none() { + self.specialize_start_states = + Some(self.get_prefilter().is_some()); + } + self + } + + /// Whether to compile a separate start state for each pattern in the + /// lazy DFA. + /// + /// When enabled, a separate **anchored** start state is added for each + /// pattern in the lazy DFA. When this start state is used, then the DFA + /// will only search for matches for the pattern specified, even if there + /// are other patterns in the DFA. + /// + /// The main downside of this option is that it can potentially increase + /// the size of the DFA and/or increase the time it takes to build the + /// DFA at search time. However, since this is configuration for a lazy + /// DFA, these states aren't actually built unless they're used. Enabling + /// this isn't necessarily free, however, as it may result in higher cache + /// usage. + /// + /// There are a few reasons one might want to enable this (it's disabled + /// by default): + /// + /// 1. When looking for the start of an overlapping match (using a reverse + /// DFA), doing it correctly requires starting the reverse search using the + /// starting state of the pattern that matched in the forward direction. + /// Indeed, when building a [`Regex`](crate::hybrid::regex::Regex), it + /// will automatically enable this option when building the reverse DFA + /// internally. + /// 2. When you want to use a DFA with multiple patterns to both search + /// for matches of any pattern or to search for anchored matches of one + /// particular pattern while using the same DFA. (Otherwise, you would need + /// to compile a new DFA for each pattern.) + /// + /// By default this is disabled. + /// + /// # Example + /// + /// This example shows how to use this option to permit the same lazy DFA + /// to run both general searches for any pattern and anchored searches for + /// a specific pattern. 
+ /// + /// ``` + /// use regex_automata::{ + /// hybrid::dfa::DFA, + /// Anchored, HalfMatch, Input, PatternID, + /// }; + /// + /// let dfa = DFA::builder() + /// .configure(DFA::config().starts_for_each_pattern(true)) + /// .build_many(&[r"[a-z0-9]{6}", r"[a-z][a-z0-9]{5}"])?; + /// let mut cache = dfa.create_cache(); + /// let haystack = "bar foo123"; + /// + /// // Here's a normal unanchored search that looks for any pattern. + /// let expected = HalfMatch::must(0, 10); + /// let input = Input::new(haystack); + /// assert_eq!(Some(expected), dfa.try_search_fwd(&mut cache, &input)?); + /// // We can also do a normal anchored search for any pattern. Since it's + /// // an anchored search, we position the start of the search where we + /// // know the match will begin. + /// let expected = HalfMatch::must(0, 10); + /// let input = Input::new(haystack).range(4..); + /// assert_eq!(Some(expected), dfa.try_search_fwd(&mut cache, &input)?); + /// // Since we compiled anchored start states for each pattern, we can + /// // also look for matches of other patterns explicitly, even if a + /// // different pattern would have normally matched. + /// let expected = HalfMatch::must(1, 10); + /// let input = Input::new(haystack) + /// .range(4..) + /// .anchored(Anchored::Pattern(PatternID::must(1))); + /// assert_eq!(Some(expected), dfa.try_search_fwd(&mut cache, &input)?); + /// + /// # Ok::<(), Box>(()) + /// ``` + pub fn starts_for_each_pattern(mut self, yes: bool) -> Config { + self.starts_for_each_pattern = Some(yes); + self + } + + /// Whether to attempt to shrink the size of the lazy DFA's alphabet or + /// not. + /// + /// This option is enabled by default and should never be disabled unless + /// one is debugging the lazy DFA. + /// + /// When enabled, the lazy DFA will use a map from all possible bytes + /// to their corresponding equivalence class. Each equivalence class + /// represents a set of bytes that does not discriminate between a match + /// and a non-match in the DFA. For example, the pattern `[ab]+` has at + /// least two equivalence classes: a set containing `a` and `b` and a set + /// containing every byte except for `a` and `b`. `a` and `b` are in the + /// same equivalence classes because they never discriminate between a + /// match and a non-match. + /// + /// The advantage of this map is that the size of the transition table + /// can be reduced drastically from `#states * 256 * sizeof(LazyStateID)` + /// to `#states * k * sizeof(LazyStateID)` where `k` is the number of + /// equivalence classes (rounded up to the nearest power of 2). As a + /// result, total space usage can decrease substantially. Moreover, since a + /// smaller alphabet is used, DFA compilation during search becomes faster + /// as well since it will potentially be able to reuse a single transition + /// for multiple bytes. + /// + /// **WARNING:** This is only useful for debugging lazy DFAs. Disabling + /// this does not yield any speed advantages. Namely, even when this is + /// disabled, a byte class map is still used while searching. The only + /// difference is that every byte will be forced into its own distinct + /// equivalence class. This is useful for debugging the actual generated + /// transitions because it lets one see the transitions defined on actual + /// bytes instead of the equivalence classes. + pub fn byte_classes(mut self, yes: bool) -> Config { + self.byte_classes = Some(yes); + self + } + + /// Heuristically enable Unicode word boundaries. 
+ /// + /// When set, this will attempt to implement Unicode word boundaries as if + /// they were ASCII word boundaries. This only works when the search input + /// is ASCII only. If a non-ASCII byte is observed while searching, then a + /// [`MatchError::quit`] error is returned. + /// + /// A possible alternative to enabling this option is to simply use an + /// ASCII word boundary, e.g., via `(?-u:\b)`. The main reason to use this + /// option is if you absolutely need Unicode support. This option lets one + /// use a fast search implementation (a DFA) for some potentially very + /// common cases, while providing the option to fall back to some other + /// regex engine to handle the general case when an error is returned. + /// + /// If the pattern provided has no Unicode word boundary in it, then this + /// option has no effect. (That is, quitting on a non-ASCII byte only + /// occurs when this option is enabled _and_ a Unicode word boundary is + /// present in the pattern.) + /// + /// This is almost equivalent to setting all non-ASCII bytes to be quit + /// bytes. The only difference is that this will cause non-ASCII bytes to + /// be quit bytes _only_ when a Unicode word boundary is present in the + /// pattern. + /// + /// When enabling this option, callers _must_ be prepared to + /// handle a [`MatchError`] error during search. When using a + /// [`Regex`](crate::hybrid::regex::Regex), this corresponds to using the + /// `try_` suite of methods. Alternatively, if callers can guarantee that + /// their input is ASCII only, then a [`MatchError::quit`] error will never + /// be returned while searching. + /// + /// This is disabled by default. + /// + /// # Example + /// + /// This example shows how to heuristically enable Unicode word boundaries + /// in a pattern. It also shows what happens when a search comes across a + /// non-ASCII byte. + /// + /// ``` + /// use regex_automata::{ + /// hybrid::dfa::DFA, + /// HalfMatch, Input, MatchError, + /// }; + /// + /// let dfa = DFA::builder() + /// .configure(DFA::config().unicode_word_boundary(true)) + /// .build(r"\b[0-9]+\b")?; + /// let mut cache = dfa.create_cache(); + /// + /// // The match occurs before the search ever observes the snowman + /// // character, so no error occurs. + /// let haystack = "foo 123 ☃"; + /// let expected = Some(HalfMatch::must(0, 7)); + /// let got = dfa.try_search_fwd(&mut cache, &Input::new(haystack))?; + /// assert_eq!(expected, got); + /// + /// // Notice that this search fails, even though the snowman character + /// // occurs after the ending match offset. This is because search + /// // routines read one byte past the end of the search to account for + /// // look-around, and indeed, this is required here to determine whether + /// // the trailing \b matches. + /// let haystack = "foo 123 ☃"; + /// let expected = MatchError::quit(0xE2, 8); + /// let got = dfa.try_search_fwd(&mut cache, &Input::new(haystack)); + /// assert_eq!(Err(expected), got); + /// + /// // Another example is executing a search where the span of the haystack + /// // we specify is all ASCII, but there is non-ASCII just before it. This + /// // correctly also reports an error. + /// let input = Input::new("β123").range(2..); + /// let expected = MatchError::quit(0xB2, 1); + /// let got = dfa.try_search_fwd(&mut cache, &input); + /// assert_eq!(Err(expected), got); + /// + /// // And similarly for the trailing word boundary. 
+ /// let input = Input::new("123β").range(..3); + /// let expected = MatchError::quit(0xCE, 3); + /// let got = dfa.try_search_fwd(&mut cache, &input); + /// assert_eq!(Err(expected), got); + /// + /// # Ok::<(), Box>(()) + /// ``` + pub fn unicode_word_boundary(mut self, yes: bool) -> Config { + // We have a separate option for this instead of just setting the + // appropriate quit bytes here because we don't want to set quit bytes + // for every regex. We only want to set them when the regex contains a + // Unicode word boundary. + self.unicode_word_boundary = Some(yes); + self + } + + /// Add a "quit" byte to the lazy DFA. + /// + /// When a quit byte is seen during search time, then search will return a + /// [`MatchError::quit`] error indicating the offset at which the search + /// stopped. + /// + /// A quit byte will always overrule any other aspects of a regex. For + /// example, if the `x` byte is added as a quit byte and the regex `\w` is + /// used, then observing `x` will cause the search to quit immediately + /// despite the fact that `x` is in the `\w` class. + /// + /// This mechanism is primarily useful for heuristically enabling certain + /// features like Unicode word boundaries in a DFA. Namely, if the input + /// to search is ASCII, then a Unicode word boundary can be implemented + /// via an ASCII word boundary with no change in semantics. Thus, a DFA + /// can attempt to match a Unicode word boundary but give up as soon as it + /// observes a non-ASCII byte. Indeed, if callers set all non-ASCII bytes + /// to be quit bytes, then Unicode word boundaries will be permitted when + /// building lazy DFAs. Of course, callers should enable + /// [`Config::unicode_word_boundary`] if they want this behavior instead. + /// (The advantage being that non-ASCII quit bytes will only be added if a + /// Unicode word boundary is in the pattern.) + /// + /// When enabling this option, callers _must_ be prepared to + /// handle a [`MatchError`] error during search. When using a + /// [`Regex`](crate::hybrid::regex::Regex), this corresponds to using the + /// `try_` suite of methods. + /// + /// By default, there are no quit bytes set. + /// + /// # Panics + /// + /// This panics if heuristic Unicode word boundaries are enabled and any + /// non-ASCII byte is removed from the set of quit bytes. Namely, enabling + /// Unicode word boundaries requires setting every non-ASCII byte to a quit + /// byte. So if the caller attempts to undo any of that, then this will + /// panic. + /// + /// # Example + /// + /// This example shows how to cause a search to terminate if it sees a + /// `\n` byte. This could be useful if, for example, you wanted to prevent + /// a user supplied pattern from matching across a line boundary. + /// + /// ``` + /// # if cfg!(miri) { return Ok(()); } // miri takes too long + /// use regex_automata::{hybrid::dfa::DFA, MatchError, Input}; + /// + /// let dfa = DFA::builder() + /// .configure(DFA::config().quit(b'\n', true)) + /// .build(r"foo\p{any}+bar")?; + /// let mut cache = dfa.create_cache(); + /// + /// let haystack = "foo\nbar"; + /// // Normally this would produce a match, since \p{any} contains '\n'. + /// // But since we instructed the automaton to enter a quit state if a + /// // '\n' is observed, this produces a match error instead. 
+ /// let expected = MatchError::quit(b'\n', 3); + /// let got = dfa.try_search_fwd( + /// &mut cache, + /// &Input::new(haystack), + /// ).unwrap_err(); + /// assert_eq!(expected, got); + /// + /// # Ok::<(), Box>(()) + /// ``` + pub fn quit(mut self, byte: u8, yes: bool) -> Config { + if self.get_unicode_word_boundary() && !byte.is_ascii() && !yes { + panic!( + "cannot set non-ASCII byte to be non-quit when \ + Unicode word boundaries are enabled" + ); + } + if self.quitset.is_none() { + self.quitset = Some(ByteSet::empty()); + } + if yes { + self.quitset.as_mut().unwrap().add(byte); + } else { + self.quitset.as_mut().unwrap().remove(byte); + } + self + } + + /// Enable specializing start states in the lazy DFA. + /// + /// When start states are specialized, an implementor of a search routine + /// using a lazy DFA can tell when the search has entered a starting state. + /// When start states aren't specialized, then it is impossible to know + /// whether the search has entered a start state. + /// + /// Ideally, this option wouldn't need to exist and we could always + /// specialize start states. The problem is that start states can be quite + /// active. This in turn means that an efficient search routine is likely + /// to ping-pong between a heavily optimized hot loop that handles most + /// states and to a less optimized specialized handling of start states. + /// This causes branches to get heavily mispredicted and overall can + /// materially decrease throughput. Therefore, specializing start states + /// should only be enabled when it is needed. + /// + /// Knowing whether a search is in a start state is typically useful when a + /// prefilter is active for the search. A prefilter is typically only run + /// when in a start state and a prefilter can greatly accelerate a search. + /// Therefore, the possible cost of specializing start states is worth it + /// in this case. Otherwise, if you have no prefilter, there is likely no + /// reason to specialize start states. + /// + /// This is disabled by default, but note that it is automatically + /// enabled (or disabled) if [`Config::prefilter`] is set. Namely, unless + /// `specialize_start_states` has already been set, [`Config::prefilter`] + /// will automatically enable or disable it based on whether a prefilter + /// is present or not, respectively. This is done because a prefilter's + /// effectiveness is rooted in being executed whenever the DFA is in a + /// start state, and that's only possible to do when they are specialized. + /// + /// Note that it is plausibly reasonable to _disable_ this option + /// explicitly while _enabling_ a prefilter. In that case, a prefilter + /// will still be run at the beginning of a search, but never again. This + /// in theory could strike a good balance if you're in a situation where a + /// prefilter is likely to produce many false positive candidates. + /// + /// # Example + /// + /// This example shows how to enable start state specialization and then + /// shows how to check whether a state is a start state or not. 
+ /// + /// ``` + /// use regex_automata::{hybrid::dfa::DFA, MatchError, Input}; + /// + /// let dfa = DFA::builder() + /// .configure(DFA::config().specialize_start_states(true)) + /// .build(r"[a-z]+")?; + /// let mut cache = dfa.create_cache(); + /// + /// let haystack = "123 foobar 4567".as_bytes(); + /// let sid = dfa.start_state_forward(&mut cache, &Input::new(haystack))?; + /// // The ID returned by 'start_state_forward' will always be tagged as + /// // a start state when start state specialization is enabled. + /// assert!(sid.is_tagged()); + /// assert!(sid.is_start()); + /// + /// # Ok::<(), Box>(()) + /// ``` + /// + /// Compare the above with the default lazy DFA configuration where + /// start states are _not_ specialized. In this case, the start state + /// is not tagged and `sid.is_start()` returns false. + /// + /// ``` + /// use regex_automata::{hybrid::dfa::DFA, MatchError, Input}; + /// + /// let dfa = DFA::new(r"[a-z]+")?; + /// let mut cache = dfa.create_cache(); + /// + /// let haystack = "123 foobar 4567".as_bytes(); + /// let sid = dfa.start_state_forward(&mut cache, &Input::new(haystack))?; + /// // Start states are not tagged in the default configuration! + /// assert!(!sid.is_tagged()); + /// assert!(!sid.is_start()); + /// + /// # Ok::<(), Box>(()) + /// ``` + pub fn specialize_start_states(mut self, yes: bool) -> Config { + self.specialize_start_states = Some(yes); + self + } + + /// Sets the maximum amount of heap memory, in bytes, to allocate to the + /// cache for use during a lazy DFA search. If the lazy DFA would otherwise + /// use more heap memory, then, depending on other configuration knobs, + /// either stop the search and return an error or clear the cache and + /// continue the search. + /// + /// The default cache capacity is some "reasonable" number that will + /// accommodate most regular expressions. You may find that if you need + /// to build a large DFA then it may be necessary to increase the cache + /// capacity. + /// + /// Note that while building a lazy DFA will do a "minimum" check to ensure + /// the capacity is big enough, this is more or less about correctness. + /// If the cache is bigger than the minimum but still "too small," then the + /// lazy DFA could wind up spending a lot of time clearing the cache and + /// recomputing transitions, thus negating the performance benefits of a + /// lazy DFA. Thus, setting the cache capacity is mostly an experimental + /// endeavor. For most common patterns, however, the default should be + /// sufficient. + /// + /// For more details on how the lazy DFA's cache is used, see the + /// documentation for [`Cache`]. + /// + /// # Example + /// + /// This example shows what happens if the configured cache capacity is + /// too small. In such cases, one can override the cache capacity to make + /// it bigger. Alternatively, one might want to use less memory by setting + /// a smaller cache capacity. + /// + /// ``` + /// # if cfg!(miri) { return Ok(()); } // miri takes too long + /// use regex_automata::{hybrid::dfa::DFA, HalfMatch, Input}; + /// + /// let pattern = r"\p{L}{1000}"; + /// + /// // The default cache capacity is likely too small to deal with regexes + /// // that are very large. Large repetitions of large Unicode character + /// // classes are a common way to make very large regexes. + /// let _ = DFA::new(pattern).unwrap_err(); + /// // Bump up the capacity to something bigger. 
+ /// let dfa = DFA::builder() + /// .configure(DFA::config().cache_capacity(100 * (1<<20))) // 100 MB + /// .build(pattern)?; + /// let mut cache = dfa.create_cache(); + /// + /// let haystack = "ͰͲͶͿΆΈΉΊΌΎΏΑΒΓΔΕΖΗΘΙ".repeat(50); + /// let expected = Some(HalfMatch::must(0, 2000)); + /// let got = dfa.try_search_fwd(&mut cache, &Input::new(&haystack))?; + /// assert_eq!(expected, got); + /// + /// # Ok::<(), Box>(()) + /// ``` + pub fn cache_capacity(mut self, bytes: usize) -> Config { + self.cache_capacity = Some(bytes); + self + } + + /// Configures construction of a lazy DFA to use the minimum cache capacity + /// if the configured capacity is otherwise too small for the provided NFA. + /// + /// This is useful if you never want lazy DFA construction to fail because + /// of a capacity that is too small. + /// + /// In general, this option is typically not a good idea. In particular, + /// while a minimum cache capacity does permit the lazy DFA to function + /// where it otherwise couldn't, it's plausible that it may not function + /// well if it's constantly running out of room. In that case, the speed + /// advantages of the lazy DFA may be negated. On the other hand, the + /// "minimum" cache capacity computed may not be completely accurate and + /// could actually be bigger than what is really necessary. Therefore, it + /// is plausible that using the minimum cache capacity could still result + /// in very good performance. + /// + /// This is disabled by default. + /// + /// # Example + /// + /// This example shows what happens if the configured cache capacity is + /// too small. In such cases, one could override the capacity explicitly. + /// An alternative, demonstrated here, let's us force construction to use + /// the minimum cache capacity if the configured capacity is otherwise + /// too small. + /// + /// ``` + /// # if cfg!(miri) { return Ok(()); } // miri takes too long + /// use regex_automata::{hybrid::dfa::DFA, HalfMatch, Input}; + /// + /// let pattern = r"\p{L}{1000}"; + /// + /// // The default cache capacity is likely too small to deal with regexes + /// // that are very large. Large repetitions of large Unicode character + /// // classes are a common way to make very large regexes. + /// let _ = DFA::new(pattern).unwrap_err(); + /// // Configure construction such it automatically selects the minimum + /// // cache capacity if it would otherwise be too small. + /// let dfa = DFA::builder() + /// .configure(DFA::config().skip_cache_capacity_check(true)) + /// .build(pattern)?; + /// let mut cache = dfa.create_cache(); + /// + /// let haystack = "ͰͲͶͿΆΈΉΊΌΎΏΑΒΓΔΕΖΗΘΙ".repeat(50); + /// let expected = Some(HalfMatch::must(0, 2000)); + /// let got = dfa.try_search_fwd(&mut cache, &Input::new(&haystack))?; + /// assert_eq!(expected, got); + /// + /// # Ok::<(), Box>(()) + /// ``` + pub fn skip_cache_capacity_check(mut self, yes: bool) -> Config { + self.skip_cache_capacity_check = Some(yes); + self + } + + /// Configure a lazy DFA search to quit after a certain number of cache + /// clearings. + /// + /// When a minimum is set, then a lazy DFA search will *possibly* "give + /// up" after the minimum number of cache clearings has occurred. This is + /// typically useful in scenarios where callers want to detect whether the + /// lazy DFA search is "efficient" or not. If the cache is cleared too many + /// times, this is a good indicator that it is not efficient, and thus, the + /// caller may wish to use some other regex engine. 
+ /// + /// Note that the number of times a cache is cleared is a property of + /// the cache itself. Thus, if a cache is used in a subsequent search + /// with a similarly configured lazy DFA, then it could cause the + /// search to "give up" if the cache needed to be cleared, depending + /// on its internal count and configured minimum. The cache clear + /// count can only be reset to `0` via [`DFA::reset_cache`] (or + /// [`Regex::reset_cache`](crate::hybrid::regex::Regex::reset_cache) if + /// you're using the `Regex` API). + /// + /// By default, no minimum is configured. Thus, a lazy DFA search will + /// never give up due to cache clearings. If you do set this option, you + /// might consider also setting [`Config::minimum_bytes_per_state`] in + /// order for the lazy DFA to take efficiency into account before giving + /// up. + /// + /// # Example + /// + /// This example uses a somewhat pathological configuration to demonstrate + /// the _possible_ behavior of cache clearing and how it might result + /// in a search that returns an error. + /// + /// It is important to note that the precise mechanics of how and when + /// a cache gets cleared is an implementation detail. + /// + /// ``` + /// # if cfg!(miri) { return Ok(()); } // miri takes too long + /// use regex_automata::{hybrid::dfa::DFA, Input, MatchError, MatchErrorKind}; + /// + /// // This is a carefully chosen regex. The idea is to pick one + /// // that requires some decent number of states (hence the bounded + /// // repetition). But we specifically choose to create a class with an + /// // ASCII letter and a non-ASCII letter so that we can check that no new + /// // states are created once the cache is full. Namely, if we fill up the + /// // cache on a haystack of 'a's, then in order to match one 'β', a new + /// // state will need to be created since a 'β' is encoded with multiple + /// // bytes. Since there's no room for this state, the search should quit + /// // at the very first position. + /// let pattern = r"[aβ]{100}"; + /// let dfa = DFA::builder() + /// .configure( + /// // Configure it so that we have the minimum cache capacity + /// // possible. And that if any clearings occur, the search quits. + /// DFA::config() + /// .skip_cache_capacity_check(true) + /// .cache_capacity(0) + /// .minimum_cache_clear_count(Some(0)), + /// ) + /// .build(pattern)?; + /// let mut cache = dfa.create_cache(); + /// + /// // Our search will give up before reaching the end! + /// let haystack = "a".repeat(101).into_bytes(); + /// let result = dfa.try_search_fwd(&mut cache, &Input::new(&haystack)); + /// assert!(matches!( + /// *result.unwrap_err().kind(), + /// MatchErrorKind::GaveUp { .. }, + /// )); + /// + /// // Now that we know the cache is full, if we search a haystack that we + /// // know will require creating at least one new state, it should not + /// // be able to make much progress. + /// let haystack = "β".repeat(101).into_bytes(); + /// let result = dfa.try_search_fwd(&mut cache, &Input::new(&haystack)); + /// assert!(matches!( + /// *result.unwrap_err().kind(), + /// MatchErrorKind::GaveUp { .. }, + /// )); + /// + /// // If we reset the cache, then we should be able to create more states + /// // and make more progress with searching for betas. + /// cache.reset(&dfa); + /// let haystack = "β".repeat(101).into_bytes(); + /// let result = dfa.try_search_fwd(&mut cache, &Input::new(&haystack)); + /// assert!(matches!( + /// *result.unwrap_err().kind(), + /// MatchErrorKind::GaveUp { .. 
}, + /// )); + /// + /// // ... switching back to ASCII still makes progress since it just needs + /// // to set transitions on existing states! + /// let haystack = "a".repeat(101).into_bytes(); + /// let result = dfa.try_search_fwd(&mut cache, &Input::new(&haystack)); + /// assert!(matches!( + /// *result.unwrap_err().kind(), + /// MatchErrorKind::GaveUp { .. }, + /// )); + /// + /// # Ok::<(), Box>(()) + /// ``` + pub fn minimum_cache_clear_count(mut self, min: Option) -> Config { + self.minimum_cache_clear_count = Some(min); + self + } + + /// Configure a lazy DFA search to quit only when its efficiency drops + /// below the given minimum. + /// + /// The efficiency of the cache is determined by the number of DFA states + /// compiled per byte of haystack searched. For example, if the efficiency + /// is 2, then it means the lazy DFA is creating a new DFA state after + /// searching approximately 2 bytes in a haystack. Generally speaking, 2 + /// is quite bad and it's likely that even a slower regex engine like the + /// [`PikeVM`](crate::nfa::thompson::pikevm::PikeVM) would be faster. + /// + /// This has no effect if [`Config::minimum_cache_clear_count`] is not set. + /// Namely, this option only kicks in when the cache has been cleared more + /// than the minimum number. If no minimum is set, then the cache is simply + /// cleared whenever it fills up and it is impossible for the lazy DFA to + /// quit due to ineffective use of the cache. + /// + /// In general, if one is setting [`Config::minimum_cache_clear_count`], + /// then one should probably also set this knob as well. The reason is + /// that the absolute number of times the cache is cleared is generally + /// not a great predictor of efficiency. For example, if a new DFA state + /// is created for every 1,000 bytes searched, then it wouldn't be hard + /// for the cache to get cleared more than `N` times and then cause the + /// lazy DFA to quit. But a new DFA state every 1,000 bytes is likely quite + /// good from a performance perspective, and it's likely that the lazy + /// DFA should continue searching, even if it requires clearing the cache + /// occasionally. + /// + /// Finally, note that if you're implementing your own lazy DFA search + /// routine and also want this efficiency check to work correctly, then + /// you'll need to use the following routines to record search progress: + /// + /// * Call [`Cache::search_start`] at the beginning of every search. + /// * Call [`Cache::search_update`] whenever [`DFA::next_state`] is + /// called. + /// * Call [`Cache::search_finish`] before completing a search. (It is + /// not strictly necessary to call this when an error is returned, as + /// `Cache::search_start` will automatically finish the previous search + /// for you. But calling it where possible before returning helps improve + /// the accuracy of how many bytes have actually been searched.) + pub fn minimum_bytes_per_state(mut self, min: Option) -> Config { + self.minimum_bytes_per_state = Some(min); + self + } + + /// Returns the match semantics set in this configuration. + pub fn get_match_kind(&self) -> MatchKind { + self.match_kind.unwrap_or(MatchKind::LeftmostFirst) + } + + /// Returns the prefilter set in this configuration, if one at all. + pub fn get_prefilter(&self) -> Option<&Prefilter> { + self.pre.as_ref().unwrap_or(&None).as_ref() + } + + /// Returns whether this configuration has enabled anchored starting states + /// for every pattern in the DFA. 
+ pub fn get_starts_for_each_pattern(&self) -> bool { + self.starts_for_each_pattern.unwrap_or(false) + } + + /// Returns whether this configuration has enabled byte classes or not. + /// This is typically a debugging oriented option, as disabling it confers + /// no speed benefit. + pub fn get_byte_classes(&self) -> bool { + self.byte_classes.unwrap_or(true) + } + + /// Returns whether this configuration has enabled heuristic Unicode word + /// boundary support. When enabled, it is possible for a search to return + /// an error. + pub fn get_unicode_word_boundary(&self) -> bool { + self.unicode_word_boundary.unwrap_or(false) + } + + /// Returns whether this configuration will instruct the lazy DFA to enter + /// a quit state whenever the given byte is seen during a search. When at + /// least one byte has this enabled, it is possible for a search to return + /// an error. + pub fn get_quit(&self, byte: u8) -> bool { + self.quitset.map_or(false, |q| q.contains(byte)) + } + + /// Returns whether this configuration will instruct the lazy DFA to + /// "specialize" start states. When enabled, the lazy DFA will tag start + /// states so that search routines using the lazy DFA can detect when + /// it's in a start state and do some kind of optimization (like run a + /// prefilter). + pub fn get_specialize_start_states(&self) -> bool { + self.specialize_start_states.unwrap_or(false) + } + + /// Returns the cache capacity set on this configuration. + pub fn get_cache_capacity(&self) -> usize { + self.cache_capacity.unwrap_or(2 * (1 << 20)) + } + + /// Returns whether the cache capacity check should be skipped. + pub fn get_skip_cache_capacity_check(&self) -> bool { + self.skip_cache_capacity_check.unwrap_or(false) + } + + /// Returns, if set, the minimum number of times the cache must be cleared + /// before a lazy DFA search can give up. When no minimum is set, then a + /// search will never quit and will always clear the cache whenever it + /// fills up. + pub fn get_minimum_cache_clear_count(&self) -> Option { + self.minimum_cache_clear_count.unwrap_or(None) + } + + /// Returns, if set, the minimum number of bytes per state that need to be + /// processed in order for the lazy DFA to keep going. If the minimum falls + /// below this number (and the cache has been cleared a minimum number of + /// times), then the lazy DFA will return a "gave up" error. + pub fn get_minimum_bytes_per_state(&self) -> Option { + self.minimum_bytes_per_state.unwrap_or(None) + } + + /// Returns the minimum lazy DFA cache capacity required for the given NFA. + /// + /// The cache capacity required for a particular NFA may change without + /// notice. Callers should not rely on it being stable. + /// + /// This is useful for informational purposes, but can also be useful for + /// other reasons. For example, if one wants to check the minimum cache + /// capacity themselves or if one wants to set the capacity based on the + /// minimum. + /// + /// This may return an error if this configuration does not support all of + /// the instructions used in the given NFA. For example, if the NFA has a + /// Unicode word boundary but this configuration does not enable heuristic + /// support for Unicode word boundaries. 
+ pub fn get_minimum_cache_capacity( + &self, + nfa: &thompson::NFA, + ) -> Result { + let quitset = self.quit_set_from_nfa(nfa)?; + let classes = self.byte_classes_from_nfa(nfa, &quitset); + let starts = self.get_starts_for_each_pattern(); + Ok(minimum_cache_capacity(nfa, &classes, starts)) + } + + /// Returns the byte class map used during search from the given NFA. + /// + /// If byte classes are disabled on this configuration, then a map is + /// returned that puts each byte in its own equivalent class. + fn byte_classes_from_nfa( + &self, + nfa: &thompson::NFA, + quit: &ByteSet, + ) -> ByteClasses { + if !self.get_byte_classes() { + // The lazy DFA will always use the equivalence class map, but + // enabling this option is useful for debugging. Namely, this will + // cause all transitions to be defined over their actual bytes + // instead of an opaque equivalence class identifier. The former is + // much easier to grok as a human. + ByteClasses::singletons() + } else { + let mut set = nfa.byte_class_set().clone(); + // It is important to distinguish any "quit" bytes from all other + // bytes. Otherwise, a non-quit byte may end up in the same class + // as a quit byte, and thus cause the DFA stop when it shouldn't. + // + // Test case: + // + // regex-cli find match hybrid --unicode-word-boundary \ + // -p '^#' -p '\b10\.55\.182\.100\b' -y @conn.json.1000x.log + if !quit.is_empty() { + set.add_set(&quit); + } + set.byte_classes() + } + } + + /// Return the quit set for this configuration and the given NFA. + /// + /// This may return an error if the NFA is incompatible with this + /// configuration's quit set. For example, if the NFA has a Unicode word + /// boundary and the quit set doesn't include non-ASCII bytes. + fn quit_set_from_nfa( + &self, + nfa: &thompson::NFA, + ) -> Result { + let mut quit = self.quitset.unwrap_or(ByteSet::empty()); + if nfa.look_set_any().contains_word_unicode() { + if self.get_unicode_word_boundary() { + for b in 0x80..=0xFF { + quit.add(b); + } + } else { + // If heuristic support for Unicode word boundaries wasn't + // enabled, then we can still check if our quit set is correct. + // If the caller set their quit bytes in a way that causes the + // DFA to quit on at least all non-ASCII bytes, then that's all + // we need for heuristic support to work. + if !quit.contains_range(0x80, 0xFF) { + return Err( + BuildError::unsupported_dfa_word_boundary_unicode(), + ); + } + } + } + Ok(quit) + } + + /// Overwrite the default configuration such that the options in `o` are + /// always used. If an option in `o` is not set, then the corresponding + /// option in `self` is used. If it's not set in `self` either, then it + /// remains not set. 
+ fn overwrite(&self, o: Config) -> Config { + Config { + match_kind: o.match_kind.or(self.match_kind), + pre: o.pre.or_else(|| self.pre.clone()), + starts_for_each_pattern: o + .starts_for_each_pattern + .or(self.starts_for_each_pattern), + byte_classes: o.byte_classes.or(self.byte_classes), + unicode_word_boundary: o + .unicode_word_boundary + .or(self.unicode_word_boundary), + quitset: o.quitset.or(self.quitset), + specialize_start_states: o + .specialize_start_states + .or(self.specialize_start_states), + cache_capacity: o.cache_capacity.or(self.cache_capacity), + skip_cache_capacity_check: o + .skip_cache_capacity_check + .or(self.skip_cache_capacity_check), + minimum_cache_clear_count: o + .minimum_cache_clear_count + .or(self.minimum_cache_clear_count), + minimum_bytes_per_state: o + .minimum_bytes_per_state + .or(self.minimum_bytes_per_state), + } + } +} + +/// A builder for constructing a lazy deterministic finite automaton from +/// regular expressions. +/// +/// As a convenience, [`DFA::builder`] is an alias for [`Builder::new`]. The +/// advantage of the former is that it often lets you avoid importing the +/// `Builder` type directly. +/// +/// This builder provides two main things: +/// +/// 1. It provides a few different `build` routines for actually constructing +/// a DFA from different kinds of inputs. The most convenient is +/// [`Builder::build`], which builds a DFA directly from a pattern string. The +/// most flexible is [`Builder::build_from_nfa`], which builds a DFA straight +/// from an NFA. +/// 2. The builder permits configuring a number of things. +/// [`Builder::configure`] is used with [`Config`] to configure aspects of +/// the DFA and the construction process itself. [`Builder::syntax`] and +/// [`Builder::thompson`] permit configuring the regex parser and Thompson NFA +/// construction, respectively. The syntax and thompson configurations only +/// apply when building from a pattern string. +/// +/// This builder always constructs a *single* lazy DFA. As such, this builder +/// can only be used to construct regexes that either detect the presence +/// of a match or find the end location of a match. A single DFA cannot +/// produce both the start and end of a match. For that information, use a +/// [`Regex`](crate::hybrid::regex::Regex), which can be similarly configured +/// using [`regex::Builder`](crate::hybrid::regex::Builder). The main reason +/// to use a DFA directly is if the end location of a match is enough for your +/// use case. Namely, a `Regex` will construct two lazy DFAs instead of one, +/// since a second reverse DFA is needed to find the start of a match. +/// +/// # Example +/// +/// This example shows how to build a lazy DFA that uses a tiny cache capacity +/// and completely disables Unicode. That is: +/// +/// * Things such as `\w`, `.` and `\b` are no longer Unicode-aware. `\w` +/// and `\b` are ASCII-only while `.` matches any byte except for `\n` +/// (instead of any UTF-8 encoding of a Unicode scalar value except for +/// `\n`). Things that are Unicode only, such as `\pL`, are not allowed. +/// * The pattern itself is permitted to match invalid UTF-8. For example, +/// things like `[^a]` that match any byte except for `a` are permitted. 
+/// +/// ``` +/// use regex_automata::{ +/// hybrid::dfa::DFA, +/// nfa::thompson, +/// util::syntax, +/// HalfMatch, Input, +/// }; +/// +/// let dfa = DFA::builder() +/// .configure(DFA::config().cache_capacity(5_000)) +/// .thompson(thompson::Config::new().utf8(false)) +/// .syntax(syntax::Config::new().unicode(false).utf8(false)) +/// .build(r"foo[^b]ar.*")?; +/// let mut cache = dfa.create_cache(); +/// +/// let haystack = b"\xFEfoo\xFFar\xE2\x98\xFF\n"; +/// let expected = Some(HalfMatch::must(0, 10)); +/// let got = dfa.try_search_fwd(&mut cache, &Input::new(haystack))?; +/// assert_eq!(expected, got); +/// +/// # Ok::<(), Box>(()) +/// ``` +#[derive(Clone, Debug)] +pub struct Builder { + config: Config, + #[cfg(feature = "syntax")] + thompson: thompson::Compiler, +} + +impl Builder { + /// Create a new lazy DFA builder with the default configuration. + pub fn new() -> Builder { + Builder { + config: Config::default(), + #[cfg(feature = "syntax")] + thompson: thompson::Compiler::new(), + } + } + + /// Build a lazy DFA from the given pattern. + /// + /// If there was a problem parsing or compiling the pattern, then an error + /// is returned. + #[cfg(feature = "syntax")] + pub fn build(&self, pattern: &str) -> Result { + self.build_many(&[pattern]) + } + + /// Build a lazy DFA from the given patterns. + /// + /// When matches are returned, the pattern ID corresponds to the index of + /// the pattern in the slice given. + #[cfg(feature = "syntax")] + pub fn build_many>( + &self, + patterns: &[P], + ) -> Result { + let nfa = self + .thompson + .clone() + // We can always forcefully disable captures because DFAs do not + // support them. + .configure( + thompson::Config::new() + .which_captures(thompson::WhichCaptures::None), + ) + .build_many(patterns) + .map_err(BuildError::nfa)?; + self.build_from_nfa(nfa) + } + + /// Build a DFA from the given NFA. + /// + /// Note that this requires owning a `thompson::NFA`. While this may force + /// you to clone the NFA, such a clone is not a deep clone. Namely, NFAs + /// are defined internally to support shared ownership such that cloning is + /// very cheap. + /// + /// # Example + /// + /// This example shows how to build a lazy DFA if you already have an NFA + /// in hand. + /// + /// ``` + /// use regex_automata::{ + /// hybrid::dfa::DFA, + /// nfa::thompson, + /// HalfMatch, Input, + /// }; + /// + /// let haystack = "foo123bar"; + /// + /// // This shows how to set non-default options for building an NFA. + /// let nfa = thompson::Compiler::new() + /// .configure(thompson::Config::new().shrink(true)) + /// .build(r"[0-9]+")?; + /// let dfa = DFA::builder().build_from_nfa(nfa)?; + /// let mut cache = dfa.create_cache(); + /// let expected = Some(HalfMatch::must(0, 6)); + /// let got = dfa.try_search_fwd(&mut cache, &Input::new(haystack))?; + /// assert_eq!(expected, got); + /// + /// # Ok::<(), Box>(()) + /// ``` + pub fn build_from_nfa( + &self, + nfa: thompson::NFA, + ) -> Result { + let quitset = self.config.quit_set_from_nfa(&nfa)?; + let classes = self.config.byte_classes_from_nfa(&nfa, &quitset); + // Check that we can fit at least a few states into our cache, + // otherwise it's pretty senseless to use the lazy DFA. This does have + // a possible failure mode though. This assumes the maximum size of a + // state in powerset space (so, the total number of NFA states), which + // may never actually materialize, and could be quite a bit larger + // than the actual biggest state. 
If this turns out to be a problem, + // we could expose a knob that disables this check. But if so, we have + // to be careful not to panic in other areas of the code (the cache + // clearing and init code) that tend to assume some minimum useful + // cache capacity. + let min_cache = minimum_cache_capacity( + &nfa, + &classes, + self.config.get_starts_for_each_pattern(), + ); + let mut cache_capacity = self.config.get_cache_capacity(); + if cache_capacity < min_cache { + // When the caller has asked us to skip the cache capacity check, + // then we simply force the cache capacity to its minimum amount + // and mush on. + if self.config.get_skip_cache_capacity_check() { + debug!( + "given capacity ({cache_capacity}) is too small, \ + since skip_cache_capacity_check is enabled, \ + setting cache capacity to minimum ({min_cache})", + ); + cache_capacity = min_cache; + } else { + return Err(BuildError::insufficient_cache_capacity( + min_cache, + cache_capacity, + )); + } + } + // We also need to check that we can fit at least some small number + // of states in our state ID space. This is unlikely to trigger in + // >=32-bit systems, but 16-bit systems have a pretty small state ID + // space since a number of bits are used up as sentinels. + if let Err(err) = minimum_lazy_state_id(&classes) { + return Err(BuildError::insufficient_state_id_capacity(err)); + } + let stride2 = classes.stride2(); + let start_map = StartByteMap::new(nfa.look_matcher()); + Ok(DFA { + config: self.config.clone(), + nfa, + stride2, + start_map, + classes, + quitset, + cache_capacity, + }) + } + + /// Apply the given lazy DFA configuration options to this builder. + pub fn configure(&mut self, config: Config) -> &mut Builder { + self.config = self.config.overwrite(config); + self + } + + /// Set the syntax configuration for this builder using + /// [`syntax::Config`](crate::util::syntax::Config). + /// + /// This permits setting things like case insensitivity, Unicode and multi + /// line mode. + /// + /// These settings only apply when constructing a lazy DFA directly from a + /// pattern. + #[cfg(feature = "syntax")] + pub fn syntax( + &mut self, + config: crate::util::syntax::Config, + ) -> &mut Builder { + self.thompson.syntax(config); + self + } + + /// Set the Thompson NFA configuration for this builder using + /// [`nfa::thompson::Config`](crate::nfa::thompson::Config). + /// + /// This permits setting things like whether the DFA should match the regex + /// in reverse or if additional time should be spent shrinking the size of + /// the NFA. + /// + /// These settings only apply when constructing a DFA directly from a + /// pattern. + #[cfg(feature = "syntax")] + pub fn thompson(&mut self, config: thompson::Config) -> &mut Builder { + self.thompson.configure(config); + self + } +} + +/// Represents the current state of an overlapping search. +/// +/// This is used for overlapping searches since they need to know something +/// about the previous search. For example, when multiple patterns match at the +/// same position, this state tracks the last reported pattern so that the next +/// search knows whether to report another matching pattern or continue with +/// the search at the next position. Additionally, it also tracks which state +/// the last search call terminated in. +/// +/// This type provides little introspection capabilities. The only thing a +/// caller can do is construct it and pass it around to permit search routines +/// to use it to track state, and also ask whether a match has been found. 
+///
+/// Callers should always provide a fresh state constructed via
+/// [`OverlappingState::start`] when starting a new search. Reusing state from
+/// a previous search may result in incorrect results.
+#[derive(Clone, Debug, Eq, PartialEq)]
+pub struct OverlappingState {
+    /// The match reported by the most recent overlapping search to use this
+    /// state.
+    ///
+    /// If a search does not find any matches, then it is expected to clear
+    /// this value.
+    pub(crate) mat: Option<HalfMatch>,
+    /// The state ID of the state at which the search was in when the call
+    /// terminated. When this is a match state, `last_match` must be set to a
+    /// non-None value.
+    ///
+    /// A `None` value indicates the start state of the corresponding
+    /// automaton. We cannot use the actual ID, since any one automaton may
+    /// have many start states, and which one is in use depends on several
+    /// search-time factors.
+    pub(crate) id: Option<LazyStateID>,
+    /// The position of the search.
+    ///
+    /// When `id` is None (i.e., we are starting a search), this is set to
+    /// the beginning of the search as given by the caller regardless of its
+    /// current value. Subsequent calls to an overlapping search pick up at
+    /// this offset.
+    pub(crate) at: usize,
+    /// The index into the matching patterns of the next match to report if the
+    /// current state is a match state. Note that this may be 1 greater than
+    /// the total number of matches to report for the current match state. (In
+    /// which case, no more matches should be reported at the current position
+    /// and the search should advance to the next position.)
+    pub(crate) next_match_index: Option<usize>,
+    /// This is set to true when a reverse overlapping search has entered its
+    /// EOI transitions.
+    ///
+    /// This isn't used in a forward search because it knows to stop once the
+    /// position exceeds the end of the search range. In a reverse search,
+    /// since we use unsigned offsets, we don't "know" once we've gone past
+    /// `0`. So the only way to detect it is with this extra flag. The reverse
+    /// overlapping search knows to terminate specifically after it has
+    /// reported all matches after following the EOI transition.
+    pub(crate) rev_eoi: bool,
+}
+
+impl OverlappingState {
+    /// Create a new overlapping state that begins at the start state of any
+    /// automaton.
+    pub fn start() -> OverlappingState {
+        OverlappingState {
+            mat: None,
+            id: None,
+            at: 0,
+            next_match_index: None,
+            rev_eoi: false,
+        }
+    }
+
+    /// Return the match result of the most recent search to execute with this
+    /// state.
+    ///
+    /// A search will clear this result automatically, such that if no
+    /// match is found, this will correctly report `None`.
+    pub fn get_match(&self) -> Option<HalfMatch> {
+        self.mat
+    }
+}
+
+/// Runs the given overlapping `search` function (forwards or backwards) until
+/// a match is found whose offset does not split a codepoint.
+///
+/// This is *not* always correct to call. It should only be called when the
+/// underlying NFA has UTF-8 mode enabled *and* it can produce zero-width
+/// matches. Calling this when both of those things aren't true might result
+/// in legitimate matches getting skipped.
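A minimal sketch of the calling convention described above. It assumes the lazy DFA's forward overlapping search routine, `try_search_overlapping_fwd`, which is not shown in this excerpt; the patterns, the `MatchKind::All` choice and the helper name `all_overlapping_matches` are illustrative:

    use regex_automata::{
        hybrid::dfa::{DFA, OverlappingState},
        Input, MatchKind,
    };

    fn all_overlapping_matches(haystack: &str) -> Result<(), Box<dyn std::error::Error>> {
        // Overlapping searches generally want "all" match semantics so that
        // matches are not discarded in favor of leftmost-first preferences.
        let dfa = DFA::builder()
            .configure(DFA::config().match_kind(MatchKind::All))
            .build_many(&[r"@\w+", r"\w+"])?;
        let mut cache = dfa.create_cache();
        let input = Input::new(haystack);

        // Always start from a fresh state; reusing one from an unrelated
        // search may produce incorrect results.
        let mut state = OverlappingState::start();
        loop {
            dfa.try_search_overlapping_fwd(&mut cache, &input, &mut state)?;
            match state.get_match() {
                // No match was reported by this call, so the search is done.
                None => return Ok(()),
                Some(hm) => {
                    println!("pattern {:?} ends at {}", hm.pattern(), hm.offset())
                }
            }
        }
    }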
+#[cold] +#[inline(never)] +fn skip_empty_utf8_splits_overlapping( + input: &Input<'_>, + state: &mut OverlappingState, + mut search: F, +) -> Result<(), MatchError> +where + F: FnMut(&Input<'_>, &mut OverlappingState) -> Result<(), MatchError>, +{ + // Note that this routine works for forwards and reverse searches + // even though there's no code here to handle those cases. That's + // because overlapping searches drive themselves to completion via + // `OverlappingState`. So all we have to do is push it until no matches are + // found. + + let mut hm = match state.get_match() { + None => return Ok(()), + Some(hm) => hm, + }; + if input.get_anchored().is_anchored() { + if !input.is_char_boundary(hm.offset()) { + state.mat = None; + } + return Ok(()); + } + while !input.is_char_boundary(hm.offset()) { + search(input, state)?; + hm = match state.get_match() { + None => return Ok(()), + Some(hm) => hm, + }; + } + Ok(()) +} + +/// Based on the minimum number of states required for a useful lazy DFA cache, +/// this returns the minimum lazy state ID that must be representable. +/// +/// It's not likely for this to have any impact 32-bit systems (or higher), but +/// on 16-bit systems, the lazy state ID space is quite constrained and thus +/// may be insufficient if our MIN_STATES value is (for some reason) too high. +fn minimum_lazy_state_id( + classes: &ByteClasses, +) -> Result { + let stride = 1 << classes.stride2(); + let min_state_index = MIN_STATES.checked_sub(1).unwrap(); + LazyStateID::new(min_state_index * stride) +} + +/// Based on the minimum number of states required for a useful lazy DFA cache, +/// this returns a heuristic minimum number of bytes of heap space required. +/// +/// This is a "heuristic" because the minimum it returns is likely bigger than +/// the true minimum. Namely, it assumes that each powerset NFA/DFA state uses +/// the maximum number of NFA states (all of them). This is likely bigger +/// than what is required in practice. Computing the true minimum effectively +/// requires determinization, which is probably too much work to do for a +/// simple check like this. +/// +/// One of the issues with this approach IMO is that it requires that this +/// be in sync with the calculation above for computing how much heap memory +/// the DFA cache uses. If we get it wrong, it's possible for example for the +/// minimum to be smaller than the computed heap memory, and thus, it may be +/// the case that we can't add the required minimum number of states. That in +/// turn will make lazy DFA panic because we assume that we can add at least a +/// minimum number of states. +/// +/// Another approach would be to always allow the minimum number of states to +/// be added to the lazy DFA cache, even if it exceeds the configured cache +/// limit. This does mean that the limit isn't really a limit in all cases, +/// which is unfortunate. But it does at least guarantee that the lazy DFA can +/// always make progress, even if it is slow. (This approach is very similar to +/// enabling the 'skip_cache_capacity_check' config knob, except it wouldn't +/// rely on cache size calculation. Instead, it would just always permit a +/// minimum number of states to be added.) 
+fn minimum_cache_capacity(
+    nfa: &thompson::NFA,
+    classes: &ByteClasses,
+    starts_for_each_pattern: bool,
+) -> usize {
+    const ID_SIZE: usize = size_of::<LazyStateID>();
+    const STATE_SIZE: usize = size_of::<State>();
+
+    let stride = 1 << classes.stride2();
+    let states_len = nfa.states().len();
+    let sparses = 2 * states_len * NFAStateID::SIZE;
+    let trans = MIN_STATES * stride * ID_SIZE;
+
+    let mut starts = Start::len() * ID_SIZE;
+    if starts_for_each_pattern {
+        starts += (Start::len() * nfa.pattern_len()) * ID_SIZE;
+    }
+
+    // The min number of states HAS to be at least 4: we have 3 sentinel states
+    // and then we need space for one more when we save a state after clearing
+    // the cache. We also need space for one more, otherwise we get stuck in a
+    // loop where we try to add a 5th state, which gets rejected, which clears
+    // the cache, which adds back a saved state (4th total state) which then
+    // tries to add the 5th state again.
+    assert!(MIN_STATES >= 5, "minimum number of states has to be at least 5");
+    // The minimum number of non-sentinel states. We consider this separately
+    // because sentinel states are much smaller in that they contain no NFA
+    // states. Given our aggressive calculation here, it's worth being more
+    // precise with the number of states we need.
+    let non_sentinel = MIN_STATES.checked_sub(SENTINEL_STATES).unwrap();
+
+    // Every `State` has 5 bytes for flags, 4 bytes (max) for the number of
+    // patterns, followed by 32-bit encodings of patterns and then delta
+    // varint encodings of NFA state IDs. We use the worst case (which isn't
+    // technically possible) of 5 bytes for each NFA state ID.
+    //
+    // HOWEVER, three of the states needed by a lazy DFA are just the sentinel
+    // unknown, dead and quit states. Those states have a known size and it is
+    // small.
+    let dead_state_size = State::dead().memory_usage();
+    let max_state_size = 5 + 4 + (nfa.pattern_len() * 4) + (states_len * 5);
+    let states = (SENTINEL_STATES * (STATE_SIZE + dead_state_size))
+        + (non_sentinel * (STATE_SIZE + max_state_size));
+    // NOTE: We don't double count heap memory used by State for this map since
+    // we use reference counting to avoid doubling memory usage. (This tends to
+    // be where most memory is allocated in the cache.)
+    let states_to_sid = (MIN_STATES * STATE_SIZE) + (MIN_STATES * ID_SIZE);
+    let stack = states_len * NFAStateID::SIZE;
+    let scratch_state_builder = max_state_size;
+
+    trans
+        + starts
+        + states
+        + states_to_sid
+        + sparses
+        + stack
+        + scratch_state_builder
+}
+
+#[cfg(all(test, feature = "syntax"))]
+mod tests {
+    use super::*;
+
+    // Tests that we handle heuristic Unicode word boundary support in reverse
+    // DFAs in the specific case of contextual searches.
+    //
+    // I wrote this test when I discovered a bug in how heuristic word
+    // boundaries were handled. Namely, that the starting state selection
+    // didn't consider the DFA's quit byte set when looking at the byte
+    // immediately before the start of the search (or immediately after the
+    // end of the search in the case of a reverse search). As a result, it was
+    // possible for '\bfoo\b' to match 'β123' because the trailing \xB2 byte
+    // in the 'β' codepoint would be treated as a non-word character. But of
+    // course, this search should trigger the DFA to quit, since there is a
+    // non-ASCII byte in consideration.
+    //
+    // Thus, I fixed 'start_state_{forward,reverse}' to check the quit byte set
+    // if it wasn't empty.
The forward case is tested in the doc test for the + // Config::unicode_word_boundary API. We test the reverse case here, which + // is sufficiently niche that it doesn't really belong in a doc test. + #[test] + fn heuristic_unicode_reverse() { + let dfa = DFA::builder() + .configure(DFA::config().unicode_word_boundary(true)) + .thompson(thompson::Config::new().reverse(true)) + .build(r"\b[0-9]+\b") + .unwrap(); + let mut cache = dfa.create_cache(); + + let input = Input::new("β123").range(2..); + let expected = MatchError::quit(0xB2, 1); + let got = dfa.try_search_rev(&mut cache, &input); + assert_eq!(Err(expected), got); + + let input = Input::new("123β").range(..3); + let expected = MatchError::quit(0xCE, 3); + let got = dfa.try_search_rev(&mut cache, &input); + assert_eq!(Err(expected), got); + } +} diff --git a/vendor/regex-automata/src/hybrid/error.rs b/vendor/regex-automata/src/hybrid/error.rs new file mode 100644 index 00000000000000..93e58dd54f22b6 --- /dev/null +++ b/vendor/regex-automata/src/hybrid/error.rs @@ -0,0 +1,241 @@ +use crate::{hybrid::id::LazyStateIDError, nfa, util::search::Anchored}; + +/// An error that occurs when initial construction of a lazy DFA fails. +/// +/// A build error can occur when insufficient cache capacity is configured or +/// if something about the NFA is unsupported. (For example, if one attempts +/// to build a lazy DFA without heuristic Unicode support but with an NFA that +/// contains a Unicode word boundary.) +/// +/// This error does not provide many introspection capabilities. There are +/// generally only two things you can do with it: +/// +/// * Obtain a human readable message via its `std::fmt::Display` impl. +/// * Access an underlying +/// [`nfa::thompson::BuildError`](crate::nfa::thompson::BuildError) +/// type from its `source` method via the `std::error::Error` trait. This error +/// only occurs when using convenience routines for building a lazy DFA +/// directly from a pattern string. +/// +/// When the `std` feature is enabled, this implements the `std::error::Error` +/// trait. 
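One concrete way to hit the insufficient-cache-capacity case mentioned above is to configure a capacity below the minimum that the builder computes for the NFA while leaving `skip_cache_capacity_check` disabled. A minimal sketch (the pattern is arbitrary):

    use regex_automata::hybrid::dfa::DFA;

    fn main() {
        // A zero-byte cache is always below the minimum capacity computed for
        // the NFA, so construction fails unless the capacity check is skipped.
        let result = DFA::builder()
            .configure(DFA::config().cache_capacity(0))
            .build(r"\w{10}");
        match result {
            Err(err) => println!("build failed: {err}"),
            Ok(_) => unreachable!("expected an insufficient cache capacity error"),
        }
    }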
+#[derive(Clone, Debug)] +pub struct BuildError { + kind: BuildErrorKind, +} + +#[derive(Clone, Debug)] +enum BuildErrorKind { + NFA(nfa::thompson::BuildError), + InsufficientCacheCapacity { minimum: usize, given: usize }, + InsufficientStateIDCapacity { err: LazyStateIDError }, + Unsupported(&'static str), +} + +impl BuildError { + pub(crate) fn nfa(err: nfa::thompson::BuildError) -> BuildError { + BuildError { kind: BuildErrorKind::NFA(err) } + } + + pub(crate) fn insufficient_cache_capacity( + minimum: usize, + given: usize, + ) -> BuildError { + BuildError { + kind: BuildErrorKind::InsufficientCacheCapacity { minimum, given }, + } + } + + pub(crate) fn insufficient_state_id_capacity( + err: LazyStateIDError, + ) -> BuildError { + BuildError { + kind: BuildErrorKind::InsufficientStateIDCapacity { err }, + } + } + + pub(crate) fn unsupported_dfa_word_boundary_unicode() -> BuildError { + let msg = "cannot build lazy DFAs for regexes with Unicode word \ + boundaries; switch to ASCII word boundaries, or \ + heuristically enable Unicode word boundaries or use a \ + different regex engine"; + BuildError { kind: BuildErrorKind::Unsupported(msg) } + } +} + +#[cfg(feature = "std")] +impl std::error::Error for BuildError { + fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { + match self.kind { + BuildErrorKind::NFA(ref err) => Some(err), + _ => None, + } + } +} + +impl core::fmt::Display for BuildError { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + match self.kind { + BuildErrorKind::NFA(_) => write!(f, "error building NFA"), + BuildErrorKind::InsufficientCacheCapacity { minimum, given } => { + write!( + f, + "given cache capacity ({given}) is smaller than \ + minimum required ({minimum})", + ) + } + BuildErrorKind::InsufficientStateIDCapacity { ref err } => { + err.fmt(f) + } + BuildErrorKind::Unsupported(ref msg) => { + write!(f, "unsupported regex feature for DFAs: {msg}") + } + } + } +} + +/// An error that can occur when computing the start state for a search. +/// +/// Computing a start state can fail for a few reasons, either +/// based on incorrect configuration or even based on whether +/// the look-behind byte triggers a quit state. Typically +/// one does not need to handle this error if you're using +/// [`DFA::start_state_forward`](crate::hybrid::dfa::DFA::start_state_forward) +/// (or its reverse counterpart), as that routine automatically converts +/// `StartError` to a [`MatchError`](crate::MatchError) for you. +/// +/// This error may be returned by the +/// [`DFA::start_state`](crate::hybrid::dfa::DFA::start_state) routine. +/// +/// This error implements the `std::error::Error` trait when the `std` feature +/// is enabled. +/// +/// This error is marked as non-exhaustive. New variants may be added in a +/// semver compatible release. +#[non_exhaustive] +#[derive(Clone, Debug)] +pub enum StartError { + /// An error that occurs when cache inefficiency has dropped below the + /// configured heuristic thresholds. + Cache { + /// The underlying cache error that occurred. + err: CacheError, + }, + /// An error that occurs when a starting configuration's look-behind byte + /// is in this DFA's quit set. + Quit { + /// The quit byte that was found. + byte: u8, + }, + /// An error that occurs when the caller requests an anchored mode that + /// isn't supported by the DFA. + UnsupportedAnchored { + /// The anchored mode given that is unsupported. 
+ mode: Anchored, + }, +} + +impl StartError { + pub(crate) fn cache(err: CacheError) -> StartError { + StartError::Cache { err } + } + + pub(crate) fn quit(byte: u8) -> StartError { + StartError::Quit { byte } + } + + pub(crate) fn unsupported_anchored(mode: Anchored) -> StartError { + StartError::UnsupportedAnchored { mode } + } +} + +#[cfg(feature = "std")] +impl std::error::Error for StartError { + fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { + match *self { + StartError::Cache { ref err } => Some(err), + _ => None, + } + } +} + +impl core::fmt::Display for StartError { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + match *self { + StartError::Cache { .. } => write!( + f, + "error computing start state because of cache inefficiency" + ), + StartError::Quit { byte } => write!( + f, + "error computing start state because the look-behind byte \ + {:?} triggered a quit state", + crate::util::escape::DebugByte(byte), + ), + StartError::UnsupportedAnchored { mode: Anchored::Yes } => { + write!( + f, + "error computing start state because \ + anchored searches are not supported or enabled" + ) + } + StartError::UnsupportedAnchored { mode: Anchored::No } => { + write!( + f, + "error computing start state because \ + unanchored searches are not supported or enabled" + ) + } + StartError::UnsupportedAnchored { + mode: Anchored::Pattern(pid), + } => { + write!( + f, + "error computing start state because \ + anchored searches for a specific pattern ({}) \ + are not supported or enabled", + pid.as_usize(), + ) + } + } + } +} + +/// An error that occurs when cache usage has become inefficient. +/// +/// One of the weaknesses of a lazy DFA is that it may need to clear its +/// cache repeatedly if it's not big enough. If this happens too much, then it +/// can slow searching down significantly. A mitigation to this is to use +/// heuristics to detect whether the cache is being used efficiently or not. +/// If not, then a lazy DFA can return a `CacheError`. +/// +/// The default configuration of a lazy DFA in this crate is +/// set such that a `CacheError` will never occur. Instead, +/// callers must opt into this behavior with settings like +/// [`dfa::Config::minimum_cache_clear_count`](crate::hybrid::dfa::Config::minimum_cache_clear_count) +/// and +/// [`dfa::Config::minimum_bytes_per_state`](crate::hybrid::dfa::Config::minimum_bytes_per_state). +/// +/// When the `std` feature is enabled, this implements the `std::error::Error` +/// trait. +#[derive(Clone, Debug)] +pub struct CacheError(()); + +impl CacheError { + pub(crate) fn too_many_cache_clears() -> CacheError { + CacheError(()) + } + + pub(crate) fn bad_efficiency() -> CacheError { + CacheError(()) + } +} + +#[cfg(feature = "std")] +impl std::error::Error for CacheError {} + +impl core::fmt::Display for CacheError { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + write!(f, "lazy DFA cache has been cleared too many times") + } +} diff --git a/vendor/regex-automata/src/hybrid/id.rs b/vendor/regex-automata/src/hybrid/id.rs new file mode 100644 index 00000000000000..65d8528e71734a --- /dev/null +++ b/vendor/regex-automata/src/hybrid/id.rs @@ -0,0 +1,354 @@ +/// A state identifier specifically tailored for lazy DFAs. +/// +/// A lazy state ID logically represents a pointer to a DFA state. In practice, +/// by limiting the number of DFA states it can address, it reserves some +/// bits of its representation to encode some additional information. 
That +/// additional information is called a "tag." That tag is used to record +/// whether the state it points to is an unknown, dead, quit, start or match +/// state. +/// +/// When implementing a low level search routine with a lazy DFA, it is +/// necessary to query the type of the current state to know what to do: +/// +/// * **Unknown** - The state has not yet been computed. The +/// parameters used to get this state ID must be re-passed to +/// [`DFA::next_state`](crate::hybrid::dfa::DFA::next_state), which will never +/// return an unknown state ID. +/// * **Dead** - A dead state only has transitions to itself. It indicates that +/// the search cannot do anything else and should stop with whatever result it +/// has. +/// * **Quit** - A quit state indicates that the automaton could not answer +/// whether a match exists or not. Correct search implementations must return a +/// [`MatchError::quit`](crate::MatchError::quit) when a DFA enters a quit +/// state. +/// * **Start** - A start state is a state in which a search can begin. +/// Lazy DFAs usually have more than one start state. Branching on +/// this isn't required for correctness, but a common optimization is +/// to run a prefilter when a search enters a start state. Note that +/// start states are *not* tagged automatically, and one must enable the +/// [`Config::specialize_start_states`](crate::hybrid::dfa::Config::specialize_start_states) +/// setting for start states to be tagged. The reason for this is +/// that a DFA search loop is usually written to execute a prefilter once it +/// enters a start state. But if there is no prefilter, this handling can be +/// quite disastrous as the DFA may ping-pong between the special handling code +/// and a possible optimized hot path for handling untagged states. When start +/// states aren't specialized, then they are untagged and remain in the hot +/// path. +/// * **Match** - A match state indicates that a match has been found. +/// Depending on the semantics of your search implementation, it may either +/// continue until the end of the haystack or a dead state, or it might quit +/// and return the match immediately. +/// +/// As an optimization, the [`is_tagged`](LazyStateID::is_tagged) predicate +/// can be used to determine if a tag exists at all. This is useful to avoid +/// branching on all of the above types for every byte searched. +/// +/// # Example +/// +/// This example shows how `LazyStateID` can be used to implement a correct +/// search routine with minimal branching. In particular, this search routine +/// implements "leftmost" matching, which means that it doesn't immediately +/// stop once a match is found. Instead, it continues until it reaches a dead +/// state. +/// +/// Notice also how a correct search implementation deals with +/// [`CacheError`](crate::hybrid::CacheError)s returned by some of +/// the lazy DFA routines. When a `CacheError` occurs, it returns +/// [`MatchError::gave_up`](crate::MatchError::gave_up). +/// +/// ``` +/// use regex_automata::{ +/// hybrid::dfa::{Cache, DFA}, +/// HalfMatch, MatchError, Input, +/// }; +/// +/// fn find_leftmost_first( +/// dfa: &DFA, +/// cache: &mut Cache, +/// haystack: &[u8], +/// ) -> Result, MatchError> { +/// // The start state is determined by inspecting the position and the +/// // initial bytes of the haystack. Note that start states can never +/// // be match states (since DFAs in this crate delay matches by 1 +/// // byte), so we don't need to check if the start state is a match. 
+/// let mut sid = dfa.start_state_forward( +/// cache, +/// &Input::new(haystack), +/// )?; +/// let mut last_match = None; +/// // Walk all the bytes in the haystack. We can quit early if we see +/// // a dead or a quit state. The former means the automaton will +/// // never transition to any other state. The latter means that the +/// // automaton entered a condition in which its search failed. +/// for (i, &b) in haystack.iter().enumerate() { +/// sid = dfa +/// .next_state(cache, sid, b) +/// .map_err(|_| MatchError::gave_up(i))?; +/// if sid.is_tagged() { +/// if sid.is_match() { +/// last_match = Some(HalfMatch::new( +/// dfa.match_pattern(cache, sid, 0), +/// i, +/// )); +/// } else if sid.is_dead() { +/// return Ok(last_match); +/// } else if sid.is_quit() { +/// // It is possible to enter into a quit state after +/// // observing a match has occurred. In that case, we +/// // should return the match instead of an error. +/// if last_match.is_some() { +/// return Ok(last_match); +/// } +/// return Err(MatchError::quit(b, i)); +/// } +/// // Implementors may also want to check for start states and +/// // handle them differently for performance reasons. But it is +/// // not necessary for correctness. Note that in order to check +/// // for start states, you'll need to enable the +/// // 'specialize_start_states' config knob, otherwise start +/// // states will not be tagged. +/// } +/// } +/// // Matches are always delayed by 1 byte, so we must explicitly walk +/// // the special "EOI" transition at the end of the search. +/// sid = dfa +/// .next_eoi_state(cache, sid) +/// .map_err(|_| MatchError::gave_up(haystack.len()))?; +/// if sid.is_match() { +/// last_match = Some(HalfMatch::new( +/// dfa.match_pattern(cache, sid, 0), +/// haystack.len(), +/// )); +/// } +/// Ok(last_match) +/// } +/// +/// // We use a greedy '+' operator to show how the search doesn't just stop +/// // once a match is detected. It continues extending the match. Using +/// // '[a-z]+?' would also work as expected and stop the search early. +/// // Greediness is built into the automaton. +/// let dfa = DFA::new(r"[a-z]+")?; +/// let mut cache = dfa.create_cache(); +/// let haystack = "123 foobar 4567".as_bytes(); +/// let mat = find_leftmost_first(&dfa, &mut cache, haystack)?.unwrap(); +/// assert_eq!(mat.pattern().as_usize(), 0); +/// assert_eq!(mat.offset(), 10); +/// +/// // Here's another example that tests our handling of the special +/// // EOI transition. This will fail to find a match if we don't call +/// // 'next_eoi_state' at the end of the search since the match isn't found +/// // until the final byte in the haystack. +/// let dfa = DFA::new(r"[0-9]{4}")?; +/// let mut cache = dfa.create_cache(); +/// let haystack = "123 foobar 4567".as_bytes(); +/// let mat = find_leftmost_first(&dfa, &mut cache, haystack)?.unwrap(); +/// assert_eq!(mat.pattern().as_usize(), 0); +/// assert_eq!(mat.offset(), 15); +/// +/// // And note that our search implementation above automatically works +/// // with multi-DFAs. Namely, `dfa.match_pattern(match_state, 0)` selects +/// // the appropriate pattern ID for us. 
+/// let dfa = DFA::new_many(&[r"[a-z]+", r"[0-9]+"])?; +/// let mut cache = dfa.create_cache(); +/// let haystack = "123 foobar 4567".as_bytes(); +/// let mat = find_leftmost_first(&dfa, &mut cache, haystack)?.unwrap(); +/// assert_eq!(mat.pattern().as_usize(), 1); +/// assert_eq!(mat.offset(), 3); +/// let mat = find_leftmost_first(&dfa, &mut cache, &haystack[3..])?.unwrap(); +/// assert_eq!(mat.pattern().as_usize(), 0); +/// assert_eq!(mat.offset(), 7); +/// let mat = find_leftmost_first(&dfa, &mut cache, &haystack[10..])?.unwrap(); +/// assert_eq!(mat.pattern().as_usize(), 1); +/// assert_eq!(mat.offset(), 5); +/// +/// # Ok::<(), Box>(()) +/// ``` +#[derive( + Clone, Copy, Debug, Default, Eq, Hash, PartialEq, PartialOrd, Ord, +)] +pub struct LazyStateID(u32); + +impl LazyStateID { + #[cfg(any(target_pointer_width = "32", target_pointer_width = "64"))] + const MAX_BIT: usize = 31; + + #[cfg(target_pointer_width = "16")] + const MAX_BIT: usize = 15; + + const MASK_UNKNOWN: usize = 1 << (LazyStateID::MAX_BIT); + const MASK_DEAD: usize = 1 << (LazyStateID::MAX_BIT - 1); + const MASK_QUIT: usize = 1 << (LazyStateID::MAX_BIT - 2); + const MASK_START: usize = 1 << (LazyStateID::MAX_BIT - 3); + const MASK_MATCH: usize = 1 << (LazyStateID::MAX_BIT - 4); + pub(crate) const MAX: usize = LazyStateID::MASK_MATCH - 1; + + /// Create a new lazy state ID. + /// + /// If the given identifier exceeds [`LazyStateID::MAX`], then this returns + /// an error. + #[inline] + pub(crate) fn new(id: usize) -> Result { + if id > LazyStateID::MAX { + let attempted = u64::try_from(id).unwrap(); + return Err(LazyStateIDError { attempted }); + } + Ok(LazyStateID::new_unchecked(id)) + } + + /// Create a new lazy state ID without checking whether the given value + /// exceeds [`LazyStateID::MAX`]. + /// + /// While this is unchecked, providing an incorrect value must never + /// sacrifice memory safety. + #[inline] + const fn new_unchecked(id: usize) -> LazyStateID { + // FIXME: Use as_u32() once const functions in traits are stable. + LazyStateID(id as u32) + } + + /// Return this lazy state ID as an untagged `usize`. + /// + /// If this lazy state ID is tagged, then the usize returned is the state + /// ID without the tag. If the ID was not tagged, then the usize returned + /// is equivalent to the state ID. + #[inline] + pub(crate) fn as_usize_untagged(&self) -> usize { + self.as_usize_unchecked() & LazyStateID::MAX + } + + /// Return this lazy state ID as its raw internal `usize` value, which may + /// be tagged (and thus greater than LazyStateID::MAX). + #[inline] + pub(crate) const fn as_usize_unchecked(&self) -> usize { + // FIXME: Use as_usize() once const functions in traits are stable. + self.0 as usize + } + + #[inline] + pub(crate) const fn to_unknown(&self) -> LazyStateID { + LazyStateID::new_unchecked( + self.as_usize_unchecked() | LazyStateID::MASK_UNKNOWN, + ) + } + + #[inline] + pub(crate) const fn to_dead(&self) -> LazyStateID { + LazyStateID::new_unchecked( + self.as_usize_unchecked() | LazyStateID::MASK_DEAD, + ) + } + + #[inline] + pub(crate) const fn to_quit(&self) -> LazyStateID { + LazyStateID::new_unchecked( + self.as_usize_unchecked() | LazyStateID::MASK_QUIT, + ) + } + + /// Return this lazy state ID as a state ID that is tagged as a start + /// state. 
+ #[inline] + pub(crate) const fn to_start(&self) -> LazyStateID { + LazyStateID::new_unchecked( + self.as_usize_unchecked() | LazyStateID::MASK_START, + ) + } + + /// Return this lazy state ID as a lazy state ID that is tagged as a match + /// state. + #[inline] + pub(crate) const fn to_match(&self) -> LazyStateID { + LazyStateID::new_unchecked( + self.as_usize_unchecked() | LazyStateID::MASK_MATCH, + ) + } + + /// Return true if and only if this lazy state ID is tagged. + /// + /// When a lazy state ID is tagged, then one can conclude that it is one + /// of a match, start, dead, quit or unknown state. + #[inline] + pub const fn is_tagged(&self) -> bool { + self.as_usize_unchecked() > LazyStateID::MAX + } + + /// Return true if and only if this represents a lazy state ID that is + /// "unknown." That is, the state has not yet been created. When a caller + /// sees this state ID, it generally means that a state has to be computed + /// in order to proceed. + #[inline] + pub const fn is_unknown(&self) -> bool { + self.as_usize_unchecked() & LazyStateID::MASK_UNKNOWN > 0 + } + + /// Return true if and only if this represents a dead state. A dead state + /// is a state that can never transition to any other state except the + /// dead state. When a dead state is seen, it generally indicates that a + /// search should stop. + #[inline] + pub const fn is_dead(&self) -> bool { + self.as_usize_unchecked() & LazyStateID::MASK_DEAD > 0 + } + + /// Return true if and only if this represents a quit state. A quit state + /// is a state that is representationally equivalent to a dead state, + /// except it indicates the automaton has reached a point at which it can + /// no longer determine whether a match exists or not. In general, this + /// indicates an error during search and the caller must either pass this + /// error up or use a different search technique. + #[inline] + pub const fn is_quit(&self) -> bool { + self.as_usize_unchecked() & LazyStateID::MASK_QUIT > 0 + } + + /// Return true if and only if this lazy state ID has been tagged as a + /// start state. + /// + /// Note that if + /// [`Config::specialize_start_states`](crate::hybrid::dfa::Config) is + /// disabled (which is the default), then this will always return false + /// since start states won't be tagged. + #[inline] + pub const fn is_start(&self) -> bool { + self.as_usize_unchecked() & LazyStateID::MASK_START > 0 + } + + /// Return true if and only if this lazy state ID has been tagged as a + /// match state. + #[inline] + pub const fn is_match(&self) -> bool { + self.as_usize_unchecked() & LazyStateID::MASK_MATCH > 0 + } +} + +/// This error occurs when a lazy state ID could not be constructed. +/// +/// This occurs when given an integer exceeding the maximum lazy state ID +/// value. +/// +/// When the `std` feature is enabled, this implements the `Error` trait. +#[derive(Clone, Debug, Eq, PartialEq)] +pub(crate) struct LazyStateIDError { + attempted: u64, +} + +impl LazyStateIDError { + /// Returns the value that failed to constructed a lazy state ID. 
+ pub(crate) fn attempted(&self) -> u64 { + self.attempted + } +} + +#[cfg(feature = "std")] +impl std::error::Error for LazyStateIDError {} + +impl core::fmt::Display for LazyStateIDError { + fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { + write!( + f, + "failed to create LazyStateID from {:?}, which exceeds {:?}", + self.attempted(), + LazyStateID::MAX, + ) + } +} diff --git a/vendor/regex-automata/src/hybrid/mod.rs b/vendor/regex-automata/src/hybrid/mod.rs new file mode 100644 index 00000000000000..2feb839d16a69a --- /dev/null +++ b/vendor/regex-automata/src/hybrid/mod.rs @@ -0,0 +1,144 @@ +/*! +A module for building and searching with lazy deterministic finite automata +(DFAs). + +Like other modules in this crate, lazy DFAs support a rich regex syntax with +Unicode features. The key feature of a lazy DFA is that it builds itself +incrementally during search, and never uses more than a configured capacity of +memory. Thus, when searching with a lazy DFA, one must supply a mutable "cache" +in which the actual DFA's transition table is stored. + +If you're looking for fully compiled DFAs, then please see the top-level +[`dfa` module](crate::dfa). + +# Overview + +This section gives a brief overview of the primary types in this module: + +* A [`regex::Regex`] provides a way to search for matches of a regular +expression using lazy DFAs. This includes iterating over matches with both the +start and end positions of each match. +* A [`dfa::DFA`] provides direct low level access to a lazy DFA. + +# Example: basic regex searching + +This example shows how to compile a regex using the default configuration +and then use it to find matches in a byte string: + +``` +use regex_automata::{hybrid::regex::Regex, Match}; + +let re = Regex::new(r"[0-9]{4}-[0-9]{2}-[0-9]{2}")?; +let mut cache = re.create_cache(); + +let haystack = "2018-12-24 2016-10-08"; +let matches: Vec = re.find_iter(&mut cache, haystack).collect(); +assert_eq!(matches, vec![ + Match::must(0, 0..10), + Match::must(0, 11..21), +]); +# Ok::<(), Box>(()) +``` + +# Example: searching with multiple regexes + +The lazy DFAs in this module all fully support searching with multiple regexes +simultaneously. You can use this support with standard leftmost-first style +searching to find non-overlapping matches: + +``` +# if cfg!(miri) { return Ok(()); } // miri takes too long +use regex_automata::{hybrid::regex::Regex, Match}; + +let re = Regex::new_many(&[r"\w+", r"\S+"])?; +let mut cache = re.create_cache(); + +let haystack = "@foo bar"; +let matches: Vec = re.find_iter(&mut cache, haystack).collect(); +assert_eq!(matches, vec![ + Match::must(1, 0..4), + Match::must(0, 5..8), +]); +# Ok::<(), Box>(()) +``` + +# When should I use this? + +Generally speaking, if you can abide the use of mutable state during search, +and you don't need things like capturing groups or Unicode word boundary +support in non-ASCII text, then a lazy DFA is likely a robust choice with +respect to both search speed and memory usage. Note however that its speed +may be worse than a general purpose regex engine if you don't select a good +[prefilter](crate::util::prefilter). + +If you know ahead of time that your pattern would result in a very large DFA +if it was fully compiled, it may be better to use an NFA simulation instead +of a lazy DFA. Either that, or increase the cache capacity of your lazy DFA +to something that is big enough to hold the state machine (likely through +experimentation). 
The issue here is that if the cache is too small, then it +could wind up being reset too frequently and this might decrease searching +speed significantly. + +# Differences with fully compiled DFAs + +A [`hybrid::regex::Regex`](crate::hybrid::regex::Regex) and a +[`dfa::regex::Regex`](crate::dfa::regex::Regex) both have the same capabilities +(and similarly for their underlying DFAs), but they achieve them through +different means. The main difference is that a hybrid or "lazy" regex builds +its DFA lazily during search, where as a fully compiled regex will build its +DFA at construction time. While building a DFA at search time might sound like +it's slow, it tends to work out where most bytes seen during a search will +reuse pre-built parts of the DFA and thus can be almost as fast as a fully +compiled DFA. The main downside is that searching requires mutable space to +store the DFA, and, in the worst case, a search can result in a new state being +created for each byte seen, which would make searching quite a bit slower. + +A fully compiled DFA never has to worry about searches being slower once +it's built. (Aside from, say, the transition table being so large that it +is subject to harsh CPU cache effects.) However, of course, building a full +DFA can be quite time consuming and memory hungry. Particularly when large +Unicode character classes are used, which tend to translate into very large +DFAs. + +A lazy DFA strikes a nice balance _in practice_, particularly in the +presence of Unicode mode, by only building what is needed. It avoids the +worst case exponential time complexity of DFA compilation by guaranteeing that +it will only build at most one state per byte searched. While the worst +case here can lead to a very high constant, it will never be exponential. + +# Syntax + +This module supports the same syntax as the `regex` crate, since they share the +same parser. You can find an exhaustive list of supported syntax in the +[documentation for the `regex` crate](https://docs.rs/regex/1/regex/#syntax). + +There are two things that are not supported by the lazy DFAs in this module: + +* Capturing groups. The DFAs (and [`Regex`](regex::Regex)es built on top +of them) can only find the offsets of an entire match, but cannot resolve +the offsets of each capturing group. This is because DFAs do not have the +expressive power necessary. Note that it is okay to build a lazy DFA from an +NFA that contains capture groups. The capture groups will simply be ignored. +* Unicode word boundaries. These present particularly difficult challenges for +DFA construction and would result in an explosion in the number of states. +One can enable [`dfa::Config::unicode_word_boundary`] though, which provides +heuristic support for Unicode word boundaries that only works on ASCII text. +Otherwise, one can use `(?-u:\b)` for an ASCII word boundary, which will work +on any input. + +There are no plans to lift either of these limitations. + +Note that these restrictions are identical to the restrictions on fully +compiled DFAs. +*/ + +pub use self::{ + error::{BuildError, CacheError, StartError}, + id::LazyStateID, +}; + +pub mod dfa; +mod error; +mod id; +pub mod regex; +mod search; diff --git a/vendor/regex-automata/src/hybrid/regex.rs b/vendor/regex-automata/src/hybrid/regex.rs new file mode 100644 index 00000000000000..b3b1fe317d6775 --- /dev/null +++ b/vendor/regex-automata/src/hybrid/regex.rs @@ -0,0 +1,895 @@ +/*! +A lazy DFA backed `Regex`. + +This module provides a [`Regex`] backed by a lazy DFA. 
A `Regex` implements +convenience routines you might have come to expect, such as finding a match +and iterating over all non-overlapping matches. This `Regex` type is limited +in its capabilities to what a lazy DFA can provide. Therefore, APIs involving +capturing groups, for example, are not provided. + +Internally, a `Regex` is composed of two DFAs. One is a "forward" DFA that +finds the end offset of a match, where as the other is a "reverse" DFA that +find the start offset of a match. + +See the [parent module](crate::hybrid) for examples. +*/ + +use crate::{ + hybrid::{ + dfa::{self, DFA}, + error::BuildError, + }, + nfa::thompson, + util::{ + iter, + search::{Anchored, Input, Match, MatchError, MatchKind}, + }, +}; + +/// A regular expression that uses hybrid NFA/DFAs (also called "lazy DFAs") +/// for searching. +/// +/// A regular expression is comprised of two lazy DFAs, a "forward" DFA and a +/// "reverse" DFA. The forward DFA is responsible for detecting the end of +/// a match while the reverse DFA is responsible for detecting the start +/// of a match. Thus, in order to find the bounds of any given match, a +/// forward search must first be run followed by a reverse search. A match +/// found by the forward DFA guarantees that the reverse DFA will also find +/// a match. +/// +/// # Fallibility +/// +/// Most of the search routines defined on this type will _panic_ when the +/// underlying search fails. This might be because the DFA gave up because it +/// saw a quit byte, whether configured explicitly or via heuristic Unicode +/// word boundary support, although neither are enabled by default. It might +/// also fail if the underlying DFA determines it isn't making effective use of +/// the cache (which also never happens by default). Or it might fail because +/// an invalid `Input` configuration is given, for example, with an unsupported +/// [`Anchored`] mode. +/// +/// If you need to handle these error cases instead of allowing them to trigger +/// a panic, then the lower level [`Regex::try_search`] provides a fallible API +/// that never panics. +/// +/// # Example +/// +/// This example shows how to cause a search to terminate if it sees a +/// `\n` byte, and handle the error returned. This could be useful if, for +/// example, you wanted to prevent a user supplied pattern from matching +/// across a line boundary. +/// +/// ``` +/// # if cfg!(miri) { return Ok(()); } // miri takes too long +/// use regex_automata::{hybrid::{dfa, regex::Regex}, Input, MatchError}; +/// +/// let re = Regex::builder() +/// .dfa(dfa::Config::new().quit(b'\n', true)) +/// .build(r"foo\p{any}+bar")?; +/// let mut cache = re.create_cache(); +/// +/// let input = Input::new("foo\nbar"); +/// // Normally this would produce a match, since \p{any} contains '\n'. +/// // But since we instructed the automaton to enter a quit state if a +/// // '\n' is observed, this produces a match error instead. +/// let expected = MatchError::quit(b'\n', 3); +/// let got = re.try_search(&mut cache, &input).unwrap_err(); +/// assert_eq!(expected, got); +/// +/// # Ok::<(), Box>(()) +/// ``` +#[derive(Debug)] +pub struct Regex { + /// The forward lazy DFA. This can only find the end of a match. + forward: DFA, + /// The reverse lazy DFA. This can only find the start of a match. + /// + /// This is built with 'all' match semantics (instead of leftmost-first) + /// so that it always finds the longest possible match (which corresponds + /// to the leftmost starting position). 
It is also compiled as an anchored + /// matcher and has 'starts_for_each_pattern' enabled. Including starting + /// states for each pattern is necessary to ensure that we only look for + /// matches of a pattern that matched in the forward direction. Otherwise, + /// we might wind up finding the "leftmost" starting position of a totally + /// different pattern! + reverse: DFA, +} + +/// Convenience routines for regex and cache construction. +impl Regex { + /// Parse the given regular expression using the default configuration and + /// return the corresponding regex. + /// + /// If you want a non-default configuration, then use the [`Builder`] to + /// set your own configuration. + /// + /// # Example + /// + /// ``` + /// use regex_automata::{hybrid::regex::Regex, Match}; + /// + /// let re = Regex::new("foo[0-9]+bar")?; + /// let mut cache = re.create_cache(); + /// assert_eq!( + /// Some(Match::must(0, 3..14)), + /// re.find(&mut cache, "zzzfoo12345barzzz"), + /// ); + /// # Ok::<(), Box>(()) + /// ``` + #[cfg(feature = "syntax")] + pub fn new(pattern: &str) -> Result { + Regex::builder().build(pattern) + } + + /// Like `new`, but parses multiple patterns into a single "multi regex." + /// This similarly uses the default regex configuration. + /// + /// # Example + /// + /// ``` + /// use regex_automata::{hybrid::regex::Regex, Match}; + /// + /// let re = Regex::new_many(&["[a-z]+", "[0-9]+"])?; + /// let mut cache = re.create_cache(); + /// + /// let mut it = re.find_iter(&mut cache, "abc 1 foo 4567 0 quux"); + /// assert_eq!(Some(Match::must(0, 0..3)), it.next()); + /// assert_eq!(Some(Match::must(1, 4..5)), it.next()); + /// assert_eq!(Some(Match::must(0, 6..9)), it.next()); + /// assert_eq!(Some(Match::must(1, 10..14)), it.next()); + /// assert_eq!(Some(Match::must(1, 15..16)), it.next()); + /// assert_eq!(Some(Match::must(0, 17..21)), it.next()); + /// assert_eq!(None, it.next()); + /// # Ok::<(), Box>(()) + /// ``` + #[cfg(feature = "syntax")] + pub fn new_many>( + patterns: &[P], + ) -> Result { + Regex::builder().build_many(patterns) + } + + /// Return a builder for configuring the construction of a `Regex`. + /// + /// This is a convenience routine to avoid needing to import the + /// [`Builder`] type in common cases. + /// + /// # Example + /// + /// This example shows how to use the builder to disable UTF-8 mode + /// everywhere. + /// + /// ``` + /// # if cfg!(miri) { return Ok(()); } // miri takes too long + /// use regex_automata::{ + /// hybrid::regex::Regex, nfa::thompson, util::syntax, Match, + /// }; + /// + /// let re = Regex::builder() + /// .syntax(syntax::Config::new().utf8(false)) + /// .thompson(thompson::Config::new().utf8(false)) + /// .build(r"foo(?-u:[^b])ar.*")?; + /// let mut cache = re.create_cache(); + /// + /// let haystack = b"\xFEfoo\xFFarzz\xE2\x98\xFF\n"; + /// let expected = Some(Match::must(0, 1..9)); + /// let got = re.find(&mut cache, haystack); + /// assert_eq!(expected, got); + /// + /// # Ok::<(), Box>(()) + /// ``` + pub fn builder() -> Builder { + Builder::new() + } + + /// Create a new cache for this `Regex`. + /// + /// The cache returned should only be used for searches for this + /// `Regex`. If you want to reuse the cache for another `Regex`, then + /// you must call [`Cache::reset`] with that `Regex` (or, equivalently, + /// [`Regex::reset_cache`]). + pub fn create_cache(&self) -> Cache { + Cache::new(self) + } + + /// Reset the given cache such that it can be used for searching with the + /// this `Regex` (and only this `Regex`). 
+ /// + /// A cache reset permits reusing memory already allocated in this cache + /// with a different `Regex`. + /// + /// Resetting a cache sets its "clear count" to 0. This is relevant if the + /// `Regex` has been configured to "give up" after it has cleared the cache + /// a certain number of times. + /// + /// # Example + /// + /// This shows how to re-purpose a cache for use with a different `Regex`. + /// + /// ``` + /// # if cfg!(miri) { return Ok(()); } // miri takes too long + /// use regex_automata::{hybrid::regex::Regex, Match}; + /// + /// let re1 = Regex::new(r"\w")?; + /// let re2 = Regex::new(r"\W")?; + /// + /// let mut cache = re1.create_cache(); + /// assert_eq!( + /// Some(Match::must(0, 0..2)), + /// re1.find(&mut cache, "Δ"), + /// ); + /// + /// // Using 'cache' with re2 is not allowed. It may result in panics or + /// // incorrect results. In order to re-purpose the cache, we must reset + /// // it with the Regex we'd like to use it with. + /// // + /// // Similarly, after this reset, using the cache with 're1' is also not + /// // allowed. + /// re2.reset_cache(&mut cache); + /// assert_eq!( + /// Some(Match::must(0, 0..3)), + /// re2.find(&mut cache, "☃"), + /// ); + /// + /// # Ok::<(), Box>(()) + /// ``` + pub fn reset_cache(&self, cache: &mut Cache) { + self.forward().reset_cache(&mut cache.forward); + self.reverse().reset_cache(&mut cache.reverse); + } +} + +/// Standard infallible search routines for finding and iterating over matches. +impl Regex { + /// Returns true if and only if this regex matches the given haystack. + /// + /// This routine may short circuit if it knows that scanning future input + /// will never lead to a different result. In particular, if the underlying + /// DFA enters a match state or a dead state, then this routine will return + /// `true` or `false`, respectively, without inspecting any future input. + /// + /// # Panics + /// + /// This routine panics if the search could not complete. This can occur + /// in a number of circumstances: + /// + /// * The configuration of the lazy DFA may permit it to "quit" the search. + /// For example, setting quit bytes or enabling heuristic support for + /// Unicode word boundaries. The default configuration does not enable any + /// option that could result in the lazy DFA quitting. + /// * The configuration of the lazy DFA may also permit it to "give up" + /// on a search if it makes ineffective use of its transition table + /// cache. The default configuration does not enable this by default, + /// although it is typically a good idea to. + /// * When the provided `Input` configuration is not supported. For + /// example, by providing an unsupported anchor mode. + /// + /// When a search panics, callers cannot know whether a match exists or + /// not. + /// + /// Use [`Regex::try_search`] if you want to handle these error conditions. + /// + /// # Example + /// + /// ``` + /// use regex_automata::hybrid::regex::Regex; + /// + /// let re = Regex::new("foo[0-9]+bar")?; + /// let mut cache = re.create_cache(); + /// + /// assert!(re.is_match(&mut cache, "foo12345bar")); + /// assert!(!re.is_match(&mut cache, "foobar")); + /// # Ok::<(), Box>(()) + /// ``` + #[inline] + pub fn is_match<'h, I: Into>>( + &self, + cache: &mut Cache, + input: I, + ) -> bool { + // Not only can we do an "earliest" search, but we can avoid doing a + // reverse scan too. 
+ self.forward() + .try_search_fwd(&mut cache.forward, &input.into().earliest(true)) + .unwrap() + .is_some() + } + + /// Returns the start and end offset of the leftmost match. If no match + /// exists, then `None` is returned. + /// + /// # Panics + /// + /// This routine panics if the search could not complete. This can occur + /// in a number of circumstances: + /// + /// * The configuration of the lazy DFA may permit it to "quit" the search. + /// For example, setting quit bytes or enabling heuristic support for + /// Unicode word boundaries. The default configuration does not enable any + /// option that could result in the lazy DFA quitting. + /// * The configuration of the lazy DFA may also permit it to "give up" + /// on a search if it makes ineffective use of its transition table + /// cache. The default configuration does not enable this by default, + /// although it is typically a good idea to. + /// * When the provided `Input` configuration is not supported. For + /// example, by providing an unsupported anchor mode. + /// + /// When a search panics, callers cannot know whether a match exists or + /// not. + /// + /// Use [`Regex::try_search`] if you want to handle these error conditions. + /// + /// # Example + /// + /// ``` + /// use regex_automata::{Match, hybrid::regex::Regex}; + /// + /// let re = Regex::new("foo[0-9]+")?; + /// let mut cache = re.create_cache(); + /// assert_eq!( + /// Some(Match::must(0, 3..11)), + /// re.find(&mut cache, "zzzfoo12345zzz"), + /// ); + /// + /// // Even though a match is found after reading the first byte (`a`), + /// // the default leftmost-first match semantics demand that we find the + /// // earliest match that prefers earlier parts of the pattern over latter + /// // parts. + /// let re = Regex::new("abc|a")?; + /// let mut cache = re.create_cache(); + /// assert_eq!(Some(Match::must(0, 0..3)), re.find(&mut cache, "abc")); + /// # Ok::<(), Box>(()) + /// ``` + #[inline] + pub fn find<'h, I: Into>>( + &self, + cache: &mut Cache, + input: I, + ) -> Option { + self.try_search(cache, &input.into()).unwrap() + } + + /// Returns an iterator over all non-overlapping leftmost matches in the + /// given bytes. If no match exists, then the iterator yields no elements. + /// + /// # Panics + /// + /// This routine panics if the search could not complete. This can occur + /// in a number of circumstances: + /// + /// * The configuration of the lazy DFA may permit it to "quit" the search. + /// For example, setting quit bytes or enabling heuristic support for + /// Unicode word boundaries. The default configuration does not enable any + /// option that could result in the lazy DFA quitting. + /// * The configuration of the lazy DFA may also permit it to "give up" + /// on a search if it makes ineffective use of its transition table + /// cache. The default configuration does not enable this by default, + /// although it is typically a good idea to. + /// * When the provided `Input` configuration is not supported. For + /// example, by providing an unsupported anchor mode. + /// + /// When a search panics, callers cannot know whether a match exists or + /// not. + /// + /// The above conditions also apply to the iterator returned as well. For + /// example, if the lazy DFA gives up or quits during a search using this + /// method, then a panic will occur during iteration. + /// + /// Use [`Regex::try_search`] with [`util::iter::Searcher`](iter::Searcher) + /// if you want to handle these error conditions. 
+    ///
+    /// # Example
+    ///
+    /// ```
+    /// use regex_automata::{hybrid::regex::Regex, Match};
+    ///
+    /// let re = Regex::new("foo[0-9]+")?;
+    /// let mut cache = re.create_cache();
+    ///
+    /// let text = "foo1 foo12 foo123";
+    /// let matches: Vec<Match> = re.find_iter(&mut cache, text).collect();
+    /// assert_eq!(matches, vec![
+    ///     Match::must(0, 0..4),
+    ///     Match::must(0, 5..10),
+    ///     Match::must(0, 11..17),
+    /// ]);
+    /// # Ok::<(), Box<dyn std::error::Error>>(())
+    /// ```
+    #[inline]
+    pub fn find_iter<'r, 'c, 'h, I: Into<Input<'h>>>(
+        &'r self,
+        cache: &'c mut Cache,
+        input: I,
+    ) -> FindMatches<'r, 'c, 'h> {
+        let it = iter::Searcher::new(input.into());
+        FindMatches { re: self, cache, it }
+    }
+}
+
+/// Lower level "search" primitives that accept a `&Input` for cheap reuse
+/// and return an error if one occurs instead of panicking.
+impl Regex {
+    /// Returns the start and end offset of the leftmost match. If no match
+    /// exists, then `None` is returned.
+    ///
+    /// This is like [`Regex::find`] but with two differences:
+    ///
+    /// 1. It is not generic over `Into<Input>` and instead accepts a
+    /// `&Input`. This permits reusing the same `Input` for multiple searches
+    /// without needing to create a new one. This _may_ help with latency.
+    /// 2. It returns an error if the search could not complete, whereas
+    /// [`Regex::find`] will panic.
+    ///
+    /// # Errors
+    ///
+    /// This routine errors if the search could not complete. This can occur
+    /// in a number of circumstances:
+    ///
+    /// * The configuration of the lazy DFA may permit it to "quit" the search.
+    /// For example, setting quit bytes or enabling heuristic support for
+    /// Unicode word boundaries. The default configuration does not enable any
+    /// option that could result in the lazy DFA quitting.
+    /// * The configuration of the lazy DFA may also permit it to "give up"
+    /// on a search if it makes ineffective use of its transition table
+    /// cache. The default configuration does not enable this by default,
+    /// although it is typically a good idea to.
+    /// * When the provided `Input` configuration is not supported. For
+    /// example, by providing an unsupported anchor mode.
+    ///
+    /// When a search returns an error, callers cannot know whether a match
+    /// exists or not.
+    #[inline]
+    pub fn try_search(
+        &self,
+        cache: &mut Cache,
+        input: &Input<'_>,
+    ) -> Result<Option<Match>, MatchError> {
+        let (fcache, rcache) = (&mut cache.forward, &mut cache.reverse);
+        let end = match self.forward().try_search_fwd(fcache, input)? {
+            None => return Ok(None),
+            Some(end) => end,
+        };
+        // This special cases an empty match at the beginning of the search. If
+        // our end matches our start, then since a reverse DFA can't match past
+        // the start, it must follow that our starting position is also our end
+        // position. So short circuit and skip the reverse search.
+        if input.start() == end.offset() {
+            return Ok(Some(Match::new(
+                end.pattern(),
+                end.offset()..end.offset(),
+            )));
+        }
+        // We can also skip the reverse search if we know our search was
+        // anchored. This occurs either when the input config is anchored or
+        // when we know the regex itself is anchored. In this case, we know the
+        // start of the match, if one is found, must be the start of the
+        // search.
+        if self.is_anchored(input) {
+            return Ok(Some(Match::new(
+                end.pattern(),
+                input.start()..end.offset(),
+            )));
+        }
+        // N.B.
I have tentatively convinced myself that it isn't necessary + // to specify the specific pattern for the reverse search since the + // reverse search will always find the same pattern to match as the + // forward search. But I lack a rigorous proof. Why not just provide + // the pattern anyway? Well, if it is needed, then leaving it out + // gives us a chance to find a witness. (Also, if we don't need to + // specify the pattern, then we don't need to build the reverse DFA + // with 'starts_for_each_pattern' enabled. It doesn't matter too much + // for the lazy DFA, but does make the overall DFA bigger.) + // + // We also need to be careful to disable 'earliest' for the reverse + // search, since it could be enabled for the forward search. In the + // reverse case, to satisfy "leftmost" criteria, we need to match as + // much as we can. We also need to be careful to make the search + // anchored. We don't want the reverse search to report any matches + // other than the one beginning at the end of our forward search. + let revsearch = input + .clone() + .span(input.start()..end.offset()) + .anchored(Anchored::Yes) + .earliest(false); + let start = self + .reverse() + .try_search_rev(rcache, &revsearch)? + .expect("reverse search must match if forward search does"); + debug_assert_eq!( + start.pattern(), + end.pattern(), + "forward and reverse search must match same pattern", + ); + debug_assert!(start.offset() <= end.offset()); + Ok(Some(Match::new(end.pattern(), start.offset()..end.offset()))) + } + + /// Returns true if either the given input specifies an anchored search + /// or if the underlying NFA is always anchored. + fn is_anchored(&self, input: &Input<'_>) -> bool { + match input.get_anchored() { + Anchored::No => { + self.forward().get_nfa().is_always_start_anchored() + } + Anchored::Yes | Anchored::Pattern(_) => true, + } + } +} + +/// Non-search APIs for querying information about the regex and setting a +/// prefilter. +impl Regex { + /// Return the underlying lazy DFA responsible for forward matching. + /// + /// This is useful for accessing the underlying lazy DFA and using it + /// directly if the situation calls for it. + pub fn forward(&self) -> &DFA { + &self.forward + } + + /// Return the underlying lazy DFA responsible for reverse matching. + /// + /// This is useful for accessing the underlying lazy DFA and using it + /// directly if the situation calls for it. + pub fn reverse(&self) -> &DFA { + &self.reverse + } + + /// Returns the total number of patterns matched by this regex. + /// + /// # Example + /// + /// ``` + /// # if cfg!(miri) { return Ok(()); } // miri takes too long + /// use regex_automata::hybrid::regex::Regex; + /// + /// let re = Regex::new_many(&[r"[a-z]+", r"[0-9]+", r"\w+"])?; + /// assert_eq!(3, re.pattern_len()); + /// # Ok::<(), Box>(()) + /// ``` + pub fn pattern_len(&self) -> usize { + assert_eq!(self.forward().pattern_len(), self.reverse().pattern_len()); + self.forward().pattern_len() + } +} + +/// An iterator over all non-overlapping matches for an infallible search. +/// +/// The iterator yields a [`Match`] value until no more matches could be found. +/// If the underlying regex engine returns an error, then a panic occurs. +/// +/// The lifetime parameters are as follows: +/// +/// * `'r` represents the lifetime of the regex object. +/// * `'h` represents the lifetime of the haystack being searched. +/// * `'c` represents the lifetime of the regex cache. +/// +/// This iterator can be created with the [`Regex::find_iter`] method. 
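+///
+/// A brief sketch of driving the iterator (the pattern, haystack and offsets
+/// here are only illustrative; see [`Regex::find_iter`] for the originating
+/// API):
+///
+/// ```
+/// use regex_automata::{hybrid::regex::Regex, Match};
+///
+/// let re = Regex::new(r"[0-9]{4}")?;
+/// let mut cache = re.create_cache();
+///
+/// let mut it = re.find_iter(&mut cache, "launched in 2010, refreshed in 2016");
+/// assert_eq!(Some(Match::must(0, 12..16)), it.next());
+/// assert_eq!(Some(Match::must(0, 31..35)), it.next());
+/// assert_eq!(None, it.next());
+/// # Ok::<(), Box<dyn std::error::Error>>(())
+/// ```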
+#[derive(Debug)] +pub struct FindMatches<'r, 'c, 'h> { + re: &'r Regex, + cache: &'c mut Cache, + it: iter::Searcher<'h>, +} + +impl<'r, 'c, 'h> Iterator for FindMatches<'r, 'c, 'h> { + type Item = Match; + + #[inline] + fn next(&mut self) -> Option { + let FindMatches { re, ref mut cache, ref mut it } = *self; + it.advance(|input| re.try_search(cache, input)) + } +} + +/// A cache represents a partially computed forward and reverse DFA. +/// +/// A cache is the key component that differentiates a classical DFA and a +/// hybrid NFA/DFA (also called a "lazy DFA"). Where a classical DFA builds a +/// complete transition table that can handle all possible inputs, a hybrid +/// NFA/DFA starts with an empty transition table and builds only the parts +/// required during search. The parts that are built are stored in a cache. For +/// this reason, a cache is a required parameter for nearly every operation on +/// a [`Regex`]. +/// +/// Caches can be created from their corresponding `Regex` via +/// [`Regex::create_cache`]. A cache can only be used with either the `Regex` +/// that created it, or the `Regex` that was most recently used to reset it +/// with [`Cache::reset`]. Using a cache with any other `Regex` may result in +/// panics or incorrect results. +#[derive(Debug, Clone)] +pub struct Cache { + forward: dfa::Cache, + reverse: dfa::Cache, +} + +impl Cache { + /// Create a new cache for the given `Regex`. + /// + /// The cache returned should only be used for searches for the given + /// `Regex`. If you want to reuse the cache for another `Regex`, then you + /// must call [`Cache::reset`] with that `Regex`. + pub fn new(re: &Regex) -> Cache { + let forward = dfa::Cache::new(re.forward()); + let reverse = dfa::Cache::new(re.reverse()); + Cache { forward, reverse } + } + + /// Reset this cache such that it can be used for searching with the given + /// `Regex` (and only that `Regex`). + /// + /// A cache reset permits reusing memory already allocated in this cache + /// with a different `Regex`. + /// + /// Resetting a cache sets its "clear count" to 0. This is relevant if the + /// `Regex` has been configured to "give up" after it has cleared the cache + /// a certain number of times. + /// + /// # Example + /// + /// This shows how to re-purpose a cache for use with a different `Regex`. + /// + /// ``` + /// # if cfg!(miri) { return Ok(()); } // miri takes too long + /// use regex_automata::{hybrid::regex::Regex, Match}; + /// + /// let re1 = Regex::new(r"\w")?; + /// let re2 = Regex::new(r"\W")?; + /// + /// let mut cache = re1.create_cache(); + /// assert_eq!( + /// Some(Match::must(0, 0..2)), + /// re1.find(&mut cache, "Δ"), + /// ); + /// + /// // Using 'cache' with re2 is not allowed. It may result in panics or + /// // incorrect results. In order to re-purpose the cache, we must reset + /// // it with the Regex we'd like to use it with. + /// // + /// // Similarly, after this reset, using the cache with 're1' is also not + /// // allowed. + /// cache.reset(&re2); + /// assert_eq!( + /// Some(Match::must(0, 0..3)), + /// re2.find(&mut cache, "☃"), + /// ); + /// + /// # Ok::<(), Box>(()) + /// ``` + pub fn reset(&mut self, re: &Regex) { + self.forward.reset(re.forward()); + self.reverse.reset(re.reverse()); + } + + /// Return a reference to the forward cache. + pub fn forward(&mut self) -> &dfa::Cache { + &self.forward + } + + /// Return a reference to the reverse cache. 
+ pub fn reverse(&mut self) -> &dfa::Cache { + &self.reverse + } + + /// Return a mutable reference to the forward cache. + /// + /// If you need mutable references to both the forward and reverse caches, + /// then use [`Cache::as_parts_mut`]. + pub fn forward_mut(&mut self) -> &mut dfa::Cache { + &mut self.forward + } + + /// Return a mutable reference to the reverse cache. + /// + /// If you need mutable references to both the forward and reverse caches, + /// then use [`Cache::as_parts_mut`]. + pub fn reverse_mut(&mut self) -> &mut dfa::Cache { + &mut self.reverse + } + + /// Return references to the forward and reverse caches, respectively. + pub fn as_parts(&self) -> (&dfa::Cache, &dfa::Cache) { + (&self.forward, &self.reverse) + } + + /// Return mutable references to the forward and reverse caches, + /// respectively. + pub fn as_parts_mut(&mut self) -> (&mut dfa::Cache, &mut dfa::Cache) { + (&mut self.forward, &mut self.reverse) + } + + /// Returns the heap memory usage, in bytes, as a sum of the forward and + /// reverse lazy DFA caches. + /// + /// This does **not** include the stack size used up by this cache. To + /// compute that, use `std::mem::size_of::()`. + pub fn memory_usage(&self) -> usize { + self.forward.memory_usage() + self.reverse.memory_usage() + } +} + +/// A builder for a regex based on a hybrid NFA/DFA. +/// +/// This builder permits configuring options for the syntax of a pattern, the +/// NFA construction, the lazy DFA construction and finally the regex searching +/// itself. This builder is different from a general purpose regex builder +/// in that it permits fine grain configuration of the construction process. +/// The trade off for this is complexity, and the possibility of setting a +/// configuration that might not make sense. For example, there are two +/// different UTF-8 modes: +/// +/// * [`syntax::Config::utf8`](crate::util::syntax::Config::utf8) controls +/// whether the pattern itself can contain sub-expressions that match invalid +/// UTF-8. +/// * [`thompson::Config::utf8`] controls how the regex iterators themselves +/// advance the starting position of the next search when a match with zero +/// length is found. +/// +/// Generally speaking, callers will want to either enable all of these or +/// disable all of these. +/// +/// Internally, building a regex requires building two hybrid NFA/DFAs, +/// where one is responsible for finding the end of a match and the other is +/// responsible for finding the start of a match. If you only need to detect +/// whether something matched, or only the end of a match, then you should use +/// a [`dfa::Builder`] to construct a single hybrid NFA/DFA, which is cheaper +/// than building two of them. +/// +/// # Example +/// +/// This example shows how to disable UTF-8 mode in the syntax and the regex +/// itself. This is generally what you want for matching on arbitrary bytes. 
+/// +/// ``` +/// # if cfg!(miri) { return Ok(()); } // miri takes too long +/// use regex_automata::{ +/// hybrid::regex::Regex, nfa::thompson, util::syntax, Match, +/// }; +/// +/// let re = Regex::builder() +/// .syntax(syntax::Config::new().utf8(false)) +/// .thompson(thompson::Config::new().utf8(false)) +/// .build(r"foo(?-u:[^b])ar.*")?; +/// let mut cache = re.create_cache(); +/// +/// let haystack = b"\xFEfoo\xFFarzz\xE2\x98\xFF\n"; +/// let expected = Some(Match::must(0, 1..9)); +/// let got = re.find(&mut cache, haystack); +/// assert_eq!(expected, got); +/// // Notice that `(?-u:[^b])` matches invalid UTF-8, +/// // but the subsequent `.*` does not! Disabling UTF-8 +/// // on the syntax permits this. +/// assert_eq!(b"foo\xFFarzz", &haystack[got.unwrap().range()]); +/// +/// # Ok::<(), Box>(()) +/// ``` +#[derive(Clone, Debug)] +pub struct Builder { + dfa: dfa::Builder, +} + +impl Builder { + /// Create a new regex builder with the default configuration. + pub fn new() -> Builder { + Builder { dfa: DFA::builder() } + } + + /// Build a regex from the given pattern. + /// + /// If there was a problem parsing or compiling the pattern, then an error + /// is returned. + #[cfg(feature = "syntax")] + pub fn build(&self, pattern: &str) -> Result { + self.build_many(&[pattern]) + } + + /// Build a regex from the given patterns. + #[cfg(feature = "syntax")] + pub fn build_many>( + &self, + patterns: &[P], + ) -> Result { + let forward = self.dfa.build_many(patterns)?; + let reverse = self + .dfa + .clone() + .configure( + DFA::config() + .prefilter(None) + .specialize_start_states(false) + .match_kind(MatchKind::All), + ) + .thompson(thompson::Config::new().reverse(true)) + .build_many(patterns)?; + Ok(self.build_from_dfas(forward, reverse)) + } + + /// Build a regex from its component forward and reverse hybrid NFA/DFAs. + /// + /// This is useful when you've built a forward and reverse lazy DFA + /// separately, and want to combine them into a single regex. Once build, + /// the individual DFAs given can still be accessed via [`Regex::forward`] + /// and [`Regex::reverse`]. + /// + /// It is important that the reverse lazy DFA be compiled under the + /// following conditions: + /// + /// * It should use [`MatchKind::All`] semantics. + /// * It should match in reverse. + /// * Otherwise, its configuration should match the forward DFA. + /// + /// If these conditions aren't satisfied, then the behavior of searches is + /// unspecified. + /// + /// Note that when using this constructor, no configuration is applied. + /// Since this routine provides the DFAs to the builder, there is no + /// opportunity to apply other configuration options. + /// + /// # Example + /// + /// This shows how to build individual lazy forward and reverse DFAs, and + /// then combine them into a single `Regex`. 
+ /// + /// ``` + /// use regex_automata::{ + /// hybrid::{dfa::DFA, regex::Regex}, + /// nfa::thompson, + /// MatchKind, + /// }; + /// + /// let fwd = DFA::new(r"foo[0-9]+")?; + /// let rev = DFA::builder() + /// .configure(DFA::config().match_kind(MatchKind::All)) + /// .thompson(thompson::Config::new().reverse(true)) + /// .build(r"foo[0-9]+")?; + /// + /// let re = Regex::builder().build_from_dfas(fwd, rev); + /// let mut cache = re.create_cache(); + /// assert_eq!(true, re.is_match(&mut cache, "foo123")); + /// # Ok::<(), Box>(()) + /// ``` + pub fn build_from_dfas(&self, forward: DFA, reverse: DFA) -> Regex { + Regex { forward, reverse } + } + + /// Set the syntax configuration for this builder using + /// [`syntax::Config`](crate::util::syntax::Config). + /// + /// This permits setting things like case insensitivity, Unicode and multi + /// line mode. + #[cfg(feature = "syntax")] + pub fn syntax( + &mut self, + config: crate::util::syntax::Config, + ) -> &mut Builder { + self.dfa.syntax(config); + self + } + + /// Set the Thompson NFA configuration for this builder using + /// [`nfa::thompson::Config`](thompson::Config). + /// + /// This permits setting things like whether additional time should be + /// spent shrinking the size of the NFA. + #[cfg(feature = "syntax")] + pub fn thompson(&mut self, config: thompson::Config) -> &mut Builder { + self.dfa.thompson(config); + self + } + + /// Set the lazy DFA compilation configuration for this builder using + /// [`dfa::Config`]. + /// + /// This permits setting things like whether Unicode word boundaries should + /// be heuristically supported or settings how the behavior of the cache. + pub fn dfa(&mut self, config: dfa::Config) -> &mut Builder { + self.dfa.configure(config); + self + } +} + +impl Default for Builder { + fn default() -> Builder { + Builder::new() + } +} diff --git a/vendor/regex-automata/src/hybrid/search.rs b/vendor/regex-automata/src/hybrid/search.rs new file mode 100644 index 00000000000000..1f4a505db41784 --- /dev/null +++ b/vendor/regex-automata/src/hybrid/search.rs @@ -0,0 +1,802 @@ +use crate::{ + hybrid::{ + dfa::{Cache, OverlappingState, DFA}, + id::LazyStateID, + }, + util::{ + prefilter::Prefilter, + search::{HalfMatch, Input, MatchError, Span}, + }, +}; + +#[inline(never)] +pub(crate) fn find_fwd( + dfa: &DFA, + cache: &mut Cache, + input: &Input<'_>, +) -> Result, MatchError> { + if input.is_done() { + return Ok(None); + } + let pre = if input.get_anchored().is_anchored() { + None + } else { + dfa.get_config().get_prefilter() + }; + // So what we do here is specialize four different versions of 'find_fwd': + // one for each of the combinations for 'has prefilter' and 'is earliest + // search'. The reason for doing this is that both of these things require + // branches and special handling in some code that can be very hot, + // and shaving off as much as we can when we don't need it tends to be + // beneficial in ad hoc benchmarks. To see these differences, you often + // need a query with a high match count. In other words, specializing these + // four routines *tends* to help latency more than throughput. 
+ if pre.is_some() { + if input.get_earliest() { + find_fwd_imp(dfa, cache, input, pre, true) + } else { + find_fwd_imp(dfa, cache, input, pre, false) + } + } else { + if input.get_earliest() { + find_fwd_imp(dfa, cache, input, None, true) + } else { + find_fwd_imp(dfa, cache, input, None, false) + } + } +} + +#[cfg_attr(feature = "perf-inline", inline(always))] +fn find_fwd_imp( + dfa: &DFA, + cache: &mut Cache, + input: &Input<'_>, + pre: Option<&'_ Prefilter>, + earliest: bool, +) -> Result, MatchError> { + // See 'prefilter_restart' docs for explanation. + let universal_start = dfa.get_nfa().look_set_prefix_any().is_empty(); + let mut mat = None; + let mut sid = init_fwd(dfa, cache, input)?; + let mut at = input.start(); + // This could just be a closure, but then I think it would be unsound + // because it would need to be safe to invoke. This way, the lack of safety + // is clearer in the code below. + macro_rules! next_unchecked { + ($sid:expr, $at:expr) => {{ + let byte = *input.haystack().get_unchecked($at); + dfa.next_state_untagged_unchecked(cache, $sid, byte) + }}; + } + + if let Some(ref pre) = pre { + let span = Span::from(at..input.end()); + match pre.find(input.haystack(), span) { + None => return Ok(mat), + Some(ref span) => { + at = span.start; + if !universal_start { + sid = prefilter_restart(dfa, cache, &input, at)?; + } + } + } + } + cache.search_start(at); + while at < input.end() { + if sid.is_tagged() { + cache.search_update(at); + sid = dfa + .next_state(cache, sid, input.haystack()[at]) + .map_err(|_| gave_up(at))?; + } else { + // SAFETY: There are two safety invariants we need to uphold + // here in the loops below: that 'sid' and 'prev_sid' are valid + // state IDs for this DFA, and that 'at' is a valid index into + // 'haystack'. For the former, we rely on the invariant that + // next_state* and start_state_forward always returns a valid state + // ID (given a valid state ID in the former case), and that we are + // only at this place in the code if 'sid' is untagged. Moreover, + // every call to next_state_untagged_unchecked below is guarded by + // a check that sid is untagged. For the latter safety invariant, + // we always guard unchecked access with a check that 'at' is less + // than 'end', where 'end <= haystack.len()'. In the unrolled loop + // below, we ensure that 'at' is always in bounds. + // + // PERF: For justification of omitting bounds checks, it gives us a + // ~10% bump in search time. This was used for a benchmark: + // + // regex-cli find half hybrid -p '(?m)^.+$' -UBb bigfile + // + // PERF: For justification for the loop unrolling, we use a few + // different tests: + // + // regex-cli find half hybrid -p '\w{50}' -UBb bigfile + // regex-cli find half hybrid -p '(?m)^.+$' -UBb bigfile + // regex-cli find half hybrid -p 'ZQZQZQZQ' -UBb bigfile + // + // And there are three different configurations: + // + // nounroll: this entire 'else' block vanishes and we just + // always use 'dfa.next_state(..)'. 
+ // unroll1: just the outer loop below + // unroll2: just the inner loop below + // unroll3: both the outer and inner loops below + // + // This results in a matrix of timings for each of the above + // regexes with each of the above unrolling configurations: + // + // '\w{50}' '(?m)^.+$' 'ZQZQZQZQ' + // nounroll 1.51s 2.34s 1.51s + // unroll1 1.53s 2.32s 1.56s + // unroll2 2.22s 1.50s 0.61s + // unroll3 1.67s 1.45s 0.61s + // + // Ideally we'd be able to find a configuration that yields the + // best time for all regexes, but alas we settle for unroll3 that + // gives us *almost* the best for '\w{50}' and the best for the + // other two regexes. + // + // So what exactly is going on here? The first unrolling (grouping + // together runs of untagged transitions) specifically targets + // our choice of representation. The second unrolling (grouping + // together runs of self-transitions) specifically targets a common + // DFA topology. Let's dig in a little bit by looking at our + // regexes: + // + // '\w{50}': This regex spends a lot of time outside of the DFA's + // start state matching some part of the '\w' repetition. This + // means that it's a bit of a worst case for loop unrolling that + // targets self-transitions since the self-transitions in '\w{50}' + // are not particularly active for this haystack. However, the + // first unrolling (grouping together untagged transitions) + // does apply quite well here since very few transitions hit + // match/dead/quit/unknown states. It is however worth mentioning + // that if start states are configured to be tagged (which you + // typically want to do if you have a prefilter), then this regex + // actually slows way down because it is constantly ping-ponging + // out of the unrolled loop and into the handling of a tagged start + // state below. But when start states aren't tagged, the unrolled + // loop stays hot. (This is why it's imperative that start state + // tagging be disabled when there isn't a prefilter!) + // + // '(?m)^.+$': There are two important aspects of this regex: 1) + // on this haystack, its match count is very high, much higher + // than the other two regex and 2) it spends the vast majority + // of its time matching '.+'. Since Unicode mode is disabled, + // this corresponds to repeatedly following self transitions for + // the vast majority of the input. This does benefit from the + // untagged unrolling since most of the transitions will be to + // untagged states, but the untagged unrolling does more work than + // what is actually required. Namely, it has to keep track of the + // previous and next state IDs, which I guess requires a bit more + // shuffling. This is supported by the fact that nounroll+unroll1 + // are both slower than unroll2+unroll3, where the latter has a + // loop unrolling that specifically targets self-transitions. + // + // 'ZQZQZQZQ': This one is very similar to '(?m)^.+$' because it + // spends the vast majority of its time in self-transitions for + // the (implicit) unanchored prefix. The main difference with + // '(?m)^.+$' is that it has a much lower match count. So there + // isn't much time spent in the overhead of reporting matches. This + // is the primary explainer in the perf difference here. We include + // this regex and the former to make sure we have comparison points + // with high and low match counts. + // + // NOTE: I used 'OpenSubtitles2018.raw.sample.en' for 'bigfile'. 
+ // + // NOTE: In a follow-up, it turns out that the "inner" loop + // mentioned above was a pretty big pessimization in some other + // cases. Namely, it resulted in too much ping-ponging into and out + // of the loop, which resulted in nearly ~2x regressions in search + // time when compared to the originally lazy DFA in the regex crate. + // So I've removed the second loop unrolling that targets the + // self-transition case. + let mut prev_sid = sid; + while at < input.end() { + prev_sid = unsafe { next_unchecked!(sid, at) }; + if prev_sid.is_tagged() || at + 3 >= input.end() { + core::mem::swap(&mut prev_sid, &mut sid); + break; + } + at += 1; + + sid = unsafe { next_unchecked!(prev_sid, at) }; + if sid.is_tagged() { + break; + } + at += 1; + + prev_sid = unsafe { next_unchecked!(sid, at) }; + if prev_sid.is_tagged() { + core::mem::swap(&mut prev_sid, &mut sid); + break; + } + at += 1; + + sid = unsafe { next_unchecked!(prev_sid, at) }; + if sid.is_tagged() { + break; + } + at += 1; + } + // If we quit out of the code above with an unknown state ID at + // any point, then we need to re-compute that transition using + // 'next_state', which will do NFA powerset construction for us. + if sid.is_unknown() { + cache.search_update(at); + sid = dfa + .next_state(cache, prev_sid, input.haystack()[at]) + .map_err(|_| gave_up(at))?; + } + } + if sid.is_tagged() { + if sid.is_start() { + if let Some(ref pre) = pre { + let span = Span::from(at..input.end()); + match pre.find(input.haystack(), span) { + None => { + cache.search_finish(span.end); + return Ok(mat); + } + Some(ref span) => { + // We want to skip any update to 'at' below + // at the end of this iteration and just + // jump immediately back to the next state + // transition at the leading position of the + // candidate match. + // + // ... but only if we actually made progress + // with our prefilter, otherwise if the start + // state has a self-loop, we can get stuck. + if span.start > at { + at = span.start; + if !universal_start { + sid = prefilter_restart( + dfa, cache, &input, at, + )?; + } + continue; + } + } + } + } + } else if sid.is_match() { + let pattern = dfa.match_pattern(cache, sid, 0); + // Since slice ranges are inclusive at the beginning and + // exclusive at the end, and since forward searches report + // the end, we can return 'at' as-is. This only works because + // matches are delayed by 1 byte. So by the time we observe a + // match, 'at' has already been set to 1 byte past the actual + // match location, which is precisely the exclusive ending + // bound of the match. 
+ mat = Some(HalfMatch::new(pattern, at)); + if earliest { + cache.search_finish(at); + return Ok(mat); + } + } else if sid.is_dead() { + cache.search_finish(at); + return Ok(mat); + } else if sid.is_quit() { + cache.search_finish(at); + return Err(MatchError::quit(input.haystack()[at], at)); + } else { + debug_assert!(sid.is_unknown()); + unreachable!("sid being unknown is a bug"); + } + } + at += 1; + } + eoi_fwd(dfa, cache, input, &mut sid, &mut mat)?; + cache.search_finish(input.end()); + Ok(mat) +} + +#[inline(never)] +pub(crate) fn find_rev( + dfa: &DFA, + cache: &mut Cache, + input: &Input<'_>, +) -> Result, MatchError> { + if input.is_done() { + return Ok(None); + } + if input.get_earliest() { + find_rev_imp(dfa, cache, input, true) + } else { + find_rev_imp(dfa, cache, input, false) + } +} + +#[cfg_attr(feature = "perf-inline", inline(always))] +fn find_rev_imp( + dfa: &DFA, + cache: &mut Cache, + input: &Input<'_>, + earliest: bool, +) -> Result, MatchError> { + let mut mat = None; + let mut sid = init_rev(dfa, cache, input)?; + // In reverse search, the loop below can't handle the case of searching an + // empty slice. Ideally we could write something congruent to the forward + // search, i.e., 'while at >= start', but 'start' might be 0. Since we use + // an unsigned offset, 'at >= 0' is trivially always true. We could avoid + // this extra case handling by using a signed offset, but Rust makes it + // annoying to do. So... We just handle the empty case separately. + if input.start() == input.end() { + eoi_rev(dfa, cache, input, &mut sid, &mut mat)?; + return Ok(mat); + } + + let mut at = input.end() - 1; + macro_rules! next_unchecked { + ($sid:expr, $at:expr) => {{ + let byte = *input.haystack().get_unchecked($at); + dfa.next_state_untagged_unchecked(cache, $sid, byte) + }}; + } + cache.search_start(at); + loop { + if sid.is_tagged() { + cache.search_update(at); + sid = dfa + .next_state(cache, sid, input.haystack()[at]) + .map_err(|_| gave_up(at))?; + } else { + // SAFETY: See comments in 'find_fwd' for a safety argument. + // + // PERF: The comments in 'find_fwd' also provide a justification + // from a performance perspective as to 1) why we elide bounds + // checks and 2) why we do a specialized version of unrolling + // below. The reverse search does have a slightly different + // consideration in that most reverse searches tend to be + // anchored and on shorter haystacks. However, this still makes a + // difference. Take this command for example: + // + // regex-cli find match hybrid -p '(?m)^.+$' -UBb bigfile + // + // (Notice that we use 'find hybrid regex', not 'find hybrid dfa' + // like in the justification for the forward direction. The 'regex' + // sub-command will find start-of-match and thus run the reverse + // direction.) + // + // Without unrolling below, the above command takes around 3.76s. + // But with the unrolling below, we get down to 2.55s. If we keep + // the unrolling but add in bounds checks, then we get 2.86s. + // + // NOTE: I used 'OpenSubtitles2018.raw.sample.en' for 'bigfile'. 
+ let mut prev_sid = sid; + while at >= input.start() { + prev_sid = unsafe { next_unchecked!(sid, at) }; + if prev_sid.is_tagged() + || at <= input.start().saturating_add(3) + { + core::mem::swap(&mut prev_sid, &mut sid); + break; + } + at -= 1; + + sid = unsafe { next_unchecked!(prev_sid, at) }; + if sid.is_tagged() { + break; + } + at -= 1; + + prev_sid = unsafe { next_unchecked!(sid, at) }; + if prev_sid.is_tagged() { + core::mem::swap(&mut prev_sid, &mut sid); + break; + } + at -= 1; + + sid = unsafe { next_unchecked!(prev_sid, at) }; + if sid.is_tagged() { + break; + } + at -= 1; + } + // If we quit out of the code above with an unknown state ID at + // any point, then we need to re-compute that transition using + // 'next_state', which will do NFA powerset construction for us. + if sid.is_unknown() { + cache.search_update(at); + sid = dfa + .next_state(cache, prev_sid, input.haystack()[at]) + .map_err(|_| gave_up(at))?; + } + } + if sid.is_tagged() { + if sid.is_start() { + // do nothing + } else if sid.is_match() { + let pattern = dfa.match_pattern(cache, sid, 0); + // Since reverse searches report the beginning of a match + // and the beginning is inclusive (not exclusive like the + // end of a match), we add 1 to make it inclusive. + mat = Some(HalfMatch::new(pattern, at + 1)); + if earliest { + cache.search_finish(at); + return Ok(mat); + } + } else if sid.is_dead() { + cache.search_finish(at); + return Ok(mat); + } else if sid.is_quit() { + cache.search_finish(at); + return Err(MatchError::quit(input.haystack()[at], at)); + } else { + debug_assert!(sid.is_unknown()); + unreachable!("sid being unknown is a bug"); + } + } + if at == input.start() { + break; + } + at -= 1; + } + cache.search_finish(input.start()); + eoi_rev(dfa, cache, input, &mut sid, &mut mat)?; + Ok(mat) +} + +#[inline(never)] +pub(crate) fn find_overlapping_fwd( + dfa: &DFA, + cache: &mut Cache, + input: &Input<'_>, + state: &mut OverlappingState, +) -> Result<(), MatchError> { + state.mat = None; + if input.is_done() { + return Ok(()); + } + let pre = if input.get_anchored().is_anchored() { + None + } else { + dfa.get_config().get_prefilter() + }; + if pre.is_some() { + find_overlapping_fwd_imp(dfa, cache, input, pre, state) + } else { + find_overlapping_fwd_imp(dfa, cache, input, None, state) + } +} + +#[cfg_attr(feature = "perf-inline", inline(always))] +fn find_overlapping_fwd_imp( + dfa: &DFA, + cache: &mut Cache, + input: &Input<'_>, + pre: Option<&'_ Prefilter>, + state: &mut OverlappingState, +) -> Result<(), MatchError> { + // See 'prefilter_restart' docs for explanation. + let universal_start = dfa.get_nfa().look_set_prefix_any().is_empty(); + let mut sid = match state.id { + None => { + state.at = input.start(); + init_fwd(dfa, cache, input)? + } + Some(sid) => { + if let Some(match_index) = state.next_match_index { + let match_len = dfa.match_len(cache, sid); + if match_index < match_len { + state.next_match_index = Some(match_index + 1); + let pattern = dfa.match_pattern(cache, sid, match_index); + state.mat = Some(HalfMatch::new(pattern, state.at)); + return Ok(()); + } + } + // Once we've reported all matches at a given position, we need to + // advance the search to the next position. + state.at += 1; + if state.at > input.end() { + return Ok(()); + } + sid + } + }; + + // NOTE: We don't optimize the crap out of this routine primarily because + // it seems like most overlapping searches will have higher match counts, + // and thus, throughput is perhaps not as important. 
But if you have a use + // case for something faster, feel free to file an issue. + cache.search_start(state.at); + while state.at < input.end() { + sid = dfa + .next_state(cache, sid, input.haystack()[state.at]) + .map_err(|_| gave_up(state.at))?; + if sid.is_tagged() { + state.id = Some(sid); + if sid.is_start() { + if let Some(ref pre) = pre { + let span = Span::from(state.at..input.end()); + match pre.find(input.haystack(), span) { + None => return Ok(()), + Some(ref span) => { + if span.start > state.at { + state.at = span.start; + if !universal_start { + sid = prefilter_restart( + dfa, cache, &input, state.at, + )?; + } + continue; + } + } + } + } + } else if sid.is_match() { + state.next_match_index = Some(1); + let pattern = dfa.match_pattern(cache, sid, 0); + state.mat = Some(HalfMatch::new(pattern, state.at)); + cache.search_finish(state.at); + return Ok(()); + } else if sid.is_dead() { + cache.search_finish(state.at); + return Ok(()); + } else if sid.is_quit() { + cache.search_finish(state.at); + return Err(MatchError::quit( + input.haystack()[state.at], + state.at, + )); + } else { + debug_assert!(sid.is_unknown()); + unreachable!("sid being unknown is a bug"); + } + } + state.at += 1; + cache.search_update(state.at); + } + + let result = eoi_fwd(dfa, cache, input, &mut sid, &mut state.mat); + state.id = Some(sid); + if state.mat.is_some() { + // '1' is always correct here since if we get to this point, this + // always corresponds to the first (index '0') match discovered at + // this position. So the next match to report at this position (if + // it exists) is at index '1'. + state.next_match_index = Some(1); + } + cache.search_finish(input.end()); + result +} + +#[inline(never)] +pub(crate) fn find_overlapping_rev( + dfa: &DFA, + cache: &mut Cache, + input: &Input<'_>, + state: &mut OverlappingState, +) -> Result<(), MatchError> { + state.mat = None; + if input.is_done() { + return Ok(()); + } + let mut sid = match state.id { + None => { + let sid = init_rev(dfa, cache, input)?; + state.id = Some(sid); + if input.start() == input.end() { + state.rev_eoi = true; + } else { + state.at = input.end() - 1; + } + sid + } + Some(sid) => { + if let Some(match_index) = state.next_match_index { + let match_len = dfa.match_len(cache, sid); + if match_index < match_len { + state.next_match_index = Some(match_index + 1); + let pattern = dfa.match_pattern(cache, sid, match_index); + state.mat = Some(HalfMatch::new(pattern, state.at)); + return Ok(()); + } + } + // Once we've reported all matches at a given position, we need + // to advance the search to the next position. However, if we've + // already followed the EOI transition, then we know we're done + // with the search and there cannot be any more matches to report. + if state.rev_eoi { + return Ok(()); + } else if state.at == input.start() { + // At this point, we should follow the EOI transition. This + // will cause us the skip the main loop below and fall through + // to the final 'eoi_rev' transition. + state.rev_eoi = true; + } else { + // We haven't hit the end of the search yet, so move on. 
+ state.at -= 1; + } + sid + } + }; + cache.search_start(state.at); + while !state.rev_eoi { + sid = dfa + .next_state(cache, sid, input.haystack()[state.at]) + .map_err(|_| gave_up(state.at))?; + if sid.is_tagged() { + state.id = Some(sid); + if sid.is_start() { + // do nothing + } else if sid.is_match() { + state.next_match_index = Some(1); + let pattern = dfa.match_pattern(cache, sid, 0); + state.mat = Some(HalfMatch::new(pattern, state.at + 1)); + cache.search_finish(state.at); + return Ok(()); + } else if sid.is_dead() { + cache.search_finish(state.at); + return Ok(()); + } else if sid.is_quit() { + cache.search_finish(state.at); + return Err(MatchError::quit( + input.haystack()[state.at], + state.at, + )); + } else { + debug_assert!(sid.is_unknown()); + unreachable!("sid being unknown is a bug"); + } + } + if state.at == input.start() { + break; + } + state.at -= 1; + cache.search_update(state.at); + } + + let result = eoi_rev(dfa, cache, input, &mut sid, &mut state.mat); + state.rev_eoi = true; + state.id = Some(sid); + if state.mat.is_some() { + // '1' is always correct here since if we get to this point, this + // always corresponds to the first (index '0') match discovered at + // this position. So the next match to report at this position (if + // it exists) is at index '1'. + state.next_match_index = Some(1); + } + cache.search_finish(input.start()); + result +} + +#[cfg_attr(feature = "perf-inline", inline(always))] +fn init_fwd( + dfa: &DFA, + cache: &mut Cache, + input: &Input<'_>, +) -> Result { + let sid = dfa.start_state_forward(cache, input)?; + // Start states can never be match states, since all matches are delayed + // by 1 byte. + debug_assert!(!sid.is_match()); + Ok(sid) +} + +#[cfg_attr(feature = "perf-inline", inline(always))] +fn init_rev( + dfa: &DFA, + cache: &mut Cache, + input: &Input<'_>, +) -> Result { + let sid = dfa.start_state_reverse(cache, input)?; + // Start states can never be match states, since all matches are delayed + // by 1 byte. + debug_assert!(!sid.is_match()); + Ok(sid) +} + +#[cfg_attr(feature = "perf-inline", inline(always))] +fn eoi_fwd( + dfa: &DFA, + cache: &mut Cache, + input: &Input<'_>, + sid: &mut LazyStateID, + mat: &mut Option, +) -> Result<(), MatchError> { + let sp = input.get_span(); + match input.haystack().get(sp.end) { + Some(&b) => { + *sid = + dfa.next_state(cache, *sid, b).map_err(|_| gave_up(sp.end))?; + if sid.is_match() { + let pattern = dfa.match_pattern(cache, *sid, 0); + *mat = Some(HalfMatch::new(pattern, sp.end)); + } else if sid.is_quit() { + return Err(MatchError::quit(b, sp.end)); + } + } + None => { + *sid = dfa + .next_eoi_state(cache, *sid) + .map_err(|_| gave_up(input.haystack().len()))?; + if sid.is_match() { + let pattern = dfa.match_pattern(cache, *sid, 0); + *mat = Some(HalfMatch::new(pattern, input.haystack().len())); + } + // N.B. We don't have to check 'is_quit' here because the EOI + // transition can never lead to a quit state. 
+ debug_assert!(!sid.is_quit()); + } + } + Ok(()) +} + +#[cfg_attr(feature = "perf-inline", inline(always))] +fn eoi_rev( + dfa: &DFA, + cache: &mut Cache, + input: &Input<'_>, + sid: &mut LazyStateID, + mat: &mut Option, +) -> Result<(), MatchError> { + let sp = input.get_span(); + if sp.start > 0 { + let byte = input.haystack()[sp.start - 1]; + *sid = dfa + .next_state(cache, *sid, byte) + .map_err(|_| gave_up(sp.start))?; + if sid.is_match() { + let pattern = dfa.match_pattern(cache, *sid, 0); + *mat = Some(HalfMatch::new(pattern, sp.start)); + } else if sid.is_quit() { + return Err(MatchError::quit(byte, sp.start - 1)); + } + } else { + *sid = + dfa.next_eoi_state(cache, *sid).map_err(|_| gave_up(sp.start))?; + if sid.is_match() { + let pattern = dfa.match_pattern(cache, *sid, 0); + *mat = Some(HalfMatch::new(pattern, 0)); + } + // N.B. We don't have to check 'is_quit' here because the EOI + // transition can never lead to a quit state. + debug_assert!(!sid.is_quit()); + } + Ok(()) +} + +/// Re-compute the starting state that a DFA should be in after finding a +/// prefilter candidate match at the position `at`. +/// +/// It is always correct to call this, but not always necessary. Namely, +/// whenever the DFA has a universal start state, the DFA can remain in the +/// start state that it was in when it ran the prefilter. Why? Because in that +/// case, there is only one start state. +/// +/// When does a DFA have a universal start state? In precisely cases where +/// it has no look-around assertions in its prefix. So for example, `\bfoo` +/// does not have a universal start state because the start state depends on +/// whether the byte immediately before the start position is a word byte or +/// not. However, `foo\b` does have a universal start state because the word +/// boundary does not appear in the pattern's prefix. +/// +/// So... most cases don't need this, but when a pattern doesn't have a +/// universal start state, then after a prefilter candidate has been found, the +/// current state *must* be re-litigated as if computing the start state at the +/// beginning of the search because it might change. That is, not all start +/// states are created equal. +/// +/// Why avoid it? Because while it's not super expensive, it isn't a trivial +/// operation to compute the start state. It is much better to avoid it and +/// just state in the current state if you know it to be correct. +#[cfg_attr(feature = "perf-inline", inline(always))] +fn prefilter_restart( + dfa: &DFA, + cache: &mut Cache, + input: &Input<'_>, + at: usize, +) -> Result { + let mut input = input.clone(); + input.set_start(at); + init_fwd(dfa, cache, &input) +} + +/// A convenience routine for constructing a "gave up" match error. +#[cfg_attr(feature = "perf-inline", inline(always))] +fn gave_up(offset: usize) -> MatchError { + MatchError::gave_up(offset) +} diff --git a/vendor/regex-automata/src/lib.rs b/vendor/regex-automata/src/lib.rs new file mode 100644 index 00000000000000..b29f618a8b21cb --- /dev/null +++ b/vendor/regex-automata/src/lib.rs @@ -0,0 +1,651 @@ +/*! +This crate exposes a variety of regex engines used by the `regex` crate. +It provides a vast, sprawling and "expert" level API to each regex engine. +The regex engines provided by this crate focus heavily on finite automata +implementations and specifically guarantee worst case `O(m * n)` time +complexity for all searches. (Where `m ~ len(regex)` and `n ~ len(haystack)`.) 
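+
+As a quick, minimal sketch of what that looks like in practice (using the
+[`meta::Regex`] engine described below; the pattern and haystack are only
+illustrative), a basic search might be:
+
+```
+use regex_automata::{meta::Regex, Match};
+
+let re = Regex::new(r"[0-9]{4}-[0-9]{2}-[0-9]{2}")?;
+let hay = "released on 2010-03-14";
+// The match spans the date, reported as byte offsets into the haystack.
+assert_eq!(Some(Match::must(0, 12..22)), re.find(hay));
+# Ok::<(), Box<dyn std::error::Error>>(())
+```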
+ +The primary goal of this crate is to serve as an implementation detail for the +`regex` crate. A secondary goal is to make its internals available for use by +others. + +# Table of contents + +* [Should I be using this crate?](#should-i-be-using-this-crate) gives some +reasons for and against using this crate. +* [Examples](#examples) provides a small selection of things you can do with +this crate. +* [Available regex engines](#available-regex-engines) provides a hyperlinked +list of all regex engines in this crate. +* [API themes](#api-themes) discusses common elements used throughout this +crate. +* [Crate features](#crate-features) documents the extensive list of Cargo +features available. + +# Should I be using this crate? + +If you find yourself here because you just want to use regexes, then you should +first check out whether the [`regex` crate](https://docs.rs/regex) meets +your needs. It provides a streamlined and difficult-to-misuse API for regex +searching. + +If you're here because there is something specific you want to do that can't +be easily done with `regex` crate, then you are perhaps in the right place. +It's most likely that the first stop you'll want to make is to explore the +[`meta` regex APIs](meta). Namely, the `regex` crate is just a light wrapper +over a [`meta::Regex`], so its API will probably be the easiest to transition +to. In contrast to the `regex` crate, the `meta::Regex` API supports more +search parameters and does multi-pattern searches. However, it isn't quite as +ergonomic. + +Otherwise, the following is an inexhaustive list of reasons to use this crate: + +* You want to analyze or use a [Thompson `NFA`](nfa::thompson::NFA) directly. +* You want more powerful multi-pattern search than what is provided by +`RegexSet` in the `regex` crate. All regex engines in this crate support +multi-pattern searches. +* You want to use one of the `regex` crate's internal engines directly because +of some interesting configuration that isn't possible via the `regex` crate. +For example, a [lazy DFA's configuration](hybrid::dfa::Config) exposes a +dizzying number of options for controlling its execution. +* You want to use the lower level search APIs. For example, both the [lazy +DFA](hybrid::dfa) and [fully compiled DFAs](dfa) support searching by exploring +the automaton one state at a time. This might be useful, for example, for +stream searches or searches of strings stored in non-contiguous in memory. +* You want to build a fully compiled DFA and then [use zero-copy +deserialization](dfa::dense::DFA::from_bytes) to load it into memory and use +it for searching. This use case is supported in core-only no-std/no-alloc +environments. +* You want to run [anchored searches](Input::anchored) without using the `^` +anchor in your regex pattern. +* You need to work-around contention issues with +sharing a regex across multiple threads. The +[`meta::Regex::search_with`](meta::Regex::search_with) API permits bypassing +any kind of synchronization at all by requiring the caller to provide the +mutable scratch spaced needed during a search. +* You want to build your own regex engine on top of the `regex` crate's +infrastructure. + +# Examples + +This section tries to identify a few interesting things you can do with this +crate and demonstrates them. + +### Multi-pattern searches with capture groups + +One of the more frustrating limitations of `RegexSet` in the `regex` crate +(at the time of writing) is that it doesn't report match positions. 
With this
+crate, multi-pattern support was intentionally designed in from the beginning,
+which means it works in all regex engines and even for capture groups as well.
+
+This example shows how to search for matches of multiple regexes, where each
+regex uses the same capture group names to parse different key-value formats.
+
+```
+use regex_automata::{meta::Regex, PatternID};
+
+let re = Regex::new_many(&[
+    r#"(?m)^(?<key>[[:word:]]+)=(?<val>[[:word:]]+)$"#,
+    r#"(?m)^(?<key>[[:word:]]+)="(?<val>[^"]+)"$"#,
+    r#"(?m)^(?<key>[[:word:]]+)='(?<val>[^']+)'$"#,
+    r#"(?m)^(?<key>[[:word:]]+):\s*(?<val>[[:word:]]+)$"#,
+])?;
+let hay = r#"
+best_album="Blow Your Face Out"
+best_quote='"then as it was, then again it will be"'
+best_year=1973
+best_simpsons_episode: HOMR
+"#;
+let mut kvs = vec![];
+for caps in re.captures_iter(hay) {
+    // N.B. One could use capture indices '1' and '2' here
+    // as well. Capture indices are local to each pattern.
+    // (Just like names are.)
+    let key = &hay[caps.get_group_by_name("key").unwrap()];
+    let val = &hay[caps.get_group_by_name("val").unwrap()];
+    kvs.push((key, val));
+}
+assert_eq!(kvs, vec![
+    ("best_album", "Blow Your Face Out"),
+    ("best_quote", "\"then as it was, then again it will be\""),
+    ("best_year", "1973"),
+    ("best_simpsons_episode", "HOMR"),
+]);
+
+# Ok::<(), Box<dyn std::error::Error>>(())
+```
+
+### Build a full DFA and walk it manually
+
+One of the regex engines in this crate is a fully compiled DFA. It takes worst
+case exponential time to build, but once built, it can be easily explored and
+used for searches. Here's a simple example that uses its lower level APIs to
+implement an anchored search by hand.
+
+```
+use regex_automata::{dfa::{Automaton, dense}, Input};
+
+let dfa = dense::DFA::new(r"(?-u)\b[A-Z]\w+z\b")?;
+let haystack = "Quartz";
+
+// The start state is determined by inspecting the position and the
+// initial bytes of the haystack.
+let mut state = dfa.start_state_forward(&Input::new(haystack))?;
+// Walk all the bytes in the haystack.
+for &b in haystack.as_bytes().iter() {
+    state = dfa.next_state(state, b);
+}
+// DFAs in this crate require an explicit
+// end-of-input transition if a search reaches
+// the end of a haystack.
+state = dfa.next_eoi_state(state);
+assert!(dfa.is_match_state(state));
+
+# Ok::<(), Box<dyn std::error::Error>>(())
+```
+
+Or do the same with a lazy DFA that avoids exponential worst case compile time,
+but requires mutable scratch space to lazily build the DFA during the search.
+
+```
+use regex_automata::{hybrid::dfa::DFA, Input};
+
+let dfa = DFA::new(r"(?-u)\b[A-Z]\w+z\b")?;
+let mut cache = dfa.create_cache();
+let hay = "Quartz";
+
+// The start state is determined by inspecting the position and the
+// initial bytes of the haystack.
+let mut state = dfa.start_state_forward(&mut cache, &Input::new(hay))?;
+// Walk all the bytes in the haystack.
+for &b in hay.as_bytes().iter() {
+    state = dfa.next_state(&mut cache, state, b)?;
+}
+// DFAs in this crate require an explicit
+// end-of-input transition if a search reaches
+// the end of a haystack.
+state = dfa.next_eoi_state(&mut cache, state)?;
+assert!(state.is_match());
+
+# Ok::<(), Box<dyn std::error::Error>>(())
+```
+
+### Find all overlapping matches
+
+This example shows how to build a DFA and use it to find all possible matches,
+including overlapping matches. A similar example will work with a lazy DFA as
+well. This also works with multiple patterns and will report all matches at the
+same position where multiple patterns match.
+ +``` +use regex_automata::{ + dfa::{dense, Automaton, OverlappingState}, + Input, MatchKind, +}; + +let dfa = dense::DFA::builder() + .configure(dense::DFA::config().match_kind(MatchKind::All)) + .build(r"(?-u)\w{3,}")?; +let input = Input::new("homer marge bart lisa maggie"); +let mut state = OverlappingState::start(); + +let mut matches = vec![]; +while let Some(hm) = { + dfa.try_search_overlapping_fwd(&input, &mut state)?; + state.get_match() +} { + matches.push(hm.offset()); +} +assert_eq!(matches, vec![ + 3, 4, 5, // hom, home, homer + 9, 10, 11, // mar, marg, marge + 15, 16, // bar, bart + 20, 21, // lis, lisa + 25, 26, 27, 28, // mag, magg, maggi, maggie +]); + +# Ok::<(), Box>(()) +``` + +# Available regex engines + +The following is a complete list of all regex engines provided by this crate, +along with a very brief description of it and why you might want to use it. + +* [`dfa::regex::Regex`] is a regex engine that works on top of either +[dense](dfa::dense) or [sparse](dfa::sparse) fully compiled DFAs. You might +use a DFA if you need the fastest possible regex engine in this crate and can +afford the exorbitant memory usage usually required by DFAs. Low level APIs on +fully compiled DFAs are provided by the [`Automaton` trait](dfa::Automaton). +Fully compiled dense DFAs can handle all regexes except for searching a regex +with a Unicode word boundary on non-ASCII haystacks. A fully compiled DFA based +regex can only report the start and end of each match. +* [`hybrid::regex::Regex`] is a regex engine that works on top of a lazily +built DFA. Its performance profile is very similar to that of fully compiled +DFAs, but can be slower in some pathological cases. Fully compiled DFAs are +also amenable to more optimizations, such as state acceleration, that aren't +available in a lazy DFA. You might use this lazy DFA if you can't abide the +worst case exponential compile time of a full DFA, but still want the DFA +search performance in the vast majority of cases. A lazy DFA based regex can +only report the start and end of each match. +* [`dfa::onepass::DFA`] is a regex engine that is implemented as a DFA, but +can report the matches of each capture group in addition to the start and end +of each match. The catch is that it only works on a somewhat small subset of +regexes known as "one-pass." You'll want to use this for cases when you need +capture group matches and the regex is one-pass since it is likely to be faster +than any alternative. A one-pass DFA can handle all types of regexes, but does +have some reasonable limits on the number of capture groups it can handle. +* [`nfa::thompson::backtrack::BoundedBacktracker`] is a regex engine that uses +backtracking, but keeps track of the work it has done to avoid catastrophic +backtracking. Like the one-pass DFA, it provides the matches of each capture +group. It retains the `O(m * n)` worst case time bound. This tends to be slower +than the one-pass DFA regex engine, but faster than the PikeVM. It can handle +all types of regexes, but usually only works well with small haystacks and +small regexes due to the memory required to avoid redoing work. +* [`nfa::thompson::pikevm::PikeVM`] is a regex engine that can handle all +regexes, of all sizes and provides capture group matches. It tends to be a tool +of last resort because it is also usually the slowest regex engine. +* [`meta::Regex`] is the meta regex engine that combines *all* of the above +engines into one. 
The reason for this is that each of the engines above has
+its own caveats such as, "only handles a subset of regexes" or "is generally
+slow." The meta regex engine accounts for all of these caveats and composes
+the engines in a way that attempts to mitigate each engine's weaknesses while
+emphasizing its strengths. For example, it will attempt to run a lazy DFA even
+if it might fail. In which case, it will restart the search with a likely
+slower but more capable regex engine. The meta regex engine is what you should
+default to. Use one of the above engines directly only if you have a specific
+reason to.
+
+# API themes
+
+While each regex engine has its own APIs and configuration options, there are
+some general themes followed by all of them.
+
+### The `Input` abstraction
+
+Most search routines in this crate accept anything that implements
+`Into<Input>`. Both `&str` and `&[u8]` haystacks satisfy this constraint, which
+means that things like `engine.search("foo")` will work as you would expect.
+
+By virtue of accepting an `Into<Input>` though, callers can provide more than
+just a haystack. Indeed, the [`Input`] type has more details, but briefly,
+callers can use it to configure various aspects of the search:
+
+* The span of the haystack to search via [`Input::span`] or [`Input::range`],
+which might be a substring of the haystack.
+* Whether to run an anchored search or not via [`Input::anchored`]. This
+permits one to require matches to start at the same offset that the search
+started.
+* Whether to ask the regex engine to stop as soon as a match is seen via
+[`Input::earliest`]. This can be used to find the offset of a match as soon
+as it is known without waiting for the full leftmost-first match to be found.
+This can also be used to avoid the worst case `O(m * n^2)` time complexity
+of iteration.
+
+Some lower level search routines accept an `&Input` for performance reasons.
+In which case, `&Input::new("haystack")` can be used for a simple search.
+
+### Error reporting
+
+Most, but not all, regex engines in this crate can fail to execute a search.
+When a search fails, callers cannot determine whether or not a match exists.
+That is, the result is indeterminate.
+
+Search failure, in all cases in this crate, is represented by a [`MatchError`].
+Routines that can fail start with the `try_` prefix in their name. For example,
+[`hybrid::regex::Regex::try_search`] can fail for a number of reasons.
+Conversely, routines that either can't fail or can panic on failure lack the
+`try_` prefix. For example, [`hybrid::regex::Regex::find`] will panic in
+cases where [`hybrid::regex::Regex::try_search`] would return an error, and
+[`meta::Regex::find`] will never panic. Therefore, callers need to pay close
+attention to the panicking conditions in the documentation.
+
+In most cases, the reasons that a search fails are either predictable or
+configurable, albeit at some additional cost.
+
+An example of predictable failure is
+[`BoundedBacktracker::try_search`](nfa::thompson::backtrack::BoundedBacktracker::try_search).
+Namely, it fails whenever the multiplication of the haystack, the regex and some
+constant exceeds the
+[configured visited capacity](nfa::thompson::backtrack::Config::visited_capacity).
+Callers can predict the failure in terms of haystack length via the
+[`BoundedBacktracker::max_haystack_len`](nfa::thompson::backtrack::BoundedBacktracker::max_haystack_len)
+method.
While this form of failure is technically avoidable by increasing the +visited capacity, it isn't practical to do so for all inputs because the +memory usage required for larger haystacks becomes impractically large. So in +practice, if one is using the bounded backtracker, you really do have to deal +with the failure. + +An example of configurable failure happens when one enables heuristic support +for Unicode word boundaries in a DFA. Namely, since the DFAs in this crate +(except for the one-pass DFA) do not support Unicode word boundaries on +non-ASCII haystacks, building a DFA from an NFA that contains a Unicode word +boundary will itself fail. However, one can configure DFAs to still be built in +this case by +[configuring heuristic support for Unicode word boundaries](hybrid::dfa::Config::unicode_word_boundary). +If the NFA the DFA is built from contains a Unicode word boundary, then the +DFA will still be built, but special transitions will be added to every state +that cause the DFA to fail if any non-ASCII byte is seen. This failure happens +at search time and it requires the caller to opt into this. + +There are other ways for regex engines to fail in this crate, but the above +two should represent the general theme of failures one can find. Dealing +with these failures is, in part, one the responsibilities of the [meta regex +engine](meta). Notice, for example, that the meta regex engine exposes an API +that never returns an error nor panics. It carefully manages all of the ways +in which the regex engines can fail and either avoids the predictable ones +entirely (e.g., the bounded backtracker) or reacts to configured failures by +falling back to a different engine (e.g., the lazy DFA quitting because it saw +a non-ASCII byte). + +### Configuration and Builders + +Most of the regex engines in this crate come with two types to facilitate +building the regex engine: a `Config` and a `Builder`. A `Config` is usually +specific to that particular regex engine, but other objects such as parsing and +NFA compilation have `Config` types too. A `Builder` is the thing responsible +for taking inputs (either pattern strings or already-parsed patterns or even +NFAs directly) and turning them into an actual regex engine that can be used +for searching. + +The main reason why building a regex engine is a bit complicated is because +of the desire to permit composition with de-coupled components. For example, +you might want to [manually construct a Thompson NFA](nfa::thompson::Builder) +and then build a regex engine from it without ever using a regex parser +at all. On the other hand, you might also want to build a regex engine directly +from the concrete syntax. This demonstrates why regex engine construction is +so flexible: it needs to support not just convenient construction, but also +construction from parts built elsewhere. + +This is also in turn why there are many different `Config` structs in this +crate. Let's look more closely at an example: [`hybrid::regex::Builder`]. It +accepts three different `Config` types for configuring construction of a lazy +DFA regex: + +* [`hybrid::regex::Builder::syntax`] accepts a +[`util::syntax::Config`] for configuring the options found in the +[`regex-syntax`](regex_syntax) crate. For example, whether to match +case insensitively. +* [`hybrid::regex::Builder::thompson`] accepts a [`nfa::thompson::Config`] for +configuring construction of a [Thompson NFA](nfa::thompson::NFA). 
For example, +whether to build an NFA that matches the reverse language described by the +regex. +* [`hybrid::regex::Builder::dfa`] accept a [`hybrid::dfa::Config`] for +configuring construction of the pair of underlying lazy DFAs that make up the +lazy DFA regex engine. For example, changing the capacity of the cache used to +store the transition table. + +The lazy DFA regex engine uses all three of those configuration objects for +methods like [`hybrid::regex::Builder::build`], which accepts a pattern +string containing the concrete syntax of your regex. It uses the syntax +configuration to parse it into an AST and translate it into an HIR. Then the +NFA configuration when compiling the HIR into an NFA. And then finally the DFA +configuration when lazily determinizing the NFA into a DFA. + +Notice though that the builder also has a +[`hybrid::regex::Builder::build_from_dfas`] constructor. This permits callers +to build the underlying pair of lazy DFAs themselves (one for the forward +searching to find the end of a match and one for the reverse searching to find +the start of a match), and then build the regex engine from them. The lazy +DFAs, in turn, have their own builder that permits [construction directly from +a Thompson NFA](hybrid::dfa::Builder::build_from_nfa). Continuing down the +rabbit hole, a Thompson NFA has its own compiler that permits [construction +directly from an HIR](nfa::thompson::Compiler::build_from_hir). The lazy DFA +regex engine builder lets you follow this rabbit hole all the way down, but +also provides convenience routines that do it for you when you don't need +precise control over every component. + +The [meta regex engine](meta) is a good example of something that utilizes the +full flexibility of these builders. It often needs not only precise control +over each component, but also shares them across multiple regex engines. +(Most sharing is done by internal reference accounting. For example, an +[`NFA`](nfa::thompson::NFA) is reference counted internally which makes cloning +cheap.) + +### Size limits + +Unlike the `regex` crate, the `regex-automata` crate specifically does not +enable any size limits by default. That means users of this crate need to +be quite careful when using untrusted patterns. Namely, because bounded +repetitions can grow exponentially by stacking them, it is possible to build a +very large internal regex object from just a small pattern string. For example, +the NFA built from the pattern `a{10}{10}{10}{10}{10}{10}{10}` is over 240MB. + +There are multiple size limit options in this crate. If one or more size limits +are relevant for the object you're building, they will be configurable via +methods on a corresponding `Config` type. + +# Crate features + +This crate has a dizzying number of features. The main idea is to be able to +control how much stuff you pull in for your specific use case, since the full +crate is quite large and can dramatically increase compile times and binary +size. + +The most barebones but useful configuration is to disable all default features +and enable only `dfa-search`. This will bring in just the DFA deserialization +and search routines without any dependency on `std` or `alloc`. This does +require generating and serializing a DFA, and then storing it somewhere, but +it permits regex searches in freestanding or embedded environments. + +Because there are so many features, they are split into a few groups. 
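+
+As a brief aside before the feature groups, here is a rough sketch of the size
+limit point made above. It assumes the `meta::Config::nfa_size_limit` option
+(the same limit surfaced by `meta::BuildError::size_limit`); with it set, the
+exponential blowup becomes a build error instead of a huge allocation:
+
+```
+use regex_automata::meta::Regex;
+
+// With a 1MiB NFA size limit, the exponentially large pattern from the
+// size limits discussion above fails to build instead of producing an
+// NFA that is over 240MB.
+let result = Regex::builder()
+    .configure(Regex::config().nfa_size_limit(Some(1 << 20)))
+    .build(r"a{10}{10}{10}{10}{10}{10}{10}");
+assert!(result.is_err());
+```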
+ +The default set of features is: `std`, `syntax`, `perf`, `unicode`, `meta`, +`nfa`, `dfa` and `hybrid`. Basically, the default is to enable everything +except for development related features like `logging`. + +### Ecosystem features + +* **std** - Enables use of the standard library. In terms of APIs, this usually +just means that error types implement the `std::error::Error` trait. Otherwise, +`std` sometimes enables the code to be faster, for example, using a `HashMap` +instead of a `BTreeMap`. (The `std` feature matters more for dependencies like +`aho-corasick` and `memchr`, where `std` is required to enable certain classes +of SIMD optimizations.) Enabling `std` automatically enables `alloc`. +* **alloc** - Enables use of the `alloc` library. This is required for most +APIs in this crate. The main exception is deserializing and searching with +fully compiled DFAs. +* **logging** - Adds a dependency on the `log` crate and makes this crate emit +log messages of varying degrees of utility. The log messages are especially +useful in trying to understand what the meta regex engine is doing. + +### Performance features + +**Note**: + To get performance benefits offered by the SIMD, `std` must be enabled. + None of the `perf-*` features will enable `std` implicitly. + +* **perf** - Enables all of the below features. +* **perf-inline** - When enabled, `inline(always)` is used in (many) strategic +locations to help performance at the expense of longer compile times and +increased binary size. +* **perf-literal** - Enables all literal related optimizations. + * **perf-literal-substring** - Enables all single substring literal + optimizations. This includes adding a dependency on the `memchr` crate. + * **perf-literal-multisubstring** - Enables all multiple substring literal + optimizations. This includes adding a dependency on the `aho-corasick` + crate. + +### Unicode features + +* **unicode** - + Enables all Unicode features. This feature is enabled by default, and will + always cover all Unicode features, even if more are added in the future. +* **unicode-age** - + Provide the data for the + [Unicode `Age` property](https://www.unicode.org/reports/tr44/tr44-24.html#Character_Age). + This makes it possible to use classes like `\p{Age:6.0}` to refer to all + codepoints first introduced in Unicode 6.0 +* **unicode-bool** - + Provide the data for numerous Unicode boolean properties. The full list + is not included here, but contains properties like `Alphabetic`, `Emoji`, + `Lowercase`, `Math`, `Uppercase` and `White_Space`. +* **unicode-case** - + Provide the data for case insensitive matching using + [Unicode's "simple loose matches" specification](https://www.unicode.org/reports/tr18/#Simple_Loose_Matches). +* **unicode-gencat** - + Provide the data for + [Unicode general categories](https://www.unicode.org/reports/tr44/tr44-24.html#General_Category_Values). + This includes, but is not limited to, `Decimal_Number`, `Letter`, + `Math_Symbol`, `Number` and `Punctuation`. +* **unicode-perl** - + Provide the data for supporting the Unicode-aware Perl character classes, + corresponding to `\w`, `\s` and `\d`. This is also necessary for using + Unicode-aware word boundary assertions. Note that if this feature is + disabled, the `\s` and `\d` character classes are still available if the + `unicode-bool` and `unicode-gencat` features are enabled, respectively. +* **unicode-script** - + Provide the data for + [Unicode scripts and script extensions](https://www.unicode.org/reports/tr24/). 
+ This includes, but is not limited to, `Arabic`, `Cyrillic`, `Hebrew`, + `Latin` and `Thai`. +* **unicode-segment** - + Provide the data necessary to provide the properties used to implement the + [Unicode text segmentation algorithms](https://www.unicode.org/reports/tr29/). + This enables using classes like `\p{gcb=Extend}`, `\p{wb=Katakana}` and + `\p{sb=ATerm}`. +* **unicode-word-boundary** - + Enables support for Unicode word boundaries, i.e., `\b`, in regexes. When + this and `unicode-perl` are enabled, then data tables from `regex-syntax` are + used to implement Unicode word boundaries. However, if `regex-syntax` isn't + enabled as a dependency then one can still enable this feature. It will + cause `regex-automata` to bundle its own data table that would otherwise be + redundant with `regex-syntax`'s table. + +### Regex engine features + +* **syntax** - Enables a dependency on `regex-syntax`. This makes APIs +for building regex engines from pattern strings available. Without the +`regex-syntax` dependency, the only way to build a regex engine is generally +to deserialize a previously built DFA or to hand assemble an NFA using its +[builder API](nfa::thompson::Builder). Once you have an NFA, you can build any +of the regex engines in this crate. The `syntax` feature also enables `alloc`. +* **meta** - Enables the meta regex engine. This also enables the `syntax` and +`nfa-pikevm` features, as both are the minimal requirements needed. The meta +regex engine benefits from enabling any of the other regex engines and will +use them automatically when appropriate. +* **nfa** - Enables all NFA related features below. + * **nfa-thompson** - Enables the Thompson NFA APIs. This enables `alloc`. + * **nfa-pikevm** - Enables the PikeVM regex engine. This enables + `nfa-thompson`. + * **nfa-backtrack** - Enables the bounded backtracker regex engine. This + enables `nfa-thompson`. +* **dfa** - Enables all DFA related features below. + * **dfa-build** - Enables APIs for determinizing DFAs from NFAs. This + enables `nfa-thompson` and `dfa-search`. + * **dfa-search** - Enables APIs for searching with DFAs. + * **dfa-onepass** - Enables the one-pass DFA API. This enables + `nfa-thompson`. +* **hybrid** - Enables the hybrid NFA/DFA or "lazy DFA" regex engine. This +enables `alloc` and `nfa-thompson`. + +*/ + +// We are no_std. +#![no_std] +// All APIs need docs! +#![deny(missing_docs)] +// Some intra-doc links are broken when certain features are disabled, so we +// only bleat about it when most (all?) features are enabled. But when we do, +// we block the build. Links need to work. +#![cfg_attr( + all( + feature = "std", + feature = "nfa", + feature = "dfa", + feature = "hybrid" + ), + deny(rustdoc::broken_intra_doc_links) +)] +// Broken rustdoc links are very easy to come by when you start disabling +// features. Namely, features tend to change imports, and imports change what's +// available to link to. +// +// Basically, we just don't support rustdoc for anything other than the maximal +// feature configuration. Other configurations will work, they just won't be +// perfect. +// +// So here, we specifically allow them so we don't even get warned about them. +#![cfg_attr( + not(all( + feature = "std", + feature = "nfa", + feature = "dfa", + feature = "hybrid" + )), + allow(rustdoc::broken_intra_doc_links) +)] +// Kinda similar, but eliminating all of the dead code and unused import +// warnings for every feature combo is a fool's errand. 
Instead, we just +// suppress those, but still let them through in a common configuration when we +// build most of everything. +// +// This does actually suggest that when features are disabled, we are actually +// compiling more code than we need to be. And this is perhaps not so great +// because disabling features is usually done in order to reduce compile times +// by reducing the amount of code one compiles... However, usually, most of the +// time this dead code is a relatively small amount from the 'util' module. +// But... I confess... There isn't a ton of visibility on this. +// +// I'm happy to try to address this in a different way, but "let's annotate +// every function in 'util' with some non-local combination of features" just +// cannot be the way forward. +#![cfg_attr( + not(all( + feature = "std", + feature = "nfa", + feature = "dfa", + feature = "hybrid", + feature = "perf-literal-substring", + feature = "perf-literal-multisubstring", + )), + allow(dead_code, unused_imports, unused_variables) +)] +// We generally want all types to impl Debug. +#![warn(missing_debug_implementations)] +// This adds Cargo feature annotations to items in the rustdoc output. Which is +// sadly hugely beneficial for this crate due to the number of features. +#![cfg_attr(docsrs_regex, feature(doc_cfg))] + +// I have literally never tested this crate on 16-bit, so it is quite +// suspicious to advertise support for it. But... the regex crate, at time +// of writing, at least claims to support it by not doing any conditional +// compilation based on the target pointer width. So I guess I remain +// consistent with that here. +// +// If you are here because you're on a 16-bit system and you were somehow using +// the regex crate previously, please file an issue. Please be prepared to +// provide some kind of reproduction or carve out some path to getting 16-bit +// working in CI. (Via qemu?) +#[cfg(not(any( + target_pointer_width = "16", + target_pointer_width = "32", + target_pointer_width = "64" +)))] +compile_error!("not supported on non-{16,32,64}, please file an issue"); + +#[cfg(any(test, feature = "std"))] +extern crate std; + +#[cfg(feature = "alloc")] +extern crate alloc; + +#[cfg(doctest)] +doc_comment::doctest!("../README.md"); + +#[doc(inline)] +pub use crate::util::primitives::PatternID; +pub use crate::util::search::*; + +#[macro_use] +mod macros; + +#[cfg(any(feature = "dfa-search", feature = "dfa-onepass"))] +pub mod dfa; +#[cfg(feature = "hybrid")] +pub mod hybrid; +#[cfg(feature = "meta")] +pub mod meta; +#[cfg(feature = "nfa-thompson")] +pub mod nfa; +pub mod util; diff --git a/vendor/regex-automata/src/macros.rs b/vendor/regex-automata/src/macros.rs new file mode 100644 index 00000000000000..31b4ca3816ace2 --- /dev/null +++ b/vendor/regex-automata/src/macros.rs @@ -0,0 +1,20 @@ +// Some feature combinations result in some of these macros never being used. +// Which is fine. Just squash the warnings. +#![allow(unused_macros)] + +macro_rules! log { + ($($tt:tt)*) => { + #[cfg(feature = "logging")] + { + $($tt)* + } + } +} + +macro_rules! debug { + ($($tt:tt)*) => { log!(log::debug!($($tt)*)) } +} + +macro_rules! 
trace {
+    ($($tt:tt)*) => { log!(log::trace!($($tt)*)) }
+}
diff --git a/vendor/regex-automata/src/meta/error.rs b/vendor/regex-automata/src/meta/error.rs
new file mode 100644
index 00000000000000..9ead729bbdf986
--- /dev/null
+++ b/vendor/regex-automata/src/meta/error.rs
@@ -0,0 +1,241 @@
+use regex_syntax::{ast, hir};
+
+use crate::{nfa, util::search::MatchError, PatternID};
+
+/// An error that occurs when construction of a `Regex` fails.
+///
+/// A build error is generally a result of one of two possible failure
+/// modes. First is a parse or syntax error in the concrete syntax of a
+/// pattern. Second is that the construction of the underlying regex matcher
+/// fails, usually because it gets too big with respect to limits like
+/// [`Config::nfa_size_limit`](crate::meta::Config::nfa_size_limit).
+///
+/// This error provides very little introspection capabilities. You can:
+///
+/// * Ask for the [`PatternID`] of the pattern that caused an error, if one
+/// is available. This is available for things like syntax errors, but not for
+/// cases where build limits are exceeded.
+/// * Ask for the underlying syntax error, but only if the error is a syntax
+/// error.
+/// * Ask for a human readable message corresponding to the underlying error.
+/// * The `BuildError::source` method (from the `std::error::Error`
+/// trait implementation) may be used to query for an underlying error if one
+/// exists. There are no API guarantees about which error is returned.
+///
+/// When the `std` feature is enabled, this implements `std::error::Error`.
+#[derive(Clone, Debug)]
+pub struct BuildError {
+    kind: BuildErrorKind,
+}
+
+#[derive(Clone, Debug)]
+enum BuildErrorKind {
+    Syntax { pid: PatternID, err: regex_syntax::Error },
+    NFA(nfa::thompson::BuildError),
+}
+
+impl BuildError {
+    /// If it is known which pattern ID caused this build error to occur, then
+    /// this method returns it.
+    ///
+    /// Some errors are not associated with a particular pattern. However, any
+    /// errors that occur as part of parsing a pattern are guaranteed to be
+    /// associated with a pattern ID.
+    ///
+    /// # Example
+    ///
+    /// ```
+    /// use regex_automata::{meta::Regex, PatternID};
+    ///
+    /// let err = Regex::new_many(&["a", "b", r"\p{Foo}", "c"]).unwrap_err();
+    /// assert_eq!(Some(PatternID::must(2)), err.pattern());
+    /// ```
+    pub fn pattern(&self) -> Option<PatternID> {
+        match self.kind {
+            BuildErrorKind::Syntax { pid, .. } => Some(pid),
+            _ => None,
+        }
+    }
+
+    /// If this error occurred because the regex exceeded the configured size
+    /// limit before being built, then this returns the configured size limit.
+    ///
+    /// The limit returned is what was configured, and corresponds to the
+    /// maximum amount of heap usage in bytes.
+    pub fn size_limit(&self) -> Option<usize> {
+        match self.kind {
+            BuildErrorKind::NFA(ref err) => err.size_limit(),
+            _ => None,
+        }
+    }
+
+    /// If this error corresponds to a syntax error, then a reference to it is
+    /// returned by this method.
+    pub fn syntax_error(&self) -> Option<&regex_syntax::Error> {
+        match self.kind {
+            BuildErrorKind::Syntax { ref err, ..
} => Some(err), + _ => None, + } + } + + pub(crate) fn ast(pid: PatternID, err: ast::Error) -> BuildError { + let err = regex_syntax::Error::from(err); + BuildError { kind: BuildErrorKind::Syntax { pid, err } } + } + + pub(crate) fn hir(pid: PatternID, err: hir::Error) -> BuildError { + let err = regex_syntax::Error::from(err); + BuildError { kind: BuildErrorKind::Syntax { pid, err } } + } + + pub(crate) fn nfa(err: nfa::thompson::BuildError) -> BuildError { + BuildError { kind: BuildErrorKind::NFA(err) } + } +} + +#[cfg(feature = "std")] +impl std::error::Error for BuildError { + fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { + match self.kind { + BuildErrorKind::Syntax { ref err, .. } => Some(err), + BuildErrorKind::NFA(ref err) => Some(err), + } + } +} + +impl core::fmt::Display for BuildError { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + match self.kind { + BuildErrorKind::Syntax { pid, .. } => { + write!(f, "error parsing pattern {}", pid.as_usize()) + } + BuildErrorKind::NFA(_) => write!(f, "error building NFA"), + } + } +} + +/// An error that occurs when a search should be retried. +/// +/// This retry error distinguishes between two different failure modes. +/// +/// The first is one where potential quadratic behavior has been detected. +/// In this case, whatever optimization that led to this behavior should be +/// stopped, and the next best strategy should be used. +/// +/// The second indicates that the underlying regex engine has failed for some +/// reason. This usually occurs because either a lazy DFA's cache has become +/// ineffective or because a non-ASCII byte has been seen *and* a Unicode word +/// boundary was used in one of the patterns. In this failure case, a different +/// regex engine that won't fail in these ways (PikeVM, backtracker or the +/// one-pass DFA) should be used. +/// +/// This is an internal error only and should never bleed into the public +/// API. +#[derive(Debug)] +pub(crate) enum RetryError { + Quadratic(RetryQuadraticError), + Fail(RetryFailError), +} + +#[cfg(feature = "std")] +impl std::error::Error for RetryError {} + +impl core::fmt::Display for RetryError { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + match *self { + RetryError::Quadratic(ref err) => err.fmt(f), + RetryError::Fail(ref err) => err.fmt(f), + } + } +} + +impl From for RetryError { + fn from(merr: MatchError) -> RetryError { + RetryError::Fail(RetryFailError::from(merr)) + } +} + +/// An error that occurs when potential quadratic behavior has been detected +/// when applying either the "reverse suffix" or "reverse inner" optimizations. +/// +/// When this error occurs, callers should abandon the "reverse" optimization +/// and use a normal forward search. +#[derive(Debug)] +pub(crate) struct RetryQuadraticError(()); + +impl RetryQuadraticError { + pub(crate) fn new() -> RetryQuadraticError { + RetryQuadraticError(()) + } +} + +#[cfg(feature = "std")] +impl std::error::Error for RetryQuadraticError {} + +impl core::fmt::Display for RetryQuadraticError { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + write!(f, "regex engine gave up to avoid quadratic behavior") + } +} + +impl From for RetryError { + fn from(err: RetryQuadraticError) -> RetryError { + RetryError::Quadratic(err) + } +} + +/// An error that occurs when a regex engine "gives up" for some reason before +/// finishing a search. 
Usually this occurs because of heuristic Unicode word
+/// boundary support or because of ineffective cache usage in the lazy DFA.
+///
+/// When this error occurs, callers should retry the regex search with a
+/// different regex engine.
+///
+/// Note that this has convenient `From` impls that will automatically
+/// convert a `MatchError` into this error. This works because the meta
+/// regex engine internals guarantee that errors like `HaystackTooLong` and
+/// `UnsupportedAnchored` will never occur. The only errors left are `Quit` and
+/// `GaveUp`, which both correspond to this "failure" error.
+#[derive(Debug)]
+pub(crate) struct RetryFailError {
+    offset: usize,
+}
+
+impl RetryFailError {
+    pub(crate) fn from_offset(offset: usize) -> RetryFailError {
+        RetryFailError { offset }
+    }
+}
+
+#[cfg(feature = "std")]
+impl std::error::Error for RetryFailError {}
+
+impl core::fmt::Display for RetryFailError {
+    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
+        write!(f, "regex engine failed at offset {:?}", self.offset)
+    }
+}
+
+impl From<RetryFailError> for RetryError {
+    fn from(err: RetryFailError) -> RetryError {
+        RetryError::Fail(err)
+    }
+}
+
+impl From<MatchError> for RetryFailError {
+    fn from(merr: MatchError) -> RetryFailError {
+        use crate::util::search::MatchErrorKind::*;
+
+        match *merr.kind() {
+            Quit { offset, .. } => RetryFailError::from_offset(offset),
+            GaveUp { offset } => RetryFailError::from_offset(offset),
+            // These can never occur because we avoid them by construction
+            // or with higher level control flow logic. For example, the
+            // backtracker's wrapper will never hand out a backtracker engine
+            // when the haystack would be too long.
+            HaystackTooLong { .. } | UnsupportedAnchored { .. } => {
+                unreachable!("found impossible error in meta engine: {merr}")
+            }
+        }
+    }
+}
diff --git a/vendor/regex-automata/src/meta/limited.rs b/vendor/regex-automata/src/meta/limited.rs
new file mode 100644
index 00000000000000..ce6708c701574b
--- /dev/null
+++ b/vendor/regex-automata/src/meta/limited.rs
@@ -0,0 +1,251 @@
+/*!
+This module defines two bespoke reverse DFA searching routines. (One for the
+lazy DFA and one for the fully compiled DFA.) These routines differ from the
+usual ones by permitting the caller to specify a minimum starting position.
+That is, the search will begin at `input.end()` and will usually stop at
+`input.start()`, unless `min_start > input.start()`, in which case, the search
+will stop at `min_start`.
+
+In other words, this lets you say, "no, the search must not extend past this
+point, even if it's within the bounds of the given `Input`." And if the search
+*does* want to go past that point, it stops and returns a "may be quadratic"
+error, which indicates that the caller should retry using some other technique.
+
+These routines specifically exist to protect against quadratic behavior when
+employing the "reverse suffix" and "reverse inner" optimizations. Without the
+backstop these routines provide, it is possible for parts of the haystack to
+get re-scanned over and over again. The backstop not only prevents this, but
+*tells you when it is happening* so that you can change the strategy.
+
+Why can't we just use the normal search routines? We could use the normal
+search routines and just set the start bound on the provided `Input` to our
+`min_start` position. The problem here is that it's impossible to distinguish
+between "no match because we reached the end of input" and "determined there
+was no match well before the end of input."
The former case is what we care +about with respect to quadratic behavior. The latter case is totally fine. + +Why don't we modify the normal search routines to report the position at which +the search stops? I considered this, and I still wonder if it is indeed the +right thing to do. However, I think the straight-forward thing to do there +would be to complicate the return type signature of almost every search routine +in this crate, which I really do not want to do. It therefore might make more +sense to provide a richer way for search routines to report meta data, but that +was beyond my bandwidth to work on at the time of writing. + +See the 'opt/reverse-inner' and 'opt/reverse-suffix' benchmarks in rebar for a +real demonstration of how quadratic behavior is mitigated. +*/ + +use crate::{ + meta::error::{RetryError, RetryQuadraticError}, + HalfMatch, Input, MatchError, +}; + +#[cfg(feature = "dfa-build")] +pub(crate) fn dfa_try_search_half_rev( + dfa: &crate::dfa::dense::DFA>, + input: &Input<'_>, + min_start: usize, +) -> Result, RetryError> { + use crate::dfa::Automaton; + + let mut mat = None; + let mut sid = dfa.start_state_reverse(input)?; + if input.start() == input.end() { + dfa_eoi_rev(dfa, input, &mut sid, &mut mat)?; + return Ok(mat); + } + let mut at = input.end() - 1; + loop { + sid = dfa.next_state(sid, input.haystack()[at]); + if dfa.is_special_state(sid) { + if dfa.is_match_state(sid) { + let pattern = dfa.match_pattern(sid, 0); + // Since reverse searches report the beginning of a + // match and the beginning is inclusive (not exclusive + // like the end of a match), we add 1 to make it + // inclusive. + mat = Some(HalfMatch::new(pattern, at + 1)); + } else if dfa.is_dead_state(sid) { + return Ok(mat); + } else if dfa.is_quit_state(sid) { + return Err(MatchError::quit(input.haystack()[at], at).into()); + } + } + if at == input.start() { + break; + } + at -= 1; + if at < min_start { + trace!( + "reached position {at} which is before the previous literal \ + match, quitting to avoid quadratic behavior", + ); + return Err(RetryError::Quadratic(RetryQuadraticError::new())); + } + } + let was_dead = dfa.is_dead_state(sid); + dfa_eoi_rev(dfa, input, &mut sid, &mut mat)?; + // If we reach the beginning of the search and we could otherwise still + // potentially keep matching if there was more to match, then we actually + // return an error to indicate giving up on this optimization. Why? Because + // we can't prove that the real match begins at where we would report it. + // + // This only happens when all of the following are true: + // + // 1) We reach the starting point of our search span. + // 2) The match we found is before the starting point. + // 3) The FSM reports we could possibly find a longer match. + // + // We need (1) because otherwise the search stopped before the starting + // point and there is no possible way to find a more leftmost position. + // + // We need (2) because if the match found has an offset equal to the minimum + // possible offset, then there is no possible more leftmost match. + // + // We need (3) because if the FSM couldn't continue anyway (i.e., it's in + // a dead state), then we know we couldn't find anything more leftmost + // than what we have. (We have to check the state we were in prior to the + // EOI transition since the EOI transition will usually bring us to a dead + // state by virtue of it represents the end-of-input.) 
+ if at == input.start() + && mat.map_or(false, |m| m.offset() > input.start()) + && !was_dead + { + trace!( + "reached beginning of search at offset {at} without hitting \ + a dead state, quitting to avoid potential false positive match", + ); + return Err(RetryError::Quadratic(RetryQuadraticError::new())); + } + Ok(mat) +} + +#[cfg(feature = "hybrid")] +pub(crate) fn hybrid_try_search_half_rev( + dfa: &crate::hybrid::dfa::DFA, + cache: &mut crate::hybrid::dfa::Cache, + input: &Input<'_>, + min_start: usize, +) -> Result, RetryError> { + let mut mat = None; + let mut sid = dfa.start_state_reverse(cache, input)?; + if input.start() == input.end() { + hybrid_eoi_rev(dfa, cache, input, &mut sid, &mut mat)?; + return Ok(mat); + } + let mut at = input.end() - 1; + loop { + sid = dfa + .next_state(cache, sid, input.haystack()[at]) + .map_err(|_| MatchError::gave_up(at))?; + if sid.is_tagged() { + if sid.is_match() { + let pattern = dfa.match_pattern(cache, sid, 0); + // Since reverse searches report the beginning of a + // match and the beginning is inclusive (not exclusive + // like the end of a match), we add 1 to make it + // inclusive. + mat = Some(HalfMatch::new(pattern, at + 1)); + } else if sid.is_dead() { + return Ok(mat); + } else if sid.is_quit() { + return Err(MatchError::quit(input.haystack()[at], at).into()); + } + } + if at == input.start() { + break; + } + at -= 1; + if at < min_start { + trace!( + "reached position {at} which is before the previous literal \ + match, quitting to avoid quadratic behavior", + ); + return Err(RetryError::Quadratic(RetryQuadraticError::new())); + } + } + let was_dead = sid.is_dead(); + hybrid_eoi_rev(dfa, cache, input, &mut sid, &mut mat)?; + // See the comments in the full DFA routine above for why we need this. + if at == input.start() + && mat.map_or(false, |m| m.offset() > input.start()) + && !was_dead + { + trace!( + "reached beginning of search at offset {at} without hitting \ + a dead state, quitting to avoid potential false positive match", + ); + return Err(RetryError::Quadratic(RetryQuadraticError::new())); + } + Ok(mat) +} + +#[cfg(feature = "dfa-build")] +#[cfg_attr(feature = "perf-inline", inline(always))] +fn dfa_eoi_rev( + dfa: &crate::dfa::dense::DFA>, + input: &Input<'_>, + sid: &mut crate::util::primitives::StateID, + mat: &mut Option, +) -> Result<(), MatchError> { + use crate::dfa::Automaton; + + let sp = input.get_span(); + if sp.start > 0 { + let byte = input.haystack()[sp.start - 1]; + *sid = dfa.next_state(*sid, byte); + if dfa.is_match_state(*sid) { + let pattern = dfa.match_pattern(*sid, 0); + *mat = Some(HalfMatch::new(pattern, sp.start)); + } else if dfa.is_quit_state(*sid) { + return Err(MatchError::quit(byte, sp.start - 1)); + } + } else { + *sid = dfa.next_eoi_state(*sid); + if dfa.is_match_state(*sid) { + let pattern = dfa.match_pattern(*sid, 0); + *mat = Some(HalfMatch::new(pattern, 0)); + } + // N.B. We don't have to check 'is_quit' here because the EOI + // transition can never lead to a quit state. 
+ debug_assert!(!dfa.is_quit_state(*sid)); + } + Ok(()) +} + +#[cfg(feature = "hybrid")] +#[cfg_attr(feature = "perf-inline", inline(always))] +fn hybrid_eoi_rev( + dfa: &crate::hybrid::dfa::DFA, + cache: &mut crate::hybrid::dfa::Cache, + input: &Input<'_>, + sid: &mut crate::hybrid::LazyStateID, + mat: &mut Option, +) -> Result<(), MatchError> { + let sp = input.get_span(); + if sp.start > 0 { + let byte = input.haystack()[sp.start - 1]; + *sid = dfa + .next_state(cache, *sid, byte) + .map_err(|_| MatchError::gave_up(sp.start))?; + if sid.is_match() { + let pattern = dfa.match_pattern(cache, *sid, 0); + *mat = Some(HalfMatch::new(pattern, sp.start)); + } else if sid.is_quit() { + return Err(MatchError::quit(byte, sp.start - 1)); + } + } else { + *sid = dfa + .next_eoi_state(cache, *sid) + .map_err(|_| MatchError::gave_up(sp.start))?; + if sid.is_match() { + let pattern = dfa.match_pattern(cache, *sid, 0); + *mat = Some(HalfMatch::new(pattern, 0)); + } + // N.B. We don't have to check 'is_quit' here because the EOI + // transition can never lead to a quit state. + debug_assert!(!sid.is_quit()); + } + Ok(()) +} diff --git a/vendor/regex-automata/src/meta/literal.rs b/vendor/regex-automata/src/meta/literal.rs new file mode 100644 index 00000000000000..fac68d00539b8f --- /dev/null +++ b/vendor/regex-automata/src/meta/literal.rs @@ -0,0 +1,81 @@ +use alloc::{vec, vec::Vec}; + +use regex_syntax::hir::Hir; + +use crate::{meta::regex::RegexInfo, util::search::MatchKind}; + +/// Pull out an alternation of literals from the given sequence of HIR +/// expressions. +/// +/// There are numerous ways for this to fail. Generally, this only applies +/// to regexes of the form 'foo|bar|baz|...|quux'. It can also fail if there +/// are "too few" alternates, in which case, the regex engine is likely faster. +/// +/// And currently, this only returns something when 'hirs.len() == 1'. +pub(crate) fn alternation_literals( + info: &RegexInfo, + hirs: &[&Hir], +) -> Option>> { + use regex_syntax::hir::{HirKind, Literal}; + + // Might as well skip the work below if we know we can't build an + // Aho-Corasick searcher. + if !cfg!(feature = "perf-literal-multisubstring") { + return None; + } + // This is pretty hacky, but basically, if `is_alternation_literal` is + // true, then we can make several assumptions about the structure of our + // HIR. This is what justifies the `unreachable!` statements below. + if hirs.len() != 1 + || !info.props()[0].look_set().is_empty() + || info.props()[0].explicit_captures_len() > 0 + || !info.props()[0].is_alternation_literal() + || info.config().get_match_kind() != MatchKind::LeftmostFirst + { + return None; + } + let hir = &hirs[0]; + let alts = match *hir.kind() { + HirKind::Alternation(ref alts) => alts, + _ => return None, // one literal isn't worth it + }; + + let mut lits = vec![]; + for alt in alts { + let mut lit = vec![]; + match *alt.kind() { + HirKind::Literal(Literal(ref bytes)) => { + lit.extend_from_slice(bytes) + } + HirKind::Concat(ref exprs) => { + for e in exprs { + match *e.kind() { + HirKind::Literal(Literal(ref bytes)) => { + lit.extend_from_slice(bytes); + } + _ => unreachable!("expected literal, got {e:?}"), + } + } + } + _ => unreachable!("expected literal or concat, got {alt:?}"), + } + lits.push(lit); + } + // Why do this? Well, when the number of literals is small, it's likely + // that we'll use the lazy DFA which is in turn likely to be faster than + // Aho-Corasick in such cases. 
Primarily because Aho-Corasick doesn't have + // a "lazy DFA" but either a contiguous NFA or a full DFA. We rarely use + // the latter because it is so hungry (in time and space), and the former + // is decently fast, but not as fast as a well oiled lazy DFA. + // + // However, once the number starts getting large, the lazy DFA is likely + // to start thrashing because of the modest default cache size. When + // exactly does this happen? Dunno. But at whatever point that is (we make + // a guess below based on ad hoc benchmarking), we'll want to cut over to + // Aho-Corasick, where even the contiguous NFA is likely to do much better. + if lits.len() < 3000 { + debug!("skipping Aho-Corasick because there are too few literals"); + return None; + } + Some(lits) +} diff --git a/vendor/regex-automata/src/meta/mod.rs b/vendor/regex-automata/src/meta/mod.rs new file mode 100644 index 00000000000000..01f430fcb79949 --- /dev/null +++ b/vendor/regex-automata/src/meta/mod.rs @@ -0,0 +1,62 @@ +/*! +Provides a regex matcher that composes several other regex matchers +automatically. + +This module is home to a meta [`Regex`], which provides a convenient high +level API for executing regular expressions in linear time. + +# Comparison with the `regex` crate + +A meta `Regex` is the implementation used directly by the `regex` crate. +Indeed, the `regex` crate API is essentially just a light wrapper over a meta +`Regex`. This means that if you need the full flexibility offered by this +API, then you should be able to switch to using this API directly without +any changes in match semantics or syntax. However, there are some API level +differences: + +* The `regex` crate API returns match objects that include references to the +haystack itself, which in turn makes it easy to access the matching strings +without having to slice the haystack yourself. In contrast, a meta `Regex` +returns match objects that only have offsets in them. +* At time of writing, a meta `Regex` doesn't have some of the convenience +routines that the `regex` crate has, such as replacements. Note though that +[`Captures::interpolate_string`](crate::util::captures::Captures::interpolate_string) +will handle the replacement string interpolation for you. +* A meta `Regex` supports the [`Input`](crate::Input) abstraction, which +provides a way to configure a search in more ways than is supported by the +`regex` crate. For example, [`Input::anchored`](crate::Input::anchored) can +be used to run an anchored search, regardless of whether the pattern is itself +anchored with a `^`. +* A meta `Regex` supports multi-pattern searching everywhere. +Indeed, every [`Match`](crate::Match) returned by the search APIs +include a [`PatternID`](crate::PatternID) indicating which pattern +matched. In the single pattern case, all matches correspond to +[`PatternID::ZERO`](crate::PatternID::ZERO). In contrast, the `regex` crate +has distinct `Regex` and a `RegexSet` APIs. The former only supports a single +pattern, while the latter supports multiple patterns but cannot report the +offsets of a match. +* A meta `Regex` provides the explicit capability of bypassing its internal +memory pool for automatically acquiring mutable scratch space required by its +internal regex engines. Namely, a [`Cache`] can be explicitly provided to lower +level routines such as [`Regex::search_with`]. 
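+
+As a rough sketch of that last point (assuming the `Regex::create_cache` and
+`Regex::search_with` APIs), explicitly managing the scratch space looks
+roughly like this:
+
+```
+use regex_automata::{meta::Regex, Input, Match};
+
+let re = Regex::new(r"[0-9]+")?;
+// Create the mutable scratch space up front instead of letting the Regex
+// borrow one from its internal thread safe pool on every search.
+let mut cache = re.create_cache();
+let input = Input::new("abc 123 def");
+assert_eq!(Some(Match::must(0, 4..7)), re.search_with(&mut cache, &input));
+
+# Ok::<(), Box<dyn std::error::Error>>(())
+```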
+
+*/
+
+pub use self::{
+    error::BuildError,
+    regex::{
+        Builder, Cache, CapturesMatches, Config, FindMatches, Regex, Split,
+        SplitN,
+    },
+};
+
+mod error;
+#[cfg(any(feature = "dfa-build", feature = "hybrid"))]
+mod limited;
+mod literal;
+mod regex;
+mod reverse_inner;
+#[cfg(any(feature = "dfa-build", feature = "hybrid"))]
+mod stopat;
+mod strategy;
+mod wrappers;
diff --git a/vendor/regex-automata/src/meta/regex.rs b/vendor/regex-automata/src/meta/regex.rs
new file mode 100644
index 00000000000000..21c1a3a31253c9
--- /dev/null
+++ b/vendor/regex-automata/src/meta/regex.rs
@@ -0,0 +1,3706 @@
+use core::{
+    borrow::Borrow,
+    panic::{RefUnwindSafe, UnwindSafe},
+};
+
+use alloc::{boxed::Box, sync::Arc, vec, vec::Vec};
+
+use regex_syntax::{
+    ast,
+    hir::{self, Hir},
+};
+
+use crate::{
+    meta::{
+        error::BuildError,
+        strategy::{self, Strategy},
+        wrappers,
+    },
+    nfa::thompson::WhichCaptures,
+    util::{
+        captures::{Captures, GroupInfo},
+        iter,
+        pool::{Pool, PoolGuard},
+        prefilter::Prefilter,
+        primitives::{NonMaxUsize, PatternID},
+        search::{HalfMatch, Input, Match, MatchKind, PatternSet, Span},
+    },
+};
+
+/// A type alias for our pool of meta::Cache that fixes the type parameters to
+/// what we use for the meta regex below.
+type CachePool = Pool<Cache, CachePoolFn>;
+
+/// Same as above, but for the guard returned by a pool.
+type CachePoolGuard<'a> = PoolGuard<'a, Cache, CachePoolFn>;
+
+/// The type of the closure we use to create new caches. We need to spell out
+/// all of the marker traits or else we risk leaking !MARKER impls.
+type CachePoolFn =
+    Box<dyn Fn() -> Cache + Send + Sync + UnwindSafe + RefUnwindSafe>;
+
+/// A regex matcher that works by composing several other regex matchers
+/// automatically.
+///
+/// In effect, a meta regex papers over a lot of the quirks or performance
+/// problems in each of the regex engines in this crate. Its goal is to provide
+/// an infallible and simple API that "just does the right thing" in the common
+/// case.
+///
+/// A meta regex is the implementation of a `Regex` in the `regex` crate.
+/// Indeed, the `regex` crate API is essentially just a light wrapper over
+/// this type. This includes the `regex` crate's `RegexSet` API!
+///
+/// # Composition
+///
+/// This is called a "meta" matcher precisely because it uses other regex
+/// matchers to provide a convenient high level regex API. Here are some
+/// examples of how other regex matchers are composed:
+///
+/// * When calling [`Regex::captures`], instead of immediately
+/// running a slower but more capable regex engine like the
+/// [`PikeVM`](crate::nfa::thompson::pikevm::PikeVM), the meta regex engine
+/// will usually first look for the bounds of a match with a higher throughput
+/// regex engine like a [lazy DFA](crate::hybrid). Only when a match is found
+/// is a slower engine like `PikeVM` used to find the matching span for each
+/// capture group.
+/// * While higher throughput engines like the lazy DFA cannot handle
+/// Unicode word boundaries in general, they can still be used on pure ASCII
+/// haystacks by pretending that Unicode word boundaries are just plain ASCII
+/// word boundaries. However, if a haystack is not ASCII, the meta regex engine
+/// will automatically switch to a (possibly slower) regex engine that supports
+/// Unicode word boundaries in general.
+/// * In some cases where a regex pattern is just a simple literal or a small
+/// set of literals, an actual regex engine won't be used at all.
Instead, +/// substring or multi-substring search algorithms will be employed. +/// +/// There are many other forms of composition happening too, but the above +/// should give a general idea. In particular, it may perhaps be surprising +/// that *multiple* regex engines might get executed for a single search. That +/// is, the decision of what regex engine to use is not _just_ based on the +/// pattern, but also based on the dynamic execution of the search itself. +/// +/// The primary reason for this composition is performance. The fundamental +/// tension is that the faster engines tend to be less capable, and the more +/// capable engines tend to be slower. +/// +/// Note that the forms of composition that are allowed are determined by +/// compile time crate features and configuration. For example, if the `hybrid` +/// feature isn't enabled, or if [`Config::hybrid`] has been disabled, then the +/// meta regex engine will never use a lazy DFA. +/// +/// # Synchronization and cloning +/// +/// Most of the regex engines in this crate require some kind of mutable +/// "scratch" space to read and write from while performing a search. Since +/// a meta regex composes these regex engines, a meta regex also requires +/// mutable scratch space. This scratch space is called a [`Cache`]. +/// +/// Most regex engines _also_ usually have a read-only component, typically +/// a [Thompson `NFA`](crate::nfa::thompson::NFA). +/// +/// In order to make the `Regex` API convenient, most of the routines hide +/// the fact that a `Cache` is needed at all. To achieve this, a [memory +/// pool](crate::util::pool::Pool) is used internally to retrieve `Cache` +/// values in a thread safe way that also permits reuse. This in turn implies +/// that every such search call requires some form of synchronization. Usually +/// this synchronization is fast enough to not notice, but in some cases, it +/// can be a bottleneck. This typically occurs when all of the following are +/// true: +/// +/// * The same `Regex` is shared across multiple threads simultaneously, +/// usually via a [`util::lazy::Lazy`](crate::util::lazy::Lazy) or something +/// similar from the `once_cell` or `lazy_static` crates. +/// * The primary unit of work in each thread is a regex search. +/// * Searches are run on very short haystacks. +/// +/// This particular case can lead to high contention on the pool used by a +/// `Regex` internally, which can in turn increase latency to a noticeable +/// effect. This cost can be mitigated in one of the following ways: +/// +/// * Use a distinct copy of a `Regex` in each thread, usually by cloning it. +/// Cloning a `Regex` _does not_ do a deep copy of its read-only component. +/// But it does lead to each `Regex` having its own memory pool, which in +/// turn eliminates the problem of contention. In general, this technique should +/// not result in any additional memory usage when compared to sharing the same +/// `Regex` across multiple threads simultaneously. +/// * Use lower level APIs, like [`Regex::search_with`], which permit passing +/// a `Cache` explicitly. In this case, it is up to you to determine how best +/// to provide a `Cache`. For example, you might put a `Cache` in thread-local +/// storage if your use case allows for it. +/// +/// Overall, this is an issue that happens rarely in practice, but it can +/// happen. 
+/// +/// # Warning: spin-locks may be used in alloc-only mode +/// +/// When this crate is built without the `std` feature and the high level APIs +/// on a `Regex` are used, then a spin-lock will be used to synchronize access +/// to an internal pool of `Cache` values. This may be undesirable because +/// a spin-lock is [effectively impossible to implement correctly in user +/// space][spinlocks-are-bad]. That is, more concretely, the spin-lock could +/// result in a deadlock. +/// +/// [spinlocks-are-bad]: https://matklad.github.io/2020/01/02/spinlocks-considered-harmful.html +/// +/// If one wants to avoid the use of spin-locks when the `std` feature is +/// disabled, then you must use APIs that accept a `Cache` value explicitly. +/// For example, [`Regex::search_with`]. +/// +/// # Example +/// +/// ``` +/// use regex_automata::meta::Regex; +/// +/// let re = Regex::new(r"^[0-9]{4}-[0-9]{2}-[0-9]{2}$")?; +/// assert!(re.is_match("2010-03-14")); +/// +/// # Ok::<(), Box>(()) +/// ``` +/// +/// # Example: anchored search +/// +/// This example shows how to use [`Input::anchored`] to run an anchored +/// search, even when the regex pattern itself isn't anchored. An anchored +/// search guarantees that if a match is found, then the start offset of the +/// match corresponds to the offset at which the search was started. +/// +/// ``` +/// use regex_automata::{meta::Regex, Anchored, Input, Match}; +/// +/// let re = Regex::new(r"\bfoo\b")?; +/// let input = Input::new("xx foo xx").range(3..).anchored(Anchored::Yes); +/// // The offsets are in terms of the original haystack. +/// assert_eq!(Some(Match::must(0, 3..6)), re.find(input)); +/// +/// // Notice that no match occurs here, because \b still takes the +/// // surrounding context into account, even if it means looking back +/// // before the start of your search. +/// let hay = "xxfoo xx"; +/// let input = Input::new(hay).range(2..).anchored(Anchored::Yes); +/// assert_eq!(None, re.find(input)); +/// // Indeed, you cannot achieve the above by simply slicing the +/// // haystack itself, since the regex engine can't see the +/// // surrounding context. This is why 'Input' permits setting +/// // the bounds of a search! +/// let input = Input::new(&hay[2..]).anchored(Anchored::Yes); +/// // WRONG! +/// assert_eq!(Some(Match::must(0, 0..3)), re.find(input)); +/// +/// # Ok::<(), Box>(()) +/// ``` +/// +/// # Example: earliest search +/// +/// This example shows how to use [`Input::earliest`] to run a search that +/// might stop before finding the typical leftmost match. +/// +/// ``` +/// use regex_automata::{meta::Regex, Anchored, Input, Match}; +/// +/// let re = Regex::new(r"[a-z]{3}|b")?; +/// let input = Input::new("abc").earliest(true); +/// assert_eq!(Some(Match::must(0, 1..2)), re.find(input)); +/// +/// // Note that "earliest" isn't really a match semantic unto itself. +/// // Instead, it is merely an instruction to whatever regex engine +/// // gets used internally to quit as soon as it can. For example, +/// // this regex uses a different search technique, and winds up +/// // producing a different (but valid) match! 
+/// let re = Regex::new(r"abc|b")?; +/// let input = Input::new("abc").earliest(true); +/// assert_eq!(Some(Match::must(0, 0..3)), re.find(input)); +/// +/// # Ok::<(), Box>(()) +/// ``` +/// +/// # Example: change the line terminator +/// +/// This example shows how to enable multi-line mode by default and change +/// the line terminator to the NUL byte: +/// +/// ``` +/// use regex_automata::{meta::Regex, util::syntax, Match}; +/// +/// let re = Regex::builder() +/// .syntax(syntax::Config::new().multi_line(true)) +/// .configure(Regex::config().line_terminator(b'\x00')) +/// .build(r"^foo$")?; +/// let hay = "\x00foo\x00"; +/// assert_eq!(Some(Match::must(0, 1..4)), re.find(hay)); +/// +/// # Ok::<(), Box>(()) +/// ``` +#[derive(Debug)] +pub struct Regex { + /// The actual regex implementation. + imp: Arc, + /// A thread safe pool of caches. + /// + /// For the higher level search APIs, a `Cache` is automatically plucked + /// from this pool before running a search. The lower level `with` methods + /// permit the caller to provide their own cache, thereby bypassing + /// accesses to this pool. + /// + /// Note that we put this outside the `Arc` so that cloning a `Regex` + /// results in creating a fresh `CachePool`. This in turn permits callers + /// to clone regexes into separate threads where each such regex gets + /// the pool's "thread owner" optimization. Otherwise, if one shares the + /// `Regex` directly, then the pool will go through a slower mutex path for + /// all threads except for the "owner." + pool: CachePool, +} + +/// The internal implementation of `Regex`, split out so that it can be wrapped +/// in an `Arc`. +#[derive(Debug)] +struct RegexI { + /// The core matching engine. + /// + /// Why is this reference counted when RegexI is already wrapped in an Arc? + /// Well, we need to capture this in a closure to our `Pool` below in order + /// to create new `Cache` values when needed. So since it needs to be in + /// two places, we make it reference counted. + /// + /// We make `RegexI` itself reference counted too so that `Regex` itself + /// stays extremely small and very cheap to clone. + strat: Arc, + /// Metadata about the regexes driving the strategy. The metadata is also + /// usually stored inside the strategy too, but we put it here as well + /// so that we can get quick access to it (without virtual calls) before + /// executing the regex engine. For example, we use this metadata to + /// detect a subset of cases where we know a match is impossible, and can + /// thus avoid calling into the strategy at all. + /// + /// Since `RegexInfo` is stored in multiple places, it is also reference + /// counted. + info: RegexInfo, +} + +/// Convenience constructors for a `Regex` using the default configuration. +impl Regex { + /// Builds a `Regex` from a single pattern string using the default + /// configuration. + /// + /// If there was a problem parsing the pattern or a problem turning it into + /// a regex matcher, then an error is returned. + /// + /// If you want to change the configuration of a `Regex`, use a [`Builder`] + /// with a [`Config`]. 
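+    ///
+    /// As a sketch, the same construction routed through a [`Builder`] looks
+    /// roughly like this (the specific configuration option shown is purely
+    /// illustrative):
+    ///
+    /// ```
+    /// use regex_automata::meta::Regex;
+    ///
+    /// let re = Regex::builder()
+    ///     .configure(Regex::config().utf8_empty(false))
+    ///     .build(r"foo[0-9]+")?;
+    /// assert!(re.is_match("foo123"));
+    ///
+    /// # Ok::<(), Box<dyn std::error::Error>>(())
+    /// ```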
+    ///
+    /// # Example
+    ///
+    /// ```
+    /// use regex_automata::{meta::Regex, Match};
+    ///
+    /// let re = Regex::new(r"(?Rm)^foo$")?;
+    /// let hay = "\r\nfoo\r\n";
+    /// assert_eq!(Some(Match::must(0, 2..5)), re.find(hay));
+    ///
+    /// # Ok::<(), Box<dyn std::error::Error>>(())
+    /// ```
+    pub fn new(pattern: &str) -> Result<Regex, BuildError> {
+        Regex::builder().build(pattern)
+    }
+
+    /// Builds a `Regex` from many pattern strings using the default
+    /// configuration.
+    ///
+    /// If there was a problem parsing any of the patterns or a problem turning
+    /// them into a regex matcher, then an error is returned.
+    ///
+    /// If you want to change the configuration of a `Regex`, use a [`Builder`]
+    /// with a [`Config`].
+    ///
+    /// # Example: simple lexer
+    ///
+    /// This simplistic example leverages the multi-pattern support to build a
+    /// simple little lexer. The pattern ID in the match tells you which regex
+    /// matched, which in turn might be used to map back to the "type" of the
+    /// token returned by the lexer.
+    ///
+    /// ```
+    /// use regex_automata::{meta::Regex, Match};
+    ///
+    /// let re = Regex::new_many(&[
+    ///     r"[[:space:]]",
+    ///     r"[A-Za-z0-9][A-Za-z0-9_]+",
+    ///     r"->",
+    ///     r".",
+    /// ])?;
+    /// let haystack = "fn is_boss(bruce: i32, springsteen: String) -> bool;";
+    /// let matches: Vec<Match> = re.find_iter(haystack).collect();
+    /// assert_eq!(matches, vec![
+    ///     Match::must(1, 0..2), // 'fn'
+    ///     Match::must(0, 2..3), // ' '
+    ///     Match::must(1, 3..10), // 'is_boss'
+    ///     Match::must(3, 10..11), // '('
+    ///     Match::must(1, 11..16), // 'bruce'
+    ///     Match::must(3, 16..17), // ':'
+    ///     Match::must(0, 17..18), // ' '
+    ///     Match::must(1, 18..21), // 'i32'
+    ///     Match::must(3, 21..22), // ','
+    ///     Match::must(0, 22..23), // ' '
+    ///     Match::must(1, 23..34), // 'springsteen'
+    ///     Match::must(3, 34..35), // ':'
+    ///     Match::must(0, 35..36), // ' '
+    ///     Match::must(1, 36..42), // 'String'
+    ///     Match::must(3, 42..43), // ')'
+    ///     Match::must(0, 43..44), // ' '
+    ///     Match::must(2, 44..46), // '->'
+    ///     Match::must(0, 46..47), // ' '
+    ///     Match::must(1, 47..51), // 'bool'
+    ///     Match::must(3, 51..52), // ';'
+    /// ]);
+    ///
+    /// # Ok::<(), Box<dyn std::error::Error>>(())
+    /// ```
+    ///
+    /// One can write a lexer like the above using a regex like
+    /// `(?P<space>[[:space:]])|(?P<ident>[A-Za-z0-9][A-Za-z0-9_]+)|...`,
+    /// but then you need to ask which capture group matched to determine
+    /// which branch in the regex matched, and thus, which token the match
+    /// corresponds to. In contrast, the above example includes the pattern ID
+    /// in the match. There's no need to use capture groups at all.
+    ///
+    /// # Example: finding the pattern that caused an error
+    ///
+    /// When a syntax error occurs, it is possible to ask which pattern
+    /// caused the syntax error.
+    ///
+    /// ```
+    /// use regex_automata::{meta::Regex, PatternID};
+    ///
+    /// let err = Regex::new_many(&["a", "b", r"\p{Foo}", "c"]).unwrap_err();
+    /// assert_eq!(Some(PatternID::must(2)), err.pattern());
+    /// ```
+    ///
+    /// # Example: zero patterns is valid
+    ///
+    /// Building a regex with zero patterns results in a regex that never
+    /// matches anything. Because this routine is generic, passing an empty
+    /// slice usually requires a turbo-fish (or something else to help type
+    /// inference).
+ /// + /// ``` + /// use regex_automata::{meta::Regex, util::syntax, Match}; + /// + /// let re = Regex::new_many::<&str>(&[])?; + /// assert_eq!(None, re.find("")); + /// + /// # Ok::<(), Box>(()) + /// ``` + pub fn new_many>( + patterns: &[P], + ) -> Result { + Regex::builder().build_many(patterns) + } + + /// Return a default configuration for a `Regex`. + /// + /// This is a convenience routine to avoid needing to import the [`Config`] + /// type when customizing the construction of a `Regex`. + /// + /// # Example: lower the NFA size limit + /// + /// In some cases, the default size limit might be too big. The size limit + /// can be lowered, which will prevent large regex patterns from compiling. + /// + /// ``` + /// # if cfg!(miri) { return Ok(()); } // miri takes too long + /// use regex_automata::meta::Regex; + /// + /// let result = Regex::builder() + /// .configure(Regex::config().nfa_size_limit(Some(20 * (1<<10)))) + /// // Not even 20KB is enough to build a single large Unicode class! + /// .build(r"\pL"); + /// assert!(result.is_err()); + /// + /// # Ok::<(), Box>(()) + /// ``` + pub fn config() -> Config { + Config::new() + } + + /// Return a builder for configuring the construction of a `Regex`. + /// + /// This is a convenience routine to avoid needing to import the + /// [`Builder`] type in common cases. + /// + /// # Example: change the line terminator + /// + /// This example shows how to enable multi-line mode by default and change + /// the line terminator to the NUL byte: + /// + /// ``` + /// use regex_automata::{meta::Regex, util::syntax, Match}; + /// + /// let re = Regex::builder() + /// .syntax(syntax::Config::new().multi_line(true)) + /// .configure(Regex::config().line_terminator(b'\x00')) + /// .build(r"^foo$")?; + /// let hay = "\x00foo\x00"; + /// assert_eq!(Some(Match::must(0, 1..4)), re.find(hay)); + /// + /// # Ok::<(), Box>(()) + /// ``` + pub fn builder() -> Builder { + Builder::new() + } +} + +/// High level convenience routines for using a regex to search a haystack. +impl Regex { + /// Returns true if and only if this regex matches the given haystack. + /// + /// This routine may short circuit if it knows that scanning future input + /// will never lead to a different result. (Consider how this might make + /// a difference given the regex `a+` on the haystack `aaaaaaaaaaaaaaa`. + /// This routine _may_ stop after it sees the first `a`, but routines like + /// `find` need to continue searching because `+` is greedy by default.) + /// + /// # Example + /// + /// ``` + /// use regex_automata::meta::Regex; + /// + /// let re = Regex::new("foo[0-9]+bar")?; + /// + /// assert!(re.is_match("foo12345bar")); + /// assert!(!re.is_match("foobar")); + /// + /// # Ok::<(), Box>(()) + /// ``` + /// + /// # Example: consistency with search APIs + /// + /// `is_match` is guaranteed to return `true` whenever `find` returns a + /// match. This includes searches that are executed entirely within a + /// codepoint: + /// + /// ``` + /// use regex_automata::{meta::Regex, Input}; + /// + /// let re = Regex::new("a*")?; + /// + /// // This doesn't match because the default configuration bans empty + /// // matches from splitting a codepoint. 
+ /// assert!(!re.is_match(Input::new("☃").span(1..2))); + /// assert_eq!(None, re.find(Input::new("☃").span(1..2))); + /// + /// # Ok::<(), Box>(()) + /// ``` + /// + /// Notice that when UTF-8 mode is disabled, then the above reports a + /// match because the restriction against zero-width matches that split a + /// codepoint has been lifted: + /// + /// ``` + /// use regex_automata::{meta::Regex, Input, Match}; + /// + /// let re = Regex::builder() + /// .configure(Regex::config().utf8_empty(false)) + /// .build("a*")?; + /// + /// assert!(re.is_match(Input::new("☃").span(1..2))); + /// assert_eq!( + /// Some(Match::must(0, 1..1)), + /// re.find(Input::new("☃").span(1..2)), + /// ); + /// + /// # Ok::<(), Box>(()) + /// ``` + /// + /// A similar idea applies when using line anchors with CRLF mode enabled, + /// which prevents them from matching between a `\r` and a `\n`. + /// + /// ``` + /// use regex_automata::{meta::Regex, Input, Match}; + /// + /// let re = Regex::new(r"(?Rm:$)")?; + /// assert!(!re.is_match(Input::new("\r\n").span(1..1))); + /// // A regular line anchor, which only considers \n as a + /// // line terminator, will match. + /// let re = Regex::new(r"(?m:$)")?; + /// assert!(re.is_match(Input::new("\r\n").span(1..1))); + /// + /// # Ok::<(), Box>(()) + /// ``` + #[inline] + pub fn is_match<'h, I: Into>>(&self, input: I) -> bool { + let input = input.into().earliest(true); + if self.imp.info.is_impossible(&input) { + return false; + } + let mut guard = self.pool.get(); + let result = self.imp.strat.is_match(&mut guard, &input); + // See 'Regex::search' for why we put the guard back explicitly. + PoolGuard::put(guard); + result + } + + /// Executes a leftmost search and returns the first match that is found, + /// if one exists. + /// + /// # Example + /// + /// ``` + /// use regex_automata::{meta::Regex, Match}; + /// + /// let re = Regex::new("foo[0-9]+")?; + /// assert_eq!(Some(Match::must(0, 0..8)), re.find("foo12345")); + /// + /// # Ok::<(), Box>(()) + /// ``` + #[inline] + pub fn find<'h, I: Into>>(&self, input: I) -> Option { + self.search(&input.into()) + } + + /// Executes a leftmost forward search and writes the spans of capturing + /// groups that participated in a match into the provided [`Captures`] + /// value. If no match was found, then [`Captures::is_match`] is guaranteed + /// to return `false`. + /// + /// # Example + /// + /// ``` + /// use regex_automata::{meta::Regex, Span}; + /// + /// let re = Regex::new(r"^([0-9]{4})-([0-9]{2})-([0-9]{2})$")?; + /// let mut caps = re.create_captures(); + /// + /// re.captures("2010-03-14", &mut caps); + /// assert!(caps.is_match()); + /// assert_eq!(Some(Span::from(0..4)), caps.get_group(1)); + /// assert_eq!(Some(Span::from(5..7)), caps.get_group(2)); + /// assert_eq!(Some(Span::from(8..10)), caps.get_group(3)); + /// + /// # Ok::<(), Box>(()) + /// ``` + #[inline] + pub fn captures<'h, I: Into>>( + &self, + input: I, + caps: &mut Captures, + ) { + self.search_captures(&input.into(), caps) + } + + /// Returns an iterator over all non-overlapping leftmost matches in + /// the given haystack. If no match exists, then the iterator yields no + /// elements. 
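+    ///
+    /// Since the iterator is lazy, matches can also be consumed without
+    /// collecting them into a `Vec` first. A minimal sketch (the pattern and
+    /// haystack here are arbitrary):
+    ///
+    /// ```
+    /// use regex_automata::meta::Regex;
+    ///
+    /// let re = Regex::new("foo[0-9]+")?;
+    /// // Count the matches without materializing them.
+    /// assert_eq!(3, re.find_iter("foo1 foo12 foo123").count());
+    ///
+    /// # Ok::<(), Box<dyn std::error::Error>>(())
+    /// ```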
+ /// + /// # Example + /// + /// ``` + /// use regex_automata::{meta::Regex, Match}; + /// + /// let re = Regex::new("foo[0-9]+")?; + /// let haystack = "foo1 foo12 foo123"; + /// let matches: Vec = re.find_iter(haystack).collect(); + /// assert_eq!(matches, vec![ + /// Match::must(0, 0..4), + /// Match::must(0, 5..10), + /// Match::must(0, 11..17), + /// ]); + /// # Ok::<(), Box>(()) + /// ``` + #[inline] + pub fn find_iter<'r, 'h, I: Into>>( + &'r self, + input: I, + ) -> FindMatches<'r, 'h> { + let cache = self.pool.get(); + let it = iter::Searcher::new(input.into()); + FindMatches { re: self, cache, it } + } + + /// Returns an iterator over all non-overlapping `Captures` values. If no + /// match exists, then the iterator yields no elements. + /// + /// This yields the same matches as [`Regex::find_iter`], but it includes + /// the spans of all capturing groups that participate in each match. + /// + /// **Tip:** See [`util::iter::Searcher`](crate::util::iter::Searcher) for + /// how to correctly iterate over all matches in a haystack while avoiding + /// the creation of a new `Captures` value for every match. (Which you are + /// forced to do with an `Iterator`.) + /// + /// # Example + /// + /// ``` + /// use regex_automata::{meta::Regex, Span}; + /// + /// let re = Regex::new("foo(?P[0-9]+)")?; + /// + /// let haystack = "foo1 foo12 foo123"; + /// let matches: Vec = re + /// .captures_iter(haystack) + /// // The unwrap is OK since 'numbers' matches if the pattern matches. + /// .map(|caps| caps.get_group_by_name("numbers").unwrap()) + /// .collect(); + /// assert_eq!(matches, vec![ + /// Span::from(3..4), + /// Span::from(8..10), + /// Span::from(14..17), + /// ]); + /// # Ok::<(), Box>(()) + /// ``` + #[inline] + pub fn captures_iter<'r, 'h, I: Into>>( + &'r self, + input: I, + ) -> CapturesMatches<'r, 'h> { + let cache = self.pool.get(); + let caps = self.create_captures(); + let it = iter::Searcher::new(input.into()); + CapturesMatches { re: self, cache, caps, it } + } + + /// Returns an iterator of spans of the haystack given, delimited by a + /// match of the regex. Namely, each element of the iterator corresponds to + /// a part of the haystack that *isn't* matched by the regular expression. 
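+    ///
+    /// If the empty spans discussed in the examples below are not wanted,
+    /// one option is to filter them out after the fact. A minimal sketch
+    /// (the pattern and haystack here are arbitrary):
+    ///
+    /// ```
+    /// use regex_automata::meta::Regex;
+    ///
+    /// let re = Regex::new(r" +")?;
+    /// let hay = " a b c";
+    /// let got: Vec<&str> = re
+    ///     .split(hay)
+    ///     .map(|sp| &hay[sp])
+    ///     .filter(|field| !field.is_empty())
+    ///     .collect();
+    /// assert_eq!(got, vec!["a", "b", "c"]);
+    ///
+    /// # Ok::<(), Box<dyn std::error::Error>>(())
+    /// ```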
+ /// + /// # Example + /// + /// To split a string delimited by arbitrary amounts of spaces or tabs: + /// + /// ``` + /// use regex_automata::meta::Regex; + /// + /// let re = Regex::new(r"[ \t]+")?; + /// let hay = "a b \t c\td e"; + /// let fields: Vec<&str> = re.split(hay).map(|span| &hay[span]).collect(); + /// assert_eq!(fields, vec!["a", "b", "c", "d", "e"]); + /// + /// # Ok::<(), Box>(()) + /// ``` + /// + /// # Example: more cases + /// + /// Basic usage: + /// + /// ``` + /// use regex_automata::meta::Regex; + /// + /// let re = Regex::new(r" ")?; + /// let hay = "Mary had a little lamb"; + /// let got: Vec<&str> = re.split(hay).map(|sp| &hay[sp]).collect(); + /// assert_eq!(got, vec!["Mary", "had", "a", "little", "lamb"]); + /// + /// let re = Regex::new(r"X")?; + /// let hay = ""; + /// let got: Vec<&str> = re.split(hay).map(|sp| &hay[sp]).collect(); + /// assert_eq!(got, vec![""]); + /// + /// let re = Regex::new(r"X")?; + /// let hay = "lionXXtigerXleopard"; + /// let got: Vec<&str> = re.split(hay).map(|sp| &hay[sp]).collect(); + /// assert_eq!(got, vec!["lion", "", "tiger", "leopard"]); + /// + /// let re = Regex::new(r"::")?; + /// let hay = "lion::tiger::leopard"; + /// let got: Vec<&str> = re.split(hay).map(|sp| &hay[sp]).collect(); + /// assert_eq!(got, vec!["lion", "tiger", "leopard"]); + /// + /// # Ok::<(), Box>(()) + /// ``` + /// + /// If a haystack contains multiple contiguous matches, you will end up + /// with empty spans yielded by the iterator: + /// + /// ``` + /// use regex_automata::meta::Regex; + /// + /// let re = Regex::new(r"X")?; + /// let hay = "XXXXaXXbXc"; + /// let got: Vec<&str> = re.split(hay).map(|sp| &hay[sp]).collect(); + /// assert_eq!(got, vec!["", "", "", "", "a", "", "b", "c"]); + /// + /// let re = Regex::new(r"/")?; + /// let hay = "(///)"; + /// let got: Vec<&str> = re.split(hay).map(|sp| &hay[sp]).collect(); + /// assert_eq!(got, vec!["(", "", "", ")"]); + /// + /// # Ok::<(), Box>(()) + /// ``` + /// + /// Separators at the start or end of a haystack are neighbored by empty + /// spans. + /// + /// ``` + /// use regex_automata::meta::Regex; + /// + /// let re = Regex::new(r"0")?; + /// let hay = "010"; + /// let got: Vec<&str> = re.split(hay).map(|sp| &hay[sp]).collect(); + /// assert_eq!(got, vec!["", "1", ""]); + /// + /// # Ok::<(), Box>(()) + /// ``` + /// + /// When the empty string is used as a regex, it splits at every valid + /// UTF-8 boundary by default (which includes the beginning and end of the + /// haystack): + /// + /// ``` + /// use regex_automata::meta::Regex; + /// + /// let re = Regex::new(r"")?; + /// let hay = "rust"; + /// let got: Vec<&str> = re.split(hay).map(|sp| &hay[sp]).collect(); + /// assert_eq!(got, vec!["", "r", "u", "s", "t", ""]); + /// + /// // Splitting by an empty string is UTF-8 aware by default! + /// let re = Regex::new(r"")?; + /// let hay = "☃"; + /// let got: Vec<&str> = re.split(hay).map(|sp| &hay[sp]).collect(); + /// assert_eq!(got, vec!["", "☃", ""]); + /// + /// # Ok::<(), Box>(()) + /// ``` + /// + /// But note that UTF-8 mode for empty strings can be disabled, which will + /// then result in a match at every byte offset in the haystack, + /// including between every UTF-8 code unit. 
+ /// + /// ``` + /// use regex_automata::meta::Regex; + /// + /// let re = Regex::builder() + /// .configure(Regex::config().utf8_empty(false)) + /// .build(r"")?; + /// let hay = "☃".as_bytes(); + /// let got: Vec<&[u8]> = re.split(hay).map(|sp| &hay[sp]).collect(); + /// assert_eq!(got, vec![ + /// // Writing byte string slices is just brutal. The problem is that + /// // b"foo" has type &[u8; 3] instead of &[u8]. + /// &[][..], &[b'\xE2'][..], &[b'\x98'][..], &[b'\x83'][..], &[][..], + /// ]); + /// + /// # Ok::<(), Box>(()) + /// ``` + /// + /// Contiguous separators (commonly shows up with whitespace), can lead to + /// possibly surprising behavior. For example, this code is correct: + /// + /// ``` + /// use regex_automata::meta::Regex; + /// + /// let re = Regex::new(r" ")?; + /// let hay = " a b c"; + /// let got: Vec<&str> = re.split(hay).map(|sp| &hay[sp]).collect(); + /// assert_eq!(got, vec!["", "", "", "", "a", "", "b", "c"]); + /// + /// # Ok::<(), Box>(()) + /// ``` + /// + /// It does *not* give you `["a", "b", "c"]`. For that behavior, you'd want + /// to match contiguous space characters: + /// + /// ``` + /// use regex_automata::meta::Regex; + /// + /// let re = Regex::new(r" +")?; + /// let hay = " a b c"; + /// let got: Vec<&str> = re.split(hay).map(|sp| &hay[sp]).collect(); + /// // N.B. This does still include a leading empty span because ' +' + /// // matches at the beginning of the haystack. + /// assert_eq!(got, vec!["", "a", "b", "c"]); + /// + /// # Ok::<(), Box>(()) + /// ``` + #[inline] + pub fn split<'r, 'h, I: Into>>( + &'r self, + input: I, + ) -> Split<'r, 'h> { + Split { finder: self.find_iter(input), last: 0 } + } + + /// Returns an iterator of at most `limit` spans of the haystack given, + /// delimited by a match of the regex. (A `limit` of `0` will return no + /// spans.) Namely, each element of the iterator corresponds to a part + /// of the haystack that *isn't* matched by the regular expression. The + /// remainder of the haystack that is not split will be the last element in + /// the iterator. + /// + /// # Example + /// + /// Get the first two words in some haystack: + /// + /// ``` + /// # if cfg!(miri) { return Ok(()); } // miri takes too long + /// use regex_automata::meta::Regex; + /// + /// let re = Regex::new(r"\W+").unwrap(); + /// let hay = "Hey! 
How are you?";
+    /// let fields: Vec<&str> =
+    ///     re.splitn(hay, 3).map(|span| &hay[span]).collect();
+    /// assert_eq!(fields, vec!["Hey", "How", "are you?"]);
+    ///
+    /// # Ok::<(), Box<dyn std::error::Error>>(())
+    /// ```
+    ///
+    /// # Examples: more cases
+    ///
+    /// ```
+    /// use regex_automata::meta::Regex;
+    ///
+    /// let re = Regex::new(r" ")?;
+    /// let hay = "Mary had a little lamb";
+    /// let got: Vec<&str> = re.splitn(hay, 3).map(|sp| &hay[sp]).collect();
+    /// assert_eq!(got, vec!["Mary", "had", "a little lamb"]);
+    ///
+    /// let re = Regex::new(r"X")?;
+    /// let hay = "";
+    /// let got: Vec<&str> = re.splitn(hay, 3).map(|sp| &hay[sp]).collect();
+    /// assert_eq!(got, vec![""]);
+    ///
+    /// let re = Regex::new(r"X")?;
+    /// let hay = "lionXXtigerXleopard";
+    /// let got: Vec<&str> = re.splitn(hay, 3).map(|sp| &hay[sp]).collect();
+    /// assert_eq!(got, vec!["lion", "", "tigerXleopard"]);
+    ///
+    /// let re = Regex::new(r"::")?;
+    /// let hay = "lion::tiger::leopard";
+    /// let got: Vec<&str> = re.splitn(hay, 2).map(|sp| &hay[sp]).collect();
+    /// assert_eq!(got, vec!["lion", "tiger::leopard"]);
+    ///
+    /// let re = Regex::new(r"X")?;
+    /// let hay = "abcXdef";
+    /// let got: Vec<&str> = re.splitn(hay, 1).map(|sp| &hay[sp]).collect();
+    /// assert_eq!(got, vec!["abcXdef"]);
+    ///
+    /// let re = Regex::new(r"X")?;
+    /// let hay = "abcdef";
+    /// let got: Vec<&str> = re.splitn(hay, 2).map(|sp| &hay[sp]).collect();
+    /// assert_eq!(got, vec!["abcdef"]);
+    ///
+    /// let re = Regex::new(r"X")?;
+    /// let hay = "abcXdef";
+    /// let got: Vec<&str> = re.splitn(hay, 0).map(|sp| &hay[sp]).collect();
+    /// assert!(got.is_empty());
+    ///
+    /// # Ok::<(), Box<dyn std::error::Error>>(())
+    /// ```
+    pub fn splitn<'r, 'h, I: Into<Input<'h>>>(
+        &'r self,
+        input: I,
+        limit: usize,
+    ) -> SplitN<'r, 'h> {
+        SplitN { splits: self.split(input), limit }
+    }
+}
+
+/// Lower level search routines that give more control.
+impl Regex {
+    /// Returns the start and end offset of the leftmost match. If no match
+    /// exists, then `None` is returned.
+    ///
+    /// This is like [`Regex::find`], but it accepts a concrete `&Input`
+    /// instead of an `Into<Input>`.
+    ///
+    /// # Example
+    ///
+    /// ```
+    /// use regex_automata::{meta::Regex, Input, Match};
+    ///
+    /// let re = Regex::new(r"Samwise|Sam")?;
+    /// let input = Input::new(
+    ///     "one of the chief characters, Samwise the Brave",
+    /// );
+    /// assert_eq!(Some(Match::must(0, 29..36)), re.search(&input));
+    ///
+    /// # Ok::<(), Box<dyn std::error::Error>>(())
+    /// ```
+    #[inline]
+    pub fn search(&self, input: &Input<'_>) -> Option<Match> {
+        if self.imp.info.captures_disabled()
+            || self.imp.info.is_impossible(input)
+        {
+            return None;
+        }
+        let mut guard = self.pool.get();
+        let result = self.imp.strat.search(&mut guard, input);
+        // We do this dance with the guard and explicitly put it back in the
+        // pool because it seems to result in better codegen. If we let the
+        // guard's Drop impl put it back in the pool, then functions like
+        // ptr::drop_in_place get called and they *don't* get inlined. This
+        // isn't usually a big deal, but in latency sensitive benchmarks the
+        // extra function call can matter.
+        //
+        // I used `rebar measure -f '^grep/every-line$' -e meta` to measure
+        // the effects here.
+        //
+        // Note that this doesn't eliminate the latency effects of using the
+        // pool. There is still some (minor) cost for the "thread owner" of the
+        // pool. (i.e., The thread that first calls a regex search routine.)
+ // However, for other threads using the regex, the pool access can be + // quite expensive as it goes through a mutex. Callers can avoid this + // by either cloning the Regex (which creates a distinct copy of the + // pool), or callers can use the lower level APIs that accept a 'Cache' + // directly and do their own handling. + PoolGuard::put(guard); + result + } + + /// Returns the end offset of the leftmost match. If no match exists, then + /// `None` is returned. + /// + /// This is distinct from [`Regex::search`] in that it only returns the end + /// of a match and not the start of the match. Depending on a variety of + /// implementation details, this _may_ permit the regex engine to do less + /// overall work. For example, if a DFA is being used to execute a search, + /// then the start of a match usually requires running a separate DFA in + /// reverse to the find the start of a match. If one only needs the end of + /// a match, then the separate reverse scan to find the start of a match + /// can be skipped. (Note that the reverse scan is avoided even when using + /// `Regex::search` when possible, for example, in the case of an anchored + /// search.) + /// + /// # Example + /// + /// ``` + /// use regex_automata::{meta::Regex, Input, HalfMatch}; + /// + /// let re = Regex::new(r"Samwise|Sam")?; + /// let input = Input::new( + /// "one of the chief characters, Samwise the Brave", + /// ); + /// assert_eq!(Some(HalfMatch::must(0, 36)), re.search_half(&input)); + /// + /// # Ok::<(), Box>(()) + /// ``` + #[inline] + pub fn search_half(&self, input: &Input<'_>) -> Option { + if self.imp.info.captures_disabled() + || self.imp.info.is_impossible(input) + { + return None; + } + let mut guard = self.pool.get(); + let result = self.imp.strat.search_half(&mut guard, input); + // See 'Regex::search' for why we put the guard back explicitly. + PoolGuard::put(guard); + result + } + + /// Executes a leftmost forward search and writes the spans of capturing + /// groups that participated in a match into the provided [`Captures`] + /// value. If no match was found, then [`Captures::is_match`] is guaranteed + /// to return `false`. + /// + /// This is like [`Regex::captures`], but it accepts a concrete `&Input` + /// instead of an `Into`. + /// + /// # Example: specific pattern search + /// + /// This example shows how to build a multi-pattern `Regex` that permits + /// searching for specific patterns. + /// + /// ``` + /// use regex_automata::{ + /// meta::Regex, + /// Anchored, Match, PatternID, Input, + /// }; + /// + /// let re = Regex::new_many(&["[a-z0-9]{6}", "[a-z][a-z0-9]{5}"])?; + /// let mut caps = re.create_captures(); + /// let haystack = "foo123"; + /// + /// // Since we are using the default leftmost-first match and both + /// // patterns match at the same starting position, only the first pattern + /// // will be returned in this case when doing a search for any of the + /// // patterns. + /// let expected = Some(Match::must(0, 0..6)); + /// re.search_captures(&Input::new(haystack), &mut caps); + /// assert_eq!(expected, caps.get_match()); + /// + /// // But if we want to check whether some other pattern matches, then we + /// // can provide its pattern ID. 
+ /// let expected = Some(Match::must(1, 0..6)); + /// let input = Input::new(haystack) + /// .anchored(Anchored::Pattern(PatternID::must(1))); + /// re.search_captures(&input, &mut caps); + /// assert_eq!(expected, caps.get_match()); + /// + /// # Ok::<(), Box>(()) + /// ``` + /// + /// # Example: specifying the bounds of a search + /// + /// This example shows how providing the bounds of a search can produce + /// different results than simply sub-slicing the haystack. + /// + /// ``` + /// # if cfg!(miri) { return Ok(()); } // miri takes too long + /// use regex_automata::{meta::Regex, Match, Input}; + /// + /// let re = Regex::new(r"\b[0-9]{3}\b")?; + /// let mut caps = re.create_captures(); + /// let haystack = "foo123bar"; + /// + /// // Since we sub-slice the haystack, the search doesn't know about + /// // the larger context and assumes that `123` is surrounded by word + /// // boundaries. And of course, the match position is reported relative + /// // to the sub-slice as well, which means we get `0..3` instead of + /// // `3..6`. + /// let expected = Some(Match::must(0, 0..3)); + /// let input = Input::new(&haystack[3..6]); + /// re.search_captures(&input, &mut caps); + /// assert_eq!(expected, caps.get_match()); + /// + /// // But if we provide the bounds of the search within the context of the + /// // entire haystack, then the search can take the surrounding context + /// // into account. (And if we did find a match, it would be reported + /// // as a valid offset into `haystack` instead of its sub-slice.) + /// let expected = None; + /// let input = Input::new(haystack).range(3..6); + /// re.search_captures(&input, &mut caps); + /// assert_eq!(expected, caps.get_match()); + /// + /// # Ok::<(), Box>(()) + /// ``` + #[inline] + pub fn search_captures(&self, input: &Input<'_>, caps: &mut Captures) { + caps.set_pattern(None); + let pid = self.search_slots(input, caps.slots_mut()); + caps.set_pattern(pid); + } + + /// Executes a leftmost forward search and writes the spans of capturing + /// groups that participated in a match into the provided `slots`, and + /// returns the matching pattern ID. The contents of the slots for patterns + /// other than the matching pattern are unspecified. If no match was found, + /// then `None` is returned and the contents of `slots` is unspecified. + /// + /// This is like [`Regex::search`], but it accepts a raw slots slice + /// instead of a `Captures` value. This is useful in contexts where you + /// don't want or need to allocate a `Captures`. + /// + /// It is legal to pass _any_ number of slots to this routine. If the regex + /// engine would otherwise write a slot offset that doesn't fit in the + /// provided slice, then it is simply skipped. In general though, there are + /// usually three slice lengths you might want to use: + /// + /// * An empty slice, if you only care about which pattern matched. + /// * A slice with [`pattern_len() * 2`](Regex::pattern_len) slots, if you + /// only care about the overall match spans for each matching pattern. + /// * A slice with + /// [`slot_len()`](crate::util::captures::GroupInfo::slot_len) slots, which + /// permits recording match offsets for every capturing group in every + /// pattern. + /// + /// # Example + /// + /// This example shows how to find the overall match offsets in a + /// multi-pattern search without allocating a `Captures` value. Indeed, we + /// can put our slots right on the stack. 
+ /// + /// ``` + /// # if cfg!(miri) { return Ok(()); } // miri takes too long + /// use regex_automata::{meta::Regex, PatternID, Input}; + /// + /// let re = Regex::new_many(&[ + /// r"\pL+", + /// r"\d+", + /// ])?; + /// let input = Input::new("!@#123"); + /// + /// // We only care about the overall match offsets here, so we just + /// // allocate two slots for each pattern. Each slot records the start + /// // and end of the match. + /// let mut slots = [None; 4]; + /// let pid = re.search_slots(&input, &mut slots); + /// assert_eq!(Some(PatternID::must(1)), pid); + /// + /// // The overall match offsets are always at 'pid * 2' and 'pid * 2 + 1'. + /// // See 'GroupInfo' for more details on the mapping between groups and + /// // slot indices. + /// let slot_start = pid.unwrap().as_usize() * 2; + /// let slot_end = slot_start + 1; + /// assert_eq!(Some(3), slots[slot_start].map(|s| s.get())); + /// assert_eq!(Some(6), slots[slot_end].map(|s| s.get())); + /// + /// # Ok::<(), Box>(()) + /// ``` + #[inline] + pub fn search_slots( + &self, + input: &Input<'_>, + slots: &mut [Option], + ) -> Option { + if self.imp.info.captures_disabled() + || self.imp.info.is_impossible(input) + { + return None; + } + let mut guard = self.pool.get(); + let result = self.imp.strat.search_slots(&mut guard, input, slots); + // See 'Regex::search' for why we put the guard back explicitly. + PoolGuard::put(guard); + result + } + + /// Writes the set of patterns that match anywhere in the given search + /// configuration to `patset`. If multiple patterns match at the same + /// position and this `Regex` was configured with [`MatchKind::All`] + /// semantics, then all matching patterns are written to the given set. + /// + /// Unless all of the patterns in this `Regex` are anchored, then generally + /// speaking, this will scan the entire haystack. + /// + /// This search routine *does not* clear the pattern set. This gives some + /// flexibility to the caller (e.g., running multiple searches with the + /// same pattern set), but does make the API bug-prone if you're reusing + /// the same pattern set for multiple searches but intended them to be + /// independent. + /// + /// If a pattern ID matched but the given `PatternSet` does not have + /// sufficient capacity to store it, then it is not inserted and silently + /// dropped. + /// + /// # Example + /// + /// This example shows how to find all matching patterns in a haystack, + /// even when some patterns match at the same position as other patterns. + /// It is important that we configure the `Regex` with [`MatchKind::All`] + /// semantics here, or else overlapping matches will not be reported. 
+ /// + /// ``` + /// # if cfg!(miri) { return Ok(()); } // miri takes too long + /// use regex_automata::{meta::Regex, Input, MatchKind, PatternSet}; + /// + /// let patterns = &[ + /// r"\w+", r"\d+", r"\pL+", r"foo", r"bar", r"barfoo", r"foobar", + /// ]; + /// let re = Regex::builder() + /// .configure(Regex::config().match_kind(MatchKind::All)) + /// .build_many(patterns)?; + /// + /// let input = Input::new("foobar"); + /// let mut patset = PatternSet::new(re.pattern_len()); + /// re.which_overlapping_matches(&input, &mut patset); + /// let expected = vec![0, 2, 3, 4, 6]; + /// let got: Vec = patset.iter().map(|p| p.as_usize()).collect(); + /// assert_eq!(expected, got); + /// + /// # Ok::<(), Box>(()) + /// ``` + #[inline] + pub fn which_overlapping_matches( + &self, + input: &Input<'_>, + patset: &mut PatternSet, + ) { + if self.imp.info.is_impossible(input) { + return; + } + let mut guard = self.pool.get(); + let result = self + .imp + .strat + .which_overlapping_matches(&mut guard, input, patset); + // See 'Regex::search' for why we put the guard back explicitly. + PoolGuard::put(guard); + result + } +} + +/// Lower level search routines that give more control, and require the caller +/// to provide an explicit [`Cache`] parameter. +impl Regex { + /// This is like [`Regex::search`], but requires the caller to + /// explicitly pass a [`Cache`]. + /// + /// # Why pass a `Cache` explicitly? + /// + /// Passing a `Cache` explicitly will bypass the use of an internal memory + /// pool used by `Regex` to get a `Cache` for a search. The use of this + /// pool can be slower in some cases when a `Regex` is used from multiple + /// threads simultaneously. Typically, performance only becomes an issue + /// when there is heavy contention, which in turn usually only occurs + /// when each thread's primary unit of work is a regex search on a small + /// haystack. + /// + /// # Example + /// + /// ``` + /// use regex_automata::{meta::Regex, Input, Match}; + /// + /// let re = Regex::new(r"Samwise|Sam")?; + /// let mut cache = re.create_cache(); + /// let input = Input::new( + /// "one of the chief characters, Samwise the Brave", + /// ); + /// assert_eq!( + /// Some(Match::must(0, 29..36)), + /// re.search_with(&mut cache, &input), + /// ); + /// + /// # Ok::<(), Box>(()) + /// ``` + #[inline] + pub fn search_with( + &self, + cache: &mut Cache, + input: &Input<'_>, + ) -> Option { + if self.imp.info.captures_disabled() + || self.imp.info.is_impossible(input) + { + return None; + } + self.imp.strat.search(cache, input) + } + + /// This is like [`Regex::search_half`], but requires the caller to + /// explicitly pass a [`Cache`]. + /// + /// # Why pass a `Cache` explicitly? + /// + /// Passing a `Cache` explicitly will bypass the use of an internal memory + /// pool used by `Regex` to get a `Cache` for a search. The use of this + /// pool can be slower in some cases when a `Regex` is used from multiple + /// threads simultaneously. Typically, performance only becomes an issue + /// when there is heavy contention, which in turn usually only occurs + /// when each thread's primary unit of work is a regex search on a small + /// haystack. 
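+    ///
+    /// For example, a single `Cache` can be created up front and then reused
+    /// across many searches. A minimal sketch (the pattern and haystacks here
+    /// are arbitrary):
+    ///
+    /// ```
+    /// use regex_automata::{meta::Regex, Input};
+    ///
+    /// let re = Regex::new(r"[0-9]+")?;
+    /// // Create the cache once and reuse it for every search below.
+    /// let mut cache = re.create_cache();
+    ///
+    /// let haystacks = ["a1", "b22", "c333"];
+    /// let mut count = 0;
+    /// for &hay in haystacks.iter() {
+    ///     if re.search_half_with(&mut cache, &Input::new(hay)).is_some() {
+    ///         count += 1;
+    ///     }
+    /// }
+    /// assert_eq!(3, count);
+    ///
+    /// # Ok::<(), Box<dyn std::error::Error>>(())
+    /// ```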
+ /// + /// # Example + /// + /// ``` + /// use regex_automata::{meta::Regex, Input, HalfMatch}; + /// + /// let re = Regex::new(r"Samwise|Sam")?; + /// let mut cache = re.create_cache(); + /// let input = Input::new( + /// "one of the chief characters, Samwise the Brave", + /// ); + /// assert_eq!( + /// Some(HalfMatch::must(0, 36)), + /// re.search_half_with(&mut cache, &input), + /// ); + /// + /// # Ok::<(), Box>(()) + /// ``` + #[inline] + pub fn search_half_with( + &self, + cache: &mut Cache, + input: &Input<'_>, + ) -> Option { + if self.imp.info.captures_disabled() + || self.imp.info.is_impossible(input) + { + return None; + } + self.imp.strat.search_half(cache, input) + } + + /// This is like [`Regex::search_captures`], but requires the caller to + /// explicitly pass a [`Cache`]. + /// + /// # Why pass a `Cache` explicitly? + /// + /// Passing a `Cache` explicitly will bypass the use of an internal memory + /// pool used by `Regex` to get a `Cache` for a search. The use of this + /// pool can be slower in some cases when a `Regex` is used from multiple + /// threads simultaneously. Typically, performance only becomes an issue + /// when there is heavy contention, which in turn usually only occurs + /// when each thread's primary unit of work is a regex search on a small + /// haystack. + /// + /// # Example: specific pattern search + /// + /// This example shows how to build a multi-pattern `Regex` that permits + /// searching for specific patterns. + /// + /// ``` + /// use regex_automata::{ + /// meta::Regex, + /// Anchored, Match, PatternID, Input, + /// }; + /// + /// let re = Regex::new_many(&["[a-z0-9]{6}", "[a-z][a-z0-9]{5}"])?; + /// let (mut cache, mut caps) = (re.create_cache(), re.create_captures()); + /// let haystack = "foo123"; + /// + /// // Since we are using the default leftmost-first match and both + /// // patterns match at the same starting position, only the first pattern + /// // will be returned in this case when doing a search for any of the + /// // patterns. + /// let expected = Some(Match::must(0, 0..6)); + /// re.search_captures_with(&mut cache, &Input::new(haystack), &mut caps); + /// assert_eq!(expected, caps.get_match()); + /// + /// // But if we want to check whether some other pattern matches, then we + /// // can provide its pattern ID. + /// let expected = Some(Match::must(1, 0..6)); + /// let input = Input::new(haystack) + /// .anchored(Anchored::Pattern(PatternID::must(1))); + /// re.search_captures_with(&mut cache, &input, &mut caps); + /// assert_eq!(expected, caps.get_match()); + /// + /// # Ok::<(), Box>(()) + /// ``` + /// + /// # Example: specifying the bounds of a search + /// + /// This example shows how providing the bounds of a search can produce + /// different results than simply sub-slicing the haystack. + /// + /// ``` + /// # if cfg!(miri) { return Ok(()); } // miri takes too long + /// use regex_automata::{meta::Regex, Match, Input}; + /// + /// let re = Regex::new(r"\b[0-9]{3}\b")?; + /// let (mut cache, mut caps) = (re.create_cache(), re.create_captures()); + /// let haystack = "foo123bar"; + /// + /// // Since we sub-slice the haystack, the search doesn't know about + /// // the larger context and assumes that `123` is surrounded by word + /// // boundaries. And of course, the match position is reported relative + /// // to the sub-slice as well, which means we get `0..3` instead of + /// // `3..6`. 
+ /// let expected = Some(Match::must(0, 0..3)); + /// let input = Input::new(&haystack[3..6]); + /// re.search_captures_with(&mut cache, &input, &mut caps); + /// assert_eq!(expected, caps.get_match()); + /// + /// // But if we provide the bounds of the search within the context of the + /// // entire haystack, then the search can take the surrounding context + /// // into account. (And if we did find a match, it would be reported + /// // as a valid offset into `haystack` instead of its sub-slice.) + /// let expected = None; + /// let input = Input::new(haystack).range(3..6); + /// re.search_captures_with(&mut cache, &input, &mut caps); + /// assert_eq!(expected, caps.get_match()); + /// + /// # Ok::<(), Box>(()) + /// ``` + #[inline] + pub fn search_captures_with( + &self, + cache: &mut Cache, + input: &Input<'_>, + caps: &mut Captures, + ) { + caps.set_pattern(None); + let pid = self.search_slots_with(cache, input, caps.slots_mut()); + caps.set_pattern(pid); + } + + /// This is like [`Regex::search_slots`], but requires the caller to + /// explicitly pass a [`Cache`]. + /// + /// # Why pass a `Cache` explicitly? + /// + /// Passing a `Cache` explicitly will bypass the use of an internal memory + /// pool used by `Regex` to get a `Cache` for a search. The use of this + /// pool can be slower in some cases when a `Regex` is used from multiple + /// threads simultaneously. Typically, performance only becomes an issue + /// when there is heavy contention, which in turn usually only occurs + /// when each thread's primary unit of work is a regex search on a small + /// haystack. + /// + /// # Example + /// + /// This example shows how to find the overall match offsets in a + /// multi-pattern search without allocating a `Captures` value. Indeed, we + /// can put our slots right on the stack. + /// + /// ``` + /// # if cfg!(miri) { return Ok(()); } // miri takes too long + /// use regex_automata::{meta::Regex, PatternID, Input}; + /// + /// let re = Regex::new_many(&[ + /// r"\pL+", + /// r"\d+", + /// ])?; + /// let mut cache = re.create_cache(); + /// let input = Input::new("!@#123"); + /// + /// // We only care about the overall match offsets here, so we just + /// // allocate two slots for each pattern. Each slot records the start + /// // and end of the match. + /// let mut slots = [None; 4]; + /// let pid = re.search_slots_with(&mut cache, &input, &mut slots); + /// assert_eq!(Some(PatternID::must(1)), pid); + /// + /// // The overall match offsets are always at 'pid * 2' and 'pid * 2 + 1'. + /// // See 'GroupInfo' for more details on the mapping between groups and + /// // slot indices. + /// let slot_start = pid.unwrap().as_usize() * 2; + /// let slot_end = slot_start + 1; + /// assert_eq!(Some(3), slots[slot_start].map(|s| s.get())); + /// assert_eq!(Some(6), slots[slot_end].map(|s| s.get())); + /// + /// # Ok::<(), Box>(()) + /// ``` + #[inline] + pub fn search_slots_with( + &self, + cache: &mut Cache, + input: &Input<'_>, + slots: &mut [Option], + ) -> Option { + if self.imp.info.captures_disabled() + || self.imp.info.is_impossible(input) + { + return None; + } + self.imp.strat.search_slots(cache, input, slots) + } + + /// This is like [`Regex::which_overlapping_matches`], but requires the + /// caller to explicitly pass a [`Cache`]. + /// + /// Passing a `Cache` explicitly will bypass the use of an internal memory + /// pool used by `Regex` to get a `Cache` for a search. 
The use of this + /// pool can be slower in some cases when a `Regex` is used from multiple + /// threads simultaneously. Typically, performance only becomes an issue + /// when there is heavy contention, which in turn usually only occurs + /// when each thread's primary unit of work is a regex search on a small + /// haystack. + /// + /// # Why pass a `Cache` explicitly? + /// + /// # Example + /// + /// ``` + /// # if cfg!(miri) { return Ok(()); } // miri takes too long + /// use regex_automata::{meta::Regex, Input, MatchKind, PatternSet}; + /// + /// let patterns = &[ + /// r"\w+", r"\d+", r"\pL+", r"foo", r"bar", r"barfoo", r"foobar", + /// ]; + /// let re = Regex::builder() + /// .configure(Regex::config().match_kind(MatchKind::All)) + /// .build_many(patterns)?; + /// let mut cache = re.create_cache(); + /// + /// let input = Input::new("foobar"); + /// let mut patset = PatternSet::new(re.pattern_len()); + /// re.which_overlapping_matches_with(&mut cache, &input, &mut patset); + /// let expected = vec![0, 2, 3, 4, 6]; + /// let got: Vec = patset.iter().map(|p| p.as_usize()).collect(); + /// assert_eq!(expected, got); + /// + /// # Ok::<(), Box>(()) + /// ``` + #[inline] + pub fn which_overlapping_matches_with( + &self, + cache: &mut Cache, + input: &Input<'_>, + patset: &mut PatternSet, + ) { + if self.imp.info.is_impossible(input) { + return; + } + self.imp.strat.which_overlapping_matches(cache, input, patset) + } +} + +/// Various non-search routines for querying properties of a `Regex` and +/// convenience routines for creating [`Captures`] and [`Cache`] values. +impl Regex { + /// Creates a new object for recording capture group offsets. This is used + /// in search APIs like [`Regex::captures`] and [`Regex::search_captures`]. + /// + /// This is a convenience routine for + /// `Captures::all(re.group_info().clone())`. Callers may build other types + /// of `Captures` values that record less information (and thus require + /// less work from the regex engine) using [`Captures::matches`] and + /// [`Captures::empty`]. + /// + /// # Example + /// + /// This shows some alternatives to [`Regex::create_captures`]: + /// + /// ``` + /// use regex_automata::{ + /// meta::Regex, + /// util::captures::Captures, + /// Match, PatternID, Span, + /// }; + /// + /// let re = Regex::new(r"(?[A-Z][a-z]+) (?[A-Z][a-z]+)")?; + /// + /// // This is equivalent to Regex::create_captures. It stores matching + /// // offsets for all groups in the regex. + /// let mut all = Captures::all(re.group_info().clone()); + /// re.captures("Bruce Springsteen", &mut all); + /// assert_eq!(Some(Match::must(0, 0..17)), all.get_match()); + /// assert_eq!(Some(Span::from(0..5)), all.get_group_by_name("first")); + /// assert_eq!(Some(Span::from(6..17)), all.get_group_by_name("last")); + /// + /// // In this version, we only care about the implicit groups, which + /// // means offsets for the explicit groups will be unavailable. It can + /// // sometimes be faster to ask for fewer groups, since the underlying + /// // regex engine needs to do less work to keep track of them. + /// let mut matches = Captures::matches(re.group_info().clone()); + /// re.captures("Bruce Springsteen", &mut matches); + /// // We still get the overall match info. + /// assert_eq!(Some(Match::must(0, 0..17)), matches.get_match()); + /// // But now the explicit groups are unavailable. 
+ /// assert_eq!(None, matches.get_group_by_name("first")); + /// assert_eq!(None, matches.get_group_by_name("last")); + /// + /// // Finally, in this version, we don't ask to keep track of offsets for + /// // *any* groups. All we get back is whether a match occurred, and if + /// // so, the ID of the pattern that matched. + /// let mut empty = Captures::empty(re.group_info().clone()); + /// re.captures("Bruce Springsteen", &mut empty); + /// // it's a match! + /// assert!(empty.is_match()); + /// // for pattern ID 0 + /// assert_eq!(Some(PatternID::ZERO), empty.pattern()); + /// // Match offsets are unavailable. + /// assert_eq!(None, empty.get_match()); + /// // And of course, explicit groups are unavailable too. + /// assert_eq!(None, empty.get_group_by_name("first")); + /// assert_eq!(None, empty.get_group_by_name("last")); + /// + /// # Ok::<(), Box>(()) + /// ``` + pub fn create_captures(&self) -> Captures { + Captures::all(self.group_info().clone()) + } + + /// Creates a new cache for use with lower level search APIs like + /// [`Regex::search_with`]. + /// + /// The cache returned should only be used for searches for this `Regex`. + /// If you want to reuse the cache for another `Regex`, then you must call + /// [`Cache::reset`] with that `Regex`. + /// + /// This is a convenience routine for [`Cache::new`]. + /// + /// # Example + /// + /// ``` + /// use regex_automata::{meta::Regex, Input, Match}; + /// + /// let re = Regex::new(r"(?-u)m\w+\s+m\w+")?; + /// let mut cache = re.create_cache(); + /// let input = Input::new("crazy janey and her mission man"); + /// assert_eq!( + /// Some(Match::must(0, 20..31)), + /// re.search_with(&mut cache, &input), + /// ); + /// + /// # Ok::<(), Box>(()) + /// ``` + pub fn create_cache(&self) -> Cache { + self.imp.strat.create_cache() + } + + /// Returns the total number of patterns in this regex. + /// + /// The standard [`Regex::new`] constructor always results in a `Regex` + /// with a single pattern, but [`Regex::new_many`] permits building a + /// multi-pattern regex. + /// + /// A `Regex` guarantees that the maximum possible `PatternID` returned in + /// any match is `Regex::pattern_len() - 1`. In the case where the number + /// of patterns is `0`, a match is impossible. + /// + /// # Example + /// + /// ``` + /// use regex_automata::meta::Regex; + /// + /// let re = Regex::new(r"(?m)^[a-z]$")?; + /// assert_eq!(1, re.pattern_len()); + /// + /// let re = Regex::new_many::<&str>(&[])?; + /// assert_eq!(0, re.pattern_len()); + /// + /// let re = Regex::new_many(&["a", "b", "c"])?; + /// assert_eq!(3, re.pattern_len()); + /// + /// # Ok::<(), Box>(()) + /// ``` + pub fn pattern_len(&self) -> usize { + self.imp.info.pattern_len() + } + + /// Returns the total number of capturing groups. + /// + /// This includes the implicit capturing group corresponding to the + /// entire match. Therefore, the minimum value returned is `1`. + /// + /// # Example + /// + /// This shows a few patterns and how many capture groups they have. 
+ /// + /// ``` + /// use regex_automata::meta::Regex; + /// + /// let len = |pattern| { + /// Regex::new(pattern).map(|re| re.captures_len()) + /// }; + /// + /// assert_eq!(1, len("a")?); + /// assert_eq!(2, len("(a)")?); + /// assert_eq!(3, len("(a)|(b)")?); + /// assert_eq!(5, len("(a)(b)|(c)(d)")?); + /// assert_eq!(2, len("(a)|b")?); + /// assert_eq!(2, len("a|(b)")?); + /// assert_eq!(2, len("(b)*")?); + /// assert_eq!(2, len("(b)+")?); + /// + /// # Ok::<(), Box>(()) + /// ``` + /// + /// # Example: multiple patterns + /// + /// This routine also works for multiple patterns. The total number is + /// the sum of the capture groups of each pattern. + /// + /// ``` + /// use regex_automata::meta::Regex; + /// + /// let len = |patterns| { + /// Regex::new_many(patterns).map(|re| re.captures_len()) + /// }; + /// + /// assert_eq!(2, len(&["a", "b"])?); + /// assert_eq!(4, len(&["(a)", "(b)"])?); + /// assert_eq!(6, len(&["(a)|(b)", "(c)|(d)"])?); + /// assert_eq!(8, len(&["(a)(b)|(c)(d)", "(x)(y)"])?); + /// assert_eq!(3, len(&["(a)", "b"])?); + /// assert_eq!(3, len(&["a", "(b)"])?); + /// assert_eq!(4, len(&["(a)", "(b)*"])?); + /// assert_eq!(4, len(&["(a)+", "(b)+"])?); + /// + /// # Ok::<(), Box>(()) + /// ``` + pub fn captures_len(&self) -> usize { + self.imp + .info + .props_union() + .explicit_captures_len() + .saturating_add(self.pattern_len()) + } + + /// Returns the total number of capturing groups that appear in every + /// possible match. + /// + /// If the number of capture groups can vary depending on the match, then + /// this returns `None`. That is, a value is only returned when the number + /// of matching groups is invariant or "static." + /// + /// Note that like [`Regex::captures_len`], this **does** include the + /// implicit capturing group corresponding to the entire match. Therefore, + /// when a non-None value is returned, it is guaranteed to be at least `1`. + /// Stated differently, a return value of `Some(0)` is impossible. + /// + /// # Example + /// + /// This shows a few cases where a static number of capture groups is + /// available and a few cases where it is not. + /// + /// ``` + /// use regex_automata::meta::Regex; + /// + /// let len = |pattern| { + /// Regex::new(pattern).map(|re| re.static_captures_len()) + /// }; + /// + /// assert_eq!(Some(1), len("a")?); + /// assert_eq!(Some(2), len("(a)")?); + /// assert_eq!(Some(2), len("(a)|(b)")?); + /// assert_eq!(Some(3), len("(a)(b)|(c)(d)")?); + /// assert_eq!(None, len("(a)|b")?); + /// assert_eq!(None, len("a|(b)")?); + /// assert_eq!(None, len("(b)*")?); + /// assert_eq!(Some(2), len("(b)+")?); + /// + /// # Ok::<(), Box>(()) + /// ``` + /// + /// # Example: multiple patterns + /// + /// This property extends to regexes with multiple patterns as well. In + /// order for their to be a static number of capture groups in this case, + /// every pattern must have the same static number. 
+ /// + /// ``` + /// use regex_automata::meta::Regex; + /// + /// let len = |patterns| { + /// Regex::new_many(patterns).map(|re| re.static_captures_len()) + /// }; + /// + /// assert_eq!(Some(1), len(&["a", "b"])?); + /// assert_eq!(Some(2), len(&["(a)", "(b)"])?); + /// assert_eq!(Some(2), len(&["(a)|(b)", "(c)|(d)"])?); + /// assert_eq!(Some(3), len(&["(a)(b)|(c)(d)", "(x)(y)"])?); + /// assert_eq!(None, len(&["(a)", "b"])?); + /// assert_eq!(None, len(&["a", "(b)"])?); + /// assert_eq!(None, len(&["(a)", "(b)*"])?); + /// assert_eq!(Some(2), len(&["(a)+", "(b)+"])?); + /// + /// # Ok::<(), Box>(()) + /// ``` + #[inline] + pub fn static_captures_len(&self) -> Option { + self.imp + .info + .props_union() + .static_explicit_captures_len() + .map(|len| len.saturating_add(1)) + } + + /// Return information about the capture groups in this `Regex`. + /// + /// A `GroupInfo` is an immutable object that can be cheaply cloned. It + /// is responsible for maintaining a mapping between the capture groups + /// in the concrete syntax of zero or more regex patterns and their + /// internal representation used by some of the regex matchers. It is also + /// responsible for maintaining a mapping between the name of each group + /// (if one exists) and its corresponding group index. + /// + /// A `GroupInfo` is ultimately what is used to build a [`Captures`] value, + /// which is some mutable space where group offsets are stored as a result + /// of a search. + /// + /// # Example + /// + /// This shows some alternatives to [`Regex::create_captures`]: + /// + /// ``` + /// use regex_automata::{ + /// meta::Regex, + /// util::captures::Captures, + /// Match, PatternID, Span, + /// }; + /// + /// let re = Regex::new(r"(?[A-Z][a-z]+) (?[A-Z][a-z]+)")?; + /// + /// // This is equivalent to Regex::create_captures. It stores matching + /// // offsets for all groups in the regex. + /// let mut all = Captures::all(re.group_info().clone()); + /// re.captures("Bruce Springsteen", &mut all); + /// assert_eq!(Some(Match::must(0, 0..17)), all.get_match()); + /// assert_eq!(Some(Span::from(0..5)), all.get_group_by_name("first")); + /// assert_eq!(Some(Span::from(6..17)), all.get_group_by_name("last")); + /// + /// // In this version, we only care about the implicit groups, which + /// // means offsets for the explicit groups will be unavailable. It can + /// // sometimes be faster to ask for fewer groups, since the underlying + /// // regex engine needs to do less work to keep track of them. + /// let mut matches = Captures::matches(re.group_info().clone()); + /// re.captures("Bruce Springsteen", &mut matches); + /// // We still get the overall match info. + /// assert_eq!(Some(Match::must(0, 0..17)), matches.get_match()); + /// // But now the explicit groups are unavailable. + /// assert_eq!(None, matches.get_group_by_name("first")); + /// assert_eq!(None, matches.get_group_by_name("last")); + /// + /// // Finally, in this version, we don't ask to keep track of offsets for + /// // *any* groups. All we get back is whether a match occurred, and if + /// // so, the ID of the pattern that matched. + /// let mut empty = Captures::empty(re.group_info().clone()); + /// re.captures("Bruce Springsteen", &mut empty); + /// // it's a match! + /// assert!(empty.is_match()); + /// // for pattern ID 0 + /// assert_eq!(Some(PatternID::ZERO), empty.pattern()); + /// // Match offsets are unavailable. + /// assert_eq!(None, empty.get_match()); + /// // And of course, explicit groups are unavailable too. 
+ /// assert_eq!(None, empty.get_group_by_name("first")); + /// assert_eq!(None, empty.get_group_by_name("last")); + /// + /// # Ok::<(), Box>(()) + /// ``` + #[inline] + pub fn group_info(&self) -> &GroupInfo { + self.imp.strat.group_info() + } + + /// Returns the configuration object used to build this `Regex`. + /// + /// If no configuration object was explicitly passed, then the + /// configuration returned represents the default. + #[inline] + pub fn get_config(&self) -> &Config { + self.imp.info.config() + } + + /// Returns true if this regex has a high chance of being "accelerated." + /// + /// The precise meaning of "accelerated" is specifically left unspecified, + /// but the general meaning is that the search is a high likelihood of + /// running faster than a character-at-a-time loop inside a standard + /// regex engine. + /// + /// When a regex is accelerated, it is only a *probabilistic* claim. That + /// is, just because the regex is believed to be accelerated, that doesn't + /// mean it will definitely execute searches very fast. Similarly, if a + /// regex is *not* accelerated, that is also a probabilistic claim. That + /// is, a regex for which `is_accelerated` returns `false` could still run + /// searches more quickly than a regex for which `is_accelerated` returns + /// `true`. + /// + /// Whether a regex is marked as accelerated or not is dependent on + /// implementations details that may change in a semver compatible release. + /// That is, a regex that is accelerated in a `x.y.1` release might not be + /// accelerated in a `x.y.2` release. + /// + /// Basically, the value of acceleration boils down to a hedge: a hodge + /// podge of internal heuristics combine to make a probabilistic guess + /// that this regex search may run "fast." The value in knowing this from + /// a caller's perspective is that it may act as a signal that no further + /// work should be done to accelerate a search. For example, a grep-like + /// tool might try to do some extra work extracting literals from a regex + /// to create its own heuristic acceleration strategies. But it might + /// choose to defer to this crate's acceleration strategy if one exists. + /// This routine permits querying whether such a strategy is active for a + /// particular regex. + /// + /// # Example + /// + /// ``` + /// use regex_automata::meta::Regex; + /// + /// // A simple literal is very likely to be accelerated. + /// let re = Regex::new(r"foo")?; + /// assert!(re.is_accelerated()); + /// + /// // A regex with no literals is likely to not be accelerated. + /// let re = Regex::new(r"\w")?; + /// assert!(!re.is_accelerated()); + /// + /// # Ok::<(), Box>(()) + /// ``` + #[inline] + pub fn is_accelerated(&self) -> bool { + self.imp.strat.is_accelerated() + } + + /// Return the total approximate heap memory, in bytes, used by this `Regex`. + /// + /// Note that currently, there is no high level configuration for setting + /// a limit on the specific value returned by this routine. Instead, the + /// following routines can be used to control heap memory at a bit of a + /// lower level: + /// + /// * [`Config::nfa_size_limit`] controls how big _any_ of the NFAs are + /// allowed to be. + /// * [`Config::onepass_size_limit`] controls how big the one-pass DFA is + /// allowed to be. + /// * [`Config::hybrid_cache_capacity`] controls how much memory the lazy + /// DFA is permitted to allocate to store its transition table. + /// * [`Config::dfa_size_limit`] controls how big a fully compiled DFA is + /// allowed to be. 
+    /// * [`Config::dfa_state_limit`] controls the conditions under which the
+    /// meta regex engine will even attempt to build a fully compiled DFA.
+    #[inline]
+    pub fn memory_usage(&self) -> usize {
+        self.imp.strat.memory_usage()
+    }
+}
+
+impl Clone for Regex {
+    fn clone(&self) -> Regex {
+        let imp = Arc::clone(&self.imp);
+        let pool = {
+            let strat = Arc::clone(&imp.strat);
+            let create: CachePoolFn = Box::new(move || strat.create_cache());
+            Pool::new(create)
+        };
+        Regex { imp, pool }
+    }
+}
+
+#[derive(Clone, Debug)]
+pub(crate) struct RegexInfo(Arc<RegexInfoI>);
+
+#[derive(Clone, Debug)]
+struct RegexInfoI {
+    config: Config,
+    props: Vec<hir::Properties>,
+    props_union: hir::Properties,
+}
+
+impl RegexInfo {
+    fn new(config: Config, hirs: &[&Hir]) -> RegexInfo {
+        // Collect all of the properties from each of the HIRs, and also
+        // union them into one big set of properties representing all HIRs
+        // as if they were in one big alternation.
+        let mut props = vec![];
+        for hir in hirs.iter() {
+            props.push(hir.properties().clone());
+        }
+        let props_union = hir::Properties::union(&props);
+
+        RegexInfo(Arc::new(RegexInfoI { config, props, props_union }))
+    }
+
+    pub(crate) fn config(&self) -> &Config {
+        &self.0.config
+    }
+
+    pub(crate) fn props(&self) -> &[hir::Properties] {
+        &self.0.props
+    }
+
+    pub(crate) fn props_union(&self) -> &hir::Properties {
+        &self.0.props_union
+    }
+
+    pub(crate) fn pattern_len(&self) -> usize {
+        self.props().len()
+    }
+
+    pub(crate) fn memory_usage(&self) -> usize {
+        self.props().iter().map(|p| p.memory_usage()).sum::<usize>()
+            + self.props_union().memory_usage()
+    }
+
+    /// Returns true when the search is guaranteed to be anchored. That is,
+    /// when a match is reported, its offset is guaranteed to correspond to
+    /// the start of the search.
+    ///
+    /// This includes returning true when `input` _isn't_ anchored but the
+    /// underlying regex is.
+    #[cfg_attr(feature = "perf-inline", inline(always))]
+    pub(crate) fn is_anchored_start(&self, input: &Input<'_>) -> bool {
+        input.get_anchored().is_anchored() || self.is_always_anchored_start()
+    }
+
+    /// Returns true when this regex is always anchored to the start of a
+    /// search. In particular, regardless of the `Input` configuration, if any
+    /// match is reported then it must start at `0`.
+    #[cfg_attr(feature = "perf-inline", inline(always))]
+    pub(crate) fn is_always_anchored_start(&self) -> bool {
+        use regex_syntax::hir::Look;
+        self.props_union().look_set_prefix().contains(Look::Start)
+    }
+
+    /// Returns true when this regex is always anchored to the end of a
+    /// search. In particular, regardless of the `Input` configuration, if any
+    /// match is reported then it must end at the end of the haystack.
+    #[cfg_attr(feature = "perf-inline", inline(always))]
+    pub(crate) fn is_always_anchored_end(&self) -> bool {
+        use regex_syntax::hir::Look;
+        self.props_union().look_set_suffix().contains(Look::End)
+    }
+
+    /// Returns true when the regex's NFA lacks capture states.
+    ///
+    /// In this case, some regex engines (like the PikeVM) are unable to
+    /// report match offsets, while others (like the lazy DFA) can. To avoid
+    /// having whether a match is reported depend on which engine happens to
+    /// be selected, routines that return match offsets will _always_ report
+    /// `None` when this is true.
+    ///
+    /// Yes, this is a weird case and a little unfortunate. But
+    /// `WhichCaptures::None` comes with an appropriate warning.
+ fn captures_disabled(&self) -> bool { + matches!(self.config().get_which_captures(), WhichCaptures::None) + } + + /// Returns true if and only if it is known that a match is impossible + /// for the given input. This is useful for short-circuiting and avoiding + /// running the regex engine if it's known no match can be reported. + /// + /// Note that this doesn't necessarily detect every possible case. For + /// example, when `pattern_len() == 0`, a match is impossible, but that + /// case is so rare that it's fine to be handled by the regex engine + /// itself. That is, it's not worth the cost of adding it here in order to + /// make it a little faster. The reason is that this is called for every + /// search. so there is some cost to adding checks here. Arguably, some of + /// the checks that are here already probably shouldn't be here... + #[cfg_attr(feature = "perf-inline", inline(always))] + fn is_impossible(&self, input: &Input<'_>) -> bool { + // The underlying regex is anchored, so if we don't start the search + // at position 0, a match is impossible, because the anchor can only + // match at position 0. + if input.start() > 0 && self.is_always_anchored_start() { + return true; + } + // Same idea, but for the end anchor. + if input.end() < input.haystack().len() + && self.is_always_anchored_end() + { + return true; + } + // If the haystack is smaller than the minimum length required, then + // we know there can be no match. + let minlen = match self.props_union().minimum_len() { + None => return false, + Some(minlen) => minlen, + }; + if input.get_span().len() < minlen { + return true; + } + // Same idea as minimum, but for maximum. This is trickier. We can + // only apply the maximum when we know the entire span that we're + // searching *has* to match according to the regex (and possibly the + // input configuration). If we know there is too much for the regex + // to match, we can bail early. + // + // I don't think we can apply the maximum otherwise unfortunately. + if self.is_anchored_start(input) && self.is_always_anchored_end() { + let maxlen = match self.props_union().maximum_len() { + None => return false, + Some(maxlen) => maxlen, + }; + if input.get_span().len() > maxlen { + return true; + } + } + false + } +} + +/// An iterator over all non-overlapping matches. +/// +/// The iterator yields a [`Match`] value until no more matches could be found. +/// +/// The lifetime parameters are as follows: +/// +/// * `'r` represents the lifetime of the `Regex` that produced this iterator. +/// * `'h` represents the lifetime of the haystack being searched. +/// +/// This iterator can be created with the [`Regex::find_iter`] method. +#[derive(Debug)] +pub struct FindMatches<'r, 'h> { + re: &'r Regex, + cache: CachePoolGuard<'r>, + it: iter::Searcher<'h>, +} + +impl<'r, 'h> FindMatches<'r, 'h> { + /// Returns the `Regex` value that created this iterator. + #[inline] + pub fn regex(&self) -> &'r Regex { + self.re + } + + /// Returns the current `Input` associated with this iterator. + /// + /// The `start` position on the given `Input` may change during iteration, + /// but all other values are guaranteed to remain invariant. 
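+    ///
+    /// # Example
+    ///
+    /// A brief sketch (illustrative, not exhaustive) of how the `start`
+    /// position advances as the iterator yields matches:
+    ///
+    /// ```
+    /// use regex_automata::meta::Regex;
+    ///
+    /// let re = Regex::new(r"\w+")?;
+    /// let mut it = re.find_iter("foo bar");
+    /// assert_eq!(0, it.input().start());
+    /// it.next();
+    /// // After a match is yielded, the search start has moved past it.
+    /// assert!(it.input().start() > 0);
+    ///
+    /// # Ok::<(), Box<dyn std::error::Error>>(())
+    /// ```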
+ #[inline] + pub fn input<'s>(&'s self) -> &'s Input<'h> { + self.it.input() + } +} + +impl<'r, 'h> Iterator for FindMatches<'r, 'h> { + type Item = Match; + + #[inline] + fn next(&mut self) -> Option { + let FindMatches { re, ref mut cache, ref mut it } = *self; + it.advance(|input| Ok(re.search_with(cache, input))) + } + + #[inline] + fn count(self) -> usize { + // If all we care about is a count of matches, then we only need to + // find the end position of each match. This can give us a 2x perf + // boost in some cases, because it avoids needing to do a reverse scan + // to find the start of a match. + let FindMatches { re, mut cache, it } = self; + // This does the deref for PoolGuard once instead of every iter. + let cache = &mut *cache; + it.into_half_matches_iter( + |input| Ok(re.search_half_with(cache, input)), + ) + .count() + } +} + +impl<'r, 'h> core::iter::FusedIterator for FindMatches<'r, 'h> {} + +/// An iterator over all non-overlapping leftmost matches with their capturing +/// groups. +/// +/// The iterator yields a [`Captures`] value until no more matches could be +/// found. +/// +/// The lifetime parameters are as follows: +/// +/// * `'r` represents the lifetime of the `Regex` that produced this iterator. +/// * `'h` represents the lifetime of the haystack being searched. +/// +/// This iterator can be created with the [`Regex::captures_iter`] method. +#[derive(Debug)] +pub struct CapturesMatches<'r, 'h> { + re: &'r Regex, + cache: CachePoolGuard<'r>, + caps: Captures, + it: iter::Searcher<'h>, +} + +impl<'r, 'h> CapturesMatches<'r, 'h> { + /// Returns the `Regex` value that created this iterator. + #[inline] + pub fn regex(&self) -> &'r Regex { + self.re + } + + /// Returns the current `Input` associated with this iterator. + /// + /// The `start` position on the given `Input` may change during iteration, + /// but all other values are guaranteed to remain invariant. + #[inline] + pub fn input<'s>(&'s self) -> &'s Input<'h> { + self.it.input() + } +} + +impl<'r, 'h> Iterator for CapturesMatches<'r, 'h> { + type Item = Captures; + + #[inline] + fn next(&mut self) -> Option { + // Splitting 'self' apart seems necessary to appease borrowck. + let CapturesMatches { re, ref mut cache, ref mut caps, ref mut it } = + *self; + let _ = it.advance(|input| { + re.search_captures_with(cache, input, caps); + Ok(caps.get_match()) + }); + if caps.is_match() { + Some(caps.clone()) + } else { + None + } + } + + #[inline] + fn count(self) -> usize { + let CapturesMatches { re, mut cache, it, .. } = self; + // This does the deref for PoolGuard once instead of every iter. + let cache = &mut *cache; + it.into_half_matches_iter( + |input| Ok(re.search_half_with(cache, input)), + ) + .count() + } +} + +impl<'r, 'h> core::iter::FusedIterator for CapturesMatches<'r, 'h> {} + +/// Yields all substrings delimited by a regular expression match. +/// +/// The spans correspond to the offsets between matches. +/// +/// The lifetime parameters are as follows: +/// +/// * `'r` represents the lifetime of the `Regex` that produced this iterator. +/// * `'h` represents the lifetime of the haystack being searched. +/// +/// This iterator can be created with the [`Regex::split`] method. +#[derive(Debug)] +pub struct Split<'r, 'h> { + finder: FindMatches<'r, 'h>, + last: usize, +} + +impl<'r, 'h> Split<'r, 'h> { + /// Returns the current `Input` associated with this iterator. 
+ /// + /// The `start` position on the given `Input` may change during iteration, + /// but all other values are guaranteed to remain invariant. + #[inline] + pub fn input<'s>(&'s self) -> &'s Input<'h> { + self.finder.input() + } +} + +impl<'r, 'h> Iterator for Split<'r, 'h> { + type Item = Span; + + fn next(&mut self) -> Option { + match self.finder.next() { + None => { + let len = self.finder.it.input().haystack().len(); + if self.last > len { + None + } else { + let span = Span::from(self.last..len); + self.last = len + 1; // Next call will return None + Some(span) + } + } + Some(m) => { + let span = Span::from(self.last..m.start()); + self.last = m.end(); + Some(span) + } + } + } +} + +impl<'r, 'h> core::iter::FusedIterator for Split<'r, 'h> {} + +/// Yields at most `N` spans delimited by a regular expression match. +/// +/// The spans correspond to the offsets between matches. The last span will be +/// whatever remains after splitting. +/// +/// The lifetime parameters are as follows: +/// +/// * `'r` represents the lifetime of the `Regex` that produced this iterator. +/// * `'h` represents the lifetime of the haystack being searched. +/// +/// This iterator can be created with the [`Regex::splitn`] method. +#[derive(Debug)] +pub struct SplitN<'r, 'h> { + splits: Split<'r, 'h>, + limit: usize, +} + +impl<'r, 'h> SplitN<'r, 'h> { + /// Returns the current `Input` associated with this iterator. + /// + /// The `start` position on the given `Input` may change during iteration, + /// but all other values are guaranteed to remain invariant. + #[inline] + pub fn input<'s>(&'s self) -> &'s Input<'h> { + self.splits.input() + } +} + +impl<'r, 'h> Iterator for SplitN<'r, 'h> { + type Item = Span; + + fn next(&mut self) -> Option { + if self.limit == 0 { + return None; + } + + self.limit -= 1; + if self.limit > 0 { + return self.splits.next(); + } + + let len = self.splits.finder.it.input().haystack().len(); + if self.splits.last > len { + // We've already returned all substrings. + None + } else { + // self.n == 0, so future calls will return None immediately + Some(Span::from(self.splits.last..len)) + } + } + + fn size_hint(&self) -> (usize, Option) { + (0, Some(self.limit)) + } +} + +impl<'r, 'h> core::iter::FusedIterator for SplitN<'r, 'h> {} + +/// Represents mutable scratch space used by regex engines during a search. +/// +/// Most of the regex engines in this crate require some kind of +/// mutable state in order to execute a search. This mutable state is +/// explicitly separated from the core regex object (such as a +/// [`thompson::NFA`](crate::nfa::thompson::NFA)) so that the read-only regex +/// object can be shared across multiple threads simultaneously without any +/// synchronization. Conversely, a `Cache` must either be duplicated if using +/// the same `Regex` from multiple threads, or else there must be some kind of +/// synchronization that guarantees exclusive access while it's in use by one +/// thread. +/// +/// A `Regex` attempts to do this synchronization for you by using a thread +/// pool internally. Its size scales roughly with the number of simultaneous +/// regex searches. +/// +/// For cases where one does not want to rely on a `Regex`'s internal thread +/// pool, lower level routines such as [`Regex::search_with`] are provided +/// that permit callers to pass a `Cache` into the search routine explicitly. +/// +/// General advice is that the thread pool is often more than good enough. 
+/// However, it may be possible to observe the effects of its latency, +/// especially when searching many small haystacks from many threads +/// simultaneously. +/// +/// Caches can be created from their corresponding `Regex` via +/// [`Regex::create_cache`]. A cache can only be used with either the `Regex` +/// that created it, or the `Regex` that was most recently used to reset it +/// with [`Cache::reset`]. Using a cache with any other `Regex` may result in +/// panics or incorrect results. +/// +/// # Example +/// +/// ``` +/// use regex_automata::{meta::Regex, Input, Match}; +/// +/// let re = Regex::new(r"(?-u)m\w+\s+m\w+")?; +/// let mut cache = re.create_cache(); +/// let input = Input::new("crazy janey and her mission man"); +/// assert_eq!( +/// Some(Match::must(0, 20..31)), +/// re.search_with(&mut cache, &input), +/// ); +/// +/// # Ok::<(), Box>(()) +/// ``` +#[derive(Debug, Clone)] +pub struct Cache { + pub(crate) capmatches: Captures, + pub(crate) pikevm: wrappers::PikeVMCache, + pub(crate) backtrack: wrappers::BoundedBacktrackerCache, + pub(crate) onepass: wrappers::OnePassCache, + pub(crate) hybrid: wrappers::HybridCache, + pub(crate) revhybrid: wrappers::ReverseHybridCache, +} + +impl Cache { + /// Creates a new `Cache` for use with this regex. + /// + /// The cache returned should only be used for searches for the given + /// `Regex`. If you want to reuse the cache for another `Regex`, then you + /// must call [`Cache::reset`] with that `Regex`. + pub fn new(re: &Regex) -> Cache { + re.create_cache() + } + + /// Reset this cache such that it can be used for searching with the given + /// `Regex` (and only that `Regex`). + /// + /// A cache reset permits potentially reusing memory already allocated in + /// this cache with a different `Regex`. + /// + /// # Example + /// + /// This shows how to re-purpose a cache for use with a different `Regex`. + /// + /// ``` + /// # if cfg!(miri) { return Ok(()); } // miri takes too long + /// use regex_automata::{meta::Regex, Match, Input}; + /// + /// let re1 = Regex::new(r"\w")?; + /// let re2 = Regex::new(r"\W")?; + /// + /// let mut cache = re1.create_cache(); + /// assert_eq!( + /// Some(Match::must(0, 0..2)), + /// re1.search_with(&mut cache, &Input::new("Δ")), + /// ); + /// + /// // Using 'cache' with re2 is not allowed. It may result in panics or + /// // incorrect results. In order to re-purpose the cache, we must reset + /// // it with the Regex we'd like to use it with. + /// // + /// // Similarly, after this reset, using the cache with 're1' is also not + /// // allowed. + /// cache.reset(&re2); + /// assert_eq!( + /// Some(Match::must(0, 0..3)), + /// re2.search_with(&mut cache, &Input::new("☃")), + /// ); + /// + /// # Ok::<(), Box>(()) + /// ``` + pub fn reset(&mut self, re: &Regex) { + re.imp.strat.reset_cache(self) + } + + /// Returns the heap memory usage, in bytes, of this cache. + /// + /// This does **not** include the stack size used up by this cache. To + /// compute that, use `std::mem::size_of::()`. + pub fn memory_usage(&self) -> usize { + let mut bytes = 0; + bytes += self.pikevm.memory_usage(); + bytes += self.backtrack.memory_usage(); + bytes += self.onepass.memory_usage(); + bytes += self.hybrid.memory_usage(); + bytes += self.revhybrid.memory_usage(); + bytes + } +} + +/// An object describing the configuration of a `Regex`. +/// +/// This configuration only includes options for the +/// non-syntax behavior of a `Regex`, and can be applied via the +/// [`Builder::configure`] method. 
For configuring the syntax options, see
+/// [`util::syntax::Config`](crate::util::syntax::Config).
+///
+/// # Example: lower the NFA size limit
+///
+/// In some cases, the default size limit might be too big. The size limit can
+/// be lowered, which will prevent large regex patterns from compiling.
+///
+/// ```
+/// # if cfg!(miri) { return Ok(()); } // miri takes too long
+/// use regex_automata::meta::Regex;
+///
+/// let result = Regex::builder()
+///     .configure(Regex::config().nfa_size_limit(Some(20 * (1<<10))))
+///     // Not even 20KB is enough to build a single large Unicode class!
+///     .build(r"\pL");
+/// assert!(result.is_err());
+///
+/// # Ok::<(), Box<dyn std::error::Error>>(())
+/// ```
+#[derive(Clone, Debug, Default)]
+pub struct Config {
+    // As with other configuration types in this crate, we put all our knobs
+    // in options so that we can distinguish between "default" and "not set."
+    // This makes it possible to easily combine multiple configurations
+    // without default values overwriting explicitly specified values. See the
+    // 'overwrite' method.
+    //
+    // For docs on the fields below, see the corresponding method setters.
+    match_kind: Option<MatchKind>,
+    utf8_empty: Option<bool>,
+    autopre: Option<bool>,
+    pre: Option<Option<Prefilter>>,
+    which_captures: Option<WhichCaptures>,
+    nfa_size_limit: Option<Option<usize>>,
+    onepass_size_limit: Option<Option<usize>>,
+    hybrid_cache_capacity: Option<usize>,
+    hybrid: Option<bool>,
+    dfa: Option<bool>,
+    dfa_size_limit: Option<Option<usize>>,
+    dfa_state_limit: Option<Option<usize>>,
+    onepass: Option<bool>,
+    backtrack: Option<bool>,
+    byte_classes: Option<bool>,
+    line_terminator: Option<u8>,
+}
+
+impl Config {
+    /// Create a new configuration object for a `Regex`.
+    pub fn new() -> Config {
+        Config::default()
+    }
+
+    /// Set the match semantics for a `Regex`.
+    ///
+    /// The default value is [`MatchKind::LeftmostFirst`].
+    ///
+    /// # Example
+    ///
+    /// ```
+    /// use regex_automata::{meta::Regex, Match, MatchKind};
+    ///
+    /// // By default, leftmost-first semantics are used, which
+    /// // disambiguates matches at the same position by selecting
+    /// // the one that corresponds earlier in the pattern.
+    /// let re = Regex::new("sam|samwise")?;
+    /// assert_eq!(Some(Match::must(0, 0..3)), re.find("samwise"));
+    ///
+    /// // But with 'all' semantics, match priority is ignored
+    /// // and all match states are included. When coupled with
+    /// // a leftmost search, the search will report the last
+    /// // possible match.
+    /// let re = Regex::builder()
+    ///     .configure(Regex::config().match_kind(MatchKind::All))
+    ///     .build("sam|samwise")?;
+    /// assert_eq!(Some(Match::must(0, 0..7)), re.find("samwise"));
+    /// // Beware that this can lead to skipping matches!
+    /// // Usually 'all' is used for anchored reverse searches
+    /// // only, or for overlapping searches.
+    /// assert_eq!(Some(Match::must(0, 4..11)), re.find("sam samwise"));
+    ///
+    /// # Ok::<(), Box<dyn std::error::Error>>(())
+    /// ```
+    pub fn match_kind(self, kind: MatchKind) -> Config {
+        Config { match_kind: Some(kind), ..self }
+    }
+
+    /// Toggles whether empty matches are permitted to occur between the code
+    /// units of a UTF-8 encoded codepoint.
+    ///
+    /// This should generally be enabled when searching a `&str` or anything
+    /// that you otherwise know is valid UTF-8. It should be disabled in all
+    /// other cases. Namely, if the haystack is not valid UTF-8 and this is
+    /// enabled, then behavior is unspecified.
+    ///
+    /// By default, this is enabled.
+ /// + /// # Example + /// + /// ``` + /// use regex_automata::{meta::Regex, Match}; + /// + /// let re = Regex::new("")?; + /// let got: Vec = re.find_iter("☃").collect(); + /// // Matches only occur at the beginning and end of the snowman. + /// assert_eq!(got, vec![ + /// Match::must(0, 0..0), + /// Match::must(0, 3..3), + /// ]); + /// + /// let re = Regex::builder() + /// .configure(Regex::config().utf8_empty(false)) + /// .build("")?; + /// let got: Vec = re.find_iter("☃").collect(); + /// // Matches now occur at every position! + /// assert_eq!(got, vec![ + /// Match::must(0, 0..0), + /// Match::must(0, 1..1), + /// Match::must(0, 2..2), + /// Match::must(0, 3..3), + /// ]); + /// + /// Ok::<(), Box>(()) + /// ``` + pub fn utf8_empty(self, yes: bool) -> Config { + Config { utf8_empty: Some(yes), ..self } + } + + /// Toggles whether automatic prefilter support is enabled. + /// + /// If this is disabled and [`Config::prefilter`] is not set, then the + /// meta regex engine will not use any prefilters. This can sometimes + /// be beneficial in cases where you know (or have measured) that the + /// prefilter leads to overall worse search performance. + /// + /// By default, this is enabled. + /// + /// # Example + /// + /// ``` + /// # if cfg!(miri) { return Ok(()); } // miri takes too long + /// use regex_automata::{meta::Regex, Match}; + /// + /// let re = Regex::builder() + /// .configure(Regex::config().auto_prefilter(false)) + /// .build(r"Bruce \w+")?; + /// let hay = "Hello Bruce Springsteen!"; + /// assert_eq!(Some(Match::must(0, 6..23)), re.find(hay)); + /// + /// Ok::<(), Box>(()) + /// ``` + pub fn auto_prefilter(self, yes: bool) -> Config { + Config { autopre: Some(yes), ..self } + } + + /// Overrides and sets the prefilter to use inside a `Regex`. + /// + /// This permits one to forcefully set a prefilter in cases where the + /// caller knows better than whatever the automatic prefilter logic is + /// capable of. + /// + /// By default, this is set to `None` and an automatic prefilter will be + /// used if one could be built. (Assuming [`Config::auto_prefilter`] is + /// enabled, which it is by default.) + /// + /// # Example + /// + /// This example shows how to set your own prefilter. In the case of a + /// pattern like `Bruce \w+`, the automatic prefilter is likely to be + /// constructed in a way that it will look for occurrences of `Bruce `. + /// In most cases, this is the best choice. But in some cases, it may be + /// the case that running `memchr` on `B` is the best choice. One can + /// achieve that behavior by overriding the automatic prefilter logic + /// and providing a prefilter that just matches `B`. + /// + /// ``` + /// # if cfg!(miri) { return Ok(()); } // miri takes too long + /// use regex_automata::{ + /// meta::Regex, + /// util::prefilter::Prefilter, + /// Match, MatchKind, + /// }; + /// + /// let pre = Prefilter::new(MatchKind::LeftmostFirst, &["B"]) + /// .expect("a prefilter"); + /// let re = Regex::builder() + /// .configure(Regex::config().prefilter(Some(pre))) + /// .build(r"Bruce \w+")?; + /// let hay = "Hello Bruce Springsteen!"; + /// assert_eq!(Some(Match::must(0, 6..23)), re.find(hay)); + /// + /// # Ok::<(), Box>(()) + /// ``` + /// + /// # Example: incorrect prefilters can lead to incorrect results! + /// + /// Be warned that setting an incorrect prefilter can lead to missed + /// matches. So if you use this option, ensure your prefilter can _never_ + /// report false negatives. 
(A false positive is, on the other hand, quite + /// okay and generally unavoidable.) + /// + /// ``` + /// # if cfg!(miri) { return Ok(()); } // miri takes too long + /// use regex_automata::{ + /// meta::Regex, + /// util::prefilter::Prefilter, + /// Match, MatchKind, + /// }; + /// + /// let pre = Prefilter::new(MatchKind::LeftmostFirst, &["Z"]) + /// .expect("a prefilter"); + /// let re = Regex::builder() + /// .configure(Regex::config().prefilter(Some(pre))) + /// .build(r"Bruce \w+")?; + /// let hay = "Hello Bruce Springsteen!"; + /// // Oops! No match found, but there should be one! + /// assert_eq!(None, re.find(hay)); + /// + /// # Ok::<(), Box>(()) + /// ``` + pub fn prefilter(self, pre: Option) -> Config { + Config { pre: Some(pre), ..self } + } + + /// Configures what kinds of groups are compiled as "capturing" in the + /// underlying regex engine. + /// + /// This is set to [`WhichCaptures::All`] by default. Callers may wish to + /// use [`WhichCaptures::Implicit`] in cases where one wants avoid the + /// overhead of capture states for explicit groups. + /// + /// Note that another approach to avoiding the overhead of capture groups + /// is by using non-capturing groups in the regex pattern. That is, + /// `(?:a)` instead of `(a)`. This option is useful when you can't control + /// the concrete syntax but know that you don't need the underlying capture + /// states. For example, using `WhichCaptures::Implicit` will behave as if + /// all explicit capturing groups in the pattern were non-capturing. + /// + /// Setting this to `WhichCaptures::None` is usually not the right thing to + /// do. When no capture states are compiled, some regex engines (such as + /// the `PikeVM`) won't be able to report match offsets. This will manifest + /// as no match being found. Indeed, in order to enforce consistent + /// behavior, the meta regex engine will always report `None` for routines + /// that return match offsets even if one of its regex engines could + /// service the request. This avoids "match or not" behavior from being + /// influenced by user input (since user input can influence the selection + /// of the regex engine). + /// + /// # Example + /// + /// This example demonstrates how the results of capture groups can change + /// based on this option. First we show the default (all capture groups in + /// the pattern are capturing): + /// + /// ``` + /// use regex_automata::{meta::Regex, Match, Span}; + /// + /// let re = Regex::new(r"foo([0-9]+)bar")?; + /// let hay = "foo123bar"; + /// + /// let mut caps = re.create_captures(); + /// re.captures(hay, &mut caps); + /// assert_eq!(Some(Span::from(0..9)), caps.get_group(0)); + /// assert_eq!(Some(Span::from(3..6)), caps.get_group(1)); + /// + /// Ok::<(), Box>(()) + /// ``` + /// + /// And now we show the behavior when we only include implicit capture + /// groups. In this case, we can only find the overall match span, but the + /// spans of any other explicit group don't exist because they are treated + /// as non-capturing. (In effect, when `WhichCaptures::Implicit` is used, + /// there is no real point in using [`Regex::captures`] since it will never + /// be able to report more information than [`Regex::find`].) 
+ /// + /// ``` + /// use regex_automata::{ + /// meta::Regex, + /// nfa::thompson::WhichCaptures, + /// Match, + /// Span, + /// }; + /// + /// let re = Regex::builder() + /// .configure(Regex::config().which_captures(WhichCaptures::Implicit)) + /// .build(r"foo([0-9]+)bar")?; + /// let hay = "foo123bar"; + /// + /// let mut caps = re.create_captures(); + /// re.captures(hay, &mut caps); + /// assert_eq!(Some(Span::from(0..9)), caps.get_group(0)); + /// assert_eq!(None, caps.get_group(1)); + /// + /// Ok::<(), Box>(()) + /// ``` + /// + /// # Example: strange `Regex::find` behavior + /// + /// As noted above, when using [`WhichCaptures::None`], this means that + /// `Regex::is_match` could return `true` while `Regex::find` returns + /// `None`: + /// + /// ``` + /// use regex_automata::{ + /// meta::Regex, + /// nfa::thompson::WhichCaptures, + /// Input, + /// Match, + /// Span, + /// }; + /// + /// let re = Regex::builder() + /// .configure(Regex::config().which_captures(WhichCaptures::None)) + /// .build(r"foo([0-9]+)bar")?; + /// let hay = "foo123bar"; + /// + /// assert!(re.is_match(hay)); + /// assert_eq!(re.find(hay), None); + /// assert_eq!(re.search_half(&Input::new(hay)), None); + /// + /// Ok::<(), Box>(()) + /// ``` + pub fn which_captures(mut self, which_captures: WhichCaptures) -> Config { + self.which_captures = Some(which_captures); + self + } + + /// Sets the size limit, in bytes, to enforce on the construction of every + /// NFA build by the meta regex engine. + /// + /// Setting it to `None` disables the limit. This is not recommended if + /// you're compiling untrusted patterns. + /// + /// Note that this limit is applied to _each_ NFA built, and if any of + /// them exceed the limit, then construction will fail. This limit does + /// _not_ correspond to the total memory used by all NFAs in the meta regex + /// engine. + /// + /// This defaults to some reasonable number that permits most reasonable + /// patterns. + /// + /// # Example + /// + /// ``` + /// # if cfg!(miri) { return Ok(()); } // miri takes too long + /// use regex_automata::meta::Regex; + /// + /// let result = Regex::builder() + /// .configure(Regex::config().nfa_size_limit(Some(20 * (1<<10)))) + /// // Not even 20KB is enough to build a single large Unicode class! + /// .build(r"\pL"); + /// assert!(result.is_err()); + /// + /// // But notice that building such a regex with the exact same limit + /// // can succeed depending on other aspects of the configuration. For + /// // example, a single *forward* NFA will (at time of writing) fit into + /// // the 20KB limit, but a *reverse* NFA of the same pattern will not. + /// // So if one configures a meta regex such that a reverse NFA is never + /// // needed and thus never built, then the 20KB limit will be enough for + /// // a pattern like \pL! + /// let result = Regex::builder() + /// .configure(Regex::config() + /// .nfa_size_limit(Some(20 * (1<<10))) + /// // The DFAs are the only thing that (currently) need a reverse + /// // NFA. So if both are disabled, the meta regex engine will + /// // skip building the reverse NFA. Note that this isn't an API + /// // guarantee. A future semver compatible version may introduce + /// // new use cases for a reverse NFA. + /// .hybrid(false) + /// .dfa(false) + /// ) + /// // Not even 20KB is enough to build a single large Unicode class! 
+ /// .build(r"\pL"); + /// assert!(result.is_ok()); + /// + /// # Ok::<(), Box>(()) + /// ``` + pub fn nfa_size_limit(self, limit: Option) -> Config { + Config { nfa_size_limit: Some(limit), ..self } + } + + /// Sets the size limit, in bytes, for the one-pass DFA. + /// + /// Setting it to `None` disables the limit. Disabling the limit is + /// strongly discouraged when compiling untrusted patterns. Even if the + /// patterns are trusted, it still may not be a good idea, since a one-pass + /// DFA can use a lot of memory. With that said, as the size of a regex + /// increases, the likelihood of it being one-pass likely decreases. + /// + /// This defaults to some reasonable number that permits most reasonable + /// one-pass patterns. + /// + /// # Example + /// + /// This shows how to set the one-pass DFA size limit. Note that since + /// a one-pass DFA is an optional component of the meta regex engine, + /// this size limit only impacts what is built internally and will never + /// determine whether a `Regex` itself fails to build. + /// + /// ``` + /// # if cfg!(miri) { return Ok(()); } // miri takes too long + /// use regex_automata::meta::Regex; + /// + /// let result = Regex::builder() + /// .configure(Regex::config().onepass_size_limit(Some(2 * (1<<20)))) + /// .build(r"\pL{5}"); + /// assert!(result.is_ok()); + /// # Ok::<(), Box>(()) + /// ``` + pub fn onepass_size_limit(self, limit: Option) -> Config { + Config { onepass_size_limit: Some(limit), ..self } + } + + /// Set the cache capacity, in bytes, for the lazy DFA. + /// + /// The cache capacity of the lazy DFA determines approximately how much + /// heap memory it is allowed to use to store its state transitions. The + /// state transitions are computed at search time, and if the cache fills + /// up it, it is cleared. At this point, any previously generated state + /// transitions are lost and are re-generated if they're needed again. + /// + /// This sort of cache filling and clearing works quite well _so long as + /// cache clearing happens infrequently_. If it happens too often, then the + /// meta regex engine will stop using the lazy DFA and switch over to a + /// different regex engine. + /// + /// In cases where the cache is cleared too often, it may be possible to + /// give the cache more space and reduce (or eliminate) how often it is + /// cleared. Similarly, sometimes a regex is so big that the lazy DFA isn't + /// used at all if its cache capacity isn't big enough. + /// + /// The capacity set here is a _limit_ on how much memory is used. The + /// actual memory used is only allocated as it's needed. + /// + /// Determining the right value for this is a little tricky and will likely + /// required some profiling. Enabling the `logging` feature and setting the + /// log level to `trace` will also tell you how often the cache is being + /// cleared. + /// + /// # Example + /// + /// ``` + /// # if cfg!(miri) { return Ok(()); } // miri takes too long + /// use regex_automata::meta::Regex; + /// + /// let result = Regex::builder() + /// .configure(Regex::config().hybrid_cache_capacity(20 * (1<<20))) + /// .build(r"\pL{5}"); + /// assert!(result.is_ok()); + /// # Ok::<(), Box>(()) + /// ``` + pub fn hybrid_cache_capacity(self, limit: usize) -> Config { + Config { hybrid_cache_capacity: Some(limit), ..self } + } + + /// Sets the size limit, in bytes, for heap memory used for a fully + /// compiled DFA. + /// + /// **NOTE:** If you increase this, you'll likely also need to increase + /// [`Config::dfa_state_limit`]. 
+ /// + /// In contrast to the lazy DFA, building a full DFA requires computing + /// all of its state transitions up front. This can be a very expensive + /// process, and runs in worst case `2^n` time and space (where `n` is + /// proportional to the size of the regex). However, a full DFA unlocks + /// some additional optimization opportunities. + /// + /// Because full DFAs can be so expensive, the default limits for them are + /// incredibly small. Generally speaking, if your regex is moderately big + /// or if you're using Unicode features (`\w` is Unicode-aware by default + /// for example), then you can expect that the meta regex engine won't even + /// attempt to build a DFA for it. + /// + /// If this and [`Config::dfa_state_limit`] are set to `None`, then the + /// meta regex will not use any sort of limits when deciding whether to + /// build a DFA. This in turn makes construction of a `Regex` take + /// worst case exponential time and space. Even short patterns can result + /// in huge space blow ups. So it is strongly recommended to keep some kind + /// of limit set! + /// + /// The default is set to a small number that permits some simple regexes + /// to get compiled into DFAs in reasonable time. + /// + /// # Example + /// + /// ``` + /// # if cfg!(miri) { return Ok(()); } // miri takes too long + /// use regex_automata::meta::Regex; + /// + /// let result = Regex::builder() + /// // 100MB is much bigger than the default. + /// .configure(Regex::config() + /// .dfa_size_limit(Some(100 * (1<<20))) + /// // We don't care about size too much here, so just + /// // remove the NFA state limit altogether. + /// .dfa_state_limit(None)) + /// .build(r"\pL{5}"); + /// assert!(result.is_ok()); + /// # Ok::<(), Box>(()) + /// ``` + pub fn dfa_size_limit(self, limit: Option) -> Config { + Config { dfa_size_limit: Some(limit), ..self } + } + + /// Sets a limit on the total number of NFA states, beyond which, a full + /// DFA is not attempted to be compiled. + /// + /// This limit works in concert with [`Config::dfa_size_limit`]. Namely, + /// where as `Config::dfa_size_limit` is applied by attempting to construct + /// a DFA, this limit is used to avoid the attempt in the first place. This + /// is useful to avoid hefty initialization costs associated with building + /// a DFA for cases where it is obvious the DFA will ultimately be too big. + /// + /// By default, this is set to a very small number. + /// + /// # Example + /// + /// ``` + /// # if cfg!(miri) { return Ok(()); } // miri takes too long + /// use regex_automata::meta::Regex; + /// + /// let result = Regex::builder() + /// .configure(Regex::config() + /// // Sometimes the default state limit rejects DFAs even + /// // if they would fit in the size limit. Here, we disable + /// // the check on the number of NFA states and just rely on + /// // the size limit. + /// .dfa_state_limit(None)) + /// .build(r"(?-u)\w{30}"); + /// assert!(result.is_ok()); + /// # Ok::<(), Box>(()) + /// ``` + pub fn dfa_state_limit(self, limit: Option) -> Config { + Config { dfa_state_limit: Some(limit), ..self } + } + + /// Whether to attempt to shrink the size of the alphabet for the regex + /// pattern or not. When enabled, the alphabet is shrunk into a set of + /// equivalence classes, where every byte in the same equivalence class + /// cannot discriminate between a match or non-match. + /// + /// **WARNING:** This is only useful for debugging DFAs. Disabling this + /// does not yield any speed advantages. 
Indeed, disabling it can result + /// in much higher memory usage. Disabling byte classes is useful for + /// debugging the actual generated transitions because it lets one see the + /// transitions defined on actual bytes instead of the equivalence classes. + /// + /// This option is enabled by default and should never be disabled unless + /// one is debugging the meta regex engine's internals. + /// + /// # Example + /// + /// ``` + /// use regex_automata::{meta::Regex, Match}; + /// + /// let re = Regex::builder() + /// .configure(Regex::config().byte_classes(false)) + /// .build(r"[a-z]+")?; + /// let hay = "!!quux!!"; + /// assert_eq!(Some(Match::must(0, 2..6)), re.find(hay)); + /// + /// # Ok::<(), Box>(()) + /// ``` + pub fn byte_classes(self, yes: bool) -> Config { + Config { byte_classes: Some(yes), ..self } + } + + /// Set the line terminator to be used by the `^` and `$` anchors in + /// multi-line mode. + /// + /// This option has no effect when CRLF mode is enabled. That is, + /// regardless of this setting, `(?Rm:^)` and `(?Rm:$)` will always treat + /// `\r` and `\n` as line terminators (and will never match between a `\r` + /// and a `\n`). + /// + /// By default, `\n` is the line terminator. + /// + /// **Warning**: This does not change the behavior of `.`. To do that, + /// you'll need to configure the syntax option + /// [`syntax::Config::line_terminator`](crate::util::syntax::Config::line_terminator) + /// in addition to this. Otherwise, `.` will continue to match any + /// character other than `\n`. + /// + /// # Example + /// + /// ``` + /// use regex_automata::{meta::Regex, util::syntax, Match}; + /// + /// let re = Regex::builder() + /// .syntax(syntax::Config::new().multi_line(true)) + /// .configure(Regex::config().line_terminator(b'\x00')) + /// .build(r"^foo$")?; + /// let hay = "\x00foo\x00"; + /// assert_eq!(Some(Match::must(0, 1..4)), re.find(hay)); + /// + /// # Ok::<(), Box>(()) + /// ``` + pub fn line_terminator(self, byte: u8) -> Config { + Config { line_terminator: Some(byte), ..self } + } + + /// Toggle whether the hybrid NFA/DFA (also known as the "lazy DFA") should + /// be available for use by the meta regex engine. + /// + /// Enabling this does not necessarily mean that the lazy DFA will + /// definitely be used. It just means that it will be _available_ for use + /// if the meta regex engine thinks it will be useful. + /// + /// When the `hybrid` crate feature is enabled, then this is enabled by + /// default. Otherwise, if the crate feature is disabled, then this is + /// always disabled, regardless of its setting by the caller. + pub fn hybrid(self, yes: bool) -> Config { + Config { hybrid: Some(yes), ..self } + } + + /// Toggle whether a fully compiled DFA should be available for use by the + /// meta regex engine. + /// + /// Enabling this does not necessarily mean that a DFA will definitely be + /// used. It just means that it will be _available_ for use if the meta + /// regex engine thinks it will be useful. + /// + /// When the `dfa-build` crate feature is enabled, then this is enabled by + /// default. Otherwise, if the crate feature is disabled, then this is + /// always disabled, regardless of its setting by the caller. + pub fn dfa(self, yes: bool) -> Config { + Config { dfa: Some(yes), ..self } + } + + /// Toggle whether a one-pass DFA should be available for use by the meta + /// regex engine. + /// + /// Enabling this does not necessarily mean that a one-pass DFA will + /// definitely be used. 
It just means that it will be _available_ for + /// use if the meta regex engine thinks it will be useful. (Indeed, a + /// one-pass DFA can only be used when the regex is one-pass. See the + /// [`dfa::onepass`](crate::dfa::onepass) module for more details.) + /// + /// When the `dfa-onepass` crate feature is enabled, then this is enabled + /// by default. Otherwise, if the crate feature is disabled, then this is + /// always disabled, regardless of its setting by the caller. + pub fn onepass(self, yes: bool) -> Config { + Config { onepass: Some(yes), ..self } + } + + /// Toggle whether a bounded backtracking regex engine should be available + /// for use by the meta regex engine. + /// + /// Enabling this does not necessarily mean that a bounded backtracker will + /// definitely be used. It just means that it will be _available_ for use + /// if the meta regex engine thinks it will be useful. + /// + /// When the `nfa-backtrack` crate feature is enabled, then this is enabled + /// by default. Otherwise, if the crate feature is disabled, then this is + /// always disabled, regardless of its setting by the caller. + pub fn backtrack(self, yes: bool) -> Config { + Config { backtrack: Some(yes), ..self } + } + + /// Returns the match kind on this configuration, as set by + /// [`Config::match_kind`]. + /// + /// If it was not explicitly set, then a default value is returned. + pub fn get_match_kind(&self) -> MatchKind { + self.match_kind.unwrap_or(MatchKind::LeftmostFirst) + } + + /// Returns whether empty matches must fall on valid UTF-8 boundaries, as + /// set by [`Config::utf8_empty`]. + /// + /// If it was not explicitly set, then a default value is returned. + pub fn get_utf8_empty(&self) -> bool { + self.utf8_empty.unwrap_or(true) + } + + /// Returns whether automatic prefilters are enabled, as set by + /// [`Config::auto_prefilter`]. + /// + /// If it was not explicitly set, then a default value is returned. + pub fn get_auto_prefilter(&self) -> bool { + self.autopre.unwrap_or(true) + } + + /// Returns a manually set prefilter, if one was set by + /// [`Config::prefilter`]. + /// + /// If it was not explicitly set, then a default value is returned. + pub fn get_prefilter(&self) -> Option<&Prefilter> { + self.pre.as_ref().unwrap_or(&None).as_ref() + } + + /// Returns the capture configuration, as set by + /// [`Config::which_captures`]. + /// + /// If it was not explicitly set, then a default value is returned. + pub fn get_which_captures(&self) -> WhichCaptures { + self.which_captures.unwrap_or(WhichCaptures::All) + } + + /// Returns NFA size limit, as set by [`Config::nfa_size_limit`]. + /// + /// If it was not explicitly set, then a default value is returned. + pub fn get_nfa_size_limit(&self) -> Option { + self.nfa_size_limit.unwrap_or(Some(10 * (1 << 20))) + } + + /// Returns one-pass DFA size limit, as set by + /// [`Config::onepass_size_limit`]. + /// + /// If it was not explicitly set, then a default value is returned. + pub fn get_onepass_size_limit(&self) -> Option { + self.onepass_size_limit.unwrap_or(Some(1 * (1 << 20))) + } + + /// Returns hybrid NFA/DFA cache capacity, as set by + /// [`Config::hybrid_cache_capacity`]. + /// + /// If it was not explicitly set, then a default value is returned. + pub fn get_hybrid_cache_capacity(&self) -> usize { + self.hybrid_cache_capacity.unwrap_or(2 * (1 << 20)) + } + + /// Returns DFA size limit, as set by [`Config::dfa_size_limit`]. + /// + /// If it was not explicitly set, then a default value is returned. 
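+    ///
+    /// # Example
+    ///
+    /// A small sketch of the setter/getter round trip. The exact default is
+    /// an implementation detail, so we only check that some limit is set by
+    /// default:
+    ///
+    /// ```
+    /// use regex_automata::meta::Regex;
+    ///
+    /// // The default is a small, but present, limit.
+    /// assert!(Regex::config().get_dfa_size_limit().is_some());
+    /// // Setting the limit to None disables it entirely.
+    /// assert_eq!(
+    ///     None,
+    ///     Regex::config().dfa_size_limit(None).get_dfa_size_limit(),
+    /// );
+    /// ```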
+ pub fn get_dfa_size_limit(&self) -> Option { + // The default for this is VERY small because building a full DFA is + // ridiculously costly. But for regexes that are very small, it can be + // beneficial to use a full DFA. In particular, a full DFA can enable + // additional optimizations via something called "accelerated" states. + // Namely, when there's a state with only a few outgoing transitions, + // we can temporary suspend walking the transition table and use memchr + // for just those outgoing transitions to skip ahead very quickly. + // + // Generally speaking, if Unicode is enabled in your regex and you're + // using some kind of Unicode feature, then it's going to blow this + // size limit. Moreover, Unicode tends to defeat the "accelerated" + // state optimization too, so it's a double whammy. + // + // We also use a limit on the number of NFA states to avoid even + // starting the DFA construction process. Namely, DFA construction + // itself could make lots of initial allocs proportional to the size + // of the NFA, and if the NFA is large, it doesn't make sense to pay + // that cost if we know it's likely to be blown by a large margin. + self.dfa_size_limit.unwrap_or(Some(40 * (1 << 10))) + } + + /// Returns DFA size limit in terms of the number of states in the NFA, as + /// set by [`Config::dfa_state_limit`]. + /// + /// If it was not explicitly set, then a default value is returned. + pub fn get_dfa_state_limit(&self) -> Option { + // Again, as with the size limit, we keep this very small. + self.dfa_state_limit.unwrap_or(Some(30)) + } + + /// Returns whether byte classes are enabled, as set by + /// [`Config::byte_classes`]. + /// + /// If it was not explicitly set, then a default value is returned. + pub fn get_byte_classes(&self) -> bool { + self.byte_classes.unwrap_or(true) + } + + /// Returns the line terminator for this configuration, as set by + /// [`Config::line_terminator`]. + /// + /// If it was not explicitly set, then a default value is returned. + pub fn get_line_terminator(&self) -> u8 { + self.line_terminator.unwrap_or(b'\n') + } + + /// Returns whether the hybrid NFA/DFA regex engine may be used, as set by + /// [`Config::hybrid`]. + /// + /// If it was not explicitly set, then a default value is returned. + pub fn get_hybrid(&self) -> bool { + #[cfg(feature = "hybrid")] + { + self.hybrid.unwrap_or(true) + } + #[cfg(not(feature = "hybrid"))] + { + false + } + } + + /// Returns whether the DFA regex engine may be used, as set by + /// [`Config::dfa`]. + /// + /// If it was not explicitly set, then a default value is returned. + pub fn get_dfa(&self) -> bool { + #[cfg(feature = "dfa-build")] + { + self.dfa.unwrap_or(true) + } + #[cfg(not(feature = "dfa-build"))] + { + false + } + } + + /// Returns whether the one-pass DFA regex engine may be used, as set by + /// [`Config::onepass`]. + /// + /// If it was not explicitly set, then a default value is returned. + pub fn get_onepass(&self) -> bool { + #[cfg(feature = "dfa-onepass")] + { + self.onepass.unwrap_or(true) + } + #[cfg(not(feature = "dfa-onepass"))] + { + false + } + } + + /// Returns whether the bounded backtracking regex engine may be used, as + /// set by [`Config::backtrack`]. + /// + /// If it was not explicitly set, then a default value is returned. 
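+    ///
+    /// # Example
+    ///
+    /// A brief sketch, assuming the `nfa-backtrack` crate feature is enabled
+    /// (it is part of the default feature set):
+    ///
+    /// ```
+    /// use regex_automata::meta::Regex;
+    ///
+    /// // Enabled by default...
+    /// assert!(Regex::config().get_backtrack());
+    /// // ...but it can be turned off explicitly.
+    /// assert!(!Regex::config().backtrack(false).get_backtrack());
+    /// ```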
+ pub fn get_backtrack(&self) -> bool { + #[cfg(feature = "nfa-backtrack")] + { + self.backtrack.unwrap_or(true) + } + #[cfg(not(feature = "nfa-backtrack"))] + { + false + } + } + + /// Overwrite the default configuration such that the options in `o` are + /// always used. If an option in `o` is not set, then the corresponding + /// option in `self` is used. If it's not set in `self` either, then it + /// remains not set. + pub(crate) fn overwrite(&self, o: Config) -> Config { + Config { + match_kind: o.match_kind.or(self.match_kind), + utf8_empty: o.utf8_empty.or(self.utf8_empty), + autopre: o.autopre.or(self.autopre), + pre: o.pre.or_else(|| self.pre.clone()), + which_captures: o.which_captures.or(self.which_captures), + nfa_size_limit: o.nfa_size_limit.or(self.nfa_size_limit), + onepass_size_limit: o + .onepass_size_limit + .or(self.onepass_size_limit), + hybrid_cache_capacity: o + .hybrid_cache_capacity + .or(self.hybrid_cache_capacity), + hybrid: o.hybrid.or(self.hybrid), + dfa: o.dfa.or(self.dfa), + dfa_size_limit: o.dfa_size_limit.or(self.dfa_size_limit), + dfa_state_limit: o.dfa_state_limit.or(self.dfa_state_limit), + onepass: o.onepass.or(self.onepass), + backtrack: o.backtrack.or(self.backtrack), + byte_classes: o.byte_classes.or(self.byte_classes), + line_terminator: o.line_terminator.or(self.line_terminator), + } + } +} + +/// A builder for configuring and constructing a `Regex`. +/// +/// The builder permits configuring two different aspects of a `Regex`: +/// +/// * [`Builder::configure`] will set high-level configuration options as +/// described by a [`Config`]. +/// * [`Builder::syntax`] will set the syntax level configuration options +/// as described by a [`util::syntax::Config`](crate::util::syntax::Config). +/// This only applies when building a `Regex` from pattern strings. +/// +/// Once configured, the builder can then be used to construct a `Regex` from +/// one of 4 different inputs: +/// +/// * [`Builder::build`] creates a regex from a single pattern string. +/// * [`Builder::build_many`] creates a regex from many pattern strings. +/// * [`Builder::build_from_hir`] creates a regex from a +/// [`regex-syntax::Hir`](Hir) expression. +/// * [`Builder::build_many_from_hir`] creates a regex from many +/// [`regex-syntax::Hir`](Hir) expressions. +/// +/// The latter two methods in particular provide a way to construct a fully +/// feature regular expression matcher directly from an `Hir` expression +/// without having to first convert it to a string. (This is in contrast to the +/// top-level `regex` crate which intentionally provides no such API in order +/// to avoid making `regex-syntax` a public dependency.) +/// +/// As a convenience, this builder may be created via [`Regex::builder`], which +/// may help avoid an extra import. +/// +/// # Example: change the line terminator +/// +/// This example shows how to enable multi-line mode by default and change the +/// line terminator to the NUL byte: +/// +/// ``` +/// use regex_automata::{meta::Regex, util::syntax, Match}; +/// +/// let re = Regex::builder() +/// .syntax(syntax::Config::new().multi_line(true)) +/// .configure(Regex::config().line_terminator(b'\x00')) +/// .build(r"^foo$")?; +/// let hay = "\x00foo\x00"; +/// assert_eq!(Some(Match::must(0, 1..4)), re.find(hay)); +/// +/// # Ok::<(), Box>(()) +/// ``` +/// +/// # Example: disable UTF-8 requirement +/// +/// By default, regex patterns are required to match UTF-8. This includes +/// regex patterns that can produce matches of length zero. 
In the case of an +/// empty match, by default, matches will not appear between the code units of +/// a UTF-8 encoded codepoint. +/// +/// However, it can be useful to disable this requirement, particularly if +/// you're searching things like `&[u8]` that are not known to be valid UTF-8. +/// +/// ``` +/// use regex_automata::{meta::Regex, util::syntax, Match}; +/// +/// let mut builder = Regex::builder(); +/// // Disables the requirement that non-empty matches match UTF-8. +/// builder.syntax(syntax::Config::new().utf8(false)); +/// // Disables the requirement that empty matches match UTF-8 boundaries. +/// builder.configure(Regex::config().utf8_empty(false)); +/// +/// // We can match raw bytes via \xZZ syntax, but we need to disable +/// // Unicode mode to do that. We could disable it everywhere, or just +/// // selectively, as shown here. +/// let re = builder.build(r"(?-u:\xFF)foo(?-u:\xFF)")?; +/// let hay = b"\xFFfoo\xFF"; +/// assert_eq!(Some(Match::must(0, 0..5)), re.find(hay)); +/// +/// // We can also match between code units. +/// let re = builder.build(r"")?; +/// let hay = "☃"; +/// assert_eq!(re.find_iter(hay).collect::>(), vec![ +/// Match::must(0, 0..0), +/// Match::must(0, 1..1), +/// Match::must(0, 2..2), +/// Match::must(0, 3..3), +/// ]); +/// +/// # Ok::<(), Box>(()) +/// ``` +#[derive(Clone, Debug)] +pub struct Builder { + config: Config, + ast: ast::parse::ParserBuilder, + hir: hir::translate::TranslatorBuilder, +} + +impl Builder { + /// Creates a new builder for configuring and constructing a [`Regex`]. + pub fn new() -> Builder { + Builder { + config: Config::default(), + ast: ast::parse::ParserBuilder::new(), + hir: hir::translate::TranslatorBuilder::new(), + } + } + + /// Builds a `Regex` from a single pattern string. + /// + /// If there was a problem parsing the pattern or a problem turning it into + /// a regex matcher, then an error is returned. + /// + /// # Example + /// + /// This example shows how to configure syntax options. + /// + /// ``` + /// use regex_automata::{meta::Regex, util::syntax, Match}; + /// + /// let re = Regex::builder() + /// .syntax(syntax::Config::new().crlf(true).multi_line(true)) + /// .build(r"^foo$")?; + /// let hay = "\r\nfoo\r\n"; + /// assert_eq!(Some(Match::must(0, 2..5)), re.find(hay)); + /// + /// # Ok::<(), Box>(()) + /// ``` + pub fn build(&self, pattern: &str) -> Result { + self.build_many(&[pattern]) + } + + /// Builds a `Regex` from many pattern strings. + /// + /// If there was a problem parsing any of the patterns or a problem turning + /// them into a regex matcher, then an error is returned. + /// + /// # Example: finding the pattern that caused an error + /// + /// When a syntax error occurs, it is possible to ask which pattern + /// caused the syntax error. + /// + /// ``` + /// use regex_automata::{meta::Regex, PatternID}; + /// + /// let err = Regex::builder() + /// .build_many(&["a", "b", r"\p{Foo}", "c"]) + /// .unwrap_err(); + /// assert_eq!(Some(PatternID::must(2)), err.pattern()); + /// ``` + /// + /// # Example: zero patterns is valid + /// + /// Building a regex with zero patterns results in a regex that never + /// matches anything. Because this routine is generic, passing an empty + /// slice usually requires a turbo-fish (or something else to help type + /// inference). 
+ /// + /// ``` + /// use regex_automata::{meta::Regex, util::syntax, Match}; + /// + /// let re = Regex::builder() + /// .build_many::<&str>(&[])?; + /// assert_eq!(None, re.find("")); + /// + /// # Ok::<(), Box>(()) + /// ``` + pub fn build_many>( + &self, + patterns: &[P], + ) -> Result { + use crate::util::primitives::IteratorIndexExt; + log! { + debug!("building meta regex with {} patterns:", patterns.len()); + for (pid, p) in patterns.iter().with_pattern_ids() { + let p = p.as_ref(); + // We might split a grapheme with this truncation logic, but + // that's fine. We at least avoid splitting a codepoint. + let maxoff = p + .char_indices() + .map(|(i, ch)| i + ch.len_utf8()) + .take(1000) + .last() + .unwrap_or(0); + if maxoff < p.len() { + debug!("{pid:?}: {}[... snip ...]", &p[..maxoff]); + } else { + debug!("{pid:?}: {p}"); + } + } + } + let (mut asts, mut hirs) = (vec![], vec![]); + for (pid, p) in patterns.iter().with_pattern_ids() { + let ast = self + .ast + .build() + .parse(p.as_ref()) + .map_err(|err| BuildError::ast(pid, err))?; + asts.push(ast); + } + for ((pid, p), ast) in + patterns.iter().with_pattern_ids().zip(asts.iter()) + { + let hir = self + .hir + .build() + .translate(p.as_ref(), ast) + .map_err(|err| BuildError::hir(pid, err))?; + hirs.push(hir); + } + self.build_many_from_hir(&hirs) + } + + /// Builds a `Regex` directly from an `Hir` expression. + /// + /// This is useful if you needed to parse a pattern string into an `Hir` + /// for other reasons (such as analysis or transformations). This routine + /// permits building a `Regex` directly from the `Hir` expression instead + /// of first converting the `Hir` back to a pattern string. + /// + /// When using this method, any options set via [`Builder::syntax`] are + /// ignored. Namely, the syntax options only apply when parsing a pattern + /// string, which isn't relevant here. + /// + /// If there was a problem building the underlying regex matcher for the + /// given `Hir`, then an error is returned. + /// + /// # Example + /// + /// This example shows how one can hand-construct an `Hir` expression and + /// build a regex from it without doing any parsing at all. + /// + /// ``` + /// use { + /// regex_automata::{meta::Regex, Match}, + /// regex_syntax::hir::{Hir, Look}, + /// }; + /// + /// // (?Rm)^foo$ + /// let hir = Hir::concat(vec![ + /// Hir::look(Look::StartCRLF), + /// Hir::literal("foo".as_bytes()), + /// Hir::look(Look::EndCRLF), + /// ]); + /// let re = Regex::builder() + /// .build_from_hir(&hir)?; + /// let hay = "\r\nfoo\r\n"; + /// assert_eq!(Some(Match::must(0, 2..5)), re.find(hay)); + /// + /// Ok::<(), Box>(()) + /// ``` + pub fn build_from_hir(&self, hir: &Hir) -> Result { + self.build_many_from_hir(&[hir]) + } + + /// Builds a `Regex` directly from many `Hir` expressions. + /// + /// This is useful if you needed to parse pattern strings into `Hir` + /// expressions for other reasons (such as analysis or transformations). + /// This routine permits building a `Regex` directly from the `Hir` + /// expressions instead of first converting the `Hir` expressions back to + /// pattern strings. + /// + /// When using this method, any options set via [`Builder::syntax`] are + /// ignored. Namely, the syntax options only apply when parsing a pattern + /// string, which isn't relevant here. + /// + /// If there was a problem building the underlying regex matcher for the + /// given `Hir` expressions, then an error is returned. 
+ /// + /// Note that unlike [`Builder::build_many`], this can only fail as a + /// result of building the underlying matcher. In that case, there is + /// no single `Hir` expression that can be isolated as a reason for the + /// failure. So if this routine fails, it's not possible to determine which + /// `Hir` expression caused the failure. + /// + /// # Example + /// + /// This example shows how one can hand-construct multiple `Hir` + /// expressions and build a single regex from them without doing any + /// parsing at all. + /// + /// ``` + /// use { + /// regex_automata::{meta::Regex, Match}, + /// regex_syntax::hir::{Hir, Look}, + /// }; + /// + /// // (?Rm)^foo$ + /// let hir1 = Hir::concat(vec![ + /// Hir::look(Look::StartCRLF), + /// Hir::literal("foo".as_bytes()), + /// Hir::look(Look::EndCRLF), + /// ]); + /// // (?Rm)^bar$ + /// let hir2 = Hir::concat(vec![ + /// Hir::look(Look::StartCRLF), + /// Hir::literal("bar".as_bytes()), + /// Hir::look(Look::EndCRLF), + /// ]); + /// let re = Regex::builder() + /// .build_many_from_hir(&[&hir1, &hir2])?; + /// let hay = "\r\nfoo\r\nbar"; + /// let got: Vec = re.find_iter(hay).collect(); + /// let expected = vec![ + /// Match::must(0, 2..5), + /// Match::must(1, 7..10), + /// ]; + /// assert_eq!(expected, got); + /// + /// Ok::<(), Box>(()) + /// ``` + pub fn build_many_from_hir>( + &self, + hirs: &[H], + ) -> Result { + let config = self.config.clone(); + // We collect the HIRs into a vec so we can write internal routines + // with '&[&Hir]'. i.e., Don't use generics everywhere to keep code + // bloat down.. + let hirs: Vec<&Hir> = hirs.iter().map(|hir| hir.borrow()).collect(); + let info = RegexInfo::new(config, &hirs); + let strat = strategy::new(&info, &hirs)?; + let pool = { + let strat = Arc::clone(&strat); + let create: CachePoolFn = Box::new(move || strat.create_cache()); + Pool::new(create) + }; + Ok(Regex { imp: Arc::new(RegexI { strat, info }), pool }) + } + + /// Configure the behavior of a `Regex`. + /// + /// This configuration controls non-syntax options related to the behavior + /// of a `Regex`. This includes things like whether empty matches can split + /// a codepoint, prefilters, line terminators and a long list of options + /// for configuring which regex engines the meta regex engine will be able + /// to use internally. + /// + /// # Example + /// + /// This example shows how to disable UTF-8 empty mode. This will permit + /// empty matches to occur between the UTF-8 encoding of a codepoint. + /// + /// ``` + /// use regex_automata::{meta::Regex, Match}; + /// + /// let re = Regex::new("")?; + /// let got: Vec = re.find_iter("☃").collect(); + /// // Matches only occur at the beginning and end of the snowman. + /// assert_eq!(got, vec![ + /// Match::must(0, 0..0), + /// Match::must(0, 3..3), + /// ]); + /// + /// let re = Regex::builder() + /// .configure(Regex::config().utf8_empty(false)) + /// .build("")?; + /// let got: Vec = re.find_iter("☃").collect(); + /// // Matches now occur at every position! + /// assert_eq!(got, vec![ + /// Match::must(0, 0..0), + /// Match::must(0, 1..1), + /// Match::must(0, 2..2), + /// Match::must(0, 3..3), + /// ]); + /// + /// Ok::<(), Box>(()) + /// ``` + pub fn configure(&mut self, config: Config) -> &mut Builder { + self.config = self.config.overwrite(config); + self + } + + /// Configure the syntax options when parsing a pattern string while + /// building a `Regex`. + /// + /// These options _only_ apply when [`Builder::build`] or [`Builder::build_many`] + /// are used. 
The other build methods accept `Hir` values, which have + /// already been parsed. + /// + /// # Example + /// + /// This example shows how to enable case insensitive mode. + /// + /// ``` + /// use regex_automata::{meta::Regex, util::syntax, Match}; + /// + /// let re = Regex::builder() + /// .syntax(syntax::Config::new().case_insensitive(true)) + /// .build(r"δ")?; + /// assert_eq!(Some(Match::must(0, 0..2)), re.find(r"Δ")); + /// + /// Ok::<(), Box>(()) + /// ``` + pub fn syntax( + &mut self, + config: crate::util::syntax::Config, + ) -> &mut Builder { + config.apply_ast(&mut self.ast); + config.apply_hir(&mut self.hir); + self + } +} + +#[cfg(test)] +mod tests { + use super::*; + + // I found this in the course of building out the benchmark suite for + // rebar. + #[test] + fn regression_suffix_literal_count() { + let _ = env_logger::try_init(); + + let re = Regex::new(r"[a-zA-Z]+ing").unwrap(); + assert_eq!(1, re.find_iter("tingling").count()); + } +} diff --git a/vendor/regex-automata/src/meta/reverse_inner.rs b/vendor/regex-automata/src/meta/reverse_inner.rs new file mode 100644 index 00000000000000..3d78779f6f73ee --- /dev/null +++ b/vendor/regex-automata/src/meta/reverse_inner.rs @@ -0,0 +1,220 @@ +/*! +A module dedicated to plucking inner literals out of a regex pattern, and +then constructing a prefilter for them. We also include a regex pattern +"prefix" that corresponds to the bits of the regex that need to match before +the literals do. The reverse inner optimization then proceeds by looking for +matches of the inner literal(s), and then doing a reverse search of the prefix +from the start of the literal match to find the overall start position of the +match. + +The essential invariant we want to uphold here is that the literals we return +reflect a set where *at least* one of them must match in order for the overall +regex to match. We also need to maintain the invariant that the regex prefix +returned corresponds to the entirety of the regex up until the literals we +return. + +This somewhat limits what we can do. That is, if we a regex like +`\w+(@!|%%)\w+`, then we can pluck the `{@!, %%}` out and build a prefilter +from it. Then we just need to compile `\w+` in reverse. No fuss no muss. But if +we have a regex like \d+@!|\w+%%`, then we get kind of stymied. Technically, +we could still extract `{@!, %%}`, and it is true that at least of them must +match. But then, what is our regex prefix? Again, in theory, that could be +`\d+|\w+`, but that's not quite right, because the `\d+` only matches when `@!` +matches, and `\w+` only matches when `%%` matches. + +All of that is technically possible to do, but it seemingly requires a lot of +sophistication and machinery. Probably the way to tackle that is with some kind +of formalism and approach this problem more generally. + +For now, the code below basically just looks for a top-level concatenation. +And if it can find one, it looks for literals in each of the direct child +sub-expressions of that concatenation. If some good ones are found, we return +those and a concatenation of the Hir expressions seen up to that point. +*/ + +use alloc::vec::Vec; + +use regex_syntax::hir::{self, literal, Hir, HirKind}; + +use crate::{util::prefilter::Prefilter, MatchKind}; + +/// Attempts to extract an "inner" prefilter from the given HIR expressions. If +/// one was found, then a concatenation of the HIR expressions that precede it +/// is returned. 
+/// +/// The idea here is that the prefilter returned can be used to find candidate +/// matches. And then the HIR returned can be used to build a reverse regex +/// matcher, which will find the start of the candidate match. Finally, the +/// match still has to be confirmed with a normal anchored forward scan to find +/// the end position of the match. +/// +/// Note that this assumes leftmost-first match semantics, so callers must +/// not call this otherwise. +pub(crate) fn extract(hirs: &[&Hir]) -> Option<(Hir, Prefilter)> { + if hirs.len() != 1 { + debug!( + "skipping reverse inner optimization since it only \ + supports 1 pattern, {} were given", + hirs.len(), + ); + return None; + } + let mut concat = match top_concat(hirs[0]) { + Some(concat) => concat, + None => { + debug!( + "skipping reverse inner optimization because a top-level \ + concatenation could not found", + ); + return None; + } + }; + // We skip the first HIR because if it did have a prefix prefilter in it, + // we probably wouldn't be here looking for an inner prefilter. + for i in 1..concat.len() { + let hir = &concat[i]; + let pre = match prefilter(hir) { + None => continue, + Some(pre) => pre, + }; + // Even if we got a prefilter, if it isn't consider "fast," then we + // probably don't want to bother with it. Namely, since the reverse + // inner optimization requires some overhead, it likely only makes + // sense if the prefilter scan itself is (believed) to be much faster + // than the regex engine. + if !pre.is_fast() { + debug!( + "skipping extracted inner prefilter because \ + it probably isn't fast" + ); + continue; + } + let concat_suffix = Hir::concat(concat.split_off(i)); + let concat_prefix = Hir::concat(concat); + // Look for a prefilter again. Why? Because above we only looked for + // a prefilter on the individual 'hir', but we might be able to find + // something better and more discriminatory by looking at the entire + // suffix. We don't do this above to avoid making this loop worst case + // quadratic in the length of 'concat'. + let pre2 = match prefilter(&concat_suffix) { + None => pre, + Some(pre2) => { + if pre2.is_fast() { + pre2 + } else { + pre + } + } + }; + return Some((concat_prefix, pre2)); + } + debug!( + "skipping reverse inner optimization because a top-level \ + sub-expression with a fast prefilter could not be found" + ); + None +} + +/// Attempt to extract a prefilter from an HIR expression. +/// +/// We do a little massaging here to do our best that the prefilter we get out +/// of this is *probably* fast. Basically, the false positive rate has a much +/// higher impact for things like the reverse inner optimization because more +/// work needs to potentially be done for each candidate match. +/// +/// Note that this assumes leftmost-first match semantics, so callers must +/// not call this otherwise. +fn prefilter(hir: &Hir) -> Option { + let mut extractor = literal::Extractor::new(); + extractor.kind(literal::ExtractKind::Prefix); + let mut prefixes = extractor.extract(hir); + debug!( + "inner prefixes (len={:?}) extracted before optimization: {:?}", + prefixes.len(), + prefixes + ); + // Since these are inner literals, we know they cannot be exact. But the + // extractor doesn't know this. We mark them as inexact because this might + // impact literal optimization. Namely, optimization weights "all literals + // are exact" as very high, because it presumes that any match results in + // an overall match. But of course, that is not the case here. 
+ // + // In practice, this avoids plucking out a ASCII-only \s as an alternation + // of single-byte whitespace characters. + prefixes.make_inexact(); + prefixes.optimize_for_prefix_by_preference(); + debug!( + "inner prefixes (len={:?}) extracted after optimization: {:?}", + prefixes.len(), + prefixes + ); + prefixes + .literals() + .and_then(|lits| Prefilter::new(MatchKind::LeftmostFirst, lits)) +} + +/// Looks for a "top level" HirKind::Concat item in the given HIR. This will +/// try to return one even if it's embedded in a capturing group, but is +/// otherwise pretty conservative in what is returned. +/// +/// The HIR returned is a complete copy of the concat with all capturing +/// groups removed. In effect, the concat returned is "flattened" with respect +/// to capturing groups. This makes the detection logic above for prefixes +/// a bit simpler, and it works because 1) capturing groups never influence +/// whether a match occurs or not and 2) capturing groups are not used when +/// doing the reverse inner search to find the start of the match. +fn top_concat(mut hir: &Hir) -> Option> { + loop { + hir = match hir.kind() { + HirKind::Empty + | HirKind::Literal(_) + | HirKind::Class(_) + | HirKind::Look(_) + | HirKind::Repetition(_) + | HirKind::Alternation(_) => return None, + HirKind::Capture(hir::Capture { ref sub, .. }) => sub, + HirKind::Concat(ref subs) => { + // We are careful to only do the flattening/copy when we know + // we have a "top level" concat we can inspect. This avoids + // doing extra work in cases where we definitely won't use it. + // (This might still be wasted work if we can't go on to find + // some literals to extract.) + let concat = + Hir::concat(subs.iter().map(|h| flatten(h)).collect()); + return match concat.into_kind() { + HirKind::Concat(xs) => Some(xs), + // It is actually possible for this case to occur, because + // 'Hir::concat' might simplify the expression to the point + // that concatenations are actually removed. One wonders + // whether this leads to other cases where we should be + // extracting literals, but in theory, I believe if we do + // get here, then it means that a "real" prefilter failed + // to be extracted and we should probably leave well enough + // alone. (A "real" prefilter is unbothered by "top-level + // concats" and "capturing groups.") + _ => return None, + }; + } + }; + } +} + +/// Returns a copy of the given HIR but with all capturing groups removed. +fn flatten(hir: &Hir) -> Hir { + match hir.kind() { + HirKind::Empty => Hir::empty(), + HirKind::Literal(hir::Literal(ref x)) => Hir::literal(x.clone()), + HirKind::Class(ref x) => Hir::class(x.clone()), + HirKind::Look(ref x) => Hir::look(x.clone()), + HirKind::Repetition(ref x) => Hir::repetition(x.with(flatten(&x.sub))), + // This is the interesting case. We just drop the group information + // entirely and use the child HIR itself. + HirKind::Capture(hir::Capture { ref sub, .. }) => flatten(sub), + HirKind::Alternation(ref xs) => { + Hir::alternation(xs.iter().map(|x| flatten(x)).collect()) + } + HirKind::Concat(ref xs) => { + Hir::concat(xs.iter().map(|x| flatten(x)).collect()) + } + } +} diff --git a/vendor/regex-automata/src/meta/stopat.rs b/vendor/regex-automata/src/meta/stopat.rs new file mode 100644 index 00000000000000..c4dcd797a0b8fc --- /dev/null +++ b/vendor/regex-automata/src/meta/stopat.rs @@ -0,0 +1,212 @@ +/*! +This module defines two bespoke forward DFA search routines. One for the lazy +DFA and one for the fully compiled DFA. 
These routines differ from the normal +ones by reporting the position at which the search terminates when a match +*isn't* found. + +This position at which a search terminates is useful in contexts where the meta +regex engine runs optimizations that could go quadratic if we aren't careful. +Namely, a regex search *could* scan to the end of the haystack only to report a +non-match. If the caller doesn't know that the search scanned to the end of the +haystack, it might restart the search at the next literal candidate it finds +and repeat the process. + +Providing the caller with the position at which the search stopped provides a +way for the caller to determine the point at which subsequent scans should not +pass. This is principally used in the "reverse inner" optimization, which works +like this: + +1. Look for a match of an inner literal. Say, 'Z' in '\w+Z\d+'. +2. At the spot where 'Z' matches, do a reverse anchored search from there for +'\w+'. +3. If the reverse search matches, it corresponds to the start position of a +(possible) match. At this point, do a forward anchored search to find the end +position. If an end position is found, then we have a match and we know its +bounds. + +If the forward anchored search in (3) searches the entire rest of the haystack +but reports a non-match, then a naive implementation of the above will continue +back at step 1 looking for more candidates. There might still be a match to be +found! It's possible. But we already scanned the whole haystack. So if we keep +repeating the process, then we might wind up taking quadratic time in the size +of the haystack, which is not great. + +So if the forward anchored search in (3) reports the position at which it +stops, then we can detect whether quadratic behavior might be occurring in +steps (1) and (2). For (1), it occurs if the literal candidate found occurs +*before* the end of the previous search in (3), since that means we're now +going to look for another match in a place where the forward search has already +scanned. It is *correct* to do so, but our technique has become inefficient. +For (2), quadratic behavior occurs similarly when its reverse search extends +past the point where the previous forward search in (3) terminated. Indeed, to +implement (2), we use the sibling 'limited' module for ensuring our reverse +scan doesn't go further than we want. + +See the 'opt/reverse-inner' benchmarks in rebar for a real demonstration of +how quadratic behavior is mitigated. 
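
As a purely illustrative walk-through (the haystack here is hypothetical and
only meant to make the three steps above concrete): for '\w+Z\d+' searched in
'abcZ123', step (1) finds the inner literal 'Z' at offset 3, step (2) runs
'\w+' in reverse from offset 3 and reports the candidate start offset 0, and
step (3) runs an anchored forward search from offset 0 and reports the end
offset 7, giving the match 0..7. When step (3) instead fails after scanning up
to some offset, that offset is exactly what the routines in this module
report, and any later candidate found before it signals that a naive restart
would rescan bytes that have already been examined.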
+*/ + +use crate::{meta::error::RetryFailError, HalfMatch, Input, MatchError}; + +#[cfg(feature = "dfa-build")] +pub(crate) fn dfa_try_search_half_fwd( + dfa: &crate::dfa::dense::DFA>, + input: &Input<'_>, +) -> Result, RetryFailError> { + use crate::dfa::{accel, Automaton}; + + let mut mat = None; + let mut sid = dfa.start_state_forward(input)?; + let mut at = input.start(); + while at < input.end() { + sid = dfa.next_state(sid, input.haystack()[at]); + if dfa.is_special_state(sid) { + if dfa.is_match_state(sid) { + let pattern = dfa.match_pattern(sid, 0); + mat = Some(HalfMatch::new(pattern, at)); + if input.get_earliest() { + return Ok(mat.ok_or(at)); + } + if dfa.is_accel_state(sid) { + let needs = dfa.accelerator(sid); + at = accel::find_fwd(needs, input.haystack(), at) + .unwrap_or(input.end()); + continue; + } + } else if dfa.is_accel_state(sid) { + let needs = dfa.accelerator(sid); + at = accel::find_fwd(needs, input.haystack(), at) + .unwrap_or(input.end()); + continue; + } else if dfa.is_dead_state(sid) { + return Ok(mat.ok_or(at)); + } else if dfa.is_quit_state(sid) { + return Err(MatchError::quit(input.haystack()[at], at).into()); + } else { + // Ideally we wouldn't use a DFA that specialized start states + // and thus 'is_start_state()' could never be true here, but in + // practice we reuse the DFA created for the full regex which + // will specialize start states whenever there is a prefilter. + debug_assert!(dfa.is_start_state(sid)); + } + } + at += 1; + } + dfa_eoi_fwd(dfa, input, &mut sid, &mut mat)?; + Ok(mat.ok_or(at)) +} + +#[cfg(feature = "hybrid")] +pub(crate) fn hybrid_try_search_half_fwd( + dfa: &crate::hybrid::dfa::DFA, + cache: &mut crate::hybrid::dfa::Cache, + input: &Input<'_>, +) -> Result, RetryFailError> { + let mut mat = None; + let mut sid = dfa.start_state_forward(cache, input)?; + let mut at = input.start(); + while at < input.end() { + sid = dfa + .next_state(cache, sid, input.haystack()[at]) + .map_err(|_| MatchError::gave_up(at))?; + if sid.is_tagged() { + if sid.is_match() { + let pattern = dfa.match_pattern(cache, sid, 0); + mat = Some(HalfMatch::new(pattern, at)); + if input.get_earliest() { + return Ok(mat.ok_or(at)); + } + } else if sid.is_dead() { + return Ok(mat.ok_or(at)); + } else if sid.is_quit() { + return Err(MatchError::quit(input.haystack()[at], at).into()); + } else { + // We should NEVER get an unknown state ID back from + // dfa.next_state(). + debug_assert!(!sid.is_unknown()); + // Ideally we wouldn't use a lazy DFA that specialized start + // states and thus 'sid.is_start()' could never be true here, + // but in practice we reuse the lazy DFA created for the full + // regex which will specialize start states whenever there is + // a prefilter. 
+ debug_assert!(sid.is_start()); + } + } + at += 1; + } + hybrid_eoi_fwd(dfa, cache, input, &mut sid, &mut mat)?; + Ok(mat.ok_or(at)) +} + +#[cfg(feature = "dfa-build")] +#[cfg_attr(feature = "perf-inline", inline(always))] +fn dfa_eoi_fwd( + dfa: &crate::dfa::dense::DFA>, + input: &Input<'_>, + sid: &mut crate::util::primitives::StateID, + mat: &mut Option, +) -> Result<(), MatchError> { + use crate::dfa::Automaton; + + let sp = input.get_span(); + match input.haystack().get(sp.end) { + Some(&b) => { + *sid = dfa.next_state(*sid, b); + if dfa.is_match_state(*sid) { + let pattern = dfa.match_pattern(*sid, 0); + *mat = Some(HalfMatch::new(pattern, sp.end)); + } else if dfa.is_quit_state(*sid) { + return Err(MatchError::quit(b, sp.end)); + } + } + None => { + *sid = dfa.next_eoi_state(*sid); + if dfa.is_match_state(*sid) { + let pattern = dfa.match_pattern(*sid, 0); + *mat = Some(HalfMatch::new(pattern, input.haystack().len())); + } + // N.B. We don't have to check 'is_quit' here because the EOI + // transition can never lead to a quit state. + debug_assert!(!dfa.is_quit_state(*sid)); + } + } + Ok(()) +} + +#[cfg(feature = "hybrid")] +#[cfg_attr(feature = "perf-inline", inline(always))] +fn hybrid_eoi_fwd( + dfa: &crate::hybrid::dfa::DFA, + cache: &mut crate::hybrid::dfa::Cache, + input: &Input<'_>, + sid: &mut crate::hybrid::LazyStateID, + mat: &mut Option, +) -> Result<(), MatchError> { + let sp = input.get_span(); + match input.haystack().get(sp.end) { + Some(&b) => { + *sid = dfa + .next_state(cache, *sid, b) + .map_err(|_| MatchError::gave_up(sp.end))?; + if sid.is_match() { + let pattern = dfa.match_pattern(cache, *sid, 0); + *mat = Some(HalfMatch::new(pattern, sp.end)); + } else if sid.is_quit() { + return Err(MatchError::quit(b, sp.end)); + } + } + None => { + *sid = dfa + .next_eoi_state(cache, *sid) + .map_err(|_| MatchError::gave_up(input.haystack().len()))?; + if sid.is_match() { + let pattern = dfa.match_pattern(cache, *sid, 0); + *mat = Some(HalfMatch::new(pattern, input.haystack().len())); + } + // N.B. We don't have to check 'is_quit' here because the EOI + // transition can never lead to a quit state. + debug_assert!(!sid.is_quit()); + } + } + Ok(()) +} diff --git a/vendor/regex-automata/src/meta/strategy.rs b/vendor/regex-automata/src/meta/strategy.rs new file mode 100644 index 00000000000000..ebb876b2b88525 --- /dev/null +++ b/vendor/regex-automata/src/meta/strategy.rs @@ -0,0 +1,1905 @@ +use core::{ + fmt::Debug, + panic::{RefUnwindSafe, UnwindSafe}, +}; + +use alloc::sync::Arc; + +use regex_syntax::hir::{literal, Hir}; + +use crate::{ + meta::{ + error::{BuildError, RetryError, RetryFailError, RetryQuadraticError}, + regex::{Cache, RegexInfo}, + reverse_inner, wrappers, + }, + nfa::thompson::{self, WhichCaptures, NFA}, + util::{ + captures::{Captures, GroupInfo}, + look::LookMatcher, + prefilter::{self, Prefilter, PrefilterI}, + primitives::{NonMaxUsize, PatternID}, + search::{Anchored, HalfMatch, Input, Match, MatchKind, PatternSet}, + }, +}; + +/// A trait that represents a single meta strategy. Its main utility is in +/// providing a way to do dynamic dispatch over a few choices. +/// +/// Why dynamic dispatch? I actually don't have a super compelling reason, and +/// importantly, I have not benchmarked it with the main alternative: an enum. +/// I went with dynamic dispatch initially because the regex engine search code +/// really can't be inlined into caller code in most cases because it's just +/// too big. 
In other words, it is already expected that every regex search +/// will entail at least the cost of a function call. +/// +/// I do wonder whether using enums would result in better codegen overall +/// though. It's a worthwhile experiment to try. Probably the most interesting +/// benchmark to run in such a case would be one with a high match count. That +/// is, a benchmark to test the overall latency of a search call. +pub(super) trait Strategy: + Debug + Send + Sync + RefUnwindSafe + UnwindSafe + 'static +{ + fn group_info(&self) -> &GroupInfo; + + fn create_cache(&self) -> Cache; + + fn reset_cache(&self, cache: &mut Cache); + + fn is_accelerated(&self) -> bool; + + fn memory_usage(&self) -> usize; + + fn search(&self, cache: &mut Cache, input: &Input<'_>) -> Option; + + fn search_half( + &self, + cache: &mut Cache, + input: &Input<'_>, + ) -> Option; + + fn is_match(&self, cache: &mut Cache, input: &Input<'_>) -> bool; + + fn search_slots( + &self, + cache: &mut Cache, + input: &Input<'_>, + slots: &mut [Option], + ) -> Option; + + fn which_overlapping_matches( + &self, + cache: &mut Cache, + input: &Input<'_>, + patset: &mut PatternSet, + ); +} + +pub(super) fn new( + info: &RegexInfo, + hirs: &[&Hir], +) -> Result, BuildError> { + // At this point, we're committed to a regex engine of some kind. So pull + // out a prefilter if we can, which will feed to each of the constituent + // regex engines. + let pre = if info.is_always_anchored_start() { + // PERF: I'm not sure we necessarily want to do this... We may want to + // run a prefilter for quickly rejecting in some cases. The problem + // is that anchored searches overlap quite a bit with the use case + // of "run a regex on every line to extract data." In that case, the + // regex always matches, so running a prefilter doesn't really help us + // there. The main place where a prefilter helps in an anchored search + // is if the anchored search is not expected to match frequently. That + // is, the prefilter gives us a way to possibly reject a haystack very + // quickly. + // + // Maybe we should do use a prefilter, but only for longer haystacks? + // Or maybe we should only use a prefilter when we think it's "fast"? + // + // Interestingly, I think we currently lack the infrastructure for + // disabling a prefilter based on haystack length. That would probably + // need to be a new 'Input' option. (Interestingly, an 'Input' used to + // carry a 'Prefilter' with it, but I moved away from that.) + debug!("skipping literal extraction since regex is anchored"); + None + } else if let Some(pre) = info.config().get_prefilter() { + debug!( + "skipping literal extraction since the caller provided a prefilter" + ); + Some(pre.clone()) + } else if info.config().get_auto_prefilter() { + let kind = info.config().get_match_kind(); + let prefixes = crate::util::prefilter::prefixes(kind, hirs); + // If we can build a full `Strategy` from just the extracted prefixes, + // then we can short-circuit and avoid building a regex engine at all. + if let Some(pre) = Pre::from_prefixes(info, &prefixes) { + debug!( + "found that the regex can be broken down to a literal \ + search, avoiding the regex engine entirely", + ); + return Ok(pre); + } + // This now attempts another short-circuit of the regex engine: if we + // have a huge alternation of just plain literals, then we can just use + // Aho-Corasick for that and avoid the regex engine entirely. 
+ // + // You might think this case would just be handled by + // `Pre::from_prefixes`, but that technique relies on heuristic literal + // extraction from the corresponding `Hir`. That works, but part of + // heuristics limit the size and number of literals returned. This case + // will specifically handle patterns with very large alternations. + // + // One wonders if we should just roll this our heuristic literal + // extraction, and then I think this case could disappear entirely. + if let Some(pre) = Pre::from_alternation_literals(info, hirs) { + debug!( + "found plain alternation of literals, \ + avoiding regex engine entirely and using Aho-Corasick" + ); + return Ok(pre); + } + prefixes.literals().and_then(|strings| { + debug!( + "creating prefilter from {} literals: {:?}", + strings.len(), + strings, + ); + Prefilter::new(kind, strings) + }) + } else { + debug!("skipping literal extraction since prefilters were disabled"); + None + }; + let mut core = Core::new(info.clone(), pre.clone(), hirs)?; + // Now that we have our core regex engines built, there are a few cases + // where we can do a little bit better than just a normal "search forward + // and maybe use a prefilter when in a start state." However, these cases + // may not always work or otherwise build on top of the Core searcher. + // For example, the reverse anchored optimization seems like it might + // always work, but only the DFAs support reverse searching and the DFAs + // might give up or quit for reasons. If we had, e.g., a PikeVM that + // supported reverse searching, then we could avoid building a full Core + // engine for this case. + core = match ReverseAnchored::new(core) { + Err(core) => core, + Ok(ra) => { + debug!("using reverse anchored strategy"); + return Ok(Arc::new(ra)); + } + }; + core = match ReverseSuffix::new(core, hirs) { + Err(core) => core, + Ok(rs) => { + debug!("using reverse suffix strategy"); + return Ok(Arc::new(rs)); + } + }; + core = match ReverseInner::new(core, hirs) { + Err(core) => core, + Ok(ri) => { + debug!("using reverse inner strategy"); + return Ok(Arc::new(ri)); + } + }; + debug!("using core strategy"); + Ok(Arc::new(core)) +} + +#[derive(Clone, Debug)] +struct Pre
<P>
{
+    pre: P,
+    group_info: GroupInfo,
+}
+
+impl<P: PrefilterI> Pre<P>
{ + fn new(pre: P) -> Arc { + // The only thing we support when we use prefilters directly as a + // strategy is the start and end of the overall match for a single + // pattern. In other words, exactly one implicit capturing group. Which + // is exactly what we use here for a GroupInfo. + let group_info = GroupInfo::new([[None::<&str>]]).unwrap(); + Arc::new(Pre { pre, group_info }) + } +} + +// This is a little weird, but we don't actually care about the type parameter +// here because we're selecting which underlying prefilter to use. So we just +// define it on an arbitrary type. +impl Pre<()> { + /// Given a sequence of prefixes, attempt to return a full `Strategy` using + /// just the prefixes. + /// + /// Basically, this occurs when the prefixes given not just prefixes, + /// but an enumeration of the entire language matched by the regular + /// expression. + /// + /// A number of other conditions need to be true too. For example, there + /// can be only one pattern, the number of explicit capture groups is 0, no + /// look-around assertions and so on. + /// + /// Note that this ignores `Config::get_auto_prefilter` because if this + /// returns something, then it isn't a prefilter but a matcher itself. + /// Therefore, it shouldn't suffer from the problems typical to prefilters + /// (such as a high false positive rate). + fn from_prefixes( + info: &RegexInfo, + prefixes: &literal::Seq, + ) -> Option> { + let kind = info.config().get_match_kind(); + // Check to see if our prefixes are exact, which means we might be + // able to bypass the regex engine entirely and just rely on literal + // searches. + if !prefixes.is_exact() { + return None; + } + // We also require that we have a single regex pattern. Namely, + // we reuse the prefilter infrastructure to implement search and + // prefilters only report spans. Prefilters don't know about pattern + // IDs. The multi-regex case isn't a lost cause, we might still use + // Aho-Corasick and we might still just use a regular prefilter, but + // that's done below. + if info.pattern_len() != 1 { + return None; + } + // We can't have any capture groups either. The literal engines don't + // know how to deal with things like '(foo)(bar)'. In that case, a + // prefilter will just be used and then the regex engine will resolve + // the capture groups. + if info.props()[0].explicit_captures_len() != 0 { + return None; + } + // We also require that it has zero look-around assertions. Namely, + // literal extraction treats look-around assertions as if they match + // *every* empty string. But of course, that isn't true. So for + // example, 'foo\bquux' never matches anything, but 'fooquux' is + // extracted from that as an exact literal. Such cases should just run + // the regex engine. 'fooquux' will be used as a normal prefilter, and + // then the regex engine will try to look for an actual match. + if !info.props()[0].look_set().is_empty() { + return None; + } + // Finally, currently, our prefilters are all oriented around + // leftmost-first match semantics, so don't try to use them if the + // caller asked for anything else. + if kind != MatchKind::LeftmostFirst { + return None; + } + // The above seems like a lot of requirements to meet, but it applies + // to a lot of cases. 'foo', '[abc][123]' and 'foo|bar|quux' all meet + // the above criteria, for example. + // + // Note that this is effectively a latency optimization. 
If we didn't + // do this, then the extracted literals would still get bundled into + // a prefilter, and every regex engine capable of running unanchored + // searches supports prefilters. So this optimization merely sidesteps + // having to run the regex engine at all to confirm the match. Thus, it + // decreases the latency of a match. + + // OK because we know the set is exact and thus finite. + let prefixes = prefixes.literals().unwrap(); + debug!( + "trying to bypass regex engine by creating \ + prefilter from {} literals: {:?}", + prefixes.len(), + prefixes, + ); + let choice = match prefilter::Choice::new(kind, prefixes) { + Some(choice) => choice, + None => { + debug!( + "regex bypass failed because no prefilter could be built" + ); + return None; + } + }; + let strat: Arc = match choice { + prefilter::Choice::Memchr(pre) => Pre::new(pre), + prefilter::Choice::Memchr2(pre) => Pre::new(pre), + prefilter::Choice::Memchr3(pre) => Pre::new(pre), + prefilter::Choice::Memmem(pre) => Pre::new(pre), + prefilter::Choice::Teddy(pre) => Pre::new(pre), + prefilter::Choice::ByteSet(pre) => Pre::new(pre), + prefilter::Choice::AhoCorasick(pre) => Pre::new(pre), + }; + Some(strat) + } + + /// Attempts to extract an alternation of literals, and if it's deemed + /// worth doing, returns an Aho-Corasick prefilter as a strategy. + /// + /// And currently, this only returns something when 'hirs.len() == 1'. This + /// could in theory do something if there are multiple HIRs where all of + /// them are alternation of literals, but I haven't had the time to go down + /// that path yet. + fn from_alternation_literals( + info: &RegexInfo, + hirs: &[&Hir], + ) -> Option> { + use crate::util::prefilter::AhoCorasick; + + let lits = crate::meta::literal::alternation_literals(info, hirs)?; + let ac = AhoCorasick::new(MatchKind::LeftmostFirst, &lits)?; + Some(Pre::new(ac)) + } +} + +// This implements Strategy for anything that implements PrefilterI. +// +// Note that this must only be used for regexes of length 1. Multi-regexes +// don't work here. The prefilter interface only provides the span of a match +// and not the pattern ID. (I did consider making it more expressive, but I +// couldn't figure out how to tie everything together elegantly.) Thus, so long +// as the regex only contains one pattern, we can simply assume that a match +// corresponds to PatternID::ZERO. And indeed, that's what we do here. +// +// In practice, since this impl is used to report matches directly and thus +// completely bypasses the regex engine, we only wind up using this under the +// following restrictions: +// +// * There must be only one pattern. As explained above. +// * The literal sequence must be finite and only contain exact literals. +// * There must not be any look-around assertions. If there are, the literals +// extracted might be exact, but a match doesn't necessarily imply an overall +// match. As a trivial example, 'foo\bbar' does not match 'foobar'. +// * The pattern must not have any explicit capturing groups. If it does, the +// caller might expect them to be resolved. e.g., 'foo(bar)'. +// +// So when all of those things are true, we use a prefilter directly as a +// strategy. +// +// In the case where the number of patterns is more than 1, we don't use this +// but do use a special Aho-Corasick strategy if all of the regexes are just +// simple literals or alternations of literals. (We also use the Aho-Corasick +// strategy when len(patterns)==1 if the number of literals is large. 
In that
+// case, literal extraction gives up and will return an infinite set.)
+impl<P: PrefilterI> Strategy for Pre<P>
{ + #[cfg_attr(feature = "perf-inline", inline(always))] + fn group_info(&self) -> &GroupInfo { + &self.group_info + } + + fn create_cache(&self) -> Cache { + Cache { + capmatches: Captures::all(self.group_info().clone()), + pikevm: wrappers::PikeVMCache::none(), + backtrack: wrappers::BoundedBacktrackerCache::none(), + onepass: wrappers::OnePassCache::none(), + hybrid: wrappers::HybridCache::none(), + revhybrid: wrappers::ReverseHybridCache::none(), + } + } + + fn reset_cache(&self, _cache: &mut Cache) {} + + fn is_accelerated(&self) -> bool { + self.pre.is_fast() + } + + fn memory_usage(&self) -> usize { + self.pre.memory_usage() + } + + #[cfg_attr(feature = "perf-inline", inline(always))] + fn search(&self, _cache: &mut Cache, input: &Input<'_>) -> Option { + if input.is_done() { + return None; + } + if input.get_anchored().is_anchored() { + return self + .pre + .prefix(input.haystack(), input.get_span()) + .map(|sp| Match::new(PatternID::ZERO, sp)); + } + self.pre + .find(input.haystack(), input.get_span()) + .map(|sp| Match::new(PatternID::ZERO, sp)) + } + + #[cfg_attr(feature = "perf-inline", inline(always))] + fn search_half( + &self, + cache: &mut Cache, + input: &Input<'_>, + ) -> Option { + self.search(cache, input).map(|m| HalfMatch::new(m.pattern(), m.end())) + } + + #[cfg_attr(feature = "perf-inline", inline(always))] + fn is_match(&self, cache: &mut Cache, input: &Input<'_>) -> bool { + self.search(cache, input).is_some() + } + + #[cfg_attr(feature = "perf-inline", inline(always))] + fn search_slots( + &self, + cache: &mut Cache, + input: &Input<'_>, + slots: &mut [Option], + ) -> Option { + let m = self.search(cache, input)?; + if let Some(slot) = slots.get_mut(0) { + *slot = NonMaxUsize::new(m.start()); + } + if let Some(slot) = slots.get_mut(1) { + *slot = NonMaxUsize::new(m.end()); + } + Some(m.pattern()) + } + + #[cfg_attr(feature = "perf-inline", inline(always))] + fn which_overlapping_matches( + &self, + cache: &mut Cache, + input: &Input<'_>, + patset: &mut PatternSet, + ) { + if self.search(cache, input).is_some() { + patset.insert(PatternID::ZERO); + } + } +} + +#[derive(Debug)] +struct Core { + info: RegexInfo, + pre: Option, + nfa: NFA, + nfarev: Option, + pikevm: wrappers::PikeVM, + backtrack: wrappers::BoundedBacktracker, + onepass: wrappers::OnePass, + hybrid: wrappers::Hybrid, + dfa: wrappers::DFA, +} + +impl Core { + fn new( + info: RegexInfo, + pre: Option, + hirs: &[&Hir], + ) -> Result { + let mut lookm = LookMatcher::new(); + lookm.set_line_terminator(info.config().get_line_terminator()); + let thompson_config = thompson::Config::new() + .utf8(info.config().get_utf8_empty()) + .nfa_size_limit(info.config().get_nfa_size_limit()) + .shrink(false) + .which_captures(info.config().get_which_captures()) + .look_matcher(lookm); + let nfa = thompson::Compiler::new() + .configure(thompson_config.clone()) + .build_many_from_hir(hirs) + .map_err(BuildError::nfa)?; + // It's possible for the PikeVM or the BB to fail to build, even though + // at this point, we already have a full NFA in hand. They can fail + // when a Unicode word boundary is used but where Unicode word boundary + // support is disabled at compile time, thus making it impossible to + // match. (Construction can also fail if the NFA was compiled without + // captures, but we always enable that above.) 
+ let pikevm = wrappers::PikeVM::new(&info, pre.clone(), &nfa)?; + let backtrack = + wrappers::BoundedBacktracker::new(&info, pre.clone(), &nfa)?; + // The onepass engine can of course fail to build, but we expect it to + // fail in many cases because it is an optimization that doesn't apply + // to all regexes. The 'OnePass' wrapper encapsulates this failure (and + // logs a message if it occurs). + let onepass = wrappers::OnePass::new(&info, &nfa); + // We try to encapsulate whether a particular regex engine should be + // used within each respective wrapper, but the DFAs need a reverse NFA + // to build itself, and we really do not want to build a reverse NFA if + // we know we aren't going to use the lazy DFA. So we do a config check + // up front, which is in practice the only way we won't try to use the + // DFA. + let (nfarev, hybrid, dfa) = + if !info.config().get_hybrid() && !info.config().get_dfa() { + (None, wrappers::Hybrid::none(), wrappers::DFA::none()) + } else { + // FIXME: Technically, we don't quite yet KNOW that we need + // a reverse NFA. It's possible for the DFAs below to both + // fail to build just based on the forward NFA. In which case, + // building the reverse NFA was totally wasted work. But... + // fixing this requires breaking DFA construction apart into + // two pieces: one for the forward part and another for the + // reverse part. Quite annoying. Making it worse, when building + // both DFAs fails, it's quite likely that the NFA is large and + // that it will take quite some time to build the reverse NFA + // too. So... it's really probably worth it to do this! + let nfarev = thompson::Compiler::new() + // Currently, reverse NFAs don't support capturing groups, + // so we MUST disable them. But even if we didn't have to, + // we would, because nothing in this crate does anything + // useful with capturing groups in reverse. And of course, + // the lazy DFA ignores capturing groups in all cases. + .configure( + thompson_config + .clone() + .which_captures(WhichCaptures::None) + .reverse(true), + ) + .build_many_from_hir(hirs) + .map_err(BuildError::nfa)?; + let dfa = if !info.config().get_dfa() { + wrappers::DFA::none() + } else { + wrappers::DFA::new(&info, pre.clone(), &nfa, &nfarev) + }; + let hybrid = if !info.config().get_hybrid() { + wrappers::Hybrid::none() + } else if dfa.is_some() { + debug!("skipping lazy DFA because we have a full DFA"); + wrappers::Hybrid::none() + } else { + wrappers::Hybrid::new(&info, pre.clone(), &nfa, &nfarev) + }; + (Some(nfarev), hybrid, dfa) + }; + Ok(Core { + info, + pre, + nfa, + nfarev, + pikevm, + backtrack, + onepass, + hybrid, + dfa, + }) + } + + #[cfg_attr(feature = "perf-inline", inline(always))] + fn try_search_mayfail( + &self, + cache: &mut Cache, + input: &Input<'_>, + ) -> Option, RetryFailError>> { + if let Some(e) = self.dfa.get(input) { + trace!("using full DFA for search at {:?}", input.get_span()); + Some(e.try_search(input)) + } else if let Some(e) = self.hybrid.get(input) { + trace!("using lazy DFA for search at {:?}", input.get_span()); + Some(e.try_search(&mut cache.hybrid, input)) + } else { + None + } + } + + fn search_nofail( + &self, + cache: &mut Cache, + input: &Input<'_>, + ) -> Option { + let caps = &mut cache.capmatches; + caps.set_pattern(None); + // We manually inline 'try_search_slots_nofail' here because we need to + // borrow from 'cache.capmatches' in this method, but if we do, then + // we can't pass 'cache' wholesale to to 'try_slots_no_hybrid'. 
It's a + // classic example of how the borrow checker inhibits decomposition. + // There are of course work-arounds (more types and/or interior + // mutability), but that's more annoying than this IMO. + let pid = if let Some(ref e) = self.onepass.get(input) { + trace!("using OnePass for search at {:?}", input.get_span()); + e.search_slots(&mut cache.onepass, input, caps.slots_mut()) + } else if let Some(ref e) = self.backtrack.get(input) { + trace!( + "using BoundedBacktracker for search at {:?}", + input.get_span() + ); + e.search_slots(&mut cache.backtrack, input, caps.slots_mut()) + } else { + trace!("using PikeVM for search at {:?}", input.get_span()); + let e = self.pikevm.get(); + e.search_slots(&mut cache.pikevm, input, caps.slots_mut()) + }; + caps.set_pattern(pid); + caps.get_match() + } + + fn search_half_nofail( + &self, + cache: &mut Cache, + input: &Input<'_>, + ) -> Option { + // Only the lazy/full DFA returns half-matches, since the DFA requires + // a reverse scan to find the start position. These fallback regex + // engines can find the start and end in a single pass, so we just do + // that and throw away the start offset to conform to the API. + let m = self.search_nofail(cache, input)?; + Some(HalfMatch::new(m.pattern(), m.end())) + } + + fn search_slots_nofail( + &self, + cache: &mut Cache, + input: &Input<'_>, + slots: &mut [Option], + ) -> Option { + if let Some(ref e) = self.onepass.get(input) { + trace!( + "using OnePass for capture search at {:?}", + input.get_span() + ); + e.search_slots(&mut cache.onepass, input, slots) + } else if let Some(ref e) = self.backtrack.get(input) { + trace!( + "using BoundedBacktracker for capture search at {:?}", + input.get_span() + ); + e.search_slots(&mut cache.backtrack, input, slots) + } else { + trace!( + "using PikeVM for capture search at {:?}", + input.get_span() + ); + let e = self.pikevm.get(); + e.search_slots(&mut cache.pikevm, input, slots) + } + } + + fn is_match_nofail(&self, cache: &mut Cache, input: &Input<'_>) -> bool { + if let Some(ref e) = self.onepass.get(input) { + trace!( + "using OnePass for is-match search at {:?}", + input.get_span() + ); + e.search_slots(&mut cache.onepass, input, &mut []).is_some() + } else if let Some(ref e) = self.backtrack.get(input) { + trace!( + "using BoundedBacktracker for is-match search at {:?}", + input.get_span() + ); + e.is_match(&mut cache.backtrack, input) + } else { + trace!( + "using PikeVM for is-match search at {:?}", + input.get_span() + ); + let e = self.pikevm.get(); + e.is_match(&mut cache.pikevm, input) + } + } + + fn is_capture_search_needed(&self, slots_len: usize) -> bool { + slots_len > self.nfa.group_info().implicit_slot_len() + } +} + +impl Strategy for Core { + #[cfg_attr(feature = "perf-inline", inline(always))] + fn group_info(&self) -> &GroupInfo { + self.nfa.group_info() + } + + #[cfg_attr(feature = "perf-inline", inline(always))] + fn create_cache(&self) -> Cache { + Cache { + capmatches: Captures::all(self.group_info().clone()), + pikevm: self.pikevm.create_cache(), + backtrack: self.backtrack.create_cache(), + onepass: self.onepass.create_cache(), + hybrid: self.hybrid.create_cache(), + revhybrid: wrappers::ReverseHybridCache::none(), + } + } + + #[cfg_attr(feature = "perf-inline", inline(always))] + fn reset_cache(&self, cache: &mut Cache) { + cache.pikevm.reset(&self.pikevm); + cache.backtrack.reset(&self.backtrack); + cache.onepass.reset(&self.onepass); + cache.hybrid.reset(&self.hybrid); + } + + fn is_accelerated(&self) -> bool { + 
self.pre.as_ref().map_or(false, |pre| pre.is_fast()) + } + + fn memory_usage(&self) -> usize { + self.info.memory_usage() + + self.pre.as_ref().map_or(0, |pre| pre.memory_usage()) + + self.nfa.memory_usage() + + self.nfarev.as_ref().map_or(0, |nfa| nfa.memory_usage()) + + self.onepass.memory_usage() + + self.dfa.memory_usage() + } + + #[cfg_attr(feature = "perf-inline", inline(always))] + fn search(&self, cache: &mut Cache, input: &Input<'_>) -> Option { + // We manually inline try_search_mayfail here because letting the + // compiler do it seems to produce pretty crappy codegen. + return if let Some(e) = self.dfa.get(input) { + trace!("using full DFA for full search at {:?}", input.get_span()); + match e.try_search(input) { + Ok(x) => x, + Err(_err) => { + trace!("full DFA search failed: {_err}"); + self.search_nofail(cache, input) + } + } + } else if let Some(e) = self.hybrid.get(input) { + trace!("using lazy DFA for full search at {:?}", input.get_span()); + match e.try_search(&mut cache.hybrid, input) { + Ok(x) => x, + Err(_err) => { + trace!("lazy DFA search failed: {_err}"); + self.search_nofail(cache, input) + } + } + } else { + self.search_nofail(cache, input) + }; + } + + #[cfg_attr(feature = "perf-inline", inline(always))] + fn search_half( + &self, + cache: &mut Cache, + input: &Input<'_>, + ) -> Option { + // The main difference with 'search' is that if we're using a DFA, we + // can use a single forward scan without needing to run the reverse + // DFA. + if let Some(e) = self.dfa.get(input) { + trace!("using full DFA for half search at {:?}", input.get_span()); + match e.try_search_half_fwd(input) { + Ok(x) => x, + Err(_err) => { + trace!("full DFA half search failed: {_err}"); + self.search_half_nofail(cache, input) + } + } + } else if let Some(e) = self.hybrid.get(input) { + trace!("using lazy DFA for half search at {:?}", input.get_span()); + match e.try_search_half_fwd(&mut cache.hybrid, input) { + Ok(x) => x, + Err(_err) => { + trace!("lazy DFA half search failed: {_err}"); + self.search_half_nofail(cache, input) + } + } + } else { + self.search_half_nofail(cache, input) + } + } + + #[cfg_attr(feature = "perf-inline", inline(always))] + fn is_match(&self, cache: &mut Cache, input: &Input<'_>) -> bool { + if let Some(e) = self.dfa.get(input) { + trace!( + "using full DFA for is-match search at {:?}", + input.get_span() + ); + match e.try_search_half_fwd(input) { + Ok(x) => x.is_some(), + Err(_err) => { + trace!("full DFA half search failed: {_err}"); + self.is_match_nofail(cache, input) + } + } + } else if let Some(e) = self.hybrid.get(input) { + trace!( + "using lazy DFA for is-match search at {:?}", + input.get_span() + ); + match e.try_search_half_fwd(&mut cache.hybrid, input) { + Ok(x) => x.is_some(), + Err(_err) => { + trace!("lazy DFA half search failed: {_err}"); + self.is_match_nofail(cache, input) + } + } + } else { + self.is_match_nofail(cache, input) + } + } + + #[cfg_attr(feature = "perf-inline", inline(always))] + fn search_slots( + &self, + cache: &mut Cache, + input: &Input<'_>, + slots: &mut [Option], + ) -> Option { + // Even if the regex has explicit capture groups, if the caller didn't + // provide any explicit slots, then it doesn't make sense to try and do + // extra work to get offsets for those slots. Ideally the caller should + // realize this and not call this routine in the first place, but alas, + // we try to save the caller from themselves if they do. 
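+        // For example, a caller that provides at most the implicit slots
+        // (the overall match bounds) lands on this fast path: the plain
+        // 'search' call below finds the bounds without engaging the slower
+        // capture-resolving engines, and the bounds are then copied into
+        // the provided slots.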
+ if !self.is_capture_search_needed(slots.len()) { + trace!("asked for slots unnecessarily, trying fast path"); + let m = self.search(cache, input)?; + copy_match_to_slots(m, slots); + return Some(m.pattern()); + } + // If the onepass DFA is available for this search (which only happens + // when it's anchored), then skip running a fallible DFA. The onepass + // DFA isn't as fast as a full or lazy DFA, but it is typically quite + // a bit faster than the backtracker or the PikeVM. So it isn't as + // advantageous to try and do a full/lazy DFA scan first. + // + // We still theorize that it's better to do a full/lazy DFA scan, even + // when it's anchored, because it's usually much faster and permits us + // to say "no match" much more quickly. This does hurt the case of, + // say, parsing each line in a log file into capture groups, because + // in that case, the line always matches. So the lazy DFA scan is + // usually just wasted work. But, the lazy DFA is usually quite fast + // and doesn't cost too much here. + if self.onepass.get(&input).is_some() { + return self.search_slots_nofail(cache, &input, slots); + } + let m = match self.try_search_mayfail(cache, input) { + Some(Ok(Some(m))) => m, + Some(Ok(None)) => return None, + Some(Err(_err)) => { + trace!("fast capture search failed: {_err}"); + return self.search_slots_nofail(cache, input, slots); + } + None => { + return self.search_slots_nofail(cache, input, slots); + } + }; + // At this point, now that we've found the bounds of the + // match, we need to re-run something that can resolve + // capturing groups. But we only need to run on it on the + // match bounds and not the entire haystack. + trace!( + "match found at {}..{} in capture search, \ + using another engine to find captures", + m.start(), + m.end(), + ); + let input = input + .clone() + .span(m.start()..m.end()) + .anchored(Anchored::Pattern(m.pattern())); + Some( + self.search_slots_nofail(cache, &input, slots) + .expect("should find a match"), + ) + } + + #[cfg_attr(feature = "perf-inline", inline(always))] + fn which_overlapping_matches( + &self, + cache: &mut Cache, + input: &Input<'_>, + patset: &mut PatternSet, + ) { + if let Some(e) = self.dfa.get(input) { + trace!( + "using full DFA for overlapping search at {:?}", + input.get_span() + ); + let _err = match e.try_which_overlapping_matches(input, patset) { + Ok(()) => return, + Err(err) => err, + }; + trace!("fast overlapping search failed: {_err}"); + } else if let Some(e) = self.hybrid.get(input) { + trace!( + "using lazy DFA for overlapping search at {:?}", + input.get_span() + ); + let _err = match e.try_which_overlapping_matches( + &mut cache.hybrid, + input, + patset, + ) { + Ok(()) => { + return; + } + Err(err) => err, + }; + trace!("fast overlapping search failed: {_err}"); + } + trace!( + "using PikeVM for overlapping search at {:?}", + input.get_span() + ); + let e = self.pikevm.get(); + e.which_overlapping_matches(&mut cache.pikevm, input, patset) + } +} + +#[derive(Debug)] +struct ReverseAnchored { + core: Core, +} + +impl ReverseAnchored { + fn new(core: Core) -> Result { + if !core.info.is_always_anchored_end() { + debug!( + "skipping reverse anchored optimization because \ + the regex is not always anchored at the end" + ); + return Err(core); + } + // Note that the caller can still request an anchored search even when + // the regex isn't anchored at the start. We detect that case in the + // search routines below and just fallback to the core engine. 
This + // is fine because both searches are anchored. It's just a matter of + // picking one. Falling back to the core engine is a little simpler, + // since if we used the reverse anchored approach, we'd have to add an + // extra check to ensure the match reported starts at the place where + // the caller requested the search to start. + if core.info.is_always_anchored_start() { + debug!( + "skipping reverse anchored optimization because \ + the regex is also anchored at the start" + ); + return Err(core); + } + // Only DFAs can do reverse searches (currently), so we need one of + // them in order to do this optimization. It's possible (although + // pretty unlikely) that we have neither and need to give up. + if !core.hybrid.is_some() && !core.dfa.is_some() { + debug!( + "skipping reverse anchored optimization because \ + we don't have a lazy DFA or a full DFA" + ); + return Err(core); + } + Ok(ReverseAnchored { core }) + } + + #[cfg_attr(feature = "perf-inline", inline(always))] + fn try_search_half_anchored_rev( + &self, + cache: &mut Cache, + input: &Input<'_>, + ) -> Result, RetryFailError> { + // We of course always want an anchored search. In theory, the + // underlying regex engines should automatically enable anchored + // searches since the regex is itself anchored, but this more clearly + // expresses intent and is always correct. + let input = input.clone().anchored(Anchored::Yes); + if let Some(e) = self.core.dfa.get(&input) { + trace!( + "using full DFA for reverse anchored search at {:?}", + input.get_span() + ); + e.try_search_half_rev(&input) + } else if let Some(e) = self.core.hybrid.get(&input) { + trace!( + "using lazy DFA for reverse anchored search at {:?}", + input.get_span() + ); + e.try_search_half_rev(&mut cache.hybrid, &input) + } else { + unreachable!("ReverseAnchored always has a DFA") + } + } +} + +// Note that in this impl, we don't check that 'input.end() == +// input.haystack().len()'. In particular, when that condition is false, a +// match is always impossible because we know that the regex is always anchored +// at the end (or else 'ReverseAnchored' won't be built). We don't check that +// here because the 'Regex' wrapper actually does that for us in all cases. +// Thus, in this impl, we can actually assume that the end position in 'input' +// is equivalent to the length of the haystack. +impl Strategy for ReverseAnchored { + #[cfg_attr(feature = "perf-inline", inline(always))] + fn group_info(&self) -> &GroupInfo { + self.core.group_info() + } + + #[cfg_attr(feature = "perf-inline", inline(always))] + fn create_cache(&self) -> Cache { + self.core.create_cache() + } + + #[cfg_attr(feature = "perf-inline", inline(always))] + fn reset_cache(&self, cache: &mut Cache) { + self.core.reset_cache(cache); + } + + fn is_accelerated(&self) -> bool { + // Since this is anchored at the end, a reverse anchored search is + // almost certainly guaranteed to result in a much faster search than + // a standard forward search. 
+ true + } + + fn memory_usage(&self) -> usize { + self.core.memory_usage() + } + + #[cfg_attr(feature = "perf-inline", inline(always))] + fn search(&self, cache: &mut Cache, input: &Input<'_>) -> Option { + if input.get_anchored().is_anchored() { + return self.core.search(cache, input); + } + match self.try_search_half_anchored_rev(cache, input) { + Err(_err) => { + trace!("fast reverse anchored search failed: {_err}"); + self.core.search_nofail(cache, input) + } + Ok(None) => None, + Ok(Some(hm)) => { + Some(Match::new(hm.pattern(), hm.offset()..input.end())) + } + } + } + + #[cfg_attr(feature = "perf-inline", inline(always))] + fn search_half( + &self, + cache: &mut Cache, + input: &Input<'_>, + ) -> Option { + if input.get_anchored().is_anchored() { + return self.core.search_half(cache, input); + } + match self.try_search_half_anchored_rev(cache, input) { + Err(_err) => { + trace!("fast reverse anchored search failed: {_err}"); + self.core.search_half_nofail(cache, input) + } + Ok(None) => None, + Ok(Some(hm)) => { + // Careful here! 'try_search_half' is a *forward* search that + // only cares about the *end* position of a match. But + // 'hm.offset()' is actually the start of the match. So we + // actually just throw that away here and, since we know we + // have a match, return the only possible position at which a + // match can occur: input.end(). + Some(HalfMatch::new(hm.pattern(), input.end())) + } + } + } + + #[cfg_attr(feature = "perf-inline", inline(always))] + fn is_match(&self, cache: &mut Cache, input: &Input<'_>) -> bool { + if input.get_anchored().is_anchored() { + return self.core.is_match(cache, input); + } + match self.try_search_half_anchored_rev(cache, input) { + Err(_err) => { + trace!("fast reverse anchored search failed: {_err}"); + self.core.is_match_nofail(cache, input) + } + Ok(None) => false, + Ok(Some(_)) => true, + } + } + + #[cfg_attr(feature = "perf-inline", inline(always))] + fn search_slots( + &self, + cache: &mut Cache, + input: &Input<'_>, + slots: &mut [Option], + ) -> Option { + if input.get_anchored().is_anchored() { + return self.core.search_slots(cache, input, slots); + } + match self.try_search_half_anchored_rev(cache, input) { + Err(_err) => { + trace!("fast reverse anchored search failed: {_err}"); + self.core.search_slots_nofail(cache, input, slots) + } + Ok(None) => None, + Ok(Some(hm)) => { + if !self.core.is_capture_search_needed(slots.len()) { + trace!("asked for slots unnecessarily, skipping captures"); + let m = Match::new(hm.pattern(), hm.offset()..input.end()); + copy_match_to_slots(m, slots); + return Some(m.pattern()); + } + let start = hm.offset(); + let input = input + .clone() + .span(start..input.end()) + .anchored(Anchored::Pattern(hm.pattern())); + self.core.search_slots_nofail(cache, &input, slots) + } + } + } + + #[cfg_attr(feature = "perf-inline", inline(always))] + fn which_overlapping_matches( + &self, + cache: &mut Cache, + input: &Input<'_>, + patset: &mut PatternSet, + ) { + // It seems like this could probably benefit from a reverse anchored + // optimization, perhaps by doing an overlapping reverse search (which + // the DFAs do support). I haven't given it much thought though, and + // I'm currently focus more on the single pattern case. 
+ self.core.which_overlapping_matches(cache, input, patset) + } +} + +#[derive(Debug)] +struct ReverseSuffix { + core: Core, + pre: Prefilter, +} + +impl ReverseSuffix { + fn new(core: Core, hirs: &[&Hir]) -> Result { + if !core.info.config().get_auto_prefilter() { + debug!( + "skipping reverse suffix optimization because \ + automatic prefilters are disabled" + ); + return Err(core); + } + // Like the reverse inner optimization, we don't do this for regexes + // that are always anchored. It could lead to scanning too much, but + // could say "no match" much more quickly than running the regex + // engine if the initial literal scan doesn't match. With that said, + // the reverse suffix optimization has lower overhead, since it only + // requires a reverse scan after a literal match to confirm or reject + // the match. (Although, in the case of confirmation, it then needs to + // do another forward scan to find the end position.) + // + // Note that the caller can still request an anchored search even + // when the regex isn't anchored. We detect that case in the search + // routines below and just fallback to the core engine. Currently this + // optimization assumes all searches are unanchored, so if we do want + // to enable this optimization for anchored searches, it will need a + // little work to support it. + if core.info.is_always_anchored_start() { + debug!( + "skipping reverse suffix optimization because \ + the regex is always anchored at the start", + ); + return Err(core); + } + // Only DFAs can do reverse searches (currently), so we need one of + // them in order to do this optimization. It's possible (although + // pretty unlikely) that we have neither and need to give up. + if !core.hybrid.is_some() && !core.dfa.is_some() { + debug!( + "skipping reverse suffix optimization because \ + we don't have a lazy DFA or a full DFA" + ); + return Err(core); + } + if core.pre.as_ref().map_or(false, |p| p.is_fast()) { + debug!( + "skipping reverse suffix optimization because \ + we already have a prefilter that we think is fast" + ); + return Err(core); + } + let kind = core.info.config().get_match_kind(); + let suffixes = crate::util::prefilter::suffixes(kind, hirs); + let lcs = match suffixes.longest_common_suffix() { + None => { + debug!( + "skipping reverse suffix optimization because \ + a longest common suffix could not be found", + ); + return Err(core); + } + Some(lcs) if lcs.is_empty() => { + debug!( + "skipping reverse suffix optimization because \ + the longest common suffix is the empty string", + ); + return Err(core); + } + Some(lcs) => lcs, + }; + let pre = match Prefilter::new(kind, &[lcs]) { + Some(pre) => pre, + None => { + debug!( + "skipping reverse suffix optimization because \ + a prefilter could not be constructed from the \ + longest common suffix", + ); + return Err(core); + } + }; + if !pre.is_fast() { + debug!( + "skipping reverse suffix optimization because \ + while we have a suffix prefilter, it is not \ + believed to be 'fast'" + ); + return Err(core); + } + Ok(ReverseSuffix { core, pre }) + } + + #[cfg_attr(feature = "perf-inline", inline(always))] + fn try_search_half_start( + &self, + cache: &mut Cache, + input: &Input<'_>, + ) -> Result, RetryError> { + let mut span = input.get_span(); + let mut min_start = 0; + loop { + let litmatch = match self.pre.find(input.haystack(), span) { + None => return Ok(None), + Some(span) => span, + }; + trace!("reverse suffix scan found suffix match at {litmatch:?}"); + let revinput = input + .clone() + 
.anchored(Anchored::Yes) + .span(input.start()..litmatch.end); + match self + .try_search_half_rev_limited(cache, &revinput, min_start)? + { + None => { + if span.start >= span.end { + break; + } + span.start = litmatch.start.checked_add(1).unwrap(); + } + Some(hm) => return Ok(Some(hm)), + } + min_start = litmatch.end; + } + Ok(None) + } + + #[cfg_attr(feature = "perf-inline", inline(always))] + fn try_search_half_fwd( + &self, + cache: &mut Cache, + input: &Input<'_>, + ) -> Result, RetryFailError> { + if let Some(e) = self.core.dfa.get(&input) { + trace!( + "using full DFA for forward reverse suffix search at {:?}", + input.get_span() + ); + e.try_search_half_fwd(&input) + } else if let Some(e) = self.core.hybrid.get(&input) { + trace!( + "using lazy DFA for forward reverse suffix search at {:?}", + input.get_span() + ); + e.try_search_half_fwd(&mut cache.hybrid, &input) + } else { + unreachable!("ReverseSuffix always has a DFA") + } + } + + #[cfg_attr(feature = "perf-inline", inline(always))] + fn try_search_half_rev_limited( + &self, + cache: &mut Cache, + input: &Input<'_>, + min_start: usize, + ) -> Result, RetryError> { + if let Some(e) = self.core.dfa.get(&input) { + trace!( + "using full DFA for reverse suffix search at {:?}, \ + but will be stopped at {} to avoid quadratic behavior", + input.get_span(), + min_start, + ); + e.try_search_half_rev_limited(&input, min_start) + } else if let Some(e) = self.core.hybrid.get(&input) { + trace!( + "using lazy DFA for reverse suffix search at {:?}, \ + but will be stopped at {} to avoid quadratic behavior", + input.get_span(), + min_start, + ); + e.try_search_half_rev_limited(&mut cache.hybrid, &input, min_start) + } else { + unreachable!("ReverseSuffix always has a DFA") + } + } +} + +impl Strategy for ReverseSuffix { + #[cfg_attr(feature = "perf-inline", inline(always))] + fn group_info(&self) -> &GroupInfo { + self.core.group_info() + } + + #[cfg_attr(feature = "perf-inline", inline(always))] + fn create_cache(&self) -> Cache { + self.core.create_cache() + } + + #[cfg_attr(feature = "perf-inline", inline(always))] + fn reset_cache(&self, cache: &mut Cache) { + self.core.reset_cache(cache); + } + + fn is_accelerated(&self) -> bool { + self.pre.is_fast() + } + + fn memory_usage(&self) -> usize { + self.core.memory_usage() + self.pre.memory_usage() + } + + #[cfg_attr(feature = "perf-inline", inline(always))] + fn search(&self, cache: &mut Cache, input: &Input<'_>) -> Option { + if input.get_anchored().is_anchored() { + return self.core.search(cache, input); + } + match self.try_search_half_start(cache, input) { + Err(RetryError::Quadratic(_err)) => { + trace!("reverse suffix optimization failed: {_err}"); + self.core.search(cache, input) + } + Err(RetryError::Fail(_err)) => { + trace!("reverse suffix reverse fast search failed: {_err}"); + self.core.search_nofail(cache, input) + } + Ok(None) => None, + Ok(Some(hm_start)) => { + let fwdinput = input + .clone() + .anchored(Anchored::Pattern(hm_start.pattern())) + .span(hm_start.offset()..input.end()); + match self.try_search_half_fwd(cache, &fwdinput) { + Err(_err) => { + trace!( + "reverse suffix forward fast search failed: {_err}" + ); + self.core.search_nofail(cache, input) + } + Ok(None) => { + unreachable!( + "suffix match plus reverse match implies \ + there must be a match", + ) + } + Ok(Some(hm_end)) => Some(Match::new( + hm_start.pattern(), + hm_start.offset()..hm_end.offset(), + )), + } + } + } + } + + #[cfg_attr(feature = "perf-inline", inline(always))] + fn search_half( + &self, 
+ cache: &mut Cache, + input: &Input<'_>, + ) -> Option { + if input.get_anchored().is_anchored() { + return self.core.search_half(cache, input); + } + match self.try_search_half_start(cache, input) { + Err(RetryError::Quadratic(_err)) => { + trace!("reverse suffix half optimization failed: {_err}"); + self.core.search_half(cache, input) + } + Err(RetryError::Fail(_err)) => { + trace!( + "reverse suffix reverse fast half search failed: {_err}" + ); + self.core.search_half_nofail(cache, input) + } + Ok(None) => None, + Ok(Some(hm_start)) => { + // This is a bit subtle. It is tempting to just stop searching + // at this point and return a half-match with an offset + // corresponding to where the suffix was found. But the suffix + // match does not necessarily correspond to the end of the + // proper leftmost-first match. Consider /[a-z]+ing/ against + // 'tingling'. The first suffix match is the first 'ing', and + // the /[a-z]+/ matches the 't'. So if we stopped here, then + // we'd report 'ting' as the match. But 'tingling' is the + // correct match because of greediness. + let fwdinput = input + .clone() + .anchored(Anchored::Pattern(hm_start.pattern())) + .span(hm_start.offset()..input.end()); + match self.try_search_half_fwd(cache, &fwdinput) { + Err(_err) => { + trace!( + "reverse suffix forward fast search failed: {_err}" + ); + self.core.search_half_nofail(cache, input) + } + Ok(None) => { + unreachable!( + "suffix match plus reverse match implies \ + there must be a match", + ) + } + Ok(Some(hm_end)) => Some(hm_end), + } + } + } + } + + #[cfg_attr(feature = "perf-inline", inline(always))] + fn is_match(&self, cache: &mut Cache, input: &Input<'_>) -> bool { + if input.get_anchored().is_anchored() { + return self.core.is_match(cache, input); + } + match self.try_search_half_start(cache, input) { + Err(RetryError::Quadratic(_err)) => { + trace!("reverse suffix half optimization failed: {_err}"); + self.core.is_match_nofail(cache, input) + } + Err(RetryError::Fail(_err)) => { + trace!( + "reverse suffix reverse fast half search failed: {_err}" + ); + self.core.is_match_nofail(cache, input) + } + Ok(None) => false, + Ok(Some(_)) => true, + } + } + + #[cfg_attr(feature = "perf-inline", inline(always))] + fn search_slots( + &self, + cache: &mut Cache, + input: &Input<'_>, + slots: &mut [Option], + ) -> Option { + if input.get_anchored().is_anchored() { + return self.core.search_slots(cache, input, slots); + } + if !self.core.is_capture_search_needed(slots.len()) { + trace!("asked for slots unnecessarily, trying fast path"); + let m = self.search(cache, input)?; + copy_match_to_slots(m, slots); + return Some(m.pattern()); + } + let hm_start = match self.try_search_half_start(cache, input) { + Err(RetryError::Quadratic(_err)) => { + trace!("reverse suffix captures optimization failed: {_err}"); + return self.core.search_slots(cache, input, slots); + } + Err(RetryError::Fail(_err)) => { + trace!( + "reverse suffix reverse fast captures search failed: \ + {_err}" + ); + return self.core.search_slots_nofail(cache, input, slots); + } + Ok(None) => return None, + Ok(Some(hm_start)) => hm_start, + }; + trace!( + "match found at {}..{} in capture search, \ + using another engine to find captures", + hm_start.offset(), + input.end(), + ); + let start = hm_start.offset(); + let input = input + .clone() + .span(start..input.end()) + .anchored(Anchored::Pattern(hm_start.pattern())); + self.core.search_slots_nofail(cache, &input, slots) + } + + #[cfg_attr(feature = "perf-inline", inline(always))] + fn 
which_overlapping_matches( + &self, + cache: &mut Cache, + input: &Input<'_>, + patset: &mut PatternSet, + ) { + self.core.which_overlapping_matches(cache, input, patset) + } +} + +#[derive(Debug)] +struct ReverseInner { + core: Core, + preinner: Prefilter, + nfarev: NFA, + hybrid: wrappers::ReverseHybrid, + dfa: wrappers::ReverseDFA, +} + +impl ReverseInner { + fn new(core: Core, hirs: &[&Hir]) -> Result { + if !core.info.config().get_auto_prefilter() { + debug!( + "skipping reverse inner optimization because \ + automatic prefilters are disabled" + ); + return Err(core); + } + // Currently we hard-code the assumption of leftmost-first match + // semantics. This isn't a huge deal because 'all' semantics tend to + // only be used for forward overlapping searches with multiple regexes, + // and this optimization only supports a single pattern at the moment. + if core.info.config().get_match_kind() != MatchKind::LeftmostFirst { + debug!( + "skipping reverse inner optimization because \ + match kind is {:?} but this only supports leftmost-first", + core.info.config().get_match_kind(), + ); + return Err(core); + } + // It's likely that a reverse inner scan has too much overhead for it + // to be worth it when the regex is anchored at the start. It is + // possible for it to be quite a bit faster if the initial literal + // scan fails to detect a match, in which case, we can say "no match" + // very quickly. But this could be undesirable, e.g., scanning too far + // or when the literal scan matches. If it matches, then confirming the + // match requires a reverse scan followed by a forward scan to confirm + // or reject, which is a fair bit of work. + // + // Note that the caller can still request an anchored search even + // when the regex isn't anchored. We detect that case in the search + // routines below and just fallback to the core engine. Currently this + // optimization assumes all searches are unanchored, so if we do want + // to enable this optimization for anchored searches, it will need a + // little work to support it. + if core.info.is_always_anchored_start() { + debug!( + "skipping reverse inner optimization because \ + the regex is always anchored at the start", + ); + return Err(core); + } + // Only DFAs can do reverse searches (currently), so we need one of + // them in order to do this optimization. It's possible (although + // pretty unlikely) that we have neither and need to give up. + if !core.hybrid.is_some() && !core.dfa.is_some() { + debug!( + "skipping reverse inner optimization because \ + we don't have a lazy DFA or a full DFA" + ); + return Err(core); + } + if core.pre.as_ref().map_or(false, |p| p.is_fast()) { + debug!( + "skipping reverse inner optimization because \ + we already have a prefilter that we think is fast" + ); + return Err(core); + } else if core.pre.is_some() { + debug!( + "core engine has a prefix prefilter, but it is \ + probably not fast, so continuing with attempt to \ + use reverse inner prefilter" + ); + } + let (concat_prefix, preinner) = match reverse_inner::extract(hirs) { + Some(x) => x, + // N.B. the 'extract' function emits debug messages explaining + // why we bailed out here. 
+ None => return Err(core), + }; + debug!("building reverse NFA for prefix before inner literal"); + let mut lookm = LookMatcher::new(); + lookm.set_line_terminator(core.info.config().get_line_terminator()); + let thompson_config = thompson::Config::new() + .reverse(true) + .utf8(core.info.config().get_utf8_empty()) + .nfa_size_limit(core.info.config().get_nfa_size_limit()) + .shrink(false) + .which_captures(WhichCaptures::None) + .look_matcher(lookm); + let result = thompson::Compiler::new() + .configure(thompson_config) + .build_from_hir(&concat_prefix); + let nfarev = match result { + Ok(nfarev) => nfarev, + Err(_err) => { + debug!( + "skipping reverse inner optimization because the \ + reverse NFA failed to build: {}", + _err, + ); + return Err(core); + } + }; + debug!("building reverse DFA for prefix before inner literal"); + let dfa = if !core.info.config().get_dfa() { + wrappers::ReverseDFA::none() + } else { + wrappers::ReverseDFA::new(&core.info, &nfarev) + }; + let hybrid = if !core.info.config().get_hybrid() { + wrappers::ReverseHybrid::none() + } else if dfa.is_some() { + debug!( + "skipping lazy DFA for reverse inner optimization \ + because we have a full DFA" + ); + wrappers::ReverseHybrid::none() + } else { + wrappers::ReverseHybrid::new(&core.info, &nfarev) + }; + Ok(ReverseInner { core, preinner, nfarev, hybrid, dfa }) + } + + #[cfg_attr(feature = "perf-inline", inline(always))] + fn try_search_full( + &self, + cache: &mut Cache, + input: &Input<'_>, + ) -> Result, RetryError> { + let mut span = input.get_span(); + let mut min_match_start = 0; + let mut min_pre_start = 0; + loop { + let litmatch = match self.preinner.find(input.haystack(), span) { + None => return Ok(None), + Some(span) => span, + }; + if litmatch.start < min_pre_start { + trace!( + "found inner prefilter match at {litmatch:?}, which starts \ + before the end of the last forward scan at {min_pre_start}, \ + quitting to avoid quadratic behavior", + ); + return Err(RetryError::Quadratic(RetryQuadraticError::new())); + } + trace!("reverse inner scan found inner match at {litmatch:?}"); + let revinput = input + .clone() + .anchored(Anchored::Yes) + .span(input.start()..litmatch.start); + // Note that in addition to the literal search above scanning past + // our minimum start point, this routine can also return an error + // as a result of detecting possible quadratic behavior if the + // reverse scan goes past the minimum start point. That is, the + // literal search might not, but the reverse regex search for the + // prefix might! + match self.try_search_half_rev_limited( + cache, + &revinput, + min_match_start, + )? { + None => { + if span.start >= span.end { + break; + } + span.start = litmatch.start.checked_add(1).unwrap(); + } + Some(hm_start) => { + let fwdinput = input + .clone() + .anchored(Anchored::Pattern(hm_start.pattern())) + .span(hm_start.offset()..input.end()); + match self.try_search_half_fwd_stopat(cache, &fwdinput)? 
{ + Err(stopat) => { + min_pre_start = stopat; + span.start = + litmatch.start.checked_add(1).unwrap(); + } + Ok(hm_end) => { + return Ok(Some(Match::new( + hm_start.pattern(), + hm_start.offset()..hm_end.offset(), + ))) + } + } + } + } + min_match_start = litmatch.end; + } + Ok(None) + } + + #[cfg_attr(feature = "perf-inline", inline(always))] + fn try_search_half_fwd_stopat( + &self, + cache: &mut Cache, + input: &Input<'_>, + ) -> Result, RetryFailError> { + if let Some(e) = self.core.dfa.get(&input) { + trace!( + "using full DFA for forward reverse inner search at {:?}", + input.get_span() + ); + e.try_search_half_fwd_stopat(&input) + } else if let Some(e) = self.core.hybrid.get(&input) { + trace!( + "using lazy DFA for forward reverse inner search at {:?}", + input.get_span() + ); + e.try_search_half_fwd_stopat(&mut cache.hybrid, &input) + } else { + unreachable!("ReverseInner always has a DFA") + } + } + + #[cfg_attr(feature = "perf-inline", inline(always))] + fn try_search_half_rev_limited( + &self, + cache: &mut Cache, + input: &Input<'_>, + min_start: usize, + ) -> Result, RetryError> { + if let Some(e) = self.dfa.get(&input) { + trace!( + "using full DFA for reverse inner search at {:?}, \ + but will be stopped at {} to avoid quadratic behavior", + input.get_span(), + min_start, + ); + e.try_search_half_rev_limited(&input, min_start) + } else if let Some(e) = self.hybrid.get(&input) { + trace!( + "using lazy DFA for reverse inner search at {:?}, \ + but will be stopped at {} to avoid quadratic behavior", + input.get_span(), + min_start, + ); + e.try_search_half_rev_limited( + &mut cache.revhybrid, + &input, + min_start, + ) + } else { + unreachable!("ReverseInner always has a DFA") + } + } +} + +impl Strategy for ReverseInner { + #[cfg_attr(feature = "perf-inline", inline(always))] + fn group_info(&self) -> &GroupInfo { + self.core.group_info() + } + + #[cfg_attr(feature = "perf-inline", inline(always))] + fn create_cache(&self) -> Cache { + let mut cache = self.core.create_cache(); + cache.revhybrid = self.hybrid.create_cache(); + cache + } + + #[cfg_attr(feature = "perf-inline", inline(always))] + fn reset_cache(&self, cache: &mut Cache) { + self.core.reset_cache(cache); + cache.revhybrid.reset(&self.hybrid); + } + + fn is_accelerated(&self) -> bool { + self.preinner.is_fast() + } + + fn memory_usage(&self) -> usize { + self.core.memory_usage() + + self.preinner.memory_usage() + + self.nfarev.memory_usage() + + self.dfa.memory_usage() + } + + #[cfg_attr(feature = "perf-inline", inline(always))] + fn search(&self, cache: &mut Cache, input: &Input<'_>) -> Option { + if input.get_anchored().is_anchored() { + return self.core.search(cache, input); + } + match self.try_search_full(cache, input) { + Err(RetryError::Quadratic(_err)) => { + trace!("reverse inner optimization failed: {_err}"); + self.core.search(cache, input) + } + Err(RetryError::Fail(_err)) => { + trace!("reverse inner fast search failed: {_err}"); + self.core.search_nofail(cache, input) + } + Ok(matornot) => matornot, + } + } + + #[cfg_attr(feature = "perf-inline", inline(always))] + fn search_half( + &self, + cache: &mut Cache, + input: &Input<'_>, + ) -> Option { + if input.get_anchored().is_anchored() { + return self.core.search_half(cache, input); + } + match self.try_search_full(cache, input) { + Err(RetryError::Quadratic(_err)) => { + trace!("reverse inner half optimization failed: {_err}"); + self.core.search_half(cache, input) + } + Err(RetryError::Fail(_err)) => { + trace!("reverse inner fast half search 
failed: {_err}"); + self.core.search_half_nofail(cache, input) + } + Ok(None) => None, + Ok(Some(m)) => Some(HalfMatch::new(m.pattern(), m.end())), + } + } + + #[cfg_attr(feature = "perf-inline", inline(always))] + fn is_match(&self, cache: &mut Cache, input: &Input<'_>) -> bool { + if input.get_anchored().is_anchored() { + return self.core.is_match(cache, input); + } + match self.try_search_full(cache, input) { + Err(RetryError::Quadratic(_err)) => { + trace!("reverse inner half optimization failed: {_err}"); + self.core.is_match_nofail(cache, input) + } + Err(RetryError::Fail(_err)) => { + trace!("reverse inner fast half search failed: {_err}"); + self.core.is_match_nofail(cache, input) + } + Ok(None) => false, + Ok(Some(_)) => true, + } + } + + #[cfg_attr(feature = "perf-inline", inline(always))] + fn search_slots( + &self, + cache: &mut Cache, + input: &Input<'_>, + slots: &mut [Option], + ) -> Option { + if input.get_anchored().is_anchored() { + return self.core.search_slots(cache, input, slots); + } + if !self.core.is_capture_search_needed(slots.len()) { + trace!("asked for slots unnecessarily, trying fast path"); + let m = self.search(cache, input)?; + copy_match_to_slots(m, slots); + return Some(m.pattern()); + } + let m = match self.try_search_full(cache, input) { + Err(RetryError::Quadratic(_err)) => { + trace!("reverse inner captures optimization failed: {_err}"); + return self.core.search_slots(cache, input, slots); + } + Err(RetryError::Fail(_err)) => { + trace!("reverse inner fast captures search failed: {_err}"); + return self.core.search_slots_nofail(cache, input, slots); + } + Ok(None) => return None, + Ok(Some(m)) => m, + }; + trace!( + "match found at {}..{} in capture search, \ + using another engine to find captures", + m.start(), + m.end(), + ); + let input = input + .clone() + .span(m.start()..m.end()) + .anchored(Anchored::Pattern(m.pattern())); + self.core.search_slots_nofail(cache, &input, slots) + } + + #[cfg_attr(feature = "perf-inline", inline(always))] + fn which_overlapping_matches( + &self, + cache: &mut Cache, + input: &Input<'_>, + patset: &mut PatternSet, + ) { + self.core.which_overlapping_matches(cache, input, patset) + } +} + +/// Copies the offsets in the given match to the corresponding positions in +/// `slots`. +/// +/// In effect, this sets the slots corresponding to the implicit group for the +/// pattern in the given match. If the indices for the corresponding slots do +/// not exist, then no slots are set. +/// +/// This is useful when the caller provides slots (or captures), but you use a +/// regex engine that doesn't operate on slots (like a lazy DFA). This function +/// lets you map the match you get back to the slots provided by the caller. +#[cfg_attr(feature = "perf-inline", inline(always))] +fn copy_match_to_slots(m: Match, slots: &mut [Option]) { + let slot_start = m.pattern().as_usize() * 2; + let slot_end = slot_start + 1; + if let Some(slot) = slots.get_mut(slot_start) { + *slot = NonMaxUsize::new(m.start()); + } + if let Some(slot) = slots.get_mut(slot_end) { + *slot = NonMaxUsize::new(m.end()); + } +} diff --git a/vendor/regex-automata/src/meta/wrappers.rs b/vendor/regex-automata/src/meta/wrappers.rs new file mode 100644 index 00000000000000..6651cb90761874 --- /dev/null +++ b/vendor/regex-automata/src/meta/wrappers.rs @@ -0,0 +1,1336 @@ +/*! +This module contains a boat load of wrappers around each of our internal regex +engines. They encapsulate a few things: + +1. 
The wrappers manage the conditional existence of the regex engine. Namely,
+the PikeVM is the only required regex engine. The rest are optional. These
+wrappers present a uniform API regardless of which engines are available. And
+availability might be determined by compile time features or by dynamic
+configuration via `meta::Config`. Encapsulating the conditional compilation
+features is in particular a huge simplification for the higher level code that
+composes these engines.
+2. The wrappers manage construction of each engine, including skipping it if
+the engine is unavailable or configured to not be used.
+3. The wrappers manage whether an engine *can* be used for a particular
+search configuration. For example, `BoundedBacktracker::get` only returns a
+backtracking engine when the haystack is no bigger than the maximum supported
+length. The wrappers also sometimes take a position on when an engine *ought*
+to be used, but only in cases where the logic is extremely local to the engine
+itself. Otherwise, things like "choose between the backtracker and the one-pass
+DFA" are managed by the higher level meta strategy code.
+
+There are also corresponding wrappers for the various `Cache` types for each
+regex engine that needs them. If an engine is unavailable or not used, then a
+cache for it will *not* actually be allocated.
+*/
+
+use alloc::vec::Vec;
+
+use crate::{
+    meta::{
+        error::{BuildError, RetryError, RetryFailError},
+        regex::RegexInfo,
+    },
+    nfa::thompson::{pikevm, NFA},
+    util::{prefilter::Prefilter, primitives::NonMaxUsize},
+    HalfMatch, Input, Match, MatchKind, PatternID, PatternSet,
+};
+
+#[cfg(feature = "dfa-build")]
+use crate::dfa;
+#[cfg(feature = "dfa-onepass")]
+use crate::dfa::onepass;
+#[cfg(feature = "hybrid")]
+use crate::hybrid;
+#[cfg(feature = "nfa-backtrack")]
+use crate::nfa::thompson::backtrack;
+
+#[derive(Debug)]
+pub(crate) struct PikeVM(PikeVMEngine);
+
+impl PikeVM {
+    pub(crate) fn new(
+        info: &RegexInfo,
+        pre: Option<Prefilter>,
+        nfa: &NFA,
+    ) -> Result<PikeVM, BuildError> {
+        PikeVMEngine::new(info, pre, nfa).map(PikeVM)
+    }
+
+    pub(crate) fn create_cache(&self) -> PikeVMCache {
+        PikeVMCache::none()
+    }
+
+    #[cfg_attr(feature = "perf-inline", inline(always))]
+    pub(crate) fn get(&self) -> &PikeVMEngine {
+        &self.0
+    }
+}
+
+#[derive(Debug)]
+pub(crate) struct PikeVMEngine(pikevm::PikeVM);
+
+impl PikeVMEngine {
+    pub(crate) fn new(
+        info: &RegexInfo,
+        pre: Option<Prefilter>,
+        nfa: &NFA,
+    ) -> Result<PikeVMEngine, BuildError> {
+        let pikevm_config = pikevm::Config::new()
+            .match_kind(info.config().get_match_kind())
+            .prefilter(pre);
+        let engine = pikevm::Builder::new()
+            .configure(pikevm_config)
+            .build_from_nfa(nfa.clone())
+            .map_err(BuildError::nfa)?;
+        debug!("PikeVM built");
+        Ok(PikeVMEngine(engine))
+    }
+
+    #[cfg_attr(feature = "perf-inline", inline(always))]
+    pub(crate) fn is_match(
+        &self,
+        cache: &mut PikeVMCache,
+        input: &Input<'_>,
+    ) -> bool {
+        self.0.is_match(cache.get(&self.0), input.clone())
+    }
+
+    #[cfg_attr(feature = "perf-inline", inline(always))]
+    pub(crate) fn search_slots(
+        &self,
+        cache: &mut PikeVMCache,
+        input: &Input<'_>,
+        slots: &mut [Option<NonMaxUsize>],
+    ) -> Option<PatternID> {
+        self.0.search_slots(cache.get(&self.0), input, slots)
+    }
+
+    #[cfg_attr(feature = "perf-inline", inline(always))]
+    pub(crate) fn which_overlapping_matches(
+        &self,
+        cache: &mut PikeVMCache,
+        input: &Input<'_>,
+        patset: &mut PatternSet,
+    ) {
+        self.0.which_overlapping_matches(cache.get(&self.0), input, patset)
+    }
+}
+
+#[derive(Clone, Debug)]
+pub(crate) struct PikeVMCache(Option<pikevm::Cache>);
+
+impl 
PikeVMCache { + pub(crate) fn none() -> PikeVMCache { + PikeVMCache(None) + } + + pub(crate) fn reset(&mut self, builder: &PikeVM) { + self.get(&builder.get().0).reset(&builder.get().0); + } + + pub(crate) fn memory_usage(&self) -> usize { + self.0.as_ref().map_or(0, |c| c.memory_usage()) + } + + fn get(&mut self, vm: &pikevm::PikeVM) -> &mut pikevm::Cache { + self.0.get_or_insert_with(|| vm.create_cache()) + } +} + +#[derive(Debug)] +pub(crate) struct BoundedBacktracker(Option); + +impl BoundedBacktracker { + pub(crate) fn new( + info: &RegexInfo, + pre: Option, + nfa: &NFA, + ) -> Result { + BoundedBacktrackerEngine::new(info, pre, nfa).map(BoundedBacktracker) + } + + pub(crate) fn create_cache(&self) -> BoundedBacktrackerCache { + BoundedBacktrackerCache::none() + } + + #[cfg_attr(feature = "perf-inline", inline(always))] + pub(crate) fn get( + &self, + input: &Input<'_>, + ) -> Option<&BoundedBacktrackerEngine> { + let engine = self.0.as_ref()?; + // It is difficult to make the backtracker give up early if it is + // guaranteed to eventually wind up in a match state. This is because + // of the greedy nature of a backtracker: it just blindly mushes + // forward. Every other regex engine is able to give up more quickly, + // so even if the backtracker might be able to zip through faster than + // (say) the PikeVM, we prefer the theoretical benefit that some other + // engine might be able to scan much less of the haystack than the + // backtracker. + // + // Now, if the haystack is really short already, then we allow the + // backtracker to run. (This hasn't been litigated quantitatively with + // benchmarks. Just a hunch.) + if input.get_earliest() && input.haystack().len() > 128 { + return None; + } + // If the backtracker is just going to return an error because the + // haystack is too long, then obviously do not use it. + if input.get_span().len() > engine.max_haystack_len() { + return None; + } + Some(engine) + } +} + +#[derive(Debug)] +pub(crate) struct BoundedBacktrackerEngine( + #[cfg(feature = "nfa-backtrack")] backtrack::BoundedBacktracker, + #[cfg(not(feature = "nfa-backtrack"))] (), +); + +impl BoundedBacktrackerEngine { + pub(crate) fn new( + info: &RegexInfo, + pre: Option, + nfa: &NFA, + ) -> Result, BuildError> { + #[cfg(feature = "nfa-backtrack")] + { + if !info.config().get_backtrack() + || info.config().get_match_kind() != MatchKind::LeftmostFirst + { + return Ok(None); + } + let backtrack_config = backtrack::Config::new().prefilter(pre); + let engine = backtrack::Builder::new() + .configure(backtrack_config) + .build_from_nfa(nfa.clone()) + .map_err(BuildError::nfa)?; + debug!( + "BoundedBacktracker built (max haystack length: {:?})", + engine.max_haystack_len() + ); + Ok(Some(BoundedBacktrackerEngine(engine))) + } + #[cfg(not(feature = "nfa-backtrack"))] + { + Ok(None) + } + } + + #[cfg_attr(feature = "perf-inline", inline(always))] + pub(crate) fn is_match( + &self, + cache: &mut BoundedBacktrackerCache, + input: &Input<'_>, + ) -> bool { + #[cfg(feature = "nfa-backtrack")] + { + // OK because we only permit access to this engine when we know + // the haystack is short enough for the backtracker to run without + // reporting an error. + self.0.try_is_match(cache.get(&self.0), input.clone()).unwrap() + } + #[cfg(not(feature = "nfa-backtrack"))] + { + // Impossible to reach because this engine is never constructed + // if the requisite features aren't enabled. 
+ unreachable!() + } + } + + #[cfg_attr(feature = "perf-inline", inline(always))] + pub(crate) fn search_slots( + &self, + cache: &mut BoundedBacktrackerCache, + input: &Input<'_>, + slots: &mut [Option], + ) -> Option { + #[cfg(feature = "nfa-backtrack")] + { + // OK because we only permit access to this engine when we know + // the haystack is short enough for the backtracker to run without + // reporting an error. + self.0.try_search_slots(cache.get(&self.0), input, slots).unwrap() + } + #[cfg(not(feature = "nfa-backtrack"))] + { + // Impossible to reach because this engine is never constructed + // if the requisite features aren't enabled. + unreachable!() + } + } + + #[cfg_attr(feature = "perf-inline", inline(always))] + fn max_haystack_len(&self) -> usize { + #[cfg(feature = "nfa-backtrack")] + { + self.0.max_haystack_len() + } + #[cfg(not(feature = "nfa-backtrack"))] + { + // Impossible to reach because this engine is never constructed + // if the requisite features aren't enabled. + unreachable!() + } + } +} + +#[derive(Clone, Debug)] +pub(crate) struct BoundedBacktrackerCache( + #[cfg(feature = "nfa-backtrack")] Option, + #[cfg(not(feature = "nfa-backtrack"))] (), +); + +impl BoundedBacktrackerCache { + pub(crate) fn none() -> BoundedBacktrackerCache { + #[cfg(feature = "nfa-backtrack")] + { + BoundedBacktrackerCache(None) + } + #[cfg(not(feature = "nfa-backtrack"))] + { + BoundedBacktrackerCache(()) + } + } + + pub(crate) fn reset(&mut self, builder: &BoundedBacktracker) { + #[cfg(feature = "nfa-backtrack")] + if let Some(ref e) = builder.0 { + self.get(&e.0).reset(&e.0); + } + } + + pub(crate) fn memory_usage(&self) -> usize { + #[cfg(feature = "nfa-backtrack")] + { + self.0.as_ref().map_or(0, |c| c.memory_usage()) + } + #[cfg(not(feature = "nfa-backtrack"))] + { + 0 + } + } + + #[cfg(feature = "nfa-backtrack")] + fn get( + &mut self, + bb: &backtrack::BoundedBacktracker, + ) -> &mut backtrack::Cache { + self.0.get_or_insert_with(|| bb.create_cache()) + } +} + +#[derive(Debug)] +pub(crate) struct OnePass(Option); + +impl OnePass { + pub(crate) fn new(info: &RegexInfo, nfa: &NFA) -> OnePass { + OnePass(OnePassEngine::new(info, nfa)) + } + + pub(crate) fn create_cache(&self) -> OnePassCache { + OnePassCache::new(self) + } + + #[cfg_attr(feature = "perf-inline", inline(always))] + pub(crate) fn get(&self, input: &Input<'_>) -> Option<&OnePassEngine> { + let engine = self.0.as_ref()?; + if !input.get_anchored().is_anchored() + && !engine.get_nfa().is_always_start_anchored() + { + return None; + } + Some(engine) + } + + pub(crate) fn memory_usage(&self) -> usize { + self.0.as_ref().map_or(0, |e| e.memory_usage()) + } +} + +#[derive(Debug)] +pub(crate) struct OnePassEngine( + #[cfg(feature = "dfa-onepass")] onepass::DFA, + #[cfg(not(feature = "dfa-onepass"))] (), +); + +impl OnePassEngine { + pub(crate) fn new(info: &RegexInfo, nfa: &NFA) -> Option { + #[cfg(feature = "dfa-onepass")] + { + if !info.config().get_onepass() { + return None; + } + // In order to even attempt building a one-pass DFA, we require + // that we either have at least one explicit capturing group or + // there's a Unicode word boundary somewhere. If we don't have + // either of these things, then the lazy DFA will almost certainly + // be usable and be much faster. 
The only case where it might + // not is if the lazy DFA isn't utilizing its cache effectively, + // but in those cases, the underlying regex is almost certainly + // not one-pass or is too big to fit within the current one-pass + // implementation limits. + if info.props_union().explicit_captures_len() == 0 + && !info.props_union().look_set().contains_word_unicode() + { + debug!("not building OnePass because it isn't worth it"); + return None; + } + let onepass_config = onepass::Config::new() + .match_kind(info.config().get_match_kind()) + // Like for the lazy DFA, we unconditionally enable this + // because it doesn't cost much and makes the API more + // flexible. + .starts_for_each_pattern(true) + .byte_classes(info.config().get_byte_classes()) + .size_limit(info.config().get_onepass_size_limit()); + let result = onepass::Builder::new() + .configure(onepass_config) + .build_from_nfa(nfa.clone()); + let engine = match result { + Ok(engine) => engine, + Err(_err) => { + debug!("OnePass failed to build: {_err}"); + return None; + } + }; + debug!("OnePass built, {} bytes", engine.memory_usage()); + Some(OnePassEngine(engine)) + } + #[cfg(not(feature = "dfa-onepass"))] + { + None + } + } + + #[cfg_attr(feature = "perf-inline", inline(always))] + pub(crate) fn search_slots( + &self, + cache: &mut OnePassCache, + input: &Input<'_>, + slots: &mut [Option], + ) -> Option { + #[cfg(feature = "dfa-onepass")] + { + // OK because we only permit getting a OnePassEngine when we know + // the search is anchored and thus an error cannot occur. + self.0 + .try_search_slots(cache.0.as_mut().unwrap(), input, slots) + .unwrap() + } + #[cfg(not(feature = "dfa-onepass"))] + { + // Impossible to reach because this engine is never constructed + // if the requisite features aren't enabled. + unreachable!() + } + } + + pub(crate) fn memory_usage(&self) -> usize { + #[cfg(feature = "dfa-onepass")] + { + self.0.memory_usage() + } + #[cfg(not(feature = "dfa-onepass"))] + { + // Impossible to reach because this engine is never constructed + // if the requisite features aren't enabled. + unreachable!() + } + } + + #[cfg_attr(feature = "perf-inline", inline(always))] + fn get_nfa(&self) -> &NFA { + #[cfg(feature = "dfa-onepass")] + { + self.0.get_nfa() + } + #[cfg(not(feature = "dfa-onepass"))] + { + // Impossible to reach because this engine is never constructed + // if the requisite features aren't enabled. 
+ unreachable!() + } + } +} + +#[derive(Clone, Debug)] +pub(crate) struct OnePassCache( + #[cfg(feature = "dfa-onepass")] Option, + #[cfg(not(feature = "dfa-onepass"))] (), +); + +impl OnePassCache { + pub(crate) fn none() -> OnePassCache { + #[cfg(feature = "dfa-onepass")] + { + OnePassCache(None) + } + #[cfg(not(feature = "dfa-onepass"))] + { + OnePassCache(()) + } + } + + pub(crate) fn new(builder: &OnePass) -> OnePassCache { + #[cfg(feature = "dfa-onepass")] + { + OnePassCache(builder.0.as_ref().map(|e| e.0.create_cache())) + } + #[cfg(not(feature = "dfa-onepass"))] + { + OnePassCache(()) + } + } + + pub(crate) fn reset(&mut self, builder: &OnePass) { + #[cfg(feature = "dfa-onepass")] + if let Some(ref e) = builder.0 { + self.0.as_mut().unwrap().reset(&e.0); + } + } + + pub(crate) fn memory_usage(&self) -> usize { + #[cfg(feature = "dfa-onepass")] + { + self.0.as_ref().map_or(0, |c| c.memory_usage()) + } + #[cfg(not(feature = "dfa-onepass"))] + { + 0 + } + } +} + +#[derive(Debug)] +pub(crate) struct Hybrid(Option); + +impl Hybrid { + pub(crate) fn none() -> Hybrid { + Hybrid(None) + } + + pub(crate) fn new( + info: &RegexInfo, + pre: Option, + nfa: &NFA, + nfarev: &NFA, + ) -> Hybrid { + Hybrid(HybridEngine::new(info, pre, nfa, nfarev)) + } + + pub(crate) fn create_cache(&self) -> HybridCache { + HybridCache::new(self) + } + + #[cfg_attr(feature = "perf-inline", inline(always))] + pub(crate) fn get(&self, _input: &Input<'_>) -> Option<&HybridEngine> { + let engine = self.0.as_ref()?; + Some(engine) + } + + pub(crate) fn is_some(&self) -> bool { + self.0.is_some() + } +} + +#[derive(Debug)] +pub(crate) struct HybridEngine( + #[cfg(feature = "hybrid")] hybrid::regex::Regex, + #[cfg(not(feature = "hybrid"))] (), +); + +impl HybridEngine { + pub(crate) fn new( + info: &RegexInfo, + pre: Option, + nfa: &NFA, + nfarev: &NFA, + ) -> Option { + #[cfg(feature = "hybrid")] + { + if !info.config().get_hybrid() { + return None; + } + let dfa_config = hybrid::dfa::Config::new() + .match_kind(info.config().get_match_kind()) + .prefilter(pre.clone()) + // Enabling this is necessary for ensuring we can service any + // kind of 'Input' search without error. For the lazy DFA, + // this is not particularly costly, since the start states are + // generated lazily. + .starts_for_each_pattern(true) + .byte_classes(info.config().get_byte_classes()) + .unicode_word_boundary(true) + .specialize_start_states(pre.is_some()) + .cache_capacity(info.config().get_hybrid_cache_capacity()) + // This makes it possible for building a lazy DFA to + // fail even though the NFA has already been built. Namely, + // if the cache capacity is too small to fit some minimum + // number of states (which is small, like 4 or 5), then the + // DFA will refuse to build. + // + // We shouldn't enable this to make building always work, since + // this could cause the allocation of a cache bigger than the + // provided capacity amount. + // + // This is effectively the only reason why building a lazy DFA + // could fail. If it does, then we simply suppress the error + // and return None. + .skip_cache_capacity_check(false) + // This and enabling heuristic Unicode word boundary support + // above make it so the lazy DFA can quit at match time. 
+ .minimum_cache_clear_count(Some(3)) + .minimum_bytes_per_state(Some(10)); + let result = hybrid::dfa::Builder::new() + .configure(dfa_config.clone()) + .build_from_nfa(nfa.clone()); + let fwd = match result { + Ok(fwd) => fwd, + Err(_err) => { + debug!("forward lazy DFA failed to build: {_err}"); + return None; + } + }; + let result = hybrid::dfa::Builder::new() + .configure( + dfa_config + .clone() + .match_kind(MatchKind::All) + .prefilter(None) + .specialize_start_states(false), + ) + .build_from_nfa(nfarev.clone()); + let rev = match result { + Ok(rev) => rev, + Err(_err) => { + debug!("reverse lazy DFA failed to build: {_err}"); + return None; + } + }; + let engine = + hybrid::regex::Builder::new().build_from_dfas(fwd, rev); + debug!("lazy DFA built"); + Some(HybridEngine(engine)) + } + #[cfg(not(feature = "hybrid"))] + { + None + } + } + + #[cfg_attr(feature = "perf-inline", inline(always))] + pub(crate) fn try_search( + &self, + cache: &mut HybridCache, + input: &Input<'_>, + ) -> Result, RetryFailError> { + #[cfg(feature = "hybrid")] + { + let cache = cache.0.as_mut().unwrap(); + self.0.try_search(cache, input).map_err(|e| e.into()) + } + #[cfg(not(feature = "hybrid"))] + { + // Impossible to reach because this engine is never constructed + // if the requisite features aren't enabled. + unreachable!() + } + } + + #[cfg_attr(feature = "perf-inline", inline(always))] + pub(crate) fn try_search_half_fwd( + &self, + cache: &mut HybridCache, + input: &Input<'_>, + ) -> Result, RetryFailError> { + #[cfg(feature = "hybrid")] + { + let fwd = self.0.forward(); + let mut fwdcache = cache.0.as_mut().unwrap().as_parts_mut().0; + fwd.try_search_fwd(&mut fwdcache, input).map_err(|e| e.into()) + } + #[cfg(not(feature = "hybrid"))] + { + // Impossible to reach because this engine is never constructed + // if the requisite features aren't enabled. + unreachable!() + } + } + + #[cfg_attr(feature = "perf-inline", inline(always))] + pub(crate) fn try_search_half_fwd_stopat( + &self, + cache: &mut HybridCache, + input: &Input<'_>, + ) -> Result, RetryFailError> { + #[cfg(feature = "hybrid")] + { + let dfa = self.0.forward(); + let mut cache = cache.0.as_mut().unwrap().as_parts_mut().0; + crate::meta::stopat::hybrid_try_search_half_fwd( + dfa, &mut cache, input, + ) + } + #[cfg(not(feature = "hybrid"))] + { + // Impossible to reach because this engine is never constructed + // if the requisite features aren't enabled. + unreachable!() + } + } + + #[cfg_attr(feature = "perf-inline", inline(always))] + pub(crate) fn try_search_half_rev( + &self, + cache: &mut HybridCache, + input: &Input<'_>, + ) -> Result, RetryFailError> { + #[cfg(feature = "hybrid")] + { + let rev = self.0.reverse(); + let mut revcache = cache.0.as_mut().unwrap().as_parts_mut().1; + rev.try_search_rev(&mut revcache, input).map_err(|e| e.into()) + } + #[cfg(not(feature = "hybrid"))] + { + // Impossible to reach because this engine is never constructed + // if the requisite features aren't enabled. 
+ unreachable!() + } + } + + #[cfg_attr(feature = "perf-inline", inline(always))] + pub(crate) fn try_search_half_rev_limited( + &self, + cache: &mut HybridCache, + input: &Input<'_>, + min_start: usize, + ) -> Result, RetryError> { + #[cfg(feature = "hybrid")] + { + let dfa = self.0.reverse(); + let mut cache = cache.0.as_mut().unwrap().as_parts_mut().1; + crate::meta::limited::hybrid_try_search_half_rev( + dfa, &mut cache, input, min_start, + ) + } + #[cfg(not(feature = "hybrid"))] + { + // Impossible to reach because this engine is never constructed + // if the requisite features aren't enabled. + unreachable!() + } + } + + #[inline] + pub(crate) fn try_which_overlapping_matches( + &self, + cache: &mut HybridCache, + input: &Input<'_>, + patset: &mut PatternSet, + ) -> Result<(), RetryFailError> { + #[cfg(feature = "hybrid")] + { + let fwd = self.0.forward(); + let mut fwdcache = cache.0.as_mut().unwrap().as_parts_mut().0; + fwd.try_which_overlapping_matches(&mut fwdcache, input, patset) + .map_err(|e| e.into()) + } + #[cfg(not(feature = "hybrid"))] + { + // Impossible to reach because this engine is never constructed + // if the requisite features aren't enabled. + unreachable!() + } + } +} + +#[derive(Clone, Debug)] +pub(crate) struct HybridCache( + #[cfg(feature = "hybrid")] Option, + #[cfg(not(feature = "hybrid"))] (), +); + +impl HybridCache { + pub(crate) fn none() -> HybridCache { + #[cfg(feature = "hybrid")] + { + HybridCache(None) + } + #[cfg(not(feature = "hybrid"))] + { + HybridCache(()) + } + } + + pub(crate) fn new(builder: &Hybrid) -> HybridCache { + #[cfg(feature = "hybrid")] + { + HybridCache(builder.0.as_ref().map(|e| e.0.create_cache())) + } + #[cfg(not(feature = "hybrid"))] + { + HybridCache(()) + } + } + + pub(crate) fn reset(&mut self, builder: &Hybrid) { + #[cfg(feature = "hybrid")] + if let Some(ref e) = builder.0 { + self.0.as_mut().unwrap().reset(&e.0); + } + } + + pub(crate) fn memory_usage(&self) -> usize { + #[cfg(feature = "hybrid")] + { + self.0.as_ref().map_or(0, |c| c.memory_usage()) + } + #[cfg(not(feature = "hybrid"))] + { + 0 + } + } +} + +#[derive(Debug)] +pub(crate) struct DFA(Option); + +impl DFA { + pub(crate) fn none() -> DFA { + DFA(None) + } + + pub(crate) fn new( + info: &RegexInfo, + pre: Option, + nfa: &NFA, + nfarev: &NFA, + ) -> DFA { + DFA(DFAEngine::new(info, pre, nfa, nfarev)) + } + + #[cfg_attr(feature = "perf-inline", inline(always))] + pub(crate) fn get(&self, _input: &Input<'_>) -> Option<&DFAEngine> { + let engine = self.0.as_ref()?; + Some(engine) + } + + pub(crate) fn is_some(&self) -> bool { + self.0.is_some() + } + + pub(crate) fn memory_usage(&self) -> usize { + self.0.as_ref().map_or(0, |e| e.memory_usage()) + } +} + +#[derive(Debug)] +pub(crate) struct DFAEngine( + #[cfg(feature = "dfa-build")] dfa::regex::Regex, + #[cfg(not(feature = "dfa-build"))] (), +); + +impl DFAEngine { + pub(crate) fn new( + info: &RegexInfo, + pre: Option, + nfa: &NFA, + nfarev: &NFA, + ) -> Option { + #[cfg(feature = "dfa-build")] + { + if !info.config().get_dfa() { + return None; + } + // If our NFA is anything but small, don't even bother with a DFA. 
+ if let Some(state_limit) = info.config().get_dfa_state_limit() { + if nfa.states().len() > state_limit { + debug!( + "skipping full DFA because NFA has {} states, \ + which exceeds the heuristic limit of {}", + nfa.states().len(), + state_limit, + ); + return None; + } + } + // We cut the size limit in four because the total heap used by + // DFA construction is determinization aux memory and the DFA + // itself, and those things are configured independently in the + // lower level DFA builder API. And then split that in two because + // of forward and reverse DFAs. + let size_limit = info.config().get_dfa_size_limit().map(|n| n / 4); + let dfa_config = dfa::dense::Config::new() + .match_kind(info.config().get_match_kind()) + .prefilter(pre.clone()) + // Enabling this is necessary for ensuring we can service any + // kind of 'Input' search without error. For the full DFA, this + // can be quite costly. But since we have such a small bound + // on the size of the DFA, in practice, any multi-regexes are + // probably going to blow the limit anyway. + .starts_for_each_pattern(true) + .byte_classes(info.config().get_byte_classes()) + .unicode_word_boundary(true) + .specialize_start_states(pre.is_some()) + .determinize_size_limit(size_limit) + .dfa_size_limit(size_limit); + let result = dfa::dense::Builder::new() + .configure(dfa_config.clone()) + .build_from_nfa(&nfa); + let fwd = match result { + Ok(fwd) => fwd, + Err(_err) => { + debug!("forward full DFA failed to build: {_err}"); + return None; + } + }; + let result = dfa::dense::Builder::new() + .configure( + dfa_config + .clone() + // We never need unanchored reverse searches, so + // there's no point in building it into the DFA, which + // WILL take more space. (This isn't done for the lazy + // DFA because the DFA is, well, lazy. It doesn't pay + // the cost for supporting unanchored searches unless + // you actually do an unanchored search, which we + // don't.) + .start_kind(dfa::StartKind::Anchored) + .match_kind(MatchKind::All) + .prefilter(None) + .specialize_start_states(false), + ) + .build_from_nfa(&nfarev); + let rev = match result { + Ok(rev) => rev, + Err(_err) => { + debug!("reverse full DFA failed to build: {_err}"); + return None; + } + }; + let engine = dfa::regex::Builder::new().build_from_dfas(fwd, rev); + debug!( + "fully compiled forward and reverse DFAs built, {} bytes", + engine.forward().memory_usage() + + engine.reverse().memory_usage(), + ); + Some(DFAEngine(engine)) + } + #[cfg(not(feature = "dfa-build"))] + { + None + } + } + + #[cfg_attr(feature = "perf-inline", inline(always))] + pub(crate) fn try_search( + &self, + input: &Input<'_>, + ) -> Result, RetryFailError> { + #[cfg(feature = "dfa-build")] + { + self.0.try_search(input).map_err(|e| e.into()) + } + #[cfg(not(feature = "dfa-build"))] + { + // Impossible to reach because this engine is never constructed + // if the requisite features aren't enabled. + unreachable!() + } + } + + #[cfg_attr(feature = "perf-inline", inline(always))] + pub(crate) fn try_search_half_fwd( + &self, + input: &Input<'_>, + ) -> Result, RetryFailError> { + #[cfg(feature = "dfa-build")] + { + use crate::dfa::Automaton; + self.0.forward().try_search_fwd(input).map_err(|e| e.into()) + } + #[cfg(not(feature = "dfa-build"))] + { + // Impossible to reach because this engine is never constructed + // if the requisite features aren't enabled. 
+ unreachable!() + } + } + + #[cfg_attr(feature = "perf-inline", inline(always))] + pub(crate) fn try_search_half_fwd_stopat( + &self, + input: &Input<'_>, + ) -> Result, RetryFailError> { + #[cfg(feature = "dfa-build")] + { + let dfa = self.0.forward(); + crate::meta::stopat::dfa_try_search_half_fwd(dfa, input) + } + #[cfg(not(feature = "dfa-build"))] + { + // Impossible to reach because this engine is never constructed + // if the requisite features aren't enabled. + unreachable!() + } + } + + #[cfg_attr(feature = "perf-inline", inline(always))] + pub(crate) fn try_search_half_rev( + &self, + input: &Input<'_>, + ) -> Result, RetryFailError> { + #[cfg(feature = "dfa-build")] + { + use crate::dfa::Automaton; + self.0.reverse().try_search_rev(&input).map_err(|e| e.into()) + } + #[cfg(not(feature = "dfa-build"))] + { + // Impossible to reach because this engine is never constructed + // if the requisite features aren't enabled. + unreachable!() + } + } + + #[cfg_attr(feature = "perf-inline", inline(always))] + pub(crate) fn try_search_half_rev_limited( + &self, + input: &Input<'_>, + min_start: usize, + ) -> Result, RetryError> { + #[cfg(feature = "dfa-build")] + { + let dfa = self.0.reverse(); + crate::meta::limited::dfa_try_search_half_rev( + dfa, input, min_start, + ) + } + #[cfg(not(feature = "dfa-build"))] + { + // Impossible to reach because this engine is never constructed + // if the requisite features aren't enabled. + unreachable!() + } + } + + #[inline] + pub(crate) fn try_which_overlapping_matches( + &self, + input: &Input<'_>, + patset: &mut PatternSet, + ) -> Result<(), RetryFailError> { + #[cfg(feature = "dfa-build")] + { + use crate::dfa::Automaton; + self.0 + .forward() + .try_which_overlapping_matches(input, patset) + .map_err(|e| e.into()) + } + #[cfg(not(feature = "dfa-build"))] + { + // Impossible to reach because this engine is never constructed + // if the requisite features aren't enabled. + unreachable!() + } + } + + pub(crate) fn memory_usage(&self) -> usize { + #[cfg(feature = "dfa-build")] + { + self.0.forward().memory_usage() + self.0.reverse().memory_usage() + } + #[cfg(not(feature = "dfa-build"))] + { + // Impossible to reach because this engine is never constructed + // if the requisite features aren't enabled. + unreachable!() + } + } +} + +#[derive(Debug)] +pub(crate) struct ReverseHybrid(Option); + +impl ReverseHybrid { + pub(crate) fn none() -> ReverseHybrid { + ReverseHybrid(None) + } + + pub(crate) fn new(info: &RegexInfo, nfarev: &NFA) -> ReverseHybrid { + ReverseHybrid(ReverseHybridEngine::new(info, nfarev)) + } + + pub(crate) fn create_cache(&self) -> ReverseHybridCache { + ReverseHybridCache::new(self) + } + + #[cfg_attr(feature = "perf-inline", inline(always))] + pub(crate) fn get( + &self, + _input: &Input<'_>, + ) -> Option<&ReverseHybridEngine> { + let engine = self.0.as_ref()?; + Some(engine) + } +} + +#[derive(Debug)] +pub(crate) struct ReverseHybridEngine( + #[cfg(feature = "hybrid")] hybrid::dfa::DFA, + #[cfg(not(feature = "hybrid"))] (), +); + +impl ReverseHybridEngine { + pub(crate) fn new( + info: &RegexInfo, + nfarev: &NFA, + ) -> Option { + #[cfg(feature = "hybrid")] + { + if !info.config().get_hybrid() { + return None; + } + // Since we only use this for reverse searches, we can hard-code + // a number of things like match semantics, prefilters, starts + // for each pattern and so on. 
+ let dfa_config = hybrid::dfa::Config::new() + .match_kind(MatchKind::All) + .prefilter(None) + .starts_for_each_pattern(false) + .byte_classes(info.config().get_byte_classes()) + .unicode_word_boundary(true) + .specialize_start_states(false) + .cache_capacity(info.config().get_hybrid_cache_capacity()) + .skip_cache_capacity_check(false) + .minimum_cache_clear_count(Some(3)) + .minimum_bytes_per_state(Some(10)); + let result = hybrid::dfa::Builder::new() + .configure(dfa_config) + .build_from_nfa(nfarev.clone()); + let rev = match result { + Ok(rev) => rev, + Err(_err) => { + debug!("lazy reverse DFA failed to build: {_err}"); + return None; + } + }; + debug!("lazy reverse DFA built"); + Some(ReverseHybridEngine(rev)) + } + #[cfg(not(feature = "hybrid"))] + { + None + } + } + + #[cfg_attr(feature = "perf-inline", inline(always))] + pub(crate) fn try_search_half_rev_limited( + &self, + cache: &mut ReverseHybridCache, + input: &Input<'_>, + min_start: usize, + ) -> Result, RetryError> { + #[cfg(feature = "hybrid")] + { + let dfa = &self.0; + let mut cache = cache.0.as_mut().unwrap(); + crate::meta::limited::hybrid_try_search_half_rev( + dfa, &mut cache, input, min_start, + ) + } + #[cfg(not(feature = "hybrid"))] + { + // Impossible to reach because this engine is never constructed + // if the requisite features aren't enabled. + unreachable!() + } + } +} + +#[derive(Clone, Debug)] +pub(crate) struct ReverseHybridCache( + #[cfg(feature = "hybrid")] Option, + #[cfg(not(feature = "hybrid"))] (), +); + +impl ReverseHybridCache { + pub(crate) fn none() -> ReverseHybridCache { + #[cfg(feature = "hybrid")] + { + ReverseHybridCache(None) + } + #[cfg(not(feature = "hybrid"))] + { + ReverseHybridCache(()) + } + } + + pub(crate) fn new(builder: &ReverseHybrid) -> ReverseHybridCache { + #[cfg(feature = "hybrid")] + { + ReverseHybridCache(builder.0.as_ref().map(|e| e.0.create_cache())) + } + #[cfg(not(feature = "hybrid"))] + { + ReverseHybridCache(()) + } + } + + pub(crate) fn reset(&mut self, builder: &ReverseHybrid) { + #[cfg(feature = "hybrid")] + if let Some(ref e) = builder.0 { + self.0.as_mut().unwrap().reset(&e.0); + } + } + + pub(crate) fn memory_usage(&self) -> usize { + #[cfg(feature = "hybrid")] + { + self.0.as_ref().map_or(0, |c| c.memory_usage()) + } + #[cfg(not(feature = "hybrid"))] + { + 0 + } + } +} + +#[derive(Debug)] +pub(crate) struct ReverseDFA(Option); + +impl ReverseDFA { + pub(crate) fn none() -> ReverseDFA { + ReverseDFA(None) + } + + pub(crate) fn new(info: &RegexInfo, nfarev: &NFA) -> ReverseDFA { + ReverseDFA(ReverseDFAEngine::new(info, nfarev)) + } + + #[cfg_attr(feature = "perf-inline", inline(always))] + pub(crate) fn get(&self, _input: &Input<'_>) -> Option<&ReverseDFAEngine> { + let engine = self.0.as_ref()?; + Some(engine) + } + + pub(crate) fn is_some(&self) -> bool { + self.0.is_some() + } + + pub(crate) fn memory_usage(&self) -> usize { + self.0.as_ref().map_or(0, |e| e.memory_usage()) + } +} + +#[derive(Debug)] +pub(crate) struct ReverseDFAEngine( + #[cfg(feature = "dfa-build")] dfa::dense::DFA>, + #[cfg(not(feature = "dfa-build"))] (), +); + +impl ReverseDFAEngine { + pub(crate) fn new( + info: &RegexInfo, + nfarev: &NFA, + ) -> Option { + #[cfg(feature = "dfa-build")] + { + if !info.config().get_dfa() { + return None; + } + // If our NFA is anything but small, don't even bother with a DFA. 
+ if let Some(state_limit) = info.config().get_dfa_state_limit() { + if nfarev.states().len() > state_limit { + debug!( + "skipping full reverse DFA because NFA has {} states, \ + which exceeds the heuristic limit of {}", + nfarev.states().len(), + state_limit, + ); + return None; + } + } + // We cut the size limit in two because the total heap used by DFA + // construction is determinization aux memory and the DFA itself, + // and those things are configured independently in the lower level + // DFA builder API. + let size_limit = info.config().get_dfa_size_limit().map(|n| n / 2); + // Since we only use this for reverse searches, we can hard-code + // a number of things like match semantics, prefilters, starts + // for each pattern and so on. We also disable acceleration since + // it's incompatible with limited searches (which is the only + // operation we support for this kind of engine at the moment). + let dfa_config = dfa::dense::Config::new() + .match_kind(MatchKind::All) + .prefilter(None) + .accelerate(false) + .start_kind(dfa::StartKind::Anchored) + .starts_for_each_pattern(false) + .byte_classes(info.config().get_byte_classes()) + .unicode_word_boundary(true) + .specialize_start_states(false) + .determinize_size_limit(size_limit) + .dfa_size_limit(size_limit); + let result = dfa::dense::Builder::new() + .configure(dfa_config) + .build_from_nfa(&nfarev); + let rev = match result { + Ok(rev) => rev, + Err(_err) => { + debug!("full reverse DFA failed to build: {_err}"); + return None; + } + }; + debug!( + "fully compiled reverse DFA built, {} bytes", + rev.memory_usage() + ); + Some(ReverseDFAEngine(rev)) + } + #[cfg(not(feature = "dfa-build"))] + { + None + } + } + + #[cfg_attr(feature = "perf-inline", inline(always))] + pub(crate) fn try_search_half_rev_limited( + &self, + input: &Input<'_>, + min_start: usize, + ) -> Result, RetryError> { + #[cfg(feature = "dfa-build")] + { + let dfa = &self.0; + crate::meta::limited::dfa_try_search_half_rev( + dfa, input, min_start, + ) + } + #[cfg(not(feature = "dfa-build"))] + { + // Impossible to reach because this engine is never constructed + // if the requisite features aren't enabled. + unreachable!() + } + } + + pub(crate) fn memory_usage(&self) -> usize { + #[cfg(feature = "dfa-build")] + { + self.0.memory_usage() + } + #[cfg(not(feature = "dfa-build"))] + { + // Impossible to reach because this engine is never constructed + // if the requisite features aren't enabled. + unreachable!() + } + } +} diff --git a/vendor/regex-automata/src/nfa/mod.rs b/vendor/regex-automata/src/nfa/mod.rs new file mode 100644 index 00000000000000..14a0c30bea70ec --- /dev/null +++ b/vendor/regex-automata/src/nfa/mod.rs @@ -0,0 +1,55 @@ +/*! +Provides non-deterministic finite automata (NFA) and regex engines that use +them. + +While NFAs and DFAs (deterministic finite automata) have equivalent *theoretical* +power, their usage in practice tends to result in different engineering trade +offs. While this isn't meant to be a comprehensive treatment of the topic, here +are a few key trade offs that are, at minimum, true for this crate: + +* NFAs tend to be represented sparsely where as DFAs are represented densely. +Sparse representations use less memory, but are slower to traverse. Conversely, +dense representations use more memory, but are faster to traverse. (Sometimes +these lines are blurred. 
For example, an `NFA` might choose to represent a
+particular state in a dense fashion, and a DFA can be built using a sparse
+representation via [`sparse::DFA`](crate::dfa::sparse::DFA).)
+* NFAs have epsilon transitions and DFAs don't. In practice, this means that
+handling a single byte in a haystack with an NFA at search time may require
+visiting multiple NFA states. In a DFA, each byte only requires visiting
+a single state. Stated differently, NFAs require a variable number of CPU
+instructions to process one byte in a haystack whereas a DFA uses a constant
+number of CPU instructions to process one byte.
+* NFAs are generally easier to amend with secondary storage. For example, the
+[`thompson::pikevm::PikeVM`] uses an NFA to match, but also uses additional
+memory beyond the model of a finite state machine to track offsets for matching
+capturing groups. Conversely, the most a DFA can do is report the offset (and
+pattern ID) at which a match occurred. This is generally why we also compile
+DFAs in reverse, so that we can run them after finding the end of a match to
+also find the start of a match.
+* NFAs take worst case linear time to build, but DFAs take worst case
+exponential time to build. The [hybrid NFA/DFA](crate::hybrid) mitigates this
+challenge for DFAs in many practical cases.
+
+There are likely other differences, but the bottom line is that NFAs tend to be
+more memory efficient and give easier opportunities for increasing expressive
+power, whereas DFAs are faster to search with.
+
+# Why only a Thompson NFA?
+
+Currently, the only kind of NFA we support in this crate is a [Thompson
+NFA](https://en.wikipedia.org/wiki/Thompson%27s_construction). This refers
+to a specific construction algorithm that takes the syntax of a regex
+pattern and converts it to an NFA. Specifically, it makes gratuitous use of
+epsilon transitions in order to keep its structure simple. In exchange, its
+construction time is linear in the size of the regex. A Thompson NFA also makes
+the guarantee that given any state and a character in a haystack, there is at
+most one transition defined for it. (Although there may be many epsilon
+transitions.)
+
+It's possible that other types of NFAs will be added in the future, such as a
+[Glushkov NFA](https://en.wikipedia.org/wiki/Glushkov%27s_construction_algorithm).
+But currently, this crate only provides a Thompson NFA.
+*/
+
+#[cfg(feature = "nfa-thompson")]
+pub mod thompson;
diff --git a/vendor/regex-automata/src/nfa/thompson/backtrack.rs b/vendor/regex-automata/src/nfa/thompson/backtrack.rs
new file mode 100644
index 00000000000000..df99e456df746f
--- /dev/null
+++ b/vendor/regex-automata/src/nfa/thompson/backtrack.rs
@@ -0,0 +1,1908 @@
+/*!
+An NFA backed bounded backtracker for executing regex searches with capturing
+groups.
+
+This module provides a [`BoundedBacktracker`] that works by simulating an NFA
+using the classical backtracking algorithm with a twist: it avoids redoing
+work that it has done before and thereby avoids worst case exponential time.
+In exchange, it can only be used on "short" haystacks. Its advantage is that
+it can be faster than the [`PikeVM`](thompson::pikevm::PikeVM) in many cases
+because it does less book-keeping.
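The trade-off described in this paragraph is easiest to see with the fallible API in hand. The following is a minimal usage sketch relying only on routines documented later in this file (`new`, `create_cache`, `try_find`); it is illustrative, not part of the vendored sources:

```
use regex_automata::{nfa::thompson::backtrack::BoundedBacktracker, Match};

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let re = BoundedBacktracker::new(r"foo[0-9]+")?;
    let mut cache = re.create_cache();

    // Every search is fallible: `Ok(Some(..))` on a match, `Ok(None)` when
    // there is no match, and `Err(..)` if the haystack is too long for the
    // configured visited capacity.
    let got = re.try_find(&mut cache, "zzzfoo123zzz")?;
    assert_eq!(Some(Match::must(0, 3..9)), got);
    Ok(())
}
```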
+*/ + +use alloc::{vec, vec::Vec}; + +use crate::{ + nfa::thompson::{self, BuildError, State, NFA}, + util::{ + captures::Captures, + empty, iter, + prefilter::Prefilter, + primitives::{NonMaxUsize, PatternID, SmallIndex, StateID}, + search::{Anchored, HalfMatch, Input, Match, MatchError, Span}, + }, +}; + +/// Returns the minimum visited capacity for the given haystack. +/// +/// This function can be used as the argument to [`Config::visited_capacity`] +/// in order to guarantee that a backtracking search for the given `input` +/// won't return an error when using a [`BoundedBacktracker`] built from the +/// given `NFA`. +/// +/// This routine exists primarily as a way to test that the bounded backtracker +/// works correctly when its capacity is set to the smallest possible amount. +/// Still, it may be useful in cases where you know you want to use the bounded +/// backtracker for a specific input, and just need to know what visited +/// capacity to provide to make it work. +/// +/// Be warned that this number could be quite large as it is multiplicative in +/// the size the given NFA and haystack. +pub fn min_visited_capacity(nfa: &NFA, input: &Input<'_>) -> usize { + div_ceil(nfa.states().len() * (input.get_span().len() + 1), 8) +} + +/// The configuration used for building a bounded backtracker. +/// +/// A bounded backtracker configuration is a simple data object that is +/// typically used with [`Builder::configure`]. +#[derive(Clone, Debug, Default)] +pub struct Config { + pre: Option>, + visited_capacity: Option, +} + +impl Config { + /// Return a new default regex configuration. + pub fn new() -> Config { + Config::default() + } + + /// Set a prefilter to be used whenever a start state is entered. + /// + /// A [`Prefilter`] in this context is meant to accelerate searches by + /// looking for literal prefixes that every match for the corresponding + /// pattern (or patterns) must start with. Once a prefilter produces a + /// match, the underlying search routine continues on to try and confirm + /// the match. + /// + /// Be warned that setting a prefilter does not guarantee that the search + /// will be faster. While it's usually a good bet, if the prefilter + /// produces a lot of false positive candidates (i.e., positions matched + /// by the prefilter but not by the regex), then the overall result can + /// be slower than if you had just executed the regex engine without any + /// prefilters. + /// + /// By default no prefilter is set. + /// + /// # Example + /// + /// ``` + /// use regex_automata::{ + /// nfa::thompson::backtrack::BoundedBacktracker, + /// util::prefilter::Prefilter, + /// Input, Match, MatchKind, + /// }; + /// + /// let pre = Prefilter::new(MatchKind::LeftmostFirst, &["foo", "bar"]); + /// let re = BoundedBacktracker::builder() + /// .configure(BoundedBacktracker::config().prefilter(pre)) + /// .build(r"(foo|bar)[a-z]+")?; + /// let mut cache = re.create_cache(); + /// let input = Input::new("foo1 barfox bar"); + /// assert_eq!( + /// Some(Match::must(0, 5..11)), + /// re.try_find(&mut cache, input)?, + /// ); + /// + /// # Ok::<(), Box>(()) + /// ``` + /// + /// Be warned though that an incorrect prefilter can lead to incorrect + /// results! 
+ /// + /// ``` + /// use regex_automata::{ + /// nfa::thompson::backtrack::BoundedBacktracker, + /// util::prefilter::Prefilter, + /// Input, HalfMatch, MatchKind, + /// }; + /// + /// let pre = Prefilter::new(MatchKind::LeftmostFirst, &["foo", "car"]); + /// let re = BoundedBacktracker::builder() + /// .configure(BoundedBacktracker::config().prefilter(pre)) + /// .build(r"(foo|bar)[a-z]+")?; + /// let mut cache = re.create_cache(); + /// let input = Input::new("foo1 barfox bar"); + /// // No match reported even though there clearly is one! + /// assert_eq!(None, re.try_find(&mut cache, input)?); + /// + /// # Ok::<(), Box>(()) + /// ``` + pub fn prefilter(mut self, pre: Option) -> Config { + self.pre = Some(pre); + self + } + + /// Set the visited capacity used to bound backtracking. + /// + /// The visited capacity represents the amount of heap memory (in bytes) to + /// allocate toward tracking which parts of the backtracking search have + /// been done before. The heap memory needed for any particular search is + /// proportional to `haystack.len() * nfa.states().len()`, which an be + /// quite large. Therefore, the bounded backtracker is typically only able + /// to run on shorter haystacks. + /// + /// For a given regex, increasing the visited capacity means that the + /// maximum haystack length that can be searched is increased. The + /// [`BoundedBacktracker::max_haystack_len`] method returns that maximum. + /// + /// The default capacity is a reasonable but empirically chosen size. + /// + /// # Example + /// + /// As with other regex engines, Unicode is what tends to make the bounded + /// backtracker less useful by making the maximum haystack length quite + /// small. If necessary, increasing the visited capacity using this routine + /// will increase the maximum haystack length at the cost of using more + /// memory. + /// + /// Note though that the specific maximum values here are not an API + /// guarantee. The default visited capacity is subject to change and not + /// covered by semver. + /// + /// ``` + /// # if cfg!(miri) { return Ok(()); } // miri takes too long + /// use regex_automata::nfa::thompson::backtrack::BoundedBacktracker; + /// + /// // Unicode inflates the size of the underlying NFA quite a bit, and + /// // thus means that the backtracker can only handle smaller haystacks, + /// // assuming that the visited capacity remains unchanged. + /// let re = BoundedBacktracker::new(r"\w+")?; + /// assert!(re.max_haystack_len() <= 7_000); + /// // But we can increase the visited capacity to handle bigger haystacks! + /// let re = BoundedBacktracker::builder() + /// .configure(BoundedBacktracker::config().visited_capacity(1<<20)) + /// .build(r"\w+")?; + /// assert!(re.max_haystack_len() >= 25_000); + /// assert!(re.max_haystack_len() <= 28_000); + /// # Ok::<(), Box>(()) + /// ``` + pub fn visited_capacity(mut self, capacity: usize) -> Config { + self.visited_capacity = Some(capacity); + self + } + + /// Returns the prefilter set in this configuration, if one at all. + pub fn get_prefilter(&self) -> Option<&Prefilter> { + self.pre.as_ref().unwrap_or(&None).as_ref() + } + + /// Returns the configured visited capacity. + /// + /// Note that the actual capacity used may be slightly bigger than the + /// configured capacity. + pub fn get_visited_capacity(&self) -> usize { + const DEFAULT: usize = 256 * (1 << 10); // 256 KB + self.visited_capacity.unwrap_or(DEFAULT) + } + + /// Overwrite the default configuration such that the options in `o` are + /// always used. 
If an option in `o` is not set, then the corresponding + /// option in `self` is used. If it's not set in `self` either, then it + /// remains not set. + pub(crate) fn overwrite(&self, o: Config) -> Config { + Config { + pre: o.pre.or_else(|| self.pre.clone()), + visited_capacity: o.visited_capacity.or(self.visited_capacity), + } + } +} + +/// A builder for a bounded backtracker. +/// +/// This builder permits configuring options for the syntax of a pattern, the +/// NFA construction and the `BoundedBacktracker` construction. This builder +/// is different from a general purpose regex builder in that it permits fine +/// grain configuration of the construction process. The trade off for this is +/// complexity, and the possibility of setting a configuration that might not +/// make sense. For example, there are two different UTF-8 modes: +/// +/// * [`syntax::Config::utf8`](crate::util::syntax::Config::utf8) controls +/// whether the pattern itself can contain sub-expressions that match invalid +/// UTF-8. +/// * [`thompson::Config::utf8`] controls how the regex iterators themselves +/// advance the starting position of the next search when a match with zero +/// length is found. +/// +/// Generally speaking, callers will want to either enable all of these or +/// disable all of these. +/// +/// # Example +/// +/// This example shows how to disable UTF-8 mode in the syntax and the regex +/// itself. This is generally what you want for matching on arbitrary bytes. +/// +/// ``` +/// use regex_automata::{ +/// nfa::thompson::{self, backtrack::BoundedBacktracker}, +/// util::syntax, +/// Match, +/// }; +/// +/// let re = BoundedBacktracker::builder() +/// .syntax(syntax::Config::new().utf8(false)) +/// .thompson(thompson::Config::new().utf8(false)) +/// .build(r"foo(?-u:[^b])ar.*")?; +/// let mut cache = re.create_cache(); +/// +/// let haystack = b"\xFEfoo\xFFarzz\xE2\x98\xFF\n"; +/// let expected = Some(Ok(Match::must(0, 1..9))); +/// let got = re.try_find_iter(&mut cache, haystack).next(); +/// assert_eq!(expected, got); +/// // Notice that `(?-u:[^b])` matches invalid UTF-8, +/// // but the subsequent `.*` does not! Disabling UTF-8 +/// // on the syntax permits this. +/// // +/// // N.B. This example does not show the impact of +/// // disabling UTF-8 mode on a BoundedBacktracker Config, since that +/// // only impacts regexes that can produce matches of +/// // length 0. +/// assert_eq!(b"foo\xFFarzz", &haystack[got.unwrap()?.range()]); +/// +/// # Ok::<(), Box>(()) +/// ``` +#[derive(Clone, Debug)] +pub struct Builder { + config: Config, + #[cfg(feature = "syntax")] + thompson: thompson::Compiler, +} + +impl Builder { + /// Create a new BoundedBacktracker builder with its default configuration. + pub fn new() -> Builder { + Builder { + config: Config::default(), + #[cfg(feature = "syntax")] + thompson: thompson::Compiler::new(), + } + } + + /// Build a `BoundedBacktracker` from the given pattern. + /// + /// If there was a problem parsing or compiling the pattern, then an error + /// is returned. + #[cfg(feature = "syntax")] + pub fn build( + &self, + pattern: &str, + ) -> Result { + self.build_many(&[pattern]) + } + + /// Build a `BoundedBacktracker` from the given patterns. + #[cfg(feature = "syntax")] + pub fn build_many>( + &self, + patterns: &[P], + ) -> Result { + let nfa = self.thompson.build_many(patterns)?; + self.build_from_nfa(nfa) + } + + /// Build a `BoundedBacktracker` directly from its NFA. 
+ /// + /// Note that when using this method, any configuration that applies to the + /// construction of the NFA itself will of course be ignored, since the NFA + /// given here is already built. + pub fn build_from_nfa( + &self, + nfa: NFA, + ) -> Result { + nfa.look_set_any().available().map_err(BuildError::word)?; + Ok(BoundedBacktracker { config: self.config.clone(), nfa }) + } + + /// Apply the given `BoundedBacktracker` configuration options to this + /// builder. + pub fn configure(&mut self, config: Config) -> &mut Builder { + self.config = self.config.overwrite(config); + self + } + + /// Set the syntax configuration for this builder using + /// [`syntax::Config`](crate::util::syntax::Config). + /// + /// This permits setting things like case insensitivity, Unicode and multi + /// line mode. + /// + /// These settings only apply when constructing a `BoundedBacktracker` + /// directly from a pattern. + #[cfg(feature = "syntax")] + pub fn syntax( + &mut self, + config: crate::util::syntax::Config, + ) -> &mut Builder { + self.thompson.syntax(config); + self + } + + /// Set the Thompson NFA configuration for this builder using + /// [`nfa::thompson::Config`](crate::nfa::thompson::Config). + /// + /// This permits setting things like if additional time should be spent + /// shrinking the size of the NFA. + /// + /// These settings only apply when constructing a `BoundedBacktracker` + /// directly from a pattern. + #[cfg(feature = "syntax")] + pub fn thompson(&mut self, config: thompson::Config) -> &mut Builder { + self.thompson.configure(config); + self + } +} + +/// A backtracking regex engine that bounds its execution to avoid exponential +/// blow-up. +/// +/// This regex engine only implements leftmost-first match semantics and +/// only supports leftmost searches. It effectively does the same thing as a +/// [`PikeVM`](thompson::pikevm::PikeVM), but typically does it faster because +/// it doesn't have to worry about copying capturing group spans for most NFA +/// states. Instead, the backtracker can maintain one set of captures (provided +/// by the caller) and never needs to copy them. In exchange, the backtracker +/// bounds itself to ensure it doesn't exhibit worst case exponential time. +/// This results in the backtracker only being able to handle short haystacks +/// given reasonable memory usage. +/// +/// # Searches may return an error! +/// +/// By design, this backtracking regex engine is bounded. This bound is +/// implemented by not visiting any combination of NFA state ID and position +/// in a haystack more than once. Thus, the total memory required to bound +/// backtracking is proportional to `haystack.len() * nfa.states().len()`. +/// This can obviously get quite large, since large haystacks aren't terribly +/// uncommon. To avoid using exorbitant memory, the capacity is bounded by +/// a fixed limit set via [`Config::visited_capacity`]. Thus, if the total +/// capacity required for a particular regex and a haystack exceeds this +/// capacity, then the search routine will return an error. +/// +/// Unlike other regex engines that may return an error at search time (like +/// the DFA or the hybrid NFA/DFA), there is no way to guarantee that a bounded +/// backtracker will work for every haystack. Therefore, this regex engine +/// _only_ exposes fallible search routines to avoid the footgun of panicking +/// when running a search on a haystack that is too big. 
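One way to make that guarantee concrete is to compare the haystack length against `max_haystack_len` before searching. A small sketch follows; the helper name `find_if_short` is invented for illustration and is not part of the crate:

```
use regex_automata::{
    nfa::thompson::backtrack::{BoundedBacktracker, Cache},
    Match, MatchError,
};

// Invented helper: refuse to search haystacks the backtracker could not
// handle, surfacing the same error the engine itself would report.
fn find_if_short(
    re: &BoundedBacktracker,
    cache: &mut Cache,
    haystack: &str,
) -> Result<Option<Match>, MatchError> {
    if haystack.len() > re.max_haystack_len() {
        return Err(MatchError::haystack_too_long(haystack.len()));
    }
    re.try_find(cache, haystack)
}

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let re = BoundedBacktracker::new(r"[0-9]+")?;
    let mut cache = re.create_cache();
    assert_eq!(
        Some(Match::must(0, 3..6)),
        find_if_short(&re, &mut cache, "abc123")?,
    );
    Ok(())
}
```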
+/// +/// If one wants to use the fallible search APIs without handling the +/// error, the only way to guarantee an error won't occur from the +/// haystack length is to ensure the haystack length does not exceed +/// [`BoundedBacktracker::max_haystack_len`]. +/// +/// # Example: Unicode word boundaries +/// +/// This example shows that the bounded backtracker implements Unicode word +/// boundaries correctly by default. +/// +/// ``` +/// # if cfg!(miri) { return Ok(()); } // miri takes too long +/// use regex_automata::{nfa::thompson::backtrack::BoundedBacktracker, Match}; +/// +/// let re = BoundedBacktracker::new(r"\b\w+\b")?; +/// let mut cache = re.create_cache(); +/// +/// let mut it = re.try_find_iter(&mut cache, "Шерлок Холмс"); +/// assert_eq!(Some(Ok(Match::must(0, 0..12))), it.next()); +/// assert_eq!(Some(Ok(Match::must(0, 13..23))), it.next()); +/// assert_eq!(None, it.next()); +/// # Ok::<(), Box>(()) +/// ``` +/// +/// # Example: multiple regex patterns +/// +/// The bounded backtracker supports searching for multiple patterns +/// simultaneously, just like other regex engines. Note though that because it +/// uses a backtracking strategy, this regex engine is unlikely to scale well +/// as more patterns are added. But then again, as more patterns are added, the +/// maximum haystack length allowed will also shorten (assuming the visited +/// capacity remains invariant). +/// +/// ``` +/// use regex_automata::{nfa::thompson::backtrack::BoundedBacktracker, Match}; +/// +/// let re = BoundedBacktracker::new_many(&["[a-z]+", "[0-9]+"])?; +/// let mut cache = re.create_cache(); +/// +/// let mut it = re.try_find_iter(&mut cache, "abc 1 foo 4567 0 quux"); +/// assert_eq!(Some(Ok(Match::must(0, 0..3))), it.next()); +/// assert_eq!(Some(Ok(Match::must(1, 4..5))), it.next()); +/// assert_eq!(Some(Ok(Match::must(0, 6..9))), it.next()); +/// assert_eq!(Some(Ok(Match::must(1, 10..14))), it.next()); +/// assert_eq!(Some(Ok(Match::must(1, 15..16))), it.next()); +/// assert_eq!(Some(Ok(Match::must(0, 17..21))), it.next()); +/// assert_eq!(None, it.next()); +/// # Ok::<(), Box>(()) +/// ``` +#[derive(Clone, Debug)] +pub struct BoundedBacktracker { + config: Config, + nfa: NFA, +} + +impl BoundedBacktracker { + /// Parse the given regular expression using the default configuration and + /// return the corresponding `BoundedBacktracker`. + /// + /// If you want a non-default configuration, then use the [`Builder`] to + /// set your own configuration. + /// + /// # Example + /// + /// ``` + /// use regex_automata::{ + /// nfa::thompson::backtrack::BoundedBacktracker, + /// Match, + /// }; + /// + /// let re = BoundedBacktracker::new("foo[0-9]+bar")?; + /// let mut cache = re.create_cache(); + /// assert_eq!( + /// Some(Ok(Match::must(0, 3..14))), + /// re.try_find_iter(&mut cache, "zzzfoo12345barzzz").next(), + /// ); + /// # Ok::<(), Box>(()) + /// ``` + #[cfg(feature = "syntax")] + pub fn new(pattern: &str) -> Result { + BoundedBacktracker::builder().build(pattern) + } + + /// Like `new`, but parses multiple patterns into a single "multi regex." + /// This similarly uses the default regex configuration. 
+ /// + /// # Example + /// + /// ``` + /// use regex_automata::{ + /// nfa::thompson::backtrack::BoundedBacktracker, + /// Match, + /// }; + /// + /// let re = BoundedBacktracker::new_many(&["[a-z]+", "[0-9]+"])?; + /// let mut cache = re.create_cache(); + /// + /// let mut it = re.try_find_iter(&mut cache, "abc 1 foo 4567 0 quux"); + /// assert_eq!(Some(Ok(Match::must(0, 0..3))), it.next()); + /// assert_eq!(Some(Ok(Match::must(1, 4..5))), it.next()); + /// assert_eq!(Some(Ok(Match::must(0, 6..9))), it.next()); + /// assert_eq!(Some(Ok(Match::must(1, 10..14))), it.next()); + /// assert_eq!(Some(Ok(Match::must(1, 15..16))), it.next()); + /// assert_eq!(Some(Ok(Match::must(0, 17..21))), it.next()); + /// assert_eq!(None, it.next()); + /// # Ok::<(), Box>(()) + /// ``` + #[cfg(feature = "syntax")] + pub fn new_many>( + patterns: &[P], + ) -> Result { + BoundedBacktracker::builder().build_many(patterns) + } + + /// # Example + /// + /// This shows how to hand assemble a regular expression via its HIR, + /// compile an NFA from it and build a BoundedBacktracker from the NFA. + /// + /// ``` + /// use regex_automata::{ + /// nfa::thompson::{NFA, backtrack::BoundedBacktracker}, + /// Match, + /// }; + /// use regex_syntax::hir::{Hir, Class, ClassBytes, ClassBytesRange}; + /// + /// let hir = Hir::class(Class::Bytes(ClassBytes::new(vec![ + /// ClassBytesRange::new(b'0', b'9'), + /// ClassBytesRange::new(b'A', b'Z'), + /// ClassBytesRange::new(b'_', b'_'), + /// ClassBytesRange::new(b'a', b'z'), + /// ]))); + /// + /// let config = NFA::config().nfa_size_limit(Some(1_000)); + /// let nfa = NFA::compiler().configure(config).build_from_hir(&hir)?; + /// + /// let re = BoundedBacktracker::new_from_nfa(nfa)?; + /// let (mut cache, mut caps) = (re.create_cache(), re.create_captures()); + /// let expected = Some(Match::must(0, 3..4)); + /// re.try_captures(&mut cache, "!@#A#@!", &mut caps)?; + /// assert_eq!(expected, caps.get_match()); + /// + /// # Ok::<(), Box>(()) + /// ``` + pub fn new_from_nfa(nfa: NFA) -> Result { + BoundedBacktracker::builder().build_from_nfa(nfa) + } + + /// Create a new `BoundedBacktracker` that matches every input. + /// + /// # Example + /// + /// ``` + /// use regex_automata::{ + /// nfa::thompson::backtrack::BoundedBacktracker, + /// Match, + /// }; + /// + /// let re = BoundedBacktracker::always_match()?; + /// let mut cache = re.create_cache(); + /// + /// let expected = Some(Ok(Match::must(0, 0..0))); + /// assert_eq!(expected, re.try_find_iter(&mut cache, "").next()); + /// assert_eq!(expected, re.try_find_iter(&mut cache, "foo").next()); + /// # Ok::<(), Box>(()) + /// ``` + pub fn always_match() -> Result { + let nfa = thompson::NFA::always_match(); + BoundedBacktracker::new_from_nfa(nfa) + } + + /// Create a new `BoundedBacktracker` that never matches any input. + /// + /// # Example + /// + /// ``` + /// use regex_automata::nfa::thompson::backtrack::BoundedBacktracker; + /// + /// let re = BoundedBacktracker::never_match()?; + /// let mut cache = re.create_cache(); + /// + /// assert_eq!(None, re.try_find_iter(&mut cache, "").next()); + /// assert_eq!(None, re.try_find_iter(&mut cache, "foo").next()); + /// # Ok::<(), Box>(()) + /// ``` + pub fn never_match() -> Result { + let nfa = thompson::NFA::never_match(); + BoundedBacktracker::new_from_nfa(nfa) + } + + /// Return a default configuration for a `BoundedBacktracker`. 
+ /// + /// This is a convenience routine to avoid needing to import the `Config` + /// type when customizing the construction of a `BoundedBacktracker`. + /// + /// # Example + /// + /// This example shows how to disable UTF-8 mode. When UTF-8 mode is + /// disabled, zero-width matches that split a codepoint are allowed. + /// Otherwise they are never reported. + /// + /// In the code below, notice that `""` is permitted to match positions + /// that split the encoding of a codepoint. + /// + /// ``` + /// use regex_automata::{ + /// nfa::thompson::{self, backtrack::BoundedBacktracker}, + /// Match, + /// }; + /// + /// let re = BoundedBacktracker::builder() + /// .thompson(thompson::Config::new().utf8(false)) + /// .build(r"")?; + /// let mut cache = re.create_cache(); + /// + /// let haystack = "a☃z"; + /// let mut it = re.try_find_iter(&mut cache, haystack); + /// assert_eq!(Some(Ok(Match::must(0, 0..0))), it.next()); + /// assert_eq!(Some(Ok(Match::must(0, 1..1))), it.next()); + /// assert_eq!(Some(Ok(Match::must(0, 2..2))), it.next()); + /// assert_eq!(Some(Ok(Match::must(0, 3..3))), it.next()); + /// assert_eq!(Some(Ok(Match::must(0, 4..4))), it.next()); + /// assert_eq!(Some(Ok(Match::must(0, 5..5))), it.next()); + /// assert_eq!(None, it.next()); + /// + /// # Ok::<(), Box>(()) + /// ``` + pub fn config() -> Config { + Config::new() + } + + /// Return a builder for configuring the construction of a + /// `BoundedBacktracker`. + /// + /// This is a convenience routine to avoid needing to import the + /// [`Builder`] type in common cases. + /// + /// # Example + /// + /// This example shows how to use the builder to disable UTF-8 mode + /// everywhere. + /// + /// ``` + /// # if cfg!(miri) { return Ok(()); } // miri takes too long + /// use regex_automata::{ + /// nfa::thompson::{self, backtrack::BoundedBacktracker}, + /// util::syntax, + /// Match, + /// }; + /// + /// let re = BoundedBacktracker::builder() + /// .syntax(syntax::Config::new().utf8(false)) + /// .thompson(thompson::Config::new().utf8(false)) + /// .build(r"foo(?-u:[^b])ar.*")?; + /// let (mut cache, mut caps) = (re.create_cache(), re.create_captures()); + /// + /// let haystack = b"\xFEfoo\xFFarzz\xE2\x98\xFF\n"; + /// let expected = Some(Match::must(0, 1..9)); + /// re.try_captures(&mut cache, haystack, &mut caps)?; + /// assert_eq!(expected, caps.get_match()); + /// + /// # Ok::<(), Box>(()) + /// ``` + pub fn builder() -> Builder { + Builder::new() + } + + /// Create a new cache for this regex. + /// + /// The cache returned should only be used for searches for this + /// regex. If you want to reuse the cache for another regex, then you + /// must call [`Cache::reset`] with that regex (or, equivalently, + /// [`BoundedBacktracker::reset_cache`]). + pub fn create_cache(&self) -> Cache { + Cache::new(self) + } + + /// Create a new empty set of capturing groups that is guaranteed to be + /// valid for the search APIs on this `BoundedBacktracker`. + /// + /// A `Captures` value created for a specific `BoundedBacktracker` cannot + /// be used with any other `BoundedBacktracker`. + /// + /// This is a convenience function for [`Captures::all`]. See the + /// [`Captures`] documentation for an explanation of its alternative + /// constructors that permit the `BoundedBacktracker` to do less work + /// during a search, and thus might make it faster. 
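For callers that only need the overall match span, the lighter `Captures::matches` constructor mentioned here can be used instead of `create_captures`. A sketch using only APIs that appear elsewhere in this file:

```
use regex_automata::{
    nfa::thompson::backtrack::BoundedBacktracker,
    util::captures::Captures,
    Match,
};

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let re = BoundedBacktracker::new(r"foo(?P<digits>[0-9]+)")?;
    let mut cache = re.create_cache();

    // `create_captures()` corresponds to `Captures::all` and records every
    // group. `Captures::matches` only records the overall match span, which
    // lets the engine skip copying the inner group offsets.
    let mut caps = Captures::matches(re.get_nfa().group_info().clone());
    re.try_captures(&mut cache, "foo123", &mut caps)?;
    assert_eq!(Some(Match::must(0, 0..6)), caps.get_match());
    Ok(())
}
```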
+ pub fn create_captures(&self) -> Captures { + Captures::all(self.get_nfa().group_info().clone()) + } + + /// Reset the given cache such that it can be used for searching with the + /// this `BoundedBacktracker` (and only this `BoundedBacktracker`). + /// + /// A cache reset permits reusing memory already allocated in this cache + /// with a different `BoundedBacktracker`. + /// + /// # Example + /// + /// This shows how to re-purpose a cache for use with a different + /// `BoundedBacktracker`. + /// + /// ``` + /// # if cfg!(miri) { return Ok(()); } // miri takes too long + /// use regex_automata::{ + /// nfa::thompson::backtrack::BoundedBacktracker, + /// Match, + /// }; + /// + /// let re1 = BoundedBacktracker::new(r"\w")?; + /// let re2 = BoundedBacktracker::new(r"\W")?; + /// + /// let mut cache = re1.create_cache(); + /// assert_eq!( + /// Some(Ok(Match::must(0, 0..2))), + /// re1.try_find_iter(&mut cache, "Δ").next(), + /// ); + /// + /// // Using 'cache' with re2 is not allowed. It may result in panics or + /// // incorrect results. In order to re-purpose the cache, we must reset + /// // it with the BoundedBacktracker we'd like to use it with. + /// // + /// // Similarly, after this reset, using the cache with 're1' is also not + /// // allowed. + /// cache.reset(&re2); + /// assert_eq!( + /// Some(Ok(Match::must(0, 0..3))), + /// re2.try_find_iter(&mut cache, "☃").next(), + /// ); + /// + /// # Ok::<(), Box>(()) + /// ``` + pub fn reset_cache(&self, cache: &mut Cache) { + cache.reset(self); + } + + /// Returns the total number of patterns compiled into this + /// `BoundedBacktracker`. + /// + /// In the case of a `BoundedBacktracker` that contains no patterns, this + /// returns `0`. + /// + /// # Example + /// + /// This example shows the pattern length for a `BoundedBacktracker` that + /// never matches: + /// + /// ``` + /// use regex_automata::nfa::thompson::backtrack::BoundedBacktracker; + /// + /// let re = BoundedBacktracker::never_match()?; + /// assert_eq!(re.pattern_len(), 0); + /// # Ok::<(), Box>(()) + /// ``` + /// + /// And another example for a `BoundedBacktracker` that matches at every + /// position: + /// + /// ``` + /// use regex_automata::nfa::thompson::backtrack::BoundedBacktracker; + /// + /// let re = BoundedBacktracker::always_match()?; + /// assert_eq!(re.pattern_len(), 1); + /// # Ok::<(), Box>(()) + /// ``` + /// + /// And finally, a `BoundedBacktracker` that was constructed from multiple + /// patterns: + /// + /// ``` + /// use regex_automata::nfa::thompson::backtrack::BoundedBacktracker; + /// + /// let re = BoundedBacktracker::new_many(&["[0-9]+", "[a-z]+", "[A-Z]+"])?; + /// assert_eq!(re.pattern_len(), 3); + /// # Ok::<(), Box>(()) + /// ``` + pub fn pattern_len(&self) -> usize { + self.nfa.pattern_len() + } + + /// Return the config for this `BoundedBacktracker`. + #[inline] + pub fn get_config(&self) -> &Config { + &self.config + } + + /// Returns a reference to the underlying NFA. + #[inline] + pub fn get_nfa(&self) -> &NFA { + &self.nfa + } + + /// Returns the maximum haystack length supported by this backtracker. + /// + /// This routine is a function of both [`Config::visited_capacity`] and the + /// internal size of the backtracker's NFA. + /// + /// # Example + /// + /// This example shows how the maximum haystack length can vary depending + /// on the size of the regex itself. Note though that the specific maximum + /// values here are not an API guarantee. 
The default visited capacity is + /// subject to change and not covered by semver. + /// + /// ``` + /// # if cfg!(miri) { return Ok(()); } // miri takes too long + /// use regex_automata::{ + /// nfa::thompson::backtrack::BoundedBacktracker, + /// Match, MatchError, + /// }; + /// + /// // If you're only using ASCII, you get a big budget. + /// let re = BoundedBacktracker::new(r"(?-u)\w+")?; + /// let mut cache = re.create_cache(); + /// assert_eq!(re.max_haystack_len(), 299_592); + /// // Things work up to the max. + /// let mut haystack = "a".repeat(299_592); + /// let expected = Some(Ok(Match::must(0, 0..299_592))); + /// assert_eq!(expected, re.try_find_iter(&mut cache, &haystack).next()); + /// // But you'll get an error if you provide a haystack that's too big. + /// // Notice that we use the 'try_find_iter' routine instead, which + /// // yields Result instead of Match. + /// haystack.push('a'); + /// let expected = Some(Err(MatchError::haystack_too_long(299_593))); + /// assert_eq!(expected, re.try_find_iter(&mut cache, &haystack).next()); + /// + /// // Unicode inflates the size of the underlying NFA quite a bit, and + /// // thus means that the backtracker can only handle smaller haystacks, + /// // assuming that the visited capacity remains unchanged. + /// let re = BoundedBacktracker::new(r"\w+")?; + /// assert!(re.max_haystack_len() <= 7_000); + /// // But we can increase the visited capacity to handle bigger haystacks! + /// let re = BoundedBacktracker::builder() + /// .configure(BoundedBacktracker::config().visited_capacity(1<<20)) + /// .build(r"\w+")?; + /// assert!(re.max_haystack_len() >= 25_000); + /// assert!(re.max_haystack_len() <= 28_000); + /// # Ok::<(), Box>(()) + /// ``` + #[inline] + pub fn max_haystack_len(&self) -> usize { + // The capacity given in the config is "bytes of heap memory," but the + // capacity we use here is "number of bits." So convert the capacity in + // bytes to the capacity in bits. + let capacity = 8 * self.get_config().get_visited_capacity(); + let blocks = div_ceil(capacity, Visited::BLOCK_SIZE); + let real_capacity = blocks.saturating_mul(Visited::BLOCK_SIZE); + // It's possible for `real_capacity` to be smaller than the number of + // NFA states for particularly large regexes, so we saturate towards + // zero. + (real_capacity / self.nfa.states().len()).saturating_sub(1) + } +} + +impl BoundedBacktracker { + /// Returns true if and only if this regex matches the given haystack. + /// + /// In the case of a backtracking regex engine, and unlike most other + /// regex engines in this crate, short circuiting isn't practical. However, + /// this routine may still be faster because it instructs backtracking to + /// not keep track of any capturing groups. + /// + /// # Errors + /// + /// This routine only errors if the search could not complete. For this + /// backtracking regex engine, this only occurs when the haystack length + /// exceeds [`BoundedBacktracker::max_haystack_len`]. + /// + /// When a search cannot complete, callers cannot know whether a match + /// exists or not. 
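The limit referenced in these error sections is the `max_haystack_len` value derived just above from the visited capacity and the NFA size. Under the default configuration it can be reproduced numerically. In this sketch, `approx_max` and the tolerance are illustrative and ignore the small bitset block rounding performed by the real computation:

```
use regex_automata::nfa::thompson::backtrack::BoundedBacktracker;

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let re = BoundedBacktracker::new(r"(?-u)\w+")?;

    // The visited capacity is a number of bytes; each byte tracks 8
    // (state id, haystack offset) pairs, and one offset is reserved for
    // the end-of-haystack position, hence the trailing `- 1`.
    let bits = 8 * re.get_config().get_visited_capacity();
    let states = re.get_nfa().states().len();
    let approx_max = bits / states - 1;

    // Block rounding can shift the exact figure slightly, but it stays in
    // the same ballpark as the value the engine reports.
    assert!(approx_max.abs_diff(re.max_haystack_len()) < 64);
    Ok(())
}
```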
+ /// + /// # Example + /// + /// ``` + /// use regex_automata::nfa::thompson::backtrack::BoundedBacktracker; + /// + /// let re = BoundedBacktracker::new("foo[0-9]+bar")?; + /// let mut cache = re.create_cache(); + /// + /// assert!(re.try_is_match(&mut cache, "foo12345bar")?); + /// assert!(!re.try_is_match(&mut cache, "foobar")?); + /// # Ok::<(), Box>(()) + /// ``` + /// + /// # Example: consistency with search APIs + /// + /// `is_match` is guaranteed to return `true` whenever `find` returns a + /// match. This includes searches that are executed entirely within a + /// codepoint: + /// + /// ``` + /// use regex_automata::{ + /// nfa::thompson::backtrack::BoundedBacktracker, + /// Input, + /// }; + /// + /// let re = BoundedBacktracker::new("a*")?; + /// let mut cache = re.create_cache(); + /// + /// assert!(!re.try_is_match(&mut cache, Input::new("☃").span(1..2))?); + /// # Ok::<(), Box>(()) + /// ``` + /// + /// Notice that when UTF-8 mode is disabled, then the above reports a + /// match because the restriction against zero-width matches that split a + /// codepoint has been lifted: + /// + /// ``` + /// use regex_automata::{ + /// nfa::thompson::{backtrack::BoundedBacktracker, NFA}, + /// Input, + /// }; + /// + /// let re = BoundedBacktracker::builder() + /// .thompson(NFA::config().utf8(false)) + /// .build("a*")?; + /// let mut cache = re.create_cache(); + /// + /// assert!(re.try_is_match(&mut cache, Input::new("☃").span(1..2))?); + /// # Ok::<(), Box>(()) + /// ``` + #[inline] + pub fn try_is_match<'h, I: Into>>( + &self, + cache: &mut Cache, + input: I, + ) -> Result { + let input = input.into().earliest(true); + self.try_search_slots(cache, &input, &mut []).map(|pid| pid.is_some()) + } + + /// Executes a leftmost forward search and returns a `Match` if one exists. + /// + /// This routine only includes the overall match span. To get + /// access to the individual spans of each capturing group, use + /// [`BoundedBacktracker::try_captures`]. + /// + /// # Errors + /// + /// This routine only errors if the search could not complete. For this + /// backtracking regex engine, this only occurs when the haystack length + /// exceeds [`BoundedBacktracker::max_haystack_len`]. + /// + /// When a search cannot complete, callers cannot know whether a match + /// exists or not. + /// + /// # Example + /// + /// ``` + /// use regex_automata::{ + /// nfa::thompson::backtrack::BoundedBacktracker, + /// Match, + /// }; + /// + /// let re = BoundedBacktracker::new("foo[0-9]+")?; + /// let mut cache = re.create_cache(); + /// let expected = Match::must(0, 0..8); + /// assert_eq!(Some(expected), re.try_find(&mut cache, "foo12345")?); + /// + /// # Ok::<(), Box>(()) + /// ``` + #[inline] + pub fn try_find<'h, I: Into>>( + &self, + cache: &mut Cache, + input: I, + ) -> Result, MatchError> { + let input = input.into(); + if self.get_nfa().pattern_len() == 1 { + let mut slots = [None, None]; + let pid = match self.try_search_slots(cache, &input, &mut slots)? { + None => return Ok(None), + Some(pid) => pid, + }; + let start = match slots[0] { + None => return Ok(None), + Some(s) => s.get(), + }; + let end = match slots[1] { + None => return Ok(None), + Some(s) => s.get(), + }; + return Ok(Some(Match::new(pid, Span { start, end }))); + } + let ginfo = self.get_nfa().group_info(); + let slots_len = ginfo.implicit_slot_len(); + let mut slots = vec![None; slots_len]; + let pid = match self.try_search_slots(cache, &input, &mut slots)? 
{ + None => return Ok(None), + Some(pid) => pid, + }; + let start = match slots[pid.as_usize() * 2] { + None => return Ok(None), + Some(s) => s.get(), + }; + let end = match slots[pid.as_usize() * 2 + 1] { + None => return Ok(None), + Some(s) => s.get(), + }; + Ok(Some(Match::new(pid, Span { start, end }))) + } + + /// Executes a leftmost forward search and writes the spans of capturing + /// groups that participated in a match into the provided [`Captures`] + /// value. If no match was found, then [`Captures::is_match`] is guaranteed + /// to return `false`. + /// + /// # Errors + /// + /// This routine only errors if the search could not complete. For this + /// backtracking regex engine, this only occurs when the haystack length + /// exceeds [`BoundedBacktracker::max_haystack_len`]. + /// + /// When a search cannot complete, callers cannot know whether a match + /// exists or not. + /// + /// # Example + /// + /// ``` + /// use regex_automata::{ + /// nfa::thompson::backtrack::BoundedBacktracker, + /// Span, + /// }; + /// + /// let re = BoundedBacktracker::new( + /// r"^([0-9]{4})-([0-9]{2})-([0-9]{2})$", + /// )?; + /// let (mut cache, mut caps) = (re.create_cache(), re.create_captures()); + /// + /// re.try_captures(&mut cache, "2010-03-14", &mut caps)?; + /// assert!(caps.is_match()); + /// assert_eq!(Some(Span::from(0..4)), caps.get_group(1)); + /// assert_eq!(Some(Span::from(5..7)), caps.get_group(2)); + /// assert_eq!(Some(Span::from(8..10)), caps.get_group(3)); + /// + /// # Ok::<(), Box>(()) + /// ``` + #[inline] + pub fn try_captures<'h, I: Into>>( + &self, + cache: &mut Cache, + input: I, + caps: &mut Captures, + ) -> Result<(), MatchError> { + self.try_search(cache, &input.into(), caps) + } + + /// Returns an iterator over all non-overlapping leftmost matches in the + /// given bytes. If no match exists, then the iterator yields no elements. + /// + /// If the regex engine returns an error at any point, then the iterator + /// will yield that error. + /// + /// # Example + /// + /// ``` + /// use regex_automata::{ + /// nfa::thompson::backtrack::BoundedBacktracker, + /// Match, MatchError, + /// }; + /// + /// let re = BoundedBacktracker::new("foo[0-9]+")?; + /// let mut cache = re.create_cache(); + /// + /// let text = "foo1 foo12 foo123"; + /// let result: Result, MatchError> = re + /// .try_find_iter(&mut cache, text) + /// .collect(); + /// let matches = result?; + /// assert_eq!(matches, vec![ + /// Match::must(0, 0..4), + /// Match::must(0, 5..10), + /// Match::must(0, 11..17), + /// ]); + /// # Ok::<(), Box>(()) + /// ``` + #[inline] + pub fn try_find_iter<'r, 'c, 'h, I: Into>>( + &'r self, + cache: &'c mut Cache, + input: I, + ) -> TryFindMatches<'r, 'c, 'h> { + let caps = Captures::matches(self.get_nfa().group_info().clone()); + let it = iter::Searcher::new(input.into()); + TryFindMatches { re: self, cache, caps, it } + } + + /// Returns an iterator over all non-overlapping `Captures` values. If no + /// match exists, then the iterator yields no elements. + /// + /// This yields the same matches as [`BoundedBacktracker::try_find_iter`], + /// but it includes the spans of all capturing groups that participate in + /// each match. + /// + /// If the regex engine returns an error at any point, then the iterator + /// will yield that error. + /// + /// **Tip:** See [`util::iter::Searcher`](crate::util::iter::Searcher) for + /// how to correctly iterate over all matches in a haystack while avoiding + /// the creation of a new `Captures` value for every match. 
(Which you are + /// forced to do with an `Iterator`.) + /// + /// # Example + /// + /// ``` + /// use regex_automata::{ + /// nfa::thompson::backtrack::BoundedBacktracker, + /// Span, + /// }; + /// + /// let re = BoundedBacktracker::new("foo(?P[0-9]+)")?; + /// let mut cache = re.create_cache(); + /// + /// let text = "foo1 foo12 foo123"; + /// let mut spans = vec![]; + /// for result in re.try_captures_iter(&mut cache, text) { + /// let caps = result?; + /// // The unwrap is OK since 'numbers' matches if the pattern matches. + /// spans.push(caps.get_group_by_name("numbers").unwrap()); + /// } + /// assert_eq!(spans, vec![ + /// Span::from(3..4), + /// Span::from(8..10), + /// Span::from(14..17), + /// ]); + /// # Ok::<(), Box>(()) + /// ``` + #[inline] + pub fn try_captures_iter<'r, 'c, 'h, I: Into>>( + &'r self, + cache: &'c mut Cache, + input: I, + ) -> TryCapturesMatches<'r, 'c, 'h> { + let caps = self.create_captures(); + let it = iter::Searcher::new(input.into()); + TryCapturesMatches { re: self, cache, caps, it } + } +} + +impl BoundedBacktracker { + /// Executes a leftmost forward search and writes the spans of capturing + /// groups that participated in a match into the provided [`Captures`] + /// value. If no match was found, then [`Captures::is_match`] is guaranteed + /// to return `false`. + /// + /// This is like [`BoundedBacktracker::try_captures`], but it accepts a + /// concrete `&Input` instead of an `Into`. + /// + /// # Errors + /// + /// This routine only errors if the search could not complete. For this + /// backtracking regex engine, this only occurs when the haystack length + /// exceeds [`BoundedBacktracker::max_haystack_len`]. + /// + /// When a search cannot complete, callers cannot know whether a match + /// exists or not. + /// + /// # Example: specific pattern search + /// + /// This example shows how to build a multi bounded backtracker that + /// permits searching for specific patterns. + /// + /// ``` + /// use regex_automata::{ + /// nfa::thompson::backtrack::BoundedBacktracker, + /// Anchored, Input, Match, PatternID, + /// }; + /// + /// let re = BoundedBacktracker::new_many(&[ + /// "[a-z0-9]{6}", + /// "[a-z][a-z0-9]{5}", + /// ])?; + /// let (mut cache, mut caps) = (re.create_cache(), re.create_captures()); + /// let haystack = "foo123"; + /// + /// // Since we are using the default leftmost-first match and both + /// // patterns match at the same starting position, only the first pattern + /// // will be returned in this case when doing a search for any of the + /// // patterns. + /// let expected = Some(Match::must(0, 0..6)); + /// re.try_search(&mut cache, &Input::new(haystack), &mut caps)?; + /// assert_eq!(expected, caps.get_match()); + /// + /// // But if we want to check whether some other pattern matches, then we + /// // can provide its pattern ID. + /// let expected = Some(Match::must(1, 0..6)); + /// let input = Input::new(haystack) + /// .anchored(Anchored::Pattern(PatternID::must(1))); + /// re.try_search(&mut cache, &input, &mut caps)?; + /// assert_eq!(expected, caps.get_match()); + /// + /// # Ok::<(), Box>(()) + /// ``` + /// + /// # Example: specifying the bounds of a search + /// + /// This example shows how providing the bounds of a search can produce + /// different results than simply sub-slicing the haystack. 
+ /// + /// ``` + /// # if cfg!(miri) { return Ok(()); } // miri takes too long + /// use regex_automata::{ + /// nfa::thompson::backtrack::BoundedBacktracker, + /// Match, Input, + /// }; + /// + /// let re = BoundedBacktracker::new(r"\b[0-9]{3}\b")?; + /// let (mut cache, mut caps) = (re.create_cache(), re.create_captures()); + /// let haystack = "foo123bar"; + /// + /// // Since we sub-slice the haystack, the search doesn't know about + /// // the larger context and assumes that `123` is surrounded by word + /// // boundaries. And of course, the match position is reported relative + /// // to the sub-slice as well, which means we get `0..3` instead of + /// // `3..6`. + /// let expected = Some(Match::must(0, 0..3)); + /// re.try_search(&mut cache, &Input::new(&haystack[3..6]), &mut caps)?; + /// assert_eq!(expected, caps.get_match()); + /// + /// // But if we provide the bounds of the search within the context of the + /// // entire haystack, then the search can take the surrounding context + /// // into account. (And if we did find a match, it would be reported + /// // as a valid offset into `haystack` instead of its sub-slice.) + /// let expected = None; + /// re.try_search( + /// &mut cache, &Input::new(haystack).range(3..6), &mut caps, + /// )?; + /// assert_eq!(expected, caps.get_match()); + /// + /// # Ok::<(), Box>(()) + /// ``` + #[inline] + pub fn try_search( + &self, + cache: &mut Cache, + input: &Input<'_>, + caps: &mut Captures, + ) -> Result<(), MatchError> { + caps.set_pattern(None); + let pid = self.try_search_slots(cache, input, caps.slots_mut())?; + caps.set_pattern(pid); + Ok(()) + } + + /// Executes a leftmost forward search and writes the spans of capturing + /// groups that participated in a match into the provided `slots`, and + /// returns the matching pattern ID. The contents of the slots for patterns + /// other than the matching pattern are unspecified. If no match was found, + /// then `None` is returned and the contents of all `slots` is unspecified. + /// + /// This is like [`BoundedBacktracker::try_search`], but it accepts a raw + /// slots slice instead of a `Captures` value. This is useful in contexts + /// where you don't want or need to allocate a `Captures`. + /// + /// It is legal to pass _any_ number of slots to this routine. If the regex + /// engine would otherwise write a slot offset that doesn't fit in the + /// provided slice, then it is simply skipped. In general though, there are + /// usually three slice lengths you might want to use: + /// + /// * An empty slice, if you only care about which pattern matched. + /// * A slice with + /// [`pattern_len() * 2`](crate::nfa::thompson::NFA::pattern_len) + /// slots, if you only care about the overall match spans for each matching + /// pattern. + /// * A slice with + /// [`slot_len()`](crate::util::captures::GroupInfo::slot_len) slots, which + /// permits recording match offsets for every capturing group in every + /// pattern. + /// + /// # Errors + /// + /// This routine only errors if the search could not complete. For this + /// backtracking regex engine, this only occurs when the haystack length + /// exceeds [`BoundedBacktracker::max_haystack_len`]. + /// + /// When a search cannot complete, callers cannot know whether a match + /// exists or not. + /// + /// # Example + /// + /// This example shows how to find the overall match offsets in a + /// multi-pattern search without allocating a `Captures` value. Indeed, we + /// can put our slots right on the stack. 
+ /// + /// ``` + /// # if cfg!(miri) { return Ok(()); } // miri takes too long + /// use regex_automata::{ + /// nfa::thompson::backtrack::BoundedBacktracker, + /// PatternID, Input, + /// }; + /// + /// let re = BoundedBacktracker::new_many(&[ + /// r"\pL+", + /// r"\d+", + /// ])?; + /// let mut cache = re.create_cache(); + /// let input = Input::new("!@#123"); + /// + /// // We only care about the overall match offsets here, so we just + /// // allocate two slots for each pattern. Each slot records the start + /// // and end of the match. + /// let mut slots = [None; 4]; + /// let pid = re.try_search_slots(&mut cache, &input, &mut slots)?; + /// assert_eq!(Some(PatternID::must(1)), pid); + /// + /// // The overall match offsets are always at 'pid * 2' and 'pid * 2 + 1'. + /// // See 'GroupInfo' for more details on the mapping between groups and + /// // slot indices. + /// let slot_start = pid.unwrap().as_usize() * 2; + /// let slot_end = slot_start + 1; + /// assert_eq!(Some(3), slots[slot_start].map(|s| s.get())); + /// assert_eq!(Some(6), slots[slot_end].map(|s| s.get())); + /// + /// # Ok::<(), Box>(()) + /// ``` + #[inline] + pub fn try_search_slots( + &self, + cache: &mut Cache, + input: &Input<'_>, + slots: &mut [Option], + ) -> Result, MatchError> { + let utf8empty = self.get_nfa().has_empty() && self.get_nfa().is_utf8(); + if !utf8empty { + let maybe_hm = self.try_search_slots_imp(cache, input, slots)?; + return Ok(maybe_hm.map(|hm| hm.pattern())); + } + // See PikeVM::try_search_slots for why we do this. + let min = self.get_nfa().group_info().implicit_slot_len(); + if slots.len() >= min { + let maybe_hm = self.try_search_slots_imp(cache, input, slots)?; + return Ok(maybe_hm.map(|hm| hm.pattern())); + } + if self.get_nfa().pattern_len() == 1 { + let mut enough = [None, None]; + let got = self.try_search_slots_imp(cache, input, &mut enough)?; + // This is OK because we know `enough_slots` is strictly bigger + // than `slots`, otherwise this special case isn't reached. + slots.copy_from_slice(&enough[..slots.len()]); + return Ok(got.map(|hm| hm.pattern())); + } + let mut enough = vec![None; min]; + let got = self.try_search_slots_imp(cache, input, &mut enough)?; + // This is OK because we know `enough_slots` is strictly bigger than + // `slots`, otherwise this special case isn't reached. + slots.copy_from_slice(&enough[..slots.len()]); + Ok(got.map(|hm| hm.pattern())) + } + + /// This is the actual implementation of `try_search_slots_imp` that + /// doesn't account for the special case when 1) the NFA has UTF-8 mode + /// enabled, 2) the NFA can match the empty string and 3) the caller has + /// provided an insufficient number of slots to record match offsets. + #[inline(never)] + fn try_search_slots_imp( + &self, + cache: &mut Cache, + input: &Input<'_>, + slots: &mut [Option], + ) -> Result, MatchError> { + let utf8empty = self.get_nfa().has_empty() && self.get_nfa().is_utf8(); + let hm = match self.search_imp(cache, input, slots)? { + None => return Ok(None), + Some(hm) if !utf8empty => return Ok(Some(hm)), + Some(hm) => hm, + }; + empty::skip_splits_fwd(input, hm, hm.offset(), |input| { + Ok(self + .search_imp(cache, input, slots)? + .map(|hm| (hm, hm.offset()))) + }) + } + + /// The implementation of standard leftmost backtracking search. + /// + /// Capturing group spans are written to 'caps', but only if requested. 
+ /// 'caps' can be one of three things: 1) totally empty, in which case, we + /// only report the pattern that matched or 2) only has slots for recording + /// the overall match offsets for any pattern or 3) has all slots available + /// for recording the spans of any groups participating in a match. + fn search_imp( + &self, + cache: &mut Cache, + input: &Input<'_>, + slots: &mut [Option], + ) -> Result, MatchError> { + // Unlike in the PikeVM, we write our capturing group spans directly + // into the caller's captures groups. So we have to make sure we're + // starting with a blank slate first. In the PikeVM, we avoid this + // by construction: the spans that are copied to every slot in the + // 'Captures' value already account for presence/absence. In this + // backtracker, we write directly into the caller provided slots, where + // as in the PikeVM, we write into scratch space first and only copy + // them to the caller provided slots when a match is found. + for slot in slots.iter_mut() { + *slot = None; + } + cache.setup_search(&self, input)?; + if input.is_done() { + return Ok(None); + } + let (anchored, start_id) = match input.get_anchored() { + // Only way we're unanchored is if both the caller asked for an + // unanchored search *and* the pattern is itself not anchored. + Anchored::No => ( + self.nfa.is_always_start_anchored(), + // We always use the anchored starting state here, even if + // doing an unanchored search. The "unanchored" part of it is + // implemented in the loop below, by simply trying the next + // byte offset if the previous backtracking exploration failed. + self.nfa.start_anchored(), + ), + Anchored::Yes => (true, self.nfa.start_anchored()), + Anchored::Pattern(pid) => match self.nfa.start_pattern(pid) { + None => return Ok(None), + Some(sid) => (true, sid), + }, + }; + if anchored { + let at = input.start(); + return Ok(self.backtrack(cache, input, at, start_id, slots)); + } + let pre = self.get_config().get_prefilter(); + let mut at = input.start(); + while at <= input.end() { + if let Some(ref pre) = pre { + let span = Span::from(at..input.end()); + match pre.find(input.haystack(), span) { + None => break, + Some(ref span) => at = span.start, + } + } + if let Some(hm) = self.backtrack(cache, input, at, start_id, slots) + { + return Ok(Some(hm)); + } + at += 1; + } + Ok(None) + } + + /// Look for a match starting at `at` in `input` and write the matching + /// pattern ID and group spans to `caps`. The search uses `start_id` as its + /// starting state in the underlying NFA. + /// + /// If no match was found, then the caller should increment `at` and try + /// at the next position. + #[cfg_attr(feature = "perf-inline", inline(always))] + fn backtrack( + &self, + cache: &mut Cache, + input: &Input<'_>, + at: usize, + start_id: StateID, + slots: &mut [Option], + ) -> Option { + cache.stack.push(Frame::Step { sid: start_id, at }); + while let Some(frame) = cache.stack.pop() { + match frame { + Frame::Step { sid, at } => { + if let Some(hm) = self.step(cache, input, sid, at, slots) { + return Some(hm); + } + } + Frame::RestoreCapture { slot, offset } => { + slots[slot] = offset; + } + } + } + None + } + + // LAMENTATION: The actual backtracking search is implemented in about + // 75 lines below. Yet this file is over 2,000 lines long. What have I + // done? + + /// Execute a "step" in the backtracing algorithm. + /// + /// A "step" is somewhat of a misnomer, because this routine keeps going + /// until it either runs out of things to try or fins a match. 
In the + /// former case, it may have pushed some things on to the backtracking + /// stack, in which case, those will be tried next as part of the + /// 'backtrack' routine above. + #[cfg_attr(feature = "perf-inline", inline(always))] + fn step( + &self, + cache: &mut Cache, + input: &Input<'_>, + mut sid: StateID, + mut at: usize, + slots: &mut [Option], + ) -> Option { + loop { + if !cache.visited.insert(sid, at - input.start()) { + return None; + } + match *self.nfa.state(sid) { + State::ByteRange { ref trans } => { + // Why do we need this? Unlike other regex engines in this + // crate, the backtracker can steam roll ahead in the + // haystack outside of the main loop over the bytes in the + // haystack. While 'trans.matches()' below handles the case + // of 'at' being out of bounds of 'input.haystack()', we + // also need to handle the case of 'at' going out of bounds + // of the span the caller asked to search. + // + // We should perhaps make the 'trans.matches()' API accept + // an '&Input' instead of a '&[u8]'. Or at least, add a new + // API that does it. + if at >= input.end() { + return None; + } + if !trans.matches(input.haystack(), at) { + return None; + } + sid = trans.next; + at += 1; + } + State::Sparse(ref sparse) => { + if at >= input.end() { + return None; + } + sid = sparse.matches(input.haystack(), at)?; + at += 1; + } + State::Dense(ref dense) => { + if at >= input.end() { + return None; + } + sid = dense.matches(input.haystack(), at)?; + at += 1; + } + State::Look { look, next } => { + // OK because we don't permit building a searcher with a + // Unicode word boundary if the requisite Unicode data is + // unavailable. + if !self.nfa.look_matcher().matches_inline( + look, + input.haystack(), + at, + ) { + return None; + } + sid = next; + } + State::Union { ref alternates } => { + sid = match alternates.get(0) { + None => return None, + Some(&sid) => sid, + }; + cache.stack.extend( + alternates[1..] + .iter() + .copied() + .rev() + .map(|sid| Frame::Step { sid, at }), + ); + } + State::BinaryUnion { alt1, alt2 } => { + sid = alt1; + cache.stack.push(Frame::Step { sid: alt2, at }); + } + State::Capture { next, slot, .. } => { + if slot.as_usize() < slots.len() { + cache.stack.push(Frame::RestoreCapture { + slot, + offset: slots[slot], + }); + slots[slot] = NonMaxUsize::new(at); + } + sid = next; + } + State::Fail => return None, + State::Match { pattern_id } => { + return Some(HalfMatch::new(pattern_id, at)); + } + } + } + } +} + +/// An iterator over all non-overlapping matches for a fallible search. +/// +/// The iterator yields a `Result { + re: &'r BoundedBacktracker, + cache: &'c mut Cache, + caps: Captures, + it: iter::Searcher<'h>, +} + +impl<'r, 'c, 'h> Iterator for TryFindMatches<'r, 'c, 'h> { + type Item = Result; + + #[inline] + fn next(&mut self) -> Option> { + // Splitting 'self' apart seems necessary to appease borrowck. + let TryFindMatches { re, ref mut cache, ref mut caps, ref mut it } = + *self; + it.try_advance(|input| { + re.try_search(cache, input, caps)?; + Ok(caps.get_match()) + }) + .transpose() + } +} + +/// An iterator over all non-overlapping leftmost matches, with their capturing +/// groups, for a fallible search. +/// +/// The iterator yields a `Result` value until no more +/// matches could be found. +/// +/// The lifetime parameters are as follows: +/// +/// * `'r` represents the lifetime of the BoundedBacktracker. +/// * `'c` represents the lifetime of the BoundedBacktracker's cache. 
+/// * `'h` represents the lifetime of the haystack being searched. +/// +/// This iterator can be created with the +/// [`BoundedBacktracker::try_captures_iter`] method. +#[derive(Debug)] +pub struct TryCapturesMatches<'r, 'c, 'h> { + re: &'r BoundedBacktracker, + cache: &'c mut Cache, + caps: Captures, + it: iter::Searcher<'h>, +} + +impl<'r, 'c, 'h> Iterator for TryCapturesMatches<'r, 'c, 'h> { + type Item = Result; + + #[inline] + fn next(&mut self) -> Option> { + // Splitting 'self' apart seems necessary to appease borrowck. + let TryCapturesMatches { re, ref mut cache, ref mut caps, ref mut it } = + *self; + let _ = it + .try_advance(|input| { + re.try_search(cache, input, caps)?; + Ok(caps.get_match()) + }) + .transpose()?; + if caps.is_match() { + Some(Ok(caps.clone())) + } else { + None + } + } +} + +/// A cache represents mutable state that a [`BoundedBacktracker`] requires +/// during a search. +/// +/// For a given [`BoundedBacktracker`], its corresponding cache may be created +/// either via [`BoundedBacktracker::create_cache`], or via [`Cache::new`]. +/// They are equivalent in every way, except the former does not require +/// explicitly importing `Cache`. +/// +/// A particular `Cache` is coupled with the [`BoundedBacktracker`] from which +/// it was created. It may only be used with that `BoundedBacktracker`. A cache +/// and its allocations may be re-purposed via [`Cache::reset`], in which case, +/// it can only be used with the new `BoundedBacktracker` (and not the old +/// one). +#[derive(Clone, Debug)] +pub struct Cache { + /// Stack used on the heap for doing backtracking instead of the + /// traditional recursive approach. We don't want recursion because then + /// we're likely to hit a stack overflow for bigger regexes. + stack: Vec, + /// The set of (StateID, HaystackOffset) pairs that have been visited + /// by the backtracker within a single search. If such a pair has been + /// visited, then we avoid doing the work for that pair again. This is + /// what "bounds" the backtracking and prevents it from having worst case + /// exponential time. + visited: Visited, +} + +impl Cache { + /// Create a new [`BoundedBacktracker`] cache. + /// + /// A potentially more convenient routine to create a cache is + /// [`BoundedBacktracker::create_cache`], as it does not require also + /// importing the `Cache` type. + /// + /// If you want to reuse the returned `Cache` with some other + /// `BoundedBacktracker`, then you must call [`Cache::reset`] with the + /// desired `BoundedBacktracker`. + pub fn new(re: &BoundedBacktracker) -> Cache { + Cache { stack: vec![], visited: Visited::new(re) } + } + + /// Reset this cache such that it can be used for searching with different + /// [`BoundedBacktracker`]. + /// + /// A cache reset permits reusing memory already allocated in this cache + /// with a different `BoundedBacktracker`. + /// + /// # Example + /// + /// This shows how to re-purpose a cache for use with a different + /// `BoundedBacktracker`. + /// + /// ``` + /// # if cfg!(miri) { return Ok(()); } // miri takes too long + /// use regex_automata::{ + /// nfa::thompson::backtrack::BoundedBacktracker, + /// Match, + /// }; + /// + /// let re1 = BoundedBacktracker::new(r"\w")?; + /// let re2 = BoundedBacktracker::new(r"\W")?; + /// + /// let mut cache = re1.create_cache(); + /// assert_eq!( + /// Some(Ok(Match::must(0, 0..2))), + /// re1.try_find_iter(&mut cache, "Δ").next(), + /// ); + /// + /// // Using 'cache' with re2 is not allowed. 
It may result in panics or + /// // incorrect results. In order to re-purpose the cache, we must reset + /// // it with the BoundedBacktracker we'd like to use it with. + /// // + /// // Similarly, after this reset, using the cache with 're1' is also not + /// // allowed. + /// cache.reset(&re2); + /// assert_eq!( + /// Some(Ok(Match::must(0, 0..3))), + /// re2.try_find_iter(&mut cache, "☃").next(), + /// ); + /// + /// # Ok::<(), Box>(()) + /// ``` + pub fn reset(&mut self, re: &BoundedBacktracker) { + self.visited.reset(re); + } + + /// Returns the heap memory usage, in bytes, of this cache. + /// + /// This does **not** include the stack size used up by this cache. To + /// compute that, use `std::mem::size_of::()`. + pub fn memory_usage(&self) -> usize { + self.stack.len() * core::mem::size_of::() + + self.visited.memory_usage() + } + + /// Clears this cache. This should be called at the start of every search + /// to ensure we start with a clean slate. + /// + /// This also sets the length of the capturing groups used in the current + /// search. This permits an optimization where by 'SlotTable::for_state' + /// only returns the number of slots equivalent to the number of slots + /// given in the 'Captures' value. This may be less than the total number + /// of possible slots, e.g., when one only wants to track overall match + /// offsets. This in turn permits less copying of capturing group spans + /// in the BoundedBacktracker. + fn setup_search( + &mut self, + re: &BoundedBacktracker, + input: &Input<'_>, + ) -> Result<(), MatchError> { + self.stack.clear(); + self.visited.setup_search(re, input)?; + Ok(()) + } +} + +/// Represents a stack frame on the heap while doing backtracking. +/// +/// Instead of using explicit recursion for backtracking, we use a stack on +/// the heap to keep track of things that we want to explore if the current +/// backtracking branch turns out to not lead to a match. +#[derive(Clone, Debug)] +enum Frame { + /// Look for a match starting at `sid` and the given position in the + /// haystack. + Step { sid: StateID, at: usize }, + /// Reset the given `slot` to the given `offset` (which might be `None`). + /// This effectively gives a "scope" to capturing groups, such that an + /// offset for a particular group only gets returned if the match goes + /// through that capturing group. If backtracking ends up going down a + /// different branch that results in a different offset (or perhaps none at + /// all), then this "restore capture" frame will cause the offset to get + /// reset. + RestoreCapture { slot: SmallIndex, offset: Option }, +} + +/// A bitset that keeps track of whether a particular (StateID, offset) has +/// been considered during backtracking. If it has already been visited, then +/// backtracking skips it. This is what gives backtracking its "bound." +#[derive(Clone, Debug)] +struct Visited { + /// The actual underlying bitset. Each element in the bitset corresponds + /// to a particular (StateID, offset) pair. States correspond to the rows + /// and the offsets correspond to the columns. + /// + /// If our underlying NFA has N states and the haystack we're searching + /// has M bytes, then we have N*(M+1) entries in our bitset table. The + /// M+1 occurs because our matches are delayed by one byte (to support + /// look-around), and so we need to handle the end position itself rather + /// than stopping just before the end. (If there is no end position, then + /// it's treated as "end-of-input," which is matched by things like '$'.) 
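+    ///
+    /// As an illustrative example (the numbers here are made up, not from
+    /// the upstream docs): an NFA with N=10 states searching a span of M=63
+    /// bytes needs 10*(63+1) = 640 entries, which on a 64-bit target is
+    /// div_ceil(640, 64) = 10 usize blocks.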
+ /// + /// Given BITS=N*(M+1), we wind up with div_ceil(BITS, sizeof(usize)) + /// blocks. + /// + /// We use 'usize' to represent our blocks because it makes some of the + /// arithmetic in 'insert' a bit nicer. For example, if we used 'u32' for + /// our block, we'd either need to cast u32s to usizes or usizes to u32s. + bitset: Vec, + /// The stride represents one plus length of the haystack we're searching + /// (as described above). The stride must be initialized for each search. + stride: usize, +} + +impl Visited { + /// The size of each block, in bits. + const BLOCK_SIZE: usize = 8 * core::mem::size_of::(); + + /// Create a new visited set for the given backtracker. + /// + /// The set is ready to use, but must be setup at the beginning of each + /// search by calling `setup_search`. + fn new(re: &BoundedBacktracker) -> Visited { + let mut visited = Visited { bitset: vec![], stride: 0 }; + visited.reset(re); + visited + } + + /// Insert the given (StateID, offset) pair into this set. If it already + /// exists, then this is a no-op and it returns false. Otherwise this + /// returns true. + fn insert(&mut self, sid: StateID, at: usize) -> bool { + let table_index = sid.as_usize() * self.stride + at; + let block_index = table_index / Visited::BLOCK_SIZE; + let bit = table_index % Visited::BLOCK_SIZE; + let block_with_bit = 1 << bit; + if self.bitset[block_index] & block_with_bit != 0 { + return false; + } + self.bitset[block_index] |= block_with_bit; + true + } + + /// Reset this visited set to work with the given bounded backtracker. + fn reset(&mut self, _: &BoundedBacktracker) { + self.bitset.truncate(0); + } + + /// Setup this visited set to work for a search using the given NFA + /// and input configuration. The NFA must be the same NFA used by the + /// BoundedBacktracker given to Visited::reset. Failing to call this might + /// result in panics or silently incorrect search behavior. + fn setup_search( + &mut self, + re: &BoundedBacktracker, + input: &Input<'_>, + ) -> Result<(), MatchError> { + // Our haystack length is only the length of the span of the entire + // haystack that we'll be searching. + let haylen = input.get_span().len(); + let err = || MatchError::haystack_too_long(haylen); + // Our stride is one more than the length of the input because our main + // search loop includes the position at input.end(). (And it does this + // because matches are delayed by one byte to account for look-around.) + self.stride = haylen + 1; + let needed_capacity = + match re.get_nfa().states().len().checked_mul(self.stride) { + None => return Err(err()), + Some(capacity) => capacity, + }; + let max_capacity = 8 * re.get_config().get_visited_capacity(); + if needed_capacity > max_capacity { + return Err(err()); + } + let needed_blocks = div_ceil(needed_capacity, Visited::BLOCK_SIZE); + self.bitset.truncate(needed_blocks); + for block in self.bitset.iter_mut() { + *block = 0; + } + if needed_blocks > self.bitset.len() { + self.bitset.resize(needed_blocks, 0); + } + Ok(()) + } + + /// Return the heap memory usage, in bytes, of this visited set. + fn memory_usage(&self) -> usize { + self.bitset.len() * core::mem::size_of::() + } +} + +/// Integer division, but rounds up instead of down. +fn div_ceil(lhs: usize, rhs: usize) -> usize { + if lhs % rhs == 0 { + lhs / rhs + } else { + (lhs / rhs) + 1 + } +} + +#[cfg(test)] +mod tests { + use super::*; + + // This is a regression test for the maximum haystack length computation. 
+ // Previously, it assumed that the total capacity of the backtracker's + // bitset would always be greater than the number of NFA states. But there + // is of course no guarantee that this is true. This regression test + // ensures that not only does `max_haystack_len` not panic, but that it + // should return `0`. + #[cfg(feature = "syntax")] + #[test] + fn max_haystack_len_overflow() { + let re = BoundedBacktracker::builder() + .configure(BoundedBacktracker::config().visited_capacity(10)) + .build(r"[0-9A-Za-z]{100}") + .unwrap(); + assert_eq!(0, re.max_haystack_len()); + } +} diff --git a/vendor/regex-automata/src/nfa/thompson/builder.rs b/vendor/regex-automata/src/nfa/thompson/builder.rs new file mode 100644 index 00000000000000..6b69e8784ded8a --- /dev/null +++ b/vendor/regex-automata/src/nfa/thompson/builder.rs @@ -0,0 +1,1337 @@ +use core::mem; + +use alloc::{sync::Arc, vec, vec::Vec}; + +use crate::{ + nfa::thompson::{ + error::BuildError, + nfa::{self, SparseTransitions, Transition, NFA}, + }, + util::{ + look::{Look, LookMatcher}, + primitives::{IteratorIndexExt, PatternID, SmallIndex, StateID}, + }, +}; + +/// An intermediate NFA state used during construction. +/// +/// During construction of an NFA, it is often convenient to work with states +/// that are amenable to mutation and other carry more information than we +/// otherwise need once an NFA has been built. This type represents those +/// needs. +/// +/// Once construction is finished, the builder will convert these states to a +/// [`nfa::thompson::State`](crate::nfa::thompson::State). This conversion not +/// only results in a simpler representation, but in some cases, entire classes +/// of states are completely removed (such as [`State::Empty`]). +#[derive(Clone, Debug, Eq, PartialEq)] +enum State { + /// An empty state whose only purpose is to forward the automaton to + /// another state via an unconditional epsilon transition. + /// + /// Unconditional epsilon transitions are quite useful during the + /// construction of an NFA, as they permit the insertion of no-op + /// placeholders that make it easier to compose NFA sub-graphs. When + /// the Thompson NFA builder produces a final NFA, all unconditional + /// epsilon transitions are removed, and state identifiers are remapped + /// accordingly. + Empty { + /// The next state that this state should transition to. + next: StateID, + }, + /// A state that only transitions to another state if the current input + /// byte is in a particular range of bytes. + ByteRange { trans: Transition }, + /// A state with possibly many transitions, represented in a sparse + /// fashion. Transitions must be ordered lexicographically by input range + /// and be non-overlapping. As such, this may only be used when every + /// transition has equal priority. (In practice, this is only used for + /// encoding large UTF-8 automata.) In contrast, a `Union` state has each + /// alternate in order of priority. Priority is used to implement greedy + /// matching and also alternations themselves, e.g., `abc|a` where `abc` + /// has priority over `a`. + /// + /// To clarify, it is possible to remove `Sparse` and represent all things + /// that `Sparse` is used for via `Union`. But this creates a more bloated + /// NFA with more epsilon transitions than is necessary in the special case + /// of character classes. + Sparse { transitions: Vec }, + /// A conditional epsilon transition satisfied via some sort of + /// look-around. 
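+    ///
+    /// (For instance, anchors such as `^` and `$` and word boundaries such
+    /// as `\b` are compiled into states of this kind.)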
+ Look { look: Look, next: StateID }, + /// An empty state that records the start of a capture location. This is an + /// unconditional epsilon transition like `Empty`, except it can be used to + /// record position information for a capture group when using the NFA for + /// search. + CaptureStart { + /// The ID of the pattern that this capture was defined. + pattern_id: PatternID, + /// The capture group index that this capture state corresponds to. + /// The capture group index is always relative to its corresponding + /// pattern. Therefore, in the presence of multiple patterns, both the + /// pattern ID and the capture group index are required to uniquely + /// identify a capturing group. + group_index: SmallIndex, + /// The next state that this state should transition to. + next: StateID, + }, + /// An empty state that records the end of a capture location. This is an + /// unconditional epsilon transition like `Empty`, except it can be used to + /// record position information for a capture group when using the NFA for + /// search. + CaptureEnd { + /// The ID of the pattern that this capture was defined. + pattern_id: PatternID, + /// The capture group index that this capture state corresponds to. + /// The capture group index is always relative to its corresponding + /// pattern. Therefore, in the presence of multiple patterns, both the + /// pattern ID and the capture group index are required to uniquely + /// identify a capturing group. + group_index: SmallIndex, + /// The next state that this state should transition to. + next: StateID, + }, + /// An alternation such that there exists an epsilon transition to all + /// states in `alternates`, where matches found via earlier transitions + /// are preferred over later transitions. + Union { alternates: Vec }, + /// An alternation such that there exists an epsilon transition to all + /// states in `alternates`, where matches found via later transitions are + /// preferred over earlier transitions. + /// + /// This "reverse" state exists for convenience during compilation that + /// permits easy construction of non-greedy combinations of NFA states. At + /// the end of compilation, Union and UnionReverse states are merged into + /// one Union type of state, where the latter has its epsilon transitions + /// reversed to reflect the priority inversion. + /// + /// The "convenience" here arises from the fact that as new states are + /// added to the list of `alternates`, we would like that add operation + /// to be amortized constant time. But if we used a `Union`, we'd need to + /// prepend the state, which takes O(n) time. There are other approaches we + /// could use to solve this, but this seems simple enough. + UnionReverse { alternates: Vec }, + /// A state that cannot be transitioned out of. This is useful for cases + /// where you want to prevent matching from occurring. For example, if your + /// regex parser permits empty character classes, then one could choose a + /// `Fail` state to represent it. + Fail, + /// A match state. There is at most one such occurrence of this state in + /// an NFA for each pattern compiled into the NFA. At time of writing, a + /// match state is always produced for every pattern given, but in theory, + /// if a pattern can never lead to a match, then the match state could be + /// omitted. + /// + /// `pattern_id` refers to the ID of the pattern itself, which corresponds + /// to the pattern's index (starting at 0). 
+ Match { pattern_id: PatternID }, +} + +impl State { + /// If this state is an unconditional epsilon transition, then this returns + /// the target of the transition. + fn goto(&self) -> Option { + match *self { + State::Empty { next } => Some(next), + State::Union { ref alternates } if alternates.len() == 1 => { + Some(alternates[0]) + } + State::UnionReverse { ref alternates } + if alternates.len() == 1 => + { + Some(alternates[0]) + } + _ => None, + } + } + + /// Returns the heap memory usage, in bytes, of this state. + fn memory_usage(&self) -> usize { + match *self { + State::Empty { .. } + | State::ByteRange { .. } + | State::Look { .. } + | State::CaptureStart { .. } + | State::CaptureEnd { .. } + | State::Fail + | State::Match { .. } => 0, + State::Sparse { ref transitions } => { + transitions.len() * mem::size_of::() + } + State::Union { ref alternates } => { + alternates.len() * mem::size_of::() + } + State::UnionReverse { ref alternates } => { + alternates.len() * mem::size_of::() + } + } + } +} + +/// An abstraction for building Thompson NFAs by hand. +/// +/// A builder is what a [`thompson::Compiler`](crate::nfa::thompson::Compiler) +/// uses internally to translate a regex's high-level intermediate +/// representation into an [`NFA`]. +/// +/// The primary function of this builder is to abstract away the internal +/// representation of an NFA and make it difficult to produce NFAs are that +/// internally invalid or inconsistent. This builder also provides a way to +/// add "empty" states (which can be thought of as unconditional epsilon +/// transitions), despite the fact that [`thompson::State`](nfa::State) does +/// not have any "empty" representation. The advantage of "empty" states is +/// that they make the code for constructing a Thompson NFA logically simpler. +/// +/// Many of the routines on this builder may panic or return errors. Generally +/// speaking, panics occur when an invalid sequence of method calls were made, +/// where as an error occurs if things get too big. (Where "too big" might mean +/// exhausting identifier space or using up too much heap memory in accordance +/// with the configured [`size_limit`](Builder::set_size_limit).) +/// +/// # Overview +/// +/// ## Adding multiple patterns +/// +/// Each pattern you add to an NFA should correspond to a pair of +/// [`Builder::start_pattern`] and [`Builder::finish_pattern`] calls, with +/// calls inbetween that add NFA states for that pattern. NFA states may be +/// added without first calling `start_pattern`, with the exception of adding +/// capturing states. +/// +/// ## Adding NFA states +/// +/// Here is a very brief overview of each of the methods that add NFA states. +/// Every method adds a single state. +/// +/// * [`add_empty`](Builder::add_empty): Add a state with a single +/// unconditional epsilon transition to another state. +/// * [`add_union`](Builder::add_union): Adds a state with unconditional +/// epsilon transitions to two or more states, with earlier transitions +/// preferred over later ones. +/// * [`add_union_reverse`](Builder::add_union_reverse): Adds a state with +/// unconditional epsilon transitions to two or more states, with later +/// transitions preferred over earlier ones. +/// * [`add_range`](Builder::add_range): Adds a state with a single transition +/// to another state that can only be followed if the current input byte is +/// within the range given. 
+/// * [`add_sparse`](Builder::add_sparse): Adds a state with two or more +/// range transitions to other states, where a transition is only followed +/// if the current input byte is within one of the ranges. All transitions +/// in this state have equal priority, and the corresponding ranges must be +/// non-overlapping. +/// * [`add_look`](Builder::add_look): Adds a state with a single *conditional* +/// epsilon transition to another state, where the condition depends on a +/// limited look-around property. +/// * [`add_capture_start`](Builder::add_capture_start): Adds a state with +/// a single unconditional epsilon transition that also instructs an NFA +/// simulation to record the current input position to a specific location in +/// memory. This is intended to represent the starting location of a capturing +/// group. +/// * [`add_capture_end`](Builder::add_capture_end): Adds a state with +/// a single unconditional epsilon transition that also instructs an NFA +/// simulation to record the current input position to a specific location in +/// memory. This is intended to represent the ending location of a capturing +/// group. +/// * [`add_fail`](Builder::add_fail): Adds a state that never transitions to +/// another state. +/// * [`add_match`](Builder::add_match): Add a state that indicates a match has +/// been found for a particular pattern. A match state is a final state with +/// no outgoing transitions. +/// +/// ## Setting transitions between NFA states +/// +/// The [`Builder::patch`] method creates a transition from one state to the +/// next. If the `from` state corresponds to a state that supports multiple +/// outgoing transitions (such as "union"), then this adds the corresponding +/// transition. Otherwise, it sets the single transition. (This routine panics +/// if `from` corresponds to a state added by `add_sparse`, since sparse states +/// need more specialized handling.) +/// +/// # Example +/// +/// This annotated example shows how to hand construct the regex `[a-z]+` +/// (without an unanchored prefix). +/// +/// ``` +/// use regex_automata::{ +/// nfa::thompson::{pikevm::PikeVM, Builder, Transition}, +/// util::primitives::StateID, +/// Match, +/// }; +/// +/// let mut builder = Builder::new(); +/// // Before adding NFA states for our pattern, we need to tell the builder +/// // that we are starting the pattern. +/// builder.start_pattern()?; +/// // Since we use the Pike VM below for searching, we need to add capturing +/// // states. If you're just going to build a DFA from the NFA, then capturing +/// // states do not need to be added. +/// let start = builder.add_capture_start(StateID::ZERO, 0, None)?; +/// let range = builder.add_range(Transition { +/// // We don't know the state ID of the 'next' state yet, so we just fill +/// // in a dummy 'ZERO' value. +/// start: b'a', end: b'z', next: StateID::ZERO, +/// })?; +/// // This state will point back to 'range', but also enable us to move ahead. +/// // That is, this implements the '+' repetition operator. We add 'range' and +/// // then 'end' below to this alternation. +/// let alt = builder.add_union(vec![])?; +/// // The final state before the match state, which serves to capture the +/// // end location of the match. +/// let end = builder.add_capture_end(StateID::ZERO, 0)?; +/// // The match state for our pattern. +/// let mat = builder.add_match()?; +/// // Now we fill in the transitions between states. 
+/// builder.patch(start, range)?; +/// builder.patch(range, alt)?; +/// // If we added 'end' before 'range', then we'd implement non-greedy +/// // matching, i.e., '+?'. +/// builder.patch(alt, range)?; +/// builder.patch(alt, end)?; +/// builder.patch(end, mat)?; +/// // We must explicitly finish pattern and provide the starting state ID for +/// // this particular pattern. +/// builder.finish_pattern(start)?; +/// // Finally, when we build the NFA, we provide the anchored and unanchored +/// // starting state IDs. Since we didn't bother with an unanchored prefix +/// // here, we only support anchored searching. Thus, both starting states are +/// // the same. +/// let nfa = builder.build(start, start)?; +/// +/// // Now build a Pike VM from our NFA, and use it for searching. This shows +/// // how we can use a regex engine without ever worrying about syntax! +/// let re = PikeVM::new_from_nfa(nfa)?; +/// let mut cache = re.create_cache(); +/// let mut caps = re.create_captures(); +/// let expected = Some(Match::must(0, 0..3)); +/// re.captures(&mut cache, "foo0", &mut caps); +/// assert_eq!(expected, caps.get_match()); +/// +/// # Ok::<(), Box>(()) +/// ``` +#[derive(Clone, Debug, Default)] +pub struct Builder { + /// The ID of the pattern that we're currently building. + /// + /// Callers are required to set (and unset) this by calling + /// {start,finish}_pattern. Otherwise, most methods will panic. + pattern_id: Option, + /// A sequence of intermediate NFA states. Once a state is added to this + /// sequence, it is assigned a state ID equivalent to its index. Once a + /// state is added, it is still expected to be mutated, e.g., to set its + /// transition to a state that didn't exist at the time it was added. + states: Vec, + /// The starting states for each individual pattern. Starting at any + /// of these states will result in only an anchored search for the + /// corresponding pattern. The vec is indexed by pattern ID. When the NFA + /// contains a single regex, then `start_pattern[0]` and `start_anchored` + /// are always equivalent. + start_pattern: Vec, + /// A map from pattern ID to capture group index to name. (If no name + /// exists, then a None entry is present. Thus, all capturing groups are + /// present in this mapping.) + /// + /// The outer vec is indexed by pattern ID, while the inner vec is indexed + /// by capture index offset for the corresponding pattern. + /// + /// The first capture group for each pattern is always unnamed and is thus + /// always None. + captures: Vec>>>, + /// The combined memory used by each of the 'State's in 'states'. This + /// only includes heap usage by each state, and not the size of the state + /// itself. In other words, this tracks heap memory used that isn't + /// captured via `size_of::() * states.len()`. + memory_states: usize, + /// Whether this NFA only matches UTF-8 and whether regex engines using + /// this NFA for searching should report empty matches that split a + /// codepoint. + utf8: bool, + /// Whether this NFA should be matched in reverse or not. + reverse: bool, + /// The matcher to use for look-around assertions. + look_matcher: LookMatcher, + /// A size limit to respect when building an NFA. If the total heap memory + /// of the intermediate NFA states exceeds (or would exceed) this amount, + /// then an error is returned. + size_limit: Option, +} + +impl Builder { + /// Create a new builder for hand-assembling NFAs. + pub fn new() -> Builder { + Builder::default() + } + + /// Clear this builder. 
+ /// + /// Clearing removes all state associated with building an NFA, but does + /// not reset configuration (such as size limits and whether the NFA + /// should only match UTF-8). After clearing, the builder can be reused to + /// assemble an entirely new NFA. + pub fn clear(&mut self) { + self.pattern_id = None; + self.states.clear(); + self.start_pattern.clear(); + self.captures.clear(); + self.memory_states = 0; + } + + /// Assemble a [`NFA`] from the states added so far. + /// + /// After building an NFA, more states may be added and `build` may be + /// called again. To reuse a builder to produce an entirely new NFA from + /// scratch, call the [`clear`](Builder::clear) method first. + /// + /// `start_anchored` refers to the ID of the starting state that anchored + /// searches should use. That is, searches who matches are limited to the + /// starting position of the search. + /// + /// `start_unanchored` refers to the ID of the starting state that + /// unanchored searches should use. This permits searches to report matches + /// that start after the beginning of the search. In cases where unanchored + /// searches are not supported, the unanchored starting state ID must be + /// the same as the anchored starting state ID. + /// + /// # Errors + /// + /// This returns an error if there was a problem producing the final NFA. + /// In particular, this might include an error if the capturing groups + /// added to this builder violate any of the invariants documented on + /// [`GroupInfo`](crate::util::captures::GroupInfo). + /// + /// # Panics + /// + /// If `start_pattern` was called, then `finish_pattern` must be called + /// before `build`, otherwise this panics. + /// + /// This may panic for other invalid uses of a builder. For example, if + /// a "start capture" state was added without a corresponding "end capture" + /// state. + pub fn build( + &self, + start_anchored: StateID, + start_unanchored: StateID, + ) -> Result { + assert!(self.pattern_id.is_none(), "must call 'finish_pattern' first"); + debug!( + "intermediate NFA compilation via builder is complete, \ + intermediate NFA size: {} states, {} bytes on heap", + self.states.len(), + self.memory_usage(), + ); + + let mut nfa = nfa::Inner::default(); + nfa.set_utf8(self.utf8); + nfa.set_reverse(self.reverse); + nfa.set_look_matcher(self.look_matcher.clone()); + // A set of compiler internal state IDs that correspond to states + // that are exclusively epsilon transitions, i.e., goto instructions, + // combined with the state that they point to. This is used to + // record said states while transforming the compiler's internal NFA + // representation to the external form. + let mut empties = vec![]; + // A map used to re-map state IDs when translating this builder's + // internal NFA state representation to the final NFA representation. + let mut remap = vec![]; + remap.resize(self.states.len(), StateID::ZERO); + + nfa.set_starts(start_anchored, start_unanchored, &self.start_pattern); + nfa.set_captures(&self.captures).map_err(BuildError::captures)?; + // The idea here is to convert our intermediate states to their final + // form. The only real complexity here is the process of converting + // transitions, which are expressed in terms of state IDs. The new + // set of states will be smaller because of partial epsilon removal, + // so the state IDs will not be the same. 
+ for (sid, state) in self.states.iter().with_state_ids() { + match *state { + State::Empty { next } => { + // Since we're removing empty states, we need to handle + // them later since we don't yet know which new state this + // empty state will be mapped to. + empties.push((sid, next)); + } + State::ByteRange { trans } => { + remap[sid] = nfa.add(nfa::State::ByteRange { trans }); + } + State::Sparse { ref transitions } => { + remap[sid] = match transitions.len() { + 0 => nfa.add(nfa::State::Fail), + 1 => nfa.add(nfa::State::ByteRange { + trans: transitions[0], + }), + _ => { + let transitions = + transitions.to_vec().into_boxed_slice(); + let sparse = SparseTransitions { transitions }; + nfa.add(nfa::State::Sparse(sparse)) + } + } + } + State::Look { look, next } => { + remap[sid] = nfa.add(nfa::State::Look { look, next }); + } + State::CaptureStart { pattern_id, group_index, next } => { + // We can't remove this empty state because of the side + // effect of capturing an offset for this capture slot. + let slot = nfa + .group_info() + .slot(pattern_id, group_index.as_usize()) + .expect("invalid capture index"); + let slot = + SmallIndex::new(slot).expect("a small enough slot"); + remap[sid] = nfa.add(nfa::State::Capture { + next, + pattern_id, + group_index, + slot, + }); + } + State::CaptureEnd { pattern_id, group_index, next } => { + // We can't remove this empty state because of the side + // effect of capturing an offset for this capture slot. + // Also, this always succeeds because we check that all + // slot indices are valid for all capture indices when they + // are initially added. + let slot = nfa + .group_info() + .slot(pattern_id, group_index.as_usize()) + .expect("invalid capture index") + .checked_add(1) + .unwrap(); + let slot = + SmallIndex::new(slot).expect("a small enough slot"); + remap[sid] = nfa.add(nfa::State::Capture { + next, + pattern_id, + group_index, + slot, + }); + } + State::Union { ref alternates } => { + if alternates.is_empty() { + remap[sid] = nfa.add(nfa::State::Fail); + } else if alternates.len() == 1 { + empties.push((sid, alternates[0])); + remap[sid] = alternates[0]; + } else if alternates.len() == 2 { + remap[sid] = nfa.add(nfa::State::BinaryUnion { + alt1: alternates[0], + alt2: alternates[1], + }); + } else { + let alternates = + alternates.to_vec().into_boxed_slice(); + remap[sid] = nfa.add(nfa::State::Union { alternates }); + } + } + State::UnionReverse { ref alternates } => { + if alternates.is_empty() { + remap[sid] = nfa.add(nfa::State::Fail); + } else if alternates.len() == 1 { + empties.push((sid, alternates[0])); + remap[sid] = alternates[0]; + } else if alternates.len() == 2 { + remap[sid] = nfa.add(nfa::State::BinaryUnion { + alt1: alternates[1], + alt2: alternates[0], + }); + } else { + let mut alternates = + alternates.to_vec().into_boxed_slice(); + alternates.reverse(); + remap[sid] = nfa.add(nfa::State::Union { alternates }); + } + } + State::Fail => { + remap[sid] = nfa.add(nfa::State::Fail); + } + State::Match { pattern_id } => { + remap[sid] = nfa.add(nfa::State::Match { pattern_id }); + } + } + } + // Some of the new states still point to empty state IDs, so we need to + // follow each of them and remap the empty state IDs to their non-empty + // state IDs. + // + // We also keep track of which states we've already mapped. This helps + // avoid quadratic behavior in a long chain of empty states. For + // example, in 'a{0}{50000}'. 
+ let mut remapped = vec![false; self.states.len()]; + for &(empty_id, empty_next) in empties.iter() { + if remapped[empty_id] { + continue; + } + // empty states can point to other empty states, forming a chain. + // So we must follow the chain until the end, which must end at + // a non-empty state, and therefore, a state that is correctly + // remapped. We are guaranteed to terminate because our compiler + // never builds a loop among only empty states. + let mut new_next = empty_next; + while let Some(next) = self.states[new_next].goto() { + new_next = next; + } + remap[empty_id] = remap[new_next]; + remapped[empty_id] = true; + + // Now that we've remapped the main 'empty_id' above, we re-follow + // the chain from above and remap every empty state we found along + // the way to our ultimate non-empty target. We are careful to set + // 'remapped' to true for each such state. We thus will not need + // to re-compute this chain for any subsequent empty states in + // 'empties' that are part of this chain. + let mut next2 = empty_next; + while let Some(next) = self.states[next2].goto() { + remap[next2] = remap[new_next]; + remapped[next2] = true; + next2 = next; + } + } + // Finally remap all of the state IDs. + nfa.remap(&remap); + let final_nfa = nfa.into_nfa(); + debug!( + "NFA compilation via builder complete, \ + final NFA size: {} states, {} bytes on heap, \ + has empty? {:?}, utf8? {:?}", + final_nfa.states().len(), + final_nfa.memory_usage(), + final_nfa.has_empty(), + final_nfa.is_utf8(), + ); + Ok(final_nfa) + } + + /// Start the assembly of a pattern in this NFA. + /// + /// Upon success, this returns the identifier for the new pattern. + /// Identifiers start at `0` and are incremented by 1 for each new pattern. + /// + /// It is necessary to call this routine before adding capturing states. + /// Otherwise, any other NFA state may be added before starting a pattern. + /// + /// # Errors + /// + /// If the pattern identifier space is exhausted, then this returns an + /// error. + /// + /// # Panics + /// + /// If this is called while assembling another pattern (i.e., before + /// `finish_pattern` is called), then this panics. + pub fn start_pattern(&mut self) -> Result { + assert!(self.pattern_id.is_none(), "must call 'finish_pattern' first"); + + let proposed = self.start_pattern.len(); + let pid = PatternID::new(proposed) + .map_err(|_| BuildError::too_many_patterns(proposed))?; + self.pattern_id = Some(pid); + // This gets filled in when 'finish_pattern' is called. + self.start_pattern.push(StateID::ZERO); + Ok(pid) + } + + /// Finish the assembly of a pattern in this NFA. + /// + /// Upon success, this returns the identifier for the new pattern. + /// Identifiers start at `0` and are incremented by 1 for each new + /// pattern. This is the same identifier returned by the corresponding + /// `start_pattern` call. + /// + /// Note that `start_pattern` and `finish_pattern` pairs cannot be + /// interleaved or nested. A correct `finish_pattern` call _always_ + /// corresponds to the most recently called `start_pattern` routine. + /// + /// # Errors + /// + /// This currently never returns an error, but this is subject to change. + /// + /// # Panics + /// + /// If this is called without a corresponding `start_pattern` call, then + /// this panics. 
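+    ///
+    /// # Example
+    ///
+    /// A minimal sketch (not from the upstream docs) showing that
+    /// `finish_pattern` reports the same identifier handed out by the
+    /// corresponding `start_pattern` call. The "fail" start state below is
+    /// chosen purely for illustration.
+    ///
+    /// ```
+    /// use regex_automata::nfa::thompson::Builder;
+    ///
+    /// let mut builder = Builder::new();
+    /// let pid = builder.start_pattern()?;
+    /// // Any state can serve as the pattern's starting state; a "fail"
+    /// // state is simply the easiest one to add here.
+    /// let start = builder.add_fail()?;
+    /// assert_eq!(pid, builder.finish_pattern(start)?);
+    /// # Ok::<(), Box<dyn std::error::Error>>(())
+    /// ```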
+ pub fn finish_pattern( + &mut self, + start_id: StateID, + ) -> Result { + let pid = self.current_pattern_id(); + self.start_pattern[pid] = start_id; + self.pattern_id = None; + Ok(pid) + } + + /// Returns the pattern identifier of the current pattern. + /// + /// # Panics + /// + /// If this doesn't occur after a `start_pattern` call and before the + /// corresponding `finish_pattern` call, then this panics. + pub fn current_pattern_id(&self) -> PatternID { + self.pattern_id.expect("must call 'start_pattern' first") + } + + /// Returns the number of patterns added to this builder so far. + /// + /// This only includes patterns that have had `finish_pattern` called + /// for them. + pub fn pattern_len(&self) -> usize { + self.start_pattern.len() + } + + /// Add an "empty" NFA state. + /// + /// An "empty" NFA state is a state with a single unconditional epsilon + /// transition to another NFA state. Such empty states are removed before + /// building the final [`NFA`] (which has no such "empty" states), but they + /// can be quite useful in the construction process of an NFA. + /// + /// # Errors + /// + /// This returns an error if the state identifier space is exhausted, or if + /// the configured heap size limit has been exceeded. + pub fn add_empty(&mut self) -> Result { + self.add(State::Empty { next: StateID::ZERO }) + } + + /// Add a "union" NFA state. + /// + /// A "union" NFA state that contains zero or more unconditional epsilon + /// transitions to other NFA states. The order of these transitions + /// reflects a priority order where earlier transitions are preferred over + /// later transitions. + /// + /// Callers may provide an empty set of alternates to this method call, and + /// then later add transitions via `patch`. At final build time, a "union" + /// state with no alternates is converted to a "fail" state, and a "union" + /// state with exactly one alternate is treated as if it were an "empty" + /// state. + /// + /// # Errors + /// + /// This returns an error if the state identifier space is exhausted, or if + /// the configured heap size limit has been exceeded. + pub fn add_union( + &mut self, + alternates: Vec, + ) -> Result { + self.add(State::Union { alternates }) + } + + /// Add a "reverse union" NFA state. + /// + /// A "reverse union" NFA state contains zero or more unconditional epsilon + /// transitions to other NFA states. The order of these transitions + /// reflects a priority order where later transitions are preferred + /// over earlier transitions. This is an inverted priority order when + /// compared to `add_union`. This is useful, for example, for implementing + /// non-greedy repetition operators. + /// + /// Callers may provide an empty set of alternates to this method call, and + /// then later add transitions via `patch`. At final build time, a "reverse + /// union" state with no alternates is converted to a "fail" state, and a + /// "reverse union" state with exactly one alternate is treated as if it + /// were an "empty" state. + /// + /// # Errors + /// + /// This returns an error if the state identifier space is exhausted, or if + /// the configured heap size limit has been exceeded. + pub fn add_union_reverse( + &mut self, + alternates: Vec, + ) -> Result { + self.add(State::UnionReverse { alternates }) + } + + /// Add a "range" NFA state. 
+ /// + /// A "range" NFA state is a state with one outgoing transition to another + /// state, where that transition may only be followed if the current input + /// byte falls between a range of bytes given. + /// + /// # Errors + /// + /// This returns an error if the state identifier space is exhausted, or if + /// the configured heap size limit has been exceeded. + pub fn add_range( + &mut self, + trans: Transition, + ) -> Result { + self.add(State::ByteRange { trans }) + } + + /// Add a "sparse" NFA state. + /// + /// A "sparse" NFA state contains zero or more outgoing transitions, where + /// the transition to be followed (if any) is chosen based on whether the + /// current input byte falls in the range of one such transition. The + /// transitions given *must* be non-overlapping and in ascending order. (A + /// "sparse" state with no transitions is equivalent to a "fail" state.) + /// + /// A "sparse" state is like adding a "union" state and pointing it at a + /// bunch of "range" states, except that the different alternates have + /// equal priority. + /// + /// Note that a "sparse" state is the only state that cannot be patched. + /// This is because a "sparse" state has many transitions, each of which + /// may point to a different NFA state. Moreover, adding more such + /// transitions requires more than just an NFA state ID to point to. It + /// also requires a byte range. The `patch` routine does not support the + /// additional information required. Therefore, callers must ensure that + /// all outgoing transitions for this state are included when `add_sparse` + /// is called. There is no way to add more later. + /// + /// # Errors + /// + /// This returns an error if the state identifier space is exhausted, or if + /// the configured heap size limit has been exceeded. + /// + /// # Panics + /// + /// This routine _may_ panic if the transitions given overlap or are not + /// in ascending order. + pub fn add_sparse( + &mut self, + transitions: Vec, + ) -> Result { + self.add(State::Sparse { transitions }) + } + + /// Add a "look" NFA state. + /// + /// A "look" NFA state corresponds to a state with exactly one + /// *conditional* epsilon transition to another NFA state. Namely, it + /// represents one of a small set of simplistic look-around operators. + /// + /// Callers may provide a "dummy" state ID (typically [`StateID::ZERO`]), + /// and then change it later with [`patch`](Builder::patch). + /// + /// # Errors + /// + /// This returns an error if the state identifier space is exhausted, or if + /// the configured heap size limit has been exceeded. + pub fn add_look( + &mut self, + next: StateID, + look: Look, + ) -> Result { + self.add(State::Look { look, next }) + } + + /// Add a "start capture" NFA state. + /// + /// A "start capture" NFA state corresponds to a state with exactly one + /// outgoing unconditional epsilon transition to another state. Unlike + /// "empty" states, a "start capture" state also carries with it an + /// instruction for saving the current position of input to a particular + /// location in memory. NFA simulations, like the Pike VM, may use this + /// information to report the match locations of capturing groups in a + /// regex pattern. + /// + /// If the corresponding capturing group has a name, then callers should + /// include it here. + /// + /// Callers may provide a "dummy" state ID (typically [`StateID::ZERO`]), + /// and then change it later with [`patch`](Builder::patch). 
+ /// + /// Note that unlike `start_pattern`/`finish_pattern`, capturing start and + /// end states may be interleaved. Indeed, it is typical for many "start + /// capture" NFA states to appear before the first "end capture" state. + /// + /// # Errors + /// + /// This returns an error if the state identifier space is exhausted, or if + /// the configured heap size limit has been exceeded or if the given + /// capture index overflows `usize`. + /// + /// While the above are the only conditions in which this routine can + /// currently return an error, it is possible to call this method with an + /// inputs that results in the final `build()` step failing to produce an + /// NFA. For example, if one adds two distinct capturing groups with the + /// same name, then that will result in `build()` failing with an error. + /// + /// See the [`GroupInfo`](crate::util::captures::GroupInfo) type for + /// more information on what qualifies as valid capturing groups. + /// + /// # Example + /// + /// This example shows that an error occurs when one tries to add multiple + /// capturing groups with the same name to the same pattern. + /// + /// ``` + /// use regex_automata::{ + /// nfa::thompson::Builder, + /// util::primitives::StateID, + /// }; + /// + /// let name = Some(std::sync::Arc::from("foo")); + /// let mut builder = Builder::new(); + /// builder.start_pattern()?; + /// // 0th capture group should always be unnamed. + /// let start = builder.add_capture_start(StateID::ZERO, 0, None)?; + /// // OK + /// builder.add_capture_start(StateID::ZERO, 1, name.clone())?; + /// // This is not OK, but 'add_capture_start' still succeeds. We don't + /// // get an error until we call 'build' below. Without this call, the + /// // call to 'build' below would succeed. + /// builder.add_capture_start(StateID::ZERO, 2, name.clone())?; + /// // Finish our pattern so we can try to build the NFA. + /// builder.finish_pattern(start)?; + /// let result = builder.build(start, start); + /// assert!(result.is_err()); + /// + /// # Ok::<(), Box>(()) + /// ``` + /// + /// However, adding multiple capturing groups with the same name to + /// distinct patterns is okay: + /// + /// ``` + /// use std::sync::Arc; + /// + /// use regex_automata::{ + /// nfa::thompson::{pikevm::PikeVM, Builder, Transition}, + /// util::{ + /// captures::Captures, + /// primitives::{PatternID, StateID}, + /// }, + /// Span, + /// }; + /// + /// // Hand-compile the patterns '(?P[a-z])' and '(?P[A-Z])'. + /// let mut builder = Builder::new(); + /// // We compile them to support an unanchored search, which requires + /// // adding an implicit '(?s-u:.)*?' prefix before adding either pattern. + /// let unanchored_prefix = builder.add_union_reverse(vec![])?; + /// let any = builder.add_range(Transition { + /// start: b'\x00', end: b'\xFF', next: StateID::ZERO, + /// })?; + /// builder.patch(unanchored_prefix, any)?; + /// builder.patch(any, unanchored_prefix)?; + /// + /// // Compile an alternation that permits matching multiple patterns. + /// let alt = builder.add_union(vec![])?; + /// builder.patch(unanchored_prefix, alt)?; + /// + /// // Compile '(?P[a-z]+)'. + /// builder.start_pattern()?; + /// let start0 = builder.add_capture_start(StateID::ZERO, 0, None)?; + /// // N.B. 0th capture group must always be unnamed. 
+ /// let foo_start0 = builder.add_capture_start( + /// StateID::ZERO, 1, Some(Arc::from("foo")), + /// )?; + /// let lowercase = builder.add_range(Transition { + /// start: b'a', end: b'z', next: StateID::ZERO, + /// })?; + /// let foo_end0 = builder.add_capture_end(StateID::ZERO, 1)?; + /// let end0 = builder.add_capture_end(StateID::ZERO, 0)?; + /// let match0 = builder.add_match()?; + /// builder.patch(start0, foo_start0)?; + /// builder.patch(foo_start0, lowercase)?; + /// builder.patch(lowercase, foo_end0)?; + /// builder.patch(foo_end0, end0)?; + /// builder.patch(end0, match0)?; + /// builder.finish_pattern(start0)?; + /// + /// // Compile '(?P[A-Z]+)'. + /// builder.start_pattern()?; + /// let start1 = builder.add_capture_start(StateID::ZERO, 0, None)?; + /// // N.B. 0th capture group must always be unnamed. + /// let foo_start1 = builder.add_capture_start( + /// StateID::ZERO, 1, Some(Arc::from("foo")), + /// )?; + /// let uppercase = builder.add_range(Transition { + /// start: b'A', end: b'Z', next: StateID::ZERO, + /// })?; + /// let foo_end1 = builder.add_capture_end(StateID::ZERO, 1)?; + /// let end1 = builder.add_capture_end(StateID::ZERO, 0)?; + /// let match1 = builder.add_match()?; + /// builder.patch(start1, foo_start1)?; + /// builder.patch(foo_start1, uppercase)?; + /// builder.patch(uppercase, foo_end1)?; + /// builder.patch(foo_end1, end1)?; + /// builder.patch(end1, match1)?; + /// builder.finish_pattern(start1)?; + /// + /// // Now add the patterns to our alternation that we started above. + /// builder.patch(alt, start0)?; + /// builder.patch(alt, start1)?; + /// + /// // Finally build the NFA. The first argument is the anchored starting + /// // state (the pattern alternation) where as the second is the + /// // unanchored starting state (the unanchored prefix). + /// let nfa = builder.build(alt, unanchored_prefix)?; + /// + /// // Now build a Pike VM from our NFA and access the 'foo' capture + /// // group regardless of which pattern matched, since it is defined + /// // for both patterns. + /// let vm = PikeVM::new_from_nfa(nfa)?; + /// let mut cache = vm.create_cache(); + /// let caps: Vec = + /// vm.captures_iter(&mut cache, "0123aAaAA").collect(); + /// assert_eq!(5, caps.len()); + /// + /// assert_eq!(Some(PatternID::must(0)), caps[0].pattern()); + /// assert_eq!(Some(Span::from(4..5)), caps[0].get_group_by_name("foo")); + /// + /// assert_eq!(Some(PatternID::must(1)), caps[1].pattern()); + /// assert_eq!(Some(Span::from(5..6)), caps[1].get_group_by_name("foo")); + /// + /// assert_eq!(Some(PatternID::must(0)), caps[2].pattern()); + /// assert_eq!(Some(Span::from(6..7)), caps[2].get_group_by_name("foo")); + /// + /// assert_eq!(Some(PatternID::must(1)), caps[3].pattern()); + /// assert_eq!(Some(Span::from(7..8)), caps[3].get_group_by_name("foo")); + /// + /// assert_eq!(Some(PatternID::must(1)), caps[4].pattern()); + /// assert_eq!(Some(Span::from(8..9)), caps[4].get_group_by_name("foo")); + /// + /// # Ok::<(), Box>(()) + /// ``` + pub fn add_capture_start( + &mut self, + next: StateID, + group_index: u32, + name: Option>, + ) -> Result { + let pid = self.current_pattern_id(); + let group_index = match SmallIndex::try_from(group_index) { + Err(_) => { + return Err(BuildError::invalid_capture_index(group_index)) + } + Ok(group_index) => group_index, + }; + // Make sure we have space to insert our (pid,index)|-->name mapping. 
+ if pid.as_usize() >= self.captures.len() { + for _ in 0..=(pid.as_usize() - self.captures.len()) { + self.captures.push(vec![]); + } + } + // In the case where 'group_index < self.captures[pid].len()', it means + // that we are adding a duplicate capture group. This is somewhat + // weird, but permissible because the capture group itself can be + // repeated in the syntax. For example, '([a-z]){4}' will produce 4 + // capture groups. In practice, only the last will be set at search + // time when a match occurs. For duplicates, we don't need to push + // anything other than a CaptureStart NFA state. + if group_index.as_usize() >= self.captures[pid].len() { + // For discontiguous indices, push placeholders for earlier capture + // groups that weren't explicitly added. + for _ in 0..(group_index.as_usize() - self.captures[pid].len()) { + self.captures[pid].push(None); + } + self.captures[pid].push(name); + } + self.add(State::CaptureStart { pattern_id: pid, group_index, next }) + } + + /// Add a "end capture" NFA state. + /// + /// A "end capture" NFA state corresponds to a state with exactly one + /// outgoing unconditional epsilon transition to another state. Unlike + /// "empty" states, a "end capture" state also carries with it an + /// instruction for saving the current position of input to a particular + /// location in memory. NFA simulations, like the Pike VM, may use this + /// information to report the match locations of capturing groups in a + /// + /// Callers may provide a "dummy" state ID (typically [`StateID::ZERO`]), + /// and then change it later with [`patch`](Builder::patch). + /// + /// Note that unlike `start_pattern`/`finish_pattern`, capturing start and + /// end states may be interleaved. Indeed, it is typical for many "start + /// capture" NFA states to appear before the first "end capture" state. + /// + /// # Errors + /// + /// This returns an error if the state identifier space is exhausted, or if + /// the configured heap size limit has been exceeded or if the given + /// capture index overflows `usize`. + /// + /// While the above are the only conditions in which this routine can + /// currently return an error, it is possible to call this method with an + /// inputs that results in the final `build()` step failing to produce an + /// NFA. For example, if one adds two distinct capturing groups with the + /// same name, then that will result in `build()` failing with an error. + /// + /// See the [`GroupInfo`](crate::util::captures::GroupInfo) type for + /// more information on what qualifies as valid capturing groups. + pub fn add_capture_end( + &mut self, + next: StateID, + group_index: u32, + ) -> Result { + let pid = self.current_pattern_id(); + let group_index = match SmallIndex::try_from(group_index) { + Err(_) => { + return Err(BuildError::invalid_capture_index(group_index)) + } + Ok(group_index) => group_index, + }; + self.add(State::CaptureEnd { pattern_id: pid, group_index, next }) + } + + /// Adds a "fail" NFA state. + /// + /// A "fail" state is simply a state that has no outgoing transitions. It + /// acts as a way to cause a search to stop without reporting a match. + /// For example, one way to represent an NFA with zero patterns is with a + /// single "fail" state. + /// + /// # Errors + /// + /// This returns an error if the state identifier space is exhausted, or if + /// the configured heap size limit has been exceeded. + pub fn add_fail(&mut self) -> Result { + self.add(State::Fail) + } + + /// Adds a "match" NFA state. 
+ /// + /// A "match" state has no outgoing transitions (just like a "fail" + /// state), but it has special significance in that if a search enters + /// this state, then a match has been found. The match state that is added + /// automatically has the current pattern ID associated with it. This is + /// used to report the matching pattern ID at search time. + /// + /// # Errors + /// + /// This returns an error if the state identifier space is exhausted, or if + /// the configured heap size limit has been exceeded. + /// + /// # Panics + /// + /// This must be called after a `start_pattern` call but before the + /// corresponding `finish_pattern` call. Otherwise, it panics. + pub fn add_match(&mut self) -> Result { + let pattern_id = self.current_pattern_id(); + let sid = self.add(State::Match { pattern_id })?; + Ok(sid) + } + + /// The common implementation of "add a state." It handles the common + /// error cases of state ID exhausting (by owning state ID allocation) and + /// whether the size limit has been exceeded. + fn add(&mut self, state: State) -> Result { + let id = StateID::new(self.states.len()) + .map_err(|_| BuildError::too_many_states(self.states.len()))?; + self.memory_states += state.memory_usage(); + self.states.push(state); + self.check_size_limit()?; + Ok(id) + } + + /// Add a transition from one state to another. + /// + /// This routine is called "patch" since it is very common to add the + /// states you want, typically with "dummy" state ID transitions, and then + /// "patch" in the real state IDs later. This is because you don't always + /// know all of the necessary state IDs to add because they might not + /// exist yet. + /// + /// # Errors + /// + /// This may error if patching leads to an increase in heap usage beyond + /// the configured size limit. Heap usage only grows when patching adds a + /// new transition (as in the case of a "union" state). + /// + /// # Panics + /// + /// This panics if `from` corresponds to a "sparse" state. When "sparse" + /// states are added, there is no way to patch them after-the-fact. (If you + /// have a use case where this would be helpful, please file an issue. It + /// will likely require a new API.) + pub fn patch( + &mut self, + from: StateID, + to: StateID, + ) -> Result<(), BuildError> { + let old_memory_states = self.memory_states; + match self.states[from] { + State::Empty { ref mut next } => { + *next = to; + } + State::ByteRange { ref mut trans } => { + trans.next = to; + } + State::Sparse { .. } => { + panic!("cannot patch from a sparse NFA state") + } + State::Look { ref mut next, .. } => { + *next = to; + } + State::Union { ref mut alternates } => { + alternates.push(to); + self.memory_states += mem::size_of::(); + } + State::UnionReverse { ref mut alternates } => { + alternates.push(to); + self.memory_states += mem::size_of::(); + } + State::CaptureStart { ref mut next, .. } => { + *next = to; + } + State::CaptureEnd { ref mut next, .. } => { + *next = to; + } + State::Fail => {} + State::Match { .. } => {} + } + if old_memory_states != self.memory_states { + self.check_size_limit()?; + } + Ok(()) + } + + /// Set whether the NFA produced by this builder should only match UTF-8. + /// + /// This should be set when both of the following are true: + /// + /// 1. The caller guarantees that the NFA created by this build will only + /// report non-empty matches with spans that are valid UTF-8. + /// 2. 
The caller desires regex engines using this NFA to avoid reporting + /// empty matches with a span that splits a valid UTF-8 encoded codepoint. + /// + /// Property (1) is not checked. Instead, this requires the caller to + /// promise that it is true. Property (2) corresponds to the behavior of + /// regex engines using the NFA created by this builder. Namely, there + /// is no way in the NFA's graph itself to say that empty matches found + /// by, for example, the regex `a*` will fall on valid UTF-8 boundaries. + /// Instead, this option is used to communicate the UTF-8 semantic to regex + /// engines that will typically implement it as a post-processing step by + /// filtering out empty matches that don't fall on UTF-8 boundaries. + /// + /// If you're building an NFA from an HIR (and not using a + /// [`thompson::Compiler`](crate::nfa::thompson::Compiler)), then you can + /// use the [`syntax::Config::utf8`](crate::util::syntax::Config::utf8) + /// option to guarantee that if the HIR detects a non-empty match, then it + /// is guaranteed to be valid UTF-8. + /// + /// Note that property (2) does *not* specify the behavior of executing + /// a search on a haystack that is not valid UTF-8. Therefore, if you're + /// *not* running this NFA on strings that are guaranteed to be valid + /// UTF-8, you almost certainly do not want to enable this option. + /// Similarly, if you are running the NFA on strings that *are* guaranteed + /// to be valid UTF-8, then you almost certainly want to enable this option + /// unless you can guarantee that your NFA will never produce a zero-width + /// match. + /// + /// It is disabled by default. + pub fn set_utf8(&mut self, yes: bool) { + self.utf8 = yes; + } + + /// Returns whether UTF-8 mode is enabled for this builder. + /// + /// See [`Builder::set_utf8`] for more details about what "UTF-8 mode" is. + pub fn get_utf8(&self) -> bool { + self.utf8 + } + + /// Sets whether the NFA produced by this builder should be matched in + /// reverse or not. Generally speaking, when enabled, the NFA produced + /// should be matched by moving backwards through a haystack, from a higher + /// memory address to a lower memory address. + /// + /// See also [`NFA::is_reverse`] for more details. + /// + /// This is disabled by default, which means NFAs are by default matched + /// in the forward direction. + pub fn set_reverse(&mut self, yes: bool) { + self.reverse = yes; + } + + /// Returns whether reverse mode is enabled for this builder. + /// + /// See [`Builder::set_reverse`] for more details about what "reverse mode" + /// is. + pub fn get_reverse(&self) -> bool { + self.reverse + } + + /// Sets the look-around matcher that should be used for the resulting NFA. + /// + /// A look-around matcher can be used to configure how look-around + /// assertions are matched. For example, a matcher might carry + /// configuration that changes the line terminator used for `(?m:^)` and + /// `(?m:$)` assertions. + pub fn set_look_matcher(&mut self, m: LookMatcher) { + self.look_matcher = m; + } + + /// Returns the look-around matcher used for this builder. + /// + /// If a matcher was not explicitly set, then `LookMatcher::default()` is + /// returned. + pub fn get_look_matcher(&self) -> &LookMatcher { + &self.look_matcher + } + + /// Set the size limit on this builder. + /// + /// Setting the size limit will also check whether the NFA built so far + /// fits within the given size limit. If it doesn't, then an error is + /// returned. 
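+    ///
+    /// For example (a minimal sketch of that behavior; an empty builder has
+    /// no states yet, so even a zero limit passes the check):
+    ///
+    /// ```
+    /// use regex_automata::nfa::thompson::Builder;
+    ///
+    /// let mut builder = Builder::new();
+    /// // No states have been added, so memory usage is zero and the
+    /// // size check performed by `set_size_limit` succeeds.
+    /// builder.set_size_limit(Some(0))?;
+    /// # Ok::<(), Box<dyn std::error::Error>>(())
+    /// ```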
+    ///
+    /// By default, there is no configured size limit.
+    pub fn set_size_limit(
+        &mut self,
+        limit: Option<usize>,
+    ) -> Result<(), BuildError> {
+        self.size_limit = limit;
+        self.check_size_limit()
+    }
+
+    /// Return the currently configured size limit.
+    ///
+    /// By default, this returns `None`, which corresponds to no configured
+    /// size limit.
+    pub fn get_size_limit(&self) -> Option<usize> {
+        self.size_limit
+    }
+
+    /// Returns the heap memory usage, in bytes, used by the NFA states added
+    /// so far.
+    ///
+    /// Note that this is an approximation of how big the final NFA will be.
+    /// In practice, the final NFA will likely be a bit smaller because of
+    /// its simpler state representation. (For example, using things like
+    /// `Box<[StateID]>` instead of `Vec<StateID>`.)
+    pub fn memory_usage(&self) -> usize {
+        self.states.len() * mem::size_of::<State>() + self.memory_states
+    }
+
+    fn check_size_limit(&self) -> Result<(), BuildError> {
+        if let Some(limit) = self.size_limit {
+            if self.memory_usage() > limit {
+                return Err(BuildError::exceeded_size_limit(limit));
+            }
+        }
+        Ok(())
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    // This asserts that a builder state doesn't have its size changed. It is
+    // *really* easy to accidentally increase the size, and thus potentially
+    // dramatically increase the memory usage of the NFA builder.
+    //
+    // This assert doesn't mean we absolutely cannot increase the size of a
+    // builder state. We can. It's just here to make sure we do it knowingly
+    // and intentionally.
+    //
+    // A builder state is unfortunately a little bigger than an NFA state,
+    // since we really want to support adding things to a pre-existing state.
+    // i.e., we use Vec<thing> instead of Box<[thing]>. So we end up using an
+    // extra 8 bytes per state. Sad, but at least it gets freed once the NFA
+    // is built.
+    #[test]
+    fn state_has_small_size() {
+        #[cfg(target_pointer_width = "64")]
+        assert_eq!(32, core::mem::size_of::<State>());
+        #[cfg(target_pointer_width = "32")]
+        assert_eq!(16, core::mem::size_of::<State>());
+    }
+}
diff --git a/vendor/regex-automata/src/nfa/thompson/compiler.rs b/vendor/regex-automata/src/nfa/thompson/compiler.rs
new file mode 100644
index 00000000000000..96a39ac4ebac08
--- /dev/null
+++ b/vendor/regex-automata/src/nfa/thompson/compiler.rs
@@ -0,0 +1,2368 @@
+use core::{borrow::Borrow, cell::RefCell};
+
+use alloc::{sync::Arc, vec, vec::Vec};
+
+use regex_syntax::{
+    hir::{self, Hir},
+    utf8::{Utf8Range, Utf8Sequences},
+    ParserBuilder,
+};
+
+use crate::{
+    nfa::thompson::{
+        builder::Builder,
+        error::BuildError,
+        literal_trie::LiteralTrie,
+        map::{Utf8BoundedMap, Utf8SuffixKey, Utf8SuffixMap},
+        nfa::{Transition, NFA},
+        range_trie::RangeTrie,
+    },
+    util::{
+        look::{Look, LookMatcher},
+        primitives::{PatternID, StateID},
+    },
+};
+
+/// The configuration used for a Thompson NFA compiler.
+#[derive(Clone, Debug, Default)]
+pub struct Config {
+    utf8: Option<bool>,
+    reverse: Option<bool>,
+    nfa_size_limit: Option<Option<usize>>,
+    shrink: Option<bool>,
+    which_captures: Option<WhichCaptures>,
+    look_matcher: Option<LookMatcher>,
+    #[cfg(test)]
+    unanchored_prefix: Option<bool>,
+}
+
+impl Config {
+    /// Return a new default Thompson NFA compiler configuration.
+    pub fn new() -> Config {
+        Config::default()
+    }
+
+    /// Whether to enable UTF-8 mode during search or not.
+    ///
+    /// A regex engine is said to be in UTF-8 mode when it guarantees that
+    /// all matches returned by it have spans consisting of only valid UTF-8.
+    /// That is, it is impossible for a match span to be returned that
+    /// contains any invalid UTF-8.
+ /// + /// UTF-8 mode generally consists of two things: + /// + /// 1. Whether the NFA's states are constructed such that all paths to a + /// match state that consume at least one byte always correspond to valid + /// UTF-8. + /// 2. Whether all paths to a match state that do _not_ consume any bytes + /// should always correspond to valid UTF-8 boundaries. + /// + /// (1) is a guarantee made by whoever constructs the NFA. + /// If you're parsing a regex from its concrete syntax, then + /// [`syntax::Config::utf8`](crate::util::syntax::Config::utf8) can make + /// this guarantee for you. It does it by returning an error if the regex + /// pattern could every report a non-empty match span that contains invalid + /// UTF-8. So long as `syntax::Config::utf8` mode is enabled and your regex + /// successfully parses, then you're guaranteed that the corresponding NFA + /// will only ever report non-empty match spans containing valid UTF-8. + /// + /// (2) is a trickier guarantee because it cannot be enforced by the NFA + /// state graph itself. Consider, for example, the regex `a*`. It matches + /// the empty strings in `☃` at positions `0`, `1`, `2` and `3`, where + /// positions `1` and `2` occur within the UTF-8 encoding of a codepoint, + /// and thus correspond to invalid UTF-8 boundaries. Therefore, this + /// guarantee must be made at a higher level than the NFA state graph + /// itself. This crate deals with this case in each regex engine. Namely, + /// when a zero-width match that splits a codepoint is found and UTF-8 + /// mode enabled, then it is ignored and the engine moves on looking for + /// the next match. + /// + /// Thus, UTF-8 mode is both a promise that the NFA built only reports + /// non-empty matches that are valid UTF-8, and an *instruction* to regex + /// engines that empty matches that split codepoints should be banned. + /// + /// Because UTF-8 mode is fundamentally about avoiding invalid UTF-8 spans, + /// it only makes sense to enable this option when you *know* your haystack + /// is valid UTF-8. (For example, a `&str`.) Enabling UTF-8 mode and + /// searching a haystack that contains invalid UTF-8 leads to **unspecified + /// behavior**. + /// + /// Therefore, it may make sense to enable `syntax::Config::utf8` while + /// simultaneously *disabling* this option. That would ensure all non-empty + /// match spans are valid UTF-8, but that empty match spans may still split + /// a codepoint or match at other places that aren't valid UTF-8. + /// + /// In general, this mode is only relevant if your regex can match the + /// empty string. Most regexes don't. + /// + /// This is enabled by default. + /// + /// # Example + /// + /// This example shows how UTF-8 mode can impact the match spans that may + /// be reported in certain cases. + /// + /// ``` + /// use regex_automata::{ + /// nfa::thompson::{self, pikevm::PikeVM}, + /// Match, Input, + /// }; + /// + /// let re = PikeVM::new("")?; + /// let (mut cache, mut caps) = (re.create_cache(), re.create_captures()); + /// + /// // UTF-8 mode is enabled by default. + /// let mut input = Input::new("☃"); + /// re.search(&mut cache, &input, &mut caps); + /// assert_eq!(Some(Match::must(0, 0..0)), caps.get_match()); + /// + /// // Even though an empty regex matches at 1..1, our next match is + /// // 3..3 because 1..1 and 2..2 split the snowman codepoint (which is + /// // three bytes long). 
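+    /// // (All of the positions in this example are byte offsets into the
+    /// // haystack, which is why the three-byte snowman occupies 0..3.)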
+ /// input.set_start(1); + /// re.search(&mut cache, &input, &mut caps); + /// assert_eq!(Some(Match::must(0, 3..3)), caps.get_match()); + /// + /// // But if we disable UTF-8, then we'll get matches at 1..1 and 2..2: + /// let re = PikeVM::builder() + /// .thompson(thompson::Config::new().utf8(false)) + /// .build("")?; + /// re.search(&mut cache, &input, &mut caps); + /// assert_eq!(Some(Match::must(0, 1..1)), caps.get_match()); + /// + /// input.set_start(2); + /// re.search(&mut cache, &input, &mut caps); + /// assert_eq!(Some(Match::must(0, 2..2)), caps.get_match()); + /// + /// input.set_start(3); + /// re.search(&mut cache, &input, &mut caps); + /// assert_eq!(Some(Match::must(0, 3..3)), caps.get_match()); + /// + /// input.set_start(4); + /// re.search(&mut cache, &input, &mut caps); + /// assert_eq!(None, caps.get_match()); + /// + /// # Ok::<(), Box>(()) + /// ``` + pub fn utf8(mut self, yes: bool) -> Config { + self.utf8 = Some(yes); + self + } + + /// Reverse the NFA. + /// + /// A NFA reversal is performed by reversing all of the concatenated + /// sub-expressions in the original pattern, recursively. (Look around + /// operators are also inverted.) The resulting NFA can be used to match + /// the pattern starting from the end of a string instead of the beginning + /// of a string. + /// + /// Reversing the NFA is useful for building a reverse DFA, which is most + /// useful for finding the start of a match after its ending position has + /// been found. NFA execution engines typically do not work on reverse + /// NFAs. For example, currently, the Pike VM reports the starting location + /// of matches without a reverse NFA. + /// + /// Currently, enabling this setting requires disabling the + /// [`captures`](Config::captures) setting. If both are enabled, then the + /// compiler will return an error. It is expected that this limitation will + /// be lifted in the future. + /// + /// This is disabled by default. + /// + /// # Example + /// + /// This example shows how to build a DFA from a reverse NFA, and then use + /// the DFA to search backwards. + /// + /// ``` + /// use regex_automata::{ + /// dfa::{self, Automaton}, + /// nfa::thompson::{NFA, WhichCaptures}, + /// HalfMatch, Input, + /// }; + /// + /// let dfa = dfa::dense::Builder::new() + /// .thompson(NFA::config() + /// .which_captures(WhichCaptures::None) + /// .reverse(true) + /// ) + /// .build("baz[0-9]+")?; + /// let expected = Some(HalfMatch::must(0, 3)); + /// assert_eq!( + /// expected, + /// dfa.try_search_rev(&Input::new("foobaz12345bar"))?, + /// ); + /// + /// # Ok::<(), Box>(()) + /// ``` + pub fn reverse(mut self, yes: bool) -> Config { + self.reverse = Some(yes); + self + } + + /// Sets an approximate size limit on the total heap used by the NFA being + /// compiled. + /// + /// This permits imposing constraints on the size of a compiled NFA. This + /// may be useful in contexts where the regex pattern is untrusted and one + /// wants to avoid using too much memory. + /// + /// This size limit does not apply to auxiliary heap used during + /// compilation that is not part of the built NFA. + /// + /// Note that this size limit is applied during compilation in order for + /// the limit to prevent too much heap from being used. However, the + /// implementation may use an intermediate NFA representation that is + /// otherwise slightly bigger than the final public form. 
Since the size + /// limit may be applied to an intermediate representation, there is not + /// necessarily a precise correspondence between the configured size limit + /// and the heap usage of the final NFA. + /// + /// There is no size limit by default. + /// + /// # Example + /// + /// This example demonstrates how Unicode mode can greatly increase the + /// size of the NFA. + /// + /// ``` + /// # if cfg!(miri) { return Ok(()); } // miri takes too long + /// use regex_automata::nfa::thompson::NFA; + /// + /// // 300KB isn't enough! + /// NFA::compiler() + /// .configure(NFA::config().nfa_size_limit(Some(300_000))) + /// .build(r"\w{20}") + /// .unwrap_err(); + /// + /// // ... but 500KB probably is. + /// let nfa = NFA::compiler() + /// .configure(NFA::config().nfa_size_limit(Some(500_000))) + /// .build(r"\w{20}")?; + /// + /// assert_eq!(nfa.pattern_len(), 1); + /// + /// # Ok::<(), Box>(()) + /// ``` + pub fn nfa_size_limit(mut self, bytes: Option) -> Config { + self.nfa_size_limit = Some(bytes); + self + } + + /// Apply best effort heuristics to shrink the NFA at the expense of more + /// time/memory. + /// + /// Generally speaking, if one is using an NFA to compile a DFA, then the + /// extra time used to shrink the NFA will be more than made up for during + /// DFA construction (potentially by a lot). In other words, enabling this + /// can substantially decrease the overall amount of time it takes to build + /// a DFA. + /// + /// A reason to keep this disabled is if you want to compile an NFA and + /// start using it as quickly as possible without needing to build a DFA, + /// and you don't mind using a bit of extra memory for the NFA. e.g., for + /// an NFA simulation or for a lazy DFA. + /// + /// NFA shrinking is currently most useful when compiling a reverse + /// NFA with large Unicode character classes. In particular, it trades + /// additional CPU time during NFA compilation in favor of generating fewer + /// NFA states. + /// + /// This is disabled by default because it can increase compile times + /// quite a bit if you aren't building a full DFA. + /// + /// # Example + /// + /// This example shows that NFA shrinking can lead to substantial space + /// savings in some cases. Notice that, as noted above, we build a reverse + /// DFA and use a pattern with a large Unicode character class. + /// + /// ``` + /// # if cfg!(miri) { return Ok(()); } // miri takes too long + /// use regex_automata::nfa::thompson::{NFA, WhichCaptures}; + /// + /// // Currently we have to disable captures when enabling reverse NFA. + /// let config = NFA::config() + /// .which_captures(WhichCaptures::None) + /// .reverse(true); + /// let not_shrunk = NFA::compiler() + /// .configure(config.clone().shrink(false)) + /// .build(r"\w")?; + /// let shrunk = NFA::compiler() + /// .configure(config.clone().shrink(true)) + /// .build(r"\w")?; + /// + /// // While a specific shrink factor is not guaranteed, the savings can be + /// // considerable in some cases. + /// assert!(shrunk.states().len() * 2 < not_shrunk.states().len()); + /// + /// # Ok::<(), Box>(()) + /// ``` + pub fn shrink(mut self, yes: bool) -> Config { + self.shrink = Some(yes); + self + } + + /// Whether to include 'Capture' states in the NFA. + /// + /// Currently, enabling this setting requires disabling the + /// [`reverse`](Config::reverse) setting. If both are enabled, then the + /// compiler will return an error. It is expected that this limitation will + /// be lifted in the future. + /// + /// This is enabled by default. 
+ /// + /// # Example + /// + /// This example demonstrates that some regex engines, like the Pike VM, + /// require capturing states to be present in the NFA to report match + /// offsets. + /// + /// (Note that since this method is deprecated, the example below uses + /// [`Config::which_captures`] to disable capture states.) + /// + /// ``` + /// use regex_automata::nfa::thompson::{ + /// pikevm::PikeVM, + /// NFA, + /// WhichCaptures, + /// }; + /// + /// let re = PikeVM::builder() + /// .thompson(NFA::config().which_captures(WhichCaptures::None)) + /// .build(r"[a-z]+")?; + /// let mut cache = re.create_cache(); + /// + /// assert!(re.is_match(&mut cache, "abc")); + /// assert_eq!(None, re.find(&mut cache, "abc")); + /// + /// # Ok::<(), Box>(()) + /// ``` + #[deprecated(since = "0.3.5", note = "use which_captures instead")] + pub fn captures(self, yes: bool) -> Config { + self.which_captures(if yes { + WhichCaptures::All + } else { + WhichCaptures::None + }) + } + + /// Configures what kinds of capture groups are compiled into + /// [`State::Capture`](crate::nfa::thompson::State::Capture) states in a + /// Thompson NFA. + /// + /// Currently, using any option except for [`WhichCaptures::None`] requires + /// disabling the [`reverse`](Config::reverse) setting. If both are + /// enabled, then the compiler will return an error. It is expected that + /// this limitation will be lifted in the future. + /// + /// This is set to [`WhichCaptures::All`] by default. Callers may wish to + /// use [`WhichCaptures::Implicit`] in cases where one wants avoid the + /// overhead of capture states for explicit groups. Usually this occurs + /// when one wants to use the `PikeVM` only for determining the overall + /// match. Otherwise, the `PikeVM` could use much more memory than is + /// necessary. + /// + /// # Example + /// + /// This example demonstrates that some regex engines, like the Pike VM, + /// require capturing states to be present in the NFA to report match + /// offsets. + /// + /// ``` + /// use regex_automata::nfa::thompson::{ + /// pikevm::PikeVM, + /// NFA, + /// WhichCaptures, + /// }; + /// + /// let re = PikeVM::builder() + /// .thompson(NFA::config().which_captures(WhichCaptures::None)) + /// .build(r"[a-z]+")?; + /// let mut cache = re.create_cache(); + /// + /// assert!(re.is_match(&mut cache, "abc")); + /// assert_eq!(None, re.find(&mut cache, "abc")); + /// + /// # Ok::<(), Box>(()) + /// ``` + /// + /// The same applies to the bounded backtracker: + /// + /// ``` + /// use regex_automata::nfa::thompson::{ + /// backtrack::BoundedBacktracker, + /// NFA, + /// WhichCaptures, + /// }; + /// + /// let re = BoundedBacktracker::builder() + /// .thompson(NFA::config().which_captures(WhichCaptures::None)) + /// .build(r"[a-z]+")?; + /// let mut cache = re.create_cache(); + /// + /// assert!(re.try_is_match(&mut cache, "abc")?); + /// assert_eq!(None, re.try_find(&mut cache, "abc")?); + /// + /// # Ok::<(), Box>(()) + /// ``` + pub fn which_captures(mut self, which_captures: WhichCaptures) -> Config { + self.which_captures = Some(which_captures); + self + } + + /// Sets the look-around matcher that should be used with this NFA. + /// + /// A look-around matcher determines how to match look-around assertions. + /// In particular, some assertions are configurable. For example, the + /// `(?m:^)` and `(?m:$)` assertions can have their line terminator changed + /// from the default of `\n` to any other byte. 
+ /// + /// # Example + /// + /// This shows how to change the line terminator for multi-line assertions. + /// + /// ``` + /// use regex_automata::{ + /// nfa::thompson::{self, pikevm::PikeVM}, + /// util::look::LookMatcher, + /// Match, Input, + /// }; + /// + /// let mut lookm = LookMatcher::new(); + /// lookm.set_line_terminator(b'\x00'); + /// + /// let re = PikeVM::builder() + /// .thompson(thompson::Config::new().look_matcher(lookm)) + /// .build(r"(?m)^[a-z]+$")?; + /// let mut cache = re.create_cache(); + /// + /// // Multi-line assertions now use NUL as a terminator. + /// assert_eq!( + /// Some(Match::must(0, 1..4)), + /// re.find(&mut cache, b"\x00abc\x00"), + /// ); + /// // ... and \n is no longer recognized as a terminator. + /// assert_eq!( + /// None, + /// re.find(&mut cache, b"\nabc\n"), + /// ); + /// + /// # Ok::<(), Box>(()) + /// ``` + pub fn look_matcher(mut self, m: LookMatcher) -> Config { + self.look_matcher = Some(m); + self + } + + /// Whether to compile an unanchored prefix into this NFA. + /// + /// This is enabled by default. It is made available for tests only to make + /// it easier to unit test the output of the compiler. + #[cfg(test)] + fn unanchored_prefix(mut self, yes: bool) -> Config { + self.unanchored_prefix = Some(yes); + self + } + + /// Returns whether this configuration has enabled UTF-8 mode. + pub fn get_utf8(&self) -> bool { + self.utf8.unwrap_or(true) + } + + /// Returns whether this configuration has enabled reverse NFA compilation. + pub fn get_reverse(&self) -> bool { + self.reverse.unwrap_or(false) + } + + /// Return the configured NFA size limit, if it exists, in the number of + /// bytes of heap used. + pub fn get_nfa_size_limit(&self) -> Option { + self.nfa_size_limit.unwrap_or(None) + } + + /// Return whether NFA shrinking is enabled. + pub fn get_shrink(&self) -> bool { + self.shrink.unwrap_or(false) + } + + /// Return whether NFA compilation is configured to produce capture states. + #[deprecated(since = "0.3.5", note = "use get_which_captures instead")] + pub fn get_captures(&self) -> bool { + self.get_which_captures().is_any() + } + + /// Return what kinds of capture states will be compiled into an NFA. + pub fn get_which_captures(&self) -> WhichCaptures { + self.which_captures.unwrap_or(WhichCaptures::All) + } + + /// Return the look-around matcher for this NFA. + pub fn get_look_matcher(&self) -> LookMatcher { + self.look_matcher.clone().unwrap_or(LookMatcher::default()) + } + + /// Return whether NFA compilation is configured to include an unanchored + /// prefix. + /// + /// This is always false when not in test mode. + fn get_unanchored_prefix(&self) -> bool { + #[cfg(test)] + { + self.unanchored_prefix.unwrap_or(true) + } + #[cfg(not(test))] + { + true + } + } + + /// Overwrite the default configuration such that the options in `o` are + /// always used. If an option in `o` is not set, then the corresponding + /// option in `self` is used. If it's not set in `self` either, then it + /// remains not set. 
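+    ///
+    /// For example (an illustrative sketch of the semantics): overwriting a
+    /// base config that set `utf8(false)` with an update that only sets
+    /// `reverse(true)` yields a config where `get_utf8()` is `false` and
+    /// `get_reverse()` is `true`; options set in neither config keep their
+    /// defaults.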
+ pub(crate) fn overwrite(&self, o: Config) -> Config { + Config { + utf8: o.utf8.or(self.utf8), + reverse: o.reverse.or(self.reverse), + nfa_size_limit: o.nfa_size_limit.or(self.nfa_size_limit), + shrink: o.shrink.or(self.shrink), + which_captures: o.which_captures.or(self.which_captures), + look_matcher: o.look_matcher.or_else(|| self.look_matcher.clone()), + #[cfg(test)] + unanchored_prefix: o.unanchored_prefix.or(self.unanchored_prefix), + } + } +} + +/// A configuration indicating which kinds of +/// [`State::Capture`](crate::nfa::thompson::State::Capture) states to include. +/// +/// This configuration can be used with [`Config::which_captures`] to control +/// which capture states are compiled into a Thompson NFA. +/// +/// The default configuration is [`WhichCaptures::All`]. +#[derive(Clone, Copy, Debug)] +pub enum WhichCaptures { + /// All capture states, including those corresponding to both implicit and + /// explicit capture groups, are included in the Thompson NFA. + All, + /// Only capture states corresponding to implicit capture groups are + /// included. Implicit capture groups appear in every pattern implicitly + /// and correspond to the overall match of a pattern. + /// + /// This is useful when one only cares about the overall match of a + /// pattern. By excluding capture states from explicit capture groups, + /// one might be able to reduce the memory usage of a multi-pattern regex + /// substantially if it was otherwise written to have many explicit capture + /// groups. + Implicit, + /// No capture states are compiled into the Thompson NFA. + /// + /// This is useful when capture states are either not needed (for example, + /// if one is only trying to build a DFA) or if they aren't supported (for + /// example, a reverse NFA). + /// + /// # Warning + /// + /// Callers must be exceedingly careful when using this + /// option. In particular, not all regex engines support + /// reporting match spans when using this option (for example, + /// [`PikeVM`](crate::nfa::thompson::pikevm::PikeVM) or + /// [`BoundedBacktracker`](crate::nfa::thompson::backtrack::BoundedBacktracker)). + /// + /// Perhaps more confusingly, using this option with such an + /// engine means that an `is_match` routine could report `true` + /// when `find` reports `None`. This is generally not something + /// that _should_ happen, but the low level control provided by + /// this crate makes it possible. + /// + /// Similarly, any regex engines (like [`meta::Regex`](crate::meta::Regex)) + /// should always return `None` from `find` routines when this option is + /// used, even if _some_ of its internal engines could find the match + /// boundaries. This is because inputs from user data could influence + /// engine selection, and thus influence whether a match is found or not. + /// Indeed, `meta::Regex::find` will always return `None` when configured + /// with this option. + None, +} + +impl Default for WhichCaptures { + fn default() -> WhichCaptures { + WhichCaptures::All + } +} + +impl WhichCaptures { + /// Returns true if this configuration indicates that no capture states + /// should be produced in an NFA. + pub fn is_none(&self) -> bool { + matches!(*self, WhichCaptures::None) + } + + /// Returns true if this configuration indicates that some capture states + /// should be added to an NFA. Note that this might only include capture + /// states for implicit capture groups. 
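+    ///
+    /// A minimal illustration of how the two predicates relate:
+    ///
+    /// ```
+    /// use regex_automata::nfa::thompson::WhichCaptures;
+    ///
+    /// assert!(WhichCaptures::All.is_any());
+    /// assert!(WhichCaptures::Implicit.is_any());
+    /// assert!(!WhichCaptures::None.is_any());
+    /// assert!(WhichCaptures::None.is_none());
+    /// ```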
+ pub fn is_any(&self) -> bool { + !self.is_none() + } +} + +/* +This compiler below uses Thompson's construction algorithm. The compiler takes +a regex-syntax::Hir as input and emits an NFA graph as output. The NFA graph +is structured in a way that permits it to be executed by a virtual machine and +also used to efficiently build a DFA. + +The compiler deals with a slightly expanded set of NFA states than what is +in a final NFA (as exhibited by builder::State and nfa::State). Notably a +compiler state includes an empty node that has exactly one unconditional +epsilon transition to the next state. In other words, it's a "goto" instruction +if one views Thompson's NFA as a set of bytecode instructions. These goto +instructions are removed in a subsequent phase before returning the NFA to the +caller. The purpose of these empty nodes is that they make the construction +algorithm substantially simpler to implement. We remove them before returning +to the caller because they can represent substantial overhead when traversing +the NFA graph (either while searching using the NFA directly or while building +a DFA). + +In the future, it would be nice to provide a Glushkov compiler as well, as it +would work well as a bit-parallel NFA for smaller regexes. But the Thompson +construction is one I'm more familiar with and seems more straight-forward to +deal with when it comes to large Unicode character classes. + +Internally, the compiler uses interior mutability to improve composition in the +face of the borrow checker. In particular, we'd really like to be able to write +things like this: + + self.c_concat(exprs.iter().map(|e| self.c(e))) + +Which elegantly uses iterators to build up a sequence of compiled regex +sub-expressions and then hands it off to the concatenating compiler routine. +Without interior mutability, the borrow checker won't let us borrow `self` +mutably both inside and outside the closure at the same time. +*/ + +/// A builder for compiling an NFA from a regex's high-level intermediate +/// representation (HIR). +/// +/// This compiler provides a way to translate a parsed regex pattern into an +/// NFA state graph. The NFA state graph can either be used directly to execute +/// a search (e.g., with a Pike VM), or it can be further used to build a DFA. +/// +/// This compiler provides APIs both for compiling regex patterns directly from +/// their concrete syntax, or via a [`regex_syntax::hir::Hir`]. +/// +/// This compiler has various options that may be configured via +/// [`thompson::Config`](Config). +/// +/// Note that a compiler is not the same as a [`thompson::Builder`](Builder). +/// A `Builder` provides a lower level API that is uncoupled from a regex +/// pattern's concrete syntax or even its HIR. Instead, it permits stitching +/// together an NFA by hand. See its docs for examples. +/// +/// # Example: compilation from concrete syntax +/// +/// This shows how to compile an NFA from a pattern string while setting a size +/// limit on how big the NFA is allowed to be (in terms of bytes of heap used). 
+/// +/// ``` +/// use regex_automata::{ +/// nfa::thompson::{NFA, pikevm::PikeVM}, +/// Match, +/// }; +/// +/// let config = NFA::config().nfa_size_limit(Some(1_000)); +/// let nfa = NFA::compiler().configure(config).build(r"(?-u)\w")?; +/// +/// let re = PikeVM::new_from_nfa(nfa)?; +/// let mut cache = re.create_cache(); +/// let mut caps = re.create_captures(); +/// let expected = Some(Match::must(0, 3..4)); +/// re.captures(&mut cache, "!@#A#@!", &mut caps); +/// assert_eq!(expected, caps.get_match()); +/// +/// # Ok::<(), Box>(()) +/// ``` +/// +/// # Example: compilation from HIR +/// +/// This shows how to hand assemble a regular expression via its HIR, and then +/// compile an NFA directly from it. +/// +/// ``` +/// use regex_automata::{nfa::thompson::{NFA, pikevm::PikeVM}, Match}; +/// use regex_syntax::hir::{Hir, Class, ClassBytes, ClassBytesRange}; +/// +/// let hir = Hir::class(Class::Bytes(ClassBytes::new(vec![ +/// ClassBytesRange::new(b'0', b'9'), +/// ClassBytesRange::new(b'A', b'Z'), +/// ClassBytesRange::new(b'_', b'_'), +/// ClassBytesRange::new(b'a', b'z'), +/// ]))); +/// +/// let config = NFA::config().nfa_size_limit(Some(1_000)); +/// let nfa = NFA::compiler().configure(config).build_from_hir(&hir)?; +/// +/// let re = PikeVM::new_from_nfa(nfa)?; +/// let mut cache = re.create_cache(); +/// let mut caps = re.create_captures(); +/// let expected = Some(Match::must(0, 3..4)); +/// re.captures(&mut cache, "!@#A#@!", &mut caps); +/// assert_eq!(expected, caps.get_match()); +/// +/// # Ok::<(), Box>(()) +/// ``` +#[derive(Clone, Debug)] +pub struct Compiler { + /// A regex parser, used when compiling an NFA directly from a pattern + /// string. + parser: ParserBuilder, + /// The compiler configuration. + config: Config, + /// The builder for actually constructing an NFA. This provides a + /// convenient abstraction for writing a compiler. + builder: RefCell, + /// State used for compiling character classes to UTF-8 byte automata. + /// State is not retained between character class compilations. This just + /// serves to amortize allocation to the extent possible. + utf8_state: RefCell, + /// State used for arranging character classes in reverse into a trie. + trie_state: RefCell, + /// State used for caching common suffixes when compiling reverse UTF-8 + /// automata (for Unicode character classes). + utf8_suffix: RefCell, +} + +impl Compiler { + /// Create a new NFA builder with its default configuration. + pub fn new() -> Compiler { + Compiler { + parser: ParserBuilder::new(), + config: Config::default(), + builder: RefCell::new(Builder::new()), + utf8_state: RefCell::new(Utf8State::new()), + trie_state: RefCell::new(RangeTrie::new()), + utf8_suffix: RefCell::new(Utf8SuffixMap::new(1000)), + } + } + + /// Compile the given regular expression pattern into an NFA. + /// + /// If there was a problem parsing the regex, then that error is returned. + /// + /// Otherwise, if there was a problem building the NFA, then an error is + /// returned. The only error that can occur is if the compiled regex would + /// exceed the size limits configured on this builder, or if any part of + /// the NFA would exceed the integer representations used. (For example, + /// too many states might plausibly occur on a 16-bit target.) 
+ /// + /// # Example + /// + /// ``` + /// use regex_automata::{nfa::thompson::{NFA, pikevm::PikeVM}, Match}; + /// + /// let config = NFA::config().nfa_size_limit(Some(1_000)); + /// let nfa = NFA::compiler().configure(config).build(r"(?-u)\w")?; + /// + /// let re = PikeVM::new_from_nfa(nfa)?; + /// let mut cache = re.create_cache(); + /// let mut caps = re.create_captures(); + /// let expected = Some(Match::must(0, 3..4)); + /// re.captures(&mut cache, "!@#A#@!", &mut caps); + /// assert_eq!(expected, caps.get_match()); + /// + /// # Ok::<(), Box>(()) + /// ``` + pub fn build(&self, pattern: &str) -> Result { + self.build_many(&[pattern]) + } + + /// Compile the given regular expression patterns into a single NFA. + /// + /// When matches are returned, the pattern ID corresponds to the index of + /// the pattern in the slice given. + /// + /// # Example + /// + /// ``` + /// use regex_automata::{nfa::thompson::{NFA, pikevm::PikeVM}, Match}; + /// + /// let config = NFA::config().nfa_size_limit(Some(1_000)); + /// let nfa = NFA::compiler().configure(config).build_many(&[ + /// r"(?-u)\s", + /// r"(?-u)\w", + /// ])?; + /// + /// let re = PikeVM::new_from_nfa(nfa)?; + /// let mut cache = re.create_cache(); + /// let mut caps = re.create_captures(); + /// let expected = Some(Match::must(1, 1..2)); + /// re.captures(&mut cache, "!A! !A!", &mut caps); + /// assert_eq!(expected, caps.get_match()); + /// + /// # Ok::<(), Box>(()) + /// ``` + pub fn build_many>( + &self, + patterns: &[P], + ) -> Result { + let mut hirs = vec![]; + for p in patterns { + hirs.push( + self.parser + .build() + .parse(p.as_ref()) + .map_err(BuildError::syntax)?, + ); + debug!("parsed: {:?}", p.as_ref()); + } + self.build_many_from_hir(&hirs) + } + + /// Compile the given high level intermediate representation of a regular + /// expression into an NFA. + /// + /// If there was a problem building the NFA, then an error is returned. The + /// only error that can occur is if the compiled regex would exceed the + /// size limits configured on this builder, or if any part of the NFA would + /// exceed the integer representations used. (For example, too many states + /// might plausibly occur on a 16-bit target.) + /// + /// # Example + /// + /// ``` + /// use regex_automata::{nfa::thompson::{NFA, pikevm::PikeVM}, Match}; + /// use regex_syntax::hir::{Hir, Class, ClassBytes, ClassBytesRange}; + /// + /// let hir = Hir::class(Class::Bytes(ClassBytes::new(vec![ + /// ClassBytesRange::new(b'0', b'9'), + /// ClassBytesRange::new(b'A', b'Z'), + /// ClassBytesRange::new(b'_', b'_'), + /// ClassBytesRange::new(b'a', b'z'), + /// ]))); + /// + /// let config = NFA::config().nfa_size_limit(Some(1_000)); + /// let nfa = NFA::compiler().configure(config).build_from_hir(&hir)?; + /// + /// let re = PikeVM::new_from_nfa(nfa)?; + /// let mut cache = re.create_cache(); + /// let mut caps = re.create_captures(); + /// let expected = Some(Match::must(0, 3..4)); + /// re.captures(&mut cache, "!@#A#@!", &mut caps); + /// assert_eq!(expected, caps.get_match()); + /// + /// # Ok::<(), Box>(()) + /// ``` + pub fn build_from_hir(&self, expr: &Hir) -> Result { + self.build_many_from_hir(&[expr]) + } + + /// Compile the given high level intermediate representations of regular + /// expressions into a single NFA. + /// + /// When matches are returned, the pattern ID corresponds to the index of + /// the pattern in the slice given. 
+ /// + /// # Example + /// + /// ``` + /// use regex_automata::{nfa::thompson::{NFA, pikevm::PikeVM}, Match}; + /// use regex_syntax::hir::{Hir, Class, ClassBytes, ClassBytesRange}; + /// + /// let hirs = &[ + /// Hir::class(Class::Bytes(ClassBytes::new(vec![ + /// ClassBytesRange::new(b'\t', b'\r'), + /// ClassBytesRange::new(b' ', b' '), + /// ]))), + /// Hir::class(Class::Bytes(ClassBytes::new(vec![ + /// ClassBytesRange::new(b'0', b'9'), + /// ClassBytesRange::new(b'A', b'Z'), + /// ClassBytesRange::new(b'_', b'_'), + /// ClassBytesRange::new(b'a', b'z'), + /// ]))), + /// ]; + /// + /// let config = NFA::config().nfa_size_limit(Some(1_000)); + /// let nfa = NFA::compiler().configure(config).build_many_from_hir(hirs)?; + /// + /// let re = PikeVM::new_from_nfa(nfa)?; + /// let mut cache = re.create_cache(); + /// let mut caps = re.create_captures(); + /// let expected = Some(Match::must(1, 1..2)); + /// re.captures(&mut cache, "!A! !A!", &mut caps); + /// assert_eq!(expected, caps.get_match()); + /// + /// # Ok::<(), Box>(()) + /// ``` + pub fn build_many_from_hir>( + &self, + exprs: &[H], + ) -> Result { + self.compile(exprs) + } + + /// Apply the given NFA configuration options to this builder. + /// + /// # Example + /// + /// ``` + /// use regex_automata::nfa::thompson::NFA; + /// + /// let config = NFA::config().nfa_size_limit(Some(1_000)); + /// let nfa = NFA::compiler().configure(config).build(r"(?-u)\w")?; + /// assert_eq!(nfa.pattern_len(), 1); + /// + /// # Ok::<(), Box>(()) + /// ``` + pub fn configure(&mut self, config: Config) -> &mut Compiler { + self.config = self.config.overwrite(config); + self + } + + /// Set the syntax configuration for this builder using + /// [`syntax::Config`](crate::util::syntax::Config). + /// + /// This permits setting things like case insensitivity, Unicode and multi + /// line mode. + /// + /// This syntax configuration only applies when an NFA is built directly + /// from a pattern string. If an NFA is built from an HIR, then all syntax + /// settings are ignored. + /// + /// # Example + /// + /// ``` + /// use regex_automata::{nfa::thompson::NFA, util::syntax}; + /// + /// let syntax_config = syntax::Config::new().unicode(false); + /// let nfa = NFA::compiler().syntax(syntax_config).build(r"\w")?; + /// // If Unicode were enabled, the number of states would be much bigger. + /// assert!(nfa.states().len() < 15); + /// + /// # Ok::<(), Box>(()) + /// ``` + pub fn syntax( + &mut self, + config: crate::util::syntax::Config, + ) -> &mut Compiler { + config.apply(&mut self.parser); + self + } +} + +impl Compiler { + /// Compile the sequence of HIR expressions given. Pattern IDs are + /// allocated starting from 0, in correspondence with the slice given. + /// + /// It is legal to provide an empty slice. In that case, the NFA returned + /// has no patterns and will never match anything. 
+ fn compile>(&self, exprs: &[H]) -> Result { + if exprs.len() > PatternID::LIMIT { + return Err(BuildError::too_many_patterns(exprs.len())); + } + if self.config.get_reverse() + && self.config.get_which_captures().is_any() + { + return Err(BuildError::unsupported_captures()); + } + + self.builder.borrow_mut().clear(); + self.builder.borrow_mut().set_utf8(self.config.get_utf8()); + self.builder.borrow_mut().set_reverse(self.config.get_reverse()); + self.builder + .borrow_mut() + .set_look_matcher(self.config.get_look_matcher()); + self.builder + .borrow_mut() + .set_size_limit(self.config.get_nfa_size_limit())?; + + // We always add an unanchored prefix unless we were specifically told + // not to (for tests only), or if we know that the regex is anchored + // for all matches. When an unanchored prefix is not added, then the + // NFA's anchored and unanchored start states are equivalent. + let all_anchored = exprs.iter().all(|e| { + let props = e.borrow().properties(); + if self.config.get_reverse() { + props.look_set_suffix().contains(hir::Look::End) + } else { + props.look_set_prefix().contains(hir::Look::Start) + } + }); + let anchored = !self.config.get_unanchored_prefix() || all_anchored; + let unanchored_prefix = if anchored { + self.c_empty()? + } else { + self.c_at_least(&Hir::dot(hir::Dot::AnyByte), false, 0)? + }; + + let compiled = self.c_alt_iter(exprs.iter().map(|e| { + let _ = self.start_pattern()?; + let one = self.c_cap(0, None, e.borrow())?; + let match_state_id = self.add_match()?; + self.patch(one.end, match_state_id)?; + let _ = self.finish_pattern(one.start)?; + Ok(ThompsonRef { start: one.start, end: match_state_id }) + }))?; + self.patch(unanchored_prefix.end, compiled.start)?; + let nfa = self + .builder + .borrow_mut() + .build(compiled.start, unanchored_prefix.start)?; + + debug!("HIR-to-NFA compilation complete, config: {:?}", self.config); + Ok(nfa) + } + + /// Compile an arbitrary HIR expression. + fn c(&self, expr: &Hir) -> Result { + use regex_syntax::hir::{Class, HirKind::*}; + + match *expr.kind() { + Empty => self.c_empty(), + Literal(hir::Literal(ref bytes)) => self.c_literal(bytes), + Class(Class::Bytes(ref c)) => self.c_byte_class(c), + Class(Class::Unicode(ref c)) => self.c_unicode_class(c), + Look(ref look) => self.c_look(look), + Repetition(ref rep) => self.c_repetition(rep), + Capture(ref c) => self.c_cap(c.index, c.name.as_deref(), &c.sub), + Concat(ref es) => self.c_concat(es.iter().map(|e| self.c(e))), + Alternation(ref es) => self.c_alt_slice(es), + } + } + + /// Compile a concatenation of the sub-expressions yielded by the given + /// iterator. If the iterator yields no elements, then this compiles down + /// to an "empty" state that always matches. + /// + /// If the compiler is in reverse mode, then the expressions given are + /// automatically compiled in reverse. + fn c_concat(&self, mut it: I) -> Result + where + I: DoubleEndedIterator>, + { + let first = if self.is_reverse() { it.next_back() } else { it.next() }; + let ThompsonRef { start, mut end } = match first { + Some(result) => result?, + None => return self.c_empty(), + }; + loop { + let next = + if self.is_reverse() { it.next_back() } else { it.next() }; + let compiled = match next { + Some(result) => result?, + None => break, + }; + self.patch(end, compiled.start)?; + end = compiled.end; + } + Ok(ThompsonRef { start, end }) + } + + /// Compile an alternation of the given HIR values. 
+ /// + /// This is like 'c_alt_iter', but it accepts a slice of HIR values instead + /// of an iterator of compiled NFA sub-graphs. The point of accepting a + /// slice here is that it opens up some optimization opportunities. For + /// example, if all of the HIR values are literals, then this routine might + /// re-shuffle them to make NFA epsilon closures substantially faster. + fn c_alt_slice(&self, exprs: &[Hir]) -> Result { + // self.c_alt_iter(exprs.iter().map(|e| self.c(e))) + let literal_count = exprs + .iter() + .filter(|e| { + matches!(*e.kind(), hir::HirKind::Literal(hir::Literal(_))) + }) + .count(); + if literal_count <= 1 || literal_count < exprs.len() { + return self.c_alt_iter(exprs.iter().map(|e| self.c(e))); + } + + let mut trie = if self.is_reverse() { + LiteralTrie::reverse() + } else { + LiteralTrie::forward() + }; + for expr in exprs.iter() { + let literal = match *expr.kind() { + hir::HirKind::Literal(hir::Literal(ref bytes)) => bytes, + _ => unreachable!(), + }; + trie.add(literal)?; + } + trie.compile(&mut self.builder.borrow_mut()) + } + + /// Compile an alternation, where each element yielded by the given + /// iterator represents an item in the alternation. If the iterator yields + /// no elements, then this compiles down to a "fail" state. + /// + /// In an alternation, expressions appearing earlier are "preferred" at + /// match time over expressions appearing later. At least, this is true + /// when using "leftmost first" match semantics. (If "leftmost longest" are + /// ever added in the future, then this preference order of priority would + /// not apply in that mode.) + fn c_alt_iter(&self, mut it: I) -> Result + where + I: Iterator>, + { + let first = match it.next() { + None => return self.c_fail(), + Some(result) => result?, + }; + let second = match it.next() { + None => return Ok(first), + Some(result) => result?, + }; + + let union = self.add_union()?; + let end = self.add_empty()?; + self.patch(union, first.start)?; + self.patch(first.end, end)?; + self.patch(union, second.start)?; + self.patch(second.end, end)?; + for result in it { + let compiled = result?; + self.patch(union, compiled.start)?; + self.patch(compiled.end, end)?; + } + Ok(ThompsonRef { start: union, end }) + } + + /// Compile the given capture sub-expression. `expr` should be the + /// sub-expression contained inside the capture. If "capture" states are + /// enabled, then they are added as appropriate. + /// + /// This accepts the pieces of a capture instead of a `hir::Capture` so + /// that it's easy to manufacture a "fake" group when necessary, e.g., for + /// adding the entire pattern as if it were a group in order to create + /// appropriate "capture" states in the NFA. + fn c_cap( + &self, + index: u32, + name: Option<&str>, + expr: &Hir, + ) -> Result { + match self.config.get_which_captures() { + // No capture states means we always skip them. + WhichCaptures::None => return self.c(expr), + // Implicit captures states means we only add when index==0 since + // index==0 implies the group is implicit. + WhichCaptures::Implicit if index > 0 => return self.c(expr), + _ => {} + } + + let start = self.add_capture_start(index, name)?; + let inner = self.c(expr)?; + let end = self.add_capture_end(index)?; + self.patch(start, inner.start)?; + self.patch(inner.end, end)?; + Ok(ThompsonRef { start, end }) + } + + /// Compile the given repetition expression. This handles all types of + /// repetitions and greediness. 
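+    ///
+    /// As a quick map of the dispatch below: `a?`-style repetitions go to
+    /// `c_zero_or_one`, unbounded ones like `a{2,}` go to `c_at_least`,
+    /// exact counts like `a{3}` go to `c_exactly`, and bounded ranges like
+    /// `a{2,5}` go to `c_bounded`.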
+ fn c_repetition( + &self, + rep: &hir::Repetition, + ) -> Result { + match (rep.min, rep.max) { + (0, Some(1)) => self.c_zero_or_one(&rep.sub, rep.greedy), + (min, None) => self.c_at_least(&rep.sub, rep.greedy, min), + (min, Some(max)) if min == max => self.c_exactly(&rep.sub, min), + (min, Some(max)) => self.c_bounded(&rep.sub, rep.greedy, min, max), + } + } + + /// Compile the given expression such that it matches at least `min` times, + /// but no more than `max` times. + /// + /// When `greedy` is true, then the preference is for the expression to + /// match as much as possible. Otherwise, it will match as little as + /// possible. + fn c_bounded( + &self, + expr: &Hir, + greedy: bool, + min: u32, + max: u32, + ) -> Result { + let prefix = self.c_exactly(expr, min)?; + if min == max { + return Ok(prefix); + } + + // It is tempting here to compile the rest here as a concatenation + // of zero-or-one matches. i.e., for `a{2,5}`, compile it as if it + // were `aaa?a?a?`. The problem here is that it leads to this program: + // + // >000000: 61 => 01 + // 000001: 61 => 02 + // 000002: union(03, 04) + // 000003: 61 => 04 + // 000004: union(05, 06) + // 000005: 61 => 06 + // 000006: union(07, 08) + // 000007: 61 => 08 + // 000008: MATCH + // + // And effectively, once you hit state 2, the epsilon closure will + // include states 3, 5, 6, 7 and 8, which is quite a bit. It is better + // to instead compile it like so: + // + // >000000: 61 => 01 + // 000001: 61 => 02 + // 000002: union(03, 08) + // 000003: 61 => 04 + // 000004: union(05, 08) + // 000005: 61 => 06 + // 000006: union(07, 08) + // 000007: 61 => 08 + // 000008: MATCH + // + // So that the epsilon closure of state 2 is now just 3 and 8. + let empty = self.add_empty()?; + let mut prev_end = prefix.end; + for _ in min..max { + let union = if greedy { + self.add_union() + } else { + self.add_union_reverse() + }?; + let compiled = self.c(expr)?; + self.patch(prev_end, union)?; + self.patch(union, compiled.start)?; + self.patch(union, empty)?; + prev_end = compiled.end; + } + self.patch(prev_end, empty)?; + Ok(ThompsonRef { start: prefix.start, end: empty }) + } + + /// Compile the given expression such that it may be matched `n` or more + /// times, where `n` can be any integer. (Although a particularly large + /// integer is likely to run afoul of any configured size limits.) + /// + /// When `greedy` is true, then the preference is for the expression to + /// match as much as possible. Otherwise, it will match as little as + /// possible. + fn c_at_least( + &self, + expr: &Hir, + greedy: bool, + n: u32, + ) -> Result { + if n == 0 { + // When the expression cannot match the empty string, then we + // can get away with something much simpler: just one 'alt' + // instruction that optionally repeats itself. But if the expr + // can match the empty string... see below. + if expr.properties().minimum_len().map_or(false, |len| len > 0) { + let union = if greedy { + self.add_union() + } else { + self.add_union_reverse() + }?; + let compiled = self.c(expr)?; + self.patch(union, compiled.start)?; + self.patch(compiled.end, union)?; + return Ok(ThompsonRef { start: union, end: union }); + } + + // What's going on here? Shouldn't x* be simpler than this? It + // turns out that when implementing leftmost-first (Perl-like) + // match semantics, x* results in an incorrect preference order + // when computing the transitive closure of states if and only if + // 'x' can match the empty string. 
So instead, we compile x* as + // (x+)?, which preserves the correct preference order. + // + // See: https://github.com/rust-lang/regex/issues/779 + let compiled = self.c(expr)?; + let plus = if greedy { + self.add_union() + } else { + self.add_union_reverse() + }?; + self.patch(compiled.end, plus)?; + self.patch(plus, compiled.start)?; + + let question = if greedy { + self.add_union() + } else { + self.add_union_reverse() + }?; + let empty = self.add_empty()?; + self.patch(question, compiled.start)?; + self.patch(question, empty)?; + self.patch(plus, empty)?; + Ok(ThompsonRef { start: question, end: empty }) + } else if n == 1 { + let compiled = self.c(expr)?; + let union = if greedy { + self.add_union() + } else { + self.add_union_reverse() + }?; + self.patch(compiled.end, union)?; + self.patch(union, compiled.start)?; + Ok(ThompsonRef { start: compiled.start, end: union }) + } else { + let prefix = self.c_exactly(expr, n - 1)?; + let last = self.c(expr)?; + let union = if greedy { + self.add_union() + } else { + self.add_union_reverse() + }?; + self.patch(prefix.end, last.start)?; + self.patch(last.end, union)?; + self.patch(union, last.start)?; + Ok(ThompsonRef { start: prefix.start, end: union }) + } + } + + /// Compile the given expression such that it may be matched zero or one + /// times. + /// + /// When `greedy` is true, then the preference is for the expression to + /// match as much as possible. Otherwise, it will match as little as + /// possible. + fn c_zero_or_one( + &self, + expr: &Hir, + greedy: bool, + ) -> Result { + let union = + if greedy { self.add_union() } else { self.add_union_reverse() }?; + let compiled = self.c(expr)?; + let empty = self.add_empty()?; + self.patch(union, compiled.start)?; + self.patch(union, empty)?; + self.patch(compiled.end, empty)?; + Ok(ThompsonRef { start: union, end: empty }) + } + + /// Compile the given HIR expression exactly `n` times. + fn c_exactly( + &self, + expr: &Hir, + n: u32, + ) -> Result { + let it = (0..n).map(|_| self.c(expr)); + self.c_concat(it) + } + + /// Compile the given byte oriented character class. + /// + /// This uses "sparse" states to represent an alternation between ranges in + /// this character class. We can use "sparse" states instead of stitching + /// together a "union" state because all ranges in a character class have + /// equal priority *and* are non-overlapping (thus, only one can match, so + /// there's never a question of priority in the first place). This saves a + /// fair bit of overhead when traversing an NFA. + /// + /// This routine compiles an empty character class into a "fail" state. + fn c_byte_class( + &self, + cls: &hir::ClassBytes, + ) -> Result { + let end = self.add_empty()?; + let mut trans = Vec::with_capacity(cls.ranges().len()); + for r in cls.iter() { + trans.push(Transition { + start: r.start(), + end: r.end(), + next: end, + }); + } + Ok(ThompsonRef { start: self.add_sparse(trans)?, end }) + } + + /// Compile the given Unicode character class. + /// + /// This routine specifically tries to use various types of compression, + /// since UTF-8 automata of large classes can get quite large. The specific + /// type of compression used depends on forward vs reverse compilation, and + /// whether NFA shrinking is enabled or not. + /// + /// Aside from repetitions causing lots of repeat group, this is like the + /// single most expensive part of regex compilation. Therefore, a large part + /// of the expense of compilation may be reduce by disabling Unicode in the + /// pattern. 
+ /// + /// This routine compiles an empty character class into a "fail" state. + fn c_unicode_class( + &self, + cls: &hir::ClassUnicode, + ) -> Result { + // If all we have are ASCII ranges wrapped in a Unicode package, then + // there is zero reason to bring out the big guns. We can fit all ASCII + // ranges within a single sparse state. + if cls.is_ascii() { + let end = self.add_empty()?; + let mut trans = Vec::with_capacity(cls.ranges().len()); + for r in cls.iter() { + // The unwraps below are OK because we've verified that this + // class only contains ASCII codepoints. + trans.push(Transition { + // FIXME(1.59): use the 'TryFrom for u8' impl. + start: u8::try_from(u32::from(r.start())).unwrap(), + end: u8::try_from(u32::from(r.end())).unwrap(), + next: end, + }); + } + Ok(ThompsonRef { start: self.add_sparse(trans)?, end }) + } else if self.is_reverse() { + if !self.config.get_shrink() { + // When we don't want to spend the extra time shrinking, we + // compile the UTF-8 automaton in reverse using something like + // the "naive" approach, but will attempt to re-use common + // suffixes. + self.c_unicode_class_reverse_with_suffix(cls) + } else { + // When we want to shrink our NFA for reverse UTF-8 automata, + // we cannot feed UTF-8 sequences directly to the UTF-8 + // compiler, since the UTF-8 compiler requires all sequences + // to be lexicographically sorted. Instead, we organize our + // sequences into a range trie, which can then output our + // sequences in the correct order. Unfortunately, building the + // range trie is fairly expensive (but not nearly as expensive + // as building a DFA). Hence the reason why the 'shrink' option + // exists, so that this path can be toggled off. For example, + // we might want to turn this off if we know we won't be + // compiling a DFA. + let mut trie = self.trie_state.borrow_mut(); + trie.clear(); + + for rng in cls.iter() { + for mut seq in Utf8Sequences::new(rng.start(), rng.end()) { + seq.reverse(); + trie.insert(seq.as_slice()); + } + } + let mut builder = self.builder.borrow_mut(); + let mut utf8_state = self.utf8_state.borrow_mut(); + let mut utf8c = + Utf8Compiler::new(&mut *builder, &mut *utf8_state)?; + trie.iter(|seq| { + utf8c.add(&seq)?; + Ok(()) + })?; + utf8c.finish() + } + } else { + // In the forward direction, we always shrink our UTF-8 automata + // because we can stream it right into the UTF-8 compiler. There + // is almost no downside (in either memory or time) to using this + // approach. + let mut builder = self.builder.borrow_mut(); + let mut utf8_state = self.utf8_state.borrow_mut(); + let mut utf8c = + Utf8Compiler::new(&mut *builder, &mut *utf8_state)?; + for rng in cls.iter() { + for seq in Utf8Sequences::new(rng.start(), rng.end()) { + utf8c.add(seq.as_slice())?; + } + } + utf8c.finish() + } + + // For reference, the code below is the "naive" version of compiling a + // UTF-8 automaton. It is deliciously simple (and works for both the + // forward and reverse cases), but will unfortunately produce very + // large NFAs. When compiling a forward automaton, the size difference + // can sometimes be an order of magnitude. For example, the '\w' regex + // will generate about ~3000 NFA states using the naive approach below, + // but only 283 states when using the approach above. This is because + // the approach above actually compiles a *minimal* (or near minimal, + // because of the bounded hashmap for reusing equivalent states) UTF-8 + // automaton. 
+ // + // The code below is kept as a reference point in order to make it + // easier to understand the higher level goal here. Although, it will + // almost certainly bit-rot, so keep that in mind. Also, if you try to + // use it, some of the tests in this module will fail because they look + // for terser byte code produce by the more optimized handling above. + // But the integration test suite should still pass. + // + // One good example of the substantial difference this can make is to + // compare and contrast performance of the Pike VM when the code below + // is active vs the code above. Here's an example to try: + // + // regex-cli find match pikevm -b -p '(?m)^\w{20}' non-ascii-file + // + // With Unicode classes generated below, this search takes about 45s on + // my machine. But with the compressed version above, the search takes + // only around 1.4s. The NFA is also 20% smaller. This is in part due + // to the compression, but also because of the utilization of 'sparse' + // NFA states. They lead to much less state shuffling during the NFA + // search. + /* + let it = cls + .iter() + .flat_map(|rng| Utf8Sequences::new(rng.start(), rng.end())) + .map(|seq| { + let it = seq + .as_slice() + .iter() + .map(|rng| self.c_range(rng.start, rng.end)); + self.c_concat(it) + }); + self.c_alt_iter(it) + */ + } + + /// Compile the given Unicode character class in reverse with suffix + /// caching. + /// + /// This is a "quick" way to compile large Unicode classes into reverse + /// UTF-8 automata while doing a small amount of compression on that + /// automata by reusing common suffixes. + /// + /// A more comprehensive compression scheme can be accomplished by using + /// a range trie to efficiently sort a reverse sequence of UTF-8 byte + /// ranges, and then use Daciuk's algorithm via `Utf8Compiler`. + /// + /// This is the technique used when "NFA shrinking" is disabled. + /// + /// (This also tries to use "sparse" states where possible, just like + /// `c_byte_class` does.) + fn c_unicode_class_reverse_with_suffix( + &self, + cls: &hir::ClassUnicode, + ) -> Result { + // N.B. It would likely be better to cache common *prefixes* in the + // reverse direction, but it's not quite clear how to do that. The + // advantage of caching suffixes is that it does give us a win, and + // has a very small additional overhead. + let mut cache = self.utf8_suffix.borrow_mut(); + cache.clear(); + + let union = self.add_union()?; + let alt_end = self.add_empty()?; + for urng in cls.iter() { + for seq in Utf8Sequences::new(urng.start(), urng.end()) { + let mut end = alt_end; + for brng in seq.as_slice() { + let key = Utf8SuffixKey { + from: end, + start: brng.start, + end: brng.end, + }; + let hash = cache.hash(&key); + if let Some(id) = cache.get(&key, hash) { + end = id; + continue; + } + + let compiled = self.c_range(brng.start, brng.end)?; + self.patch(compiled.end, end)?; + end = compiled.start; + cache.set(key, hash, end); + } + self.patch(union, end)?; + } + } + Ok(ThompsonRef { start: union, end: alt_end }) + } + + /// Compile the given HIR look-around assertion to an NFA look-around + /// assertion. 
+ fn c_look(&self, anchor: &hir::Look) -> Result { + let look = match *anchor { + hir::Look::Start => Look::Start, + hir::Look::End => Look::End, + hir::Look::StartLF => Look::StartLF, + hir::Look::EndLF => Look::EndLF, + hir::Look::StartCRLF => Look::StartCRLF, + hir::Look::EndCRLF => Look::EndCRLF, + hir::Look::WordAscii => Look::WordAscii, + hir::Look::WordAsciiNegate => Look::WordAsciiNegate, + hir::Look::WordUnicode => Look::WordUnicode, + hir::Look::WordUnicodeNegate => Look::WordUnicodeNegate, + hir::Look::WordStartAscii => Look::WordStartAscii, + hir::Look::WordEndAscii => Look::WordEndAscii, + hir::Look::WordStartUnicode => Look::WordStartUnicode, + hir::Look::WordEndUnicode => Look::WordEndUnicode, + hir::Look::WordStartHalfAscii => Look::WordStartHalfAscii, + hir::Look::WordEndHalfAscii => Look::WordEndHalfAscii, + hir::Look::WordStartHalfUnicode => Look::WordStartHalfUnicode, + hir::Look::WordEndHalfUnicode => Look::WordEndHalfUnicode, + }; + let id = self.add_look(look)?; + Ok(ThompsonRef { start: id, end: id }) + } + + /// Compile the given byte string to a concatenation of bytes. + fn c_literal(&self, bytes: &[u8]) -> Result { + self.c_concat(bytes.iter().copied().map(|b| self.c_range(b, b))) + } + + /// Compile a "range" state with one transition that may only be followed + /// if the input byte is in the (inclusive) range given. + /// + /// Both the `start` and `end` locations point to the state created. + /// Callers will likely want to keep the `start`, but patch the `end` to + /// point to some other state. + fn c_range(&self, start: u8, end: u8) -> Result { + let id = self.add_range(start, end)?; + Ok(ThompsonRef { start: id, end: id }) + } + + /// Compile an "empty" state with one unconditional epsilon transition. + /// + /// Both the `start` and `end` locations point to the state created. + /// Callers will likely want to keep the `start`, but patch the `end` to + /// point to some other state. + fn c_empty(&self) -> Result { + let id = self.add_empty()?; + Ok(ThompsonRef { start: id, end: id }) + } + + /// Compile a "fail" state that can never have any outgoing transitions. + fn c_fail(&self) -> Result { + let id = self.add_fail()?; + Ok(ThompsonRef { start: id, end: id }) + } + + // The below helpers are meant to be simple wrappers around the + // corresponding Builder methods. For the most part, they let us write + // 'self.add_foo()' instead of 'self.builder.borrow_mut().add_foo()', where + // the latter is a mouthful. Some of the methods do inject a little bit + // of extra logic. e.g., Flipping look-around operators when compiling in + // reverse mode. 
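+    //
+    // As a concrete sketch of that last point (this only restates the check
+    // that 'add_look' below performs; it is not additional behavior): a
+    // reverse NFA scans the haystack right-to-left, so a start-of-haystack
+    // assertion has to be tested as an end-of-haystack assertion, and vice
+    // versa:
+    //
+    //     if self.is_reverse() {
+    //         look = look.reversed(); // e.g. Look::Start <-> Look::End
+    //     }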
+ + fn patch(&self, from: StateID, to: StateID) -> Result<(), BuildError> { + self.builder.borrow_mut().patch(from, to) + } + + fn start_pattern(&self) -> Result { + self.builder.borrow_mut().start_pattern() + } + + fn finish_pattern( + &self, + start_id: StateID, + ) -> Result { + self.builder.borrow_mut().finish_pattern(start_id) + } + + fn add_empty(&self) -> Result { + self.builder.borrow_mut().add_empty() + } + + fn add_range(&self, start: u8, end: u8) -> Result { + self.builder.borrow_mut().add_range(Transition { + start, + end, + next: StateID::ZERO, + }) + } + + fn add_sparse( + &self, + ranges: Vec, + ) -> Result { + self.builder.borrow_mut().add_sparse(ranges) + } + + fn add_look(&self, mut look: Look) -> Result { + if self.is_reverse() { + look = look.reversed(); + } + self.builder.borrow_mut().add_look(StateID::ZERO, look) + } + + fn add_union(&self) -> Result { + self.builder.borrow_mut().add_union(vec![]) + } + + fn add_union_reverse(&self) -> Result { + self.builder.borrow_mut().add_union_reverse(vec![]) + } + + fn add_capture_start( + &self, + capture_index: u32, + name: Option<&str>, + ) -> Result { + let name = name.map(Arc::from); + self.builder.borrow_mut().add_capture_start( + StateID::ZERO, + capture_index, + name, + ) + } + + fn add_capture_end( + &self, + capture_index: u32, + ) -> Result { + self.builder.borrow_mut().add_capture_end(StateID::ZERO, capture_index) + } + + fn add_fail(&self) -> Result { + self.builder.borrow_mut().add_fail() + } + + fn add_match(&self) -> Result { + self.builder.borrow_mut().add_match() + } + + fn is_reverse(&self) -> bool { + self.config.get_reverse() + } +} + +/// A value that represents the result of compiling a sub-expression of a +/// regex's HIR. Specifically, this represents a sub-graph of the NFA that +/// has an initial state at `start` and a final state at `end`. +#[derive(Clone, Copy, Debug)] +pub(crate) struct ThompsonRef { + pub(crate) start: StateID, + pub(crate) end: StateID, +} + +/// A UTF-8 compiler based on Daciuk's algorithm for compiling minimal DFAs +/// from a lexicographically sorted sequence of strings in linear time. +/// +/// The trick here is that any Unicode codepoint range can be converted to +/// a sequence of byte ranges that form a UTF-8 automaton. Connecting them +/// together via an alternation is trivial, and indeed, it works. However, +/// there is a lot of redundant structure in many UTF-8 automatons. Since our +/// UTF-8 ranges are in lexicographic order, we can use Daciuk's algorithm +/// to build nearly minimal DFAs in linear time. (They are guaranteed to be +/// minimal because we use a bounded cache of previously build DFA states.) +/// +/// The drawback is that this sadly doesn't work for reverse automata, since +/// the ranges are no longer in lexicographic order. For that, we invented the +/// range trie (which gets its own module). Once a range trie is built, we then +/// use this same Utf8Compiler to build a reverse UTF-8 automaton. +/// +/// The high level idea is described here: +/// https://blog.burntsushi.net/transducers/#finite-state-machines-as-data-structures +/// +/// There is also another implementation of this in the `fst` crate. 
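+///
+/// As a rough, hand-worked illustration of why the sharing matters (the byte
+/// ranges below come from the standard UTF-8 encoding rules, not from this
+/// crate's output): the codepoint range U+0800..=U+FFFF decomposes into the
+/// UTF-8 byte-range sequences
+///
+/// ```text
+/// [E0][A0-BF][80-BF]
+/// [E1-EC][80-BF][80-BF]
+/// [ED][80-9F][80-BF]
+/// [EE-EF][80-BF][80-BF]
+/// ```
+///
+/// A naive alternation would compile each sequence into its own chain of
+/// states, but because the sequences are fed to this compiler in
+/// lexicographic order, the bounded cache of previously built states lets
+/// the common `[80-BF]` suffix states be reused across branches instead of
+/// being rebuilt.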
+#[derive(Debug)] +struct Utf8Compiler<'a> { + builder: &'a mut Builder, + state: &'a mut Utf8State, + target: StateID, +} + +#[derive(Clone, Debug)] +struct Utf8State { + compiled: Utf8BoundedMap, + uncompiled: Vec, +} + +#[derive(Clone, Debug)] +struct Utf8Node { + trans: Vec, + last: Option, +} + +#[derive(Clone, Debug)] +struct Utf8LastTransition { + start: u8, + end: u8, +} + +impl Utf8State { + fn new() -> Utf8State { + Utf8State { compiled: Utf8BoundedMap::new(10_000), uncompiled: vec![] } + } + + fn clear(&mut self) { + self.compiled.clear(); + self.uncompiled.clear(); + } +} + +impl<'a> Utf8Compiler<'a> { + fn new( + builder: &'a mut Builder, + state: &'a mut Utf8State, + ) -> Result, BuildError> { + let target = builder.add_empty()?; + state.clear(); + let mut utf8c = Utf8Compiler { builder, state, target }; + utf8c.add_empty(); + Ok(utf8c) + } + + fn finish(&mut self) -> Result { + self.compile_from(0)?; + let node = self.pop_root(); + let start = self.compile(node)?; + Ok(ThompsonRef { start, end: self.target }) + } + + fn add(&mut self, ranges: &[Utf8Range]) -> Result<(), BuildError> { + let prefix_len = ranges + .iter() + .zip(&self.state.uncompiled) + .take_while(|&(range, node)| { + node.last.as_ref().map_or(false, |t| { + (t.start, t.end) == (range.start, range.end) + }) + }) + .count(); + assert!(prefix_len < ranges.len()); + self.compile_from(prefix_len)?; + self.add_suffix(&ranges[prefix_len..]); + Ok(()) + } + + fn compile_from(&mut self, from: usize) -> Result<(), BuildError> { + let mut next = self.target; + while from + 1 < self.state.uncompiled.len() { + let node = self.pop_freeze(next); + next = self.compile(node)?; + } + self.top_last_freeze(next); + Ok(()) + } + + fn compile( + &mut self, + node: Vec, + ) -> Result { + let hash = self.state.compiled.hash(&node); + if let Some(id) = self.state.compiled.get(&node, hash) { + return Ok(id); + } + let id = self.builder.add_sparse(node.clone())?; + self.state.compiled.set(node, hash, id); + Ok(id) + } + + fn add_suffix(&mut self, ranges: &[Utf8Range]) { + assert!(!ranges.is_empty()); + let last = self + .state + .uncompiled + .len() + .checked_sub(1) + .expect("non-empty nodes"); + assert!(self.state.uncompiled[last].last.is_none()); + self.state.uncompiled[last].last = Some(Utf8LastTransition { + start: ranges[0].start, + end: ranges[0].end, + }); + for r in &ranges[1..] 
{ + self.state.uncompiled.push(Utf8Node { + trans: vec![], + last: Some(Utf8LastTransition { start: r.start, end: r.end }), + }); + } + } + + fn add_empty(&mut self) { + self.state.uncompiled.push(Utf8Node { trans: vec![], last: None }); + } + + fn pop_freeze(&mut self, next: StateID) -> Vec { + let mut uncompiled = self.state.uncompiled.pop().unwrap(); + uncompiled.set_last_transition(next); + uncompiled.trans + } + + fn pop_root(&mut self) -> Vec { + assert_eq!(self.state.uncompiled.len(), 1); + assert!(self.state.uncompiled[0].last.is_none()); + self.state.uncompiled.pop().expect("non-empty nodes").trans + } + + fn top_last_freeze(&mut self, next: StateID) { + let last = self + .state + .uncompiled + .len() + .checked_sub(1) + .expect("non-empty nodes"); + self.state.uncompiled[last].set_last_transition(next); + } +} + +impl Utf8Node { + fn set_last_transition(&mut self, next: StateID) { + if let Some(last) = self.last.take() { + self.trans.push(Transition { + start: last.start, + end: last.end, + next, + }); + } + } +} + +#[cfg(test)] +mod tests { + use alloc::vec; + + use crate::{ + nfa::thompson::{SparseTransitions, State}, + util::primitives::SmallIndex, + }; + + use super::*; + + fn build(pattern: &str) -> NFA { + NFA::compiler() + .configure( + NFA::config() + .which_captures(WhichCaptures::None) + .unanchored_prefix(false), + ) + .build(pattern) + .unwrap() + } + + fn pid(id: usize) -> PatternID { + PatternID::new(id).unwrap() + } + + fn sid(id: usize) -> StateID { + StateID::new(id).unwrap() + } + + fn s_byte(byte: u8, next: usize) -> State { + let next = sid(next); + let trans = Transition { start: byte, end: byte, next }; + State::ByteRange { trans } + } + + fn s_range(start: u8, end: u8, next: usize) -> State { + let next = sid(next); + let trans = Transition { start, end, next }; + State::ByteRange { trans } + } + + fn s_sparse(transitions: &[(u8, u8, usize)]) -> State { + let transitions = transitions + .iter() + .map(|&(start, end, next)| Transition { + start, + end, + next: sid(next), + }) + .collect(); + State::Sparse(SparseTransitions { transitions }) + } + + fn s_look(look: Look, next: usize) -> State { + let next = sid(next); + State::Look { look, next } + } + + fn s_bin_union(alt1: usize, alt2: usize) -> State { + State::BinaryUnion { alt1: sid(alt1), alt2: sid(alt2) } + } + + fn s_union(alts: &[usize]) -> State { + State::Union { + alternates: alts + .iter() + .map(|&id| sid(id)) + .collect::>() + .into_boxed_slice(), + } + } + + fn s_cap(next: usize, pattern: usize, index: usize, slot: usize) -> State { + State::Capture { + next: sid(next), + pattern_id: pid(pattern), + group_index: SmallIndex::new(index).unwrap(), + slot: SmallIndex::new(slot).unwrap(), + } + } + + fn s_fail() -> State { + State::Fail + } + + fn s_match(id: usize) -> State { + State::Match { pattern_id: pid(id) } + } + + // Test that building an unanchored NFA has an appropriate `(?s:.)*?` + // prefix. 
+ #[test] + fn compile_unanchored_prefix() { + let nfa = NFA::compiler() + .configure(NFA::config().which_captures(WhichCaptures::None)) + .build(r"a") + .unwrap(); + assert_eq!( + nfa.states(), + &[ + s_bin_union(2, 1), + s_range(0, 255, 0), + s_byte(b'a', 3), + s_match(0), + ] + ); + } + + #[test] + fn compile_no_unanchored_prefix_with_start_anchor() { + let nfa = NFA::compiler() + .configure(NFA::config().which_captures(WhichCaptures::None)) + .build(r"^a") + .unwrap(); + assert_eq!( + nfa.states(), + &[s_look(Look::Start, 1), s_byte(b'a', 2), s_match(0)] + ); + } + + #[test] + fn compile_yes_unanchored_prefix_with_end_anchor() { + let nfa = NFA::compiler() + .configure(NFA::config().which_captures(WhichCaptures::None)) + .build(r"a$") + .unwrap(); + assert_eq!( + nfa.states(), + &[ + s_bin_union(2, 1), + s_range(0, 255, 0), + s_byte(b'a', 3), + s_look(Look::End, 4), + s_match(0), + ] + ); + } + + #[test] + fn compile_yes_reverse_unanchored_prefix_with_start_anchor() { + let nfa = NFA::compiler() + .configure( + NFA::config() + .reverse(true) + .which_captures(WhichCaptures::None), + ) + .build(r"^a") + .unwrap(); + assert_eq!( + nfa.states(), + &[ + s_bin_union(2, 1), + s_range(0, 255, 0), + s_byte(b'a', 3), + // Anchors get flipped in a reverse automaton. + s_look(Look::End, 4), + s_match(0), + ], + ); + } + + #[test] + fn compile_no_reverse_unanchored_prefix_with_end_anchor() { + let nfa = NFA::compiler() + .configure( + NFA::config() + .reverse(true) + .which_captures(WhichCaptures::None), + ) + .build(r"a$") + .unwrap(); + assert_eq!( + nfa.states(), + &[ + // Anchors get flipped in a reverse automaton. + s_look(Look::Start, 1), + s_byte(b'a', 2), + s_match(0), + ], + ); + } + + #[test] + fn compile_empty() { + assert_eq!(build("").states(), &[s_match(0),]); + } + + #[test] + fn compile_literal() { + assert_eq!(build("a").states(), &[s_byte(b'a', 1), s_match(0),]); + assert_eq!( + build("ab").states(), + &[s_byte(b'a', 1), s_byte(b'b', 2), s_match(0),] + ); + assert_eq!( + build("☃").states(), + &[s_byte(0xE2, 1), s_byte(0x98, 2), s_byte(0x83, 3), s_match(0)] + ); + + // Check that non-UTF-8 literals work. 
+ let nfa = NFA::compiler() + .configure( + NFA::config() + .which_captures(WhichCaptures::None) + .unanchored_prefix(false), + ) + .syntax(crate::util::syntax::Config::new().utf8(false)) + .build(r"(?-u)\xFF") + .unwrap(); + assert_eq!(nfa.states(), &[s_byte(b'\xFF', 1), s_match(0),]); + } + + #[test] + fn compile_class_ascii() { + assert_eq!( + build(r"[a-z]").states(), + &[s_range(b'a', b'z', 1), s_match(0),] + ); + assert_eq!( + build(r"[x-za-c]").states(), + &[s_sparse(&[(b'a', b'c', 1), (b'x', b'z', 1)]), s_match(0)] + ); + } + + #[test] + #[cfg(not(miri))] + fn compile_class_unicode() { + assert_eq!( + build(r"[\u03B1-\u03B4]").states(), + &[s_range(0xB1, 0xB4, 2), s_byte(0xCE, 0), s_match(0)] + ); + assert_eq!( + build(r"[\u03B1-\u03B4\u{1F919}-\u{1F91E}]").states(), + &[ + s_range(0xB1, 0xB4, 5), + s_range(0x99, 0x9E, 5), + s_byte(0xA4, 1), + s_byte(0x9F, 2), + s_sparse(&[(0xCE, 0xCE, 0), (0xF0, 0xF0, 3)]), + s_match(0), + ] + ); + assert_eq!( + build(r"[a-z☃]").states(), + &[ + s_byte(0x83, 3), + s_byte(0x98, 0), + s_sparse(&[(b'a', b'z', 3), (0xE2, 0xE2, 1)]), + s_match(0), + ] + ); + } + + #[test] + fn compile_repetition() { + assert_eq!( + build(r"a?").states(), + &[s_bin_union(1, 2), s_byte(b'a', 2), s_match(0),] + ); + assert_eq!( + build(r"a??").states(), + &[s_bin_union(2, 1), s_byte(b'a', 2), s_match(0),] + ); + } + + #[test] + fn compile_group() { + assert_eq!( + build(r"ab+").states(), + &[s_byte(b'a', 1), s_byte(b'b', 2), s_bin_union(1, 3), s_match(0)] + ); + assert_eq!( + build(r"(ab)").states(), + &[s_byte(b'a', 1), s_byte(b'b', 2), s_match(0)] + ); + assert_eq!( + build(r"(ab)+").states(), + &[s_byte(b'a', 1), s_byte(b'b', 2), s_bin_union(0, 3), s_match(0)] + ); + } + + #[test] + fn compile_alternation() { + assert_eq!( + build(r"a|b").states(), + &[s_range(b'a', b'b', 1), s_match(0)] + ); + assert_eq!( + build(r"ab|cd").states(), + &[ + s_byte(b'b', 3), + s_byte(b'd', 3), + s_sparse(&[(b'a', b'a', 0), (b'c', b'c', 1)]), + s_match(0) + ], + ); + assert_eq!( + build(r"|b").states(), + &[s_byte(b'b', 2), s_bin_union(2, 0), s_match(0)] + ); + assert_eq!( + build(r"a|").states(), + &[s_byte(b'a', 2), s_bin_union(0, 2), s_match(0)] + ); + } + + // This tests the use of a non-binary union, i.e., a state with more than + // 2 unconditional epsilon transitions. The only place they tend to appear + // is in reverse NFAs when shrinking is disabled. Otherwise, 'binary-union' + // and 'sparse' tend to cover all other cases of alternation. + #[test] + fn compile_non_binary_union() { + let nfa = NFA::compiler() + .configure( + NFA::config() + .which_captures(WhichCaptures::None) + .reverse(true) + .shrink(false) + .unanchored_prefix(false), + ) + .build(r"[\u1000\u2000\u3000]") + .unwrap(); + assert_eq!( + nfa.states(), + &[ + s_union(&[3, 6, 9]), + s_byte(0xE1, 10), + s_byte(0x80, 1), + s_byte(0x80, 2), + s_byte(0xE2, 10), + s_byte(0x80, 4), + s_byte(0x80, 5), + s_byte(0xE3, 10), + s_byte(0x80, 7), + s_byte(0x80, 8), + s_match(0), + ] + ); + } + + #[test] + fn compile_many_start_pattern() { + let nfa = NFA::compiler() + .configure( + NFA::config() + .which_captures(WhichCaptures::None) + .unanchored_prefix(false), + ) + .build_many(&["a", "b"]) + .unwrap(); + assert_eq!( + nfa.states(), + &[ + s_byte(b'a', 1), + s_match(0), + s_byte(b'b', 3), + s_match(1), + s_bin_union(0, 2), + ] + ); + assert_eq!(nfa.start_anchored().as_usize(), 4); + assert_eq!(nfa.start_unanchored().as_usize(), 4); + // Test that the start states for each individual pattern are correct. 
+ assert_eq!(nfa.start_pattern(pid(0)).unwrap(), sid(0)); + assert_eq!(nfa.start_pattern(pid(1)).unwrap(), sid(2)); + } + + // This tests that our compiler can handle an empty character class. At the + // time of writing, the regex parser forbids it, so the only way to test it + // is to provide a hand written HIR. + #[test] + fn empty_class_bytes() { + use regex_syntax::hir::{Class, ClassBytes, Hir}; + + let hir = Hir::class(Class::Bytes(ClassBytes::new(vec![]))); + let config = NFA::config() + .which_captures(WhichCaptures::None) + .unanchored_prefix(false); + let nfa = + NFA::compiler().configure(config).build_from_hir(&hir).unwrap(); + assert_eq!(nfa.states(), &[s_fail(), s_match(0)]); + } + + // Like empty_class_bytes, but for a Unicode class. + #[test] + fn empty_class_unicode() { + use regex_syntax::hir::{Class, ClassUnicode, Hir}; + + let hir = Hir::class(Class::Unicode(ClassUnicode::new(vec![]))); + let config = NFA::config() + .which_captures(WhichCaptures::None) + .unanchored_prefix(false); + let nfa = + NFA::compiler().configure(config).build_from_hir(&hir).unwrap(); + assert_eq!(nfa.states(), &[s_fail(), s_match(0)]); + } + + #[test] + fn compile_captures_all() { + let nfa = NFA::compiler() + .configure( + NFA::config() + .unanchored_prefix(false) + .which_captures(WhichCaptures::All), + ) + .build("a(b)c") + .unwrap(); + assert_eq!( + nfa.states(), + &[ + s_cap(1, 0, 0, 0), + s_byte(b'a', 2), + s_cap(3, 0, 1, 2), + s_byte(b'b', 4), + s_cap(5, 0, 1, 3), + s_byte(b'c', 6), + s_cap(7, 0, 0, 1), + s_match(0) + ] + ); + let ginfo = nfa.group_info(); + assert_eq!(2, ginfo.all_group_len()); + } + + #[test] + fn compile_captures_implicit() { + let nfa = NFA::compiler() + .configure( + NFA::config() + .unanchored_prefix(false) + .which_captures(WhichCaptures::Implicit), + ) + .build("a(b)c") + .unwrap(); + assert_eq!( + nfa.states(), + &[ + s_cap(1, 0, 0, 0), + s_byte(b'a', 2), + s_byte(b'b', 3), + s_byte(b'c', 4), + s_cap(5, 0, 0, 1), + s_match(0) + ] + ); + let ginfo = nfa.group_info(); + assert_eq!(1, ginfo.all_group_len()); + } + + #[test] + fn compile_captures_none() { + let nfa = NFA::compiler() + .configure( + NFA::config() + .unanchored_prefix(false) + .which_captures(WhichCaptures::None), + ) + .build("a(b)c") + .unwrap(); + assert_eq!( + nfa.states(), + &[s_byte(b'a', 1), s_byte(b'b', 2), s_byte(b'c', 3), s_match(0)] + ); + let ginfo = nfa.group_info(); + assert_eq!(0, ginfo.all_group_len()); + } +} diff --git a/vendor/regex-automata/src/nfa/thompson/error.rs b/vendor/regex-automata/src/nfa/thompson/error.rs new file mode 100644 index 00000000000000..9f884ff20e3fa0 --- /dev/null +++ b/vendor/regex-automata/src/nfa/thompson/error.rs @@ -0,0 +1,182 @@ +use crate::util::{ + captures, look, + primitives::{PatternID, StateID}, +}; + +/// An error that can occurred during the construction of a thompson NFA. +/// +/// This error does not provide many introspection capabilities. There are +/// generally only two things you can do with it: +/// +/// * Obtain a human readable message via its `std::fmt::Display` impl. +/// * Access an underlying [`regex_syntax::Error`] type from its `source` +/// method via the `std::error::Error` trait. This error only occurs when using +/// convenience routines for building an NFA directly from a pattern string. +/// +/// Otherwise, errors typically occur when a limit has been breached. 
For +/// example, if the total heap usage of the compiled NFA exceeds the limit +/// set by [`Config::nfa_size_limit`](crate::nfa::thompson::Config), then +/// building the NFA will fail. +#[derive(Clone, Debug)] +pub struct BuildError { + kind: BuildErrorKind, +} + +/// The kind of error that occurred during the construction of a thompson NFA. +#[derive(Clone, Debug)] +enum BuildErrorKind { + /// An error that occurred while parsing a regular expression. Note that + /// this error may be printed over multiple lines, and is generally + /// intended to be end user readable on its own. + #[cfg(feature = "syntax")] + Syntax(regex_syntax::Error), + /// An error that occurs if the capturing groups provided to an NFA builder + /// do not satisfy the documented invariants. For example, things like + /// too many groups, missing groups, having the first (zeroth) group be + /// named or duplicate group names within the same pattern. + Captures(captures::GroupInfoError), + /// An error that occurs when an NFA contains a Unicode word boundary, but + /// where the crate was compiled without the necessary data for dealing + /// with Unicode word boundaries. + Word(look::UnicodeWordBoundaryError), + /// An error that occurs if too many patterns were given to the NFA + /// compiler. + TooManyPatterns { + /// The number of patterns given, which exceeds the limit. + given: usize, + /// The limit on the number of patterns. + limit: usize, + }, + /// An error that occurs if too states are produced while building an NFA. + TooManyStates { + /// The minimum number of states that are desired, which exceeds the + /// limit. + given: usize, + /// The limit on the number of states. + limit: usize, + }, + /// An error that occurs when NFA compilation exceeds a configured heap + /// limit. + ExceededSizeLimit { + /// The configured limit, in bytes. + limit: usize, + }, + /// An error that occurs when an invalid capture group index is added to + /// the NFA. An "invalid" index can be one that would otherwise overflow + /// a `usize` on the current target. + InvalidCaptureIndex { + /// The invalid index that was given. + index: u32, + }, + /// An error that occurs when one tries to build a reverse NFA with + /// captures enabled. Currently, this isn't supported, but we probably + /// should support it at some point. + #[cfg(feature = "syntax")] + UnsupportedCaptures, +} + +impl BuildError { + /// If this error occurred because the NFA exceeded the configured size + /// limit before being built, then this returns the configured size limit. + /// + /// The limit returned is what was configured, and corresponds to the + /// maximum amount of heap usage in bytes. 
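+    ///
+    /// A minimal sketch of how this might be used (assuming the limit is
+    /// configured through `Config::nfa_size_limit`, as described above):
+    ///
+    /// ```ignore
+    /// let result = NFA::compiler()
+    ///     .configure(NFA::config().nfa_size_limit(Some(1_000)))
+    ///     .build(r"\w{50}");
+    /// if let Err(err) = result {
+    ///     assert_eq!(Some(1_000), err.size_limit());
+    /// }
+    /// ```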
+ pub fn size_limit(&self) -> Option { + match self.kind { + BuildErrorKind::ExceededSizeLimit { limit } => Some(limit), + _ => None, + } + } + + fn kind(&self) -> &BuildErrorKind { + &self.kind + } + + #[cfg(feature = "syntax")] + pub(crate) fn syntax(err: regex_syntax::Error) -> BuildError { + BuildError { kind: BuildErrorKind::Syntax(err) } + } + + pub(crate) fn captures(err: captures::GroupInfoError) -> BuildError { + BuildError { kind: BuildErrorKind::Captures(err) } + } + + pub(crate) fn word(err: look::UnicodeWordBoundaryError) -> BuildError { + BuildError { kind: BuildErrorKind::Word(err) } + } + + pub(crate) fn too_many_patterns(given: usize) -> BuildError { + let limit = PatternID::LIMIT; + BuildError { kind: BuildErrorKind::TooManyPatterns { given, limit } } + } + + pub(crate) fn too_many_states(given: usize) -> BuildError { + let limit = StateID::LIMIT; + BuildError { kind: BuildErrorKind::TooManyStates { given, limit } } + } + + pub(crate) fn exceeded_size_limit(limit: usize) -> BuildError { + BuildError { kind: BuildErrorKind::ExceededSizeLimit { limit } } + } + + pub(crate) fn invalid_capture_index(index: u32) -> BuildError { + BuildError { kind: BuildErrorKind::InvalidCaptureIndex { index } } + } + + #[cfg(feature = "syntax")] + pub(crate) fn unsupported_captures() -> BuildError { + BuildError { kind: BuildErrorKind::UnsupportedCaptures } + } +} + +#[cfg(feature = "std")] +impl std::error::Error for BuildError { + fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { + match self.kind() { + #[cfg(feature = "syntax")] + BuildErrorKind::Syntax(ref err) => Some(err), + BuildErrorKind::Captures(ref err) => Some(err), + _ => None, + } + } +} + +impl core::fmt::Display for BuildError { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + match self.kind() { + #[cfg(feature = "syntax")] + BuildErrorKind::Syntax(_) => write!(f, "error parsing regex"), + BuildErrorKind::Captures(_) => { + write!(f, "error with capture groups") + } + BuildErrorKind::Word(_) => { + write!(f, "NFA contains Unicode word boundary") + } + BuildErrorKind::TooManyPatterns { given, limit } => write!( + f, + "attempted to compile {given} patterns, \ + which exceeds the limit of {limit}", + ), + BuildErrorKind::TooManyStates { given, limit } => write!( + f, + "attempted to compile {given} NFA states, \ + which exceeds the limit of {limit}", + ), + BuildErrorKind::ExceededSizeLimit { limit } => write!( + f, + "heap usage during NFA compilation exceeded limit of {limit}", + ), + BuildErrorKind::InvalidCaptureIndex { index } => write!( + f, + "capture group index {index} is invalid \ + (too big or discontinuous)", + ), + #[cfg(feature = "syntax")] + BuildErrorKind::UnsupportedCaptures => write!( + f, + "currently captures must be disabled when compiling \ + a reverse NFA", + ), + } + } +} diff --git a/vendor/regex-automata/src/nfa/thompson/literal_trie.rs b/vendor/regex-automata/src/nfa/thompson/literal_trie.rs new file mode 100644 index 00000000000000..08793cd6dc760a --- /dev/null +++ b/vendor/regex-automata/src/nfa/thompson/literal_trie.rs @@ -0,0 +1,528 @@ +use core::mem; + +use alloc::{vec, vec::Vec}; + +use crate::{ + nfa::thompson::{self, compiler::ThompsonRef, BuildError, Builder}, + util::primitives::{IteratorIndexExt, StateID}, +}; + +/// A trie that preserves leftmost-first match semantics. +/// +/// This is a purpose-built data structure for optimizing 'lit1|lit2|..|litN' +/// patterns. 
It can *only* handle alternations of literals, which makes it +/// somewhat restricted in its scope, but literal alternations are fairly +/// common. +/// +/// At a 5,000 foot level, the main idea of this trie is make an alternation of +/// literals look more like a DFA than an NFA via epsilon removal. +/// +/// More precisely, the main issue is in how alternations are compiled into +/// a Thompson NFA. Namely, each alternation gets a single NFA "union" state +/// with an epsilon transition for every branch of the alternation pointing to +/// an NFA state corresponding to the start of that branch. The main problem +/// with this representation is the cost of computing an epsilon closure. Once +/// you hit the alternation's start state, it acts as a sort of "clog" that +/// requires you to traverse all of the epsilon transitions to compute the full +/// closure. +/// +/// While fixing such clogs in the general case is pretty tricky without going +/// to a DFA (or perhaps a Glushkov NFA, but that comes with other problems). +/// But at least in the case of an alternation of literals, we can convert +/// that to a prefix trie without too much cost. In theory, that's all you +/// really need to do: build the trie and then compile it to a Thompson NFA. +/// For example, if you have the pattern 'bar|baz|foo', then using a trie, it +/// is transformed to something like 'b(a(r|z))|f'. This reduces the clog by +/// reducing the number of epsilon transitions out of the alternation's start +/// state from 3 to 2 (it actually gets down to 1 when you use a sparse state, +/// which we do below). It's a small effect here, but when your alternation is +/// huge, the savings is also huge. +/// +/// And that is... essentially what a LiteralTrie does. But there is one +/// hiccup. Consider a regex like 'sam|samwise'. How does a prefix trie compile +/// that when leftmost-first semantics are used? If 'sam|samwise' was the +/// entire regex, then you could just drop the 'samwise' branch entirely since +/// it is impossible to match ('sam' will always take priority, and since it +/// is a prefix of 'samwise', 'samwise' will never match). But what about the +/// regex '\b(sam|samwise)\b'? In that case, you can't remove 'samwise' because +/// it might match when 'sam' doesn't fall on a word boundary. +/// +/// The main idea is that 'sam|samwise' can be translated to 'sam(?:|wise)', +/// which is a precisely equivalent regex that also gets rid of the clog. +/// +/// Another example is 'zapper|z|zap'. That gets translated to +/// 'z(?:apper||ap)'. +/// +/// We accomplish this by giving each state in the trie multiple "chunks" of +/// transitions. Each chunk barrier represents a match. The idea is that once +/// you know a match occurs, none of the transitions after the match can be +/// re-ordered and mixed in with the transitions before the match. Otherwise, +/// the match semantics could be changed. +/// +/// See the 'State' data type for a bit more detail. +/// +/// Future work: +/// +/// * In theory, it would be nice to generalize the idea of removing clogs and +/// apply it to the NFA graph itself. Then this could in theory work for +/// case insensitive alternations of literals, or even just alternations where +/// each branch starts with a non-epsilon transition. +/// * Could we instead use the Aho-Corasick algorithm here? The aho-corasick +/// crate deals with leftmost-first matches correctly, but I think this implies +/// encoding failure transitions into a Thompson NFA somehow. 
Which seems fine, +/// because failure transitions are just unconditional epsilon transitions? +/// * Or perhaps even better, could we use an aho_corasick::AhoCorasick +/// directly? At time of writing, 0.7 is the current version of the +/// aho-corasick crate, and that definitely cannot be used as-is. But if we +/// expose the underlying finite state machine API, then could we use it? That +/// would be super. If we could figure that out, it might also lend itself to +/// more general composition of finite state machines. +#[derive(Clone)] +pub(crate) struct LiteralTrie { + /// The set of trie states. Each state contains one or more chunks, where + /// each chunk is a sparse set of transitions to other states. A leaf state + /// is always a match state that contains only empty chunks (i.e., no + /// transitions). + states: Vec, + /// Whether to add literals in reverse to the trie. Useful when building + /// a reverse NFA automaton. + rev: bool, +} + +impl LiteralTrie { + /// Create a new literal trie that adds literals in the forward direction. + pub(crate) fn forward() -> LiteralTrie { + let root = State::default(); + LiteralTrie { states: vec![root], rev: false } + } + + /// Create a new literal trie that adds literals in reverse. + pub(crate) fn reverse() -> LiteralTrie { + let root = State::default(); + LiteralTrie { states: vec![root], rev: true } + } + + /// Add the given literal to this trie. + /// + /// If the literal could not be added because the `StateID` space was + /// exhausted, then an error is returned. If an error returns, the trie + /// is in an unspecified state. + pub(crate) fn add(&mut self, bytes: &[u8]) -> Result<(), BuildError> { + let mut prev = StateID::ZERO; + let mut it = bytes.iter().copied(); + while let Some(b) = if self.rev { it.next_back() } else { it.next() } { + prev = self.get_or_add_state(prev, b)?; + } + self.states[prev].add_match(); + Ok(()) + } + + /// If the given transition is defined, then return the next state ID. + /// Otherwise, add the transition to `from` and point it to a new state. + /// + /// If a new state ID could not be allocated, then an error is returned. + fn get_or_add_state( + &mut self, + from: StateID, + byte: u8, + ) -> Result { + let active = self.states[from].active_chunk(); + match active.binary_search_by_key(&byte, |t| t.byte) { + Ok(i) => Ok(active[i].next), + Err(i) => { + // Add a new state and get its ID. + let next = StateID::new(self.states.len()).map_err(|_| { + BuildError::too_many_states(self.states.len()) + })?; + self.states.push(State::default()); + // Offset our position to account for all transitions and not + // just the ones in the active chunk. + let i = self.states[from].active_chunk_start() + i; + let t = Transition { byte, next }; + self.states[from].transitions.insert(i, t); + Ok(next) + } + } + } + + /// Compile this literal trie to the NFA builder given. + /// + /// This forwards any errors that may occur while using the given builder. + pub(crate) fn compile( + &self, + builder: &mut Builder, + ) -> Result { + // Compilation proceeds via depth-first traversal of the trie. + // + // This is overall pretty brutal. The recursive version of this is + // deliciously simple. (See 'compile_to_hir' below for what it might + // look like.) But recursion on a trie means your call stack grows + // in accordance with the longest literal, which just does not seem + // appropriate. So we push the call stack to the heap. 
But as a result, + // the trie traversal becomes pretty brutal because we essentially + // have to encode the state of a double for-loop into an explicit call + // frame. If someone can simplify this without using recursion, that'd + // be great. + + // 'end' is our match state for this trie, but represented in the the + // NFA. Any time we see a match in the trie, we insert a transition + // from the current state we're in to 'end'. + let end = builder.add_empty()?; + let mut stack = vec![]; + let mut f = Frame::new(&self.states[StateID::ZERO]); + loop { + if let Some(t) = f.transitions.next() { + if self.states[t.next].is_leaf() { + f.sparse.push(thompson::Transition { + start: t.byte, + end: t.byte, + next: end, + }); + } else { + f.sparse.push(thompson::Transition { + start: t.byte, + end: t.byte, + // This is a little funny, but when the frame we create + // below completes, it will pop this parent frame off + // and modify this transition to point to the correct + // state. + next: StateID::ZERO, + }); + stack.push(f); + f = Frame::new(&self.states[t.next]); + } + continue; + } + // At this point, we have visited all transitions in f.chunk, so + // add it as a sparse NFA state. Unless the chunk was empty, in + // which case, we don't do anything. + if !f.sparse.is_empty() { + let chunk_id = if f.sparse.len() == 1 { + builder.add_range(f.sparse.pop().unwrap())? + } else { + let sparse = mem::replace(&mut f.sparse, vec![]); + builder.add_sparse(sparse)? + }; + f.union.push(chunk_id); + } + // Now we need to look to see if there are other chunks to visit. + if let Some(chunk) = f.chunks.next() { + // If we're here, it means we're on the second (or greater) + // chunk, which implies there is a match at this point. So + // connect this state to the final end state. + f.union.push(end); + // Advance to the next chunk. + f.transitions = chunk.iter(); + continue; + } + // Now that we are out of chunks, we have completely visited + // this state. So turn our union of chunks into an NFA union + // state, and add that union state to the parent state's current + // sparse state. (If there is no parent, we're done.) + let start = builder.add_union(f.union)?; + match stack.pop() { + None => { + return Ok(ThompsonRef { start, end }); + } + Some(mut parent) => { + // OK because the only way a frame gets pushed on to the + // stack (aside from the root) is when a transition has + // been added to 'sparse'. + parent.sparse.last_mut().unwrap().next = start; + f = parent; + } + } + } + } + + /// Converts this trie to an equivalent HIR expression. + /// + /// We don't actually use this, but it's useful for tests. In particular, + /// it provides a (somewhat) human readable representation of the trie + /// itself. + #[cfg(test)] + fn compile_to_hir(&self) -> regex_syntax::hir::Hir { + self.compile_state_to_hir(StateID::ZERO) + } + + /// The recursive implementation of 'to_hir'. + /// + /// Notice how simple this is compared to 'compile' above. 'compile' could + /// be similarly simple, but we opt to not use recursion in order to avoid + /// overflowing the stack in the case of a longer literal. 
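+    ///
+    /// For example (mirroring the `zap` test below): calling this on the
+    /// root of a trie containing "zapper", "z" and "zap" produces the HIR
+    /// equivalent of `z(?:apper||ap)`.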
+ #[cfg(test)] + fn compile_state_to_hir(&self, sid: StateID) -> regex_syntax::hir::Hir { + use regex_syntax::hir::Hir; + + let mut alt = vec![]; + for (i, chunk) in self.states[sid].chunks().enumerate() { + if i > 0 { + alt.push(Hir::empty()); + } + if chunk.is_empty() { + continue; + } + let mut chunk_alt = vec![]; + for t in chunk.iter() { + chunk_alt.push(Hir::concat(vec![ + Hir::literal(vec![t.byte]), + self.compile_state_to_hir(t.next), + ])); + } + alt.push(Hir::alternation(chunk_alt)); + } + Hir::alternation(alt) + } +} + +impl core::fmt::Debug for LiteralTrie { + fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { + writeln!(f, "LiteralTrie(")?; + for (sid, state) in self.states.iter().with_state_ids() { + writeln!(f, "{:06?}: {:?}", sid.as_usize(), state)?; + } + writeln!(f, ")")?; + Ok(()) + } +} + +/// An explicit stack frame used for traversing the trie without using +/// recursion. +/// +/// Each frame is tied to the traversal of a single trie state. The frame is +/// dropped once the entire state (and all of its children) have been visited. +/// The "output" of compiling a state is the 'union' vector, which is turn +/// converted to a NFA union state. Each branch of the union corresponds to a +/// chunk in the trie state. +/// +/// 'sparse' corresponds to the set of transitions for a particular chunk in a +/// trie state. It is ultimately converted to an NFA sparse state. The 'sparse' +/// field, after being converted to a sparse NFA state, is reused for any +/// subsequent chunks in the trie state, if any exist. +#[derive(Debug)] +struct Frame<'a> { + /// The remaining chunks to visit for a trie state. + chunks: StateChunksIter<'a>, + /// The transitions of the current chunk that we're iterating over. Since + /// every trie state has at least one chunk, every frame is initialized + /// with the first chunk's transitions ready to be consumed. + transitions: core::slice::Iter<'a, Transition>, + /// The NFA state IDs pointing to the start of each chunk compiled by + /// this trie state. This ultimately gets converted to an NFA union once + /// the entire trie state (and all of its children) have been compiled. + /// The order of these matters for leftmost-first match semantics, since + /// earlier matches in the union are preferred over later ones. + union: Vec, + /// The actual NFA transitions for a single chunk in a trie state. This + /// gets converted to an NFA sparse state, and its corresponding NFA state + /// ID should get added to 'union'. + sparse: Vec, +} + +impl<'a> Frame<'a> { + /// Create a new stack frame for trie traversal. This initializes the + /// 'transitions' iterator to the transitions for the first chunk, with the + /// 'chunks' iterator being every chunk after the first one. + fn new(state: &'a State) -> Frame<'a> { + let mut chunks = state.chunks(); + // every state has at least 1 chunk + let chunk = chunks.next().unwrap(); + let transitions = chunk.iter(); + Frame { chunks, transitions, union: vec![], sparse: vec![] } + } +} + +/// A state in a trie. +/// +/// This uses a sparse representation. Since we don't use literal tries +/// for searching, and ultimately (and compilation requires visiting every +/// transition anyway), we use a sparse representation for transitions. This +/// means we save on memory, at the expense of 'LiteralTrie::add' being perhaps +/// a bit slower. +/// +/// While 'transitions' is pretty standard as far as tries goes, the 'chunks' +/// piece here is more unusual. 
In effect, 'chunks' defines a partitioning +/// of 'transitions', where each chunk corresponds to a distinct set of +/// transitions. The key invariant is that a transition in one chunk cannot +/// be moved to another chunk. This is the secret sauce that preserve +/// leftmost-first match semantics. +/// +/// A new chunk is added whenever we mark a state as a match state. Once a +/// new chunk is added, the old active chunk is frozen and is never mutated +/// again. The new chunk becomes the active chunk, which is defined as +/// '&transitions[chunks.last().map_or(0, |c| c.1)..]'. Thus, a state where +/// 'chunks' is empty actually contains one chunk. Thus, every state contains +/// at least one (possibly empty) chunk. +/// +/// A "leaf" state is a state that has no outgoing transitions (so +/// 'transitions' is empty). Note that there is no way for a leaf state to be a +/// non-matching state. (Although while building the trie, within 'add', a leaf +/// state may exist while not containing any matches. But this invariant is +/// only broken within 'add'. Once 'add' returns, the invariant is upheld.) +#[derive(Clone, Default)] +struct State { + transitions: Vec, + chunks: Vec<(usize, usize)>, +} + +impl State { + /// Mark this state as a match state and freeze the active chunk such that + /// it can not be further mutated. + fn add_match(&mut self) { + // This is not strictly necessary, but there's no point in recording + // another match by adding another chunk if the state has no + // transitions. Note though that we only skip this if we already know + // this is a match state, which is only true if 'chunks' is not empty. + // Basically, if we didn't do this, nothing semantically would change, + // but we'd end up pushing another chunk and potentially triggering an + // alloc. + if self.transitions.is_empty() && !self.chunks.is_empty() { + return; + } + let chunk_start = self.active_chunk_start(); + let chunk_end = self.transitions.len(); + self.chunks.push((chunk_start, chunk_end)); + } + + /// Returns true if and only if this state is a leaf state. That is, a + /// state that has no outgoing transitions. + fn is_leaf(&self) -> bool { + self.transitions.is_empty() + } + + /// Returns an iterator over all of the chunks (including the currently + /// active chunk) in this state. Since the active chunk is included, the + /// iterator is guaranteed to always yield at least one chunk (although the + /// chunk may be empty). + fn chunks(&self) -> StateChunksIter<'_> { + StateChunksIter { + transitions: &*self.transitions, + chunks: self.chunks.iter(), + active: Some(self.active_chunk()), + } + } + + /// Returns the active chunk as a slice of transitions. + fn active_chunk(&self) -> &[Transition] { + let start = self.active_chunk_start(); + &self.transitions[start..] + } + + /// Returns the index into 'transitions' where the active chunk starts. + fn active_chunk_start(&self) -> usize { + self.chunks.last().map_or(0, |&(_, end)| end) + } +} + +impl core::fmt::Debug for State { + fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { + let mut spacing = " "; + for (i, chunk) in self.chunks().enumerate() { + if i > 0 { + write!(f, "{spacing}MATCH")?; + } + spacing = ""; + for (j, t) in chunk.iter().enumerate() { + spacing = " "; + if j == 0 && i > 0 { + write!(f, " ")?; + } else if j > 0 { + write!(f, ", ")?; + } + write!(f, "{t:?}")?; + } + } + Ok(()) + } +} + +/// An iterator over all of the chunks in a state, including the active chunk. 
+/// +/// This iterator is created by `State::chunks`. We name this iterator so that +/// we can include it in the `Frame` type for non-recursive trie traversal. +#[derive(Debug)] +struct StateChunksIter<'a> { + transitions: &'a [Transition], + chunks: core::slice::Iter<'a, (usize, usize)>, + active: Option<&'a [Transition]>, +} + +impl<'a> Iterator for StateChunksIter<'a> { + type Item = &'a [Transition]; + + fn next(&mut self) -> Option<&'a [Transition]> { + if let Some(&(start, end)) = self.chunks.next() { + return Some(&self.transitions[start..end]); + } + if let Some(chunk) = self.active.take() { + return Some(chunk); + } + None + } +} + +/// A single transition in a trie to another state. +#[derive(Clone, Copy)] +struct Transition { + byte: u8, + next: StateID, +} + +impl core::fmt::Debug for Transition { + fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { + write!( + f, + "{:?} => {}", + crate::util::escape::DebugByte(self.byte), + self.next.as_usize() + ) + } +} + +#[cfg(test)] +mod tests { + use bstr::B; + use regex_syntax::hir::Hir; + + use super::*; + + #[test] + fn zap() { + let mut trie = LiteralTrie::forward(); + trie.add(b"zapper").unwrap(); + trie.add(b"z").unwrap(); + trie.add(b"zap").unwrap(); + + let got = trie.compile_to_hir(); + let expected = Hir::concat(vec![ + Hir::literal(B("z")), + Hir::alternation(vec![ + Hir::literal(B("apper")), + Hir::empty(), + Hir::literal(B("ap")), + ]), + ]); + assert_eq!(expected, got); + } + + #[test] + fn maker() { + let mut trie = LiteralTrie::forward(); + trie.add(b"make").unwrap(); + trie.add(b"maple").unwrap(); + trie.add(b"maker").unwrap(); + + let got = trie.compile_to_hir(); + let expected = Hir::concat(vec![ + Hir::literal(B("ma")), + Hir::alternation(vec![ + Hir::concat(vec![ + Hir::literal(B("ke")), + Hir::alternation(vec![Hir::empty(), Hir::literal(B("r"))]), + ]), + Hir::literal(B("ple")), + ]), + ]); + assert_eq!(expected, got); + } +} diff --git a/vendor/regex-automata/src/nfa/thompson/map.rs b/vendor/regex-automata/src/nfa/thompson/map.rs new file mode 100644 index 00000000000000..7f074a353b93da --- /dev/null +++ b/vendor/regex-automata/src/nfa/thompson/map.rs @@ -0,0 +1,296 @@ +// This module contains a couple simple and purpose built hash maps. The key +// trade off they make is that they serve as caches rather than true maps. That +// is, inserting a new entry may cause eviction of another entry. This gives +// us two things. First, there's less overhead associated with inserts and +// lookups. Secondly, it lets us control our memory usage. +// +// These maps are used in some fairly hot code when generating NFA states for +// large Unicode character classes. +// +// Instead of exposing a rich hashmap entry API, we just permit the caller to +// produce a hash of the key directly. The hash can then be reused for both +// lookups and insertions at the cost of leaking abstraction a bit. But these +// are for internal use only, so it's fine. +// +// The Utf8BoundedMap is used for Daciuk's algorithm for constructing a +// (almost) minimal DFA for large Unicode character classes in linear time. +// (Daciuk's algorithm is always used when compiling forward NFAs. For reverse +// NFAs, it's only used when the compiler is configured to 'shrink' the NFA, +// since there's a bit more expense in the reverse direction.) +// +// The Utf8SuffixMap is used when compiling large Unicode character classes for +// reverse NFAs when 'shrink' is disabled. 
Specifically, it augments the naive +// construction of UTF-8 automata by caching common suffixes. This doesn't +// get the same space savings as Daciuk's algorithm, but it's basically as +// fast as the naive approach and typically winds up using less memory (since +// it generates smaller NFAs) despite the presence of the cache. +// +// These maps effectively represent caching mechanisms for sparse and +// byte-range NFA states, respectively. The former represents a single NFA +// state with many transitions of equivalent priority while the latter +// represents a single NFA state with a single transition. (Neither state ever +// has or is an epsilon transition.) Thus, they have different key types. It's +// likely we could make one generic map, but the machinery didn't seem worth +// it. They are simple enough. + +use alloc::{vec, vec::Vec}; + +use crate::{ + nfa::thompson::Transition, + util::{ + int::{Usize, U64}, + primitives::StateID, + }, +}; + +// Basic FNV-1a hash constants as described in: +// https://en.wikipedia.org/wiki/Fowler%E2%80%93Noll%E2%80%93Vo_hash_function +const PRIME: u64 = 1099511628211; +const INIT: u64 = 14695981039346656037; + +/// A bounded hash map where the key is a sequence of NFA transitions and the +/// value is a pre-existing NFA state ID. +/// +/// std's hashmap can be used for this, however, this map has two important +/// advantages. Firstly, it has lower overhead. Secondly, it permits us to +/// control our memory usage by limited the number of slots. In general, the +/// cost here is that this map acts as a cache. That is, inserting a new entry +/// may remove an old entry. We are okay with this, since it does not impact +/// correctness in the cases where it is used. The only effect that dropping +/// states from the cache has is that the resulting NFA generated may be bigger +/// than it otherwise would be. +/// +/// This improves benchmarks that compile large Unicode character classes, +/// since it makes the generation of (almost) minimal UTF-8 automaton faster. +/// Specifically, one could observe the difference with std's hashmap via +/// something like the following benchmark: +/// +/// hyperfine "regex-cli debug thompson -qr --captures none '\w{90} ecurB'" +/// +/// But to observe that difference, you'd have to modify the code to use +/// std's hashmap. +/// +/// It is quite possible that there is a better way to approach this problem. +/// For example, if there happens to be a very common state that collides with +/// a lot of less frequent states, then we could wind up with very poor caching +/// behavior. Alas, the effectiveness of this cache has not been measured. +/// Instead, ad hoc experiments suggest that it is "good enough." Additional +/// smarts (such as an LRU eviction policy) have to be weighed against the +/// amount of extra time they cost. +#[derive(Clone, Debug)] +pub struct Utf8BoundedMap { + /// The current version of this map. Only entries with matching versions + /// are considered during lookups. If an entry is found with a mismatched + /// version, then the map behaves as if the entry does not exist. + /// + /// This makes it possible to clear the map by simply incrementing the + /// version number instead of actually deallocating any storage. + version: u16, + /// The total number of entries this map can store. + capacity: usize, + /// The actual entries, keyed by hash. Collisions between different states + /// result in the old state being dropped. + map: Vec, +} + +/// An entry in this map. 
+#[derive(Clone, Debug, Default)] +struct Utf8BoundedEntry { + /// The version of the map used to produce this entry. If this entry's + /// version does not match the current version of the map, then the map + /// should behave as if this entry does not exist. + version: u16, + /// The key, which is a sorted sequence of non-overlapping NFA transitions. + key: Vec, + /// The state ID corresponding to the state containing the transitions in + /// this entry. + val: StateID, +} + +impl Utf8BoundedMap { + /// Create a new bounded map with the given capacity. The map will never + /// grow beyond the given size. + /// + /// Note that this does not allocate. Instead, callers must call `clear` + /// before using this map. `clear` will allocate space if necessary. + /// + /// This avoids the need to pay for the allocation of this map when + /// compiling regexes that lack large Unicode character classes. + pub fn new(capacity: usize) -> Utf8BoundedMap { + assert!(capacity > 0); + Utf8BoundedMap { version: 0, capacity, map: vec![] } + } + + /// Clear this map of all entries, but permit the reuse of allocation + /// if possible. + /// + /// This must be called before the map can be used. + pub fn clear(&mut self) { + if self.map.is_empty() { + self.map = vec![Utf8BoundedEntry::default(); self.capacity]; + } else { + self.version = self.version.wrapping_add(1); + // If we loop back to version 0, then we forcefully clear the + // entire map. Otherwise, it might be possible to incorrectly + // match entries used to generate other NFAs. + if self.version == 0 { + self.map = vec![Utf8BoundedEntry::default(); self.capacity]; + } + } + } + + /// Return a hash of the given transitions. + pub fn hash(&self, key: &[Transition]) -> usize { + let mut h = INIT; + for t in key { + h = (h ^ u64::from(t.start)).wrapping_mul(PRIME); + h = (h ^ u64::from(t.end)).wrapping_mul(PRIME); + h = (h ^ t.next.as_u64()).wrapping_mul(PRIME); + } + (h % self.map.len().as_u64()).as_usize() + } + + /// Retrieve the cached state ID corresponding to the given key. The hash + /// given must have been computed with `hash` using the same key value. + /// + /// If there is no cached state with the given transitions, then None is + /// returned. + pub fn get(&mut self, key: &[Transition], hash: usize) -> Option { + let entry = &self.map[hash]; + if entry.version != self.version { + return None; + } + // There may be a hash collision, so we need to confirm real equality. + if entry.key != key { + return None; + } + Some(entry.val) + } + + /// Add a cached state to this map with the given key. Callers should + /// ensure that `state_id` points to a state that contains precisely the + /// NFA transitions given. + /// + /// `hash` must have been computed using the `hash` method with the same + /// key. + pub fn set( + &mut self, + key: Vec, + hash: usize, + state_id: StateID, + ) { + self.map[hash] = + Utf8BoundedEntry { version: self.version, key, val: state_id }; + } +} + +/// A cache of suffixes used to modestly compress UTF-8 automata for large +/// Unicode character classes. +#[derive(Clone, Debug)] +pub struct Utf8SuffixMap { + /// The current version of this map. Only entries with matching versions + /// are considered during lookups. If an entry is found with a mismatched + /// version, then the map behaves as if the entry does not exist. + version: u16, + /// The total number of entries this map can store. + capacity: usize, + /// The actual entries, keyed by hash. 
+
+/// A cache of suffixes used to modestly compress UTF-8 automata for large
+/// Unicode character classes.
+#[derive(Clone, Debug)]
+pub struct Utf8SuffixMap {
+    /// The current version of this map. Only entries with matching versions
+    /// are considered during lookups. If an entry is found with a mismatched
+    /// version, then the map behaves as if the entry does not exist.
+    version: u16,
+    /// The total number of entries this map can store.
+    capacity: usize,
+    /// The actual entries, keyed by hash. Collisions between different states
+    /// result in the old state being dropped.
+    map: Vec<Utf8SuffixEntry>,
+}
+
+/// A key that uniquely identifies an NFA state. It is a triple that represents
+/// a transition from one state for a particular byte range.
+#[derive(Clone, Debug, Default, Eq, PartialEq)]
+pub struct Utf8SuffixKey {
+    pub from: StateID,
+    pub start: u8,
+    pub end: u8,
+}
+
+/// An entry in this map.
+#[derive(Clone, Debug, Default)]
+struct Utf8SuffixEntry {
+    /// The version of the map used to produce this entry. If this entry's
+    /// version does not match the current version of the map, then the map
+    /// should behave as if this entry does not exist.
+    version: u16,
+    /// The key, which consists of a transition in a particular state.
+    key: Utf8SuffixKey,
+    /// The identifier that the transition in the key maps to.
+    val: StateID,
+}
+
+impl Utf8SuffixMap {
+    /// Create a new bounded map with the given capacity. The map will never
+    /// grow beyond the given size.
+    ///
+    /// Note that this does not allocate. Instead, callers must call `clear`
+    /// before using this map. `clear` will allocate space if necessary.
+    ///
+    /// This avoids the need to pay for the allocation of this map when
+    /// compiling regexes that lack large Unicode character classes.
+    pub fn new(capacity: usize) -> Utf8SuffixMap {
+        assert!(capacity > 0);
+        Utf8SuffixMap { version: 0, capacity, map: vec![] }
+    }
+
+    /// Clear this map of all entries, but permit the reuse of allocation
+    /// if possible.
+    ///
+    /// This must be called before the map can be used.
+    pub fn clear(&mut self) {
+        if self.map.is_empty() {
+            self.map = vec![Utf8SuffixEntry::default(); self.capacity];
+        } else {
+            self.version = self.version.wrapping_add(1);
+            if self.version == 0 {
+                self.map = vec![Utf8SuffixEntry::default(); self.capacity];
+            }
+        }
+    }
+
+    /// Return a hash of the given transition.
+    pub fn hash(&self, key: &Utf8SuffixKey) -> usize {
+        // Basic FNV-1a hash as described:
+        // https://en.wikipedia.org/wiki/Fowler%E2%80%93Noll%E2%80%93Vo_hash_function
+        const PRIME: u64 = 1099511628211;
+        const INIT: u64 = 14695981039346656037;
+
+        let mut h = INIT;
+        h = (h ^ key.from.as_u64()).wrapping_mul(PRIME);
+        h = (h ^ u64::from(key.start)).wrapping_mul(PRIME);
+        h = (h ^ u64::from(key.end)).wrapping_mul(PRIME);
+        (h % self.map.len().as_u64()).as_usize()
+    }
+
+    /// Retrieve the cached state ID corresponding to the given key. The hash
+    /// given must have been computed with `hash` using the same key value.
+    ///
+    /// If there is no cached state with the given key, then None is returned.
+    pub fn get(
+        &mut self,
+        key: &Utf8SuffixKey,
+        hash: usize,
+    ) -> Option<StateID> {
+        let entry = &self.map[hash];
+        if entry.version != self.version {
+            return None;
+        }
+        if key != &entry.key {
+            return None;
+        }
+        Some(entry.val)
+    }
+
+    /// Add a cached state to this map with the given key. Callers should
+    /// ensure that `state_id` points to a state that contains precisely the
+    /// NFA transition given.
+    ///
+    /// `hash` must have been computed using the `hash` method with the same
+    /// key.
+    pub fn set(&mut self, key: Utf8SuffixKey, hash: usize, state_id: StateID) {
+        self.map[hash] =
+            Utf8SuffixEntry { version: self.version, key, val: state_id };
+    }
+}
diff --git a/vendor/regex-automata/src/nfa/thompson/mod.rs b/vendor/regex-automata/src/nfa/thompson/mod.rs
new file mode 100644
index 00000000000000..dc7effef1df36b
--- /dev/null
+++ b/vendor/regex-automata/src/nfa/thompson/mod.rs
@@ -0,0 +1,81 @@
+/*!
+Defines a Thompson NFA and provides the [`PikeVM`](pikevm::PikeVM) and +[`BoundedBacktracker`](backtrack::BoundedBacktracker) regex engines. + +A Thompson NFA (non-deterministic finite automaton) is arguably _the_ central +data type in this library. It is the result of what is commonly referred to as +"regex compilation." That is, turning a regex pattern from its concrete syntax +string into something that can run a search looks roughly like this: + +* A `&str` is parsed into a [`regex-syntax::ast::Ast`](regex_syntax::ast::Ast). +* An `Ast` is translated into a [`regex-syntax::hir::Hir`](regex_syntax::hir::Hir). +* An `Hir` is compiled into a [`NFA`]. +* The `NFA` is then used to build one of a few different regex engines: + * An `NFA` is used directly in the `PikeVM` and `BoundedBacktracker` engines. + * An `NFA` is used by a [hybrid NFA/DFA](crate::hybrid) to build out a DFA's + transition table at search time. + * An `NFA`, assuming it is one-pass, is used to build a full + [one-pass DFA](crate::dfa::onepass) ahead of time. + * An `NFA` is used to build a [full DFA](crate::dfa) ahead of time. + +The [`meta`](crate::meta) regex engine makes all of these choices for you based +on various criteria. However, if you have a lower level use case, _you_ can +build any of the above regex engines and use them directly. But you must start +here by building an `NFA`. + +# Details + +It is perhaps worth expanding a bit more on what it means to go through the +`&str`->`Ast`->`Hir`->`NFA` process. + +* Parsing a string into an `Ast` gives it a structured representation. +Crucially, the size and amount of work done in this step is proportional to the +size of the original string. No optimization or Unicode handling is done at +this point. This means that parsing into an `Ast` has very predictable costs. +Moreover, an `Ast` can be round-tripped back to its original pattern string as +written. +* Translating an `Ast` into an `Hir` is a process by which the structured +representation is simplified down to its most fundamental components. +Translation deals with flags such as case insensitivity by converting things +like `(?i:a)` to `[Aa]`. Translation is also where Unicode tables are consulted +to resolve things like `\p{Emoji}` and `\p{Greek}`. It also flattens each +character class, regardless of how deeply nested it is, into a single sequence +of non-overlapping ranges. All the various literal forms are thrown out in +favor of one common representation. Overall, the `Hir` is small enough to fit +into your head and makes analysis and other tasks much simpler. +* Compiling an `Hir` into an `NFA` formulates the regex into a finite state +machine whose transitions are defined over bytes. For example, an `Hir` might +have a Unicode character class corresponding to a sequence of ranges defined +in terms of `char`. Compilation is then responsible for turning those ranges +into a UTF-8 automaton. That is, an automaton that matches the UTF-8 encoding +of just the codepoints specified by those ranges. Otherwise, the main job of +an `NFA` is to serve as a byte-code of sorts for a virtual machine. It can be +seen as a sequence of instructions for how to match a regex. 
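A rough sketch of driving the `&str` to `Ast` to `Hir` to `NFA` pipeline described above by hand, using the public `regex-syntax` and `regex-automata` APIs. This is editorial and not part of this module; it assumes the `syntax` feature is enabled, and the `compile` function name is illustrative only:

```rust
use regex_automata::nfa::thompson::{Compiler, NFA};
use regex_syntax::{ast::parse::Parser, hir::translate::Translator};

fn compile(pattern: &str) -> Result<NFA, Box<dyn std::error::Error>> {
    // &str -> Ast: predictable cost, proportional to the pattern length.
    let ast = Parser::new().parse(pattern)?;
    // Ast -> Hir: flags, literals and Unicode classes are resolved here.
    let hir = Translator::new().translate(pattern, &ast)?;
    // Hir -> NFA: byte oriented Thompson construction.
    Ok(Compiler::new().build_from_hir(&hir)?)
}
```

In practice one would usually reach for `NFA::new` or a configured `Compiler` directly, which perform these same stages internally.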
+*/ + +#[cfg(feature = "nfa-backtrack")] +pub mod backtrack; +mod builder; +#[cfg(feature = "syntax")] +mod compiler; +mod error; +#[cfg(feature = "syntax")] +mod literal_trie; +#[cfg(feature = "syntax")] +mod map; +mod nfa; +#[cfg(feature = "nfa-pikevm")] +pub mod pikevm; +#[cfg(feature = "syntax")] +mod range_trie; + +pub use self::{ + builder::Builder, + error::BuildError, + nfa::{ + DenseTransitions, PatternIter, SparseTransitions, State, Transition, + NFA, + }, +}; +#[cfg(feature = "syntax")] +pub use compiler::{Compiler, Config, WhichCaptures}; diff --git a/vendor/regex-automata/src/nfa/thompson/nfa.rs b/vendor/regex-automata/src/nfa/thompson/nfa.rs new file mode 100644 index 00000000000000..405aa7533d4936 --- /dev/null +++ b/vendor/regex-automata/src/nfa/thompson/nfa.rs @@ -0,0 +1,2098 @@ +use core::{fmt, mem}; + +use alloc::{boxed::Box, format, string::String, sync::Arc, vec, vec::Vec}; + +#[cfg(feature = "syntax")] +use crate::nfa::thompson::{ + compiler::{Compiler, Config}, + error::BuildError, +}; +use crate::{ + nfa::thompson::builder::Builder, + util::{ + alphabet::{self, ByteClassSet, ByteClasses}, + captures::{GroupInfo, GroupInfoError}, + look::{Look, LookMatcher, LookSet}, + primitives::{ + IteratorIndexExt, PatternID, PatternIDIter, SmallIndex, StateID, + }, + sparse_set::SparseSet, + }, +}; + +/// A byte oriented Thompson non-deterministic finite automaton (NFA). +/// +/// A Thompson NFA is a finite state machine that permits unconditional epsilon +/// transitions, but guarantees that there exists at most one non-epsilon +/// transition for each element in the alphabet for each state. +/// +/// An NFA may be used directly for searching, for analysis or to build +/// a deterministic finite automaton (DFA). +/// +/// # Cheap clones +/// +/// Since an NFA is a core data type in this crate that many other regex +/// engines are based on top of, it is convenient to give ownership of an NFA +/// to said regex engines. Because of this, an NFA uses reference counting +/// internally. Therefore, it is cheap to clone and it is encouraged to do so. +/// +/// # Capabilities +/// +/// Using an NFA for searching via the +/// [`PikeVM`](crate::nfa::thompson::pikevm::PikeVM) provides the most amount +/// of "power" of any regex engine in this crate. Namely, it supports the +/// following in all cases: +/// +/// 1. Detection of a match. +/// 2. Location of a match, including both the start and end offset, in a +/// single pass of the haystack. +/// 3. Location of matching capturing groups. +/// 4. Handles multiple patterns, including (1)-(3) when multiple patterns are +/// present. +/// +/// # Capturing Groups +/// +/// Groups refer to parenthesized expressions inside a regex pattern. They look +/// like this, where `exp` is an arbitrary regex: +/// +/// * `(exp)` - An unnamed capturing group. +/// * `(?Pexp)` or `(?exp)` - A named capturing group. +/// * `(?:exp)` - A non-capturing group. +/// * `(?i:exp)` - A non-capturing group that sets flags. +/// +/// Only the first two forms are said to be _capturing_. Capturing +/// means that the last position at which they match is reportable. The +/// [`Captures`](crate::util::captures::Captures) type provides convenient +/// access to the match positions of capturing groups, which includes looking +/// up capturing groups by their name. +/// +/// # Byte oriented +/// +/// This NFA is byte oriented, which means that all of its transitions are +/// defined on bytes. 
In other words, the alphabet of an NFA consists of the +/// 256 different byte values. +/// +/// While DFAs nearly demand that they be byte oriented for performance +/// reasons, an NFA could conceivably be *Unicode codepoint* oriented. Indeed, +/// a previous version of this NFA supported both byte and codepoint oriented +/// modes. A codepoint oriented mode can work because an NFA fundamentally uses +/// a sparse representation of transitions, which works well with the large +/// sparse space of Unicode codepoints. +/// +/// Nevertheless, this NFA is only byte oriented. This choice is primarily +/// driven by implementation simplicity, and also in part memory usage. In +/// practice, performance between the two is roughly comparable. However, +/// building a DFA (including a hybrid DFA) really wants a byte oriented NFA. +/// So if we do have a codepoint oriented NFA, then we also need to generate +/// byte oriented NFA in order to build an hybrid NFA/DFA. Thus, by only +/// generating byte oriented NFAs, we can produce one less NFA. In other words, +/// if we made our NFA codepoint oriented, we'd need to *also* make it support +/// a byte oriented mode, which is more complicated. But a byte oriented mode +/// can support everything. +/// +/// # Differences with DFAs +/// +/// At the theoretical level, the precise difference between an NFA and a DFA +/// is that, in a DFA, for every state, an input symbol unambiguously refers +/// to a single transition _and_ that an input symbol is required for each +/// transition. At a practical level, this permits DFA implementations to be +/// implemented at their core with a small constant number of CPU instructions +/// for each byte of input searched. In practice, this makes them quite a bit +/// faster than NFAs _in general_. Namely, in order to execute a search for any +/// Thompson NFA, one needs to keep track of a _set_ of states, and execute +/// the possible transitions on all of those states for each input symbol. +/// Overall, this results in much more overhead. To a first approximation, one +/// can expect DFA searches to be about an order of magnitude faster. +/// +/// So why use an NFA at all? The main advantage of an NFA is that it takes +/// linear time (in the size of the pattern string after repetitions have been +/// expanded) to build and linear memory usage. A DFA, on the other hand, may +/// take exponential time and/or space to build. Even in non-pathological +/// cases, DFAs often take quite a bit more memory than their NFA counterparts, +/// _especially_ if large Unicode character classes are involved. Of course, +/// an NFA also provides additional capabilities. For example, it can match +/// Unicode word boundaries on non-ASCII text and resolve the positions of +/// capturing groups. +/// +/// Note that a [`hybrid::regex::Regex`](crate::hybrid::regex::Regex) strikes a +/// good balance between an NFA and a DFA. It avoids the exponential build time +/// of a DFA while maintaining its fast search time. The downside of a hybrid +/// NFA/DFA is that in some cases it can be slower at search time than the NFA. +/// (It also has less functionality than a pure NFA. It cannot handle Unicode +/// word boundaries on non-ASCII text and cannot resolve capturing groups.) +/// +/// # Example +/// +/// This shows how to build an NFA with the default configuration and execute a +/// search using the Pike VM. 
+///
+/// ```
+/// use regex_automata::{nfa::thompson::pikevm::PikeVM, Match};
+///
+/// let re = PikeVM::new(r"foo[0-9]+")?;
+/// let mut cache = re.create_cache();
+/// let mut caps = re.create_captures();
+///
+/// let expected = Some(Match::must(0, 0..8));
+/// re.captures(&mut cache, b"foo12345", &mut caps);
+/// assert_eq!(expected, caps.get_match());
+///
+/// # Ok::<(), Box<dyn std::error::Error>>(())
+/// ```
+///
+/// # Example: resolving capturing groups
+///
+/// This example shows how to parse some simple dates and extract the
+/// components of each date via capturing groups.
+///
+/// ```
+/// # if cfg!(miri) { return Ok(()); } // miri takes too long
+/// use regex_automata::{
+///     nfa::thompson::pikevm::PikeVM,
+///     util::captures::Captures,
+/// };
+///
+/// let vm = PikeVM::new(r"(?P<y>\d{4})-(?P<m>\d{2})-(?P<d>\d{2})")?;
+/// let mut cache = vm.create_cache();
+///
+/// let haystack = "2012-03-14, 2013-01-01 and 2014-07-05";
+/// let all: Vec<Captures> = vm.captures_iter(
+///     &mut cache, haystack.as_bytes()
+/// ).collect();
+/// // There should be a total of 3 matches.
+/// assert_eq!(3, all.len());
+/// // The year from the second match is '2013'.
+/// let span = all[1].get_group_by_name("y").unwrap();
+/// assert_eq!("2013", &haystack[span]);
+///
+/// # Ok::<(), Box<dyn std::error::Error>>(())
+/// ```
+///
+/// This example shows that only the last match of a capturing group is
+/// reported, even if it had to match multiple times for an overall match
+/// to occur.
+///
+/// ```
+/// use regex_automata::{nfa::thompson::pikevm::PikeVM, Span};
+///
+/// let re = PikeVM::new(r"([a-z]){4}")?;
+/// let mut cache = re.create_cache();
+/// let mut caps = re.create_captures();
+///
+/// let haystack = b"quux";
+/// re.captures(&mut cache, haystack, &mut caps);
+/// assert!(caps.is_match());
+/// assert_eq!(Some(Span::from(3..4)), caps.get_group(1));
+///
+/// # Ok::<(), Box<dyn std::error::Error>>(())
+/// ```
+#[derive(Clone)]
+pub struct NFA(
+    // We make NFAs reference counted primarily for two reasons. First is that
+    // the NFA type itself is quite large (at least 0.5KB), and so it makes
+    // sense to put it on the heap by default anyway. Second is that, for Arc
+    // specifically, this enables cheap clones. This tends to be useful because
+    // several structures (the backtracker, the Pike VM, the hybrid NFA/DFA)
+    // all want to hang on to an NFA for use during search time. We could
+    // provide the NFA at search time via a function argument, but this makes
+    // for an unnecessarily annoying API. Instead, we just let each structure
+    // share ownership of the NFA. Using a deep clone would not be smart, since
+    // the NFA can use quite a bit of heap space.
+    Arc<Inner>,
+);
+
+impl NFA {
+    /// Parse the given regular expression using a default configuration and
+    /// build an NFA from it.
+    ///
+    /// If you want a non-default configuration, then use the NFA
+    /// [`Compiler`] with a [`Config`].
+    ///
+    /// # Example
+    ///
+    /// ```
+    /// use regex_automata::{nfa::thompson::pikevm::PikeVM, Match};
+    ///
+    /// let re = PikeVM::new(r"foo[0-9]+")?;
+    /// let (mut cache, mut caps) = (re.create_cache(), re.create_captures());
+    ///
+    /// let expected = Some(Match::must(0, 0..8));
+    /// re.captures(&mut cache, b"foo12345", &mut caps);
+    /// assert_eq!(expected, caps.get_match());
+    ///
+    /// # Ok::<(), Box<dyn std::error::Error>>(())
+    /// ```
+    #[cfg(feature = "syntax")]
+    pub fn new(pattern: &str) -> Result<NFA, BuildError> {
+        NFA::compiler().build(pattern)
+    }
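The cheap-clone property described in the comment inside `NFA` above can be relied on when handing one NFA to several engines. The following is an editorial sketch rather than part of this file; it assumes the `syntax` and `nfa-pikevm` features, and `build_engines` is an illustrative name:

```rust
use regex_automata::nfa::thompson::{pikevm::PikeVM, NFA};

fn build_engines(pattern: &str) -> Result<(PikeVM, PikeVM), Box<dyn std::error::Error>> {
    let nfa = NFA::new(pattern)?;
    // Cloning only bumps the internal Arc's reference count: both engines
    // share the same underlying states instead of deep-copying them.
    let a = PikeVM::new_from_nfa(nfa.clone())?;
    let b = PikeVM::new_from_nfa(nfa)?;
    Ok((a, b))
}
```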
+
+    /// Parse the given regular expressions using a default configuration and
+    /// build a multi-NFA from them.
+    ///
+    /// If you want a non-default configuration, then use the NFA
+    /// [`Compiler`] with a [`Config`].
+    ///
+    /// # Example
+    ///
+    /// ```
+    /// use regex_automata::{nfa::thompson::pikevm::PikeVM, Match};
+    ///
+    /// let re = PikeVM::new_many(&["[0-9]+", "[a-z]+"])?;
+    /// let (mut cache, mut caps) = (re.create_cache(), re.create_captures());
+    ///
+    /// let expected = Some(Match::must(1, 0..3));
+    /// re.captures(&mut cache, b"foo12345bar", &mut caps);
+    /// assert_eq!(expected, caps.get_match());
+    ///
+    /// # Ok::<(), Box<dyn std::error::Error>>(())
+    /// ```
+    #[cfg(feature = "syntax")]
+    pub fn new_many<P: AsRef<str>>(patterns: &[P]) -> Result<NFA, BuildError> {
+        NFA::compiler().build_many(patterns)
+    }
+
+    /// Returns an NFA with a single regex pattern that always matches at every
+    /// position.
+    ///
+    /// # Example
+    ///
+    /// ```
+    /// use regex_automata::{nfa::thompson::{NFA, pikevm::PikeVM}, Match};
+    ///
+    /// let re = PikeVM::new_from_nfa(NFA::always_match())?;
+    /// let (mut cache, mut caps) = (re.create_cache(), re.create_captures());
+    ///
+    /// let expected = Some(Match::must(0, 0..0));
+    /// re.captures(&mut cache, b"", &mut caps);
+    /// assert_eq!(expected, caps.get_match());
+    /// re.captures(&mut cache, b"foo", &mut caps);
+    /// assert_eq!(expected, caps.get_match());
+    ///
+    /// # Ok::<(), Box<dyn std::error::Error>>(())
+    /// ```
+    pub fn always_match() -> NFA {
+        // We could use NFA::new("") here and we'd get the same semantics, but
+        // hand-assembling the NFA (as below) does the same thing with a fewer
+        // number of states. It also avoids needing the 'syntax' feature
+        // enabled.
+        //
+        // Technically all we need is the "match" state, but we add the
+        // "capture" states so that the PikeVM can use this NFA.
+        //
+        // The unwraps below are OK because we add so few states that they will
+        // never exhaust any default limits in any environment.
+        let mut builder = Builder::new();
+        let pid = builder.start_pattern().unwrap();
+        assert_eq!(pid.as_usize(), 0);
+        let start_id =
+            builder.add_capture_start(StateID::ZERO, 0, None).unwrap();
+        let end_id = builder.add_capture_end(StateID::ZERO, 0).unwrap();
+        let match_id = builder.add_match().unwrap();
+        builder.patch(start_id, end_id).unwrap();
+        builder.patch(end_id, match_id).unwrap();
+        let pid = builder.finish_pattern(start_id).unwrap();
+        assert_eq!(pid.as_usize(), 0);
+        builder.build(start_id, start_id).unwrap()
+    }
+
+    /// Returns an NFA that never matches at any position.
+    ///
+    /// This is a convenience routine for creating an NFA with zero patterns.
+    ///
+    /// # Example
+    ///
+    /// ```
+    /// use regex_automata::nfa::thompson::{NFA, pikevm::PikeVM};
+    ///
+    /// let re = PikeVM::new_from_nfa(NFA::never_match())?;
+    /// let (mut cache, mut caps) = (re.create_cache(), re.create_captures());
+    ///
+    /// re.captures(&mut cache, b"", &mut caps);
+    /// assert!(!caps.is_match());
+    /// re.captures(&mut cache, b"foo", &mut caps);
+    /// assert!(!caps.is_match());
+    ///
+    /// # Ok::<(), Box<dyn std::error::Error>>(())
+    /// ```
+    pub fn never_match() -> NFA {
+        // This always succeeds because it only requires one NFA state, which
+        // will never exhaust any (default) limits.
+        let mut builder = Builder::new();
+        let sid = builder.add_fail().unwrap();
+        builder.build(sid, sid).unwrap()
+    }
+
+    /// Return a default configuration for an `NFA`.
+    ///
+    /// This is a convenience routine to avoid needing to import the `Config`
+    /// type when customizing the construction of an NFA.
+ /// + /// # Example + /// + /// This example shows how to build an NFA with a small size limit that + /// results in a compilation error for any regex that tries to use more + /// heap memory than the configured limit. + /// + /// ``` + /// use regex_automata::nfa::thompson::{NFA, pikevm::PikeVM}; + /// + /// let result = PikeVM::builder() + /// .thompson(NFA::config().nfa_size_limit(Some(1_000))) + /// // Remember, \w is Unicode-aware by default and thus huge. + /// .build(r"\w+"); + /// assert!(result.is_err()); + /// ``` + #[cfg(feature = "syntax")] + pub fn config() -> Config { + Config::new() + } + + /// Return a compiler for configuring the construction of an `NFA`. + /// + /// This is a convenience routine to avoid needing to import the + /// [`Compiler`] type in common cases. + /// + /// # Example + /// + /// This example shows how to build an NFA that is permitted match invalid + /// UTF-8. Without the additional syntax configuration here, compilation of + /// `(?-u:.)` would fail because it is permitted to match invalid UTF-8. + /// + /// ``` + /// use regex_automata::{ + /// nfa::thompson::pikevm::PikeVM, + /// util::syntax, + /// Match, + /// }; + /// + /// let re = PikeVM::builder() + /// .syntax(syntax::Config::new().utf8(false)) + /// .build(r"[a-z]+(?-u:.)")?; + /// let (mut cache, mut caps) = (re.create_cache(), re.create_captures()); + /// + /// let expected = Some(Match::must(0, 1..5)); + /// re.captures(&mut cache, b"\xFFabc\xFF", &mut caps); + /// assert_eq!(expected, caps.get_match()); + /// + /// # Ok::<(), Box>(()) + /// ``` + #[cfg(feature = "syntax")] + pub fn compiler() -> Compiler { + Compiler::new() + } + + /// Returns an iterator over all pattern identifiers in this NFA. + /// + /// Pattern IDs are allocated in sequential order starting from zero, + /// where the order corresponds to the order of patterns provided to the + /// [`NFA::new_many`] constructor. + /// + /// # Example + /// + /// ``` + /// use regex_automata::{nfa::thompson::NFA, PatternID}; + /// + /// let nfa = NFA::new_many(&["[0-9]+", "[a-z]+", "[A-Z]+"])?; + /// let pids: Vec = nfa.patterns().collect(); + /// assert_eq!(pids, vec![ + /// PatternID::must(0), + /// PatternID::must(1), + /// PatternID::must(2), + /// ]); + /// + /// # Ok::<(), Box>(()) + /// ``` + pub fn patterns(&self) -> PatternIter<'_> { + PatternIter { + it: PatternID::iter(self.pattern_len()), + _marker: core::marker::PhantomData, + } + } + + /// Returns the total number of regex patterns in this NFA. + /// + /// This may return zero if the NFA was constructed with no patterns. In + /// this case, the NFA can never produce a match for any input. + /// + /// This is guaranteed to be no bigger than [`PatternID::LIMIT`] because + /// NFA construction will fail if too many patterns are added. + /// + /// It is always true that `nfa.patterns().count() == nfa.pattern_len()`. + /// + /// # Example + /// + /// ``` + /// use regex_automata::nfa::thompson::NFA; + /// + /// let nfa = NFA::new_many(&["[0-9]+", "[a-z]+", "[A-Z]+"])?; + /// assert_eq!(3, nfa.pattern_len()); + /// + /// let nfa = NFA::never_match(); + /// assert_eq!(0, nfa.pattern_len()); + /// + /// let nfa = NFA::always_match(); + /// assert_eq!(1, nfa.pattern_len()); + /// + /// # Ok::<(), Box>(()) + /// ``` + #[inline] + pub fn pattern_len(&self) -> usize { + self.0.start_pattern.len() + } + + /// Return the state identifier of the initial anchored state of this NFA. 
+ /// + /// The returned identifier is guaranteed to be a valid index into the + /// slice returned by [`NFA::states`], and is also a valid argument to + /// [`NFA::state`]. + /// + /// # Example + /// + /// This example shows a somewhat contrived example where we can easily + /// predict the anchored starting state. + /// + /// ``` + /// use regex_automata::nfa::thompson::{NFA, State, WhichCaptures}; + /// + /// let nfa = NFA::compiler() + /// .configure(NFA::config().which_captures(WhichCaptures::None)) + /// .build("a")?; + /// let state = nfa.state(nfa.start_anchored()); + /// match *state { + /// State::ByteRange { trans } => { + /// assert_eq!(b'a', trans.start); + /// assert_eq!(b'a', trans.end); + /// } + /// _ => unreachable!("unexpected state"), + /// } + /// + /// # Ok::<(), Box>(()) + /// ``` + #[inline] + pub fn start_anchored(&self) -> StateID { + self.0.start_anchored + } + + /// Return the state identifier of the initial unanchored state of this + /// NFA. + /// + /// This is equivalent to the identifier returned by + /// [`NFA::start_anchored`] when the NFA has no unanchored starting state. + /// + /// The returned identifier is guaranteed to be a valid index into the + /// slice returned by [`NFA::states`], and is also a valid argument to + /// [`NFA::state`]. + /// + /// # Example + /// + /// This example shows that the anchored and unanchored starting states + /// are equivalent when an anchored NFA is built. + /// + /// ``` + /// use regex_automata::nfa::thompson::NFA; + /// + /// let nfa = NFA::new("^a")?; + /// assert_eq!(nfa.start_anchored(), nfa.start_unanchored()); + /// + /// # Ok::<(), Box>(()) + /// ``` + #[inline] + pub fn start_unanchored(&self) -> StateID { + self.0.start_unanchored + } + + /// Return the state identifier of the initial anchored state for the given + /// pattern, or `None` if there is no pattern corresponding to the given + /// identifier. + /// + /// If one uses the starting state for a particular pattern, then the only + /// match that can be returned is for the corresponding pattern. + /// + /// The returned identifier is guaranteed to be a valid index into the + /// slice returned by [`NFA::states`], and is also a valid argument to + /// [`NFA::state`]. + /// + /// # Errors + /// + /// If the pattern doesn't exist in this NFA, then this returns an error. + /// This occurs when `pid.as_usize() >= nfa.pattern_len()`. + /// + /// # Example + /// + /// This example shows that the anchored and unanchored starting states + /// are equivalent when an anchored NFA is built. + /// + /// ``` + /// use regex_automata::{nfa::thompson::NFA, PatternID}; + /// + /// let nfa = NFA::new_many(&["^a", "^b"])?; + /// // The anchored and unanchored states for the entire NFA are the same, + /// // since all of the patterns are anchored. + /// assert_eq!(nfa.start_anchored(), nfa.start_unanchored()); + /// // But the anchored starting states for each pattern are distinct, + /// // because these starting states can only lead to matches for the + /// // corresponding pattern. 
+ /// let anchored = Some(nfa.start_anchored()); + /// assert_ne!(anchored, nfa.start_pattern(PatternID::must(0))); + /// assert_ne!(anchored, nfa.start_pattern(PatternID::must(1))); + /// // Requesting a pattern not in the NFA will result in None: + /// assert_eq!(None, nfa.start_pattern(PatternID::must(2))); + /// + /// # Ok::<(), Box>(()) + /// ``` + #[inline] + pub fn start_pattern(&self, pid: PatternID) -> Option { + self.0.start_pattern.get(pid.as_usize()).copied() + } + + /// Get the byte class set for this NFA. + /// + /// A byte class set is a partitioning of this NFA's alphabet into + /// equivalence classes. Any two bytes in the same equivalence class are + /// guaranteed to never discriminate between a match or a non-match. (The + /// partitioning may not be minimal.) + /// + /// Byte classes are used internally by this crate when building DFAs. + /// Namely, among other optimizations, they enable a space optimization + /// where the DFA's internal alphabet is defined over the equivalence + /// classes of bytes instead of all possible byte values. The former is + /// often quite a bit smaller than the latter, which permits the DFA to use + /// less space for its transition table. + #[inline] + pub(crate) fn byte_class_set(&self) -> &ByteClassSet { + &self.0.byte_class_set + } + + /// Get the byte classes for this NFA. + /// + /// Byte classes represent a partitioning of this NFA's alphabet into + /// equivalence classes. Any two bytes in the same equivalence class are + /// guaranteed to never discriminate between a match or a non-match. (The + /// partitioning may not be minimal.) + /// + /// Byte classes are used internally by this crate when building DFAs. + /// Namely, among other optimizations, they enable a space optimization + /// where the DFA's internal alphabet is defined over the equivalence + /// classes of bytes instead of all possible byte values. The former is + /// often quite a bit smaller than the latter, which permits the DFA to use + /// less space for its transition table. + /// + /// # Example + /// + /// This example shows how to query the class of various bytes. + /// + /// ``` + /// use regex_automata::nfa::thompson::NFA; + /// + /// let nfa = NFA::new("[a-z]+")?; + /// let classes = nfa.byte_classes(); + /// // 'a' and 'z' are in the same class for this regex. + /// assert_eq!(classes.get(b'a'), classes.get(b'z')); + /// // But 'a' and 'A' are not. + /// assert_ne!(classes.get(b'a'), classes.get(b'A')); + /// + /// # Ok::<(), Box>(()) + /// ``` + #[inline] + pub fn byte_classes(&self) -> &ByteClasses { + &self.0.byte_classes + } + + /// Return a reference to the NFA state corresponding to the given ID. + /// + /// This is a convenience routine for `nfa.states()[id]`. + /// + /// # Panics + /// + /// This panics when the given identifier does not reference a valid state. + /// That is, when `id.as_usize() >= nfa.states().len()`. + /// + /// # Example + /// + /// The anchored state for a pattern will typically correspond to a + /// capturing state for that pattern. (Although, this is not an API + /// guarantee!) + /// + /// ``` + /// use regex_automata::{nfa::thompson::{NFA, State}, PatternID}; + /// + /// let nfa = NFA::new("a")?; + /// let state = nfa.state(nfa.start_pattern(PatternID::ZERO).unwrap()); + /// match *state { + /// State::Capture { slot, .. 
} => { + /// assert_eq!(0, slot.as_usize()); + /// } + /// _ => unreachable!("unexpected state"), + /// } + /// + /// # Ok::<(), Box>(()) + /// ``` + #[inline] + pub fn state(&self, id: StateID) -> &State { + &self.states()[id] + } + + /// Returns a slice of all states in this NFA. + /// + /// The slice returned is indexed by `StateID`. This provides a convenient + /// way to access states while following transitions among those states. + /// + /// # Example + /// + /// This demonstrates that disabling UTF-8 mode can shrink the size of the + /// NFA considerably in some cases, especially when using Unicode character + /// classes. + /// + /// ``` + /// # if cfg!(miri) { return Ok(()); } // miri takes too long + /// use regex_automata::nfa::thompson::NFA; + /// + /// let nfa_unicode = NFA::new(r"\w")?; + /// let nfa_ascii = NFA::new(r"(?-u)\w")?; + /// // Yes, a factor of 45 difference. No lie. + /// assert!(40 * nfa_ascii.states().len() < nfa_unicode.states().len()); + /// + /// # Ok::<(), Box>(()) + /// ``` + #[inline] + pub fn states(&self) -> &[State] { + &self.0.states + } + + /// Returns the capturing group info for this NFA. + /// + /// The [`GroupInfo`] provides a way to map to and from capture index + /// and capture name for each pattern. It also provides a mapping from + /// each of the capturing groups in every pattern to their corresponding + /// slot offsets encoded in [`State::Capture`] states. + /// + /// Note that `GroupInfo` uses reference counting internally, such that + /// cloning a `GroupInfo` is very cheap. + /// + /// # Example + /// + /// This example shows how to get a list of all capture group names for + /// a particular pattern. + /// + /// ``` + /// use regex_automata::{nfa::thompson::NFA, PatternID}; + /// + /// let nfa = NFA::new(r"(a)(?Pb)(c)(d)(?Pe)")?; + /// // The first is the implicit group that is always unnamed. The next + /// // 5 groups are the explicit groups found in the concrete syntax above. + /// let expected = vec![None, None, Some("foo"), None, None, Some("bar")]; + /// let got: Vec> = + /// nfa.group_info().pattern_names(PatternID::ZERO).collect(); + /// assert_eq!(expected, got); + /// + /// // Using an invalid pattern ID will result in nothing yielded. + /// let got = nfa.group_info().pattern_names(PatternID::must(999)).count(); + /// assert_eq!(0, got); + /// + /// # Ok::<(), Box>(()) + /// ``` + #[inline] + pub fn group_info(&self) -> &GroupInfo { + &self.0.group_info() + } + + /// Returns true if and only if this NFA has at least one + /// [`Capture`](State::Capture) in its sequence of states. + /// + /// This is useful as a way to perform a quick test before attempting + /// something that does or does not require capture states. For example, + /// some regex engines (like the PikeVM) require capture states in order to + /// work at all. + /// + /// # Example + /// + /// This example shows a few different NFAs and whether they have captures + /// or not. + /// + /// ``` + /// use regex_automata::nfa::thompson::{NFA, WhichCaptures}; + /// + /// // Obviously has capture states. + /// let nfa = NFA::new("(a)")?; + /// assert!(nfa.has_capture()); + /// + /// // Less obviously has capture states, because every pattern has at + /// // least one anonymous capture group corresponding to the match for the + /// // entire pattern. + /// let nfa = NFA::new("a")?; + /// assert!(nfa.has_capture()); + /// + /// // Other than hand building your own NFA, this is the only way to build + /// // an NFA without capturing groups. 
In general, you should only do this + /// // if you don't intend to use any of the NFA-oriented regex engines. + /// // Overall, capturing groups don't have many downsides. Although they + /// // can add a bit of noise to simple NFAs, so it can be nice to disable + /// // them for debugging purposes. + /// // + /// // Notice that 'has_capture' is false here even when we have an + /// // explicit capture group in the pattern. + /// let nfa = NFA::compiler() + /// .configure(NFA::config().which_captures(WhichCaptures::None)) + /// .build("(a)")?; + /// assert!(!nfa.has_capture()); + /// + /// # Ok::<(), Box>(()) + /// ``` + #[inline] + pub fn has_capture(&self) -> bool { + self.0.has_capture + } + + /// Returns true if and only if this NFA can match the empty string. + /// When it returns false, all possible matches are guaranteed to have a + /// non-zero length. + /// + /// This is useful as cheap way to know whether code needs to handle the + /// case of a zero length match. This is particularly important when UTF-8 + /// modes are enabled, as when UTF-8 mode is enabled, empty matches that + /// split a codepoint must never be reported. This extra handling can + /// sometimes be costly, and since regexes matching an empty string are + /// somewhat rare, it can be beneficial to treat such regexes specially. + /// + /// # Example + /// + /// This example shows a few different NFAs and whether they match the + /// empty string or not. Notice the empty string isn't merely a matter + /// of a string of length literally `0`, but rather, whether a match can + /// occur between specific pairs of bytes. + /// + /// ``` + /// use regex_automata::{nfa::thompson::NFA, util::syntax}; + /// + /// // The empty regex matches the empty string. + /// let nfa = NFA::new("")?; + /// assert!(nfa.has_empty(), "empty matches empty"); + /// // The '+' repetition operator requires at least one match, and so + /// // does not match the empty string. + /// let nfa = NFA::new("a+")?; + /// assert!(!nfa.has_empty(), "+ does not match empty"); + /// // But the '*' repetition operator does. + /// let nfa = NFA::new("a*")?; + /// assert!(nfa.has_empty(), "* does match empty"); + /// // And wrapping '+' in an operator that can match an empty string also + /// // causes it to match the empty string too. + /// let nfa = NFA::new("(a+)*")?; + /// assert!(nfa.has_empty(), "+ inside of * matches empty"); + /// + /// // If a regex is just made of a look-around assertion, even if the + /// // assertion requires some kind of non-empty string around it (such as + /// // \b), then it is still treated as if it matches the empty string. + /// // Namely, if a match occurs of just a look-around assertion, then the + /// // match returned is empty. + /// let nfa = NFA::compiler() + /// .syntax(syntax::Config::new().utf8(false)) + /// .build(r"^$\A\z\b\B(?-u:\b\B)")?; + /// assert!(nfa.has_empty(), "assertions match empty"); + /// // Even when an assertion is wrapped in a '+', it still matches the + /// // empty string. + /// let nfa = NFA::new(r"\b+")?; + /// assert!(nfa.has_empty(), "+ of an assertion matches empty"); + /// + /// // An alternation with even one branch that can match the empty string + /// // is also said to match the empty string overall. + /// let nfa = NFA::new("foo|(bar)?|quux")?; + /// assert!(nfa.has_empty(), "alternations can match empty"); + /// + /// // An NFA that matches nothing does not match the empty string. 
+ /// let nfa = NFA::new("[a&&b]")?; + /// assert!(!nfa.has_empty(), "never matching means not matching empty"); + /// // But if it's wrapped in something that doesn't require a match at + /// // all, then it can match the empty string! + /// let nfa = NFA::new("[a&&b]*")?; + /// assert!(nfa.has_empty(), "* on never-match still matches empty"); + /// // Since a '+' requires a match, using it on something that can never + /// // match will itself produce a regex that can never match anything, + /// // and thus does not match the empty string. + /// let nfa = NFA::new("[a&&b]+")?; + /// assert!(!nfa.has_empty(), "+ on never-match still matches nothing"); + /// + /// # Ok::<(), Box>(()) + /// ``` + #[inline] + pub fn has_empty(&self) -> bool { + self.0.has_empty + } + + /// Whether UTF-8 mode is enabled for this NFA or not. + /// + /// When UTF-8 mode is enabled, all matches reported by a regex engine + /// derived from this NFA are guaranteed to correspond to spans of valid + /// UTF-8. This includes zero-width matches. For example, the regex engine + /// must guarantee that the empty regex will not match at the positions + /// between code units in the UTF-8 encoding of a single codepoint. + /// + /// See [`Config::utf8`] for more information. + /// + /// This is enabled by default. + /// + /// # Example + /// + /// This example shows how UTF-8 mode can impact the match spans that may + /// be reported in certain cases. + /// + /// ``` + /// use regex_automata::{ + /// nfa::thompson::{self, pikevm::PikeVM}, + /// Match, Input, + /// }; + /// + /// let re = PikeVM::new("")?; + /// let (mut cache, mut caps) = (re.create_cache(), re.create_captures()); + /// + /// // UTF-8 mode is enabled by default. + /// let mut input = Input::new("☃"); + /// re.search(&mut cache, &input, &mut caps); + /// assert_eq!(Some(Match::must(0, 0..0)), caps.get_match()); + /// + /// // Even though an empty regex matches at 1..1, our next match is + /// // 3..3 because 1..1 and 2..2 split the snowman codepoint (which is + /// // three bytes long). + /// input.set_start(1); + /// re.search(&mut cache, &input, &mut caps); + /// assert_eq!(Some(Match::must(0, 3..3)), caps.get_match()); + /// + /// // But if we disable UTF-8, then we'll get matches at 1..1 and 2..2: + /// let re = PikeVM::builder() + /// .thompson(thompson::Config::new().utf8(false)) + /// .build("")?; + /// re.search(&mut cache, &input, &mut caps); + /// assert_eq!(Some(Match::must(0, 1..1)), caps.get_match()); + /// + /// input.set_start(2); + /// re.search(&mut cache, &input, &mut caps); + /// assert_eq!(Some(Match::must(0, 2..2)), caps.get_match()); + /// + /// input.set_start(3); + /// re.search(&mut cache, &input, &mut caps); + /// assert_eq!(Some(Match::must(0, 3..3)), caps.get_match()); + /// + /// input.set_start(4); + /// re.search(&mut cache, &input, &mut caps); + /// assert_eq!(None, caps.get_match()); + /// + /// # Ok::<(), Box>(()) + /// ``` + #[inline] + pub fn is_utf8(&self) -> bool { + self.0.utf8 + } + + /// Returns true when this NFA is meant to be matched in reverse. + /// + /// Generally speaking, when this is true, it means the NFA is supposed to + /// be used in conjunction with moving backwards through the haystack. That + /// is, from a higher memory address to a lower memory address. + /// + /// It is often the case that lower level routines dealing with an NFA + /// don't need to care about whether it is "meant" to be matched in reverse + /// or not. However, there are some specific cases where it matters. 
For + /// example, the implementation of CRLF-aware `^` and `$` line anchors + /// needs to know whether the search is in the forward or reverse + /// direction. In the forward direction, neither `^` nor `$` should match + /// when a `\r` has been seen previously and a `\n` is next. However, in + /// the reverse direction, neither `^` nor `$` should match when a `\n` + /// has been seen previously and a `\r` is next. This fundamentally changes + /// how the state machine is constructed, and thus needs to be altered + /// based on the direction of the search. + /// + /// This is automatically set when using a [`Compiler`] with a configuration + /// where [`Config::reverse`] is enabled. If you're building your own NFA + /// by hand via a [`Builder`] + #[inline] + pub fn is_reverse(&self) -> bool { + self.0.reverse + } + + /// Returns true if and only if all starting states for this NFA correspond + /// to the beginning of an anchored search. + /// + /// Typically, an NFA will have both an anchored and an unanchored starting + /// state. Namely, because it tends to be useful to have both and the cost + /// of having an unanchored starting state is almost zero (for an NFA). + /// However, if all patterns in the NFA are themselves anchored, then even + /// the unanchored starting state will correspond to an anchored search + /// since the pattern doesn't permit anything else. + /// + /// # Example + /// + /// This example shows a few different scenarios where this method's + /// return value varies. + /// + /// ``` + /// use regex_automata::nfa::thompson::NFA; + /// + /// // The unanchored starting state permits matching this pattern anywhere + /// // in a haystack, instead of just at the beginning. + /// let nfa = NFA::new("a")?; + /// assert!(!nfa.is_always_start_anchored()); + /// + /// // In this case, the pattern is itself anchored, so there is no way + /// // to run an unanchored search. + /// let nfa = NFA::new("^a")?; + /// assert!(nfa.is_always_start_anchored()); + /// + /// // When multiline mode is enabled, '^' can match at the start of a line + /// // in addition to the start of a haystack, so an unanchored search is + /// // actually possible. + /// let nfa = NFA::new("(?m)^a")?; + /// assert!(!nfa.is_always_start_anchored()); + /// + /// // Weird cases also work. A pattern is only considered anchored if all + /// // matches may only occur at the start of a haystack. + /// let nfa = NFA::new("(^a)|a")?; + /// assert!(!nfa.is_always_start_anchored()); + /// + /// // When multiple patterns are present, if they are all anchored, then + /// // the NFA is always anchored too. + /// let nfa = NFA::new_many(&["^a", "^b", "^c"])?; + /// assert!(nfa.is_always_start_anchored()); + /// + /// // But if one pattern is unanchored, then the NFA must permit an + /// // unanchored search. + /// let nfa = NFA::new_many(&["^a", "b", "^c"])?; + /// assert!(!nfa.is_always_start_anchored()); + /// + /// # Ok::<(), Box>(()) + /// ``` + #[inline] + pub fn is_always_start_anchored(&self) -> bool { + self.start_anchored() == self.start_unanchored() + } + + /// Returns the look-around matcher associated with this NFA. + /// + /// A look-around matcher determines how to match look-around assertions. + /// In particular, some assertions are configurable. For example, the + /// `(?m:^)` and `(?m:$)` assertions can have their line terminator changed + /// from the default of `\n` to any other byte. 
+ /// + /// If the NFA was built using a [`Compiler`], then this matcher + /// can be set via the [`Config::look_matcher`] configuration + /// knob. Otherwise, if you've built an NFA by hand, it is set via + /// [`Builder::set_look_matcher`]. + /// + /// # Example + /// + /// This shows how to change the line terminator for multi-line assertions. + /// + /// ``` + /// use regex_automata::{ + /// nfa::thompson::{self, pikevm::PikeVM}, + /// util::look::LookMatcher, + /// Match, Input, + /// }; + /// + /// let mut lookm = LookMatcher::new(); + /// lookm.set_line_terminator(b'\x00'); + /// + /// let re = PikeVM::builder() + /// .thompson(thompson::Config::new().look_matcher(lookm)) + /// .build(r"(?m)^[a-z]+$")?; + /// let mut cache = re.create_cache(); + /// + /// // Multi-line assertions now use NUL as a terminator. + /// assert_eq!( + /// Some(Match::must(0, 1..4)), + /// re.find(&mut cache, b"\x00abc\x00"), + /// ); + /// // ... and \n is no longer recognized as a terminator. + /// assert_eq!( + /// None, + /// re.find(&mut cache, b"\nabc\n"), + /// ); + /// + /// # Ok::<(), Box>(()) + /// ``` + #[inline] + pub fn look_matcher(&self) -> &LookMatcher { + &self.0.look_matcher + } + + /// Returns the union of all look-around assertions used throughout this + /// NFA. When the returned set is empty, it implies that the NFA has no + /// look-around assertions and thus zero conditional epsilon transitions. + /// + /// This is useful in some cases enabling optimizations. It is not + /// unusual, for example, for optimizations to be of the form, "for any + /// regex with zero conditional epsilon transitions, do ..." where "..." + /// is some kind of optimization. + /// + /// This isn't only helpful for optimizations either. Sometimes look-around + /// assertions are difficult to support. For example, many of the DFAs in + /// this crate don't support Unicode word boundaries or handle them using + /// heuristics. Handling that correctly typically requires some kind of + /// cheap check of whether the NFA has a Unicode word boundary in the first + /// place. + /// + /// # Example + /// + /// This example shows how this routine varies based on the regex pattern: + /// + /// ``` + /// use regex_automata::{nfa::thompson::NFA, util::look::Look}; + /// + /// // No look-around at all. + /// let nfa = NFA::new("a")?; + /// assert!(nfa.look_set_any().is_empty()); + /// + /// // When multiple patterns are present, since this returns the union, + /// // it will include look-around assertions that only appear in one + /// // pattern. + /// let nfa = NFA::new_many(&["a", "b", "a^b", "c"])?; + /// assert!(nfa.look_set_any().contains(Look::Start)); + /// + /// // Some groups of assertions have various shortcuts. For example: + /// let nfa = NFA::new(r"(?-u:\b)")?; + /// assert!(nfa.look_set_any().contains_word()); + /// assert!(!nfa.look_set_any().contains_word_unicode()); + /// assert!(nfa.look_set_any().contains_word_ascii()); + /// + /// # Ok::<(), Box>(()) + /// ``` + #[inline] + pub fn look_set_any(&self) -> LookSet { + self.0.look_set_any + } + + /// Returns the union of all prefix look-around assertions for every + /// pattern in this NFA. When the returned set is empty, it implies none of + /// the patterns require moving through a conditional epsilon transition + /// before inspecting the first byte in the haystack. + /// + /// This can be useful for determining what kinds of assertions need to be + /// satisfied at the beginning of a search. 
For example, typically DFAs + /// in this crate will build a distinct starting state for each possible + /// starting configuration that might result in look-around assertions + /// being satisfied differently. However, if the set returned here is + /// empty, then you know that the start state is invariant because there + /// are no conditional epsilon transitions to consider. + /// + /// # Example + /// + /// This example shows how this routine varies based on the regex pattern: + /// + /// ``` + /// use regex_automata::{nfa::thompson::NFA, util::look::Look}; + /// + /// // No look-around at all. + /// let nfa = NFA::new("a")?; + /// assert!(nfa.look_set_prefix_any().is_empty()); + /// + /// // When multiple patterns are present, since this returns the union, + /// // it will include look-around assertions that only appear in one + /// // pattern. But it will only include assertions that are in the prefix + /// // of a pattern. For example, this includes '^' but not '$' even though + /// // '$' does appear. + /// let nfa = NFA::new_many(&["a", "b", "^ab$", "c"])?; + /// assert!(nfa.look_set_prefix_any().contains(Look::Start)); + /// assert!(!nfa.look_set_prefix_any().contains(Look::End)); + /// + /// # Ok::<(), Box>(()) + /// ``` + #[inline] + pub fn look_set_prefix_any(&self) -> LookSet { + self.0.look_set_prefix_any + } + + // FIXME: The `look_set_prefix_all` computation was not correct, and it + // seemed a little tricky to fix it. Since I wasn't actually using it for + // anything, I just decided to remove it in the run up to the regex 1.9 + // release. If you need this, please file an issue. + /* + /// Returns the intersection of all prefix look-around assertions for every + /// pattern in this NFA. When the returned set is empty, it implies at + /// least one of the patterns does not require moving through a conditional + /// epsilon transition before inspecting the first byte in the haystack. + /// Conversely, when the set contains an assertion, it implies that every + /// pattern in the NFA also contains that assertion in its prefix. + /// + /// This can be useful for determining what kinds of assertions need to be + /// satisfied at the beginning of a search. For example, if you know that + /// [`Look::Start`] is in the prefix intersection set returned here, then + /// you know that all searches, regardless of input configuration, will be + /// anchored. + /// + /// # Example + /// + /// This example shows how this routine varies based on the regex pattern: + /// + /// ``` + /// use regex_automata::{nfa::thompson::NFA, util::look::Look}; + /// + /// // No look-around at all. + /// let nfa = NFA::new("a")?; + /// assert!(nfa.look_set_prefix_all().is_empty()); + /// + /// // When multiple patterns are present, since this returns the + /// // intersection, it will only include assertions present in every + /// // prefix, and only the prefix. + /// let nfa = NFA::new_many(&["^a$", "^b$", "$^ab$", "^c$"])?; + /// assert!(nfa.look_set_prefix_all().contains(Look::Start)); + /// assert!(!nfa.look_set_prefix_all().contains(Look::End)); + /// + /// # Ok::<(), Box>(()) + /// ``` + #[inline] + pub fn look_set_prefix_all(&self) -> LookSet { + self.0.look_set_prefix_all + } + */ + + /// Returns the memory usage, in bytes, of this NFA. + /// + /// This does **not** include the stack size used up by this NFA. To + /// compute that, use `std::mem::size_of::()`. + /// + /// # Example + /// + /// This example shows that large Unicode character classes can use quite + /// a bit of memory. 
+    ///
+    /// ```
+    /// # if cfg!(miri) { return Ok(()); } // miri takes too long
+    /// use regex_automata::nfa::thompson::NFA;
+    ///
+    /// let nfa_unicode = NFA::new(r"\w")?;
+    /// let nfa_ascii = NFA::new(r"(?-u:\w)")?;
+    ///
+    /// assert!(10 * nfa_ascii.memory_usage() < nfa_unicode.memory_usage());
+    ///
+    /// # Ok::<(), Box<dyn std::error::Error>>(())
+    /// ```
+    #[inline]
+    pub fn memory_usage(&self) -> usize {
+        use core::mem::size_of;
+
+        size_of::<Inner>() // allocated on the heap via Arc
+            + self.0.states.len() * size_of::<State>()
+            + self.0.start_pattern.len() * size_of::<StateID>()
+            + self.0.group_info.memory_usage()
+            + self.0.memory_extra
+    }
+}
+
+impl fmt::Debug for NFA {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        self.0.fmt(f)
+    }
+}
+
+/// The "inner" part of the NFA. We split this part out so that we can easily
+/// wrap it in an `Arc` above in the definition of `NFA`.
+///
+/// See builder.rs for the code that actually builds this type. This module
+/// does provide (internal) mutable methods for adding things to this
+/// NFA before finalizing it, but the high level construction process is
+/// controlled by the builder abstraction. (Which is complicated enough to
+/// get its own module.)
+#[derive(Default)]
+pub(super) struct Inner {
+    /// The state sequence. This sequence is guaranteed to be indexable by all
+    /// starting state IDs, and it is also guaranteed to contain at most one
+    /// `Match` state for each pattern compiled into this NFA. (A pattern may
+    /// not have a corresponding `Match` state if a `Match` state is impossible
+    /// to reach.)
+    states: Vec<State>,
+    /// The anchored starting state of this NFA.
+    start_anchored: StateID,
+    /// The unanchored starting state of this NFA.
+    start_unanchored: StateID,
+    /// The starting states for each individual pattern. Starting at any
+    /// of these states will result in only an anchored search for the
+    /// corresponding pattern. The vec is indexed by pattern ID. When the NFA
+    /// contains a single regex, then `start_pattern[0]` and `start_anchored`
+    /// are always equivalent.
+    start_pattern: Vec<StateID>,
+    /// Info about the capturing groups in this NFA. This is responsible for
+    /// mapping groups to slots, mapping groups to names and names to groups.
+    group_info: GroupInfo,
+    /// A representation of equivalence classes over the transitions in this
+    /// NFA. Two bytes in the same equivalence class must not discriminate
+    /// between a match or a non-match. This map can be used to shrink the
+    /// total size of a DFA's transition table with a small match-time cost.
+    ///
+    /// Note that the NFA's transitions are *not* defined in terms of these
+    /// equivalence classes. The NFA's transitions are defined on the original
+    /// byte values. For the most part, this is because they wouldn't really
+    /// help the NFA much since the NFA already uses a sparse representation
+    /// to represent transitions. Byte classes are most effective in a dense
+    /// representation.
+    byte_class_set: ByteClassSet,
+    /// This is generated from `byte_class_set`, and essentially represents the
+    /// same thing but supports different access patterns. Namely, this permits
+    /// looking up the equivalence class of a byte very cheaply.
+    ///
+    /// Ideally we would just store this, but because of annoying code
+    /// structure reasons, we keep both this and `byte_class_set` around for
+    /// now. I think I would prefer that `byte_class_set` were computed in the
+    /// `Builder`, but right now, we compute it as states are added to the
+    /// `NFA`.
+ byte_classes: ByteClasses, + /// Whether this NFA has a `Capture` state anywhere. + has_capture: bool, + /// When the empty string is in the language matched by this NFA. + has_empty: bool, + /// Whether UTF-8 mode is enabled for this NFA. Briefly, this means that + /// all non-empty matches produced by this NFA correspond to spans of valid + /// UTF-8, and any empty matches produced by this NFA that split a UTF-8 + /// encoded codepoint should be filtered out by the corresponding regex + /// engine. + utf8: bool, + /// Whether this NFA is meant to be matched in reverse or not. + reverse: bool, + /// The matcher to be used for look-around assertions. + look_matcher: LookMatcher, + /// The union of all look-around assertions that occur anywhere within + /// this NFA. If this set is empty, then it means there are precisely zero + /// conditional epsilon transitions in the NFA. + look_set_any: LookSet, + /// The union of all look-around assertions that occur as a zero-length + /// prefix for any of the patterns in this NFA. + look_set_prefix_any: LookSet, + /* + /// The intersection of all look-around assertions that occur as a + /// zero-length prefix for any of the patterns in this NFA. + look_set_prefix_all: LookSet, + */ + /// Heap memory used indirectly by NFA states and other things (like the + /// various capturing group representations above). Since each state + /// might use a different amount of heap, we need to keep track of this + /// incrementally. + memory_extra: usize, +} + +impl Inner { + /// Runs any last finalization bits and turns this into a full NFA. + pub(super) fn into_nfa(mut self) -> NFA { + self.byte_classes = self.byte_class_set.byte_classes(); + // Do epsilon closure from the start state of every pattern in order + // to compute various properties such as look-around assertions and + // whether the empty string can be matched. + let mut stack = vec![]; + let mut seen = SparseSet::new(self.states.len()); + for &start_id in self.start_pattern.iter() { + stack.push(start_id); + seen.clear(); + // let mut prefix_all = LookSet::full(); + let mut prefix_any = LookSet::empty(); + while let Some(sid) = stack.pop() { + if !seen.insert(sid) { + continue; + } + match self.states[sid] { + State::ByteRange { .. } + | State::Dense { .. } + | State::Fail => continue, + State::Sparse(_) => { + // This snippet below will rewrite this sparse state + // as a dense state. By doing it here, we apply this + // optimization to all hot "sparse" states since these + // are the states that are reachable from the start + // state via an epsilon closure. + // + // Unfortunately, this optimization did not seem to + // help much in some very limited ad hoc benchmarking. + // + // I left the 'Dense' state type in place in case we + // want to revisit this, but I suspect the real way + // to make forward progress is a more fundamental + // re-architecting of how data in the NFA is laid out. + // I think we should consider a single contiguous + // allocation instead of all this indirection and + // potential heap allocations for every state. But this + // is a large re-design and will require API breaking + // changes. + // self.memory_extra -= self.states[sid].memory_usage(); + // let trans = DenseTransitions::from_sparse(sparse); + // self.states[sid] = State::Dense(trans); + // self.memory_extra += self.states[sid].memory_usage(); + continue; + } + State::Match { .. 
} => self.has_empty = true, + State::Look { look, next } => { + prefix_any = prefix_any.insert(look); + stack.push(next); + } + State::Union { ref alternates } => { + // Order doesn't matter here, since we're just dealing + // with look-around sets. But if we do richer analysis + // here that needs to care about preference order, then + // this should be done in reverse. + stack.extend(alternates.iter()); + } + State::BinaryUnion { alt1, alt2 } => { + stack.push(alt2); + stack.push(alt1); + } + State::Capture { next, .. } => { + stack.push(next); + } + } + } + self.look_set_prefix_any = + self.look_set_prefix_any.union(prefix_any); + } + self.states.shrink_to_fit(); + self.start_pattern.shrink_to_fit(); + NFA(Arc::new(self)) + } + + /// Returns the capturing group info for this NFA. + pub(super) fn group_info(&self) -> &GroupInfo { + &self.group_info + } + + /// Add the given state to this NFA after allocating a fresh identifier for + /// it. + /// + /// This panics if too many states are added such that a fresh identifier + /// could not be created. (Currently, the only caller of this routine is + /// a `Builder`, and it upholds this invariant.) + pub(super) fn add(&mut self, state: State) -> StateID { + match state { + State::ByteRange { ref trans } => { + self.byte_class_set.set_range(trans.start, trans.end); + } + State::Sparse(ref sparse) => { + for trans in sparse.transitions.iter() { + self.byte_class_set.set_range(trans.start, trans.end); + } + } + State::Dense { .. } => unreachable!(), + State::Look { look, .. } => { + self.look_matcher + .add_to_byteset(look, &mut self.byte_class_set); + self.look_set_any = self.look_set_any.insert(look); + } + State::Capture { .. } => { + self.has_capture = true; + } + State::Union { .. } + | State::BinaryUnion { .. } + | State::Fail + | State::Match { .. } => {} + } + + let id = StateID::new(self.states.len()).unwrap(); + self.memory_extra += state.memory_usage(); + self.states.push(state); + id + } + + /// Set the starting state identifiers for this NFA. + /// + /// `start_anchored` and `start_unanchored` may be equivalent. When they + /// are, then the NFA can only execute anchored searches. This might + /// occur, for example, for patterns that are unconditionally anchored. + /// e.g., `^foo`. + pub(super) fn set_starts( + &mut self, + start_anchored: StateID, + start_unanchored: StateID, + start_pattern: &[StateID], + ) { + self.start_anchored = start_anchored; + self.start_unanchored = start_unanchored; + self.start_pattern = start_pattern.to_vec(); + } + + /// Sets the UTF-8 mode of this NFA. + pub(super) fn set_utf8(&mut self, yes: bool) { + self.utf8 = yes; + } + + /// Sets the reverse mode of this NFA. + pub(super) fn set_reverse(&mut self, yes: bool) { + self.reverse = yes; + } + + /// Sets the look-around assertion matcher for this NFA. + pub(super) fn set_look_matcher(&mut self, m: LookMatcher) { + self.look_matcher = m; + } + + /// Set the capturing groups for this NFA. + /// + /// The given slice should contain the capturing groups for each pattern, + /// The capturing groups in turn should correspond to the total number of + /// capturing groups in the pattern, including the anonymous first capture + /// group for each pattern. If a capturing group does have a name, then it + /// should be provided as a Arc. + /// + /// This returns an error if a corresponding `GroupInfo` could not be + /// built. 
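The expected shape of that argument is easiest to see as a literal value. A small sketch, assuming the upstream signature takes `&[Vec<Option<Arc<str>>>]` (one inner `Vec` per pattern, one entry per capture group, `None` for unnamed groups):

```rust
use std::sync::Arc;

fn main() {
    // Two patterns: the first has the implicit group 0 plus one named
    // group, the second only has the implicit group 0.
    let captures: Vec<Vec<Option<Arc<str>>>> = vec![
        vec![None, Some(Arc::from("year"))],
        vec![None],
    ];
    // Borrowing this as `&[Vec<Option<Arc<str>>>]` yields the kind of
    // value described by the documentation above.
    assert_eq!(2, captures.len());
    assert_eq!(Some("year"), captures[0][1].as_deref());
}
```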
+ pub(super) fn set_captures( + &mut self, + captures: &[Vec>>], + ) -> Result<(), GroupInfoError> { + self.group_info = GroupInfo::new( + captures.iter().map(|x| x.iter().map(|y| y.as_ref())), + )?; + Ok(()) + } + + /// Remap the transitions in every state of this NFA using the given map. + /// The given map should be indexed according to state ID namespace used by + /// the transitions of the states currently in this NFA. + /// + /// This is particularly useful to the NFA builder, since it is convenient + /// to add NFA states in order to produce their final IDs. Then, after all + /// of the intermediate "empty" states (unconditional epsilon transitions) + /// have been removed from the builder's representation, we can re-map all + /// of the transitions in the states already added to their final IDs. + pub(super) fn remap(&mut self, old_to_new: &[StateID]) { + for state in &mut self.states { + state.remap(old_to_new); + } + self.start_anchored = old_to_new[self.start_anchored]; + self.start_unanchored = old_to_new[self.start_unanchored]; + for id in self.start_pattern.iter_mut() { + *id = old_to_new[*id]; + } + } +} + +impl fmt::Debug for Inner { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + writeln!(f, "thompson::NFA(")?; + for (sid, state) in self.states.iter().with_state_ids() { + let status = if sid == self.start_anchored { + '^' + } else if sid == self.start_unanchored { + '>' + } else { + ' ' + }; + writeln!(f, "{}{:06?}: {:?}", status, sid.as_usize(), state)?; + } + let pattern_len = self.start_pattern.len(); + if pattern_len > 1 { + writeln!(f)?; + for pid in 0..pattern_len { + let sid = self.start_pattern[pid]; + writeln!(f, "START({:06?}): {:?}", pid, sid.as_usize())?; + } + } + writeln!(f)?; + writeln!( + f, + "transition equivalence classes: {:?}", + self.byte_classes, + )?; + writeln!(f, ")")?; + Ok(()) + } +} + +/// A state in an NFA. +/// +/// In theory, it can help to conceptualize an `NFA` as a graph consisting of +/// `State`s. Each `State` contains its complete set of outgoing transitions. +/// +/// In practice, it can help to conceptualize an `NFA` as a sequence of +/// instructions for a virtual machine. Each `State` says what to do and where +/// to go next. +/// +/// Strictly speaking, the practical interpretation is the most correct one, +/// because of the [`Capture`](State::Capture) state. Namely, a `Capture` +/// state always forwards execution to another state unconditionally. Its only +/// purpose is to cause a side effect: the recording of the current input +/// position at a particular location in memory. In this sense, an `NFA` +/// has more power than a theoretical non-deterministic finite automaton. +/// +/// For most uses of this crate, it is likely that one may never even need to +/// be aware of this type at all. The main use cases for looking at `State`s +/// directly are if you need to write your own search implementation or if you +/// need to do some kind of analysis on the NFA. +#[derive(Clone, Eq, PartialEq)] +pub enum State { + /// A state with a single transition that can only be taken if the current + /// input symbol is in a particular range of bytes. + ByteRange { + /// The transition from this state to the next. + trans: Transition, + }, + /// A state with possibly many transitions represented in a sparse fashion. + /// Transitions are non-overlapping and ordered lexicographically by input + /// range. + /// + /// In practice, this is used for encoding UTF-8 automata. 
Its presence is + /// primarily an optimization that avoids many additional unconditional + /// epsilon transitions (via [`Union`](State::Union) states), and thus + /// decreases the overhead of traversing the NFA. This can improve both + /// matching time and DFA construction time. + Sparse(SparseTransitions), + /// A dense representation of a state with multiple transitions. + Dense(DenseTransitions), + /// A conditional epsilon transition satisfied via some sort of + /// look-around. Look-around is limited to anchor and word boundary + /// assertions. + /// + /// Look-around states are meant to be evaluated while performing epsilon + /// closure (computing the set of states reachable from a particular state + /// via only epsilon transitions). If the current position in the haystack + /// satisfies the look-around assertion, then you're permitted to follow + /// that epsilon transition. + Look { + /// The look-around assertion that must be satisfied before moving + /// to `next`. + look: Look, + /// The state to transition to if the look-around assertion is + /// satisfied. + next: StateID, + }, + /// An alternation such that there exists an epsilon transition to all + /// states in `alternates`, where matches found via earlier transitions + /// are preferred over later transitions. + Union { + /// An ordered sequence of unconditional epsilon transitions to other + /// states. Transitions earlier in the sequence are preferred over + /// transitions later in the sequence. + alternates: Box<[StateID]>, + }, + /// An alternation such that there exists precisely two unconditional + /// epsilon transitions, where matches found via `alt1` are preferred over + /// matches found via `alt2`. + /// + /// This state exists as a common special case of Union where there are + /// only two alternates. In this case, we don't need any allocations to + /// represent the state. This saves a bit of memory and also saves an + /// additional memory access when traversing the NFA. + BinaryUnion { + /// An unconditional epsilon transition to another NFA state. This + /// is preferred over `alt2`. + alt1: StateID, + /// An unconditional epsilon transition to another NFA state. Matches + /// reported via this transition should only be reported if no matches + /// were found by following `alt1`. + alt2: StateID, + }, + /// An empty state that records a capture location. + /// + /// From the perspective of finite automata, this is precisely equivalent + /// to an unconditional epsilon transition, but serves the purpose of + /// instructing NFA simulations to record additional state when the finite + /// state machine passes through this epsilon transition. + /// + /// `slot` in this context refers to the specific capture group slot + /// offset that is being recorded. Each capturing group has two slots + /// corresponding to the start and end of the matching portion of that + /// group. + /// + /// The pattern ID and capture group index are also included in this state + /// in case they are useful. But mostly, all you'll need is `next` and + /// `slot`. + Capture { + /// The state to transition to, unconditionally. + next: StateID, + /// The pattern ID that this capture belongs to. + pattern_id: PatternID, + /// The capture group index that this capture belongs to. Capture group + /// indices are local to each pattern. For example, when capturing + /// groups are enabled, every pattern has a capture group at index + /// `0`. + group_index: SmallIndex, + /// The slot index for this capture. 
Every capturing group has two + /// slots: one for the start haystack offset and one for the end + /// haystack offset. Unlike capture group indices, slot indices are + /// global across all patterns in this NFA. That is, each slot belongs + /// to a single pattern, but there is only one slot at index `i`. + slot: SmallIndex, + }, + /// A state that cannot be transitioned out of. This is useful for cases + /// where you want to prevent matching from occurring. For example, if your + /// regex parser permits empty character classes, then one could choose + /// a `Fail` state to represent them. (An empty character class can be + /// thought of as an empty set. Since nothing is in an empty set, they can + /// never match anything.) + Fail, + /// A match state. There is at least one such occurrence of this state for + /// each regex that can match that is in this NFA. + Match { + /// The matching pattern ID. + pattern_id: PatternID, + }, +} + +impl State { + /// Returns true if and only if this state contains one or more epsilon + /// transitions. + /// + /// In practice, a state has no outgoing transitions (like `Match`), has + /// only non-epsilon transitions (like `ByteRange`) or has only epsilon + /// transitions (like `Union`). + /// + /// # Example + /// + /// ``` + /// use regex_automata::{ + /// nfa::thompson::{State, Transition}, + /// util::primitives::{PatternID, StateID, SmallIndex}, + /// }; + /// + /// // Capture states are epsilon transitions. + /// let state = State::Capture { + /// next: StateID::ZERO, + /// pattern_id: PatternID::ZERO, + /// group_index: SmallIndex::ZERO, + /// slot: SmallIndex::ZERO, + /// }; + /// assert!(state.is_epsilon()); + /// + /// // ByteRange states are not. + /// let state = State::ByteRange { + /// trans: Transition { start: b'a', end: b'z', next: StateID::ZERO }, + /// }; + /// assert!(!state.is_epsilon()); + /// + /// # Ok::<(), Box>(()) + /// ``` + #[inline] + pub fn is_epsilon(&self) -> bool { + match *self { + State::ByteRange { .. } + | State::Sparse { .. } + | State::Dense { .. } + | State::Fail + | State::Match { .. } => false, + State::Look { .. } + | State::Union { .. } + | State::BinaryUnion { .. } + | State::Capture { .. } => true, + } + } + + /// Returns the heap memory usage of this NFA state in bytes. + fn memory_usage(&self) -> usize { + match *self { + State::ByteRange { .. } + | State::Look { .. } + | State::BinaryUnion { .. } + | State::Capture { .. } + | State::Match { .. } + | State::Fail => 0, + State::Sparse(SparseTransitions { ref transitions }) => { + transitions.len() * mem::size_of::() + } + State::Dense { .. } => 256 * mem::size_of::(), + State::Union { ref alternates } => { + alternates.len() * mem::size_of::() + } + } + } + + /// Remap the transitions in this state using the given map. Namely, the + /// given map should be indexed according to the transitions currently + /// in this state. + /// + /// This is used during the final phase of the NFA compiler, which turns + /// its intermediate NFA into the final NFA. + fn remap(&mut self, remap: &[StateID]) { + match *self { + State::ByteRange { ref mut trans } => { + trans.next = remap[trans.next] + } + State::Sparse(SparseTransitions { ref mut transitions }) => { + for t in transitions.iter_mut() { + t.next = remap[t.next]; + } + } + State::Dense(DenseTransitions { ref mut transitions }) => { + for sid in transitions.iter_mut() { + *sid = remap[*sid]; + } + } + State::Look { ref mut next, .. 
} => *next = remap[*next], + State::Union { ref mut alternates } => { + for alt in alternates.iter_mut() { + *alt = remap[*alt]; + } + } + State::BinaryUnion { ref mut alt1, ref mut alt2 } => { + *alt1 = remap[*alt1]; + *alt2 = remap[*alt2]; + } + State::Capture { ref mut next, .. } => *next = remap[*next], + State::Fail => {} + State::Match { .. } => {} + } + } +} + +impl fmt::Debug for State { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match *self { + State::ByteRange { ref trans } => trans.fmt(f), + State::Sparse(SparseTransitions { ref transitions }) => { + let rs = transitions + .iter() + .map(|t| format!("{t:?}")) + .collect::>() + .join(", "); + write!(f, "sparse({rs})") + } + State::Dense(ref dense) => { + write!(f, "dense(")?; + for (i, t) in dense.iter().enumerate() { + if i > 0 { + write!(f, ", ")?; + } + write!(f, "{t:?}")?; + } + write!(f, ")") + } + State::Look { ref look, next } => { + write!(f, "{:?} => {:?}", look, next.as_usize()) + } + State::Union { ref alternates } => { + let alts = alternates + .iter() + .map(|id| format!("{:?}", id.as_usize())) + .collect::>() + .join(", "); + write!(f, "union({alts})") + } + State::BinaryUnion { alt1, alt2 } => { + write!( + f, + "binary-union({}, {})", + alt1.as_usize(), + alt2.as_usize() + ) + } + State::Capture { next, pattern_id, group_index, slot } => { + write!( + f, + "capture(pid={:?}, group={:?}, slot={:?}) => {:?}", + pattern_id.as_usize(), + group_index.as_usize(), + slot.as_usize(), + next.as_usize(), + ) + } + State::Fail => write!(f, "FAIL"), + State::Match { pattern_id } => { + write!(f, "MATCH({:?})", pattern_id.as_usize()) + } + } + } +} + +/// A sequence of transitions used to represent a sparse state. +/// +/// This is the primary representation of a [`Sparse`](State::Sparse) state. +/// It corresponds to a sorted sequence of transitions with non-overlapping +/// byte ranges. If the byte at the current position in the haystack matches +/// one of the byte ranges, then the finite state machine should take the +/// corresponding transition. +#[derive(Clone, Debug, Eq, PartialEq)] +pub struct SparseTransitions { + /// The sorted sequence of non-overlapping transitions. + pub transitions: Box<[Transition]>, +} + +impl SparseTransitions { + /// This follows the matching transition for a particular byte. + /// + /// The matching transition is found by looking for a matching byte + /// range (there is at most one) corresponding to the position `at` in + /// `haystack`. + /// + /// If `at >= haystack.len()`, then this returns `None`. + #[inline] + pub fn matches(&self, haystack: &[u8], at: usize) -> Option { + haystack.get(at).and_then(|&b| self.matches_byte(b)) + } + + /// This follows the matching transition for any member of the alphabet. + /// + /// The matching transition is found by looking for a matching byte + /// range (there is at most one) corresponding to the position `at` in + /// `haystack`. If the given alphabet unit is [`EOI`](alphabet::Unit::eoi), + /// then this always returns `None`. + #[inline] + pub(crate) fn matches_unit( + &self, + unit: alphabet::Unit, + ) -> Option { + unit.as_u8().and_then(|byte| self.matches_byte(byte)) + } + + /// This follows the matching transition for a particular byte. + /// + /// The matching transition is found by looking for a matching byte range + /// (there is at most one) corresponding to the byte given. 
+ #[inline] + pub fn matches_byte(&self, byte: u8) -> Option { + for t in self.transitions.iter() { + if t.start > byte { + break; + } else if t.matches_byte(byte) { + return Some(t.next); + } + } + None + + /* + // This is an alternative implementation that uses binary search. In + // some ad hoc experiments, like + // + // regex-cli find match pikevm -b -p '\b\w+\b' non-ascii-file + // + // I could not observe any improvement, and in fact, things seemed to + // be a bit slower. I can see an improvement in at least one benchmark: + // + // regex-cli find match pikevm -b -p '\pL{100}' all-codepoints-utf8 + // + // Where total search time goes from 3.2s to 2.4s when using binary + // search. + self.transitions + .binary_search_by(|t| { + if t.end < byte { + core::cmp::Ordering::Less + } else if t.start > byte { + core::cmp::Ordering::Greater + } else { + core::cmp::Ordering::Equal + } + }) + .ok() + .map(|i| self.transitions[i].next) + */ + } +} + +/// A sequence of transitions used to represent a dense state. +/// +/// This is the primary representation of a [`Dense`](State::Dense) state. It +/// provides constant time matching. That is, given a byte in a haystack and +/// a `DenseTransitions`, one can determine if the state matches in constant +/// time. +/// +/// This is in contrast to `SparseTransitions`, whose time complexity is +/// necessarily bigger than constant time. Also in contrast, `DenseTransitions` +/// usually requires (much) more heap memory. +#[derive(Clone, Debug, Eq, PartialEq)] +pub struct DenseTransitions { + /// A dense representation of this state's transitions on the heap. This + /// always has length 256. + pub transitions: Box<[StateID]>, +} + +impl DenseTransitions { + /// This follows the matching transition for a particular byte. + /// + /// The matching transition is found by looking for a transition that + /// doesn't correspond to `StateID::ZERO` for the byte `at` the given + /// position in `haystack`. + /// + /// If `at >= haystack.len()`, then this returns `None`. + #[inline] + pub fn matches(&self, haystack: &[u8], at: usize) -> Option { + haystack.get(at).and_then(|&b| self.matches_byte(b)) + } + + /// This follows the matching transition for any member of the alphabet. + /// + /// The matching transition is found by looking for a transition that + /// doesn't correspond to `StateID::ZERO` for the given alphabet `unit`. + /// + /// If the given alphabet unit is [`EOI`](alphabet::Unit::eoi), then + /// this returns `None`. + #[inline] + pub(crate) fn matches_unit( + &self, + unit: alphabet::Unit, + ) -> Option { + unit.as_u8().and_then(|byte| self.matches_byte(byte)) + } + + /// This follows the matching transition for a particular byte. + /// + /// The matching transition is found by looking for a transition that + /// doesn't correspond to `StateID::ZERO` for the given `byte`. + #[inline] + pub fn matches_byte(&self, byte: u8) -> Option { + let next = self.transitions[usize::from(byte)]; + if next == StateID::ZERO { + None + } else { + Some(next) + } + } + + /* + /// The dense state optimization isn't currently enabled, so permit a + /// little bit of dead code. + pub(crate) fn from_sparse(sparse: &SparseTransitions) -> DenseTransitions { + let mut dense = vec![StateID::ZERO; 256]; + for t in sparse.transitions.iter() { + for b in t.start..=t.end { + dense[usize::from(b)] = t.next; + } + } + DenseTransitions { transitions: dense.into_boxed_slice() } + } + */ + + /// Returns an iterator over all transitions that don't point to + /// `StateID::ZERO`. 
+ pub(crate) fn iter(&self) -> impl Iterator + '_ { + use crate::util::int::Usize; + self.transitions + .iter() + .enumerate() + .filter(|&(_, &sid)| sid != StateID::ZERO) + .map(|(byte, &next)| Transition { + start: byte.as_u8(), + end: byte.as_u8(), + next, + }) + } +} + +/// A single transition to another state. +/// +/// This transition may only be followed if the current byte in the haystack +/// falls in the inclusive range of bytes specified. +#[derive(Clone, Copy, Eq, Hash, PartialEq)] +pub struct Transition { + /// The inclusive start of the byte range. + pub start: u8, + /// The inclusive end of the byte range. + pub end: u8, + /// The identifier of the state to transition to. + pub next: StateID, +} + +impl Transition { + /// Returns true if the position `at` in `haystack` falls in this + /// transition's range of bytes. + /// + /// If `at >= haystack.len()`, then this returns `false`. + pub fn matches(&self, haystack: &[u8], at: usize) -> bool { + haystack.get(at).map_or(false, |&b| self.matches_byte(b)) + } + + /// Returns true if the given alphabet unit falls in this transition's + /// range of bytes. If the given unit is [`EOI`](alphabet::Unit::eoi), then + /// this returns `false`. + pub fn matches_unit(&self, unit: alphabet::Unit) -> bool { + unit.as_u8().map_or(false, |byte| self.matches_byte(byte)) + } + + /// Returns true if the given byte falls in this transition's range of + /// bytes. + pub fn matches_byte(&self, byte: u8) -> bool { + self.start <= byte && byte <= self.end + } +} + +impl fmt::Debug for Transition { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + use crate::util::escape::DebugByte; + + let Transition { start, end, next } = *self; + if self.start == self.end { + write!(f, "{:?} => {:?}", DebugByte(start), next.as_usize()) + } else { + write!( + f, + "{:?}-{:?} => {:?}", + DebugByte(start), + DebugByte(end), + next.as_usize(), + ) + } + } +} + +/// An iterator over all pattern IDs in an NFA. +/// +/// This iterator is created by [`NFA::patterns`]. +/// +/// The lifetime parameter `'a` refers to the lifetime of the NFA from which +/// this pattern iterator was created. +#[derive(Debug)] +pub struct PatternIter<'a> { + it: PatternIDIter, + /// We explicitly associate a lifetime with this iterator even though we + /// don't actually borrow anything from the NFA. We do this for backward + /// compatibility purposes. If we ever do need to borrow something from + /// the NFA, then we can and just get rid of this marker without breaking + /// the public API. + _marker: core::marker::PhantomData<&'a ()>, +} + +impl<'a> Iterator for PatternIter<'a> { + type Item = PatternID; + + fn next(&mut self) -> Option { + self.it.next() + } +} + +#[cfg(all(test, feature = "nfa-pikevm"))] +mod tests { + use super::*; + use crate::{nfa::thompson::pikevm::PikeVM, Input}; + + // This asserts that an NFA state doesn't have its size changed. It is + // *really* easy to accidentally increase the size, and thus potentially + // dramatically increase the memory usage of every NFA. + // + // This assert doesn't mean we absolutely cannot increase the size of an + // NFA state. We can. It's just here to make sure we do it knowingly and + // intentionally. 
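Since `Transition` and its matching helpers are part of the public `nfa::thompson` API (they are imported the same way in the `is_epsilon` example earlier in this file), a short usage sketch can make the inclusive-range semantics concrete:

```rust
use regex_automata::{nfa::thompson::Transition, util::primitives::StateID};

fn main() {
    // A transition that may be followed on any byte in b'a'..=b'z'.
    let trans = Transition { start: b'a', end: b'z', next: StateID::ZERO };

    // Byte-level checks test the inclusive range directly.
    assert!(trans.matches_byte(b'a'));
    assert!(trans.matches_byte(b'z'));
    assert!(!trans.matches_byte(b'0'));

    // Position-based checks return false when `at` is out of bounds.
    assert!(trans.matches(b"abc", 0));
    assert!(!trans.matches(b"abc", 3));
}
```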
+    #[test]
+    fn state_has_small_size() {
+        #[cfg(target_pointer_width = "64")]
+        assert_eq!(24, core::mem::size_of::<State>());
+        #[cfg(target_pointer_width = "32")]
+        assert_eq!(20, core::mem::size_of::<State>());
+    }
+
+    #[test]
+    fn always_match() {
+        let re = PikeVM::new_from_nfa(NFA::always_match()).unwrap();
+        let mut cache = re.create_cache();
+        let mut caps = re.create_captures();
+        let mut find = |haystack, start, end| {
+            let input = Input::new(haystack).range(start..end);
+            re.search(&mut cache, &input, &mut caps);
+            caps.get_match().map(|m| m.end())
+        };
+
+        assert_eq!(Some(0), find("", 0, 0));
+        assert_eq!(Some(0), find("a", 0, 1));
+        assert_eq!(Some(1), find("a", 1, 1));
+        assert_eq!(Some(0), find("ab", 0, 2));
+        assert_eq!(Some(1), find("ab", 1, 2));
+        assert_eq!(Some(2), find("ab", 2, 2));
+    }
+
+    #[test]
+    fn never_match() {
+        let re = PikeVM::new_from_nfa(NFA::never_match()).unwrap();
+        let mut cache = re.create_cache();
+        let mut caps = re.create_captures();
+        let mut find = |haystack, start, end| {
+            let input = Input::new(haystack).range(start..end);
+            re.search(&mut cache, &input, &mut caps);
+            caps.get_match().map(|m| m.end())
+        };
+
+        assert_eq!(None, find("", 0, 0));
+        assert_eq!(None, find("a", 0, 1));
+        assert_eq!(None, find("a", 1, 1));
+        assert_eq!(None, find("ab", 0, 2));
+        assert_eq!(None, find("ab", 1, 2));
+        assert_eq!(None, find("ab", 2, 2));
+    }
+}
diff --git a/vendor/regex-automata/src/nfa/thompson/pikevm.rs b/vendor/regex-automata/src/nfa/thompson/pikevm.rs
new file mode 100644
index 00000000000000..a5cd7086f521de
--- /dev/null
+++ b/vendor/regex-automata/src/nfa/thompson/pikevm.rs
@@ -0,0 +1,2359 @@
+/*!
+An NFA backed Pike VM for executing regex searches with capturing groups.
+
+This module provides a [`PikeVM`] that works by simulating an NFA and
+resolving all spans of capturing groups that participate in a match.
+*/
+
+#[cfg(feature = "internal-instrument-pikevm")]
+use core::cell::RefCell;
+
+use alloc::{vec, vec::Vec};
+
+use crate::{
+    nfa::thompson::{self, BuildError, State, NFA},
+    util::{
+        captures::Captures,
+        empty, iter,
+        prefilter::Prefilter,
+        primitives::{NonMaxUsize, PatternID, SmallIndex, StateID},
+        search::{
+            Anchored, HalfMatch, Input, Match, MatchKind, PatternSet, Span,
+        },
+        sparse_set::SparseSet,
+    },
+};
+
+/// A simple macro for conditionally executing instrumentation logic when
+/// the 'trace' log level is enabled. This is a compile-time no-op when the
+/// 'internal-instrument-pikevm' feature isn't enabled. The intent here is that
+/// this makes it easier to avoid doing extra work when instrumentation isn't
+/// enabled.
+///
+/// This macro accepts a closure of type `|&mut Counters|`. The closure can
+/// then increment counters (or whatever) in accordance with what one wants
+/// to track.
+macro_rules! instrument {
+    ($fun:expr) => {
+        #[cfg(feature = "internal-instrument-pikevm")]
+        {
+            let fun: &mut dyn FnMut(&mut Counters) = &mut $fun;
+            COUNTERS.with(|c: &RefCell<Counters>| fun(&mut *c.borrow_mut()));
+        }
+    };
+}
+
+#[cfg(feature = "internal-instrument-pikevm")]
+std::thread_local! {
+    /// Effectively global state used to keep track of instrumentation
+    /// counters. The "proper" way to do this is to thread it through the
+    /// PikeVM, but it makes the code quite icky. Since this is just a
+    /// debugging feature, we're content to relegate it to thread local
+    /// state.
When instrumentation is enabled, the counters are reset at the + /// beginning of every search and printed (with the 'trace' log level) at + /// the end of every search. + static COUNTERS: RefCell = RefCell::new(Counters::empty()); +} + +/// The configuration used for building a [`PikeVM`]. +/// +/// A PikeVM configuration is a simple data object that is typically used with +/// [`Builder::configure`]. It can be cheaply cloned. +/// +/// A default configuration can be created either with `Config::new`, or +/// perhaps more conveniently, with [`PikeVM::config`]. +#[derive(Clone, Debug, Default)] +pub struct Config { + match_kind: Option, + pre: Option>, +} + +impl Config { + /// Return a new default PikeVM configuration. + pub fn new() -> Config { + Config::default() + } + + /// Set the desired match semantics. + /// + /// The default is [`MatchKind::LeftmostFirst`], which corresponds to the + /// match semantics of Perl-like regex engines. That is, when multiple + /// patterns would match at the same leftmost position, the pattern that + /// appears first in the concrete syntax is chosen. + /// + /// Currently, the only other kind of match semantics supported is + /// [`MatchKind::All`]. This corresponds to "classical DFA" construction + /// where all possible matches are visited in the NFA by the `PikeVM`. + /// + /// Typically, `All` is used when one wants to execute an overlapping + /// search and `LeftmostFirst` otherwise. In particular, it rarely makes + /// sense to use `All` with the various "leftmost" find routines, since the + /// leftmost routines depend on the `LeftmostFirst` automata construction + /// strategy. Specifically, `LeftmostFirst` results in the `PikeVM` + /// simulating dead states as a way to terminate the search and report a + /// match. `LeftmostFirst` also supports non-greedy matches using this + /// strategy where as `All` does not. + pub fn match_kind(mut self, kind: MatchKind) -> Config { + self.match_kind = Some(kind); + self + } + + /// Set a prefilter to be used whenever a start state is entered. + /// + /// A [`Prefilter`] in this context is meant to accelerate searches by + /// looking for literal prefixes that every match for the corresponding + /// pattern (or patterns) must start with. Once a prefilter produces a + /// match, the underlying search routine continues on to try and confirm + /// the match. + /// + /// Be warned that setting a prefilter does not guarantee that the search + /// will be faster. While it's usually a good bet, if the prefilter + /// produces a lot of false positive candidates (i.e., positions matched + /// by the prefilter but not by the regex), then the overall result can + /// be slower than if you had just executed the regex engine without any + /// prefilters. + /// + /// By default no prefilter is set. + /// + /// # Example + /// + /// ``` + /// use regex_automata::{ + /// nfa::thompson::pikevm::PikeVM, + /// util::prefilter::Prefilter, + /// Input, Match, MatchKind, + /// }; + /// + /// let pre = Prefilter::new(MatchKind::LeftmostFirst, &["foo", "bar"]); + /// let re = PikeVM::builder() + /// .configure(PikeVM::config().prefilter(pre)) + /// .build(r"(foo|bar)[a-z]+")?; + /// let mut cache = re.create_cache(); + /// let input = Input::new("foo1 barfox bar"); + /// assert_eq!(Some(Match::must(0, 5..11)), re.find(&mut cache, input)); + /// + /// # Ok::<(), Box>(()) + /// ``` + /// + /// Be warned though that an incorrect prefilter can lead to incorrect + /// results! 
+ /// + /// ``` + /// use regex_automata::{ + /// nfa::thompson::pikevm::PikeVM, + /// util::prefilter::Prefilter, + /// Input, HalfMatch, MatchKind, + /// }; + /// + /// let pre = Prefilter::new(MatchKind::LeftmostFirst, &["foo", "car"]); + /// let re = PikeVM::builder() + /// .configure(PikeVM::config().prefilter(pre)) + /// .build(r"(foo|bar)[a-z]+")?; + /// let mut cache = re.create_cache(); + /// let input = Input::new("foo1 barfox bar"); + /// // No match reported even though there clearly is one! + /// assert_eq!(None, re.find(&mut cache, input)); + /// + /// # Ok::<(), Box>(()) + /// ``` + pub fn prefilter(mut self, pre: Option) -> Config { + self.pre = Some(pre); + self + } + + /// Returns the match semantics set in this configuration. + pub fn get_match_kind(&self) -> MatchKind { + self.match_kind.unwrap_or(MatchKind::LeftmostFirst) + } + + /// Returns the prefilter set in this configuration, if one at all. + pub fn get_prefilter(&self) -> Option<&Prefilter> { + self.pre.as_ref().unwrap_or(&None).as_ref() + } + + /// Overwrite the default configuration such that the options in `o` are + /// always used. If an option in `o` is not set, then the corresponding + /// option in `self` is used. If it's not set in `self` either, then it + /// remains not set. + pub(crate) fn overwrite(&self, o: Config) -> Config { + Config { + match_kind: o.match_kind.or(self.match_kind), + pre: o.pre.or_else(|| self.pre.clone()), + } + } +} + +/// A builder for a `PikeVM`. +/// +/// This builder permits configuring options for the syntax of a pattern, +/// the NFA construction and the `PikeVM` construction. This builder is +/// different from a general purpose regex builder in that it permits fine +/// grain configuration of the construction process. The trade off for this is +/// complexity, and the possibility of setting a configuration that might not +/// make sense. For example, there are two different UTF-8 modes: +/// +/// * [`util::syntax::Config::utf8`](crate::util::syntax::Config::utf8) +/// controls whether the pattern itself can contain sub-expressions that match +/// invalid UTF-8. +/// * [`thompson::Config::utf8`] controls whether empty matches that split a +/// Unicode codepoint are reported or not. +/// +/// Generally speaking, callers will want to either enable all of these or +/// disable all of these. +/// +/// # Example +/// +/// This example shows how to disable UTF-8 mode in the syntax and the regex +/// itself. This is generally what you want for matching on arbitrary bytes. +/// +/// ``` +/// use regex_automata::{ +/// nfa::thompson::{self, pikevm::PikeVM}, +/// util::syntax, +/// Match, +/// }; +/// +/// let re = PikeVM::builder() +/// .syntax(syntax::Config::new().utf8(false)) +/// .thompson(thompson::Config::new().utf8(false)) +/// .build(r"foo(?-u:[^b])ar.*")?; +/// let mut cache = re.create_cache(); +/// +/// let haystack = b"\xFEfoo\xFFarzz\xE2\x98\xFF\n"; +/// let expected = Some(Match::must(0, 1..9)); +/// let got = re.find_iter(&mut cache, haystack).next(); +/// assert_eq!(expected, got); +/// // Notice that `(?-u:[^b])` matches invalid UTF-8, +/// // but the subsequent `.*` does not! Disabling UTF-8 +/// // on the syntax permits this. +/// // +/// // N.B. This example does not show the impact of +/// // disabling UTF-8 mode on a PikeVM Config, since that +/// // only impacts regexes that can produce matches of +/// // length 0. 
+/// assert_eq!(b"foo\xFFarzz", &haystack[got.unwrap().range()]); +/// +/// # Ok::<(), Box>(()) +/// ``` +#[derive(Clone, Debug)] +pub struct Builder { + config: Config, + #[cfg(feature = "syntax")] + thompson: thompson::Compiler, +} + +impl Builder { + /// Create a new PikeVM builder with its default configuration. + pub fn new() -> Builder { + Builder { + config: Config::default(), + #[cfg(feature = "syntax")] + thompson: thompson::Compiler::new(), + } + } + + /// Build a `PikeVM` from the given pattern. + /// + /// If there was a problem parsing or compiling the pattern, then an error + /// is returned. + #[cfg(feature = "syntax")] + pub fn build(&self, pattern: &str) -> Result { + self.build_many(&[pattern]) + } + + /// Build a `PikeVM` from the given patterns. + #[cfg(feature = "syntax")] + pub fn build_many>( + &self, + patterns: &[P], + ) -> Result { + let nfa = self.thompson.build_many(patterns)?; + self.build_from_nfa(nfa) + } + + /// Build a `PikeVM` directly from its NFA. + /// + /// Note that when using this method, any configuration that applies to the + /// construction of the NFA itself will of course be ignored, since the NFA + /// given here is already built. + pub fn build_from_nfa(&self, nfa: NFA) -> Result { + nfa.look_set_any().available().map_err(BuildError::word)?; + Ok(PikeVM { config: self.config.clone(), nfa }) + } + + /// Apply the given `PikeVM` configuration options to this builder. + pub fn configure(&mut self, config: Config) -> &mut Builder { + self.config = self.config.overwrite(config); + self + } + + /// Set the syntax configuration for this builder using + /// [`syntax::Config`](crate::util::syntax::Config). + /// + /// This permits setting things like case insensitivity, Unicode and multi + /// line mode. + /// + /// These settings only apply when constructing a PikeVM directly from a + /// pattern. + #[cfg(feature = "syntax")] + pub fn syntax( + &mut self, + config: crate::util::syntax::Config, + ) -> &mut Builder { + self.thompson.syntax(config); + self + } + + /// Set the Thompson NFA configuration for this builder using + /// [`nfa::thompson::Config`](crate::nfa::thompson::Config). + /// + /// This permits setting things like if additional time should be spent + /// shrinking the size of the NFA. + /// + /// These settings only apply when constructing a PikeVM directly from a + /// pattern. + #[cfg(feature = "syntax")] + pub fn thompson(&mut self, config: thompson::Config) -> &mut Builder { + self.thompson.configure(config); + self + } +} + +/// A virtual machine for executing regex searches with capturing groups. +/// +/// # Infallible APIs +/// +/// Unlike most other regex engines in this crate, a `PikeVM` never returns an +/// error at search time. It supports all [`Anchored`] configurations, never +/// quits and works on haystacks of arbitrary length. +/// +/// There are two caveats to mention though: +/// +/// * If an invalid pattern ID is given to a search via [`Anchored::Pattern`], +/// then the PikeVM will report "no match." This is consistent with all other +/// regex engines in this crate. +/// * When using [`PikeVM::which_overlapping_matches`] with a [`PatternSet`] +/// that has insufficient capacity to store all valid pattern IDs, then if a +/// match occurs for a `PatternID` that cannot be inserted, it is silently +/// dropped as if it did not match. +/// +/// # Advice +/// +/// The `PikeVM` is generally the most "powerful" regex engine in this crate. 
+/// "Powerful" in this context means that it can handle any regular expression +/// that is parseable by `regex-syntax` and any size haystack. Regrettably, +/// the `PikeVM` is also simultaneously often the _slowest_ regex engine in +/// practice. This results in an annoying situation where one generally tries +/// to pick any other regex engine (or perhaps none at all) before being +/// forced to fall back to a `PikeVM`. +/// +/// For example, a common strategy for dealing with capturing groups is to +/// actually look for the overall match of the regex using a faster regex +/// engine, like a [lazy DFA](crate::hybrid::regex::Regex). Once the overall +/// match is found, one can then run the `PikeVM` on just the match span to +/// find the spans of the capturing groups. In this way, the faster regex +/// engine does the majority of the work, while the `PikeVM` only lends its +/// power in a more limited role. +/// +/// Unfortunately, this isn't always possible because the faster regex engines +/// don't support all of the regex features in `regex-syntax`. This notably +/// includes (and is currently limited to) Unicode word boundaries. So if +/// your pattern has Unicode word boundaries, you typically can't use a +/// DFA-based regex engine at all (unless you [enable heuristic support for +/// it](crate::hybrid::dfa::Config::unicode_word_boundary)). (The [one-pass +/// DFA](crate::dfa::onepass::DFA) can handle Unicode word boundaries for +/// anchored searches only, but in a cruel sort of joke, many Unicode features +/// tend to result in making the regex _not_ one-pass.) +/// +/// # Example +/// +/// This example shows that the `PikeVM` implements Unicode word boundaries +/// correctly by default. +/// +/// ``` +/// # if cfg!(miri) { return Ok(()); } // miri takes too long +/// use regex_automata::{nfa::thompson::pikevm::PikeVM, Match}; +/// +/// let re = PikeVM::new(r"\b\w+\b")?; +/// let mut cache = re.create_cache(); +/// +/// let mut it = re.find_iter(&mut cache, "Шерлок Холмс"); +/// assert_eq!(Some(Match::must(0, 0..12)), it.next()); +/// assert_eq!(Some(Match::must(0, 13..23)), it.next()); +/// assert_eq!(None, it.next()); +/// # Ok::<(), Box>(()) +/// ``` +#[derive(Clone, Debug)] +pub struct PikeVM { + config: Config, + nfa: NFA, +} + +impl PikeVM { + /// Parse the given regular expression using the default configuration and + /// return the corresponding `PikeVM`. + /// + /// If you want a non-default configuration, then use the [`Builder`] to + /// set your own configuration. + /// + /// # Example + /// + /// ``` + /// use regex_automata::{nfa::thompson::pikevm::PikeVM, Match}; + /// + /// let re = PikeVM::new("foo[0-9]+bar")?; + /// let mut cache = re.create_cache(); + /// assert_eq!( + /// Some(Match::must(0, 3..14)), + /// re.find_iter(&mut cache, "zzzfoo12345barzzz").next(), + /// ); + /// # Ok::<(), Box>(()) + /// ``` + #[cfg(feature = "syntax")] + pub fn new(pattern: &str) -> Result { + PikeVM::builder().build(pattern) + } + + /// Like `new`, but parses multiple patterns into a single "multi regex." + /// This similarly uses the default regex configuration. 
+ /// + /// # Example + /// + /// ``` + /// use regex_automata::{nfa::thompson::pikevm::PikeVM, Match}; + /// + /// let re = PikeVM::new_many(&["[a-z]+", "[0-9]+"])?; + /// let mut cache = re.create_cache(); + /// + /// let mut it = re.find_iter(&mut cache, "abc 1 foo 4567 0 quux"); + /// assert_eq!(Some(Match::must(0, 0..3)), it.next()); + /// assert_eq!(Some(Match::must(1, 4..5)), it.next()); + /// assert_eq!(Some(Match::must(0, 6..9)), it.next()); + /// assert_eq!(Some(Match::must(1, 10..14)), it.next()); + /// assert_eq!(Some(Match::must(1, 15..16)), it.next()); + /// assert_eq!(Some(Match::must(0, 17..21)), it.next()); + /// assert_eq!(None, it.next()); + /// # Ok::<(), Box>(()) + /// ``` + #[cfg(feature = "syntax")] + pub fn new_many>( + patterns: &[P], + ) -> Result { + PikeVM::builder().build_many(patterns) + } + + /// Like `new`, but builds a PikeVM directly from an NFA. This is useful + /// if you already have an NFA, or even if you hand-assembled the NFA. + /// + /// # Example + /// + /// This shows how to hand assemble a regular expression via its HIR, + /// compile an NFA from it and build a PikeVM from the NFA. + /// + /// ``` + /// use regex_automata::{nfa::thompson::{NFA, pikevm::PikeVM}, Match}; + /// use regex_syntax::hir::{Hir, Class, ClassBytes, ClassBytesRange}; + /// + /// let hir = Hir::class(Class::Bytes(ClassBytes::new(vec![ + /// ClassBytesRange::new(b'0', b'9'), + /// ClassBytesRange::new(b'A', b'Z'), + /// ClassBytesRange::new(b'_', b'_'), + /// ClassBytesRange::new(b'a', b'z'), + /// ]))); + /// + /// let config = NFA::config().nfa_size_limit(Some(1_000)); + /// let nfa = NFA::compiler().configure(config).build_from_hir(&hir)?; + /// + /// let re = PikeVM::new_from_nfa(nfa)?; + /// let (mut cache, mut caps) = (re.create_cache(), re.create_captures()); + /// let expected = Some(Match::must(0, 3..4)); + /// re.captures(&mut cache, "!@#A#@!", &mut caps); + /// assert_eq!(expected, caps.get_match()); + /// + /// # Ok::<(), Box>(()) + /// ``` + pub fn new_from_nfa(nfa: NFA) -> Result { + PikeVM::builder().build_from_nfa(nfa) + } + + /// Create a new `PikeVM` that matches every input. + /// + /// # Example + /// + /// ``` + /// use regex_automata::{nfa::thompson::pikevm::PikeVM, Match}; + /// + /// let re = PikeVM::always_match()?; + /// let mut cache = re.create_cache(); + /// + /// let expected = Match::must(0, 0..0); + /// assert_eq!(Some(expected), re.find_iter(&mut cache, "").next()); + /// assert_eq!(Some(expected), re.find_iter(&mut cache, "foo").next()); + /// # Ok::<(), Box>(()) + /// ``` + pub fn always_match() -> Result { + let nfa = thompson::NFA::always_match(); + PikeVM::new_from_nfa(nfa) + } + + /// Create a new `PikeVM` that never matches any input. + /// + /// # Example + /// + /// ``` + /// use regex_automata::nfa::thompson::pikevm::PikeVM; + /// + /// let re = PikeVM::never_match()?; + /// let mut cache = re.create_cache(); + /// + /// assert_eq!(None, re.find_iter(&mut cache, "").next()); + /// assert_eq!(None, re.find_iter(&mut cache, "foo").next()); + /// # Ok::<(), Box>(()) + /// ``` + pub fn never_match() -> Result { + let nfa = thompson::NFA::never_match(); + PikeVM::new_from_nfa(nfa) + } + + /// Return a default configuration for a `PikeVM`. + /// + /// This is a convenience routine to avoid needing to import the `Config` + /// type when customizing the construction of a `PikeVM`. + /// + /// # Example + /// + /// This example shows how to disable UTF-8 mode. 
When UTF-8 mode is + /// disabled, zero-width matches that split a codepoint are allowed. + /// Otherwise they are never reported. + /// + /// In the code below, notice that `""` is permitted to match positions + /// that split the encoding of a codepoint. + /// + /// ``` + /// use regex_automata::{nfa::thompson::{self, pikevm::PikeVM}, Match}; + /// + /// let re = PikeVM::builder() + /// .thompson(thompson::Config::new().utf8(false)) + /// .build(r"")?; + /// let mut cache = re.create_cache(); + /// + /// let haystack = "a☃z"; + /// let mut it = re.find_iter(&mut cache, haystack); + /// assert_eq!(Some(Match::must(0, 0..0)), it.next()); + /// assert_eq!(Some(Match::must(0, 1..1)), it.next()); + /// assert_eq!(Some(Match::must(0, 2..2)), it.next()); + /// assert_eq!(Some(Match::must(0, 3..3)), it.next()); + /// assert_eq!(Some(Match::must(0, 4..4)), it.next()); + /// assert_eq!(Some(Match::must(0, 5..5)), it.next()); + /// assert_eq!(None, it.next()); + /// + /// # Ok::<(), Box>(()) + /// ``` + pub fn config() -> Config { + Config::new() + } + + /// Return a builder for configuring the construction of a `PikeVM`. + /// + /// This is a convenience routine to avoid needing to import the + /// [`Builder`] type in common cases. + /// + /// # Example + /// + /// This example shows how to use the builder to disable UTF-8 mode + /// everywhere. + /// + /// ``` + /// use regex_automata::{ + /// nfa::thompson::{self, pikevm::PikeVM}, + /// util::syntax, + /// Match, + /// }; + /// + /// let re = PikeVM::builder() + /// .syntax(syntax::Config::new().utf8(false)) + /// .thompson(thompson::Config::new().utf8(false)) + /// .build(r"foo(?-u:[^b])ar.*")?; + /// let (mut cache, mut caps) = (re.create_cache(), re.create_captures()); + /// + /// let haystack = b"\xFEfoo\xFFarzz\xE2\x98\xFF\n"; + /// let expected = Some(Match::must(0, 1..9)); + /// re.captures(&mut cache, haystack, &mut caps); + /// assert_eq!(expected, caps.get_match()); + /// + /// # Ok::<(), Box>(()) + /// ``` + pub fn builder() -> Builder { + Builder::new() + } + + /// Create a new empty set of capturing groups that is guaranteed to be + /// valid for the search APIs on this `PikeVM`. + /// + /// A `Captures` value created for a specific `PikeVM` cannot be used with + /// any other `PikeVM`. + /// + /// This is a convenience function for [`Captures::all`]. See the + /// [`Captures`] documentation for an explanation of its alternative + /// constructors that permit the `PikeVM` to do less work during a search, + /// and thus might make it faster. + pub fn create_captures(&self) -> Captures { + Captures::all(self.get_nfa().group_info().clone()) + } + + /// Create a new cache for this `PikeVM`. + /// + /// The cache returned should only be used for searches for this + /// `PikeVM`. If you want to reuse the cache for another `PikeVM`, then + /// you must call [`Cache::reset`] with that `PikeVM` (or, equivalently, + /// [`PikeVM::reset_cache`]). + pub fn create_cache(&self) -> Cache { + Cache::new(self) + } + + /// Reset the given cache such that it can be used for searching with the + /// this `PikeVM` (and only this `PikeVM`). + /// + /// A cache reset permits reusing memory already allocated in this cache + /// with a different `PikeVM`. + /// + /// # Example + /// + /// This shows how to re-purpose a cache for use with a different `PikeVM`. 
+ /// + /// ``` + /// # if cfg!(miri) { return Ok(()); } // miri takes too long + /// use regex_automata::{nfa::thompson::pikevm::PikeVM, Match}; + /// + /// let re1 = PikeVM::new(r"\w")?; + /// let re2 = PikeVM::new(r"\W")?; + /// + /// let mut cache = re1.create_cache(); + /// assert_eq!( + /// Some(Match::must(0, 0..2)), + /// re1.find_iter(&mut cache, "Δ").next(), + /// ); + /// + /// // Using 'cache' with re2 is not allowed. It may result in panics or + /// // incorrect results. In order to re-purpose the cache, we must reset + /// // it with the PikeVM we'd like to use it with. + /// // + /// // Similarly, after this reset, using the cache with 're1' is also not + /// // allowed. + /// re2.reset_cache(&mut cache); + /// assert_eq!( + /// Some(Match::must(0, 0..3)), + /// re2.find_iter(&mut cache, "☃").next(), + /// ); + /// + /// # Ok::<(), Box>(()) + /// ``` + pub fn reset_cache(&self, cache: &mut Cache) { + cache.reset(self); + } + + /// Returns the total number of patterns compiled into this `PikeVM`. + /// + /// In the case of a `PikeVM` that contains no patterns, this returns `0`. + /// + /// # Example + /// + /// This example shows the pattern length for a `PikeVM` that never + /// matches: + /// + /// ``` + /// use regex_automata::nfa::thompson::pikevm::PikeVM; + /// + /// let re = PikeVM::never_match()?; + /// assert_eq!(re.pattern_len(), 0); + /// # Ok::<(), Box>(()) + /// ``` + /// + /// And another example for a `PikeVM` that matches at every position: + /// + /// ``` + /// use regex_automata::nfa::thompson::pikevm::PikeVM; + /// + /// let re = PikeVM::always_match()?; + /// assert_eq!(re.pattern_len(), 1); + /// # Ok::<(), Box>(()) + /// ``` + /// + /// And finally, a `PikeVM` that was constructed from multiple patterns: + /// + /// ``` + /// use regex_automata::nfa::thompson::pikevm::PikeVM; + /// + /// let re = PikeVM::new_many(&["[0-9]+", "[a-z]+", "[A-Z]+"])?; + /// assert_eq!(re.pattern_len(), 3); + /// # Ok::<(), Box>(()) + /// ``` + pub fn pattern_len(&self) -> usize { + self.nfa.pattern_len() + } + + /// Return the config for this `PikeVM`. + #[inline] + pub fn get_config(&self) -> &Config { + &self.config + } + + /// Returns a reference to the underlying NFA. + #[inline] + pub fn get_nfa(&self) -> &NFA { + &self.nfa + } +} + +impl PikeVM { + /// Returns true if and only if this `PikeVM` matches the given haystack. + /// + /// This routine may short circuit if it knows that scanning future + /// input will never lead to a different result. In particular, if the + /// underlying NFA enters a match state, then this routine will return + /// `true` immediately without inspecting any future input. (Consider how + /// this might make a difference given the regex `a+` on the haystack + /// `aaaaaaaaaaaaaaa`. This routine can stop after it sees the first `a`, + /// but routines like `find` need to continue searching because `+` is + /// greedy by default.) + /// + /// # Example + /// + /// This shows basic usage: + /// + /// ``` + /// use regex_automata::nfa::thompson::pikevm::PikeVM; + /// + /// let re = PikeVM::new("foo[0-9]+bar")?; + /// let mut cache = re.create_cache(); + /// + /// assert!(re.is_match(&mut cache, "foo12345bar")); + /// assert!(!re.is_match(&mut cache, "foobar")); + /// # Ok::<(), Box>(()) + /// ``` + /// + /// # Example: consistency with search APIs + /// + /// `is_match` is guaranteed to return `true` whenever `find` returns a + /// match. 
This includes searches that are executed entirely within a + /// codepoint: + /// + /// ``` + /// use regex_automata::{nfa::thompson::pikevm::PikeVM, Input}; + /// + /// let re = PikeVM::new("a*")?; + /// let mut cache = re.create_cache(); + /// + /// assert!(!re.is_match(&mut cache, Input::new("☃").span(1..2))); + /// # Ok::<(), Box>(()) + /// ``` + /// + /// Notice that when UTF-8 mode is disabled, then the above reports a + /// match because the restriction against zero-width matches that split a + /// codepoint has been lifted: + /// + /// ``` + /// use regex_automata::{nfa::thompson::{pikevm::PikeVM, NFA}, Input}; + /// + /// let re = PikeVM::builder() + /// .thompson(NFA::config().utf8(false)) + /// .build("a*")?; + /// let mut cache = re.create_cache(); + /// + /// assert!(re.is_match(&mut cache, Input::new("☃").span(1..2))); + /// # Ok::<(), Box>(()) + /// ``` + #[inline] + pub fn is_match<'h, I: Into>>( + &self, + cache: &mut Cache, + input: I, + ) -> bool { + let input = input.into().earliest(true); + self.search_slots(cache, &input, &mut []).is_some() + } + + /// Executes a leftmost forward search and returns a `Match` if one exists. + /// + /// This routine only includes the overall match span. To get access to the + /// individual spans of each capturing group, use [`PikeVM::captures`]. + /// + /// # Example + /// + /// Leftmost first match semantics corresponds to the match with the + /// smallest starting offset, but where the end offset is determined by + /// preferring earlier branches in the original regular expression. For + /// example, `Sam|Samwise` will match `Sam` in `Samwise`, but `Samwise|Sam` + /// will match `Samwise` in `Samwise`. + /// + /// Generally speaking, the "leftmost first" match is how most backtracking + /// regular expressions tend to work. This is in contrast to POSIX-style + /// regular expressions that yield "leftmost longest" matches. Namely, + /// both `Sam|Samwise` and `Samwise|Sam` match `Samwise` when using + /// leftmost longest semantics. (This crate does not currently support + /// leftmost longest semantics.) + /// + /// ``` + /// use regex_automata::{nfa::thompson::pikevm::PikeVM, Match}; + /// + /// let re = PikeVM::new("foo[0-9]+")?; + /// let mut cache = re.create_cache(); + /// let expected = Match::must(0, 0..8); + /// assert_eq!(Some(expected), re.find(&mut cache, "foo12345")); + /// + /// // Even though a match is found after reading the first byte (`a`), + /// // the leftmost first match semantics demand that we find the earliest + /// // match that prefers earlier parts of the pattern over later parts. 
+ /// let re = PikeVM::new("abc|a")?; + /// let mut cache = re.create_cache(); + /// let expected = Match::must(0, 0..3); + /// assert_eq!(Some(expected), re.find(&mut cache, "abc")); + /// + /// # Ok::<(), Box>(()) + /// ``` + #[inline] + pub fn find<'h, I: Into>>( + &self, + cache: &mut Cache, + input: I, + ) -> Option { + let input = input.into(); + if self.get_nfa().pattern_len() == 1 { + let mut slots = [None, None]; + let pid = self.search_slots(cache, &input, &mut slots)?; + let start = slots[0]?.get(); + let end = slots[1]?.get(); + return Some(Match::new(pid, Span { start, end })); + } + let ginfo = self.get_nfa().group_info(); + let slots_len = ginfo.implicit_slot_len(); + let mut slots = vec![None; slots_len]; + let pid = self.search_slots(cache, &input, &mut slots)?; + let start = slots[pid.as_usize() * 2]?.get(); + let end = slots[pid.as_usize() * 2 + 1]?.get(); + Some(Match::new(pid, Span { start, end })) + } + + /// Executes a leftmost forward search and writes the spans of capturing + /// groups that participated in a match into the provided [`Captures`] + /// value. If no match was found, then [`Captures::is_match`] is guaranteed + /// to return `false`. + /// + /// # Example + /// + /// ``` + /// use regex_automata::{nfa::thompson::pikevm::PikeVM, Span}; + /// + /// let re = PikeVM::new(r"^([0-9]{4})-([0-9]{2})-([0-9]{2})$")?; + /// let (mut cache, mut caps) = (re.create_cache(), re.create_captures()); + /// + /// re.captures(&mut cache, "2010-03-14", &mut caps); + /// assert!(caps.is_match()); + /// assert_eq!(Some(Span::from(0..4)), caps.get_group(1)); + /// assert_eq!(Some(Span::from(5..7)), caps.get_group(2)); + /// assert_eq!(Some(Span::from(8..10)), caps.get_group(3)); + /// + /// # Ok::<(), Box>(()) + /// ``` + #[inline] + pub fn captures<'h, I: Into>>( + &self, + cache: &mut Cache, + input: I, + caps: &mut Captures, + ) { + self.search(cache, &input.into(), caps) + } + + /// Returns an iterator over all non-overlapping leftmost matches in the + /// given bytes. If no match exists, then the iterator yields no elements. + /// + /// # Example + /// + /// ``` + /// use regex_automata::{nfa::thompson::pikevm::PikeVM, Match}; + /// + /// let re = PikeVM::new("foo[0-9]+")?; + /// let mut cache = re.create_cache(); + /// + /// let text = "foo1 foo12 foo123"; + /// let matches: Vec = re.find_iter(&mut cache, text).collect(); + /// assert_eq!(matches, vec![ + /// Match::must(0, 0..4), + /// Match::must(0, 5..10), + /// Match::must(0, 11..17), + /// ]); + /// # Ok::<(), Box>(()) + /// ``` + #[inline] + pub fn find_iter<'r, 'c, 'h, I: Into>>( + &'r self, + cache: &'c mut Cache, + input: I, + ) -> FindMatches<'r, 'c, 'h> { + let caps = Captures::matches(self.get_nfa().group_info().clone()); + let it = iter::Searcher::new(input.into()); + FindMatches { re: self, cache, caps, it } + } + + /// Returns an iterator over all non-overlapping `Captures` values. If no + /// match exists, then the iterator yields no elements. + /// + /// This yields the same matches as [`PikeVM::find_iter`], but it includes + /// the spans of all capturing groups that participate in each match. + /// + /// **Tip:** See [`util::iter::Searcher`](crate::util::iter::Searcher) for + /// how to correctly iterate over all matches in a haystack while avoiding + /// the creation of a new `Captures` value for every match. (Which you are + /// forced to do with an `Iterator`.) 
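In the same spirit as the tip above, the simpler variant of the idea is to allocate the cache and a single `Captures` value once and reuse them across searches rather than rebuilding them per match. A minimal sketch using only APIs shown in this module (the pattern and haystacks are illustrative):

```rust
use regex_automata::nfa::thompson::pikevm::PikeVM;

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let re = PikeVM::new(r"([0-9]{4})-([0-9]{2})-([0-9]{2})")?;
    // One cache and one `Captures` value, allocated once...
    let (mut cache, mut caps) = (re.create_cache(), re.create_captures());

    // ...and reused for every search instead of being recreated each time.
    for haystack in ["2010-03-14", "no date here", "1973-01-05"] {
        re.captures(&mut cache, haystack, &mut caps);
        if caps.is_match() {
            println!("year span: {:?}", caps.get_group(1));
        }
    }
    Ok(())
}
```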
+    ///
+    /// # Example
+    ///
+    /// ```
+    /// use regex_automata::{nfa::thompson::pikevm::PikeVM, Span};
+    ///
+    /// let re = PikeVM::new("foo(?P<numbers>[0-9]+)")?;
+    /// let mut cache = re.create_cache();
+    ///
+    /// let text = "foo1 foo12 foo123";
+    /// let matches: Vec<Span> = re
+    ///     .captures_iter(&mut cache, text)
+    ///     // The unwrap is OK since 'numbers' matches if the pattern matches.
+    ///     .map(|caps| caps.get_group_by_name("numbers").unwrap())
+    ///     .collect();
+    /// assert_eq!(matches, vec![
+    ///     Span::from(3..4),
+    ///     Span::from(8..10),
+    ///     Span::from(14..17),
+    /// ]);
+    /// # Ok::<(), Box<dyn std::error::Error>>(())
+    /// ```
+    #[inline]
+    pub fn captures_iter<'r, 'c, 'h, I: Into<Input<'h>>>(
+        &'r self,
+        cache: &'c mut Cache,
+        input: I,
+    ) -> CapturesMatches<'r, 'c, 'h> {
+        let caps = self.create_captures();
+        let it = iter::Searcher::new(input.into());
+        CapturesMatches { re: self, cache, caps, it }
+    }
+}
+
+impl PikeVM {
+    /// Executes a leftmost forward search and writes the spans of capturing
+    /// groups that participated in a match into the provided [`Captures`]
+    /// value. If no match was found, then [`Captures::is_match`] is guaranteed
+    /// to return `false`.
+    ///
+    /// This is like [`PikeVM::captures`], but it accepts a concrete `&Input`
+    /// instead of an `Into<Input>`.
+    ///
+    /// # Example: specific pattern search
+    ///
+    /// This example shows how to build a multi-PikeVM that permits searching
+    /// for specific patterns.
+    ///
+    /// ```
+    /// use regex_automata::{
+    ///     nfa::thompson::pikevm::PikeVM,
+    ///     Anchored, Match, PatternID, Input,
+    /// };
+    ///
+    /// let re = PikeVM::new_many(&["[a-z0-9]{6}", "[a-z][a-z0-9]{5}"])?;
+    /// let (mut cache, mut caps) = (re.create_cache(), re.create_captures());
+    /// let haystack = "foo123";
+    ///
+    /// // Since we are using the default leftmost-first match and both
+    /// // patterns match at the same starting position, only the first pattern
+    /// // will be returned in this case when doing a search for any of the
+    /// // patterns.
+    /// let expected = Some(Match::must(0, 0..6));
+    /// re.search(&mut cache, &Input::new(haystack), &mut caps);
+    /// assert_eq!(expected, caps.get_match());
+    ///
+    /// // But if we want to check whether some other pattern matches, then we
+    /// // can provide its pattern ID.
+    /// let expected = Some(Match::must(1, 0..6));
+    /// let input = Input::new(haystack)
+    ///     .anchored(Anchored::Pattern(PatternID::must(1)));
+    /// re.search(&mut cache, &input, &mut caps);
+    /// assert_eq!(expected, caps.get_match());
+    ///
+    /// # Ok::<(), Box<dyn std::error::Error>>(())
+    /// ```
+    ///
+    /// # Example: specifying the bounds of a search
+    ///
+    /// This example shows how providing the bounds of a search can produce
+    /// different results than simply sub-slicing the haystack.
+    ///
+    /// ```
+    /// # if cfg!(miri) { return Ok(()); } // miri takes too long
+    /// use regex_automata::{nfa::thompson::pikevm::PikeVM, Match, Input};
+    ///
+    /// let re = PikeVM::new(r"\b[0-9]{3}\b")?;
+    /// let (mut cache, mut caps) = (re.create_cache(), re.create_captures());
+    /// let haystack = "foo123bar";
+    ///
+    /// // Since we sub-slice the haystack, the search doesn't know about
+    /// // the larger context and assumes that `123` is surrounded by word
+    /// // boundaries. And of course, the match position is reported relative
+    /// // to the sub-slice as well, which means we get `0..3` instead of
+    /// // `3..6`.
+ /// let expected = Some(Match::must(0, 0..3)); + /// re.search(&mut cache, &Input::new(&haystack[3..6]), &mut caps); + /// assert_eq!(expected, caps.get_match()); + /// + /// // But if we provide the bounds of the search within the context of the + /// // entire haystack, then the search can take the surrounding context + /// // into account. (And if we did find a match, it would be reported + /// // as a valid offset into `haystack` instead of its sub-slice.) + /// let expected = None; + /// let input = Input::new(haystack).range(3..6); + /// re.search(&mut cache, &input, &mut caps); + /// assert_eq!(expected, caps.get_match()); + /// + /// # Ok::<(), Box>(()) + /// ``` + #[inline] + pub fn search( + &self, + cache: &mut Cache, + input: &Input<'_>, + caps: &mut Captures, + ) { + caps.set_pattern(None); + let pid = self.search_slots(cache, input, caps.slots_mut()); + caps.set_pattern(pid); + } + + /// Executes a leftmost forward search and writes the spans of capturing + /// groups that participated in a match into the provided `slots`, and + /// returns the matching pattern ID. The contents of the slots for patterns + /// other than the matching pattern are unspecified. If no match was found, + /// then `None` is returned and the contents of `slots` is unspecified. + /// + /// This is like [`PikeVM::search`], but it accepts a raw slots slice + /// instead of a `Captures` value. This is useful in contexts where you + /// don't want or need to allocate a `Captures`. + /// + /// It is legal to pass _any_ number of slots to this routine. If the regex + /// engine would otherwise write a slot offset that doesn't fit in the + /// provided slice, then it is simply skipped. In general though, there are + /// usually three slice lengths you might want to use: + /// + /// * An empty slice, if you only care about which pattern matched. + /// * A slice with + /// [`pattern_len() * 2`](crate::nfa::thompson::NFA::pattern_len) + /// slots, if you only care about the overall match spans for each matching + /// pattern. + /// * A slice with + /// [`slot_len()`](crate::util::captures::GroupInfo::slot_len) slots, which + /// permits recording match offsets for every capturing group in every + /// pattern. + /// + /// # Example + /// + /// This example shows how to find the overall match offsets in a + /// multi-pattern search without allocating a `Captures` value. Indeed, we + /// can put our slots right on the stack. + /// + /// ``` + /// # if cfg!(miri) { return Ok(()); } // miri takes too long + /// use regex_automata::{nfa::thompson::pikevm::PikeVM, PatternID, Input}; + /// + /// let re = PikeVM::new_many(&[ + /// r"\pL+", + /// r"\d+", + /// ])?; + /// let mut cache = re.create_cache(); + /// let input = Input::new("!@#123"); + /// + /// // We only care about the overall match offsets here, so we just + /// // allocate two slots for each pattern. Each slot records the start + /// // and end of the match. + /// let mut slots = [None; 4]; + /// let pid = re.search_slots(&mut cache, &input, &mut slots); + /// assert_eq!(Some(PatternID::must(1)), pid); + /// + /// // The overall match offsets are always at 'pid * 2' and 'pid * 2 + 1'. + /// // See 'GroupInfo' for more details on the mapping between groups and + /// // slot indices. 
+    /// let slot_start = pid.unwrap().as_usize() * 2;
+    /// let slot_end = slot_start + 1;
+    /// assert_eq!(Some(3), slots[slot_start].map(|s| s.get()));
+    /// assert_eq!(Some(6), slots[slot_end].map(|s| s.get()));
+    ///
+    /// # Ok::<(), Box<dyn std::error::Error>>(())
+    /// ```
+    #[inline]
+    pub fn search_slots(
+        &self,
+        cache: &mut Cache,
+        input: &Input<'_>,
+        slots: &mut [Option<NonMaxUsize>],
+    ) -> Option<PatternID> {
+        let utf8empty = self.get_nfa().has_empty() && self.get_nfa().is_utf8();
+        if !utf8empty {
+            let hm = self.search_slots_imp(cache, input, slots)?;
+            return Some(hm.pattern());
+        }
+        // There is an unfortunate special case where if the regex can
+        // match the empty string and UTF-8 mode is enabled, the search
+        // implementation requires that the slots have at least as much space
+        // to report the bounds of any match. This is so zero-width matches
+        // that split a codepoint can be filtered out.
+        //
+        // Note that if utf8empty is true, we specialize the case for when
+        // the number of patterns is 1. In that case, we can just use a stack
+        // allocation. Otherwise we resort to a heap allocation, which we
+        // convince ourselves we're fine with due to the pathological nature of
+        // this case.
+        let min = self.get_nfa().group_info().implicit_slot_len();
+        if slots.len() >= min {
+            let hm = self.search_slots_imp(cache, input, slots)?;
+            return Some(hm.pattern());
+        }
+        if self.get_nfa().pattern_len() == 1 {
+            let mut enough = [None, None];
+            let got = self.search_slots_imp(cache, input, &mut enough);
+            // This is OK because we know `enough` is strictly bigger than
+            // `slots`, otherwise this special case isn't reached.
+            slots.copy_from_slice(&enough[..slots.len()]);
+            return got.map(|hm| hm.pattern());
+        }
+        let mut enough = vec![None; min];
+        let got = self.search_slots_imp(cache, input, &mut enough);
+        // This is OK because we know `enough` is strictly bigger than `slots`,
+        // otherwise this special case isn't reached.
+        slots.copy_from_slice(&enough[..slots.len()]);
+        got.map(|hm| hm.pattern())
+    }
+
+    /// This is the actual implementation of `search_slots` that
+    /// doesn't account for the special case when 1) the NFA has UTF-8 mode
+    /// enabled, 2) the NFA can match the empty string and 3) the caller has
+    /// provided an insufficient number of slots to record match offsets.
+    #[inline(never)]
+    fn search_slots_imp(
+        &self,
+        cache: &mut Cache,
+        input: &Input<'_>,
+        slots: &mut [Option<NonMaxUsize>],
+    ) -> Option<HalfMatch> {
+        let utf8empty = self.get_nfa().has_empty() && self.get_nfa().is_utf8();
+        let hm = match self.search_imp(cache, input, slots) {
+            None => return None,
+            Some(hm) if !utf8empty => return Some(hm),
+            Some(hm) => hm,
+        };
+        empty::skip_splits_fwd(input, hm, hm.offset(), |input| {
+            Ok(self
+                .search_imp(cache, input, slots)
+                .map(|hm| (hm, hm.offset())))
+        })
+        // OK because the PikeVM never errors.
+        .unwrap()
+    }
+
+    /// Writes the set of patterns that match anywhere in the given search
+    /// configuration to `patset`. If multiple patterns match at the same
+    /// position and this `PikeVM` was configured with [`MatchKind::All`]
+    /// semantics, then all matching patterns are written to the given set.
+    ///
+    /// Unless all of the patterns in this `PikeVM` are anchored, then
+    /// generally speaking, this will visit every byte in the haystack.
+    ///
+    /// This search routine *does not* clear the pattern set.
This gives some + /// flexibility to the caller (e.g., running multiple searches with the + /// same pattern set), but does make the API bug-prone if you're reusing + /// the same pattern set for multiple searches but intended them to be + /// independent. + /// + /// If a pattern ID matched but the given `PatternSet` does not have + /// sufficient capacity to store it, then it is not inserted and silently + /// dropped. + /// + /// # Example + /// + /// This example shows how to find all matching patterns in a haystack, + /// even when some patterns match at the same position as other patterns. + /// + /// ``` + /// # if cfg!(miri) { return Ok(()); } // miri takes too long + /// use regex_automata::{ + /// nfa::thompson::pikevm::PikeVM, + /// Input, MatchKind, PatternSet, + /// }; + /// + /// let patterns = &[ + /// r"\w+", r"\d+", r"\pL+", r"foo", r"bar", r"barfoo", r"foobar", + /// ]; + /// let re = PikeVM::builder() + /// .configure(PikeVM::config().match_kind(MatchKind::All)) + /// .build_many(patterns)?; + /// let mut cache = re.create_cache(); + /// + /// let input = Input::new("foobar"); + /// let mut patset = PatternSet::new(re.pattern_len()); + /// re.which_overlapping_matches(&mut cache, &input, &mut patset); + /// let expected = vec![0, 2, 3, 4, 6]; + /// let got: Vec = patset.iter().map(|p| p.as_usize()).collect(); + /// assert_eq!(expected, got); + /// + /// # Ok::<(), Box>(()) + /// ``` + #[inline] + pub fn which_overlapping_matches( + &self, + cache: &mut Cache, + input: &Input<'_>, + patset: &mut PatternSet, + ) { + self.which_overlapping_imp(cache, input, patset) + } +} + +impl PikeVM { + /// The implementation of standard leftmost search. + /// + /// Capturing group spans are written to `slots`, but only if requested. + /// `slots` can be any length. Any slot in the NFA that is activated but + /// which is out of bounds for the given `slots` is ignored. + fn search_imp( + &self, + cache: &mut Cache, + input: &Input<'_>, + slots: &mut [Option], + ) -> Option { + cache.setup_search(slots.len()); + if input.is_done() { + return None; + } + // Why do we even care about this? Well, in our 'Captures' + // representation, we use usize::MAX as a sentinel to indicate "no + // match." This isn't problematic so long as our haystack doesn't have + // a maximal length. Byte slices are guaranteed by Rust to have a + // length that fits into isize, and so this assert should always pass. + // But we put it here to make our assumption explicit. + assert!( + input.haystack().len() < core::usize::MAX, + "byte slice lengths must be less than usize MAX", + ); + instrument!(|c| c.reset(&self.nfa)); + + // Whether we want to visit all match states instead of emulating the + // 'leftmost' semantics of typical backtracking regex engines. + let allmatches = + self.config.get_match_kind().continue_past_first_match(); + let (anchored, start_id) = match self.start_config(input) { + None => return None, + Some(config) => config, + }; + + let pre = + if anchored { None } else { self.get_config().get_prefilter() }; + let Cache { ref mut stack, ref mut curr, ref mut next } = cache; + let mut hm = None; + // Yes, our search doesn't end at input.end(), but includes it. This + // is necessary because matches are delayed by one byte, just like + // how the DFA engines work. The delay is used to handle look-behind + // assertions. In the case of the PikeVM, the delay is implemented + // by not considering a match to exist until it is visited in + // 'steps'. 
Technically, we know a match exists in the previous + // iteration via 'epsilon_closure'. (It's the same thing in NFA-to-DFA + // determinization. We don't mark a DFA state as a match state if it + // contains an NFA match state, but rather, whether the DFA state was + // generated by a transition from a DFA state that contains an NFA + // match state.) + let mut at = input.start(); + while at <= input.end() { + // If we have no states left to visit, then there are some cases + // where we know we can quit early or even skip ahead. + if curr.set.is_empty() { + // We have a match and we haven't been instructed to continue + // on even after finding a match, so we can quit. + if hm.is_some() && !allmatches { + break; + } + // If we're running an anchored search and we've advanced + // beyond the start position with no other states to try, then + // we will never observe a match and thus can stop. + if anchored && at > input.start() { + break; + } + // If there no states left to explore at this position and we + // know we can't terminate early, then we are effectively at + // the starting state of the NFA. If we fell through here, + // we'd end up adding our '(?s-u:.)*?' prefix and it would be + // the only thing in 'curr'. So we might as well just skip + // ahead until we find something that we know might advance us + // forward. + if let Some(pre) = pre { + let span = Span::from(at..input.end()); + match pre.find(input.haystack(), span) { + None => break, + Some(ref span) => at = span.start, + } + } + } + // Instead of using the NFA's unanchored start state, we actually + // always use its anchored starting state. As a result, when doing + // an unanchored search, we need to simulate our own '(?s-u:.)*?' + // prefix, to permit a match to appear anywhere. + // + // Now, we don't *have* to do things this way. We could use the + // NFA's unanchored starting state and do one 'epsilon_closure' + // call from that starting state before the main loop here. And + // that is just as correct. However, it turns out to be slower + // than our approach here because it slightly increases the cost + // of processing each byte by requiring us to visit more NFA + // states to deal with the additional NFA states in the unanchored + // prefix. By simulating it explicitly here, we lower those costs + // substantially. The cost is itself small, but it adds up for + // large haystacks. + // + // In order to simulate the '(?s-u:.)*?' prefix---which is not + // greedy---we are careful not to perform an epsilon closure on + // the start state if we already have a match. Namely, if we + // did otherwise, we would never reach a terminating condition + // because there would always be additional states to process. + // In effect, the exclusion of running 'epsilon_closure' when + // we have a match corresponds to the "dead" states we have in + // our DFA regex engines. Namely, in a DFA, match states merely + // instruct the search execution to record the current offset as + // the most recently seen match. It is the dead state that actually + // indicates when to stop the search (other than EOF or quit + // states). + // + // However, when 'allmatches' is true, the caller has asked us to + // leave in every possible match state. This tends not to make a + // whole lot of sense in unanchored searches, because it means the + // search really cannot terminate until EOF. And often, in that + // case, you wind up skipping over a bunch of matches and are left + // with the "last" match. 
Arguably, it just doesn't make a lot of + // sense to run a 'leftmost' search (which is what this routine is) + // with 'allmatches' set to true. But the DFAs support it and this + // matches their behavior. (Generally, 'allmatches' is useful for + // overlapping searches or leftmost anchored searches to find the + // longest possible match by ignoring match priority.) + // + // Additionally, when we're running an anchored search, this + // epsilon closure should only be computed at the beginning of the + // search. If we re-computed it at every position, we would be + // simulating an unanchored search when we were tasked to perform + // an anchored search. + if (hm.is_none() || allmatches) + && (!anchored || at == input.start()) + { + // Since we are adding to the 'curr' active states and since + // this is for the start ID, we use a slots slice that is + // guaranteed to have the right length but where every element + // is absent. This is exactly what we want, because this + // epsilon closure is responsible for simulating an unanchored + // '(?s:.)*?' prefix. It is specifically outside of any + // capturing groups, and thus, using slots that are always + // absent is correct. + // + // Note though that we can't just use '&mut []' here, since + // this epsilon closure may traverse through 'Captures' epsilon + // transitions, and thus must be able to write offsets to the + // slots given which are later copied to slot values in 'curr'. + let slots = next.slot_table.all_absent(); + self.epsilon_closure(stack, slots, curr, input, at, start_id); + } + if let Some(pid) = self.nexts(stack, curr, next, input, at, slots) + { + hm = Some(HalfMatch::new(pid, at)); + } + // Unless the caller asked us to return early, we need to mush on + // to see if we can extend our match. (But note that 'nexts' will + // quit right after seeing a match when match_kind==LeftmostFirst, + // as is consistent with leftmost-first match priority.) + if input.get_earliest() && hm.is_some() { + break; + } + core::mem::swap(curr, next); + next.set.clear(); + at += 1; + } + instrument!(|c| c.eprint(&self.nfa)); + hm + } + + /// The implementation for the 'which_overlapping_matches' API. Basically, + /// we do a single scan through the entire haystack (unless our regex + /// or search is anchored) and record every pattern that matched. In + /// particular, when MatchKind::All is used, this supports overlapping + /// matches. So if we have the regexes 'sam' and 'samwise', they will + /// *both* be reported in the pattern set when searching the haystack + /// 'samwise'. + fn which_overlapping_imp( + &self, + cache: &mut Cache, + input: &Input<'_>, + patset: &mut PatternSet, + ) { + // NOTE: This is effectively a copy of 'search_imp' above, but with no + // captures support and instead writes patterns that matched directly + // to 'patset'. See that routine for better commentary about what's + // going on in this routine. We probably could unify the routines using + // generics or more helper routines, but I'm not sure it's worth it. + // + // NOTE: We somewhat go out of our way here to support things like + // 'input.get_earliest()' and 'leftmost-first' match semantics. Neither + // of those seem particularly relevant to this routine, but they are + // both supported by the DFA analogs of this routine by construction + // and composition, so it seems like good sense to have the PikeVM + // match that behavior. 
+ + cache.setup_search(0); + if input.is_done() { + return; + } + assert!( + input.haystack().len() < core::usize::MAX, + "byte slice lengths must be less than usize MAX", + ); + instrument!(|c| c.reset(&self.nfa)); + + let allmatches = + self.config.get_match_kind().continue_past_first_match(); + let (anchored, start_id) = match self.start_config(input) { + None => return, + Some(config) => config, + }; + + let Cache { ref mut stack, ref mut curr, ref mut next } = cache; + for at in input.start()..=input.end() { + let any_matches = !patset.is_empty(); + if curr.set.is_empty() { + if any_matches && !allmatches { + break; + } + if anchored && at > input.start() { + break; + } + } + if !any_matches || allmatches { + let slots = &mut []; + self.epsilon_closure(stack, slots, curr, input, at, start_id); + } + self.nexts_overlapping(stack, curr, next, input, at, patset); + // If we found a match and filled our set, then there is no more + // additional info that we can provide. Thus, we can quit. We also + // quit if the caller asked us to stop at the earliest point that + // we know a match exists. + if patset.is_full() || input.get_earliest() { + break; + } + core::mem::swap(curr, next); + next.set.clear(); + } + instrument!(|c| c.eprint(&self.nfa)); + } + + /// Process the active states in 'curr' to find the states (written to + /// 'next') we should process for the next byte in the haystack. + /// + /// 'stack' is used to perform a depth first traversal of the NFA when + /// computing an epsilon closure. + /// + /// When a match is found, the slots for that match state (in 'curr') are + /// copied to 'caps'. Moreover, once a match is seen, processing for 'curr' + /// stops (unless the PikeVM was configured with MatchKind::All semantics). + #[cfg_attr(feature = "perf-inline", inline(always))] + fn nexts( + &self, + stack: &mut Vec, + curr: &mut ActiveStates, + next: &mut ActiveStates, + input: &Input<'_>, + at: usize, + slots: &mut [Option], + ) -> Option { + instrument!(|c| c.record_state_set(&curr.set)); + let mut pid = None; + let ActiveStates { ref set, ref mut slot_table } = *curr; + for sid in set.iter() { + pid = match self.next(stack, slot_table, next, input, at, sid) { + None => continue, + Some(pid) => Some(pid), + }; + slots.copy_from_slice(slot_table.for_state(sid)); + if !self.config.get_match_kind().continue_past_first_match() { + break; + } + } + pid + } + + /// Like 'nexts', but for the overlapping case. This doesn't write any + /// slots, and instead just writes which pattern matched in 'patset'. + #[cfg_attr(feature = "perf-inline", inline(always))] + fn nexts_overlapping( + &self, + stack: &mut Vec, + curr: &mut ActiveStates, + next: &mut ActiveStates, + input: &Input<'_>, + at: usize, + patset: &mut PatternSet, + ) { + instrument!(|c| c.record_state_set(&curr.set)); + let utf8empty = self.get_nfa().has_empty() && self.get_nfa().is_utf8(); + let ActiveStates { ref set, ref mut slot_table } = *curr; + for sid in set.iter() { + let pid = match self.next(stack, slot_table, next, input, at, sid) + { + None => continue, + Some(pid) => pid, + }; + // This handles the case of finding a zero-width match that splits + // a codepoint. Namely, if we're in UTF-8 mode AND we know we can + // match the empty string, then the only valid way of getting to + // this point with an offset that splits a codepoint is when we + // have an empty match. Such matches, in UTF-8 mode, must not be + // reported. So we just skip them here and pretend as if we did + // not see a match. 
+ if utf8empty && !input.is_char_boundary(at) { + continue; + } + let _ = patset.try_insert(pid); + if !self.config.get_match_kind().continue_past_first_match() { + break; + } + } + } + + /// Starting from 'sid', if the position 'at' in the 'input' haystack has a + /// transition defined out of 'sid', then add the state transitioned to and + /// its epsilon closure to the 'next' set of states to explore. + /// + /// 'stack' is used by the epsilon closure computation to perform a depth + /// first traversal of the NFA. + /// + /// 'curr_slot_table' should be the table of slots for the current set of + /// states being explored. If there is a transition out of 'sid', then + /// sid's row in the slot table is used to perform the epsilon closure. + #[cfg_attr(feature = "perf-inline", inline(always))] + fn next( + &self, + stack: &mut Vec, + curr_slot_table: &mut SlotTable, + next: &mut ActiveStates, + input: &Input<'_>, + at: usize, + sid: StateID, + ) -> Option { + instrument!(|c| c.record_step(sid)); + match *self.nfa.state(sid) { + State::Fail + | State::Look { .. } + | State::Union { .. } + | State::BinaryUnion { .. } + | State::Capture { .. } => None, + State::ByteRange { ref trans } => { + if trans.matches(input.haystack(), at) { + let slots = curr_slot_table.for_state(sid); + // OK because 'at <= haystack.len() < usize::MAX', so + // adding 1 will never wrap. + let at = at.wrapping_add(1); + self.epsilon_closure( + stack, slots, next, input, at, trans.next, + ); + } + None + } + State::Sparse(ref sparse) => { + if let Some(next_sid) = sparse.matches(input.haystack(), at) { + let slots = curr_slot_table.for_state(sid); + // OK because 'at <= haystack.len() < usize::MAX', so + // adding 1 will never wrap. + let at = at.wrapping_add(1); + self.epsilon_closure( + stack, slots, next, input, at, next_sid, + ); + } + None + } + State::Dense(ref dense) => { + if let Some(next_sid) = dense.matches(input.haystack(), at) { + let slots = curr_slot_table.for_state(sid); + // OK because 'at <= haystack.len() < usize::MAX', so + // adding 1 will never wrap. + let at = at.wrapping_add(1); + self.epsilon_closure( + stack, slots, next, input, at, next_sid, + ); + } + None + } + State::Match { pattern_id } => Some(pattern_id), + } + } + + /// Compute the epsilon closure of 'sid', writing the closure into 'next' + /// while copying slot values from 'curr_slots' into corresponding states + /// in 'next'. 'curr_slots' should be the slot values corresponding to + /// 'sid'. + /// + /// The given 'stack' is used to perform a depth first traversal of the + /// NFA by recursively following all epsilon transitions out of 'sid'. + /// Conditional epsilon transitions are followed if and only if they are + /// satisfied for the position 'at' in the 'input' haystack. + /// + /// While this routine may write to 'curr_slots', once it returns, any + /// writes are undone and the original values (even if absent) are + /// restored. 
+ #[cfg_attr(feature = "perf-inline", inline(always))] + fn epsilon_closure( + &self, + stack: &mut Vec, + curr_slots: &mut [Option], + next: &mut ActiveStates, + input: &Input<'_>, + at: usize, + sid: StateID, + ) { + instrument!(|c| { + c.record_closure(sid); + c.record_stack_push(sid); + }); + stack.push(FollowEpsilon::Explore(sid)); + while let Some(frame) = stack.pop() { + match frame { + FollowEpsilon::RestoreCapture { slot, offset: pos } => { + curr_slots[slot] = pos; + } + FollowEpsilon::Explore(sid) => { + self.epsilon_closure_explore( + stack, curr_slots, next, input, at, sid, + ); + } + } + } + } + + /// Explore all of the epsilon transitions out of 'sid'. This is mostly + /// split out from 'epsilon_closure' in order to clearly delineate + /// the actual work of computing an epsilon closure from the stack + /// book-keeping. + /// + /// This will push any additional explorations needed on to 'stack'. + /// + /// 'curr_slots' should refer to the slots for the currently active NFA + /// state. That is, the current state we are stepping through. These + /// slots are mutated in place as new 'Captures' states are traversed + /// during epsilon closure, but the slots are restored to their original + /// values once the full epsilon closure is completed. The ultimate use of + /// 'curr_slots' is to copy them to the corresponding 'next_slots', so that + /// the capturing group spans are forwarded from the currently active state + /// to the next. + /// + /// 'next' refers to the next set of active states. Computing an epsilon + /// closure may increase the next set of active states. + /// + /// 'input' refers to the caller's input configuration and 'at' refers to + /// the current position in the haystack. These are used to check whether + /// conditional epsilon transitions (like look-around) are satisfied at + /// the current position. If they aren't, then the epsilon closure won't + /// include them. + #[cfg_attr(feature = "perf-inline", inline(always))] + fn epsilon_closure_explore( + &self, + stack: &mut Vec, + curr_slots: &mut [Option], + next: &mut ActiveStates, + input: &Input<'_>, + at: usize, + mut sid: StateID, + ) { + // We can avoid pushing some state IDs on to our stack in precisely + // the cases where a 'push(x)' would be immediately followed by a 'x + // = pop()'. This is achieved by this outer-loop. We simply set 'sid' + // to be the next state ID we want to explore once we're done with + // our initial exploration. In practice, this avoids a lot of stack + // thrashing. + loop { + instrument!(|c| c.record_set_insert(sid)); + // Record this state as part of our next set of active states. If + // we've already explored it, then no need to do it again. + if !next.set.insert(sid) { + return; + } + match *self.nfa.state(sid) { + State::Fail + | State::Match { .. } + | State::ByteRange { .. } + | State::Sparse { .. } + | State::Dense { .. } => { + next.slot_table.for_state(sid).copy_from_slice(curr_slots); + return; + } + State::Look { look, next } => { + // OK because we don't permit building a searcher with a + // Unicode word boundary if the requisite Unicode data is + // unavailable. + if !self.nfa.look_matcher().matches_inline( + look, + input.haystack(), + at, + ) { + return; + } + sid = next; + } + State::Union { ref alternates } => { + sid = match alternates.get(0) { + None => return, + Some(&sid) => sid, + }; + instrument!(|c| { + for &alt in &alternates[1..] { + c.record_stack_push(alt); + } + }); + stack.extend( + alternates[1..] 
+ .iter() + .copied() + .rev() + .map(FollowEpsilon::Explore), + ); + } + State::BinaryUnion { alt1, alt2 } => { + sid = alt1; + instrument!(|c| c.record_stack_push(sid)); + stack.push(FollowEpsilon::Explore(alt2)); + } + State::Capture { next, slot, .. } => { + // There's no need to do anything with slots that + // ultimately won't be copied into the caller-provided + // 'Captures' value. So we just skip dealing with them at + // all. + if slot.as_usize() < curr_slots.len() { + instrument!(|c| c.record_stack_push(sid)); + stack.push(FollowEpsilon::RestoreCapture { + slot, + offset: curr_slots[slot], + }); + // OK because length of a slice must fit into an isize. + curr_slots[slot] = Some(NonMaxUsize::new(at).unwrap()); + } + sid = next; + } + } + } + } + + /// Return the starting configuration of a PikeVM search. + /// + /// The "start config" is basically whether the search should be anchored + /// or not and the NFA state ID at which to begin the search. The state ID + /// returned always corresponds to an anchored starting state even when the + /// search is unanchored. This is because the PikeVM search loop deals with + /// unanchored searches with an explicit epsilon closure out of the start + /// state. + /// + /// This routine accounts for both the caller's `Input` configuration + /// and the pattern itself. For example, even if the caller asks for an + /// unanchored search, if the pattern itself is anchored, then this will + /// always return 'true' because implementing an unanchored search in that + /// case would be incorrect. + /// + /// Similarly, if the caller requests an anchored search for a particular + /// pattern, then the starting state ID returned will reflect that. + /// + /// If a pattern ID is given in the input configuration that is not in + /// this regex, then `None` is returned. + fn start_config(&self, input: &Input<'_>) -> Option<(bool, StateID)> { + match input.get_anchored() { + // Only way we're unanchored is if both the caller asked for an + // unanchored search *and* the pattern is itself not anchored. + Anchored::No => Some(( + self.nfa.is_always_start_anchored(), + self.nfa.start_anchored(), + )), + Anchored::Yes => Some((true, self.nfa.start_anchored())), + Anchored::Pattern(pid) => { + Some((true, self.nfa.start_pattern(pid)?)) + } + } + } +} + +/// An iterator over all non-overlapping matches for a particular search. +/// +/// The iterator yields a [`Match`] value until no more matches could be found. +/// +/// The lifetime parameters are as follows: +/// +/// * `'r` represents the lifetime of the PikeVM. +/// * `'c` represents the lifetime of the PikeVM's cache. +/// * `'h` represents the lifetime of the haystack being searched. +/// +/// This iterator can be created with the [`PikeVM::find_iter`] method. +#[derive(Debug)] +pub struct FindMatches<'r, 'c, 'h> { + re: &'r PikeVM, + cache: &'c mut Cache, + caps: Captures, + it: iter::Searcher<'h>, +} + +impl<'r, 'c, 'h> Iterator for FindMatches<'r, 'c, 'h> { + type Item = Match; + + #[inline] + fn next(&mut self) -> Option { + // Splitting 'self' apart seems necessary to appease borrowck. + let FindMatches { re, ref mut cache, ref mut caps, ref mut it } = + *self; + // 'advance' converts errors into panics, which is OK here because + // the PikeVM can never return an error. + it.advance(|input| { + re.search(cache, input, caps); + Ok(caps.get_match()) + }) + } +} + +/// An iterator over all non-overlapping leftmost matches, with their capturing +/// groups, for a particular search. 
+/// +/// The iterator yields a [`Captures`] value until no more matches could be +/// found. +/// +/// The lifetime parameters are as follows: +/// +/// * `'r` represents the lifetime of the PikeVM. +/// * `'c` represents the lifetime of the PikeVM's cache. +/// * `'h` represents the lifetime of the haystack being searched. +/// +/// This iterator can be created with the [`PikeVM::captures_iter`] method. +#[derive(Debug)] +pub struct CapturesMatches<'r, 'c, 'h> { + re: &'r PikeVM, + cache: &'c mut Cache, + caps: Captures, + it: iter::Searcher<'h>, +} + +impl<'r, 'c, 'h> Iterator for CapturesMatches<'r, 'c, 'h> { + type Item = Captures; + + #[inline] + fn next(&mut self) -> Option { + // Splitting 'self' apart seems necessary to appease borrowck. + let CapturesMatches { re, ref mut cache, ref mut caps, ref mut it } = + *self; + // 'advance' converts errors into panics, which is OK here because + // the PikeVM can never return an error. + it.advance(|input| { + re.search(cache, input, caps); + Ok(caps.get_match()) + }); + if caps.is_match() { + Some(caps.clone()) + } else { + None + } + } +} + +/// A cache represents mutable state that a [`PikeVM`] requires during a +/// search. +/// +/// For a given [`PikeVM`], its corresponding cache may be created either via +/// [`PikeVM::create_cache`], or via [`Cache::new`]. They are equivalent in +/// every way, except the former does not require explicitly importing `Cache`. +/// +/// A particular `Cache` is coupled with the [`PikeVM`] from which it +/// was created. It may only be used with that `PikeVM`. A cache and its +/// allocations may be re-purposed via [`Cache::reset`], in which case, it can +/// only be used with the new `PikeVM` (and not the old one). +#[derive(Clone, Debug)] +pub struct Cache { + /// Stack used while computing epsilon closure. This effectively lets us + /// move what is more naturally expressed through recursion to a stack + /// on the heap. + stack: Vec, + /// The current active states being explored for the current byte in the + /// haystack. + curr: ActiveStates, + /// The next set of states we're building that will be explored for the + /// next byte in the haystack. + next: ActiveStates, +} + +impl Cache { + /// Create a new [`PikeVM`] cache. + /// + /// A potentially more convenient routine to create a cache is + /// [`PikeVM::create_cache`], as it does not require also importing the + /// `Cache` type. + /// + /// If you want to reuse the returned `Cache` with some other `PikeVM`, + /// then you must call [`Cache::reset`] with the desired `PikeVM`. + pub fn new(re: &PikeVM) -> Cache { + Cache { + stack: vec![], + curr: ActiveStates::new(re), + next: ActiveStates::new(re), + } + } + + /// Reset this cache such that it can be used for searching with a + /// different [`PikeVM`]. + /// + /// A cache reset permits reusing memory already allocated in this cache + /// with a different `PikeVM`. + /// + /// # Example + /// + /// This shows how to re-purpose a cache for use with a different `PikeVM`. + /// + /// ``` + /// # if cfg!(miri) { return Ok(()); } // miri takes too long + /// use regex_automata::{nfa::thompson::pikevm::PikeVM, Match}; + /// + /// let re1 = PikeVM::new(r"\w")?; + /// let re2 = PikeVM::new(r"\W")?; + /// + /// let mut cache = re1.create_cache(); + /// assert_eq!( + /// Some(Match::must(0, 0..2)), + /// re1.find_iter(&mut cache, "Δ").next(), + /// ); + /// + /// // Using 'cache' with re2 is not allowed. It may result in panics or + /// // incorrect results. 
In order to re-purpose the cache, we must reset + /// // it with the PikeVM we'd like to use it with. + /// // + /// // Similarly, after this reset, using the cache with 're1' is also not + /// // allowed. + /// cache.reset(&re2); + /// assert_eq!( + /// Some(Match::must(0, 0..3)), + /// re2.find_iter(&mut cache, "☃").next(), + /// ); + /// + /// # Ok::<(), Box>(()) + /// ``` + pub fn reset(&mut self, re: &PikeVM) { + self.curr.reset(re); + self.next.reset(re); + } + + /// Returns the heap memory usage, in bytes, of this cache. + /// + /// This does **not** include the stack size used up by this cache. To + /// compute that, use `std::mem::size_of::()`. + pub fn memory_usage(&self) -> usize { + use core::mem::size_of; + (self.stack.len() * size_of::()) + + self.curr.memory_usage() + + self.next.memory_usage() + } + + /// Clears this cache. This should be called at the start of every search + /// to ensure we start with a clean slate. + /// + /// This also sets the length of the capturing groups used in the current + /// search. This permits an optimization where by 'SlotTable::for_state' + /// only returns the number of slots equivalent to the number of slots + /// given in the 'Captures' value. This may be less than the total number + /// of possible slots, e.g., when one only wants to track overall match + /// offsets. This in turn permits less copying of capturing group spans + /// in the PikeVM. + fn setup_search(&mut self, captures_slot_len: usize) { + self.stack.clear(); + self.curr.setup_search(captures_slot_len); + self.next.setup_search(captures_slot_len); + } +} + +/// A set of active states used to "simulate" the execution of an NFA via the +/// PikeVM. +/// +/// There are two sets of these used during NFA simulation. One set corresponds +/// to the "current" set of states being traversed for the current position +/// in a haystack. The other set corresponds to the "next" set of states being +/// built, which will become the new "current" set for the next position in the +/// haystack. These two sets correspond to CLIST and NLIST in Thompson's +/// original paper regexes: https://dl.acm.org/doi/pdf/10.1145/363347.363387 +/// +/// In addition to representing a set of NFA states, this also maintains slot +/// values for each state. These slot values are what turn the NFA simulation +/// into the "Pike VM." Namely, they track capturing group values for each +/// state. During the computation of epsilon closure, we copy slot values from +/// states in the "current" set to the "next" set. Eventually, once a match +/// is found, the slot values for that match state are what we write to the +/// caller provided 'Captures' value. +#[derive(Clone, Debug)] +struct ActiveStates { + /// The set of active NFA states. This set preserves insertion order, which + /// is critical for simulating the match semantics of backtracking regex + /// engines. + set: SparseSet, + /// The slots for every NFA state, where each slot stores a (possibly + /// absent) offset. Every capturing group has two slots. One for a start + /// offset and one for an end offset. + slot_table: SlotTable, +} + +impl ActiveStates { + /// Create a new set of active states for the given PikeVM. The active + /// states returned may only be used with the given PikeVM. (Use 'reset' + /// to re-purpose the allocation for a different PikeVM.) 
+ fn new(re: &PikeVM) -> ActiveStates { + let mut active = ActiveStates { + set: SparseSet::new(0), + slot_table: SlotTable::new(), + }; + active.reset(re); + active + } + + /// Reset this set of active states such that it can be used with the given + /// PikeVM (and only that PikeVM). + fn reset(&mut self, re: &PikeVM) { + self.set.resize(re.get_nfa().states().len()); + self.slot_table.reset(re); + } + + /// Return the heap memory usage, in bytes, used by this set of active + /// states. + /// + /// This does not include the stack size of this value. + fn memory_usage(&self) -> usize { + self.set.memory_usage() + self.slot_table.memory_usage() + } + + /// Setup this set of active states for a new search. The given slot + /// length should be the number of slots in a caller provided 'Captures' + /// (and may be zero). + fn setup_search(&mut self, captures_slot_len: usize) { + self.set.clear(); + self.slot_table.setup_search(captures_slot_len); + } +} + +/// A table of slots, where each row represent a state in an NFA. Thus, the +/// table has room for storing slots for every single state in an NFA. +/// +/// This table is represented with a single contiguous allocation. In general, +/// the notion of "capturing group" doesn't really exist at this level of +/// abstraction, hence the name "slot" instead. (Indeed, every capturing group +/// maps to a pair of slots, one for the start offset and one for the end +/// offset.) Slots are indexed by the 'Captures' NFA state. +/// +/// N.B. Not every state actually needs a row of slots. Namely, states that +/// only have epsilon transitions currently never have anything written to +/// their rows in this table. Thus, the table is somewhat wasteful in its heap +/// usage. However, it is important to maintain fast random access by state +/// ID, which means one giant table tends to work well. RE2 takes a different +/// approach here and allocates each row as its own reference counted thing. +/// I explored such a strategy at one point here, but couldn't get it to work +/// well using entirely safe code. (To the ambitious reader: I encourage you to +/// re-litigate that experiment.) I very much wanted to stick to safe code, but +/// could be convinced otherwise if there was a solid argument and the safety +/// was encapsulated well. +#[derive(Clone, Debug)] +struct SlotTable { + /// The actual table of offsets. + table: Vec>, + /// The number of slots per state, i.e., the table's stride or the length + /// of each row. + slots_per_state: usize, + /// The number of slots in the caller-provided 'Captures' value for the + /// current search. Setting this to 'slots_per_state' is always correct, + /// but may be wasteful. + slots_for_captures: usize, +} + +impl SlotTable { + /// Create a new slot table. + /// + /// One should call 'reset' with the corresponding PikeVM before use. + fn new() -> SlotTable { + SlotTable { table: vec![], slots_for_captures: 0, slots_per_state: 0 } + } + + /// Reset this slot table such that it can be used with the given PikeVM + /// (and only that PikeVM). + fn reset(&mut self, re: &PikeVM) { + let nfa = re.get_nfa(); + self.slots_per_state = nfa.group_info().slot_len(); + // This is always correct, but may be reduced for a particular search + // if a 'Captures' has fewer slots, e.g., none at all or only slots + // for tracking the overall match instead of all slots for every + // group. 
+ self.slots_for_captures = core::cmp::max( + self.slots_per_state, + nfa.pattern_len().checked_mul(2).unwrap(), + ); + let len = nfa + .states() + .len() + .checked_mul(self.slots_per_state) + // Add space to account for scratch space used during a search. + .and_then(|x| x.checked_add(self.slots_for_captures)) + // It seems like this could actually panic on legitimate inputs on + // 32-bit targets, and very likely to panic on 16-bit. Should we + // somehow convert this to an error? What about something similar + // for the lazy DFA cache? If you're tripping this assert, please + // file a bug. + .expect("slot table length doesn't overflow"); + // This happens about as often as a regex is compiled, so it probably + // should be at debug level, but I found it quite distracting and not + // particularly useful. + trace!( + "resizing PikeVM active states table to {} entries \ + (slots_per_state={})", + len, + self.slots_per_state, + ); + self.table.resize(len, None); + } + + /// Return the heap memory usage, in bytes, used by this slot table. + /// + /// This does not include the stack size of this value. + fn memory_usage(&self) -> usize { + self.table.len() * core::mem::size_of::>() + } + + /// Perform any per-search setup for this slot table. + /// + /// In particular, this sets the length of the number of slots used in the + /// 'Captures' given by the caller (if any at all). This number may be + /// smaller than the total number of slots available, e.g., when the caller + /// is only interested in tracking the overall match and not the spans of + /// every matching capturing group. Only tracking the overall match can + /// save a substantial amount of time copying capturing spans during a + /// search. + fn setup_search(&mut self, captures_slot_len: usize) { + self.slots_for_captures = captures_slot_len; + } + + /// Return a mutable slice of the slots for the given state. + /// + /// Note that the length of the slice returned may be less than the total + /// number of slots available for this state. In particular, the length + /// always matches the number of slots indicated via 'setup_search'. + fn for_state(&mut self, sid: StateID) -> &mut [Option] { + let i = sid.as_usize() * self.slots_per_state; + &mut self.table[i..i + self.slots_for_captures] + } + + /// Return a slice of slots of appropriate length where every slot offset + /// is guaranteed to be absent. This is useful in cases where you need to + /// compute an epsilon closure outside of the user supplied regex, and thus + /// never want it to have any capturing slots set. + fn all_absent(&mut self) -> &mut [Option] { + let i = self.table.len() - self.slots_for_captures; + &mut self.table[i..i + self.slots_for_captures] + } +} + +/// Represents a stack frame for use while computing an epsilon closure. +/// +/// (An "epsilon closure" refers to the set of reachable NFA states from a +/// single state without consuming any input. That is, the set of all epsilon +/// transitions not only from that single state, but from every other state +/// reachable by an epsilon transition as well. This is why it's called a +/// "closure." Computing an epsilon closure is also done during DFA +/// determinization! Compare and contrast the epsilon closure here in this +/// PikeVM and the one used for determinization in crate::util::determinize.) +/// +/// Computing the epsilon closure in a Thompson NFA proceeds via a depth +/// first traversal over all epsilon transitions from a particular state. 
+/// (A depth first traversal is important because it emulates the same priority +/// of matches that is typically found in backtracking regex engines.) This +/// depth first traversal is naturally expressed using recursion, but to avoid +/// a call stack size proportional to the size of a regex, we put our stack on +/// the heap instead. +/// +/// This stack thus consists of call frames. The typical call frame is +/// `Explore`, which instructs epsilon closure to explore the epsilon +/// transitions from that state. (Subsequent epsilon transitions are then +/// pushed on to the stack as more `Explore` frames.) If the state ID being +/// explored has no epsilon transitions, then the capturing group slots are +/// copied from the original state that sparked the epsilon closure (from the +/// 'step' routine) to the state ID being explored. This way, capturing group +/// slots are forwarded from the previous state to the next. +/// +/// The other stack frame, `RestoreCaptures`, instructs the epsilon closure to +/// set the position for a particular slot back to some particular offset. This +/// frame is pushed when `Explore` sees a `Capture` transition. `Explore` will +/// set the offset of the slot indicated in `Capture` to the current offset, +/// and then push the old offset on to the stack as a `RestoreCapture` frame. +/// Thus, the new offset is only used until the epsilon closure reverts back to +/// the `RestoreCapture` frame. In effect, this gives the `Capture` epsilon +/// transition its "scope" to only states that come "after" it during depth +/// first traversal. +#[derive(Clone, Debug)] +enum FollowEpsilon { + /// Explore the epsilon transitions from a state ID. + Explore(StateID), + /// Reset the given `slot` to the given `offset` (which might be `None`). + RestoreCapture { slot: SmallIndex, offset: Option }, +} + +/// A set of counters that "instruments" a PikeVM search. To enable this, you +/// must enable the 'internal-instrument-pikevm' feature. Then run your Rust +/// program with RUST_LOG=regex_automata::nfa::thompson::pikevm=trace set in +/// the environment. The metrics collected will be dumped automatically for +/// every search executed by the PikeVM. +/// +/// NOTE: When 'internal-instrument-pikevm' is enabled, it will likely cause an +/// absolute decrease in wall-clock performance, even if the 'trace' log level +/// isn't enabled. (Although, we do try to avoid extra costs when 'trace' isn't +/// enabled.) The main point of instrumentation is to get counts of various +/// events that occur during the PikeVM's execution. +/// +/// This is a somewhat hacked together collection of metrics that are useful +/// to gather from a PikeVM search. In particular, it lets us scrutinize the +/// performance profile of a search beyond what general purpose profiling tools +/// give us. Namely, we orient the profiling data around the specific states of +/// the NFA. +/// +/// In other words, this lets us see which parts of the NFA graph are most +/// frequently activated. This then provides direction for optimization +/// opportunities. +/// +/// The really sad part about this is that it absolutely clutters up the PikeVM +/// implementation. :'( Another approach would be to just manually add this +/// code in whenever I want this kind of profiling data, but it's complicated +/// and tedious enough that I went with this approach... for now. 
+/// +/// When instrumentation is enabled (which also turns on 'logging'), then a +/// `Counters` is initialized for every search and `trace`'d just before the +/// search returns to the caller. +/// +/// Tip: When debugging performance problems with the PikeVM, it's best to try +/// to work with an NFA that is as small as possible. Otherwise the state graph +/// is likely to be too big to digest. +#[cfg(feature = "internal-instrument-pikevm")] +#[derive(Clone, Debug)] +struct Counters { + /// The number of times the NFA is in a particular permutation of states. + state_sets: alloc::collections::BTreeMap, u64>, + /// The number of times 'step' is called for a particular state ID (which + /// indexes this array). + steps: Vec, + /// The number of times an epsilon closure was computed for a state. + closures: Vec, + /// The number of times a particular state ID is pushed on to a stack while + /// computing an epsilon closure. + stack_pushes: Vec, + /// The number of times a particular state ID is inserted into a sparse set + /// while computing an epsilon closure. + set_inserts: Vec, +} + +#[cfg(feature = "internal-instrument-pikevm")] +impl Counters { + fn empty() -> Counters { + Counters { + state_sets: alloc::collections::BTreeMap::new(), + steps: vec![], + closures: vec![], + stack_pushes: vec![], + set_inserts: vec![], + } + } + + fn reset(&mut self, nfa: &NFA) { + let len = nfa.states().len(); + + self.state_sets.clear(); + + self.steps.clear(); + self.steps.resize(len, 0); + + self.closures.clear(); + self.closures.resize(len, 0); + + self.stack_pushes.clear(); + self.stack_pushes.resize(len, 0); + + self.set_inserts.clear(); + self.set_inserts.resize(len, 0); + } + + fn eprint(&self, nfa: &NFA) { + trace!("===== START PikeVM Instrumentation Output ====="); + // We take the top-K most occurring state sets. Otherwise the output + // is likely to be overwhelming. And we probably only care about the + // most frequently occurring ones anyway. + const LIMIT: usize = 20; + let mut set_counts = + self.state_sets.iter().collect::, &u64)>>(); + set_counts.sort_by_key(|(_, &count)| core::cmp::Reverse(count)); + trace!("## PikeVM frequency of state sets (top {LIMIT})"); + for (set, count) in set_counts.iter().take(LIMIT) { + trace!("{set:?}: {count}"); + } + if set_counts.len() > LIMIT { + trace!( + "... 
{} sets omitted (out of {} total)",
+                set_counts.len() - LIMIT,
+                set_counts.len(),
+            );
+        }
+
+        trace!("");
+        trace!("## PikeVM total frequency of events");
+        trace!(
+            "steps: {}, closures: {}, stack-pushes: {}, set-inserts: {}",
+            self.steps.iter().copied().sum::<u64>(),
+            self.closures.iter().copied().sum::<u64>(),
+            self.stack_pushes.iter().copied().sum::<u64>(),
+            self.set_inserts.iter().copied().sum::<u64>(),
+        );
+
+        trace!("");
+        trace!("## PikeVM frequency of events broken down by state");
+        for sid in 0..self.steps.len() {
+            trace!(
+                "{:06}: steps: {}, closures: {}, \
+                 stack-pushes: {}, set-inserts: {}",
+                sid,
+                self.steps[sid],
+                self.closures[sid],
+                self.stack_pushes[sid],
+                self.set_inserts[sid],
+            );
+        }
+
+        trace!("");
+        trace!("## NFA debug display");
+        trace!("{nfa:?}");
+        trace!("===== END PikeVM Instrumentation Output =====");
+    }
+
+    fn record_state_set(&mut self, set: &SparseSet) {
+        let set = set.iter().collect::<Vec<StateID>>();
+        *self.state_sets.entry(set).or_insert(0) += 1;
+    }
+
+    fn record_step(&mut self, sid: StateID) {
+        self.steps[sid] += 1;
+    }
+
+    fn record_closure(&mut self, sid: StateID) {
+        self.closures[sid] += 1;
+    }
+
+    fn record_stack_push(&mut self, sid: StateID) {
+        self.stack_pushes[sid] += 1;
+    }
+
+    fn record_set_insert(&mut self, sid: StateID) {
+        self.set_inserts[sid] += 1;
+    }
+}
diff --git a/vendor/regex-automata/src/nfa/thompson/range_trie.rs b/vendor/regex-automata/src/nfa/thompson/range_trie.rs
new file mode 100644
index 00000000000000..57ae322d50af21
--- /dev/null
+++ b/vendor/regex-automata/src/nfa/thompson/range_trie.rs
@@ -0,0 +1,1051 @@
+/*
+I've called the primary data structure in this module a "range trie." As far
+as I can tell, there is no prior art on a data structure like this, however,
+it's likely someone somewhere has built something like it. Searching for
+"range trie" turns up the paper "Range Tries for Scalable Address Lookup,"
+but it does not appear relevant.
+
+The range trie is just like a trie in that it is a special case of a
+deterministic finite state machine. It has states and each state has a set
+of transitions to other states. It is acyclic, and, like a normal trie,
+it makes no attempt to reuse common suffixes among its elements. The key
+difference between a normal trie and a range trie below is that a range trie
+operates on *contiguous sequences* of bytes instead of singleton bytes.
+One could say that our alphabet is ranges of bytes instead of bytes
+themselves, except a key part of range trie construction is splitting ranges
+apart to ensure there is at most one transition that can be taken for any
+byte in a given state.
+
+I've tried to explain the details of how the range trie works below, so
+for now, we are left with trying to understand what problem we're trying to
+solve. Which is itself fairly involved!
+
+At the highest level, here's what we want to do. We want to convert a
+sequence of Unicode codepoints into a finite state machine whose transitions
+are over *bytes* and *not* Unicode codepoints. We want this because it makes
+said finite state machines much smaller and much faster to execute. As a
+simple example, consider a byte oriented automaton for all Unicode scalar
+values (0x00 through 0x10FFFF, not including surrogate codepoints):
+
+    [00-7F]
+    [C2-DF][80-BF]
+    [E0-E0][A0-BF][80-BF]
+    [E1-EC][80-BF][80-BF]
+    [ED-ED][80-9F][80-BF]
+    [EE-EF][80-BF][80-BF]
+    [F0-F0][90-BF][80-BF][80-BF]
+    [F1-F3][80-BF][80-BF][80-BF]
+    [F4-F4][80-8F][80-BF][80-BF]
+
+(These byte ranges are generated via the regex-syntax::utf8 module, which
+was based on Russ Cox's code in RE2, which was in turn based on Ken
+Thompson's implementation of the same idea in his Plan9 implementation of
+grep.)
+
+It should be fairly straight-forward to see how one could compile this into
+a DFA. The sequences are sorted and non-overlapping. Essentially, you could
+build a trie from this fairly easily. The problem comes when your initial
+range (in this case, 0x00-0x10FFFF) isn't so nice. For example, the class
+represented by '\w' contains only a tenth of the codepoints that
+0x00-0x10FFFF contains, but if we were to write out the byte based ranges
+as we did above, the list would stretch to 892 entries! This turns into
+quite a large NFA with a few thousand states. Turning this beast into a DFA
+takes quite a bit of time. We are thus left with trying to trim down the
+number of states we produce as early as possible.
+
+One approach (used by RE2 and still by the regex crate, at time of writing)
+is to try to find common suffixes while building NFA states for the above
+and reuse them. This is very cheap to do and one can control precisely how
+much extra memory you want to use for the cache.
+
+Another approach, however, is to reuse an algorithm for constructing a
+*minimal* DFA from a sorted sequence of inputs. I don't want to go into
+the full details here, but I explain it in more depth in my blog post on
+FSTs[1]. Note that the algorithm was not invented by me, but was published
+in a paper by Daciuk et al. in 2000 called "Incremental Construction of
+Minimal Acyclic Finite-State Automata."[2] Like the suffix cache approach
+above, it is also possible to control the amount of extra memory one uses,
+although this usually comes with the cost of sacrificing true minimality.
+(But it's typically close enough with a reasonably sized cache of states.)
+
+The catch is that Daciuk's algorithm only works if you add your keys in
+lexicographic ascending order. In our case, since we're dealing with ranges,
+we also need the additional requirement that ranges are either equivalent
+or do not overlap at all. For example, if one were given the following byte
+ranges:
+
+    [BC-BF][80-BF]
+    [BC-BF][90-BF]
+
+Then Daciuk's algorithm would not work, since there is nothing to handle the
+fact that the ranges overlap. They would need to be split apart. Thankfully,
+Thompson's algorithm for producing byte ranges for Unicode codepoint ranges
+meets both of our requirements. (A proof for this eludes me, but it appears
+true.)
+
+... however, we would also like to be able to compile UTF-8 automata in
+reverse. We want this because in order to find the starting location of a
+match using a DFA, we need to run a second DFA---a reversed version of the
+forward DFA---backwards to discover the match location.
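+
+As a side note, here is a small illustrative sketch (not part of this file)
+of how one might enumerate these byte range sequences with the
+regex-syntax::utf8 module mentioned above, including the reversed form that
+the next paragraph is concerned with:
+
+    use regex_syntax::utf8::Utf8Sequences;
+
+    for mut seq in Utf8Sequences::new('\u{0}', '\u{10FFFF}') {
+        // Forward sequence, e.g., [C2-DF][80-BF].
+        println!("{:?}", seq);
+        // Reversed sequence, e.g., [80-BF][C2-DF], which is what a
+        // right-to-left (reverse) UTF-8 automaton needs.
+        seq.reverse();
+        println!("{:?}", seq);
+    }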
Unfortunately, if +we reverse our byte sequences for 0x00-0x10FFFF, we get sequences that are +can overlap, even if they are sorted: + + [00-7F] + [80-BF][80-9F][ED-ED] + [80-BF][80-BF][80-8F][F4-F4] + [80-BF][80-BF][80-BF][F1-F3] + [80-BF][80-BF][90-BF][F0-F0] + [80-BF][80-BF][E1-EC] + [80-BF][80-BF][EE-EF] + [80-BF][A0-BF][E0-E0] + [80-BF][C2-DF] + +For example, '[80-BF][80-BF][EE-EF]' and '[80-BF][A0-BF][E0-E0]' have +overlapping ranges between '[80-BF]' and '[A0-BF]'. Thus, there is no +simple way to apply Daciuk's algorithm. + +And thus, the range trie was born. The range trie's only purpose is to take +sequences of byte ranges like the ones above, collect them into a trie and then +spit them out in a sorted fashion with no overlapping ranges. For example, +0x00-0x10FFFF gets translated to: + + [0-7F] + [80-BF][80-9F][80-8F][F1-F3] + [80-BF][80-9F][80-8F][F4] + [80-BF][80-9F][90-BF][F0] + [80-BF][80-9F][90-BF][F1-F3] + [80-BF][80-9F][E1-EC] + [80-BF][80-9F][ED] + [80-BF][80-9F][EE-EF] + [80-BF][A0-BF][80-8F][F1-F3] + [80-BF][A0-BF][80-8F][F4] + [80-BF][A0-BF][90-BF][F0] + [80-BF][A0-BF][90-BF][F1-F3] + [80-BF][A0-BF][E0] + [80-BF][A0-BF][E1-EC] + [80-BF][A0-BF][EE-EF] + [80-BF][C2-DF] + +We've thus satisfied our requirements for running Daciuk's algorithm. All +sequences of ranges are sorted, and any corresponding ranges are either +exactly equivalent or non-overlapping. + +In effect, a range trie is building a DFA from a sequence of arbitrary byte +ranges. But it uses an algorithm custom tailored to its input, so it is not as +costly as traditional DFA construction. While it is still quite a bit more +costly than the forward case (which only needs Daciuk's algorithm), it winds +up saving a substantial amount of time if one is doing a full DFA powerset +construction later by virtue of producing a much much smaller NFA. + +[1] - https://blog.burntsushi.net/transducers/ +[2] - https://www.mitpressjournals.org/doi/pdfplus/10.1162/089120100561601 +*/ + +use core::{cell::RefCell, fmt, mem, ops::RangeInclusive}; + +use alloc::{format, string::String, vec, vec::Vec}; + +use regex_syntax::utf8::Utf8Range; + +use crate::util::primitives::StateID; + +/// There is only one final state in this trie. Every sequence of byte ranges +/// added shares the same final state. +const FINAL: StateID = StateID::ZERO; + +/// The root state of the trie. +const ROOT: StateID = StateID::new_unchecked(1); + +/// A range trie represents an ordered set of sequences of bytes. +/// +/// A range trie accepts as input a sequence of byte ranges and merges +/// them into the existing set such that the trie can produce a sorted +/// non-overlapping sequence of byte ranges. The sequence emitted corresponds +/// precisely to the sequence of bytes matched by the given keys, although the +/// byte ranges themselves may be split at different boundaries. +/// +/// The order complexity of this data structure seems difficult to analyze. +/// If the size of a byte is held as a constant, then insertion is clearly +/// O(n) where n is the number of byte ranges in the input key. However, if +/// k=256 is our alphabet size, then insertion could be O(k^2 * n). In +/// particular it seems possible for pathological inputs to cause insertion +/// to do a lot of work. However, for what we use this data structure for, +/// there should be no pathological inputs since the ultimate source is always +/// a sorted set of Unicode scalar value ranges. +/// +/// Internally, this trie is setup like a finite state machine. 
Note though
+/// that it is acyclic.
+#[derive(Clone)]
+pub struct RangeTrie {
+    /// The states in this trie. The first is always the shared final state.
+    /// The second is always the root state. Otherwise, there is no
+    /// particular order.
+    states: Vec<State>,
+    /// A free-list of states. When a range trie is cleared, all of its states
+    /// are added to this list. Creating a new state reuses states from this
+    /// list before allocating a new one.
+    free: Vec<State>,
+    /// A stack for traversing this trie to yield sequences of byte ranges in
+    /// lexicographic order.
+    iter_stack: RefCell<Vec<NextIter>>,
+    /// A buffer that stores the current sequence during iteration.
+    iter_ranges: RefCell<Vec<Utf8Range>>,
+    /// A stack used for traversing the trie in order to (deeply) duplicate
+    /// a state. States are recursively duplicated when ranges are split.
+    dupe_stack: Vec<NextDupe>,
+    /// A stack used for traversing the trie during insertion of a new
+    /// sequence of byte ranges.
+    insert_stack: Vec<NextInsert>,
+}
+
+/// A single state in this trie.
+#[derive(Clone)]
+struct State {
+    /// A sorted sequence of non-overlapping transitions to other states. Each
+    /// transition corresponds to a single range of bytes.
+    transitions: Vec<Transition>,
+}
+
+/// A transition is a single range of bytes. If a particular byte is in this
+/// range, then the corresponding machine may transition to the state pointed
+/// to by `next_id`.
+#[derive(Clone)]
+struct Transition {
+    /// The byte range.
+    range: Utf8Range,
+    /// The next state to transition to.
+    next_id: StateID,
+}
+
+impl RangeTrie {
+    /// Create a new empty range trie.
+    pub fn new() -> RangeTrie {
+        let mut trie = RangeTrie {
+            states: vec![],
+            free: vec![],
+            iter_stack: RefCell::new(vec![]),
+            iter_ranges: RefCell::new(vec![]),
+            dupe_stack: vec![],
+            insert_stack: vec![],
+        };
+        trie.clear();
+        trie
+    }
+
+    /// Clear this range trie such that it is empty. Clearing a range trie
+    /// and reusing it can be beneficial because this may reuse allocations.
+    pub fn clear(&mut self) {
+        self.free.append(&mut self.states);
+        self.add_empty(); // final
+        self.add_empty(); // root
+    }
+
+    /// Iterate over all of the sequences of byte ranges in this trie, and
+    /// call the provided function for each sequence. Iteration occurs in
+    /// lexicographic order.
+    pub fn iter<E, F: FnMut(&[Utf8Range]) -> Result<(), E>>(
+        &self,
+        mut f: F,
+    ) -> Result<(), E> {
+        let mut stack = self.iter_stack.borrow_mut();
+        stack.clear();
+        let mut ranges = self.iter_ranges.borrow_mut();
+        ranges.clear();
+
+        // We do iteration in a way that permits us to use a single buffer
+        // for our keys. We iterate in a depth first fashion, while being
+        // careful to expand our frontier as we move deeper in the trie.
+        stack.push(NextIter { state_id: ROOT, tidx: 0 });
+        while let Some(NextIter { mut state_id, mut tidx }) = stack.pop() {
+            // This could be implemented more simply without an inner loop
+            // here, but at the cost of more stack pushes.
+            loop {
+                let state = self.state(state_id);
+                // If we've visited all transitions in this state, then pop
+                // back to the parent state.
+                if tidx >= state.transitions.len() {
+                    ranges.pop();
+                    break;
+                }
+
+                let t = &state.transitions[tidx];
+                ranges.push(t.range);
+                if t.next_id == FINAL {
+                    f(&ranges)?;
+                    ranges.pop();
+                    tidx += 1;
+                } else {
+                    // Expand our frontier. Once we come back to this state
+                    // via the stack, start in on the next transition.
+                    stack.push(NextIter { state_id, tidx: tidx + 1 });
+                    // Otherwise, move to the first transition of the next
+                    // state.
+ state_id = t.next_id; + tidx = 0; + } + } + } + Ok(()) + } + + /// Inserts a new sequence of ranges into this trie. + /// + /// The sequence given must be non-empty and must not have a length + /// exceeding 4. + pub fn insert(&mut self, ranges: &[Utf8Range]) { + assert!(!ranges.is_empty()); + assert!(ranges.len() <= 4); + + let mut stack = core::mem::replace(&mut self.insert_stack, vec![]); + stack.clear(); + + stack.push(NextInsert::new(ROOT, ranges)); + while let Some(next) = stack.pop() { + let (state_id, ranges) = (next.state_id(), next.ranges()); + assert!(!ranges.is_empty()); + + let (mut new, rest) = (ranges[0], &ranges[1..]); + + // i corresponds to the position of the existing transition on + // which we are operating. Typically, the result is to remove the + // transition and replace it with two or more new transitions + // corresponding to the partitions generated by splitting the + // 'new' with the ith transition's range. + let mut i = self.state(state_id).find(new); + + // In this case, there is no overlap *and* the new range is greater + // than all existing ranges. So we can just add it to the end. + if i == self.state(state_id).transitions.len() { + let next_id = NextInsert::push(self, &mut stack, rest); + self.add_transition(state_id, new, next_id); + continue; + } + + // The need for this loop is a bit subtle, buf basically, after + // we've handled the partitions from our initial split, it's + // possible that there will be a partition leftover that overlaps + // with a subsequent transition. If so, then we have to repeat + // the split process again with the leftovers and that subsequent + // transition. + 'OUTER: loop { + let old = self.state(state_id).transitions[i].clone(); + let split = match Split::new(old.range, new) { + Some(split) => split, + None => { + let next_id = NextInsert::push(self, &mut stack, rest); + self.add_transition_at(i, state_id, new, next_id); + continue; + } + }; + let splits = split.as_slice(); + // If we only have one partition, then the ranges must be + // equivalent. There's nothing to do here for this state, so + // just move on to the next one. + if splits.len() == 1 { + // ... but only if we have anything left to do. + if !rest.is_empty() { + stack.push(NextInsert::new(old.next_id, rest)); + } + break; + } + // At this point, we know that 'split' is non-empty and there + // must be some overlap AND that the two ranges are not + // equivalent. Therefore, the existing range MUST be removed + // and split up somehow. Instead of actually doing the removal + // and then a subsequent insertion---with all the memory + // shuffling that entails---we simply overwrite the transition + // at position `i` for the first new transition we want to + // insert. After that, we're forced to do expensive inserts. + let mut first = true; + let mut add_trans = + |trie: &mut RangeTrie, pos, from, range, to| { + if first { + trie.set_transition_at(pos, from, range, to); + first = false; + } else { + trie.add_transition_at(pos, from, range, to); + } + }; + for (j, &srange) in splits.iter().enumerate() { + match srange { + SplitRange::Old(r) => { + // Deep clone the state pointed to by the ith + // transition. This is always necessary since 'old' + // is always coupled with at least a 'both' + // partition. We don't want any new changes made + // via the 'both' partition to impact the part of + // the transition that doesn't overlap with the + // new range. 
+ let dup_id = self.duplicate(old.next_id); + add_trans(self, i, state_id, r, dup_id); + } + SplitRange::New(r) => { + // This is a bit subtle, but if this happens to be + // the last partition in our split, it is possible + // that this overlaps with a subsequent transition. + // If it does, then we must repeat the whole + // splitting process over again with `r` and the + // subsequent transition. + { + let trans = &self.state(state_id).transitions; + if j + 1 == splits.len() + && i < trans.len() + && intersects(r, trans[i].range) + { + new = r; + continue 'OUTER; + } + } + + // ... otherwise, setup exploration for a new + // empty state and add a brand new transition for + // this new range. + let next_id = + NextInsert::push(self, &mut stack, rest); + add_trans(self, i, state_id, r, next_id); + } + SplitRange::Both(r) => { + // Continue adding the remaining ranges on this + // path and update the transition with the new + // range. + if !rest.is_empty() { + stack.push(NextInsert::new(old.next_id, rest)); + } + add_trans(self, i, state_id, r, old.next_id); + } + } + i += 1; + } + // If we've reached this point, then we know that there are + // no subsequent transitions with any overlap. Therefore, we + // can stop processing this range and move on to the next one. + break; + } + } + self.insert_stack = stack; + } + + pub fn add_empty(&mut self) -> StateID { + let id = match StateID::try_from(self.states.len()) { + Ok(id) => id, + Err(_) => { + // This generally should not happen since a range trie is + // only ever used to compile a single sequence of Unicode + // scalar values. If we ever got to this point, we would, at + // *minimum*, be using 96GB in just the range trie alone. + panic!("too many sequences added to range trie"); + } + }; + // If we have some free states available, then use them to avoid + // more allocations. + if let Some(mut state) = self.free.pop() { + state.clear(); + self.states.push(state); + } else { + self.states.push(State { transitions: vec![] }); + } + id + } + + /// Performs a deep clone of the given state and returns the duplicate's + /// state ID. + /// + /// A "deep clone" in this context means that the state given along with + /// recursively all states that it points to are copied. Once complete, + /// the given state ID and the returned state ID share nothing. + /// + /// This is useful during range trie insertion when a new range overlaps + /// with an existing range that is bigger than the new one. The part + /// of the existing range that does *not* overlap with the new one is + /// duplicated so that adding the new range to the overlap doesn't disturb + /// the non-overlapping portion. + /// + /// There's one exception: if old_id is the final state, then it is not + /// duplicated and the same final state is returned. This is because all + /// final states in this trie are equivalent. + fn duplicate(&mut self, old_id: StateID) -> StateID { + if old_id == FINAL { + return FINAL; + } + + let mut stack = mem::replace(&mut self.dupe_stack, vec![]); + stack.clear(); + + let new_id = self.add_empty(); + // old_id is the state we're cloning and new_id is the ID of the + // duplicated state for old_id. + stack.push(NextDupe { old_id, new_id }); + while let Some(NextDupe { old_id, new_id }) = stack.pop() { + for i in 0..self.state(old_id).transitions.len() { + let t = self.state(old_id).transitions[i].clone(); + if t.next_id == FINAL { + // All final states are the same, so there's no need to + // duplicate it. 
+ self.add_transition(new_id, t.range, FINAL); + continue; + } + + let new_child_id = self.add_empty(); + self.add_transition(new_id, t.range, new_child_id); + stack.push(NextDupe { + old_id: t.next_id, + new_id: new_child_id, + }); + } + } + self.dupe_stack = stack; + new_id + } + + /// Adds the given transition to the given state. + /// + /// Callers must ensure that all previous transitions in this state + /// are lexicographically smaller than the given range. + fn add_transition( + &mut self, + from_id: StateID, + range: Utf8Range, + next_id: StateID, + ) { + self.state_mut(from_id) + .transitions + .push(Transition { range, next_id }); + } + + /// Like `add_transition`, except this inserts the transition just before + /// the ith transition. + fn add_transition_at( + &mut self, + i: usize, + from_id: StateID, + range: Utf8Range, + next_id: StateID, + ) { + self.state_mut(from_id) + .transitions + .insert(i, Transition { range, next_id }); + } + + /// Overwrites the transition at position i with the given transition. + fn set_transition_at( + &mut self, + i: usize, + from_id: StateID, + range: Utf8Range, + next_id: StateID, + ) { + self.state_mut(from_id).transitions[i] = Transition { range, next_id }; + } + + /// Return an immutable borrow for the state with the given ID. + fn state(&self, id: StateID) -> &State { + &self.states[id] + } + + /// Return a mutable borrow for the state with the given ID. + fn state_mut(&mut self, id: StateID) -> &mut State { + &mut self.states[id] + } +} + +impl State { + /// Find the position at which the given range should be inserted in this + /// state. + /// + /// The position returned is always in the inclusive range + /// [0, transitions.len()]. If 'transitions.len()' is returned, then the + /// given range overlaps with no other range in this state *and* is greater + /// than all of them. + /// + /// For all other possible positions, the given range either overlaps + /// with the transition at that position or is otherwise less than it + /// with no overlap (and is greater than the previous transition). In the + /// former case, careful attention must be paid to inserting this range + /// as a new transition. In the latter case, the range can be inserted as + /// a new transition at the given position without disrupting any other + /// transitions. + fn find(&self, range: Utf8Range) -> usize { + /// Returns the position `i` at which `pred(xs[i])` first returns true + /// such that for all `j >= i`, `pred(xs[j]) == true`. If `pred` never + /// returns true, then `xs.len()` is returned. + /// + /// We roll our own binary search because it doesn't seem like the + /// standard library's binary search can be used here. Namely, if + /// there is an overlapping range, then we want to find the first such + /// occurrence, but there may be many. Or at least, it's not quite + /// clear to me how to do it. + fn binary_search(xs: &[T], mut pred: F) -> usize + where + F: FnMut(&T) -> bool, + { + let (mut left, mut right) = (0, xs.len()); + while left < right { + // Overflow is impossible because xs.len() <= 256. + let mid = (left + right) / 2; + if pred(&xs[mid]) { + right = mid; + } else { + left = mid + 1; + } + } + left + } + + // Benchmarks suggest that binary search is just a bit faster than + // straight linear search. 
Specifically when using the debug tool: + // + // hyperfine "regex-cli debug thompson -qr --captures none '\w{90} ecurB'" + binary_search(&self.transitions, |t| range.start <= t.range.end) + } + + /// Clear this state such that it has zero transitions. + fn clear(&mut self) { + self.transitions.clear(); + } +} + +/// The next state to process during duplication. +#[derive(Clone, Debug)] +struct NextDupe { + /// The state we want to duplicate. + old_id: StateID, + /// The ID of the new state that is a duplicate of old_id. + new_id: StateID, +} + +/// The next state (and its corresponding transition) that we want to visit +/// during iteration in lexicographic order. +#[derive(Clone, Debug)] +struct NextIter { + state_id: StateID, + tidx: usize, +} + +/// The next state to process during insertion and any remaining ranges that we +/// want to add for a particular sequence of ranges. The first such instance +/// is always the root state along with all ranges given. +#[derive(Clone, Debug)] +struct NextInsert { + /// The next state to begin inserting ranges. This state should be the + /// state at which `ranges[0]` should be inserted. + state_id: StateID, + /// The ranges to insert. We used a fixed-size array here to avoid an + /// allocation. + ranges: [Utf8Range; 4], + /// The number of valid ranges in the above array. + len: u8, +} + +impl NextInsert { + /// Create the next item to visit. The given state ID should correspond + /// to the state at which the first range in the given slice should be + /// inserted. The slice given must not be empty and it must be no longer + /// than 4. + fn new(state_id: StateID, ranges: &[Utf8Range]) -> NextInsert { + let len = ranges.len(); + assert!(len > 0); + assert!(len <= 4); + + let mut tmp = [Utf8Range { start: 0, end: 0 }; 4]; + tmp[..len].copy_from_slice(ranges); + NextInsert { state_id, ranges: tmp, len: u8::try_from(len).unwrap() } + } + + /// Push a new empty state to visit along with any remaining ranges that + /// still need to be inserted. The ID of the new empty state is returned. + /// + /// If ranges is empty, then no new state is created and FINAL is returned. + fn push( + trie: &mut RangeTrie, + stack: &mut Vec, + ranges: &[Utf8Range], + ) -> StateID { + if ranges.is_empty() { + FINAL + } else { + let next_id = trie.add_empty(); + stack.push(NextInsert::new(next_id, ranges)); + next_id + } + } + + /// Return the ID of the state to visit. + fn state_id(&self) -> StateID { + self.state_id + } + + /// Return the remaining ranges to insert. + fn ranges(&self) -> &[Utf8Range] { + &self.ranges[..usize::try_from(self.len).unwrap()] + } +} + +/// Split represents a partitioning of two ranges into one or more ranges. This +/// is the secret sauce that makes a range trie work, as it's what tells us +/// how to deal with two overlapping but unequal ranges during insertion. +/// +/// Essentially, either two ranges overlap or they don't. If they don't, then +/// handling insertion is easy: just insert the new range into its +/// lexicographically correct position. Since it does not overlap with anything +/// else, no other transitions are impacted by the new range. +/// +/// If they do overlap though, there are generally three possible cases to +/// handle: +/// +/// 1. The part where the two ranges actually overlap. i.e., The intersection. +/// 2. The part of the existing range that is not in the new range. +/// 3. The part of the new range that is not in the old range. 
+/// +/// (1) is guaranteed to always occur since all overlapping ranges have a +/// non-empty intersection. If the two ranges are not equivalent, then at +/// least one of (2) or (3) is guaranteed to occur as well. In some cases, +/// e.g., `[0-4]` and `[4-9]`, all three cases will occur. +/// +/// This `Split` type is responsible for providing (1), (2) and (3) for any +/// possible pair of byte ranges. +/// +/// As for insertion, for the overlap in (1), the remaining ranges to insert +/// should be added by following the corresponding transition. However, this +/// should only be done for the overlapping parts of the range. If there was +/// a part of the existing range that was not in the new range, then that +/// existing part must be split off from the transition and duplicated. The +/// remaining parts of the overlap can then be added to using the new ranges +/// without disturbing the existing range. +/// +/// Handling the case for the part of a new range that is not in an existing +/// range is seemingly easy. Just treat it as if it were a non-overlapping +/// range. The problem here is that if this new non-overlapping range occurs +/// after both (1) and (2), then it's possible that it can overlap with the +/// next transition in the current state. If it does, then the whole process +/// must be repeated! +/// +/// # Details of the 3 cases +/// +/// The following details the various cases that are implemented in code +/// below. It's plausible that the number of cases is not actually minimal, +/// but it's important for this code to remain at least somewhat readable. +/// +/// Given [a,b] and [x,y], where a <= b, x <= y, b < 256 and y < 256, we define +/// the follow distinct relationships where at least one must apply. The order +/// of these matters, since multiple can match. The first to match applies. +/// +/// 1. b < x <=> [a,b] < [x,y] +/// 2. y < a <=> [x,y] < [a,b] +/// +/// In the case of (1) and (2), these are the only cases where there is no +/// overlap. Or otherwise, the intersection of [a,b] and [x,y] is empty. In +/// order to compute the intersection, one can do [max(a,x), min(b,y)]. The +/// intersection in all of the following cases is non-empty. +/// +/// 3. a = x && b = y <=> [a,b] == [x,y] +/// 4. a = x && b < y <=> [x,y] right-extends [a,b] +/// 5. b = y && a > x <=> [x,y] left-extends [a,b] +/// 6. x = a && y < b <=> [a,b] right-extends [x,y] +/// 7. y = b && x > a <=> [a,b] left-extends [x,y] +/// 8. a > x && b < y <=> [x,y] covers [a,b] +/// 9. x > a && y < b <=> [a,b] covers [x,y] +/// 10. b = x && a < y <=> [a,b] is left-adjacent to [x,y] +/// 11. y = a && x < b <=> [x,y] is left-adjacent to [a,b] +/// 12. b > x && b < y <=> [a,b] left-overlaps [x,y] +/// 13. y > a && y < b <=> [x,y] left-overlaps [a,b] +/// +/// In cases 3-13, we can form rules that partition the ranges into a +/// non-overlapping ordered sequence of ranges: +/// +/// 3. [a,b] +/// 4. [a,b], [b+1,y] +/// 5. [x,a-1], [a,b] +/// 6. [x,y], [y+1,b] +/// 7. [a,x-1], [x,y] +/// 8. [x,a-1], [a,b], [b+1,y] +/// 9. [a,x-1], [x,y], [y+1,b] +/// 10. [a,b-1], [b,b], [b+1,y] +/// 11. [x,y-1], [y,y], [y+1,b] +/// 12. [a,x-1], [x,b], [b+1,y] +/// 13. [x,a-1], [a,y], [y+1,b] +/// +/// In the code below, we go a step further and identify each of the above +/// outputs as belonging either to the overlap of the two ranges or to one +/// of [a,b] or [x,y] exclusively. 
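+///
+/// As a concrete illustration (mirroring case 10 above and the unit tests at
+/// the bottom of this file): splitting an existing range [3,6] against a new
+/// range [6,8] yields the partitions `Old([3,5])`, `Both([6,6])` and
+/// `New([7,8])`.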
+#[derive(Clone, Debug, Eq, PartialEq)] +struct Split { + partitions: [SplitRange; 3], + len: usize, +} + +/// A tagged range indicating how it was derived from a pair of ranges. +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +enum SplitRange { + Old(Utf8Range), + New(Utf8Range), + Both(Utf8Range), +} + +impl Split { + /// Create a partitioning of the given ranges. + /// + /// If the given ranges have an empty intersection, then None is returned. + fn new(o: Utf8Range, n: Utf8Range) -> Option { + let range = |r: RangeInclusive| Utf8Range { + start: *r.start(), + end: *r.end(), + }; + let old = |r| SplitRange::Old(range(r)); + let new = |r| SplitRange::New(range(r)); + let both = |r| SplitRange::Both(range(r)); + + // Use same names as the comment above to make it easier to compare. + let (a, b, x, y) = (o.start, o.end, n.start, n.end); + + if b < x || y < a { + // case 1, case 2 + None + } else if a == x && b == y { + // case 3 + Some(Split::parts1(both(a..=b))) + } else if a == x && b < y { + // case 4 + Some(Split::parts2(both(a..=b), new(b + 1..=y))) + } else if b == y && a > x { + // case 5 + Some(Split::parts2(new(x..=a - 1), both(a..=b))) + } else if x == a && y < b { + // case 6 + Some(Split::parts2(both(x..=y), old(y + 1..=b))) + } else if y == b && x > a { + // case 7 + Some(Split::parts2(old(a..=x - 1), both(x..=y))) + } else if a > x && b < y { + // case 8 + Some(Split::parts3(new(x..=a - 1), both(a..=b), new(b + 1..=y))) + } else if x > a && y < b { + // case 9 + Some(Split::parts3(old(a..=x - 1), both(x..=y), old(y + 1..=b))) + } else if b == x && a < y { + // case 10 + Some(Split::parts3(old(a..=b - 1), both(b..=b), new(b + 1..=y))) + } else if y == a && x < b { + // case 11 + Some(Split::parts3(new(x..=y - 1), both(y..=y), old(y + 1..=b))) + } else if b > x && b < y { + // case 12 + Some(Split::parts3(old(a..=x - 1), both(x..=b), new(b + 1..=y))) + } else if y > a && y < b { + // case 13 + Some(Split::parts3(new(x..=a - 1), both(a..=y), old(y + 1..=b))) + } else { + unreachable!() + } + } + + /// Create a new split with a single partition. This only occurs when two + /// ranges are equivalent. + fn parts1(r1: SplitRange) -> Split { + // This value doesn't matter since it is never accessed. + let nada = SplitRange::Old(Utf8Range { start: 0, end: 0 }); + Split { partitions: [r1, nada, nada], len: 1 } + } + + /// Create a new split with two partitions. + fn parts2(r1: SplitRange, r2: SplitRange) -> Split { + // This value doesn't matter since it is never accessed. + let nada = SplitRange::Old(Utf8Range { start: 0, end: 0 }); + Split { partitions: [r1, r2, nada], len: 2 } + } + + /// Create a new split with three partitions. + fn parts3(r1: SplitRange, r2: SplitRange, r3: SplitRange) -> Split { + Split { partitions: [r1, r2, r3], len: 3 } + } + + /// Return the partitions in this split as a slice. 
+ fn as_slice(&self) -> &[SplitRange] { + &self.partitions[..self.len] + } +} + +impl fmt::Debug for RangeTrie { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + writeln!(f)?; + for (i, state) in self.states.iter().enumerate() { + let status = if i == FINAL.as_usize() { '*' } else { ' ' }; + writeln!(f, "{status}{i:06}: {state:?}")?; + } + Ok(()) + } +} + +impl fmt::Debug for State { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let rs = self + .transitions + .iter() + .map(|t| format!("{t:?}")) + .collect::>() + .join(", "); + write!(f, "{rs}") + } +} + +impl fmt::Debug for Transition { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + if self.range.start == self.range.end { + write!( + f, + "{:02X} => {:02X}", + self.range.start, + self.next_id.as_usize(), + ) + } else { + write!( + f, + "{:02X}-{:02X} => {:02X}", + self.range.start, + self.range.end, + self.next_id.as_usize(), + ) + } + } +} + +/// Returns true if and only if the given ranges intersect. +fn intersects(r1: Utf8Range, r2: Utf8Range) -> bool { + !(r1.end < r2.start || r2.end < r1.start) +} + +#[cfg(test)] +mod tests { + use super::*; + + fn r(range: RangeInclusive) -> Utf8Range { + Utf8Range { start: *range.start(), end: *range.end() } + } + + fn split_maybe( + old: RangeInclusive, + new: RangeInclusive, + ) -> Option { + Split::new(r(old), r(new)) + } + + fn split( + old: RangeInclusive, + new: RangeInclusive, + ) -> Vec { + split_maybe(old, new).unwrap().as_slice().to_vec() + } + + #[test] + fn no_splits() { + // case 1 + assert_eq!(None, split_maybe(0..=1, 2..=3)); + // case 2 + assert_eq!(None, split_maybe(2..=3, 0..=1)); + } + + #[test] + fn splits() { + let range = |r: RangeInclusive| Utf8Range { + start: *r.start(), + end: *r.end(), + }; + let old = |r| SplitRange::Old(range(r)); + let new = |r| SplitRange::New(range(r)); + let both = |r| SplitRange::Both(range(r)); + + // case 3 + assert_eq!(split(0..=0, 0..=0), vec![both(0..=0)]); + assert_eq!(split(9..=9, 9..=9), vec![both(9..=9)]); + + // case 4 + assert_eq!(split(0..=5, 0..=6), vec![both(0..=5), new(6..=6)]); + assert_eq!(split(0..=5, 0..=8), vec![both(0..=5), new(6..=8)]); + assert_eq!(split(5..=5, 5..=8), vec![both(5..=5), new(6..=8)]); + + // case 5 + assert_eq!(split(1..=5, 0..=5), vec![new(0..=0), both(1..=5)]); + assert_eq!(split(3..=5, 0..=5), vec![new(0..=2), both(3..=5)]); + assert_eq!(split(5..=5, 0..=5), vec![new(0..=4), both(5..=5)]); + + // case 6 + assert_eq!(split(0..=6, 0..=5), vec![both(0..=5), old(6..=6)]); + assert_eq!(split(0..=8, 0..=5), vec![both(0..=5), old(6..=8)]); + assert_eq!(split(5..=8, 5..=5), vec![both(5..=5), old(6..=8)]); + + // case 7 + assert_eq!(split(0..=5, 1..=5), vec![old(0..=0), both(1..=5)]); + assert_eq!(split(0..=5, 3..=5), vec![old(0..=2), both(3..=5)]); + assert_eq!(split(0..=5, 5..=5), vec![old(0..=4), both(5..=5)]); + + // case 8 + assert_eq!( + split(3..=6, 2..=7), + vec![new(2..=2), both(3..=6), new(7..=7)], + ); + assert_eq!( + split(3..=6, 1..=8), + vec![new(1..=2), both(3..=6), new(7..=8)], + ); + + // case 9 + assert_eq!( + split(2..=7, 3..=6), + vec![old(2..=2), both(3..=6), old(7..=7)], + ); + assert_eq!( + split(1..=8, 3..=6), + vec![old(1..=2), both(3..=6), old(7..=8)], + ); + + // case 10 + assert_eq!( + split(3..=6, 6..=7), + vec![old(3..=5), both(6..=6), new(7..=7)], + ); + assert_eq!( + split(3..=6, 6..=8), + vec![old(3..=5), both(6..=6), new(7..=8)], + ); + assert_eq!( + split(5..=6, 6..=7), + vec![old(5..=5), both(6..=6), new(7..=7)], + ); + + // case 11 + 
assert_eq!( + split(6..=7, 3..=6), + vec![new(3..=5), both(6..=6), old(7..=7)], + ); + assert_eq!( + split(6..=8, 3..=6), + vec![new(3..=5), both(6..=6), old(7..=8)], + ); + assert_eq!( + split(6..=7, 5..=6), + vec![new(5..=5), both(6..=6), old(7..=7)], + ); + + // case 12 + assert_eq!( + split(3..=7, 5..=9), + vec![old(3..=4), both(5..=7), new(8..=9)], + ); + assert_eq!( + split(3..=5, 4..=6), + vec![old(3..=3), both(4..=5), new(6..=6)], + ); + + // case 13 + assert_eq!( + split(5..=9, 3..=7), + vec![new(3..=4), both(5..=7), old(8..=9)], + ); + assert_eq!( + split(4..=6, 3..=5), + vec![new(3..=3), both(4..=5), old(6..=6)], + ); + } + + // Arguably there should be more tests here, but in practice, this data + // structure is well covered by the huge number of regex tests. +} diff --git a/vendor/regex-automata/src/util/alphabet.rs b/vendor/regex-automata/src/util/alphabet.rs new file mode 100644 index 00000000000000..475f9515963751 --- /dev/null +++ b/vendor/regex-automata/src/util/alphabet.rs @@ -0,0 +1,1139 @@ +/*! +This module provides APIs for dealing with the alphabets of finite state +machines. + +There are two principal types in this module, [`ByteClasses`] and [`Unit`]. +The former defines the alphabet of a finite state machine while the latter +represents an element of that alphabet. + +To a first approximation, the alphabet of all automata in this crate is just +a `u8`. Namely, every distinct byte value. All 256 of them. In practice, this +can be quite wasteful when building a transition table for a DFA, since it +requires storing a state identifier for each element in the alphabet. Instead, +we collapse the alphabet of an automaton down into equivalence classes, where +every byte in the same equivalence class never discriminates between a match or +a non-match from any other byte in the same class. For example, in the regex +`[a-z]+`, then you could consider it having an alphabet consisting of two +equivalence classes: `a-z` and everything else. In terms of the transitions on +an automaton, it doesn't actually require representing every distinct byte. +Just the equivalence classes. + +The downside of equivalence classes is that, of course, searching a haystack +deals with individual byte values. Those byte values need to be mapped to +their corresponding equivalence class. This is what `ByteClasses` does. In +practice, doing this for every state transition has negligible impact on modern +CPUs. Moreover, it helps make more efficient use of the CPU cache by (possibly +considerably) shrinking the size of the transition table. + +One last hiccup concerns `Unit`. Namely, because of look-around and how the +DFAs in this crate work, we need to add a sentinel value to our alphabet +of equivalence classes that represents the "end" of a search. We call that +sentinel [`Unit::eoi`] or "end of input." Thus, a `Unit` is either an +equivalence class corresponding to a set of bytes, or it is a special "end of +input" sentinel. + +In general, you should not expect to need either of these types unless you're +doing lower level shenanigans with DFAs, or even building your own DFAs. +(Although, you don't have to use these types to build your own DFAs of course.) +For example, if you're walking a DFA's state graph, it's probably useful to +make use of [`ByteClasses`] to visit each element in the DFA's alphabet instead +of just visiting every distinct `u8` value. The latter isn't necessarily wrong, +but it could be potentially very wasteful. 
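+
+As a quick illustration (this mirrors the examples on [`ByteClasses`] further
+down in this module), the classes computed for a compiled NFA can be queried
+directly:
+
+```
+use regex_automata::nfa::thompson::NFA;
+
+let nfa = NFA::new("[a-z]+")?;
+let classes = nfa.byte_classes();
+// 'a' and 'z' fall into the same equivalence class for this pattern...
+assert_eq!(classes.get(b'a'), classes.get(b'z'));
+// ...while 'a' and 'A' do not.
+assert_ne!(classes.get(b'a'), classes.get(b'A'));
+
+# Ok::<(), Box<dyn std::error::Error>>(())
+```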
+*/ +use crate::util::{ + escape::DebugByte, + wire::{self, DeserializeError, SerializeError}, +}; + +/// Unit represents a single unit of haystack for DFA based regex engines. +/// +/// It is not expected for consumers of this crate to need to use this type +/// unless they are implementing their own DFA. And even then, it's not +/// required: implementors may use other techniques to handle haystack units. +/// +/// Typically, a single unit of haystack for a DFA would be a single byte. +/// However, for the DFAs in this crate, matches are delayed by a single byte +/// in order to handle look-ahead assertions (`\b`, `$` and `\z`). Thus, once +/// we have consumed the haystack, we must run the DFA through one additional +/// transition using a unit that indicates the haystack has ended. +/// +/// There is no way to represent a sentinel with a `u8` since all possible +/// values *may* be valid haystack units to a DFA, therefore this type +/// explicitly adds room for a sentinel value. +/// +/// The sentinel EOI value is always its own equivalence class and is +/// ultimately represented by adding 1 to the maximum equivalence class value. +/// So for example, the regex `^[a-z]+$` might be split into the following +/// equivalence classes: +/// +/// ```text +/// 0 => [\x00-`] +/// 1 => [a-z] +/// 2 => [{-\xFF] +/// 3 => [EOI] +/// ``` +/// +/// Where EOI is the special sentinel value that is always in its own +/// singleton equivalence class. +#[derive(Clone, Copy, Eq, PartialEq, PartialOrd, Ord)] +pub struct Unit(UnitKind); + +#[derive(Clone, Copy, Eq, PartialEq, PartialOrd, Ord)] +enum UnitKind { + /// Represents a byte value, or more typically, an equivalence class + /// represented as a byte value. + U8(u8), + /// Represents the "end of input" sentinel. We regrettably use a `u16` + /// here since the maximum sentinel value is `256`. Thankfully, we don't + /// actually store a `Unit` anywhere, so this extra space shouldn't be too + /// bad. + EOI(u16), +} + +impl Unit { + /// Create a new haystack unit from a byte value. + /// + /// All possible byte values are legal. However, when creating a haystack + /// unit for a specific DFA, one should be careful to only construct units + /// that are in that DFA's alphabet. Namely, one way to compact a DFA's + /// in-memory representation is to collapse its transitions to a set of + /// equivalence classes into a set of all possible byte values. If a DFA + /// uses equivalence classes instead of byte values, then the byte given + /// here should be the equivalence class. + pub fn u8(byte: u8) -> Unit { + Unit(UnitKind::U8(byte)) + } + + /// Create a new "end of input" haystack unit. + /// + /// The value given is the sentinel value used by this unit to represent + /// the "end of input." The value should be the total number of equivalence + /// classes in the corresponding alphabet. Its maximum value is `256`, + /// which occurs when every byte is its own equivalence class. + /// + /// # Panics + /// + /// This panics when `num_byte_equiv_classes` is greater than `256`. + pub fn eoi(num_byte_equiv_classes: usize) -> Unit { + assert!( + num_byte_equiv_classes <= 256, + "max number of byte-based equivalent classes is 256, but got \ + {num_byte_equiv_classes}", + ); + Unit(UnitKind::EOI(u16::try_from(num_byte_equiv_classes).unwrap())) + } + + /// If this unit is not an "end of input" sentinel, then returns its + /// underlying byte value. Otherwise return `None`. 
+    pub fn as_u8(self) -> Option<u8> {
+        match self.0 {
+            UnitKind::U8(b) => Some(b),
+            UnitKind::EOI(_) => None,
+        }
+    }
+
+    /// If this unit is an "end of input" sentinel, then return the underlying
+    /// sentinel value that was given to [`Unit::eoi`]. Otherwise return
+    /// `None`.
+    pub fn as_eoi(self) -> Option<u16> {
+        match self.0 {
+            UnitKind::U8(_) => None,
+            UnitKind::EOI(sentinel) => Some(sentinel),
+        }
+    }
+
+    /// Return this unit as a `usize`, regardless of whether it is a byte value
+    /// or an "end of input" sentinel. In the latter case, the underlying
+    /// sentinel value given to [`Unit::eoi`] is returned.
+    pub fn as_usize(self) -> usize {
+        match self.0 {
+            UnitKind::U8(b) => usize::from(b),
+            UnitKind::EOI(eoi) => usize::from(eoi),
+        }
+    }
+
+    /// Returns true if and only if this unit is a byte value equivalent to the
+    /// byte given. This always returns false when this is an "end of input"
+    /// sentinel.
+    pub fn is_byte(self, byte: u8) -> bool {
+        self.as_u8().map_or(false, |b| b == byte)
+    }
+
+    /// Returns true when this unit represents an "end of input" sentinel.
+    pub fn is_eoi(self) -> bool {
+        self.as_eoi().is_some()
+    }
+
+    /// Returns true when this unit corresponds to an ASCII word byte.
+    ///
+    /// This always returns false when this unit represents an "end of input"
+    /// sentinel.
+    pub fn is_word_byte(self) -> bool {
+        self.as_u8().map_or(false, crate::util::utf8::is_word_byte)
+    }
+}
+
+impl core::fmt::Debug for Unit {
+    fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
+        match self.0 {
+            UnitKind::U8(b) => write!(f, "{:?}", DebugByte(b)),
+            UnitKind::EOI(_) => write!(f, "EOI"),
+        }
+    }
+}
+
+/// A representation of byte oriented equivalence classes.
+///
+/// This is used in a DFA to reduce the size of the transition table. This can
+/// have a particularly large impact not only on the total size of a dense DFA,
+/// but also on compile times.
+///
+/// The essential idea here is that the alphabet of a DFA is shrunk from the
+/// usual 256 distinct byte values down to a set of equivalence classes. The
+/// guarantee you get is that any byte belonging to the same equivalence class
+/// can be treated as if it were any other byte in the same class, and the
+/// result of a search wouldn't change.
+///
+/// # Example
+///
+/// This example shows how to get byte classes from an
+/// [`NFA`](crate::nfa::thompson::NFA) and ask for the class of various bytes.
+///
+/// ```
+/// use regex_automata::nfa::thompson::NFA;
+///
+/// let nfa = NFA::new("[a-z]+")?;
+/// let classes = nfa.byte_classes();
+/// // 'a' and 'z' are in the same class for this regex.
+/// assert_eq!(classes.get(b'a'), classes.get(b'z'));
+/// // But 'a' and 'A' are not.
+/// assert_ne!(classes.get(b'a'), classes.get(b'A'));
+///
+/// # Ok::<(), Box<dyn std::error::Error>>(())
+/// ```
+#[derive(Clone, Copy)]
+pub struct ByteClasses([u8; 256]);
+
+impl ByteClasses {
+    /// Creates a new set of equivalence classes where all bytes are mapped to
+    /// the same class.
+    #[inline]
+    pub fn empty() -> ByteClasses {
+        ByteClasses([0; 256])
+    }
+
+    /// Creates a new set of equivalence classes where each byte belongs to
+    /// its own equivalence class.
+    #[inline]
+    pub fn singletons() -> ByteClasses {
+        let mut classes = ByteClasses::empty();
+        for b in 0..=255 {
+            classes.set(b, b);
+        }
+        classes
+    }
+
+    /// Deserializes a byte class map from the given slice. If the slice is of
+    /// insufficient length or otherwise contains an impossible mapping, then
+    /// an error is returned.
Upon success, the number of bytes read along with + /// the map are returned. The number of bytes read is always a multiple of + /// 8. + pub(crate) fn from_bytes( + slice: &[u8], + ) -> Result<(ByteClasses, usize), DeserializeError> { + wire::check_slice_len(slice, 256, "byte class map")?; + let mut classes = ByteClasses::empty(); + for (b, &class) in slice[..256].iter().enumerate() { + classes.set(u8::try_from(b).unwrap(), class); + } + // We specifically don't use 'classes.iter()' here because that + // iterator depends on 'classes.alphabet_len()' being correct. But that + // is precisely the thing we're trying to verify below! + for &b in classes.0.iter() { + if usize::from(b) >= classes.alphabet_len() { + return Err(DeserializeError::generic( + "found equivalence class greater than alphabet len", + )); + } + } + Ok((classes, 256)) + } + + /// Writes this byte class map to the given byte buffer. if the given + /// buffer is too small, then an error is returned. Upon success, the total + /// number of bytes written is returned. The number of bytes written is + /// guaranteed to be a multiple of 8. + pub(crate) fn write_to( + &self, + mut dst: &mut [u8], + ) -> Result { + let nwrite = self.write_to_len(); + if dst.len() < nwrite { + return Err(SerializeError::buffer_too_small("byte class map")); + } + for b in 0..=255 { + dst[0] = self.get(b); + dst = &mut dst[1..]; + } + Ok(nwrite) + } + + /// Returns the total number of bytes written by `write_to`. + pub(crate) fn write_to_len(&self) -> usize { + 256 + } + + /// Set the equivalence class for the given byte. + #[inline] + pub fn set(&mut self, byte: u8, class: u8) { + self.0[usize::from(byte)] = class; + } + + /// Get the equivalence class for the given byte. + #[inline] + pub fn get(&self, byte: u8) -> u8 { + self.0[usize::from(byte)] + } + + /// Get the equivalence class for the given haystack unit and return the + /// class as a `usize`. + #[inline] + pub fn get_by_unit(&self, unit: Unit) -> usize { + match unit.0 { + UnitKind::U8(b) => usize::from(self.get(b)), + UnitKind::EOI(b) => usize::from(b), + } + } + + /// Create a unit that represents the "end of input" sentinel based on the + /// number of equivalence classes. + #[inline] + pub fn eoi(&self) -> Unit { + // The alphabet length already includes the EOI sentinel, hence why + // we subtract 1. + Unit::eoi(self.alphabet_len().checked_sub(1).unwrap()) + } + + /// Return the total number of elements in the alphabet represented by + /// these equivalence classes. Equivalently, this returns the total number + /// of equivalence classes. + #[inline] + pub fn alphabet_len(&self) -> usize { + // Add one since the number of equivalence classes is one bigger than + // the last one. But add another to account for the final EOI class + // that isn't explicitly represented. + usize::from(self.0[255]) + 1 + 1 + } + + /// Returns the stride, as a base-2 exponent, required for these + /// equivalence classes. + /// + /// The stride is always the smallest power of 2 that is greater than or + /// equal to the alphabet length, and the `stride2` returned here is the + /// exponent applied to `2` to get the smallest power. This is done so that + /// converting between premultiplied state IDs and indices can be done with + /// shifts alone, which is much faster than integer division. 
+    #[inline]
+    pub fn stride2(&self) -> usize {
+        let zeros = self.alphabet_len().next_power_of_two().trailing_zeros();
+        usize::try_from(zeros).unwrap()
+    }
+
+    /// Returns true if and only if every byte in this class maps to its own
+    /// equivalence class. Equivalently, there are 257 equivalence classes
+    /// and each class contains either exactly one byte or corresponds to the
+    /// singleton class containing the "end of input" sentinel.
+    #[inline]
+    pub fn is_singleton(&self) -> bool {
+        self.alphabet_len() == 257
+    }
+
+    /// Returns an iterator over all equivalence classes in this set.
+    #[inline]
+    pub fn iter(&self) -> ByteClassIter<'_> {
+        ByteClassIter { classes: self, i: 0 }
+    }
+
+    /// Returns an iterator over a sequence of representative bytes from each
+    /// equivalence class within the range of bytes given.
+    ///
+    /// When the given range is unbounded on both sides, the iterator yields
+    /// exactly N items, where N is equivalent to the number of equivalence
+    /// classes. Each item is an arbitrary byte drawn from each equivalence
+    /// class.
+    ///
+    /// This is useful when one is determinizing an NFA and the NFA's alphabet
+    /// hasn't been converted to equivalence classes. Picking an arbitrary byte
+    /// from each equivalence class then permits a full exploration of the NFA
+    /// instead of using every possible byte value and thus potentially saves
+    /// quite a lot of redundant work.
+    ///
+    /// # Example
+    ///
+    /// This shows an example of what a complete sequence of representatives
+    /// might look like from a real example.
+    ///
+    /// ```
+    /// use regex_automata::{nfa::thompson::NFA, util::alphabet::Unit};
+    ///
+    /// let nfa = NFA::new("[a-z]+")?;
+    /// let classes = nfa.byte_classes();
+    /// let reps: Vec<Unit> = classes.representatives(..).collect();
+    /// // Note that the specific byte values yielded are not guaranteed!
+    /// let expected = vec![
+    ///     Unit::u8(b'\x00'),
+    ///     Unit::u8(b'a'),
+    ///     Unit::u8(b'{'),
+    ///     Unit::eoi(3),
+    /// ];
+    /// assert_eq!(expected, reps);
+    ///
+    /// # Ok::<(), Box<dyn std::error::Error>>(())
+    /// ```
+    ///
+    /// Note though, that you can ask for an arbitrary range of bytes, and only
+    /// representatives for that range will be returned:
+    ///
+    /// ```
+    /// use regex_automata::{nfa::thompson::NFA, util::alphabet::Unit};
+    ///
+    /// let nfa = NFA::new("[a-z]+")?;
+    /// let classes = nfa.byte_classes();
+    /// let reps: Vec<Unit> = classes.representatives(b'A'..=b'z').collect();
+    /// // Note that the specific byte values yielded are not guaranteed!
+    /// let expected = vec![
+    ///     Unit::u8(b'A'),
+    ///     Unit::u8(b'a'),
+    /// ];
+    /// assert_eq!(expected, reps);
+    ///
+    /// # Ok::<(), Box<dyn std::error::Error>>(())
+    /// ```
+    pub fn representatives<R: core::ops::RangeBounds<u8>>(
+        &self,
+        range: R,
+    ) -> ByteClassRepresentatives<'_> {
+        use core::ops::Bound;
+
+        let cur_byte = match range.start_bound() {
+            Bound::Included(&i) => usize::from(i),
+            Bound::Excluded(&i) => usize::from(i).checked_add(1).unwrap(),
+            Bound::Unbounded => 0,
+        };
+        let end_byte = match range.end_bound() {
+            Bound::Included(&i) => {
+                Some(usize::from(i).checked_add(1).unwrap())
+            }
+            Bound::Excluded(&i) => Some(usize::from(i)),
+            Bound::Unbounded => None,
+        };
+        assert_ne!(
+            cur_byte,
+            usize::MAX,
+            "start range must be less than usize::MAX",
+        );
+        ByteClassRepresentatives {
+            classes: self,
+            cur_byte,
+            end_byte,
+            last_class: None,
+        }
+    }
+
+    /// Returns an iterator of the bytes in the given equivalence class.
+    ///
+    /// This is useful when one needs to know the actual bytes that belong to
+    /// an equivalence class.
For example, conceptually speaking, accelerating
+    /// a DFA state occurs when a state only has a few outgoing transitions.
+    /// But in reality, what is required is that there are only a small
+    /// number of distinct bytes that can lead to an outgoing transition. The
+    /// difference is that any one transition can correspond to an equivalence
+    /// class which may contain many bytes. Therefore, DFA state acceleration
+    /// considers the actual elements in each equivalence class of each
+    /// outgoing transition.
+    ///
+    /// # Example
+    ///
+    /// This shows an example of how to get all of the elements in an
+    /// equivalence class.
+    ///
+    /// ```
+    /// use regex_automata::{nfa::thompson::NFA, util::alphabet::Unit};
+    ///
+    /// let nfa = NFA::new("[a-z]+")?;
+    /// let classes = nfa.byte_classes();
+    /// let elements: Vec<Unit> = classes.elements(Unit::u8(1)).collect();
+    /// let expected: Vec<Unit> = (b'a'..=b'z').map(Unit::u8).collect();
+    /// assert_eq!(expected, elements);
+    ///
+    /// # Ok::<(), Box<dyn std::error::Error>>(())
+    /// ```
+    #[inline]
+    pub fn elements(&self, class: Unit) -> ByteClassElements<'_> {
+        ByteClassElements { classes: self, class, byte: 0 }
+    }
+
+    /// Returns an iterator of byte ranges in the given equivalence class.
+    ///
+    /// That is, a sequence of contiguous ranges are returned. Typically, every
+    /// class maps to a single contiguous range.
+    fn element_ranges(&self, class: Unit) -> ByteClassElementRanges<'_> {
+        ByteClassElementRanges { elements: self.elements(class), range: None }
+    }
+}
+
+impl Default for ByteClasses {
+    fn default() -> ByteClasses {
+        ByteClasses::singletons()
+    }
+}
+
+impl core::fmt::Debug for ByteClasses {
+    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
+        if self.is_singleton() {
+            write!(f, "ByteClasses({{singletons}})")
+        } else {
+            write!(f, "ByteClasses(")?;
+            for (i, class) in self.iter().enumerate() {
+                if i > 0 {
+                    write!(f, ", ")?;
+                }
+                write!(f, "{:?} => [", class.as_usize())?;
+                for (start, end) in self.element_ranges(class) {
+                    if start == end {
+                        write!(f, "{start:?}")?;
+                    } else {
+                        write!(f, "{start:?}-{end:?}")?;
+                    }
+                }
+                write!(f, "]")?;
+            }
+            write!(f, ")")
+        }
+    }
+}
+
+/// An iterator over each equivalence class.
+///
+/// The last element in this iterator always corresponds to [`Unit::eoi`].
+///
+/// This is created by the [`ByteClasses::iter`] method.
+///
+/// The lifetime `'a` refers to the lifetime of the byte classes that this
+/// iterator was created from.
+#[derive(Debug)]
+pub struct ByteClassIter<'a> {
+    classes: &'a ByteClasses,
+    i: usize,
+}
+
+impl<'a> Iterator for ByteClassIter<'a> {
+    type Item = Unit;
+
+    fn next(&mut self) -> Option<Unit> {
+        if self.i + 1 == self.classes.alphabet_len() {
+            self.i += 1;
+            Some(self.classes.eoi())
+        } else if self.i < self.classes.alphabet_len() {
+            let class = u8::try_from(self.i).unwrap();
+            self.i += 1;
+            Some(Unit::u8(class))
+        } else {
+            None
+        }
+    }
+}
+
+/// An iterator over representative bytes from each equivalence class.
+///
+/// This is created by the [`ByteClasses::representatives`] method.
+///
+/// The lifetime `'a` refers to the lifetime of the byte classes that this
+/// iterator was created from.
+#[derive(Debug)]
+pub struct ByteClassRepresentatives<'a> {
+    classes: &'a ByteClasses,
+    cur_byte: usize,
+    end_byte: Option<usize>,
+    last_class: Option<u8>,
+}
+
+impl<'a> Iterator for ByteClassRepresentatives<'a> {
+    type Item = Unit;
+
+    fn next(&mut self) -> Option<Unit> {
+        while self.cur_byte < self.end_byte.unwrap_or(256) {
+            let byte = u8::try_from(self.cur_byte).unwrap();
+            let class = self.classes.get(byte);
+            self.cur_byte += 1;
+
+            if self.last_class != Some(class) {
+                self.last_class = Some(class);
+                return Some(Unit::u8(byte));
+            }
+        }
+        if self.cur_byte != usize::MAX && self.end_byte.is_none() {
+            // Using usize::MAX as a sentinel is OK because we ban usize::MAX
+            // from appearing as a start bound in iterator construction. But
+            // why do it this way? Well, we want to return the EOI class
+            // whenever the end of the given range is unbounded because EOI
+            // isn't really a "byte" per se, so the only way it should be
+            // excluded is if there is a bounded end to the range. Therefore,
+            // when the end is unbounded, we just need to know whether we've
+            // reported EOI or not. When we do, we set cur_byte to a value it
+            // can never otherwise be.
+            self.cur_byte = usize::MAX;
+            return Some(self.classes.eoi());
+        }
+        None
+    }
+}
+
+/// An iterator over all elements in an equivalence class.
+///
+/// This is created by the [`ByteClasses::elements`] method.
+///
+/// The lifetime `'a` refers to the lifetime of the byte classes that this
+/// iterator was created from.
+#[derive(Debug)]
+pub struct ByteClassElements<'a> {
+    classes: &'a ByteClasses,
+    class: Unit,
+    byte: usize,
+}
+
+impl<'a> Iterator for ByteClassElements<'a> {
+    type Item = Unit;
+
+    fn next(&mut self) -> Option<Unit> {
+        while self.byte < 256 {
+            let byte = u8::try_from(self.byte).unwrap();
+            self.byte += 1;
+            if self.class.is_byte(self.classes.get(byte)) {
+                return Some(Unit::u8(byte));
+            }
+        }
+        if self.byte < 257 {
+            self.byte += 1;
+            if self.class.is_eoi() {
+                return Some(Unit::eoi(256));
+            }
+        }
+        None
+    }
+}
+
+/// An iterator over all elements in an equivalence class expressed as a
+/// sequence of contiguous ranges.
+#[derive(Debug)]
+struct ByteClassElementRanges<'a> {
+    elements: ByteClassElements<'a>,
+    range: Option<(Unit, Unit)>,
+}
+
+impl<'a> Iterator for ByteClassElementRanges<'a> {
+    type Item = (Unit, Unit);
+
+    fn next(&mut self) -> Option<(Unit, Unit)> {
+        loop {
+            let element = match self.elements.next() {
+                None => return self.range.take(),
+                Some(element) => element,
+            };
+            match self.range.take() {
+                None => {
+                    self.range = Some((element, element));
+                }
+                Some((start, end)) => {
+                    if end.as_usize() + 1 != element.as_usize()
+                        || element.is_eoi()
+                    {
+                        self.range = Some((element, element));
+                        return Some((start, end));
+                    }
+                    self.range = Some((start, element));
+                }
+            }
+        }
+    }
+}
+
+/// A partitioning of bytes into equivalence classes.
+///
+/// A byte class set keeps track of an *approximation* of equivalence classes
+/// of bytes during NFA construction. That is, every byte in an equivalence
+/// class cannot discriminate between a match and a non-match.
+///
+/// For example, in the regex `[ab]+`, the bytes `a` and `b` would be in the
+/// same equivalence class because it never matters whether an `a` or a `b` is
+/// seen, and no combination of `a`s and `b`s in the text can discriminate a
+/// match.
+///
+/// Note though that this does not compute the minimal set of equivalence
+/// classes.
For example, in the regex `[ac]+`, both `a` and `c` are in the +/// same equivalence class for the same reason that `a` and `b` are in the +/// same equivalence class in the aforementioned regex. However, in this +/// implementation, `a` and `c` are put into distinct equivalence classes. The +/// reason for this is implementation complexity. In the future, we should +/// endeavor to compute the minimal equivalence classes since they can have a +/// rather large impact on the size of the DFA. (Doing this will likely require +/// rethinking how equivalence classes are computed, including changing the +/// representation here, which is only able to group contiguous bytes into the +/// same equivalence class.) +#[cfg(feature = "alloc")] +#[derive(Clone, Debug)] +pub(crate) struct ByteClassSet(ByteSet); + +#[cfg(feature = "alloc")] +impl Default for ByteClassSet { + fn default() -> ByteClassSet { + ByteClassSet::empty() + } +} + +#[cfg(feature = "alloc")] +impl ByteClassSet { + /// Create a new set of byte classes where all bytes are part of the same + /// equivalence class. + pub(crate) fn empty() -> Self { + ByteClassSet(ByteSet::empty()) + } + + /// Indicate the range of byte given (inclusive) can discriminate a + /// match between it and all other bytes outside of the range. + pub(crate) fn set_range(&mut self, start: u8, end: u8) { + debug_assert!(start <= end); + if start > 0 { + self.0.add(start - 1); + } + self.0.add(end); + } + + /// Add the contiguous ranges in the set given to this byte class set. + pub(crate) fn add_set(&mut self, set: &ByteSet) { + for (start, end) in set.iter_ranges() { + self.set_range(start, end); + } + } + + /// Convert this boolean set to a map that maps all byte values to their + /// corresponding equivalence class. The last mapping indicates the largest + /// equivalence class identifier (which is never bigger than 255). + pub(crate) fn byte_classes(&self) -> ByteClasses { + let mut classes = ByteClasses::empty(); + let mut class = 0u8; + let mut b = 0u8; + loop { + classes.set(b, class); + if b == 255 { + break; + } + if self.0.contains(b) { + class = class.checked_add(1).unwrap(); + } + b = b.checked_add(1).unwrap(); + } + classes + } +} + +/// A simple set of bytes that is reasonably cheap to copy and allocation free. +#[derive(Clone, Copy, Debug, Default, Eq, PartialEq)] +pub(crate) struct ByteSet { + bits: BitSet, +} + +/// The representation of a byte set. Split out so that we can define a +/// convenient Debug impl for it while keeping "ByteSet" in the output. +#[derive(Clone, Copy, Default, Eq, PartialEq)] +struct BitSet([u128; 2]); + +impl ByteSet { + /// Create an empty set of bytes. + pub(crate) fn empty() -> ByteSet { + ByteSet { bits: BitSet([0; 2]) } + } + + /// Add a byte to this set. + /// + /// If the given byte already belongs to this set, then this is a no-op. + pub(crate) fn add(&mut self, byte: u8) { + let bucket = byte / 128; + let bit = byte % 128; + self.bits.0[usize::from(bucket)] |= 1 << bit; + } + + /// Remove a byte from this set. + /// + /// If the given byte is not in this set, then this is a no-op. + pub(crate) fn remove(&mut self, byte: u8) { + let bucket = byte / 128; + let bit = byte % 128; + self.bits.0[usize::from(bucket)] &= !(1 << bit); + } + + /// Return true if and only if the given byte is in this set. 
+ pub(crate) fn contains(&self, byte: u8) -> bool { + let bucket = byte / 128; + let bit = byte % 128; + self.bits.0[usize::from(bucket)] & (1 << bit) > 0 + } + + /// Return true if and only if the given inclusive range of bytes is in + /// this set. + pub(crate) fn contains_range(&self, start: u8, end: u8) -> bool { + (start..=end).all(|b| self.contains(b)) + } + + /// Returns an iterator over all bytes in this set. + pub(crate) fn iter(&self) -> ByteSetIter<'_> { + ByteSetIter { set: self, b: 0 } + } + + /// Returns an iterator over all contiguous ranges of bytes in this set. + pub(crate) fn iter_ranges(&self) -> ByteSetRangeIter<'_> { + ByteSetRangeIter { set: self, b: 0 } + } + + /// Return true if and only if this set is empty. + #[cfg_attr(feature = "perf-inline", inline(always))] + pub(crate) fn is_empty(&self) -> bool { + self.bits.0 == [0, 0] + } + + /// Deserializes a byte set from the given slice. If the slice is of + /// incorrect length or is otherwise malformed, then an error is returned. + /// Upon success, the number of bytes read along with the set are returned. + /// The number of bytes read is always a multiple of 8. + pub(crate) fn from_bytes( + slice: &[u8], + ) -> Result<(ByteSet, usize), DeserializeError> { + use core::mem::size_of; + + wire::check_slice_len(slice, 2 * size_of::(), "byte set")?; + let mut nread = 0; + let (low, nr) = wire::try_read_u128(slice, "byte set low bucket")?; + nread += nr; + let (high, nr) = wire::try_read_u128(slice, "byte set high bucket")?; + nread += nr; + Ok((ByteSet { bits: BitSet([low, high]) }, nread)) + } + + /// Writes this byte set to the given byte buffer. If the given buffer is + /// too small, then an error is returned. Upon success, the total number of + /// bytes written is returned. The number of bytes written is guaranteed to + /// be a multiple of 8. + pub(crate) fn write_to( + &self, + dst: &mut [u8], + ) -> Result { + use core::mem::size_of; + + let nwrite = self.write_to_len(); + if dst.len() < nwrite { + return Err(SerializeError::buffer_too_small("byte set")); + } + let mut nw = 0; + E::write_u128(self.bits.0[0], &mut dst[nw..]); + nw += size_of::(); + E::write_u128(self.bits.0[1], &mut dst[nw..]); + nw += size_of::(); + assert_eq!(nwrite, nw, "expected to write certain number of bytes",); + assert_eq!( + nw % 8, + 0, + "expected to write multiple of 8 bytes for byte set", + ); + Ok(nw) + } + + /// Returns the total number of bytes written by `write_to`. 
+ pub(crate) fn write_to_len(&self) -> usize { + 2 * core::mem::size_of::() + } +} + +impl core::fmt::Debug for BitSet { + fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { + let mut fmtd = f.debug_set(); + for b in 0u8..=255 { + if (ByteSet { bits: *self }).contains(b) { + fmtd.entry(&b); + } + } + fmtd.finish() + } +} + +#[derive(Debug)] +pub(crate) struct ByteSetIter<'a> { + set: &'a ByteSet, + b: usize, +} + +impl<'a> Iterator for ByteSetIter<'a> { + type Item = u8; + + fn next(&mut self) -> Option { + while self.b <= 255 { + let b = u8::try_from(self.b).unwrap(); + self.b += 1; + if self.set.contains(b) { + return Some(b); + } + } + None + } +} + +#[derive(Debug)] +pub(crate) struct ByteSetRangeIter<'a> { + set: &'a ByteSet, + b: usize, +} + +impl<'a> Iterator for ByteSetRangeIter<'a> { + type Item = (u8, u8); + + fn next(&mut self) -> Option<(u8, u8)> { + let asu8 = |n: usize| u8::try_from(n).unwrap(); + while self.b <= 255 { + let start = asu8(self.b); + self.b += 1; + if !self.set.contains(start) { + continue; + } + + let mut end = start; + while self.b <= 255 && self.set.contains(asu8(self.b)) { + end = asu8(self.b); + self.b += 1; + } + return Some((start, end)); + } + None + } +} + +#[cfg(all(test, feature = "alloc"))] +mod tests { + use alloc::{vec, vec::Vec}; + + use super::*; + + #[test] + fn byte_classes() { + let mut set = ByteClassSet::empty(); + set.set_range(b'a', b'z'); + + let classes = set.byte_classes(); + assert_eq!(classes.get(0), 0); + assert_eq!(classes.get(1), 0); + assert_eq!(classes.get(2), 0); + assert_eq!(classes.get(b'a' - 1), 0); + assert_eq!(classes.get(b'a'), 1); + assert_eq!(classes.get(b'm'), 1); + assert_eq!(classes.get(b'z'), 1); + assert_eq!(classes.get(b'z' + 1), 2); + assert_eq!(classes.get(254), 2); + assert_eq!(classes.get(255), 2); + + let mut set = ByteClassSet::empty(); + set.set_range(0, 2); + set.set_range(4, 6); + let classes = set.byte_classes(); + assert_eq!(classes.get(0), 0); + assert_eq!(classes.get(1), 0); + assert_eq!(classes.get(2), 0); + assert_eq!(classes.get(3), 1); + assert_eq!(classes.get(4), 2); + assert_eq!(classes.get(5), 2); + assert_eq!(classes.get(6), 2); + assert_eq!(classes.get(7), 3); + assert_eq!(classes.get(255), 3); + } + + #[test] + fn full_byte_classes() { + let mut set = ByteClassSet::empty(); + for b in 0u8..=255 { + set.set_range(b, b); + } + assert_eq!(set.byte_classes().alphabet_len(), 257); + } + + #[test] + fn elements_typical() { + let mut set = ByteClassSet::empty(); + set.set_range(b'b', b'd'); + set.set_range(b'g', b'm'); + set.set_range(b'z', b'z'); + let classes = set.byte_classes(); + // class 0: \x00-a + // class 1: b-d + // class 2: e-f + // class 3: g-m + // class 4: n-y + // class 5: z-z + // class 6: \x7B-\xFF + // class 7: EOI + assert_eq!(classes.alphabet_len(), 8); + + let elements = classes.elements(Unit::u8(0)).collect::>(); + assert_eq!(elements.len(), 98); + assert_eq!(elements[0], Unit::u8(b'\x00')); + assert_eq!(elements[97], Unit::u8(b'a')); + + let elements = classes.elements(Unit::u8(1)).collect::>(); + assert_eq!( + elements, + vec![Unit::u8(b'b'), Unit::u8(b'c'), Unit::u8(b'd')], + ); + + let elements = classes.elements(Unit::u8(2)).collect::>(); + assert_eq!(elements, vec![Unit::u8(b'e'), Unit::u8(b'f')],); + + let elements = classes.elements(Unit::u8(3)).collect::>(); + assert_eq!( + elements, + vec![ + Unit::u8(b'g'), + Unit::u8(b'h'), + Unit::u8(b'i'), + Unit::u8(b'j'), + Unit::u8(b'k'), + Unit::u8(b'l'), + Unit::u8(b'm'), + ], + ); + + let elements = 
classes.elements(Unit::u8(4)).collect::>(); + assert_eq!(elements.len(), 12); + assert_eq!(elements[0], Unit::u8(b'n')); + assert_eq!(elements[11], Unit::u8(b'y')); + + let elements = classes.elements(Unit::u8(5)).collect::>(); + assert_eq!(elements, vec![Unit::u8(b'z')]); + + let elements = classes.elements(Unit::u8(6)).collect::>(); + assert_eq!(elements.len(), 133); + assert_eq!(elements[0], Unit::u8(b'\x7B')); + assert_eq!(elements[132], Unit::u8(b'\xFF')); + + let elements = classes.elements(Unit::eoi(7)).collect::>(); + assert_eq!(elements, vec![Unit::eoi(256)]); + } + + #[test] + fn elements_singletons() { + let classes = ByteClasses::singletons(); + assert_eq!(classes.alphabet_len(), 257); + + let elements = classes.elements(Unit::u8(b'a')).collect::>(); + assert_eq!(elements, vec![Unit::u8(b'a')]); + + let elements = classes.elements(Unit::eoi(5)).collect::>(); + assert_eq!(elements, vec![Unit::eoi(256)]); + } + + #[test] + fn elements_empty() { + let classes = ByteClasses::empty(); + assert_eq!(classes.alphabet_len(), 2); + + let elements = classes.elements(Unit::u8(0)).collect::>(); + assert_eq!(elements.len(), 256); + assert_eq!(elements[0], Unit::u8(b'\x00')); + assert_eq!(elements[255], Unit::u8(b'\xFF')); + + let elements = classes.elements(Unit::eoi(1)).collect::>(); + assert_eq!(elements, vec![Unit::eoi(256)]); + } + + #[test] + fn representatives() { + let mut set = ByteClassSet::empty(); + set.set_range(b'b', b'd'); + set.set_range(b'g', b'm'); + set.set_range(b'z', b'z'); + let classes = set.byte_classes(); + + let got: Vec = classes.representatives(..).collect(); + let expected = vec![ + Unit::u8(b'\x00'), + Unit::u8(b'b'), + Unit::u8(b'e'), + Unit::u8(b'g'), + Unit::u8(b'n'), + Unit::u8(b'z'), + Unit::u8(b'\x7B'), + Unit::eoi(7), + ]; + assert_eq!(expected, got); + + let got: Vec = classes.representatives(..0).collect(); + assert!(got.is_empty()); + let got: Vec = classes.representatives(1..1).collect(); + assert!(got.is_empty()); + let got: Vec = classes.representatives(255..255).collect(); + assert!(got.is_empty()); + + // A weird case that is the only guaranteed to way to get an iterator + // of just the EOI class by excluding all possible byte values. 
+ let got: Vec = classes + .representatives(( + core::ops::Bound::Excluded(255), + core::ops::Bound::Unbounded, + )) + .collect(); + let expected = vec![Unit::eoi(7)]; + assert_eq!(expected, got); + + let got: Vec = classes.representatives(..=255).collect(); + let expected = vec![ + Unit::u8(b'\x00'), + Unit::u8(b'b'), + Unit::u8(b'e'), + Unit::u8(b'g'), + Unit::u8(b'n'), + Unit::u8(b'z'), + Unit::u8(b'\x7B'), + ]; + assert_eq!(expected, got); + + let got: Vec = classes.representatives(b'b'..=b'd').collect(); + let expected = vec![Unit::u8(b'b')]; + assert_eq!(expected, got); + + let got: Vec = classes.representatives(b'a'..=b'd').collect(); + let expected = vec![Unit::u8(b'a'), Unit::u8(b'b')]; + assert_eq!(expected, got); + + let got: Vec = classes.representatives(b'b'..=b'e').collect(); + let expected = vec![Unit::u8(b'b'), Unit::u8(b'e')]; + assert_eq!(expected, got); + + let got: Vec = classes.representatives(b'A'..=b'Z').collect(); + let expected = vec![Unit::u8(b'A')]; + assert_eq!(expected, got); + + let got: Vec = classes.representatives(b'A'..=b'z').collect(); + let expected = vec![ + Unit::u8(b'A'), + Unit::u8(b'b'), + Unit::u8(b'e'), + Unit::u8(b'g'), + Unit::u8(b'n'), + Unit::u8(b'z'), + ]; + assert_eq!(expected, got); + + let got: Vec = classes.representatives(b'z'..).collect(); + let expected = vec![Unit::u8(b'z'), Unit::u8(b'\x7B'), Unit::eoi(7)]; + assert_eq!(expected, got); + + let got: Vec = classes.representatives(b'z'..=0xFF).collect(); + let expected = vec![Unit::u8(b'z'), Unit::u8(b'\x7B')]; + assert_eq!(expected, got); + } +} diff --git a/vendor/regex-automata/src/util/captures.rs b/vendor/regex-automata/src/util/captures.rs new file mode 100644 index 00000000000000..5376f348d10ebc --- /dev/null +++ b/vendor/regex-automata/src/util/captures.rs @@ -0,0 +1,2551 @@ +/*! +Provides types for dealing with capturing groups. + +Capturing groups refer to sub-patterns of regexes that some regex engines can +report matching offsets for. For example, matching `[a-z]([0-9]+)` against +`a789` would give `a789` as the overall match (for the implicit capturing group +at index `0`) and `789` as the match for the capturing group `([0-9]+)` (an +explicit capturing group at index `1`). + +Not all regex engines can report match offsets for capturing groups. Indeed, +to a first approximation, regex engines that can report capturing group offsets +tend to be quite a bit slower than regex engines that can't. This is because +tracking capturing groups at search time usually requires more "power" that +in turn adds overhead. + +Other regex implementations might call capturing groups "submatches." + +# Overview + +The main types in this module are: + +* [`Captures`] records the capturing group offsets found during a search. It +provides convenience routines for looking up capturing group offsets by either +index or name. +* [`GroupInfo`] records the mapping between capturing groups and "slots," +where the latter are how capturing groups are recorded during a regex search. +This also keeps a mapping from capturing group name to index, and capture +group index to name. A `GroupInfo` is used by `Captures` internally to +provide a convenient API. It is unlikely that you'll use a `GroupInfo` +directly, but for example, if you've compiled an Thompson NFA, then you can use +[`thompson::NFA::group_info`](crate::nfa::thompson::NFA::group_info) to get its +underlying `GroupInfo`. 
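As a minimal sketch of how these two types cooperate (assuming only the `PikeVM` and `Captures` APIs shown in the examples further below), the `GroupInfo` resolves a capture group name to an index, and the `Captures` value reports the span recorded for that index:

```rust
use regex_automata::{nfa::thompson::pikevm::PikeVM, Span};

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let re = PikeVM::new(r"(?P<key>\w+)=(?P<value>\w+)")?;
    let (mut cache, mut caps) = (re.create_cache(), re.create_captures());

    re.captures(&mut cache, "lang=rust", &mut caps);
    assert!(caps.is_match());

    // The GroupInfo maps the name "value" to its capture group index...
    let pid = caps.pattern().unwrap();
    let index = caps.group_info().to_index(pid, "value").unwrap();
    // ...and the Captures value holds the span recorded for that index.
    assert_eq!(Some(Span::from(5..9)), caps.get_group(index));
    // Which is exactly what the name-based convenience lookup returns.
    assert_eq!(Some(Span::from(5..9)), caps.get_group_by_name("value"));
    Ok(())
}
```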
+*/ + +use alloc::{string::String, sync::Arc, vec, vec::Vec}; + +use crate::util::{ + interpolate, + primitives::{ + NonMaxUsize, PatternID, PatternIDError, PatternIDIter, SmallIndex, + }, + search::{Match, Span}, +}; + +/// The span offsets of capturing groups after a match has been found. +/// +/// This type represents the output of regex engines that can report the +/// offsets at which capturing groups matches or "submatches" occur. For +/// example, the [`PikeVM`](crate::nfa::thompson::pikevm::PikeVM). When a match +/// occurs, it will at minimum contain the [`PatternID`] of the pattern that +/// matched. Depending upon how it was constructed, it may also contain the +/// start/end offsets of the entire match of the pattern and the start/end +/// offsets of each capturing group that participated in the match. +/// +/// Values of this type are always created for a specific [`GroupInfo`]. It is +/// unspecified behavior to use a `Captures` value in a search with any regex +/// engine that has a different `GroupInfo` than the one the `Captures` were +/// created with. +/// +/// # Constructors +/// +/// There are three constructors for this type that control what kind of +/// information is available upon a match: +/// +/// * [`Captures::all`]: Will store overall pattern match offsets in addition +/// to the offsets of capturing groups that participated in the match. +/// * [`Captures::matches`]: Will store only the overall pattern +/// match offsets. The offsets of capturing groups (even ones that participated +/// in the match) are not available. +/// * [`Captures::empty`]: Will only store the pattern ID that matched. No +/// match offsets are available at all. +/// +/// If you aren't sure which to choose, then pick the first one. The first one +/// is what convenience routines like, +/// [`PikeVM::create_captures`](crate::nfa::thompson::pikevm::PikeVM::create_captures), +/// will use automatically. +/// +/// The main difference between these choices is performance. Namely, if you +/// ask for _less_ information, then the execution of regex search may be able +/// to run more quickly. +/// +/// # Notes +/// +/// It is worth pointing out that this type is not coupled to any one specific +/// regex engine. Instead, its coupling is with [`GroupInfo`], which is the +/// thing that is responsible for mapping capturing groups to "slot" offsets. +/// Slot offsets are indices into a single sequence of memory at which matching +/// haystack offsets for the corresponding group are written by regex engines. 
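As a rough illustration of the constructor trade-off described above (a sketch relying only on the `PikeVM` and `Captures` APIs used in the examples that follow), the three constructors differ only in how much of a match they retain:

```rust
use regex_automata::{nfa::thompson::pikevm::PikeVM, util::captures::Captures};

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let re = PikeVM::new(r"(?P<word>[a-z]+)")?;
    let mut cache = re.create_cache();
    let info = re.get_nfa().group_info().clone();

    // `all`: the overall match and every capturing group span are recorded.
    let mut caps = Captures::all(info.clone());
    re.captures(&mut cache, "abc", &mut caps);
    assert!(caps.get_group_by_name("word").is_some());

    // `matches`: only the overall match span is recorded.
    let mut caps = Captures::matches(info.clone());
    re.captures(&mut cache, "abc", &mut caps);
    assert!(caps.get_match().is_some());
    assert_eq!(None, caps.get_group_by_name("word"));

    // `empty`: only the matching pattern ID is recorded, no offsets at all.
    let mut caps = Captures::empty(info);
    re.captures(&mut cache, "abc", &mut caps);
    assert!(caps.is_match());
    assert_eq!(None, caps.get_match());
    Ok(())
}
```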
+/// +/// # Example +/// +/// This example shows how to parse a simple date and extract the components of +/// the date via capturing groups: +/// +/// ``` +/// use regex_automata::{nfa::thompson::pikevm::PikeVM, Span}; +/// +/// let re = PikeVM::new(r"^([0-9]{4})-([0-9]{2})-([0-9]{2})$")?; +/// let (mut cache, mut caps) = (re.create_cache(), re.create_captures()); +/// +/// re.captures(&mut cache, "2010-03-14", &mut caps); +/// assert!(caps.is_match()); +/// assert_eq!(Some(Span::from(0..4)), caps.get_group(1)); +/// assert_eq!(Some(Span::from(5..7)), caps.get_group(2)); +/// assert_eq!(Some(Span::from(8..10)), caps.get_group(3)); +/// +/// # Ok::<(), Box>(()) +/// ``` +/// +/// # Example: named capturing groups +/// +/// This example is like the one above, but leverages the ability to name +/// capturing groups in order to make the code a bit clearer: +/// +/// ``` +/// use regex_automata::{nfa::thompson::pikevm::PikeVM, Span}; +/// +/// let re = PikeVM::new(r"^(?P[0-9]{4})-(?P[0-9]{2})-(?P[0-9]{2})$")?; +/// let (mut cache, mut caps) = (re.create_cache(), re.create_captures()); +/// +/// re.captures(&mut cache, "2010-03-14", &mut caps); +/// assert!(caps.is_match()); +/// assert_eq!(Some(Span::from(0..4)), caps.get_group_by_name("y")); +/// assert_eq!(Some(Span::from(5..7)), caps.get_group_by_name("m")); +/// assert_eq!(Some(Span::from(8..10)), caps.get_group_by_name("d")); +/// +/// # Ok::<(), Box>(()) +/// ``` +#[derive(Clone)] +pub struct Captures { + /// The group info that these capture groups are coupled to. This is what + /// gives the "convenience" of the `Captures` API. Namely, it provides the + /// slot mapping and the name|-->index mapping for capture lookups by name. + group_info: GroupInfo, + /// The ID of the pattern that matched. Regex engines must set this to + /// None when no match occurs. + pid: Option, + /// The slot values, i.e., submatch offsets. + /// + /// In theory, the smallest sequence of slots would be something like + /// `max(groups(pattern) for pattern in regex) * 2`, but instead, we use + /// `sum(groups(pattern) for pattern in regex) * 2`. Why? + /// + /// Well, the former could be used in theory, because we don't generally + /// have any overlapping APIs that involve capturing groups. Therefore, + /// there's technically never any need to have slots set for multiple + /// patterns. However, this might change some day, in which case, we would + /// need to have slots available. + /// + /// The other reason is that during the execution of some regex engines, + /// there exists a point in time where multiple slots for different + /// patterns may be written to before knowing which pattern has matched. + /// Therefore, the regex engines themselves, in order to support multiple + /// patterns correctly, must have all slots available. If `Captures` + /// doesn't have all slots available, then regex engines can't write + /// directly into the caller provided `Captures` and must instead write + /// into some other storage and then copy the slots involved in the match + /// at the end of the search. + /// + /// So overall, at least as of the time of writing, it seems like the path + /// of least resistance is to just require allocating all possible slots + /// instead of the conceptual minimum. Another way to justify this is that + /// the most common case is a single pattern, in which case, there is no + /// inefficiency here since the 'max' and 'sum' calculations above are + /// equivalent in that case. + /// + /// N.B. 
The mapping from group index to slot is maintained by `GroupInfo` + /// and is considered an API guarantee. See `GroupInfo` for more details on + /// that mapping. + /// + /// N.B. `Option` has the same size as a `usize`. + slots: Vec>, +} + +impl Captures { + /// Create new storage for the offsets of all matching capturing groups. + /// + /// This routine provides the most information for matches---namely, the + /// spans of matching capturing groups---but also requires the regex search + /// routines to do the most work. + /// + /// It is unspecified behavior to use the returned `Captures` value in a + /// search with a `GroupInfo` other than the one that is provided to this + /// constructor. + /// + /// # Example + /// + /// This example shows that all capturing groups---but only ones that + /// participated in a match---are available to query after a match has + /// been found: + /// + /// ``` + /// use regex_automata::{ + /// nfa::thompson::pikevm::PikeVM, + /// util::captures::Captures, + /// Span, Match, + /// }; + /// + /// let re = PikeVM::new( + /// r"^(?:(?P[a-z]+)|(?P[A-Z]+))(?P[0-9]+)$", + /// )?; + /// let mut cache = re.create_cache(); + /// let mut caps = Captures::all(re.get_nfa().group_info().clone()); + /// + /// re.captures(&mut cache, "ABC123", &mut caps); + /// assert!(caps.is_match()); + /// assert_eq!(Some(Match::must(0, 0..6)), caps.get_match()); + /// // The 'lower' group didn't match, so it won't have any offsets. + /// assert_eq!(None, caps.get_group_by_name("lower")); + /// assert_eq!(Some(Span::from(0..3)), caps.get_group_by_name("upper")); + /// assert_eq!(Some(Span::from(3..6)), caps.get_group_by_name("digits")); + /// + /// # Ok::<(), Box>(()) + /// ``` + pub fn all(group_info: GroupInfo) -> Captures { + let slots = group_info.slot_len(); + Captures { group_info, pid: None, slots: vec![None; slots] } + } + + /// Create new storage for only the full match spans of a pattern. This + /// does not include any capturing group offsets. + /// + /// It is unspecified behavior to use the returned `Captures` value in a + /// search with a `GroupInfo` other than the one that is provided to this + /// constructor. + /// + /// # Example + /// + /// This example shows that only overall match offsets are reported when + /// this constructor is used. Accessing any capturing groups other than + /// the 0th will always return `None`. + /// + /// ``` + /// use regex_automata::{ + /// nfa::thompson::pikevm::PikeVM, + /// util::captures::Captures, + /// Match, + /// }; + /// + /// let re = PikeVM::new( + /// r"^(?:(?P[a-z]+)|(?P[A-Z]+))(?P[0-9]+)$", + /// )?; + /// let mut cache = re.create_cache(); + /// let mut caps = Captures::matches(re.get_nfa().group_info().clone()); + /// + /// re.captures(&mut cache, "ABC123", &mut caps); + /// assert!(caps.is_match()); + /// assert_eq!(Some(Match::must(0, 0..6)), caps.get_match()); + /// // We didn't ask for capturing group offsets, so they aren't available. + /// assert_eq!(None, caps.get_group_by_name("lower")); + /// assert_eq!(None, caps.get_group_by_name("upper")); + /// assert_eq!(None, caps.get_group_by_name("digits")); + /// + /// # Ok::<(), Box>(()) + /// ``` + pub fn matches(group_info: GroupInfo) -> Captures { + // This is OK because we know there are at least this many slots, + // and GroupInfo construction guarantees that the number of slots fits + // into a usize. 
+ let slots = group_info.pattern_len().checked_mul(2).unwrap(); + Captures { group_info, pid: None, slots: vec![None; slots] } + } + + /// Create new storage for only tracking which pattern matched. No offsets + /// are stored at all. + /// + /// It is unspecified behavior to use the returned `Captures` value in a + /// search with a `GroupInfo` other than the one that is provided to this + /// constructor. + /// + /// # Example + /// + /// This example shows that only the pattern that matched can be accessed + /// from a `Captures` value created via this constructor. + /// + /// ``` + /// use regex_automata::{ + /// nfa::thompson::pikevm::PikeVM, + /// util::captures::Captures, + /// PatternID, + /// }; + /// + /// let re = PikeVM::new_many(&[r"[a-z]+", r"[A-Z]+"])?; + /// let mut cache = re.create_cache(); + /// let mut caps = Captures::empty(re.get_nfa().group_info().clone()); + /// + /// re.captures(&mut cache, "aABCz", &mut caps); + /// assert!(caps.is_match()); + /// assert_eq!(Some(PatternID::must(0)), caps.pattern()); + /// // We didn't ask for any offsets, so they aren't available. + /// assert_eq!(None, caps.get_match()); + /// + /// re.captures(&mut cache, &"aABCz"[1..], &mut caps); + /// assert!(caps.is_match()); + /// assert_eq!(Some(PatternID::must(1)), caps.pattern()); + /// // We didn't ask for any offsets, so they aren't available. + /// assert_eq!(None, caps.get_match()); + /// + /// # Ok::<(), Box>(()) + /// ``` + pub fn empty(group_info: GroupInfo) -> Captures { + Captures { group_info, pid: None, slots: vec![] } + } + + /// Returns true if and only if this capturing group represents a match. + /// + /// This is a convenience routine for `caps.pattern().is_some()`. + /// + /// # Example + /// + /// When using the PikeVM (for example), the lightest weight way of + /// detecting whether a match exists is to create capturing groups that + /// only track the ID of the pattern that match (if any): + /// + /// ``` + /// use regex_automata::{ + /// nfa::thompson::pikevm::PikeVM, + /// util::captures::Captures, + /// }; + /// + /// let re = PikeVM::new(r"[a-z]+")?; + /// let mut cache = re.create_cache(); + /// let mut caps = Captures::empty(re.get_nfa().group_info().clone()); + /// + /// re.captures(&mut cache, "aABCz", &mut caps); + /// assert!(caps.is_match()); + /// + /// # Ok::<(), Box>(()) + /// ``` + #[inline] + pub fn is_match(&self) -> bool { + self.pid.is_some() + } + + /// Returns the identifier of the pattern that matched when this + /// capturing group represents a match. If no match was found, then this + /// always returns `None`. + /// + /// This returns a pattern ID in precisely the cases in which `is_match` + /// returns `true`. Similarly, the pattern ID returned is always the + /// same pattern ID found in the `Match` returned by `get_match`. 
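A short sketch of that invariant, using the fully populated captures produced by `create_captures` as in the other examples here:

```rust
use regex_automata::nfa::thompson::pikevm::PikeVM;

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let re = PikeVM::new_many(&[r"[a-z]+", r"[A-Z]+"])?;
    let (mut cache, mut caps) = (re.create_cache(), re.create_captures());

    re.captures(&mut cache, "ABC", &mut caps);
    // `pattern()` is `Some` exactly when `is_match()` reports a match...
    assert_eq!(caps.pattern().is_some(), caps.is_match());
    // ...and it agrees with the pattern ID carried by `get_match()`.
    assert_eq!(caps.pattern(), caps.get_match().map(|m| m.pattern()));

    // With no match at all, both report the absence consistently.
    re.captures(&mut cache, "!!!", &mut caps);
    assert!(!caps.is_match());
    assert_eq!(None, caps.pattern());
    assert_eq!(None, caps.get_match());
    Ok(())
}
```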
+ /// + /// # Example + /// + /// When using the PikeVM (for example), the lightest weight way of + /// detecting which pattern matched is to create capturing groups that only + /// track the ID of the pattern that match (if any): + /// + /// ``` + /// use regex_automata::{ + /// nfa::thompson::pikevm::PikeVM, + /// util::captures::Captures, + /// PatternID, + /// }; + /// + /// let re = PikeVM::new_many(&[r"[a-z]+", r"[A-Z]+"])?; + /// let mut cache = re.create_cache(); + /// let mut caps = Captures::empty(re.get_nfa().group_info().clone()); + /// + /// re.captures(&mut cache, "ABC", &mut caps); + /// assert_eq!(Some(PatternID::must(1)), caps.pattern()); + /// // Recall that offsets are only available when using a non-empty + /// // Captures value. So even though a match occurred, this returns None! + /// assert_eq!(None, caps.get_match()); + /// + /// # Ok::<(), Box>(()) + /// ``` + #[inline] + pub fn pattern(&self) -> Option { + self.pid + } + + /// Returns the pattern ID and the span of the match, if one occurred. + /// + /// This always returns `None` when `Captures` was created with + /// [`Captures::empty`], even if a match was found. + /// + /// If this routine returns a non-`None` value, then `is_match` is + /// guaranteed to return `true` and `pattern` is also guaranteed to return + /// a non-`None` value. + /// + /// # Example + /// + /// This example shows how to get the full match from a search: + /// + /// ``` + /// use regex_automata::{nfa::thompson::pikevm::PikeVM, Match}; + /// + /// let re = PikeVM::new_many(&[r"[a-z]+", r"[A-Z]+"])?; + /// let (mut cache, mut caps) = (re.create_cache(), re.create_captures()); + /// + /// re.captures(&mut cache, "ABC", &mut caps); + /// assert_eq!(Some(Match::must(1, 0..3)), caps.get_match()); + /// + /// # Ok::<(), Box>(()) + /// ``` + #[inline] + pub fn get_match(&self) -> Option { + Some(Match::new(self.pattern()?, self.get_group(0)?)) + } + + /// Returns the span of a capturing group match corresponding to the group + /// index given, only if both the overall pattern matched and the capturing + /// group participated in that match. + /// + /// This returns `None` if `index` is invalid. `index` is valid if and only + /// if it's less than [`Captures::group_len`] for the matching pattern. + /// + /// This always returns `None` when `Captures` was created with + /// [`Captures::empty`], even if a match was found. This also always + /// returns `None` for any `index > 0` when `Captures` was created with + /// [`Captures::matches`]. + /// + /// If this routine returns a non-`None` value, then `is_match` is + /// guaranteed to return `true`, `pattern` is guaranteed to return a + /// non-`None` value and `get_match` is guaranteed to return a non-`None` + /// value. + /// + /// By convention, the 0th capture group will always return the same + /// span as the span returned by `get_match`. This is because the 0th + /// capture group always corresponds to the entirety of the pattern's + /// match. (It is similarly always unnamed because it is implicit.) This + /// isn't necessarily true of all regex engines. For example, one can + /// hand-compile a [`thompson::NFA`](crate::nfa::thompson::NFA) via a + /// [`thompson::Builder`](crate::nfa::thompson::Builder), which isn't + /// technically forced to make the 0th capturing group always correspond to + /// the entire match. 
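For instance, a minimal sketch of the 0th-group convention described above (using the same `PikeVM` API as the example below):

```rust
use regex_automata::{nfa::thompson::pikevm::PikeVM, Span};

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let re = PikeVM::new(r"([0-9]{4})-([0-9]{2})")?;
    let (mut cache, mut caps) = (re.create_cache(), re.create_captures());

    re.captures(&mut cache, "2010-03", &mut caps);
    // The 0th group always spans the same range as the overall match.
    let overall = caps.get_match().unwrap();
    assert_eq!(overall.start()..overall.end(), 0..7);
    assert_eq!(Some(Span::from(0..7)), caps.get_group(0));
    // Explicit groups follow at indices 1 and up.
    assert_eq!(Some(Span::from(0..4)), caps.get_group(1));
    assert_eq!(Some(Span::from(5..7)), caps.get_group(2));
    Ok(())
}
```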
+ /// + /// # Example + /// + /// This example shows how to get the capturing groups, by index, from a + /// match: + /// + /// ``` + /// # if cfg!(miri) { return Ok(()); } // miri takes too long + /// use regex_automata::{nfa::thompson::pikevm::PikeVM, Span, Match}; + /// + /// let re = PikeVM::new(r"^(?P\pL+)\s+(?P\pL+)$")?; + /// let (mut cache, mut caps) = (re.create_cache(), re.create_captures()); + /// + /// re.captures(&mut cache, "Bruce Springsteen", &mut caps); + /// assert_eq!(Some(Match::must(0, 0..17)), caps.get_match()); + /// assert_eq!(Some(Span::from(0..5)), caps.get_group(1)); + /// assert_eq!(Some(Span::from(6..17)), caps.get_group(2)); + /// // Looking for a non-existent capturing group will return None: + /// assert_eq!(None, caps.get_group(3)); + /// # // literals are too big for 32-bit usize: #1039 + /// # #[cfg(target_pointer_width = "64")] + /// assert_eq!(None, caps.get_group(9944060567225171988)); + /// + /// # Ok::<(), Box>(()) + /// ``` + #[inline] + pub fn get_group(&self, index: usize) -> Option { + let pid = self.pattern()?; + // There's a little bit of work needed to map captures to slots in the + // fully general case. But in the overwhelming common case of a single + // pattern, we can just do some simple arithmetic. + let (slot_start, slot_end) = if self.group_info().pattern_len() == 1 { + (index.checked_mul(2)?, index.checked_mul(2)?.checked_add(1)?) + } else { + self.group_info().slots(pid, index)? + }; + let start = self.slots.get(slot_start).copied()??; + let end = self.slots.get(slot_end).copied()??; + Some(Span { start: start.get(), end: end.get() }) + } + + /// Returns the span of a capturing group match corresponding to the group + /// name given, only if both the overall pattern matched and the capturing + /// group participated in that match. + /// + /// This returns `None` if `name` does not correspond to a valid capturing + /// group for the pattern that matched. + /// + /// This always returns `None` when `Captures` was created with + /// [`Captures::empty`], even if a match was found. This also always + /// returns `None` for any `index > 0` when `Captures` was created with + /// [`Captures::matches`]. + /// + /// If this routine returns a non-`None` value, then `is_match` is + /// guaranteed to return `true`, `pattern` is guaranteed to return a + /// non-`None` value and `get_match` is guaranteed to return a non-`None` + /// value. + /// + /// # Example + /// + /// This example shows how to get the capturing groups, by name, from a + /// match: + /// + /// ``` + /// # if cfg!(miri) { return Ok(()); } // miri takes too long + /// use regex_automata::{nfa::thompson::pikevm::PikeVM, Span, Match}; + /// + /// let re = PikeVM::new(r"^(?P\pL+)\s+(?P\pL+)$")?; + /// let (mut cache, mut caps) = (re.create_cache(), re.create_captures()); + /// + /// re.captures(&mut cache, "Bruce Springsteen", &mut caps); + /// assert_eq!(Some(Match::must(0, 0..17)), caps.get_match()); + /// assert_eq!(Some(Span::from(0..5)), caps.get_group_by_name("first")); + /// assert_eq!(Some(Span::from(6..17)), caps.get_group_by_name("last")); + /// // Looking for a non-existent capturing group will return None: + /// assert_eq!(None, caps.get_group_by_name("middle")); + /// + /// # Ok::<(), Box>(()) + /// ``` + pub fn get_group_by_name(&self, name: &str) -> Option { + let index = self.group_info().to_index(self.pattern()?, name)?; + self.get_group(index) + } + + /// Returns an iterator of possible spans for every capturing group in the + /// matching pattern. 
+ /// + /// If this `Captures` value does not correspond to a match, then the + /// iterator returned yields no elements. + /// + /// Note that the iterator returned yields elements of type `Option`. + /// A span is present if and only if it corresponds to a capturing group + /// that participated in a match. + /// + /// # Example + /// + /// This example shows how to collect all capturing groups: + /// + /// ``` + /// # if cfg!(miri) { return Ok(()); } // miri takes too long + /// use regex_automata::{nfa::thompson::pikevm::PikeVM, Span}; + /// + /// let re = PikeVM::new( + /// // Matches first/last names, with an optional middle name. + /// r"^(?P\pL+)\s+(?:(?P\pL+)\s+)?(?P\pL+)$", + /// )?; + /// let (mut cache, mut caps) = (re.create_cache(), re.create_captures()); + /// + /// re.captures(&mut cache, "Harry James Potter", &mut caps); + /// assert!(caps.is_match()); + /// let groups: Vec> = caps.iter().collect(); + /// assert_eq!(groups, vec![ + /// Some(Span::from(0..18)), + /// Some(Span::from(0..5)), + /// Some(Span::from(6..11)), + /// Some(Span::from(12..18)), + /// ]); + /// + /// # Ok::<(), Box>(()) + /// ``` + /// + /// This example uses the same regex as the previous example, but with a + /// haystack that omits the middle name. This results in a capturing group + /// that is present in the elements yielded by the iterator but without a + /// match: + /// + /// ``` + /// # if cfg!(miri) { return Ok(()); } // miri takes too long + /// use regex_automata::{nfa::thompson::pikevm::PikeVM, Span}; + /// + /// let re = PikeVM::new( + /// // Matches first/last names, with an optional middle name. + /// r"^(?P\pL+)\s+(?:(?P\pL+)\s+)?(?P\pL+)$", + /// )?; + /// let (mut cache, mut caps) = (re.create_cache(), re.create_captures()); + /// + /// re.captures(&mut cache, "Harry Potter", &mut caps); + /// assert!(caps.is_match()); + /// let groups: Vec> = caps.iter().collect(); + /// assert_eq!(groups, vec![ + /// Some(Span::from(0..12)), + /// Some(Span::from(0..5)), + /// None, + /// Some(Span::from(6..12)), + /// ]); + /// + /// # Ok::<(), Box>(()) + /// ``` + pub fn iter(&self) -> CapturesPatternIter<'_> { + let names = self + .pattern() + .map_or(GroupInfoPatternNames::empty().enumerate(), |pid| { + self.group_info().pattern_names(pid).enumerate() + }); + CapturesPatternIter { caps: self, names } + } + + /// Return the total number of capturing groups for the matching pattern. + /// + /// If this `Captures` value does not correspond to a match, then this + /// always returns `0`. + /// + /// This always returns the same number of elements yielded by + /// [`Captures::iter`]. That is, the number includes capturing groups even + /// if they don't participate in the match. + /// + /// # Example + /// + /// This example shows how to count the total number of capturing groups + /// associated with a pattern. Notice that it includes groups that did not + /// participate in a match (just like `Captures::iter` does). + /// + /// ``` + /// # if cfg!(miri) { return Ok(()); } // miri takes too long + /// use regex_automata::nfa::thompson::pikevm::PikeVM; + /// + /// let re = PikeVM::new( + /// // Matches first/last names, with an optional middle name. 
+ /// r"^(?P\pL+)\s+(?:(?P\pL+)\s+)?(?P\pL+)$", + /// )?; + /// let (mut cache, mut caps) = (re.create_cache(), re.create_captures()); + /// + /// re.captures(&mut cache, "Harry Potter", &mut caps); + /// assert_eq!(4, caps.group_len()); + /// + /// # Ok::<(), Box>(()) + /// ``` + pub fn group_len(&self) -> usize { + let pid = match self.pattern() { + None => return 0, + Some(pid) => pid, + }; + self.group_info().group_len(pid) + } + + /// Returns a reference to the underlying group info on which these + /// captures are based. + /// + /// The difference between `GroupInfo` and `Captures` is that the former + /// defines the structure of capturing groups where as the latter is what + /// stores the actual match information. So where as `Captures` only gives + /// you access to the current match, `GroupInfo` lets you query any + /// information about all capturing groups, even ones for patterns that + /// weren't involved in a match. + /// + /// Note that a `GroupInfo` uses reference counting internally, so it may + /// be cloned cheaply. + /// + /// # Example + /// + /// This example shows how to get all capturing group names from the + /// underlying `GroupInfo`. Notice that we don't even need to run a + /// search. + /// + /// ``` + /// use regex_automata::{nfa::thompson::pikevm::PikeVM, PatternID}; + /// + /// let re = PikeVM::new_many(&[ + /// r"(?Pa)", + /// r"(a)(b)", + /// r"ab", + /// r"(?Pa)(?Pa)", + /// r"(?Pz)", + /// ])?; + /// let caps = re.create_captures(); + /// + /// let expected = vec![ + /// (PatternID::must(0), 0, None), + /// (PatternID::must(0), 1, Some("foo")), + /// (PatternID::must(1), 0, None), + /// (PatternID::must(1), 1, None), + /// (PatternID::must(1), 2, None), + /// (PatternID::must(2), 0, None), + /// (PatternID::must(3), 0, None), + /// (PatternID::must(3), 1, Some("bar")), + /// (PatternID::must(3), 2, Some("quux")), + /// (PatternID::must(4), 0, None), + /// (PatternID::must(4), 1, Some("foo")), + /// ]; + /// // We could also just use 're.get_nfa().group_info()'. + /// let got: Vec<(PatternID, usize, Option<&str>)> = + /// caps.group_info().all_names().collect(); + /// assert_eq!(expected, got); + /// + /// # Ok::<(), Box>(()) + /// ``` + pub fn group_info(&self) -> &GroupInfo { + &self.group_info + } + + /// Interpolates the capture references in `replacement` with the + /// corresponding substrings in `haystack` matched by each reference. The + /// interpolated string is returned. + /// + /// See the [`interpolate` module](interpolate) for documentation on the + /// format of the replacement string. + /// + /// # Example + /// + /// This example shows how to use interpolation, and also shows how it + /// can work with multi-pattern regexes. + /// + /// ``` + /// use regex_automata::{nfa::thompson::pikevm::PikeVM, PatternID}; + /// + /// let re = PikeVM::new_many(&[ + /// r"(?[0-9]{2})-(?[0-9]{2})-(?[0-9]{4})", + /// r"(?[0-9]{4})-(?[0-9]{2})-(?[0-9]{2})", + /// ])?; + /// let mut cache = re.create_cache(); + /// let mut caps = re.create_captures(); + /// + /// let replacement = "year=$year, month=$month, day=$day"; + /// + /// // This matches the first pattern. + /// let hay = "On 14-03-2010, I became a Tennessee lamb."; + /// re.captures(&mut cache, hay, &mut caps); + /// let result = caps.interpolate_string(hay, replacement); + /// assert_eq!("year=2010, month=03, day=14", result); + /// + /// // And this matches the second pattern. 
+ /// let hay = "On 2010-03-14, I became a Tennessee lamb."; + /// re.captures(&mut cache, hay, &mut caps); + /// let result = caps.interpolate_string(hay, replacement); + /// assert_eq!("year=2010, month=03, day=14", result); + /// + /// # Ok::<(), Box>(()) + /// ``` + pub fn interpolate_string( + &self, + haystack: &str, + replacement: &str, + ) -> String { + let mut dst = String::new(); + self.interpolate_string_into(haystack, replacement, &mut dst); + dst + } + + /// Interpolates the capture references in `replacement` with the + /// corresponding substrings in `haystack` matched by each reference. The + /// interpolated string is written to `dst`. + /// + /// See the [`interpolate` module](interpolate) for documentation on the + /// format of the replacement string. + /// + /// # Example + /// + /// This example shows how to use interpolation, and also shows how it + /// can work with multi-pattern regexes. + /// + /// ``` + /// use regex_automata::{nfa::thompson::pikevm::PikeVM, PatternID}; + /// + /// let re = PikeVM::new_many(&[ + /// r"(?[0-9]{2})-(?[0-9]{2})-(?[0-9]{4})", + /// r"(?[0-9]{4})-(?[0-9]{2})-(?[0-9]{2})", + /// ])?; + /// let mut cache = re.create_cache(); + /// let mut caps = re.create_captures(); + /// + /// let replacement = "year=$year, month=$month, day=$day"; + /// + /// // This matches the first pattern. + /// let hay = "On 14-03-2010, I became a Tennessee lamb."; + /// re.captures(&mut cache, hay, &mut caps); + /// let mut dst = String::new(); + /// caps.interpolate_string_into(hay, replacement, &mut dst); + /// assert_eq!("year=2010, month=03, day=14", dst); + /// + /// // And this matches the second pattern. + /// let hay = "On 2010-03-14, I became a Tennessee lamb."; + /// re.captures(&mut cache, hay, &mut caps); + /// let mut dst = String::new(); + /// caps.interpolate_string_into(hay, replacement, &mut dst); + /// assert_eq!("year=2010, month=03, day=14", dst); + /// + /// # Ok::<(), Box>(()) + /// ``` + pub fn interpolate_string_into( + &self, + haystack: &str, + replacement: &str, + dst: &mut String, + ) { + interpolate::string( + replacement, + |index, dst| { + let span = match self.get_group(index) { + None => return, + Some(span) => span, + }; + dst.push_str(&haystack[span]); + }, + |name| self.group_info().to_index(self.pattern()?, name), + dst, + ); + } + + /// Interpolates the capture references in `replacement` with the + /// corresponding substrings in `haystack` matched by each reference. The + /// interpolated byte string is returned. + /// + /// See the [`interpolate` module](interpolate) for documentation on the + /// format of the replacement string. + /// + /// # Example + /// + /// This example shows how to use interpolation, and also shows how it + /// can work with multi-pattern regexes. + /// + /// ``` + /// use regex_automata::{nfa::thompson::pikevm::PikeVM, PatternID}; + /// + /// let re = PikeVM::new_many(&[ + /// r"(?[0-9]{2})-(?[0-9]{2})-(?[0-9]{4})", + /// r"(?[0-9]{4})-(?[0-9]{2})-(?[0-9]{2})", + /// ])?; + /// let mut cache = re.create_cache(); + /// let mut caps = re.create_captures(); + /// + /// let replacement = b"year=$year, month=$month, day=$day"; + /// + /// // This matches the first pattern. + /// let hay = b"On 14-03-2010, I became a Tennessee lamb."; + /// re.captures(&mut cache, hay, &mut caps); + /// let result = caps.interpolate_bytes(hay, replacement); + /// assert_eq!(&b"year=2010, month=03, day=14"[..], result); + /// + /// // And this matches the second pattern. 
+ /// let hay = b"On 2010-03-14, I became a Tennessee lamb."; + /// re.captures(&mut cache, hay, &mut caps); + /// let result = caps.interpolate_bytes(hay, replacement); + /// assert_eq!(&b"year=2010, month=03, day=14"[..], result); + /// + /// # Ok::<(), Box>(()) + /// ``` + pub fn interpolate_bytes( + &self, + haystack: &[u8], + replacement: &[u8], + ) -> Vec { + let mut dst = vec![]; + self.interpolate_bytes_into(haystack, replacement, &mut dst); + dst + } + + /// Interpolates the capture references in `replacement` with the + /// corresponding substrings in `haystack` matched by each reference. The + /// interpolated byte string is written to `dst`. + /// + /// See the [`interpolate` module](interpolate) for documentation on the + /// format of the replacement string. + /// + /// # Example + /// + /// This example shows how to use interpolation, and also shows how it + /// can work with multi-pattern regexes. + /// + /// ``` + /// use regex_automata::{nfa::thompson::pikevm::PikeVM, PatternID}; + /// + /// let re = PikeVM::new_many(&[ + /// r"(?[0-9]{2})-(?[0-9]{2})-(?[0-9]{4})", + /// r"(?[0-9]{4})-(?[0-9]{2})-(?[0-9]{2})", + /// ])?; + /// let mut cache = re.create_cache(); + /// let mut caps = re.create_captures(); + /// + /// let replacement = b"year=$year, month=$month, day=$day"; + /// + /// // This matches the first pattern. + /// let hay = b"On 14-03-2010, I became a Tennessee lamb."; + /// re.captures(&mut cache, hay, &mut caps); + /// let mut dst = vec![]; + /// caps.interpolate_bytes_into(hay, replacement, &mut dst); + /// assert_eq!(&b"year=2010, month=03, day=14"[..], dst); + /// + /// // And this matches the second pattern. + /// let hay = b"On 2010-03-14, I became a Tennessee lamb."; + /// re.captures(&mut cache, hay, &mut caps); + /// let mut dst = vec![]; + /// caps.interpolate_bytes_into(hay, replacement, &mut dst); + /// assert_eq!(&b"year=2010, month=03, day=14"[..], dst); + /// + /// # Ok::<(), Box>(()) + /// ``` + pub fn interpolate_bytes_into( + &self, + haystack: &[u8], + replacement: &[u8], + dst: &mut Vec, + ) { + interpolate::bytes( + replacement, + |index, dst| { + let span = match self.get_group(index) { + None => return, + Some(span) => span, + }; + dst.extend_from_slice(&haystack[span]); + }, + |name| self.group_info().to_index(self.pattern()?, name), + dst, + ); + } + + /// This is a convenience routine for extracting the substrings + /// corresponding to matching capture groups in the given `haystack`. The + /// `haystack` should be the same substring used to find the match spans in + /// this `Captures` value. + /// + /// This is identical to [`Captures::extract_bytes`], except it works with + /// `&str` instead of `&[u8]`. + /// + /// # Panics + /// + /// This panics if the number of explicit matching groups in this + /// `Captures` value is less than `N`. This also panics if this `Captures` + /// value does not correspond to a match. + /// + /// Note that this does *not* panic if the number of explicit matching + /// groups is bigger than `N`. In that case, only the first `N` matching + /// groups are extracted. 
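To make the panic condition concrete, here is a small sketch (assuming the `PikeVM` API used throughout these docs) in which an optional group does not participate in the match:

```rust
use regex_automata::nfa::thompson::pikevm::PikeVM;

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let re = PikeVM::new(r"(a)(b)?")?;
    let (mut cache, mut caps) = (re.create_cache(), re.create_captures());

    re.captures(&mut cache, "a", &mut caps);
    assert!(caps.is_match());

    // Only one explicit group participated, so extracting one group works...
    let (full, [first]) = caps.extract("a");
    assert_eq!(("a", "a"), (full, first));
    // ...but `caps.extract::<2>("a")` would panic here, because the optional
    // second group did not participate in this match.
    Ok(())
}
```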
+ /// + /// # Example + /// + /// ``` + /// use regex_automata::nfa::thompson::pikevm::PikeVM; + /// + /// let re = PikeVM::new(r"([0-9]{4})-([0-9]{2})-([0-9]{2})")?; + /// let mut cache = re.create_cache(); + /// let mut caps = re.create_captures(); + /// + /// let hay = "On 2010-03-14, I became a Tennessee lamb."; + /// re.captures(&mut cache, hay, &mut caps); + /// assert!(caps.is_match()); + /// let (full, [year, month, day]) = caps.extract(hay); + /// assert_eq!("2010-03-14", full); + /// assert_eq!("2010", year); + /// assert_eq!("03", month); + /// assert_eq!("14", day); + /// + /// // We can also ask for fewer than all capture groups. + /// let (full, [year]) = caps.extract(hay); + /// assert_eq!("2010-03-14", full); + /// assert_eq!("2010", year); + /// + /// # Ok::<(), Box>(()) + /// ``` + pub fn extract<'h, const N: usize>( + &self, + haystack: &'h str, + ) -> (&'h str, [&'h str; N]) { + let mut matched = self.iter().flatten(); + let whole_match = &haystack[matched.next().expect("a match")]; + let group_matches = [0; N].map(|_| { + let sp = matched.next().expect("too few matching groups"); + &haystack[sp] + }); + (whole_match, group_matches) + } + + /// This is a convenience routine for extracting the substrings + /// corresponding to matching capture groups in the given `haystack`. The + /// `haystack` should be the same substring used to find the match spans in + /// this `Captures` value. + /// + /// This is identical to [`Captures::extract`], except it works with + /// `&[u8]` instead of `&str`. + /// + /// # Panics + /// + /// This panics if the number of explicit matching groups in this + /// `Captures` value is less than `N`. This also panics if this `Captures` + /// value does not correspond to a match. + /// + /// Note that this does *not* panic if the number of explicit matching + /// groups is bigger than `N`. In that case, only the first `N` matching + /// groups are extracted. + /// + /// # Example + /// + /// ``` + /// use regex_automata::nfa::thompson::pikevm::PikeVM; + /// + /// let re = PikeVM::new(r"([0-9]{4})-([0-9]{2})-([0-9]{2})")?; + /// let mut cache = re.create_cache(); + /// let mut caps = re.create_captures(); + /// + /// let hay = b"On 2010-03-14, I became a Tennessee lamb."; + /// re.captures(&mut cache, hay, &mut caps); + /// assert!(caps.is_match()); + /// let (full, [year, month, day]) = caps.extract_bytes(hay); + /// assert_eq!(b"2010-03-14", full); + /// assert_eq!(b"2010", year); + /// assert_eq!(b"03", month); + /// assert_eq!(b"14", day); + /// + /// // We can also ask for fewer than all capture groups. + /// let (full, [year]) = caps.extract_bytes(hay); + /// assert_eq!(b"2010-03-14", full); + /// assert_eq!(b"2010", year); + /// + /// # Ok::<(), Box>(()) + /// ``` + pub fn extract_bytes<'h, const N: usize>( + &self, + haystack: &'h [u8], + ) -> (&'h [u8], [&'h [u8]; N]) { + let mut matched = self.iter().flatten(); + let whole_match = &haystack[matched.next().expect("a match")]; + let group_matches = [0; N].map(|_| { + let sp = matched.next().expect("too few matching groups"); + &haystack[sp] + }); + (whole_match, group_matches) + } +} + +/// Lower level "slot" oriented APIs. One does not typically need to use these +/// when executing a search. They are instead mostly intended for folks that +/// are writing their own regex engine while reusing this `Captures` type. +impl Captures { + /// Clear this `Captures` value. + /// + /// After clearing, all slots inside this `Captures` value will be set to + /// `None`. 
Similarly, any pattern ID that it was previously associated + /// with (for a match) is erased. + /// + /// It is not usually necessary to call this routine. Namely, a `Captures` + /// value only provides high level access to the capturing groups of the + /// pattern that matched, and only low level access to individual slots. + /// Thus, even if slots corresponding to groups that aren't associated + /// with the matching pattern are set, then it won't impact the higher + /// level APIs. Namely, higher level APIs like [`Captures::get_group`] will + /// return `None` if no pattern ID is present, even if there are spans set + /// in the underlying slots. + /// + /// Thus, to "clear" a `Captures` value of a match, it is usually only + /// necessary to call [`Captures::set_pattern`] with `None`. + /// + /// # Example + /// + /// This example shows what happens when a `Captures` value is cleared. + /// + /// ``` + /// # if cfg!(miri) { return Ok(()); } // miri takes too long + /// use regex_automata::nfa::thompson::pikevm::PikeVM; + /// + /// let re = PikeVM::new(r"^(?P\pL+)\s+(?P\pL+)$")?; + /// let (mut cache, mut caps) = (re.create_cache(), re.create_captures()); + /// + /// re.captures(&mut cache, "Bruce Springsteen", &mut caps); + /// assert!(caps.is_match()); + /// let slots: Vec> = + /// caps.slots().iter().map(|s| s.map(|x| x.get())).collect(); + /// // Note that the following ordering is considered an API guarantee. + /// assert_eq!(slots, vec![ + /// Some(0), + /// Some(17), + /// Some(0), + /// Some(5), + /// Some(6), + /// Some(17), + /// ]); + /// + /// // Now clear the slots. Everything is gone and it is no longer a match. + /// caps.clear(); + /// assert!(!caps.is_match()); + /// let slots: Vec> = + /// caps.slots().iter().map(|s| s.map(|x| x.get())).collect(); + /// assert_eq!(slots, vec![ + /// None, + /// None, + /// None, + /// None, + /// None, + /// None, + /// ]); + /// + /// # Ok::<(), Box>(()) + /// ``` + #[inline] + pub fn clear(&mut self) { + self.pid = None; + for slot in self.slots.iter_mut() { + *slot = None; + } + } + + /// Set the pattern on this `Captures` value. + /// + /// When the pattern ID is `None`, then this `Captures` value does not + /// correspond to a match (`is_match` will return `false`). Otherwise, it + /// corresponds to a match. + /// + /// This is useful in search implementations where you might want to + /// initially call `set_pattern(None)` in order to avoid the cost of + /// calling `clear()` if it turns out to not be necessary. + /// + /// # Example + /// + /// This example shows that `set_pattern` merely overwrites the pattern ID. + /// It does not actually change the underlying slot values. + /// + /// ``` + /// # if cfg!(miri) { return Ok(()); } // miri takes too long + /// use regex_automata::nfa::thompson::pikevm::PikeVM; + /// + /// let re = PikeVM::new(r"^(?P\pL+)\s+(?P\pL+)$")?; + /// let (mut cache, mut caps) = (re.create_cache(), re.create_captures()); + /// + /// re.captures(&mut cache, "Bruce Springsteen", &mut caps); + /// assert!(caps.is_match()); + /// assert!(caps.pattern().is_some()); + /// let slots: Vec> = + /// caps.slots().iter().map(|s| s.map(|x| x.get())).collect(); + /// // Note that the following ordering is considered an API guarantee. + /// assert_eq!(slots, vec![ + /// Some(0), + /// Some(17), + /// Some(0), + /// Some(5), + /// Some(6), + /// Some(17), + /// ]); + /// + /// // Now set the pattern to None. Note that the slot values remain. 
+ /// caps.set_pattern(None); + /// assert!(!caps.is_match()); + /// assert!(!caps.pattern().is_some()); + /// let slots: Vec> = + /// caps.slots().iter().map(|s| s.map(|x| x.get())).collect(); + /// // Note that the following ordering is considered an API guarantee. + /// assert_eq!(slots, vec![ + /// Some(0), + /// Some(17), + /// Some(0), + /// Some(5), + /// Some(6), + /// Some(17), + /// ]); + /// + /// # Ok::<(), Box>(()) + /// ``` + #[inline] + pub fn set_pattern(&mut self, pid: Option) { + self.pid = pid; + } + + /// Returns the underlying slots, where each slot stores a single offset. + /// + /// Every matching capturing group generally corresponds to two slots: one + /// slot for the starting position and another for the ending position. + /// Typically, either both are present or neither are. (The weasel word + /// "typically" is used here because it really depends on the regex engine + /// implementation. Every sensible regex engine likely adheres to this + /// invariant, and every regex engine in this crate is sensible.) + /// + /// Generally speaking, callers should prefer to use higher level routines + /// like [`Captures::get_match`] or [`Captures::get_group`]. + /// + /// An important note here is that a regex engine may not reset all of the + /// slots to `None` values when no match occurs, or even when a match of + /// a different pattern occurs. But this depends on how the regex engine + /// implementation deals with slots. + /// + /// # Example + /// + /// This example shows how to get the underlying slots from a regex match. + /// + /// ``` + /// use regex_automata::{ + /// nfa::thompson::pikevm::PikeVM, + /// util::primitives::{PatternID, NonMaxUsize}, + /// }; + /// + /// let re = PikeVM::new_many(&[ + /// r"[a-z]+", + /// r"[0-9]+", + /// ])?; + /// let (mut cache, mut caps) = (re.create_cache(), re.create_captures()); + /// + /// re.captures(&mut cache, "123", &mut caps); + /// assert_eq!(Some(PatternID::must(1)), caps.pattern()); + /// // Note that the only guarantee we have here is that slots 2 and 3 + /// // are set to correct values. The contents of the first two slots are + /// // unspecified since the 0th pattern did not match. + /// let expected = &[ + /// None, + /// None, + /// NonMaxUsize::new(0), + /// NonMaxUsize::new(3), + /// ]; + /// assert_eq!(expected, caps.slots()); + /// + /// # Ok::<(), Box>(()) + /// ``` + #[inline] + pub fn slots(&self) -> &[Option] { + &self.slots + } + + /// Returns the underlying slots as a mutable slice, where each slot stores + /// a single offset. + /// + /// This tends to be most useful for regex engine implementations for + /// writing offsets for matching capturing groups to slots. + /// + /// See [`Captures::slots`] for more information about slots. + #[inline] + pub fn slots_mut(&mut self) -> &mut [Option] { + &mut self.slots + } +} + +impl core::fmt::Debug for Captures { + fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { + let mut dstruct = f.debug_struct("Captures"); + dstruct.field("pid", &self.pid); + if let Some(pid) = self.pid { + dstruct.field("spans", &CapturesDebugMap { pid, caps: self }); + } + dstruct.finish() + } +} + +/// A little helper type to provide a nice map-like debug representation for +/// our capturing group spans. 
+struct CapturesDebugMap<'a> { + pid: PatternID, + caps: &'a Captures, +} + +impl<'a> core::fmt::Debug for CapturesDebugMap<'a> { + fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { + struct Key<'a>(usize, Option<&'a str>); + + impl<'a> core::fmt::Debug for Key<'a> { + fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { + write!(f, "{}", self.0)?; + if let Some(name) = self.1 { + write!(f, "/{name:?}")?; + } + Ok(()) + } + } + + let mut map = f.debug_map(); + let names = self.caps.group_info().pattern_names(self.pid); + for (group_index, maybe_name) in names.enumerate() { + let key = Key(group_index, maybe_name); + match self.caps.get_group(group_index) { + None => map.entry(&key, &None::<()>), + Some(span) => map.entry(&key, &span), + }; + } + map.finish() + } +} + +/// An iterator over all capturing groups in a `Captures` value. +/// +/// This iterator includes capturing groups that did not participate in a +/// match. See the [`Captures::iter`] method documentation for more details +/// and examples. +/// +/// The lifetime parameter `'a` refers to the lifetime of the underlying +/// `Captures` value. +#[derive(Clone, Debug)] +pub struct CapturesPatternIter<'a> { + caps: &'a Captures, + names: core::iter::Enumerate>, +} + +impl<'a> Iterator for CapturesPatternIter<'a> { + type Item = Option; + + fn next(&mut self) -> Option> { + let (group_index, _) = self.names.next()?; + Some(self.caps.get_group(group_index)) + } + + fn size_hint(&self) -> (usize, Option) { + self.names.size_hint() + } + + fn count(self) -> usize { + self.names.count() + } +} + +impl<'a> ExactSizeIterator for CapturesPatternIter<'a> {} +impl<'a> core::iter::FusedIterator for CapturesPatternIter<'a> {} + +/// Represents information about capturing groups in a compiled regex. +/// +/// The information encapsulated by this type consists of the following. For +/// each pattern: +/// +/// * A map from every capture group name to its corresponding capture group +/// index. +/// * A map from every capture group index to its corresponding capture group +/// name. +/// * A map from capture group index to its corresponding slot index. A slot +/// refers to one half of a capturing group. That is, a capture slot is either +/// the start or end of a capturing group. A slot is usually the mechanism +/// by which a regex engine records offsets for each capturing group during a +/// search. +/// +/// A `GroupInfo` uses reference counting internally and is thus cheap to +/// clone. +/// +/// # Mapping from capture groups to slots +/// +/// One of the main responsibilities of a `GroupInfo` is to build a mapping +/// from `(PatternID, u32)` (where the `u32` is a capture index) to something +/// called a "slot." As mentioned above, a slot refers to one half of a +/// capturing group. Both combined provide the start and end offsets of +/// a capturing group that participated in a match. +/// +/// **The mapping between group indices and slots is an API guarantee.** That +/// is, the mapping won't change within a semver compatible release. +/// +/// Slots exist primarily because this is a convenient mechanism by which +/// regex engines report group offsets at search time. For example, the +/// [`nfa::thompson::State::Capture`](crate::nfa::thompson::State::Capture) +/// NFA state includes the slot index. When a regex engine transitions through +/// this state, it will likely use the slot index to write the current haystack +/// offset to some region of memory. 
When a match is found, those slots are +/// then reported to the caller, typically via a convenient abstraction like a +/// [`Captures`] value. +/// +/// Because this crate provides first class support for multi-pattern regexes, +/// and because of some performance related reasons, the mapping between +/// capturing groups and slots is a little complex. However, in the case of a +/// single pattern, the mapping can be described very simply: for all capture +/// group indices `i`, its corresponding slots are at `i * 2` and `i * 2 + 1`. +/// Notice that the pattern ID isn't involved at all here, because it only +/// applies to a single-pattern regex, it is therefore always `0`. +/// +/// In the multi-pattern case, the mapping is a bit more complicated. To talk +/// about it, we must define what we mean by "implicit" vs "explicit" +/// capturing groups: +/// +/// * An **implicit** capturing group refers to the capturing group that is +/// present for every pattern automatically, and corresponds to the overall +/// match of a pattern. Every pattern has precisely one implicit capturing +/// group. It is always unnamed and it always corresponds to the capture group +/// index `0`. +/// * An **explicit** capturing group refers to any capturing group that +/// appears in the concrete syntax of the pattern. (Or, if an NFA was hand +/// built without any concrete syntax, it refers to any capturing group with an +/// index greater than `0`.) +/// +/// Some examples: +/// +/// * `\w+` has one implicit capturing group and zero explicit capturing +/// groups. +/// * `(\w+)` has one implicit group and one explicit group. +/// * `foo(\d+)(?:\pL+)(\d+)` has one implicit group and two explicit groups. +/// +/// Turning back to the slot mapping, we can now state it as follows: +/// +/// * Given a pattern ID `pid`, the slots for its implicit group are always +/// at `pid * 2` and `pid * 2 + 1`. +/// * Given a pattern ID `0`, the slots for its explicit groups start +/// at `group_info.pattern_len() * 2`. +/// * Given a pattern ID `pid > 0`, the slots for its explicit groups start +/// immediately following where the slots for the explicit groups of `pid - 1` +/// end. +/// +/// In particular, while there is a concrete formula one can use to determine +/// where the slots for the implicit group of any pattern are, there is no +/// general formula for determining where the slots for explicit capturing +/// groups are. This is because each pattern can contain a different number +/// of groups. +/// +/// The intended way of getting the slots for a particular capturing group +/// (whether implicit or explicit) is via the [`GroupInfo::slot`] or +/// [`GroupInfo::slots`] method. +/// +/// See below for a concrete example of how capturing groups get mapped to +/// slots. +/// +/// # Example +/// +/// This example shows how to build a new `GroupInfo` and query it for +/// information. +/// +/// ``` +/// use regex_automata::util::{captures::GroupInfo, primitives::PatternID}; +/// +/// let info = GroupInfo::new(vec![ +/// vec![None, Some("foo")], +/// vec![None], +/// vec![None, None, None, Some("bar"), None], +/// vec![None, None, Some("foo")], +/// ])?; +/// // The number of patterns being tracked. +/// assert_eq!(4, info.pattern_len()); +/// // We can query the number of groups for any pattern. 
+/// assert_eq!(2, info.group_len(PatternID::must(0))); +/// assert_eq!(1, info.group_len(PatternID::must(1))); +/// assert_eq!(5, info.group_len(PatternID::must(2))); +/// assert_eq!(3, info.group_len(PatternID::must(3))); +/// // An invalid pattern always has zero groups. +/// assert_eq!(0, info.group_len(PatternID::must(999))); +/// // 2 slots per group +/// assert_eq!(22, info.slot_len()); +/// +/// // We can map a group index for a particular pattern to its name, if +/// // one exists. +/// assert_eq!(Some("foo"), info.to_name(PatternID::must(3), 2)); +/// assert_eq!(None, info.to_name(PatternID::must(2), 4)); +/// // Or map a name to its group index. +/// assert_eq!(Some(1), info.to_index(PatternID::must(0), "foo")); +/// assert_eq!(Some(2), info.to_index(PatternID::must(3), "foo")); +/// +/// # Ok::<(), Box>(()) +/// ``` +/// +/// # Example: mapping from capture groups to slots +/// +/// This example shows the specific mapping from capture group indices for +/// each pattern to their corresponding slots. The slot values shown in this +/// example are considered an API guarantee. +/// +/// ``` +/// use regex_automata::util::{captures::GroupInfo, primitives::PatternID}; +/// +/// let info = GroupInfo::new(vec![ +/// vec![None, Some("foo")], +/// vec![None], +/// vec![None, None, None, Some("bar"), None], +/// vec![None, None, Some("foo")], +/// ])?; +/// +/// // We first show the slots for each pattern's implicit group. +/// assert_eq!(Some((0, 1)), info.slots(PatternID::must(0), 0)); +/// assert_eq!(Some((2, 3)), info.slots(PatternID::must(1), 0)); +/// assert_eq!(Some((4, 5)), info.slots(PatternID::must(2), 0)); +/// assert_eq!(Some((6, 7)), info.slots(PatternID::must(3), 0)); +/// +/// // And now we show the slots for each pattern's explicit group. +/// assert_eq!(Some((8, 9)), info.slots(PatternID::must(0), 1)); +/// assert_eq!(Some((10, 11)), info.slots(PatternID::must(2), 1)); +/// assert_eq!(Some((12, 13)), info.slots(PatternID::must(2), 2)); +/// assert_eq!(Some((14, 15)), info.slots(PatternID::must(2), 3)); +/// assert_eq!(Some((16, 17)), info.slots(PatternID::must(2), 4)); +/// assert_eq!(Some((18, 19)), info.slots(PatternID::must(3), 1)); +/// assert_eq!(Some((20, 21)), info.slots(PatternID::must(3), 2)); +/// +/// // Asking for the slots for an invalid pattern ID or even for an invalid +/// // group index for a specific pattern will return None. So for example, +/// // you're guaranteed to not get the slots for a different pattern than the +/// // one requested. +/// assert_eq!(None, info.slots(PatternID::must(5), 0)); +/// assert_eq!(None, info.slots(PatternID::must(1), 1)); +/// +/// # Ok::<(), Box>(()) +/// ``` +#[derive(Clone, Debug, Default)] +pub struct GroupInfo(Arc); + +impl GroupInfo { + /// Creates a new group info from a sequence of patterns, where each + /// sequence of patterns yields a sequence of possible group names. The + /// index of each pattern in the sequence corresponds to its `PatternID`, + /// and the index of each group in each pattern's sequence corresponds to + /// its corresponding group index. + /// + /// While this constructor is very generic and therefore perhaps hard to + /// chew on, an example of a valid concrete type that can be passed to + /// this constructor is `Vec>>`. The outer `Vec` + /// corresponds to the patterns, i.e., one `Vec>` per + /// pattern. The inner `Vec` corresponds to the capturing groups for + /// each pattern. The `Option` corresponds to the name of the + /// capturing group, if present. 
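+    ///
+    /// As a rough sketch of that concrete shape (this snippet is only an
+    /// illustration assuming the `Vec<Vec<Option<String>>>` form described
+    /// above, not an exhaustive demonstration of the constructor):
+    ///
+    /// ```
+    /// use regex_automata::util::{captures::GroupInfo, primitives::PatternID};
+    ///
+    /// // One pattern: the implicit group plus one named explicit group.
+    /// let groups: Vec<Vec<Option<String>>> =
+    ///     vec![vec![None, Some("word".to_string())]];
+    /// let info = GroupInfo::new(groups)?;
+    /// assert_eq!(1, info.pattern_len());
+    /// assert_eq!(2, info.group_len(PatternID::ZERO));
+    /// assert_eq!(Some(1), info.to_index(PatternID::ZERO, "word"));
+    ///
+    /// # Ok::<(), Box<dyn std::error::Error>>(())
+    /// ```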
+ /// + /// It is legal to pass an empty iterator to this constructor. It will + /// return an empty group info with zero slots. An empty group info is + /// useful for cases where you have no patterns or for cases where slots + /// aren't being used at all (e.g., for most DFAs in this crate). + /// + /// # Errors + /// + /// This constructor returns an error if the given capturing groups are + /// invalid in some way. Those reasons include, but are not necessarily + /// limited to: + /// + /// * Too many patterns (i.e., `PatternID` would overflow). + /// * Too many capturing groups (e.g., `u32` would overflow). + /// * A pattern is given that has no capturing groups. (All patterns must + /// have at least an implicit capturing group at index `0`.) + /// * The capturing group at index `0` has a name. It must be unnamed. + /// * There are duplicate capturing group names within the same pattern. + /// (Multiple capturing groups with the same name may exist, but they + /// must be in different patterns.) + /// + /// An example below shows how to trigger some of the above error + /// conditions. + /// + /// # Example + /// + /// This example shows how to build a new `GroupInfo` and query it for + /// information. + /// + /// ``` + /// use regex_automata::util::captures::GroupInfo; + /// + /// let info = GroupInfo::new(vec![ + /// vec![None, Some("foo")], + /// vec![None], + /// vec![None, None, None, Some("bar"), None], + /// vec![None, None, Some("foo")], + /// ])?; + /// // The number of patterns being tracked. + /// assert_eq!(4, info.pattern_len()); + /// // 2 slots per group + /// assert_eq!(22, info.slot_len()); + /// + /// # Ok::<(), Box>(()) + /// ``` + /// + /// # Example: empty `GroupInfo` + /// + /// This example shows how to build a new `GroupInfo` and query it for + /// information. + /// + /// ``` + /// use regex_automata::util::captures::GroupInfo; + /// + /// let info = GroupInfo::empty(); + /// // Everything is zero. + /// assert_eq!(0, info.pattern_len()); + /// assert_eq!(0, info.slot_len()); + /// + /// # Ok::<(), Box>(()) + /// ``` + /// + /// # Example: error conditions + /// + /// This example shows how to provoke some of the ways in which building + /// a `GroupInfo` can fail. + /// + /// ``` + /// use regex_automata::util::captures::GroupInfo; + /// + /// // Either the group info is empty, or all patterns must have at least + /// // one capturing group. + /// assert!(GroupInfo::new(vec![ + /// vec![None, Some("a")], // ok + /// vec![None], // ok + /// vec![], // not ok + /// ]).is_err()); + /// // Note that building an empty group info is OK. + /// assert!(GroupInfo::new(Vec::>>::new()).is_ok()); + /// + /// // The first group in each pattern must correspond to an implicit + /// // anonymous group. i.e., One that is not named. By convention, this + /// // group corresponds to the overall match of a regex. Every other group + /// // in a pattern is explicit and optional. + /// assert!(GroupInfo::new(vec![vec![Some("foo")]]).is_err()); + /// + /// // There must not be duplicate group names within the same pattern. + /// assert!(GroupInfo::new(vec![ + /// vec![None, Some("foo"), Some("foo")], + /// ]).is_err()); + /// // But duplicate names across distinct patterns is OK. + /// assert!(GroupInfo::new(vec![ + /// vec![None, Some("foo")], + /// vec![None, Some("foo")], + /// ]).is_ok()); + /// + /// # Ok::<(), Box>(()) + /// ``` + /// + /// There are other ways for building a `GroupInfo` to fail but are + /// difficult to show. 
For example, if the number of patterns given would + /// overflow `PatternID`. + pub fn new(pattern_groups: P) -> Result + where + P: IntoIterator, + G: IntoIterator>, + N: AsRef, + { + let mut group_info = GroupInfoInner { + slot_ranges: vec![], + name_to_index: vec![], + index_to_name: vec![], + memory_extra: 0, + }; + for (pattern_index, groups) in pattern_groups.into_iter().enumerate() { + // If we can't convert the pattern index to an ID, then the caller + // tried to build capture info for too many patterns. + let pid = PatternID::new(pattern_index) + .map_err(GroupInfoError::too_many_patterns)?; + + let mut groups_iter = groups.into_iter().enumerate(); + match groups_iter.next() { + None => return Err(GroupInfoError::missing_groups(pid)), + Some((_, Some(_))) => { + return Err(GroupInfoError::first_must_be_unnamed(pid)) + } + Some((_, None)) => {} + } + group_info.add_first_group(pid); + // Now iterate over the rest, which correspond to all of the + // (conventionally) explicit capture groups in a regex pattern. + for (group_index, maybe_name) in groups_iter { + // Just like for patterns, if the group index can't be + // converted to a "small" index, then the caller has given too + // many groups for a particular pattern. + let group = SmallIndex::new(group_index).map_err(|_| { + GroupInfoError::too_many_groups(pid, group_index) + })?; + group_info.add_explicit_group(pid, group, maybe_name)?; + } + } + group_info.fixup_slot_ranges()?; + group_info.slot_ranges.shrink_to_fit(); + group_info.name_to_index.shrink_to_fit(); + group_info.index_to_name.shrink_to_fit(); + Ok(GroupInfo(Arc::new(group_info))) + } + + /// This creates an empty `GroupInfo`. + /// + /// This is a convenience routine for calling `GroupInfo::new` with an + /// iterator that yields no elements. + /// + /// # Example + /// + /// This example shows how to build a new empty `GroupInfo` and query it + /// for information. + /// + /// ``` + /// use regex_automata::util::captures::GroupInfo; + /// + /// let info = GroupInfo::empty(); + /// // Everything is zero. + /// assert_eq!(0, info.pattern_len()); + /// assert_eq!(0, info.all_group_len()); + /// assert_eq!(0, info.slot_len()); + /// + /// # Ok::<(), Box>(()) + /// ``` + pub fn empty() -> GroupInfo { + GroupInfo::new(core::iter::empty::<[Option<&str>; 0]>()) + .expect("empty group info is always valid") + } + + /// Return the capture group index corresponding to the given name in the + /// given pattern. If no such capture group name exists in the given + /// pattern, then this returns `None`. + /// + /// If the given pattern ID is invalid, then this returns `None`. + /// + /// This also returns `None` for all inputs if these captures are empty + /// (e.g., built from an empty [`GroupInfo`]). To check whether captures + /// are present for a specific pattern, use [`GroupInfo::group_len`]. + /// + /// # Example + /// + /// This example shows how to find the capture index for the given pattern + /// and group name. + /// + /// Remember that capture indices are relative to the pattern, such that + /// the same capture index value may refer to different capturing groups + /// for distinct patterns. 
+ /// + /// ``` + /// # if cfg!(miri) { return Ok(()); } // miri takes too long + /// use regex_automata::{nfa::thompson::NFA, PatternID}; + /// + /// let (pid0, pid1) = (PatternID::must(0), PatternID::must(1)); + /// + /// let nfa = NFA::new_many(&[ + /// r"a(?P\w+)z(?P\s+)", + /// r"a(?P\d+)z", + /// ])?; + /// let groups = nfa.group_info(); + /// assert_eq!(Some(2), groups.to_index(pid0, "foo")); + /// // Recall that capture index 0 is always unnamed and refers to the + /// // entire pattern. So the first capturing group present in the pattern + /// // itself always starts at index 1. + /// assert_eq!(Some(1), groups.to_index(pid1, "foo")); + /// + /// // And if a name does not exist for a particular pattern, None is + /// // returned. + /// assert!(groups.to_index(pid0, "quux").is_some()); + /// assert!(groups.to_index(pid1, "quux").is_none()); + /// + /// # Ok::<(), Box>(()) + /// ``` + #[inline] + pub fn to_index(&self, pid: PatternID, name: &str) -> Option { + let indices = self.0.name_to_index.get(pid.as_usize())?; + indices.get(name).cloned().map(|i| i.as_usize()) + } + + /// Return the capture name for the given index and given pattern. If the + /// corresponding group does not have a name, then this returns `None`. + /// + /// If the pattern ID is invalid, then this returns `None`. + /// + /// If the group index is invalid for the given pattern, then this returns + /// `None`. A group `index` is valid for a pattern `pid` in an `nfa` if and + /// only if `index < nfa.pattern_capture_len(pid)`. + /// + /// This also returns `None` for all inputs if these captures are empty + /// (e.g., built from an empty [`GroupInfo`]). To check whether captures + /// are present for a specific pattern, use [`GroupInfo::group_len`]. + /// + /// # Example + /// + /// This example shows how to find the capture group name for the given + /// pattern and group index. + /// + /// ``` + /// # if cfg!(miri) { return Ok(()); } // miri takes too long + /// use regex_automata::{nfa::thompson::NFA, PatternID}; + /// + /// let (pid0, pid1) = (PatternID::must(0), PatternID::must(1)); + /// + /// let nfa = NFA::new_many(&[ + /// r"a(?P\w+)z(\s+)x(\d+)", + /// r"a(\d+)z(?P\s+)", + /// ])?; + /// let groups = nfa.group_info(); + /// assert_eq!(None, groups.to_name(pid0, 0)); + /// assert_eq!(Some("foo"), groups.to_name(pid0, 1)); + /// assert_eq!(None, groups.to_name(pid0, 2)); + /// assert_eq!(None, groups.to_name(pid0, 3)); + /// + /// assert_eq!(None, groups.to_name(pid1, 0)); + /// assert_eq!(None, groups.to_name(pid1, 1)); + /// assert_eq!(Some("foo"), groups.to_name(pid1, 2)); + /// // '3' is not a valid capture index for the second pattern. + /// assert_eq!(None, groups.to_name(pid1, 3)); + /// + /// # Ok::<(), Box>(()) + /// ``` + #[inline] + pub fn to_name(&self, pid: PatternID, group_index: usize) -> Option<&str> { + let pattern_names = self.0.index_to_name.get(pid.as_usize())?; + pattern_names.get(group_index)?.as_deref() + } + + /// Return an iterator of all capture groups and their names (if present) + /// for a particular pattern. + /// + /// If the given pattern ID is invalid or if this `GroupInfo` is empty, + /// then the iterator yields no elements. + /// + /// The number of elements yielded by this iterator is always equal to + /// the result of calling [`GroupInfo::group_len`] with the same + /// `PatternID`. + /// + /// # Example + /// + /// This example shows how to get a list of all capture group names for + /// a particular pattern. 
+ /// + /// ``` + /// use regex_automata::{nfa::thompson::NFA, PatternID}; + /// + /// let nfa = NFA::new(r"(a)(?Pb)(c)(d)(?Pe)")?; + /// // The first is the implicit group that is always unnamed. The next + /// // 5 groups are the explicit groups found in the concrete syntax above. + /// let expected = vec![None, None, Some("foo"), None, None, Some("bar")]; + /// let got: Vec> = + /// nfa.group_info().pattern_names(PatternID::ZERO).collect(); + /// assert_eq!(expected, got); + /// + /// // Using an invalid pattern ID will result in nothing yielded. + /// let got = nfa.group_info().pattern_names(PatternID::must(999)).count(); + /// assert_eq!(0, got); + /// + /// # Ok::<(), Box>(()) + /// ``` + #[inline] + pub fn pattern_names(&self, pid: PatternID) -> GroupInfoPatternNames<'_> { + GroupInfoPatternNames { + it: self + .0 + .index_to_name + .get(pid.as_usize()) + .map(|indices| indices.iter()) + .unwrap_or([].iter()), + } + } + + /// Return an iterator of all capture groups for all patterns supported by + /// this `GroupInfo`. Each item yielded is a triple of the group's pattern + /// ID, index in the pattern and the group's name, if present. + /// + /// # Example + /// + /// This example shows how to get a list of all capture groups found in + /// one NFA, potentially spanning multiple patterns. + /// + /// ``` + /// use regex_automata::{nfa::thompson::NFA, PatternID}; + /// + /// let nfa = NFA::new_many(&[ + /// r"(?Pa)", + /// r"a", + /// r"(a)", + /// ])?; + /// let expected = vec![ + /// (PatternID::must(0), 0, None), + /// (PatternID::must(0), 1, Some("foo")), + /// (PatternID::must(1), 0, None), + /// (PatternID::must(2), 0, None), + /// (PatternID::must(2), 1, None), + /// ]; + /// let got: Vec<(PatternID, usize, Option<&str>)> = + /// nfa.group_info().all_names().collect(); + /// assert_eq!(expected, got); + /// + /// # Ok::<(), Box>(()) + /// ``` + /// + /// Unlike other capturing group related routines, this routine doesn't + /// panic even if captures aren't enabled on this NFA: + /// + /// ``` + /// use regex_automata::nfa::thompson::{NFA, WhichCaptures}; + /// + /// let nfa = NFA::compiler() + /// .configure(NFA::config().which_captures(WhichCaptures::None)) + /// .build_many(&[ + /// r"(?Pa)", + /// r"a", + /// r"(a)", + /// ])?; + /// // When captures aren't enabled, there's nothing to return. + /// assert_eq!(0, nfa.group_info().all_names().count()); + /// + /// # Ok::<(), Box>(()) + /// ``` + #[inline] + pub fn all_names(&self) -> GroupInfoAllNames<'_> { + GroupInfoAllNames { + group_info: self, + pids: PatternID::iter(self.pattern_len()), + current_pid: None, + names: None, + } + } + + /// Returns the starting and ending slot corresponding to the given + /// capturing group for the given pattern. The ending slot is always one + /// more than the starting slot returned. + /// + /// Note that this is like [`GroupInfo::slot`], except that it also returns + /// the ending slot value for convenience. + /// + /// If either the pattern ID or the capture index is invalid, then this + /// returns None. + /// + /// # Example + /// + /// This example shows that the starting slots for the first capturing + /// group of each pattern are distinct. + /// + /// ``` + /// use regex_automata::{nfa::thompson::NFA, PatternID}; + /// + /// let nfa = NFA::new_many(&["a", "b"])?; + /// assert_ne!( + /// nfa.group_info().slots(PatternID::must(0), 0), + /// nfa.group_info().slots(PatternID::must(1), 0), + /// ); + /// + /// // Also, the start and end slot values are never equivalent. 
+    /// let (start, end) = nfa.group_info().slots(PatternID::ZERO, 0).unwrap();
+    /// assert_ne!(start, end);
+    ///
+    /// # Ok::<(), Box<dyn std::error::Error>>(())
+    /// ```
+    #[inline]
+    pub fn slots(
+        &self,
+        pid: PatternID,
+        group_index: usize,
+    ) -> Option<(usize, usize)> {
+        // Since 'slot' only ever returns valid starting slots, we know that
+        // there must also be an end slot and that end slot is always one more
+        // than the start slot.
+        self.slot(pid, group_index).map(|start| (start, start + 1))
+    }
+
+    /// Returns the starting slot corresponding to the given capturing group
+    /// for the given pattern. The ending slot is always one more than the
+    /// value returned.
+    ///
+    /// If either the pattern ID or the capture index is invalid, then this
+    /// returns None.
+    ///
+    /// # Example
+    ///
+    /// This example shows that the starting slots for the first capturing
+    /// group of each pattern are distinct.
+    ///
+    /// ```
+    /// use regex_automata::{nfa::thompson::NFA, PatternID};
+    ///
+    /// let nfa = NFA::new_many(&["a", "b"])?;
+    /// assert_ne!(
+    ///     nfa.group_info().slot(PatternID::must(0), 0),
+    ///     nfa.group_info().slot(PatternID::must(1), 0),
+    /// );
+    ///
+    /// # Ok::<(), Box<dyn std::error::Error>>(())
+    /// ```
+    #[inline]
+    pub fn slot(&self, pid: PatternID, group_index: usize) -> Option<usize> {
+        if group_index >= self.group_len(pid) {
+            return None;
+        }
+        // At this point, we know that 'pid' refers to a real pattern and that
+        // 'group_index' refers to a real group. We therefore also know that
+        // the pattern and group can be combined to return a correct slot.
+        // That's why we don't need to use checked arithmetic below.
+        if group_index == 0 {
+            Some(pid.as_usize() * 2)
+        } else {
+            // As above, we don't need to check that our slot is less than the
+            // end of our range since we already know the group index is a
+            // valid index for the given pattern.
+            let (start, _) = self.0.slot_ranges[pid];
+            Some(start.as_usize() + ((group_index - 1) * 2))
+        }
+    }
+
+    /// Returns the total number of patterns in this `GroupInfo`.
+    ///
+    /// This may return zero if the `GroupInfo` was constructed with no
+    /// patterns.
+    ///
+    /// This is guaranteed to be no bigger than [`PatternID::LIMIT`] because
+    /// `GroupInfo` construction will fail if too many patterns are added.
+    ///
+    /// # Example
+    ///
+    /// ```
+    /// use regex_automata::nfa::thompson::NFA;
+    ///
+    /// let nfa = NFA::new_many(&["[0-9]+", "[a-z]+", "[A-Z]+"])?;
+    /// assert_eq!(3, nfa.group_info().pattern_len());
+    ///
+    /// let nfa = NFA::never_match();
+    /// assert_eq!(0, nfa.group_info().pattern_len());
+    ///
+    /// let nfa = NFA::always_match();
+    /// assert_eq!(1, nfa.group_info().pattern_len());
+    ///
+    /// # Ok::<(), Box<dyn std::error::Error>>(())
+    /// ```
+    #[inline]
+    pub fn pattern_len(&self) -> usize {
+        self.0.pattern_len()
+    }
+
+    /// Return the number of capture groups in a pattern.
+    ///
+    /// If the pattern ID is invalid, then this returns `0`.
+    ///
+    /// # Example
+    ///
+    /// This example shows how the values returned by this routine may vary
+    /// for different patterns and NFA configurations.
+    ///
+    /// ```
+    /// use regex_automata::{nfa::thompson::{NFA, WhichCaptures}, PatternID};
+    ///
+    /// let nfa = NFA::new(r"(a)(b)(c)")?;
+    /// // There are 3 explicit groups in the pattern's concrete syntax and
+    /// // 1 unnamed and implicit group spanning the entire pattern.
+    /// assert_eq!(4, nfa.group_info().group_len(PatternID::ZERO));
+    ///
+    /// let nfa = NFA::new(r"abc")?;
+    /// // There is just the unnamed implicit group.
+    /// assert_eq!(1, nfa.group_info().group_len(PatternID::ZERO));
+    ///
+    /// let nfa = NFA::compiler()
+    ///     .configure(NFA::config().which_captures(WhichCaptures::None))
+    ///     .build(r"abc")?;
+    /// // We disabled capturing groups, so there are none.
+    /// assert_eq!(0, nfa.group_info().group_len(PatternID::ZERO));
+    ///
+    /// let nfa = NFA::compiler()
+    ///     .configure(NFA::config().which_captures(WhichCaptures::None))
+    ///     .build(r"(a)(b)(c)")?;
+    /// // We disabled capturing groups, so there are none, even if there are
+    /// // explicit groups in the concrete syntax.
+    /// assert_eq!(0, nfa.group_info().group_len(PatternID::ZERO));
+    ///
+    /// # Ok::<(), Box<dyn std::error::Error>>(())
+    /// ```
+    #[inline]
+    pub fn group_len(&self, pid: PatternID) -> usize {
+        self.0.group_len(pid)
+    }
+
+    /// Return the total number of capture groups across all patterns.
+    ///
+    /// This includes implicit groups that represent the entire match of a
+    /// pattern.
+    ///
+    /// # Example
+    ///
+    /// This example shows how the values returned by this routine may vary
+    /// for different patterns and NFA configurations.
+    ///
+    /// ```
+    /// use regex_automata::{nfa::thompson::{NFA, WhichCaptures}, PatternID};
+    ///
+    /// let nfa = NFA::new(r"(a)(b)(c)")?;
+    /// // There are 3 explicit groups in the pattern's concrete syntax and
+    /// // 1 unnamed and implicit group spanning the entire pattern.
+    /// assert_eq!(4, nfa.group_info().all_group_len());
+    ///
+    /// let nfa = NFA::new(r"abc")?;
+    /// // There is just the unnamed implicit group.
+    /// assert_eq!(1, nfa.group_info().all_group_len());
+    ///
+    /// let nfa = NFA::new_many(&["(a)", "b", "(c)"])?;
+    /// // Each pattern has one implicit group, and two
+    /// // patterns have one explicit group each.
+    /// assert_eq!(5, nfa.group_info().all_group_len());
+    ///
+    /// let nfa = NFA::compiler()
+    ///     .configure(NFA::config().which_captures(WhichCaptures::None))
+    ///     .build(r"abc")?;
+    /// // We disabled capturing groups, so there are none.
+    /// assert_eq!(0, nfa.group_info().all_group_len());
+    ///
+    /// let nfa = NFA::compiler()
+    ///     .configure(NFA::config().which_captures(WhichCaptures::None))
+    ///     .build(r"(a)(b)(c)")?;
+    /// // We disabled capturing groups, so there are none, even if there are
+    /// // explicit groups in the concrete syntax.
+    /// assert_eq!(0, nfa.group_info().group_len(PatternID::ZERO));
+    ///
+    /// # Ok::<(), Box<dyn std::error::Error>>(())
+    /// ```
+    #[inline]
+    pub fn all_group_len(&self) -> usize {
+        self.slot_len() / 2
+    }
+
+    /// Returns the total number of slots in this `GroupInfo` across all
+    /// patterns.
+    ///
+    /// The total number of slots is always twice the total number of capturing
+    /// groups, including both implicit and explicit groups.
+    ///
+    /// # Example
+    ///
+    /// This example shows the relationship between the number of capturing
+    /// groups and slots.
+    ///
+    /// ```
+    /// use regex_automata::util::captures::GroupInfo;
+    ///
+    /// // There are 11 total groups here.
+    /// let info = GroupInfo::new(vec![
+    ///     vec![None, Some("foo")],
+    ///     vec![None],
+    ///     vec![None, None, None, Some("bar"), None],
+    ///     vec![None, None, Some("foo")],
+    /// ])?;
+    /// // 2 slots per group gives us 11*2=22 slots.
+    /// assert_eq!(22, info.slot_len());
+    ///
+    /// # Ok::<(), Box<dyn std::error::Error>>(())
+    /// ```
+    #[inline]
+    pub fn slot_len(&self) -> usize {
+        self.0.small_slot_len().as_usize()
+    }
+
+    /// Returns the total number of slots for implicit capturing groups.
+    ///
+    /// This is like [`GroupInfo::slot_len`], except it doesn't include the
+    /// explicit slots for each pattern. Since there are always exactly 2
+    /// implicit slots for each pattern, the number of implicit slots is always
+    /// equal to twice the number of patterns.
+    ///
+    /// # Example
+    ///
+    /// This example shows the relationship between the number of capturing
+    /// groups, implicit slots and explicit slots.
+    ///
+    /// ```
+    /// use regex_automata::util::captures::GroupInfo;
+    ///
+    /// // There are 3 total groups here.
+    /// let info = GroupInfo::new(vec![vec![None, Some("foo"), Some("bar")]])?;
+    /// // 2 slots per group gives us 3*2=6 slots.
+    /// assert_eq!(6, info.slot_len());
+    /// // 2 implicit slots per pattern gives us 2 implicit slots since there
+    /// // is 1 pattern.
+    /// assert_eq!(2, info.implicit_slot_len());
+    /// // 2 explicit capturing groups gives us 2*2=4 explicit slots.
+    /// assert_eq!(4, info.explicit_slot_len());
+    ///
+    /// # Ok::<(), Box<dyn std::error::Error>>(())
+    /// ```
+    #[inline]
+    pub fn implicit_slot_len(&self) -> usize {
+        self.pattern_len() * 2
+    }
+
+    /// Returns the total number of slots for explicit capturing groups.
+    ///
+    /// This is like [`GroupInfo::slot_len`], except it doesn't include the
+    /// implicit slots for each pattern. (There are always 2 implicit slots for
+    /// each pattern.)
+    ///
+    /// For a non-empty `GroupInfo`, it is always the case that `slot_len` is
+    /// strictly greater than `explicit_slot_len`. For an empty `GroupInfo`,
+    /// both the total number of slots and the number of explicit slots are
+    /// `0`.
+    ///
+    /// # Example
+    ///
+    /// This example shows the relationship between the number of capturing
+    /// groups, implicit slots and explicit slots.
+    ///
+    /// ```
+    /// use regex_automata::util::captures::GroupInfo;
+    ///
+    /// // There are 3 total groups here.
+    /// let info = GroupInfo::new(vec![vec![None, Some("foo"), Some("bar")]])?;
+    /// // 2 slots per group gives us 3*2=6 slots.
+    /// assert_eq!(6, info.slot_len());
+    /// // 2 implicit slots per pattern gives us 2 implicit slots since there
+    /// // is 1 pattern.
+    /// assert_eq!(2, info.implicit_slot_len());
+    /// // 2 explicit capturing groups gives us 2*2=4 explicit slots.
+    /// assert_eq!(4, info.explicit_slot_len());
+    ///
+    /// # Ok::<(), Box<dyn std::error::Error>>(())
+    /// ```
+    #[inline]
+    pub fn explicit_slot_len(&self) -> usize {
+        self.slot_len().saturating_sub(self.implicit_slot_len())
+    }
+
+    /// Returns the memory usage, in bytes, of this `GroupInfo`.
+    ///
+    /// This does **not** include the stack size used up by this `GroupInfo`.
+    /// To compute that, use `std::mem::size_of::<GroupInfo>()`.
+    #[inline]
+    pub fn memory_usage(&self) -> usize {
+        use core::mem::size_of as s;
+
+        s::<GroupInfoInner>()
+            + self.0.slot_ranges.len() * s::<(SmallIndex, SmallIndex)>()
+            + self.0.name_to_index.len() * s::<CaptureNameMap>()
+            + self.0.index_to_name.len() * s::<Vec<Option<Arc<str>>>>()
+            + self.0.memory_extra
+    }
+}
+
+/// A map from capture group name to its corresponding capture group index.
+///
+/// This type is actually wrapped inside a Vec indexed by pattern ID on a
+/// `GroupInfo`, since multiple patterns may have the same capture group name.
+/// That is, each pattern gets its own namespace of capture group names.
+///
+/// Perhaps a more memory efficient representation would be
+/// HashMap<(PatternID, Arc<str>), usize>, but this makes it difficult to look
+/// up a capture index by name without producing a `Arc<str>`, which requires
+/// an allocation. To fix this, I think we'd need to define our own unsized
+/// type or something?
Anyway, I didn't give this much thought since it +/// probably doesn't matter much in the grand scheme of things. But it did +/// stand out to me as mildly wasteful. +#[cfg(feature = "std")] +type CaptureNameMap = std::collections::HashMap, SmallIndex>; +#[cfg(not(feature = "std"))] +type CaptureNameMap = alloc::collections::BTreeMap, SmallIndex>; + +/// The inner guts of `GroupInfo`. This type only exists so that it can +/// be wrapped in an `Arc` to make `GroupInfo` reference counted. +#[derive(Debug, Default)] +struct GroupInfoInner { + slot_ranges: Vec<(SmallIndex, SmallIndex)>, + name_to_index: Vec, + index_to_name: Vec>>>, + memory_extra: usize, +} + +impl GroupInfoInner { + /// This adds the first unnamed group for the given pattern ID. The given + /// pattern ID must be zero if this is the first time this method is + /// called, or must be exactly one more than the pattern ID supplied to the + /// previous call to this method. (This method panics if this rule is + /// violated.) + /// + /// This can be thought of as initializing the GroupInfo state for the + /// given pattern and closing off the state for any previous pattern. + fn add_first_group(&mut self, pid: PatternID) { + assert_eq!(pid.as_usize(), self.slot_ranges.len()); + assert_eq!(pid.as_usize(), self.name_to_index.len()); + assert_eq!(pid.as_usize(), self.index_to_name.len()); + // This is the start of our slots for the explicit capturing groups. + // Note that since the slots for the 0th group for every pattern appear + // before any slots for the nth group (where n > 0) in any pattern, we + // will have to fix up the slot ranges once we know how many patterns + // we've added capture groups for. + let slot_start = self.small_slot_len(); + self.slot_ranges.push((slot_start, slot_start)); + self.name_to_index.push(CaptureNameMap::new()); + self.index_to_name.push(vec![None]); + self.memory_extra += core::mem::size_of::>>(); + } + + /// Add an explicit capturing group for the given pattern with the given + /// index. If the group has a name, then that must be given as well. + /// + /// Note that every capturing group except for the first or zeroth group is + /// explicit. + /// + /// This returns an error if adding this group would result in overflowing + /// slot indices or if a capturing group with the same name for this + /// pattern has already been added. + fn add_explicit_group>( + &mut self, + pid: PatternID, + group: SmallIndex, + maybe_name: Option, + ) -> Result<(), GroupInfoError> { + // We also need to check that the slot index generated for + // this group is also valid. Although, this is a little weird + // because we offset these indices below, at which point, we'll + // have to recheck them. Gosh this is annoying. Note that + // the '+2' below is OK because 'end' is guaranteed to be less + // than isize::MAX. + let end = &mut self.slot_ranges[pid].1; + *end = SmallIndex::new(end.as_usize() + 2).map_err(|_| { + GroupInfoError::too_many_groups(pid, group.as_usize()) + })?; + if let Some(name) = maybe_name { + let name = Arc::::from(name.as_ref()); + if self.name_to_index[pid].contains_key(&*name) { + return Err(GroupInfoError::duplicate(pid, &name)); + } + let len = name.len(); + self.name_to_index[pid].insert(Arc::clone(&name), group); + self.index_to_name[pid].push(Some(name)); + // Adds the memory used by the Arc in both maps. + self.memory_extra += + 2 * (len + core::mem::size_of::>>()); + // And also the value entry for the 'name_to_index' map. 
+ // This is probably an underestimate for 'name_to_index' since + // hashmaps/btrees likely have some non-zero overhead, but we + // assume here that they have zero overhead. + self.memory_extra += core::mem::size_of::(); + } else { + self.index_to_name[pid].push(None); + self.memory_extra += core::mem::size_of::>>(); + } + // This is a sanity assert that checks that our group index + // is in line with the number of groups added so far for this + // pattern. + assert_eq!(group.one_more(), self.group_len(pid)); + // And is also in line with the 'index_to_name' map. + assert_eq!(group.one_more(), self.index_to_name[pid].len()); + Ok(()) + } + + /// This corrects the slot ranges to account for the slots corresponding + /// to the zeroth group of each pattern. That is, every slot range is + /// offset by 'pattern_len() * 2', since each pattern uses two slots to + /// represent the zeroth group. + fn fixup_slot_ranges(&mut self) -> Result<(), GroupInfoError> { + use crate::util::primitives::IteratorIndexExt; + // Since we know number of patterns fits in PatternID and + // PatternID::MAX < isize::MAX, it follows that multiplying by 2 will + // never overflow usize. + let offset = self.pattern_len().checked_mul(2).unwrap(); + for (pid, &mut (ref mut start, ref mut end)) in + self.slot_ranges.iter_mut().with_pattern_ids() + { + let group_len = 1 + ((end.as_usize() - start.as_usize()) / 2); + let new_end = match end.as_usize().checked_add(offset) { + Some(new_end) => new_end, + None => { + return Err(GroupInfoError::too_many_groups( + pid, group_len, + )) + } + }; + *end = SmallIndex::new(new_end).map_err(|_| { + GroupInfoError::too_many_groups(pid, group_len) + })?; + // Since start <= end, if end is valid then start must be too. + *start = SmallIndex::new(start.as_usize() + offset).unwrap(); + } + Ok(()) + } + + /// Return the total number of patterns represented by this capture slot + /// info. + fn pattern_len(&self) -> usize { + self.slot_ranges.len() + } + + /// Return the total number of capturing groups for the given pattern. If + /// the given pattern isn't valid for this capture slot info, then 0 is + /// returned. + fn group_len(&self, pid: PatternID) -> usize { + let (start, end) = match self.slot_ranges.get(pid.as_usize()) { + None => return 0, + Some(range) => range, + }; + // The difference between any two SmallIndex values always fits in a + // usize since we know that SmallIndex::MAX <= isize::MAX-1. We also + // know that start<=end by construction and that the number of groups + // never exceeds SmallIndex and thus never overflows usize. + 1 + ((end.as_usize() - start.as_usize()) / 2) + } + + /// Return the total number of slots in this capture slot info as a + /// "small index." + fn small_slot_len(&self) -> SmallIndex { + // Since slots are allocated in order of pattern (starting at 0) and + // then in order of capture group, it follows that the number of slots + // is the end of the range of slots for the last pattern. This is + // true even when the last pattern has no capturing groups, since + // 'slot_ranges' will still represent it explicitly with an empty + // range. + self.slot_ranges.last().map_or(SmallIndex::ZERO, |&(_, end)| end) + } +} + +/// An error that may occur when building a `GroupInfo`. +/// +/// Building a `GroupInfo` does a variety of checks to make sure the +/// capturing groups satisfy a number of invariants. 
This includes, but is not +/// limited to, ensuring that the first capturing group is unnamed and that +/// there are no duplicate capture groups for a specific pattern. +#[derive(Clone, Debug)] +pub struct GroupInfoError { + kind: GroupInfoErrorKind, +} + +/// The kind of error that occurs when building a `GroupInfo` fails. +/// +/// We keep this un-exported because it's not clear how useful it is to +/// export it. +#[derive(Clone, Debug)] +enum GroupInfoErrorKind { + /// This occurs when too many patterns have been added. i.e., It would + /// otherwise overflow a `PatternID`. + TooManyPatterns { err: PatternIDError }, + /// This occurs when too many capturing groups have been added for a + /// particular pattern. + TooManyGroups { + /// The ID of the pattern that had too many groups. + pattern: PatternID, + /// The minimum number of groups that the caller has tried to add for + /// a pattern. + minimum: usize, + }, + /// An error that occurs when a pattern has no capture groups. Either the + /// group info must be empty, or all patterns must have at least one group + /// (corresponding to the unnamed group for the entire pattern). + MissingGroups { + /// The ID of the pattern that had no capturing groups. + pattern: PatternID, + }, + /// An error that occurs when one tries to provide a name for the capture + /// group at index 0. This capturing group must currently always be + /// unnamed. + FirstMustBeUnnamed { + /// The ID of the pattern that was found to have a named first + /// capturing group. + pattern: PatternID, + }, + /// An error that occurs when duplicate capture group names for the same + /// pattern are added. + /// + /// NOTE: At time of writing, this error can never occur if you're using + /// regex-syntax, since the parser itself will reject patterns with + /// duplicate capture group names. This error can only occur when the + /// builder is used to hand construct NFAs. + Duplicate { + /// The pattern in which the duplicate capture group name was found. + pattern: PatternID, + /// The duplicate name. + name: String, + }, +} + +impl GroupInfoError { + fn too_many_patterns(err: PatternIDError) -> GroupInfoError { + GroupInfoError { kind: GroupInfoErrorKind::TooManyPatterns { err } } + } + + fn too_many_groups(pattern: PatternID, minimum: usize) -> GroupInfoError { + GroupInfoError { + kind: GroupInfoErrorKind::TooManyGroups { pattern, minimum }, + } + } + + fn missing_groups(pattern: PatternID) -> GroupInfoError { + GroupInfoError { kind: GroupInfoErrorKind::MissingGroups { pattern } } + } + + fn first_must_be_unnamed(pattern: PatternID) -> GroupInfoError { + GroupInfoError { + kind: GroupInfoErrorKind::FirstMustBeUnnamed { pattern }, + } + } + + fn duplicate(pattern: PatternID, name: &str) -> GroupInfoError { + GroupInfoError { + kind: GroupInfoErrorKind::Duplicate { + pattern, + name: String::from(name), + }, + } + } +} + +#[cfg(feature = "std")] +impl std::error::Error for GroupInfoError { + fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { + match self.kind { + GroupInfoErrorKind::TooManyPatterns { .. } + | GroupInfoErrorKind::TooManyGroups { .. } + | GroupInfoErrorKind::MissingGroups { .. } + | GroupInfoErrorKind::FirstMustBeUnnamed { .. } + | GroupInfoErrorKind::Duplicate { .. 
} => None, + } + } +} + +impl core::fmt::Display for GroupInfoError { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + use self::GroupInfoErrorKind::*; + + match self.kind { + TooManyPatterns { ref err } => { + write!(f, "too many patterns to build capture info: {err}") + } + TooManyGroups { pattern, minimum } => { + write!( + f, + "too many capture groups (at least {}) were \ + found for pattern {}", + minimum, + pattern.as_usize() + ) + } + MissingGroups { pattern } => write!( + f, + "no capturing groups found for pattern {} \ + (either all patterns have zero groups or all patterns have \ + at least one group)", + pattern.as_usize(), + ), + FirstMustBeUnnamed { pattern } => write!( + f, + "first capture group (at index 0) for pattern {} has a name \ + (it must be unnamed)", + pattern.as_usize(), + ), + Duplicate { pattern, ref name } => write!( + f, + "duplicate capture group name '{}' found for pattern {}", + name, + pattern.as_usize(), + ), + } + } +} + +/// An iterator over capturing groups and their names for a specific pattern. +/// +/// This iterator is created by [`GroupInfo::pattern_names`]. +/// +/// The lifetime parameter `'a` refers to the lifetime of the `GroupInfo` +/// from which this iterator was created. +#[derive(Clone, Debug)] +pub struct GroupInfoPatternNames<'a> { + it: core::slice::Iter<'a, Option>>, +} + +impl GroupInfoPatternNames<'static> { + fn empty() -> GroupInfoPatternNames<'static> { + GroupInfoPatternNames { it: [].iter() } + } +} + +impl<'a> Iterator for GroupInfoPatternNames<'a> { + type Item = Option<&'a str>; + + fn next(&mut self) -> Option> { + self.it.next().map(|x| x.as_deref()) + } + + fn size_hint(&self) -> (usize, Option) { + self.it.size_hint() + } + + fn count(self) -> usize { + self.it.count() + } +} + +impl<'a> ExactSizeIterator for GroupInfoPatternNames<'a> {} +impl<'a> core::iter::FusedIterator for GroupInfoPatternNames<'a> {} + +/// An iterator over capturing groups and their names for a `GroupInfo`. +/// +/// This iterator is created by [`GroupInfo::all_names`]. +/// +/// The lifetime parameter `'a` refers to the lifetime of the `GroupInfo` +/// from which this iterator was created. +#[derive(Debug)] +pub struct GroupInfoAllNames<'a> { + group_info: &'a GroupInfo, + pids: PatternIDIter, + current_pid: Option, + names: Option>>, +} + +impl<'a> Iterator for GroupInfoAllNames<'a> { + type Item = (PatternID, usize, Option<&'a str>); + + fn next(&mut self) -> Option<(PatternID, usize, Option<&'a str>)> { + // If the group info has no captures, then we never have anything + // to yield. We need to consider this case explicitly (at time of + // writing) because 'pattern_capture_names' will panic if captures + // aren't enabled. 
+ if self.group_info.0.index_to_name.is_empty() { + return None; + } + if self.current_pid.is_none() { + self.current_pid = Some(self.pids.next()?); + } + let pid = self.current_pid.unwrap(); + if self.names.is_none() { + self.names = Some(self.group_info.pattern_names(pid).enumerate()); + } + let (group_index, name) = match self.names.as_mut().unwrap().next() { + Some((group_index, name)) => (group_index, name), + None => { + self.current_pid = None; + self.names = None; + return self.next(); + } + }; + Some((pid, group_index, name)) + } +} diff --git a/vendor/regex-automata/src/util/determinize/mod.rs b/vendor/regex-automata/src/util/determinize/mod.rs new file mode 100644 index 00000000000000..22e38c94ca4b0a --- /dev/null +++ b/vendor/regex-automata/src/util/determinize/mod.rs @@ -0,0 +1,682 @@ +/*! +This module contains types and routines for implementing determinization. + +In this crate, there are at least two places where we implement +determinization: fully ahead-of-time compiled DFAs in the `dfa` module and +lazily compiled DFAs in the `hybrid` module. The stuff in this module +corresponds to the things that are in common between these implementations. + +There are three broad things that our implementations of determinization have +in common, as defined by this module: + +* The classification of start states. That is, whether we're dealing with +word boundaries, line boundaries, etc., is all the same. This also includes +the look-behind assertions that are satisfied by each starting state +classification. +* The representation of DFA states as sets of NFA states, including +convenience types for building these DFA states that are amenable to reusing +allocations. +* Routines for the "classical" parts of determinization: computing the +epsilon closure, tracking match states (with corresponding pattern IDs, since +we support multi-pattern finite automata) and, of course, computing the +transition function between states for units of input. + +I did consider a couple of alternatives to this particular form of code reuse: + +1. Don't do any code reuse. The problem here is that we *really* want both +forms of determinization to do exactly identical things when it comes to +their handling of NFA states. While our tests generally ensure this, the code +is tricky and large enough where not reusing code is a pretty big bummer. + +2. Implement all of determinization once and make it generic over fully +compiled DFAs and lazily compiled DFAs. While I didn't actually try this +approach, my instinct is that it would be more complex than is needed here. +And the interface required would be pretty hairy. Instead, I think splitting +it into logical sub-components works better. +*/ + +use alloc::vec::Vec; + +pub(crate) use self::state::{ + State, StateBuilderEmpty, StateBuilderMatches, StateBuilderNFA, +}; + +use crate::{ + nfa::thompson, + util::{ + alphabet, + look::{Look, LookSet}, + primitives::StateID, + search::MatchKind, + sparse_set::{SparseSet, SparseSets}, + start::Start, + utf8, + }, +}; + +mod state; + +/// Compute the set of all reachable NFA states, including the full epsilon +/// closure, from a DFA state for a single unit of input. The set of reachable +/// states is returned as a `StateBuilderNFA`. The `StateBuilderNFA` returned +/// also includes any look-behind assertions satisfied by `unit`, in addition +/// to whether it is a match state. For multi-pattern DFAs, the builder will +/// also include the pattern IDs that match (in the order seen). 
+/// +/// `nfa` must be able to resolve any NFA state in `state` and any NFA state +/// reachable via the epsilon closure of any NFA state in `state`. `sparses` +/// must have capacity equivalent to `nfa.len()`. +/// +/// `match_kind` should correspond to the match semantics implemented by the +/// DFA being built. Generally speaking, for leftmost-first match semantics, +/// states that appear after the first NFA match state will not be included in +/// the `StateBuilderNFA` returned since they are impossible to visit. +/// +/// `sparses` is used as scratch space for NFA traversal. Other than their +/// capacity requirements (detailed above), there are no requirements on what's +/// contained within them (if anything). Similarly, what's inside of them once +/// this routine returns is unspecified. +/// +/// `stack` must have length 0. It is used as scratch space for depth first +/// traversal. After returning, it is guaranteed that `stack` will have length +/// 0. +/// +/// `state` corresponds to the current DFA state on which one wants to compute +/// the transition for the input `unit`. +/// +/// `empty_builder` corresponds to the builder allocation to use to produce a +/// complete `StateBuilderNFA` state. If the state is not needed (or is already +/// cached), then it can be cleared and reused without needing to create a new +/// `State`. The `StateBuilderNFA` state returned is final and ready to be +/// turned into a `State` if necessary. +pub(crate) fn next( + nfa: &thompson::NFA, + match_kind: MatchKind, + sparses: &mut SparseSets, + stack: &mut Vec, + state: &State, + unit: alphabet::Unit, + empty_builder: StateBuilderEmpty, +) -> StateBuilderNFA { + sparses.clear(); + + // Whether the NFA is matched in reverse or not. We use this in some + // conditional logic for dealing with the exceptionally annoying CRLF-aware + // line anchors. + let rev = nfa.is_reverse(); + // The look-around matcher that our NFA is configured with. We don't + // actually use it to match look-around assertions, but we do need its + // configuration for constructing states consistent with how it matches. + let lookm = nfa.look_matcher(); + + // Put the NFA state IDs into a sparse set in case we need to + // re-compute their epsilon closure. + // + // Doing this state shuffling is technically not necessary unless some + // kind of look-around is used in the DFA. Some ad hoc experiments + // suggested that avoiding this didn't lead to much of an improvement, + // but perhaps more rigorous experimentation should be done. And in + // particular, avoiding this check requires some light refactoring of + // the code below. + state.iter_nfa_state_ids(|nfa_id| { + sparses.set1.insert(nfa_id); + }); + + // Compute look-ahead assertions originating from the current state. Based + // on the input unit we're transitioning over, some additional set of + // assertions may be true. Thus, we re-compute this state's epsilon closure + // (but only if necessary). Notably, when we build a DFA state initially, + // we don't enable any look-ahead assertions because we don't know whether + // they're true or not at that point. + if !state.look_need().is_empty() { + // Add look-ahead assertions that are now true based on the current + // input unit. 
+ let mut look_have = state.look_have(); + match unit.as_u8() { + Some(b'\r') => { + if !rev || !state.is_half_crlf() { + look_have = look_have.insert(Look::EndCRLF); + } + } + Some(b'\n') => { + if rev || !state.is_half_crlf() { + look_have = look_have.insert(Look::EndCRLF); + } + } + Some(_) => {} + None => { + look_have = look_have + .insert(Look::End) + .insert(Look::EndLF) + .insert(Look::EndCRLF); + } + } + if unit.is_byte(lookm.get_line_terminator()) { + look_have = look_have.insert(Look::EndLF); + } + if state.is_half_crlf() + && ((rev && !unit.is_byte(b'\r')) + || (!rev && !unit.is_byte(b'\n'))) + { + look_have = look_have.insert(Look::StartCRLF); + } + if state.is_from_word() == unit.is_word_byte() { + look_have = look_have + .insert(Look::WordAsciiNegate) + .insert(Look::WordUnicodeNegate); + } else { + look_have = + look_have.insert(Look::WordAscii).insert(Look::WordUnicode); + } + if !unit.is_word_byte() { + look_have = look_have + .insert(Look::WordEndHalfAscii) + .insert(Look::WordEndHalfUnicode); + } + if state.is_from_word() && !unit.is_word_byte() { + look_have = look_have + .insert(Look::WordEndAscii) + .insert(Look::WordEndUnicode); + } else if !state.is_from_word() && unit.is_word_byte() { + look_have = look_have + .insert(Look::WordStartAscii) + .insert(Look::WordStartUnicode); + } + // If we have new assertions satisfied that are among the set of + // assertions that exist in this state (that is, just because we added + // an EndLF assertion above doesn't mean there is an EndLF conditional + // epsilon transition in this state), then we re-compute this state's + // epsilon closure using the updated set of assertions. + // + // Note that since our DFA states omit unconditional epsilon + // transitions, this check is necessary for correctness. If we re-did + // the epsilon closure below needlessly, it could change based on the + // fact that we omitted epsilon states originally. + if !look_have + .subtract(state.look_have()) + .intersect(state.look_need()) + .is_empty() + { + for nfa_id in sparses.set1.iter() { + epsilon_closure( + nfa, + nfa_id, + look_have, + stack, + &mut sparses.set2, + ); + } + sparses.swap(); + sparses.set2.clear(); + } + } + + // Convert our empty builder into one that can record assertions and match + // pattern IDs. + let mut builder = empty_builder.into_matches(); + // Set whether the StartLF look-behind assertion is true for this + // transition or not. The look-behind assertion for ASCII word boundaries + // is handled below. + if nfa.look_set_any().contains_anchor_line() + && unit.is_byte(lookm.get_line_terminator()) + { + // Why only handle StartLF here and not Start? That's because Start + // can only impact the starting state, which is special cased in + // start state handling. + builder.set_look_have(|have| have.insert(Look::StartLF)); + } + // We also need to add StartCRLF to our assertions too, if we can. This + // is unfortunately a bit more complicated, because it depends on the + // direction of the search. In the forward direction, ^ matches after a + // \n, but in the reverse direction, ^ only matches after a \r. (This is + // further complicated by the fact that reverse a regex means changing a ^ + // to a $ and vice versa.) + if nfa.look_set_any().contains_anchor_crlf() + && ((rev && unit.is_byte(b'\r')) || (!rev && unit.is_byte(b'\n'))) + { + builder.set_look_have(|have| have.insert(Look::StartCRLF)); + } + // And also for the start-half word boundary assertions. 
As long as the + // look-behind byte is not a word char, then the assertions are satisfied. + if nfa.look_set_any().contains_word() && !unit.is_word_byte() { + builder.set_look_have(|have| { + have.insert(Look::WordStartHalfAscii) + .insert(Look::WordStartHalfUnicode) + }); + } + for nfa_id in sparses.set1.iter() { + match *nfa.state(nfa_id) { + thompson::State::Union { .. } + | thompson::State::BinaryUnion { .. } + | thompson::State::Fail + | thompson::State::Look { .. } + | thompson::State::Capture { .. } => {} + thompson::State::Match { pattern_id } => { + // Notice here that we are calling the NEW state a match + // state if the OLD state we are transitioning from + // contains an NFA match state. This is precisely how we + // delay all matches by one byte and also what therefore + // guarantees that starting states cannot be match states. + // + // If we didn't delay matches by one byte, then whether + // a DFA is a matching state or not would be determined + // by whether one of its own constituent NFA states + // was a match state. (And that would be done in + // 'add_nfa_states'.) + // + // Also, 'add_match_pattern_id' requires that callers never + // pass duplicative pattern IDs. We do in fact uphold that + // guarantee here, but it's subtle. In particular, a Thompson + // NFA guarantees that each pattern has exactly one match + // state. Moreover, since we're iterating over the NFA state + // IDs in a set, we are guaranteed not to have any duplicative + // match states. Thus, it is impossible to add the same pattern + // ID more than once. + // + // N.B. We delay matches by 1 byte as a way to hack 1-byte + // look-around into DFA searches. This lets us support ^, $ + // and ASCII-only \b. The delay is also why we need a special + // "end-of-input" (EOI) sentinel and why we need to follow the + // EOI sentinel at the end of every search. This final EOI + // transition is necessary to report matches found at the end + // of a haystack. + builder.add_match_pattern_id(pattern_id); + if !match_kind.continue_past_first_match() { + break; + } + } + thompson::State::ByteRange { ref trans } => { + if trans.matches_unit(unit) { + epsilon_closure( + nfa, + trans.next, + builder.look_have(), + stack, + &mut sparses.set2, + ); + } + } + thompson::State::Sparse(ref sparse) => { + if let Some(next) = sparse.matches_unit(unit) { + epsilon_closure( + nfa, + next, + builder.look_have(), + stack, + &mut sparses.set2, + ); + } + } + thompson::State::Dense(ref dense) => { + if let Some(next) = dense.matches_unit(unit) { + epsilon_closure( + nfa, + next, + builder.look_have(), + stack, + &mut sparses.set2, + ); + } + } + } + } + // We only set the word byte if there's a word boundary look-around + // anywhere in this regex. Otherwise, there's no point in bloating the + // number of states if we don't have one. + // + // We also only set it when the state has a non-zero number of NFA states. + // Otherwise, we could wind up with states that *should* be DEAD states + // but are otherwise distinct from DEAD states because of this look-behind + // assertion being set. While this can't technically impact correctness *in + // theory*, it can create pathological DFAs that consume input until EOI or + // a quit byte is seen. Consuming until EOI isn't a correctness problem, + // but a (serious) perf problem. Hitting a quit byte, however, could be a + // correctness problem since it could cause search routines to report an + // error instead of a detected match once the quit state is entered. 
(The
+    // search routine could be made to be a bit smarter by reporting a match
+    // if one was detected once it enters a quit state (and indeed, the search
+    // routines in this crate do just that), but it seems better to prevent
+    // these things by construction if possible.)
+    if !sparses.set2.is_empty() {
+        if nfa.look_set_any().contains_word() && unit.is_word_byte() {
+            builder.set_is_from_word();
+        }
+        if nfa.look_set_any().contains_anchor_crlf()
+            && ((rev && unit.is_byte(b'\n')) || (!rev && unit.is_byte(b'\r')))
+        {
+            builder.set_is_half_crlf();
+        }
+    }
+    let mut builder_nfa = builder.into_nfa();
+    add_nfa_states(nfa, &sparses.set2, &mut builder_nfa);
+    builder_nfa
+}
+
+/// Compute the epsilon closure for the given NFA state. The epsilon closure
+/// consists of all NFA state IDs, including `start_nfa_id`, that can be
+/// reached from `start_nfa_id` without consuming any input. These state IDs
+/// are written to `set` in the order they are visited, but only if they are
+/// not already in `set`. `start_nfa_id` must be a valid state ID for the NFA
+/// given.
+///
+/// `look_have` consists of the satisfied assertions at the current
+/// position. For conditional look-around epsilon transitions, these are
+/// only followed if they are satisfied by `look_have`.
+///
+/// `stack` must have length 0. It is used as scratch space for depth first
+/// traversal. After returning, it is guaranteed that `stack` will have length
+/// 0.
+pub(crate) fn epsilon_closure(
+    nfa: &thompson::NFA,
+    start_nfa_id: StateID,
+    look_have: LookSet,
+    stack: &mut Vec<StateID>,
+    set: &mut SparseSet,
+) {
+    assert!(stack.is_empty());
+    // If this isn't an epsilon state, then the epsilon closure is always just
+    // itself, so there's no need to spin up the machinery below to handle it.
+    if !nfa.state(start_nfa_id).is_epsilon() {
+        set.insert(start_nfa_id);
+        return;
+    }
+
+    stack.push(start_nfa_id);
+    while let Some(mut id) = stack.pop() {
+        // In many cases, we can avoid stack operations when an NFA state only
+        // adds one new state to visit. In that case, we just set our ID to
+        // that state and mush on. We only use the stack when an NFA state
+        // introduces multiple new states to visit.
+        loop {
+            // Insert this NFA state, and if it's already in the set and thus
+            // already visited, then we can move on to the next one.
+            if !set.insert(id) {
+                break;
+            }
+            match *nfa.state(id) {
+                thompson::State::ByteRange { .. }
+                | thompson::State::Sparse { .. }
+                | thompson::State::Dense { .. }
+                | thompson::State::Fail
+                | thompson::State::Match { .. } => break,
+                thompson::State::Look { look, next } => {
+                    if !look_have.contains(look) {
+                        break;
+                    }
+                    id = next;
+                }
+                thompson::State::Union { ref alternates } => {
+                    id = match alternates.get(0) {
+                        None => break,
+                        Some(&id) => id,
+                    };
+                    // We need to process our alternates in order to preserve
+                    // match preferences, so put the earliest alternates closer
+                    // to the top of the stack.
+                    stack.extend(alternates[1..].iter().rev());
+                }
+                thompson::State::BinaryUnion { alt1, alt2 } => {
+                    id = alt1;
+                    stack.push(alt2);
+                }
+                thompson::State::Capture { next, .. } => {
+                    id = next;
+                }
+            }
+        }
+    }
+}
+
+/// Add the NFA state IDs in the given `set` to the given DFA builder state.
+/// The order in which states are added corresponds to the order in which they
+/// were added to `set`.
+/// +/// The DFA builder state given should already have its complete set of match +/// pattern IDs added (if any) and any look-behind assertions (StartLF, Start +/// and whether this state is being generated for a transition over a word byte +/// when applicable) that are true immediately prior to transitioning into this +/// state (via `builder.look_have()`). The match pattern IDs should correspond +/// to matches that occurred on the previous transition, since all matches are +/// delayed by one byte. The things that should _not_ be set are look-ahead +/// assertions (EndLF, End and whether the next byte is a word byte or not). +/// The builder state should also not have anything in `look_need` set, as this +/// routine will compute that for you. +/// +/// The given NFA should be able to resolve all identifiers in `set` to a +/// particular NFA state. Additionally, `set` must have capacity equivalent +/// to `nfa.len()`. +pub(crate) fn add_nfa_states( + nfa: &thompson::NFA, + set: &SparseSet, + builder: &mut StateBuilderNFA, +) { + for nfa_id in set.iter() { + match *nfa.state(nfa_id) { + thompson::State::ByteRange { .. } => { + builder.add_nfa_state_id(nfa_id); + } + thompson::State::Sparse { .. } => { + builder.add_nfa_state_id(nfa_id); + } + thompson::State::Dense { .. } => { + builder.add_nfa_state_id(nfa_id); + } + thompson::State::Look { look, .. } => { + builder.add_nfa_state_id(nfa_id); + builder.set_look_need(|need| need.insert(look)); + } + thompson::State::Union { .. } + | thompson::State::BinaryUnion { .. } => { + // Pure epsilon transitions don't need to be tracked as part + // of the DFA state. Tracking them is actually superfluous; + // they won't cause any harm other than making determinization + // slower. + // + // Why aren't these needed? Well, in an NFA, epsilon + // transitions are really just jumping points to other states. + // So once you hit an epsilon transition, the same set of + // resulting states always appears. Therefore, putting them in + // a DFA's set of ordered NFA states is strictly redundant. + // + // Look-around states are also epsilon transitions, but + // they are *conditional*. So their presence could be + // discriminatory, and thus, they are tracked above. + // + // But wait... why are epsilon states in our `set` in the first + // place? Why not just leave them out? They're in our `set` + // because it was generated by computing an epsilon closure, + // and we want to keep track of all states we visited to avoid + // re-visiting them. In exchange, we have to do this second + // iteration over our collected states to finalize our DFA + // state. In theory, we could avoid this second iteration if + // we maintained two sets during epsilon closure: the set of + // visited states (to avoid cycles) and the set of states that + // will actually be used to construct the next DFA state. + // + // Note that this optimization requires that we re-compute the + // epsilon closure to account for look-ahead in 'next' *only + // when necessary*. Namely, only when the set of look-around + // assertions changes and only when those changes are within + // the set of assertions that are needed in order to step + // through the closure correctly. Otherwise, if we re-do the + // epsilon closure needlessly, it could change based on the + // fact that we are omitting epsilon states here. + // + // ----- + // + // Welp, scratch the above. 
It turns out that recording these + // is in fact necessary to seemingly handle one particularly + // annoying case: when a conditional epsilon transition is + // put inside of a repetition operator. One specific case I + // ran into was the regex `(?:\b|%)+` on the haystack `z%`. + // The correct leftmost first matches are: [0, 0] and [1, 1]. + // But the DFA was reporting [0, 0] and [1, 2]. To understand + // why this happens, consider the NFA for the aforementioned + // regex: + // + // >000000: binary-union(4, 1) + // 000001: \x00-\xFF => 0 + // 000002: WordAscii => 5 + // 000003: % => 5 + // ^000004: binary-union(2, 3) + // 000005: binary-union(4, 6) + // 000006: MATCH(0) + // + // The problem here is that one of the DFA start states is + // going to consist of the NFA states [2, 3] by computing the + // epsilon closure of state 4. State 4 isn't included because + // we previously were not keeping track of union states. But + // only a subset of transitions out of this state will be able + // to follow WordAscii, and in those cases, the epsilon closure + // is redone. The only problem is that computing the epsilon + // closure from [2, 3] is different than computing the epsilon + // closure from [4]. In the former case, assuming the WordAscii + // assertion is satisfied, you get: [2, 3, 6]. In the latter + // case, you get: [2, 6, 3]. Notice that '6' is the match state + // and appears AFTER '3' in the former case. This leads to a + // preferential but incorrect match of '%' before returning + // a match. In the latter case, the match is preferred over + // continuing to accept the '%'. + // + // It almost feels like we might be able to fix the NFA states + // to avoid this, or to at least only keep track of union + // states where this actually matters, since in the vast + // majority of cases, this doesn't matter. + // + // Another alternative would be to define a new HIR property + // called "assertion is repeated anywhere" and compute it + // inductively over the entire pattern. If it happens anywhere, + // which is probably pretty rare, then we record union states. + // Otherwise we don't. + builder.add_nfa_state_id(nfa_id); + } + // Capture states we definitely do not need to record, since they + // are unconditional epsilon transitions with no branching. + thompson::State::Capture { .. } => {} + // It's not totally clear whether we need to record fail states or + // not, but we do so out of an abundance of caution. Since they are + // quite rare in practice, there isn't much cost to recording them. + thompson::State::Fail => { + builder.add_nfa_state_id(nfa_id); + } + thompson::State::Match { .. } => { + // Normally, the NFA match state doesn't actually need to + // be inside the DFA state. But since we delay matches by + // one byte, the matching DFA state corresponds to states + // that transition from the one we're building here. And + // the way we detect those cases is by looking for an NFA + // match state. See 'next' for how this is handled. + builder.add_nfa_state_id(nfa_id); + } + } + } + // If we know this state contains no look-around assertions, then + // there's no reason to track which look-around assertions were + // satisfied when this state was created. + if builder.look_need().is_empty() { + builder.set_look_have(|_| LookSet::empty()); + } +} + +/// Sets the appropriate look-behind assertions on the given state based on +/// this starting configuration. 
+pub(crate) fn set_lookbehind_from_start( + nfa: &thompson::NFA, + start: &Start, + builder: &mut StateBuilderMatches, +) { + let rev = nfa.is_reverse(); + let lineterm = nfa.look_matcher().get_line_terminator(); + let lookset = nfa.look_set_any(); + match *start { + Start::NonWordByte => { + if lookset.contains_word() { + builder.set_look_have(|have| { + have.insert(Look::WordStartHalfAscii) + .insert(Look::WordStartHalfUnicode) + }); + } + } + Start::WordByte => { + if lookset.contains_word() { + builder.set_is_from_word(); + } + } + Start::Text => { + if lookset.contains_anchor_haystack() { + builder.set_look_have(|have| have.insert(Look::Start)); + } + if lookset.contains_anchor_line() { + builder.set_look_have(|have| { + have.insert(Look::StartLF).insert(Look::StartCRLF) + }); + } + if lookset.contains_word() { + builder.set_look_have(|have| { + have.insert(Look::WordStartHalfAscii) + .insert(Look::WordStartHalfUnicode) + }); + } + } + Start::LineLF => { + if rev { + if lookset.contains_anchor_crlf() { + builder.set_is_half_crlf(); + } + if lookset.contains_anchor_line() { + builder.set_look_have(|have| have.insert(Look::StartLF)); + } + } else { + if lookset.contains_anchor_line() { + builder.set_look_have(|have| have.insert(Look::StartCRLF)); + } + } + if lookset.contains_anchor_line() && lineterm == b'\n' { + builder.set_look_have(|have| have.insert(Look::StartLF)); + } + if lookset.contains_word() { + builder.set_look_have(|have| { + have.insert(Look::WordStartHalfAscii) + .insert(Look::WordStartHalfUnicode) + }); + } + } + Start::LineCR => { + if lookset.contains_anchor_crlf() { + if rev { + builder.set_look_have(|have| have.insert(Look::StartCRLF)); + } else { + builder.set_is_half_crlf(); + } + } + if lookset.contains_anchor_line() && lineterm == b'\r' { + builder.set_look_have(|have| have.insert(Look::StartLF)); + } + if lookset.contains_word() { + builder.set_look_have(|have| { + have.insert(Look::WordStartHalfAscii) + .insert(Look::WordStartHalfUnicode) + }); + } + } + Start::CustomLineTerminator => { + if lookset.contains_anchor_line() { + builder.set_look_have(|have| have.insert(Look::StartLF)); + } + // This is a bit of a tricky case, but if the line terminator was + // set to a word byte, then we also need to behave as if the start + // configuration is Start::WordByte. That is, we need to mark our + // state as having come from a word byte. + if lookset.contains_word() { + if utf8::is_word_byte(lineterm) { + builder.set_is_from_word(); + } else { + builder.set_look_have(|have| { + have.insert(Look::WordStartHalfAscii) + .insert(Look::WordStartHalfUnicode) + }); + } + } + } + } +} diff --git a/vendor/regex-automata/src/util/determinize/state.rs b/vendor/regex-automata/src/util/determinize/state.rs new file mode 100644 index 00000000000000..f410f9acb2f5b2 --- /dev/null +++ b/vendor/regex-automata/src/util/determinize/state.rs @@ -0,0 +1,907 @@ +/*! +This module defines a DFA state representation and builders for constructing +DFA states. + +This representation is specifically for use in implementations of NFA-to-DFA +conversion via powerset construction. (Also called "determinization" in this +crate.) + +The term "DFA state" is somewhat overloaded in this crate. In some cases, it +refers to the set of transitions over an alphabet for a particular state. In +other cases, it refers to a set of NFA states. 
The former is really about the
+final representation of a state in a DFA's transition table, whereas the
+latter---what this module is focused on---is closer to an intermediate form
+that is used to help eventually build the transition table.
+
+This module exports four types. All four types represent the same idea: an
+ordered set of NFA states. This ordered set represents the epsilon closure of a
+particular NFA state, where the "epsilon closure" is the set of NFA states that
+can be transitioned to without consuming any input. i.e., Follow all of the NFA
+state's epsilon transitions. In addition, this implementation of DFA states
+cares about two other things: the ordered set of pattern IDs corresponding
+to the patterns that match if the state is a match state, and the set of
+look-behind assertions that were true when the state was created.
+
+The first, `State`, is a frozen representation of a state that cannot be
+modified. It may be cheaply cloned without copying the state itself and can be
+accessed safely from multiple threads simultaneously. This type is useful for
+when one knows that the DFA state being constructed is distinct from any other
+previously constructed states. Namely, powerset construction, in practice,
+requires one to keep a cache of previously created DFA states. Otherwise,
+the number of DFA states created in memory balloons to an impractically
+large number. For this reason, equivalent states should endeavor to have an
+equivalent byte-level representation. (In general, "equivalency" here means,
+"equivalent assertions, pattern IDs and NFA state IDs." We do not require that
+full DFA minimization be implemented here. This form of equivalency is only
+surface deep and is more-or-less a practical necessity.)
+
+The other three types represent different phases in the construction of a
+DFA state. Internally, these three types (and `State`) all use the same
+byte-oriented representation. That means one can use any of the builder types
+to check whether the state it represents already exists or not. If it does,
+then there is no need to freeze it into a `State` (which requires an alloc and
+a copy). Here are the three types described succinctly:
+
+* `StateBuilderEmpty` represents a state with no pattern IDs, no assertions
+and no NFA states. Creating a `StateBuilderEmpty` performs no allocs. A
+`StateBuilderEmpty` can only be used to query its underlying memory capacity,
+or to convert into a builder for recording pattern IDs and/or assertions.
+
+* `StateBuilderMatches` represents a state with zero or more pattern IDs, zero
+or more satisfied assertions and zero NFA state IDs. A `StateBuilderMatches`
+can only be used for adding pattern IDs and recording assertions.
+
+* `StateBuilderNFA` represents a state with zero or more pattern IDs, zero or
+more satisfied assertions and zero or more NFA state IDs. A `StateBuilderNFA`
+can only be used for adding NFA state IDs and recording some assertions.
+
+The expected flow here is to use the above builders to construct a candidate
+DFA state to check if it already exists. If it does, then there's no need to
+freeze it into a `State`. If it doesn't exist, then `StateBuilderNFA::to_state`
+can be called to freeze the builder into an immutable `State`. In either
+case, `clear` should be called on the builder to turn it back into a
+`StateBuilderEmpty` that reuses the underlying memory.
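+
+For illustration only, here is a rough sketch of that expected flow. This is
+not a verbatim excerpt from the determinizer in this crate; `some_nfa_id` is a
+stand-in for an NFA state ID computed by the caller, and the assertion shown is
+just an example:
+
+```text
+let empty = StateBuilderEmpty::new();            // no allocation yet
+let mut matches = empty.into_matches();          // record assertions/patterns
+matches.set_look_have(|have| have.insert(Look::StartLF));
+matches.add_match_pattern_id(PatternID::ZERO);
+let mut nfa = matches.into_nfa();                // record NFA state IDs
+nfa.add_nfa_state_id(some_nfa_id);
+// Probe a cache keyed by nfa.as_bytes(); only freeze on a cache miss.
+let frozen: State = nfa.to_state();
+let reusable = nfa.clear();                      // back to StateBuilderEmpty
+```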
+ +The main purpose for splitting the builder into these distinct types is to +make it impossible to do things like adding a pattern ID after adding an NFA +state ID. Namely, this makes it simpler to use a space-and-time efficient +binary representation for the state. (The format is documented on the `Repr` +type below.) If we just used one type for everything, it would be possible for +callers to use an incorrect interleaving of calls and thus result in a corrupt +representation. I chose to use more type machinery to make this impossible to +do because 1) determinization is itself pretty complex and it wouldn't be too +hard to foul this up and 2) there isn't too much machinery involved and it's +well contained. + +As an optimization, sometimes states won't have certain things set. For +example, if the underlying NFA has no word boundary assertions, then there is +no reason to set a state's look-behind assertion as to whether it was generated +from a word byte or not. Similarly, if a state has no NFA states corresponding +to look-around assertions, then there is no reason to set `look_have` to a +non-empty set. Finally, callers usually omit unconditional epsilon transitions +when adding NFA state IDs since they aren't discriminatory. + +Finally, the binary representation used by these states is, thankfully, not +serialized anywhere. So any kind of change can be made with reckless abandon, +as long as everything in this module agrees. +*/ + +use core::mem; + +use alloc::{sync::Arc, vec::Vec}; + +use crate::util::{ + int::{I32, U32}, + look::LookSet, + primitives::{PatternID, StateID}, + wire::{self, Endian}, +}; + +/// A DFA state that, at its core, is represented by an ordered set of NFA +/// states. +/// +/// This type is intended to be used only in NFA-to-DFA conversion via powerset +/// construction. +/// +/// It may be cheaply cloned and accessed safely from multiple threads +/// simultaneously. +#[derive(Clone, Eq, Hash, PartialEq, PartialOrd, Ord)] +pub(crate) struct State(Arc<[u8]>); + +/// This Borrow impl permits us to lookup any state in a map by its byte +/// representation. This is particularly convenient when one has a StateBuilder +/// and we want to see if a correspondingly equivalent state already exists. If +/// one does exist, then we can reuse the allocation required by StateBuilder +/// without having to convert it into a State first. +impl core::borrow::Borrow<[u8]> for State { + fn borrow(&self) -> &[u8] { + &self.0 + } +} + +impl core::fmt::Debug for State { + fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { + f.debug_tuple("State").field(&self.repr()).finish() + } +} + +/// For docs on these routines, see the internal Repr and ReprVec types below. 
+impl State {
+    pub(crate) fn dead() -> State {
+        StateBuilderEmpty::new().into_matches().into_nfa().to_state()
+    }
+
+    pub(crate) fn is_match(&self) -> bool {
+        self.repr().is_match()
+    }
+
+    pub(crate) fn is_from_word(&self) -> bool {
+        self.repr().is_from_word()
+    }
+
+    pub(crate) fn is_half_crlf(&self) -> bool {
+        self.repr().is_half_crlf()
+    }
+
+    pub(crate) fn look_have(&self) -> LookSet {
+        self.repr().look_have()
+    }
+
+    pub(crate) fn look_need(&self) -> LookSet {
+        self.repr().look_need()
+    }
+
+    pub(crate) fn match_len(&self) -> usize {
+        self.repr().match_len()
+    }
+
+    pub(crate) fn match_pattern(&self, index: usize) -> PatternID {
+        self.repr().match_pattern(index)
+    }
+
+    pub(crate) fn match_pattern_ids(&self) -> Option<Vec<PatternID>> {
+        self.repr().match_pattern_ids()
+    }
+
+    #[cfg(all(test, not(miri)))]
+    pub(crate) fn iter_match_pattern_ids<F: FnMut(PatternID)>(&self, f: F) {
+        self.repr().iter_match_pattern_ids(f)
+    }
+
+    pub(crate) fn iter_nfa_state_ids<F: FnMut(StateID)>(&self, f: F) {
+        self.repr().iter_nfa_state_ids(f)
+    }
+
+    pub(crate) fn memory_usage(&self) -> usize {
+        self.0.len()
+    }
+
+    fn repr(&self) -> Repr<'_> {
+        Repr(&self.0)
+    }
+}
+
+/// A state builder that represents an empty state.
+///
+/// This is a useful "initial condition" for state construction. It has no
+/// NFA state IDs, no assertions set and no pattern IDs. No allocations are
+/// made when new() is called. Its main use is for being converted into a
+/// builder that can capture assertions and pattern IDs.
+#[derive(Clone, Debug)]
+pub(crate) struct StateBuilderEmpty(Vec<u8>);
+
+/// For docs on these routines, see the internal Repr and ReprVec types below.
+impl StateBuilderEmpty {
+    pub(crate) fn new() -> StateBuilderEmpty {
+        StateBuilderEmpty(alloc::vec![])
+    }
+
+    pub(crate) fn into_matches(mut self) -> StateBuilderMatches {
+        self.0.extend_from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0]);
+        StateBuilderMatches(self.0)
+    }
+
+    fn clear(&mut self) {
+        self.0.clear();
+    }
+
+    pub(crate) fn capacity(&self) -> usize {
+        self.0.capacity()
+    }
+}
+
+/// A state builder that collects assertions and pattern IDs.
+///
+/// When collecting pattern IDs is finished, this can be converted into a
+/// builder that collects NFA state IDs.
+#[derive(Clone)]
+pub(crate) struct StateBuilderMatches(Vec<u8>);
+
+impl core::fmt::Debug for StateBuilderMatches {
+    fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
+        f.debug_tuple("StateBuilderMatches").field(&self.repr()).finish()
+    }
+}
+
+/// For docs on these routines, see the internal Repr and ReprVec types below.
+impl StateBuilderMatches {
+    pub(crate) fn into_nfa(mut self) -> StateBuilderNFA {
+        self.repr_vec().close_match_pattern_ids();
+        StateBuilderNFA { repr: self.0, prev_nfa_state_id: StateID::ZERO }
+    }
+
+    pub(crate) fn set_is_from_word(&mut self) {
+        self.repr_vec().set_is_from_word()
+    }
+
+    pub(crate) fn set_is_half_crlf(&mut self) {
+        self.repr_vec().set_is_half_crlf()
+    }
+
+    pub(crate) fn look_have(&self) -> LookSet {
+        LookSet::read_repr(&self.0[1..])
+    }
+
+    pub(crate) fn set_look_have(
+        &mut self,
+        set: impl FnMut(LookSet) -> LookSet,
+    ) {
+        self.repr_vec().set_look_have(set)
+    }
+
+    pub(crate) fn add_match_pattern_id(&mut self, pid: PatternID) {
+        self.repr_vec().add_match_pattern_id(pid)
+    }
+
+    fn repr(&self) -> Repr<'_> {
+        Repr(&self.0)
+    }
+
+    fn repr_vec(&mut self) -> ReprVec<'_> {
+        ReprVec(&mut self.0)
+    }
+}
+
+/// A state builder that collects some assertions and NFA state IDs.
+///
+/// When collecting NFA state IDs is finished, this can be used to build a
+/// `State` if necessary.
+///
+/// When done with building a state (regardless of whether it got kept or not),
+/// it's usually a good idea to call `clear` to get an empty builder back so
+/// that it can be reused to build the next state.
+#[derive(Clone)]
+pub(crate) struct StateBuilderNFA {
+    repr: Vec<u8>,
+    prev_nfa_state_id: StateID,
+}
+
+impl core::fmt::Debug for StateBuilderNFA {
+    fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
+        f.debug_tuple("StateBuilderNFA").field(&self.repr()).finish()
+    }
+}
+
+/// For docs on these routines, see the internal Repr and ReprVec types below.
+impl StateBuilderNFA {
+    pub(crate) fn to_state(&self) -> State {
+        State(Arc::from(&*self.repr))
+    }
+
+    pub(crate) fn clear(self) -> StateBuilderEmpty {
+        let mut builder = StateBuilderEmpty(self.repr);
+        builder.clear();
+        builder
+    }
+
+    pub(crate) fn look_need(&self) -> LookSet {
+        self.repr().look_need()
+    }
+
+    pub(crate) fn set_look_have(
+        &mut self,
+        set: impl FnMut(LookSet) -> LookSet,
+    ) {
+        self.repr_vec().set_look_have(set)
+    }
+
+    pub(crate) fn set_look_need(
+        &mut self,
+        set: impl FnMut(LookSet) -> LookSet,
+    ) {
+        self.repr_vec().set_look_need(set)
+    }
+
+    pub(crate) fn add_nfa_state_id(&mut self, sid: StateID) {
+        ReprVec(&mut self.repr)
+            .add_nfa_state_id(&mut self.prev_nfa_state_id, sid)
+    }
+
+    pub(crate) fn as_bytes(&self) -> &[u8] {
+        &self.repr
+    }
+
+    fn repr(&self) -> Repr<'_> {
+        Repr(&self.repr)
+    }
+
+    fn repr_vec(&mut self) -> ReprVec<'_> {
+        ReprVec(&mut self.repr)
+    }
+}
+
+/// Repr is a read-only view into the representation of a DFA state.
+///
+/// Primarily, a Repr is how we achieve DRY: we implement decoding the format
+/// in one place, and then use a Repr to implement the various methods on the
+/// public state types.
+///
+/// The format is as follows:
+///
+/// The first nine bytes correspond to three bitsets.
+///
+/// Byte 0 is a bitset corresponding to miscellaneous flags associated with the
+/// state. Bit 0 is set to 1 if the state is a match state. Bit 1 is set to 1
+/// if the state has pattern IDs explicitly written to it. (This is a flag that
+/// is not meant to be set by determinization, but rather, is used as part of
+/// an internal space-saving optimization.) Bit 2 is set to 1 if the state was
+/// generated by a transition over a "word" byte. (Callers may not always set
+/// this. For example, if the NFA has no word boundary assertion, then needing
+/// to track whether a state came from a word byte or not is superfluous and
+/// wasteful.) Bit 3 is set to 1 if the state was generated by a transition
+/// from a `\r` (forward search) or a `\n` (reverse search) when CRLF mode is
+/// enabled.
+///
+/// Bytes 1..5 correspond to the look-behind assertions that were satisfied
+/// by the transition that created this state. (Look-ahead assertions are not
+/// tracked as part of states. Instead, these are applied by re-computing the
+/// epsilon closure of a state when computing the transition function. See
+/// `next` in the parent module.)
+///
+/// Bytes 5..9 correspond to the set of look-around assertions (including both
+/// look-behind and look-ahead) that appear somewhere in this state's set of
+/// NFA state IDs. This is used to determine whether this state's epsilon
+/// closure should be re-computed when computing the transition function.
+/// Namely, look-around assertions are "just" conditional epsilon transitions,
+/// so if there are new assertions available when computing the transition
+/// function, we should only re-compute the epsilon closure if those new
+/// assertions are relevant to this particular state.
+///
+/// Bytes 9..13 correspond to a 32-bit native-endian encoded integer
+/// corresponding to the number of patterns encoded in this state. If the state
+/// is not a match state (byte 0 bit 0 is 0) or if its only pattern ID is
+/// PatternID::ZERO, then no integer is encoded at this position. Instead, byte
+/// offset 9 is the position at which the first NFA state ID is encoded.
+///
+/// For a match state with at least one non-ZERO pattern ID, the next bytes
+/// correspond to a sequence of 32-bit native endian encoded integers that
+/// represent each pattern ID, in order, that this match state represents.
+///
+/// After the pattern IDs (if any), NFA state IDs are delta encoded as
+/// varints.[1] The first NFA state ID is encoded as itself, and each
+/// subsequent NFA state ID is encoded as the difference between itself and the
+/// previous NFA state ID.
+///
+/// [1] - https://developers.google.com/protocol-buffers/docs/encoding#varints
+struct Repr<'a>(&'a [u8]);
+
+impl<'a> Repr<'a> {
+    /// Returns true if and only if this is a match state.
+    ///
+    /// If callers have added pattern IDs to this state, then callers MUST set
+    /// this state as a match state explicitly. However, as a special case, if
+    /// a state is marked as a match state but has no pattern IDs, then it is
+    /// treated as if it had a single pattern ID equivalent to PatternID::ZERO.
+    fn is_match(&self) -> bool {
+        self.0[0] & (1 << 0) > 0
+    }
+
+    /// Returns true if and only if this state has had at least one pattern
+    /// ID added to it.
+    ///
+    /// This is an internal-only flag that permits the representation to save
+    /// space in the common case of an NFA with one pattern in it. In that
+    /// case, a match state can only ever have exactly one pattern ID:
+    /// PatternID::ZERO. So there's no need to represent it.
+    fn has_pattern_ids(&self) -> bool {
+        self.0[0] & (1 << 1) > 0
+    }
+
+    /// Returns true if and only if this state is marked as having been created
+    /// from a transition over a word byte. This is useful for checking whether
+    /// a word boundary assertion is true or not, which requires look-behind
+    /// (whether the current state came from a word byte or not) and look-ahead
+    /// (whether the transition byte is a word byte or not).
+    ///
+    /// Since states with this set are distinct from states that don't have
+    /// this set (even if they are otherwise equivalent), callers should not
+    /// set this assertion unless the underlying NFA has at least one word
+    /// boundary assertion somewhere. Otherwise, a superfluous number of states
+    /// may be created.
+    fn is_from_word(&self) -> bool {
+        self.0[0] & (1 << 2) > 0
+    }
+
+    /// Returns true if and only if this state is marked as being inside of a
+    /// CRLF terminator. In the forward direction, this means the state was
+    /// created after seeing a `\r`. In the reverse direction, this means the
+    /// state was created after seeing a `\n`.
+    fn is_half_crlf(&self) -> bool {
+        self.0[0] & (1 << 3) > 0
+    }
+
+    /// The set of look-behind assertions that were true in the transition that
+    /// created this state.
+    ///
+    /// Generally, this should be empty if 'look_need' is empty, since there is
+    /// no reason to track which look-behind assertions are true if the state
+    /// has no conditional epsilon transitions.
+    ///
+    /// Satisfied look-ahead assertions are not tracked in states. Instead,
+    /// these are re-computed on demand via epsilon closure when computing the
+    /// transition function.
+    fn look_have(&self) -> LookSet {
+        LookSet::read_repr(&self.0[1..])
+    }
+
+    /// The set of look-around (both behind and ahead) assertions that appear
+    /// at least once in this state's set of NFA states.
+    ///
+    /// This is used to determine whether the epsilon closure needs to be
+    /// re-computed when computing the transition function. Namely, if the
+    /// state has no conditional epsilon transitions, then there is no need
+    /// to re-compute the epsilon closure.
+    fn look_need(&self) -> LookSet {
+        LookSet::read_repr(&self.0[5..])
+    }
+
+    /// Returns the total number of match pattern IDs in this state.
+    ///
+    /// If this state is not a match state, then this always returns 0.
+    fn match_len(&self) -> usize {
+        if !self.is_match() {
+            0
+        } else if !self.has_pattern_ids() {
+            1
+        } else {
+            self.encoded_pattern_len()
+        }
+    }
+
+    /// Returns the pattern ID for this match state at the given index.
+    ///
+    /// If the given index is greater than or equal to `match_len()` for this
+    /// state, then this could panic or return incorrect results.
+    fn match_pattern(&self, index: usize) -> PatternID {
+        if !self.has_pattern_ids() {
+            PatternID::ZERO
+        } else {
+            let offset = 13 + index * PatternID::SIZE;
+            // This is OK since we only ever serialize valid PatternIDs to
+            // states.
+            wire::read_pattern_id_unchecked(&self.0[offset..]).0
+        }
+    }
+
+    /// Returns a copy of all match pattern IDs in this state. If this state
+    /// is not a match state, then this returns None.
+    fn match_pattern_ids(&self) -> Option<Vec<PatternID>> {
+        if !self.is_match() {
+            return None;
+        }
+        let mut pids = alloc::vec![];
+        self.iter_match_pattern_ids(|pid| pids.push(pid));
+        Some(pids)
+    }
+
+    /// Calls the given function on every pattern ID in this state.
+    fn iter_match_pattern_ids<F: FnMut(PatternID)>(&self, mut f: F) {
+        if !self.is_match() {
+            return;
+        }
+        // As an optimization for a very common case, when this is a match
+        // state for an NFA with only one pattern, we don't actually write the
+        // pattern ID to the state representation. Instead, we know it must
+        // be there since it is the only possible choice.
+        if !self.has_pattern_ids() {
+            f(PatternID::ZERO);
+            return;
+        }
+        let mut pids = &self.0[13..self.pattern_offset_end()];
+        while !pids.is_empty() {
+            let pid = wire::read_u32(pids);
+            pids = &pids[PatternID::SIZE..];
+            // This is OK since we only ever serialize valid PatternIDs to
+            // states. And since pattern IDs can never exceed a usize, the
+            // unwrap is OK.
+            f(PatternID::new_unchecked(usize::try_from(pid).unwrap()));
+        }
+    }
+
+    /// Calls the given function on every NFA state ID in this state.
+    fn iter_nfa_state_ids<F: FnMut(StateID)>(&self, mut f: F) {
+        let mut sids = &self.0[self.pattern_offset_end()..];
+        let mut prev = 0i32;
+        while !sids.is_empty() {
+            let (delta, nr) = read_vari32(sids);
+            sids = &sids[nr..];
+            let sid = prev + delta;
+            prev = sid;
+            // This is OK since we only ever serialize valid StateIDs to
+            // states. And since state IDs can never exceed an isize, they must
+            // always be able to fit into a usize, and thus cast is OK.
+            f(StateID::new_unchecked(sid.as_usize()))
+        }
+    }
+
+    /// Returns the offset into this state's representation where the pattern
+    /// IDs end and the NFA state IDs begin.
+    fn pattern_offset_end(&self) -> usize {
+        let encoded = self.encoded_pattern_len();
+        if encoded == 0 {
+            return 9;
+        }
+        // This arithmetic is OK since we were able to address this many bytes
+        // when writing to the state, thus, it must fit into a usize.
+        encoded.checked_mul(4).unwrap().checked_add(13).unwrap()
+    }
+
+    /// Returns the total number of *encoded* pattern IDs in this state.
+    ///
+    /// This may return 0 even when this is a match state, since the pattern
+    /// ID `PatternID::ZERO` is not encoded when it's the only pattern ID in
+    /// the match state (the overwhelming common case).
+    fn encoded_pattern_len(&self) -> usize {
+        if !self.has_pattern_ids() {
+            return 0;
+        }
+        // This unwrap is OK since the total number of patterns is always
+        // guaranteed to fit into a usize.
+        usize::try_from(wire::read_u32(&self.0[9..13])).unwrap()
+    }
+}
+
+impl<'a> core::fmt::Debug for Repr<'a> {
+    fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
+        let mut nfa_ids = alloc::vec![];
+        self.iter_nfa_state_ids(|sid| nfa_ids.push(sid));
+        f.debug_struct("Repr")
+            .field("is_match", &self.is_match())
+            .field("is_from_word", &self.is_from_word())
+            .field("is_half_crlf", &self.is_half_crlf())
+            .field("look_have", &self.look_have())
+            .field("look_need", &self.look_need())
+            .field("match_pattern_ids", &self.match_pattern_ids())
+            .field("nfa_state_ids", &nfa_ids)
+            .finish()
+    }
+}
+
+/// ReprVec is a write-only view into the representation of a DFA state.
+///
+/// See Repr for more details on the purpose of this type and also the format.
+///
+/// Note that not all possible combinations of methods may be called. This is
+/// precisely what the various StateBuilder types encapsulate: they only
+/// permit valid combinations via Rust's linear typing.
+struct ReprVec<'a>(&'a mut Vec<u8>);
+
+impl<'a> ReprVec<'a> {
+    /// Set this state as a match state.
+    ///
+    /// This should not be exposed explicitly outside of this module. It is
+    /// set automatically when a pattern ID is added.
+    fn set_is_match(&mut self) {
+        self.0[0] |= 1 << 0;
+    }
+
+    /// Set that this state has pattern IDs explicitly written to it.
+    ///
+    /// This should not be exposed explicitly outside of this module. This is
+    /// used internally as a space saving optimization. Namely, if the state
+    /// is a match state but does not have any pattern IDs written to it,
+    /// then it is automatically inferred to have a pattern ID of ZERO.
+    fn set_has_pattern_ids(&mut self) {
+        self.0[0] |= 1 << 1;
+    }
+
+    /// Set this state as being built from a transition over a word byte.
+    ///
+    /// Setting this is only necessary when one needs to deal with word
+    /// boundary assertions. Therefore, if the underlying NFA has no word
+    /// boundary assertions, callers should not set this.
+    fn set_is_from_word(&mut self) {
+        self.0[0] |= 1 << 2;
+    }
+
+    /// Set this state as having seen half of a CRLF terminator.
+    ///
+    /// In the forward direction, this should be set when a `\r` has been seen.
+    /// In the reverse direction, this should be set when a `\n` has been seen.
+    fn set_is_half_crlf(&mut self) {
+        self.0[0] |= 1 << 3;
+    }
+
+    /// The set of look-behind assertions that were true in the transition that
+    /// created this state.
+ fn look_have(&self) -> LookSet { + self.repr().look_have() + } + + /// The set of look-around (both behind and ahead) assertions that appear + /// at least once in this state's set of NFA states. + fn look_need(&self) -> LookSet { + self.repr().look_need() + } + + /// Mutate the set of look-behind assertions that were true in the + /// transition that created this state. + fn set_look_have(&mut self, mut set: impl FnMut(LookSet) -> LookSet) { + set(self.look_have()).write_repr(&mut self.0[1..]); + } + + /// Mutate the set of look-around (both behind and ahead) assertions that + /// appear at least once in this state's set of NFA states. + fn set_look_need(&mut self, mut set: impl FnMut(LookSet) -> LookSet) { + set(self.look_need()).write_repr(&mut self.0[5..]); + } + + /// Add a pattern ID to this state. All match states must have at least + /// one pattern ID associated with it. + /// + /// Callers must never add duplicative pattern IDs. + /// + /// The order in which patterns are added must correspond to the order + /// in which patterns are reported as matches. + fn add_match_pattern_id(&mut self, pid: PatternID) { + // As a (somewhat small) space saving optimization, in the case where + // a matching state has exactly one pattern ID, PatternID::ZERO, we do + // not write either the pattern ID or the number of patterns encoded. + // Instead, all we do is set the 'is_match' bit on this state. Overall, + // this saves 8 bytes per match state for the overwhelming majority of + // match states. + // + // In order to know whether pattern IDs need to be explicitly read or + // not, we use another internal-only bit, 'has_pattern_ids', to + // indicate whether they have been explicitly written or not. + if !self.repr().has_pattern_ids() { + if pid == PatternID::ZERO { + self.set_is_match(); + return; + } + // Make room for 'close_match_pattern_ids' to write the total + // number of pattern IDs written. + self.0.extend(core::iter::repeat(0).take(PatternID::SIZE)); + self.set_has_pattern_ids(); + // If this was already a match state, then the only way that's + // possible when the state doesn't have pattern IDs is if + // PatternID::ZERO was added by the caller previously. In this + // case, we are now adding a non-ZERO pattern ID after it, in + // which case, we want to make sure to represent ZERO explicitly + // now. + if self.repr().is_match() { + write_u32(self.0, 0) + } else { + // Otherwise, just make sure the 'is_match' bit is set. + self.set_is_match(); + } + } + write_u32(self.0, pid.as_u32()); + } + + /// Indicate that no more pattern IDs will be added to this state. + /// + /// Once this is called, callers must not call it or 'add_match_pattern_id' + /// again. + /// + /// This should not be exposed explicitly outside of this module. It + /// should be called only when converting a StateBuilderMatches into a + /// StateBuilderNFA. + fn close_match_pattern_ids(&mut self) { + // If we never wrote any pattern IDs, then there's nothing to do here. + if !self.repr().has_pattern_ids() { + return; + } + let patsize = PatternID::SIZE; + let pattern_bytes = self.0.len() - 13; + // Every pattern ID uses 4 bytes, so number of bytes should be + // divisible by 4. + assert_eq!(pattern_bytes % patsize, 0); + // This unwrap is OK since we are guaranteed that the maximum number + // of possible patterns fits into a u32. + let count32 = u32::try_from(pattern_bytes / patsize).unwrap(); + wire::NE::write_u32(count32, &mut self.0[9..13]); + } + + /// Add an NFA state ID to this state. 
The order in which NFA states are
+    /// added matters. It is the caller's responsibility to ensure that
+    /// duplicate NFA state IDs are not added.
+    fn add_nfa_state_id(&mut self, prev: &mut StateID, sid: StateID) {
+        let delta = sid.as_i32() - prev.as_i32();
+        write_vari32(self.0, delta);
+        *prev = sid;
+    }
+
+    /// Return a read-only view of this state's representation.
+    fn repr(&self) -> Repr<'_> {
+        Repr(self.0.as_slice())
+    }
+}
+
+/// Write a signed 32-bit integer using zig-zag encoding.
+///
+/// https://developers.google.com/protocol-buffers/docs/encoding#varints
+fn write_vari32(data: &mut Vec<u8>, n: i32) {
+    let mut un = n.to_bits() << 1;
+    if n < 0 {
+        un = !un;
+    }
+    write_varu32(data, un)
+}
+
+/// Read a signed 32-bit integer using zig-zag encoding. Also, return the
+/// number of bytes read.
+///
+/// https://developers.google.com/protocol-buffers/docs/encoding#varints
+fn read_vari32(data: &[u8]) -> (i32, usize) {
+    let (un, i) = read_varu32(data);
+    let mut n = i32::from_bits(un >> 1);
+    if un & 1 != 0 {
+        n = !n;
+    }
+    (n, i)
+}
+
+/// Write an unsigned 32-bit integer as a varint. In essence, `n` is written
+/// as a sequence of bytes where all bytes except for the last one have the
+/// most significant bit set. The least significant 7 bits correspond to the
+/// actual bits of `n`. So in the worst case, a varint uses 5 bytes, but in
+/// very common cases, it uses fewer than 4.
+///
+/// https://developers.google.com/protocol-buffers/docs/encoding#varints
+fn write_varu32(data: &mut Vec<u8>, mut n: u32) {
+    while n >= 0b1000_0000 {
+        data.push(n.low_u8() | 0b1000_0000);
+        n >>= 7;
+    }
+    data.push(n.low_u8());
+}
+
+/// Read an unsigned 32-bit varint. Also, return the number of bytes read.
+///
+/// https://developers.google.com/protocol-buffers/docs/encoding#varints
+fn read_varu32(data: &[u8]) -> (u32, usize) {
+    // N.B. We can assume correctness here since we know that all var-u32 are
+    // written with write_varu32. Hence, the 'as' uses and unchecked arithmetic
+    // is all okay.
+    let mut n: u32 = 0;
+    let mut shift: u32 = 0;
+    for (i, &b) in data.iter().enumerate() {
+        if b < 0b1000_0000 {
+            return (n | (u32::from(b) << shift), i + 1);
+        }
+        n |= (u32::from(b) & 0b0111_1111) << shift;
+        shift += 7;
+    }
+    (0, 0)
+}
+
+/// Push a native-endian encoded `n` on to `dst`.
+fn write_u32(dst: &mut Vec<u8>, n: u32) {
+    use crate::util::wire::NE;
+
+    let start = dst.len();
+    dst.extend(core::iter::repeat(0).take(mem::size_of::<u32>()));
+    NE::write_u32(n, &mut dst[start..]);
+}
+
+#[cfg(test)]
+mod tests {
+    use alloc::vec;
+
+    use quickcheck::quickcheck;
+
+    use super::*;
+
+    #[cfg(not(miri))]
+    quickcheck! {
+        fn prop_state_read_write_nfa_state_ids(sids: Vec<StateID>) -> bool {
+            // Builder states do not permit duplicate IDs.
+            let sids = dedup_state_ids(sids);
+
+            let mut b = StateBuilderEmpty::new().into_matches().into_nfa();
+            for &sid in &sids {
+                b.add_nfa_state_id(sid);
+            }
+            let s = b.to_state();
+            let mut got = vec![];
+            s.iter_nfa_state_ids(|sid| got.push(sid));
+            got == sids
+        }
+
+        fn prop_state_read_write_pattern_ids(pids: Vec<PatternID>) -> bool {
+            // Builder states do not permit duplicate IDs.
+            let pids = dedup_pattern_ids(pids);
+
+            let mut b = StateBuilderEmpty::new().into_matches();
+            for &pid in &pids {
+                b.add_match_pattern_id(pid);
+            }
+            let s = b.into_nfa().to_state();
+            let mut got = vec![];
+            s.iter_match_pattern_ids(|pid| got.push(pid));
+            got == pids
+        }
+
+        fn prop_state_read_write_nfa_state_and_pattern_ids(
+            sids: Vec<StateID>,
+            pids: Vec<PatternID>
+        ) -> bool {
+            // Builder states do not permit duplicate IDs.
+            let sids = dedup_state_ids(sids);
+            let pids = dedup_pattern_ids(pids);
+
+            let mut b = StateBuilderEmpty::new().into_matches();
+            for &pid in &pids {
+                b.add_match_pattern_id(pid);
+            }
+
+            let mut b = b.into_nfa();
+            for &sid in &sids {
+                b.add_nfa_state_id(sid);
+            }
+
+            let s = b.to_state();
+            let mut got_pids = vec![];
+            s.iter_match_pattern_ids(|pid| got_pids.push(pid));
+            let mut got_sids = vec![];
+            s.iter_nfa_state_ids(|sid| got_sids.push(sid));
+            got_pids == pids && got_sids == sids
+        }
+    }
+
+    quickcheck! {
+        fn prop_read_write_varu32(n: u32) -> bool {
+            let mut buf = vec![];
+            write_varu32(&mut buf, n);
+            let (got, nread) = read_varu32(&buf);
+            nread == buf.len() && got == n
+        }
+
+        fn prop_read_write_vari32(n: i32) -> bool {
+            let mut buf = vec![];
+            write_vari32(&mut buf, n);
+            let (got, nread) = read_vari32(&buf);
+            nread == buf.len() && got == n
+        }
+    }
+
+    #[cfg(not(miri))]
+    fn dedup_state_ids(sids: Vec<StateID>) -> Vec<StateID> {
+        let mut set = alloc::collections::BTreeSet::new();
+        let mut deduped = vec![];
+        for sid in sids {
+            if set.contains(&sid) {
+                continue;
+            }
+            set.insert(sid);
+            deduped.push(sid);
+        }
+        deduped
+    }
+
+    #[cfg(not(miri))]
+    fn dedup_pattern_ids(pids: Vec<PatternID>) -> Vec<PatternID> {
+        let mut set = alloc::collections::BTreeSet::new();
+        let mut deduped = vec![];
+        for pid in pids {
+            if set.contains(&pid) {
+                continue;
+            }
+            set.insert(pid);
+            deduped.push(pid);
+        }
+        deduped
+    }
+}
diff --git a/vendor/regex-automata/src/util/empty.rs b/vendor/regex-automata/src/util/empty.rs
new file mode 100644
index 00000000000000..e16af3b6e596da
--- /dev/null
+++ b/vendor/regex-automata/src/util/empty.rs
@@ -0,0 +1,265 @@
+/*!
+This module provides helper routines for dealing with zero-width matches.
+
+The main problem being solved here is this:
+
+1. The caller wants to search something that they know is valid UTF-8, such
+as a Rust `&str`.
+2. The regex used by the caller can match the empty string. For example, `a*`.
+3. The caller should never get match offsets returned that occur within the
+encoding of a UTF-8 codepoint. It is logically incorrect, and also means that,
+e.g., slicing the `&str` at those offsets will lead to a panic.
+
+So the question here is, how do we prevent the caller from getting match
+offsets that split a codepoint? For example, strictly speaking, the regex `a*`
+matches `☃` at the positions `[0, 0]`, `[1, 1]`, `[2, 2]` and `[3, 3]` since
+the UTF-8 encoding of `☃` is `\xE2\x98\x83`. In particular, the `NFA` that
+underlies all of the matching engines in this crate doesn't have anything in
+its state graph that prevents matching between UTF-8 code units. Indeed, any
+engine derived from the `NFA` will match at those positions by virtue of the
+fact that the `NFA` is byte oriented. That is, its transitions are defined over
+bytes and the matching engines work by proceeding one byte at a time.
+
+(An alternative architecture would be to define the transitions in an `NFA`
+over codepoints, or `char`. And then make the matching engines proceed by
+decoding one codepoint at a time. 
This is a viable strategy, but it doesn't +work for DFA matching engines because designing a fast and memory efficient +transition table for an alphabet as large as Unicode is quite difficult. More +to the point, the top-level `regex` crate supports matching on arbitrary bytes +when Unicode mode is disabled and one is searching a `&[u8]`. So in that case, +you can't just limit yourself to decoding codepoints and matching those. You +really do need to be able to follow byte oriented transitions on the `NFA`.) + +In an older version of the regex crate, we handled this case not in the regex +engine, but in the iterators over matches. Namely, since this case only arises +when the match is empty, we "just" incremented the next starting position +of the search by `N`, where `N` is the length of the codepoint encoded at +the current position. The alternative or more "natural" solution of just +incrementing by `1` would result in executing a search of `a*` on `☃` like +this: + +* Start search at `0`. +* Found match at `[0, 0]`. +* Next start position is `0`. +* To avoid an infinite loop, since it's an empty match, increment by `1`. +* Start search at `1`. +* Found match at `[1, 1]`. Oops. + +But if we instead incremented by `3` (the length in bytes of `☃`), then we get +the following: + +* Start search at `0`. +* Found match at `[0, 0]`. +* Next start position is `0`. +* To avoid an infinite loop, since it's an empty match, increment by `3`. +* Start search at `3`. +* Found match at `[3, 3]`. + +And we get the correct result. But does this technique work in all cases? +Crucially, it requires that a zero-width match that splits a codepoint never +occurs beyond the starting position of the search. Because if it did, merely +incrementing the start position by the number of bytes in the codepoint at +the current position wouldn't be enough. A zero-width match could just occur +anywhere. It turns out that it is _almost_ true. We can convince ourselves by +looking at all possible patterns that can match the empty string: + +* Patterns like `a*`, `a{0}`, `(?:)`, `a|` and `|a` all unconditionally match +the empty string. That is, assuming there isn't an `a` at the current position, +they will all match the empty string at the start of a search. There is no way +to move past it because any other match would not be "leftmost." +* `^` only matches at the beginning of the haystack, where the start position +is `0`. Since we know we're searching valid UTF-8 (if it isn't valid UTF-8, +then this entire problem goes away because it implies your string type supports +invalid UTF-8 and thus must deal with offsets that not only split a codepoint +but occur in entirely invalid UTF-8 somehow), it follows that `^` never matches +between the code units of a codepoint because the start of a valid UTF-8 string +is never within the encoding of a codepoint. +* `$` basically the same logic as `^`, but for the end of a string. A valid +UTF-8 string can't have an incomplete codepoint at the end of it. +* `(?m:^)` follows similarly to `^`, but it can match immediately following +a `\n`. However, since a `\n` is always a codepoint itself and can never +appear within a codepoint, it follows that the position immediately following +a `\n` in a string that is valid UTF-8 is guaranteed to not be between the +code units of another codepoint. (One caveat here is that the line terminator +for multi-line anchors can now be changed to any arbitrary byte, including +things like `\x98` which might occur within a codepoint. 
However, this wasn't
+supported by the old regex crate. If it were, it would pose the same problems as
+`(?-u:\B)`, as we'll discuss below.)
+* `(?m:$)` a similar argument as for `(?m:^)`. The only difference is that a
+`(?m:$)` matches just before a `\n`. But the same argument applies.
+* `(?Rm:^)` and `(?Rm:$)` weren't supported by the old regex crate, but the
+CRLF aware line anchors follow a similar argument as for `(?m:^)` and `(?m:$)`.
+Namely, since they only ever match at a boundary where one side is either a
+`\r` or a `\n`, neither of which can occur within a codepoint.
+* `\b` only matches at positions where both sides are valid codepoints, so
+this cannot split a codepoint.
+* `\B`, like `\b`, also only matches at positions where both sides are valid
+codepoints. So this cannot split a codepoint either.
+* `(?-u:\b)` matches only at positions where at least one side of it is an ASCII
+word byte. Since ASCII bytes cannot appear as code units in non-ASCII codepoints
+(one of the many amazing qualities of UTF-8), it follows that this too cannot
+split a codepoint.
+* `(?-u:\B)` finally represents a problem. It can match between *any* two
+bytes that are either both word bytes or non-word bytes. Since code units like
+`\xE2` and `\x98` (from the UTF-8 encoding of `☃`) are both non-word bytes,
+`(?-u:\B)` will match at the position between them.
+
+Thus, our approach of incrementing one codepoint at a time after seeing an
+empty match is flawed because `(?-u:\B)` can result in an empty match that
+splits a codepoint at a position past the starting point of a search. For
+example, searching `(?-u:\B)` on `a☃` would produce the following matches: `[2,
+2]`, `[3, 3]` and `[4, 4]`. The positions at `0` and `1` don't match because
+they correspond to word boundaries since `a` is an ASCII word byte.
+
+So what did the old regex crate do to avoid this? It banned `(?-u:\B)` from
+regexes that could match `&str`. That might sound extreme, but a lot of other
+things were banned too. For example, all of `(?-u:.)`, `(?-u:[^a])` and
+`(?-u:\W)` can match invalid UTF-8 too, including individual code units within a
+codepoint. The key difference is that those expressions could never produce an
+empty match. That ban happens when translating an `Ast` to an `Hir`, because
+that process reasons about whether an `Hir` can produce *non-empty* matches
+at invalid UTF-8 boundaries. Bottom line though is that we side-stepped the
+`(?-u:\B)` issue by banning it.
+
+If banning `(?-u:\B)` were the only issue with the old regex crate's approach,
+then I probably would have kept it. `\B` is rarely used, so it's not such a big
+deal to have to work-around it. However, the problem with the above approach
+is that it doesn't compose. The logic for avoiding splitting a codepoint only
+lived in the iterator, which means if anyone wants to implement their own
+iterator over regex matches, they have to deal with this extremely subtle edge
+case to get full correctness.
+
+Instead, in this crate, we take the approach of pushing this complexity down
+to the lowest layers of each regex engine. The approach is pretty simple:
+
+* If this corner case doesn't apply, don't do anything. (For example, if UTF-8
+mode isn't enabled or if the regex cannot match the empty string.)
+* If an empty match is reported, explicitly check if it splits a codepoint.
+* If it doesn't, we're done, return the match.
+* If it does, then ignore the match and re-run the search.
+* Repeat the above process until the end of the haystack is reached or a match
+is found that doesn't split a codepoint or isn't zero width.
+
+And that's pretty much what this module provides. Every regex engine uses these
+methods in their lowest level public APIs, but just above the layer where
+their internal engine is used. That way, all regex engines can be arbitrarily
+composed without worrying about handling this case, and iterators don't need to
+handle it explicitly.
+
+(It turns out that a new feature I added, support for changing the line
+terminator in a regex to any arbitrary byte, also provokes the above problem.
+Namely, the byte could be invalid UTF-8 or a UTF-8 continuation byte. So that
+support would need to be limited or banned when UTF-8 mode is enabled, just
+like we did for `(?-u:\B)`. But thankfully our more robust approach in this
+crate handles that case just fine too.)
+*/
+
+use crate::util::search::{Input, MatchError};
+
+#[cold]
+#[inline(never)]
+pub(crate) fn skip_splits_fwd<T, F>(
+    input: &Input<'_>,
+    init_value: T,
+    match_offset: usize,
+    find: F,
+) -> Result<Option<T>, MatchError>
+where
+    F: FnMut(&Input<'_>) -> Result<Option<(T, usize)>, MatchError>,
+{
+    skip_splits(true, input, init_value, match_offset, find)
+}
+
+#[cold]
+#[inline(never)]
+pub(crate) fn skip_splits_rev<T, F>(
+    input: &Input<'_>,
+    init_value: T,
+    match_offset: usize,
+    find: F,
+) -> Result<Option<T>, MatchError>
+where
+    F: FnMut(&Input<'_>) -> Result<Option<(T, usize)>, MatchError>,
+{
+    skip_splits(false, input, init_value, match_offset, find)
+}
+
+fn skip_splits<T, F>(
+    forward: bool,
+    input: &Input<'_>,
+    init_value: T,
+    mut match_offset: usize,
+    mut find: F,
+) -> Result<Option<T>, MatchError>
+where
+    F: FnMut(&Input<'_>) -> Result<Option<(T, usize)>, MatchError>,
+{
+    // If our config says to do an anchored search, then we're definitely
+    // done. We just need to determine whether we have a valid match or
+    // not. If we don't, then we're not allowed to continue, so we report
+    // no match.
+    //
+    // This is actually quite a subtle correctness thing. The key here is
+    // that if we got an empty match that splits a codepoint after doing an
+    // anchored search in UTF-8 mode, then that implies that we must have
+    // *started* the search at a location that splits a codepoint. This
+    // follows from the fact that if a match is reported from an anchored
+    // search, then the start offset of the match *must* match the start
+    // offset of the search.
+    //
+    // It also follows that no other non-empty match is possible. For
+    // example, you might write a regex like '(?:)|SOMETHING' and start its
+    // search in the middle of a codepoint. The first branch is an empty
+    // regex that will bubble up a match at the first position, and then
+    // get rejected here and report no match. But what if 'SOMETHING' could
+    // have matched? We reason that such a thing is impossible, because
+    // if it does, it must report a match that starts in the middle of a
+    // codepoint. This in turn implies that a match is reported whose span
+    // does not correspond to valid UTF-8, and this breaks the promise
+    // made when UTF-8 mode is enabled. (That promise *can* be broken, for
+    // example, by enabling UTF-8 mode but building an NFA by hand that
+    // produces non-empty matches that span invalid UTF-8. This is an unchecked
+    // but documented precondition violation of UTF-8 mode, and is documented
+    // to have unspecified behavior.)
+ // + // I believe this actually means that if an anchored search is run, and + // UTF-8 mode is enabled and the start position splits a codepoint, + // then it is correct to immediately report no match without even + // executing the regex engine. But it doesn't really seem worth writing + // out that case in every regex engine to save a tiny bit of work in an + // extremely pathological case, so we just handle it here. + if input.get_anchored().is_anchored() { + return Ok(if input.is_char_boundary(match_offset) { + Some(init_value) + } else { + None + }); + } + // Otherwise, we have an unanchored search, so just keep looking for + // matches until we have one that does not split a codepoint or we hit + // EOI. + let mut value = init_value; + let mut input = input.clone(); + while !input.is_char_boundary(match_offset) { + if forward { + // The unwrap is OK here because overflowing usize while + // iterating over a slice is impossible, at it would require + // a slice of length greater than isize::MAX, which is itself + // impossible. + input.set_start(input.start().checked_add(1).unwrap()); + } else { + input.set_end(match input.end().checked_sub(1) { + None => return Ok(None), + Some(end) => end, + }); + } + match find(&input)? { + None => return Ok(None), + Some((new_value, new_match_end)) => { + value = new_value; + match_offset = new_match_end; + } + } + } + Ok(Some(value)) +} diff --git a/vendor/regex-automata/src/util/escape.rs b/vendor/regex-automata/src/util/escape.rs new file mode 100644 index 00000000000000..9c5b72e9d1ecec --- /dev/null +++ b/vendor/regex-automata/src/util/escape.rs @@ -0,0 +1,84 @@ +/*! +Provides convenience routines for escaping raw bytes. + +Since this crate tends to deal with `&[u8]` everywhere and the default +`Debug` implementation just shows decimal integers, it makes debugging those +representations quite difficult. This module provides types that show `&[u8]` +as if it were a string, with invalid UTF-8 escaped into its byte-by-byte hex +representation. +*/ + +use crate::util::utf8; + +/// Provides a convenient `Debug` implementation for a `u8`. +/// +/// The `Debug` impl treats the byte as an ASCII, and emits a human readable +/// representation of it. If the byte isn't ASCII, then it's emitted as a hex +/// escape sequence. +#[derive(Clone, Copy)] +pub struct DebugByte(pub u8); + +impl core::fmt::Debug for DebugByte { + fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { + // Special case ASCII space. It's too hard to read otherwise, so + // put quotes around it. I sometimes wonder whether just '\x20' would + // be better... + if self.0 == b' ' { + return write!(f, "' '"); + } + // 10 bytes is enough to cover any output from ascii::escape_default. + let mut bytes = [0u8; 10]; + let mut len = 0; + for (i, mut b) in core::ascii::escape_default(self.0).enumerate() { + // capitalize \xab to \xAB + if i >= 2 && b'a' <= b && b <= b'f' { + b -= 32; + } + bytes[len] = b; + len += 1; + } + write!(f, "{}", core::str::from_utf8(&bytes[..len]).unwrap()) + } +} + +/// Provides a convenient `Debug` implementation for `&[u8]`. +/// +/// This generally works best when the bytes are presumed to be mostly UTF-8, +/// but will work for anything. For any bytes that aren't UTF-8, they are +/// emitted as hex escape sequences. 
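+///
+/// # Example
+///
+/// A minimal, illustrative sketch; the byte string and expected output are
+/// hypothetical, and the snippet is not compiled as a doctest since this
+/// type may not be reachable from outside the crate.
+///
+/// ```ignore
+/// let got = format!("{:?}", DebugHaystack(b"snow\xFF\xE2\x98\x83"));
+/// // ASCII stays readable, the invalid byte becomes a lowercase hex escape,
+/// // and the trailing valid UTF-8 decodes to a snowman.
+/// assert_eq!(r#""snow\xff☃""#, got);
+/// ```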
+pub struct DebugHaystack<'a>(pub &'a [u8]); + +impl<'a> core::fmt::Debug for DebugHaystack<'a> { + fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { + write!(f, "\"")?; + // This is a sad re-implementation of a similar impl found in bstr. + let mut bytes = self.0; + while let Some(result) = utf8::decode(bytes) { + let ch = match result { + Ok(ch) => ch, + Err(byte) => { + write!(f, r"\x{byte:02x}")?; + bytes = &bytes[1..]; + continue; + } + }; + bytes = &bytes[ch.len_utf8()..]; + match ch { + '\0' => write!(f, "\\0")?, + // ASCII control characters except \0, \n, \r, \t + '\x01'..='\x08' + | '\x0b' + | '\x0c' + | '\x0e'..='\x19' + | '\x7f' => { + write!(f, "\\x{:02x}", u32::from(ch))?; + } + '\n' | '\r' | '\t' | _ => { + write!(f, "{}", ch.escape_debug())?; + } + } + } + write!(f, "\"")?; + Ok(()) + } +} diff --git a/vendor/regex-automata/src/util/int.rs b/vendor/regex-automata/src/util/int.rs new file mode 100644 index 00000000000000..b726e93f858e4a --- /dev/null +++ b/vendor/regex-automata/src/util/int.rs @@ -0,0 +1,246 @@ +/*! +This module provides several integer oriented traits for converting between +both fixed size integers and integers whose size varies based on the target +(like `usize`). + +The driving design principle of this module is to attempt to centralize as many +`as` casts as possible here. And in particular, we separate casts into two +buckets: + +* Casts that we use for their truncating behavior. In this case, we use more +descriptive names, like `low_u32` and `high_u32`. +* Casts that we use for converting back-and-forth between `usize`. These +conversions are generally necessary because we often store indices in different +formats to save on memory, which requires converting to and from `usize`. In +this case, we very specifically do not want to overflow, and so the methods +defined here will panic if the `as` cast would be lossy in debug mode. (A +normal `as` cast will never panic!) + +For `as` casts between raw pointers, we use `cast`, so `as` isn't needed there. + +For regex engines, floating point is just never used, so we don't have to worry +about `as` casts for those. + +Otherwise, this module pretty much covers all of our `as` needs except for one +thing: const contexts. There are a select few places in this crate where we +still need to use `as` because const functions on traits aren't stable yet. +If we wind up significantly expanding our const footprint in this crate, it +might be worth defining free functions to handle those cases. But at the time +of writing, that just seemed like too much ceremony. Instead, I comment each +such use of `as` in a const context with a "fixme" notice. + +NOTE: for simplicity, we don't take target pointer width into account here for +`usize` conversions. Since we currently only panic in debug mode, skipping the +check when it can be proven it isn't needed at compile time doesn't really +matter. Now, if we wind up wanting to do as many checks as possible in release +mode, then we would want to skip those when we know the conversions are always +non-lossy. + +NOTE: this module isn't an exhaustive API. For example, we still use things +like `u64::from` where possible, or even `usize::try_from()` for when we do +explicitly want to panic or when we want to return an error for overflow. +*/ + +// We define a little more than what we need, but I'd rather just have +// everything via a consistent and uniform API then have holes. 
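+//
+// As a rough illustration (a hypothetical call site, not code used elsewhere
+// in this file), the traits below are meant to be used along these lines:
+//
+//     use crate::util::int::U32;
+//
+//     fn lookup(table: &[u8], sid: u32) -> u8 {
+//         // In debug builds this panics if the conversion would be lossy;
+//         // in release builds it behaves like a plain `as` cast.
+//         table[sid.as_usize()]
+//     }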
+#![allow(dead_code)] + +pub(crate) trait U8 { + fn as_usize(self) -> usize; +} + +impl U8 for u8 { + fn as_usize(self) -> usize { + usize::from(self) + } +} + +pub(crate) trait U16 { + fn as_usize(self) -> usize; + fn low_u8(self) -> u8; + fn high_u8(self) -> u8; +} + +impl U16 for u16 { + fn as_usize(self) -> usize { + usize::from(self) + } + + fn low_u8(self) -> u8 { + self as u8 + } + + fn high_u8(self) -> u8 { + (self >> 8) as u8 + } +} + +pub(crate) trait U32 { + fn as_usize(self) -> usize; + fn low_u8(self) -> u8; + fn low_u16(self) -> u16; + fn high_u16(self) -> u16; +} + +impl U32 for u32 { + fn as_usize(self) -> usize { + #[cfg(debug_assertions)] + { + usize::try_from(self).expect("u32 overflowed usize") + } + #[cfg(not(debug_assertions))] + { + self as usize + } + } + + fn low_u8(self) -> u8 { + self as u8 + } + + fn low_u16(self) -> u16 { + self as u16 + } + + fn high_u16(self) -> u16 { + (self >> 16) as u16 + } +} + +pub(crate) trait U64 { + fn as_usize(self) -> usize; + fn low_u8(self) -> u8; + fn low_u16(self) -> u16; + fn low_u32(self) -> u32; + fn high_u32(self) -> u32; +} + +impl U64 for u64 { + fn as_usize(self) -> usize { + #[cfg(debug_assertions)] + { + usize::try_from(self).expect("u64 overflowed usize") + } + #[cfg(not(debug_assertions))] + { + self as usize + } + } + + fn low_u8(self) -> u8 { + self as u8 + } + + fn low_u16(self) -> u16 { + self as u16 + } + + fn low_u32(self) -> u32 { + self as u32 + } + + fn high_u32(self) -> u32 { + (self >> 32) as u32 + } +} + +pub(crate) trait I32 { + fn as_usize(self) -> usize; + fn to_bits(self) -> u32; + fn from_bits(n: u32) -> i32; +} + +impl I32 for i32 { + fn as_usize(self) -> usize { + #[cfg(debug_assertions)] + { + usize::try_from(self).expect("i32 overflowed usize") + } + #[cfg(not(debug_assertions))] + { + self as usize + } + } + + fn to_bits(self) -> u32 { + self as u32 + } + + fn from_bits(n: u32) -> i32 { + n as i32 + } +} + +pub(crate) trait Usize { + fn as_u8(self) -> u8; + fn as_u16(self) -> u16; + fn as_u32(self) -> u32; + fn as_u64(self) -> u64; +} + +impl Usize for usize { + fn as_u8(self) -> u8 { + #[cfg(debug_assertions)] + { + u8::try_from(self).expect("usize overflowed u8") + } + #[cfg(not(debug_assertions))] + { + self as u8 + } + } + + fn as_u16(self) -> u16 { + #[cfg(debug_assertions)] + { + u16::try_from(self).expect("usize overflowed u16") + } + #[cfg(not(debug_assertions))] + { + self as u16 + } + } + + fn as_u32(self) -> u32 { + #[cfg(debug_assertions)] + { + u32::try_from(self).expect("usize overflowed u32") + } + #[cfg(not(debug_assertions))] + { + self as u32 + } + } + + fn as_u64(self) -> u64 { + #[cfg(debug_assertions)] + { + u64::try_from(self).expect("usize overflowed u64") + } + #[cfg(not(debug_assertions))] + { + self as u64 + } + } +} + +// Pointers aren't integers, but we convert pointers to integers to perform +// offset arithmetic in some places. (And no, we don't convert the integers +// back to pointers.) So add 'as_usize' conversions here too for completeness. +// +// These 'as' casts are actually okay because they're always non-lossy. But the +// idea here is to just try and remove as much 'as' as possible, particularly +// in this crate where we are being really paranoid about offsets and making +// sure we don't panic on inputs that might be untrusted. This way, the 'as' +// casts become easier to audit if they're all in one place, even when some of +// them are actually okay 100% of the time. 
+ +pub(crate) trait Pointer { + fn as_usize(self) -> usize; +} + +impl Pointer for *const T { + fn as_usize(self) -> usize { + self as usize + } +} diff --git a/vendor/regex-automata/src/util/interpolate.rs b/vendor/regex-automata/src/util/interpolate.rs new file mode 100644 index 00000000000000..2b851aa8f9caf9 --- /dev/null +++ b/vendor/regex-automata/src/util/interpolate.rs @@ -0,0 +1,576 @@ +/*! +Provides routines for interpolating capture group references. + +That is, if a replacement string contains references like `$foo` or `${foo1}`, +then they are replaced with the corresponding capture values for the groups +named `foo` and `foo1`, respectively. Similarly, syntax like `$1` and `${1}` +is supported as well, with `1` corresponding to a capture group index and not +a name. + +This module provides the free functions [`string`] and [`bytes`], which +interpolate Rust Unicode strings and byte strings, respectively. + +# Format + +These routines support two different kinds of capture references: unbraced and +braced. + +For the unbraced format, the format supported is `$ref` where `name` can be +any character in the class `[0-9A-Za-z_]`. `ref` is always the longest +possible parse. So for example, `$1a` corresponds to the capture group named +`1a` and not the capture group at index `1`. If `ref` matches `^[0-9]+$`, then +it is treated as a capture group index itself and not a name. + +For the braced format, the format supported is `${ref}` where `ref` can be any +sequence of bytes except for `}`. If no closing brace occurs, then it is not +considered a capture reference. As with the unbraced format, if `ref` matches +`^[0-9]+$`, then it is treated as a capture group index and not a name. + +The braced format is useful for exerting precise control over the name of the +capture reference. For example, `${1}a` corresponds to the capture group +reference `1` followed by the letter `a`, where as `$1a` (as mentioned above) +corresponds to the capture group reference `1a`. The braced format is also +useful for expressing capture group names that use characters not supported by +the unbraced format. For example, `${foo[bar].baz}` refers to the capture group +named `foo[bar].baz`. + +If a capture group reference is found and it does not refer to a valid capture +group, then it will be replaced with the empty string. + +To write a literal `$`, use `$$`. + +To be clear, and as exhibited via the type signatures in the routines in this +module, it is impossible for a replacement string to be invalid. A replacement +string may not have the intended semantics, but the interpolation procedure +itself can never fail. +*/ + +use alloc::{string::String, vec::Vec}; + +use crate::util::memchr::memchr; + +/// Accepts a replacement string and interpolates capture references with their +/// corresponding values. +/// +/// `append` should be a function that appends the string value of a capture +/// group at a particular index to the string given. If the capture group +/// index is invalid, then nothing should be appended. +/// +/// `name_to_index` should be a function that maps a capture group name to a +/// capture group index. If the given name doesn't exist, then `None` should +/// be returned. +/// +/// Finally, `dst` is where the final interpolated contents should be written. +/// If `replacement` contains no capture group references, then `dst` will be +/// equivalent to `replacement`. +/// +/// See the [module documentation](self) for details about the format +/// supported. 
+/// +/// # Example +/// +/// ``` +/// use regex_automata::util::interpolate; +/// +/// let mut dst = String::new(); +/// interpolate::string( +/// "foo $bar baz", +/// |index, dst| { +/// if index == 0 { +/// dst.push_str("BAR"); +/// } +/// }, +/// |name| { +/// if name == "bar" { +/// Some(0) +/// } else { +/// None +/// } +/// }, +/// &mut dst, +/// ); +/// assert_eq!("foo BAR baz", dst); +/// ``` +pub fn string( + mut replacement: &str, + mut append: impl FnMut(usize, &mut String), + mut name_to_index: impl FnMut(&str) -> Option, + dst: &mut String, +) { + while !replacement.is_empty() { + match memchr(b'$', replacement.as_bytes()) { + None => break, + Some(i) => { + dst.push_str(&replacement[..i]); + replacement = &replacement[i..]; + } + } + // Handle escaping of '$'. + if replacement.as_bytes().get(1).map_or(false, |&b| b == b'$') { + dst.push_str("$"); + replacement = &replacement[2..]; + continue; + } + debug_assert!(!replacement.is_empty()); + let cap_ref = match find_cap_ref(replacement.as_bytes()) { + Some(cap_ref) => cap_ref, + None => { + dst.push_str("$"); + replacement = &replacement[1..]; + continue; + } + }; + replacement = &replacement[cap_ref.end..]; + match cap_ref.cap { + Ref::Number(i) => append(i, dst), + Ref::Named(name) => { + if let Some(i) = name_to_index(name) { + append(i, dst); + } + } + } + } + dst.push_str(replacement); +} + +/// Accepts a replacement byte string and interpolates capture references with +/// their corresponding values. +/// +/// `append` should be a function that appends the byte string value of a +/// capture group at a particular index to the byte string given. If the +/// capture group index is invalid, then nothing should be appended. +/// +/// `name_to_index` should be a function that maps a capture group name to a +/// capture group index. If the given name doesn't exist, then `None` should +/// be returned. +/// +/// Finally, `dst` is where the final interpolated contents should be written. +/// If `replacement` contains no capture group references, then `dst` will be +/// equivalent to `replacement`. +/// +/// See the [module documentation](self) for details about the format +/// supported. +/// +/// # Example +/// +/// ``` +/// use regex_automata::util::interpolate; +/// +/// let mut dst = vec![]; +/// interpolate::bytes( +/// b"foo $bar baz", +/// |index, dst| { +/// if index == 0 { +/// dst.extend_from_slice(b"BAR"); +/// } +/// }, +/// |name| { +/// if name == "bar" { +/// Some(0) +/// } else { +/// None +/// } +/// }, +/// &mut dst, +/// ); +/// assert_eq!(&b"foo BAR baz"[..], dst); +/// ``` +pub fn bytes( + mut replacement: &[u8], + mut append: impl FnMut(usize, &mut Vec), + mut name_to_index: impl FnMut(&str) -> Option, + dst: &mut Vec, +) { + while !replacement.is_empty() { + match memchr(b'$', replacement) { + None => break, + Some(i) => { + dst.extend_from_slice(&replacement[..i]); + replacement = &replacement[i..]; + } + } + // Handle escaping of '$'. 
+ if replacement.get(1).map_or(false, |&b| b == b'$') { + dst.push(b'$'); + replacement = &replacement[2..]; + continue; + } + debug_assert!(!replacement.is_empty()); + let cap_ref = match find_cap_ref(replacement) { + Some(cap_ref) => cap_ref, + None => { + dst.push(b'$'); + replacement = &replacement[1..]; + continue; + } + }; + replacement = &replacement[cap_ref.end..]; + match cap_ref.cap { + Ref::Number(i) => append(i, dst), + Ref::Named(name) => { + if let Some(i) = name_to_index(name) { + append(i, dst); + } + } + } + } + dst.extend_from_slice(replacement); +} + +/// `CaptureRef` represents a reference to a capture group inside some text. +/// The reference is either a capture group name or a number. +/// +/// It is also tagged with the position in the text following the +/// capture reference. +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +struct CaptureRef<'a> { + cap: Ref<'a>, + end: usize, +} + +/// A reference to a capture group in some text. +/// +/// e.g., `$2`, `$foo`, `${foo}`. +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +enum Ref<'a> { + Named(&'a str), + Number(usize), +} + +impl<'a> From<&'a str> for Ref<'a> { + fn from(x: &'a str) -> Ref<'a> { + Ref::Named(x) + } +} + +impl From for Ref<'static> { + fn from(x: usize) -> Ref<'static> { + Ref::Number(x) + } +} + +/// Parses a possible reference to a capture group name in the given text, +/// starting at the beginning of `replacement`. +/// +/// If no such valid reference could be found, None is returned. +/// +/// Note that this returns a "possible" reference because this routine doesn't +/// know whether the reference is to a valid group or not. If it winds up not +/// being a valid reference, then it should be replaced with the empty string. +fn find_cap_ref(replacement: &[u8]) -> Option> { + let mut i = 0; + let rep: &[u8] = replacement; + if rep.len() <= 1 || rep[0] != b'$' { + return None; + } + i += 1; + if rep[i] == b'{' { + return find_cap_ref_braced(rep, i + 1); + } + let mut cap_end = i; + while rep.get(cap_end).copied().map_or(false, is_valid_cap_letter) { + cap_end += 1; + } + if cap_end == i { + return None; + } + // We just verified that the range 0..cap_end is valid ASCII, so it must + // therefore be valid UTF-8. If we really cared, we could avoid this UTF-8 + // check via an unchecked conversion or by parsing the number straight from + // &[u8]. + let cap = core::str::from_utf8(&rep[i..cap_end]) + .expect("valid UTF-8 capture name"); + Some(CaptureRef { + cap: match cap.parse::() { + Ok(i) => Ref::Number(i), + Err(_) => Ref::Named(cap), + }, + end: cap_end, + }) +} + +/// Looks for a braced reference, e.g., `${foo1}`. This assumes that an opening +/// brace has been found at `i-1` in `rep`. This then looks for a closing +/// brace and returns the capture reference within the brace. +fn find_cap_ref_braced(rep: &[u8], mut i: usize) -> Option> { + assert_eq!(b'{', rep[i.checked_sub(1).unwrap()]); + let start = i; + while rep.get(i).map_or(false, |&b| b != b'}') { + i += 1; + } + if !rep.get(i).map_or(false, |&b| b == b'}') { + return None; + } + // When looking at braced names, we don't put any restrictions on the name, + // so it's possible it could be invalid UTF-8. But a capture group name + // can never be invalid UTF-8, so if we have invalid UTF-8, then we can + // safely return None. 
+ let cap = match core::str::from_utf8(&rep[start..i]) { + Err(_) => return None, + Ok(cap) => cap, + }; + Some(CaptureRef { + cap: match cap.parse::() { + Ok(i) => Ref::Number(i), + Err(_) => Ref::Named(cap), + }, + end: i + 1, + }) +} + +/// Returns true if and only if the given byte is allowed in a capture name +/// written in non-brace form. +fn is_valid_cap_letter(b: u8) -> bool { + matches!(b, b'0'..=b'9' | b'a'..=b'z' | b'A'..=b'Z' | b'_') +} + +#[cfg(test)] +mod tests { + use alloc::{string::String, vec, vec::Vec}; + + use super::{find_cap_ref, CaptureRef}; + + macro_rules! find { + ($name:ident, $text:expr) => { + #[test] + fn $name() { + assert_eq!(None, find_cap_ref($text.as_bytes())); + } + }; + ($name:ident, $text:expr, $capref:expr) => { + #[test] + fn $name() { + assert_eq!(Some($capref), find_cap_ref($text.as_bytes())); + } + }; + } + + macro_rules! c { + ($name_or_number:expr, $pos:expr) => { + CaptureRef { cap: $name_or_number.into(), end: $pos } + }; + } + + find!(find_cap_ref1, "$foo", c!("foo", 4)); + find!(find_cap_ref2, "${foo}", c!("foo", 6)); + find!(find_cap_ref3, "$0", c!(0, 2)); + find!(find_cap_ref4, "$5", c!(5, 2)); + find!(find_cap_ref5, "$10", c!(10, 3)); + // See https://github.com/rust-lang/regex/pull/585 + // for more on characters following numbers + find!(find_cap_ref6, "$42a", c!("42a", 4)); + find!(find_cap_ref7, "${42}a", c!(42, 5)); + find!(find_cap_ref8, "${42"); + find!(find_cap_ref9, "${42 "); + find!(find_cap_ref10, " $0 "); + find!(find_cap_ref11, "$"); + find!(find_cap_ref12, " "); + find!(find_cap_ref13, ""); + find!(find_cap_ref14, "$1-$2", c!(1, 2)); + find!(find_cap_ref15, "$1_$2", c!("1_", 3)); + find!(find_cap_ref16, "$x-$y", c!("x", 2)); + find!(find_cap_ref17, "$x_$y", c!("x_", 3)); + find!(find_cap_ref18, "${#}", c!("#", 4)); + find!(find_cap_ref19, "${Z[}", c!("Z[", 5)); + find!(find_cap_ref20, "${¾}", c!("¾", 5)); + find!(find_cap_ref21, "${¾a}", c!("¾a", 6)); + find!(find_cap_ref22, "${a¾}", c!("a¾", 6)); + find!(find_cap_ref23, "${☃}", c!("☃", 6)); + find!(find_cap_ref24, "${a☃}", c!("a☃", 7)); + find!(find_cap_ref25, "${☃a}", c!("☃a", 7)); + find!(find_cap_ref26, "${名字}", c!("名字", 9)); + + fn interpolate_string( + mut name_to_index: Vec<(&'static str, usize)>, + caps: Vec<&'static str>, + replacement: &str, + ) -> String { + name_to_index.sort_by_key(|x| x.0); + + let mut dst = String::new(); + super::string( + replacement, + |i, dst| { + if let Some(&s) = caps.get(i) { + dst.push_str(s); + } + }, + |name| -> Option { + name_to_index + .binary_search_by_key(&name, |x| x.0) + .ok() + .map(|i| name_to_index[i].1) + }, + &mut dst, + ); + dst + } + + fn interpolate_bytes( + mut name_to_index: Vec<(&'static str, usize)>, + caps: Vec<&'static str>, + replacement: &str, + ) -> String { + name_to_index.sort_by_key(|x| x.0); + + let mut dst = vec![]; + super::bytes( + replacement.as_bytes(), + |i, dst| { + if let Some(&s) = caps.get(i) { + dst.extend_from_slice(s.as_bytes()); + } + }, + |name| -> Option { + name_to_index + .binary_search_by_key(&name, |x| x.0) + .ok() + .map(|i| name_to_index[i].1) + }, + &mut dst, + ); + String::from_utf8(dst).unwrap() + } + + macro_rules! 
interp { + ($name:ident, $map:expr, $caps:expr, $hay:expr, $expected:expr $(,)*) => { + #[test] + fn $name() { + assert_eq!( + $expected, + interpolate_string($map, $caps, $hay), + "interpolate::string failed", + ); + assert_eq!( + $expected, + interpolate_bytes($map, $caps, $hay), + "interpolate::bytes failed", + ); + } + }; + } + + interp!( + interp1, + vec![("foo", 2)], + vec!["", "", "xxx"], + "test $foo test", + "test xxx test", + ); + + interp!( + interp2, + vec![("foo", 2)], + vec!["", "", "xxx"], + "test$footest", + "test", + ); + + interp!( + interp3, + vec![("foo", 2)], + vec!["", "", "xxx"], + "test${foo}test", + "testxxxtest", + ); + + interp!( + interp4, + vec![("foo", 2)], + vec!["", "", "xxx"], + "test$2test", + "test", + ); + + interp!( + interp5, + vec![("foo", 2)], + vec!["", "", "xxx"], + "test${2}test", + "testxxxtest", + ); + + interp!( + interp6, + vec![("foo", 2)], + vec!["", "", "xxx"], + "test $$foo test", + "test $foo test", + ); + + interp!( + interp7, + vec![("foo", 2)], + vec!["", "", "xxx"], + "test $foo", + "test xxx", + ); + + interp!( + interp8, + vec![("foo", 2)], + vec!["", "", "xxx"], + "$foo test", + "xxx test", + ); + + interp!( + interp9, + vec![("bar", 1), ("foo", 2)], + vec!["", "yyy", "xxx"], + "test $bar$foo", + "test yyyxxx", + ); + + interp!( + interp10, + vec![("bar", 1), ("foo", 2)], + vec!["", "yyy", "xxx"], + "test $ test", + "test $ test", + ); + + interp!( + interp11, + vec![("bar", 1), ("foo", 2)], + vec!["", "yyy", "xxx"], + "test ${} test", + "test test", + ); + + interp!( + interp12, + vec![("bar", 1), ("foo", 2)], + vec!["", "yyy", "xxx"], + "test ${ } test", + "test test", + ); + + interp!( + interp13, + vec![("bar", 1), ("foo", 2)], + vec!["", "yyy", "xxx"], + "test ${a b} test", + "test test", + ); + + interp!( + interp14, + vec![("bar", 1), ("foo", 2)], + vec!["", "yyy", "xxx"], + "test ${a} test", + "test test", + ); + + // This is a funny case where a braced reference is never closed, but + // within the unclosed braced reference, there is an unbraced reference. + // In this case, the braced reference is just treated literally and the + // unbraced reference is found. + interp!( + interp15, + vec![("bar", 1), ("foo", 2)], + vec!["", "yyy", "xxx"], + "test ${wat $bar ok", + "test ${wat yyy ok", + ); +} diff --git a/vendor/regex-automata/src/util/iter.rs b/vendor/regex-automata/src/util/iter.rs new file mode 100644 index 00000000000000..dcfa4a4cc3022f --- /dev/null +++ b/vendor/regex-automata/src/util/iter.rs @@ -0,0 +1,1022 @@ +/*! +Generic helpers for iteration of matches from a regex engine in a haystack. + +The principle type in this module is a [`Searcher`]. A `Searcher` provides +its own lower level iterator-like API in addition to methods for constructing +types that implement `Iterator`. The documentation for `Searcher` explains a +bit more about why these different APIs exist. + +Currently, this module supports iteration over any regex engine that works +with the [`HalfMatch`], [`Match`] or [`Captures`] types. +*/ + +#[cfg(feature = "alloc")] +use crate::util::captures::Captures; +use crate::util::search::{HalfMatch, Input, Match, MatchError}; + +/// A searcher for creating iterators and performing lower level iteration. +/// +/// This searcher encapsulates the logic required for finding all successive +/// non-overlapping matches in a haystack. In theory, iteration would look +/// something like this: +/// +/// 1. Setting the start position to `0`. +/// 2. Execute a regex search. If no match, end iteration. +/// 3. 
Report the match and set the start position to the end of the match. +/// 4. Go back to (2). +/// +/// And if this were indeed the case, it's likely that `Searcher` wouldn't +/// exist. Unfortunately, because a regex may match the empty string, the above +/// logic won't work for all possible regexes. Namely, if an empty match is +/// found, then step (3) would set the start position of the search to the +/// position it was at. Thus, iteration would never end. +/// +/// Instead, a `Searcher` knows how to detect these cases and forcefully +/// advance iteration in the case of an empty match that overlaps with a +/// previous match. +/// +/// If you know that your regex cannot match any empty string, then the simple +/// algorithm described above will work correctly. +/// +/// When possible, prefer the iterators defined on the regex engine you're +/// using. This tries to abstract over the regex engine and is thus a bit more +/// unwieldy to use. +/// +/// In particular, a `Searcher` is not itself an iterator. Instead, it provides +/// `advance` routines that permit moving the search along explicitly. It also +/// provides various routines, like [`Searcher::into_matches_iter`], that +/// accept a closure (representing how a regex engine executes a search) and +/// returns a conventional iterator. +/// +/// The lifetime parameters come from the [`Input`] type passed to +/// [`Searcher::new`]: +/// +/// * `'h` is the lifetime of the underlying haystack. +/// +/// # Searcher vs Iterator +/// +/// Why does a search type with "advance" APIs exist at all when we also have +/// iterators? Unfortunately, the reasoning behind this split is a complex +/// combination of the following things: +/// +/// 1. While many of the regex engines expose their own iterators, it is also +/// nice to expose this lower level iteration helper because it permits callers +/// to provide their own `Input` configuration. Moreover, a `Searcher` can work +/// with _any_ regex engine instead of only the ones defined in this crate. +/// This way, everyone benefits from a shared iteration implementation. +/// 2. There are many different regex engines that, while they have the same +/// match semantics, they have slightly different APIs. Iteration is just +/// complex enough to want to share code, and so we need a way of abstracting +/// over those different regex engines. While we could define a new trait that +/// describes any regex engine search API, it would wind up looking very close +/// to a closure. While there may still be reasons for the more generic trait +/// to exist, for now and for the purposes of iteration, we use a closure. +/// Closures also provide a lot of easy flexibility at the call site, in that +/// they permit the caller to borrow any kind of state they want for use during +/// each search call. +/// 3. As a result of using closures, and because closures are anonymous types +/// that cannot be named, it is difficult to encapsulate them without both +/// costs to speed and added complexity to the public API. For example, in +/// defining an iterator type like +/// [`dfa::regex::FindMatches`](crate::dfa::regex::FindMatches), +/// if we use a closure internally, it's not possible to name this type in the +/// return type of the iterator constructor. Thus, the only way around it is +/// to erase the type by boxing it and turning it into a `Box`. +/// This boxed closure is unlikely to be inlined _and_ it infects the public +/// API in subtle ways. 
Namely, unless you declare the closure as implementing
+/// `Send` and `Sync`, then the resulting iterator type won't implement it
+/// either. But there are practical issues with requiring the closure to
+/// implement `Send` and `Sync` that result in other API complexities that
+/// are beyond the scope of this already long exposition.
+/// 4. Some regex engines expose more complex match information than just
+/// "which pattern matched" and "at what offsets." For example, the PikeVM
+/// exposes match spans for each capturing group that participated in the
+/// match. In such cases, it can be quite beneficial to reuse the capturing
+/// group allocation on subsequent searches. A proper iterator doesn't permit
+/// this API due to its interface, so it's useful to have something a bit lower
+/// level that permits callers to amortize allocations while also reusing a
+/// shared implementation of iteration. (See the documentation for
+/// [`Searcher::advance`] for an example of using the "advance" API with the
+/// PikeVM.)
+///
+/// What this boils down to is that there are "advance" APIs which require
+/// handing a closure to them for every call, and there are also APIs to create
+/// iterators from a closure. The former are useful for _implementing_
+/// iterators or when you need more flexibility, while the latter are useful
+/// for conveniently writing custom iterators on-the-fly.
+///
+/// # Example: iterating with captures
+///
+/// Several regex engines in this crate offer convenient iterator APIs over
+/// [`Captures`] values. To do so, this requires allocating a new `Captures`
+/// value for each iteration step. This can perhaps be more costly than you
+/// might want. Instead of implementing your own iterator to avoid that
+/// cost (which can be a little subtle if you want to handle empty matches
+/// correctly), you can use this `Searcher` to do it for you:
+///
+/// ```
+/// use regex_automata::{
+/// nfa::thompson::pikevm::PikeVM,
+/// util::iter::Searcher,
+/// Input, Span,
+/// };
+///
+/// let re = PikeVM::new("foo(?P<numbers>[0-9]+)")?;
+/// let haystack = "foo1 foo12 foo123";
+///
+/// let mut caps = re.create_captures();
+/// let mut cache = re.create_cache();
+/// let mut matches = vec![];
+/// let mut searcher = Searcher::new(Input::new(haystack));
+/// while let Some(_) = searcher.advance(|input| {
+/// re.search(&mut cache, input, &mut caps);
+/// Ok(caps.get_match())
+/// }) {
+/// // The unwrap is OK since 'numbers' matches if the pattern matches.
+/// matches.push(caps.get_group_by_name("numbers").unwrap());
+/// }
+/// assert_eq!(matches, vec![
+/// Span::from(3..4),
+/// Span::from(8..10),
+/// Span::from(14..17),
+/// ]);
+///
+/// # Ok::<(), Box<dyn std::error::Error>>(())
+/// ```
+#[derive(Clone, Debug)]
+pub struct Searcher<'h> {
+ /// The input parameters to give to each regex engine call.
+ ///
+ /// The start position of the search is mutated during iteration.
+ input: Input<'h>,
+ /// Records the end offset of the most recent match. This is necessary to
+ /// handle a corner case for preventing empty matches from overlapping with
+ /// the ending bounds of a prior match.
+ last_match_end: Option<usize>,
+}
+
+impl<'h> Searcher<'h> {
+ /// Create a new searcher for iterating over fallible non-overlapping
+ /// matches.
+ ///
+ /// The given `input` provides the search parameters, including the
+ /// haystack. The closure that calls the underlying regex engine is
+ /// supplied later, either to the `advance` methods or to one of the
+ /// iterator constructors, and it may borrow any additional state that is
+ /// needed, such as a prefilter scanner.
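+ ///
+ /// # Example
+ ///
+ /// A minimal sketch of construction. On its own a `Searcher` does nothing;
+ /// it only drives a search once paired with a closure via `advance` or one
+ /// of the iterator constructors.
+ ///
+ /// ```
+ /// use regex_automata::{util::iter::Searcher, Input};
+ ///
+ /// let searcher = Searcher::new(Input::new("foo bar"));
+ /// // No search has run yet, so the next search starts at offset 0.
+ /// assert_eq!(0, searcher.input().start());
+ /// ```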
+ pub fn new(input: Input<'h>) -> Searcher<'h> { + Searcher { input, last_match_end: None } + } + + /// Returns the current `Input` used by this searcher. + /// + /// The `Input` returned is generally equivalent to the one given to + /// [`Searcher::new`], but its start position may be different to reflect + /// the start of the next search to be executed. + pub fn input<'s>(&'s self) -> &'s Input<'h> { + &self.input + } + + /// Return the next half match for an infallible search if one exists, and + /// advance to the next position. + /// + /// This is like `try_advance_half`, except errors are converted into + /// panics. + /// + /// # Panics + /// + /// If the given closure returns an error, then this panics. This is useful + /// when you know your underlying regex engine has been configured to not + /// return an error. + /// + /// # Example + /// + /// This example shows how to use a `Searcher` to iterate over all matches + /// when using a DFA, which only provides "half" matches. + /// + /// ``` + /// use regex_automata::{ + /// hybrid::dfa::DFA, + /// util::iter::Searcher, + /// HalfMatch, Input, + /// }; + /// + /// let re = DFA::new(r"[0-9]{4}-[0-9]{2}-[0-9]{2}")?; + /// let mut cache = re.create_cache(); + /// + /// let input = Input::new("2010-03-14 2016-10-08 2020-10-22"); + /// let mut it = Searcher::new(input); + /// + /// let expected = Some(HalfMatch::must(0, 10)); + /// let got = it.advance_half(|input| re.try_search_fwd(&mut cache, input)); + /// assert_eq!(expected, got); + /// + /// let expected = Some(HalfMatch::must(0, 21)); + /// let got = it.advance_half(|input| re.try_search_fwd(&mut cache, input)); + /// assert_eq!(expected, got); + /// + /// let expected = Some(HalfMatch::must(0, 32)); + /// let got = it.advance_half(|input| re.try_search_fwd(&mut cache, input)); + /// assert_eq!(expected, got); + /// + /// let expected = None; + /// let got = it.advance_half(|input| re.try_search_fwd(&mut cache, input)); + /// assert_eq!(expected, got); + /// + /// # Ok::<(), Box>(()) + /// ``` + /// + /// This correctly moves iteration forward even when an empty match occurs: + /// + /// ``` + /// use regex_automata::{ + /// hybrid::dfa::DFA, + /// util::iter::Searcher, + /// HalfMatch, Input, + /// }; + /// + /// let re = DFA::new(r"a|")?; + /// let mut cache = re.create_cache(); + /// + /// let input = Input::new("abba"); + /// let mut it = Searcher::new(input); + /// + /// let expected = Some(HalfMatch::must(0, 1)); + /// let got = it.advance_half(|input| re.try_search_fwd(&mut cache, input)); + /// assert_eq!(expected, got); + /// + /// let expected = Some(HalfMatch::must(0, 2)); + /// let got = it.advance_half(|input| re.try_search_fwd(&mut cache, input)); + /// assert_eq!(expected, got); + /// + /// let expected = Some(HalfMatch::must(0, 4)); + /// let got = it.advance_half(|input| re.try_search_fwd(&mut cache, input)); + /// assert_eq!(expected, got); + /// + /// let expected = None; + /// let got = it.advance_half(|input| re.try_search_fwd(&mut cache, input)); + /// assert_eq!(expected, got); + /// + /// # Ok::<(), Box>(()) + /// ``` + #[inline] + pub fn advance_half(&mut self, finder: F) -> Option + where + F: FnMut(&Input<'_>) -> Result, MatchError>, + { + match self.try_advance_half(finder) { + Ok(m) => m, + Err(err) => panic!( + "unexpected regex half find error: {err}\n\ + to handle find errors, use 'try' or 'search' methods", + ), + } + } + + /// Return the next match for an infallible search if one exists, and + /// advance to the next position. 
+ /// + /// The search is advanced even in the presence of empty matches by + /// forbidding empty matches from overlapping with any other match. + /// + /// This is like `try_advance`, except errors are converted into panics. + /// + /// # Panics + /// + /// If the given closure returns an error, then this panics. This is useful + /// when you know your underlying regex engine has been configured to not + /// return an error. + /// + /// # Example + /// + /// This example shows how to use a `Searcher` to iterate over all matches + /// when using a regex based on lazy DFAs: + /// + /// ``` + /// use regex_automata::{ + /// hybrid::regex::Regex, + /// util::iter::Searcher, + /// Match, Input, + /// }; + /// + /// let re = Regex::new(r"[0-9]{4}-[0-9]{2}-[0-9]{2}")?; + /// let mut cache = re.create_cache(); + /// + /// let input = Input::new("2010-03-14 2016-10-08 2020-10-22"); + /// let mut it = Searcher::new(input); + /// + /// let expected = Some(Match::must(0, 0..10)); + /// let got = it.advance(|input| re.try_search(&mut cache, input)); + /// assert_eq!(expected, got); + /// + /// let expected = Some(Match::must(0, 11..21)); + /// let got = it.advance(|input| re.try_search(&mut cache, input)); + /// assert_eq!(expected, got); + /// + /// let expected = Some(Match::must(0, 22..32)); + /// let got = it.advance(|input| re.try_search(&mut cache, input)); + /// assert_eq!(expected, got); + /// + /// let expected = None; + /// let got = it.advance(|input| re.try_search(&mut cache, input)); + /// assert_eq!(expected, got); + /// + /// # Ok::<(), Box>(()) + /// ``` + /// + /// This example shows the same as above, but with the PikeVM. This example + /// is useful because it shows how to use this API even when the regex + /// engine doesn't directly return a `Match`. + /// + /// ``` + /// use regex_automata::{ + /// nfa::thompson::pikevm::PikeVM, + /// util::iter::Searcher, + /// Match, Input, + /// }; + /// + /// let re = PikeVM::new(r"[0-9]{4}-[0-9]{2}-[0-9]{2}")?; + /// let (mut cache, mut caps) = (re.create_cache(), re.create_captures()); + /// + /// let input = Input::new("2010-03-14 2016-10-08 2020-10-22"); + /// let mut it = Searcher::new(input); + /// + /// let expected = Some(Match::must(0, 0..10)); + /// let got = it.advance(|input| { + /// re.search(&mut cache, input, &mut caps); + /// Ok(caps.get_match()) + /// }); + /// // Note that if we wanted to extract capturing group spans, we could + /// // do that here with 'caps'. 
+ /// assert_eq!(expected, got); + /// + /// let expected = Some(Match::must(0, 11..21)); + /// let got = it.advance(|input| { + /// re.search(&mut cache, input, &mut caps); + /// Ok(caps.get_match()) + /// }); + /// assert_eq!(expected, got); + /// + /// let expected = Some(Match::must(0, 22..32)); + /// let got = it.advance(|input| { + /// re.search(&mut cache, input, &mut caps); + /// Ok(caps.get_match()) + /// }); + /// assert_eq!(expected, got); + /// + /// let expected = None; + /// let got = it.advance(|input| { + /// re.search(&mut cache, input, &mut caps); + /// Ok(caps.get_match()) + /// }); + /// assert_eq!(expected, got); + /// + /// # Ok::<(), Box>(()) + /// ``` + #[inline] + pub fn advance(&mut self, finder: F) -> Option + where + F: FnMut(&Input<'_>) -> Result, MatchError>, + { + match self.try_advance(finder) { + Ok(m) => m, + Err(err) => panic!( + "unexpected regex find error: {err}\n\ + to handle find errors, use 'try' or 'search' methods", + ), + } + } + + /// Return the next half match for a fallible search if one exists, and + /// advance to the next position. + /// + /// This is like `advance_half`, except it permits callers to handle errors + /// during iteration. + #[inline] + pub fn try_advance_half( + &mut self, + mut finder: F, + ) -> Result, MatchError> + where + F: FnMut(&Input<'_>) -> Result, MatchError>, + { + let mut m = match finder(&self.input)? { + None => return Ok(None), + Some(m) => m, + }; + if Some(m.offset()) == self.last_match_end { + m = match self.handle_overlapping_empty_half_match(m, finder)? { + None => return Ok(None), + Some(m) => m, + }; + } + self.input.set_start(m.offset()); + self.last_match_end = Some(m.offset()); + Ok(Some(m)) + } + + /// Return the next match for a fallible search if one exists, and advance + /// to the next position. + /// + /// This is like `advance`, except it permits callers to handle errors + /// during iteration. + #[inline] + pub fn try_advance( + &mut self, + mut finder: F, + ) -> Result, MatchError> + where + F: FnMut(&Input<'_>) -> Result, MatchError>, + { + let mut m = match finder(&self.input)? { + None => return Ok(None), + Some(m) => m, + }; + if m.is_empty() && Some(m.end()) == self.last_match_end { + m = match self.handle_overlapping_empty_match(m, finder)? { + None => return Ok(None), + Some(m) => m, + }; + } + self.input.set_start(m.end()); + self.last_match_end = Some(m.end()); + Ok(Some(m)) + } + + /// Given a closure that executes a single search, return an iterator over + /// all successive non-overlapping half matches. + /// + /// The iterator returned yields result values. If the underlying regex + /// engine is configured to never return an error, consider calling + /// [`TryHalfMatchesIter::infallible`] to convert errors into panics. + /// + /// # Example + /// + /// This example shows how to use a `Searcher` to create a proper + /// iterator over half matches. 
+ /// + /// ``` + /// use regex_automata::{ + /// hybrid::dfa::DFA, + /// util::iter::Searcher, + /// HalfMatch, Input, + /// }; + /// + /// let re = DFA::new(r"[0-9]{4}-[0-9]{2}-[0-9]{2}")?; + /// let mut cache = re.create_cache(); + /// + /// let input = Input::new("2010-03-14 2016-10-08 2020-10-22"); + /// let mut it = Searcher::new(input).into_half_matches_iter(|input| { + /// re.try_search_fwd(&mut cache, input) + /// }); + /// + /// let expected = Some(Ok(HalfMatch::must(0, 10))); + /// assert_eq!(expected, it.next()); + /// + /// let expected = Some(Ok(HalfMatch::must(0, 21))); + /// assert_eq!(expected, it.next()); + /// + /// let expected = Some(Ok(HalfMatch::must(0, 32))); + /// assert_eq!(expected, it.next()); + /// + /// let expected = None; + /// assert_eq!(expected, it.next()); + /// + /// # Ok::<(), Box>(()) + /// ``` + #[inline] + pub fn into_half_matches_iter( + self, + finder: F, + ) -> TryHalfMatchesIter<'h, F> + where + F: FnMut(&Input<'_>) -> Result, MatchError>, + { + TryHalfMatchesIter { it: self, finder } + } + + /// Given a closure that executes a single search, return an iterator over + /// all successive non-overlapping matches. + /// + /// The iterator returned yields result values. If the underlying regex + /// engine is configured to never return an error, consider calling + /// [`TryMatchesIter::infallible`] to convert errors into panics. + /// + /// # Example + /// + /// This example shows how to use a `Searcher` to create a proper + /// iterator over matches. + /// + /// ``` + /// use regex_automata::{ + /// hybrid::regex::Regex, + /// util::iter::Searcher, + /// Match, Input, + /// }; + /// + /// let re = Regex::new(r"[0-9]{4}-[0-9]{2}-[0-9]{2}")?; + /// let mut cache = re.create_cache(); + /// + /// let input = Input::new("2010-03-14 2016-10-08 2020-10-22"); + /// let mut it = Searcher::new(input).into_matches_iter(|input| { + /// re.try_search(&mut cache, input) + /// }); + /// + /// let expected = Some(Ok(Match::must(0, 0..10))); + /// assert_eq!(expected, it.next()); + /// + /// let expected = Some(Ok(Match::must(0, 11..21))); + /// assert_eq!(expected, it.next()); + /// + /// let expected = Some(Ok(Match::must(0, 22..32))); + /// assert_eq!(expected, it.next()); + /// + /// let expected = None; + /// assert_eq!(expected, it.next()); + /// + /// # Ok::<(), Box>(()) + /// ``` + #[inline] + pub fn into_matches_iter(self, finder: F) -> TryMatchesIter<'h, F> + where + F: FnMut(&Input<'_>) -> Result, MatchError>, + { + TryMatchesIter { it: self, finder } + } + + /// Given a closure that executes a single search, return an iterator over + /// all successive non-overlapping `Captures` values. + /// + /// The iterator returned yields result values. If the underlying regex + /// engine is configured to never return an error, consider calling + /// [`TryCapturesIter::infallible`] to convert errors into panics. + /// + /// Unlike the other iterator constructors, this accepts an initial + /// `Captures` value. This `Captures` value is reused for each search, and + /// the iterator implementation clones it before returning it. The caller + /// must provide this value because the iterator is purposely ignorant + /// of the underlying regex engine and thus doesn't know how to create + /// one itself. More to the point, a `Captures` value itself has a few + /// different constructors, which change which kind of information is + /// available to query in exchange for search performance. 
+ /// + /// # Example + /// + /// This example shows how to use a `Searcher` to create a proper iterator + /// over `Captures` values, which provides access to all capturing group + /// spans for each match. + /// + /// ``` + /// use regex_automata::{ + /// nfa::thompson::pikevm::PikeVM, + /// util::iter::Searcher, + /// Input, + /// }; + /// + /// let re = PikeVM::new( + /// r"(?P[0-9]{4})-(?P[0-9]{2})-(?P[0-9]{2})", + /// )?; + /// let (mut cache, caps) = (re.create_cache(), re.create_captures()); + /// + /// let haystack = "2010-03-14 2016-10-08 2020-10-22"; + /// let input = Input::new(haystack); + /// let mut it = Searcher::new(input) + /// .into_captures_iter(caps, |input, caps| { + /// re.search(&mut cache, input, caps); + /// Ok(()) + /// }); + /// + /// let got = it.next().expect("first date")?; + /// let year = got.get_group_by_name("y").expect("must match"); + /// assert_eq!("2010", &haystack[year]); + /// + /// let got = it.next().expect("second date")?; + /// let month = got.get_group_by_name("m").expect("must match"); + /// assert_eq!("10", &haystack[month]); + /// + /// let got = it.next().expect("third date")?; + /// let day = got.get_group_by_name("d").expect("must match"); + /// assert_eq!("22", &haystack[day]); + /// + /// assert!(it.next().is_none()); + /// + /// # Ok::<(), Box>(()) + /// ``` + #[cfg(feature = "alloc")] + #[inline] + pub fn into_captures_iter( + self, + caps: Captures, + finder: F, + ) -> TryCapturesIter<'h, F> + where + F: FnMut(&Input<'_>, &mut Captures) -> Result<(), MatchError>, + { + TryCapturesIter { it: self, caps, finder } + } + + /// Handles the special case of a match that begins where the previous + /// match ended. Without this special handling, it'd be possible to get + /// stuck where an empty match never results in forward progress. This + /// also makes it more consistent with how presiding general purpose regex + /// engines work. + #[cold] + #[inline(never)] + fn handle_overlapping_empty_half_match( + &mut self, + _: HalfMatch, + mut finder: F, + ) -> Result, MatchError> + where + F: FnMut(&Input<'_>) -> Result, MatchError>, + { + // Since we are only here when 'm.offset()' matches the offset of the + // last match, it follows that this must have been an empty match. + // Since we both need to make progress *and* prevent overlapping + // matches, we discard this match and advance the search by 1. + // + // Note that this may start a search in the middle of a codepoint. The + // regex engines themselves are expected to deal with that and not + // report any matches within a codepoint if they are configured in + // UTF-8 mode. + self.input.set_start(self.input.start().checked_add(1).unwrap()); + finder(&self.input) + } + + /// Handles the special case of an empty match by ensuring that 1) the + /// iterator always advances and 2) empty matches never overlap with other + /// matches. + /// + /// (1) is necessary because we principally make progress by setting the + /// starting location of the next search to the ending location of the last + /// match. But if a match is empty, then this results in a search that does + /// not advance and thus does not terminate. + /// + /// (2) is not strictly necessary, but makes intuitive sense and matches + /// the presiding behavior of most general purpose regex engines. The + /// "intuitive sense" here is that we want to report NON-overlapping + /// matches. 
So for example, given the regex 'a|(?:)' against the haystack + /// 'a', without the special handling, you'd get the matches [0, 1) and [1, + /// 1), where the latter overlaps with the end bounds of the former. + /// + /// Note that we mark this cold and forcefully prevent inlining because + /// handling empty matches like this is extremely rare and does require + /// quite a bit of code, comparatively. Keeping this code out of the main + /// iterator function keeps it smaller and more amenable to inlining + /// itself. + #[cold] + #[inline(never)] + fn handle_overlapping_empty_match( + &mut self, + m: Match, + mut finder: F, + ) -> Result, MatchError> + where + F: FnMut(&Input<'_>) -> Result, MatchError>, + { + assert!(m.is_empty()); + self.input.set_start(self.input.start().checked_add(1).unwrap()); + finder(&self.input) + } +} + +/// An iterator over all non-overlapping half matches for a fallible search. +/// +/// The iterator yields a `Result` value until no more +/// matches could be found. +/// +/// The type parameters are as follows: +/// +/// * `F` represents the type of a closure that executes the search. +/// +/// The lifetime parameters come from the [`Input`] type: +/// +/// * `'h` is the lifetime of the underlying haystack. +/// +/// When possible, prefer the iterators defined on the regex engine you're +/// using. This tries to abstract over the regex engine and is thus a bit more +/// unwieldy to use. +/// +/// This iterator is created by [`Searcher::into_half_matches_iter`]. +pub struct TryHalfMatchesIter<'h, F> { + it: Searcher<'h>, + finder: F, +} + +impl<'h, F> TryHalfMatchesIter<'h, F> { + /// Return an infallible version of this iterator. + /// + /// Any item yielded that corresponds to an error results in a panic. This + /// is useful if your underlying regex engine is configured in a way that + /// it is guaranteed to never return an error. + pub fn infallible(self) -> HalfMatchesIter<'h, F> { + HalfMatchesIter(self) + } + + /// Returns the current `Input` used by this iterator. + /// + /// The `Input` returned is generally equivalent to the one used to + /// construct this iterator, but its start position may be different to + /// reflect the start of the next search to be executed. + pub fn input<'i>(&'i self) -> &'i Input<'h> { + self.it.input() + } +} + +impl<'h, F> Iterator for TryHalfMatchesIter<'h, F> +where + F: FnMut(&Input<'_>) -> Result, MatchError>, +{ + type Item = Result; + + #[inline] + fn next(&mut self) -> Option> { + self.it.try_advance_half(&mut self.finder).transpose() + } +} + +impl<'h, F> core::fmt::Debug for TryHalfMatchesIter<'h, F> { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + f.debug_struct("TryHalfMatchesIter") + .field("it", &self.it) + .field("finder", &"") + .finish() + } +} + +/// An iterator over all non-overlapping half matches for an infallible search. +/// +/// The iterator yields a [`HalfMatch`] value until no more matches could be +/// found. +/// +/// The type parameters are as follows: +/// +/// * `F` represents the type of a closure that executes the search. +/// +/// The lifetime parameters come from the [`Input`] type: +/// +/// * `'h` is the lifetime of the underlying haystack. +/// +/// When possible, prefer the iterators defined on the regex engine you're +/// using. This tries to abstract over the regex engine and is thus a bit more +/// unwieldy to use. +/// +/// This iterator is created by [`Searcher::into_half_matches_iter`] and +/// then calling [`TryHalfMatchesIter::infallible`]. 
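+///
+/// # Example
+///
+/// A short sketch of the infallible form. It assumes the lazy DFA cannot
+/// return an error for this configuration and haystack (otherwise the
+/// iterator would panic).
+///
+/// ```
+/// use regex_automata::{hybrid::dfa::DFA, util::iter::Searcher, HalfMatch, Input};
+///
+/// let re = DFA::new(r"[0-9]+")?;
+/// let mut cache = re.create_cache();
+///
+/// let it = Searcher::new(Input::new("a1 b22"))
+///     .into_half_matches_iter(|input| re.try_search_fwd(&mut cache, input))
+///     .infallible();
+/// let got: Vec<HalfMatch> = it.collect();
+/// assert_eq!(vec![HalfMatch::must(0, 2), HalfMatch::must(0, 6)], got);
+///
+/// # Ok::<(), Box<dyn std::error::Error>>(())
+/// ```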
+#[derive(Debug)] +pub struct HalfMatchesIter<'h, F>(TryHalfMatchesIter<'h, F>); + +impl<'h, F> HalfMatchesIter<'h, F> { + /// Returns the current `Input` used by this iterator. + /// + /// The `Input` returned is generally equivalent to the one used to + /// construct this iterator, but its start position may be different to + /// reflect the start of the next search to be executed. + pub fn input<'i>(&'i self) -> &'i Input<'h> { + self.0.it.input() + } +} + +impl<'h, F> Iterator for HalfMatchesIter<'h, F> +where + F: FnMut(&Input<'_>) -> Result, MatchError>, +{ + type Item = HalfMatch; + + #[inline] + fn next(&mut self) -> Option { + match self.0.next()? { + Ok(m) => Some(m), + Err(err) => panic!( + "unexpected regex half find error: {err}\n\ + to handle find errors, use 'try' or 'search' methods", + ), + } + } +} + +/// An iterator over all non-overlapping matches for a fallible search. +/// +/// The iterator yields a `Result` value until no more +/// matches could be found. +/// +/// The type parameters are as follows: +/// +/// * `F` represents the type of a closure that executes the search. +/// +/// The lifetime parameters come from the [`Input`] type: +/// +/// * `'h` is the lifetime of the underlying haystack. +/// +/// When possible, prefer the iterators defined on the regex engine you're +/// using. This tries to abstract over the regex engine and is thus a bit more +/// unwieldy to use. +/// +/// This iterator is created by [`Searcher::into_matches_iter`]. +pub struct TryMatchesIter<'h, F> { + it: Searcher<'h>, + finder: F, +} + +impl<'h, F> TryMatchesIter<'h, F> { + /// Return an infallible version of this iterator. + /// + /// Any item yielded that corresponds to an error results in a panic. This + /// is useful if your underlying regex engine is configured in a way that + /// it is guaranteed to never return an error. + pub fn infallible(self) -> MatchesIter<'h, F> { + MatchesIter(self) + } + + /// Returns the current `Input` used by this iterator. + /// + /// The `Input` returned is generally equivalent to the one used to + /// construct this iterator, but its start position may be different to + /// reflect the start of the next search to be executed. + pub fn input<'i>(&'i self) -> &'i Input<'h> { + self.it.input() + } +} + +impl<'h, F> Iterator for TryMatchesIter<'h, F> +where + F: FnMut(&Input<'_>) -> Result, MatchError>, +{ + type Item = Result; + + #[inline] + fn next(&mut self) -> Option> { + self.it.try_advance(&mut self.finder).transpose() + } +} + +impl<'h, F> core::fmt::Debug for TryMatchesIter<'h, F> { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + f.debug_struct("TryMatchesIter") + .field("it", &self.it) + .field("finder", &"") + .finish() + } +} + +/// An iterator over all non-overlapping matches for an infallible search. +/// +/// The iterator yields a [`Match`] value until no more matches could be found. +/// +/// The type parameters are as follows: +/// +/// * `F` represents the type of a closure that executes the search. +/// +/// The lifetime parameters come from the [`Input`] type: +/// +/// * `'h` is the lifetime of the underlying haystack. +/// +/// When possible, prefer the iterators defined on the regex engine you're +/// using. This tries to abstract over the regex engine and is thus a bit more +/// unwieldy to use. +/// +/// This iterator is created by [`Searcher::into_matches_iter`] and +/// then calling [`TryMatchesIter::infallible`]. 
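+///
+/// # Example
+///
+/// A short sketch of the infallible form, assuming the underlying lazy DFA
+/// regex cannot return an error for this input.
+///
+/// ```
+/// use regex_automata::{hybrid::regex::Regex, util::iter::Searcher, Match, Input};
+///
+/// let re = Regex::new(r"[0-9]{2}")?;
+/// let mut cache = re.create_cache();
+///
+/// let it = Searcher::new(Input::new("12 345"))
+///     .into_matches_iter(|input| re.try_search(&mut cache, input))
+///     .infallible();
+/// let got: Vec<Match> = it.collect();
+/// assert_eq!(vec![Match::must(0, 0..2), Match::must(0, 3..5)], got);
+///
+/// # Ok::<(), Box<dyn std::error::Error>>(())
+/// ```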
+#[derive(Debug)] +pub struct MatchesIter<'h, F>(TryMatchesIter<'h, F>); + +impl<'h, F> MatchesIter<'h, F> { + /// Returns the current `Input` used by this iterator. + /// + /// The `Input` returned is generally equivalent to the one used to + /// construct this iterator, but its start position may be different to + /// reflect the start of the next search to be executed. + pub fn input<'i>(&'i self) -> &'i Input<'h> { + self.0.it.input() + } +} + +impl<'h, F> Iterator for MatchesIter<'h, F> +where + F: FnMut(&Input<'_>) -> Result, MatchError>, +{ + type Item = Match; + + #[inline] + fn next(&mut self) -> Option { + match self.0.next()? { + Ok(m) => Some(m), + Err(err) => panic!( + "unexpected regex find error: {err}\n\ + to handle find errors, use 'try' or 'search' methods", + ), + } + } +} + +/// An iterator over all non-overlapping captures for a fallible search. +/// +/// The iterator yields a `Result` value until no more +/// matches could be found. +/// +/// The type parameters are as follows: +/// +/// * `F` represents the type of a closure that executes the search. +/// +/// The lifetime parameters come from the [`Input`] type: +/// +/// * `'h` is the lifetime of the underlying haystack. +/// +/// When possible, prefer the iterators defined on the regex engine you're +/// using. This tries to abstract over the regex engine and is thus a bit more +/// unwieldy to use. +/// +/// This iterator is created by [`Searcher::into_captures_iter`]. +#[cfg(feature = "alloc")] +pub struct TryCapturesIter<'h, F> { + it: Searcher<'h>, + caps: Captures, + finder: F, +} + +#[cfg(feature = "alloc")] +impl<'h, F> TryCapturesIter<'h, F> { + /// Return an infallible version of this iterator. + /// + /// Any item yielded that corresponds to an error results in a panic. This + /// is useful if your underlying regex engine is configured in a way that + /// it is guaranteed to never return an error. + pub fn infallible(self) -> CapturesIter<'h, F> { + CapturesIter(self) + } +} + +#[cfg(feature = "alloc")] +impl<'h, F> Iterator for TryCapturesIter<'h, F> +where + F: FnMut(&Input<'_>, &mut Captures) -> Result<(), MatchError>, +{ + type Item = Result; + + #[inline] + fn next(&mut self) -> Option> { + let TryCapturesIter { ref mut it, ref mut caps, ref mut finder } = + *self; + let result = it + .try_advance(|input| { + (finder)(input, caps)?; + Ok(caps.get_match()) + }) + .transpose()?; + match result { + Ok(_) => Some(Ok(caps.clone())), + Err(err) => Some(Err(err)), + } + } +} + +#[cfg(feature = "alloc")] +impl<'h, F> core::fmt::Debug for TryCapturesIter<'h, F> { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + f.debug_struct("TryCapturesIter") + .field("it", &self.it) + .field("caps", &self.caps) + .field("finder", &"") + .finish() + } +} + +/// An iterator over all non-overlapping captures for an infallible search. +/// +/// The iterator yields a [`Captures`] value until no more matches could be +/// found. +/// +/// The type parameters are as follows: +/// +/// * `F` represents the type of a closure that executes the search. +/// +/// The lifetime parameters come from the [`Input`] type: +/// +/// * `'h` is the lifetime of the underlying haystack. +/// +/// When possible, prefer the iterators defined on the regex engine you're +/// using. This tries to abstract over the regex engine and is thus a bit more +/// unwieldy to use. +/// +/// This iterator is created by [`Searcher::into_captures_iter`] and then +/// calling [`TryCapturesIter::infallible`]. 
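+///
+/// # Example
+///
+/// A short sketch of the infallible form, assuming the PikeVM search itself
+/// cannot fail here.
+///
+/// ```
+/// use regex_automata::{nfa::thompson::pikevm::PikeVM, util::iter::Searcher, Input};
+///
+/// let re = PikeVM::new(r"[a-z]+")?;
+/// let (mut cache, caps) = (re.create_cache(), re.create_captures());
+///
+/// let it = Searcher::new(Input::new("ab 12 cd"))
+///     .into_captures_iter(caps, |input, caps| {
+///         re.search(&mut cache, input, caps);
+///         Ok(())
+///     })
+///     .infallible();
+/// let mut spans = vec![];
+/// for caps in it {
+///     let m = caps.get_match().expect("every yielded `Captures` is a match");
+///     spans.push((m.start(), m.end()));
+/// }
+/// assert_eq!(vec![(0, 2), (6, 8)], spans);
+///
+/// # Ok::<(), Box<dyn std::error::Error>>(())
+/// ```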
+#[cfg(feature = "alloc")] +#[derive(Debug)] +pub struct CapturesIter<'h, F>(TryCapturesIter<'h, F>); + +#[cfg(feature = "alloc")] +impl<'h, F> Iterator for CapturesIter<'h, F> +where + F: FnMut(&Input<'_>, &mut Captures) -> Result<(), MatchError>, +{ + type Item = Captures; + + #[inline] + fn next(&mut self) -> Option { + match self.0.next()? { + Ok(m) => Some(m), + Err(err) => panic!( + "unexpected regex captures error: {err}\n\ + to handle find errors, use 'try' or 'search' methods", + ), + } + } +} diff --git a/vendor/regex-automata/src/util/lazy.rs b/vendor/regex-automata/src/util/lazy.rs new file mode 100644 index 00000000000000..c5903381ed59da --- /dev/null +++ b/vendor/regex-automata/src/util/lazy.rs @@ -0,0 +1,461 @@ +/*! +A lazily initialized value for safe sharing between threads. + +The principal type in this module is `Lazy`, which makes it easy to construct +values that are shared safely across multiple threads simultaneously. +*/ + +use core::fmt; + +/// A lazily initialized value that implements `Deref` for `T`. +/// +/// A `Lazy` takes an initialization function and permits callers from any +/// thread to access the result of that initialization function in a safe +/// manner. In effect, this permits one-time initialization of global resources +/// in a (possibly) multi-threaded program. +/// +/// This type and its functionality are available even when neither the `alloc` +/// nor the `std` features are enabled. In exchange, a `Lazy` does **not** +/// guarantee that the given `create` function is called at most once. It +/// might be called multiple times. Moreover, a call to `Lazy::get` (either +/// explicitly or implicitly via `Lazy`'s `Deref` impl) may block until a `T` +/// is available. +/// +/// This is very similar to `lazy_static` or `once_cell`, except it doesn't +/// guarantee that the initialization function will be run once and it works +/// in no-alloc no-std environments. With that said, if you need stronger +/// guarantees or a more flexible API, then it is recommended to use either +/// `lazy_static` or `once_cell`. +/// +/// # Warning: may use a spin lock +/// +/// When this crate is compiled _without_ the `alloc` feature, then this type +/// may used a spin lock internally. This can have subtle effects that may +/// be undesirable. See [Spinlocks Considered Harmful][spinharm] for a more +/// thorough treatment of this topic. +/// +/// [spinharm]: https://matklad.github.io/2020/01/02/spinlocks-considered-harmful.html +/// +/// # Example +/// +/// This type is useful for creating regexes once, and then using them from +/// multiple threads simultaneously without worrying about synchronization. +/// +/// ``` +/// use regex_automata::{dfa::regex::Regex, util::lazy::Lazy, Match}; +/// +/// static RE: Lazy = Lazy::new(|| Regex::new("foo[0-9]+bar").unwrap()); +/// +/// let expected = Some(Match::must(0, 3..14)); +/// assert_eq!(expected, RE.find(b"zzzfoo12345barzzz")); +/// ``` +pub struct Lazy T>(lazy::Lazy); + +impl Lazy { + /// Create a new `Lazy` value that is initialized via the given function. + /// + /// The `T` type is automatically inferred from the return type of the + /// `create` function given. + pub const fn new(create: F) -> Lazy { + Lazy(lazy::Lazy::new(create)) + } +} + +impl T> Lazy { + /// Return a reference to the lazily initialized value. + /// + /// This routine may block if another thread is initializing a `T`. + /// + /// Note that given a `x` which has type `Lazy`, this must be called via + /// `Lazy::get(x)` and not `x.get()`. 
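For reference, a minimal usage sketch of `Lazy` as documented above, showing both the transparent `Deref` access and the fully qualified `Lazy::get` form. The `SQUARES` table is purely illustrative:

```rust
use regex_automata::util::lazy::Lazy;

// Built on first use. Under contention the closure may run more than
// once, but callers always observe a fully built value.
static SQUARES: Lazy<Vec<u64>> =
    Lazy::new(|| (0u64..10).map(|n| n * n).collect());

fn main() {
    // Method calls deref transparently to the underlying `Vec<u64>`.
    assert_eq!(10, SQUARES.len());
    // When a plain `&T` is needed, use the associated-function form;
    // `SQUARES.get(..)` would resolve to `Vec::get` through `Deref`.
    let table: &Vec<u64> = Lazy::get(&SQUARES);
    assert_eq!(Some(&81), table.get(9));
}
```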
This routine is defined this way + /// because `Lazy` impls `Deref` with a target of `T`. + /// + /// # Panics + /// + /// This panics if the `create` function inside this lazy value panics. + /// If the panic occurred in another thread, then this routine _may_ also + /// panic (but is not guaranteed to do so). + pub fn get(this: &Lazy) -> &T { + this.0.get() + } +} + +impl T> core::ops::Deref for Lazy { + type Target = T; + + fn deref(&self) -> &T { + Lazy::get(self) + } +} + +impl T> fmt::Debug for Lazy { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + self.0.fmt(f) + } +} + +#[cfg(feature = "alloc")] +mod lazy { + use core::{ + fmt, + marker::PhantomData, + sync::atomic::{AtomicPtr, Ordering}, + }; + + use alloc::boxed::Box; + + /// A non-std lazy initialized value. + /// + /// This might run the initialization function more than once, but will + /// never block. + /// + /// I wish I could get these semantics into the non-alloc non-std Lazy + /// type below, but I'm not sure how to do it. If you can do an alloc, + /// then the implementation becomes very simple if you don't care about + /// redundant work precisely because a pointer can be atomically swapped. + /// + /// Perhaps making this approach work in the non-alloc non-std case + /// requires asking the caller for a pointer? It would make the API less + /// convenient I think. + pub(super) struct Lazy { + data: AtomicPtr, + create: F, + // This indicates to the compiler that this type can drop T. It's not + // totally clear how the absence of this marker could lead to trouble, + // but putting here doesn't have any downsides so we hedge until someone + // can from the Unsafe Working Group can tell us definitively that we + // don't need it. + // + // See: https://github.com/BurntSushi/regex-automata/issues/30 + owned: PhantomData>, + } + + // SAFETY: So long as T and &T (and F and &F) can themselves be safely + // shared among threads, so to can a Lazy. Namely, the Lazy API only + // permits accessing a &T and initialization is free of data races. So if T + // is thread safe, then so to is Lazy. + // + // We specifically require that T: Send in order for Lazy to be Sync. + // Without that requirement, it's possible to send a T from one thread to + // another via Lazy's destructor. + // + // It's not clear whether we need F: Send+Sync for Lazy to be Sync. But + // we're conservative for now and keep both. + unsafe impl Sync for Lazy {} + + impl Lazy { + /// Create a new alloc but non-std lazy value that is racily + /// initialized. That is, the 'create' function may be called more than + /// once. + pub(super) const fn new(create: F) -> Lazy { + Lazy { + data: AtomicPtr::new(core::ptr::null_mut()), + create, + owned: PhantomData, + } + } + } + + impl T> Lazy { + /// Get the underlying lazy value. If it hasn't been initialized + /// yet, then always attempt to initialize it (even if some other + /// thread is initializing it) and atomically attach it to this lazy + /// value before returning it. + pub(super) fn get(&self) -> &T { + if let Some(data) = self.poll() { + return data; + } + let data = (self.create)(); + let mut ptr = Box::into_raw(Box::new(data)); + // We attempt to stuff our initialized value into our atomic + // pointer. Upon success, we don't need to do anything. But if + // someone else beat us to the punch, then we need to make sure + // our newly created value is dropped. 
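The allocation-based strategy sketched in the comments above boils down to: build a fresh `Box`, try to CAS it into a shared `AtomicPtr`, and free your copy if another thread won the race. A condensed, std-only illustration of that idea with a hypothetical `RacyLazy` type (not the crate's; it also skips freeing the stored value on drop to stay short):

```rust
use std::sync::atomic::{AtomicPtr, Ordering};

/// Racy one-time initialization: every contending thread may build a
/// value, but exactly one `Box` wins the CAS and the rest are dropped.
struct RacyLazy<T> {
    ptr: AtomicPtr<T>,
}

impl<T> RacyLazy<T> {
    const fn new() -> Self {
        RacyLazy { ptr: AtomicPtr::new(std::ptr::null_mut()) }
    }

    fn get_or_init(&self, create: impl FnOnce() -> T) -> &T {
        let found = self.ptr.load(Ordering::Acquire);
        if !found.is_null() {
            // Already initialized by us or by some other thread.
            return unsafe { &*found };
        }
        let fresh = Box::into_raw(Box::new(create()));
        match self.ptr.compare_exchange(
            std::ptr::null_mut(),
            fresh,
            Ordering::AcqRel,
            Ordering::Acquire,
        ) {
            // We won the race: our allocation is now the shared value.
            Ok(_) => unsafe { &*fresh },
            // Someone beat us: free our value and use theirs.
            Err(theirs) => {
                drop(unsafe { Box::from_raw(fresh) });
                unsafe { &*theirs }
            }
        }
        // (A real implementation would also free the value in `Drop`.)
    }
}

fn main() {
    let lazy = RacyLazy::<String>::new();
    assert_eq!("hello", lazy.get_or_init(|| "hello".to_string()));
    // The value is already stored, so `create` is not run on this path.
    assert_eq!("hello", lazy.get_or_init(|| unreachable!()));
}
```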
+ let result = self.data.compare_exchange( + core::ptr::null_mut(), + ptr, + Ordering::AcqRel, + Ordering::Acquire, + ); + if let Err(old) = result { + // SAFETY: We created 'ptr' via Box::into_raw above, so turning + // it back into a Box via from_raw is safe. + drop(unsafe { Box::from_raw(ptr) }); + ptr = old; + } + // SAFETY: We just set the pointer above to a non-null value, even + // in the error case, and set it to a fully initialized value + // returned by 'create'. + unsafe { &*ptr } + } + + /// If this lazy value has been initialized successfully, then return + /// that value. Otherwise return None immediately. This never attempts + /// to run initialization itself. + fn poll(&self) -> Option<&T> { + let ptr = self.data.load(Ordering::Acquire); + if ptr.is_null() { + return None; + } + // SAFETY: We just checked that the pointer is not null. Since it's + // not null, it must have been fully initialized by 'get' at some + // point. + Some(unsafe { &*ptr }) + } + } + + impl T> fmt::Debug for Lazy { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("Lazy").field("data", &self.poll()).finish() + } + } + + impl Drop for Lazy { + fn drop(&mut self) { + let ptr = *self.data.get_mut(); + if !ptr.is_null() { + // SAFETY: We just checked that 'ptr' is not null. And since + // we have exclusive access, there are no races to worry about. + drop(unsafe { Box::from_raw(ptr) }); + } + } + } +} + +#[cfg(not(feature = "alloc"))] +mod lazy { + use core::{ + cell::Cell, + fmt, + mem::MaybeUninit, + panic::{RefUnwindSafe, UnwindSafe}, + sync::atomic::{AtomicU8, Ordering}, + }; + + /// Our 'Lazy' value can be in one of three states: + /// + /// * INIT is where it starts, and also ends up back here if the + /// 'create' routine panics. + /// * BUSY is where it sits while initialization is running in exactly + /// one thread. + /// * DONE is where it sits after 'create' has completed and 'data' has + /// been fully initialized. + const LAZY_STATE_INIT: u8 = 0; + const LAZY_STATE_BUSY: u8 = 1; + const LAZY_STATE_DONE: u8 = 2; + + /// A non-alloc non-std lazy initialized value. + /// + /// This guarantees initialization only happens once, but uses a spinlock + /// to block in the case of simultaneous access. Blocking occurs so that + /// one thread waits while another thread initializes the value. + /// + /// I would much rather have the semantics of the 'alloc' Lazy type above. + /// Namely, that we might run the initialization function more than once, + /// but we never otherwise block. However, I don't know how to do that in + /// a non-alloc non-std context. + pub(super) struct Lazy { + state: AtomicU8, + create: Cell>, + data: Cell>, + } + + // SAFETY: So long as T and &T (and F and &F) can themselves be safely + // shared among threads, so to can a Lazy. Namely, the Lazy API only + // permits accessing a &T and initialization is free of data races. So if T + // is thread safe, then so to is Lazy. + unsafe impl Sync for Lazy {} + // A reference to a Lazy is unwind safe because we specifically take + // precautions to poison all accesses to a Lazy if the caller-provided + // 'create' function panics. + impl RefUnwindSafe + for Lazy + { + } + + impl Lazy { + /// Create a new non-alloc non-std lazy value that is initialized + /// exactly once on first use using the given function. 
+ pub(super) const fn new(create: F) -> Lazy { + Lazy { + state: AtomicU8::new(LAZY_STATE_INIT), + create: Cell::new(Some(create)), + data: Cell::new(MaybeUninit::uninit()), + } + } + } + + impl T> Lazy { + /// Get the underlying lazy value. If it isn't been initialized + /// yet, then either initialize it or block until some other thread + /// initializes it. If the 'create' function given to Lazy::new panics + /// (even in another thread), then this panics too. + pub(super) fn get(&self) -> &T { + // This is effectively a spinlock. We loop until we enter a DONE + // state, and if possible, initialize it ourselves. The only way + // we exit the loop is if 'create' panics, we initialize 'data' or + // some other thread initializes 'data'. + // + // Yes, I have read spinlocks considered harmful[1]. And that + // article is why this spinlock is only active when 'alloc' isn't + // enabled. I did this because I don't think there is really + // another choice without 'alloc', other than not providing this at + // all. But I think that's a big bummer. + // + // [1]: https://matklad.github.io/2020/01/02/spinlocks-considered-harmful.html + while self.state.load(Ordering::Acquire) != LAZY_STATE_DONE { + // Check if we're the first ones to get here. If so, we'll be + // the ones who initialize. + let result = self.state.compare_exchange( + LAZY_STATE_INIT, + LAZY_STATE_BUSY, + Ordering::AcqRel, + Ordering::Acquire, + ); + // This means we saw the INIT state and nobody else can. So we + // must take responsibility for initializing. And by virtue of + // observing INIT, we have also told anyone else trying to + // get here that we are BUSY. If someone else sees BUSY, then + // they will spin until we finish initialization. + if let Ok(_) = result { + // Since we are guaranteed to be the only ones here, we + // know that 'create' is there... Unless someone else got + // here before us and 'create' panicked. In which case, + // 'self.create' is now 'None' and we forward the panic + // to the caller. (i.e., We implement poisoning.) + // + // SAFETY: Our use of 'self.state' guarantees that we are + // the only thread executing this line, and thus there are + // no races. + let create = unsafe { + (*self.create.as_ptr()).take().expect( + "Lazy's create function panicked, \ + preventing initialization, + poisoning current thread", + ) + }; + let guard = Guard { state: &self.state }; + // SAFETY: Our use of 'self.state' guarantees that we are + // the only thread executing this line, and thus there are + // no races. + unsafe { + (*self.data.as_ptr()).as_mut_ptr().write(create()); + } + // All is well. 'self.create' ran successfully, so we + // forget the guard. + core::mem::forget(guard); + // Everything is initialized, so we can declare success. + self.state.store(LAZY_STATE_DONE, Ordering::Release); + break; + } + core::hint::spin_loop(); + } + // We only get here if data is fully initialized, and thus poll + // will always return something. + self.poll().unwrap() + } + + /// If this lazy value has been initialized successfully, then return + /// that value. Otherwise return None immediately. This never blocks. + fn poll(&self) -> Option<&T> { + if self.state.load(Ordering::Acquire) == LAZY_STATE_DONE { + // SAFETY: The DONE state only occurs when data has been fully + // initialized. 
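The no-alloc path above is a three-state spin lock: claim INIT→BUSY with a CAS, write the value, publish DONE, and have every other thread spin until DONE is visible. A stripped-down illustration of just that state machine for a `u64` payload (a hypothetical `SpinOnceU64`; it omits the panic-poisoning `Guard` the real implementation relies on):

```rust
use std::sync::atomic::{AtomicU64, AtomicU8, Ordering};

const INIT: u8 = 0;
const BUSY: u8 = 1;
const DONE: u8 = 2;

/// One-time initialization without allocation: exactly one thread runs
/// `create`; everyone else spins until the DONE state is published.
struct SpinOnceU64 {
    state: AtomicU8,
    value: AtomicU64,
}

impl SpinOnceU64 {
    const fn new() -> Self {
        SpinOnceU64 { state: AtomicU8::new(INIT), value: AtomicU64::new(0) }
    }

    fn get(&self, create: impl Fn() -> u64) -> u64 {
        loop {
            match self.state.load(Ordering::Acquire) {
                DONE => return self.value.load(Ordering::Acquire),
                INIT => {
                    // Try to claim the BUSY slot; only one thread can win.
                    let claimed = self
                        .state
                        .compare_exchange(INIT, BUSY, Ordering::AcqRel, Ordering::Acquire)
                        .is_ok();
                    if claimed {
                        let v = create();
                        self.value.store(v, Ordering::Release);
                        // Publish: spinners waiting on BUSY now see DONE.
                        self.state.store(DONE, Ordering::Release);
                        return v;
                    }
                }
                // BUSY: another thread is initializing; spin until DONE.
                _ => std::hint::spin_loop(),
            }
        }
    }
}

fn main() {
    static CELL: SpinOnceU64 = SpinOnceU64::new();
    assert_eq!(42, CELL.get(|| 42));
    assert_eq!(42, CELL.get(|| 7)); // already DONE; `create` is ignored
}
```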
+ Some(unsafe { &*(*self.data.as_ptr()).as_ptr() }) + } else { + None + } + } + } + + impl T> fmt::Debug for Lazy { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("Lazy") + .field("state", &self.state.load(Ordering::Acquire)) + .field("create", &"") + .field("data", &self.poll()) + .finish() + } + } + + impl Drop for Lazy { + fn drop(&mut self) { + if *self.state.get_mut() == LAZY_STATE_DONE { + // SAFETY: state is DONE if and only if data has been fully + // initialized. At which point, it is safe to drop. + unsafe { + self.data.get_mut().assume_init_drop(); + } + } + } + } + + /// A guard that will reset a Lazy's state back to INIT when dropped. The + /// idea here is to 'forget' this guard on success. On failure (when a + /// panic occurs), the Drop impl runs and causes all in-progress and future + /// 'get' calls to panic. Without this guard, all in-progress and future + /// 'get' calls would spin forever. Crashing is much better than getting + /// stuck in an infinite loop. + struct Guard<'a> { + state: &'a AtomicU8, + } + + impl<'a> Drop for Guard<'a> { + fn drop(&mut self) { + // We force ourselves back into an INIT state. This will in turn + // cause any future 'get' calls to attempt calling 'self.create' + // again which will in turn panic because 'self.create' will now + // be 'None'. + self.state.store(LAZY_STATE_INIT, Ordering::Release); + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + fn assert_send() {} + fn assert_sync() {} + fn assert_unwind() {} + fn assert_refunwind() {} + + #[test] + fn oibits() { + assert_send::>(); + assert_sync::>(); + assert_unwind::>(); + assert_refunwind::>(); + } + + // This is a regression test because we used to rely on the inferred Sync + // impl for the Lazy type defined above (for 'alloc' mode). In the + // inferred impl, it only requires that T: Sync for Lazy: Sync. But + // if we have that, we can actually make use of the fact that Lazy drops + // T to create a value on one thread and drop it on another. This *should* + // require T: Send, but our missing bounds before let it sneak by. + // + // Basically, this test should not compile, so we... comment it out. We + // don't have a great way of testing compile-fail tests right now. + // + // See: https://github.com/BurntSushi/regex-automata/issues/30 + /* + #[test] + fn sync_not_send() { + #[allow(dead_code)] + fn inner() { + let lazy = Lazy::new(move || T::default()); + std::thread::scope(|scope| { + scope.spawn(|| { + Lazy::get(&lazy); // We create T in this thread + }); + }); + // And drop in this thread. + drop(lazy); + // So we have send a !Send type over threads. (with some more + // legwork, its possible to even sneak the value out of drop + // through thread local) + } + } + */ +} diff --git a/vendor/regex-automata/src/util/look.rs b/vendor/regex-automata/src/util/look.rs new file mode 100644 index 00000000000000..20bb8cc37149e5 --- /dev/null +++ b/vendor/regex-automata/src/util/look.rs @@ -0,0 +1,2547 @@ +/*! +Types and routines for working with look-around assertions. + +This module principally defines two types: + +* [`Look`] enumerates all of the assertions supported by this crate. +* [`LookSet`] provides a way to efficiently store a set of [`Look`] values. +* [`LookMatcher`] provides routines for checking whether a `Look` or a +`LookSet` matches at a particular position in a haystack. +*/ + +// LAMENTATION: Sadly, a lot of the API of `Look` and `LookSet` were basically +// copied verbatim from the regex-syntax crate. 
I would have no problems using +// the regex-syntax types and defining the matching routines (only found +// in this crate) as free functions, except the `Look` and `LookSet` types +// are used in lots of places. Including in places we expect to work when +// regex-syntax is *not* enabled, such as in the definition of the NFA itself. +// +// Thankfully the code we copy is pretty simple and there isn't much of it. +// Otherwise, the rest of this module deals with *matching* the assertions, +// which is not something that regex-syntax handles. + +use crate::util::{escape::DebugByte, utf8}; + +/// A look-around assertion. +/// +/// An assertion matches at a position between characters in a haystack. +/// Namely, it does not actually "consume" any input as most parts of a regular +/// expression do. Assertions are a way of stating that some property must be +/// true at a particular point during matching. +/// +/// For example, `(?m)^[a-z]+$` is a pattern that: +/// +/// * Scans the haystack for a position at which `(?m:^)` is satisfied. That +/// occurs at either the beginning of the haystack, or immediately following +/// a `\n` character. +/// * Looks for one or more occurrences of `[a-z]`. +/// * Once `[a-z]+` has matched as much as it can, an overall match is only +/// reported when `[a-z]+` stops just before a `\n`. +/// +/// So in this case, `abc` and `\nabc\n` match, but `\nabc1\n` does not. +/// +/// Assertions are also called "look-around," "look-behind" and "look-ahead." +/// Specifically, some assertions are look-behind (like `^`), other assertions +/// are look-ahead (like `$`) and yet other assertions are both look-ahead and +/// look-behind (like `\b`). +/// +/// # Assertions in an NFA +/// +/// An assertion in a [`thompson::NFA`](crate::nfa::thompson::NFA) can be +/// thought of as a conditional epsilon transition. That is, a matching engine +/// like the [`PikeVM`](crate::nfa::thompson::pikevm::PikeVM) only permits +/// moving through conditional epsilon transitions when their condition +/// is satisfied at whatever position the `PikeVM` is currently at in the +/// haystack. +/// +/// How assertions are handled in a `DFA` is trickier, since a DFA does not +/// have epsilon transitions at all. In this case, they are compiled into the +/// automaton itself, at the expense of more states than what would be required +/// without an assertion. +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub enum Look { + /// Match the beginning of text. Specifically, this matches at the starting + /// position of the input. + Start = 1 << 0, + /// Match the end of text. Specifically, this matches at the ending + /// position of the input. + End = 1 << 1, + /// Match the beginning of a line or the beginning of text. Specifically, + /// this matches at the starting position of the input, or at the position + /// immediately following a `\n` character. + StartLF = 1 << 2, + /// Match the end of a line or the end of text. Specifically, this matches + /// at the end position of the input, or at the position immediately + /// preceding a `\n` character. + EndLF = 1 << 3, + /// Match the beginning of a line or the beginning of text. Specifically, + /// this matches at the starting position of the input, or at the position + /// immediately following either a `\r` or `\n` character, but never after + /// a `\r` when a `\n` follows. + StartCRLF = 1 << 4, + /// Match the end of a line or the end of text. 
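The `(?m)^[a-z]+$` walk-through above can be checked against one of the engines that consumes these assertions. Assuming the vendored `PikeVM` and its `find` API behave as in the doc examples elsewhere in this module, the expected spans are:

```rust
use regex_automata::{nfa::thompson::pikevm::PikeVM, Match};

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let re = PikeVM::new(r"(?m)^[a-z]+$")?;
    let mut cache = re.create_cache();

    // `(?m:^)` is satisfied just after the leading `\n` and `(?m:$)` just
    // before the trailing one, so `abc` at 1..4 is reported.
    assert_eq!(Some(Match::must(0, 1..4)), re.find(&mut cache, b"\nabc\n"));
    // The digit breaks `[a-z]+$`, so there is no match at all here.
    assert_eq!(None, re.find(&mut cache, b"\nabc1\n"));
    Ok(())
}
```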
Specifically, this matches + /// at the end position of the input, or at the position immediately + /// preceding a `\r` or `\n` character, but never before a `\n` when a `\r` + /// precedes it. + EndCRLF = 1 << 5, + /// Match an ASCII-only word boundary. That is, this matches a position + /// where the left adjacent character and right adjacent character + /// correspond to a word and non-word or a non-word and word character. + WordAscii = 1 << 6, + /// Match an ASCII-only negation of a word boundary. + WordAsciiNegate = 1 << 7, + /// Match a Unicode-aware word boundary. That is, this matches a position + /// where the left adjacent character and right adjacent character + /// correspond to a word and non-word or a non-word and word character. + WordUnicode = 1 << 8, + /// Match a Unicode-aware negation of a word boundary. + WordUnicodeNegate = 1 << 9, + /// Match the start of an ASCII-only word boundary. That is, this matches a + /// position at either the beginning of the haystack or where the previous + /// character is not a word character and the following character is a word + /// character. + WordStartAscii = 1 << 10, + /// Match the end of an ASCII-only word boundary. That is, this matches + /// a position at either the end of the haystack or where the previous + /// character is a word character and the following character is not a word + /// character. + WordEndAscii = 1 << 11, + /// Match the start of a Unicode word boundary. That is, this matches a + /// position at either the beginning of the haystack or where the previous + /// character is not a word character and the following character is a word + /// character. + WordStartUnicode = 1 << 12, + /// Match the end of a Unicode word boundary. That is, this matches a + /// position at either the end of the haystack or where the previous + /// character is a word character and the following character is not a word + /// character. + WordEndUnicode = 1 << 13, + /// Match the start half of an ASCII-only word boundary. That is, this + /// matches a position at either the beginning of the haystack or where the + /// previous character is not a word character. + WordStartHalfAscii = 1 << 14, + /// Match the end half of an ASCII-only word boundary. That is, this + /// matches a position at either the end of the haystack or where the + /// following character is not a word character. + WordEndHalfAscii = 1 << 15, + /// Match the start half of a Unicode word boundary. That is, this matches + /// a position at either the beginning of the haystack or where the + /// previous character is not a word character. + WordStartHalfUnicode = 1 << 16, + /// Match the end half of a Unicode word boundary. That is, this matches + /// a position at either the end of the haystack or where the following + /// character is not a word character. + WordEndHalfUnicode = 1 << 17, +} + +impl Look { + /// Flip the look-around assertion to its equivalent for reverse searches. + /// For example, `StartLF` gets translated to `EndLF`. + /// + /// Some assertions, such as `WordUnicode`, remain the same since they + /// match the same positions regardless of the direction of the search. 
+ #[inline] + pub const fn reversed(self) -> Look { + match self { + Look::Start => Look::End, + Look::End => Look::Start, + Look::StartLF => Look::EndLF, + Look::EndLF => Look::StartLF, + Look::StartCRLF => Look::EndCRLF, + Look::EndCRLF => Look::StartCRLF, + Look::WordAscii => Look::WordAscii, + Look::WordAsciiNegate => Look::WordAsciiNegate, + Look::WordUnicode => Look::WordUnicode, + Look::WordUnicodeNegate => Look::WordUnicodeNegate, + Look::WordStartAscii => Look::WordEndAscii, + Look::WordEndAscii => Look::WordStartAscii, + Look::WordStartUnicode => Look::WordEndUnicode, + Look::WordEndUnicode => Look::WordStartUnicode, + Look::WordStartHalfAscii => Look::WordEndHalfAscii, + Look::WordEndHalfAscii => Look::WordStartHalfAscii, + Look::WordStartHalfUnicode => Look::WordEndHalfUnicode, + Look::WordEndHalfUnicode => Look::WordStartHalfUnicode, + } + } + + /// Return the underlying representation of this look-around enumeration + /// as an integer. Giving the return value to the [`Look::from_repr`] + /// constructor is guaranteed to return the same look-around variant that + /// one started with within a semver compatible release of this crate. + #[inline] + pub const fn as_repr(self) -> u32 { + // AFAIK, 'as' is the only way to zero-cost convert an int enum to an + // actual int. + self as u32 + } + + /// Given the underlying representation of a `Look` value, return the + /// corresponding `Look` value if the representation is valid. Otherwise + /// `None` is returned. + #[inline] + pub const fn from_repr(repr: u32) -> Option { + match repr { + 0b00_0000_0000_0000_0001 => Some(Look::Start), + 0b00_0000_0000_0000_0010 => Some(Look::End), + 0b00_0000_0000_0000_0100 => Some(Look::StartLF), + 0b00_0000_0000_0000_1000 => Some(Look::EndLF), + 0b00_0000_0000_0001_0000 => Some(Look::StartCRLF), + 0b00_0000_0000_0010_0000 => Some(Look::EndCRLF), + 0b00_0000_0000_0100_0000 => Some(Look::WordAscii), + 0b00_0000_0000_1000_0000 => Some(Look::WordAsciiNegate), + 0b00_0000_0001_0000_0000 => Some(Look::WordUnicode), + 0b00_0000_0010_0000_0000 => Some(Look::WordUnicodeNegate), + 0b00_0000_0100_0000_0000 => Some(Look::WordStartAscii), + 0b00_0000_1000_0000_0000 => Some(Look::WordEndAscii), + 0b00_0001_0000_0000_0000 => Some(Look::WordStartUnicode), + 0b00_0010_0000_0000_0000 => Some(Look::WordEndUnicode), + 0b00_0100_0000_0000_0000 => Some(Look::WordStartHalfAscii), + 0b00_1000_0000_0000_0000 => Some(Look::WordEndHalfAscii), + 0b01_0000_0000_0000_0000 => Some(Look::WordStartHalfUnicode), + 0b10_0000_0000_0000_0000 => Some(Look::WordEndHalfUnicode), + _ => None, + } + } + + /// Returns a convenient single codepoint representation of this + /// look-around assertion. Each assertion is guaranteed to be represented + /// by a distinct character. + /// + /// This is useful for succinctly representing a look-around assertion in + /// human friendly but succinct output intended for a programmer working on + /// regex internals. 
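A quick round-trip of the representation and reversal APIs defined above, as a usage sketch against the vendored crate:

```rust
use regex_automata::util::look::Look;

fn main() {
    // Each assertion is a distinct bit, so the repr round-trips exactly.
    let look = Look::WordStartAscii;
    assert_eq!(Some(look), Look::from_repr(look.as_repr()));
    // A bit pattern that names no assertion yields `None`.
    assert_eq!(None, Look::from_repr(1 << 31));

    // Reversing swaps start/end oriented assertions and leaves symmetric
    // ones (like `\b`) unchanged.
    assert_eq!(Look::WordEndAscii, Look::WordStartAscii.reversed());
    assert_eq!(Look::WordAscii, Look::WordAscii.reversed());
}
```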
+ #[inline] + pub const fn as_char(self) -> char { + match self { + Look::Start => 'A', + Look::End => 'z', + Look::StartLF => '^', + Look::EndLF => '$', + Look::StartCRLF => 'r', + Look::EndCRLF => 'R', + Look::WordAscii => 'b', + Look::WordAsciiNegate => 'B', + Look::WordUnicode => '𝛃', + Look::WordUnicodeNegate => '𝚩', + Look::WordStartAscii => '<', + Look::WordEndAscii => '>', + Look::WordStartUnicode => '〈', + Look::WordEndUnicode => '〉', + Look::WordStartHalfAscii => '◁', + Look::WordEndHalfAscii => '▷', + Look::WordStartHalfUnicode => '◀', + Look::WordEndHalfUnicode => '▶', + } + } +} + +/// LookSet is a memory-efficient set of look-around assertions. +/// +/// This is useful for efficiently tracking look-around assertions. For +/// example, a [`thompson::NFA`](crate::nfa::thompson::NFA) provides properties +/// that return `LookSet`s. +#[derive(Clone, Copy, Default, Eq, PartialEq)] +pub struct LookSet { + /// The underlying representation this set is exposed to make it possible + /// to store it somewhere efficiently. The representation is that + /// of a bitset, where each assertion occupies bit `i` where + /// `i = Look::as_repr()`. + /// + /// Note that users of this internal representation must permit the full + /// range of `u16` values to be represented. For example, even if the + /// current implementation only makes use of the 10 least significant bits, + /// it may use more bits in a future semver compatible release. + pub bits: u32, +} + +impl LookSet { + /// Create an empty set of look-around assertions. + #[inline] + pub fn empty() -> LookSet { + LookSet { bits: 0 } + } + + /// Create a full set of look-around assertions. + /// + /// This set contains all possible look-around assertions. + #[inline] + pub fn full() -> LookSet { + LookSet { bits: !0 } + } + + /// Create a look-around set containing the look-around assertion given. + /// + /// This is a convenience routine for creating an empty set and inserting + /// one look-around assertions. + #[inline] + pub fn singleton(look: Look) -> LookSet { + LookSet::empty().insert(look) + } + + /// Returns the total number of look-around assertions in this set. + #[inline] + pub fn len(self) -> usize { + // OK because max value always fits in a u8, which in turn always + // fits in a usize, regardless of target. + usize::try_from(self.bits.count_ones()).unwrap() + } + + /// Returns true if and only if this set is empty. + #[inline] + pub fn is_empty(self) -> bool { + self.len() == 0 + } + + /// Returns true if and only if the given look-around assertion is in this + /// set. + #[inline] + pub fn contains(self, look: Look) -> bool { + self.bits & look.as_repr() != 0 + } + + /// Returns true if and only if this set contains any anchor assertions. + /// This includes both "start/end of haystack" and "start/end of line." + #[inline] + pub fn contains_anchor(&self) -> bool { + self.contains_anchor_haystack() || self.contains_anchor_line() + } + + /// Returns true if and only if this set contains any "start/end of + /// haystack" anchors. This doesn't include "start/end of line" anchors. + #[inline] + pub fn contains_anchor_haystack(&self) -> bool { + self.contains(Look::Start) || self.contains(Look::End) + } + + /// Returns true if and only if this set contains any "start/end of line" + /// anchors. This doesn't include "start/end of haystack" anchors. This + /// includes both `\n` line anchors and CRLF (`\r\n`) aware line anchors. 
+ #[inline] + pub fn contains_anchor_line(&self) -> bool { + self.contains(Look::StartLF) + || self.contains(Look::EndLF) + || self.contains(Look::StartCRLF) + || self.contains(Look::EndCRLF) + } + + /// Returns true if and only if this set contains any "start/end of line" + /// anchors that only treat `\n` as line terminators. This does not include + /// haystack anchors or CRLF aware line anchors. + #[inline] + pub fn contains_anchor_lf(&self) -> bool { + self.contains(Look::StartLF) || self.contains(Look::EndLF) + } + + /// Returns true if and only if this set contains any "start/end of line" + /// anchors that are CRLF-aware. This doesn't include "start/end of + /// haystack" or "start/end of line-feed" anchors. + #[inline] + pub fn contains_anchor_crlf(&self) -> bool { + self.contains(Look::StartCRLF) || self.contains(Look::EndCRLF) + } + + /// Returns true if and only if this set contains any word boundary or + /// negated word boundary assertions. This include both Unicode and ASCII + /// word boundaries. + #[inline] + pub fn contains_word(self) -> bool { + self.contains_word_unicode() || self.contains_word_ascii() + } + + /// Returns true if and only if this set contains any Unicode word boundary + /// or negated Unicode word boundary assertions. + #[inline] + pub fn contains_word_unicode(self) -> bool { + self.contains(Look::WordUnicode) + || self.contains(Look::WordUnicodeNegate) + || self.contains(Look::WordStartUnicode) + || self.contains(Look::WordEndUnicode) + || self.contains(Look::WordStartHalfUnicode) + || self.contains(Look::WordEndHalfUnicode) + } + + /// Returns true if and only if this set contains any ASCII word boundary + /// or negated ASCII word boundary assertions. + #[inline] + pub fn contains_word_ascii(self) -> bool { + self.contains(Look::WordAscii) + || self.contains(Look::WordAsciiNegate) + || self.contains(Look::WordStartAscii) + || self.contains(Look::WordEndAscii) + || self.contains(Look::WordStartHalfAscii) + || self.contains(Look::WordEndHalfAscii) + } + + /// Returns an iterator over all of the look-around assertions in this set. + #[inline] + pub fn iter(self) -> LookSetIter { + LookSetIter { set: self } + } + + /// Return a new set that is equivalent to the original, but with the given + /// assertion added to it. If the assertion is already in the set, then the + /// returned set is equivalent to the original. + #[inline] + pub fn insert(self, look: Look) -> LookSet { + LookSet { bits: self.bits | look.as_repr() } + } + + /// Updates this set in place with the result of inserting the given + /// assertion into this set. + #[inline] + pub fn set_insert(&mut self, look: Look) { + *self = self.insert(look); + } + + /// Return a new set that is equivalent to the original, but with the given + /// assertion removed from it. If the assertion is not in the set, then the + /// returned set is equivalent to the original. + #[inline] + pub fn remove(self, look: Look) -> LookSet { + LookSet { bits: self.bits & !look.as_repr() } + } + + /// Updates this set in place with the result of removing the given + /// assertion from this set. + #[inline] + pub fn set_remove(&mut self, look: Look) { + *self = self.remove(look); + } + + /// Returns a new set that is the result of subtracting the given set from + /// this set. + #[inline] + pub fn subtract(self, other: LookSet) -> LookSet { + LookSet { bits: self.bits & !other.bits } + } + + /// Updates this set in place with the result of subtracting the given set + /// from this set. 
+ #[inline] + pub fn set_subtract(&mut self, other: LookSet) { + *self = self.subtract(other); + } + + /// Returns a new set that is the union of this and the one given. + #[inline] + pub fn union(self, other: LookSet) -> LookSet { + LookSet { bits: self.bits | other.bits } + } + + /// Updates this set in place with the result of unioning it with the one + /// given. + #[inline] + pub fn set_union(&mut self, other: LookSet) { + *self = self.union(other); + } + + /// Returns a new set that is the intersection of this and the one given. + #[inline] + pub fn intersect(self, other: LookSet) -> LookSet { + LookSet { bits: self.bits & other.bits } + } + + /// Updates this set in place with the result of intersecting it with the + /// one given. + #[inline] + pub fn set_intersect(&mut self, other: LookSet) { + *self = self.intersect(other); + } + + /// Return a `LookSet` from the slice given as a native endian 32-bit + /// integer. + /// + /// # Panics + /// + /// This panics if `slice.len() < 4`. + #[inline] + pub fn read_repr(slice: &[u8]) -> LookSet { + let bits = u32::from_ne_bytes(slice[..4].try_into().unwrap()); + LookSet { bits } + } + + /// Write a `LookSet` as a native endian 32-bit integer to the beginning + /// of the slice given. + /// + /// # Panics + /// + /// This panics if `slice.len() < 4`. + #[inline] + pub fn write_repr(self, slice: &mut [u8]) { + let raw = self.bits.to_ne_bytes(); + slice[0] = raw[0]; + slice[1] = raw[1]; + slice[2] = raw[2]; + slice[3] = raw[3]; + } + + /// Checks that all assertions in this set can be matched. + /// + /// Some assertions, such as Unicode word boundaries, require optional (but + /// enabled by default) tables that may not be available. If there are + /// assertions in this set that require tables that are not available, then + /// this will return an error. + /// + /// Specifically, this returns an error when the + /// `unicode-word-boundary` feature is _not_ enabled _and_ this set + /// contains a Unicode word boundary assertion. + /// + /// It can be useful to use this on the result of + /// [`NFA::look_set_any`](crate::nfa::thompson::NFA::look_set_any) + /// when building a matcher engine to ensure methods like + /// [`LookMatcher::matches_set`] do not panic at search time. + pub fn available(self) -> Result<(), UnicodeWordBoundaryError> { + if self.contains_word_unicode() { + UnicodeWordBoundaryError::check()?; + } + Ok(()) + } +} + +impl core::fmt::Debug for LookSet { + fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { + if self.is_empty() { + return write!(f, "∅"); + } + for look in self.iter() { + write!(f, "{}", look.as_char())?; + } + Ok(()) + } +} + +/// An iterator over all look-around assertions in a [`LookSet`]. +/// +/// This iterator is created by [`LookSet::iter`]. +#[derive(Clone, Debug)] +pub struct LookSetIter { + set: LookSet, +} + +impl Iterator for LookSetIter { + type Item = Look; + + #[inline] + fn next(&mut self) -> Option { + if self.set.is_empty() { + return None; + } + // We'll never have more than u8::MAX distinct look-around assertions, + // so 'bit' will always fit into a u16. + let bit = u16::try_from(self.set.bits.trailing_zeros()).unwrap(); + let look = Look::from_repr(1 << bit)?; + self.set = self.set.remove(look); + Some(look) + } +} + +/// A matcher for look-around assertions. +/// +/// This matcher permits configuring aspects of how look-around assertions are +/// matched. 
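Putting the `LookSet` operations above together, a small usage sketch showing the persistent-style updates and the fixed 4-byte serialized form:

```rust
use regex_automata::util::look::{Look, LookSet};

fn main() {
    // `insert` returns a new set, so construction chains naturally.
    let set = LookSet::empty()
        .insert(Look::StartLF)
        .insert(Look::WordAscii);
    assert_eq!(2, set.len());
    assert!(set.contains(Look::StartLF));
    assert!(!set.contains(Look::WordUnicode));

    // Sets are plain `u32` bitmasks, so they serialize in place.
    let mut buf = [0u8; 4];
    set.write_repr(&mut buf);
    assert_eq!(set, LookSet::read_repr(&buf));

    // Union/subtract return new sets; the `set_*` variants mutate in place.
    let line_anchors = LookSet::singleton(Look::StartLF).insert(Look::EndLF);
    assert!(set.union(line_anchors).contains(Look::EndLF));
    assert!(!set.subtract(line_anchors).contains(Look::StartLF));
}
```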
+/// +/// # Example +/// +/// A `LookMatcher` can change the line terminator used for matching multi-line +/// anchors such as `(?m:^)` and `(?m:$)`. +/// +/// ``` +/// use regex_automata::{ +/// nfa::thompson::{self, pikevm::PikeVM}, +/// util::look::LookMatcher, +/// Match, Input, +/// }; +/// +/// let mut lookm = LookMatcher::new(); +/// lookm.set_line_terminator(b'\x00'); +/// +/// let re = PikeVM::builder() +/// .thompson(thompson::Config::new().look_matcher(lookm)) +/// .build(r"(?m)^[a-z]+$")?; +/// let mut cache = re.create_cache(); +/// +/// // Multi-line assertions now use NUL as a terminator. +/// assert_eq!( +/// Some(Match::must(0, 1..4)), +/// re.find(&mut cache, b"\x00abc\x00"), +/// ); +/// // ... and \n is no longer recognized as a terminator. +/// assert_eq!( +/// None, +/// re.find(&mut cache, b"\nabc\n"), +/// ); +/// +/// # Ok::<(), Box>(()) +/// ``` +#[derive(Clone, Debug)] +pub struct LookMatcher { + lineterm: DebugByte, +} + +impl LookMatcher { + /// Creates a new default matcher for look-around assertions. + pub fn new() -> LookMatcher { + LookMatcher { lineterm: DebugByte(b'\n') } + } + + /// Sets the line terminator for use with `(?m:^)` and `(?m:$)`. + /// + /// Namely, instead of `^` matching after `\n` and `$` matching immediately + /// before a `\n`, this will cause it to match after and before the byte + /// given. + /// + /// It can occasionally be useful to use this to configure the line + /// terminator to the NUL byte when searching binary data. + /// + /// Note that this does not apply to CRLF-aware line anchors such as + /// `(?Rm:^)` and `(?Rm:$)`. CRLF-aware line anchors are hard-coded to + /// use `\r` and `\n`. + pub fn set_line_terminator(&mut self, byte: u8) -> &mut LookMatcher { + self.lineterm.0 = byte; + self + } + + /// Returns the line terminator that was configured for this matcher. + /// + /// If no line terminator was configured, then this returns `\n`. + /// + /// Note that the line terminator should only be used for matching `(?m:^)` + /// and `(?m:$)` assertions. It specifically should _not_ be used for + /// matching the CRLF aware assertions `(?Rm:^)` and `(?Rm:$)`. + pub fn get_line_terminator(&self) -> u8 { + self.lineterm.0 + } + + /// Returns true when the position `at` in `haystack` satisfies the given + /// look-around assertion. + /// + /// # Panics + /// + /// This panics when testing any Unicode word boundary assertion in this + /// set and when the Unicode word data is not available. Specifically, this + /// only occurs when the `unicode-word-boundary` feature is not enabled. + /// + /// Since it's generally expected that this routine is called inside of + /// a matching engine, callers should check the error condition when + /// building the matching engine. If there is a Unicode word boundary + /// in the matcher and the data isn't available, then the matcher should + /// fail to build. + /// + /// Callers can check the error condition with [`LookSet::available`]. + /// + /// This also may panic when `at > haystack.len()`. Note that `at == + /// haystack.len()` is legal and guaranteed not to panic. + #[inline] + pub fn matches(&self, look: Look, haystack: &[u8], at: usize) -> bool { + self.matches_inline(look, haystack, at) + } + + /// Like `matches`, but forcefully inlined. + /// + /// # Panics + /// + /// This panics when testing any Unicode word boundary assertion in this + /// set and when the Unicode word data is not available. 
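To make the `matches` semantics above concrete, here is how the haystack and line anchors evaluate on a small input with the default `\n` terminator (usage sketch):

```rust
use regex_automata::util::look::{Look, LookMatcher};

fn main() {
    let lookm = LookMatcher::new();
    let hay = b"abc\ndef";

    // Haystack anchors only match at the very ends.
    assert!(lookm.matches(Look::Start, hay, 0));
    assert!(!lookm.matches(Look::Start, hay, 4));
    assert!(lookm.matches(Look::End, hay, hay.len()));

    // With the default `\n` terminator, line anchors also match around
    // the interior newline: `$` just before it, `^` just after it.
    assert!(lookm.matches(Look::EndLF, hay, 3));
    assert!(lookm.matches(Look::StartLF, hay, 4));
    assert!(!lookm.matches(Look::StartLF, hay, 2));
}
```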
Specifically, this + /// only occurs when the `unicode-word-boundary` feature is not enabled. + /// + /// Since it's generally expected that this routine is called inside of + /// a matching engine, callers should check the error condition when + /// building the matching engine. If there is a Unicode word boundary + /// in the matcher and the data isn't available, then the matcher should + /// fail to build. + /// + /// Callers can check the error condition with [`LookSet::available`]. + /// + /// This also may panic when `at > haystack.len()`. Note that `at == + /// haystack.len()` is legal and guaranteed not to panic. + #[cfg_attr(feature = "perf-inline", inline(always))] + pub(crate) fn matches_inline( + &self, + look: Look, + haystack: &[u8], + at: usize, + ) -> bool { + match look { + Look::Start => self.is_start(haystack, at), + Look::End => self.is_end(haystack, at), + Look::StartLF => self.is_start_lf(haystack, at), + Look::EndLF => self.is_end_lf(haystack, at), + Look::StartCRLF => self.is_start_crlf(haystack, at), + Look::EndCRLF => self.is_end_crlf(haystack, at), + Look::WordAscii => self.is_word_ascii(haystack, at), + Look::WordAsciiNegate => self.is_word_ascii_negate(haystack, at), + Look::WordUnicode => self.is_word_unicode(haystack, at).unwrap(), + Look::WordUnicodeNegate => { + self.is_word_unicode_negate(haystack, at).unwrap() + } + Look::WordStartAscii => self.is_word_start_ascii(haystack, at), + Look::WordEndAscii => self.is_word_end_ascii(haystack, at), + Look::WordStartUnicode => { + self.is_word_start_unicode(haystack, at).unwrap() + } + Look::WordEndUnicode => { + self.is_word_end_unicode(haystack, at).unwrap() + } + Look::WordStartHalfAscii => { + self.is_word_start_half_ascii(haystack, at) + } + Look::WordEndHalfAscii => { + self.is_word_end_half_ascii(haystack, at) + } + Look::WordStartHalfUnicode => { + self.is_word_start_half_unicode(haystack, at).unwrap() + } + Look::WordEndHalfUnicode => { + self.is_word_end_half_unicode(haystack, at).unwrap() + } + } + } + + /// Returns true when _all_ of the assertions in the given set match at the + /// given position in the haystack. + /// + /// # Panics + /// + /// This panics when testing any Unicode word boundary assertion in this + /// set and when the Unicode word data is not available. Specifically, this + /// only occurs when the `unicode-word-boundary` feature is not enabled. + /// + /// Since it's generally expected that this routine is called inside of + /// a matching engine, callers should check the error condition when + /// building the matching engine. If there is a Unicode word boundary + /// in the matcher and the data isn't available, then the matcher should + /// fail to build. + /// + /// Callers can check the error condition with [`LookSet::available`]. + /// + /// This also may panic when `at > haystack.len()`. Note that `at == + /// haystack.len()` is legal and guaranteed not to panic. + #[inline] + pub fn matches_set( + &self, + set: LookSet, + haystack: &[u8], + at: usize, + ) -> bool { + self.matches_set_inline(set, haystack, at) + } + + /// Like `LookSet::matches`, but forcefully inlined for perf. + #[cfg_attr(feature = "perf-inline", inline(always))] + pub(crate) fn matches_set_inline( + &self, + set: LookSet, + haystack: &[u8], + at: usize, + ) -> bool { + // This used to use LookSet::iter with Look::matches on each element, + // but that proved to be quite disastrous for perf. The manual "if + // the set has this assertion, check it" turns out to be quite a bit + // faster. 
+ if set.contains(Look::Start) { + if !self.is_start(haystack, at) { + return false; + } + } + if set.contains(Look::End) { + if !self.is_end(haystack, at) { + return false; + } + } + if set.contains(Look::StartLF) { + if !self.is_start_lf(haystack, at) { + return false; + } + } + if set.contains(Look::EndLF) { + if !self.is_end_lf(haystack, at) { + return false; + } + } + if set.contains(Look::StartCRLF) { + if !self.is_start_crlf(haystack, at) { + return false; + } + } + if set.contains(Look::EndCRLF) { + if !self.is_end_crlf(haystack, at) { + return false; + } + } + if set.contains(Look::WordAscii) { + if !self.is_word_ascii(haystack, at) { + return false; + } + } + if set.contains(Look::WordAsciiNegate) { + if !self.is_word_ascii_negate(haystack, at) { + return false; + } + } + if set.contains(Look::WordUnicode) { + if !self.is_word_unicode(haystack, at).unwrap() { + return false; + } + } + if set.contains(Look::WordUnicodeNegate) { + if !self.is_word_unicode_negate(haystack, at).unwrap() { + return false; + } + } + if set.contains(Look::WordStartAscii) { + if !self.is_word_start_ascii(haystack, at) { + return false; + } + } + if set.contains(Look::WordEndAscii) { + if !self.is_word_end_ascii(haystack, at) { + return false; + } + } + if set.contains(Look::WordStartUnicode) { + if !self.is_word_start_unicode(haystack, at).unwrap() { + return false; + } + } + if set.contains(Look::WordEndUnicode) { + if !self.is_word_end_unicode(haystack, at).unwrap() { + return false; + } + } + if set.contains(Look::WordStartHalfAscii) { + if !self.is_word_start_half_ascii(haystack, at) { + return false; + } + } + if set.contains(Look::WordEndHalfAscii) { + if !self.is_word_end_half_ascii(haystack, at) { + return false; + } + } + if set.contains(Look::WordStartHalfUnicode) { + if !self.is_word_start_half_unicode(haystack, at).unwrap() { + return false; + } + } + if set.contains(Look::WordEndHalfUnicode) { + if !self.is_word_end_half_unicode(haystack, at).unwrap() { + return false; + } + } + true + } + + /// Split up the given byte classes into equivalence classes in a way that + /// is consistent with this look-around assertion. + #[cfg(feature = "alloc")] + pub(crate) fn add_to_byteset( + &self, + look: Look, + set: &mut crate::util::alphabet::ByteClassSet, + ) { + match look { + Look::Start | Look::End => {} + Look::StartLF | Look::EndLF => { + set.set_range(self.lineterm.0, self.lineterm.0); + } + Look::StartCRLF | Look::EndCRLF => { + set.set_range(b'\r', b'\r'); + set.set_range(b'\n', b'\n'); + } + Look::WordAscii + | Look::WordAsciiNegate + | Look::WordUnicode + | Look::WordUnicodeNegate + | Look::WordStartAscii + | Look::WordEndAscii + | Look::WordStartUnicode + | Look::WordEndUnicode + | Look::WordStartHalfAscii + | Look::WordEndHalfAscii + | Look::WordStartHalfUnicode + | Look::WordEndHalfUnicode => { + // We need to mark all ranges of bytes whose pairs result in + // evaluating \b differently. This isn't technically correct + // for Unicode word boundaries, but DFAs can't handle those + // anyway, and thus, the byte classes don't need to either + // since they are themselves only used in DFAs. + // + // FIXME: It seems like the calls to 'set_range' here are + // completely invariant, which means we could just hard-code + // them here without needing to write a loop. And we only need + // to do this dance at most once per regex. + // + // FIXME: Is this correct for \B? 
+ let iswb = utf8::is_word_byte; + // This unwrap is OK because we guard every use of 'asu8' with + // a check that the input is <= 255. + let asu8 = |b: u16| u8::try_from(b).unwrap(); + let mut b1: u16 = 0; + let mut b2: u16; + while b1 <= 255 { + b2 = b1 + 1; + while b2 <= 255 && iswb(asu8(b1)) == iswb(asu8(b2)) { + b2 += 1; + } + // The guards above guarantee that b2 can never get any + // bigger. + assert!(b2 <= 256); + // Subtracting 1 from b2 is always OK because it is always + // at least 1 greater than b1, and the assert above + // guarantees that the asu8 conversion will succeed. + set.set_range(asu8(b1), asu8(b2.checked_sub(1).unwrap())); + b1 = b2; + } + } + } + } + + /// Returns true when [`Look::Start`] is satisfied `at` the given position + /// in `haystack`. + /// + /// # Panics + /// + /// This may panic when `at > haystack.len()`. Note that `at == + /// haystack.len()` is legal and guaranteed not to panic. + #[inline] + pub fn is_start(&self, _haystack: &[u8], at: usize) -> bool { + at == 0 + } + + /// Returns true when [`Look::End`] is satisfied `at` the given position in + /// `haystack`. + /// + /// # Panics + /// + /// This may panic when `at > haystack.len()`. Note that `at == + /// haystack.len()` is legal and guaranteed not to panic. + #[inline] + pub fn is_end(&self, haystack: &[u8], at: usize) -> bool { + at == haystack.len() + } + + /// Returns true when [`Look::StartLF`] is satisfied `at` the given + /// position in `haystack`. + /// + /// # Panics + /// + /// This may panic when `at > haystack.len()`. Note that `at == + /// haystack.len()` is legal and guaranteed not to panic. + #[inline] + pub fn is_start_lf(&self, haystack: &[u8], at: usize) -> bool { + self.is_start(haystack, at) || haystack[at - 1] == self.lineterm.0 + } + + /// Returns true when [`Look::EndLF`] is satisfied `at` the given position + /// in `haystack`. + /// + /// # Panics + /// + /// This may panic when `at > haystack.len()`. Note that `at == + /// haystack.len()` is legal and guaranteed not to panic. + #[inline] + pub fn is_end_lf(&self, haystack: &[u8], at: usize) -> bool { + self.is_end(haystack, at) || haystack[at] == self.lineterm.0 + } + + /// Returns true when [`Look::StartCRLF`] is satisfied `at` the given + /// position in `haystack`. + /// + /// # Panics + /// + /// This may panic when `at > haystack.len()`. Note that `at == + /// haystack.len()` is legal and guaranteed not to panic. + #[inline] + pub fn is_start_crlf(&self, haystack: &[u8], at: usize) -> bool { + self.is_start(haystack, at) + || haystack[at - 1] == b'\n' + || (haystack[at - 1] == b'\r' + && (at >= haystack.len() || haystack[at] != b'\n')) + } + + /// Returns true when [`Look::EndCRLF`] is satisfied `at` the given + /// position in `haystack`. + /// + /// # Panics + /// + /// This may panic when `at > haystack.len()`. Note that `at == + /// haystack.len()` is legal and guaranteed not to panic. + #[inline] + pub fn is_end_crlf(&self, haystack: &[u8], at: usize) -> bool { + self.is_end(haystack, at) + || haystack[at] == b'\r' + || (haystack[at] == b'\n' + && (at == 0 || haystack[at - 1] != b'\r')) + } + + /// Returns true when [`Look::WordAscii`] is satisfied `at` the given + /// position in `haystack`. + /// + /// # Panics + /// + /// This may panic when `at > haystack.len()`. Note that `at == + /// haystack.len()` is legal and guaranteed not to panic. 
+ #[inline] + pub fn is_word_ascii(&self, haystack: &[u8], at: usize) -> bool { + let word_before = at > 0 && utf8::is_word_byte(haystack[at - 1]); + let word_after = + at < haystack.len() && utf8::is_word_byte(haystack[at]); + word_before != word_after + } + + /// Returns true when [`Look::WordAsciiNegate`] is satisfied `at` the given + /// position in `haystack`. + /// + /// # Panics + /// + /// This may panic when `at > haystack.len()`. Note that `at == + /// haystack.len()` is legal and guaranteed not to panic. + #[inline] + pub fn is_word_ascii_negate(&self, haystack: &[u8], at: usize) -> bool { + !self.is_word_ascii(haystack, at) + } + + /// Returns true when [`Look::WordUnicode`] is satisfied `at` the given + /// position in `haystack`. + /// + /// # Panics + /// + /// This may panic when `at > haystack.len()`. Note that `at == + /// haystack.len()` is legal and guaranteed not to panic. + /// + /// # Errors + /// + /// This returns an error when Unicode word boundary tables + /// are not available. Specifically, this only occurs when the + /// `unicode-word-boundary` feature is not enabled. + #[inline] + pub fn is_word_unicode( + &self, + haystack: &[u8], + at: usize, + ) -> Result { + let word_before = is_word_char::rev(haystack, at)?; + let word_after = is_word_char::fwd(haystack, at)?; + Ok(word_before != word_after) + } + + /// Returns true when [`Look::WordUnicodeNegate`] is satisfied `at` the + /// given position in `haystack`. + /// + /// # Panics + /// + /// This may panic when `at > haystack.len()`. Note that `at == + /// haystack.len()` is legal and guaranteed not to panic. + /// + /// # Errors + /// + /// This returns an error when Unicode word boundary tables + /// are not available. Specifically, this only occurs when the + /// `unicode-word-boundary` feature is not enabled. + #[inline] + pub fn is_word_unicode_negate( + &self, + haystack: &[u8], + at: usize, + ) -> Result { + // This is pretty subtle. Why do we need to do UTF-8 decoding here? + // Well... at time of writing, the is_word_char_{fwd,rev} routines will + // only return true if there is a valid UTF-8 encoding of a "word" + // codepoint, and false in every other case (including invalid UTF-8). + // This means that in regions of invalid UTF-8 (which might be a + // subset of valid UTF-8!), it would result in \B matching. While this + // would be questionable in the context of truly invalid UTF-8, it is + // *certainly* wrong to report match boundaries that split the encoding + // of a codepoint. So to work around this, we ensure that we can decode + // a codepoint on either side of `at`. If either direction fails, then + // we don't permit \B to match at all. + // + // Now, this isn't exactly optimal from a perf perspective. We could + // try and detect this in is_word_char::{fwd,rev}, but it's not clear + // if it's worth it. \B is, after all, rarely used. Even worse, + // is_word_char::{fwd,rev} could do its own UTF-8 decoding, and so this + // will wind up doing UTF-8 decoding twice. Ouch. We could fix this + // with more code complexity, but it just doesn't feel worth it for \B. + // + // And in particular, we do *not* have to do this with \b, because \b + // *requires* that at least one side of `at` be a "word" codepoint, + // which in turn implies one side of `at` must be valid UTF-8. This in + // turn implies that \b can never split a valid UTF-8 encoding of a + // codepoint. 
In the case where one side of `at` is truly invalid UTF-8 + // and the other side IS a word codepoint, then we want \b to match + // since it represents a valid UTF-8 boundary. It also makes sense. For + // example, you'd want \b\w+\b to match 'abc' in '\xFFabc\xFF'. + // + // Note also that this is not just '!is_word_unicode(..)' like it is + // for the ASCII case. For example, neither \b nor \B is satisfied + // within invalid UTF-8 sequences. + let word_before = at > 0 + && match utf8::decode_last(&haystack[..at]) { + None | Some(Err(_)) => return Ok(false), + Some(Ok(_)) => is_word_char::rev(haystack, at)?, + }; + let word_after = at < haystack.len() + && match utf8::decode(&haystack[at..]) { + None | Some(Err(_)) => return Ok(false), + Some(Ok(_)) => is_word_char::fwd(haystack, at)?, + }; + Ok(word_before == word_after) + } + + /// Returns true when [`Look::WordStartAscii`] is satisfied `at` the given + /// position in `haystack`. + /// + /// # Panics + /// + /// This may panic when `at > haystack.len()`. Note that `at == + /// haystack.len()` is legal and guaranteed not to panic. + #[inline] + pub fn is_word_start_ascii(&self, haystack: &[u8], at: usize) -> bool { + let word_before = at > 0 && utf8::is_word_byte(haystack[at - 1]); + let word_after = + at < haystack.len() && utf8::is_word_byte(haystack[at]); + !word_before && word_after + } + + /// Returns true when [`Look::WordEndAscii`] is satisfied `at` the given + /// position in `haystack`. + /// + /// # Panics + /// + /// This may panic when `at > haystack.len()`. Note that `at == + /// haystack.len()` is legal and guaranteed not to panic. + #[inline] + pub fn is_word_end_ascii(&self, haystack: &[u8], at: usize) -> bool { + let word_before = at > 0 && utf8::is_word_byte(haystack[at - 1]); + let word_after = + at < haystack.len() && utf8::is_word_byte(haystack[at]); + word_before && !word_after + } + + /// Returns true when [`Look::WordStartUnicode`] is satisfied `at` the + /// given position in `haystack`. + /// + /// # Panics + /// + /// This may panic when `at > haystack.len()`. Note that `at == + /// haystack.len()` is legal and guaranteed not to panic. + /// + /// # Errors + /// + /// This returns an error when Unicode word boundary tables + /// are not available. Specifically, this only occurs when the + /// `unicode-word-boundary` feature is not enabled. + #[inline] + pub fn is_word_start_unicode( + &self, + haystack: &[u8], + at: usize, + ) -> Result { + let word_before = is_word_char::rev(haystack, at)?; + let word_after = is_word_char::fwd(haystack, at)?; + Ok(!word_before && word_after) + } + + /// Returns true when [`Look::WordEndUnicode`] is satisfied `at` the + /// given position in `haystack`. + /// + /// # Panics + /// + /// This may panic when `at > haystack.len()`. Note that `at == + /// haystack.len()` is legal and guaranteed not to panic. + /// + /// # Errors + /// + /// This returns an error when Unicode word boundary tables + /// are not available. Specifically, this only occurs when the + /// `unicode-word-boundary` feature is not enabled. + #[inline] + pub fn is_word_end_unicode( + &self, + haystack: &[u8], + at: usize, + ) -> Result { + let word_before = is_word_char::rev(haystack, at)?; + let word_after = is_word_char::fwd(haystack, at)?; + Ok(word_before && !word_after) + } + + /// Returns true when [`Look::WordStartHalfAscii`] is satisfied `at` the + /// given position in `haystack`. + /// + /// # Panics + /// + /// This may panic when `at > haystack.len()`. 
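The ASCII word-boundary family above reduces to comparing "is the previous byte a word byte" against "is the next byte a word byte". A usage sketch on a small haystack:

```rust
use regex_automata::util::look::{Look, LookMatcher};

fn main() {
    let lookm = LookMatcher::new();
    let hay = b"ab cd";

    // `\b` holds wherever exactly one side is a word byte.
    assert!(lookm.matches(Look::WordAscii, hay, 0)); // start | 'a'
    assert!(lookm.matches(Look::WordAscii, hay, 2)); // 'b' | ' '
    assert!(!lookm.matches(Look::WordAscii, hay, 1)); // 'a' | 'b'

    // The start/end variants additionally fix which side is the word side.
    assert!(lookm.matches(Look::WordStartAscii, hay, 3)); // ' ' | 'c'
    assert!(!lookm.matches(Look::WordStartAscii, hay, 2));
    assert!(lookm.matches(Look::WordEndAscii, hay, hay.len())); // 'd' | end
}
```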
Note that `at == + /// haystack.len()` is legal and guaranteed not to panic. + #[inline] + pub fn is_word_start_half_ascii( + &self, + haystack: &[u8], + at: usize, + ) -> bool { + let word_before = at > 0 && utf8::is_word_byte(haystack[at - 1]); + !word_before + } + + /// Returns true when [`Look::WordEndHalfAscii`] is satisfied `at` the + /// given position in `haystack`. + /// + /// # Panics + /// + /// This may panic when `at > haystack.len()`. Note that `at == + /// haystack.len()` is legal and guaranteed not to panic. + #[inline] + pub fn is_word_end_half_ascii(&self, haystack: &[u8], at: usize) -> bool { + let word_after = + at < haystack.len() && utf8::is_word_byte(haystack[at]); + !word_after + } + + /// Returns true when [`Look::WordStartHalfUnicode`] is satisfied `at` the + /// given position in `haystack`. + /// + /// # Panics + /// + /// This may panic when `at > haystack.len()`. Note that `at == + /// haystack.len()` is legal and guaranteed not to panic. + /// + /// # Errors + /// + /// This returns an error when Unicode word boundary tables + /// are not available. Specifically, this only occurs when the + /// `unicode-word-boundary` feature is not enabled. + #[inline] + pub fn is_word_start_half_unicode( + &self, + haystack: &[u8], + at: usize, + ) -> Result { + // See `is_word_unicode_negate` for why we need to do this. We don't + // need to do it for `is_word_start_unicode` because that guarantees + // that the position matched falls on a valid UTF-8 boundary given + // that the right side must be in \w. + let word_before = at > 0 + && match utf8::decode_last(&haystack[..at]) { + None | Some(Err(_)) => return Ok(false), + Some(Ok(_)) => is_word_char::rev(haystack, at)?, + }; + Ok(!word_before) + } + + /// Returns true when [`Look::WordEndHalfUnicode`] is satisfied `at` the + /// given position in `haystack`. + /// + /// # Panics + /// + /// This may panic when `at > haystack.len()`. Note that `at == + /// haystack.len()` is legal and guaranteed not to panic. + /// + /// # Errors + /// + /// This returns an error when Unicode word boundary tables + /// are not available. Specifically, this only occurs when the + /// `unicode-word-boundary` feature is not enabled. + #[inline] + pub fn is_word_end_half_unicode( + &self, + haystack: &[u8], + at: usize, + ) -> Result { + // See `is_word_unicode_negate` for why we need to do this. We don't + // need to do it for `is_word_end_unicode` because that guarantees + // that the position matched falls on a valid UTF-8 boundary given + // that the left side must be in \w. + let word_after = at < haystack.len() + && match utf8::decode(&haystack[at..]) { + None | Some(Err(_)) => return Ok(false), + Some(Ok(_)) => is_word_char::fwd(haystack, at)?, + }; + Ok(!word_after) + } +} + +impl Default for LookMatcher { + fn default() -> LookMatcher { + LookMatcher::new() + } +} + +/// An error that occurs when the Unicode-aware `\w` class is unavailable. +/// +/// This error can occur when the data tables necessary for the Unicode aware +/// Perl character class `\w` are unavailable. The `\w` class is used to +/// determine whether a codepoint is considered a word character or not when +/// determining whether a Unicode aware `\b` (or `\B`) matches at a particular +/// position. +/// +/// This error can only occur when the `unicode-word-boundary` feature is +/// disabled. 
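+///
+/// # Example
+///
+/// A minimal sketch of probing for this condition up front via
+/// [`UnicodeWordBoundaryError::check`]; what to do on failure is up to the
+/// caller:
+///
+/// ```ignore
+/// use regex_automata::util::look::UnicodeWordBoundaryError;
+///
+/// // `check` returns an error precisely when the Unicode word boundary
+/// // data tables were not compiled in (i.e., the feature is disabled).
+/// if UnicodeWordBoundaryError::check().is_err() {
+///     // Unicode-aware \b and \B cannot be used; fall back to (?-u:\b).
+/// }
+/// ```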
+#[derive(Clone, Debug)] +pub struct UnicodeWordBoundaryError(()); + +impl UnicodeWordBoundaryError { + #[cfg(not(feature = "unicode-word-boundary"))] + pub(crate) fn new() -> UnicodeWordBoundaryError { + UnicodeWordBoundaryError(()) + } + + /// Returns an error if and only if Unicode word boundary data is + /// unavailable. + pub fn check() -> Result<(), UnicodeWordBoundaryError> { + is_word_char::check() + } +} + +#[cfg(feature = "std")] +impl std::error::Error for UnicodeWordBoundaryError {} + +impl core::fmt::Display for UnicodeWordBoundaryError { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + write!( + f, + "Unicode-aware \\b and \\B are unavailable because the \ + requisite data tables are missing, please enable the \ + unicode-word-boundary feature" + ) + } +} + +// Below are FOUR different ways for checking whether whether a "word" +// codepoint exists at a particular position in the haystack. The four +// different approaches are, in order of preference: +// +// 1. Parse '\w', convert to an NFA, convert to a fully compiled DFA on the +// first call, and then use that DFA for all subsequent calls. +// 2. Do UTF-8 decoding and use regex_syntax::is_word_character if available. +// 3. Do UTF-8 decoding and use our own 'perl_word' table. +// 4. Return an error. +// +// The reason for all of these approaches is a combination of perf and +// permitting one to build regex-automata without the Unicode data necessary +// for handling Unicode-aware word boundaries. (In which case, '(?-u:\b)' would +// still work.) +// +// The DFA approach is the fastest, but it requires the regex parser, the +// NFA compiler, the DFA builder and the DFA search runtime. That's a lot to +// bring in, but if it's available, it's (probably) the best we can do. +// +// Approaches (2) and (3) are effectively equivalent, but (2) reuses the +// data in regex-syntax and avoids duplicating it in regex-automata. +// +// Finally, (4) unconditionally returns an error since the requisite data isn't +// available anywhere. +// +// There are actually more approaches possible that we didn't implement. For +// example, if the DFA builder is available but the syntax parser is not, we +// could technically hand construct our own NFA from the 'perl_word' data +// table. But to avoid some pretty hairy code duplication, we would in turn +// need to pull the UTF-8 compiler out of the NFA compiler. Yikes. +// +// A possibly more sensible alternative is to use a lazy DFA when the full +// DFA builder isn't available... +// +// Yet another choice would be to build the full DFA and then embed it into the +// source. Then we'd only need to bring in the DFA search runtime, which is +// considerably smaller than the DFA builder code. The problem here is that the +// Debian people have spooked me[1] into avoiding cyclic dependencies. Namely, +// we'd need to build regex-cli, which depends on regex-automata in order to +// build some part of regex-automata. But to be honest, something like this has +// to be allowed somehow? I just don't know what the right process is. +// +// There are perhaps other choices as well. Why did I stop at these 4? Because +// I wanted to preserve my sanity. I suspect I'll wind up adding the lazy DFA +// approach eventually, as the benefits of the DFA approach are somewhat +// compelling. The 'boundary-words-holmes' benchmark tests this. (Note that +// the commands below no longer work. If necessary, we should re-capitulate +// the benchmark from whole cloth in rebar.) 
+// +// $ regex-cli bench measure -f boundary-words-holmes -e pikevm > dfa.csv +// +// Then I changed the code below so that the util/unicode_data/perl_word table +// was used and re-ran the benchmark: +// +// $ regex-cli bench measure -f boundary-words-holmes -e pikevm > table.csv +// +// And compared them: +// +// $ regex-cli bench diff dfa.csv table.csv +// benchmark engine dfa table +// --------- ------ --- ----- +// internal/count/boundary-words-holmes regex/automata/pikevm 18.6 MB/s 12.9 MB/s +// +// Which is a nice improvement. +// +// UPDATE: It turns out that it takes approximately 22ms to build the reverse +// DFA for \w. (And about 3ms for the forward DFA.) It's probably not much in +// the grand scheme things, but that is a significant latency cost. So I'm not +// sure that's a good idea. I then tried using a lazy DFA instead, and that +// eliminated the overhead, but since the lazy DFA requires mutable working +// memory, that requires introducing a 'Cache' for every simultaneous call. +// +// I ended up deciding for now to just keep the "UTF-8 decode and check the +// table." The DFA and lazy DFA approaches are still below, but commented out. +// +// [1]: https://github.com/BurntSushi/ucd-generate/issues/11 + +/* +/// A module that looks for word codepoints using lazy DFAs. +#[cfg(all( + feature = "unicode-word-boundary", + feature = "syntax", + feature = "unicode-perl", + feature = "hybrid" +))] +mod is_word_char { + use alloc::vec::Vec; + + use crate::{ + hybrid::dfa::{Cache, DFA}, + nfa::thompson::NFA, + util::{lazy::Lazy, pool::Pool, primitives::StateID}, + Anchored, Input, + }; + + pub(super) fn check() -> Result<(), super::UnicodeWordBoundaryError> { + Ok(()) + } + + #[cfg_attr(feature = "perf-inline", inline(always))] + pub(super) fn fwd( + haystack: &[u8], + mut at: usize, + ) -> Result { + static WORD: Lazy = Lazy::new(|| DFA::new(r"\w").unwrap()); + static CACHE: Lazy> = + Lazy::new(|| Pool::new(|| WORD.create_cache())); + let dfa = Lazy::get(&WORD); + let mut cache = Lazy::get(&CACHE).get(); + let mut sid = dfa + .start_state_forward( + &mut cache, + &Input::new("").anchored(Anchored::Yes), + ) + .unwrap(); + while at < haystack.len() { + let byte = haystack[at]; + sid = dfa.next_state(&mut cache, sid, byte).unwrap(); + at += 1; + if sid.is_tagged() { + if sid.is_match() { + return Ok(true); + } else if sid.is_dead() { + return Ok(false); + } + } + } + Ok(dfa.next_eoi_state(&mut cache, sid).unwrap().is_match()) + } + + #[cfg_attr(feature = "perf-inline", inline(always))] + pub(super) fn rev( + haystack: &[u8], + mut at: usize, + ) -> Result { + static WORD: Lazy = Lazy::new(|| { + DFA::builder() + .thompson(NFA::config().reverse(true)) + .build(r"\w") + .unwrap() + }); + static CACHE: Lazy> = + Lazy::new(|| Pool::new(|| WORD.create_cache())); + let dfa = Lazy::get(&WORD); + let mut cache = Lazy::get(&CACHE).get(); + let mut sid = dfa + .start_state_reverse( + &mut cache, + &Input::new("").anchored(Anchored::Yes), + ) + .unwrap(); + while at > 0 { + at -= 1; + let byte = haystack[at]; + sid = dfa.next_state(&mut cache, sid, byte).unwrap(); + if sid.is_tagged() { + if sid.is_match() { + return Ok(true); + } else if sid.is_dead() { + return Ok(false); + } + } + } + Ok(dfa.next_eoi_state(&mut cache, sid).unwrap().is_match()) + } +} +*/ + +/* +/// A module that looks for word codepoints using fully compiled DFAs. 
+#[cfg(all( + feature = "unicode-word-boundary", + feature = "syntax", + feature = "unicode-perl", + feature = "dfa-build" +))] +mod is_word_char { + use alloc::vec::Vec; + + use crate::{ + dfa::{dense::DFA, Automaton, StartKind}, + nfa::thompson::NFA, + util::{lazy::Lazy, primitives::StateID}, + Anchored, Input, + }; + + pub(super) fn check() -> Result<(), super::UnicodeWordBoundaryError> { + Ok(()) + } + + #[cfg_attr(feature = "perf-inline", inline(always))] + pub(super) fn fwd( + haystack: &[u8], + mut at: usize, + ) -> Result { + static WORD: Lazy<(DFA>, StateID)> = Lazy::new(|| { + let dfa = DFA::builder() + .configure(DFA::config().start_kind(StartKind::Anchored)) + .build(r"\w") + .unwrap(); + // OK because our regex has no look-around. + let start_id = dfa.universal_start_state(Anchored::Yes).unwrap(); + (dfa, start_id) + }); + let &(ref dfa, mut sid) = Lazy::get(&WORD); + while at < haystack.len() { + let byte = haystack[at]; + sid = dfa.next_state(sid, byte); + at += 1; + if dfa.is_special_state(sid) { + if dfa.is_match_state(sid) { + return Ok(true); + } else if dfa.is_dead_state(sid) { + return Ok(false); + } + } + } + Ok(dfa.is_match_state(dfa.next_eoi_state(sid))) + } + + #[cfg_attr(feature = "perf-inline", inline(always))] + pub(super) fn rev( + haystack: &[u8], + mut at: usize, + ) -> Result { + static WORD: Lazy<(DFA>, StateID)> = Lazy::new(|| { + let dfa = DFA::builder() + .configure(DFA::config().start_kind(StartKind::Anchored)) + // From ad hoc measurements, it looks like setting + // shrink==false is slightly faster than shrink==true. I kind + // of feel like this indicates that shrinking is probably a + // failure, although it can help in some cases. Sigh. + .thompson(NFA::config().reverse(true).shrink(false)) + .build(r"\w") + .unwrap(); + // OK because our regex has no look-around. + let start_id = dfa.universal_start_state(Anchored::Yes).unwrap(); + (dfa, start_id) + }); + let &(ref dfa, mut sid) = Lazy::get(&WORD); + while at > 0 { + at -= 1; + let byte = haystack[at]; + sid = dfa.next_state(sid, byte); + if dfa.is_special_state(sid) { + if dfa.is_match_state(sid) { + return Ok(true); + } else if dfa.is_dead_state(sid) { + return Ok(false); + } + } + } + Ok(dfa.is_match_state(dfa.next_eoi_state(sid))) + } +} +*/ + +/// A module that looks for word codepoints using regex-syntax's data tables. 
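+///
+/// `fwd` classifies the codepoint decoded at `at` and `rev` classifies the
+/// codepoint decoded immediately before `at`, with invalid UTF-8 treated as
+/// non-word. A condensed sketch of how a caller like `is_word_unicode`
+/// above combines the two:
+///
+/// ```ignore
+/// // \b holds when exactly one side of `at` is a word codepoint.
+/// let word_before = is_word_char::rev(haystack, at)?;
+/// let word_after = is_word_char::fwd(haystack, at)?;
+/// let is_boundary = word_before != word_after;
+/// ```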
+#[cfg(all( + feature = "unicode-word-boundary", + feature = "syntax", + feature = "unicode-perl", +))] +mod is_word_char { + use regex_syntax::try_is_word_character; + + use crate::util::utf8; + + pub(super) fn check() -> Result<(), super::UnicodeWordBoundaryError> { + Ok(()) + } + + #[cfg_attr(feature = "perf-inline", inline(always))] + pub(super) fn fwd( + haystack: &[u8], + at: usize, + ) -> Result { + Ok(match utf8::decode(&haystack[at..]) { + None | Some(Err(_)) => false, + Some(Ok(ch)) => try_is_word_character(ch).expect( + "since unicode-word-boundary, syntax and unicode-perl \ + are all enabled, it is expected that \ + try_is_word_character succeeds", + ), + }) + } + + #[cfg_attr(feature = "perf-inline", inline(always))] + pub(super) fn rev( + haystack: &[u8], + at: usize, + ) -> Result { + Ok(match utf8::decode_last(&haystack[..at]) { + None | Some(Err(_)) => false, + Some(Ok(ch)) => try_is_word_character(ch).expect( + "since unicode-word-boundary, syntax and unicode-perl \ + are all enabled, it is expected that \ + try_is_word_character succeeds", + ), + }) + } +} + +/// A module that looks for word codepoints using regex-automata's data tables +/// (which are only compiled when regex-syntax's tables aren't available). +/// +/// Note that the cfg should match the one in src/util/unicode_data/mod.rs for +/// perl_word. +#[cfg(all( + feature = "unicode-word-boundary", + not(all(feature = "syntax", feature = "unicode-perl")), +))] +mod is_word_char { + use crate::util::utf8; + + pub(super) fn check() -> Result<(), super::UnicodeWordBoundaryError> { + Ok(()) + } + + #[cfg_attr(feature = "perf-inline", inline(always))] + pub(super) fn fwd( + haystack: &[u8], + at: usize, + ) -> Result { + Ok(match utf8::decode(&haystack[at..]) { + None | Some(Err(_)) => false, + Some(Ok(ch)) => is_word_character(ch), + }) + } + + #[cfg_attr(feature = "perf-inline", inline(always))] + pub(super) fn rev( + haystack: &[u8], + at: usize, + ) -> Result { + Ok(match utf8::decode_last(&haystack[..at]) { + None | Some(Err(_)) => false, + Some(Ok(ch)) => is_word_character(ch), + }) + } + + #[cfg_attr(feature = "perf-inline", inline(always))] + fn is_word_character(c: char) -> bool { + use crate::util::{unicode_data::perl_word::PERL_WORD, utf8}; + + if u8::try_from(c).map_or(false, utf8::is_word_byte) { + return true; + } + PERL_WORD + .binary_search_by(|&(start, end)| { + use core::cmp::Ordering; + + if start <= c && c <= end { + Ordering::Equal + } else if start > c { + Ordering::Greater + } else { + Ordering::Less + } + }) + .is_ok() + } +} + +/// A module that always returns an error if Unicode word boundaries are +/// disabled. When this feature is disabled, then regex-automata will not +/// include its own data tables even if regex-syntax is disabled. +#[cfg(not(feature = "unicode-word-boundary"))] +mod is_word_char { + pub(super) fn check() -> Result<(), super::UnicodeWordBoundaryError> { + Err(super::UnicodeWordBoundaryError::new()) + } + + #[cfg_attr(feature = "perf-inline", inline(always))] + pub(super) fn fwd( + _bytes: &[u8], + _at: usize, + ) -> Result { + Err(super::UnicodeWordBoundaryError::new()) + } + + #[cfg_attr(feature = "perf-inline", inline(always))] + pub(super) fn rev( + _bytes: &[u8], + _at: usize, + ) -> Result { + Err(super::UnicodeWordBoundaryError::new()) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + macro_rules! 
testlook { + ($look:expr, $haystack:expr, $at:expr) => { + LookMatcher::default().matches($look, $haystack.as_bytes(), $at) + }; + } + + #[test] + fn look_matches_start_line() { + let look = Look::StartLF; + + assert!(testlook!(look, "", 0)); + assert!(testlook!(look, "\n", 0)); + assert!(testlook!(look, "\n", 1)); + assert!(testlook!(look, "a", 0)); + assert!(testlook!(look, "\na", 1)); + + assert!(!testlook!(look, "a", 1)); + assert!(!testlook!(look, "a\na", 1)); + } + + #[test] + fn look_matches_end_line() { + let look = Look::EndLF; + + assert!(testlook!(look, "", 0)); + assert!(testlook!(look, "\n", 1)); + assert!(testlook!(look, "\na", 0)); + assert!(testlook!(look, "\na", 2)); + assert!(testlook!(look, "a\na", 1)); + + assert!(!testlook!(look, "a", 0)); + assert!(!testlook!(look, "\na", 1)); + assert!(!testlook!(look, "a\na", 0)); + assert!(!testlook!(look, "a\na", 2)); + } + + #[test] + fn look_matches_start_text() { + let look = Look::Start; + + assert!(testlook!(look, "", 0)); + assert!(testlook!(look, "\n", 0)); + assert!(testlook!(look, "a", 0)); + + assert!(!testlook!(look, "\n", 1)); + assert!(!testlook!(look, "\na", 1)); + assert!(!testlook!(look, "a", 1)); + assert!(!testlook!(look, "a\na", 1)); + } + + #[test] + fn look_matches_end_text() { + let look = Look::End; + + assert!(testlook!(look, "", 0)); + assert!(testlook!(look, "\n", 1)); + assert!(testlook!(look, "\na", 2)); + + assert!(!testlook!(look, "\na", 0)); + assert!(!testlook!(look, "a\na", 1)); + assert!(!testlook!(look, "a", 0)); + assert!(!testlook!(look, "\na", 1)); + assert!(!testlook!(look, "a\na", 0)); + assert!(!testlook!(look, "a\na", 2)); + } + + #[test] + #[cfg(all(not(miri), feature = "unicode-word-boundary"))] + fn look_matches_word_unicode() { + let look = Look::WordUnicode; + + // \xF0\x9D\x9B\x83 = 𝛃 (in \w) + // \xF0\x90\x86\x80 = 𐆀 (not in \w) + + // Simple ASCII word boundaries. + assert!(testlook!(look, "a", 0)); + assert!(testlook!(look, "a", 1)); + assert!(testlook!(look, "a ", 1)); + assert!(testlook!(look, " a ", 1)); + assert!(testlook!(look, " a ", 2)); + + // Unicode word boundaries with a non-ASCII codepoint. + assert!(testlook!(look, "𝛃", 0)); + assert!(testlook!(look, "𝛃", 4)); + assert!(testlook!(look, "𝛃 ", 4)); + assert!(testlook!(look, " 𝛃 ", 1)); + assert!(testlook!(look, " 𝛃 ", 5)); + + // Unicode word boundaries between non-ASCII codepoints. + assert!(testlook!(look, "𝛃𐆀", 0)); + assert!(testlook!(look, "𝛃𐆀", 4)); + + // Non word boundaries for ASCII. + assert!(!testlook!(look, "", 0)); + assert!(!testlook!(look, "ab", 1)); + assert!(!testlook!(look, "a ", 2)); + assert!(!testlook!(look, " a ", 0)); + assert!(!testlook!(look, " a ", 3)); + + // Non word boundaries with a non-ASCII codepoint. + assert!(!testlook!(look, "𝛃b", 4)); + assert!(!testlook!(look, "𝛃 ", 5)); + assert!(!testlook!(look, " 𝛃 ", 0)); + assert!(!testlook!(look, " 𝛃 ", 6)); + assert!(!testlook!(look, "𝛃", 1)); + assert!(!testlook!(look, "𝛃", 2)); + assert!(!testlook!(look, "𝛃", 3)); + + // Non word boundaries with non-ASCII codepoints. + assert!(!testlook!(look, "𝛃𐆀", 1)); + assert!(!testlook!(look, "𝛃𐆀", 2)); + assert!(!testlook!(look, "𝛃𐆀", 3)); + assert!(!testlook!(look, "𝛃𐆀", 5)); + assert!(!testlook!(look, "𝛃𐆀", 6)); + assert!(!testlook!(look, "𝛃𐆀", 7)); + assert!(!testlook!(look, "𝛃𐆀", 8)); + } + + #[test] + fn look_matches_word_ascii() { + let look = Look::WordAscii; + + // \xF0\x9D\x9B\x83 = 𝛃 (in \w) + // \xF0\x90\x86\x80 = 𐆀 (not in \w) + + // Simple ASCII word boundaries. 
+ assert!(testlook!(look, "a", 0)); + assert!(testlook!(look, "a", 1)); + assert!(testlook!(look, "a ", 1)); + assert!(testlook!(look, " a ", 1)); + assert!(testlook!(look, " a ", 2)); + + // Unicode word boundaries with a non-ASCII codepoint. Since this is + // an ASCII word boundary, none of these match. + assert!(!testlook!(look, "𝛃", 0)); + assert!(!testlook!(look, "𝛃", 4)); + assert!(!testlook!(look, "𝛃 ", 4)); + assert!(!testlook!(look, " 𝛃 ", 1)); + assert!(!testlook!(look, " 𝛃 ", 5)); + + // Unicode word boundaries between non-ASCII codepoints. Again, since + // this is an ASCII word boundary, none of these match. + assert!(!testlook!(look, "𝛃𐆀", 0)); + assert!(!testlook!(look, "𝛃𐆀", 4)); + + // Non word boundaries for ASCII. + assert!(!testlook!(look, "", 0)); + assert!(!testlook!(look, "ab", 1)); + assert!(!testlook!(look, "a ", 2)); + assert!(!testlook!(look, " a ", 0)); + assert!(!testlook!(look, " a ", 3)); + + // Non word boundaries with a non-ASCII codepoint. + assert!(testlook!(look, "𝛃b", 4)); + assert!(!testlook!(look, "𝛃 ", 5)); + assert!(!testlook!(look, " 𝛃 ", 0)); + assert!(!testlook!(look, " 𝛃 ", 6)); + assert!(!testlook!(look, "𝛃", 1)); + assert!(!testlook!(look, "𝛃", 2)); + assert!(!testlook!(look, "𝛃", 3)); + + // Non word boundaries with non-ASCII codepoints. + assert!(!testlook!(look, "𝛃𐆀", 1)); + assert!(!testlook!(look, "𝛃𐆀", 2)); + assert!(!testlook!(look, "𝛃𐆀", 3)); + assert!(!testlook!(look, "𝛃𐆀", 5)); + assert!(!testlook!(look, "𝛃𐆀", 6)); + assert!(!testlook!(look, "𝛃𐆀", 7)); + assert!(!testlook!(look, "𝛃𐆀", 8)); + } + + #[test] + #[cfg(all(not(miri), feature = "unicode-word-boundary"))] + fn look_matches_word_unicode_negate() { + let look = Look::WordUnicodeNegate; + + // \xF0\x9D\x9B\x83 = 𝛃 (in \w) + // \xF0\x90\x86\x80 = 𐆀 (not in \w) + + // Simple ASCII word boundaries. + assert!(!testlook!(look, "a", 0)); + assert!(!testlook!(look, "a", 1)); + assert!(!testlook!(look, "a ", 1)); + assert!(!testlook!(look, " a ", 1)); + assert!(!testlook!(look, " a ", 2)); + + // Unicode word boundaries with a non-ASCII codepoint. + assert!(!testlook!(look, "𝛃", 0)); + assert!(!testlook!(look, "𝛃", 4)); + assert!(!testlook!(look, "𝛃 ", 4)); + assert!(!testlook!(look, " 𝛃 ", 1)); + assert!(!testlook!(look, " 𝛃 ", 5)); + + // Unicode word boundaries between non-ASCII codepoints. + assert!(!testlook!(look, "𝛃𐆀", 0)); + assert!(!testlook!(look, "𝛃𐆀", 4)); + + // Non word boundaries for ASCII. + assert!(testlook!(look, "", 0)); + assert!(testlook!(look, "ab", 1)); + assert!(testlook!(look, "a ", 2)); + assert!(testlook!(look, " a ", 0)); + assert!(testlook!(look, " a ", 3)); + + // Non word boundaries with a non-ASCII codepoint. + assert!(testlook!(look, "𝛃b", 4)); + assert!(testlook!(look, "𝛃 ", 5)); + assert!(testlook!(look, " 𝛃 ", 0)); + assert!(testlook!(look, " 𝛃 ", 6)); + // These don't match because they could otherwise return an offset that + // splits the UTF-8 encoding of a codepoint. + assert!(!testlook!(look, "𝛃", 1)); + assert!(!testlook!(look, "𝛃", 2)); + assert!(!testlook!(look, "𝛃", 3)); + + // Non word boundaries with non-ASCII codepoints. These also don't + // match because they could otherwise return an offset that splits the + // UTF-8 encoding of a codepoint. 
+ assert!(!testlook!(look, "𝛃𐆀", 1)); + assert!(!testlook!(look, "𝛃𐆀", 2)); + assert!(!testlook!(look, "𝛃𐆀", 3)); + assert!(!testlook!(look, "𝛃𐆀", 5)); + assert!(!testlook!(look, "𝛃𐆀", 6)); + assert!(!testlook!(look, "𝛃𐆀", 7)); + // But this one does, since 𐆀 isn't a word codepoint, and 8 is the end + // of the haystack. So the "end" of the haystack isn't a word and 𐆀 + // isn't a word, thus, \B matches. + assert!(testlook!(look, "𝛃𐆀", 8)); + } + + #[test] + fn look_matches_word_ascii_negate() { + let look = Look::WordAsciiNegate; + + // \xF0\x9D\x9B\x83 = 𝛃 (in \w) + // \xF0\x90\x86\x80 = 𐆀 (not in \w) + + // Simple ASCII word boundaries. + assert!(!testlook!(look, "a", 0)); + assert!(!testlook!(look, "a", 1)); + assert!(!testlook!(look, "a ", 1)); + assert!(!testlook!(look, " a ", 1)); + assert!(!testlook!(look, " a ", 2)); + + // Unicode word boundaries with a non-ASCII codepoint. Since this is + // an ASCII word boundary, none of these match. + assert!(testlook!(look, "𝛃", 0)); + assert!(testlook!(look, "𝛃", 4)); + assert!(testlook!(look, "𝛃 ", 4)); + assert!(testlook!(look, " 𝛃 ", 1)); + assert!(testlook!(look, " 𝛃 ", 5)); + + // Unicode word boundaries between non-ASCII codepoints. Again, since + // this is an ASCII word boundary, none of these match. + assert!(testlook!(look, "𝛃𐆀", 0)); + assert!(testlook!(look, "𝛃𐆀", 4)); + + // Non word boundaries for ASCII. + assert!(testlook!(look, "", 0)); + assert!(testlook!(look, "ab", 1)); + assert!(testlook!(look, "a ", 2)); + assert!(testlook!(look, " a ", 0)); + assert!(testlook!(look, " a ", 3)); + + // Non word boundaries with a non-ASCII codepoint. + assert!(!testlook!(look, "𝛃b", 4)); + assert!(testlook!(look, "𝛃 ", 5)); + assert!(testlook!(look, " 𝛃 ", 0)); + assert!(testlook!(look, " 𝛃 ", 6)); + assert!(testlook!(look, "𝛃", 1)); + assert!(testlook!(look, "𝛃", 2)); + assert!(testlook!(look, "𝛃", 3)); + + // Non word boundaries with non-ASCII codepoints. + assert!(testlook!(look, "𝛃𐆀", 1)); + assert!(testlook!(look, "𝛃𐆀", 2)); + assert!(testlook!(look, "𝛃𐆀", 3)); + assert!(testlook!(look, "𝛃𐆀", 5)); + assert!(testlook!(look, "𝛃𐆀", 6)); + assert!(testlook!(look, "𝛃𐆀", 7)); + assert!(testlook!(look, "𝛃𐆀", 8)); + } + + #[test] + fn look_matches_word_start_ascii() { + let look = Look::WordStartAscii; + + // \xF0\x9D\x9B\x83 = 𝛃 (in \w) + // \xF0\x90\x86\x80 = 𐆀 (not in \w) + + // Simple ASCII word boundaries. + assert!(testlook!(look, "a", 0)); + assert!(!testlook!(look, "a", 1)); + assert!(!testlook!(look, "a ", 1)); + assert!(testlook!(look, " a ", 1)); + assert!(!testlook!(look, " a ", 2)); + + // Unicode word boundaries with a non-ASCII codepoint. Since this is + // an ASCII word boundary, none of these match. + assert!(!testlook!(look, "𝛃", 0)); + assert!(!testlook!(look, "𝛃", 4)); + assert!(!testlook!(look, "𝛃 ", 4)); + assert!(!testlook!(look, " 𝛃 ", 1)); + assert!(!testlook!(look, " 𝛃 ", 5)); + + // Unicode word boundaries between non-ASCII codepoints. Again, since + // this is an ASCII word boundary, none of these match. + assert!(!testlook!(look, "𝛃𐆀", 0)); + assert!(!testlook!(look, "𝛃𐆀", 4)); + + // Non word boundaries for ASCII. + assert!(!testlook!(look, "", 0)); + assert!(!testlook!(look, "ab", 1)); + assert!(!testlook!(look, "a ", 2)); + assert!(!testlook!(look, " a ", 0)); + assert!(!testlook!(look, " a ", 3)); + + // Non word boundaries with a non-ASCII codepoint. 
+ assert!(testlook!(look, "𝛃b", 4)); + assert!(!testlook!(look, "b𝛃", 1)); + assert!(!testlook!(look, "𝛃 ", 5)); + assert!(!testlook!(look, " 𝛃 ", 0)); + assert!(!testlook!(look, " 𝛃 ", 6)); + assert!(!testlook!(look, "𝛃", 1)); + assert!(!testlook!(look, "𝛃", 2)); + assert!(!testlook!(look, "𝛃", 3)); + + // Non word boundaries with non-ASCII codepoints. + assert!(!testlook!(look, "𝛃𐆀", 1)); + assert!(!testlook!(look, "𝛃𐆀", 2)); + assert!(!testlook!(look, "𝛃𐆀", 3)); + assert!(!testlook!(look, "𝛃𐆀", 5)); + assert!(!testlook!(look, "𝛃𐆀", 6)); + assert!(!testlook!(look, "𝛃𐆀", 7)); + assert!(!testlook!(look, "𝛃𐆀", 8)); + } + + #[test] + fn look_matches_word_end_ascii() { + let look = Look::WordEndAscii; + + // \xF0\x9D\x9B\x83 = 𝛃 (in \w) + // \xF0\x90\x86\x80 = 𐆀 (not in \w) + + // Simple ASCII word boundaries. + assert!(!testlook!(look, "a", 0)); + assert!(testlook!(look, "a", 1)); + assert!(testlook!(look, "a ", 1)); + assert!(!testlook!(look, " a ", 1)); + assert!(testlook!(look, " a ", 2)); + + // Unicode word boundaries with a non-ASCII codepoint. Since this is + // an ASCII word boundary, none of these match. + assert!(!testlook!(look, "𝛃", 0)); + assert!(!testlook!(look, "𝛃", 4)); + assert!(!testlook!(look, "𝛃 ", 4)); + assert!(!testlook!(look, " 𝛃 ", 1)); + assert!(!testlook!(look, " 𝛃 ", 5)); + + // Unicode word boundaries between non-ASCII codepoints. Again, since + // this is an ASCII word boundary, none of these match. + assert!(!testlook!(look, "𝛃𐆀", 0)); + assert!(!testlook!(look, "𝛃𐆀", 4)); + + // Non word boundaries for ASCII. + assert!(!testlook!(look, "", 0)); + assert!(!testlook!(look, "ab", 1)); + assert!(!testlook!(look, "a ", 2)); + assert!(!testlook!(look, " a ", 0)); + assert!(!testlook!(look, " a ", 3)); + + // Non word boundaries with a non-ASCII codepoint. + assert!(!testlook!(look, "𝛃b", 4)); + assert!(testlook!(look, "b𝛃", 1)); + assert!(!testlook!(look, "𝛃 ", 5)); + assert!(!testlook!(look, " 𝛃 ", 0)); + assert!(!testlook!(look, " 𝛃 ", 6)); + assert!(!testlook!(look, "𝛃", 1)); + assert!(!testlook!(look, "𝛃", 2)); + assert!(!testlook!(look, "𝛃", 3)); + + // Non word boundaries with non-ASCII codepoints. + assert!(!testlook!(look, "𝛃𐆀", 1)); + assert!(!testlook!(look, "𝛃𐆀", 2)); + assert!(!testlook!(look, "𝛃𐆀", 3)); + assert!(!testlook!(look, "𝛃𐆀", 5)); + assert!(!testlook!(look, "𝛃𐆀", 6)); + assert!(!testlook!(look, "𝛃𐆀", 7)); + assert!(!testlook!(look, "𝛃𐆀", 8)); + } + + #[test] + #[cfg(all(not(miri), feature = "unicode-word-boundary"))] + fn look_matches_word_start_unicode() { + let look = Look::WordStartUnicode; + + // \xF0\x9D\x9B\x83 = 𝛃 (in \w) + // \xF0\x90\x86\x80 = 𐆀 (not in \w) + + // Simple ASCII word boundaries. + assert!(testlook!(look, "a", 0)); + assert!(!testlook!(look, "a", 1)); + assert!(!testlook!(look, "a ", 1)); + assert!(testlook!(look, " a ", 1)); + assert!(!testlook!(look, " a ", 2)); + + // Unicode word boundaries with a non-ASCII codepoint. + assert!(testlook!(look, "𝛃", 0)); + assert!(!testlook!(look, "𝛃", 4)); + assert!(!testlook!(look, "𝛃 ", 4)); + assert!(testlook!(look, " 𝛃 ", 1)); + assert!(!testlook!(look, " 𝛃 ", 5)); + + // Unicode word boundaries between non-ASCII codepoints. + assert!(testlook!(look, "𝛃𐆀", 0)); + assert!(!testlook!(look, "𝛃𐆀", 4)); + + // Non word boundaries for ASCII. + assert!(!testlook!(look, "", 0)); + assert!(!testlook!(look, "ab", 1)); + assert!(!testlook!(look, "a ", 2)); + assert!(!testlook!(look, " a ", 0)); + assert!(!testlook!(look, " a ", 3)); + + // Non word boundaries with a non-ASCII codepoint. 
+ assert!(!testlook!(look, "𝛃b", 4)); + assert!(!testlook!(look, "b𝛃", 1)); + assert!(!testlook!(look, "𝛃 ", 5)); + assert!(!testlook!(look, " 𝛃 ", 0)); + assert!(!testlook!(look, " 𝛃 ", 6)); + assert!(!testlook!(look, "𝛃", 1)); + assert!(!testlook!(look, "𝛃", 2)); + assert!(!testlook!(look, "𝛃", 3)); + + // Non word boundaries with non-ASCII codepoints. + assert!(!testlook!(look, "𝛃𐆀", 1)); + assert!(!testlook!(look, "𝛃𐆀", 2)); + assert!(!testlook!(look, "𝛃𐆀", 3)); + assert!(!testlook!(look, "𝛃𐆀", 5)); + assert!(!testlook!(look, "𝛃𐆀", 6)); + assert!(!testlook!(look, "𝛃𐆀", 7)); + assert!(!testlook!(look, "𝛃𐆀", 8)); + } + + #[test] + #[cfg(all(not(miri), feature = "unicode-word-boundary"))] + fn look_matches_word_end_unicode() { + let look = Look::WordEndUnicode; + + // \xF0\x9D\x9B\x83 = 𝛃 (in \w) + // \xF0\x90\x86\x80 = 𐆀 (not in \w) + + // Simple ASCII word boundaries. + assert!(!testlook!(look, "a", 0)); + assert!(testlook!(look, "a", 1)); + assert!(testlook!(look, "a ", 1)); + assert!(!testlook!(look, " a ", 1)); + assert!(testlook!(look, " a ", 2)); + + // Unicode word boundaries with a non-ASCII codepoint. + assert!(!testlook!(look, "𝛃", 0)); + assert!(testlook!(look, "𝛃", 4)); + assert!(testlook!(look, "𝛃 ", 4)); + assert!(!testlook!(look, " 𝛃 ", 1)); + assert!(testlook!(look, " 𝛃 ", 5)); + + // Unicode word boundaries between non-ASCII codepoints. + assert!(!testlook!(look, "𝛃𐆀", 0)); + assert!(testlook!(look, "𝛃𐆀", 4)); + + // Non word boundaries for ASCII. + assert!(!testlook!(look, "", 0)); + assert!(!testlook!(look, "ab", 1)); + assert!(!testlook!(look, "a ", 2)); + assert!(!testlook!(look, " a ", 0)); + assert!(!testlook!(look, " a ", 3)); + + // Non word boundaries with a non-ASCII codepoint. + assert!(!testlook!(look, "𝛃b", 4)); + assert!(!testlook!(look, "b𝛃", 1)); + assert!(!testlook!(look, "𝛃 ", 5)); + assert!(!testlook!(look, " 𝛃 ", 0)); + assert!(!testlook!(look, " 𝛃 ", 6)); + assert!(!testlook!(look, "𝛃", 1)); + assert!(!testlook!(look, "𝛃", 2)); + assert!(!testlook!(look, "𝛃", 3)); + + // Non word boundaries with non-ASCII codepoints. + assert!(!testlook!(look, "𝛃𐆀", 1)); + assert!(!testlook!(look, "𝛃𐆀", 2)); + assert!(!testlook!(look, "𝛃𐆀", 3)); + assert!(!testlook!(look, "𝛃𐆀", 5)); + assert!(!testlook!(look, "𝛃𐆀", 6)); + assert!(!testlook!(look, "𝛃𐆀", 7)); + assert!(!testlook!(look, "𝛃𐆀", 8)); + } + + #[test] + fn look_matches_word_start_half_ascii() { + let look = Look::WordStartHalfAscii; + + // \xF0\x9D\x9B\x83 = 𝛃 (in \w) + // \xF0\x90\x86\x80 = 𐆀 (not in \w) + + // Simple ASCII word boundaries. + assert!(testlook!(look, "a", 0)); + assert!(!testlook!(look, "a", 1)); + assert!(!testlook!(look, "a ", 1)); + assert!(testlook!(look, " a ", 1)); + assert!(!testlook!(look, " a ", 2)); + + // Unicode word boundaries with a non-ASCII codepoint. Since this is + // an ASCII word boundary, none of these match. + assert!(testlook!(look, "𝛃", 0)); + assert!(testlook!(look, "𝛃", 4)); + assert!(testlook!(look, "𝛃 ", 4)); + assert!(testlook!(look, " 𝛃 ", 1)); + assert!(testlook!(look, " 𝛃 ", 5)); + + // Unicode word boundaries between non-ASCII codepoints. Again, since + // this is an ASCII word boundary, none of these match. + assert!(testlook!(look, "𝛃𐆀", 0)); + assert!(testlook!(look, "𝛃𐆀", 4)); + + // Non word boundaries for ASCII. + assert!(testlook!(look, "", 0)); + assert!(!testlook!(look, "ab", 1)); + assert!(testlook!(look, "a ", 2)); + assert!(testlook!(look, " a ", 0)); + assert!(testlook!(look, " a ", 3)); + + // Non word boundaries with a non-ASCII codepoint. 
+ assert!(testlook!(look, "𝛃b", 4)); + assert!(!testlook!(look, "b𝛃", 1)); + assert!(testlook!(look, "𝛃 ", 5)); + assert!(testlook!(look, " 𝛃 ", 0)); + assert!(testlook!(look, " 𝛃 ", 6)); + assert!(testlook!(look, "𝛃", 1)); + assert!(testlook!(look, "𝛃", 2)); + assert!(testlook!(look, "𝛃", 3)); + + // Non word boundaries with non-ASCII codepoints. + assert!(testlook!(look, "𝛃𐆀", 1)); + assert!(testlook!(look, "𝛃𐆀", 2)); + assert!(testlook!(look, "𝛃𐆀", 3)); + assert!(testlook!(look, "𝛃𐆀", 5)); + assert!(testlook!(look, "𝛃𐆀", 6)); + assert!(testlook!(look, "𝛃𐆀", 7)); + assert!(testlook!(look, "𝛃𐆀", 8)); + } + + #[test] + fn look_matches_word_end_half_ascii() { + let look = Look::WordEndHalfAscii; + + // \xF0\x9D\x9B\x83 = 𝛃 (in \w) + // \xF0\x90\x86\x80 = 𐆀 (not in \w) + + // Simple ASCII word boundaries. + assert!(!testlook!(look, "a", 0)); + assert!(testlook!(look, "a", 1)); + assert!(testlook!(look, "a ", 1)); + assert!(!testlook!(look, " a ", 1)); + assert!(testlook!(look, " a ", 2)); + + // Unicode word boundaries with a non-ASCII codepoint. Since this is + // an ASCII word boundary, none of these match. + assert!(testlook!(look, "𝛃", 0)); + assert!(testlook!(look, "𝛃", 4)); + assert!(testlook!(look, "𝛃 ", 4)); + assert!(testlook!(look, " 𝛃 ", 1)); + assert!(testlook!(look, " 𝛃 ", 5)); + + // Unicode word boundaries between non-ASCII codepoints. Again, since + // this is an ASCII word boundary, none of these match. + assert!(testlook!(look, "𝛃𐆀", 0)); + assert!(testlook!(look, "𝛃𐆀", 4)); + + // Non word boundaries for ASCII. + assert!(testlook!(look, "", 0)); + assert!(!testlook!(look, "ab", 1)); + assert!(testlook!(look, "a ", 2)); + assert!(testlook!(look, " a ", 0)); + assert!(testlook!(look, " a ", 3)); + + // Non word boundaries with a non-ASCII codepoint. + assert!(!testlook!(look, "𝛃b", 4)); + assert!(testlook!(look, "b𝛃", 1)); + assert!(testlook!(look, "𝛃 ", 5)); + assert!(testlook!(look, " 𝛃 ", 0)); + assert!(testlook!(look, " 𝛃 ", 6)); + assert!(testlook!(look, "𝛃", 1)); + assert!(testlook!(look, "𝛃", 2)); + assert!(testlook!(look, "𝛃", 3)); + + // Non word boundaries with non-ASCII codepoints. + assert!(testlook!(look, "𝛃𐆀", 1)); + assert!(testlook!(look, "𝛃𐆀", 2)); + assert!(testlook!(look, "𝛃𐆀", 3)); + assert!(testlook!(look, "𝛃𐆀", 5)); + assert!(testlook!(look, "𝛃𐆀", 6)); + assert!(testlook!(look, "𝛃𐆀", 7)); + assert!(testlook!(look, "𝛃𐆀", 8)); + } + + #[test] + #[cfg(all(not(miri), feature = "unicode-word-boundary"))] + fn look_matches_word_start_half_unicode() { + let look = Look::WordStartHalfUnicode; + + // \xF0\x9D\x9B\x83 = 𝛃 (in \w) + // \xF0\x90\x86\x80 = 𐆀 (not in \w) + + // Simple ASCII word boundaries. + assert!(testlook!(look, "a", 0)); + assert!(!testlook!(look, "a", 1)); + assert!(!testlook!(look, "a ", 1)); + assert!(testlook!(look, " a ", 1)); + assert!(!testlook!(look, " a ", 2)); + + // Unicode word boundaries with a non-ASCII codepoint. + assert!(testlook!(look, "𝛃", 0)); + assert!(!testlook!(look, "𝛃", 4)); + assert!(!testlook!(look, "𝛃 ", 4)); + assert!(testlook!(look, " 𝛃 ", 1)); + assert!(!testlook!(look, " 𝛃 ", 5)); + + // Unicode word boundaries between non-ASCII codepoints. + assert!(testlook!(look, "𝛃𐆀", 0)); + assert!(!testlook!(look, "𝛃𐆀", 4)); + + // Non word boundaries for ASCII. + assert!(testlook!(look, "", 0)); + assert!(!testlook!(look, "ab", 1)); + assert!(testlook!(look, "a ", 2)); + assert!(testlook!(look, " a ", 0)); + assert!(testlook!(look, " a ", 3)); + + // Non word boundaries with a non-ASCII codepoint. 
+ assert!(!testlook!(look, "𝛃b", 4)); + assert!(!testlook!(look, "b𝛃", 1)); + assert!(testlook!(look, "𝛃 ", 5)); + assert!(testlook!(look, " 𝛃 ", 0)); + assert!(testlook!(look, " 𝛃 ", 6)); + assert!(!testlook!(look, "𝛃", 1)); + assert!(!testlook!(look, "𝛃", 2)); + assert!(!testlook!(look, "𝛃", 3)); + + // Non word boundaries with non-ASCII codepoints. + assert!(!testlook!(look, "𝛃𐆀", 1)); + assert!(!testlook!(look, "𝛃𐆀", 2)); + assert!(!testlook!(look, "𝛃𐆀", 3)); + assert!(!testlook!(look, "𝛃𐆀", 5)); + assert!(!testlook!(look, "𝛃𐆀", 6)); + assert!(!testlook!(look, "𝛃𐆀", 7)); + assert!(testlook!(look, "𝛃𐆀", 8)); + } + + #[test] + #[cfg(all(not(miri), feature = "unicode-word-boundary"))] + fn look_matches_word_end_half_unicode() { + let look = Look::WordEndHalfUnicode; + + // \xF0\x9D\x9B\x83 = 𝛃 (in \w) + // \xF0\x90\x86\x80 = 𐆀 (not in \w) + + // Simple ASCII word boundaries. + assert!(!testlook!(look, "a", 0)); + assert!(testlook!(look, "a", 1)); + assert!(testlook!(look, "a ", 1)); + assert!(!testlook!(look, " a ", 1)); + assert!(testlook!(look, " a ", 2)); + + // Unicode word boundaries with a non-ASCII codepoint. + assert!(!testlook!(look, "𝛃", 0)); + assert!(testlook!(look, "𝛃", 4)); + assert!(testlook!(look, "𝛃 ", 4)); + assert!(!testlook!(look, " 𝛃 ", 1)); + assert!(testlook!(look, " 𝛃 ", 5)); + + // Unicode word boundaries between non-ASCII codepoints. + assert!(!testlook!(look, "𝛃𐆀", 0)); + assert!(testlook!(look, "𝛃𐆀", 4)); + + // Non word boundaries for ASCII. + assert!(testlook!(look, "", 0)); + assert!(!testlook!(look, "ab", 1)); + assert!(testlook!(look, "a ", 2)); + assert!(testlook!(look, " a ", 0)); + assert!(testlook!(look, " a ", 3)); + + // Non word boundaries with a non-ASCII codepoint. + assert!(!testlook!(look, "𝛃b", 4)); + assert!(!testlook!(look, "b𝛃", 1)); + assert!(testlook!(look, "𝛃 ", 5)); + assert!(testlook!(look, " 𝛃 ", 0)); + assert!(testlook!(look, " 𝛃 ", 6)); + assert!(!testlook!(look, "𝛃", 1)); + assert!(!testlook!(look, "𝛃", 2)); + assert!(!testlook!(look, "𝛃", 3)); + + // Non word boundaries with non-ASCII codepoints. 
+ assert!(!testlook!(look, "𝛃𐆀", 1)); + assert!(!testlook!(look, "𝛃𐆀", 2)); + assert!(!testlook!(look, "𝛃𐆀", 3)); + assert!(!testlook!(look, "𝛃𐆀", 5)); + assert!(!testlook!(look, "𝛃𐆀", 6)); + assert!(!testlook!(look, "𝛃𐆀", 7)); + assert!(testlook!(look, "𝛃𐆀", 8)); + } + + #[test] + fn look_set() { + let mut f = LookSet::default(); + assert!(!f.contains(Look::Start)); + assert!(!f.contains(Look::End)); + assert!(!f.contains(Look::StartLF)); + assert!(!f.contains(Look::EndLF)); + assert!(!f.contains(Look::WordUnicode)); + assert!(!f.contains(Look::WordUnicodeNegate)); + assert!(!f.contains(Look::WordAscii)); + assert!(!f.contains(Look::WordAsciiNegate)); + + f = f.insert(Look::Start); + assert!(f.contains(Look::Start)); + f = f.remove(Look::Start); + assert!(!f.contains(Look::Start)); + + f = f.insert(Look::End); + assert!(f.contains(Look::End)); + f = f.remove(Look::End); + assert!(!f.contains(Look::End)); + + f = f.insert(Look::StartLF); + assert!(f.contains(Look::StartLF)); + f = f.remove(Look::StartLF); + assert!(!f.contains(Look::StartLF)); + + f = f.insert(Look::EndLF); + assert!(f.contains(Look::EndLF)); + f = f.remove(Look::EndLF); + assert!(!f.contains(Look::EndLF)); + + f = f.insert(Look::StartCRLF); + assert!(f.contains(Look::StartCRLF)); + f = f.remove(Look::StartCRLF); + assert!(!f.contains(Look::StartCRLF)); + + f = f.insert(Look::EndCRLF); + assert!(f.contains(Look::EndCRLF)); + f = f.remove(Look::EndCRLF); + assert!(!f.contains(Look::EndCRLF)); + + f = f.insert(Look::WordUnicode); + assert!(f.contains(Look::WordUnicode)); + f = f.remove(Look::WordUnicode); + assert!(!f.contains(Look::WordUnicode)); + + f = f.insert(Look::WordUnicodeNegate); + assert!(f.contains(Look::WordUnicodeNegate)); + f = f.remove(Look::WordUnicodeNegate); + assert!(!f.contains(Look::WordUnicodeNegate)); + + f = f.insert(Look::WordAscii); + assert!(f.contains(Look::WordAscii)); + f = f.remove(Look::WordAscii); + assert!(!f.contains(Look::WordAscii)); + + f = f.insert(Look::WordAsciiNegate); + assert!(f.contains(Look::WordAsciiNegate)); + f = f.remove(Look::WordAsciiNegate); + assert!(!f.contains(Look::WordAsciiNegate)); + + f = f.insert(Look::WordStartAscii); + assert!(f.contains(Look::WordStartAscii)); + f = f.remove(Look::WordStartAscii); + assert!(!f.contains(Look::WordStartAscii)); + + f = f.insert(Look::WordEndAscii); + assert!(f.contains(Look::WordEndAscii)); + f = f.remove(Look::WordEndAscii); + assert!(!f.contains(Look::WordEndAscii)); + + f = f.insert(Look::WordStartUnicode); + assert!(f.contains(Look::WordStartUnicode)); + f = f.remove(Look::WordStartUnicode); + assert!(!f.contains(Look::WordStartUnicode)); + + f = f.insert(Look::WordEndUnicode); + assert!(f.contains(Look::WordEndUnicode)); + f = f.remove(Look::WordEndUnicode); + assert!(!f.contains(Look::WordEndUnicode)); + + f = f.insert(Look::WordStartHalfAscii); + assert!(f.contains(Look::WordStartHalfAscii)); + f = f.remove(Look::WordStartHalfAscii); + assert!(!f.contains(Look::WordStartHalfAscii)); + + f = f.insert(Look::WordEndHalfAscii); + assert!(f.contains(Look::WordEndHalfAscii)); + f = f.remove(Look::WordEndHalfAscii); + assert!(!f.contains(Look::WordEndHalfAscii)); + + f = f.insert(Look::WordStartHalfUnicode); + assert!(f.contains(Look::WordStartHalfUnicode)); + f = f.remove(Look::WordStartHalfUnicode); + assert!(!f.contains(Look::WordStartHalfUnicode)); + + f = f.insert(Look::WordEndHalfUnicode); + assert!(f.contains(Look::WordEndHalfUnicode)); + f = f.remove(Look::WordEndHalfUnicode); + 
assert!(!f.contains(Look::WordEndHalfUnicode)); + } + + #[test] + fn look_set_iter() { + let set = LookSet::empty(); + assert_eq!(0, set.iter().count()); + + let set = LookSet::full(); + assert_eq!(18, set.iter().count()); + + let set = + LookSet::empty().insert(Look::StartLF).insert(Look::WordUnicode); + assert_eq!(2, set.iter().count()); + + let set = LookSet::empty().insert(Look::StartLF); + assert_eq!(1, set.iter().count()); + + let set = LookSet::empty().insert(Look::WordAsciiNegate); + assert_eq!(1, set.iter().count()); + + let set = LookSet::empty().insert(Look::WordEndHalfUnicode); + assert_eq!(1, set.iter().count()); + } + + #[test] + #[cfg(feature = "alloc")] + fn look_set_debug() { + let res = alloc::format!("{:?}", LookSet::empty()); + assert_eq!("∅", res); + let res = alloc::format!("{:?}", LookSet::full()); + assert_eq!("Az^$rRbB𝛃𝚩<>〈〉◁▷◀▶", res); + } +} diff --git a/vendor/regex-automata/src/util/memchr.rs b/vendor/regex-automata/src/util/memchr.rs new file mode 100644 index 00000000000000..a2cbb07321a721 --- /dev/null +++ b/vendor/regex-automata/src/util/memchr.rs @@ -0,0 +1,93 @@ +/*! +This module defines simple wrapper routines for the memchr functions from the +`memchr` crate. Basically, when the `memchr` crate is available, we use it, +otherwise we use a naive implementation which is still pretty fast. +*/ + +pub(crate) use self::inner::*; + +#[cfg(feature = "perf-literal-substring")] +pub(super) mod inner { + #[cfg_attr(feature = "perf-inline", inline(always))] + pub(crate) fn memchr(n1: u8, haystack: &[u8]) -> Option { + memchr::memchr(n1, haystack) + } + + #[cfg_attr(feature = "perf-inline", inline(always))] + pub(crate) fn memchr2(n1: u8, n2: u8, haystack: &[u8]) -> Option { + memchr::memchr2(n1, n2, haystack) + } + + #[cfg_attr(feature = "perf-inline", inline(always))] + pub(crate) fn memchr3( + n1: u8, + n2: u8, + n3: u8, + haystack: &[u8], + ) -> Option { + memchr::memchr3(n1, n2, n3, haystack) + } + + #[cfg_attr(feature = "perf-inline", inline(always))] + pub(crate) fn memrchr(n1: u8, haystack: &[u8]) -> Option { + memchr::memrchr(n1, haystack) + } + + #[cfg_attr(feature = "perf-inline", inline(always))] + pub(crate) fn memrchr2(n1: u8, n2: u8, haystack: &[u8]) -> Option { + memchr::memrchr2(n1, n2, haystack) + } + + #[cfg_attr(feature = "perf-inline", inline(always))] + pub(crate) fn memrchr3( + n1: u8, + n2: u8, + n3: u8, + haystack: &[u8], + ) -> Option { + memchr::memrchr3(n1, n2, n3, haystack) + } +} + +#[cfg(not(feature = "perf-literal-substring"))] +pub(super) mod inner { + #[cfg_attr(feature = "perf-inline", inline(always))] + pub(crate) fn memchr(n1: u8, haystack: &[u8]) -> Option { + haystack.iter().position(|&b| b == n1) + } + + #[cfg_attr(feature = "perf-inline", inline(always))] + pub(crate) fn memchr2(n1: u8, n2: u8, haystack: &[u8]) -> Option { + haystack.iter().position(|&b| b == n1 || b == n2) + } + + #[cfg_attr(feature = "perf-inline", inline(always))] + pub(crate) fn memchr3( + n1: u8, + n2: u8, + n3: u8, + haystack: &[u8], + ) -> Option { + haystack.iter().position(|&b| b == n1 || b == n2 || b == n3) + } + + #[cfg_attr(feature = "perf-inline", inline(always))] + pub(crate) fn memrchr(n1: u8, haystack: &[u8]) -> Option { + haystack.iter().rposition(|&b| b == n1) + } + + #[cfg_attr(feature = "perf-inline", inline(always))] + pub(crate) fn memrchr2(n1: u8, n2: u8, haystack: &[u8]) -> Option { + haystack.iter().rposition(|&b| b == n1 || b == n2) + } + + #[cfg_attr(feature = "perf-inline", inline(always))] + pub(crate) fn memrchr3( + n1: u8, + n2: 
u8, + n3: u8, + haystack: &[u8], + ) -> Option { + haystack.iter().rposition(|&b| b == n1 || b == n2 || b == n3) + } +} diff --git a/vendor/regex-automata/src/util/mod.rs b/vendor/regex-automata/src/util/mod.rs new file mode 100644 index 00000000000000..b3eef64e64b476 --- /dev/null +++ b/vendor/regex-automata/src/util/mod.rs @@ -0,0 +1,57 @@ +/*! +A collection of modules that provide APIs that are useful across many regex +engines. + +While one should explore the sub-modules directly to get a sense of what's +there, here are some highlights that tie the sub-modules to higher level +use cases: + +* `alphabet` contains APIs that are useful if you're doing low level things +with the DFAs in this crate. For example, implementing determinization or +walking its state graph directly. +* `captures` contains APIs for dealing with capture group matches and their +mapping to "slots" used inside an NFA graph. This is also where you can find +iterators over capture group names. +* `escape` contains types for pretty-printing raw byte slices as strings. +* `iter` contains API helpers for writing regex iterators. +* `lazy` contains a no-std and no-alloc variant of `lazy_static!` and +`once_cell`. +* `look` contains APIs for matching and configuring look-around assertions. +* `pool` provides a way to reuse mutable memory allocated in a thread safe +manner. +* `prefilter` provides APIs for building prefilters and using them in searches. +* `primitives` are what you might use if you're doing lower level work on +automata, such as walking an NFA state graph. +* `syntax` provides some higher level convenience functions for interacting +with the `regex-syntax` crate. +* `wire` is useful if you're working with DFA serialization. +*/ + +pub mod alphabet; +#[cfg(feature = "alloc")] +pub mod captures; +pub mod escape; +#[cfg(feature = "alloc")] +pub mod interpolate; +pub mod iter; +pub mod lazy; +pub mod look; +#[cfg(feature = "alloc")] +pub mod pool; +pub mod prefilter; +pub mod primitives; +pub mod start; +#[cfg(feature = "syntax")] +pub mod syntax; +pub mod wire; + +#[cfg(any(feature = "dfa-build", feature = "hybrid"))] +pub(crate) mod determinize; +pub(crate) mod empty; +pub(crate) mod int; +pub(crate) mod memchr; +pub(crate) mod search; +#[cfg(feature = "alloc")] +pub(crate) mod sparse_set; +pub(crate) mod unicode_data; +pub(crate) mod utf8; diff --git a/vendor/regex-automata/src/util/pool.rs b/vendor/regex-automata/src/util/pool.rs new file mode 100644 index 00000000000000..567ebfb2ea9582 --- /dev/null +++ b/vendor/regex-automata/src/util/pool.rs @@ -0,0 +1,1199 @@ +// This module provides a relatively simple thread-safe pool of reusable +// objects. For the most part, it's implemented by a stack represented by a +// Mutex>. It has one small trick: because unlocking a mutex is somewhat +// costly, in the case where a pool is accessed by the first thread that tried +// to get a value, we bypass the mutex. Here are some benchmarks showing the +// difference. +// +// 2022-10-15: These benchmarks are from the old regex crate and they aren't +// easy to reproduce because some rely on older implementations of Pool that +// are no longer around. I've left the results here for posterity, but any +// enterprising individual should feel encouraged to re-litigate the way Pool +// works. I am not at all certain it is the best approach. 
+// +// 1) misc::anchored_literal_long_non_match 21 (18571 MB/s) +// 2) misc::anchored_literal_long_non_match 107 (3644 MB/s) +// 3) misc::anchored_literal_long_non_match 45 (8666 MB/s) +// 4) misc::anchored_literal_long_non_match 19 (20526 MB/s) +// +// (1) represents our baseline: the master branch at the time of writing when +// using the 'thread_local' crate to implement the pool below. +// +// (2) represents a naive pool implemented completely via Mutex>. There +// is no special trick for bypassing the mutex. +// +// (3) is the same as (2), except it uses Mutex>>. It is twice as +// fast because a Box is much smaller than the T we use with a Pool in this +// crate. So pushing and popping a Box from a Vec is quite a bit faster +// than for T. +// +// (4) is the same as (3), but with the trick for bypassing the mutex in the +// case of the first-to-get thread. +// +// Why move off of thread_local? Even though (4) is a hair faster than (1) +// above, this was not the main goal. The main goal was to move off of +// thread_local and find a way to *simply* re-capture some of its speed for +// regex's specific case. So again, why move off of it? The *primary* reason is +// because of memory leaks. See https://github.com/rust-lang/regex/issues/362 +// for example. (Why do I want it to be simple? Well, I suppose what I mean is, +// "use as much safe code as possible to minimize risk and be as sure as I can +// be that it is correct.") +// +// My guess is that the thread_local design is probably not appropriate for +// regex since its memory usage scales to the number of active threads that +// have used a regex, where as the pool below scales to the number of threads +// that simultaneously use a regex. While neither case permits contraction, +// since we own the pool data structure below, we can add contraction if a +// clear use case pops up in the wild. More pressingly though, it seems that +// there are at least some use case patterns where one might have many threads +// sitting around that might have used a regex at one point. While thread_local +// does try to reuse space previously used by a thread that has since stopped, +// its maximal memory usage still scales with the total number of active +// threads. In contrast, the pool below scales with the total number of threads +// *simultaneously* using the pool. The hope is that this uses less memory +// overall. And if it doesn't, we can hopefully tune it somehow. +// +// It seems that these sort of conditions happen frequently +// in FFI inside of other more "managed" languages. This was +// mentioned in the issue linked above, and also mentioned here: +// https://github.com/BurntSushi/rure-go/issues/3. And in particular, users +// confirm that disabling the use of thread_local resolves the leak. +// +// There were other weaker reasons for moving off of thread_local as well. +// Namely, at the time, I was looking to reduce dependencies. And for something +// like regex, maintenance can be simpler when we own the full dependency tree. +// +// Note that I am not entirely happy with this pool. It has some subtle +// implementation details and is overall still observable (even with the +// thread owner optimization) in benchmarks. If someone wants to take a crack +// at building something better, please file an issue. Even if it means a +// different API. The API exposed by this pool is not the minimal thing that +// something like a 'Regex' actually needs. 
It could adapt to, for example, +// an API more like what is found in the 'thread_local' crate. However, we do +// really need to support the no-std alloc-only context, or else the regex +// crate wouldn't be able to support no-std alloc-only. However, I'm generally +// okay with making the alloc-only context slower (as it is here), although I +// do find it unfortunate. + +/*! +A thread safe memory pool. + +The principal type in this module is a [`Pool`]. It main use case is for +holding a thread safe collection of mutable scratch spaces (usually called +`Cache` in this crate) that regex engines need to execute a search. This then +permits sharing the same read-only regex object across multiple threads while +having a quick way of reusing scratch space in a thread safe way. This avoids +needing to re-create the scratch space for every search, which could wind up +being quite expensive. +*/ + +/// A thread safe pool that works in an `alloc`-only context. +/// +/// Getting a value out comes with a guard. When that guard is dropped, the +/// value is automatically put back in the pool. The guard provides both a +/// `Deref` and a `DerefMut` implementation for easy access to an underlying +/// `T`. +/// +/// A `Pool` impls `Sync` when `T` is `Send` (even if `T` is not `Sync`). This +/// is possible because a pool is guaranteed to provide a value to exactly one +/// thread at any time. +/// +/// Currently, a pool never contracts in size. Its size is proportional to the +/// maximum number of simultaneous uses. This may change in the future. +/// +/// A `Pool` is a particularly useful data structure for this crate because +/// many of the regex engines require a mutable "cache" in order to execute +/// a search. Since regexes themselves tend to be global, the problem is then: +/// how do you get a mutable cache to execute a search? You could: +/// +/// 1. Use a `thread_local!`, which requires the standard library and requires +/// that the regex pattern be statically known. +/// 2. Use a `Pool`. +/// 3. Make the cache an explicit dependency in your code and pass it around. +/// 4. Put the cache state in a `Mutex`, but this means only one search can +/// execute at a time. +/// 5. Create a new cache for every search. +/// +/// A `thread_local!` is perhaps the best choice if it works for your use case. +/// Putting the cache in a mutex or creating a new cache for every search are +/// perhaps the worst choices. Of the remaining two choices, whether you use +/// this `Pool` or thread through a cache explicitly in your code is a matter +/// of taste and depends on your code architecture. +/// +/// # Warning: may use a spin lock +/// +/// When this crate is compiled _without_ the `std` feature, then this type +/// may used a spin lock internally. This can have subtle effects that may +/// be undesirable. See [Spinlocks Considered Harmful][spinharm] for a more +/// thorough treatment of this topic. +/// +/// [spinharm]: https://matklad.github.io/2020/01/02/spinlocks-considered-harmful.html +/// +/// # Example +/// +/// This example shows how to share a single hybrid regex among multiple +/// threads, while also safely getting exclusive access to a hybrid's +/// [`Cache`](crate::hybrid::regex::Cache) without preventing other searches +/// from running while your thread uses the `Cache`. 
+/// +/// ``` +/// use regex_automata::{ +/// hybrid::regex::{Cache, Regex}, +/// util::{lazy::Lazy, pool::Pool}, +/// Match, +/// }; +/// +/// static RE: Lazy = +/// Lazy::new(|| Regex::new("foo[0-9]+bar").unwrap()); +/// static CACHE: Lazy> = +/// Lazy::new(|| Pool::new(|| RE.create_cache())); +/// +/// let expected = Some(Match::must(0, 3..14)); +/// assert_eq!(expected, RE.find(&mut CACHE.get(), b"zzzfoo12345barzzz")); +/// ``` +pub struct Pool T>(alloc::boxed::Box>); + +impl Pool { + /// Create a new pool. The given closure is used to create values in + /// the pool when necessary. + pub fn new(create: F) -> Pool { + Pool(alloc::boxed::Box::new(inner::Pool::new(create))) + } +} + +impl T> Pool { + /// Get a value from the pool. The caller is guaranteed to have + /// exclusive access to the given value. Namely, it is guaranteed that + /// this will never return a value that was returned by another call to + /// `get` but was not put back into the pool. + /// + /// When the guard goes out of scope and its destructor is called, then + /// it will automatically be put back into the pool. Alternatively, + /// [`PoolGuard::put`] may be used to explicitly put it back in the pool + /// without relying on its destructor. + /// + /// Note that there is no guarantee provided about which value in the + /// pool is returned. That is, calling get, dropping the guard (causing + /// the value to go back into the pool) and then calling get again is + /// *not* guaranteed to return the same value received in the first `get` + /// call. + #[inline] + pub fn get(&self) -> PoolGuard<'_, T, F> { + PoolGuard(self.0.get()) + } +} + +impl core::fmt::Debug for Pool { + fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { + f.debug_tuple("Pool").field(&self.0).finish() + } +} + +/// A guard that is returned when a caller requests a value from the pool. +/// +/// The purpose of the guard is to use RAII to automatically put the value +/// back in the pool once it's dropped. +pub struct PoolGuard<'a, T: Send, F: Fn() -> T>(inner::PoolGuard<'a, T, F>); + +impl<'a, T: Send, F: Fn() -> T> PoolGuard<'a, T, F> { + /// Consumes this guard and puts it back into the pool. + /// + /// This circumvents the guard's `Drop` implementation. This can be useful + /// in circumstances where the automatic `Drop` results in poorer codegen, + /// such as calling non-inlined functions. + #[inline] + pub fn put(this: PoolGuard<'_, T, F>) { + inner::PoolGuard::put(this.0); + } +} + +impl<'a, T: Send, F: Fn() -> T> core::ops::Deref for PoolGuard<'a, T, F> { + type Target = T; + + #[inline] + fn deref(&self) -> &T { + self.0.value() + } +} + +impl<'a, T: Send, F: Fn() -> T> core::ops::DerefMut for PoolGuard<'a, T, F> { + #[inline] + fn deref_mut(&mut self) -> &mut T { + self.0.value_mut() + } +} + +impl<'a, T: Send + core::fmt::Debug, F: Fn() -> T> core::fmt::Debug + for PoolGuard<'a, T, F> +{ + fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { + f.debug_tuple("PoolGuard").field(&self.0).finish() + } +} + +#[cfg(feature = "std")] +mod inner { + use core::{ + cell::UnsafeCell, + panic::{RefUnwindSafe, UnwindSafe}, + sync::atomic::{AtomicUsize, Ordering}, + }; + + use alloc::{boxed::Box, vec, vec::Vec}; + + use std::{sync::Mutex, thread_local}; + + /// An atomic counter used to allocate thread IDs. + /// + /// We specifically start our counter at 3 so that we can use the values + /// less than it as sentinels. 
+ static COUNTER: AtomicUsize = AtomicUsize::new(3); + + /// A thread ID indicating that there is no owner. This is the initial + /// state of a pool. Once a pool has an owner, there is no way to change + /// it. + static THREAD_ID_UNOWNED: usize = 0; + + /// A thread ID indicating that the special owner value is in use and not + /// available. This state is useful for avoiding a case where the owner + /// of a pool calls `get` before putting the result of a previous `get` + /// call back into the pool. + static THREAD_ID_INUSE: usize = 1; + + /// This sentinel is used to indicate that a guard has already been dropped + /// and should not be re-dropped. We use this because our drop code can be + /// called outside of Drop and thus there could be a bug in the internal + /// implementation that results in trying to put the same guard back into + /// the same pool multiple times, and *that* could result in UB if we + /// didn't mark the guard as already having been put back in the pool. + /// + /// So this isn't strictly necessary, but this let's us define some + /// routines as safe (like PoolGuard::put_imp) that we couldn't otherwise + /// do. + static THREAD_ID_DROPPED: usize = 2; + + /// The number of stacks we use inside of the pool. These are only used for + /// non-owners. That is, these represent the "slow" path. + /// + /// In the original implementation of this pool, we only used a single + /// stack. While this might be okay for a couple threads, the prevalence of + /// 32, 64 and even 128 core CPUs has made it untenable. The contention + /// such an environment introduces when threads are doing a lot of searches + /// on short haystacks (a not uncommon use case) is palpable and leads to + /// huge slowdowns. + /// + /// This constant reflects a change from using one stack to the number of + /// stacks that this constant is set to. The stack for a particular thread + /// is simply chosen by `thread_id % MAX_POOL_STACKS`. The idea behind + /// this setup is that there should be a good chance that accesses to the + /// pool will be distributed over several stacks instead of all of them + /// converging to one. + /// + /// This is not a particularly smart or dynamic strategy. Fixing this to a + /// specific number has at least two downsides. First is that it will help, + /// say, an 8 core CPU more than it will a 128 core CPU. (But, crucially, + /// it will still help the 128 core case.) Second is that this may wind + /// up being a little wasteful with respect to memory usage. Namely, if a + /// regex is used on one thread and then moved to another thread, then it + /// could result in creating a new copy of the data in the pool even though + /// only one is actually needed. + /// + /// And that memory usage bit is why this is set to 8 and not, say, 64. + /// Keeping it at 8 limits, to an extent, how much unnecessary memory can + /// be allocated. + /// + /// In an ideal world, we'd be able to have something like this: + /// + /// * Grow the number of stacks as the number of concurrent callers + /// increases. I spent a little time trying this, but even just adding an + /// atomic addition/subtraction for each pop/push for tracking concurrent + /// callers led to a big perf hit. Since even more work would seemingly be + /// required than just an addition/subtraction, I abandoned this approach. + /// * The maximum amount of memory used should scale with respect to the + /// number of concurrent callers and *not* the total number of existing + /// threads. 
This is primarily why the `thread_local` crate isn't used, as + /// as some environments spin up a lot of threads. This led to multiple + /// reports of extremely high memory usage (often described as memory + /// leaks). + /// * Even more ideally, the pool should contract in size. That is, it + /// should grow with bursts and then shrink. But this is a pretty thorny + /// issue to tackle and it might be better to just not. + /// * It would be nice to explore the use of, say, a lock-free stack + /// instead of using a mutex to guard a `Vec` that is ultimately just + /// treated as a stack. The main thing preventing me from exploring this + /// is the ABA problem. The `crossbeam` crate has tools for dealing with + /// this sort of problem (via its epoch based memory reclamation strategy), + /// but I can't justify bringing in all of `crossbeam` as a dependency of + /// `regex` for this. + /// + /// See this issue for more context and discussion: + /// https://github.com/rust-lang/regex/issues/934 + const MAX_POOL_STACKS: usize = 8; + + thread_local!( + /// A thread local used to assign an ID to a thread. + static THREAD_ID: usize = { + let next = COUNTER.fetch_add(1, Ordering::Relaxed); + // SAFETY: We cannot permit the reuse of thread IDs since reusing a + // thread ID might result in more than one thread "owning" a pool, + // and thus, permit accessing a mutable value from multiple threads + // simultaneously without synchronization. The intent of this panic + // is to be a sanity check. It is not expected that the thread ID + // space will actually be exhausted in practice. Even on a 32-bit + // system, it would require spawning 2^32 threads (although they + // wouldn't all need to run simultaneously, so it is in theory + // possible). + // + // This checks that the counter never wraps around, since atomic + // addition wraps around on overflow. + if next == 0 { + panic!("regex: thread ID allocation space exhausted"); + } + next + }; + ); + + /// This puts each stack in the pool below into its own cache line. This is + /// an absolutely critical optimization that tends to have the most impact + /// in high contention workloads. Without forcing each mutex protected + /// into its own cache line, high contention exacerbates the performance + /// problem by causing "false sharing." By putting each mutex in its own + /// cache-line, we avoid the false sharing problem and the affects of + /// contention are greatly reduced. + #[derive(Debug)] + #[repr(C, align(64))] + struct CacheLine(T); + + /// A thread safe pool utilizing std-only features. + /// + /// The main difference between this and the simplistic alloc-only pool is + /// the use of std::sync::Mutex and an "owner thread" optimization that + /// makes accesses by the owner of a pool faster than all other threads. + /// This makes the common case of running a regex within a single thread + /// faster by avoiding mutex unlocking. + pub(super) struct Pool { + /// A function to create more T values when stack is empty and a caller + /// has requested a T. + create: F, + /// Multiple stacks of T values to hand out. These are used when a Pool + /// is accessed by a thread that didn't create it. + /// + /// Conceptually this is `Mutex>>`, but sharded out to make + /// it scale better under high contention work-loads. We index into + /// this sequence via `thread_id % stacks.len()`. + stacks: Vec>>>>, + /// The ID of the thread that owns this pool. The owner is the thread + /// that makes the first call to 'get'. 
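The `CacheLine` wrapper relies purely on alignment to keep each mutex-guarded stack on its own cache line. A minimal sketch of that padding trick, assuming a 64-byte cache line as the code above does:

```rust
use std::mem::{align_of, size_of};
use std::sync::Mutex;

// Same idea as `CacheLine<T>`: 64-byte alignment guarantees that two adjacent
// `Padded` values never share a cache line, which prevents false sharing
// between independently locked values.
#[repr(C, align(64))]
struct Padded<T>(T);

fn main() {
    // Two unpadded mutexes may well sit on the same cache line...
    let _unpadded: [Mutex<u64>; 2] = [Mutex::new(0), Mutex::new(0)];
    // ...while padded ones each start on their own 64-byte boundary.
    let _padded: [Padded<Mutex<u64>>; 2] =
        [Padded(Mutex::new(0)), Padded(Mutex::new(0))];

    assert_eq!(align_of::<Padded<Mutex<u64>>>(), 64);
    // Size is rounded up to the alignment, so array elements never straddle
    // a 64-byte boundary.
    assert_eq!(size_of::<Padded<Mutex<u64>>>() % 64, 0);
}
```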
When the owner calls 'get', it + /// gets 'owner_val' directly instead of returning a T from 'stack'. + /// See comments elsewhere for details, but this is intended to be an + /// optimization for the common case that makes getting a T faster. + /// + /// It is initialized to a value of zero (an impossible thread ID) as a + /// sentinel to indicate that it is unowned. + owner: AtomicUsize, + /// A value to return when the caller is in the same thread that + /// first called `Pool::get`. + /// + /// This is set to None when a Pool is first created, and set to Some + /// once the first thread calls Pool::get. + owner_val: UnsafeCell>, + } + + // SAFETY: Since we want to use a Pool from multiple threads simultaneously + // behind an Arc, we need for it to be Sync. In cases where T is sync, + // Pool would be Sync. However, since we use a Pool to store mutable + // scratch space, we wind up using a T that has interior mutability and is + // thus itself not Sync. So what we *really* want is for our Pool to by + // Sync even when T is not Sync (but is at least Send). + // + // The only non-sync aspect of a Pool is its 'owner_val' field, which is + // used to implement faster access to a pool value in the common case of + // a pool being accessed in the same thread in which it was created. The + // 'stack' field is also shared, but a Mutex where T: Send is already + // Sync. So we only need to worry about 'owner_val'. + // + // The key is to guarantee that 'owner_val' can only ever be accessed from + // one thread. In our implementation below, we guarantee this by only + // returning the 'owner_val' when the ID of the current thread matches the + // ID of the thread that first called 'Pool::get'. Since this can only ever + // be one thread, it follows that only one thread can access 'owner_val' at + // any point in time. Thus, it is safe to declare that Pool is Sync when + // T is Send. + // + // If there is a way to achieve our performance goals using safe code, then + // I would very much welcome a patch. As it stands, the implementation + // below tries to balance safety with performance. The case where a Regex + // is used from multiple threads simultaneously will suffer a bit since + // getting a value out of the pool will require unlocking a mutex. + // + // We require `F: Send + Sync` because we call `F` at any point on demand, + // potentially from multiple threads simultaneously. + unsafe impl Sync for Pool {} + + // If T is UnwindSafe, then since we provide exclusive access to any + // particular value in the pool, the pool should therefore also be + // considered UnwindSafe. + // + // We require `F: UnwindSafe + RefUnwindSafe` because we call `F` at any + // point on demand, so it needs to be unwind safe on both dimensions for + // the entire Pool to be unwind safe. + impl UnwindSafe for Pool {} + + // If T is UnwindSafe, then since we provide exclusive access to any + // particular value in the pool, the pool should therefore also be + // considered RefUnwindSafe. + // + // We require `F: UnwindSafe + RefUnwindSafe` because we call `F` at any + // point on demand, so it needs to be unwind safe on both dimensions for + // the entire Pool to be unwind safe. + impl RefUnwindSafe + for Pool + { + } + + impl Pool { + /// Create a new pool. The given closure is used to create values in + /// the pool when necessary. + pub(super) fn new(create: F) -> Pool { + // FIXME: Now that we require 1.65+, Mutex::new is available as + // const... So we can almost mark this function as const. 
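The `Sync` argument above can be checked mechanically. A small sketch, assuming the published `regex-automata` crate, showing that a pool of a `Send`-but-not-`Sync` type is itself `Sync`:

```rust
use std::cell::RefCell;

use regex_automata::util::pool::Pool;

// Compiles only for types that are Sync.
fn assert_sync<T: Sync>() {}

fn main() {
    // `RefCell<u32>` is Send but not Sync, so this would not compile:
    // assert_sync::<RefCell<u32>>();
    //
    // A pool of them is Sync anyway, because the pool only ever hands a
    // given value to one thread at a time.
    assert_sync::<Pool<RefCell<u32>>>();
}
```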
But of + // course, we're creating a Vec of stacks below (we didn't when I + // originally wrote this code). It seems like the best way to work + // around this would be to use a `[Stack; MAX_POOL_STACKS]` instead + // of a `Vec`. I refrained from making this change at time + // of writing (2023/10/08) because I was making a lot of other + // changes at the same time and wanted to do this more carefully. + // Namely, because of the cache line optimization, that `[Stack; + // MAX_POOL_STACKS]` would be quite big. It's unclear how bad (if + // at all) that would be. + // + // Another choice would be to lazily allocate the stacks, but... + // I'm not so sure about that. Seems like a fair bit of complexity? + // + // Maybe there's a simple solution I'm missing. + // + // ... OK, I tried to fix this. First, I did it by putting `stacks` + // in an `UnsafeCell` and using a `Once` to lazily initialize it. + // I benchmarked it and everything looked okay. I then made this + // function `const` and thought I was just about done. But the + // public pool type wraps its inner pool in a `Box` to keep its + // size down. Blech. + // + // So then I thought that I could push the box down into this + // type (and leave the non-std version unboxed) and use the same + // `UnsafeCell` technique to lazily initialize it. This has the + // downside of the `Once` now needing to get hit in the owner fast + // path, but maybe that's OK? However, I then realized that we can + // only lazily initialize `stacks`, `owner` and `owner_val`. The + // `create` function needs to be put somewhere outside of the box. + // So now the pool is a `Box`, `Once` and a function. Now we're + // starting to defeat the point of boxing in the first place. So I + // backed out that change too. + // + // Back to square one. I maybe we just don't make a pool's + // constructor const and live with it. It's probably not a huge + // deal. + let mut stacks = Vec::with_capacity(MAX_POOL_STACKS); + for _ in 0..stacks.capacity() { + stacks.push(CacheLine(Mutex::new(vec![]))); + } + let owner = AtomicUsize::new(THREAD_ID_UNOWNED); + let owner_val = UnsafeCell::new(None); // init'd on first access + Pool { create, stacks, owner, owner_val } + } + } + + impl T> Pool { + /// Get a value from the pool. This may block if another thread is also + /// attempting to retrieve a value from the pool. + #[inline] + pub(super) fn get(&self) -> PoolGuard<'_, T, F> { + // Our fast path checks if the caller is the thread that "owns" + // this pool. Or stated differently, whether it is the first thread + // that tried to extract a value from the pool. If it is, then we + // can return a T to the caller without going through a mutex. + // + // SAFETY: We must guarantee that only one thread gets access + // to this value. Since a thread is uniquely identified by the + // THREAD_ID thread local, it follows that if the caller's thread + // ID is equal to the owner, then only one thread may receive this + // value. This is also why we can get away with what looks like a + // racy load and a store. We know that if 'owner == caller', then + // only one thread can be here, so we don't need to worry about any + // other thread setting the owner to something else. + let caller = THREAD_ID.with(|id| *id); + let owner = self.owner.load(Ordering::Acquire); + if caller == owner { + // N.B. We could also do a CAS here instead of a load/store, + // but ad hoc benchmarking suggests it is slower. And a lot + // slower in the case where `get_slow` is common. 
+ self.owner.store(THREAD_ID_INUSE, Ordering::Release); + return self.guard_owned(caller); + } + self.get_slow(caller, owner) + } + + /// This is the "slow" version that goes through a mutex to pop an + /// allocated value off a stack to return to the caller. (Or, if the + /// stack is empty, a new value is created.) + /// + /// If the pool has no owner, then this will set the owner. + #[cold] + fn get_slow( + &self, + caller: usize, + owner: usize, + ) -> PoolGuard<'_, T, F> { + if owner == THREAD_ID_UNOWNED { + // This sentinel means this pool is not yet owned. We try to + // atomically set the owner. If we do, then this thread becomes + // the owner and we can return a guard that represents the + // special T for the owner. + // + // Note that we set the owner to a different sentinel that + // indicates that the owned value is in use. The owner ID will + // get updated to the actual ID of this thread once the guard + // returned by this function is put back into the pool. + let res = self.owner.compare_exchange( + THREAD_ID_UNOWNED, + THREAD_ID_INUSE, + Ordering::AcqRel, + Ordering::Acquire, + ); + if res.is_ok() { + // SAFETY: A successful CAS above implies this thread is + // the owner and that this is the only such thread that + // can reach here. Thus, there is no data race. + unsafe { + *self.owner_val.get() = Some((self.create)()); + } + return self.guard_owned(caller); + } + } + let stack_id = caller % self.stacks.len(); + // We try to acquire exclusive access to this thread's stack, and + // if so, grab a value from it if we can. We put this in a loop so + // that it's easy to tweak and experiment with a different number + // of tries. In the end, I couldn't see anything obviously better + // than one attempt in ad hoc testing. + for _ in 0..1 { + let mut stack = match self.stacks[stack_id].0.try_lock() { + Err(_) => continue, + Ok(stack) => stack, + }; + if let Some(value) = stack.pop() { + return self.guard_stack(value); + } + // Unlock the mutex guarding the stack before creating a fresh + // value since we no longer need the stack. + drop(stack); + let value = Box::new((self.create)()); + return self.guard_stack(value); + } + // We're only here if we could get access to our stack, so just + // create a new value. This seems like it could be wasteful, but + // waiting for exclusive access to a stack when there's high + // contention is brutal for perf. + self.guard_stack_transient(Box::new((self.create)())) + } + + /// Puts a value back into the pool. Callers don't need to call this. + /// Once the guard that's returned by 'get' is dropped, it is put back + /// into the pool automatically. + #[inline] + fn put_value(&self, value: Box) { + let caller = THREAD_ID.with(|id| *id); + let stack_id = caller % self.stacks.len(); + // As with trying to pop a value from this thread's stack, we + // merely attempt to get access to push this value back on the + // stack. If there's too much contention, we just give up and throw + // the value away. + // + // Interestingly, in ad hoc benchmarking, it is beneficial to + // attempt to push the value back more than once, unlike when + // popping the value. I don't have a good theory for why this is. + // I guess if we drop too many values then that winds up forcing + // the pop operation to create new fresh values and thus leads to + // less reuse. There's definitely a balancing act here. 
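The owner hand-off in `get`/`get_slow` boils down to one atomic word holding either a sentinel or the owning thread's ID. A standalone, simplified sketch of that compare-and-swap pattern, with sentinels mirroring the ones above:

```rust
use std::sync::atomic::{AtomicUsize, Ordering};

const UNOWNED: usize = 0; // no thread has claimed the value yet
const INUSE: usize = 1; // the owned value is currently checked out

fn try_claim(owner: &AtomicUsize, caller: usize) -> bool {
    if owner.load(Ordering::Acquire) == caller {
        // Fast path: we already own it. Mark it "in use" while we hold it.
        owner.store(INUSE, Ordering::Release);
        return true;
    }
    // Slow path: try to become the owner if nobody owns it yet. Exactly one
    // thread can win this CAS.
    owner
        .compare_exchange(UNOWNED, INUSE, Ordering::AcqRel, Ordering::Acquire)
        .is_ok()
}

fn release(owner: &AtomicUsize, caller: usize) {
    // Publishing the caller's ID re-enables the fast path for that thread.
    owner.store(caller, Ordering::Release);
}

fn main() {
    let owner = AtomicUsize::new(UNOWNED);
    let me = 3; // thread IDs start at 3; 0..=2 are sentinels
    assert!(try_claim(&owner, me)); // first claim wins via the CAS
    release(&owner, me);
    assert!(try_claim(&owner, me)); // later claims take the fast path
}
```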
+ for _ in 0..10 { + let mut stack = match self.stacks[stack_id].0.try_lock() { + Err(_) => continue, + Ok(stack) => stack, + }; + stack.push(value); + return; + } + } + + /// Create a guard that represents the special owned T. + #[inline] + fn guard_owned(&self, caller: usize) -> PoolGuard<'_, T, F> { + PoolGuard { pool: self, value: Err(caller), discard: false } + } + + /// Create a guard that contains a value from the pool's stack. + #[inline] + fn guard_stack(&self, value: Box) -> PoolGuard<'_, T, F> { + PoolGuard { pool: self, value: Ok(value), discard: false } + } + + /// Create a guard that contains a value from the pool's stack with an + /// instruction to throw away the value instead of putting it back + /// into the pool. + #[inline] + fn guard_stack_transient(&self, value: Box) -> PoolGuard<'_, T, F> { + PoolGuard { pool: self, value: Ok(value), discard: true } + } + } + + impl core::fmt::Debug for Pool { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + f.debug_struct("Pool") + .field("stacks", &self.stacks) + .field("owner", &self.owner) + .field("owner_val", &self.owner_val) + .finish() + } + } + + /// A guard that is returned when a caller requests a value from the pool. + pub(super) struct PoolGuard<'a, T: Send, F: Fn() -> T> { + /// The pool that this guard is attached to. + pool: &'a Pool, + /// This is Err when the guard represents the special "owned" value. + /// In which case, the value is retrieved from 'pool.owner_val'. And + /// in the special case of `Err(THREAD_ID_DROPPED)`, it means the + /// guard has been put back into the pool and should no longer be used. + value: Result, usize>, + /// When true, the value should be discarded instead of being pushed + /// back into the pool. We tend to use this under high contention, and + /// this allows us to avoid inflating the size of the pool. (Because + /// under contention, we tend to create more values instead of waiting + /// for access to a stack of existing values.) + discard: bool, + } + + impl<'a, T: Send, F: Fn() -> T> PoolGuard<'a, T, F> { + /// Return the underlying value. + #[inline] + pub(super) fn value(&self) -> &T { + match self.value { + Ok(ref v) => v, + // SAFETY: This is safe because the only way a PoolGuard gets + // created for self.value=Err is when the current thread + // corresponds to the owning thread, of which there can only + // be one. Thus, we are guaranteed to be providing exclusive + // access here which makes this safe. + // + // Also, since 'owner_val' is guaranteed to be initialized + // before an owned PoolGuard is created, the unchecked unwrap + // is safe. + Err(id) => unsafe { + // This assert is *not* necessary for safety, since we + // should never be here if the guard had been put back into + // the pool. This is a sanity check to make sure we didn't + // break an internal invariant. + debug_assert_ne!(THREAD_ID_DROPPED, id); + (*self.pool.owner_val.get()).as_ref().unwrap_unchecked() + }, + } + } + + /// Return the underlying value as a mutable borrow. + #[inline] + pub(super) fn value_mut(&mut self) -> &mut T { + match self.value { + Ok(ref mut v) => v, + // SAFETY: This is safe because the only way a PoolGuard gets + // created for self.value=None is when the current thread + // corresponds to the owning thread, of which there can only + // be one. Thus, we are guaranteed to be providing exclusive + // access here which makes this safe. 
+ // + // Also, since 'owner_val' is guaranteed to be initialized + // before an owned PoolGuard is created, the unwrap_unchecked + // is safe. + Err(id) => unsafe { + // This assert is *not* necessary for safety, since we + // should never be here if the guard had been put back into + // the pool. This is a sanity check to make sure we didn't + // break an internal invariant. + debug_assert_ne!(THREAD_ID_DROPPED, id); + (*self.pool.owner_val.get()).as_mut().unwrap_unchecked() + }, + } + } + + /// Consumes this guard and puts it back into the pool. + #[inline] + pub(super) fn put(this: PoolGuard<'_, T, F>) { + // Since this is effectively consuming the guard and putting the + // value back into the pool, there's no reason to run its Drop + // impl after doing this. I don't believe there is a correctness + // problem with doing so, but there's definitely a perf problem + // by redoing this work. So we avoid it. + let mut this = core::mem::ManuallyDrop::new(this); + this.put_imp(); + } + + /// Puts this guard back into the pool by only borrowing the guard as + /// mutable. This should be called at most once. + #[inline(always)] + fn put_imp(&mut self) { + match core::mem::replace(&mut self.value, Err(THREAD_ID_DROPPED)) { + Ok(value) => { + // If we were told to discard this value then don't bother + // trying to put it back into the pool. This occurs when + // the pop operation failed to acquire a lock and we + // decided to create a new value in lieu of contending for + // the lock. + if self.discard { + return; + } + self.pool.put_value(value); + } + // If this guard has a value "owned" by the thread, then + // the Pool guarantees that this is the ONLY such guard. + // Therefore, in order to place it back into the pool and make + // it available, we need to change the owner back to the owning + // thread's ID. But note that we use the ID that was stored in + // the guard, since a guard can be moved to another thread and + // dropped. (A previous iteration of this code read from the + // THREAD_ID thread local, which uses the ID of the current + // thread which may not be the ID of the owning thread! This + // also avoids the TLS access, which is likely a hair faster.) + Err(owner) => { + // If we hit this point, it implies 'put_imp' has been + // called multiple times for the same guard which in turn + // corresponds to a bug in this implementation. + assert_ne!(THREAD_ID_DROPPED, owner); + self.pool.owner.store(owner, Ordering::Release); + } + } + } + } + + impl<'a, T: Send, F: Fn() -> T> Drop for PoolGuard<'a, T, F> { + #[inline] + fn drop(&mut self) { + self.put_imp(); + } + } + + impl<'a, T: Send + core::fmt::Debug, F: Fn() -> T> core::fmt::Debug + for PoolGuard<'a, T, F> + { + fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { + f.debug_struct("PoolGuard") + .field("pool", &self.pool) + .field("value", &self.value) + .finish() + } + } +} + +// FUTURE: We should consider using Mara Bos's nearly-lock-free version of this +// here: https://gist.github.com/m-ou-se/5fdcbdf7dcf4585199ce2de697f367a4. +// +// One reason why I did things with a "mutex" below is that it isolates the +// safety concerns to just the Mutex, where as the safety of Mara's pool is a +// bit more sprawling. I also expect this code to not be used that much, and +// so is unlikely to get as much real world usage with which to test it. That +// means the "obviously correct" lever is an important one. 
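`PoolGuard::put` leans on `core::mem::ManuallyDrop` to run the put-back logic exactly once. A small sketch of the same pattern, using a hypothetical `Widget` type that only exists to show the shape of it:

```rust
use std::mem::ManuallyDrop;

struct Widget(&'static str);

impl Widget {
    // The shared "put back" logic.
    fn put_imp(&mut self) {
        println!("returned {} to the pool", self.0);
    }

    // Explicit, by-value variant that skips the Drop glue afterwards.
    fn put(this: Widget) {
        let mut this = ManuallyDrop::new(this);
        this.put_imp();
        // `this` is never dropped, so `put_imp` will not run a second time.
    }
}

impl Drop for Widget {
    fn drop(&mut self) {
        self.put_imp();
    }
}

fn main() {
    Widget::put(Widget("a")); // prints once, via the explicit path
    drop(Widget("b")); // prints once, via Drop
}
```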
+// +// The specific reason to use Mara's pool is that it is likely faster and also +// less likely to hit problems with spin-locks, although it is not completely +// impervious to them. +// +// The best solution to this problem, probably, is a truly lock free pool. That +// could be done with a lock free linked list. The issue is the ABA problem. It +// is difficult to avoid, and doing so is complex. BUT, the upshot of that is +// that if we had a truly lock free pool, then we could also use it above in +// the 'std' pool instead of a Mutex because it should be completely free the +// problems that come from spin-locks. +#[cfg(not(feature = "std"))] +mod inner { + use core::{ + cell::UnsafeCell, + panic::{RefUnwindSafe, UnwindSafe}, + sync::atomic::{AtomicBool, Ordering}, + }; + + use alloc::{boxed::Box, vec, vec::Vec}; + + /// A thread safe pool utilizing alloc-only features. + /// + /// Unlike the std version, it doesn't seem possible(?) to implement the + /// "thread owner" optimization because alloc-only doesn't have any concept + /// of threads. So the best we can do is just a normal stack. This will + /// increase latency in alloc-only environments. + pub(super) struct Pool { + /// A stack of T values to hand out. These are used when a Pool is + /// accessed by a thread that didn't create it. + stack: Mutex>>, + /// A function to create more T values when stack is empty and a caller + /// has requested a T. + create: F, + } + + // If T is UnwindSafe, then since we provide exclusive access to any + // particular value in the pool, it should therefore also be considered + // RefUnwindSafe. + impl RefUnwindSafe for Pool {} + + impl Pool { + /// Create a new pool. The given closure is used to create values in + /// the pool when necessary. + pub(super) const fn new(create: F) -> Pool { + Pool { stack: Mutex::new(vec![]), create } + } + } + + impl T> Pool { + /// Get a value from the pool. This may block if another thread is also + /// attempting to retrieve a value from the pool. + #[inline] + pub(super) fn get(&self) -> PoolGuard<'_, T, F> { + let mut stack = self.stack.lock(); + let value = match stack.pop() { + None => Box::new((self.create)()), + Some(value) => value, + }; + PoolGuard { pool: self, value: Some(value) } + } + + #[inline] + fn put(&self, guard: PoolGuard<'_, T, F>) { + let mut guard = core::mem::ManuallyDrop::new(guard); + if let Some(value) = guard.value.take() { + self.put_value(value); + } + } + + /// Puts a value back into the pool. Callers don't need to call this. + /// Once the guard that's returned by 'get' is dropped, it is put back + /// into the pool automatically. + #[inline] + fn put_value(&self, value: Box) { + let mut stack = self.stack.lock(); + stack.push(value); + } + } + + impl core::fmt::Debug for Pool { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + f.debug_struct("Pool").field("stack", &self.stack).finish() + } + } + + /// A guard that is returned when a caller requests a value from the pool. + pub(super) struct PoolGuard<'a, T: Send, F: Fn() -> T> { + /// The pool that this guard is attached to. + pool: &'a Pool, + /// This is None after the guard has been put back into the pool. + value: Option>, + } + + impl<'a, T: Send, F: Fn() -> T> PoolGuard<'a, T, F> { + /// Return the underlying value. + #[inline] + pub(super) fn value(&self) -> &T { + self.value.as_deref().unwrap() + } + + /// Return the underlying value as a mutable borrow. 
+ #[inline] + pub(super) fn value_mut(&mut self) -> &mut T { + self.value.as_deref_mut().unwrap() + } + + /// Consumes this guard and puts it back into the pool. + #[inline] + pub(super) fn put(this: PoolGuard<'_, T, F>) { + // Since this is effectively consuming the guard and putting the + // value back into the pool, there's no reason to run its Drop + // impl after doing this. I don't believe there is a correctness + // problem with doing so, but there's definitely a perf problem + // by redoing this work. So we avoid it. + let mut this = core::mem::ManuallyDrop::new(this); + this.put_imp(); + } + + /// Puts this guard back into the pool by only borrowing the guard as + /// mutable. This should be called at most once. + #[inline(always)] + fn put_imp(&mut self) { + if let Some(value) = self.value.take() { + self.pool.put_value(value); + } + } + } + + impl<'a, T: Send, F: Fn() -> T> Drop for PoolGuard<'a, T, F> { + #[inline] + fn drop(&mut self) { + self.put_imp(); + } + } + + impl<'a, T: Send + core::fmt::Debug, F: Fn() -> T> core::fmt::Debug + for PoolGuard<'a, T, F> + { + fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { + f.debug_struct("PoolGuard") + .field("pool", &self.pool) + .field("value", &self.value) + .finish() + } + } + + /// A spin-lock based mutex. Yes, I have read spinlocks considered + /// harmful[1], and if there's a reasonable alternative choice, I'll + /// happily take it. + /// + /// I suspect the most likely alternative here is a Treiber stack, but + /// implementing one correctly in a way that avoids the ABA problem looks + /// subtle enough that I'm not sure I want to attempt that. But otherwise, + /// we only need a mutex in order to implement our pool, so if there's + /// something simpler we can use that works for our `Pool` use case, then + /// that would be great. + /// + /// Note that this mutex does not do poisoning. + /// + /// [1]: https://matklad.github.io/2020/01/02/spinlocks-considered-harmful.html + #[derive(Debug)] + struct Mutex { + locked: AtomicBool, + data: UnsafeCell, + } + + // SAFETY: Since a Mutex guarantees exclusive access, as long as we can + // send it across threads, it must also be Sync. + unsafe impl Sync for Mutex {} + + impl Mutex { + /// Create a new mutex for protecting access to the given value across + /// multiple threads simultaneously. + const fn new(value: T) -> Mutex { + Mutex { + locked: AtomicBool::new(false), + data: UnsafeCell::new(value), + } + } + + /// Lock this mutex and return a guard providing exclusive access to + /// `T`. This blocks if some other thread has already locked this + /// mutex. + #[inline] + fn lock(&self) -> MutexGuard<'_, T> { + while self + .locked + .compare_exchange( + false, + true, + Ordering::AcqRel, + Ordering::Acquire, + ) + .is_err() + { + core::hint::spin_loop(); + } + // SAFETY: The only way we're here is if we successfully set + // 'locked' to true, which implies we must be the only thread here + // and thus have exclusive access to 'data'. + let data = unsafe { &mut *self.data.get() }; + MutexGuard { locked: &self.locked, data } + } + } + + /// A guard that derefs to &T and &mut T. When it's dropped, the lock is + /// released. 
+ #[derive(Debug)] + struct MutexGuard<'a, T> { + locked: &'a AtomicBool, + data: &'a mut T, + } + + impl<'a, T> core::ops::Deref for MutexGuard<'a, T> { + type Target = T; + + #[inline] + fn deref(&self) -> &T { + self.data + } + } + + impl<'a, T> core::ops::DerefMut for MutexGuard<'a, T> { + #[inline] + fn deref_mut(&mut self) -> &mut T { + self.data + } + } + + impl<'a, T> Drop for MutexGuard<'a, T> { + #[inline] + fn drop(&mut self) { + // Drop means 'data' is no longer accessible, so we can unlock + // the mutex. + self.locked.store(false, Ordering::Release); + } + } +} + +#[cfg(test)] +mod tests { + use core::panic::{RefUnwindSafe, UnwindSafe}; + + use alloc::{boxed::Box, vec, vec::Vec}; + + use super::*; + + #[test] + fn oibits() { + fn assert_oitbits() {} + assert_oitbits::>>(); + assert_oitbits::>>>(); + assert_oitbits::< + Pool< + Vec, + Box< + dyn Fn() -> Vec + + Send + + Sync + + UnwindSafe + + RefUnwindSafe, + >, + >, + >(); + } + + // Tests that Pool implements the "single owner" optimization. That is, the + // thread that first accesses the pool gets its own copy, while all other + // threads get distinct copies. + #[cfg(feature = "std")] + #[test] + fn thread_owner_optimization() { + use std::{cell::RefCell, sync::Arc, vec}; + + let pool: Arc>>> = + Arc::new(Pool::new(|| RefCell::new(vec!['a']))); + pool.get().borrow_mut().push('x'); + + let pool1 = pool.clone(); + let t1 = std::thread::spawn(move || { + let guard = pool1.get(); + guard.borrow_mut().push('y'); + }); + + let pool2 = pool.clone(); + let t2 = std::thread::spawn(move || { + let guard = pool2.get(); + guard.borrow_mut().push('z'); + }); + + t1.join().unwrap(); + t2.join().unwrap(); + + // If we didn't implement the single owner optimization, then one of + // the threads above is likely to have mutated the [a, x] vec that + // we stuffed in the pool before spawning the threads. But since + // neither thread was first to access the pool, and because of the + // optimization, we should be guaranteed that neither thread mutates + // the special owned pool value. + // + // (Technically this is an implementation detail and not a contract of + // Pool's API.) + assert_eq!(vec!['a', 'x'], *pool.get().borrow()); + } + + // This tests that if the "owner" of a pool asks for two values, then it + // gets two distinct values and not the same one. This test failed in the + // course of developing the pool, which in turn resulted in UB because it + // permitted getting aliasing &mut borrows to the same place in memory. + #[test] + fn thread_owner_distinct() { + let pool = Pool::new(|| vec!['a']); + + { + let mut g1 = pool.get(); + let v1 = &mut *g1; + let mut g2 = pool.get(); + let v2 = &mut *g2; + v1.push('b'); + v2.push('c'); + assert_eq!(&mut vec!['a', 'b'], v1); + assert_eq!(&mut vec!['a', 'c'], v2); + } + // This isn't technically guaranteed, but we + // expect to now get the "owned" value (the first + // call to 'get()' above) now that it's back in + // the pool. + assert_eq!(&mut vec!['a', 'b'], &mut *pool.get()); + } + + // This tests that we can share a guard with another thread, mutate the + // underlying value and everything works. This failed in the course of + // developing a pool since the pool permitted 'get()' to return the same + // value to the owner thread, even before the previous value was put back + // into the pool. This in turn resulted in this test producing a data race. 
+ #[cfg(feature = "std")] + #[test] + fn thread_owner_sync() { + let pool = Pool::new(|| vec!['a']); + { + let mut g1 = pool.get(); + let mut g2 = pool.get(); + std::thread::scope(|s| { + s.spawn(|| { + g1.push('b'); + }); + s.spawn(|| { + g2.push('c'); + }); + }); + + let v1 = &mut *g1; + let v2 = &mut *g2; + assert_eq!(&mut vec!['a', 'b'], v1); + assert_eq!(&mut vec!['a', 'c'], v2); + } + + // This isn't technically guaranteed, but we + // expect to now get the "owned" value (the first + // call to 'get()' above) now that it's back in + // the pool. + assert_eq!(&mut vec!['a', 'b'], &mut *pool.get()); + } + + // This tests that if we move a PoolGuard that is owned by the current + // thread to another thread and drop it, then the thread owner doesn't + // change. During development of the pool, this test failed because the + // PoolGuard assumed it was dropped in the same thread from which it was + // created, and thus used the current thread's ID as the owner, which could + // be different than the actual owner of the pool. + #[cfg(feature = "std")] + #[test] + fn thread_owner_send_drop() { + let pool = Pool::new(|| vec!['a']); + // Establishes this thread as the owner. + { + pool.get().push('b'); + } + std::thread::scope(|s| { + // Sanity check that we get the same value back. + // (Not technically guaranteed.) + let mut g = pool.get(); + assert_eq!(&vec!['a', 'b'], &*g); + // Now push it to another thread and drop it. + s.spawn(move || { + g.push('c'); + }) + .join() + .unwrap(); + }); + // Now check that we're still the owner. This is not technically + // guaranteed by the API, but is true in practice given the thread + // owner optimization. + assert_eq!(&vec!['a', 'b', 'c'], &*pool.get()); + } +} diff --git a/vendor/regex-automata/src/util/prefilter/aho_corasick.rs b/vendor/regex-automata/src/util/prefilter/aho_corasick.rs new file mode 100644 index 00000000000000..7a2517fc7b2156 --- /dev/null +++ b/vendor/regex-automata/src/util/prefilter/aho_corasick.rs @@ -0,0 +1,149 @@ +use crate::util::{ + prefilter::PrefilterI, + search::{MatchKind, Span}, +}; + +#[derive(Clone, Debug)] +pub(crate) struct AhoCorasick { + #[cfg(not(feature = "perf-literal-multisubstring"))] + _unused: (), + #[cfg(feature = "perf-literal-multisubstring")] + ac: aho_corasick::AhoCorasick, +} + +impl AhoCorasick { + pub(crate) fn new>( + kind: MatchKind, + needles: &[B], + ) -> Option { + #[cfg(not(feature = "perf-literal-multisubstring"))] + { + None + } + #[cfg(feature = "perf-literal-multisubstring")] + { + // We used to use `aho_corasick::MatchKind::Standard` here when + // `kind` was `MatchKind::All`, but this is not correct. The + // "standard" Aho-Corasick match semantics are to report a match + // immediately as soon as it is seen, but `All` isn't like that. + // In particular, with "standard" semantics, given the needles + // "abc" and "b" and the haystack "abc," it would report a match + // at offset 1 before a match at offset 0. This is never what we + // want in the context of the regex engine, regardless of whether + // we have leftmost-first or 'all' semantics. Namely, we always + // want the leftmost match. + let ac_match_kind = match kind { + MatchKind::LeftmostFirst | MatchKind::All => { + aho_corasick::MatchKind::LeftmostFirst + } + }; + // This is kind of just an arbitrary number, but basically, if we + // have a small enough set of literals, then we try to use the VERY + // memory hungry DFA. Otherwise, we wimp out and use an NFA. 
The + // upshot is that the NFA is quite lean and decently fast. Faster + // than a naive Aho-Corasick NFA anyway. + let ac_kind = if needles.len() <= 500 { + aho_corasick::AhoCorasickKind::DFA + } else { + aho_corasick::AhoCorasickKind::ContiguousNFA + }; + let result = aho_corasick::AhoCorasick::builder() + .kind(Some(ac_kind)) + .match_kind(ac_match_kind) + .start_kind(aho_corasick::StartKind::Both) + // We try to handle all of the prefilter cases in the super + // module, and only use Aho-Corasick for the actual automaton. + // The aho-corasick crate does have some extra prefilters, + // namely, looking for rare bytes to feed to memchr{,2,3} + // instead of just the first byte. If we end up wanting + // those---and they are somewhat tricky to implement---then + // we could port them to this crate. + // + // The main reason for doing things this way is so we have a + // complete and easy to understand picture of which prefilters + // are available and how they work. Otherwise it seems too + // easy to get into a situation where we have a prefilter + // layered on top of prefilter, and that might have unintended + // consequences. + .prefilter(false) + .build(needles); + let ac = match result { + Ok(ac) => ac, + Err(_err) => { + debug!("aho-corasick prefilter failed to build: {_err}"); + return None; + } + }; + Some(AhoCorasick { ac }) + } + } +} + +impl PrefilterI for AhoCorasick { + fn find(&self, haystack: &[u8], span: Span) -> Option { + #[cfg(not(feature = "perf-literal-multisubstring"))] + { + unreachable!() + } + #[cfg(feature = "perf-literal-multisubstring")] + { + let input = + aho_corasick::Input::new(haystack).span(span.start..span.end); + self.ac + .find(input) + .map(|m| Span { start: m.start(), end: m.end() }) + } + } + + fn prefix(&self, haystack: &[u8], span: Span) -> Option { + #[cfg(not(feature = "perf-literal-multisubstring"))] + { + unreachable!() + } + #[cfg(feature = "perf-literal-multisubstring")] + { + let input = aho_corasick::Input::new(haystack) + .anchored(aho_corasick::Anchored::Yes) + .span(span.start..span.end); + self.ac + .find(input) + .map(|m| Span { start: m.start(), end: m.end() }) + } + } + + fn memory_usage(&self) -> usize { + #[cfg(not(feature = "perf-literal-multisubstring"))] + { + unreachable!() + } + #[cfg(feature = "perf-literal-multisubstring")] + { + self.ac.memory_usage() + } + } + + fn is_fast(&self) -> bool { + #[cfg(not(feature = "perf-literal-multisubstring"))] + { + unreachable!() + } + #[cfg(feature = "perf-literal-multisubstring")] + { + // Aho-Corasick is never considered "fast" because it's never + // going to be even close to an order of magnitude faster than the + // regex engine itself (assuming a DFA is used). In fact, it is + // usually slower. The magic of Aho-Corasick is that it can search + // a *large* number of literals with a relatively small amount of + // memory. The regex engines are far more wasteful. + // + // Aho-Corasick may be "fast" when the regex engine corresponds + // to, say, the PikeVM. That happens when the lazy DFA couldn't be + // built or used for some reason. But in these cases, the regex + // itself is likely quite big and we're probably hosed no matter + // what we do. (In this case, the best bet is for the caller to + // increase some of the memory limits on the hybrid cache capacity + // and hope that's enough.) 
+ false + } + } +} diff --git a/vendor/regex-automata/src/util/prefilter/byteset.rs b/vendor/regex-automata/src/util/prefilter/byteset.rs new file mode 100644 index 00000000000000..a669d6c9d7b696 --- /dev/null +++ b/vendor/regex-automata/src/util/prefilter/byteset.rs @@ -0,0 +1,58 @@ +use crate::util::{ + prefilter::PrefilterI, + search::{MatchKind, Span}, +}; + +#[derive(Clone, Debug)] +pub(crate) struct ByteSet([bool; 256]); + +impl ByteSet { + pub(crate) fn new>( + _kind: MatchKind, + needles: &[B], + ) -> Option { + #[cfg(not(feature = "perf-literal-multisubstring"))] + { + None + } + #[cfg(feature = "perf-literal-multisubstring")] + { + let mut set = [false; 256]; + for needle in needles.iter() { + let needle = needle.as_ref(); + if needle.len() != 1 { + return None; + } + set[usize::from(needle[0])] = true; + } + Some(ByteSet(set)) + } + } +} + +impl PrefilterI for ByteSet { + fn find(&self, haystack: &[u8], span: Span) -> Option { + haystack[span].iter().position(|&b| self.0[usize::from(b)]).map(|i| { + let start = span.start + i; + let end = start + 1; + Span { start, end } + }) + } + + fn prefix(&self, haystack: &[u8], span: Span) -> Option { + let b = *haystack.get(span.start)?; + if self.0[usize::from(b)] { + Some(Span { start: span.start, end: span.start + 1 }) + } else { + None + } + } + + fn memory_usage(&self) -> usize { + 0 + } + + fn is_fast(&self) -> bool { + false + } +} diff --git a/vendor/regex-automata/src/util/prefilter/memchr.rs b/vendor/regex-automata/src/util/prefilter/memchr.rs new file mode 100644 index 00000000000000..3d44b837219060 --- /dev/null +++ b/vendor/regex-automata/src/util/prefilter/memchr.rs @@ -0,0 +1,186 @@ +use crate::util::{ + prefilter::PrefilterI, + search::{MatchKind, Span}, +}; + +#[derive(Clone, Debug)] +pub(crate) struct Memchr(u8); + +impl Memchr { + pub(crate) fn new>( + _kind: MatchKind, + needles: &[B], + ) -> Option { + #[cfg(not(feature = "perf-literal-substring"))] + { + None + } + #[cfg(feature = "perf-literal-substring")] + { + if needles.len() != 1 { + return None; + } + if needles[0].as_ref().len() != 1 { + return None; + } + Some(Memchr(needles[0].as_ref()[0])) + } + } +} + +impl PrefilterI for Memchr { + fn find(&self, haystack: &[u8], span: Span) -> Option { + #[cfg(not(feature = "perf-literal-substring"))] + { + unreachable!() + } + #[cfg(feature = "perf-literal-substring")] + { + memchr::memchr(self.0, &haystack[span]).map(|i| { + let start = span.start + i; + let end = start + 1; + Span { start, end } + }) + } + } + + fn prefix(&self, haystack: &[u8], span: Span) -> Option { + let b = *haystack.get(span.start)?; + if self.0 == b { + Some(Span { start: span.start, end: span.start + 1 }) + } else { + None + } + } + + fn memory_usage(&self) -> usize { + 0 + } + + fn is_fast(&self) -> bool { + true + } +} + +#[derive(Clone, Debug)] +pub(crate) struct Memchr2(u8, u8); + +impl Memchr2 { + pub(crate) fn new>( + _kind: MatchKind, + needles: &[B], + ) -> Option { + #[cfg(not(feature = "perf-literal-substring"))] + { + None + } + #[cfg(feature = "perf-literal-substring")] + { + if needles.len() != 2 { + return None; + } + if !needles.iter().all(|n| n.as_ref().len() == 1) { + return None; + } + let b1 = needles[0].as_ref()[0]; + let b2 = needles[1].as_ref()[0]; + Some(Memchr2(b1, b2)) + } + } +} + +impl PrefilterI for Memchr2 { + fn find(&self, haystack: &[u8], span: Span) -> Option { + #[cfg(not(feature = "perf-literal-substring"))] + { + unreachable!() + } + #[cfg(feature = "perf-literal-substring")] + { + memchr::memchr2(self.0, 
self.1, &haystack[span]).map(|i| { + let start = span.start + i; + let end = start + 1; + Span { start, end } + }) + } + } + + fn prefix(&self, haystack: &[u8], span: Span) -> Option { + let b = *haystack.get(span.start)?; + if self.0 == b || self.1 == b { + Some(Span { start: span.start, end: span.start + 1 }) + } else { + None + } + } + + fn memory_usage(&self) -> usize { + 0 + } + + fn is_fast(&self) -> bool { + true + } +} + +#[derive(Clone, Debug)] +pub(crate) struct Memchr3(u8, u8, u8); + +impl Memchr3 { + pub(crate) fn new>( + _kind: MatchKind, + needles: &[B], + ) -> Option { + #[cfg(not(feature = "perf-literal-substring"))] + { + None + } + #[cfg(feature = "perf-literal-substring")] + { + if needles.len() != 3 { + return None; + } + if !needles.iter().all(|n| n.as_ref().len() == 1) { + return None; + } + let b1 = needles[0].as_ref()[0]; + let b2 = needles[1].as_ref()[0]; + let b3 = needles[2].as_ref()[0]; + Some(Memchr3(b1, b2, b3)) + } + } +} + +impl PrefilterI for Memchr3 { + fn find(&self, haystack: &[u8], span: Span) -> Option { + #[cfg(not(feature = "perf-literal-substring"))] + { + unreachable!() + } + #[cfg(feature = "perf-literal-substring")] + { + memchr::memchr3(self.0, self.1, self.2, &haystack[span]).map(|i| { + let start = span.start + i; + let end = start + 1; + Span { start, end } + }) + } + } + + fn prefix(&self, haystack: &[u8], span: Span) -> Option { + let b = *haystack.get(span.start)?; + if self.0 == b || self.1 == b || self.2 == b { + Some(Span { start: span.start, end: span.start + 1 }) + } else { + None + } + } + + fn memory_usage(&self) -> usize { + 0 + } + + fn is_fast(&self) -> bool { + true + } +} diff --git a/vendor/regex-automata/src/util/prefilter/memmem.rs b/vendor/regex-automata/src/util/prefilter/memmem.rs new file mode 100644 index 00000000000000..deea17bd9ded14 --- /dev/null +++ b/vendor/regex-automata/src/util/prefilter/memmem.rs @@ -0,0 +1,88 @@ +use crate::util::{ + prefilter::PrefilterI, + search::{MatchKind, Span}, +}; + +#[derive(Clone, Debug)] +pub(crate) struct Memmem { + #[cfg(not(all(feature = "std", feature = "perf-literal-substring")))] + _unused: (), + #[cfg(all(feature = "std", feature = "perf-literal-substring"))] + finder: memchr::memmem::Finder<'static>, +} + +impl Memmem { + pub(crate) fn new>( + _kind: MatchKind, + needles: &[B], + ) -> Option { + #[cfg(not(all(feature = "std", feature = "perf-literal-substring")))] + { + None + } + #[cfg(all(feature = "std", feature = "perf-literal-substring"))] + { + if needles.len() != 1 { + return None; + } + let needle = needles[0].as_ref(); + let finder = memchr::memmem::Finder::new(needle).into_owned(); + Some(Memmem { finder }) + } + } +} + +impl PrefilterI for Memmem { + fn find(&self, haystack: &[u8], span: Span) -> Option { + #[cfg(not(all(feature = "std", feature = "perf-literal-substring")))] + { + unreachable!() + } + #[cfg(all(feature = "std", feature = "perf-literal-substring"))] + { + self.finder.find(&haystack[span]).map(|i| { + let start = span.start + i; + let end = start + self.finder.needle().len(); + Span { start, end } + }) + } + } + + fn prefix(&self, haystack: &[u8], span: Span) -> Option { + #[cfg(not(all(feature = "std", feature = "perf-literal-substring")))] + { + unreachable!() + } + #[cfg(all(feature = "std", feature = "perf-literal-substring"))] + { + let needle = self.finder.needle(); + if haystack[span].starts_with(needle) { + Some(Span { end: span.start + needle.len(), ..span }) + } else { + None + } + } + } + + fn memory_usage(&self) -> usize { + 
#[cfg(not(all(feature = "std", feature = "perf-literal-substring")))] + { + unreachable!() + } + #[cfg(all(feature = "std", feature = "perf-literal-substring"))] + { + self.finder.needle().len() + } + } + + fn is_fast(&self) -> bool { + #[cfg(not(all(feature = "std", feature = "perf-literal-substring")))] + { + unreachable!() + } + #[cfg(all(feature = "std", feature = "perf-literal-substring"))] + { + true + } + } +} diff --git a/vendor/regex-automata/src/util/prefilter/mod.rs b/vendor/regex-automata/src/util/prefilter/mod.rs new file mode 100644 index 00000000000000..f95adff05c882e --- /dev/null +++ b/vendor/regex-automata/src/util/prefilter/mod.rs @@ -0,0 +1,719 @@ +/*! +Defines a prefilter for accelerating regex searches. + +A prefilter can be created by building a [`Prefilter`] value. + +A prefilter represents one of the most important optimizations available for +accelerating regex searches. The idea of a prefilter is to very quickly find +candidate locations in a haystack where a regex _could_ match. Once a candidate +is found, it is then intended for the regex engine to run at that position to +determine whether the candidate is a match or a false positive. + +In the aforementioned description of the prefilter optimization also lay its +demise. Namely, if a prefilter has a high false positive rate and it produces +lots of candidates, then a prefilter can overall make a regex search slower. +It can run more slowly because more time is spent ping-ponging between the +prefilter search and the regex engine attempting to confirm each candidate as +a match. This ping-ponging has overhead that adds up, and is exacerbated by +a high false positive rate. + +Nevertheless, the optimization is still generally worth performing in most +cases. Particularly given just how much throughput can be improved. (It is not +uncommon for prefilter optimizations to improve throughput by one or two orders +of magnitude.) + +Typically a prefilter is used to find occurrences of literal prefixes from a +regex pattern, but this isn't required. A prefilter can be used to look for +suffixes or even inner literals. + +Note that as of now, prefilters throw away information about which pattern +each literal comes from. In other words, when a prefilter finds a match, +there's no way to know which pattern (or patterns) it came from. Therefore, +in order to confirm a match, you'll have to check all of the patterns by +running the full regex engine. +*/ + +mod aho_corasick; +mod byteset; +mod memchr; +mod memmem; +mod teddy; + +use core::{ + borrow::Borrow, + fmt::Debug, + panic::{RefUnwindSafe, UnwindSafe}, +}; + +#[cfg(feature = "alloc")] +use alloc::sync::Arc; + +#[cfg(feature = "syntax")] +use regex_syntax::hir::{literal, Hir}; + +use crate::util::search::{MatchKind, Span}; + +pub(crate) use crate::util::prefilter::{ + aho_corasick::AhoCorasick, + byteset::ByteSet, + memchr::{Memchr, Memchr2, Memchr3}, + memmem::Memmem, + teddy::Teddy, +}; + +/// A prefilter for accelerating regex searches. +/// +/// If you already have your literals that you want to search with, +/// then the vanilla [`Prefilter::new`] constructor is for you. But +/// if you have an [`Hir`] value from the `regex-syntax` crate, then +/// [`Prefilter::from_hir_prefix`] might be more convenient. Namely, it uses +/// the [`regex-syntax::hir::literal`](regex_syntax::hir::literal) module to +/// extract literal prefixes for you, optimize them and then select and build a +/// prefilter matcher. +/// +/// A prefilter must have **zero false negatives**. 
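The candidate-then-confirm loop this module documentation describes can be sketched with the `memchr` crate's `memmem` searcher standing in for a prefilter, and a trivial check standing in for the regex engine:

```rust
use memchr::memmem;

fn main() {
    let haystack = b"xx Bruce Wayne xx Bruce Springsteen xx";
    // The prefilter: a fast scan for a literal prefix of the pattern.
    let finder = memmem::Finder::new("Bruce ");

    let mut at = 0;
    let mut confirmed = Vec::new();
    while let Some(i) = finder.find(&haystack[at..]) {
        let candidate = at + i;
        // "Confirm" the candidate; a real regex engine would run here, and
        // false positives (like "Bruce Wayne") are simply rejected.
        if haystack[candidate..].starts_with(b"Bruce S") {
            confirmed.push(candidate);
        }
        at = candidate + 1;
    }
    assert_eq!(confirmed, vec![18]);
}
```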
However, by its very +/// nature, it may produce false positives. That is, a prefilter will never +/// skip over a position in the haystack that corresponds to a match of the +/// original regex pattern, but it *may* produce a match for a position +/// in the haystack that does *not* correspond to a match of the original +/// regex pattern. If you use either the [`Prefilter::from_hir_prefix`] or +/// [`Prefilter::from_hirs_prefix`] constructors, then this guarantee is +/// upheld for you automatically. This guarantee is not preserved if you use +/// [`Prefilter::new`] though, since it is up to the caller to provide correct +/// literal strings with respect to the original regex pattern. +/// +/// # Cloning +/// +/// It is an API guarantee that cloning a prefilter is cheap. That is, cloning +/// it will not duplicate whatever heap memory is used to represent the +/// underlying matcher. +/// +/// # Example +/// +/// This example shows how to attach a `Prefilter` to the +/// [`PikeVM`](crate::nfa::thompson::pikevm::PikeVM) in order to accelerate +/// searches. +/// +/// ``` +/// use regex_automata::{ +/// nfa::thompson::pikevm::PikeVM, +/// util::prefilter::Prefilter, +/// Match, MatchKind, +/// }; +/// +/// let pre = Prefilter::new(MatchKind::LeftmostFirst, &["Bruce "]) +/// .expect("a prefilter"); +/// let re = PikeVM::builder() +/// .configure(PikeVM::config().prefilter(Some(pre))) +/// .build(r"Bruce \w+")?; +/// let mut cache = re.create_cache(); +/// assert_eq!( +/// Some(Match::must(0, 6..23)), +/// re.find(&mut cache, "Hello Bruce Springsteen!"), +/// ); +/// # Ok::<(), Box>(()) +/// ``` +/// +/// But note that if you get your prefilter incorrect, it could lead to an +/// incorrect result! +/// +/// ``` +/// use regex_automata::{ +/// nfa::thompson::pikevm::PikeVM, +/// util::prefilter::Prefilter, +/// Match, MatchKind, +/// }; +/// +/// // This prefilter is wrong! +/// let pre = Prefilter::new(MatchKind::LeftmostFirst, &["Patti "]) +/// .expect("a prefilter"); +/// let re = PikeVM::builder() +/// .configure(PikeVM::config().prefilter(Some(pre))) +/// .build(r"Bruce \w+")?; +/// let mut cache = re.create_cache(); +/// // We find no match even though the regex does match. +/// assert_eq!( +/// None, +/// re.find(&mut cache, "Hello Bruce Springsteen!"), +/// ); +/// # Ok::<(), Box>(()) +/// ``` +#[derive(Clone, Debug)] +pub struct Prefilter { + #[cfg(not(feature = "alloc"))] + _unused: (), + #[cfg(feature = "alloc")] + pre: Arc, + #[cfg(feature = "alloc")] + is_fast: bool, + #[cfg(feature = "alloc")] + max_needle_len: usize, +} + +impl Prefilter { + /// Create a new prefilter from a sequence of needles and a corresponding + /// match semantics. + /// + /// This may return `None` for a variety of reasons, for example, if + /// a suitable prefilter could not be constructed. That might occur + /// if they are unavailable (e.g., the `perf-literal-substring` and + /// `perf-literal-multisubstring` features aren't enabled), or it might + /// occur because of heuristics or other artifacts of how the prefilter + /// works. + /// + /// Note that if you have an [`Hir`] expression, it may be more convenient + /// to use [`Prefilter::from_hir_prefix`]. It will automatically handle the + /// task of extracting prefix literals for you. + /// + /// # Example + /// + /// This example shows how match semantics can impact the matching + /// algorithm used by the prefilter. 
For this reason, it is important to + /// ensure that the match semantics given here are consistent with the + /// match semantics intended for the regular expression that the literals + /// were extracted from. + /// + /// ``` + /// use regex_automata::{ + /// util::{prefilter::Prefilter, syntax}, + /// MatchKind, Span, + /// }; + /// + /// let hay = "Hello samwise"; + /// + /// // With leftmost-first, we find 'samwise' here because it comes + /// // before 'sam' in the sequence we give it.. + /// let pre = Prefilter::new(MatchKind::LeftmostFirst, &["samwise", "sam"]) + /// .expect("a prefilter"); + /// assert_eq!( + /// Some(Span::from(6..13)), + /// pre.find(hay.as_bytes(), Span::from(0..hay.len())), + /// ); + /// // Still with leftmost-first but with the literals reverse, now 'sam' + /// // will match instead! + /// let pre = Prefilter::new(MatchKind::LeftmostFirst, &["sam", "samwise"]) + /// .expect("a prefilter"); + /// assert_eq!( + /// Some(Span::from(6..9)), + /// pre.find(hay.as_bytes(), Span::from(0..hay.len())), + /// ); + /// + /// # Ok::<(), Box>(()) + /// ``` + pub fn new>( + kind: MatchKind, + needles: &[B], + ) -> Option { + Choice::new(kind, needles).and_then(|choice| { + let max_needle_len = + needles.iter().map(|b| b.as_ref().len()).max().unwrap_or(0); + Prefilter::from_choice(choice, max_needle_len) + }) + } + + /// This turns a prefilter selection into a `Prefilter`. That is, in turns + /// the enum given into a trait object. + fn from_choice( + choice: Choice, + max_needle_len: usize, + ) -> Option { + #[cfg(not(feature = "alloc"))] + { + None + } + #[cfg(feature = "alloc")] + { + let pre: Arc = match choice { + Choice::Memchr(p) => Arc::new(p), + Choice::Memchr2(p) => Arc::new(p), + Choice::Memchr3(p) => Arc::new(p), + Choice::Memmem(p) => Arc::new(p), + Choice::Teddy(p) => Arc::new(p), + Choice::ByteSet(p) => Arc::new(p), + Choice::AhoCorasick(p) => Arc::new(p), + }; + let is_fast = pre.is_fast(); + Some(Prefilter { pre, is_fast, max_needle_len }) + } + } + + /// This attempts to extract prefixes from the given `Hir` expression for + /// the given match semantics, and if possible, builds a prefilter for + /// them. + /// + /// # Example + /// + /// This example shows how to build a prefilter directly from an [`Hir`] + /// expression, and use to find an occurrence of a prefix from the regex + /// pattern. + /// + /// ``` + /// use regex_automata::{ + /// util::{prefilter::Prefilter, syntax}, + /// MatchKind, Span, + /// }; + /// + /// let hir = syntax::parse(r"(Bruce|Patti) \w+")?; + /// let pre = Prefilter::from_hir_prefix(MatchKind::LeftmostFirst, &hir) + /// .expect("a prefilter"); + /// let hay = "Hello Patti Scialfa!"; + /// assert_eq!( + /// Some(Span::from(6..12)), + /// pre.find(hay.as_bytes(), Span::from(0..hay.len())), + /// ); + /// + /// # Ok::<(), Box>(()) + /// ``` + #[cfg(feature = "syntax")] + pub fn from_hir_prefix(kind: MatchKind, hir: &Hir) -> Option { + Prefilter::from_hirs_prefix(kind, &[hir]) + } + + /// This attempts to extract prefixes from the given `Hir` expressions for + /// the given match semantics, and if possible, builds a prefilter for + /// them. + /// + /// Note that as of now, prefilters throw away information about which + /// pattern each literal comes from. In other words, when a prefilter finds + /// a match, there's no way to know which pattern (or patterns) it came + /// from. Therefore, in order to confirm a match, you'll have to check all + /// of the patterns by running the full regex engine. 
+ /// + /// # Example + /// + /// This example shows how to build a prefilter directly from multiple + /// `Hir` expressions expression, and use it to find an occurrence of a + /// prefix from the regex patterns. + /// + /// ``` + /// use regex_automata::{ + /// util::{prefilter::Prefilter, syntax}, + /// MatchKind, Span, + /// }; + /// + /// let hirs = syntax::parse_many(&[ + /// r"(Bruce|Patti) \w+", + /// r"Mrs?\. Doubtfire", + /// ])?; + /// let pre = Prefilter::from_hirs_prefix(MatchKind::LeftmostFirst, &hirs) + /// .expect("a prefilter"); + /// let hay = "Hello Mrs. Doubtfire"; + /// assert_eq!( + /// Some(Span::from(6..20)), + /// pre.find(hay.as_bytes(), Span::from(0..hay.len())), + /// ); + /// + /// # Ok::<(), Box>(()) + /// ``` + #[cfg(feature = "syntax")] + pub fn from_hirs_prefix>( + kind: MatchKind, + hirs: &[H], + ) -> Option { + prefixes(kind, hirs) + .literals() + .and_then(|lits| Prefilter::new(kind, lits)) + } + + /// Run this prefilter on `haystack[span.start..end]` and return a matching + /// span if one exists. + /// + /// The span returned is guaranteed to have a start position greater than + /// or equal to the one given, and an end position less than or equal to + /// the one given. + /// + /// # Example + /// + /// This example shows how to build a prefilter directly from an [`Hir`] + /// expression, and use it to find an occurrence of a prefix from the regex + /// pattern. + /// + /// ``` + /// use regex_automata::{ + /// util::{prefilter::Prefilter, syntax}, + /// MatchKind, Span, + /// }; + /// + /// let hir = syntax::parse(r"Bruce \w+")?; + /// let pre = Prefilter::from_hir_prefix(MatchKind::LeftmostFirst, &hir) + /// .expect("a prefilter"); + /// let hay = "Hello Bruce Springsteen!"; + /// assert_eq!( + /// Some(Span::from(6..12)), + /// pre.find(hay.as_bytes(), Span::from(0..hay.len())), + /// ); + /// + /// # Ok::<(), Box>(()) + /// ``` + #[inline] + pub fn find(&self, haystack: &[u8], span: Span) -> Option { + #[cfg(not(feature = "alloc"))] + { + unreachable!() + } + #[cfg(feature = "alloc")] + { + self.pre.find(haystack, span) + } + } + + /// Returns the span of a prefix of `haystack[span.start..span.end]` if + /// the prefilter matches. + /// + /// The span returned is guaranteed to have a start position equivalent to + /// the one given, and an end position less than or equal to the one given. + /// + /// # Example + /// + /// This example shows how to build a prefilter directly from an [`Hir`] + /// expression, and use it to find an occurrence of a prefix from the regex + /// pattern that begins at the start of a haystack only. + /// + /// ``` + /// use regex_automata::{ + /// util::{prefilter::Prefilter, syntax}, + /// MatchKind, Span, + /// }; + /// + /// let hir = syntax::parse(r"Bruce \w+")?; + /// let pre = Prefilter::from_hir_prefix(MatchKind::LeftmostFirst, &hir) + /// .expect("a prefilter"); + /// let hay = "Hello Bruce Springsteen!"; + /// // Nothing is found here because 'Bruce' does + /// // not occur at the beginning of our search. + /// assert_eq!( + /// None, + /// pre.prefix(hay.as_bytes(), Span::from(0..hay.len())), + /// ); + /// // But if we change where we start the search + /// // to begin where 'Bruce ' begins, then a + /// // match will be found. 
+ /// assert_eq!( + /// Some(Span::from(6..12)), + /// pre.prefix(hay.as_bytes(), Span::from(6..hay.len())), + /// ); + /// + /// # Ok::<(), Box>(()) + /// ``` + #[inline] + pub fn prefix(&self, haystack: &[u8], span: Span) -> Option { + #[cfg(not(feature = "alloc"))] + { + unreachable!() + } + #[cfg(feature = "alloc")] + { + self.pre.prefix(haystack, span) + } + } + + /// Returns the heap memory, in bytes, used by the underlying prefilter. + #[inline] + pub fn memory_usage(&self) -> usize { + #[cfg(not(feature = "alloc"))] + { + unreachable!() + } + #[cfg(feature = "alloc")] + { + self.pre.memory_usage() + } + } + + /// Return the length of the longest needle + /// in this Prefilter + #[inline] + pub fn max_needle_len(&self) -> usize { + #[cfg(not(feature = "alloc"))] + { + unreachable!() + } + #[cfg(feature = "alloc")] + { + self.max_needle_len + } + } + + /// Implementations might return true here if they believe themselves to + /// be "fast." The concept of "fast" is deliberately left vague, but in + /// practice this usually corresponds to whether it's believed that SIMD + /// will be used. + /// + /// Why do we care about this? Well, some prefilter tricks tend to come + /// with their own bits of overhead, and so might only make sense if we + /// know that a scan will be *much* faster than the regex engine itself. + /// Otherwise, the trick may not be worth doing. Whether something is + /// "much" faster than the regex engine generally boils down to whether + /// SIMD is used. (But not always. Even a SIMD matcher with a high false + /// positive rate can become quite slow.) + /// + /// Even if this returns true, it is still possible for the prefilter to + /// be "slow." Remember, prefilters are just heuristics. We can't really + /// *know* a prefilter will be fast without actually trying the prefilter. + /// (Which of course we cannot afford to do.) + #[inline] + pub fn is_fast(&self) -> bool { + #[cfg(not(feature = "alloc"))] + { + unreachable!() + } + #[cfg(feature = "alloc")] + { + self.is_fast + } + } +} + +/// A trait for abstracting over prefilters. Basically, a prefilter is +/// something that do an unanchored *and* an anchored search in a haystack +/// within a given span. +/// +/// This exists pretty much only so that we can use prefilters as a trait +/// object (which is what `Prefilter` is). If we ever move off of trait objects +/// and to an enum, then it's likely this trait could be removed. +pub(crate) trait PrefilterI: + Debug + Send + Sync + RefUnwindSafe + UnwindSafe + 'static +{ + /// Run this prefilter on `haystack[span.start..end]` and return a matching + /// span if one exists. + /// + /// The span returned is guaranteed to have a start position greater than + /// or equal to the one given, and an end position less than or equal to + /// the one given. + fn find(&self, haystack: &[u8], span: Span) -> Option; + + /// Returns the span of a prefix of `haystack[span.start..span.end]` if + /// the prefilter matches. + /// + /// The span returned is guaranteed to have a start position equivalent to + /// the one given, and an end position less than or equal to the one given. + fn prefix(&self, haystack: &[u8], span: Span) -> Option; + + /// Returns the heap memory, in bytes, used by the underlying prefilter. + fn memory_usage(&self) -> usize; + + /// Implementations might return true here if they believe themselves to + /// be "fast." See [`Prefilter::is_fast`] for more details. + fn is_fast(&self) -> bool; +} + +#[cfg(feature = "alloc")] +impl PrefilterI for Arc

+.             any character except new line (includes new line with s flag)
+[0-9]         any ASCII digit
+\d            digit (\p{Nd})
+\D            not digit
+\pX           Unicode character class identified by a one-letter name
+\p{Greek}     Unicode character class (general category or script)
+\PX           Negated Unicode character class identified by a one-letter name
+\P{Greek}     negated Unicode character class (general category or script)
+
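+
+For illustration, here is a quick sketch (using the top-level [`Regex`] API)
+of the difference between the ASCII-only `[0-9]` class and the Unicode-aware
+`\d` class from the table above:
+
+```rust
+use regex::Regex;
+
+// `\d` is Unicode-aware by default and matches any `\p{Nd}` codepoint...
+let re = Regex::new(r"\d").unwrap();
+assert!(re.is_match("٣")); // ARABIC-INDIC DIGIT THREE
+// ...while `[0-9]` only covers the ASCII digits.
+let re = Regex::new(r"[0-9]").unwrap();
+assert!(!re.is_match("٣"));
+```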
+ +### Character classes + +
+[xyz]         A character class matching either x, y or z (union).
+[^xyz]        A character class matching any character except x, y and z.
+[a-z]         A character class matching any character in range a-z.
+[[:alpha:]]   ASCII character class ([A-Za-z])
+[[:^alpha:]]  Negated ASCII character class ([^A-Za-z])
+[x[^xyz]]     Nested/grouping character class (matching any character except y and z)
+[a-y&&xyz]    Intersection (matching x or y)
+[0-9&&[^4]]   Subtraction using intersection and negation (matching 0-9 except 4)
+[0-9--4]      Direct subtraction (matching 0-9 except 4)
+[a-g~~b-h]    Symmetric difference (matching `a` and `h` only)
+[\[\]]        Escaping in character classes (matching [ or ])
+[a&&b]        An empty character class matching nothing
+
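+
+These set operations compose with ordinary classes. As a small illustrative
+sketch, the "subtraction using intersection and negation" row above can be
+exercised like this:
+
+```rust
+use regex::Regex;
+
+// `[0-9&&[^4]]` is the intersection of `[0-9]` with "not 4",
+// i.e. every ASCII digit except `4`.
+let re = Regex::new(r"^[0-9&&[^4]]$").unwrap();
+assert!(re.is_match("7"));
+assert!(!re.is_match("4"));
+```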
+ +Any named character class may appear inside a bracketed `[...]` character +class. For example, `[\p{Greek}[:digit:]]` matches any ASCII digit or any +codepoint in the `Greek` script. `[\p{Greek}&&\pL]` matches Greek letters. + +Precedence in character classes, from most binding to least: + +1. Ranges: `[a-cd]` == `[[a-c]d]` +2. Union: `[ab&&bc]` == `[[ab]&&[bc]]` +3. Intersection, difference, symmetric difference. All three have equivalent +precedence, and are evaluated in left-to-right order. For example, +`[\pL--\p{Greek}&&\p{Uppercase}]` == `[[\pL--\p{Greek}]&&\p{Uppercase}]`. +4. Negation: `[^a-z&&b]` == `[^[a-z&&b]]`. + +### Composites + +
+xy    concatenation (x followed by y)
+x|y   alternation (x or y, prefer x)
+
+ +This example shows how an alternation works, and what it means to prefer a +branch in the alternation over subsequent branches. + +``` +use regex::Regex; + +let haystack = "samwise"; +// If 'samwise' comes first in our alternation, then it is +// preferred as a match, even if the regex engine could +// technically detect that 'sam' led to a match earlier. +let re = Regex::new(r"samwise|sam").unwrap(); +assert_eq!("samwise", re.find(haystack).unwrap().as_str()); +// But if 'sam' comes first, then it will match instead. +// In this case, it is impossible for 'samwise' to match +// because 'sam' is a prefix of it. +let re = Regex::new(r"sam|samwise").unwrap(); +assert_eq!("sam", re.find(haystack).unwrap().as_str()); +``` + +### Repetitions + +
+x*        zero or more of x (greedy)
+x+        one or more of x (greedy)
+x?        zero or one of x (greedy)
+x*?       zero or more of x (ungreedy/lazy)
+x+?       one or more of x (ungreedy/lazy)
+x??       zero or one of x (ungreedy/lazy)
+x{n,m}    at least n x and at most m x (greedy)
+x{n,}     at least n x (greedy)
+x{n}      exactly n x
+x{n,m}?   at least n x and at most m x (ungreedy/lazy)
+x{n,}?    at least n x (ungreedy/lazy)
+x{n}?     exactly n x
+
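+
+As a brief illustration of greedy versus lazy (ungreedy) repetition, note how
+the same haystack yields different match lengths in this sketch:
+
+```rust
+use regex::Regex;
+
+// Greedy: `a+` consumes as many `a`s as possible.
+let re = Regex::new(r"a+").unwrap();
+assert_eq!("aaa", re.find("aaa").unwrap().as_str());
+// Lazy: `a+?` stops as soon as the minimum has been matched.
+let re = Regex::new(r"a+?").unwrap();
+assert_eq!("a", re.find("aaa").unwrap().as_str());
+```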
+ +### Empty matches + +
+^               the beginning of a haystack (or start-of-line with multi-line mode)
+$               the end of a haystack (or end-of-line with multi-line mode)
+\A              only the beginning of a haystack (even with multi-line mode enabled)
+\z              only the end of a haystack (even with multi-line mode enabled)
+\b              a Unicode word boundary (\w on one side and \W, \A, or \z on other)
+\B              not a Unicode word boundary
+\b{start}, \<   a Unicode start-of-word boundary (\W|\A on the left, \w on the right)
+\b{end}, \>     a Unicode end-of-word boundary (\w on the left, \W|\z on the right)
+\b{start-half}  half of a Unicode start-of-word boundary (\W|\A on the left)
+\b{end-half}    half of a Unicode end-of-word boundary (\W|\z on the right)
+
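+
+For example, this small sketch shows the word boundary assertion `\b` from the
+table above matching at word edges but not inside a word:
+
+```rust
+use regex::Regex;
+
+// `\b` matches between a word character and a non-word character
+// (or the start/end of the haystack).
+let re = Regex::new(r"\bcat\b").unwrap();
+assert!(re.is_match("a cat sat"));
+assert!(!re.is_match("concatenate"));
+```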
+ +The empty regex is valid and matches the empty string. For example, the +empty regex matches `abc` at positions `0`, `1`, `2` and `3`. When using the +top-level [`Regex`] on `&str` haystacks, an empty match that splits a codepoint +is guaranteed to never be returned. However, such matches are permitted when +using a [`bytes::Regex`]. For example: + +```rust +let re = regex::Regex::new(r"").unwrap(); +let ranges: Vec<_> = re.find_iter("💩").map(|m| m.range()).collect(); +assert_eq!(ranges, vec![0..0, 4..4]); + +let re = regex::bytes::Regex::new(r"").unwrap(); +let ranges: Vec<_> = re.find_iter("💩".as_bytes()).map(|m| m.range()).collect(); +assert_eq!(ranges, vec![0..0, 1..1, 2..2, 3..3, 4..4]); +``` + +Note that an empty regex is distinct from a regex that can never match. +For example, the regex `[a&&b]` is a character class that represents the +intersection of `a` and `b`. That intersection is empty, which means the +character class is empty. Since nothing is in the empty set, `[a&&b]` matches +nothing, not even the empty string. + +### Grouping and flags + +
+(exp)          numbered capture group (indexed by opening parenthesis)
+(?P<name>exp)  named (also numbered) capture group (names must be alpha-numeric)
+(?<name>exp)   named (also numbered) capture group (names must be alpha-numeric)
+(?:exp)        non-capturing group
+(?flags)       set flags within current group
+(?flags:exp)   set flags for exp (non-capturing)
+
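+
+As a quick sketch of named capture groups (the group names here are arbitrary
+and chosen just for illustration):
+
+```rust
+use regex::Regex;
+
+// Named groups can be accessed by name as well as by index.
+let re = Regex::new(r"(?<year>[0-9]{4})-(?<month>[0-9]{2})").unwrap();
+let caps = re.captures("released 2010-03").unwrap();
+assert_eq!(&caps["year"], "2010");
+assert_eq!(&caps["month"], "03");
+assert_eq!(caps.get(2).unwrap().as_str(), "03");
+```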
+ +Capture group names must be any sequence of alpha-numeric Unicode codepoints, +in addition to `.`, `_`, `[` and `]`. Names must start with either an `_` or +an alphabetic codepoint. Alphabetic codepoints correspond to the `Alphabetic` +Unicode property, while numeric codepoints correspond to the union of the +`Decimal_Number`, `Letter_Number` and `Other_Number` general categories. + +Flags are each a single character. For example, `(?x)` sets the flag `x` +and `(?-x)` clears the flag `x`. Multiple flags can be set or cleared at +the same time: `(?xy)` sets both the `x` and `y` flags and `(?x-y)` sets +the `x` flag and clears the `y` flag. + +All flags are by default disabled unless stated otherwise. They are: + +
+i     case-insensitive: letters match both upper and lower case
+m     multi-line mode: ^ and $ match begin/end of line
+s     allow . to match \n
+R     enables CRLF mode: when multi-line mode is enabled, \r\n is used
+U     swap the meaning of x* and x*?
+u     Unicode support (enabled by default)
+x     verbose mode, ignores whitespace and allows line comments (starting with `#`)
+
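+
+For illustration, here is a small sketch of the `x` (verbose mode) flag, which
+the note below describes in more detail:
+
+```rust
+use regex::Regex;
+
+// In verbose mode, whitespace in the pattern is ignored and `#` starts
+// a comment that runs to the end of the line.
+let re = Regex::new(r"(?x)
+    [0-9]{4}  # year
+    -
+    [0-9]{2}  # month
+").unwrap();
+assert!(re.is_match("2010-03"));
+```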
+ +Note that in verbose mode, whitespace is ignored everywhere, including within +character classes. To insert whitespace, use its escaped form or a hex literal. +For example, `\ ` or `\x20` for an ASCII space. + +Flags can be toggled within a pattern. Here's an example that matches +case-insensitively for the first part but case-sensitively for the second part: + +```rust +use regex::Regex; + +let re = Regex::new(r"(?i)a+(?-i)b+").unwrap(); +let m = re.find("AaAaAbbBBBb").unwrap(); +assert_eq!(m.as_str(), "AaAaAbb"); +``` + +Notice that the `a+` matches either `a` or `A`, but the `b+` only matches +`b`. + +Multi-line mode means `^` and `$` no longer match just at the beginning/end of +the input, but also at the beginning/end of lines: + +``` +use regex::Regex; + +let re = Regex::new(r"(?m)^line \d+").unwrap(); +let m = re.find("line one\nline 2\n").unwrap(); +assert_eq!(m.as_str(), "line 2"); +``` + +Note that `^` matches after new lines, even at the end of input: + +``` +use regex::Regex; + +let re = Regex::new(r"(?m)^").unwrap(); +let m = re.find_iter("test\n").last().unwrap(); +assert_eq!((m.start(), m.end()), (5, 5)); +``` + +When both CRLF mode and multi-line mode are enabled, then `^` and `$` will +match either `\r` or `\n`, but never in the middle of a `\r\n`: + +``` +use regex::Regex; + +let re = Regex::new(r"(?mR)^foo$").unwrap(); +let m = re.find("\r\nfoo\r\n").unwrap(); +assert_eq!(m.as_str(), "foo"); +``` + +Unicode mode can also be selectively disabled, although only when the result +*would not* match invalid UTF-8. One good example of this is using an ASCII +word boundary instead of a Unicode word boundary, which might make some regex +searches run faster: + +```rust +use regex::Regex; + +let re = Regex::new(r"(?-u:\b).+(?-u:\b)").unwrap(); +let m = re.find("$$abc$$").unwrap(); +assert_eq!(m.as_str(), "abc"); +``` + +### Escape sequences + +Note that this includes all possible escape sequences, even ones that are +documented elsewhere. + +
+\*              literal *, applies to all ASCII except [0-9A-Za-z<>]
+\a              bell (\x07)
+\f              form feed (\x0C)
+\t              horizontal tab
+\n              new line
+\r              carriage return
+\v              vertical tab (\x0B)
+\A              matches at the beginning of a haystack
+\z              matches at the end of a haystack
+\b              word boundary assertion
+\B              negated word boundary assertion
+\b{start}, \<   start-of-word boundary assertion
+\b{end}, \>     end-of-word boundary assertion
+\b{start-half}  half of a start-of-word boundary assertion
+\b{end-half}    half of an end-of-word boundary assertion
+\123            octal character code, up to three digits (when enabled)
+\x7F            hex character code (exactly two digits)
+\x{10FFFF}      any hex character code corresponding to a Unicode code point
+\u007F          hex character code (exactly four digits)
+\u{7F}          any hex character code corresponding to a Unicode code point
+\U0000007F      hex character code (exactly eight digits)
+\U{7F}          any hex character code corresponding to a Unicode code point
+\p{Letter}      Unicode character class
+\P{Letter}      negated Unicode character class
+\d, \s, \w      Perl character class
+\D, \S, \W      negated Perl character class
+
+ +### Perl character classes (Unicode friendly) + +These classes are based on the definitions provided in +[UTS#18](https://www.unicode.org/reports/tr18/#Compatibility_Properties): + +
+\d     digit (\p{Nd})
+\D     not digit
+\s     whitespace (\p{White_Space})
+\S     not whitespace
+\w     word character (\p{Alphabetic} + \p{M} + \d + \p{Pc} + \p{Join_Control})
+\W     not word character
+
+ +### ASCII character classes + +These classes are based on the definitions provided in +[UTS#18](https://www.unicode.org/reports/tr18/#Compatibility_Properties): + +
+[[:alnum:]]    alphanumeric ([0-9A-Za-z])
+[[:alpha:]]    alphabetic ([A-Za-z])
+[[:ascii:]]    ASCII ([\x00-\x7F])
+[[:blank:]]    blank ([\t ])
+[[:cntrl:]]    control ([\x00-\x1F\x7F])
+[[:digit:]]    digits ([0-9])
+[[:graph:]]    graphical ([!-~])
+[[:lower:]]    lower case ([a-z])
+[[:print:]]    printable ([ -~])
+[[:punct:]]    punctuation ([!-/:-@\[-`{-~])
+[[:space:]]    whitespace ([\t\n\v\f\r ])
+[[:upper:]]    upper case ([A-Z])
+[[:word:]]     word characters ([0-9A-Za-z_])
+[[:xdigit:]]   hex digit ([0-9A-Fa-f])
+
+ +# Untrusted input + +This crate is meant to be able to run regex searches on untrusted haystacks +without fear of [ReDoS]. This crate also, to a certain extent, supports +untrusted patterns. + +[ReDoS]: https://en.wikipedia.org/wiki/ReDoS + +This crate differs from most (but not all) other regex engines in that it +doesn't use unbounded backtracking to run a regex search. In those cases, +one generally cannot use untrusted patterns *or* untrusted haystacks because +it can be very difficult to know whether a particular pattern will result in +catastrophic backtracking or not. + +We'll first discuss how this crate deals with untrusted inputs and then wrap +it up with a realistic discussion about what practice really looks like. + +### Panics + +Outside of clearly documented cases, most APIs in this crate are intended to +never panic regardless of the inputs given to them. For example, `Regex::new`, +`Regex::is_match`, `Regex::find` and `Regex::captures` should never panic. That +is, it is an API promise that those APIs will never panic no matter what inputs +are given to them. With that said, regex engines are complicated beasts, and +providing a rock solid guarantee that these APIs literally never panic is +essentially equivalent to saying, "there are no bugs in this library." That is +a bold claim, and not really one that can be feasibly made with a straight +face. + +Don't get the wrong impression here. This crate is extensively tested, not just +with unit and integration tests, but also via fuzz testing. For example, this +crate is part of the [OSS-fuzz project]. Panics should be incredibly rare, but +it is possible for bugs to exist, and thus possible for a panic to occur. If +you need a rock solid guarantee against panics, then you should wrap calls into +this library with [`std::panic::catch_unwind`]. + +It's also worth pointing out that this library will *generally* panic when +other regex engines would commit undefined behavior. When undefined behavior +occurs, your program might continue as if nothing bad has happened, but it also +might mean your program is open to the worst kinds of exploits. In contrast, +the worst thing a panic can do is a denial of service. + +[OSS-fuzz project]: https://android.googlesource.com/platform/external/oss-fuzz/+/refs/tags/android-t-preview-1/projects/rust-regex/ +[`std::panic::catch_unwind`]: https://doc.rust-lang.org/std/panic/fn.catch_unwind.html + +### Untrusted patterns + +The principal way this crate deals with them is by limiting their size by +default. The size limit can be configured via [`RegexBuilder::size_limit`]. The +idea of a size limit is that compiling a pattern into a `Regex` will fail if it +becomes "too big." Namely, while *most* resources consumed by compiling a regex +are approximately proportional (albeit with some high constant factors in some +cases, such as with Unicode character classes) to the length of the pattern +itself, there is one particular exception to this: counted repetitions. Namely, +this pattern: + +```text +a{5}{5}{5}{5}{5}{5} +``` + +Is equivalent to this pattern: + +```text +a{15625} +``` + +In both of these cases, the actual pattern string is quite small, but the +resulting `Regex` value is quite large. Indeed, as the first pattern shows, +it isn't enough to locally limit the size of each repetition because they can +be stacked in a way that results in exponential growth. 
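+
+For illustration, one way to guard against this kind of blowup is the
+[`RegexBuilder::size_limit`] knob mentioned above. In the sketch below, the
+limit value is deliberately tiny (chosen only for this example), so the
+stacked repetition is rejected at compile time rather than expanded:
+
+```rust
+use regex::RegexBuilder;
+
+// The pattern parses fine, but the expanded repetition would compile to a
+// large program, so a small size limit causes `build` to return an error.
+let result = RegexBuilder::new("a{5}{5}{5}{5}{5}{5}")
+    .size_limit(1 << 10) // about 1KB; the default limit is much larger
+    .build();
+assert!(result.is_err());
+```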
+ +To provide a bit more context, a simplified view of regex compilation looks +like this: + +* The pattern string is parsed into a structured representation called an AST. +Counted repetitions are not expanded and Unicode character classes are not +looked up in this stage. That is, the size of the AST is proportional to the +size of the pattern with "reasonable" constant factors. In other words, one +can reasonably limit the memory used by an AST by limiting the length of the +pattern string. +* The AST is translated into an HIR. Counted repetitions are still *not* +expanded at this stage, but Unicode character classes are embedded into the +HIR. The memory usage of a HIR is still proportional to the length of the +original pattern string, but the constant factors---mostly as a result of +Unicode character classes---can be quite high. Still though, the memory used by +an HIR can be reasonably limited by limiting the length of the pattern string. +* The HIR is compiled into a [Thompson NFA]. This is the stage at which +something like `\w{5}` is rewritten to `\w\w\w\w\w`. Thus, this is the stage +at which [`RegexBuilder::size_limit`] is enforced. If the NFA exceeds the +configured size, then this stage will fail. + +[Thompson NFA]: https://en.wikipedia.org/wiki/Thompson%27s_construction + +The size limit helps avoid two different kinds of exorbitant resource usage: + +* It avoids permitting exponential memory usage based on the size of the +pattern string. +* It avoids long search times. This will be discussed in more detail in the +next section, but worst case search time *is* dependent on the size of the +regex. So keeping regexes limited to a reasonable size is also a way of keeping +search times reasonable. + +Finally, it's worth pointing out that regex compilation is guaranteed to take +worst case `O(m)` time, where `m` is proportional to the size of regex. The +size of the regex here is *after* the counted repetitions have been expanded. + +**Advice for those using untrusted regexes**: limit the pattern length to +something small and expand it as needed. Configure [`RegexBuilder::size_limit`] +to something small and then expand it as needed. + +### Untrusted haystacks + +The main way this crate guards against searches from taking a long time is by +using algorithms that guarantee a `O(m * n)` worst case time and space bound. +Namely: + +* `m` is proportional to the size of the regex, where the size of the regex +includes the expansion of all counted repetitions. (See the previous section on +untrusted patterns.) +* `n` is proportional to the length, in bytes, of the haystack. + +In other words, if you consider `m` to be a constant (for example, the regex +pattern is a literal in the source code), then the search can be said to run +in "linear time." Or equivalently, "linear time with respect to the size of the +haystack." + +But the `m` factor here is important not to ignore. If a regex is +particularly big, the search times can get quite slow. This is why, in part, +[`RegexBuilder::size_limit`] exists. + +**Advice for those searching untrusted haystacks**: As long as your regexes +are not enormous, you should expect to be able to search untrusted haystacks +without fear. If you aren't sure, you should benchmark it. Unlike backtracking +engines, if your regex is so big that it's likely to result in slow searches, +this is probably something you'll be able to observe regardless of what the +haystack is made up of. 
+ +### Iterating over matches + +One thing that is perhaps easy to miss is that the worst case time +complexity bound of `O(m * n)` applies to methods like [`Regex::is_match`], +[`Regex::find`] and [`Regex::captures`]. It does **not** apply to +[`Regex::find_iter`] or [`Regex::captures_iter`]. Namely, since iterating over +all matches can execute many searches, and each search can scan the entire +haystack, the worst case time complexity for iterators is `O(m * n^2)`. + +One example of where this occurs is when a pattern consists of an alternation, +where an earlier branch of the alternation requires scanning the entire +haystack only to discover that there is no match. It also requires a later +branch of the alternation to have matched at the beginning of the search. For +example, consider the pattern `.*[^A-Z]|[A-Z]` and the haystack `AAAAA`. The +first search will scan to the end looking for matches of `.*[^A-Z]` even though +a finite automata engine (as in this crate) knows that `[A-Z]` has already +matched the first character of the haystack. This is due to the greedy nature +of regex searching. That first search will report a match at the first `A` only +after scanning to the end to discover that no other match exists. The next +search then begins at the second `A` and the behavior repeats. + +There is no way to avoid this. This means that if both patterns and haystacks +are untrusted and you're iterating over all matches, you're susceptible to +worst case quadratic time complexity. One possible way to mitigate this +is to drop down to the lower level `regex-automata` crate and use its +`meta::Regex` iterator APIs. There, you can configure the search to operate +in "earliest" mode by passing a `Input::new(haystack).earliest(true)` to +`meta::Regex::find_iter` (for example). By enabling this mode, you give up +the normal greedy match semantics of regex searches and instead ask the regex +engine to immediately stop as soon as a match has been found. Enabling this +mode will thus restore the worst case `O(m * n)` time complexity bound, but at +the cost of different semantics. + +### Untrusted inputs in practice + +While providing a `O(m * n)` worst case time bound on all searches goes a long +way toward preventing [ReDoS], that doesn't mean every search you can possibly +run will complete without burning CPU time. In general, there are a few ways +for the `m * n` time bound to still bite you: + +* You are searching an exceptionally long haystack. No matter how you slice +it, a longer haystack will take more time to search. This crate may often make +very quick work of even long haystacks because of its literal optimizations, +but those aren't available for all regexes. +* Unicode character classes can cause searches to be quite slow in some cases. +This is especially true when they are combined with counted repetitions. While +the regex size limit above will protect you from the most egregious cases, +the default size limit still permits pretty big regexes that can execute more +slowly than one might expect. +* While routines like [`Regex::find`] and [`Regex::captures`] guarantee +worst case `O(m * n)` search time, routines like [`Regex::find_iter`] and +[`Regex::captures_iter`] actually have worst case `O(m * n^2)` search time. +This is because `find_iter` runs many searches, and each search takes worst +case `O(m * n)` time. Thus, iteration of all matches in a haystack has +worst case `O(m * n^2)`. 
A good example of a pattern that exhibits this is +`(?:A+){1000}|` or even `.*[^A-Z]|[A-Z]`. + +In general, untrusted haystacks are easier to stomach than untrusted patterns. +Untrusted patterns give a lot more control to the caller to impact the +performance of a search. In many cases, a regex search will actually execute in +average case `O(n)` time (i.e., not dependent on the size of the regex), but +this can't be guaranteed in general. Therefore, permitting untrusted patterns +means that your only line of defense is to put a limit on how big `m` (and +perhaps also `n`) can be in `O(m * n)`. `n` is limited by simply inspecting +the length of the haystack while `m` is limited by *both* applying a limit to +the length of the pattern *and* a limit on the compiled size of the regex via +[`RegexBuilder::size_limit`]. + +It bears repeating: if you're accepting untrusted patterns, it would be a good +idea to start with conservative limits on `m` and `n`, and then carefully +increase them as needed. + +# Crate features + +By default, this crate tries pretty hard to make regex matching both as fast +as possible and as correct as it can be. This means that there is a lot of +code dedicated to performance, the handling of Unicode data and the Unicode +data itself. Overall, this leads to more dependencies, larger binaries and +longer compile times. This trade off may not be appropriate in all cases, and +indeed, even when all Unicode and performance features are disabled, one is +still left with a perfectly serviceable regex engine that will work well in +many cases. (Note that code is not arbitrarily reducible, and for this reason, +the [`regex-lite`](https://docs.rs/regex-lite) crate exists to provide an even +more minimal experience by cutting out Unicode and performance, but still +maintaining the linear search time bound.) + +This crate exposes a number of features for controlling that trade off. Some +of these features are strictly performance oriented, such that disabling them +won't result in a loss of functionality, but may result in worse performance. +Other features, such as the ones controlling the presence or absence of Unicode +data, can result in a loss of functionality. For example, if one disables the +`unicode-case` feature (described below), then compiling the regex `(?i)a` +will fail since Unicode case insensitivity is enabled by default. Instead, +callers must use `(?i-u)a` to disable Unicode case folding. Stated differently, +enabling or disabling any of the features below can only add or subtract from +the total set of valid regular expressions. Enabling or disabling a feature +will never modify the match semantics of a regular expression. + +Most features below are enabled by default. Features that aren't enabled by +default are noted. + +### Ecosystem features + +* **std** - + When enabled, this will cause `regex` to use the standard library. In terms + of APIs, `std` causes error types to implement the `std::error::Error` + trait. Enabling `std` will also result in performance optimizations, + including SIMD and faster synchronization primitives. Notably, **disabling + the `std` feature will result in the use of spin locks**. To use a regex + engine without `std` and without spin locks, you'll need to drop down to + the [`regex-automata`](https://docs.rs/regex-automata) crate. +* **logging** - + When enabled, the `log` crate is used to emit messages about regex + compilation and search strategies. This is **disabled by default**. 
This is + typically only useful to someone working on this crate's internals, but might + be useful if you're doing some rabbit hole performance hacking. Or if you're + just interested in the kinds of decisions being made by the regex engine. + +### Performance features + +**Note**: + To get performance benefits offered by the SIMD, `std` must be enabled. + None of the `perf-*` features will enable `std` implicitly. + +* **perf** - + Enables all performance related features except for `perf-dfa-full`. This + feature is enabled by default is intended to cover all reasonable features + that improve performance, even if more are added in the future. +* **perf-dfa** - + Enables the use of a lazy DFA for matching. The lazy DFA is used to compile + portions of a regex to a very fast DFA on an as-needed basis. This can + result in substantial speedups, usually by an order of magnitude on large + haystacks. The lazy DFA does not bring in any new dependencies, but it can + make compile times longer. +* **perf-dfa-full** - + Enables the use of a full DFA for matching. Full DFAs are problematic because + they have worst case `O(2^n)` construction time. For this reason, when this + feature is enabled, full DFAs are only used for very small regexes and a + very small space bound is used during determinization to avoid the DFA + from blowing up. This feature is not enabled by default, even as part of + `perf`, because it results in fairly sizeable increases in binary size and + compilation time. It can result in faster search times, but they tend to be + more modest and limited to non-Unicode regexes. +* **perf-onepass** - + Enables the use of a one-pass DFA for extracting the positions of capture + groups. This optimization applies to a subset of certain types of NFAs and + represents the fastest engine in this crate for dealing with capture groups. +* **perf-backtrack** - + Enables the use of a bounded backtracking algorithm for extracting the + positions of capture groups. This usually sits between the slowest engine + (the PikeVM) and the fastest engine (one-pass DFA) for extracting capture + groups. It's used whenever the regex is not one-pass and is small enough. +* **perf-inline** - + Enables the use of aggressive inlining inside match routines. This reduces + the overhead of each match. The aggressive inlining, however, increases + compile times and binary size. +* **perf-literal** - + Enables the use of literal optimizations for speeding up matches. In some + cases, literal optimizations can result in speedups of _several_ orders of + magnitude. Disabling this drops the `aho-corasick` and `memchr` dependencies. +* **perf-cache** - + This feature used to enable a faster internal cache at the cost of using + additional dependencies, but this is no longer an option. A fast internal + cache is now used unconditionally with no additional dependencies. This may + change in the future. + +### Unicode features + +* **unicode** - + Enables all Unicode features. This feature is enabled by default, and will + always cover all Unicode features, even if more are added in the future. +* **unicode-age** - + Provide the data for the + [Unicode `Age` property](https://www.unicode.org/reports/tr44/tr44-24.html#Character_Age). + This makes it possible to use classes like `\p{Age:6.0}` to refer to all + codepoints first introduced in Unicode 6.0 +* **unicode-bool** - + Provide the data for numerous Unicode boolean properties. 
The full list + is not included here, but contains properties like `Alphabetic`, `Emoji`, + `Lowercase`, `Math`, `Uppercase` and `White_Space`. +* **unicode-case** - + Provide the data for case insensitive matching using + [Unicode's "simple loose matches" specification](https://www.unicode.org/reports/tr18/#Simple_Loose_Matches). +* **unicode-gencat** - + Provide the data for + [Unicode general categories](https://www.unicode.org/reports/tr44/tr44-24.html#General_Category_Values). + This includes, but is not limited to, `Decimal_Number`, `Letter`, + `Math_Symbol`, `Number` and `Punctuation`. +* **unicode-perl** - + Provide the data for supporting the Unicode-aware Perl character classes, + corresponding to `\w`, `\s` and `\d`. This is also necessary for using + Unicode-aware word boundary assertions. Note that if this feature is + disabled, the `\s` and `\d` character classes are still available if the + `unicode-bool` and `unicode-gencat` features are enabled, respectively. +* **unicode-script** - + Provide the data for + [Unicode scripts and script extensions](https://www.unicode.org/reports/tr24/). + This includes, but is not limited to, `Arabic`, `Cyrillic`, `Hebrew`, + `Latin` and `Thai`. +* **unicode-segment** - + Provide the data necessary to provide the properties used to implement the + [Unicode text segmentation algorithms](https://www.unicode.org/reports/tr29/). + This enables using classes like `\p{gcb=Extend}`, `\p{wb=Katakana}` and + `\p{sb=ATerm}`. + +# Other crates + +This crate has two required dependencies and several optional dependencies. +This section briefly describes them with the goal of raising awareness of how +different components of this crate may be used independently. + +It is somewhat unusual for a regex engine to have dependencies, as most regex +libraries are self contained units with no dependencies other than a particular +environment's standard library. Indeed, for other similarly optimized regex +engines, most or all of the code in the dependencies of this crate would +normally just be inseparable or coupled parts of the crate itself. But since +Rust and its tooling ecosystem make the use of dependencies so easy, it made +sense to spend some effort de-coupling parts of this crate and making them +independently useful. + +We only briefly describe each crate here. + +* [`regex-lite`](https://docs.rs/regex-lite) is not a dependency of `regex`, +but rather, a standalone zero-dependency simpler version of `regex` that +prioritizes compile times and binary size. In exchange, it eschews Unicode +support and performance. Its match semantics are as identical as possible to +the `regex` crate, and for the things it supports, its APIs are identical to +the APIs in this crate. In other words, for a lot of use cases, it is a drop-in +replacement. +* [`regex-syntax`](https://docs.rs/regex-syntax) provides a regular expression +parser via `Ast` and `Hir` types. It also provides routines for extracting +literals from a pattern. Folks can use this crate to do analysis, or even to +build their own regex engine without having to worry about writing a parser. +* [`regex-automata`](https://docs.rs/regex-automata) provides the regex engines +themselves. One of the downsides of finite automata based regex engines is that +they often need multiple internal engines in order to have similar or better +performance than an unbounded backtracking engine in practice. 
`regex-automata` +in particular provides public APIs for a PikeVM, a bounded backtracker, a +one-pass DFA, a lazy DFA, a fully compiled DFA and a meta regex engine that +combines all them together. It also has native multi-pattern support and +provides a way to compile and serialize full DFAs such that they can be loaded +and searched in a no-std no-alloc environment. `regex-automata` itself doesn't +even have a required dependency on `regex-syntax`! +* [`memchr`](https://docs.rs/memchr) provides low level SIMD vectorized +routines for quickly finding the location of single bytes or even substrings +in a haystack. In other words, it provides fast `memchr` and `memmem` routines. +These are used by this crate in literal optimizations. +* [`aho-corasick`](https://docs.rs/aho-corasick) provides multi-substring +search. It also provides SIMD vectorized routines in the case where the number +of substrings to search for is relatively small. The `regex` crate also uses +this for literal optimizations. +*/ + +#![no_std] +#![deny(missing_docs)] +#![cfg_attr(feature = "pattern", feature(pattern))] +// This adds Cargo feature annotations to items in the rustdoc output. Which is +// sadly hugely beneficial for this crate due to the number of features. +#![cfg_attr(docsrs_regex, feature(doc_cfg))] +#![warn(missing_debug_implementations)] + +#[cfg(doctest)] +doc_comment::doctest!("../README.md"); + +extern crate alloc; +#[cfg(any(test, feature = "std"))] +extern crate std; + +pub use crate::error::Error; + +pub use crate::{builders::string::*, regex::string::*, regexset::string::*}; + +mod builders; +pub mod bytes; +mod error; +mod find_byte; +#[cfg(feature = "pattern")] +mod pattern; +mod regex; +mod regexset; + +/// Escapes all regular expression meta characters in `pattern`. +/// +/// The string returned may be safely used as a literal in a regular +/// expression. 
+pub fn escape(pattern: &str) -> alloc::string::String { + regex_syntax::escape(pattern) +} diff --git a/vendor/regex/src/pattern.rs b/vendor/regex/src/pattern.rs new file mode 100644 index 00000000000000..d7bf148d5de2fb --- /dev/null +++ b/vendor/regex/src/pattern.rs @@ -0,0 +1,67 @@ +use core::str::pattern::{Pattern, SearchStep, Searcher, Utf8Pattern}; + +use crate::{Matches, Regex}; + +#[derive(Debug)] +pub struct RegexSearcher<'r, 't> { + haystack: &'t str, + it: Matches<'r, 't>, + last_step_end: usize, + next_match: Option<(usize, usize)>, +} + +impl<'r> Pattern for &'r Regex { + type Searcher<'t> = RegexSearcher<'r, 't>; + + fn into_searcher<'t>(self, haystack: &'t str) -> RegexSearcher<'r, 't> { + RegexSearcher { + haystack, + it: self.find_iter(haystack), + last_step_end: 0, + next_match: None, + } + } + + fn as_utf8_pattern<'p>(&'p self) -> Option> { + None + } +} + +unsafe impl<'r, 't> Searcher<'t> for RegexSearcher<'r, 't> { + #[inline] + fn haystack(&self) -> &'t str { + self.haystack + } + + #[inline] + fn next(&mut self) -> SearchStep { + if let Some((s, e)) = self.next_match { + self.next_match = None; + self.last_step_end = e; + return SearchStep::Match(s, e); + } + match self.it.next() { + None => { + if self.last_step_end < self.haystack().len() { + let last = self.last_step_end; + self.last_step_end = self.haystack().len(); + SearchStep::Reject(last, self.haystack().len()) + } else { + SearchStep::Done + } + } + Some(m) => { + let (s, e) = (m.start(), m.end()); + if s == self.last_step_end { + self.last_step_end = e; + SearchStep::Match(s, e) + } else { + self.next_match = Some((s, e)); + let last = self.last_step_end; + self.last_step_end = s; + SearchStep::Reject(last, s) + } + } + } + } +} diff --git a/vendor/regex/src/regex/bytes.rs b/vendor/regex/src/regex/bytes.rs new file mode 100644 index 00000000000000..303e0cbc4a3519 --- /dev/null +++ b/vendor/regex/src/regex/bytes.rs @@ -0,0 +1,2722 @@ +use alloc::{borrow::Cow, string::String, sync::Arc, vec::Vec}; + +use regex_automata::{meta, util::captures, Input, PatternID}; + +use crate::{bytes::RegexBuilder, error::Error}; + +/// A compiled regular expression for searching Unicode haystacks. +/// +/// A `Regex` can be used to search haystacks, split haystacks into substrings +/// or replace substrings in a haystack with a different substring. All +/// searching is done with an implicit `(?s:.)*?` at the beginning and end of +/// an pattern. To force an expression to match the whole string (or a prefix +/// or a suffix), you must use an anchor like `^` or `$` (or `\A` and `\z`). +/// +/// Like the `Regex` type in the parent module, matches with this regex return +/// byte offsets into the haystack. **Unlike** the parent `Regex` type, these +/// byte offsets may not correspond to UTF-8 sequence boundaries since the +/// regexes in this module can match arbitrary bytes. +/// +/// The only methods that allocate new byte strings are the string replacement +/// methods. All other methods (searching and splitting) return borrowed +/// references into the haystack given. +/// +/// # Example +/// +/// Find the offsets of a US phone number: +/// +/// ``` +/// use regex::bytes::Regex; +/// +/// let re = Regex::new("[0-9]{3}-[0-9]{3}-[0-9]{4}").unwrap(); +/// let m = re.find(b"phone: 111-222-3333").unwrap(); +/// assert_eq!(7..19, m.range()); +/// ``` +/// +/// # Example: extracting capture groups +/// +/// A common way to use regexes is with capture groups. 
That is, instead of +/// just looking for matches of an entire regex, parentheses are used to create +/// groups that represent part of the match. +/// +/// For example, consider a haystack with multiple lines, and each line has +/// three whitespace delimited fields where the second field is expected to be +/// a number and the third field a boolean. To make this convenient, we use +/// the [`Captures::extract`] API to put the strings that match each group +/// into a fixed size array: +/// +/// ``` +/// use regex::bytes::Regex; +/// +/// let hay = b" +/// rabbit 54 true +/// groundhog 2 true +/// does not match +/// fox 109 false +/// "; +/// let re = Regex::new(r"(?m)^\s*(\S+)\s+([0-9]+)\s+(true|false)\s*$").unwrap(); +/// let mut fields: Vec<(&[u8], i64, bool)> = vec![]; +/// for (_, [f1, f2, f3]) in re.captures_iter(hay).map(|caps| caps.extract()) { +/// // These unwraps are OK because our pattern is written in a way where +/// // all matches for f2 and f3 will be valid UTF-8. +/// let f2 = std::str::from_utf8(f2).unwrap(); +/// let f3 = std::str::from_utf8(f3).unwrap(); +/// fields.push((f1, f2.parse()?, f3.parse()?)); +/// } +/// assert_eq!(fields, vec![ +/// (&b"rabbit"[..], 54, true), +/// (&b"groundhog"[..], 2, true), +/// (&b"fox"[..], 109, false), +/// ]); +/// +/// # Ok::<(), Box>(()) +/// ``` +/// +/// # Example: matching invalid UTF-8 +/// +/// One of the reasons for searching `&[u8]` haystacks is that the `&[u8]` +/// might not be valid UTF-8. Indeed, with a `bytes::Regex`, patterns that +/// match invalid UTF-8 are explicitly allowed. Here's one example that looks +/// for valid UTF-8 fields that might be separated by invalid UTF-8. In this +/// case, we use `(?s-u:.)`, which matches any byte. Attempting to use it in a +/// top-level `Regex` will result in the regex failing to compile. Notice also +/// that we use `.` with Unicode mode enabled, in which case, only valid UTF-8 +/// is matched. In this way, we can build one pattern where some parts only +/// match valid UTF-8 while other parts are more permissive. +/// +/// ``` +/// use regex::bytes::Regex; +/// +/// // F0 9F 92 A9 is the UTF-8 encoding for a Pile of Poo. +/// let hay = b"\xFF\xFFfoo\xFF\xFF\xFF\xF0\x9F\x92\xA9\xFF"; +/// // An equivalent to '(?s-u:.)' is '(?-u:[\x00-\xFF])'. +/// let re = Regex::new(r"(?s)(?-u:.)*?(?.+)(?-u:.)*?(?.+)").unwrap(); +/// let caps = re.captures(hay).unwrap(); +/// assert_eq!(&caps["f1"], &b"foo"[..]); +/// assert_eq!(&caps["f2"], "💩".as_bytes()); +/// ``` +#[derive(Clone)] +pub struct Regex { + pub(crate) meta: meta::Regex, + pub(crate) pattern: Arc, +} + +impl core::fmt::Display for Regex { + /// Shows the original regular expression. + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + write!(f, "{}", self.as_str()) + } +} + +impl core::fmt::Debug for Regex { + /// Shows the original regular expression. 
+ fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + f.debug_tuple("Regex").field(&self.as_str()).finish() + } +} + +impl core::str::FromStr for Regex { + type Err = Error; + + /// Attempts to parse a string into a regular expression + fn from_str(s: &str) -> Result { + Regex::new(s) + } +} + +impl TryFrom<&str> for Regex { + type Error = Error; + + /// Attempts to parse a string into a regular expression + fn try_from(s: &str) -> Result { + Regex::new(s) + } +} + +impl TryFrom for Regex { + type Error = Error; + + /// Attempts to parse a string into a regular expression + fn try_from(s: String) -> Result { + Regex::new(&s) + } +} + +/// Core regular expression methods. +impl Regex { + /// Compiles a regular expression. Once compiled, it can be used repeatedly + /// to search, split or replace substrings in a haystack. + /// + /// Note that regex compilation tends to be a somewhat expensive process, + /// and unlike higher level environments, compilation is not automatically + /// cached for you. One should endeavor to compile a regex once and then + /// reuse it. For example, it's a bad idea to compile the same regex + /// repeatedly in a loop. + /// + /// # Errors + /// + /// If an invalid pattern is given, then an error is returned. + /// An error is also returned if the pattern is valid, but would + /// produce a regex that is bigger than the configured size limit via + /// [`RegexBuilder::size_limit`]. (A reasonable size limit is enabled by + /// default.) + /// + /// # Example + /// + /// ``` + /// use regex::bytes::Regex; + /// + /// // An Invalid pattern because of an unclosed parenthesis + /// assert!(Regex::new(r"foo(bar").is_err()); + /// // An invalid pattern because the regex would be too big + /// // because Unicode tends to inflate things. + /// assert!(Regex::new(r"\w{1000}").is_err()); + /// // Disabling Unicode can make the regex much smaller, + /// // potentially by up to or more than an order of magnitude. + /// assert!(Regex::new(r"(?-u:\w){1000}").is_ok()); + /// ``` + pub fn new(re: &str) -> Result { + RegexBuilder::new(re).build() + } + + /// Returns true if and only if there is a match for the regex anywhere + /// in the haystack given. + /// + /// It is recommended to use this method if all you need to do is test + /// whether a match exists, since the underlying matching engine may be + /// able to do less work. + /// + /// # Example + /// + /// Test if some haystack contains at least one word with exactly 13 + /// Unicode word characters: + /// + /// ``` + /// use regex::bytes::Regex; + /// + /// let re = Regex::new(r"\b\w{13}\b").unwrap(); + /// let hay = b"I categorically deny having triskaidekaphobia."; + /// assert!(re.is_match(hay)); + /// ``` + #[inline] + pub fn is_match(&self, haystack: &[u8]) -> bool { + self.is_match_at(haystack, 0) + } + + /// This routine searches for the first match of this regex in the + /// haystack given, and if found, returns a [`Match`]. The `Match` + /// provides access to both the byte offsets of the match and the actual + /// substring that matched. + /// + /// Note that this should only be used if you want to find the entire + /// match. If instead you just want to test the existence of a match, + /// it's potentially faster to use `Regex::is_match(hay)` instead of + /// `Regex::find(hay).is_some()`. 
+ /// + /// # Example + /// + /// Find the first word with exactly 13 Unicode word characters: + /// + /// ``` + /// use regex::bytes::Regex; + /// + /// let re = Regex::new(r"\b\w{13}\b").unwrap(); + /// let hay = b"I categorically deny having triskaidekaphobia."; + /// let mat = re.find(hay).unwrap(); + /// assert_eq!(2..15, mat.range()); + /// assert_eq!(b"categorically", mat.as_bytes()); + /// ``` + #[inline] + pub fn find<'h>(&self, haystack: &'h [u8]) -> Option> { + self.find_at(haystack, 0) + } + + /// Returns an iterator that yields successive non-overlapping matches in + /// the given haystack. The iterator yields values of type [`Match`]. + /// + /// # Time complexity + /// + /// Note that since `find_iter` runs potentially many searches on the + /// haystack and since each search has worst case `O(m * n)` time + /// complexity, the overall worst case time complexity for iteration is + /// `O(m * n^2)`. + /// + /// # Example + /// + /// Find every word with exactly 13 Unicode word characters: + /// + /// ``` + /// use regex::bytes::Regex; + /// + /// let re = Regex::new(r"\b\w{13}\b").unwrap(); + /// let hay = b"Retroactively relinquishing remunerations is reprehensible."; + /// let matches: Vec<_> = re.find_iter(hay).map(|m| m.as_bytes()).collect(); + /// assert_eq!(matches, vec![ + /// &b"Retroactively"[..], + /// &b"relinquishing"[..], + /// &b"remunerations"[..], + /// &b"reprehensible"[..], + /// ]); + /// ``` + #[inline] + pub fn find_iter<'r, 'h>(&'r self, haystack: &'h [u8]) -> Matches<'r, 'h> { + Matches { haystack, it: self.meta.find_iter(haystack) } + } + + /// This routine searches for the first match of this regex in the haystack + /// given, and if found, returns not only the overall match but also the + /// matches of each capture group in the regex. If no match is found, then + /// `None` is returned. + /// + /// Capture group `0` always corresponds to an implicit unnamed group that + /// includes the entire match. If a match is found, this group is always + /// present. Subsequent groups may be named and are numbered, starting + /// at 1, by the order in which the opening parenthesis appears in the + /// pattern. For example, in the pattern `(?
.(?.))(?.)`, `a`, + /// `b` and `c` correspond to capture group indices `1`, `2` and `3`, + /// respectively. + /// + /// You should only use `captures` if you need access to the capture group + /// matches. Otherwise, [`Regex::find`] is generally faster for discovering + /// just the overall match. + /// + /// # Example + /// + /// Say you have some haystack with movie names and their release years, + /// like "'Citizen Kane' (1941)". It'd be nice if we could search for + /// strings looking like that, while also extracting the movie name and its + /// release year separately. The example below shows how to do that. + /// + /// ``` + /// use regex::bytes::Regex; + /// + /// let re = Regex::new(r"'([^']+)'\s+\((\d{4})\)").unwrap(); + /// let hay = b"Not my favorite movie: 'Citizen Kane' (1941)."; + /// let caps = re.captures(hay).unwrap(); + /// assert_eq!(caps.get(0).unwrap().as_bytes(), b"'Citizen Kane' (1941)"); + /// assert_eq!(caps.get(1).unwrap().as_bytes(), b"Citizen Kane"); + /// assert_eq!(caps.get(2).unwrap().as_bytes(), b"1941"); + /// // You can also access the groups by index using the Index notation. + /// // Note that this will panic on an invalid index. In this case, these + /// // accesses are always correct because the overall regex will only + /// // match when these capture groups match. + /// assert_eq!(&caps[0], b"'Citizen Kane' (1941)"); + /// assert_eq!(&caps[1], b"Citizen Kane"); + /// assert_eq!(&caps[2], b"1941"); + /// ``` + /// + /// Note that the full match is at capture group `0`. Each subsequent + /// capture group is indexed by the order of its opening `(`. + /// + /// We can make this example a bit clearer by using *named* capture groups: + /// + /// ``` + /// use regex::bytes::Regex; + /// + /// let re = Regex::new(r"'(?[^']+)'\s+\((?<year>\d{4})\)").unwrap(); + /// let hay = b"Not my favorite movie: 'Citizen Kane' (1941)."; + /// let caps = re.captures(hay).unwrap(); + /// assert_eq!(caps.get(0).unwrap().as_bytes(), b"'Citizen Kane' (1941)"); + /// assert_eq!(caps.name("title").unwrap().as_bytes(), b"Citizen Kane"); + /// assert_eq!(caps.name("year").unwrap().as_bytes(), b"1941"); + /// // You can also access the groups by name using the Index notation. + /// // Note that this will panic on an invalid group name. In this case, + /// // these accesses are always correct because the overall regex will + /// // only match when these capture groups match. + /// assert_eq!(&caps[0], b"'Citizen Kane' (1941)"); + /// assert_eq!(&caps["title"], b"Citizen Kane"); + /// assert_eq!(&caps["year"], b"1941"); + /// ``` + /// + /// Here we name the capture groups, which we can access with the `name` + /// method or the `Index` notation with a `&str`. Note that the named + /// capture groups are still accessible with `get` or the `Index` notation + /// with a `usize`. + /// + /// The `0`th capture group is always unnamed, so it must always be + /// accessed with `get(0)` or `[0]`. 
+ /// + /// Finally, one other way to get the matched substrings is with the + /// [`Captures::extract`] API: + /// + /// ``` + /// use regex::bytes::Regex; + /// + /// let re = Regex::new(r"'([^']+)'\s+\((\d{4})\)").unwrap(); + /// let hay = b"Not my favorite movie: 'Citizen Kane' (1941)."; + /// let (full, [title, year]) = re.captures(hay).unwrap().extract(); + /// assert_eq!(full, b"'Citizen Kane' (1941)"); + /// assert_eq!(title, b"Citizen Kane"); + /// assert_eq!(year, b"1941"); + /// ``` + #[inline] + pub fn captures<'h>(&self, haystack: &'h [u8]) -> Option<Captures<'h>> { + self.captures_at(haystack, 0) + } + + /// Returns an iterator that yields successive non-overlapping matches in + /// the given haystack. The iterator yields values of type [`Captures`]. + /// + /// This is the same as [`Regex::find_iter`], but instead of only providing + /// access to the overall match, each value yield includes access to the + /// matches of all capture groups in the regex. Reporting this extra match + /// data is potentially costly, so callers should only use `captures_iter` + /// over `find_iter` when they actually need access to the capture group + /// matches. + /// + /// # Time complexity + /// + /// Note that since `captures_iter` runs potentially many searches on the + /// haystack and since each search has worst case `O(m * n)` time + /// complexity, the overall worst case time complexity for iteration is + /// `O(m * n^2)`. + /// + /// # Example + /// + /// We can use this to find all movie titles and their release years in + /// some haystack, where the movie is formatted like "'Title' (xxxx)": + /// + /// ``` + /// use regex::bytes::Regex; + /// + /// let re = Regex::new(r"'([^']+)'\s+\(([0-9]{4})\)").unwrap(); + /// let hay = b"'Citizen Kane' (1941), 'The Wizard of Oz' (1939), 'M' (1931)."; + /// let mut movies = vec![]; + /// for (_, [title, year]) in re.captures_iter(hay).map(|c| c.extract()) { + /// // OK because [0-9]{4} can only match valid UTF-8. + /// let year = std::str::from_utf8(year).unwrap(); + /// movies.push((title, year.parse::<i64>()?)); + /// } + /// assert_eq!(movies, vec![ + /// (&b"Citizen Kane"[..], 1941), + /// (&b"The Wizard of Oz"[..], 1939), + /// (&b"M"[..], 1931), + /// ]); + /// # Ok::<(), Box<dyn std::error::Error>>(()) + /// ``` + /// + /// Or with named groups: + /// + /// ``` + /// use regex::bytes::Regex; + /// + /// let re = Regex::new(r"'(?<title>[^']+)'\s+\((?<year>[0-9]{4})\)").unwrap(); + /// let hay = b"'Citizen Kane' (1941), 'The Wizard of Oz' (1939), 'M' (1931)."; + /// let mut it = re.captures_iter(hay); + /// + /// let caps = it.next().unwrap(); + /// assert_eq!(&caps["title"], b"Citizen Kane"); + /// assert_eq!(&caps["year"], b"1941"); + /// + /// let caps = it.next().unwrap(); + /// assert_eq!(&caps["title"], b"The Wizard of Oz"); + /// assert_eq!(&caps["year"], b"1939"); + /// + /// let caps = it.next().unwrap(); + /// assert_eq!(&caps["title"], b"M"); + /// assert_eq!(&caps["year"], b"1931"); + /// ``` + #[inline] + pub fn captures_iter<'r, 'h>( + &'r self, + haystack: &'h [u8], + ) -> CaptureMatches<'r, 'h> { + CaptureMatches { haystack, it: self.meta.captures_iter(haystack) } + } + + /// Returns an iterator of substrings of the haystack given, delimited by a + /// match of the regex. Namely, each element of the iterator corresponds to + /// a part of the haystack that *isn't* matched by the regular expression. 
+ /// + /// # Time complexity + /// + /// Since iterators over all matches requires running potentially many + /// searches on the haystack, and since each search has worst case + /// `O(m * n)` time complexity, the overall worst case time complexity for + /// this routine is `O(m * n^2)`. + /// + /// # Example + /// + /// To split a string delimited by arbitrary amounts of spaces or tabs: + /// + /// ``` + /// use regex::bytes::Regex; + /// + /// let re = Regex::new(r"[ \t]+").unwrap(); + /// let hay = b"a b \t c\td e"; + /// let fields: Vec<&[u8]> = re.split(hay).collect(); + /// assert_eq!(fields, vec![ + /// &b"a"[..], &b"b"[..], &b"c"[..], &b"d"[..], &b"e"[..], + /// ]); + /// ``` + /// + /// # Example: more cases + /// + /// Basic usage: + /// + /// ``` + /// use regex::bytes::Regex; + /// + /// let re = Regex::new(r" ").unwrap(); + /// let hay = b"Mary had a little lamb"; + /// let got: Vec<&[u8]> = re.split(hay).collect(); + /// assert_eq!(got, vec![ + /// &b"Mary"[..], &b"had"[..], &b"a"[..], &b"little"[..], &b"lamb"[..], + /// ]); + /// + /// let re = Regex::new(r"X").unwrap(); + /// let hay = b""; + /// let got: Vec<&[u8]> = re.split(hay).collect(); + /// assert_eq!(got, vec![&b""[..]]); + /// + /// let re = Regex::new(r"X").unwrap(); + /// let hay = b"lionXXtigerXleopard"; + /// let got: Vec<&[u8]> = re.split(hay).collect(); + /// assert_eq!(got, vec![ + /// &b"lion"[..], &b""[..], &b"tiger"[..], &b"leopard"[..], + /// ]); + /// + /// let re = Regex::new(r"::").unwrap(); + /// let hay = b"lion::tiger::leopard"; + /// let got: Vec<&[u8]> = re.split(hay).collect(); + /// assert_eq!(got, vec![&b"lion"[..], &b"tiger"[..], &b"leopard"[..]]); + /// ``` + /// + /// If a haystack contains multiple contiguous matches, you will end up + /// with empty spans yielded by the iterator: + /// + /// ``` + /// use regex::bytes::Regex; + /// + /// let re = Regex::new(r"X").unwrap(); + /// let hay = b"XXXXaXXbXc"; + /// let got: Vec<&[u8]> = re.split(hay).collect(); + /// assert_eq!(got, vec![ + /// &b""[..], &b""[..], &b""[..], &b""[..], + /// &b"a"[..], &b""[..], &b"b"[..], &b"c"[..], + /// ]); + /// + /// let re = Regex::new(r"/").unwrap(); + /// let hay = b"(///)"; + /// let got: Vec<&[u8]> = re.split(hay).collect(); + /// assert_eq!(got, vec![&b"("[..], &b""[..], &b""[..], &b")"[..]]); + /// ``` + /// + /// Separators at the start or end of a haystack are neighbored by empty + /// substring. + /// + /// ``` + /// use regex::bytes::Regex; + /// + /// let re = Regex::new(r"0").unwrap(); + /// let hay = b"010"; + /// let got: Vec<&[u8]> = re.split(hay).collect(); + /// assert_eq!(got, vec![&b""[..], &b"1"[..], &b""[..]]); + /// ``` + /// + /// When the regex can match the empty string, it splits at every byte + /// position in the haystack. This includes between all UTF-8 code units. + /// (The top-level [`Regex::split`](crate::Regex::split) will only split + /// at valid UTF-8 boundaries.) + /// + /// ``` + /// use regex::bytes::Regex; + /// + /// let re = Regex::new(r"").unwrap(); + /// let hay = "☃".as_bytes(); + /// let got: Vec<&[u8]> = re.split(hay).collect(); + /// assert_eq!(got, vec![ + /// &[][..], &[b'\xE2'][..], &[b'\x98'][..], &[b'\x83'][..], &[][..], + /// ]); + /// ``` + /// + /// Contiguous separators (commonly shows up with whitespace), can lead to + /// possibly surprising behavior. 
For example, this code is correct: + /// + /// ``` + /// use regex::bytes::Regex; + /// + /// let re = Regex::new(r" ").unwrap(); + /// let hay = b" a b c"; + /// let got: Vec<&[u8]> = re.split(hay).collect(); + /// assert_eq!(got, vec![ + /// &b""[..], &b""[..], &b""[..], &b""[..], + /// &b"a"[..], &b""[..], &b"b"[..], &b"c"[..], + /// ]); + /// ``` + /// + /// It does *not* give you `["a", "b", "c"]`. For that behavior, you'd want + /// to match contiguous space characters: + /// + /// ``` + /// use regex::bytes::Regex; + /// + /// let re = Regex::new(r" +").unwrap(); + /// let hay = b" a b c"; + /// let got: Vec<&[u8]> = re.split(hay).collect(); + /// // N.B. This does still include a leading empty span because ' +' + /// // matches at the beginning of the haystack. + /// assert_eq!(got, vec![&b""[..], &b"a"[..], &b"b"[..], &b"c"[..]]); + /// ``` + #[inline] + pub fn split<'r, 'h>(&'r self, haystack: &'h [u8]) -> Split<'r, 'h> { + Split { haystack, it: self.meta.split(haystack) } + } + + /// Returns an iterator of at most `limit` substrings of the haystack + /// given, delimited by a match of the regex. (A `limit` of `0` will return + /// no substrings.) Namely, each element of the iterator corresponds to a + /// part of the haystack that *isn't* matched by the regular expression. + /// The remainder of the haystack that is not split will be the last + /// element in the iterator. + /// + /// # Time complexity + /// + /// Since iterators over all matches requires running potentially many + /// searches on the haystack, and since each search has worst case + /// `O(m * n)` time complexity, the overall worst case time complexity for + /// this routine is `O(m * n^2)`. + /// + /// Although note that the worst case time here has an upper bound given + /// by the `limit` parameter. + /// + /// # Example + /// + /// Get the first two words in some haystack: + /// + /// ``` + /// use regex::bytes::Regex; + /// + /// let re = Regex::new(r"\W+").unwrap(); + /// let hay = b"Hey! 
How are you?"; + /// let fields: Vec<&[u8]> = re.splitn(hay, 3).collect(); + /// assert_eq!(fields, vec![&b"Hey"[..], &b"How"[..], &b"are you?"[..]]); + /// ``` + /// + /// # Examples: more cases + /// + /// ``` + /// use regex::bytes::Regex; + /// + /// let re = Regex::new(r" ").unwrap(); + /// let hay = b"Mary had a little lamb"; + /// let got: Vec<&[u8]> = re.splitn(hay, 3).collect(); + /// assert_eq!(got, vec![&b"Mary"[..], &b"had"[..], &b"a little lamb"[..]]); + /// + /// let re = Regex::new(r"X").unwrap(); + /// let hay = b""; + /// let got: Vec<&[u8]> = re.splitn(hay, 3).collect(); + /// assert_eq!(got, vec![&b""[..]]); + /// + /// let re = Regex::new(r"X").unwrap(); + /// let hay = b"lionXXtigerXleopard"; + /// let got: Vec<&[u8]> = re.splitn(hay, 3).collect(); + /// assert_eq!(got, vec![&b"lion"[..], &b""[..], &b"tigerXleopard"[..]]); + /// + /// let re = Regex::new(r"::").unwrap(); + /// let hay = b"lion::tiger::leopard"; + /// let got: Vec<&[u8]> = re.splitn(hay, 2).collect(); + /// assert_eq!(got, vec![&b"lion"[..], &b"tiger::leopard"[..]]); + /// + /// let re = Regex::new(r"X").unwrap(); + /// let hay = b"abcXdef"; + /// let got: Vec<&[u8]> = re.splitn(hay, 1).collect(); + /// assert_eq!(got, vec![&b"abcXdef"[..]]); + /// + /// let re = Regex::new(r"X").unwrap(); + /// let hay = b"abcdef"; + /// let got: Vec<&[u8]> = re.splitn(hay, 2).collect(); + /// assert_eq!(got, vec![&b"abcdef"[..]]); + /// + /// let re = Regex::new(r"X").unwrap(); + /// let hay = b"abcXdef"; + /// let got: Vec<&[u8]> = re.splitn(hay, 0).collect(); + /// assert!(got.is_empty()); + /// ``` + #[inline] + pub fn splitn<'r, 'h>( + &'r self, + haystack: &'h [u8], + limit: usize, + ) -> SplitN<'r, 'h> { + SplitN { haystack, it: self.meta.splitn(haystack, limit) } + } + + /// Replaces the leftmost-first match in the given haystack with the + /// replacement provided. The replacement can be a regular string (where + /// `$N` and `$name` are expanded to match capture groups) or a function + /// that takes a [`Captures`] and returns the replaced string. + /// + /// If no match is found, then the haystack is returned unchanged. In that + /// case, this implementation will likely return a `Cow::Borrowed` value + /// such that no allocation is performed. + /// + /// When a `Cow::Borrowed` is returned, the value returned is guaranteed + /// to be equivalent to the `haystack` given. + /// + /// # Replacement string syntax + /// + /// All instances of `$ref` in the replacement string are replaced with + /// the substring corresponding to the capture group identified by `ref`. + /// + /// `ref` may be an integer corresponding to the index of the capture group + /// (counted by order of opening parenthesis where `0` is the entire match) + /// or it can be a name (consisting of letters, digits or underscores) + /// corresponding to a named capture group. + /// + /// If `ref` isn't a valid capture group (whether the name doesn't exist or + /// isn't a valid index), then it is replaced with the empty string. + /// + /// The longest possible name is used. For example, `$1a` looks up the + /// capture group named `1a` and not the capture group at index `1`. To + /// exert more precise control over the name, use braces, e.g., `${1}a`. + /// + /// To write a literal `$` use `$$`. + /// + /// # Example + /// + /// Note that this function is polymorphic with respect to the replacement. 
+ /// In typical usage, this can just be a normal string: + /// + /// ``` + /// use regex::bytes::Regex; + /// + /// let re = Regex::new(r"[^01]+").unwrap(); + /// assert_eq!(re.replace(b"1078910", b""), &b"1010"[..]); + /// ``` + /// + /// But anything satisfying the [`Replacer`] trait will work. For example, + /// a closure of type `|&Captures| -> String` provides direct access to the + /// captures corresponding to a match. This allows one to access capturing + /// group matches easily: + /// + /// ``` + /// use regex::bytes::{Captures, Regex}; + /// + /// let re = Regex::new(r"([^,\s]+),\s+(\S+)").unwrap(); + /// let result = re.replace(b"Springsteen, Bruce", |caps: &Captures| { + /// let mut buf = vec![]; + /// buf.extend_from_slice(&caps[2]); + /// buf.push(b' '); + /// buf.extend_from_slice(&caps[1]); + /// buf + /// }); + /// assert_eq!(result, &b"Bruce Springsteen"[..]); + /// ``` + /// + /// But this is a bit cumbersome to use all the time. Instead, a simple + /// syntax is supported (as described above) that expands `$name` into the + /// corresponding capture group. Here's the last example, but using this + /// expansion technique with named capture groups: + /// + /// ``` + /// use regex::bytes::Regex; + /// + /// let re = Regex::new(r"(?<last>[^,\s]+),\s+(?<first>\S+)").unwrap(); + /// let result = re.replace(b"Springsteen, Bruce", b"$first $last"); + /// assert_eq!(result, &b"Bruce Springsteen"[..]); + /// ``` + /// + /// Note that using `$2` instead of `$first` or `$1` instead of `$last` + /// would produce the same result. To write a literal `$` use `$$`. + /// + /// Sometimes the replacement string requires use of curly braces to + /// delineate a capture group replacement when it is adjacent to some other + /// literal text. For example, if we wanted to join two words together with + /// an underscore: + /// + /// ``` + /// use regex::bytes::Regex; + /// + /// let re = Regex::new(r"(?<first>\w+)\s+(?<second>\w+)").unwrap(); + /// let result = re.replace(b"deep fried", b"${first}_$second"); + /// assert_eq!(result, &b"deep_fried"[..]); + /// ``` + /// + /// Without the curly braces, the capture group name `first_` would be + /// used, and since it doesn't exist, it would be replaced with the empty + /// string. + /// + /// Finally, sometimes you just want to replace a literal string with no + /// regard for capturing group expansion. This can be done by wrapping a + /// string with [`NoExpand`]: + /// + /// ``` + /// use regex::bytes::{NoExpand, Regex}; + /// + /// let re = Regex::new(r"(?<last>[^,\s]+),\s+(\S+)").unwrap(); + /// let result = re.replace(b"Springsteen, Bruce", NoExpand(b"$2 $last")); + /// assert_eq!(result, &b"$2 $last"[..]); + /// ``` + /// + /// Using `NoExpand` may also be faster, since the replacement string won't + /// need to be parsed for the `$` syntax. + #[inline] + pub fn replace<'h, R: Replacer>( + &self, + haystack: &'h [u8], + rep: R, + ) -> Cow<'h, [u8]> { + self.replacen(haystack, 1, rep) + } + + /// Replaces all non-overlapping matches in the haystack with the + /// replacement provided. This is the same as calling `replacen` with + /// `limit` set to `0`. + /// + /// If no match is found, then the haystack is returned unchanged. In that + /// case, this implementation will likely return a `Cow::Borrowed` value + /// such that no allocation is performed. + /// + /// When a `Cow::Borrowed` is returned, the value returned is guaranteed + /// to be equivalent to the `haystack` given. 
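+ ///
+ /// For instance, a minimal sketch of the no-match case, where the haystack
+ /// comes back unchanged:
+ ///
+ /// ```
+ /// use regex::bytes::Regex;
+ ///
+ /// let re = Regex::new(r"[0-9]+").unwrap();
+ /// // No digits anywhere, so nothing is replaced.
+ /// let out = re.replace_all(b"no digits here", b"#");
+ /// assert_eq!(out, &b"no digits here"[..]);
+ /// ```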
+ /// + /// The documentation for [`Regex::replace`] goes into more detail about + /// what kinds of replacement strings are supported. + /// + /// # Time complexity + /// + /// Since iterators over all matches requires running potentially many + /// searches on the haystack, and since each search has worst case + /// `O(m * n)` time complexity, the overall worst case time complexity for + /// this routine is `O(m * n^2)`. + /// + /// # Fallibility + /// + /// If you need to write a replacement routine where any individual + /// replacement might "fail," doing so with this API isn't really feasible + /// because there's no way to stop the search process if a replacement + /// fails. Instead, if you need this functionality, you should consider + /// implementing your own replacement routine: + /// + /// ``` + /// use regex::bytes::{Captures, Regex}; + /// + /// fn replace_all<E>( + /// re: &Regex, + /// haystack: &[u8], + /// replacement: impl Fn(&Captures) -> Result<Vec<u8>, E>, + /// ) -> Result<Vec<u8>, E> { + /// let mut new = Vec::with_capacity(haystack.len()); + /// let mut last_match = 0; + /// for caps in re.captures_iter(haystack) { + /// let m = caps.get(0).unwrap(); + /// new.extend_from_slice(&haystack[last_match..m.start()]); + /// new.extend_from_slice(&replacement(&caps)?); + /// last_match = m.end(); + /// } + /// new.extend_from_slice(&haystack[last_match..]); + /// Ok(new) + /// } + /// + /// // Let's replace each word with the number of bytes in that word. + /// // But if we see a word that is "too long," we'll give up. + /// let re = Regex::new(r"\w+").unwrap(); + /// let replacement = |caps: &Captures| -> Result<Vec<u8>, &'static str> { + /// if caps[0].len() >= 5 { + /// return Err("word too long"); + /// } + /// Ok(caps[0].len().to_string().into_bytes()) + /// }; + /// assert_eq!( + /// Ok(b"2 3 3 3?".to_vec()), + /// replace_all(&re, b"hi how are you?", &replacement), + /// ); + /// assert!(replace_all(&re, b"hi there", &replacement).is_err()); + /// ``` + /// + /// # Example + /// + /// This example shows how to flip the order of whitespace (excluding line + /// terminators) delimited fields, and normalizes the whitespace that + /// delimits the fields: + /// + /// ``` + /// use regex::bytes::Regex; + /// + /// let re = Regex::new(r"(?m)^(\S+)[\s--\r\n]+(\S+)$").unwrap(); + /// let hay = b" + /// Greetings 1973 + /// Wild\t1973 + /// BornToRun\t\t\t\t1975 + /// Darkness 1978 + /// TheRiver 1980 + /// "; + /// let new = re.replace_all(hay, b"$2 $1"); + /// assert_eq!(new, &b" + /// 1973 Greetings + /// 1973 Wild + /// 1975 BornToRun + /// 1978 Darkness + /// 1980 TheRiver + /// "[..]); + /// ``` + #[inline] + pub fn replace_all<'h, R: Replacer>( + &self, + haystack: &'h [u8], + rep: R, + ) -> Cow<'h, [u8]> { + self.replacen(haystack, 0, rep) + } + + /// Replaces at most `limit` non-overlapping matches in the haystack with + /// the replacement provided. If `limit` is `0`, then all non-overlapping + /// matches are replaced. That is, `Regex::replace_all(hay, rep)` is + /// equivalent to `Regex::replacen(hay, 0, rep)`. + /// + /// If no match is found, then the haystack is returned unchanged. In that + /// case, this implementation will likely return a `Cow::Borrowed` value + /// such that no allocation is performed. + /// + /// When a `Cow::Borrowed` is returned, the value returned is guaranteed + /// to be equivalent to the `haystack` given. 
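+ ///
+ /// As a quick illustrative check of the `limit` of `0` equivalence noted
+ /// above:
+ ///
+ /// ```
+ /// use regex::bytes::Regex;
+ ///
+ /// let re = Regex::new(r"[0-9]").unwrap();
+ /// let hay = b"a1b2c3";
+ /// // A limit of 0 replaces every match, just like `replace_all`.
+ /// assert_eq!(re.replacen(hay, 0, b"#"), re.replace_all(hay, b"#"));
+ /// assert_eq!(re.replacen(hay, 0, b"#"), &b"a#b#c#"[..]);
+ /// ```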
+ /// + /// The documentation for [`Regex::replace`] goes into more detail about + /// what kinds of replacement strings are supported. + /// + /// # Time complexity + /// + /// Since iterators over all matches requires running potentially many + /// searches on the haystack, and since each search has worst case + /// `O(m * n)` time complexity, the overall worst case time complexity for + /// this routine is `O(m * n^2)`. + /// + /// Although note that the worst case time here has an upper bound given + /// by the `limit` parameter. + /// + /// # Fallibility + /// + /// See the corresponding section in the docs for [`Regex::replace_all`] + /// for tips on how to deal with a replacement routine that can fail. + /// + /// # Example + /// + /// This example shows how to flip the order of whitespace (excluding line + /// terminators) delimited fields, and normalizes the whitespace that + /// delimits the fields. But we only do it for the first two matches. + /// + /// ``` + /// use regex::bytes::Regex; + /// + /// let re = Regex::new(r"(?m)^(\S+)[\s--\r\n]+(\S+)$").unwrap(); + /// let hay = b" + /// Greetings 1973 + /// Wild\t1973 + /// BornToRun\t\t\t\t1975 + /// Darkness 1978 + /// TheRiver 1980 + /// "; + /// let new = re.replacen(hay, 2, b"$2 $1"); + /// assert_eq!(new, &b" + /// 1973 Greetings + /// 1973 Wild + /// BornToRun\t\t\t\t1975 + /// Darkness 1978 + /// TheRiver 1980 + /// "[..]); + /// ``` + #[inline] + pub fn replacen<'h, R: Replacer>( + &self, + haystack: &'h [u8], + limit: usize, + mut rep: R, + ) -> Cow<'h, [u8]> { + // If we know that the replacement doesn't have any capture expansions, + // then we can use the fast path. The fast path can make a tremendous + // difference: + // + // 1) We use `find_iter` instead of `captures_iter`. Not asking for + // captures generally makes the regex engines faster. + // 2) We don't need to look up all of the capture groups and do + // replacements inside the replacement string. We just push it + // at each match and be done with it. + if let Some(rep) = rep.no_expansion() { + let mut it = self.find_iter(haystack).enumerate().peekable(); + if it.peek().is_none() { + return Cow::Borrowed(haystack); + } + let mut new = Vec::with_capacity(haystack.len()); + let mut last_match = 0; + for (i, m) in it { + new.extend_from_slice(&haystack[last_match..m.start()]); + new.extend_from_slice(&rep); + last_match = m.end(); + if limit > 0 && i >= limit - 1 { + break; + } + } + new.extend_from_slice(&haystack[last_match..]); + return Cow::Owned(new); + } + + // The slower path, which we use if the replacement needs access to + // capture groups. + let mut it = self.captures_iter(haystack).enumerate().peekable(); + if it.peek().is_none() { + return Cow::Borrowed(haystack); + } + let mut new = Vec::with_capacity(haystack.len()); + let mut last_match = 0; + for (i, cap) in it { + // unwrap on 0 is OK because captures only reports matches + let m = cap.get(0).unwrap(); + new.extend_from_slice(&haystack[last_match..m.start()]); + rep.replace_append(&cap, &mut new); + last_match = m.end(); + if limit > 0 && i >= limit - 1 { + break; + } + } + new.extend_from_slice(&haystack[last_match..]); + Cow::Owned(new) + } +} + +/// A group of advanced or "lower level" search methods. Some methods permit +/// starting the search at a position greater than `0` in the haystack. Other +/// methods permit reusing allocations, for example, when extracting the +/// matches for capture groups. 
+impl Regex { + /// Returns the end byte offset of the first match in the haystack given. + /// + /// This method may have the same performance characteristics as + /// `is_match`. Behaviorally, it doesn't just report whether it match + /// occurs, but also the end offset for a match. In particular, the offset + /// returned *may be shorter* than the proper end of the leftmost-first + /// match that you would find via [`Regex::find`]. + /// + /// Note that it is not guaranteed that this routine finds the shortest or + /// "earliest" possible match. Instead, the main idea of this API is that + /// it returns the offset at the point at which the internal regex engine + /// has determined that a match has occurred. This may vary depending on + /// which internal regex engine is used, and thus, the offset itself may + /// change based on internal heuristics. + /// + /// # Example + /// + /// Typically, `a+` would match the entire first sequence of `a` in some + /// haystack, but `shortest_match` *may* give up as soon as it sees the + /// first `a`. + /// + /// ``` + /// use regex::bytes::Regex; + /// + /// let re = Regex::new(r"a+").unwrap(); + /// let offset = re.shortest_match(b"aaaaa").unwrap(); + /// assert_eq!(offset, 1); + /// ``` + #[inline] + pub fn shortest_match(&self, haystack: &[u8]) -> Option<usize> { + self.shortest_match_at(haystack, 0) + } + + /// Returns the same as `shortest_match`, but starts the search at the + /// given offset. + /// + /// The significance of the starting point is that it takes the surrounding + /// context into consideration. For example, the `\A` anchor can only match + /// when `start == 0`. + /// + /// If a match is found, the offset returned is relative to the beginning + /// of the haystack, not the beginning of the search. + /// + /// # Panics + /// + /// This panics when `start >= haystack.len() + 1`. + /// + /// # Example + /// + /// This example shows the significance of `start` by demonstrating how it + /// can be used to permit look-around assertions in a regex to take the + /// surrounding context into account. + /// + /// ``` + /// use regex::bytes::Regex; + /// + /// let re = Regex::new(r"\bchew\b").unwrap(); + /// let hay = b"eschew"; + /// // We get a match here, but it's probably not intended. + /// assert_eq!(re.shortest_match(&hay[2..]), Some(4)); + /// // No match because the assertions take the context into account. + /// assert_eq!(re.shortest_match_at(hay, 2), None); + /// ``` + #[inline] + pub fn shortest_match_at( + &self, + haystack: &[u8], + start: usize, + ) -> Option<usize> { + let input = + Input::new(haystack).earliest(true).span(start..haystack.len()); + self.meta.search_half(&input).map(|hm| hm.offset()) + } + + /// Returns the same as [`Regex::is_match`], but starts the search at the + /// given offset. + /// + /// The significance of the starting point is that it takes the surrounding + /// context into consideration. For example, the `\A` anchor can only + /// match when `start == 0`. + /// + /// # Panics + /// + /// This panics when `start >= haystack.len() + 1`. + /// + /// # Example + /// + /// This example shows the significance of `start` by demonstrating how it + /// can be used to permit look-around assertions in a regex to take the + /// surrounding context into account. + /// + /// ``` + /// use regex::bytes::Regex; + /// + /// let re = Regex::new(r"\bchew\b").unwrap(); + /// let hay = b"eschew"; + /// // We get a match here, but it's probably not intended. 
+ /// assert!(re.is_match(&hay[2..])); + /// // No match because the assertions take the context into account. + /// assert!(!re.is_match_at(hay, 2)); + /// ``` + #[inline] + pub fn is_match_at(&self, haystack: &[u8], start: usize) -> bool { + self.meta.is_match(Input::new(haystack).span(start..haystack.len())) + } + + /// Returns the same as [`Regex::find`], but starts the search at the given + /// offset. + /// + /// The significance of the starting point is that it takes the surrounding + /// context into consideration. For example, the `\A` anchor can only + /// match when `start == 0`. + /// + /// # Panics + /// + /// This panics when `start >= haystack.len() + 1`. + /// + /// # Example + /// + /// This example shows the significance of `start` by demonstrating how it + /// can be used to permit look-around assertions in a regex to take the + /// surrounding context into account. + /// + /// ``` + /// use regex::bytes::Regex; + /// + /// let re = Regex::new(r"\bchew\b").unwrap(); + /// let hay = b"eschew"; + /// // We get a match here, but it's probably not intended. + /// assert_eq!(re.find(&hay[2..]).map(|m| m.range()), Some(0..4)); + /// // No match because the assertions take the context into account. + /// assert_eq!(re.find_at(hay, 2), None); + /// ``` + #[inline] + pub fn find_at<'h>( + &self, + haystack: &'h [u8], + start: usize, + ) -> Option<Match<'h>> { + let input = Input::new(haystack).span(start..haystack.len()); + self.meta.find(input).map(|m| Match::new(haystack, m.start(), m.end())) + } + + /// Returns the same as [`Regex::captures`], but starts the search at the + /// given offset. + /// + /// The significance of the starting point is that it takes the surrounding + /// context into consideration. For example, the `\A` anchor can only + /// match when `start == 0`. + /// + /// # Panics + /// + /// This panics when `start >= haystack.len() + 1`. + /// + /// # Example + /// + /// This example shows the significance of `start` by demonstrating how it + /// can be used to permit look-around assertions in a regex to take the + /// surrounding context into account. + /// + /// ``` + /// use regex::bytes::Regex; + /// + /// let re = Regex::new(r"\bchew\b").unwrap(); + /// let hay = b"eschew"; + /// // We get a match here, but it's probably not intended. + /// assert_eq!(&re.captures(&hay[2..]).unwrap()[0], b"chew"); + /// // No match because the assertions take the context into account. + /// assert!(re.captures_at(hay, 2).is_none()); + /// ``` + #[inline] + pub fn captures_at<'h>( + &self, + haystack: &'h [u8], + start: usize, + ) -> Option<Captures<'h>> { + let input = Input::new(haystack).span(start..haystack.len()); + let mut caps = self.meta.create_captures(); + self.meta.captures(input, &mut caps); + if caps.is_match() { + let static_captures_len = self.static_captures_len(); + Some(Captures { haystack, caps, static_captures_len }) + } else { + None + } + } + + /// This is like [`Regex::captures`], but writes the byte offsets of each + /// capture group match into the locations given. + /// + /// A [`CaptureLocations`] stores the same byte offsets as a [`Captures`], + /// but does *not* store a reference to the haystack. This makes its API + /// a bit lower level and less convenient. But in exchange, callers + /// may allocate their own `CaptureLocations` and reuse it for multiple + /// searches. This may be helpful if allocating a `Captures` shows up in a + /// profile as too costly. 
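+ ///
+ /// A rough sketch of that reuse pattern, using the constructor described
+ /// just below:
+ ///
+ /// ```
+ /// use regex::bytes::Regex;
+ ///
+ /// let re = Regex::new(r"([0-9]+)").unwrap();
+ /// // One allocation, reused across every search in the loop.
+ /// let mut locs = re.capture_locations();
+ /// for hay in [&b"a1"[..], &b"b22"[..], &b"c333"[..]] {
+ ///     assert!(re.captures_read(&mut locs, hay).is_some());
+ ///     assert!(locs.get(1).is_some());
+ /// }
+ /// ```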
+ /// + /// To create a `CaptureLocations` value, use the + /// [`Regex::capture_locations`] method. + /// + /// This also returns the overall match if one was found. When a match is + /// found, its offsets are also always stored in `locs` at index `0`. + /// + /// # Example + /// + /// ``` + /// use regex::bytes::Regex; + /// + /// let re = Regex::new(r"^([a-z]+)=(\S*)$").unwrap(); + /// let mut locs = re.capture_locations(); + /// assert!(re.captures_read(&mut locs, b"id=foo123").is_some()); + /// assert_eq!(Some((0, 9)), locs.get(0)); + /// assert_eq!(Some((0, 2)), locs.get(1)); + /// assert_eq!(Some((3, 9)), locs.get(2)); + /// ``` + #[inline] + pub fn captures_read<'h>( + &self, + locs: &mut CaptureLocations, + haystack: &'h [u8], + ) -> Option<Match<'h>> { + self.captures_read_at(locs, haystack, 0) + } + + /// Returns the same as [`Regex::captures_read`], but starts the search at + /// the given offset. + /// + /// The significance of the starting point is that it takes the surrounding + /// context into consideration. For example, the `\A` anchor can only + /// match when `start == 0`. + /// + /// # Panics + /// + /// This panics when `start >= haystack.len() + 1`. + /// + /// # Example + /// + /// This example shows the significance of `start` by demonstrating how it + /// can be used to permit look-around assertions in a regex to take the + /// surrounding context into account. + /// + /// ``` + /// use regex::bytes::Regex; + /// + /// let re = Regex::new(r"\bchew\b").unwrap(); + /// let hay = b"eschew"; + /// let mut locs = re.capture_locations(); + /// // We get a match here, but it's probably not intended. + /// assert!(re.captures_read(&mut locs, &hay[2..]).is_some()); + /// // No match because the assertions take the context into account. + /// assert!(re.captures_read_at(&mut locs, hay, 2).is_none()); + /// ``` + #[inline] + pub fn captures_read_at<'h>( + &self, + locs: &mut CaptureLocations, + haystack: &'h [u8], + start: usize, + ) -> Option<Match<'h>> { + let input = Input::new(haystack).span(start..haystack.len()); + self.meta.search_captures(&input, &mut locs.0); + locs.0.get_match().map(|m| Match::new(haystack, m.start(), m.end())) + } + + /// An undocumented alias for `captures_read_at`. + /// + /// The `regex-capi` crate previously used this routine, so to avoid + /// breaking that crate, we continue to provide the name as an undocumented + /// alias. + #[doc(hidden)] + #[inline] + pub fn read_captures_at<'h>( + &self, + locs: &mut CaptureLocations, + haystack: &'h [u8], + start: usize, + ) -> Option<Match<'h>> { + self.captures_read_at(locs, haystack, start) + } +} + +/// Auxiliary methods. +impl Regex { + /// Returns the original string of this regex. + /// + /// # Example + /// + /// ``` + /// use regex::bytes::Regex; + /// + /// let re = Regex::new(r"foo\w+bar").unwrap(); + /// assert_eq!(re.as_str(), r"foo\w+bar"); + /// ``` + #[inline] + pub fn as_str(&self) -> &str { + &self.pattern + } + + /// Returns an iterator over the capture names in this regex. + /// + /// The iterator returned yields elements of type `Option<&str>`. That is, + /// the iterator yields values for all capture groups, even ones that are + /// unnamed. The order of the groups corresponds to the order of the group's + /// corresponding opening parenthesis. + /// + /// The first element of the iterator always yields the group corresponding + /// to the overall match, and this group is always unnamed. Therefore, the + /// iterator always yields at least one group. 
+ /// + /// # Example + /// + /// This shows basic usage with a mix of named and unnamed capture groups: + /// + /// ``` + /// use regex::bytes::Regex; + /// + /// let re = Regex::new(r"(?<a>.(?<b>.))(.)(?:.)(?<c>.)").unwrap(); + /// let mut names = re.capture_names(); + /// assert_eq!(names.next(), Some(None)); + /// assert_eq!(names.next(), Some(Some("a"))); + /// assert_eq!(names.next(), Some(Some("b"))); + /// assert_eq!(names.next(), Some(None)); + /// // the '(?:.)' group is non-capturing and so doesn't appear here! + /// assert_eq!(names.next(), Some(Some("c"))); + /// assert_eq!(names.next(), None); + /// ``` + /// + /// The iterator always yields at least one element, even for regexes with + /// no capture groups and even for regexes that can never match: + /// + /// ``` + /// use regex::bytes::Regex; + /// + /// let re = Regex::new(r"").unwrap(); + /// let mut names = re.capture_names(); + /// assert_eq!(names.next(), Some(None)); + /// assert_eq!(names.next(), None); + /// + /// let re = Regex::new(r"[a&&b]").unwrap(); + /// let mut names = re.capture_names(); + /// assert_eq!(names.next(), Some(None)); + /// assert_eq!(names.next(), None); + /// ``` + #[inline] + pub fn capture_names(&self) -> CaptureNames<'_> { + CaptureNames(self.meta.group_info().pattern_names(PatternID::ZERO)) + } + + /// Returns the number of captures groups in this regex. + /// + /// This includes all named and unnamed groups, including the implicit + /// unnamed group that is always present and corresponds to the entire + /// match. + /// + /// Since the implicit unnamed group is always included in this length, the + /// length returned is guaranteed to be greater than zero. + /// + /// # Example + /// + /// ``` + /// use regex::bytes::Regex; + /// + /// let re = Regex::new(r"foo").unwrap(); + /// assert_eq!(1, re.captures_len()); + /// + /// let re = Regex::new(r"(foo)").unwrap(); + /// assert_eq!(2, re.captures_len()); + /// + /// let re = Regex::new(r"(?<a>.(?<b>.))(.)(?:.)(?<c>.)").unwrap(); + /// assert_eq!(5, re.captures_len()); + /// + /// let re = Regex::new(r"[a&&b]").unwrap(); + /// assert_eq!(1, re.captures_len()); + /// ``` + #[inline] + pub fn captures_len(&self) -> usize { + self.meta.group_info().group_len(PatternID::ZERO) + } + + /// Returns the total number of capturing groups that appear in every + /// possible match. + /// + /// If the number of capture groups can vary depending on the match, then + /// this returns `None`. That is, a value is only returned when the number + /// of matching groups is invariant or "static." + /// + /// Note that like [`Regex::captures_len`], this **does** include the + /// implicit capturing group corresponding to the entire match. Therefore, + /// when a non-None value is returned, it is guaranteed to be at least `1`. + /// Stated differently, a return value of `Some(0)` is impossible. + /// + /// # Example + /// + /// This shows a few cases where a static number of capture groups is + /// available and a few cases where it is not. 
+ /// + /// ``` + /// use regex::bytes::Regex; + /// + /// let len = |pattern| { + /// Regex::new(pattern).map(|re| re.static_captures_len()) + /// }; + /// + /// assert_eq!(Some(1), len("a")?); + /// assert_eq!(Some(2), len("(a)")?); + /// assert_eq!(Some(2), len("(a)|(b)")?); + /// assert_eq!(Some(3), len("(a)(b)|(c)(d)")?); + /// assert_eq!(None, len("(a)|b")?); + /// assert_eq!(None, len("a|(b)")?); + /// assert_eq!(None, len("(b)*")?); + /// assert_eq!(Some(2), len("(b)+")?); + /// + /// # Ok::<(), Box<dyn std::error::Error>>(()) + /// ``` + #[inline] + pub fn static_captures_len(&self) -> Option<usize> { + self.meta.static_captures_len() + } + + /// Returns a fresh allocated set of capture locations that can + /// be reused in multiple calls to [`Regex::captures_read`] or + /// [`Regex::captures_read_at`]. + /// + /// # Example + /// + /// ``` + /// use regex::bytes::Regex; + /// + /// let re = Regex::new(r"(.)(.)(\w+)").unwrap(); + /// let mut locs = re.capture_locations(); + /// assert!(re.captures_read(&mut locs, b"Padron").is_some()); + /// assert_eq!(locs.get(0), Some((0, 6))); + /// assert_eq!(locs.get(1), Some((0, 1))); + /// assert_eq!(locs.get(2), Some((1, 2))); + /// assert_eq!(locs.get(3), Some((2, 6))); + /// ``` + #[inline] + pub fn capture_locations(&self) -> CaptureLocations { + CaptureLocations(self.meta.create_captures()) + } + + /// An alias for `capture_locations` to preserve backward compatibility. + /// + /// The `regex-capi` crate uses this method, so to avoid breaking that + /// crate, we continue to export it as an undocumented API. + #[doc(hidden)] + #[inline] + pub fn locations(&self) -> CaptureLocations { + self.capture_locations() + } +} + +/// Represents a single match of a regex in a haystack. +/// +/// A `Match` contains both the start and end byte offsets of the match and the +/// actual substring corresponding to the range of those byte offsets. It is +/// guaranteed that `start <= end`. When `start == end`, the match is empty. +/// +/// Unlike the top-level `Match` type, this `Match` type is produced by APIs +/// that search `&[u8]` haystacks. This means that the offsets in a `Match` can +/// point to anywhere in the haystack, including in a place that splits the +/// UTF-8 encoding of a Unicode scalar value. +/// +/// The lifetime parameter `'h` refers to the lifetime of the matched of the +/// haystack that this match was produced from. +/// +/// # Numbering +/// +/// The byte offsets in a `Match` form a half-open interval. That is, the +/// start of the range is inclusive and the end of the range is exclusive. +/// For example, given a haystack `abcFOOxyz` and a match of `FOO`, its byte +/// offset range starts at `3` and ends at `6`. `3` corresponds to `F` and +/// `6` corresponds to `x`, which is one past the end of the match. This +/// corresponds to the same kind of slicing that Rust uses. +/// +/// For more on why this was chosen over other schemes (aside from being +/// consistent with how Rust the language works), see [this discussion] and +/// [Dijkstra's note on a related topic][note]. +/// +/// [this discussion]: https://github.com/rust-lang/regex/discussions/866 +/// [note]: https://www.cs.utexas.edu/users/EWD/transcriptions/EWD08xx/EWD831.html +/// +/// # Example +/// +/// This example shows the value of each of the methods on `Match` for a +/// particular search. 
+/// +/// ``` +/// use regex::bytes::Regex; +/// +/// let re = Regex::new(r"\p{Greek}+").unwrap(); +/// let hay = "Greek: αβγδ".as_bytes(); +/// let m = re.find(hay).unwrap(); +/// assert_eq!(7, m.start()); +/// assert_eq!(15, m.end()); +/// assert!(!m.is_empty()); +/// assert_eq!(8, m.len()); +/// assert_eq!(7..15, m.range()); +/// assert_eq!("αβγδ".as_bytes(), m.as_bytes()); +/// ``` +#[derive(Copy, Clone, Eq, PartialEq)] +pub struct Match<'h> { + haystack: &'h [u8], + start: usize, + end: usize, +} + +impl<'h> Match<'h> { + /// Returns the byte offset of the start of the match in the haystack. The + /// start of the match corresponds to the position where the match begins + /// and includes the first byte in the match. + /// + /// It is guaranteed that `Match::start() <= Match::end()`. + /// + /// Unlike the top-level `Match` type, the start offset may appear anywhere + /// in the haystack. This includes between the code units of a UTF-8 + /// encoded Unicode scalar value. + #[inline] + pub fn start(&self) -> usize { + self.start + } + + /// Returns the byte offset of the end of the match in the haystack. The + /// end of the match corresponds to the byte immediately following the last + /// byte in the match. This means that `&slice[start..end]` works as one + /// would expect. + /// + /// It is guaranteed that `Match::start() <= Match::end()`. + /// + /// Unlike the top-level `Match` type, the start offset may appear anywhere + /// in the haystack. This includes between the code units of a UTF-8 + /// encoded Unicode scalar value. + #[inline] + pub fn end(&self) -> usize { + self.end + } + + /// Returns true if and only if this match has a length of zero. + /// + /// Note that an empty match can only occur when the regex itself can + /// match the empty string. Here are some examples of regexes that can + /// all match the empty string: `^`, `^$`, `\b`, `a?`, `a*`, `a{0}`, + /// `(foo|\d+|quux)?`. + #[inline] + pub fn is_empty(&self) -> bool { + self.start == self.end + } + + /// Returns the length, in bytes, of this match. + #[inline] + pub fn len(&self) -> usize { + self.end - self.start + } + + /// Returns the range over the starting and ending byte offsets of the + /// match in the haystack. + #[inline] + pub fn range(&self) -> core::ops::Range<usize> { + self.start..self.end + } + + /// Returns the substring of the haystack that matched. + #[inline] + pub fn as_bytes(&self) -> &'h [u8] { + &self.haystack[self.range()] + } + + /// Creates a new match from the given haystack and byte offsets. + #[inline] + fn new(haystack: &'h [u8], start: usize, end: usize) -> Match<'h> { + Match { haystack, start, end } + } +} + +impl<'h> core::fmt::Debug for Match<'h> { + fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { + use regex_automata::util::escape::DebugHaystack; + + let mut fmt = f.debug_struct("Match"); + fmt.field("start", &self.start) + .field("end", &self.end) + .field("bytes", &DebugHaystack(&self.as_bytes())); + + fmt.finish() + } +} + +impl<'h> From<Match<'h>> for &'h [u8] { + fn from(m: Match<'h>) -> &'h [u8] { + m.as_bytes() + } +} + +impl<'h> From<Match<'h>> for core::ops::Range<usize> { + fn from(m: Match<'h>) -> core::ops::Range<usize> { + m.range() + } +} + +/// Represents the capture groups for a single match. +/// +/// Capture groups refer to parts of a regex enclosed in parentheses. They +/// can be optionally named. The purpose of capture groups is to be able to +/// reference different parts of a match based on the original pattern. 
In +/// essence, a `Captures` is a container of [`Match`] values for each group +/// that participated in a regex match. Each `Match` can be looked up by either +/// its capture group index or name (if it has one). +/// +/// For example, say you want to match the individual letters in a 5-letter +/// word: +/// +/// ```text +/// (?<first>\w)(\w)(?:\w)\w(?<last>\w) +/// ``` +/// +/// This regex has 4 capture groups: +/// +/// * The group at index `0` corresponds to the overall match. It is always +/// present in every match and never has a name. +/// * The group at index `1` with name `first` corresponding to the first +/// letter. +/// * The group at index `2` with no name corresponding to the second letter. +/// * The group at index `3` with name `last` corresponding to the fifth and +/// last letter. +/// +/// Notice that `(?:\w)` was not listed above as a capture group despite it +/// being enclosed in parentheses. That's because `(?:pattern)` is a special +/// syntax that permits grouping but *without* capturing. The reason for not +/// treating it as a capture is that tracking and reporting capture groups +/// requires additional state that may lead to slower searches. So using as few +/// capture groups as possible can help performance. (Although the difference +/// in performance of a couple of capture groups is likely immaterial.) +/// +/// Values with this type are created by [`Regex::captures`] or +/// [`Regex::captures_iter`]. +/// +/// `'h` is the lifetime of the haystack that these captures were matched from. +/// +/// # Example +/// +/// ``` +/// use regex::bytes::Regex; +/// +/// let re = Regex::new(r"(?<first>\w)(\w)(?:\w)\w(?<last>\w)").unwrap(); +/// let caps = re.captures(b"toady").unwrap(); +/// assert_eq!(b"toady", &caps[0]); +/// assert_eq!(b"t", &caps["first"]); +/// assert_eq!(b"o", &caps[2]); +/// assert_eq!(b"y", &caps["last"]); +/// ``` +pub struct Captures<'h> { + haystack: &'h [u8], + caps: captures::Captures, + static_captures_len: Option<usize>, +} + +impl<'h> Captures<'h> { + /// Returns the `Match` associated with the capture group at index `i`. If + /// `i` does not correspond to a capture group, or if the capture group did + /// not participate in the match, then `None` is returned. + /// + /// When `i == 0`, this is guaranteed to return a non-`None` value. + /// + /// # Examples + /// + /// Get the substring that matched with a default of an empty string if the + /// group didn't participate in the match: + /// + /// ``` + /// use regex::bytes::Regex; + /// + /// let re = Regex::new(r"[a-z]+(?:([0-9]+)|([A-Z]+))").unwrap(); + /// let caps = re.captures(b"abc123").unwrap(); + /// + /// let substr1 = caps.get(1).map_or(&b""[..], |m| m.as_bytes()); + /// let substr2 = caps.get(2).map_or(&b""[..], |m| m.as_bytes()); + /// assert_eq!(substr1, b"123"); + /// assert_eq!(substr2, b""); + /// ``` + #[inline] + pub fn get(&self, i: usize) -> Option<Match<'h>> { + self.caps + .get_group(i) + .map(|sp| Match::new(self.haystack, sp.start, sp.end)) + } + + /// Return the overall match for the capture. + /// + /// This returns the match for index `0`. 
That is it is equivalent to + /// `m.get(0).unwrap()` + /// + /// # Example + /// + /// ``` + /// use regex::bytes::Regex; + /// + /// let re = Regex::new(r"[a-z]+([0-9]+)").unwrap(); + /// let caps = re.captures(b" abc123-def").unwrap(); + /// + /// assert_eq!(caps.get_match().as_bytes(), b"abc123"); + /// ``` + #[inline] + pub fn get_match(&self) -> Match<'h> { + self.get(0).unwrap() + } + + /// Returns the `Match` associated with the capture group named `name`. If + /// `name` isn't a valid capture group or it refers to a group that didn't + /// match, then `None` is returned. + /// + /// Note that unlike `caps["name"]`, this returns a `Match` whose lifetime + /// matches the lifetime of the haystack in this `Captures` value. + /// Conversely, the substring returned by `caps["name"]` has a lifetime + /// of the `Captures` value, which is likely shorter than the lifetime of + /// the haystack. In some cases, it may be necessary to use this method to + /// access the matching substring instead of the `caps["name"]` notation. + /// + /// # Examples + /// + /// Get the substring that matched with a default of an empty string if the + /// group didn't participate in the match: + /// + /// ``` + /// use regex::bytes::Regex; + /// + /// let re = Regex::new( + /// r"[a-z]+(?:(?<numbers>[0-9]+)|(?<letters>[A-Z]+))", + /// ).unwrap(); + /// let caps = re.captures(b"abc123").unwrap(); + /// + /// let numbers = caps.name("numbers").map_or(&b""[..], |m| m.as_bytes()); + /// let letters = caps.name("letters").map_or(&b""[..], |m| m.as_bytes()); + /// assert_eq!(numbers, b"123"); + /// assert_eq!(letters, b""); + /// ``` + #[inline] + pub fn name(&self, name: &str) -> Option<Match<'h>> { + self.caps + .get_group_by_name(name) + .map(|sp| Match::new(self.haystack, sp.start, sp.end)) + } + + /// This is a convenience routine for extracting the substrings + /// corresponding to matching capture groups. + /// + /// This returns a tuple where the first element corresponds to the full + /// substring of the haystack that matched the regex. The second element is + /// an array of substrings, with each corresponding to the substring that + /// matched for a particular capture group. + /// + /// # Panics + /// + /// This panics if the number of possible matching groups in this + /// `Captures` value is not fixed to `N` in all circumstances. + /// More precisely, this routine only works when `N` is equivalent to + /// [`Regex::static_captures_len`]. + /// + /// Stated more plainly, if the number of matching capture groups in a + /// regex can vary from match to match, then this function always panics. + /// + /// For example, `(a)(b)|(c)` could produce two matching capture groups + /// or one matching capture group for any given match. Therefore, one + /// cannot use `extract` with such a pattern. + /// + /// But a pattern like `(a)(b)|(c)(d)` can be used with `extract` because + /// the number of capture groups in every match is always equivalent, + /// even if the capture _indices_ in each match are not. 
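+ ///
+ /// As a brief illustrative sketch of that last point, either branch of
+ /// `(a)(b)|(c)(d)` yields exactly two matching groups, so `extract` works
+ /// for both:
+ ///
+ /// ```
+ /// use regex::bytes::Regex;
+ ///
+ /// let re = Regex::new(r"(a)(b)|(c)(d)").unwrap();
+ /// // First branch: groups 1 and 2 participate.
+ /// let (_, [x, y]) = re.captures(b"ab").unwrap().extract();
+ /// assert_eq!((x, y), (&b"a"[..], &b"b"[..]));
+ /// // Second branch: groups 3 and 4 participate instead.
+ /// let (_, [x, y]) = re.captures(b"cd").unwrap().extract();
+ /// assert_eq!((x, y), (&b"c"[..], &b"d"[..]));
+ /// ```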
+ /// + /// # Example + /// + /// ``` + /// use regex::bytes::Regex; + /// + /// let re = Regex::new(r"([0-9]{4})-([0-9]{2})-([0-9]{2})").unwrap(); + /// let hay = b"On 2010-03-14, I became a Tennessee lamb."; + /// let Some((full, [year, month, day])) = + /// re.captures(hay).map(|caps| caps.extract()) else { return }; + /// assert_eq!(b"2010-03-14", full); + /// assert_eq!(b"2010", year); + /// assert_eq!(b"03", month); + /// assert_eq!(b"14", day); + /// ``` + /// + /// # Example: iteration + /// + /// This example shows how to use this method when iterating over all + /// `Captures` matches in a haystack. + /// + /// ``` + /// use regex::bytes::Regex; + /// + /// let re = Regex::new(r"([0-9]{4})-([0-9]{2})-([0-9]{2})").unwrap(); + /// let hay = b"1973-01-05, 1975-08-25 and 1980-10-18"; + /// + /// let mut dates: Vec<(&[u8], &[u8], &[u8])> = vec![]; + /// for (_, [y, m, d]) in re.captures_iter(hay).map(|c| c.extract()) { + /// dates.push((y, m, d)); + /// } + /// assert_eq!(dates, vec![ + /// (&b"1973"[..], &b"01"[..], &b"05"[..]), + /// (&b"1975"[..], &b"08"[..], &b"25"[..]), + /// (&b"1980"[..], &b"10"[..], &b"18"[..]), + /// ]); + /// ``` + /// + /// # Example: parsing different formats + /// + /// This API is particularly useful when you need to extract a particular + /// value that might occur in a different format. Consider, for example, + /// an identifier that might be in double quotes or single quotes: + /// + /// ``` + /// use regex::bytes::Regex; + /// + /// let re = Regex::new(r#"id:(?:"([^"]+)"|'([^']+)')"#).unwrap(); + /// let hay = br#"The first is id:"foo" and the second is id:'bar'."#; + /// let mut ids = vec![]; + /// for (_, [id]) in re.captures_iter(hay).map(|c| c.extract()) { + /// ids.push(id); + /// } + /// assert_eq!(ids, vec![b"foo", b"bar"]); + /// ``` + pub fn extract<const N: usize>(&self) -> (&'h [u8], [&'h [u8]; N]) { + let len = self + .static_captures_len + .expect("number of capture groups can vary in a match") + .checked_sub(1) + .expect("number of groups is always greater than zero"); + assert_eq!(N, len, "asked for {N} groups, but must ask for {len}"); + // The regex-automata variant of extract is a bit more permissive. + // It doesn't require the number of matching capturing groups to be + // static, and you can even request fewer groups than what's there. So + // this is guaranteed to never panic because we've asserted above that + // the user has requested precisely the number of groups that must be + // present in any match for this regex. + self.caps.extract_bytes(self.haystack) + } + + /// Expands all instances of `$ref` in `replacement` to the corresponding + /// capture group, and writes them to the `dst` buffer given. A `ref` can + /// be a capture group index or a name. If `ref` doesn't refer to a capture + /// group that participated in the match, then it is replaced with the + /// empty string. + /// + /// # Format + /// + /// The format of the replacement string supports two different kinds of + /// capture references: unbraced and braced. + /// + /// For the unbraced format, the format supported is `$ref` where `name` + /// can be any character in the class `[0-9A-Za-z_]`. `ref` is always + /// the longest possible parse. So for example, `$1a` corresponds to the + /// capture group named `1a` and not the capture group at index `1`. If + /// `ref` matches `^[0-9]+$`, then it is treated as a capture group index + /// itself and not a name. 
+ /// + /// For the braced format, the format supported is `${ref}` where `ref` can + /// be any sequence of bytes except for `}`. If no closing brace occurs, + /// then it is not considered a capture reference. As with the unbraced + /// format, if `ref` matches `^[0-9]+$`, then it is treated as a capture + /// group index and not a name. + /// + /// The braced format is useful for exerting precise control over the name + /// of the capture reference. For example, `${1}a` corresponds to the + /// capture group reference `1` followed by the letter `a`, where as `$1a` + /// (as mentioned above) corresponds to the capture group reference `1a`. + /// The braced format is also useful for expressing capture group names + /// that use characters not supported by the unbraced format. For example, + /// `${foo[bar].baz}` refers to the capture group named `foo[bar].baz`. + /// + /// If a capture group reference is found and it does not refer to a valid + /// capture group, then it will be replaced with the empty string. + /// + /// To write a literal `$`, use `$$`. + /// + /// # Example + /// + /// ``` + /// use regex::bytes::Regex; + /// + /// let re = Regex::new( + /// r"(?<day>[0-9]{2})-(?<month>[0-9]{2})-(?<year>[0-9]{4})", + /// ).unwrap(); + /// let hay = b"On 14-03-2010, I became a Tennessee lamb."; + /// let caps = re.captures(hay).unwrap(); + /// + /// let mut dst = vec![]; + /// caps.expand(b"year=$year, month=$month, day=$day", &mut dst); + /// assert_eq!(dst, b"year=2010, month=03, day=14"); + /// ``` + #[inline] + pub fn expand(&self, replacement: &[u8], dst: &mut Vec<u8>) { + self.caps.interpolate_bytes_into(self.haystack, replacement, dst); + } + + /// Returns an iterator over all capture groups. This includes both + /// matching and non-matching groups. + /// + /// The iterator always yields at least one matching group: the first group + /// (at index `0`) with no name. Subsequent groups are returned in the order + /// of their opening parenthesis in the regex. + /// + /// The elements yielded have type `Option<Match<'h>>`, where a non-`None` + /// value is present if the capture group matches. + /// + /// # Example + /// + /// ``` + /// use regex::bytes::Regex; + /// + /// let re = Regex::new(r"(\w)(\d)?(\w)").unwrap(); + /// let caps = re.captures(b"AZ").unwrap(); + /// + /// let mut it = caps.iter(); + /// assert_eq!(it.next().unwrap().map(|m| m.as_bytes()), Some(&b"AZ"[..])); + /// assert_eq!(it.next().unwrap().map(|m| m.as_bytes()), Some(&b"A"[..])); + /// assert_eq!(it.next().unwrap().map(|m| m.as_bytes()), None); + /// assert_eq!(it.next().unwrap().map(|m| m.as_bytes()), Some(&b"Z"[..])); + /// assert_eq!(it.next(), None); + /// ``` + #[inline] + pub fn iter<'c>(&'c self) -> SubCaptureMatches<'c, 'h> { + SubCaptureMatches { haystack: self.haystack, it: self.caps.iter() } + } + + /// Returns the total number of capture groups. This includes both + /// matching and non-matching groups. + /// + /// The length returned is always equivalent to the number of elements + /// yielded by [`Captures::iter`]. Consequently, the length is always + /// greater than zero since every `Captures` value always includes the + /// match for the entire regex. 
+ /// + /// # Example + /// + /// ``` + /// use regex::bytes::Regex; + /// + /// let re = Regex::new(r"(\w)(\d)?(\w)").unwrap(); + /// let caps = re.captures(b"AZ").unwrap(); + /// assert_eq!(caps.len(), 4); + /// ``` + #[inline] + pub fn len(&self) -> usize { + self.caps.group_len() + } +} + +impl<'h> core::fmt::Debug for Captures<'h> { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + /// A little helper type to provide a nice map-like debug + /// representation for our capturing group spans. + /// + /// regex-automata has something similar, but it includes the pattern + /// ID in its debug output, which is confusing. It also doesn't include + /// that strings that match because a regex-automata `Captures` doesn't + /// borrow the haystack. + struct CapturesDebugMap<'a> { + caps: &'a Captures<'a>, + } + + impl<'a> core::fmt::Debug for CapturesDebugMap<'a> { + fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { + let mut map = f.debug_map(); + let names = + self.caps.caps.group_info().pattern_names(PatternID::ZERO); + for (group_index, maybe_name) in names.enumerate() { + let key = Key(group_index, maybe_name); + match self.caps.get(group_index) { + None => map.entry(&key, &None::<()>), + Some(mat) => map.entry(&key, &Value(mat)), + }; + } + map.finish() + } + } + + struct Key<'a>(usize, Option<&'a str>); + + impl<'a> core::fmt::Debug for Key<'a> { + fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { + write!(f, "{}", self.0)?; + if let Some(name) = self.1 { + write!(f, "/{name:?}")?; + } + Ok(()) + } + } + + struct Value<'a>(Match<'a>); + + impl<'a> core::fmt::Debug for Value<'a> { + fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { + use regex_automata::util::escape::DebugHaystack; + + write!( + f, + "{}..{}/{:?}", + self.0.start(), + self.0.end(), + DebugHaystack(self.0.as_bytes()) + ) + } + } + + f.debug_tuple("Captures") + .field(&CapturesDebugMap { caps: self }) + .finish() + } +} + +/// Get a matching capture group's haystack substring by index. +/// +/// The haystack substring returned can't outlive the `Captures` object if this +/// method is used, because of how `Index` is defined (normally `a[i]` is part +/// of `a` and can't outlive it). To work around this limitation, do that, use +/// [`Captures::get`] instead. +/// +/// `'h` is the lifetime of the matched haystack, but the lifetime of the +/// `&str` returned by this implementation is the lifetime of the `Captures` +/// value itself. +/// +/// # Panics +/// +/// If there is no matching group at the given index. +impl<'h> core::ops::Index<usize> for Captures<'h> { + type Output = [u8]; + + // The lifetime is written out to make it clear that the &str returned + // does NOT have a lifetime equivalent to 'h. + fn index<'a>(&'a self, i: usize) -> &'a [u8] { + self.get(i) + .map(|m| m.as_bytes()) + .unwrap_or_else(|| panic!("no group at index '{i}'")) + } +} + +/// Get a matching capture group's haystack substring by name. +/// +/// The haystack substring returned can't outlive the `Captures` object if this +/// method is used, because of how `Index` is defined (normally `a[i]` is part +/// of `a` and can't outlive it). To work around this limitation, do that, use +/// [`Captures::name`] instead. +/// +/// `'h` is the lifetime of the matched haystack, but the lifetime of the +/// `&str` returned by this implementation is the lifetime of the `Captures` +/// value itself. +/// +/// `'n` is the lifetime of the group name used to index the `Captures` value. 
+/// +/// # Panics +/// +/// If there is no matching group at the given name. +impl<'h, 'n> core::ops::Index<&'n str> for Captures<'h> { + type Output = [u8]; + + fn index<'a>(&'a self, name: &'n str) -> &'a [u8] { + self.name(name) + .map(|m| m.as_bytes()) + .unwrap_or_else(|| panic!("no group named '{name}'")) + } +} + +/// A low level representation of the byte offsets of each capture group. +/// +/// You can think of this as a lower level [`Captures`], where this type does +/// not support named capturing groups directly and it does not borrow the +/// haystack that these offsets were matched on. +/// +/// Primarily, this type is useful when using the lower level `Regex` APIs such +/// as [`Regex::captures_read`], which permits amortizing the allocation in +/// which capture match offsets are stored. +/// +/// In order to build a value of this type, you'll need to call the +/// [`Regex::capture_locations`] method. The value returned can then be reused +/// in subsequent searches for that regex. Using it for other regexes may +/// result in a panic or otherwise incorrect results. +/// +/// # Example +/// +/// This example shows how to create and use `CaptureLocations` in a search. +/// +/// ``` +/// use regex::bytes::Regex; +/// +/// let re = Regex::new(r"(?<first>\w+)\s+(?<last>\w+)").unwrap(); +/// let mut locs = re.capture_locations(); +/// let m = re.captures_read(&mut locs, b"Bruce Springsteen").unwrap(); +/// assert_eq!(0..17, m.range()); +/// assert_eq!(Some((0, 17)), locs.get(0)); +/// assert_eq!(Some((0, 5)), locs.get(1)); +/// assert_eq!(Some((6, 17)), locs.get(2)); +/// +/// // Asking for an invalid capture group always returns None. +/// assert_eq!(None, locs.get(3)); +/// # // literals are too big for 32-bit usize: #1041 +/// # #[cfg(target_pointer_width = "64")] +/// assert_eq!(None, locs.get(34973498648)); +/// # #[cfg(target_pointer_width = "64")] +/// assert_eq!(None, locs.get(9944060567225171988)); +/// ``` +#[derive(Clone, Debug)] +pub struct CaptureLocations(captures::Captures); + +/// A type alias for `CaptureLocations` for backwards compatibility. +/// +/// Previously, we exported `CaptureLocations` as `Locations` in an +/// undocumented API. To prevent breaking that code (e.g., in `regex-capi`), +/// we continue re-exporting the same undocumented API. +#[doc(hidden)] +pub type Locations = CaptureLocations; + +impl CaptureLocations { + /// Returns the start and end byte offsets of the capture group at index + /// `i`. This returns `None` if `i` is not a valid capture group or if the + /// capture group did not match. + /// + /// # Example + /// + /// ``` + /// use regex::bytes::Regex; + /// + /// let re = Regex::new(r"(?<first>\w+)\s+(?<last>\w+)").unwrap(); + /// let mut locs = re.capture_locations(); + /// re.captures_read(&mut locs, b"Bruce Springsteen").unwrap(); + /// assert_eq!(Some((0, 17)), locs.get(0)); + /// assert_eq!(Some((0, 5)), locs.get(1)); + /// assert_eq!(Some((6, 17)), locs.get(2)); + /// ``` + #[inline] + pub fn get(&self, i: usize) -> Option<(usize, usize)> { + self.0.get_group(i).map(|sp| (sp.start, sp.end)) + } + + /// Returns the total number of capture groups (even if they didn't match). + /// That is, the length returned is unaffected by the result of a search. + /// + /// This is always at least `1` since every regex has at least `1` + /// capturing group that corresponds to the entire match. 
+ /// + /// # Example + /// + /// ``` + /// use regex::bytes::Regex; + /// + /// let re = Regex::new(r"(?<first>\w+)\s+(?<last>\w+)").unwrap(); + /// let mut locs = re.capture_locations(); + /// assert_eq!(3, locs.len()); + /// re.captures_read(&mut locs, b"Bruce Springsteen").unwrap(); + /// assert_eq!(3, locs.len()); + /// ``` + /// + /// Notice that the length is always at least `1`, regardless of the regex: + /// + /// ``` + /// use regex::bytes::Regex; + /// + /// let re = Regex::new(r"").unwrap(); + /// let locs = re.capture_locations(); + /// assert_eq!(1, locs.len()); + /// + /// // [a&&b] is a regex that never matches anything. + /// let re = Regex::new(r"[a&&b]").unwrap(); + /// let locs = re.capture_locations(); + /// assert_eq!(1, locs.len()); + /// ``` + #[inline] + pub fn len(&self) -> usize { + // self.0.group_len() returns 0 if the underlying captures doesn't + // represent a match, but the behavior guaranteed for this method is + // that the length doesn't change based on a match or not. + self.0.group_info().group_len(PatternID::ZERO) + } + + /// An alias for the `get` method for backwards compatibility. + /// + /// Previously, we exported `get` as `pos` in an undocumented API. To + /// prevent breaking that code (e.g., in `regex-capi`), we continue + /// re-exporting the same undocumented API. + #[doc(hidden)] + #[inline] + pub fn pos(&self, i: usize) -> Option<(usize, usize)> { + self.get(i) + } +} + +/// An iterator over all non-overlapping matches in a haystack. +/// +/// This iterator yields [`Match`] values. The iterator stops when no more +/// matches can be found. +/// +/// `'r` is the lifetime of the compiled regular expression and `'h` is the +/// lifetime of the haystack. +/// +/// This iterator is created by [`Regex::find_iter`]. +/// +/// # Time complexity +/// +/// Note that since an iterator runs potentially many searches on the haystack +/// and since each search has worst case `O(m * n)` time complexity, the +/// overall worst case time complexity for iteration is `O(m * n^2)`. +#[derive(Debug)] +pub struct Matches<'r, 'h> { + haystack: &'h [u8], + it: meta::FindMatches<'r, 'h>, +} + +impl<'r, 'h> Iterator for Matches<'r, 'h> { + type Item = Match<'h>; + + #[inline] + fn next(&mut self) -> Option<Match<'h>> { + self.it + .next() + .map(|sp| Match::new(self.haystack, sp.start(), sp.end())) + } + + #[inline] + fn count(self) -> usize { + // This can actually be up to 2x faster than calling `next()` until + // completion, because counting matches when using a DFA only requires + // finding the end of each match. But returning a `Match` via `next()` + // requires the start of each match which, with a DFA, requires a + // reverse forward scan to find it. + self.it.count() + } +} + +impl<'r, 'h> core::iter::FusedIterator for Matches<'r, 'h> {} + +/// An iterator over all non-overlapping capture matches in a haystack. +/// +/// This iterator yields [`Captures`] values. The iterator stops when no more +/// matches can be found. +/// +/// `'r` is the lifetime of the compiled regular expression and `'h` is the +/// lifetime of the matched string. +/// +/// This iterator is created by [`Regex::captures_iter`]. +/// +/// # Time complexity +/// +/// Note that since an iterator runs potentially many searches on the haystack +/// and since each search has worst case `O(m * n)` time complexity, the +/// overall worst case time complexity for iteration is `O(m * n^2)`. 
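+///
+/// # Example
+///
+/// A short usage sketch (the pattern and haystack below are illustrative
+/// only):
+///
+/// ```
+/// use regex::bytes::Regex;
+///
+/// let re = Regex::new(r"(\w+)=(\w+)").unwrap();
+/// let hay = b"a=1 b=2";
+/// let mut pairs = vec![];
+/// for caps in re.captures_iter(hay) {
+///     pairs.push((caps[1].to_vec(), caps[2].to_vec()));
+/// }
+/// assert_eq!(pairs, vec![
+///     (b"a".to_vec(), b"1".to_vec()),
+///     (b"b".to_vec(), b"2".to_vec()),
+/// ]);
+/// ```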
+#[derive(Debug)] +pub struct CaptureMatches<'r, 'h> { + haystack: &'h [u8], + it: meta::CapturesMatches<'r, 'h>, +} + +impl<'r, 'h> Iterator for CaptureMatches<'r, 'h> { + type Item = Captures<'h>; + + #[inline] + fn next(&mut self) -> Option<Captures<'h>> { + let static_captures_len = self.it.regex().static_captures_len(); + self.it.next().map(|caps| Captures { + haystack: self.haystack, + caps, + static_captures_len, + }) + } + + #[inline] + fn count(self) -> usize { + // This can actually be up to 2x faster than calling `next()` until + // completion, because counting matches when using a DFA only requires + // finding the end of each match. But returning a `Match` via `next()` + // requires the start of each match which, with a DFA, requires a + // reverse forward scan to find it. + self.it.count() + } +} + +impl<'r, 'h> core::iter::FusedIterator for CaptureMatches<'r, 'h> {} + +/// An iterator over all substrings delimited by a regex match. +/// +/// `'r` is the lifetime of the compiled regular expression and `'h` is the +/// lifetime of the byte string being split. +/// +/// This iterator is created by [`Regex::split`]. +/// +/// # Time complexity +/// +/// Note that since an iterator runs potentially many searches on the haystack +/// and since each search has worst case `O(m * n)` time complexity, the +/// overall worst case time complexity for iteration is `O(m * n^2)`. +#[derive(Debug)] +pub struct Split<'r, 'h> { + haystack: &'h [u8], + it: meta::Split<'r, 'h>, +} + +impl<'r, 'h> Iterator for Split<'r, 'h> { + type Item = &'h [u8]; + + #[inline] + fn next(&mut self) -> Option<&'h [u8]> { + self.it.next().map(|span| &self.haystack[span]) + } +} + +impl<'r, 'h> core::iter::FusedIterator for Split<'r, 'h> {} + +/// An iterator over at most `N` substrings delimited by a regex match. +/// +/// The last substring yielded by this iterator will be whatever remains after +/// `N-1` splits. +/// +/// `'r` is the lifetime of the compiled regular expression and `'h` is the +/// lifetime of the byte string being split. +/// +/// This iterator is created by [`Regex::splitn`]. +/// +/// # Time complexity +/// +/// Note that since an iterator runs potentially many searches on the haystack +/// and since each search has worst case `O(m * n)` time complexity, the +/// overall worst case time complexity for iteration is `O(m * n^2)`. +/// +/// Although note that the worst case time here has an upper bound given +/// by the `limit` parameter to [`Regex::splitn`]. +#[derive(Debug)] +pub struct SplitN<'r, 'h> { + haystack: &'h [u8], + it: meta::SplitN<'r, 'h>, +} + +impl<'r, 'h> Iterator for SplitN<'r, 'h> { + type Item = &'h [u8]; + + #[inline] + fn next(&mut self) -> Option<&'h [u8]> { + self.it.next().map(|span| &self.haystack[span]) + } + + #[inline] + fn size_hint(&self) -> (usize, Option<usize>) { + self.it.size_hint() + } +} + +impl<'r, 'h> core::iter::FusedIterator for SplitN<'r, 'h> {} + +/// An iterator over the names of all capture groups in a regex. +/// +/// This iterator yields values of type `Option<&str>` in order of the opening +/// capture group parenthesis in the regex pattern. `None` is yielded for +/// groups with no name. The first element always corresponds to the implicit +/// and unnamed group for the overall match. +/// +/// `'r` is the lifetime of the compiled regular expression. +/// +/// This iterator is created by [`Regex::capture_names`]. 
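+///
+/// # Example
+///
+/// A brief sketch of the values this iterator yields (the pattern below is
+/// illustrative only):
+///
+/// ```
+/// use regex::bytes::Regex;
+///
+/// let re = Regex::new(r"(?<y>[0-9]{4})-([0-9]{2})").unwrap();
+/// let names: Vec<Option<&str>> = re.capture_names().collect();
+/// assert_eq!(names, vec![None, Some("y"), None]);
+/// ```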
+#[derive(Clone, Debug)] +pub struct CaptureNames<'r>(captures::GroupInfoPatternNames<'r>); + +impl<'r> Iterator for CaptureNames<'r> { + type Item = Option<&'r str>; + + #[inline] + fn next(&mut self) -> Option<Option<&'r str>> { + self.0.next() + } + + #[inline] + fn size_hint(&self) -> (usize, Option<usize>) { + self.0.size_hint() + } + + #[inline] + fn count(self) -> usize { + self.0.count() + } +} + +impl<'r> ExactSizeIterator for CaptureNames<'r> {} + +impl<'r> core::iter::FusedIterator for CaptureNames<'r> {} + +/// An iterator over all group matches in a [`Captures`] value. +/// +/// This iterator yields values of type `Option<Match<'h>>`, where `'h` is the +/// lifetime of the haystack that the matches are for. The order of elements +/// yielded corresponds to the order of the opening parenthesis for the group +/// in the regex pattern. `None` is yielded for groups that did not participate +/// in the match. +/// +/// The first element always corresponds to the implicit group for the overall +/// match. Since this iterator is created by a [`Captures`] value, and a +/// `Captures` value is only created when a match occurs, it follows that the +/// first element yielded by this iterator is guaranteed to be non-`None`. +/// +/// The lifetime `'c` corresponds to the lifetime of the `Captures` value that +/// created this iterator, and the lifetime `'h` corresponds to the originally +/// matched haystack. +#[derive(Clone, Debug)] +pub struct SubCaptureMatches<'c, 'h> { + haystack: &'h [u8], + it: captures::CapturesPatternIter<'c>, +} + +impl<'c, 'h> Iterator for SubCaptureMatches<'c, 'h> { + type Item = Option<Match<'h>>; + + #[inline] + fn next(&mut self) -> Option<Option<Match<'h>>> { + self.it.next().map(|group| { + group.map(|sp| Match::new(self.haystack, sp.start, sp.end)) + }) + } + + #[inline] + fn size_hint(&self) -> (usize, Option<usize>) { + self.it.size_hint() + } + + #[inline] + fn count(self) -> usize { + self.it.count() + } +} + +impl<'c, 'h> ExactSizeIterator for SubCaptureMatches<'c, 'h> {} + +impl<'c, 'h> core::iter::FusedIterator for SubCaptureMatches<'c, 'h> {} + +/// A trait for types that can be used to replace matches in a haystack. +/// +/// In general, users of this crate shouldn't need to implement this trait, +/// since implementations are already provided for `&[u8]` along with other +/// variants of byte string types, as well as `FnMut(&Captures) -> Vec<u8>` (or +/// any `FnMut(&Captures) -> T` where `T: AsRef<[u8]>`). Those cover most use +/// cases, but callers can implement this trait directly if necessary. +/// +/// # Example +/// +/// This example shows a basic implementation of the `Replacer` trait. This can +/// be done much more simply using the replacement byte string interpolation +/// support (e.g., `$first $last`), but this approach avoids needing to parse +/// the replacement byte string at all. +/// +/// ``` +/// use regex::bytes::{Captures, Regex, Replacer}; +/// +/// struct NameSwapper; +/// +/// impl Replacer for NameSwapper { +/// fn replace_append(&mut self, caps: &Captures<'_>, dst: &mut Vec<u8>) { +/// dst.extend_from_slice(&caps["first"]); +/// dst.extend_from_slice(b" "); +/// dst.extend_from_slice(&caps["last"]); +/// } +/// } +/// +/// let re = Regex::new(r"(?<last>[^,\s]+),\s+(?<first>\S+)").unwrap(); +/// let result = re.replace(b"Springsteen, Bruce", NameSwapper); +/// assert_eq!(result, &b"Bruce Springsteen"[..]); +/// ``` +pub trait Replacer { + /// Appends possibly empty data to `dst` to replace the current match. 
+ /// + /// The current match is represented by `caps`, which is guaranteed to have + /// a match at capture group `0`. + /// + /// For example, a no-op replacement would be + /// `dst.extend_from_slice(&caps[0])`. + fn replace_append(&mut self, caps: &Captures<'_>, dst: &mut Vec<u8>); + + /// Return a fixed unchanging replacement byte string. + /// + /// When doing replacements, if access to [`Captures`] is not needed (e.g., + /// the replacement byte string does not need `$` expansion), then it can + /// be beneficial to avoid finding sub-captures. + /// + /// In general, this is called once for every call to a replacement routine + /// such as [`Regex::replace_all`]. + fn no_expansion<'r>(&'r mut self) -> Option<Cow<'r, [u8]>> { + None + } + + /// Returns a type that implements `Replacer`, but that borrows and wraps + /// this `Replacer`. + /// + /// This is useful when you want to take a generic `Replacer` (which might + /// not be cloneable) and use it without consuming it, so it can be used + /// more than once. + /// + /// # Example + /// + /// ``` + /// use regex::bytes::{Regex, Replacer}; + /// + /// fn replace_all_twice<R: Replacer>( + /// re: Regex, + /// src: &[u8], + /// mut rep: R, + /// ) -> Vec<u8> { + /// let dst = re.replace_all(src, rep.by_ref()); + /// let dst = re.replace_all(&dst, rep.by_ref()); + /// dst.into_owned() + /// } + /// ``` + fn by_ref<'r>(&'r mut self) -> ReplacerRef<'r, Self> { + ReplacerRef(self) + } +} + +impl<'a, const N: usize> Replacer for &'a [u8; N] { + fn replace_append(&mut self, caps: &Captures<'_>, dst: &mut Vec<u8>) { + caps.expand(&**self, dst); + } + + fn no_expansion(&mut self) -> Option<Cow<'_, [u8]>> { + no_expansion(self) + } +} + +impl<const N: usize> Replacer for [u8; N] { + fn replace_append(&mut self, caps: &Captures<'_>, dst: &mut Vec<u8>) { + caps.expand(&*self, dst); + } + + fn no_expansion(&mut self) -> Option<Cow<'_, [u8]>> { + no_expansion(self) + } +} + +impl<'a> Replacer for &'a [u8] { + fn replace_append(&mut self, caps: &Captures<'_>, dst: &mut Vec<u8>) { + caps.expand(*self, dst); + } + + fn no_expansion(&mut self) -> Option<Cow<'_, [u8]>> { + no_expansion(self) + } +} + +impl<'a> Replacer for &'a Vec<u8> { + fn replace_append(&mut self, caps: &Captures<'_>, dst: &mut Vec<u8>) { + caps.expand(*self, dst); + } + + fn no_expansion(&mut self) -> Option<Cow<'_, [u8]>> { + no_expansion(self) + } +} + +impl Replacer for Vec<u8> { + fn replace_append(&mut self, caps: &Captures<'_>, dst: &mut Vec<u8>) { + caps.expand(self, dst); + } + + fn no_expansion(&mut self) -> Option<Cow<'_, [u8]>> { + no_expansion(self) + } +} + +impl<'a> Replacer for Cow<'a, [u8]> { + fn replace_append(&mut self, caps: &Captures<'_>, dst: &mut Vec<u8>) { + caps.expand(self.as_ref(), dst); + } + + fn no_expansion(&mut self) -> Option<Cow<'_, [u8]>> { + no_expansion(self) + } +} + +impl<'a> Replacer for &'a Cow<'a, [u8]> { + fn replace_append(&mut self, caps: &Captures<'_>, dst: &mut Vec<u8>) { + caps.expand(self.as_ref(), dst); + } + + fn no_expansion(&mut self) -> Option<Cow<'_, [u8]>> { + no_expansion(self) + } +} + +impl<F, T> Replacer for F +where + F: FnMut(&Captures<'_>) -> T, + T: AsRef<[u8]>, +{ + fn replace_append(&mut self, caps: &Captures<'_>, dst: &mut Vec<u8>) { + dst.extend_from_slice((*self)(caps).as_ref()); + } +} + +/// A by-reference adaptor for a [`Replacer`]. +/// +/// This permits reusing the same `Replacer` value in multiple calls to a +/// replacement routine like [`Regex::replace_all`]. 
+/// +/// This type is created by [`Replacer::by_ref`]. +#[derive(Debug)] +pub struct ReplacerRef<'a, R: ?Sized>(&'a mut R); + +impl<'a, R: Replacer + ?Sized + 'a> Replacer for ReplacerRef<'a, R> { + fn replace_append(&mut self, caps: &Captures<'_>, dst: &mut Vec<u8>) { + self.0.replace_append(caps, dst) + } + + fn no_expansion<'r>(&'r mut self) -> Option<Cow<'r, [u8]>> { + self.0.no_expansion() + } +} + +/// A helper type for forcing literal string replacement. +/// +/// It can be used with routines like [`Regex::replace`] and +/// [`Regex::replace_all`] to do a literal string replacement without expanding +/// `$name` to their corresponding capture groups. This can be both convenient +/// (to avoid escaping `$`, for example) and faster (since capture groups +/// don't need to be found). +/// +/// `'s` is the lifetime of the literal string to use. +/// +/// # Example +/// +/// ``` +/// use regex::bytes::{NoExpand, Regex}; +/// +/// let re = Regex::new(r"(?<last>[^,\s]+),\s+(\S+)").unwrap(); +/// let result = re.replace(b"Springsteen, Bruce", NoExpand(b"$2 $last")); +/// assert_eq!(result, &b"$2 $last"[..]); +/// ``` +#[derive(Clone, Debug)] +pub struct NoExpand<'s>(pub &'s [u8]); + +impl<'s> Replacer for NoExpand<'s> { + fn replace_append(&mut self, _: &Captures<'_>, dst: &mut Vec<u8>) { + dst.extend_from_slice(self.0); + } + + fn no_expansion(&mut self) -> Option<Cow<'_, [u8]>> { + Some(Cow::Borrowed(self.0)) + } +} + +/// Quickly checks the given replacement string for whether interpolation +/// should be done on it. It returns `None` if a `$` was found anywhere in the +/// given string, which suggests interpolation needs to be done. But if there's +/// no `$` anywhere, then interpolation definitely does not need to be done. In +/// that case, the given string is returned as a borrowed `Cow`. +/// +/// This is meant to be used to implement the `Replacer::no_expansion` method +/// in its various trait impls. +fn no_expansion<T: AsRef<[u8]>>(replacement: &T) -> Option<Cow<'_, [u8]>> { + let replacement = replacement.as_ref(); + match crate::find_byte::find_byte(b'$', replacement) { + Some(_) => None, + None => Some(Cow::Borrowed(replacement)), + } +} + +#[cfg(test)] +mod tests { + use super::*; + use alloc::format; + + #[test] + fn test_match_properties() { + let haystack = b"Hello, world!"; + let m = Match::new(haystack, 7, 12); + + assert_eq!(m.start(), 7); + assert_eq!(m.end(), 12); + assert_eq!(m.is_empty(), false); + assert_eq!(m.len(), 5); + assert_eq!(m.as_bytes(), b"world"); + } + + #[test] + fn test_empty_match() { + let haystack = b""; + let m = Match::new(haystack, 0, 0); + + assert_eq!(m.is_empty(), true); + assert_eq!(m.len(), 0); + } + + #[test] + fn test_debug_output_valid_utf8() { + let haystack = b"Hello, world!"; + let m = Match::new(haystack, 7, 12); + let debug_str = format!("{m:?}"); + + assert_eq!( + debug_str, + r#"Match { start: 7, end: 12, bytes: "world" }"# + ); + } + + #[test] + fn test_debug_output_invalid_utf8() { + let haystack = b"Hello, \xFFworld!"; + let m = Match::new(haystack, 7, 13); + let debug_str = format!("{m:?}"); + + assert_eq!( + debug_str, + r#"Match { start: 7, end: 13, bytes: "\xffworld" }"# + ); + } + + #[test] + fn test_debug_output_various_unicode() { + let haystack = + "Hello, 😊 world! 안녕하세요? مرحبا بالعالم!".as_bytes(); + let m = Match::new(haystack, 0, haystack.len()); + let debug_str = format!("{m:?}"); + + assert_eq!( + debug_str, + r#"Match { start: 0, end: 62, bytes: "Hello, 😊 world! 안녕하세요? مرحبا بالعالم!" 
}"# + ); + } + + #[test] + fn test_debug_output_ascii_escape() { + let haystack = b"Hello,\tworld!\nThis is a \x1b[31mtest\x1b[0m."; + let m = Match::new(haystack, 0, haystack.len()); + let debug_str = format!("{m:?}"); + + assert_eq!( + debug_str, + r#"Match { start: 0, end: 38, bytes: "Hello,\tworld!\nThis is a \u{1b}[31mtest\u{1b}[0m." }"# + ); + } + + #[test] + fn test_debug_output_match_in_middle() { + let haystack = b"The quick brown fox jumps over the lazy dog."; + let m = Match::new(haystack, 16, 19); + let debug_str = format!("{m:?}"); + + assert_eq!(debug_str, r#"Match { start: 16, end: 19, bytes: "fox" }"#); + } +} diff --git a/vendor/regex/src/regex/mod.rs b/vendor/regex/src/regex/mod.rs new file mode 100644 index 00000000000000..93fadec8bf65c9 --- /dev/null +++ b/vendor/regex/src/regex/mod.rs @@ -0,0 +1,2 @@ +pub(crate) mod bytes; +pub(crate) mod string; diff --git a/vendor/regex/src/regex/string.rs b/vendor/regex/src/regex/string.rs new file mode 100644 index 00000000000000..e066d7630cf0e4 --- /dev/null +++ b/vendor/regex/src/regex/string.rs @@ -0,0 +1,2625 @@ +use alloc::{borrow::Cow, string::String, sync::Arc}; + +use regex_automata::{meta, util::captures, Input, PatternID}; + +use crate::{error::Error, RegexBuilder}; + +/// A compiled regular expression for searching Unicode haystacks. +/// +/// A `Regex` can be used to search haystacks, split haystacks into substrings +/// or replace substrings in a haystack with a different substring. All +/// searching is done with an implicit `(?s:.)*?` at the beginning and end of +/// an pattern. To force an expression to match the whole string (or a prefix +/// or a suffix), you must use an anchor like `^` or `$` (or `\A` and `\z`). +/// +/// While this crate will handle Unicode strings (whether in the regular +/// expression or in the haystack), all positions returned are **byte +/// offsets**. Every byte offset is guaranteed to be at a Unicode code point +/// boundary. That is, all offsets returned by the `Regex` API are guaranteed +/// to be ranges that can slice a `&str` without panicking. If you want to +/// relax this requirement, then you must search `&[u8]` haystacks with a +/// [`bytes::Regex`](crate::bytes::Regex). +/// +/// The only methods that allocate new strings are the string replacement +/// methods. All other methods (searching and splitting) return borrowed +/// references into the haystack given. +/// +/// # Example +/// +/// Find the offsets of a US phone number: +/// +/// ``` +/// use regex::Regex; +/// +/// let re = Regex::new("[0-9]{3}-[0-9]{3}-[0-9]{4}").unwrap(); +/// let m = re.find("phone: 111-222-3333").unwrap(); +/// assert_eq!(7..19, m.range()); +/// ``` +/// +/// # Example: extracting capture groups +/// +/// A common way to use regexes is with capture groups. That is, instead of +/// just looking for matches of an entire regex, parentheses are used to create +/// groups that represent part of the match. +/// +/// For example, consider a haystack with multiple lines, and each line has +/// three whitespace delimited fields where the second field is expected to be +/// a number and the third field a boolean. 
To make this convenient, we use +/// the [`Captures::extract`] API to put the strings that match each group +/// into a fixed size array: +/// +/// ``` +/// use regex::Regex; +/// +/// let hay = " +/// rabbit 54 true +/// groundhog 2 true +/// does not match +/// fox 109 false +/// "; +/// let re = Regex::new(r"(?m)^\s*(\S+)\s+([0-9]+)\s+(true|false)\s*$").unwrap(); +/// let mut fields: Vec<(&str, i64, bool)> = vec![]; +/// for (_, [f1, f2, f3]) in re.captures_iter(hay).map(|caps| caps.extract()) { +/// fields.push((f1, f2.parse()?, f3.parse()?)); +/// } +/// assert_eq!(fields, vec![ +/// ("rabbit", 54, true), +/// ("groundhog", 2, true), +/// ("fox", 109, false), +/// ]); +/// +/// # Ok::<(), Box<dyn std::error::Error>>(()) +/// ``` +/// +/// # Example: searching with the `Pattern` trait +/// +/// **Note**: This section requires that this crate is compiled with the +/// `pattern` Cargo feature enabled, which **requires nightly Rust**. +/// +/// Since `Regex` implements `Pattern` from the standard library, one can +/// use regexes with methods defined on `&str`. For example, `is_match`, +/// `find`, `find_iter` and `split` can, in some cases, be replaced with +/// `str::contains`, `str::find`, `str::match_indices` and `str::split`. +/// +/// Here are some examples: +/// +/// ```ignore +/// use regex::Regex; +/// +/// let re = Regex::new(r"\d+").unwrap(); +/// let hay = "a111b222c"; +/// +/// assert!(hay.contains(&re)); +/// assert_eq!(hay.find(&re), Some(1)); +/// assert_eq!(hay.match_indices(&re).collect::<Vec<_>>(), vec![ +/// (1, "111"), +/// (5, "222"), +/// ]); +/// assert_eq!(hay.split(&re).collect::<Vec<_>>(), vec!["a", "b", "c"]); +/// ``` +#[derive(Clone)] +pub struct Regex { + pub(crate) meta: meta::Regex, + pub(crate) pattern: Arc<str>, +} + +impl core::fmt::Display for Regex { + /// Shows the original regular expression. + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + write!(f, "{}", self.as_str()) + } +} + +impl core::fmt::Debug for Regex { + /// Shows the original regular expression. + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + f.debug_tuple("Regex").field(&self.as_str()).finish() + } +} + +impl core::str::FromStr for Regex { + type Err = Error; + + /// Attempts to parse a string into a regular expression + fn from_str(s: &str) -> Result<Regex, Error> { + Regex::new(s) + } +} + +impl TryFrom<&str> for Regex { + type Error = Error; + + /// Attempts to parse a string into a regular expression + fn try_from(s: &str) -> Result<Regex, Error> { + Regex::new(s) + } +} + +impl TryFrom<String> for Regex { + type Error = Error; + + /// Attempts to parse a string into a regular expression + fn try_from(s: String) -> Result<Regex, Error> { + Regex::new(&s) + } +} + +/// Core regular expression methods. +impl Regex { + /// Compiles a regular expression. Once compiled, it can be used repeatedly + /// to search, split or replace substrings in a haystack. + /// + /// Note that regex compilation tends to be a somewhat expensive process, + /// and unlike higher level environments, compilation is not automatically + /// cached for you. One should endeavor to compile a regex once and then + /// reuse it. For example, it's a bad idea to compile the same regex + /// repeatedly in a loop. + /// + /// # Errors + /// + /// If an invalid pattern is given, then an error is returned. 
+ /// An error is also returned if the pattern is valid, but would + /// produce a regex that is bigger than the configured size limit via + /// [`RegexBuilder::size_limit`]. (A reasonable size limit is enabled by + /// default.) + /// + /// # Example + /// + /// ``` + /// use regex::Regex; + /// + /// // An Invalid pattern because of an unclosed parenthesis + /// assert!(Regex::new(r"foo(bar").is_err()); + /// // An invalid pattern because the regex would be too big + /// // because Unicode tends to inflate things. + /// assert!(Regex::new(r"\w{1000}").is_err()); + /// // Disabling Unicode can make the regex much smaller, + /// // potentially by up to or more than an order of magnitude. + /// assert!(Regex::new(r"(?-u:\w){1000}").is_ok()); + /// ``` + pub fn new(re: &str) -> Result<Regex, Error> { + RegexBuilder::new(re).build() + } + + /// Returns true if and only if there is a match for the regex anywhere + /// in the haystack given. + /// + /// It is recommended to use this method if all you need to do is test + /// whether a match exists, since the underlying matching engine may be + /// able to do less work. + /// + /// # Example + /// + /// Test if some haystack contains at least one word with exactly 13 + /// Unicode word characters: + /// + /// ``` + /// use regex::Regex; + /// + /// let re = Regex::new(r"\b\w{13}\b").unwrap(); + /// let hay = "I categorically deny having triskaidekaphobia."; + /// assert!(re.is_match(hay)); + /// ``` + #[inline] + pub fn is_match(&self, haystack: &str) -> bool { + self.is_match_at(haystack, 0) + } + + /// This routine searches for the first match of this regex in the + /// haystack given, and if found, returns a [`Match`]. The `Match` + /// provides access to both the byte offsets of the match and the actual + /// substring that matched. + /// + /// Note that this should only be used if you want to find the entire + /// match. If instead you just want to test the existence of a match, + /// it's potentially faster to use `Regex::is_match(hay)` instead of + /// `Regex::find(hay).is_some()`. + /// + /// # Example + /// + /// Find the first word with exactly 13 Unicode word characters: + /// + /// ``` + /// use regex::Regex; + /// + /// let re = Regex::new(r"\b\w{13}\b").unwrap(); + /// let hay = "I categorically deny having triskaidekaphobia."; + /// let mat = re.find(hay).unwrap(); + /// assert_eq!(2..15, mat.range()); + /// assert_eq!("categorically", mat.as_str()); + /// ``` + #[inline] + pub fn find<'h>(&self, haystack: &'h str) -> Option<Match<'h>> { + self.find_at(haystack, 0) + } + + /// Returns an iterator that yields successive non-overlapping matches in + /// the given haystack. The iterator yields values of type [`Match`]. + /// + /// # Time complexity + /// + /// Note that since `find_iter` runs potentially many searches on the + /// haystack and since each search has worst case `O(m * n)` time + /// complexity, the overall worst case time complexity for iteration is + /// `O(m * n^2)`. 
+ /// + /// # Example + /// + /// Find every word with exactly 13 Unicode word characters: + /// + /// ``` + /// use regex::Regex; + /// + /// let re = Regex::new(r"\b\w{13}\b").unwrap(); + /// let hay = "Retroactively relinquishing remunerations is reprehensible."; + /// let matches: Vec<_> = re.find_iter(hay).map(|m| m.as_str()).collect(); + /// assert_eq!(matches, vec![ + /// "Retroactively", + /// "relinquishing", + /// "remunerations", + /// "reprehensible", + /// ]); + /// ``` + #[inline] + pub fn find_iter<'r, 'h>(&'r self, haystack: &'h str) -> Matches<'r, 'h> { + Matches { haystack, it: self.meta.find_iter(haystack) } + } + + /// This routine searches for the first match of this regex in the haystack + /// given, and if found, returns not only the overall match but also the + /// matches of each capture group in the regex. If no match is found, then + /// `None` is returned. + /// + /// Capture group `0` always corresponds to an implicit unnamed group that + /// includes the entire match. If a match is found, this group is always + /// present. Subsequent groups may be named and are numbered, starting + /// at 1, by the order in which the opening parenthesis appears in the + /// pattern. For example, in the pattern `(?<a>.(?<b>.))(?<c>.)`, `a`, + /// `b` and `c` correspond to capture group indices `1`, `2` and `3`, + /// respectively. + /// + /// You should only use `captures` if you need access to the capture group + /// matches. Otherwise, [`Regex::find`] is generally faster for discovering + /// just the overall match. + /// + /// # Example + /// + /// Say you have some haystack with movie names and their release years, + /// like "'Citizen Kane' (1941)". It'd be nice if we could search for + /// substrings looking like that, while also extracting the movie name and + /// its release year separately. The example below shows how to do that. + /// + /// ``` + /// use regex::Regex; + /// + /// let re = Regex::new(r"'([^']+)'\s+\((\d{4})\)").unwrap(); + /// let hay = "Not my favorite movie: 'Citizen Kane' (1941)."; + /// let caps = re.captures(hay).unwrap(); + /// assert_eq!(caps.get(0).unwrap().as_str(), "'Citizen Kane' (1941)"); + /// assert_eq!(caps.get(1).unwrap().as_str(), "Citizen Kane"); + /// assert_eq!(caps.get(2).unwrap().as_str(), "1941"); + /// // You can also access the groups by index using the Index notation. + /// // Note that this will panic on an invalid index. In this case, these + /// // accesses are always correct because the overall regex will only + /// // match when these capture groups match. + /// assert_eq!(&caps[0], "'Citizen Kane' (1941)"); + /// assert_eq!(&caps[1], "Citizen Kane"); + /// assert_eq!(&caps[2], "1941"); + /// ``` + /// + /// Note that the full match is at capture group `0`. Each subsequent + /// capture group is indexed by the order of its opening `(`. + /// + /// We can make this example a bit clearer by using *named* capture groups: + /// + /// ``` + /// use regex::Regex; + /// + /// let re = Regex::new(r"'(?<title>[^']+)'\s+\((?<year>\d{4})\)").unwrap(); + /// let hay = "Not my favorite movie: 'Citizen Kane' (1941)."; + /// let caps = re.captures(hay).unwrap(); + /// assert_eq!(caps.get(0).unwrap().as_str(), "'Citizen Kane' (1941)"); + /// assert_eq!(caps.name("title").unwrap().as_str(), "Citizen Kane"); + /// assert_eq!(caps.name("year").unwrap().as_str(), "1941"); + /// // You can also access the groups by name using the Index notation. + /// // Note that this will panic on an invalid group name. 
In this case, + /// // these accesses are always correct because the overall regex will + /// // only match when these capture groups match. + /// assert_eq!(&caps[0], "'Citizen Kane' (1941)"); + /// assert_eq!(&caps["title"], "Citizen Kane"); + /// assert_eq!(&caps["year"], "1941"); + /// ``` + /// + /// Here we name the capture groups, which we can access with the `name` + /// method or the `Index` notation with a `&str`. Note that the named + /// capture groups are still accessible with `get` or the `Index` notation + /// with a `usize`. + /// + /// The `0`th capture group is always unnamed, so it must always be + /// accessed with `get(0)` or `[0]`. + /// + /// Finally, one other way to get the matched substrings is with the + /// [`Captures::extract`] API: + /// + /// ``` + /// use regex::Regex; + /// + /// let re = Regex::new(r"'([^']+)'\s+\((\d{4})\)").unwrap(); + /// let hay = "Not my favorite movie: 'Citizen Kane' (1941)."; + /// let (full, [title, year]) = re.captures(hay).unwrap().extract(); + /// assert_eq!(full, "'Citizen Kane' (1941)"); + /// assert_eq!(title, "Citizen Kane"); + /// assert_eq!(year, "1941"); + /// ``` + #[inline] + pub fn captures<'h>(&self, haystack: &'h str) -> Option<Captures<'h>> { + self.captures_at(haystack, 0) + } + + /// Returns an iterator that yields successive non-overlapping matches in + /// the given haystack. The iterator yields values of type [`Captures`]. + /// + /// This is the same as [`Regex::find_iter`], but instead of only providing + /// access to the overall match, each value yield includes access to the + /// matches of all capture groups in the regex. Reporting this extra match + /// data is potentially costly, so callers should only use `captures_iter` + /// over `find_iter` when they actually need access to the capture group + /// matches. + /// + /// # Time complexity + /// + /// Note that since `captures_iter` runs potentially many searches on the + /// haystack and since each search has worst case `O(m * n)` time + /// complexity, the overall worst case time complexity for iteration is + /// `O(m * n^2)`. 
+ /// + /// # Example + /// + /// We can use this to find all movie titles and their release years in + /// some haystack, where the movie is formatted like "'Title' (xxxx)": + /// + /// ``` + /// use regex::Regex; + /// + /// let re = Regex::new(r"'([^']+)'\s+\(([0-9]{4})\)").unwrap(); + /// let hay = "'Citizen Kane' (1941), 'The Wizard of Oz' (1939), 'M' (1931)."; + /// let mut movies = vec![]; + /// for (_, [title, year]) in re.captures_iter(hay).map(|c| c.extract()) { + /// movies.push((title, year.parse::<i64>()?)); + /// } + /// assert_eq!(movies, vec![ + /// ("Citizen Kane", 1941), + /// ("The Wizard of Oz", 1939), + /// ("M", 1931), + /// ]); + /// # Ok::<(), Box<dyn std::error::Error>>(()) + /// ``` + /// + /// Or with named groups: + /// + /// ``` + /// use regex::Regex; + /// + /// let re = Regex::new(r"'(?<title>[^']+)'\s+\((?<year>[0-9]{4})\)").unwrap(); + /// let hay = "'Citizen Kane' (1941), 'The Wizard of Oz' (1939), 'M' (1931)."; + /// let mut it = re.captures_iter(hay); + /// + /// let caps = it.next().unwrap(); + /// assert_eq!(&caps["title"], "Citizen Kane"); + /// assert_eq!(&caps["year"], "1941"); + /// + /// let caps = it.next().unwrap(); + /// assert_eq!(&caps["title"], "The Wizard of Oz"); + /// assert_eq!(&caps["year"], "1939"); + /// + /// let caps = it.next().unwrap(); + /// assert_eq!(&caps["title"], "M"); + /// assert_eq!(&caps["year"], "1931"); + /// ``` + #[inline] + pub fn captures_iter<'r, 'h>( + &'r self, + haystack: &'h str, + ) -> CaptureMatches<'r, 'h> { + CaptureMatches { haystack, it: self.meta.captures_iter(haystack) } + } + + /// Returns an iterator of substrings of the haystack given, delimited by a + /// match of the regex. Namely, each element of the iterator corresponds to + /// a part of the haystack that *isn't* matched by the regular expression. + /// + /// # Time complexity + /// + /// Since iterators over all matches requires running potentially many + /// searches on the haystack, and since each search has worst case + /// `O(m * n)` time complexity, the overall worst case time complexity for + /// this routine is `O(m * n^2)`. 
+ /// + /// # Example + /// + /// To split a string delimited by arbitrary amounts of spaces or tabs: + /// + /// ``` + /// use regex::Regex; + /// + /// let re = Regex::new(r"[ \t]+").unwrap(); + /// let hay = "a b \t c\td e"; + /// let fields: Vec<&str> = re.split(hay).collect(); + /// assert_eq!(fields, vec!["a", "b", "c", "d", "e"]); + /// ``` + /// + /// # Example: more cases + /// + /// Basic usage: + /// + /// ``` + /// use regex::Regex; + /// + /// let re = Regex::new(r" ").unwrap(); + /// let hay = "Mary had a little lamb"; + /// let got: Vec<&str> = re.split(hay).collect(); + /// assert_eq!(got, vec!["Mary", "had", "a", "little", "lamb"]); + /// + /// let re = Regex::new(r"X").unwrap(); + /// let hay = ""; + /// let got: Vec<&str> = re.split(hay).collect(); + /// assert_eq!(got, vec![""]); + /// + /// let re = Regex::new(r"X").unwrap(); + /// let hay = "lionXXtigerXleopard"; + /// let got: Vec<&str> = re.split(hay).collect(); + /// assert_eq!(got, vec!["lion", "", "tiger", "leopard"]); + /// + /// let re = Regex::new(r"::").unwrap(); + /// let hay = "lion::tiger::leopard"; + /// let got: Vec<&str> = re.split(hay).collect(); + /// assert_eq!(got, vec!["lion", "tiger", "leopard"]); + /// ``` + /// + /// If a haystack contains multiple contiguous matches, you will end up + /// with empty spans yielded by the iterator: + /// + /// ``` + /// use regex::Regex; + /// + /// let re = Regex::new(r"X").unwrap(); + /// let hay = "XXXXaXXbXc"; + /// let got: Vec<&str> = re.split(hay).collect(); + /// assert_eq!(got, vec!["", "", "", "", "a", "", "b", "c"]); + /// + /// let re = Regex::new(r"/").unwrap(); + /// let hay = "(///)"; + /// let got: Vec<&str> = re.split(hay).collect(); + /// assert_eq!(got, vec!["(", "", "", ")"]); + /// ``` + /// + /// Separators at the start or end of a haystack are neighbored by empty + /// substring. + /// + /// ``` + /// use regex::Regex; + /// + /// let re = Regex::new(r"0").unwrap(); + /// let hay = "010"; + /// let got: Vec<&str> = re.split(hay).collect(); + /// assert_eq!(got, vec!["", "1", ""]); + /// ``` + /// + /// When the empty string is used as a regex, it splits at every valid + /// UTF-8 boundary by default (which includes the beginning and end of the + /// haystack): + /// + /// ``` + /// use regex::Regex; + /// + /// let re = Regex::new(r"").unwrap(); + /// let hay = "rust"; + /// let got: Vec<&str> = re.split(hay).collect(); + /// assert_eq!(got, vec!["", "r", "u", "s", "t", ""]); + /// + /// // Splitting by an empty string is UTF-8 aware by default! + /// let re = Regex::new(r"").unwrap(); + /// let hay = "☃"; + /// let got: Vec<&str> = re.split(hay).collect(); + /// assert_eq!(got, vec!["", "☃", ""]); + /// ``` + /// + /// Contiguous separators (commonly shows up with whitespace), can lead to + /// possibly surprising behavior. For example, this code is correct: + /// + /// ``` + /// use regex::Regex; + /// + /// let re = Regex::new(r" ").unwrap(); + /// let hay = " a b c"; + /// let got: Vec<&str> = re.split(hay).collect(); + /// assert_eq!(got, vec!["", "", "", "", "a", "", "b", "c"]); + /// ``` + /// + /// It does *not* give you `["a", "b", "c"]`. For that behavior, you'd want + /// to match contiguous space characters: + /// + /// ``` + /// use regex::Regex; + /// + /// let re = Regex::new(r" +").unwrap(); + /// let hay = " a b c"; + /// let got: Vec<&str> = re.split(hay).collect(); + /// // N.B. This does still include a leading empty span because ' +' + /// // matches at the beginning of the haystack. 
+ /// assert_eq!(got, vec!["", "a", "b", "c"]); + /// ``` + #[inline] + pub fn split<'r, 'h>(&'r self, haystack: &'h str) -> Split<'r, 'h> { + Split { haystack, it: self.meta.split(haystack) } + } + + /// Returns an iterator of at most `limit` substrings of the haystack + /// given, delimited by a match of the regex. (A `limit` of `0` will return + /// no substrings.) Namely, each element of the iterator corresponds to a + /// part of the haystack that *isn't* matched by the regular expression. + /// The remainder of the haystack that is not split will be the last + /// element in the iterator. + /// + /// # Time complexity + /// + /// Since iterators over all matches requires running potentially many + /// searches on the haystack, and since each search has worst case + /// `O(m * n)` time complexity, the overall worst case time complexity for + /// this routine is `O(m * n^2)`. + /// + /// Although note that the worst case time here has an upper bound given + /// by the `limit` parameter. + /// + /// # Example + /// + /// Get the first two words in some haystack: + /// + /// ``` + /// use regex::Regex; + /// + /// let re = Regex::new(r"\W+").unwrap(); + /// let hay = "Hey! How are you?"; + /// let fields: Vec<&str> = re.splitn(hay, 3).collect(); + /// assert_eq!(fields, vec!["Hey", "How", "are you?"]); + /// ``` + /// + /// # Examples: more cases + /// + /// ``` + /// use regex::Regex; + /// + /// let re = Regex::new(r" ").unwrap(); + /// let hay = "Mary had a little lamb"; + /// let got: Vec<&str> = re.splitn(hay, 3).collect(); + /// assert_eq!(got, vec!["Mary", "had", "a little lamb"]); + /// + /// let re = Regex::new(r"X").unwrap(); + /// let hay = ""; + /// let got: Vec<&str> = re.splitn(hay, 3).collect(); + /// assert_eq!(got, vec![""]); + /// + /// let re = Regex::new(r"X").unwrap(); + /// let hay = "lionXXtigerXleopard"; + /// let got: Vec<&str> = re.splitn(hay, 3).collect(); + /// assert_eq!(got, vec!["lion", "", "tigerXleopard"]); + /// + /// let re = Regex::new(r"::").unwrap(); + /// let hay = "lion::tiger::leopard"; + /// let got: Vec<&str> = re.splitn(hay, 2).collect(); + /// assert_eq!(got, vec!["lion", "tiger::leopard"]); + /// + /// let re = Regex::new(r"X").unwrap(); + /// let hay = "abcXdef"; + /// let got: Vec<&str> = re.splitn(hay, 1).collect(); + /// assert_eq!(got, vec!["abcXdef"]); + /// + /// let re = Regex::new(r"X").unwrap(); + /// let hay = "abcdef"; + /// let got: Vec<&str> = re.splitn(hay, 2).collect(); + /// assert_eq!(got, vec!["abcdef"]); + /// + /// let re = Regex::new(r"X").unwrap(); + /// let hay = "abcXdef"; + /// let got: Vec<&str> = re.splitn(hay, 0).collect(); + /// assert!(got.is_empty()); + /// ``` + #[inline] + pub fn splitn<'r, 'h>( + &'r self, + haystack: &'h str, + limit: usize, + ) -> SplitN<'r, 'h> { + SplitN { haystack, it: self.meta.splitn(haystack, limit) } + } + + /// Replaces the leftmost-first match in the given haystack with the + /// replacement provided. The replacement can be a regular string (where + /// `$N` and `$name` are expanded to match capture groups) or a function + /// that takes a [`Captures`] and returns the replaced string. + /// + /// If no match is found, then the haystack is returned unchanged. In that + /// case, this implementation will likely return a `Cow::Borrowed` value + /// such that no allocation is performed. + /// + /// When a `Cow::Borrowed` is returned, the value returned is guaranteed + /// to be equivalent to the `haystack` given. 
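+    ///
+    /// For instance, one way to check whether any replacement actually took
+    /// place is to inspect the `Cow` variant returned (a minimal sketch; the
+    /// pattern and haystacks below are illustrative only):
+    ///
+    /// ```
+    /// use std::borrow::Cow;
+    ///
+    /// use regex::Regex;
+    ///
+    /// let re = Regex::new(r"[0-9]+").unwrap();
+    /// // No match, so the haystack is returned unchanged and borrowed.
+    /// assert!(matches!(re.replace("no digits here", "#"), Cow::Borrowed(_)));
+    /// // A match occurred, so a freshly allocated `String` is returned.
+    /// assert!(matches!(re.replace("version 12", "#"), Cow::Owned(_)));
+    /// ```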
+ /// + /// # Replacement string syntax + /// + /// All instances of `$ref` in the replacement string are replaced with + /// the substring corresponding to the capture group identified by `ref`. + /// + /// `ref` may be an integer corresponding to the index of the capture group + /// (counted by order of opening parenthesis where `0` is the entire match) + /// or it can be a name (consisting of letters, digits or underscores) + /// corresponding to a named capture group. + /// + /// If `ref` isn't a valid capture group (whether the name doesn't exist or + /// isn't a valid index), then it is replaced with the empty string. + /// + /// The longest possible name is used. For example, `$1a` looks up the + /// capture group named `1a` and not the capture group at index `1`. To + /// exert more precise control over the name, use braces, e.g., `${1}a`. + /// + /// To write a literal `$` use `$$`. + /// + /// # Example + /// + /// Note that this function is polymorphic with respect to the replacement. + /// In typical usage, this can just be a normal string: + /// + /// ``` + /// use regex::Regex; + /// + /// let re = Regex::new(r"[^01]+").unwrap(); + /// assert_eq!(re.replace("1078910", ""), "1010"); + /// ``` + /// + /// But anything satisfying the [`Replacer`] trait will work. For example, + /// a closure of type `|&Captures| -> String` provides direct access to the + /// captures corresponding to a match. This allows one to access capturing + /// group matches easily: + /// + /// ``` + /// use regex::{Captures, Regex}; + /// + /// let re = Regex::new(r"([^,\s]+),\s+(\S+)").unwrap(); + /// let result = re.replace("Springsteen, Bruce", |caps: &Captures| { + /// format!("{} {}", &caps[2], &caps[1]) + /// }); + /// assert_eq!(result, "Bruce Springsteen"); + /// ``` + /// + /// But this is a bit cumbersome to use all the time. Instead, a simple + /// syntax is supported (as described above) that expands `$name` into the + /// corresponding capture group. Here's the last example, but using this + /// expansion technique with named capture groups: + /// + /// ``` + /// use regex::Regex; + /// + /// let re = Regex::new(r"(?<last>[^,\s]+),\s+(?<first>\S+)").unwrap(); + /// let result = re.replace("Springsteen, Bruce", "$first $last"); + /// assert_eq!(result, "Bruce Springsteen"); + /// ``` + /// + /// Note that using `$2` instead of `$first` or `$1` instead of `$last` + /// would produce the same result. To write a literal `$` use `$$`. + /// + /// Sometimes the replacement string requires use of curly braces to + /// delineate a capture group replacement when it is adjacent to some other + /// literal text. For example, if we wanted to join two words together with + /// an underscore: + /// + /// ``` + /// use regex::Regex; + /// + /// let re = Regex::new(r"(?<first>\w+)\s+(?<second>\w+)").unwrap(); + /// let result = re.replace("deep fried", "${first}_$second"); + /// assert_eq!(result, "deep_fried"); + /// ``` + /// + /// Without the curly braces, the capture group name `first_` would be + /// used, and since it doesn't exist, it would be replaced with the empty + /// string. + /// + /// Finally, sometimes you just want to replace a literal string with no + /// regard for capturing group expansion. 
This can be done by wrapping a + /// string with [`NoExpand`]: + /// + /// ``` + /// use regex::{NoExpand, Regex}; + /// + /// let re = Regex::new(r"(?<last>[^,\s]+),\s+(\S+)").unwrap(); + /// let result = re.replace("Springsteen, Bruce", NoExpand("$2 $last")); + /// assert_eq!(result, "$2 $last"); + /// ``` + /// + /// Using `NoExpand` may also be faster, since the replacement string won't + /// need to be parsed for the `$` syntax. + #[inline] + pub fn replace<'h, R: Replacer>( + &self, + haystack: &'h str, + rep: R, + ) -> Cow<'h, str> { + self.replacen(haystack, 1, rep) + } + + /// Replaces all non-overlapping matches in the haystack with the + /// replacement provided. This is the same as calling `replacen` with + /// `limit` set to `0`. + /// + /// If no match is found, then the haystack is returned unchanged. In that + /// case, this implementation will likely return a `Cow::Borrowed` value + /// such that no allocation is performed. + /// + /// When a `Cow::Borrowed` is returned, the value returned is guaranteed + /// to be equivalent to the `haystack` given. + /// + /// The documentation for [`Regex::replace`] goes into more detail about + /// what kinds of replacement strings are supported. + /// + /// # Time complexity + /// + /// Since iterators over all matches requires running potentially many + /// searches on the haystack, and since each search has worst case + /// `O(m * n)` time complexity, the overall worst case time complexity for + /// this routine is `O(m * n^2)`. + /// + /// # Fallibility + /// + /// If you need to write a replacement routine where any individual + /// replacement might "fail," doing so with this API isn't really feasible + /// because there's no way to stop the search process if a replacement + /// fails. Instead, if you need this functionality, you should consider + /// implementing your own replacement routine: + /// + /// ``` + /// use regex::{Captures, Regex}; + /// + /// fn replace_all<E>( + /// re: &Regex, + /// haystack: &str, + /// replacement: impl Fn(&Captures) -> Result<String, E>, + /// ) -> Result<String, E> { + /// let mut new = String::with_capacity(haystack.len()); + /// let mut last_match = 0; + /// for caps in re.captures_iter(haystack) { + /// let m = caps.get(0).unwrap(); + /// new.push_str(&haystack[last_match..m.start()]); + /// new.push_str(&replacement(&caps)?); + /// last_match = m.end(); + /// } + /// new.push_str(&haystack[last_match..]); + /// Ok(new) + /// } + /// + /// // Let's replace each word with the number of bytes in that word. + /// // But if we see a word that is "too long," we'll give up. 
+ /// let re = Regex::new(r"\w+").unwrap(); + /// let replacement = |caps: &Captures| -> Result<String, &'static str> { + /// if caps[0].len() >= 5 { + /// return Err("word too long"); + /// } + /// Ok(caps[0].len().to_string()) + /// }; + /// assert_eq!( + /// Ok("2 3 3 3?".to_string()), + /// replace_all(&re, "hi how are you?", &replacement), + /// ); + /// assert!(replace_all(&re, "hi there", &replacement).is_err()); + /// ``` + /// + /// # Example + /// + /// This example shows how to flip the order of whitespace (excluding line + /// terminators) delimited fields, and normalizes the whitespace that + /// delimits the fields: + /// + /// ``` + /// use regex::Regex; + /// + /// let re = Regex::new(r"(?m)^(\S+)[\s--\r\n]+(\S+)$").unwrap(); + /// let hay = " + /// Greetings 1973 + /// Wild\t1973 + /// BornToRun\t\t\t\t1975 + /// Darkness 1978 + /// TheRiver 1980 + /// "; + /// let new = re.replace_all(hay, "$2 $1"); + /// assert_eq!(new, " + /// 1973 Greetings + /// 1973 Wild + /// 1975 BornToRun + /// 1978 Darkness + /// 1980 TheRiver + /// "); + /// ``` + #[inline] + pub fn replace_all<'h, R: Replacer>( + &self, + haystack: &'h str, + rep: R, + ) -> Cow<'h, str> { + self.replacen(haystack, 0, rep) + } + + /// Replaces at most `limit` non-overlapping matches in the haystack with + /// the replacement provided. If `limit` is `0`, then all non-overlapping + /// matches are replaced. That is, `Regex::replace_all(hay, rep)` is + /// equivalent to `Regex::replacen(hay, 0, rep)`. + /// + /// If no match is found, then the haystack is returned unchanged. In that + /// case, this implementation will likely return a `Cow::Borrowed` value + /// such that no allocation is performed. + /// + /// When a `Cow::Borrowed` is returned, the value returned is guaranteed + /// to be equivalent to the `haystack` given. + /// + /// The documentation for [`Regex::replace`] goes into more detail about + /// what kinds of replacement strings are supported. + /// + /// # Time complexity + /// + /// Since iterators over all matches requires running potentially many + /// searches on the haystack, and since each search has worst case + /// `O(m * n)` time complexity, the overall worst case time complexity for + /// this routine is `O(m * n^2)`. + /// + /// Although note that the worst case time here has an upper bound given + /// by the `limit` parameter. + /// + /// # Fallibility + /// + /// See the corresponding section in the docs for [`Regex::replace_all`] + /// for tips on how to deal with a replacement routine that can fail. + /// + /// # Example + /// + /// This example shows how to flip the order of whitespace (excluding line + /// terminators) delimited fields, and normalizes the whitespace that + /// delimits the fields. But we only do it for the first two matches. + /// + /// ``` + /// use regex::Regex; + /// + /// let re = Regex::new(r"(?m)^(\S+)[\s--\r\n]+(\S+)$").unwrap(); + /// let hay = " + /// Greetings 1973 + /// Wild\t1973 + /// BornToRun\t\t\t\t1975 + /// Darkness 1978 + /// TheRiver 1980 + /// "; + /// let new = re.replacen(hay, 2, "$2 $1"); + /// assert_eq!(new, " + /// 1973 Greetings + /// 1973 Wild + /// BornToRun\t\t\t\t1975 + /// Darkness 1978 + /// TheRiver 1980 + /// "); + /// ``` + #[inline] + pub fn replacen<'h, R: Replacer>( + &self, + haystack: &'h str, + limit: usize, + mut rep: R, + ) -> Cow<'h, str> { + // If we know that the replacement doesn't have any capture expansions, + // then we can use the fast path. 
The fast path can make a tremendous + // difference: + // + // 1) We use `find_iter` instead of `captures_iter`. Not asking for + // captures generally makes the regex engines faster. + // 2) We don't need to look up all of the capture groups and do + // replacements inside the replacement string. We just push it + // at each match and be done with it. + if let Some(rep) = rep.no_expansion() { + let mut it = self.find_iter(haystack).enumerate().peekable(); + if it.peek().is_none() { + return Cow::Borrowed(haystack); + } + let mut new = String::with_capacity(haystack.len()); + let mut last_match = 0; + for (i, m) in it { + new.push_str(&haystack[last_match..m.start()]); + new.push_str(&rep); + last_match = m.end(); + if limit > 0 && i >= limit - 1 { + break; + } + } + new.push_str(&haystack[last_match..]); + return Cow::Owned(new); + } + + // The slower path, which we use if the replacement may need access to + // capture groups. + let mut it = self.captures_iter(haystack).enumerate().peekable(); + if it.peek().is_none() { + return Cow::Borrowed(haystack); + } + let mut new = String::with_capacity(haystack.len()); + let mut last_match = 0; + for (i, cap) in it { + // unwrap on 0 is OK because captures only reports matches + let m = cap.get(0).unwrap(); + new.push_str(&haystack[last_match..m.start()]); + rep.replace_append(&cap, &mut new); + last_match = m.end(); + if limit > 0 && i >= limit - 1 { + break; + } + } + new.push_str(&haystack[last_match..]); + Cow::Owned(new) + } +} + +/// A group of advanced or "lower level" search methods. Some methods permit +/// starting the search at a position greater than `0` in the haystack. Other +/// methods permit reusing allocations, for example, when extracting the +/// matches for capture groups. +impl Regex { + /// Returns the end byte offset of the first match in the haystack given. + /// + /// This method may have the same performance characteristics as + /// `is_match`. Behaviorally, it doesn't just report whether it match + /// occurs, but also the end offset for a match. In particular, the offset + /// returned *may be shorter* than the proper end of the leftmost-first + /// match that you would find via [`Regex::find`]. + /// + /// Note that it is not guaranteed that this routine finds the shortest or + /// "earliest" possible match. Instead, the main idea of this API is that + /// it returns the offset at the point at which the internal regex engine + /// has determined that a match has occurred. This may vary depending on + /// which internal regex engine is used, and thus, the offset itself may + /// change based on internal heuristics. + /// + /// # Example + /// + /// Typically, `a+` would match the entire first sequence of `a` in some + /// haystack, but `shortest_match` *may* give up as soon as it sees the + /// first `a`. + /// + /// ``` + /// use regex::Regex; + /// + /// let re = Regex::new(r"a+").unwrap(); + /// let offset = re.shortest_match("aaaaa").unwrap(); + /// assert_eq!(offset, 1); + /// ``` + #[inline] + pub fn shortest_match(&self, haystack: &str) -> Option<usize> { + self.shortest_match_at(haystack, 0) + } + + /// Returns the same as [`Regex::shortest_match`], but starts the search at + /// the given offset. + /// + /// The significance of the starting point is that it takes the surrounding + /// context into consideration. For example, the `\A` anchor can only match + /// when `start == 0`. 
+ /// + /// If a match is found, the offset returned is relative to the beginning + /// of the haystack, not the beginning of the search. + /// + /// # Panics + /// + /// This panics when `start >= haystack.len() + 1`. + /// + /// # Example + /// + /// This example shows the significance of `start` by demonstrating how it + /// can be used to permit look-around assertions in a regex to take the + /// surrounding context into account. + /// + /// ``` + /// use regex::Regex; + /// + /// let re = Regex::new(r"\bchew\b").unwrap(); + /// let hay = "eschew"; + /// // We get a match here, but it's probably not intended. + /// assert_eq!(re.shortest_match(&hay[2..]), Some(4)); + /// // No match because the assertions take the context into account. + /// assert_eq!(re.shortest_match_at(hay, 2), None); + /// ``` + #[inline] + pub fn shortest_match_at( + &self, + haystack: &str, + start: usize, + ) -> Option<usize> { + let input = + Input::new(haystack).earliest(true).span(start..haystack.len()); + self.meta.search_half(&input).map(|hm| hm.offset()) + } + + /// Returns the same as [`Regex::is_match`], but starts the search at the + /// given offset. + /// + /// The significance of the starting point is that it takes the surrounding + /// context into consideration. For example, the `\A` anchor can only + /// match when `start == 0`. + /// + /// # Panics + /// + /// This panics when `start >= haystack.len() + 1`. + /// + /// # Example + /// + /// This example shows the significance of `start` by demonstrating how it + /// can be used to permit look-around assertions in a regex to take the + /// surrounding context into account. + /// + /// ``` + /// use regex::Regex; + /// + /// let re = Regex::new(r"\bchew\b").unwrap(); + /// let hay = "eschew"; + /// // We get a match here, but it's probably not intended. + /// assert!(re.is_match(&hay[2..])); + /// // No match because the assertions take the context into account. + /// assert!(!re.is_match_at(hay, 2)); + /// ``` + #[inline] + pub fn is_match_at(&self, haystack: &str, start: usize) -> bool { + let input = + Input::new(haystack).earliest(true).span(start..haystack.len()); + self.meta.search_half(&input).is_some() + } + + /// Returns the same as [`Regex::find`], but starts the search at the given + /// offset. + /// + /// The significance of the starting point is that it takes the surrounding + /// context into consideration. For example, the `\A` anchor can only + /// match when `start == 0`. + /// + /// # Panics + /// + /// This panics when `start >= haystack.len() + 1`. + /// + /// # Example + /// + /// This example shows the significance of `start` by demonstrating how it + /// can be used to permit look-around assertions in a regex to take the + /// surrounding context into account. + /// + /// ``` + /// use regex::Regex; + /// + /// let re = Regex::new(r"\bchew\b").unwrap(); + /// let hay = "eschew"; + /// // We get a match here, but it's probably not intended. + /// assert_eq!(re.find(&hay[2..]).map(|m| m.range()), Some(0..4)); + /// // No match because the assertions take the context into account. + /// assert_eq!(re.find_at(hay, 2), None); + /// ``` + #[inline] + pub fn find_at<'h>( + &self, + haystack: &'h str, + start: usize, + ) -> Option<Match<'h>> { + let input = Input::new(haystack).span(start..haystack.len()); + self.meta + .search(&input) + .map(|m| Match::new(haystack, m.start(), m.end())) + } + + /// Returns the same as [`Regex::captures`], but starts the search at the + /// given offset. 
+ /// + /// The significance of the starting point is that it takes the surrounding + /// context into consideration. For example, the `\A` anchor can only + /// match when `start == 0`. + /// + /// # Panics + /// + /// This panics when `start >= haystack.len() + 1`. + /// + /// # Example + /// + /// This example shows the significance of `start` by demonstrating how it + /// can be used to permit look-around assertions in a regex to take the + /// surrounding context into account. + /// + /// ``` + /// use regex::Regex; + /// + /// let re = Regex::new(r"\bchew\b").unwrap(); + /// let hay = "eschew"; + /// // We get a match here, but it's probably not intended. + /// assert_eq!(&re.captures(&hay[2..]).unwrap()[0], "chew"); + /// // No match because the assertions take the context into account. + /// assert!(re.captures_at(hay, 2).is_none()); + /// ``` + #[inline] + pub fn captures_at<'h>( + &self, + haystack: &'h str, + start: usize, + ) -> Option<Captures<'h>> { + let input = Input::new(haystack).span(start..haystack.len()); + let mut caps = self.meta.create_captures(); + self.meta.search_captures(&input, &mut caps); + if caps.is_match() { + let static_captures_len = self.static_captures_len(); + Some(Captures { haystack, caps, static_captures_len }) + } else { + None + } + } + + /// This is like [`Regex::captures`], but writes the byte offsets of each + /// capture group match into the locations given. + /// + /// A [`CaptureLocations`] stores the same byte offsets as a [`Captures`], + /// but does *not* store a reference to the haystack. This makes its API + /// a bit lower level and less convenient. But in exchange, callers + /// may allocate their own `CaptureLocations` and reuse it for multiple + /// searches. This may be helpful if allocating a `Captures` shows up in a + /// profile as too costly. + /// + /// To create a `CaptureLocations` value, use the + /// [`Regex::capture_locations`] method. + /// + /// This also returns the overall match if one was found. When a match is + /// found, its offsets are also always stored in `locs` at index `0`. + /// + /// # Panics + /// + /// This routine may panic if the given `CaptureLocations` was not created + /// by this regex. + /// + /// # Example + /// + /// ``` + /// use regex::Regex; + /// + /// let re = Regex::new(r"^([a-z]+)=(\S*)$").unwrap(); + /// let mut locs = re.capture_locations(); + /// assert!(re.captures_read(&mut locs, "id=foo123").is_some()); + /// assert_eq!(Some((0, 9)), locs.get(0)); + /// assert_eq!(Some((0, 2)), locs.get(1)); + /// assert_eq!(Some((3, 9)), locs.get(2)); + /// ``` + #[inline] + pub fn captures_read<'h>( + &self, + locs: &mut CaptureLocations, + haystack: &'h str, + ) -> Option<Match<'h>> { + self.captures_read_at(locs, haystack, 0) + } + + /// Returns the same as [`Regex::captures_read`], but starts the search at + /// the given offset. + /// + /// The significance of the starting point is that it takes the surrounding + /// context into consideration. For example, the `\A` anchor can only + /// match when `start == 0`. + /// + /// # Panics + /// + /// This panics when `start >= haystack.len() + 1`. + /// + /// This routine may also panic if the given `CaptureLocations` was not + /// created by this regex. + /// + /// # Example + /// + /// This example shows the significance of `start` by demonstrating how it + /// can be used to permit look-around assertions in a regex to take the + /// surrounding context into account. 
+ /// + /// ``` + /// use regex::Regex; + /// + /// let re = Regex::new(r"\bchew\b").unwrap(); + /// let hay = "eschew"; + /// let mut locs = re.capture_locations(); + /// // We get a match here, but it's probably not intended. + /// assert!(re.captures_read(&mut locs, &hay[2..]).is_some()); + /// // No match because the assertions take the context into account. + /// assert!(re.captures_read_at(&mut locs, hay, 2).is_none()); + /// ``` + #[inline] + pub fn captures_read_at<'h>( + &self, + locs: &mut CaptureLocations, + haystack: &'h str, + start: usize, + ) -> Option<Match<'h>> { + let input = Input::new(haystack).span(start..haystack.len()); + self.meta.search_captures(&input, &mut locs.0); + locs.0.get_match().map(|m| Match::new(haystack, m.start(), m.end())) + } + + /// An undocumented alias for `captures_read_at`. + /// + /// The `regex-capi` crate previously used this routine, so to avoid + /// breaking that crate, we continue to provide the name as an undocumented + /// alias. + #[doc(hidden)] + #[inline] + pub fn read_captures_at<'h>( + &self, + locs: &mut CaptureLocations, + haystack: &'h str, + start: usize, + ) -> Option<Match<'h>> { + self.captures_read_at(locs, haystack, start) + } +} + +/// Auxiliary methods. +impl Regex { + /// Returns the original string of this regex. + /// + /// # Example + /// + /// ``` + /// use regex::Regex; + /// + /// let re = Regex::new(r"foo\w+bar").unwrap(); + /// assert_eq!(re.as_str(), r"foo\w+bar"); + /// ``` + #[inline] + pub fn as_str(&self) -> &str { + &self.pattern + } + + /// Returns an iterator over the capture names in this regex. + /// + /// The iterator returned yields elements of type `Option<&str>`. That is, + /// the iterator yields values for all capture groups, even ones that are + /// unnamed. The order of the groups corresponds to the order of the group's + /// corresponding opening parenthesis. + /// + /// The first element of the iterator always yields the group corresponding + /// to the overall match, and this group is always unnamed. Therefore, the + /// iterator always yields at least one group. + /// + /// # Example + /// + /// This shows basic usage with a mix of named and unnamed capture groups: + /// + /// ``` + /// use regex::Regex; + /// + /// let re = Regex::new(r"(?<a>.(?<b>.))(.)(?:.)(?<c>.)").unwrap(); + /// let mut names = re.capture_names(); + /// assert_eq!(names.next(), Some(None)); + /// assert_eq!(names.next(), Some(Some("a"))); + /// assert_eq!(names.next(), Some(Some("b"))); + /// assert_eq!(names.next(), Some(None)); + /// // the '(?:.)' group is non-capturing and so doesn't appear here! + /// assert_eq!(names.next(), Some(Some("c"))); + /// assert_eq!(names.next(), None); + /// ``` + /// + /// The iterator always yields at least one element, even for regexes with + /// no capture groups and even for regexes that can never match: + /// + /// ``` + /// use regex::Regex; + /// + /// let re = Regex::new(r"").unwrap(); + /// let mut names = re.capture_names(); + /// assert_eq!(names.next(), Some(None)); + /// assert_eq!(names.next(), None); + /// + /// let re = Regex::new(r"[a&&b]").unwrap(); + /// let mut names = re.capture_names(); + /// assert_eq!(names.next(), Some(None)); + /// assert_eq!(names.next(), None); + /// ``` + #[inline] + pub fn capture_names(&self) -> CaptureNames<'_> { + CaptureNames(self.meta.group_info().pattern_names(PatternID::ZERO)) + } + + /// Returns the number of captures groups in this regex. 
+ /// + /// This includes all named and unnamed groups, including the implicit + /// unnamed group that is always present and corresponds to the entire + /// match. + /// + /// Since the implicit unnamed group is always included in this length, the + /// length returned is guaranteed to be greater than zero. + /// + /// # Example + /// + /// ``` + /// use regex::Regex; + /// + /// let re = Regex::new(r"foo").unwrap(); + /// assert_eq!(1, re.captures_len()); + /// + /// let re = Regex::new(r"(foo)").unwrap(); + /// assert_eq!(2, re.captures_len()); + /// + /// let re = Regex::new(r"(?<a>.(?<b>.))(.)(?:.)(?<c>.)").unwrap(); + /// assert_eq!(5, re.captures_len()); + /// + /// let re = Regex::new(r"[a&&b]").unwrap(); + /// assert_eq!(1, re.captures_len()); + /// ``` + #[inline] + pub fn captures_len(&self) -> usize { + self.meta.group_info().group_len(PatternID::ZERO) + } + + /// Returns the total number of capturing groups that appear in every + /// possible match. + /// + /// If the number of capture groups can vary depending on the match, then + /// this returns `None`. That is, a value is only returned when the number + /// of matching groups is invariant or "static." + /// + /// Note that like [`Regex::captures_len`], this **does** include the + /// implicit capturing group corresponding to the entire match. Therefore, + /// when a non-None value is returned, it is guaranteed to be at least `1`. + /// Stated differently, a return value of `Some(0)` is impossible. + /// + /// # Example + /// + /// This shows a few cases where a static number of capture groups is + /// available and a few cases where it is not. + /// + /// ``` + /// use regex::Regex; + /// + /// let len = |pattern| { + /// Regex::new(pattern).map(|re| re.static_captures_len()) + /// }; + /// + /// assert_eq!(Some(1), len("a")?); + /// assert_eq!(Some(2), len("(a)")?); + /// assert_eq!(Some(2), len("(a)|(b)")?); + /// assert_eq!(Some(3), len("(a)(b)|(c)(d)")?); + /// assert_eq!(None, len("(a)|b")?); + /// assert_eq!(None, len("a|(b)")?); + /// assert_eq!(None, len("(b)*")?); + /// assert_eq!(Some(2), len("(b)+")?); + /// + /// # Ok::<(), Box<dyn std::error::Error>>(()) + /// ``` + #[inline] + pub fn static_captures_len(&self) -> Option<usize> { + self.meta.static_captures_len() + } + + /// Returns a fresh allocated set of capture locations that can + /// be reused in multiple calls to [`Regex::captures_read`] or + /// [`Regex::captures_read_at`]. + /// + /// The returned locations can be used for any subsequent search for this + /// particular regex. There is no guarantee that it is correct to use for + /// other regexes, even if they have the same number of capture groups. + /// + /// # Example + /// + /// ``` + /// use regex::Regex; + /// + /// let re = Regex::new(r"(.)(.)(\w+)").unwrap(); + /// let mut locs = re.capture_locations(); + /// assert!(re.captures_read(&mut locs, "Padron").is_some()); + /// assert_eq!(locs.get(0), Some((0, 6))); + /// assert_eq!(locs.get(1), Some((0, 1))); + /// assert_eq!(locs.get(2), Some((1, 2))); + /// assert_eq!(locs.get(3), Some((2, 6))); + /// ``` + #[inline] + pub fn capture_locations(&self) -> CaptureLocations { + CaptureLocations(self.meta.create_captures()) + } + + /// An alias for `capture_locations` to preserve backward compatibility. + /// + /// The `regex-capi` crate used this method, so to avoid breaking that + /// crate, we continue to export it as an undocumented API. 
+    #[doc(hidden)]
+    #[inline]
+    pub fn locations(&self) -> CaptureLocations {
+        self.capture_locations()
+    }
+}
+
+/// Represents a single match of a regex in a haystack.
+///
+/// A `Match` contains both the start and end byte offsets of the match and the
+/// actual substring corresponding to the range of those byte offsets. It is
+/// guaranteed that `start <= end`. When `start == end`, the match is empty.
+///
+/// Since this `Match` can only be produced by the top-level `Regex` APIs
+/// that only support searching UTF-8 encoded strings, the byte offsets for a
+/// `Match` are guaranteed to fall on valid UTF-8 codepoint boundaries. That
+/// is, slicing a `&str` with [`Match::range`] is guaranteed to never panic.
+///
+/// Values with this type are created by [`Regex::find`] or
+/// [`Regex::find_iter`]. Other APIs can create `Match` values too. For
+/// example, [`Captures::get`].
+///
+/// The lifetime parameter `'h` refers to the lifetime of the haystack that
+/// this match was produced from.
+///
+/// # Numbering
+///
+/// The byte offsets in a `Match` form a half-open interval. That is, the
+/// start of the range is inclusive and the end of the range is exclusive.
+/// For example, given a haystack `abcFOOxyz` and a match of `FOO`, its byte
+/// offset range starts at `3` and ends at `6`. `3` corresponds to `F` and
+/// `6` corresponds to `x`, which is one past the end of the match. This
+/// corresponds to the same kind of slicing that Rust uses.
+///
+/// For more on why this was chosen over other schemes (aside from being
+/// consistent with how Rust the language works), see [this discussion] and
+/// [Dijkstra's note on a related topic][note].
+///
+/// [this discussion]: https://github.com/rust-lang/regex/discussions/866
+/// [note]: https://www.cs.utexas.edu/users/EWD/transcriptions/EWD08xx/EWD831.html
+///
+/// # Example
+///
+/// This example shows the value of each of the methods on `Match` for a
+/// particular search.
+///
+/// ```
+/// use regex::Regex;
+///
+/// let re = Regex::new(r"\p{Greek}+").unwrap();
+/// let hay = "Greek: αβγδ";
+/// let m = re.find(hay).unwrap();
+/// assert_eq!(7, m.start());
+/// assert_eq!(15, m.end());
+/// assert!(!m.is_empty());
+/// assert_eq!(8, m.len());
+/// assert_eq!(7..15, m.range());
+/// assert_eq!("αβγδ", m.as_str());
+/// ```
+#[derive(Copy, Clone, Eq, PartialEq)]
+pub struct Match<'h> {
+    haystack: &'h str,
+    start: usize,
+    end: usize,
+}
+
+impl<'h> Match<'h> {
+    /// Returns the byte offset of the start of the match in the haystack. The
+    /// start of the match corresponds to the position where the match begins
+    /// and includes the first byte in the match.
+    ///
+    /// It is guaranteed that `Match::start() <= Match::end()`.
+    ///
+    /// This is guaranteed to fall on a valid UTF-8 codepoint boundary. That
+    /// is, it will never be an offset that appears between the UTF-8 code
+    /// units of a UTF-8 encoded Unicode scalar value. Consequently, it is
+    /// always safe to slice the corresponding haystack using this offset.
+    #[inline]
+    pub fn start(&self) -> usize {
+        self.start
+    }
+
+    /// Returns the byte offset of the end of the match in the haystack. The
+    /// end of the match corresponds to the byte immediately following the last
+    /// byte in the match. This means that `&slice[start..end]` works as one
+    /// would expect.
+    ///
+    /// It is guaranteed that `Match::start() <= Match::end()`.
+    ///
+    /// This is guaranteed to fall on a valid UTF-8 codepoint boundary.
That + /// is, it will never be an offset that appears between the UTF-8 code + /// units of a UTF-8 encoded Unicode scalar value. Consequently, it is + /// always safe to slice the corresponding haystack using this offset. + #[inline] + pub fn end(&self) -> usize { + self.end + } + + /// Returns true if and only if this match has a length of zero. + /// + /// Note that an empty match can only occur when the regex itself can + /// match the empty string. Here are some examples of regexes that can + /// all match the empty string: `^`, `^$`, `\b`, `a?`, `a*`, `a{0}`, + /// `(foo|\d+|quux)?`. + #[inline] + pub fn is_empty(&self) -> bool { + self.start == self.end + } + + /// Returns the length, in bytes, of this match. + #[inline] + pub fn len(&self) -> usize { + self.end - self.start + } + + /// Returns the range over the starting and ending byte offsets of the + /// match in the haystack. + /// + /// It is always correct to slice the original haystack searched with this + /// range. That is, because the offsets are guaranteed to fall on valid + /// UTF-8 boundaries, the range returned is always valid. + #[inline] + pub fn range(&self) -> core::ops::Range<usize> { + self.start..self.end + } + + /// Returns the substring of the haystack that matched. + #[inline] + pub fn as_str(&self) -> &'h str { + &self.haystack[self.range()] + } + + /// Creates a new match from the given haystack and byte offsets. + #[inline] + fn new(haystack: &'h str, start: usize, end: usize) -> Match<'h> { + Match { haystack, start, end } + } +} + +impl<'h> core::fmt::Debug for Match<'h> { + fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { + f.debug_struct("Match") + .field("start", &self.start) + .field("end", &self.end) + .field("string", &self.as_str()) + .finish() + } +} + +impl<'h> From<Match<'h>> for &'h str { + fn from(m: Match<'h>) -> &'h str { + m.as_str() + } +} + +impl<'h> From<Match<'h>> for core::ops::Range<usize> { + fn from(m: Match<'h>) -> core::ops::Range<usize> { + m.range() + } +} + +/// Represents the capture groups for a single match. +/// +/// Capture groups refer to parts of a regex enclosed in parentheses. They +/// can be optionally named. The purpose of capture groups is to be able to +/// reference different parts of a match based on the original pattern. In +/// essence, a `Captures` is a container of [`Match`] values for each group +/// that participated in a regex match. Each `Match` can be looked up by either +/// its capture group index or name (if it has one). +/// +/// For example, say you want to match the individual letters in a 5-letter +/// word: +/// +/// ```text +/// (?<first>\w)(\w)(?:\w)\w(?<last>\w) +/// ``` +/// +/// This regex has 4 capture groups: +/// +/// * The group at index `0` corresponds to the overall match. It is always +/// present in every match and never has a name. +/// * The group at index `1` with name `first` corresponding to the first +/// letter. +/// * The group at index `2` with no name corresponding to the second letter. +/// * The group at index `3` with name `last` corresponding to the fifth and +/// last letter. +/// +/// Notice that `(?:\w)` was not listed above as a capture group despite it +/// being enclosed in parentheses. That's because `(?:pattern)` is a special +/// syntax that permits grouping but *without* capturing. The reason for not +/// treating it as a capture is that tracking and reporting capture groups +/// requires additional state that may lead to slower searches. 
So using as few +/// capture groups as possible can help performance. (Although the difference +/// in performance of a couple of capture groups is likely immaterial.) +/// +/// Values with this type are created by [`Regex::captures`] or +/// [`Regex::captures_iter`]. +/// +/// `'h` is the lifetime of the haystack that these captures were matched from. +/// +/// # Example +/// +/// ``` +/// use regex::Regex; +/// +/// let re = Regex::new(r"(?<first>\w)(\w)(?:\w)\w(?<last>\w)").unwrap(); +/// let caps = re.captures("toady").unwrap(); +/// assert_eq!("toady", &caps[0]); +/// assert_eq!("t", &caps["first"]); +/// assert_eq!("o", &caps[2]); +/// assert_eq!("y", &caps["last"]); +/// ``` +pub struct Captures<'h> { + haystack: &'h str, + caps: captures::Captures, + static_captures_len: Option<usize>, +} + +impl<'h> Captures<'h> { + /// Returns the `Match` associated with the capture group at index `i`. If + /// `i` does not correspond to a capture group, or if the capture group did + /// not participate in the match, then `None` is returned. + /// + /// When `i == 0`, this is guaranteed to return a non-`None` value. + /// + /// # Examples + /// + /// Get the substring that matched with a default of an empty string if the + /// group didn't participate in the match: + /// + /// ``` + /// use regex::Regex; + /// + /// let re = Regex::new(r"[a-z]+(?:([0-9]+)|([A-Z]+))").unwrap(); + /// let caps = re.captures("abc123").unwrap(); + /// + /// let substr1 = caps.get(1).map_or("", |m| m.as_str()); + /// let substr2 = caps.get(2).map_or("", |m| m.as_str()); + /// assert_eq!(substr1, "123"); + /// assert_eq!(substr2, ""); + /// ``` + #[inline] + pub fn get(&self, i: usize) -> Option<Match<'h>> { + self.caps + .get_group(i) + .map(|sp| Match::new(self.haystack, sp.start, sp.end)) + } + + /// Return the overall match for the capture. + /// + /// This returns the match for index `0`. That is it is equivalent to + /// `m.get(0).unwrap()` + /// + /// # Example + /// + /// ``` + /// use regex::Regex; + /// + /// let re = Regex::new(r"[a-z]+([0-9]+)").unwrap(); + /// let caps = re.captures(" abc123-def").unwrap(); + /// + /// assert_eq!(caps.get_match().as_str(), "abc123"); + /// + /// ``` + #[inline] + pub fn get_match(&self) -> Match<'h> { + self.get(0).unwrap() + } + + /// Returns the `Match` associated with the capture group named `name`. If + /// `name` isn't a valid capture group or it refers to a group that didn't + /// match, then `None` is returned. + /// + /// Note that unlike `caps["name"]`, this returns a `Match` whose lifetime + /// matches the lifetime of the haystack in this `Captures` value. + /// Conversely, the substring returned by `caps["name"]` has a lifetime + /// of the `Captures` value, which is likely shorter than the lifetime of + /// the haystack. In some cases, it may be necessary to use this method to + /// access the matching substring instead of the `caps["name"]` notation. 
+ /// + /// # Examples + /// + /// Get the substring that matched with a default of an empty string if the + /// group didn't participate in the match: + /// + /// ``` + /// use regex::Regex; + /// + /// let re = Regex::new( + /// r"[a-z]+(?:(?<numbers>[0-9]+)|(?<letters>[A-Z]+))", + /// ).unwrap(); + /// let caps = re.captures("abc123").unwrap(); + /// + /// let numbers = caps.name("numbers").map_or("", |m| m.as_str()); + /// let letters = caps.name("letters").map_or("", |m| m.as_str()); + /// assert_eq!(numbers, "123"); + /// assert_eq!(letters, ""); + /// ``` + #[inline] + pub fn name(&self, name: &str) -> Option<Match<'h>> { + self.caps + .get_group_by_name(name) + .map(|sp| Match::new(self.haystack, sp.start, sp.end)) + } + + /// This is a convenience routine for extracting the substrings + /// corresponding to matching capture groups. + /// + /// This returns a tuple where the first element corresponds to the full + /// substring of the haystack that matched the regex. The second element is + /// an array of substrings, with each corresponding to the substring that + /// matched for a particular capture group. + /// + /// # Panics + /// + /// This panics if the number of possible matching groups in this + /// `Captures` value is not fixed to `N` in all circumstances. + /// More precisely, this routine only works when `N` is equivalent to + /// [`Regex::static_captures_len`]. + /// + /// Stated more plainly, if the number of matching capture groups in a + /// regex can vary from match to match, then this function always panics. + /// + /// For example, `(a)(b)|(c)` could produce two matching capture groups + /// or one matching capture group for any given match. Therefore, one + /// cannot use `extract` with such a pattern. + /// + /// But a pattern like `(a)(b)|(c)(d)` can be used with `extract` because + /// the number of capture groups in every match is always equivalent, + /// even if the capture _indices_ in each match are not. + /// + /// # Example + /// + /// ``` + /// use regex::Regex; + /// + /// let re = Regex::new(r"([0-9]{4})-([0-9]{2})-([0-9]{2})").unwrap(); + /// let hay = "On 2010-03-14, I became a Tennessee lamb."; + /// let Some((full, [year, month, day])) = + /// re.captures(hay).map(|caps| caps.extract()) else { return }; + /// assert_eq!("2010-03-14", full); + /// assert_eq!("2010", year); + /// assert_eq!("03", month); + /// assert_eq!("14", day); + /// ``` + /// + /// # Example: iteration + /// + /// This example shows how to use this method when iterating over all + /// `Captures` matches in a haystack. + /// + /// ``` + /// use regex::Regex; + /// + /// let re = Regex::new(r"([0-9]{4})-([0-9]{2})-([0-9]{2})").unwrap(); + /// let hay = "1973-01-05, 1975-08-25 and 1980-10-18"; + /// + /// let mut dates: Vec<(&str, &str, &str)> = vec![]; + /// for (_, [y, m, d]) in re.captures_iter(hay).map(|c| c.extract()) { + /// dates.push((y, m, d)); + /// } + /// assert_eq!(dates, vec![ + /// ("1973", "01", "05"), + /// ("1975", "08", "25"), + /// ("1980", "10", "18"), + /// ]); + /// ``` + /// + /// # Example: parsing different formats + /// + /// This API is particularly useful when you need to extract a particular + /// value that might occur in a different format. 
Consider, for example,
+    /// an identifier that might be in double quotes or single quotes:
+    ///
+    /// ```
+    /// use regex::Regex;
+    ///
+    /// let re = Regex::new(r#"id:(?:"([^"]+)"|'([^']+)')"#).unwrap();
+    /// let hay = r#"The first is id:"foo" and the second is id:'bar'."#;
+    /// let mut ids = vec![];
+    /// for (_, [id]) in re.captures_iter(hay).map(|c| c.extract()) {
+    ///     ids.push(id);
+    /// }
+    /// assert_eq!(ids, vec!["foo", "bar"]);
+    /// ```
+    pub fn extract<const N: usize>(&self) -> (&'h str, [&'h str; N]) {
+        let len = self
+            .static_captures_len
+            .expect("number of capture groups can vary in a match")
+            .checked_sub(1)
+            .expect("number of groups is always greater than zero");
+        assert_eq!(N, len, "asked for {N} groups, but must ask for {len}");
+        // The regex-automata variant of extract is a bit more permissive.
+        // It doesn't require the number of matching capturing groups to be
+        // static, and you can even request fewer groups than what's there. So
+        // this is guaranteed to never panic because we've asserted above that
+        // the user has requested precisely the number of groups that must be
+        // present in any match for this regex.
+        self.caps.extract(self.haystack)
+    }
+
+    /// Expands all instances of `$ref` in `replacement` to the corresponding
+    /// capture group, and writes them to the `dst` buffer given. A `ref` can
+    /// be a capture group index or a name. If `ref` doesn't refer to a capture
+    /// group that participated in the match, then it is replaced with the
+    /// empty string.
+    ///
+    /// # Format
+    ///
+    /// The format of the replacement string supports two different kinds of
+    /// capture references: unbraced and braced.
+    ///
+    /// For the unbraced format, the format supported is `$ref` where `ref`
+    /// can be one or more characters in the class `[0-9A-Za-z_]`. `ref` is
+    /// always the longest possible parse. So for example, `$1a` corresponds
+    /// to the capture group named `1a` and not the capture group at index
+    /// `1`. If `ref` matches `^[0-9]+$`, then it is treated as a capture
+    /// group index itself and not a name.
+    ///
+    /// For the braced format, the format supported is `${ref}` where `ref` can
+    /// be any sequence of bytes except for `}`. If no closing brace occurs,
+    /// then it is not considered a capture reference. As with the unbraced
+    /// format, if `ref` matches `^[0-9]+$`, then it is treated as a capture
+    /// group index and not a name.
+    ///
+    /// The braced format is useful for exerting precise control over the name
+    /// of the capture reference. For example, `${1}a` corresponds to the
+    /// capture group reference `1` followed by the letter `a`, whereas `$1a`
+    /// (as mentioned above) corresponds to the capture group reference `1a`.
+    /// The braced format is also useful for expressing capture group names
+    /// that use characters not supported by the unbraced format. For example,
+    /// `${foo[bar].baz}` refers to the capture group named `foo[bar].baz`.
+    ///
+    /// If a capture group reference is found and it does not refer to a valid
+    /// capture group, then it will be replaced with the empty string.
+    ///
+    /// To write a literal `$`, use `$$`.
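+    ///
+    /// For instance, a small illustrative sketch (not taken from the upstream
+    /// docs) of the braced form and the literal-dollar escape:
+    ///
+    /// ```
+    /// use regex::Regex;
+    ///
+    /// let re = Regex::new(r"(?<num>[0-9]+)").unwrap();
+    /// let caps = re.captures("item 42").unwrap();
+    /// let mut dst = String::new();
+    /// // `${num}` pins the reference name exactly; `$$` emits a literal `$`.
+    /// caps.expand("$$${num}.00", &mut dst);
+    /// assert_eq!(dst, "$42.00");
+    /// ```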
+ /// + /// # Example + /// + /// ``` + /// use regex::Regex; + /// + /// let re = Regex::new( + /// r"(?<day>[0-9]{2})-(?<month>[0-9]{2})-(?<year>[0-9]{4})", + /// ).unwrap(); + /// let hay = "On 14-03-2010, I became a Tennessee lamb."; + /// let caps = re.captures(hay).unwrap(); + /// + /// let mut dst = String::new(); + /// caps.expand("year=$year, month=$month, day=$day", &mut dst); + /// assert_eq!(dst, "year=2010, month=03, day=14"); + /// ``` + #[inline] + pub fn expand(&self, replacement: &str, dst: &mut String) { + self.caps.interpolate_string_into(self.haystack, replacement, dst); + } + + /// Returns an iterator over all capture groups. This includes both + /// matching and non-matching groups. + /// + /// The iterator always yields at least one matching group: the first group + /// (at index `0`) with no name. Subsequent groups are returned in the order + /// of their opening parenthesis in the regex. + /// + /// The elements yielded have type `Option<Match<'h>>`, where a non-`None` + /// value is present if the capture group matches. + /// + /// # Example + /// + /// ``` + /// use regex::Regex; + /// + /// let re = Regex::new(r"(\w)(\d)?(\w)").unwrap(); + /// let caps = re.captures("AZ").unwrap(); + /// + /// let mut it = caps.iter(); + /// assert_eq!(it.next().unwrap().map(|m| m.as_str()), Some("AZ")); + /// assert_eq!(it.next().unwrap().map(|m| m.as_str()), Some("A")); + /// assert_eq!(it.next().unwrap().map(|m| m.as_str()), None); + /// assert_eq!(it.next().unwrap().map(|m| m.as_str()), Some("Z")); + /// assert_eq!(it.next(), None); + /// ``` + #[inline] + pub fn iter<'c>(&'c self) -> SubCaptureMatches<'c, 'h> { + SubCaptureMatches { haystack: self.haystack, it: self.caps.iter() } + } + + /// Returns the total number of capture groups. This includes both + /// matching and non-matching groups. + /// + /// The length returned is always equivalent to the number of elements + /// yielded by [`Captures::iter`]. Consequently, the length is always + /// greater than zero since every `Captures` value always includes the + /// match for the entire regex. + /// + /// # Example + /// + /// ``` + /// use regex::Regex; + /// + /// let re = Regex::new(r"(\w)(\d)?(\w)").unwrap(); + /// let caps = re.captures("AZ").unwrap(); + /// assert_eq!(caps.len(), 4); + /// ``` + #[inline] + pub fn len(&self) -> usize { + self.caps.group_len() + } +} + +impl<'h> core::fmt::Debug for Captures<'h> { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + /// A little helper type to provide a nice map-like debug + /// representation for our capturing group spans. + /// + /// regex-automata has something similar, but it includes the pattern + /// ID in its debug output, which is confusing. It also doesn't include + /// that strings that match because a regex-automata `Captures` doesn't + /// borrow the haystack. 
+ struct CapturesDebugMap<'a> { + caps: &'a Captures<'a>, + } + + impl<'a> core::fmt::Debug for CapturesDebugMap<'a> { + fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { + let mut map = f.debug_map(); + let names = + self.caps.caps.group_info().pattern_names(PatternID::ZERO); + for (group_index, maybe_name) in names.enumerate() { + let key = Key(group_index, maybe_name); + match self.caps.get(group_index) { + None => map.entry(&key, &None::<()>), + Some(mat) => map.entry(&key, &Value(mat)), + }; + } + map.finish() + } + } + + struct Key<'a>(usize, Option<&'a str>); + + impl<'a> core::fmt::Debug for Key<'a> { + fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { + write!(f, "{}", self.0)?; + if let Some(name) = self.1 { + write!(f, "/{name:?}")?; + } + Ok(()) + } + } + + struct Value<'a>(Match<'a>); + + impl<'a> core::fmt::Debug for Value<'a> { + fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { + write!( + f, + "{}..{}/{:?}", + self.0.start(), + self.0.end(), + self.0.as_str() + ) + } + } + + f.debug_tuple("Captures") + .field(&CapturesDebugMap { caps: self }) + .finish() + } +} + +/// Get a matching capture group's haystack substring by index. +/// +/// The haystack substring returned can't outlive the `Captures` object if this +/// method is used, because of how `Index` is defined (normally `a[i]` is part +/// of `a` and can't outlive it). To work around this limitation, do that, use +/// [`Captures::get`] instead. +/// +/// `'h` is the lifetime of the matched haystack, but the lifetime of the +/// `&str` returned by this implementation is the lifetime of the `Captures` +/// value itself. +/// +/// # Panics +/// +/// If there is no matching group at the given index. +impl<'h> core::ops::Index<usize> for Captures<'h> { + type Output = str; + + // The lifetime is written out to make it clear that the &str returned + // does NOT have a lifetime equivalent to 'h. + fn index<'a>(&'a self, i: usize) -> &'a str { + self.get(i) + .map(|m| m.as_str()) + .unwrap_or_else(|| panic!("no group at index '{i}'")) + } +} + +/// Get a matching capture group's haystack substring by name. +/// +/// The haystack substring returned can't outlive the `Captures` object if this +/// method is used, because of how `Index` is defined (normally `a[i]` is part +/// of `a` and can't outlive it). To work around this limitation, do that, use +/// [`Captures::name`] instead. +/// +/// `'h` is the lifetime of the matched haystack, but the lifetime of the +/// `&str` returned by this implementation is the lifetime of the `Captures` +/// value itself. +/// +/// `'n` is the lifetime of the group name used to index the `Captures` value. +/// +/// # Panics +/// +/// If there is no matching group at the given name. +impl<'h, 'n> core::ops::Index<&'n str> for Captures<'h> { + type Output = str; + + fn index<'a>(&'a self, name: &'n str) -> &'a str { + self.name(name) + .map(|m| m.as_str()) + .unwrap_or_else(|| panic!("no group named '{name}'")) + } +} + +/// A low level representation of the byte offsets of each capture group. +/// +/// You can think of this as a lower level [`Captures`], where this type does +/// not support named capturing groups directly and it does not borrow the +/// haystack that these offsets were matched on. +/// +/// Primarily, this type is useful when using the lower level `Regex` APIs such +/// as [`Regex::captures_read`], which permits amortizing the allocation in +/// which capture match offsets are stored. 
+/// +/// In order to build a value of this type, you'll need to call the +/// [`Regex::capture_locations`] method. The value returned can then be reused +/// in subsequent searches for that regex. Using it for other regexes may +/// result in a panic or otherwise incorrect results. +/// +/// # Example +/// +/// This example shows how to create and use `CaptureLocations` in a search. +/// +/// ``` +/// use regex::Regex; +/// +/// let re = Regex::new(r"(?<first>\w+)\s+(?<last>\w+)").unwrap(); +/// let mut locs = re.capture_locations(); +/// let m = re.captures_read(&mut locs, "Bruce Springsteen").unwrap(); +/// assert_eq!(0..17, m.range()); +/// assert_eq!(Some((0, 17)), locs.get(0)); +/// assert_eq!(Some((0, 5)), locs.get(1)); +/// assert_eq!(Some((6, 17)), locs.get(2)); +/// +/// // Asking for an invalid capture group always returns None. +/// assert_eq!(None, locs.get(3)); +/// # // literals are too big for 32-bit usize: #1041 +/// # #[cfg(target_pointer_width = "64")] +/// assert_eq!(None, locs.get(34973498648)); +/// # #[cfg(target_pointer_width = "64")] +/// assert_eq!(None, locs.get(9944060567225171988)); +/// ``` +#[derive(Clone, Debug)] +pub struct CaptureLocations(captures::Captures); + +/// A type alias for `CaptureLocations` for backwards compatibility. +/// +/// Previously, we exported `CaptureLocations` as `Locations` in an +/// undocumented API. To prevent breaking that code (e.g., in `regex-capi`), +/// we continue re-exporting the same undocumented API. +#[doc(hidden)] +pub type Locations = CaptureLocations; + +impl CaptureLocations { + /// Returns the start and end byte offsets of the capture group at index + /// `i`. This returns `None` if `i` is not a valid capture group or if the + /// capture group did not match. + /// + /// # Example + /// + /// ``` + /// use regex::Regex; + /// + /// let re = Regex::new(r"(?<first>\w+)\s+(?<last>\w+)").unwrap(); + /// let mut locs = re.capture_locations(); + /// re.captures_read(&mut locs, "Bruce Springsteen").unwrap(); + /// assert_eq!(Some((0, 17)), locs.get(0)); + /// assert_eq!(Some((0, 5)), locs.get(1)); + /// assert_eq!(Some((6, 17)), locs.get(2)); + /// ``` + #[inline] + pub fn get(&self, i: usize) -> Option<(usize, usize)> { + self.0.get_group(i).map(|sp| (sp.start, sp.end)) + } + + /// Returns the total number of capture groups (even if they didn't match). + /// That is, the length returned is unaffected by the result of a search. + /// + /// This is always at least `1` since every regex has at least `1` + /// capturing group that corresponds to the entire match. + /// + /// # Example + /// + /// ``` + /// use regex::Regex; + /// + /// let re = Regex::new(r"(?<first>\w+)\s+(?<last>\w+)").unwrap(); + /// let mut locs = re.capture_locations(); + /// assert_eq!(3, locs.len()); + /// re.captures_read(&mut locs, "Bruce Springsteen").unwrap(); + /// assert_eq!(3, locs.len()); + /// ``` + /// + /// Notice that the length is always at least `1`, regardless of the regex: + /// + /// ``` + /// use regex::Regex; + /// + /// let re = Regex::new(r"").unwrap(); + /// let locs = re.capture_locations(); + /// assert_eq!(1, locs.len()); + /// + /// // [a&&b] is a regex that never matches anything. 
+ /// let re = Regex::new(r"[a&&b]").unwrap(); + /// let locs = re.capture_locations(); + /// assert_eq!(1, locs.len()); + /// ``` + #[inline] + pub fn len(&self) -> usize { + // self.0.group_len() returns 0 if the underlying captures doesn't + // represent a match, but the behavior guaranteed for this method is + // that the length doesn't change based on a match or not. + self.0.group_info().group_len(PatternID::ZERO) + } + + /// An alias for the `get` method for backwards compatibility. + /// + /// Previously, we exported `get` as `pos` in an undocumented API. To + /// prevent breaking that code (e.g., in `regex-capi`), we continue + /// re-exporting the same undocumented API. + #[doc(hidden)] + #[inline] + pub fn pos(&self, i: usize) -> Option<(usize, usize)> { + self.get(i) + } +} + +/// An iterator over all non-overlapping matches in a haystack. +/// +/// This iterator yields [`Match`] values. The iterator stops when no more +/// matches can be found. +/// +/// `'r` is the lifetime of the compiled regular expression and `'h` is the +/// lifetime of the haystack. +/// +/// This iterator is created by [`Regex::find_iter`]. +/// +/// # Time complexity +/// +/// Note that since an iterator runs potentially many searches on the haystack +/// and since each search has worst case `O(m * n)` time complexity, the +/// overall worst case time complexity for iteration is `O(m * n^2)`. +#[derive(Debug)] +pub struct Matches<'r, 'h> { + haystack: &'h str, + it: meta::FindMatches<'r, 'h>, +} + +impl<'r, 'h> Iterator for Matches<'r, 'h> { + type Item = Match<'h>; + + #[inline] + fn next(&mut self) -> Option<Match<'h>> { + self.it + .next() + .map(|sp| Match::new(self.haystack, sp.start(), sp.end())) + } + + #[inline] + fn count(self) -> usize { + // This can actually be up to 2x faster than calling `next()` until + // completion, because counting matches when using a DFA only requires + // finding the end of each match. But returning a `Match` via `next()` + // requires the start of each match which, with a DFA, requires a + // reverse forward scan to find it. + self.it.count() + } +} + +impl<'r, 'h> core::iter::FusedIterator for Matches<'r, 'h> {} + +/// An iterator over all non-overlapping capture matches in a haystack. +/// +/// This iterator yields [`Captures`] values. The iterator stops when no more +/// matches can be found. +/// +/// `'r` is the lifetime of the compiled regular expression and `'h` is the +/// lifetime of the matched string. +/// +/// This iterator is created by [`Regex::captures_iter`]. +/// +/// # Time complexity +/// +/// Note that since an iterator runs potentially many searches on the haystack +/// and since each search has worst case `O(m * n)` time complexity, the +/// overall worst case time complexity for iteration is `O(m * n^2)`. +#[derive(Debug)] +pub struct CaptureMatches<'r, 'h> { + haystack: &'h str, + it: meta::CapturesMatches<'r, 'h>, +} + +impl<'r, 'h> Iterator for CaptureMatches<'r, 'h> { + type Item = Captures<'h>; + + #[inline] + fn next(&mut self) -> Option<Captures<'h>> { + let static_captures_len = self.it.regex().static_captures_len(); + self.it.next().map(|caps| Captures { + haystack: self.haystack, + caps, + static_captures_len, + }) + } + + #[inline] + fn count(self) -> usize { + // This can actually be up to 2x faster than calling `next()` until + // completion, because counting matches when using a DFA only requires + // finding the end of each match. 
But returning a `Match` via `next()` + // requires the start of each match which, with a DFA, requires a + // reverse forward scan to find it. + self.it.count() + } +} + +impl<'r, 'h> core::iter::FusedIterator for CaptureMatches<'r, 'h> {} + +/// An iterator over all substrings delimited by a regex match. +/// +/// `'r` is the lifetime of the compiled regular expression and `'h` is the +/// lifetime of the byte string being split. +/// +/// This iterator is created by [`Regex::split`]. +/// +/// # Time complexity +/// +/// Note that since an iterator runs potentially many searches on the haystack +/// and since each search has worst case `O(m * n)` time complexity, the +/// overall worst case time complexity for iteration is `O(m * n^2)`. +#[derive(Debug)] +pub struct Split<'r, 'h> { + haystack: &'h str, + it: meta::Split<'r, 'h>, +} + +impl<'r, 'h> Iterator for Split<'r, 'h> { + type Item = &'h str; + + #[inline] + fn next(&mut self) -> Option<&'h str> { + self.it.next().map(|span| &self.haystack[span]) + } +} + +impl<'r, 'h> core::iter::FusedIterator for Split<'r, 'h> {} + +/// An iterator over at most `N` substrings delimited by a regex match. +/// +/// The last substring yielded by this iterator will be whatever remains after +/// `N-1` splits. +/// +/// `'r` is the lifetime of the compiled regular expression and `'h` is the +/// lifetime of the byte string being split. +/// +/// This iterator is created by [`Regex::splitn`]. +/// +/// # Time complexity +/// +/// Note that since an iterator runs potentially many searches on the haystack +/// and since each search has worst case `O(m * n)` time complexity, the +/// overall worst case time complexity for iteration is `O(m * n^2)`. +/// +/// Although note that the worst case time here has an upper bound given +/// by the `limit` parameter to [`Regex::splitn`]. +#[derive(Debug)] +pub struct SplitN<'r, 'h> { + haystack: &'h str, + it: meta::SplitN<'r, 'h>, +} + +impl<'r, 'h> Iterator for SplitN<'r, 'h> { + type Item = &'h str; + + #[inline] + fn next(&mut self) -> Option<&'h str> { + self.it.next().map(|span| &self.haystack[span]) + } + + #[inline] + fn size_hint(&self) -> (usize, Option<usize>) { + self.it.size_hint() + } +} + +impl<'r, 'h> core::iter::FusedIterator for SplitN<'r, 'h> {} + +/// An iterator over the names of all capture groups in a regex. +/// +/// This iterator yields values of type `Option<&str>` in order of the opening +/// capture group parenthesis in the regex pattern. `None` is yielded for +/// groups with no name. The first element always corresponds to the implicit +/// and unnamed group for the overall match. +/// +/// `'r` is the lifetime of the compiled regular expression. +/// +/// This iterator is created by [`Regex::capture_names`]. +#[derive(Clone, Debug)] +pub struct CaptureNames<'r>(captures::GroupInfoPatternNames<'r>); + +impl<'r> Iterator for CaptureNames<'r> { + type Item = Option<&'r str>; + + #[inline] + fn next(&mut self) -> Option<Option<&'r str>> { + self.0.next() + } + + #[inline] + fn size_hint(&self) -> (usize, Option<usize>) { + self.0.size_hint() + } + + #[inline] + fn count(self) -> usize { + self.0.count() + } +} + +impl<'r> ExactSizeIterator for CaptureNames<'r> {} + +impl<'r> core::iter::FusedIterator for CaptureNames<'r> {} + +/// An iterator over all group matches in a [`Captures`] value. +/// +/// This iterator yields values of type `Option<Match<'h>>`, where `'h` is the +/// lifetime of the haystack that the matches are for. 
The order of elements +/// yielded corresponds to the order of the opening parenthesis for the group +/// in the regex pattern. `None` is yielded for groups that did not participate +/// in the match. +/// +/// The first element always corresponds to the implicit group for the overall +/// match. Since this iterator is created by a [`Captures`] value, and a +/// `Captures` value is only created when a match occurs, it follows that the +/// first element yielded by this iterator is guaranteed to be non-`None`. +/// +/// The lifetime `'c` corresponds to the lifetime of the `Captures` value that +/// created this iterator, and the lifetime `'h` corresponds to the originally +/// matched haystack. +#[derive(Clone, Debug)] +pub struct SubCaptureMatches<'c, 'h> { + haystack: &'h str, + it: captures::CapturesPatternIter<'c>, +} + +impl<'c, 'h> Iterator for SubCaptureMatches<'c, 'h> { + type Item = Option<Match<'h>>; + + #[inline] + fn next(&mut self) -> Option<Option<Match<'h>>> { + self.it.next().map(|group| { + group.map(|sp| Match::new(self.haystack, sp.start, sp.end)) + }) + } + + #[inline] + fn size_hint(&self) -> (usize, Option<usize>) { + self.it.size_hint() + } + + #[inline] + fn count(self) -> usize { + self.it.count() + } +} + +impl<'c, 'h> ExactSizeIterator for SubCaptureMatches<'c, 'h> {} + +impl<'c, 'h> core::iter::FusedIterator for SubCaptureMatches<'c, 'h> {} + +/// A trait for types that can be used to replace matches in a haystack. +/// +/// In general, users of this crate shouldn't need to implement this trait, +/// since implementations are already provided for `&str` along with other +/// variants of string types, as well as `FnMut(&Captures) -> String` (or any +/// `FnMut(&Captures) -> T` where `T: AsRef<str>`). Those cover most use cases, +/// but callers can implement this trait directly if necessary. +/// +/// # Example +/// +/// This example shows a basic implementation of the `Replacer` trait. This +/// can be done much more simply using the replacement string interpolation +/// support (e.g., `$first $last`), but this approach avoids needing to parse +/// the replacement string at all. +/// +/// ``` +/// use regex::{Captures, Regex, Replacer}; +/// +/// struct NameSwapper; +/// +/// impl Replacer for NameSwapper { +/// fn replace_append(&mut self, caps: &Captures<'_>, dst: &mut String) { +/// dst.push_str(&caps["first"]); +/// dst.push_str(" "); +/// dst.push_str(&caps["last"]); +/// } +/// } +/// +/// let re = Regex::new(r"(?<last>[^,\s]+),\s+(?<first>\S+)").unwrap(); +/// let result = re.replace("Springsteen, Bruce", NameSwapper); +/// assert_eq!(result, "Bruce Springsteen"); +/// ``` +pub trait Replacer { + /// Appends possibly empty data to `dst` to replace the current match. + /// + /// The current match is represented by `caps`, which is guaranteed to + /// have a match at capture group `0`. + /// + /// For example, a no-op replacement would be `dst.push_str(&caps[0])`. + fn replace_append(&mut self, caps: &Captures<'_>, dst: &mut String); + + /// Return a fixed unchanging replacement string. + /// + /// When doing replacements, if access to [`Captures`] is not needed (e.g., + /// the replacement string does not need `$` expansion), then it can be + /// beneficial to avoid finding sub-captures. + /// + /// In general, this is called once for every call to a replacement routine + /// such as [`Regex::replace_all`]. 
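+    ///
+    /// # Example
+    ///
+    /// A minimal sketch, not taken from the upstream docs: a custom replacer
+    /// whose output is a fixed string, so it can opt into the faster
+    /// non-capturing search path by reporting that string here.
+    ///
+    /// ```
+    /// use std::borrow::Cow;
+    ///
+    /// use regex::{Captures, Regex, Replacer};
+    ///
+    /// struct Dash;
+    ///
+    /// impl Replacer for Dash {
+    ///     fn replace_append(&mut self, _: &Captures<'_>, dst: &mut String) {
+    ///         dst.push_str("-");
+    ///     }
+    ///
+    ///     // The replacement never inspects capture groups, so report it as
+    ///     // a fixed string and let the search skip capture tracking.
+    ///     fn no_expansion<'r>(&'r mut self) -> Option<Cow<'r, str>> {
+    ///         Some(Cow::Borrowed("-"))
+    ///     }
+    /// }
+    ///
+    /// let re = Regex::new(r"[0-9]+").unwrap();
+    /// assert_eq!(re.replace_all("a1b22c333", Dash), "a-b-c-");
+    /// ```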
+ fn no_expansion<'r>(&'r mut self) -> Option<Cow<'r, str>> { + None + } + + /// Returns a type that implements `Replacer`, but that borrows and wraps + /// this `Replacer`. + /// + /// This is useful when you want to take a generic `Replacer` (which might + /// not be cloneable) and use it without consuming it, so it can be used + /// more than once. + /// + /// # Example + /// + /// ``` + /// use regex::{Regex, Replacer}; + /// + /// fn replace_all_twice<R: Replacer>( + /// re: Regex, + /// src: &str, + /// mut rep: R, + /// ) -> String { + /// let dst = re.replace_all(src, rep.by_ref()); + /// let dst = re.replace_all(&dst, rep.by_ref()); + /// dst.into_owned() + /// } + /// ``` + fn by_ref<'r>(&'r mut self) -> ReplacerRef<'r, Self> { + ReplacerRef(self) + } +} + +impl<'a> Replacer for &'a str { + fn replace_append(&mut self, caps: &Captures<'_>, dst: &mut String) { + caps.expand(*self, dst); + } + + fn no_expansion(&mut self) -> Option<Cow<'_, str>> { + no_expansion(self) + } +} + +impl<'a> Replacer for &'a String { + fn replace_append(&mut self, caps: &Captures<'_>, dst: &mut String) { + self.as_str().replace_append(caps, dst) + } + + fn no_expansion(&mut self) -> Option<Cow<'_, str>> { + no_expansion(self) + } +} + +impl Replacer for String { + fn replace_append(&mut self, caps: &Captures<'_>, dst: &mut String) { + self.as_str().replace_append(caps, dst) + } + + fn no_expansion(&mut self) -> Option<Cow<'_, str>> { + no_expansion(self) + } +} + +impl<'a> Replacer for Cow<'a, str> { + fn replace_append(&mut self, caps: &Captures<'_>, dst: &mut String) { + self.as_ref().replace_append(caps, dst) + } + + fn no_expansion(&mut self) -> Option<Cow<'_, str>> { + no_expansion(self) + } +} + +impl<'a> Replacer for &'a Cow<'a, str> { + fn replace_append(&mut self, caps: &Captures<'_>, dst: &mut String) { + self.as_ref().replace_append(caps, dst) + } + + fn no_expansion(&mut self) -> Option<Cow<'_, str>> { + no_expansion(self) + } +} + +impl<F, T> Replacer for F +where + F: FnMut(&Captures<'_>) -> T, + T: AsRef<str>, +{ + fn replace_append(&mut self, caps: &Captures<'_>, dst: &mut String) { + dst.push_str((*self)(caps).as_ref()); + } +} + +/// A by-reference adaptor for a [`Replacer`]. +/// +/// This permits reusing the same `Replacer` value in multiple calls to a +/// replacement routine like [`Regex::replace_all`]. +/// +/// This type is created by [`Replacer::by_ref`]. +#[derive(Debug)] +pub struct ReplacerRef<'a, R: ?Sized>(&'a mut R); + +impl<'a, R: Replacer + ?Sized + 'a> Replacer for ReplacerRef<'a, R> { + fn replace_append(&mut self, caps: &Captures<'_>, dst: &mut String) { + self.0.replace_append(caps, dst) + } + + fn no_expansion(&mut self) -> Option<Cow<'_, str>> { + self.0.no_expansion() + } +} + +/// A helper type for forcing literal string replacement. +/// +/// It can be used with routines like [`Regex::replace`] and +/// [`Regex::replace_all`] to do a literal string replacement without expanding +/// `$name` to their corresponding capture groups. This can be both convenient +/// (to avoid escaping `$`, for example) and faster (since capture groups +/// don't need to be found). +/// +/// `'s` is the lifetime of the literal string to use. 
+/// +/// # Example +/// +/// ``` +/// use regex::{NoExpand, Regex}; +/// +/// let re = Regex::new(r"(?<last>[^,\s]+),\s+(\S+)").unwrap(); +/// let result = re.replace("Springsteen, Bruce", NoExpand("$2 $last")); +/// assert_eq!(result, "$2 $last"); +/// ``` +#[derive(Clone, Debug)] +pub struct NoExpand<'s>(pub &'s str); + +impl<'s> Replacer for NoExpand<'s> { + fn replace_append(&mut self, _: &Captures<'_>, dst: &mut String) { + dst.push_str(self.0); + } + + fn no_expansion(&mut self) -> Option<Cow<'_, str>> { + Some(Cow::Borrowed(self.0)) + } +} + +/// Quickly checks the given replacement string for whether interpolation +/// should be done on it. It returns `None` if a `$` was found anywhere in the +/// given string, which suggests interpolation needs to be done. But if there's +/// no `$` anywhere, then interpolation definitely does not need to be done. In +/// that case, the given string is returned as a borrowed `Cow`. +/// +/// This is meant to be used to implement the [`Replacer::no_expansion`] method +/// in its various trait impls. +fn no_expansion<T: AsRef<str>>(replacement: &T) -> Option<Cow<'_, str>> { + let replacement = replacement.as_ref(); + match crate::find_byte::find_byte(b'$', replacement.as_bytes()) { + Some(_) => None, + None => Some(Cow::Borrowed(replacement)), + } +} diff --git a/vendor/regex/src/regexset/bytes.rs b/vendor/regex/src/regexset/bytes.rs new file mode 100644 index 00000000000000..76174afffcd41a --- /dev/null +++ b/vendor/regex/src/regexset/bytes.rs @@ -0,0 +1,728 @@ +use alloc::string::String; + +use regex_automata::{meta, Input, PatternID, PatternSet, PatternSetIter}; + +use crate::{bytes::RegexSetBuilder, Error}; + +/// Match multiple, possibly overlapping, regexes in a single search. +/// +/// A regex set corresponds to the union of zero or more regular expressions. +/// That is, a regex set will match a haystack when at least one of its +/// constituent regexes matches. A regex set as its formulated here provides a +/// touch more power: it will also report *which* regular expressions in the +/// set match. Indeed, this is the key difference between regex sets and a +/// single `Regex` with many alternates, since only one alternate can match at +/// a time. +/// +/// For example, consider regular expressions to match email addresses and +/// domains: `[a-z]+@[a-z]+\.(com|org|net)` and `[a-z]+\.(com|org|net)`. If a +/// regex set is constructed from those regexes, then searching the haystack +/// `foo@example.com` will report both regexes as matching. Of course, one +/// could accomplish this by compiling each regex on its own and doing two +/// searches over the haystack. The key advantage of using a regex set is +/// that it will report the matching regexes using a *single pass through the +/// haystack*. If one has hundreds or thousands of regexes to match repeatedly +/// (like a URL router for a complex web application or a user agent matcher), +/// then a regex set *can* realize huge performance gains. +/// +/// Unlike the top-level [`RegexSet`](crate::RegexSet), this `RegexSet` +/// searches haystacks with type `&[u8]` instead of `&str`. Consequently, this +/// `RegexSet` is permitted to match invalid UTF-8. +/// +/// # Limitations +/// +/// Regex sets are limited to answering the following two questions: +/// +/// 1. Does any regex in the set match? +/// 2. If so, which regexes in the set match? 
+///
+/// As with the main [`Regex`][crate::bytes::Regex] type, it is cheaper to ask
+/// (1) instead of (2) since the matching engines can stop after the first
+/// match is found.
+///
+/// You cannot directly extract [`Match`][crate::bytes::Match] or
+/// [`Captures`][crate::bytes::Captures] objects from a regex set. If you need
+/// these operations, the recommended approach is to compile each pattern in
+/// the set independently and scan the exact same haystack a second time with
+/// those independently compiled patterns:
+///
+/// ```
+/// use regex::bytes::{Regex, RegexSet};
+///
+/// let patterns = ["foo", "bar"];
+/// // Both patterns will match different ranges of this string.
+/// let hay = b"barfoo";
+///
+/// // Compile a set matching any of our patterns.
+/// let set = RegexSet::new(patterns).unwrap();
+/// // Compile each pattern independently.
+/// let regexes: Vec<_> = set
+///     .patterns()
+///     .iter()
+///     .map(|pat| Regex::new(pat).unwrap())
+///     .collect();
+///
+/// // Match against the whole set first and identify the individual
+/// // matching patterns.
+/// let matches: Vec<&[u8]> = set
+///     .matches(hay)
+///     .into_iter()
+///     // Dereference the match index to get the corresponding
+///     // compiled pattern.
+///     .map(|index| &regexes[index])
+///     // To get match locations or any other info, we then have to search the
+///     // exact same haystack again, using our separately-compiled pattern.
+///     .map(|re| re.find(hay).unwrap().as_bytes())
+///     .collect();
+///
+/// // Matches arrive in the order the constituent patterns were declared,
+/// // not the order they appear in the haystack.
+/// assert_eq!(vec![&b"foo"[..], &b"bar"[..]], matches);
+/// ```
+///
+/// # Performance
+///
+/// A `RegexSet` has the same performance characteristics as `Regex`. Namely,
+/// search takes `O(m * n)` time, where `m` is proportional to the size of the
+/// regex set and `n` is proportional to the length of the haystack.
+///
+/// # Trait implementations
+///
+/// The `Default` trait is implemented for `RegexSet`. The default value
+/// is an empty set. An empty set can also be explicitly constructed via
+/// [`RegexSet::empty`].
+///
+/// # Example
+///
+/// This shows how the above two regexes (for matching email addresses and
+/// domains) might work:
+///
+/// ```
+/// use regex::bytes::RegexSet;
+///
+/// let set = RegexSet::new(&[
+///     r"[a-z]+@[a-z]+\.(com|org|net)",
+///     r"[a-z]+\.(com|org|net)",
+/// ]).unwrap();
+///
+/// // Ask whether any regexes in the set match.
+/// assert!(set.is_match(b"foo@example.com"));
+///
+/// // Identify which regexes in the set match.
+/// let matches: Vec<_> = set.matches(b"foo@example.com").into_iter().collect();
+/// assert_eq!(vec![0, 1], matches);
+///
+/// // Try again, but with a haystack that only matches one of the regexes.
+/// let matches: Vec<_> = set.matches(b"example.com").into_iter().collect();
+/// assert_eq!(vec![1], matches);
+///
+/// // Try again, but with a haystack that doesn't match any regex in the set.
+/// let matches: Vec<_> = set.matches(b"example").into_iter().collect();
+/// assert!(matches.is_empty());
+/// ```
+///
+/// Note that it would be possible to adapt the above example to using `Regex`
+/// with an expression like:
+///
+/// ```text
+/// (?P<email>[a-z]+@(?P<email_domain>[a-z]+[.](com|org|net)))|(?P<domain>[a-z]+[.](com|org|net))
+/// ```
+///
+/// After a match, one could then inspect the capture groups to figure out
+/// which alternates matched.
The problem is that it is hard to make this +/// approach scale when there are many regexes since the overlap between each +/// alternate isn't always obvious to reason about. +#[derive(Clone)] +pub struct RegexSet { + pub(crate) meta: meta::Regex, + pub(crate) patterns: alloc::sync::Arc<[String]>, +} + +impl RegexSet { + /// Create a new regex set with the given regular expressions. + /// + /// This takes an iterator of `S`, where `S` is something that can produce + /// a `&str`. If any of the strings in the iterator are not valid regular + /// expressions, then an error is returned. + /// + /// # Example + /// + /// Create a new regex set from an iterator of strings: + /// + /// ``` + /// use regex::bytes::RegexSet; + /// + /// let set = RegexSet::new([r"\w+", r"\d+"]).unwrap(); + /// assert!(set.is_match(b"foo")); + /// ``` + pub fn new<I, S>(exprs: I) -> Result<RegexSet, Error> + where + S: AsRef<str>, + I: IntoIterator<Item = S>, + { + RegexSetBuilder::new(exprs).build() + } + + /// Create a new empty regex set. + /// + /// An empty regex never matches anything. + /// + /// This is a convenience function for `RegexSet::new([])`, but doesn't + /// require one to specify the type of the input. + /// + /// # Example + /// + /// ``` + /// use regex::bytes::RegexSet; + /// + /// let set = RegexSet::empty(); + /// assert!(set.is_empty()); + /// // an empty set matches nothing + /// assert!(!set.is_match(b"")); + /// ``` + pub fn empty() -> RegexSet { + let empty: [&str; 0] = []; + RegexSetBuilder::new(empty).build().unwrap() + } + + /// Returns true if and only if one of the regexes in this set matches + /// the haystack given. + /// + /// This method should be preferred if you only need to test whether any + /// of the regexes in the set should match, but don't care about *which* + /// regexes matched. This is because the underlying matching engine will + /// quit immediately after seeing the first match instead of continuing to + /// find all matches. + /// + /// Note that as with searches using [`Regex`](crate::bytes::Regex), the + /// expression is unanchored by default. That is, if the regex does not + /// start with `^` or `\A`, or end with `$` or `\z`, then it is permitted + /// to match anywhere in the haystack. + /// + /// # Example + /// + /// Tests whether a set matches somewhere in a haystack: + /// + /// ``` + /// use regex::bytes::RegexSet; + /// + /// let set = RegexSet::new([r"\w+", r"\d+"]).unwrap(); + /// assert!(set.is_match(b"foo")); + /// assert!(!set.is_match("☃".as_bytes())); + /// ``` + #[inline] + pub fn is_match(&self, haystack: &[u8]) -> bool { + self.is_match_at(haystack, 0) + } + + /// Returns true if and only if one of the regexes in this set matches the + /// haystack given, with the search starting at the offset given. + /// + /// The significance of the starting point is that it takes the surrounding + /// context into consideration. For example, the `\A` anchor can only + /// match when `start == 0`. + /// + /// # Panics + /// + /// This panics when `start >= haystack.len() + 1`. + /// + /// # Example + /// + /// This example shows the significance of `start`. Namely, consider a + /// haystack `foobar` and a desire to execute a search starting at offset + /// `3`. You could search a substring explicitly, but then the look-around + /// assertions won't work correctly. Instead, you can use this method to + /// specify the start position of a search. 
+ /// + /// ``` + /// use regex::bytes::RegexSet; + /// + /// let set = RegexSet::new([r"\bbar\b", r"(?m)^bar$"]).unwrap(); + /// let hay = b"foobar"; + /// // We get a match here, but it's probably not intended. + /// assert!(set.is_match(&hay[3..])); + /// // No match because the assertions take the context into account. + /// assert!(!set.is_match_at(hay, 3)); + /// ``` + #[inline] + pub fn is_match_at(&self, haystack: &[u8], start: usize) -> bool { + self.meta.is_match(Input::new(haystack).span(start..haystack.len())) + } + + /// Returns the set of regexes that match in the given haystack. + /// + /// The set returned contains the index of each regex that matches in + /// the given haystack. The index is in correspondence with the order of + /// regular expressions given to `RegexSet`'s constructor. + /// + /// The set can also be used to iterate over the matched indices. The order + /// of iteration is always ascending with respect to the matching indices. + /// + /// Note that as with searches using [`Regex`](crate::bytes::Regex), the + /// expression is unanchored by default. That is, if the regex does not + /// start with `^` or `\A`, or end with `$` or `\z`, then it is permitted + /// to match anywhere in the haystack. + /// + /// # Example + /// + /// Tests which regular expressions match the given haystack: + /// + /// ``` + /// use regex::bytes::RegexSet; + /// + /// let set = RegexSet::new([ + /// r"\w+", + /// r"\d+", + /// r"\pL+", + /// r"foo", + /// r"bar", + /// r"barfoo", + /// r"foobar", + /// ]).unwrap(); + /// let matches: Vec<_> = set.matches(b"foobar").into_iter().collect(); + /// assert_eq!(matches, vec![0, 2, 3, 4, 6]); + /// + /// // You can also test whether a particular regex matched: + /// let matches = set.matches(b"foobar"); + /// assert!(!matches.matched(5)); + /// assert!(matches.matched(6)); + /// ``` + #[inline] + pub fn matches(&self, haystack: &[u8]) -> SetMatches { + self.matches_at(haystack, 0) + } + + /// Returns the set of regexes that match in the given haystack. + /// + /// The set returned contains the index of each regex that matches in + /// the given haystack. The index is in correspondence with the order of + /// regular expressions given to `RegexSet`'s constructor. + /// + /// The set can also be used to iterate over the matched indices. The order + /// of iteration is always ascending with respect to the matching indices. + /// + /// The significance of the starting point is that it takes the surrounding + /// context into consideration. For example, the `\A` anchor can only + /// match when `start == 0`. + /// + /// # Panics + /// + /// This panics when `start >= haystack.len() + 1`. + /// + /// # Example + /// + /// Tests which regular expressions match the given haystack: + /// + /// ``` + /// use regex::bytes::RegexSet; + /// + /// let set = RegexSet::new([r"\bbar\b", r"(?m)^bar$"]).unwrap(); + /// let hay = b"foobar"; + /// // We get matches here, but it's probably not intended. + /// let matches: Vec<_> = set.matches(&hay[3..]).into_iter().collect(); + /// assert_eq!(matches, vec![0, 1]); + /// // No matches because the assertions take the context into account. 
+ /// let matches: Vec<_> = set.matches_at(hay, 3).into_iter().collect(); + /// assert_eq!(matches, vec![]); + /// ``` + #[inline] + pub fn matches_at(&self, haystack: &[u8], start: usize) -> SetMatches { + let input = Input::new(haystack).span(start..haystack.len()); + let mut patset = PatternSet::new(self.meta.pattern_len()); + self.meta.which_overlapping_matches(&input, &mut patset); + SetMatches(patset) + } + + /// Returns the same as matches, but starts the search at the given + /// offset and stores the matches into the slice given. + /// + /// The significance of the starting point is that it takes the surrounding + /// context into consideration. For example, the `\A` anchor can only + /// match when `start == 0`. + /// + /// `matches` must have a length that is at least the number of regexes + /// in this set. + /// + /// This method returns true if and only if at least one member of + /// `matches` is true after executing the set against `haystack`. + #[doc(hidden)] + #[inline] + pub fn matches_read_at( + &self, + matches: &mut [bool], + haystack: &[u8], + start: usize, + ) -> bool { + // This is pretty dumb. We should try to fix this, but the + // regex-automata API doesn't provide a way to store matches in an + // arbitrary &mut [bool]. Thankfully, this API is doc(hidden) and + // thus not public... But regex-capi currently uses it. We should + // fix regex-capi to use a PatternSet, maybe? Not sure... PatternSet + // is in regex-automata, not regex. So maybe we should just accept a + // 'SetMatches', which is basically just a newtype around PatternSet. + let mut patset = PatternSet::new(self.meta.pattern_len()); + let mut input = Input::new(haystack); + input.set_start(start); + self.meta.which_overlapping_matches(&input, &mut patset); + for pid in patset.iter() { + matches[pid] = true; + } + !patset.is_empty() + } + + /// An alias for `matches_read_at` to preserve backward compatibility. + /// + /// The `regex-capi` crate used this method, so to avoid breaking that + /// crate, we continue to export it as an undocumented API. + #[doc(hidden)] + #[inline] + pub fn read_matches_at( + &self, + matches: &mut [bool], + haystack: &[u8], + start: usize, + ) -> bool { + self.matches_read_at(matches, haystack, start) + } + + /// Returns the total number of regexes in this set. + /// + /// # Example + /// + /// ``` + /// use regex::bytes::RegexSet; + /// + /// assert_eq!(0, RegexSet::empty().len()); + /// assert_eq!(1, RegexSet::new([r"[0-9]"]).unwrap().len()); + /// assert_eq!(2, RegexSet::new([r"[0-9]", r"[a-z]"]).unwrap().len()); + /// ``` + #[inline] + pub fn len(&self) -> usize { + self.meta.pattern_len() + } + + /// Returns `true` if this set contains no regexes. + /// + /// # Example + /// + /// ``` + /// use regex::bytes::RegexSet; + /// + /// assert!(RegexSet::empty().is_empty()); + /// assert!(!RegexSet::new([r"[0-9]"]).unwrap().is_empty()); + /// ``` + #[inline] + pub fn is_empty(&self) -> bool { + self.meta.pattern_len() == 0 + } + + /// Returns the regex patterns that this regex set was constructed from. + /// + /// This function can be used to determine the pattern for a match. The + /// slice returned has exactly as many patterns givens to this regex set, + /// and the order of the slice is the same as the order of the patterns + /// provided to the set. 
+ /// + /// # Example + /// + /// ``` + /// use regex::bytes::RegexSet; + /// + /// let set = RegexSet::new(&[ + /// r"\w+", + /// r"\d+", + /// r"\pL+", + /// r"foo", + /// r"bar", + /// r"barfoo", + /// r"foobar", + /// ]).unwrap(); + /// let matches: Vec<_> = set + /// .matches(b"foobar") + /// .into_iter() + /// .map(|index| &set.patterns()[index]) + /// .collect(); + /// assert_eq!(matches, vec![r"\w+", r"\pL+", r"foo", r"bar", r"foobar"]); + /// ``` + #[inline] + pub fn patterns(&self) -> &[String] { + &self.patterns + } +} + +impl Default for RegexSet { + fn default() -> Self { + RegexSet::empty() + } +} + +/// A set of matches returned by a regex set. +/// +/// Values of this type are constructed by [`RegexSet::matches`]. +#[derive(Clone, Debug)] +pub struct SetMatches(PatternSet); + +impl SetMatches { + /// Whether this set contains any matches. + /// + /// # Example + /// + /// ``` + /// use regex::bytes::RegexSet; + /// + /// let set = RegexSet::new(&[ + /// r"[a-z]+@[a-z]+\.(com|org|net)", + /// r"[a-z]+\.(com|org|net)", + /// ]).unwrap(); + /// let matches = set.matches(b"foo@example.com"); + /// assert!(matches.matched_any()); + /// ``` + #[inline] + pub fn matched_any(&self) -> bool { + !self.0.is_empty() + } + + /// Whether all patterns in this set matched. + /// + /// # Example + /// + /// ``` + /// use regex::bytes::RegexSet; + /// + /// let set = RegexSet::new(&[ + /// r"^foo", + /// r"[a-z]+\.com", + /// ]).unwrap(); + /// let matches = set.matches(b"foo.example.com"); + /// assert!(matches.matched_all()); + /// ``` + pub fn matched_all(&self) -> bool { + self.0.is_full() + } + + /// Whether the regex at the given index matched. + /// + /// The index for a regex is determined by its insertion order upon the + /// initial construction of a `RegexSet`, starting at `0`. + /// + /// # Panics + /// + /// If `index` is greater than or equal to the number of regexes in the + /// original set that produced these matches. Equivalently, when `index` + /// is greater than or equal to [`SetMatches::len`]. + /// + /// # Example + /// + /// ``` + /// use regex::bytes::RegexSet; + /// + /// let set = RegexSet::new([ + /// r"[a-z]+@[a-z]+\.(com|org|net)", + /// r"[a-z]+\.(com|org|net)", + /// ]).unwrap(); + /// let matches = set.matches(b"example.com"); + /// assert!(!matches.matched(0)); + /// assert!(matches.matched(1)); + /// ``` + #[inline] + pub fn matched(&self, index: usize) -> bool { + self.0.contains(PatternID::new_unchecked(index)) + } + + /// The total number of regexes in the set that created these matches. + /// + /// **WARNING:** This always returns the same value as [`RegexSet::len`]. + /// In particular, it does *not* return the number of elements yielded by + /// [`SetMatches::iter`]. The only way to determine the total number of + /// matched regexes is to iterate over them. + /// + /// # Example + /// + /// Notice that this method returns the total number of regexes in the + /// original set, and *not* the total number of regexes that matched. + /// + /// ``` + /// use regex::bytes::RegexSet; + /// + /// let set = RegexSet::new([ + /// r"[a-z]+@[a-z]+\.(com|org|net)", + /// r"[a-z]+\.(com|org|net)", + /// ]).unwrap(); + /// let matches = set.matches(b"example.com"); + /// // Total number of patterns that matched. + /// assert_eq!(1, matches.iter().count()); + /// // Total number of patterns in the set. 
+ /// assert_eq!(2, matches.len()); + /// ``` + #[inline] + pub fn len(&self) -> usize { + self.0.capacity() + } + + /// Returns an iterator over the indices of the regexes that matched. + /// + /// This will always produces matches in ascending order, where the index + /// yielded corresponds to the index of the regex that matched with respect + /// to its position when initially building the set. + /// + /// # Example + /// + /// ``` + /// use regex::bytes::RegexSet; + /// + /// let set = RegexSet::new([ + /// r"[0-9]", + /// r"[a-z]", + /// r"[A-Z]", + /// r"\p{Greek}", + /// ]).unwrap(); + /// let hay = "βa1".as_bytes(); + /// let matches: Vec<_> = set.matches(hay).iter().collect(); + /// assert_eq!(matches, vec![0, 1, 3]); + /// ``` + /// + /// Note that `SetMatches` also implements the `IntoIterator` trait, so + /// this method is not always needed. For example: + /// + /// ``` + /// use regex::bytes::RegexSet; + /// + /// let set = RegexSet::new([ + /// r"[0-9]", + /// r"[a-z]", + /// r"[A-Z]", + /// r"\p{Greek}", + /// ]).unwrap(); + /// let hay = "βa1".as_bytes(); + /// let mut matches = vec![]; + /// for index in set.matches(hay) { + /// matches.push(index); + /// } + /// assert_eq!(matches, vec![0, 1, 3]); + /// ``` + #[inline] + pub fn iter(&self) -> SetMatchesIter<'_> { + SetMatchesIter(self.0.iter()) + } +} + +impl IntoIterator for SetMatches { + type IntoIter = SetMatchesIntoIter; + type Item = usize; + + fn into_iter(self) -> Self::IntoIter { + let it = 0..self.0.capacity(); + SetMatchesIntoIter { patset: self.0, it } + } +} + +impl<'a> IntoIterator for &'a SetMatches { + type IntoIter = SetMatchesIter<'a>; + type Item = usize; + + fn into_iter(self) -> Self::IntoIter { + self.iter() + } +} + +/// An owned iterator over the set of matches from a regex set. +/// +/// This will always produces matches in ascending order of index, where the +/// index corresponds to the index of the regex that matched with respect to +/// its position when initially building the set. +/// +/// This iterator is created by calling `SetMatches::into_iter` via the +/// `IntoIterator` trait. This is automatically done in `for` loops. +/// +/// # Example +/// +/// ``` +/// use regex::bytes::RegexSet; +/// +/// let set = RegexSet::new([ +/// r"[0-9]", +/// r"[a-z]", +/// r"[A-Z]", +/// r"\p{Greek}", +/// ]).unwrap(); +/// let hay = "βa1".as_bytes(); +/// let mut matches = vec![]; +/// for index in set.matches(hay) { +/// matches.push(index); +/// } +/// assert_eq!(matches, vec![0, 1, 3]); +/// ``` +#[derive(Debug)] +pub struct SetMatchesIntoIter { + patset: PatternSet, + it: core::ops::Range<usize>, +} + +impl Iterator for SetMatchesIntoIter { + type Item = usize; + + fn next(&mut self) -> Option<usize> { + loop { + let id = self.it.next()?; + if self.patset.contains(PatternID::new_unchecked(id)) { + return Some(id); + } + } + } + + fn size_hint(&self) -> (usize, Option<usize>) { + self.it.size_hint() + } +} + +impl DoubleEndedIterator for SetMatchesIntoIter { + fn next_back(&mut self) -> Option<usize> { + loop { + let id = self.it.next_back()?; + if self.patset.contains(PatternID::new_unchecked(id)) { + return Some(id); + } + } + } +} + +impl core::iter::FusedIterator for SetMatchesIntoIter {} + +/// A borrowed iterator over the set of matches from a regex set. +/// +/// The lifetime `'a` refers to the lifetime of the [`SetMatches`] value that +/// created this iterator. 
+/// +/// This will always produces matches in ascending order, where the index +/// corresponds to the index of the regex that matched with respect to its +/// position when initially building the set. +/// +/// This iterator is created by the [`SetMatches::iter`] method. +#[derive(Clone, Debug)] +pub struct SetMatchesIter<'a>(PatternSetIter<'a>); + +impl<'a> Iterator for SetMatchesIter<'a> { + type Item = usize; + + fn next(&mut self) -> Option<usize> { + self.0.next().map(|pid| pid.as_usize()) + } + + fn size_hint(&self) -> (usize, Option<usize>) { + self.0.size_hint() + } +} + +impl<'a> DoubleEndedIterator for SetMatchesIter<'a> { + fn next_back(&mut self) -> Option<usize> { + self.0.next_back().map(|pid| pid.as_usize()) + } +} + +impl<'a> core::iter::FusedIterator for SetMatchesIter<'a> {} + +impl core::fmt::Debug for RegexSet { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + write!(f, "RegexSet({:?})", self.patterns()) + } +} diff --git a/vendor/regex/src/regexset/mod.rs b/vendor/regex/src/regexset/mod.rs new file mode 100644 index 00000000000000..93fadec8bf65c9 --- /dev/null +++ b/vendor/regex/src/regexset/mod.rs @@ -0,0 +1,2 @@ +pub(crate) mod bytes; +pub(crate) mod string; diff --git a/vendor/regex/src/regexset/string.rs b/vendor/regex/src/regexset/string.rs new file mode 100644 index 00000000000000..5126a4661ebca0 --- /dev/null +++ b/vendor/regex/src/regexset/string.rs @@ -0,0 +1,724 @@ +use alloc::string::String; + +use regex_automata::{meta, Input, PatternID, PatternSet, PatternSetIter}; + +use crate::{Error, RegexSetBuilder}; + +/// Match multiple, possibly overlapping, regexes in a single search. +/// +/// A regex set corresponds to the union of zero or more regular expressions. +/// That is, a regex set will match a haystack when at least one of its +/// constituent regexes matches. A regex set as its formulated here provides a +/// touch more power: it will also report *which* regular expressions in the +/// set match. Indeed, this is the key difference between regex sets and a +/// single `Regex` with many alternates, since only one alternate can match at +/// a time. +/// +/// For example, consider regular expressions to match email addresses and +/// domains: `[a-z]+@[a-z]+\.(com|org|net)` and `[a-z]+\.(com|org|net)`. If a +/// regex set is constructed from those regexes, then searching the haystack +/// `foo@example.com` will report both regexes as matching. Of course, one +/// could accomplish this by compiling each regex on its own and doing two +/// searches over the haystack. The key advantage of using a regex set is +/// that it will report the matching regexes using a *single pass through the +/// haystack*. If one has hundreds or thousands of regexes to match repeatedly +/// (like a URL router for a complex web application or a user agent matcher), +/// then a regex set *can* realize huge performance gains. +/// +/// # Limitations +/// +/// Regex sets are limited to answering the following two questions: +/// +/// 1. Does any regex in the set match? +/// 2. If so, which regexes in the set match? +/// +/// As with the main [`Regex`][crate::Regex] type, it is cheaper to ask (1) +/// instead of (2) since the matching engines can stop after the first match +/// is found. +/// +/// You cannot directly extract [`Match`][crate::Match] or +/// [`Captures`][crate::Captures] objects from a regex set. 
If you need these +/// operations, the recommended approach is to compile each pattern in the set +/// independently and scan the exact same haystack a second time with those +/// independently compiled patterns: +/// +/// ``` +/// use regex::{Regex, RegexSet}; +/// +/// let patterns = ["foo", "bar"]; +/// // Both patterns will match different ranges of this string. +/// let hay = "barfoo"; +/// +/// // Compile a set matching any of our patterns. +/// let set = RegexSet::new(patterns).unwrap(); +/// // Compile each pattern independently. +/// let regexes: Vec<_> = set +/// .patterns() +/// .iter() +/// .map(|pat| Regex::new(pat).unwrap()) +/// .collect(); +/// +/// // Match against the whole set first and identify the individual +/// // matching patterns. +/// let matches: Vec<&str> = set +/// .matches(hay) +/// .into_iter() +/// // Dereference the match index to get the corresponding +/// // compiled pattern. +/// .map(|index| ®exes[index]) +/// // To get match locations or any other info, we then have to search the +/// // exact same haystack again, using our separately-compiled pattern. +/// .map(|re| re.find(hay).unwrap().as_str()) +/// .collect(); +/// +/// // Matches arrive in the order the constituent patterns were declared, +/// // not the order they appear in the haystack. +/// assert_eq!(vec!["foo", "bar"], matches); +/// ``` +/// +/// # Performance +/// +/// A `RegexSet` has the same performance characteristics as `Regex`. Namely, +/// search takes `O(m * n)` time, where `m` is proportional to the size of the +/// regex set and `n` is proportional to the length of the haystack. +/// +/// # Trait implementations +/// +/// The `Default` trait is implemented for `RegexSet`. The default value +/// is an empty set. An empty set can also be explicitly constructed via +/// [`RegexSet::empty`]. +/// +/// # Example +/// +/// This shows how the above two regexes (for matching email addresses and +/// domains) might work: +/// +/// ``` +/// use regex::RegexSet; +/// +/// let set = RegexSet::new(&[ +/// r"[a-z]+@[a-z]+\.(com|org|net)", +/// r"[a-z]+\.(com|org|net)", +/// ]).unwrap(); +/// +/// // Ask whether any regexes in the set match. +/// assert!(set.is_match("foo@example.com")); +/// +/// // Identify which regexes in the set match. +/// let matches: Vec<_> = set.matches("foo@example.com").into_iter().collect(); +/// assert_eq!(vec![0, 1], matches); +/// +/// // Try again, but with a haystack that only matches one of the regexes. +/// let matches: Vec<_> = set.matches("example.com").into_iter().collect(); +/// assert_eq!(vec![1], matches); +/// +/// // Try again, but with a haystack that doesn't match any regex in the set. +/// let matches: Vec<_> = set.matches("example").into_iter().collect(); +/// assert!(matches.is_empty()); +/// ``` +/// +/// Note that it would be possible to adapt the above example to using `Regex` +/// with an expression like: +/// +/// ```text +/// (?P<email>[a-z]+@(?P<email_domain>[a-z]+[.](com|org|net)))|(?P<domain>[a-z]+[.](com|org|net)) +/// ``` +/// +/// After a match, one could then inspect the capture groups to figure out +/// which alternates matched. The problem is that it is hard to make this +/// approach scale when there are many regexes since the overlap between each +/// alternate isn't always obvious to reason about. +#[derive(Clone)] +pub struct RegexSet { + pub(crate) meta: meta::Regex, + pub(crate) patterns: alloc::sync::Arc<[String]>, +} + +impl RegexSet { + /// Create a new regex set with the given regular expressions. 
+ /// + /// This takes an iterator of `S`, where `S` is something that can produce + /// a `&str`. If any of the strings in the iterator are not valid regular + /// expressions, then an error is returned. + /// + /// # Example + /// + /// Create a new regex set from an iterator of strings: + /// + /// ``` + /// use regex::RegexSet; + /// + /// let set = RegexSet::new([r"\w+", r"\d+"]).unwrap(); + /// assert!(set.is_match("foo")); + /// ``` + pub fn new<I, S>(exprs: I) -> Result<RegexSet, Error> + where + S: AsRef<str>, + I: IntoIterator<Item = S>, + { + RegexSetBuilder::new(exprs).build() + } + + /// Create a new empty regex set. + /// + /// An empty regex never matches anything. + /// + /// This is a convenience function for `RegexSet::new([])`, but doesn't + /// require one to specify the type of the input. + /// + /// # Example + /// + /// ``` + /// use regex::RegexSet; + /// + /// let set = RegexSet::empty(); + /// assert!(set.is_empty()); + /// // an empty set matches nothing + /// assert!(!set.is_match("")); + /// ``` + pub fn empty() -> RegexSet { + let empty: [&str; 0] = []; + RegexSetBuilder::new(empty).build().unwrap() + } + + /// Returns true if and only if one of the regexes in this set matches + /// the haystack given. + /// + /// This method should be preferred if you only need to test whether any + /// of the regexes in the set should match, but don't care about *which* + /// regexes matched. This is because the underlying matching engine will + /// quit immediately after seeing the first match instead of continuing to + /// find all matches. + /// + /// Note that as with searches using [`Regex`](crate::Regex), the + /// expression is unanchored by default. That is, if the regex does not + /// start with `^` or `\A`, or end with `$` or `\z`, then it is permitted + /// to match anywhere in the haystack. + /// + /// # Example + /// + /// Tests whether a set matches somewhere in a haystack: + /// + /// ``` + /// use regex::RegexSet; + /// + /// let set = RegexSet::new([r"\w+", r"\d+"]).unwrap(); + /// assert!(set.is_match("foo")); + /// assert!(!set.is_match("☃")); + /// ``` + #[inline] + pub fn is_match(&self, haystack: &str) -> bool { + self.is_match_at(haystack, 0) + } + + /// Returns true if and only if one of the regexes in this set matches the + /// haystack given, with the search starting at the offset given. + /// + /// The significance of the starting point is that it takes the surrounding + /// context into consideration. For example, the `\A` anchor can only + /// match when `start == 0`. + /// + /// # Panics + /// + /// This panics when `start >= haystack.len() + 1`. + /// + /// # Example + /// + /// This example shows the significance of `start`. Namely, consider a + /// haystack `foobar` and a desire to execute a search starting at offset + /// `3`. You could search a substring explicitly, but then the look-around + /// assertions won't work correctly. Instead, you can use this method to + /// specify the start position of a search. + /// + /// ``` + /// use regex::RegexSet; + /// + /// let set = RegexSet::new([r"\bbar\b", r"(?m)^bar$"]).unwrap(); + /// let hay = "foobar"; + /// // We get a match here, but it's probably not intended. + /// assert!(set.is_match(&hay[3..])); + /// // No match because the assertions take the context into account. 
+ /// assert!(!set.is_match_at(hay, 3)); + /// ``` + #[inline] + pub fn is_match_at(&self, haystack: &str, start: usize) -> bool { + self.meta.is_match(Input::new(haystack).span(start..haystack.len())) + } + + /// Returns the set of regexes that match in the given haystack. + /// + /// The set returned contains the index of each regex that matches in + /// the given haystack. The index is in correspondence with the order of + /// regular expressions given to `RegexSet`'s constructor. + /// + /// The set can also be used to iterate over the matched indices. The order + /// of iteration is always ascending with respect to the matching indices. + /// + /// Note that as with searches using [`Regex`](crate::Regex), the + /// expression is unanchored by default. That is, if the regex does not + /// start with `^` or `\A`, or end with `$` or `\z`, then it is permitted + /// to match anywhere in the haystack. + /// + /// # Example + /// + /// Tests which regular expressions match the given haystack: + /// + /// ``` + /// use regex::RegexSet; + /// + /// let set = RegexSet::new([ + /// r"\w+", + /// r"\d+", + /// r"\pL+", + /// r"foo", + /// r"bar", + /// r"barfoo", + /// r"foobar", + /// ]).unwrap(); + /// let matches: Vec<_> = set.matches("foobar").into_iter().collect(); + /// assert_eq!(matches, vec![0, 2, 3, 4, 6]); + /// + /// // You can also test whether a particular regex matched: + /// let matches = set.matches("foobar"); + /// assert!(!matches.matched(5)); + /// assert!(matches.matched(6)); + /// ``` + #[inline] + pub fn matches(&self, haystack: &str) -> SetMatches { + self.matches_at(haystack, 0) + } + + /// Returns the set of regexes that match in the given haystack. + /// + /// The set returned contains the index of each regex that matches in + /// the given haystack. The index is in correspondence with the order of + /// regular expressions given to `RegexSet`'s constructor. + /// + /// The set can also be used to iterate over the matched indices. The order + /// of iteration is always ascending with respect to the matching indices. + /// + /// The significance of the starting point is that it takes the surrounding + /// context into consideration. For example, the `\A` anchor can only + /// match when `start == 0`. + /// + /// # Panics + /// + /// This panics when `start >= haystack.len() + 1`. + /// + /// # Example + /// + /// Tests which regular expressions match the given haystack: + /// + /// ``` + /// use regex::RegexSet; + /// + /// let set = RegexSet::new([r"\bbar\b", r"(?m)^bar$"]).unwrap(); + /// let hay = "foobar"; + /// // We get matches here, but it's probably not intended. + /// let matches: Vec<_> = set.matches(&hay[3..]).into_iter().collect(); + /// assert_eq!(matches, vec![0, 1]); + /// // No matches because the assertions take the context into account. + /// let matches: Vec<_> = set.matches_at(hay, 3).into_iter().collect(); + /// assert_eq!(matches, vec![]); + /// ``` + #[inline] + pub fn matches_at(&self, haystack: &str, start: usize) -> SetMatches { + let input = Input::new(haystack).span(start..haystack.len()); + let mut patset = PatternSet::new(self.meta.pattern_len()); + self.meta.which_overlapping_matches(&input, &mut patset); + SetMatches(patset) + } + + /// Returns the same as matches, but starts the search at the given + /// offset and stores the matches into the slice given. + /// + /// The significance of the starting point is that it takes the surrounding + /// context into consideration. 
For example, the `\A` anchor can only + /// match when `start == 0`. + /// + /// `matches` must have a length that is at least the number of regexes + /// in this set. + /// + /// This method returns true if and only if at least one member of + /// `matches` is true after executing the set against `haystack`. + #[doc(hidden)] + #[inline] + pub fn matches_read_at( + &self, + matches: &mut [bool], + haystack: &str, + start: usize, + ) -> bool { + // This is pretty dumb. We should try to fix this, but the + // regex-automata API doesn't provide a way to store matches in an + // arbitrary &mut [bool]. Thankfully, this API is doc(hidden) and + // thus not public... But regex-capi currently uses it. We should + // fix regex-capi to use a PatternSet, maybe? Not sure... PatternSet + // is in regex-automata, not regex. So maybe we should just accept a + // 'SetMatches', which is basically just a newtype around PatternSet. + let mut patset = PatternSet::new(self.meta.pattern_len()); + let mut input = Input::new(haystack); + input.set_start(start); + self.meta.which_overlapping_matches(&input, &mut patset); + for pid in patset.iter() { + matches[pid] = true; + } + !patset.is_empty() + } + + /// An alias for `matches_read_at` to preserve backward compatibility. + /// + /// The `regex-capi` crate used this method, so to avoid breaking that + /// crate, we continue to export it as an undocumented API. + #[doc(hidden)] + #[inline] + pub fn read_matches_at( + &self, + matches: &mut [bool], + haystack: &str, + start: usize, + ) -> bool { + self.matches_read_at(matches, haystack, start) + } + + /// Returns the total number of regexes in this set. + /// + /// # Example + /// + /// ``` + /// use regex::RegexSet; + /// + /// assert_eq!(0, RegexSet::empty().len()); + /// assert_eq!(1, RegexSet::new([r"[0-9]"]).unwrap().len()); + /// assert_eq!(2, RegexSet::new([r"[0-9]", r"[a-z]"]).unwrap().len()); + /// ``` + #[inline] + pub fn len(&self) -> usize { + self.meta.pattern_len() + } + + /// Returns `true` if this set contains no regexes. + /// + /// # Example + /// + /// ``` + /// use regex::RegexSet; + /// + /// assert!(RegexSet::empty().is_empty()); + /// assert!(!RegexSet::new([r"[0-9]"]).unwrap().is_empty()); + /// ``` + #[inline] + pub fn is_empty(&self) -> bool { + self.meta.pattern_len() == 0 + } + + /// Returns the regex patterns that this regex set was constructed from. + /// + /// This function can be used to determine the pattern for a match. The + /// slice returned has exactly as many patterns givens to this regex set, + /// and the order of the slice is the same as the order of the patterns + /// provided to the set. + /// + /// # Example + /// + /// ``` + /// use regex::RegexSet; + /// + /// let set = RegexSet::new(&[ + /// r"\w+", + /// r"\d+", + /// r"\pL+", + /// r"foo", + /// r"bar", + /// r"barfoo", + /// r"foobar", + /// ]).unwrap(); + /// let matches: Vec<_> = set + /// .matches("foobar") + /// .into_iter() + /// .map(|index| &set.patterns()[index]) + /// .collect(); + /// assert_eq!(matches, vec![r"\w+", r"\pL+", r"foo", r"bar", r"foobar"]); + /// ``` + #[inline] + pub fn patterns(&self) -> &[String] { + &self.patterns + } +} + +impl Default for RegexSet { + fn default() -> Self { + RegexSet::empty() + } +} + +/// A set of matches returned by a regex set. +/// +/// Values of this type are constructed by [`RegexSet::matches`]. +#[derive(Clone, Debug)] +pub struct SetMatches(PatternSet); + +impl SetMatches { + /// Whether this set contains any matches. 
+ /// + /// # Example + /// + /// ``` + /// use regex::RegexSet; + /// + /// let set = RegexSet::new(&[ + /// r"[a-z]+@[a-z]+\.(com|org|net)", + /// r"[a-z]+\.(com|org|net)", + /// ]).unwrap(); + /// let matches = set.matches("foo@example.com"); + /// assert!(matches.matched_any()); + /// ``` + #[inline] + pub fn matched_any(&self) -> bool { + !self.0.is_empty() + } + + /// Whether all patterns in this set matched. + /// + /// # Example + /// + /// ``` + /// use regex::RegexSet; + /// + /// let set = RegexSet::new(&[ + /// r"^foo", + /// r"[a-z]+\.com", + /// ]).unwrap(); + /// let matches = set.matches("foo.example.com"); + /// assert!(matches.matched_all()); + /// ``` + pub fn matched_all(&self) -> bool { + self.0.is_full() + } + + /// Whether the regex at the given index matched. + /// + /// The index for a regex is determined by its insertion order upon the + /// initial construction of a `RegexSet`, starting at `0`. + /// + /// # Panics + /// + /// If `index` is greater than or equal to the number of regexes in the + /// original set that produced these matches. Equivalently, when `index` + /// is greater than or equal to [`SetMatches::len`]. + /// + /// # Example + /// + /// ``` + /// use regex::RegexSet; + /// + /// let set = RegexSet::new([ + /// r"[a-z]+@[a-z]+\.(com|org|net)", + /// r"[a-z]+\.(com|org|net)", + /// ]).unwrap(); + /// let matches = set.matches("example.com"); + /// assert!(!matches.matched(0)); + /// assert!(matches.matched(1)); + /// ``` + #[inline] + pub fn matched(&self, index: usize) -> bool { + self.0.contains(PatternID::new_unchecked(index)) + } + + /// The total number of regexes in the set that created these matches. + /// + /// **WARNING:** This always returns the same value as [`RegexSet::len`]. + /// In particular, it does *not* return the number of elements yielded by + /// [`SetMatches::iter`]. The only way to determine the total number of + /// matched regexes is to iterate over them. + /// + /// # Example + /// + /// Notice that this method returns the total number of regexes in the + /// original set, and *not* the total number of regexes that matched. + /// + /// ``` + /// use regex::RegexSet; + /// + /// let set = RegexSet::new([ + /// r"[a-z]+@[a-z]+\.(com|org|net)", + /// r"[a-z]+\.(com|org|net)", + /// ]).unwrap(); + /// let matches = set.matches("example.com"); + /// // Total number of patterns that matched. + /// assert_eq!(1, matches.iter().count()); + /// // Total number of patterns in the set. + /// assert_eq!(2, matches.len()); + /// ``` + #[inline] + pub fn len(&self) -> usize { + self.0.capacity() + } + + /// Returns an iterator over the indices of the regexes that matched. + /// + /// This will always produces matches in ascending order, where the index + /// yielded corresponds to the index of the regex that matched with respect + /// to its position when initially building the set. + /// + /// # Example + /// + /// ``` + /// use regex::RegexSet; + /// + /// let set = RegexSet::new([ + /// r"[0-9]", + /// r"[a-z]", + /// r"[A-Z]", + /// r"\p{Greek}", + /// ]).unwrap(); + /// let hay = "βa1"; + /// let matches: Vec<_> = set.matches(hay).iter().collect(); + /// assert_eq!(matches, vec![0, 1, 3]); + /// ``` + /// + /// Note that `SetMatches` also implements the `IntoIterator` trait, so + /// this method is not always needed. 
For example: + /// + /// ``` + /// use regex::RegexSet; + /// + /// let set = RegexSet::new([ + /// r"[0-9]", + /// r"[a-z]", + /// r"[A-Z]", + /// r"\p{Greek}", + /// ]).unwrap(); + /// let hay = "βa1"; + /// let mut matches = vec![]; + /// for index in set.matches(hay) { + /// matches.push(index); + /// } + /// assert_eq!(matches, vec![0, 1, 3]); + /// ``` + #[inline] + pub fn iter(&self) -> SetMatchesIter<'_> { + SetMatchesIter(self.0.iter()) + } +} + +impl IntoIterator for SetMatches { + type IntoIter = SetMatchesIntoIter; + type Item = usize; + + fn into_iter(self) -> Self::IntoIter { + let it = 0..self.0.capacity(); + SetMatchesIntoIter { patset: self.0, it } + } +} + +impl<'a> IntoIterator for &'a SetMatches { + type IntoIter = SetMatchesIter<'a>; + type Item = usize; + + fn into_iter(self) -> Self::IntoIter { + self.iter() + } +} + +/// An owned iterator over the set of matches from a regex set. +/// +/// This will always produces matches in ascending order of index, where the +/// index corresponds to the index of the regex that matched with respect to +/// its position when initially building the set. +/// +/// This iterator is created by calling `SetMatches::into_iter` via the +/// `IntoIterator` trait. This is automatically done in `for` loops. +/// +/// # Example +/// +/// ``` +/// use regex::RegexSet; +/// +/// let set = RegexSet::new([ +/// r"[0-9]", +/// r"[a-z]", +/// r"[A-Z]", +/// r"\p{Greek}", +/// ]).unwrap(); +/// let hay = "βa1"; +/// let mut matches = vec![]; +/// for index in set.matches(hay) { +/// matches.push(index); +/// } +/// assert_eq!(matches, vec![0, 1, 3]); +/// ``` +#[derive(Debug)] +pub struct SetMatchesIntoIter { + patset: PatternSet, + it: core::ops::Range<usize>, +} + +impl Iterator for SetMatchesIntoIter { + type Item = usize; + + fn next(&mut self) -> Option<usize> { + loop { + let id = self.it.next()?; + if self.patset.contains(PatternID::new_unchecked(id)) { + return Some(id); + } + } + } + + fn size_hint(&self) -> (usize, Option<usize>) { + self.it.size_hint() + } +} + +impl DoubleEndedIterator for SetMatchesIntoIter { + fn next_back(&mut self) -> Option<usize> { + loop { + let id = self.it.next_back()?; + if self.patset.contains(PatternID::new_unchecked(id)) { + return Some(id); + } + } + } +} + +impl core::iter::FusedIterator for SetMatchesIntoIter {} + +/// A borrowed iterator over the set of matches from a regex set. +/// +/// The lifetime `'a` refers to the lifetime of the [`SetMatches`] value that +/// created this iterator. +/// +/// This will always produces matches in ascending order, where the index +/// corresponds to the index of the regex that matched with respect to its +/// position when initially building the set. +/// +/// This iterator is created by the [`SetMatches::iter`] method. 
+#[derive(Clone, Debug)] +pub struct SetMatchesIter<'a>(PatternSetIter<'a>); + +impl<'a> Iterator for SetMatchesIter<'a> { + type Item = usize; + + fn next(&mut self) -> Option<usize> { + self.0.next().map(|pid| pid.as_usize()) + } + + fn size_hint(&self) -> (usize, Option<usize>) { + self.0.size_hint() + } +} + +impl<'a> DoubleEndedIterator for SetMatchesIter<'a> { + fn next_back(&mut self) -> Option<usize> { + self.0.next_back().map(|pid| pid.as_usize()) + } +} + +impl<'a> core::iter::FusedIterator for SetMatchesIter<'a> {} + +impl core::fmt::Debug for RegexSet { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + write!(f, "RegexSet({:?})", self.patterns()) + } +} diff --git a/vendor/regex/test b/vendor/regex/test new file mode 100755 index 00000000000000..48224c6d114eaa --- /dev/null +++ b/vendor/regex/test @@ -0,0 +1,46 @@ +#!/bin/bash + +set -e + +# cd to the directory containing this crate's Cargo.toml so that we don't need +# to pass --manifest-path to every `cargo` command. +cd "$(dirname "$0")" + +# This is a convenience script for running a broad swath of tests across +# features. We don't test the complete space, since the complete space is quite +# large. Hopefully once we migrate the test suite to better infrastructure +# (like regex-automata), we'll be able to test more of the space. +echo "===== DEFAULT FEATURES =====" +cargo test + +# no-std mode is annoyingly difficult to test. Currently, the integration tests +# don't run. So for now, we just test that library tests run. (There aren't +# many because `regex` is just a wrapper crate.) +cargo test --no-default-features --lib + +echo "===== DOC TESTS =====" +cargo test --doc + +features=( + "std" + "std unicode" + "std unicode-perl" + "std perf" + "std perf-cache" + "std perf-dfa" + "std perf-inline" + "std perf-literal" + "std perf-dfa-full" + "std perf-onepass" + "std perf-backtrack" +) +for f in "${features[@]}"; do + echo "===== FEATURE: $f =====" + cargo test --test integration --no-default-features --features "$f" +done + +# And test the probably-forever-nightly-only 'pattern' feature... +if rustc --version | grep -q nightly; then + echo "===== FEATURE: std,pattern,unicode-perl =====" + cargo test --test integration --no-default-features --features std,pattern,unicode-perl +fi diff --git a/vendor/regex/testdata/README.md b/vendor/regex/testdata/README.md new file mode 100644 index 00000000000000..dcac6719f4f076 --- /dev/null +++ b/vendor/regex/testdata/README.md @@ -0,0 +1,22 @@ +This directory contains a large suite of regex tests defined in a TOML format. +They are used to drive tests in `tests/lib.rs`, `regex-automata/tests/lib.rs` +and `regex-lite/tests/lib.rs`. + +See the [`regex-test`][regex-test] crate documentation for an explanation of +the format and how it generates tests. + +The basic idea here is that we have many different regex engines but generally +one set of tests. We want to be able to run those tests (or most of them) on +every engine. Prior to `regex 1.9`, we used to do this with a hodge podge soup +of macros and a different test executable for each engine. It overall took a +longer time to compile, was harder to maintain, and it made the test definitions +themselves less clear. + +In `regex 1.9`, when we moved over to `regex-automata`, the situation got a lot +worse because of an increase in the number of engines. So I devised an engine +independent format for testing regex patterns and their semantics. 
+ +Note: the naming scheme used in these tests isn't terribly consistent. It would +be great to fix that. + +[regex-test]: https://docs.rs/regex-test diff --git a/vendor/regex/testdata/anchored.toml b/vendor/regex/testdata/anchored.toml new file mode 100644 index 00000000000000..0f2248d098716e --- /dev/null +++ b/vendor/regex/testdata/anchored.toml @@ -0,0 +1,127 @@ +# These tests are specifically geared toward searches with 'anchored = true'. +# While they are interesting in their own right, they are particularly +# important for testing the one-pass DFA since the one-pass DFA can't work in +# unanchored contexts. +# +# Note that "anchored" in this context does not mean "^". Anchored searches are +# searches whose matches must begin at the start of the search, which may not +# be at the start of the haystack. That's why anchored searches---and there are +# some examples below---can still report multiple matches. This occurs when the +# matches are adjacent to one another. + +[[test]] +name = "greedy" +regex = '(abc)+' +haystack = "abcabcabc" +matches = [ + [[0, 9], [6, 9]], +] +anchored = true + +# When a "earliest" search is used, greediness doesn't really exist because +# matches are reported as soon as they are known. +[[test]] +name = "greedy-earliest" +regex = '(abc)+' +haystack = "abcabcabc" +matches = [ + [[0, 3], [0, 3]], + [[3, 6], [3, 6]], + [[6, 9], [6, 9]], +] +anchored = true +search-kind = "earliest" + +[[test]] +name = "nongreedy" +regex = '(abc)+?' +haystack = "abcabcabc" +matches = [ + [[0, 3], [0, 3]], + [[3, 6], [3, 6]], + [[6, 9], [6, 9]], +] +anchored = true + +# When "all" semantics are used, non-greediness doesn't exist since the longest +# possible match is always taken. +[[test]] +name = "nongreedy-all" +regex = '(abc)+?' +haystack = "abcabcabc" +matches = [ + [[0, 9], [6, 9]], +] +anchored = true +match-kind = "all" + +[[test]] +name = "word-boundary-unicode-01" +regex = '\b\w+\b' +haystack = 'βββ☃' +matches = [[0, 6]] +anchored = true + +[[test]] +name = "word-boundary-nounicode-01" +regex = '\b\w+\b' +haystack = 'abcβ' +matches = [[0, 3]] +anchored = true +unicode = false + +# Tests that '.c' doesn't match 'abc' when performing an anchored search from +# the beginning of the haystack. This test found two different bugs in the +# PikeVM and the meta engine. +[[test]] +name = "no-match-at-start" +regex = '.c' +haystack = 'abc' +matches = [] +anchored = true + +# Like above, but at a non-zero start offset. +[[test]] +name = "no-match-at-start-bounds" +regex = '.c' +haystack = 'aabc' +bounds = [1, 4] +matches = [] +anchored = true + +# This is like no-match-at-start, but hits the "reverse inner" optimization +# inside the meta engine. (no-match-at-start hits the "reverse suffix" +# optimization.) +[[test]] +name = "no-match-at-start-reverse-inner" +regex = '.c[a-z]' +haystack = 'abcz' +matches = [] +anchored = true + +# Like above, but at a non-zero start offset. +[[test]] +name = "no-match-at-start-reverse-inner-bounds" +regex = '.c[a-z]' +haystack = 'aabcz' +bounds = [1, 5] +matches = [] +anchored = true + +# Same as no-match-at-start, but applies to the meta engine's "reverse +# anchored" optimization. +[[test]] +name = "no-match-at-start-reverse-anchored" +regex = '.c[a-z]$' +haystack = 'abcz' +matches = [] +anchored = true + +# Like above, but at a non-zero start offset. 
+[[test]] +name = "no-match-at-start-reverse-anchored-bounds" +regex = '.c[a-z]$' +haystack = 'aabcz' +bounds = [1, 5] +matches = [] +anchored = true diff --git a/vendor/regex/testdata/bytes.toml b/vendor/regex/testdata/bytes.toml new file mode 100644 index 00000000000000..346e36971d4335 --- /dev/null +++ b/vendor/regex/testdata/bytes.toml @@ -0,0 +1,235 @@ +# These are tests specifically crafted for regexes that can match arbitrary +# bytes. In some cases, we also test the Unicode variant as well, just because +# it's good sense to do so. But also, these tests aren't really about Unicode, +# but whether matches are only reported at valid UTF-8 boundaries. For most +# tests in this entire collection, utf8 = true. But for these tests, we use +# utf8 = false. + +[[test]] +name = "word-boundary-ascii" +regex = ' \b' +haystack = " δ" +matches = [] +unicode = false +utf8 = false + +[[test]] +name = "word-boundary-unicode" +regex = ' \b' +haystack = " δ" +matches = [[0, 1]] +unicode = true +utf8 = false + +[[test]] +name = "word-boundary-ascii-not" +regex = ' \B' +haystack = " δ" +matches = [[0, 1]] +unicode = false +utf8 = false + +[[test]] +name = "word-boundary-unicode-not" +regex = ' \B' +haystack = " δ" +matches = [] +unicode = true +utf8 = false + +[[test]] +name = "perl-word-ascii" +regex = '\w+' +haystack = "aδ" +matches = [[0, 1]] +unicode = false +utf8 = false + +[[test]] +name = "perl-word-unicode" +regex = '\w+' +haystack = "aδ" +matches = [[0, 3]] +unicode = true +utf8 = false + +[[test]] +name = "perl-decimal-ascii" +regex = '\d+' +haystack = "1२३9" +matches = [[0, 1], [7, 8]] +unicode = false +utf8 = false + +[[test]] +name = "perl-decimal-unicode" +regex = '\d+' +haystack = "1२३9" +matches = [[0, 8]] +unicode = true +utf8 = false + +[[test]] +name = "perl-whitespace-ascii" +regex = '\s+' +haystack = " \u1680" +matches = [[0, 1]] +unicode = false +utf8 = false + +[[test]] +name = "perl-whitespace-unicode" +regex = '\s+' +haystack = " \u1680" +matches = [[0, 4]] +unicode = true +utf8 = false + +# The first `(.+)` matches two Unicode codepoints, but can't match the 5th +# byte, which isn't valid UTF-8. The second (byte based) `(.+)` takes over and +# matches. +[[test]] +name = "mixed-dot" +regex = '(.+)(?-u)(.+)' +haystack = '\xCE\x93\xCE\x94\xFF' +matches = [ + [[0, 5], [0, 4], [4, 5]], +] +unescape = true +unicode = true +utf8 = false + +[[test]] +name = "case-one-ascii" +regex = 'a' +haystack = "A" +matches = [[0, 1]] +case-insensitive = true +unicode = false +utf8 = false + +[[test]] +name = "case-one-unicode" +regex = 'a' +haystack = "A" +matches = [[0, 1]] +case-insensitive = true +unicode = true +utf8 = false + +[[test]] +name = "case-class-simple-ascii" +regex = '[a-z]+' +haystack = "AaAaA" +matches = [[0, 5]] +case-insensitive = true +unicode = false +utf8 = false + +[[test]] +name = "case-class-ascii" +regex = '[a-z]+' +haystack = "aA\u212AaA" +matches = [[0, 2], [5, 7]] +case-insensitive = true +unicode = false +utf8 = false + +[[test]] +name = "case-class-unicode" +regex = '[a-z]+' +haystack = "aA\u212AaA" +matches = [[0, 7]] +case-insensitive = true +unicode = true +utf8 = false + +[[test]] +name = "negate-ascii" +regex = '[^a]' +haystack = "δ" +matches = [[0, 1], [1, 2]] +unicode = false +utf8 = false + +[[test]] +name = "negate-unicode" +regex = '[^a]' +haystack = "δ" +matches = [[0, 2]] +unicode = true +utf8 = false + +# When utf8=true, this won't match, because the implicit '.*?' prefix is +# Unicode aware and will refuse to match through invalid UTF-8 bytes. 
+[[test]] +name = "dotstar-prefix-ascii" +regex = 'a' +haystack = '\xFFa' +matches = [[1, 2]] +unescape = true +unicode = false +utf8 = false + +[[test]] +name = "dotstar-prefix-unicode" +regex = 'a' +haystack = '\xFFa' +matches = [[1, 2]] +unescape = true +unicode = true +utf8 = false + +[[test]] +name = "null-bytes" +regex = '(?P<cstr>[^\x00]+)\x00' +haystack = 'foo\x00' +matches = [ + [[0, 4], [0, 3]], +] +unescape = true +unicode = false +utf8 = false + +[[test]] +name = "invalid-utf8-anchor-100" +regex = '\xCC?^' +haystack = '\x8d#;\x1a\xa4s3\x05foobarX\\\x0f0t\xe4\x9b\xa4' +matches = [[0, 0]] +unescape = true +unicode = false +utf8 = false + +[[test]] +name = "invalid-utf8-anchor-200" +regex = '^\xf7|4\xff\d\x8a\x8a\x8a\x8a\x8a\x8a\x8a\x8a\x8a\x8a\x8a\x8a\x8a##########[] d\x8a\x8a\x8a\x8a\x8a\x8a\x8a\x8a\x8a\x8a\x8a\x8a\x8a##########\[] #####\x80\S7|$' +haystack = '\x8d#;\x1a\xa4s3\x05foobarX\\\x0f0t\xe4\x9b\xa4' +matches = [[22, 22]] +unescape = true +unicode = false +utf8 = false + +[[test]] +name = "invalid-utf8-anchor-300" +regex = '^|ddp\xff\xffdddddlQd@\x80' +haystack = '\x8d#;\x1a\xa4s3\x05foobarX\\\x0f0t\xe4\x9b\xa4' +matches = [[0, 0]] +unescape = true +unicode = false +utf8 = false + +[[test]] +name = "word-boundary-ascii-100" +regex = '\Bx\B' +haystack = "áxβ" +matches = [] +unicode = false +utf8 = false + +[[test]] +name = "word-boundary-ascii-200" +regex = '\B' +haystack = "0\U0007EF5E" +matches = [[2, 2], [3, 3], [4, 4], [5, 5]] +unicode = false +utf8 = false diff --git a/vendor/regex/testdata/crazy.toml b/vendor/regex/testdata/crazy.toml new file mode 100644 index 00000000000000..aed46ea1570f11 --- /dev/null +++ b/vendor/regex/testdata/crazy.toml @@ -0,0 +1,315 @@ +[[test]] +name = "nothing-empty" +regex = [] +haystack = "" +matches = [] + +[[test]] +name = "nothing-something" +regex = [] +haystack = "wat" +matches = [] + +[[test]] +name = "ranges" +regex = '(?-u)\b(?:[0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\b' +haystack = "num: 255" +matches = [[5, 8]] + +[[test]] +name = "ranges-not" +regex = '(?-u)\b(?:[0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\b' +haystack = "num: 256" +matches = [] + +[[test]] +name = "float1" +regex = '[-+]?[0-9]*\.?[0-9]+' +haystack = "0.1" +matches = [[0, 3]] + +[[test]] +name = "float2" +regex = '[-+]?[0-9]*\.?[0-9]+' +haystack = "0.1.2" +matches = [[0, 3]] +match-limit = 1 + +[[test]] +name = "float3" +regex = '[-+]?[0-9]*\.?[0-9]+' +haystack = "a1.2" +matches = [[1, 4]] + +[[test]] +name = "float4" +regex = '[-+]?[0-9]*\.?[0-9]+' +haystack = "1.a" +matches = [[0, 1]] + +[[test]] +name = "float5" +regex = '^[-+]?[0-9]*\.?[0-9]+$' +haystack = "1.a" +matches = [] + +[[test]] +name = "email" +regex = '(?i-u)\b[A-Z0-9._%+-]+@[A-Z0-9.-]+\.[A-Z]{2,4}\b' +haystack = "mine is jam.slam@gmail.com " +matches = [[8, 26]] + +[[test]] +name = "email-not" +regex = '(?i-u)\b[A-Z0-9._%+-]+@[A-Z0-9.-]+\.[A-Z]{2,4}\b' +haystack = "mine is jam.slam@gmail " +matches = [] + +[[test]] +name = "email-big" +regex = '''[a-z0-9!#$%&'*+/=?^_`{|}~-]+(?:\.[a-z0-9!#$%&'*+/=?^_`{|}~-]+)*@(?:[a-z0-9](?:[a-z0-9-]*[a-z0-9])?\.)+[a-z0-9](?:[a-z0-9-]*[a-z0-9])?''' +haystack = "mine is jam.slam@gmail.com " +matches = [[8, 26]] + +[[test]] +name = "date1" +regex = '^(?:19|20)\d\d[- /.](?:0[1-9]|1[012])[- /.](?:0[1-9]|[12][0-9]|3[01])$' +haystack = "1900-01-01" +matches = [[0, 10]] +unicode = false + +[[test]] +name = "date2" +regex = '^(?:19|20)\d\d[- /.](?:0[1-9]|1[012])[- /.](?:0[1-9]|[12][0-9]|3[01])$' +haystack = "1900-00-01" +matches = [] +unicode = false + 
+[[test]] +name = "date3" +regex = '^(?:19|20)\d\d[- /.](?:0[1-9]|1[012])[- /.](?:0[1-9]|[12][0-9]|3[01])$' +haystack = "1900-13-01" +matches = [] +unicode = false + +[[test]] +name = "start-end-empty" +regex = '^$' +haystack = "" +matches = [[0, 0]] + +[[test]] +name = "start-end-empty-rev" +regex = '$^' +haystack = "" +matches = [[0, 0]] + +[[test]] +name = "start-end-empty-many-1" +regex = '^$^$^$' +haystack = "" +matches = [[0, 0]] + +[[test]] +name = "start-end-empty-many-2" +regex = '^^^$$$' +haystack = "" +matches = [[0, 0]] + +[[test]] +name = "start-end-empty-rep" +regex = '(?:^$)*' +haystack = "a\nb\nc" +matches = [[0, 0], [1, 1], [2, 2], [3, 3], [4, 4], [5, 5]] + +[[test]] +name = "start-end-empty-rep-rev" +regex = '(?:$^)*' +haystack = "a\nb\nc" +matches = [[0, 0], [1, 1], [2, 2], [3, 3], [4, 4], [5, 5]] + +[[test]] +name = "neg-class-letter" +regex = '[^ac]' +haystack = "acx" +matches = [[2, 3]] + +[[test]] +name = "neg-class-letter-comma" +regex = '[^a,]' +haystack = "a,x" +matches = [[2, 3]] + +[[test]] +name = "neg-class-letter-space" +regex = '[^a[:space:]]' +haystack = "a x" +matches = [[2, 3]] + +[[test]] +name = "neg-class-comma" +regex = '[^,]' +haystack = ",,x" +matches = [[2, 3]] + +[[test]] +name = "neg-class-space" +regex = '[^[:space:]]' +haystack = " a" +matches = [[1, 2]] + +[[test]] +name = "neg-class-space-comma" +regex = '[^,[:space:]]' +haystack = ", a" +matches = [[2, 3]] + +[[test]] +name = "neg-class-comma-space" +regex = '[^[:space:],]' +haystack = " ,a" +matches = [[2, 3]] + +[[test]] +name = "neg-class-ascii" +regex = '[^[:alpha:]Z]' +haystack = "A1" +matches = [[1, 2]] + +[[test]] +name = "lazy-many-many" +regex = '(?:(?:.*)*?)=' +haystack = "a=b" +matches = [[0, 2]] + +[[test]] +name = "lazy-many-optional" +regex = '(?:(?:.?)*?)=' +haystack = "a=b" +matches = [[0, 2]] + +[[test]] +name = "lazy-one-many-many" +regex = '(?:(?:.*)+?)=' +haystack = "a=b" +matches = [[0, 2]] + +[[test]] +name = "lazy-one-many-optional" +regex = '(?:(?:.?)+?)=' +haystack = "a=b" +matches = [[0, 2]] + +[[test]] +name = "lazy-range-min-many" +regex = '(?:(?:.*){1,}?)=' +haystack = "a=b" +matches = [[0, 2]] + +[[test]] +name = "lazy-range-many" +regex = '(?:(?:.*){1,2}?)=' +haystack = "a=b" +matches = [[0, 2]] + +[[test]] +name = "greedy-many-many" +regex = '(?:(?:.*)*)=' +haystack = "a=b" +matches = [[0, 2]] + +[[test]] +name = "greedy-many-optional" +regex = '(?:(?:.?)*)=' +haystack = "a=b" +matches = [[0, 2]] + +[[test]] +name = "greedy-one-many-many" +regex = '(?:(?:.*)+)=' +haystack = "a=b" +matches = [[0, 2]] + +[[test]] +name = "greedy-one-many-optional" +regex = '(?:(?:.?)+)=' +haystack = "a=b" +matches = [[0, 2]] + +[[test]] +name = "greedy-range-min-many" +regex = '(?:(?:.*){1,})=' +haystack = "a=b" +matches = [[0, 2]] + +[[test]] +name = "greedy-range-many" +regex = '(?:(?:.*){1,2})=' +haystack = "a=b" +matches = [[0, 2]] + +[[test]] +name = "empty1" +regex = '' +haystack = "" +matches = [[0, 0]] + +[[test]] +name = "empty2" +regex = '' +haystack = "abc" +matches = [[0, 0], [1, 1], [2, 2], [3, 3]] + +[[test]] +name = "empty3" +regex = '(?:)' +haystack = "abc" +matches = [[0, 0], [1, 1], [2, 2], [3, 3]] + +[[test]] +name = "empty4" +regex = '(?:)*' +haystack = "abc" +matches = [[0, 0], [1, 1], [2, 2], [3, 3]] + +[[test]] +name = "empty5" +regex = '(?:)+' +haystack = "abc" +matches = [[0, 0], [1, 1], [2, 2], [3, 3]] + +[[test]] +name = "empty6" +regex = '(?:)?' 
+haystack = "abc" +matches = [[0, 0], [1, 1], [2, 2], [3, 3]] + +[[test]] +name = "empty7" +regex = '(?:)(?:)' +haystack = "abc" +matches = [[0, 0], [1, 1], [2, 2], [3, 3]] + +[[test]] +name = "empty8" +regex = '(?:)+|z' +haystack = "abc" +matches = [[0, 0], [1, 1], [2, 2], [3, 3]] + +[[test]] +name = "empty9" +regex = 'z|(?:)+' +haystack = "abc" +matches = [[0, 0], [1, 1], [2, 2], [3, 3]] + +[[test]] +name = "empty10" +regex = '(?:)+|b' +haystack = "abc" +matches = [[0, 0], [1, 1], [2, 2], [3, 3]] + +[[test]] +name = "empty11" +regex = 'b|(?:)+' +haystack = "abc" +matches = [[0, 0], [1, 2], [3, 3]] diff --git a/vendor/regex/testdata/crlf.toml b/vendor/regex/testdata/crlf.toml new file mode 100644 index 00000000000000..9e2d3761af7271 --- /dev/null +++ b/vendor/regex/testdata/crlf.toml @@ -0,0 +1,117 @@ +# This is a basic test that checks ^ and $ treat \r\n as a single line +# terminator. If ^ and $ only treated \n as a line terminator, then this would +# only match 'xyz' at the end of the haystack. +[[test]] +name = "basic" +regex = '(?mR)^[a-z]+$' +haystack = "abc\r\ndef\r\nxyz" +matches = [[0, 3], [5, 8], [10, 13]] + +# Tests that a CRLF-aware '^$' assertion does not match between CR and LF. +[[test]] +name = "start-end-non-empty" +regex = '(?mR)^$' +haystack = "abc\r\ndef\r\nxyz" +matches = [] + +# Tests that a CRLF-aware '^$' assertion matches the empty string, just like +# a non-CRLF-aware '^$' assertion. +[[test]] +name = "start-end-empty" +regex = '(?mR)^$' +haystack = "" +matches = [[0, 0]] + +# Tests that a CRLF-aware '^$' assertion matches the empty string preceding +# and following a line terminator. +[[test]] +name = "start-end-before-after" +regex = '(?mR)^$' +haystack = "\r\n" +matches = [[0, 0], [2, 2]] + +# Tests that a CRLF-aware '^' assertion does not split a line terminator. +[[test]] +name = "start-no-split" +regex = '(?mR)^' +haystack = "abc\r\ndef\r\nxyz" +matches = [[0, 0], [5, 5], [10, 10]] + +# Same as above, but with adjacent runs of line terminators. +[[test]] +name = "start-no-split-adjacent" +regex = '(?mR)^' +haystack = "\r\n\r\n\r\n" +matches = [[0, 0], [2, 2], [4, 4], [6, 6]] + +# Same as above, but with adjacent runs of just carriage returns. +[[test]] +name = "start-no-split-adjacent-cr" +regex = '(?mR)^' +haystack = "\r\r\r" +matches = [[0, 0], [1, 1], [2, 2], [3, 3]] + +# Same as above, but with adjacent runs of just line feeds. +[[test]] +name = "start-no-split-adjacent-lf" +regex = '(?mR)^' +haystack = "\n\n\n" +matches = [[0, 0], [1, 1], [2, 2], [3, 3]] + +# Tests that a CRLF-aware '$' assertion does not split a line terminator. +[[test]] +name = "end-no-split" +regex = '(?mR)$' +haystack = "abc\r\ndef\r\nxyz" +matches = [[3, 3], [8, 8], [13, 13]] + +# Same as above, but with adjacent runs of line terminators. +[[test]] +name = "end-no-split-adjacent" +regex = '(?mR)$' +haystack = "\r\n\r\n\r\n" +matches = [[0, 0], [2, 2], [4, 4], [6, 6]] + +# Same as above, but with adjacent runs of just carriage returns. +[[test]] +name = "end-no-split-adjacent-cr" +regex = '(?mR)$' +haystack = "\r\r\r" +matches = [[0, 0], [1, 1], [2, 2], [3, 3]] + +# Same as above, but with adjacent runs of just line feeds. +[[test]] +name = "end-no-split-adjacent-lf" +regex = '(?mR)$' +haystack = "\n\n\n" +matches = [[0, 0], [1, 1], [2, 2], [3, 3]] + +# Tests that '.' does not match either \r or \n when CRLF mode is enabled. Note +# that this doesn't require multi-line mode to be enabled. +[[test]] +name = "dot-no-crlf" +regex = '(?R).' 
+haystack = "\r\n\r\n\r\n" +matches = [] + +# This is a test that caught a bug in the one-pass DFA where it (amazingly) was +# using 'is_end_lf' instead of 'is_end_crlf' here. It was probably a copy & +# paste bug. We insert an empty capture group here because it provokes the meta +# regex engine to first find a match and then trip over a panic because the +# one-pass DFA erroneously says there is no match. +[[test]] +name = "onepass-wrong-crlf-with-capture" +regex = '(?Rm:().$)' +haystack = "ZZ\r" +matches = [[[1, 2], [1, 1]]] + +# This is like onepass-wrong-crlf-with-capture above, except it sets up the +# test so that it can be run by the one-pass DFA directly. (i.e., Make it +# anchored and start the search at the right place.) +[[test]] +name = "onepass-wrong-crlf-anchored" +regex = '(?Rm:.$)' +haystack = "ZZ\r" +matches = [[1, 2]] +anchored = true +bounds = [1, 3] diff --git a/vendor/regex/testdata/earliest.toml b/vendor/regex/testdata/earliest.toml new file mode 100644 index 00000000000000..951689358e6516 --- /dev/null +++ b/vendor/regex/testdata/earliest.toml @@ -0,0 +1,52 @@ +[[test]] +name = "no-greedy-100" +regex = 'a+' +haystack = "aaa" +matches = [[0, 1], [1, 2], [2, 3]] +search-kind = "earliest" + +[[test]] +name = "no-greedy-200" +regex = 'abc+' +haystack = "zzzabccc" +matches = [[3, 6]] +search-kind = "earliest" + +[[test]] +name = "is-ungreedy" +regex = 'a+?' +haystack = "aaa" +matches = [[0, 1], [1, 2], [2, 3]] +search-kind = "earliest" + +[[test]] +name = "look-start-test" +regex = '^(abc|a)' +haystack = "abc" +matches = [ + [[0, 1], [0, 1]], +] +search-kind = "earliest" + +[[test]] +name = "look-end-test" +regex = '(abc|a)$' +haystack = "abc" +matches = [ + [[0, 3], [0, 3]], +] +search-kind = "earliest" + +[[test]] +name = "no-leftmost-first-100" +regex = 'abc|a' +haystack = "abc" +matches = [[0, 1]] +search-kind = "earliest" + +[[test]] +name = "no-leftmost-first-200" +regex = 'aba|a' +haystack = "aba" +matches = [[0, 1], [2, 3]] +search-kind = "earliest" diff --git a/vendor/regex/testdata/empty.toml b/vendor/regex/testdata/empty.toml new file mode 100644 index 00000000000000..7dfd8027a4410f --- /dev/null +++ b/vendor/regex/testdata/empty.toml @@ -0,0 +1,113 @@ +[[test]] +name = "100" +regex = "|b" +haystack = "abc" +matches = [[0, 0], [1, 1], [2, 2], [3, 3]] + +[[test]] +name = "110" +regex = "b|" +haystack = "abc" +matches = [[0, 0], [1, 2], [3, 3]] + +[[test]] +name = "120" +regex = "|z" +haystack = "abc" +matches = [[0, 0], [1, 1], [2, 2], [3, 3]] + +[[test]] +name = "130" +regex = "z|" +haystack = "abc" +matches = [[0, 0], [1, 1], [2, 2], [3, 3]] + +[[test]] +name = "200" +regex = "|" +haystack = "abc" +matches = [[0, 0], [1, 1], [2, 2], [3, 3]] + +[[test]] +name = "210" +regex = "||" +haystack = "abc" +matches = [[0, 0], [1, 1], [2, 2], [3, 3]] + +[[test]] +name = "220" +regex = "||b" +haystack = "abc" +matches = [[0, 0], [1, 1], [2, 2], [3, 3]] + +[[test]] +name = "230" +regex = "b||" +haystack = "abc" +matches = [[0, 0], [1, 2], [3, 3]] + +[[test]] +name = "240" +regex = "||z" +haystack = "abc" +matches = [[0, 0], [1, 1], [2, 2], [3, 3]] + +[[test]] +name = "300" +regex = "(?:)|b" +haystack = "abc" +matches = [[0, 0], [1, 1], [2, 2], [3, 3]] + +[[test]] +name = "310" +regex = "b|(?:)" +haystack = "abc" +matches = [[0, 0], [1, 2], [3, 3]] + +[[test]] +name = "320" +regex = "(?:|)" +haystack = "abc" +matches = [[0, 0], [1, 1], [2, 2], [3, 3]] + +[[test]] +name = "330" +regex = "(?:|)|z" +haystack = "abc" +matches = [[0, 0], [1, 1], [2, 2], [3, 3]] + +[[test]] 
+name = "400" +regex = "a(?:)|b" +haystack = "abc" +matches = [[0, 1], [1, 2]] + +[[test]] +name = "500" +regex = "" +haystack = "" +matches = [[0, 0]] + +[[test]] +name = "510" +regex = "" +haystack = "a" +matches = [[0, 0], [1, 1]] + +[[test]] +name = "520" +regex = "" +haystack = "abc" +matches = [[0, 0], [1, 1], [2, 2], [3, 3]] + +[[test]] +name = "600" +regex = '(?:|a)*' +haystack = "aaa" +matches = [[0, 0], [1, 1], [2, 2], [3, 3]] + +[[test]] +name = "610" +regex = '(?:|a)+' +haystack = "aaa" +matches = [[0, 0], [1, 1], [2, 2], [3, 3]] diff --git a/vendor/regex/testdata/expensive.toml b/vendor/regex/testdata/expensive.toml new file mode 100644 index 00000000000000..b70e42f9bb15ca --- /dev/null +++ b/vendor/regex/testdata/expensive.toml @@ -0,0 +1,23 @@ +# This file represent tests that may be expensive to run on some regex engines. +# For example, tests that build a full DFA ahead of time and minimize it can +# take a horrendously long time on regexes that are large (or result in an +# explosion in the number of states). We group these tests together so that +# such engines can simply skip these tests. + +# See: https://github.com/rust-lang/regex/issues/98 +[[test]] +name = "regression-many-repeat-no-stack-overflow" +regex = '^.{1,2500}' +haystack = "a" +matches = [[0, 1]] + +# This test is meant to blow the bounded backtracker's visited capacity. In +# order to do that, we need a somewhat sizeable regex. The purpose of this +# is to make sure there's at least one test that exercises this path in the +# backtracker. All other tests (at time of writing) are small enough that the +# backtracker can handle them fine. +[[test]] +name = "backtrack-blow-visited-capacity" +regex = '\pL{50}' +haystack = "abcdefghijklmnopqrstuvwxyabcdefghijklmnopqrstuvwxyabcdefghijklmnopqrstuvwxyabcdefghijklmnopqrstuvwxyabcdefghijklmnopqrstuvwxyabcdefghijklmnopqrstuvwxyZZ" +matches = [[0, 50], [50, 100], [100, 150]] diff --git a/vendor/regex/testdata/flags.toml b/vendor/regex/testdata/flags.toml new file mode 100644 index 00000000000000..30b412ca65079d --- /dev/null +++ b/vendor/regex/testdata/flags.toml @@ -0,0 +1,68 @@ +[[test]] +name = "1" +regex = "(?i)abc" +haystack = "ABC" +matches = [[0, 3]] + +[[test]] +name = "2" +regex = "(?i)a(?-i)bc" +haystack = "Abc" +matches = [[0, 3]] + +[[test]] +name = "3" +regex = "(?i)a(?-i)bc" +haystack = "ABC" +matches = [] + +[[test]] +name = "4" +regex = "(?is)a." +haystack = "A\n" +matches = [[0, 2]] + +[[test]] +name = "5" +regex = "(?is)a.(?-is)a." +haystack = "A\nab" +matches = [[0, 4]] + +[[test]] +name = "6" +regex = "(?is)a.(?-is)a." +haystack = "A\na\n" +matches = [] + +[[test]] +name = "7" +regex = "(?is)a.(?-is:a.)?" +haystack = "A\na\n" +matches = [[0, 2]] +match-limit = 1 + +[[test]] +name = "8" +regex = "(?U)a+" +haystack = "aa" +matches = [[0, 1]] +match-limit = 1 + +[[test]] +name = "9" +regex = "(?U)a+?" +haystack = "aa" +matches = [[0, 2]] + +[[test]] +name = "10" +regex = "(?U)(?-U)a+" +haystack = "aa" +matches = [[0, 2]] + +[[test]] +name = "11" +regex = '(?m)(?:^\d+$\n?)+' +haystack = "123\n456\n789" +matches = [[0, 11]] +unicode = false diff --git a/vendor/regex/testdata/fowler/basic.toml b/vendor/regex/testdata/fowler/basic.toml new file mode 100644 index 00000000000000..92b4e4cf724c34 --- /dev/null +++ b/vendor/regex/testdata/fowler/basic.toml @@ -0,0 +1,1611 @@ +# !!! DO NOT EDIT !!! +# Automatically generated by 'regex-cli generate fowler'. +# Numbers in the test names correspond to the line number of the test from +# the original dat file. 
+ +[[test]] +name = "basic3" +regex = '''abracadabra$''' +haystack = '''abracadabracadabra''' +matches = [[[7, 18]]] +match-limit = 1 + +[[test]] +name = "basic4" +regex = '''a...b''' +haystack = '''abababbb''' +matches = [[[2, 7]]] +match-limit = 1 + +[[test]] +name = "basic5" +regex = '''XXXXXX''' +haystack = '''..XXXXXX''' +matches = [[[2, 8]]] +match-limit = 1 + +[[test]] +name = "basic6" +regex = '''\)''' +haystack = '''()''' +matches = [[[1, 2]]] +match-limit = 1 + +[[test]] +name = "basic7" +regex = '''a]''' +haystack = '''a]a''' +matches = [[[0, 2]]] +match-limit = 1 +anchored = true + +[[test]] +name = "basic9" +regex = '''\}''' +haystack = '''}''' +matches = [[[0, 1]]] +match-limit = 1 +anchored = true + +[[test]] +name = "basic10" +regex = '''\]''' +haystack = ''']''' +matches = [[[0, 1]]] +match-limit = 1 +anchored = true + +[[test]] +name = "basic12" +regex = ''']''' +haystack = ''']''' +matches = [[[0, 1]]] +match-limit = 1 +anchored = true + +[[test]] +name = "basic15" +regex = '''^a''' +haystack = '''ax''' +matches = [[[0, 1]]] +match-limit = 1 +anchored = true + +[[test]] +name = "basic16" +regex = '''\^a''' +haystack = '''a^a''' +matches = [[[1, 3]]] +match-limit = 1 + +[[test]] +name = "basic17" +regex = '''a\^''' +haystack = '''a^''' +matches = [[[0, 2]]] +match-limit = 1 +anchored = true + +[[test]] +name = "basic18" +regex = '''a$''' +haystack = '''aa''' +matches = [[[1, 2]]] +match-limit = 1 + +[[test]] +name = "basic19" +regex = '''a\$''' +haystack = '''a$''' +matches = [[[0, 2]]] +match-limit = 1 +anchored = true + +[[test]] +name = "basic20" +regex = '''^$''' +haystack = '''''' +matches = [[[0, 0]]] +match-limit = 1 +anchored = true + +[[test]] +name = "basic21" +regex = '''$^''' +haystack = '''''' +matches = [[[0, 0]]] +match-limit = 1 +anchored = true + +[[test]] +name = "basic22" +regex = '''a($)''' +haystack = '''aa''' +matches = [[[1, 2], [2, 2]]] +match-limit = 1 + +[[test]] +name = "basic23" +regex = '''a*(^a)''' +haystack = '''aa''' +matches = [[[0, 1], [0, 1]]] +match-limit = 1 +anchored = true + +[[test]] +name = "basic24" +regex = '''(..)*(...)*''' +haystack = '''a''' +matches = [[[0, 0], [], []]] +match-limit = 1 +anchored = true + +[[test]] +name = "basic25" +regex = '''(..)*(...)*''' +haystack = '''abcd''' +matches = [[[0, 4], [2, 4], []]] +match-limit = 1 +anchored = true + +[[test]] +name = "basic26" +regex = '''(ab|a)(bc|c)''' +haystack = '''abc''' +matches = [[[0, 3], [0, 2], [2, 3]]] +match-limit = 1 +anchored = true + +[[test]] +name = "basic27" +regex = '''(ab)c|abc''' +haystack = '''abc''' +matches = [[[0, 3], [0, 2]]] +match-limit = 1 +anchored = true + +[[test]] +name = "basic28" +regex = '''a{0}b''' +haystack = '''ab''' +matches = [[[1, 2]]] +match-limit = 1 + +[[test]] +name = "basic29" +regex = '''(a*)(b?)(b+)b{3}''' +haystack = '''aaabbbbbbb''' +matches = [[[0, 10], [0, 3], [3, 4], [4, 7]]] +match-limit = 1 +anchored = true + +[[test]] +name = "basic30" +regex = '''(a*)(b{0,1})(b{1,})b{3}''' +haystack = '''aaabbbbbbb''' +matches = [[[0, 10], [0, 3], [3, 4], [4, 7]]] +match-limit = 1 +anchored = true + +[[test]] +name = "basic32" +regex = '''((a|a)|a)''' +haystack = '''a''' +matches = [[[0, 1], [0, 1], [0, 1]]] +match-limit = 1 +anchored = true + +[[test]] +name = "basic33" +regex = '''(a*)(a|aa)''' +haystack = '''aaaa''' +matches = [[[0, 4], [0, 3], [3, 4]]] +match-limit = 1 +anchored = true + +[[test]] +name = "basic34" +regex = '''a*(a.|aa)''' +haystack = '''aaaa''' +matches = [[[0, 4], [2, 4]]] +match-limit = 1 +anchored = true + 
+[[test]] +name = "basic35" +regex = '''a(b)|c(d)|a(e)f''' +haystack = '''aef''' +matches = [[[0, 3], [], [], [1, 2]]] +match-limit = 1 +anchored = true + +[[test]] +name = "basic36" +regex = '''(a|b)?.*''' +haystack = '''b''' +matches = [[[0, 1], [0, 1]]] +match-limit = 1 +anchored = true + +[[test]] +name = "basic37" +regex = '''(a|b)c|a(b|c)''' +haystack = '''ac''' +matches = [[[0, 2], [0, 1], []]] +match-limit = 1 +anchored = true + +[[test]] +name = "basic38" +regex = '''(a|b)c|a(b|c)''' +haystack = '''ab''' +matches = [[[0, 2], [], [1, 2]]] +match-limit = 1 +anchored = true + +[[test]] +name = "basic39" +regex = '''(a|b)*c|(a|ab)*c''' +haystack = '''abc''' +matches = [[[0, 3], [1, 2], []]] +match-limit = 1 +anchored = true + +[[test]] +name = "basic40" +regex = '''(a|b)*c|(a|ab)*c''' +haystack = '''xc''' +matches = [[[1, 2], [], []]] +match-limit = 1 + +[[test]] +name = "basic41" +regex = '''(.a|.b).*|.*(.a|.b)''' +haystack = '''xa''' +matches = [[[0, 2], [0, 2], []]] +match-limit = 1 +anchored = true + +[[test]] +name = "basic42" +regex = '''a?(ab|ba)ab''' +haystack = '''abab''' +matches = [[[0, 4], [0, 2]]] +match-limit = 1 +anchored = true + +[[test]] +name = "basic43" +regex = '''a?(ac{0}b|ba)ab''' +haystack = '''abab''' +matches = [[[0, 4], [0, 2]]] +match-limit = 1 +anchored = true + +[[test]] +name = "basic44" +regex = '''ab|abab''' +haystack = '''abbabab''' +matches = [[[0, 2]]] +match-limit = 1 +anchored = true + +[[test]] +name = "basic45" +regex = '''aba|bab|bba''' +haystack = '''baaabbbaba''' +matches = [[[5, 8]]] +match-limit = 1 + +[[test]] +name = "basic46" +regex = '''aba|bab''' +haystack = '''baaabbbaba''' +matches = [[[6, 9]]] +match-limit = 1 + +[[test]] +name = "basic47" +regex = '''(aa|aaa)*|(a|aaaaa)''' +haystack = '''aa''' +matches = [[[0, 2], [0, 2], []]] +match-limit = 1 +anchored = true + +[[test]] +name = "basic48" +regex = '''(a.|.a.)*|(a|.a...)''' +haystack = '''aa''' +matches = [[[0, 2], [0, 2], []]] +match-limit = 1 +anchored = true + +[[test]] +name = "basic49" +regex = '''ab|a''' +haystack = '''xabc''' +matches = [[[1, 3]]] +match-limit = 1 + +[[test]] +name = "basic50" +regex = '''ab|a''' +haystack = '''xxabc''' +matches = [[[2, 4]]] +match-limit = 1 + +[[test]] +name = "basic51" +regex = '''(Ab|cD)*''' +haystack = '''aBcD''' +matches = [[[0, 4], [2, 4]]] +match-limit = 1 +anchored = true +case-insensitive = true + +[[test]] +name = "basic52" +regex = '''[^-]''' +haystack = '''--a''' +matches = [[[2, 3]]] +match-limit = 1 + +[[test]] +name = "basic53" +regex = '''[a-]*''' +haystack = '''--a''' +matches = [[[0, 3]]] +match-limit = 1 +anchored = true + +[[test]] +name = "basic54" +regex = '''[a-m-]*''' +haystack = '''--amoma--''' +matches = [[[0, 4]]] +match-limit = 1 +anchored = true + +[[test]] +name = "basic55" +regex = ''':::1:::0:|:::1:1:0:''' +haystack = ''':::0:::1:::1:::0:''' +matches = [[[8, 17]]] +match-limit = 1 + +[[test]] +name = "basic56" +regex = ''':::1:::0:|:::1:1:1:''' +haystack = ''':::0:::1:::1:::0:''' +matches = [[[8, 17]]] +match-limit = 1 + +[[test]] +name = "basic57" +regex = '''[[:upper:]]''' +haystack = '''A''' +matches = [[[0, 1]]] +match-limit = 1 +anchored = true + +[[test]] +name = "basic58" +regex = '''[[:lower:]]+''' +haystack = '''`az{''' +matches = [[[1, 3]]] +match-limit = 1 + +[[test]] +name = "basic59" +regex = '''[[:upper:]]+''' +haystack = '''@AZ[''' +matches = [[[1, 3]]] +match-limit = 1 + +[[test]] +name = "basic65" +regex = '''\n''' +haystack = '''\n''' +matches = [[[0, 1]]] +match-limit = 1 +anchored = true 
+unescape = true + +[[test]] +name = "basic66" +regex = '''\n''' +haystack = '''\n''' +matches = [[[0, 1]]] +match-limit = 1 +anchored = true +unescape = true + +[[test]] +name = "basic67" +regex = '''[^a]''' +haystack = '''\n''' +matches = [[[0, 1]]] +match-limit = 1 +anchored = true +unescape = true + +[[test]] +name = "basic68" +regex = '''\na''' +haystack = '''\na''' +matches = [[[0, 2]]] +match-limit = 1 +anchored = true +unescape = true + +[[test]] +name = "basic69" +regex = '''(a)(b)(c)''' +haystack = '''abc''' +matches = [[[0, 3], [0, 1], [1, 2], [2, 3]]] +match-limit = 1 +anchored = true + +[[test]] +name = "basic70" +regex = '''xxx''' +haystack = '''xxx''' +matches = [[[0, 3]]] +match-limit = 1 +anchored = true + +# Test added by Rust regex project. +[[test]] +name = "basic72" +regex = '''(?:^|[ (,;])(?:(?:(?:[Ff]eb[^ ]* *|0*2/|\* */?)0*[6-7]))(?:[^0-9]|$)''' +haystack = '''feb 6,''' +matches = [[[0, 6]]] +match-limit = 1 +anchored = true + +# Test added by Rust regex project. +[[test]] +name = "basic74" +regex = '''(?:^|[ (,;])(?:(?:(?:[Ff]eb[^ ]* *|0*2/|\* */?)0*[6-7]))(?:[^0-9]|$)''' +haystack = '''2/7''' +matches = [[[0, 3]]] +match-limit = 1 +anchored = true + +# Test added by Rust regex project. +[[test]] +name = "basic76" +regex = '''(?:^|[ (,;])(?:(?:(?:[Ff]eb[^ ]* *|0*2/|\* */?)0*[6-7]))(?:[^0-9]|$)''' +haystack = '''feb 1,Feb 6''' +matches = [[[5, 11]]] +match-limit = 1 + +# Test added by Rust regex project. +[[test]] +name = "basic78" +regex = '''(((?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:x))))))))))))))))))))))))))))))''' +haystack = '''x''' +matches = [[[0, 1], [0, 1], [0, 1]]] +match-limit = 1 +anchored = true + +# Test added by Rust regex project. +[[test]] +name = "basic80" +regex = '''(((?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:x))))))))))))))))))))))))))))))*''' +haystack = '''xx''' +matches = [[[0, 2], [1, 2], [1, 2]]] +match-limit = 1 +anchored = true + +[[test]] +name = "basic81" +regex = '''a?(ab|ba)*''' +haystack = '''ababababababababababababababababababababababababababababababababababababababababa''' +matches = [[[0, 81], [79, 81]]] +match-limit = 1 +anchored = true + +[[test]] +name = "basic82" +regex = '''abaa|abbaa|abbbaa|abbbbaa''' +haystack = '''ababbabbbabbbabbbbabbbbaa''' +matches = [[[18, 25]]] +match-limit = 1 + +[[test]] +name = "basic83" +regex = '''abaa|abbaa|abbbaa|abbbbaa''' +haystack = '''ababbabbbabbbabbbbabaa''' +matches = [[[18, 22]]] +match-limit = 1 + +[[test]] +name = "basic84" +regex = '''aaac|aabc|abac|abbc|baac|babc|bbac|bbbc''' +haystack = '''baaabbbabac''' +matches = [[[7, 11]]] +match-limit = 1 + +# Test added by Rust regex project. 
+[[test]] +name = "basic86" +regex = '''.*''' +haystack = '''\x01\x7f''' +matches = [[[0, 2]]] +match-limit = 1 +anchored = true +unescape = true + +[[test]] +name = "basic87" +regex = '''aaaa|bbbb|cccc|ddddd|eeeeee|fffffff|gggg|hhhh|iiiii|jjjjj|kkkkk|llll''' +haystack = '''XaaaXbbbXcccXdddXeeeXfffXgggXhhhXiiiXjjjXkkkXlllXcbaXaaaa''' +matches = [[[53, 57]]] +match-limit = 1 + +[[test]] +name = "basic89" +regex = '''a*a*a*a*a*b''' +haystack = '''aaaaaaaaab''' +matches = [[[0, 10]]] +match-limit = 1 +anchored = true + +[[test]] +name = "basic90" +regex = '''^''' +haystack = '''''' +matches = [[[0, 0]]] +match-limit = 1 +anchored = true + +[[test]] +name = "basic91" +regex = '''$''' +haystack = '''''' +matches = [[[0, 0]]] +match-limit = 1 +anchored = true + +[[test]] +name = "basic92" +regex = '''^$''' +haystack = '''''' +matches = [[[0, 0]]] +match-limit = 1 +anchored = true + +[[test]] +name = "basic93" +regex = '''^a$''' +haystack = '''a''' +matches = [[[0, 1]]] +match-limit = 1 +anchored = true + +[[test]] +name = "basic94" +regex = '''abc''' +haystack = '''abc''' +matches = [[[0, 3]]] +match-limit = 1 +anchored = true + +[[test]] +name = "basic95" +regex = '''abc''' +haystack = '''xabcy''' +matches = [[[1, 4]]] +match-limit = 1 + +[[test]] +name = "basic96" +regex = '''abc''' +haystack = '''ababc''' +matches = [[[2, 5]]] +match-limit = 1 + +[[test]] +name = "basic97" +regex = '''ab*c''' +haystack = '''abc''' +matches = [[[0, 3]]] +match-limit = 1 +anchored = true + +[[test]] +name = "basic98" +regex = '''ab*bc''' +haystack = '''abc''' +matches = [[[0, 3]]] +match-limit = 1 +anchored = true + +[[test]] +name = "basic99" +regex = '''ab*bc''' +haystack = '''abbc''' +matches = [[[0, 4]]] +match-limit = 1 +anchored = true + +[[test]] +name = "basic100" +regex = '''ab*bc''' +haystack = '''abbbbc''' +matches = [[[0, 6]]] +match-limit = 1 +anchored = true + +[[test]] +name = "basic101" +regex = '''ab+bc''' +haystack = '''abbc''' +matches = [[[0, 4]]] +match-limit = 1 +anchored = true + +[[test]] +name = "basic102" +regex = '''ab+bc''' +haystack = '''abbbbc''' +matches = [[[0, 6]]] +match-limit = 1 +anchored = true + +[[test]] +name = "basic103" +regex = '''ab?bc''' +haystack = '''abbc''' +matches = [[[0, 4]]] +match-limit = 1 +anchored = true + +[[test]] +name = "basic104" +regex = '''ab?bc''' +haystack = '''abc''' +matches = [[[0, 3]]] +match-limit = 1 +anchored = true + +[[test]] +name = "basic105" +regex = '''ab?c''' +haystack = '''abc''' +matches = [[[0, 3]]] +match-limit = 1 +anchored = true + +[[test]] +name = "basic106" +regex = '''^abc$''' +haystack = '''abc''' +matches = [[[0, 3]]] +match-limit = 1 +anchored = true + +[[test]] +name = "basic107" +regex = '''^abc''' +haystack = '''abcc''' +matches = [[[0, 3]]] +match-limit = 1 +anchored = true + +[[test]] +name = "basic108" +regex = '''abc$''' +haystack = '''aabc''' +matches = [[[1, 4]]] +match-limit = 1 + +[[test]] +name = "basic109" +regex = '''^''' +haystack = '''abc''' +matches = [[[0, 0]]] +match-limit = 1 +anchored = true + +[[test]] +name = "basic110" +regex = '''$''' +haystack = '''abc''' +matches = [[[3, 3]]] +match-limit = 1 + +[[test]] +name = "basic111" +regex = '''a.c''' +haystack = '''abc''' +matches = [[[0, 3]]] +match-limit = 1 +anchored = true + +[[test]] +name = "basic112" +regex = '''a.c''' +haystack = '''axc''' +matches = [[[0, 3]]] +match-limit = 1 +anchored = true + +[[test]] +name = "basic113" +regex = '''a.*c''' +haystack = '''axyzc''' +matches = [[[0, 5]]] +match-limit = 1 +anchored = true + +[[test]] +name = 
"basic114" +regex = '''a[bc]d''' +haystack = '''abd''' +matches = [[[0, 3]]] +match-limit = 1 +anchored = true + +[[test]] +name = "basic115" +regex = '''a[b-d]e''' +haystack = '''ace''' +matches = [[[0, 3]]] +match-limit = 1 +anchored = true + +[[test]] +name = "basic116" +regex = '''a[b-d]''' +haystack = '''aac''' +matches = [[[1, 3]]] +match-limit = 1 + +[[test]] +name = "basic117" +regex = '''a[-b]''' +haystack = '''a-''' +matches = [[[0, 2]]] +match-limit = 1 +anchored = true + +[[test]] +name = "basic118" +regex = '''a[b-]''' +haystack = '''a-''' +matches = [[[0, 2]]] +match-limit = 1 +anchored = true + +[[test]] +name = "basic119" +regex = '''a]''' +haystack = '''a]''' +matches = [[[0, 2]]] +match-limit = 1 +anchored = true + +[[test]] +name = "basic120" +regex = '''a[]]b''' +haystack = '''a]b''' +matches = [[[0, 3]]] +match-limit = 1 +anchored = true + +[[test]] +name = "basic121" +regex = '''a[^bc]d''' +haystack = '''aed''' +matches = [[[0, 3]]] +match-limit = 1 +anchored = true + +[[test]] +name = "basic122" +regex = '''a[^-b]c''' +haystack = '''adc''' +matches = [[[0, 3]]] +match-limit = 1 +anchored = true + +[[test]] +name = "basic123" +regex = '''a[^]b]c''' +haystack = '''adc''' +matches = [[[0, 3]]] +match-limit = 1 +anchored = true + +[[test]] +name = "basic124" +regex = '''ab|cd''' +haystack = '''abc''' +matches = [[[0, 2]]] +match-limit = 1 +anchored = true + +[[test]] +name = "basic125" +regex = '''ab|cd''' +haystack = '''abcd''' +matches = [[[0, 2]]] +match-limit = 1 +anchored = true + +[[test]] +name = "basic126" +regex = '''a\(b''' +haystack = '''a(b''' +matches = [[[0, 3]]] +match-limit = 1 +anchored = true + +[[test]] +name = "basic127" +regex = '''a\(*b''' +haystack = '''ab''' +matches = [[[0, 2]]] +match-limit = 1 +anchored = true + +[[test]] +name = "basic128" +regex = '''a\(*b''' +haystack = '''a((b''' +matches = [[[0, 4]]] +match-limit = 1 +anchored = true + +[[test]] +name = "basic129" +regex = '''((a))''' +haystack = '''abc''' +matches = [[[0, 1], [0, 1], [0, 1]]] +match-limit = 1 +anchored = true + +[[test]] +name = "basic130" +regex = '''(a)b(c)''' +haystack = '''abc''' +matches = [[[0, 3], [0, 1], [2, 3]]] +match-limit = 1 +anchored = true + +[[test]] +name = "basic131" +regex = '''a+b+c''' +haystack = '''aabbabc''' +matches = [[[4, 7]]] +match-limit = 1 + +[[test]] +name = "basic132" +regex = '''a*''' +haystack = '''aaa''' +matches = [[[0, 3]]] +match-limit = 1 +anchored = true + +[[test]] +name = "basic133" +regex = '''(a*)*''' +haystack = '''-''' +matches = [[[0, 0], [0, 0]]] +match-limit = 1 +anchored = true + +[[test]] +name = "basic134" +regex = '''(a*)+''' +haystack = '''-''' +matches = [[[0, 0], [0, 0]]] +match-limit = 1 +anchored = true + +[[test]] +name = "basic135" +regex = '''(a*|b)*''' +haystack = '''-''' +matches = [[[0, 0], [0, 0]]] +match-limit = 1 +anchored = true + +[[test]] +name = "basic136" +regex = '''(a+|b)*''' +haystack = '''ab''' +matches = [[[0, 2], [1, 2]]] +match-limit = 1 +anchored = true + +[[test]] +name = "basic137" +regex = '''(a+|b)+''' +haystack = '''ab''' +matches = [[[0, 2], [1, 2]]] +match-limit = 1 +anchored = true + +[[test]] +name = "basic138" +regex = '''(a+|b)?''' +haystack = '''ab''' +matches = [[[0, 1], [0, 1]]] +match-limit = 1 +anchored = true + +[[test]] +name = "basic139" +regex = '''[^ab]*''' +haystack = '''cde''' +matches = [[[0, 3]]] +match-limit = 1 +anchored = true + +[[test]] +name = "basic140" +regex = '''(^)*''' +haystack = '''-''' +matches = [[[0, 0], [0, 0]]] +match-limit = 1 +anchored = true + 
+[[test]] +name = "basic141" +regex = '''a*''' +haystack = '''''' +matches = [[[0, 0]]] +match-limit = 1 +anchored = true + +[[test]] +name = "basic142" +regex = '''([abc])*d''' +haystack = '''abbbcd''' +matches = [[[0, 6], [4, 5]]] +match-limit = 1 +anchored = true + +[[test]] +name = "basic143" +regex = '''([abc])*bcd''' +haystack = '''abcd''' +matches = [[[0, 4], [0, 1]]] +match-limit = 1 +anchored = true + +[[test]] +name = "basic144" +regex = '''a|b|c|d|e''' +haystack = '''e''' +matches = [[[0, 1]]] +match-limit = 1 +anchored = true + +[[test]] +name = "basic145" +regex = '''(a|b|c|d|e)f''' +haystack = '''ef''' +matches = [[[0, 2], [0, 1]]] +match-limit = 1 +anchored = true + +[[test]] +name = "basic146" +regex = '''((a*|b))*''' +haystack = '''-''' +matches = [[[0, 0], [0, 0], [0, 0]]] +match-limit = 1 +anchored = true + +[[test]] +name = "basic147" +regex = '''abcd*efg''' +haystack = '''abcdefg''' +matches = [[[0, 7]]] +match-limit = 1 +anchored = true + +[[test]] +name = "basic148" +regex = '''ab*''' +haystack = '''xabyabbbz''' +matches = [[[1, 3]]] +match-limit = 1 + +[[test]] +name = "basic149" +regex = '''ab*''' +haystack = '''xayabbbz''' +matches = [[[1, 2]]] +match-limit = 1 + +[[test]] +name = "basic150" +regex = '''(ab|cd)e''' +haystack = '''abcde''' +matches = [[[2, 5], [2, 4]]] +match-limit = 1 + +[[test]] +name = "basic151" +regex = '''[abhgefdc]ij''' +haystack = '''hij''' +matches = [[[0, 3]]] +match-limit = 1 +anchored = true + +[[test]] +name = "basic152" +regex = '''(a|b)c*d''' +haystack = '''abcd''' +matches = [[[1, 4], [1, 2]]] +match-limit = 1 + +[[test]] +name = "basic153" +regex = '''(ab|ab*)bc''' +haystack = '''abc''' +matches = [[[0, 3], [0, 1]]] +match-limit = 1 +anchored = true + +[[test]] +name = "basic154" +regex = '''a([bc]*)c*''' +haystack = '''abc''' +matches = [[[0, 3], [1, 3]]] +match-limit = 1 +anchored = true + +[[test]] +name = "basic155" +regex = '''a([bc]*)(c*d)''' +haystack = '''abcd''' +matches = [[[0, 4], [1, 3], [3, 4]]] +match-limit = 1 +anchored = true + +[[test]] +name = "basic156" +regex = '''a([bc]+)(c*d)''' +haystack = '''abcd''' +matches = [[[0, 4], [1, 3], [3, 4]]] +match-limit = 1 +anchored = true + +[[test]] +name = "basic157" +regex = '''a([bc]*)(c+d)''' +haystack = '''abcd''' +matches = [[[0, 4], [1, 2], [2, 4]]] +match-limit = 1 +anchored = true + +[[test]] +name = "basic158" +regex = '''a[bcd]*dcdcde''' +haystack = '''adcdcde''' +matches = [[[0, 7]]] +match-limit = 1 +anchored = true + +[[test]] +name = "basic159" +regex = '''(ab|a)b*c''' +haystack = '''abc''' +matches = [[[0, 3], [0, 2]]] +match-limit = 1 +anchored = true + +[[test]] +name = "basic160" +regex = '''((a)(b)c)(d)''' +haystack = '''abcd''' +matches = [[[0, 4], [0, 3], [0, 1], [1, 2], [3, 4]]] +match-limit = 1 +anchored = true + +[[test]] +name = "basic161" +regex = '''[A-Za-z_][A-Za-z0-9_]*''' +haystack = '''alpha''' +matches = [[[0, 5]]] +match-limit = 1 +anchored = true + +[[test]] +name = "basic162" +regex = '''^a(bc+|b[eh])g|.h$''' +haystack = '''abh''' +matches = [[[1, 3], []]] +match-limit = 1 + +[[test]] +name = "basic163" +regex = '''(bc+d$|ef*g.|h?i(j|k))''' +haystack = '''effgz''' +matches = [[[0, 5], [0, 5], []]] +match-limit = 1 +anchored = true + +[[test]] +name = "basic164" +regex = '''(bc+d$|ef*g.|h?i(j|k))''' +haystack = '''ij''' +matches = [[[0, 2], [0, 2], [1, 2]]] +match-limit = 1 +anchored = true + +[[test]] +name = "basic165" +regex = '''(bc+d$|ef*g.|h?i(j|k))''' +haystack = '''reffgz''' +matches = [[[1, 6], [1, 6], []]] +match-limit = 1 + 
+[[test]] +name = "basic166" +regex = '''(((((((((a)))))))))''' +haystack = '''a''' +matches = [[[0, 1], [0, 1], [0, 1], [0, 1], [0, 1], [0, 1], [0, 1], [0, 1], [0, 1], [0, 1]]] +match-limit = 1 +anchored = true + +[[test]] +name = "basic167" +regex = '''multiple words''' +haystack = '''multiple words yeah''' +matches = [[[0, 14]]] +match-limit = 1 +anchored = true + +[[test]] +name = "basic168" +regex = '''(.*)c(.*)''' +haystack = '''abcde''' +matches = [[[0, 5], [0, 2], [3, 5]]] +match-limit = 1 +anchored = true + +[[test]] +name = "basic169" +regex = '''abcd''' +haystack = '''abcd''' +matches = [[[0, 4]]] +match-limit = 1 +anchored = true + +[[test]] +name = "basic170" +regex = '''a(bc)d''' +haystack = '''abcd''' +matches = [[[0, 4], [1, 3]]] +match-limit = 1 +anchored = true + +[[test]] +name = "basic171" +regex = '''a[\x01-\x03]?c''' +haystack = '''a\x02c''' +matches = [[[0, 3]]] +match-limit = 1 +anchored = true +unescape = true + +[[test]] +name = "basic172" +regex = '''M[ou]'?am+[ae]r .*([AEae]l[- ])?[GKQ]h?[aeu]+([dtz][dhz]?)+af[iy]''' +haystack = '''Muammar Qaddafi''' +matches = [[[0, 15], [], [10, 12]]] +match-limit = 1 +anchored = true + +[[test]] +name = "basic173" +regex = '''M[ou]'?am+[ae]r .*([AEae]l[- ])?[GKQ]h?[aeu]+([dtz][dhz]?)+af[iy]''' +haystack = '''Mo'ammar Gadhafi''' +matches = [[[0, 16], [], [11, 13]]] +match-limit = 1 +anchored = true + +[[test]] +name = "basic174" +regex = '''M[ou]'?am+[ae]r .*([AEae]l[- ])?[GKQ]h?[aeu]+([dtz][dhz]?)+af[iy]''' +haystack = '''Muammar Kaddafi''' +matches = [[[0, 15], [], [10, 12]]] +match-limit = 1 +anchored = true + +[[test]] +name = "basic175" +regex = '''M[ou]'?am+[ae]r .*([AEae]l[- ])?[GKQ]h?[aeu]+([dtz][dhz]?)+af[iy]''' +haystack = '''Muammar Qadhafi''' +matches = [[[0, 15], [], [10, 12]]] +match-limit = 1 +anchored = true + +[[test]] +name = "basic176" +regex = '''M[ou]'?am+[ae]r .*([AEae]l[- ])?[GKQ]h?[aeu]+([dtz][dhz]?)+af[iy]''' +haystack = '''Muammar Gadafi''' +matches = [[[0, 14], [], [10, 11]]] +match-limit = 1 +anchored = true + +[[test]] +name = "basic177" +regex = '''M[ou]'?am+[ae]r .*([AEae]l[- ])?[GKQ]h?[aeu]+([dtz][dhz]?)+af[iy]''' +haystack = '''Mu'ammar Qadafi''' +matches = [[[0, 15], [], [11, 12]]] +match-limit = 1 +anchored = true + +[[test]] +name = "basic178" +regex = '''M[ou]'?am+[ae]r .*([AEae]l[- ])?[GKQ]h?[aeu]+([dtz][dhz]?)+af[iy]''' +haystack = '''Moamar Gaddafi''' +matches = [[[0, 14], [], [9, 11]]] +match-limit = 1 +anchored = true + +[[test]] +name = "basic179" +regex = '''M[ou]'?am+[ae]r .*([AEae]l[- ])?[GKQ]h?[aeu]+([dtz][dhz]?)+af[iy]''' +haystack = '''Mu'ammar Qadhdhafi''' +matches = [[[0, 18], [], [13, 15]]] +match-limit = 1 +anchored = true + +[[test]] +name = "basic180" +regex = '''M[ou]'?am+[ae]r .*([AEae]l[- ])?[GKQ]h?[aeu]+([dtz][dhz]?)+af[iy]''' +haystack = '''Muammar Khaddafi''' +matches = [[[0, 16], [], [11, 13]]] +match-limit = 1 +anchored = true + +[[test]] +name = "basic181" +regex = '''M[ou]'?am+[ae]r .*([AEae]l[- ])?[GKQ]h?[aeu]+([dtz][dhz]?)+af[iy]''' +haystack = '''Muammar Ghaddafy''' +matches = [[[0, 16], [], [11, 13]]] +match-limit = 1 +anchored = true + +[[test]] +name = "basic182" +regex = '''M[ou]'?am+[ae]r .*([AEae]l[- ])?[GKQ]h?[aeu]+([dtz][dhz]?)+af[iy]''' +haystack = '''Muammar Ghadafi''' +matches = [[[0, 15], [], [11, 12]]] +match-limit = 1 +anchored = true + +[[test]] +name = "basic183" +regex = '''M[ou]'?am+[ae]r .*([AEae]l[- ])?[GKQ]h?[aeu]+([dtz][dhz]?)+af[iy]''' +haystack = '''Muammar Ghaddafi''' +matches = [[[0, 16], [], [11, 13]]] +match-limit = 1 +anchored = 
true + +[[test]] +name = "basic184" +regex = '''M[ou]'?am+[ae]r .*([AEae]l[- ])?[GKQ]h?[aeu]+([dtz][dhz]?)+af[iy]''' +haystack = '''Muamar Kaddafi''' +matches = [[[0, 14], [], [9, 11]]] +match-limit = 1 +anchored = true + +[[test]] +name = "basic185" +regex = '''M[ou]'?am+[ae]r .*([AEae]l[- ])?[GKQ]h?[aeu]+([dtz][dhz]?)+af[iy]''' +haystack = '''Muammar Quathafi''' +matches = [[[0, 16], [], [11, 13]]] +match-limit = 1 +anchored = true + +[[test]] +name = "basic186" +regex = '''M[ou]'?am+[ae]r .*([AEae]l[- ])?[GKQ]h?[aeu]+([dtz][dhz]?)+af[iy]''' +haystack = '''Muammar Gheddafi''' +matches = [[[0, 16], [], [11, 13]]] +match-limit = 1 +anchored = true + +[[test]] +name = "basic187" +regex = '''M[ou]'?am+[ae]r .*([AEae]l[- ])?[GKQ]h?[aeu]+([dtz][dhz]?)+af[iy]''' +haystack = '''Moammar Khadafy''' +matches = [[[0, 15], [], [11, 12]]] +match-limit = 1 +anchored = true + +[[test]] +name = "basic188" +regex = '''M[ou]'?am+[ae]r .*([AEae]l[- ])?[GKQ]h?[aeu]+([dtz][dhz]?)+af[iy]''' +haystack = '''Moammar Qudhafi''' +matches = [[[0, 15], [], [10, 12]]] +match-limit = 1 +anchored = true + +[[test]] +name = "basic189" +regex = '''a+(b|c)*d+''' +haystack = '''aabcdd''' +matches = [[[0, 6], [3, 4]]] +match-limit = 1 +anchored = true + +[[test]] +name = "basic190" +regex = '''^.+$''' +haystack = '''vivi''' +matches = [[[0, 4]]] +match-limit = 1 +anchored = true + +[[test]] +name = "basic191" +regex = '''^(.+)$''' +haystack = '''vivi''' +matches = [[[0, 4], [0, 4]]] +match-limit = 1 +anchored = true + +[[test]] +name = "basic192" +regex = '''^([^!.]+).att.com!(.+)$''' +haystack = '''gryphon.att.com!eby''' +matches = [[[0, 19], [0, 7], [16, 19]]] +match-limit = 1 +anchored = true + +[[test]] +name = "basic193" +regex = '''^([^!]+!)?([^!]+)$''' +haystack = '''bas''' +matches = [[[0, 3], [], [0, 3]]] +match-limit = 1 +anchored = true + +[[test]] +name = "basic194" +regex = '''^([^!]+!)?([^!]+)$''' +haystack = '''bar!bas''' +matches = [[[0, 7], [0, 4], [4, 7]]] +match-limit = 1 +anchored = true + +[[test]] +name = "basic195" +regex = '''^([^!]+!)?([^!]+)$''' +haystack = '''foo!bas''' +matches = [[[0, 7], [0, 4], [4, 7]]] +match-limit = 1 +anchored = true + +[[test]] +name = "basic196" +regex = '''^.+!([^!]+!)([^!]+)$''' +haystack = '''foo!bar!bas''' +matches = [[[0, 11], [4, 8], [8, 11]]] +match-limit = 1 +anchored = true + +[[test]] +name = "basic197" +regex = '''((foo)|(bar))!bas''' +haystack = '''bar!bas''' +matches = [[[0, 7], [0, 3], [], [0, 3]]] +match-limit = 1 +anchored = true + +[[test]] +name = "basic198" +regex = '''((foo)|(bar))!bas''' +haystack = '''foo!bar!bas''' +matches = [[[4, 11], [4, 7], [], [4, 7]]] +match-limit = 1 + +[[test]] +name = "basic199" +regex = '''((foo)|(bar))!bas''' +haystack = '''foo!bas''' +matches = [[[0, 7], [0, 3], [0, 3], []]] +match-limit = 1 +anchored = true + +[[test]] +name = "basic200" +regex = '''((foo)|bar)!bas''' +haystack = '''bar!bas''' +matches = [[[0, 7], [0, 3], []]] +match-limit = 1 +anchored = true + +[[test]] +name = "basic201" +regex = '''((foo)|bar)!bas''' +haystack = '''foo!bar!bas''' +matches = [[[4, 11], [4, 7], []]] +match-limit = 1 + +[[test]] +name = "basic202" +regex = '''((foo)|bar)!bas''' +haystack = '''foo!bas''' +matches = [[[0, 7], [0, 3], [0, 3]]] +match-limit = 1 +anchored = true + +[[test]] +name = "basic203" +regex = '''(foo|(bar))!bas''' +haystack = '''bar!bas''' +matches = [[[0, 7], [0, 3], [0, 3]]] +match-limit = 1 +anchored = true + +[[test]] +name = "basic204" +regex = '''(foo|(bar))!bas''' +haystack = '''foo!bar!bas''' +matches = 
[[[4, 11], [4, 7], [4, 7]]] +match-limit = 1 + +[[test]] +name = "basic205" +regex = '''(foo|(bar))!bas''' +haystack = '''foo!bas''' +matches = [[[0, 7], [0, 3], []]] +match-limit = 1 +anchored = true + +[[test]] +name = "basic206" +regex = '''(foo|bar)!bas''' +haystack = '''bar!bas''' +matches = [[[0, 7], [0, 3]]] +match-limit = 1 +anchored = true + +[[test]] +name = "basic207" +regex = '''(foo|bar)!bas''' +haystack = '''foo!bar!bas''' +matches = [[[4, 11], [4, 7]]] +match-limit = 1 + +[[test]] +name = "basic208" +regex = '''(foo|bar)!bas''' +haystack = '''foo!bas''' +matches = [[[0, 7], [0, 3]]] +match-limit = 1 +anchored = true + +[[test]] +name = "basic209" +regex = '''^(([^!]+!)?([^!]+)|.+!([^!]+!)([^!]+))$''' +haystack = '''foo!bar!bas''' +matches = [[[0, 11], [0, 11], [], [], [4, 8], [8, 11]]] +match-limit = 1 +anchored = true + +[[test]] +name = "basic210" +regex = '''^([^!]+!)?([^!]+)$|^.+!([^!]+!)([^!]+)$''' +haystack = '''bas''' +matches = [[[0, 3], [], [0, 3], [], []]] +match-limit = 1 +anchored = true + +[[test]] +name = "basic211" +regex = '''^([^!]+!)?([^!]+)$|^.+!([^!]+!)([^!]+)$''' +haystack = '''bar!bas''' +matches = [[[0, 7], [0, 4], [4, 7], [], []]] +match-limit = 1 +anchored = true + +[[test]] +name = "basic212" +regex = '''^([^!]+!)?([^!]+)$|^.+!([^!]+!)([^!]+)$''' +haystack = '''foo!bar!bas''' +matches = [[[0, 11], [], [], [4, 8], [8, 11]]] +match-limit = 1 +anchored = true + +[[test]] +name = "basic213" +regex = '''^([^!]+!)?([^!]+)$|^.+!([^!]+!)([^!]+)$''' +haystack = '''foo!bas''' +matches = [[[0, 7], [0, 4], [4, 7], [], []]] +match-limit = 1 +anchored = true + +[[test]] +name = "basic214" +regex = '''^(([^!]+!)?([^!]+)|.+!([^!]+!)([^!]+))$''' +haystack = '''bas''' +matches = [[[0, 3], [0, 3], [], [0, 3], [], []]] +match-limit = 1 +anchored = true + +[[test]] +name = "basic215" +regex = '''^(([^!]+!)?([^!]+)|.+!([^!]+!)([^!]+))$''' +haystack = '''bar!bas''' +matches = [[[0, 7], [0, 7], [0, 4], [4, 7], [], []]] +match-limit = 1 +anchored = true + +[[test]] +name = "basic216" +regex = '''^(([^!]+!)?([^!]+)|.+!([^!]+!)([^!]+))$''' +haystack = '''foo!bar!bas''' +matches = [[[0, 11], [0, 11], [], [], [4, 8], [8, 11]]] +match-limit = 1 +anchored = true + +[[test]] +name = "basic217" +regex = '''^(([^!]+!)?([^!]+)|.+!([^!]+!)([^!]+))$''' +haystack = '''foo!bas''' +matches = [[[0, 7], [0, 7], [0, 4], [4, 7], [], []]] +match-limit = 1 +anchored = true + +[[test]] +name = "basic218" +regex = '''.*(/XXX).*''' +haystack = '''/XXX''' +matches = [[[0, 4], [0, 4]]] +match-limit = 1 +anchored = true + +[[test]] +name = "basic219" +regex = '''.*(\\XXX).*''' +haystack = '''\XXX''' +matches = [[[0, 4], [0, 4]]] +match-limit = 1 +anchored = true + +[[test]] +name = "basic220" +regex = '''\\XXX''' +haystack = '''\XXX''' +matches = [[[0, 4]]] +match-limit = 1 +anchored = true + +[[test]] +name = "basic221" +regex = '''.*(/000).*''' +haystack = '''/000''' +matches = [[[0, 4], [0, 4]]] +match-limit = 1 +anchored = true + +[[test]] +name = "basic222" +regex = '''.*(\\000).*''' +haystack = '''\000''' +matches = [[[0, 4], [0, 4]]] +match-limit = 1 +anchored = true + +[[test]] +name = "basic223" +regex = '''\\000''' +haystack = '''\000''' +matches = [[[0, 4]]] +match-limit = 1 +anchored = true + diff --git a/vendor/regex/testdata/fowler/dat/README b/vendor/regex/testdata/fowler/dat/README new file mode 100644 index 00000000000000..242a0e6c3a9914 --- /dev/null +++ b/vendor/regex/testdata/fowler/dat/README @@ -0,0 +1,25 @@ +Test data was taken from the Go distribution, which was in turn taken 
from the +testregex test suite: + + http://web.archive.org/web/20150925124103/http://www2.research.att.com/~astopen/testregex/testregex.html + +Unfortunately, the original web site now appears dead, but the test data lives +on. + +The LICENSE in this directory corresponds to the LICENSE that the data was +originally released under. + +The tests themselves were modified for RE2/Go (and marked as such). A +couple were modified further by me (Andrew Gallant) and marked with 'Rust'. + +After some number of years, these tests were transformed into a TOML format +using the 'regex-cli generate fowler' command. To re-generate the +TOML files, run the following from the root of this repository: + + regex-cli generate fowler tests/data/fowler tests/data/fowler/dat/*.dat + +This assumes that you have 'regex-cli' installed. See 'regex-cli/README.md' +from the root of the repository for more information. + +This brings the Fowler tests into a more "sensible" structured format in which +other tests can be written such that they aren't write-only. diff --git a/vendor/regex/testdata/fowler/dat/basic.dat b/vendor/regex/testdata/fowler/dat/basic.dat new file mode 100644 index 00000000000000..654a72b39b821b --- /dev/null +++ b/vendor/regex/testdata/fowler/dat/basic.dat @@ -0,0 +1,223 @@ +NOTE all standard compliant implementations should pass these : 2002-05-31 + +BE abracadabra$ abracadabracadabra (7,18) +BE a...b abababbb (2,7) +BE XXXXXX ..XXXXXX (2,8) +E \) () (1,2) +BE a] a]a (0,2) +B } } (0,1) +E \} } (0,1) +BE \] ] (0,1) +B ] ] (0,1) +E ] ] (0,1) +B { { (0,1) +B } } (0,1) +BE ^a ax (0,1) +BE \^a a^a (1,3) +BE a\^ a^ (0,2) +BE a$ aa (1,2) +BE a\$ a$ (0,2) +BE ^$ NULL (0,0) +E $^ NULL (0,0) +E a($) aa (1,2)(2,2) +E a*(^a) aa (0,1)(0,1) +E (..)*(...)* a (0,0) +E (..)*(...)* abcd (0,4)(2,4) +E (ab|a)(bc|c) abc (0,3)(0,2)(2,3) +E (ab)c|abc abc (0,3)(0,2) +E a{0}b ab (1,2) +E (a*)(b?)(b+)b{3} aaabbbbbbb (0,10)(0,3)(3,4)(4,7) +E (a*)(b{0,1})(b{1,})b{3} aaabbbbbbb (0,10)(0,3)(3,4)(4,7) +E a{9876543210} NULL BADBR +E ((a|a)|a) a (0,1)(0,1)(0,1) +E (a*)(a|aa) aaaa (0,4)(0,3)(3,4) +E a*(a.|aa) aaaa (0,4)(2,4) +E a(b)|c(d)|a(e)f aef (0,3)(?,?)(?,?)(1,2) +E (a|b)?.* b (0,1)(0,1) +E (a|b)c|a(b|c) ac (0,2)(0,1) +E (a|b)c|a(b|c) ab (0,2)(?,?)(1,2) +E (a|b)*c|(a|ab)*c abc (0,3)(1,2) +E (a|b)*c|(a|ab)*c xc (1,2) +E (.a|.b).*|.*(.a|.b) xa (0,2)(0,2) +E a?(ab|ba)ab abab (0,4)(0,2) +E a?(ac{0}b|ba)ab abab (0,4)(0,2) +E ab|abab abbabab (0,2) +E aba|bab|bba baaabbbaba (5,8) +E aba|bab baaabbbaba (6,9) +E (aa|aaa)*|(a|aaaaa) aa (0,2)(0,2) +E (a.|.a.)*|(a|.a...) 
aa (0,2)(0,2) +E ab|a xabc (1,3) +E ab|a xxabc (2,4) +Ei (Ab|cD)* aBcD (0,4)(2,4) +BE [^-] --a (2,3) +BE [a-]* --a (0,3) +BE [a-m-]* --amoma-- (0,4) +E :::1:::0:|:::1:1:0: :::0:::1:::1:::0: (8,17) +E :::1:::0:|:::1:1:1: :::0:::1:::1:::0: (8,17) +{E [[:upper:]] A (0,1) [[<element>]] not supported +E [[:lower:]]+ `az{ (1,3) +E [[:upper:]]+ @AZ[ (1,3) +# No collation in Go +#BE [[-]] [[-]] (2,4) +#BE [[.NIL.]] NULL ECOLLATE +#BE [[=aleph=]] NULL ECOLLATE +} +BE$ \n \n (0,1) +BEn$ \n \n (0,1) +BE$ [^a] \n (0,1) +BE$ \na \na (0,2) +E (a)(b)(c) abc (0,3)(0,1)(1,2)(2,3) +BE xxx xxx (0,3) +#E1 (^|[ (,;])((([Ff]eb[^ ]* *|0*2/|\* */?)0*[6-7]))([^0-9]|$) feb 6, (0,6) +E (?:^|[ (,;])(?:(?:(?:[Ff]eb[^ ]* *|0*2/|\* */?)0*[6-7]))(?:[^0-9]|$) feb 6, (0,6) Rust +#E1 (^|[ (,;])((([Ff]eb[^ ]* *|0*2/|\* */?)0*[6-7]))([^0-9]|$) 2/7 (0,3) +E (?:^|[ (,;])(?:(?:(?:[Ff]eb[^ ]* *|0*2/|\* */?)0*[6-7]))(?:[^0-9]|$) 2/7 (0,3) Rust +#E1 (^|[ (,;])((([Ff]eb[^ ]* *|0*2/|\* */?)0*[6-7]))([^0-9]|$) feb 1,Feb 6 (5,11) +E (?:^|[ (,;])(?:(?:(?:[Ff]eb[^ ]* *|0*2/|\* */?)0*[6-7]))(?:[^0-9]|$) feb 1,Feb 6 (5,11) Rust +#E3 ((((((((((((((((((((((((((((((x)))))))))))))))))))))))))))))) x (0,1)(0,1)(0,1) +E (((?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:x)))))))))))))))))))))))))))))) x (0,1)(0,1)(0,1) Rust +#E3 ((((((((((((((((((((((((((((((x))))))))))))))))))))))))))))))* xx (0,2)(1,2)(1,2) +E (((?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:x))))))))))))))))))))))))))))))* xx (0,2)(1,2)(1,2) Rust +E a?(ab|ba)* ababababababababababababababababababababababababababababababababababababababababa (0,81)(79,81) +E abaa|abbaa|abbbaa|abbbbaa ababbabbbabbbabbbbabbbbaa (18,25) +E abaa|abbaa|abbbaa|abbbbaa ababbabbbabbbabbbbabaa (18,22) +E aaac|aabc|abac|abbc|baac|babc|bbac|bbbc baaabbbabac (7,11) +#BE$ .* \x01\xff (0,2) +BE$ .* \x01\x7f (0,2) Rust +E aaaa|bbbb|cccc|ddddd|eeeeee|fffffff|gggg|hhhh|iiiii|jjjjj|kkkkk|llll XaaaXbbbXcccXdddXeeeXfffXgggXhhhXiiiXjjjXkkkXlllXcbaXaaaa (53,57) +L aaaa\nbbbb\ncccc\nddddd\neeeeee\nfffffff\ngggg\nhhhh\niiiii\njjjjj\nkkkkk\nllll XaaaXbbbXcccXdddXeeeXfffXgggXhhhXiiiXjjjXkkkXlllXcbaXaaaa NOMATCH +E a*a*a*a*a*b aaaaaaaaab (0,10) +BE ^ NULL (0,0) +BE $ NULL (0,0) +BE ^$ NULL (0,0) +BE ^a$ a (0,1) +BE abc abc (0,3) +BE abc xabcy (1,4) +BE abc ababc (2,5) +BE ab*c abc (0,3) +BE ab*bc abc (0,3) +BE ab*bc abbc (0,4) +BE ab*bc abbbbc (0,6) +E ab+bc abbc (0,4) +E ab+bc abbbbc (0,6) +E ab?bc abbc (0,4) +E ab?bc abc (0,3) +E ab?c abc (0,3) +BE ^abc$ abc (0,3) +BE ^abc abcc (0,3) +BE abc$ aabc (1,4) +BE ^ abc (0,0) +BE $ abc (3,3) +BE a.c abc (0,3) +BE a.c axc (0,3) +BE a.*c axyzc (0,5) +BE a[bc]d abd (0,3) +BE a[b-d]e ace (0,3) +BE a[b-d] aac (1,3) +BE a[-b] a- (0,2) +BE a[b-] a- (0,2) +BE a] a] (0,2) +BE a[]]b a]b (0,3) +BE a[^bc]d aed (0,3) +BE a[^-b]c adc (0,3) +BE a[^]b]c adc (0,3) +E ab|cd abc (0,2) +E ab|cd abcd (0,2) +E a\(b a(b (0,3) +E a\(*b ab (0,2) +E a\(*b a((b (0,4) +E ((a)) abc (0,1)(0,1)(0,1) +E (a)b(c) abc (0,3)(0,1)(2,3) +E a+b+c aabbabc (4,7) +E a* aaa (0,3) +E (a*)* - (0,0)(0,0) +E (a*)+ - (0,0)(0,0) +E (a*|b)* - (0,0)(0,0) +E (a+|b)* ab (0,2)(1,2) +E (a+|b)+ ab (0,2)(1,2) +E (a+|b)? 
ab (0,1)(0,1) +BE [^ab]* cde (0,3) +E (^)* - (0,0)(0,0) +BE a* NULL (0,0) +E ([abc])*d abbbcd (0,6)(4,5) +E ([abc])*bcd abcd (0,4)(0,1) +E a|b|c|d|e e (0,1) +E (a|b|c|d|e)f ef (0,2)(0,1) +E ((a*|b))* - (0,0)(0,0)(0,0) +BE abcd*efg abcdefg (0,7) +BE ab* xabyabbbz (1,3) +BE ab* xayabbbz (1,2) +E (ab|cd)e abcde (2,5)(2,4) +BE [abhgefdc]ij hij (0,3) +E (a|b)c*d abcd (1,4)(1,2) +E (ab|ab*)bc abc (0,3)(0,1) +E a([bc]*)c* abc (0,3)(1,3) +E a([bc]*)(c*d) abcd (0,4)(1,3)(3,4) +E a([bc]+)(c*d) abcd (0,4)(1,3)(3,4) +E a([bc]*)(c+d) abcd (0,4)(1,2)(2,4) +E a[bcd]*dcdcde adcdcde (0,7) +E (ab|a)b*c abc (0,3)(0,2) +E ((a)(b)c)(d) abcd (0,4)(0,3)(0,1)(1,2)(3,4) +BE [A-Za-z_][A-Za-z0-9_]* alpha (0,5) +E ^a(bc+|b[eh])g|.h$ abh (1,3) +E (bc+d$|ef*g.|h?i(j|k)) effgz (0,5)(0,5) +E (bc+d$|ef*g.|h?i(j|k)) ij (0,2)(0,2)(1,2) +E (bc+d$|ef*g.|h?i(j|k)) reffgz (1,6)(1,6) +E (((((((((a))))))))) a (0,1)(0,1)(0,1)(0,1)(0,1)(0,1)(0,1)(0,1)(0,1)(0,1) +BE multiple words multiple words yeah (0,14) +E (.*)c(.*) abcde (0,5)(0,2)(3,5) +BE abcd abcd (0,4) +E a(bc)d abcd (0,4)(1,3) +E a[-]?c ac (0,3) +E M[ou]'?am+[ae]r .*([AEae]l[- ])?[GKQ]h?[aeu]+([dtz][dhz]?)+af[iy] Muammar Qaddafi (0,15)(?,?)(10,12) +E M[ou]'?am+[ae]r .*([AEae]l[- ])?[GKQ]h?[aeu]+([dtz][dhz]?)+af[iy] Mo'ammar Gadhafi (0,16)(?,?)(11,13) +E M[ou]'?am+[ae]r .*([AEae]l[- ])?[GKQ]h?[aeu]+([dtz][dhz]?)+af[iy] Muammar Kaddafi (0,15)(?,?)(10,12) +E M[ou]'?am+[ae]r .*([AEae]l[- ])?[GKQ]h?[aeu]+([dtz][dhz]?)+af[iy] Muammar Qadhafi (0,15)(?,?)(10,12) +E M[ou]'?am+[ae]r .*([AEae]l[- ])?[GKQ]h?[aeu]+([dtz][dhz]?)+af[iy] Muammar Gadafi (0,14)(?,?)(10,11) +E M[ou]'?am+[ae]r .*([AEae]l[- ])?[GKQ]h?[aeu]+([dtz][dhz]?)+af[iy] Mu'ammar Qadafi (0,15)(?,?)(11,12) +E M[ou]'?am+[ae]r .*([AEae]l[- ])?[GKQ]h?[aeu]+([dtz][dhz]?)+af[iy] Moamar Gaddafi (0,14)(?,?)(9,11) +E M[ou]'?am+[ae]r .*([AEae]l[- ])?[GKQ]h?[aeu]+([dtz][dhz]?)+af[iy] Mu'ammar Qadhdhafi (0,18)(?,?)(13,15) +E M[ou]'?am+[ae]r .*([AEae]l[- ])?[GKQ]h?[aeu]+([dtz][dhz]?)+af[iy] Muammar Khaddafi (0,16)(?,?)(11,13) +E M[ou]'?am+[ae]r .*([AEae]l[- ])?[GKQ]h?[aeu]+([dtz][dhz]?)+af[iy] Muammar Ghaddafy (0,16)(?,?)(11,13) +E M[ou]'?am+[ae]r .*([AEae]l[- ])?[GKQ]h?[aeu]+([dtz][dhz]?)+af[iy] Muammar Ghadafi (0,15)(?,?)(11,12) +E M[ou]'?am+[ae]r .*([AEae]l[- ])?[GKQ]h?[aeu]+([dtz][dhz]?)+af[iy] Muammar Ghaddafi (0,16)(?,?)(11,13) +E M[ou]'?am+[ae]r .*([AEae]l[- ])?[GKQ]h?[aeu]+([dtz][dhz]?)+af[iy] Muamar Kaddafi (0,14)(?,?)(9,11) +E M[ou]'?am+[ae]r .*([AEae]l[- ])?[GKQ]h?[aeu]+([dtz][dhz]?)+af[iy] Muammar Quathafi (0,16)(?,?)(11,13) +E M[ou]'?am+[ae]r .*([AEae]l[- ])?[GKQ]h?[aeu]+([dtz][dhz]?)+af[iy] Muammar Gheddafi (0,16)(?,?)(11,13) +E M[ou]'?am+[ae]r .*([AEae]l[- ])?[GKQ]h?[aeu]+([dtz][dhz]?)+af[iy] Moammar Khadafy (0,15)(?,?)(11,12) +E M[ou]'?am+[ae]r .*([AEae]l[- ])?[GKQ]h?[aeu]+([dtz][dhz]?)+af[iy] Moammar Qudhafi (0,15)(?,?)(10,12) +E a+(b|c)*d+ aabcdd (0,6)(3,4) +E ^.+$ vivi (0,4) +E ^(.+)$ vivi (0,4)(0,4) +E ^([^!.]+).att.com!(.+)$ gryphon.att.com!eby (0,19)(0,7)(16,19) +E ^([^!]+!)?([^!]+)$ bas (0,3)(?,?)(0,3) +E ^([^!]+!)?([^!]+)$ bar!bas (0,7)(0,4)(4,7) +E ^([^!]+!)?([^!]+)$ foo!bas (0,7)(0,4)(4,7) +E ^.+!([^!]+!)([^!]+)$ foo!bar!bas (0,11)(4,8)(8,11) +E ((foo)|(bar))!bas bar!bas (0,7)(0,3)(?,?)(0,3) +E ((foo)|(bar))!bas foo!bar!bas (4,11)(4,7)(?,?)(4,7) +E ((foo)|(bar))!bas foo!bas (0,7)(0,3)(0,3) +E ((foo)|bar)!bas bar!bas (0,7)(0,3) +E ((foo)|bar)!bas foo!bar!bas (4,11)(4,7) +E ((foo)|bar)!bas foo!bas (0,7)(0,3)(0,3) +E (foo|(bar))!bas bar!bas (0,7)(0,3)(0,3) +E (foo|(bar))!bas foo!bar!bas (4,11)(4,7)(4,7) +E 
(foo|(bar))!bas foo!bas (0,7)(0,3) +E (foo|bar)!bas bar!bas (0,7)(0,3) +E (foo|bar)!bas foo!bar!bas (4,11)(4,7) +E (foo|bar)!bas foo!bas (0,7)(0,3) +E ^(([^!]+!)?([^!]+)|.+!([^!]+!)([^!]+))$ foo!bar!bas (0,11)(0,11)(?,?)(?,?)(4,8)(8,11) +E ^([^!]+!)?([^!]+)$|^.+!([^!]+!)([^!]+)$ bas (0,3)(?,?)(0,3) +E ^([^!]+!)?([^!]+)$|^.+!([^!]+!)([^!]+)$ bar!bas (0,7)(0,4)(4,7) +E ^([^!]+!)?([^!]+)$|^.+!([^!]+!)([^!]+)$ foo!bar!bas (0,11)(?,?)(?,?)(4,8)(8,11) +E ^([^!]+!)?([^!]+)$|^.+!([^!]+!)([^!]+)$ foo!bas (0,7)(0,4)(4,7) +E ^(([^!]+!)?([^!]+)|.+!([^!]+!)([^!]+))$ bas (0,3)(0,3)(?,?)(0,3) +E ^(([^!]+!)?([^!]+)|.+!([^!]+!)([^!]+))$ bar!bas (0,7)(0,7)(0,4)(4,7) +E ^(([^!]+!)?([^!]+)|.+!([^!]+!)([^!]+))$ foo!bar!bas (0,11)(0,11)(?,?)(?,?)(4,8)(8,11) +E ^(([^!]+!)?([^!]+)|.+!([^!]+!)([^!]+))$ foo!bas (0,7)(0,7)(0,4)(4,7) +E .*(/XXX).* /XXX (0,4)(0,4) +E .*(\\XXX).* \XXX (0,4)(0,4) +E \\XXX \XXX (0,4) +E .*(/000).* /000 (0,4)(0,4) +E .*(\\000).* \000 (0,4)(0,4) +E \\000 \000 (0,4) diff --git a/vendor/regex/testdata/fowler/dat/nullsubexpr.dat b/vendor/regex/testdata/fowler/dat/nullsubexpr.dat new file mode 100644 index 00000000000000..eb3e721d3301a1 --- /dev/null +++ b/vendor/regex/testdata/fowler/dat/nullsubexpr.dat @@ -0,0 +1,74 @@ +NOTE null subexpression matches : 2002-06-06 + +E (a*)* a (0,1)(0,1) +E SAME x (0,0)(0,0) +E SAME aaaaaa (0,6)(0,6) +E SAME aaaaaax (0,6)(0,6) +E (a*)+ a (0,1)(0,1) +E SAME x (0,0)(0,0) +E SAME aaaaaa (0,6)(0,6) +E SAME aaaaaax (0,6)(0,6) +E (a+)* a (0,1)(0,1) +E SAME x (0,0) +E SAME aaaaaa (0,6)(0,6) +E SAME aaaaaax (0,6)(0,6) +E (a+)+ a (0,1)(0,1) +E SAME x NOMATCH +E SAME aaaaaa (0,6)(0,6) +E SAME aaaaaax (0,6)(0,6) + +E ([a]*)* a (0,1)(0,1) +E SAME x (0,0)(0,0) +E SAME aaaaaa (0,6)(0,6) +E SAME aaaaaax (0,6)(0,6) +E ([a]*)+ a (0,1)(0,1) +E SAME x (0,0)(0,0) +E SAME aaaaaa (0,6)(0,6) +E SAME aaaaaax (0,6)(0,6) +E ([^b]*)* a (0,1)(0,1) +E SAME b (0,0)(0,0) +E SAME aaaaaa (0,6)(0,6) +E SAME aaaaaab (0,6)(0,6) +E ([ab]*)* a (0,1)(0,1) +E SAME aaaaaa (0,6)(0,6) +E SAME ababab (0,6)(0,6) +E SAME bababa (0,6)(0,6) +E SAME b (0,1)(0,1) +E SAME bbbbbb (0,6)(0,6) +E SAME aaaabcde (0,5)(0,5) +E ([^a]*)* b (0,1)(0,1) +E SAME bbbbbb (0,6)(0,6) +E SAME aaaaaa (0,0)(0,0) +E ([^ab]*)* ccccxx (0,6)(0,6) +E SAME ababab (0,0)(0,0) + +#E ((z)+|a)* zabcde (0,2)(1,2) +E ((z)+|a)* zabcde (0,2)(1,2)(0,1) Rust + +#{E a+? aaaaaa (0,1) no *? +? minimal match ops +#E (a) aaa (0,1)(0,1) +#E (a*?) aaa (0,0)(0,0) +#E (a)*? aaa (0,0) +#E (a*?)*? aaa (0,0) +#} + +B \(a*\)*\(x\) x (0,1)(0,0)(0,1) +B \(a*\)*\(x\) ax (0,2)(0,1)(1,2) +B \(a*\)*\(x\) axa (0,2)(0,1)(1,2) +B \(a*\)*\(x\)\(\1\) x (0,1)(0,0)(0,1)(1,1) +B \(a*\)*\(x\)\(\1\) ax (0,2)(1,1)(1,2)(2,2) +B \(a*\)*\(x\)\(\1\) axa (0,3)(0,1)(1,2)(2,3) +B \(a*\)*\(x\)\(\1\)\(x\) axax (0,4)(0,1)(1,2)(2,3)(3,4) +B \(a*\)*\(x\)\(\1\)\(x\) axxa (0,3)(1,1)(1,2)(2,2)(2,3) + +E (a*)*(x) x (0,1)(0,0)(0,1) +E (a*)*(x) ax (0,2)(0,1)(1,2) +E (a*)*(x) axa (0,2)(0,1)(1,2) + +E (a*)+(x) x (0,1)(0,0)(0,1) +E (a*)+(x) ax (0,2)(0,1)(1,2) +E (a*)+(x) axa (0,2)(0,1)(1,2) + +E (a*){2}(x) x (0,1)(0,0)(0,1) +E (a*){2}(x) ax (0,2)(1,1)(1,2) +E (a*){2}(x) axa (0,2)(1,1)(1,2) diff --git a/vendor/regex/testdata/fowler/dat/repetition.dat b/vendor/regex/testdata/fowler/dat/repetition.dat new file mode 100644 index 00000000000000..cf0d8382f84357 --- /dev/null +++ b/vendor/regex/testdata/fowler/dat/repetition.dat @@ -0,0 +1,169 @@ +NOTE implicit vs. 
explicit repetitions : 2009-02-02 + +# Glenn Fowler <gsf@research.att.com> +# conforming matches (column 4) must match one of the following BREs +# NOMATCH +# (0,.)\((\(.\),\(.\))(?,?)(\2,\3)\)* +# (0,.)\((\(.\),\(.\))(\2,\3)(?,?)\)* +# i.e., each 3-tuple has two identical elements and one (?,?) + +E ((..)|(.)) NULL NOMATCH +E ((..)|(.))((..)|(.)) NULL NOMATCH +E ((..)|(.))((..)|(.))((..)|(.)) NULL NOMATCH + +E ((..)|(.)){1} NULL NOMATCH +E ((..)|(.)){2} NULL NOMATCH +E ((..)|(.)){3} NULL NOMATCH + +E ((..)|(.))* NULL (0,0) + +E ((..)|(.)) a (0,1)(0,1)(?,?)(0,1) +E ((..)|(.))((..)|(.)) a NOMATCH +E ((..)|(.))((..)|(.))((..)|(.)) a NOMATCH + +E ((..)|(.)){1} a (0,1)(0,1)(?,?)(0,1) +E ((..)|(.)){2} a NOMATCH +E ((..)|(.)){3} a NOMATCH + +E ((..)|(.))* a (0,1)(0,1)(?,?)(0,1) + +E ((..)|(.)) aa (0,2)(0,2)(0,2)(?,?) +E ((..)|(.))((..)|(.)) aa (0,2)(0,1)(?,?)(0,1)(1,2)(?,?)(1,2) +E ((..)|(.))((..)|(.))((..)|(.)) aa NOMATCH + +E ((..)|(.)){1} aa (0,2)(0,2)(0,2)(?,?) +E ((..)|(.)){2} aa (0,2)(1,2)(?,?)(1,2) +E ((..)|(.)){3} aa NOMATCH + +E ((..)|(.))* aa (0,2)(0,2)(0,2)(?,?) + +E ((..)|(.)) aaa (0,2)(0,2)(0,2)(?,?) +E ((..)|(.))((..)|(.)) aaa (0,3)(0,2)(0,2)(?,?)(2,3)(?,?)(2,3) +E ((..)|(.))((..)|(.))((..)|(.)) aaa (0,3)(0,1)(?,?)(0,1)(1,2)(?,?)(1,2)(2,3)(?,?)(2,3) + +E ((..)|(.)){1} aaa (0,2)(0,2)(0,2)(?,?) +#E ((..)|(.)){2} aaa (0,3)(2,3)(?,?)(2,3) +E ((..)|(.)){2} aaa (0,3)(2,3)(0,2)(2,3) RE2/Go +E ((..)|(.)){3} aaa (0,3)(2,3)(?,?)(2,3) + +#E ((..)|(.))* aaa (0,3)(2,3)(?,?)(2,3) +E ((..)|(.))* aaa (0,3)(2,3)(0,2)(2,3) RE2/Go + +E ((..)|(.)) aaaa (0,2)(0,2)(0,2)(?,?) +E ((..)|(.))((..)|(.)) aaaa (0,4)(0,2)(0,2)(?,?)(2,4)(2,4)(?,?) +E ((..)|(.))((..)|(.))((..)|(.)) aaaa (0,4)(0,2)(0,2)(?,?)(2,3)(?,?)(2,3)(3,4)(?,?)(3,4) + +E ((..)|(.)){1} aaaa (0,2)(0,2)(0,2)(?,?) +E ((..)|(.)){2} aaaa (0,4)(2,4)(2,4)(?,?) +#E ((..)|(.)){3} aaaa (0,4)(3,4)(?,?)(3,4) +E ((..)|(.)){3} aaaa (0,4)(3,4)(0,2)(3,4) RE2/Go + +E ((..)|(.))* aaaa (0,4)(2,4)(2,4)(?,?) + +E ((..)|(.)) aaaaa (0,2)(0,2)(0,2)(?,?) +E ((..)|(.))((..)|(.)) aaaaa (0,4)(0,2)(0,2)(?,?)(2,4)(2,4)(?,?) +E ((..)|(.))((..)|(.))((..)|(.)) aaaaa (0,5)(0,2)(0,2)(?,?)(2,4)(2,4)(?,?)(4,5)(?,?)(4,5) + +E ((..)|(.)){1} aaaaa (0,2)(0,2)(0,2)(?,?) +E ((..)|(.)){2} aaaaa (0,4)(2,4)(2,4)(?,?) +#E ((..)|(.)){3} aaaaa (0,5)(4,5)(?,?)(4,5) +E ((..)|(.)){3} aaaaa (0,5)(4,5)(2,4)(4,5) RE2/Go + +#E ((..)|(.))* aaaaa (0,5)(4,5)(?,?)(4,5) +E ((..)|(.))* aaaaa (0,5)(4,5)(2,4)(4,5) RE2/Go + +E ((..)|(.)) aaaaaa (0,2)(0,2)(0,2)(?,?) +E ((..)|(.))((..)|(.)) aaaaaa (0,4)(0,2)(0,2)(?,?)(2,4)(2,4)(?,?) +E ((..)|(.))((..)|(.))((..)|(.)) aaaaaa (0,6)(0,2)(0,2)(?,?)(2,4)(2,4)(?,?)(4,6)(4,6)(?,?) + +E ((..)|(.)){1} aaaaaa (0,2)(0,2)(0,2)(?,?) +E ((..)|(.)){2} aaaaaa (0,4)(2,4)(2,4)(?,?) +E ((..)|(.)){3} aaaaaa (0,6)(4,6)(4,6)(?,?) + +E ((..)|(.))* aaaaaa (0,6)(4,6)(4,6)(?,?) + +NOTE additional repetition tests graciously provided by Chris Kuklewicz www.haskell.org 2009-02-02 + +# These test a bug in OS X / FreeBSD / NetBSD, and libtree. +# Linux/GLIBC gets the {8,} and {8,8} wrong. 
+ +:HA#100:E X(.?){0,}Y X1234567Y (0,9)(7,8) +:HA#101:E X(.?){1,}Y X1234567Y (0,9)(7,8) +:HA#102:E X(.?){2,}Y X1234567Y (0,9)(7,8) +:HA#103:E X(.?){3,}Y X1234567Y (0,9)(7,8) +:HA#104:E X(.?){4,}Y X1234567Y (0,9)(7,8) +:HA#105:E X(.?){5,}Y X1234567Y (0,9)(7,8) +:HA#106:E X(.?){6,}Y X1234567Y (0,9)(7,8) +:HA#107:E X(.?){7,}Y X1234567Y (0,9)(7,8) +:HA#108:E X(.?){8,}Y X1234567Y (0,9)(8,8) +#:HA#110:E X(.?){0,8}Y X1234567Y (0,9)(7,8) +:HA#110:E X(.?){0,8}Y X1234567Y (0,9)(8,8) RE2/Go +#:HA#111:E X(.?){1,8}Y X1234567Y (0,9)(7,8) +:HA#111:E X(.?){1,8}Y X1234567Y (0,9)(8,8) RE2/Go +#:HA#112:E X(.?){2,8}Y X1234567Y (0,9)(7,8) +:HA#112:E X(.?){2,8}Y X1234567Y (0,9)(8,8) RE2/Go +#:HA#113:E X(.?){3,8}Y X1234567Y (0,9)(7,8) +:HA#113:E X(.?){3,8}Y X1234567Y (0,9)(8,8) RE2/Go +#:HA#114:E X(.?){4,8}Y X1234567Y (0,9)(7,8) +:HA#114:E X(.?){4,8}Y X1234567Y (0,9)(8,8) RE2/Go +#:HA#115:E X(.?){5,8}Y X1234567Y (0,9)(7,8) +:HA#115:E X(.?){5,8}Y X1234567Y (0,9)(8,8) RE2/Go +#:HA#116:E X(.?){6,8}Y X1234567Y (0,9)(7,8) +:HA#116:E X(.?){6,8}Y X1234567Y (0,9)(8,8) RE2/Go +#:HA#117:E X(.?){7,8}Y X1234567Y (0,9)(7,8) +:HA#117:E X(.?){7,8}Y X1234567Y (0,9)(8,8) RE2/Go +:HA#118:E X(.?){8,8}Y X1234567Y (0,9)(8,8) + +# These test a fixed bug in my regex-tdfa that did not keep the expanded +# form properly grouped, so right association did the wrong thing with +# these ambiguous patterns (crafted just to test my code when I became +# suspicious of my implementation). The first subexpression should use +# "ab" then "a" then "bcd". + +# OS X / FreeBSD / NetBSD badly fail many of these, with impossible +# results like (0,6)(4,5)(6,6). + +#:HA#260:E (a|ab|c|bcd){0,}(d*) ababcd (0,6)(3,6)(6,6) +:HA#260:E (a|ab|c|bcd){0,}(d*) ababcd (0,1)(0,1)(1,1) Rust +#:HA#261:E (a|ab|c|bcd){1,}(d*) ababcd (0,6)(3,6)(6,6) +:HA#261:E (a|ab|c|bcd){1,}(d*) ababcd (0,1)(0,1)(1,1) Rust +:HA#262:E (a|ab|c|bcd){2,}(d*) ababcd (0,6)(3,6)(6,6) +:HA#263:E (a|ab|c|bcd){3,}(d*) ababcd (0,6)(3,6)(6,6) +:HA#264:E (a|ab|c|bcd){4,}(d*) ababcd NOMATCH +#:HA#265:E (a|ab|c|bcd){0,10}(d*) ababcd (0,6)(3,6)(6,6) +:HA#265:E (a|ab|c|bcd){0,10}(d*) ababcd (0,1)(0,1)(1,1) Rust +#:HA#266:E (a|ab|c|bcd){1,10}(d*) ababcd (0,6)(3,6)(6,6) +:HA#266:E (a|ab|c|bcd){1,10}(d*) ababcd (0,1)(0,1)(1,1) Rust +:HA#267:E (a|ab|c|bcd){2,10}(d*) ababcd (0,6)(3,6)(6,6) +:HA#268:E (a|ab|c|bcd){3,10}(d*) ababcd (0,6)(3,6)(6,6) +:HA#269:E (a|ab|c|bcd){4,10}(d*) ababcd NOMATCH +#:HA#270:E (a|ab|c|bcd)*(d*) ababcd (0,6)(3,6)(6,6) +:HA#270:E (a|ab|c|bcd)*(d*) ababcd (0,1)(0,1)(1,1) Rust +#:HA#271:E (a|ab|c|bcd)+(d*) ababcd (0,6)(3,6)(6,6) +:HA#271:E (a|ab|c|bcd)+(d*) ababcd (0,1)(0,1)(1,1) Rust + +# The above worked on Linux/GLIBC but the following often fail. 
+# They also trip up OS X / FreeBSD / NetBSD: + +#:HA#280:E (ab|a|c|bcd){0,}(d*) ababcd (0,6)(3,6)(6,6) +:HA#280:E (ab|a|c|bcd){0,}(d*) ababcd (0,6)(4,5)(5,6) RE2/Go +#:HA#281:E (ab|a|c|bcd){1,}(d*) ababcd (0,6)(3,6)(6,6) +:HA#281:E (ab|a|c|bcd){1,}(d*) ababcd (0,6)(4,5)(5,6) RE2/Go +#:HA#282:E (ab|a|c|bcd){2,}(d*) ababcd (0,6)(3,6)(6,6) +:HA#282:E (ab|a|c|bcd){2,}(d*) ababcd (0,6)(4,5)(5,6) RE2/Go +#:HA#283:E (ab|a|c|bcd){3,}(d*) ababcd (0,6)(3,6)(6,6) +:HA#283:E (ab|a|c|bcd){3,}(d*) ababcd (0,6)(4,5)(5,6) RE2/Go +:HA#284:E (ab|a|c|bcd){4,}(d*) ababcd NOMATCH +#:HA#285:E (ab|a|c|bcd){0,10}(d*) ababcd (0,6)(3,6)(6,6) +:HA#285:E (ab|a|c|bcd){0,10}(d*) ababcd (0,6)(4,5)(5,6) RE2/Go +#:HA#286:E (ab|a|c|bcd){1,10}(d*) ababcd (0,6)(3,6)(6,6) +:HA#286:E (ab|a|c|bcd){1,10}(d*) ababcd (0,6)(4,5)(5,6) RE2/Go +#:HA#287:E (ab|a|c|bcd){2,10}(d*) ababcd (0,6)(3,6)(6,6) +:HA#287:E (ab|a|c|bcd){2,10}(d*) ababcd (0,6)(4,5)(5,6) RE2/Go +#:HA#288:E (ab|a|c|bcd){3,10}(d*) ababcd (0,6)(3,6)(6,6) +:HA#288:E (ab|a|c|bcd){3,10}(d*) ababcd (0,6)(4,5)(5,6) RE2/Go +:HA#289:E (ab|a|c|bcd){4,10}(d*) ababcd NOMATCH +#:HA#290:E (ab|a|c|bcd)*(d*) ababcd (0,6)(3,6)(6,6) +:HA#290:E (ab|a|c|bcd)*(d*) ababcd (0,6)(4,5)(5,6) RE2/Go +#:HA#291:E (ab|a|c|bcd)+(d*) ababcd (0,6)(3,6)(6,6) +:HA#291:E (ab|a|c|bcd)+(d*) ababcd (0,6)(4,5)(5,6) RE2/Go diff --git a/vendor/regex/testdata/fowler/nullsubexpr.toml b/vendor/regex/testdata/fowler/nullsubexpr.toml new file mode 100644 index 00000000000000..2f1f0183edf4da --- /dev/null +++ b/vendor/regex/testdata/fowler/nullsubexpr.toml @@ -0,0 +1,405 @@ +# !!! DO NOT EDIT !!! +# Automatically generated by 'regex-cli generate fowler'. +# Numbers in the test names correspond to the line number of the test from +# the original dat file. + +[[test]] +name = "nullsubexpr3" +regex = '''(a*)*''' +haystack = '''a''' +matches = [[[0, 1], [0, 1]]] +match-limit = 1 +anchored = true + +[[test]] +name = "nullsubexpr4" +regex = '''(a*)*''' +haystack = '''x''' +matches = [[[0, 0], [0, 0]]] +match-limit = 1 +anchored = true + +[[test]] +name = "nullsubexpr5" +regex = '''(a*)*''' +haystack = '''aaaaaa''' +matches = [[[0, 6], [0, 6]]] +match-limit = 1 +anchored = true + +[[test]] +name = "nullsubexpr6" +regex = '''(a*)*''' +haystack = '''aaaaaax''' +matches = [[[0, 6], [0, 6]]] +match-limit = 1 +anchored = true + +[[test]] +name = "nullsubexpr7" +regex = '''(a*)+''' +haystack = '''a''' +matches = [[[0, 1], [0, 1]]] +match-limit = 1 +anchored = true + +[[test]] +name = "nullsubexpr8" +regex = '''(a*)+''' +haystack = '''x''' +matches = [[[0, 0], [0, 0]]] +match-limit = 1 +anchored = true + +[[test]] +name = "nullsubexpr9" +regex = '''(a*)+''' +haystack = '''aaaaaa''' +matches = [[[0, 6], [0, 6]]] +match-limit = 1 +anchored = true + +[[test]] +name = "nullsubexpr10" +regex = '''(a*)+''' +haystack = '''aaaaaax''' +matches = [[[0, 6], [0, 6]]] +match-limit = 1 +anchored = true + +[[test]] +name = "nullsubexpr11" +regex = '''(a+)*''' +haystack = '''a''' +matches = [[[0, 1], [0, 1]]] +match-limit = 1 +anchored = true + +[[test]] +name = "nullsubexpr12" +regex = '''(a+)*''' +haystack = '''x''' +matches = [[[0, 0], []]] +match-limit = 1 +anchored = true + +[[test]] +name = "nullsubexpr13" +regex = '''(a+)*''' +haystack = '''aaaaaa''' +matches = [[[0, 6], [0, 6]]] +match-limit = 1 +anchored = true + +[[test]] +name = "nullsubexpr14" +regex = '''(a+)*''' +haystack = '''aaaaaax''' +matches = [[[0, 6], [0, 6]]] +match-limit = 1 +anchored = true + +[[test]] +name = "nullsubexpr15" +regex = '''(a+)+''' +haystack = '''a''' 
+matches = [[[0, 1], [0, 1]]] +match-limit = 1 +anchored = true + +[[test]] +name = "nullsubexpr16" +regex = '''(a+)+''' +haystack = '''x''' +matches = [] +match-limit = 1 + +[[test]] +name = "nullsubexpr17" +regex = '''(a+)+''' +haystack = '''aaaaaa''' +matches = [[[0, 6], [0, 6]]] +match-limit = 1 +anchored = true + +[[test]] +name = "nullsubexpr18" +regex = '''(a+)+''' +haystack = '''aaaaaax''' +matches = [[[0, 6], [0, 6]]] +match-limit = 1 +anchored = true + +[[test]] +name = "nullsubexpr20" +regex = '''([a]*)*''' +haystack = '''a''' +matches = [[[0, 1], [0, 1]]] +match-limit = 1 +anchored = true + +[[test]] +name = "nullsubexpr21" +regex = '''([a]*)*''' +haystack = '''x''' +matches = [[[0, 0], [0, 0]]] +match-limit = 1 +anchored = true + +[[test]] +name = "nullsubexpr22" +regex = '''([a]*)*''' +haystack = '''aaaaaa''' +matches = [[[0, 6], [0, 6]]] +match-limit = 1 +anchored = true + +[[test]] +name = "nullsubexpr23" +regex = '''([a]*)*''' +haystack = '''aaaaaax''' +matches = [[[0, 6], [0, 6]]] +match-limit = 1 +anchored = true + +[[test]] +name = "nullsubexpr24" +regex = '''([a]*)+''' +haystack = '''a''' +matches = [[[0, 1], [0, 1]]] +match-limit = 1 +anchored = true + +[[test]] +name = "nullsubexpr25" +regex = '''([a]*)+''' +haystack = '''x''' +matches = [[[0, 0], [0, 0]]] +match-limit = 1 +anchored = true + +[[test]] +name = "nullsubexpr26" +regex = '''([a]*)+''' +haystack = '''aaaaaa''' +matches = [[[0, 6], [0, 6]]] +match-limit = 1 +anchored = true + +[[test]] +name = "nullsubexpr27" +regex = '''([a]*)+''' +haystack = '''aaaaaax''' +matches = [[[0, 6], [0, 6]]] +match-limit = 1 +anchored = true + +[[test]] +name = "nullsubexpr28" +regex = '''([^b]*)*''' +haystack = '''a''' +matches = [[[0, 1], [0, 1]]] +match-limit = 1 +anchored = true + +[[test]] +name = "nullsubexpr29" +regex = '''([^b]*)*''' +haystack = '''b''' +matches = [[[0, 0], [0, 0]]] +match-limit = 1 +anchored = true + +[[test]] +name = "nullsubexpr30" +regex = '''([^b]*)*''' +haystack = '''aaaaaa''' +matches = [[[0, 6], [0, 6]]] +match-limit = 1 +anchored = true + +[[test]] +name = "nullsubexpr31" +regex = '''([^b]*)*''' +haystack = '''aaaaaab''' +matches = [[[0, 6], [0, 6]]] +match-limit = 1 +anchored = true + +[[test]] +name = "nullsubexpr32" +regex = '''([ab]*)*''' +haystack = '''a''' +matches = [[[0, 1], [0, 1]]] +match-limit = 1 +anchored = true + +[[test]] +name = "nullsubexpr33" +regex = '''([ab]*)*''' +haystack = '''aaaaaa''' +matches = [[[0, 6], [0, 6]]] +match-limit = 1 +anchored = true + +[[test]] +name = "nullsubexpr34" +regex = '''([ab]*)*''' +haystack = '''ababab''' +matches = [[[0, 6], [0, 6]]] +match-limit = 1 +anchored = true + +[[test]] +name = "nullsubexpr35" +regex = '''([ab]*)*''' +haystack = '''bababa''' +matches = [[[0, 6], [0, 6]]] +match-limit = 1 +anchored = true + +[[test]] +name = "nullsubexpr36" +regex = '''([ab]*)*''' +haystack = '''b''' +matches = [[[0, 1], [0, 1]]] +match-limit = 1 +anchored = true + +[[test]] +name = "nullsubexpr37" +regex = '''([ab]*)*''' +haystack = '''bbbbbb''' +matches = [[[0, 6], [0, 6]]] +match-limit = 1 +anchored = true + +[[test]] +name = "nullsubexpr38" +regex = '''([ab]*)*''' +haystack = '''aaaabcde''' +matches = [[[0, 5], [0, 5]]] +match-limit = 1 +anchored = true + +[[test]] +name = "nullsubexpr39" +regex = '''([^a]*)*''' +haystack = '''b''' +matches = [[[0, 1], [0, 1]]] +match-limit = 1 +anchored = true + +[[test]] +name = "nullsubexpr40" +regex = '''([^a]*)*''' +haystack = '''bbbbbb''' +matches = [[[0, 6], [0, 6]]] +match-limit = 1 +anchored = true + 
+[[test]] +name = "nullsubexpr41" +regex = '''([^a]*)*''' +haystack = '''aaaaaa''' +matches = [[[0, 0], [0, 0]]] +match-limit = 1 +anchored = true + +[[test]] +name = "nullsubexpr42" +regex = '''([^ab]*)*''' +haystack = '''ccccxx''' +matches = [[[0, 6], [0, 6]]] +match-limit = 1 +anchored = true + +[[test]] +name = "nullsubexpr43" +regex = '''([^ab]*)*''' +haystack = '''ababab''' +matches = [[[0, 0], [0, 0]]] +match-limit = 1 +anchored = true + +# Test added by Rust regex project. +[[test]] +name = "nullsubexpr46" +regex = '''((z)+|a)*''' +haystack = '''zabcde''' +matches = [[[0, 2], [1, 2], [0, 1]]] +match-limit = 1 +anchored = true + +[[test]] +name = "nullsubexpr64" +regex = '''(a*)*(x)''' +haystack = '''x''' +matches = [[[0, 1], [0, 0], [0, 1]]] +match-limit = 1 +anchored = true + +[[test]] +name = "nullsubexpr65" +regex = '''(a*)*(x)''' +haystack = '''ax''' +matches = [[[0, 2], [0, 1], [1, 2]]] +match-limit = 1 +anchored = true + +[[test]] +name = "nullsubexpr66" +regex = '''(a*)*(x)''' +haystack = '''axa''' +matches = [[[0, 2], [0, 1], [1, 2]]] +match-limit = 1 +anchored = true + +[[test]] +name = "nullsubexpr68" +regex = '''(a*)+(x)''' +haystack = '''x''' +matches = [[[0, 1], [0, 0], [0, 1]]] +match-limit = 1 +anchored = true + +[[test]] +name = "nullsubexpr69" +regex = '''(a*)+(x)''' +haystack = '''ax''' +matches = [[[0, 2], [0, 1], [1, 2]]] +match-limit = 1 +anchored = true + +[[test]] +name = "nullsubexpr70" +regex = '''(a*)+(x)''' +haystack = '''axa''' +matches = [[[0, 2], [0, 1], [1, 2]]] +match-limit = 1 +anchored = true + +[[test]] +name = "nullsubexpr72" +regex = '''(a*){2}(x)''' +haystack = '''x''' +matches = [[[0, 1], [0, 0], [0, 1]]] +match-limit = 1 +anchored = true + +[[test]] +name = "nullsubexpr73" +regex = '''(a*){2}(x)''' +haystack = '''ax''' +matches = [[[0, 2], [1, 1], [1, 2]]] +match-limit = 1 +anchored = true + +[[test]] +name = "nullsubexpr74" +regex = '''(a*){2}(x)''' +haystack = '''axa''' +matches = [[[0, 2], [1, 1], [1, 2]]] +match-limit = 1 +anchored = true + diff --git a/vendor/regex/testdata/fowler/repetition.toml b/vendor/regex/testdata/fowler/repetition.toml new file mode 100644 index 00000000000000..d6a711202209b2 --- /dev/null +++ b/vendor/regex/testdata/fowler/repetition.toml @@ -0,0 +1,746 @@ +# !!! DO NOT EDIT !!! +# Automatically generated by 'regex-cli generate fowler'. +# Numbers in the test names correspond to the line number of the test from +# the original dat file. 
+ +[[test]] +name = "repetition10" +regex = '''((..)|(.))''' +haystack = '''''' +matches = [] +match-limit = 1 + +[[test]] +name = "repetition11" +regex = '''((..)|(.))((..)|(.))''' +haystack = '''''' +matches = [] +match-limit = 1 + +[[test]] +name = "repetition12" +regex = '''((..)|(.))((..)|(.))((..)|(.))''' +haystack = '''''' +matches = [] +match-limit = 1 + +[[test]] +name = "repetition14" +regex = '''((..)|(.)){1}''' +haystack = '''''' +matches = [] +match-limit = 1 + +[[test]] +name = "repetition15" +regex = '''((..)|(.)){2}''' +haystack = '''''' +matches = [] +match-limit = 1 + +[[test]] +name = "repetition16" +regex = '''((..)|(.)){3}''' +haystack = '''''' +matches = [] +match-limit = 1 + +[[test]] +name = "repetition18" +regex = '''((..)|(.))*''' +haystack = '''''' +matches = [[[0, 0], [], [], []]] +match-limit = 1 +anchored = true + +[[test]] +name = "repetition20" +regex = '''((..)|(.))''' +haystack = '''a''' +matches = [[[0, 1], [0, 1], [], [0, 1]]] +match-limit = 1 +anchored = true + +[[test]] +name = "repetition21" +regex = '''((..)|(.))((..)|(.))''' +haystack = '''a''' +matches = [] +match-limit = 1 + +[[test]] +name = "repetition22" +regex = '''((..)|(.))((..)|(.))((..)|(.))''' +haystack = '''a''' +matches = [] +match-limit = 1 + +[[test]] +name = "repetition24" +regex = '''((..)|(.)){1}''' +haystack = '''a''' +matches = [[[0, 1], [0, 1], [], [0, 1]]] +match-limit = 1 +anchored = true + +[[test]] +name = "repetition25" +regex = '''((..)|(.)){2}''' +haystack = '''a''' +matches = [] +match-limit = 1 + +[[test]] +name = "repetition26" +regex = '''((..)|(.)){3}''' +haystack = '''a''' +matches = [] +match-limit = 1 + +[[test]] +name = "repetition28" +regex = '''((..)|(.))*''' +haystack = '''a''' +matches = [[[0, 1], [0, 1], [], [0, 1]]] +match-limit = 1 +anchored = true + +[[test]] +name = "repetition30" +regex = '''((..)|(.))''' +haystack = '''aa''' +matches = [[[0, 2], [0, 2], [0, 2], []]] +match-limit = 1 +anchored = true + +[[test]] +name = "repetition31" +regex = '''((..)|(.))((..)|(.))''' +haystack = '''aa''' +matches = [[[0, 2], [0, 1], [], [0, 1], [1, 2], [], [1, 2]]] +match-limit = 1 +anchored = true + +[[test]] +name = "repetition32" +regex = '''((..)|(.))((..)|(.))((..)|(.))''' +haystack = '''aa''' +matches = [] +match-limit = 1 + +[[test]] +name = "repetition34" +regex = '''((..)|(.)){1}''' +haystack = '''aa''' +matches = [[[0, 2], [0, 2], [0, 2], []]] +match-limit = 1 +anchored = true + +[[test]] +name = "repetition35" +regex = '''((..)|(.)){2}''' +haystack = '''aa''' +matches = [[[0, 2], [1, 2], [], [1, 2]]] +match-limit = 1 +anchored = true + +[[test]] +name = "repetition36" +regex = '''((..)|(.)){3}''' +haystack = '''aa''' +matches = [] +match-limit = 1 + +[[test]] +name = "repetition38" +regex = '''((..)|(.))*''' +haystack = '''aa''' +matches = [[[0, 2], [0, 2], [0, 2], []]] +match-limit = 1 +anchored = true + +[[test]] +name = "repetition40" +regex = '''((..)|(.))''' +haystack = '''aaa''' +matches = [[[0, 2], [0, 2], [0, 2], []]] +match-limit = 1 +anchored = true + +[[test]] +name = "repetition41" +regex = '''((..)|(.))((..)|(.))''' +haystack = '''aaa''' +matches = [[[0, 3], [0, 2], [0, 2], [], [2, 3], [], [2, 3]]] +match-limit = 1 +anchored = true + +[[test]] +name = "repetition42" +regex = '''((..)|(.))((..)|(.))((..)|(.))''' +haystack = '''aaa''' +matches = [[[0, 3], [0, 1], [], [0, 1], [1, 2], [], [1, 2], [2, 3], [], [2, 3]]] +match-limit = 1 +anchored = true + +[[test]] +name = "repetition44" +regex = '''((..)|(.)){1}''' +haystack = '''aaa''' +matches = 
[[[0, 2], [0, 2], [0, 2], []]] +match-limit = 1 +anchored = true + +# Test added by RE2/Go project. +[[test]] +name = "repetition46" +regex = '''((..)|(.)){2}''' +haystack = '''aaa''' +matches = [[[0, 3], [2, 3], [0, 2], [2, 3]]] +match-limit = 1 +anchored = true + +[[test]] +name = "repetition47" +regex = '''((..)|(.)){3}''' +haystack = '''aaa''' +matches = [[[0, 3], [2, 3], [], [2, 3]]] +match-limit = 1 +anchored = true + +# Test added by RE2/Go project. +[[test]] +name = "repetition50" +regex = '''((..)|(.))*''' +haystack = '''aaa''' +matches = [[[0, 3], [2, 3], [0, 2], [2, 3]]] +match-limit = 1 +anchored = true + +[[test]] +name = "repetition52" +regex = '''((..)|(.))''' +haystack = '''aaaa''' +matches = [[[0, 2], [0, 2], [0, 2], []]] +match-limit = 1 +anchored = true + +[[test]] +name = "repetition53" +regex = '''((..)|(.))((..)|(.))''' +haystack = '''aaaa''' +matches = [[[0, 4], [0, 2], [0, 2], [], [2, 4], [2, 4], []]] +match-limit = 1 +anchored = true + +[[test]] +name = "repetition54" +regex = '''((..)|(.))((..)|(.))((..)|(.))''' +haystack = '''aaaa''' +matches = [[[0, 4], [0, 2], [0, 2], [], [2, 3], [], [2, 3], [3, 4], [], [3, 4]]] +match-limit = 1 +anchored = true + +[[test]] +name = "repetition56" +regex = '''((..)|(.)){1}''' +haystack = '''aaaa''' +matches = [[[0, 2], [0, 2], [0, 2], []]] +match-limit = 1 +anchored = true + +[[test]] +name = "repetition57" +regex = '''((..)|(.)){2}''' +haystack = '''aaaa''' +matches = [[[0, 4], [2, 4], [2, 4], []]] +match-limit = 1 +anchored = true + +# Test added by RE2/Go project. +[[test]] +name = "repetition59" +regex = '''((..)|(.)){3}''' +haystack = '''aaaa''' +matches = [[[0, 4], [3, 4], [0, 2], [3, 4]]] +match-limit = 1 +anchored = true + +[[test]] +name = "repetition61" +regex = '''((..)|(.))*''' +haystack = '''aaaa''' +matches = [[[0, 4], [2, 4], [2, 4], []]] +match-limit = 1 +anchored = true + +[[test]] +name = "repetition63" +regex = '''((..)|(.))''' +haystack = '''aaaaa''' +matches = [[[0, 2], [0, 2], [0, 2], []]] +match-limit = 1 +anchored = true + +[[test]] +name = "repetition64" +regex = '''((..)|(.))((..)|(.))''' +haystack = '''aaaaa''' +matches = [[[0, 4], [0, 2], [0, 2], [], [2, 4], [2, 4], []]] +match-limit = 1 +anchored = true + +[[test]] +name = "repetition65" +regex = '''((..)|(.))((..)|(.))((..)|(.))''' +haystack = '''aaaaa''' +matches = [[[0, 5], [0, 2], [0, 2], [], [2, 4], [2, 4], [], [4, 5], [], [4, 5]]] +match-limit = 1 +anchored = true + +[[test]] +name = "repetition67" +regex = '''((..)|(.)){1}''' +haystack = '''aaaaa''' +matches = [[[0, 2], [0, 2], [0, 2], []]] +match-limit = 1 +anchored = true + +[[test]] +name = "repetition68" +regex = '''((..)|(.)){2}''' +haystack = '''aaaaa''' +matches = [[[0, 4], [2, 4], [2, 4], []]] +match-limit = 1 +anchored = true + +# Test added by RE2/Go project. +[[test]] +name = "repetition70" +regex = '''((..)|(.)){3}''' +haystack = '''aaaaa''' +matches = [[[0, 5], [4, 5], [2, 4], [4, 5]]] +match-limit = 1 +anchored = true + +# Test added by RE2/Go project. 
+[[test]] +name = "repetition73" +regex = '''((..)|(.))*''' +haystack = '''aaaaa''' +matches = [[[0, 5], [4, 5], [2, 4], [4, 5]]] +match-limit = 1 +anchored = true + +[[test]] +name = "repetition75" +regex = '''((..)|(.))''' +haystack = '''aaaaaa''' +matches = [[[0, 2], [0, 2], [0, 2], []]] +match-limit = 1 +anchored = true + +[[test]] +name = "repetition76" +regex = '''((..)|(.))((..)|(.))''' +haystack = '''aaaaaa''' +matches = [[[0, 4], [0, 2], [0, 2], [], [2, 4], [2, 4], []]] +match-limit = 1 +anchored = true + +[[test]] +name = "repetition77" +regex = '''((..)|(.))((..)|(.))((..)|(.))''' +haystack = '''aaaaaa''' +matches = [[[0, 6], [0, 2], [0, 2], [], [2, 4], [2, 4], [], [4, 6], [4, 6], []]] +match-limit = 1 +anchored = true + +[[test]] +name = "repetition79" +regex = '''((..)|(.)){1}''' +haystack = '''aaaaaa''' +matches = [[[0, 2], [0, 2], [0, 2], []]] +match-limit = 1 +anchored = true + +[[test]] +name = "repetition80" +regex = '''((..)|(.)){2}''' +haystack = '''aaaaaa''' +matches = [[[0, 4], [2, 4], [2, 4], []]] +match-limit = 1 +anchored = true + +[[test]] +name = "repetition81" +regex = '''((..)|(.)){3}''' +haystack = '''aaaaaa''' +matches = [[[0, 6], [4, 6], [4, 6], []]] +match-limit = 1 +anchored = true + +[[test]] +name = "repetition83" +regex = '''((..)|(.))*''' +haystack = '''aaaaaa''' +matches = [[[0, 6], [4, 6], [4, 6], []]] +match-limit = 1 +anchored = true + +[[test]] +name = "repetition-expensive90" +regex = '''X(.?){0,}Y''' +haystack = '''X1234567Y''' +matches = [[[0, 9], [7, 8]]] +match-limit = 1 +anchored = true + +[[test]] +name = "repetition-expensive91" +regex = '''X(.?){1,}Y''' +haystack = '''X1234567Y''' +matches = [[[0, 9], [7, 8]]] +match-limit = 1 +anchored = true + +[[test]] +name = "repetition-expensive92" +regex = '''X(.?){2,}Y''' +haystack = '''X1234567Y''' +matches = [[[0, 9], [7, 8]]] +match-limit = 1 +anchored = true + +[[test]] +name = "repetition-expensive93" +regex = '''X(.?){3,}Y''' +haystack = '''X1234567Y''' +matches = [[[0, 9], [7, 8]]] +match-limit = 1 +anchored = true + +[[test]] +name = "repetition-expensive94" +regex = '''X(.?){4,}Y''' +haystack = '''X1234567Y''' +matches = [[[0, 9], [7, 8]]] +match-limit = 1 +anchored = true + +[[test]] +name = "repetition-expensive95" +regex = '''X(.?){5,}Y''' +haystack = '''X1234567Y''' +matches = [[[0, 9], [7, 8]]] +match-limit = 1 +anchored = true + +[[test]] +name = "repetition-expensive96" +regex = '''X(.?){6,}Y''' +haystack = '''X1234567Y''' +matches = [[[0, 9], [7, 8]]] +match-limit = 1 +anchored = true + +[[test]] +name = "repetition-expensive97" +regex = '''X(.?){7,}Y''' +haystack = '''X1234567Y''' +matches = [[[0, 9], [7, 8]]] +match-limit = 1 +anchored = true + +[[test]] +name = "repetition-expensive98" +regex = '''X(.?){8,}Y''' +haystack = '''X1234567Y''' +matches = [[[0, 9], [8, 8]]] +match-limit = 1 +anchored = true + +# Test added by RE2/Go project. +[[test]] +name = "repetition-expensive100" +regex = '''X(.?){0,8}Y''' +haystack = '''X1234567Y''' +matches = [[[0, 9], [8, 8]]] +match-limit = 1 +anchored = true + +# Test added by RE2/Go project. +[[test]] +name = "repetition-expensive102" +regex = '''X(.?){1,8}Y''' +haystack = '''X1234567Y''' +matches = [[[0, 9], [8, 8]]] +match-limit = 1 +anchored = true + +# Test added by RE2/Go project. +[[test]] +name = "repetition-expensive104" +regex = '''X(.?){2,8}Y''' +haystack = '''X1234567Y''' +matches = [[[0, 9], [8, 8]]] +match-limit = 1 +anchored = true + +# Test added by RE2/Go project. 
+[[test]] +name = "repetition-expensive106" +regex = '''X(.?){3,8}Y''' +haystack = '''X1234567Y''' +matches = [[[0, 9], [8, 8]]] +match-limit = 1 +anchored = true + +# Test added by RE2/Go project. +[[test]] +name = "repetition-expensive108" +regex = '''X(.?){4,8}Y''' +haystack = '''X1234567Y''' +matches = [[[0, 9], [8, 8]]] +match-limit = 1 +anchored = true + +# Test added by RE2/Go project. +[[test]] +name = "repetition-expensive110" +regex = '''X(.?){5,8}Y''' +haystack = '''X1234567Y''' +matches = [[[0, 9], [8, 8]]] +match-limit = 1 +anchored = true + +# Test added by RE2/Go project. +[[test]] +name = "repetition-expensive112" +regex = '''X(.?){6,8}Y''' +haystack = '''X1234567Y''' +matches = [[[0, 9], [8, 8]]] +match-limit = 1 +anchored = true + +# Test added by RE2/Go project. +[[test]] +name = "repetition-expensive114" +regex = '''X(.?){7,8}Y''' +haystack = '''X1234567Y''' +matches = [[[0, 9], [8, 8]]] +match-limit = 1 +anchored = true + +[[test]] +name = "repetition-expensive115" +regex = '''X(.?){8,8}Y''' +haystack = '''X1234567Y''' +matches = [[[0, 9], [8, 8]]] +match-limit = 1 +anchored = true + +# Test added by Rust regex project. +[[test]] +name = "repetition-expensive127" +regex = '''(a|ab|c|bcd){0,}(d*)''' +haystack = '''ababcd''' +matches = [[[0, 1], [0, 1], [1, 1]]] +match-limit = 1 +anchored = true + +# Test added by Rust regex project. +[[test]] +name = "repetition-expensive129" +regex = '''(a|ab|c|bcd){1,}(d*)''' +haystack = '''ababcd''' +matches = [[[0, 1], [0, 1], [1, 1]]] +match-limit = 1 +anchored = true + +[[test]] +name = "repetition-expensive130" +regex = '''(a|ab|c|bcd){2,}(d*)''' +haystack = '''ababcd''' +matches = [[[0, 6], [3, 6], [6, 6]]] +match-limit = 1 +anchored = true + +[[test]] +name = "repetition-expensive131" +regex = '''(a|ab|c|bcd){3,}(d*)''' +haystack = '''ababcd''' +matches = [[[0, 6], [3, 6], [6, 6]]] +match-limit = 1 +anchored = true + +[[test]] +name = "repetition-expensive132" +regex = '''(a|ab|c|bcd){4,}(d*)''' +haystack = '''ababcd''' +matches = [] +match-limit = 1 + +# Test added by Rust regex project. +[[test]] +name = "repetition-expensive134" +regex = '''(a|ab|c|bcd){0,10}(d*)''' +haystack = '''ababcd''' +matches = [[[0, 1], [0, 1], [1, 1]]] +match-limit = 1 +anchored = true + +# Test added by Rust regex project. +[[test]] +name = "repetition-expensive136" +regex = '''(a|ab|c|bcd){1,10}(d*)''' +haystack = '''ababcd''' +matches = [[[0, 1], [0, 1], [1, 1]]] +match-limit = 1 +anchored = true + +[[test]] +name = "repetition-expensive137" +regex = '''(a|ab|c|bcd){2,10}(d*)''' +haystack = '''ababcd''' +matches = [[[0, 6], [3, 6], [6, 6]]] +match-limit = 1 +anchored = true + +[[test]] +name = "repetition-expensive138" +regex = '''(a|ab|c|bcd){3,10}(d*)''' +haystack = '''ababcd''' +matches = [[[0, 6], [3, 6], [6, 6]]] +match-limit = 1 +anchored = true + +[[test]] +name = "repetition-expensive139" +regex = '''(a|ab|c|bcd){4,10}(d*)''' +haystack = '''ababcd''' +matches = [] +match-limit = 1 + +# Test added by Rust regex project. +[[test]] +name = "repetition-expensive141" +regex = '''(a|ab|c|bcd)*(d*)''' +haystack = '''ababcd''' +matches = [[[0, 1], [0, 1], [1, 1]]] +match-limit = 1 +anchored = true + +# Test added by Rust regex project. +[[test]] +name = "repetition-expensive143" +regex = '''(a|ab|c|bcd)+(d*)''' +haystack = '''ababcd''' +matches = [[[0, 1], [0, 1], [1, 1]]] +match-limit = 1 +anchored = true + +# Test added by RE2/Go project. 
+[[test]] +name = "repetition-expensive149" +regex = '''(ab|a|c|bcd){0,}(d*)''' +haystack = '''ababcd''' +matches = [[[0, 6], [4, 5], [5, 6]]] +match-limit = 1 +anchored = true + +# Test added by RE2/Go project. +[[test]] +name = "repetition-expensive151" +regex = '''(ab|a|c|bcd){1,}(d*)''' +haystack = '''ababcd''' +matches = [[[0, 6], [4, 5], [5, 6]]] +match-limit = 1 +anchored = true + +# Test added by RE2/Go project. +[[test]] +name = "repetition-expensive153" +regex = '''(ab|a|c|bcd){2,}(d*)''' +haystack = '''ababcd''' +matches = [[[0, 6], [4, 5], [5, 6]]] +match-limit = 1 +anchored = true + +# Test added by RE2/Go project. +[[test]] +name = "repetition-expensive155" +regex = '''(ab|a|c|bcd){3,}(d*)''' +haystack = '''ababcd''' +matches = [[[0, 6], [4, 5], [5, 6]]] +match-limit = 1 +anchored = true + +[[test]] +name = "repetition-expensive156" +regex = '''(ab|a|c|bcd){4,}(d*)''' +haystack = '''ababcd''' +matches = [] +match-limit = 1 + +# Test added by RE2/Go project. +[[test]] +name = "repetition-expensive158" +regex = '''(ab|a|c|bcd){0,10}(d*)''' +haystack = '''ababcd''' +matches = [[[0, 6], [4, 5], [5, 6]]] +match-limit = 1 +anchored = true + +# Test added by RE2/Go project. +[[test]] +name = "repetition-expensive160" +regex = '''(ab|a|c|bcd){1,10}(d*)''' +haystack = '''ababcd''' +matches = [[[0, 6], [4, 5], [5, 6]]] +match-limit = 1 +anchored = true + +# Test added by RE2/Go project. +[[test]] +name = "repetition-expensive162" +regex = '''(ab|a|c|bcd){2,10}(d*)''' +haystack = '''ababcd''' +matches = [[[0, 6], [4, 5], [5, 6]]] +match-limit = 1 +anchored = true + +# Test added by RE2/Go project. +[[test]] +name = "repetition-expensive164" +regex = '''(ab|a|c|bcd){3,10}(d*)''' +haystack = '''ababcd''' +matches = [[[0, 6], [4, 5], [5, 6]]] +match-limit = 1 +anchored = true + +[[test]] +name = "repetition-expensive165" +regex = '''(ab|a|c|bcd){4,10}(d*)''' +haystack = '''ababcd''' +matches = [] +match-limit = 1 + +# Test added by RE2/Go project. +[[test]] +name = "repetition-expensive167" +regex = '''(ab|a|c|bcd)*(d*)''' +haystack = '''ababcd''' +matches = [[[0, 6], [4, 5], [5, 6]]] +match-limit = 1 +anchored = true + +# Test added by RE2/Go project. +[[test]] +name = "repetition-expensive169" +regex = '''(ab|a|c|bcd)+(d*)''' +haystack = '''ababcd''' +matches = [[[0, 6], [4, 5], [5, 6]]] +match-limit = 1 +anchored = true + diff --git a/vendor/regex/testdata/iter.toml b/vendor/regex/testdata/iter.toml new file mode 100644 index 00000000000000..329b9f031b2184 --- /dev/null +++ b/vendor/regex/testdata/iter.toml @@ -0,0 +1,143 @@ +[[test]] +name = "1" +regex = "a" +haystack = "aaa" +matches = [[0, 1], [1, 2], [2, 3]] + +[[test]] +name = "2" +regex = "a" +haystack = "aba" +matches = [[0, 1], [2, 3]] + +[[test]] +name = "empty1" +regex = '' +haystack = '' +matches = [[0, 0]] + +[[test]] +name = "empty2" +regex = '' +haystack = 'abc' +matches = [[0, 0], [1, 1], [2, 2], [3, 3]] + +[[test]] +name = "empty3" +regex = '(?:)' +haystack = 'abc' +matches = [[0, 0], [1, 1], [2, 2], [3, 3]] + +[[test]] +name = "empty4" +regex = '(?:)*' +haystack = 'abc' +matches = [[0, 0], [1, 1], [2, 2], [3, 3]] + +[[test]] +name = "empty5" +regex = '(?:)+' +haystack = 'abc' +matches = [[0, 0], [1, 1], [2, 2], [3, 3]] + +[[test]] +name = "empty6" +regex = '(?:)?' 
+haystack = 'abc' +matches = [[0, 0], [1, 1], [2, 2], [3, 3]] + +[[test]] +name = "empty7" +regex = '(?:)(?:)' +haystack = 'abc' +matches = [[0, 0], [1, 1], [2, 2], [3, 3]] + +[[test]] +name = "empty8" +regex = '(?:)+|z' +haystack = 'abc' +matches = [[0, 0], [1, 1], [2, 2], [3, 3]] + +[[test]] +name = "empty9" +regex = 'z|(?:)+' +haystack = 'abc' +matches = [[0, 0], [1, 1], [2, 2], [3, 3]] + +[[test]] +name = "empty10" +regex = '(?:)+|b' +haystack = 'abc' +matches = [[0, 0], [1, 1], [2, 2], [3, 3]] + +[[test]] +name = "empty11" +regex = 'b|(?:)+' +haystack = 'abc' +matches = [[0, 0], [1, 2], [3, 3]] + +[[test]] +name = "start1" +regex = "^a" +haystack = "a" +matches = [[0, 1]] + +[[test]] +name = "start2" +regex = "^a" +haystack = "aa" +matches = [[0, 1]] + +[[test]] +name = "anchored1" +regex = "a" +haystack = "a" +matches = [[0, 1]] +anchored = true + +# This test is pretty subtle. It demonstrates the crucial difference between +# '^a' and 'a' compiled in 'anchored' mode. The former regex exclusively +# matches at the start of a haystack and nowhere else. The latter regex has +# no such restriction, but its automaton is constructed such that it lacks a +# `.*?` prefix. So it can actually produce matches at multiple locations. +# The anchored3 test drives this point home. +[[test]] +name = "anchored2" +regex = "a" +haystack = "aa" +matches = [[0, 1], [1, 2]] +anchored = true + +# Unlikely anchored2, this test stops matching anything after it sees `b` +# since it lacks a `.*?` prefix. Since it is looking for 'a' but sees 'b', it +# determines that there are no remaining matches. +[[test]] +name = "anchored3" +regex = "a" +haystack = "aaba" +matches = [[0, 1], [1, 2]] +anchored = true + +[[test]] +name = "nonempty-followedby-empty" +regex = 'abc|.*?' +haystack = "abczzz" +matches = [[0, 3], [4, 4], [5, 5], [6, 6]] + +[[test]] +name = "nonempty-followedby-oneempty" +regex = 'abc|.*?' +haystack = "abcz" +matches = [[0, 3], [4, 4]] + +[[test]] +name = "nonempty-followedby-onemixed" +regex = 'abc|.*?' +haystack = "abczabc" +matches = [[0, 3], [4, 7]] + +[[test]] +name = "nonempty-followedby-twomixed" +regex = 'abc|.*?' +haystack = "abczzabc" +matches = [[0, 3], [4, 4], [5, 8]] diff --git a/vendor/regex/testdata/leftmost-all.toml b/vendor/regex/testdata/leftmost-all.toml new file mode 100644 index 00000000000000..e3fd950b6ba1ae --- /dev/null +++ b/vendor/regex/testdata/leftmost-all.toml @@ -0,0 +1,25 @@ +[[test]] +name = "alt" +regex = 'foo|foobar' +haystack = "foobar" +matches = [[0, 6]] +match-kind = "all" +search-kind = "leftmost" + +[[test]] +name = "multi" +regex = ['foo', 'foobar'] +haystack = "foobar" +matches = [ + { id = 1, span = [0, 6] }, +] +match-kind = "all" +search-kind = "leftmost" + +[[test]] +name = "dotall" +regex = '(?s:.)' +haystack = "foobar" +matches = [[5, 6]] +match-kind = "all" +search-kind = "leftmost" diff --git a/vendor/regex/testdata/line-terminator.toml b/vendor/regex/testdata/line-terminator.toml new file mode 100644 index 00000000000000..a398dafa2ecf99 --- /dev/null +++ b/vendor/regex/testdata/line-terminator.toml @@ -0,0 +1,109 @@ +# This tests that we can switch the line terminator to the NUL byte. +[[test]] +name = "nul" +regex = '(?m)^[a-z]+$' +haystack = '\x00abc\x00' +matches = [[1, 4]] +unescape = true +line-terminator = '\x00' + +# This tests that '.' will not match the configured line terminator, but will +# match \n. +[[test]] +name = "dot-changes-with-line-terminator" +regex = '.' 
+haystack = '\x00\n' +matches = [[1, 2]] +unescape = true +line-terminator = '\x00' + +# This tests that when we switch the line terminator, \n is no longer +# recognized as the terminator. +[[test]] +name = "not-line-feed" +regex = '(?m)^[a-z]+$' +haystack = '\nabc\n' +matches = [] +unescape = true +line-terminator = '\x00' + +# This tests that we can set the line terminator to a non-ASCII byte and have +# it behave as expected. +[[test]] +name = "non-ascii" +regex = '(?m)^[a-z]+$' +haystack = '\xFFabc\xFF' +matches = [[1, 4]] +unescape = true +line-terminator = '\xFF' +utf8 = false + +# This tests a tricky case where the line terminator is set to \r. This ensures +# that the StartLF look-behind assertion is tracked when computing the start +# state. +[[test]] +name = "carriage" +regex = '(?m)^[a-z]+' +haystack = 'ABC\rabc' +matches = [[4, 7]] +bounds = [4, 7] +unescape = true +line-terminator = '\r' + +# This tests that we can set the line terminator to a byte corresponding to a +# word character, and things work as expected. +[[test]] +name = "word-byte" +regex = '(?m)^[a-z]+$' +haystack = 'ZabcZ' +matches = [[1, 4]] +unescape = true +line-terminator = 'Z' + +# This tests that we can set the line terminator to a byte corresponding to a +# non-word character, and things work as expected. +[[test]] +name = "non-word-byte" +regex = '(?m)^[a-z]+$' +haystack = '%abc%' +matches = [[1, 4]] +unescape = true +line-terminator = '%' + +# This combines "set line terminator to a word byte" with a word boundary +# assertion, which should result in no match even though ^/$ matches. +[[test]] +name = "word-boundary" +regex = '(?m)^\b[a-z]+\b$' +haystack = 'ZabcZ' +matches = [] +unescape = true +line-terminator = 'Z' + +# Like 'word-boundary', but does an anchored search at the point where ^ +# matches, but where \b should not. +[[test]] +name = "word-boundary-at" +regex = '(?m)^\b[a-z]+\b$' +haystack = 'ZabcZ' +matches = [] +bounds = [1, 4] +anchored = true +unescape = true +line-terminator = 'Z' + +# Like 'word-boundary-at', but flips the word boundary to a negation. This +# in particular tests a tricky case in DFA engines, where they must consider +# explicitly that a starting configuration from a custom line terminator may +# also required setting the "is from word byte" flag on a state. Otherwise, +# it's treated as "not from a word byte," which would result in \B not matching +# here when it should. 
+[[test]] +name = "not-word-boundary-at" +regex = '(?m)^\B[a-z]+\B$' +haystack = 'ZabcZ' +matches = [[1, 4]] +bounds = [1, 4] +anchored = true +unescape = true +line-terminator = 'Z' diff --git a/vendor/regex/testdata/misc.toml b/vendor/regex/testdata/misc.toml new file mode 100644 index 00000000000000..c65531f5d9065a --- /dev/null +++ b/vendor/regex/testdata/misc.toml @@ -0,0 +1,99 @@ +[[test]] +name = "ascii-literal" +regex = "a" +haystack = "a" +matches = [[0, 1]] + +[[test]] +name = "ascii-literal-not" +regex = "a" +haystack = "z" +matches = [] + +[[test]] +name = "ascii-literal-anchored" +regex = "a" +haystack = "a" +matches = [[0, 1]] +anchored = true + +[[test]] +name = "ascii-literal-anchored-not" +regex = "a" +haystack = "z" +matches = [] +anchored = true + +[[test]] +name = "anchor-start-end-line" +regex = '(?m)^bar$' +haystack = "foo\nbar\nbaz" +matches = [[4, 7]] + +[[test]] +name = "prefix-literal-match" +regex = '^abc' +haystack = "abc" +matches = [[0, 3]] + +[[test]] +name = "prefix-literal-match-ascii" +regex = '^abc' +haystack = "abc" +matches = [[0, 3]] +unicode = false +utf8 = false + +[[test]] +name = "prefix-literal-no-match" +regex = '^abc' +haystack = "zabc" +matches = [] + +[[test]] +name = "one-literal-edge" +regex = 'abc' +haystack = "xxxxxab" +matches = [] + +[[test]] +name = "terminates" +regex = 'a$' +haystack = "a" +matches = [[0, 1]] + +[[test]] +name = "suffix-100" +regex = '.*abcd' +haystack = "abcd" +matches = [[0, 4]] + +[[test]] +name = "suffix-200" +regex = '.*(?:abcd)+' +haystack = "abcd" +matches = [[0, 4]] + +[[test]] +name = "suffix-300" +regex = '.*(?:abcd)+' +haystack = "abcdabcd" +matches = [[0, 8]] + +[[test]] +name = "suffix-400" +regex = '.*(?:abcd)+' +haystack = "abcdxabcd" +matches = [[0, 9]] + +[[test]] +name = "suffix-500" +regex = '.*x(?:abcd)+' +haystack = "abcdxabcd" +matches = [[0, 9]] + +[[test]] +name = "suffix-600" +regex = '[^abcd]*x(?:abcd)+' +haystack = "abcdxabcd" +matches = [[4, 9]] diff --git a/vendor/regex/testdata/multiline.toml b/vendor/regex/testdata/multiline.toml new file mode 100644 index 00000000000000..3acc901d50af20 --- /dev/null +++ b/vendor/regex/testdata/multiline.toml @@ -0,0 +1,845 @@ +[[test]] +name = "basic1" +regex = '(?m)^[a-z]+$' +haystack = "abc\ndef\nxyz" +matches = [[0, 3], [4, 7], [8, 11]] + +[[test]] +name = "basic1-crlf" +regex = '(?Rm)^[a-z]+$' +haystack = "abc\ndef\nxyz" +matches = [[0, 3], [4, 7], [8, 11]] + +[[test]] +name = "basic1-crlf-cr" +regex = '(?Rm)^[a-z]+$' +haystack = "abc\rdef\rxyz" +matches = [[0, 3], [4, 7], [8, 11]] + +[[test]] +name = "basic2" +regex = '(?m)^$' +haystack = "abc\ndef\nxyz" +matches = [] + +[[test]] +name = "basic2-crlf" +regex = '(?Rm)^$' +haystack = "abc\ndef\nxyz" +matches = [] + +[[test]] +name = "basic2-crlf-cr" +regex = '(?Rm)^$' +haystack = "abc\rdef\rxyz" +matches = [] + +[[test]] +name = "basic3" +regex = '(?m)^' +haystack = "abc\ndef\nxyz" +matches = [[0, 0], [4, 4], [8, 8]] + +[[test]] +name = "basic3-crlf" +regex = '(?Rm)^' +haystack = "abc\ndef\nxyz" +matches = [[0, 0], [4, 4], [8, 8]] + +[[test]] +name = "basic3-crlf-cr" +regex = '(?Rm)^' +haystack = "abc\rdef\rxyz" +matches = [[0, 0], [4, 4], [8, 8]] + +[[test]] +name = "basic4" +regex = '(?m)$' +haystack = "abc\ndef\nxyz" +matches = [[3, 3], [7, 7], [11, 11]] + +[[test]] +name = "basic4-crlf" +regex = '(?Rm)$' +haystack = "abc\ndef\nxyz" +matches = [[3, 3], [7, 7], [11, 11]] + +[[test]] +name = "basic4-crlf-cr" +regex = '(?Rm)$' +haystack = "abc\rdef\rxyz" +matches = [[3, 3], [7, 7], [11, 11]] + 
+[[test]] +name = "basic5" +regex = '(?m)^[a-z]' +haystack = "abc\ndef\nxyz" +matches = [[0, 1], [4, 5], [8, 9]] + +[[test]] +name = "basic5-crlf" +regex = '(?Rm)^[a-z]' +haystack = "abc\ndef\nxyz" +matches = [[0, 1], [4, 5], [8, 9]] + +[[test]] +name = "basic5-crlf-cr" +regex = '(?Rm)^[a-z]' +haystack = "abc\rdef\rxyz" +matches = [[0, 1], [4, 5], [8, 9]] + +[[test]] +name = "basic6" +regex = '(?m)[a-z]^' +haystack = "abc\ndef\nxyz" +matches = [] + +[[test]] +name = "basic6-crlf" +regex = '(?Rm)[a-z]^' +haystack = "abc\ndef\nxyz" +matches = [] + +[[test]] +name = "basic6-crlf-cr" +regex = '(?Rm)[a-z]^' +haystack = "abc\rdef\rxyz" +matches = [] + +[[test]] +name = "basic7" +regex = '(?m)[a-z]$' +haystack = "abc\ndef\nxyz" +matches = [[2, 3], [6, 7], [10, 11]] + +[[test]] +name = "basic7-crlf" +regex = '(?Rm)[a-z]$' +haystack = "abc\ndef\nxyz" +matches = [[2, 3], [6, 7], [10, 11]] + +[[test]] +name = "basic7-crlf-cr" +regex = '(?Rm)[a-z]$' +haystack = "abc\rdef\rxyz" +matches = [[2, 3], [6, 7], [10, 11]] + +[[test]] +name = "basic8" +regex = '(?m)$[a-z]' +haystack = "abc\ndef\nxyz" +matches = [] + +[[test]] +name = "basic8-crlf" +regex = '(?Rm)$[a-z]' +haystack = "abc\ndef\nxyz" +matches = [] + +[[test]] +name = "basic8-crlf-cr" +regex = '(?Rm)$[a-z]' +haystack = "abc\rdef\rxyz" +matches = [] + +[[test]] +name = "basic9" +regex = '(?m)^$' +haystack = "" +matches = [[0, 0]] + +[[test]] +name = "basic9-crlf" +regex = '(?Rm)^$' +haystack = "" +matches = [[0, 0]] + +[[test]] +name = "repeat1" +regex = '(?m)(?:^$)*' +haystack = "a\nb\nc" +matches = [[0, 0], [1, 1], [2, 2], [3, 3], [4, 4], [5, 5]] + +[[test]] +name = "repeat1-crlf" +regex = '(?Rm)(?:^$)*' +haystack = "a\nb\nc" +matches = [[0, 0], [1, 1], [2, 2], [3, 3], [4, 4], [5, 5]] + +[[test]] +name = "repeat1-crlf-cr" +regex = '(?Rm)(?:^$)*' +haystack = "a\rb\rc" +matches = [[0, 0], [1, 1], [2, 2], [3, 3], [4, 4], [5, 5]] + +[[test]] +name = "repeat1-no-multi" +regex = '(?:^$)*' +haystack = "a\nb\nc" +matches = [[0, 0], [1, 1], [2, 2], [3, 3], [4, 4], [5, 5]] + +[[test]] +name = "repeat1-no-multi-crlf" +regex = '(?R)(?:^$)*' +haystack = "a\nb\nc" +matches = [[0, 0], [1, 1], [2, 2], [3, 3], [4, 4], [5, 5]] + +[[test]] +name = "repeat1-no-multi-crlf-cr" +regex = '(?R)(?:^$)*' +haystack = "a\rb\rc" +matches = [[0, 0], [1, 1], [2, 2], [3, 3], [4, 4], [5, 5]] + +[[test]] +name = "repeat2" +regex = '(?m)(?:^|a)+' +haystack = "a\naaa\n" +matches = [[0, 0], [2, 2], [3, 5], [6, 6]] + +[[test]] +name = "repeat2-crlf" +regex = '(?Rm)(?:^|a)+' +haystack = "a\naaa\n" +matches = [[0, 0], [2, 2], [3, 5], [6, 6]] + +[[test]] +name = "repeat2-crlf-cr" +regex = '(?Rm)(?:^|a)+' +haystack = "a\raaa\r" +matches = [[0, 0], [2, 2], [3, 5], [6, 6]] + +[[test]] +name = "repeat2-no-multi" +regex = '(?:^|a)+' +haystack = "a\naaa\n" +matches = [[0, 0], [2, 5]] + +[[test]] +name = "repeat2-no-multi-crlf" +regex = '(?R)(?:^|a)+' +haystack = "a\naaa\n" +matches = [[0, 0], [2, 5]] + +[[test]] +name = "repeat2-no-multi-crlf-cr" +regex = '(?R)(?:^|a)+' +haystack = "a\raaa\r" +matches = [[0, 0], [2, 5]] + +[[test]] +name = "repeat3" +regex = '(?m)(?:^|a)*' +haystack = "a\naaa\n" +matches = [[0, 0], [1, 1], [2, 2], [3, 5], [6, 6]] + +[[test]] +name = "repeat3-crlf" +regex = '(?Rm)(?:^|a)*' +haystack = "a\naaa\n" +matches = [[0, 0], [1, 1], [2, 2], [3, 5], [6, 6]] + +[[test]] +name = "repeat3-crlf-cr" +regex = '(?Rm)(?:^|a)*' +haystack = "a\raaa\r" +matches = [[0, 0], [1, 1], [2, 2], [3, 5], [6, 6]] + +[[test]] +name = "repeat3-no-multi" +regex = '(?:^|a)*' +haystack = 
"a\naaa\n" +matches = [[0, 0], [1, 1], [2, 5], [6, 6]] + +[[test]] +name = "repeat3-no-multi-crlf" +regex = '(?R)(?:^|a)*' +haystack = "a\naaa\n" +matches = [[0, 0], [1, 1], [2, 5], [6, 6]] + +[[test]] +name = "repeat3-no-multi-crlf-cr" +regex = '(?R)(?:^|a)*' +haystack = "a\raaa\r" +matches = [[0, 0], [1, 1], [2, 5], [6, 6]] + +[[test]] +name = "repeat4" +regex = '(?m)(?:^|a+)' +haystack = "a\naaa\n" +matches = [[0, 0], [2, 2], [3, 5], [6, 6]] + +[[test]] +name = "repeat4-crlf" +regex = '(?Rm)(?:^|a+)' +haystack = "a\naaa\n" +matches = [[0, 0], [2, 2], [3, 5], [6, 6]] + +[[test]] +name = "repeat4-crlf-cr" +regex = '(?Rm)(?:^|a+)' +haystack = "a\raaa\r" +matches = [[0, 0], [2, 2], [3, 5], [6, 6]] + +[[test]] +name = "repeat4-no-multi" +regex = '(?:^|a+)' +haystack = "a\naaa\n" +matches = [[0, 0], [2, 5]] + +[[test]] +name = "repeat4-no-multi-crlf" +regex = '(?R)(?:^|a+)' +haystack = "a\naaa\n" +matches = [[0, 0], [2, 5]] + +[[test]] +name = "repeat4-no-multi-crlf-cr" +regex = '(?R)(?:^|a+)' +haystack = "a\raaa\r" +matches = [[0, 0], [2, 5]] + +[[test]] +name = "repeat5" +regex = '(?m)(?:^|a*)' +haystack = "a\naaa\n" +matches = [[0, 0], [1, 1], [2, 2], [3, 5], [6, 6]] + +[[test]] +name = "repeat5-crlf" +regex = '(?Rm)(?:^|a*)' +haystack = "a\naaa\n" +matches = [[0, 0], [1, 1], [2, 2], [3, 5], [6, 6]] + +[[test]] +name = "repeat5-crlf-cr" +regex = '(?Rm)(?:^|a*)' +haystack = "a\raaa\r" +matches = [[0, 0], [1, 1], [2, 2], [3, 5], [6, 6]] + +[[test]] +name = "repeat5-no-multi" +regex = '(?:^|a*)' +haystack = "a\naaa\n" +matches = [[0, 0], [1, 1], [2, 5], [6, 6]] + +[[test]] +name = "repeat5-no-multi-crlf" +regex = '(?R)(?:^|a*)' +haystack = "a\naaa\n" +matches = [[0, 0], [1, 1], [2, 5], [6, 6]] + +[[test]] +name = "repeat5-no-multi-crlf-cr" +regex = '(?R)(?:^|a*)' +haystack = "a\raaa\r" +matches = [[0, 0], [1, 1], [2, 5], [6, 6]] + +[[test]] +name = "repeat6" +regex = '(?m)(?:^[a-z])+' +haystack = "abc\ndef\nxyz" +matches = [[0, 1], [4, 5], [8, 9]] + +[[test]] +name = "repeat6-crlf" +regex = '(?Rm)(?:^[a-z])+' +haystack = "abc\ndef\nxyz" +matches = [[0, 1], [4, 5], [8, 9]] + +[[test]] +name = "repeat6-crlf-cr" +regex = '(?Rm)(?:^[a-z])+' +haystack = "abc\rdef\rxyz" +matches = [[0, 1], [4, 5], [8, 9]] + +[[test]] +name = "repeat6-no-multi" +regex = '(?:^[a-z])+' +haystack = "abc\ndef\nxyz" +matches = [[0, 1]] + +[[test]] +name = "repeat6-no-multi-crlf" +regex = '(?R)(?:^[a-z])+' +haystack = "abc\ndef\nxyz" +matches = [[0, 1]] + +[[test]] +name = "repeat6-no-multi-crlf-cr" +regex = '(?R)(?:^[a-z])+' +haystack = "abc\rdef\rxyz" +matches = [[0, 1]] + +[[test]] +name = "repeat7" +regex = '(?m)(?:^[a-z]{3}\n?)+' +haystack = "abc\ndef\nxyz" +matches = [[0, 11]] + +[[test]] +name = "repeat7-crlf" +regex = '(?Rm)(?:^[a-z]{3}\n?)+' +haystack = "abc\ndef\nxyz" +matches = [[0, 11]] + +[[test]] +name = "repeat7-crlf-cr" +regex = '(?Rm)(?:^[a-z]{3}\r?)+' +haystack = "abc\rdef\rxyz" +matches = [[0, 11]] + +[[test]] +name = "repeat7-no-multi" +regex = '(?:^[a-z]{3}\n?)+' +haystack = "abc\ndef\nxyz" +matches = [[0, 4]] + +[[test]] +name = "repeat7-no-multi-crlf" +regex = '(?R)(?:^[a-z]{3}\n?)+' +haystack = "abc\ndef\nxyz" +matches = [[0, 4]] + +[[test]] +name = "repeat7-no-multi-crlf-cr" +regex = '(?R)(?:^[a-z]{3}\r?)+' +haystack = "abc\rdef\rxyz" +matches = [[0, 4]] + +[[test]] +name = "repeat8" +regex = '(?m)(?:^[a-z]{3}\n?)*' +haystack = "abc\ndef\nxyz" +matches = [[0, 11]] + +[[test]] +name = "repeat8-crlf" +regex = '(?Rm)(?:^[a-z]{3}\n?)*' +haystack = "abc\ndef\nxyz" +matches = [[0, 11]] + +[[test]] +name 
= "repeat8-crlf-cr" +regex = '(?Rm)(?:^[a-z]{3}\r?)*' +haystack = "abc\rdef\rxyz" +matches = [[0, 11]] + +[[test]] +name = "repeat8-no-multi" +regex = '(?:^[a-z]{3}\n?)*' +haystack = "abc\ndef\nxyz" +matches = [[0, 4], [5, 5], [6, 6], [7, 7], [8, 8], [9, 9], [10, 10], [11, 11]] + +[[test]] +name = "repeat8-no-multi-crlf" +regex = '(?R)(?:^[a-z]{3}\n?)*' +haystack = "abc\ndef\nxyz" +matches = [[0, 4], [5, 5], [6, 6], [7, 7], [8, 8], [9, 9], [10, 10], [11, 11]] + +[[test]] +name = "repeat8-no-multi-crlf-cr" +regex = '(?R)(?:^[a-z]{3}\r?)*' +haystack = "abc\rdef\rxyz" +matches = [[0, 4], [5, 5], [6, 6], [7, 7], [8, 8], [9, 9], [10, 10], [11, 11]] + +[[test]] +name = "repeat9" +regex = '(?m)(?:\n?[a-z]{3}$)+' +haystack = "abc\ndef\nxyz" +matches = [[0, 11]] + +[[test]] +name = "repeat9-crlf" +regex = '(?Rm)(?:\n?[a-z]{3}$)+' +haystack = "abc\ndef\nxyz" +matches = [[0, 11]] + +[[test]] +name = "repeat9-crlf-cr" +regex = '(?Rm)(?:\r?[a-z]{3}$)+' +haystack = "abc\rdef\rxyz" +matches = [[0, 11]] + +[[test]] +name = "repeat9-no-multi" +regex = '(?:\n?[a-z]{3}$)+' +haystack = "abc\ndef\nxyz" +matches = [[7, 11]] + +[[test]] +name = "repeat9-no-multi-crlf" +regex = '(?R)(?:\n?[a-z]{3}$)+' +haystack = "abc\ndef\nxyz" +matches = [[7, 11]] + +[[test]] +name = "repeat9-no-multi-crlf-cr" +regex = '(?R)(?:\r?[a-z]{3}$)+' +haystack = "abc\rdef\rxyz" +matches = [[7, 11]] + +[[test]] +name = "repeat10" +regex = '(?m)(?:\n?[a-z]{3}$)*' +haystack = "abc\ndef\nxyz" +matches = [[0, 11]] + +[[test]] +name = "repeat10-crlf" +regex = '(?Rm)(?:\n?[a-z]{3}$)*' +haystack = "abc\ndef\nxyz" +matches = [[0, 11]] + +[[test]] +name = "repeat10-crlf-cr" +regex = '(?Rm)(?:\r?[a-z]{3}$)*' +haystack = "abc\rdef\rxyz" +matches = [[0, 11]] + +[[test]] +name = "repeat10-no-multi" +regex = '(?:\n?[a-z]{3}$)*' +haystack = "abc\ndef\nxyz" +matches = [[0, 0], [1, 1], [2, 2], [3, 3], [4, 4], [5, 5], [6, 6], [7, 11]] + +[[test]] +name = "repeat10-no-multi-crlf" +regex = '(?R)(?:\n?[a-z]{3}$)*' +haystack = "abc\ndef\nxyz" +matches = [[0, 0], [1, 1], [2, 2], [3, 3], [4, 4], [5, 5], [6, 6], [7, 11]] + +[[test]] +name = "repeat10-no-multi-crlf-cr" +regex = '(?R)(?:\r?[a-z]{3}$)*' +haystack = "abc\rdef\rxyz" +matches = [[0, 0], [1, 1], [2, 2], [3, 3], [4, 4], [5, 5], [6, 6], [7, 11]] + +[[test]] +name = "repeat11" +regex = '(?m)^*' +haystack = "\naa\n" +matches = [[0, 0], [1, 1], [2, 2], [3, 3], [4, 4]] + +[[test]] +name = "repeat11-crlf" +regex = '(?Rm)^*' +haystack = "\naa\n" +matches = [[0, 0], [1, 1], [2, 2], [3, 3], [4, 4]] + +[[test]] +name = "repeat11-crlf-cr" +regex = '(?Rm)^*' +haystack = "\raa\r" +matches = [[0, 0], [1, 1], [2, 2], [3, 3], [4, 4]] + +[[test]] +name = "repeat11-no-multi" +regex = '^*' +haystack = "\naa\n" +matches = [[0, 0], [1, 1], [2, 2], [3, 3], [4, 4]] + +[[test]] +name = "repeat11-no-multi-crlf" +regex = '(?R)^*' +haystack = "\naa\n" +matches = [[0, 0], [1, 1], [2, 2], [3, 3], [4, 4]] + +[[test]] +name = "repeat11-no-multi-crlf-cr" +regex = '(?R)^*' +haystack = "\raa\r" +matches = [[0, 0], [1, 1], [2, 2], [3, 3], [4, 4]] + +[[test]] +name = "repeat12" +regex = '(?m)^+' +haystack = "\naa\n" +matches = [[0, 0], [1, 1], [4, 4]] + +[[test]] +name = "repeat12-crlf" +regex = '(?Rm)^+' +haystack = "\naa\n" +matches = [[0, 0], [1, 1], [4, 4]] + +[[test]] +name = "repeat12-crlf-cr" +regex = '(?Rm)^+' +haystack = "\raa\r" +matches = [[0, 0], [1, 1], [4, 4]] + +[[test]] +name = "repeat12-no-multi" +regex = '^+' +haystack = "\naa\n" +matches = [[0, 0]] + +[[test]] +name = "repeat12-no-multi-crlf" +regex = '(?R)^+' 
+haystack = "\naa\n" +matches = [[0, 0]] + +[[test]] +name = "repeat12-no-multi-crlf-cr" +regex = '(?R)^+' +haystack = "\raa\r" +matches = [[0, 0]] + +[[test]] +name = "repeat13" +regex = '(?m)$*' +haystack = "\naa\n" +matches = [[0, 0], [1, 1], [2, 2], [3, 3], [4, 4]] + +[[test]] +name = "repeat13-crlf" +regex = '(?Rm)$*' +haystack = "\naa\n" +matches = [[0, 0], [1, 1], [2, 2], [3, 3], [4, 4]] + +[[test]] +name = "repeat13-crlf-cr" +regex = '(?Rm)$*' +haystack = "\raa\r" +matches = [[0, 0], [1, 1], [2, 2], [3, 3], [4, 4]] + +[[test]] +name = "repeat13-no-multi" +regex = '$*' +haystack = "\naa\n" +matches = [[0, 0], [1, 1], [2, 2], [3, 3], [4, 4]] + +[[test]] +name = "repeat13-no-multi-crlf" +regex = '(?R)$*' +haystack = "\naa\n" +matches = [[0, 0], [1, 1], [2, 2], [3, 3], [4, 4]] + +[[test]] +name = "repeat13-no-multi-crlf-cr" +regex = '(?R)$*' +haystack = "\raa\r" +matches = [[0, 0], [1, 1], [2, 2], [3, 3], [4, 4]] + +[[test]] +name = "repeat14" +regex = '(?m)$+' +haystack = "\naa\n" +matches = [[0, 0], [3, 3], [4, 4]] + +[[test]] +name = "repeat14-crlf" +regex = '(?Rm)$+' +haystack = "\naa\n" +matches = [[0, 0], [3, 3], [4, 4]] + +[[test]] +name = "repeat14-crlf-cr" +regex = '(?Rm)$+' +haystack = "\raa\r" +matches = [[0, 0], [3, 3], [4, 4]] + +[[test]] +name = "repeat14-no-multi" +regex = '$+' +haystack = "\naa\n" +matches = [[4, 4]] + +[[test]] +name = "repeat14-no-multi-crlf" +regex = '(?R)$+' +haystack = "\naa\n" +matches = [[4, 4]] + +[[test]] +name = "repeat14-no-multi-crlf-cr" +regex = '(?R)$+' +haystack = "\raa\r" +matches = [[4, 4]] + +[[test]] +name = "repeat15" +regex = '(?m)(?:$\n)+' +haystack = "\n\naaa\n\n" +matches = [[0, 2], [5, 7]] + +[[test]] +name = "repeat15-crlf" +regex = '(?Rm)(?:$\n)+' +haystack = "\n\naaa\n\n" +matches = [[0, 2], [5, 7]] + +[[test]] +name = "repeat15-crlf-cr" +regex = '(?Rm)(?:$\r)+' +haystack = "\r\raaa\r\r" +matches = [[0, 2], [5, 7]] + +[[test]] +name = "repeat15-no-multi" +regex = '(?:$\n)+' +haystack = "\n\naaa\n\n" +matches = [] + +[[test]] +name = "repeat15-no-multi-crlf" +regex = '(?R)(?:$\n)+' +haystack = "\n\naaa\n\n" +matches = [] + +[[test]] +name = "repeat15-no-multi-crlf-cr" +regex = '(?R)(?:$\r)+' +haystack = "\r\raaa\r\r" +matches = [] + +[[test]] +name = "repeat16" +regex = '(?m)(?:$\n)*' +haystack = "\n\naaa\n\n" +matches = [[0, 2], [3, 3], [4, 4], [5, 7]] + +[[test]] +name = "repeat16-crlf" +regex = '(?Rm)(?:$\n)*' +haystack = "\n\naaa\n\n" +matches = [[0, 2], [3, 3], [4, 4], [5, 7]] + +[[test]] +name = "repeat16-crlf-cr" +regex = '(?Rm)(?:$\r)*' +haystack = "\r\raaa\r\r" +matches = [[0, 2], [3, 3], [4, 4], [5, 7]] + +[[test]] +name = "repeat16-no-multi" +regex = '(?:$\n)*' +haystack = "\n\naaa\n\n" +matches = [[0, 0], [1, 1], [2, 2], [3, 3], [4, 4], [5, 5], [6, 6], [7, 7]] + +[[test]] +name = "repeat16-no-multi-crlf" +regex = '(?R)(?:$\n)*' +haystack = "\n\naaa\n\n" +matches = [[0, 0], [1, 1], [2, 2], [3, 3], [4, 4], [5, 5], [6, 6], [7, 7]] + +[[test]] +name = "repeat16-no-multi-crlf-cr" +regex = '(?R)(?:$\r)*' +haystack = "\r\raaa\r\r" +matches = [[0, 0], [1, 1], [2, 2], [3, 3], [4, 4], [5, 5], [6, 6], [7, 7]] + +[[test]] +name = "repeat17" +regex = '(?m)(?:$\n^)+' +haystack = "\n\naaa\n\n" +matches = [[0, 2], [5, 7]] + +[[test]] +name = "repeat17-crlf" +regex = '(?Rm)(?:$\n^)+' +haystack = "\n\naaa\n\n" +matches = [[0, 2], [5, 7]] + +[[test]] +name = "repeat17-crlf-cr" +regex = '(?Rm)(?:$\r^)+' +haystack = "\r\raaa\r\r" +matches = [[0, 2], [5, 7]] + +[[test]] +name = "repeat17-no-multi" +regex = '(?:$\n^)+' +haystack = 
"\n\naaa\n\n" +matches = [] + +[[test]] +name = "repeat17-no-multi-crlf" +regex = '(?R)(?:$\n^)+' +haystack = "\n\naaa\n\n" +matches = [] + +[[test]] +name = "repeat17-no-multi-crlf-cr" +regex = '(?R)(?:$\r^)+' +haystack = "\r\raaa\r\r" +matches = [] + +[[test]] +name = "repeat18" +regex = '(?m)(?:^|$)+' +haystack = "\n\naaa\n\n" +matches = [[0, 0], [1, 1], [2, 2], [5, 5], [6, 6], [7, 7]] + +[[test]] +name = "repeat18-crlf" +regex = '(?Rm)(?:^|$)+' +haystack = "\n\naaa\n\n" +matches = [[0, 0], [1, 1], [2, 2], [5, 5], [6, 6], [7, 7]] + +[[test]] +name = "repeat18-crlf-cr" +regex = '(?Rm)(?:^|$)+' +haystack = "\r\raaa\r\r" +matches = [[0, 0], [1, 1], [2, 2], [5, 5], [6, 6], [7, 7]] + +[[test]] +name = "repeat18-no-multi" +regex = '(?:^|$)+' +haystack = "\n\naaa\n\n" +matches = [[0, 0], [7, 7]] + +[[test]] +name = "repeat18-no-multi-crlf" +regex = '(?R)(?:^|$)+' +haystack = "\n\naaa\n\n" +matches = [[0, 0], [7, 7]] + +[[test]] +name = "repeat18-no-multi-crlf-cr" +regex = '(?R)(?:^|$)+' +haystack = "\r\raaa\r\r" +matches = [[0, 0], [7, 7]] + +[[test]] +name = "match-line-100" +regex = '(?m)^.+$' +haystack = "aa\naaaaaaaaaaaaaaaaaaa\n" +matches = [[0, 2], [3, 22]] + +[[test]] +name = "match-line-100-crlf" +regex = '(?Rm)^.+$' +haystack = "aa\naaaaaaaaaaaaaaaaaaa\n" +matches = [[0, 2], [3, 22]] + +[[test]] +name = "match-line-100-crlf-cr" +regex = '(?Rm)^.+$' +haystack = "aa\raaaaaaaaaaaaaaaaaaa\r" +matches = [[0, 2], [3, 22]] + +[[test]] +name = "match-line-200" +regex = '(?m)^.+$' +haystack = "aa\naaaaaaaaaaaaaaaaaaa\n" +matches = [[0, 2], [3, 22]] +unicode = false +utf8 = false + +[[test]] +name = "match-line-200-crlf" +regex = '(?Rm)^.+$' +haystack = "aa\naaaaaaaaaaaaaaaaaaa\n" +matches = [[0, 2], [3, 22]] +unicode = false +utf8 = false + +[[test]] +name = "match-line-200-crlf-cr" +regex = '(?Rm)^.+$' +haystack = "aa\raaaaaaaaaaaaaaaaaaa\r" +matches = [[0, 2], [3, 22]] +unicode = false +utf8 = false diff --git a/vendor/regex/testdata/no-unicode.toml b/vendor/regex/testdata/no-unicode.toml new file mode 100644 index 00000000000000..0ddac4c96d116f --- /dev/null +++ b/vendor/regex/testdata/no-unicode.toml @@ -0,0 +1,222 @@ +[[test]] +name = "invalid-utf8-literal1" +regex = '\xFF' +haystack = '\xFF' +matches = [[0, 1]] +unicode = false +utf8 = false +unescape = true + + +[[test]] +name = "mixed" +regex = '(?:.+)(?-u)(?:.+)' +haystack = '\xCE\x93\xCE\x94\xFF' +matches = [[0, 5]] +utf8 = false +unescape = true + + +[[test]] +name = "case1" +regex = "a" +haystack = "A" +matches = [[0, 1]] +case-insensitive = true +unicode = false + +[[test]] +name = "case2" +regex = "[a-z]+" +haystack = "AaAaA" +matches = [[0, 5]] +case-insensitive = true +unicode = false + +[[test]] +name = "case3" +regex = "[a-z]+" +haystack = "aA\u212AaA" +matches = [[0, 7]] +case-insensitive = true + +[[test]] +name = "case4" +regex = "[a-z]+" +haystack = "aA\u212AaA" +matches = [[0, 2], [5, 7]] +case-insensitive = true +unicode = false + + +[[test]] +name = "negate1" +regex = "[^a]" +haystack = "δ" +matches = [[0, 2]] + +[[test]] +name = "negate2" +regex = "[^a]" +haystack = "δ" +matches = [[0, 1], [1, 2]] +unicode = false +utf8 = false + + +[[test]] +name = "dotstar-prefix1" +regex = "a" +haystack = '\xFFa' +matches = [[1, 2]] +unicode = false +utf8 = false +unescape = true + +[[test]] +name = "dotstar-prefix2" +regex = "a" +haystack = '\xFFa' +matches = [[1, 2]] +utf8 = false +unescape = true + + +[[test]] +name = "null-bytes1" +regex = '[^\x00]+\x00' +haystack = 'foo\x00' +matches = [[0, 4]] +unicode = false +utf8 = false 
+unescape = true + + +[[test]] +name = "word-ascii" +regex = '\w+' +haystack = "aδ" +matches = [[0, 1]] +unicode = false + +[[test]] +name = "word-unicode" +regex = '\w+' +haystack = "aδ" +matches = [[0, 3]] + +[[test]] +name = "decimal-ascii" +regex = '\d+' +haystack = "1२३9" +matches = [[0, 1], [7, 8]] +unicode = false + +[[test]] +name = "decimal-unicode" +regex = '\d+' +haystack = "1२३9" +matches = [[0, 8]] + +[[test]] +name = "space-ascii" +regex = '\s+' +haystack = " \u1680" +matches = [[0, 1]] +unicode = false + +[[test]] +name = "space-unicode" +regex = '\s+' +haystack = " \u1680" +matches = [[0, 4]] + + +[[test]] +# See: https://github.com/rust-lang/regex/issues/484 +name = "iter1-bytes" +regex = '' +haystack = "☃" +matches = [[0, 0], [1, 1], [2, 2], [3, 3]] +utf8 = false + +[[test]] +# See: https://github.com/rust-lang/regex/issues/484 +name = "iter1-utf8" +regex = '' +haystack = "☃" +matches = [[0, 0], [3, 3]] + +[[test]] +# See: https://github.com/rust-lang/regex/issues/484 +# Note that iter2-utf8 doesn't make sense here, since the input isn't UTF-8. +name = "iter2-bytes" +regex = '' +haystack = 'b\xFFr' +matches = [[0, 0], [1, 1], [2, 2], [3, 3]] +unescape = true +utf8 = false + + +# These test that unanchored prefixes can munch through invalid UTF-8 even when +# utf8 is enabled. +# +# This test actually reflects an interesting simplification in how the Thompson +# NFA is constructed. It used to be that the NFA could be built with an +# unanchored prefix that either matched any byte or _only_ matched valid UTF-8. +# But the latter turns out to be pretty precarious when it comes to prefilters, +# because if you search a haystack that contains invalid UTF-8 but have an +# unanchored prefix that requires UTF-8, then prefilters are no longer a valid +# optimization because you actually have to check that everything is valid +# UTF-8. +# +# Originally, I had thought that we needed a valid UTF-8 unanchored prefix in +# order to guarantee that we only match at valid UTF-8 boundaries. But this +# isn't actually true! There are really only two things to consider here: +# +# 1) Will a regex match split an encoded codepoint? No. Because by construction, +# we ensure that a MATCH state can only be reached by following valid UTF-8 (assuming +# all of the UTF-8 modes are enabled). +# +# 2) Will a regex match arbitrary bytes that aren't valid UTF-8? Again, no, +# assuming all of the UTF-8 modes are enabled. +[[test]] +name = "unanchored-invalid-utf8-match-100" +regex = '[a-z]' +haystack = '\xFFa\xFF' +matches = [[1, 2]] +unescape = true +utf8 = false + +# This test shows that we can still prevent a match from occurring by requiring +# that valid UTF-8 match by inserting our own unanchored prefix. Thus, if the +# behavior of not munching through invalid UTF-8 anywhere is needed, then it +# can be achieved thusly. +[[test]] +name = "unanchored-invalid-utf8-nomatch" +regex = '^(?s:.)*?[a-z]' +haystack = '\xFFa\xFF' +matches = [] +unescape = true +utf8 = false + +# This is a tricky test that makes sure we don't accidentally do a kind of +# unanchored search when we've requested that a regex engine not report +# empty matches that split a codepoint. This test caught a regression during +# development where the code for skipping over bad empty matches would do so +# even if the search should have been anchored. This is ultimately what led to +# making 'anchored' an 'Input' option, so that it was always clear what kind +# of search was being performed. 
(Before that, whether a search was anchored +# or not was a config knob on the regex engine.) This did wind up making DFAs +# a little more complex to configure (with their 'StartKind' knob), but it +# generally smoothed out everything else. +# +# Great example of a test whose failure motivated a sweeping API refactoring. +[[test]] +name = "anchored-iter-empty-utf8" +regex = '' +haystack = 'a☃z' +matches = [[0, 0], [1, 1]] +unescape = false +utf8 = true +anchored = true diff --git a/vendor/regex/testdata/overlapping.toml b/vendor/regex/testdata/overlapping.toml new file mode 100644 index 00000000000000..7bcd45a2f78e13 --- /dev/null +++ b/vendor/regex/testdata/overlapping.toml @@ -0,0 +1,280 @@ +# NOTE: We define a number of tests where the *match* kind is 'leftmost-first' +# but the *search* kind is 'overlapping'. This is a somewhat nonsensical +# combination and can produce odd results. Nevertheless, those results should +# be consistent so we test them here. (At the time of writing this note, I +# hadn't yet decided whether to make 'leftmost-first' with 'overlapping' result +# in unspecified behavior.) + +# This demonstrates how a full overlapping search is obvious quadratic. This +# regex reports a match for every substring in the haystack. +[[test]] +name = "ungreedy-dotstar-matches-everything-100" +regex = [".*?"] +haystack = "zzz" +matches = [ + { id = 0, span = [0, 0] }, + { id = 0, span = [1, 1] }, + { id = 0, span = [0, 1] }, + { id = 0, span = [2, 2] }, + { id = 0, span = [1, 2] }, + { id = 0, span = [0, 2] }, + { id = 0, span = [3, 3] }, + { id = 0, span = [2, 3] }, + { id = 0, span = [1, 3] }, + { id = 0, span = [0, 3] }, +] +match-kind = "all" +search-kind = "overlapping" + +[[test]] +name = "greedy-dotstar-matches-everything-100" +regex = [".*"] +haystack = "zzz" +matches = [ + { id = 0, span = [0, 0] }, + { id = 0, span = [1, 1] }, + { id = 0, span = [0, 1] }, + { id = 0, span = [2, 2] }, + { id = 0, span = [1, 2] }, + { id = 0, span = [0, 2] }, + { id = 0, span = [3, 3] }, + { id = 0, span = [2, 3] }, + { id = 0, span = [1, 3] }, + { id = 0, span = [0, 3] }, +] +match-kind = "all" +search-kind = "overlapping" + +[[test]] +name = "repetition-plus-leftmost-first-100" +regex = 'a+' +haystack = "aaa" +matches = [[0, 1], [1, 2], [0, 2], [2, 3], [1, 3], [0, 3]] +match-kind = "leftmost-first" +search-kind = "overlapping" + +[[test]] +name = "repetition-plus-leftmost-first-110" +regex = '☃+' +haystack = "☃☃☃" +matches = [[0, 3], [3, 6], [0, 6], [6, 9], [3, 9], [0, 9]] +match-kind = "leftmost-first" +search-kind = "overlapping" + +[[test]] +name = "repetition-plus-all-100" +regex = 'a+' +haystack = "aaa" +matches = [[0, 1], [1, 2], [0, 2], [2, 3], [1, 3], [0, 3]] +match-kind = "all" +search-kind = "overlapping" + +[[test]] +name = "repetition-plus-all-110" +regex = '☃+' +haystack = "☃☃☃" +matches = [[0, 3], [3, 6], [0, 6], [6, 9], [3, 9], [0, 9]] +match-kind = "all" +search-kind = "overlapping" + +[[test]] +name = "repetition-plus-leftmost-first-200" +regex = '(abc)+' +haystack = "zzabcabczzabc" +matches = [ + [[2, 5], [2, 5]], + [[5, 8], [5, 8]], + [[2, 8], [5, 8]], +] +match-kind = "leftmost-first" +search-kind = "overlapping" + +[[test]] +name = "repetition-plus-all-200" +regex = '(abc)+' +haystack = "zzabcabczzabc" +matches = [ + [[2, 5], [2, 5]], + [[5, 8], [5, 8]], + [[2, 8], [5, 8]], + [[10, 13], [10, 13]], +] +match-kind = "all" +search-kind = "overlapping" + +[[test]] +name = "repetition-star-leftmost-first-100" +regex = 'a*' +haystack = "aaa" +matches = [ + [0, 0], + [1, 
1], + [0, 1], + [2, 2], + [1, 2], + [0, 2], + [3, 3], + [2, 3], + [1, 3], + [0, 3], +] +match-kind = "leftmost-first" +search-kind = "overlapping" + +[[test]] +name = "repetition-star-all-100" +regex = 'a*' +haystack = "aaa" +matches = [ + [0, 0], + [1, 1], + [0, 1], + [2, 2], + [1, 2], + [0, 2], + [3, 3], + [2, 3], + [1, 3], + [0, 3], +] +match-kind = "all" +search-kind = "overlapping" + +[[test]] +name = "repetition-star-leftmost-first-200" +regex = '(abc)*' +haystack = "zzabcabczzabc" +matches = [ + [[0, 0], []], +] +match-kind = "leftmost-first" +search-kind = "overlapping" + +[[test]] +name = "repetition-star-all-200" +regex = '(abc)*' +haystack = "zzabcabczzabc" +matches = [ + [[0, 0], []], + [[1, 1], []], + [[2, 2], []], + [[3, 3], []], + [[4, 4], []], + [[5, 5], []], + [[2, 5], [2, 5]], + [[6, 6], []], + [[7, 7], []], + [[8, 8], []], + [[5, 8], [5, 8]], + [[2, 8], [5, 8]], + [[9, 9], []], + [[10, 10], []], + [[11, 11], []], + [[12, 12], []], + [[13, 13], []], + [[10, 13], [10, 13]], +] +match-kind = "all" +search-kind = "overlapping" + +[[test]] +name = "start-end-rep-leftmost-first" +regex = '(^$)*' +haystack = "abc" +matches = [ + [[0, 0], []], +] +match-kind = "leftmost-first" +search-kind = "overlapping" + +[[test]] +name = "start-end-rep-all" +regex = '(^$)*' +haystack = "abc" +matches = [ + [[0, 0], []], + [[1, 1], []], + [[2, 2], []], + [[3, 3], []], +] +match-kind = "all" +search-kind = "overlapping" + +[[test]] +name = "alt-leftmost-first-100" +regex = 'abc|a' +haystack = "zzabcazzaabc" +matches = [[2, 3], [2, 5]] +match-kind = "leftmost-first" +search-kind = "overlapping" + +[[test]] +name = "alt-all-100" +regex = 'abc|a' +haystack = "zzabcazzaabc" +matches = [[2, 3], [2, 5], [5, 6], [8, 9], [9, 10], [9, 12]] +match-kind = "all" +search-kind = "overlapping" + +[[test]] +name = "empty-000" +regex = "" +haystack = "abc" +matches = [[0, 0], [1, 1], [2, 2], [3, 3]] +match-kind = "all" +search-kind = "overlapping" + +[[test]] +name = "empty-alt-000" +regex = "|b" +haystack = "abc" +matches = [[0, 0], [1, 1], [2, 2], [1, 2], [3, 3]] +match-kind = "all" +search-kind = "overlapping" + +[[test]] +name = "empty-alt-010" +regex = "b|" +haystack = "abc" +matches = [[0, 0], [1, 1], [2, 2], [1, 2], [3, 3]] +match-kind = "all" +search-kind = "overlapping" + +[[test]] +# See: https://github.com/rust-lang/regex/issues/484 +name = "iter1-bytes" +regex = '' +haystack = "☃" +matches = [[0, 0], [1, 1], [2, 2], [3, 3]] +utf8 = false +match-kind = "all" +search-kind = "overlapping" + +[[test]] +# See: https://github.com/rust-lang/regex/issues/484 +name = "iter1-utf8" +regex = '' +haystack = "☃" +matches = [[0, 0], [3, 3]] +match-kind = "all" +search-kind = "overlapping" + +[[test]] +name = "iter1-incomplete-utf8" +regex = '' +haystack = '\xE2\x98' # incomplete snowman +matches = [[0, 0], [1, 1], [2, 2]] +match-kind = "all" +search-kind = "overlapping" +unescape = true +utf8 = false + +[[test]] +name = "scratch" +regex = ['sam', 'samwise'] +haystack = "samwise" +matches = [ + { id = 0, span = [0, 3] }, +] +match-kind = "leftmost-first" +search-kind = "overlapping" diff --git a/vendor/regex/testdata/regex-lite.toml b/vendor/regex/testdata/regex-lite.toml new file mode 100644 index 00000000000000..1769d803d4e07c --- /dev/null +++ b/vendor/regex/testdata/regex-lite.toml @@ -0,0 +1,98 @@ +# These tests are specifically written to test the regex-lite crate. While it +# largely has the same semantics as the regex crate, there are some differences +# around Unicode support and UTF-8. 
+# +# To be clear, regex-lite supports far fewer patterns because of its lack of +# Unicode support, nested character classes and character class set operations. +# What we're talking about here are the patterns that both crates support but +# where the semantics might differ. + +# regex-lite uses ASCII definitions for Perl character classes. +[[test]] +name = "perl-class-decimal" +regex = '\d' +haystack = '᠕' +matches = [] +unicode = true + +# regex-lite uses ASCII definitions for Perl character classes. +[[test]] +name = "perl-class-space" +regex = '\s' +haystack = "\u2000" +matches = [] +unicode = true + +# regex-lite uses ASCII definitions for Perl character classes. +[[test]] +name = "perl-class-word" +regex = '\w' +haystack = 'δ' +matches = [] +unicode = true + +# regex-lite uses the ASCII definition of word for word boundary assertions. +[[test]] +name = "word-boundary" +regex = '\b' +haystack = 'δ' +matches = [] +unicode = true + +# regex-lite uses the ASCII definition of word for negated word boundary +# assertions. But note that it should still not split codepoints! +[[test]] +name = "word-boundary-negated" +regex = '\B' +haystack = 'δ' +matches = [[0, 0], [2, 2]] +unicode = true + +# While we're here, the empty regex---which matches at every +# position---shouldn't split a codepoint either. +[[test]] +name = "empty-no-split-codepoint" +regex = '' +haystack = '💩' +matches = [[0, 0], [4, 4]] +unicode = true + +# A dot always matches a full codepoint. +[[test]] +name = "dot-always-matches-codepoint" +regex = '.' +haystack = '💩' +matches = [[0, 4]] +unicode = false + +# A negated character class also always matches a full codepoint. +[[test]] +name = "negated-class-always-matches-codepoint" +regex = '[^a]' +haystack = '💩' +matches = [[0, 4]] +unicode = false + +# regex-lite only supports ASCII-aware case insensitive matching. +[[test]] +name = "case-insensitive-is-ascii-only" +regex = 's' +haystack = 'ſ' +matches = [] +unicode = true +case-insensitive = true + +# Negated word boundaries shouldn't split a codepoint, but they will match +# between invalid UTF-8. +# +# This test is only valid for a 'bytes' API, but that doesn't (yet) exist in +# regex-lite. This can't happen in the main API because &str can't contain +# invalid UTF-8. 
+# [[test]] +# name = "word-boundary-invalid-utf8" +# regex = '\B' +# haystack = '\xFF\xFF\xFF\xFF' +# unescape = true +# matches = [[0, 0], [1, 1], [2, 2], [3, 3], [4, 4]] +# unicode = true +# utf8 = false diff --git a/vendor/regex/testdata/regression.toml b/vendor/regex/testdata/regression.toml new file mode 100644 index 00000000000000..53b0701a3ceeb2 --- /dev/null +++ b/vendor/regex/testdata/regression.toml @@ -0,0 +1,830 @@ +# See: https://github.com/rust-lang/regex/issues/48 +[[test]] +name = "invalid-regex-no-crash-100" +regex = '(*)' +haystack = "" +matches = [] +compiles = false + +# See: https://github.com/rust-lang/regex/issues/48 +[[test]] +name = "invalid-regex-no-crash-200" +regex = '(?:?)' +haystack = "" +matches = [] +compiles = false + +# See: https://github.com/rust-lang/regex/issues/48 +[[test]] +name = "invalid-regex-no-crash-300" +regex = '(?)' +haystack = "" +matches = [] +compiles = false + +# See: https://github.com/rust-lang/regex/issues/48 +[[test]] +name = "invalid-regex-no-crash-400" +regex = '*' +haystack = "" +matches = [] +compiles = false + +# See: https://github.com/rust-lang/regex/issues/75 +[[test]] +name = "unsorted-binary-search-100" +regex = '(?i-u)[a_]+' +haystack = "A_" +matches = [[0, 2]] + +# See: https://github.com/rust-lang/regex/issues/75 +[[test]] +name = "unsorted-binary-search-200" +regex = '(?i-u)[A_]+' +haystack = "a_" +matches = [[0, 2]] + +# See: https://github.com/rust-lang/regex/issues/76 +[[test]] +name = "unicode-case-lower-nocase-flag" +regex = '(?i)\p{Ll}+' +haystack = "ΛΘΓΔα" +matches = [[0, 10]] + +# See: https://github.com/rust-lang/regex/issues/99 +[[test]] +name = "negated-char-class-100" +regex = '(?i)[^x]' +haystack = "x" +matches = [] + +# See: https://github.com/rust-lang/regex/issues/99 +[[test]] +name = "negated-char-class-200" +regex = '(?i)[^x]' +haystack = "X" +matches = [] + +# See: https://github.com/rust-lang/regex/issues/101 +[[test]] +name = "ascii-word-underscore" +regex = '[[:word:]]' +haystack = "_" +matches = [[0, 1]] + +# See: https://github.com/rust-lang/regex/issues/129 +[[test]] +name = "captures-repeat" +regex = '([a-f]){2}(?P<foo>[x-z])' +haystack = "abx" +matches = [ + [[0, 3], [1, 2], [2, 3]], +] + +# See: https://github.com/rust-lang/regex/issues/153 +[[test]] +name = "alt-in-alt-100" +regex = 'ab?|$' +haystack = "az" +matches = [[0, 1], [2, 2]] + +# See: https://github.com/rust-lang/regex/issues/153 +[[test]] +name = "alt-in-alt-200" +regex = '^(?:.*?)(?:\n|\r\n?|$)' +haystack = "ab\rcd" +matches = [[0, 3]] + +# See: https://github.com/rust-lang/regex/issues/169 +[[test]] +name = "leftmost-first-prefix" +regex = 'z*azb' +haystack = "azb" +matches = [[0, 3]] + +# See: https://github.com/rust-lang/regex/issues/191 +[[test]] +name = "many-alternates" +regex = '1|2|3|4|5|6|7|8|9|10|int' +haystack = "int" +matches = [[0, 3]] + +# See: https://github.com/rust-lang/regex/issues/204 +[[test]] +name = "word-boundary-alone-100" +regex = '\b' +haystack = "Should this (work?)" +matches = [[0, 0], [6, 6], [7, 7], [11, 11], [13, 13], [17, 17]] + +# See: https://github.com/rust-lang/regex/issues/204 +[[test]] +name = "word-boundary-alone-200" +regex = '\b' +haystack = "a b c" +matches = [[0, 0], [1, 1], [2, 2], [3, 3], [4, 4], [5, 5]] + +# See: https://github.com/rust-lang/regex/issues/264 +[[test]] +name = "word-boundary-ascii-no-capture" +regex = '\B' +haystack = "\U00028F3E" +matches = [[0, 0], [1, 1], [2, 2], [3, 3], [4, 4]] +unicode = false +utf8 = false + +# See: https://github.com/rust-lang/regex/issues/264 
+[[test]] +name = "word-boundary-ascii-capture" +regex = '(?:\B)' +haystack = "\U00028F3E" +matches = [[0, 0], [1, 1], [2, 2], [3, 3], [4, 4]] +unicode = false +utf8 = false + +# See: https://github.com/rust-lang/regex/issues/268 +[[test]] +name = "partial-anchor" +regex = '^a|b' +haystack = "ba" +matches = [[0, 1]] + +# See: https://github.com/rust-lang/regex/issues/271 +[[test]] +name = "endl-or-word-boundary" +regex = '(?m:$)|(?-u:\b)' +haystack = "\U0006084E" +matches = [[4, 4]] + +# See: https://github.com/rust-lang/regex/issues/271 +[[test]] +name = "zero-or-end" +regex = '(?i-u:\x00)|$' +haystack = "\U000E682F" +matches = [[4, 4]] + +# See: https://github.com/rust-lang/regex/issues/271 +[[test]] +name = "y-or-endl" +regex = '(?i-u:y)|(?m:$)' +haystack = "\U000B4331" +matches = [[4, 4]] + +# See: https://github.com/rust-lang/regex/issues/271 +[[test]] +name = "word-boundary-start-x" +regex = '(?u:\b)^(?-u:X)' +haystack = "X" +matches = [[0, 1]] + +# See: https://github.com/rust-lang/regex/issues/271 +[[test]] +name = "word-boundary-ascii-start-x" +regex = '(?-u:\b)^(?-u:X)' +haystack = "X" +matches = [[0, 1]] + +# See: https://github.com/rust-lang/regex/issues/271 +[[test]] +name = "end-not-word-boundary" +regex = '$\B' +haystack = "\U0005C124\U000B576C" +matches = [[8, 8]] +unicode = false +utf8 = false + +# See: https://github.com/rust-lang/regex/issues/280 +[[test]] +name = "partial-anchor-alternate-begin" +regex = '^a|z' +haystack = "yyyyya" +matches = [] + +# See: https://github.com/rust-lang/regex/issues/280 +[[test]] +name = "partial-anchor-alternate-end" +regex = 'a$|z' +haystack = "ayyyyy" +matches = [] + +# See: https://github.com/rust-lang/regex/issues/289 +[[test]] +name = "lits-unambiguous-100" +regex = '(?:ABC|CDA|BC)X' +haystack = "CDAX" +matches = [[0, 4]] + +# See: https://github.com/rust-lang/regex/issues/291 +[[test]] +name = "lits-unambiguous-200" +regex = '((IMG|CAM|MG|MB2)_|(DSCN|CIMG))(?P<n>[0-9]+)$' +haystack = "CIMG2341" +matches = [ + [[0, 8], [0, 4], [], [0, 4], [4, 8]], +] + +# See: https://github.com/rust-lang/regex/issues/303 +# +# 2022-09-19: This has now been "properly" fixed in that empty character +# classes are fully supported as something that can never match. This test +# used to be marked as 'compiles = false', but now it works. +[[test]] +name = "negated-full-byte-range" +regex = '[^\x00-\xFF]' +haystack = "" +matches = [] +compiles = true +unicode = false +utf8 = false + +# See: https://github.com/rust-lang/regex/issues/321 +[[test]] +name = "strange-anchor-non-complete-prefix" +regex = 'a^{2}' +haystack = "" +matches = [] + +# See: https://github.com/rust-lang/regex/issues/321 +[[test]] +name = "strange-anchor-non-complete-suffix" +regex = '${2}a' +haystack = "" +matches = [] + +# See: https://github.com/rust-lang/regex/issues/334 +# See: https://github.com/rust-lang/regex/issues/557 +[[test]] +name = "captures-after-dfa-premature-end-100" +regex = 'a(b*(X|$))?' +haystack = "abcbX" +matches = [ + [[0, 1], [], []], +] + +# See: https://github.com/rust-lang/regex/issues/334 +# See: https://github.com/rust-lang/regex/issues/557 +[[test]] +name = "captures-after-dfa-premature-end-200" +regex = 'a(bc*(X|$))?' +haystack = "abcbX" +matches = [ + [[0, 1], [], []], +] + +# See: https://github.com/rust-lang/regex/issues/334 +# See: https://github.com/rust-lang/regex/issues/557 +[[test]] +name = "captures-after-dfa-premature-end-300" +regex = '(aa$)?' 
+haystack = "aaz" +matches = [ + [[0, 0], []], + [[1, 1], []], + [[2, 2], []], + [[3, 3], []], +] + +# Plucked from "Why aren’t regular expressions a lingua franca? an empirical +# study on the re-use and portability of regular expressions", The ACM Joint +# European Software Engineering Conference and Symposium on the Foundations of +# Software Engineering (ESEC/FSE), 2019. +# +# Link: https://dl.acm.org/doi/pdf/10.1145/3338906.3338909 +[[test]] +name = "captures-after-dfa-premature-end-400" +regex = '(a)\d*\.?\d+\b' +haystack = "a0.0c" +matches = [ + [[0, 2], [0, 1]], +] + +# See: https://github.com/rust-lang/regex/issues/437 +[[test]] +name = "literal-panic" +regex = 'typename type\-parameter\-[0-9]+\-[0-9]+::.+' +haystack = "test" +matches = [] + +# See: https://github.com/rust-lang/regex/issues/527 +[[test]] +name = "empty-flag-expr" +regex = '(?:(?:(?x)))' +haystack = "" +matches = [[0, 0]] + +# See: https://github.com/rust-lang/regex/issues/533 +#[[tests]] +#name = "blank-matches-nothing-between-space-and-tab" +#regex = '[[:blank:]]' +#input = '\x0A\x0B\x0C\x0D\x0E\x0F\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1A\x1B\x1C\x1D\x1E\x1F' +#match = false +#unescape = true + +# See: https://github.com/rust-lang/regex/issues/533 +#[[tests]] +#name = "blank-matches-nothing-between-space-and-tab-inverted" +#regex = '^[[:^blank:]]+$' +#input = '\x0A\x0B\x0C\x0D\x0E\x0F\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1A\x1B\x1C\x1D\x1E\x1F' +#match = true +#unescape = true + +# See: https://github.com/rust-lang/regex/issues/555 +[[test]] +name = "invalid-repetition" +regex = '(?m){1,1}' +haystack = "" +matches = [] +compiles = false + +# See: https://github.com/rust-lang/regex/issues/640 +[[test]] +name = "flags-are-unset" +regex = '(?:(?i)foo)|Bar' +haystack = "foo Foo bar Bar" +matches = [[0, 3], [4, 7], [12, 15]] + +# Note that 'Ј' is not 'j', but cyrillic Je +# https://en.wikipedia.org/wiki/Je_(Cyrillic) +# +# See: https://github.com/rust-lang/regex/issues/659 +[[test]] +name = "empty-group-with-unicode" +regex = '(?:)Ј01' +haystack = 'zЈ01' +matches = [[1, 5]] + +# See: https://github.com/rust-lang/regex/issues/579 +[[test]] +name = "word-boundary-weird" +regex = '\b..\b' +haystack = "I have 12, he has 2!" +matches = [[0, 2], [7, 9], [9, 11], [11, 13], [17, 19]] + +# See: https://github.com/rust-lang/regex/issues/579 +[[test]] +name = "word-boundary-weird-ascii" +regex = '\b..\b' +haystack = "I have 12, he has 2!" +matches = [[0, 2], [7, 9], [9, 11], [11, 13], [17, 19]] +unicode = false +utf8 = false + +# See: https://github.com/rust-lang/regex/issues/579 +[[test]] +name = "word-boundary-weird-minimal-ascii" +regex = '\b..\b' +haystack = "az,,b" +matches = [[0, 2], [2, 4]] +unicode = false +utf8 = false + +# See: https://github.com/BurntSushi/ripgrep/issues/1203 +[[test]] +name = "reverse-suffix-100" +regex = '[0-4][0-4][0-4]000' +haystack = "153.230000" +matches = [[4, 10]] + +# See: https://github.com/BurntSushi/ripgrep/issues/1203 +[[test]] +name = "reverse-suffix-200" +regex = '[0-9][0-9][0-9]000' +haystack = "153.230000\n" +matches = [[4, 10]] + +# This is a tricky case for the reverse suffix optimization, because it +# finds the 'foobar' match but the reverse scan must fail to find a match by +# correctly dealing with the word boundary following the 'foobar' literal when +# computing the start state. 
+# +# This test exists because I tried to break the following assumption that +# is currently in the code: that if a suffix is found and the reverse scan +# succeeds, then it's guaranteed that there is an overall match. Namely, the +# 'is_match' routine does *not* do another forward scan in this case because of +# this assumption. +[[test]] +name = "reverse-suffix-300" +regex = '\w+foobar\b' +haystack = "xyzfoobarZ" +matches = [] +unicode = false +utf8 = false + +# See: https://github.com/BurntSushi/ripgrep/issues/1247 +[[test]] +name = "stops" +regex = '\bs(?:[ab])' +haystack = 's\xE4' +matches = [] +unescape = true +utf8 = false + +# See: https://github.com/BurntSushi/ripgrep/issues/1247 +[[test]] +name = "stops-ascii" +regex = '(?-u:\b)s(?:[ab])' +haystack = 's\xE4' +matches = [] +unescape = true +utf8 = false + +# See: https://github.com/rust-lang/regex/issues/850 +[[test]] +name = "adjacent-line-boundary-100" +regex = '(?m)^(?:[^ ]+?)$' +haystack = "line1\nline2" +matches = [[0, 5], [6, 11]] + +# Continued. +[[test]] +name = "adjacent-line-boundary-200" +regex = '(?m)^(?:[^ ]+?)$' +haystack = "A\nB" +matches = [[0, 1], [2, 3]] + +# There is no issue for this bug. +[[test]] +name = "anchored-prefix-100" +regex = '^a[[:^space:]]' +haystack = "a " +matches = [] + +# There is no issue for this bug. +[[test]] +name = "anchored-prefix-200" +regex = '^a[[:^space:]]' +haystack = "foo boo a" +matches = [] + +# There is no issue for this bug. +[[test]] +name = "anchored-prefix-300" +regex = '^-[a-z]' +haystack = "r-f" +matches = [] + +# Tests that a possible Aho-Corasick optimization works correctly. It only +# kicks in when we have a lot of literals. By "works correctly," we mean that +# leftmost-first match semantics are properly respected. That is, samwise +# should match, not sam. +# +# There is no issue for this bug. +[[test]] +name = "aho-corasick-100" +regex = 'samwise|sam|a|b|c|d|e|f|g|h|i|j|k|l|m|n|o|p|q|r|s|t|u|v|w|x|y|z|A|B|C|D|E|F|G|H|I|J|K|L|M|N|O|P|Q|R|S|T|U|V|W|X|Y|Z' +haystack = "samwise" +matches = [[0, 7]] + +# See: https://github.com/rust-lang/regex/issues/921 +[[test]] +name = "interior-anchor-capture" +regex = '(a$)b$' +haystack = 'ab' +matches = [] + +# I found this bug in the course of adding some of the regexes that Ruff uses +# to rebar. It turns out that the lazy DFA was finding a match that was being +# rejected by the one-pass DFA. Yikes. I then minimized the regex and haystack. 
+# +# Source: https://github.com/charliermarsh/ruff/blob/a919041ddaa64cdf6f216f90dd0480dab69fd3ba/crates/ruff/src/rules/pycodestyle/rules/whitespace_around_keywords.rs#L52 +[[test]] +name = "ruff-whitespace-around-keywords" +regex = '^(a|ab)$' +haystack = "ab" +anchored = true +unicode = false +utf8 = true +matches = [[[0, 2], [0, 2]]] + +# From: https://github.com/rust-lang/regex/issues/429 +[[test]] +name = "i429-0" +regex = '(?:(?-u:\b)|(?u:h))+' +haystack = "h" +unicode = true +utf8 = false +matches = [[0, 0], [1, 1]] + +# From: https://github.com/rust-lang/regex/issues/429 +[[test]] +name = "i429-1" +regex = '(?u:\B)' +haystack = "鋸" +unicode = true +utf8 = false +matches = [] + +# From: https://github.com/rust-lang/regex/issues/429 +[[test]] +name = "i429-2" +regex = '(?:(?u:\b)|(?s-u:.))+' +haystack = "oB" +unicode = true +utf8 = false +matches = [[0, 0], [1, 2]] + +# From: https://github.com/rust-lang/regex/issues/429 +[[test]] +name = "i429-3" +regex = '(?:(?-u:\B)|(?su:.))+' +haystack = "\U000FEF80" +unicode = true +utf8 = false +matches = [[0, 0], [1, 1], [2, 2], [3, 3], [4, 4]] + +# From: https://github.com/rust-lang/regex/issues/429 +[[test]] +name = "i429-3-utf8" +regex = '(?:(?-u:\B)|(?su:.))+' +haystack = "\U000FEF80" +unicode = true +utf8 = true +matches = [[0, 0], [4, 4]] + +# From: https://github.com/rust-lang/regex/issues/429 +[[test]] +name = "i429-4" +regex = '(?m:$)(?m:^)(?su:.)' +haystack = "\n‣" +unicode = true +utf8 = false +matches = [[0, 1]] + +# From: https://github.com/rust-lang/regex/issues/429 +[[test]] +name = "i429-5" +regex = '(?m:$)^(?m:^)' +haystack = "\n" +unicode = true +utf8 = false +matches = [[0, 0]] + +# From: https://github.com/rust-lang/regex/issues/429 +[[test]] +name = "i429-6" +regex = '(?P<kp>(?iu:do)(?m:$))*' +haystack = "dodo" +unicode = true +utf8 = false +matches = [ + [[0, 0], []], + [[1, 1], []], + [[2, 4], [2, 4]], +] + +# From: https://github.com/rust-lang/regex/issues/429 +[[test]] +name = "i429-7" +regex = '(?u:\B)' +haystack = "䡁" +unicode = true +utf8 = false +matches = [] + +# From: https://github.com/rust-lang/regex/issues/429 +[[test]] +name = "i429-8" +regex = '(?:(?-u:\b)|(?u:[\u{0}-W]))+' +haystack = "0" +unicode = true +utf8 = false +matches = [[0, 0], [1, 1]] + +# From: https://github.com/rust-lang/regex/issues/429 +[[test]] +name = "i429-9" +regex = '((?m:$)(?-u:\B)(?s-u:.)(?-u:\B)$)' +haystack = "\n\n" +unicode = true +utf8 = false +matches = [ + [[1, 2], [1, 2]], +] + +# From: https://github.com/rust-lang/regex/issues/429 +[[test]] +name = "i429-10" +regex = '(?m:$)(?m:$)^(?su:.)' +haystack = "\n\u0081¨\u200a" +unicode = true +utf8 = false +matches = [[0, 1]] + +# From: https://github.com/rust-lang/regex/issues/429 +[[test]] +name = "i429-11" +regex = '(?-u:\B)(?m:^)' +haystack = "0\n" +unicode = true +utf8 = false +matches = [[2, 2]] + +# From: https://github.com/rust-lang/regex/issues/429 +[[test]] +name = "i429-12" +regex = '(?:(?u:\b)|(?-u:.))+' +haystack = "0" +unicode = true +utf8 = false +matches = [[0, 0], [1, 1]] + +# From: https://github.com/rust-lang/regex/issues/969 +[[test]] +name = "i969" +regex = 'c.*d\z' +haystack = "ababcd" +bounds = [4, 6] +search-kind = "earliest" +matches = [[4, 6]] + +# I found this during the regex-automata migration. This is the fowler basic +# 154 test, but without anchored = true and without a match limit. +# +# This test caught a subtle bug in the hybrid reverse DFA search, where it +# would skip over the termination condition if it entered a start state. 
This +# was a double bug. Firstly, the reverse DFA shouldn't have had start states +# specialized in the first place, and thus it shouldn't have possible to detect +# that the DFA had entered a start state. The second bug was that the start +# state handling was incorrect by jumping over the termination condition. +[[test]] +name = "fowler-basic154-unanchored" +regex = '''a([bc]*)c*''' +haystack = '''abc''' +matches = [[[0, 3], [1, 3]]] + +# From: https://github.com/rust-lang/regex/issues/981 +# +# This was never really a problem in the new architecture because the +# regex-automata engines are far more principled about how they deal with +# look-around. (This was one of the many reasons I wanted to re-work the +# original regex crate engines.) +[[test]] +name = "word-boundary-interact-poorly-with-literal-optimizations" +regex = '(?i:(?:\b|_)win(?:32|64|dows)?(?:\b|_))' +haystack = 'ubi-Darwin-x86_64.tar.gz' +matches = [] + +# This was found during fuzz testing of regex. It provoked a panic in the meta +# engine as a result of the reverse suffix optimization. Namely, it hit a case +# where a suffix match was found, a corresponding reverse match was found, but +# the forward search turned up no match. The forward search should always match +# if the suffix and reverse search match. +# +# This in turn uncovered an inconsistency between the PikeVM and the DFA (lazy +# and fully compiled) engines. It was caused by a mishandling of the collection +# of NFA state IDs in the generic determinization code (which is why both types +# of DFA were impacted). Namely, when a fail state was encountered (that's the +# `[^\s\S]` in the pattern below), then it would just stop collecting states. +# But that's not correct since a later state could lead to a match. +[[test]] +name = "impossible-branch" +regex = '.*[^\s\S]A|B' +haystack = "B" +matches = [[0, 1]] + +# This was found during fuzz testing in regex-lite. The regex crate never +# suffered from this bug, but it causes regex-lite to incorrectly compile +# captures. +[[test]] +name = "captures-wrong-order" +regex = '(a){0}(a)' +haystack = 'a' +matches = [[[0, 1], [], [0, 1]]] + +# This tests a bug in how quit states are handled in the DFA. At some point +# during development, the DFAs were tweaked slightly such that if they hit +# a quit state (which means, they hit a byte that the caller configured should +# stop the search), then it might not return an error necessarily. Namely, if a +# match had already been found, then it would be returned instead of an error. +# +# But this is actually wrong! Why? Because even though a match had been found, +# it wouldn't be fully correct to return it once a quit state has been seen +# because you can't determine whether the match offset returned is the correct +# greedy/leftmost-first match. Since you can't complete the search as requested +# by the caller, the DFA should just stop and return an error. +# +# Interestingly, this does seem to produce an unavoidable difference between +# 'try_is_match().unwrap()' and 'try_find().unwrap().is_some()' for the DFAs. +# The former will stop immediately once a match is known to occur and return +# 'Ok(true)', where as the latter could find the match but quit with an +# 'Err(..)' first. +# +# Thankfully, I believe this inconsistency between 'is_match()' and 'find()' +# cannot be observed in the higher level meta regex API because it specifically +# will try another engine that won't fail in the case of a DFA failing. 
+# +# This regression happened in the regex crate rewrite, but before anything got +# released. +[[test]] +name = "negated-unicode-word-boundary-dfa-fail" +regex = '\B.*' +haystack = "!\u02D7" +matches = [[0, 3]] + +# This failure was found in the *old* regex crate (prior to regex 1.9), but +# I didn't investigate why. My best guess is that it's a literal optimization +# bug. It didn't occur in the rewrite. +[[test]] +name = "missed-match" +regex = 'e..+e.ee>' +haystack = 'Zeee.eZZZZZZZZeee>eeeeeee>' +matches = [[1, 26]] + +# This test came from the 'ignore' crate and tripped a bug in how accelerated +# DFA states were handled in an overlapping search. +[[test]] +name = "regex-to-glob" +regex = ['(?-u)^path1/[^/]*$'] +haystack = "path1/foo" +matches = [[0, 9]] +utf8 = false +match-kind = "all" +search-kind = "overlapping" + +# See: https://github.com/rust-lang/regex/issues/1060 +[[test]] +name = "reverse-inner-plus-shorter-than-expected" +regex = '(?:(\d+)[:.])?(\d{1,2})[:.](\d{2})' +haystack = '102:12:39' +matches = [[[0, 9], [0, 3], [4, 6], [7, 9]]] + +# Like reverse-inner-plus-shorter-than-expected, but using a far simpler regex +# to demonstrate the extent of the rot. Sigh. +# +# See: https://github.com/rust-lang/regex/issues/1060 +[[test]] +name = "reverse-inner-short" +regex = '(?:([0-9][0-9][0-9]):)?([0-9][0-9]):([0-9][0-9])' +haystack = '102:12:39' +matches = [[[0, 9], [0, 3], [4, 6], [7, 9]]] + +# This regression test was found via the RegexSet APIs. It triggered a +# particular code path where a regex was compiled with 'All' match semantics +# (to support overlapping search), but got funneled down into a standard +# leftmost search when calling 'is_match'. This is fine on its own, but the +# leftmost search will use a prefilter and that's where this went awry. +# +# Namely, since 'All' semantics were used, the aho-corasick prefilter was +# incorrectly compiled with 'Standard' semantics. This was wrong because +# 'Standard' immediately attempts to report a match at every position, even if +# that would mean reporting a match past the leftmost match before reporting +# the leftmost match. This breaks the prefilter contract of never having false +# negatives and leads overall to the engine not finding a match. +# +# See: https://github.com/rust-lang/regex/issues/1070 +[[test]] +name = "prefilter-with-aho-corasick-standard-semantics" +regex = '(?m)^ *v [0-9]' +haystack = 'v 0' +matches = [ + { id = 0, spans = [[0, 3]] }, +] +match-kind = "all" +search-kind = "overlapping" +unicode = true +utf8 = true + +# This tests that the PikeVM and the meta regex agree on a particular regex. +# This test previously failed when the ad hoc engines inside the meta engine +# did not handle quit states correctly. Namely, the Unicode word boundary here +# combined with a non-ASCII codepoint provokes the quit state. The ad hoc +# engines were previously returning a match even after entering the quit state +# if a match had been previously detected, but this is incorrect. The reason +# is that if a quit state is found, then the search must give up *immediately* +# because it prevents the search from finding the "proper" leftmost-first +# match. If it instead returns a match that has been found, it risks reporting +# an improper match, as it did in this case. 
+# +# See: https://github.com/rust-lang/regex/issues/1046 +[[test]] +name = "non-prefix-literal-quit-state" +regex = '.+\b\n' +haystack = "β77\n" +matches = [[0, 5]] + +# This is a regression test for some errant HIR interval set operations that +# were made in the regex-syntax 0.8.0 release and then reverted in 0.8.1. The +# issue here is that the HIR produced from the regex had out-of-order ranges. +# +# See: https://github.com/rust-lang/regex/issues/1103 +# Ref: https://github.com/rust-lang/regex/pull/1051 +# Ref: https://github.com/rust-lang/regex/pull/1102 +[[test]] +name = "hir-optimization-out-of-order-class" +regex = '^[[:alnum:]./-]+$' +haystack = "a-b" +matches = [[0, 3]] + +# This is a regression test for an improper reverse suffix optimization. This +# occurred when I "broadened" the applicability of the optimization to include +# multiple possible literal suffixes instead of only sticking to a non-empty +# longest common suffix. It turns out that, at least given how the reverse +# suffix optimization works, we need to stick to the longest common suffix for +# now. +# +# See: https://github.com/rust-lang/regex/issues/1110 +# See also: https://github.com/astral-sh/ruff/pull/7980 +[[test]] +name = 'improper-reverse-suffix-optimization' +regex = '(\\N\{[^}]+})|([{}])' +haystack = 'hiya \N{snowman} bye' +matches = [[[5, 16], [5, 16], []]] diff --git a/vendor/regex/testdata/set.toml b/vendor/regex/testdata/set.toml new file mode 100644 index 00000000000000..049e8a89d1bcb8 --- /dev/null +++ b/vendor/regex/testdata/set.toml @@ -0,0 +1,641 @@ +# Basic multi-regex tests. + +[[test]] +name = "basic10" +regex = ["a", "a"] +haystack = "a" +matches = [ + { id = 0, span = [0, 1] }, + { id = 1, span = [0, 1] }, +] +match-kind = "all" +search-kind = "overlapping" + +[[test]] +name = "basic10-leftmost-first" +regex = ["a", "a"] +haystack = "a" +matches = [ + { id = 0, span = [0, 1] }, +] +match-kind = "leftmost-first" +search-kind = "leftmost" + +[[test]] +name = "basic20" +regex = ["a", "a"] +haystack = "ba" +matches = [ + { id = 0, span = [1, 2] }, + { id = 1, span = [1, 2] }, +] +match-kind = "all" +search-kind = "overlapping" + +[[test]] +name = "basic30" +regex = ["a", "b"] +haystack = "a" +matches = [ + { id = 0, span = [0, 1] }, +] +match-kind = "all" +search-kind = "overlapping" + +[[test]] +name = "basic40" +regex = ["a", "b"] +haystack = "b" +matches = [ + { id = 1, span = [0, 1] }, +] +match-kind = "all" +search-kind = "overlapping" + +[[test]] +name = "basic50" +regex = ["a|b", "b|a"] +haystack = "b" +matches = [ + { id = 0, span = [0, 1] }, + { id = 1, span = [0, 1] }, +] +match-kind = "all" +search-kind = "overlapping" + +[[test]] +name = "basic60" +regex = ["foo", "oo"] +haystack = "foo" +matches = [ + { id = 0, span = [0, 3] }, + { id = 1, span = [1, 3] }, +] +match-kind = "all" +search-kind = "overlapping" + +[[test]] +name = "basic60-leftmost-first" +regex = ["foo", "oo"] +haystack = "foo" +matches = [ + { id = 0, span = [0, 3] }, +] +match-kind = "leftmost-first" +search-kind = "leftmost" + +[[test]] +name = "basic61" +regex = ["oo", "foo"] +haystack = "foo" +matches = [ + { id = 1, span = [0, 3] }, + { id = 0, span = [1, 3] }, +] +match-kind = "all" +search-kind = "overlapping" + +[[test]] +name = "basic61-leftmost-first" +regex = ["oo", "foo"] +haystack = "foo" +matches = [ + { id = 1, span = [0, 3] }, +] +match-kind = "leftmost-first" +search-kind = "leftmost" + +[[test]] +name = "basic70" +regex = ["abcd", "bcd", "cd", "d"] +haystack = "abcd" +matches = [ + { id = 0, span 
= [0, 4] }, + { id = 1, span = [1, 4] }, + { id = 2, span = [2, 4] }, + { id = 3, span = [3, 4] }, +] +match-kind = "all" +search-kind = "overlapping" + +[[test]] +name = "basic71" +regex = ["bcd", "cd", "d", "abcd"] +haystack = "abcd" +matches = [ + { id = 3, span = [0, 4] }, +] +match-kind = "leftmost-first" +search-kind = "leftmost" + +[[test]] +name = "basic80" +regex = ["^foo", "bar$"] +haystack = "foo" +matches = [ + { id = 0, span = [0, 3] }, +] +match-kind = "all" +search-kind = "overlapping" + +[[test]] +name = "basic81" +regex = ["^foo", "bar$"] +haystack = "foo bar" +matches = [ + { id = 0, span = [0, 3] }, + { id = 1, span = [4, 7] }, +] +match-kind = "all" +search-kind = "overlapping" + +[[test]] +name = "basic82" +regex = ["^foo", "bar$"] +haystack = "bar" +matches = [ + { id = 1, span = [0, 3] }, +] +match-kind = "all" +search-kind = "overlapping" + +[[test]] +name = "basic90" +regex = ["[a-z]+$", "foo"] +haystack = "01234 foo" +matches = [ + { id = 0, span = [8, 9] }, + { id = 0, span = [7, 9] }, + { id = 0, span = [6, 9] }, + { id = 1, span = [6, 9] }, +] +match-kind = "all" +search-kind = "overlapping" + +[[test]] +name = "basic91" +regex = ["[a-z]+$", "foo"] +haystack = "foo 01234" +matches = [ + { id = 1, span = [0, 3] }, +] +match-kind = "all" +search-kind = "overlapping" + +[[test]] +name = "basic100" +regex = [".*?", "a"] +haystack = "zzza" +matches = [ + { id = 0, span = [0, 0] }, + { id = 0, span = [1, 1] }, + { id = 0, span = [0, 1] }, + { id = 0, span = [2, 2] }, + { id = 0, span = [1, 2] }, + { id = 0, span = [0, 2] }, + { id = 0, span = [3, 3] }, + { id = 0, span = [2, 3] }, + { id = 0, span = [1, 3] }, + { id = 0, span = [0, 3] }, + { id = 0, span = [4, 4] }, + { id = 0, span = [3, 4] }, + { id = 0, span = [2, 4] }, + { id = 0, span = [1, 4] }, + { id = 0, span = [0, 4] }, + { id = 1, span = [3, 4] }, +] +match-kind = "all" +search-kind = "overlapping" + +[[test]] +name = "basic101" +regex = [".*", "a"] +haystack = "zzza" +matches = [ + { id = 0, span = [0, 0] }, + { id = 0, span = [1, 1] }, + { id = 0, span = [0, 1] }, + { id = 0, span = [2, 2] }, + { id = 0, span = [1, 2] }, + { id = 0, span = [0, 2] }, + { id = 0, span = [3, 3] }, + { id = 0, span = [2, 3] }, + { id = 0, span = [1, 3] }, + { id = 0, span = [0, 3] }, + { id = 0, span = [4, 4] }, + { id = 0, span = [3, 4] }, + { id = 0, span = [2, 4] }, + { id = 0, span = [1, 4] }, + { id = 0, span = [0, 4] }, + { id = 1, span = [3, 4] }, +] +match-kind = "all" +search-kind = "overlapping" + +[[test]] +name = "basic102" +regex = [".*", "a"] +haystack = "zzz" +matches = [ + { id = 0, span = [0, 0] }, + { id = 0, span = [1, 1] }, + { id = 0, span = [0, 1] }, + { id = 0, span = [2, 2] }, + { id = 0, span = [1, 2] }, + { id = 0, span = [0, 2] }, + { id = 0, span = [3, 3] }, + { id = 0, span = [2, 3] }, + { id = 0, span = [1, 3] }, + { id = 0, span = [0, 3] }, +] +match-kind = "all" +search-kind = "overlapping" + +[[test]] +name = "basic110" +regex = ['\ba\b'] +haystack = "hello a bye" +matches = [ + { id = 0, span = [6, 7] }, +] +match-kind = "all" +search-kind = "overlapping" + +[[test]] +name = "basic111" +regex = ['\ba\b', '\be\b'] +haystack = "hello a bye e" +matches = [ + { id = 0, span = [6, 7] }, + { id = 1, span = [12, 13] }, +] +match-kind = "all" +search-kind = "overlapping" + +[[test]] +name = "basic120" +regex = ["a"] +haystack = "a" +matches = [ + { id = 0, span = [0, 1] }, +] +match-kind = "all" +search-kind = "overlapping" + +[[test]] +name = "basic121" +regex = [".*a"] +haystack = "a" +matches = [ 
+ { id = 0, span = [0, 1] }, +] +match-kind = "all" +search-kind = "overlapping" + +[[test]] +name = "basic122" +regex = [".*a", "β"] +haystack = "β" +matches = [ + { id = 1, span = [0, 2] }, +] +match-kind = "all" +search-kind = "overlapping" + +[[test]] +name = "basic130" +regex = ["ab", "b"] +haystack = "ba" +matches = [ + { id = 1, span = [0, 1] }, +] +match-kind = "all" +search-kind = "overlapping" + +# These test cases where one of the regexes matches the empty string. + +[[test]] +name = "empty10" +regex = ["", "a"] +haystack = "abc" +matches = [ + { id = 0, span = [0, 0] }, + { id = 1, span = [0, 1] }, + { id = 0, span = [1, 1] }, + { id = 0, span = [2, 2] }, + { id = 0, span = [3, 3] }, +] +match-kind = "all" +search-kind = "overlapping" + +[[test]] +name = "empty10-leftmost-first" +regex = ["", "a"] +haystack = "abc" +matches = [ + { id = 0, span = [0, 0] }, + { id = 0, span = [1, 1] }, + { id = 0, span = [2, 2] }, + { id = 0, span = [3, 3] }, +] +match-kind = "leftmost-first" +search-kind = "leftmost" + +[[test]] +name = "empty11" +regex = ["a", ""] +haystack = "abc" +matches = [ + { id = 1, span = [0, 0] }, + { id = 0, span = [0, 1] }, + { id = 1, span = [1, 1] }, + { id = 1, span = [2, 2] }, + { id = 1, span = [3, 3] }, +] +match-kind = "all" +search-kind = "overlapping" + +[[test]] +name = "empty11-leftmost-first" +regex = ["a", ""] +haystack = "abc" +matches = [ + { id = 0, span = [0, 1] }, + { id = 1, span = [2, 2] }, + { id = 1, span = [3, 3] }, +] +match-kind = "leftmost-first" +search-kind = "leftmost" + +[[test]] +name = "empty20" +regex = ["", "b"] +haystack = "abc" +matches = [ + { id = 0, span = [0, 0] }, + { id = 0, span = [1, 1] }, + { id = 1, span = [1, 2] }, + { id = 0, span = [2, 2] }, + { id = 0, span = [3, 3] }, +] +match-kind = "all" +search-kind = "overlapping" + +[[test]] +name = "empty20-leftmost-first" +regex = ["", "b"] +haystack = "abc" +matches = [ + { id = 0, span = [0, 0] }, + { id = 0, span = [1, 1] }, + { id = 0, span = [2, 2] }, + { id = 0, span = [3, 3] }, +] +match-kind = "leftmost-first" +search-kind = "leftmost" + +[[test]] +name = "empty21" +regex = ["b", ""] +haystack = "abc" +matches = [ + { id = 1, span = [0, 0] }, + { id = 1, span = [1, 1] }, + { id = 0, span = [1, 2] }, + { id = 1, span = [2, 2] }, + { id = 1, span = [3, 3] }, +] +match-kind = "all" +search-kind = "overlapping" + +[[test]] +name = "empty21-leftmost-first" +regex = ["b", ""] +haystack = "abc" +matches = [ + { id = 1, span = [0, 0] }, + { id = 0, span = [1, 2] }, + { id = 1, span = [3, 3] }, +] +match-kind = "leftmost-first" +search-kind = "leftmost" + +[[test]] +name = "empty22" +regex = ["(?:)", "b"] +haystack = "abc" +matches = [ + { id = 0, span = [0, 0] }, + { id = 0, span = [1, 1] }, + { id = 1, span = [1, 2] }, + { id = 0, span = [2, 2] }, + { id = 0, span = [3, 3] }, +] +match-kind = "all" +search-kind = "overlapping" + +[[test]] +name = "empty23" +regex = ["b", "(?:)"] +haystack = "abc" +matches = [ + { id = 1, span = [0, 0] }, + { id = 1, span = [1, 1] }, + { id = 0, span = [1, 2] }, + { id = 1, span = [2, 2] }, + { id = 1, span = [3, 3] }, +] +match-kind = "all" +search-kind = "overlapping" + +[[test]] +name = "empty30" +regex = ["", "z"] +haystack = "abc" +matches = [ + { id = 0, span = [0, 0] }, + { id = 0, span = [1, 1] }, + { id = 0, span = [2, 2] }, + { id = 0, span = [3, 3] }, +] +match-kind = "all" +search-kind = "overlapping" + +[[test]] +name = "empty30-leftmost-first" +regex = ["", "z"] +haystack = "abc" +matches = [ + { id = 0, span = [0, 0] }, + { id 
= 0, span = [1, 1] }, + { id = 0, span = [2, 2] }, + { id = 0, span = [3, 3] }, +] +match-kind = "leftmost-first" +search-kind = "leftmost" + +[[test]] +name = "empty31" +regex = ["z", ""] +haystack = "abc" +matches = [ + { id = 1, span = [0, 0] }, + { id = 1, span = [1, 1] }, + { id = 1, span = [2, 2] }, + { id = 1, span = [3, 3] }, +] +match-kind = "all" +search-kind = "overlapping" + +[[test]] +name = "empty31-leftmost-first" +regex = ["z", ""] +haystack = "abc" +matches = [ + { id = 1, span = [0, 0] }, + { id = 1, span = [1, 1] }, + { id = 1, span = [2, 2] }, + { id = 1, span = [3, 3] }, +] +match-kind = "leftmost-first" +search-kind = "leftmost" + +[[test]] +name = "empty40" +regex = ["c(?:)", "b"] +haystack = "abc" +matches = [ + { id = 1, span = [1, 2] }, + { id = 0, span = [2, 3] }, +] +match-kind = "all" +search-kind = "overlapping" + +[[test]] +name = "empty40-leftmost-first" +regex = ["c(?:)", "b"] +haystack = "abc" +matches = [ + { id = 1, span = [1, 2] }, + { id = 0, span = [2, 3] }, +] +match-kind = "leftmost-first" +search-kind = "leftmost" + +# These test cases where there are no matches. + +[[test]] +name = "nomatch10" +regex = ["a", "a"] +haystack = "b" +matches = [] +match-kind = "all" +search-kind = "overlapping" + +[[test]] +name = "nomatch20" +regex = ["^foo", "bar$"] +haystack = "bar foo" +matches = [] +match-kind = "all" +search-kind = "overlapping" + +[[test]] +name = "nomatch30" +regex = [] +haystack = "a" +matches = [] +match-kind = "all" +search-kind = "overlapping" + +[[test]] +name = "nomatch40" +regex = ["^rooted$", '\.log$'] +haystack = "notrooted" +matches = [] +match-kind = "all" +search-kind = "overlapping" + +# These test multi-regex searches with capture groups. +# +# NOTE: I wrote these tests in the course of developing a first class API for +# overlapping capturing group matches, but ultimately removed that API because +# the semantics for overlapping matches aren't totally clear. However, I've +# left the tests because I believe the semantics for these patterns are clear +# and because we can still test our "which patterns matched" APIs with them. 
+ +[[test]] +name = "caps-010" +regex = ['^(\w+) (\w+)$', '^(\S+) (\S+)$'] +haystack = "Bruce Springsteen" +matches = [ + { id = 0, spans = [[0, 17], [0, 5], [6, 17]] }, + { id = 1, spans = [[0, 17], [0, 5], [6, 17]] }, +] +match-kind = "all" +search-kind = "overlapping" +unicode = false +utf8 = false + +[[test]] +name = "caps-020" +regex = ['^(\w+) (\w+)$', '^[A-Z](\S+) [A-Z](\S+)$'] +haystack = "Bruce Springsteen" +matches = [ + { id = 0, spans = [[0, 17], [0, 5], [6, 17]] }, + { id = 1, spans = [[0, 17], [1, 5], [7, 17]] }, +] +match-kind = "all" +search-kind = "overlapping" +unicode = false +utf8 = false + +[[test]] +name = "caps-030" +regex = ['^(\w+) (\w+)$', '^([A-Z])(\S+) ([A-Z])(\S+)$'] +haystack = "Bruce Springsteen" +matches = [ + { id = 0, spans = [[0, 17], [0, 5], [6, 17]] }, + { id = 1, spans = [[0, 17], [0, 1], [1, 5], [6, 7], [7, 17]] }, +] +match-kind = "all" +search-kind = "overlapping" +unicode = false +utf8 = false + +[[test]] +name = "caps-110" +regex = ['(\w+) (\w+)', '(\S+) (\S+)'] +haystack = "Bruce Springsteen" +matches = [ + { id = 0, spans = [[0, 17], [0, 5], [6, 17]] }, +] +match-kind = "leftmost-first" +search-kind = "leftmost" +unicode = false +utf8 = false + +[[test]] +name = "caps-120" +regex = ['(\w+) (\w+)', '(\S+) (\S+)'] +haystack = "&ruce $pringsteen" +matches = [ + { id = 1, spans = [[0, 17], [0, 5], [6, 17]] }, +] +match-kind = "leftmost-first" +search-kind = "leftmost" +unicode = false +utf8 = false + +[[test]] +name = "caps-121" +regex = ['(\w+) (\w+)', '(\S+) (\S+)'] +haystack = "&ruce $pringsteen Foo Bar" +matches = [ + { id = 1, spans = [[0, 17], [0, 5], [6, 17]] }, + { id = 0, spans = [[18, 25], [18, 21], [22, 25]] }, +] +match-kind = "leftmost-first" +search-kind = "leftmost" +unicode = false +utf8 = false diff --git a/vendor/regex/testdata/substring.toml b/vendor/regex/testdata/substring.toml new file mode 100644 index 00000000000000..69595ce851de32 --- /dev/null +++ b/vendor/regex/testdata/substring.toml @@ -0,0 +1,36 @@ +# These tests check that regex engines perform as expected when the search is +# instructed to only search a substring of a haystack instead of the entire +# haystack. This tends to exercise interesting edge cases that are otherwise +# difficult to provoke. (But not necessarily impossible. Regex search iterators +# for example, make use of the "search just a substring" APIs by changing the +# starting position of a search to the end position of the previous match.) + +[[test]] +name = "unicode-word-start" +regex = '\b[0-9]+\b' +haystack = "β123" +bounds = { start = 2, end = 5 } +matches = [] + +[[test]] +name = "unicode-word-end" +regex = '\b[0-9]+\b' +haystack = "123β" +bounds = { start = 0, end = 3 } +matches = [] + +[[test]] +name = "ascii-word-start" +regex = '\b[0-9]+\b' +haystack = "β123" +bounds = { start = 2, end = 5 } +matches = [[2, 5]] +unicode = false + +[[test]] +name = "ascii-word-end" +regex = '\b[0-9]+\b' +haystack = "123β" +bounds = { start = 0, end = 3 } +matches = [[0, 3]] +unicode = false diff --git a/vendor/regex/testdata/unicode.toml b/vendor/regex/testdata/unicode.toml new file mode 100644 index 00000000000000..f4ac76bae65c12 --- /dev/null +++ b/vendor/regex/testdata/unicode.toml @@ -0,0 +1,517 @@ +# Basic Unicode literal support. 
+[[test]] +name = "literal1" +regex = '☃' +haystack = "☃" +matches = [[0, 3]] + +[[test]] +name = "literal2" +regex = '☃+' +haystack = "☃" +matches = [[0, 3]] + +[[test]] +name = "literal3" +regex = '☃+' +haystack = "☃" +matches = [[0, 3]] +case-insensitive = true + +[[test]] +name = "literal4" +regex = 'Δ' +haystack = "δ" +matches = [[0, 2]] +case-insensitive = true + +# Unicode word boundaries. +[[test]] +name = "wb-100" +regex = '\d\b' +haystack = "6δ" +matches = [] + +[[test]] +name = "wb-200" +regex = '\d\b' +haystack = "6 " +matches = [[0, 1]] + +[[test]] +name = "wb-300" +regex = '\d\B' +haystack = "6δ" +matches = [[0, 1]] + +[[test]] +name = "wb-400" +regex = '\d\B' +haystack = "6 " +matches = [] + +# Unicode character class support. +[[test]] +name = "class1" +regex = '[☃Ⅰ]+' +haystack = "☃" +matches = [[0, 3]] + +[[test]] +name = "class2" +regex = '\pN' +haystack = "Ⅰ" +matches = [[0, 3]] + +[[test]] +name = "class3" +regex = '\pN+' +haystack = "Ⅰ1Ⅱ2" +matches = [[0, 8]] + +[[test]] +name = "class4" +regex = '\PN+' +haystack = "abⅠ" +matches = [[0, 2]] + +[[test]] +name = "class5" +regex = '[\PN]+' +haystack = "abⅠ" +matches = [[0, 2]] + +[[test]] +name = "class6" +regex = '[^\PN]+' +haystack = "abⅠ" +matches = [[2, 5]] + +[[test]] +name = "class7" +regex = '\p{Lu}+' +haystack = "ΛΘΓΔα" +matches = [[0, 8]] + +[[test]] +name = "class8" +regex = '\p{Lu}+' +haystack = "ΛΘΓΔα" +matches = [[0, 10]] +case-insensitive = true + +[[test]] +name = "class9" +regex = '\pL+' +haystack = "ΛΘΓΔα" +matches = [[0, 10]] + +[[test]] +name = "class10" +regex = '\p{Ll}+' +haystack = "ΛΘΓΔα" +matches = [[8, 10]] + +# Unicode aware "Perl" character classes. +[[test]] +name = "perl1" +regex = '\w+' +haystack = "dδd" +matches = [[0, 4]] + +[[test]] +name = "perl2" +regex = '\w+' +haystack = "⥡" +matches = [] + +[[test]] +name = "perl3" +regex = '\W+' +haystack = "⥡" +matches = [[0, 3]] + +[[test]] +name = "perl4" +regex = '\d+' +haystack = "1२३9" +matches = [[0, 8]] + +[[test]] +name = "perl5" +regex = '\d+' +haystack = "Ⅱ" +matches = [] + +[[test]] +name = "perl6" +regex = '\D+' +haystack = "Ⅱ" +matches = [[0, 3]] + +[[test]] +name = "perl7" +regex = '\s+' +haystack = " " +matches = [[0, 3]] + +[[test]] +name = "perl8" +regex = '\s+' +haystack = "☃" +matches = [] + +[[test]] +name = "perl9" +regex = '\S+' +haystack = "☃" +matches = [[0, 3]] + +# Specific tests for Unicode general category classes. 
+[[test]] +name = "class-gencat1" +regex = '\p{Cased_Letter}' +haystack = "A" +matches = [[0, 3]] + +[[test]] +name = "class-gencat2" +regex = '\p{Close_Punctuation}' +haystack = "❯" +matches = [[0, 3]] + +[[test]] +name = "class-gencat3" +regex = '\p{Connector_Punctuation}' +haystack = "⁀" +matches = [[0, 3]] + +[[test]] +name = "class-gencat4" +regex = '\p{Control}' +haystack = "\u009F" +matches = [[0, 2]] + +[[test]] +name = "class-gencat5" +regex = '\p{Currency_Symbol}' +haystack = "£" +matches = [[0, 3]] + +[[test]] +name = "class-gencat6" +regex = '\p{Dash_Punctuation}' +haystack = "〰" +matches = [[0, 3]] + +[[test]] +name = "class-gencat7" +regex = '\p{Decimal_Number}' +haystack = "𑓙" +matches = [[0, 4]] + +[[test]] +name = "class-gencat8" +regex = '\p{Enclosing_Mark}' +haystack = "\uA672" +matches = [[0, 3]] + +[[test]] +name = "class-gencat9" +regex = '\p{Final_Punctuation}' +haystack = "⸡" +matches = [[0, 3]] + +[[test]] +name = "class-gencat10" +regex = '\p{Format}' +haystack = "\U000E007F" +matches = [[0, 4]] + +[[test]] +name = "class-gencat11" +regex = '\p{Initial_Punctuation}' +haystack = "⸜" +matches = [[0, 3]] + +[[test]] +name = "class-gencat12" +regex = '\p{Letter}' +haystack = "Έ" +matches = [[0, 2]] + +[[test]] +name = "class-gencat13" +regex = '\p{Letter_Number}' +haystack = "ↂ" +matches = [[0, 3]] + +[[test]] +name = "class-gencat14" +regex = '\p{Line_Separator}' +haystack = "\u2028" +matches = [[0, 3]] + +[[test]] +name = "class-gencat15" +regex = '\p{Lowercase_Letter}' +haystack = "ϛ" +matches = [[0, 2]] + +[[test]] +name = "class-gencat16" +regex = '\p{Mark}' +haystack = "\U000E01EF" +matches = [[0, 4]] + +[[test]] +name = "class-gencat17" +regex = '\p{Math}' +haystack = "⋿" +matches = [[0, 3]] + +[[test]] +name = "class-gencat18" +regex = '\p{Modifier_Letter}' +haystack = "𖭃" +matches = [[0, 4]] + +[[test]] +name = "class-gencat19" +regex = '\p{Modifier_Symbol}' +haystack = "🏿" +matches = [[0, 4]] + +[[test]] +name = "class-gencat20" +regex = '\p{Nonspacing_Mark}' +haystack = "\U0001E94A" +matches = [[0, 4]] + +[[test]] +name = "class-gencat21" +regex = '\p{Number}' +haystack = "⓿" +matches = [[0, 3]] + +[[test]] +name = "class-gencat22" +regex = '\p{Open_Punctuation}' +haystack = "⦅" +matches = [[0, 3]] + +[[test]] +name = "class-gencat23" +regex = '\p{Other}' +haystack = "\u0BC9" +matches = [[0, 3]] + +[[test]] +name = "class-gencat24" +regex = '\p{Other_Letter}' +haystack = "ꓷ" +matches = [[0, 3]] + +[[test]] +name = "class-gencat25" +regex = '\p{Other_Number}' +haystack = "㉏" +matches = [[0, 3]] + +[[test]] +name = "class-gencat26" +regex = '\p{Other_Punctuation}' +haystack = "𞥞" +matches = [[0, 4]] + +[[test]] +name = "class-gencat27" +regex = '\p{Other_Symbol}' +haystack = "⅌" +matches = [[0, 3]] + +[[test]] +name = "class-gencat28" +regex = '\p{Paragraph_Separator}' +haystack = "\u2029" +matches = [[0, 3]] + +[[test]] +name = "class-gencat29" +regex = '\p{Private_Use}' +haystack = "\U0010FFFD" +matches = [[0, 4]] + +[[test]] +name = "class-gencat30" +regex = '\p{Punctuation}' +haystack = "𑁍" +matches = [[0, 4]] + +[[test]] +name = "class-gencat31" +regex = '\p{Separator}' +haystack = "\u3000" +matches = [[0, 3]] + +[[test]] +name = "class-gencat32" +regex = '\p{Space_Separator}' +haystack = "\u205F" +matches = [[0, 3]] + +[[test]] +name = "class-gencat33" +regex = '\p{Spacing_Mark}' +haystack = "\U00016F7E" +matches = [[0, 4]] + +[[test]] +name = "class-gencat34" +regex = '\p{Symbol}' +haystack = "⯈" +matches = [[0, 3]] + +[[test]] +name = "class-gencat35" 
+regex = '\p{Titlecase_Letter}' +haystack = "ῼ" +matches = [[0, 3]] + +[[test]] +name = "class-gencat36" +regex = '\p{Unassigned}' +haystack = "\U0010FFFF" +matches = [[0, 4]] + +[[test]] +name = "class-gencat37" +regex = '\p{Uppercase_Letter}' +haystack = "Ꝋ" +matches = [[0, 3]] + + +# Tests for Unicode emoji properties. +[[test]] +name = "class-emoji1" +regex = '\p{Emoji}' +haystack = "\u23E9" +matches = [[0, 3]] + +[[test]] +name = "class-emoji2" +regex = '\p{emoji}' +haystack = "\U0001F21A" +matches = [[0, 4]] + +[[test]] +name = "class-emoji3" +regex = '\p{extendedpictographic}' +haystack = "\U0001FA6E" +matches = [[0, 4]] + +[[test]] +name = "class-emoji4" +regex = '\p{extendedpictographic}' +haystack = "\U0001FFFD" +matches = [[0, 4]] + + +# Tests for Unicode grapheme cluster properties. +[[test]] +name = "class-gcb1" +regex = '\p{grapheme_cluster_break=prepend}' +haystack = "\U00011D46" +matches = [[0, 4]] + +[[test]] +name = "class-gcb2" +regex = '\p{gcb=regional_indicator}' +haystack = "\U0001F1E6" +matches = [[0, 4]] + +[[test]] +name = "class-gcb3" +regex = '\p{gcb=ri}' +haystack = "\U0001F1E7" +matches = [[0, 4]] + +[[test]] +name = "class-gcb4" +regex = '\p{regionalindicator}' +haystack = "\U0001F1FF" +matches = [[0, 4]] + +[[test]] +name = "class-gcb5" +regex = '\p{gcb=lvt}' +haystack = "\uC989" +matches = [[0, 3]] + +[[test]] +name = "class-gcb6" +regex = '\p{gcb=zwj}' +haystack = "\u200D" +matches = [[0, 3]] + +# Tests for Unicode word boundary properties. +[[test]] +name = "class-word-break1" +regex = '\p{word_break=Hebrew_Letter}' +haystack = "\uFB46" +matches = [[0, 3]] + +[[test]] +name = "class-word-break2" +regex = '\p{wb=hebrewletter}' +haystack = "\uFB46" +matches = [[0, 3]] + +[[test]] +name = "class-word-break3" +regex = '\p{wb=ExtendNumLet}' +haystack = "\uFF3F" +matches = [[0, 3]] + +[[test]] +name = "class-word-break4" +regex = '\p{wb=WSegSpace}' +haystack = "\u3000" +matches = [[0, 3]] + +[[test]] +name = "class-word-break5" +regex = '\p{wb=numeric}' +haystack = "\U0001E950" +matches = [[0, 4]] + +# Tests for Unicode sentence boundary properties. +[[test]] +name = "class-sentence-break1" +regex = '\p{sentence_break=Lower}' +haystack = "\u0469" +matches = [[0, 2]] + +[[test]] +name = "class-sentence-break2" +regex = '\p{sb=lower}' +haystack = "\u0469" +matches = [[0, 2]] + +[[test]] +name = "class-sentence-break3" +regex = '\p{sb=Close}' +haystack = "\uFF60" +matches = [[0, 3]] + +[[test]] +name = "class-sentence-break4" +regex = '\p{sb=Close}' +haystack = "\U0001F677" +matches = [[0, 4]] + +[[test]] +name = "class-sentence-break5" +regex = '\p{sb=SContinue}' +haystack = "\uFF64" +matches = [[0, 3]] diff --git a/vendor/regex/testdata/utf8.toml b/vendor/regex/testdata/utf8.toml new file mode 100644 index 00000000000000..39e284b3828039 --- /dev/null +++ b/vendor/regex/testdata/utf8.toml @@ -0,0 +1,399 @@ +# These test the UTF-8 modes expose by regex-automata. Namely, when utf8 is +# true, then we promise that the haystack is valid UTF-8. (Otherwise behavior +# is unspecified.) This also corresponds to building the regex engine with the +# following two guarantees: +# +# 1) For any non-empty match reported, its span is guaranteed to correspond to +# valid UTF-8. +# 2) All empty or zero-width matches reported must never split a UTF-8 +# encoded codepoint. If the haystack has invalid UTF-8, then this results in +# unspecified behavior. 
+# +# The (2) is in particular what we focus our testing on since (1) is generally +# guaranteed by regex-syntax's AST-to-HIR translator and is well tested there. +# The thing with (2) is that it can't be described in the HIR, so the regex +# engines have to handle that case. Thus, we test it here. +# +# Note that it is possible to build a regex that has property (1) but not +# (2), and vice versa. This is done by building the HIR with 'utf8=true' but +# building the Thompson NFA with 'utf8=false'. We don't test that here because +# the harness doesn't expose a way to enable or disable UTF-8 mode with that +# granularity. Instead, those combinations are lightly tested via doc examples. +# That's not to say that (1) without (2) is uncommon. Indeed, ripgrep uses it +# because it cannot guarantee that its haystack is valid UTF-8. + +# This tests that an empty regex doesn't split a codepoint. +[[test]] +name = "empty-utf8yes" +regex = '' +haystack = '☃' +matches = [[0, 0], [3, 3]] +unicode = true +utf8 = true + +# Tests the overlapping case of the above. +[[test]] +name = "empty-utf8yes-overlapping" +regex = '' +haystack = '☃' +matches = [[0, 0], [3, 3]] +unicode = true +utf8 = true +match-kind = "all" +search-kind = "overlapping" + +# This tests that an empty regex DOES split a codepoint when utf=false. +[[test]] +name = "empty-utf8no" +regex = '' +haystack = '☃' +matches = [[0, 0], [1, 1], [2, 2], [3, 3]] +unicode = true +utf8 = false + +# Tests the overlapping case of the above. +[[test]] +name = "empty-utf8no-overlapping" +regex = '' +haystack = '☃' +matches = [[0, 0], [1, 1], [2, 2], [3, 3]] +unicode = true +utf8 = false +match-kind = "all" +search-kind = "overlapping" + +# This tests that an empty regex doesn't split a codepoint, even if we give +# it bounds entirely within the codepoint. +# +# This is one of the trickier cases and is what motivated the current UTF-8 +# mode design. In particular, at one point, this test failed the 'is_match' +# variant of the test but not 'find'. This is because the 'is_match' code path +# is specifically optimized for "was a match found" rather than "where is the +# match." In the former case, you don't really care about the empty-vs-non-empty +# matches, and thus, the codepoint splitting filtering logic wasn't getting +# applied. (In multiple ways across multiple regex engines.) In this way, you +# can wind up with a situation where 'is_match' says "yes," but 'find' says, +# "I didn't find anything." Which is... not great. +# +# I could have decided to say that providing boundaries that themselves split +# a codepoint would have unspecified behavior. But I couldn't quite convince +# myself that such boundaries were the only way to get an inconsistency between +# 'is_match' and 'find'. +# +# Note that I also tried to come up with a test like this that fails without +# using `bounds`. Specifically, a test where 'is_match' and 'find' disagree. +# But I couldn't do it, and I'm tempted to conclude it is impossible. The +# fundamental problem is that you need to simultaneously produce an empty match +# that splits a codepoint while *not* matching before or after the codepoint. +[[test]] +name = "empty-utf8yes-bounds" +regex = '' +haystack = '𝛃' +bounds = [1, 3] +matches = [] +unicode = true +utf8 = true + +# Tests the overlapping case of the above. 
+[[test]] +name = "empty-utf8yes-bounds-overlapping" +regex = '' +haystack = '𝛃' +bounds = [1, 3] +matches = [] +unicode = true +utf8 = true +match-kind = "all" +search-kind = "overlapping" + +# This tests that an empty regex splits a codepoint when the bounds are +# entirely within the codepoint. +[[test]] +name = "empty-utf8no-bounds" +regex = '' +haystack = '𝛃' +bounds = [1, 3] +matches = [[1, 1], [2, 2], [3, 3]] +unicode = true +utf8 = false + +# Tests the overlapping case of the above. +[[test]] +name = "empty-utf8no-bounds-overlapping" +regex = '' +haystack = '𝛃' +bounds = [1, 3] +matches = [[1, 1], [2, 2], [3, 3]] +unicode = true +utf8 = false +match-kind = "all" +search-kind = "overlapping" + +# In this test, we anchor the search. Since the start position is also a UTF-8 +# boundary, we get a match. +[[test]] +name = "empty-utf8yes-anchored" +regex = '' +haystack = '𝛃' +matches = [[0, 0]] +anchored = true +unicode = true +utf8 = true + +# Tests the overlapping case of the above. +[[test]] +name = "empty-utf8yes-anchored-overlapping" +regex = '' +haystack = '𝛃' +matches = [[0, 0]] +anchored = true +unicode = true +utf8 = true +match-kind = "all" +search-kind = "overlapping" + +# Same as above, except with UTF-8 mode disabled. It almost doesn't change the +# result, except for the fact that since this is an anchored search and we +# always find all matches, the test harness will keep reporting matches until +# none are found. Because it's anchored, matches will be reported so long as +# they are directly adjacent. Since with UTF-8 mode the next anchored search +# after the match at [0, 0] fails, iteration stops (and doesn't find the last +# match at [4, 4]). +[[test]] +name = "empty-utf8no-anchored" +regex = '' +haystack = '𝛃' +matches = [[0, 0], [1, 1], [2, 2], [3, 3], [4, 4]] +anchored = true +unicode = true +utf8 = false + +# Tests the overlapping case of the above. +# +# Note that overlapping anchored searches are a little weird, and it's not +# totally clear what their semantics ought to be. For now, we just test the +# current behavior of our test shim that implements overlapping search. (This +# is one of the reasons why we don't really expose regex-level overlapping +# searches.) +[[test]] +name = "empty-utf8no-anchored-overlapping" +regex = '' +haystack = '𝛃' +matches = [[0, 0]] +anchored = true +unicode = true +utf8 = false +match-kind = "all" +search-kind = "overlapping" + +# In this test, we anchor the search, but also set bounds. The bounds start the +# search in the middle of a codepoint, so there should never be a match. +[[test]] +name = "empty-utf8yes-anchored-bounds" +regex = '' +haystack = '𝛃' +matches = [] +bounds = [1, 3] +anchored = true +unicode = true +utf8 = true + +# Tests the overlapping case of the above. +[[test]] +name = "empty-utf8yes-anchored-bounds-overlapping" +regex = '' +haystack = '𝛃' +matches = [] +bounds = [1, 3] +anchored = true +unicode = true +utf8 = true +match-kind = "all" +search-kind = "overlapping" + +# Same as above, except with UTF-8 mode disabled. Without UTF-8 mode enabled, +# matching within a codepoint is allowed. And remember, as in the anchored test +# above with UTF-8 mode disabled, iteration will report all adjacent matches. +# The matches at [0, 0] and [4, 4] are not included because of the bounds of +# the search. 
+[[test]] +name = "empty-utf8no-anchored-bounds" +regex = '' +haystack = '𝛃' +bounds = [1, 3] +matches = [[1, 1], [2, 2], [3, 3]] +anchored = true +unicode = true +utf8 = false + +# Tests the overlapping case of the above. +# +# Note that overlapping anchored searches are a little weird, and it's not +# totally clear what their semantics ought to be. For now, we just test the +# current behavior of our test shim that implements overlapping search. (This +# is one of the reasons why we don't really expose regex-level overlapping +# searches.) +[[test]] +name = "empty-utf8no-anchored-bounds-overlapping" +regex = '' +haystack = '𝛃' +bounds = [1, 3] +matches = [[1, 1]] +anchored = true +unicode = true +utf8 = false +match-kind = "all" +search-kind = "overlapping" + +# This tests that we find the match at the end of the string when the bounds +# exclude the first match. +[[test]] +name = "empty-utf8yes-startbound" +regex = '' +haystack = '𝛃' +bounds = [1, 4] +matches = [[4, 4]] +unicode = true +utf8 = true + +# Tests the overlapping case of the above. +[[test]] +name = "empty-utf8yes-startbound-overlapping" +regex = '' +haystack = '𝛃' +bounds = [1, 4] +matches = [[4, 4]] +unicode = true +utf8 = true +match-kind = "all" +search-kind = "overlapping" + +# Same as above, except since UTF-8 mode is disabled, we also find the matches +# inbetween that split the codepoint. +[[test]] +name = "empty-utf8no-startbound" +regex = '' +haystack = '𝛃' +bounds = [1, 4] +matches = [[1, 1], [2, 2], [3, 3], [4, 4]] +unicode = true +utf8 = false + +# Tests the overlapping case of the above. +[[test]] +name = "empty-utf8no-startbound-overlapping" +regex = '' +haystack = '𝛃' +bounds = [1, 4] +matches = [[1, 1], [2, 2], [3, 3], [4, 4]] +unicode = true +utf8 = false +match-kind = "all" +search-kind = "overlapping" + +# This tests that we don't find any matches in an anchored search, even when +# the bounds include a match (at the end). +[[test]] +name = "empty-utf8yes-anchored-startbound" +regex = '' +haystack = '𝛃' +bounds = [1, 4] +matches = [] +anchored = true +unicode = true +utf8 = true + +# Tests the overlapping case of the above. +[[test]] +name = "empty-utf8yes-anchored-startbound-overlapping" +regex = '' +haystack = '𝛃' +bounds = [1, 4] +matches = [] +anchored = true +unicode = true +utf8 = true +match-kind = "all" +search-kind = "overlapping" + +# Same as above, except since UTF-8 mode is disabled, we also find the matches +# inbetween that split the codepoint. Even though this is an anchored search, +# since the matches are adjacent, we find all of them. +[[test]] +name = "empty-utf8no-anchored-startbound" +regex = '' +haystack = '𝛃' +bounds = [1, 4] +matches = [[1, 1], [2, 2], [3, 3], [4, 4]] +anchored = true +unicode = true +utf8 = false + +# Tests the overlapping case of the above. +# +# Note that overlapping anchored searches are a little weird, and it's not +# totally clear what their semantics ought to be. For now, we just test the +# current behavior of our test shim that implements overlapping search. (This +# is one of the reasons why we don't really expose regex-level overlapping +# searches.) +[[test]] +name = "empty-utf8no-anchored-startbound-overlapping" +regex = '' +haystack = '𝛃' +bounds = [1, 4] +matches = [[1, 1]] +anchored = true +unicode = true +utf8 = false +match-kind = "all" +search-kind = "overlapping" + +# This tests that we find the match at the end of the haystack in UTF-8 mode +# when our bounds only include the empty string at the end of the haystack. 
+[[test]] +name = "empty-utf8yes-anchored-endbound" +regex = '' +haystack = '𝛃' +bounds = [4, 4] +matches = [[4, 4]] +anchored = true +unicode = true +utf8 = true + +# Tests the overlapping case of the above. +[[test]] +name = "empty-utf8yes-anchored-endbound-overlapping" +regex = '' +haystack = '𝛃' +bounds = [4, 4] +matches = [[4, 4]] +anchored = true +unicode = true +utf8 = true +match-kind = "all" +search-kind = "overlapping" + +# Same as above, but with UTF-8 mode disabled. Results remain the same since +# the only possible match does not split a codepoint. +[[test]] +name = "empty-utf8no-anchored-endbound" +regex = '' +haystack = '𝛃' +bounds = [4, 4] +matches = [[4, 4]] +anchored = true +unicode = true +utf8 = false + +# Tests the overlapping case of the above. +[[test]] +name = "empty-utf8no-anchored-endbound-overlapping" +regex = '' +haystack = '𝛃' +bounds = [4, 4] +matches = [[4, 4]] +anchored = true +unicode = true +utf8 = false +match-kind = "all" +search-kind = "overlapping" diff --git a/vendor/regex/testdata/word-boundary-special.toml b/vendor/regex/testdata/word-boundary-special.toml new file mode 100644 index 00000000000000..2b5a2a0acf9378 --- /dev/null +++ b/vendor/regex/testdata/word-boundary-special.toml @@ -0,0 +1,687 @@ +# These tests are for the "special" word boundary assertions. That is, +# \b{start}, \b{end}, \b{start-half}, \b{end-half}. These are specialty +# assertions for more niche use cases, but hitting those cases without these +# assertions is difficult. For example, \b{start-half} and \b{end-half} are +# used to implement the -w/--word-regexp flag in a grep program. + +# Tests for (?-u:\b{start}) + +[[test]] +name = "word-start-ascii-010" +regex = '\b{start}' +haystack = "a" +matches = [[0, 0]] +unicode = false + +[[test]] +name = "word-start-ascii-020" +regex = '\b{start}' +haystack = "a " +matches = [[0, 0]] +unicode = false + +[[test]] +name = "word-start-ascii-030" +regex = '\b{start}' +haystack = " a " +matches = [[1, 1]] +unicode = false + +[[test]] +name = "word-start-ascii-040" +regex = '\b{start}' +haystack = "" +matches = [] +unicode = false + +[[test]] +name = "word-start-ascii-050" +regex = '\b{start}' +haystack = "ab" +matches = [[0, 0]] +unicode = false + +[[test]] +name = "word-start-ascii-060" +regex = '\b{start}' +haystack = "𝛃" +matches = [] +unicode = false + +[[test]] +name = "word-start-ascii-060-bounds" +regex = '\b{start}' +haystack = "𝛃" +bounds = [2, 3] +matches = [] +unicode = false + +[[test]] +name = "word-start-ascii-070" +regex = '\b{start}' +haystack = " 𝛃 " +matches = [] +unicode = false + +[[test]] +name = "word-start-ascii-080" +regex = '\b{start}' +haystack = "𝛃𐆀" +matches = [] +unicode = false + +[[test]] +name = "word-start-ascii-090" +regex = '\b{start}' +haystack = "𝛃b" +matches = [[4, 4]] +unicode = false + +[[test]] +name = "word-start-ascii-110" +regex = '\b{start}' +haystack = "b𝛃" +matches = [[0, 0]] +unicode = false + +# Tests for (?-u:\b{end}) + +[[test]] +name = "word-end-ascii-010" +regex = '\b{end}' +haystack = "a" +matches = [[1, 1]] +unicode = false + +[[test]] +name = "word-end-ascii-020" +regex = '\b{end}' +haystack = "a " +matches = [[1, 1]] +unicode = false + +[[test]] +name = "word-end-ascii-030" +regex = '\b{end}' +haystack = " a " +matches = [[2, 2]] +unicode = false + +[[test]] +name = "word-end-ascii-040" +regex = '\b{end}' +haystack = "" +matches = [] +unicode = false + +[[test]] +name = "word-end-ascii-050" +regex = '\b{end}' +haystack = "ab" +matches = [[2, 2]] +unicode = false + +[[test]] +name 
= "word-end-ascii-060" +regex = '\b{end}' +haystack = "𝛃" +matches = [] +unicode = false + +[[test]] +name = "word-end-ascii-060-bounds" +regex = '\b{end}' +haystack = "𝛃" +bounds = [2, 3] +matches = [] +unicode = false + +[[test]] +name = "word-end-ascii-070" +regex = '\b{end}' +haystack = " 𝛃 " +matches = [] +unicode = false + +[[test]] +name = "word-end-ascii-080" +regex = '\b{end}' +haystack = "𝛃𐆀" +matches = [] +unicode = false + +[[test]] +name = "word-end-ascii-090" +regex = '\b{end}' +haystack = "𝛃b" +matches = [[5, 5]] +unicode = false + +[[test]] +name = "word-end-ascii-110" +regex = '\b{end}' +haystack = "b𝛃" +matches = [[1, 1]] +unicode = false + +# Tests for \b{start} + +[[test]] +name = "word-start-unicode-010" +regex = '\b{start}' +haystack = "a" +matches = [[0, 0]] +unicode = true + +[[test]] +name = "word-start-unicode-020" +regex = '\b{start}' +haystack = "a " +matches = [[0, 0]] +unicode = true + +[[test]] +name = "word-start-unicode-030" +regex = '\b{start}' +haystack = " a " +matches = [[1, 1]] +unicode = true + +[[test]] +name = "word-start-unicode-040" +regex = '\b{start}' +haystack = "" +matches = [] +unicode = true + +[[test]] +name = "word-start-unicode-050" +regex = '\b{start}' +haystack = "ab" +matches = [[0, 0]] +unicode = true + +[[test]] +name = "word-start-unicode-060" +regex = '\b{start}' +haystack = "𝛃" +matches = [[0, 0]] +unicode = true + +[[test]] +name = "word-start-unicode-060-bounds" +regex = '\b{start}' +haystack = "𝛃" +bounds = [2, 3] +matches = [] +unicode = true + +[[test]] +name = "word-start-unicode-070" +regex = '\b{start}' +haystack = " 𝛃 " +matches = [[1, 1]] +unicode = true + +[[test]] +name = "word-start-unicode-080" +regex = '\b{start}' +haystack = "𝛃𐆀" +matches = [[0, 0]] +unicode = true + +[[test]] +name = "word-start-unicode-090" +regex = '\b{start}' +haystack = "𝛃b" +matches = [[0, 0]] +unicode = true + +[[test]] +name = "word-start-unicode-110" +regex = '\b{start}' +haystack = "b𝛃" +matches = [[0, 0]] +unicode = true + +# Tests for \b{end} + +[[test]] +name = "word-end-unicode-010" +regex = '\b{end}' +haystack = "a" +matches = [[1, 1]] +unicode = true + +[[test]] +name = "word-end-unicode-020" +regex = '\b{end}' +haystack = "a " +matches = [[1, 1]] +unicode = true + +[[test]] +name = "word-end-unicode-030" +regex = '\b{end}' +haystack = " a " +matches = [[2, 2]] +unicode = true + +[[test]] +name = "word-end-unicode-040" +regex = '\b{end}' +haystack = "" +matches = [] +unicode = true + +[[test]] +name = "word-end-unicode-050" +regex = '\b{end}' +haystack = "ab" +matches = [[2, 2]] +unicode = true + +[[test]] +name = "word-end-unicode-060" +regex = '\b{end}' +haystack = "𝛃" +matches = [[4, 4]] +unicode = true + +[[test]] +name = "word-end-unicode-060-bounds" +regex = '\b{end}' +haystack = "𝛃" +bounds = [2, 3] +matches = [] +unicode = true + +[[test]] +name = "word-end-unicode-070" +regex = '\b{end}' +haystack = " 𝛃 " +matches = [[5, 5]] +unicode = true + +[[test]] +name = "word-end-unicode-080" +regex = '\b{end}' +haystack = "𝛃𐆀" +matches = [[4, 4]] +unicode = true + +[[test]] +name = "word-end-unicode-090" +regex = '\b{end}' +haystack = "𝛃b" +matches = [[5, 5]] +unicode = true + +[[test]] +name = "word-end-unicode-110" +regex = '\b{end}' +haystack = "b𝛃" +matches = [[5, 5]] +unicode = true + +# Tests for (?-u:\b{start-half}) + +[[test]] +name = "word-start-half-ascii-010" +regex = '\b{start-half}' +haystack = "a" +matches = [[0, 0]] +unicode = false + +[[test]] +name = "word-start-half-ascii-020" +regex = '\b{start-half}' +haystack = 
"a " +matches = [[0, 0], [2, 2]] +unicode = false + +[[test]] +name = "word-start-half-ascii-030" +regex = '\b{start-half}' +haystack = " a " +matches = [[0, 0], [1, 1], [3, 3]] +unicode = false + +[[test]] +name = "word-start-half-ascii-040" +regex = '\b{start-half}' +haystack = "" +matches = [[0, 0]] +unicode = false + +[[test]] +name = "word-start-half-ascii-050" +regex = '\b{start-half}' +haystack = "ab" +matches = [[0, 0]] +unicode = false + +[[test]] +name = "word-start-half-ascii-060" +regex = '\b{start-half}' +haystack = "𝛃" +matches = [[0, 0], [4, 4]] +unicode = false + +[[test]] +name = "word-start-half-ascii-060-noutf8" +regex = '\b{start-half}' +haystack = "𝛃" +matches = [[0, 0], [1, 1], [2, 2], [3, 3], [4, 4]] +unicode = false +utf8 = false + +[[test]] +name = "word-start-half-ascii-060-bounds" +regex = '\b{start-half}' +haystack = "𝛃" +bounds = [2, 3] +matches = [] +unicode = false + +[[test]] +name = "word-start-half-ascii-070" +regex = '\b{start-half}' +haystack = " 𝛃 " +matches = [[0, 0], [1, 1], [5, 5], [6, 6]] +unicode = false + +[[test]] +name = "word-start-half-ascii-080" +regex = '\b{start-half}' +haystack = "𝛃𐆀" +matches = [[0, 0], [4, 4], [8, 8]] +unicode = false + +[[test]] +name = "word-start-half-ascii-090" +regex = '\b{start-half}' +haystack = "𝛃b" +matches = [[0, 0], [4, 4]] +unicode = false + +[[test]] +name = "word-start-half-ascii-110" +regex = '\b{start-half}' +haystack = "b𝛃" +matches = [[0, 0], [5, 5]] +unicode = false + +# Tests for (?-u:\b{end-half}) + +[[test]] +name = "word-end-half-ascii-010" +regex = '\b{end-half}' +haystack = "a" +matches = [[1, 1]] +unicode = false + +[[test]] +name = "word-end-half-ascii-020" +regex = '\b{end-half}' +haystack = "a " +matches = [[1, 1], [2, 2]] +unicode = false + +[[test]] +name = "word-end-half-ascii-030" +regex = '\b{end-half}' +haystack = " a " +matches = [[0, 0], [2, 2], [3, 3]] +unicode = false + +[[test]] +name = "word-end-half-ascii-040" +regex = '\b{end-half}' +haystack = "" +matches = [[0, 0]] +unicode = false + +[[test]] +name = "word-end-half-ascii-050" +regex = '\b{end-half}' +haystack = "ab" +matches = [[2, 2]] +unicode = false + +[[test]] +name = "word-end-half-ascii-060" +regex = '\b{end-half}' +haystack = "𝛃" +matches = [[0, 0], [4, 4]] +unicode = false + +[[test]] +name = "word-end-half-ascii-060-bounds" +regex = '\b{end-half}' +haystack = "𝛃" +bounds = [2, 3] +matches = [] +unicode = false + +[[test]] +name = "word-end-half-ascii-070" +regex = '\b{end-half}' +haystack = " 𝛃 " +matches = [[0, 0], [1, 1], [5, 5], [6, 6]] +unicode = false + +[[test]] +name = "word-end-half-ascii-080" +regex = '\b{end-half}' +haystack = "𝛃𐆀" +matches = [[0, 0], [4, 4], [8, 8]] +unicode = false + +[[test]] +name = "word-end-half-ascii-090" +regex = '\b{end-half}' +haystack = "𝛃b" +matches = [[0, 0], [5, 5]] +unicode = false + +[[test]] +name = "word-end-half-ascii-110" +regex = '\b{end-half}' +haystack = "b𝛃" +matches = [[1, 1], [5, 5]] +unicode = false + +# Tests for \b{start-half} + +[[test]] +name = "word-start-half-unicode-010" +regex = '\b{start-half}' +haystack = "a" +matches = [[0, 0]] +unicode = true + +[[test]] +name = "word-start-half-unicode-020" +regex = '\b{start-half}' +haystack = "a " +matches = [[0, 0], [2, 2]] +unicode = true + +[[test]] +name = "word-start-half-unicode-030" +regex = '\b{start-half}' +haystack = " a " +matches = [[0, 0], [1, 1], [3, 3]] +unicode = true + +[[test]] +name = "word-start-half-unicode-040" +regex = '\b{start-half}' +haystack = "" +matches = [[0, 0]] +unicode = true + 
+[[test]] +name = "word-start-half-unicode-050" +regex = '\b{start-half}' +haystack = "ab" +matches = [[0, 0]] +unicode = true + +[[test]] +name = "word-start-half-unicode-060" +regex = '\b{start-half}' +haystack = "𝛃" +matches = [[0, 0]] +unicode = true + +[[test]] +name = "word-start-half-unicode-060-bounds" +regex = '\b{start-half}' +haystack = "𝛃" +bounds = [2, 3] +matches = [] +unicode = true + +[[test]] +name = "word-start-half-unicode-070" +regex = '\b{start-half}' +haystack = " 𝛃 " +matches = [[0, 0], [1, 1], [6, 6]] +unicode = true + +[[test]] +name = "word-start-half-unicode-080" +regex = '\b{start-half}' +haystack = "𝛃𐆀" +matches = [[0, 0], [8, 8]] +unicode = true + +[[test]] +name = "word-start-half-unicode-090" +regex = '\b{start-half}' +haystack = "𝛃b" +matches = [[0, 0]] +unicode = true + +[[test]] +name = "word-start-half-unicode-110" +regex = '\b{start-half}' +haystack = "b𝛃" +matches = [[0, 0]] +unicode = true + +# Tests for \b{end-half} + +[[test]] +name = "word-end-half-unicode-010" +regex = '\b{end-half}' +haystack = "a" +matches = [[1, 1]] +unicode = true + +[[test]] +name = "word-end-half-unicode-020" +regex = '\b{end-half}' +haystack = "a " +matches = [[1, 1], [2, 2]] +unicode = true + +[[test]] +name = "word-end-half-unicode-030" +regex = '\b{end-half}' +haystack = " a " +matches = [[0, 0], [2, 2], [3, 3]] +unicode = true + +[[test]] +name = "word-end-half-unicode-040" +regex = '\b{end-half}' +haystack = "" +matches = [[0, 0]] +unicode = true + +[[test]] +name = "word-end-half-unicode-050" +regex = '\b{end-half}' +haystack = "ab" +matches = [[2, 2]] +unicode = true + +[[test]] +name = "word-end-half-unicode-060" +regex = '\b{end-half}' +haystack = "𝛃" +matches = [[4, 4]] +unicode = true + +[[test]] +name = "word-end-half-unicode-060-bounds" +regex = '\b{end-half}' +haystack = "𝛃" +bounds = [2, 3] +matches = [] +unicode = true + +[[test]] +name = "word-end-half-unicode-070" +regex = '\b{end-half}' +haystack = " 𝛃 " +matches = [[0, 0], [5, 5], [6, 6]] +unicode = true + +[[test]] +name = "word-end-half-unicode-080" +regex = '\b{end-half}' +haystack = "𝛃𐆀" +matches = [[4, 4], [8, 8]] +unicode = true + +[[test]] +name = "word-end-half-unicode-090" +regex = '\b{end-half}' +haystack = "𝛃b" +matches = [[5, 5]] +unicode = true + +[[test]] +name = "word-end-half-unicode-110" +regex = '\b{end-half}' +haystack = "b𝛃" +matches = [[5, 5]] +unicode = true + +# Specialty tests. + +# Since \r is special cased in the start state computation (to deal with CRLF +# mode), this test ensures that the correct start state is computed when the +# pattern starts with a half word boundary assertion. +[[test]] +name = "word-start-half-ascii-carriage" +regex = '\b{start-half}[a-z]+' +haystack = 'ABC\rabc' +matches = [[4, 7]] +bounds = [4, 7] +unescape = true + +# Since \n is also special cased in the start state computation, this test +# ensures that the correct start state is computed when the pattern starts with +# a half word boundary assertion. +[[test]] +name = "word-start-half-ascii-linefeed" +regex = '\b{start-half}[a-z]+' +haystack = 'ABC\nabc' +matches = [[4, 7]] +bounds = [4, 7] +unescape = true + +# Like the carriage return test above, but with a custom line terminator. +[[test]] +name = "word-start-half-ascii-customlineterm" +regex = '\b{start-half}[a-z]+' +haystack = 'ABC!abc' +matches = [[4, 7]] +bounds = [4, 7] +unescape = true +line-terminator = '!' 
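The header of word-boundary-special.toml above notes that \b{start-half} and \b{end-half} exist mainly so a grep-like tool can implement a -w/--word-regexp flag by wrapping the user's pattern. A minimal sketch of that use case, assuming a regex crate release that supports the \b{start-half}/\b{end-half} syntax exercised by these tests (the wrapped pattern "foo" is only an illustration):

    use regex::Regex;

    fn main() {
        // -w semantics: the match may not be immediately preceded or followed
        // by another word character, but unlike a plain \b the half assertions
        // do not require the wrapped pattern itself to start or end with a
        // word character.
        let re = Regex::new(r"\b{start-half}foo\b{end-half}").unwrap();
        assert!(re.is_match("foo bar"));  // ordinary word
        assert!(re.is_match("(foo)"));    // punctuation on both sides is fine
        assert!(!re.is_match("foobar"));  // embedded in a longer word
    }

For a pattern like "foo" this behaves the same as \bfoo\b; the half assertions only make a difference when the wrapped pattern itself begins or ends with a non-word character.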
diff --git a/vendor/regex/testdata/word-boundary.toml b/vendor/regex/testdata/word-boundary.toml new file mode 100644 index 00000000000000..1d86fc9bb3b235 --- /dev/null +++ b/vendor/regex/testdata/word-boundary.toml @@ -0,0 +1,781 @@ +# Some of these are cribbed from RE2's test suite. + +# These test \b. Below are tests for \B. +[[test]] +name = "wb1" +regex = '\b' +haystack = "" +matches = [] +unicode = false + +[[test]] +name = "wb2" +regex = '\b' +haystack = "a" +matches = [[0, 0], [1, 1]] +unicode = false + +[[test]] +name = "wb3" +regex = '\b' +haystack = "ab" +matches = [[0, 0], [2, 2]] +unicode = false + +[[test]] +name = "wb4" +regex = '^\b' +haystack = "ab" +matches = [[0, 0]] +unicode = false + +[[test]] +name = "wb5" +regex = '\b$' +haystack = "ab" +matches = [[2, 2]] +unicode = false + +[[test]] +name = "wb6" +regex = '^\b$' +haystack = "ab" +matches = [] +unicode = false + +[[test]] +name = "wb7" +regex = '\bbar\b' +haystack = "nobar bar foo bar" +matches = [[6, 9], [14, 17]] +unicode = false + +[[test]] +name = "wb8" +regex = 'a\b' +haystack = "faoa x" +matches = [[3, 4]] +unicode = false + +[[test]] +name = "wb9" +regex = '\bbar' +haystack = "bar x" +matches = [[0, 3]] +unicode = false + +[[test]] +name = "wb10" +regex = '\bbar' +haystack = "foo\nbar x" +matches = [[4, 7]] +unicode = false + +[[test]] +name = "wb11" +regex = 'bar\b' +haystack = "foobar" +matches = [[3, 6]] +unicode = false + +[[test]] +name = "wb12" +regex = 'bar\b' +haystack = "foobar\nxxx" +matches = [[3, 6]] +unicode = false + +[[test]] +name = "wb13" +regex = '(?:foo|bar|[A-Z])\b' +haystack = "foo" +matches = [[0, 3]] +unicode = false + +[[test]] +name = "wb14" +regex = '(?:foo|bar|[A-Z])\b' +haystack = "foo\n" +matches = [[0, 3]] +unicode = false + +[[test]] +name = "wb15" +regex = '\b(?:foo|bar|[A-Z])' +haystack = "foo" +matches = [[0, 3]] +unicode = false + +[[test]] +name = "wb16" +regex = '\b(?:foo|bar|[A-Z])\b' +haystack = "X" +matches = [[0, 1]] +unicode = false + +[[test]] +name = "wb17" +regex = '\b(?:foo|bar|[A-Z])\b' +haystack = "XY" +matches = [] +unicode = false + +[[test]] +name = "wb18" +regex = '\b(?:foo|bar|[A-Z])\b' +haystack = "bar" +matches = [[0, 3]] +unicode = false + +[[test]] +name = "wb19" +regex = '\b(?:foo|bar|[A-Z])\b' +haystack = "foo" +matches = [[0, 3]] +unicode = false + +[[test]] +name = "wb20" +regex = '\b(?:foo|bar|[A-Z])\b' +haystack = "foo\n" +matches = [[0, 3]] +unicode = false + +[[test]] +name = "wb21" +regex = '\b(?:foo|bar|[A-Z])\b' +haystack = "ffoo bbar N x" +matches = [[10, 11]] +unicode = false + +[[test]] +name = "wb22" +regex = '\b(?:fo|foo)\b' +haystack = "fo" +matches = [[0, 2]] +unicode = false + +[[test]] +name = "wb23" +regex = '\b(?:fo|foo)\b' +haystack = "foo" +matches = [[0, 3]] +unicode = false + +[[test]] +name = "wb24" +regex = '\b\b' +haystack = "" +matches = [] +unicode = false + +[[test]] +name = "wb25" +regex = '\b\b' +haystack = "a" +matches = [[0, 0], [1, 1]] +unicode = false + +[[test]] +name = "wb26" +regex = '\b$' +haystack = "" +matches = [] +unicode = false + +[[test]] +name = "wb27" +regex = '\b$' +haystack = "x" +matches = [[1, 1]] +unicode = false + +[[test]] +name = "wb28" +regex = '\b$' +haystack = "y x" +matches = [[3, 3]] +unicode = false + +[[test]] +name = "wb29" +regex = '(?-u:\b).$' +haystack = "x" +matches = [[0, 1]] + +[[test]] +name = "wb30" +regex = '^\b(?:fo|foo)\b' +haystack = "fo" +matches = [[0, 2]] +unicode = false + +[[test]] +name = "wb31" +regex = '^\b(?:fo|foo)\b' +haystack = "foo" +matches = [[0, 3]] +unicode = 
false + +[[test]] +name = "wb32" +regex = '^\b$' +haystack = "" +matches = [] +unicode = false + +[[test]] +name = "wb33" +regex = '^\b$' +haystack = "x" +matches = [] +unicode = false + +[[test]] +name = "wb34" +regex = '^(?-u:\b).$' +haystack = "x" +matches = [[0, 1]] + +[[test]] +name = "wb35" +regex = '^(?-u:\b).(?-u:\b)$' +haystack = "x" +matches = [[0, 1]] + +[[test]] +name = "wb36" +regex = '^^^^^\b$$$$$' +haystack = "" +matches = [] +unicode = false + +[[test]] +name = "wb37" +regex = '^^^^^(?-u:\b).$$$$$' +haystack = "x" +matches = [[0, 1]] + +[[test]] +name = "wb38" +regex = '^^^^^\b$$$$$' +haystack = "x" +matches = [] +unicode = false + +[[test]] +name = "wb39" +regex = '^^^^^(?-u:\b\b\b).(?-u:\b\b\b)$$$$$' +haystack = "x" +matches = [[0, 1]] + +[[test]] +name = "wb40" +regex = '(?-u:\b).+(?-u:\b)' +haystack = "$$abc$$" +matches = [[2, 5]] + +[[test]] +name = "wb41" +regex = '\b' +haystack = "a b c" +matches = [[0, 0], [1, 1], [2, 2], [3, 3], [4, 4], [5, 5]] +unicode = false + +[[test]] +name = "wb42" +regex = '\bfoo\b' +haystack = "zzz foo zzz" +matches = [[4, 7]] +unicode = false + +[[test]] +name = "wb43" +regex = '\b^' +haystack = "ab" +matches = [[0, 0]] +unicode = false + +[[test]] +name = "wb44" +regex = '$\b' +haystack = "ab" +matches = [[2, 2]] +unicode = false + + +# Tests for \B. Note that \B is not allowed if UTF-8 mode is enabled, so we +# have to disable it for most of these tests. This is because \B can match at +# non-UTF-8 boundaries. +[[test]] +name = "nb1" +regex = '\Bfoo\B' +haystack = "n foo xfoox that" +matches = [[7, 10]] +unicode = false +utf8 = false + +[[test]] +name = "nb2" +regex = 'a\B' +haystack = "faoa x" +matches = [[1, 2]] +unicode = false +utf8 = false + +[[test]] +name = "nb3" +regex = '\Bbar' +haystack = "bar x" +matches = [] +unicode = false +utf8 = false + +[[test]] +name = "nb4" +regex = '\Bbar' +haystack = "foo\nbar x" +matches = [] +unicode = false +utf8 = false + +[[test]] +name = "nb5" +regex = 'bar\B' +haystack = "foobar" +matches = [] +unicode = false +utf8 = false + +[[test]] +name = "nb6" +regex = 'bar\B' +haystack = "foobar\nxxx" +matches = [] +unicode = false +utf8 = false + +[[test]] +name = "nb7" +regex = '(?:foo|bar|[A-Z])\B' +haystack = "foox" +matches = [[0, 3]] +unicode = false +utf8 = false + +[[test]] +name = "nb8" +regex = '(?:foo|bar|[A-Z])\B' +haystack = "foo\n" +matches = [] +unicode = false +utf8 = false + +[[test]] +name = "nb9" +regex = '\B' +haystack = "" +matches = [[0, 0]] +unicode = false +utf8 = false + +[[test]] +name = "nb10" +regex = '\B' +haystack = "x" +matches = [] +unicode = false +utf8 = false + +[[test]] +name = "nb11" +regex = '\B(?:foo|bar|[A-Z])' +haystack = "foo" +matches = [] +unicode = false +utf8 = false + +[[test]] +name = "nb12" +regex = '\B(?:foo|bar|[A-Z])\B' +haystack = "xXy" +matches = [[1, 2]] +unicode = false +utf8 = false + +[[test]] +name = "nb13" +regex = '\B(?:foo|bar|[A-Z])\B' +haystack = "XY" +matches = [] +unicode = false +utf8 = false + +[[test]] +name = "nb14" +regex = '\B(?:foo|bar|[A-Z])\B' +haystack = "XYZ" +matches = [[1, 2]] +unicode = false +utf8 = false + +[[test]] +name = "nb15" +regex = '\B(?:foo|bar|[A-Z])\B' +haystack = "abara" +matches = [[1, 4]] +unicode = false +utf8 = false + +[[test]] +name = "nb16" +regex = '\B(?:foo|bar|[A-Z])\B' +haystack = "xfoo_" +matches = [[1, 4]] +unicode = false +utf8 = false + +[[test]] +name = "nb17" +regex = '\B(?:foo|bar|[A-Z])\B' +haystack = "xfoo\n" +matches = [] +unicode = false +utf8 = false + +[[test]] +name = "nb18" +regex = 
'\B(?:foo|bar|[A-Z])\B' +haystack = "foo bar vNX" +matches = [[9, 10]] +unicode = false +utf8 = false + +[[test]] +name = "nb19" +regex = '\B(?:fo|foo)\B' +haystack = "xfoo" +matches = [[1, 3]] +unicode = false +utf8 = false + +[[test]] +name = "nb20" +regex = '\B(?:foo|fo)\B' +haystack = "xfooo" +matches = [[1, 4]] +unicode = false +utf8 = false + +[[test]] +name = "nb21" +regex = '\B\B' +haystack = "" +matches = [[0, 0]] +unicode = false +utf8 = false + +[[test]] +name = "nb22" +regex = '\B\B' +haystack = "x" +matches = [] +unicode = false +utf8 = false + +[[test]] +name = "nb23" +regex = '\B$' +haystack = "" +matches = [[0, 0]] +unicode = false +utf8 = false + +[[test]] +name = "nb24" +regex = '\B$' +haystack = "x" +matches = [] +unicode = false +utf8 = false + +[[test]] +name = "nb25" +regex = '\B$' +haystack = "y x" +matches = [] +unicode = false +utf8 = false + +[[test]] +name = "nb26" +regex = '\B.$' +haystack = "x" +matches = [] +unicode = false +utf8 = false + +[[test]] +name = "nb27" +regex = '^\B(?:fo|foo)\B' +haystack = "fo" +matches = [] +unicode = false +utf8 = false + +[[test]] +name = "nb28" +regex = '^\B(?:fo|foo)\B' +haystack = "fo" +matches = [] +unicode = false +utf8 = false + +[[test]] +name = "nb29" +regex = '^\B' +haystack = "" +matches = [[0, 0]] +unicode = false +utf8 = false + +[[test]] +name = "nb30" +regex = '^\B' +haystack = "x" +matches = [] +unicode = false +utf8 = false + +[[test]] +name = "nb31" +regex = '^\B\B' +haystack = "" +matches = [[0, 0]] +unicode = false +utf8 = false + +[[test]] +name = "nb32" +regex = '^\B\B' +haystack = "x" +matches = [] +unicode = false +utf8 = false + +[[test]] +name = "nb33" +regex = '^\B$' +haystack = "" +matches = [[0, 0]] +unicode = false +utf8 = false + +[[test]] +name = "nb34" +regex = '^\B$' +haystack = "x" +matches = [] +unicode = false +utf8 = false + +[[test]] +name = "nb35" +regex = '^\B.$' +haystack = "x" +matches = [] +unicode = false +utf8 = false + +[[test]] +name = "nb36" +regex = '^\B.\B$' +haystack = "x" +matches = [] +unicode = false +utf8 = false + +[[test]] +name = "nb37" +regex = '^^^^^\B$$$$$' +haystack = "" +matches = [[0, 0]] +unicode = false +utf8 = false + +[[test]] +name = "nb38" +regex = '^^^^^\B.$$$$$' +haystack = "x" +matches = [] +unicode = false +utf8 = false + +[[test]] +name = "nb39" +regex = '^^^^^\B$$$$$' +haystack = "x" +matches = [] +unicode = false +utf8 = false + + +# unicode1* and unicode2* work for both Unicode and ASCII because all matches +# are reported as byte offsets, and « and » do not correspond to word +# boundaries at either the character or byte level. +[[test]] +name = "unicode1" +regex = '\bx\b' +haystack = "«x" +matches = [[2, 3]] + +[[test]] +name = "unicode1-only-ascii" +regex = '\bx\b' +haystack = "«x" +matches = [[2, 3]] +unicode = false + +[[test]] +name = "unicode2" +regex = '\bx\b' +haystack = "x»" +matches = [[0, 1]] + +[[test]] +name = "unicode2-only-ascii" +regex = '\bx\b' +haystack = "x»" +matches = [[0, 1]] +unicode = false + +# ASCII word boundaries are completely oblivious to Unicode characters, so +# even though β is a character, an ASCII \b treats it as a word boundary +# when it is adjacent to another ASCII character. (The ASCII \b only looks +# at the leading byte of β.) For Unicode \b, the tests are precisely inverted. 
+[[test]] +name = "unicode3" +regex = '\bx\b' +haystack = 'áxβ' +matches = [] + +[[test]] +name = "unicode3-only-ascii" +regex = '\bx\b' +haystack = 'áxβ' +matches = [[2, 3]] +unicode = false + +[[test]] +name = "unicode4" +regex = '\Bx\B' +haystack = 'áxβ' +matches = [[2, 3]] + +[[test]] +name = "unicode4-only-ascii" +regex = '\Bx\B' +haystack = 'áxβ' +matches = [] +unicode = false +utf8 = false + +# The same as above, but with \b instead of \B as a sanity check. +[[test]] +name = "unicode5" +regex = '\b' +haystack = "0\U0007EF5E" +matches = [[0, 0], [1, 1]] + +[[test]] +name = "unicode5-only-ascii" +regex = '\b' +haystack = "0\U0007EF5E" +matches = [[0, 0], [1, 1]] +unicode = false +utf8 = false + +[[test]] +name = "unicode5-noutf8" +regex = '\b' +haystack = '0\xFF\xFF\xFF\xFF' +matches = [[0, 0], [1, 1]] +unescape = true +utf8 = false + +[[test]] +name = "unicode5-noutf8-only-ascii" +regex = '\b' +haystack = '0\xFF\xFF\xFF\xFF' +matches = [[0, 0], [1, 1]] +unescape = true +unicode = false +utf8 = false + +# Weird special case to ensure that ASCII \B treats each individual code unit +# as a non-word byte. (The specific codepoint is irrelevant. It's an arbitrary +# codepoint that uses 4 bytes in its UTF-8 encoding and is not a member of the +# \w character class.) +[[test]] +name = "unicode5-not" +regex = '\B' +haystack = "0\U0007EF5E" +matches = [[5, 5]] + +[[test]] +name = "unicode5-not-only-ascii" +regex = '\B' +haystack = "0\U0007EF5E" +matches = [[2, 2], [3, 3], [4, 4], [5, 5]] +unicode = false +utf8 = false + +# This gets no matches since \B only matches in the presence of valid UTF-8 +# when Unicode is enabled, even when UTF-8 mode is disabled. +[[test]] +name = "unicode5-not-noutf8" +regex = '\B' +haystack = '0\xFF\xFF\xFF\xFF' +matches = [] +unescape = true +utf8 = false + +# But this DOES get matches since \B in ASCII mode only looks at individual +# bytes. +[[test]] +name = "unicode5-not-noutf8-only-ascii" +regex = '\B' +haystack = '0\xFF\xFF\xFF\xFF' +matches = [[2, 2], [3, 3], [4, 4], [5, 5]] +unescape = true +unicode = false +utf8 = false + +# Some tests of no particular significance. +[[test]] +name = "unicode6" +regex = '\b[0-9]+\b' +haystack = "foo 123 bar 456 quux 789" +matches = [[4, 7], [12, 15], [21, 24]] + +[[test]] +name = "unicode7" +regex = '\b[0-9]+\b' +haystack = "foo 123 bar a456 quux 789" +matches = [[4, 7], [22, 25]] + +[[test]] +name = "unicode8" +regex = '\b[0-9]+\b' +haystack = "foo 123 bar 456a quux 789" +matches = [[4, 7], [22, 25]] + +# A variant of the problem described here: +# https://github.com/google/re2/blob/89567f5de5b23bb5ad0c26cbafc10bdc7389d1fa/re2/dfa.cc#L658-L667 +[[test]] +name = "alt-with-assertion-repetition" +regex = '(?:\b|%)+' +haystack = "z%" +bounds = [1, 2] +anchored = true +matches = [[1, 1]] diff --git a/vendor/regex/tests/lib.rs b/vendor/regex/tests/lib.rs new file mode 100644 index 00000000000000..b3f69423d955a0 --- /dev/null +++ b/vendor/regex/tests/lib.rs @@ -0,0 +1,58 @@ +#![cfg_attr(feature = "pattern", feature(pattern))] + +mod fuzz; +mod misc; +mod regression; +mod regression_fuzz; +mod replace; +#[cfg(feature = "pattern")] +mod searcher; +mod suite_bytes; +mod suite_bytes_set; +mod suite_string; +mod suite_string_set; + +const BLACKLIST: &[&str] = &[ + // Nothing to blacklist yet! +]; + +fn suite() -> anyhow::Result<regex_test::RegexTests> { + let _ = env_logger::try_init(); + + let mut tests = regex_test::RegexTests::new(); + macro_rules! 
load { + ($name:expr) => {{ + const DATA: &[u8] = + include_bytes!(concat!("../testdata/", $name, ".toml")); + tests.load_slice($name, DATA)?; + }}; + } + + load!("anchored"); + load!("bytes"); + load!("crazy"); + load!("crlf"); + load!("earliest"); + load!("empty"); + load!("expensive"); + load!("flags"); + load!("iter"); + load!("leftmost-all"); + load!("line-terminator"); + load!("misc"); + load!("multiline"); + load!("no-unicode"); + load!("overlapping"); + load!("regression"); + load!("set"); + load!("substring"); + load!("unicode"); + load!("utf8"); + load!("word-boundary"); + load!("word-boundary-special"); + load!("fowler/basic"); + load!("fowler/nullsubexpr"); + load!("fowler/repetition"); + + Ok(tests) +} diff --git a/vendor/regex/tests/misc.rs b/vendor/regex/tests/misc.rs new file mode 100644 index 00000000000000..c04c9c9fe2b367 --- /dev/null +++ b/vendor/regex/tests/misc.rs @@ -0,0 +1,143 @@ +use regex::Regex; + +macro_rules! regex { + ($pattern:expr) => { + regex::Regex::new($pattern).unwrap() + }; +} + +#[test] +fn unclosed_group_error() { + let err = Regex::new(r"(").unwrap_err(); + let msg = err.to_string(); + assert!(msg.contains("unclosed group"), "error message: {msg:?}"); +} + +#[test] +fn regex_string() { + assert_eq!(r"[a-zA-Z0-9]+", regex!(r"[a-zA-Z0-9]+").as_str()); + assert_eq!(r"[a-zA-Z0-9]+", &format!("{}", regex!(r"[a-zA-Z0-9]+"))); + assert_eq!( + r#"Regex("[a-zA-Z0-9]+")"#, + &format!("{:?}", regex!(r"[a-zA-Z0-9]+")) + ); +} + +#[test] +fn capture_names() { + let re = regex!(r"(.)(?P<a>.)"); + assert_eq!(3, re.captures_len()); + assert_eq!((3, Some(3)), re.capture_names().size_hint()); + assert_eq!( + vec![None, None, Some("a")], + re.capture_names().collect::<Vec<_>>() + ); +} + +#[test] +fn capture_index() { + let re = regex!(r"^(?P<name>.+)$"); + let cap = re.captures("abc").unwrap(); + assert_eq!(&cap[0], "abc"); + assert_eq!(&cap[1], "abc"); + assert_eq!(&cap["name"], "abc"); +} + +#[test] +#[should_panic] +fn capture_index_panic_usize() { + let re = regex!(r"^(?P<name>.+)$"); + let cap = re.captures("abc").unwrap(); + let _ = cap[2]; +} + +#[test] +#[should_panic] +fn capture_index_panic_name() { + let re = regex!(r"^(?P<name>.+)$"); + let cap = re.captures("abc").unwrap(); + let _ = cap["bad name"]; +} + +#[test] +fn capture_index_lifetime() { + // This is a test of whether the types on `caps["..."]` are general + // enough. If not, this will fail to typecheck. 
+ fn inner(s: &str) -> usize { + let re = regex!(r"(?P<number>[0-9]+)"); + let caps = re.captures(s).unwrap(); + caps["number"].len() + } + assert_eq!(3, inner("123")); +} + +#[test] +fn capture_misc() { + let re = regex!(r"(.)(?P<a>a)?(.)(?P<b>.)"); + let cap = re.captures("abc").unwrap(); + + assert_eq!(5, cap.len()); + + assert_eq!((0, 3), { + let m = cap.get(0).unwrap(); + (m.start(), m.end()) + }); + assert_eq!(None, cap.get(2)); + assert_eq!((2, 3), { + let m = cap.get(4).unwrap(); + (m.start(), m.end()) + }); + + assert_eq!("abc", cap.get(0).unwrap().as_str()); + assert_eq!(None, cap.get(2)); + assert_eq!("c", cap.get(4).unwrap().as_str()); + + assert_eq!(None, cap.name("a")); + assert_eq!("c", cap.name("b").unwrap().as_str()); +} + +#[test] +fn sub_capture_matches() { + let re = regex!(r"([a-z])(([a-z])|([0-9]))"); + let cap = re.captures("a5").unwrap(); + let subs: Vec<_> = cap.iter().collect(); + + assert_eq!(5, subs.len()); + assert!(subs[0].is_some()); + assert!(subs[1].is_some()); + assert!(subs[2].is_some()); + assert!(subs[3].is_none()); + assert!(subs[4].is_some()); + + assert_eq!("a5", subs[0].unwrap().as_str()); + assert_eq!("a", subs[1].unwrap().as_str()); + assert_eq!("5", subs[2].unwrap().as_str()); + assert_eq!("5", subs[4].unwrap().as_str()); +} + +// Test that the DFA can handle pathological cases. (This should result in the +// DFA's cache being flushed too frequently, which should cause it to quit and +// fall back to the NFA algorithm.) +#[test] +fn dfa_handles_pathological_case() { + fn ones_and_zeroes(count: usize) -> String { + let mut s = String::new(); + for i in 0..count { + if i % 3 == 0 { + s.push('1'); + } else { + s.push('0'); + } + } + s + } + + let re = regex!(r"[01]*1[01]{20}$"); + let text = { + let mut pieces = ones_and_zeroes(100_000); + pieces.push('1'); + pieces.push_str(&ones_and_zeroes(20)); + pieces + }; + assert!(re.is_match(&text)); +} diff --git a/vendor/regex/tests/regression.rs b/vendor/regex/tests/regression.rs new file mode 100644 index 00000000000000..a5867016b211cc --- /dev/null +++ b/vendor/regex/tests/regression.rs @@ -0,0 +1,94 @@ +use regex::Regex; + +macro_rules! 
regex { + ($pattern:expr) => { + regex::Regex::new($pattern).unwrap() + }; +} + +// See: https://github.com/rust-lang/regex/issues/48 +#[test] +fn invalid_regexes_no_crash() { + assert!(Regex::new("(*)").is_err()); + assert!(Regex::new("(?:?)").is_err()); + assert!(Regex::new("(?)").is_err()); + assert!(Regex::new("*").is_err()); +} + +// See: https://github.com/rust-lang/regex/issues/98 +#[test] +fn regression_many_repeat_stack_overflow() { + let re = regex!("^.{1,2500}"); + assert_eq!( + vec![0..1], + re.find_iter("a").map(|m| m.range()).collect::<Vec<_>>() + ); +} + +// See: https://github.com/rust-lang/regex/issues/555 +#[test] +fn regression_invalid_repetition_expr() { + assert!(Regex::new("(?m){1,1}").is_err()); +} + +// See: https://github.com/rust-lang/regex/issues/527 +#[test] +fn regression_invalid_flags_expression() { + assert!(Regex::new("(((?x)))").is_ok()); +} + +// See: https://github.com/rust-lang/regex/issues/129 +#[test] +fn regression_captures_rep() { + let re = regex!(r"([a-f]){2}(?P<foo>[x-z])"); + let caps = re.captures("abx").unwrap(); + assert_eq!(&caps["foo"], "x"); +} + +// See: https://github.com/BurntSushi/ripgrep/issues/1247 +#[cfg(feature = "unicode-perl")] +#[test] +fn regression_nfa_stops1() { + let re = regex::bytes::Regex::new(r"\bs(?:[ab])").unwrap(); + assert_eq!(0, re.find_iter(b"s\xE4").count()); +} + +// See: https://github.com/rust-lang/regex/issues/981 +#[cfg(feature = "unicode")] +#[test] +fn regression_bad_word_boundary() { + let re = regex!(r#"(?i:(?:\b|_)win(?:32|64|dows)?(?:\b|_))"#); + let hay = "ubi-Darwin-x86_64.tar.gz"; + assert!(!re.is_match(hay)); + let hay = "ubi-Windows-x86_64.zip"; + assert!(re.is_match(hay)); +} + +// See: https://github.com/rust-lang/regex/issues/982 +#[cfg(feature = "unicode-perl")] +#[test] +fn regression_unicode_perl_not_enabled() { + let pat = r"(\d+\s?(years|year|y))?\s?(\d+\s?(months|month|m))?\s?(\d+\s?(weeks|week|w))?\s?(\d+\s?(days|day|d))?\s?(\d+\s?(hours|hour|h))?"; + assert!(Regex::new(pat).is_ok()); +} + +// See: https://github.com/rust-lang/regex/issues/995 +#[test] +fn regression_big_regex_overflow() { + let pat = r" {2147483516}{2147483416}{5}"; + assert!(Regex::new(pat).is_err()); +} + +// See: https://github.com/rust-lang/regex/issues/999 +#[test] +fn regression_complete_literals_suffix_incorrect() { + let needles = vec![ + "aA", "bA", "cA", "dA", "eA", "fA", "gA", "hA", "iA", "jA", "kA", + "lA", "mA", "nA", "oA", "pA", "qA", "rA", "sA", "tA", "uA", "vA", + "wA", "xA", "yA", "zA", + ]; + let pattern = needles.join("|"); + let re = regex!(&pattern); + let hay = "FUBAR"; + assert_eq!(0, re.find_iter(hay).count()); +} diff --git a/vendor/regex/tests/regression_fuzz.rs b/vendor/regex/tests/regression_fuzz.rs new file mode 100644 index 00000000000000..f90ad4cb20d685 --- /dev/null +++ b/vendor/regex/tests/regression_fuzz.rs @@ -0,0 +1,61 @@ +// These tests are only run for the "default" test target because some of them +// can take quite a long time. Some of them take long enough that it's not +// practical to run them in debug mode. :-/ + +use regex::Regex; + +macro_rules! regex { + ($pattern:expr) => { + regex::Regex::new($pattern).unwrap() + }; +} + +// See: https://oss-fuzz.com/testcase-detail/5673225499181056 +// +// Ignored by default since it takes too long in debug mode (almost a minute). 
+#[test]
+#[ignore]
+fn fuzz1() {
+    regex!(r"1}{55}{0}*{1}{55}{55}{5}*{1}{55}+{56}|;**");
+}
+
+// See: https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=26505
+// See: https://github.com/rust-lang/regex/issues/722
+#[test]
+#[cfg(feature = "unicode")]
+fn empty_any_errors_no_panic() {
+    assert!(Regex::new(r"\P{any}").is_ok());
+}
+
+// This tests that a very large regex errors during compilation instead of
+// using gratuitous amounts of memory. The specific problem is that the
+// compiler wasn't accounting for the memory used by Unicode character classes
+// correctly.
+//
+// See: https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=33579
+#[test]
+fn big_regex_fails_to_compile() {
+    let pat = "[\u{0}\u{e}\u{2}\\w~~>[l\t\u{0}]p?<]{971158}";
+    assert!(Regex::new(pat).is_err());
+}
+
+// This was caught while on master but before a release went out(!).
+//
+// See: https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=58173
+#[test]
+fn todo() {
+    let pat = "(?:z|xx)@|xx";
+    assert!(Regex::new(pat).is_ok());
+}
+
+// This was caused by the fuzzer, and then minimized by hand.
+//
+// This was caused by a bug in DFA determinization that mishandled NFA fail
+// states.
+#[test]
+fn fail_branch_prevents_match() {
+    let pat = r".*[a&&b]A|B";
+    let hay = "B";
+    let re = Regex::new(pat).unwrap();
+    assert!(re.is_match(hay));
+}
diff --git a/vendor/regex/tests/replace.rs b/vendor/regex/tests/replace.rs
new file mode 100644
index 00000000000000..f26ae46030bad2
--- /dev/null
+++ b/vendor/regex/tests/replace.rs
@@ -0,0 +1,183 @@
+macro_rules! replace(
+    ($name:ident, $which:ident, $re:expr,
+     $search:expr, $replace:expr, $result:expr) => (
+        #[test]
+        fn $name() {
+            let re = regex::Regex::new($re).unwrap();
+            assert_eq!(re.$which($search, $replace), $result);
+        }
+    );
+);
+
+replace!(first, replace, r"[0-9]", "age: 26", "Z", "age: Z6");
+replace!(plus, replace, r"[0-9]+", "age: 26", "Z", "age: Z");
+replace!(all, replace_all, r"[0-9]", "age: 26", "Z", "age: ZZ");
+replace!(groups, replace, r"([^ ]+)[ ]+([^ ]+)", "w1 w2", "$2 $1", "w2 w1");
+replace!(
+    double_dollar,
+    replace,
+    r"([^ ]+)[ ]+([^ ]+)",
+    "w1 w2",
+    "$2 $$1",
+    "w2 $1"
+);
+// replace!(adjacent_index, replace,
+//          r"([^aeiouy])ies$", "skies", "$1y", "sky");
+replace!(
+    named,
+    replace_all,
+    r"(?P<first>[^ ]+)[ ]+(?P<last>[^ ]+)(?P<space>[ ]*)",
+    "w1 w2 w3 w4",
+    "$last $first$space",
+    "w2 w1 w4 w3"
+);
+replace!(
+    trim,
+    replace_all,
+    "^[ \t]+|[ \t]+$",
+    " \t trim me\t \t",
+    "",
+    "trim me"
+);
+replace!(number_hyphen, replace, r"(.)(.)", "ab", "$1-$2", "a-b");
+// replace!(number_underscore, replace, r"(.)(.)", "ab", "$1_$2", "a_b");
+replace!(
+    simple_expand,
+    replace_all,
+    r"([a-z]) ([a-z])",
+    "a b",
+    "$2 $1",
+    "b a"
+);
+replace!(
+    literal_dollar1,
+    replace_all,
+    r"([a-z]+) ([a-z]+)",
+    "a b",
+    "$$1",
+    "$1"
+);
+replace!(
+    literal_dollar2,
+    replace_all,
+    r"([a-z]+) ([a-z]+)",
+    "a b",
+    "$2 $$c $1",
+    "b $c a"
+);
+replace!(
+    no_expand1,
+    replace,
+    r"([^ ]+)[ ]+([^ ]+)",
+    "w1 w2",
+    regex::NoExpand("$2 $1"),
+    "$2 $1"
+);
+replace!(
+    no_expand2,
+    replace,
+    r"([^ ]+)[ ]+([^ ]+)",
+    "w1 w2",
+    regex::NoExpand("$$1"),
+    "$$1"
+);
+replace!(
+    closure_returning_reference,
+    replace,
+    r"([0-9]+)",
+    "age: 26",
+    |captures: &regex::Captures<'_>| { captures[1][0..1].to_owned() },
+    "age: 2"
+);
+replace!(
+    closure_returning_value,
+    replace,
+    r"[0-9]+",
+    "age: 26",
+    |_captures: &regex::Captures<'_>| "Z".to_owned(),
+    "age: Z"
+);
+
+// See https://github.com/rust-lang/regex/issues/314
+replace!(
+    
match_at_start_replace_with_empty, + replace_all, + r"foo", + "foobar", + "", + "bar" +); + +// See https://github.com/rust-lang/regex/issues/393 +replace!(single_empty_match, replace, r"^", "bar", "foo", "foobar"); + +// See https://github.com/rust-lang/regex/issues/399 +replace!( + capture_longest_possible_name, + replace_all, + r"(.)", + "b", + "${1}a $1a", + "ba " +); + +replace!( + impl_string, + replace, + r"[0-9]", + "age: 26", + "Z".to_string(), + "age: Z6" +); +replace!( + impl_string_ref, + replace, + r"[0-9]", + "age: 26", + &"Z".to_string(), + "age: Z6" +); +replace!( + impl_cow_str_borrowed, + replace, + r"[0-9]", + "age: 26", + std::borrow::Cow::<'_, str>::Borrowed("Z"), + "age: Z6" +); +replace!( + impl_cow_str_borrowed_ref, + replace, + r"[0-9]", + "age: 26", + &std::borrow::Cow::<'_, str>::Borrowed("Z"), + "age: Z6" +); +replace!( + impl_cow_str_owned, + replace, + r"[0-9]", + "age: 26", + std::borrow::Cow::<'_, str>::Owned("Z".to_string()), + "age: Z6" +); +replace!( + impl_cow_str_owned_ref, + replace, + r"[0-9]", + "age: 26", + &std::borrow::Cow::<'_, str>::Owned("Z".to_string()), + "age: Z6" +); + +#[test] +fn replacen_no_captures() { + let re = regex::Regex::new(r"[0-9]").unwrap(); + assert_eq!(re.replacen("age: 1234", 2, "Z"), "age: ZZ34"); +} + +#[test] +fn replacen_with_captures() { + let re = regex::Regex::new(r"([0-9])").unwrap(); + assert_eq!(re.replacen("age: 1234", 2, "${1}Z"), "age: 1Z2Z34"); +} diff --git a/vendor/regex/tests/searcher.rs b/vendor/regex/tests/searcher.rs new file mode 100644 index 00000000000000..f6dae13105ffe9 --- /dev/null +++ b/vendor/regex/tests/searcher.rs @@ -0,0 +1,93 @@ +macro_rules! searcher { + ($name:ident, $re:expr, $haystack:expr) => ( + searcher!($name, $re, $haystack, vec vec![]); + ); + ($name:ident, $re:expr, $haystack:expr, $($steps:expr,)*) => ( + searcher!($name, $re, $haystack, vec vec![$($steps),*]); + ); + ($name:ident, $re:expr, $haystack:expr, $($steps:expr),*) => ( + searcher!($name, $re, $haystack, vec vec![$($steps),*]); + ); + ($name:ident, $re:expr, $haystack:expr, vec $expect_steps:expr) => ( + #[test] + #[allow(unused_imports)] + fn $name() { + use std::str::pattern::{Pattern, Searcher}; + use std::str::pattern::SearchStep::{Match, Reject, Done}; + let re = regex::Regex::new($re).unwrap(); + let mut se = re.into_searcher($haystack); + let mut got_steps = vec![]; + loop { + match se.next() { + Done => break, + step => { got_steps.push(step); } + } + } + assert_eq!(got_steps, $expect_steps); + } + ); +} + +searcher!(searcher_empty_regex_empty_haystack, r"", "", Match(0, 0)); +searcher!( + searcher_empty_regex, + r"", + "ab", + Match(0, 0), + Reject(0, 1), + Match(1, 1), + Reject(1, 2), + Match(2, 2) +); +searcher!(searcher_empty_haystack, r"\d", ""); +searcher!(searcher_one_match, r"\d", "5", Match(0, 1)); +searcher!(searcher_no_match, r"\d", "a", Reject(0, 1)); +searcher!( + searcher_two_adjacent_matches, + r"\d", + "56", + Match(0, 1), + Match(1, 2) +); +searcher!( + searcher_two_non_adjacent_matches, + r"\d", + "5a6", + Match(0, 1), + Reject(1, 2), + Match(2, 3) +); +searcher!(searcher_reject_first, r"\d", "a6", Reject(0, 1), Match(1, 2)); +searcher!( + searcher_one_zero_length_matches, + r"\d*", + "a1b2", + Match(0, 0), // ^ + Reject(0, 1), // a + Match(1, 2), // a1 + Reject(2, 3), // a1b + Match(3, 4), // a1b2 +); +searcher!( + searcher_many_zero_length_matches, + r"\d*", + "a1bbb2", + Match(0, 0), // ^ + Reject(0, 1), // a + Match(1, 2), // a1 + Reject(2, 3), // a1b + Match(3, 3), // a1bb + Reject(3, 4), // 
a1bb + Match(4, 4), // a1bbb + Reject(4, 5), // a1bbb + Match(5, 6), // a1bbba +); +searcher!( + searcher_unicode, + r".+?", + "Ⅰ1Ⅱ2", + Match(0, 3), + Match(3, 4), + Match(4, 7), + Match(7, 8) +); diff --git a/vendor/regex/tests/suite_bytes.rs b/vendor/regex/tests/suite_bytes.rs new file mode 100644 index 00000000000000..784b1a47adcbba --- /dev/null +++ b/vendor/regex/tests/suite_bytes.rs @@ -0,0 +1,108 @@ +use { + anyhow::Result, + regex::bytes::{Regex, RegexBuilder}, + regex_test::{ + CompiledRegex, Match, RegexTest, Span, TestResult, TestRunner, + }, +}; + +/// Tests the default configuration of the hybrid NFA/DFA. +#[test] +fn default() -> Result<()> { + let mut runner = TestRunner::new()?; + runner + .expand(&["is_match", "find", "captures"], |test| test.compiles()) + .blacklist_iter(super::BLACKLIST) + .test_iter(crate::suite()?.iter(), compiler) + .assert(); + Ok(()) +} + +fn run_test(re: &Regex, test: &RegexTest) -> TestResult { + match test.additional_name() { + "is_match" => TestResult::matched(re.is_match(test.haystack())), + "find" => TestResult::matches( + re.find_iter(test.haystack()) + .take(test.match_limit().unwrap_or(std::usize::MAX)) + .map(|m| Match { + id: 0, + span: Span { start: m.start(), end: m.end() }, + }), + ), + "captures" => { + let it = re + .captures_iter(test.haystack()) + .take(test.match_limit().unwrap_or(std::usize::MAX)) + .map(|caps| testify_captures(&caps)); + TestResult::captures(it) + } + name => TestResult::fail(&format!("unrecognized test name: {name}")), + } +} + +/// Converts the given regex test to a closure that searches with a +/// `bytes::Regex`. If the test configuration is unsupported, then a +/// `CompiledRegex` that skips the test is returned. +fn compiler( + test: &RegexTest, + _patterns: &[String], +) -> anyhow::Result<CompiledRegex> { + let skip = Ok(CompiledRegex::skip()); + + // We're only testing bytes::Regex here, which supports one pattern only. + let pattern = match test.regexes().len() { + 1 => &test.regexes()[0], + _ => return skip, + }; + // We only test is_match, find_iter and captures_iter. All of those are + // leftmost searches. + if !matches!(test.search_kind(), regex_test::SearchKind::Leftmost) { + return skip; + } + // The top-level single-pattern regex API always uses leftmost-first. + if !matches!(test.match_kind(), regex_test::MatchKind::LeftmostFirst) { + return skip; + } + // The top-level regex API always runs unanchored searches. ... But we can + // handle tests that are anchored but have only one match. + if test.anchored() && test.match_limit() != Some(1) { + return skip; + } + // We don't support tests with explicit search bounds. We could probably + // support this by using the 'find_at' (and such) APIs. + let bounds = test.bounds(); + if !(bounds.start == 0 && bounds.end == test.haystack().len()) { + return skip; + } + // The bytes::Regex API specifically does not support enabling UTF-8 mode. + // It could I suppose, but currently it does not. That is, it permits + // matches to have offsets that split codepoints. + if test.utf8() { + return skip; + } + // If the test requires Unicode but the Unicode feature isn't enabled, + // skip it. This is a little aggressive, but the test suite doesn't + // have any easy way of communicating which Unicode features are needed. 
+    if test.unicode() && !cfg!(feature = "unicode") {
+        return skip;
+    }
+    let re = RegexBuilder::new(pattern)
+        .case_insensitive(test.case_insensitive())
+        .unicode(test.unicode())
+        .line_terminator(test.line_terminator())
+        .build()?;
+    Ok(CompiledRegex::compiled(move |test| run_test(&re, test)))
+}
+
+/// Convert `Captures` into the test suite's capture values.
+fn testify_captures(
+    caps: &regex::bytes::Captures<'_>,
+) -> regex_test::Captures {
+    let spans = caps.iter().map(|group| {
+        group.map(|m| regex_test::Span { start: m.start(), end: m.end() })
+    });
+    // This unwrap is OK because we assume our 'caps' represents a match, and
+    // a match always gives a non-zero number of groups with the first group
+    // being non-None.
+    regex_test::Captures::new(0, spans).unwrap()
+}
diff --git a/vendor/regex/tests/suite_bytes_set.rs b/vendor/regex/tests/suite_bytes_set.rs
new file mode 100644
index 00000000000000..9b75f8da1fedae
--- /dev/null
+++ b/vendor/regex/tests/suite_bytes_set.rs
@@ -0,0 +1,71 @@
+use {
+    anyhow::Result,
+    regex::bytes::{RegexSet, RegexSetBuilder},
+    regex_test::{CompiledRegex, RegexTest, TestResult, TestRunner},
+};
+
+/// Tests the default configuration of the hybrid NFA/DFA.
+#[test]
+fn default() -> Result<()> {
+    let mut runner = TestRunner::new()?;
+    runner
+        .expand(&["is_match", "which"], |test| test.compiles())
+        .blacklist_iter(super::BLACKLIST)
+        .test_iter(crate::suite()?.iter(), compiler)
+        .assert();
+    Ok(())
+}
+
+fn run_test(re: &RegexSet, test: &RegexTest) -> TestResult {
+    match test.additional_name() {
+        "is_match" => TestResult::matched(re.is_match(test.haystack())),
+        "which" => TestResult::which(re.matches(test.haystack()).iter()),
+        name => TestResult::fail(&format!("unrecognized test name: {name}")),
+    }
+}
+
+/// Converts the given regex test to a closure that searches with a
+/// `bytes::Regex`. If the test configuration is unsupported, then a
+/// `CompiledRegex` that skips the test is returned.
+fn compiler(
+    test: &RegexTest,
+    _patterns: &[String],
+) -> anyhow::Result<CompiledRegex> {
+    let skip = Ok(CompiledRegex::skip());
+
+    // The top-level RegexSet API only supports "overlapping" semantics.
+    if !matches!(test.search_kind(), regex_test::SearchKind::Overlapping) {
+        return skip;
+    }
+    // The top-level RegexSet API only supports "all" semantics.
+    if !matches!(test.match_kind(), regex_test::MatchKind::All) {
+        return skip;
+    }
+    // The top-level RegexSet API always runs unanchored searches.
+    if test.anchored() {
+        return skip;
+    }
+    // We don't support tests with explicit search bounds.
+    let bounds = test.bounds();
+    if !(bounds.start == 0 && bounds.end == test.haystack().len()) {
+        return skip;
+    }
+    // The bytes::Regex API specifically does not support enabling UTF-8 mode.
+    // It could I suppose, but currently it does not. That is, it permits
+    // matches to have offsets that split codepoints.
+    if test.utf8() {
+        return skip;
+    }
+    // If the test requires Unicode but the Unicode feature isn't enabled,
+    // skip it. This is a little aggressive, but the test suite doesn't
+    // have any easy way of communicating which Unicode features are needed.
+ if test.unicode() && !cfg!(feature = "unicode") { + return skip; + } + let re = RegexSetBuilder::new(test.regexes()) + .case_insensitive(test.case_insensitive()) + .unicode(test.unicode()) + .line_terminator(test.line_terminator()) + .build()?; + Ok(CompiledRegex::compiled(move |test| run_test(&re, test))) +} diff --git a/vendor/regex/tests/suite_string.rs b/vendor/regex/tests/suite_string.rs new file mode 100644 index 00000000000000..2a6d7709be7521 --- /dev/null +++ b/vendor/regex/tests/suite_string.rs @@ -0,0 +1,113 @@ +use { + anyhow::Result, + regex::{Regex, RegexBuilder}, + regex_test::{ + CompiledRegex, Match, RegexTest, Span, TestResult, TestRunner, + }, +}; + +/// Tests the default configuration of the hybrid NFA/DFA. +#[test] +fn default() -> Result<()> { + let mut runner = TestRunner::new()?; + runner + .expand(&["is_match", "find", "captures"], |test| test.compiles()) + .blacklist_iter(super::BLACKLIST) + .test_iter(crate::suite()?.iter(), compiler) + .assert(); + Ok(()) +} + +fn run_test(re: &Regex, test: &RegexTest) -> TestResult { + let hay = match std::str::from_utf8(test.haystack()) { + Ok(hay) => hay, + Err(err) => { + return TestResult::fail(&format!( + "haystack is not valid UTF-8: {err}", + )); + } + }; + match test.additional_name() { + "is_match" => TestResult::matched(re.is_match(hay)), + "find" => TestResult::matches( + re.find_iter(hay) + .take(test.match_limit().unwrap_or(std::usize::MAX)) + .map(|m| Match { + id: 0, + span: Span { start: m.start(), end: m.end() }, + }), + ), + "captures" => { + let it = re + .captures_iter(hay) + .take(test.match_limit().unwrap_or(std::usize::MAX)) + .map(|caps| testify_captures(&caps)); + TestResult::captures(it) + } + name => TestResult::fail(&format!("unrecognized test name: {name}")), + } +} + +/// Converts the given regex test to a closure that searches with a +/// `bytes::Regex`. If the test configuration is unsupported, then a +/// `CompiledRegex` that skips the test is returned. +fn compiler( + test: &RegexTest, + _patterns: &[String], +) -> anyhow::Result<CompiledRegex> { + let skip = Ok(CompiledRegex::skip()); + + // We're only testing bytes::Regex here, which supports one pattern only. + let pattern = match test.regexes().len() { + 1 => &test.regexes()[0], + _ => return skip, + }; + // We only test is_match, find_iter and captures_iter. All of those are + // leftmost searches. + if !matches!(test.search_kind(), regex_test::SearchKind::Leftmost) { + return skip; + } + // The top-level single-pattern regex API always uses leftmost-first. + if !matches!(test.match_kind(), regex_test::MatchKind::LeftmostFirst) { + return skip; + } + // The top-level regex API always runs unanchored searches. ... But we can + // handle tests that are anchored but have only one match. + if test.anchored() && test.match_limit() != Some(1) { + return skip; + } + // We don't support tests with explicit search bounds. We could probably + // support this by using the 'find_at' (and such) APIs. + let bounds = test.bounds(); + if !(bounds.start == 0 && bounds.end == test.haystack().len()) { + return skip; + } + // The Regex API specifically does not support disabling UTF-8 mode because + // it can only search &str which is always valid UTF-8. + if !test.utf8() { + return skip; + } + // If the test requires Unicode but the Unicode feature isn't enabled, + // skip it. This is a little aggressive, but the test suite doesn't + // have any easy way of communicating which Unicode features are needed. 
+ if test.unicode() && !cfg!(feature = "unicode") { + return skip; + } + let re = RegexBuilder::new(pattern) + .case_insensitive(test.case_insensitive()) + .unicode(test.unicode()) + .line_terminator(test.line_terminator()) + .build()?; + Ok(CompiledRegex::compiled(move |test| run_test(&re, test))) +} + +/// Convert `Captures` into the test suite's capture values. +fn testify_captures(caps: ®ex::Captures<'_>) -> regex_test::Captures { + let spans = caps.iter().map(|group| { + group.map(|m| regex_test::Span { start: m.start(), end: m.end() }) + }); + // This unwrap is OK because we assume our 'caps' represents a match, and + // a match always gives a non-zero number of groups with the first group + // being non-None. + regex_test::Captures::new(0, spans).unwrap() +} diff --git a/vendor/regex/tests/suite_string_set.rs b/vendor/regex/tests/suite_string_set.rs new file mode 100644 index 00000000000000..122e39c75e908b --- /dev/null +++ b/vendor/regex/tests/suite_string_set.rs @@ -0,0 +1,78 @@ +use { + anyhow::Result, + regex::{RegexSet, RegexSetBuilder}, + regex_test::{CompiledRegex, RegexTest, TestResult, TestRunner}, +}; + +/// Tests the default configuration of the hybrid NFA/DFA. +#[test] +fn default() -> Result<()> { + let mut runner = TestRunner::new()?; + runner + .expand(&["is_match", "which"], |test| test.compiles()) + .blacklist_iter(super::BLACKLIST) + .test_iter(crate::suite()?.iter(), compiler) + .assert(); + Ok(()) +} + +fn run_test(re: &RegexSet, test: &RegexTest) -> TestResult { + let hay = match std::str::from_utf8(test.haystack()) { + Ok(hay) => hay, + Err(err) => { + return TestResult::fail(&format!( + "haystack is not valid UTF-8: {err}", + )); + } + }; + match test.additional_name() { + "is_match" => TestResult::matched(re.is_match(hay)), + "which" => TestResult::which(re.matches(hay).iter()), + name => TestResult::fail(&format!("unrecognized test name: {name}")), + } +} + +/// Converts the given regex test to a closure that searches with a +/// `bytes::Regex`. If the test configuration is unsupported, then a +/// `CompiledRegex` that skips the test is returned. +fn compiler( + test: &RegexTest, + _patterns: &[String], +) -> anyhow::Result<CompiledRegex> { + let skip = Ok(CompiledRegex::skip()); + + // The top-level RegexSet API only supports "overlapping" semantics. + if !matches!(test.search_kind(), regex_test::SearchKind::Overlapping) { + return skip; + } + // The top-level RegexSet API only supports "all" semantics. + if !matches!(test.match_kind(), regex_test::MatchKind::All) { + return skip; + } + // The top-level RegexSet API always runs unanchored searches. + if test.anchored() { + return skip; + } + // We don't support tests with explicit search bounds. + let bounds = test.bounds(); + if !(bounds.start == 0 && bounds.end == test.haystack().len()) { + return skip; + } + // The Regex API specifically does not support disabling UTF-8 mode because + // it can only search &str which is always valid UTF-8. + if !test.utf8() { + return skip; + } + // If the test requires Unicode but the Unicode feature isn't enabled, + // skip it. This is a little aggressive, but the test suite doesn't + // have any easy way of communicating which Unicode features are needed. 
+ if test.unicode() && !cfg!(feature = "unicode") { + return skip; + } + let re = RegexSetBuilder::new(test.regexes()) + .case_insensitive(test.case_insensitive()) + .unicode(test.unicode()) + .line_terminator(test.line_terminator()) + .build()?; + Ok(CompiledRegex::compiled(move |test| run_test(&re, test))) +} diff --git a/vendor/rustc-hash/.cargo-checksum.json b/vendor/rustc-hash/.cargo-checksum.json new file mode 100644 index 00000000000000..694a5dfff97328 --- /dev/null +++ b/vendor/rustc-hash/.cargo-checksum.json @@ -0,0 +1 @@ +{"files":{".cargo_vcs_info.json":"afbb3c737b1463a6ef5ba68383db3ddcd9cc1228ea88b9f4fa488619f7bb2fb2",".github/workflows/rust.yml":"ff0288f89c97203b725441e9d9717fa0c049f1cebb2bcbb556981bfa8be10029","CHANGELOG.md":"1c9951d52d63dfbff8d32ad7909761517db5dc8f9084dba7892da8d5028c9692","CODE_OF_CONDUCT.md":"3e77f5476805b69467641b2c682aa2355344395056939089182cd901c56dce63","Cargo.lock":"213c05814f6402c09bc5bf32579187b5448ec81f8530f58d7289fd50a35d5b2a","Cargo.toml":"110004d0c56ebe79a83b2cbe44b1574a69010a9e4a8581e5215ac14251ef30cc","Cargo.toml.orig":"4c6ecfcf11005839dc797e6de41b70c97ab043a9a9fe8cc1c27b904252ce8ae7","LICENSE-APACHE":"95bd3988beee069fa2848f648dab43cc6e0b2add2ad6bcb17360caf749802bcc","LICENSE-MIT":"30fefc3a7d6a0041541858293bcbea2dde4caa4c0a5802f996a7f7e8c0085652","README.md":"ccd7a15a2e2021dbbfd5b7f99a10666a64ac50f8d5d6926a858efdde724fb424","src/lib.rs":"6928d71e403482e0e6f3324fbcef23a731c9236a5315db829f4020991064c5fa","src/random_state.rs":"39063b702c38dc93b7a9039f19f4acfdc539acf1604584a87eeb43cca149ca7e","src/seeded_state.rs":"530ba6e25d766231cc7540f968d3e41c5af5a38d936542b407010b9d35746fd8"},"package":"357703d41365b4b27c590e3ed91eabb1b663f07c4c084095e60cbed4362dff0d"} \ No newline at end of file diff --git a/vendor/rustc-hash/.cargo_vcs_info.json b/vendor/rustc-hash/.cargo_vcs_info.json new file mode 100644 index 00000000000000..63811667ce1f28 --- /dev/null +++ b/vendor/rustc-hash/.cargo_vcs_info.json @@ -0,0 +1,6 @@ +{ + "git": { + "sha1": "dc5c33f1283de2da64d8d7a06401d91aded03ad4" + }, + "path_in_vcs": "" +} \ No newline at end of file diff --git a/vendor/rustc-hash/.github/workflows/rust.yml b/vendor/rustc-hash/.github/workflows/rust.yml new file mode 100644 index 00000000000000..0a019cbfb00afd --- /dev/null +++ b/vendor/rustc-hash/.github/workflows/rust.yml @@ -0,0 +1,73 @@ +name: Rust + +permissions: + contents: read + +on: [push, pull_request] + +env: + CARGO_TERM_COLOR: always + RUST_BACKTRACE: 1 + RUSTUP_MAX_RETRIES: 10 + +concurrency: + group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.sha }} + cancel-in-progress: true + +jobs: + test: + strategy: + matrix: + os: [ubuntu, windows, macos] + runs-on: ${{ matrix.os }}-latest + timeout-minutes: 30 + steps: + - uses: actions/checkout@v4 + - run: rustup update stable && rustup default stable + - run: cargo check + - run: cargo test + - run: rustup update nightly && rustup default nightly + - run: cargo test --all-features + cross-test: + strategy: + matrix: + target: [ + "x86_64-unknown-linux-gnu", # 64-bits, little-endian + "i686-unknown-linux-gnu", # 32-bits, little-endian + "mips-unknown-linux-gnu", # 32-bits, big-endian + "mips64-unknown-linux-gnuabi64", # 64-bits, big-endian + ] + runs-on: ubuntu-latest + timeout-minutes: 30 + steps: + - uses: actions/checkout@v4 + - name: install miri + run: rustup toolchain add nightly --no-self-update --component miri && rustup default nightly + - run: | + cargo miri test --target=${{ matrix.target }} --all-features + env: + MIRIFLAGS: 
-Zmiri-strict-provenance + RUSTDOCFLAGS: ${{ env.RUSTDOCFLAGS }} -Z randomize-layout + RUSTFLAGS: ${{ env.RUSTFLAGS }} -Z randomize-layout + fmt: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - run: rustup update stable && rustup default stable + - run: rustup component add rustfmt + - run: cargo fmt --all --check + docs: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - run: rustup update stable && rustup default stable + - run: cargo doc --workspace --document-private-items --no-deps + env: + RUSTDOCFLAGS: -D warnings + clippy: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - run: rustup update stable && rustup default stable + - run: rustup component add clippy + - run: cargo clippy --workspace --all-targets --no-deps diff --git a/vendor/rustc-hash/CHANGELOG.md b/vendor/rustc-hash/CHANGELOG.md new file mode 100644 index 00000000000000..d52aba07819be5 --- /dev/null +++ b/vendor/rustc-hash/CHANGELOG.md @@ -0,0 +1,32 @@ +# 2.1.1 + +- Change the internal algorithm to better accomodate large hashmaps. + This mitigates a [regression with 2.0 in rustc](https://github.com/rust-lang/rust/issues/135477). + See [PR#55](https://github.com/rust-lang/rustc-hash/pull/55) for more details on the change (this PR was not merged). + This problem might be improved with changes to hashbrown in the future. + +## 2.1.0 + +- Implement `Clone` for `FxRandomState` +- Implement `Clone` for `FxSeededState` +- Use SPDX license expression in license field + +## 2.0.0 + +- Replace hash with faster and better finalized hash. + This replaces the previous "fxhash" algorithm originating in Firefox + with a custom hasher designed and implemented by Orson Peters ([`@orlp`](https://github.com/orlp)). + It was measured to have slightly better performance for rustc, has better theoretical properties + and also includes a significantly better string hasher. +- Fix `no_std` builds + +## 1.2.0 (**YANKED**) + +**Note: This version has been yanked due to issues with the `no_std` feature!** + +- Add a `FxBuildHasher` unit struct +- Improve documentation +- Add seed API for supplying custom seeds other than 0 +- Add `FxRandomState` based on `rand` (behind the `rand` feature) for random seeds +- Make many functions `const fn` +- Implement `Clone` for `FxHasher` struct diff --git a/vendor/rustc-hash/CODE_OF_CONDUCT.md b/vendor/rustc-hash/CODE_OF_CONDUCT.md new file mode 100644 index 00000000000000..d6d774281213a9 --- /dev/null +++ b/vendor/rustc-hash/CODE_OF_CONDUCT.md @@ -0,0 +1,3 @@ +# The Rust Code of Conduct + +The Code of Conduct for this repository [can be found online](https://www.rust-lang.org/conduct.html). \ No newline at end of file diff --git a/vendor/rustc-hash/Cargo.lock b/vendor/rustc-hash/Cargo.lock new file mode 100644 index 00000000000000..2b1b0744884706 --- /dev/null +++ b/vendor/rustc-hash/Cargo.lock @@ -0,0 +1,75 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. 
+version = 4 + +[[package]] +name = "cfg-if" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" + +[[package]] +name = "getrandom" +version = "0.2.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "94b22e06ecb0110981051723910cbf0b5f5e09a2062dd7663334ee79a9d1286c" +dependencies = [ + "cfg-if", + "libc", + "wasi", +] + +[[package]] +name = "libc" +version = "0.2.153" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c198f91728a82281a64e1f4f9eeb25d82cb32a5de251c6bd1b5154d63a8e7bd" + +[[package]] +name = "ppv-lite86" +version = "0.2.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" + +[[package]] +name = "rand" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" +dependencies = [ + "libc", + "rand_chacha", + "rand_core", +] + +[[package]] +name = "rand_chacha" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" +dependencies = [ + "ppv-lite86", + "rand_core", +] + +[[package]] +name = "rand_core" +version = "0.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" +dependencies = [ + "getrandom", +] + +[[package]] +name = "rustc-hash" +version = "2.1.1" +dependencies = [ + "rand", +] + +[[package]] +name = "wasi" +version = "0.11.0+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" diff --git a/vendor/rustc-hash/Cargo.toml b/vendor/rustc-hash/Cargo.toml new file mode 100644 index 00000000000000..a95ba3a3fd1555 --- /dev/null +++ b/vendor/rustc-hash/Cargo.toml @@ -0,0 +1,49 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies. +# +# If you are reading this file be aware that the original Cargo.toml +# will likely look very different (and much more reasonable). +# See Cargo.toml.orig for the original contents. 
+ +[package] +edition = "2021" +name = "rustc-hash" +version = "2.1.1" +authors = ["The Rust Project Developers"] +build = false +autolib = false +autobins = false +autoexamples = false +autotests = false +autobenches = false +description = "A speedy, non-cryptographic hashing algorithm used by rustc" +readme = "README.md" +keywords = [ + "hash", + "hasher", + "fxhash", + "rustc", +] +license = "Apache-2.0 OR MIT" +repository = "https://github.com/rust-lang/rustc-hash" + +[features] +default = ["std"] +nightly = [] +rand = [ + "dep:rand", + "std", +] +std = [] + +[lib] +name = "rustc_hash" +path = "src/lib.rs" + +[dependencies.rand] +version = "0.8" +optional = true diff --git a/vendor/rustc-hash/LICENSE-APACHE b/vendor/rustc-hash/LICENSE-APACHE new file mode 100644 index 00000000000000..a7e77cb28d386e --- /dev/null +++ b/vendor/rustc-hash/LICENSE-APACHE @@ -0,0 +1,176 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ +END OF TERMS AND CONDITIONS \ No newline at end of file diff --git a/vendor/rustc-hash/LICENSE-MIT b/vendor/rustc-hash/LICENSE-MIT new file mode 100644 index 00000000000000..468cd79a8f6e50 --- /dev/null +++ b/vendor/rustc-hash/LICENSE-MIT @@ -0,0 +1,23 @@ +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. \ No newline at end of file diff --git a/vendor/rustc-hash/README.md b/vendor/rustc-hash/README.md new file mode 100644 index 00000000000000..bcac3455ac90d6 --- /dev/null +++ b/vendor/rustc-hash/README.md @@ -0,0 +1,42 @@ +# rustc-hash + +[![crates.io](https://img.shields.io/crates/v/rustc-hash.svg)](https://crates.io/crates/rustc-hash) +[![Documentation](https://docs.rs/rustc-hash/badge.svg)](https://docs.rs/rustc-hash) + +A speedy, non-cryptographic hashing algorithm used by `rustc`. +The [hash map in `std`](https://doc.rust-lang.org/std/collections/struct.HashMap.html) uses SipHash by default, which provides resistance against DOS attacks. +These attacks aren't a concern in the compiler so we prefer to use a quicker, +non-cryptographic hash algorithm. + +The original hash algorithm provided by this crate was one taken from Firefox, +hence the hasher it provides is called FxHasher. This name is kept for backwards +compatibility, but the underlying hash has since been replaced. The current +design for the hasher is a polynomial hash finished with a single bit rotation, +together with a wyhash-inspired compression function for strings/slices, both +designed by Orson Peters. + +For `rustc` we have tried many different hashing algorithms. Hashing speed is +critical, especially for single integers. Spending more CPU cycles on a higher +quality hash does not reduce hash collisions enough to make the compiler faster +on real-world benchmarks. + +## Usage + +This crate provides `FxHashMap` and `FxHashSet` as collections. +They are simply type aliases for their `std::collection` counterparts using the Fx hasher. + +```rust +use rustc_hash::FxHashMap; + +let mut map: FxHashMap<u32, u32> = FxHashMap::default(); +map.insert(22, 44); +``` + +### `no_std` + +The `std` feature is on by default to enable collections. +It can be turned off in `Cargo.toml` like so: + +```toml +rustc-hash = { version = "2.1", default-features = false } +``` diff --git a/vendor/rustc-hash/src/lib.rs b/vendor/rustc-hash/src/lib.rs new file mode 100644 index 00000000000000..03117c96c015cc --- /dev/null +++ b/vendor/rustc-hash/src/lib.rs @@ -0,0 +1,459 @@ +//! A speedy, non-cryptographic hashing algorithm used by `rustc`. +//! +//! # Example +//! 
+//! ```rust +//! # #[cfg(feature = "std")] +//! # fn main() { +//! use rustc_hash::FxHashMap; +//! +//! let mut map: FxHashMap<u32, u32> = FxHashMap::default(); +//! map.insert(22, 44); +//! # } +//! # #[cfg(not(feature = "std"))] +//! # fn main() { } +//! ``` + +#![no_std] +#![cfg_attr(feature = "nightly", feature(hasher_prefixfree_extras))] + +#[cfg(feature = "std")] +extern crate std; + +#[cfg(feature = "rand")] +extern crate rand; + +#[cfg(feature = "rand")] +mod random_state; + +mod seeded_state; + +use core::default::Default; +use core::hash::{BuildHasher, Hasher}; +#[cfg(feature = "std")] +use std::collections::{HashMap, HashSet}; + +/// Type alias for a hash map that uses the Fx hashing algorithm. +#[cfg(feature = "std")] +pub type FxHashMap<K, V> = HashMap<K, V, FxBuildHasher>; + +/// Type alias for a hash set that uses the Fx hashing algorithm. +#[cfg(feature = "std")] +pub type FxHashSet<V> = HashSet<V, FxBuildHasher>; + +#[cfg(feature = "rand")] +pub use random_state::{FxHashMapRand, FxHashSetRand, FxRandomState}; + +pub use seeded_state::FxSeededState; +#[cfg(feature = "std")] +pub use seeded_state::{FxHashMapSeed, FxHashSetSeed}; + +/// A speedy hash algorithm for use within rustc. The hashmap in liballoc +/// by default uses SipHash which isn't quite as speedy as we want. In the +/// compiler we're not really worried about DOS attempts, so we use a fast +/// non-cryptographic hash. +/// +/// The current implementation is a fast polynomial hash with a single +/// bit rotation as a finishing step designed by Orson Peters. +#[derive(Clone)] +pub struct FxHasher { + hash: usize, +} + +// One might view a polynomial hash +// m[0] * k + m[1] * k^2 + m[2] * k^3 + ... +// as a multilinear hash with keystream k[..] +// m[0] * k[0] + m[1] * k[1] + m[2] * k[2] + ... +// where keystream k just happens to be generated using a multiplicative +// congruential pseudorandom number generator (MCG). For that reason we chose a +// constant that was found to be good for a MCG in: +// "Computationally Easy, Spectrally Good Multipliers for Congruential +// Pseudorandom Number Generators" by Guy Steele and Sebastiano Vigna. +#[cfg(target_pointer_width = "64")] +const K: usize = 0xf1357aea2e62a9c5; +#[cfg(target_pointer_width = "32")] +const K: usize = 0x93d765dd; + +impl FxHasher { + /// Creates a `fx` hasher with a given seed. + pub const fn with_seed(seed: usize) -> FxHasher { + FxHasher { hash: seed } + } + + /// Creates a default `fx` hasher. + pub const fn default() -> FxHasher { + FxHasher { hash: 0 } + } +} + +impl Default for FxHasher { + #[inline] + fn default() -> FxHasher { + Self::default() + } +} + +impl FxHasher { + #[inline] + fn add_to_hash(&mut self, i: usize) { + self.hash = self.hash.wrapping_add(i).wrapping_mul(K); + } +} + +impl Hasher for FxHasher { + #[inline] + fn write(&mut self, bytes: &[u8]) { + // Compress the byte string to a single u64 and add to our hash. 
+ self.write_u64(hash_bytes(bytes)); + } + + #[inline] + fn write_u8(&mut self, i: u8) { + self.add_to_hash(i as usize); + } + + #[inline] + fn write_u16(&mut self, i: u16) { + self.add_to_hash(i as usize); + } + + #[inline] + fn write_u32(&mut self, i: u32) { + self.add_to_hash(i as usize); + } + + #[inline] + fn write_u64(&mut self, i: u64) { + self.add_to_hash(i as usize); + #[cfg(target_pointer_width = "32")] + self.add_to_hash((i >> 32) as usize); + } + + #[inline] + fn write_u128(&mut self, i: u128) { + self.add_to_hash(i as usize); + #[cfg(target_pointer_width = "32")] + self.add_to_hash((i >> 32) as usize); + self.add_to_hash((i >> 64) as usize); + #[cfg(target_pointer_width = "32")] + self.add_to_hash((i >> 96) as usize); + } + + #[inline] + fn write_usize(&mut self, i: usize) { + self.add_to_hash(i); + } + + #[cfg(feature = "nightly")] + #[inline] + fn write_length_prefix(&mut self, _len: usize) { + // Most cases will specialize hash_slice to call write(), which encodes + // the length already in a more efficient manner than we could here. For + // HashDoS-resistance you would still need to include this for the + // non-slice collection hashes, but for the purposes of rustc we do not + // care and do not wish to pay the performance penalty of mixing in len + // for those collections. + } + + #[cfg(feature = "nightly")] + #[inline] + fn write_str(&mut self, s: &str) { + // Similarly here, write already encodes the length, so nothing special + // is needed. + self.write(s.as_bytes()) + } + + #[inline] + fn finish(&self) -> u64 { + // Since we used a multiplicative hash our top bits have the most + // entropy (with the top bit having the most, decreasing as you go). + // As most hash table implementations (including hashbrown) compute + // the bucket index from the bottom bits we want to move bits from the + // top to the bottom. Ideally we'd rotate left by exactly the hash table + // size, but as we don't know this we'll choose 26 bits, giving decent + // entropy up until 2^26 table sizes. On 32-bit hosts we'll dial it + // back down a bit to 15 bits. + + #[cfg(target_pointer_width = "64")] + const ROTATE: u32 = 26; + #[cfg(target_pointer_width = "32")] + const ROTATE: u32 = 15; + + self.hash.rotate_left(ROTATE) as u64 + + // A bit reversal would be even better, except hashbrown also expects + // good entropy in the top 7 bits and a bit reverse would fill those + // bits with low entropy. More importantly, bit reversals are very slow + // on x86-64. A byte reversal is relatively fast, but still has a 2 + // cycle latency on x86-64 compared to the 1 cycle latency of a rotate. + // It also suffers from the hashbrown-top-7-bit-issue. + } +} + +// Nothing special, digits of pi. +const SEED1: u64 = 0x243f6a8885a308d3; +const SEED2: u64 = 0x13198a2e03707344; +const PREVENT_TRIVIAL_ZERO_COLLAPSE: u64 = 0xa4093822299f31d0; + +#[inline] +fn multiply_mix(x: u64, y: u64) -> u64 { + #[cfg(target_pointer_width = "64")] + { + // We compute the full u64 x u64 -> u128 product, this is a single mul + // instruction on x86-64, one mul plus one mulhi on ARM64. + let full = (x as u128) * (y as u128); + let lo = full as u64; + let hi = (full >> 64) as u64; + + // The middle bits of the full product fluctuate the most with small + // changes in the input. This is the top bits of lo and the bottom bits + // of hi. We can thus make the entire output fluctuate with small + // changes to the input by XOR'ing these two halves. 
+ lo ^ hi + + // Unfortunately both 2^64 + 1 and 2^64 - 1 have small prime factors, + // otherwise combining with + or - could result in a really strong hash, as: + // x * y = 2^64 * hi + lo = (-1) * hi + lo = lo - hi, (mod 2^64 + 1) + // x * y = 2^64 * hi + lo = 1 * hi + lo = lo + hi, (mod 2^64 - 1) + // Multiplicative hashing is universal in a field (like mod p). + } + + #[cfg(target_pointer_width = "32")] + { + // u64 x u64 -> u128 product is prohibitively expensive on 32-bit. + // Decompose into 32-bit parts. + let lx = x as u32; + let ly = y as u32; + let hx = (x >> 32) as u32; + let hy = (y >> 32) as u32; + + // u32 x u32 -> u64 the low bits of one with the high bits of the other. + let afull = (lx as u64) * (hy as u64); + let bfull = (hx as u64) * (ly as u64); + + // Combine, swapping low/high of one of them so the upper bits of the + // product of one combine with the lower bits of the other. + afull ^ bfull.rotate_right(32) + } +} + +/// A wyhash-inspired non-collision-resistant hash for strings/slices designed +/// by Orson Peters, with a focus on small strings and small codesize. +/// +/// The 64-bit version of this hash passes the SMHasher3 test suite on the full +/// 64-bit output, that is, f(hash_bytes(b) ^ f(seed)) for some good avalanching +/// permutation f() passed all tests with zero failures. When using the 32-bit +/// version of multiply_mix this hash has a few non-catastrophic failures where +/// there are a handful more collisions than an optimal hash would give. +/// +/// We don't bother avalanching here as we'll feed this hash into a +/// multiplication after which we take the high bits, which avalanches for us. +#[inline] +fn hash_bytes(bytes: &[u8]) -> u64 { + let len = bytes.len(); + let mut s0 = SEED1; + let mut s1 = SEED2; + + if len <= 16 { + // XOR the input into s0, s1. + if len >= 8 { + s0 ^= u64::from_le_bytes(bytes[0..8].try_into().unwrap()); + s1 ^= u64::from_le_bytes(bytes[len - 8..].try_into().unwrap()); + } else if len >= 4 { + s0 ^= u32::from_le_bytes(bytes[0..4].try_into().unwrap()) as u64; + s1 ^= u32::from_le_bytes(bytes[len - 4..].try_into().unwrap()) as u64; + } else if len > 0 { + let lo = bytes[0]; + let mid = bytes[len / 2]; + let hi = bytes[len - 1]; + s0 ^= lo as u64; + s1 ^= ((hi as u64) << 8) | mid as u64; + } + } else { + // Handle bulk (can partially overlap with suffix). + let mut off = 0; + while off < len - 16 { + let x = u64::from_le_bytes(bytes[off..off + 8].try_into().unwrap()); + let y = u64::from_le_bytes(bytes[off + 8..off + 16].try_into().unwrap()); + + // Replace s1 with a mix of s0, x, and y, and s0 with s1. + // This ensures the compiler can unroll this loop into two + // independent streams, one operating on s0, the other on s1. + // + // Since zeroes are a common input we prevent an immediate trivial + // collapse of the hash function by XOR'ing a constant with y. + let t = multiply_mix(s0 ^ x, PREVENT_TRIVIAL_ZERO_COLLAPSE ^ y); + s0 = s1; + s1 = t; + off += 16; + } + + let suffix = &bytes[len - 16..]; + s0 ^= u64::from_le_bytes(suffix[0..8].try_into().unwrap()); + s1 ^= u64::from_le_bytes(suffix[8..16].try_into().unwrap()); + } + + multiply_mix(s0, s1) ^ (len as u64) +} + +/// An implementation of [`BuildHasher`] that produces [`FxHasher`]s. 
+/// +/// ``` +/// use std::hash::BuildHasher; +/// use rustc_hash::FxBuildHasher; +/// assert_ne!(FxBuildHasher.hash_one(1), FxBuildHasher.hash_one(2)); +/// ``` +#[derive(Copy, Clone, Default)] +pub struct FxBuildHasher; + +impl BuildHasher for FxBuildHasher { + type Hasher = FxHasher; + fn build_hasher(&self) -> FxHasher { + FxHasher::default() + } +} + +#[cfg(test)] +mod tests { + #[cfg(not(any(target_pointer_width = "64", target_pointer_width = "32")))] + compile_error!("The test suite only supports 64 bit and 32 bit usize"); + + use crate::{FxBuildHasher, FxHasher}; + use core::hash::{BuildHasher, Hash, Hasher}; + + macro_rules! test_hash { + ( + $( + hash($value:expr) == $result:expr, + )* + ) => { + $( + assert_eq!(FxBuildHasher.hash_one($value), $result); + )* + }; + } + + const B32: bool = cfg!(target_pointer_width = "32"); + + #[test] + fn unsigned() { + test_hash! { + hash(0_u8) == 0, + hash(1_u8) == if B32 { 3001993707 } else { 12157901119326311915 }, + hash(100_u8) == if B32 { 3844759569 } else { 16751747135202103309 }, + hash(u8::MAX) == if B32 { 999399879 } else { 1211781028898739645 }, + + hash(0_u16) == 0, + hash(1_u16) == if B32 { 3001993707 } else { 12157901119326311915 }, + hash(100_u16) == if B32 { 3844759569 } else { 16751747135202103309 }, + hash(u16::MAX) == if B32 { 3440503042 } else { 16279819243059860173 }, + + hash(0_u32) == 0, + hash(1_u32) == if B32 { 3001993707 } else { 12157901119326311915 }, + hash(100_u32) == if B32 { 3844759569 } else { 16751747135202103309 }, + hash(u32::MAX) == if B32 { 1293006356 } else { 7729994835221066939 }, + + hash(0_u64) == 0, + hash(1_u64) == if B32 { 275023839 } else { 12157901119326311915 }, + hash(100_u64) == if B32 { 1732383522 } else { 16751747135202103309 }, + hash(u64::MAX) == if B32 { 1017982517 } else { 6288842954450348564 }, + + hash(0_u128) == 0, + hash(1_u128) == if B32 { 1860738631 } else { 13032756267696824044 }, + hash(100_u128) == if B32 { 1389515751 } else { 12003541609544029302 }, + hash(u128::MAX) == if B32 { 2156022013 } else { 11702830760530184999 }, + + hash(0_usize) == 0, + hash(1_usize) == if B32 { 3001993707 } else { 12157901119326311915 }, + hash(100_usize) == if B32 { 3844759569 } else { 16751747135202103309 }, + hash(usize::MAX) == if B32 { 1293006356 } else { 6288842954450348564 }, + } + } + + #[test] + fn signed() { + test_hash! 
{ + hash(i8::MIN) == if B32 { 2000713177 } else { 6684841074112525780 }, + hash(0_i8) == 0, + hash(1_i8) == if B32 { 3001993707 } else { 12157901119326311915 }, + hash(100_i8) == if B32 { 3844759569 } else { 16751747135202103309 }, + hash(i8::MAX) == if B32 { 3293686765 } else { 12973684028562874344 }, + + hash(i16::MIN) == if B32 { 1073764727 } else { 14218860181193086044 }, + hash(0_i16) == 0, + hash(1_i16) == if B32 { 3001993707 } else { 12157901119326311915 }, + hash(100_i16) == if B32 { 3844759569 } else { 16751747135202103309 }, + hash(i16::MAX) == if B32 { 2366738315 } else { 2060959061933882993 }, + + hash(i32::MIN) == if B32 { 16384 } else { 9943947977240134995 }, + hash(0_i32) == 0, + hash(1_i32) == if B32 { 3001993707 } else { 12157901119326311915 }, + hash(100_i32) == if B32 { 3844759569 } else { 16751747135202103309 }, + hash(i32::MAX) == if B32 { 1293022740 } else { 16232790931690483559 }, + + hash(i64::MIN) == if B32 { 16384 } else { 33554432 }, + hash(0_i64) == 0, + hash(1_i64) == if B32 { 275023839 } else { 12157901119326311915 }, + hash(100_i64) == if B32 { 1732383522 } else { 16751747135202103309 }, + hash(i64::MAX) == if B32 { 1017998901 } else { 6288842954483902996 }, + + hash(i128::MIN) == if B32 { 16384 } else { 33554432 }, + hash(0_i128) == 0, + hash(1_i128) == if B32 { 1860738631 } else { 13032756267696824044 }, + hash(100_i128) == if B32 { 1389515751 } else { 12003541609544029302 }, + hash(i128::MAX) == if B32 { 2156005629 } else { 11702830760496630567 }, + + hash(isize::MIN) == if B32 { 16384 } else { 33554432 }, + hash(0_isize) == 0, + hash(1_isize) == if B32 { 3001993707 } else { 12157901119326311915 }, + hash(100_isize) == if B32 { 3844759569 } else { 16751747135202103309 }, + hash(isize::MAX) == if B32 { 1293022740 } else { 6288842954483902996 }, + } + } + + // Avoid relying on any `Hash` implementations in the standard library. + struct HashBytes(&'static [u8]); + impl Hash for HashBytes { + fn hash<H: core::hash::Hasher>(&self, state: &mut H) { + state.write(self.0); + } + } + + #[test] + fn bytes() { + test_hash! { + hash(HashBytes(&[])) == if B32 { 2673204745 } else { 17606491139363777937 }, + hash(HashBytes(&[0])) == if B32 { 2948228584 } else { 5448590020104574886 }, + hash(HashBytes(&[0, 0, 0, 0, 0, 0])) == if B32 { 3223252423 } else { 16766921560080789783 }, + hash(HashBytes(&[1])) == if B32 { 2943445104 } else { 5922447956811044110 }, + hash(HashBytes(&[2])) == if B32 { 1055423297 } else { 5229781508510959783 }, + hash(HashBytes(b"uwu")) == if B32 { 2699662140 } else { 7168164714682931527 }, + hash(HashBytes(b"These are some bytes for testing rustc_hash.")) == if B32 { 2303640537 } else { 2349210501944688211 }, + } + } + + #[test] + fn with_seed_actually_different() { + let seeds = [ + [1, 2], + [42, 17], + [124436707, 99237], + [usize::MIN, usize::MAX], + ]; + + for [a_seed, b_seed] in seeds { + let a = || FxHasher::with_seed(a_seed); + let b = || FxHasher::with_seed(b_seed); + + for x in u8::MIN..=u8::MAX { + let mut a = a(); + let mut b = b(); + + x.hash(&mut a); + x.hash(&mut b); + + assert_ne!(a.finish(), b.finish()) + } + } + } +} diff --git a/vendor/rustc-hash/src/random_state.rs b/vendor/rustc-hash/src/random_state.rs new file mode 100644 index 00000000000000..c8c35a0b1a4da9 --- /dev/null +++ b/vendor/rustc-hash/src/random_state.rs @@ -0,0 +1,101 @@ +use std::collections::{HashMap, HashSet}; + +use crate::FxHasher; + +/// Type alias for a hashmap using the `fx` hash algorithm with [`FxRandomState`]. 
+pub type FxHashMapRand<K, V> = HashMap<K, V, FxRandomState>; + +/// Type alias for a hashmap using the `fx` hash algorithm with [`FxRandomState`]. +pub type FxHashSetRand<V> = HashSet<V, FxRandomState>; + +/// `FxRandomState` is an alternative state for `HashMap` types. +/// +/// A particular instance `FxRandomState` will create the same instances of +/// [`Hasher`], but the hashers created by two different `FxRandomState` +/// instances are unlikely to produce the same result for the same values. +#[derive(Clone)] +pub struct FxRandomState { + seed: usize, +} + +impl FxRandomState { + /// Constructs a new `FxRandomState` that is initialized with random seed. + pub fn new() -> FxRandomState { + use rand::Rng; + use std::{cell::Cell, thread_local}; + + // This mirrors what `std::collections::hash_map::RandomState` does, as of 2024-01-14. + // + // Basically + // 1. Cache result of the rng in a thread local, so repeatedly + // creating maps is cheaper + // 2. Change the cached result on every creation, so maps created + // on the same thread don't have the same iteration order + thread_local!(static SEED: Cell<usize> = { + Cell::new(rand::thread_rng().gen()) + }); + + SEED.with(|seed| { + let s = seed.get(); + seed.set(s.wrapping_add(1)); + FxRandomState { seed: s } + }) + } +} + +impl core::hash::BuildHasher for FxRandomState { + type Hasher = FxHasher; + + fn build_hasher(&self) -> Self::Hasher { + FxHasher::with_seed(self.seed) + } +} + +impl Default for FxRandomState { + fn default() -> Self { + Self::new() + } +} + +#[cfg(test)] +mod tests { + use std::thread; + + use crate::FxHashMapRand; + + #[test] + fn cloned_random_states_are_equal() { + let a = FxHashMapRand::<&str, u32>::default(); + let b = a.clone(); + + assert_eq!(a.hasher().seed, b.hasher().seed); + } + + #[test] + fn random_states_are_different() { + let a = FxHashMapRand::<&str, u32>::default(); + let b = FxHashMapRand::<&str, u32>::default(); + + // That's the whole point of them being random! + // + // N.B.: `FxRandomState` uses a thread-local set to a random value and then incremented, + // which means that this is *guaranteed* to pass :> + assert_ne!(a.hasher().seed, b.hasher().seed); + } + + #[test] + fn random_states_are_different_cross_thread() { + // This is similar to the test above, but uses two different threads, so they both get + // completely random, unrelated values. + // + // This means that this test is technically flaky, but the probability of it failing is + // `1 / 2.pow(bit_size_of::<usize>())`. Or 1/1.7e19 for 64 bit platforms or 1/4294967295 + // for 32 bit platforms. I suppose this is acceptable. + let a = FxHashMapRand::<&str, u32>::default(); + let b = thread::spawn(|| FxHashMapRand::<&str, u32>::default()) + .join() + .unwrap(); + + assert_ne!(a.hasher().seed, b.hasher().seed); + } +} diff --git a/vendor/rustc-hash/src/seeded_state.rs b/vendor/rustc-hash/src/seeded_state.rs new file mode 100644 index 00000000000000..e84190625939e8 --- /dev/null +++ b/vendor/rustc-hash/src/seeded_state.rs @@ -0,0 +1,76 @@ +use crate::FxHasher; + +/// Type alias for a hashmap using the `fx` hash algorithm with [`FxSeededState`]. +#[cfg(feature = "std")] +pub type FxHashMapSeed<K, V> = std::collections::HashMap<K, V, FxSeededState>; + +/// Type alias for a hashmap using the `fx` hash algorithm with [`FxSeededState`]. 
+#[cfg(feature = "std")] +pub type FxHashSetSeed<V> = std::collections::HashSet<V, FxSeededState>; + +/// [`FxSeededState`] is an alternative state for `HashMap` types, allowing to use [`FxHasher`] with a set seed. +/// +/// ``` +/// # use std::collections::HashMap; +/// use rustc_hash::FxSeededState; +/// +/// let mut map = HashMap::with_hasher(FxSeededState::with_seed(12)); +/// map.insert(15, 610); +/// assert_eq!(map[&15], 610); +/// ``` +#[derive(Clone)] +pub struct FxSeededState { + seed: usize, +} + +impl FxSeededState { + /// Constructs a new `FxSeededState` that is initialized with a `seed`. + pub const fn with_seed(seed: usize) -> FxSeededState { + Self { seed } + } +} + +impl core::hash::BuildHasher for FxSeededState { + type Hasher = FxHasher; + + fn build_hasher(&self) -> Self::Hasher { + FxHasher::with_seed(self.seed) + } +} + +#[cfg(test)] +mod tests { + use core::hash::BuildHasher; + + use crate::FxSeededState; + + #[test] + fn cloned_seeded_states_are_equal() { + let seed = 2; + let a = FxSeededState::with_seed(seed); + let b = a.clone(); + + assert_eq!(a.seed, b.seed); + assert_eq!(a.seed, seed); + + assert_eq!(a.build_hasher().hash, b.build_hasher().hash); + } + + #[test] + fn same_seed_produces_same_hasher() { + let seed = 1; + let a = FxSeededState::with_seed(seed); + let b = FxSeededState::with_seed(seed); + + // The hashers should be the same, as they have the same seed. + assert_eq!(a.build_hasher().hash, b.build_hasher().hash); + } + + #[test] + fn different_states_are_different() { + let a = FxSeededState::with_seed(1); + let b = FxSeededState::with_seed(2); + + assert_ne!(a.build_hasher().hash, b.build_hasher().hash); + } +} diff --git a/vendor/shlex/.cargo-checksum.json b/vendor/shlex/.cargo-checksum.json new file mode 100644 index 00000000000000..6a97828db78816 --- /dev/null +++ b/vendor/shlex/.cargo-checksum.json @@ -0,0 +1 @@ +{"files":{".cargo_vcs_info.json":"5ba6bffcbe6628331b4b8ae50936c44d6260de2913c83f55e0c19fce366af72c",".github/workflows/test.yml":"cca05e7dbd8b0c65f5c8f5a30fb76fd11aaaaf9a3216b5d5b0ea387b3d94fffa","CHANGELOG.md":"879a16b3fef6fb3251fcac516fe73414109e3b7df5eb2ec4863a7551674038a0","Cargo.toml":"d7eb8c4bce681b4dd1dfc2c98c649754390775f38f4796d491948ddbb53aa2ef","Cargo.toml.orig":"aba3cfcd4981d79feac94eb673bcdd0754962edc0e2a0ce81a13d5285c5a3f3d","LICENSE-APACHE":"553fffcd9b1cb158bc3e9edc35da85ca5c3b3d7d2e61c883ebcfa8a65814b583","LICENSE-MIT":"4455bf75a91154108304cb283e0fea9948c14f13e20d60887cf2552449dea3b1","README.md":"082e505bba5dffc5904af5602b45d01129173e617db62c81e6c11d71c964ea71","src/bytes.rs":"eadfffcdb7846d341ba451d6118d275b9d0f14a9554984ccfcdbe9a8d77ec5ee","src/lib.rs":"44c8fb929e1443f2446d26025a9bcfca0b329811bbc309b4a6afb8ec17d7de8d","src/quoting_warning.md":"566d6509211ddcd4afbd4f1117c5234567f6b6d01f5da60acfaef011362be045"},"package":"0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64"} \ No newline at end of file diff --git a/vendor/shlex/.cargo_vcs_info.json b/vendor/shlex/.cargo_vcs_info.json new file mode 100644 index 00000000000000..efa0c6e18d1233 --- /dev/null +++ b/vendor/shlex/.cargo_vcs_info.json @@ -0,0 +1,6 @@ +{ + "git": { + "sha1": "4a0724b0b62ef715467875b040a890ce75a8a829" + }, + "path_in_vcs": "" +} \ No newline at end of file diff --git a/vendor/shlex/.github/workflows/test.yml b/vendor/shlex/.github/workflows/test.yml new file mode 100644 index 00000000000000..7f299916bc6db5 --- /dev/null +++ b/vendor/shlex/.github/workflows/test.yml @@ -0,0 +1,36 @@ +name: Rust + +on: + pull_request: + push: + +jobs: 
+ check: + name: Check + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - uses: ATiltedTree/setup-rust@v1 + with: + rust-version: stable + - run: cargo check + + test: + name: Test + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - uses: ATiltedTree/setup-rust@v1 + with: + rust-version: stable + - run: cargo test + + test_no_default_features: + name: Test (no default features) + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - uses: ATiltedTree/setup-rust@v1 + with: + rust-version: stable + - run: cargo test --no-default-features diff --git a/vendor/shlex/CHANGELOG.md b/vendor/shlex/CHANGELOG.md new file mode 100644 index 00000000000000..95552b430bb65b --- /dev/null +++ b/vendor/shlex/CHANGELOG.md @@ -0,0 +1,21 @@ +# 1.2.0 + +* Adds `bytes` module to support operating directly on byte strings. + +# 1.1.0 + +* Adds the `std` feature (enabled by default) +* Disabling the `std` feature makes the crate work in `#![no_std]` mode, assuming presence of the `alloc` crate + +# 1.0.0 + +* Adds the `join` convenience function. +* Fixes parsing of `'\\n'` to match the behavior of bash/Zsh/Python `shlex`. The result was previously `\n`, now it is `\\n`. + +# 0.1.1 + +* Adds handling of `#` comments. + +# 0.1.0 + +This is the initial release. diff --git a/vendor/shlex/Cargo.toml b/vendor/shlex/Cargo.toml new file mode 100644 index 00000000000000..2b668928d63fd3 --- /dev/null +++ b/vendor/shlex/Cargo.toml @@ -0,0 +1,35 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies. +# +# If you are reading this file be aware that the original Cargo.toml +# will likely look very different (and much more reasonable). +# See Cargo.toml.orig for the original contents. + +[package] +rust-version = "1.46.0" +name = "shlex" +version = "1.3.0" +authors = [ + "comex <comexk@gmail.com>", + "Fenhl <fenhl@fenhl.net>", + "Adrian Taylor <adetaylor@chromium.org>", + "Alex Touchet <alextouchet@outlook.com>", + "Daniel Parks <dp+git@oxidized.org>", + "Garrett Berg <googberg@gmail.com>", +] +description = "Split a string into shell words, like Python's shlex." +readme = "README.md" +categories = [ + "command-line-interface", + "parser-implementations", +] +license = "MIT OR Apache-2.0" +repository = "https://github.com/comex/rust-shlex" + +[features] +default = ["std"] +std = [] diff --git a/vendor/shlex/LICENSE-APACHE b/vendor/shlex/LICENSE-APACHE new file mode 100644 index 00000000000000..37465048a6f63d --- /dev/null +++ b/vendor/shlex/LICENSE-APACHE @@ -0,0 +1,13 @@ +Copyright 2015 Nicholas Allegra (comex). + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
diff --git a/vendor/shlex/LICENSE-MIT b/vendor/shlex/LICENSE-MIT new file mode 100644 index 00000000000000..5ec1fe1cd795fa --- /dev/null +++ b/vendor/shlex/LICENSE-MIT @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2015 Nicholas Allegra (comex). + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/shlex/README.md b/vendor/shlex/README.md new file mode 100644 index 00000000000000..6400a6f75a915e --- /dev/null +++ b/vendor/shlex/README.md @@ -0,0 +1,39 @@ +[![ci badge]][ci link] [![crates.io badge]][crates.io link] [![docs.rs badge]][docs.rs link] + +[crates.io badge]: https://img.shields.io/crates/v/shlex.svg?style=flat-square +[crates.io link]: https://crates.io/crates/shlex +[docs.rs badge]: https://img.shields.io/badge/docs-online-dddddd.svg?style=flat-square +[docs.rs link]: https://docs.rs/shlex +[ci badge]: https://img.shields.io/github/actions/workflow/status/comex/rust-shlex/test.yml?branch=master&style=flat-square +[ci link]: https://github.com/comex/rust-shlex/actions + +Same idea as (but implementation not directly based on) the Python shlex +module. However, this implementation does not support any of the Python +module's customization because it makes parsing slower and is fairly useless. +You only get the default settings of shlex.split, which mimic the POSIX shell: +<https://pubs.opengroup.org/onlinepubs/9699919799/utilities/V3_chap02.html> + +This implementation also deviates from the Python version in not treating \r +specially, which I believe is more compliant. + +This crate can be used on either normal Rust strings, or on byte strings with +the `bytes` module. The algorithms used are oblivious to UTF-8 high bytes, so +internally they all work on bytes directly as a micro-optimization. + +Disabling the `std` feature (which is enabled by default) will allow the crate +to work in `no_std` environments, where the `alloc` crate, and a global +allocator, are available. + +# LICENSE + +The source code in this repository is Licensed under either of +- Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or + https://www.apache.org/licenses/LICENSE-2.0) +- MIT license ([LICENSE-MIT](LICENSE-MIT) or + https://opensource.org/licenses/MIT) + +at your option. + +Unless you explicitly state otherwise, any contribution intentionally submitted +for inclusion in the work by you, as defined in the Apache-2.0 license, shall +be dual licensed as above, without any additional terms or conditions. 
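A minimal usage sketch of the splitting behaviour the shlex README above describes, assuming the `split` and `bytes::split` entry points exported by the vendored shlex 1.3.0 and its documented `None`-on-error behaviour:

```rust
// Sketch of shlex's default POSIX-style word splitting. `split` returns
// None when the input ends inside a quotation or right after an
// unescaped backslash, matching the README's description.
fn main() {
    // String-level API: whitespace separates words, quotes group them.
    let words = shlex::split("cc -o 'my prog' main.c").unwrap();
    assert_eq!(words, vec!["cc", "-o", "my prog", "main.c"]);

    // An unterminated quote is an error, reported as None.
    assert_eq!(shlex::split("echo 'oops"), None);

    // Byte-level API from the `bytes` module, for data that need not be
    // valid UTF-8 (e.g. raw OsStr contents on Unix).
    let raw = shlex::bytes::split(b"a\x80b c").unwrap();
    assert_eq!(raw, vec![b"a\x80b".to_vec(), b"c".to_vec()]);
}
```

For the reverse direction, the `try_quote` and `try_join` helpers introduced in 1.3.0 escape words for the shell and return a `QuoteError::Nul` error for NUL bytes (unless `Quoter::allow_nul` is set), as the `Quoter` implementation later in this patch shows.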
diff --git a/vendor/shlex/src/bytes.rs b/vendor/shlex/src/bytes.rs new file mode 100644 index 00000000000000..af8daad0d33c18 --- /dev/null +++ b/vendor/shlex/src/bytes.rs @@ -0,0 +1,576 @@ +// Copyright 2015 Nicholas Allegra (comex). +// Licensed under the Apache License, Version 2.0 <https://www.apache.org/licenses/LICENSE-2.0> or +// the MIT license <https://opensource.org/licenses/MIT>, at your option. This file may not be +// copied, modified, or distributed except according to those terms. + +//! [`Shlex`] and friends for byte strings. +//! +//! This is used internally by the [outer module](crate), and may be more +//! convenient if you are working with byte slices (`[u8]`) or types that are +//! wrappers around bytes, such as [`OsStr`](std::ffi::OsStr): +//! +//! ```rust +//! #[cfg(unix)] { +//! use shlex::bytes::quote; +//! use std::ffi::OsStr; +//! use std::os::unix::ffi::OsStrExt; +//! +//! // `\x80` is invalid in UTF-8. +//! let os_str = OsStr::from_bytes(b"a\x80b c"); +//! assert_eq!(quote(os_str.as_bytes()), &b"'a\x80b c'"[..]); +//! } +//! ``` +//! +//! (On Windows, `OsStr` uses 16 bit wide characters so this will not work.) + +extern crate alloc; +use alloc::vec::Vec; +use alloc::borrow::Cow; +#[cfg(test)] +use alloc::vec; +#[cfg(test)] +use alloc::borrow::ToOwned; +#[cfg(all(doc, not(doctest)))] +use crate::{self as shlex, quoting_warning}; + +use super::QuoteError; + +/// An iterator that takes an input byte string and splits it into the words using the same syntax as +/// the POSIX shell. +pub struct Shlex<'a> { + in_iter: core::slice::Iter<'a, u8>, + /// The number of newlines read so far, plus one. + pub line_no: usize, + /// An input string is erroneous if it ends while inside a quotation or right after an + /// unescaped backslash. Since Iterator does not have a mechanism to return an error, if that + /// happens, Shlex just throws out the last token, ends the iteration, and sets 'had_error' to + /// true; best to check it after you're done iterating. 
+ pub had_error: bool, +} + +impl<'a> Shlex<'a> { + pub fn new(in_bytes: &'a [u8]) -> Self { + Shlex { + in_iter: in_bytes.iter(), + line_no: 1, + had_error: false, + } + } + + fn parse_word(&mut self, mut ch: u8) -> Option<Vec<u8>> { + let mut result: Vec<u8> = Vec::new(); + loop { + match ch as char { + '"' => if let Err(()) = self.parse_double(&mut result) { + self.had_error = true; + return None; + }, + '\'' => if let Err(()) = self.parse_single(&mut result) { + self.had_error = true; + return None; + }, + '\\' => if let Some(ch2) = self.next_char() { + if ch2 != '\n' as u8 { result.push(ch2); } + } else { + self.had_error = true; + return None; + }, + ' ' | '\t' | '\n' => { break; }, + _ => { result.push(ch as u8); }, + } + if let Some(ch2) = self.next_char() { ch = ch2; } else { break; } + } + Some(result) + } + + fn parse_double(&mut self, result: &mut Vec<u8>) -> Result<(), ()> { + loop { + if let Some(ch2) = self.next_char() { + match ch2 as char { + '\\' => { + if let Some(ch3) = self.next_char() { + match ch3 as char { + // \$ => $ + '$' | '`' | '"' | '\\' => { result.push(ch3); }, + // \<newline> => nothing + '\n' => {}, + // \x => =x + _ => { result.push('\\' as u8); result.push(ch3); } + } + } else { + return Err(()); + } + }, + '"' => { return Ok(()); }, + _ => { result.push(ch2); }, + } + } else { + return Err(()); + } + } + } + + fn parse_single(&mut self, result: &mut Vec<u8>) -> Result<(), ()> { + loop { + if let Some(ch2) = self.next_char() { + match ch2 as char { + '\'' => { return Ok(()); }, + _ => { result.push(ch2); }, + } + } else { + return Err(()); + } + } + } + + fn next_char(&mut self) -> Option<u8> { + let res = self.in_iter.next().copied(); + if res == Some(b'\n') { self.line_no += 1; } + res + } +} + +impl<'a> Iterator for Shlex<'a> { + type Item = Vec<u8>; + fn next(&mut self) -> Option<Self::Item> { + if let Some(mut ch) = self.next_char() { + // skip initial whitespace + loop { + match ch as char { + ' ' | '\t' | '\n' => {}, + '#' => { + while let Some(ch2) = self.next_char() { + if ch2 as char == '\n' { break; } + } + }, + _ => { break; } + } + if let Some(ch2) = self.next_char() { ch = ch2; } else { return None; } + } + self.parse_word(ch) + } else { // no initial character + None + } + } + +} + +/// Convenience function that consumes the whole byte string at once. Returns None if the input was +/// erroneous. +pub fn split(in_bytes: &[u8]) -> Option<Vec<Vec<u8>>> { + let mut shl = Shlex::new(in_bytes); + let res = shl.by_ref().collect(); + if shl.had_error { None } else { Some(res) } +} + +/// A more configurable interface to quote strings. If you only want the default settings you can +/// use the convenience functions [`try_quote`] and [`try_join`]. +/// +/// The string equivalent is [`shlex::Quoter`]. +#[derive(Default, Debug, Clone)] +pub struct Quoter { + allow_nul: bool, + // TODO: more options +} + +impl Quoter { + /// Create a new [`Quoter`] with default settings. + #[inline] + pub fn new() -> Self { + Self::default() + } + + /// Set whether to allow [nul bytes](quoting_warning#nul-bytes). By default they are not + /// allowed and will result in an error of [`QuoteError::Nul`]. + #[inline] + pub fn allow_nul(mut self, allow: bool) -> Self { + self.allow_nul = allow; + self + } + + /// Convenience function that consumes an iterable of words and turns it into a single byte string, + /// quoting words when necessary. Consecutive words will be separated by a single space. 
+ pub fn join<'a, I: IntoIterator<Item = &'a [u8]>>(&self, words: I) -> Result<Vec<u8>, QuoteError> { + Ok(words.into_iter() + .map(|word| self.quote(word)) + .collect::<Result<Vec<Cow<[u8]>>, QuoteError>>()? + .join(&b' ')) + } + + /// Given a single word, return a byte string suitable to encode it as a shell argument. + /// + /// If given valid UTF-8, this will never produce invalid UTF-8. This is because it only + /// ever inserts valid ASCII characters before or after existing ASCII characters (or + /// returns two single quotes if the input was an empty string). It will never modify a + /// multibyte UTF-8 character. + pub fn quote<'a>(&self, mut in_bytes: &'a [u8]) -> Result<Cow<'a, [u8]>, QuoteError> { + if in_bytes.is_empty() { + // Empty string. Special case that isn't meaningful as only part of a word. + return Ok(b"''"[..].into()); + } + if !self.allow_nul && in_bytes.iter().any(|&b| b == b'\0') { + return Err(QuoteError::Nul); + } + let mut out: Vec<u8> = Vec::new(); + while !in_bytes.is_empty() { + // Pick a quoting strategy for some prefix of the input. Normally this will cover the + // entire input, but in some case we might need to divide the input into multiple chunks + // that are quoted differently. + let (cur_len, strategy) = quoting_strategy(in_bytes); + if cur_len == in_bytes.len() && strategy == QuotingStrategy::Unquoted && out.is_empty() { + // Entire string can be represented unquoted. Reuse the allocation. + return Ok(in_bytes.into()); + } + let (cur_chunk, rest) = in_bytes.split_at(cur_len); + assert!(rest.len() < in_bytes.len()); // no infinite loop + in_bytes = rest; + append_quoted_chunk(&mut out, cur_chunk, strategy); + } + Ok(out.into()) + } + +} + +#[derive(PartialEq)] +enum QuotingStrategy { + /// No quotes and no backslash escapes. (If backslash escapes would be necessary, we use a + /// different strategy instead.) + Unquoted, + /// Single quoted. + SingleQuoted, + /// Double quotes, potentially with backslash escapes. + DoubleQuoted, + // TODO: add $'xxx' and "$(printf 'xxx')" styles +} + +/// Is this ASCII byte okay to emit unquoted? +const fn unquoted_ok(c: u8) -> bool { + match c as char { + // Allowed characters: + '+' | '-' | '.' | '/' | ':' | '@' | ']' | '_' | + '0'..='9' | 'A'..='Z' | 'a'..='z' + => true, + + // Non-allowed characters: + // From POSIX https://pubs.opengroup.org/onlinepubs/9699919799/utilities/V3_chap02.html + // "The application shall quote the following characters if they are to represent themselves:" + '|' | '&' | ';' | '<' | '>' | '(' | ')' | '$' | '`' | '\\' | '"' | '\'' | ' ' | '\t' | '\n' | + // "and the following may need to be quoted under certain circumstances[..]:" + '*' | '?' | '[' | '#' | '~' | '=' | '%' | + // Brace expansion. These ought to be in the POSIX list but aren't yet; + // see: https://www.austingroupbugs.net/view.php?id=1193 + '{' | '}' | + // Also quote comma, just to be safe in the extremely odd case that the user of this crate + // is intentionally placing a quoted string inside a brace expansion, e.g.: + // format!("echo foo{{a,b,{}}}" | shlex::quote(some_str)) + ',' | + // '\r' is allowed in a word by all real shells I tested, but is treated as a word + // separator by Python `shlex` | and might be translated to '\n' in interactive mode. + '\r' | + // '!' and '^' are treated specially in interactive mode; see quoting_warning. + '!' | '^' | + // Nul bytes and control characters. 
+ '\x00' ..= '\x1f' | '\x7f' + => false, + '\u{80}' ..= '\u{10ffff}' => { + // This is unreachable since `unquoted_ok` is only called for 0..128. + // Non-ASCII bytes are handled separately in `quoting_strategy`. + // Can't call unreachable!() from `const fn` on old Rust, so... + unquoted_ok(c) + }, + } + // Note: The logic cited above for quoting comma might suggest that `..` should also be quoted, + // it as a special case of brace expansion). But it's not necessary. There are three cases: + // + // 1. The user wants comma-based brace expansion, but the untrusted string being `quote`d + // contains `..`, so they get something like `{foo,bar,3..5}`. + // => That's safe; both Bash and Zsh expand this to `foo bar 3..5` rather than + // `foo bar 3 4 5`. The presence of commas disables sequence expression expansion. + // + // 2. The user wants comma-based brace expansion where the contents of the braces are a + // variable number of `quote`d strings and nothing else. There happens to be exactly + // one string and it contains `..`, so they get something like `{3..5}`. + // => Then this will expand as a sequence expression, which is unintended. But I don't mind, + // because any such code is already buggy. Suppose the untrusted string *didn't* contain + // `,` or `..`, resulting in shell input like `{foo}`. Then the shell would interpret it + // as the literal string `{foo}` rather than brace-expanding it into `foo`. + // + // 3. The user wants a sequence expression and wants to supply an untrusted string as one of + // the endpoints or the increment. + // => Well, that's just silly, since the endpoints can only be numbers or single letters. +} + +/// Optimized version of `unquoted_ok`. +fn unquoted_ok_fast(c: u8) -> bool { + const UNQUOTED_OK_MASK: u128 = { + // Make a mask of all bytes in 0..<0x80 that pass. + let mut c = 0u8; + let mut mask = 0u128; + while c < 0x80 { + if unquoted_ok(c) { + mask |= 1u128 << c; + } + c += 1; + } + mask + }; + ((UNQUOTED_OK_MASK >> c) & 1) != 0 +} + +/// Is this ASCII byte okay to emit in single quotes? +fn single_quoted_ok(c: u8) -> bool { + match c { + // No single quotes in single quotes. + b'\'' => false, + // To work around a Bash bug, ^ is only allowed right after an opening single quote; see + // quoting_warning. + b'^' => false, + // Backslashes in single quotes are literal according to POSIX, but Fish treats them as an + // escape character. Ban them. Fish doesn't aim to be POSIX-compatible, but we *can* + // achieve Fish compatibility using double quotes, so we might as well. + b'\\' => false, + _ => true + } +} + +/// Is this ASCII byte okay to emit in double quotes? +fn double_quoted_ok(c: u8) -> bool { + match c { + // Work around Python `shlex` bug where parsing "\`" and "\$" doesn't strip the + // backslash, even though POSIX requires it. + b'`' | b'$' => false, + // '!' and '^' are treated specially in interactive mode; see quoting_warning. + b'!' | b'^' => false, + _ => true + } +} + +/// Given an input, return a quoting strategy that can cover some prefix of the string, along with +/// the size of that prefix. +/// +/// Precondition: input size is nonzero. (Empty strings are handled by the caller.) +/// Postcondition: returned size is nonzero. 
+#[cfg_attr(manual_codegen_check, inline(never))] +fn quoting_strategy(in_bytes: &[u8]) -> (usize, QuotingStrategy) { + const UNQUOTED_OK: u8 = 1; + const SINGLE_QUOTED_OK: u8 = 2; + const DOUBLE_QUOTED_OK: u8 = 4; + + let mut prev_ok = SINGLE_QUOTED_OK | DOUBLE_QUOTED_OK | UNQUOTED_OK; + let mut i = 0; + + if in_bytes[0] == b'^' { + // To work around a Bash bug, ^ is only allowed right after an opening single quote; see + // quoting_warning. + prev_ok = SINGLE_QUOTED_OK; + i = 1; + } + + while i < in_bytes.len() { + let c = in_bytes[i]; + let mut cur_ok = prev_ok; + + if c >= 0x80 { + // Normally, non-ASCII characters shouldn't require quoting, but see quoting_warning.md + // about \xa0. For now, just treat all non-ASCII characters as requiring quotes. This + // also ensures things are safe in the off-chance that you're in a legacy 8-bit locale that + // has additional characters satisfying `isblank`. + cur_ok &= !UNQUOTED_OK; + } else { + if !unquoted_ok_fast(c) { + cur_ok &= !UNQUOTED_OK; + } + if !single_quoted_ok(c){ + cur_ok &= !SINGLE_QUOTED_OK; + } + if !double_quoted_ok(c) { + cur_ok &= !DOUBLE_QUOTED_OK; + } + } + + if cur_ok == 0 { + // There are no quoting strategies that would work for both the previous characters and + // this one. So we have to end the chunk before this character. The caller will call + // `quoting_strategy` again to handle the rest of the string. + break; + } + + prev_ok = cur_ok; + i += 1; + } + + // Pick the best allowed strategy. + let strategy = if prev_ok & UNQUOTED_OK != 0 { + QuotingStrategy::Unquoted + } else if prev_ok & SINGLE_QUOTED_OK != 0 { + QuotingStrategy::SingleQuoted + } else if prev_ok & DOUBLE_QUOTED_OK != 0 { + QuotingStrategy::DoubleQuoted + } else { + unreachable!() + }; + debug_assert!(i > 0); + (i, strategy) +} + +fn append_quoted_chunk(out: &mut Vec<u8>, cur_chunk: &[u8], strategy: QuotingStrategy) { + match strategy { + QuotingStrategy::Unquoted => { + out.extend_from_slice(cur_chunk); + }, + QuotingStrategy::SingleQuoted => { + out.reserve(cur_chunk.len() + 2); + out.push(b'\''); + out.extend_from_slice(cur_chunk); + out.push(b'\''); + }, + QuotingStrategy::DoubleQuoted => { + out.reserve(cur_chunk.len() + 2); + out.push(b'"'); + for &c in cur_chunk.into_iter() { + if let b'$' | b'`' | b'"' | b'\\' = c { + // Add a preceding backslash. + // Note: We shouldn't actually get here for $ and ` because they don't pass + // `double_quoted_ok`. + out.push(b'\\'); + } + // Add the character itself. + out.push(c); + } + out.push(b'"'); + }, + } +} + +/// Convenience function that consumes an iterable of words and turns it into a single byte string, +/// quoting words when necessary. Consecutive words will be separated by a single space. +/// +/// Uses default settings except that nul bytes are passed through, which [may be +/// dangerous](quoting_warning#nul-bytes), leading to this function being deprecated. +/// +/// Equivalent to [`Quoter::new().allow_nul(true).join(words).unwrap()`](Quoter). +/// +/// (That configuration never returns `Err`, so this function does not panic.) +/// +/// The string equivalent is [shlex::join]. +#[deprecated(since = "1.3.0", note = "replace with `try_join(words)?` to avoid nul byte danger")] +pub fn join<'a, I: IntoIterator<Item = &'a [u8]>>(words: I) -> Vec<u8> { + Quoter::new().allow_nul(true).join(words).unwrap() +} + +/// Convenience function that consumes an iterable of words and turns it into a single byte string, +/// quoting words when necessary. 
Consecutive words will be separated by a single space. +/// +/// Uses default settings. The only error that can be returned is [`QuoteError::Nul`]. +/// +/// Equivalent to [`Quoter::new().join(words)`](Quoter). +/// +/// The string equivalent is [shlex::try_join]. +pub fn try_join<'a, I: IntoIterator<Item = &'a [u8]>>(words: I) -> Result<Vec<u8>, QuoteError> { + Quoter::new().join(words) +} + +/// Given a single word, return a string suitable to encode it as a shell argument. +/// +/// Uses default settings except that nul bytes are passed through, which [may be +/// dangerous](quoting_warning#nul-bytes), leading to this function being deprecated. +/// +/// Equivalent to [`Quoter::new().allow_nul(true).quote(in_bytes).unwrap()`](Quoter). +/// +/// (That configuration never returns `Err`, so this function does not panic.) +/// +/// The string equivalent is [shlex::quote]. +#[deprecated(since = "1.3.0", note = "replace with `try_quote(str)?` to avoid nul byte danger")] +pub fn quote(in_bytes: &[u8]) -> Cow<[u8]> { + Quoter::new().allow_nul(true).quote(in_bytes).unwrap() +} + +/// Given a single word, return a string suitable to encode it as a shell argument. +/// +/// Uses default settings. The only error that can be returned is [`QuoteError::Nul`]. +/// +/// Equivalent to [`Quoter::new().quote(in_bytes)`](Quoter). +/// +/// (That configuration never returns `Err`, so this function does not panic.) +/// +/// The string equivalent is [shlex::try_quote]. +pub fn try_quote(in_bytes: &[u8]) -> Result<Cow<[u8]>, QuoteError> { + Quoter::new().quote(in_bytes) +} + +#[cfg(test)] +const INVALID_UTF8: &[u8] = b"\xa1"; +#[cfg(test)] +const INVALID_UTF8_SINGLEQUOTED: &[u8] = b"'\xa1'"; + +#[test] +#[allow(invalid_from_utf8)] +fn test_invalid_utf8() { + // Check that our test string is actually invalid UTF-8. + assert!(core::str::from_utf8(INVALID_UTF8).is_err()); +} + +#[cfg(test)] +static SPLIT_TEST_ITEMS: &'static [(&'static [u8], Option<&'static [&'static [u8]]>)] = &[ + (b"foo$baz", Some(&[b"foo$baz"])), + (b"foo baz", Some(&[b"foo", b"baz"])), + (b"foo\"bar\"baz", Some(&[b"foobarbaz"])), + (b"foo \"bar\"baz", Some(&[b"foo", b"barbaz"])), + (b" foo \nbar", Some(&[b"foo", b"bar"])), + (b"foo\\\nbar", Some(&[b"foobar"])), + (b"\"foo\\\nbar\"", Some(&[b"foobar"])), + (b"'baz\\$b'", Some(&[b"baz\\$b"])), + (b"'baz\\\''", None), + (b"\\", None), + (b"\"\\", None), + (b"'\\", None), + (b"\"", None), + (b"'", None), + (b"foo #bar\nbaz", Some(&[b"foo", b"baz"])), + (b"foo #bar", Some(&[b"foo"])), + (b"foo#bar", Some(&[b"foo#bar"])), + (b"foo\"#bar", None), + (b"'\\n'", Some(&[b"\\n"])), + (b"'\\\\n'", Some(&[b"\\\\n"])), + (INVALID_UTF8, Some(&[INVALID_UTF8])), +]; + +#[test] +fn test_split() { + for &(input, output) in SPLIT_TEST_ITEMS { + assert_eq!(split(input), output.map(|o| o.iter().map(|&x| x.to_owned()).collect())); + } +} + +#[test] +fn test_lineno() { + let mut sh = Shlex::new(b"\nfoo\nbar"); + while let Some(word) = sh.next() { + if word == b"bar" { + assert_eq!(sh.line_no, 3); + } + } +} + +#[test] +#[allow(deprecated)] +fn test_quote() { + // Validate behavior with invalid UTF-8: + assert_eq!(quote(INVALID_UTF8), INVALID_UTF8_SINGLEQUOTED); + // Replicate a few tests from lib.rs. No need to replicate all of them. 
+ assert_eq!(quote(b""), &b"''"[..]); + assert_eq!(quote(b"foobar"), &b"foobar"[..]); + assert_eq!(quote(b"foo bar"), &b"'foo bar'"[..]); + assert_eq!(quote(b"'\""), &b"\"'\\\"\""[..]); + assert_eq!(quote(b""), &b"''"[..]); +} + +#[test] +#[allow(deprecated)] +fn test_join() { + // Validate behavior with invalid UTF-8: + assert_eq!(join(vec![INVALID_UTF8]), INVALID_UTF8_SINGLEQUOTED); + // Replicate a few tests from lib.rs. No need to replicate all of them. + assert_eq!(join(vec![]), &b""[..]); + assert_eq!(join(vec![&b""[..]]), b"''"); +} diff --git a/vendor/shlex/src/lib.rs b/vendor/shlex/src/lib.rs new file mode 100644 index 00000000000000..aa5c3067af82eb --- /dev/null +++ b/vendor/shlex/src/lib.rs @@ -0,0 +1,358 @@ +// Copyright 2015 Nicholas Allegra (comex). +// Licensed under the Apache License, Version 2.0 <https://www.apache.org/licenses/LICENSE-2.0> or +// the MIT license <https://opensource.org/licenses/MIT>, at your option. This file may not be +// copied, modified, or distributed except according to those terms. + +//! Parse strings like, and escape strings for, POSIX shells. +//! +//! Same idea as (but implementation not directly based on) the Python shlex module. +//! +//! Disabling the `std` feature (which is enabled by default) will allow the crate to work in +//! `no_std` environments, where the `alloc` crate, and a global allocator, are available. +//! +//! ## <span style="color:red">Warning</span> +//! +//! The [`try_quote`]/[`try_join`] family of APIs does not quote control characters (because they +//! cannot be quoted portably). +//! +//! This is fully safe in noninteractive contexts, like shell scripts and `sh -c` arguments (or +//! even scripts `source`d from interactive shells). +//! +//! But if you are quoting for human consumption, you should keep in mind that ugly inputs produce +//! ugly outputs (which may not be copy-pastable). +//! +//! And if by chance you are piping the output of [`try_quote`]/[`try_join`] directly to the stdin +//! of an interactive shell, you should stop, because control characters can lead to arbitrary +//! command injection. +//! +//! For more information, and for information about more minor issues, please see [quoting_warning]. +//! +//! ## Compatibility +//! +//! This crate's quoting functionality tries to be compatible with **any POSIX-compatible shell**; +//! it's tested against `bash`, `zsh`, `dash`, Busybox `ash`, and `mksh`, plus `fish` (which is not +//! POSIX-compatible but close enough). +//! +//! It also aims to be compatible with Python `shlex` and C `wordexp`. + +#![cfg_attr(not(feature = "std"), no_std)] + +extern crate alloc; +use alloc::vec::Vec; +use alloc::borrow::Cow; +use alloc::string::String; +#[cfg(test)] +use alloc::vec; +#[cfg(test)] +use alloc::borrow::ToOwned; + +pub mod bytes; +#[cfg(all(doc, not(doctest)))] +#[path = "quoting_warning.md"] +pub mod quoting_warning; + +/// An iterator that takes an input string and splits it into the words using the same syntax as +/// the POSIX shell. +/// +/// See [`bytes::Shlex`]. +pub struct Shlex<'a>(bytes::Shlex<'a>); + +impl<'a> Shlex<'a> { + pub fn new(in_str: &'a str) -> Self { + Self(bytes::Shlex::new(in_str.as_bytes())) + } +} + +impl<'a> Iterator for Shlex<'a> { + type Item = String; + fn next(&mut self) -> Option<String> { + self.0.next().map(|byte_word| { + // Safety: given valid UTF-8, bytes::Shlex will always return valid UTF-8. 
+ unsafe { String::from_utf8_unchecked(byte_word) } + }) + } +} + +impl<'a> core::ops::Deref for Shlex<'a> { + type Target = bytes::Shlex<'a>; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl<'a> core::ops::DerefMut for Shlex<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } +} + +/// Convenience function that consumes the whole string at once. Returns None if the input was +/// erroneous. +pub fn split(in_str: &str) -> Option<Vec<String>> { + let mut shl = Shlex::new(in_str); + let res = shl.by_ref().collect(); + if shl.had_error { None } else { Some(res) } +} + +/// Errors from [`Quoter::quote`], [`Quoter::join`], etc. (and their [`bytes`] counterparts). +/// +/// By default, the only error that can be returned is [`QuoteError::Nul`]. If you call +/// `allow_nul(true)`, then no errors can be returned at all. Any error variants added in the +/// future will not be enabled by default; they will be enabled through corresponding non-default +/// [`Quoter`] options. +/// +/// ...In theory. In the unlikely event that additional classes of inputs are discovered that, +/// like nul bytes, are fundamentally unsafe to quote even for non-interactive shells, the risk +/// will be mitigated by adding corresponding [`QuoteError`] variants that *are* enabled by +/// default. +#[non_exhaustive] +#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +pub enum QuoteError { + /// The input contained a nul byte. In most cases, shells fundamentally [cannot handle strings + /// containing nul bytes](quoting_warning#nul-bytes), no matter how they are quoted. But if + /// you're sure you can handle nul bytes, you can call `allow_nul(true)` on the `Quoter` to let + /// them pass through. + Nul, +} + +impl core::fmt::Display for QuoteError { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + match self { + QuoteError::Nul => f.write_str("cannot shell-quote string containing nul byte"), + } + } +} + +#[cfg(feature = "std")] +impl std::error::Error for QuoteError {} + +/// A more configurable interface to quote strings. If you only want the default settings you can +/// use the convenience functions [`try_quote`] and [`try_join`]. +/// +/// The bytes equivalent is [`bytes::Quoter`]. +#[derive(Default, Debug, Clone)] +pub struct Quoter { + inner: bytes::Quoter, +} + +impl Quoter { + /// Create a new [`Quoter`] with default settings. + #[inline] + pub fn new() -> Self { + Self::default() + } + + /// Set whether to allow [nul bytes](quoting_warning#nul-bytes). By default they are not + /// allowed and will result in an error of [`QuoteError::Nul`]. + #[inline] + pub fn allow_nul(mut self, allow: bool) -> Self { + self.inner = self.inner.allow_nul(allow); + self + } + + /// Convenience function that consumes an iterable of words and turns it into a single string, + /// quoting words when necessary. Consecutive words will be separated by a single space. + pub fn join<'a, I: IntoIterator<Item = &'a str>>(&self, words: I) -> Result<String, QuoteError> { + // Safety: given valid UTF-8, bytes::join() will always return valid UTF-8. + self.inner.join(words.into_iter().map(|s| s.as_bytes())) + .map(|bytes| unsafe { String::from_utf8_unchecked(bytes) }) + } + + /// Given a single word, return a string suitable to encode it as a shell argument. + pub fn quote<'a>(&self, in_str: &'a str) -> Result<Cow<'a, str>, QuoteError> { + Ok(match self.inner.quote(in_str.as_bytes())? 
{ + Cow::Borrowed(out) => { + // Safety: given valid UTF-8, bytes::quote() will always return valid UTF-8. + unsafe { core::str::from_utf8_unchecked(out) }.into() + } + Cow::Owned(out) => { + // Safety: given valid UTF-8, bytes::quote() will always return valid UTF-8. + unsafe { String::from_utf8_unchecked(out) }.into() + } + }) + } +} + +impl From<bytes::Quoter> for Quoter { + fn from(inner: bytes::Quoter) -> Quoter { + Quoter { inner } + } +} + +impl From<Quoter> for bytes::Quoter { + fn from(quoter: Quoter) -> bytes::Quoter { + quoter.inner + } +} + +/// Convenience function that consumes an iterable of words and turns it into a single string, +/// quoting words when necessary. Consecutive words will be separated by a single space. +/// +/// Uses default settings except that nul bytes are passed through, which [may be +/// dangerous](quoting_warning#nul-bytes), leading to this function being deprecated. +/// +/// Equivalent to [`Quoter::new().allow_nul(true).join(words).unwrap()`](Quoter). +/// +/// (That configuration never returns `Err`, so this function does not panic.) +/// +/// The bytes equivalent is [bytes::join]. +#[deprecated(since = "1.3.0", note = "replace with `try_join(words)?` to avoid nul byte danger")] +pub fn join<'a, I: IntoIterator<Item = &'a str>>(words: I) -> String { + Quoter::new().allow_nul(true).join(words).unwrap() +} + +/// Convenience function that consumes an iterable of words and turns it into a single string, +/// quoting words when necessary. Consecutive words will be separated by a single space. +/// +/// Uses default settings. The only error that can be returned is [`QuoteError::Nul`]. +/// +/// Equivalent to [`Quoter::new().join(words)`](Quoter). +/// +/// The bytes equivalent is [bytes::try_join]. +pub fn try_join<'a, I: IntoIterator<Item = &'a str>>(words: I) -> Result<String, QuoteError> { + Quoter::new().join(words) +} + +/// Given a single word, return a string suitable to encode it as a shell argument. +/// +/// Uses default settings except that nul bytes are passed through, which [may be +/// dangerous](quoting_warning#nul-bytes), leading to this function being deprecated. +/// +/// Equivalent to [`Quoter::new().allow_nul(true).quote(in_str).unwrap()`](Quoter). +/// +/// (That configuration never returns `Err`, so this function does not panic.) +/// +/// The bytes equivalent is [bytes::quote]. +#[deprecated(since = "1.3.0", note = "replace with `try_quote(str)?` to avoid nul byte danger")] +pub fn quote(in_str: &str) -> Cow<str> { + Quoter::new().allow_nul(true).quote(in_str).unwrap() +} + +/// Given a single word, return a string suitable to encode it as a shell argument. +/// +/// Uses default settings. The only error that can be returned is [`QuoteError::Nul`]. +/// +/// Equivalent to [`Quoter::new().quote(in_str)`](Quoter). +/// +/// (That configuration never returns `Err`, so this function does not panic.) +/// +/// The bytes equivalent is [bytes::try_quote]. 
+pub fn try_quote(in_str: &str) -> Result<Cow<str>, QuoteError> { + Quoter::new().quote(in_str) +} + +#[cfg(test)] +static SPLIT_TEST_ITEMS: &'static [(&'static str, Option<&'static [&'static str]>)] = &[ + ("foo$baz", Some(&["foo$baz"])), + ("foo baz", Some(&["foo", "baz"])), + ("foo\"bar\"baz", Some(&["foobarbaz"])), + ("foo \"bar\"baz", Some(&["foo", "barbaz"])), + (" foo \nbar", Some(&["foo", "bar"])), + ("foo\\\nbar", Some(&["foobar"])), + ("\"foo\\\nbar\"", Some(&["foobar"])), + ("'baz\\$b'", Some(&["baz\\$b"])), + ("'baz\\\''", None), + ("\\", None), + ("\"\\", None), + ("'\\", None), + ("\"", None), + ("'", None), + ("foo #bar\nbaz", Some(&["foo", "baz"])), + ("foo #bar", Some(&["foo"])), + ("foo#bar", Some(&["foo#bar"])), + ("foo\"#bar", None), + ("'\\n'", Some(&["\\n"])), + ("'\\\\n'", Some(&["\\\\n"])), +]; + +#[test] +fn test_split() { + for &(input, output) in SPLIT_TEST_ITEMS { + assert_eq!(split(input), output.map(|o| o.iter().map(|&x| x.to_owned()).collect())); + } +} + +#[test] +fn test_lineno() { + let mut sh = Shlex::new("\nfoo\nbar"); + while let Some(word) = sh.next() { + if word == "bar" { + assert_eq!(sh.line_no, 3); + } + } +} + +#[test] +#[cfg_attr(not(feature = "std"), allow(unreachable_code, unused_mut))] +fn test_quote() { + // This is a list of (unquoted, quoted) pairs. + // But it's using a single long (raw) string literal with an ad-hoc format, just because it's + // hard to read if we have to put the test strings through Rust escaping on top of the escaping + // being tested. (Even raw string literals are noisy for short strings). + // Ad-hoc: "NL" is replaced with a literal newline; no other escape sequences. + let tests = r#" + <> => <''> + <foobar> => <foobar> + <foo bar> => <'foo bar'> + <"foo bar'"> => <"\"foo bar'\""> + <'foo bar'> => <"'foo bar'"> + <"> => <'"'> + <"'> => <"\"'"> + <hello!world> => <'hello!world'> + <'hello!world> => <"'hello"'!world'> + <'hello!> => <"'hello"'!'> + <hello ^ world> => <'hello ''^ world'> + <hello^> => <hello'^'> + <!world'> => <'!world'"'"> + <{a, b}> => <'{a, b}'> + <NL> => <'NL'> + <^> => <'^'> + <foo^bar> => <foo'^bar'> + <NLx^> => <'NLx''^'> + <NL^x> => <'NL''^x'> + <NL ^x> => <'NL ''^x'> + <{a,b}> => <'{a,b}'> + <a,b> => <'a,b'> + <a..b => <a..b> + <'$> => <"'"'$'> + <"^> => <'"''^'> + "#; + let mut ok = true; + for test in tests.trim().split('\n') { + let parts: Vec<String> = test + .replace("NL", "\n") + .split("=>") + .map(|part| part.trim().trim_start_matches('<').trim_end_matches('>').to_owned()) + .collect(); + assert!(parts.len() == 2); + let unquoted = &*parts[0]; + let quoted_expected = &*parts[1]; + let quoted_actual = try_quote(&parts[0]).unwrap(); + if quoted_expected != quoted_actual { + #[cfg(not(feature = "std"))] + panic!("FAIL: for input <{}>, expected <{}>, got <{}>", + unquoted, quoted_expected, quoted_actual); + #[cfg(feature = "std")] + println!("FAIL: for input <{}>, expected <{}>, got <{}>", + unquoted, quoted_expected, quoted_actual); + ok = false; + } + } + assert!(ok); +} + +#[test] +#[allow(deprecated)] +fn test_join() { + assert_eq!(join(vec![]), ""); + assert_eq!(join(vec![""]), "''"); + assert_eq!(join(vec!["a", "b"]), "a b"); + assert_eq!(join(vec!["foo bar", "baz"]), "'foo bar' baz"); +} + +#[test] +fn test_fallible() { + assert_eq!(try_join(vec!["\0"]), Err(QuoteError::Nul)); + assert_eq!(try_quote("\0"), Err(QuoteError::Nul)); +} diff --git a/vendor/shlex/src/quoting_warning.md b/vendor/shlex/src/quoting_warning.md new file mode 100644 index 00000000000000..fab9857bec9686 --- 
/dev/null +++ b/vendor/shlex/src/quoting_warning.md @@ -0,0 +1,365 @@ +// vim: textwidth=99 +/* +Meta note: This file is loaded as a .rs file by rustdoc only. +*/ +/*! + +A more detailed version of the [warning at the top level](super#warning) about the `quote`/`join` +family of APIs. + +In general, passing the output of these APIs to a shell should recover the original string(s). +This page lists cases where it fails to do so. + +In noninteractive contexts, there are only minor issues. 'Noninteractive' includes shell scripts +and `sh -c` arguments, or even scripts `source`d from interactive shells. The issues are: + +- [Nul bytes](#nul-bytes) + +- [Overlong commands](#overlong-commands) + +If you are writing directly to the stdin of an interactive (`-i`) shell (i.e., if you are +pretending to be a terminal), or if you are writing to a cooked-mode pty (even if the other end is +noninteractive), then there is a **severe** security issue: + +- [Control characters](#control-characters-interactive-contexts-only) + +Finally, there are some [solved issues](#solved-issues). + +# List of issues + +## Nul bytes + +For non-interactive shells, the most problematic input is nul bytes (bytes with value 0). The +non-deprecated functions all default to returning [`QuoteError::Nul`] when encountering them, but +the deprecated [`quote`] and [`join`] functions leave them as-is. + +In Unix, nul bytes can't appear in command arguments, environment variables, or filenames. It's +not a question of proper quoting; they just can't be used at all. This is a consequence of Unix's +system calls all being designed around nul-terminated C strings. + +Shells inherit that limitation. Most of them do not accept nul bytes in strings even internally. +Even when they do, it's pretty much useless or even dangerous, since you can't pass them to +external commands. + +In some cases, you might fail to pass the nul byte to the shell in the first place. For example, +the following code uses [`join`] to tunnel a command over an SSH connection: + +```rust +std::process::Command::new("ssh") + .arg("myhost") + .arg("--") + .arg(join(my_cmd_args)) +``` + +If any argument in `my_cmd_args` contains a nul byte, then `join(my_cmd_args)` will contain a nul +byte. But `join(my_cmd_args)` is itself being passed as an argument to a command (the ssh +command), and command arguments can't contain nul bytes! So this will simply result in the +`Command` failing to launch. + +Still, there are other ways to smuggle nul bytes into a shell. How the shell reacts depends on the +shell and the method of smuggling. For example, here is Bash 5.2.21 exhibiting three different +behaviors: + +- With ANSI-C quoting, the string is truncated at the first nul byte: + ```bash + $ echo $'foo\0bar' | hexdump -C + 00000000 66 6f 6f 0a |foo.| + ``` + +- With command substitution, nul bytes are removed with a warning: + ```bash + $ echo $(printf 'foo\0bar') | hexdump -C + bash: warning: command substitution: ignored null byte in input + 00000000 66 6f 6f 62 61 72 0a |foobar.| + ``` + +- When a nul byte appears directly in a shell script, it's removed with no warning: + ```bash + $ printf 'echo "foo\0bar"' | bash | hexdump -C + 00000000 66 6f 6f 62 61 72 0a |foobar.| + ``` + +Zsh, in contrast, actually allows nul bytes internally, in shell variables and even arguments to +builtin commands. But if a variable is exported to the environment, or if an argument is used for +an external command, then the child process will see it silently truncated at the first nul. 
This +might actually be more dangerous, depending on the use case. + +## Overlong commands + +If you pass a long string into a shell, several things might happen: + +- It might succeed, yet the shell might have trouble actually doing anything with it. For example: + + ```bash + x=$(printf '%010000000d' 0); /bin/echo $x + bash: /bin/echo: Argument list too long + ``` + +- If you're using certain shells (e.g. Busybox Ash) *and* using a pty for communication, then the + shell will impose a line length limit, ignoring all input past the limit. + +- If you're using a pty in cooked mode, then by default, if you write so many bytes as input that + it fills the kernel's internal buffer, the kernel will simply drop those bytes, instead of + blocking waiting for the shell to empty out the buffer. In other words, random bits of input can + be lost, which is obviously insecure. + +Future versions of this crate may add an option to [`Quoter`] to check the length for you. + +## Control characters (*interactive contexts only*) + +Control characters are the bytes from `\x00` to `\x1f`, plus `\x7f`. `\x00` (the nul byte) is +discussed [above](#nul-bytes), but what about the rest? Well, many of them correspond to terminal +keyboard shortcuts. For example, when you press Ctrl-A at a shell prompt, your terminal sends the +byte `\x01`. The shell sees that byte and (if not configured differently) takes the standard +action for Ctrl-A, which is to move the cursor to the beginning of the line. + +This means that it's quite dangerous to pipe bytes to an interactive shell. For example, here is a +program that tries to tell Bash to echo an arbitrary string, 'safely': +```rust +use std::process::{Command, Stdio}; +use std::io::Write; + +let evil_string = "\x01do_something_evil; "; +let quoted = shlex::try_quote(evil_string).unwrap(); +println!("quoted string is {:?}", quoted); + +let mut bash = Command::new("bash") + .arg("-i") // force interactive mode + .stdin(Stdio::piped()) + .spawn() + .unwrap(); +let stdin = bash.stdin.as_mut().unwrap(); +write!(stdin, "echo {}\n", quoted).unwrap(); +``` + +Here's the output of the program (with irrelevant bits removed): + +```text +quoted string is "'\u{1}do_something_evil; '" +/tmp comex$ do_something_evil; 'echo ' +bash: do_something_evil: command not found +bash: echo : command not found +``` + +Even though we quoted it, Bash still ran an arbitrary command! + +This is not because the quoting was insufficient, per se. In single quotes, all input is supposed +to be treated as raw data until the closing single quote. And in fact, this would work fine +without the `"-i"` argument. + +But line input is a separate stage from shell syntax parsing. After all, if you type a single +quote on the keyboard, you wouldn't expect it to disable all your keyboard shortcuts. So a control +character always has its designated effect, no matter if it's quoted or backslash-escaped. + +Also, some control characters are interpreted by the kernel tty layer instead, like CTRL-C to send +SIGINT. These can be an issue even with noninteractive shells, but only if using a pty for +communication, as opposed to a pipe. + +To be safe, you just have to avoid sending them. + +### Why not just use hex escapes? + +In any normal programming languages, this would be no big deal. + +Any normal language has a way to escape arbitrary characters in strings by writing out their +numeric values. For example, Rust lets you write them in hexadecimal, like `"\x4f"` (or +`"\u{1d546}"` for Unicode). 
In this way, arbitrary strings can be represented using only 'nice' +simple characters. Any remotely suspicious character can be replaced with a numeric escape +sequence, where the escape sequence itself consists only of alphanumeric characters and some +punctuation. The result may not be the most readable[^choices], but it's quite safe from being +misinterpreted or corrupted in transit. + +Shell is not normal. It has no numeric escape sequences. + +There are a few different ways to quote characters (unquoted, unquoted-with-backslash, single +quotes, double quotes), but all of them involve writing the character itself. If the input +contains a control character, the output must contain that same character. + +### Mitigation: terminal filters + +In practice, automating interactive shells like in the above example is pretty uncommon these days. +In most cases, the only way for a programmatically generated string to make its way to the input of +an interactive shell is if a human copies and pastes it into their terminal. + +And many terminals detect when you paste a string containing control characters. iTerm2 strips +them out; gnome-terminal replaces them with alternate characters[^gr]; Kitty outright prompts for +confirmation. This mitigates the risk. + +But it's not perfect. Some other terminals don't implement this check or implement it incorrectly. +Also, these checks tend to not filter the tab character, which could trigger tab completion. In +most cases that's a non-issue, because most shells support paste bracketing, which disables tab and +some other control characters[^bracketing] within pasted text. But in some cases paste bracketing +gets disabled. + +### Future possibility: ANSI-C quoting + +I said that shell syntax has no numeric escapes, but that only applies to *portable* shell syntax. +Bash and Zsh support an obscure alternate quoting style with the syntax `$'foo'`. It's called +["ANSI-C quoting"][ansic], and inside it you can use all the escape sequences supported by C, +including hex escapes: + +```bash +$ echo $'\x41\n\x42' +A +B +``` + +But other shells don't support it — including Dash, a popular choice for `/bin/sh`, and Busybox's +Ash, frequently seen on stripped-down embedded systems. This crate's quoting functionality [tries +to be compatible](crate#compatibility) with those shells, plus all other POSIX-compatible shells. +That makes ANSI-C quoting a no-go. + +Still, future versions of this crate may provide an option to enable ANSI-C quoting, at the cost of +reduced portability. + +### Future possibility: printf + +Another option would be to invoke the `printf` command, which is required by POSIX to support octal +escapes. For example, you could 'escape' the Rust string `"\x01"` into the shell syntax `"$(printf +'\001')"`. The shell will execute the command `printf` with the first argument being literally a +backslash followed by three digits; `printf` will output the actual byte with value 1; and the +shell will substitute that back into the original command. + +The problem is that 'escaping' a string into a command substitution just feels too surprising. If +nothing else, it only works with an actual shell; [other languages' shell parsing +routines](crate#compatibility) wouldn't understand it. Neither would this crate's own parser, +though that could be fixed. + +Future versions of this crate may provide an option to use `printf` for quoting. + +### Special note: newlines + +Did you know that `\r` and `\n` are control characters? 
They aren't as dangerous as other control +characters (if quoted properly). But there's still an issue with them in interactive contexts. + +Namely, in some cases, interactive shells and/or the tty layer will 'helpfully' translate between +different line ending conventions. The possibilities include replacing `\r` with `\n`, replacing +`\n` with `\r\n`, and others. This can't result in command injection, but it's still a lossy +transformation which can result in a failure to round-trip (i.e. the shell sees a different string +from what was originally passed to `quote`). + +Numeric escapes would solve this as well. + +# Solved issues + +## Solved: Past vulnerability (GHSA-r7qv-8r2h-pg27 / RUSTSEC-2024-XXX) + +Versions of this crate before 1.3.0 did not quote `{`, `}`, and `\xa0`. + +See: +- <https://github.com/advisories/GHSA-r7qv-8r2h-pg27> +- (TODO: Add Rustsec link) + +## Solved: `!` and `^` + +There are two non-control characters which have a special meaning in interactive contexts only: `!` and +`^`. Luckily, these can be escaped adequately. + +The `!` character triggers [history expansion][he]; the `^` character can trigger a variant of +history expansion known as [Quick Substitution][qs]. Both of these characters get expanded even +inside of double-quoted strings\! + +If we're in a double-quoted string, then we can't just escape these characters with a backslash. +Only a specific set of characters can be backslash-escaped inside double quotes; the set of +supported characters depends on the shell, but it often doesn't include `!` and `^`.[^escbs] +Trying to backslash-escape an unsupported character produces a literal backslash: +```bash +$ echo "\!" +\! +``` + +However, these characters don't get expanded in single-quoted strings, so this crate just +single-quotes them. + +But there's a Bash bug where `^` actually does get partially expanded in single-quoted strings: +```bash +$ echo ' +> ^a^b +> ' + +!!:s^a^b +``` + +To work around that, this crate forces `^` to appear right after an opening single quote. For +example, the string `"^` is quoted into `'"''^'` instead of `'"^'`. This restriction is overkill, +since `^` is only meaningful right after a newline, but it's a sufficient restriction (after all, a +`^` character can't be preceded by a newline if it's forced to be preceded by a single quote), and +for now it simplifies things. + +## Solved: `\xa0` + +The byte `\xa0` may be treated as a shell word separator, specifically on Bash on macOS when using +the default UTF-8 locale, only when the input is invalid UTF-8. This crate handles the issue by +always using quotes for arguments containing this byte. + +In fact, this crate always uses quotes for arguments containing any non-ASCII bytes. This may be +changed in the future, since it's a bit unfriendly to non-English users. But for now it +minimizes risk, especially considering the large number of different legacy single-byte locales +someone might hypothetically be running their shell in. + +### Demonstration + +```bash +$ echo -e 'ls a\xa0b' | bash +ls: a: No such file or directory +ls: b: No such file or directory +``` +The normal behavior would be to output a single line, e.g.: +```bash +$ echo -e 'ls a\xa0b' | bash +ls: cannot access 'a'$'\240''b': No such file or directory +``` +(The specific quoting in the error doesn't matter.) + +### Cause + +Just for fun, here's why this behavior occurs: + +Bash decides which bytes serve as word separators based on the libc function [`isblank`][isblank]. 
+On macOS on UTF-8 locales, this passes for `\xa0`, corresponding to U+00A0 NO-BREAK SPACE. + +This is doubly unique compared to the other systems I tested (Linux/glibc, Linux/musl, and +Windows/MSVC). First, the other systems don't allow bytes in the range [0x80, 0xFF] to pass +<code>is<i>foo</i></code> functions in UTF-8 locales, even if the corresponding Unicode codepoint +does pass, as determined by the wide-character equivalent function, <code>isw<i>foo</i></code>. +Second, the other systems don't treat U+00A0 as blank (even using `iswblank`). + +Meanwhile, Bash checks for multi-byte sequences and forbids them from being treated as special +characters, so the proper UTF-8 encoding of U+00A0, `b"\xc2\xa0"`, is not treated as a word +separator. Treatment as a word separator only happens for `b"\xa0"` alone, which is illegal UTF-8. + +[ansic]: https://www.gnu.org/software/bash/manual/html_node/ANSI_002dC-Quoting.html +[he]: https://www.gnu.org/software/bash/manual/html_node/History-Interaction.html +[qs]: https://www.gnu.org/software/bash/manual/html_node/Event-Designators.html +[isblank]: https://man7.org/linux/man-pages/man3/isblank.3p.html +[nul]: #nul-bytes + +[^choices]: This can lead to tough choices over which + characters to escape and which to leave as-is, especially when Unicode gets involved and you + have to balance the risk of confusion with the benefit of properly supporting non-English + languages. + <br> + <br> + We don't have the luxury of those choices. + +[^gr]: For example, backspace (in Unicode lingo, U+0008 BACKSPACE) turns into U+2408 SYMBOL FOR BACKSPACE. + +[^bracketing]: It typically disables almost all handling of control characters by the shell proper, + but one necessary exception is the end-of-paste sequence itself (which starts with the control + character `\x1b`). In addition, paste bracketing does not suppress handling of control + characters by the kernel tty layer, such as `\x03` sending SIGINT (which typically clears the + currently typed command, making it dangerous in a similar way to `\x01`). + +[^escbs]: For example, Dash doesn't remove the backslash from `"\!"` because it simply doesn't know + anything about `!` as a special character: it doesn't support history expansion. On the other + end of the spectrum, Zsh supports history expansion and does remove the backslash — though only + in interactive mode. Bash's behavior is weirder. It supports history expansion, and if you + write `"\!"`, the backslash does prevent history expansion from occurring — but it doesn't get + removed! + +*/ + +// `use` declarations to make auto links work: +use ::{quote, join, Shlex, Quoter, QuoteError}; + +// TODO: add more about copy-paste and human readability. 
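The quoting-warning document above closes out the shlex sources in this patch. As a hypothetical illustration (again not part of the vendored tree) of the byte-string API it refers to, where non-ASCII bytes are always quoted and nul bytes are rejected unless explicitly allowed:

```rust
fn main() {
    // Bytes >= 0x80 are always quoted, per the `\xa0` discussion above,
    // even though most of them would be harmless unquoted.
    let quoted = shlex::bytes::try_quote(b"a\xa1b").unwrap();
    assert_eq!(&*quoted, &b"'a\xa1b'"[..]);

    // Nul bytes are refused by default; opting in via the Quoter passes
    // them through inside the quotes, at the caller's own risk.
    assert!(shlex::bytes::try_quote(b"\0").is_err());
    let quoter = shlex::bytes::Quoter::new().allow_nul(true);
    assert!(quoter.quote(b"\0").is_ok());
}
```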
diff --git a/vendor/syn/.cargo-checksum.json b/vendor/syn/.cargo-checksum.json new file mode 100644 index 00000000000000..ce1b816764beb8 --- /dev/null +++ b/vendor/syn/.cargo-checksum.json @@ -0,0 +1 @@ +{"files":{".cargo_vcs_info.json":"645009f117c4017452703e2b0999c110f324ef2cb9f5aff4055fd3712c3a90ab","Cargo.lock":"151d8723bc5c18decd094e2053baaef8853dfb4b0652aae99d90c6bb03c7af20","Cargo.toml":"25e3f93c23cc3f60f1bae346e1bdcf7c0790a09168b5bcac3e80e6158d437a9b","Cargo.toml.orig":"bf872e309248326ee3a96a8871b927f059277754f4534136b2d637ce86c63004","LICENSE-APACHE":"62c7a1e35f56406896d7aa7ca52d0cc0d272ac022b5d2796e7d6905db8a3636a","LICENSE-MIT":"23f18e03dc49df91622fe2a76176497404e46ced8a715d9d2b67a7446571cca3","README.md":"ae6deb98ea51df4829c0327139a555cc115c6bcf6fb459db0ef0d6a96c4566ec","benches/file.rs":"6f2ae7748d1576cff48e4ef55d4e87e2f5bb7898a36aa99ea6dd6ed0f72a4e3c","benches/rust.rs":"53cb8accfda73e59a3efc78081e7c58a1010ae60c23ef8c43bb240550daa3e96","src/attr.rs":"f44ff3cb9d3bc34d7de9e3f1aba62ddb1c8973881d9db981623b112005d4ed53","src/bigint.rs":"0299829b2f7a1a798fe2f7bc1680e4a10f9b6f4a852d09af4da2deab466c4242","src/buffer.rs":"7622b646e0d2399068868c41cb6b8abec39afa925c220a8e9f19c2a424911fd4","src/classify.rs":"3c796df4c891034abc3008196d34ad62c22fcb9525a067890731a5a6bbc7b5fb","src/custom_keyword.rs":"322114e36ae43a2f8605506fb4568efdbc2986853e2fee74bd10a4ca0fb60c69","src/custom_punctuation.rs":"26b28164f0b2e5e80e7cf36a3ba6d2577d27442cce5e00a72f685b5ee6f51ecd","src/data.rs":"fa04dce757ca3dd1e350aaa84bbcf8b743c13a00b0983b980bf2919f91a22078","src/derive.rs":"f54f8cf9386a2d45186ff3c86ade5dae59e0e337b0198532449190ae8520cff8","src/discouraged.rs":"653c5d9e6c4e3c2359817dc343f145569a0c9562a707f4949c374c242955ce12","src/drops.rs":"e98da4aaafb5afc75919f9e2914326ad09bf16094a9407c60a05515a2e01dd00","src/error.rs":"cbf06fb7b000f2e6444fa224a062c493911a8f9fc5d386be6e52dadbb7658f34","src/export.rs":"b260cc49da1da3489e7755832bc8015cfad79e84f6c74e237f65ae25a2385e56","src/expr.rs":"fa766ce749ea31367e178f45a2dc8f8545b9467f7fc51e7a1fe72bbb0b9738dc","src/ext.rs":"57577c7e6b7b65cd27ac5aad66d47e63693762d8880cde088b20975ec845244d","src/file.rs":"9d04206da5eff88e92698b3f78c51082d615300cb11d93768250a3e97c40b078","src/fixup.rs":"7647cde30efdce96b1488ae805788c168d4499b464b7d421abc17ea8ffde66f2","src/gen/clone.rs":"7af00b0a240453d7aac178be1b6cdf9df3b33f1281da35e02f66ba7ba55d060c","src/gen/debug.rs":"59bc259fa9dc0c7ffe094df7ad8afc9c4e79b6d73a8f0fae8a2e435905866e7a","src/gen/eq.rs":"d7428672d82c1931fdefb8bda0425a25ebbe20e5e2736b18cfd1752b64e99f78","src/gen/fold.rs":"39b0a26cfdf0accaff6da108a2b6d8f93e83c63d0bf6a8d7af0900fc0f71b55b","src/gen/hash.rs":"6808bb0e47e7346a14fbec5f55430906efa172f46417c83c2f7c76ce8c9ceab6","src/gen/token.css":"3a5882d0b3aefbf56ca5d08c28c117e25ac2327eadf7242202d906b2ddb2232e","src/gen/visit.rs":"fe1443aa7953eaca10d6bf982396e627e31ce6b8aea8eb7cf949e0adeea5badb","src/gen/visit_mut.rs":"9948f0f07aefd8133dcc958e744c49f1da625d199f7707363b79f0373b2dcd6b","src/generics.rs":"6170b4a9d82ba27a3b4471057a5206e45d4b379813855b67d06aa1fc7f978ccc","src/group.rs":"ddbff97e41315bdf9dfce215a8c00bb4d532827cf794246afde7308b39dc09ca","src/ident.rs":"d6061030fadae9c7dc847e1ee46178d9657d782aad108c7197e8cafe765b3eaa","src/item.rs":"ad2d5f4621426420ba4dc0c1a82626b7b0979cb67c06fbcb16ee6abb025e7c80","src/lib.rs":"33992cd3fb39b1af62b844da65596854d355ad7e85d516d67bbf67b3f04bfc09","src/lifetime.rs":"ec748fdbdedeb75c4dbc4460653cf97fcf113207eea5b12fea9e1f6e1198beca","src/lit.rs":"69ef534be9ba43de0da9a65d75de36f3d14d83f5bd1666ea72419c
9302095408","src/lookahead.rs":"b2837d80fa4466bb430b65d32b54d1bad5de9bb851550f916658347145c281b4","src/mac.rs":"fdce8291f71adef3f69975f229156dca2309ca232ed943061afaf96220908ab8","src/macros.rs":"2a6e895dfe1c3a9a7237b5e23358ca5d8967e2beae6d094dda68d3659f9a5c84","src/meta.rs":"969d8ccbdbc6ea2e4928a21831b791c57447b231e1373149e4c63b46f3951801","src/op.rs":"a61757370f802e44efa3c4a1057ae2cd26e64e273f7d76c06d5ffb49602319e2","src/parse.rs":"bbe69237d50ce5f9b5c029e851607c54ca6232cad0790551c2f5bb29e2f9657d","src/parse_macro_input.rs":"e4e22b63d0496d06a4ca17742a22467ed93f08a739081324773828bad63175ee","src/parse_quote.rs":"80eec7ce54c38f3bbd23acb70cd8a6649d7e1523c3977e3bf12849fd8c5cf16d","src/pat.rs":"b6c8c04c330a76dbe9cd35949026724fc3aeacf98e8c0a259cf2e16caff99071","src/path.rs":"2146bdf5e0eb6991232c8a09de3a30440727f439ab792a34f5313057c091a724","src/precedence.rs":"58420a5015003ecd4d7a4a0c87c168caa4c696e646355523d9eaae81fc5e1d54","src/print.rs":"22910bf0521ab868ebd7c62601c55912d12cfb400c65723e08e5cfa3a2d111c0","src/punctuated.rs":"711c1f9122f560530d40bdccbd8784b6c2c54067f0d753cce282a4d6ca933a37","src/restriction.rs":"a7152ec5a4ee4f55446019aa2b4d84f2238776f0e6ffc0c22adf3374b517fe56","src/scan_expr.rs":"e199c35e8bbf3e2c70901e1175df8dd446f4cb67b60100647f478f2dc31f6f12","src/sealed.rs":"6ece3b3dcb30f6bb98b93d83759ca7712ee8592bef9c0511141039c38765db0e","src/span.rs":"0a48e375e5c9768f6f64174a91ba6a255f4b021e2fb3548d8494e617f142601b","src/spanned.rs":"4b9bd65f60ab81922adfd0be8f03b6d50e98da3a5f525f242f9639aec4beac79","src/stmt.rs":"7a594d08cbedef4c6c0ed6ca9c331f4f087bd631a12938240180f7c53ada44e9","src/thread.rs":"1f1deb1272525ab2af9a36aac4bce8f65b0e315adb1656641fd7075662f49222","src/token.rs":"55f1ad3ba0edc43ae7b65a6fa6dc13fc1a99053d6300187a4cc48572b8f451f3","src/tt.rs":"ad478bef531007fac0e4af7ecae81f8fe66a5ce44532288156b7e3d4bfc45950","src/ty.rs":"b7daaf57dd96fc09448e45fc92f55b00f3b7ba99a00f3f2eb8a11f35e302af3c","src/verbatim.rs":"4aa06d0ce2f6b6c6aa657bc349ccc85005d2eb05494dfa1ac1fe9012916dcc3e","src/whitespace.rs":"9cdcbfe9045b259046329a795bc1105ab5a871471a6d3f7318d275ee53f7a825","tests/common/eq.rs":"4e66a9bd9262a8ff7db3a243cbb21c870b50f3286a23d013c767ec849e4f311d","tests/common/mod.rs":"b752aa8f1faf8c6abf1286a12fb50b6c257ec1889d81bcdb3dc3257134695a89","tests/common/parse.rs":"f226bfa84803429c4ef203a09b30372db01298e14443089fb60c11e2112212db","tests/common/visit.rs":"a260ecd2ce7853cd3644e19aba08e8d358a656fd3fb0f1287cea40c59c9e62c9","tests/debug/gen.rs":"cdd89f1bf91fe215e06868fc93423d2f1872c812c3bfec93dc920bc105e20c09","tests/debug/mod.rs":"1259df940bbcaa968a837e402d6853f2efa38d2260e306d42f17f9e8ef74fae5","tests/macros/mod.rs":"d2294a79e341c623ae671dd363e99965d78dda7f340b0cc038267207adfacae2","tests/regression.rs":"e9565ea0efecb4136f099164ffcfa26e1996b0a27fb9c6659e90ad9bdd42e7b6","tests/regression/issue1108.rs":"f32db35244a674e22ff824ca9e5bbec2184e287b59f022db68c418b5878a2edc","tests/regression/issue1235.rs":"a2266b10c3f7c7af5734817ab0a3e8b309b51e7d177b63f26e67e6b744d280b0","tests/repo/mod.rs":"4e2d370876192fc0514962e1eeb9e1e4a96e3805b1f87257ba4d1eeda8b1db73","tests/repo/progress.rs":"c08d0314a7f3ecf760d471f27da3cd2a500aeb9f1c8331bffb2aa648f9fabf3f","tests/snapshot/mod.rs":"4a101272c5abe6ca9f3501e0cacacee9a0ccf7ca773348a239e5b046d0316a7e","tests/test_asyncness.rs":"971d560d927d5a8494eaa7fce8f0d062d6971c17c4c464fcfc31570572b7d3d7","tests/test_attribute.rs":"8a4429b7cfe2360bb73beae54a62ae3255ebbd5181467a8608d6f858c2711728","tests/test_derive_input.rs":"c8f5dbac6482dadd0fab30d0b1fe3254869256c48ea68ea484cad7f740
6c8568","tests/test_expr.rs":"055cb9b33a5bb6ed5dc67491e6f1ae794a53a5a091245debd464ef57144f5edb","tests/test_generics.rs":"0d79a25b75e45779185c2adefd3d88a9e49d0f333d885265551df1402d50abaf","tests/test_grouping.rs":"fe3de6e8824f0722ab6450c6dfc374f6e0f8fe75c87c4dd56b2cb00a2197ed58","tests/test_ident.rs":"d5850e817720e774cd397a46dbc5298c57933823c18e20805e84503fc9387e8f","tests/test_item.rs":"f4119000784af2d65d5fd097830368a391c05b249f3df8c60613a98b16a322ca","tests/test_lit.rs":"4130efa425d14ed3ad9a1c2a00ef4b29782c9d1cf9e29ff9dddd3b23b2e3ddee","tests/test_meta.rs":"5b0fdee0decbd07476c9673403a662de385901b4bf60600c26ac879893f5bf9c","tests/test_parse_buffer.rs":"0de6af13ba0345986b18d495063f9b75a1018e8569c34b277f9522c63a6c0941","tests/test_parse_quote.rs":"85d90d2d51b82aab7c30159dd884f26c592ddb28ed31ef2baf371ee31349694c","tests/test_parse_stream.rs":"b6b533432173123d6d01d8d2cb33714bc50b30b16ffbb6116f93937221ad4594","tests/test_pat.rs":"dafa3e1f51812e8c852dc5210640a4adf6fff7cd0a0790ee17d2c4c115321846","tests/test_path.rs":"7a6763a262c41a9522068887702fe7cd4ff72b07da5253ac47761d73315b021d","tests/test_precedence.rs":"ed27331fe3bc4496970e677df0d2f66e4516e6eea975d4a31029338ad23c79c0","tests/test_punctuated.rs":"efed2c281b6965d71b065c7606631ba1989af6e7b5f5d1ca1033f8b968dc076c","tests/test_receiver.rs":"2053028236f95f3cb508ebf2eb606df43cae4f9f4dd27823661459ff6c54a39c","tests/test_round_trip.rs":"8b2ed3c4164247577953e3108cca67eed97761c90b9c0df31cbd50097ed1a047","tests/test_shebang.rs":"9bc24b1ee2947b06a279d2ed40039cb45bba6caf7cd40530d93f7e2355de53c6","tests/test_size.rs":"03efaf829b80b7db1f831474c1d3ce268914fc499d0e2a7eea03cad04a482974","tests/test_stmt.rs":"b3c120059d7b56388963b85234feb2e4d379e32a0bf7f29b6683eca000dd3919","tests/test_token_trees.rs":"c30b921a96739c9334ec2bdd06552729891e0251b9d8fbdf0b8f5cc897babee5","tests/test_ty.rs":"9bb5f632941451ca6b200100310b55e62a9956190df3efe28b80d42843e75362","tests/test_unparenthesize.rs":"e5c047819afd5f70fde1bdd095467b1291d0854641f21e8183e50919986d8ce7","tests/test_visibility.rs":"7d05f05b0782976369d21477ac9f4d35a7c7f36faa42127e3a9c12ada270baf8","tests/zzz_stable.rs":"2a862e59cb446235ed99aec0e6ada8e16d3ecc30229b29d825b7c0bbc2602989"},"package":"a99801b5bd34ede4cf3fc688c5919368fea4e4814a4664359503e6015b280aea"} \ No newline at end of file diff --git a/vendor/syn/.cargo_vcs_info.json b/vendor/syn/.cargo_vcs_info.json new file mode 100644 index 00000000000000..b403b881f18c95 --- /dev/null +++ b/vendor/syn/.cargo_vcs_info.json @@ -0,0 +1,6 @@ +{ + "git": { + "sha1": "1c8cabea3c98acb7c23722b9663e269d93ce692b" + }, + "path_in_vcs": "" +} \ No newline at end of file diff --git a/vendor/syn/Cargo.lock b/vendor/syn/Cargo.lock new file mode 100644 index 00000000000000..ced51021e878ce --- /dev/null +++ b/vendor/syn/Cargo.lock @@ -0,0 +1,1819 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. 
+version = 3 + +[[package]] +name = "adler2" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "320119579fcad9c21884f5c4861d16174d0e06250625266f50fe6898340abefa" + +[[package]] +name = "anyhow" +version = "1.0.100" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a23eb6b1614318a8071c9b2521f36b424b2c83db5eb3a0fead4a6c0809af6e61" + +[[package]] +name = "atomic-waker" +version = "1.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1505bd5d3d116872e7271a6d4e16d81d0c8570876c8de68093a09ac269d8aac0" + +[[package]] +name = "automod" +version = "1.0.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ebb4bd301db2e2ca1f5be131c24eb8ebf2d9559bc3744419e93baf8ddea7e670" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.109", +] + +[[package]] +name = "base64" +version = "0.22.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6" + +[[package]] +name = "bitflags" +version = "2.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "812e12b5285cc515a9c72a5c1d3b6d46a19dac5acfef5265968c166106e31dd3" + +[[package]] +name = "bumpalo" +version = "3.19.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "46c5e41b57b8bba42a04676d81cb89e9ee8e859a1a66f80a5a72e1cb76b34d43" + +[[package]] +name = "bytes" +version = "1.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d71b6127be86fdcfddb610f7182ac57211d4b18a3e9c82eb2d17662f2227ad6a" + +[[package]] +name = "cc" +version = "1.2.45" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "35900b6c8d709fb1d854671ae27aeaa9eec2f8b01b364e1619a40da3e6fe2afe" +dependencies = [ + "find-msvc-tools", + "shlex", +] + +[[package]] +name = "cfg-if" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9330f8b2ff13f34540b44e946ef35111825727b38d33286ef986142615121801" + +[[package]] +name = "console" +version = "0.15.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "054ccb5b10f9f2cbf51eb355ca1d05c2d279ce1804688d0db74b4733a5aeafd8" +dependencies = [ + "encode_unicode", + "libc", + "once_cell", + "windows-sys 0.59.0", +] + +[[package]] +name = "core-foundation" +version = "0.9.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "91e195e091a93c46f7102ec7818a2aa394e1e1771c3ab4825963fa03e45afb8f" +dependencies = [ + "core-foundation-sys", + "libc", +] + +[[package]] +name = "core-foundation-sys" +version = "0.8.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b" + +[[package]] +name = "crc32fast" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9481c1c90cbf2ac953f07c8d4a58aa3945c425b7185c9154d67a65e4230da511" +dependencies = [ + "cfg-if", +] + +[[package]] +name = "crossbeam-deque" +version = "0.8.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9dd111b7b7f7d55b72c0a6ae361660ee5853c9af73f70c3c2ef6858b950e2e51" +dependencies = [ + "crossbeam-epoch", + "crossbeam-utils", +] + +[[package]] +name = "crossbeam-epoch" +version = "0.9.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"5b82ac4a3c2ca9c3460964f020e1402edd5753411d7737aa39c3714ad1b5420e" +dependencies = [ + "crossbeam-utils", +] + +[[package]] +name = "crossbeam-utils" +version = "0.8.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d0a5c400df2834b80a4c3327b3aad3a4c4cd4de0629063962b03235697506a28" + +[[package]] +name = "displaydoc" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.109", +] + +[[package]] +name = "either" +version = "1.15.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "48c757948c5ede0e46177b7add2e67155f70e33c07fea8284df6576da70b3719" + +[[package]] +name = "encode_unicode" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "34aa73646ffb006b8f5147f3dc182bd4bcb190227ce861fc4a4844bf8e3cb2c0" + +[[package]] +name = "encoding_rs" +version = "0.8.35" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75030f3c4f45dafd7586dd6780965a8c7e8e285a5ecb86713e63a79c5b2766f3" +dependencies = [ + "cfg-if", +] + +[[package]] +name = "equivalent" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "877a4ace8713b0bcf2a4e7eec82529c029f1d0619886d18145fea96c3ffe5c0f" + +[[package]] +name = "errno" +version = "0.3.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "39cab71617ae0d63f51a36d69f866391735b51691dbda63cf6f96d042b63efeb" +dependencies = [ + "libc", + "windows-sys 0.61.2", +] + +[[package]] +name = "fastrand" +version = "2.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be" + +[[package]] +name = "filetime" +version = "0.2.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bc0505cd1b6fa6580283f6bdf70a73fcf4aba1184038c90902b92b3dd0df63ed" +dependencies = [ + "cfg-if", + "libc", + "libredox", + "windows-sys 0.60.2", +] + +[[package]] +name = "find-msvc-tools" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "52051878f80a721bb68ebfbc930e07b65ba72f2da88968ea5c06fd6ca3d3a127" + +[[package]] +name = "flate2" +version = "1.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bfe33edd8e85a12a67454e37f8c75e730830d83e313556ab9ebf9ee7fbeb3bfb" +dependencies = [ + "crc32fast", + "miniz_oxide", +] + +[[package]] +name = "fnv" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" + +[[package]] +name = "foreign-types" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1" +dependencies = [ + "foreign-types-shared", +] + +[[package]] +name = "foreign-types-shared" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" + +[[package]] +name = "form_urlencoded" +version = "1.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cb4cb245038516f5f85277875cdaa4f7d2c9a0fa0468de06ed190163b1581fcf" +dependencies = [ + "percent-encoding", +] + +[[package]] +name = "futures-channel" +version = 
"0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2dff15bf788c671c1934e366d07e30c1814a8ef514e1af724a602e8a2fbe1b10" +dependencies = [ + "futures-core", + "futures-sink", +] + +[[package]] +name = "futures-core" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05f29059c0c2090612e8d742178b0580d2dc940c837851ad723096f87af6663e" + +[[package]] +name = "futures-io" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9e5c1b78ca4aae1ac06c48a526a655760685149f0d465d21f37abfe57ce075c6" + +[[package]] +name = "futures-sink" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e575fab7d1e0dcb8d0c7bcf9a63ee213816ab51902e6d244a95819acacf1d4f7" + +[[package]] +name = "futures-task" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f90f7dce0722e95104fcb095585910c0977252f286e354b5e3bd38902cd99988" + +[[package]] +name = "futures-util" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9fa08315bb612088cc391249efdc3bc77536f16c91f6cf495e6fbe85b20a4a81" +dependencies = [ + "futures-core", + "futures-io", + "futures-sink", + "futures-task", + "memchr", + "pin-project-lite", + "pin-utils", + "slab", +] + +[[package]] +name = "getrandom" +version = "0.2.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "335ff9f135e4384c8150d6f27c6daed433577f86b4750418338c01a1a2528592" +dependencies = [ + "cfg-if", + "libc", + "wasi", +] + +[[package]] +name = "getrandom" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "899def5c37c4fd7b2664648c28120ecec138e4d395b459e5ca34f9cce2dd77fd" +dependencies = [ + "cfg-if", + "libc", + "r-efi", + "wasip2", +] + +[[package]] +name = "h2" +version = "0.4.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f3c0b69cfcb4e1b9f1bf2f53f95f766e4661169728ec61cd3fe5a0166f2d1386" +dependencies = [ + "atomic-waker", + "bytes", + "fnv", + "futures-core", + "futures-sink", + "http", + "indexmap", + "slab", + "tokio", + "tokio-util", + "tracing", +] + +[[package]] +name = "hashbrown" +version = "0.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5419bdc4f6a9207fbeba6d11b604d481addf78ecd10c11ad51e76c2f6482748d" + +[[package]] +name = "http" +version = "1.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f4a85d31aea989eead29a3aaf9e1115a180df8282431156e533de47660892565" +dependencies = [ + "bytes", + "fnv", + "itoa", +] + +[[package]] +name = "http-body" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1efedce1fb8e6913f23e0c92de8e62cd5b772a67e7b3946df930a62566c93184" +dependencies = [ + "bytes", + "http", +] + +[[package]] +name = "http-body-util" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b021d93e26becf5dc7e1b75b1bed1fd93124b374ceb73f43d4d4eafec896a64a" +dependencies = [ + "bytes", + "futures-core", + "http", + "http-body", + "pin-project-lite", +] + +[[package]] +name = "httparse" +version = "1.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6dbf3de79e51f3d586ab4cb9d5c3e2c14aa28ed23d180cf89b4df0454a69cc87" + +[[package]] +name = "hyper" +version = "1.7.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "eb3aa54a13a0dfe7fbe3a59e0c76093041720fdc77b110cc0fc260fafb4dc51e" +dependencies = [ + "atomic-waker", + "bytes", + "futures-channel", + "futures-core", + "h2", + "http", + "http-body", + "httparse", + "itoa", + "pin-project-lite", + "pin-utils", + "smallvec", + "tokio", + "want", +] + +[[package]] +name = "hyper-rustls" +version = "0.27.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3c93eb611681b207e1fe55d5a71ecf91572ec8a6705cdb6857f7d8d5242cf58" +dependencies = [ + "http", + "hyper", + "hyper-util", + "rustls", + "rustls-pki-types", + "tokio", + "tokio-rustls", + "tower-service", +] + +[[package]] +name = "hyper-tls" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "70206fc6890eaca9fde8a0bf71caa2ddfc9fe045ac9e5c70df101a7dbde866e0" +dependencies = [ + "bytes", + "http-body-util", + "hyper", + "hyper-util", + "native-tls", + "tokio", + "tokio-native-tls", + "tower-service", +] + +[[package]] +name = "hyper-util" +version = "0.1.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3c6995591a8f1380fcb4ba966a252a4b29188d51d2b89e3a252f5305be65aea8" +dependencies = [ + "base64", + "bytes", + "futures-channel", + "futures-core", + "futures-util", + "http", + "http-body", + "hyper", + "ipnet", + "libc", + "percent-encoding", + "pin-project-lite", + "socket2", + "system-configuration", + "tokio", + "tower-service", + "tracing", + "windows-registry", +] + +[[package]] +name = "icu_collections" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4c6b649701667bbe825c3b7e6388cb521c23d88644678e83c0c4d0a621a34b43" +dependencies = [ + "displaydoc", + "potential_utf", + "yoke", + "zerofrom", + "zerovec", +] + +[[package]] +name = "icu_locale_core" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "edba7861004dd3714265b4db54a3c390e880ab658fec5f7db895fae2046b5bb6" +dependencies = [ + "displaydoc", + "litemap", + "tinystr", + "writeable", + "zerovec", +] + +[[package]] +name = "icu_normalizer" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f6c8828b67bf8908d82127b2054ea1b4427ff0230ee9141c54251934ab1b599" +dependencies = [ + "icu_collections", + "icu_normalizer_data", + "icu_properties", + "icu_provider", + "smallvec", + "zerovec", +] + +[[package]] +name = "icu_normalizer_data" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7aedcccd01fc5fe81e6b489c15b247b8b0690feb23304303a9e560f37efc560a" + +[[package]] +name = "icu_properties" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e93fcd3157766c0c8da2f8cff6ce651a31f0810eaa1c51ec363ef790bbb5fb99" +dependencies = [ + "icu_collections", + "icu_locale_core", + "icu_properties_data", + "icu_provider", + "zerotrie", + "zerovec", +] + +[[package]] +name = "icu_properties_data" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "02845b3647bb045f1100ecd6480ff52f34c35f82d9880e029d329c21d1054899" + +[[package]] +name = "icu_provider" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85962cf0ce02e1e0a629cc34e7ca3e373ce20dda4c4d7294bbd0bf1fdb59e614" +dependencies = [ + "displaydoc", + "icu_locale_core", + "writeable", + "yoke", + "zerofrom", + "zerotrie", + 
"zerovec", +] + +[[package]] +name = "idna" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3b0875f23caa03898994f6ddc501886a45c7d3d62d04d2d90788d47be1b1e4de" +dependencies = [ + "idna_adapter", + "smallvec", + "utf8_iter", +] + +[[package]] +name = "idna_adapter" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3acae9609540aa318d1bc588455225fb2085b9ed0c4f6bd0d9d5bcd86f1a0344" +dependencies = [ + "icu_normalizer", + "icu_properties", +] + +[[package]] +name = "indexmap" +version = "2.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6717a8d2a5a929a1a2eb43a12812498ed141a0bcfb7e8f7844fbdbe4303bba9f" +dependencies = [ + "equivalent", + "hashbrown", +] + +[[package]] +name = "insta" +version = "1.43.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "46fdb647ebde000f43b5b53f773c30cf9b0cb4300453208713fa38b2c70935a0" +dependencies = [ + "console", + "once_cell", + "similar", +] + +[[package]] +name = "ipnet" +version = "2.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "469fb0b9cefa57e3ef31275ee7cacb78f2fdca44e4765491884a2b119d4eb130" + +[[package]] +name = "iri-string" +version = "0.7.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4f867b9d1d896b67beb18518eda36fdb77a32ea590de864f1325b294a6d14397" +dependencies = [ + "memchr", + "serde", +] + +[[package]] +name = "itoa" +version = "1.0.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c" + +[[package]] +name = "js-sys" +version = "0.3.82" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b011eec8cc36da2aab2d5cff675ec18454fad408585853910a202391cf9f8e65" +dependencies = [ + "once_cell", + "wasm-bindgen", +] + +[[package]] +name = "libc" +version = "0.2.177" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2874a2af47a2325c2001a6e6fad9b16a53b802102b528163885171cf92b15976" + +[[package]] +name = "libredox" +version = "0.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "416f7e718bdb06000964960ffa43b4335ad4012ae8b99060261aa4a8088d5ccb" +dependencies = [ + "bitflags", + "libc", + "redox_syscall", +] + +[[package]] +name = "linux-raw-sys" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "df1d3c3b53da64cf5760482273a98e575c651a67eec7f77df96b5b642de8f039" + +[[package]] +name = "litemap" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6373607a59f0be73a39b6fe456b8192fcc3585f602af20751600e974dd455e77" + +[[package]] +name = "log" +version = "0.4.28" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "34080505efa8e45a4b816c349525ebe327ceaa8559756f0356cba97ef3bf7432" + +[[package]] +name = "memchr" +version = "2.7.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f52b00d39961fc5b2736ea853c9cc86238e165017a493d1d5c8eac6bdc4cc273" + +[[package]] +name = "mime" +version = "0.3.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" + +[[package]] +name = "miniz_oxide" +version = "0.8.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"1fa76a2c86f704bdb222d66965fb3d63269ce38518b83cb0575fca855ebb6316" +dependencies = [ + "adler2", + "simd-adler32", +] + +[[package]] +name = "mio" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "69d83b0086dc8ecf3ce9ae2874b2d1290252e2a30720bea58a5c6639b0092873" +dependencies = [ + "libc", + "wasi", + "windows-sys 0.61.2", +] + +[[package]] +name = "native-tls" +version = "0.2.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87de3442987e9dbec73158d5c715e7ad9072fda936bb03d19d7fa10e00520f0e" +dependencies = [ + "libc", + "log", + "openssl", + "openssl-probe", + "openssl-sys", + "schannel", + "security-framework", + "security-framework-sys", + "tempfile", +] + +[[package]] +name = "once_cell" +version = "1.21.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42f5e15c9953c5e4ccceeb2e7382a716482c34515315f7b03532b8b4e8393d2d" + +[[package]] +name = "openssl" +version = "0.10.75" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "08838db121398ad17ab8531ce9de97b244589089e290a384c900cb9ff7434328" +dependencies = [ + "bitflags", + "cfg-if", + "foreign-types", + "libc", + "once_cell", + "openssl-macros", + "openssl-sys", +] + +[[package]] +name = "openssl-macros" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.109", +] + +[[package]] +name = "openssl-probe" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d05e27ee213611ffe7d6348b942e8f942b37114c00cc03cec254295a4a17852e" + +[[package]] +name = "openssl-sys" +version = "0.9.111" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "82cab2d520aa75e3c58898289429321eb788c3106963d0dc886ec7a5f4adc321" +dependencies = [ + "cc", + "libc", + "pkg-config", + "vcpkg", +] + +[[package]] +name = "percent-encoding" +version = "2.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b4f627cb1b25917193a259e49bdad08f671f8d9708acfd5fe0a8c1455d87220" + +[[package]] +name = "pin-project-lite" +version = "0.2.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3b3cff922bd51709b605d9ead9aa71031d81447142d828eb4a6eba76fe619f9b" + +[[package]] +name = "pin-utils" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" + +[[package]] +name = "pkg-config" +version = "0.3.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7edddbd0b52d732b21ad9a5fab5c704c14cd949e5e9a1ec5929a24fded1b904c" + +[[package]] +name = "potential_utf" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b73949432f5e2a09657003c25bca5e19a0e9c84f8058ca374f49e0ebe605af77" +dependencies = [ + "zerovec", +] + +[[package]] +name = "proc-macro2" +version = "1.0.103" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5ee95bc4ef87b8d5ba32e8b7714ccc834865276eab0aed5c9958d00ec45f49e8" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "quote" +version = "1.0.42" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a338cc41d27e6cc6dce6cefc13a0729dfbb81c262b1f519331575dd80ef3067f" +dependencies = [ + "proc-macro2", +] + 
+[[package]] +name = "r-efi" +version = "5.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "69cdb34c158ceb288df11e18b4bd39de994f6657d83847bdffdbd7f346754b0f" + +[[package]] +name = "rayon" +version = "1.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "368f01d005bf8fd9b1206fb6fa653e6c4a81ceb1466406b81792d87c5677a58f" +dependencies = [ + "either", + "rayon-core", +] + +[[package]] +name = "rayon-core" +version = "1.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "22e18b0f0062d30d4230b2e85ff77fdfe4326feb054b9783a3460d8435c8ab91" +dependencies = [ + "crossbeam-deque", + "crossbeam-utils", +] + +[[package]] +name = "redox_syscall" +version = "0.5.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ed2bf2547551a7053d6fdfafda3f938979645c44812fbfcda098faae3f1a362d" +dependencies = [ + "bitflags", +] + +[[package]] +name = "ref-cast" +version = "1.0.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f354300ae66f76f1c85c5f84693f0ce81d747e2c3f21a45fef496d89c960bf7d" +dependencies = [ + "ref-cast-impl", +] + +[[package]] +name = "ref-cast-impl" +version = "1.0.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b7186006dcb21920990093f30e3dea63b7d6e977bf1256be20c3563a5db070da" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.109", +] + +[[package]] +name = "reqwest" +version = "0.12.24" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9d0946410b9f7b082a427e4ef5c8ff541a88b357bc6c637c40db3a68ac70a36f" +dependencies = [ + "base64", + "bytes", + "encoding_rs", + "futures-channel", + "futures-core", + "futures-util", + "h2", + "http", + "http-body", + "http-body-util", + "hyper", + "hyper-rustls", + "hyper-tls", + "hyper-util", + "js-sys", + "log", + "mime", + "native-tls", + "percent-encoding", + "pin-project-lite", + "rustls-pki-types", + "serde", + "serde_json", + "serde_urlencoded", + "sync_wrapper", + "tokio", + "tokio-native-tls", + "tower", + "tower-http", + "tower-service", + "url", + "wasm-bindgen", + "wasm-bindgen-futures", + "web-sys", +] + +[[package]] +name = "ring" +version = "0.17.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a4689e6c2294d81e88dc6261c768b63bc4fcdb852be6d1352498b114f61383b7" +dependencies = [ + "cc", + "cfg-if", + "getrandom 0.2.16", + "libc", + "untrusted", + "windows-sys 0.52.0", +] + +[[package]] +name = "rustix" +version = "1.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cd15f8a2c5551a84d56efdc1cd049089e409ac19a3072d5037a17fd70719ff3e" +dependencies = [ + "bitflags", + "errno", + "libc", + "linux-raw-sys", + "windows-sys 0.61.2", +] + +[[package]] +name = "rustls" +version = "0.23.35" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "533f54bc6a7d4f647e46ad909549eda97bf5afc1585190ef692b4286b198bd8f" +dependencies = [ + "once_cell", + "rustls-pki-types", + "rustls-webpki", + "subtle", + "zeroize", +] + +[[package]] +name = "rustls-pki-types" +version = "1.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "94182ad936a0c91c324cd46c6511b9510ed16af436d7b5bab34beab0afd55f7a" +dependencies = [ + "zeroize", +] + +[[package]] +name = "rustls-webpki" +version = "0.103.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2ffdfa2f5286e2247234e03f680868ac2815974dc39e00ea15adc445d0aafe52" 
+dependencies = [ + "ring", + "rustls-pki-types", + "untrusted", +] + +[[package]] +name = "rustversion" +version = "1.0.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b39cdef0fa800fc44525c84ccb54a029961a8215f9619753635a9c0d2538d46d" + +[[package]] +name = "ryu" +version = "1.0.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "28d3b2b1366ec20994f1fd18c3c594f05c5dd4bc44d8bb0c1c632c8d6829481f" + +[[package]] +name = "same-file" +version = "1.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502" +dependencies = [ + "winapi-util", +] + +[[package]] +name = "schannel" +version = "0.1.28" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "891d81b926048e76efe18581bf793546b4c0eaf8448d72be8de2bbee5fd166e1" +dependencies = [ + "windows-sys 0.61.2", +] + +[[package]] +name = "security-framework" +version = "2.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "897b2245f0b511c87893af39b033e5ca9cce68824c4d7e7630b5a1d339658d02" +dependencies = [ + "bitflags", + "core-foundation", + "core-foundation-sys", + "libc", + "security-framework-sys", +] + +[[package]] +name = "security-framework-sys" +version = "2.15.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cc1f0cbffaac4852523ce30d8bd3c5cdc873501d96ff467ca09b6767bb8cd5c0" +dependencies = [ + "core-foundation-sys", + "libc", +] + +[[package]] +name = "serde" +version = "1.0.228" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a8e94ea7f378bd32cbbd37198a4a91436180c5bb472411e48b5ec2e2124ae9e" +dependencies = [ + "serde_core", + "serde_derive", +] + +[[package]] +name = "serde_core" +version = "1.0.228" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41d385c7d4ca58e59fc732af25c3983b67ac852c1a25000afe1175de458b67ad" +dependencies = [ + "serde_derive", +] + +[[package]] +name = "serde_derive" +version = "1.0.228" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d540f220d3187173da220f885ab66608367b6574e925011a9353e4badda91d79" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.109", +] + +[[package]] +name = "serde_json" +version = "1.0.145" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "402a6f66d8c709116cf22f558eab210f5a50187f702eb4d7e5ef38d9a7f1c79c" +dependencies = [ + "itoa", + "memchr", + "ryu", + "serde", + "serde_core", +] + +[[package]] +name = "serde_urlencoded" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3491c14715ca2294c4d6a88f15e84739788c1d030eed8c110436aafdaa2f3fd" +dependencies = [ + "form_urlencoded", + "itoa", + "ryu", + "serde", +] + +[[package]] +name = "shlex" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" + +[[package]] +name = "simd-adler32" +version = "0.3.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d66dc143e6b11c1eddc06d5c423cfc97062865baf299914ab64caa38182078fe" + +[[package]] +name = "similar" +version = "2.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbbb5d9659141646ae647b42fe094daf6c6192d1620870b449d9557f748b2daa" + +[[package]] +name = "slab" +version = "0.4.11" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a2ae44ef20feb57a68b23d846850f861394c2e02dc425a50098ae8c90267589" + +[[package]] +name = "smallvec" +version = "1.15.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "67b1b7a3b5fe4f1376887184045fcf45c69e92af734b7aaddc05fb777b6fbd03" + +[[package]] +name = "socket2" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "17129e116933cf371d018bb80ae557e889637989d8638274fb25622827b03881" +dependencies = [ + "libc", + "windows-sys 0.60.2", +] + +[[package]] +name = "stable_deref_trait" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ce2be8dc25455e1f91df71bfa12ad37d7af1092ae736f3a6cd0e37bc7810596" + +[[package]] +name = "subtle" +version = "2.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292" + +[[package]] +name = "syn" +version = "2.0.109" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2f17c7e013e88258aa9543dcbe81aca68a667a9ac37cd69c9fbc07858bfe0e2f" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "syn" +version = "2.0.110" +dependencies = [ + "anyhow", + "automod", + "flate2", + "insta", + "proc-macro2", + "quote", + "rayon", + "ref-cast", + "reqwest", + "rustversion", + "syn-test-suite", + "tar", + "termcolor", + "unicode-ident", + "walkdir", +] + +[[package]] +name = "syn-test-suite" +version = "0.0.0+test" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a0d661992f60e67c8bdd9a7d6360d30d1301f5783abf7d59933844f656762eb5" + +[[package]] +name = "sync_wrapper" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0bf256ce5efdfa370213c1dabab5935a12e49f2c58d15e9eac2870d3b4f27263" +dependencies = [ + "futures-core", +] + +[[package]] +name = "synstructure" +version = "0.13.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "728a70f3dbaf5bab7f0c4b1ac8d7ae5ea60a4b5549c8a5914361c99147a709d2" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.109", +] + +[[package]] +name = "system-configuration" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3c879d448e9d986b661742763247d3693ed13609438cf3d006f51f5368a5ba6b" +dependencies = [ + "bitflags", + "core-foundation", + "system-configuration-sys", +] + +[[package]] +name = "system-configuration-sys" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e1d1b10ced5ca923a1fcb8d03e96b8d3268065d724548c0211415ff6ac6bac4" +dependencies = [ + "core-foundation-sys", + "libc", +] + +[[package]] +name = "tar" +version = "0.4.44" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d863878d212c87a19c1a610eb53bb01fe12951c0501cf5a0d65f724914a667a" +dependencies = [ + "filetime", + "libc", + "xattr", +] + +[[package]] +name = "tempfile" +version = "3.23.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2d31c77bdf42a745371d260a26ca7163f1e0924b64afa0b688e61b5a9fa02f16" +dependencies = [ + "fastrand", + "getrandom 0.3.4", + "once_cell", + "rustix", + "windows-sys 0.61.2", +] + +[[package]] +name = "termcolor" +version = "1.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"06794f8f6c5c898b3275aebefa6b8a1cb24cd2c6c79397ab15774837a0bc5755" +dependencies = [ + "winapi-util", +] + +[[package]] +name = "tinystr" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42d3e9c45c09de15d06dd8acf5f4e0e399e85927b7f00711024eb7ae10fa4869" +dependencies = [ + "displaydoc", + "zerovec", +] + +[[package]] +name = "tokio" +version = "1.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ff360e02eab121e0bc37a2d3b4d4dc622e6eda3a8e5253d5435ecf5bd4c68408" +dependencies = [ + "bytes", + "libc", + "mio", + "pin-project-lite", + "socket2", + "windows-sys 0.61.2", +] + +[[package]] +name = "tokio-native-tls" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbae76ab933c85776efabc971569dd6119c580d8f5d448769dec1764bf796ef2" +dependencies = [ + "native-tls", + "tokio", +] + +[[package]] +name = "tokio-rustls" +version = "0.26.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1729aa945f29d91ba541258c8df89027d5792d85a8841fb65e8bf0f4ede4ef61" +dependencies = [ + "rustls", + "tokio", +] + +[[package]] +name = "tokio-util" +version = "0.7.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2efa149fe76073d6e8fd97ef4f4eca7b67f599660115591483572e406e165594" +dependencies = [ + "bytes", + "futures-core", + "futures-sink", + "pin-project-lite", + "tokio", +] + +[[package]] +name = "tower" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d039ad9159c98b70ecfd540b2573b97f7f52c3e8d9f8ad57a24b916a536975f9" +dependencies = [ + "futures-core", + "futures-util", + "pin-project-lite", + "sync_wrapper", + "tokio", + "tower-layer", + "tower-service", +] + +[[package]] +name = "tower-http" +version = "0.6.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "adc82fd73de2a9722ac5da747f12383d2bfdb93591ee6c58486e0097890f05f2" +dependencies = [ + "bitflags", + "bytes", + "futures-util", + "http", + "http-body", + "iri-string", + "pin-project-lite", + "tower", + "tower-layer", + "tower-service", +] + +[[package]] +name = "tower-layer" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "121c2a6cda46980bb0fcd1647ffaf6cd3fc79a013de288782836f6df9c48780e" + +[[package]] +name = "tower-service" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8df9b6e13f2d32c91b9bd719c00d1958837bc7dec474d94952798cc8e69eeec3" + +[[package]] +name = "tracing" +version = "0.1.41" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "784e0ac535deb450455cbfa28a6f0df145ea1bb7ae51b821cf5e7927fdcfbdd0" +dependencies = [ + "pin-project-lite", + "tracing-core", +] + +[[package]] +name = "tracing-core" +version = "0.1.34" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b9d12581f227e93f094d3af2ae690a574abb8a2b9b7a96e7cfe9647b2b617678" +dependencies = [ + "once_cell", +] + +[[package]] +name = "try-lock" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b" + +[[package]] +name = "unicode-ident" +version = "1.0.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9312f7c4f6ff9069b165498234ce8be658059c6728633667c526e27dc2cf1df5" + +[[package]] +name = "untrusted" +version = "0.9.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" + +[[package]] +name = "url" +version = "2.5.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "08bc136a29a3d1758e07a9cca267be308aeebf5cfd5a10f3f67ab2097683ef5b" +dependencies = [ + "form_urlencoded", + "idna", + "percent-encoding", + "serde", +] + +[[package]] +name = "utf8_iter" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6c140620e7ffbb22c2dee59cafe6084a59b5ffc27a8859a5f0d494b5d52b6be" + +[[package]] +name = "vcpkg" +version = "0.2.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" + +[[package]] +name = "walkdir" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "29790946404f91d9c5d06f9874efddea1dc06c5efe94541a7d6863108e3a5e4b" +dependencies = [ + "same-file", + "winapi-util", +] + +[[package]] +name = "want" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bfa7760aed19e106de2c7c0b581b509f2f25d3dacaf737cb82ac61bc6d760b0e" +dependencies = [ + "try-lock", +] + +[[package]] +name = "wasi" +version = "0.11.1+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ccf3ec651a847eb01de73ccad15eb7d99f80485de043efb2f370cd654f4ea44b" + +[[package]] +name = "wasip2" +version = "1.0.1+wasi-0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0562428422c63773dad2c345a1882263bbf4d65cf3f42e90921f787ef5ad58e7" +dependencies = [ + "wit-bindgen", +] + +[[package]] +name = "wasm-bindgen" +version = "0.2.105" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "da95793dfc411fbbd93f5be7715b0578ec61fe87cb1a42b12eb625caa5c5ea60" +dependencies = [ + "cfg-if", + "once_cell", + "rustversion", + "wasm-bindgen-macro", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-futures" +version = "0.4.55" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "551f88106c6d5e7ccc7cd9a16f312dd3b5d36ea8b4954304657d5dfba115d4a0" +dependencies = [ + "cfg-if", + "js-sys", + "once_cell", + "wasm-bindgen", + "web-sys", +] + +[[package]] +name = "wasm-bindgen-macro" +version = "0.2.105" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "04264334509e04a7bf8690f2384ef5265f05143a4bff3889ab7a3269adab59c2" +dependencies = [ + "quote", + "wasm-bindgen-macro-support", +] + +[[package]] +name = "wasm-bindgen-macro-support" +version = "0.2.105" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "420bc339d9f322e562942d52e115d57e950d12d88983a14c79b86859ee6c7ebc" +dependencies = [ + "bumpalo", + "proc-macro2", + "quote", + "syn 2.0.109", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-shared" +version = "0.2.105" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "76f218a38c84bcb33c25ec7059b07847d465ce0e0a76b995e134a45adcb6af76" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "web-sys" +version = "0.3.82" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3a1f95c0d03a47f4ae1f7a64643a6bb97465d9b740f0fa8f90ea33915c99a9a1" +dependencies = [ + "js-sys", + "wasm-bindgen", +] + +[[package]] +name = "winapi-util" +version = "0.1.11" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "c2a7b1c03c876122aa43f3020e6c3c3ee5c05081c9a00739faf7503aeba10d22" +dependencies = [ + "windows-sys 0.61.2", +] + +[[package]] +name = "windows-link" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5e6ad25900d524eaabdbbb96d20b4311e1e7ae1699af4fb28c17ae66c80d798a" + +[[package]] +name = "windows-link" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0805222e57f7521d6a62e36fa9163bc891acd422f971defe97d64e70d0a4fe5" + +[[package]] +name = "windows-registry" +version = "0.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b8a9ed28765efc97bbc954883f4e6796c33a06546ebafacbabee9696967499e" +dependencies = [ + "windows-link 0.1.3", + "windows-result", + "windows-strings", +] + +[[package]] +name = "windows-result" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56f42bd332cc6c8eac5af113fc0c1fd6a8fd2aa08a0119358686e5160d0586c6" +dependencies = [ + "windows-link 0.1.3", +] + +[[package]] +name = "windows-strings" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56e6c93f3a0c3b36176cb1327a4958a0353d5d166c2a35cb268ace15e91d3b57" +dependencies = [ + "windows-link 0.1.3", +] + +[[package]] +name = "windows-sys" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" +dependencies = [ + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-sys" +version = "0.59.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b" +dependencies = [ + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-sys" +version = "0.60.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f2f500e4d28234f72040990ec9d39e3a6b950f9f22d3dba18416c35882612bcb" +dependencies = [ + "windows-targets 0.53.5", +] + +[[package]] +name = "windows-sys" +version = "0.61.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ae137229bcbd6cdf0f7b80a31df61766145077ddf49416a728b02cb3921ff3fc" +dependencies = [ + "windows-link 0.2.1", +] + +[[package]] +name = "windows-targets" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973" +dependencies = [ + "windows_aarch64_gnullvm 0.52.6", + "windows_aarch64_msvc 0.52.6", + "windows_i686_gnu 0.52.6", + "windows_i686_gnullvm 0.52.6", + "windows_i686_msvc 0.52.6", + "windows_x86_64_gnu 0.52.6", + "windows_x86_64_gnullvm 0.52.6", + "windows_x86_64_msvc 0.52.6", +] + +[[package]] +name = "windows-targets" +version = "0.53.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4945f9f551b88e0d65f3db0bc25c33b8acea4d9e41163edf90dcd0b19f9069f3" +dependencies = [ + "windows-link 0.2.1", + "windows_aarch64_gnullvm 0.53.1", + "windows_aarch64_msvc 0.53.1", + "windows_i686_gnu 0.53.1", + "windows_i686_gnullvm 0.53.1", + "windows_i686_msvc 0.53.1", + "windows_x86_64_gnu 0.53.1", + "windows_x86_64_gnullvm 0.53.1", + "windows_x86_64_msvc 0.53.1", +] + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a9d8416fa8b42f5c947f8482c43e7d89e73a173cead56d044f6a56104a6d1b53" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b9d782e804c2f632e395708e99a94275910eb9100b2114651e04744e9b125006" + +[[package]] +name = "windows_i686_gnu" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" + +[[package]] +name = "windows_i686_gnu" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "960e6da069d81e09becb0ca57a65220ddff016ff2d6af6a223cf372a506593a3" + +[[package]] +name = "windows_i686_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" + +[[package]] +name = "windows_i686_gnullvm" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fa7359d10048f68ab8b09fa71c3daccfb0e9b559aed648a8f95469c27057180c" + +[[package]] +name = "windows_i686_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" + +[[package]] +name = "windows_i686_msvc" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e7ac75179f18232fe9c285163565a57ef8d3c89254a30685b57d83a38d326c2" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c3842cdd74a865a8066ab39c8a7a473c0778a3f29370b5fd6b4b9aa7df4a499" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ffa179e2d07eee8ad8f57493436566c7cc30ac536a3379fdf008f47f6bb7ae1" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d6bbff5f0aada427a1e5a6da5f1f98158182f26556f345ac9e04d36d0ebed650" + +[[package]] +name = "wit-bindgen" +version = "0.46.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f17a85883d4e6d00e8a97c586de764dabcc06133f7f1d55dce5cdc070ad7fe59" + +[[package]] +name = "writeable" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"9edde0db4769d2dc68579893f2306b26c6ecfbe0ef499b013d731b7b9247e0b9" + +[[package]] +name = "xattr" +version = "1.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32e45ad4206f6d2479085147f02bc2ef834ac85886624a23575ae137c8aa8156" +dependencies = [ + "libc", + "rustix", +] + +[[package]] +name = "yoke" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72d6e5c6afb84d73944e5cedb052c4680d5657337201555f9f2a16b7406d4954" +dependencies = [ + "stable_deref_trait", + "yoke-derive", + "zerofrom", +] + +[[package]] +name = "yoke-derive" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b659052874eb698efe5b9e8cf382204678a0086ebf46982b79d6ca3182927e5d" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.109", + "synstructure", +] + +[[package]] +name = "zerofrom" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "50cc42e0333e05660c3587f3bf9d0478688e15d870fab3346451ce7f8c9fbea5" +dependencies = [ + "zerofrom-derive", +] + +[[package]] +name = "zerofrom-derive" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d71e5d6e06ab090c67b5e44993ec16b72dcbaabc526db883a360057678b48502" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.109", + "synstructure", +] + +[[package]] +name = "zeroize" +version = "1.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b97154e67e32c85465826e8bcc1c59429aaaf107c1e4a9e53c8d8ccd5eff88d0" + +[[package]] +name = "zerotrie" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2a59c17a5562d507e4b54960e8569ebee33bee890c70aa3fe7b97e85a9fd7851" +dependencies = [ + "displaydoc", + "yoke", + "zerofrom", +] + +[[package]] +name = "zerovec" +version = "0.11.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c28719294829477f525be0186d13efa9a3c602f7ec202ca9e353d310fb9a002" +dependencies = [ + "yoke", + "zerofrom", + "zerovec-derive", +] + +[[package]] +name = "zerovec-derive" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eadce39539ca5cb3985590102671f2567e659fca9666581ad3411d59207951f3" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.109", +] diff --git a/vendor/syn/Cargo.toml b/vendor/syn/Cargo.toml new file mode 100644 index 00000000000000..b3e4ae86a8239b --- /dev/null +++ b/vendor/syn/Cargo.toml @@ -0,0 +1,272 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies. +# +# If you are reading this file be aware that the original Cargo.toml +# will likely look very different (and much more reasonable). +# See Cargo.toml.orig for the original contents. 
+ +[package] +edition = "2021" +rust-version = "1.68" +name = "syn" +version = "2.0.110" +authors = ["David Tolnay <dtolnay@gmail.com>"] +build = false +include = [ + "/benches/**", + "/Cargo.toml", + "/LICENSE-APACHE", + "/LICENSE-MIT", + "/README.md", + "/src/**", + "/tests/**", +] +autolib = false +autobins = false +autoexamples = false +autotests = false +autobenches = false +description = "Parser for Rust source code" +documentation = "https://docs.rs/syn" +readme = "README.md" +keywords = [ + "macros", + "syn", +] +categories = [ + "development-tools::procedural-macro-helpers", + "parser-implementations", +] +license = "MIT OR Apache-2.0" +repository = "https://github.com/dtolnay/syn" + +[package.metadata.docs.rs] +all-features = true +targets = ["x86_64-unknown-linux-gnu"] +rustdoc-args = [ + "--generate-link-to-definition", + "--generate-macro-expansion", + "--extend-css=src/gen/token.css", + "--extern-html-root-url=core=https://doc.rust-lang.org", + "--extern-html-root-url=alloc=https://doc.rust-lang.org", + "--extern-html-root-url=std=https://doc.rust-lang.org", + "--extern-html-root-url=proc_macro=https://doc.rust-lang.org", +] + +[package.metadata.playground] +features = [ + "full", + "visit", + "visit-mut", + "fold", + "extra-traits", +] + +[features] +clone-impls = [] +default = [ + "derive", + "parsing", + "printing", + "clone-impls", + "proc-macro", +] +derive = [] +extra-traits = [] +fold = [] +full = [] +parsing = [] +printing = ["dep:quote"] +proc-macro = [ + "proc-macro2/proc-macro", + "quote?/proc-macro", +] +test = ["syn-test-suite/all-features"] +visit = [] +visit-mut = [] + +[lib] +name = "syn" +path = "src/lib.rs" + +[[test]] +name = "regression" +path = "tests/regression.rs" + +[[test]] +name = "test_asyncness" +path = "tests/test_asyncness.rs" + +[[test]] +name = "test_attribute" +path = "tests/test_attribute.rs" + +[[test]] +name = "test_derive_input" +path = "tests/test_derive_input.rs" + +[[test]] +name = "test_expr" +path = "tests/test_expr.rs" + +[[test]] +name = "test_generics" +path = "tests/test_generics.rs" + +[[test]] +name = "test_grouping" +path = "tests/test_grouping.rs" + +[[test]] +name = "test_ident" +path = "tests/test_ident.rs" + +[[test]] +name = "test_item" +path = "tests/test_item.rs" + +[[test]] +name = "test_lit" +path = "tests/test_lit.rs" + +[[test]] +name = "test_meta" +path = "tests/test_meta.rs" + +[[test]] +name = "test_parse_buffer" +path = "tests/test_parse_buffer.rs" + +[[test]] +name = "test_parse_quote" +path = "tests/test_parse_quote.rs" + +[[test]] +name = "test_parse_stream" +path = "tests/test_parse_stream.rs" + +[[test]] +name = "test_pat" +path = "tests/test_pat.rs" + +[[test]] +name = "test_path" +path = "tests/test_path.rs" + +[[test]] +name = "test_precedence" +path = "tests/test_precedence.rs" + +[[test]] +name = "test_punctuated" +path = "tests/test_punctuated.rs" + +[[test]] +name = "test_receiver" +path = "tests/test_receiver.rs" + +[[test]] +name = "test_round_trip" +path = "tests/test_round_trip.rs" + +[[test]] +name = "test_shebang" +path = "tests/test_shebang.rs" + +[[test]] +name = "test_size" +path = "tests/test_size.rs" + +[[test]] +name = "test_stmt" +path = "tests/test_stmt.rs" + +[[test]] +name = "test_token_trees" +path = "tests/test_token_trees.rs" + +[[test]] +name = "test_ty" +path = "tests/test_ty.rs" + +[[test]] +name = "test_unparenthesize" +path = "tests/test_unparenthesize.rs" + +[[test]] +name = "test_visibility" +path = "tests/test_visibility.rs" + +[[test]] +name = "zzz_stable" +path = 
"tests/zzz_stable.rs" + +[[bench]] +name = "file" +path = "benches/file.rs" +required-features = [ + "full", + "parsing", +] + +[[bench]] +name = "rust" +path = "benches/rust.rs" +harness = false +required-features = [ + "full", + "parsing", +] + +[dependencies.proc-macro2] +version = "1.0.91" +default-features = false + +[dependencies.quote] +version = "1.0.35" +optional = true +default-features = false + +[dependencies.unicode-ident] +version = "1" + +[dev-dependencies.anyhow] +version = "1" + +[dev-dependencies.automod] +version = "1" + +[dev-dependencies.insta] +version = "1" + +[dev-dependencies.ref-cast] +version = "1" + +[dev-dependencies.rustversion] +version = "1" + +[dev-dependencies.syn-test-suite] +version = "0" + +[dev-dependencies.termcolor] +version = "1" + +[target."cfg(not(miri))".dev-dependencies.flate2] +version = "1" + +[target."cfg(not(miri))".dev-dependencies.rayon] +version = "1" + +[target."cfg(not(miri))".dev-dependencies.reqwest] +version = "0.12" +features = ["blocking"] + +[target."cfg(not(miri))".dev-dependencies.tar] +version = "0.4.16" + +[target."cfg(not(miri))".dev-dependencies.walkdir] +version = "2.3.2" diff --git a/vendor/syn/LICENSE-APACHE b/vendor/syn/LICENSE-APACHE new file mode 100644 index 00000000000000..1b5ec8b78e237b --- /dev/null +++ b/vendor/syn/LICENSE-APACHE @@ -0,0 +1,176 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS diff --git a/vendor/syn/LICENSE-MIT b/vendor/syn/LICENSE-MIT new file mode 100644 index 00000000000000..31aa79387f27e7 --- /dev/null +++ b/vendor/syn/LICENSE-MIT @@ -0,0 +1,23 @@ +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. 
diff --git a/vendor/syn/README.md b/vendor/syn/README.md new file mode 100644 index 00000000000000..16a393b9f0dc6a --- /dev/null +++ b/vendor/syn/README.md @@ -0,0 +1,284 @@ +Parser for Rust source code +=========================== + +[<img alt="github" src="https://img.shields.io/badge/github-dtolnay/syn-8da0cb?style=for-the-badge&labelColor=555555&logo=github" height="20">](https://github.com/dtolnay/syn) +[<img alt="crates.io" src="https://img.shields.io/crates/v/syn.svg?style=for-the-badge&color=fc8d62&logo=rust" height="20">](https://crates.io/crates/syn) +[<img alt="docs.rs" src="https://img.shields.io/badge/docs.rs-syn-66c2a5?style=for-the-badge&labelColor=555555&logo=docs.rs" height="20">](https://docs.rs/syn) +[<img alt="build status" src="https://img.shields.io/github/actions/workflow/status/dtolnay/syn/ci.yml?branch=master&style=for-the-badge" height="20">](https://github.com/dtolnay/syn/actions?query=branch%3Amaster) + +Syn is a parsing library for parsing a stream of Rust tokens into a syntax tree +of Rust source code. + +Currently this library is geared toward use in Rust procedural macros, but +contains some APIs that may be useful more generally. + +- **Data structures** — Syn provides a complete syntax tree that can represent + any valid Rust source code. The syntax tree is rooted at [`syn::File`] which + represents a full source file, but there are other entry points that may be + useful to procedural macros including [`syn::Item`], [`syn::Expr`] and + [`syn::Type`]. + +- **Derives** — Of particular interest to derive macros is [`syn::DeriveInput`] + which is any of the three legal input items to a derive macro. An example + below shows using this type in a library that can derive implementations of a + user-defined trait. + +- **Parsing** — Parsing in Syn is built around [parser functions] with the + signature `fn(ParseStream) -> Result<T>`. Every syntax tree node defined by + Syn is individually parsable and may be used as a building block for custom + syntaxes, or you may dream up your own brand new syntax without involving any + of our syntax tree types. + +- **Location information** — Every token parsed by Syn is associated with a + `Span` that tracks line and column information back to the source of that + token. These spans allow a procedural macro to display detailed error messages + pointing to all the right places in the user's code. There is an example of + this below. + +- **Feature flags** — Functionality is aggressively feature gated so your + procedural macros enable only what they need, and do not pay in compile time + for all the rest. + +[`syn::File`]: https://docs.rs/syn/2.0/syn/struct.File.html +[`syn::Item`]: https://docs.rs/syn/2.0/syn/enum.Item.html +[`syn::Expr`]: https://docs.rs/syn/2.0/syn/enum.Expr.html +[`syn::Type`]: https://docs.rs/syn/2.0/syn/enum.Type.html +[`syn::DeriveInput`]: https://docs.rs/syn/2.0/syn/struct.DeriveInput.html +[parser functions]: https://docs.rs/syn/2.0/syn/parse/index.html + +*Version requirement: Syn supports rustc 1.61 and up.* + +[*Release notes*](https://github.com/dtolnay/syn/releases) + +<br> + +## Resources + +The best way to learn about procedural macros is by writing some. Consider +working through [this procedural macro workshop][workshop] to get familiar with +the different types of procedural macros. The workshop contains relevant links +into the Syn documentation as you work through each project. 
+ +[workshop]: https://github.com/dtolnay/proc-macro-workshop + +<br> + +## Example of a derive macro + +The canonical derive macro using Syn looks like this. We write an ordinary Rust +function tagged with a `proc_macro_derive` attribute and the name of the trait +we are deriving. Any time that derive appears in the user's code, the Rust +compiler passes their data structure as tokens into our macro. We get to execute +arbitrary Rust code to figure out what to do with those tokens, then hand some +tokens back to the compiler to compile into the user's crate. + +[`TokenStream`]: https://doc.rust-lang.org/proc_macro/struct.TokenStream.html + +```toml +[dependencies] +syn = "2.0" +quote = "1.0" + +[lib] +proc-macro = true +``` + +```rust +use proc_macro::TokenStream; +use quote::quote; +use syn::{parse_macro_input, DeriveInput}; + +#[proc_macro_derive(MyMacro)] +pub fn my_macro(input: TokenStream) -> TokenStream { + // Parse the input tokens into a syntax tree + let input = parse_macro_input!(input as DeriveInput); + + // Build the output, possibly using quasi-quotation + let expanded = quote! { + // ... + }; + + // Hand the output tokens back to the compiler + TokenStream::from(expanded) +} +``` + +The [`heapsize`] example directory shows a complete working implementation of a +derive macro. The example derives a `HeapSize` trait which computes an estimate +of the amount of heap memory owned by a value. + +[`heapsize`]: examples/heapsize + +```rust +pub trait HeapSize { + /// Total number of bytes of heap memory owned by `self`. + fn heap_size_of_children(&self) -> usize; +} +``` + +The derive macro allows users to write `#[derive(HeapSize)]` on data structures +in their program. + +```rust +#[derive(HeapSize)] +struct Demo<'a, T: ?Sized> { + a: Box<T>, + b: u8, + c: &'a str, + d: String, +} +``` + +<br> + +## Spans and error reporting + +The token-based procedural macro API provides great control over where the +compiler's error messages are displayed in user code. Consider the error the +user sees if one of their field types does not implement `HeapSize`. + +```rust +#[derive(HeapSize)] +struct Broken { + ok: String, + bad: std::thread::Thread, +} +``` + +By tracking span information all the way through the expansion of a procedural +macro as shown in the `heapsize` example, token-based macros in Syn are able to +trigger errors that directly pinpoint the source of the problem. + +```console +error[E0277]: the trait bound `std::thread::Thread: HeapSize` is not satisfied + --> src/main.rs:7:5 + | +7 | bad: std::thread::Thread, + | ^^^^^^^^^^^^^^^^^^^^^^^^ the trait `HeapSize` is not implemented for `std::thread::Thread` +``` + +<br> + +## Parsing a custom syntax + +The [`lazy-static`] example directory shows the implementation of a +`functionlike!(...)` procedural macro in which the input tokens are parsed using +Syn's parsing API. + +[`lazy-static`]: examples/lazy-static + +The example reimplements the popular `lazy_static` crate from crates.io as a +procedural macro. + +```rust +lazy_static! { + static ref USERNAME: Regex = Regex::new("^[a-z0-9_-]{3,16}$").unwrap(); +} +``` + +The implementation shows how to trigger custom warnings and error messages on +the macro input. 
+ +```console +warning: come on, pick a more creative name + --> src/main.rs:10:16 + | +10 | static ref FOO: String = "lazy_static".to_owned(); + | ^^^ +``` + +<br> + +## Testing + +When testing macros, we often care not just that the macro can be used +successfully but also that when the macro is provided with invalid input it +produces maximally helpful error messages. Consider using the [`trybuild`] crate +to write tests for errors that are emitted by your macro or errors detected by +the Rust compiler in the expanded code following misuse of the macro. Such tests +help avoid regressions from later refactors that mistakenly make an error no +longer trigger or be less helpful than it used to be. + +[`trybuild`]: https://github.com/dtolnay/trybuild + +<br> + +## Debugging + +When developing a procedural macro it can be helpful to look at what the +generated code looks like. Use `cargo rustc -- -Zunstable-options +--pretty=expanded` or the [`cargo expand`] subcommand. + +[`cargo expand`]: https://github.com/dtolnay/cargo-expand + +To show the expanded code for some crate that uses your procedural macro, run +`cargo expand` from that crate. To show the expanded code for one of your own +test cases, run `cargo expand --test the_test_case` where the last argument is +the name of the test file without the `.rs` extension. + +This write-up by Brandon W Maister discusses debugging in more detail: +[Debugging Rust's new Custom Derive system][debugging]. + +[debugging]: https://quodlibetor.github.io/posts/debugging-rusts-new-custom-derive-system/ + +<br> + +## Optional features + +Syn puts a lot of functionality behind optional features in order to optimize +compile time for the most common use cases. The following features are +available. + +- **`derive`** *(enabled by default)* — Data structures for representing the + possible input to a derive macro, including structs and enums and types. +- **`full`** — Data structures for representing the syntax tree of all valid + Rust source code, including items and expressions. +- **`parsing`** *(enabled by default)* — Ability to parse input tokens into a + syntax tree node of a chosen type. +- **`printing`** *(enabled by default)* — Ability to print a syntax tree node as + tokens of Rust source code. +- **`visit`** — Trait for traversing a syntax tree. +- **`visit-mut`** — Trait for traversing and mutating in place a syntax tree. +- **`fold`** — Trait for transforming an owned syntax tree. +- **`clone-impls`** *(enabled by default)* — Clone impls for all syntax tree + types. +- **`extra-traits`** — Debug, Eq, PartialEq, Hash impls for all syntax tree + types. +- **`proc-macro`** *(enabled by default)* — Runtime dependency on the dynamic + library libproc_macro from rustc toolchain. + +<br> + +## Proc macro shim + +Syn operates on the token representation provided by the [proc-macro2] crate +from crates.io rather than using the compiler's built in proc-macro crate +directly. This enables code using Syn to execute outside of the context of a +procedural macro, such as in unit tests or build.rs, and we avoid needing +incompatible ecosystems for proc macros vs non-macro use cases. + +In general all of your code should be written against proc-macro2 rather than +proc-macro. The one exception is in the signatures of procedural macro entry +points, which are required by the language to use `proc_macro::TokenStream`. + +The proc-macro2 crate will automatically detect and use the compiler's data +structures when a procedural macro is active. 
+ +[proc-macro2]: https://docs.rs/proc-macro2/1.0/proc_macro2/ + +<br> + +#### License + +<sup> +Licensed under either of <a href="LICENSE-APACHE">Apache License, Version +2.0</a> or <a href="LICENSE-MIT">MIT license</a> at your option. +</sup> + +<br> + +<sub> +Unless you explicitly state otherwise, any contribution intentionally submitted +for inclusion in this crate by you, as defined in the Apache-2.0 license, shall +be dual licensed as above, without any additional terms or conditions. +</sub> diff --git a/vendor/syn/benches/file.rs b/vendor/syn/benches/file.rs new file mode 100644 index 00000000000000..6167488c9c5500 --- /dev/null +++ b/vendor/syn/benches/file.rs @@ -0,0 +1,59 @@ +// $ cargo bench --features full,test --bench file + +#![feature(rustc_private, test)] +#![recursion_limit = "1024"] +#![allow( + clippy::elidable_lifetime_names, + clippy::items_after_statements, + clippy::manual_let_else, + clippy::match_like_matches_macro, + clippy::missing_panics_doc, + clippy::must_use_candidate, + clippy::needless_lifetimes, + clippy::uninlined_format_args +)] + +extern crate test; + +#[macro_use] +#[path = "../tests/macros/mod.rs"] +mod macros; + +#[allow(dead_code)] +#[path = "../tests/repo/mod.rs"] +mod repo; + +use proc_macro2::{Span, TokenStream}; +use std::fs; +use std::str::FromStr; +use syn::parse::{ParseStream, Parser}; +use test::Bencher; + +const FILE: &str = "tests/rust/library/core/src/str/mod.rs"; + +fn get_tokens() -> TokenStream { + repo::clone_rust(); + let content = fs::read_to_string(FILE).unwrap(); + TokenStream::from_str(&content).unwrap() +} + +#[bench] +fn baseline(b: &mut Bencher) { + let tokens = get_tokens(); + b.iter(|| drop(tokens.clone())); +} + +#[bench] +fn create_token_buffer(b: &mut Bencher) { + let tokens = get_tokens(); + fn immediate_fail(_input: ParseStream) -> syn::Result<()> { + Err(syn::Error::new(Span::call_site(), "")) + } + b.iter(|| immediate_fail.parse2(tokens.clone())); +} + +#[bench] +fn parse_file(b: &mut Bencher) { + let tokens = get_tokens(); + b.iter(|| syn::parse2::<syn::File>(tokens.clone())); +} diff --git a/vendor/syn/benches/rust.rs b/vendor/syn/benches/rust.rs new file mode 100644 index 00000000000000..ecb9c56fa314e3 --- /dev/null +++ b/vendor/syn/benches/rust.rs @@ -0,0 +1,194 @@ +// $ cargo bench --features full,test --bench rust +// +// Syn only, useful for profiling: +// $ RUSTFLAGS='--cfg syn_only' cargo build --release --features full,test --bench rust + +#![cfg_attr(not(syn_only), feature(rustc_private))] +#![recursion_limit = "1024"] +#![allow( + clippy::arc_with_non_send_sync, + clippy::cast_lossless, + clippy::elidable_lifetime_names, + clippy::let_underscore_untyped, + clippy::manual_let_else, + clippy::match_like_matches_macro, + clippy::needless_lifetimes, + clippy::uninlined_format_args, + clippy::unnecessary_wraps +)] + +#[macro_use] +#[path = "../tests/macros/mod.rs"] +mod macros; + +#[allow(dead_code)] +#[path = "../tests/repo/mod.rs"] +mod repo; + +use std::fs; +use std::path::Path; +use std::time::{Duration, Instant}; + +#[cfg(not(syn_only))] +mod tokenstream_parse { + use proc_macro2::TokenStream; + use std::path::Path; + use std::str::FromStr; + + pub fn bench(_path: &Path, content: &str) -> Result<(), ()> { + TokenStream::from_str(content).map(drop).map_err(drop) + } +} + +mod syn_parse { + use std::path::Path; + + pub fn bench(_path: &Path, content: &str) -> Result<(), ()> { + syn::parse_file(content).map(drop).map_err(drop) + } +} + +#[cfg(not(syn_only))] +mod librustc_parse { + extern crate 
rustc_data_structures; + extern crate rustc_driver; + extern crate rustc_error_messages; + extern crate rustc_errors; + extern crate rustc_parse; + extern crate rustc_session; + extern crate rustc_span; + + use crate::repo; + use rustc_errors::emitter::Emitter; + use rustc_errors::registry::Registry; + use rustc_errors::translation::Translator; + use rustc_errors::{DiagCtxt, DiagInner}; + use rustc_parse::lexer::StripTokens; + use rustc_session::parse::ParseSess; + use rustc_span::source_map::{FilePathMapping, SourceMap}; + use rustc_span::FileName; + use std::path::Path; + use std::sync::Arc; + + pub fn bench(path: &Path, content: &str) -> Result<(), ()> { + struct SilentEmitter; + + impl Emitter for SilentEmitter { + fn emit_diagnostic(&mut self, _diag: DiagInner, _registry: &Registry) {} + fn source_map(&self) -> Option<&SourceMap> { + None + } + fn translator(&self) -> &Translator { + panic!("silent emitter attempted to translate a diagnostic"); + } + } + + let edition = repo::edition(path).parse().unwrap(); + rustc_span::create_session_if_not_set_then(edition, |_| { + let source_map = Arc::new(SourceMap::new(FilePathMapping::empty())); + let emitter = Box::new(SilentEmitter); + let handler = DiagCtxt::new(emitter); + let sess = ParseSess::with_dcx(handler, source_map); + let name = FileName::Custom("bench".to_owned()); + let mut parser = rustc_parse::new_parser_from_source_str( + &sess, + name, + content.to_owned(), + StripTokens::ShebangAndFrontmatter, + ) + .unwrap(); + if let Err(diagnostic) = parser.parse_crate_mod() { + diagnostic.cancel(); + return Err(()); + } + Ok(()) + }) + } +} + +#[cfg(not(syn_only))] +mod read_from_disk { + use std::path::Path; + + pub fn bench(_path: &Path, content: &str) -> Result<(), ()> { + let _ = content; + Ok(()) + } +} + +fn exec(mut codepath: impl FnMut(&Path, &str) -> Result<(), ()>) -> Duration { + let begin = Instant::now(); + let mut success = 0; + let mut total = 0; + + ["tests/rust/compiler", "tests/rust/library"] + .iter() + .flat_map(|dir| { + walkdir::WalkDir::new(dir) + .into_iter() + .filter_entry(repo::base_dir_filter) + }) + .for_each(|entry| { + let entry = entry.unwrap(); + let path = entry.path(); + if path.is_dir() { + return; + } + let content = fs::read_to_string(path).unwrap(); + let ok = codepath(path, &content).is_ok(); + success += ok as usize; + total += 1; + if !ok { + eprintln!("FAIL {}", path.display()); + } + }); + + assert_eq!(success, total); + begin.elapsed() +} + +fn main() { + repo::clone_rust(); + + macro_rules! 
testcases { + ($($(#[$cfg:meta])* $name:ident,)*) => { + [ + $( + $(#[$cfg])* + (stringify!($name), $name::bench as fn(&Path, &str) -> Result<(), ()>), + )* + ] + }; + } + + #[cfg(not(syn_only))] + { + let mut lines = 0; + let mut files = 0; + exec(|_path, content| { + lines += content.lines().count(); + files += 1; + Ok(()) + }); + eprintln!("\n{} lines in {} files", lines, files); + } + + for (name, f) in testcases!( + #[cfg(not(syn_only))] + read_from_disk, + #[cfg(not(syn_only))] + tokenstream_parse, + syn_parse, + #[cfg(not(syn_only))] + librustc_parse, + ) { + eprint!("{:20}", format!("{}:", name)); + let elapsed = exec(f); + eprintln!( + "elapsed={}.{:03}s", + elapsed.as_secs(), + elapsed.subsec_millis(), + ); + } + eprintln!(); +} diff --git a/vendor/syn/src/attr.rs b/vendor/syn/src/attr.rs new file mode 100644 index 00000000000000..a543af5597bd1a --- /dev/null +++ b/vendor/syn/src/attr.rs @@ -0,0 +1,836 @@ +#[cfg(feature = "parsing")] +use crate::error::Error; +#[cfg(feature = "parsing")] +use crate::error::Result; +use crate::expr::Expr; +use crate::mac::MacroDelimiter; +#[cfg(feature = "parsing")] +use crate::meta::{self, ParseNestedMeta}; +#[cfg(feature = "parsing")] +use crate::parse::{Parse, ParseStream, Parser}; +use crate::path::Path; +use crate::token; +use proc_macro2::TokenStream; +#[cfg(feature = "printing")] +use std::iter; +#[cfg(feature = "printing")] +use std::slice; + +ast_struct! { + /// An attribute, like `#[repr(transparent)]`. + /// + /// <br> + /// + /// # Syntax + /// + /// Rust has six types of attributes. + /// + /// - Outer attributes like `#[repr(transparent)]`. These appear outside or + /// in front of the item they describe. + /// + /// - Inner attributes like `#![feature(proc_macro)]`. These appear inside + /// of the item they describe, usually a module. + /// + /// - Outer one-line doc comments like `/// Example`. + /// + /// - Inner one-line doc comments like `//! Please file an issue`. + /// + /// - Outer documentation blocks `/** Example */`. + /// + /// - Inner documentation blocks `/*! Please file an issue */`. + /// + /// The `style` field of type `AttrStyle` distinguishes whether an attribute + /// is outer or inner. + /// + /// Every attribute has a `path` that indicates the intended interpretation + /// of the rest of the attribute's contents. The path and the optional + /// additional contents are represented together in the `meta` field of the + /// attribute in three possible varieties: + /// + /// - Meta::Path — attributes whose information content conveys just a + /// path, for example the `#[test]` attribute. + /// + /// - Meta::List — attributes that carry arbitrary tokens after the + /// path, surrounded by a delimiter (parenthesis, bracket, or brace). For + /// example `#[derive(Copy)]` or `#[precondition(x < 5)]`. + /// + /// - Meta::NameValue — attributes with an `=` sign after the path, + /// followed by a Rust expression. For example `#[path = + /// "sys/windows.rs"]`. + /// + /// All doc comments are represented in the NameValue style with a path of + /// "doc", as this is how they are processed by the compiler and by + /// `macro_rules!` macros. 
+ /// + /// ```text + /// #[derive(Copy, Clone)] + /// ~~~~~~Path + /// ^^^^^^^^^^^^^^^^^^^Meta::List + /// + /// #[path = "sys/windows.rs"] + /// ~~~~Path + /// ^^^^^^^^^^^^^^^^^^^^^^^Meta::NameValue + /// + /// #[test] + /// ^^^^Meta::Path + /// ``` + /// + /// <br> + /// + /// # Parsing from tokens to Attribute + /// + /// This type does not implement the [`Parse`] trait and thus cannot be + /// parsed directly by [`ParseStream::parse`]. Instead use + /// [`ParseStream::call`] with one of the two parser functions + /// [`Attribute::parse_outer`] or [`Attribute::parse_inner`] depending on + /// which you intend to parse. + /// + /// [`Parse`]: crate::parse::Parse + /// [`ParseStream::parse`]: crate::parse::ParseBuffer::parse + /// [`ParseStream::call`]: crate::parse::ParseBuffer::call + /// + /// ``` + /// use syn::{Attribute, Ident, Result, Token}; + /// use syn::parse::{Parse, ParseStream}; + /// + /// // Parses a unit struct with attributes. + /// // + /// // #[path = "s.tmpl"] + /// // struct S; + /// struct UnitStruct { + /// attrs: Vec<Attribute>, + /// struct_token: Token![struct], + /// name: Ident, + /// semi_token: Token![;], + /// } + /// + /// impl Parse for UnitStruct { + /// fn parse(input: ParseStream) -> Result<Self> { + /// Ok(UnitStruct { + /// attrs: input.call(Attribute::parse_outer)?, + /// struct_token: input.parse()?, + /// name: input.parse()?, + /// semi_token: input.parse()?, + /// }) + /// } + /// } + /// ``` + /// + /// <p><br></p> + /// + /// # Parsing from Attribute to structured arguments + /// + /// The grammar of attributes in Rust is very flexible, which makes the + /// syntax tree not that useful on its own. In particular, arguments of the + /// `Meta::List` variety of attribute are held in an arbitrary `tokens: + /// TokenStream`. Macros are expected to check the `path` of the attribute, + /// decide whether they recognize it, and then parse the remaining tokens + /// according to whatever grammar they wish to require for that kind of + /// attribute. Use [`parse_args()`] to parse those tokens into the expected + /// data structure. + /// + /// [`parse_args()`]: Attribute::parse_args + /// + /// <p><br></p> + /// + /// # Doc comments + /// + /// The compiler transforms doc comments, such as `/// comment` and `/*! + /// comment */`, into attributes before macros are expanded. Each comment is + /// expanded into an attribute of the form `#[doc = r"comment"]`. + /// + /// As an example, the following `mod` items are expanded identically: + /// + /// ``` + /// # use syn::{ItemMod, parse_quote}; + /// let doc: ItemMod = parse_quote! { + /// /// Single line doc comments + /// /// We write so many! + /// /** + /// * Multi-line comments... + /// * May span many lines + /// */ + /// mod example { + /// //! Of course, they can be inner too + /// /*! And fit in a single line */ + /// } + /// }; + /// let attr: ItemMod = parse_quote! { + /// #[doc = r" Single line doc comments"] + /// #[doc = r" We write so many!"] + /// #[doc = r" + /// * Multi-line comments... 
+ /// * May span many lines + /// "] + /// mod example { + /// #![doc = r" Of course, they can be inner too"] + /// #![doc = r" And fit in a single line "] + /// } + /// }; + /// assert_eq!(doc, attr); + /// ``` + #[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] + pub struct Attribute { + pub pound_token: Token![#], + pub style: AttrStyle, + pub bracket_token: token::Bracket, + pub meta: Meta, + } +} + +impl Attribute { + /// Returns the path that identifies the interpretation of this attribute. + /// + /// For example this would return the `test` in `#[test]`, the `derive` in + /// `#[derive(Copy)]`, and the `path` in `#[path = "sys/windows.rs"]`. + pub fn path(&self) -> &Path { + self.meta.path() + } + + /// Parse the arguments to the attribute as a syntax tree. + /// + /// This is similar to pulling out the `TokenStream` from `Meta::List` and + /// doing `syn::parse2::<T>(meta_list.tokens)`, except that using + /// `parse_args` the error message has a more useful span when `tokens` is + /// empty. + /// + /// The surrounding delimiters are *not* included in the input to the + /// parser. + /// + /// ```text + /// #[my_attr(value < 5)] + /// ^^^^^^^^^ what gets parsed + /// ``` + /// + /// # Example + /// + /// ``` + /// use syn::{parse_quote, Attribute, Expr}; + /// + /// let attr: Attribute = parse_quote! { + /// #[precondition(value < 5)] + /// }; + /// + /// if attr.path().is_ident("precondition") { + /// let precondition: Expr = attr.parse_args()?; + /// // ... + /// } + /// # anyhow::Ok(()) + /// ``` + #[cfg(feature = "parsing")] + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + pub fn parse_args<T: Parse>(&self) -> Result<T> { + self.parse_args_with(T::parse) + } + + /// Parse the arguments to the attribute using the given parser. + /// + /// # Example + /// + /// ``` + /// use syn::{parse_quote, Attribute}; + /// + /// let attr: Attribute = parse_quote! { + /// #[inception { #[brrrrrrraaaaawwwwrwrrrmrmrmmrmrmmmmm] }] + /// }; + /// + /// let bwom = attr.parse_args_with(Attribute::parse_outer)?; + /// + /// // Attribute does not have a Parse impl, so we couldn't directly do: + /// // let bwom: Attribute = attr.parse_args()?; + /// # anyhow::Ok(()) + /// ``` + #[cfg(feature = "parsing")] + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + pub fn parse_args_with<F: Parser>(&self, parser: F) -> Result<F::Output> { + match &self.meta { + Meta::Path(path) => Err(crate::error::new2( + path.segments.first().unwrap().ident.span(), + path.segments.last().unwrap().ident.span(), + format!( + "expected attribute arguments in parentheses: {}[{}(...)]", + parsing::DisplayAttrStyle(&self.style), + parsing::DisplayPath(path), + ), + )), + Meta::NameValue(meta) => Err(Error::new( + meta.eq_token.span, + format_args!( + "expected parentheses: {}[{}(...)]", + parsing::DisplayAttrStyle(&self.style), + parsing::DisplayPath(&meta.path), + ), + )), + Meta::List(meta) => meta.parse_args_with(parser), + } + } + + /// Parse the arguments to the attribute, expecting it to follow the + /// conventional structure used by most of Rust's built-in attributes. + /// + /// The [*Meta Item Attribute Syntax*][syntax] section in the Rust reference + /// explains the convention in more detail. Not all attributes follow this + /// convention, so [`parse_args()`][Self::parse_args] is available if you + /// need to parse arbitrarily goofy attribute syntax. 
+ /// + /// [syntax]: https://doc.rust-lang.org/reference/attributes.html#meta-item-attribute-syntax + /// + /// # Example + /// + /// We'll parse a struct, and then parse some of Rust's `#[repr]` attribute + /// syntax. + /// + /// ``` + /// use syn::{parenthesized, parse_quote, token, ItemStruct, LitInt}; + /// + /// let input: ItemStruct = parse_quote! { + /// #[repr(C, align(4))] + /// pub struct MyStruct(u16, u32); + /// }; + /// + /// let mut repr_c = false; + /// let mut repr_transparent = false; + /// let mut repr_align = None::<usize>; + /// let mut repr_packed = None::<usize>; + /// for attr in &input.attrs { + /// if attr.path().is_ident("repr") { + /// attr.parse_nested_meta(|meta| { + /// // #[repr(C)] + /// if meta.path.is_ident("C") { + /// repr_c = true; + /// return Ok(()); + /// } + /// + /// // #[repr(transparent)] + /// if meta.path.is_ident("transparent") { + /// repr_transparent = true; + /// return Ok(()); + /// } + /// + /// // #[repr(align(N))] + /// if meta.path.is_ident("align") { + /// let content; + /// parenthesized!(content in meta.input); + /// let lit: LitInt = content.parse()?; + /// let n: usize = lit.base10_parse()?; + /// repr_align = Some(n); + /// return Ok(()); + /// } + /// + /// // #[repr(packed)] or #[repr(packed(N))], omitted N means 1 + /// if meta.path.is_ident("packed") { + /// if meta.input.peek(token::Paren) { + /// let content; + /// parenthesized!(content in meta.input); + /// let lit: LitInt = content.parse()?; + /// let n: usize = lit.base10_parse()?; + /// repr_packed = Some(n); + /// } else { + /// repr_packed = Some(1); + /// } + /// return Ok(()); + /// } + /// + /// Err(meta.error("unrecognized repr")) + /// })?; + /// } + /// } + /// # anyhow::Ok(()) + /// ``` + /// + /// # Alternatives + /// + /// In some cases, for attributes which have nested layers of structured + /// content, the following less flexible approach might be more convenient: + /// + /// ``` + /// # use syn::{parse_quote, ItemStruct}; + /// # + /// # let input: ItemStruct = parse_quote! { + /// # #[repr(C, align(4))] + /// # pub struct MyStruct(u16, u32); + /// # }; + /// # + /// use syn::punctuated::Punctuated; + /// use syn::{parenthesized, token, Error, LitInt, Meta, Token}; + /// + /// let mut repr_c = false; + /// let mut repr_transparent = false; + /// let mut repr_align = None::<usize>; + /// let mut repr_packed = None::<usize>; + /// for attr in &input.attrs { + /// if attr.path().is_ident("repr") { + /// let nested = attr.parse_args_with(Punctuated::<Meta, Token![,]>::parse_terminated)?; + /// for meta in nested { + /// match meta { + /// // #[repr(C)] + /// Meta::Path(path) if path.is_ident("C") => { + /// repr_c = true; + /// } + /// + /// // #[repr(align(N))] + /// Meta::List(meta) if meta.path.is_ident("align") => { + /// let lit: LitInt = meta.parse_args()?; + /// let n: usize = lit.base10_parse()?; + /// repr_align = Some(n); + /// } + /// + /// /* ... */ + /// + /// _ => { + /// return Err(Error::new_spanned(meta, "unrecognized repr")); + /// } + /// } + /// } + /// } + /// } + /// # Ok(()) + /// ``` + #[cfg(feature = "parsing")] + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + pub fn parse_nested_meta( + &self, + logic: impl FnMut(ParseNestedMeta) -> Result<()>, + ) -> Result<()> { + self.parse_args_with(meta::parser(logic)) + } + + /// Parses zero or more outer attributes from the stream. + /// + /// # Example + /// + /// See + /// [*Parsing from tokens to Attribute*](#parsing-from-tokens-to-attribute). 
+ #[cfg(feature = "parsing")] + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + pub fn parse_outer(input: ParseStream) -> Result<Vec<Self>> { + let mut attrs = Vec::new(); + while input.peek(Token![#]) { + attrs.push(input.call(parsing::single_parse_outer)?); + } + Ok(attrs) + } + + /// Parses zero or more inner attributes from the stream. + /// + /// # Example + /// + /// See + /// [*Parsing from tokens to Attribute*](#parsing-from-tokens-to-attribute). + #[cfg(feature = "parsing")] + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + pub fn parse_inner(input: ParseStream) -> Result<Vec<Self>> { + let mut attrs = Vec::new(); + parsing::parse_inner(input, &mut attrs)?; + Ok(attrs) + } +} + +ast_enum! { + /// Distinguishes between attributes that decorate an item and attributes + /// that are contained within an item. + /// + /// # Outer attributes + /// + /// - `#[repr(transparent)]` + /// - `/// # Example` + /// - `/** Please file an issue */` + /// + /// # Inner attributes + /// + /// - `#![feature(proc_macro)]` + /// - `//! # Example` + /// - `/*! Please file an issue */` + #[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] + pub enum AttrStyle { + Outer, + Inner(Token![!]), + } +} + +ast_enum! { + /// Content of a compile-time structured attribute. + /// + /// ## Path + /// + /// A meta path is like the `test` in `#[test]`. + /// + /// ## List + /// + /// A meta list is like the `derive(Copy)` in `#[derive(Copy)]`. + /// + /// ## NameValue + /// + /// A name-value meta is like the `path = "..."` in `#[path = + /// "sys/windows.rs"]`. + /// + /// # Syntax tree enum + /// + /// This type is a [syntax tree enum]. + /// + /// [syntax tree enum]: crate::expr::Expr#syntax-tree-enums + #[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] + pub enum Meta { + Path(Path), + + /// A structured list within an attribute, like `derive(Copy, Clone)`. + List(MetaList), + + /// A name-value pair within an attribute, like `feature = "nightly"`. + NameValue(MetaNameValue), + } +} + +ast_struct! { + /// A structured list within an attribute, like `derive(Copy, Clone)`. + #[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] + pub struct MetaList { + pub path: Path, + pub delimiter: MacroDelimiter, + pub tokens: TokenStream, + } +} + +ast_struct! { + /// A name-value pair within an attribute, like `feature = "nightly"`. + #[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] + pub struct MetaNameValue { + pub path: Path, + pub eq_token: Token![=], + pub value: Expr, + } +} + +impl Meta { + /// Returns the path that begins this structured meta item. + /// + /// For example this would return the `test` in `#[test]`, the `derive` in + /// `#[derive(Copy)]`, and the `path` in `#[path = "sys/windows.rs"]`. + pub fn path(&self) -> &Path { + match self { + Meta::Path(path) => path, + Meta::List(meta) => &meta.path, + Meta::NameValue(meta) => &meta.path, + } + } + + /// Error if this is a `Meta::List` or `Meta::NameValue`. + #[cfg(feature = "parsing")] + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + pub fn require_path_only(&self) -> Result<&Path> { + let error_span = match self { + Meta::Path(path) => return Ok(path), + Meta::List(meta) => meta.delimiter.span().open(), + Meta::NameValue(meta) => meta.eq_token.span, + }; + Err(Error::new(error_span, "unexpected token in attribute")) + } + + /// Error if this is a `Meta::Path` or `Meta::NameValue`. 
+ #[cfg(feature = "parsing")] + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + pub fn require_list(&self) -> Result<&MetaList> { + match self { + Meta::List(meta) => Ok(meta), + Meta::Path(path) => Err(crate::error::new2( + path.segments.first().unwrap().ident.span(), + path.segments.last().unwrap().ident.span(), + format!( + "expected attribute arguments in parentheses: `{}(...)`", + parsing::DisplayPath(path), + ), + )), + Meta::NameValue(meta) => Err(Error::new(meta.eq_token.span, "expected `(`")), + } + } + + /// Error if this is a `Meta::Path` or `Meta::List`. + #[cfg(feature = "parsing")] + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + pub fn require_name_value(&self) -> Result<&MetaNameValue> { + match self { + Meta::NameValue(meta) => Ok(meta), + Meta::Path(path) => Err(crate::error::new2( + path.segments.first().unwrap().ident.span(), + path.segments.last().unwrap().ident.span(), + format!( + "expected a value for this attribute: `{} = ...`", + parsing::DisplayPath(path), + ), + )), + Meta::List(meta) => Err(Error::new(meta.delimiter.span().open(), "expected `=`")), + } + } +} + +impl MetaList { + /// See [`Attribute::parse_args`]. + #[cfg(feature = "parsing")] + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + pub fn parse_args<T: Parse>(&self) -> Result<T> { + self.parse_args_with(T::parse) + } + + /// See [`Attribute::parse_args_with`]. + #[cfg(feature = "parsing")] + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + pub fn parse_args_with<F: Parser>(&self, parser: F) -> Result<F::Output> { + let scope = self.delimiter.span().close(); + crate::parse::parse_scoped(parser, scope, self.tokens.clone()) + } + + /// See [`Attribute::parse_nested_meta`]. + #[cfg(feature = "parsing")] + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + pub fn parse_nested_meta( + &self, + logic: impl FnMut(ParseNestedMeta) -> Result<()>, + ) -> Result<()> { + self.parse_args_with(meta::parser(logic)) + } +} + +#[cfg(feature = "printing")] +pub(crate) trait FilterAttrs<'a> { + type Ret: Iterator<Item = &'a Attribute>; + + fn outer(self) -> Self::Ret; + #[cfg(feature = "full")] + fn inner(self) -> Self::Ret; +} + +#[cfg(feature = "printing")] +impl<'a> FilterAttrs<'a> for &'a [Attribute] { + type Ret = iter::Filter<slice::Iter<'a, Attribute>, fn(&&Attribute) -> bool>; + + fn outer(self) -> Self::Ret { + fn is_outer(attr: &&Attribute) -> bool { + match attr.style { + AttrStyle::Outer => true, + AttrStyle::Inner(_) => false, + } + } + self.iter().filter(is_outer) + } + + #[cfg(feature = "full")] + fn inner(self) -> Self::Ret { + fn is_inner(attr: &&Attribute) -> bool { + match attr.style { + AttrStyle::Inner(_) => true, + AttrStyle::Outer => false, + } + } + self.iter().filter(is_inner) + } +} + +impl From<Path> for Meta { + fn from(meta: Path) -> Meta { + Meta::Path(meta) + } +} + +impl From<MetaList> for Meta { + fn from(meta: MetaList) -> Meta { + Meta::List(meta) + } +} + +impl From<MetaNameValue> for Meta { + fn from(meta: MetaNameValue) -> Meta { + Meta::NameValue(meta) + } +} + +#[cfg(feature = "parsing")] +pub(crate) mod parsing { + use crate::attr::{AttrStyle, Attribute, Meta, MetaList, MetaNameValue}; + use crate::error::Result; + use crate::expr::{Expr, ExprLit}; + use crate::lit::Lit; + use crate::parse::discouraged::Speculative as _; + use crate::parse::{Parse, ParseStream}; + use crate::path::Path; + use crate::{mac, token}; + use proc_macro2::Ident; + use std::fmt::{self, Display}; + + pub(crate) fn parse_inner(input: ParseStream, attrs: &mut Vec<Attribute>) -> 
Result<()> { + while input.peek(Token![#]) && input.peek2(Token![!]) { + attrs.push(input.call(single_parse_inner)?); + } + Ok(()) + } + + pub(crate) fn single_parse_inner(input: ParseStream) -> Result<Attribute> { + let content; + Ok(Attribute { + pound_token: input.parse()?, + style: AttrStyle::Inner(input.parse()?), + bracket_token: bracketed!(content in input), + meta: content.parse()?, + }) + } + + pub(crate) fn single_parse_outer(input: ParseStream) -> Result<Attribute> { + let content; + Ok(Attribute { + pound_token: input.parse()?, + style: AttrStyle::Outer, + bracket_token: bracketed!(content in input), + meta: content.parse()?, + }) + } + + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + impl Parse for Meta { + fn parse(input: ParseStream) -> Result<Self> { + let path = parse_outermost_meta_path(input)?; + parse_meta_after_path(path, input) + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + impl Parse for MetaList { + fn parse(input: ParseStream) -> Result<Self> { + let path = parse_outermost_meta_path(input)?; + parse_meta_list_after_path(path, input) + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + impl Parse for MetaNameValue { + fn parse(input: ParseStream) -> Result<Self> { + let path = parse_outermost_meta_path(input)?; + parse_meta_name_value_after_path(path, input) + } + } + + // Unlike meta::parse_meta_path which accepts arbitrary keywords in the path, + // only the `unsafe` keyword is accepted as an attribute's outermost path. + fn parse_outermost_meta_path(input: ParseStream) -> Result<Path> { + if input.peek(Token![unsafe]) { + let unsafe_token: Token![unsafe] = input.parse()?; + Ok(Path::from(Ident::new("unsafe", unsafe_token.span))) + } else { + Path::parse_mod_style(input) + } + } + + pub(crate) fn parse_meta_after_path(path: Path, input: ParseStream) -> Result<Meta> { + if input.peek(token::Paren) || input.peek(token::Bracket) || input.peek(token::Brace) { + parse_meta_list_after_path(path, input).map(Meta::List) + } else if input.peek(Token![=]) && !input.peek(Token![==]) && !input.peek(Token![=>]) { + parse_meta_name_value_after_path(path, input).map(Meta::NameValue) + } else { + Ok(Meta::Path(path)) + } + } + + fn parse_meta_list_after_path(path: Path, input: ParseStream) -> Result<MetaList> { + let (delimiter, tokens) = mac::parse_delimiter(input)?; + Ok(MetaList { + path, + delimiter, + tokens, + }) + } + + fn parse_meta_name_value_after_path(path: Path, input: ParseStream) -> Result<MetaNameValue> { + let eq_token: Token![=] = input.parse()?; + let ahead = input.fork(); + let lit: Option<Lit> = ahead.parse()?; + let value = if let (Some(lit), true) = (lit, ahead.is_empty()) { + input.advance_to(&ahead); + Expr::Lit(ExprLit { + attrs: Vec::new(), + lit, + }) + } else if input.peek(Token![#]) && input.peek2(token::Bracket) { + return Err(input.error("unexpected attribute inside of attribute")); + } else { + input.parse()? 
+ }; + Ok(MetaNameValue { + path, + eq_token, + value, + }) + } + + pub(super) struct DisplayAttrStyle<'a>(pub &'a AttrStyle); + + impl<'a> Display for DisplayAttrStyle<'a> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str(match self.0 { + AttrStyle::Outer => "#", + AttrStyle::Inner(_) => "#!", + }) + } + } + + pub(super) struct DisplayPath<'a>(pub &'a Path); + + impl<'a> Display for DisplayPath<'a> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + for (i, segment) in self.0.segments.iter().enumerate() { + if i > 0 || self.0.leading_colon.is_some() { + formatter.write_str("::")?; + } + write!(formatter, "{}", segment.ident)?; + } + Ok(()) + } + } +} + +#[cfg(feature = "printing")] +mod printing { + use crate::attr::{AttrStyle, Attribute, Meta, MetaList, MetaNameValue}; + use crate::path; + use crate::path::printing::PathStyle; + use proc_macro2::TokenStream; + use quote::ToTokens; + + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for Attribute { + fn to_tokens(&self, tokens: &mut TokenStream) { + self.pound_token.to_tokens(tokens); + if let AttrStyle::Inner(b) = &self.style { + b.to_tokens(tokens); + } + self.bracket_token.surround(tokens, |tokens| { + self.meta.to_tokens(tokens); + }); + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for Meta { + fn to_tokens(&self, tokens: &mut TokenStream) { + match self { + Meta::Path(path) => path::printing::print_path(tokens, path, PathStyle::Mod), + Meta::List(meta_list) => meta_list.to_tokens(tokens), + Meta::NameValue(meta_name_value) => meta_name_value.to_tokens(tokens), + } + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for MetaList { + fn to_tokens(&self, tokens: &mut TokenStream) { + path::printing::print_path(tokens, &self.path, PathStyle::Mod); + self.delimiter.surround(tokens, self.tokens.clone()); + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for MetaNameValue { + fn to_tokens(&self, tokens: &mut TokenStream) { + path::printing::print_path(tokens, &self.path, PathStyle::Mod); + self.eq_token.to_tokens(tokens); + self.value.to_tokens(tokens); + } + } +} diff --git a/vendor/syn/src/bigint.rs b/vendor/syn/src/bigint.rs new file mode 100644 index 00000000000000..66aaa93725401a --- /dev/null +++ b/vendor/syn/src/bigint.rs @@ -0,0 +1,66 @@ +use std::ops::{AddAssign, MulAssign}; + +// For implementing base10_digits() accessor on LitInt. +pub(crate) struct BigInt { + digits: Vec<u8>, +} + +impl BigInt { + pub(crate) fn new() -> Self { + BigInt { digits: Vec::new() } + } + + pub(crate) fn to_string(&self) -> String { + let mut repr = String::with_capacity(self.digits.len()); + + let mut has_nonzero = false; + for digit in self.digits.iter().rev() { + has_nonzero |= *digit != 0; + if has_nonzero { + repr.push((*digit + b'0') as char); + } + } + + if repr.is_empty() { + repr.push('0'); + } + + repr + } + + fn reserve_two_digits(&mut self) { + let len = self.digits.len(); + let desired = + len + !self.digits.ends_with(&[0, 0]) as usize + !self.digits.ends_with(&[0]) as usize; + self.digits.resize(desired, 0); + } +} + +impl AddAssign<u8> for BigInt { + // Assumes increment <16. + fn add_assign(&mut self, mut increment: u8) { + self.reserve_two_digits(); + + let mut i = 0; + while increment > 0 { + let sum = self.digits[i] + increment; + self.digits[i] = sum % 10; + increment = sum / 10; + i += 1; + } + } +} + +impl MulAssign<u8> for BigInt { + // Assumes base <=16. 
+ fn mul_assign(&mut self, base: u8) { + self.reserve_two_digits(); + + let mut carry = 0; + for digit in &mut self.digits { + let prod = *digit * base + carry; + *digit = prod % 10; + carry = prod / 10; + } + } +} diff --git a/vendor/syn/src/buffer.rs b/vendor/syn/src/buffer.rs new file mode 100644 index 00000000000000..b0f3148443d81c --- /dev/null +++ b/vendor/syn/src/buffer.rs @@ -0,0 +1,435 @@ +//! A stably addressed token buffer supporting efficient traversal based on a +//! cheaply copyable cursor. + +// This module is heavily commented as it contains most of the unsafe code in +// Syn, and caution should be used when editing it. The public-facing interface +// is 100% safe but the implementation is fragile internally. + +use crate::ext::TokenStreamExt as _; +use crate::Lifetime; +use proc_macro2::extra::DelimSpan; +use proc_macro2::{Delimiter, Group, Ident, Literal, Punct, Spacing, Span, TokenStream, TokenTree}; +use std::cmp::Ordering; +use std::marker::PhantomData; +use std::ptr; + +/// Internal type which is used instead of `TokenTree` to represent a token tree +/// within a `TokenBuffer`. +enum Entry { + // Mimicking types from proc-macro. + // Group entries contain the offset to the matching End entry. + Group(Group, usize), + Ident(Ident), + Punct(Punct), + Literal(Literal), + // End entries contain the offset (negative) to the start of the buffer, and + // offset (negative) to the matching Group entry. + End(isize, isize), +} + +/// A buffer that can be efficiently traversed multiple times, unlike +/// `TokenStream` which requires a deep copy in order to traverse more than +/// once. +pub struct TokenBuffer { + // NOTE: Do not implement clone on this - while the current design could be + // cloned, other designs which could be desirable may not be cloneable. + entries: Box<[Entry]>, +} + +impl TokenBuffer { + fn recursive_new(entries: &mut Vec<Entry>, stream: TokenStream) { + for tt in stream { + match tt { + TokenTree::Ident(ident) => entries.push(Entry::Ident(ident)), + TokenTree::Punct(punct) => entries.push(Entry::Punct(punct)), + TokenTree::Literal(literal) => entries.push(Entry::Literal(literal)), + TokenTree::Group(group) => { + let group_start_index = entries.len(); + entries.push(Entry::End(0, 0)); // we replace this below + Self::recursive_new(entries, group.stream()); + let group_end_index = entries.len(); + let group_offset = group_end_index - group_start_index; + entries.push(Entry::End( + -(group_end_index as isize), + -(group_offset as isize), + )); + entries[group_start_index] = Entry::Group(group, group_offset); + } + } + } + } + + /// Creates a `TokenBuffer` containing all the tokens from the input + /// `proc_macro::TokenStream`. + #[cfg(feature = "proc-macro")] + #[cfg_attr(docsrs, doc(cfg(feature = "proc-macro")))] + pub fn new(stream: proc_macro::TokenStream) -> Self { + Self::new2(stream.into()) + } + + /// Creates a `TokenBuffer` containing all the tokens from the input + /// `proc_macro2::TokenStream`. + pub fn new2(stream: TokenStream) -> Self { + let mut entries = Vec::new(); + Self::recursive_new(&mut entries, stream); + entries.push(Entry::End(-(entries.len() as isize), 0)); + Self { + entries: entries.into_boxed_slice(), + } + } + + /// Creates a cursor referencing the first token in the buffer and able to + /// traverse until the end of the buffer. + pub fn begin(&self) -> Cursor { + let ptr = self.entries.as_ptr(); + unsafe { Cursor::create(ptr, ptr.add(self.entries.len() - 1)) } + } +} + +/// A cheaply copyable cursor into a `TokenBuffer`. 
+/// +/// This cursor holds a shared reference into the immutable data which is used +/// internally to represent a `TokenStream`, and can be efficiently manipulated +/// and copied around. +/// +/// An empty `Cursor` can be created directly, or one may create a `TokenBuffer` +/// object and get a cursor to its first token with `begin()`. +pub struct Cursor<'a> { + // The current entry which the `Cursor` is pointing at. + ptr: *const Entry, + // This is the only `Entry::End` object which this cursor is allowed to + // point at. All other `End` objects are skipped over in `Cursor::create`. + scope: *const Entry, + // Cursor is covariant in 'a. This field ensures that our pointers are still + // valid. + marker: PhantomData<&'a Entry>, +} + +impl<'a> Cursor<'a> { + /// Creates a cursor referencing a static empty TokenStream. + pub fn empty() -> Self { + // It's safe in this situation for us to put an `Entry` object in global + // storage, despite it not actually being safe to send across threads + // (`Ident` is a reference into a thread-local table). This is because + // this entry never includes a `Ident` object. + // + // This wrapper struct allows us to break the rules and put a `Sync` + // object in global storage. + struct UnsafeSyncEntry(Entry); + unsafe impl Sync for UnsafeSyncEntry {} + static EMPTY_ENTRY: UnsafeSyncEntry = UnsafeSyncEntry(Entry::End(0, 0)); + + Cursor { + ptr: &EMPTY_ENTRY.0, + scope: &EMPTY_ENTRY.0, + marker: PhantomData, + } + } + + /// This create method intelligently exits non-explicitly-entered + /// `None`-delimited scopes when the cursor reaches the end of them, + /// allowing for them to be treated transparently. + unsafe fn create(mut ptr: *const Entry, scope: *const Entry) -> Self { + // NOTE: If we're looking at a `End`, we want to advance the cursor + // past it, unless `ptr == scope`, which means that we're at the edge of + // our cursor's scope. We should only have `ptr != scope` at the exit + // from None-delimited groups entered with `ignore_none`. + while let Entry::End(..) = unsafe { &*ptr } { + if ptr::eq(ptr, scope) { + break; + } + ptr = unsafe { ptr.add(1) }; + } + + Cursor { + ptr, + scope, + marker: PhantomData, + } + } + + /// Get the current entry. + fn entry(self) -> &'a Entry { + unsafe { &*self.ptr } + } + + /// Bump the cursor to point at the next token after the current one. This + /// is undefined behavior if the cursor is currently looking at an + /// `Entry::End`. + /// + /// If the cursor is looking at an `Entry::Group`, the bumped cursor will + /// point at the first token in the group (with the same scope end). + unsafe fn bump_ignore_group(self) -> Cursor<'a> { + unsafe { Cursor::create(self.ptr.offset(1), self.scope) } + } + + /// While the cursor is looking at a `None`-delimited group, move it to look + /// at the first token inside instead. If the group is empty, this will move + /// the cursor past the `None`-delimited group. + /// + /// WARNING: This mutates its argument. + fn ignore_none(&mut self) { + while let Entry::Group(group, _) = self.entry() { + if group.delimiter() == Delimiter::None { + unsafe { *self = self.bump_ignore_group() }; + } else { + break; + } + } + } + + /// Checks whether the cursor is currently pointing at the end of its valid + /// scope. + pub fn eof(self) -> bool { + // We're at eof if we're at the end of our scope. + ptr::eq(self.ptr, self.scope) + } + + /// If the cursor is pointing at a `Ident`, returns it along with a cursor + /// pointing at the next `TokenTree`. 
+ pub fn ident(mut self) -> Option<(Ident, Cursor<'a>)> { + self.ignore_none(); + match self.entry() { + Entry::Ident(ident) => Some((ident.clone(), unsafe { self.bump_ignore_group() })), + _ => None, + } + } + + /// If the cursor is pointing at a `Punct`, returns it along with a cursor + /// pointing at the next `TokenTree`. + pub fn punct(mut self) -> Option<(Punct, Cursor<'a>)> { + self.ignore_none(); + match self.entry() { + Entry::Punct(punct) if punct.as_char() != '\'' => { + Some((punct.clone(), unsafe { self.bump_ignore_group() })) + } + _ => None, + } + } + + /// If the cursor is pointing at a `Literal`, return it along with a cursor + /// pointing at the next `TokenTree`. + pub fn literal(mut self) -> Option<(Literal, Cursor<'a>)> { + self.ignore_none(); + match self.entry() { + Entry::Literal(literal) => Some((literal.clone(), unsafe { self.bump_ignore_group() })), + _ => None, + } + } + + /// If the cursor is pointing at a `Lifetime`, returns it along with a + /// cursor pointing at the next `TokenTree`. + pub fn lifetime(mut self) -> Option<(Lifetime, Cursor<'a>)> { + self.ignore_none(); + match self.entry() { + Entry::Punct(punct) if punct.as_char() == '\'' && punct.spacing() == Spacing::Joint => { + let next = unsafe { self.bump_ignore_group() }; + let (ident, rest) = next.ident()?; + let lifetime = Lifetime { + apostrophe: punct.span(), + ident, + }; + Some((lifetime, rest)) + } + _ => None, + } + } + + /// If the cursor is pointing at a `Group` with the given delimiter, returns + /// a cursor into that group and one pointing to the next `TokenTree`. + pub fn group(mut self, delim: Delimiter) -> Option<(Cursor<'a>, DelimSpan, Cursor<'a>)> { + // If we're not trying to enter a none-delimited group, we want to + // ignore them. We have to make sure to _not_ ignore them when we want + // to enter them, of course. For obvious reasons. + if delim != Delimiter::None { + self.ignore_none(); + } + + if let Entry::Group(group, end_offset) = self.entry() { + if group.delimiter() == delim { + let span = group.delim_span(); + let end_of_group = unsafe { self.ptr.add(*end_offset) }; + let inside_of_group = unsafe { Cursor::create(self.ptr.add(1), end_of_group) }; + let after_group = unsafe { Cursor::create(end_of_group, self.scope) }; + return Some((inside_of_group, span, after_group)); + } + } + + None + } + + /// If the cursor is pointing at a `Group`, returns a cursor into the group + /// and one pointing to the next `TokenTree`. + pub fn any_group(self) -> Option<(Cursor<'a>, Delimiter, DelimSpan, Cursor<'a>)> { + if let Entry::Group(group, end_offset) = self.entry() { + let delimiter = group.delimiter(); + let span = group.delim_span(); + let end_of_group = unsafe { self.ptr.add(*end_offset) }; + let inside_of_group = unsafe { Cursor::create(self.ptr.add(1), end_of_group) }; + let after_group = unsafe { Cursor::create(end_of_group, self.scope) }; + return Some((inside_of_group, delimiter, span, after_group)); + } + + None + } + + pub(crate) fn any_group_token(self) -> Option<(Group, Cursor<'a>)> { + if let Entry::Group(group, end_offset) = self.entry() { + let end_of_group = unsafe { self.ptr.add(*end_offset) }; + let after_group = unsafe { Cursor::create(end_of_group, self.scope) }; + return Some((group.clone(), after_group)); + } + + None + } + + /// Copies all remaining tokens visible from this cursor into a + /// `TokenStream`. 
+ pub fn token_stream(self) -> TokenStream { + let mut tokens = TokenStream::new(); + let mut cursor = self; + while let Some((tt, rest)) = cursor.token_tree() { + tokens.append(tt); + cursor = rest; + } + tokens + } + + /// If the cursor is pointing at a `TokenTree`, returns it along with a + /// cursor pointing at the next `TokenTree`. + /// + /// Returns `None` if the cursor has reached the end of its stream. + /// + /// This method does not treat `None`-delimited groups as transparent, and + /// will return a `Group(None, ..)` if the cursor is looking at one. + pub fn token_tree(self) -> Option<(TokenTree, Cursor<'a>)> { + let (tree, len) = match self.entry() { + Entry::Group(group, end_offset) => (group.clone().into(), *end_offset), + Entry::Literal(literal) => (literal.clone().into(), 1), + Entry::Ident(ident) => (ident.clone().into(), 1), + Entry::Punct(punct) => (punct.clone().into(), 1), + Entry::End(..) => return None, + }; + + let rest = unsafe { Cursor::create(self.ptr.add(len), self.scope) }; + Some((tree, rest)) + } + + /// Returns the `Span` of the current token, or `Span::call_site()` if this + /// cursor points to eof. + pub fn span(mut self) -> Span { + match self.entry() { + Entry::Group(group, _) => group.span(), + Entry::Literal(literal) => literal.span(), + Entry::Ident(ident) => ident.span(), + Entry::Punct(punct) => punct.span(), + Entry::End(_, offset) => { + self.ptr = unsafe { self.ptr.offset(*offset) }; + if let Entry::Group(group, _) = self.entry() { + group.span_close() + } else { + Span::call_site() + } + } + } + } + + /// Returns the `Span` of the token immediately prior to the position of + /// this cursor, or of the current token if there is no previous one. + #[cfg(any(feature = "full", feature = "derive"))] + pub(crate) fn prev_span(mut self) -> Span { + if start_of_buffer(self) < self.ptr { + self.ptr = unsafe { self.ptr.offset(-1) }; + } + self.span() + } + + /// Skip over the next token that is not a None-delimited group, without + /// cloning it. Returns `None` if this cursor points to eof. + /// + /// This method treats `'lifetimes` as a single token. + pub(crate) fn skip(mut self) -> Option<Cursor<'a>> { + self.ignore_none(); + + let len = match self.entry() { + Entry::End(..) => return None, + + // Treat lifetimes as a single tt for the purposes of 'skip'. 
+ Entry::Punct(punct) if punct.as_char() == '\'' && punct.spacing() == Spacing::Joint => { + match unsafe { &*self.ptr.add(1) } { + Entry::Ident(_) => 2, + _ => 1, + } + } + + Entry::Group(_, end_offset) => *end_offset, + _ => 1, + }; + + Some(unsafe { Cursor::create(self.ptr.add(len), self.scope) }) + } + + pub(crate) fn scope_delimiter(self) -> Delimiter { + match unsafe { &*self.scope } { + Entry::End(_, offset) => match unsafe { &*self.scope.offset(*offset) } { + Entry::Group(group, _) => group.delimiter(), + _ => Delimiter::None, + }, + _ => unreachable!(), + } + } +} + +impl<'a> Copy for Cursor<'a> {} + +impl<'a> Clone for Cursor<'a> { + fn clone(&self) -> Self { + *self + } +} + +impl<'a> Eq for Cursor<'a> {} + +impl<'a> PartialEq for Cursor<'a> { + fn eq(&self, other: &Self) -> bool { + ptr::eq(self.ptr, other.ptr) + } +} + +impl<'a> PartialOrd for Cursor<'a> { + fn partial_cmp(&self, other: &Self) -> Option<Ordering> { + if same_buffer(*self, *other) { + Some(cmp_assuming_same_buffer(*self, *other)) + } else { + None + } + } +} + +pub(crate) fn same_scope(a: Cursor, b: Cursor) -> bool { + ptr::eq(a.scope, b.scope) +} + +pub(crate) fn same_buffer(a: Cursor, b: Cursor) -> bool { + ptr::eq(start_of_buffer(a), start_of_buffer(b)) +} + +fn start_of_buffer(cursor: Cursor) -> *const Entry { + unsafe { + match &*cursor.scope { + Entry::End(offset, _) => cursor.scope.offset(*offset), + _ => unreachable!(), + } + } +} + +pub(crate) fn cmp_assuming_same_buffer(a: Cursor, b: Cursor) -> Ordering { + a.ptr.cmp(&b.ptr) +} + +pub(crate) fn open_span_of_group(cursor: Cursor) -> Span { + match cursor.entry() { + Entry::Group(group, _) => group.span_open(), + _ => cursor.span(), + } +} diff --git a/vendor/syn/src/classify.rs b/vendor/syn/src/classify.rs new file mode 100644 index 00000000000000..8eab19dbc37cac --- /dev/null +++ b/vendor/syn/src/classify.rs @@ -0,0 +1,311 @@ +#[cfg(feature = "full")] +use crate::expr::Expr; +#[cfg(any(feature = "printing", feature = "full"))] +use crate::generics::TypeParamBound; +#[cfg(any(feature = "printing", feature = "full"))] +use crate::path::{Path, PathArguments}; +#[cfg(any(feature = "printing", feature = "full"))] +use crate::punctuated::Punctuated; +#[cfg(any(feature = "printing", feature = "full"))] +use crate::ty::{ReturnType, Type}; +#[cfg(feature = "full")] +use proc_macro2::{Delimiter, TokenStream, TokenTree}; +#[cfg(any(feature = "printing", feature = "full"))] +use std::ops::ControlFlow; + +#[cfg(feature = "full")] +pub(crate) fn requires_semi_to_be_stmt(expr: &Expr) -> bool { + match expr { + Expr::Macro(expr) => !expr.mac.delimiter.is_brace(), + _ => requires_comma_to_be_match_arm(expr), + } +} + +#[cfg(feature = "full")] +pub(crate) fn requires_comma_to_be_match_arm(expr: &Expr) -> bool { + match expr { + Expr::If(_) + | Expr::Match(_) + | Expr::Block(_) | Expr::Unsafe(_) // both under ExprKind::Block in rustc + | Expr::While(_) + | Expr::Loop(_) + | Expr::ForLoop(_) + | Expr::TryBlock(_) + | Expr::Const(_) => false, + + Expr::Array(_) + | Expr::Assign(_) + | Expr::Async(_) + | Expr::Await(_) + | Expr::Binary(_) + | Expr::Break(_) + | Expr::Call(_) + | Expr::Cast(_) + | Expr::Closure(_) + | Expr::Continue(_) + | Expr::Field(_) + | Expr::Group(_) + | Expr::Index(_) + | Expr::Infer(_) + | Expr::Let(_) + | Expr::Lit(_) + | Expr::Macro(_) + | Expr::MethodCall(_) + | Expr::Paren(_) + | Expr::Path(_) + | Expr::Range(_) + | Expr::RawAddr(_) + | Expr::Reference(_) + | Expr::Repeat(_) + | Expr::Return(_) + | Expr::Struct(_) + | Expr::Try(_) + | 
Expr::Tuple(_) + | Expr::Unary(_) + | Expr::Yield(_) + | Expr::Verbatim(_) => true, + } +} + +#[cfg(feature = "printing")] +pub(crate) fn trailing_unparameterized_path(mut ty: &Type) -> bool { + loop { + match ty { + Type::BareFn(t) => match &t.output { + ReturnType::Default => return false, + ReturnType::Type(_, ret) => ty = ret, + }, + Type::ImplTrait(t) => match last_type_in_bounds(&t.bounds) { + ControlFlow::Break(trailing_path) => return trailing_path, + ControlFlow::Continue(t) => ty = t, + }, + Type::Path(t) => match last_type_in_path(&t.path) { + ControlFlow::Break(trailing_path) => return trailing_path, + ControlFlow::Continue(t) => ty = t, + }, + Type::Ptr(t) => ty = &t.elem, + Type::Reference(t) => ty = &t.elem, + Type::TraitObject(t) => match last_type_in_bounds(&t.bounds) { + ControlFlow::Break(trailing_path) => return trailing_path, + ControlFlow::Continue(t) => ty = t, + }, + + Type::Array(_) + | Type::Group(_) + | Type::Infer(_) + | Type::Macro(_) + | Type::Never(_) + | Type::Paren(_) + | Type::Slice(_) + | Type::Tuple(_) + | Type::Verbatim(_) => return false, + } + } + + fn last_type_in_path(path: &Path) -> ControlFlow<bool, &Type> { + match &path.segments.last().unwrap().arguments { + PathArguments::None => ControlFlow::Break(true), + PathArguments::AngleBracketed(_) => ControlFlow::Break(false), + PathArguments::Parenthesized(arg) => match &arg.output { + ReturnType::Default => ControlFlow::Break(false), + ReturnType::Type(_, ret) => ControlFlow::Continue(ret), + }, + } + } + + fn last_type_in_bounds( + bounds: &Punctuated<TypeParamBound, Token![+]>, + ) -> ControlFlow<bool, &Type> { + match bounds.last().unwrap() { + TypeParamBound::Trait(t) => last_type_in_path(&t.path), + TypeParamBound::Lifetime(_) + | TypeParamBound::PreciseCapture(_) + | TypeParamBound::Verbatim(_) => ControlFlow::Break(false), + } + } +} + +/// Whether the expression's first token is the label of a loop/block. +#[cfg(all(feature = "printing", feature = "full"))] +pub(crate) fn expr_leading_label(mut expr: &Expr) -> bool { + loop { + match expr { + Expr::Block(e) => return e.label.is_some(), + Expr::ForLoop(e) => return e.label.is_some(), + Expr::Loop(e) => return e.label.is_some(), + Expr::While(e) => return e.label.is_some(), + + Expr::Assign(e) => expr = &e.left, + Expr::Await(e) => expr = &e.base, + Expr::Binary(e) => expr = &e.left, + Expr::Call(e) => expr = &e.func, + Expr::Cast(e) => expr = &e.expr, + Expr::Field(e) => expr = &e.base, + Expr::Index(e) => expr = &e.expr, + Expr::MethodCall(e) => expr = &e.receiver, + Expr::Range(e) => match &e.start { + Some(start) => expr = start, + None => return false, + }, + Expr::Try(e) => expr = &e.expr, + + Expr::Array(_) + | Expr::Async(_) + | Expr::Break(_) + | Expr::Closure(_) + | Expr::Const(_) + | Expr::Continue(_) + | Expr::Group(_) + | Expr::If(_) + | Expr::Infer(_) + | Expr::Let(_) + | Expr::Lit(_) + | Expr::Macro(_) + | Expr::Match(_) + | Expr::Paren(_) + | Expr::Path(_) + | Expr::RawAddr(_) + | Expr::Reference(_) + | Expr::Repeat(_) + | Expr::Return(_) + | Expr::Struct(_) + | Expr::TryBlock(_) + | Expr::Tuple(_) + | Expr::Unary(_) + | Expr::Unsafe(_) + | Expr::Verbatim(_) + | Expr::Yield(_) => return false, + } + } +} + +/// Whether the expression's last token is `}`. 
+#[cfg(feature = "full")] +pub(crate) fn expr_trailing_brace(mut expr: &Expr) -> bool { + loop { + match expr { + Expr::Async(_) + | Expr::Block(_) + | Expr::Const(_) + | Expr::ForLoop(_) + | Expr::If(_) + | Expr::Loop(_) + | Expr::Match(_) + | Expr::Struct(_) + | Expr::TryBlock(_) + | Expr::Unsafe(_) + | Expr::While(_) => return true, + + Expr::Assign(e) => expr = &e.right, + Expr::Binary(e) => expr = &e.right, + Expr::Break(e) => match &e.expr { + Some(e) => expr = e, + None => return false, + }, + Expr::Cast(e) => return type_trailing_brace(&e.ty), + Expr::Closure(e) => expr = &e.body, + Expr::Let(e) => expr = &e.expr, + Expr::Macro(e) => return e.mac.delimiter.is_brace(), + Expr::Range(e) => match &e.end { + Some(end) => expr = end, + None => return false, + }, + Expr::RawAddr(e) => expr = &e.expr, + Expr::Reference(e) => expr = &e.expr, + Expr::Return(e) => match &e.expr { + Some(e) => expr = e, + None => return false, + }, + Expr::Unary(e) => expr = &e.expr, + Expr::Verbatim(e) => return tokens_trailing_brace(e), + Expr::Yield(e) => match &e.expr { + Some(e) => expr = e, + None => return false, + }, + + Expr::Array(_) + | Expr::Await(_) + | Expr::Call(_) + | Expr::Continue(_) + | Expr::Field(_) + | Expr::Group(_) + | Expr::Index(_) + | Expr::Infer(_) + | Expr::Lit(_) + | Expr::MethodCall(_) + | Expr::Paren(_) + | Expr::Path(_) + | Expr::Repeat(_) + | Expr::Try(_) + | Expr::Tuple(_) => return false, + } + } + + fn type_trailing_brace(mut ty: &Type) -> bool { + loop { + match ty { + Type::BareFn(t) => match &t.output { + ReturnType::Default => return false, + ReturnType::Type(_, ret) => ty = ret, + }, + Type::ImplTrait(t) => match last_type_in_bounds(&t.bounds) { + ControlFlow::Break(trailing_brace) => return trailing_brace, + ControlFlow::Continue(t) => ty = t, + }, + Type::Macro(t) => return t.mac.delimiter.is_brace(), + Type::Path(t) => match last_type_in_path(&t.path) { + Some(t) => ty = t, + None => return false, + }, + Type::Ptr(t) => ty = &t.elem, + Type::Reference(t) => ty = &t.elem, + Type::TraitObject(t) => match last_type_in_bounds(&t.bounds) { + ControlFlow::Break(trailing_brace) => return trailing_brace, + ControlFlow::Continue(t) => ty = t, + }, + Type::Verbatim(t) => return tokens_trailing_brace(t), + + Type::Array(_) + | Type::Group(_) + | Type::Infer(_) + | Type::Never(_) + | Type::Paren(_) + | Type::Slice(_) + | Type::Tuple(_) => return false, + } + } + } + + fn last_type_in_path(path: &Path) -> Option<&Type> { + match &path.segments.last().unwrap().arguments { + PathArguments::None | PathArguments::AngleBracketed(_) => None, + PathArguments::Parenthesized(arg) => match &arg.output { + ReturnType::Default => None, + ReturnType::Type(_, ret) => Some(ret), + }, + } + } + + fn last_type_in_bounds( + bounds: &Punctuated<TypeParamBound, Token![+]>, + ) -> ControlFlow<bool, &Type> { + match bounds.last().unwrap() { + TypeParamBound::Trait(t) => match last_type_in_path(&t.path) { + Some(t) => ControlFlow::Continue(t), + None => ControlFlow::Break(false), + }, + TypeParamBound::Lifetime(_) | TypeParamBound::PreciseCapture(_) => { + ControlFlow::Break(false) + } + TypeParamBound::Verbatim(t) => ControlFlow::Break(tokens_trailing_brace(t)), + } + } + + fn tokens_trailing_brace(tokens: &TokenStream) -> bool { + if let Some(TokenTree::Group(last)) = tokens.clone().into_iter().last() { + last.delimiter() == Delimiter::Brace + } else { + false + } + } +} diff --git a/vendor/syn/src/custom_keyword.rs b/vendor/syn/src/custom_keyword.rs new file mode 100644 index 
00000000000000..cc4f632c981a97 --- /dev/null +++ b/vendor/syn/src/custom_keyword.rs @@ -0,0 +1,260 @@ +/// Define a type that supports parsing and printing a given identifier as if it +/// were a keyword. +/// +/// # Usage +/// +/// As a convention, it is recommended that this macro be invoked within a +/// module called `kw` or `keyword` and that the resulting parser be invoked +/// with a `kw::` or `keyword::` prefix. +/// +/// ``` +/// mod kw { +/// syn::custom_keyword!(whatever); +/// } +/// ``` +/// +/// The generated syntax tree node supports the following operations just like +/// any built-in keyword token. +/// +/// - [Peeking] — `input.peek(kw::whatever)` +/// +/// - [Parsing] — `input.parse::<kw::whatever>()?` +/// +/// - [Printing] — `quote!( ... #whatever_token ... )` +/// +/// - Construction from a [`Span`] — `let whatever_token = kw::whatever(sp)` +/// +/// - Field access to its span — `let sp = whatever_token.span` +/// +/// [Peeking]: crate::parse::ParseBuffer::peek +/// [Parsing]: crate::parse::ParseBuffer::parse +/// [Printing]: quote::ToTokens +/// [`Span`]: proc_macro2::Span +/// +/// # Example +/// +/// This example parses input that looks like `bool = true` or `str = "value"`. +/// The key must be either the identifier `bool` or the identifier `str`. If +/// `bool`, the value may be either `true` or `false`. If `str`, the value may +/// be any string literal. +/// +/// The symbols `bool` and `str` are not reserved keywords in Rust so these are +/// not considered keywords in the `syn::token` module. Like any other +/// identifier that is not a keyword, these can be declared as custom keywords +/// by crates that need to use them as such. +/// +/// ``` +/// use syn::{LitBool, LitStr, Result, Token}; +/// use syn::parse::{Parse, ParseStream}; +/// +/// mod kw { +/// syn::custom_keyword!(bool); +/// syn::custom_keyword!(str); +/// } +/// +/// enum Argument { +/// Bool { +/// bool_token: kw::bool, +/// eq_token: Token![=], +/// value: LitBool, +/// }, +/// Str { +/// str_token: kw::str, +/// eq_token: Token![=], +/// value: LitStr, +/// }, +/// } +/// +/// impl Parse for Argument { +/// fn parse(input: ParseStream) -> Result<Self> { +/// let lookahead = input.lookahead1(); +/// if lookahead.peek(kw::bool) { +/// Ok(Argument::Bool { +/// bool_token: input.parse::<kw::bool>()?, +/// eq_token: input.parse()?, +/// value: input.parse()?, +/// }) +/// } else if lookahead.peek(kw::str) { +/// Ok(Argument::Str { +/// str_token: input.parse::<kw::str>()?, +/// eq_token: input.parse()?, +/// value: input.parse()?, +/// }) +/// } else { +/// Err(lookahead.error()) +/// } +/// } +/// } +/// ``` +#[macro_export] +macro_rules! custom_keyword { + ($ident:ident) => { + #[allow(non_camel_case_types)] + pub struct $ident { + #[allow(dead_code)] + pub span: $crate::__private::Span, + } + + #[doc(hidden)] + #[allow(dead_code, non_snake_case)] + pub fn $ident<__S: $crate::__private::IntoSpans<$crate::__private::Span>>( + span: __S, + ) -> $ident { + $ident { + span: $crate::__private::IntoSpans::into_spans(span), + } + } + + const _: () = { + impl $crate::__private::Default for $ident { + fn default() -> Self { + $ident { + span: $crate::__private::Span::call_site(), + } + } + } + + $crate::impl_parse_for_custom_keyword!($ident); + $crate::impl_to_tokens_for_custom_keyword!($ident); + $crate::impl_clone_for_custom_keyword!($ident); + $crate::impl_extra_traits_for_custom_keyword!($ident); + }; + }; +} + +// Not public API. 
+#[cfg(feature = "parsing")] +#[doc(hidden)] +#[macro_export] +macro_rules! impl_parse_for_custom_keyword { + ($ident:ident) => { + // For peek. + impl $crate::__private::CustomToken for $ident { + fn peek(cursor: $crate::buffer::Cursor) -> $crate::__private::bool { + if let $crate::__private::Some((ident, _rest)) = cursor.ident() { + ident == $crate::__private::stringify!($ident) + } else { + false + } + } + + fn display() -> &'static $crate::__private::str { + $crate::__private::concat!("`", $crate::__private::stringify!($ident), "`") + } + } + + impl $crate::parse::Parse for $ident { + fn parse(input: $crate::parse::ParseStream) -> $crate::parse::Result<$ident> { + input.step(|cursor| { + if let $crate::__private::Some((ident, rest)) = cursor.ident() { + if ident == $crate::__private::stringify!($ident) { + return $crate::__private::Ok(($ident { span: ident.span() }, rest)); + } + } + $crate::__private::Err(cursor.error($crate::__private::concat!( + "expected `", + $crate::__private::stringify!($ident), + "`", + ))) + }) + } + } + }; +} + +// Not public API. +#[cfg(not(feature = "parsing"))] +#[doc(hidden)] +#[macro_export] +macro_rules! impl_parse_for_custom_keyword { + ($ident:ident) => {}; +} + +// Not public API. +#[cfg(feature = "printing")] +#[doc(hidden)] +#[macro_export] +macro_rules! impl_to_tokens_for_custom_keyword { + ($ident:ident) => { + impl $crate::__private::ToTokens for $ident { + fn to_tokens(&self, tokens: &mut $crate::__private::TokenStream2) { + let ident = $crate::Ident::new($crate::__private::stringify!($ident), self.span); + $crate::__private::TokenStreamExt::append(tokens, ident); + } + } + }; +} + +// Not public API. +#[cfg(not(feature = "printing"))] +#[doc(hidden)] +#[macro_export] +macro_rules! impl_to_tokens_for_custom_keyword { + ($ident:ident) => {}; +} + +// Not public API. +#[cfg(feature = "clone-impls")] +#[doc(hidden)] +#[macro_export] +macro_rules! impl_clone_for_custom_keyword { + ($ident:ident) => { + impl $crate::__private::Copy for $ident {} + + #[allow(clippy::expl_impl_clone_on_copy)] + impl $crate::__private::Clone for $ident { + fn clone(&self) -> Self { + *self + } + } + }; +} + +// Not public API. +#[cfg(not(feature = "clone-impls"))] +#[doc(hidden)] +#[macro_export] +macro_rules! impl_clone_for_custom_keyword { + ($ident:ident) => {}; +} + +// Not public API. +#[cfg(feature = "extra-traits")] +#[doc(hidden)] +#[macro_export] +macro_rules! impl_extra_traits_for_custom_keyword { + ($ident:ident) => { + impl $crate::__private::Debug for $ident { + fn fmt(&self, f: &mut $crate::__private::Formatter) -> $crate::__private::FmtResult { + $crate::__private::Formatter::write_str( + f, + $crate::__private::concat!( + "Keyword [", + $crate::__private::stringify!($ident), + "]", + ), + ) + } + } + + impl $crate::__private::Eq for $ident {} + + impl $crate::__private::PartialEq for $ident { + fn eq(&self, _other: &Self) -> $crate::__private::bool { + true + } + } + + impl $crate::__private::Hash for $ident { + fn hash<__H: $crate::__private::Hasher>(&self, _state: &mut __H) {} + } + }; +} + +// Not public API. +#[cfg(not(feature = "extra-traits"))] +#[doc(hidden)] +#[macro_export] +macro_rules! 
impl_extra_traits_for_custom_keyword { + ($ident:ident) => {}; +} diff --git a/vendor/syn/src/custom_punctuation.rs b/vendor/syn/src/custom_punctuation.rs new file mode 100644 index 00000000000000..568bc5d92ef848 --- /dev/null +++ b/vendor/syn/src/custom_punctuation.rs @@ -0,0 +1,305 @@ +/// Define a type that supports parsing and printing a multi-character symbol +/// as if it were a punctuation token. +/// +/// # Usage +/// +/// ``` +/// syn::custom_punctuation!(LeftRightArrow, <=>); +/// ``` +/// +/// The generated syntax tree node supports the following operations just like +/// any built-in punctuation token. +/// +/// - [Peeking] — `input.peek(LeftRightArrow)` +/// +/// - [Parsing] — `input.parse::<LeftRightArrow>()?` +/// +/// - [Printing] — `quote!( ... #lrarrow ... )` +/// +/// - Construction from a [`Span`] — `let lrarrow = LeftRightArrow(sp)` +/// +/// - Construction from multiple [`Span`] — `let lrarrow = LeftRightArrow([sp, sp, sp])` +/// +/// - Field access to its spans — `let spans = lrarrow.spans` +/// +/// [Peeking]: crate::parse::ParseBuffer::peek +/// [Parsing]: crate::parse::ParseBuffer::parse +/// [Printing]: quote::ToTokens +/// [`Span`]: proc_macro2::Span +/// +/// # Example +/// +/// ``` +/// use proc_macro2::{TokenStream, TokenTree}; +/// use std::iter; +/// use syn::parse::{Parse, ParseStream, Peek, Result}; +/// use syn::punctuated::Punctuated; +/// use syn::Expr; +/// +/// syn::custom_punctuation!(PathSeparator, </>); +/// +/// // expr </> expr </> expr ... +/// struct PathSegments { +/// segments: Punctuated<Expr, PathSeparator>, +/// } +/// +/// impl Parse for PathSegments { +/// fn parse(input: ParseStream) -> Result<Self> { +/// let mut segments = Punctuated::new(); +/// +/// let first = parse_until(input, PathSeparator)?; +/// segments.push_value(syn::parse2(first)?); +/// +/// while input.peek(PathSeparator) { +/// segments.push_punct(input.parse()?); +/// +/// let next = parse_until(input, PathSeparator)?; +/// segments.push_value(syn::parse2(next)?); +/// } +/// +/// Ok(PathSegments { segments }) +/// } +/// } +/// +/// fn parse_until<E: Peek>(input: ParseStream, end: E) -> Result<TokenStream> { +/// let mut tokens = TokenStream::new(); +/// while !input.is_empty() && !input.peek(end) { +/// let next: TokenTree = input.parse()?; +/// tokens.extend(iter::once(next)); +/// } +/// Ok(tokens) +/// } +/// +/// fn main() { +/// let input = r#" a::b </> c::d::e "#; +/// let _: PathSegments = syn::parse_str(input).unwrap(); +/// } +/// ``` +#[macro_export] +macro_rules! custom_punctuation { + ($ident:ident, $($tt:tt)+) => { + pub struct $ident { + #[allow(dead_code)] + pub spans: $crate::custom_punctuation_repr!($($tt)+), + } + + #[doc(hidden)] + #[allow(dead_code, non_snake_case)] + pub fn $ident<__S: $crate::__private::IntoSpans<$crate::custom_punctuation_repr!($($tt)+)>>( + spans: __S, + ) -> $ident { + let _validate_len = 0 $(+ $crate::custom_punctuation_len!(strict, $tt))*; + $ident { + spans: $crate::__private::IntoSpans::into_spans(spans) + } + } + + const _: () = { + impl $crate::__private::Default for $ident { + fn default() -> Self { + $ident($crate::__private::Span::call_site()) + } + } + + $crate::impl_parse_for_custom_punctuation!($ident, $($tt)+); + $crate::impl_to_tokens_for_custom_punctuation!($ident, $($tt)+); + $crate::impl_clone_for_custom_punctuation!($ident, $($tt)+); + $crate::impl_extra_traits_for_custom_punctuation!($ident, $($tt)+); + }; + }; +} + +// Not public API. 
+#[cfg(feature = "parsing")] +#[doc(hidden)] +#[macro_export] +macro_rules! impl_parse_for_custom_punctuation { + ($ident:ident, $($tt:tt)+) => { + impl $crate::__private::CustomToken for $ident { + fn peek(cursor: $crate::buffer::Cursor) -> $crate::__private::bool { + $crate::__private::peek_punct(cursor, $crate::stringify_punct!($($tt)+)) + } + + fn display() -> &'static $crate::__private::str { + $crate::__private::concat!("`", $crate::stringify_punct!($($tt)+), "`") + } + } + + impl $crate::parse::Parse for $ident { + fn parse(input: $crate::parse::ParseStream) -> $crate::parse::Result<$ident> { + let spans: $crate::custom_punctuation_repr!($($tt)+) = + $crate::__private::parse_punct(input, $crate::stringify_punct!($($tt)+))?; + Ok($ident(spans)) + } + } + }; +} + +// Not public API. +#[cfg(not(feature = "parsing"))] +#[doc(hidden)] +#[macro_export] +macro_rules! impl_parse_for_custom_punctuation { + ($ident:ident, $($tt:tt)+) => {}; +} + +// Not public API. +#[cfg(feature = "printing")] +#[doc(hidden)] +#[macro_export] +macro_rules! impl_to_tokens_for_custom_punctuation { + ($ident:ident, $($tt:tt)+) => { + impl $crate::__private::ToTokens for $ident { + fn to_tokens(&self, tokens: &mut $crate::__private::TokenStream2) { + $crate::__private::print_punct($crate::stringify_punct!($($tt)+), &self.spans, tokens) + } + } + }; +} + +// Not public API. +#[cfg(not(feature = "printing"))] +#[doc(hidden)] +#[macro_export] +macro_rules! impl_to_tokens_for_custom_punctuation { + ($ident:ident, $($tt:tt)+) => {}; +} + +// Not public API. +#[cfg(feature = "clone-impls")] +#[doc(hidden)] +#[macro_export] +macro_rules! impl_clone_for_custom_punctuation { + ($ident:ident, $($tt:tt)+) => { + impl $crate::__private::Copy for $ident {} + + #[allow(clippy::expl_impl_clone_on_copy)] + impl $crate::__private::Clone for $ident { + fn clone(&self) -> Self { + *self + } + } + }; +} + +// Not public API. +#[cfg(not(feature = "clone-impls"))] +#[doc(hidden)] +#[macro_export] +macro_rules! impl_clone_for_custom_punctuation { + ($ident:ident, $($tt:tt)+) => {}; +} + +// Not public API. +#[cfg(feature = "extra-traits")] +#[doc(hidden)] +#[macro_export] +macro_rules! impl_extra_traits_for_custom_punctuation { + ($ident:ident, $($tt:tt)+) => { + impl $crate::__private::Debug for $ident { + fn fmt(&self, f: &mut $crate::__private::Formatter) -> $crate::__private::FmtResult { + $crate::__private::Formatter::write_str(f, $crate::__private::stringify!($ident)) + } + } + + impl $crate::__private::Eq for $ident {} + + impl $crate::__private::PartialEq for $ident { + fn eq(&self, _other: &Self) -> $crate::__private::bool { + true + } + } + + impl $crate::__private::Hash for $ident { + fn hash<__H: $crate::__private::Hasher>(&self, _state: &mut __H) {} + } + }; +} + +// Not public API. +#[cfg(not(feature = "extra-traits"))] +#[doc(hidden)] +#[macro_export] +macro_rules! impl_extra_traits_for_custom_punctuation { + ($ident:ident, $($tt:tt)+) => {}; +} + +// Not public API. +#[doc(hidden)] +#[macro_export] +macro_rules! custom_punctuation_repr { + ($($tt:tt)+) => { + [$crate::__private::Span; 0 $(+ $crate::custom_punctuation_len!(lenient, $tt))+] + }; +} + +// Not public API. +#[doc(hidden)] +#[macro_export] +#[rustfmt::skip] +macro_rules! 
custom_punctuation_len { + ($mode:ident, &) => { 1 }; + ($mode:ident, &&) => { 2 }; + ($mode:ident, &=) => { 2 }; + ($mode:ident, @) => { 1 }; + ($mode:ident, ^) => { 1 }; + ($mode:ident, ^=) => { 2 }; + ($mode:ident, :) => { 1 }; + ($mode:ident, ,) => { 1 }; + ($mode:ident, $) => { 1 }; + ($mode:ident, .) => { 1 }; + ($mode:ident, ..) => { 2 }; + ($mode:ident, ...) => { 3 }; + ($mode:ident, ..=) => { 3 }; + ($mode:ident, =) => { 1 }; + ($mode:ident, ==) => { 2 }; + ($mode:ident, =>) => { 2 }; + ($mode:ident, >=) => { 2 }; + ($mode:ident, >) => { 1 }; + ($mode:ident, <-) => { 2 }; + ($mode:ident, <=) => { 2 }; + ($mode:ident, <) => { 1 }; + ($mode:ident, -) => { 1 }; + ($mode:ident, -=) => { 2 }; + ($mode:ident, !=) => { 2 }; + ($mode:ident, !) => { 1 }; + ($mode:ident, |) => { 1 }; + ($mode:ident, |=) => { 2 }; + ($mode:ident, ||) => { 2 }; + ($mode:ident, ::) => { 2 }; + ($mode:ident, %) => { 1 }; + ($mode:ident, %=) => { 2 }; + ($mode:ident, +) => { 1 }; + ($mode:ident, +=) => { 2 }; + ($mode:ident, #) => { 1 }; + ($mode:ident, ?) => { 1 }; + ($mode:ident, ->) => { 2 }; + ($mode:ident, ;) => { 1 }; + ($mode:ident, <<) => { 2 }; + ($mode:ident, <<=) => { 3 }; + ($mode:ident, >>) => { 2 }; + ($mode:ident, >>=) => { 3 }; + ($mode:ident, /) => { 1 }; + ($mode:ident, /=) => { 2 }; + ($mode:ident, *) => { 1 }; + ($mode:ident, *=) => { 2 }; + ($mode:ident, ~) => { 1 }; + (lenient, $tt:tt) => { 0 }; + (strict, $tt:tt) => {{ $crate::custom_punctuation_unexpected!($tt); 0 }}; +} + +// Not public API. +#[doc(hidden)] +#[macro_export] +macro_rules! custom_punctuation_unexpected { + () => {}; +} + +// Not public API. +#[doc(hidden)] +#[macro_export] +macro_rules! stringify_punct { + ($($tt:tt)+) => { + $crate::__private::concat!($($crate::__private::stringify!($tt)),+) + }; +} diff --git a/vendor/syn/src/data.rs b/vendor/syn/src/data.rs new file mode 100644 index 00000000000000..f973004dc63f88 --- /dev/null +++ b/vendor/syn/src/data.rs @@ -0,0 +1,424 @@ +use crate::attr::Attribute; +use crate::expr::{Expr, Index, Member}; +use crate::ident::Ident; +use crate::punctuated::{self, Punctuated}; +use crate::restriction::{FieldMutability, Visibility}; +use crate::token; +use crate::ty::Type; + +ast_struct! { + /// An enum variant. + #[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] + pub struct Variant { + pub attrs: Vec<Attribute>, + + /// Name of the variant. + pub ident: Ident, + + /// Content stored in the variant. + pub fields: Fields, + + /// Explicit discriminant: `Variant = 1` + pub discriminant: Option<(Token![=], Expr)>, + } +} + +ast_enum_of_structs! { + /// Data stored within an enum variant or struct. + /// + /// # Syntax tree enum + /// + /// This type is a [syntax tree enum]. + /// + /// [syntax tree enum]: crate::expr::Expr#syntax-tree-enums + #[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] + pub enum Fields { + /// Named fields of a struct or struct variant such as `Point { x: f64, + /// y: f64 }`. + Named(FieldsNamed), + + /// Unnamed fields of a tuple struct or tuple variant such as `Some(T)`. + Unnamed(FieldsUnnamed), + + /// Unit struct or unit variant such as `None`. + Unit, + } +} + +ast_struct! { + /// Named fields of a struct or struct variant such as `Point { x: f64, + /// y: f64 }`. + #[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] + pub struct FieldsNamed { + pub brace_token: token::Brace, + pub named: Punctuated<Field, Token![,]>, + } +} + +ast_struct! 
{ + /// Unnamed fields of a tuple struct or tuple variant such as `Some(T)`. + #[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] + pub struct FieldsUnnamed { + pub paren_token: token::Paren, + pub unnamed: Punctuated<Field, Token![,]>, + } +} + +impl Fields { + /// Get an iterator over the borrowed [`Field`] items in this object. This + /// iterator can be used to iterate over a named or unnamed struct or + /// variant's fields uniformly. + pub fn iter(&self) -> punctuated::Iter<Field> { + match self { + Fields::Unit => crate::punctuated::empty_punctuated_iter(), + Fields::Named(f) => f.named.iter(), + Fields::Unnamed(f) => f.unnamed.iter(), + } + } + + /// Get an iterator over the mutably borrowed [`Field`] items in this + /// object. This iterator can be used to iterate over a named or unnamed + /// struct or variant's fields uniformly. + pub fn iter_mut(&mut self) -> punctuated::IterMut<Field> { + match self { + Fields::Unit => crate::punctuated::empty_punctuated_iter_mut(), + Fields::Named(f) => f.named.iter_mut(), + Fields::Unnamed(f) => f.unnamed.iter_mut(), + } + } + + /// Returns the number of fields. + pub fn len(&self) -> usize { + match self { + Fields::Unit => 0, + Fields::Named(f) => f.named.len(), + Fields::Unnamed(f) => f.unnamed.len(), + } + } + + /// Returns `true` if there are zero fields. + pub fn is_empty(&self) -> bool { + match self { + Fields::Unit => true, + Fields::Named(f) => f.named.is_empty(), + Fields::Unnamed(f) => f.unnamed.is_empty(), + } + } + + return_impl_trait! { + /// Get an iterator over the fields of a struct or variant as [`Member`]s. + /// This iterator can be used to iterate over a named or unnamed struct or + /// variant's fields uniformly. + /// + /// # Example + /// + /// The following is a simplistic [`Clone`] derive for structs. (A more + /// complete implementation would additionally want to infer trait bounds on + /// the generic type parameters.) + /// + /// ``` + /// # use quote::quote; + /// # + /// fn derive_clone(input: &syn::ItemStruct) -> proc_macro2::TokenStream { + /// let ident = &input.ident; + /// let members = input.fields.members(); + /// let (impl_generics, ty_generics, where_clause) = input.generics.split_for_impl(); + /// quote! { + /// impl #impl_generics Clone for #ident #ty_generics #where_clause { + /// fn clone(&self) -> Self { + /// Self { + /// #(#members: self.#members.clone()),* + /// } + /// } + /// } + /// } + /// } + /// ``` + /// + /// For structs with named fields, it produces an expression like `Self { a: + /// self.a.clone() }`. For structs with unnamed fields, `Self { 0: + /// self.0.clone() }`. And for unit structs, `Self {}`. + pub fn members(&self) -> impl Iterator<Item = Member> + Clone + '_ [Members] { + Members { + fields: self.iter(), + index: 0, + } + } + } +} + +impl IntoIterator for Fields { + type Item = Field; + type IntoIter = punctuated::IntoIter<Field>; + + fn into_iter(self) -> Self::IntoIter { + match self { + Fields::Unit => Punctuated::<Field, ()>::new().into_iter(), + Fields::Named(f) => f.named.into_iter(), + Fields::Unnamed(f) => f.unnamed.into_iter(), + } + } +} + +impl<'a> IntoIterator for &'a Fields { + type Item = &'a Field; + type IntoIter = punctuated::Iter<'a, Field>; + + fn into_iter(self) -> Self::IntoIter { + self.iter() + } +} + +impl<'a> IntoIterator for &'a mut Fields { + type Item = &'a mut Field; + type IntoIter = punctuated::IterMut<'a, Field>; + + fn into_iter(self) -> Self::IntoIter { + self.iter_mut() + } +} + +ast_struct! 
{ + /// A field of a struct or enum variant. + #[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] + pub struct Field { + pub attrs: Vec<Attribute>, + + pub vis: Visibility, + + pub mutability: FieldMutability, + + /// Name of the field, if any. + /// + /// Fields of tuple structs have no names. + pub ident: Option<Ident>, + + pub colon_token: Option<Token![:]>, + + pub ty: Type, + } +} + +pub struct Members<'a> { + fields: punctuated::Iter<'a, Field>, + index: u32, +} + +impl<'a> Iterator for Members<'a> { + type Item = Member; + + fn next(&mut self) -> Option<Self::Item> { + let field = self.fields.next()?; + let member = match &field.ident { + Some(ident) => Member::Named(ident.clone()), + None => { + #[cfg(all(feature = "parsing", feature = "printing"))] + let span = crate::spanned::Spanned::span(&field.ty); + #[cfg(not(all(feature = "parsing", feature = "printing")))] + let span = proc_macro2::Span::call_site(); + Member::Unnamed(Index { + index: self.index, + span, + }) + } + }; + self.index += 1; + Some(member) + } +} + +impl<'a> Clone for Members<'a> { + fn clone(&self) -> Self { + Members { + fields: self.fields.clone(), + index: self.index, + } + } +} + +#[cfg(feature = "parsing")] +pub(crate) mod parsing { + use crate::attr::Attribute; + use crate::data::{Field, Fields, FieldsNamed, FieldsUnnamed, Variant}; + use crate::error::Result; + use crate::expr::Expr; + use crate::ext::IdentExt as _; + use crate::ident::Ident; + #[cfg(not(feature = "full"))] + use crate::parse::discouraged::Speculative as _; + use crate::parse::{Parse, ParseStream}; + use crate::restriction::{FieldMutability, Visibility}; + #[cfg(not(feature = "full"))] + use crate::scan_expr::scan_expr; + use crate::token; + use crate::ty::Type; + use crate::verbatim; + + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + impl Parse for Variant { + fn parse(input: ParseStream) -> Result<Self> { + let attrs = input.call(Attribute::parse_outer)?; + let _visibility: Visibility = input.parse()?; + let ident: Ident = input.parse()?; + let fields = if input.peek(token::Brace) { + Fields::Named(input.parse()?) + } else if input.peek(token::Paren) { + Fields::Unnamed(input.parse()?) + } else { + Fields::Unit + }; + let discriminant = if input.peek(Token![=]) { + let eq_token: Token![=] = input.parse()?; + #[cfg(feature = "full")] + let discriminant: Expr = input.parse()?; + #[cfg(not(feature = "full"))] + let discriminant = { + let begin = input.fork(); + let ahead = input.fork(); + let mut discriminant: Result<Expr> = ahead.parse(); + if discriminant.is_ok() { + input.advance_to(&ahead); + } else if scan_expr(input).is_ok() { + discriminant = Ok(Expr::Verbatim(verbatim::between(&begin, input))); + } + discriminant? + }; + Some((eq_token, discriminant)) + } else { + None + }; + Ok(Variant { + attrs, + ident, + fields, + discriminant, + }) + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + impl Parse for FieldsNamed { + fn parse(input: ParseStream) -> Result<Self> { + let content; + Ok(FieldsNamed { + brace_token: braced!(content in input), + named: content.parse_terminated(Field::parse_named, Token![,])?, + }) + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + impl Parse for FieldsUnnamed { + fn parse(input: ParseStream) -> Result<Self> { + let content; + Ok(FieldsUnnamed { + paren_token: parenthesized!(content in input), + unnamed: content.parse_terminated(Field::parse_unnamed, Token![,])?, + }) + } + } + + impl Field { + /// Parses a named (braced struct) field. 
+ #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + pub fn parse_named(input: ParseStream) -> Result<Self> { + let attrs = input.call(Attribute::parse_outer)?; + let vis: Visibility = input.parse()?; + + let unnamed_field = cfg!(feature = "full") && input.peek(Token![_]); + let ident = if unnamed_field { + input.call(Ident::parse_any) + } else { + input.parse() + }?; + + let colon_token: Token![:] = input.parse()?; + + let ty: Type = if unnamed_field + && (input.peek(Token![struct]) + || input.peek(Token![union]) && input.peek2(token::Brace)) + { + let begin = input.fork(); + input.call(Ident::parse_any)?; + input.parse::<FieldsNamed>()?; + Type::Verbatim(verbatim::between(&begin, input)) + } else { + input.parse()? + }; + + Ok(Field { + attrs, + vis, + mutability: FieldMutability::None, + ident: Some(ident), + colon_token: Some(colon_token), + ty, + }) + } + + /// Parses an unnamed (tuple struct) field. + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + pub fn parse_unnamed(input: ParseStream) -> Result<Self> { + Ok(Field { + attrs: input.call(Attribute::parse_outer)?, + vis: input.parse()?, + mutability: FieldMutability::None, + ident: None, + colon_token: None, + ty: input.parse()?, + }) + } + } +} + +#[cfg(feature = "printing")] +mod printing { + use crate::data::{Field, FieldsNamed, FieldsUnnamed, Variant}; + use crate::print::TokensOrDefault; + use proc_macro2::TokenStream; + use quote::{ToTokens, TokenStreamExt as _}; + + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for Variant { + fn to_tokens(&self, tokens: &mut TokenStream) { + tokens.append_all(&self.attrs); + self.ident.to_tokens(tokens); + self.fields.to_tokens(tokens); + if let Some((eq_token, disc)) = &self.discriminant { + eq_token.to_tokens(tokens); + disc.to_tokens(tokens); + } + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for FieldsNamed { + fn to_tokens(&self, tokens: &mut TokenStream) { + self.brace_token.surround(tokens, |tokens| { + self.named.to_tokens(tokens); + }); + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for FieldsUnnamed { + fn to_tokens(&self, tokens: &mut TokenStream) { + self.paren_token.surround(tokens, |tokens| { + self.unnamed.to_tokens(tokens); + }); + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for Field { + fn to_tokens(&self, tokens: &mut TokenStream) { + tokens.append_all(&self.attrs); + self.vis.to_tokens(tokens); + if let Some(ident) = &self.ident { + ident.to_tokens(tokens); + TokensOrDefault(&self.colon_token).to_tokens(tokens); + } + self.ty.to_tokens(tokens); + } + } +} diff --git a/vendor/syn/src/derive.rs b/vendor/syn/src/derive.rs new file mode 100644 index 00000000000000..3443ecfc05cba6 --- /dev/null +++ b/vendor/syn/src/derive.rs @@ -0,0 +1,259 @@ +use crate::attr::Attribute; +use crate::data::{Fields, FieldsNamed, Variant}; +use crate::generics::Generics; +use crate::ident::Ident; +use crate::punctuated::Punctuated; +use crate::restriction::Visibility; +use crate::token; + +ast_struct! { + /// Data structure sent to a `proc_macro_derive` macro. + #[cfg_attr(docsrs, doc(cfg(feature = "derive")))] + pub struct DeriveInput { + pub attrs: Vec<Attribute>, + pub vis: Visibility, + pub ident: Ident, + pub generics: Generics, + pub data: Data, + } +} + +ast_enum! { + /// The storage of a struct, enum or union data structure. + /// + /// # Syntax tree enum + /// + /// This type is a [syntax tree enum]. 
+ /// + /// [syntax tree enum]: crate::expr::Expr#syntax-tree-enums + #[cfg_attr(docsrs, doc(cfg(feature = "derive")))] + pub enum Data { + Struct(DataStruct), + Enum(DataEnum), + Union(DataUnion), + } +} + +ast_struct! { + /// A struct input to a `proc_macro_derive` macro. + #[cfg_attr(docsrs, doc(cfg(feature = "derive")))] + pub struct DataStruct { + pub struct_token: Token![struct], + pub fields: Fields, + pub semi_token: Option<Token![;]>, + } +} + +ast_struct! { + /// An enum input to a `proc_macro_derive` macro. + #[cfg_attr(docsrs, doc(cfg(feature = "derive")))] + pub struct DataEnum { + pub enum_token: Token![enum], + pub brace_token: token::Brace, + pub variants: Punctuated<Variant, Token![,]>, + } +} + +ast_struct! { + /// An untagged union input to a `proc_macro_derive` macro. + #[cfg_attr(docsrs, doc(cfg(feature = "derive")))] + pub struct DataUnion { + pub union_token: Token![union], + pub fields: FieldsNamed, + } +} + +#[cfg(feature = "parsing")] +pub(crate) mod parsing { + use crate::attr::Attribute; + use crate::data::{Fields, FieldsNamed, Variant}; + use crate::derive::{Data, DataEnum, DataStruct, DataUnion, DeriveInput}; + use crate::error::Result; + use crate::generics::{Generics, WhereClause}; + use crate::ident::Ident; + use crate::parse::{Parse, ParseStream}; + use crate::punctuated::Punctuated; + use crate::restriction::Visibility; + use crate::token; + + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + impl Parse for DeriveInput { + fn parse(input: ParseStream) -> Result<Self> { + let attrs = input.call(Attribute::parse_outer)?; + let vis = input.parse::<Visibility>()?; + + let lookahead = input.lookahead1(); + if lookahead.peek(Token![struct]) { + let struct_token = input.parse::<Token![struct]>()?; + let ident = input.parse::<Ident>()?; + let generics = input.parse::<Generics>()?; + let (where_clause, fields, semi) = data_struct(input)?; + Ok(DeriveInput { + attrs, + vis, + ident, + generics: Generics { + where_clause, + ..generics + }, + data: Data::Struct(DataStruct { + struct_token, + fields, + semi_token: semi, + }), + }) + } else if lookahead.peek(Token![enum]) { + let enum_token = input.parse::<Token![enum]>()?; + let ident = input.parse::<Ident>()?; + let generics = input.parse::<Generics>()?; + let (where_clause, brace, variants) = data_enum(input)?; + Ok(DeriveInput { + attrs, + vis, + ident, + generics: Generics { + where_clause, + ..generics + }, + data: Data::Enum(DataEnum { + enum_token, + brace_token: brace, + variants, + }), + }) + } else if lookahead.peek(Token![union]) { + let union_token = input.parse::<Token![union]>()?; + let ident = input.parse::<Ident>()?; + let generics = input.parse::<Generics>()?; + let (where_clause, fields) = data_union(input)?; + Ok(DeriveInput { + attrs, + vis, + ident, + generics: Generics { + where_clause, + ..generics + }, + data: Data::Union(DataUnion { + union_token, + fields, + }), + }) + } else { + Err(lookahead.error()) + } + } + } + + pub(crate) fn data_struct( + input: ParseStream, + ) -> Result<(Option<WhereClause>, Fields, Option<Token![;]>)> { + let mut lookahead = input.lookahead1(); + let mut where_clause = None; + if lookahead.peek(Token![where]) { + where_clause = Some(input.parse()?); + lookahead = input.lookahead1(); + } + + if where_clause.is_none() && lookahead.peek(token::Paren) { + let fields = input.parse()?; + + lookahead = input.lookahead1(); + if lookahead.peek(Token![where]) { + where_clause = Some(input.parse()?); + lookahead = input.lookahead1(); + } + + if lookahead.peek(Token![;]) { 
+ let semi = input.parse()?; + Ok((where_clause, Fields::Unnamed(fields), Some(semi))) + } else { + Err(lookahead.error()) + } + } else if lookahead.peek(token::Brace) { + let fields = input.parse()?; + Ok((where_clause, Fields::Named(fields), None)) + } else if lookahead.peek(Token![;]) { + let semi = input.parse()?; + Ok((where_clause, Fields::Unit, Some(semi))) + } else { + Err(lookahead.error()) + } + } + + pub(crate) fn data_enum( + input: ParseStream, + ) -> Result<( + Option<WhereClause>, + token::Brace, + Punctuated<Variant, Token![,]>, + )> { + let where_clause = input.parse()?; + + let content; + let brace = braced!(content in input); + let variants = content.parse_terminated(Variant::parse, Token![,])?; + + Ok((where_clause, brace, variants)) + } + + pub(crate) fn data_union(input: ParseStream) -> Result<(Option<WhereClause>, FieldsNamed)> { + let where_clause = input.parse()?; + let fields = input.parse()?; + Ok((where_clause, fields)) + } +} + +#[cfg(feature = "printing")] +mod printing { + use crate::attr::FilterAttrs; + use crate::data::Fields; + use crate::derive::{Data, DeriveInput}; + use crate::print::TokensOrDefault; + use proc_macro2::TokenStream; + use quote::ToTokens; + + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for DeriveInput { + fn to_tokens(&self, tokens: &mut TokenStream) { + for attr in self.attrs.outer() { + attr.to_tokens(tokens); + } + self.vis.to_tokens(tokens); + match &self.data { + Data::Struct(d) => d.struct_token.to_tokens(tokens), + Data::Enum(d) => d.enum_token.to_tokens(tokens), + Data::Union(d) => d.union_token.to_tokens(tokens), + } + self.ident.to_tokens(tokens); + self.generics.to_tokens(tokens); + match &self.data { + Data::Struct(data) => match &data.fields { + Fields::Named(fields) => { + self.generics.where_clause.to_tokens(tokens); + fields.to_tokens(tokens); + } + Fields::Unnamed(fields) => { + fields.to_tokens(tokens); + self.generics.where_clause.to_tokens(tokens); + TokensOrDefault(&data.semi_token).to_tokens(tokens); + } + Fields::Unit => { + self.generics.where_clause.to_tokens(tokens); + TokensOrDefault(&data.semi_token).to_tokens(tokens); + } + }, + Data::Enum(data) => { + self.generics.where_clause.to_tokens(tokens); + data.brace_token.surround(tokens, |tokens| { + data.variants.to_tokens(tokens); + }); + } + Data::Union(data) => { + self.generics.where_clause.to_tokens(tokens); + data.fields.to_tokens(tokens); + } + } + } + } +} diff --git a/vendor/syn/src/discouraged.rs b/vendor/syn/src/discouraged.rs new file mode 100644 index 00000000000000..c8d6bfe89a14ad --- /dev/null +++ b/vendor/syn/src/discouraged.rs @@ -0,0 +1,225 @@ +//! Extensions to the parsing API with niche applicability. + +use crate::buffer::Cursor; +use crate::error::Result; +use crate::parse::{inner_unexpected, ParseBuffer, Unexpected}; +use proc_macro2::extra::DelimSpan; +use proc_macro2::Delimiter; +use std::cell::Cell; +use std::mem; +use std::rc::Rc; + +/// Extensions to the `ParseStream` API to support speculative parsing. +pub trait Speculative { + /// Advance this parse stream to the position of a forked parse stream. + /// + /// This is the opposite operation to [`ParseStream::fork`]. You can fork a + /// parse stream, perform some speculative parsing, then join the original + /// stream to the fork to "commit" the parsing from the fork to the main + /// stream. + /// + /// If you can avoid doing this, you should, as it limits the ability to + /// generate useful errors. 
That said, it is often the only way to parse + /// syntax of the form `A* B*` for arbitrary syntax `A` and `B`. The problem + /// is that when the fork fails to parse an `A`, it's impossible to tell + /// whether that was because of a syntax error and the user meant to provide + /// an `A`, or that the `A`s are finished and it's time to start parsing + /// `B`s. Use with care. + /// + /// Also note that if `A` is a subset of `B`, `A* B*` can be parsed by + /// parsing `B*` and removing the leading members of `A` from the + /// repetition, bypassing the need to involve the downsides associated with + /// speculative parsing. + /// + /// [`ParseStream::fork`]: ParseBuffer::fork + /// + /// # Example + /// + /// There has been chatter about the possibility of making the colons in the + /// turbofish syntax like `path::to::<T>` no longer required by accepting + /// `path::to<T>` in expression position. Specifically, according to [RFC + /// 2544], [`PathSegment`] parsing should always try to consume a following + /// `<` token as the start of generic arguments, and reset to the `<` if + /// that fails (e.g. the token is acting as a less-than operator). + /// + /// This is the exact kind of parsing behavior which requires the "fork, + /// try, commit" behavior that [`ParseStream::fork`] discourages. With + /// `advance_to`, we can avoid having to parse the speculatively parsed + /// content a second time. + /// + /// This change in behavior can be implemented in syn by replacing just the + /// `Parse` implementation for `PathSegment`: + /// + /// ``` + /// # use syn::ext::IdentExt; + /// use syn::parse::discouraged::Speculative; + /// # use syn::parse::{Parse, ParseStream}; + /// # use syn::{Ident, PathArguments, Result, Token}; + /// + /// pub struct PathSegment { + /// pub ident: Ident, + /// pub arguments: PathArguments, + /// } + /// # + /// # impl<T> From<T> for PathSegment + /// # where + /// # T: Into<Ident>, + /// # { + /// # fn from(ident: T) -> Self { + /// # PathSegment { + /// # ident: ident.into(), + /// # arguments: PathArguments::None, + /// # } + /// # } + /// # } + /// + /// impl Parse for PathSegment { + /// fn parse(input: ParseStream) -> Result<Self> { + /// if input.peek(Token![super]) + /// || input.peek(Token![self]) + /// || input.peek(Token![Self]) + /// || input.peek(Token![crate]) + /// { + /// let ident = input.call(Ident::parse_any)?; + /// return Ok(PathSegment::from(ident)); + /// } + /// + /// let ident = input.parse()?; + /// if input.peek(Token![::]) && input.peek3(Token![<]) { + /// return Ok(PathSegment { + /// ident, + /// arguments: PathArguments::AngleBracketed(input.parse()?), + /// }); + /// } + /// if input.peek(Token![<]) && !input.peek(Token![<=]) { + /// let fork = input.fork(); + /// if let Ok(arguments) = fork.parse() { + /// input.advance_to(&fork); + /// return Ok(PathSegment { + /// ident, + /// arguments: PathArguments::AngleBracketed(arguments), + /// }); + /// } + /// } + /// Ok(PathSegment::from(ident)) + /// } + /// } + /// + /// # syn::parse_str::<PathSegment>("a<b,c>").unwrap(); + /// ``` + /// + /// # Drawbacks + /// + /// The main drawback of this style of speculative parsing is in error + /// presentation. Even if the lookahead is the "correct" parse, the error + /// that is shown is that of the "fallback" parse. 
To use the same example + /// as the turbofish above, take the following unfinished "turbofish": + /// + /// ```text + /// let _ = f<&'a fn(), for<'a> serde::>(); + /// ``` + /// + /// If this is parsed as generic arguments, we can provide the error message + /// + /// ```text + /// error: expected identifier + /// --> src.rs:L:C + /// | + /// L | let _ = f<&'a fn(), for<'a> serde::>(); + /// | ^ + /// ``` + /// + /// but if parsed using the above speculative parsing, it falls back to + /// assuming that the `<` is a less-than when it fails to parse the generic + /// arguments, and tries to interpret the `&'a` as the start of a labelled + /// loop, resulting in the much less helpful error + /// + /// ```text + /// error: expected `:` + /// --> src.rs:L:C + /// | + /// L | let _ = f<&'a fn(), for<'a> serde::>(); + /// | ^^ + /// ``` + /// + /// This can be mitigated with various heuristics (two examples: show both + /// forks' parse errors, or show the one that consumed more tokens), but + /// when you can control the grammar, sticking to something that can be + /// parsed LL(3) and without the LL(*) speculative parsing this makes + /// possible, displaying reasonable errors becomes much more simple. + /// + /// [RFC 2544]: https://github.com/rust-lang/rfcs/pull/2544 + /// [`PathSegment`]: crate::PathSegment + /// + /// # Performance + /// + /// This method performs a cheap fixed amount of work that does not depend + /// on how far apart the two streams are positioned. + /// + /// # Panics + /// + /// The forked stream in the argument of `advance_to` must have been + /// obtained by forking `self`. Attempting to advance to any other stream + /// will cause a panic. + fn advance_to(&self, fork: &Self); +} + +impl<'a> Speculative for ParseBuffer<'a> { + fn advance_to(&self, fork: &Self) { + if !crate::buffer::same_scope(self.cursor(), fork.cursor()) { + panic!("fork was not derived from the advancing parse stream"); + } + + let (self_unexp, self_sp) = inner_unexpected(self); + let (fork_unexp, fork_sp) = inner_unexpected(fork); + if !Rc::ptr_eq(&self_unexp, &fork_unexp) { + match (fork_sp, self_sp) { + // Unexpected set on the fork, but not on `self`, copy it over. + (Some((span, delimiter)), None) => { + self_unexp.set(Unexpected::Some(span, delimiter)); + } + // Unexpected unset. Use chain to propagate errors from fork. + (None, None) => { + fork_unexp.set(Unexpected::Chain(self_unexp)); + + // Ensure toplevel 'unexpected' tokens from the fork don't + // propagate up the chain by replacing the root `unexpected` + // pointer, only 'unexpected' tokens from existing group + // parsers should propagate. + fork.unexpected + .set(Some(Rc::new(Cell::new(Unexpected::None)))); + } + // Unexpected has been set on `self`. No changes needed. + (_, Some(_)) => {} + } + } + + // See comment on `cell` in the struct definition. + self.cell + .set(unsafe { mem::transmute::<Cursor, Cursor<'static>>(fork.cursor()) }); + } +} + +/// Extensions to the `ParseStream` API to support manipulating invisible +/// delimiters the same as if they were visible. +pub trait AnyDelimiter { + /// Returns the delimiter, the span of the delimiter token, and the nested + /// contents for further parsing. 
+ fn parse_any_delimiter(&self) -> Result<(Delimiter, DelimSpan, ParseBuffer)>; +} + +impl<'a> AnyDelimiter for ParseBuffer<'a> { + fn parse_any_delimiter(&self) -> Result<(Delimiter, DelimSpan, ParseBuffer)> { + self.step(|cursor| { + if let Some((content, delimiter, span, rest)) = cursor.any_group() { + let scope = span.close(); + let nested = crate::parse::advance_step_cursor(cursor, content); + let unexpected = crate::parse::get_unexpected(self); + let content = crate::parse::new_parse_buffer(scope, nested, unexpected); + Ok(((delimiter, span, content), rest)) + } else { + Err(cursor.error("expected any delimiter")) + } + }) + } +} diff --git a/vendor/syn/src/drops.rs b/vendor/syn/src/drops.rs new file mode 100644 index 00000000000000..c54308f02c13d3 --- /dev/null +++ b/vendor/syn/src/drops.rs @@ -0,0 +1,58 @@ +use std::iter; +use std::mem::ManuallyDrop; +use std::ops::{Deref, DerefMut}; +use std::option; +use std::slice; + +#[repr(transparent)] +pub(crate) struct NoDrop<T: ?Sized>(ManuallyDrop<T>); + +impl<T> NoDrop<T> { + pub(crate) fn new(value: T) -> Self + where + T: TrivialDrop, + { + NoDrop(ManuallyDrop::new(value)) + } +} + +impl<T: ?Sized> Deref for NoDrop<T> { + type Target = T; + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl<T: ?Sized> DerefMut for NoDrop<T> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } +} + +pub(crate) trait TrivialDrop {} + +impl<T> TrivialDrop for iter::Empty<T> {} +impl<T> TrivialDrop for slice::Iter<'_, T> {} +impl<T> TrivialDrop for slice::IterMut<'_, T> {} +impl<T> TrivialDrop for option::IntoIter<&T> {} +impl<T> TrivialDrop for option::IntoIter<&mut T> {} + +#[test] +fn test_needs_drop() { + use std::mem::needs_drop; + + struct NeedsDrop; + + impl Drop for NeedsDrop { + fn drop(&mut self) {} + } + + assert!(needs_drop::<NeedsDrop>()); + + // Test each of the types with a handwritten TrivialDrop impl above. + assert!(!needs_drop::<iter::Empty<NeedsDrop>>()); + assert!(!needs_drop::<slice::Iter<NeedsDrop>>()); + assert!(!needs_drop::<slice::IterMut<NeedsDrop>>()); + assert!(!needs_drop::<option::IntoIter<&NeedsDrop>>()); + assert!(!needs_drop::<option::IntoIter<&mut NeedsDrop>>()); +} diff --git a/vendor/syn/src/error.rs b/vendor/syn/src/error.rs new file mode 100644 index 00000000000000..f89278c26c40c2 --- /dev/null +++ b/vendor/syn/src/error.rs @@ -0,0 +1,468 @@ +#[cfg(feature = "parsing")] +use crate::buffer::Cursor; +use crate::ext::{PunctExt as _, TokenStreamExt as _}; +use crate::thread::ThreadBound; +use proc_macro2::{ + Delimiter, Group, Ident, LexError, Literal, Punct, Spacing, Span, TokenStream, TokenTree, +}; +#[cfg(feature = "printing")] +use quote::ToTokens; +use std::fmt::{self, Debug, Display}; +use std::slice; +use std::vec; + +/// The result of a Syn parser. +pub type Result<T> = std::result::Result<T, Error>; + +/// Error returned when a Syn parser cannot parse the input tokens. +/// +/// # Error reporting in proc macros +/// +/// The correct way to report errors back to the compiler from a procedural +/// macro is by emitting an appropriately spanned invocation of +/// [`compile_error!`] in the generated code. This produces a better diagnostic +/// message than simply panicking the macro. +/// +/// [`compile_error!`]: std::compile_error! +/// +/// When parsing macro input, the [`parse_macro_input!`] macro handles the +/// conversion to `compile_error!` automatically. +/// +/// [`parse_macro_input!`]: crate::parse_macro_input! 
+/// +/// ``` +/// # extern crate proc_macro; +/// # +/// use proc_macro::TokenStream; +/// use syn::parse::{Parse, ParseStream, Result}; +/// use syn::{parse_macro_input, ItemFn}; +/// +/// # const IGNORE: &str = stringify! { +/// #[proc_macro_attribute] +/// # }; +/// pub fn my_attr(args: TokenStream, input: TokenStream) -> TokenStream { +/// let args = parse_macro_input!(args as MyAttrArgs); +/// let input = parse_macro_input!(input as ItemFn); +/// +/// /* ... */ +/// # TokenStream::new() +/// } +/// +/// struct MyAttrArgs { +/// # _k: [(); { stringify! { +/// ... +/// # }; 0 }] +/// } +/// +/// impl Parse for MyAttrArgs { +/// fn parse(input: ParseStream) -> Result<Self> { +/// # stringify! { +/// ... +/// # }; +/// # unimplemented!() +/// } +/// } +/// ``` +/// +/// For errors that arise later than the initial parsing stage, the +/// [`.to_compile_error()`] or [`.into_compile_error()`] methods can be used to +/// perform an explicit conversion to `compile_error!`. +/// +/// [`.to_compile_error()`]: Error::to_compile_error +/// [`.into_compile_error()`]: Error::into_compile_error +/// +/// ``` +/// # extern crate proc_macro; +/// # +/// # use proc_macro::TokenStream; +/// # use syn::{parse_macro_input, DeriveInput}; +/// # +/// # const IGNORE: &str = stringify! { +/// #[proc_macro_derive(MyDerive)] +/// # }; +/// pub fn my_derive(input: TokenStream) -> TokenStream { +/// let input = parse_macro_input!(input as DeriveInput); +/// +/// // fn(DeriveInput) -> syn::Result<proc_macro2::TokenStream> +/// expand::my_derive(input) +/// .unwrap_or_else(syn::Error::into_compile_error) +/// .into() +/// } +/// # +/// # mod expand { +/// # use proc_macro2::TokenStream; +/// # use syn::{DeriveInput, Result}; +/// # +/// # pub fn my_derive(input: DeriveInput) -> Result<TokenStream> { +/// # unimplemented!() +/// # } +/// # } +/// ``` +pub struct Error { + messages: Vec<ErrorMessage>, +} + +struct ErrorMessage { + // Span is implemented as an index into a thread-local interner to keep the + // size small. It is not safe to access from a different thread. We want + // errors to be Send and Sync to play nicely with ecosystem crates for error + // handling, so pin the span we're given to its original thread and assume + // it is Span::call_site if accessed from any other thread. + span: ThreadBound<SpanRange>, + message: String, +} + +// Cannot use std::ops::Range<Span> because that does not implement Copy, +// whereas ThreadBound<T> requires a Copy impl as a way to ensure no Drop impls +// are involved. +struct SpanRange { + start: Span, + end: Span, +} + +#[cfg(test)] +struct _Test +where + Error: Send + Sync; + +impl Error { + /// Usually the [`ParseStream::error`] method will be used instead, which + /// automatically uses the correct span from the current position of the + /// parse stream. + /// + /// Use `Error::new` when the error needs to be triggered on some span other + /// than where the parse stream is currently positioned. + /// + /// [`ParseStream::error`]: crate::parse::ParseBuffer::error + /// + /// # Example + /// + /// ``` + /// use syn::{Error, Ident, LitStr, Result, Token}; + /// use syn::parse::ParseStream; + /// + /// // Parses input that looks like `name = "string"` where the key must be + /// // the identifier `name` and the value may be any string literal. + /// // Returns the string literal. 
+ /// fn parse_name(input: ParseStream) -> Result<LitStr> { + /// let name_token: Ident = input.parse()?; + /// if name_token != "name" { + /// // Trigger an error not on the current position of the stream, + /// // but on the position of the unexpected identifier. + /// return Err(Error::new(name_token.span(), "expected `name`")); + /// } + /// input.parse::<Token![=]>()?; + /// let s: LitStr = input.parse()?; + /// Ok(s) + /// } + /// ``` + pub fn new<T: Display>(span: Span, message: T) -> Self { + return new(span, message.to_string()); + + fn new(span: Span, message: String) -> Error { + Error { + messages: vec![ErrorMessage { + span: ThreadBound::new(SpanRange { + start: span, + end: span, + }), + message, + }], + } + } + } + + /// Creates an error with the specified message spanning the given syntax + /// tree node. + /// + /// Unlike the `Error::new` constructor, this constructor takes an argument + /// `tokens` which is a syntax tree node. This allows the resulting `Error` + /// to attempt to span all tokens inside of `tokens`. While you would + /// typically be able to use the `Spanned` trait with the above `Error::new` + /// constructor, implementation limitations today mean that + /// `Error::new_spanned` may provide a higher-quality error message on + /// stable Rust. + /// + /// When in doubt it's recommended to stick to `Error::new` (or + /// `ParseStream::error`)! + #[cfg(feature = "printing")] + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + pub fn new_spanned<T: ToTokens, U: Display>(tokens: T, message: U) -> Self { + return new_spanned(tokens.into_token_stream(), message.to_string()); + + fn new_spanned(tokens: TokenStream, message: String) -> Error { + let mut iter = tokens.into_iter(); + let start = iter.next().map_or_else(Span::call_site, |t| t.span()); + let end = iter.last().map_or(start, |t| t.span()); + Error { + messages: vec![ErrorMessage { + span: ThreadBound::new(SpanRange { start, end }), + message, + }], + } + } + } + + /// The source location of the error. + /// + /// Spans are not thread-safe so this function returns `Span::call_site()` + /// if called from a different thread than the one on which the `Error` was + /// originally created. + pub fn span(&self) -> Span { + let SpanRange { start, end } = match self.messages[0].span.get() { + Some(span) => *span, + None => return Span::call_site(), + }; + start.join(end).unwrap_or(start) + } + + /// Render the error as an invocation of [`compile_error!`]. + /// + /// The [`parse_macro_input!`] macro provides a convenient way to invoke + /// this method correctly in a procedural macro. + /// + /// [`compile_error!`]: std::compile_error! + /// [`parse_macro_input!`]: crate::parse_macro_input! + pub fn to_compile_error(&self) -> TokenStream { + let mut tokens = TokenStream::new(); + for msg in &self.messages { + ErrorMessage::to_compile_error(msg, &mut tokens); + } + tokens + } + + /// Render the error as an invocation of [`compile_error!`]. + /// + /// [`compile_error!`]: std::compile_error! + /// + /// # Example + /// + /// ``` + /// # extern crate proc_macro; + /// # + /// use proc_macro::TokenStream; + /// use syn::{parse_macro_input, DeriveInput, Error}; + /// + /// # const _: &str = stringify! 
{ + /// #[proc_macro_derive(MyTrait)] + /// # }; + /// pub fn derive_my_trait(input: TokenStream) -> TokenStream { + /// let input = parse_macro_input!(input as DeriveInput); + /// my_trait::expand(input) + /// .unwrap_or_else(Error::into_compile_error) + /// .into() + /// } + /// + /// mod my_trait { + /// use proc_macro2::TokenStream; + /// use syn::{DeriveInput, Result}; + /// + /// pub(crate) fn expand(input: DeriveInput) -> Result<TokenStream> { + /// /* ... */ + /// # unimplemented!() + /// } + /// } + /// ``` + pub fn into_compile_error(self) -> TokenStream { + self.to_compile_error() + } + + /// Add another error message to self such that when `to_compile_error()` is + /// called, both errors will be emitted together. + pub fn combine(&mut self, another: Error) { + self.messages.extend(another.messages); + } +} + +impl ErrorMessage { + fn to_compile_error(&self, tokens: &mut TokenStream) { + let (start, end) = match self.span.get() { + Some(range) => (range.start, range.end), + None => (Span::call_site(), Span::call_site()), + }; + + // ::core::compile_error!($message) + tokens.append(TokenTree::Punct(Punct::new_spanned( + ':', + Spacing::Joint, + start, + ))); + tokens.append(TokenTree::Punct(Punct::new_spanned( + ':', + Spacing::Alone, + start, + ))); + tokens.append(TokenTree::Ident(Ident::new("core", start))); + tokens.append(TokenTree::Punct(Punct::new_spanned( + ':', + Spacing::Joint, + start, + ))); + tokens.append(TokenTree::Punct(Punct::new_spanned( + ':', + Spacing::Alone, + start, + ))); + tokens.append(TokenTree::Ident(Ident::new("compile_error", start))); + tokens.append(TokenTree::Punct(Punct::new_spanned( + '!', + Spacing::Alone, + start, + ))); + tokens.append(TokenTree::Group({ + let mut group = Group::new( + Delimiter::Brace, + TokenStream::from({ + let mut string = Literal::string(&self.message); + string.set_span(end); + TokenTree::Literal(string) + }), + ); + group.set_span(end); + group + })); + } +} + +#[cfg(feature = "parsing")] +pub(crate) fn new_at<T: Display>(scope: Span, cursor: Cursor, message: T) -> Error { + if cursor.eof() { + Error::new(scope, format!("unexpected end of input, {}", message)) + } else { + let span = crate::buffer::open_span_of_group(cursor); + Error::new(span, message) + } +} + +#[cfg(all(feature = "parsing", any(feature = "full", feature = "derive")))] +pub(crate) fn new2<T: Display>(start: Span, end: Span, message: T) -> Error { + return new2(start, end, message.to_string()); + + fn new2(start: Span, end: Span, message: String) -> Error { + Error { + messages: vec![ErrorMessage { + span: ThreadBound::new(SpanRange { start, end }), + message, + }], + } + } +} + +impl Debug for Error { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + if self.messages.len() == 1 { + formatter + .debug_tuple("Error") + .field(&self.messages[0]) + .finish() + } else { + formatter + .debug_tuple("Error") + .field(&self.messages) + .finish() + } + } +} + +impl Debug for ErrorMessage { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + Debug::fmt(&self.message, formatter) + } +} + +impl Display for Error { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str(&self.messages[0].message) + } +} + +impl Clone for Error { + fn clone(&self) -> Self { + Error { + messages: self.messages.clone(), + } + } +} + +impl Clone for ErrorMessage { + fn clone(&self) -> Self { + ErrorMessage { + span: self.span, + message: self.message.clone(), + } + } +} + +impl Clone for SpanRange { + fn clone(&self) -> 
Self { + *self + } +} + +impl Copy for SpanRange {} + +impl std::error::Error for Error {} + +impl From<LexError> for Error { + fn from(err: LexError) -> Self { + Error::new(err.span(), err) + } +} + +impl IntoIterator for Error { + type Item = Error; + type IntoIter = IntoIter; + + fn into_iter(self) -> Self::IntoIter { + IntoIter { + messages: self.messages.into_iter(), + } + } +} + +pub struct IntoIter { + messages: vec::IntoIter<ErrorMessage>, +} + +impl Iterator for IntoIter { + type Item = Error; + + fn next(&mut self) -> Option<Self::Item> { + Some(Error { + messages: vec![self.messages.next()?], + }) + } +} + +impl<'a> IntoIterator for &'a Error { + type Item = Error; + type IntoIter = Iter<'a>; + + fn into_iter(self) -> Self::IntoIter { + Iter { + messages: self.messages.iter(), + } + } +} + +pub struct Iter<'a> { + messages: slice::Iter<'a, ErrorMessage>, +} + +impl<'a> Iterator for Iter<'a> { + type Item = Error; + + fn next(&mut self) -> Option<Self::Item> { + Some(Error { + messages: vec![self.messages.next()?.clone()], + }) + } +} + +impl Extend<Error> for Error { + fn extend<T: IntoIterator<Item = Error>>(&mut self, iter: T) { + for err in iter { + self.combine(err); + } + } +} diff --git a/vendor/syn/src/export.rs b/vendor/syn/src/export.rs new file mode 100644 index 00000000000000..b9ea5c747b75a7 --- /dev/null +++ b/vendor/syn/src/export.rs @@ -0,0 +1,73 @@ +#[doc(hidden)] +pub use std::clone::Clone; +#[doc(hidden)] +pub use std::cmp::{Eq, PartialEq}; +#[doc(hidden)] +pub use std::concat; +#[doc(hidden)] +pub use std::default::Default; +#[doc(hidden)] +pub use std::fmt::Debug; +#[doc(hidden)] +pub use std::hash::{Hash, Hasher}; +#[doc(hidden)] +pub use std::marker::Copy; +#[doc(hidden)] +pub use std::option::Option::{None, Some}; +#[doc(hidden)] +pub use std::result::Result::{Err, Ok}; +#[doc(hidden)] +pub use std::stringify; + +#[doc(hidden)] +pub type Formatter<'a> = std::fmt::Formatter<'a>; +#[doc(hidden)] +pub type FmtResult = std::fmt::Result; + +#[doc(hidden)] +pub type bool = std::primitive::bool; +#[doc(hidden)] +pub type str = std::primitive::str; + +#[cfg(feature = "printing")] +#[doc(hidden)] +pub use quote; + +#[doc(hidden)] +pub type Span = proc_macro2::Span; +#[doc(hidden)] +pub type TokenStream2 = proc_macro2::TokenStream; + +#[cfg(feature = "parsing")] +#[doc(hidden)] +pub use crate::group::{parse_braces, parse_brackets, parse_parens}; + +#[doc(hidden)] +pub use crate::span::IntoSpans; + +#[cfg(all(feature = "parsing", feature = "printing"))] +#[doc(hidden)] +pub use crate::parse_quote::parse as parse_quote; + +#[cfg(feature = "parsing")] +#[doc(hidden)] +pub use crate::token::parsing::{peek_punct, punct as parse_punct}; + +#[cfg(feature = "printing")] +#[doc(hidden)] +pub use crate::token::printing::punct as print_punct; + +#[cfg(feature = "parsing")] +#[doc(hidden)] +pub use crate::token::private::CustomToken; + +#[cfg(feature = "proc-macro")] +#[doc(hidden)] +pub type TokenStream = proc_macro::TokenStream; + +#[cfg(feature = "printing")] +#[doc(hidden)] +pub use quote::{ToTokens, TokenStreamExt}; + +#[doc(hidden)] +pub struct private(pub(crate) ()); diff --git a/vendor/syn/src/expr.rs b/vendor/syn/src/expr.rs new file mode 100644 index 00000000000000..b1b16465fcdd91 --- /dev/null +++ b/vendor/syn/src/expr.rs @@ -0,0 +1,4173 @@ +use crate::attr::Attribute; +#[cfg(all(feature = "parsing", feature = "full"))] +use crate::error::Result; +#[cfg(feature = "parsing")] +use crate::ext::IdentExt as _; +#[cfg(feature = "full")] +use 
crate::generics::BoundLifetimes; +use crate::ident::Ident; +#[cfg(any(feature = "parsing", feature = "full"))] +use crate::lifetime::Lifetime; +use crate::lit::Lit; +use crate::mac::Macro; +use crate::op::{BinOp, UnOp}; +#[cfg(feature = "parsing")] +use crate::parse::ParseStream; +#[cfg(feature = "full")] +use crate::pat::Pat; +use crate::path::{AngleBracketedGenericArguments, Path, QSelf}; +use crate::punctuated::Punctuated; +#[cfg(feature = "full")] +use crate::stmt::Block; +use crate::token; +#[cfg(feature = "full")] +use crate::ty::ReturnType; +use crate::ty::Type; +use proc_macro2::{Span, TokenStream}; +#[cfg(feature = "printing")] +use quote::IdentFragment; +#[cfg(feature = "printing")] +use std::fmt::{self, Display}; +use std::hash::{Hash, Hasher}; +#[cfg(all(feature = "parsing", feature = "full"))] +use std::mem; + +ast_enum_of_structs! { + /// A Rust expression. + /// + /// *This type is available only if Syn is built with the `"derive"` or `"full"` + /// feature, but most of the variants are not available unless "full" is enabled.* + /// + /// # Syntax tree enums + /// + /// This type is a syntax tree enum. In Syn this and other syntax tree enums + /// are designed to be traversed using the following rebinding idiom. + /// + /// ``` + /// # use syn::Expr; + /// # + /// # fn example(expr: Expr) { + /// # const IGNORE: &str = stringify! { + /// let expr: Expr = /* ... */; + /// # }; + /// match expr { + /// Expr::MethodCall(expr) => { + /// /* ... */ + /// } + /// Expr::Cast(expr) => { + /// /* ... */ + /// } + /// Expr::If(expr) => { + /// /* ... */ + /// } + /// + /// /* ... */ + /// # _ => {} + /// # } + /// # } + /// ``` + /// + /// We begin with a variable `expr` of type `Expr` that has no fields + /// (because it is an enum), and by matching on it and rebinding a variable + /// with the same name `expr` we effectively imbue our variable with all of + /// the data fields provided by the variant that it turned out to be. So for + /// example above if we ended up in the `MethodCall` case then we get to use + /// `expr.receiver`, `expr.args` etc; if we ended up in the `If` case we get + /// to use `expr.cond`, `expr.then_branch`, `expr.else_branch`. + /// + /// This approach avoids repeating the variant names twice on every line. + /// + /// ``` + /// # use syn::{Expr, ExprMethodCall}; + /// # + /// # fn example(expr: Expr) { + /// // Repetitive; recommend not doing this. + /// match expr { + /// Expr::MethodCall(ExprMethodCall { method, args, .. }) => { + /// # } + /// # _ => {} + /// # } + /// # } + /// ``` + /// + /// In general, the name to which a syntax tree enum variant is bound should + /// be a suitable name for the complete syntax tree enum type. + /// + /// ``` + /// # use syn::{Expr, ExprField}; + /// # + /// # fn example(discriminant: ExprField) { + /// // Binding is called `base` which is the name I would use if I were + /// // assigning `*discriminant.base` without an `if let`. + /// if let Expr::Tuple(base) = *discriminant.base { + /// # } + /// # } + /// ``` + /// + /// A sign that you may not be choosing the right variable names is if you + /// see names getting repeated in your code, like accessing + /// `receiver.receiver` or `pat.pat` or `cond.cond`. + #[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] + #[non_exhaustive] + pub enum Expr { + /// A slice literal expression: `[a, b, c, d]`. + Array(ExprArray), + + /// An assignment expression: `a = compute()`. + Assign(ExprAssign), + + /// An async block: `async { ... }`. 
+ Async(ExprAsync), + + /// An await expression: `fut.await`. + Await(ExprAwait), + + /// A binary operation: `a + b`, `a += b`. + Binary(ExprBinary), + + /// A blocked scope: `{ ... }`. + Block(ExprBlock), + + /// A `break`, with an optional label to break and an optional + /// expression. + Break(ExprBreak), + + /// A function call expression: `invoke(a, b)`. + Call(ExprCall), + + /// A cast expression: `foo as f64`. + Cast(ExprCast), + + /// A closure expression: `|a, b| a + b`. + Closure(ExprClosure), + + /// A const block: `const { ... }`. + Const(ExprConst), + + /// A `continue`, with an optional label. + Continue(ExprContinue), + + /// Access of a named struct field (`obj.k`) or unnamed tuple struct + /// field (`obj.0`). + Field(ExprField), + + /// A for loop: `for pat in expr { ... }`. + ForLoop(ExprForLoop), + + /// An expression contained within invisible delimiters. + /// + /// This variant is important for faithfully representing the precedence + /// of expressions and is related to `None`-delimited spans in a + /// `TokenStream`. + Group(ExprGroup), + + /// An `if` expression with an optional `else` block: `if expr { ... } + /// else { ... }`. + /// + /// The `else` branch expression may only be an `If` or `Block` + /// expression, not any of the other types of expression. + If(ExprIf), + + /// A square bracketed indexing expression: `vector[2]`. + Index(ExprIndex), + + /// The inferred value of a const generic argument, denoted `_`. + Infer(ExprInfer), + + /// A `let` guard: `let Some(x) = opt`. + Let(ExprLet), + + /// A literal in place of an expression: `1`, `"foo"`. + Lit(ExprLit), + + /// Conditionless loop: `loop { ... }`. + Loop(ExprLoop), + + /// A macro invocation expression: `format!("{}", q)`. + Macro(ExprMacro), + + /// A `match` expression: `match n { Some(n) => {}, None => {} }`. + Match(ExprMatch), + + /// A method call expression: `x.foo::<T>(a, b)`. + MethodCall(ExprMethodCall), + + /// A parenthesized expression: `(a + b)`. + Paren(ExprParen), + + /// A path like `std::mem::replace` possibly containing generic + /// parameters and a qualified self-type. + /// + /// A plain identifier like `x` is a path of length 1. + Path(ExprPath), + + /// A range expression: `1..2`, `1..`, `..2`, `1..=2`, `..=2`. + Range(ExprRange), + + /// Address-of operation: `&raw const place` or `&raw mut place`. + RawAddr(ExprRawAddr), + + /// A referencing operation: `&a` or `&mut a`. + Reference(ExprReference), + + /// An array literal constructed from one repeated element: `[0u8; N]`. + Repeat(ExprRepeat), + + /// A `return`, with an optional value to be returned. + Return(ExprReturn), + + /// A struct literal expression: `Point { x: 1, y: 1 }`. + /// + /// The `rest` provides the value of the remaining fields as in `S { a: + /// 1, b: 1, ..rest }`. + Struct(ExprStruct), + + /// A try-expression: `expr?`. + Try(ExprTry), + + /// A try block: `try { ... }`. + TryBlock(ExprTryBlock), + + /// A tuple expression: `(a, b, c, d)`. + Tuple(ExprTuple), + + /// A unary operation: `!x`, `*x`. + Unary(ExprUnary), + + /// An unsafe block: `unsafe { ... }`. + Unsafe(ExprUnsafe), + + /// Tokens in expression position not interpreted by Syn. + Verbatim(TokenStream), + + /// A while loop: `while expr { ... }`. + While(ExprWhile), + + /// A yield expression: `yield expr`. 
+ Yield(ExprYield), + + // For testing exhaustiveness in downstream code, use the following idiom: + // + // match expr { + // #![cfg_attr(test, deny(non_exhaustive_omitted_patterns))] + // + // Expr::Array(expr) => {...} + // Expr::Assign(expr) => {...} + // ... + // Expr::Yield(expr) => {...} + // + // _ => { /* some sane fallback */ } + // } + // + // This way we fail your tests but don't break your library when adding + // a variant. You will be notified by a test failure when a variant is + // added, so that you can add code to handle it, but your library will + // continue to compile and work for downstream users in the interim. + } +} + +ast_struct! { + /// A slice literal expression: `[a, b, c, d]`. + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + pub struct ExprArray #full { + pub attrs: Vec<Attribute>, + pub bracket_token: token::Bracket, + pub elems: Punctuated<Expr, Token![,]>, + } +} + +ast_struct! { + /// An assignment expression: `a = compute()`. + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + pub struct ExprAssign #full { + pub attrs: Vec<Attribute>, + pub left: Box<Expr>, + pub eq_token: Token![=], + pub right: Box<Expr>, + } +} + +ast_struct! { + /// An async block: `async { ... }`. + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + pub struct ExprAsync #full { + pub attrs: Vec<Attribute>, + pub async_token: Token![async], + pub capture: Option<Token![move]>, + pub block: Block, + } +} + +ast_struct! { + /// An await expression: `fut.await`. + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + pub struct ExprAwait #full { + pub attrs: Vec<Attribute>, + pub base: Box<Expr>, + pub dot_token: Token![.], + pub await_token: Token![await], + } +} + +ast_struct! { + /// A binary operation: `a + b`, `a += b`. + #[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] + pub struct ExprBinary { + pub attrs: Vec<Attribute>, + pub left: Box<Expr>, + pub op: BinOp, + pub right: Box<Expr>, + } +} + +ast_struct! { + /// A blocked scope: `{ ... }`. + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + pub struct ExprBlock #full { + pub attrs: Vec<Attribute>, + pub label: Option<Label>, + pub block: Block, + } +} + +ast_struct! { + /// A `break`, with an optional label to break and an optional + /// expression. + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + pub struct ExprBreak #full { + pub attrs: Vec<Attribute>, + pub break_token: Token![break], + pub label: Option<Lifetime>, + pub expr: Option<Box<Expr>>, + } +} + +ast_struct! { + /// A function call expression: `invoke(a, b)`. + #[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] + pub struct ExprCall { + pub attrs: Vec<Attribute>, + pub func: Box<Expr>, + pub paren_token: token::Paren, + pub args: Punctuated<Expr, Token![,]>, + } +} + +ast_struct! { + /// A cast expression: `foo as f64`. + #[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] + pub struct ExprCast { + pub attrs: Vec<Attribute>, + pub expr: Box<Expr>, + pub as_token: Token![as], + pub ty: Box<Type>, + } +} + +ast_struct! { + /// A closure expression: `|a, b| a + b`. 
+ #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + pub struct ExprClosure #full { + pub attrs: Vec<Attribute>, + pub lifetimes: Option<BoundLifetimes>, + pub constness: Option<Token![const]>, + pub movability: Option<Token![static]>, + pub asyncness: Option<Token![async]>, + pub capture: Option<Token![move]>, + pub or1_token: Token![|], + pub inputs: Punctuated<Pat, Token![,]>, + pub or2_token: Token![|], + pub output: ReturnType, + pub body: Box<Expr>, + } +} + +ast_struct! { + /// A const block: `const { ... }`. + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + pub struct ExprConst #full { + pub attrs: Vec<Attribute>, + pub const_token: Token![const], + pub block: Block, + } +} + +ast_struct! { + /// A `continue`, with an optional label. + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + pub struct ExprContinue #full { + pub attrs: Vec<Attribute>, + pub continue_token: Token![continue], + pub label: Option<Lifetime>, + } +} + +ast_struct! { + /// Access of a named struct field (`obj.k`) or unnamed tuple struct + /// field (`obj.0`). + #[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] + pub struct ExprField { + pub attrs: Vec<Attribute>, + pub base: Box<Expr>, + pub dot_token: Token![.], + pub member: Member, + } +} + +ast_struct! { + /// A for loop: `for pat in expr { ... }`. + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + pub struct ExprForLoop #full { + pub attrs: Vec<Attribute>, + pub label: Option<Label>, + pub for_token: Token![for], + pub pat: Box<Pat>, + pub in_token: Token![in], + pub expr: Box<Expr>, + pub body: Block, + } +} + +ast_struct! { + /// An expression contained within invisible delimiters. + /// + /// This variant is important for faithfully representing the precedence + /// of expressions and is related to `None`-delimited spans in a + /// `TokenStream`. + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + pub struct ExprGroup { + pub attrs: Vec<Attribute>, + pub group_token: token::Group, + pub expr: Box<Expr>, + } +} + +ast_struct! { + /// An `if` expression with an optional `else` block: `if expr { ... } + /// else { ... }`. + /// + /// The `else` branch expression may only be an `If` or `Block` + /// expression, not any of the other types of expression. + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + pub struct ExprIf #full { + pub attrs: Vec<Attribute>, + pub if_token: Token![if], + pub cond: Box<Expr>, + pub then_branch: Block, + pub else_branch: Option<(Token![else], Box<Expr>)>, + } +} + +ast_struct! { + /// A square bracketed indexing expression: `vector[2]`. + #[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] + pub struct ExprIndex { + pub attrs: Vec<Attribute>, + pub expr: Box<Expr>, + pub bracket_token: token::Bracket, + pub index: Box<Expr>, + } +} + +ast_struct! { + /// The inferred value of a const generic argument, denoted `_`. + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + pub struct ExprInfer #full { + pub attrs: Vec<Attribute>, + pub underscore_token: Token![_], + } +} + +ast_struct! { + /// A `let` guard: `let Some(x) = opt`. + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + pub struct ExprLet #full { + pub attrs: Vec<Attribute>, + pub let_token: Token![let], + pub pat: Box<Pat>, + pub eq_token: Token![=], + pub expr: Box<Expr>, + } +} + +ast_struct! { + /// A literal in place of an expression: `1`, `"foo"`. + #[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] + pub struct ExprLit { + pub attrs: Vec<Attribute>, + pub lit: Lit, + } +} + +ast_struct! 
{ + /// Conditionless loop: `loop { ... }`. + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + pub struct ExprLoop #full { + pub attrs: Vec<Attribute>, + pub label: Option<Label>, + pub loop_token: Token![loop], + pub body: Block, + } +} + +ast_struct! { + /// A macro invocation expression: `format!("{}", q)`. + #[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] + pub struct ExprMacro { + pub attrs: Vec<Attribute>, + pub mac: Macro, + } +} + +ast_struct! { + /// A `match` expression: `match n { Some(n) => {}, None => {} }`. + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + pub struct ExprMatch #full { + pub attrs: Vec<Attribute>, + pub match_token: Token![match], + pub expr: Box<Expr>, + pub brace_token: token::Brace, + pub arms: Vec<Arm>, + } +} + +ast_struct! { + /// A method call expression: `x.foo::<T>(a, b)`. + #[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] + pub struct ExprMethodCall { + pub attrs: Vec<Attribute>, + pub receiver: Box<Expr>, + pub dot_token: Token![.], + pub method: Ident, + pub turbofish: Option<AngleBracketedGenericArguments>, + pub paren_token: token::Paren, + pub args: Punctuated<Expr, Token![,]>, + } +} + +ast_struct! { + /// A parenthesized expression: `(a + b)`. + #[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] + pub struct ExprParen { + pub attrs: Vec<Attribute>, + pub paren_token: token::Paren, + pub expr: Box<Expr>, + } +} + +ast_struct! { + /// A path like `std::mem::replace` possibly containing generic + /// parameters and a qualified self-type. + /// + /// A plain identifier like `x` is a path of length 1. + #[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] + pub struct ExprPath { + pub attrs: Vec<Attribute>, + pub qself: Option<QSelf>, + pub path: Path, + } +} + +ast_struct! { + /// A range expression: `1..2`, `1..`, `..2`, `1..=2`, `..=2`. + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + pub struct ExprRange #full { + pub attrs: Vec<Attribute>, + pub start: Option<Box<Expr>>, + pub limits: RangeLimits, + pub end: Option<Box<Expr>>, + } +} + +ast_struct! { + /// Address-of operation: `&raw const place` or `&raw mut place`. + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + pub struct ExprRawAddr #full { + pub attrs: Vec<Attribute>, + pub and_token: Token![&], + pub raw: Token![raw], + pub mutability: PointerMutability, + pub expr: Box<Expr>, + } +} + +ast_struct! { + /// A referencing operation: `&a` or `&mut a`. + #[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] + pub struct ExprReference { + pub attrs: Vec<Attribute>, + pub and_token: Token![&], + pub mutability: Option<Token![mut]>, + pub expr: Box<Expr>, + } +} + +ast_struct! { + /// An array literal constructed from one repeated element: `[0u8; N]`. + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + pub struct ExprRepeat #full { + pub attrs: Vec<Attribute>, + pub bracket_token: token::Bracket, + pub expr: Box<Expr>, + pub semi_token: Token![;], + pub len: Box<Expr>, + } +} + +ast_struct! { + /// A `return`, with an optional value to be returned. + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + pub struct ExprReturn #full { + pub attrs: Vec<Attribute>, + pub return_token: Token![return], + pub expr: Option<Box<Expr>>, + } +} + +ast_struct! { + /// A struct literal expression: `Point { x: 1, y: 1 }`. + /// + /// The `rest` provides the value of the remaining fields as in `S { a: + /// 1, b: 1, ..rest }`. 
+ #[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] + pub struct ExprStruct { + pub attrs: Vec<Attribute>, + pub qself: Option<QSelf>, + pub path: Path, + pub brace_token: token::Brace, + pub fields: Punctuated<FieldValue, Token![,]>, + pub dot2_token: Option<Token![..]>, + pub rest: Option<Box<Expr>>, + } +} + +ast_struct! { + /// A try-expression: `expr?`. + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + pub struct ExprTry #full { + pub attrs: Vec<Attribute>, + pub expr: Box<Expr>, + pub question_token: Token![?], + } +} + +ast_struct! { + /// A try block: `try { ... }`. + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + pub struct ExprTryBlock #full { + pub attrs: Vec<Attribute>, + pub try_token: Token![try], + pub block: Block, + } +} + +ast_struct! { + /// A tuple expression: `(a, b, c, d)`. + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + pub struct ExprTuple { + pub attrs: Vec<Attribute>, + pub paren_token: token::Paren, + pub elems: Punctuated<Expr, Token![,]>, + } +} + +ast_struct! { + /// A unary operation: `!x`, `*x`. + #[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] + pub struct ExprUnary { + pub attrs: Vec<Attribute>, + pub op: UnOp, + pub expr: Box<Expr>, + } +} + +ast_struct! { + /// An unsafe block: `unsafe { ... }`. + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + pub struct ExprUnsafe #full { + pub attrs: Vec<Attribute>, + pub unsafe_token: Token![unsafe], + pub block: Block, + } +} + +ast_struct! { + /// A while loop: `while expr { ... }`. + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + pub struct ExprWhile #full { + pub attrs: Vec<Attribute>, + pub label: Option<Label>, + pub while_token: Token![while], + pub cond: Box<Expr>, + pub body: Block, + } +} + +ast_struct! { + /// A yield expression: `yield expr`. + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + pub struct ExprYield #full { + pub attrs: Vec<Attribute>, + pub yield_token: Token![yield], + pub expr: Option<Box<Expr>>, + } +} + +impl Expr { + /// An unspecified invalid expression. + /// + /// ``` + /// use quote::ToTokens; + /// use std::mem; + /// use syn::{parse_quote, Expr}; + /// + /// fn unparenthesize(e: &mut Expr) { + /// while let Expr::Paren(paren) = e { + /// *e = mem::replace(&mut *paren.expr, Expr::PLACEHOLDER); + /// } + /// } + /// + /// fn main() { + /// let mut e: Expr = parse_quote! { ((1 + 1)) }; + /// unparenthesize(&mut e); + /// assert_eq!("1 + 1", e.to_token_stream().to_string()); + /// } + /// ``` + pub const PLACEHOLDER: Self = Expr::Path(ExprPath { + attrs: Vec::new(), + qself: None, + path: Path { + leading_colon: None, + segments: Punctuated::new(), + }, + }); + + /// An alternative to the primary `Expr::parse` parser (from the [`Parse`] + /// trait) for ambiguous syntactic positions in which a trailing brace + /// should not be taken as part of the expression. + /// + /// [`Parse`]: crate::parse::Parse + /// + /// Rust grammar has an ambiguity where braces sometimes turn a path + /// expression into a struct initialization and sometimes do not. In the + /// following code, the expression `S {}` is one expression. Presumably + /// there is an empty struct `struct S {}` defined somewhere which it is + /// instantiating. 
+ /// + /// ``` + /// # struct S; + /// # impl std::ops::Deref for S { + /// # type Target = bool; + /// # fn deref(&self) -> &Self::Target { + /// # &true + /// # } + /// # } + /// let _ = *S {}; + /// + /// // parsed by rustc as: `*(S {})` + /// ``` + /// + /// We would want to parse the above using `Expr::parse` after the `=` + /// token. + /// + /// But in the following, `S {}` is *not* a struct init expression. + /// + /// ``` + /// # const S: &bool = &true; + /// if *S {} {} + /// + /// // parsed by rustc as: + /// // + /// // if (*S) { + /// // /* empty block */ + /// // } + /// // { + /// // /* another empty block */ + /// // } + /// ``` + /// + /// For that reason we would want to parse if-conditions using + /// `Expr::parse_without_eager_brace` after the `if` token. Same for similar + /// syntactic positions such as the condition expr after a `while` token or + /// the expr at the top of a `match`. + /// + /// The Rust grammar's choices around which way this ambiguity is resolved + /// at various syntactic positions is fairly arbitrary. Really either parse + /// behavior could work in most positions, and language designers just + /// decide each case based on which is more likely to be what the programmer + /// had in mind most of the time. + /// + /// ``` + /// # struct S; + /// # fn doc() -> S { + /// if return S {} {} + /// # unreachable!() + /// # } + /// + /// // parsed by rustc as: + /// // + /// // if (return (S {})) { + /// // } + /// // + /// // but could equally well have been this other arbitrary choice: + /// // + /// // if (return S) { + /// // } + /// // {} + /// ``` + /// + /// Note the grammar ambiguity on trailing braces is distinct from + /// precedence and is not captured by assigning a precedence level to the + /// braced struct init expr in relation to other operators. This can be + /// illustrated by `return 0..S {}` vs `match 0..S {}`. The former parses as + /// `return (0..(S {}))` implying tighter precedence for struct init than + /// `..`, while the latter parses as `match (0..S) {}` implying tighter + /// precedence for `..` than struct init, a contradiction. + #[cfg(all(feature = "full", feature = "parsing"))] + #[cfg_attr(docsrs, doc(cfg(all(feature = "full", feature = "parsing"))))] + pub fn parse_without_eager_brace(input: ParseStream) -> Result<Expr> { + parsing::ambiguous_expr(input, parsing::AllowStruct(false)) + } + + /// An alternative to the primary `Expr::parse` parser (from the [`Parse`] + /// trait) for syntactic positions in which expression boundaries are placed + /// more eagerly than done by the typical expression grammar. This includes + /// expressions at the head of a statement or in the right-hand side of a + /// `match` arm. + /// + /// [`Parse`]: crate::parse::Parse + /// + /// Compare the following cases: + /// + /// 1. + /// ``` + /// # let result = (); + /// # let guard = false; + /// # let cond = true; + /// # let f = true; + /// # let g = f; + /// # + /// let _ = match result { + /// () if guard => if cond { f } else { g } + /// () => false, + /// }; + /// ``` + /// + /// 2. + /// ``` + /// # let cond = true; + /// # let f = (); + /// # let g = f; + /// # + /// let _ = || { + /// if cond { f } else { g } + /// () + /// }; + /// ``` + /// + /// 3. + /// ``` + /// # let cond = true; + /// # let f = || (); + /// # let g = f; + /// # + /// let _ = [if cond { f } else { g } ()]; + /// ``` + /// + /// The same sequence of tokens `if cond { f } else { g } ()` appears in + /// expression position 3 times. 
The first two syntactic positions use eager + /// placement of expression boundaries, and parse as `Expr::If`, with the + /// adjacent `()` becoming `Pat::Tuple` or `Expr::Tuple`. In contrast, the + /// third case uses standard expression boundaries and parses as + /// `Expr::Call`. + /// + /// As with [`parse_without_eager_brace`], this ambiguity in the Rust + /// grammar is independent of precedence. + /// + /// [`parse_without_eager_brace`]: Self::parse_without_eager_brace + #[cfg(all(feature = "full", feature = "parsing"))] + #[cfg_attr(docsrs, doc(cfg(all(feature = "full", feature = "parsing"))))] + pub fn parse_with_earlier_boundary_rule(input: ParseStream) -> Result<Expr> { + parsing::parse_with_earlier_boundary_rule(input) + } + + /// Returns whether the next token in the parse stream is one that might + /// possibly form the beginning of an expr. + /// + /// This classification is a load-bearing part of the grammar of some Rust + /// expressions, notably `return` and `break`. For example `return < …` will + /// never parse `<` as a binary operator regardless of what comes after, + /// because `<` is a legal starting token for an expression and so it's + /// required to be continued as a return value, such as `return <Struct as + /// Trait>::CONST`. Meanwhile `return > …` treats the `>` as a binary + /// operator because it cannot be a starting token for any Rust expression. + #[cfg(feature = "parsing")] + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + pub fn peek(input: ParseStream) -> bool { + input.peek(Ident::peek_any) && !input.peek(Token![as]) // value name or keyword + || input.peek(token::Paren) // tuple + || input.peek(token::Bracket) // array + || input.peek(token::Brace) // block + || input.peek(Lit) // literal + || input.peek(Token![!]) && !input.peek(Token![!=]) // operator not + || input.peek(Token![-]) && !input.peek(Token![-=]) && !input.peek(Token![->]) // unary minus + || input.peek(Token![*]) && !input.peek(Token![*=]) // dereference + || input.peek(Token![|]) && !input.peek(Token![|=]) // closure + || input.peek(Token![&]) && !input.peek(Token![&=]) // reference + || input.peek(Token![..]) // range + || input.peek(Token![<]) && !input.peek(Token![<=]) && !input.peek(Token![<<=]) // associated path + || input.peek(Token![::]) // absolute path + || input.peek(Lifetime) // labeled loop + || input.peek(Token![#]) // expression attributes + } + + #[cfg(all(feature = "parsing", feature = "full"))] + pub(crate) fn replace_attrs(&mut self, new: Vec<Attribute>) -> Vec<Attribute> { + match self { + Expr::Array(ExprArray { attrs, .. }) + | Expr::Assign(ExprAssign { attrs, .. }) + | Expr::Async(ExprAsync { attrs, .. }) + | Expr::Await(ExprAwait { attrs, .. }) + | Expr::Binary(ExprBinary { attrs, .. }) + | Expr::Block(ExprBlock { attrs, .. }) + | Expr::Break(ExprBreak { attrs, .. }) + | Expr::Call(ExprCall { attrs, .. }) + | Expr::Cast(ExprCast { attrs, .. }) + | Expr::Closure(ExprClosure { attrs, .. }) + | Expr::Const(ExprConst { attrs, .. }) + | Expr::Continue(ExprContinue { attrs, .. }) + | Expr::Field(ExprField { attrs, .. }) + | Expr::ForLoop(ExprForLoop { attrs, .. }) + | Expr::Group(ExprGroup { attrs, .. }) + | Expr::If(ExprIf { attrs, .. }) + | Expr::Index(ExprIndex { attrs, .. }) + | Expr::Infer(ExprInfer { attrs, .. }) + | Expr::Let(ExprLet { attrs, .. }) + | Expr::Lit(ExprLit { attrs, .. }) + | Expr::Loop(ExprLoop { attrs, .. }) + | Expr::Macro(ExprMacro { attrs, .. }) + | Expr::Match(ExprMatch { attrs, .. }) + | Expr::MethodCall(ExprMethodCall { attrs, .. 
}) + | Expr::Paren(ExprParen { attrs, .. }) + | Expr::Path(ExprPath { attrs, .. }) + | Expr::Range(ExprRange { attrs, .. }) + | Expr::RawAddr(ExprRawAddr { attrs, .. }) + | Expr::Reference(ExprReference { attrs, .. }) + | Expr::Repeat(ExprRepeat { attrs, .. }) + | Expr::Return(ExprReturn { attrs, .. }) + | Expr::Struct(ExprStruct { attrs, .. }) + | Expr::Try(ExprTry { attrs, .. }) + | Expr::TryBlock(ExprTryBlock { attrs, .. }) + | Expr::Tuple(ExprTuple { attrs, .. }) + | Expr::Unary(ExprUnary { attrs, .. }) + | Expr::Unsafe(ExprUnsafe { attrs, .. }) + | Expr::While(ExprWhile { attrs, .. }) + | Expr::Yield(ExprYield { attrs, .. }) => mem::replace(attrs, new), + Expr::Verbatim(_) => Vec::new(), + } + } +} + +ast_enum! { + /// A struct or tuple struct field accessed in a struct literal or field + /// expression. + #[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] + pub enum Member { + /// A named field like `self.x`. + Named(Ident), + /// An unnamed field like `self.0`. + Unnamed(Index), + } +} + +impl From<Ident> for Member { + fn from(ident: Ident) -> Member { + Member::Named(ident) + } +} + +impl From<Index> for Member { + fn from(index: Index) -> Member { + Member::Unnamed(index) + } +} + +impl From<usize> for Member { + fn from(index: usize) -> Member { + Member::Unnamed(Index::from(index)) + } +} + +impl Eq for Member {} + +impl PartialEq for Member { + fn eq(&self, other: &Self) -> bool { + match (self, other) { + (Member::Named(this), Member::Named(other)) => this == other, + (Member::Unnamed(this), Member::Unnamed(other)) => this == other, + _ => false, + } + } +} + +impl Hash for Member { + fn hash<H: Hasher>(&self, state: &mut H) { + match self { + Member::Named(m) => m.hash(state), + Member::Unnamed(m) => m.hash(state), + } + } +} + +#[cfg(feature = "printing")] +impl IdentFragment for Member { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + match self { + Member::Named(m) => Display::fmt(m, formatter), + Member::Unnamed(m) => Display::fmt(&m.index, formatter), + } + } + + fn span(&self) -> Option<Span> { + match self { + Member::Named(m) => Some(m.span()), + Member::Unnamed(m) => Some(m.span), + } + } +} + +#[cfg(any(feature = "parsing", feature = "printing"))] +impl Member { + pub(crate) fn is_named(&self) -> bool { + match self { + Member::Named(_) => true, + Member::Unnamed(_) => false, + } + } +} + +ast_struct! { + /// The index of an unnamed tuple struct field. + #[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] + pub struct Index { + pub index: u32, + pub span: Span, + } +} + +impl From<usize> for Index { + fn from(index: usize) -> Index { + assert!(index < u32::MAX as usize); + Index { + index: index as u32, + span: Span::call_site(), + } + } +} + +impl Eq for Index {} + +impl PartialEq for Index { + fn eq(&self, other: &Self) -> bool { + self.index == other.index + } +} + +impl Hash for Index { + fn hash<H: Hasher>(&self, state: &mut H) { + self.index.hash(state); + } +} + +#[cfg(feature = "printing")] +impl IdentFragment for Index { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + Display::fmt(&self.index, formatter) + } + + fn span(&self) -> Option<Span> { + Some(self.span) + } +} + +ast_struct! { + /// A field-value pair in a struct literal. + #[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] + pub struct FieldValue { + pub attrs: Vec<Attribute>, + pub member: Member, + + /// The colon in `Struct { x: x }`. 
If written in shorthand like + /// `Struct { x }`, there is no colon. + pub colon_token: Option<Token![:]>, + + pub expr: Expr, + } +} + +#[cfg(feature = "full")] +ast_struct! { + /// A lifetime labeling a `for`, `while`, or `loop`. + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + pub struct Label { + pub name: Lifetime, + pub colon_token: Token![:], + } +} + +#[cfg(feature = "full")] +ast_struct! { + /// One arm of a `match` expression: `0..=10 => { return true; }`. + /// + /// As in: + /// + /// ``` + /// # fn f() -> bool { + /// # let n = 0; + /// match n { + /// 0..=10 => { + /// return true; + /// } + /// // ... + /// # _ => {} + /// } + /// # false + /// # } + /// ``` + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + pub struct Arm { + pub attrs: Vec<Attribute>, + pub pat: Pat, + pub guard: Option<(Token![if], Box<Expr>)>, + pub fat_arrow_token: Token![=>], + pub body: Box<Expr>, + pub comma: Option<Token![,]>, + } +} + +#[cfg(feature = "full")] +ast_enum! { + /// Limit types of a range, inclusive or exclusive. + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + pub enum RangeLimits { + /// Inclusive at the beginning, exclusive at the end. + HalfOpen(Token![..]), + /// Inclusive at the beginning and end. + Closed(Token![..=]), + } +} + +#[cfg(feature = "full")] +ast_enum! { + /// Mutability of a raw pointer (`*const T`, `*mut T`), in which non-mutable + /// isn't the implicit default. + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + pub enum PointerMutability { + Const(Token![const]), + Mut(Token![mut]), + } +} + +#[cfg(feature = "parsing")] +pub(crate) mod parsing { + #[cfg(feature = "full")] + use crate::attr; + use crate::attr::Attribute; + #[cfg(feature = "full")] + use crate::classify; + use crate::error::{Error, Result}; + #[cfg(feature = "full")] + use crate::expr::{ + Arm, ExprArray, ExprAssign, ExprAsync, ExprAwait, ExprBlock, ExprBreak, ExprClosure, + ExprConst, ExprContinue, ExprForLoop, ExprIf, ExprInfer, ExprLet, ExprLoop, ExprMatch, + ExprRange, ExprRawAddr, ExprRepeat, ExprReturn, ExprTry, ExprTryBlock, ExprUnsafe, + ExprWhile, ExprYield, Label, PointerMutability, RangeLimits, + }; + use crate::expr::{ + Expr, ExprBinary, ExprCall, ExprCast, ExprField, ExprGroup, ExprIndex, ExprLit, ExprMacro, + ExprMethodCall, ExprParen, ExprPath, ExprReference, ExprStruct, ExprTuple, ExprUnary, + FieldValue, Index, Member, + }; + #[cfg(feature = "full")] + use crate::generics::{self, BoundLifetimes}; + use crate::ident::Ident; + #[cfg(feature = "full")] + use crate::lifetime::Lifetime; + use crate::lit::{Lit, LitFloat, LitInt}; + use crate::mac::{self, Macro}; + use crate::op::BinOp; + use crate::parse::discouraged::Speculative as _; + #[cfg(feature = "full")] + use crate::parse::ParseBuffer; + use crate::parse::{Parse, ParseStream}; + #[cfg(feature = "full")] + use crate::pat::{Pat, PatType}; + use crate::path::{self, AngleBracketedGenericArguments, Path, QSelf}; + use crate::precedence::Precedence; + use crate::punctuated::Punctuated; + #[cfg(feature = "full")] + use crate::stmt::Block; + use crate::token; + use crate::ty; + #[cfg(feature = "full")] + use crate::ty::{ReturnType, Type}; + use crate::verbatim; + #[cfg(feature = "full")] + use proc_macro2::{Span, TokenStream}; + use std::mem; + + // When we're parsing expressions which occur before blocks, like in an if + // statement's condition, we cannot parse a struct literal. 
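+ //
+ // For example, with struct literals allowed, the condition in
+ // `if S { x: 1 }.f() { /* body */ }` would be read as the path `S`
+ // followed by a struct-literal body, swallowing the `{ ... }` that was
+ // meant to be the if-block. Rust requires parentheses in that position,
+ // `if (S { x: 1 }).f() { ... }`, and this parser mirrors that restriction
+ // by threading `AllowStruct(false)` through such condition positions.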
+ // + // Struct literals are ambiguous in certain positions + // https://github.com/rust-lang/rfcs/pull/92 + #[cfg(feature = "full")] + pub(super) struct AllowStruct(pub bool); + + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + impl Parse for Expr { + fn parse(input: ParseStream) -> Result<Self> { + ambiguous_expr( + input, + #[cfg(feature = "full")] + AllowStruct(true), + ) + } + } + + #[cfg(feature = "full")] + pub(super) fn parse_with_earlier_boundary_rule(input: ParseStream) -> Result<Expr> { + let mut attrs = input.call(expr_attrs)?; + let mut expr = if input.peek(token::Group) { + let allow_struct = AllowStruct(true); + let atom = expr_group(input, allow_struct)?; + if continue_parsing_early(&atom) { + trailer_helper(input, atom)? + } else { + atom + } + } else if input.peek(Token![if]) { + Expr::If(input.parse()?) + } else if input.peek(Token![while]) { + Expr::While(input.parse()?) + } else if input.peek(Token![for]) + && !generics::parsing::choose_generics_over_qpath_after_keyword(input) + { + Expr::ForLoop(input.parse()?) + } else if input.peek(Token![loop]) { + Expr::Loop(input.parse()?) + } else if input.peek(Token![match]) { + Expr::Match(input.parse()?) + } else if input.peek(Token![try]) && input.peek2(token::Brace) { + Expr::TryBlock(input.parse()?) + } else if input.peek(Token![unsafe]) { + Expr::Unsafe(input.parse()?) + } else if input.peek(Token![const]) && input.peek2(token::Brace) { + Expr::Const(input.parse()?) + } else if input.peek(token::Brace) { + Expr::Block(input.parse()?) + } else if input.peek(Lifetime) { + atom_labeled(input)? + } else { + let allow_struct = AllowStruct(true); + unary_expr(input, allow_struct)? + }; + + if continue_parsing_early(&expr) { + attrs.extend(expr.replace_attrs(Vec::new())); + expr.replace_attrs(attrs); + + let allow_struct = AllowStruct(true); + return parse_expr(input, expr, allow_struct, Precedence::MIN); + } + + if input.peek(Token![.]) && !input.peek(Token![..]) || input.peek(Token![?]) { + expr = trailer_helper(input, expr)?; + + attrs.extend(expr.replace_attrs(Vec::new())); + expr.replace_attrs(attrs); + + let allow_struct = AllowStruct(true); + return parse_expr(input, expr, allow_struct, Precedence::MIN); + } + + attrs.extend(expr.replace_attrs(Vec::new())); + expr.replace_attrs(attrs); + Ok(expr) + } + + #[cfg(feature = "full")] + impl Copy for AllowStruct {} + + #[cfg(feature = "full")] + impl Clone for AllowStruct { + fn clone(&self) -> Self { + *self + } + } + + #[cfg(feature = "full")] + fn parse_expr( + input: ParseStream, + mut lhs: Expr, + allow_struct: AllowStruct, + base: Precedence, + ) -> Result<Expr> { + loop { + let ahead = input.fork(); + if let Expr::Range(_) = lhs { + // A range cannot be the left-hand side of another binary operator. 
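+ // Breaking here returns the range as-is and leaves any following
+ // operator in the stream for the caller, instead of building
+ // something like `(a..b) + c`.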
+ break; + } else if let Ok(op) = ahead.parse::<BinOp>() { + let precedence = Precedence::of_binop(&op); + if precedence < base { + break; + } + if precedence == Precedence::Assign { + if let Expr::Range(_) = lhs { + break; + } + } + if precedence == Precedence::Compare { + if let Expr::Binary(lhs) = &lhs { + if Precedence::of_binop(&lhs.op) == Precedence::Compare { + return Err(input.error("comparison operators cannot be chained")); + } + } + } + input.advance_to(&ahead); + let right = parse_binop_rhs(input, allow_struct, precedence)?; + lhs = Expr::Binary(ExprBinary { + attrs: Vec::new(), + left: Box::new(lhs), + op, + right, + }); + } else if Precedence::Assign >= base + && input.peek(Token![=]) + && !input.peek(Token![=>]) + && match lhs { + Expr::Range(_) => false, + _ => true, + } + { + let eq_token: Token![=] = input.parse()?; + let right = parse_binop_rhs(input, allow_struct, Precedence::Assign)?; + lhs = Expr::Assign(ExprAssign { + attrs: Vec::new(), + left: Box::new(lhs), + eq_token, + right, + }); + } else if Precedence::Range >= base && input.peek(Token![..]) { + let limits: RangeLimits = input.parse()?; + let end = parse_range_end(input, &limits, allow_struct)?; + lhs = Expr::Range(ExprRange { + attrs: Vec::new(), + start: Some(Box::new(lhs)), + limits, + end, + }); + } else if Precedence::Cast >= base && input.peek(Token![as]) { + let as_token: Token![as] = input.parse()?; + let allow_plus = false; + let allow_group_generic = false; + let ty = ty::parsing::ambig_ty(input, allow_plus, allow_group_generic)?; + check_cast(input)?; + lhs = Expr::Cast(ExprCast { + attrs: Vec::new(), + expr: Box::new(lhs), + as_token, + ty: Box::new(ty), + }); + } else { + break; + } + } + Ok(lhs) + } + + #[cfg(not(feature = "full"))] + fn parse_expr(input: ParseStream, mut lhs: Expr, base: Precedence) -> Result<Expr> { + loop { + let ahead = input.fork(); + if let Ok(op) = ahead.parse::<BinOp>() { + let precedence = Precedence::of_binop(&op); + if precedence < base { + break; + } + if precedence == Precedence::Compare { + if let Expr::Binary(lhs) = &lhs { + if Precedence::of_binop(&lhs.op) == Precedence::Compare { + return Err(input.error("comparison operators cannot be chained")); + } + } + } + input.advance_to(&ahead); + let right = parse_binop_rhs(input, precedence)?; + lhs = Expr::Binary(ExprBinary { + attrs: Vec::new(), + left: Box::new(lhs), + op, + right, + }); + } else if Precedence::Cast >= base && input.peek(Token![as]) { + let as_token: Token![as] = input.parse()?; + let allow_plus = false; + let allow_group_generic = false; + let ty = ty::parsing::ambig_ty(input, allow_plus, allow_group_generic)?; + check_cast(input)?; + lhs = Expr::Cast(ExprCast { + attrs: Vec::new(), + expr: Box::new(lhs), + as_token, + ty: Box::new(ty), + }); + } else { + break; + } + } + Ok(lhs) + } + + fn parse_binop_rhs( + input: ParseStream, + #[cfg(feature = "full")] allow_struct: AllowStruct, + precedence: Precedence, + ) -> Result<Box<Expr>> { + let mut rhs = unary_expr( + input, + #[cfg(feature = "full")] + allow_struct, + )?; + loop { + let next = peek_precedence(input); + if next > precedence || next == precedence && precedence == Precedence::Assign { + let cursor = input.cursor(); + rhs = parse_expr( + input, + rhs, + #[cfg(feature = "full")] + allow_struct, + next, + )?; + if cursor == input.cursor() { + // Bespoke grammar restrictions separate from precedence can + // cause parsing to not advance, such as `..a` being + // disallowed in the left-hand side of binary operators, + // even ones that have 
lower precedence than `..`. + break; + } + } else { + break; + } + } + Ok(Box::new(rhs)) + } + + fn peek_precedence(input: ParseStream) -> Precedence { + if let Ok(op) = input.fork().parse() { + Precedence::of_binop(&op) + } else if input.peek(Token![=]) && !input.peek(Token![=>]) { + Precedence::Assign + } else if input.peek(Token![..]) { + Precedence::Range + } else if input.peek(Token![as]) { + Precedence::Cast + } else { + Precedence::MIN + } + } + + // Parse an arbitrary expression. + pub(super) fn ambiguous_expr( + input: ParseStream, + #[cfg(feature = "full")] allow_struct: AllowStruct, + ) -> Result<Expr> { + let lhs = unary_expr( + input, + #[cfg(feature = "full")] + allow_struct, + )?; + parse_expr( + input, + lhs, + #[cfg(feature = "full")] + allow_struct, + Precedence::MIN, + ) + } + + #[cfg(feature = "full")] + fn expr_attrs(input: ParseStream) -> Result<Vec<Attribute>> { + let mut attrs = Vec::new(); + while !input.peek(token::Group) && input.peek(Token![#]) { + attrs.push(input.call(attr::parsing::single_parse_outer)?); + } + Ok(attrs) + } + + // <UnOp> <trailer> + // & <trailer> + // &mut <trailer> + // box <trailer> + #[cfg(feature = "full")] + fn unary_expr(input: ParseStream, allow_struct: AllowStruct) -> Result<Expr> { + let begin = input.fork(); + let attrs = input.call(expr_attrs)?; + if input.peek(token::Group) { + return trailer_expr(begin, attrs, input, allow_struct); + } + + if input.peek(Token![&]) { + let and_token: Token![&] = input.parse()?; + let raw: Option<Token![raw]> = if input.peek(Token![raw]) + && (input.peek2(Token![mut]) || input.peek2(Token![const])) + { + Some(input.parse()?) + } else { + None + }; + let mutability: Option<Token![mut]> = input.parse()?; + let const_token: Option<Token![const]> = if raw.is_some() && mutability.is_none() { + Some(input.parse()?) + } else { + None + }; + let expr = Box::new(unary_expr(input, allow_struct)?); + if let Some(raw) = raw { + Ok(Expr::RawAddr(ExprRawAddr { + attrs, + and_token, + raw, + mutability: match mutability { + Some(mut_token) => PointerMutability::Mut(mut_token), + None => PointerMutability::Const(const_token.unwrap()), + }, + expr, + })) + } else { + Ok(Expr::Reference(ExprReference { + attrs, + and_token, + mutability, + expr, + })) + } + } else if input.peek(Token![*]) || input.peek(Token![!]) || input.peek(Token![-]) { + expr_unary(input, attrs, allow_struct).map(Expr::Unary) + } else { + trailer_expr(begin, attrs, input, allow_struct) + } + } + + #[cfg(not(feature = "full"))] + fn unary_expr(input: ParseStream) -> Result<Expr> { + if input.peek(Token![&]) { + Ok(Expr::Reference(ExprReference { + attrs: Vec::new(), + and_token: input.parse()?, + mutability: input.parse()?, + expr: Box::new(unary_expr(input)?), + })) + } else if input.peek(Token![*]) || input.peek(Token![!]) || input.peek(Token![-]) { + Ok(Expr::Unary(ExprUnary { + attrs: Vec::new(), + op: input.parse()?, + expr: Box::new(unary_expr(input)?), + })) + } else { + trailer_expr(input) + } + } + + // <atom> (..<args>) ... + // <atom> . <ident> (..<args>) ... + // <atom> . <ident> ... + // <atom> . <lit> ... + // <atom> [ <expr> ] ... + // <atom> ? ... 
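+ //
+ // For example, `x.f::<T>(a)?[0].1` starts from the atom `x` and then
+ // consumes one trailer at a time: a turbofish method call, a `?` try
+ // operator, an index expression, and finally an unnamed field access.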
+ #[cfg(feature = "full")] + fn trailer_expr( + begin: ParseBuffer, + mut attrs: Vec<Attribute>, + input: ParseStream, + allow_struct: AllowStruct, + ) -> Result<Expr> { + let atom = atom_expr(input, allow_struct)?; + let mut e = trailer_helper(input, atom)?; + + if let Expr::Verbatim(tokens) = &mut e { + *tokens = verbatim::between(&begin, input); + } else if !attrs.is_empty() { + if let Expr::Range(range) = e { + let spans: &[Span] = match &range.limits { + RangeLimits::HalfOpen(limits) => &limits.spans, + RangeLimits::Closed(limits) => &limits.spans, + }; + return Err(crate::error::new2( + spans[0], + *spans.last().unwrap(), + "attributes are not allowed on range expressions starting with `..`", + )); + } + let inner_attrs = e.replace_attrs(Vec::new()); + attrs.extend(inner_attrs); + e.replace_attrs(attrs); + } + + Ok(e) + } + + #[cfg(feature = "full")] + fn trailer_helper(input: ParseStream, mut e: Expr) -> Result<Expr> { + loop { + if input.peek(token::Paren) { + let content; + e = Expr::Call(ExprCall { + attrs: Vec::new(), + func: Box::new(e), + paren_token: parenthesized!(content in input), + args: content.parse_terminated(Expr::parse, Token![,])?, + }); + } else if input.peek(Token![.]) + && !input.peek(Token![..]) + && match e { + Expr::Range(_) => false, + _ => true, + } + { + let mut dot_token: Token![.] = input.parse()?; + + let float_token: Option<LitFloat> = input.parse()?; + if let Some(float_token) = float_token { + if multi_index(&mut e, &mut dot_token, float_token)? { + continue; + } + } + + let await_token: Option<Token![await]> = input.parse()?; + if let Some(await_token) = await_token { + e = Expr::Await(ExprAwait { + attrs: Vec::new(), + base: Box::new(e), + dot_token, + await_token, + }); + continue; + } + + let member: Member = input.parse()?; + let turbofish = if member.is_named() && input.peek(Token![::]) { + Some(AngleBracketedGenericArguments::parse_turbofish(input)?) + } else { + None + }; + + if turbofish.is_some() || input.peek(token::Paren) { + if let Member::Named(method) = member { + let content; + e = Expr::MethodCall(ExprMethodCall { + attrs: Vec::new(), + receiver: Box::new(e), + dot_token, + method, + turbofish, + paren_token: parenthesized!(content in input), + args: content.parse_terminated(Expr::parse, Token![,])?, + }); + continue; + } + } + + e = Expr::Field(ExprField { + attrs: Vec::new(), + base: Box::new(e), + dot_token, + member, + }); + } else if input.peek(token::Bracket) { + let content; + e = Expr::Index(ExprIndex { + attrs: Vec::new(), + expr: Box::new(e), + bracket_token: bracketed!(content in input), + index: content.parse()?, + }); + } else if input.peek(Token![?]) + && match e { + Expr::Range(_) => false, + _ => true, + } + { + e = Expr::Try(ExprTry { + attrs: Vec::new(), + expr: Box::new(e), + question_token: input.parse()?, + }); + } else { + break; + } + } + Ok(e) + } + + #[cfg(not(feature = "full"))] + fn trailer_expr(input: ParseStream) -> Result<Expr> { + let mut e = atom_expr(input)?; + + loop { + if input.peek(token::Paren) { + let content; + e = Expr::Call(ExprCall { + attrs: Vec::new(), + func: Box::new(e), + paren_token: parenthesized!(content in input), + args: content.parse_terminated(Expr::parse, Token![,])?, + }); + } else if input.peek(Token![.]) + && !input.peek(Token![..]) + && !input.peek2(Token![await]) + { + let mut dot_token: Token![.] = input.parse()?; + + let float_token: Option<LitFloat> = input.parse()?; + if let Some(float_token) = float_token { + if multi_index(&mut e, &mut dot_token, float_token)? 
{ + continue; + } + } + + let member: Member = input.parse()?; + let turbofish = if member.is_named() && input.peek(Token![::]) { + let colon2_token: Token![::] = input.parse()?; + let turbofish = + AngleBracketedGenericArguments::do_parse(Some(colon2_token), input)?; + Some(turbofish) + } else { + None + }; + + if turbofish.is_some() || input.peek(token::Paren) { + if let Member::Named(method) = member { + let content; + e = Expr::MethodCall(ExprMethodCall { + attrs: Vec::new(), + receiver: Box::new(e), + dot_token, + method, + turbofish, + paren_token: parenthesized!(content in input), + args: content.parse_terminated(Expr::parse, Token![,])?, + }); + continue; + } + } + + e = Expr::Field(ExprField { + attrs: Vec::new(), + base: Box::new(e), + dot_token, + member, + }); + } else if input.peek(token::Bracket) { + let content; + e = Expr::Index(ExprIndex { + attrs: Vec::new(), + expr: Box::new(e), + bracket_token: bracketed!(content in input), + index: content.parse()?, + }); + } else { + break; + } + } + + Ok(e) + } + + // Parse all atomic expressions which don't have to worry about precedence + // interactions, as they are fully contained. + #[cfg(feature = "full")] + fn atom_expr(input: ParseStream, allow_struct: AllowStruct) -> Result<Expr> { + if input.peek(token::Group) { + expr_group(input, allow_struct) + } else if input.peek(Lit) { + input.parse().map(Expr::Lit) + } else if input.peek(Token![async]) + && (input.peek2(token::Brace) || input.peek2(Token![move]) && input.peek3(token::Brace)) + { + input.parse().map(Expr::Async) + } else if input.peek(Token![try]) && input.peek2(token::Brace) { + input.parse().map(Expr::TryBlock) + } else if input.peek(Token![|]) + || input.peek(Token![move]) + || input.peek(Token![for]) + && generics::parsing::choose_generics_over_qpath_after_keyword(input) + || input.peek(Token![const]) && !input.peek2(token::Brace) + || input.peek(Token![static]) + || input.peek(Token![async]) && (input.peek2(Token![|]) || input.peek2(Token![move])) + { + expr_closure(input, allow_struct).map(Expr::Closure) + } else if token::parsing::peek_keyword(input.cursor(), "builtin") && input.peek2(Token![#]) + { + expr_builtin(input) + } else if input.peek(Ident) + || input.peek(Token![::]) + || input.peek(Token![<]) + || input.peek(Token![self]) + || input.peek(Token![Self]) + || input.peek(Token![super]) + || input.peek(Token![crate]) + || input.peek(Token![try]) && (input.peek2(Token![!]) || input.peek2(Token![::])) + { + path_or_macro_or_struct(input, allow_struct) + } else if input.peek(token::Paren) { + paren_or_tuple(input) + } else if input.peek(Token![break]) { + expr_break(input, allow_struct).map(Expr::Break) + } else if input.peek(Token![continue]) { + input.parse().map(Expr::Continue) + } else if input.peek(Token![return]) { + input.parse().map(Expr::Return) + } else if input.peek(Token![become]) { + expr_become(input) + } else if input.peek(token::Bracket) { + array_or_repeat(input) + } else if input.peek(Token![let]) { + expr_let(input, allow_struct).map(Expr::Let) + } else if input.peek(Token![if]) { + input.parse().map(Expr::If) + } else if input.peek(Token![while]) { + input.parse().map(Expr::While) + } else if input.peek(Token![for]) { + input.parse().map(Expr::ForLoop) + } else if input.peek(Token![loop]) { + input.parse().map(Expr::Loop) + } else if input.peek(Token![match]) { + input.parse().map(Expr::Match) + } else if input.peek(Token![yield]) { + input.parse().map(Expr::Yield) + } else if input.peek(Token![unsafe]) { + 
input.parse().map(Expr::Unsafe) + } else if input.peek(Token![const]) { + input.parse().map(Expr::Const) + } else if input.peek(token::Brace) { + input.parse().map(Expr::Block) + } else if input.peek(Token![..]) { + expr_range(input, allow_struct).map(Expr::Range) + } else if input.peek(Token![_]) { + input.parse().map(Expr::Infer) + } else if input.peek(Lifetime) { + atom_labeled(input) + } else { + Err(input.error("expected an expression")) + } + } + + #[cfg(feature = "full")] + fn atom_labeled(input: ParseStream) -> Result<Expr> { + let the_label: Label = input.parse()?; + let mut expr = if input.peek(Token![while]) { + Expr::While(input.parse()?) + } else if input.peek(Token![for]) { + Expr::ForLoop(input.parse()?) + } else if input.peek(Token![loop]) { + Expr::Loop(input.parse()?) + } else if input.peek(token::Brace) { + Expr::Block(input.parse()?) + } else { + return Err(input.error("expected loop or block expression")); + }; + match &mut expr { + Expr::While(ExprWhile { label, .. }) + | Expr::ForLoop(ExprForLoop { label, .. }) + | Expr::Loop(ExprLoop { label, .. }) + | Expr::Block(ExprBlock { label, .. }) => *label = Some(the_label), + _ => unreachable!(), + } + Ok(expr) + } + + #[cfg(not(feature = "full"))] + fn atom_expr(input: ParseStream) -> Result<Expr> { + if input.peek(token::Group) { + expr_group(input) + } else if input.peek(Lit) { + input.parse().map(Expr::Lit) + } else if input.peek(token::Paren) { + paren_or_tuple(input) + } else if input.peek(Ident) + || input.peek(Token![::]) + || input.peek(Token![<]) + || input.peek(Token![self]) + || input.peek(Token![Self]) + || input.peek(Token![super]) + || input.peek(Token![crate]) + { + path_or_macro_or_struct(input) + } else if input.is_empty() { + Err(input.error("expected an expression")) + } else { + if input.peek(token::Brace) { + let scan = input.fork(); + let content; + braced!(content in scan); + if content.parse::<Expr>().is_ok() && content.is_empty() { + let expr_block = verbatim::between(input, &scan); + input.advance_to(&scan); + return Ok(Expr::Verbatim(expr_block)); + } + } + Err(input.error("unsupported expression; enable syn's features=[\"full\"]")) + } + } + + #[cfg(feature = "full")] + fn expr_builtin(input: ParseStream) -> Result<Expr> { + let begin = input.fork(); + + token::parsing::keyword(input, "builtin")?; + input.parse::<Token![#]>()?; + input.parse::<Ident>()?; + + let args; + parenthesized!(args in input); + args.parse::<TokenStream>()?; + + Ok(Expr::Verbatim(verbatim::between(&begin, input))) + } + + fn path_or_macro_or_struct( + input: ParseStream, + #[cfg(feature = "full")] allow_struct: AllowStruct, + ) -> Result<Expr> { + let expr_style = true; + let (qself, path) = path::parsing::qpath(input, expr_style)?; + rest_of_path_or_macro_or_struct( + qself, + path, + input, + #[cfg(feature = "full")] + allow_struct, + ) + } + + fn rest_of_path_or_macro_or_struct( + qself: Option<QSelf>, + path: Path, + input: ParseStream, + #[cfg(feature = "full")] allow_struct: AllowStruct, + ) -> Result<Expr> { + if qself.is_none() + && input.peek(Token![!]) + && !input.peek(Token![!=]) + && path.is_mod_style() + { + let bang_token: Token![!] 
= input.parse()?; + let (delimiter, tokens) = mac::parse_delimiter(input)?; + return Ok(Expr::Macro(ExprMacro { + attrs: Vec::new(), + mac: Macro { + path, + bang_token, + delimiter, + tokens, + }, + })); + } + + #[cfg(not(feature = "full"))] + let allow_struct = (true,); + if allow_struct.0 && input.peek(token::Brace) { + return expr_struct_helper(input, qself, path).map(Expr::Struct); + } + + Ok(Expr::Path(ExprPath { + attrs: Vec::new(), + qself, + path, + })) + } + + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + impl Parse for ExprMacro { + fn parse(input: ParseStream) -> Result<Self> { + Ok(ExprMacro { + attrs: Vec::new(), + mac: input.parse()?, + }) + } + } + + fn paren_or_tuple(input: ParseStream) -> Result<Expr> { + let content; + let paren_token = parenthesized!(content in input); + if content.is_empty() { + return Ok(Expr::Tuple(ExprTuple { + attrs: Vec::new(), + paren_token, + elems: Punctuated::new(), + })); + } + + let first: Expr = content.parse()?; + if content.is_empty() { + return Ok(Expr::Paren(ExprParen { + attrs: Vec::new(), + paren_token, + expr: Box::new(first), + })); + } + + let mut elems = Punctuated::new(); + elems.push_value(first); + while !content.is_empty() { + let punct = content.parse()?; + elems.push_punct(punct); + if content.is_empty() { + break; + } + let value = content.parse()?; + elems.push_value(value); + } + Ok(Expr::Tuple(ExprTuple { + attrs: Vec::new(), + paren_token, + elems, + })) + } + + #[cfg(feature = "full")] + fn array_or_repeat(input: ParseStream) -> Result<Expr> { + let content; + let bracket_token = bracketed!(content in input); + if content.is_empty() { + return Ok(Expr::Array(ExprArray { + attrs: Vec::new(), + bracket_token, + elems: Punctuated::new(), + })); + } + + let first: Expr = content.parse()?; + if content.is_empty() || content.peek(Token![,]) { + let mut elems = Punctuated::new(); + elems.push_value(first); + while !content.is_empty() { + let punct = content.parse()?; + elems.push_punct(punct); + if content.is_empty() { + break; + } + let value = content.parse()?; + elems.push_value(value); + } + Ok(Expr::Array(ExprArray { + attrs: Vec::new(), + bracket_token, + elems, + })) + } else if content.peek(Token![;]) { + let semi_token: Token![;] = content.parse()?; + let len: Expr = content.parse()?; + Ok(Expr::Repeat(ExprRepeat { + attrs: Vec::new(), + bracket_token, + expr: Box::new(first), + semi_token, + len: Box::new(len), + })) + } else { + Err(content.error("expected `,` or `;`")) + } + } + + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + impl Parse for ExprArray { + fn parse(input: ParseStream) -> Result<Self> { + let content; + let bracket_token = bracketed!(content in input); + let mut elems = Punctuated::new(); + + while !content.is_empty() { + let first: Expr = content.parse()?; + elems.push_value(first); + if content.is_empty() { + break; + } + let punct = content.parse()?; + elems.push_punct(punct); + } + + Ok(ExprArray { + attrs: Vec::new(), + bracket_token, + elems, + }) + } + } + + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + impl Parse for ExprRepeat { + fn parse(input: ParseStream) -> Result<Self> { + let content; + Ok(ExprRepeat { + bracket_token: bracketed!(content in input), + attrs: Vec::new(), + expr: content.parse()?, + semi_token: content.parse()?, + len: content.parse()?, + }) + } + } + + #[cfg(feature = "full")] + fn continue_parsing_early(mut expr: &Expr) -> bool { + while let Expr::Group(group) = expr { + expr = &group.expr; + } 
+ match expr { + Expr::If(_) + | Expr::While(_) + | Expr::ForLoop(_) + | Expr::Loop(_) + | Expr::Match(_) + | Expr::TryBlock(_) + | Expr::Unsafe(_) + | Expr::Const(_) + | Expr::Block(_) => false, + _ => true, + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + impl Parse for ExprLit { + fn parse(input: ParseStream) -> Result<Self> { + Ok(ExprLit { + attrs: Vec::new(), + lit: input.parse()?, + }) + } + } + + fn expr_group( + input: ParseStream, + #[cfg(feature = "full")] allow_struct: AllowStruct, + ) -> Result<Expr> { + let group = crate::group::parse_group(input)?; + let mut inner: Expr = group.content.parse()?; + + match inner { + Expr::Path(mut expr) if expr.attrs.is_empty() => { + let grouped_len = expr.path.segments.len(); + Path::parse_rest(input, &mut expr.path, true)?; + match rest_of_path_or_macro_or_struct( + expr.qself, + expr.path, + input, + #[cfg(feature = "full")] + allow_struct, + )? { + Expr::Path(expr) if expr.path.segments.len() == grouped_len => { + inner = Expr::Path(expr); + } + extended => return Ok(extended), + } + } + _ => {} + } + + Ok(Expr::Group(ExprGroup { + attrs: Vec::new(), + group_token: group.token, + expr: Box::new(inner), + })) + } + + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + impl Parse for ExprParen { + fn parse(input: ParseStream) -> Result<Self> { + let content; + Ok(ExprParen { + attrs: Vec::new(), + paren_token: parenthesized!(content in input), + expr: content.parse()?, + }) + } + } + + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + impl Parse for ExprLet { + fn parse(input: ParseStream) -> Result<Self> { + let allow_struct = AllowStruct(true); + expr_let(input, allow_struct) + } + } + + #[cfg(feature = "full")] + fn expr_let(input: ParseStream, allow_struct: AllowStruct) -> Result<ExprLet> { + Ok(ExprLet { + attrs: Vec::new(), + let_token: input.parse()?, + pat: Box::new(Pat::parse_multi_with_leading_vert(input)?), + eq_token: input.parse()?, + expr: Box::new({ + let lhs = unary_expr(input, allow_struct)?; + parse_expr(input, lhs, allow_struct, Precedence::Compare)? 
+ }), + }) + } + + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + impl Parse for ExprIf { + fn parse(input: ParseStream) -> Result<Self> { + let attrs = input.call(Attribute::parse_outer)?; + + let mut clauses = Vec::new(); + let mut expr; + loop { + let if_token: Token![if] = input.parse()?; + let cond = input.call(Expr::parse_without_eager_brace)?; + let then_branch: Block = input.parse()?; + + expr = ExprIf { + attrs: Vec::new(), + if_token, + cond: Box::new(cond), + then_branch, + else_branch: None, + }; + + if !input.peek(Token![else]) { + break; + } + + let else_token: Token![else] = input.parse()?; + let lookahead = input.lookahead1(); + if lookahead.peek(Token![if]) { + expr.else_branch = Some((else_token, Box::new(Expr::PLACEHOLDER))); + clauses.push(expr); + } else if lookahead.peek(token::Brace) { + expr.else_branch = Some(( + else_token, + Box::new(Expr::Block(ExprBlock { + attrs: Vec::new(), + label: None, + block: input.parse()?, + })), + )); + break; + } else { + return Err(lookahead.error()); + } + } + + while let Some(mut prev) = clauses.pop() { + *prev.else_branch.as_mut().unwrap().1 = Expr::If(expr); + expr = prev; + } + expr.attrs = attrs; + Ok(expr) + } + } + + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + impl Parse for ExprInfer { + fn parse(input: ParseStream) -> Result<Self> { + Ok(ExprInfer { + attrs: input.call(Attribute::parse_outer)?, + underscore_token: input.parse()?, + }) + } + } + + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + impl Parse for ExprForLoop { + fn parse(input: ParseStream) -> Result<Self> { + let mut attrs = input.call(Attribute::parse_outer)?; + let label: Option<Label> = input.parse()?; + let for_token: Token![for] = input.parse()?; + + let pat = Pat::parse_multi_with_leading_vert(input)?; + + let in_token: Token![in] = input.parse()?; + let expr: Expr = input.call(Expr::parse_without_eager_brace)?; + + let content; + let brace_token = braced!(content in input); + attr::parsing::parse_inner(&content, &mut attrs)?; + let stmts = content.call(Block::parse_within)?; + + Ok(ExprForLoop { + attrs, + label, + for_token, + pat: Box::new(pat), + in_token, + expr: Box::new(expr), + body: Block { brace_token, stmts }, + }) + } + } + + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + impl Parse for ExprLoop { + fn parse(input: ParseStream) -> Result<Self> { + let mut attrs = input.call(Attribute::parse_outer)?; + let label: Option<Label> = input.parse()?; + let loop_token: Token![loop] = input.parse()?; + + let content; + let brace_token = braced!(content in input); + attr::parsing::parse_inner(&content, &mut attrs)?; + let stmts = content.call(Block::parse_within)?; + + Ok(ExprLoop { + attrs, + label, + loop_token, + body: Block { brace_token, stmts }, + }) + } + } + + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + impl Parse for ExprMatch { + fn parse(input: ParseStream) -> Result<Self> { + let mut attrs = input.call(Attribute::parse_outer)?; + let match_token: Token![match] = input.parse()?; + let expr = Expr::parse_without_eager_brace(input)?; + + let content; + let brace_token = braced!(content in input); + attr::parsing::parse_inner(&content, &mut attrs)?; + + let arms = Arm::parse_multiple(&content)?; + + Ok(ExprMatch { + attrs, + match_token, + expr: Box::new(expr), + brace_token, + arms, + }) + } + } + + macro_rules! 
impl_by_parsing_expr { + ( + $( + $expr_type:ty, $variant:ident, $msg:expr, + )* + ) => { + $( + #[cfg(all(feature = "full", feature = "printing"))] + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + impl Parse for $expr_type { + fn parse(input: ParseStream) -> Result<Self> { + let mut expr: Expr = input.parse()?; + loop { + match expr { + Expr::$variant(inner) => return Ok(inner), + Expr::Group(next) => expr = *next.expr, + _ => return Err(Error::new_spanned(expr, $msg)), + } + } + } + } + )* + }; + } + + impl_by_parsing_expr! { + ExprAssign, Assign, "expected assignment expression", + ExprAwait, Await, "expected await expression", + ExprBinary, Binary, "expected binary operation", + ExprCall, Call, "expected function call expression", + ExprCast, Cast, "expected cast expression", + ExprField, Field, "expected struct field access", + ExprIndex, Index, "expected indexing expression", + ExprMethodCall, MethodCall, "expected method call expression", + ExprRange, Range, "expected range expression", + ExprTry, Try, "expected try expression", + ExprTuple, Tuple, "expected tuple expression", + } + + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + impl Parse for ExprUnary { + fn parse(input: ParseStream) -> Result<Self> { + let attrs = Vec::new(); + let allow_struct = AllowStruct(true); + expr_unary(input, attrs, allow_struct) + } + } + + #[cfg(feature = "full")] + fn expr_unary( + input: ParseStream, + attrs: Vec<Attribute>, + allow_struct: AllowStruct, + ) -> Result<ExprUnary> { + Ok(ExprUnary { + attrs, + op: input.parse()?, + expr: Box::new(unary_expr(input, allow_struct)?), + }) + } + + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + impl Parse for ExprClosure { + fn parse(input: ParseStream) -> Result<Self> { + let allow_struct = AllowStruct(true); + expr_closure(input, allow_struct) + } + } + + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + impl Parse for ExprRawAddr { + fn parse(input: ParseStream) -> Result<Self> { + let allow_struct = AllowStruct(true); + Ok(ExprRawAddr { + attrs: Vec::new(), + and_token: input.parse()?, + raw: input.parse()?, + mutability: input.parse()?, + expr: Box::new(unary_expr(input, allow_struct)?), + }) + } + } + + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + impl Parse for ExprReference { + fn parse(input: ParseStream) -> Result<Self> { + let allow_struct = AllowStruct(true); + Ok(ExprReference { + attrs: Vec::new(), + and_token: input.parse()?, + mutability: input.parse()?, + expr: Box::new(unary_expr(input, allow_struct)?), + }) + } + } + + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + impl Parse for ExprBreak { + fn parse(input: ParseStream) -> Result<Self> { + let allow_struct = AllowStruct(true); + expr_break(input, allow_struct) + } + } + + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + impl Parse for ExprReturn { + fn parse(input: ParseStream) -> Result<Self> { + Ok(ExprReturn { + attrs: Vec::new(), + return_token: input.parse()?, + expr: { + if Expr::peek(input) { + Some(input.parse()?) 
+ } else { + None + } + }, + }) + } + } + + #[cfg(feature = "full")] + fn expr_become(input: ParseStream) -> Result<Expr> { + let begin = input.fork(); + input.parse::<Token![become]>()?; + input.parse::<Expr>()?; + Ok(Expr::Verbatim(verbatim::between(&begin, input))) + } + + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + impl Parse for ExprTryBlock { + fn parse(input: ParseStream) -> Result<Self> { + Ok(ExprTryBlock { + attrs: Vec::new(), + try_token: input.parse()?, + block: input.parse()?, + }) + } + } + + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + impl Parse for ExprYield { + fn parse(input: ParseStream) -> Result<Self> { + Ok(ExprYield { + attrs: Vec::new(), + yield_token: input.parse()?, + expr: { + if Expr::peek(input) { + Some(input.parse()?) + } else { + None + } + }, + }) + } + } + + #[cfg(feature = "full")] + fn expr_closure(input: ParseStream, allow_struct: AllowStruct) -> Result<ExprClosure> { + let lifetimes: Option<BoundLifetimes> = input.parse()?; + let constness: Option<Token![const]> = input.parse()?; + let movability: Option<Token![static]> = input.parse()?; + let asyncness: Option<Token![async]> = input.parse()?; + let capture: Option<Token![move]> = input.parse()?; + let or1_token: Token![|] = input.parse()?; + + let mut inputs = Punctuated::new(); + loop { + if input.peek(Token![|]) { + break; + } + let value = closure_arg(input)?; + inputs.push_value(value); + if input.peek(Token![|]) { + break; + } + let punct: Token![,] = input.parse()?; + inputs.push_punct(punct); + } + + let or2_token: Token![|] = input.parse()?; + + let (output, body) = if input.peek(Token![->]) { + let arrow_token: Token![->] = input.parse()?; + let ty: Type = input.parse()?; + let body: Block = input.parse()?; + let output = ReturnType::Type(arrow_token, Box::new(ty)); + let block = Expr::Block(ExprBlock { + attrs: Vec::new(), + label: None, + block: body, + }); + (output, block) + } else { + let body = ambiguous_expr(input, allow_struct)?; + (ReturnType::Default, body) + }; + + Ok(ExprClosure { + attrs: Vec::new(), + lifetimes, + constness, + movability, + asyncness, + capture, + or1_token, + inputs, + or2_token, + output, + body: Box::new(body), + }) + } + + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + impl Parse for ExprAsync { + fn parse(input: ParseStream) -> Result<Self> { + Ok(ExprAsync { + attrs: Vec::new(), + async_token: input.parse()?, + capture: input.parse()?, + block: input.parse()?, + }) + } + } + + #[cfg(feature = "full")] + fn closure_arg(input: ParseStream) -> Result<Pat> { + let attrs = input.call(Attribute::parse_outer)?; + let mut pat = Pat::parse_single(input)?; + + if input.peek(Token![:]) { + Ok(Pat::Type(PatType { + attrs, + pat: Box::new(pat), + colon_token: input.parse()?, + ty: input.parse()?, + })) + } else { + match &mut pat { + Pat::Const(pat) => pat.attrs = attrs, + Pat::Ident(pat) => pat.attrs = attrs, + Pat::Lit(pat) => pat.attrs = attrs, + Pat::Macro(pat) => pat.attrs = attrs, + Pat::Or(pat) => pat.attrs = attrs, + Pat::Paren(pat) => pat.attrs = attrs, + Pat::Path(pat) => pat.attrs = attrs, + Pat::Range(pat) => pat.attrs = attrs, + Pat::Reference(pat) => pat.attrs = attrs, + Pat::Rest(pat) => pat.attrs = attrs, + Pat::Slice(pat) => pat.attrs = attrs, + Pat::Struct(pat) => pat.attrs = attrs, + Pat::Tuple(pat) => pat.attrs = attrs, + Pat::TupleStruct(pat) => pat.attrs = attrs, + Pat::Type(_) => unreachable!(), + Pat::Verbatim(_) => {} + Pat::Wild(pat) => 
pat.attrs = attrs, + } + Ok(pat) + } + } + + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + impl Parse for ExprWhile { + fn parse(input: ParseStream) -> Result<Self> { + let mut attrs = input.call(Attribute::parse_outer)?; + let label: Option<Label> = input.parse()?; + let while_token: Token![while] = input.parse()?; + let cond = Expr::parse_without_eager_brace(input)?; + + let content; + let brace_token = braced!(content in input); + attr::parsing::parse_inner(&content, &mut attrs)?; + let stmts = content.call(Block::parse_within)?; + + Ok(ExprWhile { + attrs, + label, + while_token, + cond: Box::new(cond), + body: Block { brace_token, stmts }, + }) + } + } + + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + impl Parse for ExprConst { + fn parse(input: ParseStream) -> Result<Self> { + let const_token: Token![const] = input.parse()?; + + let content; + let brace_token = braced!(content in input); + let inner_attrs = content.call(Attribute::parse_inner)?; + let stmts = content.call(Block::parse_within)?; + + Ok(ExprConst { + attrs: inner_attrs, + const_token, + block: Block { brace_token, stmts }, + }) + } + } + + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + impl Parse for Label { + fn parse(input: ParseStream) -> Result<Self> { + Ok(Label { + name: input.parse()?, + colon_token: input.parse()?, + }) + } + } + + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + impl Parse for Option<Label> { + fn parse(input: ParseStream) -> Result<Self> { + if input.peek(Lifetime) { + input.parse().map(Some) + } else { + Ok(None) + } + } + } + + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + impl Parse for ExprContinue { + fn parse(input: ParseStream) -> Result<Self> { + Ok(ExprContinue { + attrs: Vec::new(), + continue_token: input.parse()?, + label: input.parse()?, + }) + } + } + + #[cfg(feature = "full")] + fn expr_break(input: ParseStream, allow_struct: AllowStruct) -> Result<ExprBreak> { + let break_token: Token![break] = input.parse()?; + + let ahead = input.fork(); + let label: Option<Lifetime> = ahead.parse()?; + if label.is_some() && ahead.peek(Token![:]) { + // Not allowed: `break 'label: loop {...}` + // Parentheses are required. `break ('label: loop {...})` + let _: Expr = input.parse()?; + let start_span = label.unwrap().apostrophe; + let end_span = input.cursor().prev_span(); + return Err(crate::error::new2( + start_span, + end_span, + "parentheses required", + )); + } + + input.advance_to(&ahead); + let expr = if Expr::peek(input) && (allow_struct.0 || !input.peek(token::Brace)) { + Some(input.parse()?) 
+ } else { + None + }; + + Ok(ExprBreak { + attrs: Vec::new(), + break_token, + label, + expr, + }) + } + + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + impl Parse for FieldValue { + fn parse(input: ParseStream) -> Result<Self> { + let attrs = input.call(Attribute::parse_outer)?; + let member: Member = input.parse()?; + let (colon_token, value) = if input.peek(Token![:]) || !member.is_named() { + let colon_token: Token![:] = input.parse()?; + let value: Expr = input.parse()?; + (Some(colon_token), value) + } else if let Member::Named(ident) = &member { + let value = Expr::Path(ExprPath { + attrs: Vec::new(), + qself: None, + path: Path::from(ident.clone()), + }); + (None, value) + } else { + unreachable!() + }; + + Ok(FieldValue { + attrs, + member, + colon_token, + expr: value, + }) + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + impl Parse for ExprStruct { + fn parse(input: ParseStream) -> Result<Self> { + let expr_style = true; + let (qself, path) = path::parsing::qpath(input, expr_style)?; + expr_struct_helper(input, qself, path) + } + } + + fn expr_struct_helper( + input: ParseStream, + qself: Option<QSelf>, + path: Path, + ) -> Result<ExprStruct> { + let content; + let brace_token = braced!(content in input); + + let mut fields = Punctuated::new(); + while !content.is_empty() { + if content.peek(Token![..]) { + return Ok(ExprStruct { + attrs: Vec::new(), + qself, + path, + brace_token, + fields, + dot2_token: Some(content.parse()?), + rest: if content.is_empty() { + None + } else { + Some(Box::new(content.parse()?)) + }, + }); + } + + fields.push(content.parse()?); + if content.is_empty() { + break; + } + let punct: Token![,] = content.parse()?; + fields.push_punct(punct); + } + + Ok(ExprStruct { + attrs: Vec::new(), + qself, + path, + brace_token, + fields, + dot2_token: None, + rest: None, + }) + } + + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + impl Parse for ExprUnsafe { + fn parse(input: ParseStream) -> Result<Self> { + let unsafe_token: Token![unsafe] = input.parse()?; + + let content; + let brace_token = braced!(content in input); + let inner_attrs = content.call(Attribute::parse_inner)?; + let stmts = content.call(Block::parse_within)?; + + Ok(ExprUnsafe { + attrs: inner_attrs, + unsafe_token, + block: Block { brace_token, stmts }, + }) + } + } + + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + impl Parse for ExprBlock { + fn parse(input: ParseStream) -> Result<Self> { + let mut attrs = input.call(Attribute::parse_outer)?; + let label: Option<Label> = input.parse()?; + + let content; + let brace_token = braced!(content in input); + attr::parsing::parse_inner(&content, &mut attrs)?; + let stmts = content.call(Block::parse_within)?; + + Ok(ExprBlock { + attrs, + label, + block: Block { brace_token, stmts }, + }) + } + } + + #[cfg(feature = "full")] + fn expr_range(input: ParseStream, allow_struct: AllowStruct) -> Result<ExprRange> { + let limits: RangeLimits = input.parse()?; + let end = parse_range_end(input, &limits, allow_struct)?; + Ok(ExprRange { + attrs: Vec::new(), + start: None, + limits, + end, + }) + } + + #[cfg(feature = "full")] + fn parse_range_end( + input: ParseStream, + limits: &RangeLimits, + allow_struct: AllowStruct, + ) -> Result<Option<Box<Expr>>> { + if matches!(limits, RangeLimits::HalfOpen(_)) + && (input.is_empty() + || input.peek(Token![,]) + || input.peek(Token![;]) + || input.peek(Token![.]) && !input.peek(Token![..]) + || input.peek(Token![?]) + || 
input.peek(Token![=>]) + || !allow_struct.0 && input.peek(token::Brace) + || input.peek(Token![=]) + || input.peek(Token![+]) + || input.peek(Token![/]) + || input.peek(Token![%]) + || input.peek(Token![^]) + || input.peek(Token![>]) + || input.peek(Token![<=]) + || input.peek(Token![!=]) + || input.peek(Token![-=]) + || input.peek(Token![*=]) + || input.peek(Token![&=]) + || input.peek(Token![|=]) + || input.peek(Token![<<=]) + || input.peek(Token![as])) + { + Ok(None) + } else { + let end = parse_binop_rhs(input, allow_struct, Precedence::Range)?; + Ok(Some(end)) + } + } + + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + impl Parse for RangeLimits { + fn parse(input: ParseStream) -> Result<Self> { + let lookahead = input.lookahead1(); + let dot_dot = lookahead.peek(Token![..]); + let dot_dot_eq = dot_dot && lookahead.peek(Token![..=]); + let dot_dot_dot = dot_dot && input.peek(Token![...]); + if dot_dot_eq { + input.parse().map(RangeLimits::Closed) + } else if dot_dot && !dot_dot_dot { + input.parse().map(RangeLimits::HalfOpen) + } else { + Err(lookahead.error()) + } + } + } + + #[cfg(feature = "full")] + impl RangeLimits { + pub(crate) fn parse_obsolete(input: ParseStream) -> Result<Self> { + let lookahead = input.lookahead1(); + let dot_dot = lookahead.peek(Token![..]); + let dot_dot_eq = dot_dot && lookahead.peek(Token![..=]); + let dot_dot_dot = dot_dot && input.peek(Token![...]); + if dot_dot_eq { + input.parse().map(RangeLimits::Closed) + } else if dot_dot_dot { + let dot3: Token![...] = input.parse()?; + Ok(RangeLimits::Closed(Token![..=](dot3.spans))) + } else if dot_dot { + input.parse().map(RangeLimits::HalfOpen) + } else { + Err(lookahead.error()) + } + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + impl Parse for ExprPath { + fn parse(input: ParseStream) -> Result<Self> { + #[cfg(not(feature = "full"))] + let attrs = Vec::new(); + #[cfg(feature = "full")] + let attrs = input.call(Attribute::parse_outer)?; + + let expr_style = true; + let (qself, path) = path::parsing::qpath(input, expr_style)?; + + Ok(ExprPath { attrs, qself, path }) + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + impl Parse for Member { + fn parse(input: ParseStream) -> Result<Self> { + if input.peek(Ident) { + input.parse().map(Member::Named) + } else if input.peek(LitInt) { + input.parse().map(Member::Unnamed) + } else { + Err(input.error("expected identifier or integer")) + } + } + } + + #[cfg(feature = "full")] + impl Arm { + pub(crate) fn parse_multiple(input: ParseStream) -> Result<Vec<Self>> { + let mut arms = Vec::new(); + while !input.is_empty() { + arms.push(input.call(Arm::parse)?); + } + Ok(arms) + } + } + + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + impl Parse for Arm { + fn parse(input: ParseStream) -> Result<Arm> { + let requires_comma; + Ok(Arm { + attrs: input.call(Attribute::parse_outer)?, + pat: Pat::parse_multi_with_leading_vert(input)?, + guard: { + if input.peek(Token![if]) { + let if_token: Token![if] = input.parse()?; + let guard: Expr = input.parse()?; + Some((if_token, Box::new(guard))) + } else { + None + } + }, + fat_arrow_token: input.parse()?, + body: { + let body = Expr::parse_with_earlier_boundary_rule(input)?; + requires_comma = classify::requires_comma_to_be_match_arm(&body); + Box::new(body) + }, + comma: { + if requires_comma && !input.is_empty() { + Some(input.parse()?) + } else { + input.parse()? 
+ } + }, + }) + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + impl Parse for Index { + fn parse(input: ParseStream) -> Result<Self> { + let lit: LitInt = input.parse()?; + if lit.suffix().is_empty() { + Ok(Index { + index: lit + .base10_digits() + .parse() + .map_err(|err| Error::new(lit.span(), err))?, + span: lit.span(), + }) + } else { + Err(Error::new(lit.span(), "expected unsuffixed integer")) + } + } + } + + fn multi_index(e: &mut Expr, dot_token: &mut Token![.], float: LitFloat) -> Result<bool> { + let float_token = float.token(); + let float_span = float_token.span(); + let mut float_repr = float_token.to_string(); + let trailing_dot = float_repr.ends_with('.'); + if trailing_dot { + float_repr.truncate(float_repr.len() - 1); + } + + let mut offset = 0; + for part in float_repr.split('.') { + let mut index: Index = + crate::parse_str(part).map_err(|err| Error::new(float_span, err))?; + let part_end = offset + part.len(); + index.span = float_token.subspan(offset..part_end).unwrap_or(float_span); + + let base = mem::replace(e, Expr::PLACEHOLDER); + *e = Expr::Field(ExprField { + attrs: Vec::new(), + base: Box::new(base), + dot_token: Token![.](dot_token.span), + member: Member::Unnamed(index), + }); + + let dot_span = float_token + .subspan(part_end..part_end + 1) + .unwrap_or(float_span); + *dot_token = Token![.](dot_span); + offset = part_end + 1; + } + + Ok(!trailing_dot) + } + + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + impl Parse for PointerMutability { + fn parse(input: ParseStream) -> Result<Self> { + let lookahead = input.lookahead1(); + if lookahead.peek(Token![const]) { + Ok(PointerMutability::Const(input.parse()?)) + } else if lookahead.peek(Token![mut]) { + Ok(PointerMutability::Mut(input.parse()?)) + } else { + Err(lookahead.error()) + } + } + } + + fn check_cast(input: ParseStream) -> Result<()> { + let kind = if input.peek(Token![.]) && !input.peek(Token![..]) { + if input.peek2(Token![await]) { + "`.await`" + } else if input.peek2(Ident) && (input.peek3(token::Paren) || input.peek3(Token![::])) { + "a method call" + } else { + "a field access" + } + } else if input.peek(Token![?]) { + "`?`" + } else if input.peek(token::Bracket) { + "indexing" + } else if input.peek(token::Paren) { + "a function call" + } else { + return Ok(()); + }; + let msg = format!("casts cannot be followed by {}", kind); + Err(input.error(msg)) + } +} + +#[cfg(feature = "printing")] +pub(crate) mod printing { + use crate::attr::Attribute; + #[cfg(feature = "full")] + use crate::attr::FilterAttrs; + #[cfg(feature = "full")] + use crate::classify; + #[cfg(feature = "full")] + use crate::expr::{ + Arm, ExprArray, ExprAssign, ExprAsync, ExprAwait, ExprBlock, ExprBreak, ExprClosure, + ExprConst, ExprContinue, ExprForLoop, ExprIf, ExprInfer, ExprLet, ExprLoop, ExprMatch, + ExprRange, ExprRawAddr, ExprRepeat, ExprReturn, ExprTry, ExprTryBlock, ExprUnsafe, + ExprWhile, ExprYield, Label, PointerMutability, RangeLimits, + }; + use crate::expr::{ + Expr, ExprBinary, ExprCall, ExprCast, ExprField, ExprGroup, ExprIndex, ExprLit, ExprMacro, + ExprMethodCall, ExprParen, ExprPath, ExprReference, ExprStruct, ExprTuple, ExprUnary, + FieldValue, Index, Member, + }; + use crate::fixup::FixupContext; + use crate::op::BinOp; + use crate::path; + use crate::path::printing::PathStyle; + use crate::precedence::Precedence; + use crate::token; + #[cfg(feature = "full")] + use crate::ty::ReturnType; + use proc_macro2::{Literal, Span, TokenStream}; + use 
quote::{ToTokens, TokenStreamExt as _}; + + #[cfg(feature = "full")] + pub(crate) fn outer_attrs_to_tokens(attrs: &[Attribute], tokens: &mut TokenStream) { + tokens.append_all(attrs.outer()); + } + + #[cfg(feature = "full")] + fn inner_attrs_to_tokens(attrs: &[Attribute], tokens: &mut TokenStream) { + tokens.append_all(attrs.inner()); + } + + #[cfg(not(feature = "full"))] + pub(crate) fn outer_attrs_to_tokens(_attrs: &[Attribute], _tokens: &mut TokenStream) {} + + pub(crate) fn print_subexpression( + expr: &Expr, + needs_group: bool, + tokens: &mut TokenStream, + mut fixup: FixupContext, + ) { + if needs_group { + // If we are surrounding the whole cond in parentheses, such as: + // + // if (return Struct {}) {} + // + // then there is no need for parenthesizing the individual struct + // expressions within. On the other hand if the whole cond is not + // parenthesized, then print_expr must parenthesize exterior struct + // literals. + // + // if x == (Struct {}) {} + // + fixup = FixupContext::NONE; + } + + let do_print_expr = |tokens: &mut TokenStream| print_expr(expr, tokens, fixup); + + if needs_group { + token::Paren::default().surround(tokens, do_print_expr); + } else { + do_print_expr(tokens); + } + } + + pub(crate) fn print_expr(expr: &Expr, tokens: &mut TokenStream, mut fixup: FixupContext) { + #[cfg(feature = "full")] + let needs_group = fixup.parenthesize(expr); + #[cfg(not(feature = "full"))] + let needs_group = false; + + if needs_group { + fixup = FixupContext::NONE; + } + + let do_print_expr = |tokens: &mut TokenStream| match expr { + #[cfg(feature = "full")] + Expr::Array(e) => e.to_tokens(tokens), + #[cfg(feature = "full")] + Expr::Assign(e) => print_expr_assign(e, tokens, fixup), + #[cfg(feature = "full")] + Expr::Async(e) => e.to_tokens(tokens), + #[cfg(feature = "full")] + Expr::Await(e) => print_expr_await(e, tokens, fixup), + Expr::Binary(e) => print_expr_binary(e, tokens, fixup), + #[cfg(feature = "full")] + Expr::Block(e) => e.to_tokens(tokens), + #[cfg(feature = "full")] + Expr::Break(e) => print_expr_break(e, tokens, fixup), + Expr::Call(e) => print_expr_call(e, tokens, fixup), + Expr::Cast(e) => print_expr_cast(e, tokens, fixup), + #[cfg(feature = "full")] + Expr::Closure(e) => print_expr_closure(e, tokens, fixup), + #[cfg(feature = "full")] + Expr::Const(e) => e.to_tokens(tokens), + #[cfg(feature = "full")] + Expr::Continue(e) => e.to_tokens(tokens), + Expr::Field(e) => print_expr_field(e, tokens, fixup), + #[cfg(feature = "full")] + Expr::ForLoop(e) => e.to_tokens(tokens), + Expr::Group(e) => e.to_tokens(tokens), + #[cfg(feature = "full")] + Expr::If(e) => e.to_tokens(tokens), + Expr::Index(e) => print_expr_index(e, tokens, fixup), + #[cfg(feature = "full")] + Expr::Infer(e) => e.to_tokens(tokens), + #[cfg(feature = "full")] + Expr::Let(e) => print_expr_let(e, tokens, fixup), + Expr::Lit(e) => e.to_tokens(tokens), + #[cfg(feature = "full")] + Expr::Loop(e) => e.to_tokens(tokens), + Expr::Macro(e) => e.to_tokens(tokens), + #[cfg(feature = "full")] + Expr::Match(e) => e.to_tokens(tokens), + Expr::MethodCall(e) => print_expr_method_call(e, tokens, fixup), + Expr::Paren(e) => e.to_tokens(tokens), + Expr::Path(e) => e.to_tokens(tokens), + #[cfg(feature = "full")] + Expr::Range(e) => print_expr_range(e, tokens, fixup), + #[cfg(feature = "full")] + Expr::RawAddr(e) => print_expr_raw_addr(e, tokens, fixup), + Expr::Reference(e) => print_expr_reference(e, tokens, fixup), + #[cfg(feature = "full")] + Expr::Repeat(e) => e.to_tokens(tokens), + #[cfg(feature = "full")] + 
Expr::Return(e) => print_expr_return(e, tokens, fixup), + Expr::Struct(e) => e.to_tokens(tokens), + #[cfg(feature = "full")] + Expr::Try(e) => print_expr_try(e, tokens, fixup), + #[cfg(feature = "full")] + Expr::TryBlock(e) => e.to_tokens(tokens), + Expr::Tuple(e) => e.to_tokens(tokens), + Expr::Unary(e) => print_expr_unary(e, tokens, fixup), + #[cfg(feature = "full")] + Expr::Unsafe(e) => e.to_tokens(tokens), + Expr::Verbatim(e) => e.to_tokens(tokens), + #[cfg(feature = "full")] + Expr::While(e) => e.to_tokens(tokens), + #[cfg(feature = "full")] + Expr::Yield(e) => print_expr_yield(e, tokens, fixup), + + #[cfg(not(feature = "full"))] + _ => unreachable!(), + }; + + if needs_group { + token::Paren::default().surround(tokens, do_print_expr); + } else { + do_print_expr(tokens); + } + } + + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for ExprArray { + fn to_tokens(&self, tokens: &mut TokenStream) { + outer_attrs_to_tokens(&self.attrs, tokens); + self.bracket_token.surround(tokens, |tokens| { + self.elems.to_tokens(tokens); + }); + } + } + + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for ExprAssign { + fn to_tokens(&self, tokens: &mut TokenStream) { + print_expr_assign(self, tokens, FixupContext::NONE); + } + } + + #[cfg(feature = "full")] + fn print_expr_assign(e: &ExprAssign, tokens: &mut TokenStream, mut fixup: FixupContext) { + outer_attrs_to_tokens(&e.attrs, tokens); + + let needs_group = !e.attrs.is_empty(); + if needs_group { + fixup = FixupContext::NONE; + } + + let do_print_expr = |tokens: &mut TokenStream| { + let (left_prec, left_fixup) = fixup.leftmost_subexpression_with_operator( + &e.left, + false, + false, + Precedence::Assign, + ); + print_subexpression(&e.left, left_prec <= Precedence::Range, tokens, left_fixup); + e.eq_token.to_tokens(tokens); + print_expr( + &e.right, + tokens, + fixup.rightmost_subexpression_fixup(false, false, Precedence::Assign), + ); + }; + + if needs_group { + token::Paren::default().surround(tokens, do_print_expr); + } else { + do_print_expr(tokens); + } + } + + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for ExprAsync { + fn to_tokens(&self, tokens: &mut TokenStream) { + outer_attrs_to_tokens(&self.attrs, tokens); + self.async_token.to_tokens(tokens); + self.capture.to_tokens(tokens); + self.block.to_tokens(tokens); + } + } + + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for ExprAwait { + fn to_tokens(&self, tokens: &mut TokenStream) { + print_expr_await(self, tokens, FixupContext::NONE); + } + } + + #[cfg(feature = "full")] + fn print_expr_await(e: &ExprAwait, tokens: &mut TokenStream, fixup: FixupContext) { + outer_attrs_to_tokens(&e.attrs, tokens); + let (left_prec, left_fixup) = fixup.leftmost_subexpression_with_dot(&e.base); + print_subexpression( + &e.base, + left_prec < Precedence::Unambiguous, + tokens, + left_fixup, + ); + e.dot_token.to_tokens(tokens); + e.await_token.to_tokens(tokens); + } + + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for ExprBinary { + fn to_tokens(&self, tokens: &mut TokenStream) { + print_expr_binary(self, tokens, FixupContext::NONE); + } + } + + fn print_expr_binary(e: &ExprBinary, tokens: &mut TokenStream, mut fixup: FixupContext) { + outer_attrs_to_tokens(&e.attrs, tokens); + + let needs_group = !e.attrs.is_empty(); + if needs_group { + fixup = FixupContext::NONE; + } + + let do_print_expr = 
|tokens: &mut TokenStream| { + let binop_prec = Precedence::of_binop(&e.op); + let (left_prec, left_fixup) = fixup.leftmost_subexpression_with_operator( + &e.left, + #[cfg(feature = "full")] + match &e.op { + BinOp::Sub(_) + | BinOp::Mul(_) + | BinOp::And(_) + | BinOp::Or(_) + | BinOp::BitAnd(_) + | BinOp::BitOr(_) + | BinOp::Shl(_) + | BinOp::Lt(_) => true, + _ => false, + }, + match &e.op { + BinOp::Shl(_) | BinOp::Lt(_) => true, + _ => false, + }, + #[cfg(feature = "full")] + binop_prec, + ); + let left_needs_group = match binop_prec { + Precedence::Assign => left_prec <= Precedence::Range, + Precedence::Compare => left_prec <= binop_prec, + _ => left_prec < binop_prec, + }; + + let right_fixup = fixup.rightmost_subexpression_fixup( + #[cfg(feature = "full")] + false, + #[cfg(feature = "full")] + false, + #[cfg(feature = "full")] + binop_prec, + ); + let right_needs_group = binop_prec != Precedence::Assign + && right_fixup.rightmost_subexpression_precedence(&e.right) <= binop_prec; + + print_subexpression(&e.left, left_needs_group, tokens, left_fixup); + e.op.to_tokens(tokens); + print_subexpression(&e.right, right_needs_group, tokens, right_fixup); + }; + + if needs_group { + token::Paren::default().surround(tokens, do_print_expr); + } else { + do_print_expr(tokens); + } + } + + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for ExprBlock { + fn to_tokens(&self, tokens: &mut TokenStream) { + outer_attrs_to_tokens(&self.attrs, tokens); + self.label.to_tokens(tokens); + self.block.brace_token.surround(tokens, |tokens| { + inner_attrs_to_tokens(&self.attrs, tokens); + tokens.append_all(&self.block.stmts); + }); + } + } + + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for ExprBreak { + fn to_tokens(&self, tokens: &mut TokenStream) { + print_expr_break(self, tokens, FixupContext::NONE); + } + } + + #[cfg(feature = "full")] + fn print_expr_break(e: &ExprBreak, tokens: &mut TokenStream, fixup: FixupContext) { + outer_attrs_to_tokens(&e.attrs, tokens); + e.break_token.to_tokens(tokens); + e.label.to_tokens(tokens); + if let Some(value) = &e.expr { + print_subexpression( + value, + // Parenthesize `break 'inner: loop { break 'inner 1 } + 1` + // ^---------------------------------^ + e.label.is_none() && classify::expr_leading_label(value), + tokens, + fixup.rightmost_subexpression_fixup(true, true, Precedence::Jump), + ); + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for ExprCall { + fn to_tokens(&self, tokens: &mut TokenStream) { + print_expr_call(self, tokens, FixupContext::NONE); + } + } + + fn print_expr_call(e: &ExprCall, tokens: &mut TokenStream, fixup: FixupContext) { + outer_attrs_to_tokens(&e.attrs, tokens); + + let (left_prec, left_fixup) = fixup.leftmost_subexpression_with_operator( + &e.func, + #[cfg(feature = "full")] + true, + false, + #[cfg(feature = "full")] + Precedence::Unambiguous, + ); + let needs_group = if let Expr::Field(func) = &*e.func { + func.member.is_named() + } else { + left_prec < Precedence::Unambiguous + }; + print_subexpression(&e.func, needs_group, tokens, left_fixup); + + e.paren_token.surround(tokens, |tokens| { + e.args.to_tokens(tokens); + }); + } + + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for ExprCast { + fn to_tokens(&self, tokens: &mut TokenStream) { + print_expr_cast(self, tokens, FixupContext::NONE); + } + } + + fn print_expr_cast(e: &ExprCast, tokens: &mut TokenStream, mut fixup: 
FixupContext) { + outer_attrs_to_tokens(&e.attrs, tokens); + + let needs_group = !e.attrs.is_empty(); + if needs_group { + fixup = FixupContext::NONE; + } + + let do_print_expr = |tokens: &mut TokenStream| { + let (left_prec, left_fixup) = fixup.leftmost_subexpression_with_operator( + &e.expr, + #[cfg(feature = "full")] + false, + false, + #[cfg(feature = "full")] + Precedence::Cast, + ); + print_subexpression(&e.expr, left_prec < Precedence::Cast, tokens, left_fixup); + e.as_token.to_tokens(tokens); + e.ty.to_tokens(tokens); + }; + + if needs_group { + token::Paren::default().surround(tokens, do_print_expr); + } else { + do_print_expr(tokens); + } + } + + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for ExprClosure { + fn to_tokens(&self, tokens: &mut TokenStream) { + print_expr_closure(self, tokens, FixupContext::NONE); + } + } + + #[cfg(feature = "full")] + fn print_expr_closure(e: &ExprClosure, tokens: &mut TokenStream, fixup: FixupContext) { + outer_attrs_to_tokens(&e.attrs, tokens); + e.lifetimes.to_tokens(tokens); + e.constness.to_tokens(tokens); + e.movability.to_tokens(tokens); + e.asyncness.to_tokens(tokens); + e.capture.to_tokens(tokens); + e.or1_token.to_tokens(tokens); + e.inputs.to_tokens(tokens); + e.or2_token.to_tokens(tokens); + e.output.to_tokens(tokens); + if matches!(e.output, ReturnType::Default) + || matches!(&*e.body, Expr::Block(body) if body.attrs.is_empty() && body.label.is_none()) + { + print_expr( + &e.body, + tokens, + fixup.rightmost_subexpression_fixup(false, false, Precedence::Jump), + ); + } else { + token::Brace::default().surround(tokens, |tokens| { + print_expr(&e.body, tokens, FixupContext::new_stmt()); + }); + } + } + + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for ExprConst { + fn to_tokens(&self, tokens: &mut TokenStream) { + outer_attrs_to_tokens(&self.attrs, tokens); + self.const_token.to_tokens(tokens); + self.block.brace_token.surround(tokens, |tokens| { + inner_attrs_to_tokens(&self.attrs, tokens); + tokens.append_all(&self.block.stmts); + }); + } + } + + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for ExprContinue { + fn to_tokens(&self, tokens: &mut TokenStream) { + outer_attrs_to_tokens(&self.attrs, tokens); + self.continue_token.to_tokens(tokens); + self.label.to_tokens(tokens); + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for ExprField { + fn to_tokens(&self, tokens: &mut TokenStream) { + print_expr_field(self, tokens, FixupContext::NONE); + } + } + + fn print_expr_field(e: &ExprField, tokens: &mut TokenStream, fixup: FixupContext) { + outer_attrs_to_tokens(&e.attrs, tokens); + let (left_prec, left_fixup) = fixup.leftmost_subexpression_with_dot(&e.base); + print_subexpression( + &e.base, + left_prec < Precedence::Unambiguous, + tokens, + left_fixup, + ); + e.dot_token.to_tokens(tokens); + e.member.to_tokens(tokens); + } + + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for ExprForLoop { + fn to_tokens(&self, tokens: &mut TokenStream) { + outer_attrs_to_tokens(&self.attrs, tokens); + self.label.to_tokens(tokens); + self.for_token.to_tokens(tokens); + self.pat.to_tokens(tokens); + self.in_token.to_tokens(tokens); + print_expr(&self.expr, tokens, FixupContext::new_condition()); + self.body.brace_token.surround(tokens, |tokens| { + inner_attrs_to_tokens(&self.attrs, tokens); + 
tokens.append_all(&self.body.stmts); + }); + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for ExprGroup { + fn to_tokens(&self, tokens: &mut TokenStream) { + outer_attrs_to_tokens(&self.attrs, tokens); + self.group_token.surround(tokens, |tokens| { + self.expr.to_tokens(tokens); + }); + } + } + + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for ExprIf { + fn to_tokens(&self, tokens: &mut TokenStream) { + outer_attrs_to_tokens(&self.attrs, tokens); + + let mut expr = self; + loop { + expr.if_token.to_tokens(tokens); + print_expr(&expr.cond, tokens, FixupContext::new_condition()); + expr.then_branch.to_tokens(tokens); + + let (else_token, else_) = match &expr.else_branch { + Some(else_branch) => else_branch, + None => break, + }; + + else_token.to_tokens(tokens); + match &**else_ { + Expr::If(next) => { + expr = next; + } + Expr::Block(last) => { + last.to_tokens(tokens); + break; + } + // If this is not one of the valid expressions to exist in + // an else clause, wrap it in a block. + other => { + token::Brace::default().surround(tokens, |tokens| { + print_expr(other, tokens, FixupContext::new_stmt()); + }); + break; + } + } + } + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for ExprIndex { + fn to_tokens(&self, tokens: &mut TokenStream) { + print_expr_index(self, tokens, FixupContext::NONE); + } + } + + fn print_expr_index(e: &ExprIndex, tokens: &mut TokenStream, fixup: FixupContext) { + outer_attrs_to_tokens(&e.attrs, tokens); + let (left_prec, left_fixup) = fixup.leftmost_subexpression_with_operator( + &e.expr, + #[cfg(feature = "full")] + true, + false, + #[cfg(feature = "full")] + Precedence::Unambiguous, + ); + print_subexpression( + &e.expr, + left_prec < Precedence::Unambiguous, + tokens, + left_fixup, + ); + e.bracket_token.surround(tokens, |tokens| { + e.index.to_tokens(tokens); + }); + } + + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for ExprInfer { + fn to_tokens(&self, tokens: &mut TokenStream) { + outer_attrs_to_tokens(&self.attrs, tokens); + self.underscore_token.to_tokens(tokens); + } + } + + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for ExprLet { + fn to_tokens(&self, tokens: &mut TokenStream) { + print_expr_let(self, tokens, FixupContext::NONE); + } + } + + #[cfg(feature = "full")] + fn print_expr_let(e: &ExprLet, tokens: &mut TokenStream, fixup: FixupContext) { + outer_attrs_to_tokens(&e.attrs, tokens); + e.let_token.to_tokens(tokens); + e.pat.to_tokens(tokens); + e.eq_token.to_tokens(tokens); + let (right_prec, right_fixup) = fixup.rightmost_subexpression(&e.expr, Precedence::Let); + print_subexpression(&e.expr, right_prec < Precedence::Let, tokens, right_fixup); + } + + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for ExprLit { + fn to_tokens(&self, tokens: &mut TokenStream) { + outer_attrs_to_tokens(&self.attrs, tokens); + self.lit.to_tokens(tokens); + } + } + + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for ExprLoop { + fn to_tokens(&self, tokens: &mut TokenStream) { + outer_attrs_to_tokens(&self.attrs, tokens); + self.label.to_tokens(tokens); + self.loop_token.to_tokens(tokens); + self.body.brace_token.surround(tokens, |tokens| { + inner_attrs_to_tokens(&self.attrs, tokens); + tokens.append_all(&self.body.stmts); + }); + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = 
"printing")))] + impl ToTokens for ExprMacro { + fn to_tokens(&self, tokens: &mut TokenStream) { + outer_attrs_to_tokens(&self.attrs, tokens); + self.mac.to_tokens(tokens); + } + } + + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for ExprMatch { + fn to_tokens(&self, tokens: &mut TokenStream) { + outer_attrs_to_tokens(&self.attrs, tokens); + self.match_token.to_tokens(tokens); + print_expr(&self.expr, tokens, FixupContext::new_condition()); + self.brace_token.surround(tokens, |tokens| { + inner_attrs_to_tokens(&self.attrs, tokens); + for (i, arm) in self.arms.iter().enumerate() { + arm.to_tokens(tokens); + // Ensure that we have a comma after a non-block arm, except + // for the last one. + let is_last = i == self.arms.len() - 1; + if !is_last + && classify::requires_comma_to_be_match_arm(&arm.body) + && arm.comma.is_none() + { + <Token![,]>::default().to_tokens(tokens); + } + } + }); + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for ExprMethodCall { + fn to_tokens(&self, tokens: &mut TokenStream) { + print_expr_method_call(self, tokens, FixupContext::NONE); + } + } + + fn print_expr_method_call(e: &ExprMethodCall, tokens: &mut TokenStream, fixup: FixupContext) { + outer_attrs_to_tokens(&e.attrs, tokens); + let (left_prec, left_fixup) = fixup.leftmost_subexpression_with_dot(&e.receiver); + print_subexpression( + &e.receiver, + left_prec < Precedence::Unambiguous, + tokens, + left_fixup, + ); + e.dot_token.to_tokens(tokens); + e.method.to_tokens(tokens); + if let Some(turbofish) = &e.turbofish { + path::printing::print_angle_bracketed_generic_arguments( + tokens, + turbofish, + PathStyle::Expr, + ); + } + e.paren_token.surround(tokens, |tokens| { + e.args.to_tokens(tokens); + }); + } + + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for ExprParen { + fn to_tokens(&self, tokens: &mut TokenStream) { + outer_attrs_to_tokens(&self.attrs, tokens); + self.paren_token.surround(tokens, |tokens| { + self.expr.to_tokens(tokens); + }); + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for ExprPath { + fn to_tokens(&self, tokens: &mut TokenStream) { + outer_attrs_to_tokens(&self.attrs, tokens); + path::printing::print_qpath(tokens, &self.qself, &self.path, PathStyle::Expr); + } + } + + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for ExprRange { + fn to_tokens(&self, tokens: &mut TokenStream) { + print_expr_range(self, tokens, FixupContext::NONE); + } + } + + #[cfg(feature = "full")] + fn print_expr_range(e: &ExprRange, tokens: &mut TokenStream, mut fixup: FixupContext) { + outer_attrs_to_tokens(&e.attrs, tokens); + + let needs_group = !e.attrs.is_empty(); + if needs_group { + fixup = FixupContext::NONE; + } + + let do_print_expr = |tokens: &mut TokenStream| { + if let Some(start) = &e.start { + let (left_prec, left_fixup) = fixup.leftmost_subexpression_with_operator( + start, + true, + false, + Precedence::Range, + ); + print_subexpression(start, left_prec <= Precedence::Range, tokens, left_fixup); + } + e.limits.to_tokens(tokens); + if let Some(end) = &e.end { + let right_fixup = + fixup.rightmost_subexpression_fixup(false, true, Precedence::Range); + let right_prec = right_fixup.rightmost_subexpression_precedence(end); + print_subexpression(end, right_prec <= Precedence::Range, tokens, right_fixup); + } + }; + + if needs_group { + token::Paren::default().surround(tokens, do_print_expr); + } else { + 
do_print_expr(tokens); + } + } + + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for ExprRawAddr { + fn to_tokens(&self, tokens: &mut TokenStream) { + print_expr_raw_addr(self, tokens, FixupContext::NONE); + } + } + + #[cfg(feature = "full")] + fn print_expr_raw_addr(e: &ExprRawAddr, tokens: &mut TokenStream, fixup: FixupContext) { + outer_attrs_to_tokens(&e.attrs, tokens); + e.and_token.to_tokens(tokens); + e.raw.to_tokens(tokens); + e.mutability.to_tokens(tokens); + let (right_prec, right_fixup) = fixup.rightmost_subexpression(&e.expr, Precedence::Prefix); + print_subexpression( + &e.expr, + right_prec < Precedence::Prefix, + tokens, + right_fixup, + ); + } + + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for ExprReference { + fn to_tokens(&self, tokens: &mut TokenStream) { + print_expr_reference(self, tokens, FixupContext::NONE); + } + } + + fn print_expr_reference(e: &ExprReference, tokens: &mut TokenStream, fixup: FixupContext) { + outer_attrs_to_tokens(&e.attrs, tokens); + e.and_token.to_tokens(tokens); + e.mutability.to_tokens(tokens); + let (right_prec, right_fixup) = fixup.rightmost_subexpression( + &e.expr, + #[cfg(feature = "full")] + Precedence::Prefix, + ); + print_subexpression( + &e.expr, + right_prec < Precedence::Prefix, + tokens, + right_fixup, + ); + } + + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for ExprRepeat { + fn to_tokens(&self, tokens: &mut TokenStream) { + outer_attrs_to_tokens(&self.attrs, tokens); + self.bracket_token.surround(tokens, |tokens| { + self.expr.to_tokens(tokens); + self.semi_token.to_tokens(tokens); + self.len.to_tokens(tokens); + }); + } + } + + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for ExprReturn { + fn to_tokens(&self, tokens: &mut TokenStream) { + print_expr_return(self, tokens, FixupContext::NONE); + } + } + + #[cfg(feature = "full")] + fn print_expr_return(e: &ExprReturn, tokens: &mut TokenStream, fixup: FixupContext) { + outer_attrs_to_tokens(&e.attrs, tokens); + e.return_token.to_tokens(tokens); + if let Some(expr) = &e.expr { + print_expr( + expr, + tokens, + fixup.rightmost_subexpression_fixup(true, false, Precedence::Jump), + ); + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for ExprStruct { + fn to_tokens(&self, tokens: &mut TokenStream) { + outer_attrs_to_tokens(&self.attrs, tokens); + path::printing::print_qpath(tokens, &self.qself, &self.path, PathStyle::Expr); + self.brace_token.surround(tokens, |tokens| { + self.fields.to_tokens(tokens); + if let Some(dot2_token) = &self.dot2_token { + dot2_token.to_tokens(tokens); + } else if self.rest.is_some() { + Token![..](Span::call_site()).to_tokens(tokens); + } + self.rest.to_tokens(tokens); + }); + } + } + + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for ExprTry { + fn to_tokens(&self, tokens: &mut TokenStream) { + print_expr_try(self, tokens, FixupContext::NONE); + } + } + + #[cfg(feature = "full")] + fn print_expr_try(e: &ExprTry, tokens: &mut TokenStream, fixup: FixupContext) { + outer_attrs_to_tokens(&e.attrs, tokens); + let (left_prec, left_fixup) = fixup.leftmost_subexpression_with_dot(&e.expr); + print_subexpression( + &e.expr, + left_prec < Precedence::Unambiguous, + tokens, + left_fixup, + ); + e.question_token.to_tokens(tokens); + } + + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + 
impl ToTokens for ExprTryBlock { + fn to_tokens(&self, tokens: &mut TokenStream) { + outer_attrs_to_tokens(&self.attrs, tokens); + self.try_token.to_tokens(tokens); + self.block.to_tokens(tokens); + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for ExprTuple { + fn to_tokens(&self, tokens: &mut TokenStream) { + outer_attrs_to_tokens(&self.attrs, tokens); + self.paren_token.surround(tokens, |tokens| { + self.elems.to_tokens(tokens); + // If we only have one argument, we need a trailing comma to + // distinguish ExprTuple from ExprParen. + if self.elems.len() == 1 && !self.elems.trailing_punct() { + <Token![,]>::default().to_tokens(tokens); + } + }); + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for ExprUnary { + fn to_tokens(&self, tokens: &mut TokenStream) { + print_expr_unary(self, tokens, FixupContext::NONE); + } + } + + fn print_expr_unary(e: &ExprUnary, tokens: &mut TokenStream, fixup: FixupContext) { + outer_attrs_to_tokens(&e.attrs, tokens); + e.op.to_tokens(tokens); + let (right_prec, right_fixup) = fixup.rightmost_subexpression( + &e.expr, + #[cfg(feature = "full")] + Precedence::Prefix, + ); + print_subexpression( + &e.expr, + right_prec < Precedence::Prefix, + tokens, + right_fixup, + ); + } + + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for ExprUnsafe { + fn to_tokens(&self, tokens: &mut TokenStream) { + outer_attrs_to_tokens(&self.attrs, tokens); + self.unsafe_token.to_tokens(tokens); + self.block.brace_token.surround(tokens, |tokens| { + inner_attrs_to_tokens(&self.attrs, tokens); + tokens.append_all(&self.block.stmts); + }); + } + } + + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for ExprWhile { + fn to_tokens(&self, tokens: &mut TokenStream) { + outer_attrs_to_tokens(&self.attrs, tokens); + self.label.to_tokens(tokens); + self.while_token.to_tokens(tokens); + print_expr(&self.cond, tokens, FixupContext::new_condition()); + self.body.brace_token.surround(tokens, |tokens| { + inner_attrs_to_tokens(&self.attrs, tokens); + tokens.append_all(&self.body.stmts); + }); + } + } + + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for ExprYield { + fn to_tokens(&self, tokens: &mut TokenStream) { + print_expr_yield(self, tokens, FixupContext::NONE); + } + } + + #[cfg(feature = "full")] + fn print_expr_yield(e: &ExprYield, tokens: &mut TokenStream, fixup: FixupContext) { + outer_attrs_to_tokens(&e.attrs, tokens); + e.yield_token.to_tokens(tokens); + if let Some(expr) = &e.expr { + print_expr( + expr, + tokens, + fixup.rightmost_subexpression_fixup(true, false, Precedence::Jump), + ); + } + } + + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for Arm { + fn to_tokens(&self, tokens: &mut TokenStream) { + tokens.append_all(&self.attrs); + self.pat.to_tokens(tokens); + if let Some((if_token, guard)) = &self.guard { + if_token.to_tokens(tokens); + guard.to_tokens(tokens); + } + self.fat_arrow_token.to_tokens(tokens); + print_expr(&self.body, tokens, FixupContext::new_match_arm()); + self.comma.to_tokens(tokens); + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for FieldValue { + fn to_tokens(&self, tokens: &mut TokenStream) { + outer_attrs_to_tokens(&self.attrs, tokens); + self.member.to_tokens(tokens); + if let Some(colon_token) = &self.colon_token { + colon_token.to_tokens(tokens); + 
self.expr.to_tokens(tokens); + } + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for Index { + fn to_tokens(&self, tokens: &mut TokenStream) { + let mut lit = Literal::i64_unsuffixed(i64::from(self.index)); + lit.set_span(self.span); + tokens.append(lit); + } + } + + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for Label { + fn to_tokens(&self, tokens: &mut TokenStream) { + self.name.to_tokens(tokens); + self.colon_token.to_tokens(tokens); + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for Member { + fn to_tokens(&self, tokens: &mut TokenStream) { + match self { + Member::Named(ident) => ident.to_tokens(tokens), + Member::Unnamed(index) => index.to_tokens(tokens), + } + } + } + + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for RangeLimits { + fn to_tokens(&self, tokens: &mut TokenStream) { + match self { + RangeLimits::HalfOpen(t) => t.to_tokens(tokens), + RangeLimits::Closed(t) => t.to_tokens(tokens), + } + } + } + + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for PointerMutability { + fn to_tokens(&self, tokens: &mut TokenStream) { + match self { + PointerMutability::Const(const_token) => const_token.to_tokens(tokens), + PointerMutability::Mut(mut_token) => mut_token.to_tokens(tokens), + } + } + } +} diff --git a/vendor/syn/src/ext.rs b/vendor/syn/src/ext.rs new file mode 100644 index 00000000000000..7cf62bd45ef560 --- /dev/null +++ b/vendor/syn/src/ext.rs @@ -0,0 +1,179 @@ +//! Extension traits to provide parsing methods on foreign types. + +#[cfg(feature = "parsing")] +use crate::buffer::Cursor; +#[cfg(feature = "parsing")] +use crate::error::Result; +#[cfg(feature = "parsing")] +use crate::parse::ParseStream; +#[cfg(feature = "parsing")] +use crate::parse::Peek; +#[cfg(feature = "parsing")] +use crate::sealed::lookahead; +#[cfg(feature = "parsing")] +use crate::token::CustomToken; +use proc_macro2::{Ident, Punct, Spacing, Span, TokenStream, TokenTree}; +use std::iter; + +/// Additional methods for `Ident` not provided by proc-macro2 or libproc_macro. +/// +/// This trait is sealed and cannot be implemented for types outside of Syn. It +/// is implemented only for `proc_macro2::Ident`. +pub trait IdentExt: Sized + private::Sealed { + /// Parses any identifier including keywords. + /// + /// This is useful when parsing macro input which allows Rust keywords as + /// identifiers. + /// + /// # Example + /// + /// ``` + /// use syn::{Error, Ident, Result, Token}; + /// use syn::ext::IdentExt; + /// use syn::parse::ParseStream; + /// + /// mod kw { + /// syn::custom_keyword!(name); + /// } + /// + /// // Parses input that looks like `name = NAME` where `NAME` can be + /// // any identifier. + /// // + /// // Examples: + /// // + /// // name = anything + /// // name = impl + /// fn parse_dsl(input: ParseStream) -> Result<Ident> { + /// input.parse::<kw::name>()?; + /// input.parse::<Token![=]>()?; + /// let name = input.call(Ident::parse_any)?; + /// Ok(name) + /// } + /// ``` + #[cfg(feature = "parsing")] + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + fn parse_any(input: ParseStream) -> Result<Self>; + + /// Peeks any identifier including keywords. Usage: + /// `input.peek(Ident::peek_any)` + /// + /// This is different from `input.peek(Ident)` which only returns true in + /// the case of an ident which is not a Rust keyword. 
+ #[cfg(feature = "parsing")] + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + #[allow(non_upper_case_globals)] + const peek_any: private::PeekFn = private::PeekFn; + + /// Strips the raw marker `r#`, if any, from the beginning of an ident. + /// + /// - unraw(`x`) = `x` + /// - unraw(`move`) = `move` + /// - unraw(`r#move`) = `move` + /// + /// # Example + /// + /// In the case of interop with other languages like Python that have a + /// different set of keywords than Rust, we might come across macro input + /// that involves raw identifiers to refer to ordinary variables in the + /// other language with a name that happens to be a Rust keyword. + /// + /// The function below appends an identifier from the caller's input onto a + /// fixed prefix. Without using `unraw()`, this would tend to produce + /// invalid identifiers like `__pyo3_get_r#move`. + /// + /// ``` + /// use proc_macro2::Span; + /// use syn::Ident; + /// use syn::ext::IdentExt; + /// + /// fn ident_for_getter(variable: &Ident) -> Ident { + /// let getter = format!("__pyo3_get_{}", variable.unraw()); + /// Ident::new(&getter, Span::call_site()) + /// } + /// ``` + fn unraw(&self) -> Ident; +} + +impl IdentExt for Ident { + #[cfg(feature = "parsing")] + fn parse_any(input: ParseStream) -> Result<Self> { + input.step(|cursor| match cursor.ident() { + Some((ident, rest)) => Ok((ident, rest)), + None => Err(cursor.error("expected ident")), + }) + } + + fn unraw(&self) -> Ident { + let string = self.to_string(); + if let Some(string) = string.strip_prefix("r#") { + Ident::new(string, self.span()) + } else { + self.clone() + } + } +} + +#[cfg(feature = "parsing")] +impl Peek for private::PeekFn { + type Token = private::IdentAny; +} + +#[cfg(feature = "parsing")] +impl CustomToken for private::IdentAny { + fn peek(cursor: Cursor) -> bool { + cursor.ident().is_some() + } + + fn display() -> &'static str { + "identifier" + } +} + +#[cfg(feature = "parsing")] +impl lookahead::Sealed for private::PeekFn {} + +pub(crate) trait TokenStreamExt { + fn append(&mut self, token: TokenTree); +} + +impl TokenStreamExt for TokenStream { + fn append(&mut self, token: TokenTree) { + self.extend(iter::once(token)); + } +} + +pub(crate) trait PunctExt { + fn new_spanned(ch: char, spacing: Spacing, span: Span) -> Self; +} + +impl PunctExt for Punct { + fn new_spanned(ch: char, spacing: Spacing, span: Span) -> Self { + let mut punct = Punct::new(ch, spacing); + punct.set_span(span); + punct + } +} + +mod private { + use proc_macro2::Ident; + + pub trait Sealed {} + + impl Sealed for Ident {} + + #[cfg(feature = "parsing")] + pub struct PeekFn; + + #[cfg(feature = "parsing")] + pub struct IdentAny; + + #[cfg(feature = "parsing")] + impl Copy for PeekFn {} + + #[cfg(feature = "parsing")] + impl Clone for PeekFn { + fn clone(&self) -> Self { + *self + } + } +} diff --git a/vendor/syn/src/file.rs b/vendor/syn/src/file.rs new file mode 100644 index 00000000000000..066f97b1a2bfb9 --- /dev/null +++ b/vendor/syn/src/file.rs @@ -0,0 +1,125 @@ +use crate::attr::Attribute; +use crate::item::Item; + +ast_struct! { + /// A complete file of Rust source code. + /// + /// Typically `File` objects are created with [`parse_file`]. + /// + /// [`parse_file`]: crate::parse_file + /// + /// # Example + /// + /// Parse a Rust source file into a `syn::File` and print out a debug + /// representation of the syntax tree. 
+ /// + /// ``` + /// use std::env; + /// use std::fs; + /// use std::process; + /// + /// fn main() { + /// # } + /// # + /// # fn fake_main() { + /// let mut args = env::args(); + /// let _ = args.next(); // executable name + /// + /// let filename = match (args.next(), args.next()) { + /// (Some(filename), None) => filename, + /// _ => { + /// eprintln!("Usage: dump-syntax path/to/filename.rs"); + /// process::exit(1); + /// } + /// }; + /// + /// let src = fs::read_to_string(&filename).expect("unable to read file"); + /// let syntax = syn::parse_file(&src).expect("unable to parse file"); + /// + /// // Debug impl is available if Syn is built with "extra-traits" feature. + /// println!("{:#?}", syntax); + /// } + /// ``` + /// + /// Running with its own source code as input, this program prints output + /// that begins with: + /// + /// ```text + /// File { + /// shebang: None, + /// attrs: [], + /// items: [ + /// Use( + /// ItemUse { + /// attrs: [], + /// vis: Inherited, + /// use_token: Use, + /// leading_colon: None, + /// tree: Path( + /// UsePath { + /// ident: Ident( + /// std, + /// ), + /// colon2_token: Colon2, + /// tree: Name( + /// UseName { + /// ident: Ident( + /// env, + /// ), + /// }, + /// ), + /// }, + /// ), + /// semi_token: Semi, + /// }, + /// ), + /// ... + /// ``` + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + pub struct File { + pub shebang: Option<String>, + pub attrs: Vec<Attribute>, + pub items: Vec<Item>, + } +} + +#[cfg(feature = "parsing")] +pub(crate) mod parsing { + use crate::attr::Attribute; + use crate::error::Result; + use crate::file::File; + use crate::parse::{Parse, ParseStream}; + + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + impl Parse for File { + fn parse(input: ParseStream) -> Result<Self> { + Ok(File { + shebang: None, + attrs: input.call(Attribute::parse_inner)?, + items: { + let mut items = Vec::new(); + while !input.is_empty() { + items.push(input.parse()?); + } + items + }, + }) + } + } +} + +#[cfg(feature = "printing")] +mod printing { + use crate::attr::FilterAttrs; + use crate::file::File; + use proc_macro2::TokenStream; + use quote::{ToTokens, TokenStreamExt as _}; + + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for File { + fn to_tokens(&self, tokens: &mut TokenStream) { + tokens.append_all(self.attrs.inner()); + tokens.append_all(&self.items); + } + } +} diff --git a/vendor/syn/src/fixup.rs b/vendor/syn/src/fixup.rs new file mode 100644 index 00000000000000..6d2c3092d54838 --- /dev/null +++ b/vendor/syn/src/fixup.rs @@ -0,0 +1,773 @@ +use crate::classify; +use crate::expr::Expr; +#[cfg(feature = "full")] +use crate::expr::{ + ExprBreak, ExprRange, ExprRawAddr, ExprReference, ExprReturn, ExprUnary, ExprYield, +}; +use crate::precedence::Precedence; +#[cfg(feature = "full")] +use crate::ty::ReturnType; + +pub(crate) struct FixupContext { + #[cfg(feature = "full")] + previous_operator: Precedence, + #[cfg(feature = "full")] + next_operator: Precedence, + + // Print expression such that it can be parsed back as a statement + // consisting of the original expression. + // + // The effect of this is for binary operators in statement position to set + // `leftmost_subexpression_in_stmt` when printing their left-hand operand. 
+ // + // (match x {}) - 1; // match needs parens when LHS of binary operator + // + // match x {}; // not when its own statement + // + #[cfg(feature = "full")] + stmt: bool, + + // This is the difference between: + // + // (match x {}) - 1; // subexpression needs parens + // + // let _ = match x {} - 1; // no parens + // + // There are 3 distinguishable contexts in which `print_expr` might be + // called with the expression `$match` as its argument, where `$match` + // represents an expression of kind `ExprKind::Match`: + // + // - stmt=false leftmost_subexpression_in_stmt=false + // + // Example: `let _ = $match - 1;` + // + // No parentheses required. + // + // - stmt=false leftmost_subexpression_in_stmt=true + // + // Example: `$match - 1;` + // + // Must parenthesize `($match)`, otherwise parsing back the output as a + // statement would terminate the statement after the closing brace of + // the match, parsing `-1;` as a separate statement. + // + // - stmt=true leftmost_subexpression_in_stmt=false + // + // Example: `$match;` + // + // No parentheses required. + #[cfg(feature = "full")] + leftmost_subexpression_in_stmt: bool, + + // Print expression such that it can be parsed as a match arm. + // + // This is almost equivalent to `stmt`, but the grammar diverges a tiny bit + // between statements and match arms when it comes to braced macro calls. + // Macro calls with brace delimiter terminate a statement without a + // semicolon, but do not terminate a match-arm without comma. + // + // m! {} - 1; // two statements: a macro call followed by -1 literal + // + // match () { + // _ => m! {} - 1, // binary subtraction operator + // } + // + #[cfg(feature = "full")] + match_arm: bool, + + // This is almost equivalent to `leftmost_subexpression_in_stmt`, other than + // for braced macro calls. + // + // If we have `m! {} - 1` as an expression, the leftmost subexpression + // `m! {}` will need to be parenthesized in the statement case but not the + // match-arm case. + // + // (m! {}) - 1; // subexpression needs parens + // + // match () { + // _ => m! {} - 1, // no parens + // } + // + #[cfg(feature = "full")] + leftmost_subexpression_in_match_arm: bool, + + // This is the difference between: + // + // if let _ = (Struct {}) {} // needs parens + // + // match () { + // () if let _ = Struct {} => {} // no parens + // } + // + #[cfg(feature = "full")] + condition: bool, + + // This is the difference between: + // + // if break Struct {} == (break) {} // needs parens + // + // if break break == Struct {} {} // no parens + // + #[cfg(feature = "full")] + rightmost_subexpression_in_condition: bool, + + // This is the difference between: + // + // if break ({ x }).field + 1 {} needs parens + // + // if break 1 + { x }.field {} // no parens + // + #[cfg(feature = "full")] + leftmost_subexpression_in_optional_operand: bool, + + // This is the difference between: + // + // let _ = (return) - 1; // without paren, this would return -1 + // + // let _ = return + 1; // no paren because '+' cannot begin expr + // + #[cfg(feature = "full")] + next_operator_can_begin_expr: bool, + + // This is the difference between: + // + // let _ = 1 + return 1; // no parens if rightmost subexpression + // + // let _ = 1 + (return 1) + 1; // needs parens + // + #[cfg(feature = "full")] + next_operator_can_continue_expr: bool, + + // This is the difference between: + // + // let _ = x as u8 + T; + // + // let _ = (x as u8) < T; + // + // Without parens, the latter would want to parse `u8<T...` as a type. 
+ next_operator_can_begin_generics: bool, +} + +impl FixupContext { + /// The default amount of fixing is minimal fixing. Fixups should be turned + /// on in a targeted fashion where needed. + pub const NONE: Self = FixupContext { + #[cfg(feature = "full")] + previous_operator: Precedence::MIN, + #[cfg(feature = "full")] + next_operator: Precedence::MIN, + #[cfg(feature = "full")] + stmt: false, + #[cfg(feature = "full")] + leftmost_subexpression_in_stmt: false, + #[cfg(feature = "full")] + match_arm: false, + #[cfg(feature = "full")] + leftmost_subexpression_in_match_arm: false, + #[cfg(feature = "full")] + condition: false, + #[cfg(feature = "full")] + rightmost_subexpression_in_condition: false, + #[cfg(feature = "full")] + leftmost_subexpression_in_optional_operand: false, + #[cfg(feature = "full")] + next_operator_can_begin_expr: false, + #[cfg(feature = "full")] + next_operator_can_continue_expr: false, + next_operator_can_begin_generics: false, + }; + + /// Create the initial fixup for printing an expression in statement + /// position. + #[cfg(feature = "full")] + pub fn new_stmt() -> Self { + FixupContext { + stmt: true, + ..FixupContext::NONE + } + } + + /// Create the initial fixup for printing an expression as the right-hand + /// side of a match arm. + #[cfg(feature = "full")] + pub fn new_match_arm() -> Self { + FixupContext { + match_arm: true, + ..FixupContext::NONE + } + } + + /// Create the initial fixup for printing an expression as the "condition" + /// of an `if` or `while`. There are a few other positions which are + /// grammatically equivalent and also use this, such as the iterator + /// expression in `for` and the scrutinee in `match`. + #[cfg(feature = "full")] + pub fn new_condition() -> Self { + FixupContext { + condition: true, + rightmost_subexpression_in_condition: true, + ..FixupContext::NONE + } + } + + /// Transform this fixup into the one that should apply when printing the + /// leftmost subexpression of the current expression. + /// + /// The leftmost subexpression is any subexpression that has the same first + /// token as the current expression, but has a different last token. + /// + /// For example in `$a + $b` and `$a.method()`, the subexpression `$a` is a + /// leftmost subexpression. + /// + /// Not every expression has a leftmost subexpression. For example neither + /// `-$a` nor `[$a]` have one. 
+ pub fn leftmost_subexpression_with_operator( + self, + expr: &Expr, + #[cfg(feature = "full")] next_operator_can_begin_expr: bool, + next_operator_can_begin_generics: bool, + #[cfg(feature = "full")] precedence: Precedence, + ) -> (Precedence, Self) { + let fixup = FixupContext { + #[cfg(feature = "full")] + next_operator: precedence, + #[cfg(feature = "full")] + stmt: false, + #[cfg(feature = "full")] + leftmost_subexpression_in_stmt: self.stmt || self.leftmost_subexpression_in_stmt, + #[cfg(feature = "full")] + match_arm: false, + #[cfg(feature = "full")] + leftmost_subexpression_in_match_arm: self.match_arm + || self.leftmost_subexpression_in_match_arm, + #[cfg(feature = "full")] + rightmost_subexpression_in_condition: false, + #[cfg(feature = "full")] + next_operator_can_begin_expr, + #[cfg(feature = "full")] + next_operator_can_continue_expr: true, + next_operator_can_begin_generics, + ..self + }; + + (fixup.leftmost_subexpression_precedence(expr), fixup) + } + + /// Transform this fixup into the one that should apply when printing a + /// leftmost subexpression followed by a `.` or `?` token, which confer + /// different statement boundary rules compared to other leftmost + /// subexpressions. + pub fn leftmost_subexpression_with_dot(self, expr: &Expr) -> (Precedence, Self) { + let fixup = FixupContext { + #[cfg(feature = "full")] + next_operator: Precedence::Unambiguous, + #[cfg(feature = "full")] + stmt: self.stmt || self.leftmost_subexpression_in_stmt, + #[cfg(feature = "full")] + leftmost_subexpression_in_stmt: false, + #[cfg(feature = "full")] + match_arm: self.match_arm || self.leftmost_subexpression_in_match_arm, + #[cfg(feature = "full")] + leftmost_subexpression_in_match_arm: false, + #[cfg(feature = "full")] + rightmost_subexpression_in_condition: false, + #[cfg(feature = "full")] + next_operator_can_begin_expr: false, + #[cfg(feature = "full")] + next_operator_can_continue_expr: true, + next_operator_can_begin_generics: false, + ..self + }; + + (fixup.leftmost_subexpression_precedence(expr), fixup) + } + + fn leftmost_subexpression_precedence(self, expr: &Expr) -> Precedence { + #[cfg(feature = "full")] + if !self.next_operator_can_begin_expr || self.next_operator == Precedence::Range { + if let Scan::Bailout = scan_right(expr, self, Precedence::MIN, 0, 0) { + if scan_left(expr, self) { + return Precedence::Unambiguous; + } + } + } + + self.precedence(expr) + } + + /// Transform this fixup into the one that should apply when printing the + /// rightmost subexpression of the current expression. + /// + /// The rightmost subexpression is any subexpression that has a different + /// first token than the current expression, but has the same last token. + /// + /// For example in `$a + $b` and `-$b`, the subexpression `$b` is a + /// rightmost subexpression. + /// + /// Not every expression has a rightmost subexpression. For example neither + /// `[$b]` nor `$a.f($b)` have one. 
+ pub fn rightmost_subexpression( + self, + expr: &Expr, + #[cfg(feature = "full")] precedence: Precedence, + ) -> (Precedence, Self) { + let fixup = self.rightmost_subexpression_fixup( + #[cfg(feature = "full")] + false, + #[cfg(feature = "full")] + false, + #[cfg(feature = "full")] + precedence, + ); + (fixup.rightmost_subexpression_precedence(expr), fixup) + } + + pub fn rightmost_subexpression_fixup( + self, + #[cfg(feature = "full")] reset_allow_struct: bool, + #[cfg(feature = "full")] optional_operand: bool, + #[cfg(feature = "full")] precedence: Precedence, + ) -> Self { + FixupContext { + #[cfg(feature = "full")] + previous_operator: precedence, + #[cfg(feature = "full")] + stmt: false, + #[cfg(feature = "full")] + leftmost_subexpression_in_stmt: false, + #[cfg(feature = "full")] + match_arm: false, + #[cfg(feature = "full")] + leftmost_subexpression_in_match_arm: false, + #[cfg(feature = "full")] + condition: self.condition && !reset_allow_struct, + #[cfg(feature = "full")] + leftmost_subexpression_in_optional_operand: self.condition && optional_operand, + ..self + } + } + + pub fn rightmost_subexpression_precedence(self, expr: &Expr) -> Precedence { + let default_prec = self.precedence(expr); + + #[cfg(feature = "full")] + if match self.previous_operator { + Precedence::Assign | Precedence::Let | Precedence::Prefix => { + default_prec < self.previous_operator + } + _ => default_prec <= self.previous_operator, + } && match self.next_operator { + Precedence::Range | Precedence::Or | Precedence::And => true, + _ => !self.next_operator_can_begin_expr, + } { + if let Scan::Bailout | Scan::Fail = scan_right(expr, self, self.previous_operator, 1, 0) + { + if scan_left(expr, self) { + return Precedence::Prefix; + } + } + } + + default_prec + } + + /// Determine whether parentheses are needed around the given expression to + /// head off the early termination of a statement or condition. + #[cfg(feature = "full")] + pub fn parenthesize(self, expr: &Expr) -> bool { + (self.leftmost_subexpression_in_stmt && !classify::requires_semi_to_be_stmt(expr)) + || ((self.stmt || self.leftmost_subexpression_in_stmt) && matches!(expr, Expr::Let(_))) + || (self.leftmost_subexpression_in_match_arm + && !classify::requires_comma_to_be_match_arm(expr)) + || (self.condition && matches!(expr, Expr::Struct(_))) + || (self.rightmost_subexpression_in_condition + && matches!( + expr, + Expr::Return(ExprReturn { expr: None, .. }) + | Expr::Yield(ExprYield { expr: None, .. }) + )) + || (self.rightmost_subexpression_in_condition + && !self.condition + && matches!( + expr, + Expr::Break(ExprBreak { expr: None, .. }) + | Expr::Path(_) + | Expr::Range(ExprRange { end: None, .. }) + )) + || (self.leftmost_subexpression_in_optional_operand + && matches!(expr, Expr::Block(expr) if expr.attrs.is_empty() && expr.label.is_none())) + } + + /// Determines the effective precedence of a subexpression. Some expressions + /// have higher or lower precedence when adjacent to particular operators. + fn precedence(self, expr: &Expr) -> Precedence { + #[cfg(feature = "full")] + if self.next_operator_can_begin_expr { + // Decrease precedence of value-less jumps when followed by an + // operator that would otherwise get interpreted as beginning a + // value for the jump. + if let Expr::Break(ExprBreak { expr: None, .. }) + | Expr::Return(ExprReturn { expr: None, .. }) + | Expr::Yield(ExprYield { expr: None, .. 
}) = expr + { + return Precedence::Jump; + } + } + + #[cfg(feature = "full")] + if !self.next_operator_can_continue_expr { + match expr { + // Increase precedence of expressions that extend to the end of + // current statement or group. + Expr::Break(_) + | Expr::Closure(_) + | Expr::Let(_) + | Expr::Return(_) + | Expr::Yield(_) => { + return Precedence::Prefix; + } + Expr::Range(e) if e.start.is_none() => return Precedence::Prefix, + _ => {} + } + } + + if self.next_operator_can_begin_generics { + if let Expr::Cast(cast) = expr { + if classify::trailing_unparameterized_path(&cast.ty) { + return Precedence::MIN; + } + } + } + + Precedence::of(expr) + } +} + +impl Copy for FixupContext {} + +impl Clone for FixupContext { + fn clone(&self) -> Self { + *self + } +} + +#[cfg(feature = "full")] +enum Scan { + Fail, + Bailout, + Consume, +} + +#[cfg(feature = "full")] +impl Copy for Scan {} + +#[cfg(feature = "full")] +impl Clone for Scan { + fn clone(&self) -> Self { + *self + } +} + +#[cfg(feature = "full")] +impl PartialEq for Scan { + fn eq(&self, other: &Self) -> bool { + *self as u8 == *other as u8 + } +} + +#[cfg(feature = "full")] +fn scan_left(expr: &Expr, fixup: FixupContext) -> bool { + match expr { + Expr::Assign(_) => fixup.previous_operator <= Precedence::Assign, + Expr::Binary(e) => match Precedence::of_binop(&e.op) { + Precedence::Assign => fixup.previous_operator <= Precedence::Assign, + binop_prec => fixup.previous_operator < binop_prec, + }, + Expr::Cast(_) => fixup.previous_operator < Precedence::Cast, + Expr::Range(e) => e.start.is_none() || fixup.previous_operator < Precedence::Assign, + _ => true, + } +} + +#[cfg(feature = "full")] +fn scan_right( + expr: &Expr, + fixup: FixupContext, + precedence: Precedence, + fail_offset: u8, + bailout_offset: u8, +) -> Scan { + let consume_by_precedence = if match precedence { + Precedence::Assign | Precedence::Compare => precedence <= fixup.next_operator, + _ => precedence < fixup.next_operator, + } || fixup.next_operator == Precedence::MIN + { + Scan::Consume + } else { + Scan::Bailout + }; + if fixup.parenthesize(expr) { + return consume_by_precedence; + } + match expr { + Expr::Assign(e) if e.attrs.is_empty() => { + if match fixup.next_operator { + Precedence::Unambiguous => fail_offset >= 2, + _ => bailout_offset >= 1, + } { + return Scan::Consume; + } + let right_fixup = fixup.rightmost_subexpression_fixup(false, false, Precedence::Assign); + let scan = scan_right( + &e.right, + right_fixup, + Precedence::Assign, + match fixup.next_operator { + Precedence::Unambiguous => fail_offset, + _ => 1, + }, + 1, + ); + if let Scan::Bailout | Scan::Consume = scan { + Scan::Consume + } else if let Precedence::Unambiguous = fixup.next_operator { + Scan::Fail + } else { + Scan::Bailout + } + } + Expr::Binary(e) if e.attrs.is_empty() => { + if match fixup.next_operator { + Precedence::Unambiguous => { + fail_offset >= 2 + && (consume_by_precedence == Scan::Consume || bailout_offset >= 1) + } + _ => bailout_offset >= 1, + } { + return Scan::Consume; + } + let binop_prec = Precedence::of_binop(&e.op); + if binop_prec == Precedence::Compare && fixup.next_operator == Precedence::Compare { + return Scan::Consume; + } + let right_fixup = fixup.rightmost_subexpression_fixup(false, false, binop_prec); + let scan = scan_right( + &e.right, + right_fixup, + binop_prec, + match fixup.next_operator { + Precedence::Unambiguous => fail_offset, + _ => 1, + }, + consume_by_precedence as u8 - Scan::Bailout as u8, + ); + match scan { + Scan::Fail => {} + 
Scan::Bailout => return consume_by_precedence, + Scan::Consume => return Scan::Consume, + } + let right_needs_group = binop_prec != Precedence::Assign + && right_fixup.rightmost_subexpression_precedence(&e.right) <= binop_prec; + if right_needs_group { + consume_by_precedence + } else if let (Scan::Fail, Precedence::Unambiguous) = (scan, fixup.next_operator) { + Scan::Fail + } else { + Scan::Bailout + } + } + Expr::RawAddr(ExprRawAddr { expr, .. }) + | Expr::Reference(ExprReference { expr, .. }) + | Expr::Unary(ExprUnary { expr, .. }) => { + if match fixup.next_operator { + Precedence::Unambiguous => { + fail_offset >= 2 + && (consume_by_precedence == Scan::Consume || bailout_offset >= 1) + } + _ => bailout_offset >= 1, + } { + return Scan::Consume; + } + let right_fixup = fixup.rightmost_subexpression_fixup(false, false, Precedence::Prefix); + let scan = scan_right( + expr, + right_fixup, + precedence, + match fixup.next_operator { + Precedence::Unambiguous => fail_offset, + _ => 1, + }, + consume_by_precedence as u8 - Scan::Bailout as u8, + ); + match scan { + Scan::Fail => {} + Scan::Bailout => return consume_by_precedence, + Scan::Consume => return Scan::Consume, + } + if right_fixup.rightmost_subexpression_precedence(expr) < Precedence::Prefix { + consume_by_precedence + } else if let (Scan::Fail, Precedence::Unambiguous) = (scan, fixup.next_operator) { + Scan::Fail + } else { + Scan::Bailout + } + } + Expr::Range(e) if e.attrs.is_empty() => match &e.end { + Some(end) => { + if fail_offset >= 2 { + return Scan::Consume; + } + let right_fixup = + fixup.rightmost_subexpression_fixup(false, true, Precedence::Range); + let scan = scan_right( + end, + right_fixup, + Precedence::Range, + fail_offset, + match fixup.next_operator { + Precedence::Assign | Precedence::Range => 0, + _ => 1, + }, + ); + if match (scan, fixup.next_operator) { + (Scan::Fail, _) => false, + (Scan::Bailout, Precedence::Assign | Precedence::Range) => false, + (Scan::Bailout | Scan::Consume, _) => true, + } { + return Scan::Consume; + } + if right_fixup.rightmost_subexpression_precedence(end) <= Precedence::Range { + Scan::Consume + } else { + Scan::Fail + } + } + None => { + if fixup.next_operator_can_begin_expr { + Scan::Consume + } else { + Scan::Fail + } + } + }, + Expr::Break(e) => match &e.expr { + Some(value) => { + if bailout_offset >= 1 || e.label.is_none() && classify::expr_leading_label(value) { + return Scan::Consume; + } + let right_fixup = fixup.rightmost_subexpression_fixup(true, true, Precedence::Jump); + match scan_right(value, right_fixup, Precedence::Jump, 1, 1) { + Scan::Fail => Scan::Bailout, + Scan::Bailout | Scan::Consume => Scan::Consume, + } + } + None => match fixup.next_operator { + Precedence::Assign if precedence > Precedence::Assign => Scan::Fail, + _ => Scan::Consume, + }, + }, + Expr::Return(ExprReturn { expr, .. }) | Expr::Yield(ExprYield { expr, .. 
}) => match expr { + Some(e) => { + if bailout_offset >= 1 { + return Scan::Consume; + } + let right_fixup = + fixup.rightmost_subexpression_fixup(true, false, Precedence::Jump); + match scan_right(e, right_fixup, Precedence::Jump, 1, 1) { + Scan::Fail => Scan::Bailout, + Scan::Bailout | Scan::Consume => Scan::Consume, + } + } + None => match fixup.next_operator { + Precedence::Assign if precedence > Precedence::Assign => Scan::Fail, + _ => Scan::Consume, + }, + }, + Expr::Closure(e) => { + if matches!(e.output, ReturnType::Default) + || matches!(&*e.body, Expr::Block(body) if body.attrs.is_empty() && body.label.is_none()) + { + if bailout_offset >= 1 { + return Scan::Consume; + } + let right_fixup = + fixup.rightmost_subexpression_fixup(false, false, Precedence::Jump); + match scan_right(&e.body, right_fixup, Precedence::Jump, 1, 1) { + Scan::Fail => Scan::Bailout, + Scan::Bailout | Scan::Consume => Scan::Consume, + } + } else { + Scan::Consume + } + } + Expr::Let(e) => { + if bailout_offset >= 1 { + return Scan::Consume; + } + let right_fixup = fixup.rightmost_subexpression_fixup(false, false, Precedence::Let); + let scan = scan_right( + &e.expr, + right_fixup, + Precedence::Let, + 1, + if fixup.next_operator < Precedence::Let { + 0 + } else { + 1 + }, + ); + match scan { + Scan::Fail | Scan::Bailout if fixup.next_operator < Precedence::Let => { + return Scan::Bailout; + } + Scan::Consume => return Scan::Consume, + _ => {} + } + if right_fixup.rightmost_subexpression_precedence(&e.expr) < Precedence::Let { + Scan::Consume + } else if let Scan::Fail = scan { + Scan::Bailout + } else { + Scan::Consume + } + } + Expr::Array(_) + | Expr::Assign(_) + | Expr::Async(_) + | Expr::Await(_) + | Expr::Binary(_) + | Expr::Block(_) + | Expr::Call(_) + | Expr::Cast(_) + | Expr::Const(_) + | Expr::Continue(_) + | Expr::Field(_) + | Expr::ForLoop(_) + | Expr::Group(_) + | Expr::If(_) + | Expr::Index(_) + | Expr::Infer(_) + | Expr::Lit(_) + | Expr::Loop(_) + | Expr::Macro(_) + | Expr::Match(_) + | Expr::MethodCall(_) + | Expr::Paren(_) + | Expr::Path(_) + | Expr::Range(_) + | Expr::Repeat(_) + | Expr::Struct(_) + | Expr::Try(_) + | Expr::TryBlock(_) + | Expr::Tuple(_) + | Expr::Unsafe(_) + | Expr::Verbatim(_) + | Expr::While(_) => match fixup.next_operator { + Precedence::Assign | Precedence::Range if precedence == Precedence::Range => Scan::Fail, + _ if precedence == Precedence::Let && fixup.next_operator < Precedence::Let => { + Scan::Fail + } + _ => consume_by_precedence, + }, + } +} diff --git a/vendor/syn/src/gen/clone.rs b/vendor/syn/src/gen/clone.rs new file mode 100644 index 00000000000000..be2b698422da91 --- /dev/null +++ b/vendor/syn/src/gen/clone.rs @@ -0,0 +1,2267 @@ +// This file is @generated by syn-internal-codegen. +// It is not intended for manual editing. 
+ +#![allow(clippy::clone_on_copy, clippy::expl_impl_clone_on_copy)] +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::Abi { + fn clone(&self) -> Self { + crate::Abi { + extern_token: self.extern_token.clone(), + name: self.name.clone(), + } + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::AngleBracketedGenericArguments { + fn clone(&self) -> Self { + crate::AngleBracketedGenericArguments { + colon2_token: self.colon2_token.clone(), + lt_token: self.lt_token.clone(), + args: self.args.clone(), + gt_token: self.gt_token.clone(), + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::Arm { + fn clone(&self) -> Self { + crate::Arm { + attrs: self.attrs.clone(), + pat: self.pat.clone(), + guard: self.guard.clone(), + fat_arrow_token: self.fat_arrow_token.clone(), + body: self.body.clone(), + comma: self.comma.clone(), + } + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::AssocConst { + fn clone(&self) -> Self { + crate::AssocConst { + ident: self.ident.clone(), + generics: self.generics.clone(), + eq_token: self.eq_token.clone(), + value: self.value.clone(), + } + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::AssocType { + fn clone(&self) -> Self { + crate::AssocType { + ident: self.ident.clone(), + generics: self.generics.clone(), + eq_token: self.eq_token.clone(), + ty: self.ty.clone(), + } + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Copy for crate::AttrStyle {} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::AttrStyle { + fn clone(&self) -> Self { + *self + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::Attribute { + fn clone(&self) -> Self { + crate::Attribute { + pound_token: self.pound_token.clone(), + style: self.style.clone(), + bracket_token: self.bracket_token.clone(), + meta: self.meta.clone(), + } + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::BareFnArg { + fn clone(&self) -> Self { + crate::BareFnArg { + attrs: self.attrs.clone(), + name: self.name.clone(), + ty: self.ty.clone(), + } + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::BareVariadic { + fn clone(&self) -> Self { + crate::BareVariadic { + attrs: self.attrs.clone(), + name: self.name.clone(), + dots: self.dots.clone(), + comma: self.comma.clone(), + } + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Copy for crate::BinOp {} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::BinOp { + fn clone(&self) -> Self { + *self + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::Block { + fn clone(&self) -> Self { + crate::Block { + brace_token: self.brace_token.clone(), + stmts: self.stmts.clone(), + } + } +} 
+#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::BoundLifetimes { + fn clone(&self) -> Self { + crate::BoundLifetimes { + for_token: self.for_token.clone(), + lt_token: self.lt_token.clone(), + lifetimes: self.lifetimes.clone(), + gt_token: self.gt_token.clone(), + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::CapturedParam { + fn clone(&self) -> Self { + match self { + crate::CapturedParam::Lifetime(v0) => { + crate::CapturedParam::Lifetime(v0.clone()) + } + crate::CapturedParam::Ident(v0) => crate::CapturedParam::Ident(v0.clone()), + } + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::ConstParam { + fn clone(&self) -> Self { + crate::ConstParam { + attrs: self.attrs.clone(), + const_token: self.const_token.clone(), + ident: self.ident.clone(), + colon_token: self.colon_token.clone(), + ty: self.ty.clone(), + eq_token: self.eq_token.clone(), + default: self.default.clone(), + } + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::Constraint { + fn clone(&self) -> Self { + crate::Constraint { + ident: self.ident.clone(), + generics: self.generics.clone(), + colon_token: self.colon_token.clone(), + bounds: self.bounds.clone(), + } + } +} +#[cfg(feature = "derive")] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::Data { + fn clone(&self) -> Self { + match self { + crate::Data::Struct(v0) => crate::Data::Struct(v0.clone()), + crate::Data::Enum(v0) => crate::Data::Enum(v0.clone()), + crate::Data::Union(v0) => crate::Data::Union(v0.clone()), + } + } +} +#[cfg(feature = "derive")] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::DataEnum { + fn clone(&self) -> Self { + crate::DataEnum { + enum_token: self.enum_token.clone(), + brace_token: self.brace_token.clone(), + variants: self.variants.clone(), + } + } +} +#[cfg(feature = "derive")] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::DataStruct { + fn clone(&self) -> Self { + crate::DataStruct { + struct_token: self.struct_token.clone(), + fields: self.fields.clone(), + semi_token: self.semi_token.clone(), + } + } +} +#[cfg(feature = "derive")] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::DataUnion { + fn clone(&self) -> Self { + crate::DataUnion { + union_token: self.union_token.clone(), + fields: self.fields.clone(), + } + } +} +#[cfg(feature = "derive")] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::DeriveInput { + fn clone(&self) -> Self { + crate::DeriveInput { + attrs: self.attrs.clone(), + vis: self.vis.clone(), + ident: self.ident.clone(), + generics: self.generics.clone(), + data: self.data.clone(), + } + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::Expr { + fn clone(&self) -> Self { + match self { + #[cfg(feature = "full")] + crate::Expr::Array(v0) => crate::Expr::Array(v0.clone()), + #[cfg(feature = "full")] + crate::Expr::Assign(v0) => crate::Expr::Assign(v0.clone()), + #[cfg(feature = "full")] + crate::Expr::Async(v0) => crate::Expr::Async(v0.clone()), + #[cfg(feature = "full")] + crate::Expr::Await(v0) => crate::Expr::Await(v0.clone()), + crate::Expr::Binary(v0) => 
crate::Expr::Binary(v0.clone()), + #[cfg(feature = "full")] + crate::Expr::Block(v0) => crate::Expr::Block(v0.clone()), + #[cfg(feature = "full")] + crate::Expr::Break(v0) => crate::Expr::Break(v0.clone()), + crate::Expr::Call(v0) => crate::Expr::Call(v0.clone()), + crate::Expr::Cast(v0) => crate::Expr::Cast(v0.clone()), + #[cfg(feature = "full")] + crate::Expr::Closure(v0) => crate::Expr::Closure(v0.clone()), + #[cfg(feature = "full")] + crate::Expr::Const(v0) => crate::Expr::Const(v0.clone()), + #[cfg(feature = "full")] + crate::Expr::Continue(v0) => crate::Expr::Continue(v0.clone()), + crate::Expr::Field(v0) => crate::Expr::Field(v0.clone()), + #[cfg(feature = "full")] + crate::Expr::ForLoop(v0) => crate::Expr::ForLoop(v0.clone()), + crate::Expr::Group(v0) => crate::Expr::Group(v0.clone()), + #[cfg(feature = "full")] + crate::Expr::If(v0) => crate::Expr::If(v0.clone()), + crate::Expr::Index(v0) => crate::Expr::Index(v0.clone()), + #[cfg(feature = "full")] + crate::Expr::Infer(v0) => crate::Expr::Infer(v0.clone()), + #[cfg(feature = "full")] + crate::Expr::Let(v0) => crate::Expr::Let(v0.clone()), + crate::Expr::Lit(v0) => crate::Expr::Lit(v0.clone()), + #[cfg(feature = "full")] + crate::Expr::Loop(v0) => crate::Expr::Loop(v0.clone()), + crate::Expr::Macro(v0) => crate::Expr::Macro(v0.clone()), + #[cfg(feature = "full")] + crate::Expr::Match(v0) => crate::Expr::Match(v0.clone()), + crate::Expr::MethodCall(v0) => crate::Expr::MethodCall(v0.clone()), + crate::Expr::Paren(v0) => crate::Expr::Paren(v0.clone()), + crate::Expr::Path(v0) => crate::Expr::Path(v0.clone()), + #[cfg(feature = "full")] + crate::Expr::Range(v0) => crate::Expr::Range(v0.clone()), + #[cfg(feature = "full")] + crate::Expr::RawAddr(v0) => crate::Expr::RawAddr(v0.clone()), + crate::Expr::Reference(v0) => crate::Expr::Reference(v0.clone()), + #[cfg(feature = "full")] + crate::Expr::Repeat(v0) => crate::Expr::Repeat(v0.clone()), + #[cfg(feature = "full")] + crate::Expr::Return(v0) => crate::Expr::Return(v0.clone()), + crate::Expr::Struct(v0) => crate::Expr::Struct(v0.clone()), + #[cfg(feature = "full")] + crate::Expr::Try(v0) => crate::Expr::Try(v0.clone()), + #[cfg(feature = "full")] + crate::Expr::TryBlock(v0) => crate::Expr::TryBlock(v0.clone()), + crate::Expr::Tuple(v0) => crate::Expr::Tuple(v0.clone()), + crate::Expr::Unary(v0) => crate::Expr::Unary(v0.clone()), + #[cfg(feature = "full")] + crate::Expr::Unsafe(v0) => crate::Expr::Unsafe(v0.clone()), + crate::Expr::Verbatim(v0) => crate::Expr::Verbatim(v0.clone()), + #[cfg(feature = "full")] + crate::Expr::While(v0) => crate::Expr::While(v0.clone()), + #[cfg(feature = "full")] + crate::Expr::Yield(v0) => crate::Expr::Yield(v0.clone()), + #[cfg(not(feature = "full"))] + _ => unreachable!(), + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::ExprArray { + fn clone(&self) -> Self { + crate::ExprArray { + attrs: self.attrs.clone(), + bracket_token: self.bracket_token.clone(), + elems: self.elems.clone(), + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::ExprAssign { + fn clone(&self) -> Self { + crate::ExprAssign { + attrs: self.attrs.clone(), + left: self.left.clone(), + eq_token: self.eq_token.clone(), + right: self.right.clone(), + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::ExprAsync { + fn clone(&self) -> Self { + crate::ExprAsync { + attrs: self.attrs.clone(), + 
async_token: self.async_token.clone(), + capture: self.capture.clone(), + block: self.block.clone(), + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::ExprAwait { + fn clone(&self) -> Self { + crate::ExprAwait { + attrs: self.attrs.clone(), + base: self.base.clone(), + dot_token: self.dot_token.clone(), + await_token: self.await_token.clone(), + } + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::ExprBinary { + fn clone(&self) -> Self { + crate::ExprBinary { + attrs: self.attrs.clone(), + left: self.left.clone(), + op: self.op.clone(), + right: self.right.clone(), + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::ExprBlock { + fn clone(&self) -> Self { + crate::ExprBlock { + attrs: self.attrs.clone(), + label: self.label.clone(), + block: self.block.clone(), + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::ExprBreak { + fn clone(&self) -> Self { + crate::ExprBreak { + attrs: self.attrs.clone(), + break_token: self.break_token.clone(), + label: self.label.clone(), + expr: self.expr.clone(), + } + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::ExprCall { + fn clone(&self) -> Self { + crate::ExprCall { + attrs: self.attrs.clone(), + func: self.func.clone(), + paren_token: self.paren_token.clone(), + args: self.args.clone(), + } + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::ExprCast { + fn clone(&self) -> Self { + crate::ExprCast { + attrs: self.attrs.clone(), + expr: self.expr.clone(), + as_token: self.as_token.clone(), + ty: self.ty.clone(), + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::ExprClosure { + fn clone(&self) -> Self { + crate::ExprClosure { + attrs: self.attrs.clone(), + lifetimes: self.lifetimes.clone(), + constness: self.constness.clone(), + movability: self.movability.clone(), + asyncness: self.asyncness.clone(), + capture: self.capture.clone(), + or1_token: self.or1_token.clone(), + inputs: self.inputs.clone(), + or2_token: self.or2_token.clone(), + output: self.output.clone(), + body: self.body.clone(), + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::ExprConst { + fn clone(&self) -> Self { + crate::ExprConst { + attrs: self.attrs.clone(), + const_token: self.const_token.clone(), + block: self.block.clone(), + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::ExprContinue { + fn clone(&self) -> Self { + crate::ExprContinue { + attrs: self.attrs.clone(), + continue_token: self.continue_token.clone(), + label: self.label.clone(), + } + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::ExprField { + fn clone(&self) -> Self { + crate::ExprField { + attrs: self.attrs.clone(), + base: self.base.clone(), + dot_token: self.dot_token.clone(), + member: self.member.clone(), + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::ExprForLoop { + fn clone(&self) -> Self { + crate::ExprForLoop 
{ + attrs: self.attrs.clone(), + label: self.label.clone(), + for_token: self.for_token.clone(), + pat: self.pat.clone(), + in_token: self.in_token.clone(), + expr: self.expr.clone(), + body: self.body.clone(), + } + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::ExprGroup { + fn clone(&self) -> Self { + crate::ExprGroup { + attrs: self.attrs.clone(), + group_token: self.group_token.clone(), + expr: self.expr.clone(), + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::ExprIf { + fn clone(&self) -> Self { + crate::ExprIf { + attrs: self.attrs.clone(), + if_token: self.if_token.clone(), + cond: self.cond.clone(), + then_branch: self.then_branch.clone(), + else_branch: self.else_branch.clone(), + } + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::ExprIndex { + fn clone(&self) -> Self { + crate::ExprIndex { + attrs: self.attrs.clone(), + expr: self.expr.clone(), + bracket_token: self.bracket_token.clone(), + index: self.index.clone(), + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::ExprInfer { + fn clone(&self) -> Self { + crate::ExprInfer { + attrs: self.attrs.clone(), + underscore_token: self.underscore_token.clone(), + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::ExprLet { + fn clone(&self) -> Self { + crate::ExprLet { + attrs: self.attrs.clone(), + let_token: self.let_token.clone(), + pat: self.pat.clone(), + eq_token: self.eq_token.clone(), + expr: self.expr.clone(), + } + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::ExprLit { + fn clone(&self) -> Self { + crate::ExprLit { + attrs: self.attrs.clone(), + lit: self.lit.clone(), + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::ExprLoop { + fn clone(&self) -> Self { + crate::ExprLoop { + attrs: self.attrs.clone(), + label: self.label.clone(), + loop_token: self.loop_token.clone(), + body: self.body.clone(), + } + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::ExprMacro { + fn clone(&self) -> Self { + crate::ExprMacro { + attrs: self.attrs.clone(), + mac: self.mac.clone(), + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::ExprMatch { + fn clone(&self) -> Self { + crate::ExprMatch { + attrs: self.attrs.clone(), + match_token: self.match_token.clone(), + expr: self.expr.clone(), + brace_token: self.brace_token.clone(), + arms: self.arms.clone(), + } + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::ExprMethodCall { + fn clone(&self) -> Self { + crate::ExprMethodCall { + attrs: self.attrs.clone(), + receiver: self.receiver.clone(), + dot_token: self.dot_token.clone(), + method: self.method.clone(), + turbofish: self.turbofish.clone(), + paren_token: self.paren_token.clone(), + args: self.args.clone(), + } + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::ExprParen { + fn clone(&self) -> Self { + 
crate::ExprParen { + attrs: self.attrs.clone(), + paren_token: self.paren_token.clone(), + expr: self.expr.clone(), + } + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::ExprPath { + fn clone(&self) -> Self { + crate::ExprPath { + attrs: self.attrs.clone(), + qself: self.qself.clone(), + path: self.path.clone(), + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::ExprRange { + fn clone(&self) -> Self { + crate::ExprRange { + attrs: self.attrs.clone(), + start: self.start.clone(), + limits: self.limits.clone(), + end: self.end.clone(), + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::ExprRawAddr { + fn clone(&self) -> Self { + crate::ExprRawAddr { + attrs: self.attrs.clone(), + and_token: self.and_token.clone(), + raw: self.raw.clone(), + mutability: self.mutability.clone(), + expr: self.expr.clone(), + } + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::ExprReference { + fn clone(&self) -> Self { + crate::ExprReference { + attrs: self.attrs.clone(), + and_token: self.and_token.clone(), + mutability: self.mutability.clone(), + expr: self.expr.clone(), + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::ExprRepeat { + fn clone(&self) -> Self { + crate::ExprRepeat { + attrs: self.attrs.clone(), + bracket_token: self.bracket_token.clone(), + expr: self.expr.clone(), + semi_token: self.semi_token.clone(), + len: self.len.clone(), + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::ExprReturn { + fn clone(&self) -> Self { + crate::ExprReturn { + attrs: self.attrs.clone(), + return_token: self.return_token.clone(), + expr: self.expr.clone(), + } + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::ExprStruct { + fn clone(&self) -> Self { + crate::ExprStruct { + attrs: self.attrs.clone(), + qself: self.qself.clone(), + path: self.path.clone(), + brace_token: self.brace_token.clone(), + fields: self.fields.clone(), + dot2_token: self.dot2_token.clone(), + rest: self.rest.clone(), + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::ExprTry { + fn clone(&self) -> Self { + crate::ExprTry { + attrs: self.attrs.clone(), + expr: self.expr.clone(), + question_token: self.question_token.clone(), + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::ExprTryBlock { + fn clone(&self) -> Self { + crate::ExprTryBlock { + attrs: self.attrs.clone(), + try_token: self.try_token.clone(), + block: self.block.clone(), + } + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::ExprTuple { + fn clone(&self) -> Self { + crate::ExprTuple { + attrs: self.attrs.clone(), + paren_token: self.paren_token.clone(), + elems: self.elems.clone(), + } + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::ExprUnary { + fn clone(&self) -> Self { + crate::ExprUnary { + attrs: self.attrs.clone(), + op: self.op.clone(), + expr: 
self.expr.clone(), + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::ExprUnsafe { + fn clone(&self) -> Self { + crate::ExprUnsafe { + attrs: self.attrs.clone(), + unsafe_token: self.unsafe_token.clone(), + block: self.block.clone(), + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::ExprWhile { + fn clone(&self) -> Self { + crate::ExprWhile { + attrs: self.attrs.clone(), + label: self.label.clone(), + while_token: self.while_token.clone(), + cond: self.cond.clone(), + body: self.body.clone(), + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::ExprYield { + fn clone(&self) -> Self { + crate::ExprYield { + attrs: self.attrs.clone(), + yield_token: self.yield_token.clone(), + expr: self.expr.clone(), + } + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::Field { + fn clone(&self) -> Self { + crate::Field { + attrs: self.attrs.clone(), + vis: self.vis.clone(), + mutability: self.mutability.clone(), + ident: self.ident.clone(), + colon_token: self.colon_token.clone(), + ty: self.ty.clone(), + } + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::FieldMutability { + fn clone(&self) -> Self { + match self { + crate::FieldMutability::None => crate::FieldMutability::None, + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::FieldPat { + fn clone(&self) -> Self { + crate::FieldPat { + attrs: self.attrs.clone(), + member: self.member.clone(), + colon_token: self.colon_token.clone(), + pat: self.pat.clone(), + } + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::FieldValue { + fn clone(&self) -> Self { + crate::FieldValue { + attrs: self.attrs.clone(), + member: self.member.clone(), + colon_token: self.colon_token.clone(), + expr: self.expr.clone(), + } + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::Fields { + fn clone(&self) -> Self { + match self { + crate::Fields::Named(v0) => crate::Fields::Named(v0.clone()), + crate::Fields::Unnamed(v0) => crate::Fields::Unnamed(v0.clone()), + crate::Fields::Unit => crate::Fields::Unit, + } + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::FieldsNamed { + fn clone(&self) -> Self { + crate::FieldsNamed { + brace_token: self.brace_token.clone(), + named: self.named.clone(), + } + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::FieldsUnnamed { + fn clone(&self) -> Self { + crate::FieldsUnnamed { + paren_token: self.paren_token.clone(), + unnamed: self.unnamed.clone(), + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::File { + fn clone(&self) -> Self { + crate::File { + shebang: self.shebang.clone(), + attrs: self.attrs.clone(), + items: self.items.clone(), + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::FnArg { + fn clone(&self) -> Self { + match self { + 
crate::FnArg::Receiver(v0) => crate::FnArg::Receiver(v0.clone()), + crate::FnArg::Typed(v0) => crate::FnArg::Typed(v0.clone()), + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::ForeignItem { + fn clone(&self) -> Self { + match self { + crate::ForeignItem::Fn(v0) => crate::ForeignItem::Fn(v0.clone()), + crate::ForeignItem::Static(v0) => crate::ForeignItem::Static(v0.clone()), + crate::ForeignItem::Type(v0) => crate::ForeignItem::Type(v0.clone()), + crate::ForeignItem::Macro(v0) => crate::ForeignItem::Macro(v0.clone()), + crate::ForeignItem::Verbatim(v0) => crate::ForeignItem::Verbatim(v0.clone()), + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::ForeignItemFn { + fn clone(&self) -> Self { + crate::ForeignItemFn { + attrs: self.attrs.clone(), + vis: self.vis.clone(), + sig: self.sig.clone(), + semi_token: self.semi_token.clone(), + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::ForeignItemMacro { + fn clone(&self) -> Self { + crate::ForeignItemMacro { + attrs: self.attrs.clone(), + mac: self.mac.clone(), + semi_token: self.semi_token.clone(), + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::ForeignItemStatic { + fn clone(&self) -> Self { + crate::ForeignItemStatic { + attrs: self.attrs.clone(), + vis: self.vis.clone(), + static_token: self.static_token.clone(), + mutability: self.mutability.clone(), + ident: self.ident.clone(), + colon_token: self.colon_token.clone(), + ty: self.ty.clone(), + semi_token: self.semi_token.clone(), + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::ForeignItemType { + fn clone(&self) -> Self { + crate::ForeignItemType { + attrs: self.attrs.clone(), + vis: self.vis.clone(), + type_token: self.type_token.clone(), + ident: self.ident.clone(), + generics: self.generics.clone(), + semi_token: self.semi_token.clone(), + } + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::GenericArgument { + fn clone(&self) -> Self { + match self { + crate::GenericArgument::Lifetime(v0) => { + crate::GenericArgument::Lifetime(v0.clone()) + } + crate::GenericArgument::Type(v0) => crate::GenericArgument::Type(v0.clone()), + crate::GenericArgument::Const(v0) => { + crate::GenericArgument::Const(v0.clone()) + } + crate::GenericArgument::AssocType(v0) => { + crate::GenericArgument::AssocType(v0.clone()) + } + crate::GenericArgument::AssocConst(v0) => { + crate::GenericArgument::AssocConst(v0.clone()) + } + crate::GenericArgument::Constraint(v0) => { + crate::GenericArgument::Constraint(v0.clone()) + } + } + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::GenericParam { + fn clone(&self) -> Self { + match self { + crate::GenericParam::Lifetime(v0) => { + crate::GenericParam::Lifetime(v0.clone()) + } + crate::GenericParam::Type(v0) => crate::GenericParam::Type(v0.clone()), + crate::GenericParam::Const(v0) => crate::GenericParam::Const(v0.clone()), + } + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::Generics { + fn clone(&self) -> Self { + crate::Generics { + lt_token: self.lt_token.clone(), + params: 
self.params.clone(), + gt_token: self.gt_token.clone(), + where_clause: self.where_clause.clone(), + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::ImplItem { + fn clone(&self) -> Self { + match self { + crate::ImplItem::Const(v0) => crate::ImplItem::Const(v0.clone()), + crate::ImplItem::Fn(v0) => crate::ImplItem::Fn(v0.clone()), + crate::ImplItem::Type(v0) => crate::ImplItem::Type(v0.clone()), + crate::ImplItem::Macro(v0) => crate::ImplItem::Macro(v0.clone()), + crate::ImplItem::Verbatim(v0) => crate::ImplItem::Verbatim(v0.clone()), + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::ImplItemConst { + fn clone(&self) -> Self { + crate::ImplItemConst { + attrs: self.attrs.clone(), + vis: self.vis.clone(), + defaultness: self.defaultness.clone(), + const_token: self.const_token.clone(), + ident: self.ident.clone(), + generics: self.generics.clone(), + colon_token: self.colon_token.clone(), + ty: self.ty.clone(), + eq_token: self.eq_token.clone(), + expr: self.expr.clone(), + semi_token: self.semi_token.clone(), + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::ImplItemFn { + fn clone(&self) -> Self { + crate::ImplItemFn { + attrs: self.attrs.clone(), + vis: self.vis.clone(), + defaultness: self.defaultness.clone(), + sig: self.sig.clone(), + block: self.block.clone(), + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::ImplItemMacro { + fn clone(&self) -> Self { + crate::ImplItemMacro { + attrs: self.attrs.clone(), + mac: self.mac.clone(), + semi_token: self.semi_token.clone(), + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::ImplItemType { + fn clone(&self) -> Self { + crate::ImplItemType { + attrs: self.attrs.clone(), + vis: self.vis.clone(), + defaultness: self.defaultness.clone(), + type_token: self.type_token.clone(), + ident: self.ident.clone(), + generics: self.generics.clone(), + eq_token: self.eq_token.clone(), + ty: self.ty.clone(), + semi_token: self.semi_token.clone(), + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::ImplRestriction { + fn clone(&self) -> Self { + match *self {} + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::Index { + fn clone(&self) -> Self { + crate::Index { + index: self.index.clone(), + span: self.span.clone(), + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::Item { + fn clone(&self) -> Self { + match self { + crate::Item::Const(v0) => crate::Item::Const(v0.clone()), + crate::Item::Enum(v0) => crate::Item::Enum(v0.clone()), + crate::Item::ExternCrate(v0) => crate::Item::ExternCrate(v0.clone()), + crate::Item::Fn(v0) => crate::Item::Fn(v0.clone()), + crate::Item::ForeignMod(v0) => crate::Item::ForeignMod(v0.clone()), + crate::Item::Impl(v0) => crate::Item::Impl(v0.clone()), + crate::Item::Macro(v0) => crate::Item::Macro(v0.clone()), + crate::Item::Mod(v0) => crate::Item::Mod(v0.clone()), + crate::Item::Static(v0) => crate::Item::Static(v0.clone()), + crate::Item::Struct(v0) => crate::Item::Struct(v0.clone()), + crate::Item::Trait(v0) => crate::Item::Trait(v0.clone()), + crate::Item::TraitAlias(v0) => 
crate::Item::TraitAlias(v0.clone()), + crate::Item::Type(v0) => crate::Item::Type(v0.clone()), + crate::Item::Union(v0) => crate::Item::Union(v0.clone()), + crate::Item::Use(v0) => crate::Item::Use(v0.clone()), + crate::Item::Verbatim(v0) => crate::Item::Verbatim(v0.clone()), + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::ItemConst { + fn clone(&self) -> Self { + crate::ItemConst { + attrs: self.attrs.clone(), + vis: self.vis.clone(), + const_token: self.const_token.clone(), + ident: self.ident.clone(), + generics: self.generics.clone(), + colon_token: self.colon_token.clone(), + ty: self.ty.clone(), + eq_token: self.eq_token.clone(), + expr: self.expr.clone(), + semi_token: self.semi_token.clone(), + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::ItemEnum { + fn clone(&self) -> Self { + crate::ItemEnum { + attrs: self.attrs.clone(), + vis: self.vis.clone(), + enum_token: self.enum_token.clone(), + ident: self.ident.clone(), + generics: self.generics.clone(), + brace_token: self.brace_token.clone(), + variants: self.variants.clone(), + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::ItemExternCrate { + fn clone(&self) -> Self { + crate::ItemExternCrate { + attrs: self.attrs.clone(), + vis: self.vis.clone(), + extern_token: self.extern_token.clone(), + crate_token: self.crate_token.clone(), + ident: self.ident.clone(), + rename: self.rename.clone(), + semi_token: self.semi_token.clone(), + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::ItemFn { + fn clone(&self) -> Self { + crate::ItemFn { + attrs: self.attrs.clone(), + vis: self.vis.clone(), + sig: self.sig.clone(), + block: self.block.clone(), + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::ItemForeignMod { + fn clone(&self) -> Self { + crate::ItemForeignMod { + attrs: self.attrs.clone(), + unsafety: self.unsafety.clone(), + abi: self.abi.clone(), + brace_token: self.brace_token.clone(), + items: self.items.clone(), + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::ItemImpl { + fn clone(&self) -> Self { + crate::ItemImpl { + attrs: self.attrs.clone(), + defaultness: self.defaultness.clone(), + unsafety: self.unsafety.clone(), + impl_token: self.impl_token.clone(), + generics: self.generics.clone(), + trait_: self.trait_.clone(), + self_ty: self.self_ty.clone(), + brace_token: self.brace_token.clone(), + items: self.items.clone(), + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::ItemMacro { + fn clone(&self) -> Self { + crate::ItemMacro { + attrs: self.attrs.clone(), + ident: self.ident.clone(), + mac: self.mac.clone(), + semi_token: self.semi_token.clone(), + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::ItemMod { + fn clone(&self) -> Self { + crate::ItemMod { + attrs: self.attrs.clone(), + vis: self.vis.clone(), + unsafety: self.unsafety.clone(), + mod_token: self.mod_token.clone(), + ident: self.ident.clone(), + content: self.content.clone(), + semi: self.semi.clone(), + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::ItemStatic { + fn 
clone(&self) -> Self { + crate::ItemStatic { + attrs: self.attrs.clone(), + vis: self.vis.clone(), + static_token: self.static_token.clone(), + mutability: self.mutability.clone(), + ident: self.ident.clone(), + colon_token: self.colon_token.clone(), + ty: self.ty.clone(), + eq_token: self.eq_token.clone(), + expr: self.expr.clone(), + semi_token: self.semi_token.clone(), + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::ItemStruct { + fn clone(&self) -> Self { + crate::ItemStruct { + attrs: self.attrs.clone(), + vis: self.vis.clone(), + struct_token: self.struct_token.clone(), + ident: self.ident.clone(), + generics: self.generics.clone(), + fields: self.fields.clone(), + semi_token: self.semi_token.clone(), + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::ItemTrait { + fn clone(&self) -> Self { + crate::ItemTrait { + attrs: self.attrs.clone(), + vis: self.vis.clone(), + unsafety: self.unsafety.clone(), + auto_token: self.auto_token.clone(), + restriction: self.restriction.clone(), + trait_token: self.trait_token.clone(), + ident: self.ident.clone(), + generics: self.generics.clone(), + colon_token: self.colon_token.clone(), + supertraits: self.supertraits.clone(), + brace_token: self.brace_token.clone(), + items: self.items.clone(), + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::ItemTraitAlias { + fn clone(&self) -> Self { + crate::ItemTraitAlias { + attrs: self.attrs.clone(), + vis: self.vis.clone(), + trait_token: self.trait_token.clone(), + ident: self.ident.clone(), + generics: self.generics.clone(), + eq_token: self.eq_token.clone(), + bounds: self.bounds.clone(), + semi_token: self.semi_token.clone(), + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::ItemType { + fn clone(&self) -> Self { + crate::ItemType { + attrs: self.attrs.clone(), + vis: self.vis.clone(), + type_token: self.type_token.clone(), + ident: self.ident.clone(), + generics: self.generics.clone(), + eq_token: self.eq_token.clone(), + ty: self.ty.clone(), + semi_token: self.semi_token.clone(), + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::ItemUnion { + fn clone(&self) -> Self { + crate::ItemUnion { + attrs: self.attrs.clone(), + vis: self.vis.clone(), + union_token: self.union_token.clone(), + ident: self.ident.clone(), + generics: self.generics.clone(), + fields: self.fields.clone(), + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::ItemUse { + fn clone(&self) -> Self { + crate::ItemUse { + attrs: self.attrs.clone(), + vis: self.vis.clone(), + use_token: self.use_token.clone(), + leading_colon: self.leading_colon.clone(), + tree: self.tree.clone(), + semi_token: self.semi_token.clone(), + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::Label { + fn clone(&self) -> Self { + crate::Label { + name: self.name.clone(), + colon_token: self.colon_token.clone(), + } + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::LifetimeParam { + fn clone(&self) -> Self { + crate::LifetimeParam { + attrs: self.attrs.clone(), + lifetime: self.lifetime.clone(), + colon_token: 
self.colon_token.clone(), + bounds: self.bounds.clone(), + } + } +} +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::Lit { + fn clone(&self) -> Self { + match self { + crate::Lit::Str(v0) => crate::Lit::Str(v0.clone()), + crate::Lit::ByteStr(v0) => crate::Lit::ByteStr(v0.clone()), + crate::Lit::CStr(v0) => crate::Lit::CStr(v0.clone()), + crate::Lit::Byte(v0) => crate::Lit::Byte(v0.clone()), + crate::Lit::Char(v0) => crate::Lit::Char(v0.clone()), + crate::Lit::Int(v0) => crate::Lit::Int(v0.clone()), + crate::Lit::Float(v0) => crate::Lit::Float(v0.clone()), + crate::Lit::Bool(v0) => crate::Lit::Bool(v0.clone()), + crate::Lit::Verbatim(v0) => crate::Lit::Verbatim(v0.clone()), + } + } +} +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::LitBool { + fn clone(&self) -> Self { + crate::LitBool { + value: self.value.clone(), + span: self.span.clone(), + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::Local { + fn clone(&self) -> Self { + crate::Local { + attrs: self.attrs.clone(), + let_token: self.let_token.clone(), + pat: self.pat.clone(), + init: self.init.clone(), + semi_token: self.semi_token.clone(), + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::LocalInit { + fn clone(&self) -> Self { + crate::LocalInit { + eq_token: self.eq_token.clone(), + expr: self.expr.clone(), + diverge: self.diverge.clone(), + } + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::Macro { + fn clone(&self) -> Self { + crate::Macro { + path: self.path.clone(), + bang_token: self.bang_token.clone(), + delimiter: self.delimiter.clone(), + tokens: self.tokens.clone(), + } + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::MacroDelimiter { + fn clone(&self) -> Self { + match self { + crate::MacroDelimiter::Paren(v0) => crate::MacroDelimiter::Paren(v0.clone()), + crate::MacroDelimiter::Brace(v0) => crate::MacroDelimiter::Brace(v0.clone()), + crate::MacroDelimiter::Bracket(v0) => { + crate::MacroDelimiter::Bracket(v0.clone()) + } + } + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::Member { + fn clone(&self) -> Self { + match self { + crate::Member::Named(v0) => crate::Member::Named(v0.clone()), + crate::Member::Unnamed(v0) => crate::Member::Unnamed(v0.clone()), + } + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::Meta { + fn clone(&self) -> Self { + match self { + crate::Meta::Path(v0) => crate::Meta::Path(v0.clone()), + crate::Meta::List(v0) => crate::Meta::List(v0.clone()), + crate::Meta::NameValue(v0) => crate::Meta::NameValue(v0.clone()), + } + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::MetaList { + fn clone(&self) -> Self { + crate::MetaList { + path: self.path.clone(), + delimiter: self.delimiter.clone(), + tokens: self.tokens.clone(), + } + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::MetaNameValue { + fn clone(&self) -> Self { + crate::MetaNameValue { + path: self.path.clone(), + eq_token: 
self.eq_token.clone(), + value: self.value.clone(), + } + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::ParenthesizedGenericArguments { + fn clone(&self) -> Self { + crate::ParenthesizedGenericArguments { + paren_token: self.paren_token.clone(), + inputs: self.inputs.clone(), + output: self.output.clone(), + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::Pat { + fn clone(&self) -> Self { + match self { + crate::Pat::Const(v0) => crate::Pat::Const(v0.clone()), + crate::Pat::Ident(v0) => crate::Pat::Ident(v0.clone()), + crate::Pat::Lit(v0) => crate::Pat::Lit(v0.clone()), + crate::Pat::Macro(v0) => crate::Pat::Macro(v0.clone()), + crate::Pat::Or(v0) => crate::Pat::Or(v0.clone()), + crate::Pat::Paren(v0) => crate::Pat::Paren(v0.clone()), + crate::Pat::Path(v0) => crate::Pat::Path(v0.clone()), + crate::Pat::Range(v0) => crate::Pat::Range(v0.clone()), + crate::Pat::Reference(v0) => crate::Pat::Reference(v0.clone()), + crate::Pat::Rest(v0) => crate::Pat::Rest(v0.clone()), + crate::Pat::Slice(v0) => crate::Pat::Slice(v0.clone()), + crate::Pat::Struct(v0) => crate::Pat::Struct(v0.clone()), + crate::Pat::Tuple(v0) => crate::Pat::Tuple(v0.clone()), + crate::Pat::TupleStruct(v0) => crate::Pat::TupleStruct(v0.clone()), + crate::Pat::Type(v0) => crate::Pat::Type(v0.clone()), + crate::Pat::Verbatim(v0) => crate::Pat::Verbatim(v0.clone()), + crate::Pat::Wild(v0) => crate::Pat::Wild(v0.clone()), + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::PatIdent { + fn clone(&self) -> Self { + crate::PatIdent { + attrs: self.attrs.clone(), + by_ref: self.by_ref.clone(), + mutability: self.mutability.clone(), + ident: self.ident.clone(), + subpat: self.subpat.clone(), + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::PatOr { + fn clone(&self) -> Self { + crate::PatOr { + attrs: self.attrs.clone(), + leading_vert: self.leading_vert.clone(), + cases: self.cases.clone(), + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::PatParen { + fn clone(&self) -> Self { + crate::PatParen { + attrs: self.attrs.clone(), + paren_token: self.paren_token.clone(), + pat: self.pat.clone(), + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::PatReference { + fn clone(&self) -> Self { + crate::PatReference { + attrs: self.attrs.clone(), + and_token: self.and_token.clone(), + mutability: self.mutability.clone(), + pat: self.pat.clone(), + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::PatRest { + fn clone(&self) -> Self { + crate::PatRest { + attrs: self.attrs.clone(), + dot2_token: self.dot2_token.clone(), + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::PatSlice { + fn clone(&self) -> Self { + crate::PatSlice { + attrs: self.attrs.clone(), + bracket_token: self.bracket_token.clone(), + elems: self.elems.clone(), + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::PatStruct { + fn clone(&self) -> Self { + crate::PatStruct { + attrs: self.attrs.clone(), + qself: self.qself.clone(), + path: self.path.clone(), + brace_token: 
self.brace_token.clone(), + fields: self.fields.clone(), + rest: self.rest.clone(), + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::PatTuple { + fn clone(&self) -> Self { + crate::PatTuple { + attrs: self.attrs.clone(), + paren_token: self.paren_token.clone(), + elems: self.elems.clone(), + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::PatTupleStruct { + fn clone(&self) -> Self { + crate::PatTupleStruct { + attrs: self.attrs.clone(), + qself: self.qself.clone(), + path: self.path.clone(), + paren_token: self.paren_token.clone(), + elems: self.elems.clone(), + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::PatType { + fn clone(&self) -> Self { + crate::PatType { + attrs: self.attrs.clone(), + pat: self.pat.clone(), + colon_token: self.colon_token.clone(), + ty: self.ty.clone(), + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::PatWild { + fn clone(&self) -> Self { + crate::PatWild { + attrs: self.attrs.clone(), + underscore_token: self.underscore_token.clone(), + } + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::Path { + fn clone(&self) -> Self { + crate::Path { + leading_colon: self.leading_colon.clone(), + segments: self.segments.clone(), + } + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::PathArguments { + fn clone(&self) -> Self { + match self { + crate::PathArguments::None => crate::PathArguments::None, + crate::PathArguments::AngleBracketed(v0) => { + crate::PathArguments::AngleBracketed(v0.clone()) + } + crate::PathArguments::Parenthesized(v0) => { + crate::PathArguments::Parenthesized(v0.clone()) + } + } + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::PathSegment { + fn clone(&self) -> Self { + crate::PathSegment { + ident: self.ident.clone(), + arguments: self.arguments.clone(), + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::PointerMutability { + fn clone(&self) -> Self { + match self { + crate::PointerMutability::Const(v0) => { + crate::PointerMutability::Const(v0.clone()) + } + crate::PointerMutability::Mut(v0) => { + crate::PointerMutability::Mut(v0.clone()) + } + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::PreciseCapture { + fn clone(&self) -> Self { + crate::PreciseCapture { + use_token: self.use_token.clone(), + lt_token: self.lt_token.clone(), + params: self.params.clone(), + gt_token: self.gt_token.clone(), + } + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::PredicateLifetime { + fn clone(&self) -> Self { + crate::PredicateLifetime { + lifetime: self.lifetime.clone(), + colon_token: self.colon_token.clone(), + bounds: self.bounds.clone(), + } + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::PredicateType { + fn clone(&self) -> Self { + crate::PredicateType { + lifetimes: self.lifetimes.clone(), + bounded_ty: self.bounded_ty.clone(), + 
colon_token: self.colon_token.clone(), + bounds: self.bounds.clone(), + } + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::QSelf { + fn clone(&self) -> Self { + crate::QSelf { + lt_token: self.lt_token.clone(), + ty: self.ty.clone(), + position: self.position.clone(), + as_token: self.as_token.clone(), + gt_token: self.gt_token.clone(), + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Copy for crate::RangeLimits {} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::RangeLimits { + fn clone(&self) -> Self { + *self + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::Receiver { + fn clone(&self) -> Self { + crate::Receiver { + attrs: self.attrs.clone(), + reference: self.reference.clone(), + mutability: self.mutability.clone(), + self_token: self.self_token.clone(), + colon_token: self.colon_token.clone(), + ty: self.ty.clone(), + } + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::ReturnType { + fn clone(&self) -> Self { + match self { + crate::ReturnType::Default => crate::ReturnType::Default, + crate::ReturnType::Type(v0, v1) => { + crate::ReturnType::Type(v0.clone(), v1.clone()) + } + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::Signature { + fn clone(&self) -> Self { + crate::Signature { + constness: self.constness.clone(), + asyncness: self.asyncness.clone(), + unsafety: self.unsafety.clone(), + abi: self.abi.clone(), + fn_token: self.fn_token.clone(), + ident: self.ident.clone(), + generics: self.generics.clone(), + paren_token: self.paren_token.clone(), + inputs: self.inputs.clone(), + variadic: self.variadic.clone(), + output: self.output.clone(), + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::StaticMutability { + fn clone(&self) -> Self { + match self { + crate::StaticMutability::Mut(v0) => crate::StaticMutability::Mut(v0.clone()), + crate::StaticMutability::None => crate::StaticMutability::None, + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::Stmt { + fn clone(&self) -> Self { + match self { + crate::Stmt::Local(v0) => crate::Stmt::Local(v0.clone()), + crate::Stmt::Item(v0) => crate::Stmt::Item(v0.clone()), + crate::Stmt::Expr(v0, v1) => crate::Stmt::Expr(v0.clone(), v1.clone()), + crate::Stmt::Macro(v0) => crate::Stmt::Macro(v0.clone()), + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::StmtMacro { + fn clone(&self) -> Self { + crate::StmtMacro { + attrs: self.attrs.clone(), + mac: self.mac.clone(), + semi_token: self.semi_token.clone(), + } + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::TraitBound { + fn clone(&self) -> Self { + crate::TraitBound { + paren_token: self.paren_token.clone(), + modifier: self.modifier.clone(), + lifetimes: self.lifetimes.clone(), + path: self.path.clone(), + } + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Copy for crate::TraitBoundModifier {} +#[cfg(any(feature = "derive", feature = 
"full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::TraitBoundModifier { + fn clone(&self) -> Self { + *self + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::TraitItem { + fn clone(&self) -> Self { + match self { + crate::TraitItem::Const(v0) => crate::TraitItem::Const(v0.clone()), + crate::TraitItem::Fn(v0) => crate::TraitItem::Fn(v0.clone()), + crate::TraitItem::Type(v0) => crate::TraitItem::Type(v0.clone()), + crate::TraitItem::Macro(v0) => crate::TraitItem::Macro(v0.clone()), + crate::TraitItem::Verbatim(v0) => crate::TraitItem::Verbatim(v0.clone()), + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::TraitItemConst { + fn clone(&self) -> Self { + crate::TraitItemConst { + attrs: self.attrs.clone(), + const_token: self.const_token.clone(), + ident: self.ident.clone(), + generics: self.generics.clone(), + colon_token: self.colon_token.clone(), + ty: self.ty.clone(), + default: self.default.clone(), + semi_token: self.semi_token.clone(), + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::TraitItemFn { + fn clone(&self) -> Self { + crate::TraitItemFn { + attrs: self.attrs.clone(), + sig: self.sig.clone(), + default: self.default.clone(), + semi_token: self.semi_token.clone(), + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::TraitItemMacro { + fn clone(&self) -> Self { + crate::TraitItemMacro { + attrs: self.attrs.clone(), + mac: self.mac.clone(), + semi_token: self.semi_token.clone(), + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::TraitItemType { + fn clone(&self) -> Self { + crate::TraitItemType { + attrs: self.attrs.clone(), + type_token: self.type_token.clone(), + ident: self.ident.clone(), + generics: self.generics.clone(), + colon_token: self.colon_token.clone(), + bounds: self.bounds.clone(), + default: self.default.clone(), + semi_token: self.semi_token.clone(), + } + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::Type { + fn clone(&self) -> Self { + match self { + crate::Type::Array(v0) => crate::Type::Array(v0.clone()), + crate::Type::BareFn(v0) => crate::Type::BareFn(v0.clone()), + crate::Type::Group(v0) => crate::Type::Group(v0.clone()), + crate::Type::ImplTrait(v0) => crate::Type::ImplTrait(v0.clone()), + crate::Type::Infer(v0) => crate::Type::Infer(v0.clone()), + crate::Type::Macro(v0) => crate::Type::Macro(v0.clone()), + crate::Type::Never(v0) => crate::Type::Never(v0.clone()), + crate::Type::Paren(v0) => crate::Type::Paren(v0.clone()), + crate::Type::Path(v0) => crate::Type::Path(v0.clone()), + crate::Type::Ptr(v0) => crate::Type::Ptr(v0.clone()), + crate::Type::Reference(v0) => crate::Type::Reference(v0.clone()), + crate::Type::Slice(v0) => crate::Type::Slice(v0.clone()), + crate::Type::TraitObject(v0) => crate::Type::TraitObject(v0.clone()), + crate::Type::Tuple(v0) => crate::Type::Tuple(v0.clone()), + crate::Type::Verbatim(v0) => crate::Type::Verbatim(v0.clone()), + } + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::TypeArray { + fn clone(&self) -> Self { + crate::TypeArray { + bracket_token: self.bracket_token.clone(), + elem: 
self.elem.clone(), + semi_token: self.semi_token.clone(), + len: self.len.clone(), + } + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::TypeBareFn { + fn clone(&self) -> Self { + crate::TypeBareFn { + lifetimes: self.lifetimes.clone(), + unsafety: self.unsafety.clone(), + abi: self.abi.clone(), + fn_token: self.fn_token.clone(), + paren_token: self.paren_token.clone(), + inputs: self.inputs.clone(), + variadic: self.variadic.clone(), + output: self.output.clone(), + } + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::TypeGroup { + fn clone(&self) -> Self { + crate::TypeGroup { + group_token: self.group_token.clone(), + elem: self.elem.clone(), + } + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::TypeImplTrait { + fn clone(&self) -> Self { + crate::TypeImplTrait { + impl_token: self.impl_token.clone(), + bounds: self.bounds.clone(), + } + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::TypeInfer { + fn clone(&self) -> Self { + crate::TypeInfer { + underscore_token: self.underscore_token.clone(), + } + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::TypeMacro { + fn clone(&self) -> Self { + crate::TypeMacro { + mac: self.mac.clone(), + } + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::TypeNever { + fn clone(&self) -> Self { + crate::TypeNever { + bang_token: self.bang_token.clone(), + } + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::TypeParam { + fn clone(&self) -> Self { + crate::TypeParam { + attrs: self.attrs.clone(), + ident: self.ident.clone(), + colon_token: self.colon_token.clone(), + bounds: self.bounds.clone(), + eq_token: self.eq_token.clone(), + default: self.default.clone(), + } + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::TypeParamBound { + fn clone(&self) -> Self { + match self { + crate::TypeParamBound::Trait(v0) => crate::TypeParamBound::Trait(v0.clone()), + crate::TypeParamBound::Lifetime(v0) => { + crate::TypeParamBound::Lifetime(v0.clone()) + } + #[cfg(feature = "full")] + crate::TypeParamBound::PreciseCapture(v0) => { + crate::TypeParamBound::PreciseCapture(v0.clone()) + } + crate::TypeParamBound::Verbatim(v0) => { + crate::TypeParamBound::Verbatim(v0.clone()) + } + #[cfg(not(feature = "full"))] + _ => unreachable!(), + } + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::TypeParen { + fn clone(&self) -> Self { + crate::TypeParen { + paren_token: self.paren_token.clone(), + elem: self.elem.clone(), + } + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::TypePath { + fn clone(&self) -> Self { + crate::TypePath { + qself: self.qself.clone(), + path: self.path.clone(), + } + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for 
crate::TypePtr { + fn clone(&self) -> Self { + crate::TypePtr { + star_token: self.star_token.clone(), + const_token: self.const_token.clone(), + mutability: self.mutability.clone(), + elem: self.elem.clone(), + } + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::TypeReference { + fn clone(&self) -> Self { + crate::TypeReference { + and_token: self.and_token.clone(), + lifetime: self.lifetime.clone(), + mutability: self.mutability.clone(), + elem: self.elem.clone(), + } + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::TypeSlice { + fn clone(&self) -> Self { + crate::TypeSlice { + bracket_token: self.bracket_token.clone(), + elem: self.elem.clone(), + } + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::TypeTraitObject { + fn clone(&self) -> Self { + crate::TypeTraitObject { + dyn_token: self.dyn_token.clone(), + bounds: self.bounds.clone(), + } + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::TypeTuple { + fn clone(&self) -> Self { + crate::TypeTuple { + paren_token: self.paren_token.clone(), + elems: self.elems.clone(), + } + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Copy for crate::UnOp {} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::UnOp { + fn clone(&self) -> Self { + *self + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::UseGlob { + fn clone(&self) -> Self { + crate::UseGlob { + star_token: self.star_token.clone(), + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::UseGroup { + fn clone(&self) -> Self { + crate::UseGroup { + brace_token: self.brace_token.clone(), + items: self.items.clone(), + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::UseName { + fn clone(&self) -> Self { + crate::UseName { + ident: self.ident.clone(), + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::UsePath { + fn clone(&self) -> Self { + crate::UsePath { + ident: self.ident.clone(), + colon2_token: self.colon2_token.clone(), + tree: self.tree.clone(), + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::UseRename { + fn clone(&self) -> Self { + crate::UseRename { + ident: self.ident.clone(), + as_token: self.as_token.clone(), + rename: self.rename.clone(), + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::UseTree { + fn clone(&self) -> Self { + match self { + crate::UseTree::Path(v0) => crate::UseTree::Path(v0.clone()), + crate::UseTree::Name(v0) => crate::UseTree::Name(v0.clone()), + crate::UseTree::Rename(v0) => crate::UseTree::Rename(v0.clone()), + crate::UseTree::Glob(v0) => crate::UseTree::Glob(v0.clone()), + crate::UseTree::Group(v0) => crate::UseTree::Group(v0.clone()), + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::Variadic { + fn clone(&self) -> Self 
{ + crate::Variadic { + attrs: self.attrs.clone(), + pat: self.pat.clone(), + dots: self.dots.clone(), + comma: self.comma.clone(), + } + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::Variant { + fn clone(&self) -> Self { + crate::Variant { + attrs: self.attrs.clone(), + ident: self.ident.clone(), + fields: self.fields.clone(), + discriminant: self.discriminant.clone(), + } + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::VisRestricted { + fn clone(&self) -> Self { + crate::VisRestricted { + pub_token: self.pub_token.clone(), + paren_token: self.paren_token.clone(), + in_token: self.in_token.clone(), + path: self.path.clone(), + } + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::Visibility { + fn clone(&self) -> Self { + match self { + crate::Visibility::Public(v0) => crate::Visibility::Public(v0.clone()), + crate::Visibility::Restricted(v0) => { + crate::Visibility::Restricted(v0.clone()) + } + crate::Visibility::Inherited => crate::Visibility::Inherited, + } + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::WhereClause { + fn clone(&self) -> Self { + crate::WhereClause { + where_token: self.where_token.clone(), + predicates: self.predicates.clone(), + } + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for crate::WherePredicate { + fn clone(&self) -> Self { + match self { + crate::WherePredicate::Lifetime(v0) => { + crate::WherePredicate::Lifetime(v0.clone()) + } + crate::WherePredicate::Type(v0) => crate::WherePredicate::Type(v0.clone()), + } + } +} diff --git a/vendor/syn/src/gen/debug.rs b/vendor/syn/src/gen/debug.rs new file mode 100644 index 00000000000000..aa42e32c60ede5 --- /dev/null +++ b/vendor/syn/src/gen/debug.rs @@ -0,0 +1,3238 @@ +// This file is @generated by syn-internal-codegen. +// It is not intended for manual editing. 
+ +#![allow(unknown_lints, non_local_definitions)] +use std::fmt::{self, Debug}; +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::Abi { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("Abi"); + formatter.field("extern_token", &self.extern_token); + formatter.field("name", &self.name); + formatter.finish() + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::AngleBracketedGenericArguments { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + self.debug(formatter, "AngleBracketedGenericArguments") + } +} +#[cfg(any(feature = "derive", feature = "full"))] +impl crate::AngleBracketedGenericArguments { + fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { + let mut formatter = formatter.debug_struct(name); + formatter.field("colon2_token", &self.colon2_token); + formatter.field("lt_token", &self.lt_token); + formatter.field("args", &self.args); + formatter.field("gt_token", &self.gt_token); + formatter.finish() + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::Arm { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("Arm"); + formatter.field("attrs", &self.attrs); + formatter.field("pat", &self.pat); + formatter.field("guard", &self.guard); + formatter.field("fat_arrow_token", &self.fat_arrow_token); + formatter.field("body", &self.body); + formatter.field("comma", &self.comma); + formatter.finish() + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::AssocConst { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("AssocConst"); + formatter.field("ident", &self.ident); + formatter.field("generics", &self.generics); + formatter.field("eq_token", &self.eq_token); + formatter.field("value", &self.value); + formatter.finish() + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::AssocType { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("AssocType"); + formatter.field("ident", &self.ident); + formatter.field("generics", &self.generics); + formatter.field("eq_token", &self.eq_token); + formatter.field("ty", &self.ty); + formatter.finish() + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::AttrStyle { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("AttrStyle::")?; + match self { + crate::AttrStyle::Outer => formatter.write_str("Outer"), + crate::AttrStyle::Inner(v0) => { + let mut formatter = formatter.debug_tuple("Inner"); + formatter.field(v0); + formatter.finish() + } + } + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::Attribute { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("Attribute"); + formatter.field("pound_token", &self.pound_token); + formatter.field("style", &self.style); + formatter.field("bracket_token", &self.bracket_token); + 
formatter.field("meta", &self.meta); + formatter.finish() + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::BareFnArg { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("BareFnArg"); + formatter.field("attrs", &self.attrs); + formatter.field("name", &self.name); + formatter.field("ty", &self.ty); + formatter.finish() + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::BareVariadic { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("BareVariadic"); + formatter.field("attrs", &self.attrs); + formatter.field("name", &self.name); + formatter.field("dots", &self.dots); + formatter.field("comma", &self.comma); + formatter.finish() + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::BinOp { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("BinOp::")?; + match self { + crate::BinOp::Add(v0) => { + let mut formatter = formatter.debug_tuple("Add"); + formatter.field(v0); + formatter.finish() + } + crate::BinOp::Sub(v0) => { + let mut formatter = formatter.debug_tuple("Sub"); + formatter.field(v0); + formatter.finish() + } + crate::BinOp::Mul(v0) => { + let mut formatter = formatter.debug_tuple("Mul"); + formatter.field(v0); + formatter.finish() + } + crate::BinOp::Div(v0) => { + let mut formatter = formatter.debug_tuple("Div"); + formatter.field(v0); + formatter.finish() + } + crate::BinOp::Rem(v0) => { + let mut formatter = formatter.debug_tuple("Rem"); + formatter.field(v0); + formatter.finish() + } + crate::BinOp::And(v0) => { + let mut formatter = formatter.debug_tuple("And"); + formatter.field(v0); + formatter.finish() + } + crate::BinOp::Or(v0) => { + let mut formatter = formatter.debug_tuple("Or"); + formatter.field(v0); + formatter.finish() + } + crate::BinOp::BitXor(v0) => { + let mut formatter = formatter.debug_tuple("BitXor"); + formatter.field(v0); + formatter.finish() + } + crate::BinOp::BitAnd(v0) => { + let mut formatter = formatter.debug_tuple("BitAnd"); + formatter.field(v0); + formatter.finish() + } + crate::BinOp::BitOr(v0) => { + let mut formatter = formatter.debug_tuple("BitOr"); + formatter.field(v0); + formatter.finish() + } + crate::BinOp::Shl(v0) => { + let mut formatter = formatter.debug_tuple("Shl"); + formatter.field(v0); + formatter.finish() + } + crate::BinOp::Shr(v0) => { + let mut formatter = formatter.debug_tuple("Shr"); + formatter.field(v0); + formatter.finish() + } + crate::BinOp::Eq(v0) => { + let mut formatter = formatter.debug_tuple("Eq"); + formatter.field(v0); + formatter.finish() + } + crate::BinOp::Lt(v0) => { + let mut formatter = formatter.debug_tuple("Lt"); + formatter.field(v0); + formatter.finish() + } + crate::BinOp::Le(v0) => { + let mut formatter = formatter.debug_tuple("Le"); + formatter.field(v0); + formatter.finish() + } + crate::BinOp::Ne(v0) => { + let mut formatter = formatter.debug_tuple("Ne"); + formatter.field(v0); + formatter.finish() + } + crate::BinOp::Ge(v0) => { + let mut formatter = formatter.debug_tuple("Ge"); + formatter.field(v0); + formatter.finish() + } + crate::BinOp::Gt(v0) => { + let mut formatter = formatter.debug_tuple("Gt"); + formatter.field(v0); + formatter.finish() + } + crate::BinOp::AddAssign(v0) => { + let 
mut formatter = formatter.debug_tuple("AddAssign"); + formatter.field(v0); + formatter.finish() + } + crate::BinOp::SubAssign(v0) => { + let mut formatter = formatter.debug_tuple("SubAssign"); + formatter.field(v0); + formatter.finish() + } + crate::BinOp::MulAssign(v0) => { + let mut formatter = formatter.debug_tuple("MulAssign"); + formatter.field(v0); + formatter.finish() + } + crate::BinOp::DivAssign(v0) => { + let mut formatter = formatter.debug_tuple("DivAssign"); + formatter.field(v0); + formatter.finish() + } + crate::BinOp::RemAssign(v0) => { + let mut formatter = formatter.debug_tuple("RemAssign"); + formatter.field(v0); + formatter.finish() + } + crate::BinOp::BitXorAssign(v0) => { + let mut formatter = formatter.debug_tuple("BitXorAssign"); + formatter.field(v0); + formatter.finish() + } + crate::BinOp::BitAndAssign(v0) => { + let mut formatter = formatter.debug_tuple("BitAndAssign"); + formatter.field(v0); + formatter.finish() + } + crate::BinOp::BitOrAssign(v0) => { + let mut formatter = formatter.debug_tuple("BitOrAssign"); + formatter.field(v0); + formatter.finish() + } + crate::BinOp::ShlAssign(v0) => { + let mut formatter = formatter.debug_tuple("ShlAssign"); + formatter.field(v0); + formatter.finish() + } + crate::BinOp::ShrAssign(v0) => { + let mut formatter = formatter.debug_tuple("ShrAssign"); + formatter.field(v0); + formatter.finish() + } + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::Block { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("Block"); + formatter.field("brace_token", &self.brace_token); + formatter.field("stmts", &self.stmts); + formatter.finish() + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::BoundLifetimes { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("BoundLifetimes"); + formatter.field("for_token", &self.for_token); + formatter.field("lt_token", &self.lt_token); + formatter.field("lifetimes", &self.lifetimes); + formatter.field("gt_token", &self.gt_token); + formatter.finish() + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::CapturedParam { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("CapturedParam::")?; + match self { + crate::CapturedParam::Lifetime(v0) => { + let mut formatter = formatter.debug_tuple("Lifetime"); + formatter.field(v0); + formatter.finish() + } + crate::CapturedParam::Ident(v0) => { + let mut formatter = formatter.debug_tuple("Ident"); + formatter.field(v0); + formatter.finish() + } + } + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::ConstParam { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("ConstParam"); + formatter.field("attrs", &self.attrs); + formatter.field("const_token", &self.const_token); + formatter.field("ident", &self.ident); + formatter.field("colon_token", &self.colon_token); + formatter.field("ty", &self.ty); + formatter.field("eq_token", &self.eq_token); + formatter.field("default", &self.default); + formatter.finish() + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::Constraint { + fn 
fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("Constraint"); + formatter.field("ident", &self.ident); + formatter.field("generics", &self.generics); + formatter.field("colon_token", &self.colon_token); + formatter.field("bounds", &self.bounds); + formatter.finish() + } +} +#[cfg(feature = "derive")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::Data { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Data::")?; + match self { + crate::Data::Struct(v0) => v0.debug(formatter, "Struct"), + crate::Data::Enum(v0) => v0.debug(formatter, "Enum"), + crate::Data::Union(v0) => v0.debug(formatter, "Union"), + } + } +} +#[cfg(feature = "derive")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::DataEnum { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + self.debug(formatter, "DataEnum") + } +} +#[cfg(feature = "derive")] +impl crate::DataEnum { + fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { + let mut formatter = formatter.debug_struct(name); + formatter.field("enum_token", &self.enum_token); + formatter.field("brace_token", &self.brace_token); + formatter.field("variants", &self.variants); + formatter.finish() + } +} +#[cfg(feature = "derive")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::DataStruct { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + self.debug(formatter, "DataStruct") + } +} +#[cfg(feature = "derive")] +impl crate::DataStruct { + fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { + let mut formatter = formatter.debug_struct(name); + formatter.field("struct_token", &self.struct_token); + formatter.field("fields", &self.fields); + formatter.field("semi_token", &self.semi_token); + formatter.finish() + } +} +#[cfg(feature = "derive")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::DataUnion { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + self.debug(formatter, "DataUnion") + } +} +#[cfg(feature = "derive")] +impl crate::DataUnion { + fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { + let mut formatter = formatter.debug_struct(name); + formatter.field("union_token", &self.union_token); + formatter.field("fields", &self.fields); + formatter.finish() + } +} +#[cfg(feature = "derive")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::DeriveInput { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("DeriveInput"); + formatter.field("attrs", &self.attrs); + formatter.field("vis", &self.vis); + formatter.field("ident", &self.ident); + formatter.field("generics", &self.generics); + formatter.field("data", &self.data); + formatter.finish() + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::Expr { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Expr::")?; + match self { + #[cfg(feature = "full")] + crate::Expr::Array(v0) => v0.debug(formatter, "Array"), + #[cfg(feature = "full")] + crate::Expr::Assign(v0) => v0.debug(formatter, "Assign"), + #[cfg(feature = "full")] + crate::Expr::Async(v0) => v0.debug(formatter, "Async"), + #[cfg(feature = "full")] + crate::Expr::Await(v0) => v0.debug(formatter, "Await"), + 
crate::Expr::Binary(v0) => v0.debug(formatter, "Binary"), + #[cfg(feature = "full")] + crate::Expr::Block(v0) => v0.debug(formatter, "Block"), + #[cfg(feature = "full")] + crate::Expr::Break(v0) => v0.debug(formatter, "Break"), + crate::Expr::Call(v0) => v0.debug(formatter, "Call"), + crate::Expr::Cast(v0) => v0.debug(formatter, "Cast"), + #[cfg(feature = "full")] + crate::Expr::Closure(v0) => v0.debug(formatter, "Closure"), + #[cfg(feature = "full")] + crate::Expr::Const(v0) => v0.debug(formatter, "Const"), + #[cfg(feature = "full")] + crate::Expr::Continue(v0) => v0.debug(formatter, "Continue"), + crate::Expr::Field(v0) => v0.debug(formatter, "Field"), + #[cfg(feature = "full")] + crate::Expr::ForLoop(v0) => v0.debug(formatter, "ForLoop"), + crate::Expr::Group(v0) => v0.debug(formatter, "Group"), + #[cfg(feature = "full")] + crate::Expr::If(v0) => v0.debug(formatter, "If"), + crate::Expr::Index(v0) => v0.debug(formatter, "Index"), + #[cfg(feature = "full")] + crate::Expr::Infer(v0) => v0.debug(formatter, "Infer"), + #[cfg(feature = "full")] + crate::Expr::Let(v0) => v0.debug(formatter, "Let"), + crate::Expr::Lit(v0) => v0.debug(formatter, "Lit"), + #[cfg(feature = "full")] + crate::Expr::Loop(v0) => v0.debug(formatter, "Loop"), + crate::Expr::Macro(v0) => v0.debug(formatter, "Macro"), + #[cfg(feature = "full")] + crate::Expr::Match(v0) => v0.debug(formatter, "Match"), + crate::Expr::MethodCall(v0) => v0.debug(formatter, "MethodCall"), + crate::Expr::Paren(v0) => v0.debug(formatter, "Paren"), + crate::Expr::Path(v0) => v0.debug(formatter, "Path"), + #[cfg(feature = "full")] + crate::Expr::Range(v0) => v0.debug(formatter, "Range"), + #[cfg(feature = "full")] + crate::Expr::RawAddr(v0) => v0.debug(formatter, "RawAddr"), + crate::Expr::Reference(v0) => v0.debug(formatter, "Reference"), + #[cfg(feature = "full")] + crate::Expr::Repeat(v0) => v0.debug(formatter, "Repeat"), + #[cfg(feature = "full")] + crate::Expr::Return(v0) => v0.debug(formatter, "Return"), + crate::Expr::Struct(v0) => v0.debug(formatter, "Struct"), + #[cfg(feature = "full")] + crate::Expr::Try(v0) => v0.debug(formatter, "Try"), + #[cfg(feature = "full")] + crate::Expr::TryBlock(v0) => v0.debug(formatter, "TryBlock"), + crate::Expr::Tuple(v0) => v0.debug(formatter, "Tuple"), + crate::Expr::Unary(v0) => v0.debug(formatter, "Unary"), + #[cfg(feature = "full")] + crate::Expr::Unsafe(v0) => v0.debug(formatter, "Unsafe"), + crate::Expr::Verbatim(v0) => { + let mut formatter = formatter.debug_tuple("Verbatim"); + formatter.field(v0); + formatter.finish() + } + #[cfg(feature = "full")] + crate::Expr::While(v0) => v0.debug(formatter, "While"), + #[cfg(feature = "full")] + crate::Expr::Yield(v0) => v0.debug(formatter, "Yield"), + #[cfg(not(feature = "full"))] + _ => unreachable!(), + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::ExprArray { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + self.debug(formatter, "ExprArray") + } +} +#[cfg(feature = "full")] +impl crate::ExprArray { + fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { + let mut formatter = formatter.debug_struct(name); + formatter.field("attrs", &self.attrs); + formatter.field("bracket_token", &self.bracket_token); + formatter.field("elems", &self.elems); + formatter.finish() + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::ExprAssign { + fn fmt(&self, formatter: &mut fmt::Formatter) -> 
fmt::Result { + self.debug(formatter, "ExprAssign") + } +} +#[cfg(feature = "full")] +impl crate::ExprAssign { + fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { + let mut formatter = formatter.debug_struct(name); + formatter.field("attrs", &self.attrs); + formatter.field("left", &self.left); + formatter.field("eq_token", &self.eq_token); + formatter.field("right", &self.right); + formatter.finish() + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::ExprAsync { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + self.debug(formatter, "ExprAsync") + } +} +#[cfg(feature = "full")] +impl crate::ExprAsync { + fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { + let mut formatter = formatter.debug_struct(name); + formatter.field("attrs", &self.attrs); + formatter.field("async_token", &self.async_token); + formatter.field("capture", &self.capture); + formatter.field("block", &self.block); + formatter.finish() + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::ExprAwait { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + self.debug(formatter, "ExprAwait") + } +} +#[cfg(feature = "full")] +impl crate::ExprAwait { + fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { + let mut formatter = formatter.debug_struct(name); + formatter.field("attrs", &self.attrs); + formatter.field("base", &self.base); + formatter.field("dot_token", &self.dot_token); + formatter.field("await_token", &self.await_token); + formatter.finish() + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::ExprBinary { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + self.debug(formatter, "ExprBinary") + } +} +#[cfg(any(feature = "derive", feature = "full"))] +impl crate::ExprBinary { + fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { + let mut formatter = formatter.debug_struct(name); + formatter.field("attrs", &self.attrs); + formatter.field("left", &self.left); + formatter.field("op", &self.op); + formatter.field("right", &self.right); + formatter.finish() + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::ExprBlock { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + self.debug(formatter, "ExprBlock") + } +} +#[cfg(feature = "full")] +impl crate::ExprBlock { + fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { + let mut formatter = formatter.debug_struct(name); + formatter.field("attrs", &self.attrs); + formatter.field("label", &self.label); + formatter.field("block", &self.block); + formatter.finish() + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::ExprBreak { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + self.debug(formatter, "ExprBreak") + } +} +#[cfg(feature = "full")] +impl crate::ExprBreak { + fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { + let mut formatter = formatter.debug_struct(name); + formatter.field("attrs", &self.attrs); + formatter.field("break_token", &self.break_token); + formatter.field("label", &self.label); + formatter.field("expr", &self.expr); + formatter.finish() + } +} +#[cfg(any(feature = "derive", feature = "full"))] 
+#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::ExprCall { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + self.debug(formatter, "ExprCall") + } +} +#[cfg(any(feature = "derive", feature = "full"))] +impl crate::ExprCall { + fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { + let mut formatter = formatter.debug_struct(name); + formatter.field("attrs", &self.attrs); + formatter.field("func", &self.func); + formatter.field("paren_token", &self.paren_token); + formatter.field("args", &self.args); + formatter.finish() + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::ExprCast { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + self.debug(formatter, "ExprCast") + } +} +#[cfg(any(feature = "derive", feature = "full"))] +impl crate::ExprCast { + fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { + let mut formatter = formatter.debug_struct(name); + formatter.field("attrs", &self.attrs); + formatter.field("expr", &self.expr); + formatter.field("as_token", &self.as_token); + formatter.field("ty", &self.ty); + formatter.finish() + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::ExprClosure { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + self.debug(formatter, "ExprClosure") + } +} +#[cfg(feature = "full")] +impl crate::ExprClosure { + fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { + let mut formatter = formatter.debug_struct(name); + formatter.field("attrs", &self.attrs); + formatter.field("lifetimes", &self.lifetimes); + formatter.field("constness", &self.constness); + formatter.field("movability", &self.movability); + formatter.field("asyncness", &self.asyncness); + formatter.field("capture", &self.capture); + formatter.field("or1_token", &self.or1_token); + formatter.field("inputs", &self.inputs); + formatter.field("or2_token", &self.or2_token); + formatter.field("output", &self.output); + formatter.field("body", &self.body); + formatter.finish() + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::ExprConst { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + self.debug(formatter, "ExprConst") + } +} +#[cfg(feature = "full")] +impl crate::ExprConst { + fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { + let mut formatter = formatter.debug_struct(name); + formatter.field("attrs", &self.attrs); + formatter.field("const_token", &self.const_token); + formatter.field("block", &self.block); + formatter.finish() + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::ExprContinue { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + self.debug(formatter, "ExprContinue") + } +} +#[cfg(feature = "full")] +impl crate::ExprContinue { + fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { + let mut formatter = formatter.debug_struct(name); + formatter.field("attrs", &self.attrs); + formatter.field("continue_token", &self.continue_token); + formatter.field("label", &self.label); + formatter.finish() + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::ExprField { + fn fmt(&self, formatter: &mut fmt::Formatter) -> 
fmt::Result { + self.debug(formatter, "ExprField") + } +} +#[cfg(any(feature = "derive", feature = "full"))] +impl crate::ExprField { + fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { + let mut formatter = formatter.debug_struct(name); + formatter.field("attrs", &self.attrs); + formatter.field("base", &self.base); + formatter.field("dot_token", &self.dot_token); + formatter.field("member", &self.member); + formatter.finish() + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::ExprForLoop { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + self.debug(formatter, "ExprForLoop") + } +} +#[cfg(feature = "full")] +impl crate::ExprForLoop { + fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { + let mut formatter = formatter.debug_struct(name); + formatter.field("attrs", &self.attrs); + formatter.field("label", &self.label); + formatter.field("for_token", &self.for_token); + formatter.field("pat", &self.pat); + formatter.field("in_token", &self.in_token); + formatter.field("expr", &self.expr); + formatter.field("body", &self.body); + formatter.finish() + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::ExprGroup { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + self.debug(formatter, "ExprGroup") + } +} +#[cfg(any(feature = "derive", feature = "full"))] +impl crate::ExprGroup { + fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { + let mut formatter = formatter.debug_struct(name); + formatter.field("attrs", &self.attrs); + formatter.field("group_token", &self.group_token); + formatter.field("expr", &self.expr); + formatter.finish() + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::ExprIf { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + self.debug(formatter, "ExprIf") + } +} +#[cfg(feature = "full")] +impl crate::ExprIf { + fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { + let mut formatter = formatter.debug_struct(name); + formatter.field("attrs", &self.attrs); + formatter.field("if_token", &self.if_token); + formatter.field("cond", &self.cond); + formatter.field("then_branch", &self.then_branch); + formatter.field("else_branch", &self.else_branch); + formatter.finish() + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::ExprIndex { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + self.debug(formatter, "ExprIndex") + } +} +#[cfg(any(feature = "derive", feature = "full"))] +impl crate::ExprIndex { + fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { + let mut formatter = formatter.debug_struct(name); + formatter.field("attrs", &self.attrs); + formatter.field("expr", &self.expr); + formatter.field("bracket_token", &self.bracket_token); + formatter.field("index", &self.index); + formatter.finish() + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::ExprInfer { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + self.debug(formatter, "ExprInfer") + } +} +#[cfg(feature = "full")] +impl crate::ExprInfer { + fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { + let mut formatter = 
formatter.debug_struct(name); + formatter.field("attrs", &self.attrs); + formatter.field("underscore_token", &self.underscore_token); + formatter.finish() + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::ExprLet { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + self.debug(formatter, "ExprLet") + } +} +#[cfg(feature = "full")] +impl crate::ExprLet { + fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { + let mut formatter = formatter.debug_struct(name); + formatter.field("attrs", &self.attrs); + formatter.field("let_token", &self.let_token); + formatter.field("pat", &self.pat); + formatter.field("eq_token", &self.eq_token); + formatter.field("expr", &self.expr); + formatter.finish() + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::ExprLit { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + self.debug(formatter, "ExprLit") + } +} +#[cfg(any(feature = "derive", feature = "full"))] +impl crate::ExprLit { + fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { + let mut formatter = formatter.debug_struct(name); + formatter.field("attrs", &self.attrs); + formatter.field("lit", &self.lit); + formatter.finish() + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::ExprLoop { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + self.debug(formatter, "ExprLoop") + } +} +#[cfg(feature = "full")] +impl crate::ExprLoop { + fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { + let mut formatter = formatter.debug_struct(name); + formatter.field("attrs", &self.attrs); + formatter.field("label", &self.label); + formatter.field("loop_token", &self.loop_token); + formatter.field("body", &self.body); + formatter.finish() + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::ExprMacro { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + self.debug(formatter, "ExprMacro") + } +} +#[cfg(any(feature = "derive", feature = "full"))] +impl crate::ExprMacro { + fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { + let mut formatter = formatter.debug_struct(name); + formatter.field("attrs", &self.attrs); + formatter.field("mac", &self.mac); + formatter.finish() + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::ExprMatch { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + self.debug(formatter, "ExprMatch") + } +} +#[cfg(feature = "full")] +impl crate::ExprMatch { + fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { + let mut formatter = formatter.debug_struct(name); + formatter.field("attrs", &self.attrs); + formatter.field("match_token", &self.match_token); + formatter.field("expr", &self.expr); + formatter.field("brace_token", &self.brace_token); + formatter.field("arms", &self.arms); + formatter.finish() + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::ExprMethodCall { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + self.debug(formatter, "ExprMethodCall") + } +} +#[cfg(any(feature = "derive", feature = "full"))] +impl crate::ExprMethodCall { + fn 
debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { + let mut formatter = formatter.debug_struct(name); + formatter.field("attrs", &self.attrs); + formatter.field("receiver", &self.receiver); + formatter.field("dot_token", &self.dot_token); + formatter.field("method", &self.method); + formatter.field("turbofish", &self.turbofish); + formatter.field("paren_token", &self.paren_token); + formatter.field("args", &self.args); + formatter.finish() + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::ExprParen { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + self.debug(formatter, "ExprParen") + } +} +#[cfg(any(feature = "derive", feature = "full"))] +impl crate::ExprParen { + fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { + let mut formatter = formatter.debug_struct(name); + formatter.field("attrs", &self.attrs); + formatter.field("paren_token", &self.paren_token); + formatter.field("expr", &self.expr); + formatter.finish() + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::ExprPath { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + self.debug(formatter, "ExprPath") + } +} +#[cfg(any(feature = "derive", feature = "full"))] +impl crate::ExprPath { + fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { + let mut formatter = formatter.debug_struct(name); + formatter.field("attrs", &self.attrs); + formatter.field("qself", &self.qself); + formatter.field("path", &self.path); + formatter.finish() + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::ExprRange { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + self.debug(formatter, "ExprRange") + } +} +#[cfg(feature = "full")] +impl crate::ExprRange { + fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { + let mut formatter = formatter.debug_struct(name); + formatter.field("attrs", &self.attrs); + formatter.field("start", &self.start); + formatter.field("limits", &self.limits); + formatter.field("end", &self.end); + formatter.finish() + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::ExprRawAddr { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + self.debug(formatter, "ExprRawAddr") + } +} +#[cfg(feature = "full")] +impl crate::ExprRawAddr { + fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { + let mut formatter = formatter.debug_struct(name); + formatter.field("attrs", &self.attrs); + formatter.field("and_token", &self.and_token); + formatter.field("raw", &self.raw); + formatter.field("mutability", &self.mutability); + formatter.field("expr", &self.expr); + formatter.finish() + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::ExprReference { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + self.debug(formatter, "ExprReference") + } +} +#[cfg(any(feature = "derive", feature = "full"))] +impl crate::ExprReference { + fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { + let mut formatter = formatter.debug_struct(name); + formatter.field("attrs", &self.attrs); + formatter.field("and_token", &self.and_token); + formatter.field("mutability", 
&self.mutability); + formatter.field("expr", &self.expr); + formatter.finish() + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::ExprRepeat { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + self.debug(formatter, "ExprRepeat") + } +} +#[cfg(feature = "full")] +impl crate::ExprRepeat { + fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { + let mut formatter = formatter.debug_struct(name); + formatter.field("attrs", &self.attrs); + formatter.field("bracket_token", &self.bracket_token); + formatter.field("expr", &self.expr); + formatter.field("semi_token", &self.semi_token); + formatter.field("len", &self.len); + formatter.finish() + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::ExprReturn { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + self.debug(formatter, "ExprReturn") + } +} +#[cfg(feature = "full")] +impl crate::ExprReturn { + fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { + let mut formatter = formatter.debug_struct(name); + formatter.field("attrs", &self.attrs); + formatter.field("return_token", &self.return_token); + formatter.field("expr", &self.expr); + formatter.finish() + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::ExprStruct { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + self.debug(formatter, "ExprStruct") + } +} +#[cfg(any(feature = "derive", feature = "full"))] +impl crate::ExprStruct { + fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { + let mut formatter = formatter.debug_struct(name); + formatter.field("attrs", &self.attrs); + formatter.field("qself", &self.qself); + formatter.field("path", &self.path); + formatter.field("brace_token", &self.brace_token); + formatter.field("fields", &self.fields); + formatter.field("dot2_token", &self.dot2_token); + formatter.field("rest", &self.rest); + formatter.finish() + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::ExprTry { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + self.debug(formatter, "ExprTry") + } +} +#[cfg(feature = "full")] +impl crate::ExprTry { + fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { + let mut formatter = formatter.debug_struct(name); + formatter.field("attrs", &self.attrs); + formatter.field("expr", &self.expr); + formatter.field("question_token", &self.question_token); + formatter.finish() + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::ExprTryBlock { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + self.debug(formatter, "ExprTryBlock") + } +} +#[cfg(feature = "full")] +impl crate::ExprTryBlock { + fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { + let mut formatter = formatter.debug_struct(name); + formatter.field("attrs", &self.attrs); + formatter.field("try_token", &self.try_token); + formatter.field("block", &self.block); + formatter.finish() + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::ExprTuple { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + self.debug(formatter, "ExprTuple") + } +} +#[cfg(any(feature = "derive", 
feature = "full"))] +impl crate::ExprTuple { + fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { + let mut formatter = formatter.debug_struct(name); + formatter.field("attrs", &self.attrs); + formatter.field("paren_token", &self.paren_token); + formatter.field("elems", &self.elems); + formatter.finish() + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::ExprUnary { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + self.debug(formatter, "ExprUnary") + } +} +#[cfg(any(feature = "derive", feature = "full"))] +impl crate::ExprUnary { + fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { + let mut formatter = formatter.debug_struct(name); + formatter.field("attrs", &self.attrs); + formatter.field("op", &self.op); + formatter.field("expr", &self.expr); + formatter.finish() + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::ExprUnsafe { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + self.debug(formatter, "ExprUnsafe") + } +} +#[cfg(feature = "full")] +impl crate::ExprUnsafe { + fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { + let mut formatter = formatter.debug_struct(name); + formatter.field("attrs", &self.attrs); + formatter.field("unsafe_token", &self.unsafe_token); + formatter.field("block", &self.block); + formatter.finish() + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::ExprWhile { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + self.debug(formatter, "ExprWhile") + } +} +#[cfg(feature = "full")] +impl crate::ExprWhile { + fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { + let mut formatter = formatter.debug_struct(name); + formatter.field("attrs", &self.attrs); + formatter.field("label", &self.label); + formatter.field("while_token", &self.while_token); + formatter.field("cond", &self.cond); + formatter.field("body", &self.body); + formatter.finish() + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::ExprYield { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + self.debug(formatter, "ExprYield") + } +} +#[cfg(feature = "full")] +impl crate::ExprYield { + fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { + let mut formatter = formatter.debug_struct(name); + formatter.field("attrs", &self.attrs); + formatter.field("yield_token", &self.yield_token); + formatter.field("expr", &self.expr); + formatter.finish() + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::Field { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("Field"); + formatter.field("attrs", &self.attrs); + formatter.field("vis", &self.vis); + formatter.field("mutability", &self.mutability); + formatter.field("ident", &self.ident); + formatter.field("colon_token", &self.colon_token); + formatter.field("ty", &self.ty); + formatter.finish() + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::FieldMutability { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("FieldMutability::")?; + match self 
{ + crate::FieldMutability::None => formatter.write_str("None"), + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::FieldPat { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("FieldPat"); + formatter.field("attrs", &self.attrs); + formatter.field("member", &self.member); + formatter.field("colon_token", &self.colon_token); + formatter.field("pat", &self.pat); + formatter.finish() + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::FieldValue { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("FieldValue"); + formatter.field("attrs", &self.attrs); + formatter.field("member", &self.member); + formatter.field("colon_token", &self.colon_token); + formatter.field("expr", &self.expr); + formatter.finish() + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::Fields { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Fields::")?; + match self { + crate::Fields::Named(v0) => v0.debug(formatter, "Named"), + crate::Fields::Unnamed(v0) => v0.debug(formatter, "Unnamed"), + crate::Fields::Unit => formatter.write_str("Unit"), + } + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::FieldsNamed { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + self.debug(formatter, "FieldsNamed") + } +} +#[cfg(any(feature = "derive", feature = "full"))] +impl crate::FieldsNamed { + fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { + let mut formatter = formatter.debug_struct(name); + formatter.field("brace_token", &self.brace_token); + formatter.field("named", &self.named); + formatter.finish() + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::FieldsUnnamed { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + self.debug(formatter, "FieldsUnnamed") + } +} +#[cfg(any(feature = "derive", feature = "full"))] +impl crate::FieldsUnnamed { + fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { + let mut formatter = formatter.debug_struct(name); + formatter.field("paren_token", &self.paren_token); + formatter.field("unnamed", &self.unnamed); + formatter.finish() + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::File { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("File"); + formatter.field("shebang", &self.shebang); + formatter.field("attrs", &self.attrs); + formatter.field("items", &self.items); + formatter.finish() + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::FnArg { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("FnArg::")?; + match self { + crate::FnArg::Receiver(v0) => { + let mut formatter = formatter.debug_tuple("Receiver"); + formatter.field(v0); + formatter.finish() + } + crate::FnArg::Typed(v0) => { + let mut formatter = formatter.debug_tuple("Typed"); + formatter.field(v0); + formatter.finish() + } + } + } +} +#[cfg(feature = "full")] 
+#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::ForeignItem { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("ForeignItem::")?; + match self { + crate::ForeignItem::Fn(v0) => v0.debug(formatter, "Fn"), + crate::ForeignItem::Static(v0) => v0.debug(formatter, "Static"), + crate::ForeignItem::Type(v0) => v0.debug(formatter, "Type"), + crate::ForeignItem::Macro(v0) => v0.debug(formatter, "Macro"), + crate::ForeignItem::Verbatim(v0) => { + let mut formatter = formatter.debug_tuple("Verbatim"); + formatter.field(v0); + formatter.finish() + } + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::ForeignItemFn { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + self.debug(formatter, "ForeignItemFn") + } +} +#[cfg(feature = "full")] +impl crate::ForeignItemFn { + fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { + let mut formatter = formatter.debug_struct(name); + formatter.field("attrs", &self.attrs); + formatter.field("vis", &self.vis); + formatter.field("sig", &self.sig); + formatter.field("semi_token", &self.semi_token); + formatter.finish() + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::ForeignItemMacro { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + self.debug(formatter, "ForeignItemMacro") + } +} +#[cfg(feature = "full")] +impl crate::ForeignItemMacro { + fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { + let mut formatter = formatter.debug_struct(name); + formatter.field("attrs", &self.attrs); + formatter.field("mac", &self.mac); + formatter.field("semi_token", &self.semi_token); + formatter.finish() + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::ForeignItemStatic { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + self.debug(formatter, "ForeignItemStatic") + } +} +#[cfg(feature = "full")] +impl crate::ForeignItemStatic { + fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { + let mut formatter = formatter.debug_struct(name); + formatter.field("attrs", &self.attrs); + formatter.field("vis", &self.vis); + formatter.field("static_token", &self.static_token); + formatter.field("mutability", &self.mutability); + formatter.field("ident", &self.ident); + formatter.field("colon_token", &self.colon_token); + formatter.field("ty", &self.ty); + formatter.field("semi_token", &self.semi_token); + formatter.finish() + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::ForeignItemType { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + self.debug(formatter, "ForeignItemType") + } +} +#[cfg(feature = "full")] +impl crate::ForeignItemType { + fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { + let mut formatter = formatter.debug_struct(name); + formatter.field("attrs", &self.attrs); + formatter.field("vis", &self.vis); + formatter.field("type_token", &self.type_token); + formatter.field("ident", &self.ident); + formatter.field("generics", &self.generics); + formatter.field("semi_token", &self.semi_token); + formatter.finish() + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::GenericArgument { + fn fmt(&self, formatter: 
&mut fmt::Formatter) -> fmt::Result { + formatter.write_str("GenericArgument::")?; + match self { + crate::GenericArgument::Lifetime(v0) => { + let mut formatter = formatter.debug_tuple("Lifetime"); + formatter.field(v0); + formatter.finish() + } + crate::GenericArgument::Type(v0) => { + let mut formatter = formatter.debug_tuple("Type"); + formatter.field(v0); + formatter.finish() + } + crate::GenericArgument::Const(v0) => { + let mut formatter = formatter.debug_tuple("Const"); + formatter.field(v0); + formatter.finish() + } + crate::GenericArgument::AssocType(v0) => { + let mut formatter = formatter.debug_tuple("AssocType"); + formatter.field(v0); + formatter.finish() + } + crate::GenericArgument::AssocConst(v0) => { + let mut formatter = formatter.debug_tuple("AssocConst"); + formatter.field(v0); + formatter.finish() + } + crate::GenericArgument::Constraint(v0) => { + let mut formatter = formatter.debug_tuple("Constraint"); + formatter.field(v0); + formatter.finish() + } + } + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::GenericParam { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("GenericParam::")?; + match self { + crate::GenericParam::Lifetime(v0) => { + let mut formatter = formatter.debug_tuple("Lifetime"); + formatter.field(v0); + formatter.finish() + } + crate::GenericParam::Type(v0) => { + let mut formatter = formatter.debug_tuple("Type"); + formatter.field(v0); + formatter.finish() + } + crate::GenericParam::Const(v0) => { + let mut formatter = formatter.debug_tuple("Const"); + formatter.field(v0); + formatter.finish() + } + } + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::Generics { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("Generics"); + formatter.field("lt_token", &self.lt_token); + formatter.field("params", &self.params); + formatter.field("gt_token", &self.gt_token); + formatter.field("where_clause", &self.where_clause); + formatter.finish() + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::ImplItem { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("ImplItem::")?; + match self { + crate::ImplItem::Const(v0) => v0.debug(formatter, "Const"), + crate::ImplItem::Fn(v0) => v0.debug(formatter, "Fn"), + crate::ImplItem::Type(v0) => v0.debug(formatter, "Type"), + crate::ImplItem::Macro(v0) => v0.debug(formatter, "Macro"), + crate::ImplItem::Verbatim(v0) => { + let mut formatter = formatter.debug_tuple("Verbatim"); + formatter.field(v0); + formatter.finish() + } + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::ImplItemConst { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + self.debug(formatter, "ImplItemConst") + } +} +#[cfg(feature = "full")] +impl crate::ImplItemConst { + fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { + let mut formatter = formatter.debug_struct(name); + formatter.field("attrs", &self.attrs); + formatter.field("vis", &self.vis); + formatter.field("defaultness", &self.defaultness); + formatter.field("const_token", &self.const_token); + formatter.field("ident", &self.ident); + formatter.field("generics", &self.generics); + formatter.field("colon_token", 
&self.colon_token); + formatter.field("ty", &self.ty); + formatter.field("eq_token", &self.eq_token); + formatter.field("expr", &self.expr); + formatter.field("semi_token", &self.semi_token); + formatter.finish() + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::ImplItemFn { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + self.debug(formatter, "ImplItemFn") + } +} +#[cfg(feature = "full")] +impl crate::ImplItemFn { + fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { + let mut formatter = formatter.debug_struct(name); + formatter.field("attrs", &self.attrs); + formatter.field("vis", &self.vis); + formatter.field("defaultness", &self.defaultness); + formatter.field("sig", &self.sig); + formatter.field("block", &self.block); + formatter.finish() + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::ImplItemMacro { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + self.debug(formatter, "ImplItemMacro") + } +} +#[cfg(feature = "full")] +impl crate::ImplItemMacro { + fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { + let mut formatter = formatter.debug_struct(name); + formatter.field("attrs", &self.attrs); + formatter.field("mac", &self.mac); + formatter.field("semi_token", &self.semi_token); + formatter.finish() + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::ImplItemType { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + self.debug(formatter, "ImplItemType") + } +} +#[cfg(feature = "full")] +impl crate::ImplItemType { + fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { + let mut formatter = formatter.debug_struct(name); + formatter.field("attrs", &self.attrs); + formatter.field("vis", &self.vis); + formatter.field("defaultness", &self.defaultness); + formatter.field("type_token", &self.type_token); + formatter.field("ident", &self.ident); + formatter.field("generics", &self.generics); + formatter.field("eq_token", &self.eq_token); + formatter.field("ty", &self.ty); + formatter.field("semi_token", &self.semi_token); + formatter.finish() + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::ImplRestriction { + fn fmt(&self, _formatter: &mut fmt::Formatter) -> fmt::Result { + match *self {} + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::Index { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("Index"); + formatter.field("index", &self.index); + formatter.field("span", &self.span); + formatter.finish() + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::Item { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Item::")?; + match self { + crate::Item::Const(v0) => v0.debug(formatter, "Const"), + crate::Item::Enum(v0) => v0.debug(formatter, "Enum"), + crate::Item::ExternCrate(v0) => v0.debug(formatter, "ExternCrate"), + crate::Item::Fn(v0) => v0.debug(formatter, "Fn"), + crate::Item::ForeignMod(v0) => v0.debug(formatter, "ForeignMod"), + crate::Item::Impl(v0) => v0.debug(formatter, "Impl"), + crate::Item::Macro(v0) => v0.debug(formatter, "Macro"), + crate::Item::Mod(v0) => 
v0.debug(formatter, "Mod"), + crate::Item::Static(v0) => v0.debug(formatter, "Static"), + crate::Item::Struct(v0) => v0.debug(formatter, "Struct"), + crate::Item::Trait(v0) => v0.debug(formatter, "Trait"), + crate::Item::TraitAlias(v0) => v0.debug(formatter, "TraitAlias"), + crate::Item::Type(v0) => v0.debug(formatter, "Type"), + crate::Item::Union(v0) => v0.debug(formatter, "Union"), + crate::Item::Use(v0) => v0.debug(formatter, "Use"), + crate::Item::Verbatim(v0) => { + let mut formatter = formatter.debug_tuple("Verbatim"); + formatter.field(v0); + formatter.finish() + } + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::ItemConst { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + self.debug(formatter, "ItemConst") + } +} +#[cfg(feature = "full")] +impl crate::ItemConst { + fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { + let mut formatter = formatter.debug_struct(name); + formatter.field("attrs", &self.attrs); + formatter.field("vis", &self.vis); + formatter.field("const_token", &self.const_token); + formatter.field("ident", &self.ident); + formatter.field("generics", &self.generics); + formatter.field("colon_token", &self.colon_token); + formatter.field("ty", &self.ty); + formatter.field("eq_token", &self.eq_token); + formatter.field("expr", &self.expr); + formatter.field("semi_token", &self.semi_token); + formatter.finish() + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::ItemEnum { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + self.debug(formatter, "ItemEnum") + } +} +#[cfg(feature = "full")] +impl crate::ItemEnum { + fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { + let mut formatter = formatter.debug_struct(name); + formatter.field("attrs", &self.attrs); + formatter.field("vis", &self.vis); + formatter.field("enum_token", &self.enum_token); + formatter.field("ident", &self.ident); + formatter.field("generics", &self.generics); + formatter.field("brace_token", &self.brace_token); + formatter.field("variants", &self.variants); + formatter.finish() + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::ItemExternCrate { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + self.debug(formatter, "ItemExternCrate") + } +} +#[cfg(feature = "full")] +impl crate::ItemExternCrate { + fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { + let mut formatter = formatter.debug_struct(name); + formatter.field("attrs", &self.attrs); + formatter.field("vis", &self.vis); + formatter.field("extern_token", &self.extern_token); + formatter.field("crate_token", &self.crate_token); + formatter.field("ident", &self.ident); + formatter.field("rename", &self.rename); + formatter.field("semi_token", &self.semi_token); + formatter.finish() + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::ItemFn { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + self.debug(formatter, "ItemFn") + } +} +#[cfg(feature = "full")] +impl crate::ItemFn { + fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { + let mut formatter = formatter.debug_struct(name); + formatter.field("attrs", &self.attrs); + formatter.field("vis", &self.vis); + formatter.field("sig", &self.sig); + formatter.field("block", &self.block); 
+ formatter.finish() + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::ItemForeignMod { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + self.debug(formatter, "ItemForeignMod") + } +} +#[cfg(feature = "full")] +impl crate::ItemForeignMod { + fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { + let mut formatter = formatter.debug_struct(name); + formatter.field("attrs", &self.attrs); + formatter.field("unsafety", &self.unsafety); + formatter.field("abi", &self.abi); + formatter.field("brace_token", &self.brace_token); + formatter.field("items", &self.items); + formatter.finish() + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::ItemImpl { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + self.debug(formatter, "ItemImpl") + } +} +#[cfg(feature = "full")] +impl crate::ItemImpl { + fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { + let mut formatter = formatter.debug_struct(name); + formatter.field("attrs", &self.attrs); + formatter.field("defaultness", &self.defaultness); + formatter.field("unsafety", &self.unsafety); + formatter.field("impl_token", &self.impl_token); + formatter.field("generics", &self.generics); + formatter.field("trait_", &self.trait_); + formatter.field("self_ty", &self.self_ty); + formatter.field("brace_token", &self.brace_token); + formatter.field("items", &self.items); + formatter.finish() + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::ItemMacro { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + self.debug(formatter, "ItemMacro") + } +} +#[cfg(feature = "full")] +impl crate::ItemMacro { + fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { + let mut formatter = formatter.debug_struct(name); + formatter.field("attrs", &self.attrs); + formatter.field("ident", &self.ident); + formatter.field("mac", &self.mac); + formatter.field("semi_token", &self.semi_token); + formatter.finish() + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::ItemMod { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + self.debug(formatter, "ItemMod") + } +} +#[cfg(feature = "full")] +impl crate::ItemMod { + fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { + let mut formatter = formatter.debug_struct(name); + formatter.field("attrs", &self.attrs); + formatter.field("vis", &self.vis); + formatter.field("unsafety", &self.unsafety); + formatter.field("mod_token", &self.mod_token); + formatter.field("ident", &self.ident); + formatter.field("content", &self.content); + formatter.field("semi", &self.semi); + formatter.finish() + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::ItemStatic { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + self.debug(formatter, "ItemStatic") + } +} +#[cfg(feature = "full")] +impl crate::ItemStatic { + fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { + let mut formatter = formatter.debug_struct(name); + formatter.field("attrs", &self.attrs); + formatter.field("vis", &self.vis); + formatter.field("static_token", &self.static_token); + formatter.field("mutability", &self.mutability); + formatter.field("ident", &self.ident); + 
formatter.field("colon_token", &self.colon_token); + formatter.field("ty", &self.ty); + formatter.field("eq_token", &self.eq_token); + formatter.field("expr", &self.expr); + formatter.field("semi_token", &self.semi_token); + formatter.finish() + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::ItemStruct { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + self.debug(formatter, "ItemStruct") + } +} +#[cfg(feature = "full")] +impl crate::ItemStruct { + fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { + let mut formatter = formatter.debug_struct(name); + formatter.field("attrs", &self.attrs); + formatter.field("vis", &self.vis); + formatter.field("struct_token", &self.struct_token); + formatter.field("ident", &self.ident); + formatter.field("generics", &self.generics); + formatter.field("fields", &self.fields); + formatter.field("semi_token", &self.semi_token); + formatter.finish() + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::ItemTrait { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + self.debug(formatter, "ItemTrait") + } +} +#[cfg(feature = "full")] +impl crate::ItemTrait { + fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { + let mut formatter = formatter.debug_struct(name); + formatter.field("attrs", &self.attrs); + formatter.field("vis", &self.vis); + formatter.field("unsafety", &self.unsafety); + formatter.field("auto_token", &self.auto_token); + formatter.field("restriction", &self.restriction); + formatter.field("trait_token", &self.trait_token); + formatter.field("ident", &self.ident); + formatter.field("generics", &self.generics); + formatter.field("colon_token", &self.colon_token); + formatter.field("supertraits", &self.supertraits); + formatter.field("brace_token", &self.brace_token); + formatter.field("items", &self.items); + formatter.finish() + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::ItemTraitAlias { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + self.debug(formatter, "ItemTraitAlias") + } +} +#[cfg(feature = "full")] +impl crate::ItemTraitAlias { + fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { + let mut formatter = formatter.debug_struct(name); + formatter.field("attrs", &self.attrs); + formatter.field("vis", &self.vis); + formatter.field("trait_token", &self.trait_token); + formatter.field("ident", &self.ident); + formatter.field("generics", &self.generics); + formatter.field("eq_token", &self.eq_token); + formatter.field("bounds", &self.bounds); + formatter.field("semi_token", &self.semi_token); + formatter.finish() + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::ItemType { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + self.debug(formatter, "ItemType") + } +} +#[cfg(feature = "full")] +impl crate::ItemType { + fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { + let mut formatter = formatter.debug_struct(name); + formatter.field("attrs", &self.attrs); + formatter.field("vis", &self.vis); + formatter.field("type_token", &self.type_token); + formatter.field("ident", &self.ident); + formatter.field("generics", &self.generics); + formatter.field("eq_token", &self.eq_token); + formatter.field("ty", &self.ty); + 
formatter.field("semi_token", &self.semi_token); + formatter.finish() + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::ItemUnion { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + self.debug(formatter, "ItemUnion") + } +} +#[cfg(feature = "full")] +impl crate::ItemUnion { + fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { + let mut formatter = formatter.debug_struct(name); + formatter.field("attrs", &self.attrs); + formatter.field("vis", &self.vis); + formatter.field("union_token", &self.union_token); + formatter.field("ident", &self.ident); + formatter.field("generics", &self.generics); + formatter.field("fields", &self.fields); + formatter.finish() + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::ItemUse { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + self.debug(formatter, "ItemUse") + } +} +#[cfg(feature = "full")] +impl crate::ItemUse { + fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { + let mut formatter = formatter.debug_struct(name); + formatter.field("attrs", &self.attrs); + formatter.field("vis", &self.vis); + formatter.field("use_token", &self.use_token); + formatter.field("leading_colon", &self.leading_colon); + formatter.field("tree", &self.tree); + formatter.field("semi_token", &self.semi_token); + formatter.finish() + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::Label { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("Label"); + formatter.field("name", &self.name); + formatter.field("colon_token", &self.colon_token); + formatter.finish() + } +} +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::Lifetime { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + self.debug(formatter, "Lifetime") + } +} +impl crate::Lifetime { + fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { + let mut formatter = formatter.debug_struct(name); + formatter.field("apostrophe", &self.apostrophe); + formatter.field("ident", &self.ident); + formatter.finish() + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::LifetimeParam { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("LifetimeParam"); + formatter.field("attrs", &self.attrs); + formatter.field("lifetime", &self.lifetime); + formatter.field("colon_token", &self.colon_token); + formatter.field("bounds", &self.bounds); + formatter.finish() + } +} +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::Lit { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Lit::")?; + match self { + crate::Lit::Str(v0) => v0.debug(formatter, "Str"), + crate::Lit::ByteStr(v0) => v0.debug(formatter, "ByteStr"), + crate::Lit::CStr(v0) => v0.debug(formatter, "CStr"), + crate::Lit::Byte(v0) => v0.debug(formatter, "Byte"), + crate::Lit::Char(v0) => v0.debug(formatter, "Char"), + crate::Lit::Int(v0) => v0.debug(formatter, "Int"), + crate::Lit::Float(v0) => v0.debug(formatter, "Float"), + crate::Lit::Bool(v0) => v0.debug(formatter, "Bool"), + crate::Lit::Verbatim(v0) => { + let mut formatter = formatter.debug_tuple("Verbatim"); + formatter.field(v0); + 
formatter.finish() + } + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::Local { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + self.debug(formatter, "Local") + } +} +#[cfg(feature = "full")] +impl crate::Local { + fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { + let mut formatter = formatter.debug_struct(name); + formatter.field("attrs", &self.attrs); + formatter.field("let_token", &self.let_token); + formatter.field("pat", &self.pat); + formatter.field("init", &self.init); + formatter.field("semi_token", &self.semi_token); + formatter.finish() + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::LocalInit { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("LocalInit"); + formatter.field("eq_token", &self.eq_token); + formatter.field("expr", &self.expr); + formatter.field("diverge", &self.diverge); + formatter.finish() + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::Macro { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("Macro"); + formatter.field("path", &self.path); + formatter.field("bang_token", &self.bang_token); + formatter.field("delimiter", &self.delimiter); + formatter.field("tokens", &self.tokens); + formatter.finish() + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::MacroDelimiter { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("MacroDelimiter::")?; + match self { + crate::MacroDelimiter::Paren(v0) => { + let mut formatter = formatter.debug_tuple("Paren"); + formatter.field(v0); + formatter.finish() + } + crate::MacroDelimiter::Brace(v0) => { + let mut formatter = formatter.debug_tuple("Brace"); + formatter.field(v0); + formatter.finish() + } + crate::MacroDelimiter::Bracket(v0) => { + let mut formatter = formatter.debug_tuple("Bracket"); + formatter.field(v0); + formatter.finish() + } + } + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::Member { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Member::")?; + match self { + crate::Member::Named(v0) => { + let mut formatter = formatter.debug_tuple("Named"); + formatter.field(v0); + formatter.finish() + } + crate::Member::Unnamed(v0) => { + let mut formatter = formatter.debug_tuple("Unnamed"); + formatter.field(v0); + formatter.finish() + } + } + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::Meta { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Meta::")?; + match self { + crate::Meta::Path(v0) => v0.debug(formatter, "Path"), + crate::Meta::List(v0) => v0.debug(formatter, "List"), + crate::Meta::NameValue(v0) => v0.debug(formatter, "NameValue"), + } + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::MetaList { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + self.debug(formatter, "MetaList") + } +} +#[cfg(any(feature = "derive", feature = "full"))] +impl 
crate::MetaList { + fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { + let mut formatter = formatter.debug_struct(name); + formatter.field("path", &self.path); + formatter.field("delimiter", &self.delimiter); + formatter.field("tokens", &self.tokens); + formatter.finish() + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::MetaNameValue { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + self.debug(formatter, "MetaNameValue") + } +} +#[cfg(any(feature = "derive", feature = "full"))] +impl crate::MetaNameValue { + fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { + let mut formatter = formatter.debug_struct(name); + formatter.field("path", &self.path); + formatter.field("eq_token", &self.eq_token); + formatter.field("value", &self.value); + formatter.finish() + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::ParenthesizedGenericArguments { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + self.debug(formatter, "ParenthesizedGenericArguments") + } +} +#[cfg(any(feature = "derive", feature = "full"))] +impl crate::ParenthesizedGenericArguments { + fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { + let mut formatter = formatter.debug_struct(name); + formatter.field("paren_token", &self.paren_token); + formatter.field("inputs", &self.inputs); + formatter.field("output", &self.output); + formatter.finish() + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::Pat { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Pat::")?; + match self { + crate::Pat::Const(v0) => v0.debug(formatter, "Const"), + crate::Pat::Ident(v0) => v0.debug(formatter, "Ident"), + crate::Pat::Lit(v0) => v0.debug(formatter, "Lit"), + crate::Pat::Macro(v0) => v0.debug(formatter, "Macro"), + crate::Pat::Or(v0) => v0.debug(formatter, "Or"), + crate::Pat::Paren(v0) => v0.debug(formatter, "Paren"), + crate::Pat::Path(v0) => v0.debug(formatter, "Path"), + crate::Pat::Range(v0) => v0.debug(formatter, "Range"), + crate::Pat::Reference(v0) => v0.debug(formatter, "Reference"), + crate::Pat::Rest(v0) => v0.debug(formatter, "Rest"), + crate::Pat::Slice(v0) => v0.debug(formatter, "Slice"), + crate::Pat::Struct(v0) => v0.debug(formatter, "Struct"), + crate::Pat::Tuple(v0) => v0.debug(formatter, "Tuple"), + crate::Pat::TupleStruct(v0) => v0.debug(formatter, "TupleStruct"), + crate::Pat::Type(v0) => v0.debug(formatter, "Type"), + crate::Pat::Verbatim(v0) => { + let mut formatter = formatter.debug_tuple("Verbatim"); + formatter.field(v0); + formatter.finish() + } + crate::Pat::Wild(v0) => v0.debug(formatter, "Wild"), + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::PatIdent { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + self.debug(formatter, "PatIdent") + } +} +#[cfg(feature = "full")] +impl crate::PatIdent { + fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { + let mut formatter = formatter.debug_struct(name); + formatter.field("attrs", &self.attrs); + formatter.field("by_ref", &self.by_ref); + formatter.field("mutability", &self.mutability); + formatter.field("ident", &self.ident); + formatter.field("subpat", &self.subpat); + formatter.finish() 
+ } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::PatOr { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + self.debug(formatter, "PatOr") + } +} +#[cfg(feature = "full")] +impl crate::PatOr { + fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { + let mut formatter = formatter.debug_struct(name); + formatter.field("attrs", &self.attrs); + formatter.field("leading_vert", &self.leading_vert); + formatter.field("cases", &self.cases); + formatter.finish() + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::PatParen { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + self.debug(formatter, "PatParen") + } +} +#[cfg(feature = "full")] +impl crate::PatParen { + fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { + let mut formatter = formatter.debug_struct(name); + formatter.field("attrs", &self.attrs); + formatter.field("paren_token", &self.paren_token); + formatter.field("pat", &self.pat); + formatter.finish() + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::PatReference { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + self.debug(formatter, "PatReference") + } +} +#[cfg(feature = "full")] +impl crate::PatReference { + fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { + let mut formatter = formatter.debug_struct(name); + formatter.field("attrs", &self.attrs); + formatter.field("and_token", &self.and_token); + formatter.field("mutability", &self.mutability); + formatter.field("pat", &self.pat); + formatter.finish() + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::PatRest { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + self.debug(formatter, "PatRest") + } +} +#[cfg(feature = "full")] +impl crate::PatRest { + fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { + let mut formatter = formatter.debug_struct(name); + formatter.field("attrs", &self.attrs); + formatter.field("dot2_token", &self.dot2_token); + formatter.finish() + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::PatSlice { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + self.debug(formatter, "PatSlice") + } +} +#[cfg(feature = "full")] +impl crate::PatSlice { + fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { + let mut formatter = formatter.debug_struct(name); + formatter.field("attrs", &self.attrs); + formatter.field("bracket_token", &self.bracket_token); + formatter.field("elems", &self.elems); + formatter.finish() + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::PatStruct { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + self.debug(formatter, "PatStruct") + } +} +#[cfg(feature = "full")] +impl crate::PatStruct { + fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { + let mut formatter = formatter.debug_struct(name); + formatter.field("attrs", &self.attrs); + formatter.field("qself", &self.qself); + formatter.field("path", &self.path); + formatter.field("brace_token", &self.brace_token); + formatter.field("fields", &self.fields); + formatter.field("rest", &self.rest); + formatter.finish() + } +} 
+#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::PatTuple { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + self.debug(formatter, "PatTuple") + } +} +#[cfg(feature = "full")] +impl crate::PatTuple { + fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { + let mut formatter = formatter.debug_struct(name); + formatter.field("attrs", &self.attrs); + formatter.field("paren_token", &self.paren_token); + formatter.field("elems", &self.elems); + formatter.finish() + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::PatTupleStruct { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + self.debug(formatter, "PatTupleStruct") + } +} +#[cfg(feature = "full")] +impl crate::PatTupleStruct { + fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { + let mut formatter = formatter.debug_struct(name); + formatter.field("attrs", &self.attrs); + formatter.field("qself", &self.qself); + formatter.field("path", &self.path); + formatter.field("paren_token", &self.paren_token); + formatter.field("elems", &self.elems); + formatter.finish() + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::PatType { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + self.debug(formatter, "PatType") + } +} +#[cfg(feature = "full")] +impl crate::PatType { + fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { + let mut formatter = formatter.debug_struct(name); + formatter.field("attrs", &self.attrs); + formatter.field("pat", &self.pat); + formatter.field("colon_token", &self.colon_token); + formatter.field("ty", &self.ty); + formatter.finish() + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::PatWild { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + self.debug(formatter, "PatWild") + } +} +#[cfg(feature = "full")] +impl crate::PatWild { + fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { + let mut formatter = formatter.debug_struct(name); + formatter.field("attrs", &self.attrs); + formatter.field("underscore_token", &self.underscore_token); + formatter.finish() + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::Path { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + self.debug(formatter, "Path") + } +} +#[cfg(any(feature = "derive", feature = "full"))] +impl crate::Path { + fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { + let mut formatter = formatter.debug_struct(name); + formatter.field("leading_colon", &self.leading_colon); + formatter.field("segments", &self.segments); + formatter.finish() + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::PathArguments { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("PathArguments::")?; + match self { + crate::PathArguments::None => formatter.write_str("None"), + crate::PathArguments::AngleBracketed(v0) => { + v0.debug(formatter, "AngleBracketed") + } + crate::PathArguments::Parenthesized(v0) => { + v0.debug(formatter, "Parenthesized") + } + } + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature 
= "extra-traits")))] +impl Debug for crate::PathSegment { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("PathSegment"); + formatter.field("ident", &self.ident); + formatter.field("arguments", &self.arguments); + formatter.finish() + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::PointerMutability { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("PointerMutability::")?; + match self { + crate::PointerMutability::Const(v0) => { + let mut formatter = formatter.debug_tuple("Const"); + formatter.field(v0); + formatter.finish() + } + crate::PointerMutability::Mut(v0) => { + let mut formatter = formatter.debug_tuple("Mut"); + formatter.field(v0); + formatter.finish() + } + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::PreciseCapture { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("PreciseCapture"); + formatter.field("use_token", &self.use_token); + formatter.field("lt_token", &self.lt_token); + formatter.field("params", &self.params); + formatter.field("gt_token", &self.gt_token); + formatter.finish() + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::PredicateLifetime { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("PredicateLifetime"); + formatter.field("lifetime", &self.lifetime); + formatter.field("colon_token", &self.colon_token); + formatter.field("bounds", &self.bounds); + formatter.finish() + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::PredicateType { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("PredicateType"); + formatter.field("lifetimes", &self.lifetimes); + formatter.field("bounded_ty", &self.bounded_ty); + formatter.field("colon_token", &self.colon_token); + formatter.field("bounds", &self.bounds); + formatter.finish() + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::QSelf { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("QSelf"); + formatter.field("lt_token", &self.lt_token); + formatter.field("ty", &self.ty); + formatter.field("position", &self.position); + formatter.field("as_token", &self.as_token); + formatter.field("gt_token", &self.gt_token); + formatter.finish() + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::RangeLimits { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("RangeLimits::")?; + match self { + crate::RangeLimits::HalfOpen(v0) => { + let mut formatter = formatter.debug_tuple("HalfOpen"); + formatter.field(v0); + formatter.finish() + } + crate::RangeLimits::Closed(v0) => { + let mut formatter = formatter.debug_tuple("Closed"); + formatter.field(v0); + formatter.finish() + } + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::Receiver { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = 
formatter.debug_struct("Receiver"); + formatter.field("attrs", &self.attrs); + formatter.field("reference", &self.reference); + formatter.field("mutability", &self.mutability); + formatter.field("self_token", &self.self_token); + formatter.field("colon_token", &self.colon_token); + formatter.field("ty", &self.ty); + formatter.finish() + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::ReturnType { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("ReturnType::")?; + match self { + crate::ReturnType::Default => formatter.write_str("Default"), + crate::ReturnType::Type(v0, v1) => { + let mut formatter = formatter.debug_tuple("Type"); + formatter.field(v0); + formatter.field(v1); + formatter.finish() + } + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::Signature { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("Signature"); + formatter.field("constness", &self.constness); + formatter.field("asyncness", &self.asyncness); + formatter.field("unsafety", &self.unsafety); + formatter.field("abi", &self.abi); + formatter.field("fn_token", &self.fn_token); + formatter.field("ident", &self.ident); + formatter.field("generics", &self.generics); + formatter.field("paren_token", &self.paren_token); + formatter.field("inputs", &self.inputs); + formatter.field("variadic", &self.variadic); + formatter.field("output", &self.output); + formatter.finish() + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::StaticMutability { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("StaticMutability::")?; + match self { + crate::StaticMutability::Mut(v0) => { + let mut formatter = formatter.debug_tuple("Mut"); + formatter.field(v0); + formatter.finish() + } + crate::StaticMutability::None => formatter.write_str("None"), + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::Stmt { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Stmt::")?; + match self { + crate::Stmt::Local(v0) => v0.debug(formatter, "Local"), + crate::Stmt::Item(v0) => { + let mut formatter = formatter.debug_tuple("Item"); + formatter.field(v0); + formatter.finish() + } + crate::Stmt::Expr(v0, v1) => { + let mut formatter = formatter.debug_tuple("Expr"); + formatter.field(v0); + formatter.field(v1); + formatter.finish() + } + crate::Stmt::Macro(v0) => v0.debug(formatter, "Macro"), + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::StmtMacro { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + self.debug(formatter, "StmtMacro") + } +} +#[cfg(feature = "full")] +impl crate::StmtMacro { + fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { + let mut formatter = formatter.debug_struct(name); + formatter.field("attrs", &self.attrs); + formatter.field("mac", &self.mac); + formatter.field("semi_token", &self.semi_token); + formatter.finish() + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::TraitBound { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = 
formatter.debug_struct("TraitBound"); + formatter.field("paren_token", &self.paren_token); + formatter.field("modifier", &self.modifier); + formatter.field("lifetimes", &self.lifetimes); + formatter.field("path", &self.path); + formatter.finish() + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::TraitBoundModifier { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("TraitBoundModifier::")?; + match self { + crate::TraitBoundModifier::None => formatter.write_str("None"), + crate::TraitBoundModifier::Maybe(v0) => { + let mut formatter = formatter.debug_tuple("Maybe"); + formatter.field(v0); + formatter.finish() + } + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::TraitItem { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("TraitItem::")?; + match self { + crate::TraitItem::Const(v0) => v0.debug(formatter, "Const"), + crate::TraitItem::Fn(v0) => v0.debug(formatter, "Fn"), + crate::TraitItem::Type(v0) => v0.debug(formatter, "Type"), + crate::TraitItem::Macro(v0) => v0.debug(formatter, "Macro"), + crate::TraitItem::Verbatim(v0) => { + let mut formatter = formatter.debug_tuple("Verbatim"); + formatter.field(v0); + formatter.finish() + } + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::TraitItemConst { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + self.debug(formatter, "TraitItemConst") + } +} +#[cfg(feature = "full")] +impl crate::TraitItemConst { + fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { + let mut formatter = formatter.debug_struct(name); + formatter.field("attrs", &self.attrs); + formatter.field("const_token", &self.const_token); + formatter.field("ident", &self.ident); + formatter.field("generics", &self.generics); + formatter.field("colon_token", &self.colon_token); + formatter.field("ty", &self.ty); + formatter.field("default", &self.default); + formatter.field("semi_token", &self.semi_token); + formatter.finish() + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::TraitItemFn { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + self.debug(formatter, "TraitItemFn") + } +} +#[cfg(feature = "full")] +impl crate::TraitItemFn { + fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { + let mut formatter = formatter.debug_struct(name); + formatter.field("attrs", &self.attrs); + formatter.field("sig", &self.sig); + formatter.field("default", &self.default); + formatter.field("semi_token", &self.semi_token); + formatter.finish() + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::TraitItemMacro { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + self.debug(formatter, "TraitItemMacro") + } +} +#[cfg(feature = "full")] +impl crate::TraitItemMacro { + fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { + let mut formatter = formatter.debug_struct(name); + formatter.field("attrs", &self.attrs); + formatter.field("mac", &self.mac); + formatter.field("semi_token", &self.semi_token); + formatter.finish() + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::TraitItemType { + fn fmt(&self, 
formatter: &mut fmt::Formatter) -> fmt::Result { + self.debug(formatter, "TraitItemType") + } +} +#[cfg(feature = "full")] +impl crate::TraitItemType { + fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { + let mut formatter = formatter.debug_struct(name); + formatter.field("attrs", &self.attrs); + formatter.field("type_token", &self.type_token); + formatter.field("ident", &self.ident); + formatter.field("generics", &self.generics); + formatter.field("colon_token", &self.colon_token); + formatter.field("bounds", &self.bounds); + formatter.field("default", &self.default); + formatter.field("semi_token", &self.semi_token); + formatter.finish() + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::Type { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Type::")?; + match self { + crate::Type::Array(v0) => v0.debug(formatter, "Array"), + crate::Type::BareFn(v0) => v0.debug(formatter, "BareFn"), + crate::Type::Group(v0) => v0.debug(formatter, "Group"), + crate::Type::ImplTrait(v0) => v0.debug(formatter, "ImplTrait"), + crate::Type::Infer(v0) => v0.debug(formatter, "Infer"), + crate::Type::Macro(v0) => v0.debug(formatter, "Macro"), + crate::Type::Never(v0) => v0.debug(formatter, "Never"), + crate::Type::Paren(v0) => v0.debug(formatter, "Paren"), + crate::Type::Path(v0) => v0.debug(formatter, "Path"), + crate::Type::Ptr(v0) => v0.debug(formatter, "Ptr"), + crate::Type::Reference(v0) => v0.debug(formatter, "Reference"), + crate::Type::Slice(v0) => v0.debug(formatter, "Slice"), + crate::Type::TraitObject(v0) => v0.debug(formatter, "TraitObject"), + crate::Type::Tuple(v0) => v0.debug(formatter, "Tuple"), + crate::Type::Verbatim(v0) => { + let mut formatter = formatter.debug_tuple("Verbatim"); + formatter.field(v0); + formatter.finish() + } + } + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::TypeArray { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + self.debug(formatter, "TypeArray") + } +} +#[cfg(any(feature = "derive", feature = "full"))] +impl crate::TypeArray { + fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { + let mut formatter = formatter.debug_struct(name); + formatter.field("bracket_token", &self.bracket_token); + formatter.field("elem", &self.elem); + formatter.field("semi_token", &self.semi_token); + formatter.field("len", &self.len); + formatter.finish() + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::TypeBareFn { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + self.debug(formatter, "TypeBareFn") + } +} +#[cfg(any(feature = "derive", feature = "full"))] +impl crate::TypeBareFn { + fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { + let mut formatter = formatter.debug_struct(name); + formatter.field("lifetimes", &self.lifetimes); + formatter.field("unsafety", &self.unsafety); + formatter.field("abi", &self.abi); + formatter.field("fn_token", &self.fn_token); + formatter.field("paren_token", &self.paren_token); + formatter.field("inputs", &self.inputs); + formatter.field("variadic", &self.variadic); + formatter.field("output", &self.output); + formatter.finish() + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] 
+impl Debug for crate::TypeGroup { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + self.debug(formatter, "TypeGroup") + } +} +#[cfg(any(feature = "derive", feature = "full"))] +impl crate::TypeGroup { + fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { + let mut formatter = formatter.debug_struct(name); + formatter.field("group_token", &self.group_token); + formatter.field("elem", &self.elem); + formatter.finish() + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::TypeImplTrait { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + self.debug(formatter, "TypeImplTrait") + } +} +#[cfg(any(feature = "derive", feature = "full"))] +impl crate::TypeImplTrait { + fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { + let mut formatter = formatter.debug_struct(name); + formatter.field("impl_token", &self.impl_token); + formatter.field("bounds", &self.bounds); + formatter.finish() + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::TypeInfer { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + self.debug(formatter, "TypeInfer") + } +} +#[cfg(any(feature = "derive", feature = "full"))] +impl crate::TypeInfer { + fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { + let mut formatter = formatter.debug_struct(name); + formatter.field("underscore_token", &self.underscore_token); + formatter.finish() + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::TypeMacro { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + self.debug(formatter, "TypeMacro") + } +} +#[cfg(any(feature = "derive", feature = "full"))] +impl crate::TypeMacro { + fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { + let mut formatter = formatter.debug_struct(name); + formatter.field("mac", &self.mac); + formatter.finish() + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::TypeNever { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + self.debug(formatter, "TypeNever") + } +} +#[cfg(any(feature = "derive", feature = "full"))] +impl crate::TypeNever { + fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { + let mut formatter = formatter.debug_struct(name); + formatter.field("bang_token", &self.bang_token); + formatter.finish() + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::TypeParam { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("TypeParam"); + formatter.field("attrs", &self.attrs); + formatter.field("ident", &self.ident); + formatter.field("colon_token", &self.colon_token); + formatter.field("bounds", &self.bounds); + formatter.field("eq_token", &self.eq_token); + formatter.field("default", &self.default); + formatter.finish() + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::TypeParamBound { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("TypeParamBound::")?; + match self { + crate::TypeParamBound::Trait(v0) => { + 
let mut formatter = formatter.debug_tuple("Trait"); + formatter.field(v0); + formatter.finish() + } + crate::TypeParamBound::Lifetime(v0) => v0.debug(formatter, "Lifetime"), + #[cfg(feature = "full")] + crate::TypeParamBound::PreciseCapture(v0) => { + let mut formatter = formatter.debug_tuple("PreciseCapture"); + formatter.field(v0); + formatter.finish() + } + crate::TypeParamBound::Verbatim(v0) => { + let mut formatter = formatter.debug_tuple("Verbatim"); + formatter.field(v0); + formatter.finish() + } + #[cfg(not(feature = "full"))] + _ => unreachable!(), + } + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::TypeParen { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + self.debug(formatter, "TypeParen") + } +} +#[cfg(any(feature = "derive", feature = "full"))] +impl crate::TypeParen { + fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { + let mut formatter = formatter.debug_struct(name); + formatter.field("paren_token", &self.paren_token); + formatter.field("elem", &self.elem); + formatter.finish() + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::TypePath { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + self.debug(formatter, "TypePath") + } +} +#[cfg(any(feature = "derive", feature = "full"))] +impl crate::TypePath { + fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { + let mut formatter = formatter.debug_struct(name); + formatter.field("qself", &self.qself); + formatter.field("path", &self.path); + formatter.finish() + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::TypePtr { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + self.debug(formatter, "TypePtr") + } +} +#[cfg(any(feature = "derive", feature = "full"))] +impl crate::TypePtr { + fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { + let mut formatter = formatter.debug_struct(name); + formatter.field("star_token", &self.star_token); + formatter.field("const_token", &self.const_token); + formatter.field("mutability", &self.mutability); + formatter.field("elem", &self.elem); + formatter.finish() + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::TypeReference { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + self.debug(formatter, "TypeReference") + } +} +#[cfg(any(feature = "derive", feature = "full"))] +impl crate::TypeReference { + fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { + let mut formatter = formatter.debug_struct(name); + formatter.field("and_token", &self.and_token); + formatter.field("lifetime", &self.lifetime); + formatter.field("mutability", &self.mutability); + formatter.field("elem", &self.elem); + formatter.finish() + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::TypeSlice { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + self.debug(formatter, "TypeSlice") + } +} +#[cfg(any(feature = "derive", feature = "full"))] +impl crate::TypeSlice { + fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { + let mut formatter = formatter.debug_struct(name); + 
formatter.field("bracket_token", &self.bracket_token); + formatter.field("elem", &self.elem); + formatter.finish() + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::TypeTraitObject { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + self.debug(formatter, "TypeTraitObject") + } +} +#[cfg(any(feature = "derive", feature = "full"))] +impl crate::TypeTraitObject { + fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { + let mut formatter = formatter.debug_struct(name); + formatter.field("dyn_token", &self.dyn_token); + formatter.field("bounds", &self.bounds); + formatter.finish() + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::TypeTuple { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + self.debug(formatter, "TypeTuple") + } +} +#[cfg(any(feature = "derive", feature = "full"))] +impl crate::TypeTuple { + fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { + let mut formatter = formatter.debug_struct(name); + formatter.field("paren_token", &self.paren_token); + formatter.field("elems", &self.elems); + formatter.finish() + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::UnOp { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("UnOp::")?; + match self { + crate::UnOp::Deref(v0) => { + let mut formatter = formatter.debug_tuple("Deref"); + formatter.field(v0); + formatter.finish() + } + crate::UnOp::Not(v0) => { + let mut formatter = formatter.debug_tuple("Not"); + formatter.field(v0); + formatter.finish() + } + crate::UnOp::Neg(v0) => { + let mut formatter = formatter.debug_tuple("Neg"); + formatter.field(v0); + formatter.finish() + } + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::UseGlob { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("UseGlob"); + formatter.field("star_token", &self.star_token); + formatter.finish() + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::UseGroup { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("UseGroup"); + formatter.field("brace_token", &self.brace_token); + formatter.field("items", &self.items); + formatter.finish() + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::UseName { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("UseName"); + formatter.field("ident", &self.ident); + formatter.finish() + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::UsePath { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("UsePath"); + formatter.field("ident", &self.ident); + formatter.field("colon2_token", &self.colon2_token); + formatter.field("tree", &self.tree); + formatter.finish() + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::UseRename { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = 
formatter.debug_struct("UseRename"); + formatter.field("ident", &self.ident); + formatter.field("as_token", &self.as_token); + formatter.field("rename", &self.rename); + formatter.finish() + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::UseTree { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("UseTree::")?; + match self { + crate::UseTree::Path(v0) => { + let mut formatter = formatter.debug_tuple("Path"); + formatter.field(v0); + formatter.finish() + } + crate::UseTree::Name(v0) => { + let mut formatter = formatter.debug_tuple("Name"); + formatter.field(v0); + formatter.finish() + } + crate::UseTree::Rename(v0) => { + let mut formatter = formatter.debug_tuple("Rename"); + formatter.field(v0); + formatter.finish() + } + crate::UseTree::Glob(v0) => { + let mut formatter = formatter.debug_tuple("Glob"); + formatter.field(v0); + formatter.finish() + } + crate::UseTree::Group(v0) => { + let mut formatter = formatter.debug_tuple("Group"); + formatter.field(v0); + formatter.finish() + } + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::Variadic { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("Variadic"); + formatter.field("attrs", &self.attrs); + formatter.field("pat", &self.pat); + formatter.field("dots", &self.dots); + formatter.field("comma", &self.comma); + formatter.finish() + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::Variant { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("Variant"); + formatter.field("attrs", &self.attrs); + formatter.field("ident", &self.ident); + formatter.field("fields", &self.fields); + formatter.field("discriminant", &self.discriminant); + formatter.finish() + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::VisRestricted { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + self.debug(formatter, "VisRestricted") + } +} +#[cfg(any(feature = "derive", feature = "full"))] +impl crate::VisRestricted { + fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { + let mut formatter = formatter.debug_struct(name); + formatter.field("pub_token", &self.pub_token); + formatter.field("paren_token", &self.paren_token); + formatter.field("in_token", &self.in_token); + formatter.field("path", &self.path); + formatter.finish() + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::Visibility { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Visibility::")?; + match self { + crate::Visibility::Public(v0) => { + let mut formatter = formatter.debug_tuple("Public"); + formatter.field(v0); + formatter.finish() + } + crate::Visibility::Restricted(v0) => v0.debug(formatter, "Restricted"), + crate::Visibility::Inherited => formatter.write_str("Inherited"), + } + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::WhereClause { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("WhereClause"); + 
formatter.field("where_token", &self.where_token); + formatter.field("predicates", &self.predicates); + formatter.finish() + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for crate::WherePredicate { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("WherePredicate::")?; + match self { + crate::WherePredicate::Lifetime(v0) => { + let mut formatter = formatter.debug_tuple("Lifetime"); + formatter.field(v0); + formatter.finish() + } + crate::WherePredicate::Type(v0) => { + let mut formatter = formatter.debug_tuple("Type"); + formatter.field(v0); + formatter.finish() + } + } + } +} diff --git a/vendor/syn/src/gen/eq.rs b/vendor/syn/src/gen/eq.rs new file mode 100644 index 00000000000000..128e8991eeccfb --- /dev/null +++ b/vendor/syn/src/gen/eq.rs @@ -0,0 +1,2306 @@ +// This file is @generated by syn-internal-codegen. +// It is not intended for manual editing. + +#[cfg(any(feature = "derive", feature = "full"))] +use crate::tt::TokenStreamHelper; +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::Abi {} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::Abi { + fn eq(&self, other: &Self) -> bool { + self.name == other.name + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::AngleBracketedGenericArguments {} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::AngleBracketedGenericArguments { + fn eq(&self, other: &Self) -> bool { + self.colon2_token == other.colon2_token && self.args == other.args + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::Arm {} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::Arm { + fn eq(&self, other: &Self) -> bool { + self.attrs == other.attrs && self.pat == other.pat && self.guard == other.guard + && self.body == other.body && self.comma == other.comma + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::AssocConst {} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::AssocConst { + fn eq(&self, other: &Self) -> bool { + self.ident == other.ident && self.generics == other.generics + && self.value == other.value + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::AssocType {} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::AssocType { + fn eq(&self, other: &Self) -> bool { + self.ident == other.ident && self.generics == other.generics + && self.ty == other.ty + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::AttrStyle {} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::AttrStyle { + fn eq(&self, other: &Self) -> bool { + match (self, other) { + (crate::AttrStyle::Outer, crate::AttrStyle::Outer) => true, + 
(crate::AttrStyle::Inner(_), crate::AttrStyle::Inner(_)) => true, + _ => false, + } + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::Attribute {} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::Attribute { + fn eq(&self, other: &Self) -> bool { + self.style == other.style && self.meta == other.meta + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::BareFnArg {} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::BareFnArg { + fn eq(&self, other: &Self) -> bool { + self.attrs == other.attrs && self.name == other.name && self.ty == other.ty + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::BareVariadic {} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::BareVariadic { + fn eq(&self, other: &Self) -> bool { + self.attrs == other.attrs && self.name == other.name && self.comma == other.comma + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::BinOp {} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::BinOp { + fn eq(&self, other: &Self) -> bool { + match (self, other) { + (crate::BinOp::Add(_), crate::BinOp::Add(_)) => true, + (crate::BinOp::Sub(_), crate::BinOp::Sub(_)) => true, + (crate::BinOp::Mul(_), crate::BinOp::Mul(_)) => true, + (crate::BinOp::Div(_), crate::BinOp::Div(_)) => true, + (crate::BinOp::Rem(_), crate::BinOp::Rem(_)) => true, + (crate::BinOp::And(_), crate::BinOp::And(_)) => true, + (crate::BinOp::Or(_), crate::BinOp::Or(_)) => true, + (crate::BinOp::BitXor(_), crate::BinOp::BitXor(_)) => true, + (crate::BinOp::BitAnd(_), crate::BinOp::BitAnd(_)) => true, + (crate::BinOp::BitOr(_), crate::BinOp::BitOr(_)) => true, + (crate::BinOp::Shl(_), crate::BinOp::Shl(_)) => true, + (crate::BinOp::Shr(_), crate::BinOp::Shr(_)) => true, + (crate::BinOp::Eq(_), crate::BinOp::Eq(_)) => true, + (crate::BinOp::Lt(_), crate::BinOp::Lt(_)) => true, + (crate::BinOp::Le(_), crate::BinOp::Le(_)) => true, + (crate::BinOp::Ne(_), crate::BinOp::Ne(_)) => true, + (crate::BinOp::Ge(_), crate::BinOp::Ge(_)) => true, + (crate::BinOp::Gt(_), crate::BinOp::Gt(_)) => true, + (crate::BinOp::AddAssign(_), crate::BinOp::AddAssign(_)) => true, + (crate::BinOp::SubAssign(_), crate::BinOp::SubAssign(_)) => true, + (crate::BinOp::MulAssign(_), crate::BinOp::MulAssign(_)) => true, + (crate::BinOp::DivAssign(_), crate::BinOp::DivAssign(_)) => true, + (crate::BinOp::RemAssign(_), crate::BinOp::RemAssign(_)) => true, + (crate::BinOp::BitXorAssign(_), crate::BinOp::BitXorAssign(_)) => true, + (crate::BinOp::BitAndAssign(_), crate::BinOp::BitAndAssign(_)) => true, + (crate::BinOp::BitOrAssign(_), crate::BinOp::BitOrAssign(_)) => true, + (crate::BinOp::ShlAssign(_), crate::BinOp::ShlAssign(_)) => true, + (crate::BinOp::ShrAssign(_), crate::BinOp::ShrAssign(_)) => true, + _ => false, + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::Block {} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, 
doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::Block { + fn eq(&self, other: &Self) -> bool { + self.stmts == other.stmts + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::BoundLifetimes {} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::BoundLifetimes { + fn eq(&self, other: &Self) -> bool { + self.lifetimes == other.lifetimes + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::CapturedParam {} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::CapturedParam { + fn eq(&self, other: &Self) -> bool { + match (self, other) { + ( + crate::CapturedParam::Lifetime(self0), + crate::CapturedParam::Lifetime(other0), + ) => self0 == other0, + (crate::CapturedParam::Ident(self0), crate::CapturedParam::Ident(other0)) => { + self0 == other0 + } + _ => false, + } + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::ConstParam {} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::ConstParam { + fn eq(&self, other: &Self) -> bool { + self.attrs == other.attrs && self.ident == other.ident && self.ty == other.ty + && self.eq_token == other.eq_token && self.default == other.default + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::Constraint {} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::Constraint { + fn eq(&self, other: &Self) -> bool { + self.ident == other.ident && self.generics == other.generics + && self.bounds == other.bounds + } +} +#[cfg(feature = "derive")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::Data {} +#[cfg(feature = "derive")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::Data { + fn eq(&self, other: &Self) -> bool { + match (self, other) { + (crate::Data::Struct(self0), crate::Data::Struct(other0)) => self0 == other0, + (crate::Data::Enum(self0), crate::Data::Enum(other0)) => self0 == other0, + (crate::Data::Union(self0), crate::Data::Union(other0)) => self0 == other0, + _ => false, + } + } +} +#[cfg(feature = "derive")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::DataEnum {} +#[cfg(feature = "derive")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::DataEnum { + fn eq(&self, other: &Self) -> bool { + self.variants == other.variants + } +} +#[cfg(feature = "derive")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::DataStruct {} +#[cfg(feature = "derive")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::DataStruct { + fn eq(&self, other: &Self) -> bool { + self.fields == other.fields && self.semi_token == other.semi_token + } +} +#[cfg(feature = "derive")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::DataUnion {} +#[cfg(feature = "derive")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::DataUnion { + fn eq(&self, other: &Self) -> bool { + self.fields == other.fields + } +} 
+#[cfg(feature = "derive")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::DeriveInput {} +#[cfg(feature = "derive")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::DeriveInput { + fn eq(&self, other: &Self) -> bool { + self.attrs == other.attrs && self.vis == other.vis && self.ident == other.ident + && self.generics == other.generics && self.data == other.data + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::Expr {} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::Expr { + fn eq(&self, other: &Self) -> bool { + match (self, other) { + #[cfg(feature = "full")] + (crate::Expr::Array(self0), crate::Expr::Array(other0)) => self0 == other0, + #[cfg(feature = "full")] + (crate::Expr::Assign(self0), crate::Expr::Assign(other0)) => self0 == other0, + #[cfg(feature = "full")] + (crate::Expr::Async(self0), crate::Expr::Async(other0)) => self0 == other0, + #[cfg(feature = "full")] + (crate::Expr::Await(self0), crate::Expr::Await(other0)) => self0 == other0, + (crate::Expr::Binary(self0), crate::Expr::Binary(other0)) => self0 == other0, + #[cfg(feature = "full")] + (crate::Expr::Block(self0), crate::Expr::Block(other0)) => self0 == other0, + #[cfg(feature = "full")] + (crate::Expr::Break(self0), crate::Expr::Break(other0)) => self0 == other0, + (crate::Expr::Call(self0), crate::Expr::Call(other0)) => self0 == other0, + (crate::Expr::Cast(self0), crate::Expr::Cast(other0)) => self0 == other0, + #[cfg(feature = "full")] + (crate::Expr::Closure(self0), crate::Expr::Closure(other0)) => { + self0 == other0 + } + #[cfg(feature = "full")] + (crate::Expr::Const(self0), crate::Expr::Const(other0)) => self0 == other0, + #[cfg(feature = "full")] + (crate::Expr::Continue(self0), crate::Expr::Continue(other0)) => { + self0 == other0 + } + (crate::Expr::Field(self0), crate::Expr::Field(other0)) => self0 == other0, + #[cfg(feature = "full")] + (crate::Expr::ForLoop(self0), crate::Expr::ForLoop(other0)) => { + self0 == other0 + } + (crate::Expr::Group(self0), crate::Expr::Group(other0)) => self0 == other0, + #[cfg(feature = "full")] + (crate::Expr::If(self0), crate::Expr::If(other0)) => self0 == other0, + (crate::Expr::Index(self0), crate::Expr::Index(other0)) => self0 == other0, + #[cfg(feature = "full")] + (crate::Expr::Infer(self0), crate::Expr::Infer(other0)) => self0 == other0, + #[cfg(feature = "full")] + (crate::Expr::Let(self0), crate::Expr::Let(other0)) => self0 == other0, + (crate::Expr::Lit(self0), crate::Expr::Lit(other0)) => self0 == other0, + #[cfg(feature = "full")] + (crate::Expr::Loop(self0), crate::Expr::Loop(other0)) => self0 == other0, + (crate::Expr::Macro(self0), crate::Expr::Macro(other0)) => self0 == other0, + #[cfg(feature = "full")] + (crate::Expr::Match(self0), crate::Expr::Match(other0)) => self0 == other0, + (crate::Expr::MethodCall(self0), crate::Expr::MethodCall(other0)) => { + self0 == other0 + } + (crate::Expr::Paren(self0), crate::Expr::Paren(other0)) => self0 == other0, + (crate::Expr::Path(self0), crate::Expr::Path(other0)) => self0 == other0, + #[cfg(feature = "full")] + (crate::Expr::Range(self0), crate::Expr::Range(other0)) => self0 == other0, + #[cfg(feature = "full")] + (crate::Expr::RawAddr(self0), crate::Expr::RawAddr(other0)) => { + self0 == other0 + } + (crate::Expr::Reference(self0), crate::Expr::Reference(other0)) => { + self0 == 
other0 + } + #[cfg(feature = "full")] + (crate::Expr::Repeat(self0), crate::Expr::Repeat(other0)) => self0 == other0, + #[cfg(feature = "full")] + (crate::Expr::Return(self0), crate::Expr::Return(other0)) => self0 == other0, + (crate::Expr::Struct(self0), crate::Expr::Struct(other0)) => self0 == other0, + #[cfg(feature = "full")] + (crate::Expr::Try(self0), crate::Expr::Try(other0)) => self0 == other0, + #[cfg(feature = "full")] + (crate::Expr::TryBlock(self0), crate::Expr::TryBlock(other0)) => { + self0 == other0 + } + (crate::Expr::Tuple(self0), crate::Expr::Tuple(other0)) => self0 == other0, + (crate::Expr::Unary(self0), crate::Expr::Unary(other0)) => self0 == other0, + #[cfg(feature = "full")] + (crate::Expr::Unsafe(self0), crate::Expr::Unsafe(other0)) => self0 == other0, + (crate::Expr::Verbatim(self0), crate::Expr::Verbatim(other0)) => { + TokenStreamHelper(self0) == TokenStreamHelper(other0) + } + #[cfg(feature = "full")] + (crate::Expr::While(self0), crate::Expr::While(other0)) => self0 == other0, + #[cfg(feature = "full")] + (crate::Expr::Yield(self0), crate::Expr::Yield(other0)) => self0 == other0, + _ => false, + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::ExprArray {} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::ExprArray { + fn eq(&self, other: &Self) -> bool { + self.attrs == other.attrs && self.elems == other.elems + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::ExprAssign {} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::ExprAssign { + fn eq(&self, other: &Self) -> bool { + self.attrs == other.attrs && self.left == other.left && self.right == other.right + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::ExprAsync {} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::ExprAsync { + fn eq(&self, other: &Self) -> bool { + self.attrs == other.attrs && self.capture == other.capture + && self.block == other.block + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::ExprAwait {} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::ExprAwait { + fn eq(&self, other: &Self) -> bool { + self.attrs == other.attrs && self.base == other.base + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::ExprBinary {} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::ExprBinary { + fn eq(&self, other: &Self) -> bool { + self.attrs == other.attrs && self.left == other.left && self.op == other.op + && self.right == other.right + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::ExprBlock {} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::ExprBlock { + fn eq(&self, other: &Self) -> bool { + self.attrs == other.attrs && self.label == other.label + && self.block == other.block + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::ExprBreak {} +#[cfg(feature = "full")] 
+#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::ExprBreak { + fn eq(&self, other: &Self) -> bool { + self.attrs == other.attrs && self.label == other.label && self.expr == other.expr + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::ExprCall {} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::ExprCall { + fn eq(&self, other: &Self) -> bool { + self.attrs == other.attrs && self.func == other.func && self.args == other.args + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::ExprCast {} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::ExprCast { + fn eq(&self, other: &Self) -> bool { + self.attrs == other.attrs && self.expr == other.expr && self.ty == other.ty + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::ExprClosure {} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::ExprClosure { + fn eq(&self, other: &Self) -> bool { + self.attrs == other.attrs && self.lifetimes == other.lifetimes + && self.constness == other.constness && self.movability == other.movability + && self.asyncness == other.asyncness && self.capture == other.capture + && self.inputs == other.inputs && self.output == other.output + && self.body == other.body + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::ExprConst {} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::ExprConst { + fn eq(&self, other: &Self) -> bool { + self.attrs == other.attrs && self.block == other.block + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::ExprContinue {} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::ExprContinue { + fn eq(&self, other: &Self) -> bool { + self.attrs == other.attrs && self.label == other.label + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::ExprField {} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::ExprField { + fn eq(&self, other: &Self) -> bool { + self.attrs == other.attrs && self.base == other.base + && self.member == other.member + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::ExprForLoop {} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::ExprForLoop { + fn eq(&self, other: &Self) -> bool { + self.attrs == other.attrs && self.label == other.label && self.pat == other.pat + && self.expr == other.expr && self.body == other.body + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::ExprGroup {} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::ExprGroup { + fn eq(&self, other: &Self) -> bool { + self.attrs == other.attrs && self.expr == 
other.expr + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::ExprIf {} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::ExprIf { + fn eq(&self, other: &Self) -> bool { + self.attrs == other.attrs && self.cond == other.cond + && self.then_branch == other.then_branch + && self.else_branch == other.else_branch + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::ExprIndex {} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::ExprIndex { + fn eq(&self, other: &Self) -> bool { + self.attrs == other.attrs && self.expr == other.expr && self.index == other.index + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::ExprInfer {} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::ExprInfer { + fn eq(&self, other: &Self) -> bool { + self.attrs == other.attrs + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::ExprLet {} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::ExprLet { + fn eq(&self, other: &Self) -> bool { + self.attrs == other.attrs && self.pat == other.pat && self.expr == other.expr + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::ExprLit {} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::ExprLit { + fn eq(&self, other: &Self) -> bool { + self.attrs == other.attrs && self.lit == other.lit + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::ExprLoop {} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::ExprLoop { + fn eq(&self, other: &Self) -> bool { + self.attrs == other.attrs && self.label == other.label && self.body == other.body + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::ExprMacro {} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::ExprMacro { + fn eq(&self, other: &Self) -> bool { + self.attrs == other.attrs && self.mac == other.mac + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::ExprMatch {} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::ExprMatch { + fn eq(&self, other: &Self) -> bool { + self.attrs == other.attrs && self.expr == other.expr && self.arms == other.arms + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::ExprMethodCall {} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::ExprMethodCall { + fn eq(&self, other: &Self) -> bool { + self.attrs == other.attrs && self.receiver == other.receiver + && self.method == other.method && self.turbofish == other.turbofish + && self.args == other.args + } +} +#[cfg(any(feature 
= "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::ExprParen {} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::ExprParen { + fn eq(&self, other: &Self) -> bool { + self.attrs == other.attrs && self.expr == other.expr + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::ExprPath {} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::ExprPath { + fn eq(&self, other: &Self) -> bool { + self.attrs == other.attrs && self.qself == other.qself && self.path == other.path + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::ExprRange {} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::ExprRange { + fn eq(&self, other: &Self) -> bool { + self.attrs == other.attrs && self.start == other.start + && self.limits == other.limits && self.end == other.end + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::ExprRawAddr {} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::ExprRawAddr { + fn eq(&self, other: &Self) -> bool { + self.attrs == other.attrs && self.mutability == other.mutability + && self.expr == other.expr + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::ExprReference {} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::ExprReference { + fn eq(&self, other: &Self) -> bool { + self.attrs == other.attrs && self.mutability == other.mutability + && self.expr == other.expr + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::ExprRepeat {} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::ExprRepeat { + fn eq(&self, other: &Self) -> bool { + self.attrs == other.attrs && self.expr == other.expr && self.len == other.len + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::ExprReturn {} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::ExprReturn { + fn eq(&self, other: &Self) -> bool { + self.attrs == other.attrs && self.expr == other.expr + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::ExprStruct {} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::ExprStruct { + fn eq(&self, other: &Self) -> bool { + self.attrs == other.attrs && self.qself == other.qself && self.path == other.path + && self.fields == other.fields && self.dot2_token == other.dot2_token + && self.rest == other.rest + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::ExprTry {} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::ExprTry { + fn eq(&self, other: &Self) -> bool { + self.attrs == other.attrs && self.expr == 
other.expr + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::ExprTryBlock {} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::ExprTryBlock { + fn eq(&self, other: &Self) -> bool { + self.attrs == other.attrs && self.block == other.block + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::ExprTuple {} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::ExprTuple { + fn eq(&self, other: &Self) -> bool { + self.attrs == other.attrs && self.elems == other.elems + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::ExprUnary {} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::ExprUnary { + fn eq(&self, other: &Self) -> bool { + self.attrs == other.attrs && self.op == other.op && self.expr == other.expr + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::ExprUnsafe {} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::ExprUnsafe { + fn eq(&self, other: &Self) -> bool { + self.attrs == other.attrs && self.block == other.block + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::ExprWhile {} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::ExprWhile { + fn eq(&self, other: &Self) -> bool { + self.attrs == other.attrs && self.label == other.label && self.cond == other.cond + && self.body == other.body + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::ExprYield {} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::ExprYield { + fn eq(&self, other: &Self) -> bool { + self.attrs == other.attrs && self.expr == other.expr + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::Field {} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::Field { + fn eq(&self, other: &Self) -> bool { + self.attrs == other.attrs && self.vis == other.vis + && self.mutability == other.mutability && self.ident == other.ident + && self.colon_token == other.colon_token && self.ty == other.ty + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::FieldMutability {} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::FieldMutability { + fn eq(&self, other: &Self) -> bool { + match (self, other) { + (crate::FieldMutability::None, crate::FieldMutability::None) => true, + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::FieldPat {} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::FieldPat { + fn eq(&self, other: &Self) -> bool { + self.attrs == other.attrs && self.member == other.member + && 
self.colon_token == other.colon_token && self.pat == other.pat + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::FieldValue {} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::FieldValue { + fn eq(&self, other: &Self) -> bool { + self.attrs == other.attrs && self.member == other.member + && self.colon_token == other.colon_token && self.expr == other.expr + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::Fields {} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::Fields { + fn eq(&self, other: &Self) -> bool { + match (self, other) { + (crate::Fields::Named(self0), crate::Fields::Named(other0)) => { + self0 == other0 + } + (crate::Fields::Unnamed(self0), crate::Fields::Unnamed(other0)) => { + self0 == other0 + } + (crate::Fields::Unit, crate::Fields::Unit) => true, + _ => false, + } + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::FieldsNamed {} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::FieldsNamed { + fn eq(&self, other: &Self) -> bool { + self.named == other.named + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::FieldsUnnamed {} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::FieldsUnnamed { + fn eq(&self, other: &Self) -> bool { + self.unnamed == other.unnamed + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::File {} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::File { + fn eq(&self, other: &Self) -> bool { + self.shebang == other.shebang && self.attrs == other.attrs + && self.items == other.items + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::FnArg {} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::FnArg { + fn eq(&self, other: &Self) -> bool { + match (self, other) { + (crate::FnArg::Receiver(self0), crate::FnArg::Receiver(other0)) => { + self0 == other0 + } + (crate::FnArg::Typed(self0), crate::FnArg::Typed(other0)) => self0 == other0, + _ => false, + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::ForeignItem {} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::ForeignItem { + fn eq(&self, other: &Self) -> bool { + match (self, other) { + (crate::ForeignItem::Fn(self0), crate::ForeignItem::Fn(other0)) => { + self0 == other0 + } + (crate::ForeignItem::Static(self0), crate::ForeignItem::Static(other0)) => { + self0 == other0 + } + (crate::ForeignItem::Type(self0), crate::ForeignItem::Type(other0)) => { + self0 == other0 + } + (crate::ForeignItem::Macro(self0), crate::ForeignItem::Macro(other0)) => { + self0 == other0 + } + ( + crate::ForeignItem::Verbatim(self0), + crate::ForeignItem::Verbatim(other0), + ) => TokenStreamHelper(self0) == 
TokenStreamHelper(other0), + _ => false, + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::ForeignItemFn {} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::ForeignItemFn { + fn eq(&self, other: &Self) -> bool { + self.attrs == other.attrs && self.vis == other.vis && self.sig == other.sig + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::ForeignItemMacro {} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::ForeignItemMacro { + fn eq(&self, other: &Self) -> bool { + self.attrs == other.attrs && self.mac == other.mac + && self.semi_token == other.semi_token + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::ForeignItemStatic {} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::ForeignItemStatic { + fn eq(&self, other: &Self) -> bool { + self.attrs == other.attrs && self.vis == other.vis + && self.mutability == other.mutability && self.ident == other.ident + && self.ty == other.ty + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::ForeignItemType {} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::ForeignItemType { + fn eq(&self, other: &Self) -> bool { + self.attrs == other.attrs && self.vis == other.vis && self.ident == other.ident + && self.generics == other.generics + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::GenericArgument {} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::GenericArgument { + fn eq(&self, other: &Self) -> bool { + match (self, other) { + ( + crate::GenericArgument::Lifetime(self0), + crate::GenericArgument::Lifetime(other0), + ) => self0 == other0, + ( + crate::GenericArgument::Type(self0), + crate::GenericArgument::Type(other0), + ) => self0 == other0, + ( + crate::GenericArgument::Const(self0), + crate::GenericArgument::Const(other0), + ) => self0 == other0, + ( + crate::GenericArgument::AssocType(self0), + crate::GenericArgument::AssocType(other0), + ) => self0 == other0, + ( + crate::GenericArgument::AssocConst(self0), + crate::GenericArgument::AssocConst(other0), + ) => self0 == other0, + ( + crate::GenericArgument::Constraint(self0), + crate::GenericArgument::Constraint(other0), + ) => self0 == other0, + _ => false, + } + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::GenericParam {} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::GenericParam { + fn eq(&self, other: &Self) -> bool { + match (self, other) { + ( + crate::GenericParam::Lifetime(self0), + crate::GenericParam::Lifetime(other0), + ) => self0 == other0, + (crate::GenericParam::Type(self0), crate::GenericParam::Type(other0)) => { + self0 == other0 + } + (crate::GenericParam::Const(self0), crate::GenericParam::Const(other0)) => { + self0 == other0 + } + _ => false, + } + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] 
+impl Eq for crate::Generics {} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::Generics { + fn eq(&self, other: &Self) -> bool { + self.lt_token == other.lt_token && self.params == other.params + && self.gt_token == other.gt_token && self.where_clause == other.where_clause + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::ImplItem {} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::ImplItem { + fn eq(&self, other: &Self) -> bool { + match (self, other) { + (crate::ImplItem::Const(self0), crate::ImplItem::Const(other0)) => { + self0 == other0 + } + (crate::ImplItem::Fn(self0), crate::ImplItem::Fn(other0)) => self0 == other0, + (crate::ImplItem::Type(self0), crate::ImplItem::Type(other0)) => { + self0 == other0 + } + (crate::ImplItem::Macro(self0), crate::ImplItem::Macro(other0)) => { + self0 == other0 + } + (crate::ImplItem::Verbatim(self0), crate::ImplItem::Verbatim(other0)) => { + TokenStreamHelper(self0) == TokenStreamHelper(other0) + } + _ => false, + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::ImplItemConst {} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::ImplItemConst { + fn eq(&self, other: &Self) -> bool { + self.attrs == other.attrs && self.vis == other.vis + && self.defaultness == other.defaultness && self.ident == other.ident + && self.generics == other.generics && self.ty == other.ty + && self.expr == other.expr + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::ImplItemFn {} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::ImplItemFn { + fn eq(&self, other: &Self) -> bool { + self.attrs == other.attrs && self.vis == other.vis + && self.defaultness == other.defaultness && self.sig == other.sig + && self.block == other.block + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::ImplItemMacro {} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::ImplItemMacro { + fn eq(&self, other: &Self) -> bool { + self.attrs == other.attrs && self.mac == other.mac + && self.semi_token == other.semi_token + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::ImplItemType {} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::ImplItemType { + fn eq(&self, other: &Self) -> bool { + self.attrs == other.attrs && self.vis == other.vis + && self.defaultness == other.defaultness && self.ident == other.ident + && self.generics == other.generics && self.ty == other.ty + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::ImplRestriction {} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::ImplRestriction { + fn eq(&self, _other: &Self) -> bool { + match *self {} + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::Item {} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::Item { + fn eq(&self, 
other: &Self) -> bool { + match (self, other) { + (crate::Item::Const(self0), crate::Item::Const(other0)) => self0 == other0, + (crate::Item::Enum(self0), crate::Item::Enum(other0)) => self0 == other0, + (crate::Item::ExternCrate(self0), crate::Item::ExternCrate(other0)) => { + self0 == other0 + } + (crate::Item::Fn(self0), crate::Item::Fn(other0)) => self0 == other0, + (crate::Item::ForeignMod(self0), crate::Item::ForeignMod(other0)) => { + self0 == other0 + } + (crate::Item::Impl(self0), crate::Item::Impl(other0)) => self0 == other0, + (crate::Item::Macro(self0), crate::Item::Macro(other0)) => self0 == other0, + (crate::Item::Mod(self0), crate::Item::Mod(other0)) => self0 == other0, + (crate::Item::Static(self0), crate::Item::Static(other0)) => self0 == other0, + (crate::Item::Struct(self0), crate::Item::Struct(other0)) => self0 == other0, + (crate::Item::Trait(self0), crate::Item::Trait(other0)) => self0 == other0, + (crate::Item::TraitAlias(self0), crate::Item::TraitAlias(other0)) => { + self0 == other0 + } + (crate::Item::Type(self0), crate::Item::Type(other0)) => self0 == other0, + (crate::Item::Union(self0), crate::Item::Union(other0)) => self0 == other0, + (crate::Item::Use(self0), crate::Item::Use(other0)) => self0 == other0, + (crate::Item::Verbatim(self0), crate::Item::Verbatim(other0)) => { + TokenStreamHelper(self0) == TokenStreamHelper(other0) + } + _ => false, + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::ItemConst {} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::ItemConst { + fn eq(&self, other: &Self) -> bool { + self.attrs == other.attrs && self.vis == other.vis && self.ident == other.ident + && self.generics == other.generics && self.ty == other.ty + && self.expr == other.expr + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::ItemEnum {} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::ItemEnum { + fn eq(&self, other: &Self) -> bool { + self.attrs == other.attrs && self.vis == other.vis && self.ident == other.ident + && self.generics == other.generics && self.variants == other.variants + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::ItemExternCrate {} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::ItemExternCrate { + fn eq(&self, other: &Self) -> bool { + self.attrs == other.attrs && self.vis == other.vis && self.ident == other.ident + && self.rename == other.rename + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::ItemFn {} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::ItemFn { + fn eq(&self, other: &Self) -> bool { + self.attrs == other.attrs && self.vis == other.vis && self.sig == other.sig + && self.block == other.block + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::ItemForeignMod {} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::ItemForeignMod { + fn eq(&self, other: &Self) -> bool { + self.attrs == other.attrs && self.unsafety == other.unsafety + && self.abi == other.abi && self.items == other.items + } +} +#[cfg(feature = "full")] 
+#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::ItemImpl {} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::ItemImpl { + fn eq(&self, other: &Self) -> bool { + self.attrs == other.attrs && self.defaultness == other.defaultness + && self.unsafety == other.unsafety && self.generics == other.generics + && self.trait_ == other.trait_ && self.self_ty == other.self_ty + && self.items == other.items + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::ItemMacro {} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::ItemMacro { + fn eq(&self, other: &Self) -> bool { + self.attrs == other.attrs && self.ident == other.ident && self.mac == other.mac + && self.semi_token == other.semi_token + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::ItemMod {} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::ItemMod { + fn eq(&self, other: &Self) -> bool { + self.attrs == other.attrs && self.vis == other.vis + && self.unsafety == other.unsafety && self.ident == other.ident + && self.content == other.content && self.semi == other.semi + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::ItemStatic {} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::ItemStatic { + fn eq(&self, other: &Self) -> bool { + self.attrs == other.attrs && self.vis == other.vis + && self.mutability == other.mutability && self.ident == other.ident + && self.ty == other.ty && self.expr == other.expr + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::ItemStruct {} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::ItemStruct { + fn eq(&self, other: &Self) -> bool { + self.attrs == other.attrs && self.vis == other.vis && self.ident == other.ident + && self.generics == other.generics && self.fields == other.fields + && self.semi_token == other.semi_token + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::ItemTrait {} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::ItemTrait { + fn eq(&self, other: &Self) -> bool { + self.attrs == other.attrs && self.vis == other.vis + && self.unsafety == other.unsafety && self.auto_token == other.auto_token + && self.restriction == other.restriction && self.ident == other.ident + && self.generics == other.generics && self.colon_token == other.colon_token + && self.supertraits == other.supertraits && self.items == other.items + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::ItemTraitAlias {} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::ItemTraitAlias { + fn eq(&self, other: &Self) -> bool { + self.attrs == other.attrs && self.vis == other.vis && self.ident == other.ident + && self.generics == other.generics && self.bounds == other.bounds + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::ItemType {} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, 
doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::ItemType { + fn eq(&self, other: &Self) -> bool { + self.attrs == other.attrs && self.vis == other.vis && self.ident == other.ident + && self.generics == other.generics && self.ty == other.ty + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::ItemUnion {} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::ItemUnion { + fn eq(&self, other: &Self) -> bool { + self.attrs == other.attrs && self.vis == other.vis && self.ident == other.ident + && self.generics == other.generics && self.fields == other.fields + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::ItemUse {} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::ItemUse { + fn eq(&self, other: &Self) -> bool { + self.attrs == other.attrs && self.vis == other.vis + && self.leading_colon == other.leading_colon && self.tree == other.tree + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::Label {} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::Label { + fn eq(&self, other: &Self) -> bool { + self.name == other.name + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::LifetimeParam {} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::LifetimeParam { + fn eq(&self, other: &Self) -> bool { + self.attrs == other.attrs && self.lifetime == other.lifetime + && self.colon_token == other.colon_token && self.bounds == other.bounds + } +} +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::Lit {} +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::Lit { + fn eq(&self, other: &Self) -> bool { + match (self, other) { + (crate::Lit::Str(self0), crate::Lit::Str(other0)) => self0 == other0, + (crate::Lit::ByteStr(self0), crate::Lit::ByteStr(other0)) => self0 == other0, + (crate::Lit::CStr(self0), crate::Lit::CStr(other0)) => self0 == other0, + (crate::Lit::Byte(self0), crate::Lit::Byte(other0)) => self0 == other0, + (crate::Lit::Char(self0), crate::Lit::Char(other0)) => self0 == other0, + (crate::Lit::Int(self0), crate::Lit::Int(other0)) => self0 == other0, + (crate::Lit::Float(self0), crate::Lit::Float(other0)) => self0 == other0, + (crate::Lit::Bool(self0), crate::Lit::Bool(other0)) => self0 == other0, + (crate::Lit::Verbatim(self0), crate::Lit::Verbatim(other0)) => { + self0.to_string() == other0.to_string() + } + _ => false, + } + } +} +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::LitBool {} +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::LitBool { + fn eq(&self, other: &Self) -> bool { + self.value == other.value + } +} +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::LitByte {} +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::LitByteStr {} +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::LitCStr {} +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::LitChar {} +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::LitFloat 
{} +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::LitInt {} +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::LitStr {} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::Local {} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::Local { + fn eq(&self, other: &Self) -> bool { + self.attrs == other.attrs && self.pat == other.pat && self.init == other.init + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::LocalInit {} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::LocalInit { + fn eq(&self, other: &Self) -> bool { + self.expr == other.expr && self.diverge == other.diverge + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::Macro {} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::Macro { + fn eq(&self, other: &Self) -> bool { + self.path == other.path && self.delimiter == other.delimiter + && TokenStreamHelper(&self.tokens) == TokenStreamHelper(&other.tokens) + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::MacroDelimiter {} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::MacroDelimiter { + fn eq(&self, other: &Self) -> bool { + match (self, other) { + (crate::MacroDelimiter::Paren(_), crate::MacroDelimiter::Paren(_)) => true, + (crate::MacroDelimiter::Brace(_), crate::MacroDelimiter::Brace(_)) => true, + (crate::MacroDelimiter::Bracket(_), crate::MacroDelimiter::Bracket(_)) => { + true + } + _ => false, + } + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::Meta {} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::Meta { + fn eq(&self, other: &Self) -> bool { + match (self, other) { + (crate::Meta::Path(self0), crate::Meta::Path(other0)) => self0 == other0, + (crate::Meta::List(self0), crate::Meta::List(other0)) => self0 == other0, + (crate::Meta::NameValue(self0), crate::Meta::NameValue(other0)) => { + self0 == other0 + } + _ => false, + } + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::MetaList {} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::MetaList { + fn eq(&self, other: &Self) -> bool { + self.path == other.path && self.delimiter == other.delimiter + && TokenStreamHelper(&self.tokens) == TokenStreamHelper(&other.tokens) + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::MetaNameValue {} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::MetaNameValue { + fn eq(&self, other: &Self) -> bool { + self.path == other.path && self.value == other.value + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] 
+impl Eq for crate::ParenthesizedGenericArguments {} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::ParenthesizedGenericArguments { + fn eq(&self, other: &Self) -> bool { + self.inputs == other.inputs && self.output == other.output + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::Pat {} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::Pat { + fn eq(&self, other: &Self) -> bool { + match (self, other) { + (crate::Pat::Const(self0), crate::Pat::Const(other0)) => self0 == other0, + (crate::Pat::Ident(self0), crate::Pat::Ident(other0)) => self0 == other0, + (crate::Pat::Lit(self0), crate::Pat::Lit(other0)) => self0 == other0, + (crate::Pat::Macro(self0), crate::Pat::Macro(other0)) => self0 == other0, + (crate::Pat::Or(self0), crate::Pat::Or(other0)) => self0 == other0, + (crate::Pat::Paren(self0), crate::Pat::Paren(other0)) => self0 == other0, + (crate::Pat::Path(self0), crate::Pat::Path(other0)) => self0 == other0, + (crate::Pat::Range(self0), crate::Pat::Range(other0)) => self0 == other0, + (crate::Pat::Reference(self0), crate::Pat::Reference(other0)) => { + self0 == other0 + } + (crate::Pat::Rest(self0), crate::Pat::Rest(other0)) => self0 == other0, + (crate::Pat::Slice(self0), crate::Pat::Slice(other0)) => self0 == other0, + (crate::Pat::Struct(self0), crate::Pat::Struct(other0)) => self0 == other0, + (crate::Pat::Tuple(self0), crate::Pat::Tuple(other0)) => self0 == other0, + (crate::Pat::TupleStruct(self0), crate::Pat::TupleStruct(other0)) => { + self0 == other0 + } + (crate::Pat::Type(self0), crate::Pat::Type(other0)) => self0 == other0, + (crate::Pat::Verbatim(self0), crate::Pat::Verbatim(other0)) => { + TokenStreamHelper(self0) == TokenStreamHelper(other0) + } + (crate::Pat::Wild(self0), crate::Pat::Wild(other0)) => self0 == other0, + _ => false, + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::PatIdent {} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::PatIdent { + fn eq(&self, other: &Self) -> bool { + self.attrs == other.attrs && self.by_ref == other.by_ref + && self.mutability == other.mutability && self.ident == other.ident + && self.subpat == other.subpat + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::PatOr {} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::PatOr { + fn eq(&self, other: &Self) -> bool { + self.attrs == other.attrs && self.leading_vert == other.leading_vert + && self.cases == other.cases + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::PatParen {} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::PatParen { + fn eq(&self, other: &Self) -> bool { + self.attrs == other.attrs && self.pat == other.pat + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::PatReference {} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::PatReference { + fn eq(&self, other: &Self) -> bool { + self.attrs == other.attrs && self.mutability == other.mutability + && self.pat == other.pat + } +} 
+#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::PatRest {} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::PatRest { + fn eq(&self, other: &Self) -> bool { + self.attrs == other.attrs + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::PatSlice {} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::PatSlice { + fn eq(&self, other: &Self) -> bool { + self.attrs == other.attrs && self.elems == other.elems + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::PatStruct {} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::PatStruct { + fn eq(&self, other: &Self) -> bool { + self.attrs == other.attrs && self.qself == other.qself && self.path == other.path + && self.fields == other.fields && self.rest == other.rest + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::PatTuple {} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::PatTuple { + fn eq(&self, other: &Self) -> bool { + self.attrs == other.attrs && self.elems == other.elems + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::PatTupleStruct {} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::PatTupleStruct { + fn eq(&self, other: &Self) -> bool { + self.attrs == other.attrs && self.qself == other.qself && self.path == other.path + && self.elems == other.elems + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::PatType {} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::PatType { + fn eq(&self, other: &Self) -> bool { + self.attrs == other.attrs && self.pat == other.pat && self.ty == other.ty + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::PatWild {} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::PatWild { + fn eq(&self, other: &Self) -> bool { + self.attrs == other.attrs + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::Path {} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::Path { + fn eq(&self, other: &Self) -> bool { + self.leading_colon == other.leading_colon && self.segments == other.segments + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::PathArguments {} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::PathArguments { + fn eq(&self, other: &Self) -> bool { + match (self, other) { + (crate::PathArguments::None, crate::PathArguments::None) => true, + ( + crate::PathArguments::AngleBracketed(self0), + crate::PathArguments::AngleBracketed(other0), + ) => self0 == other0, + ( + crate::PathArguments::Parenthesized(self0), + crate::PathArguments::Parenthesized(other0), + ) => self0 == 
other0, + _ => false, + } + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::PathSegment {} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::PathSegment { + fn eq(&self, other: &Self) -> bool { + self.ident == other.ident && self.arguments == other.arguments + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::PointerMutability {} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::PointerMutability { + fn eq(&self, other: &Self) -> bool { + match (self, other) { + (crate::PointerMutability::Const(_), crate::PointerMutability::Const(_)) => { + true + } + (crate::PointerMutability::Mut(_), crate::PointerMutability::Mut(_)) => true, + _ => false, + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::PreciseCapture {} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::PreciseCapture { + fn eq(&self, other: &Self) -> bool { + self.params == other.params + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::PredicateLifetime {} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::PredicateLifetime { + fn eq(&self, other: &Self) -> bool { + self.lifetime == other.lifetime && self.bounds == other.bounds + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::PredicateType {} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::PredicateType { + fn eq(&self, other: &Self) -> bool { + self.lifetimes == other.lifetimes && self.bounded_ty == other.bounded_ty + && self.bounds == other.bounds + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::QSelf {} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::QSelf { + fn eq(&self, other: &Self) -> bool { + self.ty == other.ty && self.position == other.position + && self.as_token == other.as_token + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::RangeLimits {} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::RangeLimits { + fn eq(&self, other: &Self) -> bool { + match (self, other) { + (crate::RangeLimits::HalfOpen(_), crate::RangeLimits::HalfOpen(_)) => true, + (crate::RangeLimits::Closed(_), crate::RangeLimits::Closed(_)) => true, + _ => false, + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::Receiver {} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::Receiver { + fn eq(&self, other: &Self) -> bool { + self.attrs == other.attrs && self.reference == other.reference + && self.mutability == other.mutability + && self.colon_token == other.colon_token && self.ty == other.ty + } +} +#[cfg(any(feature = "derive", feature = "full"))] 
+#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::ReturnType {} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::ReturnType { + fn eq(&self, other: &Self) -> bool { + match (self, other) { + (crate::ReturnType::Default, crate::ReturnType::Default) => true, + (crate::ReturnType::Type(_, self1), crate::ReturnType::Type(_, other1)) => { + self1 == other1 + } + _ => false, + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::Signature {} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::Signature { + fn eq(&self, other: &Self) -> bool { + self.constness == other.constness && self.asyncness == other.asyncness + && self.unsafety == other.unsafety && self.abi == other.abi + && self.ident == other.ident && self.generics == other.generics + && self.inputs == other.inputs && self.variadic == other.variadic + && self.output == other.output + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::StaticMutability {} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::StaticMutability { + fn eq(&self, other: &Self) -> bool { + match (self, other) { + (crate::StaticMutability::Mut(_), crate::StaticMutability::Mut(_)) => true, + (crate::StaticMutability::None, crate::StaticMutability::None) => true, + _ => false, + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::Stmt {} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::Stmt { + fn eq(&self, other: &Self) -> bool { + match (self, other) { + (crate::Stmt::Local(self0), crate::Stmt::Local(other0)) => self0 == other0, + (crate::Stmt::Item(self0), crate::Stmt::Item(other0)) => self0 == other0, + (crate::Stmt::Expr(self0, self1), crate::Stmt::Expr(other0, other1)) => { + self0 == other0 && self1 == other1 + } + (crate::Stmt::Macro(self0), crate::Stmt::Macro(other0)) => self0 == other0, + _ => false, + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::StmtMacro {} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::StmtMacro { + fn eq(&self, other: &Self) -> bool { + self.attrs == other.attrs && self.mac == other.mac + && self.semi_token == other.semi_token + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::TraitBound {} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::TraitBound { + fn eq(&self, other: &Self) -> bool { + self.paren_token == other.paren_token && self.modifier == other.modifier + && self.lifetimes == other.lifetimes && self.path == other.path + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::TraitBoundModifier {} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::TraitBoundModifier { + fn eq(&self, other: &Self) -> bool { + match (self, other) { + (crate::TraitBoundModifier::None, crate::TraitBoundModifier::None) => true, + ( + 
crate::TraitBoundModifier::Maybe(_), + crate::TraitBoundModifier::Maybe(_), + ) => true, + _ => false, + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::TraitItem {} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::TraitItem { + fn eq(&self, other: &Self) -> bool { + match (self, other) { + (crate::TraitItem::Const(self0), crate::TraitItem::Const(other0)) => { + self0 == other0 + } + (crate::TraitItem::Fn(self0), crate::TraitItem::Fn(other0)) => { + self0 == other0 + } + (crate::TraitItem::Type(self0), crate::TraitItem::Type(other0)) => { + self0 == other0 + } + (crate::TraitItem::Macro(self0), crate::TraitItem::Macro(other0)) => { + self0 == other0 + } + (crate::TraitItem::Verbatim(self0), crate::TraitItem::Verbatim(other0)) => { + TokenStreamHelper(self0) == TokenStreamHelper(other0) + } + _ => false, + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::TraitItemConst {} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::TraitItemConst { + fn eq(&self, other: &Self) -> bool { + self.attrs == other.attrs && self.ident == other.ident + && self.generics == other.generics && self.ty == other.ty + && self.default == other.default + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::TraitItemFn {} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::TraitItemFn { + fn eq(&self, other: &Self) -> bool { + self.attrs == other.attrs && self.sig == other.sig + && self.default == other.default && self.semi_token == other.semi_token + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::TraitItemMacro {} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::TraitItemMacro { + fn eq(&self, other: &Self) -> bool { + self.attrs == other.attrs && self.mac == other.mac + && self.semi_token == other.semi_token + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::TraitItemType {} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::TraitItemType { + fn eq(&self, other: &Self) -> bool { + self.attrs == other.attrs && self.ident == other.ident + && self.generics == other.generics && self.colon_token == other.colon_token + && self.bounds == other.bounds && self.default == other.default + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::Type {} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::Type { + fn eq(&self, other: &Self) -> bool { + match (self, other) { + (crate::Type::Array(self0), crate::Type::Array(other0)) => self0 == other0, + (crate::Type::BareFn(self0), crate::Type::BareFn(other0)) => self0 == other0, + (crate::Type::Group(self0), crate::Type::Group(other0)) => self0 == other0, + (crate::Type::ImplTrait(self0), crate::Type::ImplTrait(other0)) => { + self0 == other0 + } + (crate::Type::Infer(self0), crate::Type::Infer(other0)) => self0 == other0, + (crate::Type::Macro(self0), crate::Type::Macro(other0)) => self0 == other0, + (crate::Type::Never(self0), 
crate::Type::Never(other0)) => self0 == other0, + (crate::Type::Paren(self0), crate::Type::Paren(other0)) => self0 == other0, + (crate::Type::Path(self0), crate::Type::Path(other0)) => self0 == other0, + (crate::Type::Ptr(self0), crate::Type::Ptr(other0)) => self0 == other0, + (crate::Type::Reference(self0), crate::Type::Reference(other0)) => { + self0 == other0 + } + (crate::Type::Slice(self0), crate::Type::Slice(other0)) => self0 == other0, + (crate::Type::TraitObject(self0), crate::Type::TraitObject(other0)) => { + self0 == other0 + } + (crate::Type::Tuple(self0), crate::Type::Tuple(other0)) => self0 == other0, + (crate::Type::Verbatim(self0), crate::Type::Verbatim(other0)) => { + TokenStreamHelper(self0) == TokenStreamHelper(other0) + } + _ => false, + } + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::TypeArray {} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::TypeArray { + fn eq(&self, other: &Self) -> bool { + self.elem == other.elem && self.len == other.len + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::TypeBareFn {} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::TypeBareFn { + fn eq(&self, other: &Self) -> bool { + self.lifetimes == other.lifetimes && self.unsafety == other.unsafety + && self.abi == other.abi && self.inputs == other.inputs + && self.variadic == other.variadic && self.output == other.output + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::TypeGroup {} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::TypeGroup { + fn eq(&self, other: &Self) -> bool { + self.elem == other.elem + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::TypeImplTrait {} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::TypeImplTrait { + fn eq(&self, other: &Self) -> bool { + self.bounds == other.bounds + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::TypeInfer {} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::TypeInfer { + fn eq(&self, _other: &Self) -> bool { + true + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::TypeMacro {} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::TypeMacro { + fn eq(&self, other: &Self) -> bool { + self.mac == other.mac + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::TypeNever {} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::TypeNever { + fn eq(&self, _other: &Self) -> bool { + true + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature 
= "extra-traits")))] +impl Eq for crate::TypeParam {} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::TypeParam { + fn eq(&self, other: &Self) -> bool { + self.attrs == other.attrs && self.ident == other.ident + && self.colon_token == other.colon_token && self.bounds == other.bounds + && self.eq_token == other.eq_token && self.default == other.default + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::TypeParamBound {} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::TypeParamBound { + fn eq(&self, other: &Self) -> bool { + match (self, other) { + ( + crate::TypeParamBound::Trait(self0), + crate::TypeParamBound::Trait(other0), + ) => self0 == other0, + ( + crate::TypeParamBound::Lifetime(self0), + crate::TypeParamBound::Lifetime(other0), + ) => self0 == other0, + #[cfg(feature = "full")] + ( + crate::TypeParamBound::PreciseCapture(self0), + crate::TypeParamBound::PreciseCapture(other0), + ) => self0 == other0, + ( + crate::TypeParamBound::Verbatim(self0), + crate::TypeParamBound::Verbatim(other0), + ) => TokenStreamHelper(self0) == TokenStreamHelper(other0), + _ => false, + } + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::TypeParen {} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::TypeParen { + fn eq(&self, other: &Self) -> bool { + self.elem == other.elem + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::TypePath {} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::TypePath { + fn eq(&self, other: &Self) -> bool { + self.qself == other.qself && self.path == other.path + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::TypePtr {} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::TypePtr { + fn eq(&self, other: &Self) -> bool { + self.const_token == other.const_token && self.mutability == other.mutability + && self.elem == other.elem + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::TypeReference {} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::TypeReference { + fn eq(&self, other: &Self) -> bool { + self.lifetime == other.lifetime && self.mutability == other.mutability + && self.elem == other.elem + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::TypeSlice {} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::TypeSlice { + fn eq(&self, other: &Self) -> bool { + self.elem == other.elem + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::TypeTraitObject {} +#[cfg(any(feature = "derive", feature = "full"))] 
+#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::TypeTraitObject { + fn eq(&self, other: &Self) -> bool { + self.dyn_token == other.dyn_token && self.bounds == other.bounds + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::TypeTuple {} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::TypeTuple { + fn eq(&self, other: &Self) -> bool { + self.elems == other.elems + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::UnOp {} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::UnOp { + fn eq(&self, other: &Self) -> bool { + match (self, other) { + (crate::UnOp::Deref(_), crate::UnOp::Deref(_)) => true, + (crate::UnOp::Not(_), crate::UnOp::Not(_)) => true, + (crate::UnOp::Neg(_), crate::UnOp::Neg(_)) => true, + _ => false, + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::UseGlob {} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::UseGlob { + fn eq(&self, _other: &Self) -> bool { + true + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::UseGroup {} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::UseGroup { + fn eq(&self, other: &Self) -> bool { + self.items == other.items + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::UseName {} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::UseName { + fn eq(&self, other: &Self) -> bool { + self.ident == other.ident + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::UsePath {} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::UsePath { + fn eq(&self, other: &Self) -> bool { + self.ident == other.ident && self.tree == other.tree + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::UseRename {} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::UseRename { + fn eq(&self, other: &Self) -> bool { + self.ident == other.ident && self.rename == other.rename + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::UseTree {} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::UseTree { + fn eq(&self, other: &Self) -> bool { + match (self, other) { + (crate::UseTree::Path(self0), crate::UseTree::Path(other0)) => { + self0 == other0 + } + (crate::UseTree::Name(self0), crate::UseTree::Name(other0)) => { + self0 == other0 + } + (crate::UseTree::Rename(self0), crate::UseTree::Rename(other0)) => { + self0 == other0 + } + (crate::UseTree::Glob(self0), crate::UseTree::Glob(other0)) => { + self0 == other0 + } + (crate::UseTree::Group(self0), crate::UseTree::Group(other0)) => { + self0 == other0 + } + _ => false, + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl 
Eq for crate::Variadic {} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::Variadic { + fn eq(&self, other: &Self) -> bool { + self.attrs == other.attrs && self.pat == other.pat && self.comma == other.comma + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::Variant {} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::Variant { + fn eq(&self, other: &Self) -> bool { + self.attrs == other.attrs && self.ident == other.ident + && self.fields == other.fields && self.discriminant == other.discriminant + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::VisRestricted {} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::VisRestricted { + fn eq(&self, other: &Self) -> bool { + self.in_token == other.in_token && self.path == other.path + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::Visibility {} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::Visibility { + fn eq(&self, other: &Self) -> bool { + match (self, other) { + (crate::Visibility::Public(_), crate::Visibility::Public(_)) => true, + ( + crate::Visibility::Restricted(self0), + crate::Visibility::Restricted(other0), + ) => self0 == other0, + (crate::Visibility::Inherited, crate::Visibility::Inherited) => true, + _ => false, + } + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::WhereClause {} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::WhereClause { + fn eq(&self, other: &Self) -> bool { + self.predicates == other.predicates + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for crate::WherePredicate {} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for crate::WherePredicate { + fn eq(&self, other: &Self) -> bool { + match (self, other) { + ( + crate::WherePredicate::Lifetime(self0), + crate::WherePredicate::Lifetime(other0), + ) => self0 == other0, + (crate::WherePredicate::Type(self0), crate::WherePredicate::Type(other0)) => { + self0 == other0 + } + _ => false, + } + } +} diff --git a/vendor/syn/src/gen/fold.rs b/vendor/syn/src/gen/fold.rs new file mode 100644 index 00000000000000..1f0afd31919d9c --- /dev/null +++ b/vendor/syn/src/gen/fold.rs @@ -0,0 +1,3902 @@ +// This file is @generated by syn-internal-codegen. +// It is not intended for manual editing. + +#![allow(unreachable_code, unused_variables)] +#![allow( + clippy::match_wildcard_for_single_variants, + clippy::needless_match, + clippy::needless_pass_by_ref_mut, +)] +#[cfg(feature = "full")] +macro_rules! full { + ($e:expr) => { + $e + }; +} +#[cfg(all(feature = "derive", not(feature = "full")))] +macro_rules! full { + ($e:expr) => { + unreachable!() + }; +} +/// Syntax tree traversal to transform the nodes of an owned syntax tree. +/// +/// See the [module documentation] for details. 
+/// +/// [module documentation]: self +pub trait Fold { + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn fold_abi(&mut self, i: crate::Abi) -> crate::Abi { + fold_abi(self, i) + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn fold_angle_bracketed_generic_arguments( + &mut self, + i: crate::AngleBracketedGenericArguments, + ) -> crate::AngleBracketedGenericArguments { + fold_angle_bracketed_generic_arguments(self, i) + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn fold_arm(&mut self, i: crate::Arm) -> crate::Arm { + fold_arm(self, i) + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn fold_assoc_const(&mut self, i: crate::AssocConst) -> crate::AssocConst { + fold_assoc_const(self, i) + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn fold_assoc_type(&mut self, i: crate::AssocType) -> crate::AssocType { + fold_assoc_type(self, i) + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn fold_attr_style(&mut self, i: crate::AttrStyle) -> crate::AttrStyle { + fold_attr_style(self, i) + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn fold_attribute(&mut self, i: crate::Attribute) -> crate::Attribute { + fold_attribute(self, i) + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn fold_attributes(&mut self, i: Vec<crate::Attribute>) -> Vec<crate::Attribute> { + fold_vec(i, self, Self::fold_attribute) + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn fold_bare_fn_arg(&mut self, i: crate::BareFnArg) -> crate::BareFnArg { + fold_bare_fn_arg(self, i) + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn fold_bare_variadic(&mut self, i: crate::BareVariadic) -> crate::BareVariadic { + fold_bare_variadic(self, i) + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn fold_bin_op(&mut self, i: crate::BinOp) -> crate::BinOp { + fold_bin_op(self, i) + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn fold_block(&mut self, i: crate::Block) -> crate::Block { + fold_block(self, i) + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn fold_bound_lifetimes( + &mut self, + i: crate::BoundLifetimes, + ) -> crate::BoundLifetimes { + fold_bound_lifetimes(self, i) + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn fold_captured_param(&mut self, i: crate::CapturedParam) -> crate::CapturedParam { + fold_captured_param(self, i) + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn fold_const_param(&mut self, i: crate::ConstParam) -> crate::ConstParam { + fold_const_param(self, i) + } + #[cfg(any(feature = "derive", feature = "full"))] 
+ #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn fold_constraint(&mut self, i: crate::Constraint) -> crate::Constraint { + fold_constraint(self, i) + } + #[cfg(feature = "derive")] + #[cfg_attr(docsrs, doc(cfg(feature = "derive")))] + fn fold_data(&mut self, i: crate::Data) -> crate::Data { + fold_data(self, i) + } + #[cfg(feature = "derive")] + #[cfg_attr(docsrs, doc(cfg(feature = "derive")))] + fn fold_data_enum(&mut self, i: crate::DataEnum) -> crate::DataEnum { + fold_data_enum(self, i) + } + #[cfg(feature = "derive")] + #[cfg_attr(docsrs, doc(cfg(feature = "derive")))] + fn fold_data_struct(&mut self, i: crate::DataStruct) -> crate::DataStruct { + fold_data_struct(self, i) + } + #[cfg(feature = "derive")] + #[cfg_attr(docsrs, doc(cfg(feature = "derive")))] + fn fold_data_union(&mut self, i: crate::DataUnion) -> crate::DataUnion { + fold_data_union(self, i) + } + #[cfg(feature = "derive")] + #[cfg_attr(docsrs, doc(cfg(feature = "derive")))] + fn fold_derive_input(&mut self, i: crate::DeriveInput) -> crate::DeriveInput { + fold_derive_input(self, i) + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn fold_expr(&mut self, i: crate::Expr) -> crate::Expr { + fold_expr(self, i) + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn fold_expr_array(&mut self, i: crate::ExprArray) -> crate::ExprArray { + fold_expr_array(self, i) + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn fold_expr_assign(&mut self, i: crate::ExprAssign) -> crate::ExprAssign { + fold_expr_assign(self, i) + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn fold_expr_async(&mut self, i: crate::ExprAsync) -> crate::ExprAsync { + fold_expr_async(self, i) + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn fold_expr_await(&mut self, i: crate::ExprAwait) -> crate::ExprAwait { + fold_expr_await(self, i) + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn fold_expr_binary(&mut self, i: crate::ExprBinary) -> crate::ExprBinary { + fold_expr_binary(self, i) + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn fold_expr_block(&mut self, i: crate::ExprBlock) -> crate::ExprBlock { + fold_expr_block(self, i) + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn fold_expr_break(&mut self, i: crate::ExprBreak) -> crate::ExprBreak { + fold_expr_break(self, i) + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn fold_expr_call(&mut self, i: crate::ExprCall) -> crate::ExprCall { + fold_expr_call(self, i) + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn fold_expr_cast(&mut self, i: crate::ExprCast) -> crate::ExprCast { + fold_expr_cast(self, i) + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn fold_expr_closure(&mut self, i: crate::ExprClosure) -> crate::ExprClosure { + fold_expr_closure(self, i) + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn fold_expr_const(&mut self, i: crate::ExprConst) -> crate::ExprConst { + fold_expr_const(self, i) + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = 
"full")))] + fn fold_expr_continue(&mut self, i: crate::ExprContinue) -> crate::ExprContinue { + fold_expr_continue(self, i) + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn fold_expr_field(&mut self, i: crate::ExprField) -> crate::ExprField { + fold_expr_field(self, i) + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn fold_expr_for_loop(&mut self, i: crate::ExprForLoop) -> crate::ExprForLoop { + fold_expr_for_loop(self, i) + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn fold_expr_group(&mut self, i: crate::ExprGroup) -> crate::ExprGroup { + fold_expr_group(self, i) + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn fold_expr_if(&mut self, i: crate::ExprIf) -> crate::ExprIf { + fold_expr_if(self, i) + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn fold_expr_index(&mut self, i: crate::ExprIndex) -> crate::ExprIndex { + fold_expr_index(self, i) + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn fold_expr_infer(&mut self, i: crate::ExprInfer) -> crate::ExprInfer { + fold_expr_infer(self, i) + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn fold_expr_let(&mut self, i: crate::ExprLet) -> crate::ExprLet { + fold_expr_let(self, i) + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn fold_expr_lit(&mut self, i: crate::ExprLit) -> crate::ExprLit { + fold_expr_lit(self, i) + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn fold_expr_loop(&mut self, i: crate::ExprLoop) -> crate::ExprLoop { + fold_expr_loop(self, i) + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn fold_expr_macro(&mut self, i: crate::ExprMacro) -> crate::ExprMacro { + fold_expr_macro(self, i) + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn fold_expr_match(&mut self, i: crate::ExprMatch) -> crate::ExprMatch { + fold_expr_match(self, i) + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn fold_expr_method_call( + &mut self, + i: crate::ExprMethodCall, + ) -> crate::ExprMethodCall { + fold_expr_method_call(self, i) + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn fold_expr_paren(&mut self, i: crate::ExprParen) -> crate::ExprParen { + fold_expr_paren(self, i) + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn fold_expr_path(&mut self, i: crate::ExprPath) -> crate::ExprPath { + fold_expr_path(self, i) + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn fold_expr_range(&mut self, i: crate::ExprRange) -> crate::ExprRange { + fold_expr_range(self, i) + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn fold_expr_raw_addr(&mut self, i: crate::ExprRawAddr) -> crate::ExprRawAddr { + fold_expr_raw_addr(self, i) + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, 
doc(cfg(any(feature = "derive", feature = "full"))))] + fn fold_expr_reference(&mut self, i: crate::ExprReference) -> crate::ExprReference { + fold_expr_reference(self, i) + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn fold_expr_repeat(&mut self, i: crate::ExprRepeat) -> crate::ExprRepeat { + fold_expr_repeat(self, i) + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn fold_expr_return(&mut self, i: crate::ExprReturn) -> crate::ExprReturn { + fold_expr_return(self, i) + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn fold_expr_struct(&mut self, i: crate::ExprStruct) -> crate::ExprStruct { + fold_expr_struct(self, i) + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn fold_expr_try(&mut self, i: crate::ExprTry) -> crate::ExprTry { + fold_expr_try(self, i) + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn fold_expr_try_block(&mut self, i: crate::ExprTryBlock) -> crate::ExprTryBlock { + fold_expr_try_block(self, i) + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn fold_expr_tuple(&mut self, i: crate::ExprTuple) -> crate::ExprTuple { + fold_expr_tuple(self, i) + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn fold_expr_unary(&mut self, i: crate::ExprUnary) -> crate::ExprUnary { + fold_expr_unary(self, i) + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn fold_expr_unsafe(&mut self, i: crate::ExprUnsafe) -> crate::ExprUnsafe { + fold_expr_unsafe(self, i) + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn fold_expr_while(&mut self, i: crate::ExprWhile) -> crate::ExprWhile { + fold_expr_while(self, i) + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn fold_expr_yield(&mut self, i: crate::ExprYield) -> crate::ExprYield { + fold_expr_yield(self, i) + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn fold_field(&mut self, i: crate::Field) -> crate::Field { + fold_field(self, i) + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn fold_field_mutability( + &mut self, + i: crate::FieldMutability, + ) -> crate::FieldMutability { + fold_field_mutability(self, i) + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn fold_field_pat(&mut self, i: crate::FieldPat) -> crate::FieldPat { + fold_field_pat(self, i) + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn fold_field_value(&mut self, i: crate::FieldValue) -> crate::FieldValue { + fold_field_value(self, i) + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn fold_fields(&mut self, i: crate::Fields) -> crate::Fields { + fold_fields(self, i) + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn fold_fields_named(&mut self, i: crate::FieldsNamed) -> crate::FieldsNamed { + fold_fields_named(self, i) + } + #[cfg(any(feature 
= "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn fold_fields_unnamed(&mut self, i: crate::FieldsUnnamed) -> crate::FieldsUnnamed { + fold_fields_unnamed(self, i) + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn fold_file(&mut self, i: crate::File) -> crate::File { + fold_file(self, i) + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn fold_fn_arg(&mut self, i: crate::FnArg) -> crate::FnArg { + fold_fn_arg(self, i) + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn fold_foreign_item(&mut self, i: crate::ForeignItem) -> crate::ForeignItem { + fold_foreign_item(self, i) + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn fold_foreign_item_fn(&mut self, i: crate::ForeignItemFn) -> crate::ForeignItemFn { + fold_foreign_item_fn(self, i) + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn fold_foreign_item_macro( + &mut self, + i: crate::ForeignItemMacro, + ) -> crate::ForeignItemMacro { + fold_foreign_item_macro(self, i) + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn fold_foreign_item_static( + &mut self, + i: crate::ForeignItemStatic, + ) -> crate::ForeignItemStatic { + fold_foreign_item_static(self, i) + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn fold_foreign_item_type( + &mut self, + i: crate::ForeignItemType, + ) -> crate::ForeignItemType { + fold_foreign_item_type(self, i) + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn fold_generic_argument( + &mut self, + i: crate::GenericArgument, + ) -> crate::GenericArgument { + fold_generic_argument(self, i) + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn fold_generic_param(&mut self, i: crate::GenericParam) -> crate::GenericParam { + fold_generic_param(self, i) + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn fold_generics(&mut self, i: crate::Generics) -> crate::Generics { + fold_generics(self, i) + } + fn fold_ident(&mut self, i: proc_macro2::Ident) -> proc_macro2::Ident { + fold_ident(self, i) + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn fold_impl_item(&mut self, i: crate::ImplItem) -> crate::ImplItem { + fold_impl_item(self, i) + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn fold_impl_item_const(&mut self, i: crate::ImplItemConst) -> crate::ImplItemConst { + fold_impl_item_const(self, i) + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn fold_impl_item_fn(&mut self, i: crate::ImplItemFn) -> crate::ImplItemFn { + fold_impl_item_fn(self, i) + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn fold_impl_item_macro(&mut self, i: crate::ImplItemMacro) -> crate::ImplItemMacro { + fold_impl_item_macro(self, i) + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn fold_impl_item_type(&mut self, i: crate::ImplItemType) -> crate::ImplItemType { + fold_impl_item_type(self, i) + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn fold_impl_restriction( + &mut self, + i: 
crate::ImplRestriction, + ) -> crate::ImplRestriction { + fold_impl_restriction(self, i) + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn fold_index(&mut self, i: crate::Index) -> crate::Index { + fold_index(self, i) + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn fold_item(&mut self, i: crate::Item) -> crate::Item { + fold_item(self, i) + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn fold_item_const(&mut self, i: crate::ItemConst) -> crate::ItemConst { + fold_item_const(self, i) + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn fold_item_enum(&mut self, i: crate::ItemEnum) -> crate::ItemEnum { + fold_item_enum(self, i) + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn fold_item_extern_crate( + &mut self, + i: crate::ItemExternCrate, + ) -> crate::ItemExternCrate { + fold_item_extern_crate(self, i) + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn fold_item_fn(&mut self, i: crate::ItemFn) -> crate::ItemFn { + fold_item_fn(self, i) + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn fold_item_foreign_mod( + &mut self, + i: crate::ItemForeignMod, + ) -> crate::ItemForeignMod { + fold_item_foreign_mod(self, i) + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn fold_item_impl(&mut self, i: crate::ItemImpl) -> crate::ItemImpl { + fold_item_impl(self, i) + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn fold_item_macro(&mut self, i: crate::ItemMacro) -> crate::ItemMacro { + fold_item_macro(self, i) + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn fold_item_mod(&mut self, i: crate::ItemMod) -> crate::ItemMod { + fold_item_mod(self, i) + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn fold_item_static(&mut self, i: crate::ItemStatic) -> crate::ItemStatic { + fold_item_static(self, i) + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn fold_item_struct(&mut self, i: crate::ItemStruct) -> crate::ItemStruct { + fold_item_struct(self, i) + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn fold_item_trait(&mut self, i: crate::ItemTrait) -> crate::ItemTrait { + fold_item_trait(self, i) + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn fold_item_trait_alias( + &mut self, + i: crate::ItemTraitAlias, + ) -> crate::ItemTraitAlias { + fold_item_trait_alias(self, i) + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn fold_item_type(&mut self, i: crate::ItemType) -> crate::ItemType { + fold_item_type(self, i) + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn fold_item_union(&mut self, i: crate::ItemUnion) -> crate::ItemUnion { + fold_item_union(self, i) + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn fold_item_use(&mut self, i: crate::ItemUse) -> crate::ItemUse { + fold_item_use(self, i) + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn fold_label(&mut self, i: crate::Label) -> crate::Label { + fold_label(self, i) + } + fn fold_lifetime(&mut self, i: crate::Lifetime) -> crate::Lifetime { + fold_lifetime(self, i) + } + 
#[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn fold_lifetime_param(&mut self, i: crate::LifetimeParam) -> crate::LifetimeParam { + fold_lifetime_param(self, i) + } + fn fold_lit(&mut self, i: crate::Lit) -> crate::Lit { + fold_lit(self, i) + } + fn fold_lit_bool(&mut self, i: crate::LitBool) -> crate::LitBool { + fold_lit_bool(self, i) + } + fn fold_lit_byte(&mut self, i: crate::LitByte) -> crate::LitByte { + fold_lit_byte(self, i) + } + fn fold_lit_byte_str(&mut self, i: crate::LitByteStr) -> crate::LitByteStr { + fold_lit_byte_str(self, i) + } + fn fold_lit_cstr(&mut self, i: crate::LitCStr) -> crate::LitCStr { + fold_lit_cstr(self, i) + } + fn fold_lit_char(&mut self, i: crate::LitChar) -> crate::LitChar { + fold_lit_char(self, i) + } + fn fold_lit_float(&mut self, i: crate::LitFloat) -> crate::LitFloat { + fold_lit_float(self, i) + } + fn fold_lit_int(&mut self, i: crate::LitInt) -> crate::LitInt { + fold_lit_int(self, i) + } + fn fold_lit_str(&mut self, i: crate::LitStr) -> crate::LitStr { + fold_lit_str(self, i) + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn fold_local(&mut self, i: crate::Local) -> crate::Local { + fold_local(self, i) + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn fold_local_init(&mut self, i: crate::LocalInit) -> crate::LocalInit { + fold_local_init(self, i) + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn fold_macro(&mut self, i: crate::Macro) -> crate::Macro { + fold_macro(self, i) + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn fold_macro_delimiter( + &mut self, + i: crate::MacroDelimiter, + ) -> crate::MacroDelimiter { + fold_macro_delimiter(self, i) + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn fold_member(&mut self, i: crate::Member) -> crate::Member { + fold_member(self, i) + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn fold_meta(&mut self, i: crate::Meta) -> crate::Meta { + fold_meta(self, i) + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn fold_meta_list(&mut self, i: crate::MetaList) -> crate::MetaList { + fold_meta_list(self, i) + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn fold_meta_name_value(&mut self, i: crate::MetaNameValue) -> crate::MetaNameValue { + fold_meta_name_value(self, i) + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn fold_parenthesized_generic_arguments( + &mut self, + i: crate::ParenthesizedGenericArguments, + ) -> crate::ParenthesizedGenericArguments { + fold_parenthesized_generic_arguments(self, i) + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn fold_pat(&mut self, i: crate::Pat) -> crate::Pat { + fold_pat(self, i) + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn fold_pat_ident(&mut self, i: crate::PatIdent) -> crate::PatIdent { + fold_pat_ident(self, i) + } + #[cfg(feature = "full")] + 
#[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn fold_pat_or(&mut self, i: crate::PatOr) -> crate::PatOr { + fold_pat_or(self, i) + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn fold_pat_paren(&mut self, i: crate::PatParen) -> crate::PatParen { + fold_pat_paren(self, i) + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn fold_pat_reference(&mut self, i: crate::PatReference) -> crate::PatReference { + fold_pat_reference(self, i) + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn fold_pat_rest(&mut self, i: crate::PatRest) -> crate::PatRest { + fold_pat_rest(self, i) + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn fold_pat_slice(&mut self, i: crate::PatSlice) -> crate::PatSlice { + fold_pat_slice(self, i) + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn fold_pat_struct(&mut self, i: crate::PatStruct) -> crate::PatStruct { + fold_pat_struct(self, i) + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn fold_pat_tuple(&mut self, i: crate::PatTuple) -> crate::PatTuple { + fold_pat_tuple(self, i) + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn fold_pat_tuple_struct( + &mut self, + i: crate::PatTupleStruct, + ) -> crate::PatTupleStruct { + fold_pat_tuple_struct(self, i) + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn fold_pat_type(&mut self, i: crate::PatType) -> crate::PatType { + fold_pat_type(self, i) + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn fold_pat_wild(&mut self, i: crate::PatWild) -> crate::PatWild { + fold_pat_wild(self, i) + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn fold_path(&mut self, i: crate::Path) -> crate::Path { + fold_path(self, i) + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn fold_path_arguments(&mut self, i: crate::PathArguments) -> crate::PathArguments { + fold_path_arguments(self, i) + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn fold_path_segment(&mut self, i: crate::PathSegment) -> crate::PathSegment { + fold_path_segment(self, i) + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn fold_pointer_mutability( + &mut self, + i: crate::PointerMutability, + ) -> crate::PointerMutability { + fold_pointer_mutability(self, i) + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn fold_precise_capture( + &mut self, + i: crate::PreciseCapture, + ) -> crate::PreciseCapture { + fold_precise_capture(self, i) + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn fold_predicate_lifetime( + &mut self, + i: crate::PredicateLifetime, + ) -> crate::PredicateLifetime { + fold_predicate_lifetime(self, i) + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn fold_predicate_type(&mut self, i: crate::PredicateType) -> crate::PredicateType { + fold_predicate_type(self, i) + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = 
"derive", feature = "full"))))] + fn fold_qself(&mut self, i: crate::QSelf) -> crate::QSelf { + fold_qself(self, i) + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn fold_range_limits(&mut self, i: crate::RangeLimits) -> crate::RangeLimits { + fold_range_limits(self, i) + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn fold_receiver(&mut self, i: crate::Receiver) -> crate::Receiver { + fold_receiver(self, i) + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn fold_return_type(&mut self, i: crate::ReturnType) -> crate::ReturnType { + fold_return_type(self, i) + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn fold_signature(&mut self, i: crate::Signature) -> crate::Signature { + fold_signature(self, i) + } + fn fold_span(&mut self, i: proc_macro2::Span) -> proc_macro2::Span { + i + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn fold_static_mutability( + &mut self, + i: crate::StaticMutability, + ) -> crate::StaticMutability { + fold_static_mutability(self, i) + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn fold_stmt(&mut self, i: crate::Stmt) -> crate::Stmt { + fold_stmt(self, i) + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn fold_stmt_macro(&mut self, i: crate::StmtMacro) -> crate::StmtMacro { + fold_stmt_macro(self, i) + } + fn fold_token_stream( + &mut self, + i: proc_macro2::TokenStream, + ) -> proc_macro2::TokenStream { + i + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn fold_trait_bound(&mut self, i: crate::TraitBound) -> crate::TraitBound { + fold_trait_bound(self, i) + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn fold_trait_bound_modifier( + &mut self, + i: crate::TraitBoundModifier, + ) -> crate::TraitBoundModifier { + fold_trait_bound_modifier(self, i) + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn fold_trait_item(&mut self, i: crate::TraitItem) -> crate::TraitItem { + fold_trait_item(self, i) + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn fold_trait_item_const( + &mut self, + i: crate::TraitItemConst, + ) -> crate::TraitItemConst { + fold_trait_item_const(self, i) + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn fold_trait_item_fn(&mut self, i: crate::TraitItemFn) -> crate::TraitItemFn { + fold_trait_item_fn(self, i) + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn fold_trait_item_macro( + &mut self, + i: crate::TraitItemMacro, + ) -> crate::TraitItemMacro { + fold_trait_item_macro(self, i) + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn fold_trait_item_type(&mut self, i: crate::TraitItemType) -> crate::TraitItemType { + fold_trait_item_type(self, i) + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn fold_type(&mut self, i: crate::Type) -> crate::Type { + fold_type(self, i) + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn fold_type_array(&mut self, i: 
crate::TypeArray) -> crate::TypeArray { + fold_type_array(self, i) + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn fold_type_bare_fn(&mut self, i: crate::TypeBareFn) -> crate::TypeBareFn { + fold_type_bare_fn(self, i) + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn fold_type_group(&mut self, i: crate::TypeGroup) -> crate::TypeGroup { + fold_type_group(self, i) + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn fold_type_impl_trait(&mut self, i: crate::TypeImplTrait) -> crate::TypeImplTrait { + fold_type_impl_trait(self, i) + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn fold_type_infer(&mut self, i: crate::TypeInfer) -> crate::TypeInfer { + fold_type_infer(self, i) + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn fold_type_macro(&mut self, i: crate::TypeMacro) -> crate::TypeMacro { + fold_type_macro(self, i) + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn fold_type_never(&mut self, i: crate::TypeNever) -> crate::TypeNever { + fold_type_never(self, i) + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn fold_type_param(&mut self, i: crate::TypeParam) -> crate::TypeParam { + fold_type_param(self, i) + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn fold_type_param_bound( + &mut self, + i: crate::TypeParamBound, + ) -> crate::TypeParamBound { + fold_type_param_bound(self, i) + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn fold_type_paren(&mut self, i: crate::TypeParen) -> crate::TypeParen { + fold_type_paren(self, i) + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn fold_type_path(&mut self, i: crate::TypePath) -> crate::TypePath { + fold_type_path(self, i) + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn fold_type_ptr(&mut self, i: crate::TypePtr) -> crate::TypePtr { + fold_type_ptr(self, i) + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn fold_type_reference(&mut self, i: crate::TypeReference) -> crate::TypeReference { + fold_type_reference(self, i) + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn fold_type_slice(&mut self, i: crate::TypeSlice) -> crate::TypeSlice { + fold_type_slice(self, i) + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn fold_type_trait_object( + &mut self, + i: crate::TypeTraitObject, + ) -> crate::TypeTraitObject { + fold_type_trait_object(self, i) + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] 
+ fn fold_type_tuple(&mut self, i: crate::TypeTuple) -> crate::TypeTuple { + fold_type_tuple(self, i) + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn fold_un_op(&mut self, i: crate::UnOp) -> crate::UnOp { + fold_un_op(self, i) + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn fold_use_glob(&mut self, i: crate::UseGlob) -> crate::UseGlob { + fold_use_glob(self, i) + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn fold_use_group(&mut self, i: crate::UseGroup) -> crate::UseGroup { + fold_use_group(self, i) + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn fold_use_name(&mut self, i: crate::UseName) -> crate::UseName { + fold_use_name(self, i) + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn fold_use_path(&mut self, i: crate::UsePath) -> crate::UsePath { + fold_use_path(self, i) + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn fold_use_rename(&mut self, i: crate::UseRename) -> crate::UseRename { + fold_use_rename(self, i) + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn fold_use_tree(&mut self, i: crate::UseTree) -> crate::UseTree { + fold_use_tree(self, i) + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn fold_variadic(&mut self, i: crate::Variadic) -> crate::Variadic { + fold_variadic(self, i) + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn fold_variant(&mut self, i: crate::Variant) -> crate::Variant { + fold_variant(self, i) + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn fold_vis_restricted(&mut self, i: crate::VisRestricted) -> crate::VisRestricted { + fold_vis_restricted(self, i) + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn fold_visibility(&mut self, i: crate::Visibility) -> crate::Visibility { + fold_visibility(self, i) + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn fold_where_clause(&mut self, i: crate::WhereClause) -> crate::WhereClause { + fold_where_clause(self, i) + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn fold_where_predicate( + &mut self, + i: crate::WherePredicate, + ) -> crate::WherePredicate { + fold_where_predicate(self, i) + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn fold_abi<F>(f: &mut F, node: crate::Abi) -> crate::Abi +where + F: Fold + ?Sized, +{ + crate::Abi { + extern_token: node.extern_token, + name: (node.name).map(|it| f.fold_lit_str(it)), + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn fold_angle_bracketed_generic_arguments<F>( + f: &mut F, + node: crate::AngleBracketedGenericArguments, +) -> crate::AngleBracketedGenericArguments +where + F: Fold + ?Sized, +{ + crate::AngleBracketedGenericArguments { + colon2_token: node.colon2_token, + lt_token: node.lt_token, + args: crate::punctuated::fold(node.args, 
f, F::fold_generic_argument), + gt_token: node.gt_token, + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn fold_arm<F>(f: &mut F, node: crate::Arm) -> crate::Arm +where + F: Fold + ?Sized, +{ + crate::Arm { + attrs: f.fold_attributes(node.attrs), + pat: f.fold_pat(node.pat), + guard: (node.guard).map(|it| ((it).0, Box::new(f.fold_expr(*(it).1)))), + fat_arrow_token: node.fat_arrow_token, + body: Box::new(f.fold_expr(*node.body)), + comma: node.comma, + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn fold_assoc_const<F>(f: &mut F, node: crate::AssocConst) -> crate::AssocConst +where + F: Fold + ?Sized, +{ + crate::AssocConst { + ident: f.fold_ident(node.ident), + generics: (node.generics).map(|it| f.fold_angle_bracketed_generic_arguments(it)), + eq_token: node.eq_token, + value: f.fold_expr(node.value), + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn fold_assoc_type<F>(f: &mut F, node: crate::AssocType) -> crate::AssocType +where + F: Fold + ?Sized, +{ + crate::AssocType { + ident: f.fold_ident(node.ident), + generics: (node.generics).map(|it| f.fold_angle_bracketed_generic_arguments(it)), + eq_token: node.eq_token, + ty: f.fold_type(node.ty), + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn fold_attr_style<F>(f: &mut F, node: crate::AttrStyle) -> crate::AttrStyle +where + F: Fold + ?Sized, +{ + match node { + crate::AttrStyle::Outer => crate::AttrStyle::Outer, + crate::AttrStyle::Inner(_binding_0) => crate::AttrStyle::Inner(_binding_0), + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn fold_attribute<F>(f: &mut F, node: crate::Attribute) -> crate::Attribute +where + F: Fold + ?Sized, +{ + crate::Attribute { + pound_token: node.pound_token, + style: f.fold_attr_style(node.style), + bracket_token: node.bracket_token, + meta: f.fold_meta(node.meta), + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn fold_bare_fn_arg<F>(f: &mut F, node: crate::BareFnArg) -> crate::BareFnArg +where + F: Fold + ?Sized, +{ + crate::BareFnArg { + attrs: f.fold_attributes(node.attrs), + name: (node.name).map(|it| (f.fold_ident((it).0), (it).1)), + ty: f.fold_type(node.ty), + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn fold_bare_variadic<F>(f: &mut F, node: crate::BareVariadic) -> crate::BareVariadic +where + F: Fold + ?Sized, +{ + crate::BareVariadic { + attrs: f.fold_attributes(node.attrs), + name: (node.name).map(|it| (f.fold_ident((it).0), (it).1)), + dots: node.dots, + comma: node.comma, + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn fold_bin_op<F>(f: &mut F, node: crate::BinOp) -> crate::BinOp +where + F: Fold + ?Sized, +{ + match node { + crate::BinOp::Add(_binding_0) => crate::BinOp::Add(_binding_0), + crate::BinOp::Sub(_binding_0) => crate::BinOp::Sub(_binding_0), + crate::BinOp::Mul(_binding_0) => crate::BinOp::Mul(_binding_0), + crate::BinOp::Div(_binding_0) => crate::BinOp::Div(_binding_0), + crate::BinOp::Rem(_binding_0) => 
crate::BinOp::Rem(_binding_0), + crate::BinOp::And(_binding_0) => crate::BinOp::And(_binding_0), + crate::BinOp::Or(_binding_0) => crate::BinOp::Or(_binding_0), + crate::BinOp::BitXor(_binding_0) => crate::BinOp::BitXor(_binding_0), + crate::BinOp::BitAnd(_binding_0) => crate::BinOp::BitAnd(_binding_0), + crate::BinOp::BitOr(_binding_0) => crate::BinOp::BitOr(_binding_0), + crate::BinOp::Shl(_binding_0) => crate::BinOp::Shl(_binding_0), + crate::BinOp::Shr(_binding_0) => crate::BinOp::Shr(_binding_0), + crate::BinOp::Eq(_binding_0) => crate::BinOp::Eq(_binding_0), + crate::BinOp::Lt(_binding_0) => crate::BinOp::Lt(_binding_0), + crate::BinOp::Le(_binding_0) => crate::BinOp::Le(_binding_0), + crate::BinOp::Ne(_binding_0) => crate::BinOp::Ne(_binding_0), + crate::BinOp::Ge(_binding_0) => crate::BinOp::Ge(_binding_0), + crate::BinOp::Gt(_binding_0) => crate::BinOp::Gt(_binding_0), + crate::BinOp::AddAssign(_binding_0) => crate::BinOp::AddAssign(_binding_0), + crate::BinOp::SubAssign(_binding_0) => crate::BinOp::SubAssign(_binding_0), + crate::BinOp::MulAssign(_binding_0) => crate::BinOp::MulAssign(_binding_0), + crate::BinOp::DivAssign(_binding_0) => crate::BinOp::DivAssign(_binding_0), + crate::BinOp::RemAssign(_binding_0) => crate::BinOp::RemAssign(_binding_0), + crate::BinOp::BitXorAssign(_binding_0) => crate::BinOp::BitXorAssign(_binding_0), + crate::BinOp::BitAndAssign(_binding_0) => crate::BinOp::BitAndAssign(_binding_0), + crate::BinOp::BitOrAssign(_binding_0) => crate::BinOp::BitOrAssign(_binding_0), + crate::BinOp::ShlAssign(_binding_0) => crate::BinOp::ShlAssign(_binding_0), + crate::BinOp::ShrAssign(_binding_0) => crate::BinOp::ShrAssign(_binding_0), + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn fold_block<F>(f: &mut F, node: crate::Block) -> crate::Block +where + F: Fold + ?Sized, +{ + crate::Block { + brace_token: node.brace_token, + stmts: fold_vec(node.stmts, f, F::fold_stmt), + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn fold_bound_lifetimes<F>( + f: &mut F, + node: crate::BoundLifetimes, +) -> crate::BoundLifetimes +where + F: Fold + ?Sized, +{ + crate::BoundLifetimes { + for_token: node.for_token, + lt_token: node.lt_token, + lifetimes: crate::punctuated::fold(node.lifetimes, f, F::fold_generic_param), + gt_token: node.gt_token, + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn fold_captured_param<F>( + f: &mut F, + node: crate::CapturedParam, +) -> crate::CapturedParam +where + F: Fold + ?Sized, +{ + match node { + crate::CapturedParam::Lifetime(_binding_0) => { + crate::CapturedParam::Lifetime(f.fold_lifetime(_binding_0)) + } + crate::CapturedParam::Ident(_binding_0) => { + crate::CapturedParam::Ident(f.fold_ident(_binding_0)) + } + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn fold_const_param<F>(f: &mut F, node: crate::ConstParam) -> crate::ConstParam +where + F: Fold + ?Sized, +{ + crate::ConstParam { + attrs: f.fold_attributes(node.attrs), + const_token: node.const_token, + ident: f.fold_ident(node.ident), + colon_token: node.colon_token, + ty: f.fold_type(node.ty), + eq_token: node.eq_token, + default: (node.default).map(|it| f.fold_expr(it)), + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn fold_constraint<F>(f: 
&mut F, node: crate::Constraint) -> crate::Constraint +where + F: Fold + ?Sized, +{ + crate::Constraint { + ident: f.fold_ident(node.ident), + generics: (node.generics).map(|it| f.fold_angle_bracketed_generic_arguments(it)), + colon_token: node.colon_token, + bounds: crate::punctuated::fold(node.bounds, f, F::fold_type_param_bound), + } +} +#[cfg(feature = "derive")] +#[cfg_attr(docsrs, doc(cfg(feature = "derive")))] +pub fn fold_data<F>(f: &mut F, node: crate::Data) -> crate::Data +where + F: Fold + ?Sized, +{ + match node { + crate::Data::Struct(_binding_0) => { + crate::Data::Struct(f.fold_data_struct(_binding_0)) + } + crate::Data::Enum(_binding_0) => crate::Data::Enum(f.fold_data_enum(_binding_0)), + crate::Data::Union(_binding_0) => { + crate::Data::Union(f.fold_data_union(_binding_0)) + } + } +} +#[cfg(feature = "derive")] +#[cfg_attr(docsrs, doc(cfg(feature = "derive")))] +pub fn fold_data_enum<F>(f: &mut F, node: crate::DataEnum) -> crate::DataEnum +where + F: Fold + ?Sized, +{ + crate::DataEnum { + enum_token: node.enum_token, + brace_token: node.brace_token, + variants: crate::punctuated::fold(node.variants, f, F::fold_variant), + } +} +#[cfg(feature = "derive")] +#[cfg_attr(docsrs, doc(cfg(feature = "derive")))] +pub fn fold_data_struct<F>(f: &mut F, node: crate::DataStruct) -> crate::DataStruct +where + F: Fold + ?Sized, +{ + crate::DataStruct { + struct_token: node.struct_token, + fields: f.fold_fields(node.fields), + semi_token: node.semi_token, + } +} +#[cfg(feature = "derive")] +#[cfg_attr(docsrs, doc(cfg(feature = "derive")))] +pub fn fold_data_union<F>(f: &mut F, node: crate::DataUnion) -> crate::DataUnion +where + F: Fold + ?Sized, +{ + crate::DataUnion { + union_token: node.union_token, + fields: f.fold_fields_named(node.fields), + } +} +#[cfg(feature = "derive")] +#[cfg_attr(docsrs, doc(cfg(feature = "derive")))] +pub fn fold_derive_input<F>(f: &mut F, node: crate::DeriveInput) -> crate::DeriveInput +where + F: Fold + ?Sized, +{ + crate::DeriveInput { + attrs: f.fold_attributes(node.attrs), + vis: f.fold_visibility(node.vis), + ident: f.fold_ident(node.ident), + generics: f.fold_generics(node.generics), + data: f.fold_data(node.data), + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn fold_expr<F>(f: &mut F, node: crate::Expr) -> crate::Expr +where + F: Fold + ?Sized, +{ + match node { + crate::Expr::Array(_binding_0) => { + crate::Expr::Array(full!(f.fold_expr_array(_binding_0))) + } + crate::Expr::Assign(_binding_0) => { + crate::Expr::Assign(full!(f.fold_expr_assign(_binding_0))) + } + crate::Expr::Async(_binding_0) => { + crate::Expr::Async(full!(f.fold_expr_async(_binding_0))) + } + crate::Expr::Await(_binding_0) => { + crate::Expr::Await(full!(f.fold_expr_await(_binding_0))) + } + crate::Expr::Binary(_binding_0) => { + crate::Expr::Binary(f.fold_expr_binary(_binding_0)) + } + crate::Expr::Block(_binding_0) => { + crate::Expr::Block(full!(f.fold_expr_block(_binding_0))) + } + crate::Expr::Break(_binding_0) => { + crate::Expr::Break(full!(f.fold_expr_break(_binding_0))) + } + crate::Expr::Call(_binding_0) => crate::Expr::Call(f.fold_expr_call(_binding_0)), + crate::Expr::Cast(_binding_0) => crate::Expr::Cast(f.fold_expr_cast(_binding_0)), + crate::Expr::Closure(_binding_0) => { + crate::Expr::Closure(full!(f.fold_expr_closure(_binding_0))) + } + crate::Expr::Const(_binding_0) => { + crate::Expr::Const(full!(f.fold_expr_const(_binding_0))) + } + 
crate::Expr::Continue(_binding_0) => { + crate::Expr::Continue(full!(f.fold_expr_continue(_binding_0))) + } + crate::Expr::Field(_binding_0) => { + crate::Expr::Field(f.fold_expr_field(_binding_0)) + } + crate::Expr::ForLoop(_binding_0) => { + crate::Expr::ForLoop(full!(f.fold_expr_for_loop(_binding_0))) + } + crate::Expr::Group(_binding_0) => { + crate::Expr::Group(f.fold_expr_group(_binding_0)) + } + crate::Expr::If(_binding_0) => crate::Expr::If(full!(f.fold_expr_if(_binding_0))), + crate::Expr::Index(_binding_0) => { + crate::Expr::Index(f.fold_expr_index(_binding_0)) + } + crate::Expr::Infer(_binding_0) => { + crate::Expr::Infer(full!(f.fold_expr_infer(_binding_0))) + } + crate::Expr::Let(_binding_0) => { + crate::Expr::Let(full!(f.fold_expr_let(_binding_0))) + } + crate::Expr::Lit(_binding_0) => crate::Expr::Lit(f.fold_expr_lit(_binding_0)), + crate::Expr::Loop(_binding_0) => { + crate::Expr::Loop(full!(f.fold_expr_loop(_binding_0))) + } + crate::Expr::Macro(_binding_0) => { + crate::Expr::Macro(f.fold_expr_macro(_binding_0)) + } + crate::Expr::Match(_binding_0) => { + crate::Expr::Match(full!(f.fold_expr_match(_binding_0))) + } + crate::Expr::MethodCall(_binding_0) => { + crate::Expr::MethodCall(f.fold_expr_method_call(_binding_0)) + } + crate::Expr::Paren(_binding_0) => { + crate::Expr::Paren(f.fold_expr_paren(_binding_0)) + } + crate::Expr::Path(_binding_0) => crate::Expr::Path(f.fold_expr_path(_binding_0)), + crate::Expr::Range(_binding_0) => { + crate::Expr::Range(full!(f.fold_expr_range(_binding_0))) + } + crate::Expr::RawAddr(_binding_0) => { + crate::Expr::RawAddr(full!(f.fold_expr_raw_addr(_binding_0))) + } + crate::Expr::Reference(_binding_0) => { + crate::Expr::Reference(f.fold_expr_reference(_binding_0)) + } + crate::Expr::Repeat(_binding_0) => { + crate::Expr::Repeat(full!(f.fold_expr_repeat(_binding_0))) + } + crate::Expr::Return(_binding_0) => { + crate::Expr::Return(full!(f.fold_expr_return(_binding_0))) + } + crate::Expr::Struct(_binding_0) => { + crate::Expr::Struct(f.fold_expr_struct(_binding_0)) + } + crate::Expr::Try(_binding_0) => { + crate::Expr::Try(full!(f.fold_expr_try(_binding_0))) + } + crate::Expr::TryBlock(_binding_0) => { + crate::Expr::TryBlock(full!(f.fold_expr_try_block(_binding_0))) + } + crate::Expr::Tuple(_binding_0) => { + crate::Expr::Tuple(f.fold_expr_tuple(_binding_0)) + } + crate::Expr::Unary(_binding_0) => { + crate::Expr::Unary(f.fold_expr_unary(_binding_0)) + } + crate::Expr::Unsafe(_binding_0) => { + crate::Expr::Unsafe(full!(f.fold_expr_unsafe(_binding_0))) + } + crate::Expr::Verbatim(_binding_0) => { + crate::Expr::Verbatim(f.fold_token_stream(_binding_0)) + } + crate::Expr::While(_binding_0) => { + crate::Expr::While(full!(f.fold_expr_while(_binding_0))) + } + crate::Expr::Yield(_binding_0) => { + crate::Expr::Yield(full!(f.fold_expr_yield(_binding_0))) + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn fold_expr_array<F>(f: &mut F, node: crate::ExprArray) -> crate::ExprArray +where + F: Fold + ?Sized, +{ + crate::ExprArray { + attrs: f.fold_attributes(node.attrs), + bracket_token: node.bracket_token, + elems: crate::punctuated::fold(node.elems, f, F::fold_expr), + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn fold_expr_assign<F>(f: &mut F, node: crate::ExprAssign) -> crate::ExprAssign +where + F: Fold + ?Sized, +{ + crate::ExprAssign { + attrs: f.fold_attributes(node.attrs), + left: Box::new(f.fold_expr(*node.left)), + eq_token: node.eq_token, + 
right: Box::new(f.fold_expr(*node.right)), + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn fold_expr_async<F>(f: &mut F, node: crate::ExprAsync) -> crate::ExprAsync +where + F: Fold + ?Sized, +{ + crate::ExprAsync { + attrs: f.fold_attributes(node.attrs), + async_token: node.async_token, + capture: node.capture, + block: f.fold_block(node.block), + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn fold_expr_await<F>(f: &mut F, node: crate::ExprAwait) -> crate::ExprAwait +where + F: Fold + ?Sized, +{ + crate::ExprAwait { + attrs: f.fold_attributes(node.attrs), + base: Box::new(f.fold_expr(*node.base)), + dot_token: node.dot_token, + await_token: node.await_token, + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn fold_expr_binary<F>(f: &mut F, node: crate::ExprBinary) -> crate::ExprBinary +where + F: Fold + ?Sized, +{ + crate::ExprBinary { + attrs: f.fold_attributes(node.attrs), + left: Box::new(f.fold_expr(*node.left)), + op: f.fold_bin_op(node.op), + right: Box::new(f.fold_expr(*node.right)), + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn fold_expr_block<F>(f: &mut F, node: crate::ExprBlock) -> crate::ExprBlock +where + F: Fold + ?Sized, +{ + crate::ExprBlock { + attrs: f.fold_attributes(node.attrs), + label: (node.label).map(|it| f.fold_label(it)), + block: f.fold_block(node.block), + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn fold_expr_break<F>(f: &mut F, node: crate::ExprBreak) -> crate::ExprBreak +where + F: Fold + ?Sized, +{ + crate::ExprBreak { + attrs: f.fold_attributes(node.attrs), + break_token: node.break_token, + label: (node.label).map(|it| f.fold_lifetime(it)), + expr: (node.expr).map(|it| Box::new(f.fold_expr(*it))), + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn fold_expr_call<F>(f: &mut F, node: crate::ExprCall) -> crate::ExprCall +where + F: Fold + ?Sized, +{ + crate::ExprCall { + attrs: f.fold_attributes(node.attrs), + func: Box::new(f.fold_expr(*node.func)), + paren_token: node.paren_token, + args: crate::punctuated::fold(node.args, f, F::fold_expr), + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn fold_expr_cast<F>(f: &mut F, node: crate::ExprCast) -> crate::ExprCast +where + F: Fold + ?Sized, +{ + crate::ExprCast { + attrs: f.fold_attributes(node.attrs), + expr: Box::new(f.fold_expr(*node.expr)), + as_token: node.as_token, + ty: Box::new(f.fold_type(*node.ty)), + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn fold_expr_closure<F>(f: &mut F, node: crate::ExprClosure) -> crate::ExprClosure +where + F: Fold + ?Sized, +{ + crate::ExprClosure { + attrs: f.fold_attributes(node.attrs), + lifetimes: (node.lifetimes).map(|it| f.fold_bound_lifetimes(it)), + constness: node.constness, + movability: node.movability, + asyncness: node.asyncness, + capture: node.capture, + or1_token: node.or1_token, + inputs: crate::punctuated::fold(node.inputs, f, F::fold_pat), + or2_token: node.or2_token, + output: f.fold_return_type(node.output), + body: Box::new(f.fold_expr(*node.body)), + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn fold_expr_const<F>(f: &mut F, node: 
crate::ExprConst) -> crate::ExprConst +where + F: Fold + ?Sized, +{ + crate::ExprConst { + attrs: f.fold_attributes(node.attrs), + const_token: node.const_token, + block: f.fold_block(node.block), + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn fold_expr_continue<F>(f: &mut F, node: crate::ExprContinue) -> crate::ExprContinue +where + F: Fold + ?Sized, +{ + crate::ExprContinue { + attrs: f.fold_attributes(node.attrs), + continue_token: node.continue_token, + label: (node.label).map(|it| f.fold_lifetime(it)), + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn fold_expr_field<F>(f: &mut F, node: crate::ExprField) -> crate::ExprField +where + F: Fold + ?Sized, +{ + crate::ExprField { + attrs: f.fold_attributes(node.attrs), + base: Box::new(f.fold_expr(*node.base)), + dot_token: node.dot_token, + member: f.fold_member(node.member), + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn fold_expr_for_loop<F>(f: &mut F, node: crate::ExprForLoop) -> crate::ExprForLoop +where + F: Fold + ?Sized, +{ + crate::ExprForLoop { + attrs: f.fold_attributes(node.attrs), + label: (node.label).map(|it| f.fold_label(it)), + for_token: node.for_token, + pat: Box::new(f.fold_pat(*node.pat)), + in_token: node.in_token, + expr: Box::new(f.fold_expr(*node.expr)), + body: f.fold_block(node.body), + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn fold_expr_group<F>(f: &mut F, node: crate::ExprGroup) -> crate::ExprGroup +where + F: Fold + ?Sized, +{ + crate::ExprGroup { + attrs: f.fold_attributes(node.attrs), + group_token: node.group_token, + expr: Box::new(f.fold_expr(*node.expr)), + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn fold_expr_if<F>(f: &mut F, node: crate::ExprIf) -> crate::ExprIf +where + F: Fold + ?Sized, +{ + crate::ExprIf { + attrs: f.fold_attributes(node.attrs), + if_token: node.if_token, + cond: Box::new(f.fold_expr(*node.cond)), + then_branch: f.fold_block(node.then_branch), + else_branch: (node.else_branch) + .map(|it| ((it).0, Box::new(f.fold_expr(*(it).1)))), + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn fold_expr_index<F>(f: &mut F, node: crate::ExprIndex) -> crate::ExprIndex +where + F: Fold + ?Sized, +{ + crate::ExprIndex { + attrs: f.fold_attributes(node.attrs), + expr: Box::new(f.fold_expr(*node.expr)), + bracket_token: node.bracket_token, + index: Box::new(f.fold_expr(*node.index)), + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn fold_expr_infer<F>(f: &mut F, node: crate::ExprInfer) -> crate::ExprInfer +where + F: Fold + ?Sized, +{ + crate::ExprInfer { + attrs: f.fold_attributes(node.attrs), + underscore_token: node.underscore_token, + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn fold_expr_let<F>(f: &mut F, node: crate::ExprLet) -> crate::ExprLet +where + F: Fold + ?Sized, +{ + crate::ExprLet { + attrs: f.fold_attributes(node.attrs), + let_token: node.let_token, + pat: Box::new(f.fold_pat(*node.pat)), + eq_token: node.eq_token, + expr: Box::new(f.fold_expr(*node.expr)), + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn 
fold_expr_lit<F>(f: &mut F, node: crate::ExprLit) -> crate::ExprLit +where + F: Fold + ?Sized, +{ + crate::ExprLit { + attrs: f.fold_attributes(node.attrs), + lit: f.fold_lit(node.lit), + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn fold_expr_loop<F>(f: &mut F, node: crate::ExprLoop) -> crate::ExprLoop +where + F: Fold + ?Sized, +{ + crate::ExprLoop { + attrs: f.fold_attributes(node.attrs), + label: (node.label).map(|it| f.fold_label(it)), + loop_token: node.loop_token, + body: f.fold_block(node.body), + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn fold_expr_macro<F>(f: &mut F, node: crate::ExprMacro) -> crate::ExprMacro +where + F: Fold + ?Sized, +{ + crate::ExprMacro { + attrs: f.fold_attributes(node.attrs), + mac: f.fold_macro(node.mac), + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn fold_expr_match<F>(f: &mut F, node: crate::ExprMatch) -> crate::ExprMatch +where + F: Fold + ?Sized, +{ + crate::ExprMatch { + attrs: f.fold_attributes(node.attrs), + match_token: node.match_token, + expr: Box::new(f.fold_expr(*node.expr)), + brace_token: node.brace_token, + arms: fold_vec(node.arms, f, F::fold_arm), + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn fold_expr_method_call<F>( + f: &mut F, + node: crate::ExprMethodCall, +) -> crate::ExprMethodCall +where + F: Fold + ?Sized, +{ + crate::ExprMethodCall { + attrs: f.fold_attributes(node.attrs), + receiver: Box::new(f.fold_expr(*node.receiver)), + dot_token: node.dot_token, + method: f.fold_ident(node.method), + turbofish: (node.turbofish) + .map(|it| f.fold_angle_bracketed_generic_arguments(it)), + paren_token: node.paren_token, + args: crate::punctuated::fold(node.args, f, F::fold_expr), + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn fold_expr_paren<F>(f: &mut F, node: crate::ExprParen) -> crate::ExprParen +where + F: Fold + ?Sized, +{ + crate::ExprParen { + attrs: f.fold_attributes(node.attrs), + paren_token: node.paren_token, + expr: Box::new(f.fold_expr(*node.expr)), + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn fold_expr_path<F>(f: &mut F, node: crate::ExprPath) -> crate::ExprPath +where + F: Fold + ?Sized, +{ + crate::ExprPath { + attrs: f.fold_attributes(node.attrs), + qself: (node.qself).map(|it| f.fold_qself(it)), + path: f.fold_path(node.path), + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn fold_expr_range<F>(f: &mut F, node: crate::ExprRange) -> crate::ExprRange +where + F: Fold + ?Sized, +{ + crate::ExprRange { + attrs: f.fold_attributes(node.attrs), + start: (node.start).map(|it| Box::new(f.fold_expr(*it))), + limits: f.fold_range_limits(node.limits), + end: (node.end).map(|it| Box::new(f.fold_expr(*it))), + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn fold_expr_raw_addr<F>(f: &mut F, node: crate::ExprRawAddr) -> crate::ExprRawAddr +where + F: Fold + ?Sized, +{ + crate::ExprRawAddr { + attrs: f.fold_attributes(node.attrs), + and_token: node.and_token, + raw: node.raw, + mutability: f.fold_pointer_mutability(node.mutability), + expr: Box::new(f.fold_expr(*node.expr)), + } +} +#[cfg(any(feature = 
"derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn fold_expr_reference<F>( + f: &mut F, + node: crate::ExprReference, +) -> crate::ExprReference +where + F: Fold + ?Sized, +{ + crate::ExprReference { + attrs: f.fold_attributes(node.attrs), + and_token: node.and_token, + mutability: node.mutability, + expr: Box::new(f.fold_expr(*node.expr)), + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn fold_expr_repeat<F>(f: &mut F, node: crate::ExprRepeat) -> crate::ExprRepeat +where + F: Fold + ?Sized, +{ + crate::ExprRepeat { + attrs: f.fold_attributes(node.attrs), + bracket_token: node.bracket_token, + expr: Box::new(f.fold_expr(*node.expr)), + semi_token: node.semi_token, + len: Box::new(f.fold_expr(*node.len)), + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn fold_expr_return<F>(f: &mut F, node: crate::ExprReturn) -> crate::ExprReturn +where + F: Fold + ?Sized, +{ + crate::ExprReturn { + attrs: f.fold_attributes(node.attrs), + return_token: node.return_token, + expr: (node.expr).map(|it| Box::new(f.fold_expr(*it))), + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn fold_expr_struct<F>(f: &mut F, node: crate::ExprStruct) -> crate::ExprStruct +where + F: Fold + ?Sized, +{ + crate::ExprStruct { + attrs: f.fold_attributes(node.attrs), + qself: (node.qself).map(|it| f.fold_qself(it)), + path: f.fold_path(node.path), + brace_token: node.brace_token, + fields: crate::punctuated::fold(node.fields, f, F::fold_field_value), + dot2_token: node.dot2_token, + rest: (node.rest).map(|it| Box::new(f.fold_expr(*it))), + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn fold_expr_try<F>(f: &mut F, node: crate::ExprTry) -> crate::ExprTry +where + F: Fold + ?Sized, +{ + crate::ExprTry { + attrs: f.fold_attributes(node.attrs), + expr: Box::new(f.fold_expr(*node.expr)), + question_token: node.question_token, + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn fold_expr_try_block<F>( + f: &mut F, + node: crate::ExprTryBlock, +) -> crate::ExprTryBlock +where + F: Fold + ?Sized, +{ + crate::ExprTryBlock { + attrs: f.fold_attributes(node.attrs), + try_token: node.try_token, + block: f.fold_block(node.block), + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn fold_expr_tuple<F>(f: &mut F, node: crate::ExprTuple) -> crate::ExprTuple +where + F: Fold + ?Sized, +{ + crate::ExprTuple { + attrs: f.fold_attributes(node.attrs), + paren_token: node.paren_token, + elems: crate::punctuated::fold(node.elems, f, F::fold_expr), + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn fold_expr_unary<F>(f: &mut F, node: crate::ExprUnary) -> crate::ExprUnary +where + F: Fold + ?Sized, +{ + crate::ExprUnary { + attrs: f.fold_attributes(node.attrs), + op: f.fold_un_op(node.op), + expr: Box::new(f.fold_expr(*node.expr)), + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn fold_expr_unsafe<F>(f: &mut F, node: crate::ExprUnsafe) -> crate::ExprUnsafe +where + F: Fold + ?Sized, +{ + crate::ExprUnsafe { + attrs: f.fold_attributes(node.attrs), + unsafe_token: node.unsafe_token, + block: f.fold_block(node.block), + } +} +#[cfg(feature = 
"full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn fold_expr_while<F>(f: &mut F, node: crate::ExprWhile) -> crate::ExprWhile +where + F: Fold + ?Sized, +{ + crate::ExprWhile { + attrs: f.fold_attributes(node.attrs), + label: (node.label).map(|it| f.fold_label(it)), + while_token: node.while_token, + cond: Box::new(f.fold_expr(*node.cond)), + body: f.fold_block(node.body), + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn fold_expr_yield<F>(f: &mut F, node: crate::ExprYield) -> crate::ExprYield +where + F: Fold + ?Sized, +{ + crate::ExprYield { + attrs: f.fold_attributes(node.attrs), + yield_token: node.yield_token, + expr: (node.expr).map(|it| Box::new(f.fold_expr(*it))), + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn fold_field<F>(f: &mut F, node: crate::Field) -> crate::Field +where + F: Fold + ?Sized, +{ + crate::Field { + attrs: f.fold_attributes(node.attrs), + vis: f.fold_visibility(node.vis), + mutability: f.fold_field_mutability(node.mutability), + ident: (node.ident).map(|it| f.fold_ident(it)), + colon_token: node.colon_token, + ty: f.fold_type(node.ty), + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn fold_field_mutability<F>( + f: &mut F, + node: crate::FieldMutability, +) -> crate::FieldMutability +where + F: Fold + ?Sized, +{ + match node { + crate::FieldMutability::None => crate::FieldMutability::None, + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn fold_field_pat<F>(f: &mut F, node: crate::FieldPat) -> crate::FieldPat +where + F: Fold + ?Sized, +{ + crate::FieldPat { + attrs: f.fold_attributes(node.attrs), + member: f.fold_member(node.member), + colon_token: node.colon_token, + pat: Box::new(f.fold_pat(*node.pat)), + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn fold_field_value<F>(f: &mut F, node: crate::FieldValue) -> crate::FieldValue +where + F: Fold + ?Sized, +{ + crate::FieldValue { + attrs: f.fold_attributes(node.attrs), + member: f.fold_member(node.member), + colon_token: node.colon_token, + expr: f.fold_expr(node.expr), + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn fold_fields<F>(f: &mut F, node: crate::Fields) -> crate::Fields +where + F: Fold + ?Sized, +{ + match node { + crate::Fields::Named(_binding_0) => { + crate::Fields::Named(f.fold_fields_named(_binding_0)) + } + crate::Fields::Unnamed(_binding_0) => { + crate::Fields::Unnamed(f.fold_fields_unnamed(_binding_0)) + } + crate::Fields::Unit => crate::Fields::Unit, + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn fold_fields_named<F>(f: &mut F, node: crate::FieldsNamed) -> crate::FieldsNamed +where + F: Fold + ?Sized, +{ + crate::FieldsNamed { + brace_token: node.brace_token, + named: crate::punctuated::fold(node.named, f, F::fold_field), + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn fold_fields_unnamed<F>( + f: &mut F, + node: crate::FieldsUnnamed, +) -> crate::FieldsUnnamed +where + F: Fold + ?Sized, +{ + crate::FieldsUnnamed { + paren_token: node.paren_token, + unnamed: 
crate::punctuated::fold(node.unnamed, f, F::fold_field), + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn fold_file<F>(f: &mut F, node: crate::File) -> crate::File +where + F: Fold + ?Sized, +{ + crate::File { + shebang: node.shebang, + attrs: f.fold_attributes(node.attrs), + items: fold_vec(node.items, f, F::fold_item), + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn fold_fn_arg<F>(f: &mut F, node: crate::FnArg) -> crate::FnArg +where + F: Fold + ?Sized, +{ + match node { + crate::FnArg::Receiver(_binding_0) => { + crate::FnArg::Receiver(f.fold_receiver(_binding_0)) + } + crate::FnArg::Typed(_binding_0) => { + crate::FnArg::Typed(f.fold_pat_type(_binding_0)) + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn fold_foreign_item<F>(f: &mut F, node: crate::ForeignItem) -> crate::ForeignItem +where + F: Fold + ?Sized, +{ + match node { + crate::ForeignItem::Fn(_binding_0) => { + crate::ForeignItem::Fn(f.fold_foreign_item_fn(_binding_0)) + } + crate::ForeignItem::Static(_binding_0) => { + crate::ForeignItem::Static(f.fold_foreign_item_static(_binding_0)) + } + crate::ForeignItem::Type(_binding_0) => { + crate::ForeignItem::Type(f.fold_foreign_item_type(_binding_0)) + } + crate::ForeignItem::Macro(_binding_0) => { + crate::ForeignItem::Macro(f.fold_foreign_item_macro(_binding_0)) + } + crate::ForeignItem::Verbatim(_binding_0) => { + crate::ForeignItem::Verbatim(f.fold_token_stream(_binding_0)) + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn fold_foreign_item_fn<F>( + f: &mut F, + node: crate::ForeignItemFn, +) -> crate::ForeignItemFn +where + F: Fold + ?Sized, +{ + crate::ForeignItemFn { + attrs: f.fold_attributes(node.attrs), + vis: f.fold_visibility(node.vis), + sig: f.fold_signature(node.sig), + semi_token: node.semi_token, + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn fold_foreign_item_macro<F>( + f: &mut F, + node: crate::ForeignItemMacro, +) -> crate::ForeignItemMacro +where + F: Fold + ?Sized, +{ + crate::ForeignItemMacro { + attrs: f.fold_attributes(node.attrs), + mac: f.fold_macro(node.mac), + semi_token: node.semi_token, + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn fold_foreign_item_static<F>( + f: &mut F, + node: crate::ForeignItemStatic, +) -> crate::ForeignItemStatic +where + F: Fold + ?Sized, +{ + crate::ForeignItemStatic { + attrs: f.fold_attributes(node.attrs), + vis: f.fold_visibility(node.vis), + static_token: node.static_token, + mutability: f.fold_static_mutability(node.mutability), + ident: f.fold_ident(node.ident), + colon_token: node.colon_token, + ty: Box::new(f.fold_type(*node.ty)), + semi_token: node.semi_token, + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn fold_foreign_item_type<F>( + f: &mut F, + node: crate::ForeignItemType, +) -> crate::ForeignItemType +where + F: Fold + ?Sized, +{ + crate::ForeignItemType { + attrs: f.fold_attributes(node.attrs), + vis: f.fold_visibility(node.vis), + type_token: node.type_token, + ident: f.fold_ident(node.ident), + generics: f.fold_generics(node.generics), + semi_token: node.semi_token, + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn fold_generic_argument<F>( + f: &mut F, + node: crate::GenericArgument, +) -> crate::GenericArgument +where + 
F: Fold + ?Sized, +{ + match node { + crate::GenericArgument::Lifetime(_binding_0) => { + crate::GenericArgument::Lifetime(f.fold_lifetime(_binding_0)) + } + crate::GenericArgument::Type(_binding_0) => { + crate::GenericArgument::Type(f.fold_type(_binding_0)) + } + crate::GenericArgument::Const(_binding_0) => { + crate::GenericArgument::Const(f.fold_expr(_binding_0)) + } + crate::GenericArgument::AssocType(_binding_0) => { + crate::GenericArgument::AssocType(f.fold_assoc_type(_binding_0)) + } + crate::GenericArgument::AssocConst(_binding_0) => { + crate::GenericArgument::AssocConst(f.fold_assoc_const(_binding_0)) + } + crate::GenericArgument::Constraint(_binding_0) => { + crate::GenericArgument::Constraint(f.fold_constraint(_binding_0)) + } + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn fold_generic_param<F>(f: &mut F, node: crate::GenericParam) -> crate::GenericParam +where + F: Fold + ?Sized, +{ + match node { + crate::GenericParam::Lifetime(_binding_0) => { + crate::GenericParam::Lifetime(f.fold_lifetime_param(_binding_0)) + } + crate::GenericParam::Type(_binding_0) => { + crate::GenericParam::Type(f.fold_type_param(_binding_0)) + } + crate::GenericParam::Const(_binding_0) => { + crate::GenericParam::Const(f.fold_const_param(_binding_0)) + } + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn fold_generics<F>(f: &mut F, node: crate::Generics) -> crate::Generics +where + F: Fold + ?Sized, +{ + crate::Generics { + lt_token: node.lt_token, + params: crate::punctuated::fold(node.params, f, F::fold_generic_param), + gt_token: node.gt_token, + where_clause: (node.where_clause).map(|it| f.fold_where_clause(it)), + } +} +pub fn fold_ident<F>(f: &mut F, node: proc_macro2::Ident) -> proc_macro2::Ident +where + F: Fold + ?Sized, +{ + let mut node = node; + let span = f.fold_span(node.span()); + node.set_span(span); + node +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn fold_impl_item<F>(f: &mut F, node: crate::ImplItem) -> crate::ImplItem +where + F: Fold + ?Sized, +{ + match node { + crate::ImplItem::Const(_binding_0) => { + crate::ImplItem::Const(f.fold_impl_item_const(_binding_0)) + } + crate::ImplItem::Fn(_binding_0) => { + crate::ImplItem::Fn(f.fold_impl_item_fn(_binding_0)) + } + crate::ImplItem::Type(_binding_0) => { + crate::ImplItem::Type(f.fold_impl_item_type(_binding_0)) + } + crate::ImplItem::Macro(_binding_0) => { + crate::ImplItem::Macro(f.fold_impl_item_macro(_binding_0)) + } + crate::ImplItem::Verbatim(_binding_0) => { + crate::ImplItem::Verbatim(f.fold_token_stream(_binding_0)) + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn fold_impl_item_const<F>( + f: &mut F, + node: crate::ImplItemConst, +) -> crate::ImplItemConst +where + F: Fold + ?Sized, +{ + crate::ImplItemConst { + attrs: f.fold_attributes(node.attrs), + vis: f.fold_visibility(node.vis), + defaultness: node.defaultness, + const_token: node.const_token, + ident: f.fold_ident(node.ident), + generics: f.fold_generics(node.generics), + colon_token: node.colon_token, + ty: f.fold_type(node.ty), + eq_token: node.eq_token, + expr: f.fold_expr(node.expr), + semi_token: node.semi_token, + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn fold_impl_item_fn<F>(f: &mut F, node: crate::ImplItemFn) -> crate::ImplItemFn +where + F: Fold + 
?Sized, +{ + crate::ImplItemFn { + attrs: f.fold_attributes(node.attrs), + vis: f.fold_visibility(node.vis), + defaultness: node.defaultness, + sig: f.fold_signature(node.sig), + block: f.fold_block(node.block), + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn fold_impl_item_macro<F>( + f: &mut F, + node: crate::ImplItemMacro, +) -> crate::ImplItemMacro +where + F: Fold + ?Sized, +{ + crate::ImplItemMacro { + attrs: f.fold_attributes(node.attrs), + mac: f.fold_macro(node.mac), + semi_token: node.semi_token, + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn fold_impl_item_type<F>( + f: &mut F, + node: crate::ImplItemType, +) -> crate::ImplItemType +where + F: Fold + ?Sized, +{ + crate::ImplItemType { + attrs: f.fold_attributes(node.attrs), + vis: f.fold_visibility(node.vis), + defaultness: node.defaultness, + type_token: node.type_token, + ident: f.fold_ident(node.ident), + generics: f.fold_generics(node.generics), + eq_token: node.eq_token, + ty: f.fold_type(node.ty), + semi_token: node.semi_token, + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn fold_impl_restriction<F>( + f: &mut F, + node: crate::ImplRestriction, +) -> crate::ImplRestriction +where + F: Fold + ?Sized, +{ + match node {} +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn fold_index<F>(f: &mut F, node: crate::Index) -> crate::Index +where + F: Fold + ?Sized, +{ + crate::Index { + index: node.index, + span: f.fold_span(node.span), + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn fold_item<F>(f: &mut F, node: crate::Item) -> crate::Item +where + F: Fold + ?Sized, +{ + match node { + crate::Item::Const(_binding_0) => { + crate::Item::Const(f.fold_item_const(_binding_0)) + } + crate::Item::Enum(_binding_0) => crate::Item::Enum(f.fold_item_enum(_binding_0)), + crate::Item::ExternCrate(_binding_0) => { + crate::Item::ExternCrate(f.fold_item_extern_crate(_binding_0)) + } + crate::Item::Fn(_binding_0) => crate::Item::Fn(f.fold_item_fn(_binding_0)), + crate::Item::ForeignMod(_binding_0) => { + crate::Item::ForeignMod(f.fold_item_foreign_mod(_binding_0)) + } + crate::Item::Impl(_binding_0) => crate::Item::Impl(f.fold_item_impl(_binding_0)), + crate::Item::Macro(_binding_0) => { + crate::Item::Macro(f.fold_item_macro(_binding_0)) + } + crate::Item::Mod(_binding_0) => crate::Item::Mod(f.fold_item_mod(_binding_0)), + crate::Item::Static(_binding_0) => { + crate::Item::Static(f.fold_item_static(_binding_0)) + } + crate::Item::Struct(_binding_0) => { + crate::Item::Struct(f.fold_item_struct(_binding_0)) + } + crate::Item::Trait(_binding_0) => { + crate::Item::Trait(f.fold_item_trait(_binding_0)) + } + crate::Item::TraitAlias(_binding_0) => { + crate::Item::TraitAlias(f.fold_item_trait_alias(_binding_0)) + } + crate::Item::Type(_binding_0) => crate::Item::Type(f.fold_item_type(_binding_0)), + crate::Item::Union(_binding_0) => { + crate::Item::Union(f.fold_item_union(_binding_0)) + } + crate::Item::Use(_binding_0) => crate::Item::Use(f.fold_item_use(_binding_0)), + crate::Item::Verbatim(_binding_0) => { + crate::Item::Verbatim(f.fold_token_stream(_binding_0)) + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn fold_item_const<F>(f: &mut F, node: crate::ItemConst) -> crate::ItemConst +where + F: Fold + ?Sized, +{ + crate::ItemConst { + attrs: 
f.fold_attributes(node.attrs), + vis: f.fold_visibility(node.vis), + const_token: node.const_token, + ident: f.fold_ident(node.ident), + generics: f.fold_generics(node.generics), + colon_token: node.colon_token, + ty: Box::new(f.fold_type(*node.ty)), + eq_token: node.eq_token, + expr: Box::new(f.fold_expr(*node.expr)), + semi_token: node.semi_token, + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn fold_item_enum<F>(f: &mut F, node: crate::ItemEnum) -> crate::ItemEnum +where + F: Fold + ?Sized, +{ + crate::ItemEnum { + attrs: f.fold_attributes(node.attrs), + vis: f.fold_visibility(node.vis), + enum_token: node.enum_token, + ident: f.fold_ident(node.ident), + generics: f.fold_generics(node.generics), + brace_token: node.brace_token, + variants: crate::punctuated::fold(node.variants, f, F::fold_variant), + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn fold_item_extern_crate<F>( + f: &mut F, + node: crate::ItemExternCrate, +) -> crate::ItemExternCrate +where + F: Fold + ?Sized, +{ + crate::ItemExternCrate { + attrs: f.fold_attributes(node.attrs), + vis: f.fold_visibility(node.vis), + extern_token: node.extern_token, + crate_token: node.crate_token, + ident: f.fold_ident(node.ident), + rename: (node.rename).map(|it| ((it).0, f.fold_ident((it).1))), + semi_token: node.semi_token, + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn fold_item_fn<F>(f: &mut F, node: crate::ItemFn) -> crate::ItemFn +where + F: Fold + ?Sized, +{ + crate::ItemFn { + attrs: f.fold_attributes(node.attrs), + vis: f.fold_visibility(node.vis), + sig: f.fold_signature(node.sig), + block: Box::new(f.fold_block(*node.block)), + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn fold_item_foreign_mod<F>( + f: &mut F, + node: crate::ItemForeignMod, +) -> crate::ItemForeignMod +where + F: Fold + ?Sized, +{ + crate::ItemForeignMod { + attrs: f.fold_attributes(node.attrs), + unsafety: node.unsafety, + abi: f.fold_abi(node.abi), + brace_token: node.brace_token, + items: fold_vec(node.items, f, F::fold_foreign_item), + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn fold_item_impl<F>(f: &mut F, node: crate::ItemImpl) -> crate::ItemImpl +where + F: Fold + ?Sized, +{ + crate::ItemImpl { + attrs: f.fold_attributes(node.attrs), + defaultness: node.defaultness, + unsafety: node.unsafety, + impl_token: node.impl_token, + generics: f.fold_generics(node.generics), + trait_: (node.trait_).map(|it| ((it).0, f.fold_path((it).1), (it).2)), + self_ty: Box::new(f.fold_type(*node.self_ty)), + brace_token: node.brace_token, + items: fold_vec(node.items, f, F::fold_impl_item), + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn fold_item_macro<F>(f: &mut F, node: crate::ItemMacro) -> crate::ItemMacro +where + F: Fold + ?Sized, +{ + crate::ItemMacro { + attrs: f.fold_attributes(node.attrs), + ident: (node.ident).map(|it| f.fold_ident(it)), + mac: f.fold_macro(node.mac), + semi_token: node.semi_token, + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn fold_item_mod<F>(f: &mut F, node: crate::ItemMod) -> crate::ItemMod +where + F: Fold + ?Sized, +{ + crate::ItemMod { + attrs: f.fold_attributes(node.attrs), + vis: f.fold_visibility(node.vis), + unsafety: node.unsafety, + mod_token: node.mod_token, + ident: f.fold_ident(node.ident), + content: (node.content).map(|it| ((it).0, 
fold_vec((it).1, f, F::fold_item))), + semi: node.semi, + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn fold_item_static<F>(f: &mut F, node: crate::ItemStatic) -> crate::ItemStatic +where + F: Fold + ?Sized, +{ + crate::ItemStatic { + attrs: f.fold_attributes(node.attrs), + vis: f.fold_visibility(node.vis), + static_token: node.static_token, + mutability: f.fold_static_mutability(node.mutability), + ident: f.fold_ident(node.ident), + colon_token: node.colon_token, + ty: Box::new(f.fold_type(*node.ty)), + eq_token: node.eq_token, + expr: Box::new(f.fold_expr(*node.expr)), + semi_token: node.semi_token, + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn fold_item_struct<F>(f: &mut F, node: crate::ItemStruct) -> crate::ItemStruct +where + F: Fold + ?Sized, +{ + crate::ItemStruct { + attrs: f.fold_attributes(node.attrs), + vis: f.fold_visibility(node.vis), + struct_token: node.struct_token, + ident: f.fold_ident(node.ident), + generics: f.fold_generics(node.generics), + fields: f.fold_fields(node.fields), + semi_token: node.semi_token, + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn fold_item_trait<F>(f: &mut F, node: crate::ItemTrait) -> crate::ItemTrait +where + F: Fold + ?Sized, +{ + crate::ItemTrait { + attrs: f.fold_attributes(node.attrs), + vis: f.fold_visibility(node.vis), + unsafety: node.unsafety, + auto_token: node.auto_token, + restriction: (node.restriction).map(|it| f.fold_impl_restriction(it)), + trait_token: node.trait_token, + ident: f.fold_ident(node.ident), + generics: f.fold_generics(node.generics), + colon_token: node.colon_token, + supertraits: crate::punctuated::fold( + node.supertraits, + f, + F::fold_type_param_bound, + ), + brace_token: node.brace_token, + items: fold_vec(node.items, f, F::fold_trait_item), + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn fold_item_trait_alias<F>( + f: &mut F, + node: crate::ItemTraitAlias, +) -> crate::ItemTraitAlias +where + F: Fold + ?Sized, +{ + crate::ItemTraitAlias { + attrs: f.fold_attributes(node.attrs), + vis: f.fold_visibility(node.vis), + trait_token: node.trait_token, + ident: f.fold_ident(node.ident), + generics: f.fold_generics(node.generics), + eq_token: node.eq_token, + bounds: crate::punctuated::fold(node.bounds, f, F::fold_type_param_bound), + semi_token: node.semi_token, + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn fold_item_type<F>(f: &mut F, node: crate::ItemType) -> crate::ItemType +where + F: Fold + ?Sized, +{ + crate::ItemType { + attrs: f.fold_attributes(node.attrs), + vis: f.fold_visibility(node.vis), + type_token: node.type_token, + ident: f.fold_ident(node.ident), + generics: f.fold_generics(node.generics), + eq_token: node.eq_token, + ty: Box::new(f.fold_type(*node.ty)), + semi_token: node.semi_token, + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn fold_item_union<F>(f: &mut F, node: crate::ItemUnion) -> crate::ItemUnion +where + F: Fold + ?Sized, +{ + crate::ItemUnion { + attrs: f.fold_attributes(node.attrs), + vis: f.fold_visibility(node.vis), + union_token: node.union_token, + ident: f.fold_ident(node.ident), + generics: f.fold_generics(node.generics), + fields: f.fold_fields_named(node.fields), + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn fold_item_use<F>(f: &mut F, node: crate::ItemUse) -> crate::ItemUse +where 
+ F: Fold + ?Sized, +{ + crate::ItemUse { + attrs: f.fold_attributes(node.attrs), + vis: f.fold_visibility(node.vis), + use_token: node.use_token, + leading_colon: node.leading_colon, + tree: f.fold_use_tree(node.tree), + semi_token: node.semi_token, + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn fold_label<F>(f: &mut F, node: crate::Label) -> crate::Label +where + F: Fold + ?Sized, +{ + crate::Label { + name: f.fold_lifetime(node.name), + colon_token: node.colon_token, + } +} +pub fn fold_lifetime<F>(f: &mut F, node: crate::Lifetime) -> crate::Lifetime +where + F: Fold + ?Sized, +{ + crate::Lifetime { + apostrophe: f.fold_span(node.apostrophe), + ident: f.fold_ident(node.ident), + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn fold_lifetime_param<F>( + f: &mut F, + node: crate::LifetimeParam, +) -> crate::LifetimeParam +where + F: Fold + ?Sized, +{ + crate::LifetimeParam { + attrs: f.fold_attributes(node.attrs), + lifetime: f.fold_lifetime(node.lifetime), + colon_token: node.colon_token, + bounds: crate::punctuated::fold(node.bounds, f, F::fold_lifetime), + } +} +pub fn fold_lit<F>(f: &mut F, node: crate::Lit) -> crate::Lit +where + F: Fold + ?Sized, +{ + match node { + crate::Lit::Str(_binding_0) => crate::Lit::Str(f.fold_lit_str(_binding_0)), + crate::Lit::ByteStr(_binding_0) => { + crate::Lit::ByteStr(f.fold_lit_byte_str(_binding_0)) + } + crate::Lit::CStr(_binding_0) => crate::Lit::CStr(f.fold_lit_cstr(_binding_0)), + crate::Lit::Byte(_binding_0) => crate::Lit::Byte(f.fold_lit_byte(_binding_0)), + crate::Lit::Char(_binding_0) => crate::Lit::Char(f.fold_lit_char(_binding_0)), + crate::Lit::Int(_binding_0) => crate::Lit::Int(f.fold_lit_int(_binding_0)), + crate::Lit::Float(_binding_0) => crate::Lit::Float(f.fold_lit_float(_binding_0)), + crate::Lit::Bool(_binding_0) => crate::Lit::Bool(f.fold_lit_bool(_binding_0)), + crate::Lit::Verbatim(_binding_0) => crate::Lit::Verbatim(_binding_0), + } +} +pub fn fold_lit_bool<F>(f: &mut F, node: crate::LitBool) -> crate::LitBool +where + F: Fold + ?Sized, +{ + crate::LitBool { + value: node.value, + span: f.fold_span(node.span), + } +} +pub fn fold_lit_byte<F>(f: &mut F, node: crate::LitByte) -> crate::LitByte +where + F: Fold + ?Sized, +{ + let span = f.fold_span(node.span()); + let mut node = node; + node.set_span(span); + node +} +pub fn fold_lit_byte_str<F>(f: &mut F, node: crate::LitByteStr) -> crate::LitByteStr +where + F: Fold + ?Sized, +{ + let span = f.fold_span(node.span()); + let mut node = node; + node.set_span(span); + node +} +pub fn fold_lit_cstr<F>(f: &mut F, node: crate::LitCStr) -> crate::LitCStr +where + F: Fold + ?Sized, +{ + let span = f.fold_span(node.span()); + let mut node = node; + node.set_span(span); + node +} +pub fn fold_lit_char<F>(f: &mut F, node: crate::LitChar) -> crate::LitChar +where + F: Fold + ?Sized, +{ + let span = f.fold_span(node.span()); + let mut node = node; + node.set_span(span); + node +} +pub fn fold_lit_float<F>(f: &mut F, node: crate::LitFloat) -> crate::LitFloat +where + F: Fold + ?Sized, +{ + let span = f.fold_span(node.span()); + let mut node = node; + node.set_span(span); + node +} +pub fn fold_lit_int<F>(f: &mut F, node: crate::LitInt) -> crate::LitInt +where + F: Fold + ?Sized, +{ + let span = f.fold_span(node.span()); + let mut node = node; + node.set_span(span); + node +} +pub fn fold_lit_str<F>(f: &mut F, node: crate::LitStr) -> crate::LitStr +where + F: Fold 
+ ?Sized, +{ + let span = f.fold_span(node.span()); + let mut node = node; + node.set_span(span); + node +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn fold_local<F>(f: &mut F, node: crate::Local) -> crate::Local +where + F: Fold + ?Sized, +{ + crate::Local { + attrs: f.fold_attributes(node.attrs), + let_token: node.let_token, + pat: f.fold_pat(node.pat), + init: (node.init).map(|it| f.fold_local_init(it)), + semi_token: node.semi_token, + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn fold_local_init<F>(f: &mut F, node: crate::LocalInit) -> crate::LocalInit +where + F: Fold + ?Sized, +{ + crate::LocalInit { + eq_token: node.eq_token, + expr: Box::new(f.fold_expr(*node.expr)), + diverge: (node.diverge).map(|it| ((it).0, Box::new(f.fold_expr(*(it).1)))), + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn fold_macro<F>(f: &mut F, node: crate::Macro) -> crate::Macro +where + F: Fold + ?Sized, +{ + crate::Macro { + path: f.fold_path(node.path), + bang_token: node.bang_token, + delimiter: f.fold_macro_delimiter(node.delimiter), + tokens: f.fold_token_stream(node.tokens), + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn fold_macro_delimiter<F>( + f: &mut F, + node: crate::MacroDelimiter, +) -> crate::MacroDelimiter +where + F: Fold + ?Sized, +{ + match node { + crate::MacroDelimiter::Paren(_binding_0) => { + crate::MacroDelimiter::Paren(_binding_0) + } + crate::MacroDelimiter::Brace(_binding_0) => { + crate::MacroDelimiter::Brace(_binding_0) + } + crate::MacroDelimiter::Bracket(_binding_0) => { + crate::MacroDelimiter::Bracket(_binding_0) + } + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn fold_member<F>(f: &mut F, node: crate::Member) -> crate::Member +where + F: Fold + ?Sized, +{ + match node { + crate::Member::Named(_binding_0) => { + crate::Member::Named(f.fold_ident(_binding_0)) + } + crate::Member::Unnamed(_binding_0) => { + crate::Member::Unnamed(f.fold_index(_binding_0)) + } + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn fold_meta<F>(f: &mut F, node: crate::Meta) -> crate::Meta +where + F: Fold + ?Sized, +{ + match node { + crate::Meta::Path(_binding_0) => crate::Meta::Path(f.fold_path(_binding_0)), + crate::Meta::List(_binding_0) => crate::Meta::List(f.fold_meta_list(_binding_0)), + crate::Meta::NameValue(_binding_0) => { + crate::Meta::NameValue(f.fold_meta_name_value(_binding_0)) + } + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn fold_meta_list<F>(f: &mut F, node: crate::MetaList) -> crate::MetaList +where + F: Fold + ?Sized, +{ + crate::MetaList { + path: f.fold_path(node.path), + delimiter: f.fold_macro_delimiter(node.delimiter), + tokens: f.fold_token_stream(node.tokens), + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn fold_meta_name_value<F>( + f: &mut F, + node: crate::MetaNameValue, +) -> crate::MetaNameValue +where + F: Fold + ?Sized, +{ + crate::MetaNameValue { + path: f.fold_path(node.path), + eq_token: node.eq_token, + value: f.fold_expr(node.value), + } +} 
+#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn fold_parenthesized_generic_arguments<F>( + f: &mut F, + node: crate::ParenthesizedGenericArguments, +) -> crate::ParenthesizedGenericArguments +where + F: Fold + ?Sized, +{ + crate::ParenthesizedGenericArguments { + paren_token: node.paren_token, + inputs: crate::punctuated::fold(node.inputs, f, F::fold_type), + output: f.fold_return_type(node.output), + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn fold_pat<F>(f: &mut F, node: crate::Pat) -> crate::Pat +where + F: Fold + ?Sized, +{ + match node { + crate::Pat::Const(_binding_0) => crate::Pat::Const(f.fold_expr_const(_binding_0)), + crate::Pat::Ident(_binding_0) => crate::Pat::Ident(f.fold_pat_ident(_binding_0)), + crate::Pat::Lit(_binding_0) => crate::Pat::Lit(f.fold_expr_lit(_binding_0)), + crate::Pat::Macro(_binding_0) => crate::Pat::Macro(f.fold_expr_macro(_binding_0)), + crate::Pat::Or(_binding_0) => crate::Pat::Or(f.fold_pat_or(_binding_0)), + crate::Pat::Paren(_binding_0) => crate::Pat::Paren(f.fold_pat_paren(_binding_0)), + crate::Pat::Path(_binding_0) => crate::Pat::Path(f.fold_expr_path(_binding_0)), + crate::Pat::Range(_binding_0) => crate::Pat::Range(f.fold_expr_range(_binding_0)), + crate::Pat::Reference(_binding_0) => { + crate::Pat::Reference(f.fold_pat_reference(_binding_0)) + } + crate::Pat::Rest(_binding_0) => crate::Pat::Rest(f.fold_pat_rest(_binding_0)), + crate::Pat::Slice(_binding_0) => crate::Pat::Slice(f.fold_pat_slice(_binding_0)), + crate::Pat::Struct(_binding_0) => { + crate::Pat::Struct(f.fold_pat_struct(_binding_0)) + } + crate::Pat::Tuple(_binding_0) => crate::Pat::Tuple(f.fold_pat_tuple(_binding_0)), + crate::Pat::TupleStruct(_binding_0) => { + crate::Pat::TupleStruct(f.fold_pat_tuple_struct(_binding_0)) + } + crate::Pat::Type(_binding_0) => crate::Pat::Type(f.fold_pat_type(_binding_0)), + crate::Pat::Verbatim(_binding_0) => { + crate::Pat::Verbatim(f.fold_token_stream(_binding_0)) + } + crate::Pat::Wild(_binding_0) => crate::Pat::Wild(f.fold_pat_wild(_binding_0)), + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn fold_pat_ident<F>(f: &mut F, node: crate::PatIdent) -> crate::PatIdent +where + F: Fold + ?Sized, +{ + crate::PatIdent { + attrs: f.fold_attributes(node.attrs), + by_ref: node.by_ref, + mutability: node.mutability, + ident: f.fold_ident(node.ident), + subpat: (node.subpat).map(|it| ((it).0, Box::new(f.fold_pat(*(it).1)))), + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn fold_pat_or<F>(f: &mut F, node: crate::PatOr) -> crate::PatOr +where + F: Fold + ?Sized, +{ + crate::PatOr { + attrs: f.fold_attributes(node.attrs), + leading_vert: node.leading_vert, + cases: crate::punctuated::fold(node.cases, f, F::fold_pat), + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn fold_pat_paren<F>(f: &mut F, node: crate::PatParen) -> crate::PatParen +where + F: Fold + ?Sized, +{ + crate::PatParen { + attrs: f.fold_attributes(node.attrs), + paren_token: node.paren_token, + pat: Box::new(f.fold_pat(*node.pat)), + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn fold_pat_reference<F>(f: &mut F, node: crate::PatReference) -> crate::PatReference +where + F: Fold + ?Sized, +{ + crate::PatReference { + attrs: f.fold_attributes(node.attrs), + and_token: node.and_token, + mutability: 
node.mutability, + pat: Box::new(f.fold_pat(*node.pat)), + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn fold_pat_rest<F>(f: &mut F, node: crate::PatRest) -> crate::PatRest +where + F: Fold + ?Sized, +{ + crate::PatRest { + attrs: f.fold_attributes(node.attrs), + dot2_token: node.dot2_token, + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn fold_pat_slice<F>(f: &mut F, node: crate::PatSlice) -> crate::PatSlice +where + F: Fold + ?Sized, +{ + crate::PatSlice { + attrs: f.fold_attributes(node.attrs), + bracket_token: node.bracket_token, + elems: crate::punctuated::fold(node.elems, f, F::fold_pat), + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn fold_pat_struct<F>(f: &mut F, node: crate::PatStruct) -> crate::PatStruct +where + F: Fold + ?Sized, +{ + crate::PatStruct { + attrs: f.fold_attributes(node.attrs), + qself: (node.qself).map(|it| f.fold_qself(it)), + path: f.fold_path(node.path), + brace_token: node.brace_token, + fields: crate::punctuated::fold(node.fields, f, F::fold_field_pat), + rest: (node.rest).map(|it| f.fold_pat_rest(it)), + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn fold_pat_tuple<F>(f: &mut F, node: crate::PatTuple) -> crate::PatTuple +where + F: Fold + ?Sized, +{ + crate::PatTuple { + attrs: f.fold_attributes(node.attrs), + paren_token: node.paren_token, + elems: crate::punctuated::fold(node.elems, f, F::fold_pat), + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn fold_pat_tuple_struct<F>( + f: &mut F, + node: crate::PatTupleStruct, +) -> crate::PatTupleStruct +where + F: Fold + ?Sized, +{ + crate::PatTupleStruct { + attrs: f.fold_attributes(node.attrs), + qself: (node.qself).map(|it| f.fold_qself(it)), + path: f.fold_path(node.path), + paren_token: node.paren_token, + elems: crate::punctuated::fold(node.elems, f, F::fold_pat), + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn fold_pat_type<F>(f: &mut F, node: crate::PatType) -> crate::PatType +where + F: Fold + ?Sized, +{ + crate::PatType { + attrs: f.fold_attributes(node.attrs), + pat: Box::new(f.fold_pat(*node.pat)), + colon_token: node.colon_token, + ty: Box::new(f.fold_type(*node.ty)), + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn fold_pat_wild<F>(f: &mut F, node: crate::PatWild) -> crate::PatWild +where + F: Fold + ?Sized, +{ + crate::PatWild { + attrs: f.fold_attributes(node.attrs), + underscore_token: node.underscore_token, + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn fold_path<F>(f: &mut F, node: crate::Path) -> crate::Path +where + F: Fold + ?Sized, +{ + crate::Path { + leading_colon: node.leading_colon, + segments: crate::punctuated::fold(node.segments, f, F::fold_path_segment), + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn fold_path_arguments<F>( + f: &mut F, + node: crate::PathArguments, +) -> crate::PathArguments +where + F: Fold + ?Sized, +{ + match node { + crate::PathArguments::None => crate::PathArguments::None, + crate::PathArguments::AngleBracketed(_binding_0) => { + crate::PathArguments::AngleBracketed( + f.fold_angle_bracketed_generic_arguments(_binding_0), + ) + } + crate::PathArguments::Parenthesized(_binding_0) => { + 
crate::PathArguments::Parenthesized( + f.fold_parenthesized_generic_arguments(_binding_0), + ) + } + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn fold_path_segment<F>(f: &mut F, node: crate::PathSegment) -> crate::PathSegment +where + F: Fold + ?Sized, +{ + crate::PathSegment { + ident: f.fold_ident(node.ident), + arguments: f.fold_path_arguments(node.arguments), + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn fold_pointer_mutability<F>( + f: &mut F, + node: crate::PointerMutability, +) -> crate::PointerMutability +where + F: Fold + ?Sized, +{ + match node { + crate::PointerMutability::Const(_binding_0) => { + crate::PointerMutability::Const(_binding_0) + } + crate::PointerMutability::Mut(_binding_0) => { + crate::PointerMutability::Mut(_binding_0) + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn fold_precise_capture<F>( + f: &mut F, + node: crate::PreciseCapture, +) -> crate::PreciseCapture +where + F: Fold + ?Sized, +{ + crate::PreciseCapture { + use_token: node.use_token, + lt_token: node.lt_token, + params: crate::punctuated::fold(node.params, f, F::fold_captured_param), + gt_token: node.gt_token, + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn fold_predicate_lifetime<F>( + f: &mut F, + node: crate::PredicateLifetime, +) -> crate::PredicateLifetime +where + F: Fold + ?Sized, +{ + crate::PredicateLifetime { + lifetime: f.fold_lifetime(node.lifetime), + colon_token: node.colon_token, + bounds: crate::punctuated::fold(node.bounds, f, F::fold_lifetime), + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn fold_predicate_type<F>( + f: &mut F, + node: crate::PredicateType, +) -> crate::PredicateType +where + F: Fold + ?Sized, +{ + crate::PredicateType { + lifetimes: (node.lifetimes).map(|it| f.fold_bound_lifetimes(it)), + bounded_ty: f.fold_type(node.bounded_ty), + colon_token: node.colon_token, + bounds: crate::punctuated::fold(node.bounds, f, F::fold_type_param_bound), + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn fold_qself<F>(f: &mut F, node: crate::QSelf) -> crate::QSelf +where + F: Fold + ?Sized, +{ + crate::QSelf { + lt_token: node.lt_token, + ty: Box::new(f.fold_type(*node.ty)), + position: node.position, + as_token: node.as_token, + gt_token: node.gt_token, + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn fold_range_limits<F>(f: &mut F, node: crate::RangeLimits) -> crate::RangeLimits +where + F: Fold + ?Sized, +{ + match node { + crate::RangeLimits::HalfOpen(_binding_0) => { + crate::RangeLimits::HalfOpen(_binding_0) + } + crate::RangeLimits::Closed(_binding_0) => crate::RangeLimits::Closed(_binding_0), + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn fold_receiver<F>(f: &mut F, node: crate::Receiver) -> crate::Receiver +where + F: Fold + ?Sized, +{ + crate::Receiver { + attrs: f.fold_attributes(node.attrs), + reference: (node.reference) + .map(|it| ((it).0, ((it).1).map(|it| f.fold_lifetime(it)))), + mutability: node.mutability, + self_token: node.self_token, + colon_token: node.colon_token, + ty: Box::new(f.fold_type(*node.ty)), + } +} +#[cfg(any(feature = 
"derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn fold_return_type<F>(f: &mut F, node: crate::ReturnType) -> crate::ReturnType +where + F: Fold + ?Sized, +{ + match node { + crate::ReturnType::Default => crate::ReturnType::Default, + crate::ReturnType::Type(_binding_0, _binding_1) => { + crate::ReturnType::Type(_binding_0, Box::new(f.fold_type(*_binding_1))) + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn fold_signature<F>(f: &mut F, node: crate::Signature) -> crate::Signature +where + F: Fold + ?Sized, +{ + crate::Signature { + constness: node.constness, + asyncness: node.asyncness, + unsafety: node.unsafety, + abi: (node.abi).map(|it| f.fold_abi(it)), + fn_token: node.fn_token, + ident: f.fold_ident(node.ident), + generics: f.fold_generics(node.generics), + paren_token: node.paren_token, + inputs: crate::punctuated::fold(node.inputs, f, F::fold_fn_arg), + variadic: (node.variadic).map(|it| f.fold_variadic(it)), + output: f.fold_return_type(node.output), + } +} +pub fn fold_span<F>(f: &mut F, node: proc_macro2::Span) -> proc_macro2::Span +where + F: Fold + ?Sized, +{ + node +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn fold_static_mutability<F>( + f: &mut F, + node: crate::StaticMutability, +) -> crate::StaticMutability +where + F: Fold + ?Sized, +{ + match node { + crate::StaticMutability::Mut(_binding_0) => { + crate::StaticMutability::Mut(_binding_0) + } + crate::StaticMutability::None => crate::StaticMutability::None, + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn fold_stmt<F>(f: &mut F, node: crate::Stmt) -> crate::Stmt +where + F: Fold + ?Sized, +{ + match node { + crate::Stmt::Local(_binding_0) => crate::Stmt::Local(f.fold_local(_binding_0)), + crate::Stmt::Item(_binding_0) => crate::Stmt::Item(f.fold_item(_binding_0)), + crate::Stmt::Expr(_binding_0, _binding_1) => { + crate::Stmt::Expr(f.fold_expr(_binding_0), _binding_1) + } + crate::Stmt::Macro(_binding_0) => { + crate::Stmt::Macro(f.fold_stmt_macro(_binding_0)) + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn fold_stmt_macro<F>(f: &mut F, node: crate::StmtMacro) -> crate::StmtMacro +where + F: Fold + ?Sized, +{ + crate::StmtMacro { + attrs: f.fold_attributes(node.attrs), + mac: f.fold_macro(node.mac), + semi_token: node.semi_token, + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn fold_trait_bound<F>(f: &mut F, node: crate::TraitBound) -> crate::TraitBound +where + F: Fold + ?Sized, +{ + crate::TraitBound { + paren_token: node.paren_token, + modifier: f.fold_trait_bound_modifier(node.modifier), + lifetimes: (node.lifetimes).map(|it| f.fold_bound_lifetimes(it)), + path: f.fold_path(node.path), + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn fold_trait_bound_modifier<F>( + f: &mut F, + node: crate::TraitBoundModifier, +) -> crate::TraitBoundModifier +where + F: Fold + ?Sized, +{ + match node { + crate::TraitBoundModifier::None => crate::TraitBoundModifier::None, + crate::TraitBoundModifier::Maybe(_binding_0) => { + crate::TraitBoundModifier::Maybe(_binding_0) + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn fold_trait_item<F>(f: &mut F, node: crate::TraitItem) -> 
crate::TraitItem +where + F: Fold + ?Sized, +{ + match node { + crate::TraitItem::Const(_binding_0) => { + crate::TraitItem::Const(f.fold_trait_item_const(_binding_0)) + } + crate::TraitItem::Fn(_binding_0) => { + crate::TraitItem::Fn(f.fold_trait_item_fn(_binding_0)) + } + crate::TraitItem::Type(_binding_0) => { + crate::TraitItem::Type(f.fold_trait_item_type(_binding_0)) + } + crate::TraitItem::Macro(_binding_0) => { + crate::TraitItem::Macro(f.fold_trait_item_macro(_binding_0)) + } + crate::TraitItem::Verbatim(_binding_0) => { + crate::TraitItem::Verbatim(f.fold_token_stream(_binding_0)) + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn fold_trait_item_const<F>( + f: &mut F, + node: crate::TraitItemConst, +) -> crate::TraitItemConst +where + F: Fold + ?Sized, +{ + crate::TraitItemConst { + attrs: f.fold_attributes(node.attrs), + const_token: node.const_token, + ident: f.fold_ident(node.ident), + generics: f.fold_generics(node.generics), + colon_token: node.colon_token, + ty: f.fold_type(node.ty), + default: (node.default).map(|it| ((it).0, f.fold_expr((it).1))), + semi_token: node.semi_token, + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn fold_trait_item_fn<F>(f: &mut F, node: crate::TraitItemFn) -> crate::TraitItemFn +where + F: Fold + ?Sized, +{ + crate::TraitItemFn { + attrs: f.fold_attributes(node.attrs), + sig: f.fold_signature(node.sig), + default: (node.default).map(|it| f.fold_block(it)), + semi_token: node.semi_token, + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn fold_trait_item_macro<F>( + f: &mut F, + node: crate::TraitItemMacro, +) -> crate::TraitItemMacro +where + F: Fold + ?Sized, +{ + crate::TraitItemMacro { + attrs: f.fold_attributes(node.attrs), + mac: f.fold_macro(node.mac), + semi_token: node.semi_token, + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn fold_trait_item_type<F>( + f: &mut F, + node: crate::TraitItemType, +) -> crate::TraitItemType +where + F: Fold + ?Sized, +{ + crate::TraitItemType { + attrs: f.fold_attributes(node.attrs), + type_token: node.type_token, + ident: f.fold_ident(node.ident), + generics: f.fold_generics(node.generics), + colon_token: node.colon_token, + bounds: crate::punctuated::fold(node.bounds, f, F::fold_type_param_bound), + default: (node.default).map(|it| ((it).0, f.fold_type((it).1))), + semi_token: node.semi_token, + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn fold_type<F>(f: &mut F, node: crate::Type) -> crate::Type +where + F: Fold + ?Sized, +{ + match node { + crate::Type::Array(_binding_0) => { + crate::Type::Array(f.fold_type_array(_binding_0)) + } + crate::Type::BareFn(_binding_0) => { + crate::Type::BareFn(f.fold_type_bare_fn(_binding_0)) + } + crate::Type::Group(_binding_0) => { + crate::Type::Group(f.fold_type_group(_binding_0)) + } + crate::Type::ImplTrait(_binding_0) => { + crate::Type::ImplTrait(f.fold_type_impl_trait(_binding_0)) + } + crate::Type::Infer(_binding_0) => { + crate::Type::Infer(f.fold_type_infer(_binding_0)) + } + crate::Type::Macro(_binding_0) => { + crate::Type::Macro(f.fold_type_macro(_binding_0)) + } + crate::Type::Never(_binding_0) => { + crate::Type::Never(f.fold_type_never(_binding_0)) + } + crate::Type::Paren(_binding_0) => { + crate::Type::Paren(f.fold_type_paren(_binding_0)) + } + crate::Type::Path(_binding_0) => 
crate::Type::Path(f.fold_type_path(_binding_0)), + crate::Type::Ptr(_binding_0) => crate::Type::Ptr(f.fold_type_ptr(_binding_0)), + crate::Type::Reference(_binding_0) => { + crate::Type::Reference(f.fold_type_reference(_binding_0)) + } + crate::Type::Slice(_binding_0) => { + crate::Type::Slice(f.fold_type_slice(_binding_0)) + } + crate::Type::TraitObject(_binding_0) => { + crate::Type::TraitObject(f.fold_type_trait_object(_binding_0)) + } + crate::Type::Tuple(_binding_0) => { + crate::Type::Tuple(f.fold_type_tuple(_binding_0)) + } + crate::Type::Verbatim(_binding_0) => { + crate::Type::Verbatim(f.fold_token_stream(_binding_0)) + } + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn fold_type_array<F>(f: &mut F, node: crate::TypeArray) -> crate::TypeArray +where + F: Fold + ?Sized, +{ + crate::TypeArray { + bracket_token: node.bracket_token, + elem: Box::new(f.fold_type(*node.elem)), + semi_token: node.semi_token, + len: f.fold_expr(node.len), + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn fold_type_bare_fn<F>(f: &mut F, node: crate::TypeBareFn) -> crate::TypeBareFn +where + F: Fold + ?Sized, +{ + crate::TypeBareFn { + lifetimes: (node.lifetimes).map(|it| f.fold_bound_lifetimes(it)), + unsafety: node.unsafety, + abi: (node.abi).map(|it| f.fold_abi(it)), + fn_token: node.fn_token, + paren_token: node.paren_token, + inputs: crate::punctuated::fold(node.inputs, f, F::fold_bare_fn_arg), + variadic: (node.variadic).map(|it| f.fold_bare_variadic(it)), + output: f.fold_return_type(node.output), + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn fold_type_group<F>(f: &mut F, node: crate::TypeGroup) -> crate::TypeGroup +where + F: Fold + ?Sized, +{ + crate::TypeGroup { + group_token: node.group_token, + elem: Box::new(f.fold_type(*node.elem)), + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn fold_type_impl_trait<F>( + f: &mut F, + node: crate::TypeImplTrait, +) -> crate::TypeImplTrait +where + F: Fold + ?Sized, +{ + crate::TypeImplTrait { + impl_token: node.impl_token, + bounds: crate::punctuated::fold(node.bounds, f, F::fold_type_param_bound), + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn fold_type_infer<F>(f: &mut F, node: crate::TypeInfer) -> crate::TypeInfer +where + F: Fold + ?Sized, +{ + crate::TypeInfer { + underscore_token: node.underscore_token, + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn fold_type_macro<F>(f: &mut F, node: crate::TypeMacro) -> crate::TypeMacro +where + F: Fold + ?Sized, +{ + crate::TypeMacro { + mac: f.fold_macro(node.mac), + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn fold_type_never<F>(f: &mut F, node: crate::TypeNever) -> crate::TypeNever +where + F: Fold + ?Sized, +{ + crate::TypeNever { + bang_token: node.bang_token, + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn fold_type_param<F>(f: &mut F, node: crate::TypeParam) -> crate::TypeParam +where + 
F: Fold + ?Sized, +{ + crate::TypeParam { + attrs: f.fold_attributes(node.attrs), + ident: f.fold_ident(node.ident), + colon_token: node.colon_token, + bounds: crate::punctuated::fold(node.bounds, f, F::fold_type_param_bound), + eq_token: node.eq_token, + default: (node.default).map(|it| f.fold_type(it)), + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn fold_type_param_bound<F>( + f: &mut F, + node: crate::TypeParamBound, +) -> crate::TypeParamBound +where + F: Fold + ?Sized, +{ + match node { + crate::TypeParamBound::Trait(_binding_0) => { + crate::TypeParamBound::Trait(f.fold_trait_bound(_binding_0)) + } + crate::TypeParamBound::Lifetime(_binding_0) => { + crate::TypeParamBound::Lifetime(f.fold_lifetime(_binding_0)) + } + crate::TypeParamBound::PreciseCapture(_binding_0) => { + crate::TypeParamBound::PreciseCapture( + full!(f.fold_precise_capture(_binding_0)), + ) + } + crate::TypeParamBound::Verbatim(_binding_0) => { + crate::TypeParamBound::Verbatim(f.fold_token_stream(_binding_0)) + } + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn fold_type_paren<F>(f: &mut F, node: crate::TypeParen) -> crate::TypeParen +where + F: Fold + ?Sized, +{ + crate::TypeParen { + paren_token: node.paren_token, + elem: Box::new(f.fold_type(*node.elem)), + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn fold_type_path<F>(f: &mut F, node: crate::TypePath) -> crate::TypePath +where + F: Fold + ?Sized, +{ + crate::TypePath { + qself: (node.qself).map(|it| f.fold_qself(it)), + path: f.fold_path(node.path), + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn fold_type_ptr<F>(f: &mut F, node: crate::TypePtr) -> crate::TypePtr +where + F: Fold + ?Sized, +{ + crate::TypePtr { + star_token: node.star_token, + const_token: node.const_token, + mutability: node.mutability, + elem: Box::new(f.fold_type(*node.elem)), + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn fold_type_reference<F>( + f: &mut F, + node: crate::TypeReference, +) -> crate::TypeReference +where + F: Fold + ?Sized, +{ + crate::TypeReference { + and_token: node.and_token, + lifetime: (node.lifetime).map(|it| f.fold_lifetime(it)), + mutability: node.mutability, + elem: Box::new(f.fold_type(*node.elem)), + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn fold_type_slice<F>(f: &mut F, node: crate::TypeSlice) -> crate::TypeSlice +where + F: Fold + ?Sized, +{ + crate::TypeSlice { + bracket_token: node.bracket_token, + elem: Box::new(f.fold_type(*node.elem)), + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn fold_type_trait_object<F>( + f: &mut F, + node: crate::TypeTraitObject, +) -> crate::TypeTraitObject +where + F: Fold + ?Sized, +{ + crate::TypeTraitObject { + dyn_token: node.dyn_token, + bounds: crate::punctuated::fold(node.bounds, f, F::fold_type_param_bound), + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn fold_type_tuple<F>(f: &mut F, node: 
crate::TypeTuple) -> crate::TypeTuple +where + F: Fold + ?Sized, +{ + crate::TypeTuple { + paren_token: node.paren_token, + elems: crate::punctuated::fold(node.elems, f, F::fold_type), + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn fold_un_op<F>(f: &mut F, node: crate::UnOp) -> crate::UnOp +where + F: Fold + ?Sized, +{ + match node { + crate::UnOp::Deref(_binding_0) => crate::UnOp::Deref(_binding_0), + crate::UnOp::Not(_binding_0) => crate::UnOp::Not(_binding_0), + crate::UnOp::Neg(_binding_0) => crate::UnOp::Neg(_binding_0), + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn fold_use_glob<F>(f: &mut F, node: crate::UseGlob) -> crate::UseGlob +where + F: Fold + ?Sized, +{ + crate::UseGlob { + star_token: node.star_token, + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn fold_use_group<F>(f: &mut F, node: crate::UseGroup) -> crate::UseGroup +where + F: Fold + ?Sized, +{ + crate::UseGroup { + brace_token: node.brace_token, + items: crate::punctuated::fold(node.items, f, F::fold_use_tree), + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn fold_use_name<F>(f: &mut F, node: crate::UseName) -> crate::UseName +where + F: Fold + ?Sized, +{ + crate::UseName { + ident: f.fold_ident(node.ident), + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn fold_use_path<F>(f: &mut F, node: crate::UsePath) -> crate::UsePath +where + F: Fold + ?Sized, +{ + crate::UsePath { + ident: f.fold_ident(node.ident), + colon2_token: node.colon2_token, + tree: Box::new(f.fold_use_tree(*node.tree)), + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn fold_use_rename<F>(f: &mut F, node: crate::UseRename) -> crate::UseRename +where + F: Fold + ?Sized, +{ + crate::UseRename { + ident: f.fold_ident(node.ident), + as_token: node.as_token, + rename: f.fold_ident(node.rename), + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn fold_use_tree<F>(f: &mut F, node: crate::UseTree) -> crate::UseTree +where + F: Fold + ?Sized, +{ + match node { + crate::UseTree::Path(_binding_0) => { + crate::UseTree::Path(f.fold_use_path(_binding_0)) + } + crate::UseTree::Name(_binding_0) => { + crate::UseTree::Name(f.fold_use_name(_binding_0)) + } + crate::UseTree::Rename(_binding_0) => { + crate::UseTree::Rename(f.fold_use_rename(_binding_0)) + } + crate::UseTree::Glob(_binding_0) => { + crate::UseTree::Glob(f.fold_use_glob(_binding_0)) + } + crate::UseTree::Group(_binding_0) => { + crate::UseTree::Group(f.fold_use_group(_binding_0)) + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn fold_variadic<F>(f: &mut F, node: crate::Variadic) -> crate::Variadic +where + F: Fold + ?Sized, +{ + crate::Variadic { + attrs: f.fold_attributes(node.attrs), + pat: (node.pat).map(|it| (Box::new(f.fold_pat(*(it).0)), (it).1)), + dots: node.dots, + comma: node.comma, + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn fold_variant<F>(f: &mut F, node: crate::Variant) -> crate::Variant +where + F: Fold + ?Sized, +{ + crate::Variant { + attrs: f.fold_attributes(node.attrs), + ident: f.fold_ident(node.ident), + fields: f.fold_fields(node.fields), + discriminant: (node.discriminant).map(|it| ((it).0, f.fold_expr((it).1))), + } 
+} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn fold_vis_restricted<F>( + f: &mut F, + node: crate::VisRestricted, +) -> crate::VisRestricted +where + F: Fold + ?Sized, +{ + crate::VisRestricted { + pub_token: node.pub_token, + paren_token: node.paren_token, + in_token: node.in_token, + path: Box::new(f.fold_path(*node.path)), + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn fold_visibility<F>(f: &mut F, node: crate::Visibility) -> crate::Visibility +where + F: Fold + ?Sized, +{ + match node { + crate::Visibility::Public(_binding_0) => crate::Visibility::Public(_binding_0), + crate::Visibility::Restricted(_binding_0) => { + crate::Visibility::Restricted(f.fold_vis_restricted(_binding_0)) + } + crate::Visibility::Inherited => crate::Visibility::Inherited, + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn fold_where_clause<F>(f: &mut F, node: crate::WhereClause) -> crate::WhereClause +where + F: Fold + ?Sized, +{ + crate::WhereClause { + where_token: node.where_token, + predicates: crate::punctuated::fold(node.predicates, f, F::fold_where_predicate), + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn fold_where_predicate<F>( + f: &mut F, + node: crate::WherePredicate, +) -> crate::WherePredicate +where + F: Fold + ?Sized, +{ + match node { + crate::WherePredicate::Lifetime(_binding_0) => { + crate::WherePredicate::Lifetime(f.fold_predicate_lifetime(_binding_0)) + } + crate::WherePredicate::Type(_binding_0) => { + crate::WherePredicate::Type(f.fold_predicate_type(_binding_0)) + } + } +} +#[cfg(any(feature = "derive", feature = "full"))] +fn fold_vec<T, V, F>(vec: Vec<T>, fold: &mut V, mut f: F) -> Vec<T> +where + V: ?Sized, + F: FnMut(&mut V, T) -> T, +{ + vec.into_iter().map(|it| f(fold, it)).collect() +} diff --git a/vendor/syn/src/gen/hash.rs b/vendor/syn/src/gen/hash.rs new file mode 100644 index 00000000000000..04f23453a11777 --- /dev/null +++ b/vendor/syn/src/gen/hash.rs @@ -0,0 +1,2876 @@ +// This file is @generated by syn-internal-codegen. +// It is not intended for manual editing. 
+ +#[cfg(any(feature = "derive", feature = "full"))] +use crate::tt::TokenStreamHelper; +use std::hash::{Hash, Hasher}; +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::Abi { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + self.name.hash(state); + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::AngleBracketedGenericArguments { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + self.colon2_token.hash(state); + self.args.hash(state); + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::Arm { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + self.attrs.hash(state); + self.pat.hash(state); + self.guard.hash(state); + self.body.hash(state); + self.comma.hash(state); + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::AssocConst { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + self.ident.hash(state); + self.generics.hash(state); + self.value.hash(state); + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::AssocType { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + self.ident.hash(state); + self.generics.hash(state); + self.ty.hash(state); + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::AttrStyle { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + match self { + crate::AttrStyle::Outer => { + state.write_u8(0u8); + } + crate::AttrStyle::Inner(_) => { + state.write_u8(1u8); + } + } + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::Attribute { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + self.style.hash(state); + self.meta.hash(state); + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::BareFnArg { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + self.attrs.hash(state); + self.name.hash(state); + self.ty.hash(state); + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::BareVariadic { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + self.attrs.hash(state); + self.name.hash(state); + self.comma.hash(state); + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::BinOp { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + match self { + crate::BinOp::Add(_) => { + state.write_u8(0u8); + } + crate::BinOp::Sub(_) => { + state.write_u8(1u8); + } + crate::BinOp::Mul(_) => { + state.write_u8(2u8); + } + crate::BinOp::Div(_) => { + state.write_u8(3u8); + } + crate::BinOp::Rem(_) => { + state.write_u8(4u8); + } + crate::BinOp::And(_) => { + state.write_u8(5u8); + } + crate::BinOp::Or(_) => { + state.write_u8(6u8); + } + crate::BinOp::BitXor(_) => { + state.write_u8(7u8); + } + crate::BinOp::BitAnd(_) => { + state.write_u8(8u8); + } + crate::BinOp::BitOr(_) => { + state.write_u8(9u8); + } + crate::BinOp::Shl(_) => { + state.write_u8(10u8); + } + crate::BinOp::Shr(_) => { + 
state.write_u8(11u8); + } + crate::BinOp::Eq(_) => { + state.write_u8(12u8); + } + crate::BinOp::Lt(_) => { + state.write_u8(13u8); + } + crate::BinOp::Le(_) => { + state.write_u8(14u8); + } + crate::BinOp::Ne(_) => { + state.write_u8(15u8); + } + crate::BinOp::Ge(_) => { + state.write_u8(16u8); + } + crate::BinOp::Gt(_) => { + state.write_u8(17u8); + } + crate::BinOp::AddAssign(_) => { + state.write_u8(18u8); + } + crate::BinOp::SubAssign(_) => { + state.write_u8(19u8); + } + crate::BinOp::MulAssign(_) => { + state.write_u8(20u8); + } + crate::BinOp::DivAssign(_) => { + state.write_u8(21u8); + } + crate::BinOp::RemAssign(_) => { + state.write_u8(22u8); + } + crate::BinOp::BitXorAssign(_) => { + state.write_u8(23u8); + } + crate::BinOp::BitAndAssign(_) => { + state.write_u8(24u8); + } + crate::BinOp::BitOrAssign(_) => { + state.write_u8(25u8); + } + crate::BinOp::ShlAssign(_) => { + state.write_u8(26u8); + } + crate::BinOp::ShrAssign(_) => { + state.write_u8(27u8); + } + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::Block { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + self.stmts.hash(state); + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::BoundLifetimes { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + self.lifetimes.hash(state); + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::CapturedParam { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + match self { + crate::CapturedParam::Lifetime(v0) => { + state.write_u8(0u8); + v0.hash(state); + } + crate::CapturedParam::Ident(v0) => { + state.write_u8(1u8); + v0.hash(state); + } + } + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::ConstParam { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + self.attrs.hash(state); + self.ident.hash(state); + self.ty.hash(state); + self.eq_token.hash(state); + self.default.hash(state); + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::Constraint { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + self.ident.hash(state); + self.generics.hash(state); + self.bounds.hash(state); + } +} +#[cfg(feature = "derive")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::Data { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + match self { + crate::Data::Struct(v0) => { + state.write_u8(0u8); + v0.hash(state); + } + crate::Data::Enum(v0) => { + state.write_u8(1u8); + v0.hash(state); + } + crate::Data::Union(v0) => { + state.write_u8(2u8); + v0.hash(state); + } + } + } +} +#[cfg(feature = "derive")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::DataEnum { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + self.variants.hash(state); + } +} +#[cfg(feature = "derive")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::DataStruct { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + self.fields.hash(state); + self.semi_token.hash(state); + } +} +#[cfg(feature = "derive")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::DataUnion { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + self.fields.hash(state); + } +} 
+#[cfg(feature = "derive")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::DeriveInput { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + self.attrs.hash(state); + self.vis.hash(state); + self.ident.hash(state); + self.generics.hash(state); + self.data.hash(state); + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::Expr { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + match self { + #[cfg(feature = "full")] + crate::Expr::Array(v0) => { + state.write_u8(0u8); + v0.hash(state); + } + #[cfg(feature = "full")] + crate::Expr::Assign(v0) => { + state.write_u8(1u8); + v0.hash(state); + } + #[cfg(feature = "full")] + crate::Expr::Async(v0) => { + state.write_u8(2u8); + v0.hash(state); + } + #[cfg(feature = "full")] + crate::Expr::Await(v0) => { + state.write_u8(3u8); + v0.hash(state); + } + crate::Expr::Binary(v0) => { + state.write_u8(4u8); + v0.hash(state); + } + #[cfg(feature = "full")] + crate::Expr::Block(v0) => { + state.write_u8(5u8); + v0.hash(state); + } + #[cfg(feature = "full")] + crate::Expr::Break(v0) => { + state.write_u8(6u8); + v0.hash(state); + } + crate::Expr::Call(v0) => { + state.write_u8(7u8); + v0.hash(state); + } + crate::Expr::Cast(v0) => { + state.write_u8(8u8); + v0.hash(state); + } + #[cfg(feature = "full")] + crate::Expr::Closure(v0) => { + state.write_u8(9u8); + v0.hash(state); + } + #[cfg(feature = "full")] + crate::Expr::Const(v0) => { + state.write_u8(10u8); + v0.hash(state); + } + #[cfg(feature = "full")] + crate::Expr::Continue(v0) => { + state.write_u8(11u8); + v0.hash(state); + } + crate::Expr::Field(v0) => { + state.write_u8(12u8); + v0.hash(state); + } + #[cfg(feature = "full")] + crate::Expr::ForLoop(v0) => { + state.write_u8(13u8); + v0.hash(state); + } + crate::Expr::Group(v0) => { + state.write_u8(14u8); + v0.hash(state); + } + #[cfg(feature = "full")] + crate::Expr::If(v0) => { + state.write_u8(15u8); + v0.hash(state); + } + crate::Expr::Index(v0) => { + state.write_u8(16u8); + v0.hash(state); + } + #[cfg(feature = "full")] + crate::Expr::Infer(v0) => { + state.write_u8(17u8); + v0.hash(state); + } + #[cfg(feature = "full")] + crate::Expr::Let(v0) => { + state.write_u8(18u8); + v0.hash(state); + } + crate::Expr::Lit(v0) => { + state.write_u8(19u8); + v0.hash(state); + } + #[cfg(feature = "full")] + crate::Expr::Loop(v0) => { + state.write_u8(20u8); + v0.hash(state); + } + crate::Expr::Macro(v0) => { + state.write_u8(21u8); + v0.hash(state); + } + #[cfg(feature = "full")] + crate::Expr::Match(v0) => { + state.write_u8(22u8); + v0.hash(state); + } + crate::Expr::MethodCall(v0) => { + state.write_u8(23u8); + v0.hash(state); + } + crate::Expr::Paren(v0) => { + state.write_u8(24u8); + v0.hash(state); + } + crate::Expr::Path(v0) => { + state.write_u8(25u8); + v0.hash(state); + } + #[cfg(feature = "full")] + crate::Expr::Range(v0) => { + state.write_u8(26u8); + v0.hash(state); + } + #[cfg(feature = "full")] + crate::Expr::RawAddr(v0) => { + state.write_u8(27u8); + v0.hash(state); + } + crate::Expr::Reference(v0) => { + state.write_u8(28u8); + v0.hash(state); + } + #[cfg(feature = "full")] + crate::Expr::Repeat(v0) => { + state.write_u8(29u8); + v0.hash(state); + } + #[cfg(feature = "full")] + crate::Expr::Return(v0) => { + state.write_u8(30u8); + v0.hash(state); + } + crate::Expr::Struct(v0) => { + state.write_u8(31u8); + v0.hash(state); + } + #[cfg(feature = "full")] + crate::Expr::Try(v0) => { + 
state.write_u8(32u8); + v0.hash(state); + } + #[cfg(feature = "full")] + crate::Expr::TryBlock(v0) => { + state.write_u8(33u8); + v0.hash(state); + } + crate::Expr::Tuple(v0) => { + state.write_u8(34u8); + v0.hash(state); + } + crate::Expr::Unary(v0) => { + state.write_u8(35u8); + v0.hash(state); + } + #[cfg(feature = "full")] + crate::Expr::Unsafe(v0) => { + state.write_u8(36u8); + v0.hash(state); + } + crate::Expr::Verbatim(v0) => { + state.write_u8(37u8); + TokenStreamHelper(v0).hash(state); + } + #[cfg(feature = "full")] + crate::Expr::While(v0) => { + state.write_u8(38u8); + v0.hash(state); + } + #[cfg(feature = "full")] + crate::Expr::Yield(v0) => { + state.write_u8(39u8); + v0.hash(state); + } + #[cfg(not(feature = "full"))] + _ => unreachable!(), + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::ExprArray { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + self.attrs.hash(state); + self.elems.hash(state); + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::ExprAssign { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + self.attrs.hash(state); + self.left.hash(state); + self.right.hash(state); + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::ExprAsync { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + self.attrs.hash(state); + self.capture.hash(state); + self.block.hash(state); + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::ExprAwait { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + self.attrs.hash(state); + self.base.hash(state); + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::ExprBinary { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + self.attrs.hash(state); + self.left.hash(state); + self.op.hash(state); + self.right.hash(state); + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::ExprBlock { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + self.attrs.hash(state); + self.label.hash(state); + self.block.hash(state); + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::ExprBreak { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + self.attrs.hash(state); + self.label.hash(state); + self.expr.hash(state); + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::ExprCall { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + self.attrs.hash(state); + self.func.hash(state); + self.args.hash(state); + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::ExprCast { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + self.attrs.hash(state); + self.expr.hash(state); + self.ty.hash(state); + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::ExprClosure { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + self.attrs.hash(state); + self.lifetimes.hash(state); + self.constness.hash(state); + self.movability.hash(state); + self.asyncness.hash(state); + self.capture.hash(state); + self.inputs.hash(state); + 
self.output.hash(state); + self.body.hash(state); + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::ExprConst { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + self.attrs.hash(state); + self.block.hash(state); + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::ExprContinue { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + self.attrs.hash(state); + self.label.hash(state); + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::ExprField { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + self.attrs.hash(state); + self.base.hash(state); + self.member.hash(state); + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::ExprForLoop { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + self.attrs.hash(state); + self.label.hash(state); + self.pat.hash(state); + self.expr.hash(state); + self.body.hash(state); + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::ExprGroup { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + self.attrs.hash(state); + self.expr.hash(state); + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::ExprIf { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + self.attrs.hash(state); + self.cond.hash(state); + self.then_branch.hash(state); + self.else_branch.hash(state); + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::ExprIndex { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + self.attrs.hash(state); + self.expr.hash(state); + self.index.hash(state); + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::ExprInfer { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + self.attrs.hash(state); + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::ExprLet { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + self.attrs.hash(state); + self.pat.hash(state); + self.expr.hash(state); + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::ExprLit { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + self.attrs.hash(state); + self.lit.hash(state); + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::ExprLoop { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + self.attrs.hash(state); + self.label.hash(state); + self.body.hash(state); + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::ExprMacro { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + self.attrs.hash(state); + self.mac.hash(state); + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::ExprMatch { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + self.attrs.hash(state); + self.expr.hash(state); + self.arms.hash(state); + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, 
doc(cfg(feature = "extra-traits")))] +impl Hash for crate::ExprMethodCall { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + self.attrs.hash(state); + self.receiver.hash(state); + self.method.hash(state); + self.turbofish.hash(state); + self.args.hash(state); + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::ExprParen { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + self.attrs.hash(state); + self.expr.hash(state); + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::ExprPath { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + self.attrs.hash(state); + self.qself.hash(state); + self.path.hash(state); + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::ExprRange { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + self.attrs.hash(state); + self.start.hash(state); + self.limits.hash(state); + self.end.hash(state); + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::ExprRawAddr { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + self.attrs.hash(state); + self.mutability.hash(state); + self.expr.hash(state); + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::ExprReference { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + self.attrs.hash(state); + self.mutability.hash(state); + self.expr.hash(state); + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::ExprRepeat { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + self.attrs.hash(state); + self.expr.hash(state); + self.len.hash(state); + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::ExprReturn { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + self.attrs.hash(state); + self.expr.hash(state); + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::ExprStruct { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + self.attrs.hash(state); + self.qself.hash(state); + self.path.hash(state); + self.fields.hash(state); + self.dot2_token.hash(state); + self.rest.hash(state); + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::ExprTry { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + self.attrs.hash(state); + self.expr.hash(state); + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::ExprTryBlock { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + self.attrs.hash(state); + self.block.hash(state); + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::ExprTuple { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + self.attrs.hash(state); + self.elems.hash(state); + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::ExprUnary { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + self.attrs.hash(state); + self.op.hash(state); + self.expr.hash(state); + } 
+} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::ExprUnsafe { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + self.attrs.hash(state); + self.block.hash(state); + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::ExprWhile { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + self.attrs.hash(state); + self.label.hash(state); + self.cond.hash(state); + self.body.hash(state); + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::ExprYield { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + self.attrs.hash(state); + self.expr.hash(state); + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::Field { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + self.attrs.hash(state); + self.vis.hash(state); + self.mutability.hash(state); + self.ident.hash(state); + self.colon_token.hash(state); + self.ty.hash(state); + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::FieldMutability { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + match self { + crate::FieldMutability::None => { + state.write_u8(0u8); + } + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::FieldPat { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + self.attrs.hash(state); + self.member.hash(state); + self.colon_token.hash(state); + self.pat.hash(state); + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::FieldValue { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + self.attrs.hash(state); + self.member.hash(state); + self.colon_token.hash(state); + self.expr.hash(state); + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::Fields { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + match self { + crate::Fields::Named(v0) => { + state.write_u8(0u8); + v0.hash(state); + } + crate::Fields::Unnamed(v0) => { + state.write_u8(1u8); + v0.hash(state); + } + crate::Fields::Unit => { + state.write_u8(2u8); + } + } + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::FieldsNamed { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + self.named.hash(state); + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::FieldsUnnamed { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + self.unnamed.hash(state); + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::File { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + self.shebang.hash(state); + self.attrs.hash(state); + self.items.hash(state); + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::FnArg { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + match self { + crate::FnArg::Receiver(v0) => { + state.write_u8(0u8); + v0.hash(state); + } + crate::FnArg::Typed(v0) => { + state.write_u8(1u8); + v0.hash(state); + } + 
} + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::ForeignItem { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + match self { + crate::ForeignItem::Fn(v0) => { + state.write_u8(0u8); + v0.hash(state); + } + crate::ForeignItem::Static(v0) => { + state.write_u8(1u8); + v0.hash(state); + } + crate::ForeignItem::Type(v0) => { + state.write_u8(2u8); + v0.hash(state); + } + crate::ForeignItem::Macro(v0) => { + state.write_u8(3u8); + v0.hash(state); + } + crate::ForeignItem::Verbatim(v0) => { + state.write_u8(4u8); + TokenStreamHelper(v0).hash(state); + } + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::ForeignItemFn { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + self.attrs.hash(state); + self.vis.hash(state); + self.sig.hash(state); + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::ForeignItemMacro { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + self.attrs.hash(state); + self.mac.hash(state); + self.semi_token.hash(state); + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::ForeignItemStatic { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + self.attrs.hash(state); + self.vis.hash(state); + self.mutability.hash(state); + self.ident.hash(state); + self.ty.hash(state); + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::ForeignItemType { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + self.attrs.hash(state); + self.vis.hash(state); + self.ident.hash(state); + self.generics.hash(state); + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::GenericArgument { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + match self { + crate::GenericArgument::Lifetime(v0) => { + state.write_u8(0u8); + v0.hash(state); + } + crate::GenericArgument::Type(v0) => { + state.write_u8(1u8); + v0.hash(state); + } + crate::GenericArgument::Const(v0) => { + state.write_u8(2u8); + v0.hash(state); + } + crate::GenericArgument::AssocType(v0) => { + state.write_u8(3u8); + v0.hash(state); + } + crate::GenericArgument::AssocConst(v0) => { + state.write_u8(4u8); + v0.hash(state); + } + crate::GenericArgument::Constraint(v0) => { + state.write_u8(5u8); + v0.hash(state); + } + } + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::GenericParam { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + match self { + crate::GenericParam::Lifetime(v0) => { + state.write_u8(0u8); + v0.hash(state); + } + crate::GenericParam::Type(v0) => { + state.write_u8(1u8); + v0.hash(state); + } + crate::GenericParam::Const(v0) => { + state.write_u8(2u8); + v0.hash(state); + } + } + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::Generics { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + self.lt_token.hash(state); + self.params.hash(state); + self.gt_token.hash(state); + self.where_clause.hash(state); + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::ImplItem { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + match 
self { + crate::ImplItem::Const(v0) => { + state.write_u8(0u8); + v0.hash(state); + } + crate::ImplItem::Fn(v0) => { + state.write_u8(1u8); + v0.hash(state); + } + crate::ImplItem::Type(v0) => { + state.write_u8(2u8); + v0.hash(state); + } + crate::ImplItem::Macro(v0) => { + state.write_u8(3u8); + v0.hash(state); + } + crate::ImplItem::Verbatim(v0) => { + state.write_u8(4u8); + TokenStreamHelper(v0).hash(state); + } + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::ImplItemConst { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + self.attrs.hash(state); + self.vis.hash(state); + self.defaultness.hash(state); + self.ident.hash(state); + self.generics.hash(state); + self.ty.hash(state); + self.expr.hash(state); + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::ImplItemFn { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + self.attrs.hash(state); + self.vis.hash(state); + self.defaultness.hash(state); + self.sig.hash(state); + self.block.hash(state); + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::ImplItemMacro { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + self.attrs.hash(state); + self.mac.hash(state); + self.semi_token.hash(state); + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::ImplItemType { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + self.attrs.hash(state); + self.vis.hash(state); + self.defaultness.hash(state); + self.ident.hash(state); + self.generics.hash(state); + self.ty.hash(state); + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::ImplRestriction { + fn hash<H>(&self, _state: &mut H) + where + H: Hasher, + { + match *self {} + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::Item { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + match self { + crate::Item::Const(v0) => { + state.write_u8(0u8); + v0.hash(state); + } + crate::Item::Enum(v0) => { + state.write_u8(1u8); + v0.hash(state); + } + crate::Item::ExternCrate(v0) => { + state.write_u8(2u8); + v0.hash(state); + } + crate::Item::Fn(v0) => { + state.write_u8(3u8); + v0.hash(state); + } + crate::Item::ForeignMod(v0) => { + state.write_u8(4u8); + v0.hash(state); + } + crate::Item::Impl(v0) => { + state.write_u8(5u8); + v0.hash(state); + } + crate::Item::Macro(v0) => { + state.write_u8(6u8); + v0.hash(state); + } + crate::Item::Mod(v0) => { + state.write_u8(7u8); + v0.hash(state); + } + crate::Item::Static(v0) => { + state.write_u8(8u8); + v0.hash(state); + } + crate::Item::Struct(v0) => { + state.write_u8(9u8); + v0.hash(state); + } + crate::Item::Trait(v0) => { + state.write_u8(10u8); + v0.hash(state); + } + crate::Item::TraitAlias(v0) => { + state.write_u8(11u8); + v0.hash(state); + } + crate::Item::Type(v0) => { + state.write_u8(12u8); + v0.hash(state); + } + crate::Item::Union(v0) => { + state.write_u8(13u8); + v0.hash(state); + } + crate::Item::Use(v0) => { + state.write_u8(14u8); + v0.hash(state); + } + crate::Item::Verbatim(v0) => { + state.write_u8(15u8); + TokenStreamHelper(v0).hash(state); + } + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::ItemConst { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + 
{ + self.attrs.hash(state); + self.vis.hash(state); + self.ident.hash(state); + self.generics.hash(state); + self.ty.hash(state); + self.expr.hash(state); + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::ItemEnum { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + self.attrs.hash(state); + self.vis.hash(state); + self.ident.hash(state); + self.generics.hash(state); + self.variants.hash(state); + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::ItemExternCrate { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + self.attrs.hash(state); + self.vis.hash(state); + self.ident.hash(state); + self.rename.hash(state); + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::ItemFn { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + self.attrs.hash(state); + self.vis.hash(state); + self.sig.hash(state); + self.block.hash(state); + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::ItemForeignMod { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + self.attrs.hash(state); + self.unsafety.hash(state); + self.abi.hash(state); + self.items.hash(state); + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::ItemImpl { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + self.attrs.hash(state); + self.defaultness.hash(state); + self.unsafety.hash(state); + self.generics.hash(state); + self.trait_.hash(state); + self.self_ty.hash(state); + self.items.hash(state); + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::ItemMacro { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + self.attrs.hash(state); + self.ident.hash(state); + self.mac.hash(state); + self.semi_token.hash(state); + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::ItemMod { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + self.attrs.hash(state); + self.vis.hash(state); + self.unsafety.hash(state); + self.ident.hash(state); + self.content.hash(state); + self.semi.hash(state); + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::ItemStatic { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + self.attrs.hash(state); + self.vis.hash(state); + self.mutability.hash(state); + self.ident.hash(state); + self.ty.hash(state); + self.expr.hash(state); + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::ItemStruct { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + self.attrs.hash(state); + self.vis.hash(state); + self.ident.hash(state); + self.generics.hash(state); + self.fields.hash(state); + self.semi_token.hash(state); + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::ItemTrait { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + self.attrs.hash(state); + self.vis.hash(state); + self.unsafety.hash(state); + self.auto_token.hash(state); + self.restriction.hash(state); + self.ident.hash(state); + self.generics.hash(state); + self.colon_token.hash(state); + self.supertraits.hash(state); + self.items.hash(state); + } +} +#[cfg(feature = "full")] 
+#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::ItemTraitAlias { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + self.attrs.hash(state); + self.vis.hash(state); + self.ident.hash(state); + self.generics.hash(state); + self.bounds.hash(state); + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::ItemType { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + self.attrs.hash(state); + self.vis.hash(state); + self.ident.hash(state); + self.generics.hash(state); + self.ty.hash(state); + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::ItemUnion { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + self.attrs.hash(state); + self.vis.hash(state); + self.ident.hash(state); + self.generics.hash(state); + self.fields.hash(state); + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::ItemUse { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + self.attrs.hash(state); + self.vis.hash(state); + self.leading_colon.hash(state); + self.tree.hash(state); + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::Label { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + self.name.hash(state); + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::LifetimeParam { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + self.attrs.hash(state); + self.lifetime.hash(state); + self.colon_token.hash(state); + self.bounds.hash(state); + } +} +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::Lit { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + match self { + crate::Lit::Str(v0) => { + state.write_u8(0u8); + v0.hash(state); + } + crate::Lit::ByteStr(v0) => { + state.write_u8(1u8); + v0.hash(state); + } + crate::Lit::CStr(v0) => { + state.write_u8(2u8); + v0.hash(state); + } + crate::Lit::Byte(v0) => { + state.write_u8(3u8); + v0.hash(state); + } + crate::Lit::Char(v0) => { + state.write_u8(4u8); + v0.hash(state); + } + crate::Lit::Int(v0) => { + state.write_u8(5u8); + v0.hash(state); + } + crate::Lit::Float(v0) => { + state.write_u8(6u8); + v0.hash(state); + } + crate::Lit::Bool(v0) => { + state.write_u8(7u8); + v0.hash(state); + } + crate::Lit::Verbatim(v0) => { + state.write_u8(8u8); + v0.to_string().hash(state); + } + } + } +} +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::LitBool { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + self.value.hash(state); + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::Local { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + self.attrs.hash(state); + self.pat.hash(state); + self.init.hash(state); + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::LocalInit { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + self.expr.hash(state); + self.diverge.hash(state); + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::Macro { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + self.path.hash(state); + self.delimiter.hash(state); + 
TokenStreamHelper(&self.tokens).hash(state); + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::MacroDelimiter { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + match self { + crate::MacroDelimiter::Paren(_) => { + state.write_u8(0u8); + } + crate::MacroDelimiter::Brace(_) => { + state.write_u8(1u8); + } + crate::MacroDelimiter::Bracket(_) => { + state.write_u8(2u8); + } + } + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::Meta { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + match self { + crate::Meta::Path(v0) => { + state.write_u8(0u8); + v0.hash(state); + } + crate::Meta::List(v0) => { + state.write_u8(1u8); + v0.hash(state); + } + crate::Meta::NameValue(v0) => { + state.write_u8(2u8); + v0.hash(state); + } + } + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::MetaList { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + self.path.hash(state); + self.delimiter.hash(state); + TokenStreamHelper(&self.tokens).hash(state); + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::MetaNameValue { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + self.path.hash(state); + self.value.hash(state); + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::ParenthesizedGenericArguments { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + self.inputs.hash(state); + self.output.hash(state); + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::Pat { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + match self { + crate::Pat::Const(v0) => { + state.write_u8(0u8); + v0.hash(state); + } + crate::Pat::Ident(v0) => { + state.write_u8(1u8); + v0.hash(state); + } + crate::Pat::Lit(v0) => { + state.write_u8(2u8); + v0.hash(state); + } + crate::Pat::Macro(v0) => { + state.write_u8(3u8); + v0.hash(state); + } + crate::Pat::Or(v0) => { + state.write_u8(4u8); + v0.hash(state); + } + crate::Pat::Paren(v0) => { + state.write_u8(5u8); + v0.hash(state); + } + crate::Pat::Path(v0) => { + state.write_u8(6u8); + v0.hash(state); + } + crate::Pat::Range(v0) => { + state.write_u8(7u8); + v0.hash(state); + } + crate::Pat::Reference(v0) => { + state.write_u8(8u8); + v0.hash(state); + } + crate::Pat::Rest(v0) => { + state.write_u8(9u8); + v0.hash(state); + } + crate::Pat::Slice(v0) => { + state.write_u8(10u8); + v0.hash(state); + } + crate::Pat::Struct(v0) => { + state.write_u8(11u8); + v0.hash(state); + } + crate::Pat::Tuple(v0) => { + state.write_u8(12u8); + v0.hash(state); + } + crate::Pat::TupleStruct(v0) => { + state.write_u8(13u8); + v0.hash(state); + } + crate::Pat::Type(v0) => { + state.write_u8(14u8); + v0.hash(state); + } + crate::Pat::Verbatim(v0) => { + state.write_u8(15u8); + TokenStreamHelper(v0).hash(state); + } + crate::Pat::Wild(v0) => { + state.write_u8(16u8); + v0.hash(state); + } + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::PatIdent { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + self.attrs.hash(state); + self.by_ref.hash(state); + self.mutability.hash(state); + 
self.ident.hash(state); + self.subpat.hash(state); + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::PatOr { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + self.attrs.hash(state); + self.leading_vert.hash(state); + self.cases.hash(state); + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::PatParen { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + self.attrs.hash(state); + self.pat.hash(state); + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::PatReference { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + self.attrs.hash(state); + self.mutability.hash(state); + self.pat.hash(state); + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::PatRest { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + self.attrs.hash(state); + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::PatSlice { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + self.attrs.hash(state); + self.elems.hash(state); + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::PatStruct { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + self.attrs.hash(state); + self.qself.hash(state); + self.path.hash(state); + self.fields.hash(state); + self.rest.hash(state); + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::PatTuple { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + self.attrs.hash(state); + self.elems.hash(state); + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::PatTupleStruct { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + self.attrs.hash(state); + self.qself.hash(state); + self.path.hash(state); + self.elems.hash(state); + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::PatType { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + self.attrs.hash(state); + self.pat.hash(state); + self.ty.hash(state); + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::PatWild { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + self.attrs.hash(state); + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::Path { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + self.leading_colon.hash(state); + self.segments.hash(state); + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::PathArguments { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + match self { + crate::PathArguments::None => { + state.write_u8(0u8); + } + crate::PathArguments::AngleBracketed(v0) => { + state.write_u8(1u8); + v0.hash(state); + } + crate::PathArguments::Parenthesized(v0) => { + state.write_u8(2u8); + v0.hash(state); + } + } + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::PathSegment { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + self.ident.hash(state); + 
self.arguments.hash(state); + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::PointerMutability { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + match self { + crate::PointerMutability::Const(_) => { + state.write_u8(0u8); + } + crate::PointerMutability::Mut(_) => { + state.write_u8(1u8); + } + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::PreciseCapture { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + self.params.hash(state); + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::PredicateLifetime { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + self.lifetime.hash(state); + self.bounds.hash(state); + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::PredicateType { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + self.lifetimes.hash(state); + self.bounded_ty.hash(state); + self.bounds.hash(state); + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::QSelf { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + self.ty.hash(state); + self.position.hash(state); + self.as_token.hash(state); + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::RangeLimits { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + match self { + crate::RangeLimits::HalfOpen(_) => { + state.write_u8(0u8); + } + crate::RangeLimits::Closed(_) => { + state.write_u8(1u8); + } + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::Receiver { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + self.attrs.hash(state); + self.reference.hash(state); + self.mutability.hash(state); + self.colon_token.hash(state); + self.ty.hash(state); + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::ReturnType { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + match self { + crate::ReturnType::Default => { + state.write_u8(0u8); + } + crate::ReturnType::Type(_, v1) => { + state.write_u8(1u8); + v1.hash(state); + } + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::Signature { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + self.constness.hash(state); + self.asyncness.hash(state); + self.unsafety.hash(state); + self.abi.hash(state); + self.ident.hash(state); + self.generics.hash(state); + self.inputs.hash(state); + self.variadic.hash(state); + self.output.hash(state); + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::StaticMutability { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + match self { + crate::StaticMutability::Mut(_) => { + state.write_u8(0u8); + } + crate::StaticMutability::None => { + state.write_u8(1u8); + } + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::Stmt { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + match self { + crate::Stmt::Local(v0) => { + state.write_u8(0u8); + v0.hash(state); + } + crate::Stmt::Item(v0) => 
{ + state.write_u8(1u8); + v0.hash(state); + } + crate::Stmt::Expr(v0, v1) => { + state.write_u8(2u8); + v0.hash(state); + v1.hash(state); + } + crate::Stmt::Macro(v0) => { + state.write_u8(3u8); + v0.hash(state); + } + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::StmtMacro { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + self.attrs.hash(state); + self.mac.hash(state); + self.semi_token.hash(state); + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::TraitBound { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + self.paren_token.hash(state); + self.modifier.hash(state); + self.lifetimes.hash(state); + self.path.hash(state); + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::TraitBoundModifier { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + match self { + crate::TraitBoundModifier::None => { + state.write_u8(0u8); + } + crate::TraitBoundModifier::Maybe(_) => { + state.write_u8(1u8); + } + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::TraitItem { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + match self { + crate::TraitItem::Const(v0) => { + state.write_u8(0u8); + v0.hash(state); + } + crate::TraitItem::Fn(v0) => { + state.write_u8(1u8); + v0.hash(state); + } + crate::TraitItem::Type(v0) => { + state.write_u8(2u8); + v0.hash(state); + } + crate::TraitItem::Macro(v0) => { + state.write_u8(3u8); + v0.hash(state); + } + crate::TraitItem::Verbatim(v0) => { + state.write_u8(4u8); + TokenStreamHelper(v0).hash(state); + } + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::TraitItemConst { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + self.attrs.hash(state); + self.ident.hash(state); + self.generics.hash(state); + self.ty.hash(state); + self.default.hash(state); + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::TraitItemFn { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + self.attrs.hash(state); + self.sig.hash(state); + self.default.hash(state); + self.semi_token.hash(state); + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::TraitItemMacro { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + self.attrs.hash(state); + self.mac.hash(state); + self.semi_token.hash(state); + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::TraitItemType { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + self.attrs.hash(state); + self.ident.hash(state); + self.generics.hash(state); + self.colon_token.hash(state); + self.bounds.hash(state); + self.default.hash(state); + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::Type { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + match self { + crate::Type::Array(v0) => { + state.write_u8(0u8); + v0.hash(state); + } + crate::Type::BareFn(v0) => { + state.write_u8(1u8); + v0.hash(state); + } + crate::Type::Group(v0) => { + state.write_u8(2u8); + v0.hash(state); + } + crate::Type::ImplTrait(v0) => { + state.write_u8(3u8); + 
v0.hash(state); + } + crate::Type::Infer(v0) => { + state.write_u8(4u8); + v0.hash(state); + } + crate::Type::Macro(v0) => { + state.write_u8(5u8); + v0.hash(state); + } + crate::Type::Never(v0) => { + state.write_u8(6u8); + v0.hash(state); + } + crate::Type::Paren(v0) => { + state.write_u8(7u8); + v0.hash(state); + } + crate::Type::Path(v0) => { + state.write_u8(8u8); + v0.hash(state); + } + crate::Type::Ptr(v0) => { + state.write_u8(9u8); + v0.hash(state); + } + crate::Type::Reference(v0) => { + state.write_u8(10u8); + v0.hash(state); + } + crate::Type::Slice(v0) => { + state.write_u8(11u8); + v0.hash(state); + } + crate::Type::TraitObject(v0) => { + state.write_u8(12u8); + v0.hash(state); + } + crate::Type::Tuple(v0) => { + state.write_u8(13u8); + v0.hash(state); + } + crate::Type::Verbatim(v0) => { + state.write_u8(14u8); + TokenStreamHelper(v0).hash(state); + } + } + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::TypeArray { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + self.elem.hash(state); + self.len.hash(state); + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::TypeBareFn { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + self.lifetimes.hash(state); + self.unsafety.hash(state); + self.abi.hash(state); + self.inputs.hash(state); + self.variadic.hash(state); + self.output.hash(state); + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::TypeGroup { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + self.elem.hash(state); + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::TypeImplTrait { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + self.bounds.hash(state); + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::TypeInfer { + fn hash<H>(&self, _state: &mut H) + where + H: Hasher, + {} +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::TypeMacro { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + self.mac.hash(state); + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::TypeNever { + fn hash<H>(&self, _state: &mut H) + where + H: Hasher, + {} +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::TypeParam { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + self.attrs.hash(state); + self.ident.hash(state); + self.colon_token.hash(state); + self.bounds.hash(state); + self.eq_token.hash(state); + self.default.hash(state); + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::TypeParamBound { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + match self { + crate::TypeParamBound::Trait(v0) => { + state.write_u8(0u8); + v0.hash(state); + } + crate::TypeParamBound::Lifetime(v0) => { + state.write_u8(1u8); + v0.hash(state); + } + #[cfg(feature = "full")] + crate::TypeParamBound::PreciseCapture(v0) => { + state.write_u8(2u8); + v0.hash(state); + } + 
crate::TypeParamBound::Verbatim(v0) => { + state.write_u8(3u8); + TokenStreamHelper(v0).hash(state); + } + #[cfg(not(feature = "full"))] + _ => unreachable!(), + } + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::TypeParen { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + self.elem.hash(state); + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::TypePath { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + self.qself.hash(state); + self.path.hash(state); + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::TypePtr { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + self.const_token.hash(state); + self.mutability.hash(state); + self.elem.hash(state); + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::TypeReference { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + self.lifetime.hash(state); + self.mutability.hash(state); + self.elem.hash(state); + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::TypeSlice { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + self.elem.hash(state); + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::TypeTraitObject { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + self.dyn_token.hash(state); + self.bounds.hash(state); + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::TypeTuple { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + self.elems.hash(state); + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::UnOp { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + match self { + crate::UnOp::Deref(_) => { + state.write_u8(0u8); + } + crate::UnOp::Not(_) => { + state.write_u8(1u8); + } + crate::UnOp::Neg(_) => { + state.write_u8(2u8); + } + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::UseGlob { + fn hash<H>(&self, _state: &mut H) + where + H: Hasher, + {} +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::UseGroup { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + self.items.hash(state); + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::UseName { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + self.ident.hash(state); + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::UsePath { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + self.ident.hash(state); + self.tree.hash(state); + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::UseRename { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + self.ident.hash(state); + self.rename.hash(state); + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for 
crate::UseTree { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + match self { + crate::UseTree::Path(v0) => { + state.write_u8(0u8); + v0.hash(state); + } + crate::UseTree::Name(v0) => { + state.write_u8(1u8); + v0.hash(state); + } + crate::UseTree::Rename(v0) => { + state.write_u8(2u8); + v0.hash(state); + } + crate::UseTree::Glob(v0) => { + state.write_u8(3u8); + v0.hash(state); + } + crate::UseTree::Group(v0) => { + state.write_u8(4u8); + v0.hash(state); + } + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::Variadic { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + self.attrs.hash(state); + self.pat.hash(state); + self.comma.hash(state); + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::Variant { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + self.attrs.hash(state); + self.ident.hash(state); + self.fields.hash(state); + self.discriminant.hash(state); + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::VisRestricted { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + self.in_token.hash(state); + self.path.hash(state); + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::Visibility { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + match self { + crate::Visibility::Public(_) => { + state.write_u8(0u8); + } + crate::Visibility::Restricted(v0) => { + state.write_u8(1u8); + v0.hash(state); + } + crate::Visibility::Inherited => { + state.write_u8(2u8); + } + } + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::WhereClause { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + self.predicates.hash(state); + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for crate::WherePredicate { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + match self { + crate::WherePredicate::Lifetime(v0) => { + state.write_u8(0u8); + v0.hash(state); + } + crate::WherePredicate::Type(v0) => { + state.write_u8(1u8); + v0.hash(state); + } + } + } +} diff --git a/vendor/syn/src/gen/token.css b/vendor/syn/src/gen/token.css new file mode 100644 index 00000000000000..ed82ae1502b4b6 --- /dev/null +++ b/vendor/syn/src/gen/token.css @@ -0,0 +1,737 @@ +a.struct[title="struct syn::token::Abstract"], +a.struct[title="struct syn::token::And"], +a.struct[title="struct syn::token::AndAnd"], +a.struct[title="struct syn::token::AndEq"], +a.struct[title="struct syn::token::As"], +a.struct[title="struct syn::token::Async"], +a.struct[title="struct syn::token::At"], +a.struct[title="struct syn::token::Auto"], +a.struct[title="struct syn::token::Await"], +a.struct[title="struct syn::token::Become"], +a.struct[title="struct syn::token::Box"], +a.struct[title="struct syn::token::Break"], +a.struct[title="struct syn::token::Caret"], +a.struct[title="struct syn::token::CaretEq"], +a.struct[title="struct syn::token::Colon"], +a.struct[title="struct syn::token::Comma"], +a.struct[title="struct syn::token::Const"], +a.struct[title="struct syn::token::Continue"], +a.struct[title="struct syn::token::Crate"], +a.struct[title="struct syn::token::Default"], +a.struct[title="struct 
syn::token::Do"], +a.struct[title="struct syn::token::Dollar"], +a.struct[title="struct syn::token::Dot"], +a.struct[title="struct syn::token::DotDot"], +a.struct[title="struct syn::token::DotDotDot"], +a.struct[title="struct syn::token::DotDotEq"], +a.struct[title="struct syn::token::Dyn"], +a.struct[title="struct syn::token::Else"], +a.struct[title="struct syn::token::Enum"], +a.struct[title="struct syn::token::Eq"], +a.struct[title="struct syn::token::EqEq"], +a.struct[title="struct syn::token::Extern"], +a.struct[title="struct syn::token::FatArrow"], +a.struct[title="struct syn::token::Final"], +a.struct[title="struct syn::token::Fn"], +a.struct[title="struct syn::token::For"], +a.struct[title="struct syn::token::Ge"], +a.struct[title="struct syn::token::Gt"], +a.struct[title="struct syn::token::If"], +a.struct[title="struct syn::token::Impl"], +a.struct[title="struct syn::token::In"], +a.struct[title="struct syn::token::LArrow"], +a.struct[title="struct syn::token::Le"], +a.struct[title="struct syn::token::Let"], +a.struct[title="struct syn::token::Loop"], +a.struct[title="struct syn::token::Lt"], +a.struct[title="struct syn::token::Macro"], +a.struct[title="struct syn::token::Match"], +a.struct[title="struct syn::token::Minus"], +a.struct[title="struct syn::token::MinusEq"], +a.struct[title="struct syn::token::Mod"], +a.struct[title="struct syn::token::Move"], +a.struct[title="struct syn::token::Mut"], +a.struct[title="struct syn::token::Ne"], +a.struct[title="struct syn::token::Not"], +a.struct[title="struct syn::token::Or"], +a.struct[title="struct syn::token::OrEq"], +a.struct[title="struct syn::token::OrOr"], +a.struct[title="struct syn::token::Override"], +a.struct[title="struct syn::token::PathSep"], +a.struct[title="struct syn::token::Percent"], +a.struct[title="struct syn::token::PercentEq"], +a.struct[title="struct syn::token::Plus"], +a.struct[title="struct syn::token::PlusEq"], +a.struct[title="struct syn::token::Pound"], +a.struct[title="struct syn::token::Priv"], +a.struct[title="struct syn::token::Pub"], +a.struct[title="struct syn::token::Question"], +a.struct[title="struct syn::token::RArrow"], +a.struct[title="struct syn::token::Raw"], +a.struct[title="struct syn::token::Ref"], +a.struct[title="struct syn::token::Return"], +a.struct[title="struct syn::token::SelfType"], +a.struct[title="struct syn::token::SelfValue"], +a.struct[title="struct syn::token::Semi"], +a.struct[title="struct syn::token::Shl"], +a.struct[title="struct syn::token::ShlEq"], +a.struct[title="struct syn::token::Shr"], +a.struct[title="struct syn::token::ShrEq"], +a.struct[title="struct syn::token::Slash"], +a.struct[title="struct syn::token::SlashEq"], +a.struct[title="struct syn::token::Star"], +a.struct[title="struct syn::token::StarEq"], +a.struct[title="struct syn::token::Static"], +a.struct[title="struct syn::token::Struct"], +a.struct[title="struct syn::token::Super"], +a.struct[title="struct syn::token::Tilde"], +a.struct[title="struct syn::token::Trait"], +a.struct[title="struct syn::token::Try"], +a.struct[title="struct syn::token::Type"], +a.struct[title="struct syn::token::Typeof"], +a.struct[title="struct syn::token::Underscore"], +a.struct[title="struct syn::token::Union"], +a.struct[title="struct syn::token::Unsafe"], +a.struct[title="struct syn::token::Unsized"], +a.struct[title="struct syn::token::Use"], +a.struct[title="struct syn::token::Virtual"], +a.struct[title="struct syn::token::Where"], +a.struct[title="struct syn::token::While"], +a.struct[title="struct 
syn::token::Yield"] { + display: inline-block; + color: transparent; + white-space: nowrap; +} + +a.struct[title="struct syn::token::Abstract"]::before, +a.struct[title="struct syn::token::And"]::before, +a.struct[title="struct syn::token::AndAnd"]::before, +a.struct[title="struct syn::token::AndEq"]::before, +a.struct[title="struct syn::token::As"]::before, +a.struct[title="struct syn::token::Async"]::before, +a.struct[title="struct syn::token::At"]::before, +a.struct[title="struct syn::token::Auto"]::before, +a.struct[title="struct syn::token::Await"]::before, +a.struct[title="struct syn::token::Become"]::before, +a.struct[title="struct syn::token::Box"]::before, +a.struct[title="struct syn::token::Break"]::before, +a.struct[title="struct syn::token::Caret"]::before, +a.struct[title="struct syn::token::CaretEq"]::before, +a.struct[title="struct syn::token::Colon"]::before, +a.struct[title="struct syn::token::Comma"]::before, +a.struct[title="struct syn::token::Const"]::before, +a.struct[title="struct syn::token::Continue"]::before, +a.struct[title="struct syn::token::Crate"]::before, +a.struct[title="struct syn::token::Default"]::before, +a.struct[title="struct syn::token::Do"]::before, +a.struct[title="struct syn::token::Dollar"]::before, +a.struct[title="struct syn::token::Dot"]::before, +a.struct[title="struct syn::token::DotDot"]::before, +a.struct[title="struct syn::token::DotDotDot"]::before, +a.struct[title="struct syn::token::DotDotEq"]::before, +a.struct[title="struct syn::token::Dyn"]::before, +a.struct[title="struct syn::token::Else"]::before, +a.struct[title="struct syn::token::Enum"]::before, +a.struct[title="struct syn::token::Eq"]::before, +a.struct[title="struct syn::token::EqEq"]::before, +a.struct[title="struct syn::token::Extern"]::before, +a.struct[title="struct syn::token::FatArrow"]::before, +a.struct[title="struct syn::token::Final"]::before, +a.struct[title="struct syn::token::Fn"]::before, +a.struct[title="struct syn::token::For"]::before, +a.struct[title="struct syn::token::Ge"]::before, +a.struct[title="struct syn::token::Gt"]::before, +a.struct[title="struct syn::token::If"]::before, +a.struct[title="struct syn::token::Impl"]::before, +a.struct[title="struct syn::token::In"]::before, +a.struct[title="struct syn::token::LArrow"]::before, +a.struct[title="struct syn::token::Le"]::before, +a.struct[title="struct syn::token::Let"]::before, +a.struct[title="struct syn::token::Loop"]::before, +a.struct[title="struct syn::token::Lt"]::before, +a.struct[title="struct syn::token::Macro"]::before, +a.struct[title="struct syn::token::Match"]::before, +a.struct[title="struct syn::token::Minus"]::before, +a.struct[title="struct syn::token::MinusEq"]::before, +a.struct[title="struct syn::token::Mod"]::before, +a.struct[title="struct syn::token::Move"]::before, +a.struct[title="struct syn::token::Mut"]::before, +a.struct[title="struct syn::token::Ne"]::before, +a.struct[title="struct syn::token::Not"]::before, +a.struct[title="struct syn::token::Or"]::before, +a.struct[title="struct syn::token::OrEq"]::before, +a.struct[title="struct syn::token::OrOr"]::before, +a.struct[title="struct syn::token::Override"]::before, +a.struct[title="struct syn::token::PathSep"]::before, +a.struct[title="struct syn::token::Percent"]::before, +a.struct[title="struct syn::token::PercentEq"]::before, +a.struct[title="struct syn::token::Plus"]::before, +a.struct[title="struct syn::token::PlusEq"]::before, +a.struct[title="struct syn::token::Pound"]::before, +a.struct[title="struct 
syn::token::Priv"]::before, +a.struct[title="struct syn::token::Pub"]::before, +a.struct[title="struct syn::token::Question"]::before, +a.struct[title="struct syn::token::RArrow"]::before, +a.struct[title="struct syn::token::Raw"]::before, +a.struct[title="struct syn::token::Ref"]::before, +a.struct[title="struct syn::token::Return"]::before, +a.struct[title="struct syn::token::SelfType"]::before, +a.struct[title="struct syn::token::SelfValue"]::before, +a.struct[title="struct syn::token::Semi"]::before, +a.struct[title="struct syn::token::Shl"]::before, +a.struct[title="struct syn::token::ShlEq"]::before, +a.struct[title="struct syn::token::Shr"]::before, +a.struct[title="struct syn::token::ShrEq"]::before, +a.struct[title="struct syn::token::Slash"]::before, +a.struct[title="struct syn::token::SlashEq"]::before, +a.struct[title="struct syn::token::Star"]::before, +a.struct[title="struct syn::token::StarEq"]::before, +a.struct[title="struct syn::token::Static"]::before, +a.struct[title="struct syn::token::Struct"]::before, +a.struct[title="struct syn::token::Super"]::before, +a.struct[title="struct syn::token::Tilde"]::before, +a.struct[title="struct syn::token::Trait"]::before, +a.struct[title="struct syn::token::Try"]::before, +a.struct[title="struct syn::token::Type"]::before, +a.struct[title="struct syn::token::Typeof"]::before, +a.struct[title="struct syn::token::Underscore"]::before, +a.struct[title="struct syn::token::Union"]::before, +a.struct[title="struct syn::token::Unsafe"]::before, +a.struct[title="struct syn::token::Unsized"]::before, +a.struct[title="struct syn::token::Use"]::before, +a.struct[title="struct syn::token::Virtual"]::before, +a.struct[title="struct syn::token::Where"]::before, +a.struct[title="struct syn::token::While"]::before, +a.struct[title="struct syn::token::Yield"]::before { + display: inline-block; + color: var(--type-link-color); + width: 0; +} + +a.struct[title="struct syn::token::Abstract"]::before { + content: "Token![abstract]"; +} + +a.struct[title="struct syn::token::And"]::before { + content: "Token![&]"; +} + +a.struct[title="struct syn::token::AndAnd"]::before { + content: "Token![&&]"; +} + +a.struct[title="struct syn::token::AndEq"]::before { + content: "Token![&=]"; +} + +a.struct[title="struct syn::token::As"]::before { + content: "Token![as]"; +} + +a.struct[title="struct syn::token::Async"]::before { + content: "Token![async]"; +} + +a.struct[title="struct syn::token::At"]::before { + content: "Token![@]"; +} + +a.struct[title="struct syn::token::Auto"]::before { + content: "Token![auto]"; +} + +a.struct[title="struct syn::token::Await"]::before { + content: "Token![await]"; +} + +a.struct[title="struct syn::token::Become"]::before { + content: "Token![become]"; +} + +a.struct[title="struct syn::token::Box"]::before { + content: "Token![box]"; +} + +a.struct[title="struct syn::token::Break"]::before { + content: "Token![break]"; +} + +a.struct[title="struct syn::token::Caret"]::before { + content: "Token![^]"; +} + +a.struct[title="struct syn::token::CaretEq"]::before { + content: "Token![^=]"; +} + +a.struct[title="struct syn::token::Colon"]::before { + content: "Token![:]"; +} + +a.struct[title="struct syn::token::Comma"]::before { + content: "Token![,]"; +} + +a.struct[title="struct syn::token::Const"]::before { + content: "Token![const]"; +} + +a.struct[title="struct syn::token::Continue"]::before { + content: "Token![continue]"; +} + +a.struct[title="struct syn::token::Crate"]::before { + content: "Token![crate]"; +} + 
+a.struct[title="struct syn::token::Default"]::before { + content: "Token![default]"; +} + +a.struct[title="struct syn::token::Do"]::before { + content: "Token![do]"; +} + +a.struct[title="struct syn::token::Dollar"]::before { + content: "Token![$]"; +} + +a.struct[title="struct syn::token::Dot"]::before { + content: "Token![.]"; +} + +a.struct[title="struct syn::token::DotDot"]::before { + content: "Token![..]"; +} + +a.struct[title="struct syn::token::DotDotDot"]::before { + content: "Token![...]"; +} + +a.struct[title="struct syn::token::DotDotEq"]::before { + content: "Token![..=]"; +} + +a.struct[title="struct syn::token::Dyn"]::before { + content: "Token![dyn]"; +} + +a.struct[title="struct syn::token::Else"]::before { + content: "Token![else]"; +} + +a.struct[title="struct syn::token::Enum"]::before { + content: "Token![enum]"; +} + +a.struct[title="struct syn::token::Eq"]::before { + content: "Token![=]"; +} + +a.struct[title="struct syn::token::EqEq"]::before { + content: "Token![==]"; +} + +a.struct[title="struct syn::token::Extern"]::before { + content: "Token![extern]"; +} + +a.struct[title="struct syn::token::FatArrow"]::before { + content: "Token![=>]"; +} + +a.struct[title="struct syn::token::Final"]::before { + content: "Token![final]"; +} + +a.struct[title="struct syn::token::Fn"]::before { + content: "Token![fn]"; +} + +a.struct[title="struct syn::token::For"]::before { + content: "Token![for]"; +} + +a.struct[title="struct syn::token::Ge"]::before { + content: "Token![>=]"; +} + +a.struct[title="struct syn::token::Gt"]::before { + content: "Token![>]"; +} + +a.struct[title="struct syn::token::If"]::before { + content: "Token![if]"; +} + +a.struct[title="struct syn::token::Impl"]::before { + content: "Token![impl]"; +} + +a.struct[title="struct syn::token::In"]::before { + content: "Token![in]"; +} + +a.struct[title="struct syn::token::LArrow"]::before { + content: "Token![<-]"; +} + +a.struct[title="struct syn::token::Le"]::before { + content: "Token![<=]"; +} + +a.struct[title="struct syn::token::Let"]::before { + content: "Token![let]"; +} + +a.struct[title="struct syn::token::Loop"]::before { + content: "Token![loop]"; +} + +a.struct[title="struct syn::token::Lt"]::before { + content: "Token![<]"; +} + +a.struct[title="struct syn::token::Macro"]::before { + content: "Token![macro]"; +} + +a.struct[title="struct syn::token::Match"]::before { + content: "Token![match]"; +} + +a.struct[title="struct syn::token::Minus"]::before { + content: "Token![-]"; +} + +a.struct[title="struct syn::token::MinusEq"]::before { + content: "Token![-=]"; +} + +a.struct[title="struct syn::token::Mod"]::before { + content: "Token![mod]"; +} + +a.struct[title="struct syn::token::Move"]::before { + content: "Token![move]"; +} + +a.struct[title="struct syn::token::Mut"]::before { + content: "Token![mut]"; +} + +a.struct[title="struct syn::token::Ne"]::before { + content: "Token![!=]"; +} + +a.struct[title="struct syn::token::Not"]::before { + content: "Token![!]"; +} + +a.struct[title="struct syn::token::Or"]::before { + content: "Token![|]"; +} + +a.struct[title="struct syn::token::OrEq"]::before { + content: "Token![|=]"; +} + +a.struct[title="struct syn::token::OrOr"]::before { + content: "Token![||]"; +} + +a.struct[title="struct syn::token::Override"]::before { + content: "Token![override]"; +} + +a.struct[title="struct syn::token::PathSep"]::before { + content: "Token![::]"; +} + +a.struct[title="struct syn::token::Percent"]::before { + content: "Token![%]"; +} + +a.struct[title="struct 
syn::token::PercentEq"]::before { + content: "Token![%=]"; +} + +a.struct[title="struct syn::token::Plus"]::before { + content: "Token![+]"; +} + +a.struct[title="struct syn::token::PlusEq"]::before { + content: "Token![+=]"; +} + +a.struct[title="struct syn::token::Pound"]::before { + content: "Token![#]"; +} + +a.struct[title="struct syn::token::Priv"]::before { + content: "Token![priv]"; +} + +a.struct[title="struct syn::token::Pub"]::before { + content: "Token![pub]"; +} + +a.struct[title="struct syn::token::Question"]::before { + content: "Token![?]"; +} + +a.struct[title="struct syn::token::RArrow"]::before { + content: "Token![->]"; +} + +a.struct[title="struct syn::token::Raw"]::before { + content: "Token![raw]"; +} + +a.struct[title="struct syn::token::Ref"]::before { + content: "Token![ref]"; +} + +a.struct[title="struct syn::token::Return"]::before { + content: "Token![return]"; +} + +a.struct[title="struct syn::token::SelfType"]::before { + content: "Token![Self]"; +} + +a.struct[title="struct syn::token::SelfValue"]::before { + content: "Token![self]"; +} + +a.struct[title="struct syn::token::Semi"]::before { + content: "Token![;]"; +} + +a.struct[title="struct syn::token::Shl"]::before { + content: "Token![<<]"; +} + +a.struct[title="struct syn::token::ShlEq"]::before { + content: "Token![<<=]"; +} + +a.struct[title="struct syn::token::Shr"]::before { + content: "Token![>>]"; +} + +a.struct[title="struct syn::token::ShrEq"]::before { + content: "Token![>>=]"; +} + +a.struct[title="struct syn::token::Slash"]::before { + content: "Token![/]"; +} + +a.struct[title="struct syn::token::SlashEq"]::before { + content: "Token![/=]"; +} + +a.struct[title="struct syn::token::Star"]::before { + content: "Token![*]"; +} + +a.struct[title="struct syn::token::StarEq"]::before { + content: "Token![*=]"; +} + +a.struct[title="struct syn::token::Static"]::before { + content: "Token![static]"; +} + +a.struct[title="struct syn::token::Struct"]::before { + content: "Token![struct]"; +} + +a.struct[title="struct syn::token::Super"]::before { + content: "Token![super]"; +} + +a.struct[title="struct syn::token::Tilde"]::before { + content: "Token![~]"; +} + +a.struct[title="struct syn::token::Trait"]::before { + content: "Token![trait]"; +} + +a.struct[title="struct syn::token::Try"]::before { + content: "Token![try]"; +} + +a.struct[title="struct syn::token::Type"]::before { + content: "Token![type]"; +} + +a.struct[title="struct syn::token::Typeof"]::before { + content: "Token![typeof]"; +} + +a.struct[title="struct syn::token::Underscore"]::before { + content: "Token![_]"; + font-size: calc(100% * 10 / 9); +} + +a.struct[title="struct syn::token::Union"]::before { + content: "Token![union]"; +} + +a.struct[title="struct syn::token::Unsafe"]::before { + content: "Token![unsafe]"; +} + +a.struct[title="struct syn::token::Unsized"]::before { + content: "Token![unsized]"; +} + +a.struct[title="struct syn::token::Use"]::before { + content: "Token![use]"; +} + +a.struct[title="struct syn::token::Virtual"]::before { + content: "Token![virtual]"; +} + +a.struct[title="struct syn::token::Where"]::before { + content: "Token![where]"; +} + +a.struct[title="struct syn::token::While"]::before { + content: "Token![while]"; +} + +a.struct[title="struct syn::token::Yield"]::before { + content: "Token![yield]"; +} + +a.struct[title="struct syn::token::Underscore"] { + font-size: calc(100% * 9 / 10); +} + +a.struct[title="struct syn::token::PercentEq"]::after, +a.struct[title="struct syn::token::Question"]::after 
{ + content: "."; +} + +a.struct[title="struct syn::token::DotDotDot"]::after, +a.struct[title="struct syn::token::FatArrow"]::after, +a.struct[title="struct syn::token::Percent"]::after { + content: ".."; +} + +a.struct[title="struct syn::token::CaretEq"]::after, +a.struct[title="struct syn::token::Dollar"]::after, +a.struct[title="struct syn::token::DotDotEq"]::after, +a.struct[title="struct syn::token::MinusEq"]::after, +a.struct[title="struct syn::token::PathSep"]::after, +a.struct[title="struct syn::token::SelfValue"]::after, +a.struct[title="struct syn::token::SlashEq"]::after { + content: "..."; +} + +a.struct[title="struct syn::token::AndAnd"]::after, +a.struct[title="struct syn::token::Caret"]::after, +a.struct[title="struct syn::token::Colon"]::after, +a.struct[title="struct syn::token::Comma"]::after, +a.struct[title="struct syn::token::DotDot"]::after, +a.struct[title="struct syn::token::LArrow"]::after, +a.struct[title="struct syn::token::Minus"]::after, +a.struct[title="struct syn::token::PlusEq"]::after, +a.struct[title="struct syn::token::Pound"]::after, +a.struct[title="struct syn::token::RArrow"]::after, +a.struct[title="struct syn::token::SelfType"]::after, +a.struct[title="struct syn::token::Slash"]::after, +a.struct[title="struct syn::token::StarEq"]::after, +a.struct[title="struct syn::token::Tilde"]::after { + content: "...."; +} + +a.struct[title="struct syn::token::AndEq"]::after, +a.struct[title="struct syn::token::Plus"]::after, +a.struct[title="struct syn::token::Semi"]::after, +a.struct[title="struct syn::token::Star"]::after { + content: "....."; +} + +a.struct[title="struct syn::token::And"]::after, +a.struct[title="struct syn::token::Dot"]::after, +a.struct[title="struct syn::token::EqEq"]::after, +a.struct[title="struct syn::token::Not"]::after, +a.struct[title="struct syn::token::OrEq"]::after, +a.struct[title="struct syn::token::OrOr"]::after, +a.struct[title="struct syn::token::ShlEq"]::after, +a.struct[title="struct syn::token::ShrEq"]::after { + content: "......"; +} + +a.struct[title="struct syn::token::At"]::after, +a.struct[title="struct syn::token::Eq"]::after, +a.struct[title="struct syn::token::Gt"]::after, +a.struct[title="struct syn::token::Lt"]::after, +a.struct[title="struct syn::token::Or"]::after, +a.struct[title="struct syn::token::Shl"]::after, +a.struct[title="struct syn::token::Shr"]::after { + content: "......."; +} + +a.struct[title="struct syn::token::Abstract"]::after, +a.struct[title="struct syn::token::As"]::after, +a.struct[title="struct syn::token::Async"]::after, +a.struct[title="struct syn::token::Auto"]::after, +a.struct[title="struct syn::token::Await"]::after, +a.struct[title="struct syn::token::Become"]::after, +a.struct[title="struct syn::token::Box"]::after, +a.struct[title="struct syn::token::Break"]::after, +a.struct[title="struct syn::token::Const"]::after, +a.struct[title="struct syn::token::Continue"]::after, +a.struct[title="struct syn::token::Crate"]::after, +a.struct[title="struct syn::token::Default"]::after, +a.struct[title="struct syn::token::Do"]::after, +a.struct[title="struct syn::token::Dyn"]::after, +a.struct[title="struct syn::token::Else"]::after, +a.struct[title="struct syn::token::Enum"]::after, +a.struct[title="struct syn::token::Extern"]::after, +a.struct[title="struct syn::token::Final"]::after, +a.struct[title="struct syn::token::Fn"]::after, +a.struct[title="struct syn::token::For"]::after, +a.struct[title="struct syn::token::Ge"]::after, +a.struct[title="struct syn::token::If"]::after, 
+a.struct[title="struct syn::token::Impl"]::after, +a.struct[title="struct syn::token::In"]::after, +a.struct[title="struct syn::token::Le"]::after, +a.struct[title="struct syn::token::Let"]::after, +a.struct[title="struct syn::token::Loop"]::after, +a.struct[title="struct syn::token::Macro"]::after, +a.struct[title="struct syn::token::Match"]::after, +a.struct[title="struct syn::token::Mod"]::after, +a.struct[title="struct syn::token::Move"]::after, +a.struct[title="struct syn::token::Mut"]::after, +a.struct[title="struct syn::token::Ne"]::after, +a.struct[title="struct syn::token::Override"]::after, +a.struct[title="struct syn::token::Priv"]::after, +a.struct[title="struct syn::token::Pub"]::after, +a.struct[title="struct syn::token::Raw"]::after, +a.struct[title="struct syn::token::Ref"]::after, +a.struct[title="struct syn::token::Return"]::after, +a.struct[title="struct syn::token::Static"]::after, +a.struct[title="struct syn::token::Struct"]::after, +a.struct[title="struct syn::token::Super"]::after, +a.struct[title="struct syn::token::Trait"]::after, +a.struct[title="struct syn::token::Try"]::after, +a.struct[title="struct syn::token::Type"]::after, +a.struct[title="struct syn::token::Typeof"]::after, +a.struct[title="struct syn::token::Union"]::after, +a.struct[title="struct syn::token::Unsafe"]::after, +a.struct[title="struct syn::token::Unsized"]::after, +a.struct[title="struct syn::token::Use"]::after, +a.struct[title="struct syn::token::Virtual"]::after, +a.struct[title="struct syn::token::Where"]::after, +a.struct[title="struct syn::token::While"]::after, +a.struct[title="struct syn::token::Yield"]::after { + content: "........"; +} diff --git a/vendor/syn/src/gen/visit.rs b/vendor/syn/src/gen/visit.rs new file mode 100644 index 00000000000000..cd258fcde120a9 --- /dev/null +++ b/vendor/syn/src/gen/visit.rs @@ -0,0 +1,3941 @@ +// This file is @generated by syn-internal-codegen. +// It is not intended for manual editing. + +#![allow(unused_variables)] +#![allow(clippy::needless_pass_by_ref_mut)] +#[cfg(any(feature = "full", feature = "derive"))] +use crate::punctuated::Punctuated; +#[cfg(feature = "full")] +macro_rules! full { + ($e:expr) => { + $e + }; +} +#[cfg(all(feature = "derive", not(feature = "full")))] +macro_rules! full { + ($e:expr) => { + unreachable!() + }; +} +macro_rules! skip { + ($($tt:tt)*) => {}; +} +/// Syntax tree traversal to walk a shared borrow of a syntax tree. +/// +/// See the [module documentation] for details. 
+/// +/// [module documentation]: self +pub trait Visit<'ast> { + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn visit_abi(&mut self, i: &'ast crate::Abi) { + visit_abi(self, i); + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn visit_angle_bracketed_generic_arguments( + &mut self, + i: &'ast crate::AngleBracketedGenericArguments, + ) { + visit_angle_bracketed_generic_arguments(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_arm(&mut self, i: &'ast crate::Arm) { + visit_arm(self, i); + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn visit_assoc_const(&mut self, i: &'ast crate::AssocConst) { + visit_assoc_const(self, i); + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn visit_assoc_type(&mut self, i: &'ast crate::AssocType) { + visit_assoc_type(self, i); + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn visit_attr_style(&mut self, i: &'ast crate::AttrStyle) { + visit_attr_style(self, i); + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn visit_attribute(&mut self, i: &'ast crate::Attribute) { + visit_attribute(self, i); + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn visit_bare_fn_arg(&mut self, i: &'ast crate::BareFnArg) { + visit_bare_fn_arg(self, i); + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn visit_bare_variadic(&mut self, i: &'ast crate::BareVariadic) { + visit_bare_variadic(self, i); + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn visit_bin_op(&mut self, i: &'ast crate::BinOp) { + visit_bin_op(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_block(&mut self, i: &'ast crate::Block) { + visit_block(self, i); + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn visit_bound_lifetimes(&mut self, i: &'ast crate::BoundLifetimes) { + visit_bound_lifetimes(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_captured_param(&mut self, i: &'ast crate::CapturedParam) { + visit_captured_param(self, i); + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn visit_const_param(&mut self, i: &'ast crate::ConstParam) { + visit_const_param(self, i); + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn visit_constraint(&mut self, i: &'ast crate::Constraint) { + visit_constraint(self, i); + } + #[cfg(feature = "derive")] + #[cfg_attr(docsrs, doc(cfg(feature = "derive")))] + fn visit_data(&mut self, i: &'ast crate::Data) { + visit_data(self, i); + } + #[cfg(feature = "derive")] + #[cfg_attr(docsrs, doc(cfg(feature = "derive")))] + fn 
visit_data_enum(&mut self, i: &'ast crate::DataEnum) { + visit_data_enum(self, i); + } + #[cfg(feature = "derive")] + #[cfg_attr(docsrs, doc(cfg(feature = "derive")))] + fn visit_data_struct(&mut self, i: &'ast crate::DataStruct) { + visit_data_struct(self, i); + } + #[cfg(feature = "derive")] + #[cfg_attr(docsrs, doc(cfg(feature = "derive")))] + fn visit_data_union(&mut self, i: &'ast crate::DataUnion) { + visit_data_union(self, i); + } + #[cfg(feature = "derive")] + #[cfg_attr(docsrs, doc(cfg(feature = "derive")))] + fn visit_derive_input(&mut self, i: &'ast crate::DeriveInput) { + visit_derive_input(self, i); + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn visit_expr(&mut self, i: &'ast crate::Expr) { + visit_expr(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_expr_array(&mut self, i: &'ast crate::ExprArray) { + visit_expr_array(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_expr_assign(&mut self, i: &'ast crate::ExprAssign) { + visit_expr_assign(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_expr_async(&mut self, i: &'ast crate::ExprAsync) { + visit_expr_async(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_expr_await(&mut self, i: &'ast crate::ExprAwait) { + visit_expr_await(self, i); + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn visit_expr_binary(&mut self, i: &'ast crate::ExprBinary) { + visit_expr_binary(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_expr_block(&mut self, i: &'ast crate::ExprBlock) { + visit_expr_block(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_expr_break(&mut self, i: &'ast crate::ExprBreak) { + visit_expr_break(self, i); + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn visit_expr_call(&mut self, i: &'ast crate::ExprCall) { + visit_expr_call(self, i); + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn visit_expr_cast(&mut self, i: &'ast crate::ExprCast) { + visit_expr_cast(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_expr_closure(&mut self, i: &'ast crate::ExprClosure) { + visit_expr_closure(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_expr_const(&mut self, i: &'ast crate::ExprConst) { + visit_expr_const(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_expr_continue(&mut self, i: &'ast crate::ExprContinue) { + visit_expr_continue(self, i); + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn visit_expr_field(&mut self, i: &'ast crate::ExprField) { + visit_expr_field(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_expr_for_loop(&mut self, i: &'ast crate::ExprForLoop) { + visit_expr_for_loop(self, i); + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = 
"derive", feature = "full"))))] + fn visit_expr_group(&mut self, i: &'ast crate::ExprGroup) { + visit_expr_group(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_expr_if(&mut self, i: &'ast crate::ExprIf) { + visit_expr_if(self, i); + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn visit_expr_index(&mut self, i: &'ast crate::ExprIndex) { + visit_expr_index(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_expr_infer(&mut self, i: &'ast crate::ExprInfer) { + visit_expr_infer(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_expr_let(&mut self, i: &'ast crate::ExprLet) { + visit_expr_let(self, i); + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn visit_expr_lit(&mut self, i: &'ast crate::ExprLit) { + visit_expr_lit(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_expr_loop(&mut self, i: &'ast crate::ExprLoop) { + visit_expr_loop(self, i); + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn visit_expr_macro(&mut self, i: &'ast crate::ExprMacro) { + visit_expr_macro(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_expr_match(&mut self, i: &'ast crate::ExprMatch) { + visit_expr_match(self, i); + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn visit_expr_method_call(&mut self, i: &'ast crate::ExprMethodCall) { + visit_expr_method_call(self, i); + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn visit_expr_paren(&mut self, i: &'ast crate::ExprParen) { + visit_expr_paren(self, i); + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn visit_expr_path(&mut self, i: &'ast crate::ExprPath) { + visit_expr_path(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_expr_range(&mut self, i: &'ast crate::ExprRange) { + visit_expr_range(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_expr_raw_addr(&mut self, i: &'ast crate::ExprRawAddr) { + visit_expr_raw_addr(self, i); + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn visit_expr_reference(&mut self, i: &'ast crate::ExprReference) { + visit_expr_reference(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_expr_repeat(&mut self, i: &'ast crate::ExprRepeat) { + visit_expr_repeat(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_expr_return(&mut self, i: &'ast crate::ExprReturn) { + visit_expr_return(self, i); + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn visit_expr_struct(&mut self, i: &'ast crate::ExprStruct) { + visit_expr_struct(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn 
visit_expr_try(&mut self, i: &'ast crate::ExprTry) { + visit_expr_try(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_expr_try_block(&mut self, i: &'ast crate::ExprTryBlock) { + visit_expr_try_block(self, i); + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn visit_expr_tuple(&mut self, i: &'ast crate::ExprTuple) { + visit_expr_tuple(self, i); + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn visit_expr_unary(&mut self, i: &'ast crate::ExprUnary) { + visit_expr_unary(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_expr_unsafe(&mut self, i: &'ast crate::ExprUnsafe) { + visit_expr_unsafe(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_expr_while(&mut self, i: &'ast crate::ExprWhile) { + visit_expr_while(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_expr_yield(&mut self, i: &'ast crate::ExprYield) { + visit_expr_yield(self, i); + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn visit_field(&mut self, i: &'ast crate::Field) { + visit_field(self, i); + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn visit_field_mutability(&mut self, i: &'ast crate::FieldMutability) { + visit_field_mutability(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_field_pat(&mut self, i: &'ast crate::FieldPat) { + visit_field_pat(self, i); + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn visit_field_value(&mut self, i: &'ast crate::FieldValue) { + visit_field_value(self, i); + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn visit_fields(&mut self, i: &'ast crate::Fields) { + visit_fields(self, i); + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn visit_fields_named(&mut self, i: &'ast crate::FieldsNamed) { + visit_fields_named(self, i); + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn visit_fields_unnamed(&mut self, i: &'ast crate::FieldsUnnamed) { + visit_fields_unnamed(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_file(&mut self, i: &'ast crate::File) { + visit_file(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_fn_arg(&mut self, i: &'ast crate::FnArg) { + visit_fn_arg(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_foreign_item(&mut self, i: &'ast crate::ForeignItem) { + visit_foreign_item(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_foreign_item_fn(&mut self, i: &'ast crate::ForeignItemFn) { + visit_foreign_item_fn(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_foreign_item_macro(&mut self, i: &'ast 
crate::ForeignItemMacro) { + visit_foreign_item_macro(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_foreign_item_static(&mut self, i: &'ast crate::ForeignItemStatic) { + visit_foreign_item_static(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_foreign_item_type(&mut self, i: &'ast crate::ForeignItemType) { + visit_foreign_item_type(self, i); + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn visit_generic_argument(&mut self, i: &'ast crate::GenericArgument) { + visit_generic_argument(self, i); + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn visit_generic_param(&mut self, i: &'ast crate::GenericParam) { + visit_generic_param(self, i); + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn visit_generics(&mut self, i: &'ast crate::Generics) { + visit_generics(self, i); + } + fn visit_ident(&mut self, i: &'ast proc_macro2::Ident) { + visit_ident(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_impl_item(&mut self, i: &'ast crate::ImplItem) { + visit_impl_item(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_impl_item_const(&mut self, i: &'ast crate::ImplItemConst) { + visit_impl_item_const(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_impl_item_fn(&mut self, i: &'ast crate::ImplItemFn) { + visit_impl_item_fn(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_impl_item_macro(&mut self, i: &'ast crate::ImplItemMacro) { + visit_impl_item_macro(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_impl_item_type(&mut self, i: &'ast crate::ImplItemType) { + visit_impl_item_type(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_impl_restriction(&mut self, i: &'ast crate::ImplRestriction) { + visit_impl_restriction(self, i); + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn visit_index(&mut self, i: &'ast crate::Index) { + visit_index(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_item(&mut self, i: &'ast crate::Item) { + visit_item(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_item_const(&mut self, i: &'ast crate::ItemConst) { + visit_item_const(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_item_enum(&mut self, i: &'ast crate::ItemEnum) { + visit_item_enum(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_item_extern_crate(&mut self, i: &'ast crate::ItemExternCrate) { + visit_item_extern_crate(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_item_fn(&mut self, i: &'ast crate::ItemFn) { + visit_item_fn(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_item_foreign_mod(&mut self, i: &'ast crate::ItemForeignMod) { + 
visit_item_foreign_mod(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_item_impl(&mut self, i: &'ast crate::ItemImpl) { + visit_item_impl(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_item_macro(&mut self, i: &'ast crate::ItemMacro) { + visit_item_macro(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_item_mod(&mut self, i: &'ast crate::ItemMod) { + visit_item_mod(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_item_static(&mut self, i: &'ast crate::ItemStatic) { + visit_item_static(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_item_struct(&mut self, i: &'ast crate::ItemStruct) { + visit_item_struct(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_item_trait(&mut self, i: &'ast crate::ItemTrait) { + visit_item_trait(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_item_trait_alias(&mut self, i: &'ast crate::ItemTraitAlias) { + visit_item_trait_alias(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_item_type(&mut self, i: &'ast crate::ItemType) { + visit_item_type(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_item_union(&mut self, i: &'ast crate::ItemUnion) { + visit_item_union(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_item_use(&mut self, i: &'ast crate::ItemUse) { + visit_item_use(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_label(&mut self, i: &'ast crate::Label) { + visit_label(self, i); + } + fn visit_lifetime(&mut self, i: &'ast crate::Lifetime) { + visit_lifetime(self, i); + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn visit_lifetime_param(&mut self, i: &'ast crate::LifetimeParam) { + visit_lifetime_param(self, i); + } + fn visit_lit(&mut self, i: &'ast crate::Lit) { + visit_lit(self, i); + } + fn visit_lit_bool(&mut self, i: &'ast crate::LitBool) { + visit_lit_bool(self, i); + } + fn visit_lit_byte(&mut self, i: &'ast crate::LitByte) { + visit_lit_byte(self, i); + } + fn visit_lit_byte_str(&mut self, i: &'ast crate::LitByteStr) { + visit_lit_byte_str(self, i); + } + fn visit_lit_cstr(&mut self, i: &'ast crate::LitCStr) { + visit_lit_cstr(self, i); + } + fn visit_lit_char(&mut self, i: &'ast crate::LitChar) { + visit_lit_char(self, i); + } + fn visit_lit_float(&mut self, i: &'ast crate::LitFloat) { + visit_lit_float(self, i); + } + fn visit_lit_int(&mut self, i: &'ast crate::LitInt) { + visit_lit_int(self, i); + } + fn visit_lit_str(&mut self, i: &'ast crate::LitStr) { + visit_lit_str(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_local(&mut self, i: &'ast crate::Local) { + visit_local(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_local_init(&mut self, i: &'ast crate::LocalInit) { + visit_local_init(self, i); + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn visit_macro(&mut self, i: &'ast crate::Macro) { + 
visit_macro(self, i); + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn visit_macro_delimiter(&mut self, i: &'ast crate::MacroDelimiter) { + visit_macro_delimiter(self, i); + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn visit_member(&mut self, i: &'ast crate::Member) { + visit_member(self, i); + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn visit_meta(&mut self, i: &'ast crate::Meta) { + visit_meta(self, i); + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn visit_meta_list(&mut self, i: &'ast crate::MetaList) { + visit_meta_list(self, i); + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn visit_meta_name_value(&mut self, i: &'ast crate::MetaNameValue) { + visit_meta_name_value(self, i); + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn visit_parenthesized_generic_arguments( + &mut self, + i: &'ast crate::ParenthesizedGenericArguments, + ) { + visit_parenthesized_generic_arguments(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_pat(&mut self, i: &'ast crate::Pat) { + visit_pat(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_pat_ident(&mut self, i: &'ast crate::PatIdent) { + visit_pat_ident(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_pat_or(&mut self, i: &'ast crate::PatOr) { + visit_pat_or(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_pat_paren(&mut self, i: &'ast crate::PatParen) { + visit_pat_paren(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_pat_reference(&mut self, i: &'ast crate::PatReference) { + visit_pat_reference(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_pat_rest(&mut self, i: &'ast crate::PatRest) { + visit_pat_rest(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_pat_slice(&mut self, i: &'ast crate::PatSlice) { + visit_pat_slice(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_pat_struct(&mut self, i: &'ast crate::PatStruct) { + visit_pat_struct(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_pat_tuple(&mut self, i: &'ast crate::PatTuple) { + visit_pat_tuple(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_pat_tuple_struct(&mut self, i: &'ast crate::PatTupleStruct) { + visit_pat_tuple_struct(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_pat_type(&mut self, i: &'ast crate::PatType) { + visit_pat_type(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_pat_wild(&mut self, i: &'ast crate::PatWild) { + visit_pat_wild(self, i); + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = 
"derive", feature = "full"))))] + fn visit_path(&mut self, i: &'ast crate::Path) { + visit_path(self, i); + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn visit_path_arguments(&mut self, i: &'ast crate::PathArguments) { + visit_path_arguments(self, i); + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn visit_path_segment(&mut self, i: &'ast crate::PathSegment) { + visit_path_segment(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_pointer_mutability(&mut self, i: &'ast crate::PointerMutability) { + visit_pointer_mutability(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_precise_capture(&mut self, i: &'ast crate::PreciseCapture) { + visit_precise_capture(self, i); + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn visit_predicate_lifetime(&mut self, i: &'ast crate::PredicateLifetime) { + visit_predicate_lifetime(self, i); + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn visit_predicate_type(&mut self, i: &'ast crate::PredicateType) { + visit_predicate_type(self, i); + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn visit_qself(&mut self, i: &'ast crate::QSelf) { + visit_qself(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_range_limits(&mut self, i: &'ast crate::RangeLimits) { + visit_range_limits(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_receiver(&mut self, i: &'ast crate::Receiver) { + visit_receiver(self, i); + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn visit_return_type(&mut self, i: &'ast crate::ReturnType) { + visit_return_type(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_signature(&mut self, i: &'ast crate::Signature) { + visit_signature(self, i); + } + fn visit_span(&mut self, i: &proc_macro2::Span) {} + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_static_mutability(&mut self, i: &'ast crate::StaticMutability) { + visit_static_mutability(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_stmt(&mut self, i: &'ast crate::Stmt) { + visit_stmt(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_stmt_macro(&mut self, i: &'ast crate::StmtMacro) { + visit_stmt_macro(self, i); + } + fn visit_token_stream(&mut self, i: &'ast proc_macro2::TokenStream) {} + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn visit_trait_bound(&mut self, i: &'ast crate::TraitBound) { + visit_trait_bound(self, i); + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn visit_trait_bound_modifier(&mut self, i: &'ast crate::TraitBoundModifier) { + visit_trait_bound_modifier(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, 
doc(cfg(feature = "full")))] + fn visit_trait_item(&mut self, i: &'ast crate::TraitItem) { + visit_trait_item(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_trait_item_const(&mut self, i: &'ast crate::TraitItemConst) { + visit_trait_item_const(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_trait_item_fn(&mut self, i: &'ast crate::TraitItemFn) { + visit_trait_item_fn(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_trait_item_macro(&mut self, i: &'ast crate::TraitItemMacro) { + visit_trait_item_macro(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_trait_item_type(&mut self, i: &'ast crate::TraitItemType) { + visit_trait_item_type(self, i); + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn visit_type(&mut self, i: &'ast crate::Type) { + visit_type(self, i); + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn visit_type_array(&mut self, i: &'ast crate::TypeArray) { + visit_type_array(self, i); + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn visit_type_bare_fn(&mut self, i: &'ast crate::TypeBareFn) { + visit_type_bare_fn(self, i); + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn visit_type_group(&mut self, i: &'ast crate::TypeGroup) { + visit_type_group(self, i); + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn visit_type_impl_trait(&mut self, i: &'ast crate::TypeImplTrait) { + visit_type_impl_trait(self, i); + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn visit_type_infer(&mut self, i: &'ast crate::TypeInfer) { + visit_type_infer(self, i); + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn visit_type_macro(&mut self, i: &'ast crate::TypeMacro) { + visit_type_macro(self, i); + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn visit_type_never(&mut self, i: &'ast crate::TypeNever) { + visit_type_never(self, i); + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn visit_type_param(&mut self, i: &'ast crate::TypeParam) { + visit_type_param(self, i); + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn visit_type_param_bound(&mut self, i: &'ast crate::TypeParamBound) { + visit_type_param_bound(self, i); + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn visit_type_paren(&mut self, i: &'ast crate::TypeParen) { + visit_type_paren(self, i); + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn visit_type_path(&mut self, i: &'ast crate::TypePath) { + visit_type_path(self, i); + } + 
#[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn visit_type_ptr(&mut self, i: &'ast crate::TypePtr) { + visit_type_ptr(self, i); + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn visit_type_reference(&mut self, i: &'ast crate::TypeReference) { + visit_type_reference(self, i); + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn visit_type_slice(&mut self, i: &'ast crate::TypeSlice) { + visit_type_slice(self, i); + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn visit_type_trait_object(&mut self, i: &'ast crate::TypeTraitObject) { + visit_type_trait_object(self, i); + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn visit_type_tuple(&mut self, i: &'ast crate::TypeTuple) { + visit_type_tuple(self, i); + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn visit_un_op(&mut self, i: &'ast crate::UnOp) { + visit_un_op(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_use_glob(&mut self, i: &'ast crate::UseGlob) { + visit_use_glob(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_use_group(&mut self, i: &'ast crate::UseGroup) { + visit_use_group(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_use_name(&mut self, i: &'ast crate::UseName) { + visit_use_name(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_use_path(&mut self, i: &'ast crate::UsePath) { + visit_use_path(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_use_rename(&mut self, i: &'ast crate::UseRename) { + visit_use_rename(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_use_tree(&mut self, i: &'ast crate::UseTree) { + visit_use_tree(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_variadic(&mut self, i: &'ast crate::Variadic) { + visit_variadic(self, i); + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn visit_variant(&mut self, i: &'ast crate::Variant) { + visit_variant(self, i); + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn visit_vis_restricted(&mut self, i: &'ast crate::VisRestricted) { + visit_vis_restricted(self, i); + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn visit_visibility(&mut self, i: &'ast crate::Visibility) { + visit_visibility(self, i); + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn visit_where_clause(&mut self, i: &'ast crate::WhereClause) { + visit_where_clause(self, i); + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn 
visit_where_predicate(&mut self, i: &'ast crate::WherePredicate) { + visit_where_predicate(self, i); + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn visit_abi<'ast, V>(v: &mut V, node: &'ast crate::Abi) +where + V: Visit<'ast> + ?Sized, +{ + skip!(node.extern_token); + if let Some(it) = &node.name { + v.visit_lit_str(it); + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn visit_angle_bracketed_generic_arguments<'ast, V>( + v: &mut V, + node: &'ast crate::AngleBracketedGenericArguments, +) +where + V: Visit<'ast> + ?Sized, +{ + skip!(node.colon2_token); + skip!(node.lt_token); + for el in Punctuated::pairs(&node.args) { + let it = el.value(); + v.visit_generic_argument(it); + } + skip!(node.gt_token); +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_arm<'ast, V>(v: &mut V, node: &'ast crate::Arm) +where + V: Visit<'ast> + ?Sized, +{ + for it in &node.attrs { + v.visit_attribute(it); + } + v.visit_pat(&node.pat); + if let Some(it) = &node.guard { + skip!((it).0); + v.visit_expr(&*(it).1); + } + skip!(node.fat_arrow_token); + v.visit_expr(&*node.body); + skip!(node.comma); +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn visit_assoc_const<'ast, V>(v: &mut V, node: &'ast crate::AssocConst) +where + V: Visit<'ast> + ?Sized, +{ + v.visit_ident(&node.ident); + if let Some(it) = &node.generics { + v.visit_angle_bracketed_generic_arguments(it); + } + skip!(node.eq_token); + v.visit_expr(&node.value); +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn visit_assoc_type<'ast, V>(v: &mut V, node: &'ast crate::AssocType) +where + V: Visit<'ast> + ?Sized, +{ + v.visit_ident(&node.ident); + if let Some(it) = &node.generics { + v.visit_angle_bracketed_generic_arguments(it); + } + skip!(node.eq_token); + v.visit_type(&node.ty); +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn visit_attr_style<'ast, V>(v: &mut V, node: &'ast crate::AttrStyle) +where + V: Visit<'ast> + ?Sized, +{ + match node { + crate::AttrStyle::Outer => {} + crate::AttrStyle::Inner(_binding_0) => { + skip!(_binding_0); + } + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn visit_attribute<'ast, V>(v: &mut V, node: &'ast crate::Attribute) +where + V: Visit<'ast> + ?Sized, +{ + skip!(node.pound_token); + v.visit_attr_style(&node.style); + skip!(node.bracket_token); + v.visit_meta(&node.meta); +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn visit_bare_fn_arg<'ast, V>(v: &mut V, node: &'ast crate::BareFnArg) +where + V: Visit<'ast> + ?Sized, +{ + for it in &node.attrs { + v.visit_attribute(it); + } + if let Some(it) = &node.name { + v.visit_ident(&(it).0); + skip!((it).1); + } + v.visit_type(&node.ty); +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn visit_bare_variadic<'ast, V>(v: &mut V, node: &'ast crate::BareVariadic) +where + V: Visit<'ast> + ?Sized, +{ + for it in &node.attrs { + v.visit_attribute(it); + 
} + if let Some(it) = &node.name { + v.visit_ident(&(it).0); + skip!((it).1); + } + skip!(node.dots); + skip!(node.comma); +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn visit_bin_op<'ast, V>(v: &mut V, node: &'ast crate::BinOp) +where + V: Visit<'ast> + ?Sized, +{ + match node { + crate::BinOp::Add(_binding_0) => { + skip!(_binding_0); + } + crate::BinOp::Sub(_binding_0) => { + skip!(_binding_0); + } + crate::BinOp::Mul(_binding_0) => { + skip!(_binding_0); + } + crate::BinOp::Div(_binding_0) => { + skip!(_binding_0); + } + crate::BinOp::Rem(_binding_0) => { + skip!(_binding_0); + } + crate::BinOp::And(_binding_0) => { + skip!(_binding_0); + } + crate::BinOp::Or(_binding_0) => { + skip!(_binding_0); + } + crate::BinOp::BitXor(_binding_0) => { + skip!(_binding_0); + } + crate::BinOp::BitAnd(_binding_0) => { + skip!(_binding_0); + } + crate::BinOp::BitOr(_binding_0) => { + skip!(_binding_0); + } + crate::BinOp::Shl(_binding_0) => { + skip!(_binding_0); + } + crate::BinOp::Shr(_binding_0) => { + skip!(_binding_0); + } + crate::BinOp::Eq(_binding_0) => { + skip!(_binding_0); + } + crate::BinOp::Lt(_binding_0) => { + skip!(_binding_0); + } + crate::BinOp::Le(_binding_0) => { + skip!(_binding_0); + } + crate::BinOp::Ne(_binding_0) => { + skip!(_binding_0); + } + crate::BinOp::Ge(_binding_0) => { + skip!(_binding_0); + } + crate::BinOp::Gt(_binding_0) => { + skip!(_binding_0); + } + crate::BinOp::AddAssign(_binding_0) => { + skip!(_binding_0); + } + crate::BinOp::SubAssign(_binding_0) => { + skip!(_binding_0); + } + crate::BinOp::MulAssign(_binding_0) => { + skip!(_binding_0); + } + crate::BinOp::DivAssign(_binding_0) => { + skip!(_binding_0); + } + crate::BinOp::RemAssign(_binding_0) => { + skip!(_binding_0); + } + crate::BinOp::BitXorAssign(_binding_0) => { + skip!(_binding_0); + } + crate::BinOp::BitAndAssign(_binding_0) => { + skip!(_binding_0); + } + crate::BinOp::BitOrAssign(_binding_0) => { + skip!(_binding_0); + } + crate::BinOp::ShlAssign(_binding_0) => { + skip!(_binding_0); + } + crate::BinOp::ShrAssign(_binding_0) => { + skip!(_binding_0); + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_block<'ast, V>(v: &mut V, node: &'ast crate::Block) +where + V: Visit<'ast> + ?Sized, +{ + skip!(node.brace_token); + for it in &node.stmts { + v.visit_stmt(it); + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn visit_bound_lifetimes<'ast, V>(v: &mut V, node: &'ast crate::BoundLifetimes) +where + V: Visit<'ast> + ?Sized, +{ + skip!(node.for_token); + skip!(node.lt_token); + for el in Punctuated::pairs(&node.lifetimes) { + let it = el.value(); + v.visit_generic_param(it); + } + skip!(node.gt_token); +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_captured_param<'ast, V>(v: &mut V, node: &'ast crate::CapturedParam) +where + V: Visit<'ast> + ?Sized, +{ + match node { + crate::CapturedParam::Lifetime(_binding_0) => { + v.visit_lifetime(_binding_0); + } + crate::CapturedParam::Ident(_binding_0) => { + v.visit_ident(_binding_0); + } + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn visit_const_param<'ast, V>(v: &mut V, node: &'ast crate::ConstParam) +where + V: Visit<'ast> + ?Sized, +{ + for it in &node.attrs { + v.visit_attribute(it); + } + 
skip!(node.const_token); + v.visit_ident(&node.ident); + skip!(node.colon_token); + v.visit_type(&node.ty); + skip!(node.eq_token); + if let Some(it) = &node.default { + v.visit_expr(it); + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn visit_constraint<'ast, V>(v: &mut V, node: &'ast crate::Constraint) +where + V: Visit<'ast> + ?Sized, +{ + v.visit_ident(&node.ident); + if let Some(it) = &node.generics { + v.visit_angle_bracketed_generic_arguments(it); + } + skip!(node.colon_token); + for el in Punctuated::pairs(&node.bounds) { + let it = el.value(); + v.visit_type_param_bound(it); + } +} +#[cfg(feature = "derive")] +#[cfg_attr(docsrs, doc(cfg(feature = "derive")))] +pub fn visit_data<'ast, V>(v: &mut V, node: &'ast crate::Data) +where + V: Visit<'ast> + ?Sized, +{ + match node { + crate::Data::Struct(_binding_0) => { + v.visit_data_struct(_binding_0); + } + crate::Data::Enum(_binding_0) => { + v.visit_data_enum(_binding_0); + } + crate::Data::Union(_binding_0) => { + v.visit_data_union(_binding_0); + } + } +} +#[cfg(feature = "derive")] +#[cfg_attr(docsrs, doc(cfg(feature = "derive")))] +pub fn visit_data_enum<'ast, V>(v: &mut V, node: &'ast crate::DataEnum) +where + V: Visit<'ast> + ?Sized, +{ + skip!(node.enum_token); + skip!(node.brace_token); + for el in Punctuated::pairs(&node.variants) { + let it = el.value(); + v.visit_variant(it); + } +} +#[cfg(feature = "derive")] +#[cfg_attr(docsrs, doc(cfg(feature = "derive")))] +pub fn visit_data_struct<'ast, V>(v: &mut V, node: &'ast crate::DataStruct) +where + V: Visit<'ast> + ?Sized, +{ + skip!(node.struct_token); + v.visit_fields(&node.fields); + skip!(node.semi_token); +} +#[cfg(feature = "derive")] +#[cfg_attr(docsrs, doc(cfg(feature = "derive")))] +pub fn visit_data_union<'ast, V>(v: &mut V, node: &'ast crate::DataUnion) +where + V: Visit<'ast> + ?Sized, +{ + skip!(node.union_token); + v.visit_fields_named(&node.fields); +} +#[cfg(feature = "derive")] +#[cfg_attr(docsrs, doc(cfg(feature = "derive")))] +pub fn visit_derive_input<'ast, V>(v: &mut V, node: &'ast crate::DeriveInput) +where + V: Visit<'ast> + ?Sized, +{ + for it in &node.attrs { + v.visit_attribute(it); + } + v.visit_visibility(&node.vis); + v.visit_ident(&node.ident); + v.visit_generics(&node.generics); + v.visit_data(&node.data); +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn visit_expr<'ast, V>(v: &mut V, node: &'ast crate::Expr) +where + V: Visit<'ast> + ?Sized, +{ + match node { + crate::Expr::Array(_binding_0) => { + full!(v.visit_expr_array(_binding_0)); + } + crate::Expr::Assign(_binding_0) => { + full!(v.visit_expr_assign(_binding_0)); + } + crate::Expr::Async(_binding_0) => { + full!(v.visit_expr_async(_binding_0)); + } + crate::Expr::Await(_binding_0) => { + full!(v.visit_expr_await(_binding_0)); + } + crate::Expr::Binary(_binding_0) => { + v.visit_expr_binary(_binding_0); + } + crate::Expr::Block(_binding_0) => { + full!(v.visit_expr_block(_binding_0)); + } + crate::Expr::Break(_binding_0) => { + full!(v.visit_expr_break(_binding_0)); + } + crate::Expr::Call(_binding_0) => { + v.visit_expr_call(_binding_0); + } + crate::Expr::Cast(_binding_0) => { + v.visit_expr_cast(_binding_0); + } + crate::Expr::Closure(_binding_0) => { + full!(v.visit_expr_closure(_binding_0)); + } + crate::Expr::Const(_binding_0) => { + full!(v.visit_expr_const(_binding_0)); + } + crate::Expr::Continue(_binding_0) 
=> { + full!(v.visit_expr_continue(_binding_0)); + } + crate::Expr::Field(_binding_0) => { + v.visit_expr_field(_binding_0); + } + crate::Expr::ForLoop(_binding_0) => { + full!(v.visit_expr_for_loop(_binding_0)); + } + crate::Expr::Group(_binding_0) => { + v.visit_expr_group(_binding_0); + } + crate::Expr::If(_binding_0) => { + full!(v.visit_expr_if(_binding_0)); + } + crate::Expr::Index(_binding_0) => { + v.visit_expr_index(_binding_0); + } + crate::Expr::Infer(_binding_0) => { + full!(v.visit_expr_infer(_binding_0)); + } + crate::Expr::Let(_binding_0) => { + full!(v.visit_expr_let(_binding_0)); + } + crate::Expr::Lit(_binding_0) => { + v.visit_expr_lit(_binding_0); + } + crate::Expr::Loop(_binding_0) => { + full!(v.visit_expr_loop(_binding_0)); + } + crate::Expr::Macro(_binding_0) => { + v.visit_expr_macro(_binding_0); + } + crate::Expr::Match(_binding_0) => { + full!(v.visit_expr_match(_binding_0)); + } + crate::Expr::MethodCall(_binding_0) => { + v.visit_expr_method_call(_binding_0); + } + crate::Expr::Paren(_binding_0) => { + v.visit_expr_paren(_binding_0); + } + crate::Expr::Path(_binding_0) => { + v.visit_expr_path(_binding_0); + } + crate::Expr::Range(_binding_0) => { + full!(v.visit_expr_range(_binding_0)); + } + crate::Expr::RawAddr(_binding_0) => { + full!(v.visit_expr_raw_addr(_binding_0)); + } + crate::Expr::Reference(_binding_0) => { + v.visit_expr_reference(_binding_0); + } + crate::Expr::Repeat(_binding_0) => { + full!(v.visit_expr_repeat(_binding_0)); + } + crate::Expr::Return(_binding_0) => { + full!(v.visit_expr_return(_binding_0)); + } + crate::Expr::Struct(_binding_0) => { + v.visit_expr_struct(_binding_0); + } + crate::Expr::Try(_binding_0) => { + full!(v.visit_expr_try(_binding_0)); + } + crate::Expr::TryBlock(_binding_0) => { + full!(v.visit_expr_try_block(_binding_0)); + } + crate::Expr::Tuple(_binding_0) => { + v.visit_expr_tuple(_binding_0); + } + crate::Expr::Unary(_binding_0) => { + v.visit_expr_unary(_binding_0); + } + crate::Expr::Unsafe(_binding_0) => { + full!(v.visit_expr_unsafe(_binding_0)); + } + crate::Expr::Verbatim(_binding_0) => { + v.visit_token_stream(_binding_0); + } + crate::Expr::While(_binding_0) => { + full!(v.visit_expr_while(_binding_0)); + } + crate::Expr::Yield(_binding_0) => { + full!(v.visit_expr_yield(_binding_0)); + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_expr_array<'ast, V>(v: &mut V, node: &'ast crate::ExprArray) +where + V: Visit<'ast> + ?Sized, +{ + for it in &node.attrs { + v.visit_attribute(it); + } + skip!(node.bracket_token); + for el in Punctuated::pairs(&node.elems) { + let it = el.value(); + v.visit_expr(it); + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_expr_assign<'ast, V>(v: &mut V, node: &'ast crate::ExprAssign) +where + V: Visit<'ast> + ?Sized, +{ + for it in &node.attrs { + v.visit_attribute(it); + } + v.visit_expr(&*node.left); + skip!(node.eq_token); + v.visit_expr(&*node.right); +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_expr_async<'ast, V>(v: &mut V, node: &'ast crate::ExprAsync) +where + V: Visit<'ast> + ?Sized, +{ + for it in &node.attrs { + v.visit_attribute(it); + } + skip!(node.async_token); + skip!(node.capture); + v.visit_block(&node.block); +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_expr_await<'ast, V>(v: &mut V, node: &'ast crate::ExprAwait) +where + V: Visit<'ast> + ?Sized, +{ + for it in &node.attrs { 
+ v.visit_attribute(it); + } + v.visit_expr(&*node.base); + skip!(node.dot_token); + skip!(node.await_token); +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn visit_expr_binary<'ast, V>(v: &mut V, node: &'ast crate::ExprBinary) +where + V: Visit<'ast> + ?Sized, +{ + for it in &node.attrs { + v.visit_attribute(it); + } + v.visit_expr(&*node.left); + v.visit_bin_op(&node.op); + v.visit_expr(&*node.right); +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_expr_block<'ast, V>(v: &mut V, node: &'ast crate::ExprBlock) +where + V: Visit<'ast> + ?Sized, +{ + for it in &node.attrs { + v.visit_attribute(it); + } + if let Some(it) = &node.label { + v.visit_label(it); + } + v.visit_block(&node.block); +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_expr_break<'ast, V>(v: &mut V, node: &'ast crate::ExprBreak) +where + V: Visit<'ast> + ?Sized, +{ + for it in &node.attrs { + v.visit_attribute(it); + } + skip!(node.break_token); + if let Some(it) = &node.label { + v.visit_lifetime(it); + } + if let Some(it) = &node.expr { + v.visit_expr(&**it); + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn visit_expr_call<'ast, V>(v: &mut V, node: &'ast crate::ExprCall) +where + V: Visit<'ast> + ?Sized, +{ + for it in &node.attrs { + v.visit_attribute(it); + } + v.visit_expr(&*node.func); + skip!(node.paren_token); + for el in Punctuated::pairs(&node.args) { + let it = el.value(); + v.visit_expr(it); + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn visit_expr_cast<'ast, V>(v: &mut V, node: &'ast crate::ExprCast) +where + V: Visit<'ast> + ?Sized, +{ + for it in &node.attrs { + v.visit_attribute(it); + } + v.visit_expr(&*node.expr); + skip!(node.as_token); + v.visit_type(&*node.ty); +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_expr_closure<'ast, V>(v: &mut V, node: &'ast crate::ExprClosure) +where + V: Visit<'ast> + ?Sized, +{ + for it in &node.attrs { + v.visit_attribute(it); + } + if let Some(it) = &node.lifetimes { + v.visit_bound_lifetimes(it); + } + skip!(node.constness); + skip!(node.movability); + skip!(node.asyncness); + skip!(node.capture); + skip!(node.or1_token); + for el in Punctuated::pairs(&node.inputs) { + let it = el.value(); + v.visit_pat(it); + } + skip!(node.or2_token); + v.visit_return_type(&node.output); + v.visit_expr(&*node.body); +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_expr_const<'ast, V>(v: &mut V, node: &'ast crate::ExprConst) +where + V: Visit<'ast> + ?Sized, +{ + for it in &node.attrs { + v.visit_attribute(it); + } + skip!(node.const_token); + v.visit_block(&node.block); +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_expr_continue<'ast, V>(v: &mut V, node: &'ast crate::ExprContinue) +where + V: Visit<'ast> + ?Sized, +{ + for it in &node.attrs { + v.visit_attribute(it); + } + skip!(node.continue_token); + if let Some(it) = &node.label { + v.visit_lifetime(it); + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn visit_expr_field<'ast, V>(v: &mut V, node: &'ast crate::ExprField) +where + V: Visit<'ast> + ?Sized, +{ + for 
it in &node.attrs { + v.visit_attribute(it); + } + v.visit_expr(&*node.base); + skip!(node.dot_token); + v.visit_member(&node.member); +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_expr_for_loop<'ast, V>(v: &mut V, node: &'ast crate::ExprForLoop) +where + V: Visit<'ast> + ?Sized, +{ + for it in &node.attrs { + v.visit_attribute(it); + } + if let Some(it) = &node.label { + v.visit_label(it); + } + skip!(node.for_token); + v.visit_pat(&*node.pat); + skip!(node.in_token); + v.visit_expr(&*node.expr); + v.visit_block(&node.body); +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn visit_expr_group<'ast, V>(v: &mut V, node: &'ast crate::ExprGroup) +where + V: Visit<'ast> + ?Sized, +{ + for it in &node.attrs { + v.visit_attribute(it); + } + skip!(node.group_token); + v.visit_expr(&*node.expr); +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_expr_if<'ast, V>(v: &mut V, node: &'ast crate::ExprIf) +where + V: Visit<'ast> + ?Sized, +{ + for it in &node.attrs { + v.visit_attribute(it); + } + skip!(node.if_token); + v.visit_expr(&*node.cond); + v.visit_block(&node.then_branch); + if let Some(it) = &node.else_branch { + skip!((it).0); + v.visit_expr(&*(it).1); + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn visit_expr_index<'ast, V>(v: &mut V, node: &'ast crate::ExprIndex) +where + V: Visit<'ast> + ?Sized, +{ + for it in &node.attrs { + v.visit_attribute(it); + } + v.visit_expr(&*node.expr); + skip!(node.bracket_token); + v.visit_expr(&*node.index); +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_expr_infer<'ast, V>(v: &mut V, node: &'ast crate::ExprInfer) +where + V: Visit<'ast> + ?Sized, +{ + for it in &node.attrs { + v.visit_attribute(it); + } + skip!(node.underscore_token); +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_expr_let<'ast, V>(v: &mut V, node: &'ast crate::ExprLet) +where + V: Visit<'ast> + ?Sized, +{ + for it in &node.attrs { + v.visit_attribute(it); + } + skip!(node.let_token); + v.visit_pat(&*node.pat); + skip!(node.eq_token); + v.visit_expr(&*node.expr); +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn visit_expr_lit<'ast, V>(v: &mut V, node: &'ast crate::ExprLit) +where + V: Visit<'ast> + ?Sized, +{ + for it in &node.attrs { + v.visit_attribute(it); + } + v.visit_lit(&node.lit); +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_expr_loop<'ast, V>(v: &mut V, node: &'ast crate::ExprLoop) +where + V: Visit<'ast> + ?Sized, +{ + for it in &node.attrs { + v.visit_attribute(it); + } + if let Some(it) = &node.label { + v.visit_label(it); + } + skip!(node.loop_token); + v.visit_block(&node.body); +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn visit_expr_macro<'ast, V>(v: &mut V, node: &'ast crate::ExprMacro) +where + V: Visit<'ast> + ?Sized, +{ + for it in &node.attrs { + v.visit_attribute(it); + } + v.visit_macro(&node.mac); +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_expr_match<'ast, V>(v: &mut V, node: &'ast crate::ExprMatch) +where + V: Visit<'ast> + ?Sized, +{ + for it in 
&node.attrs { + v.visit_attribute(it); + } + skip!(node.match_token); + v.visit_expr(&*node.expr); + skip!(node.brace_token); + for it in &node.arms { + v.visit_arm(it); + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn visit_expr_method_call<'ast, V>(v: &mut V, node: &'ast crate::ExprMethodCall) +where + V: Visit<'ast> + ?Sized, +{ + for it in &node.attrs { + v.visit_attribute(it); + } + v.visit_expr(&*node.receiver); + skip!(node.dot_token); + v.visit_ident(&node.method); + if let Some(it) = &node.turbofish { + v.visit_angle_bracketed_generic_arguments(it); + } + skip!(node.paren_token); + for el in Punctuated::pairs(&node.args) { + let it = el.value(); + v.visit_expr(it); + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn visit_expr_paren<'ast, V>(v: &mut V, node: &'ast crate::ExprParen) +where + V: Visit<'ast> + ?Sized, +{ + for it in &node.attrs { + v.visit_attribute(it); + } + skip!(node.paren_token); + v.visit_expr(&*node.expr); +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn visit_expr_path<'ast, V>(v: &mut V, node: &'ast crate::ExprPath) +where + V: Visit<'ast> + ?Sized, +{ + for it in &node.attrs { + v.visit_attribute(it); + } + if let Some(it) = &node.qself { + v.visit_qself(it); + } + v.visit_path(&node.path); +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_expr_range<'ast, V>(v: &mut V, node: &'ast crate::ExprRange) +where + V: Visit<'ast> + ?Sized, +{ + for it in &node.attrs { + v.visit_attribute(it); + } + if let Some(it) = &node.start { + v.visit_expr(&**it); + } + v.visit_range_limits(&node.limits); + if let Some(it) = &node.end { + v.visit_expr(&**it); + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_expr_raw_addr<'ast, V>(v: &mut V, node: &'ast crate::ExprRawAddr) +where + V: Visit<'ast> + ?Sized, +{ + for it in &node.attrs { + v.visit_attribute(it); + } + skip!(node.and_token); + skip!(node.raw); + v.visit_pointer_mutability(&node.mutability); + v.visit_expr(&*node.expr); +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn visit_expr_reference<'ast, V>(v: &mut V, node: &'ast crate::ExprReference) +where + V: Visit<'ast> + ?Sized, +{ + for it in &node.attrs { + v.visit_attribute(it); + } + skip!(node.and_token); + skip!(node.mutability); + v.visit_expr(&*node.expr); +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_expr_repeat<'ast, V>(v: &mut V, node: &'ast crate::ExprRepeat) +where + V: Visit<'ast> + ?Sized, +{ + for it in &node.attrs { + v.visit_attribute(it); + } + skip!(node.bracket_token); + v.visit_expr(&*node.expr); + skip!(node.semi_token); + v.visit_expr(&*node.len); +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_expr_return<'ast, V>(v: &mut V, node: &'ast crate::ExprReturn) +where + V: Visit<'ast> + ?Sized, +{ + for it in &node.attrs { + v.visit_attribute(it); + } + skip!(node.return_token); + if let Some(it) = &node.expr { + v.visit_expr(&**it); + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn visit_expr_struct<'ast, V>(v: &mut V, node: &'ast 
crate::ExprStruct) +where + V: Visit<'ast> + ?Sized, +{ + for it in &node.attrs { + v.visit_attribute(it); + } + if let Some(it) = &node.qself { + v.visit_qself(it); + } + v.visit_path(&node.path); + skip!(node.brace_token); + for el in Punctuated::pairs(&node.fields) { + let it = el.value(); + v.visit_field_value(it); + } + skip!(node.dot2_token); + if let Some(it) = &node.rest { + v.visit_expr(&**it); + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_expr_try<'ast, V>(v: &mut V, node: &'ast crate::ExprTry) +where + V: Visit<'ast> + ?Sized, +{ + for it in &node.attrs { + v.visit_attribute(it); + } + v.visit_expr(&*node.expr); + skip!(node.question_token); +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_expr_try_block<'ast, V>(v: &mut V, node: &'ast crate::ExprTryBlock) +where + V: Visit<'ast> + ?Sized, +{ + for it in &node.attrs { + v.visit_attribute(it); + } + skip!(node.try_token); + v.visit_block(&node.block); +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn visit_expr_tuple<'ast, V>(v: &mut V, node: &'ast crate::ExprTuple) +where + V: Visit<'ast> + ?Sized, +{ + for it in &node.attrs { + v.visit_attribute(it); + } + skip!(node.paren_token); + for el in Punctuated::pairs(&node.elems) { + let it = el.value(); + v.visit_expr(it); + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn visit_expr_unary<'ast, V>(v: &mut V, node: &'ast crate::ExprUnary) +where + V: Visit<'ast> + ?Sized, +{ + for it in &node.attrs { + v.visit_attribute(it); + } + v.visit_un_op(&node.op); + v.visit_expr(&*node.expr); +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_expr_unsafe<'ast, V>(v: &mut V, node: &'ast crate::ExprUnsafe) +where + V: Visit<'ast> + ?Sized, +{ + for it in &node.attrs { + v.visit_attribute(it); + } + skip!(node.unsafe_token); + v.visit_block(&node.block); +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_expr_while<'ast, V>(v: &mut V, node: &'ast crate::ExprWhile) +where + V: Visit<'ast> + ?Sized, +{ + for it in &node.attrs { + v.visit_attribute(it); + } + if let Some(it) = &node.label { + v.visit_label(it); + } + skip!(node.while_token); + v.visit_expr(&*node.cond); + v.visit_block(&node.body); +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_expr_yield<'ast, V>(v: &mut V, node: &'ast crate::ExprYield) +where + V: Visit<'ast> + ?Sized, +{ + for it in &node.attrs { + v.visit_attribute(it); + } + skip!(node.yield_token); + if let Some(it) = &node.expr { + v.visit_expr(&**it); + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn visit_field<'ast, V>(v: &mut V, node: &'ast crate::Field) +where + V: Visit<'ast> + ?Sized, +{ + for it in &node.attrs { + v.visit_attribute(it); + } + v.visit_visibility(&node.vis); + v.visit_field_mutability(&node.mutability); + if let Some(it) = &node.ident { + v.visit_ident(it); + } + skip!(node.colon_token); + v.visit_type(&node.ty); +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn visit_field_mutability<'ast, V>(v: &mut V, node: &'ast crate::FieldMutability) +where + V: Visit<'ast> + ?Sized, +{ + match node { + 
crate::FieldMutability::None => {} + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_field_pat<'ast, V>(v: &mut V, node: &'ast crate::FieldPat) +where + V: Visit<'ast> + ?Sized, +{ + for it in &node.attrs { + v.visit_attribute(it); + } + v.visit_member(&node.member); + skip!(node.colon_token); + v.visit_pat(&*node.pat); +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn visit_field_value<'ast, V>(v: &mut V, node: &'ast crate::FieldValue) +where + V: Visit<'ast> + ?Sized, +{ + for it in &node.attrs { + v.visit_attribute(it); + } + v.visit_member(&node.member); + skip!(node.colon_token); + v.visit_expr(&node.expr); +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn visit_fields<'ast, V>(v: &mut V, node: &'ast crate::Fields) +where + V: Visit<'ast> + ?Sized, +{ + match node { + crate::Fields::Named(_binding_0) => { + v.visit_fields_named(_binding_0); + } + crate::Fields::Unnamed(_binding_0) => { + v.visit_fields_unnamed(_binding_0); + } + crate::Fields::Unit => {} + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn visit_fields_named<'ast, V>(v: &mut V, node: &'ast crate::FieldsNamed) +where + V: Visit<'ast> + ?Sized, +{ + skip!(node.brace_token); + for el in Punctuated::pairs(&node.named) { + let it = el.value(); + v.visit_field(it); + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn visit_fields_unnamed<'ast, V>(v: &mut V, node: &'ast crate::FieldsUnnamed) +where + V: Visit<'ast> + ?Sized, +{ + skip!(node.paren_token); + for el in Punctuated::pairs(&node.unnamed) { + let it = el.value(); + v.visit_field(it); + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_file<'ast, V>(v: &mut V, node: &'ast crate::File) +where + V: Visit<'ast> + ?Sized, +{ + skip!(node.shebang); + for it in &node.attrs { + v.visit_attribute(it); + } + for it in &node.items { + v.visit_item(it); + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_fn_arg<'ast, V>(v: &mut V, node: &'ast crate::FnArg) +where + V: Visit<'ast> + ?Sized, +{ + match node { + crate::FnArg::Receiver(_binding_0) => { + v.visit_receiver(_binding_0); + } + crate::FnArg::Typed(_binding_0) => { + v.visit_pat_type(_binding_0); + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_foreign_item<'ast, V>(v: &mut V, node: &'ast crate::ForeignItem) +where + V: Visit<'ast> + ?Sized, +{ + match node { + crate::ForeignItem::Fn(_binding_0) => { + v.visit_foreign_item_fn(_binding_0); + } + crate::ForeignItem::Static(_binding_0) => { + v.visit_foreign_item_static(_binding_0); + } + crate::ForeignItem::Type(_binding_0) => { + v.visit_foreign_item_type(_binding_0); + } + crate::ForeignItem::Macro(_binding_0) => { + v.visit_foreign_item_macro(_binding_0); + } + crate::ForeignItem::Verbatim(_binding_0) => { + v.visit_token_stream(_binding_0); + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_foreign_item_fn<'ast, V>(v: &mut V, node: &'ast crate::ForeignItemFn) +where + V: Visit<'ast> + ?Sized, +{ + for it in &node.attrs { + v.visit_attribute(it); + } + v.visit_visibility(&node.vis); + 
v.visit_signature(&node.sig); + skip!(node.semi_token); +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_foreign_item_macro<'ast, V>(v: &mut V, node: &'ast crate::ForeignItemMacro) +where + V: Visit<'ast> + ?Sized, +{ + for it in &node.attrs { + v.visit_attribute(it); + } + v.visit_macro(&node.mac); + skip!(node.semi_token); +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_foreign_item_static<'ast, V>( + v: &mut V, + node: &'ast crate::ForeignItemStatic, +) +where + V: Visit<'ast> + ?Sized, +{ + for it in &node.attrs { + v.visit_attribute(it); + } + v.visit_visibility(&node.vis); + skip!(node.static_token); + v.visit_static_mutability(&node.mutability); + v.visit_ident(&node.ident); + skip!(node.colon_token); + v.visit_type(&*node.ty); + skip!(node.semi_token); +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_foreign_item_type<'ast, V>(v: &mut V, node: &'ast crate::ForeignItemType) +where + V: Visit<'ast> + ?Sized, +{ + for it in &node.attrs { + v.visit_attribute(it); + } + v.visit_visibility(&node.vis); + skip!(node.type_token); + v.visit_ident(&node.ident); + v.visit_generics(&node.generics); + skip!(node.semi_token); +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn visit_generic_argument<'ast, V>(v: &mut V, node: &'ast crate::GenericArgument) +where + V: Visit<'ast> + ?Sized, +{ + match node { + crate::GenericArgument::Lifetime(_binding_0) => { + v.visit_lifetime(_binding_0); + } + crate::GenericArgument::Type(_binding_0) => { + v.visit_type(_binding_0); + } + crate::GenericArgument::Const(_binding_0) => { + v.visit_expr(_binding_0); + } + crate::GenericArgument::AssocType(_binding_0) => { + v.visit_assoc_type(_binding_0); + } + crate::GenericArgument::AssocConst(_binding_0) => { + v.visit_assoc_const(_binding_0); + } + crate::GenericArgument::Constraint(_binding_0) => { + v.visit_constraint(_binding_0); + } + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn visit_generic_param<'ast, V>(v: &mut V, node: &'ast crate::GenericParam) +where + V: Visit<'ast> + ?Sized, +{ + match node { + crate::GenericParam::Lifetime(_binding_0) => { + v.visit_lifetime_param(_binding_0); + } + crate::GenericParam::Type(_binding_0) => { + v.visit_type_param(_binding_0); + } + crate::GenericParam::Const(_binding_0) => { + v.visit_const_param(_binding_0); + } + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn visit_generics<'ast, V>(v: &mut V, node: &'ast crate::Generics) +where + V: Visit<'ast> + ?Sized, +{ + skip!(node.lt_token); + for el in Punctuated::pairs(&node.params) { + let it = el.value(); + v.visit_generic_param(it); + } + skip!(node.gt_token); + if let Some(it) = &node.where_clause { + v.visit_where_clause(it); + } +} +pub fn visit_ident<'ast, V>(v: &mut V, node: &'ast proc_macro2::Ident) +where + V: Visit<'ast> + ?Sized, +{ + v.visit_span(&node.span()); +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_impl_item<'ast, V>(v: &mut V, node: &'ast crate::ImplItem) +where + V: Visit<'ast> + ?Sized, +{ + match node { + crate::ImplItem::Const(_binding_0) => { + v.visit_impl_item_const(_binding_0); + } + crate::ImplItem::Fn(_binding_0) => { + v.visit_impl_item_fn(_binding_0); + 
} + crate::ImplItem::Type(_binding_0) => { + v.visit_impl_item_type(_binding_0); + } + crate::ImplItem::Macro(_binding_0) => { + v.visit_impl_item_macro(_binding_0); + } + crate::ImplItem::Verbatim(_binding_0) => { + v.visit_token_stream(_binding_0); + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_impl_item_const<'ast, V>(v: &mut V, node: &'ast crate::ImplItemConst) +where + V: Visit<'ast> + ?Sized, +{ + for it in &node.attrs { + v.visit_attribute(it); + } + v.visit_visibility(&node.vis); + skip!(node.defaultness); + skip!(node.const_token); + v.visit_ident(&node.ident); + v.visit_generics(&node.generics); + skip!(node.colon_token); + v.visit_type(&node.ty); + skip!(node.eq_token); + v.visit_expr(&node.expr); + skip!(node.semi_token); +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_impl_item_fn<'ast, V>(v: &mut V, node: &'ast crate::ImplItemFn) +where + V: Visit<'ast> + ?Sized, +{ + for it in &node.attrs { + v.visit_attribute(it); + } + v.visit_visibility(&node.vis); + skip!(node.defaultness); + v.visit_signature(&node.sig); + v.visit_block(&node.block); +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_impl_item_macro<'ast, V>(v: &mut V, node: &'ast crate::ImplItemMacro) +where + V: Visit<'ast> + ?Sized, +{ + for it in &node.attrs { + v.visit_attribute(it); + } + v.visit_macro(&node.mac); + skip!(node.semi_token); +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_impl_item_type<'ast, V>(v: &mut V, node: &'ast crate::ImplItemType) +where + V: Visit<'ast> + ?Sized, +{ + for it in &node.attrs { + v.visit_attribute(it); + } + v.visit_visibility(&node.vis); + skip!(node.defaultness); + skip!(node.type_token); + v.visit_ident(&node.ident); + v.visit_generics(&node.generics); + skip!(node.eq_token); + v.visit_type(&node.ty); + skip!(node.semi_token); +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_impl_restriction<'ast, V>(v: &mut V, node: &'ast crate::ImplRestriction) +where + V: Visit<'ast> + ?Sized, +{ + match *node {} +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn visit_index<'ast, V>(v: &mut V, node: &'ast crate::Index) +where + V: Visit<'ast> + ?Sized, +{ + skip!(node.index); + v.visit_span(&node.span); +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_item<'ast, V>(v: &mut V, node: &'ast crate::Item) +where + V: Visit<'ast> + ?Sized, +{ + match node { + crate::Item::Const(_binding_0) => { + v.visit_item_const(_binding_0); + } + crate::Item::Enum(_binding_0) => { + v.visit_item_enum(_binding_0); + } + crate::Item::ExternCrate(_binding_0) => { + v.visit_item_extern_crate(_binding_0); + } + crate::Item::Fn(_binding_0) => { + v.visit_item_fn(_binding_0); + } + crate::Item::ForeignMod(_binding_0) => { + v.visit_item_foreign_mod(_binding_0); + } + crate::Item::Impl(_binding_0) => { + v.visit_item_impl(_binding_0); + } + crate::Item::Macro(_binding_0) => { + v.visit_item_macro(_binding_0); + } + crate::Item::Mod(_binding_0) => { + v.visit_item_mod(_binding_0); + } + crate::Item::Static(_binding_0) => { + v.visit_item_static(_binding_0); + } + crate::Item::Struct(_binding_0) => { + v.visit_item_struct(_binding_0); + } + crate::Item::Trait(_binding_0) => { + v.visit_item_trait(_binding_0); + } + crate::Item::TraitAlias(_binding_0) => { + 
v.visit_item_trait_alias(_binding_0); + } + crate::Item::Type(_binding_0) => { + v.visit_item_type(_binding_0); + } + crate::Item::Union(_binding_0) => { + v.visit_item_union(_binding_0); + } + crate::Item::Use(_binding_0) => { + v.visit_item_use(_binding_0); + } + crate::Item::Verbatim(_binding_0) => { + v.visit_token_stream(_binding_0); + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_item_const<'ast, V>(v: &mut V, node: &'ast crate::ItemConst) +where + V: Visit<'ast> + ?Sized, +{ + for it in &node.attrs { + v.visit_attribute(it); + } + v.visit_visibility(&node.vis); + skip!(node.const_token); + v.visit_ident(&node.ident); + v.visit_generics(&node.generics); + skip!(node.colon_token); + v.visit_type(&*node.ty); + skip!(node.eq_token); + v.visit_expr(&*node.expr); + skip!(node.semi_token); +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_item_enum<'ast, V>(v: &mut V, node: &'ast crate::ItemEnum) +where + V: Visit<'ast> + ?Sized, +{ + for it in &node.attrs { + v.visit_attribute(it); + } + v.visit_visibility(&node.vis); + skip!(node.enum_token); + v.visit_ident(&node.ident); + v.visit_generics(&node.generics); + skip!(node.brace_token); + for el in Punctuated::pairs(&node.variants) { + let it = el.value(); + v.visit_variant(it); + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_item_extern_crate<'ast, V>(v: &mut V, node: &'ast crate::ItemExternCrate) +where + V: Visit<'ast> + ?Sized, +{ + for it in &node.attrs { + v.visit_attribute(it); + } + v.visit_visibility(&node.vis); + skip!(node.extern_token); + skip!(node.crate_token); + v.visit_ident(&node.ident); + if let Some(it) = &node.rename { + skip!((it).0); + v.visit_ident(&(it).1); + } + skip!(node.semi_token); +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_item_fn<'ast, V>(v: &mut V, node: &'ast crate::ItemFn) +where + V: Visit<'ast> + ?Sized, +{ + for it in &node.attrs { + v.visit_attribute(it); + } + v.visit_visibility(&node.vis); + v.visit_signature(&node.sig); + v.visit_block(&*node.block); +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_item_foreign_mod<'ast, V>(v: &mut V, node: &'ast crate::ItemForeignMod) +where + V: Visit<'ast> + ?Sized, +{ + for it in &node.attrs { + v.visit_attribute(it); + } + skip!(node.unsafety); + v.visit_abi(&node.abi); + skip!(node.brace_token); + for it in &node.items { + v.visit_foreign_item(it); + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_item_impl<'ast, V>(v: &mut V, node: &'ast crate::ItemImpl) +where + V: Visit<'ast> + ?Sized, +{ + for it in &node.attrs { + v.visit_attribute(it); + } + skip!(node.defaultness); + skip!(node.unsafety); + skip!(node.impl_token); + v.visit_generics(&node.generics); + if let Some(it) = &node.trait_ { + skip!((it).0); + v.visit_path(&(it).1); + skip!((it).2); + } + v.visit_type(&*node.self_ty); + skip!(node.brace_token); + for it in &node.items { + v.visit_impl_item(it); + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_item_macro<'ast, V>(v: &mut V, node: &'ast crate::ItemMacro) +where + V: Visit<'ast> + ?Sized, +{ + for it in &node.attrs { + v.visit_attribute(it); + } + if let Some(it) = &node.ident { + v.visit_ident(it); + } + v.visit_macro(&node.mac); + skip!(node.semi_token); +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = 
"full")))] +pub fn visit_item_mod<'ast, V>(v: &mut V, node: &'ast crate::ItemMod) +where + V: Visit<'ast> + ?Sized, +{ + for it in &node.attrs { + v.visit_attribute(it); + } + v.visit_visibility(&node.vis); + skip!(node.unsafety); + skip!(node.mod_token); + v.visit_ident(&node.ident); + if let Some(it) = &node.content { + skip!((it).0); + for it in &(it).1 { + v.visit_item(it); + } + } + skip!(node.semi); +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_item_static<'ast, V>(v: &mut V, node: &'ast crate::ItemStatic) +where + V: Visit<'ast> + ?Sized, +{ + for it in &node.attrs { + v.visit_attribute(it); + } + v.visit_visibility(&node.vis); + skip!(node.static_token); + v.visit_static_mutability(&node.mutability); + v.visit_ident(&node.ident); + skip!(node.colon_token); + v.visit_type(&*node.ty); + skip!(node.eq_token); + v.visit_expr(&*node.expr); + skip!(node.semi_token); +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_item_struct<'ast, V>(v: &mut V, node: &'ast crate::ItemStruct) +where + V: Visit<'ast> + ?Sized, +{ + for it in &node.attrs { + v.visit_attribute(it); + } + v.visit_visibility(&node.vis); + skip!(node.struct_token); + v.visit_ident(&node.ident); + v.visit_generics(&node.generics); + v.visit_fields(&node.fields); + skip!(node.semi_token); +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_item_trait<'ast, V>(v: &mut V, node: &'ast crate::ItemTrait) +where + V: Visit<'ast> + ?Sized, +{ + for it in &node.attrs { + v.visit_attribute(it); + } + v.visit_visibility(&node.vis); + skip!(node.unsafety); + skip!(node.auto_token); + if let Some(it) = &node.restriction { + v.visit_impl_restriction(it); + } + skip!(node.trait_token); + v.visit_ident(&node.ident); + v.visit_generics(&node.generics); + skip!(node.colon_token); + for el in Punctuated::pairs(&node.supertraits) { + let it = el.value(); + v.visit_type_param_bound(it); + } + skip!(node.brace_token); + for it in &node.items { + v.visit_trait_item(it); + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_item_trait_alias<'ast, V>(v: &mut V, node: &'ast crate::ItemTraitAlias) +where + V: Visit<'ast> + ?Sized, +{ + for it in &node.attrs { + v.visit_attribute(it); + } + v.visit_visibility(&node.vis); + skip!(node.trait_token); + v.visit_ident(&node.ident); + v.visit_generics(&node.generics); + skip!(node.eq_token); + for el in Punctuated::pairs(&node.bounds) { + let it = el.value(); + v.visit_type_param_bound(it); + } + skip!(node.semi_token); +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_item_type<'ast, V>(v: &mut V, node: &'ast crate::ItemType) +where + V: Visit<'ast> + ?Sized, +{ + for it in &node.attrs { + v.visit_attribute(it); + } + v.visit_visibility(&node.vis); + skip!(node.type_token); + v.visit_ident(&node.ident); + v.visit_generics(&node.generics); + skip!(node.eq_token); + v.visit_type(&*node.ty); + skip!(node.semi_token); +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_item_union<'ast, V>(v: &mut V, node: &'ast crate::ItemUnion) +where + V: Visit<'ast> + ?Sized, +{ + for it in &node.attrs { + v.visit_attribute(it); + } + v.visit_visibility(&node.vis); + skip!(node.union_token); + v.visit_ident(&node.ident); + v.visit_generics(&node.generics); + v.visit_fields_named(&node.fields); +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn 
visit_item_use<'ast, V>(v: &mut V, node: &'ast crate::ItemUse) +where + V: Visit<'ast> + ?Sized, +{ + for it in &node.attrs { + v.visit_attribute(it); + } + v.visit_visibility(&node.vis); + skip!(node.use_token); + skip!(node.leading_colon); + v.visit_use_tree(&node.tree); + skip!(node.semi_token); +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_label<'ast, V>(v: &mut V, node: &'ast crate::Label) +where + V: Visit<'ast> + ?Sized, +{ + v.visit_lifetime(&node.name); + skip!(node.colon_token); +} +pub fn visit_lifetime<'ast, V>(v: &mut V, node: &'ast crate::Lifetime) +where + V: Visit<'ast> + ?Sized, +{ + v.visit_span(&node.apostrophe); + v.visit_ident(&node.ident); +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn visit_lifetime_param<'ast, V>(v: &mut V, node: &'ast crate::LifetimeParam) +where + V: Visit<'ast> + ?Sized, +{ + for it in &node.attrs { + v.visit_attribute(it); + } + v.visit_lifetime(&node.lifetime); + skip!(node.colon_token); + for el in Punctuated::pairs(&node.bounds) { + let it = el.value(); + v.visit_lifetime(it); + } +} +pub fn visit_lit<'ast, V>(v: &mut V, node: &'ast crate::Lit) +where + V: Visit<'ast> + ?Sized, +{ + match node { + crate::Lit::Str(_binding_0) => { + v.visit_lit_str(_binding_0); + } + crate::Lit::ByteStr(_binding_0) => { + v.visit_lit_byte_str(_binding_0); + } + crate::Lit::CStr(_binding_0) => { + v.visit_lit_cstr(_binding_0); + } + crate::Lit::Byte(_binding_0) => { + v.visit_lit_byte(_binding_0); + } + crate::Lit::Char(_binding_0) => { + v.visit_lit_char(_binding_0); + } + crate::Lit::Int(_binding_0) => { + v.visit_lit_int(_binding_0); + } + crate::Lit::Float(_binding_0) => { + v.visit_lit_float(_binding_0); + } + crate::Lit::Bool(_binding_0) => { + v.visit_lit_bool(_binding_0); + } + crate::Lit::Verbatim(_binding_0) => { + skip!(_binding_0); + } + } +} +pub fn visit_lit_bool<'ast, V>(v: &mut V, node: &'ast crate::LitBool) +where + V: Visit<'ast> + ?Sized, +{ + skip!(node.value); + v.visit_span(&node.span); +} +pub fn visit_lit_byte<'ast, V>(v: &mut V, node: &'ast crate::LitByte) +where + V: Visit<'ast> + ?Sized, +{} +pub fn visit_lit_byte_str<'ast, V>(v: &mut V, node: &'ast crate::LitByteStr) +where + V: Visit<'ast> + ?Sized, +{} +pub fn visit_lit_cstr<'ast, V>(v: &mut V, node: &'ast crate::LitCStr) +where + V: Visit<'ast> + ?Sized, +{} +pub fn visit_lit_char<'ast, V>(v: &mut V, node: &'ast crate::LitChar) +where + V: Visit<'ast> + ?Sized, +{} +pub fn visit_lit_float<'ast, V>(v: &mut V, node: &'ast crate::LitFloat) +where + V: Visit<'ast> + ?Sized, +{} +pub fn visit_lit_int<'ast, V>(v: &mut V, node: &'ast crate::LitInt) +where + V: Visit<'ast> + ?Sized, +{} +pub fn visit_lit_str<'ast, V>(v: &mut V, node: &'ast crate::LitStr) +where + V: Visit<'ast> + ?Sized, +{} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_local<'ast, V>(v: &mut V, node: &'ast crate::Local) +where + V: Visit<'ast> + ?Sized, +{ + for it in &node.attrs { + v.visit_attribute(it); + } + skip!(node.let_token); + v.visit_pat(&node.pat); + if let Some(it) = &node.init { + v.visit_local_init(it); + } + skip!(node.semi_token); +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_local_init<'ast, V>(v: &mut V, node: &'ast crate::LocalInit) +where + V: Visit<'ast> + ?Sized, +{ + skip!(node.eq_token); + v.visit_expr(&*node.expr); + if let Some(it) = &node.diverge { + skip!((it).0); + 
v.visit_expr(&*(it).1); + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn visit_macro<'ast, V>(v: &mut V, node: &'ast crate::Macro) +where + V: Visit<'ast> + ?Sized, +{ + v.visit_path(&node.path); + skip!(node.bang_token); + v.visit_macro_delimiter(&node.delimiter); + v.visit_token_stream(&node.tokens); +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn visit_macro_delimiter<'ast, V>(v: &mut V, node: &'ast crate::MacroDelimiter) +where + V: Visit<'ast> + ?Sized, +{ + match node { + crate::MacroDelimiter::Paren(_binding_0) => { + skip!(_binding_0); + } + crate::MacroDelimiter::Brace(_binding_0) => { + skip!(_binding_0); + } + crate::MacroDelimiter::Bracket(_binding_0) => { + skip!(_binding_0); + } + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn visit_member<'ast, V>(v: &mut V, node: &'ast crate::Member) +where + V: Visit<'ast> + ?Sized, +{ + match node { + crate::Member::Named(_binding_0) => { + v.visit_ident(_binding_0); + } + crate::Member::Unnamed(_binding_0) => { + v.visit_index(_binding_0); + } + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn visit_meta<'ast, V>(v: &mut V, node: &'ast crate::Meta) +where + V: Visit<'ast> + ?Sized, +{ + match node { + crate::Meta::Path(_binding_0) => { + v.visit_path(_binding_0); + } + crate::Meta::List(_binding_0) => { + v.visit_meta_list(_binding_0); + } + crate::Meta::NameValue(_binding_0) => { + v.visit_meta_name_value(_binding_0); + } + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn visit_meta_list<'ast, V>(v: &mut V, node: &'ast crate::MetaList) +where + V: Visit<'ast> + ?Sized, +{ + v.visit_path(&node.path); + v.visit_macro_delimiter(&node.delimiter); + v.visit_token_stream(&node.tokens); +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn visit_meta_name_value<'ast, V>(v: &mut V, node: &'ast crate::MetaNameValue) +where + V: Visit<'ast> + ?Sized, +{ + v.visit_path(&node.path); + skip!(node.eq_token); + v.visit_expr(&node.value); +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn visit_parenthesized_generic_arguments<'ast, V>( + v: &mut V, + node: &'ast crate::ParenthesizedGenericArguments, +) +where + V: Visit<'ast> + ?Sized, +{ + skip!(node.paren_token); + for el in Punctuated::pairs(&node.inputs) { + let it = el.value(); + v.visit_type(it); + } + v.visit_return_type(&node.output); +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_pat<'ast, V>(v: &mut V, node: &'ast crate::Pat) +where + V: Visit<'ast> + ?Sized, +{ + match node { + crate::Pat::Const(_binding_0) => { + v.visit_expr_const(_binding_0); + } + crate::Pat::Ident(_binding_0) => { + v.visit_pat_ident(_binding_0); + } + crate::Pat::Lit(_binding_0) => { + v.visit_expr_lit(_binding_0); + } + crate::Pat::Macro(_binding_0) => { + v.visit_expr_macro(_binding_0); + } + crate::Pat::Or(_binding_0) => { + v.visit_pat_or(_binding_0); + } + crate::Pat::Paren(_binding_0) => { + v.visit_pat_paren(_binding_0); + } + crate::Pat::Path(_binding_0) => { + 
v.visit_expr_path(_binding_0); + } + crate::Pat::Range(_binding_0) => { + v.visit_expr_range(_binding_0); + } + crate::Pat::Reference(_binding_0) => { + v.visit_pat_reference(_binding_0); + } + crate::Pat::Rest(_binding_0) => { + v.visit_pat_rest(_binding_0); + } + crate::Pat::Slice(_binding_0) => { + v.visit_pat_slice(_binding_0); + } + crate::Pat::Struct(_binding_0) => { + v.visit_pat_struct(_binding_0); + } + crate::Pat::Tuple(_binding_0) => { + v.visit_pat_tuple(_binding_0); + } + crate::Pat::TupleStruct(_binding_0) => { + v.visit_pat_tuple_struct(_binding_0); + } + crate::Pat::Type(_binding_0) => { + v.visit_pat_type(_binding_0); + } + crate::Pat::Verbatim(_binding_0) => { + v.visit_token_stream(_binding_0); + } + crate::Pat::Wild(_binding_0) => { + v.visit_pat_wild(_binding_0); + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_pat_ident<'ast, V>(v: &mut V, node: &'ast crate::PatIdent) +where + V: Visit<'ast> + ?Sized, +{ + for it in &node.attrs { + v.visit_attribute(it); + } + skip!(node.by_ref); + skip!(node.mutability); + v.visit_ident(&node.ident); + if let Some(it) = &node.subpat { + skip!((it).0); + v.visit_pat(&*(it).1); + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_pat_or<'ast, V>(v: &mut V, node: &'ast crate::PatOr) +where + V: Visit<'ast> + ?Sized, +{ + for it in &node.attrs { + v.visit_attribute(it); + } + skip!(node.leading_vert); + for el in Punctuated::pairs(&node.cases) { + let it = el.value(); + v.visit_pat(it); + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_pat_paren<'ast, V>(v: &mut V, node: &'ast crate::PatParen) +where + V: Visit<'ast> + ?Sized, +{ + for it in &node.attrs { + v.visit_attribute(it); + } + skip!(node.paren_token); + v.visit_pat(&*node.pat); +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_pat_reference<'ast, V>(v: &mut V, node: &'ast crate::PatReference) +where + V: Visit<'ast> + ?Sized, +{ + for it in &node.attrs { + v.visit_attribute(it); + } + skip!(node.and_token); + skip!(node.mutability); + v.visit_pat(&*node.pat); +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_pat_rest<'ast, V>(v: &mut V, node: &'ast crate::PatRest) +where + V: Visit<'ast> + ?Sized, +{ + for it in &node.attrs { + v.visit_attribute(it); + } + skip!(node.dot2_token); +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_pat_slice<'ast, V>(v: &mut V, node: &'ast crate::PatSlice) +where + V: Visit<'ast> + ?Sized, +{ + for it in &node.attrs { + v.visit_attribute(it); + } + skip!(node.bracket_token); + for el in Punctuated::pairs(&node.elems) { + let it = el.value(); + v.visit_pat(it); + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_pat_struct<'ast, V>(v: &mut V, node: &'ast crate::PatStruct) +where + V: Visit<'ast> + ?Sized, +{ + for it in &node.attrs { + v.visit_attribute(it); + } + if let Some(it) = &node.qself { + v.visit_qself(it); + } + v.visit_path(&node.path); + skip!(node.brace_token); + for el in Punctuated::pairs(&node.fields) { + let it = el.value(); + v.visit_field_pat(it); + } + if let Some(it) = &node.rest { + v.visit_pat_rest(it); + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_pat_tuple<'ast, V>(v: &mut V, node: &'ast crate::PatTuple) +where + V: Visit<'ast> + ?Sized, +{ + for it in &node.attrs { + 
v.visit_attribute(it); + } + skip!(node.paren_token); + for el in Punctuated::pairs(&node.elems) { + let it = el.value(); + v.visit_pat(it); + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_pat_tuple_struct<'ast, V>(v: &mut V, node: &'ast crate::PatTupleStruct) +where + V: Visit<'ast> + ?Sized, +{ + for it in &node.attrs { + v.visit_attribute(it); + } + if let Some(it) = &node.qself { + v.visit_qself(it); + } + v.visit_path(&node.path); + skip!(node.paren_token); + for el in Punctuated::pairs(&node.elems) { + let it = el.value(); + v.visit_pat(it); + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_pat_type<'ast, V>(v: &mut V, node: &'ast crate::PatType) +where + V: Visit<'ast> + ?Sized, +{ + for it in &node.attrs { + v.visit_attribute(it); + } + v.visit_pat(&*node.pat); + skip!(node.colon_token); + v.visit_type(&*node.ty); +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_pat_wild<'ast, V>(v: &mut V, node: &'ast crate::PatWild) +where + V: Visit<'ast> + ?Sized, +{ + for it in &node.attrs { + v.visit_attribute(it); + } + skip!(node.underscore_token); +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn visit_path<'ast, V>(v: &mut V, node: &'ast crate::Path) +where + V: Visit<'ast> + ?Sized, +{ + skip!(node.leading_colon); + for el in Punctuated::pairs(&node.segments) { + let it = el.value(); + v.visit_path_segment(it); + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn visit_path_arguments<'ast, V>(v: &mut V, node: &'ast crate::PathArguments) +where + V: Visit<'ast> + ?Sized, +{ + match node { + crate::PathArguments::None => {} + crate::PathArguments::AngleBracketed(_binding_0) => { + v.visit_angle_bracketed_generic_arguments(_binding_0); + } + crate::PathArguments::Parenthesized(_binding_0) => { + v.visit_parenthesized_generic_arguments(_binding_0); + } + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn visit_path_segment<'ast, V>(v: &mut V, node: &'ast crate::PathSegment) +where + V: Visit<'ast> + ?Sized, +{ + v.visit_ident(&node.ident); + v.visit_path_arguments(&node.arguments); +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_pointer_mutability<'ast, V>(v: &mut V, node: &'ast crate::PointerMutability) +where + V: Visit<'ast> + ?Sized, +{ + match node { + crate::PointerMutability::Const(_binding_0) => { + skip!(_binding_0); + } + crate::PointerMutability::Mut(_binding_0) => { + skip!(_binding_0); + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_precise_capture<'ast, V>(v: &mut V, node: &'ast crate::PreciseCapture) +where + V: Visit<'ast> + ?Sized, +{ + skip!(node.use_token); + skip!(node.lt_token); + for el in Punctuated::pairs(&node.params) { + let it = el.value(); + v.visit_captured_param(it); + } + skip!(node.gt_token); +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn visit_predicate_lifetime<'ast, V>(v: &mut V, node: &'ast crate::PredicateLifetime) +where + V: Visit<'ast> + ?Sized, +{ + v.visit_lifetime(&node.lifetime); + skip!(node.colon_token); + for el in Punctuated::pairs(&node.bounds) { + let it = el.value(); + 
v.visit_lifetime(it); + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn visit_predicate_type<'ast, V>(v: &mut V, node: &'ast crate::PredicateType) +where + V: Visit<'ast> + ?Sized, +{ + if let Some(it) = &node.lifetimes { + v.visit_bound_lifetimes(it); + } + v.visit_type(&node.bounded_ty); + skip!(node.colon_token); + for el in Punctuated::pairs(&node.bounds) { + let it = el.value(); + v.visit_type_param_bound(it); + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn visit_qself<'ast, V>(v: &mut V, node: &'ast crate::QSelf) +where + V: Visit<'ast> + ?Sized, +{ + skip!(node.lt_token); + v.visit_type(&*node.ty); + skip!(node.position); + skip!(node.as_token); + skip!(node.gt_token); +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_range_limits<'ast, V>(v: &mut V, node: &'ast crate::RangeLimits) +where + V: Visit<'ast> + ?Sized, +{ + match node { + crate::RangeLimits::HalfOpen(_binding_0) => { + skip!(_binding_0); + } + crate::RangeLimits::Closed(_binding_0) => { + skip!(_binding_0); + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_receiver<'ast, V>(v: &mut V, node: &'ast crate::Receiver) +where + V: Visit<'ast> + ?Sized, +{ + for it in &node.attrs { + v.visit_attribute(it); + } + if let Some(it) = &node.reference { + skip!((it).0); + if let Some(it) = &(it).1 { + v.visit_lifetime(it); + } + } + skip!(node.mutability); + skip!(node.self_token); + skip!(node.colon_token); + v.visit_type(&*node.ty); +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn visit_return_type<'ast, V>(v: &mut V, node: &'ast crate::ReturnType) +where + V: Visit<'ast> + ?Sized, +{ + match node { + crate::ReturnType::Default => {} + crate::ReturnType::Type(_binding_0, _binding_1) => { + skip!(_binding_0); + v.visit_type(&**_binding_1); + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_signature<'ast, V>(v: &mut V, node: &'ast crate::Signature) +where + V: Visit<'ast> + ?Sized, +{ + skip!(node.constness); + skip!(node.asyncness); + skip!(node.unsafety); + if let Some(it) = &node.abi { + v.visit_abi(it); + } + skip!(node.fn_token); + v.visit_ident(&node.ident); + v.visit_generics(&node.generics); + skip!(node.paren_token); + for el in Punctuated::pairs(&node.inputs) { + let it = el.value(); + v.visit_fn_arg(it); + } + if let Some(it) = &node.variadic { + v.visit_variadic(it); + } + v.visit_return_type(&node.output); +} +pub fn visit_span<'ast, V>(v: &mut V, node: &proc_macro2::Span) +where + V: Visit<'ast> + ?Sized, +{} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_static_mutability<'ast, V>(v: &mut V, node: &'ast crate::StaticMutability) +where + V: Visit<'ast> + ?Sized, +{ + match node { + crate::StaticMutability::Mut(_binding_0) => { + skip!(_binding_0); + } + crate::StaticMutability::None => {} + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_stmt<'ast, V>(v: &mut V, node: &'ast crate::Stmt) +where + V: Visit<'ast> + ?Sized, +{ + match node { + crate::Stmt::Local(_binding_0) => { + v.visit_local(_binding_0); + } + crate::Stmt::Item(_binding_0) => { + v.visit_item(_binding_0); + } + crate::Stmt::Expr(_binding_0, _binding_1) => { + 
v.visit_expr(_binding_0); + skip!(_binding_1); + } + crate::Stmt::Macro(_binding_0) => { + v.visit_stmt_macro(_binding_0); + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_stmt_macro<'ast, V>(v: &mut V, node: &'ast crate::StmtMacro) +where + V: Visit<'ast> + ?Sized, +{ + for it in &node.attrs { + v.visit_attribute(it); + } + v.visit_macro(&node.mac); + skip!(node.semi_token); +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn visit_trait_bound<'ast, V>(v: &mut V, node: &'ast crate::TraitBound) +where + V: Visit<'ast> + ?Sized, +{ + skip!(node.paren_token); + v.visit_trait_bound_modifier(&node.modifier); + if let Some(it) = &node.lifetimes { + v.visit_bound_lifetimes(it); + } + v.visit_path(&node.path); +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn visit_trait_bound_modifier<'ast, V>( + v: &mut V, + node: &'ast crate::TraitBoundModifier, +) +where + V: Visit<'ast> + ?Sized, +{ + match node { + crate::TraitBoundModifier::None => {} + crate::TraitBoundModifier::Maybe(_binding_0) => { + skip!(_binding_0); + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_trait_item<'ast, V>(v: &mut V, node: &'ast crate::TraitItem) +where + V: Visit<'ast> + ?Sized, +{ + match node { + crate::TraitItem::Const(_binding_0) => { + v.visit_trait_item_const(_binding_0); + } + crate::TraitItem::Fn(_binding_0) => { + v.visit_trait_item_fn(_binding_0); + } + crate::TraitItem::Type(_binding_0) => { + v.visit_trait_item_type(_binding_0); + } + crate::TraitItem::Macro(_binding_0) => { + v.visit_trait_item_macro(_binding_0); + } + crate::TraitItem::Verbatim(_binding_0) => { + v.visit_token_stream(_binding_0); + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_trait_item_const<'ast, V>(v: &mut V, node: &'ast crate::TraitItemConst) +where + V: Visit<'ast> + ?Sized, +{ + for it in &node.attrs { + v.visit_attribute(it); + } + skip!(node.const_token); + v.visit_ident(&node.ident); + v.visit_generics(&node.generics); + skip!(node.colon_token); + v.visit_type(&node.ty); + if let Some(it) = &node.default { + skip!((it).0); + v.visit_expr(&(it).1); + } + skip!(node.semi_token); +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_trait_item_fn<'ast, V>(v: &mut V, node: &'ast crate::TraitItemFn) +where + V: Visit<'ast> + ?Sized, +{ + for it in &node.attrs { + v.visit_attribute(it); + } + v.visit_signature(&node.sig); + if let Some(it) = &node.default { + v.visit_block(it); + } + skip!(node.semi_token); +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_trait_item_macro<'ast, V>(v: &mut V, node: &'ast crate::TraitItemMacro) +where + V: Visit<'ast> + ?Sized, +{ + for it in &node.attrs { + v.visit_attribute(it); + } + v.visit_macro(&node.mac); + skip!(node.semi_token); +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_trait_item_type<'ast, V>(v: &mut V, node: &'ast crate::TraitItemType) +where + V: Visit<'ast> + ?Sized, +{ + for it in &node.attrs { + v.visit_attribute(it); + } + skip!(node.type_token); + v.visit_ident(&node.ident); + v.visit_generics(&node.generics); + skip!(node.colon_token); + for el in Punctuated::pairs(&node.bounds) { + let it = el.value(); + v.visit_type_param_bound(it); + } + 
if let Some(it) = &node.default { + skip!((it).0); + v.visit_type(&(it).1); + } + skip!(node.semi_token); +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn visit_type<'ast, V>(v: &mut V, node: &'ast crate::Type) +where + V: Visit<'ast> + ?Sized, +{ + match node { + crate::Type::Array(_binding_0) => { + v.visit_type_array(_binding_0); + } + crate::Type::BareFn(_binding_0) => { + v.visit_type_bare_fn(_binding_0); + } + crate::Type::Group(_binding_0) => { + v.visit_type_group(_binding_0); + } + crate::Type::ImplTrait(_binding_0) => { + v.visit_type_impl_trait(_binding_0); + } + crate::Type::Infer(_binding_0) => { + v.visit_type_infer(_binding_0); + } + crate::Type::Macro(_binding_0) => { + v.visit_type_macro(_binding_0); + } + crate::Type::Never(_binding_0) => { + v.visit_type_never(_binding_0); + } + crate::Type::Paren(_binding_0) => { + v.visit_type_paren(_binding_0); + } + crate::Type::Path(_binding_0) => { + v.visit_type_path(_binding_0); + } + crate::Type::Ptr(_binding_0) => { + v.visit_type_ptr(_binding_0); + } + crate::Type::Reference(_binding_0) => { + v.visit_type_reference(_binding_0); + } + crate::Type::Slice(_binding_0) => { + v.visit_type_slice(_binding_0); + } + crate::Type::TraitObject(_binding_0) => { + v.visit_type_trait_object(_binding_0); + } + crate::Type::Tuple(_binding_0) => { + v.visit_type_tuple(_binding_0); + } + crate::Type::Verbatim(_binding_0) => { + v.visit_token_stream(_binding_0); + } + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn visit_type_array<'ast, V>(v: &mut V, node: &'ast crate::TypeArray) +where + V: Visit<'ast> + ?Sized, +{ + skip!(node.bracket_token); + v.visit_type(&*node.elem); + skip!(node.semi_token); + v.visit_expr(&node.len); +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn visit_type_bare_fn<'ast, V>(v: &mut V, node: &'ast crate::TypeBareFn) +where + V: Visit<'ast> + ?Sized, +{ + if let Some(it) = &node.lifetimes { + v.visit_bound_lifetimes(it); + } + skip!(node.unsafety); + if let Some(it) = &node.abi { + v.visit_abi(it); + } + skip!(node.fn_token); + skip!(node.paren_token); + for el in Punctuated::pairs(&node.inputs) { + let it = el.value(); + v.visit_bare_fn_arg(it); + } + if let Some(it) = &node.variadic { + v.visit_bare_variadic(it); + } + v.visit_return_type(&node.output); +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn visit_type_group<'ast, V>(v: &mut V, node: &'ast crate::TypeGroup) +where + V: Visit<'ast> + ?Sized, +{ + skip!(node.group_token); + v.visit_type(&*node.elem); +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn visit_type_impl_trait<'ast, V>(v: &mut V, node: &'ast crate::TypeImplTrait) +where + V: Visit<'ast> + ?Sized, +{ + skip!(node.impl_token); + for el in Punctuated::pairs(&node.bounds) { + let it = el.value(); + v.visit_type_param_bound(it); + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn visit_type_infer<'ast, V>(v: &mut V, node: &'ast crate::TypeInfer) +where + V: Visit<'ast> + ?Sized, +{ + skip!(node.underscore_token); +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, 
doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn visit_type_macro<'ast, V>(v: &mut V, node: &'ast crate::TypeMacro) +where + V: Visit<'ast> + ?Sized, +{ + v.visit_macro(&node.mac); +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn visit_type_never<'ast, V>(v: &mut V, node: &'ast crate::TypeNever) +where + V: Visit<'ast> + ?Sized, +{ + skip!(node.bang_token); +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn visit_type_param<'ast, V>(v: &mut V, node: &'ast crate::TypeParam) +where + V: Visit<'ast> + ?Sized, +{ + for it in &node.attrs { + v.visit_attribute(it); + } + v.visit_ident(&node.ident); + skip!(node.colon_token); + for el in Punctuated::pairs(&node.bounds) { + let it = el.value(); + v.visit_type_param_bound(it); + } + skip!(node.eq_token); + if let Some(it) = &node.default { + v.visit_type(it); + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn visit_type_param_bound<'ast, V>(v: &mut V, node: &'ast crate::TypeParamBound) +where + V: Visit<'ast> + ?Sized, +{ + match node { + crate::TypeParamBound::Trait(_binding_0) => { + v.visit_trait_bound(_binding_0); + } + crate::TypeParamBound::Lifetime(_binding_0) => { + v.visit_lifetime(_binding_0); + } + crate::TypeParamBound::PreciseCapture(_binding_0) => { + full!(v.visit_precise_capture(_binding_0)); + } + crate::TypeParamBound::Verbatim(_binding_0) => { + v.visit_token_stream(_binding_0); + } + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn visit_type_paren<'ast, V>(v: &mut V, node: &'ast crate::TypeParen) +where + V: Visit<'ast> + ?Sized, +{ + skip!(node.paren_token); + v.visit_type(&*node.elem); +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn visit_type_path<'ast, V>(v: &mut V, node: &'ast crate::TypePath) +where + V: Visit<'ast> + ?Sized, +{ + if let Some(it) = &node.qself { + v.visit_qself(it); + } + v.visit_path(&node.path); +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn visit_type_ptr<'ast, V>(v: &mut V, node: &'ast crate::TypePtr) +where + V: Visit<'ast> + ?Sized, +{ + skip!(node.star_token); + skip!(node.const_token); + skip!(node.mutability); + v.visit_type(&*node.elem); +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn visit_type_reference<'ast, V>(v: &mut V, node: &'ast crate::TypeReference) +where + V: Visit<'ast> + ?Sized, +{ + skip!(node.and_token); + if let Some(it) = &node.lifetime { + v.visit_lifetime(it); + } + skip!(node.mutability); + v.visit_type(&*node.elem); +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn visit_type_slice<'ast, V>(v: &mut V, node: &'ast crate::TypeSlice) +where + V: Visit<'ast> + ?Sized, +{ + skip!(node.bracket_token); + v.visit_type(&*node.elem); +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn visit_type_trait_object<'ast, V>(v: &mut V, node: &'ast crate::TypeTraitObject) +where + V: Visit<'ast> + ?Sized, +{ + 
skip!(node.dyn_token); + for el in Punctuated::pairs(&node.bounds) { + let it = el.value(); + v.visit_type_param_bound(it); + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn visit_type_tuple<'ast, V>(v: &mut V, node: &'ast crate::TypeTuple) +where + V: Visit<'ast> + ?Sized, +{ + skip!(node.paren_token); + for el in Punctuated::pairs(&node.elems) { + let it = el.value(); + v.visit_type(it); + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn visit_un_op<'ast, V>(v: &mut V, node: &'ast crate::UnOp) +where + V: Visit<'ast> + ?Sized, +{ + match node { + crate::UnOp::Deref(_binding_0) => { + skip!(_binding_0); + } + crate::UnOp::Not(_binding_0) => { + skip!(_binding_0); + } + crate::UnOp::Neg(_binding_0) => { + skip!(_binding_0); + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_use_glob<'ast, V>(v: &mut V, node: &'ast crate::UseGlob) +where + V: Visit<'ast> + ?Sized, +{ + skip!(node.star_token); +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_use_group<'ast, V>(v: &mut V, node: &'ast crate::UseGroup) +where + V: Visit<'ast> + ?Sized, +{ + skip!(node.brace_token); + for el in Punctuated::pairs(&node.items) { + let it = el.value(); + v.visit_use_tree(it); + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_use_name<'ast, V>(v: &mut V, node: &'ast crate::UseName) +where + V: Visit<'ast> + ?Sized, +{ + v.visit_ident(&node.ident); +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_use_path<'ast, V>(v: &mut V, node: &'ast crate::UsePath) +where + V: Visit<'ast> + ?Sized, +{ + v.visit_ident(&node.ident); + skip!(node.colon2_token); + v.visit_use_tree(&*node.tree); +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_use_rename<'ast, V>(v: &mut V, node: &'ast crate::UseRename) +where + V: Visit<'ast> + ?Sized, +{ + v.visit_ident(&node.ident); + skip!(node.as_token); + v.visit_ident(&node.rename); +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_use_tree<'ast, V>(v: &mut V, node: &'ast crate::UseTree) +where + V: Visit<'ast> + ?Sized, +{ + match node { + crate::UseTree::Path(_binding_0) => { + v.visit_use_path(_binding_0); + } + crate::UseTree::Name(_binding_0) => { + v.visit_use_name(_binding_0); + } + crate::UseTree::Rename(_binding_0) => { + v.visit_use_rename(_binding_0); + } + crate::UseTree::Glob(_binding_0) => { + v.visit_use_glob(_binding_0); + } + crate::UseTree::Group(_binding_0) => { + v.visit_use_group(_binding_0); + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_variadic<'ast, V>(v: &mut V, node: &'ast crate::Variadic) +where + V: Visit<'ast> + ?Sized, +{ + for it in &node.attrs { + v.visit_attribute(it); + } + if let Some(it) = &node.pat { + v.visit_pat(&*(it).0); + skip!((it).1); + } + skip!(node.dots); + skip!(node.comma); +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn visit_variant<'ast, V>(v: &mut V, node: &'ast crate::Variant) +where + V: Visit<'ast> + ?Sized, +{ + for it in &node.attrs { + v.visit_attribute(it); + } + v.visit_ident(&node.ident); + v.visit_fields(&node.fields); + if let Some(it) = &node.discriminant 
{ + skip!((it).0); + v.visit_expr(&(it).1); + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn visit_vis_restricted<'ast, V>(v: &mut V, node: &'ast crate::VisRestricted) +where + V: Visit<'ast> + ?Sized, +{ + skip!(node.pub_token); + skip!(node.paren_token); + skip!(node.in_token); + v.visit_path(&*node.path); +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn visit_visibility<'ast, V>(v: &mut V, node: &'ast crate::Visibility) +where + V: Visit<'ast> + ?Sized, +{ + match node { + crate::Visibility::Public(_binding_0) => { + skip!(_binding_0); + } + crate::Visibility::Restricted(_binding_0) => { + v.visit_vis_restricted(_binding_0); + } + crate::Visibility::Inherited => {} + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn visit_where_clause<'ast, V>(v: &mut V, node: &'ast crate::WhereClause) +where + V: Visit<'ast> + ?Sized, +{ + skip!(node.where_token); + for el in Punctuated::pairs(&node.predicates) { + let it = el.value(); + v.visit_where_predicate(it); + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn visit_where_predicate<'ast, V>(v: &mut V, node: &'ast crate::WherePredicate) +where + V: Visit<'ast> + ?Sized, +{ + match node { + crate::WherePredicate::Lifetime(_binding_0) => { + v.visit_predicate_lifetime(_binding_0); + } + crate::WherePredicate::Type(_binding_0) => { + v.visit_predicate_type(_binding_0); + } + } +} diff --git a/vendor/syn/src/gen/visit_mut.rs b/vendor/syn/src/gen/visit_mut.rs new file mode 100644 index 00000000000000..2bbd6895db7573 --- /dev/null +++ b/vendor/syn/src/gen/visit_mut.rs @@ -0,0 +1,3759 @@ +// This file is @generated by syn-internal-codegen. +// It is not intended for manual editing. + +#![allow(unused_variables)] +#![allow(clippy::needless_pass_by_ref_mut)] +#[cfg(any(feature = "full", feature = "derive"))] +use crate::punctuated::Punctuated; +#[cfg(feature = "full")] +macro_rules! full { + ($e:expr) => { + $e + }; +} +#[cfg(all(feature = "derive", not(feature = "full")))] +macro_rules! full { + ($e:expr) => { + unreachable!() + }; +} +macro_rules! skip { + ($($tt:tt)*) => {}; +} +/// Syntax tree traversal to mutate an exclusive borrow of a syntax tree in +/// place. +/// +/// See the [module documentation] for details. 
+/// +/// [module documentation]: self +pub trait VisitMut { + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn visit_abi_mut(&mut self, i: &mut crate::Abi) { + visit_abi_mut(self, i); + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn visit_angle_bracketed_generic_arguments_mut( + &mut self, + i: &mut crate::AngleBracketedGenericArguments, + ) { + visit_angle_bracketed_generic_arguments_mut(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_arm_mut(&mut self, i: &mut crate::Arm) { + visit_arm_mut(self, i); + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn visit_assoc_const_mut(&mut self, i: &mut crate::AssocConst) { + visit_assoc_const_mut(self, i); + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn visit_assoc_type_mut(&mut self, i: &mut crate::AssocType) { + visit_assoc_type_mut(self, i); + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn visit_attr_style_mut(&mut self, i: &mut crate::AttrStyle) { + visit_attr_style_mut(self, i); + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn visit_attribute_mut(&mut self, i: &mut crate::Attribute) { + visit_attribute_mut(self, i); + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn visit_attributes_mut(&mut self, i: &mut Vec<crate::Attribute>) { + for attr in i { + self.visit_attribute_mut(attr); + } + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn visit_bare_fn_arg_mut(&mut self, i: &mut crate::BareFnArg) { + visit_bare_fn_arg_mut(self, i); + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn visit_bare_variadic_mut(&mut self, i: &mut crate::BareVariadic) { + visit_bare_variadic_mut(self, i); + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn visit_bin_op_mut(&mut self, i: &mut crate::BinOp) { + visit_bin_op_mut(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_block_mut(&mut self, i: &mut crate::Block) { + visit_block_mut(self, i); + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn visit_bound_lifetimes_mut(&mut self, i: &mut crate::BoundLifetimes) { + visit_bound_lifetimes_mut(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_captured_param_mut(&mut self, i: &mut crate::CapturedParam) { + visit_captured_param_mut(self, i); + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn visit_const_param_mut(&mut self, i: &mut crate::ConstParam) { + visit_const_param_mut(self, i); + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + 
fn visit_constraint_mut(&mut self, i: &mut crate::Constraint) { + visit_constraint_mut(self, i); + } + #[cfg(feature = "derive")] + #[cfg_attr(docsrs, doc(cfg(feature = "derive")))] + fn visit_data_mut(&mut self, i: &mut crate::Data) { + visit_data_mut(self, i); + } + #[cfg(feature = "derive")] + #[cfg_attr(docsrs, doc(cfg(feature = "derive")))] + fn visit_data_enum_mut(&mut self, i: &mut crate::DataEnum) { + visit_data_enum_mut(self, i); + } + #[cfg(feature = "derive")] + #[cfg_attr(docsrs, doc(cfg(feature = "derive")))] + fn visit_data_struct_mut(&mut self, i: &mut crate::DataStruct) { + visit_data_struct_mut(self, i); + } + #[cfg(feature = "derive")] + #[cfg_attr(docsrs, doc(cfg(feature = "derive")))] + fn visit_data_union_mut(&mut self, i: &mut crate::DataUnion) { + visit_data_union_mut(self, i); + } + #[cfg(feature = "derive")] + #[cfg_attr(docsrs, doc(cfg(feature = "derive")))] + fn visit_derive_input_mut(&mut self, i: &mut crate::DeriveInput) { + visit_derive_input_mut(self, i); + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn visit_expr_mut(&mut self, i: &mut crate::Expr) { + visit_expr_mut(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_expr_array_mut(&mut self, i: &mut crate::ExprArray) { + visit_expr_array_mut(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_expr_assign_mut(&mut self, i: &mut crate::ExprAssign) { + visit_expr_assign_mut(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_expr_async_mut(&mut self, i: &mut crate::ExprAsync) { + visit_expr_async_mut(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_expr_await_mut(&mut self, i: &mut crate::ExprAwait) { + visit_expr_await_mut(self, i); + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn visit_expr_binary_mut(&mut self, i: &mut crate::ExprBinary) { + visit_expr_binary_mut(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_expr_block_mut(&mut self, i: &mut crate::ExprBlock) { + visit_expr_block_mut(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_expr_break_mut(&mut self, i: &mut crate::ExprBreak) { + visit_expr_break_mut(self, i); + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn visit_expr_call_mut(&mut self, i: &mut crate::ExprCall) { + visit_expr_call_mut(self, i); + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn visit_expr_cast_mut(&mut self, i: &mut crate::ExprCast) { + visit_expr_cast_mut(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_expr_closure_mut(&mut self, i: &mut crate::ExprClosure) { + visit_expr_closure_mut(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_expr_const_mut(&mut self, i: &mut crate::ExprConst) { + visit_expr_const_mut(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_expr_continue_mut(&mut self, i: &mut crate::ExprContinue) { + visit_expr_continue_mut(self, i); + } + #[cfg(any(feature = 
"derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn visit_expr_field_mut(&mut self, i: &mut crate::ExprField) { + visit_expr_field_mut(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_expr_for_loop_mut(&mut self, i: &mut crate::ExprForLoop) { + visit_expr_for_loop_mut(self, i); + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn visit_expr_group_mut(&mut self, i: &mut crate::ExprGroup) { + visit_expr_group_mut(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_expr_if_mut(&mut self, i: &mut crate::ExprIf) { + visit_expr_if_mut(self, i); + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn visit_expr_index_mut(&mut self, i: &mut crate::ExprIndex) { + visit_expr_index_mut(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_expr_infer_mut(&mut self, i: &mut crate::ExprInfer) { + visit_expr_infer_mut(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_expr_let_mut(&mut self, i: &mut crate::ExprLet) { + visit_expr_let_mut(self, i); + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn visit_expr_lit_mut(&mut self, i: &mut crate::ExprLit) { + visit_expr_lit_mut(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_expr_loop_mut(&mut self, i: &mut crate::ExprLoop) { + visit_expr_loop_mut(self, i); + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn visit_expr_macro_mut(&mut self, i: &mut crate::ExprMacro) { + visit_expr_macro_mut(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_expr_match_mut(&mut self, i: &mut crate::ExprMatch) { + visit_expr_match_mut(self, i); + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn visit_expr_method_call_mut(&mut self, i: &mut crate::ExprMethodCall) { + visit_expr_method_call_mut(self, i); + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn visit_expr_paren_mut(&mut self, i: &mut crate::ExprParen) { + visit_expr_paren_mut(self, i); + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn visit_expr_path_mut(&mut self, i: &mut crate::ExprPath) { + visit_expr_path_mut(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_expr_range_mut(&mut self, i: &mut crate::ExprRange) { + visit_expr_range_mut(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_expr_raw_addr_mut(&mut self, i: &mut crate::ExprRawAddr) { + visit_expr_raw_addr_mut(self, i); + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn visit_expr_reference_mut(&mut self, i: &mut crate::ExprReference) { + visit_expr_reference_mut(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = 
"full")))] + fn visit_expr_repeat_mut(&mut self, i: &mut crate::ExprRepeat) { + visit_expr_repeat_mut(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_expr_return_mut(&mut self, i: &mut crate::ExprReturn) { + visit_expr_return_mut(self, i); + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn visit_expr_struct_mut(&mut self, i: &mut crate::ExprStruct) { + visit_expr_struct_mut(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_expr_try_mut(&mut self, i: &mut crate::ExprTry) { + visit_expr_try_mut(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_expr_try_block_mut(&mut self, i: &mut crate::ExprTryBlock) { + visit_expr_try_block_mut(self, i); + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn visit_expr_tuple_mut(&mut self, i: &mut crate::ExprTuple) { + visit_expr_tuple_mut(self, i); + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn visit_expr_unary_mut(&mut self, i: &mut crate::ExprUnary) { + visit_expr_unary_mut(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_expr_unsafe_mut(&mut self, i: &mut crate::ExprUnsafe) { + visit_expr_unsafe_mut(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_expr_while_mut(&mut self, i: &mut crate::ExprWhile) { + visit_expr_while_mut(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_expr_yield_mut(&mut self, i: &mut crate::ExprYield) { + visit_expr_yield_mut(self, i); + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn visit_field_mut(&mut self, i: &mut crate::Field) { + visit_field_mut(self, i); + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn visit_field_mutability_mut(&mut self, i: &mut crate::FieldMutability) { + visit_field_mutability_mut(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_field_pat_mut(&mut self, i: &mut crate::FieldPat) { + visit_field_pat_mut(self, i); + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn visit_field_value_mut(&mut self, i: &mut crate::FieldValue) { + visit_field_value_mut(self, i); + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn visit_fields_mut(&mut self, i: &mut crate::Fields) { + visit_fields_mut(self, i); + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn visit_fields_named_mut(&mut self, i: &mut crate::FieldsNamed) { + visit_fields_named_mut(self, i); + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn visit_fields_unnamed_mut(&mut self, i: &mut crate::FieldsUnnamed) { + visit_fields_unnamed_mut(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn 
visit_file_mut(&mut self, i: &mut crate::File) { + visit_file_mut(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_fn_arg_mut(&mut self, i: &mut crate::FnArg) { + visit_fn_arg_mut(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_foreign_item_mut(&mut self, i: &mut crate::ForeignItem) { + visit_foreign_item_mut(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_foreign_item_fn_mut(&mut self, i: &mut crate::ForeignItemFn) { + visit_foreign_item_fn_mut(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_foreign_item_macro_mut(&mut self, i: &mut crate::ForeignItemMacro) { + visit_foreign_item_macro_mut(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_foreign_item_static_mut(&mut self, i: &mut crate::ForeignItemStatic) { + visit_foreign_item_static_mut(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_foreign_item_type_mut(&mut self, i: &mut crate::ForeignItemType) { + visit_foreign_item_type_mut(self, i); + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn visit_generic_argument_mut(&mut self, i: &mut crate::GenericArgument) { + visit_generic_argument_mut(self, i); + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn visit_generic_param_mut(&mut self, i: &mut crate::GenericParam) { + visit_generic_param_mut(self, i); + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn visit_generics_mut(&mut self, i: &mut crate::Generics) { + visit_generics_mut(self, i); + } + fn visit_ident_mut(&mut self, i: &mut proc_macro2::Ident) { + visit_ident_mut(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_impl_item_mut(&mut self, i: &mut crate::ImplItem) { + visit_impl_item_mut(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_impl_item_const_mut(&mut self, i: &mut crate::ImplItemConst) { + visit_impl_item_const_mut(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_impl_item_fn_mut(&mut self, i: &mut crate::ImplItemFn) { + visit_impl_item_fn_mut(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_impl_item_macro_mut(&mut self, i: &mut crate::ImplItemMacro) { + visit_impl_item_macro_mut(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_impl_item_type_mut(&mut self, i: &mut crate::ImplItemType) { + visit_impl_item_type_mut(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_impl_restriction_mut(&mut self, i: &mut crate::ImplRestriction) { + visit_impl_restriction_mut(self, i); + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn visit_index_mut(&mut self, i: &mut crate::Index) { + visit_index_mut(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_item_mut(&mut self, i: &mut crate::Item) { + visit_item_mut(self, i); + } + 
#[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_item_const_mut(&mut self, i: &mut crate::ItemConst) { + visit_item_const_mut(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_item_enum_mut(&mut self, i: &mut crate::ItemEnum) { + visit_item_enum_mut(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_item_extern_crate_mut(&mut self, i: &mut crate::ItemExternCrate) { + visit_item_extern_crate_mut(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_item_fn_mut(&mut self, i: &mut crate::ItemFn) { + visit_item_fn_mut(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_item_foreign_mod_mut(&mut self, i: &mut crate::ItemForeignMod) { + visit_item_foreign_mod_mut(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_item_impl_mut(&mut self, i: &mut crate::ItemImpl) { + visit_item_impl_mut(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_item_macro_mut(&mut self, i: &mut crate::ItemMacro) { + visit_item_macro_mut(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_item_mod_mut(&mut self, i: &mut crate::ItemMod) { + visit_item_mod_mut(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_item_static_mut(&mut self, i: &mut crate::ItemStatic) { + visit_item_static_mut(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_item_struct_mut(&mut self, i: &mut crate::ItemStruct) { + visit_item_struct_mut(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_item_trait_mut(&mut self, i: &mut crate::ItemTrait) { + visit_item_trait_mut(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_item_trait_alias_mut(&mut self, i: &mut crate::ItemTraitAlias) { + visit_item_trait_alias_mut(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_item_type_mut(&mut self, i: &mut crate::ItemType) { + visit_item_type_mut(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_item_union_mut(&mut self, i: &mut crate::ItemUnion) { + visit_item_union_mut(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_item_use_mut(&mut self, i: &mut crate::ItemUse) { + visit_item_use_mut(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_label_mut(&mut self, i: &mut crate::Label) { + visit_label_mut(self, i); + } + fn visit_lifetime_mut(&mut self, i: &mut crate::Lifetime) { + visit_lifetime_mut(self, i); + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn visit_lifetime_param_mut(&mut self, i: &mut crate::LifetimeParam) { + visit_lifetime_param_mut(self, i); + } + fn visit_lit_mut(&mut self, i: &mut crate::Lit) { + visit_lit_mut(self, i); + } + fn visit_lit_bool_mut(&mut self, i: &mut crate::LitBool) { + visit_lit_bool_mut(self, i); + } + fn visit_lit_byte_mut(&mut self, i: &mut crate::LitByte) { + visit_lit_byte_mut(self, i); + } + fn visit_lit_byte_str_mut(&mut self, i: &mut 
crate::LitByteStr) { + visit_lit_byte_str_mut(self, i); + } + fn visit_lit_cstr_mut(&mut self, i: &mut crate::LitCStr) { + visit_lit_cstr_mut(self, i); + } + fn visit_lit_char_mut(&mut self, i: &mut crate::LitChar) { + visit_lit_char_mut(self, i); + } + fn visit_lit_float_mut(&mut self, i: &mut crate::LitFloat) { + visit_lit_float_mut(self, i); + } + fn visit_lit_int_mut(&mut self, i: &mut crate::LitInt) { + visit_lit_int_mut(self, i); + } + fn visit_lit_str_mut(&mut self, i: &mut crate::LitStr) { + visit_lit_str_mut(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_local_mut(&mut self, i: &mut crate::Local) { + visit_local_mut(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_local_init_mut(&mut self, i: &mut crate::LocalInit) { + visit_local_init_mut(self, i); + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn visit_macro_mut(&mut self, i: &mut crate::Macro) { + visit_macro_mut(self, i); + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn visit_macro_delimiter_mut(&mut self, i: &mut crate::MacroDelimiter) { + visit_macro_delimiter_mut(self, i); + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn visit_member_mut(&mut self, i: &mut crate::Member) { + visit_member_mut(self, i); + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn visit_meta_mut(&mut self, i: &mut crate::Meta) { + visit_meta_mut(self, i); + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn visit_meta_list_mut(&mut self, i: &mut crate::MetaList) { + visit_meta_list_mut(self, i); + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn visit_meta_name_value_mut(&mut self, i: &mut crate::MetaNameValue) { + visit_meta_name_value_mut(self, i); + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn visit_parenthesized_generic_arguments_mut( + &mut self, + i: &mut crate::ParenthesizedGenericArguments, + ) { + visit_parenthesized_generic_arguments_mut(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_pat_mut(&mut self, i: &mut crate::Pat) { + visit_pat_mut(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_pat_ident_mut(&mut self, i: &mut crate::PatIdent) { + visit_pat_ident_mut(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_pat_or_mut(&mut self, i: &mut crate::PatOr) { + visit_pat_or_mut(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_pat_paren_mut(&mut self, i: &mut crate::PatParen) { + visit_pat_paren_mut(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_pat_reference_mut(&mut self, i: &mut crate::PatReference) { + visit_pat_reference_mut(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_pat_rest_mut(&mut self, i: &mut crate::PatRest) { + 
visit_pat_rest_mut(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_pat_slice_mut(&mut self, i: &mut crate::PatSlice) { + visit_pat_slice_mut(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_pat_struct_mut(&mut self, i: &mut crate::PatStruct) { + visit_pat_struct_mut(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_pat_tuple_mut(&mut self, i: &mut crate::PatTuple) { + visit_pat_tuple_mut(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_pat_tuple_struct_mut(&mut self, i: &mut crate::PatTupleStruct) { + visit_pat_tuple_struct_mut(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_pat_type_mut(&mut self, i: &mut crate::PatType) { + visit_pat_type_mut(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_pat_wild_mut(&mut self, i: &mut crate::PatWild) { + visit_pat_wild_mut(self, i); + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn visit_path_mut(&mut self, i: &mut crate::Path) { + visit_path_mut(self, i); + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn visit_path_arguments_mut(&mut self, i: &mut crate::PathArguments) { + visit_path_arguments_mut(self, i); + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn visit_path_segment_mut(&mut self, i: &mut crate::PathSegment) { + visit_path_segment_mut(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_pointer_mutability_mut(&mut self, i: &mut crate::PointerMutability) { + visit_pointer_mutability_mut(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_precise_capture_mut(&mut self, i: &mut crate::PreciseCapture) { + visit_precise_capture_mut(self, i); + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn visit_predicate_lifetime_mut(&mut self, i: &mut crate::PredicateLifetime) { + visit_predicate_lifetime_mut(self, i); + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn visit_predicate_type_mut(&mut self, i: &mut crate::PredicateType) { + visit_predicate_type_mut(self, i); + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn visit_qself_mut(&mut self, i: &mut crate::QSelf) { + visit_qself_mut(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_range_limits_mut(&mut self, i: &mut crate::RangeLimits) { + visit_range_limits_mut(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_receiver_mut(&mut self, i: &mut crate::Receiver) { + visit_receiver_mut(self, i); + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn visit_return_type_mut(&mut self, i: &mut crate::ReturnType) { + visit_return_type_mut(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature 
= "full")))] + fn visit_signature_mut(&mut self, i: &mut crate::Signature) { + visit_signature_mut(self, i); + } + fn visit_span_mut(&mut self, i: &mut proc_macro2::Span) {} + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_static_mutability_mut(&mut self, i: &mut crate::StaticMutability) { + visit_static_mutability_mut(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_stmt_mut(&mut self, i: &mut crate::Stmt) { + visit_stmt_mut(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_stmt_macro_mut(&mut self, i: &mut crate::StmtMacro) { + visit_stmt_macro_mut(self, i); + } + fn visit_token_stream_mut(&mut self, i: &mut proc_macro2::TokenStream) {} + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn visit_trait_bound_mut(&mut self, i: &mut crate::TraitBound) { + visit_trait_bound_mut(self, i); + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn visit_trait_bound_modifier_mut(&mut self, i: &mut crate::TraitBoundModifier) { + visit_trait_bound_modifier_mut(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_trait_item_mut(&mut self, i: &mut crate::TraitItem) { + visit_trait_item_mut(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_trait_item_const_mut(&mut self, i: &mut crate::TraitItemConst) { + visit_trait_item_const_mut(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_trait_item_fn_mut(&mut self, i: &mut crate::TraitItemFn) { + visit_trait_item_fn_mut(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_trait_item_macro_mut(&mut self, i: &mut crate::TraitItemMacro) { + visit_trait_item_macro_mut(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_trait_item_type_mut(&mut self, i: &mut crate::TraitItemType) { + visit_trait_item_type_mut(self, i); + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn visit_type_mut(&mut self, i: &mut crate::Type) { + visit_type_mut(self, i); + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn visit_type_array_mut(&mut self, i: &mut crate::TypeArray) { + visit_type_array_mut(self, i); + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn visit_type_bare_fn_mut(&mut self, i: &mut crate::TypeBareFn) { + visit_type_bare_fn_mut(self, i); + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn visit_type_group_mut(&mut self, i: &mut crate::TypeGroup) { + visit_type_group_mut(self, i); + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn visit_type_impl_trait_mut(&mut self, i: &mut crate::TypeImplTrait) { + visit_type_impl_trait_mut(self, i); + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn visit_type_infer_mut(&mut self, i: &mut 
crate::TypeInfer) { + visit_type_infer_mut(self, i); + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn visit_type_macro_mut(&mut self, i: &mut crate::TypeMacro) { + visit_type_macro_mut(self, i); + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn visit_type_never_mut(&mut self, i: &mut crate::TypeNever) { + visit_type_never_mut(self, i); + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn visit_type_param_mut(&mut self, i: &mut crate::TypeParam) { + visit_type_param_mut(self, i); + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn visit_type_param_bound_mut(&mut self, i: &mut crate::TypeParamBound) { + visit_type_param_bound_mut(self, i); + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn visit_type_paren_mut(&mut self, i: &mut crate::TypeParen) { + visit_type_paren_mut(self, i); + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn visit_type_path_mut(&mut self, i: &mut crate::TypePath) { + visit_type_path_mut(self, i); + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn visit_type_ptr_mut(&mut self, i: &mut crate::TypePtr) { + visit_type_ptr_mut(self, i); + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn visit_type_reference_mut(&mut self, i: &mut crate::TypeReference) { + visit_type_reference_mut(self, i); + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn visit_type_slice_mut(&mut self, i: &mut crate::TypeSlice) { + visit_type_slice_mut(self, i); + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn visit_type_trait_object_mut(&mut self, i: &mut crate::TypeTraitObject) { + visit_type_trait_object_mut(self, i); + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn visit_type_tuple_mut(&mut self, i: &mut crate::TypeTuple) { + visit_type_tuple_mut(self, i); + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn visit_un_op_mut(&mut self, i: &mut crate::UnOp) { + visit_un_op_mut(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_use_glob_mut(&mut self, i: &mut crate::UseGlob) { + visit_use_glob_mut(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_use_group_mut(&mut self, i: &mut crate::UseGroup) { + visit_use_group_mut(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_use_name_mut(&mut self, i: &mut crate::UseName) { + visit_use_name_mut(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_use_path_mut(&mut self, i: &mut crate::UsePath) { + visit_use_path_mut(self, i); + } + #[cfg(feature = "full")] + 
#[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_use_rename_mut(&mut self, i: &mut crate::UseRename) { + visit_use_rename_mut(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_use_tree_mut(&mut self, i: &mut crate::UseTree) { + visit_use_tree_mut(self, i); + } + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + fn visit_variadic_mut(&mut self, i: &mut crate::Variadic) { + visit_variadic_mut(self, i); + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn visit_variant_mut(&mut self, i: &mut crate::Variant) { + visit_variant_mut(self, i); + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn visit_vis_restricted_mut(&mut self, i: &mut crate::VisRestricted) { + visit_vis_restricted_mut(self, i); + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn visit_visibility_mut(&mut self, i: &mut crate::Visibility) { + visit_visibility_mut(self, i); + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn visit_where_clause_mut(&mut self, i: &mut crate::WhereClause) { + visit_where_clause_mut(self, i); + } + #[cfg(any(feature = "derive", feature = "full"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] + fn visit_where_predicate_mut(&mut self, i: &mut crate::WherePredicate) { + visit_where_predicate_mut(self, i); + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn visit_abi_mut<V>(v: &mut V, node: &mut crate::Abi) +where + V: VisitMut + ?Sized, +{ + skip!(node.extern_token); + if let Some(it) = &mut node.name { + v.visit_lit_str_mut(it); + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn visit_angle_bracketed_generic_arguments_mut<V>( + v: &mut V, + node: &mut crate::AngleBracketedGenericArguments, +) +where + V: VisitMut + ?Sized, +{ + skip!(node.colon2_token); + skip!(node.lt_token); + for mut el in Punctuated::pairs_mut(&mut node.args) { + let it = el.value_mut(); + v.visit_generic_argument_mut(it); + } + skip!(node.gt_token); +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_arm_mut<V>(v: &mut V, node: &mut crate::Arm) +where + V: VisitMut + ?Sized, +{ + v.visit_attributes_mut(&mut node.attrs); + v.visit_pat_mut(&mut node.pat); + if let Some(it) = &mut node.guard { + skip!((it).0); + v.visit_expr_mut(&mut *(it).1); + } + skip!(node.fat_arrow_token); + v.visit_expr_mut(&mut *node.body); + skip!(node.comma); +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn visit_assoc_const_mut<V>(v: &mut V, node: &mut crate::AssocConst) +where + V: VisitMut + ?Sized, +{ + v.visit_ident_mut(&mut node.ident); + if let Some(it) = &mut node.generics { + v.visit_angle_bracketed_generic_arguments_mut(it); + } + skip!(node.eq_token); + v.visit_expr_mut(&mut node.value); +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn visit_assoc_type_mut<V>(v: &mut V, node: &mut crate::AssocType) +where + V: VisitMut 
+ ?Sized, +{ + v.visit_ident_mut(&mut node.ident); + if let Some(it) = &mut node.generics { + v.visit_angle_bracketed_generic_arguments_mut(it); + } + skip!(node.eq_token); + v.visit_type_mut(&mut node.ty); +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn visit_attr_style_mut<V>(v: &mut V, node: &mut crate::AttrStyle) +where + V: VisitMut + ?Sized, +{ + match node { + crate::AttrStyle::Outer => {} + crate::AttrStyle::Inner(_binding_0) => { + skip!(_binding_0); + } + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn visit_attribute_mut<V>(v: &mut V, node: &mut crate::Attribute) +where + V: VisitMut + ?Sized, +{ + skip!(node.pound_token); + v.visit_attr_style_mut(&mut node.style); + skip!(node.bracket_token); + v.visit_meta_mut(&mut node.meta); +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn visit_bare_fn_arg_mut<V>(v: &mut V, node: &mut crate::BareFnArg) +where + V: VisitMut + ?Sized, +{ + v.visit_attributes_mut(&mut node.attrs); + if let Some(it) = &mut node.name { + v.visit_ident_mut(&mut (it).0); + skip!((it).1); + } + v.visit_type_mut(&mut node.ty); +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn visit_bare_variadic_mut<V>(v: &mut V, node: &mut crate::BareVariadic) +where + V: VisitMut + ?Sized, +{ + v.visit_attributes_mut(&mut node.attrs); + if let Some(it) = &mut node.name { + v.visit_ident_mut(&mut (it).0); + skip!((it).1); + } + skip!(node.dots); + skip!(node.comma); +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn visit_bin_op_mut<V>(v: &mut V, node: &mut crate::BinOp) +where + V: VisitMut + ?Sized, +{ + match node { + crate::BinOp::Add(_binding_0) => { + skip!(_binding_0); + } + crate::BinOp::Sub(_binding_0) => { + skip!(_binding_0); + } + crate::BinOp::Mul(_binding_0) => { + skip!(_binding_0); + } + crate::BinOp::Div(_binding_0) => { + skip!(_binding_0); + } + crate::BinOp::Rem(_binding_0) => { + skip!(_binding_0); + } + crate::BinOp::And(_binding_0) => { + skip!(_binding_0); + } + crate::BinOp::Or(_binding_0) => { + skip!(_binding_0); + } + crate::BinOp::BitXor(_binding_0) => { + skip!(_binding_0); + } + crate::BinOp::BitAnd(_binding_0) => { + skip!(_binding_0); + } + crate::BinOp::BitOr(_binding_0) => { + skip!(_binding_0); + } + crate::BinOp::Shl(_binding_0) => { + skip!(_binding_0); + } + crate::BinOp::Shr(_binding_0) => { + skip!(_binding_0); + } + crate::BinOp::Eq(_binding_0) => { + skip!(_binding_0); + } + crate::BinOp::Lt(_binding_0) => { + skip!(_binding_0); + } + crate::BinOp::Le(_binding_0) => { + skip!(_binding_0); + } + crate::BinOp::Ne(_binding_0) => { + skip!(_binding_0); + } + crate::BinOp::Ge(_binding_0) => { + skip!(_binding_0); + } + crate::BinOp::Gt(_binding_0) => { + skip!(_binding_0); + } + crate::BinOp::AddAssign(_binding_0) => { + skip!(_binding_0); + } + crate::BinOp::SubAssign(_binding_0) => { + skip!(_binding_0); + } + crate::BinOp::MulAssign(_binding_0) => { + skip!(_binding_0); + } + crate::BinOp::DivAssign(_binding_0) => { + skip!(_binding_0); + } + crate::BinOp::RemAssign(_binding_0) => { + skip!(_binding_0); + } + crate::BinOp::BitXorAssign(_binding_0) => { + skip!(_binding_0); + } + 
crate::BinOp::BitAndAssign(_binding_0) => { + skip!(_binding_0); + } + crate::BinOp::BitOrAssign(_binding_0) => { + skip!(_binding_0); + } + crate::BinOp::ShlAssign(_binding_0) => { + skip!(_binding_0); + } + crate::BinOp::ShrAssign(_binding_0) => { + skip!(_binding_0); + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_block_mut<V>(v: &mut V, node: &mut crate::Block) +where + V: VisitMut + ?Sized, +{ + skip!(node.brace_token); + for it in &mut node.stmts { + v.visit_stmt_mut(it); + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn visit_bound_lifetimes_mut<V>(v: &mut V, node: &mut crate::BoundLifetimes) +where + V: VisitMut + ?Sized, +{ + skip!(node.for_token); + skip!(node.lt_token); + for mut el in Punctuated::pairs_mut(&mut node.lifetimes) { + let it = el.value_mut(); + v.visit_generic_param_mut(it); + } + skip!(node.gt_token); +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_captured_param_mut<V>(v: &mut V, node: &mut crate::CapturedParam) +where + V: VisitMut + ?Sized, +{ + match node { + crate::CapturedParam::Lifetime(_binding_0) => { + v.visit_lifetime_mut(_binding_0); + } + crate::CapturedParam::Ident(_binding_0) => { + v.visit_ident_mut(_binding_0); + } + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn visit_const_param_mut<V>(v: &mut V, node: &mut crate::ConstParam) +where + V: VisitMut + ?Sized, +{ + v.visit_attributes_mut(&mut node.attrs); + skip!(node.const_token); + v.visit_ident_mut(&mut node.ident); + skip!(node.colon_token); + v.visit_type_mut(&mut node.ty); + skip!(node.eq_token); + if let Some(it) = &mut node.default { + v.visit_expr_mut(it); + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn visit_constraint_mut<V>(v: &mut V, node: &mut crate::Constraint) +where + V: VisitMut + ?Sized, +{ + v.visit_ident_mut(&mut node.ident); + if let Some(it) = &mut node.generics { + v.visit_angle_bracketed_generic_arguments_mut(it); + } + skip!(node.colon_token); + for mut el in Punctuated::pairs_mut(&mut node.bounds) { + let it = el.value_mut(); + v.visit_type_param_bound_mut(it); + } +} +#[cfg(feature = "derive")] +#[cfg_attr(docsrs, doc(cfg(feature = "derive")))] +pub fn visit_data_mut<V>(v: &mut V, node: &mut crate::Data) +where + V: VisitMut + ?Sized, +{ + match node { + crate::Data::Struct(_binding_0) => { + v.visit_data_struct_mut(_binding_0); + } + crate::Data::Enum(_binding_0) => { + v.visit_data_enum_mut(_binding_0); + } + crate::Data::Union(_binding_0) => { + v.visit_data_union_mut(_binding_0); + } + } +} +#[cfg(feature = "derive")] +#[cfg_attr(docsrs, doc(cfg(feature = "derive")))] +pub fn visit_data_enum_mut<V>(v: &mut V, node: &mut crate::DataEnum) +where + V: VisitMut + ?Sized, +{ + skip!(node.enum_token); + skip!(node.brace_token); + for mut el in Punctuated::pairs_mut(&mut node.variants) { + let it = el.value_mut(); + v.visit_variant_mut(it); + } +} +#[cfg(feature = "derive")] +#[cfg_attr(docsrs, doc(cfg(feature = "derive")))] +pub fn visit_data_struct_mut<V>(v: &mut V, node: &mut crate::DataStruct) +where + V: VisitMut + ?Sized, +{ + skip!(node.struct_token); + v.visit_fields_mut(&mut node.fields); + skip!(node.semi_token); +} +#[cfg(feature = "derive")] +#[cfg_attr(docsrs, doc(cfg(feature = "derive")))] +pub fn 
visit_data_union_mut<V>(v: &mut V, node: &mut crate::DataUnion) +where + V: VisitMut + ?Sized, +{ + skip!(node.union_token); + v.visit_fields_named_mut(&mut node.fields); +} +#[cfg(feature = "derive")] +#[cfg_attr(docsrs, doc(cfg(feature = "derive")))] +pub fn visit_derive_input_mut<V>(v: &mut V, node: &mut crate::DeriveInput) +where + V: VisitMut + ?Sized, +{ + v.visit_attributes_mut(&mut node.attrs); + v.visit_visibility_mut(&mut node.vis); + v.visit_ident_mut(&mut node.ident); + v.visit_generics_mut(&mut node.generics); + v.visit_data_mut(&mut node.data); +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn visit_expr_mut<V>(v: &mut V, node: &mut crate::Expr) +where + V: VisitMut + ?Sized, +{ + match node { + crate::Expr::Array(_binding_0) => { + full!(v.visit_expr_array_mut(_binding_0)); + } + crate::Expr::Assign(_binding_0) => { + full!(v.visit_expr_assign_mut(_binding_0)); + } + crate::Expr::Async(_binding_0) => { + full!(v.visit_expr_async_mut(_binding_0)); + } + crate::Expr::Await(_binding_0) => { + full!(v.visit_expr_await_mut(_binding_0)); + } + crate::Expr::Binary(_binding_0) => { + v.visit_expr_binary_mut(_binding_0); + } + crate::Expr::Block(_binding_0) => { + full!(v.visit_expr_block_mut(_binding_0)); + } + crate::Expr::Break(_binding_0) => { + full!(v.visit_expr_break_mut(_binding_0)); + } + crate::Expr::Call(_binding_0) => { + v.visit_expr_call_mut(_binding_0); + } + crate::Expr::Cast(_binding_0) => { + v.visit_expr_cast_mut(_binding_0); + } + crate::Expr::Closure(_binding_0) => { + full!(v.visit_expr_closure_mut(_binding_0)); + } + crate::Expr::Const(_binding_0) => { + full!(v.visit_expr_const_mut(_binding_0)); + } + crate::Expr::Continue(_binding_0) => { + full!(v.visit_expr_continue_mut(_binding_0)); + } + crate::Expr::Field(_binding_0) => { + v.visit_expr_field_mut(_binding_0); + } + crate::Expr::ForLoop(_binding_0) => { + full!(v.visit_expr_for_loop_mut(_binding_0)); + } + crate::Expr::Group(_binding_0) => { + v.visit_expr_group_mut(_binding_0); + } + crate::Expr::If(_binding_0) => { + full!(v.visit_expr_if_mut(_binding_0)); + } + crate::Expr::Index(_binding_0) => { + v.visit_expr_index_mut(_binding_0); + } + crate::Expr::Infer(_binding_0) => { + full!(v.visit_expr_infer_mut(_binding_0)); + } + crate::Expr::Let(_binding_0) => { + full!(v.visit_expr_let_mut(_binding_0)); + } + crate::Expr::Lit(_binding_0) => { + v.visit_expr_lit_mut(_binding_0); + } + crate::Expr::Loop(_binding_0) => { + full!(v.visit_expr_loop_mut(_binding_0)); + } + crate::Expr::Macro(_binding_0) => { + v.visit_expr_macro_mut(_binding_0); + } + crate::Expr::Match(_binding_0) => { + full!(v.visit_expr_match_mut(_binding_0)); + } + crate::Expr::MethodCall(_binding_0) => { + v.visit_expr_method_call_mut(_binding_0); + } + crate::Expr::Paren(_binding_0) => { + v.visit_expr_paren_mut(_binding_0); + } + crate::Expr::Path(_binding_0) => { + v.visit_expr_path_mut(_binding_0); + } + crate::Expr::Range(_binding_0) => { + full!(v.visit_expr_range_mut(_binding_0)); + } + crate::Expr::RawAddr(_binding_0) => { + full!(v.visit_expr_raw_addr_mut(_binding_0)); + } + crate::Expr::Reference(_binding_0) => { + v.visit_expr_reference_mut(_binding_0); + } + crate::Expr::Repeat(_binding_0) => { + full!(v.visit_expr_repeat_mut(_binding_0)); + } + crate::Expr::Return(_binding_0) => { + full!(v.visit_expr_return_mut(_binding_0)); + } + crate::Expr::Struct(_binding_0) => { + v.visit_expr_struct_mut(_binding_0); + } + crate::Expr::Try(_binding_0) 
=> { + full!(v.visit_expr_try_mut(_binding_0)); + } + crate::Expr::TryBlock(_binding_0) => { + full!(v.visit_expr_try_block_mut(_binding_0)); + } + crate::Expr::Tuple(_binding_0) => { + v.visit_expr_tuple_mut(_binding_0); + } + crate::Expr::Unary(_binding_0) => { + v.visit_expr_unary_mut(_binding_0); + } + crate::Expr::Unsafe(_binding_0) => { + full!(v.visit_expr_unsafe_mut(_binding_0)); + } + crate::Expr::Verbatim(_binding_0) => { + v.visit_token_stream_mut(_binding_0); + } + crate::Expr::While(_binding_0) => { + full!(v.visit_expr_while_mut(_binding_0)); + } + crate::Expr::Yield(_binding_0) => { + full!(v.visit_expr_yield_mut(_binding_0)); + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_expr_array_mut<V>(v: &mut V, node: &mut crate::ExprArray) +where + V: VisitMut + ?Sized, +{ + v.visit_attributes_mut(&mut node.attrs); + skip!(node.bracket_token); + for mut el in Punctuated::pairs_mut(&mut node.elems) { + let it = el.value_mut(); + v.visit_expr_mut(it); + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_expr_assign_mut<V>(v: &mut V, node: &mut crate::ExprAssign) +where + V: VisitMut + ?Sized, +{ + v.visit_attributes_mut(&mut node.attrs); + v.visit_expr_mut(&mut *node.left); + skip!(node.eq_token); + v.visit_expr_mut(&mut *node.right); +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_expr_async_mut<V>(v: &mut V, node: &mut crate::ExprAsync) +where + V: VisitMut + ?Sized, +{ + v.visit_attributes_mut(&mut node.attrs); + skip!(node.async_token); + skip!(node.capture); + v.visit_block_mut(&mut node.block); +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_expr_await_mut<V>(v: &mut V, node: &mut crate::ExprAwait) +where + V: VisitMut + ?Sized, +{ + v.visit_attributes_mut(&mut node.attrs); + v.visit_expr_mut(&mut *node.base); + skip!(node.dot_token); + skip!(node.await_token); +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn visit_expr_binary_mut<V>(v: &mut V, node: &mut crate::ExprBinary) +where + V: VisitMut + ?Sized, +{ + v.visit_attributes_mut(&mut node.attrs); + v.visit_expr_mut(&mut *node.left); + v.visit_bin_op_mut(&mut node.op); + v.visit_expr_mut(&mut *node.right); +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_expr_block_mut<V>(v: &mut V, node: &mut crate::ExprBlock) +where + V: VisitMut + ?Sized, +{ + v.visit_attributes_mut(&mut node.attrs); + if let Some(it) = &mut node.label { + v.visit_label_mut(it); + } + v.visit_block_mut(&mut node.block); +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_expr_break_mut<V>(v: &mut V, node: &mut crate::ExprBreak) +where + V: VisitMut + ?Sized, +{ + v.visit_attributes_mut(&mut node.attrs); + skip!(node.break_token); + if let Some(it) = &mut node.label { + v.visit_lifetime_mut(it); + } + if let Some(it) = &mut node.expr { + v.visit_expr_mut(&mut **it); + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn visit_expr_call_mut<V>(v: &mut V, node: &mut crate::ExprCall) +where + V: VisitMut + ?Sized, +{ + v.visit_attributes_mut(&mut node.attrs); + v.visit_expr_mut(&mut *node.func); + skip!(node.paren_token); + for mut el in Punctuated::pairs_mut(&mut node.args) { + let it = el.value_mut(); + v.visit_expr_mut(it); + } 
+} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn visit_expr_cast_mut<V>(v: &mut V, node: &mut crate::ExprCast) +where + V: VisitMut + ?Sized, +{ + v.visit_attributes_mut(&mut node.attrs); + v.visit_expr_mut(&mut *node.expr); + skip!(node.as_token); + v.visit_type_mut(&mut *node.ty); +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_expr_closure_mut<V>(v: &mut V, node: &mut crate::ExprClosure) +where + V: VisitMut + ?Sized, +{ + v.visit_attributes_mut(&mut node.attrs); + if let Some(it) = &mut node.lifetimes { + v.visit_bound_lifetimes_mut(it); + } + skip!(node.constness); + skip!(node.movability); + skip!(node.asyncness); + skip!(node.capture); + skip!(node.or1_token); + for mut el in Punctuated::pairs_mut(&mut node.inputs) { + let it = el.value_mut(); + v.visit_pat_mut(it); + } + skip!(node.or2_token); + v.visit_return_type_mut(&mut node.output); + v.visit_expr_mut(&mut *node.body); +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_expr_const_mut<V>(v: &mut V, node: &mut crate::ExprConst) +where + V: VisitMut + ?Sized, +{ + v.visit_attributes_mut(&mut node.attrs); + skip!(node.const_token); + v.visit_block_mut(&mut node.block); +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_expr_continue_mut<V>(v: &mut V, node: &mut crate::ExprContinue) +where + V: VisitMut + ?Sized, +{ + v.visit_attributes_mut(&mut node.attrs); + skip!(node.continue_token); + if let Some(it) = &mut node.label { + v.visit_lifetime_mut(it); + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn visit_expr_field_mut<V>(v: &mut V, node: &mut crate::ExprField) +where + V: VisitMut + ?Sized, +{ + v.visit_attributes_mut(&mut node.attrs); + v.visit_expr_mut(&mut *node.base); + skip!(node.dot_token); + v.visit_member_mut(&mut node.member); +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_expr_for_loop_mut<V>(v: &mut V, node: &mut crate::ExprForLoop) +where + V: VisitMut + ?Sized, +{ + v.visit_attributes_mut(&mut node.attrs); + if let Some(it) = &mut node.label { + v.visit_label_mut(it); + } + skip!(node.for_token); + v.visit_pat_mut(&mut *node.pat); + skip!(node.in_token); + v.visit_expr_mut(&mut *node.expr); + v.visit_block_mut(&mut node.body); +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn visit_expr_group_mut<V>(v: &mut V, node: &mut crate::ExprGroup) +where + V: VisitMut + ?Sized, +{ + v.visit_attributes_mut(&mut node.attrs); + skip!(node.group_token); + v.visit_expr_mut(&mut *node.expr); +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_expr_if_mut<V>(v: &mut V, node: &mut crate::ExprIf) +where + V: VisitMut + ?Sized, +{ + v.visit_attributes_mut(&mut node.attrs); + skip!(node.if_token); + v.visit_expr_mut(&mut *node.cond); + v.visit_block_mut(&mut node.then_branch); + if let Some(it) = &mut node.else_branch { + skip!((it).0); + v.visit_expr_mut(&mut *(it).1); + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn visit_expr_index_mut<V>(v: &mut V, node: &mut crate::ExprIndex) +where + V: VisitMut + ?Sized, +{ + v.visit_attributes_mut(&mut node.attrs); + v.visit_expr_mut(&mut 
*node.expr); + skip!(node.bracket_token); + v.visit_expr_mut(&mut *node.index); +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_expr_infer_mut<V>(v: &mut V, node: &mut crate::ExprInfer) +where + V: VisitMut + ?Sized, +{ + v.visit_attributes_mut(&mut node.attrs); + skip!(node.underscore_token); +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_expr_let_mut<V>(v: &mut V, node: &mut crate::ExprLet) +where + V: VisitMut + ?Sized, +{ + v.visit_attributes_mut(&mut node.attrs); + skip!(node.let_token); + v.visit_pat_mut(&mut *node.pat); + skip!(node.eq_token); + v.visit_expr_mut(&mut *node.expr); +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn visit_expr_lit_mut<V>(v: &mut V, node: &mut crate::ExprLit) +where + V: VisitMut + ?Sized, +{ + v.visit_attributes_mut(&mut node.attrs); + v.visit_lit_mut(&mut node.lit); +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_expr_loop_mut<V>(v: &mut V, node: &mut crate::ExprLoop) +where + V: VisitMut + ?Sized, +{ + v.visit_attributes_mut(&mut node.attrs); + if let Some(it) = &mut node.label { + v.visit_label_mut(it); + } + skip!(node.loop_token); + v.visit_block_mut(&mut node.body); +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn visit_expr_macro_mut<V>(v: &mut V, node: &mut crate::ExprMacro) +where + V: VisitMut + ?Sized, +{ + v.visit_attributes_mut(&mut node.attrs); + v.visit_macro_mut(&mut node.mac); +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_expr_match_mut<V>(v: &mut V, node: &mut crate::ExprMatch) +where + V: VisitMut + ?Sized, +{ + v.visit_attributes_mut(&mut node.attrs); + skip!(node.match_token); + v.visit_expr_mut(&mut *node.expr); + skip!(node.brace_token); + for it in &mut node.arms { + v.visit_arm_mut(it); + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn visit_expr_method_call_mut<V>(v: &mut V, node: &mut crate::ExprMethodCall) +where + V: VisitMut + ?Sized, +{ + v.visit_attributes_mut(&mut node.attrs); + v.visit_expr_mut(&mut *node.receiver); + skip!(node.dot_token); + v.visit_ident_mut(&mut node.method); + if let Some(it) = &mut node.turbofish { + v.visit_angle_bracketed_generic_arguments_mut(it); + } + skip!(node.paren_token); + for mut el in Punctuated::pairs_mut(&mut node.args) { + let it = el.value_mut(); + v.visit_expr_mut(it); + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn visit_expr_paren_mut<V>(v: &mut V, node: &mut crate::ExprParen) +where + V: VisitMut + ?Sized, +{ + v.visit_attributes_mut(&mut node.attrs); + skip!(node.paren_token); + v.visit_expr_mut(&mut *node.expr); +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn visit_expr_path_mut<V>(v: &mut V, node: &mut crate::ExprPath) +where + V: VisitMut + ?Sized, +{ + v.visit_attributes_mut(&mut node.attrs); + if let Some(it) = &mut node.qself { + v.visit_qself_mut(it); + } + v.visit_path_mut(&mut node.path); +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_expr_range_mut<V>(v: &mut V, node: &mut crate::ExprRange) +where + V: VisitMut + 
?Sized, +{ + v.visit_attributes_mut(&mut node.attrs); + if let Some(it) = &mut node.start { + v.visit_expr_mut(&mut **it); + } + v.visit_range_limits_mut(&mut node.limits); + if let Some(it) = &mut node.end { + v.visit_expr_mut(&mut **it); + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_expr_raw_addr_mut<V>(v: &mut V, node: &mut crate::ExprRawAddr) +where + V: VisitMut + ?Sized, +{ + v.visit_attributes_mut(&mut node.attrs); + skip!(node.and_token); + skip!(node.raw); + v.visit_pointer_mutability_mut(&mut node.mutability); + v.visit_expr_mut(&mut *node.expr); +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn visit_expr_reference_mut<V>(v: &mut V, node: &mut crate::ExprReference) +where + V: VisitMut + ?Sized, +{ + v.visit_attributes_mut(&mut node.attrs); + skip!(node.and_token); + skip!(node.mutability); + v.visit_expr_mut(&mut *node.expr); +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_expr_repeat_mut<V>(v: &mut V, node: &mut crate::ExprRepeat) +where + V: VisitMut + ?Sized, +{ + v.visit_attributes_mut(&mut node.attrs); + skip!(node.bracket_token); + v.visit_expr_mut(&mut *node.expr); + skip!(node.semi_token); + v.visit_expr_mut(&mut *node.len); +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_expr_return_mut<V>(v: &mut V, node: &mut crate::ExprReturn) +where + V: VisitMut + ?Sized, +{ + v.visit_attributes_mut(&mut node.attrs); + skip!(node.return_token); + if let Some(it) = &mut node.expr { + v.visit_expr_mut(&mut **it); + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn visit_expr_struct_mut<V>(v: &mut V, node: &mut crate::ExprStruct) +where + V: VisitMut + ?Sized, +{ + v.visit_attributes_mut(&mut node.attrs); + if let Some(it) = &mut node.qself { + v.visit_qself_mut(it); + } + v.visit_path_mut(&mut node.path); + skip!(node.brace_token); + for mut el in Punctuated::pairs_mut(&mut node.fields) { + let it = el.value_mut(); + v.visit_field_value_mut(it); + } + skip!(node.dot2_token); + if let Some(it) = &mut node.rest { + v.visit_expr_mut(&mut **it); + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_expr_try_mut<V>(v: &mut V, node: &mut crate::ExprTry) +where + V: VisitMut + ?Sized, +{ + v.visit_attributes_mut(&mut node.attrs); + v.visit_expr_mut(&mut *node.expr); + skip!(node.question_token); +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_expr_try_block_mut<V>(v: &mut V, node: &mut crate::ExprTryBlock) +where + V: VisitMut + ?Sized, +{ + v.visit_attributes_mut(&mut node.attrs); + skip!(node.try_token); + v.visit_block_mut(&mut node.block); +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn visit_expr_tuple_mut<V>(v: &mut V, node: &mut crate::ExprTuple) +where + V: VisitMut + ?Sized, +{ + v.visit_attributes_mut(&mut node.attrs); + skip!(node.paren_token); + for mut el in Punctuated::pairs_mut(&mut node.elems) { + let it = el.value_mut(); + v.visit_expr_mut(it); + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn visit_expr_unary_mut<V>(v: &mut V, node: &mut crate::ExprUnary) +where + V: VisitMut + ?Sized, +{ + 
v.visit_attributes_mut(&mut node.attrs); + v.visit_un_op_mut(&mut node.op); + v.visit_expr_mut(&mut *node.expr); +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_expr_unsafe_mut<V>(v: &mut V, node: &mut crate::ExprUnsafe) +where + V: VisitMut + ?Sized, +{ + v.visit_attributes_mut(&mut node.attrs); + skip!(node.unsafe_token); + v.visit_block_mut(&mut node.block); +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_expr_while_mut<V>(v: &mut V, node: &mut crate::ExprWhile) +where + V: VisitMut + ?Sized, +{ + v.visit_attributes_mut(&mut node.attrs); + if let Some(it) = &mut node.label { + v.visit_label_mut(it); + } + skip!(node.while_token); + v.visit_expr_mut(&mut *node.cond); + v.visit_block_mut(&mut node.body); +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_expr_yield_mut<V>(v: &mut V, node: &mut crate::ExprYield) +where + V: VisitMut + ?Sized, +{ + v.visit_attributes_mut(&mut node.attrs); + skip!(node.yield_token); + if let Some(it) = &mut node.expr { + v.visit_expr_mut(&mut **it); + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn visit_field_mut<V>(v: &mut V, node: &mut crate::Field) +where + V: VisitMut + ?Sized, +{ + v.visit_attributes_mut(&mut node.attrs); + v.visit_visibility_mut(&mut node.vis); + v.visit_field_mutability_mut(&mut node.mutability); + if let Some(it) = &mut node.ident { + v.visit_ident_mut(it); + } + skip!(node.colon_token); + v.visit_type_mut(&mut node.ty); +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn visit_field_mutability_mut<V>(v: &mut V, node: &mut crate::FieldMutability) +where + V: VisitMut + ?Sized, +{ + match node { + crate::FieldMutability::None => {} + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_field_pat_mut<V>(v: &mut V, node: &mut crate::FieldPat) +where + V: VisitMut + ?Sized, +{ + v.visit_attributes_mut(&mut node.attrs); + v.visit_member_mut(&mut node.member); + skip!(node.colon_token); + v.visit_pat_mut(&mut *node.pat); +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn visit_field_value_mut<V>(v: &mut V, node: &mut crate::FieldValue) +where + V: VisitMut + ?Sized, +{ + v.visit_attributes_mut(&mut node.attrs); + v.visit_member_mut(&mut node.member); + skip!(node.colon_token); + v.visit_expr_mut(&mut node.expr); +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn visit_fields_mut<V>(v: &mut V, node: &mut crate::Fields) +where + V: VisitMut + ?Sized, +{ + match node { + crate::Fields::Named(_binding_0) => { + v.visit_fields_named_mut(_binding_0); + } + crate::Fields::Unnamed(_binding_0) => { + v.visit_fields_unnamed_mut(_binding_0); + } + crate::Fields::Unit => {} + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn visit_fields_named_mut<V>(v: &mut V, node: &mut crate::FieldsNamed) +where + V: VisitMut + ?Sized, +{ + skip!(node.brace_token); + for mut el in Punctuated::pairs_mut(&mut node.named) { + let it = el.value_mut(); + v.visit_field_mut(it); + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = 
"derive", feature = "full"))))] +pub fn visit_fields_unnamed_mut<V>(v: &mut V, node: &mut crate::FieldsUnnamed) +where + V: VisitMut + ?Sized, +{ + skip!(node.paren_token); + for mut el in Punctuated::pairs_mut(&mut node.unnamed) { + let it = el.value_mut(); + v.visit_field_mut(it); + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_file_mut<V>(v: &mut V, node: &mut crate::File) +where + V: VisitMut + ?Sized, +{ + skip!(node.shebang); + v.visit_attributes_mut(&mut node.attrs); + for it in &mut node.items { + v.visit_item_mut(it); + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_fn_arg_mut<V>(v: &mut V, node: &mut crate::FnArg) +where + V: VisitMut + ?Sized, +{ + match node { + crate::FnArg::Receiver(_binding_0) => { + v.visit_receiver_mut(_binding_0); + } + crate::FnArg::Typed(_binding_0) => { + v.visit_pat_type_mut(_binding_0); + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_foreign_item_mut<V>(v: &mut V, node: &mut crate::ForeignItem) +where + V: VisitMut + ?Sized, +{ + match node { + crate::ForeignItem::Fn(_binding_0) => { + v.visit_foreign_item_fn_mut(_binding_0); + } + crate::ForeignItem::Static(_binding_0) => { + v.visit_foreign_item_static_mut(_binding_0); + } + crate::ForeignItem::Type(_binding_0) => { + v.visit_foreign_item_type_mut(_binding_0); + } + crate::ForeignItem::Macro(_binding_0) => { + v.visit_foreign_item_macro_mut(_binding_0); + } + crate::ForeignItem::Verbatim(_binding_0) => { + v.visit_token_stream_mut(_binding_0); + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_foreign_item_fn_mut<V>(v: &mut V, node: &mut crate::ForeignItemFn) +where + V: VisitMut + ?Sized, +{ + v.visit_attributes_mut(&mut node.attrs); + v.visit_visibility_mut(&mut node.vis); + v.visit_signature_mut(&mut node.sig); + skip!(node.semi_token); +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_foreign_item_macro_mut<V>(v: &mut V, node: &mut crate::ForeignItemMacro) +where + V: VisitMut + ?Sized, +{ + v.visit_attributes_mut(&mut node.attrs); + v.visit_macro_mut(&mut node.mac); + skip!(node.semi_token); +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_foreign_item_static_mut<V>(v: &mut V, node: &mut crate::ForeignItemStatic) +where + V: VisitMut + ?Sized, +{ + v.visit_attributes_mut(&mut node.attrs); + v.visit_visibility_mut(&mut node.vis); + skip!(node.static_token); + v.visit_static_mutability_mut(&mut node.mutability); + v.visit_ident_mut(&mut node.ident); + skip!(node.colon_token); + v.visit_type_mut(&mut *node.ty); + skip!(node.semi_token); +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_foreign_item_type_mut<V>(v: &mut V, node: &mut crate::ForeignItemType) +where + V: VisitMut + ?Sized, +{ + v.visit_attributes_mut(&mut node.attrs); + v.visit_visibility_mut(&mut node.vis); + skip!(node.type_token); + v.visit_ident_mut(&mut node.ident); + v.visit_generics_mut(&mut node.generics); + skip!(node.semi_token); +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn visit_generic_argument_mut<V>(v: &mut V, node: &mut crate::GenericArgument) +where + V: VisitMut + ?Sized, +{ + match node { + crate::GenericArgument::Lifetime(_binding_0) => { + v.visit_lifetime_mut(_binding_0); + } + 
crate::GenericArgument::Type(_binding_0) => { + v.visit_type_mut(_binding_0); + } + crate::GenericArgument::Const(_binding_0) => { + v.visit_expr_mut(_binding_0); + } + crate::GenericArgument::AssocType(_binding_0) => { + v.visit_assoc_type_mut(_binding_0); + } + crate::GenericArgument::AssocConst(_binding_0) => { + v.visit_assoc_const_mut(_binding_0); + } + crate::GenericArgument::Constraint(_binding_0) => { + v.visit_constraint_mut(_binding_0); + } + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn visit_generic_param_mut<V>(v: &mut V, node: &mut crate::GenericParam) +where + V: VisitMut + ?Sized, +{ + match node { + crate::GenericParam::Lifetime(_binding_0) => { + v.visit_lifetime_param_mut(_binding_0); + } + crate::GenericParam::Type(_binding_0) => { + v.visit_type_param_mut(_binding_0); + } + crate::GenericParam::Const(_binding_0) => { + v.visit_const_param_mut(_binding_0); + } + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn visit_generics_mut<V>(v: &mut V, node: &mut crate::Generics) +where + V: VisitMut + ?Sized, +{ + skip!(node.lt_token); + for mut el in Punctuated::pairs_mut(&mut node.params) { + let it = el.value_mut(); + v.visit_generic_param_mut(it); + } + skip!(node.gt_token); + if let Some(it) = &mut node.where_clause { + v.visit_where_clause_mut(it); + } +} +pub fn visit_ident_mut<V>(v: &mut V, node: &mut proc_macro2::Ident) +where + V: VisitMut + ?Sized, +{ + let mut span = node.span(); + v.visit_span_mut(&mut span); + node.set_span(span); +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_impl_item_mut<V>(v: &mut V, node: &mut crate::ImplItem) +where + V: VisitMut + ?Sized, +{ + match node { + crate::ImplItem::Const(_binding_0) => { + v.visit_impl_item_const_mut(_binding_0); + } + crate::ImplItem::Fn(_binding_0) => { + v.visit_impl_item_fn_mut(_binding_0); + } + crate::ImplItem::Type(_binding_0) => { + v.visit_impl_item_type_mut(_binding_0); + } + crate::ImplItem::Macro(_binding_0) => { + v.visit_impl_item_macro_mut(_binding_0); + } + crate::ImplItem::Verbatim(_binding_0) => { + v.visit_token_stream_mut(_binding_0); + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_impl_item_const_mut<V>(v: &mut V, node: &mut crate::ImplItemConst) +where + V: VisitMut + ?Sized, +{ + v.visit_attributes_mut(&mut node.attrs); + v.visit_visibility_mut(&mut node.vis); + skip!(node.defaultness); + skip!(node.const_token); + v.visit_ident_mut(&mut node.ident); + v.visit_generics_mut(&mut node.generics); + skip!(node.colon_token); + v.visit_type_mut(&mut node.ty); + skip!(node.eq_token); + v.visit_expr_mut(&mut node.expr); + skip!(node.semi_token); +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_impl_item_fn_mut<V>(v: &mut V, node: &mut crate::ImplItemFn) +where + V: VisitMut + ?Sized, +{ + v.visit_attributes_mut(&mut node.attrs); + v.visit_visibility_mut(&mut node.vis); + skip!(node.defaultness); + v.visit_signature_mut(&mut node.sig); + v.visit_block_mut(&mut node.block); +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_impl_item_macro_mut<V>(v: &mut V, node: &mut crate::ImplItemMacro) +where + V: VisitMut + ?Sized, +{ + v.visit_attributes_mut(&mut node.attrs); + v.visit_macro_mut(&mut node.mac); + skip!(node.semi_token); +} +#[cfg(feature = 
"full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_impl_item_type_mut<V>(v: &mut V, node: &mut crate::ImplItemType) +where + V: VisitMut + ?Sized, +{ + v.visit_attributes_mut(&mut node.attrs); + v.visit_visibility_mut(&mut node.vis); + skip!(node.defaultness); + skip!(node.type_token); + v.visit_ident_mut(&mut node.ident); + v.visit_generics_mut(&mut node.generics); + skip!(node.eq_token); + v.visit_type_mut(&mut node.ty); + skip!(node.semi_token); +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_impl_restriction_mut<V>(v: &mut V, node: &mut crate::ImplRestriction) +where + V: VisitMut + ?Sized, +{ + match *node {} +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn visit_index_mut<V>(v: &mut V, node: &mut crate::Index) +where + V: VisitMut + ?Sized, +{ + skip!(node.index); + v.visit_span_mut(&mut node.span); +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_item_mut<V>(v: &mut V, node: &mut crate::Item) +where + V: VisitMut + ?Sized, +{ + match node { + crate::Item::Const(_binding_0) => { + v.visit_item_const_mut(_binding_0); + } + crate::Item::Enum(_binding_0) => { + v.visit_item_enum_mut(_binding_0); + } + crate::Item::ExternCrate(_binding_0) => { + v.visit_item_extern_crate_mut(_binding_0); + } + crate::Item::Fn(_binding_0) => { + v.visit_item_fn_mut(_binding_0); + } + crate::Item::ForeignMod(_binding_0) => { + v.visit_item_foreign_mod_mut(_binding_0); + } + crate::Item::Impl(_binding_0) => { + v.visit_item_impl_mut(_binding_0); + } + crate::Item::Macro(_binding_0) => { + v.visit_item_macro_mut(_binding_0); + } + crate::Item::Mod(_binding_0) => { + v.visit_item_mod_mut(_binding_0); + } + crate::Item::Static(_binding_0) => { + v.visit_item_static_mut(_binding_0); + } + crate::Item::Struct(_binding_0) => { + v.visit_item_struct_mut(_binding_0); + } + crate::Item::Trait(_binding_0) => { + v.visit_item_trait_mut(_binding_0); + } + crate::Item::TraitAlias(_binding_0) => { + v.visit_item_trait_alias_mut(_binding_0); + } + crate::Item::Type(_binding_0) => { + v.visit_item_type_mut(_binding_0); + } + crate::Item::Union(_binding_0) => { + v.visit_item_union_mut(_binding_0); + } + crate::Item::Use(_binding_0) => { + v.visit_item_use_mut(_binding_0); + } + crate::Item::Verbatim(_binding_0) => { + v.visit_token_stream_mut(_binding_0); + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_item_const_mut<V>(v: &mut V, node: &mut crate::ItemConst) +where + V: VisitMut + ?Sized, +{ + v.visit_attributes_mut(&mut node.attrs); + v.visit_visibility_mut(&mut node.vis); + skip!(node.const_token); + v.visit_ident_mut(&mut node.ident); + v.visit_generics_mut(&mut node.generics); + skip!(node.colon_token); + v.visit_type_mut(&mut *node.ty); + skip!(node.eq_token); + v.visit_expr_mut(&mut *node.expr); + skip!(node.semi_token); +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_item_enum_mut<V>(v: &mut V, node: &mut crate::ItemEnum) +where + V: VisitMut + ?Sized, +{ + v.visit_attributes_mut(&mut node.attrs); + v.visit_visibility_mut(&mut node.vis); + skip!(node.enum_token); + v.visit_ident_mut(&mut node.ident); + v.visit_generics_mut(&mut node.generics); + skip!(node.brace_token); + for mut el in Punctuated::pairs_mut(&mut node.variants) { + let it = el.value_mut(); + v.visit_variant_mut(it); + } +} +#[cfg(feature = "full")] 
+#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_item_extern_crate_mut<V>(v: &mut V, node: &mut crate::ItemExternCrate) +where + V: VisitMut + ?Sized, +{ + v.visit_attributes_mut(&mut node.attrs); + v.visit_visibility_mut(&mut node.vis); + skip!(node.extern_token); + skip!(node.crate_token); + v.visit_ident_mut(&mut node.ident); + if let Some(it) = &mut node.rename { + skip!((it).0); + v.visit_ident_mut(&mut (it).1); + } + skip!(node.semi_token); +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_item_fn_mut<V>(v: &mut V, node: &mut crate::ItemFn) +where + V: VisitMut + ?Sized, +{ + v.visit_attributes_mut(&mut node.attrs); + v.visit_visibility_mut(&mut node.vis); + v.visit_signature_mut(&mut node.sig); + v.visit_block_mut(&mut *node.block); +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_item_foreign_mod_mut<V>(v: &mut V, node: &mut crate::ItemForeignMod) +where + V: VisitMut + ?Sized, +{ + v.visit_attributes_mut(&mut node.attrs); + skip!(node.unsafety); + v.visit_abi_mut(&mut node.abi); + skip!(node.brace_token); + for it in &mut node.items { + v.visit_foreign_item_mut(it); + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_item_impl_mut<V>(v: &mut V, node: &mut crate::ItemImpl) +where + V: VisitMut + ?Sized, +{ + v.visit_attributes_mut(&mut node.attrs); + skip!(node.defaultness); + skip!(node.unsafety); + skip!(node.impl_token); + v.visit_generics_mut(&mut node.generics); + if let Some(it) = &mut node.trait_ { + skip!((it).0); + v.visit_path_mut(&mut (it).1); + skip!((it).2); + } + v.visit_type_mut(&mut *node.self_ty); + skip!(node.brace_token); + for it in &mut node.items { + v.visit_impl_item_mut(it); + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_item_macro_mut<V>(v: &mut V, node: &mut crate::ItemMacro) +where + V: VisitMut + ?Sized, +{ + v.visit_attributes_mut(&mut node.attrs); + if let Some(it) = &mut node.ident { + v.visit_ident_mut(it); + } + v.visit_macro_mut(&mut node.mac); + skip!(node.semi_token); +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_item_mod_mut<V>(v: &mut V, node: &mut crate::ItemMod) +where + V: VisitMut + ?Sized, +{ + v.visit_attributes_mut(&mut node.attrs); + v.visit_visibility_mut(&mut node.vis); + skip!(node.unsafety); + skip!(node.mod_token); + v.visit_ident_mut(&mut node.ident); + if let Some(it) = &mut node.content { + skip!((it).0); + for it in &mut (it).1 { + v.visit_item_mut(it); + } + } + skip!(node.semi); +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_item_static_mut<V>(v: &mut V, node: &mut crate::ItemStatic) +where + V: VisitMut + ?Sized, +{ + v.visit_attributes_mut(&mut node.attrs); + v.visit_visibility_mut(&mut node.vis); + skip!(node.static_token); + v.visit_static_mutability_mut(&mut node.mutability); + v.visit_ident_mut(&mut node.ident); + skip!(node.colon_token); + v.visit_type_mut(&mut *node.ty); + skip!(node.eq_token); + v.visit_expr_mut(&mut *node.expr); + skip!(node.semi_token); +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_item_struct_mut<V>(v: &mut V, node: &mut crate::ItemStruct) +where + V: VisitMut + ?Sized, +{ + v.visit_attributes_mut(&mut node.attrs); + v.visit_visibility_mut(&mut node.vis); + skip!(node.struct_token); + v.visit_ident_mut(&mut node.ident); + v.visit_generics_mut(&mut node.generics); + 
v.visit_fields_mut(&mut node.fields); + skip!(node.semi_token); +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_item_trait_mut<V>(v: &mut V, node: &mut crate::ItemTrait) +where + V: VisitMut + ?Sized, +{ + v.visit_attributes_mut(&mut node.attrs); + v.visit_visibility_mut(&mut node.vis); + skip!(node.unsafety); + skip!(node.auto_token); + if let Some(it) = &mut node.restriction { + v.visit_impl_restriction_mut(it); + } + skip!(node.trait_token); + v.visit_ident_mut(&mut node.ident); + v.visit_generics_mut(&mut node.generics); + skip!(node.colon_token); + for mut el in Punctuated::pairs_mut(&mut node.supertraits) { + let it = el.value_mut(); + v.visit_type_param_bound_mut(it); + } + skip!(node.brace_token); + for it in &mut node.items { + v.visit_trait_item_mut(it); + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_item_trait_alias_mut<V>(v: &mut V, node: &mut crate::ItemTraitAlias) +where + V: VisitMut + ?Sized, +{ + v.visit_attributes_mut(&mut node.attrs); + v.visit_visibility_mut(&mut node.vis); + skip!(node.trait_token); + v.visit_ident_mut(&mut node.ident); + v.visit_generics_mut(&mut node.generics); + skip!(node.eq_token); + for mut el in Punctuated::pairs_mut(&mut node.bounds) { + let it = el.value_mut(); + v.visit_type_param_bound_mut(it); + } + skip!(node.semi_token); +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_item_type_mut<V>(v: &mut V, node: &mut crate::ItemType) +where + V: VisitMut + ?Sized, +{ + v.visit_attributes_mut(&mut node.attrs); + v.visit_visibility_mut(&mut node.vis); + skip!(node.type_token); + v.visit_ident_mut(&mut node.ident); + v.visit_generics_mut(&mut node.generics); + skip!(node.eq_token); + v.visit_type_mut(&mut *node.ty); + skip!(node.semi_token); +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_item_union_mut<V>(v: &mut V, node: &mut crate::ItemUnion) +where + V: VisitMut + ?Sized, +{ + v.visit_attributes_mut(&mut node.attrs); + v.visit_visibility_mut(&mut node.vis); + skip!(node.union_token); + v.visit_ident_mut(&mut node.ident); + v.visit_generics_mut(&mut node.generics); + v.visit_fields_named_mut(&mut node.fields); +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_item_use_mut<V>(v: &mut V, node: &mut crate::ItemUse) +where + V: VisitMut + ?Sized, +{ + v.visit_attributes_mut(&mut node.attrs); + v.visit_visibility_mut(&mut node.vis); + skip!(node.use_token); + skip!(node.leading_colon); + v.visit_use_tree_mut(&mut node.tree); + skip!(node.semi_token); +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_label_mut<V>(v: &mut V, node: &mut crate::Label) +where + V: VisitMut + ?Sized, +{ + v.visit_lifetime_mut(&mut node.name); + skip!(node.colon_token); +} +pub fn visit_lifetime_mut<V>(v: &mut V, node: &mut crate::Lifetime) +where + V: VisitMut + ?Sized, +{ + v.visit_span_mut(&mut node.apostrophe); + v.visit_ident_mut(&mut node.ident); +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn visit_lifetime_param_mut<V>(v: &mut V, node: &mut crate::LifetimeParam) +where + V: VisitMut + ?Sized, +{ + v.visit_attributes_mut(&mut node.attrs); + v.visit_lifetime_mut(&mut node.lifetime); + skip!(node.colon_token); + for mut el in Punctuated::pairs_mut(&mut node.bounds) { + let it = el.value_mut(); + v.visit_lifetime_mut(it); + } 
+} +pub fn visit_lit_mut<V>(v: &mut V, node: &mut crate::Lit) +where + V: VisitMut + ?Sized, +{ + match node { + crate::Lit::Str(_binding_0) => { + v.visit_lit_str_mut(_binding_0); + } + crate::Lit::ByteStr(_binding_0) => { + v.visit_lit_byte_str_mut(_binding_0); + } + crate::Lit::CStr(_binding_0) => { + v.visit_lit_cstr_mut(_binding_0); + } + crate::Lit::Byte(_binding_0) => { + v.visit_lit_byte_mut(_binding_0); + } + crate::Lit::Char(_binding_0) => { + v.visit_lit_char_mut(_binding_0); + } + crate::Lit::Int(_binding_0) => { + v.visit_lit_int_mut(_binding_0); + } + crate::Lit::Float(_binding_0) => { + v.visit_lit_float_mut(_binding_0); + } + crate::Lit::Bool(_binding_0) => { + v.visit_lit_bool_mut(_binding_0); + } + crate::Lit::Verbatim(_binding_0) => { + skip!(_binding_0); + } + } +} +pub fn visit_lit_bool_mut<V>(v: &mut V, node: &mut crate::LitBool) +where + V: VisitMut + ?Sized, +{ + skip!(node.value); + v.visit_span_mut(&mut node.span); +} +pub fn visit_lit_byte_mut<V>(v: &mut V, node: &mut crate::LitByte) +where + V: VisitMut + ?Sized, +{} +pub fn visit_lit_byte_str_mut<V>(v: &mut V, node: &mut crate::LitByteStr) +where + V: VisitMut + ?Sized, +{} +pub fn visit_lit_cstr_mut<V>(v: &mut V, node: &mut crate::LitCStr) +where + V: VisitMut + ?Sized, +{} +pub fn visit_lit_char_mut<V>(v: &mut V, node: &mut crate::LitChar) +where + V: VisitMut + ?Sized, +{} +pub fn visit_lit_float_mut<V>(v: &mut V, node: &mut crate::LitFloat) +where + V: VisitMut + ?Sized, +{} +pub fn visit_lit_int_mut<V>(v: &mut V, node: &mut crate::LitInt) +where + V: VisitMut + ?Sized, +{} +pub fn visit_lit_str_mut<V>(v: &mut V, node: &mut crate::LitStr) +where + V: VisitMut + ?Sized, +{} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_local_mut<V>(v: &mut V, node: &mut crate::Local) +where + V: VisitMut + ?Sized, +{ + v.visit_attributes_mut(&mut node.attrs); + skip!(node.let_token); + v.visit_pat_mut(&mut node.pat); + if let Some(it) = &mut node.init { + v.visit_local_init_mut(it); + } + skip!(node.semi_token); +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_local_init_mut<V>(v: &mut V, node: &mut crate::LocalInit) +where + V: VisitMut + ?Sized, +{ + skip!(node.eq_token); + v.visit_expr_mut(&mut *node.expr); + if let Some(it) = &mut node.diverge { + skip!((it).0); + v.visit_expr_mut(&mut *(it).1); + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn visit_macro_mut<V>(v: &mut V, node: &mut crate::Macro) +where + V: VisitMut + ?Sized, +{ + v.visit_path_mut(&mut node.path); + skip!(node.bang_token); + v.visit_macro_delimiter_mut(&mut node.delimiter); + v.visit_token_stream_mut(&mut node.tokens); +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn visit_macro_delimiter_mut<V>(v: &mut V, node: &mut crate::MacroDelimiter) +where + V: VisitMut + ?Sized, +{ + match node { + crate::MacroDelimiter::Paren(_binding_0) => { + skip!(_binding_0); + } + crate::MacroDelimiter::Brace(_binding_0) => { + skip!(_binding_0); + } + crate::MacroDelimiter::Bracket(_binding_0) => { + skip!(_binding_0); + } + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn visit_member_mut<V>(v: &mut V, node: &mut crate::Member) +where + V: VisitMut + ?Sized, +{ + match node { + crate::Member::Named(_binding_0) => { + 
v.visit_ident_mut(_binding_0); + } + crate::Member::Unnamed(_binding_0) => { + v.visit_index_mut(_binding_0); + } + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn visit_meta_mut<V>(v: &mut V, node: &mut crate::Meta) +where + V: VisitMut + ?Sized, +{ + match node { + crate::Meta::Path(_binding_0) => { + v.visit_path_mut(_binding_0); + } + crate::Meta::List(_binding_0) => { + v.visit_meta_list_mut(_binding_0); + } + crate::Meta::NameValue(_binding_0) => { + v.visit_meta_name_value_mut(_binding_0); + } + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn visit_meta_list_mut<V>(v: &mut V, node: &mut crate::MetaList) +where + V: VisitMut + ?Sized, +{ + v.visit_path_mut(&mut node.path); + v.visit_macro_delimiter_mut(&mut node.delimiter); + v.visit_token_stream_mut(&mut node.tokens); +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn visit_meta_name_value_mut<V>(v: &mut V, node: &mut crate::MetaNameValue) +where + V: VisitMut + ?Sized, +{ + v.visit_path_mut(&mut node.path); + skip!(node.eq_token); + v.visit_expr_mut(&mut node.value); +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn visit_parenthesized_generic_arguments_mut<V>( + v: &mut V, + node: &mut crate::ParenthesizedGenericArguments, +) +where + V: VisitMut + ?Sized, +{ + skip!(node.paren_token); + for mut el in Punctuated::pairs_mut(&mut node.inputs) { + let it = el.value_mut(); + v.visit_type_mut(it); + } + v.visit_return_type_mut(&mut node.output); +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_pat_mut<V>(v: &mut V, node: &mut crate::Pat) +where + V: VisitMut + ?Sized, +{ + match node { + crate::Pat::Const(_binding_0) => { + v.visit_expr_const_mut(_binding_0); + } + crate::Pat::Ident(_binding_0) => { + v.visit_pat_ident_mut(_binding_0); + } + crate::Pat::Lit(_binding_0) => { + v.visit_expr_lit_mut(_binding_0); + } + crate::Pat::Macro(_binding_0) => { + v.visit_expr_macro_mut(_binding_0); + } + crate::Pat::Or(_binding_0) => { + v.visit_pat_or_mut(_binding_0); + } + crate::Pat::Paren(_binding_0) => { + v.visit_pat_paren_mut(_binding_0); + } + crate::Pat::Path(_binding_0) => { + v.visit_expr_path_mut(_binding_0); + } + crate::Pat::Range(_binding_0) => { + v.visit_expr_range_mut(_binding_0); + } + crate::Pat::Reference(_binding_0) => { + v.visit_pat_reference_mut(_binding_0); + } + crate::Pat::Rest(_binding_0) => { + v.visit_pat_rest_mut(_binding_0); + } + crate::Pat::Slice(_binding_0) => { + v.visit_pat_slice_mut(_binding_0); + } + crate::Pat::Struct(_binding_0) => { + v.visit_pat_struct_mut(_binding_0); + } + crate::Pat::Tuple(_binding_0) => { + v.visit_pat_tuple_mut(_binding_0); + } + crate::Pat::TupleStruct(_binding_0) => { + v.visit_pat_tuple_struct_mut(_binding_0); + } + crate::Pat::Type(_binding_0) => { + v.visit_pat_type_mut(_binding_0); + } + crate::Pat::Verbatim(_binding_0) => { + v.visit_token_stream_mut(_binding_0); + } + crate::Pat::Wild(_binding_0) => { + v.visit_pat_wild_mut(_binding_0); + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_pat_ident_mut<V>(v: &mut V, node: &mut crate::PatIdent) +where + V: VisitMut + ?Sized, +{ + v.visit_attributes_mut(&mut node.attrs); + skip!(node.by_ref); + 
skip!(node.mutability); + v.visit_ident_mut(&mut node.ident); + if let Some(it) = &mut node.subpat { + skip!((it).0); + v.visit_pat_mut(&mut *(it).1); + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_pat_or_mut<V>(v: &mut V, node: &mut crate::PatOr) +where + V: VisitMut + ?Sized, +{ + v.visit_attributes_mut(&mut node.attrs); + skip!(node.leading_vert); + for mut el in Punctuated::pairs_mut(&mut node.cases) { + let it = el.value_mut(); + v.visit_pat_mut(it); + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_pat_paren_mut<V>(v: &mut V, node: &mut crate::PatParen) +where + V: VisitMut + ?Sized, +{ + v.visit_attributes_mut(&mut node.attrs); + skip!(node.paren_token); + v.visit_pat_mut(&mut *node.pat); +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_pat_reference_mut<V>(v: &mut V, node: &mut crate::PatReference) +where + V: VisitMut + ?Sized, +{ + v.visit_attributes_mut(&mut node.attrs); + skip!(node.and_token); + skip!(node.mutability); + v.visit_pat_mut(&mut *node.pat); +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_pat_rest_mut<V>(v: &mut V, node: &mut crate::PatRest) +where + V: VisitMut + ?Sized, +{ + v.visit_attributes_mut(&mut node.attrs); + skip!(node.dot2_token); +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_pat_slice_mut<V>(v: &mut V, node: &mut crate::PatSlice) +where + V: VisitMut + ?Sized, +{ + v.visit_attributes_mut(&mut node.attrs); + skip!(node.bracket_token); + for mut el in Punctuated::pairs_mut(&mut node.elems) { + let it = el.value_mut(); + v.visit_pat_mut(it); + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_pat_struct_mut<V>(v: &mut V, node: &mut crate::PatStruct) +where + V: VisitMut + ?Sized, +{ + v.visit_attributes_mut(&mut node.attrs); + if let Some(it) = &mut node.qself { + v.visit_qself_mut(it); + } + v.visit_path_mut(&mut node.path); + skip!(node.brace_token); + for mut el in Punctuated::pairs_mut(&mut node.fields) { + let it = el.value_mut(); + v.visit_field_pat_mut(it); + } + if let Some(it) = &mut node.rest { + v.visit_pat_rest_mut(it); + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_pat_tuple_mut<V>(v: &mut V, node: &mut crate::PatTuple) +where + V: VisitMut + ?Sized, +{ + v.visit_attributes_mut(&mut node.attrs); + skip!(node.paren_token); + for mut el in Punctuated::pairs_mut(&mut node.elems) { + let it = el.value_mut(); + v.visit_pat_mut(it); + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_pat_tuple_struct_mut<V>(v: &mut V, node: &mut crate::PatTupleStruct) +where + V: VisitMut + ?Sized, +{ + v.visit_attributes_mut(&mut node.attrs); + if let Some(it) = &mut node.qself { + v.visit_qself_mut(it); + } + v.visit_path_mut(&mut node.path); + skip!(node.paren_token); + for mut el in Punctuated::pairs_mut(&mut node.elems) { + let it = el.value_mut(); + v.visit_pat_mut(it); + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_pat_type_mut<V>(v: &mut V, node: &mut crate::PatType) +where + V: VisitMut + ?Sized, +{ + v.visit_attributes_mut(&mut node.attrs); + v.visit_pat_mut(&mut *node.pat); + skip!(node.colon_token); + v.visit_type_mut(&mut *node.ty); +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_pat_wild_mut<V>(v: &mut 
V, node: &mut crate::PatWild) +where + V: VisitMut + ?Sized, +{ + v.visit_attributes_mut(&mut node.attrs); + skip!(node.underscore_token); +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn visit_path_mut<V>(v: &mut V, node: &mut crate::Path) +where + V: VisitMut + ?Sized, +{ + skip!(node.leading_colon); + for mut el in Punctuated::pairs_mut(&mut node.segments) { + let it = el.value_mut(); + v.visit_path_segment_mut(it); + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn visit_path_arguments_mut<V>(v: &mut V, node: &mut crate::PathArguments) +where + V: VisitMut + ?Sized, +{ + match node { + crate::PathArguments::None => {} + crate::PathArguments::AngleBracketed(_binding_0) => { + v.visit_angle_bracketed_generic_arguments_mut(_binding_0); + } + crate::PathArguments::Parenthesized(_binding_0) => { + v.visit_parenthesized_generic_arguments_mut(_binding_0); + } + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn visit_path_segment_mut<V>(v: &mut V, node: &mut crate::PathSegment) +where + V: VisitMut + ?Sized, +{ + v.visit_ident_mut(&mut node.ident); + v.visit_path_arguments_mut(&mut node.arguments); +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_pointer_mutability_mut<V>(v: &mut V, node: &mut crate::PointerMutability) +where + V: VisitMut + ?Sized, +{ + match node { + crate::PointerMutability::Const(_binding_0) => { + skip!(_binding_0); + } + crate::PointerMutability::Mut(_binding_0) => { + skip!(_binding_0); + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_precise_capture_mut<V>(v: &mut V, node: &mut crate::PreciseCapture) +where + V: VisitMut + ?Sized, +{ + skip!(node.use_token); + skip!(node.lt_token); + for mut el in Punctuated::pairs_mut(&mut node.params) { + let it = el.value_mut(); + v.visit_captured_param_mut(it); + } + skip!(node.gt_token); +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn visit_predicate_lifetime_mut<V>(v: &mut V, node: &mut crate::PredicateLifetime) +where + V: VisitMut + ?Sized, +{ + v.visit_lifetime_mut(&mut node.lifetime); + skip!(node.colon_token); + for mut el in Punctuated::pairs_mut(&mut node.bounds) { + let it = el.value_mut(); + v.visit_lifetime_mut(it); + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn visit_predicate_type_mut<V>(v: &mut V, node: &mut crate::PredicateType) +where + V: VisitMut + ?Sized, +{ + if let Some(it) = &mut node.lifetimes { + v.visit_bound_lifetimes_mut(it); + } + v.visit_type_mut(&mut node.bounded_ty); + skip!(node.colon_token); + for mut el in Punctuated::pairs_mut(&mut node.bounds) { + let it = el.value_mut(); + v.visit_type_param_bound_mut(it); + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn visit_qself_mut<V>(v: &mut V, node: &mut crate::QSelf) +where + V: VisitMut + ?Sized, +{ + skip!(node.lt_token); + v.visit_type_mut(&mut *node.ty); + skip!(node.position); + skip!(node.as_token); + skip!(node.gt_token); +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn 
visit_range_limits_mut<V>(v: &mut V, node: &mut crate::RangeLimits) +where + V: VisitMut + ?Sized, +{ + match node { + crate::RangeLimits::HalfOpen(_binding_0) => { + skip!(_binding_0); + } + crate::RangeLimits::Closed(_binding_0) => { + skip!(_binding_0); + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_receiver_mut<V>(v: &mut V, node: &mut crate::Receiver) +where + V: VisitMut + ?Sized, +{ + v.visit_attributes_mut(&mut node.attrs); + if let Some(it) = &mut node.reference { + skip!((it).0); + if let Some(it) = &mut (it).1 { + v.visit_lifetime_mut(it); + } + } + skip!(node.mutability); + skip!(node.self_token); + skip!(node.colon_token); + v.visit_type_mut(&mut *node.ty); +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn visit_return_type_mut<V>(v: &mut V, node: &mut crate::ReturnType) +where + V: VisitMut + ?Sized, +{ + match node { + crate::ReturnType::Default => {} + crate::ReturnType::Type(_binding_0, _binding_1) => { + skip!(_binding_0); + v.visit_type_mut(&mut **_binding_1); + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_signature_mut<V>(v: &mut V, node: &mut crate::Signature) +where + V: VisitMut + ?Sized, +{ + skip!(node.constness); + skip!(node.asyncness); + skip!(node.unsafety); + if let Some(it) = &mut node.abi { + v.visit_abi_mut(it); + } + skip!(node.fn_token); + v.visit_ident_mut(&mut node.ident); + v.visit_generics_mut(&mut node.generics); + skip!(node.paren_token); + for mut el in Punctuated::pairs_mut(&mut node.inputs) { + let it = el.value_mut(); + v.visit_fn_arg_mut(it); + } + if let Some(it) = &mut node.variadic { + v.visit_variadic_mut(it); + } + v.visit_return_type_mut(&mut node.output); +} +pub fn visit_span_mut<V>(v: &mut V, node: &mut proc_macro2::Span) +where + V: VisitMut + ?Sized, +{} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_static_mutability_mut<V>(v: &mut V, node: &mut crate::StaticMutability) +where + V: VisitMut + ?Sized, +{ + match node { + crate::StaticMutability::Mut(_binding_0) => { + skip!(_binding_0); + } + crate::StaticMutability::None => {} + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_stmt_mut<V>(v: &mut V, node: &mut crate::Stmt) +where + V: VisitMut + ?Sized, +{ + match node { + crate::Stmt::Local(_binding_0) => { + v.visit_local_mut(_binding_0); + } + crate::Stmt::Item(_binding_0) => { + v.visit_item_mut(_binding_0); + } + crate::Stmt::Expr(_binding_0, _binding_1) => { + v.visit_expr_mut(_binding_0); + skip!(_binding_1); + } + crate::Stmt::Macro(_binding_0) => { + v.visit_stmt_macro_mut(_binding_0); + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_stmt_macro_mut<V>(v: &mut V, node: &mut crate::StmtMacro) +where + V: VisitMut + ?Sized, +{ + v.visit_attributes_mut(&mut node.attrs); + v.visit_macro_mut(&mut node.mac); + skip!(node.semi_token); +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn visit_trait_bound_mut<V>(v: &mut V, node: &mut crate::TraitBound) +where + V: VisitMut + ?Sized, +{ + skip!(node.paren_token); + v.visit_trait_bound_modifier_mut(&mut node.modifier); + if let Some(it) = &mut node.lifetimes { + v.visit_bound_lifetimes_mut(it); + } + v.visit_path_mut(&mut node.path); +} +#[cfg(any(feature = "derive", feature = 
"full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn visit_trait_bound_modifier_mut<V>(v: &mut V, node: &mut crate::TraitBoundModifier) +where + V: VisitMut + ?Sized, +{ + match node { + crate::TraitBoundModifier::None => {} + crate::TraitBoundModifier::Maybe(_binding_0) => { + skip!(_binding_0); + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_trait_item_mut<V>(v: &mut V, node: &mut crate::TraitItem) +where + V: VisitMut + ?Sized, +{ + match node { + crate::TraitItem::Const(_binding_0) => { + v.visit_trait_item_const_mut(_binding_0); + } + crate::TraitItem::Fn(_binding_0) => { + v.visit_trait_item_fn_mut(_binding_0); + } + crate::TraitItem::Type(_binding_0) => { + v.visit_trait_item_type_mut(_binding_0); + } + crate::TraitItem::Macro(_binding_0) => { + v.visit_trait_item_macro_mut(_binding_0); + } + crate::TraitItem::Verbatim(_binding_0) => { + v.visit_token_stream_mut(_binding_0); + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_trait_item_const_mut<V>(v: &mut V, node: &mut crate::TraitItemConst) +where + V: VisitMut + ?Sized, +{ + v.visit_attributes_mut(&mut node.attrs); + skip!(node.const_token); + v.visit_ident_mut(&mut node.ident); + v.visit_generics_mut(&mut node.generics); + skip!(node.colon_token); + v.visit_type_mut(&mut node.ty); + if let Some(it) = &mut node.default { + skip!((it).0); + v.visit_expr_mut(&mut (it).1); + } + skip!(node.semi_token); +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_trait_item_fn_mut<V>(v: &mut V, node: &mut crate::TraitItemFn) +where + V: VisitMut + ?Sized, +{ + v.visit_attributes_mut(&mut node.attrs); + v.visit_signature_mut(&mut node.sig); + if let Some(it) = &mut node.default { + v.visit_block_mut(it); + } + skip!(node.semi_token); +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_trait_item_macro_mut<V>(v: &mut V, node: &mut crate::TraitItemMacro) +where + V: VisitMut + ?Sized, +{ + v.visit_attributes_mut(&mut node.attrs); + v.visit_macro_mut(&mut node.mac); + skip!(node.semi_token); +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_trait_item_type_mut<V>(v: &mut V, node: &mut crate::TraitItemType) +where + V: VisitMut + ?Sized, +{ + v.visit_attributes_mut(&mut node.attrs); + skip!(node.type_token); + v.visit_ident_mut(&mut node.ident); + v.visit_generics_mut(&mut node.generics); + skip!(node.colon_token); + for mut el in Punctuated::pairs_mut(&mut node.bounds) { + let it = el.value_mut(); + v.visit_type_param_bound_mut(it); + } + if let Some(it) = &mut node.default { + skip!((it).0); + v.visit_type_mut(&mut (it).1); + } + skip!(node.semi_token); +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn visit_type_mut<V>(v: &mut V, node: &mut crate::Type) +where + V: VisitMut + ?Sized, +{ + match node { + crate::Type::Array(_binding_0) => { + v.visit_type_array_mut(_binding_0); + } + crate::Type::BareFn(_binding_0) => { + v.visit_type_bare_fn_mut(_binding_0); + } + crate::Type::Group(_binding_0) => { + v.visit_type_group_mut(_binding_0); + } + crate::Type::ImplTrait(_binding_0) => { + v.visit_type_impl_trait_mut(_binding_0); + } + crate::Type::Infer(_binding_0) => { + v.visit_type_infer_mut(_binding_0); + } + crate::Type::Macro(_binding_0) => { + v.visit_type_macro_mut(_binding_0); + } + 
crate::Type::Never(_binding_0) => { + v.visit_type_never_mut(_binding_0); + } + crate::Type::Paren(_binding_0) => { + v.visit_type_paren_mut(_binding_0); + } + crate::Type::Path(_binding_0) => { + v.visit_type_path_mut(_binding_0); + } + crate::Type::Ptr(_binding_0) => { + v.visit_type_ptr_mut(_binding_0); + } + crate::Type::Reference(_binding_0) => { + v.visit_type_reference_mut(_binding_0); + } + crate::Type::Slice(_binding_0) => { + v.visit_type_slice_mut(_binding_0); + } + crate::Type::TraitObject(_binding_0) => { + v.visit_type_trait_object_mut(_binding_0); + } + crate::Type::Tuple(_binding_0) => { + v.visit_type_tuple_mut(_binding_0); + } + crate::Type::Verbatim(_binding_0) => { + v.visit_token_stream_mut(_binding_0); + } + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn visit_type_array_mut<V>(v: &mut V, node: &mut crate::TypeArray) +where + V: VisitMut + ?Sized, +{ + skip!(node.bracket_token); + v.visit_type_mut(&mut *node.elem); + skip!(node.semi_token); + v.visit_expr_mut(&mut node.len); +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn visit_type_bare_fn_mut<V>(v: &mut V, node: &mut crate::TypeBareFn) +where + V: VisitMut + ?Sized, +{ + if let Some(it) = &mut node.lifetimes { + v.visit_bound_lifetimes_mut(it); + } + skip!(node.unsafety); + if let Some(it) = &mut node.abi { + v.visit_abi_mut(it); + } + skip!(node.fn_token); + skip!(node.paren_token); + for mut el in Punctuated::pairs_mut(&mut node.inputs) { + let it = el.value_mut(); + v.visit_bare_fn_arg_mut(it); + } + if let Some(it) = &mut node.variadic { + v.visit_bare_variadic_mut(it); + } + v.visit_return_type_mut(&mut node.output); +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn visit_type_group_mut<V>(v: &mut V, node: &mut crate::TypeGroup) +where + V: VisitMut + ?Sized, +{ + skip!(node.group_token); + v.visit_type_mut(&mut *node.elem); +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn visit_type_impl_trait_mut<V>(v: &mut V, node: &mut crate::TypeImplTrait) +where + V: VisitMut + ?Sized, +{ + skip!(node.impl_token); + for mut el in Punctuated::pairs_mut(&mut node.bounds) { + let it = el.value_mut(); + v.visit_type_param_bound_mut(it); + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn visit_type_infer_mut<V>(v: &mut V, node: &mut crate::TypeInfer) +where + V: VisitMut + ?Sized, +{ + skip!(node.underscore_token); +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn visit_type_macro_mut<V>(v: &mut V, node: &mut crate::TypeMacro) +where + V: VisitMut + ?Sized, +{ + v.visit_macro_mut(&mut node.mac); +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn visit_type_never_mut<V>(v: &mut V, node: &mut crate::TypeNever) +where + V: VisitMut + ?Sized, +{ + skip!(node.bang_token); +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn visit_type_param_mut<V>(v: &mut V, node: &mut crate::TypeParam) +where + V: VisitMut + ?Sized, +{ + v.visit_attributes_mut(&mut 
node.attrs); + v.visit_ident_mut(&mut node.ident); + skip!(node.colon_token); + for mut el in Punctuated::pairs_mut(&mut node.bounds) { + let it = el.value_mut(); + v.visit_type_param_bound_mut(it); + } + skip!(node.eq_token); + if let Some(it) = &mut node.default { + v.visit_type_mut(it); + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn visit_type_param_bound_mut<V>(v: &mut V, node: &mut crate::TypeParamBound) +where + V: VisitMut + ?Sized, +{ + match node { + crate::TypeParamBound::Trait(_binding_0) => { + v.visit_trait_bound_mut(_binding_0); + } + crate::TypeParamBound::Lifetime(_binding_0) => { + v.visit_lifetime_mut(_binding_0); + } + crate::TypeParamBound::PreciseCapture(_binding_0) => { + full!(v.visit_precise_capture_mut(_binding_0)); + } + crate::TypeParamBound::Verbatim(_binding_0) => { + v.visit_token_stream_mut(_binding_0); + } + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn visit_type_paren_mut<V>(v: &mut V, node: &mut crate::TypeParen) +where + V: VisitMut + ?Sized, +{ + skip!(node.paren_token); + v.visit_type_mut(&mut *node.elem); +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn visit_type_path_mut<V>(v: &mut V, node: &mut crate::TypePath) +where + V: VisitMut + ?Sized, +{ + if let Some(it) = &mut node.qself { + v.visit_qself_mut(it); + } + v.visit_path_mut(&mut node.path); +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn visit_type_ptr_mut<V>(v: &mut V, node: &mut crate::TypePtr) +where + V: VisitMut + ?Sized, +{ + skip!(node.star_token); + skip!(node.const_token); + skip!(node.mutability); + v.visit_type_mut(&mut *node.elem); +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn visit_type_reference_mut<V>(v: &mut V, node: &mut crate::TypeReference) +where + V: VisitMut + ?Sized, +{ + skip!(node.and_token); + if let Some(it) = &mut node.lifetime { + v.visit_lifetime_mut(it); + } + skip!(node.mutability); + v.visit_type_mut(&mut *node.elem); +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn visit_type_slice_mut<V>(v: &mut V, node: &mut crate::TypeSlice) +where + V: VisitMut + ?Sized, +{ + skip!(node.bracket_token); + v.visit_type_mut(&mut *node.elem); +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn visit_type_trait_object_mut<V>(v: &mut V, node: &mut crate::TypeTraitObject) +where + V: VisitMut + ?Sized, +{ + skip!(node.dyn_token); + for mut el in Punctuated::pairs_mut(&mut node.bounds) { + let it = el.value_mut(); + v.visit_type_param_bound_mut(it); + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn visit_type_tuple_mut<V>(v: &mut V, node: &mut crate::TypeTuple) +where + V: VisitMut + ?Sized, +{ + skip!(node.paren_token); + for mut el in Punctuated::pairs_mut(&mut node.elems) { + let it = el.value_mut(); + v.visit_type_mut(it); + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn 
visit_un_op_mut<V>(v: &mut V, node: &mut crate::UnOp) +where + V: VisitMut + ?Sized, +{ + match node { + crate::UnOp::Deref(_binding_0) => { + skip!(_binding_0); + } + crate::UnOp::Not(_binding_0) => { + skip!(_binding_0); + } + crate::UnOp::Neg(_binding_0) => { + skip!(_binding_0); + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_use_glob_mut<V>(v: &mut V, node: &mut crate::UseGlob) +where + V: VisitMut + ?Sized, +{ + skip!(node.star_token); +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_use_group_mut<V>(v: &mut V, node: &mut crate::UseGroup) +where + V: VisitMut + ?Sized, +{ + skip!(node.brace_token); + for mut el in Punctuated::pairs_mut(&mut node.items) { + let it = el.value_mut(); + v.visit_use_tree_mut(it); + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_use_name_mut<V>(v: &mut V, node: &mut crate::UseName) +where + V: VisitMut + ?Sized, +{ + v.visit_ident_mut(&mut node.ident); +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_use_path_mut<V>(v: &mut V, node: &mut crate::UsePath) +where + V: VisitMut + ?Sized, +{ + v.visit_ident_mut(&mut node.ident); + skip!(node.colon2_token); + v.visit_use_tree_mut(&mut *node.tree); +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_use_rename_mut<V>(v: &mut V, node: &mut crate::UseRename) +where + V: VisitMut + ?Sized, +{ + v.visit_ident_mut(&mut node.ident); + skip!(node.as_token); + v.visit_ident_mut(&mut node.rename); +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_use_tree_mut<V>(v: &mut V, node: &mut crate::UseTree) +where + V: VisitMut + ?Sized, +{ + match node { + crate::UseTree::Path(_binding_0) => { + v.visit_use_path_mut(_binding_0); + } + crate::UseTree::Name(_binding_0) => { + v.visit_use_name_mut(_binding_0); + } + crate::UseTree::Rename(_binding_0) => { + v.visit_use_rename_mut(_binding_0); + } + crate::UseTree::Glob(_binding_0) => { + v.visit_use_glob_mut(_binding_0); + } + crate::UseTree::Group(_binding_0) => { + v.visit_use_group_mut(_binding_0); + } + } +} +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub fn visit_variadic_mut<V>(v: &mut V, node: &mut crate::Variadic) +where + V: VisitMut + ?Sized, +{ + v.visit_attributes_mut(&mut node.attrs); + if let Some(it) = &mut node.pat { + v.visit_pat_mut(&mut *(it).0); + skip!((it).1); + } + skip!(node.dots); + skip!(node.comma); +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn visit_variant_mut<V>(v: &mut V, node: &mut crate::Variant) +where + V: VisitMut + ?Sized, +{ + v.visit_attributes_mut(&mut node.attrs); + v.visit_ident_mut(&mut node.ident); + v.visit_fields_mut(&mut node.fields); + if let Some(it) = &mut node.discriminant { + skip!((it).0); + v.visit_expr_mut(&mut (it).1); + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn visit_vis_restricted_mut<V>(v: &mut V, node: &mut crate::VisRestricted) +where + V: VisitMut + ?Sized, +{ + skip!(node.pub_token); + skip!(node.paren_token); + skip!(node.in_token); + v.visit_path_mut(&mut *node.path); +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn visit_visibility_mut<V>(v: &mut V, node: 
&mut crate::Visibility) +where + V: VisitMut + ?Sized, +{ + match node { + crate::Visibility::Public(_binding_0) => { + skip!(_binding_0); + } + crate::Visibility::Restricted(_binding_0) => { + v.visit_vis_restricted_mut(_binding_0); + } + crate::Visibility::Inherited => {} + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn visit_where_clause_mut<V>(v: &mut V, node: &mut crate::WhereClause) +where + V: VisitMut + ?Sized, +{ + skip!(node.where_token); + for mut el in Punctuated::pairs_mut(&mut node.predicates) { + let it = el.value_mut(); + v.visit_where_predicate_mut(it); + } +} +#[cfg(any(feature = "derive", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] +pub fn visit_where_predicate_mut<V>(v: &mut V, node: &mut crate::WherePredicate) +where + V: VisitMut + ?Sized, +{ + match node { + crate::WherePredicate::Lifetime(_binding_0) => { + v.visit_predicate_lifetime_mut(_binding_0); + } + crate::WherePredicate::Type(_binding_0) => { + v.visit_predicate_type_mut(_binding_0); + } + } +} diff --git a/vendor/syn/src/generics.rs b/vendor/syn/src/generics.rs new file mode 100644 index 00000000000000..de8e09151e3c4d --- /dev/null +++ b/vendor/syn/src/generics.rs @@ -0,0 +1,1477 @@ +use crate::attr::Attribute; +use crate::expr::Expr; +use crate::ident::Ident; +use crate::lifetime::Lifetime; +use crate::path::Path; +use crate::punctuated::{Iter, IterMut, Punctuated}; +use crate::token; +use crate::ty::Type; +use proc_macro2::TokenStream; +#[cfg(all(feature = "printing", feature = "extra-traits"))] +use std::fmt::{self, Debug}; +#[cfg(all(feature = "printing", feature = "extra-traits"))] +use std::hash::{Hash, Hasher}; + +ast_struct! { + /// Lifetimes and type parameters attached to a declaration of a function, + /// enum, trait, etc. + /// + /// This struct represents two distinct optional syntactic elements, + /// [generic parameters] and [where clause]. In some locations of the + /// grammar, there may be other tokens in between these two things. + /// + /// [generic parameters]: https://doc.rust-lang.org/stable/reference/items/generics.html#generic-parameters + /// [where clause]: https://doc.rust-lang.org/stable/reference/items/generics.html#where-clauses + #[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] + pub struct Generics { + pub lt_token: Option<Token![<]>, + pub params: Punctuated<GenericParam, Token![,]>, + pub gt_token: Option<Token![>]>, + pub where_clause: Option<WhereClause>, + } +} + +ast_enum_of_structs! { + /// A generic type parameter, lifetime, or const generic: `T: Into<String>`, + /// `'a: 'b`, `const LEN: usize`. + /// + /// # Syntax tree enum + /// + /// This type is a [syntax tree enum]. + /// + /// [syntax tree enum]: crate::expr::Expr#syntax-tree-enums + #[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] + pub enum GenericParam { + /// A lifetime parameter: `'a: 'b + 'c + 'd`. + Lifetime(LifetimeParam), + + /// A generic type parameter: `T: Into<String>`. + Type(TypeParam), + + /// A const generic parameter: `const LENGTH: usize`. + Const(ConstParam), + } +} + +ast_struct! { + /// A lifetime definition: `'a: 'b + 'c + 'd`. + #[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] + pub struct LifetimeParam { + pub attrs: Vec<Attribute>, + pub lifetime: Lifetime, + pub colon_token: Option<Token![:]>, + pub bounds: Punctuated<Lifetime, Token![+]>, + } +} + +ast_struct! 
{ + /// A generic type parameter: `T: Into<String>`. + #[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] + pub struct TypeParam { + pub attrs: Vec<Attribute>, + pub ident: Ident, + pub colon_token: Option<Token![:]>, + pub bounds: Punctuated<TypeParamBound, Token![+]>, + pub eq_token: Option<Token![=]>, + pub default: Option<Type>, + } +} + +ast_struct! { + /// A const generic parameter: `const LENGTH: usize`. + #[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] + pub struct ConstParam { + pub attrs: Vec<Attribute>, + pub const_token: Token![const], + pub ident: Ident, + pub colon_token: Token![:], + pub ty: Type, + pub eq_token: Option<Token![=]>, + pub default: Option<Expr>, + } +} + +impl Default for Generics { + fn default() -> Self { + Generics { + lt_token: None, + params: Punctuated::new(), + gt_token: None, + where_clause: None, + } + } +} + +impl Generics { + return_impl_trait! { + /// Iterator over the lifetime parameters in `self.params`. + pub fn lifetimes(&self) -> impl Iterator<Item = &LifetimeParam> [Lifetimes] { + Lifetimes(self.params.iter()) + } + } + + return_impl_trait! { + /// Iterator over the lifetime parameters in `self.params`. + pub fn lifetimes_mut(&mut self) -> impl Iterator<Item = &mut LifetimeParam> [LifetimesMut] { + LifetimesMut(self.params.iter_mut()) + } + } + + return_impl_trait! { + /// Iterator over the type parameters in `self.params`. + pub fn type_params(&self) -> impl Iterator<Item = &TypeParam> [TypeParams] { + TypeParams(self.params.iter()) + } + } + + return_impl_trait! { + /// Iterator over the type parameters in `self.params`. + pub fn type_params_mut(&mut self) -> impl Iterator<Item = &mut TypeParam> [TypeParamsMut] { + TypeParamsMut(self.params.iter_mut()) + } + } + + return_impl_trait! { + /// Iterator over the constant parameters in `self.params`. + pub fn const_params(&self) -> impl Iterator<Item = &ConstParam> [ConstParams] { + ConstParams(self.params.iter()) + } + } + + return_impl_trait! { + /// Iterator over the constant parameters in `self.params`. + pub fn const_params_mut(&mut self) -> impl Iterator<Item = &mut ConstParam> [ConstParamsMut] { + ConstParamsMut(self.params.iter_mut()) + } + } + + /// Initializes an empty `where`-clause if there is not one present already. + pub fn make_where_clause(&mut self) -> &mut WhereClause { + self.where_clause.get_or_insert_with(|| WhereClause { + where_token: <Token![where]>::default(), + predicates: Punctuated::new(), + }) + } + + /// Split a type's generics into the pieces required for impl'ing a trait + /// for that type. + /// + /// ``` + /// # use proc_macro2::{Span, Ident}; + /// # use quote::quote; + /// # + /// # let generics: syn::Generics = Default::default(); + /// # let name = Ident::new("MyType", Span::call_site()); + /// # + /// let (impl_generics, ty_generics, where_clause) = generics.split_for_impl(); + /// quote! { + /// impl #impl_generics MyTrait for #name #ty_generics #where_clause { + /// // ... + /// } + /// } + /// # ; + /// ``` + #[cfg(feature = "printing")] + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + pub fn split_for_impl(&self) -> (ImplGenerics, TypeGenerics, Option<&WhereClause>) { + ( + ImplGenerics(self), + TypeGenerics(self), + self.where_clause.as_ref(), + ) + } +} + +pub struct Lifetimes<'a>(Iter<'a, GenericParam>); + +impl<'a> Iterator for Lifetimes<'a> { + type Item = &'a LifetimeParam; + + fn next(&mut self) -> Option<Self::Item> { + if let GenericParam::Lifetime(lifetime) = self.0.next()? 
{ + Some(lifetime) + } else { + self.next() + } + } +} + +pub struct LifetimesMut<'a>(IterMut<'a, GenericParam>); + +impl<'a> Iterator for LifetimesMut<'a> { + type Item = &'a mut LifetimeParam; + + fn next(&mut self) -> Option<Self::Item> { + if let GenericParam::Lifetime(lifetime) = self.0.next()? { + Some(lifetime) + } else { + self.next() + } + } +} + +pub struct TypeParams<'a>(Iter<'a, GenericParam>); + +impl<'a> Iterator for TypeParams<'a> { + type Item = &'a TypeParam; + + fn next(&mut self) -> Option<Self::Item> { + if let GenericParam::Type(type_param) = self.0.next()? { + Some(type_param) + } else { + self.next() + } + } +} + +pub struct TypeParamsMut<'a>(IterMut<'a, GenericParam>); + +impl<'a> Iterator for TypeParamsMut<'a> { + type Item = &'a mut TypeParam; + + fn next(&mut self) -> Option<Self::Item> { + if let GenericParam::Type(type_param) = self.0.next()? { + Some(type_param) + } else { + self.next() + } + } +} + +pub struct ConstParams<'a>(Iter<'a, GenericParam>); + +impl<'a> Iterator for ConstParams<'a> { + type Item = &'a ConstParam; + + fn next(&mut self) -> Option<Self::Item> { + if let GenericParam::Const(const_param) = self.0.next()? { + Some(const_param) + } else { + self.next() + } + } +} + +pub struct ConstParamsMut<'a>(IterMut<'a, GenericParam>); + +impl<'a> Iterator for ConstParamsMut<'a> { + type Item = &'a mut ConstParam; + + fn next(&mut self) -> Option<Self::Item> { + if let GenericParam::Const(const_param) = self.0.next()? { + Some(const_param) + } else { + self.next() + } + } +} + +/// Returned by `Generics::split_for_impl`. +#[cfg(feature = "printing")] +#[cfg_attr( + docsrs, + doc(cfg(all(any(feature = "full", feature = "derive"), feature = "printing"))) +)] +pub struct ImplGenerics<'a>(&'a Generics); + +/// Returned by `Generics::split_for_impl`. +#[cfg(feature = "printing")] +#[cfg_attr( + docsrs, + doc(cfg(all(any(feature = "full", feature = "derive"), feature = "printing"))) +)] +pub struct TypeGenerics<'a>(&'a Generics); + +/// Returned by `TypeGenerics::as_turbofish`. +#[cfg(feature = "printing")] +#[cfg_attr( + docsrs, + doc(cfg(all(any(feature = "full", feature = "derive"), feature = "printing"))) +)] +pub struct Turbofish<'a>(&'a Generics); + +#[cfg(feature = "printing")] +macro_rules! 
generics_wrapper_impls { + ($ty:ident) => { + #[cfg(feature = "clone-impls")] + #[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] + impl<'a> Clone for $ty<'a> { + fn clone(&self) -> Self { + $ty(self.0) + } + } + + #[cfg(feature = "extra-traits")] + #[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] + impl<'a> Debug for $ty<'a> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter + .debug_tuple(stringify!($ty)) + .field(self.0) + .finish() + } + } + + #[cfg(feature = "extra-traits")] + #[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] + impl<'a> Eq for $ty<'a> {} + + #[cfg(feature = "extra-traits")] + #[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] + impl<'a> PartialEq for $ty<'a> { + fn eq(&self, other: &Self) -> bool { + self.0 == other.0 + } + } + + #[cfg(feature = "extra-traits")] + #[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] + impl<'a> Hash for $ty<'a> { + fn hash<H: Hasher>(&self, state: &mut H) { + self.0.hash(state); + } + } + }; +} + +#[cfg(feature = "printing")] +generics_wrapper_impls!(ImplGenerics); +#[cfg(feature = "printing")] +generics_wrapper_impls!(TypeGenerics); +#[cfg(feature = "printing")] +generics_wrapper_impls!(Turbofish); + +#[cfg(feature = "printing")] +impl<'a> TypeGenerics<'a> { + /// Turn a type's generics like `<X, Y>` into a turbofish like `::<X, Y>`. + pub fn as_turbofish(&self) -> Turbofish<'a> { + Turbofish(self.0) + } +} + +ast_struct! { + /// A set of bound lifetimes: `for<'a, 'b, 'c>`. + #[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] + pub struct BoundLifetimes { + pub for_token: Token![for], + pub lt_token: Token![<], + pub lifetimes: Punctuated<GenericParam, Token![,]>, + pub gt_token: Token![>], + } +} + +impl Default for BoundLifetimes { + fn default() -> Self { + BoundLifetimes { + for_token: Default::default(), + lt_token: Default::default(), + lifetimes: Punctuated::new(), + gt_token: Default::default(), + } + } +} + +impl LifetimeParam { + pub fn new(lifetime: Lifetime) -> Self { + LifetimeParam { + attrs: Vec::new(), + lifetime, + colon_token: None, + bounds: Punctuated::new(), + } + } +} + +impl From<Ident> for TypeParam { + fn from(ident: Ident) -> Self { + TypeParam { + attrs: vec![], + ident, + colon_token: None, + bounds: Punctuated::new(), + eq_token: None, + default: None, + } + } +} + +ast_enum_of_structs! { + /// A trait or lifetime used as a bound on a type parameter. + #[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] + #[non_exhaustive] + pub enum TypeParamBound { + Trait(TraitBound), + Lifetime(Lifetime), + PreciseCapture(PreciseCapture), + Verbatim(TokenStream), + } +} + +ast_struct! { + /// A trait used as a bound on a type parameter. + #[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] + pub struct TraitBound { + pub paren_token: Option<token::Paren>, + pub modifier: TraitBoundModifier, + /// The `for<'a>` in `for<'a> Foo<&'a T>` + pub lifetimes: Option<BoundLifetimes>, + /// The `Foo<&'a T>` in `for<'a> Foo<&'a T>` + pub path: Path, + } +} + +ast_enum! { + /// A modifier on a trait bound, currently only used for the `?` in + /// `?Sized`. + #[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] + pub enum TraitBoundModifier { + None, + Maybe(Token![?]), + } +} + +ast_struct! { + /// Precise capturing bound: the 'use<…>' in `impl Trait + + /// use<'a, T>`. 
+ #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + pub struct PreciseCapture #full { + pub use_token: Token![use], + pub lt_token: Token![<], + pub params: Punctuated<CapturedParam, Token![,]>, + pub gt_token: Token![>], + } +} + +#[cfg(feature = "full")] +ast_enum! { + /// Single parameter in a precise capturing bound. + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + #[non_exhaustive] + pub enum CapturedParam { + /// A lifetime parameter in precise capturing bound: `fn f<'a>() -> impl + /// Trait + use<'a>`. + Lifetime(Lifetime), + /// A type parameter or const generic parameter in precise capturing + /// bound: `fn f<T>() -> impl Trait + use<T>` or `fn f<const K: T>() -> + /// impl Trait + use<K>`. + Ident(Ident), + } +} + +ast_struct! { + /// A `where` clause in a definition: `where T: Deserialize<'de>, D: + /// 'static`. + #[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] + pub struct WhereClause { + pub where_token: Token![where], + pub predicates: Punctuated<WherePredicate, Token![,]>, + } +} + +ast_enum_of_structs! { + /// A single predicate in a `where` clause: `T: Deserialize<'de>`. + /// + /// # Syntax tree enum + /// + /// This type is a [syntax tree enum]. + /// + /// [syntax tree enum]: crate::expr::Expr#syntax-tree-enums + #[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] + #[non_exhaustive] + pub enum WherePredicate { + /// A lifetime predicate in a `where` clause: `'a: 'b + 'c`. + Lifetime(PredicateLifetime), + + /// A type predicate in a `where` clause: `for<'c> Foo<'c>: Trait<'c>`. + Type(PredicateType), + } +} + +ast_struct! { + /// A lifetime predicate in a `where` clause: `'a: 'b + 'c`. + #[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] + pub struct PredicateLifetime { + pub lifetime: Lifetime, + pub colon_token: Token![:], + pub bounds: Punctuated<Lifetime, Token![+]>, + } +} + +ast_struct! { + /// A type predicate in a `where` clause: `for<'c> Foo<'c>: Trait<'c>`. 
+ #[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] + pub struct PredicateType { + /// Any lifetimes from a `for` binding + pub lifetimes: Option<BoundLifetimes>, + /// The type being bounded + pub bounded_ty: Type, + pub colon_token: Token![:], + /// Trait and lifetime bounds (`Clone+Send+'static`) + pub bounds: Punctuated<TypeParamBound, Token![+]>, + } +} + +#[cfg(feature = "parsing")] +pub(crate) mod parsing { + use crate::attr::Attribute; + #[cfg(feature = "full")] + use crate::error; + use crate::error::{Error, Result}; + use crate::ext::IdentExt as _; + use crate::generics::{ + BoundLifetimes, ConstParam, GenericParam, Generics, LifetimeParam, PredicateLifetime, + PredicateType, TraitBound, TraitBoundModifier, TypeParam, TypeParamBound, WhereClause, + WherePredicate, + }; + #[cfg(feature = "full")] + use crate::generics::{CapturedParam, PreciseCapture}; + use crate::ident::Ident; + use crate::lifetime::Lifetime; + use crate::parse::{Parse, ParseStream}; + use crate::path::{self, ParenthesizedGenericArguments, Path, PathArguments}; + use crate::punctuated::Punctuated; + use crate::token; + use crate::ty::Type; + use crate::verbatim; + + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + impl Parse for Generics { + fn parse(input: ParseStream) -> Result<Self> { + if !input.peek(Token![<]) { + return Ok(Generics::default()); + } + + let lt_token: Token![<] = input.parse()?; + + let mut params = Punctuated::new(); + loop { + if input.peek(Token![>]) { + break; + } + + let attrs = input.call(Attribute::parse_outer)?; + let lookahead = input.lookahead1(); + if lookahead.peek(Lifetime) { + params.push_value(GenericParam::Lifetime(LifetimeParam { + attrs, + ..input.parse()? + })); + } else if lookahead.peek(Ident) { + params.push_value(GenericParam::Type(TypeParam { + attrs, + ..input.parse()? + })); + } else if lookahead.peek(Token![const]) { + params.push_value(GenericParam::Const(ConstParam { + attrs, + ..input.parse()? + })); + } else if input.peek(Token![_]) { + params.push_value(GenericParam::Type(TypeParam { + attrs, + ident: input.call(Ident::parse_any)?, + colon_token: None, + bounds: Punctuated::new(), + eq_token: None, + default: None, + })); + } else { + return Err(lookahead.error()); + } + + if input.peek(Token![>]) { + break; + } + let punct = input.parse()?; + params.push_punct(punct); + } + + let gt_token: Token![>] = input.parse()?; + + Ok(Generics { + lt_token: Some(lt_token), + params, + gt_token: Some(gt_token), + where_clause: None, + }) + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + impl Parse for GenericParam { + fn parse(input: ParseStream) -> Result<Self> { + let attrs = input.call(Attribute::parse_outer)?; + + let lookahead = input.lookahead1(); + if lookahead.peek(Ident) { + Ok(GenericParam::Type(TypeParam { + attrs, + ..input.parse()? + })) + } else if lookahead.peek(Lifetime) { + Ok(GenericParam::Lifetime(LifetimeParam { + attrs, + ..input.parse()? + })) + } else if lookahead.peek(Token![const]) { + Ok(GenericParam::Const(ConstParam { + attrs, + ..input.parse()? + })) + } else { + Err(lookahead.error()) + } + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + impl Parse for LifetimeParam { + fn parse(input: ParseStream) -> Result<Self> { + let has_colon; + Ok(LifetimeParam { + attrs: input.call(Attribute::parse_outer)?, + lifetime: input.parse()?, + colon_token: { + if input.peek(Token![:]) { + has_colon = true; + Some(input.parse()?) 
+ } else { + has_colon = false; + None + } + }, + bounds: { + let mut bounds = Punctuated::new(); + if has_colon { + loop { + if input.peek(Token![,]) || input.peek(Token![>]) { + break; + } + let value = input.parse()?; + bounds.push_value(value); + if !input.peek(Token![+]) { + break; + } + let punct = input.parse()?; + bounds.push_punct(punct); + } + } + bounds + }, + }) + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + impl Parse for BoundLifetimes { + fn parse(input: ParseStream) -> Result<Self> { + Ok(BoundLifetimes { + for_token: input.parse()?, + lt_token: input.parse()?, + lifetimes: { + let mut lifetimes = Punctuated::new(); + while !input.peek(Token![>]) { + lifetimes.push_value(input.parse()?); + if input.peek(Token![>]) { + break; + } + lifetimes.push_punct(input.parse()?); + } + lifetimes + }, + gt_token: input.parse()?, + }) + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + impl Parse for Option<BoundLifetimes> { + fn parse(input: ParseStream) -> Result<Self> { + if input.peek(Token![for]) { + input.parse().map(Some) + } else { + Ok(None) + } + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + impl Parse for TypeParam { + fn parse(input: ParseStream) -> Result<Self> { + let attrs = input.call(Attribute::parse_outer)?; + let ident: Ident = input.parse()?; + let colon_token: Option<Token![:]> = input.parse()?; + + let mut bounds = Punctuated::new(); + if colon_token.is_some() { + loop { + if input.peek(Token![,]) || input.peek(Token![>]) || input.peek(Token![=]) { + break; + } + bounds.push_value({ + let allow_precise_capture = false; + let allow_const = true; + TypeParamBound::parse_single(input, allow_precise_capture, allow_const)? + }); + if !input.peek(Token![+]) { + break; + } + let punct: Token![+] = input.parse()?; + bounds.push_punct(punct); + } + } + + let eq_token: Option<Token![=]> = input.parse()?; + let default = if eq_token.is_some() { + Some(input.parse::<Type>()?) + } else { + None + }; + + Ok(TypeParam { + attrs, + ident, + colon_token, + bounds, + eq_token, + default, + }) + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + impl Parse for TypeParamBound { + fn parse(input: ParseStream) -> Result<Self> { + let allow_precise_capture = true; + let allow_const = true; + Self::parse_single(input, allow_precise_capture, allow_const) + } + } + + impl TypeParamBound { + pub(crate) fn parse_single( + input: ParseStream, + #[cfg_attr(not(feature = "full"), allow(unused_variables))] allow_precise_capture: bool, + allow_const: bool, + ) -> Result<Self> { + if input.peek(Lifetime) { + return input.parse().map(TypeParamBound::Lifetime); + } + + #[cfg(feature = "full")] + { + if input.peek(Token![use]) { + let precise_capture: PreciseCapture = input.parse()?; + return if allow_precise_capture { + Ok(TypeParamBound::PreciseCapture(precise_capture)) + } else { + let msg = "`use<...>` precise capturing syntax is not allowed here"; + Err(error::new2( + precise_capture.use_token.span, + precise_capture.gt_token.span, + msg, + )) + }; + } + } + + let begin = input.fork(); + + let content; + let (paren_token, content) = if input.peek(token::Paren) { + (Some(parenthesized!(content in input)), &content) + } else { + (None, input) + }; + + if let Some(mut bound) = TraitBound::do_parse(content, allow_const)? 
{ + bound.paren_token = paren_token; + Ok(TypeParamBound::Trait(bound)) + } else { + Ok(TypeParamBound::Verbatim(verbatim::between(&begin, input))) + } + } + + pub(crate) fn parse_multiple( + input: ParseStream, + allow_plus: bool, + allow_precise_capture: bool, + allow_const: bool, + ) -> Result<Punctuated<Self, Token![+]>> { + let mut bounds = Punctuated::new(); + loop { + let bound = Self::parse_single(input, allow_precise_capture, allow_const)?; + bounds.push_value(bound); + if !(allow_plus && input.peek(Token![+])) { + break; + } + bounds.push_punct(input.parse()?); + if !(input.peek(Ident::peek_any) + || input.peek(Token![::]) + || input.peek(Token![?]) + || input.peek(Lifetime) + || input.peek(token::Paren) + || (allow_const && (input.peek(token::Bracket) || input.peek(Token![const])))) + { + break; + } + } + Ok(bounds) + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + impl Parse for TraitBound { + fn parse(input: ParseStream) -> Result<Self> { + let allow_const = false; + Self::do_parse(input, allow_const).map(Option::unwrap) + } + } + + impl TraitBound { + fn do_parse(input: ParseStream, allow_const: bool) -> Result<Option<Self>> { + let mut lifetimes: Option<BoundLifetimes> = input.parse()?; + + let is_conditionally_const = cfg!(feature = "full") && input.peek(token::Bracket); + let is_unconditionally_const = cfg!(feature = "full") && input.peek(Token![const]); + if is_conditionally_const { + let conditionally_const; + let bracket_token = bracketed!(conditionally_const in input); + conditionally_const.parse::<Token![const]>()?; + if !allow_const { + let msg = "`[const]` is not allowed here"; + return Err(Error::new(bracket_token.span.join(), msg)); + } + } else if is_unconditionally_const { + let const_token: Token![const] = input.parse()?; + if !allow_const { + let msg = "`const` is not allowed here"; + return Err(Error::new(const_token.span, msg)); + } + } + + let modifier: TraitBoundModifier = input.parse()?; + if lifetimes.is_none() && matches!(modifier, TraitBoundModifier::Maybe(_)) { + lifetimes = input.parse()?; + } + + let mut path: Path = input.parse()?; + if path.segments.last().unwrap().arguments.is_empty() + && (input.peek(token::Paren) || input.peek(Token![::]) && input.peek3(token::Paren)) + { + input.parse::<Option<Token![::]>>()?; + let args: ParenthesizedGenericArguments = input.parse()?; + let parenthesized = PathArguments::Parenthesized(args); + path.segments.last_mut().unwrap().arguments = parenthesized; + } + + if lifetimes.is_some() { + match modifier { + TraitBoundModifier::None => {} + TraitBoundModifier::Maybe(maybe) => { + let msg = "`for<...>` binder not allowed with `?` trait polarity modifier"; + return Err(Error::new(maybe.span, msg)); + } + } + } + + if is_conditionally_const || is_unconditionally_const { + Ok(None) + } else { + Ok(Some(TraitBound { + paren_token: None, + modifier, + lifetimes, + path, + })) + } + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + impl Parse for TraitBoundModifier { + fn parse(input: ParseStream) -> Result<Self> { + if input.peek(Token![?]) { + input.parse().map(TraitBoundModifier::Maybe) + } else { + Ok(TraitBoundModifier::None) + } + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + impl Parse for ConstParam { + fn parse(input: ParseStream) -> Result<Self> { + let mut default = None; + Ok(ConstParam { + attrs: input.call(Attribute::parse_outer)?, + const_token: input.parse()?, + ident: input.parse()?, + colon_token: input.parse()?, + ty: input.parse()?, + eq_token: { + if 
input.peek(Token![=]) { + let eq_token = input.parse()?; + default = Some(path::parsing::const_argument(input)?); + Some(eq_token) + } else { + None + } + }, + default, + }) + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + impl Parse for WhereClause { + fn parse(input: ParseStream) -> Result<Self> { + let where_token: Token![where] = input.parse()?; + + if choose_generics_over_qpath(input) { + return Err(input + .error("generic parameters on `where` clauses are reserved for future use")); + } + + Ok(WhereClause { + where_token, + predicates: { + let mut predicates = Punctuated::new(); + loop { + if input.is_empty() + || input.peek(token::Brace) + || input.peek(Token![,]) + || input.peek(Token![;]) + || input.peek(Token![:]) && !input.peek(Token![::]) + || input.peek(Token![=]) + { + break; + } + let value = input.parse()?; + predicates.push_value(value); + if !input.peek(Token![,]) { + break; + } + let punct = input.parse()?; + predicates.push_punct(punct); + } + predicates + }, + }) + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + impl Parse for Option<WhereClause> { + fn parse(input: ParseStream) -> Result<Self> { + if input.peek(Token![where]) { + input.parse().map(Some) + } else { + Ok(None) + } + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + impl Parse for WherePredicate { + fn parse(input: ParseStream) -> Result<Self> { + if input.peek(Lifetime) && input.peek2(Token![:]) { + Ok(WherePredicate::Lifetime(PredicateLifetime { + lifetime: input.parse()?, + colon_token: input.parse()?, + bounds: { + let mut bounds = Punctuated::new(); + loop { + if input.is_empty() + || input.peek(token::Brace) + || input.peek(Token![,]) + || input.peek(Token![;]) + || input.peek(Token![:]) + || input.peek(Token![=]) + { + break; + } + let value = input.parse()?; + bounds.push_value(value); + if !input.peek(Token![+]) { + break; + } + let punct = input.parse()?; + bounds.push_punct(punct); + } + bounds + }, + })) + } else { + Ok(WherePredicate::Type(PredicateType { + lifetimes: input.parse()?, + bounded_ty: input.parse()?, + colon_token: input.parse()?, + bounds: { + let mut bounds = Punctuated::new(); + loop { + if input.is_empty() + || input.peek(token::Brace) + || input.peek(Token![,]) + || input.peek(Token![;]) + || input.peek(Token![:]) && !input.peek(Token![::]) + || input.peek(Token![=]) + { + break; + } + bounds.push_value({ + let allow_precise_capture = false; + let allow_const = true; + TypeParamBound::parse_single( + input, + allow_precise_capture, + allow_const, + )? + }); + if !input.peek(Token![+]) { + break; + } + let punct = input.parse()?; + bounds.push_punct(punct); + } + bounds + }, + })) + } + } + } + + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + impl Parse for PreciseCapture { + fn parse(input: ParseStream) -> Result<Self> { + let use_token: Token![use] = input.parse()?; + let lt_token: Token![<] = input.parse()?; + let mut params = Punctuated::new(); + loop { + let lookahead = input.lookahead1(); + params.push_value( + if lookahead.peek(Lifetime) || lookahead.peek(Ident) || input.peek(Token![Self]) + { + input.parse::<CapturedParam>()? + } else if lookahead.peek(Token![>]) { + break; + } else { + return Err(lookahead.error()); + }, + ); + let lookahead = input.lookahead1(); + params.push_punct(if lookahead.peek(Token![,]) { + input.parse::<Token![,]>()? 
+ } else if lookahead.peek(Token![>]) { + break; + } else { + return Err(lookahead.error()); + }); + } + let gt_token: Token![>] = input.parse()?; + Ok(PreciseCapture { + use_token, + lt_token, + params, + gt_token, + }) + } + } + + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + impl Parse for CapturedParam { + fn parse(input: ParseStream) -> Result<Self> { + let lookahead = input.lookahead1(); + if lookahead.peek(Lifetime) { + input.parse().map(CapturedParam::Lifetime) + } else if lookahead.peek(Ident) || input.peek(Token![Self]) { + input.call(Ident::parse_any).map(CapturedParam::Ident) + } else { + Err(lookahead.error()) + } + } + } + + pub(crate) fn choose_generics_over_qpath(input: ParseStream) -> bool { + // Rust syntax has an ambiguity between generic parameters and qualified + // paths. In `impl <T> :: Thing<T, U> {}` this may either be a generic + // inherent impl `impl<T> ::Thing<T, U>` or a non-generic inherent impl + // for an associated type `impl <T>::Thing<T, U>`. + // + // After `<` the following continuations can only begin generics, not a + // qualified path: + // + // `<` `>` - empty generic parameters + // `<` `#` - generic parameters with attribute + // `<` LIFETIME `>` - single lifetime parameter + // `<` (LIFETIME|IDENT) `,` - first generic parameter in a list + // `<` (LIFETIME|IDENT) `:` - generic parameter with bounds + // `<` (LIFETIME|IDENT) `=` - generic parameter with a default + // `<` const - generic const parameter + // + // The only truly ambiguous case is: + // + // `<` IDENT `>` `::` IDENT ... + // + // which we disambiguate in favor of generics because this is almost + // always the expected one in the context of real-world code. + input.peek(Token![<]) + && (input.peek2(Token![>]) + || input.peek2(Token![#]) + || (input.peek2(Lifetime) || input.peek2(Ident)) + && (input.peek3(Token![>]) + || input.peek3(Token![,]) + || input.peek3(Token![:]) && !input.peek3(Token![::]) + || input.peek3(Token![=])) + || input.peek2(Token![const])) + } + + #[cfg(feature = "full")] + pub(crate) fn choose_generics_over_qpath_after_keyword(input: ParseStream) -> bool { + let input = input.fork(); + input.call(Ident::parse_any).unwrap(); // `impl` or `for` or `where` + choose_generics_over_qpath(&input) + } +} + +#[cfg(feature = "printing")] +pub(crate) mod printing { + use crate::attr::FilterAttrs; + #[cfg(feature = "full")] + use crate::expr; + use crate::expr::Expr; + #[cfg(feature = "full")] + use crate::fixup::FixupContext; + use crate::generics::{ + BoundLifetimes, ConstParam, GenericParam, Generics, ImplGenerics, LifetimeParam, + PredicateLifetime, PredicateType, TraitBound, TraitBoundModifier, Turbofish, TypeGenerics, + TypeParam, WhereClause, + }; + #[cfg(feature = "full")] + use crate::generics::{CapturedParam, PreciseCapture}; + use crate::print::TokensOrDefault; + use crate::token; + use proc_macro2::TokenStream; + use quote::{ToTokens, TokenStreamExt as _}; + + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for Generics { + fn to_tokens(&self, tokens: &mut TokenStream) { + if self.params.is_empty() { + return; + } + + TokensOrDefault(&self.lt_token).to_tokens(tokens); + + // Print lifetimes before types and consts, regardless of their + // order in self.params. 
+ let mut trailing_or_empty = true; + for param in self.params.pairs() { + if let GenericParam::Lifetime(_) = **param.value() { + param.to_tokens(tokens); + trailing_or_empty = param.punct().is_some(); + } + } + for param in self.params.pairs() { + match param.value() { + GenericParam::Type(_) | GenericParam::Const(_) => { + if !trailing_or_empty { + <Token![,]>::default().to_tokens(tokens); + trailing_or_empty = true; + } + param.to_tokens(tokens); + } + GenericParam::Lifetime(_) => {} + } + } + + TokensOrDefault(&self.gt_token).to_tokens(tokens); + } + } + + impl<'a> ToTokens for ImplGenerics<'a> { + fn to_tokens(&self, tokens: &mut TokenStream) { + if self.0.params.is_empty() { + return; + } + + TokensOrDefault(&self.0.lt_token).to_tokens(tokens); + + // Print lifetimes before types and consts, regardless of their + // order in self.params. + let mut trailing_or_empty = true; + for param in self.0.params.pairs() { + if let GenericParam::Lifetime(_) = **param.value() { + param.to_tokens(tokens); + trailing_or_empty = param.punct().is_some(); + } + } + for param in self.0.params.pairs() { + if let GenericParam::Lifetime(_) = **param.value() { + continue; + } + if !trailing_or_empty { + <Token![,]>::default().to_tokens(tokens); + trailing_or_empty = true; + } + match param.value() { + GenericParam::Lifetime(_) => unreachable!(), + GenericParam::Type(param) => { + // Leave off the type parameter defaults + tokens.append_all(param.attrs.outer()); + param.ident.to_tokens(tokens); + if !param.bounds.is_empty() { + TokensOrDefault(¶m.colon_token).to_tokens(tokens); + param.bounds.to_tokens(tokens); + } + } + GenericParam::Const(param) => { + // Leave off the const parameter defaults + tokens.append_all(param.attrs.outer()); + param.const_token.to_tokens(tokens); + param.ident.to_tokens(tokens); + param.colon_token.to_tokens(tokens); + param.ty.to_tokens(tokens); + } + } + param.punct().to_tokens(tokens); + } + + TokensOrDefault(&self.0.gt_token).to_tokens(tokens); + } + } + + impl<'a> ToTokens for TypeGenerics<'a> { + fn to_tokens(&self, tokens: &mut TokenStream) { + if self.0.params.is_empty() { + return; + } + + TokensOrDefault(&self.0.lt_token).to_tokens(tokens); + + // Print lifetimes before types and consts, regardless of their + // order in self.params. 
+ let mut trailing_or_empty = true; + for param in self.0.params.pairs() { + if let GenericParam::Lifetime(def) = *param.value() { + // Leave off the lifetime bounds and attributes + def.lifetime.to_tokens(tokens); + param.punct().to_tokens(tokens); + trailing_or_empty = param.punct().is_some(); + } + } + for param in self.0.params.pairs() { + if let GenericParam::Lifetime(_) = **param.value() { + continue; + } + if !trailing_or_empty { + <Token![,]>::default().to_tokens(tokens); + trailing_or_empty = true; + } + match param.value() { + GenericParam::Lifetime(_) => unreachable!(), + GenericParam::Type(param) => { + // Leave off the type parameter defaults + param.ident.to_tokens(tokens); + } + GenericParam::Const(param) => { + // Leave off the const parameter defaults + param.ident.to_tokens(tokens); + } + } + param.punct().to_tokens(tokens); + } + + TokensOrDefault(&self.0.gt_token).to_tokens(tokens); + } + } + + impl<'a> ToTokens for Turbofish<'a> { + fn to_tokens(&self, tokens: &mut TokenStream) { + if !self.0.params.is_empty() { + <Token![::]>::default().to_tokens(tokens); + TypeGenerics(self.0).to_tokens(tokens); + } + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for BoundLifetimes { + fn to_tokens(&self, tokens: &mut TokenStream) { + self.for_token.to_tokens(tokens); + self.lt_token.to_tokens(tokens); + self.lifetimes.to_tokens(tokens); + self.gt_token.to_tokens(tokens); + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for LifetimeParam { + fn to_tokens(&self, tokens: &mut TokenStream) { + tokens.append_all(self.attrs.outer()); + self.lifetime.to_tokens(tokens); + if !self.bounds.is_empty() { + TokensOrDefault(&self.colon_token).to_tokens(tokens); + self.bounds.to_tokens(tokens); + } + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for TypeParam { + fn to_tokens(&self, tokens: &mut TokenStream) { + tokens.append_all(self.attrs.outer()); + self.ident.to_tokens(tokens); + if !self.bounds.is_empty() { + TokensOrDefault(&self.colon_token).to_tokens(tokens); + self.bounds.to_tokens(tokens); + } + if let Some(default) = &self.default { + TokensOrDefault(&self.eq_token).to_tokens(tokens); + default.to_tokens(tokens); + } + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for TraitBound { + fn to_tokens(&self, tokens: &mut TokenStream) { + let to_tokens = |tokens: &mut TokenStream| { + self.modifier.to_tokens(tokens); + self.lifetimes.to_tokens(tokens); + self.path.to_tokens(tokens); + }; + match &self.paren_token { + Some(paren) => paren.surround(tokens, to_tokens), + None => to_tokens(tokens), + } + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for TraitBoundModifier { + fn to_tokens(&self, tokens: &mut TokenStream) { + match self { + TraitBoundModifier::None => {} + TraitBoundModifier::Maybe(t) => t.to_tokens(tokens), + } + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for ConstParam { + fn to_tokens(&self, tokens: &mut TokenStream) { + tokens.append_all(self.attrs.outer()); + self.const_token.to_tokens(tokens); + self.ident.to_tokens(tokens); + self.colon_token.to_tokens(tokens); + self.ty.to_tokens(tokens); + if let Some(default) = &self.default { + TokensOrDefault(&self.eq_token).to_tokens(tokens); + print_const_argument(default, tokens); + } + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for WhereClause { + fn to_tokens(&self, tokens: &mut TokenStream) { + if 
!self.predicates.is_empty() { + self.where_token.to_tokens(tokens); + self.predicates.to_tokens(tokens); + } + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for PredicateLifetime { + fn to_tokens(&self, tokens: &mut TokenStream) { + self.lifetime.to_tokens(tokens); + self.colon_token.to_tokens(tokens); + self.bounds.to_tokens(tokens); + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for PredicateType { + fn to_tokens(&self, tokens: &mut TokenStream) { + self.lifetimes.to_tokens(tokens); + self.bounded_ty.to_tokens(tokens); + self.colon_token.to_tokens(tokens); + self.bounds.to_tokens(tokens); + } + } + + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for PreciseCapture { + fn to_tokens(&self, tokens: &mut TokenStream) { + self.use_token.to_tokens(tokens); + self.lt_token.to_tokens(tokens); + + // Print lifetimes before types and consts, regardless of their + // order in self.params. + let mut trailing_or_empty = true; + for param in self.params.pairs() { + if let CapturedParam::Lifetime(_) = **param.value() { + param.to_tokens(tokens); + trailing_or_empty = param.punct().is_some(); + } + } + for param in self.params.pairs() { + if let CapturedParam::Ident(_) = **param.value() { + if !trailing_or_empty { + <Token![,]>::default().to_tokens(tokens); + trailing_or_empty = true; + } + param.to_tokens(tokens); + } + } + + self.gt_token.to_tokens(tokens); + } + } + + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for CapturedParam { + fn to_tokens(&self, tokens: &mut TokenStream) { + match self { + CapturedParam::Lifetime(lifetime) => lifetime.to_tokens(tokens), + CapturedParam::Ident(ident) => ident.to_tokens(tokens), + } + } + } + + pub(crate) fn print_const_argument(expr: &Expr, tokens: &mut TokenStream) { + match expr { + Expr::Lit(expr) => expr.to_tokens(tokens), + + Expr::Path(expr) + if expr.attrs.is_empty() + && expr.qself.is_none() + && expr.path.get_ident().is_some() => + { + expr.to_tokens(tokens); + } + + #[cfg(feature = "full")] + Expr::Block(expr) => expr.to_tokens(tokens), + + #[cfg(not(feature = "full"))] + Expr::Verbatim(expr) => expr.to_tokens(tokens), + + // ERROR CORRECTION: Add braces to make sure that the + // generated code is valid. + _ => token::Brace::default().surround(tokens, |tokens| { + #[cfg(feature = "full")] + expr::printing::print_expr(expr, tokens, FixupContext::new_stmt()); + + #[cfg(not(feature = "full"))] + expr.to_tokens(tokens); + }), + } + } +} diff --git a/vendor/syn/src/group.rs b/vendor/syn/src/group.rs new file mode 100644 index 00000000000000..1534ae995dd5ab --- /dev/null +++ b/vendor/syn/src/group.rs @@ -0,0 +1,291 @@ +use crate::error::Result; +use crate::parse::ParseBuffer; +use crate::token; +use proc_macro2::extra::DelimSpan; +use proc_macro2::Delimiter; + +// Not public API. +#[doc(hidden)] +pub struct Parens<'a> { + #[doc(hidden)] + pub token: token::Paren, + #[doc(hidden)] + pub content: ParseBuffer<'a>, +} + +// Not public API. +#[doc(hidden)] +pub struct Braces<'a> { + #[doc(hidden)] + pub token: token::Brace, + #[doc(hidden)] + pub content: ParseBuffer<'a>, +} + +// Not public API. +#[doc(hidden)] +pub struct Brackets<'a> { + #[doc(hidden)] + pub token: token::Bracket, + #[doc(hidden)] + pub content: ParseBuffer<'a>, +} + +// Not public API. 
+#[cfg(any(feature = "full", feature = "derive"))] +#[doc(hidden)] +pub struct Group<'a> { + #[doc(hidden)] + pub token: token::Group, + #[doc(hidden)] + pub content: ParseBuffer<'a>, +} + +// Not public API. +#[doc(hidden)] +pub fn parse_parens<'a>(input: &ParseBuffer<'a>) -> Result<Parens<'a>> { + parse_delimited(input, Delimiter::Parenthesis).map(|(span, content)| Parens { + token: token::Paren(span), + content, + }) +} + +// Not public API. +#[doc(hidden)] +pub fn parse_braces<'a>(input: &ParseBuffer<'a>) -> Result<Braces<'a>> { + parse_delimited(input, Delimiter::Brace).map(|(span, content)| Braces { + token: token::Brace(span), + content, + }) +} + +// Not public API. +#[doc(hidden)] +pub fn parse_brackets<'a>(input: &ParseBuffer<'a>) -> Result<Brackets<'a>> { + parse_delimited(input, Delimiter::Bracket).map(|(span, content)| Brackets { + token: token::Bracket(span), + content, + }) +} + +#[cfg(any(feature = "full", feature = "derive"))] +pub(crate) fn parse_group<'a>(input: &ParseBuffer<'a>) -> Result<Group<'a>> { + parse_delimited(input, Delimiter::None).map(|(span, content)| Group { + token: token::Group(span.join()), + content, + }) +} + +fn parse_delimited<'a>( + input: &ParseBuffer<'a>, + delimiter: Delimiter, +) -> Result<(DelimSpan, ParseBuffer<'a>)> { + input.step(|cursor| { + if let Some((content, span, rest)) = cursor.group(delimiter) { + let scope = span.close(); + let nested = crate::parse::advance_step_cursor(cursor, content); + let unexpected = crate::parse::get_unexpected(input); + let content = crate::parse::new_parse_buffer(scope, nested, unexpected); + Ok(((span, content), rest)) + } else { + let message = match delimiter { + Delimiter::Parenthesis => "expected parentheses", + Delimiter::Brace => "expected curly braces", + Delimiter::Bracket => "expected square brackets", + Delimiter::None => "expected invisible group", + }; + Err(cursor.error(message)) + } + }) +} + +/// Parse a set of parentheses and expose their content to subsequent parsers. +/// +/// # Example +/// +/// ``` +/// # use quote::quote; +/// # +/// use syn::{parenthesized, token, Ident, Result, Token, Type}; +/// use syn::parse::{Parse, ParseStream}; +/// use syn::punctuated::Punctuated; +/// +/// // Parse a simplified tuple struct syntax like: +/// // +/// // struct S(A, B); +/// struct TupleStruct { +/// struct_token: Token![struct], +/// ident: Ident, +/// paren_token: token::Paren, +/// fields: Punctuated<Type, Token![,]>, +/// semi_token: Token![;], +/// } +/// +/// impl Parse for TupleStruct { +/// fn parse(input: ParseStream) -> Result<Self> { +/// let content; +/// Ok(TupleStruct { +/// struct_token: input.parse()?, +/// ident: input.parse()?, +/// paren_token: parenthesized!(content in input), +/// fields: content.parse_terminated(Type::parse, Token![,])?, +/// semi_token: input.parse()?, +/// }) +/// } +/// } +/// # +/// # fn main() { +/// # let input = quote! { +/// # struct S(A, B); +/// # }; +/// # syn::parse2::<TupleStruct>(input).unwrap(); +/// # } +/// ``` +#[macro_export] +#[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] +macro_rules! parenthesized { + ($content:ident in $cursor:expr) => { + match $crate::__private::parse_parens(&$cursor) { + $crate::__private::Ok(parens) => { + $content = parens.content; + parens.token + } + $crate::__private::Err(error) => { + return $crate::__private::Err(error); + } + } + }; +} + +/// Parse a set of curly braces and expose their content to subsequent parsers. 
+/// +/// # Example +/// +/// ``` +/// # use quote::quote; +/// # +/// use syn::{braced, token, Ident, Result, Token, Type}; +/// use syn::parse::{Parse, ParseStream}; +/// use syn::punctuated::Punctuated; +/// +/// // Parse a simplified struct syntax like: +/// // +/// // struct S { +/// // a: A, +/// // b: B, +/// // } +/// struct Struct { +/// struct_token: Token![struct], +/// ident: Ident, +/// brace_token: token::Brace, +/// fields: Punctuated<Field, Token![,]>, +/// } +/// +/// struct Field { +/// name: Ident, +/// colon_token: Token![:], +/// ty: Type, +/// } +/// +/// impl Parse for Struct { +/// fn parse(input: ParseStream) -> Result<Self> { +/// let content; +/// Ok(Struct { +/// struct_token: input.parse()?, +/// ident: input.parse()?, +/// brace_token: braced!(content in input), +/// fields: content.parse_terminated(Field::parse, Token![,])?, +/// }) +/// } +/// } +/// +/// impl Parse for Field { +/// fn parse(input: ParseStream) -> Result<Self> { +/// Ok(Field { +/// name: input.parse()?, +/// colon_token: input.parse()?, +/// ty: input.parse()?, +/// }) +/// } +/// } +/// # +/// # fn main() { +/// # let input = quote! { +/// # struct S { +/// # a: A, +/// # b: B, +/// # } +/// # }; +/// # syn::parse2::<Struct>(input).unwrap(); +/// # } +/// ``` +#[macro_export] +#[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] +macro_rules! braced { + ($content:ident in $cursor:expr) => { + match $crate::__private::parse_braces(&$cursor) { + $crate::__private::Ok(braces) => { + $content = braces.content; + braces.token + } + $crate::__private::Err(error) => { + return $crate::__private::Err(error); + } + } + }; +} + +/// Parse a set of square brackets and expose their content to subsequent +/// parsers. +/// +/// # Example +/// +/// ``` +/// # use quote::quote; +/// # +/// use proc_macro2::TokenStream; +/// use syn::{bracketed, token, Result, Token}; +/// use syn::parse::{Parse, ParseStream}; +/// +/// // Parse an outer attribute like: +/// // +/// // #[repr(C, packed)] +/// struct OuterAttribute { +/// pound_token: Token![#], +/// bracket_token: token::Bracket, +/// content: TokenStream, +/// } +/// +/// impl Parse for OuterAttribute { +/// fn parse(input: ParseStream) -> Result<Self> { +/// let content; +/// Ok(OuterAttribute { +/// pound_token: input.parse()?, +/// bracket_token: bracketed!(content in input), +/// content: content.parse()?, +/// }) +/// } +/// } +/// # +/// # fn main() { +/// # let input = quote! { +/// # #[repr(C, packed)] +/// # }; +/// # syn::parse2::<OuterAttribute>(input).unwrap(); +/// # } +/// ``` +#[macro_export] +#[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] +macro_rules! bracketed { + ($content:ident in $cursor:expr) => { + match $crate::__private::parse_brackets(&$cursor) { + $crate::__private::Ok(brackets) => { + $content = brackets.content; + brackets.token + } + $crate::__private::Err(error) => { + return $crate::__private::Err(error); + } + } + }; +} diff --git a/vendor/syn/src/ident.rs b/vendor/syn/src/ident.rs new file mode 100644 index 00000000000000..8a8e8a50d9b0bf --- /dev/null +++ b/vendor/syn/src/ident.rs @@ -0,0 +1,108 @@ +#[cfg(feature = "parsing")] +use crate::lookahead; + +pub use proc_macro2::Ident; + +#[cfg(feature = "parsing")] +pub_if_not_doc! { + #[doc(hidden)] + #[allow(non_snake_case)] + pub fn Ident(marker: lookahead::TokenMarker) -> Ident { + match marker {} + } +} + +macro_rules! 
ident_from_token { + ($token:ident) => { + impl From<Token![$token]> for Ident { + fn from(token: Token![$token]) -> Ident { + Ident::new(stringify!($token), token.span) + } + } + }; +} + +ident_from_token!(self); +ident_from_token!(Self); +ident_from_token!(super); +ident_from_token!(crate); +ident_from_token!(extern); + +impl From<Token![_]> for Ident { + fn from(token: Token![_]) -> Ident { + Ident::new("_", token.span) + } +} + +pub(crate) fn xid_ok(symbol: &str) -> bool { + let mut chars = symbol.chars(); + let first = chars.next().unwrap(); + if !(first == '_' || unicode_ident::is_xid_start(first)) { + return false; + } + for ch in chars { + if !unicode_ident::is_xid_continue(ch) { + return false; + } + } + true +} + +#[cfg(feature = "parsing")] +mod parsing { + use crate::buffer::Cursor; + use crate::error::Result; + use crate::parse::{Parse, ParseStream}; + use crate::token::Token; + use proc_macro2::Ident; + + fn accept_as_ident(ident: &Ident) -> bool { + match ident.to_string().as_str() { + "_" | + // Based on https://doc.rust-lang.org/1.65.0/reference/keywords.html + "abstract" | "as" | "async" | "await" | "become" | "box" | "break" | + "const" | "continue" | "crate" | "do" | "dyn" | "else" | "enum" | + "extern" | "false" | "final" | "fn" | "for" | "if" | "impl" | "in" | + "let" | "loop" | "macro" | "match" | "mod" | "move" | "mut" | + "override" | "priv" | "pub" | "ref" | "return" | "Self" | "self" | + "static" | "struct" | "super" | "trait" | "true" | "try" | "type" | + "typeof" | "unsafe" | "unsized" | "use" | "virtual" | "where" | + "while" | "yield" => false, + _ => true, + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + impl Parse for Ident { + fn parse(input: ParseStream) -> Result<Self> { + input.step(|cursor| { + if let Some((ident, rest)) = cursor.ident() { + if accept_as_ident(&ident) { + Ok((ident, rest)) + } else { + Err(cursor.error(format_args!( + "expected identifier, found keyword `{}`", + ident, + ))) + } + } else { + Err(cursor.error("expected identifier")) + } + }) + } + } + + impl Token for Ident { + fn peek(cursor: Cursor) -> bool { + if let Some((ident, _rest)) = cursor.ident() { + accept_as_ident(&ident) + } else { + false + } + } + + fn display() -> &'static str { + "identifier" + } + } +} diff --git a/vendor/syn/src/item.rs b/vendor/syn/src/item.rs new file mode 100644 index 00000000000000..00beb0d3686992 --- /dev/null +++ b/vendor/syn/src/item.rs @@ -0,0 +1,3490 @@ +use crate::attr::Attribute; +use crate::data::{Fields, FieldsNamed, Variant}; +use crate::derive::{Data, DataEnum, DataStruct, DataUnion, DeriveInput}; +use crate::expr::Expr; +use crate::generics::{Generics, TypeParamBound}; +use crate::ident::Ident; +use crate::lifetime::Lifetime; +use crate::mac::Macro; +use crate::pat::{Pat, PatType}; +use crate::path::Path; +use crate::punctuated::Punctuated; +use crate::restriction::Visibility; +use crate::stmt::Block; +use crate::token; +use crate::ty::{Abi, ReturnType, Type}; +use proc_macro2::TokenStream; +#[cfg(feature = "parsing")] +use std::mem; + +ast_enum_of_structs! { + /// Things that can appear directly inside of a module or scope. + /// + /// # Syntax tree enum + /// + /// This type is a [syntax tree enum]. + /// + /// [syntax tree enum]: crate::expr::Expr#syntax-tree-enums + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + #[non_exhaustive] + pub enum Item { + /// A constant item: `const MAX: u16 = 65535`. + Const(ItemConst), + + /// An enum definition: `enum Foo<A, B> { A(A), B(B) }`. 
+ Enum(ItemEnum), + + /// An `extern crate` item: `extern crate serde`. + ExternCrate(ItemExternCrate), + + /// A free-standing function: `fn process(n: usize) -> Result<()> { ... + /// }`. + Fn(ItemFn), + + /// A block of foreign items: `extern "C" { ... }`. + ForeignMod(ItemForeignMod), + + /// An impl block providing trait or associated items: `impl<A> Trait + /// for Data<A> { ... }`. + Impl(ItemImpl), + + /// A macro invocation, which includes `macro_rules!` definitions. + Macro(ItemMacro), + + /// A module or module declaration: `mod m` or `mod m { ... }`. + Mod(ItemMod), + + /// A static item: `static BIKE: Shed = Shed(42)`. + Static(ItemStatic), + + /// A struct definition: `struct Foo<A> { x: A }`. + Struct(ItemStruct), + + /// A trait definition: `pub trait Iterator { ... }`. + Trait(ItemTrait), + + /// A trait alias: `pub trait SharableIterator = Iterator + Sync`. + TraitAlias(ItemTraitAlias), + + /// A type alias: `type Result<T> = std::result::Result<T, MyError>`. + Type(ItemType), + + /// A union definition: `union Foo<A, B> { x: A, y: B }`. + Union(ItemUnion), + + /// A use declaration: `use std::collections::HashMap`. + Use(ItemUse), + + /// Tokens forming an item not interpreted by Syn. + Verbatim(TokenStream), + + // For testing exhaustiveness in downstream code, use the following idiom: + // + // match item { + // #![cfg_attr(test, deny(non_exhaustive_omitted_patterns))] + // + // Item::Const(item) => {...} + // Item::Enum(item) => {...} + // ... + // Item::Verbatim(item) => {...} + // + // _ => { /* some sane fallback */ } + // } + // + // This way we fail your tests but don't break your library when adding + // a variant. You will be notified by a test failure when a variant is + // added, so that you can add code to handle it, but your library will + // continue to compile and work for downstream users in the interim. + } +} + +ast_struct! { + /// A constant item: `const MAX: u16 = 65535`. + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + pub struct ItemConst { + pub attrs: Vec<Attribute>, + pub vis: Visibility, + pub const_token: Token![const], + pub ident: Ident, + pub generics: Generics, + pub colon_token: Token![:], + pub ty: Box<Type>, + pub eq_token: Token![=], + pub expr: Box<Expr>, + pub semi_token: Token![;], + } +} + +ast_struct! { + /// An enum definition: `enum Foo<A, B> { A(A), B(B) }`. + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + pub struct ItemEnum { + pub attrs: Vec<Attribute>, + pub vis: Visibility, + pub enum_token: Token![enum], + pub ident: Ident, + pub generics: Generics, + pub brace_token: token::Brace, + pub variants: Punctuated<Variant, Token![,]>, + } +} + +ast_struct! { + /// An `extern crate` item: `extern crate serde`. + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + pub struct ItemExternCrate { + pub attrs: Vec<Attribute>, + pub vis: Visibility, + pub extern_token: Token![extern], + pub crate_token: Token![crate], + pub ident: Ident, + pub rename: Option<(Token![as], Ident)>, + pub semi_token: Token![;], + } +} + +ast_struct! { + /// A free-standing function: `fn process(n: usize) -> Result<()> { ... }`. + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + pub struct ItemFn { + pub attrs: Vec<Attribute>, + pub vis: Visibility, + pub sig: Signature, + pub block: Box<Block>, + } +} + +ast_struct! { + /// A block of foreign items: `extern "C" { ... }`. 
+ #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + pub struct ItemForeignMod { + pub attrs: Vec<Attribute>, + pub unsafety: Option<Token![unsafe]>, + pub abi: Abi, + pub brace_token: token::Brace, + pub items: Vec<ForeignItem>, + } +} + +ast_struct! { + /// An impl block providing trait or associated items: `impl<A> Trait + /// for Data<A> { ... }`. + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + pub struct ItemImpl { + pub attrs: Vec<Attribute>, + pub defaultness: Option<Token![default]>, + pub unsafety: Option<Token![unsafe]>, + pub impl_token: Token![impl], + pub generics: Generics, + /// Trait this impl implements. + pub trait_: Option<(Option<Token![!]>, Path, Token![for])>, + /// The Self type of the impl. + pub self_ty: Box<Type>, + pub brace_token: token::Brace, + pub items: Vec<ImplItem>, + } +} + +ast_struct! { + /// A macro invocation, which includes `macro_rules!` definitions. + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + pub struct ItemMacro { + pub attrs: Vec<Attribute>, + /// The `example` in `macro_rules! example { ... }`. + pub ident: Option<Ident>, + pub mac: Macro, + pub semi_token: Option<Token![;]>, + } +} + +ast_struct! { + /// A module or module declaration: `mod m` or `mod m { ... }`. + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + pub struct ItemMod { + pub attrs: Vec<Attribute>, + pub vis: Visibility, + pub unsafety: Option<Token![unsafe]>, + pub mod_token: Token![mod], + pub ident: Ident, + pub content: Option<(token::Brace, Vec<Item>)>, + pub semi: Option<Token![;]>, + } +} + +ast_struct! { + /// A static item: `static BIKE: Shed = Shed(42)`. + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + pub struct ItemStatic { + pub attrs: Vec<Attribute>, + pub vis: Visibility, + pub static_token: Token![static], + pub mutability: StaticMutability, + pub ident: Ident, + pub colon_token: Token![:], + pub ty: Box<Type>, + pub eq_token: Token![=], + pub expr: Box<Expr>, + pub semi_token: Token![;], + } +} + +ast_struct! { + /// A struct definition: `struct Foo<A> { x: A }`. + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + pub struct ItemStruct { + pub attrs: Vec<Attribute>, + pub vis: Visibility, + pub struct_token: Token![struct], + pub ident: Ident, + pub generics: Generics, + pub fields: Fields, + pub semi_token: Option<Token![;]>, + } +} + +ast_struct! { + /// A trait definition: `pub trait Iterator { ... }`. + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + pub struct ItemTrait { + pub attrs: Vec<Attribute>, + pub vis: Visibility, + pub unsafety: Option<Token![unsafe]>, + pub auto_token: Option<Token![auto]>, + pub restriction: Option<ImplRestriction>, + pub trait_token: Token![trait], + pub ident: Ident, + pub generics: Generics, + pub colon_token: Option<Token![:]>, + pub supertraits: Punctuated<TypeParamBound, Token![+]>, + pub brace_token: token::Brace, + pub items: Vec<TraitItem>, + } +} + +ast_struct! { + /// A trait alias: `pub trait SharableIterator = Iterator + Sync`. + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + pub struct ItemTraitAlias { + pub attrs: Vec<Attribute>, + pub vis: Visibility, + pub trait_token: Token![trait], + pub ident: Ident, + pub generics: Generics, + pub eq_token: Token![=], + pub bounds: Punctuated<TypeParamBound, Token![+]>, + pub semi_token: Token![;], + } +} + +ast_struct! { + /// A type alias: `type Result<T> = std::result::Result<T, MyError>`. 
+ #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + pub struct ItemType { + pub attrs: Vec<Attribute>, + pub vis: Visibility, + pub type_token: Token![type], + pub ident: Ident, + pub generics: Generics, + pub eq_token: Token![=], + pub ty: Box<Type>, + pub semi_token: Token![;], + } +} + +ast_struct! { + /// A union definition: `union Foo<A, B> { x: A, y: B }`. + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + pub struct ItemUnion { + pub attrs: Vec<Attribute>, + pub vis: Visibility, + pub union_token: Token![union], + pub ident: Ident, + pub generics: Generics, + pub fields: FieldsNamed, + } +} + +ast_struct! { + /// A use declaration: `use std::collections::HashMap`. + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + pub struct ItemUse { + pub attrs: Vec<Attribute>, + pub vis: Visibility, + pub use_token: Token![use], + pub leading_colon: Option<Token![::]>, + pub tree: UseTree, + pub semi_token: Token![;], + } +} + +impl Item { + #[cfg(feature = "parsing")] + pub(crate) fn replace_attrs(&mut self, new: Vec<Attribute>) -> Vec<Attribute> { + match self { + Item::Const(ItemConst { attrs, .. }) + | Item::Enum(ItemEnum { attrs, .. }) + | Item::ExternCrate(ItemExternCrate { attrs, .. }) + | Item::Fn(ItemFn { attrs, .. }) + | Item::ForeignMod(ItemForeignMod { attrs, .. }) + | Item::Impl(ItemImpl { attrs, .. }) + | Item::Macro(ItemMacro { attrs, .. }) + | Item::Mod(ItemMod { attrs, .. }) + | Item::Static(ItemStatic { attrs, .. }) + | Item::Struct(ItemStruct { attrs, .. }) + | Item::Trait(ItemTrait { attrs, .. }) + | Item::TraitAlias(ItemTraitAlias { attrs, .. }) + | Item::Type(ItemType { attrs, .. }) + | Item::Union(ItemUnion { attrs, .. }) + | Item::Use(ItemUse { attrs, .. }) => mem::replace(attrs, new), + Item::Verbatim(_) => Vec::new(), + } + } +} + +impl From<DeriveInput> for Item { + fn from(input: DeriveInput) -> Item { + match input.data { + Data::Struct(data) => Item::Struct(ItemStruct { + attrs: input.attrs, + vis: input.vis, + struct_token: data.struct_token, + ident: input.ident, + generics: input.generics, + fields: data.fields, + semi_token: data.semi_token, + }), + Data::Enum(data) => Item::Enum(ItemEnum { + attrs: input.attrs, + vis: input.vis, + enum_token: data.enum_token, + ident: input.ident, + generics: input.generics, + brace_token: data.brace_token, + variants: data.variants, + }), + Data::Union(data) => Item::Union(ItemUnion { + attrs: input.attrs, + vis: input.vis, + union_token: data.union_token, + ident: input.ident, + generics: input.generics, + fields: data.fields, + }), + } + } +} + +impl From<ItemStruct> for DeriveInput { + fn from(input: ItemStruct) -> DeriveInput { + DeriveInput { + attrs: input.attrs, + vis: input.vis, + ident: input.ident, + generics: input.generics, + data: Data::Struct(DataStruct { + struct_token: input.struct_token, + fields: input.fields, + semi_token: input.semi_token, + }), + } + } +} + +impl From<ItemEnum> for DeriveInput { + fn from(input: ItemEnum) -> DeriveInput { + DeriveInput { + attrs: input.attrs, + vis: input.vis, + ident: input.ident, + generics: input.generics, + data: Data::Enum(DataEnum { + enum_token: input.enum_token, + brace_token: input.brace_token, + variants: input.variants, + }), + } + } +} + +impl From<ItemUnion> for DeriveInput { + fn from(input: ItemUnion) -> DeriveInput { + DeriveInput { + attrs: input.attrs, + vis: input.vis, + ident: input.ident, + generics: input.generics, + data: Data::Union(DataUnion { + union_token: input.union_token, + fields: input.fields, + }), + } + } +} + +ast_enum_of_structs! 
{ + /// A suffix of an import tree in a `use` item: `Type as Renamed` or `*`. + /// + /// # Syntax tree enum + /// + /// This type is a [syntax tree enum]. + /// + /// [syntax tree enum]: crate::expr::Expr#syntax-tree-enums + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + pub enum UseTree { + /// A path prefix of imports in a `use` item: `std::...`. + Path(UsePath), + + /// An identifier imported by a `use` item: `HashMap`. + Name(UseName), + + /// An renamed identifier imported by a `use` item: `HashMap as Map`. + Rename(UseRename), + + /// A glob import in a `use` item: `*`. + Glob(UseGlob), + + /// A braced group of imports in a `use` item: `{A, B, C}`. + Group(UseGroup), + } +} + +ast_struct! { + /// A path prefix of imports in a `use` item: `std::...`. + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + pub struct UsePath { + pub ident: Ident, + pub colon2_token: Token![::], + pub tree: Box<UseTree>, + } +} + +ast_struct! { + /// An identifier imported by a `use` item: `HashMap`. + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + pub struct UseName { + pub ident: Ident, + } +} + +ast_struct! { + /// An renamed identifier imported by a `use` item: `HashMap as Map`. + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + pub struct UseRename { + pub ident: Ident, + pub as_token: Token![as], + pub rename: Ident, + } +} + +ast_struct! { + /// A glob import in a `use` item: `*`. + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + pub struct UseGlob { + pub star_token: Token![*], + } +} + +ast_struct! { + /// A braced group of imports in a `use` item: `{A, B, C}`. + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + pub struct UseGroup { + pub brace_token: token::Brace, + pub items: Punctuated<UseTree, Token![,]>, + } +} + +ast_enum_of_structs! { + /// An item within an `extern` block. + /// + /// # Syntax tree enum + /// + /// This type is a [syntax tree enum]. + /// + /// [syntax tree enum]: crate::expr::Expr#syntax-tree-enums + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + #[non_exhaustive] + pub enum ForeignItem { + /// A foreign function in an `extern` block. + Fn(ForeignItemFn), + + /// A foreign static item in an `extern` block: `static ext: u8`. + Static(ForeignItemStatic), + + /// A foreign type in an `extern` block: `type void`. + Type(ForeignItemType), + + /// A macro invocation within an extern block. + Macro(ForeignItemMacro), + + /// Tokens in an `extern` block not interpreted by Syn. + Verbatim(TokenStream), + + // For testing exhaustiveness in downstream code, use the following idiom: + // + // match item { + // #![cfg_attr(test, deny(non_exhaustive_omitted_patterns))] + // + // ForeignItem::Fn(item) => {...} + // ForeignItem::Static(item) => {...} + // ... + // ForeignItem::Verbatim(item) => {...} + // + // _ => { /* some sane fallback */ } + // } + // + // This way we fail your tests but don't break your library when adding + // a variant. You will be notified by a test failure when a variant is + // added, so that you can add code to handle it, but your library will + // continue to compile and work for downstream users in the interim. + } +} + +ast_struct! { + /// A foreign function in an `extern` block. + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + pub struct ForeignItemFn { + pub attrs: Vec<Attribute>, + pub vis: Visibility, + pub sig: Signature, + pub semi_token: Token![;], + } +} + +ast_struct! { + /// A foreign static item in an `extern` block: `static ext: u8`. 
+ #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + pub struct ForeignItemStatic { + pub attrs: Vec<Attribute>, + pub vis: Visibility, + pub static_token: Token![static], + pub mutability: StaticMutability, + pub ident: Ident, + pub colon_token: Token![:], + pub ty: Box<Type>, + pub semi_token: Token![;], + } +} + +ast_struct! { + /// A foreign type in an `extern` block: `type void`. + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + pub struct ForeignItemType { + pub attrs: Vec<Attribute>, + pub vis: Visibility, + pub type_token: Token![type], + pub ident: Ident, + pub generics: Generics, + pub semi_token: Token![;], + } +} + +ast_struct! { + /// A macro invocation within an extern block. + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + pub struct ForeignItemMacro { + pub attrs: Vec<Attribute>, + pub mac: Macro, + pub semi_token: Option<Token![;]>, + } +} + +ast_enum_of_structs! { + /// An item declaration within the definition of a trait. + /// + /// # Syntax tree enum + /// + /// This type is a [syntax tree enum]. + /// + /// [syntax tree enum]: crate::expr::Expr#syntax-tree-enums + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + #[non_exhaustive] + pub enum TraitItem { + /// An associated constant within the definition of a trait. + Const(TraitItemConst), + + /// An associated function within the definition of a trait. + Fn(TraitItemFn), + + /// An associated type within the definition of a trait. + Type(TraitItemType), + + /// A macro invocation within the definition of a trait. + Macro(TraitItemMacro), + + /// Tokens within the definition of a trait not interpreted by Syn. + Verbatim(TokenStream), + + // For testing exhaustiveness in downstream code, use the following idiom: + // + // match item { + // #![cfg_attr(test, deny(non_exhaustive_omitted_patterns))] + // + // TraitItem::Const(item) => {...} + // TraitItem::Fn(item) => {...} + // ... + // TraitItem::Verbatim(item) => {...} + // + // _ => { /* some sane fallback */ } + // } + // + // This way we fail your tests but don't break your library when adding + // a variant. You will be notified by a test failure when a variant is + // added, so that you can add code to handle it, but your library will + // continue to compile and work for downstream users in the interim. + } +} + +ast_struct! { + /// An associated constant within the definition of a trait. + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + pub struct TraitItemConst { + pub attrs: Vec<Attribute>, + pub const_token: Token![const], + pub ident: Ident, + pub generics: Generics, + pub colon_token: Token![:], + pub ty: Type, + pub default: Option<(Token![=], Expr)>, + pub semi_token: Token![;], + } +} + +ast_struct! { + /// An associated function within the definition of a trait. + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + pub struct TraitItemFn { + pub attrs: Vec<Attribute>, + pub sig: Signature, + pub default: Option<Block>, + pub semi_token: Option<Token![;]>, + } +} + +ast_struct! { + /// An associated type within the definition of a trait. + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + pub struct TraitItemType { + pub attrs: Vec<Attribute>, + pub type_token: Token![type], + pub ident: Ident, + pub generics: Generics, + pub colon_token: Option<Token![:]>, + pub bounds: Punctuated<TypeParamBound, Token![+]>, + pub default: Option<(Token![=], Type)>, + pub semi_token: Token![;], + } +} + +ast_struct! { + /// A macro invocation within the definition of a trait. 
+ #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + pub struct TraitItemMacro { + pub attrs: Vec<Attribute>, + pub mac: Macro, + pub semi_token: Option<Token![;]>, + } +} + +ast_enum_of_structs! { + /// An item within an impl block. + /// + /// # Syntax tree enum + /// + /// This type is a [syntax tree enum]. + /// + /// [syntax tree enum]: crate::expr::Expr#syntax-tree-enums + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + #[non_exhaustive] + pub enum ImplItem { + /// An associated constant within an impl block. + Const(ImplItemConst), + + /// An associated function within an impl block. + Fn(ImplItemFn), + + /// An associated type within an impl block. + Type(ImplItemType), + + /// A macro invocation within an impl block. + Macro(ImplItemMacro), + + /// Tokens within an impl block not interpreted by Syn. + Verbatim(TokenStream), + + // For testing exhaustiveness in downstream code, use the following idiom: + // + // match item { + // #![cfg_attr(test, deny(non_exhaustive_omitted_patterns))] + // + // ImplItem::Const(item) => {...} + // ImplItem::Fn(item) => {...} + // ... + // ImplItem::Verbatim(item) => {...} + // + // _ => { /* some sane fallback */ } + // } + // + // This way we fail your tests but don't break your library when adding + // a variant. You will be notified by a test failure when a variant is + // added, so that you can add code to handle it, but your library will + // continue to compile and work for downstream users in the interim. + } +} + +ast_struct! { + /// An associated constant within an impl block. + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + pub struct ImplItemConst { + pub attrs: Vec<Attribute>, + pub vis: Visibility, + pub defaultness: Option<Token![default]>, + pub const_token: Token![const], + pub ident: Ident, + pub generics: Generics, + pub colon_token: Token![:], + pub ty: Type, + pub eq_token: Token![=], + pub expr: Expr, + pub semi_token: Token![;], + } +} + +ast_struct! { + /// An associated function within an impl block. + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + pub struct ImplItemFn { + pub attrs: Vec<Attribute>, + pub vis: Visibility, + pub defaultness: Option<Token![default]>, + pub sig: Signature, + pub block: Block, + } +} + +ast_struct! { + /// An associated type within an impl block. + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + pub struct ImplItemType { + pub attrs: Vec<Attribute>, + pub vis: Visibility, + pub defaultness: Option<Token![default]>, + pub type_token: Token![type], + pub ident: Ident, + pub generics: Generics, + pub eq_token: Token![=], + pub ty: Type, + pub semi_token: Token![;], + } +} + +ast_struct! { + /// A macro invocation within an impl block. + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + pub struct ImplItemMacro { + pub attrs: Vec<Attribute>, + pub mac: Macro, + pub semi_token: Option<Token![;]>, + } +} + +ast_struct! { + /// A function signature in a trait or implementation: `unsafe fn + /// initialize(&self)`. + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + pub struct Signature { + pub constness: Option<Token![const]>, + pub asyncness: Option<Token![async]>, + pub unsafety: Option<Token![unsafe]>, + pub abi: Option<Abi>, + pub fn_token: Token![fn], + pub ident: Ident, + pub generics: Generics, + pub paren_token: token::Paren, + pub inputs: Punctuated<FnArg, Token![,]>, + pub variadic: Option<Variadic>, + pub output: ReturnType, + } +} + +impl Signature { + /// A method's `self` receiver, such as `&self` or `self: Box<Self>`. 
+ pub fn receiver(&self) -> Option<&Receiver> { + let arg = self.inputs.first()?; + match arg { + FnArg::Receiver(receiver) => Some(receiver), + FnArg::Typed(_) => None, + } + } +} + +ast_enum_of_structs! { + /// An argument in a function signature: the `n: usize` in `fn f(n: usize)`. + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + pub enum FnArg { + /// The `self` argument of an associated method. + Receiver(Receiver), + + /// A function argument accepted by pattern and type. + Typed(PatType), + } +} + +ast_struct! { + /// The `self` argument of an associated method. + /// + /// If `colon_token` is present, the receiver is written with an explicit + /// type such as `self: Box<Self>`. If `colon_token` is absent, the receiver + /// is written in shorthand such as `self` or `&self` or `&mut self`. In the + /// shorthand case, the type in `ty` is reconstructed as one of `Self`, + /// `&Self`, or `&mut Self`. + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + pub struct Receiver { + pub attrs: Vec<Attribute>, + pub reference: Option<(Token![&], Option<Lifetime>)>, + pub mutability: Option<Token![mut]>, + pub self_token: Token![self], + pub colon_token: Option<Token![:]>, + pub ty: Box<Type>, + } +} + +impl Receiver { + pub fn lifetime(&self) -> Option<&Lifetime> { + self.reference.as_ref()?.1.as_ref() + } +} + +ast_struct! { + /// The variadic argument of a foreign function. + /// + /// ```rust + /// # struct c_char; + /// # struct c_int; + /// # + /// extern "C" { + /// fn printf(format: *const c_char, ...) -> c_int; + /// // ^^^ + /// } + /// ``` + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + pub struct Variadic { + pub attrs: Vec<Attribute>, + pub pat: Option<(Box<Pat>, Token![:])>, + pub dots: Token![...], + pub comma: Option<Token![,]>, + } +} + +ast_enum! { + /// The mutability of an `Item::Static` or `ForeignItem::Static`. + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + #[non_exhaustive] + pub enum StaticMutability { + Mut(Token![mut]), + None, + } +} + +ast_enum! { + /// Unused, but reserved for RFC 3323 restrictions. 
+ #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + #[non_exhaustive] + pub enum ImplRestriction {} + + + // TODO: https://rust-lang.github.io/rfcs/3323-restrictions.html + // + // pub struct ImplRestriction { + // pub impl_token: Token![impl], + // pub paren_token: token::Paren, + // pub in_token: Option<Token![in]>, + // pub path: Box<Path>, + // } +} + +#[cfg(feature = "parsing")] +pub(crate) mod parsing { + use crate::attr::{self, Attribute}; + use crate::derive; + use crate::error::{Error, Result}; + use crate::expr::Expr; + use crate::ext::IdentExt as _; + use crate::generics::{self, Generics, TypeParamBound}; + use crate::ident::Ident; + use crate::item::{ + FnArg, ForeignItem, ForeignItemFn, ForeignItemMacro, ForeignItemStatic, ForeignItemType, + ImplItem, ImplItemConst, ImplItemFn, ImplItemMacro, ImplItemType, Item, ItemConst, + ItemEnum, ItemExternCrate, ItemFn, ItemForeignMod, ItemImpl, ItemMacro, ItemMod, + ItemStatic, ItemStruct, ItemTrait, ItemTraitAlias, ItemType, ItemUnion, ItemUse, Receiver, + Signature, StaticMutability, TraitItem, TraitItemConst, TraitItemFn, TraitItemMacro, + TraitItemType, UseGlob, UseGroup, UseName, UsePath, UseRename, UseTree, Variadic, + }; + use crate::lifetime::Lifetime; + use crate::lit::LitStr; + use crate::mac::{self, Macro}; + use crate::parse::discouraged::Speculative as _; + use crate::parse::{Parse, ParseBuffer, ParseStream}; + use crate::pat::{Pat, PatType, PatWild}; + use crate::path::Path; + use crate::punctuated::Punctuated; + use crate::restriction::Visibility; + use crate::stmt::Block; + use crate::token; + use crate::ty::{Abi, ReturnType, Type, TypePath, TypeReference}; + use crate::verbatim; + use proc_macro2::TokenStream; + + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + impl Parse for Item { + fn parse(input: ParseStream) -> Result<Self> { + let begin = input.fork(); + let attrs = input.call(Attribute::parse_outer)?; + parse_rest_of_item(begin, attrs, input) + } + } + + pub(crate) fn parse_rest_of_item( + begin: ParseBuffer, + mut attrs: Vec<Attribute>, + input: ParseStream, + ) -> Result<Item> { + let ahead = input.fork(); + let vis: Visibility = ahead.parse()?; + + let lookahead = ahead.lookahead1(); + let allow_safe = false; + let mut item = if lookahead.peek(Token![fn]) || peek_signature(&ahead, allow_safe) { + let vis: Visibility = input.parse()?; + let sig: Signature = input.parse()?; + if input.peek(Token![;]) { + input.parse::<Token![;]>()?; + Ok(Item::Verbatim(verbatim::between(&begin, input))) + } else { + parse_rest_of_fn(input, Vec::new(), vis, sig).map(Item::Fn) + } + } else if lookahead.peek(Token![extern]) { + ahead.parse::<Token![extern]>()?; + let lookahead = ahead.lookahead1(); + if lookahead.peek(Token![crate]) { + input.parse().map(Item::ExternCrate) + } else if lookahead.peek(token::Brace) { + input.parse().map(Item::ForeignMod) + } else if lookahead.peek(LitStr) { + ahead.parse::<LitStr>()?; + let lookahead = ahead.lookahead1(); + if lookahead.peek(token::Brace) { + input.parse().map(Item::ForeignMod) + } else { + Err(lookahead.error()) + } + } else { + Err(lookahead.error()) + } + } else if lookahead.peek(Token![use]) { + let allow_crate_root_in_path = true; + match parse_item_use(input, allow_crate_root_in_path)? 
{ + Some(item_use) => Ok(Item::Use(item_use)), + None => Ok(Item::Verbatim(verbatim::between(&begin, input))), + } + } else if lookahead.peek(Token![static]) { + let vis = input.parse()?; + let static_token = input.parse()?; + let mutability = input.parse()?; + let ident = input.parse()?; + if input.peek(Token![=]) { + input.parse::<Token![=]>()?; + input.parse::<Expr>()?; + input.parse::<Token![;]>()?; + Ok(Item::Verbatim(verbatim::between(&begin, input))) + } else { + let colon_token = input.parse()?; + let ty = input.parse()?; + if input.peek(Token![;]) { + input.parse::<Token![;]>()?; + Ok(Item::Verbatim(verbatim::between(&begin, input))) + } else { + Ok(Item::Static(ItemStatic { + attrs: Vec::new(), + vis, + static_token, + mutability, + ident, + colon_token, + ty, + eq_token: input.parse()?, + expr: input.parse()?, + semi_token: input.parse()?, + })) + } + } + } else if lookahead.peek(Token![const]) { + let vis = input.parse()?; + let const_token: Token![const] = input.parse()?; + let lookahead = input.lookahead1(); + let ident = if lookahead.peek(Ident) || lookahead.peek(Token![_]) { + input.call(Ident::parse_any)? + } else { + return Err(lookahead.error()); + }; + let mut generics: Generics = input.parse()?; + let colon_token = input.parse()?; + let ty = input.parse()?; + let value = if let Some(eq_token) = input.parse::<Option<Token![=]>>()? { + let expr: Expr = input.parse()?; + Some((eq_token, expr)) + } else { + None + }; + generics.where_clause = input.parse()?; + let semi_token: Token![;] = input.parse()?; + match value { + Some((eq_token, expr)) + if generics.lt_token.is_none() && generics.where_clause.is_none() => + { + Ok(Item::Const(ItemConst { + attrs: Vec::new(), + vis, + const_token, + ident, + generics, + colon_token, + ty, + eq_token, + expr: Box::new(expr), + semi_token, + })) + } + _ => Ok(Item::Verbatim(verbatim::between(&begin, input))), + } + } else if lookahead.peek(Token![unsafe]) { + ahead.parse::<Token![unsafe]>()?; + let lookahead = ahead.lookahead1(); + if lookahead.peek(Token![trait]) + || lookahead.peek(Token![auto]) && ahead.peek2(Token![trait]) + { + input.parse().map(Item::Trait) + } else if lookahead.peek(Token![impl]) { + let allow_verbatim_impl = true; + if let Some(item) = parse_impl(input, allow_verbatim_impl)? { + Ok(Item::Impl(item)) + } else { + Ok(Item::Verbatim(verbatim::between(&begin, input))) + } + } else if lookahead.peek(Token![extern]) { + input.parse().map(Item::ForeignMod) + } else if lookahead.peek(Token![mod]) { + input.parse().map(Item::Mod) + } else { + Err(lookahead.error()) + } + } else if lookahead.peek(Token![mod]) { + input.parse().map(Item::Mod) + } else if lookahead.peek(Token![type]) { + parse_item_type(begin, input) + } else if lookahead.peek(Token![struct]) { + input.parse().map(Item::Struct) + } else if lookahead.peek(Token![enum]) { + input.parse().map(Item::Enum) + } else if lookahead.peek(Token![union]) && ahead.peek2(Ident) { + input.parse().map(Item::Union) + } else if lookahead.peek(Token![trait]) { + input.call(parse_trait_or_trait_alias) + } else if lookahead.peek(Token![auto]) && ahead.peek2(Token![trait]) { + input.parse().map(Item::Trait) + } else if lookahead.peek(Token![impl]) + || lookahead.peek(Token![default]) && !ahead.peek2(Token![!]) + { + let allow_verbatim_impl = true; + if let Some(item) = parse_impl(input, allow_verbatim_impl)? 
{ + Ok(Item::Impl(item)) + } else { + Ok(Item::Verbatim(verbatim::between(&begin, input))) + } + } else if lookahead.peek(Token![macro]) { + input.advance_to(&ahead); + parse_macro2(begin, vis, input) + } else if vis.is_inherited() + && (lookahead.peek(Ident) + || lookahead.peek(Token![self]) + || lookahead.peek(Token![super]) + || lookahead.peek(Token![crate]) + || lookahead.peek(Token![::])) + { + input.parse().map(Item::Macro) + } else { + Err(lookahead.error()) + }?; + + attrs.extend(item.replace_attrs(Vec::new())); + item.replace_attrs(attrs); + Ok(item) + } + + struct FlexibleItemType { + vis: Visibility, + defaultness: Option<Token![default]>, + type_token: Token![type], + ident: Ident, + generics: Generics, + colon_token: Option<Token![:]>, + bounds: Punctuated<TypeParamBound, Token![+]>, + ty: Option<(Token![=], Type)>, + semi_token: Token![;], + } + + enum TypeDefaultness { + Optional, + Disallowed, + } + + enum WhereClauseLocation { + // type Ty<T> where T: 'static = T; + BeforeEq, + // type Ty<T> = T where T: 'static; + AfterEq, + // TODO: goes away once the migration period on rust-lang/rust#89122 is over + Both, + } + + impl FlexibleItemType { + fn parse( + input: ParseStream, + allow_defaultness: TypeDefaultness, + where_clause_location: WhereClauseLocation, + ) -> Result<Self> { + let vis: Visibility = input.parse()?; + let defaultness: Option<Token![default]> = match allow_defaultness { + TypeDefaultness::Optional => input.parse()?, + TypeDefaultness::Disallowed => None, + }; + let type_token: Token![type] = input.parse()?; + let ident: Ident = input.parse()?; + let mut generics: Generics = input.parse()?; + let (colon_token, bounds) = Self::parse_optional_bounds(input)?; + + match where_clause_location { + WhereClauseLocation::BeforeEq | WhereClauseLocation::Both => { + generics.where_clause = input.parse()?; + } + WhereClauseLocation::AfterEq => {} + } + + let ty = Self::parse_optional_definition(input)?; + + match where_clause_location { + WhereClauseLocation::AfterEq | WhereClauseLocation::Both + if generics.where_clause.is_none() => + { + generics.where_clause = input.parse()?; + } + _ => {} + } + + let semi_token: Token![;] = input.parse()?; + + Ok(FlexibleItemType { + vis, + defaultness, + type_token, + ident, + generics, + colon_token, + bounds, + ty, + semi_token, + }) + } + + fn parse_optional_bounds( + input: ParseStream, + ) -> Result<(Option<Token![:]>, Punctuated<TypeParamBound, Token![+]>)> { + let colon_token: Option<Token![:]> = input.parse()?; + + let mut bounds = Punctuated::new(); + if colon_token.is_some() { + loop { + if input.peek(Token![where]) || input.peek(Token![=]) || input.peek(Token![;]) { + break; + } + bounds.push_value({ + let allow_precise_capture = false; + let allow_const = true; + TypeParamBound::parse_single(input, allow_precise_capture, allow_const)? 
+ }); + if input.peek(Token![where]) || input.peek(Token![=]) || input.peek(Token![;]) { + break; + } + bounds.push_punct(input.parse::<Token![+]>()?); + } + } + + Ok((colon_token, bounds)) + } + + fn parse_optional_definition(input: ParseStream) -> Result<Option<(Token![=], Type)>> { + let eq_token: Option<Token![=]> = input.parse()?; + if let Some(eq_token) = eq_token { + let definition: Type = input.parse()?; + Ok(Some((eq_token, definition))) + } else { + Ok(None) + } + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + impl Parse for ItemMacro { + fn parse(input: ParseStream) -> Result<Self> { + let attrs = input.call(Attribute::parse_outer)?; + let path = input.call(Path::parse_mod_style)?; + let bang_token: Token![!] = input.parse()?; + let ident: Option<Ident> = if input.peek(Token![try]) { + input.call(Ident::parse_any).map(Some) + } else { + input.parse() + }?; + let (delimiter, tokens) = input.call(mac::parse_delimiter)?; + let semi_token: Option<Token![;]> = if !delimiter.is_brace() { + Some(input.parse()?) + } else { + None + }; + Ok(ItemMacro { + attrs, + ident, + mac: Macro { + path, + bang_token, + delimiter, + tokens, + }, + semi_token, + }) + } + } + + fn parse_macro2(begin: ParseBuffer, _vis: Visibility, input: ParseStream) -> Result<Item> { + input.parse::<Token![macro]>()?; + input.parse::<Ident>()?; + + let mut lookahead = input.lookahead1(); + if lookahead.peek(token::Paren) { + let paren_content; + parenthesized!(paren_content in input); + paren_content.parse::<TokenStream>()?; + lookahead = input.lookahead1(); + } + + if lookahead.peek(token::Brace) { + let brace_content; + braced!(brace_content in input); + brace_content.parse::<TokenStream>()?; + } else { + return Err(lookahead.error()); + } + + Ok(Item::Verbatim(verbatim::between(&begin, input))) + } + + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + impl Parse for ItemExternCrate { + fn parse(input: ParseStream) -> Result<Self> { + Ok(ItemExternCrate { + attrs: input.call(Attribute::parse_outer)?, + vis: input.parse()?, + extern_token: input.parse()?, + crate_token: input.parse()?, + ident: { + if input.peek(Token![self]) { + input.call(Ident::parse_any)? + } else { + input.parse()? + } + }, + rename: { + if input.peek(Token![as]) { + let as_token: Token![as] = input.parse()?; + let rename: Ident = if input.peek(Token![_]) { + Ident::from(input.parse::<Token![_]>()?) + } else { + input.parse()? 
+ }; + Some((as_token, rename)) + } else { + None + } + }, + semi_token: input.parse()?, + }) + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + impl Parse for ItemUse { + fn parse(input: ParseStream) -> Result<Self> { + let allow_crate_root_in_path = false; + parse_item_use(input, allow_crate_root_in_path).map(Option::unwrap) + } + } + + fn parse_item_use( + input: ParseStream, + allow_crate_root_in_path: bool, + ) -> Result<Option<ItemUse>> { + let attrs = input.call(Attribute::parse_outer)?; + let vis: Visibility = input.parse()?; + let use_token: Token![use] = input.parse()?; + let leading_colon: Option<Token![::]> = input.parse()?; + let tree = parse_use_tree(input, allow_crate_root_in_path && leading_colon.is_none())?; + let semi_token: Token![;] = input.parse()?; + + let tree = match tree { + Some(tree) => tree, + None => return Ok(None), + }; + + Ok(Some(ItemUse { + attrs, + vis, + use_token, + leading_colon, + tree, + semi_token, + })) + } + + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + impl Parse for UseTree { + fn parse(input: ParseStream) -> Result<UseTree> { + let allow_crate_root_in_path = false; + parse_use_tree(input, allow_crate_root_in_path).map(Option::unwrap) + } + } + + fn parse_use_tree( + input: ParseStream, + allow_crate_root_in_path: bool, + ) -> Result<Option<UseTree>> { + let lookahead = input.lookahead1(); + if lookahead.peek(Ident) + || lookahead.peek(Token![self]) + || lookahead.peek(Token![super]) + || lookahead.peek(Token![crate]) + || lookahead.peek(Token![try]) + { + let ident = input.call(Ident::parse_any)?; + if input.peek(Token![::]) { + Ok(Some(UseTree::Path(UsePath { + ident, + colon2_token: input.parse()?, + tree: Box::new(input.parse()?), + }))) + } else if input.peek(Token![as]) { + Ok(Some(UseTree::Rename(UseRename { + ident, + as_token: input.parse()?, + rename: { + if input.peek(Ident) { + input.parse()? + } else if input.peek(Token![_]) { + Ident::from(input.parse::<Token![_]>()?) + } else { + return Err(input.error("expected identifier or underscore")); + } + }, + }))) + } else { + Ok(Some(UseTree::Name(UseName { ident }))) + } + } else if lookahead.peek(Token![*]) { + Ok(Some(UseTree::Glob(UseGlob { + star_token: input.parse()?, + }))) + } else if lookahead.peek(token::Brace) { + let content; + let brace_token = braced!(content in input); + let mut items = Punctuated::new(); + let mut has_any_crate_root_in_path = false; + loop { + if content.is_empty() { + break; + } + let this_tree_starts_with_crate_root = + allow_crate_root_in_path && content.parse::<Option<Token![::]>>()?.is_some(); + has_any_crate_root_in_path |= this_tree_starts_with_crate_root; + match parse_use_tree( + &content, + allow_crate_root_in_path && !this_tree_starts_with_crate_root, + )? 
{ + Some(tree) if !has_any_crate_root_in_path => items.push_value(tree), + _ => has_any_crate_root_in_path = true, + } + if content.is_empty() { + break; + } + let comma: Token![,] = content.parse()?; + if !has_any_crate_root_in_path { + items.push_punct(comma); + } + } + if has_any_crate_root_in_path { + Ok(None) + } else { + Ok(Some(UseTree::Group(UseGroup { brace_token, items }))) + } + } else { + Err(lookahead.error()) + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + impl Parse for ItemStatic { + fn parse(input: ParseStream) -> Result<Self> { + Ok(ItemStatic { + attrs: input.call(Attribute::parse_outer)?, + vis: input.parse()?, + static_token: input.parse()?, + mutability: input.parse()?, + ident: input.parse()?, + colon_token: input.parse()?, + ty: input.parse()?, + eq_token: input.parse()?, + expr: input.parse()?, + semi_token: input.parse()?, + }) + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + impl Parse for ItemConst { + fn parse(input: ParseStream) -> Result<Self> { + let attrs = input.call(Attribute::parse_outer)?; + let vis: Visibility = input.parse()?; + let const_token: Token![const] = input.parse()?; + + let lookahead = input.lookahead1(); + let ident = if lookahead.peek(Ident) || lookahead.peek(Token![_]) { + input.call(Ident::parse_any)? + } else { + return Err(lookahead.error()); + }; + + let colon_token: Token![:] = input.parse()?; + let ty: Type = input.parse()?; + let eq_token: Token![=] = input.parse()?; + let expr: Expr = input.parse()?; + let semi_token: Token![;] = input.parse()?; + + Ok(ItemConst { + attrs, + vis, + const_token, + ident, + generics: Generics::default(), + colon_token, + ty: Box::new(ty), + eq_token, + expr: Box::new(expr), + semi_token, + }) + } + } + + fn peek_signature(input: ParseStream, allow_safe: bool) -> bool { + let fork = input.fork(); + fork.parse::<Option<Token![const]>>().is_ok() + && fork.parse::<Option<Token![async]>>().is_ok() + && ((allow_safe + && token::parsing::peek_keyword(fork.cursor(), "safe") + && token::parsing::keyword(&fork, "safe").is_ok()) + || fork.parse::<Option<Token![unsafe]>>().is_ok()) + && fork.parse::<Option<Abi>>().is_ok() + && fork.peek(Token![fn]) + } + + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + impl Parse for Signature { + fn parse(input: ParseStream) -> Result<Self> { + let allow_safe = false; + parse_signature(input, allow_safe).map(Option::unwrap) + } + } + + fn parse_signature(input: ParseStream, allow_safe: bool) -> Result<Option<Signature>> { + let constness: Option<Token![const]> = input.parse()?; + let asyncness: Option<Token![async]> = input.parse()?; + let unsafety: Option<Token![unsafe]> = input.parse()?; + let safe = allow_safe + && unsafety.is_none() + && token::parsing::peek_keyword(input.cursor(), "safe"); + if safe { + token::parsing::keyword(input, "safe")?; + } + let abi: Option<Abi> = input.parse()?; + let fn_token: Token![fn] = input.parse()?; + let ident: Ident = input.parse()?; + let mut generics: Generics = input.parse()?; + + let content; + let paren_token = parenthesized!(content in input); + let (inputs, variadic) = parse_fn_args(&content)?; + + let output: ReturnType = input.parse()?; + generics.where_clause = input.parse()?; + + Ok(if safe { + None + } else { + Some(Signature { + constness, + asyncness, + unsafety, + abi, + fn_token, + ident, + generics, + paren_token, + inputs, + variadic, + output, + }) + }) + } + + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + impl Parse for ItemFn { + fn parse(input: ParseStream) -> 
Result<Self> { + let outer_attrs = input.call(Attribute::parse_outer)?; + let vis: Visibility = input.parse()?; + let sig: Signature = input.parse()?; + parse_rest_of_fn(input, outer_attrs, vis, sig) + } + } + + fn parse_rest_of_fn( + input: ParseStream, + mut attrs: Vec<Attribute>, + vis: Visibility, + sig: Signature, + ) -> Result<ItemFn> { + let content; + let brace_token = braced!(content in input); + attr::parsing::parse_inner(&content, &mut attrs)?; + let stmts = content.call(Block::parse_within)?; + + Ok(ItemFn { + attrs, + vis, + sig, + block: Box::new(Block { brace_token, stmts }), + }) + } + + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + impl Parse for FnArg { + fn parse(input: ParseStream) -> Result<Self> { + let allow_variadic = false; + let attrs = input.call(Attribute::parse_outer)?; + match parse_fn_arg_or_variadic(input, attrs, allow_variadic)? { + FnArgOrVariadic::FnArg(arg) => Ok(arg), + FnArgOrVariadic::Variadic(_) => unreachable!(), + } + } + } + + enum FnArgOrVariadic { + FnArg(FnArg), + Variadic(Variadic), + } + + fn parse_fn_arg_or_variadic( + input: ParseStream, + attrs: Vec<Attribute>, + allow_variadic: bool, + ) -> Result<FnArgOrVariadic> { + let ahead = input.fork(); + if let Ok(mut receiver) = ahead.parse::<Receiver>() { + input.advance_to(&ahead); + receiver.attrs = attrs; + return Ok(FnArgOrVariadic::FnArg(FnArg::Receiver(receiver))); + } + + // Hack to parse pre-2018 syntax in + // test/ui/rfc-2565-param-attrs/param-attrs-pretty.rs + // because the rest of the test case is valuable. + if input.peek(Ident) && input.peek2(Token![<]) { + let span = input.span(); + return Ok(FnArgOrVariadic::FnArg(FnArg::Typed(PatType { + attrs, + pat: Box::new(Pat::Wild(PatWild { + attrs: Vec::new(), + underscore_token: Token![_](span), + })), + colon_token: Token![:](span), + ty: input.parse()?, + }))); + } + + let pat = Box::new(Pat::parse_single(input)?); + let colon_token: Token![:] = input.parse()?; + + if allow_variadic { + if let Some(dots) = input.parse::<Option<Token![...]>>()? { + return Ok(FnArgOrVariadic::Variadic(Variadic { + attrs, + pat: Some((pat, colon_token)), + dots, + comma: None, + })); + } + } + + Ok(FnArgOrVariadic::FnArg(FnArg::Typed(PatType { + attrs, + pat, + colon_token, + ty: input.parse()?, + }))) + } + + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + impl Parse for Receiver { + fn parse(input: ParseStream) -> Result<Self> { + let reference = if input.peek(Token![&]) { + let ampersand: Token![&] = input.parse()?; + let lifetime: Option<Lifetime> = input.parse()?; + Some((ampersand, lifetime)) + } else { + None + }; + let mutability: Option<Token![mut]> = input.parse()?; + let self_token: Token![self] = input.parse()?; + let colon_token: Option<Token![:]> = if reference.is_some() { + None + } else { + input.parse()? + }; + let ty: Type = if colon_token.is_some() { + input.parse()? 
+ } else { + let mut ty = Type::Path(TypePath { + qself: None, + path: Path::from(Ident::new("Self", self_token.span)), + }); + if let Some((ampersand, lifetime)) = reference.as_ref() { + ty = Type::Reference(TypeReference { + and_token: Token![&](ampersand.span), + lifetime: lifetime.clone(), + mutability: mutability.as_ref().map(|m| Token![mut](m.span)), + elem: Box::new(ty), + }); + } + ty + }; + Ok(Receiver { + attrs: Vec::new(), + reference, + mutability, + self_token, + colon_token, + ty: Box::new(ty), + }) + } + } + + fn parse_fn_args( + input: ParseStream, + ) -> Result<(Punctuated<FnArg, Token![,]>, Option<Variadic>)> { + let mut args = Punctuated::new(); + let mut variadic = None; + let mut has_receiver = false; + + while !input.is_empty() { + let attrs = input.call(Attribute::parse_outer)?; + + if let Some(dots) = input.parse::<Option<Token![...]>>()? { + variadic = Some(Variadic { + attrs, + pat: None, + dots, + comma: if input.is_empty() { + None + } else { + Some(input.parse()?) + }, + }); + break; + } + + let allow_variadic = true; + let arg = match parse_fn_arg_or_variadic(input, attrs, allow_variadic)? { + FnArgOrVariadic::FnArg(arg) => arg, + FnArgOrVariadic::Variadic(arg) => { + variadic = Some(Variadic { + comma: if input.is_empty() { + None + } else { + Some(input.parse()?) + }, + ..arg + }); + break; + } + }; + + match &arg { + FnArg::Receiver(receiver) if has_receiver => { + return Err(Error::new( + receiver.self_token.span, + "unexpected second method receiver", + )); + } + FnArg::Receiver(receiver) if !args.is_empty() => { + return Err(Error::new( + receiver.self_token.span, + "unexpected method receiver", + )); + } + FnArg::Receiver(_) => has_receiver = true, + FnArg::Typed(_) => {} + } + args.push_value(arg); + + if input.is_empty() { + break; + } + + let comma: Token![,] = input.parse()?; + args.push_punct(comma); + } + + Ok((args, variadic)) + } + + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + impl Parse for ItemMod { + fn parse(input: ParseStream) -> Result<Self> { + let mut attrs = input.call(Attribute::parse_outer)?; + let vis: Visibility = input.parse()?; + let unsafety: Option<Token![unsafe]> = input.parse()?; + let mod_token: Token![mod] = input.parse()?; + let ident: Ident = if input.peek(Token![try]) { + input.call(Ident::parse_any) + } else { + input.parse() + }?; + + let lookahead = input.lookahead1(); + if lookahead.peek(Token![;]) { + Ok(ItemMod { + attrs, + vis, + unsafety, + mod_token, + ident, + content: None, + semi: Some(input.parse()?), + }) + } else if lookahead.peek(token::Brace) { + let content; + let brace_token = braced!(content in input); + attr::parsing::parse_inner(&content, &mut attrs)?; + + let mut items = Vec::new(); + while !content.is_empty() { + items.push(content.parse()?); + } + + Ok(ItemMod { + attrs, + vis, + unsafety, + mod_token, + ident, + content: Some((brace_token, items)), + semi: None, + }) + } else { + Err(lookahead.error()) + } + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + impl Parse for ItemForeignMod { + fn parse(input: ParseStream) -> Result<Self> { + let mut attrs = input.call(Attribute::parse_outer)?; + let unsafety: Option<Token![unsafe]> = input.parse()?; + let abi: Abi = input.parse()?; + + let content; + let brace_token = braced!(content in input); + attr::parsing::parse_inner(&content, &mut attrs)?; + let mut items = Vec::new(); + while !content.is_empty() { + items.push(content.parse()?); + } + + Ok(ItemForeignMod { + attrs, + unsafety, + abi, + brace_token, + items, + }) + } + } 
+ + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + impl Parse for ForeignItem { + fn parse(input: ParseStream) -> Result<Self> { + let begin = input.fork(); + let mut attrs = input.call(Attribute::parse_outer)?; + let ahead = input.fork(); + let vis: Visibility = ahead.parse()?; + + let lookahead = ahead.lookahead1(); + let allow_safe = true; + let mut item = if lookahead.peek(Token![fn]) || peek_signature(&ahead, allow_safe) { + let vis: Visibility = input.parse()?; + let sig = parse_signature(input, allow_safe)?; + let has_safe = sig.is_none(); + let has_body = input.peek(token::Brace); + let semi_token: Option<Token![;]> = if has_body { + let content; + braced!(content in input); + content.call(Attribute::parse_inner)?; + content.call(Block::parse_within)?; + None + } else { + Some(input.parse()?) + }; + if has_safe || has_body { + Ok(ForeignItem::Verbatim(verbatim::between(&begin, input))) + } else { + Ok(ForeignItem::Fn(ForeignItemFn { + attrs: Vec::new(), + vis, + sig: sig.unwrap(), + semi_token: semi_token.unwrap(), + })) + } + } else if lookahead.peek(Token![static]) + || ((ahead.peek(Token![unsafe]) + || token::parsing::peek_keyword(ahead.cursor(), "safe")) + && ahead.peek2(Token![static])) + { + let vis = input.parse()?; + let unsafety: Option<Token![unsafe]> = input.parse()?; + let safe = + unsafety.is_none() && token::parsing::peek_keyword(input.cursor(), "safe"); + if safe { + token::parsing::keyword(input, "safe")?; + } + let static_token = input.parse()?; + let mutability = input.parse()?; + let ident = input.parse()?; + let colon_token = input.parse()?; + let ty = input.parse()?; + let has_value = input.peek(Token![=]); + if has_value { + input.parse::<Token![=]>()?; + input.parse::<Expr>()?; + } + let semi_token: Token![;] = input.parse()?; + if unsafety.is_some() || safe || has_value { + Ok(ForeignItem::Verbatim(verbatim::between(&begin, input))) + } else { + Ok(ForeignItem::Static(ForeignItemStatic { + attrs: Vec::new(), + vis, + static_token, + mutability, + ident, + colon_token, + ty, + semi_token, + })) + } + } else if lookahead.peek(Token![type]) { + parse_foreign_item_type(begin, input) + } else if vis.is_inherited() + && (lookahead.peek(Ident) + || lookahead.peek(Token![self]) + || lookahead.peek(Token![super]) + || lookahead.peek(Token![crate]) + || lookahead.peek(Token![::])) + { + input.parse().map(ForeignItem::Macro) + } else { + Err(lookahead.error()) + }?; + + let item_attrs = match &mut item { + ForeignItem::Fn(item) => &mut item.attrs, + ForeignItem::Static(item) => &mut item.attrs, + ForeignItem::Type(item) => &mut item.attrs, + ForeignItem::Macro(item) => &mut item.attrs, + ForeignItem::Verbatim(_) => return Ok(item), + }; + attrs.append(item_attrs); + *item_attrs = attrs; + + Ok(item) + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + impl Parse for ForeignItemFn { + fn parse(input: ParseStream) -> Result<Self> { + let attrs = input.call(Attribute::parse_outer)?; + let vis: Visibility = input.parse()?; + let sig: Signature = input.parse()?; + let semi_token: Token![;] = input.parse()?; + Ok(ForeignItemFn { + attrs, + vis, + sig, + semi_token, + }) + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + impl Parse for ForeignItemStatic { + fn parse(input: ParseStream) -> Result<Self> { + Ok(ForeignItemStatic { + attrs: input.call(Attribute::parse_outer)?, + vis: input.parse()?, + static_token: input.parse()?, + mutability: input.parse()?, + ident: input.parse()?, + colon_token: input.parse()?, + ty: input.parse()?, + 
semi_token: input.parse()?, + }) + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + impl Parse for ForeignItemType { + fn parse(input: ParseStream) -> Result<Self> { + Ok(ForeignItemType { + attrs: input.call(Attribute::parse_outer)?, + vis: input.parse()?, + type_token: input.parse()?, + ident: input.parse()?, + generics: { + let mut generics: Generics = input.parse()?; + generics.where_clause = input.parse()?; + generics + }, + semi_token: input.parse()?, + }) + } + } + + fn parse_foreign_item_type(begin: ParseBuffer, input: ParseStream) -> Result<ForeignItem> { + let FlexibleItemType { + vis, + defaultness: _, + type_token, + ident, + generics, + colon_token, + bounds: _, + ty, + semi_token, + } = FlexibleItemType::parse( + input, + TypeDefaultness::Disallowed, + WhereClauseLocation::Both, + )?; + + if colon_token.is_some() || ty.is_some() { + Ok(ForeignItem::Verbatim(verbatim::between(&begin, input))) + } else { + Ok(ForeignItem::Type(ForeignItemType { + attrs: Vec::new(), + vis, + type_token, + ident, + generics, + semi_token, + })) + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + impl Parse for ForeignItemMacro { + fn parse(input: ParseStream) -> Result<Self> { + let attrs = input.call(Attribute::parse_outer)?; + let mac: Macro = input.parse()?; + let semi_token: Option<Token![;]> = if mac.delimiter.is_brace() { + None + } else { + Some(input.parse()?) + }; + Ok(ForeignItemMacro { + attrs, + mac, + semi_token, + }) + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + impl Parse for ItemType { + fn parse(input: ParseStream) -> Result<Self> { + Ok(ItemType { + attrs: input.call(Attribute::parse_outer)?, + vis: input.parse()?, + type_token: input.parse()?, + ident: input.parse()?, + generics: { + let mut generics: Generics = input.parse()?; + generics.where_clause = input.parse()?; + generics + }, + eq_token: input.parse()?, + ty: input.parse()?, + semi_token: input.parse()?, + }) + } + } + + fn parse_item_type(begin: ParseBuffer, input: ParseStream) -> Result<Item> { + let FlexibleItemType { + vis, + defaultness: _, + type_token, + ident, + generics, + colon_token, + bounds: _, + ty, + semi_token, + } = FlexibleItemType::parse( + input, + TypeDefaultness::Disallowed, + WhereClauseLocation::BeforeEq, + )?; + + let (eq_token, ty) = match ty { + Some(ty) if colon_token.is_none() => ty, + _ => return Ok(Item::Verbatim(verbatim::between(&begin, input))), + }; + + Ok(Item::Type(ItemType { + attrs: Vec::new(), + vis, + type_token, + ident, + generics, + eq_token, + ty: Box::new(ty), + semi_token, + })) + } + + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + impl Parse for ItemStruct { + fn parse(input: ParseStream) -> Result<Self> { + let attrs = input.call(Attribute::parse_outer)?; + let vis = input.parse::<Visibility>()?; + let struct_token = input.parse::<Token![struct]>()?; + let ident = input.parse::<Ident>()?; + let generics = input.parse::<Generics>()?; + let (where_clause, fields, semi_token) = derive::parsing::data_struct(input)?; + Ok(ItemStruct { + attrs, + vis, + struct_token, + ident, + generics: Generics { + where_clause, + ..generics + }, + fields, + semi_token, + }) + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + impl Parse for ItemEnum { + fn parse(input: ParseStream) -> Result<Self> { + let attrs = input.call(Attribute::parse_outer)?; + let vis = input.parse::<Visibility>()?; + let enum_token = input.parse::<Token![enum]>()?; + let ident = input.parse::<Ident>()?; + let generics = input.parse::<Generics>()?; + 
let (where_clause, brace_token, variants) = derive::parsing::data_enum(input)?; + Ok(ItemEnum { + attrs, + vis, + enum_token, + ident, + generics: Generics { + where_clause, + ..generics + }, + brace_token, + variants, + }) + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + impl Parse for ItemUnion { + fn parse(input: ParseStream) -> Result<Self> { + let attrs = input.call(Attribute::parse_outer)?; + let vis = input.parse::<Visibility>()?; + let union_token = input.parse::<Token![union]>()?; + let ident = input.parse::<Ident>()?; + let generics = input.parse::<Generics>()?; + let (where_clause, fields) = derive::parsing::data_union(input)?; + Ok(ItemUnion { + attrs, + vis, + union_token, + ident, + generics: Generics { + where_clause, + ..generics + }, + fields, + }) + } + } + + fn parse_trait_or_trait_alias(input: ParseStream) -> Result<Item> { + let (attrs, vis, trait_token, ident, generics) = parse_start_of_trait_alias(input)?; + let lookahead = input.lookahead1(); + if lookahead.peek(token::Brace) + || lookahead.peek(Token![:]) + || lookahead.peek(Token![where]) + { + let unsafety = None; + let auto_token = None; + parse_rest_of_trait( + input, + attrs, + vis, + unsafety, + auto_token, + trait_token, + ident, + generics, + ) + .map(Item::Trait) + } else if lookahead.peek(Token![=]) { + parse_rest_of_trait_alias(input, attrs, vis, trait_token, ident, generics) + .map(Item::TraitAlias) + } else { + Err(lookahead.error()) + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + impl Parse for ItemTrait { + fn parse(input: ParseStream) -> Result<Self> { + let outer_attrs = input.call(Attribute::parse_outer)?; + let vis: Visibility = input.parse()?; + let unsafety: Option<Token![unsafe]> = input.parse()?; + let auto_token: Option<Token![auto]> = input.parse()?; + let trait_token: Token![trait] = input.parse()?; + let ident: Ident = input.parse()?; + let generics: Generics = input.parse()?; + parse_rest_of_trait( + input, + outer_attrs, + vis, + unsafety, + auto_token, + trait_token, + ident, + generics, + ) + } + } + + fn parse_rest_of_trait( + input: ParseStream, + mut attrs: Vec<Attribute>, + vis: Visibility, + unsafety: Option<Token![unsafe]>, + auto_token: Option<Token![auto]>, + trait_token: Token![trait], + ident: Ident, + mut generics: Generics, + ) -> Result<ItemTrait> { + let colon_token: Option<Token![:]> = input.parse()?; + + let mut supertraits = Punctuated::new(); + if colon_token.is_some() { + loop { + if input.peek(Token![where]) || input.peek(token::Brace) { + break; + } + supertraits.push_value({ + let allow_precise_capture = false; + let allow_const = true; + TypeParamBound::parse_single(input, allow_precise_capture, allow_const)? 
+ }); + if input.peek(Token![where]) || input.peek(token::Brace) { + break; + } + supertraits.push_punct(input.parse()?); + } + } + + generics.where_clause = input.parse()?; + + let content; + let brace_token = braced!(content in input); + attr::parsing::parse_inner(&content, &mut attrs)?; + let mut items = Vec::new(); + while !content.is_empty() { + items.push(content.parse()?); + } + + Ok(ItemTrait { + attrs, + vis, + unsafety, + auto_token, + restriction: None, + trait_token, + ident, + generics, + colon_token, + supertraits, + brace_token, + items, + }) + } + + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + impl Parse for ItemTraitAlias { + fn parse(input: ParseStream) -> Result<Self> { + let (attrs, vis, trait_token, ident, generics) = parse_start_of_trait_alias(input)?; + parse_rest_of_trait_alias(input, attrs, vis, trait_token, ident, generics) + } + } + + fn parse_start_of_trait_alias( + input: ParseStream, + ) -> Result<(Vec<Attribute>, Visibility, Token![trait], Ident, Generics)> { + let attrs = input.call(Attribute::parse_outer)?; + let vis: Visibility = input.parse()?; + let trait_token: Token![trait] = input.parse()?; + let ident: Ident = input.parse()?; + let generics: Generics = input.parse()?; + Ok((attrs, vis, trait_token, ident, generics)) + } + + fn parse_rest_of_trait_alias( + input: ParseStream, + attrs: Vec<Attribute>, + vis: Visibility, + trait_token: Token![trait], + ident: Ident, + mut generics: Generics, + ) -> Result<ItemTraitAlias> { + let eq_token: Token![=] = input.parse()?; + + let mut bounds = Punctuated::new(); + loop { + if input.peek(Token![where]) || input.peek(Token![;]) { + break; + } + bounds.push_value({ + let allow_precise_capture = false; + let allow_const = false; + TypeParamBound::parse_single(input, allow_precise_capture, allow_const)? + }); + if input.peek(Token![where]) || input.peek(Token![;]) { + break; + } + bounds.push_punct(input.parse()?); + } + + generics.where_clause = input.parse()?; + let semi_token: Token![;] = input.parse()?; + + Ok(ItemTraitAlias { + attrs, + vis, + trait_token, + ident, + generics, + eq_token, + bounds, + semi_token, + }) + } + + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + impl Parse for TraitItem { + fn parse(input: ParseStream) -> Result<Self> { + let begin = input.fork(); + let mut attrs = input.call(Attribute::parse_outer)?; + let vis: Visibility = input.parse()?; + let defaultness: Option<Token![default]> = input.parse()?; + let ahead = input.fork(); + + let lookahead = ahead.lookahead1(); + let allow_safe = false; + let mut item = if lookahead.peek(Token![fn]) || peek_signature(&ahead, allow_safe) { + input.parse().map(TraitItem::Fn) + } else if lookahead.peek(Token![const]) { + let const_token: Token![const] = ahead.parse()?; + let lookahead = ahead.lookahead1(); + if lookahead.peek(Ident) || lookahead.peek(Token![_]) { + input.advance_to(&ahead); + let ident = input.call(Ident::parse_any)?; + let mut generics: Generics = input.parse()?; + let colon_token: Token![:] = input.parse()?; + let ty: Type = input.parse()?; + let default = if let Some(eq_token) = input.parse::<Option<Token![=]>>()? 
{ + let expr: Expr = input.parse()?; + Some((eq_token, expr)) + } else { + None + }; + generics.where_clause = input.parse()?; + let semi_token: Token![;] = input.parse()?; + if generics.lt_token.is_none() && generics.where_clause.is_none() { + Ok(TraitItem::Const(TraitItemConst { + attrs: Vec::new(), + const_token, + ident, + generics, + colon_token, + ty, + default, + semi_token, + })) + } else { + return Ok(TraitItem::Verbatim(verbatim::between(&begin, input))); + } + } else if lookahead.peek(Token![async]) + || lookahead.peek(Token![unsafe]) + || lookahead.peek(Token![extern]) + || lookahead.peek(Token![fn]) + { + input.parse().map(TraitItem::Fn) + } else { + Err(lookahead.error()) + } + } else if lookahead.peek(Token![type]) { + parse_trait_item_type(begin.fork(), input) + } else if vis.is_inherited() + && defaultness.is_none() + && (lookahead.peek(Ident) + || lookahead.peek(Token![self]) + || lookahead.peek(Token![super]) + || lookahead.peek(Token![crate]) + || lookahead.peek(Token![::])) + { + input.parse().map(TraitItem::Macro) + } else { + Err(lookahead.error()) + }?; + + match (vis, defaultness) { + (Visibility::Inherited, None) => {} + _ => return Ok(TraitItem::Verbatim(verbatim::between(&begin, input))), + } + + let item_attrs = match &mut item { + TraitItem::Const(item) => &mut item.attrs, + TraitItem::Fn(item) => &mut item.attrs, + TraitItem::Type(item) => &mut item.attrs, + TraitItem::Macro(item) => &mut item.attrs, + TraitItem::Verbatim(_) => unreachable!(), + }; + attrs.append(item_attrs); + *item_attrs = attrs; + Ok(item) + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + impl Parse for TraitItemConst { + fn parse(input: ParseStream) -> Result<Self> { + let attrs = input.call(Attribute::parse_outer)?; + let const_token: Token![const] = input.parse()?; + + let lookahead = input.lookahead1(); + let ident = if lookahead.peek(Ident) || lookahead.peek(Token![_]) { + input.call(Ident::parse_any)? 
+ } else { + return Err(lookahead.error()); + }; + + let colon_token: Token![:] = input.parse()?; + let ty: Type = input.parse()?; + let default = if input.peek(Token![=]) { + let eq_token: Token![=] = input.parse()?; + let default: Expr = input.parse()?; + Some((eq_token, default)) + } else { + None + }; + let semi_token: Token![;] = input.parse()?; + + Ok(TraitItemConst { + attrs, + const_token, + ident, + generics: Generics::default(), + colon_token, + ty, + default, + semi_token, + }) + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + impl Parse for TraitItemFn { + fn parse(input: ParseStream) -> Result<Self> { + let mut attrs = input.call(Attribute::parse_outer)?; + let sig: Signature = input.parse()?; + + let lookahead = input.lookahead1(); + let (brace_token, stmts, semi_token) = if lookahead.peek(token::Brace) { + let content; + let brace_token = braced!(content in input); + attr::parsing::parse_inner(&content, &mut attrs)?; + let stmts = content.call(Block::parse_within)?; + (Some(brace_token), stmts, None) + } else if lookahead.peek(Token![;]) { + let semi_token: Token![;] = input.parse()?; + (None, Vec::new(), Some(semi_token)) + } else { + return Err(lookahead.error()); + }; + + Ok(TraitItemFn { + attrs, + sig, + default: brace_token.map(|brace_token| Block { brace_token, stmts }), + semi_token, + }) + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + impl Parse for TraitItemType { + fn parse(input: ParseStream) -> Result<Self> { + let attrs = input.call(Attribute::parse_outer)?; + let type_token: Token![type] = input.parse()?; + let ident: Ident = input.parse()?; + let mut generics: Generics = input.parse()?; + let (colon_token, bounds) = FlexibleItemType::parse_optional_bounds(input)?; + let default = FlexibleItemType::parse_optional_definition(input)?; + generics.where_clause = input.parse()?; + let semi_token: Token![;] = input.parse()?; + Ok(TraitItemType { + attrs, + type_token, + ident, + generics, + colon_token, + bounds, + default, + semi_token, + }) + } + } + + fn parse_trait_item_type(begin: ParseBuffer, input: ParseStream) -> Result<TraitItem> { + let FlexibleItemType { + vis, + defaultness: _, + type_token, + ident, + generics, + colon_token, + bounds, + ty, + semi_token, + } = FlexibleItemType::parse( + input, + TypeDefaultness::Disallowed, + WhereClauseLocation::AfterEq, + )?; + + if vis.is_some() { + Ok(TraitItem::Verbatim(verbatim::between(&begin, input))) + } else { + Ok(TraitItem::Type(TraitItemType { + attrs: Vec::new(), + type_token, + ident, + generics, + colon_token, + bounds, + default: ty, + semi_token, + })) + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + impl Parse for TraitItemMacro { + fn parse(input: ParseStream) -> Result<Self> { + let attrs = input.call(Attribute::parse_outer)?; + let mac: Macro = input.parse()?; + let semi_token: Option<Token![;]> = if mac.delimiter.is_brace() { + None + } else { + Some(input.parse()?) 
+ }; + Ok(TraitItemMacro { + attrs, + mac, + semi_token, + }) + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + impl Parse for ItemImpl { + fn parse(input: ParseStream) -> Result<Self> { + let allow_verbatim_impl = false; + parse_impl(input, allow_verbatim_impl).map(Option::unwrap) + } + } + + fn parse_impl(input: ParseStream, allow_verbatim_impl: bool) -> Result<Option<ItemImpl>> { + let mut attrs = input.call(Attribute::parse_outer)?; + let has_visibility = allow_verbatim_impl && input.parse::<Visibility>()?.is_some(); + let defaultness: Option<Token![default]> = input.parse()?; + let unsafety: Option<Token![unsafe]> = input.parse()?; + let impl_token: Token![impl] = input.parse()?; + + let has_generics = generics::parsing::choose_generics_over_qpath(input); + let mut generics: Generics = if has_generics { + input.parse()? + } else { + Generics::default() + }; + + let is_const_impl = allow_verbatim_impl + && (input.peek(Token![const]) || input.peek(Token![?]) && input.peek2(Token![const])); + if is_const_impl { + input.parse::<Option<Token![?]>>()?; + input.parse::<Token![const]>()?; + } + + let polarity = if input.peek(Token![!]) && !input.peek2(token::Brace) { + Some(input.parse::<Token![!]>()?) + } else { + None + }; + + #[cfg(not(feature = "printing"))] + let first_ty_span = input.span(); + let mut first_ty: Type = input.parse()?; + let self_ty: Type; + let trait_; + + let is_impl_for = input.peek(Token![for]); + if is_impl_for { + let for_token: Token![for] = input.parse()?; + let mut first_ty_ref = &first_ty; + while let Type::Group(ty) = first_ty_ref { + first_ty_ref = &ty.elem; + } + if let Type::Path(TypePath { qself: None, .. }) = first_ty_ref { + while let Type::Group(ty) = first_ty { + first_ty = *ty.elem; + } + if let Type::Path(TypePath { qself: None, path }) = first_ty { + trait_ = Some((polarity, path, for_token)); + } else { + unreachable!(); + } + } else if !allow_verbatim_impl { + #[cfg(feature = "printing")] + return Err(Error::new_spanned(first_ty_ref, "expected trait path")); + #[cfg(not(feature = "printing"))] + return Err(Error::new(first_ty_span, "expected trait path")); + } else { + trait_ = None; + } + self_ty = input.parse()?; + } else if let Some(polarity) = polarity { + return Err(Error::new( + polarity.span, + "inherent impls cannot be negative", + )); + } else { + trait_ = None; + self_ty = first_ty; + } + + generics.where_clause = input.parse()?; + + let content; + let brace_token = braced!(content in input); + attr::parsing::parse_inner(&content, &mut attrs)?; + + let mut items = Vec::new(); + while !content.is_empty() { + items.push(content.parse()?); + } + + if has_visibility || is_const_impl || is_impl_for && trait_.is_none() { + Ok(None) + } else { + Ok(Some(ItemImpl { + attrs, + defaultness, + unsafety, + impl_token, + generics, + trait_, + self_ty: Box::new(self_ty), + brace_token, + items, + })) + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + impl Parse for ImplItem { + fn parse(input: ParseStream) -> Result<Self> { + let begin = input.fork(); + let mut attrs = input.call(Attribute::parse_outer)?; + let ahead = input.fork(); + let vis: Visibility = ahead.parse()?; + + let mut lookahead = ahead.lookahead1(); + let defaultness = if lookahead.peek(Token![default]) && !ahead.peek2(Token![!]) { + let defaultness: Token![default] = ahead.parse()?; + lookahead = ahead.lookahead1(); + Some(defaultness) + } else { + None + }; + + let allow_safe = false; + let mut item = if lookahead.peek(Token![fn]) || peek_signature(&ahead, 
allow_safe) { + let allow_omitted_body = true; + if let Some(item) = parse_impl_item_fn(input, allow_omitted_body)? { + Ok(ImplItem::Fn(item)) + } else { + Ok(ImplItem::Verbatim(verbatim::between(&begin, input))) + } + } else if lookahead.peek(Token![const]) { + input.advance_to(&ahead); + let const_token: Token![const] = input.parse()?; + let lookahead = input.lookahead1(); + let ident = if lookahead.peek(Ident) || lookahead.peek(Token![_]) { + input.call(Ident::parse_any)? + } else { + return Err(lookahead.error()); + }; + let mut generics: Generics = input.parse()?; + let colon_token: Token![:] = input.parse()?; + let ty: Type = input.parse()?; + let value = if let Some(eq_token) = input.parse::<Option<Token![=]>>()? { + let expr: Expr = input.parse()?; + Some((eq_token, expr)) + } else { + None + }; + generics.where_clause = input.parse()?; + let semi_token: Token![;] = input.parse()?; + return match value { + Some((eq_token, expr)) + if generics.lt_token.is_none() && generics.where_clause.is_none() => + { + Ok(ImplItem::Const(ImplItemConst { + attrs, + vis, + defaultness, + const_token, + ident, + generics, + colon_token, + ty, + eq_token, + expr, + semi_token, + })) + } + _ => Ok(ImplItem::Verbatim(verbatim::between(&begin, input))), + }; + } else if lookahead.peek(Token![type]) { + parse_impl_item_type(begin, input) + } else if vis.is_inherited() + && defaultness.is_none() + && (lookahead.peek(Ident) + || lookahead.peek(Token![self]) + || lookahead.peek(Token![super]) + || lookahead.peek(Token![crate]) + || lookahead.peek(Token![::])) + { + input.parse().map(ImplItem::Macro) + } else { + Err(lookahead.error()) + }?; + + { + let item_attrs = match &mut item { + ImplItem::Const(item) => &mut item.attrs, + ImplItem::Fn(item) => &mut item.attrs, + ImplItem::Type(item) => &mut item.attrs, + ImplItem::Macro(item) => &mut item.attrs, + ImplItem::Verbatim(_) => return Ok(item), + }; + attrs.append(item_attrs); + *item_attrs = attrs; + } + + Ok(item) + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + impl Parse for ImplItemConst { + fn parse(input: ParseStream) -> Result<Self> { + let attrs = input.call(Attribute::parse_outer)?; + let vis: Visibility = input.parse()?; + let defaultness: Option<Token![default]> = input.parse()?; + let const_token: Token![const] = input.parse()?; + + let lookahead = input.lookahead1(); + let ident = if lookahead.peek(Ident) || lookahead.peek(Token![_]) { + input.call(Ident::parse_any)? 
+ } else { + return Err(lookahead.error()); + }; + + let colon_token: Token![:] = input.parse()?; + let ty: Type = input.parse()?; + let eq_token: Token![=] = input.parse()?; + let expr: Expr = input.parse()?; + let semi_token: Token![;] = input.parse()?; + + Ok(ImplItemConst { + attrs, + vis, + defaultness, + const_token, + ident, + generics: Generics::default(), + colon_token, + ty, + eq_token, + expr, + semi_token, + }) + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + impl Parse for ImplItemFn { + fn parse(input: ParseStream) -> Result<Self> { + let allow_omitted_body = false; + parse_impl_item_fn(input, allow_omitted_body).map(Option::unwrap) + } + } + + fn parse_impl_item_fn( + input: ParseStream, + allow_omitted_body: bool, + ) -> Result<Option<ImplItemFn>> { + let mut attrs = input.call(Attribute::parse_outer)?; + let vis: Visibility = input.parse()?; + let defaultness: Option<Token![default]> = input.parse()?; + let sig: Signature = input.parse()?; + + // Accept functions without a body in an impl block because rustc's + // *parser* does not reject them (the compilation error is emitted later + // than parsing) and it can be useful for macro DSLs. + if allow_omitted_body && input.parse::<Option<Token![;]>>()?.is_some() { + return Ok(None); + } + + let content; + let brace_token = braced!(content in input); + attrs.extend(content.call(Attribute::parse_inner)?); + let block = Block { + brace_token, + stmts: content.call(Block::parse_within)?, + }; + + Ok(Some(ImplItemFn { + attrs, + vis, + defaultness, + sig, + block, + })) + } + + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + impl Parse for ImplItemType { + fn parse(input: ParseStream) -> Result<Self> { + let attrs = input.call(Attribute::parse_outer)?; + let vis: Visibility = input.parse()?; + let defaultness: Option<Token![default]> = input.parse()?; + let type_token: Token![type] = input.parse()?; + let ident: Ident = input.parse()?; + let mut generics: Generics = input.parse()?; + let eq_token: Token![=] = input.parse()?; + let ty: Type = input.parse()?; + generics.where_clause = input.parse()?; + let semi_token: Token![;] = input.parse()?; + Ok(ImplItemType { + attrs, + vis, + defaultness, + type_token, + ident, + generics, + eq_token, + ty, + semi_token, + }) + } + } + + fn parse_impl_item_type(begin: ParseBuffer, input: ParseStream) -> Result<ImplItem> { + let FlexibleItemType { + vis, + defaultness, + type_token, + ident, + generics, + colon_token, + bounds: _, + ty, + semi_token, + } = FlexibleItemType::parse( + input, + TypeDefaultness::Optional, + WhereClauseLocation::AfterEq, + )?; + + let (eq_token, ty) = match ty { + Some(ty) if colon_token.is_none() => ty, + _ => return Ok(ImplItem::Verbatim(verbatim::between(&begin, input))), + }; + + Ok(ImplItem::Type(ImplItemType { + attrs: Vec::new(), + vis, + defaultness, + type_token, + ident, + generics, + eq_token, + ty, + semi_token, + })) + } + + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + impl Parse for ImplItemMacro { + fn parse(input: ParseStream) -> Result<Self> { + let attrs = input.call(Attribute::parse_outer)?; + let mac: Macro = input.parse()?; + let semi_token: Option<Token![;]> = if mac.delimiter.is_brace() { + None + } else { + Some(input.parse()?) 
+ }; + Ok(ImplItemMacro { + attrs, + mac, + semi_token, + }) + } + } + + impl Visibility { + fn is_inherited(&self) -> bool { + match self { + Visibility::Inherited => true, + _ => false, + } + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + impl Parse for StaticMutability { + fn parse(input: ParseStream) -> Result<Self> { + let mut_token: Option<Token![mut]> = input.parse()?; + Ok(mut_token.map_or(StaticMutability::None, StaticMutability::Mut)) + } + } +} + +#[cfg(feature = "printing")] +mod printing { + use crate::attr::FilterAttrs; + use crate::data::Fields; + use crate::item::{ + ForeignItemFn, ForeignItemMacro, ForeignItemStatic, ForeignItemType, ImplItemConst, + ImplItemFn, ImplItemMacro, ImplItemType, ItemConst, ItemEnum, ItemExternCrate, ItemFn, + ItemForeignMod, ItemImpl, ItemMacro, ItemMod, ItemStatic, ItemStruct, ItemTrait, + ItemTraitAlias, ItemType, ItemUnion, ItemUse, Receiver, Signature, StaticMutability, + TraitItemConst, TraitItemFn, TraitItemMacro, TraitItemType, UseGlob, UseGroup, UseName, + UsePath, UseRename, Variadic, + }; + use crate::mac::MacroDelimiter; + use crate::path; + use crate::path::printing::PathStyle; + use crate::print::TokensOrDefault; + use crate::ty::Type; + use proc_macro2::TokenStream; + use quote::{ToTokens, TokenStreamExt as _}; + + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for ItemExternCrate { + fn to_tokens(&self, tokens: &mut TokenStream) { + tokens.append_all(self.attrs.outer()); + self.vis.to_tokens(tokens); + self.extern_token.to_tokens(tokens); + self.crate_token.to_tokens(tokens); + self.ident.to_tokens(tokens); + if let Some((as_token, rename)) = &self.rename { + as_token.to_tokens(tokens); + rename.to_tokens(tokens); + } + self.semi_token.to_tokens(tokens); + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for ItemUse { + fn to_tokens(&self, tokens: &mut TokenStream) { + tokens.append_all(self.attrs.outer()); + self.vis.to_tokens(tokens); + self.use_token.to_tokens(tokens); + self.leading_colon.to_tokens(tokens); + self.tree.to_tokens(tokens); + self.semi_token.to_tokens(tokens); + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for ItemStatic { + fn to_tokens(&self, tokens: &mut TokenStream) { + tokens.append_all(self.attrs.outer()); + self.vis.to_tokens(tokens); + self.static_token.to_tokens(tokens); + self.mutability.to_tokens(tokens); + self.ident.to_tokens(tokens); + self.colon_token.to_tokens(tokens); + self.ty.to_tokens(tokens); + self.eq_token.to_tokens(tokens); + self.expr.to_tokens(tokens); + self.semi_token.to_tokens(tokens); + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for ItemConst { + fn to_tokens(&self, tokens: &mut TokenStream) { + tokens.append_all(self.attrs.outer()); + self.vis.to_tokens(tokens); + self.const_token.to_tokens(tokens); + self.ident.to_tokens(tokens); + self.colon_token.to_tokens(tokens); + self.ty.to_tokens(tokens); + self.eq_token.to_tokens(tokens); + self.expr.to_tokens(tokens); + self.semi_token.to_tokens(tokens); + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for ItemFn { + fn to_tokens(&self, tokens: &mut TokenStream) { + tokens.append_all(self.attrs.outer()); + self.vis.to_tokens(tokens); + self.sig.to_tokens(tokens); + self.block.brace_token.surround(tokens, |tokens| { + tokens.append_all(self.attrs.inner()); + tokens.append_all(&self.block.stmts); + }); + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl 
ToTokens for ItemMod { + fn to_tokens(&self, tokens: &mut TokenStream) { + tokens.append_all(self.attrs.outer()); + self.vis.to_tokens(tokens); + self.unsafety.to_tokens(tokens); + self.mod_token.to_tokens(tokens); + self.ident.to_tokens(tokens); + if let Some((brace, items)) = &self.content { + brace.surround(tokens, |tokens| { + tokens.append_all(self.attrs.inner()); + tokens.append_all(items); + }); + } else { + TokensOrDefault(&self.semi).to_tokens(tokens); + } + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for ItemForeignMod { + fn to_tokens(&self, tokens: &mut TokenStream) { + tokens.append_all(self.attrs.outer()); + self.unsafety.to_tokens(tokens); + self.abi.to_tokens(tokens); + self.brace_token.surround(tokens, |tokens| { + tokens.append_all(self.attrs.inner()); + tokens.append_all(&self.items); + }); + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for ItemType { + fn to_tokens(&self, tokens: &mut TokenStream) { + tokens.append_all(self.attrs.outer()); + self.vis.to_tokens(tokens); + self.type_token.to_tokens(tokens); + self.ident.to_tokens(tokens); + self.generics.to_tokens(tokens); + self.generics.where_clause.to_tokens(tokens); + self.eq_token.to_tokens(tokens); + self.ty.to_tokens(tokens); + self.semi_token.to_tokens(tokens); + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for ItemEnum { + fn to_tokens(&self, tokens: &mut TokenStream) { + tokens.append_all(self.attrs.outer()); + self.vis.to_tokens(tokens); + self.enum_token.to_tokens(tokens); + self.ident.to_tokens(tokens); + self.generics.to_tokens(tokens); + self.generics.where_clause.to_tokens(tokens); + self.brace_token.surround(tokens, |tokens| { + self.variants.to_tokens(tokens); + }); + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for ItemStruct { + fn to_tokens(&self, tokens: &mut TokenStream) { + tokens.append_all(self.attrs.outer()); + self.vis.to_tokens(tokens); + self.struct_token.to_tokens(tokens); + self.ident.to_tokens(tokens); + self.generics.to_tokens(tokens); + match &self.fields { + Fields::Named(fields) => { + self.generics.where_clause.to_tokens(tokens); + fields.to_tokens(tokens); + } + Fields::Unnamed(fields) => { + fields.to_tokens(tokens); + self.generics.where_clause.to_tokens(tokens); + TokensOrDefault(&self.semi_token).to_tokens(tokens); + } + Fields::Unit => { + self.generics.where_clause.to_tokens(tokens); + TokensOrDefault(&self.semi_token).to_tokens(tokens); + } + } + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for ItemUnion { + fn to_tokens(&self, tokens: &mut TokenStream) { + tokens.append_all(self.attrs.outer()); + self.vis.to_tokens(tokens); + self.union_token.to_tokens(tokens); + self.ident.to_tokens(tokens); + self.generics.to_tokens(tokens); + self.generics.where_clause.to_tokens(tokens); + self.fields.to_tokens(tokens); + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for ItemTrait { + fn to_tokens(&self, tokens: &mut TokenStream) { + tokens.append_all(self.attrs.outer()); + self.vis.to_tokens(tokens); + self.unsafety.to_tokens(tokens); + self.auto_token.to_tokens(tokens); + self.trait_token.to_tokens(tokens); + self.ident.to_tokens(tokens); + self.generics.to_tokens(tokens); + if !self.supertraits.is_empty() { + TokensOrDefault(&self.colon_token).to_tokens(tokens); + self.supertraits.to_tokens(tokens); + } + self.generics.where_clause.to_tokens(tokens); + self.brace_token.surround(tokens, 
|tokens| { + tokens.append_all(self.attrs.inner()); + tokens.append_all(&self.items); + }); + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for ItemTraitAlias { + fn to_tokens(&self, tokens: &mut TokenStream) { + tokens.append_all(self.attrs.outer()); + self.vis.to_tokens(tokens); + self.trait_token.to_tokens(tokens); + self.ident.to_tokens(tokens); + self.generics.to_tokens(tokens); + self.eq_token.to_tokens(tokens); + self.bounds.to_tokens(tokens); + self.generics.where_clause.to_tokens(tokens); + self.semi_token.to_tokens(tokens); + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for ItemImpl { + fn to_tokens(&self, tokens: &mut TokenStream) { + tokens.append_all(self.attrs.outer()); + self.defaultness.to_tokens(tokens); + self.unsafety.to_tokens(tokens); + self.impl_token.to_tokens(tokens); + self.generics.to_tokens(tokens); + if let Some((polarity, path, for_token)) = &self.trait_ { + polarity.to_tokens(tokens); + path.to_tokens(tokens); + for_token.to_tokens(tokens); + } + self.self_ty.to_tokens(tokens); + self.generics.where_clause.to_tokens(tokens); + self.brace_token.surround(tokens, |tokens| { + tokens.append_all(self.attrs.inner()); + tokens.append_all(&self.items); + }); + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for ItemMacro { + fn to_tokens(&self, tokens: &mut TokenStream) { + tokens.append_all(self.attrs.outer()); + path::printing::print_path(tokens, &self.mac.path, PathStyle::Mod); + self.mac.bang_token.to_tokens(tokens); + self.ident.to_tokens(tokens); + match &self.mac.delimiter { + MacroDelimiter::Paren(paren) => { + paren.surround(tokens, |tokens| self.mac.tokens.to_tokens(tokens)); + } + MacroDelimiter::Brace(brace) => { + brace.surround(tokens, |tokens| self.mac.tokens.to_tokens(tokens)); + } + MacroDelimiter::Bracket(bracket) => { + bracket.surround(tokens, |tokens| self.mac.tokens.to_tokens(tokens)); + } + } + self.semi_token.to_tokens(tokens); + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for UsePath { + fn to_tokens(&self, tokens: &mut TokenStream) { + self.ident.to_tokens(tokens); + self.colon2_token.to_tokens(tokens); + self.tree.to_tokens(tokens); + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for UseName { + fn to_tokens(&self, tokens: &mut TokenStream) { + self.ident.to_tokens(tokens); + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for UseRename { + fn to_tokens(&self, tokens: &mut TokenStream) { + self.ident.to_tokens(tokens); + self.as_token.to_tokens(tokens); + self.rename.to_tokens(tokens); + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for UseGlob { + fn to_tokens(&self, tokens: &mut TokenStream) { + self.star_token.to_tokens(tokens); + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for UseGroup { + fn to_tokens(&self, tokens: &mut TokenStream) { + self.brace_token.surround(tokens, |tokens| { + self.items.to_tokens(tokens); + }); + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for TraitItemConst { + fn to_tokens(&self, tokens: &mut TokenStream) { + tokens.append_all(self.attrs.outer()); + self.const_token.to_tokens(tokens); + self.ident.to_tokens(tokens); + self.colon_token.to_tokens(tokens); + self.ty.to_tokens(tokens); + if let Some((eq_token, default)) = &self.default { + eq_token.to_tokens(tokens); + default.to_tokens(tokens); + } + 
self.semi_token.to_tokens(tokens); + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for TraitItemFn { + fn to_tokens(&self, tokens: &mut TokenStream) { + tokens.append_all(self.attrs.outer()); + self.sig.to_tokens(tokens); + match &self.default { + Some(block) => { + block.brace_token.surround(tokens, |tokens| { + tokens.append_all(self.attrs.inner()); + tokens.append_all(&block.stmts); + }); + } + None => { + TokensOrDefault(&self.semi_token).to_tokens(tokens); + } + } + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for TraitItemType { + fn to_tokens(&self, tokens: &mut TokenStream) { + tokens.append_all(self.attrs.outer()); + self.type_token.to_tokens(tokens); + self.ident.to_tokens(tokens); + self.generics.to_tokens(tokens); + if !self.bounds.is_empty() { + TokensOrDefault(&self.colon_token).to_tokens(tokens); + self.bounds.to_tokens(tokens); + } + if let Some((eq_token, default)) = &self.default { + eq_token.to_tokens(tokens); + default.to_tokens(tokens); + } + self.generics.where_clause.to_tokens(tokens); + self.semi_token.to_tokens(tokens); + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for TraitItemMacro { + fn to_tokens(&self, tokens: &mut TokenStream) { + tokens.append_all(self.attrs.outer()); + self.mac.to_tokens(tokens); + self.semi_token.to_tokens(tokens); + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for ImplItemConst { + fn to_tokens(&self, tokens: &mut TokenStream) { + tokens.append_all(self.attrs.outer()); + self.vis.to_tokens(tokens); + self.defaultness.to_tokens(tokens); + self.const_token.to_tokens(tokens); + self.ident.to_tokens(tokens); + self.colon_token.to_tokens(tokens); + self.ty.to_tokens(tokens); + self.eq_token.to_tokens(tokens); + self.expr.to_tokens(tokens); + self.semi_token.to_tokens(tokens); + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for ImplItemFn { + fn to_tokens(&self, tokens: &mut TokenStream) { + tokens.append_all(self.attrs.outer()); + self.vis.to_tokens(tokens); + self.defaultness.to_tokens(tokens); + self.sig.to_tokens(tokens); + self.block.brace_token.surround(tokens, |tokens| { + tokens.append_all(self.attrs.inner()); + tokens.append_all(&self.block.stmts); + }); + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for ImplItemType { + fn to_tokens(&self, tokens: &mut TokenStream) { + tokens.append_all(self.attrs.outer()); + self.vis.to_tokens(tokens); + self.defaultness.to_tokens(tokens); + self.type_token.to_tokens(tokens); + self.ident.to_tokens(tokens); + self.generics.to_tokens(tokens); + self.eq_token.to_tokens(tokens); + self.ty.to_tokens(tokens); + self.generics.where_clause.to_tokens(tokens); + self.semi_token.to_tokens(tokens); + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for ImplItemMacro { + fn to_tokens(&self, tokens: &mut TokenStream) { + tokens.append_all(self.attrs.outer()); + self.mac.to_tokens(tokens); + self.semi_token.to_tokens(tokens); + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for ForeignItemFn { + fn to_tokens(&self, tokens: &mut TokenStream) { + tokens.append_all(self.attrs.outer()); + self.vis.to_tokens(tokens); + self.sig.to_tokens(tokens); + self.semi_token.to_tokens(tokens); + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for ForeignItemStatic { + fn to_tokens(&self, tokens: &mut TokenStream) { + 
tokens.append_all(self.attrs.outer()); + self.vis.to_tokens(tokens); + self.static_token.to_tokens(tokens); + self.mutability.to_tokens(tokens); + self.ident.to_tokens(tokens); + self.colon_token.to_tokens(tokens); + self.ty.to_tokens(tokens); + self.semi_token.to_tokens(tokens); + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for ForeignItemType { + fn to_tokens(&self, tokens: &mut TokenStream) { + tokens.append_all(self.attrs.outer()); + self.vis.to_tokens(tokens); + self.type_token.to_tokens(tokens); + self.ident.to_tokens(tokens); + self.generics.to_tokens(tokens); + self.generics.where_clause.to_tokens(tokens); + self.semi_token.to_tokens(tokens); + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for ForeignItemMacro { + fn to_tokens(&self, tokens: &mut TokenStream) { + tokens.append_all(self.attrs.outer()); + self.mac.to_tokens(tokens); + self.semi_token.to_tokens(tokens); + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for Signature { + fn to_tokens(&self, tokens: &mut TokenStream) { + self.constness.to_tokens(tokens); + self.asyncness.to_tokens(tokens); + self.unsafety.to_tokens(tokens); + self.abi.to_tokens(tokens); + self.fn_token.to_tokens(tokens); + self.ident.to_tokens(tokens); + self.generics.to_tokens(tokens); + self.paren_token.surround(tokens, |tokens| { + self.inputs.to_tokens(tokens); + if let Some(variadic) = &self.variadic { + if !self.inputs.empty_or_trailing() { + <Token![,]>::default().to_tokens(tokens); + } + variadic.to_tokens(tokens); + } + }); + self.output.to_tokens(tokens); + self.generics.where_clause.to_tokens(tokens); + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for Receiver { + fn to_tokens(&self, tokens: &mut TokenStream) { + tokens.append_all(self.attrs.outer()); + if let Some((ampersand, lifetime)) = &self.reference { + ampersand.to_tokens(tokens); + lifetime.to_tokens(tokens); + } + self.mutability.to_tokens(tokens); + self.self_token.to_tokens(tokens); + if let Some(colon_token) = &self.colon_token { + colon_token.to_tokens(tokens); + self.ty.to_tokens(tokens); + } else { + let consistent = match (&self.reference, &self.mutability, &*self.ty) { + (Some(_), mutability, Type::Reference(ty)) => { + mutability.is_some() == ty.mutability.is_some() + && match &*ty.elem { + Type::Path(ty) => ty.qself.is_none() && ty.path.is_ident("Self"), + _ => false, + } + } + (None, _, Type::Path(ty)) => ty.qself.is_none() && ty.path.is_ident("Self"), + _ => false, + }; + if !consistent { + <Token![:]>::default().to_tokens(tokens); + self.ty.to_tokens(tokens); + } + } + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for Variadic { + fn to_tokens(&self, tokens: &mut TokenStream) { + tokens.append_all(self.attrs.outer()); + if let Some((pat, colon)) = &self.pat { + pat.to_tokens(tokens); + colon.to_tokens(tokens); + } + self.dots.to_tokens(tokens); + self.comma.to_tokens(tokens); + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for StaticMutability { + fn to_tokens(&self, tokens: &mut TokenStream) { + match self { + StaticMutability::None => {} + StaticMutability::Mut(mut_token) => mut_token.to_tokens(tokens), + } + } + } +} diff --git a/vendor/syn/src/lib.rs b/vendor/syn/src/lib.rs new file mode 100644 index 00000000000000..cd7090515ac4f1 --- /dev/null +++ b/vendor/syn/src/lib.rs @@ -0,0 +1,1009 @@ +//! 
[![github]](https://github.com/dtolnay/syn) [![crates-io]](https://crates.io/crates/syn) [![docs-rs]](crate) +//! +//! [github]: https://img.shields.io/badge/github-8da0cb?style=for-the-badge&labelColor=555555&logo=github +//! [crates-io]: https://img.shields.io/badge/crates.io-fc8d62?style=for-the-badge&labelColor=555555&logo=rust +//! [docs-rs]: https://img.shields.io/badge/docs.rs-66c2a5?style=for-the-badge&labelColor=555555&logo=docs.rs +//! +//! <br> +//! +//! Syn is a parsing library for parsing a stream of Rust tokens into a syntax +//! tree of Rust source code. +//! +//! Currently this library is geared toward use in Rust procedural macros, but +//! contains some APIs that may be useful more generally. +//! +//! - **Data structures** — Syn provides a complete syntax tree that can +//! represent any valid Rust source code. The syntax tree is rooted at +//! [`syn::File`] which represents a full source file, but there are other +//! entry points that may be useful to procedural macros including +//! [`syn::Item`], [`syn::Expr`] and [`syn::Type`]. +//! +//! - **Derives** — Of particular interest to derive macros is +//! [`syn::DeriveInput`] which is any of the three legal input items to a +//! derive macro. An example below shows using this type in a library that can +//! derive implementations of a user-defined trait. +//! +//! - **Parsing** — Parsing in Syn is built around [parser functions] with the +//! signature `fn(ParseStream) -> Result<T>`. Every syntax tree node defined +//! by Syn is individually parsable and may be used as a building block for +//! custom syntaxes, or you may dream up your own brand new syntax without +//! involving any of our syntax tree types. +//! +//! - **Location information** — Every token parsed by Syn is associated with a +//! `Span` that tracks line and column information back to the source of that +//! token. These spans allow a procedural macro to display detailed error +//! messages pointing to all the right places in the user's code. There is an +//! example of this below. +//! +//! - **Feature flags** — Functionality is aggressively feature gated so your +//! procedural macros enable only what they need, and do not pay in compile +//! time for all the rest. +//! +//! [`syn::File`]: File +//! [`syn::Item`]: Item +//! [`syn::Expr`]: Expr +//! [`syn::Type`]: Type +//! [`syn::DeriveInput`]: DeriveInput +//! [parser functions]: mod@parse +//! +//! <br> +//! +//! # Example of a derive macro +//! +//! The canonical derive macro using Syn looks like this. We write an ordinary +//! Rust function tagged with a `proc_macro_derive` attribute and the name of +//! the trait we are deriving. Any time that derive appears in the user's code, +//! the Rust compiler passes their data structure as tokens into our macro. We +//! get to execute arbitrary Rust code to figure out what to do with those +//! tokens, then hand some tokens back to the compiler to compile into the +//! user's crate. +//! +//! [`TokenStream`]: proc_macro::TokenStream +//! +//! ```toml +//! [dependencies] +//! syn = "2.0" +//! quote = "1.0" +//! +//! [lib] +//! proc-macro = true +//! ``` +//! +//! ``` +//! # extern crate proc_macro; +//! # +//! use proc_macro::TokenStream; +//! use quote::quote; +//! use syn::{parse_macro_input, DeriveInput}; +//! +//! # const IGNORE_TOKENS: &str = stringify! { +//! #[proc_macro_derive(MyMacro)] +//! # }; +//! pub fn my_macro(input: TokenStream) -> TokenStream { +//! // Parse the input tokens into a syntax tree +//! 
let input = parse_macro_input!(input as DeriveInput); +//! +//! // Build the output, possibly using quasi-quotation +//! let expanded = quote! { +//! // ... +//! }; +//! +//! // Hand the output tokens back to the compiler +//! TokenStream::from(expanded) +//! } +//! ``` +//! +//! The [`heapsize`] example directory shows a complete working implementation +//! of a derive macro. The example derives a `HeapSize` trait which computes an +//! estimate of the amount of heap memory owned by a value. +//! +//! [`heapsize`]: https://github.com/dtolnay/syn/tree/master/examples/heapsize +//! +//! ``` +//! pub trait HeapSize { +//! /// Total number of bytes of heap memory owned by `self`. +//! fn heap_size_of_children(&self) -> usize; +//! } +//! ``` +//! +//! The derive macro allows users to write `#[derive(HeapSize)]` on data +//! structures in their program. +//! +//! ``` +//! # const IGNORE_TOKENS: &str = stringify! { +//! #[derive(HeapSize)] +//! # }; +//! struct Demo<'a, T: ?Sized> { +//! a: Box<T>, +//! b: u8, +//! c: &'a str, +//! d: String, +//! } +//! ``` +//! +//! <p><br></p> +//! +//! # Spans and error reporting +//! +//! The token-based procedural macro API provides great control over where the +//! compiler's error messages are displayed in user code. Consider the error the +//! user sees if one of their field types does not implement `HeapSize`. +//! +//! ``` +//! # const IGNORE_TOKENS: &str = stringify! { +//! #[derive(HeapSize)] +//! # }; +//! struct Broken { +//! ok: String, +//! bad: std::thread::Thread, +//! } +//! ``` +//! +//! By tracking span information all the way through the expansion of a +//! procedural macro as shown in the `heapsize` example, token-based macros in +//! Syn are able to trigger errors that directly pinpoint the source of the +//! problem. +//! +//! ```text +//! error[E0277]: the trait bound `std::thread::Thread: HeapSize` is not satisfied +//! --> src/main.rs:7:5 +//! | +//! 7 | bad: std::thread::Thread, +//! | ^^^^^^^^^^^^^^^^^^^^^^^^ the trait `HeapSize` is not implemented for `Thread` +//! ``` +//! +//! <br> +//! +//! # Parsing a custom syntax +//! +//! The [`lazy-static`] example directory shows the implementation of a +//! `functionlike!(...)` procedural macro in which the input tokens are parsed +//! using Syn's parsing API. +//! +//! [`lazy-static`]: https://github.com/dtolnay/syn/tree/master/examples/lazy-static +//! +//! The example reimplements the popular `lazy_static` crate from crates.io as a +//! procedural macro. +//! +//! ``` +//! # macro_rules! lazy_static { +//! # ($($tt:tt)*) => {} +//! # } +//! # +//! lazy_static! { +//! static ref USERNAME: Regex = Regex::new("^[a-z0-9_-]{3,16}$").unwrap(); +//! } +//! ``` +//! +//! The implementation shows how to trigger custom warnings and error messages +//! on the macro input. +//! +//! ```text +//! warning: come on, pick a more creative name +//! --> src/main.rs:10:16 +//! | +//! 10 | static ref FOO: String = "lazy_static".to_owned(); +//! | ^^^ +//! ``` +//! +//! <br> +//! +//! # Testing +//! +//! When testing macros, we often care not just that the macro can be used +//! successfully but also that when the macro is provided with invalid input it +//! produces maximally helpful error messages. Consider using the [`trybuild`] +//! crate to write tests for errors that are emitted by your macro or errors +//! detected by the Rust compiler in the expanded code following misuse of the +//! macro. Such tests help avoid regressions from later refactors that +//! 
mistakenly make an error no longer trigger or be less helpful than it used +//! to be. +//! +//! [`trybuild`]: https://github.com/dtolnay/trybuild +//! +//! <br> +//! +//! # Debugging +//! +//! When developing a procedural macro it can be helpful to look at what the +//! generated code looks like. Use `cargo rustc -- -Zunstable-options +//! --pretty=expanded` or the [`cargo expand`] subcommand. +//! +//! [`cargo expand`]: https://github.com/dtolnay/cargo-expand +//! +//! To show the expanded code for some crate that uses your procedural macro, +//! run `cargo expand` from that crate. To show the expanded code for one of +//! your own test cases, run `cargo expand --test the_test_case` where the last +//! argument is the name of the test file without the `.rs` extension. +//! +//! This write-up by Brandon W Maister discusses debugging in more detail: +//! [Debugging Rust's new Custom Derive system][debugging]. +//! +//! [debugging]: https://quodlibetor.github.io/posts/debugging-rusts-new-custom-derive-system/ +//! +//! <br> +//! +//! # Optional features +//! +//! Syn puts a lot of functionality behind optional features in order to +//! optimize compile time for the most common use cases. The following features +//! are available. +//! +//! - **`derive`** *(enabled by default)* — Data structures for representing the +//! possible input to a derive macro, including structs and enums and types. +//! - **`full`** — Data structures for representing the syntax tree of all valid +//! Rust source code, including items and expressions. +//! - **`parsing`** *(enabled by default)* — Ability to parse input tokens into +//! a syntax tree node of a chosen type. +//! - **`printing`** *(enabled by default)* — Ability to print a syntax tree +//! node as tokens of Rust source code. +//! - **`visit`** — Trait for traversing a syntax tree. +//! - **`visit-mut`** — Trait for traversing and mutating in place a syntax +//! tree. +//! - **`fold`** — Trait for transforming an owned syntax tree. +//! - **`clone-impls`** *(enabled by default)* — Clone impls for all syntax tree +//! types. +//! - **`extra-traits`** — Debug, Eq, PartialEq, Hash impls for all syntax tree +//! types. +//! - **`proc-macro`** *(enabled by default)* — Runtime dependency on the +//! dynamic library libproc_macro from rustc toolchain. + +// Syn types in rustdoc of other crates get linked to here. 
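The feature list above maps directly onto the `cfg` gates that follow in this file. As a minimal, hypothetical sketch (a downstream consumer, not part of the vendored sources or of this patch), a proc-macro crate that only parses and prints derive input could opt out of the default feature set and enable just what it uses:

    # Hypothetical downstream Cargo.toml -- enable only the syn features actually needed.
    [dependencies]
    syn = { version = "2.0", default-features = false, features = ["derive", "parsing", "printing", "proc-macro"] }

Anything behind a disabled feature (for example the `full`-only item and expression types re-exported below) is then compiled out of the dependency entirely.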
+#![doc(html_root_url = "https://docs.rs/syn/2.0.110")] +#![cfg_attr(docsrs, feature(doc_cfg), doc(auto_cfg = false))] +#![deny(unsafe_op_in_unsafe_fn)] +#![allow(non_camel_case_types)] +#![cfg_attr(not(check_cfg), allow(unexpected_cfgs))] +#![allow( + clippy::bool_to_int_with_if, + clippy::cast_lossless, + clippy::cast_possible_truncation, + clippy::cast_possible_wrap, + clippy::cast_ptr_alignment, + clippy::default_trait_access, + clippy::derivable_impls, + clippy::diverging_sub_expression, + clippy::doc_markdown, + clippy::elidable_lifetime_names, + clippy::enum_glob_use, + clippy::expl_impl_clone_on_copy, + clippy::explicit_auto_deref, + clippy::fn_params_excessive_bools, + clippy::if_not_else, + clippy::inherent_to_string, + clippy::into_iter_without_iter, + clippy::items_after_statements, + clippy::large_enum_variant, + clippy::let_underscore_untyped, // https://github.com/rust-lang/rust-clippy/issues/10410 + clippy::manual_assert, + clippy::manual_let_else, + clippy::manual_map, + clippy::match_like_matches_macro, + clippy::match_same_arms, + clippy::match_wildcard_for_single_variants, // clippy bug: https://github.com/rust-lang/rust-clippy/issues/6984 + clippy::missing_errors_doc, + clippy::missing_panics_doc, + clippy::module_name_repetitions, + clippy::must_use_candidate, + clippy::needless_doctest_main, + clippy::needless_lifetimes, + clippy::needless_pass_by_value, + clippy::needless_update, + clippy::never_loop, + clippy::range_plus_one, + clippy::redundant_else, + clippy::ref_option, + clippy::return_self_not_must_use, + clippy::similar_names, + clippy::single_match_else, + clippy::struct_excessive_bools, + clippy::too_many_arguments, + clippy::too_many_lines, + clippy::trivially_copy_pass_by_ref, + clippy::unconditional_recursion, // https://github.com/rust-lang/rust-clippy/issues/12133 + clippy::uninhabited_references, + clippy::uninlined_format_args, + clippy::unnecessary_box_returns, + clippy::unnecessary_unwrap, + clippy::used_underscore_binding, + clippy::wildcard_imports, +)] +#![allow(unknown_lints, mismatched_lifetime_syntaxes)] + +extern crate self as syn; + +#[cfg(feature = "proc-macro")] +extern crate proc_macro; + +#[macro_use] +mod macros; + +#[cfg(feature = "parsing")] +#[macro_use] +mod group; + +#[macro_use] +pub mod token; + +#[cfg(any(feature = "full", feature = "derive"))] +mod attr; +#[cfg(any(feature = "full", feature = "derive"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] +pub use crate::attr::{AttrStyle, Attribute, Meta, MetaList, MetaNameValue}; + +mod bigint; + +#[cfg(feature = "parsing")] +#[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] +pub mod buffer; + +#[cfg(any( + all(feature = "parsing", feature = "full"), + all(feature = "printing", any(feature = "full", feature = "derive")), +))] +mod classify; + +mod custom_keyword; + +mod custom_punctuation; + +#[cfg(any(feature = "full", feature = "derive"))] +mod data; +#[cfg(any(feature = "full", feature = "derive"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] +pub use crate::data::{Field, Fields, FieldsNamed, FieldsUnnamed, Variant}; + +#[cfg(any(feature = "full", feature = "derive"))] +mod derive; +#[cfg(feature = "derive")] +#[cfg_attr(docsrs, doc(cfg(feature = "derive")))] +pub use crate::derive::{Data, DataEnum, DataStruct, DataUnion, DeriveInput}; + +mod drops; + +mod error; +pub use crate::error::{Error, Result}; + +#[cfg(any(feature = "full", feature = "derive"))] +mod expr; +#[cfg(feature = "full")] +#[cfg_attr(docsrs, 
doc(cfg(feature = "full")))] +pub use crate::expr::{Arm, Label, PointerMutability, RangeLimits}; +#[cfg(any(feature = "full", feature = "derive"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] +pub use crate::expr::{ + Expr, ExprBinary, ExprCall, ExprCast, ExprField, ExprIndex, ExprLit, ExprMacro, ExprMethodCall, + ExprParen, ExprPath, ExprReference, ExprStruct, ExprUnary, FieldValue, Index, Member, +}; +#[cfg(any(feature = "full", feature = "derive"))] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub use crate::expr::{ + ExprArray, ExprAssign, ExprAsync, ExprAwait, ExprBlock, ExprBreak, ExprClosure, ExprConst, + ExprContinue, ExprForLoop, ExprGroup, ExprIf, ExprInfer, ExprLet, ExprLoop, ExprMatch, + ExprRange, ExprRawAddr, ExprRepeat, ExprReturn, ExprTry, ExprTryBlock, ExprTuple, ExprUnsafe, + ExprWhile, ExprYield, +}; + +pub mod ext; + +#[cfg(feature = "full")] +mod file; +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub use crate::file::File; + +#[cfg(all(any(feature = "full", feature = "derive"), feature = "printing"))] +mod fixup; + +#[cfg(any(feature = "full", feature = "derive"))] +mod generics; +#[cfg(any(feature = "full", feature = "derive"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] +pub use crate::generics::{ + BoundLifetimes, ConstParam, GenericParam, Generics, LifetimeParam, PredicateLifetime, + PredicateType, TraitBound, TraitBoundModifier, TypeParam, TypeParamBound, WhereClause, + WherePredicate, +}; +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub use crate::generics::{CapturedParam, PreciseCapture}; +#[cfg(all(any(feature = "full", feature = "derive"), feature = "printing"))] +#[cfg_attr( + docsrs, + doc(cfg(all(any(feature = "full", feature = "derive"), feature = "printing"))) +)] +pub use crate::generics::{ImplGenerics, Turbofish, TypeGenerics}; + +mod ident; +#[doc(inline)] +pub use crate::ident::Ident; + +#[cfg(feature = "full")] +mod item; +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub use crate::item::{ + FnArg, ForeignItem, ForeignItemFn, ForeignItemMacro, ForeignItemStatic, ForeignItemType, + ImplItem, ImplItemConst, ImplItemFn, ImplItemMacro, ImplItemType, ImplRestriction, Item, + ItemConst, ItemEnum, ItemExternCrate, ItemFn, ItemForeignMod, ItemImpl, ItemMacro, ItemMod, + ItemStatic, ItemStruct, ItemTrait, ItemTraitAlias, ItemType, ItemUnion, ItemUse, Receiver, + Signature, StaticMutability, TraitItem, TraitItemConst, TraitItemFn, TraitItemMacro, + TraitItemType, UseGlob, UseGroup, UseName, UsePath, UseRename, UseTree, Variadic, +}; + +mod lifetime; +#[doc(inline)] +pub use crate::lifetime::Lifetime; + +mod lit; +#[doc(hidden)] // https://github.com/dtolnay/syn/issues/1566 +pub use crate::lit::StrStyle; +#[doc(inline)] +pub use crate::lit::{ + Lit, LitBool, LitByte, LitByteStr, LitCStr, LitChar, LitFloat, LitInt, LitStr, +}; + +#[cfg(feature = "parsing")] +mod lookahead; + +#[cfg(any(feature = "full", feature = "derive"))] +mod mac; +#[cfg(any(feature = "full", feature = "derive"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] +pub use crate::mac::{Macro, MacroDelimiter}; + +#[cfg(all(feature = "parsing", any(feature = "full", feature = "derive")))] +#[cfg_attr( + docsrs, + doc(cfg(all(feature = "parsing", any(feature = "full", feature = "derive")))) +)] +pub mod meta; + +#[cfg(any(feature = "full", feature = "derive"))] +mod op; +#[cfg(any(feature = "full", feature = 
"derive"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] +pub use crate::op::{BinOp, UnOp}; + +#[cfg(feature = "parsing")] +#[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] +pub mod parse; + +#[cfg(all(feature = "parsing", feature = "proc-macro"))] +mod parse_macro_input; + +#[cfg(all(feature = "parsing", feature = "printing"))] +mod parse_quote; + +#[cfg(feature = "full")] +mod pat; +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub use crate::pat::{ + FieldPat, Pat, PatConst, PatIdent, PatLit, PatMacro, PatOr, PatParen, PatPath, PatRange, + PatReference, PatRest, PatSlice, PatStruct, PatTuple, PatTupleStruct, PatType, PatWild, +}; + +#[cfg(any(feature = "full", feature = "derive"))] +mod path; +#[cfg(any(feature = "full", feature = "derive"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] +pub use crate::path::{ + AngleBracketedGenericArguments, AssocConst, AssocType, Constraint, GenericArgument, + ParenthesizedGenericArguments, Path, PathArguments, PathSegment, QSelf, +}; + +#[cfg(all( + any(feature = "full", feature = "derive"), + any(feature = "parsing", feature = "printing") +))] +mod precedence; + +#[cfg(all(any(feature = "full", feature = "derive"), feature = "printing"))] +mod print; + +pub mod punctuated; + +#[cfg(any(feature = "full", feature = "derive"))] +mod restriction; +#[cfg(any(feature = "full", feature = "derive"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] +pub use crate::restriction::{FieldMutability, VisRestricted, Visibility}; + +mod sealed; + +#[cfg(all(feature = "parsing", feature = "derive", not(feature = "full")))] +mod scan_expr; + +mod span; + +#[cfg(all(feature = "parsing", feature = "printing"))] +#[cfg_attr(docsrs, doc(cfg(all(feature = "parsing", feature = "printing"))))] +pub mod spanned; + +#[cfg(feature = "full")] +mod stmt; +#[cfg(feature = "full")] +#[cfg_attr(docsrs, doc(cfg(feature = "full")))] +pub use crate::stmt::{Block, Local, LocalInit, Stmt, StmtMacro}; + +mod thread; + +#[cfg(all(any(feature = "full", feature = "derive"), feature = "extra-traits"))] +mod tt; + +#[cfg(any(feature = "full", feature = "derive"))] +mod ty; +#[cfg(any(feature = "full", feature = "derive"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] +pub use crate::ty::{ + Abi, BareFnArg, BareVariadic, ReturnType, Type, TypeArray, TypeBareFn, TypeGroup, + TypeImplTrait, TypeInfer, TypeMacro, TypeNever, TypeParen, TypePath, TypePtr, TypeReference, + TypeSlice, TypeTraitObject, TypeTuple, +}; + +#[cfg(all(any(feature = "full", feature = "derive"), feature = "parsing"))] +mod verbatim; + +#[cfg(all(feature = "parsing", feature = "full"))] +mod whitespace; + +#[rustfmt::skip] // https://github.com/rust-lang/rustfmt/issues/6176 +mod gen { + /// Syntax tree traversal to transform the nodes of an owned syntax tree. + /// + /// Each method of the [`Fold`] trait is a hook that can be overridden to + /// customize the behavior when transforming the corresponding type of node. + /// By default, every method recursively visits the substructure of the + /// input by invoking the right visitor method of each of its fields. + /// + /// [`Fold`]: fold::Fold + /// + /// ``` + /// # use syn::{Attribute, BinOp, Expr, ExprBinary}; + /// # + /// pub trait Fold { + /// /* ... */ + /// + /// fn fold_expr_binary(&mut self, node: ExprBinary) -> ExprBinary { + /// fold_expr_binary(self, node) + /// } + /// + /// /* ... 
*/ + /// # fn fold_attribute(&mut self, node: Attribute) -> Attribute; + /// # fn fold_expr(&mut self, node: Expr) -> Expr; + /// # fn fold_bin_op(&mut self, node: BinOp) -> BinOp; + /// } + /// + /// pub fn fold_expr_binary<V>(v: &mut V, node: ExprBinary) -> ExprBinary + /// where + /// V: Fold + ?Sized, + /// { + /// ExprBinary { + /// attrs: node + /// .attrs + /// .into_iter() + /// .map(|attr| v.fold_attribute(attr)) + /// .collect(), + /// left: Box::new(v.fold_expr(*node.left)), + /// op: v.fold_bin_op(node.op), + /// right: Box::new(v.fold_expr(*node.right)), + /// } + /// } + /// + /// /* ... */ + /// ``` + /// + /// <br> + /// + /// # Example + /// + /// This fold inserts parentheses to fully parenthesizes any expression. + /// + /// ``` + /// // [dependencies] + /// // quote = "1.0" + /// // syn = { version = "2.0", features = ["fold", "full"] } + /// + /// use quote::quote; + /// use syn::fold::{fold_expr, Fold}; + /// use syn::{token, Expr, ExprParen}; + /// + /// struct ParenthesizeEveryExpr; + /// + /// impl Fold for ParenthesizeEveryExpr { + /// fn fold_expr(&mut self, expr: Expr) -> Expr { + /// Expr::Paren(ExprParen { + /// attrs: Vec::new(), + /// expr: Box::new(fold_expr(self, expr)), + /// paren_token: token::Paren::default(), + /// }) + /// } + /// } + /// + /// fn main() { + /// let code = quote! { a() + b(1) * c.d }; + /// let expr: Expr = syn::parse2(code).unwrap(); + /// let parenthesized = ParenthesizeEveryExpr.fold_expr(expr); + /// println!("{}", quote!(#parenthesized)); + /// + /// // Output: (((a)()) + (((b)((1))) * ((c).d))) + /// } + /// ``` + #[cfg(feature = "fold")] + #[cfg_attr(docsrs, doc(cfg(feature = "fold")))] + #[rustfmt::skip] + pub mod fold; + + /// Syntax tree traversal to walk a shared borrow of a syntax tree. + /// + /// Each method of the [`Visit`] trait is a hook that can be overridden to + /// customize the behavior when visiting the corresponding type of node. By + /// default, every method recursively visits the substructure of the input + /// by invoking the right visitor method of each of its fields. + /// + /// [`Visit`]: visit::Visit + /// + /// ``` + /// # use syn::{Attribute, BinOp, Expr, ExprBinary}; + /// # + /// pub trait Visit<'ast> { + /// /* ... */ + /// + /// fn visit_expr_binary(&mut self, node: &'ast ExprBinary) { + /// visit_expr_binary(self, node); + /// } + /// + /// /* ... */ + /// # fn visit_attribute(&mut self, node: &'ast Attribute); + /// # fn visit_expr(&mut self, node: &'ast Expr); + /// # fn visit_bin_op(&mut self, node: &'ast BinOp); + /// } + /// + /// pub fn visit_expr_binary<'ast, V>(v: &mut V, node: &'ast ExprBinary) + /// where + /// V: Visit<'ast> + ?Sized, + /// { + /// for attr in &node.attrs { + /// v.visit_attribute(attr); + /// } + /// v.visit_expr(&*node.left); + /// v.visit_bin_op(&node.op); + /// v.visit_expr(&*node.right); + /// } + /// + /// /* ... */ + /// ``` + /// + /// <br> + /// + /// # Example + /// + /// This visitor will print the name of every freestanding function in the + /// syntax tree, including nested functions. 
+ /// + /// ``` + /// // [dependencies] + /// // quote = "1.0" + /// // syn = { version = "2.0", features = ["full", "visit"] } + /// + /// use quote::quote; + /// use syn::visit::{self, Visit}; + /// use syn::{File, ItemFn}; + /// + /// struct FnVisitor; + /// + /// impl<'ast> Visit<'ast> for FnVisitor { + /// fn visit_item_fn(&mut self, node: &'ast ItemFn) { + /// println!("Function with name={}", node.sig.ident); + /// + /// // Delegate to the default impl to visit any nested functions. + /// visit::visit_item_fn(self, node); + /// } + /// } + /// + /// fn main() { + /// let code = quote! { + /// pub fn f() { + /// fn g() {} + /// } + /// }; + /// + /// let syntax_tree: File = syn::parse2(code).unwrap(); + /// FnVisitor.visit_file(&syntax_tree); + /// } + /// ``` + /// + /// The `'ast` lifetime on the input references means that the syntax tree + /// outlives the complete recursive visit call, so the visitor is allowed to + /// hold on to references into the syntax tree. + /// + /// ``` + /// use quote::quote; + /// use syn::visit::{self, Visit}; + /// use syn::{File, ItemFn}; + /// + /// struct FnVisitor<'ast> { + /// functions: Vec<&'ast ItemFn>, + /// } + /// + /// impl<'ast> Visit<'ast> for FnVisitor<'ast> { + /// fn visit_item_fn(&mut self, node: &'ast ItemFn) { + /// self.functions.push(node); + /// visit::visit_item_fn(self, node); + /// } + /// } + /// + /// fn main() { + /// let code = quote! { + /// pub fn f() { + /// fn g() {} + /// } + /// }; + /// + /// let syntax_tree: File = syn::parse2(code).unwrap(); + /// let mut visitor = FnVisitor { functions: Vec::new() }; + /// visitor.visit_file(&syntax_tree); + /// for f in visitor.functions { + /// println!("Function with name={}", f.sig.ident); + /// } + /// } + /// ``` + #[cfg(feature = "visit")] + #[cfg_attr(docsrs, doc(cfg(feature = "visit")))] + #[rustfmt::skip] + pub mod visit; + + /// Syntax tree traversal to mutate an exclusive borrow of a syntax tree in + /// place. + /// + /// Each method of the [`VisitMut`] trait is a hook that can be overridden + /// to customize the behavior when mutating the corresponding type of node. + /// By default, every method recursively visits the substructure of the + /// input by invoking the right visitor method of each of its fields. + /// + /// [`VisitMut`]: visit_mut::VisitMut + /// + /// ``` + /// # use syn::{Attribute, BinOp, Expr, ExprBinary}; + /// # + /// pub trait VisitMut { + /// /* ... */ + /// + /// fn visit_expr_binary_mut(&mut self, node: &mut ExprBinary) { + /// visit_expr_binary_mut(self, node); + /// } + /// + /// /* ... */ + /// # fn visit_attribute_mut(&mut self, node: &mut Attribute); + /// # fn visit_expr_mut(&mut self, node: &mut Expr); + /// # fn visit_bin_op_mut(&mut self, node: &mut BinOp); + /// } + /// + /// pub fn visit_expr_binary_mut<V>(v: &mut V, node: &mut ExprBinary) + /// where + /// V: VisitMut + ?Sized, + /// { + /// for attr in &mut node.attrs { + /// v.visit_attribute_mut(attr); + /// } + /// v.visit_expr_mut(&mut *node.left); + /// v.visit_bin_op_mut(&mut node.op); + /// v.visit_expr_mut(&mut *node.right); + /// } + /// + /// /* ... */ + /// ``` + /// + /// <br> + /// + /// # Example + /// + /// This mut visitor replace occurrences of u256 suffixed integer literals + /// like `999u256` with a macro invocation `bigint::u256!(999)`. 
+ /// + /// ``` + /// // [dependencies] + /// // quote = "1.0" + /// // syn = { version = "2.0", features = ["full", "visit-mut"] } + /// + /// use quote::quote; + /// use syn::visit_mut::{self, VisitMut}; + /// use syn::{parse_quote, Expr, File, Lit, LitInt}; + /// + /// struct BigintReplace; + /// + /// impl VisitMut for BigintReplace { + /// fn visit_expr_mut(&mut self, node: &mut Expr) { + /// if let Expr::Lit(expr) = &node { + /// if let Lit::Int(int) = &expr.lit { + /// if int.suffix() == "u256" { + /// let digits = int.base10_digits(); + /// let unsuffixed: LitInt = syn::parse_str(digits).unwrap(); + /// *node = parse_quote!(bigint::u256!(#unsuffixed)); + /// return; + /// } + /// } + /// } + /// + /// // Delegate to the default impl to visit nested expressions. + /// visit_mut::visit_expr_mut(self, node); + /// } + /// } + /// + /// fn main() { + /// let code = quote! { + /// fn main() { + /// let _ = 999u256; + /// } + /// }; + /// + /// let mut syntax_tree: File = syn::parse2(code).unwrap(); + /// BigintReplace.visit_file_mut(&mut syntax_tree); + /// println!("{}", quote!(#syntax_tree)); + /// } + /// ``` + #[cfg(feature = "visit-mut")] + #[cfg_attr(docsrs, doc(cfg(feature = "visit-mut")))] + #[rustfmt::skip] + pub mod visit_mut; + + #[cfg(feature = "clone-impls")] + #[rustfmt::skip] + mod clone; + + #[cfg(feature = "extra-traits")] + #[rustfmt::skip] + mod debug; + + #[cfg(feature = "extra-traits")] + #[rustfmt::skip] + mod eq; + + #[cfg(feature = "extra-traits")] + #[rustfmt::skip] + mod hash; +} + +#[cfg(feature = "fold")] +#[cfg_attr(docsrs, doc(cfg(feature = "fold")))] +pub use crate::gen::fold; + +#[cfg(feature = "visit")] +#[cfg_attr(docsrs, doc(cfg(feature = "visit")))] +pub use crate::gen::visit; + +#[cfg(feature = "visit-mut")] +#[cfg_attr(docsrs, doc(cfg(feature = "visit-mut")))] +pub use crate::gen::visit_mut; + +// Not public API. +#[doc(hidden)] +#[path = "export.rs"] +pub mod __private; + +/// Parse tokens of source code into the chosen syntax tree node. +/// +/// This is preferred over parsing a string because tokens are able to preserve +/// information about where in the user's code they were originally written (the +/// "span" of the token), possibly allowing the compiler to produce better error +/// messages. +/// +/// This function parses a `proc_macro::TokenStream` which is the type used for +/// interop with the compiler in a procedural macro. To parse a +/// `proc_macro2::TokenStream`, use [`syn::parse2`] instead. +/// +/// [`syn::parse2`]: parse2 +/// +/// This function enforces that the input is fully parsed. If there are any +/// unparsed tokens at the end of the stream, an error is returned. +#[cfg(all(feature = "parsing", feature = "proc-macro"))] +#[cfg_attr(docsrs, doc(cfg(all(feature = "parsing", feature = "proc-macro"))))] +pub fn parse<T: parse::Parse>(tokens: proc_macro::TokenStream) -> Result<T> { + parse::Parser::parse(T::parse, tokens) +} + +/// Parse a proc-macro2 token stream into the chosen syntax tree node. +/// +/// This function parses a `proc_macro2::TokenStream` which is commonly useful +/// when the input comes from a node of the Syn syntax tree, for example the +/// body tokens of a [`Macro`] node. When in a procedural macro parsing the +/// `proc_macro::TokenStream` provided by the compiler, use [`syn::parse`] +/// instead. +/// +/// [`syn::parse`]: parse() +/// +/// This function enforces that the input is fully parsed. If there are any +/// unparsed tokens at the end of the stream, an error is returned. 
+#[cfg(feature = "parsing")] +#[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] +pub fn parse2<T: parse::Parse>(tokens: proc_macro2::TokenStream) -> Result<T> { + parse::Parser::parse2(T::parse, tokens) +} + +/// Parse a string of Rust code into the chosen syntax tree node. +/// +/// This function enforces that the input is fully parsed. If there are any +/// unparsed tokens at the end of the stream, an error is returned. +/// +/// # Hygiene +/// +/// Every span in the resulting syntax tree will be set to resolve at the macro +/// call site. +/// +/// # Examples +/// +/// ``` +/// use syn::{Expr, Result}; +/// +/// fn run() -> Result<()> { +/// let code = "assert_eq!(u8::max_value(), 255)"; +/// let expr = syn::parse_str::<Expr>(code)?; +/// println!("{:#?}", expr); +/// Ok(()) +/// } +/// # +/// # run().unwrap(); +/// ``` +#[cfg(feature = "parsing")] +#[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] +pub fn parse_str<T: parse::Parse>(s: &str) -> Result<T> { + parse::Parser::parse_str(T::parse, s) +} + +/// Parse the content of a file of Rust code. +/// +/// This is different from `syn::parse_str::<File>(content)` in two ways: +/// +/// - It discards a leading byte order mark `\u{FEFF}` if the file has one. +/// - It preserves the shebang line of the file, such as `#!/usr/bin/env rustx`. +/// +/// If present, either of these would be an error using `from_str`. +/// +/// # Examples +/// +/// ```no_run +/// use std::error::Error; +/// use std::fs; +/// use std::io::Read; +/// +/// fn run() -> Result<(), Box<dyn Error>> { +/// let content = fs::read_to_string("path/to/code.rs")?; +/// let ast = syn::parse_file(&content)?; +/// if let Some(shebang) = ast.shebang { +/// println!("{}", shebang); +/// } +/// println!("{} items", ast.items.len()); +/// +/// Ok(()) +/// } +/// # +/// # run().unwrap(); +/// ``` +#[cfg(all(feature = "parsing", feature = "full"))] +#[cfg_attr(docsrs, doc(cfg(all(feature = "parsing", feature = "full"))))] +pub fn parse_file(mut content: &str) -> Result<File> { + // Strip the BOM if it is present + const BOM: &str = "\u{feff}"; + if content.starts_with(BOM) { + content = &content[BOM.len()..]; + } + + let mut shebang = None; + if content.starts_with("#!") { + let rest = whitespace::skip(&content[2..]); + if !rest.starts_with('[') { + if let Some(idx) = content.find('\n') { + shebang = Some(content[..idx].to_string()); + content = &content[idx..]; + } else { + shebang = Some(content.to_string()); + content = ""; + } + } + } + + let mut file: File = parse_str(content)?; + file.shebang = shebang; + Ok(file) +} diff --git a/vendor/syn/src/lifetime.rs b/vendor/syn/src/lifetime.rs new file mode 100644 index 00000000000000..248af5aaad54f8 --- /dev/null +++ b/vendor/syn/src/lifetime.rs @@ -0,0 +1,155 @@ +#[cfg(feature = "parsing")] +use crate::lookahead; +use proc_macro2::{Ident, Span}; +use std::cmp::Ordering; +use std::fmt::{self, Display}; +use std::hash::{Hash, Hasher}; + +/// A Rust lifetime: `'a`. +/// +/// Lifetime names must conform to the following rules: +/// +/// - Must start with an apostrophe. +/// - Must not consist of just an apostrophe: `'`. +/// - Character after the apostrophe must be `_` or a Unicode code point with +/// the XID_Start property. +/// - All following characters must be Unicode code points with the XID_Continue +/// property. +pub struct Lifetime { + pub apostrophe: Span, + pub ident: Ident, +} + +impl Lifetime { + /// # Panics + /// + /// Panics if the lifetime does not conform to the bulleted rules above. 
+ /// + /// # Invocation + /// + /// ``` + /// # use proc_macro2::Span; + /// # use syn::Lifetime; + /// # + /// # fn f() -> Lifetime { + /// Lifetime::new("'a", Span::call_site()) + /// # } + /// ``` + pub fn new(symbol: &str, span: Span) -> Self { + if !symbol.starts_with('\'') { + panic!( + "lifetime name must start with apostrophe as in \"'a\", got {:?}", + symbol + ); + } + + if symbol == "'" { + panic!("lifetime name must not be empty"); + } + + if !crate::ident::xid_ok(&symbol[1..]) { + panic!("{:?} is not a valid lifetime name", symbol); + } + + Lifetime { + apostrophe: span, + ident: Ident::new(&symbol[1..], span), + } + } + + pub fn span(&self) -> Span { + self.apostrophe + .join(self.ident.span()) + .unwrap_or(self.apostrophe) + } + + pub fn set_span(&mut self, span: Span) { + self.apostrophe = span; + self.ident.set_span(span); + } +} + +impl Display for Lifetime { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + "'".fmt(formatter)?; + self.ident.fmt(formatter) + } +} + +impl Clone for Lifetime { + fn clone(&self) -> Self { + Lifetime { + apostrophe: self.apostrophe, + ident: self.ident.clone(), + } + } +} + +impl PartialEq for Lifetime { + fn eq(&self, other: &Lifetime) -> bool { + self.ident.eq(&other.ident) + } +} + +impl Eq for Lifetime {} + +impl PartialOrd for Lifetime { + fn partial_cmp(&self, other: &Lifetime) -> Option<Ordering> { + Some(self.cmp(other)) + } +} + +impl Ord for Lifetime { + fn cmp(&self, other: &Lifetime) -> Ordering { + self.ident.cmp(&other.ident) + } +} + +impl Hash for Lifetime { + fn hash<H: Hasher>(&self, h: &mut H) { + self.ident.hash(h); + } +} + +#[cfg(feature = "parsing")] +pub_if_not_doc! { + #[doc(hidden)] + #[allow(non_snake_case)] + pub fn Lifetime(marker: lookahead::TokenMarker) -> Lifetime { + match marker {} + } +} + +#[cfg(feature = "parsing")] +pub(crate) mod parsing { + use crate::error::Result; + use crate::lifetime::Lifetime; + use crate::parse::{Parse, ParseStream}; + + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + impl Parse for Lifetime { + fn parse(input: ParseStream) -> Result<Self> { + input.step(|cursor| { + cursor + .lifetime() + .ok_or_else(|| cursor.error("expected lifetime")) + }) + } + } +} + +#[cfg(feature = "printing")] +mod printing { + use crate::ext::PunctExt as _; + use crate::lifetime::Lifetime; + use proc_macro2::{Punct, Spacing, TokenStream}; + use quote::{ToTokens, TokenStreamExt as _}; + + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for Lifetime { + fn to_tokens(&self, tokens: &mut TokenStream) { + tokens.append(Punct::new_spanned('\'', Spacing::Joint, self.apostrophe)); + self.ident.to_tokens(tokens); + } + } +} diff --git a/vendor/syn/src/lit.rs b/vendor/syn/src/lit.rs new file mode 100644 index 00000000000000..369c3a12d1e982 --- /dev/null +++ b/vendor/syn/src/lit.rs @@ -0,0 +1,1918 @@ +#[cfg(feature = "parsing")] +use crate::ext::TokenStreamExt as _; +#[cfg(feature = "parsing")] +use crate::lookahead; +#[cfg(feature = "parsing")] +use crate::parse::{Parse, Parser}; +use crate::{Error, Result}; +use proc_macro2::{Ident, Literal, Span}; +#[cfg(feature = "parsing")] +use proc_macro2::{TokenStream, TokenTree}; +use std::ffi::{CStr, CString}; +use std::fmt::{self, Display}; +#[cfg(feature = "extra-traits")] +use std::hash::{Hash, Hasher}; +use std::str::{self, FromStr}; + +ast_enum_of_structs! { + /// A Rust literal such as a string or integer or boolean. + /// + /// # Syntax tree enum + /// + /// This type is a [syntax tree enum]. 
+ /// + /// [syntax tree enum]: crate::expr::Expr#syntax-tree-enums + #[non_exhaustive] + pub enum Lit { + /// A UTF-8 string literal: `"foo"`. + Str(LitStr), + + /// A byte string literal: `b"foo"`. + ByteStr(LitByteStr), + + /// A nul-terminated C-string literal: `c"foo"`. + CStr(LitCStr), + + /// A byte literal: `b'f'`. + Byte(LitByte), + + /// A character literal: `'a'`. + Char(LitChar), + + /// An integer literal: `1` or `1u16`. + Int(LitInt), + + /// A floating point literal: `1f64` or `1.0e10f64`. + /// + /// Must be finite. May not be infinite or NaN. + Float(LitFloat), + + /// A boolean literal: `true` or `false`. + Bool(LitBool), + + /// A raw token literal not interpreted by Syn. + Verbatim(Literal), + } +} + +ast_struct! { + /// A UTF-8 string literal: `"foo"`. + pub struct LitStr { + repr: Box<LitRepr>, + } +} + +ast_struct! { + /// A byte string literal: `b"foo"`. + pub struct LitByteStr { + repr: Box<LitRepr>, + } +} + +ast_struct! { + /// A nul-terminated C-string literal: `c"foo"`. + pub struct LitCStr { + repr: Box<LitRepr>, + } +} + +ast_struct! { + /// A byte literal: `b'f'`. + pub struct LitByte { + repr: Box<LitRepr>, + } +} + +ast_struct! { + /// A character literal: `'a'`. + pub struct LitChar { + repr: Box<LitRepr>, + } +} + +struct LitRepr { + token: Literal, + suffix: Box<str>, +} + +ast_struct! { + /// An integer literal: `1` or `1u16`. + pub struct LitInt { + repr: Box<LitIntRepr>, + } +} + +struct LitIntRepr { + token: Literal, + digits: Box<str>, + suffix: Box<str>, +} + +ast_struct! { + /// A floating point literal: `1f64` or `1.0e10f64`. + /// + /// Must be finite. May not be infinite or NaN. + pub struct LitFloat { + repr: Box<LitFloatRepr>, + } +} + +struct LitFloatRepr { + token: Literal, + digits: Box<str>, + suffix: Box<str>, +} + +ast_struct! { + /// A boolean literal: `true` or `false`. + pub struct LitBool { + pub value: bool, + pub span: Span, + } +} + +impl LitStr { + pub fn new(value: &str, span: Span) -> Self { + let mut token = Literal::string(value); + token.set_span(span); + LitStr { + repr: Box::new(LitRepr { + token, + suffix: Box::<str>::default(), + }), + } + } + + pub fn value(&self) -> String { + let repr = self.repr.token.to_string(); + let (value, _suffix) = value::parse_lit_str(&repr).unwrap(); + String::from(value) + } + + /// Parse a syntax tree node from the content of this string literal. + /// + /// All spans in the syntax tree will point to the span of this `LitStr`. + /// + /// # Example + /// + /// ``` + /// use syn::{Attribute, Error, Expr, Lit, Meta, Path, Result}; + /// + /// // Parses the path from an attribute that looks like: + /// // + /// // #[path = "a::b::c"] + /// // + /// // or returns `None` if the input is some other attribute. + /// fn get_path(attr: &Attribute) -> Result<Option<Path>> { + /// if !attr.path().is_ident("path") { + /// return Ok(None); + /// } + /// + /// if let Meta::NameValue(meta) = &attr.meta { + /// if let Expr::Lit(expr) = &meta.value { + /// if let Lit::Str(lit_str) = &expr.lit { + /// return lit_str.parse().map(Some); + /// } + /// } + /// } + /// + /// let message = "expected #[path = \"...\"]"; + /// Err(Error::new_spanned(attr, message)) + /// } + /// ``` + #[cfg(feature = "parsing")] + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + pub fn parse<T: Parse>(&self) -> Result<T> { + self.parse_with(T::parse) + } + + /// Invoke parser on the content of this string literal. + /// + /// All spans in the syntax tree will point to the span of this `LitStr`. 
+ /// + /// # Example + /// + /// ``` + /// # use proc_macro2::Span; + /// # use syn::{LitStr, Result}; + /// # + /// # fn main() -> Result<()> { + /// # let lit_str = LitStr::new("a::b::c", Span::call_site()); + /// # + /// # const IGNORE: &str = stringify! { + /// let lit_str: LitStr = /* ... */; + /// # }; + /// + /// // Parse a string literal like "a::b::c" into a Path, not allowing + /// // generic arguments on any of the path segments. + /// let basic_path = lit_str.parse_with(syn::Path::parse_mod_style)?; + /// # + /// # Ok(()) + /// # } + /// ``` + #[cfg(feature = "parsing")] + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + pub fn parse_with<F: Parser>(&self, parser: F) -> Result<F::Output> { + use proc_macro2::Group; + + // Token stream with every span replaced by the given one. + fn respan_token_stream(stream: TokenStream, span: Span) -> TokenStream { + let mut tokens = TokenStream::new(); + for token in stream { + tokens.append(respan_token_tree(token, span)); + } + tokens + } + + // Token tree with every span replaced by the given one. + fn respan_token_tree(mut token: TokenTree, span: Span) -> TokenTree { + match &mut token { + TokenTree::Group(g) => { + let stream = respan_token_stream(g.stream(), span); + *g = Group::new(g.delimiter(), stream); + g.set_span(span); + } + other => other.set_span(span), + } + token + } + + // Parse string literal into a token stream with every span equal to the + // original literal's span. + let span = self.span(); + let mut tokens = TokenStream::from_str(&self.value())?; + tokens = respan_token_stream(tokens, span); + + let result = crate::parse::parse_scoped(parser, span, tokens)?; + + let suffix = self.suffix(); + if !suffix.is_empty() { + return Err(Error::new( + self.span(), + format!("unexpected suffix `{}` on string literal", suffix), + )); + } + + Ok(result) + } + + pub fn span(&self) -> Span { + self.repr.token.span() + } + + pub fn set_span(&mut self, span: Span) { + self.repr.token.set_span(span); + } + + pub fn suffix(&self) -> &str { + &self.repr.suffix + } + + pub fn token(&self) -> Literal { + self.repr.token.clone() + } +} + +impl LitByteStr { + pub fn new(value: &[u8], span: Span) -> Self { + let mut token = Literal::byte_string(value); + token.set_span(span); + LitByteStr { + repr: Box::new(LitRepr { + token, + suffix: Box::<str>::default(), + }), + } + } + + pub fn value(&self) -> Vec<u8> { + let repr = self.repr.token.to_string(); + let (value, _suffix) = value::parse_lit_byte_str(&repr).unwrap(); + value + } + + pub fn span(&self) -> Span { + self.repr.token.span() + } + + pub fn set_span(&mut self, span: Span) { + self.repr.token.set_span(span); + } + + pub fn suffix(&self) -> &str { + &self.repr.suffix + } + + pub fn token(&self) -> Literal { + self.repr.token.clone() + } +} + +impl LitCStr { + pub fn new(value: &CStr, span: Span) -> Self { + let mut token = Literal::c_string(value); + token.set_span(span); + LitCStr { + repr: Box::new(LitRepr { + token, + suffix: Box::<str>::default(), + }), + } + } + + pub fn value(&self) -> CString { + let repr = self.repr.token.to_string(); + let (value, _suffix) = value::parse_lit_c_str(&repr).unwrap(); + value + } + + pub fn span(&self) -> Span { + self.repr.token.span() + } + + pub fn set_span(&mut self, span: Span) { + self.repr.token.set_span(span); + } + + pub fn suffix(&self) -> &str { + &self.repr.suffix + } + + pub fn token(&self) -> Literal { + self.repr.token.clone() + } +} + +impl LitByte { + pub fn new(value: u8, span: Span) -> Self { + let mut token = 
Literal::u8_suffixed(value); + token.set_span(span); + LitByte { + repr: Box::new(LitRepr { + token, + suffix: Box::<str>::default(), + }), + } + } + + pub fn value(&self) -> u8 { + let repr = self.repr.token.to_string(); + let (value, _suffix) = value::parse_lit_byte(&repr).unwrap(); + value + } + + pub fn span(&self) -> Span { + self.repr.token.span() + } + + pub fn set_span(&mut self, span: Span) { + self.repr.token.set_span(span); + } + + pub fn suffix(&self) -> &str { + &self.repr.suffix + } + + pub fn token(&self) -> Literal { + self.repr.token.clone() + } +} + +impl LitChar { + pub fn new(value: char, span: Span) -> Self { + let mut token = Literal::character(value); + token.set_span(span); + LitChar { + repr: Box::new(LitRepr { + token, + suffix: Box::<str>::default(), + }), + } + } + + pub fn value(&self) -> char { + let repr = self.repr.token.to_string(); + let (value, _suffix) = value::parse_lit_char(&repr).unwrap(); + value + } + + pub fn span(&self) -> Span { + self.repr.token.span() + } + + pub fn set_span(&mut self, span: Span) { + self.repr.token.set_span(span); + } + + pub fn suffix(&self) -> &str { + &self.repr.suffix + } + + pub fn token(&self) -> Literal { + self.repr.token.clone() + } +} + +impl LitInt { + #[track_caller] + pub fn new(repr: &str, span: Span) -> Self { + let (digits, suffix) = match value::parse_lit_int(repr) { + Some(parse) => parse, + None => panic!("not an integer literal: `{}`", repr), + }; + + let mut token: Literal = repr.parse().unwrap(); + token.set_span(span); + LitInt { + repr: Box::new(LitIntRepr { + token, + digits, + suffix, + }), + } + } + + pub fn base10_digits(&self) -> &str { + &self.repr.digits + } + + /// Parses the literal into a selected number type. + /// + /// This is equivalent to `lit.base10_digits().parse()` except that the + /// resulting errors will be correctly spanned to point to the literal token + /// in the macro input. 
+ /// + /// ``` + /// use syn::LitInt; + /// use syn::parse::{Parse, ParseStream, Result}; + /// + /// struct Port { + /// value: u16, + /// } + /// + /// impl Parse for Port { + /// fn parse(input: ParseStream) -> Result<Self> { + /// let lit: LitInt = input.parse()?; + /// let value = lit.base10_parse::<u16>()?; + /// Ok(Port { value }) + /// } + /// } + /// ``` + pub fn base10_parse<N>(&self) -> Result<N> + where + N: FromStr, + N::Err: Display, + { + self.base10_digits() + .parse() + .map_err(|err| Error::new(self.span(), err)) + } + + pub fn suffix(&self) -> &str { + &self.repr.suffix + } + + pub fn span(&self) -> Span { + self.repr.token.span() + } + + pub fn set_span(&mut self, span: Span) { + self.repr.token.set_span(span); + } + + pub fn token(&self) -> Literal { + self.repr.token.clone() + } +} + +impl From<Literal> for LitInt { + #[track_caller] + fn from(token: Literal) -> Self { + let repr = token.to_string(); + if let Some((digits, suffix)) = value::parse_lit_int(&repr) { + LitInt { + repr: Box::new(LitIntRepr { + token, + digits, + suffix, + }), + } + } else { + panic!("not an integer literal: `{}`", repr); + } + } +} + +impl Display for LitInt { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + self.repr.token.fmt(formatter) + } +} + +impl LitFloat { + #[track_caller] + pub fn new(repr: &str, span: Span) -> Self { + let (digits, suffix) = match value::parse_lit_float(repr) { + Some(parse) => parse, + None => panic!("not a float literal: `{}`", repr), + }; + + let mut token: Literal = repr.parse().unwrap(); + token.set_span(span); + LitFloat { + repr: Box::new(LitFloatRepr { + token, + digits, + suffix, + }), + } + } + + pub fn base10_digits(&self) -> &str { + &self.repr.digits + } + + pub fn base10_parse<N>(&self) -> Result<N> + where + N: FromStr, + N::Err: Display, + { + self.base10_digits() + .parse() + .map_err(|err| Error::new(self.span(), err)) + } + + pub fn suffix(&self) -> &str { + &self.repr.suffix + } + + pub fn span(&self) -> Span { + self.repr.token.span() + } + + pub fn set_span(&mut self, span: Span) { + self.repr.token.set_span(span); + } + + pub fn token(&self) -> Literal { + self.repr.token.clone() + } +} + +impl From<Literal> for LitFloat { + #[track_caller] + fn from(token: Literal) -> Self { + let repr = token.to_string(); + if let Some((digits, suffix)) = value::parse_lit_float(&repr) { + LitFloat { + repr: Box::new(LitFloatRepr { + token, + digits, + suffix, + }), + } + } else { + panic!("not a float literal: `{}`", repr); + } + } +} + +impl Display for LitFloat { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + self.repr.token.fmt(formatter) + } +} + +impl LitBool { + pub fn new(value: bool, span: Span) -> Self { + LitBool { value, span } + } + + pub fn value(&self) -> bool { + self.value + } + + pub fn span(&self) -> Span { + self.span + } + + pub fn set_span(&mut self, span: Span) { + self.span = span; + } + + pub fn token(&self) -> Ident { + let s = if self.value { "true" } else { "false" }; + Ident::new(s, self.span) + } +} + +#[cfg(feature = "extra-traits")] +mod debug_impls { + use crate::lit::{LitBool, LitByte, LitByteStr, LitCStr, LitChar, LitFloat, LitInt, LitStr}; + use std::fmt::{self, Debug}; + + #[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] + impl Debug for LitStr { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + self.debug(formatter, "LitStr") + } + } + + impl LitStr { + pub(crate) fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { + formatter + 
.debug_struct(name) + .field("token", &format_args!("{}", self.repr.token)) + .finish() + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] + impl Debug for LitByteStr { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + self.debug(formatter, "LitByteStr") + } + } + + impl LitByteStr { + pub(crate) fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { + formatter + .debug_struct(name) + .field("token", &format_args!("{}", self.repr.token)) + .finish() + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] + impl Debug for LitCStr { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + self.debug(formatter, "LitCStr") + } + } + + impl LitCStr { + pub(crate) fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { + formatter + .debug_struct(name) + .field("token", &format_args!("{}", self.repr.token)) + .finish() + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] + impl Debug for LitByte { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + self.debug(formatter, "LitByte") + } + } + + impl LitByte { + pub(crate) fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { + formatter + .debug_struct(name) + .field("token", &format_args!("{}", self.repr.token)) + .finish() + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] + impl Debug for LitChar { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + self.debug(formatter, "LitChar") + } + } + + impl LitChar { + pub(crate) fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { + formatter + .debug_struct(name) + .field("token", &format_args!("{}", self.repr.token)) + .finish() + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] + impl Debug for LitInt { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + self.debug(formatter, "LitInt") + } + } + + impl LitInt { + pub(crate) fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { + formatter + .debug_struct(name) + .field("token", &format_args!("{}", self.repr.token)) + .finish() + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] + impl Debug for LitFloat { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + self.debug(formatter, "LitFloat") + } + } + + impl LitFloat { + pub(crate) fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { + formatter + .debug_struct(name) + .field("token", &format_args!("{}", self.repr.token)) + .finish() + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] + impl Debug for LitBool { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + self.debug(formatter, "LitBool") + } + } + + impl LitBool { + pub(crate) fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { + formatter + .debug_struct(name) + .field("value", &self.value) + .finish() + } + } +} + +#[cfg(feature = "clone-impls")] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for LitRepr { + fn clone(&self) -> Self { + LitRepr { + token: self.token.clone(), + suffix: self.suffix.clone(), + } + } +} + +#[cfg(feature = "clone-impls")] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for LitIntRepr { + fn clone(&self) -> Self { + LitIntRepr { + token: self.token.clone(), + digits: self.digits.clone(), + suffix: self.suffix.clone(), + } + } +} + +#[cfg(feature = "clone-impls")] +#[cfg_attr(docsrs, doc(cfg(feature = 
"clone-impls")))] +impl Clone for LitFloatRepr { + fn clone(&self) -> Self { + LitFloatRepr { + token: self.token.clone(), + digits: self.digits.clone(), + suffix: self.suffix.clone(), + } + } +} + +macro_rules! lit_extra_traits { + ($ty:ident) => { + #[cfg(feature = "clone-impls")] + #[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] + impl Clone for $ty { + fn clone(&self) -> Self { + $ty { + repr: self.repr.clone(), + } + } + } + + #[cfg(feature = "extra-traits")] + #[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] + impl PartialEq for $ty { + fn eq(&self, other: &Self) -> bool { + self.repr.token.to_string() == other.repr.token.to_string() + } + } + + #[cfg(feature = "extra-traits")] + #[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] + impl Hash for $ty { + fn hash<H>(&self, state: &mut H) + where + H: Hasher, + { + self.repr.token.to_string().hash(state); + } + } + + #[cfg(feature = "parsing")] + pub_if_not_doc! { + #[doc(hidden)] + #[allow(non_snake_case)] + pub fn $ty(marker: lookahead::TokenMarker) -> $ty { + match marker {} + } + } + }; +} + +lit_extra_traits!(LitStr); +lit_extra_traits!(LitByteStr); +lit_extra_traits!(LitCStr); +lit_extra_traits!(LitByte); +lit_extra_traits!(LitChar); +lit_extra_traits!(LitInt); +lit_extra_traits!(LitFloat); + +#[cfg(feature = "parsing")] +pub_if_not_doc! { + #[doc(hidden)] + #[allow(non_snake_case)] + pub fn LitBool(marker: lookahead::TokenMarker) -> LitBool { + match marker {} + } +} + +/// The style of a string literal, either plain quoted or a raw string like +/// `r##"data"##`. +#[doc(hidden)] // https://github.com/dtolnay/syn/issues/1566 +pub enum StrStyle { + /// An ordinary string like `"data"`. + Cooked, + /// A raw string like `r##"data"##`. + /// + /// The unsigned integer is the number of `#` symbols used. + Raw(usize), +} + +#[cfg(feature = "parsing")] +pub_if_not_doc! 
{ + #[doc(hidden)] + #[allow(non_snake_case)] + pub fn Lit(marker: lookahead::TokenMarker) -> Lit { + match marker {} + } +} + +#[cfg(feature = "parsing")] +pub(crate) mod parsing { + use crate::buffer::Cursor; + use crate::error::Result; + use crate::lit::{ + value, Lit, LitBool, LitByte, LitByteStr, LitCStr, LitChar, LitFloat, LitFloatRepr, LitInt, + LitIntRepr, LitStr, + }; + use crate::parse::{Parse, ParseStream, Unexpected}; + use crate::token::{self, Token}; + use proc_macro2::{Literal, Punct, Span}; + use std::cell::Cell; + use std::rc::Rc; + + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + impl Parse for Lit { + fn parse(input: ParseStream) -> Result<Self> { + input.step(|cursor| { + if let Some((lit, rest)) = cursor.literal() { + return Ok((Lit::new(lit), rest)); + } + + if let Some((ident, rest)) = cursor.ident() { + let value = ident == "true"; + if value || ident == "false" { + let lit_bool = LitBool { + value, + span: ident.span(), + }; + return Ok((Lit::Bool(lit_bool), rest)); + } + } + + if let Some((punct, rest)) = cursor.punct() { + if punct.as_char() == '-' { + if let Some((lit, rest)) = parse_negative_lit(punct, rest) { + return Ok((lit, rest)); + } + } + } + + Err(cursor.error("expected literal")) + }) + } + } + + fn parse_negative_lit(neg: Punct, cursor: Cursor) -> Option<(Lit, Cursor)> { + let (lit, rest) = cursor.literal()?; + + let mut span = neg.span(); + span = span.join(lit.span()).unwrap_or(span); + + let mut repr = lit.to_string(); + repr.insert(0, '-'); + + if let Some((digits, suffix)) = value::parse_lit_int(&repr) { + let mut token: Literal = repr.parse().unwrap(); + token.set_span(span); + return Some(( + Lit::Int(LitInt { + repr: Box::new(LitIntRepr { + token, + digits, + suffix, + }), + }), + rest, + )); + } + + let (digits, suffix) = value::parse_lit_float(&repr)?; + let mut token: Literal = repr.parse().unwrap(); + token.set_span(span); + Some(( + Lit::Float(LitFloat { + repr: Box::new(LitFloatRepr { + token, + digits, + suffix, + }), + }), + rest, + )) + } + + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + impl Parse for LitStr { + fn parse(input: ParseStream) -> Result<Self> { + let head = input.fork(); + match input.parse() { + Ok(Lit::Str(lit)) => Ok(lit), + _ => Err(head.error("expected string literal")), + } + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + impl Parse for LitByteStr { + fn parse(input: ParseStream) -> Result<Self> { + let head = input.fork(); + match input.parse() { + Ok(Lit::ByteStr(lit)) => Ok(lit), + _ => Err(head.error("expected byte string literal")), + } + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + impl Parse for LitCStr { + fn parse(input: ParseStream) -> Result<Self> { + let head = input.fork(); + match input.parse() { + Ok(Lit::CStr(lit)) => Ok(lit), + _ => Err(head.error("expected C string literal")), + } + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + impl Parse for LitByte { + fn parse(input: ParseStream) -> Result<Self> { + let head = input.fork(); + match input.parse() { + Ok(Lit::Byte(lit)) => Ok(lit), + _ => Err(head.error("expected byte literal")), + } + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + impl Parse for LitChar { + fn parse(input: ParseStream) -> Result<Self> { + let head = input.fork(); + match input.parse() { + Ok(Lit::Char(lit)) => Ok(lit), + _ => Err(head.error("expected character literal")), + } + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + impl Parse for LitInt { + fn parse(input: ParseStream) -> 
Result<Self> { + let head = input.fork(); + match input.parse() { + Ok(Lit::Int(lit)) => Ok(lit), + _ => Err(head.error("expected integer literal")), + } + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + impl Parse for LitFloat { + fn parse(input: ParseStream) -> Result<Self> { + let head = input.fork(); + match input.parse() { + Ok(Lit::Float(lit)) => Ok(lit), + _ => Err(head.error("expected floating point literal")), + } + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + impl Parse for LitBool { + fn parse(input: ParseStream) -> Result<Self> { + let head = input.fork(); + match input.parse() { + Ok(Lit::Bool(lit)) => Ok(lit), + _ => Err(head.error("expected boolean literal")), + } + } + } + + fn peek_impl(cursor: Cursor, peek: fn(ParseStream) -> bool) -> bool { + let scope = Span::call_site(); + let unexpected = Rc::new(Cell::new(Unexpected::None)); + let buffer = crate::parse::new_parse_buffer(scope, cursor, unexpected); + peek(&buffer) + } + + macro_rules! impl_token { + ($display:literal $name:ty) => { + impl Token for $name { + fn peek(cursor: Cursor) -> bool { + fn peek(input: ParseStream) -> bool { + <$name as Parse>::parse(input).is_ok() + } + peek_impl(cursor, peek) + } + + fn display() -> &'static str { + $display + } + } + + impl token::private::Sealed for $name {} + }; + } + + impl_token!("literal" Lit); + impl_token!("string literal" LitStr); + impl_token!("byte string literal" LitByteStr); + impl_token!("C-string literal" LitCStr); + impl_token!("byte literal" LitByte); + impl_token!("character literal" LitChar); + impl_token!("integer literal" LitInt); + impl_token!("floating point literal" LitFloat); + impl_token!("boolean literal" LitBool); +} + +#[cfg(feature = "printing")] +mod printing { + use crate::lit::{LitBool, LitByte, LitByteStr, LitCStr, LitChar, LitFloat, LitInt, LitStr}; + use proc_macro2::TokenStream; + use quote::{ToTokens, TokenStreamExt as _}; + + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for LitStr { + fn to_tokens(&self, tokens: &mut TokenStream) { + self.repr.token.to_tokens(tokens); + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for LitByteStr { + fn to_tokens(&self, tokens: &mut TokenStream) { + self.repr.token.to_tokens(tokens); + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for LitCStr { + fn to_tokens(&self, tokens: &mut TokenStream) { + self.repr.token.to_tokens(tokens); + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for LitByte { + fn to_tokens(&self, tokens: &mut TokenStream) { + self.repr.token.to_tokens(tokens); + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for LitChar { + fn to_tokens(&self, tokens: &mut TokenStream) { + self.repr.token.to_tokens(tokens); + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for LitInt { + fn to_tokens(&self, tokens: &mut TokenStream) { + self.repr.token.to_tokens(tokens); + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for LitFloat { + fn to_tokens(&self, tokens: &mut TokenStream) { + self.repr.token.to_tokens(tokens); + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for LitBool { + fn to_tokens(&self, tokens: &mut TokenStream) { + tokens.append(self.token()); + } + } +} + +mod value { + use crate::bigint::BigInt; + use crate::lit::{ + Lit, LitBool, LitByte, LitByteStr, LitCStr, LitChar, LitFloat, LitFloatRepr, LitInt, + LitIntRepr, 
LitRepr, LitStr, + }; + use proc_macro2::{Literal, Span}; + use std::char; + use std::ffi::CString; + use std::ops::{Index, RangeFrom}; + + impl Lit { + /// Interpret a Syn literal from a proc-macro2 literal. + pub fn new(token: Literal) -> Self { + let repr = token.to_string(); + Lit::from_str(token, &repr) + } + + #[cfg(fuzzing)] + #[doc(hidden)] + pub fn from_str_for_fuzzing(repr: &str) -> Self { + let token = Literal::u8_unsuffixed(0); + Lit::from_str(token, repr) + } + + fn from_str(token: Literal, repr: &str) -> Self { + match byte(repr, 0) { + // "...", r"...", r#"..."# + b'"' | b'r' => { + if let Some((_, suffix)) = parse_lit_str(repr) { + return Lit::Str(LitStr { + repr: Box::new(LitRepr { token, suffix }), + }); + } + } + b'b' => match byte(repr, 1) { + // b"...", br"...", br#"...#" + b'"' | b'r' => { + if let Some((_, suffix)) = parse_lit_byte_str(repr) { + return Lit::ByteStr(LitByteStr { + repr: Box::new(LitRepr { token, suffix }), + }); + } + } + // b'...' + b'\'' => { + if let Some((_, suffix)) = parse_lit_byte(repr) { + return Lit::Byte(LitByte { + repr: Box::new(LitRepr { token, suffix }), + }); + } + } + _ => {} + }, + b'c' => match byte(repr, 1) { + // c"...", cr"...", cr#"..."# + b'"' | b'r' => { + if let Some((_, suffix)) = parse_lit_c_str(repr) { + return Lit::CStr(LitCStr { + repr: Box::new(LitRepr { token, suffix }), + }); + } + } + _ => {} + }, + // '...' + b'\'' => { + if let Some((_, suffix)) = parse_lit_char(repr) { + return Lit::Char(LitChar { + repr: Box::new(LitRepr { token, suffix }), + }); + } + } + b'0'..=b'9' | b'-' => { + // 0, 123, 0xFF, 0o77, 0b11 + if let Some((digits, suffix)) = parse_lit_int(repr) { + return Lit::Int(LitInt { + repr: Box::new(LitIntRepr { + token, + digits, + suffix, + }), + }); + } + // 1.0, 1e-1, 1e+1 + if let Some((digits, suffix)) = parse_lit_float(repr) { + return Lit::Float(LitFloat { + repr: Box::new(LitFloatRepr { + token, + digits, + suffix, + }), + }); + } + } + // true, false + b't' | b'f' => { + if repr == "true" || repr == "false" { + return Lit::Bool(LitBool { + value: repr == "true", + span: token.span(), + }); + } + } + b'(' if repr == "(/*ERROR*/)" => return Lit::Verbatim(token), + _ => {} + } + + Lit::Verbatim(token) + } + + pub fn suffix(&self) -> &str { + match self { + Lit::Str(lit) => lit.suffix(), + Lit::ByteStr(lit) => lit.suffix(), + Lit::CStr(lit) => lit.suffix(), + Lit::Byte(lit) => lit.suffix(), + Lit::Char(lit) => lit.suffix(), + Lit::Int(lit) => lit.suffix(), + Lit::Float(lit) => lit.suffix(), + Lit::Bool(_) | Lit::Verbatim(_) => "", + } + } + + pub fn span(&self) -> Span { + match self { + Lit::Str(lit) => lit.span(), + Lit::ByteStr(lit) => lit.span(), + Lit::CStr(lit) => lit.span(), + Lit::Byte(lit) => lit.span(), + Lit::Char(lit) => lit.span(), + Lit::Int(lit) => lit.span(), + Lit::Float(lit) => lit.span(), + Lit::Bool(lit) => lit.span, + Lit::Verbatim(lit) => lit.span(), + } + } + + pub fn set_span(&mut self, span: Span) { + match self { + Lit::Str(lit) => lit.set_span(span), + Lit::ByteStr(lit) => lit.set_span(span), + Lit::CStr(lit) => lit.set_span(span), + Lit::Byte(lit) => lit.set_span(span), + Lit::Char(lit) => lit.set_span(span), + Lit::Int(lit) => lit.set_span(span), + Lit::Float(lit) => lit.set_span(span), + Lit::Bool(lit) => lit.span = span, + Lit::Verbatim(lit) => lit.set_span(span), + } + } + } + + /// Get the byte at offset idx, or a default of `b'\0'` if we're looking + /// past the end of the input buffer. 
+ pub(crate) fn byte<S: AsRef<[u8]> + ?Sized>(s: &S, idx: usize) -> u8 { + let s = s.as_ref(); + if idx < s.len() { + s[idx] + } else { + 0 + } + } + + fn next_chr(s: &str) -> char { + s.chars().next().unwrap_or('\0') + } + + // Returns (content, suffix). + pub(crate) fn parse_lit_str(s: &str) -> Option<(Box<str>, Box<str>)> { + match byte(s, 0) { + b'"' => parse_lit_str_cooked(s), + b'r' => parse_lit_str_raw(s), + _ => unreachable!(), + } + } + + fn parse_lit_str_cooked(mut s: &str) -> Option<(Box<str>, Box<str>)> { + assert_eq!(byte(s, 0), b'"'); + s = &s[1..]; + + let mut content = String::new(); + 'outer: loop { + let ch = match byte(s, 0) { + b'"' => break, + b'\\' => { + let b = byte(s, 1); + s = s.get(2..)?; + match b { + b'x' => { + let (byte, rest) = backslash_x(s)?; + s = rest; + if byte > 0x7F { + // invalid \x byte in string literal + return None; + } + char::from(byte) + } + b'u' => { + let (ch, rest) = backslash_u(s)?; + s = rest; + ch + } + b'n' => '\n', + b'r' => '\r', + b't' => '\t', + b'\\' => '\\', + b'0' => '\0', + b'\'' => '\'', + b'"' => '"', + b'\r' | b'\n' => loop { + let b = byte(s, 0); + match b { + b' ' | b'\t' | b'\n' | b'\r' => s = &s[1..], + _ => continue 'outer, + } + }, + _ => { + // unexpected byte after backslash + return None; + } + } + } + b'\r' => { + if byte(s, 1) != b'\n' { + // bare carriage return not allowed in string + return None; + } + s = &s[2..]; + '\n' + } + _ => { + let ch = next_chr(s); + s = s.get(ch.len_utf8()..)?; + ch + } + }; + content.push(ch); + } + + assert!(s.starts_with('"')); + let content = content.into_boxed_str(); + let suffix = s[1..].to_owned().into_boxed_str(); + Some((content, suffix)) + } + + fn parse_lit_str_raw(mut s: &str) -> Option<(Box<str>, Box<str>)> { + assert_eq!(byte(s, 0), b'r'); + s = &s[1..]; + + let mut pounds = 0; + loop { + match byte(s, pounds) { + b'#' => pounds += 1, + b'"' => break, + _ => return None, + } + } + let close = s.rfind('"').unwrap(); + for end in s.get(close + 1..close + 1 + pounds)?.bytes() { + if end != b'#' { + return None; + } + } + + let content = s.get(pounds + 1..close)?.to_owned().into_boxed_str(); + let suffix = s[close + 1 + pounds..].to_owned().into_boxed_str(); + Some((content, suffix)) + } + + // Returns (content, suffix). + pub(crate) fn parse_lit_byte_str(s: &str) -> Option<(Vec<u8>, Box<str>)> { + assert_eq!(byte(s, 0), b'b'); + match byte(s, 1) { + b'"' => parse_lit_byte_str_cooked(s), + b'r' => parse_lit_byte_str_raw(s), + _ => unreachable!(), + } + } + + fn parse_lit_byte_str_cooked(mut s: &str) -> Option<(Vec<u8>, Box<str>)> { + assert_eq!(byte(s, 0), b'b'); + assert_eq!(byte(s, 1), b'"'); + s = &s[2..]; + + // We're going to want to have slices which don't respect codepoint boundaries. 
+ let mut v = s.as_bytes(); + + let mut out = Vec::new(); + 'outer: loop { + let byte = match byte(v, 0) { + b'"' => break, + b'\\' => { + let b = byte(v, 1); + v = v.get(2..)?; + match b { + b'x' => { + let (b, rest) = backslash_x(v)?; + v = rest; + b + } + b'n' => b'\n', + b'r' => b'\r', + b't' => b'\t', + b'\\' => b'\\', + b'0' => b'\0', + b'\'' => b'\'', + b'"' => b'"', + b'\r' | b'\n' => loop { + let byte = byte(v, 0); + if matches!(byte, b' ' | b'\t' | b'\n' | b'\r') { + v = &v[1..]; + } else { + continue 'outer; + } + }, + _ => { + // unexpected byte after backslash + return None; + } + } + } + b'\r' => { + if byte(v, 1) != b'\n' { + // bare carriage return not allowed in string + return None; + } + v = &v[2..]; + b'\n' + } + b => { + v = v.get(1..)?; + b + } + }; + out.push(byte); + } + + assert_eq!(byte(v, 0), b'"'); + let suffix = s[s.len() - v.len() + 1..].to_owned().into_boxed_str(); + Some((out, suffix)) + } + + fn parse_lit_byte_str_raw(s: &str) -> Option<(Vec<u8>, Box<str>)> { + assert_eq!(byte(s, 0), b'b'); + let (value, suffix) = parse_lit_str_raw(&s[1..])?; + Some((String::from(value).into_bytes(), suffix)) + } + + // Returns (content, suffix). + pub(crate) fn parse_lit_c_str(s: &str) -> Option<(CString, Box<str>)> { + assert_eq!(byte(s, 0), b'c'); + match byte(s, 1) { + b'"' => parse_lit_c_str_cooked(s), + b'r' => parse_lit_c_str_raw(s), + _ => unreachable!(), + } + } + + fn parse_lit_c_str_cooked(mut s: &str) -> Option<(CString, Box<str>)> { + assert_eq!(byte(s, 0), b'c'); + assert_eq!(byte(s, 1), b'"'); + s = &s[2..]; + + // We're going to want to have slices which don't respect codepoint boundaries. + let mut v = s.as_bytes(); + + let mut out = Vec::new(); + 'outer: loop { + let byte = match byte(v, 0) { + b'"' => break, + b'\\' => { + let b = byte(v, 1); + v = v.get(2..)?; + match b { + b'x' => { + let (b, rest) = backslash_x(v)?; + if b == 0 { + // \x00 is not allowed in C-string literal + return None; + } + v = rest; + b + } + b'u' => { + let (ch, rest) = backslash_u(v)?; + if ch == '\0' { + // \u{0} is not allowed in C-string literal + return None; + } + v = rest; + out.extend_from_slice(ch.encode_utf8(&mut [0u8; 4]).as_bytes()); + continue 'outer; + } + b'n' => b'\n', + b'r' => b'\r', + b't' => b'\t', + b'\\' => b'\\', + b'\'' => b'\'', + b'"' => b'"', + b'\r' | b'\n' => loop { + let byte = byte(v, 0); + if matches!(byte, b' ' | b'\t' | b'\n' | b'\r') { + v = &v[1..]; + } else { + continue 'outer; + } + }, + _ => { + // unexpected byte after backslash + return None; + } + } + } + b'\r' => { + if byte(v, 1) != b'\n' { + // bare carriage return not allowed in string + return None; + } + v = &v[2..]; + b'\n' + } + b => { + v = v.get(1..)?; + b + } + }; + out.push(byte); + } + + assert_eq!(byte(v, 0), b'"'); + let suffix = s[s.len() - v.len() + 1..].to_owned().into_boxed_str(); + let cstring = CString::new(out).ok()?; + Some((cstring, suffix)) + } + + fn parse_lit_c_str_raw(s: &str) -> Option<(CString, Box<str>)> { + assert_eq!(byte(s, 0), b'c'); + let (value, suffix) = parse_lit_str_raw(&s[1..])?; + let cstring = CString::new(String::from(value)).ok()?; + Some((cstring, suffix)) + } + + // Returns (value, suffix). + pub(crate) fn parse_lit_byte(s: &str) -> Option<(u8, Box<str>)> { + assert_eq!(byte(s, 0), b'b'); + assert_eq!(byte(s, 1), b'\''); + + // We're going to want to have slices which don't respect codepoint boundaries. 
+ let mut v = &s.as_bytes()[2..]; + + let b = match byte(v, 0) { + b'\\' => { + let b = byte(v, 1); + v = v.get(2..)?; + match b { + b'x' => { + let (b, rest) = backslash_x(v)?; + v = rest; + b + } + b'n' => b'\n', + b'r' => b'\r', + b't' => b'\t', + b'\\' => b'\\', + b'0' => b'\0', + b'\'' => b'\'', + b'"' => b'"', + _ => { + // unexpected byte after backslash + return None; + } + } + } + b => { + v = v.get(1..)?; + b + } + }; + + if byte(v, 0) != b'\'' { + return None; + } + + let suffix = s[s.len() - v.len() + 1..].to_owned().into_boxed_str(); + Some((b, suffix)) + } + + // Returns (value, suffix). + pub(crate) fn parse_lit_char(mut s: &str) -> Option<(char, Box<str>)> { + assert_eq!(byte(s, 0), b'\''); + s = &s[1..]; + + let ch = match byte(s, 0) { + b'\\' => { + let b = byte(s, 1); + s = s.get(2..)?; + match b { + b'x' => { + let (byte, rest) = backslash_x(s)?; + s = rest; + if byte > 0x7F { + // invalid \x byte in character literal + return None; + } + char::from(byte) + } + b'u' => { + let (ch, rest) = backslash_u(s)?; + s = rest; + ch + } + b'n' => '\n', + b'r' => '\r', + b't' => '\t', + b'\\' => '\\', + b'0' => '\0', + b'\'' => '\'', + b'"' => '"', + _ => { + // unexpected byte after backslash + return None; + } + } + } + _ => { + let ch = next_chr(s); + s = s.get(ch.len_utf8()..)?; + ch + } + }; + + if byte(s, 0) != b'\'' { + return None; + } + + let suffix = s[1..].to_owned().into_boxed_str(); + Some((ch, suffix)) + } + + fn backslash_x<S>(s: &S) -> Option<(u8, &S)> + where + S: Index<RangeFrom<usize>, Output = S> + AsRef<[u8]> + ?Sized, + { + let mut ch = 0; + let b0 = byte(s, 0); + let b1 = byte(s, 1); + ch += 0x10 + * match b0 { + b'0'..=b'9' => b0 - b'0', + b'a'..=b'f' => 10 + (b0 - b'a'), + b'A'..=b'F' => 10 + (b0 - b'A'), + _ => return None, + }; + ch += match b1 { + b'0'..=b'9' => b1 - b'0', + b'a'..=b'f' => 10 + (b1 - b'a'), + b'A'..=b'F' => 10 + (b1 - b'A'), + _ => return None, + }; + Some((ch, &s[2..])) + } + + fn backslash_u<S>(mut s: &S) -> Option<(char, &S)> + where + S: Index<RangeFrom<usize>, Output = S> + AsRef<[u8]> + ?Sized, + { + if byte(s, 0) != b'{' { + return None; + } + s = &s[1..]; + + let mut ch = 0; + let mut digits = 0; + loop { + let b = byte(s, 0); + let digit = match b { + b'0'..=b'9' => b - b'0', + b'a'..=b'f' => 10 + b - b'a', + b'A'..=b'F' => 10 + b - b'A', + b'_' if digits > 0 => { + s = &s[1..]; + continue; + } + b'}' if digits == 0 => return None, + b'}' => break, + _ => return None, + }; + if digits == 6 { + return None; + } + ch *= 0x10; + ch += u32::from(digit); + digits += 1; + s = &s[1..]; + } + if byte(s, 0) != b'}' { + return None; + } + s = &s[1..]; + + let ch = char::from_u32(ch)?; + Some((ch, s)) + } + + // Returns base 10 digits and suffix. + pub(crate) fn parse_lit_int(mut s: &str) -> Option<(Box<str>, Box<str>)> { + let negative = byte(s, 0) == b'-'; + if negative { + s = &s[1..]; + } + + let base = match (byte(s, 0), byte(s, 1)) { + (b'0', b'x') => { + s = &s[2..]; + 16 + } + (b'0', b'o') => { + s = &s[2..]; + 8 + } + (b'0', b'b') => { + s = &s[2..]; + 2 + } + (b'0'..=b'9', _) => 10, + _ => return None, + }; + + let mut value = BigInt::new(); + let mut has_digit = false; + 'outer: loop { + let b = byte(s, 0); + let digit = match b { + b'0'..=b'9' => b - b'0', + b'a'..=b'f' if base > 10 => b - b'a' + 10, + b'A'..=b'F' if base > 10 => b - b'A' + 10, + b'_' => { + s = &s[1..]; + continue; + } + // If looking at a floating point literal, we don't want to + // consider it an integer. + b'.' 
if base == 10 => return None, + b'e' | b'E' if base == 10 => { + let mut has_exp = false; + for (i, b) in s[1..].bytes().enumerate() { + match b { + b'_' => {} + b'-' | b'+' => return None, + b'0'..=b'9' => has_exp = true, + _ => { + let suffix = &s[1 + i..]; + if has_exp && crate::ident::xid_ok(suffix) { + return None; + } else { + break 'outer; + } + } + } + } + if has_exp { + return None; + } else { + break; + } + } + _ => break, + }; + + if digit >= base { + return None; + } + + has_digit = true; + value *= base; + value += digit; + s = &s[1..]; + } + + if !has_digit { + return None; + } + + let suffix = s; + if suffix.is_empty() || crate::ident::xid_ok(suffix) { + let mut repr = value.to_string(); + if negative { + repr.insert(0, '-'); + } + Some((repr.into_boxed_str(), suffix.to_owned().into_boxed_str())) + } else { + None + } + } + + // Returns base 10 digits and suffix. + pub(crate) fn parse_lit_float(input: &str) -> Option<(Box<str>, Box<str>)> { + // Rust's floating point literals are very similar to the ones parsed by + // the standard library, except that rust's literals can contain + // ignorable underscores. Let's remove those underscores. + + let mut bytes = input.to_owned().into_bytes(); + + let start = (*bytes.first()? == b'-') as usize; + match bytes.get(start)? { + b'0'..=b'9' => {} + _ => return None, + } + + let mut read = start; + let mut write = start; + let mut has_dot = false; + let mut has_e = false; + let mut has_sign = false; + let mut has_exponent = false; + while read < bytes.len() { + match bytes[read] { + b'_' => { + // Don't increase write + read += 1; + continue; + } + b'0'..=b'9' => { + if has_e { + has_exponent = true; + } + bytes[write] = bytes[read]; + } + b'.' => { + if has_e || has_dot { + return None; + } + has_dot = true; + bytes[write] = b'.'; + } + b'e' | b'E' => { + match bytes[read + 1..] + .iter() + .find(|b| **b != b'_') + .unwrap_or(&b'\0') + { + b'-' | b'+' | b'0'..=b'9' => {} + _ => break, + } + if has_e { + if has_exponent { + break; + } else { + return None; + } + } + has_e = true; + bytes[write] = b'e'; + } + b'-' | b'+' => { + if has_sign || has_exponent || !has_e { + return None; + } + has_sign = true; + if bytes[read] == b'-' { + bytes[write] = bytes[read]; + } else { + // Omit '+' + read += 1; + continue; + } + } + _ => break, + } + read += 1; + write += 1; + } + + if has_e && !has_exponent { + return None; + } + + let mut digits = String::from_utf8(bytes).unwrap(); + let suffix = digits.split_off(read); + digits.truncate(write); + if suffix.is_empty() || crate::ident::xid_ok(&suffix) { + Some((digits.into_boxed_str(), suffix.into_boxed_str())) + } else { + None + } + } +} diff --git a/vendor/syn/src/lookahead.rs b/vendor/syn/src/lookahead.rs new file mode 100644 index 00000000000000..10b4566135c9e9 --- /dev/null +++ b/vendor/syn/src/lookahead.rs @@ -0,0 +1,348 @@ +use crate::buffer::Cursor; +use crate::error::{self, Error}; +use crate::sealed::lookahead::Sealed; +use crate::span::IntoSpans; +use crate::token::{CustomToken, Token}; +use proc_macro2::{Delimiter, Span}; +use std::cell::RefCell; +use std::fmt::{self, Display}; + +/// Support for checking the next token in a stream to decide how to parse. +/// +/// An important advantage over [`ParseStream::peek`] is that here we +/// automatically construct an appropriate error message based on the token +/// alternatives that get peeked. If you are producing your own error message, +/// go ahead and use `ParseStream::peek` instead. 
+/// +/// Use [`ParseStream::lookahead1`] to construct this object. +/// +/// [`ParseStream::peek`]: crate::parse::ParseBuffer::peek +/// [`ParseStream::lookahead1`]: crate::parse::ParseBuffer::lookahead1 +/// +/// Consuming tokens from the source stream after constructing a lookahead +/// object does not also advance the lookahead object. +/// +/// # Example +/// +/// ``` +/// use syn::{ConstParam, Ident, Lifetime, LifetimeParam, Result, Token, TypeParam}; +/// use syn::parse::{Parse, ParseStream}; +/// +/// // A generic parameter, a single one of the comma-separated elements inside +/// // angle brackets in: +/// // +/// // fn f<T: Clone, 'a, 'b: 'a, const N: usize>() { ... } +/// // +/// // On invalid input, lookahead gives us a reasonable error message. +/// // +/// // error: expected one of: identifier, lifetime, `const` +/// // | +/// // 5 | fn f<!Sized>() {} +/// // | ^ +/// enum GenericParam { +/// Type(TypeParam), +/// Lifetime(LifetimeParam), +/// Const(ConstParam), +/// } +/// +/// impl Parse for GenericParam { +/// fn parse(input: ParseStream) -> Result<Self> { +/// let lookahead = input.lookahead1(); +/// if lookahead.peek(Ident) { +/// input.parse().map(GenericParam::Type) +/// } else if lookahead.peek(Lifetime) { +/// input.parse().map(GenericParam::Lifetime) +/// } else if lookahead.peek(Token![const]) { +/// input.parse().map(GenericParam::Const) +/// } else { +/// Err(lookahead.error()) +/// } +/// } +/// } +/// ``` +pub struct Lookahead1<'a> { + scope: Span, + cursor: Cursor<'a>, + comparisons: RefCell<Vec<&'static str>>, +} + +pub(crate) fn new(scope: Span, cursor: Cursor) -> Lookahead1 { + Lookahead1 { + scope, + cursor, + comparisons: RefCell::new(Vec::new()), + } +} + +fn peek_impl( + lookahead: &Lookahead1, + peek: fn(Cursor) -> bool, + display: fn() -> &'static str, +) -> bool { + if peek(lookahead.cursor) { + return true; + } + lookahead.comparisons.borrow_mut().push(display()); + false +} + +impl<'a> Lookahead1<'a> { + /// Looks at the next token in the parse stream to determine whether it + /// matches the requested type of token. + /// + /// # Syntax + /// + /// Note that this method does not use turbofish syntax. Pass the peek type + /// inside of parentheses. + /// + /// - `input.peek(Token![struct])` + /// - `input.peek(Token![==])` + /// - `input.peek(Ident)` *(does not accept keywords)* + /// - `input.peek(Ident::peek_any)` + /// - `input.peek(Lifetime)` + /// - `input.peek(token::Brace)` + pub fn peek<T: Peek>(&self, token: T) -> bool { + let _ = token; + peek_impl(self, T::Token::peek, T::Token::display) + } + + /// Triggers an error at the current position of the parse stream. + /// + /// The error message will identify all of the expected token types that + /// have been peeked against this lookahead instance. 
+ pub fn error(self) -> Error { + let mut comparisons = self.comparisons.into_inner(); + comparisons.retain_mut(|display| { + if *display == "`)`" { + *display = match self.cursor.scope_delimiter() { + Delimiter::Parenthesis => "`)`", + Delimiter::Brace => "`}`", + Delimiter::Bracket => "`]`", + Delimiter::None => return false, + } + } + true + }); + match comparisons.len() { + 0 => { + if self.cursor.eof() { + Error::new(self.scope, "unexpected end of input") + } else { + Error::new(self.cursor.span(), "unexpected token") + } + } + 1 => { + let message = format!("expected {}", comparisons[0]); + error::new_at(self.scope, self.cursor, message) + } + 2 => { + let message = format!("expected {} or {}", comparisons[0], comparisons[1]); + error::new_at(self.scope, self.cursor, message) + } + _ => { + let message = format!("expected one of: {}", CommaSeparated(&comparisons)); + error::new_at(self.scope, self.cursor, message) + } + } + } +} + +struct CommaSeparated<'a>(&'a [&'a str]); + +impl<'a> Display for CommaSeparated<'a> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let mut first = true; + for &s in self.0 { + if !first { + f.write_str(", ")?; + } + f.write_str(s)?; + first = false; + } + Ok(()) + } +} + +/// Types that can be parsed by looking at just one token. +/// +/// Use [`ParseStream::peek`] to peek one of these types in a parse stream +/// without consuming it from the stream. +/// +/// This trait is sealed and cannot be implemented for types outside of Syn. +/// +/// [`ParseStream::peek`]: crate::parse::ParseBuffer::peek +pub trait Peek: Sealed { + // Not public API. + #[doc(hidden)] + type Token: Token; +} + +/// Pseudo-token used for peeking the end of a parse stream. +/// +/// This type is only useful as an argument to one of the following functions: +/// +/// - [`ParseStream::peek`][crate::parse::ParseBuffer::peek] +/// - [`ParseStream::peek2`][crate::parse::ParseBuffer::peek2] +/// - [`ParseStream::peek3`][crate::parse::ParseBuffer::peek3] +/// - [`Lookahead1::peek`] +/// +/// The peek will return `true` if there are no remaining tokens after that +/// point in the parse stream. +/// +/// # Example +/// +/// Suppose we are parsing attributes containing core::fmt inspired formatting +/// arguments: +/// +/// - `#[fmt("simple example")]` +/// - `#[fmt("interpolation e{}ample", self.x)]` +/// - `#[fmt("interpolation e{x}ample")]` +/// +/// and we want to recognize the cases where no interpolation occurs so that +/// more efficient code can be generated. +/// +/// The following implementation uses `input.peek(Token![,]) && +/// input.peek2(End)` to recognize the case of a trailing comma without +/// consuming the comma from the parse stream, because if it isn't a trailing +/// comma, that same comma needs to be parsed as part of `args`. +/// +/// ``` +/// use proc_macro2::TokenStream; +/// use quote::quote; +/// use syn::parse::{End, Parse, ParseStream, Result}; +/// use syn::{parse_quote, Attribute, LitStr, Token}; +/// +/// struct FormatArgs { +/// template: LitStr, // "...{}..." +/// args: TokenStream, // , self.x +/// } +/// +/// impl Parse for FormatArgs { +/// fn parse(input: ParseStream) -> Result<Self> { +/// let template: LitStr = input.parse()?; +/// +/// let args = if input.is_empty() +/// || input.peek(Token![,]) && input.peek2(End) +/// { +/// input.parse::<Option<Token![,]>>()?; +/// TokenStream::new() +/// } else { +/// input.parse()? 
+/// }; +/// +/// Ok(FormatArgs { +/// template, +/// args, +/// }) +/// } +/// } +/// +/// fn main() -> Result<()> { +/// let attrs: Vec<Attribute> = parse_quote! { +/// #[fmt("simple example")] +/// #[fmt("interpolation e{}ample", self.x)] +/// #[fmt("interpolation e{x}ample")] +/// }; +/// +/// for attr in &attrs { +/// let FormatArgs { template, args } = attr.parse_args()?; +/// let requires_fmt_machinery = +/// !args.is_empty() || template.value().contains(['{', '}']); +/// let out = if requires_fmt_machinery { +/// quote! { +/// ::core::write!(__formatter, #template #args) +/// } +/// } else { +/// quote! { +/// __formatter.write_str(#template) +/// } +/// }; +/// println!("{}", out); +/// } +/// Ok(()) +/// } +/// ``` +/// +/// Implementing this parsing logic without `peek2(End)` is more clumsy because +/// we'd need a parse stream actually advanced past the comma before being able +/// to find out whether there is anything after it. It would look something +/// like: +/// +/// ``` +/// # use proc_macro2::TokenStream; +/// # use syn::parse::{ParseStream, Result}; +/// # use syn::Token; +/// # +/// # fn parse(input: ParseStream) -> Result<()> { +/// use syn::parse::discouraged::Speculative as _; +/// +/// let ahead = input.fork(); +/// ahead.parse::<Option<Token![,]>>()?; +/// let args = if ahead.is_empty() { +/// input.advance_to(&ahead); +/// TokenStream::new() +/// } else { +/// input.parse()? +/// }; +/// # Ok(()) +/// # } +/// ``` +/// +/// or: +/// +/// ``` +/// # use proc_macro2::TokenStream; +/// # use syn::parse::{ParseStream, Result}; +/// # use syn::Token; +/// # +/// # fn parse(input: ParseStream) -> Result<()> { +/// use quote::ToTokens as _; +/// +/// let comma: Option<Token![,]> = input.parse()?; +/// let mut args = TokenStream::new(); +/// if !input.is_empty() { +/// comma.to_tokens(&mut args); +/// input.parse::<TokenStream>()?.to_tokens(&mut args); +/// } +/// # Ok(()) +/// # } +/// ``` +pub struct End; + +impl Copy for End {} + +impl Clone for End { + fn clone(&self) -> Self { + *self + } +} + +impl Peek for End { + type Token = Self; +} + +impl CustomToken for End { + fn peek(cursor: Cursor) -> bool { + cursor.eof() + } + + fn display() -> &'static str { + "`)`" // Lookahead1 error message will fill in the expected close delimiter + } +} + +impl<F: Copy + FnOnce(TokenMarker) -> T, T: Token> Peek for F { + type Token = T; +} + +pub enum TokenMarker {} + +impl<S> IntoSpans<S> for TokenMarker { + fn into_spans(self) -> S { + match self {} + } +} + +impl<F: Copy + FnOnce(TokenMarker) -> T, T: Token> Sealed for F {} + +impl Sealed for End {} diff --git a/vendor/syn/src/mac.rs b/vendor/syn/src/mac.rs new file mode 100644 index 00000000000000..15107801cfee02 --- /dev/null +++ b/vendor/syn/src/mac.rs @@ -0,0 +1,225 @@ +#[cfg(feature = "parsing")] +use crate::error::Result; +#[cfg(feature = "parsing")] +use crate::parse::{Parse, ParseStream, Parser}; +use crate::path::Path; +use crate::token::{Brace, Bracket, Paren}; +use proc_macro2::extra::DelimSpan; +#[cfg(feature = "parsing")] +use proc_macro2::Delimiter; +use proc_macro2::TokenStream; +#[cfg(feature = "parsing")] +use proc_macro2::TokenTree; + +ast_struct! { + /// A macro invocation: `println!("{}", mac)`. + #[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] + pub struct Macro { + pub path: Path, + pub bang_token: Token![!], + pub delimiter: MacroDelimiter, + pub tokens: TokenStream, + } +} + +ast_enum! { + /// A grouping token that surrounds a macro body: `m!(...)` or `m!{...}` or `m![...]`. 
+ #[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] + pub enum MacroDelimiter { + Paren(Paren), + Brace(Brace), + Bracket(Bracket), + } +} + +impl MacroDelimiter { + pub fn span(&self) -> &DelimSpan { + match self { + MacroDelimiter::Paren(token) => &token.span, + MacroDelimiter::Brace(token) => &token.span, + MacroDelimiter::Bracket(token) => &token.span, + } + } + + #[cfg(all(feature = "full", any(feature = "parsing", feature = "printing")))] + pub(crate) fn is_brace(&self) -> bool { + match self { + MacroDelimiter::Brace(_) => true, + MacroDelimiter::Paren(_) | MacroDelimiter::Bracket(_) => false, + } + } +} + +impl Macro { + /// Parse the tokens within the macro invocation's delimiters into a syntax + /// tree. + /// + /// This is equivalent to `syn::parse2::<T>(mac.tokens)` except that it + /// produces a more useful span when `tokens` is empty. + /// + /// # Example + /// + /// ``` + /// use syn::{parse_quote, Expr, ExprLit, Ident, Lit, LitStr, Macro, Token}; + /// use syn::ext::IdentExt; + /// use syn::parse::{Error, Parse, ParseStream, Result}; + /// use syn::punctuated::Punctuated; + /// + /// // The arguments expected by libcore's format_args macro, and as a + /// // result most other formatting and printing macros like println. + /// // + /// // println!("{} is {number:.prec$}", "x", prec=5, number=0.01) + /// struct FormatArgs { + /// format_string: Expr, + /// positional_args: Vec<Expr>, + /// named_args: Vec<(Ident, Expr)>, + /// } + /// + /// impl Parse for FormatArgs { + /// fn parse(input: ParseStream) -> Result<Self> { + /// let format_string: Expr; + /// let mut positional_args = Vec::new(); + /// let mut named_args = Vec::new(); + /// + /// format_string = input.parse()?; + /// while !input.is_empty() { + /// input.parse::<Token![,]>()?; + /// if input.is_empty() { + /// break; + /// } + /// if input.peek(Ident::peek_any) && input.peek2(Token![=]) { + /// while !input.is_empty() { + /// let name: Ident = input.call(Ident::parse_any)?; + /// input.parse::<Token![=]>()?; + /// let value: Expr = input.parse()?; + /// named_args.push((name, value)); + /// if input.is_empty() { + /// break; + /// } + /// input.parse::<Token![,]>()?; + /// } + /// break; + /// } + /// positional_args.push(input.parse()?); + /// } + /// + /// Ok(FormatArgs { + /// format_string, + /// positional_args, + /// named_args, + /// }) + /// } + /// } + /// + /// // Extract the first argument, the format string literal, from an + /// // invocation of a formatting or printing macro. + /// fn get_format_string(m: &Macro) -> Result<LitStr> { + /// let args: FormatArgs = m.parse_body()?; + /// match args.format_string { + /// Expr::Lit(ExprLit { lit: Lit::Str(lit), .. }) => Ok(lit), + /// other => { + /// // First argument was not a string literal expression. + /// // Maybe something like: println!(concat!(...), ...) + /// Err(Error::new_spanned(other, "format string must be a string literal")) + /// } + /// } + /// } + /// + /// fn main() { + /// let invocation = parse_quote! { + /// println!("{:?}", Instant::now()) + /// }; + /// let lit = get_format_string(&invocation).unwrap(); + /// assert_eq!(lit.value(), "{:?}"); + /// } + /// ``` + #[cfg(feature = "parsing")] + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + pub fn parse_body<T: Parse>(&self) -> Result<T> { + self.parse_body_with(T::parse) + } + + /// Parse the tokens within the macro invocation's delimiters using the + /// given parser. 
+ #[cfg(feature = "parsing")] + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + pub fn parse_body_with<F: Parser>(&self, parser: F) -> Result<F::Output> { + let scope = self.delimiter.span().close(); + crate::parse::parse_scoped(parser, scope, self.tokens.clone()) + } +} + +#[cfg(feature = "parsing")] +pub(crate) fn parse_delimiter(input: ParseStream) -> Result<(MacroDelimiter, TokenStream)> { + input.step(|cursor| { + if let Some((TokenTree::Group(g), rest)) = cursor.token_tree() { + let span = g.delim_span(); + let delimiter = match g.delimiter() { + Delimiter::Parenthesis => MacroDelimiter::Paren(Paren(span)), + Delimiter::Brace => MacroDelimiter::Brace(Brace(span)), + Delimiter::Bracket => MacroDelimiter::Bracket(Bracket(span)), + Delimiter::None => { + return Err(cursor.error("expected delimiter")); + } + }; + Ok(((delimiter, g.stream()), rest)) + } else { + Err(cursor.error("expected delimiter")) + } + }) +} + +#[cfg(feature = "parsing")] +pub(crate) mod parsing { + use crate::error::Result; + use crate::mac::{parse_delimiter, Macro}; + use crate::parse::{Parse, ParseStream}; + use crate::path::Path; + + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + impl Parse for Macro { + fn parse(input: ParseStream) -> Result<Self> { + let tokens; + Ok(Macro { + path: input.call(Path::parse_mod_style)?, + bang_token: input.parse()?, + delimiter: { + let (delimiter, content) = parse_delimiter(input)?; + tokens = content; + delimiter + }, + tokens, + }) + } + } +} + +#[cfg(feature = "printing")] +mod printing { + use crate::mac::{Macro, MacroDelimiter}; + use crate::path; + use crate::path::printing::PathStyle; + use crate::token; + use proc_macro2::{Delimiter, TokenStream}; + use quote::ToTokens; + + impl MacroDelimiter { + pub(crate) fn surround(&self, tokens: &mut TokenStream, inner: TokenStream) { + let (delim, span) = match self { + MacroDelimiter::Paren(paren) => (Delimiter::Parenthesis, paren.span), + MacroDelimiter::Brace(brace) => (Delimiter::Brace, brace.span), + MacroDelimiter::Bracket(bracket) => (Delimiter::Bracket, bracket.span), + }; + token::printing::delim(delim, span.join(), tokens, inner); + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for Macro { + fn to_tokens(&self, tokens: &mut TokenStream) { + path::printing::print_path(tokens, &self.path, PathStyle::Mod); + self.bang_token.to_tokens(tokens); + self.delimiter.surround(tokens, self.tokens.clone()); + } + } +} diff --git a/vendor/syn/src/macros.rs b/vendor/syn/src/macros.rs new file mode 100644 index 00000000000000..167f2cf260a7c7 --- /dev/null +++ b/vendor/syn/src/macros.rs @@ -0,0 +1,182 @@ +#[cfg_attr( + not(any(feature = "full", feature = "derive")), + allow(unknown_lints, unused_macro_rules) +)] +macro_rules! 
ast_struct { + ( + $(#[$attr:meta])* + $pub:ident $struct:ident $name:ident #full $body:tt + ) => { + check_keyword_matches!(pub $pub); + check_keyword_matches!(struct $struct); + + #[cfg(feature = "full")] + $(#[$attr])* $pub $struct $name $body + + #[cfg(not(feature = "full"))] + $(#[$attr])* $pub $struct $name { + _noconstruct: ::std::marker::PhantomData<::proc_macro2::Span>, + } + + #[cfg(all(not(feature = "full"), feature = "printing"))] + impl ::quote::ToTokens for $name { + fn to_tokens(&self, _: &mut ::proc_macro2::TokenStream) { + unreachable!() + } + } + }; + + ( + $(#[$attr:meta])* + $pub:ident $struct:ident $name:ident $body:tt + ) => { + check_keyword_matches!(pub $pub); + check_keyword_matches!(struct $struct); + + $(#[$attr])* $pub $struct $name $body + }; +} + +#[cfg(any(feature = "full", feature = "derive"))] +macro_rules! ast_enum { + ( + $(#[$enum_attr:meta])* + $pub:ident $enum:ident $name:ident $body:tt + ) => { + check_keyword_matches!(pub $pub); + check_keyword_matches!(enum $enum); + + $(#[$enum_attr])* $pub $enum $name $body + }; +} + +macro_rules! ast_enum_of_structs { + ( + $(#[$enum_attr:meta])* + $pub:ident $enum:ident $name:ident $body:tt + ) => { + check_keyword_matches!(pub $pub); + check_keyword_matches!(enum $enum); + + $(#[$enum_attr])* $pub $enum $name $body + + ast_enum_of_structs_impl!($name $body); + + #[cfg(feature = "printing")] + generate_to_tokens!(() tokens $name $body); + }; +} + +macro_rules! ast_enum_of_structs_impl { + ( + $name:ident { + $( + $(#[cfg $cfg_attr:tt])* + $(#[doc $($doc_attr:tt)*])* + $variant:ident $( ($member:ident) )*, + )* + } + ) => { + $($( + ast_enum_from_struct!($name::$variant, $member); + )*)* + }; +} + +macro_rules! ast_enum_from_struct { + // No From<TokenStream> for verbatim variants. + ($name:ident::Verbatim, $member:ident) => {}; + + ($name:ident::$variant:ident, $member:ident) => { + impl From<$member> for $name { + fn from(e: $member) -> $name { + $name::$variant(e) + } + } + }; +} + +#[cfg(feature = "printing")] +macro_rules! generate_to_tokens { + ( + ($($arms:tt)*) $tokens:ident $name:ident { + $(#[cfg $cfg_attr:tt])* + $(#[doc $($doc_attr:tt)*])* + $variant:ident, + $($next:tt)* + } + ) => { + generate_to_tokens!( + ($($arms)* $(#[cfg $cfg_attr])* $name::$variant => {}) + $tokens $name { $($next)* } + ); + }; + + ( + ($($arms:tt)*) $tokens:ident $name:ident { + $(#[cfg $cfg_attr:tt])* + $(#[doc $($doc_attr:tt)*])* + $variant:ident($member:ident), + $($next:tt)* + } + ) => { + generate_to_tokens!( + ($($arms)* $(#[cfg $cfg_attr])* $name::$variant(_e) => _e.to_tokens($tokens),) + $tokens $name { $($next)* } + ); + }; + + (($($arms:tt)*) $tokens:ident $name:ident {}) => { + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ::quote::ToTokens for $name { + fn to_tokens(&self, $tokens: &mut ::proc_macro2::TokenStream) { + match self { + $($arms)* + } + } + } + }; +} + +// Rustdoc bug: does not respect the doc(hidden) on some items. +#[cfg(all(doc, feature = "parsing"))] +macro_rules! pub_if_not_doc { + ($(#[$m:meta])* $pub:ident $($item:tt)*) => { + check_keyword_matches!(pub $pub); + + $(#[$m])* + $pub(crate) $($item)* + }; +} + +#[cfg(all(not(doc), feature = "parsing"))] +macro_rules! pub_if_not_doc { + ($(#[$m:meta])* $pub:ident $($item:tt)*) => { + check_keyword_matches!(pub $pub); + + $(#[$m])* + $pub $($item)* + }; +} + +macro_rules! check_keyword_matches { + (enum enum) => {}; + (pub pub) => {}; + (struct struct) => {}; +} + +#[cfg(any(feature = "full", feature = "derive"))] +macro_rules! 
return_impl_trait { + ( + $(#[$attr:meta])* + $vis:vis fn $name:ident $args:tt -> $impl_trait:ty [$concrete:ty] $body:block + ) => { + #[cfg(not(docsrs))] + $(#[$attr])* + $vis fn $name $args -> $concrete $body + + #[cfg(docsrs)] + $(#[$attr])* + $vis fn $name $args -> $impl_trait $body + }; +} diff --git a/vendor/syn/src/meta.rs b/vendor/syn/src/meta.rs new file mode 100644 index 00000000000000..ffeeb2629f4f78 --- /dev/null +++ b/vendor/syn/src/meta.rs @@ -0,0 +1,427 @@ +//! Facility for interpreting structured content inside of an `Attribute`. + +use crate::error::{Error, Result}; +use crate::ext::IdentExt as _; +use crate::lit::Lit; +use crate::parse::{ParseStream, Parser}; +use crate::path::{Path, PathSegment}; +use crate::punctuated::Punctuated; +use proc_macro2::Ident; +use std::fmt::Display; + +/// Make a parser that is usable with `parse_macro_input!` in a +/// `#[proc_macro_attribute]` macro. +/// +/// *Warning:* When parsing attribute args **other than** the +/// `proc_macro::TokenStream` input of a `proc_macro_attribute`, you do **not** +/// need this function. In several cases your callers will get worse error +/// messages if you use this function, because the surrounding delimiter's span +/// is concealed from attribute macros by rustc. Use +/// [`Attribute::parse_nested_meta`] instead. +/// +/// [`Attribute::parse_nested_meta`]: crate::Attribute::parse_nested_meta +/// +/// # Example +/// +/// This example implements an attribute macro whose invocations look like this: +/// +/// ``` +/// # const IGNORE: &str = stringify! { +/// #[tea(kind = "EarlGrey", hot)] +/// struct Picard {...} +/// # }; +/// ``` +/// +/// The "parameters" supported by the attribute are: +/// +/// - `kind = "..."` +/// - `hot` +/// - `with(sugar, milk, ...)`, a comma-separated list of ingredients +/// +/// ``` +/// # extern crate proc_macro; +/// # +/// use proc_macro::TokenStream; +/// use syn::{parse_macro_input, LitStr, Path}; +/// +/// # const IGNORE: &str = stringify! { +/// #[proc_macro_attribute] +/// # }; +/// pub fn tea(args: TokenStream, input: TokenStream) -> TokenStream { +/// let mut kind: Option<LitStr> = None; +/// let mut hot: bool = false; +/// let mut with: Vec<Path> = Vec::new(); +/// let tea_parser = syn::meta::parser(|meta| { +/// if meta.path.is_ident("kind") { +/// kind = Some(meta.value()?.parse()?); +/// Ok(()) +/// } else if meta.path.is_ident("hot") { +/// hot = true; +/// Ok(()) +/// } else if meta.path.is_ident("with") { +/// meta.parse_nested_meta(|meta| { +/// with.push(meta.path); +/// Ok(()) +/// }) +/// } else { +/// Err(meta.error("unsupported tea property")) +/// } +/// }); +/// +/// parse_macro_input!(args with tea_parser); +/// eprintln!("kind={kind:?} hot={hot} with={with:?}"); +/// +/// /* ... */ +/// # TokenStream::new() +/// } +/// ``` +/// +/// The `syn::meta` library will take care of dealing with the commas including +/// trailing commas, and producing sensible error messages on unexpected input. +/// +/// ```console +/// error: expected `,` +/// --> src/main.rs:3:37 +/// | +/// 3 | #[tea(kind = "EarlGrey", with(sugar = "lol", milk))] +/// | ^ +/// ``` +/// +/// # Example +/// +/// Same as above but we factor out most of the logic into a separate function. +/// +/// ``` +/// # extern crate proc_macro; +/// # +/// use proc_macro::TokenStream; +/// use syn::meta::ParseNestedMeta; +/// use syn::parse::{Parser, Result}; +/// use syn::{parse_macro_input, LitStr, Path}; +/// +/// # const IGNORE: &str = stringify! 
{ +/// #[proc_macro_attribute] +/// # }; +/// pub fn tea(args: TokenStream, input: TokenStream) -> TokenStream { +/// let mut attrs = TeaAttributes::default(); +/// let tea_parser = syn::meta::parser(|meta| attrs.parse(meta)); +/// parse_macro_input!(args with tea_parser); +/// +/// /* ... */ +/// # TokenStream::new() +/// } +/// +/// #[derive(Default)] +/// struct TeaAttributes { +/// kind: Option<LitStr>, +/// hot: bool, +/// with: Vec<Path>, +/// } +/// +/// impl TeaAttributes { +/// fn parse(&mut self, meta: ParseNestedMeta) -> Result<()> { +/// if meta.path.is_ident("kind") { +/// self.kind = Some(meta.value()?.parse()?); +/// Ok(()) +/// } else /* just like in last example */ +/// # { unimplemented!() } +/// +/// } +/// } +/// ``` +pub fn parser(logic: impl FnMut(ParseNestedMeta) -> Result<()>) -> impl Parser<Output = ()> { + |input: ParseStream| { + if input.is_empty() { + Ok(()) + } else { + parse_nested_meta(input, logic) + } + } +} + +/// Context for parsing a single property in the conventional syntax for +/// structured attributes. +/// +/// # Examples +/// +/// Refer to usage examples on the following two entry-points: +/// +/// - [`Attribute::parse_nested_meta`] if you have an entire `Attribute` to +/// parse. Always use this if possible. Generally this is able to produce +/// better error messages because `Attribute` holds span information for all +/// of the delimiters therein. +/// +/// - [`syn::meta::parser`] if you are implementing a `proc_macro_attribute` +/// macro and parsing the arguments to the attribute macro, i.e. the ones +/// written in the same attribute that dispatched the macro invocation. Rustc +/// does not pass span information for the surrounding delimiters into the +/// attribute macro invocation in this situation, so error messages might be +/// less precise. +/// +/// [`Attribute::parse_nested_meta`]: crate::Attribute::parse_nested_meta +/// [`syn::meta::parser`]: crate::meta::parser +#[non_exhaustive] +pub struct ParseNestedMeta<'a> { + pub path: Path, + pub input: ParseStream<'a>, +} + +impl<'a> ParseNestedMeta<'a> { + /// Used when parsing `key = "value"` syntax. + /// + /// All it does is advance `meta.input` past the `=` sign in the input. You + /// could accomplish the same effect by writing + /// `meta.parse::<Token![=]>()?`, so at most it is a minor convenience to + /// use `meta.value()?`. + /// + /// # Example + /// + /// ``` + /// use syn::{parse_quote, Attribute, LitStr}; + /// + /// let attr: Attribute = parse_quote! { + /// #[tea(kind = "EarlGrey")] + /// }; + /// // conceptually: + /// if attr.path().is_ident("tea") { // this parses the `tea` + /// attr.parse_nested_meta(|meta| { // this parses the `(` + /// if meta.path.is_ident("kind") { // this parses the `kind` + /// let value = meta.value()?; // this parses the `=` + /// let s: LitStr = value.parse()?; // this parses `"EarlGrey"` + /// if s.value() == "EarlGrey" { + /// // ... + /// } + /// Ok(()) + /// } else { + /// Err(meta.error("unsupported attribute")) + /// } + /// })?; + /// } + /// # anyhow::Ok(()) + /// ``` + pub fn value(&self) -> Result<ParseStream<'a>> { + self.input.parse::<Token![=]>()?; + Ok(self.input) + } + + /// Used when parsing `list(...)` syntax **if** the content inside the + /// nested parentheses is also expected to conform to Rust's structured + /// attribute convention. + /// + /// # Example + /// + /// ``` + /// use syn::{parse_quote, Attribute}; + /// + /// let attr: Attribute = parse_quote! 
{ + /// #[tea(with(sugar, milk))] + /// }; + /// + /// if attr.path().is_ident("tea") { + /// attr.parse_nested_meta(|meta| { + /// if meta.path.is_ident("with") { + /// meta.parse_nested_meta(|meta| { // <--- + /// if meta.path.is_ident("sugar") { + /// // Here we can go even deeper if needed. + /// Ok(()) + /// } else if meta.path.is_ident("milk") { + /// Ok(()) + /// } else { + /// Err(meta.error("unsupported ingredient")) + /// } + /// }) + /// } else { + /// Err(meta.error("unsupported tea property")) + /// } + /// })?; + /// } + /// # anyhow::Ok(()) + /// ``` + /// + /// # Counterexample + /// + /// If you don't need `parse_nested_meta`'s help in parsing the content + /// written within the nested parentheses, keep in mind that you can always + /// just parse it yourself from the exposed ParseStream. Rust syntax permits + /// arbitrary tokens within those parentheses so for the crazier stuff, + /// `parse_nested_meta` is not what you want. + /// + /// ``` + /// use syn::{parenthesized, parse_quote, Attribute, LitInt}; + /// + /// let attr: Attribute = parse_quote! { + /// #[repr(align(32))] + /// }; + /// + /// let mut align: Option<LitInt> = None; + /// if attr.path().is_ident("repr") { + /// attr.parse_nested_meta(|meta| { + /// if meta.path.is_ident("align") { + /// let content; + /// parenthesized!(content in meta.input); + /// align = Some(content.parse()?); + /// Ok(()) + /// } else { + /// Err(meta.error("unsupported repr")) + /// } + /// })?; + /// } + /// # anyhow::Ok(()) + /// ``` + pub fn parse_nested_meta( + &self, + logic: impl FnMut(ParseNestedMeta) -> Result<()>, + ) -> Result<()> { + let content; + parenthesized!(content in self.input); + parse_nested_meta(&content, logic) + } + + /// Report that the attribute's content did not conform to expectations. + /// + /// The span of the resulting error will cover `meta.path` *and* everything + /// that has been parsed so far since it. + /// + /// There are 2 ways you might call this. First, if `meta.path` is not + /// something you recognize: + /// + /// ``` + /// # use syn::Attribute; + /// # + /// # fn example(attr: &Attribute) -> syn::Result<()> { + /// attr.parse_nested_meta(|meta| { + /// if meta.path.is_ident("kind") { + /// // ... + /// Ok(()) + /// } else { + /// Err(meta.error("unsupported tea property")) + /// } + /// })?; + /// # Ok(()) + /// # } + /// ``` + /// + /// In this case, it behaves exactly like + /// `syn::Error::new_spanned(&meta.path, "message...")`. + /// + /// ```console + /// error: unsupported tea property + /// --> src/main.rs:3:26 + /// | + /// 3 | #[tea(kind = "EarlGrey", wat = "foo")] + /// | ^^^ + /// ``` + /// + /// More usefully, the second place is if you've already parsed a value but + /// have decided not to accept the value: + /// + /// ``` + /// # use syn::Attribute; + /// # + /// # fn example(attr: &Attribute) -> syn::Result<()> { + /// use syn::Expr; + /// + /// attr.parse_nested_meta(|meta| { + /// if meta.path.is_ident("kind") { + /// let expr: Expr = meta.value()?.parse()?; + /// match expr { + /// Expr::Lit(expr) => /* ... */ + /// # unimplemented!(), + /// Expr::Path(expr) => /* ... */ + /// # unimplemented!(), + /// Expr::Macro(expr) => /* ... 
*/ + /// # unimplemented!(), + /// _ => Err(meta.error("tea kind must be a string literal, path, or macro")), + /// } + /// } else /* as above */ + /// # { unimplemented!() } + /// + /// })?; + /// # Ok(()) + /// # } + /// ``` + /// + /// ```console + /// error: tea kind must be a string literal, path, or macro + /// --> src/main.rs:3:7 + /// | + /// 3 | #[tea(kind = async { replicator.await })] + /// | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + /// ``` + /// + /// Often you may want to use `syn::Error::new_spanned` even in this + /// situation. In the above code, that would be: + /// + /// ``` + /// # use syn::{Error, Expr}; + /// # + /// # fn example(expr: Expr) -> syn::Result<()> { + /// match expr { + /// Expr::Lit(expr) => /* ... */ + /// # unimplemented!(), + /// Expr::Path(expr) => /* ... */ + /// # unimplemented!(), + /// Expr::Macro(expr) => /* ... */ + /// # unimplemented!(), + /// _ => Err(Error::new_spanned(expr, "unsupported expression type for `kind`")), + /// } + /// # } + /// ``` + /// + /// ```console + /// error: unsupported expression type for `kind` + /// --> src/main.rs:3:14 + /// | + /// 3 | #[tea(kind = async { replicator.await })] + /// | ^^^^^^^^^^^^^^^^^^^^^^^^^^ + /// ``` + pub fn error(&self, msg: impl Display) -> Error { + let start_span = self.path.segments[0].ident.span(); + let end_span = self.input.cursor().prev_span(); + crate::error::new2(start_span, end_span, msg) + } +} + +pub(crate) fn parse_nested_meta( + input: ParseStream, + mut logic: impl FnMut(ParseNestedMeta) -> Result<()>, +) -> Result<()> { + loop { + let path = input.call(parse_meta_path)?; + logic(ParseNestedMeta { path, input })?; + if input.is_empty() { + return Ok(()); + } + input.parse::<Token![,]>()?; + if input.is_empty() { + return Ok(()); + } + } +} + +// Like Path::parse_mod_style, but accepts keywords in the path. +fn parse_meta_path(input: ParseStream) -> Result<Path> { + Ok(Path { + leading_colon: input.parse()?, + segments: { + let mut segments = Punctuated::new(); + if input.peek(Ident::peek_any) { + let ident = Ident::parse_any(input)?; + segments.push_value(PathSegment::from(ident)); + } else if input.is_empty() { + return Err(input.error("expected nested attribute")); + } else if input.peek(Lit) { + return Err(input.error("unexpected literal in nested attribute, expected ident")); + } else { + return Err(input.error("unexpected token in nested attribute, expected ident")); + } + while input.peek(Token![::]) { + let punct = input.parse()?; + segments.push_punct(punct); + let ident = Ident::parse_any(input)?; + segments.push_value(PathSegment::from(ident)); + } + segments + }, + }) +} diff --git a/vendor/syn/src/op.rs b/vendor/syn/src/op.rs new file mode 100644 index 00000000000000..575d9faa1273ad --- /dev/null +++ b/vendor/syn/src/op.rs @@ -0,0 +1,219 @@ +ast_enum! { + /// A binary operator: `+`, `+=`, `&`. 
+ #[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] + #[non_exhaustive] + pub enum BinOp { + /// The `+` operator (addition) + Add(Token![+]), + /// The `-` operator (subtraction) + Sub(Token![-]), + /// The `*` operator (multiplication) + Mul(Token![*]), + /// The `/` operator (division) + Div(Token![/]), + /// The `%` operator (modulus) + Rem(Token![%]), + /// The `&&` operator (logical and) + And(Token![&&]), + /// The `||` operator (logical or) + Or(Token![||]), + /// The `^` operator (bitwise xor) + BitXor(Token![^]), + /// The `&` operator (bitwise and) + BitAnd(Token![&]), + /// The `|` operator (bitwise or) + BitOr(Token![|]), + /// The `<<` operator (shift left) + Shl(Token![<<]), + /// The `>>` operator (shift right) + Shr(Token![>>]), + /// The `==` operator (equality) + Eq(Token![==]), + /// The `<` operator (less than) + Lt(Token![<]), + /// The `<=` operator (less than or equal to) + Le(Token![<=]), + /// The `!=` operator (not equal to) + Ne(Token![!=]), + /// The `>=` operator (greater than or equal to) + Ge(Token![>=]), + /// The `>` operator (greater than) + Gt(Token![>]), + /// The `+=` operator + AddAssign(Token![+=]), + /// The `-=` operator + SubAssign(Token![-=]), + /// The `*=` operator + MulAssign(Token![*=]), + /// The `/=` operator + DivAssign(Token![/=]), + /// The `%=` operator + RemAssign(Token![%=]), + /// The `^=` operator + BitXorAssign(Token![^=]), + /// The `&=` operator + BitAndAssign(Token![&=]), + /// The `|=` operator + BitOrAssign(Token![|=]), + /// The `<<=` operator + ShlAssign(Token![<<=]), + /// The `>>=` operator + ShrAssign(Token![>>=]), + } +} + +ast_enum! { + /// A unary operator: `*`, `!`, `-`. + #[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] + #[non_exhaustive] + pub enum UnOp { + /// The `*` operator for dereferencing + Deref(Token![*]), + /// The `!` operator for logical inversion + Not(Token![!]), + /// The `-` operator for negation + Neg(Token![-]), + } +} + +#[cfg(feature = "parsing")] +pub(crate) mod parsing { + use crate::error::Result; + use crate::op::{BinOp, UnOp}; + use crate::parse::{Parse, ParseStream}; + + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + impl Parse for BinOp { + fn parse(input: ParseStream) -> Result<Self> { + if input.peek(Token![+=]) { + input.parse().map(BinOp::AddAssign) + } else if input.peek(Token![-=]) { + input.parse().map(BinOp::SubAssign) + } else if input.peek(Token![*=]) { + input.parse().map(BinOp::MulAssign) + } else if input.peek(Token![/=]) { + input.parse().map(BinOp::DivAssign) + } else if input.peek(Token![%=]) { + input.parse().map(BinOp::RemAssign) + } else if input.peek(Token![^=]) { + input.parse().map(BinOp::BitXorAssign) + } else if input.peek(Token![&=]) { + input.parse().map(BinOp::BitAndAssign) + } else if input.peek(Token![|=]) { + input.parse().map(BinOp::BitOrAssign) + } else if input.peek(Token![<<=]) { + input.parse().map(BinOp::ShlAssign) + } else if input.peek(Token![>>=]) { + input.parse().map(BinOp::ShrAssign) + } else if input.peek(Token![&&]) { + input.parse().map(BinOp::And) + } else if input.peek(Token![||]) { + input.parse().map(BinOp::Or) + } else if input.peek(Token![<<]) { + input.parse().map(BinOp::Shl) + } else if input.peek(Token![>>]) { + input.parse().map(BinOp::Shr) + } else if input.peek(Token![==]) { + input.parse().map(BinOp::Eq) + } else if input.peek(Token![<=]) { + input.parse().map(BinOp::Le) + } else if input.peek(Token![!=]) { + input.parse().map(BinOp::Ne) + } else if input.peek(Token![>=]) { + 
input.parse().map(BinOp::Ge) + } else if input.peek(Token![+]) { + input.parse().map(BinOp::Add) + } else if input.peek(Token![-]) { + input.parse().map(BinOp::Sub) + } else if input.peek(Token![*]) { + input.parse().map(BinOp::Mul) + } else if input.peek(Token![/]) { + input.parse().map(BinOp::Div) + } else if input.peek(Token![%]) { + input.parse().map(BinOp::Rem) + } else if input.peek(Token![^]) { + input.parse().map(BinOp::BitXor) + } else if input.peek(Token![&]) { + input.parse().map(BinOp::BitAnd) + } else if input.peek(Token![|]) { + input.parse().map(BinOp::BitOr) + } else if input.peek(Token![<]) { + input.parse().map(BinOp::Lt) + } else if input.peek(Token![>]) { + input.parse().map(BinOp::Gt) + } else { + Err(input.error("expected binary operator")) + } + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + impl Parse for UnOp { + fn parse(input: ParseStream) -> Result<Self> { + let lookahead = input.lookahead1(); + if lookahead.peek(Token![*]) { + input.parse().map(UnOp::Deref) + } else if lookahead.peek(Token![!]) { + input.parse().map(UnOp::Not) + } else if lookahead.peek(Token![-]) { + input.parse().map(UnOp::Neg) + } else { + Err(lookahead.error()) + } + } + } +} + +#[cfg(feature = "printing")] +mod printing { + use crate::op::{BinOp, UnOp}; + use proc_macro2::TokenStream; + use quote::ToTokens; + + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for BinOp { + fn to_tokens(&self, tokens: &mut TokenStream) { + match self { + BinOp::Add(t) => t.to_tokens(tokens), + BinOp::Sub(t) => t.to_tokens(tokens), + BinOp::Mul(t) => t.to_tokens(tokens), + BinOp::Div(t) => t.to_tokens(tokens), + BinOp::Rem(t) => t.to_tokens(tokens), + BinOp::And(t) => t.to_tokens(tokens), + BinOp::Or(t) => t.to_tokens(tokens), + BinOp::BitXor(t) => t.to_tokens(tokens), + BinOp::BitAnd(t) => t.to_tokens(tokens), + BinOp::BitOr(t) => t.to_tokens(tokens), + BinOp::Shl(t) => t.to_tokens(tokens), + BinOp::Shr(t) => t.to_tokens(tokens), + BinOp::Eq(t) => t.to_tokens(tokens), + BinOp::Lt(t) => t.to_tokens(tokens), + BinOp::Le(t) => t.to_tokens(tokens), + BinOp::Ne(t) => t.to_tokens(tokens), + BinOp::Ge(t) => t.to_tokens(tokens), + BinOp::Gt(t) => t.to_tokens(tokens), + BinOp::AddAssign(t) => t.to_tokens(tokens), + BinOp::SubAssign(t) => t.to_tokens(tokens), + BinOp::MulAssign(t) => t.to_tokens(tokens), + BinOp::DivAssign(t) => t.to_tokens(tokens), + BinOp::RemAssign(t) => t.to_tokens(tokens), + BinOp::BitXorAssign(t) => t.to_tokens(tokens), + BinOp::BitAndAssign(t) => t.to_tokens(tokens), + BinOp::BitOrAssign(t) => t.to_tokens(tokens), + BinOp::ShlAssign(t) => t.to_tokens(tokens), + BinOp::ShrAssign(t) => t.to_tokens(tokens), + } + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for UnOp { + fn to_tokens(&self, tokens: &mut TokenStream) { + match self { + UnOp::Deref(t) => t.to_tokens(tokens), + UnOp::Not(t) => t.to_tokens(tokens), + UnOp::Neg(t) => t.to_tokens(tokens), + } + } + } +} diff --git a/vendor/syn/src/parse.rs b/vendor/syn/src/parse.rs new file mode 100644 index 00000000000000..57531005ac4eb2 --- /dev/null +++ b/vendor/syn/src/parse.rs @@ -0,0 +1,1419 @@ +//! Parsing interface for parsing a token stream into a syntax tree node. +//! +//! Parsing in Syn is built on parser functions that take in a [`ParseStream`] +//! and produce a [`Result<T>`] where `T` is some syntax tree node. Underlying +//! these parser functions is a lower level mechanism built around the +//! [`Cursor`] type. `Cursor` is a cheaply copyable cursor over a range of +//! 
tokens in a token stream. +//! +//! [`Result<T>`]: Result +//! [`Cursor`]: crate::buffer::Cursor +//! +//! # Example +//! +//! Here is a snippet of parsing code to get a feel for the style of the +//! library. We define data structures for a subset of Rust syntax including +//! enums (not shown) and structs, then provide implementations of the [`Parse`] +//! trait to parse these syntax tree data structures from a token stream. +//! +//! Once `Parse` impls have been defined, they can be called conveniently from a +//! procedural macro through [`parse_macro_input!`] as shown at the bottom of +//! the snippet. If the caller provides syntactically invalid input to the +//! procedural macro, they will receive a helpful compiler error message +//! pointing out the exact token that triggered the failure to parse. +//! +//! [`parse_macro_input!`]: crate::parse_macro_input! +//! +//! ``` +//! # extern crate proc_macro; +//! # +//! use proc_macro::TokenStream; +//! use syn::{braced, parse_macro_input, token, Field, Ident, Result, Token}; +//! use syn::parse::{Parse, ParseStream}; +//! use syn::punctuated::Punctuated; +//! +//! enum Item { +//! Struct(ItemStruct), +//! Enum(ItemEnum), +//! } +//! +//! struct ItemStruct { +//! struct_token: Token![struct], +//! ident: Ident, +//! brace_token: token::Brace, +//! fields: Punctuated<Field, Token![,]>, +//! } +//! # +//! # enum ItemEnum {} +//! +//! impl Parse for Item { +//! fn parse(input: ParseStream) -> Result<Self> { +//! let lookahead = input.lookahead1(); +//! if lookahead.peek(Token![struct]) { +//! input.parse().map(Item::Struct) +//! } else if lookahead.peek(Token![enum]) { +//! input.parse().map(Item::Enum) +//! } else { +//! Err(lookahead.error()) +//! } +//! } +//! } +//! +//! impl Parse for ItemStruct { +//! fn parse(input: ParseStream) -> Result<Self> { +//! let content; +//! Ok(ItemStruct { +//! struct_token: input.parse()?, +//! ident: input.parse()?, +//! brace_token: braced!(content in input), +//! fields: content.parse_terminated(Field::parse_named, Token![,])?, +//! }) +//! } +//! } +//! # +//! # impl Parse for ItemEnum { +//! # fn parse(input: ParseStream) -> Result<Self> { +//! # unimplemented!() +//! # } +//! # } +//! +//! # const IGNORE: &str = stringify! { +//! #[proc_macro] +//! # }; +//! pub fn my_macro(tokens: TokenStream) -> TokenStream { +//! let input = parse_macro_input!(tokens as Item); +//! +//! /* ... */ +//! # TokenStream::new() +//! } +//! ``` +//! +//! # The `syn::parse*` functions +//! +//! The [`syn::parse`], [`syn::parse2`], and [`syn::parse_str`] functions serve +//! as an entry point for parsing syntax tree nodes that can be parsed in an +//! obvious default way. These functions can return any syntax tree node that +//! implements the [`Parse`] trait, which includes most types in Syn. +//! +//! [`syn::parse`]: crate::parse() +//! [`syn::parse2`]: crate::parse2() +//! [`syn::parse_str`]: crate::parse_str() +//! +//! ``` +//! use syn::Type; +//! +//! # fn run_parser() -> syn::Result<()> { +//! let t: Type = syn::parse_str("std::collections::HashMap<String, Value>")?; +//! # Ok(()) +//! # } +//! # +//! # run_parser().unwrap(); +//! ``` +//! +//! The [`parse_quote!`] macro also uses this approach. +//! +//! [`parse_quote!`]: crate::parse_quote! +//! +//! # The `Parser` trait +//! +//! Some types can be parsed in several ways depending on context. For example +//! an [`Attribute`] can be either "outer" like `#[...]` or "inner" like +//! `#![...]` and parsing the wrong one would be a bug. Similarly [`Punctuated`] +//! 
may or may not allow trailing punctuation, and parsing it the wrong way +//! would either reject valid input or accept invalid input. +//! +//! [`Attribute`]: crate::Attribute +//! [`Punctuated`]: crate::punctuated +//! +//! The `Parse` trait is not implemented in these cases because there is no good +//! behavior to consider the default. +//! +//! ```compile_fail +//! # extern crate proc_macro; +//! # +//! # use syn::punctuated::Punctuated; +//! # use syn::{PathSegment, Result, Token}; +//! # +//! # fn f(tokens: proc_macro::TokenStream) -> Result<()> { +//! # +//! // Can't parse `Punctuated` without knowing whether trailing punctuation +//! // should be allowed in this context. +//! let path: Punctuated<PathSegment, Token![::]> = syn::parse(tokens)?; +//! # +//! # Ok(()) +//! # } +//! ``` +//! +//! In these cases the types provide a choice of parser functions rather than a +//! single `Parse` implementation, and those parser functions can be invoked +//! through the [`Parser`] trait. +//! +//! +//! ``` +//! # extern crate proc_macro; +//! # +//! use proc_macro::TokenStream; +//! use syn::parse::Parser; +//! use syn::punctuated::Punctuated; +//! use syn::{Attribute, Expr, PathSegment, Result, Token}; +//! +//! fn call_some_parser_methods(input: TokenStream) -> Result<()> { +//! // Parse a nonempty sequence of path segments separated by `::` punctuation +//! // with no trailing punctuation. +//! let tokens = input.clone(); +//! let parser = Punctuated::<PathSegment, Token![::]>::parse_separated_nonempty; +//! let _path = parser.parse(tokens)?; +//! +//! // Parse a possibly empty sequence of expressions terminated by commas with +//! // an optional trailing punctuation. +//! let tokens = input.clone(); +//! let parser = Punctuated::<Expr, Token![,]>::parse_terminated; +//! let _args = parser.parse(tokens)?; +//! +//! // Parse zero or more outer attributes but not inner attributes. +//! let tokens = input.clone(); +//! let parser = Attribute::parse_outer; +//! let _attrs = parser.parse(tokens)?; +//! +//! Ok(()) +//! } +//! ``` + +#[path = "discouraged.rs"] +pub mod discouraged; + +use crate::buffer::{Cursor, TokenBuffer}; +use crate::error; +use crate::lookahead; +use crate::punctuated::Punctuated; +use crate::token::Token; +use proc_macro2::{Delimiter, Group, Literal, Punct, Span, TokenStream, TokenTree}; +#[cfg(feature = "printing")] +use quote::ToTokens; +use std::cell::Cell; +use std::fmt::{self, Debug, Display}; +#[cfg(feature = "extra-traits")] +use std::hash::{Hash, Hasher}; +use std::marker::PhantomData; +use std::mem; +use std::ops::Deref; +use std::panic::{RefUnwindSafe, UnwindSafe}; +use std::rc::Rc; +use std::str::FromStr; + +pub use crate::error::{Error, Result}; +pub use crate::lookahead::{End, Lookahead1, Peek}; + +/// Parsing interface implemented by all types that can be parsed in a default +/// way from a token stream. +/// +/// Refer to the [module documentation] for details about implementing and using +/// the `Parse` trait. +/// +/// [module documentation]: self +pub trait Parse: Sized { + fn parse(input: ParseStream) -> Result<Self>; +} + +/// Input to a Syn parser function. +/// +/// See the methods of this type under the documentation of [`ParseBuffer`]. For +/// an overview of parsing in Syn, refer to the [module documentation]. +/// +/// [module documentation]: self +pub type ParseStream<'a> = &'a ParseBuffer<'a>; + +/// Cursor position within a buffered token stream. 
+/// +/// This type is more commonly used through the type alias [`ParseStream`] which +/// is an alias for `&ParseBuffer`. +/// +/// `ParseStream` is the input type for all parser functions in Syn. They have +/// the signature `fn(ParseStream) -> Result<T>`. +/// +/// ## Calling a parser function +/// +/// There is no public way to construct a `ParseBuffer`. Instead, if you are +/// looking to invoke a parser function that requires `ParseStream` as input, +/// you will need to go through one of the public parsing entry points. +/// +/// - The [`parse_macro_input!`] macro if parsing input of a procedural macro; +/// - One of [the `syn::parse*` functions][syn-parse]; or +/// - A method of the [`Parser`] trait. +/// +/// [`parse_macro_input!`]: crate::parse_macro_input! +/// [syn-parse]: self#the-synparse-functions +pub struct ParseBuffer<'a> { + scope: Span, + // Instead of Cell<Cursor<'a>> so that ParseBuffer<'a> is covariant in 'a. + // The rest of the code in this module needs to be careful that only a + // cursor derived from this `cell` is ever assigned to this `cell`. + // + // Cell<Cursor<'a>> cannot be covariant in 'a because then we could take a + // ParseBuffer<'a>, upcast to ParseBuffer<'short> for some lifetime shorter + // than 'a, and then assign a Cursor<'short> into the Cell. + // + // By extension, it would not be safe to expose an API that accepts a + // Cursor<'a> and trusts that it lives as long as the cursor currently in + // the cell. + cell: Cell<Cursor<'static>>, + marker: PhantomData<Cursor<'a>>, + unexpected: Cell<Option<Rc<Cell<Unexpected>>>>, +} + +impl<'a> Drop for ParseBuffer<'a> { + fn drop(&mut self) { + if let Some((unexpected_span, delimiter)) = span_of_unexpected_ignoring_nones(self.cursor()) + { + let (inner, old_span) = inner_unexpected(self); + if old_span.is_none() { + inner.set(Unexpected::Some(unexpected_span, delimiter)); + } + } + } +} + +impl<'a> Display for ParseBuffer<'a> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + Display::fmt(&self.cursor().token_stream(), f) + } +} + +impl<'a> Debug for ParseBuffer<'a> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + Debug::fmt(&self.cursor().token_stream(), f) + } +} + +impl<'a> UnwindSafe for ParseBuffer<'a> {} +impl<'a> RefUnwindSafe for ParseBuffer<'a> {} + +/// Cursor state associated with speculative parsing. +/// +/// This type is the input of the closure provided to [`ParseStream::step`]. +/// +/// [`ParseStream::step`]: ParseBuffer::step +/// +/// # Example +/// +/// ``` +/// use proc_macro2::TokenTree; +/// use syn::Result; +/// use syn::parse::ParseStream; +/// +/// // This function advances the stream past the next occurrence of `@`. If +/// // no `@` is present in the stream, the stream position is unchanged and +/// // an error is returned. 
+/// fn skip_past_next_at(input: ParseStream) -> Result<()> { +/// input.step(|cursor| { +/// let mut rest = *cursor; +/// while let Some((tt, next)) = rest.token_tree() { +/// match &tt { +/// TokenTree::Punct(punct) if punct.as_char() == '@' => { +/// return Ok(((), next)); +/// } +/// _ => rest = next, +/// } +/// } +/// Err(cursor.error("no `@` was found after this point")) +/// }) +/// } +/// # +/// # fn remainder_after_skipping_past_next_at( +/// # input: ParseStream, +/// # ) -> Result<proc_macro2::TokenStream> { +/// # skip_past_next_at(input)?; +/// # input.parse() +/// # } +/// # +/// # use syn::parse::Parser; +/// # let remainder = remainder_after_skipping_past_next_at +/// # .parse_str("a @ b c") +/// # .unwrap(); +/// # assert_eq!(remainder.to_string(), "b c"); +/// ``` +pub struct StepCursor<'c, 'a> { + scope: Span, + // This field is covariant in 'c. + cursor: Cursor<'c>, + // This field is contravariant in 'c. Together these make StepCursor + // invariant in 'c. Also covariant in 'a. The user cannot cast 'c to a + // different lifetime but can upcast into a StepCursor with a shorter + // lifetime 'a. + // + // As long as we only ever construct a StepCursor for which 'c outlives 'a, + // this means if ever a StepCursor<'c, 'a> exists we are guaranteed that 'c + // outlives 'a. + marker: PhantomData<fn(Cursor<'c>) -> Cursor<'a>>, +} + +impl<'c, 'a> Deref for StepCursor<'c, 'a> { + type Target = Cursor<'c>; + + fn deref(&self) -> &Self::Target { + &self.cursor + } +} + +impl<'c, 'a> Copy for StepCursor<'c, 'a> {} + +impl<'c, 'a> Clone for StepCursor<'c, 'a> { + fn clone(&self) -> Self { + *self + } +} + +impl<'c, 'a> StepCursor<'c, 'a> { + /// Triggers an error at the current position of the parse stream. + /// + /// The `ParseStream::step` invocation will return this same error without + /// advancing the stream state. + pub fn error<T: Display>(self, message: T) -> Error { + error::new_at(self.scope, self.cursor, message) + } +} + +pub(crate) fn advance_step_cursor<'c, 'a>(proof: StepCursor<'c, 'a>, to: Cursor<'c>) -> Cursor<'a> { + // Refer to the comments within the StepCursor definition. We use the + // fact that a StepCursor<'c, 'a> exists as proof that 'c outlives 'a. + // Cursor is covariant in its lifetime parameter so we can cast a + // Cursor<'c> to one with the shorter lifetime Cursor<'a>. + let _ = proof; + unsafe { mem::transmute::<Cursor<'c>, Cursor<'a>>(to) } +} + +pub(crate) fn new_parse_buffer( + scope: Span, + cursor: Cursor, + unexpected: Rc<Cell<Unexpected>>, +) -> ParseBuffer { + ParseBuffer { + scope, + // See comment on `cell` in the struct definition. + cell: Cell::new(unsafe { mem::transmute::<Cursor, Cursor<'static>>(cursor) }), + marker: PhantomData, + unexpected: Cell::new(Some(unexpected)), + } +} + +pub(crate) enum Unexpected { + None, + Some(Span, Delimiter), + Chain(Rc<Cell<Unexpected>>), +} + +impl Default for Unexpected { + fn default() -> Self { + Unexpected::None + } +} + +impl Clone for Unexpected { + fn clone(&self) -> Self { + match self { + Unexpected::None => Unexpected::None, + Unexpected::Some(span, delimiter) => Unexpected::Some(*span, *delimiter), + Unexpected::Chain(next) => Unexpected::Chain(next.clone()), + } + } +} + +// We call this on Cell<Unexpected> and Cell<Option<T>> where temporarily +// swapping in a None is cheap. 
+fn cell_clone<T: Default + Clone>(cell: &Cell<T>) -> T { + let prev = cell.take(); + let ret = prev.clone(); + cell.set(prev); + ret +} + +fn inner_unexpected(buffer: &ParseBuffer) -> (Rc<Cell<Unexpected>>, Option<(Span, Delimiter)>) { + let mut unexpected = get_unexpected(buffer); + loop { + match cell_clone(&unexpected) { + Unexpected::None => return (unexpected, None), + Unexpected::Some(span, delimiter) => return (unexpected, Some((span, delimiter))), + Unexpected::Chain(next) => unexpected = next, + } + } +} + +pub(crate) fn get_unexpected(buffer: &ParseBuffer) -> Rc<Cell<Unexpected>> { + cell_clone(&buffer.unexpected).unwrap() +} + +fn span_of_unexpected_ignoring_nones(mut cursor: Cursor) -> Option<(Span, Delimiter)> { + if cursor.eof() { + return None; + } + while let Some((inner, _span, rest)) = cursor.group(Delimiter::None) { + if let Some(unexpected) = span_of_unexpected_ignoring_nones(inner) { + return Some(unexpected); + } + cursor = rest; + } + if cursor.eof() { + None + } else { + Some((cursor.span(), cursor.scope_delimiter())) + } +} + +impl<'a> ParseBuffer<'a> { + /// Parses a syntax tree node of type `T`, advancing the position of our + /// parse stream past it. + pub fn parse<T: Parse>(&self) -> Result<T> { + T::parse(self) + } + + /// Calls the given parser function to parse a syntax tree node of type `T` + /// from this stream. + /// + /// # Example + /// + /// The parser below invokes [`Attribute::parse_outer`] to parse a vector of + /// zero or more outer attributes. + /// + /// [`Attribute::parse_outer`]: crate::Attribute::parse_outer + /// + /// ``` + /// use syn::{Attribute, Ident, Result, Token}; + /// use syn::parse::{Parse, ParseStream}; + /// + /// // Parses a unit struct with attributes. + /// // + /// // #[path = "s.tmpl"] + /// // struct S; + /// struct UnitStruct { + /// attrs: Vec<Attribute>, + /// struct_token: Token![struct], + /// name: Ident, + /// semi_token: Token![;], + /// } + /// + /// impl Parse for UnitStruct { + /// fn parse(input: ParseStream) -> Result<Self> { + /// Ok(UnitStruct { + /// attrs: input.call(Attribute::parse_outer)?, + /// struct_token: input.parse()?, + /// name: input.parse()?, + /// semi_token: input.parse()?, + /// }) + /// } + /// } + /// ``` + pub fn call<T>(&'a self, function: fn(ParseStream<'a>) -> Result<T>) -> Result<T> { + function(self) + } + + /// Looks at the next token in the parse stream to determine whether it + /// matches the requested type of token. + /// + /// Does not advance the position of the parse stream. + /// + /// # Syntax + /// + /// Note that this method does not use turbofish syntax. Pass the peek type + /// inside of parentheses. + /// + /// - `input.peek(Token![struct])` + /// - `input.peek(Token![==])` + /// - `input.peek(syn::Ident)` *(does not accept keywords)* + /// - `input.peek(syn::Ident::peek_any)` + /// - `input.peek(Lifetime)` + /// - `input.peek(token::Brace)` + /// + /// # Example + /// + /// In this example we finish parsing the list of supertraits when the next + /// token in the input is either `where` or an opening curly brace. + /// + /// ``` + /// use syn::{braced, token, Generics, Ident, Result, Token, TypeParamBound}; + /// use syn::parse::{Parse, ParseStream}; + /// use syn::punctuated::Punctuated; + /// + /// // Parses a trait definition containing no associated items. 
+ /// // + /// // trait Marker<'de, T>: A + B<'de> where Box<T>: Clone {} + /// struct MarkerTrait { + /// trait_token: Token![trait], + /// ident: Ident, + /// generics: Generics, + /// colon_token: Option<Token![:]>, + /// supertraits: Punctuated<TypeParamBound, Token![+]>, + /// brace_token: token::Brace, + /// } + /// + /// impl Parse for MarkerTrait { + /// fn parse(input: ParseStream) -> Result<Self> { + /// let trait_token: Token![trait] = input.parse()?; + /// let ident: Ident = input.parse()?; + /// let mut generics: Generics = input.parse()?; + /// let colon_token: Option<Token![:]> = input.parse()?; + /// + /// let mut supertraits = Punctuated::new(); + /// if colon_token.is_some() { + /// loop { + /// supertraits.push_value(input.parse()?); + /// if input.peek(Token![where]) || input.peek(token::Brace) { + /// break; + /// } + /// supertraits.push_punct(input.parse()?); + /// } + /// } + /// + /// generics.where_clause = input.parse()?; + /// let content; + /// let empty_brace_token = braced!(content in input); + /// + /// Ok(MarkerTrait { + /// trait_token, + /// ident, + /// generics, + /// colon_token, + /// supertraits, + /// brace_token: empty_brace_token, + /// }) + /// } + /// } + /// ``` + pub fn peek<T: Peek>(&self, token: T) -> bool { + let _ = token; + T::Token::peek(self.cursor()) + } + + /// Looks at the second-next token in the parse stream. + /// + /// This is commonly useful as a way to implement contextual keywords. + /// + /// # Example + /// + /// This example needs to use `peek2` because the symbol `union` is not a + /// keyword in Rust. We can't use just `peek` and decide to parse a union if + /// the very next token is `union`, because someone is free to write a `mod + /// union` and a macro invocation that looks like `union::some_macro! { ... + /// }`. In other words `union` is a contextual keyword. + /// + /// ``` + /// use syn::{Ident, ItemUnion, Macro, Result, Token}; + /// use syn::parse::{Parse, ParseStream}; + /// + /// // Parses either a union or a macro invocation. + /// enum UnionOrMacro { + /// // union MaybeUninit<T> { uninit: (), value: T } + /// Union(ItemUnion), + /// // lazy_static! { ... } + /// Macro(Macro), + /// } + /// + /// impl Parse for UnionOrMacro { + /// fn parse(input: ParseStream) -> Result<Self> { + /// if input.peek(Token![union]) && input.peek2(Ident) { + /// input.parse().map(UnionOrMacro::Union) + /// } else { + /// input.parse().map(UnionOrMacro::Macro) + /// } + /// } + /// } + /// ``` + pub fn peek2<T: Peek>(&self, token: T) -> bool { + fn peek2(buffer: &ParseBuffer, peek: fn(Cursor) -> bool) -> bool { + buffer.cursor().skip().map_or(false, peek) + } + + let _ = token; + peek2(self, T::Token::peek) + } + + /// Looks at the third-next token in the parse stream. + pub fn peek3<T: Peek>(&self, token: T) -> bool { + fn peek3(buffer: &ParseBuffer, peek: fn(Cursor) -> bool) -> bool { + buffer + .cursor() + .skip() + .and_then(Cursor::skip) + .map_or(false, peek) + } + + let _ = token; + peek3(self, T::Token::peek) + } + + /// Parses zero or more occurrences of `T` separated by punctuation of type + /// `P`, with optional trailing punctuation. + /// + /// Parsing continues until the end of this parse stream. The entire content + /// of this parse stream must consist of `T` and `P`. 
+ /// + /// # Example + /// + /// ``` + /// # use quote::quote; + /// # + /// use syn::{parenthesized, token, Ident, Result, Token, Type}; + /// use syn::parse::{Parse, ParseStream}; + /// use syn::punctuated::Punctuated; + /// + /// // Parse a simplified tuple struct syntax like: + /// // + /// // struct S(A, B); + /// struct TupleStruct { + /// struct_token: Token![struct], + /// ident: Ident, + /// paren_token: token::Paren, + /// fields: Punctuated<Type, Token![,]>, + /// semi_token: Token![;], + /// } + /// + /// impl Parse for TupleStruct { + /// fn parse(input: ParseStream) -> Result<Self> { + /// let content; + /// Ok(TupleStruct { + /// struct_token: input.parse()?, + /// ident: input.parse()?, + /// paren_token: parenthesized!(content in input), + /// fields: content.parse_terminated(Type::parse, Token![,])?, + /// semi_token: input.parse()?, + /// }) + /// } + /// } + /// # + /// # let input = quote! { + /// # struct S(A, B); + /// # }; + /// # syn::parse2::<TupleStruct>(input).unwrap(); + /// ``` + /// + /// # See also + /// + /// If your separator is anything more complicated than an invocation of the + /// `Token!` macro, this method won't be applicable and you can instead + /// directly use `Punctuated`'s parser functions: [`parse_terminated`], + /// [`parse_separated_nonempty`] etc. + /// + /// [`parse_terminated`]: Punctuated::parse_terminated + /// [`parse_separated_nonempty`]: Punctuated::parse_separated_nonempty + /// + /// ``` + /// use syn::{custom_keyword, Expr, Result, Token}; + /// use syn::parse::{Parse, ParseStream}; + /// use syn::punctuated::Punctuated; + /// + /// mod kw { + /// syn::custom_keyword!(fin); + /// } + /// + /// struct Fin(kw::fin, Token![;]); + /// + /// impl Parse for Fin { + /// fn parse(input: ParseStream) -> Result<Self> { + /// Ok(Self(input.parse()?, input.parse()?)) + /// } + /// } + /// + /// struct Thing { + /// steps: Punctuated<Expr, Fin>, + /// } + /// + /// impl Parse for Thing { + /// fn parse(input: ParseStream) -> Result<Self> { + /// # if true { + /// Ok(Thing { + /// steps: Punctuated::parse_terminated(input)?, + /// }) + /// # } else { + /// // or equivalently, this means the same thing: + /// # Ok(Thing { + /// steps: input.call(Punctuated::parse_terminated)?, + /// # }) + /// # } + /// } + /// } + /// ``` + pub fn parse_terminated<T, P>( + &'a self, + parser: fn(ParseStream<'a>) -> Result<T>, + separator: P, + ) -> Result<Punctuated<T, P::Token>> + where + P: Peek, + P::Token: Parse, + { + let _ = separator; + Punctuated::parse_terminated_with(self, parser) + } + + /// Returns whether there are no more tokens remaining to be parsed from + /// this stream. + /// + /// This method returns true upon reaching the end of the content within a + /// set of delimiters, as well as at the end of the tokens provided to the + /// outermost parsing entry point. + /// + /// This is equivalent to + /// <code>.<a href="#method.peek">peek</a>(<a href="struct.End.html">syn::parse::End</a>)</code>. + /// Use `.peek2(End)` or `.peek3(End)` to look for the end of a parse stream + /// further ahead than the current position. + /// + /// # Example + /// + /// ``` + /// use syn::{braced, token, Ident, Item, Result, Token}; + /// use syn::parse::{Parse, ParseStream}; + /// + /// // Parses a Rust `mod m { ... }` containing zero or more items. 
+ /// struct Mod { + /// mod_token: Token![mod], + /// name: Ident, + /// brace_token: token::Brace, + /// items: Vec<Item>, + /// } + /// + /// impl Parse for Mod { + /// fn parse(input: ParseStream) -> Result<Self> { + /// let content; + /// Ok(Mod { + /// mod_token: input.parse()?, + /// name: input.parse()?, + /// brace_token: braced!(content in input), + /// items: { + /// let mut items = Vec::new(); + /// while !content.is_empty() { + /// items.push(content.parse()?); + /// } + /// items + /// }, + /// }) + /// } + /// } + /// ``` + pub fn is_empty(&self) -> bool { + self.cursor().eof() + } + + /// Constructs a helper for peeking at the next token in this stream and + /// building an error message if it is not one of a set of expected tokens. + /// + /// # Example + /// + /// ``` + /// use syn::{ConstParam, Ident, Lifetime, LifetimeParam, Result, Token, TypeParam}; + /// use syn::parse::{Parse, ParseStream}; + /// + /// // A generic parameter, a single one of the comma-separated elements inside + /// // angle brackets in: + /// // + /// // fn f<T: Clone, 'a, 'b: 'a, const N: usize>() { ... } + /// // + /// // On invalid input, lookahead gives us a reasonable error message. + /// // + /// // error: expected one of: identifier, lifetime, `const` + /// // | + /// // 5 | fn f<!Sized>() {} + /// // | ^ + /// enum GenericParam { + /// Type(TypeParam), + /// Lifetime(LifetimeParam), + /// Const(ConstParam), + /// } + /// + /// impl Parse for GenericParam { + /// fn parse(input: ParseStream) -> Result<Self> { + /// let lookahead = input.lookahead1(); + /// if lookahead.peek(Ident) { + /// input.parse().map(GenericParam::Type) + /// } else if lookahead.peek(Lifetime) { + /// input.parse().map(GenericParam::Lifetime) + /// } else if lookahead.peek(Token![const]) { + /// input.parse().map(GenericParam::Const) + /// } else { + /// Err(lookahead.error()) + /// } + /// } + /// } + /// ``` + pub fn lookahead1(&self) -> Lookahead1<'a> { + lookahead::new(self.scope, self.cursor()) + } + + /// Forks a parse stream so that parsing tokens out of either the original + /// or the fork does not advance the position of the other. + /// + /// # Performance + /// + /// Forking a parse stream is a cheap fixed amount of work and does not + /// involve copying token buffers. Where you might hit performance problems + /// is if your macro ends up parsing a large amount of content more than + /// once. + /// + /// ``` + /// # use syn::{Expr, Result}; + /// # use syn::parse::ParseStream; + /// # + /// # fn bad(input: ParseStream) -> Result<Expr> { + /// // Do not do this. + /// if input.fork().parse::<Expr>().is_ok() { + /// return input.parse::<Expr>(); + /// } + /// # unimplemented!() + /// # } + /// ``` + /// + /// As a rule, avoid parsing an unbounded amount of tokens out of a forked + /// parse stream. Only use a fork when the amount of work performed against + /// the fork is small and bounded. + /// + /// When complex speculative parsing against the forked stream is + /// unavoidable, use [`parse::discouraged::Speculative`] to advance the + /// original stream once the fork's parse is determined to have been + /// successful. + /// + /// For a lower level way to perform speculative parsing at the token level, + /// consider using [`ParseStream::step`] instead. + /// + /// [`parse::discouraged::Speculative`]: discouraged::Speculative + /// [`ParseStream::step`]: ParseBuffer::step + /// + /// # Example + /// + /// The parse implementation shown here parses possibly restricted `pub` + /// visibilities. 
+ /// + /// - `pub` + /// - `pub(crate)` + /// - `pub(self)` + /// - `pub(super)` + /// - `pub(in some::path)` + /// + /// To handle the case of visibilities inside of tuple structs, the parser + /// needs to distinguish parentheses that specify visibility restrictions + /// from parentheses that form part of a tuple type. + /// + /// ``` + /// # struct A; + /// # struct B; + /// # struct C; + /// # + /// struct S(pub(crate) A, pub (B, C)); + /// ``` + /// + /// In this example input the first tuple struct element of `S` has + /// `pub(crate)` visibility while the second tuple struct element has `pub` + /// visibility; the parentheses around `(B, C)` are part of the type rather + /// than part of a visibility restriction. + /// + /// The parser uses a forked parse stream to check the first token inside of + /// parentheses after the `pub` keyword. This is a small bounded amount of + /// work performed against the forked parse stream. + /// + /// ``` + /// use syn::{parenthesized, token, Ident, Path, Result, Token}; + /// use syn::ext::IdentExt; + /// use syn::parse::{Parse, ParseStream}; + /// + /// struct PubVisibility { + /// pub_token: Token![pub], + /// restricted: Option<Restricted>, + /// } + /// + /// struct Restricted { + /// paren_token: token::Paren, + /// in_token: Option<Token![in]>, + /// path: Path, + /// } + /// + /// impl Parse for PubVisibility { + /// fn parse(input: ParseStream) -> Result<Self> { + /// let pub_token: Token![pub] = input.parse()?; + /// + /// if input.peek(token::Paren) { + /// let ahead = input.fork(); + /// let mut content; + /// parenthesized!(content in ahead); + /// + /// if content.peek(Token![crate]) + /// || content.peek(Token![self]) + /// || content.peek(Token![super]) + /// { + /// return Ok(PubVisibility { + /// pub_token, + /// restricted: Some(Restricted { + /// paren_token: parenthesized!(content in input), + /// in_token: None, + /// path: Path::from(content.call(Ident::parse_any)?), + /// }), + /// }); + /// } else if content.peek(Token![in]) { + /// return Ok(PubVisibility { + /// pub_token, + /// restricted: Some(Restricted { + /// paren_token: parenthesized!(content in input), + /// in_token: Some(content.parse()?), + /// path: content.call(Path::parse_mod_style)?, + /// }), + /// }); + /// } + /// } + /// + /// Ok(PubVisibility { + /// pub_token, + /// restricted: None, + /// }) + /// } + /// } + /// ``` + pub fn fork(&self) -> Self { + ParseBuffer { + scope: self.scope, + cell: self.cell.clone(), + marker: PhantomData, + // Not the parent's unexpected. Nothing cares whether the clone + // parses all the way unless we `advance_to`. + unexpected: Cell::new(Some(Rc::new(Cell::new(Unexpected::None)))), + } + } + + /// Triggers an error at the current position of the parse stream. + /// + /// # Example + /// + /// ``` + /// use syn::{Expr, Result, Token}; + /// use syn::parse::{Parse, ParseStream}; + /// + /// // Some kind of loop: `while` or `for` or `loop`. 
+ /// struct Loop { + /// expr: Expr, + /// } + /// + /// impl Parse for Loop { + /// fn parse(input: ParseStream) -> Result<Self> { + /// if input.peek(Token![while]) + /// || input.peek(Token![for]) + /// || input.peek(Token![loop]) + /// { + /// Ok(Loop { + /// expr: input.parse()?, + /// }) + /// } else { + /// Err(input.error("expected some kind of loop")) + /// } + /// } + /// } + /// ``` + pub fn error<T: Display>(&self, message: T) -> Error { + error::new_at(self.scope, self.cursor(), message) + } + + /// Speculatively parses tokens from this parse stream, advancing the + /// position of this stream only if parsing succeeds. + /// + /// This is a powerful low-level API used for defining the `Parse` impls of + /// the basic built-in token types. It is not something that will be used + /// widely outside of the Syn codebase. + /// + /// # Example + /// + /// ``` + /// use proc_macro2::TokenTree; + /// use syn::Result; + /// use syn::parse::ParseStream; + /// + /// // This function advances the stream past the next occurrence of `@`. If + /// // no `@` is present in the stream, the stream position is unchanged and + /// // an error is returned. + /// fn skip_past_next_at(input: ParseStream) -> Result<()> { + /// input.step(|cursor| { + /// let mut rest = *cursor; + /// while let Some((tt, next)) = rest.token_tree() { + /// match &tt { + /// TokenTree::Punct(punct) if punct.as_char() == '@' => { + /// return Ok(((), next)); + /// } + /// _ => rest = next, + /// } + /// } + /// Err(cursor.error("no `@` was found after this point")) + /// }) + /// } + /// # + /// # fn remainder_after_skipping_past_next_at( + /// # input: ParseStream, + /// # ) -> Result<proc_macro2::TokenStream> { + /// # skip_past_next_at(input)?; + /// # input.parse() + /// # } + /// # + /// # use syn::parse::Parser; + /// # let remainder = remainder_after_skipping_past_next_at + /// # .parse_str("a @ b c") + /// # .unwrap(); + /// # assert_eq!(remainder.to_string(), "b c"); + /// ``` + pub fn step<F, R>(&self, function: F) -> Result<R> + where + F: for<'c> FnOnce(StepCursor<'c, 'a>) -> Result<(R, Cursor<'c>)>, + { + // Since the user's function is required to work for any 'c, we know + // that the Cursor<'c> they return is either derived from the input + // StepCursor<'c, 'a> or from a Cursor<'static>. + // + // It would not be legal to write this function without the invariant + // lifetime 'c in StepCursor<'c, 'a>. If this function were written only + // in terms of 'a, the user could take our ParseBuffer<'a>, upcast it to + // a ParseBuffer<'short> which some shorter lifetime than 'a, invoke + // `step` on their ParseBuffer<'short> with a closure that returns + // Cursor<'short>, and we would wrongly write that Cursor<'short> into + // the Cell intended to hold Cursor<'a>. + // + // In some cases it may be necessary for R to contain a Cursor<'a>. + // Within Syn we solve this using `advance_step_cursor` which uses the + // existence of a StepCursor<'c, 'a> as proof that it is safe to cast + // from Cursor<'c> to Cursor<'a>. If needed outside of Syn, it would be + // safe to expose that API as a method on StepCursor. + let (node, rest) = function(StepCursor { + scope: self.scope, + cursor: self.cell.get(), + marker: PhantomData, + })?; + self.cell.set(rest); + Ok(node) + } + + /// Returns the `Span` of the next token in the parse stream, or + /// `Span::call_site()` if this parse stream has completely exhausted its + /// input `TokenStream`. 
+ pub fn span(&self) -> Span { + let cursor = self.cursor(); + if cursor.eof() { + self.scope + } else { + crate::buffer::open_span_of_group(cursor) + } + } + + /// Provides low-level access to the token representation underlying this + /// parse stream. + /// + /// Cursors are immutable so no operations you perform against the cursor + /// will affect the state of this parse stream. + /// + /// # Example + /// + /// ``` + /// use proc_macro2::TokenStream; + /// use syn::buffer::Cursor; + /// use syn::parse::{ParseStream, Result}; + /// + /// // Run a parser that returns T, but get its output as TokenStream instead of T. + /// // This works without T needing to implement ToTokens. + /// fn recognize_token_stream<T>( + /// recognizer: fn(ParseStream) -> Result<T>, + /// ) -> impl Fn(ParseStream) -> Result<TokenStream> { + /// move |input| { + /// let begin = input.cursor(); + /// recognizer(input)?; + /// let end = input.cursor(); + /// Ok(tokens_between(begin, end)) + /// } + /// } + /// + /// // Collect tokens between two cursors as a TokenStream. + /// fn tokens_between(begin: Cursor, end: Cursor) -> TokenStream { + /// assert!(begin <= end); + /// + /// let mut cursor = begin; + /// let mut tokens = TokenStream::new(); + /// while cursor < end { + /// let (token, next) = cursor.token_tree().unwrap(); + /// tokens.extend(std::iter::once(token)); + /// cursor = next; + /// } + /// tokens + /// } + /// + /// fn main() { + /// use quote::quote; + /// use syn::parse::{Parse, Parser}; + /// use syn::Token; + /// + /// // Parse syn::Type as a TokenStream, surrounded by angle brackets. + /// fn example(input: ParseStream) -> Result<TokenStream> { + /// let _langle: Token![<] = input.parse()?; + /// let ty = recognize_token_stream(syn::Type::parse)(input)?; + /// let _rangle: Token![>] = input.parse()?; + /// Ok(ty) + /// } + /// + /// let tokens = quote! 
{ <fn() -> u8> }; + /// println!("{}", example.parse2(tokens).unwrap()); + /// } + /// ``` + pub fn cursor(&self) -> Cursor<'a> { + self.cell.get() + } + + fn check_unexpected(&self) -> Result<()> { + match inner_unexpected(self).1 { + Some((span, delimiter)) => Err(err_unexpected_token(span, delimiter)), + None => Ok(()), + } + } +} + +#[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] +impl<T: Parse> Parse for Box<T> { + fn parse(input: ParseStream) -> Result<Self> { + input.parse().map(Box::new) + } +} + +#[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] +impl<T: Parse + Token> Parse for Option<T> { + fn parse(input: ParseStream) -> Result<Self> { + if T::peek(input.cursor()) { + Ok(Some(input.parse()?)) + } else { + Ok(None) + } + } +} + +#[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] +impl Parse for TokenStream { + fn parse(input: ParseStream) -> Result<Self> { + input.step(|cursor| Ok((cursor.token_stream(), Cursor::empty()))) + } +} + +#[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] +impl Parse for TokenTree { + fn parse(input: ParseStream) -> Result<Self> { + input.step(|cursor| match cursor.token_tree() { + Some((tt, rest)) => Ok((tt, rest)), + None => Err(cursor.error("expected token tree")), + }) + } +} + +#[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] +impl Parse for Group { + fn parse(input: ParseStream) -> Result<Self> { + input.step(|cursor| { + if let Some((group, rest)) = cursor.any_group_token() { + if group.delimiter() != Delimiter::None { + return Ok((group, rest)); + } + } + Err(cursor.error("expected group token")) + }) + } +} + +#[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] +impl Parse for Punct { + fn parse(input: ParseStream) -> Result<Self> { + input.step(|cursor| match cursor.punct() { + Some((punct, rest)) => Ok((punct, rest)), + None => Err(cursor.error("expected punctuation token")), + }) + } +} + +#[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] +impl Parse for Literal { + fn parse(input: ParseStream) -> Result<Self> { + input.step(|cursor| match cursor.literal() { + Some((literal, rest)) => Ok((literal, rest)), + None => Err(cursor.error("expected literal token")), + }) + } +} + +/// Parser that can parse Rust tokens into a particular syntax tree node. +/// +/// Refer to the [module documentation] for details about parsing in Syn. +/// +/// [module documentation]: self +pub trait Parser: Sized { + type Output; + + /// Parse a proc-macro2 token stream into the chosen syntax tree node. + /// + /// This function enforces that the input is fully parsed. If there are any + /// unparsed tokens at the end of the stream, an error is returned. + fn parse2(self, tokens: TokenStream) -> Result<Self::Output>; + + /// Parse tokens of source code into the chosen syntax tree node. + /// + /// This function enforces that the input is fully parsed. If there are any + /// unparsed tokens at the end of the stream, an error is returned. + #[cfg(feature = "proc-macro")] + #[cfg_attr(docsrs, doc(cfg(feature = "proc-macro")))] + fn parse(self, tokens: proc_macro::TokenStream) -> Result<Self::Output> { + self.parse2(proc_macro2::TokenStream::from(tokens)) + } + + /// Parse a string of Rust code into the chosen syntax tree node. + /// + /// This function enforces that the input is fully parsed. If there are any + /// unparsed tokens at the end of the string, an error is returned. + /// + /// # Hygiene + /// + /// Every span in the resulting syntax tree will be set to resolve at the + /// macro call site. 
+ fn parse_str(self, s: &str) -> Result<Self::Output> { + self.parse2(proc_macro2::TokenStream::from_str(s)?) + } + + // Not public API. + #[doc(hidden)] + fn __parse_scoped(self, scope: Span, tokens: TokenStream) -> Result<Self::Output> { + let _ = scope; + self.parse2(tokens) + } +} + +fn tokens_to_parse_buffer(tokens: &TokenBuffer) -> ParseBuffer { + let scope = Span::call_site(); + let cursor = tokens.begin(); + let unexpected = Rc::new(Cell::new(Unexpected::None)); + new_parse_buffer(scope, cursor, unexpected) +} + +impl<F, T> Parser for F +where + F: FnOnce(ParseStream) -> Result<T>, +{ + type Output = T; + + fn parse2(self, tokens: TokenStream) -> Result<T> { + let buf = TokenBuffer::new2(tokens); + let state = tokens_to_parse_buffer(&buf); + let node = self(&state)?; + state.check_unexpected()?; + if let Some((unexpected_span, delimiter)) = + span_of_unexpected_ignoring_nones(state.cursor()) + { + Err(err_unexpected_token(unexpected_span, delimiter)) + } else { + Ok(node) + } + } + + fn __parse_scoped(self, scope: Span, tokens: TokenStream) -> Result<Self::Output> { + let buf = TokenBuffer::new2(tokens); + let cursor = buf.begin(); + let unexpected = Rc::new(Cell::new(Unexpected::None)); + let state = new_parse_buffer(scope, cursor, unexpected); + let node = self(&state)?; + state.check_unexpected()?; + if let Some((unexpected_span, delimiter)) = + span_of_unexpected_ignoring_nones(state.cursor()) + { + Err(err_unexpected_token(unexpected_span, delimiter)) + } else { + Ok(node) + } + } +} + +pub(crate) fn parse_scoped<F: Parser>(f: F, scope: Span, tokens: TokenStream) -> Result<F::Output> { + f.__parse_scoped(scope, tokens) +} + +fn err_unexpected_token(span: Span, delimiter: Delimiter) -> Error { + let msg = match delimiter { + Delimiter::Parenthesis => "unexpected token, expected `)`", + Delimiter::Brace => "unexpected token, expected `}`", + Delimiter::Bracket => "unexpected token, expected `]`", + Delimiter::None => "unexpected token", + }; + Error::new(span, msg) +} + +/// An empty syntax tree node that consumes no tokens when parsed. +/// +/// This is useful for attribute macros that want to ensure they are not +/// provided any attribute args. +/// +/// ``` +/// # extern crate proc_macro; +/// # +/// use proc_macro::TokenStream; +/// use syn::parse_macro_input; +/// use syn::parse::Nothing; +/// +/// # const IGNORE: &str = stringify! { +/// #[proc_macro_attribute] +/// # }; +/// pub fn my_attr(args: TokenStream, input: TokenStream) -> TokenStream { +/// parse_macro_input!(args as Nothing); +/// +/// /* ... 
*/ +/// # TokenStream::new() +/// } +/// ``` +/// +/// ```text +/// error: unexpected token +/// --> src/main.rs:3:19 +/// | +/// 3 | #[my_attr(asdf)] +/// | ^^^^ +/// ``` +pub struct Nothing; + +impl Parse for Nothing { + fn parse(_input: ParseStream) -> Result<Self> { + Ok(Nothing) + } +} + +#[cfg(feature = "printing")] +#[cfg_attr(docsrs, doc(cfg(feature = "printing")))] +impl ToTokens for Nothing { + fn to_tokens(&self, tokens: &mut TokenStream) { + let _ = tokens; + } +} + +#[cfg(feature = "clone-impls")] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for Nothing { + fn clone(&self) -> Self { + *self + } +} + +#[cfg(feature = "clone-impls")] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Copy for Nothing {} + +#[cfg(feature = "extra-traits")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for Nothing { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.write_str("Nothing") + } +} + +#[cfg(feature = "extra-traits")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Eq for Nothing {} + +#[cfg(feature = "extra-traits")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for Nothing { + fn eq(&self, _other: &Self) -> bool { + true + } +} + +#[cfg(feature = "extra-traits")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for Nothing { + fn hash<H: Hasher>(&self, _state: &mut H) {} +} diff --git a/vendor/syn/src/parse_macro_input.rs b/vendor/syn/src/parse_macro_input.rs new file mode 100644 index 00000000000000..f0660aedd7dff9 --- /dev/null +++ b/vendor/syn/src/parse_macro_input.rs @@ -0,0 +1,128 @@ +/// Parse the input TokenStream of a macro, triggering a compile error if the +/// tokens fail to parse. +/// +/// Refer to the [`parse` module] documentation for more details about parsing +/// in Syn. +/// +/// [`parse` module]: mod@crate::parse +/// +/// <br> +/// +/// # Intended usage +/// +/// This macro must be called from a function that returns +/// `proc_macro::TokenStream`. Usually this will be your proc macro entry point, +/// the function that has the #\[proc_macro\] / #\[proc_macro_derive\] / +/// #\[proc_macro_attribute\] attribute. +/// +/// ``` +/// # extern crate proc_macro; +/// # +/// use proc_macro::TokenStream; +/// use syn::{parse_macro_input, Result}; +/// use syn::parse::{Parse, ParseStream}; +/// +/// struct MyMacroInput { +/// /* ... */ +/// } +/// +/// impl Parse for MyMacroInput { +/// fn parse(input: ParseStream) -> Result<Self> { +/// /* ... */ +/// # Ok(MyMacroInput {}) +/// } +/// } +/// +/// # const IGNORE: &str = stringify! { +/// #[proc_macro] +/// # }; +/// pub fn my_macro(tokens: TokenStream) -> TokenStream { +/// let input = parse_macro_input!(tokens as MyMacroInput); +/// +/// /* ... */ +/// # TokenStream::new() +/// } +/// ``` +/// +/// <br> +/// +/// # Usage with Parser +/// +/// This macro can also be used with the [`Parser` trait] for types that have +/// multiple ways that they can be parsed. +/// +/// [`Parser` trait]: crate::parse::Parser +/// +/// ``` +/// # extern crate proc_macro; +/// # +/// # use proc_macro::TokenStream; +/// # use syn::{parse_macro_input, Result}; +/// # use syn::parse::ParseStream; +/// # +/// # struct MyMacroInput {} +/// # +/// impl MyMacroInput { +/// fn parse_alternate(input: ParseStream) -> Result<Self> { +/// /* ... */ +/// # Ok(MyMacroInput {}) +/// } +/// } +/// +/// # const IGNORE: &str = stringify! 
{ +/// #[proc_macro] +/// # }; +/// pub fn my_macro(tokens: TokenStream) -> TokenStream { +/// let input = parse_macro_input!(tokens with MyMacroInput::parse_alternate); +/// +/// /* ... */ +/// # TokenStream::new() +/// } +/// ``` +/// +/// <br> +/// +/// # Expansion +/// +/// `parse_macro_input!($variable as $Type)` expands to something like: +/// +/// ```no_run +/// # extern crate proc_macro; +/// # +/// # macro_rules! doc_test { +/// # ($variable:ident as $Type:ty) => { +/// match syn::parse::<$Type>($variable) { +/// Ok(syntax_tree) => syntax_tree, +/// Err(err) => return proc_macro::TokenStream::from(err.to_compile_error()), +/// } +/// # }; +/// # } +/// # +/// # fn test(input: proc_macro::TokenStream) -> proc_macro::TokenStream { +/// # let _ = doc_test!(input as syn::Ident); +/// # proc_macro::TokenStream::new() +/// # } +/// ``` +#[macro_export] +#[cfg_attr(docsrs, doc(cfg(all(feature = "parsing", feature = "proc-macro"))))] +macro_rules! parse_macro_input { + ($tokenstream:ident as $ty:ty) => { + match $crate::parse::<$ty>($tokenstream) { + $crate::__private::Ok(data) => data, + $crate::__private::Err(err) => { + return $crate::__private::TokenStream::from(err.to_compile_error()); + } + } + }; + ($tokenstream:ident with $parser:path) => { + match $crate::parse::Parser::parse($parser, $tokenstream) { + $crate::__private::Ok(data) => data, + $crate::__private::Err(err) => { + return $crate::__private::TokenStream::from(err.to_compile_error()); + } + } + }; + ($tokenstream:ident) => { + $crate::parse_macro_input!($tokenstream as _) + }; +} diff --git a/vendor/syn/src/parse_quote.rs b/vendor/syn/src/parse_quote.rs new file mode 100644 index 00000000000000..2db20597c43682 --- /dev/null +++ b/vendor/syn/src/parse_quote.rs @@ -0,0 +1,240 @@ +/// Quasi-quotation macro that accepts input like the [`quote!`] macro but uses +/// type inference to figure out a return type for those tokens. +/// +/// [`quote!`]: https://docs.rs/quote/1.0/quote/index.html +/// +/// The return type can be any syntax tree node that implements the [`Parse`] +/// trait. +/// +/// [`Parse`]: crate::parse::Parse +/// +/// ``` +/// use quote::quote; +/// use syn::{parse_quote, Stmt}; +/// +/// fn main() { +/// let name = quote!(v); +/// let ty = quote!(u8); +/// +/// let stmt: Stmt = parse_quote! { +/// let #name: #ty = Default::default(); +/// }; +/// +/// println!("{:#?}", stmt); +/// } +/// ``` +/// +/// *This macro is available only if Syn is built with both the `"parsing"` and +/// `"printing"` features.* +/// +/// # Example +/// +/// The following helper function adds a bound `T: HeapSize` to every type +/// parameter `T` in the input generics. +/// +/// ``` +/// use syn::{parse_quote, Generics, GenericParam}; +/// +/// // Add a bound `T: HeapSize` to every type parameter T. +/// fn add_trait_bounds(mut generics: Generics) -> Generics { +/// for param in &mut generics.params { +/// if let GenericParam::Type(type_param) = param { +/// type_param.bounds.push(parse_quote!(HeapSize)); +/// } +/// } +/// generics +/// } +/// ``` +/// +/// # Special cases +/// +/// This macro can parse the following additional types as a special case even +/// though they do not implement the `Parse` trait. 
+/// +/// - [`Attribute`] — parses one attribute, allowing either outer like `#[...]` +/// or inner like `#![...]` +/// - [`Vec<Attribute>`] — parses multiple attributes, including mixed kinds in +/// any order +/// - [`Punctuated<T, P>`] — parses zero or more `T` separated by punctuation +/// `P` with optional trailing punctuation +/// - [`Vec<Arm>`] — parses arms separated by optional commas according to the +/// same grammar as the inside of a `match` expression +/// - [`Vec<Stmt>`] — parses the same as `Block::parse_within` +/// - [`Pat`], [`Box<Pat>`] — parses the same as +/// `Pat::parse_multi_with_leading_vert` +/// - [`Field`] — parses a named or unnamed struct field +/// +/// [`Vec<Attribute>`]: Attribute +/// [`Vec<Arm>`]: Arm +/// [`Vec<Stmt>`]: Block::parse_within +/// [`Pat`]: Pat::parse_multi_with_leading_vert +/// [`Box<Pat>`]: Pat::parse_multi_with_leading_vert +/// +/// # Panics +/// +/// Panics if the tokens fail to parse as the expected syntax tree type. The +/// caller is responsible for ensuring that the input tokens are syntactically +/// valid. +#[cfg_attr(docsrs, doc(cfg(all(feature = "parsing", feature = "printing"))))] +#[macro_export] +macro_rules! parse_quote { + ($($tt:tt)*) => { + $crate::__private::parse_quote($crate::__private::quote::quote!($($tt)*)) + }; +} + +/// This macro is [`parse_quote!`] + [`quote_spanned!`][quote::quote_spanned]. +/// +/// Please refer to each of their documentation. +/// +/// # Example +/// +/// ``` +/// use quote::{quote, quote_spanned}; +/// use syn::spanned::Spanned; +/// use syn::{parse_quote_spanned, ReturnType, Signature}; +/// +/// // Changes `fn()` to `fn() -> Pin<Box<dyn Future<Output = ()>>>`, +/// // and `fn() -> T` to `fn() -> Pin<Box<dyn Future<Output = T>>>`, +/// // without introducing any call_site() spans. +/// fn make_ret_pinned_future(sig: &mut Signature) { +/// let ret = match &sig.output { +/// ReturnType::Default => quote_spanned!(sig.paren_token.span=> ()), +/// ReturnType::Type(_, ret) => quote!(#ret), +/// }; +/// sig.output = parse_quote_spanned! {ret.span()=> +/// -> ::std::pin::Pin<::std::boxed::Box<dyn ::std::future::Future<Output = #ret>>> +/// }; +/// } +/// ``` +#[cfg_attr(docsrs, doc(cfg(all(feature = "parsing", feature = "printing"))))] +#[macro_export] +macro_rules! parse_quote_spanned { + ($span:expr=> $($tt:tt)*) => { + $crate::__private::parse_quote($crate::__private::quote::quote_spanned!($span=> $($tt)*)) + }; +} + +//////////////////////////////////////////////////////////////////////////////// +// Can parse any type that implements Parse. + +use crate::error::Result; +use crate::parse::{Parse, ParseStream, Parser}; +use proc_macro2::TokenStream; + +// Not public API. +#[doc(hidden)] +#[track_caller] +pub fn parse<T: ParseQuote>(token_stream: TokenStream) -> T { + let parser = T::parse; + match parser.parse2(token_stream) { + Ok(t) => t, + Err(err) => panic!("{}", err), + } +} + +#[doc(hidden)] +pub trait ParseQuote: Sized { + fn parse(input: ParseStream) -> Result<Self>; +} + +impl<T: Parse> ParseQuote for T { + fn parse(input: ParseStream) -> Result<Self> { + <T as Parse>::parse(input) + } +} + +//////////////////////////////////////////////////////////////////////////////// +// Any other types that we want `parse_quote!` to be able to parse. 
+ +use crate::punctuated::Punctuated; +#[cfg(any(feature = "full", feature = "derive"))] +use crate::{attr, Attribute, Field, FieldMutability, Ident, Type, Visibility}; +#[cfg(feature = "full")] +use crate::{Arm, Block, Pat, Stmt}; + +#[cfg(any(feature = "full", feature = "derive"))] +impl ParseQuote for Attribute { + fn parse(input: ParseStream) -> Result<Self> { + if input.peek(Token![#]) && input.peek2(Token![!]) { + attr::parsing::single_parse_inner(input) + } else { + attr::parsing::single_parse_outer(input) + } + } +} + +#[cfg(any(feature = "full", feature = "derive"))] +impl ParseQuote for Vec<Attribute> { + fn parse(input: ParseStream) -> Result<Self> { + let mut attrs = Vec::new(); + while !input.is_empty() { + attrs.push(ParseQuote::parse(input)?); + } + Ok(attrs) + } +} + +#[cfg(any(feature = "full", feature = "derive"))] +impl ParseQuote for Field { + fn parse(input: ParseStream) -> Result<Self> { + let attrs = input.call(Attribute::parse_outer)?; + let vis: Visibility = input.parse()?; + + let ident: Option<Ident>; + let colon_token: Option<Token![:]>; + let is_named = input.peek(Ident) && input.peek2(Token![:]) && !input.peek2(Token![::]); + if is_named { + ident = Some(input.parse()?); + colon_token = Some(input.parse()?); + } else { + ident = None; + colon_token = None; + } + + let ty: Type = input.parse()?; + + Ok(Field { + attrs, + vis, + mutability: FieldMutability::None, + ident, + colon_token, + ty, + }) + } +} + +#[cfg(feature = "full")] +impl ParseQuote for Pat { + fn parse(input: ParseStream) -> Result<Self> { + Pat::parse_multi_with_leading_vert(input) + } +} + +#[cfg(feature = "full")] +impl ParseQuote for Box<Pat> { + fn parse(input: ParseStream) -> Result<Self> { + <Pat as ParseQuote>::parse(input).map(Box::new) + } +} + +impl<T: Parse, P: Parse> ParseQuote for Punctuated<T, P> { + fn parse(input: ParseStream) -> Result<Self> { + Self::parse_terminated(input) + } +} + +#[cfg(feature = "full")] +impl ParseQuote for Vec<Stmt> { + fn parse(input: ParseStream) -> Result<Self> { + Block::parse_within(input) + } +} + +#[cfg(feature = "full")] +impl ParseQuote for Vec<Arm> { + fn parse(input: ParseStream) -> Result<Self> { + Arm::parse_multiple(input) + } +} diff --git a/vendor/syn/src/pat.rs b/vendor/syn/src/pat.rs new file mode 100644 index 00000000000000..5cc3ff9081a8ad --- /dev/null +++ b/vendor/syn/src/pat.rs @@ -0,0 +1,955 @@ +use crate::attr::Attribute; +use crate::expr::Member; +use crate::ident::Ident; +use crate::path::{Path, QSelf}; +use crate::punctuated::Punctuated; +use crate::token; +use crate::ty::Type; +use proc_macro2::TokenStream; + +pub use crate::expr::{ + ExprConst as PatConst, ExprLit as PatLit, ExprMacro as PatMacro, ExprPath as PatPath, + ExprRange as PatRange, +}; + +ast_enum_of_structs! { + /// A pattern in a local binding, function signature, match expression, or + /// various other places. + /// + /// # Syntax tree enum + /// + /// This type is a [syntax tree enum]. + /// + /// [syntax tree enum]: crate::expr::Expr#syntax-tree-enums + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + #[non_exhaustive] + pub enum Pat { + /// A const block: `const { ... }`. + Const(PatConst), + + /// A pattern that binds a new variable: `ref mut binding @ SUBPATTERN`. + Ident(PatIdent), + + /// A literal pattern: `0`. + Lit(PatLit), + + /// A macro in pattern position. + Macro(PatMacro), + + /// A pattern that matches any one of a set of cases. + Or(PatOr), + + /// A parenthesized pattern: `(A | B)`. 
+ Paren(PatParen), + + /// A path pattern like `Color::Red`, optionally qualified with a + /// self-type. + /// + /// Unqualified path patterns can legally refer to variants, structs, + /// constants or associated constants. Qualified path patterns like + /// `<A>::B::C` and `<A as Trait>::B::C` can only legally refer to + /// associated constants. + Path(PatPath), + + /// A range pattern: `1..=2`. + Range(PatRange), + + /// A reference pattern: `&mut var`. + Reference(PatReference), + + /// The dots in a tuple or slice pattern: `[0, 1, ..]`. + Rest(PatRest), + + /// A dynamically sized slice pattern: `[a, b, ref i @ .., y, z]`. + Slice(PatSlice), + + /// A struct or struct variant pattern: `Variant { x, y, .. }`. + Struct(PatStruct), + + /// A tuple pattern: `(a, b)`. + Tuple(PatTuple), + + /// A tuple struct or tuple variant pattern: `Variant(x, y, .., z)`. + TupleStruct(PatTupleStruct), + + /// A type ascription pattern: `foo: f64`. + Type(PatType), + + /// Tokens in pattern position not interpreted by Syn. + Verbatim(TokenStream), + + /// A pattern that matches any value: `_`. + Wild(PatWild), + + // For testing exhaustiveness in downstream code, use the following idiom: + // + // match pat { + // #![cfg_attr(test, deny(non_exhaustive_omitted_patterns))] + // + // Pat::Box(pat) => {...} + // Pat::Ident(pat) => {...} + // ... + // Pat::Wild(pat) => {...} + // + // _ => { /* some sane fallback */ } + // } + // + // This way we fail your tests but don't break your library when adding + // a variant. You will be notified by a test failure when a variant is + // added, so that you can add code to handle it, but your library will + // continue to compile and work for downstream users in the interim. + } +} + +ast_struct! { + /// A pattern that binds a new variable: `ref mut binding @ SUBPATTERN`. + /// + /// It may also be a unit struct or struct variant (e.g. `None`), or a + /// constant; these cannot be distinguished syntactically. + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + pub struct PatIdent { + pub attrs: Vec<Attribute>, + pub by_ref: Option<Token![ref]>, + pub mutability: Option<Token![mut]>, + pub ident: Ident, + pub subpat: Option<(Token![@], Box<Pat>)>, + } +} + +ast_struct! { + /// A pattern that matches any one of a set of cases. + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + pub struct PatOr { + pub attrs: Vec<Attribute>, + pub leading_vert: Option<Token![|]>, + pub cases: Punctuated<Pat, Token![|]>, + } +} + +ast_struct! { + /// A parenthesized pattern: `(A | B)`. + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + pub struct PatParen { + pub attrs: Vec<Attribute>, + pub paren_token: token::Paren, + pub pat: Box<Pat>, + } +} + +ast_struct! { + /// A reference pattern: `&mut var`. + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + pub struct PatReference { + pub attrs: Vec<Attribute>, + pub and_token: Token![&], + pub mutability: Option<Token![mut]>, + pub pat: Box<Pat>, + } +} + +ast_struct! { + /// The dots in a tuple or slice pattern: `[0, 1, ..]`. + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + pub struct PatRest { + pub attrs: Vec<Attribute>, + pub dot2_token: Token![..], + } +} + +ast_struct! { + /// A dynamically sized slice pattern: `[a, b, ref i @ .., y, z]`. + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + pub struct PatSlice { + pub attrs: Vec<Attribute>, + pub bracket_token: token::Bracket, + pub elems: Punctuated<Pat, Token![,]>, + } +} + +ast_struct! { + /// A struct or struct variant pattern: `Variant { x, y, .. }`. 
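+    ///
+    /// For example (a sketch), the pattern `Point { x: 0, y, .. }` is
+    /// represented with `x: 0` and `y` as entries in `fields` and the
+    /// trailing `..` recorded in `rest`.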
+ #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + pub struct PatStruct { + pub attrs: Vec<Attribute>, + pub qself: Option<QSelf>, + pub path: Path, + pub brace_token: token::Brace, + pub fields: Punctuated<FieldPat, Token![,]>, + pub rest: Option<PatRest>, + } +} + +ast_struct! { + /// A tuple pattern: `(a, b)`. + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + pub struct PatTuple { + pub attrs: Vec<Attribute>, + pub paren_token: token::Paren, + pub elems: Punctuated<Pat, Token![,]>, + } +} + +ast_struct! { + /// A tuple struct or tuple variant pattern: `Variant(x, y, .., z)`. + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + pub struct PatTupleStruct { + pub attrs: Vec<Attribute>, + pub qself: Option<QSelf>, + pub path: Path, + pub paren_token: token::Paren, + pub elems: Punctuated<Pat, Token![,]>, + } +} + +ast_struct! { + /// A type ascription pattern: `foo: f64`. + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + pub struct PatType { + pub attrs: Vec<Attribute>, + pub pat: Box<Pat>, + pub colon_token: Token![:], + pub ty: Box<Type>, + } +} + +ast_struct! { + /// A pattern that matches any value: `_`. + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + pub struct PatWild { + pub attrs: Vec<Attribute>, + pub underscore_token: Token![_], + } +} + +ast_struct! { + /// A single field in a struct pattern. + /// + /// Patterns like the fields of Foo `{ x, ref y, ref mut z }` are treated + /// the same as `x: x, y: ref y, z: ref mut z` but there is no colon token. + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + pub struct FieldPat { + pub attrs: Vec<Attribute>, + pub member: Member, + pub colon_token: Option<Token![:]>, + pub pat: Box<Pat>, + } +} + +#[cfg(feature = "parsing")] +pub(crate) mod parsing { + use crate::attr::Attribute; + use crate::error::{self, Result}; + use crate::expr::{ + Expr, ExprConst, ExprLit, ExprMacro, ExprPath, ExprRange, Member, RangeLimits, + }; + use crate::ext::IdentExt as _; + use crate::ident::Ident; + use crate::lit::Lit; + use crate::mac::{self, Macro}; + use crate::parse::{Parse, ParseBuffer, ParseStream}; + use crate::pat::{ + FieldPat, Pat, PatIdent, PatOr, PatParen, PatReference, PatRest, PatSlice, PatStruct, + PatTuple, PatTupleStruct, PatType, PatWild, + }; + use crate::path::{self, Path, QSelf}; + use crate::punctuated::Punctuated; + use crate::stmt::Block; + use crate::token; + use crate::verbatim; + use proc_macro2::TokenStream; + + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + impl Pat { + /// Parse a pattern that does _not_ involve `|` at the top level. + /// + /// This parser matches the behavior of the `$:pat_param` macro_rules + /// matcher, and on editions prior to Rust 2021, the behavior of + /// `$:pat`. + /// + /// In Rust syntax, some examples of where this syntax would occur are + /// in the argument pattern of functions and closures. Patterns using + /// `|` are not allowed to occur in these positions. + /// + /// ```compile_fail + /// fn f(Some(_) | None: Option<T>) { + /// let _ = |Some(_) | None: Option<T>| {}; + /// // ^^^^^^^^^^^^^^^^^^^^^^^^^??? 
:( + /// } + /// ``` + /// + /// ```console + /// error: top-level or-patterns are not allowed in function parameters + /// --> src/main.rs:1:6 + /// | + /// 1 | fn f(Some(_) | None: Option<T>) { + /// | ^^^^^^^^^^^^^^ help: wrap the pattern in parentheses: `(Some(_) | None)` + /// ``` + pub fn parse_single(input: ParseStream) -> Result<Self> { + let begin = input.fork(); + let lookahead = input.lookahead1(); + if lookahead.peek(Ident) + && (input.peek2(Token![::]) + || input.peek2(Token![!]) + || input.peek2(token::Brace) + || input.peek2(token::Paren) + || input.peek2(Token![..])) + || input.peek(Token![self]) && input.peek2(Token![::]) + || lookahead.peek(Token![::]) + || lookahead.peek(Token![<]) + || input.peek(Token![Self]) + || input.peek(Token![super]) + || input.peek(Token![crate]) + { + pat_path_or_macro_or_struct_or_range(input) + } else if lookahead.peek(Token![_]) { + input.call(pat_wild).map(Pat::Wild) + } else if input.peek(Token![box]) { + pat_box(begin, input) + } else if input.peek(Token![-]) || lookahead.peek(Lit) || lookahead.peek(Token![const]) + { + pat_lit_or_range(input) + } else if lookahead.peek(Token![ref]) + || lookahead.peek(Token![mut]) + || input.peek(Token![self]) + || input.peek(Ident) + { + input.call(pat_ident).map(Pat::Ident) + } else if lookahead.peek(Token![&]) { + input.call(pat_reference).map(Pat::Reference) + } else if lookahead.peek(token::Paren) { + input.call(pat_paren_or_tuple) + } else if lookahead.peek(token::Bracket) { + input.call(pat_slice).map(Pat::Slice) + } else if lookahead.peek(Token![..]) && !input.peek(Token![...]) { + pat_range_half_open(input) + } else if lookahead.peek(Token![const]) { + input.call(pat_const).map(Pat::Verbatim) + } else { + Err(lookahead.error()) + } + } + + /// Parse a pattern, possibly involving `|`, but not a leading `|`. + pub fn parse_multi(input: ParseStream) -> Result<Self> { + multi_pat_impl(input, None) + } + + /// Parse a pattern, possibly involving `|`, possibly including a + /// leading `|`. + /// + /// This parser matches the behavior of the Rust 2021 edition's `$:pat` + /// macro_rules matcher. + /// + /// In Rust syntax, an example of where this syntax would occur is in + /// the pattern of a `match` arm, where the language permits an optional + /// leading `|`, although it is not idiomatic to write one there in + /// handwritten code. + /// + /// ``` + /// # let wat = None; + /// match wat { + /// | None | Some(false) => {} + /// | Some(true) => {} + /// } + /// ``` + /// + /// The compiler accepts it only to facilitate some situations in + /// macro-generated code where a macro author might need to write: + /// + /// ``` + /// # macro_rules! doc { + /// # ($value:expr, ($($conditions1:pat),*), ($($conditions2:pat),*), $then:expr) => { + /// match $value { + /// $(| $conditions1)* $(| $conditions2)* => $then + /// } + /// # }; + /// # } + /// # + /// # doc!(true, (true), (false), {}); + /// # doc!(true, (), (true, false), {}); + /// # doc!(true, (true, false), (), {}); + /// ``` + /// + /// Expressing the same thing correctly in the case that either one (but + /// not both) of `$conditions1` and `$conditions2` might be empty, + /// without leading `|`, is complex. + /// + /// Use [`Pat::parse_multi`] instead if you are not intending to support + /// macro-generated macro input. 
+ pub fn parse_multi_with_leading_vert(input: ParseStream) -> Result<Self> { + let leading_vert: Option<Token![|]> = input.parse()?; + multi_pat_impl(input, leading_vert) + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + impl Parse for PatType { + fn parse(input: ParseStream) -> Result<Self> { + Ok(PatType { + attrs: Vec::new(), + pat: Box::new(Pat::parse_single(input)?), + colon_token: input.parse()?, + ty: input.parse()?, + }) + } + } + + fn multi_pat_impl(input: ParseStream, leading_vert: Option<Token![|]>) -> Result<Pat> { + let mut pat = Pat::parse_single(input)?; + if leading_vert.is_some() + || input.peek(Token![|]) && !input.peek(Token![||]) && !input.peek(Token![|=]) + { + let mut cases = Punctuated::new(); + cases.push_value(pat); + while input.peek(Token![|]) && !input.peek(Token![||]) && !input.peek(Token![|=]) { + let punct = input.parse()?; + cases.push_punct(punct); + let pat = Pat::parse_single(input)?; + cases.push_value(pat); + } + pat = Pat::Or(PatOr { + attrs: Vec::new(), + leading_vert, + cases, + }); + } + Ok(pat) + } + + fn pat_path_or_macro_or_struct_or_range(input: ParseStream) -> Result<Pat> { + let expr_style = true; + let (qself, path) = path::parsing::qpath(input, expr_style)?; + + if qself.is_none() + && input.peek(Token![!]) + && !input.peek(Token![!=]) + && path.is_mod_style() + { + let bang_token: Token![!] = input.parse()?; + let (delimiter, tokens) = mac::parse_delimiter(input)?; + return Ok(Pat::Macro(ExprMacro { + attrs: Vec::new(), + mac: Macro { + path, + bang_token, + delimiter, + tokens, + }, + })); + } + + if input.peek(token::Brace) { + pat_struct(input, qself, path).map(Pat::Struct) + } else if input.peek(token::Paren) { + pat_tuple_struct(input, qself, path).map(Pat::TupleStruct) + } else if input.peek(Token![..]) { + pat_range(input, qself, path) + } else { + Ok(Pat::Path(ExprPath { + attrs: Vec::new(), + qself, + path, + })) + } + } + + fn pat_wild(input: ParseStream) -> Result<PatWild> { + Ok(PatWild { + attrs: Vec::new(), + underscore_token: input.parse()?, + }) + } + + fn pat_box(begin: ParseBuffer, input: ParseStream) -> Result<Pat> { + input.parse::<Token![box]>()?; + Pat::parse_single(input)?; + Ok(Pat::Verbatim(verbatim::between(&begin, input))) + } + + fn pat_ident(input: ParseStream) -> Result<PatIdent> { + Ok(PatIdent { + attrs: Vec::new(), + by_ref: input.parse()?, + mutability: input.parse()?, + ident: { + if input.peek(Token![self]) { + input.call(Ident::parse_any)? + } else { + input.parse()? 
+ } + }, + subpat: { + if input.peek(Token![@]) { + let at_token: Token![@] = input.parse()?; + let subpat = Pat::parse_single(input)?; + Some((at_token, Box::new(subpat))) + } else { + None + } + }, + }) + } + + fn pat_tuple_struct( + input: ParseStream, + qself: Option<QSelf>, + path: Path, + ) -> Result<PatTupleStruct> { + let content; + let paren_token = parenthesized!(content in input); + + let mut elems = Punctuated::new(); + while !content.is_empty() { + let value = Pat::parse_multi_with_leading_vert(&content)?; + elems.push_value(value); + if content.is_empty() { + break; + } + let punct = content.parse()?; + elems.push_punct(punct); + } + + Ok(PatTupleStruct { + attrs: Vec::new(), + qself, + path, + paren_token, + elems, + }) + } + + fn pat_struct(input: ParseStream, qself: Option<QSelf>, path: Path) -> Result<PatStruct> { + let content; + let brace_token = braced!(content in input); + + let mut fields = Punctuated::new(); + let mut rest = None; + while !content.is_empty() { + let attrs = content.call(Attribute::parse_outer)?; + if content.peek(Token![..]) { + rest = Some(PatRest { + attrs, + dot2_token: content.parse()?, + }); + break; + } + let mut value = content.call(field_pat)?; + value.attrs = attrs; + fields.push_value(value); + if content.is_empty() { + break; + } + let punct: Token![,] = content.parse()?; + fields.push_punct(punct); + } + + Ok(PatStruct { + attrs: Vec::new(), + qself, + path, + brace_token, + fields, + rest, + }) + } + + fn field_pat(input: ParseStream) -> Result<FieldPat> { + let begin = input.fork(); + let boxed: Option<Token![box]> = input.parse()?; + let by_ref: Option<Token![ref]> = input.parse()?; + let mutability: Option<Token![mut]> = input.parse()?; + + let member = if boxed.is_some() || by_ref.is_some() || mutability.is_some() { + input.parse().map(Member::Named) + } else { + input.parse() + }?; + + if boxed.is_none() && by_ref.is_none() && mutability.is_none() && input.peek(Token![:]) + || !member.is_named() + { + return Ok(FieldPat { + attrs: Vec::new(), + member, + colon_token: Some(input.parse()?), + pat: Box::new(Pat::parse_multi_with_leading_vert(input)?), + }); + } + + let ident = match member { + Member::Named(ident) => ident, + Member::Unnamed(_) => unreachable!(), + }; + + let pat = if boxed.is_some() { + Pat::Verbatim(verbatim::between(&begin, input)) + } else { + Pat::Ident(PatIdent { + attrs: Vec::new(), + by_ref, + mutability, + ident: ident.clone(), + subpat: None, + }) + }; + + Ok(FieldPat { + attrs: Vec::new(), + member: Member::Named(ident), + colon_token: None, + pat: Box::new(pat), + }) + } + + fn pat_range(input: ParseStream, qself: Option<QSelf>, path: Path) -> Result<Pat> { + let limits = RangeLimits::parse_obsolete(input)?; + let end = input.call(pat_range_bound)?; + if let (RangeLimits::Closed(_), None) = (&limits, &end) { + return Err(input.error("expected range upper bound")); + } + Ok(Pat::Range(ExprRange { + attrs: Vec::new(), + start: Some(Box::new(Expr::Path(ExprPath { + attrs: Vec::new(), + qself, + path, + }))), + limits, + end: end.map(PatRangeBound::into_expr), + })) + } + + fn pat_range_half_open(input: ParseStream) -> Result<Pat> { + let limits: RangeLimits = input.parse()?; + let end = input.call(pat_range_bound)?; + if end.is_some() { + Ok(Pat::Range(ExprRange { + attrs: Vec::new(), + start: None, + limits, + end: end.map(PatRangeBound::into_expr), + })) + } else { + match limits { + RangeLimits::HalfOpen(dot2_token) => Ok(Pat::Rest(PatRest { + attrs: Vec::new(), + dot2_token, + })), + 
RangeLimits::Closed(_) => Err(input.error("expected range upper bound")), + } + } + } + + fn pat_paren_or_tuple(input: ParseStream) -> Result<Pat> { + let content; + let paren_token = parenthesized!(content in input); + + let mut elems = Punctuated::new(); + while !content.is_empty() { + let value = Pat::parse_multi_with_leading_vert(&content)?; + if content.is_empty() { + if elems.is_empty() && !matches!(value, Pat::Rest(_)) { + return Ok(Pat::Paren(PatParen { + attrs: Vec::new(), + paren_token, + pat: Box::new(value), + })); + } + elems.push_value(value); + break; + } + elems.push_value(value); + let punct = content.parse()?; + elems.push_punct(punct); + } + + Ok(Pat::Tuple(PatTuple { + attrs: Vec::new(), + paren_token, + elems, + })) + } + + fn pat_reference(input: ParseStream) -> Result<PatReference> { + Ok(PatReference { + attrs: Vec::new(), + and_token: input.parse()?, + mutability: input.parse()?, + pat: Box::new(Pat::parse_single(input)?), + }) + } + + fn pat_lit_or_range(input: ParseStream) -> Result<Pat> { + let start = input.call(pat_range_bound)?.unwrap(); + if input.peek(Token![..]) { + let limits = RangeLimits::parse_obsolete(input)?; + let end = input.call(pat_range_bound)?; + if let (RangeLimits::Closed(_), None) = (&limits, &end) { + return Err(input.error("expected range upper bound")); + } + Ok(Pat::Range(ExprRange { + attrs: Vec::new(), + start: Some(start.into_expr()), + limits, + end: end.map(PatRangeBound::into_expr), + })) + } else { + Ok(start.into_pat()) + } + } + + // Patterns that can appear on either side of a range pattern. + enum PatRangeBound { + Const(ExprConst), + Lit(ExprLit), + Path(ExprPath), + } + + impl PatRangeBound { + fn into_expr(self) -> Box<Expr> { + Box::new(match self { + PatRangeBound::Const(pat) => Expr::Const(pat), + PatRangeBound::Lit(pat) => Expr::Lit(pat), + PatRangeBound::Path(pat) => Expr::Path(pat), + }) + } + + fn into_pat(self) -> Pat { + match self { + PatRangeBound::Const(pat) => Pat::Const(pat), + PatRangeBound::Lit(pat) => Pat::Lit(pat), + PatRangeBound::Path(pat) => Pat::Path(pat), + } + } + } + + fn pat_range_bound(input: ParseStream) -> Result<Option<PatRangeBound>> { + if input.is_empty() + || input.peek(Token![|]) + || input.peek(Token![=]) + || input.peek(Token![:]) && !input.peek(Token![::]) + || input.peek(Token![,]) + || input.peek(Token![;]) + || input.peek(Token![if]) + { + return Ok(None); + } + + let lookahead = input.lookahead1(); + let expr = if lookahead.peek(Lit) { + PatRangeBound::Lit(input.parse()?) + } else if lookahead.peek(Ident) + || lookahead.peek(Token![::]) + || lookahead.peek(Token![<]) + || lookahead.peek(Token![self]) + || lookahead.peek(Token![Self]) + || lookahead.peek(Token![super]) + || lookahead.peek(Token![crate]) + { + PatRangeBound::Path(input.parse()?) + } else if lookahead.peek(Token![const]) { + PatRangeBound::Const(input.parse()?) 
+ } else { + return Err(lookahead.error()); + }; + + Ok(Some(expr)) + } + + fn pat_slice(input: ParseStream) -> Result<PatSlice> { + let content; + let bracket_token = bracketed!(content in input); + + let mut elems = Punctuated::new(); + while !content.is_empty() { + let value = Pat::parse_multi_with_leading_vert(&content)?; + match value { + Pat::Range(pat) if pat.start.is_none() || pat.end.is_none() => { + let (start, end) = match pat.limits { + RangeLimits::HalfOpen(dot_dot) => (dot_dot.spans[0], dot_dot.spans[1]), + RangeLimits::Closed(dot_dot_eq) => { + (dot_dot_eq.spans[0], dot_dot_eq.spans[2]) + } + }; + let msg = "range pattern is not allowed unparenthesized inside slice pattern"; + return Err(error::new2(start, end, msg)); + } + _ => {} + } + elems.push_value(value); + if content.is_empty() { + break; + } + let punct = content.parse()?; + elems.push_punct(punct); + } + + Ok(PatSlice { + attrs: Vec::new(), + bracket_token, + elems, + }) + } + + fn pat_const(input: ParseStream) -> Result<TokenStream> { + let begin = input.fork(); + input.parse::<Token![const]>()?; + + let content; + braced!(content in input); + content.call(Attribute::parse_inner)?; + content.call(Block::parse_within)?; + + Ok(verbatim::between(&begin, input)) + } +} + +#[cfg(feature = "printing")] +mod printing { + use crate::attr::FilterAttrs; + use crate::pat::{ + FieldPat, Pat, PatIdent, PatOr, PatParen, PatReference, PatRest, PatSlice, PatStruct, + PatTuple, PatTupleStruct, PatType, PatWild, + }; + use crate::path; + use crate::path::printing::PathStyle; + use proc_macro2::TokenStream; + use quote::{ToTokens, TokenStreamExt as _}; + + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for PatIdent { + fn to_tokens(&self, tokens: &mut TokenStream) { + tokens.append_all(self.attrs.outer()); + self.by_ref.to_tokens(tokens); + self.mutability.to_tokens(tokens); + self.ident.to_tokens(tokens); + if let Some((at_token, subpat)) = &self.subpat { + at_token.to_tokens(tokens); + subpat.to_tokens(tokens); + } + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for PatOr { + fn to_tokens(&self, tokens: &mut TokenStream) { + tokens.append_all(self.attrs.outer()); + self.leading_vert.to_tokens(tokens); + self.cases.to_tokens(tokens); + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for PatParen { + fn to_tokens(&self, tokens: &mut TokenStream) { + tokens.append_all(self.attrs.outer()); + self.paren_token.surround(tokens, |tokens| { + self.pat.to_tokens(tokens); + }); + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for PatReference { + fn to_tokens(&self, tokens: &mut TokenStream) { + tokens.append_all(self.attrs.outer()); + self.and_token.to_tokens(tokens); + self.mutability.to_tokens(tokens); + self.pat.to_tokens(tokens); + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for PatRest { + fn to_tokens(&self, tokens: &mut TokenStream) { + tokens.append_all(self.attrs.outer()); + self.dot2_token.to_tokens(tokens); + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for PatSlice { + fn to_tokens(&self, tokens: &mut TokenStream) { + tokens.append_all(self.attrs.outer()); + self.bracket_token.surround(tokens, |tokens| { + self.elems.to_tokens(tokens); + }); + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for PatStruct { + fn to_tokens(&self, tokens: &mut TokenStream) { + tokens.append_all(self.attrs.outer()); + 
path::printing::print_qpath(tokens, &self.qself, &self.path, PathStyle::Expr); + self.brace_token.surround(tokens, |tokens| { + self.fields.to_tokens(tokens); + // NOTE: We need a comma before the dot2 token if it is present. + if !self.fields.empty_or_trailing() && self.rest.is_some() { + <Token![,]>::default().to_tokens(tokens); + } + self.rest.to_tokens(tokens); + }); + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for PatTuple { + fn to_tokens(&self, tokens: &mut TokenStream) { + tokens.append_all(self.attrs.outer()); + self.paren_token.surround(tokens, |tokens| { + self.elems.to_tokens(tokens); + // If there is only one element, a trailing comma is needed to + // distinguish PatTuple from PatParen, unless this is `(..)` + // which is a tuple pattern even without comma. + if self.elems.len() == 1 + && !self.elems.trailing_punct() + && !matches!(self.elems[0], Pat::Rest { .. }) + { + <Token![,]>::default().to_tokens(tokens); + } + }); + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for PatTupleStruct { + fn to_tokens(&self, tokens: &mut TokenStream) { + tokens.append_all(self.attrs.outer()); + path::printing::print_qpath(tokens, &self.qself, &self.path, PathStyle::Expr); + self.paren_token.surround(tokens, |tokens| { + self.elems.to_tokens(tokens); + }); + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for PatType { + fn to_tokens(&self, tokens: &mut TokenStream) { + tokens.append_all(self.attrs.outer()); + self.pat.to_tokens(tokens); + self.colon_token.to_tokens(tokens); + self.ty.to_tokens(tokens); + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for PatWild { + fn to_tokens(&self, tokens: &mut TokenStream) { + tokens.append_all(self.attrs.outer()); + self.underscore_token.to_tokens(tokens); + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for FieldPat { + fn to_tokens(&self, tokens: &mut TokenStream) { + tokens.append_all(self.attrs.outer()); + if let Some(colon_token) = &self.colon_token { + self.member.to_tokens(tokens); + colon_token.to_tokens(tokens); + } + self.pat.to_tokens(tokens); + } + } +} diff --git a/vendor/syn/src/path.rs b/vendor/syn/src/path.rs new file mode 100644 index 00000000000000..d2fcb9bc5d28b4 --- /dev/null +++ b/vendor/syn/src/path.rs @@ -0,0 +1,966 @@ +#[cfg(feature = "parsing")] +use crate::error::Result; +use crate::expr::Expr; +use crate::generics::TypeParamBound; +use crate::ident::Ident; +use crate::lifetime::Lifetime; +use crate::punctuated::Punctuated; +use crate::token; +use crate::ty::{ReturnType, Type}; + +ast_struct! { + /// A path at which a named item is exported (e.g. `std::collections::HashMap`). + #[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] + pub struct Path { + pub leading_colon: Option<Token![::]>, + pub segments: Punctuated<PathSegment, Token![::]>, + } +} + +impl<T> From<T> for Path +where + T: Into<PathSegment>, +{ + fn from(segment: T) -> Self { + let mut path = Path { + leading_colon: None, + segments: Punctuated::new(), + }; + path.segments.push_value(segment.into()); + path + } +} + +impl Path { + /// Determines whether this is a path of length 1 equal to the given + /// ident. 
+ /// + /// For them to compare equal, it must be the case that: + /// + /// - the path has no leading colon, + /// - the number of path segments is 1, + /// - the first path segment has no angle bracketed or parenthesized + /// path arguments, and + /// - the ident of the first path segment is equal to the given one. + /// + /// # Example + /// + /// ``` + /// use proc_macro2::TokenStream; + /// use syn::{Attribute, Error, Meta, Result}; + /// + /// fn get_serde_meta_item(attr: &Attribute) -> Result<Option<&TokenStream>> { + /// if attr.path().is_ident("serde") { + /// match &attr.meta { + /// Meta::List(meta) => Ok(Some(&meta.tokens)), + /// bad => Err(Error::new_spanned(bad, "unrecognized attribute")), + /// } + /// } else { + /// Ok(None) + /// } + /// } + /// ``` + pub fn is_ident<I>(&self, ident: &I) -> bool + where + I: ?Sized, + Ident: PartialEq<I>, + { + match self.get_ident() { + Some(id) => id == ident, + None => false, + } + } + + /// If this path consists of a single ident, returns the ident. + /// + /// A path is considered an ident if: + /// + /// - the path has no leading colon, + /// - the number of path segments is 1, and + /// - the first path segment has no angle bracketed or parenthesized + /// path arguments. + pub fn get_ident(&self) -> Option<&Ident> { + if self.leading_colon.is_none() + && self.segments.len() == 1 + && self.segments[0].arguments.is_none() + { + Some(&self.segments[0].ident) + } else { + None + } + } + + /// An error if this path is not a single ident, as defined in `get_ident`. + #[cfg(feature = "parsing")] + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + pub fn require_ident(&self) -> Result<&Ident> { + self.get_ident().ok_or_else(|| { + crate::error::new2( + self.segments.first().unwrap().ident.span(), + self.segments.last().unwrap().ident.span(), + "expected this path to be an identifier", + ) + }) + } +} + +ast_struct! { + /// A segment of a path together with any path arguments on that segment. + #[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] + pub struct PathSegment { + pub ident: Ident, + pub arguments: PathArguments, + } +} + +impl<T> From<T> for PathSegment +where + T: Into<Ident>, +{ + fn from(ident: T) -> Self { + PathSegment { + ident: ident.into(), + arguments: PathArguments::None, + } + } +} + +ast_enum! { + /// Angle bracketed or parenthesized arguments of a path segment. + /// + /// ## Angle bracketed + /// + /// The `<'a, T>` in `std::slice::iter<'a, T>`. + /// + /// ## Parenthesized + /// + /// The `(A, B) -> C` in `Fn(A, B) -> C`. + #[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] + pub enum PathArguments { + None, + /// The `<'a, T>` in `std::slice::iter<'a, T>`. + AngleBracketed(AngleBracketedGenericArguments), + /// The `(A, B) -> C` in `Fn(A, B) -> C`. + Parenthesized(ParenthesizedGenericArguments), + } +} + +impl Default for PathArguments { + fn default() -> Self { + PathArguments::None + } +} + +impl PathArguments { + pub fn is_empty(&self) -> bool { + match self { + PathArguments::None => true, + PathArguments::AngleBracketed(bracketed) => bracketed.args.is_empty(), + PathArguments::Parenthesized(_) => false, + } + } + + pub fn is_none(&self) -> bool { + match self { + PathArguments::None => true, + PathArguments::AngleBracketed(_) | PathArguments::Parenthesized(_) => false, + } + } +} + +ast_enum! { + /// An individual generic argument, like `'a`, `T`, or `Item = T`. 
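+    ///
+    /// For example (a sketch), in `Trait<'a, T, Item = String, PANIC = false,
+    /// Assoc: Display>` the arguments are, in order, a `Lifetime`, a `Type`,
+    /// an `AssocType` binding, an `AssocConst` binding, and a `Constraint`.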
+ #[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] + #[non_exhaustive] + pub enum GenericArgument { + /// A lifetime argument. + Lifetime(Lifetime), + /// A type argument. + Type(Type), + /// A const expression. Must be inside of a block. + /// + /// NOTE: Identity expressions are represented as Type arguments, as + /// they are indistinguishable syntactically. + Const(Expr), + /// A binding (equality constraint) on an associated type: the `Item = + /// u8` in `Iterator<Item = u8>`. + AssocType(AssocType), + /// An equality constraint on an associated constant: the `PANIC = + /// false` in `Trait<PANIC = false>`. + AssocConst(AssocConst), + /// An associated type bound: `Iterator<Item: Display>`. + Constraint(Constraint), + } +} + +ast_struct! { + /// Angle bracketed arguments of a path segment: the `<K, V>` in `HashMap<K, + /// V>`. + #[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] + pub struct AngleBracketedGenericArguments { + pub colon2_token: Option<Token![::]>, + pub lt_token: Token![<], + pub args: Punctuated<GenericArgument, Token![,]>, + pub gt_token: Token![>], + } +} + +ast_struct! { + /// A binding (equality constraint) on an associated type: the `Item = u8` + /// in `Iterator<Item = u8>`. + #[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] + pub struct AssocType { + pub ident: Ident, + pub generics: Option<AngleBracketedGenericArguments>, + pub eq_token: Token![=], + pub ty: Type, + } +} + +ast_struct! { + /// An equality constraint on an associated constant: the `PANIC = false` in + /// `Trait<PANIC = false>`. + #[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] + pub struct AssocConst { + pub ident: Ident, + pub generics: Option<AngleBracketedGenericArguments>, + pub eq_token: Token![=], + pub value: Expr, + } +} + +ast_struct! { + /// An associated type bound: `Iterator<Item: Display>`. + #[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] + pub struct Constraint { + pub ident: Ident, + pub generics: Option<AngleBracketedGenericArguments>, + pub colon_token: Token![:], + pub bounds: Punctuated<TypeParamBound, Token![+]>, + } +} + +ast_struct! { + /// Arguments of a function path segment: the `(A, B) -> C` in `Fn(A,B) -> + /// C`. + #[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] + pub struct ParenthesizedGenericArguments { + pub paren_token: token::Paren, + /// `(A, B)` + pub inputs: Punctuated<Type, Token![,]>, + /// `C` + pub output: ReturnType, + } +} + +ast_struct! { + /// The explicit Self type in a qualified path: the `T` in `<T as + /// Display>::fmt`. + /// + /// The actual path, including the trait and the associated item, is stored + /// separately. The `position` field represents the index of the associated + /// item qualified with this Self type. 
+ /// + /// ```text + /// <Vec<T> as a::b::Trait>::AssociatedItem + /// ^~~~~~ ~~~~~~~~~~~~~~^ + /// ty position = 3 + /// + /// <Vec<T>>::AssociatedItem + /// ^~~~~~ ^ + /// ty position = 0 + /// ``` + #[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] + pub struct QSelf { + pub lt_token: Token![<], + pub ty: Box<Type>, + pub position: usize, + pub as_token: Option<Token![as]>, + pub gt_token: Token![>], + } +} + +#[cfg(feature = "parsing")] +pub(crate) mod parsing { + use crate::error::Result; + #[cfg(feature = "full")] + use crate::expr::ExprBlock; + use crate::expr::{Expr, ExprPath}; + use crate::ext::IdentExt as _; + #[cfg(feature = "full")] + use crate::generics::TypeParamBound; + use crate::ident::Ident; + use crate::lifetime::Lifetime; + use crate::lit::Lit; + use crate::parse::{Parse, ParseStream}; + #[cfg(feature = "full")] + use crate::path::Constraint; + use crate::path::{ + AngleBracketedGenericArguments, AssocConst, AssocType, GenericArgument, + ParenthesizedGenericArguments, Path, PathArguments, PathSegment, QSelf, + }; + use crate::punctuated::Punctuated; + use crate::token; + use crate::ty::{ReturnType, Type}; + #[cfg(not(feature = "full"))] + use crate::verbatim; + + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + impl Parse for Path { + fn parse(input: ParseStream) -> Result<Self> { + Self::parse_helper(input, false) + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + impl Parse for GenericArgument { + fn parse(input: ParseStream) -> Result<Self> { + if input.peek(Lifetime) && !input.peek2(Token![+]) { + return Ok(GenericArgument::Lifetime(input.parse()?)); + } + + if input.peek(Lit) || input.peek(token::Brace) { + return const_argument(input).map(GenericArgument::Const); + } + + let mut argument: Type = input.parse()?; + + match argument { + Type::Path(mut ty) + if ty.qself.is_none() + && ty.path.leading_colon.is_none() + && ty.path.segments.len() == 1 + && match &ty.path.segments[0].arguments { + PathArguments::None | PathArguments::AngleBracketed(_) => true, + PathArguments::Parenthesized(_) => false, + } => + { + if let Some(eq_token) = input.parse::<Option<Token![=]>>()? { + let segment = ty.path.segments.pop().unwrap().into_value(); + let ident = segment.ident; + let generics = match segment.arguments { + PathArguments::None => None, + PathArguments::AngleBracketed(arguments) => Some(arguments), + PathArguments::Parenthesized(_) => unreachable!(), + }; + return if input.peek(Lit) || input.peek(token::Brace) { + Ok(GenericArgument::AssocConst(AssocConst { + ident, + generics, + eq_token, + value: const_argument(input)?, + })) + } else { + Ok(GenericArgument::AssocType(AssocType { + ident, + generics, + eq_token, + ty: input.parse()?, + })) + }; + } + + #[cfg(feature = "full")] + if let Some(colon_token) = input.parse::<Option<Token![:]>>()? { + let segment = ty.path.segments.pop().unwrap().into_value(); + return Ok(GenericArgument::Constraint(Constraint { + ident: segment.ident, + generics: match segment.arguments { + PathArguments::None => None, + PathArguments::AngleBracketed(arguments) => Some(arguments), + PathArguments::Parenthesized(_) => unreachable!(), + }, + colon_token, + bounds: { + let mut bounds = Punctuated::new(); + loop { + if input.peek(Token![,]) || input.peek(Token![>]) { + break; + } + bounds.push_value({ + let allow_precise_capture = false; + let allow_const = true; + TypeParamBound::parse_single( + input, + allow_precise_capture, + allow_const, + )? 
+ }); + if !input.peek(Token![+]) { + break; + } + let punct: Token![+] = input.parse()?; + bounds.push_punct(punct); + } + bounds + }, + })); + } + + argument = Type::Path(ty); + } + _ => {} + } + + Ok(GenericArgument::Type(argument)) + } + } + + pub(crate) fn const_argument(input: ParseStream) -> Result<Expr> { + let lookahead = input.lookahead1(); + + if input.peek(Lit) { + let lit = input.parse()?; + return Ok(Expr::Lit(lit)); + } + + if input.peek(Ident) { + let ident: Ident = input.parse()?; + return Ok(Expr::Path(ExprPath { + attrs: Vec::new(), + qself: None, + path: Path::from(ident), + })); + } + + if input.peek(token::Brace) { + #[cfg(feature = "full")] + { + let block: ExprBlock = input.parse()?; + return Ok(Expr::Block(block)); + } + + #[cfg(not(feature = "full"))] + { + let begin = input.fork(); + let content; + braced!(content in input); + content.parse::<Expr>()?; + let verbatim = verbatim::between(&begin, input); + return Ok(Expr::Verbatim(verbatim)); + } + } + + Err(lookahead.error()) + } + + impl AngleBracketedGenericArguments { + /// Parse `::<…>` with mandatory leading `::`. + /// + /// The ordinary [`Parse`] impl for `AngleBracketedGenericArguments` + /// parses optional leading `::`. + #[cfg(feature = "full")] + #[cfg_attr(docsrs, doc(cfg(all(feature = "parsing", feature = "full"))))] + pub fn parse_turbofish(input: ParseStream) -> Result<Self> { + let colon2_token: Token![::] = input.parse()?; + Self::do_parse(Some(colon2_token), input) + } + + pub(crate) fn do_parse( + colon2_token: Option<Token![::]>, + input: ParseStream, + ) -> Result<Self> { + Ok(AngleBracketedGenericArguments { + colon2_token, + lt_token: input.parse()?, + args: { + let mut args = Punctuated::new(); + loop { + if input.peek(Token![>]) { + break; + } + let value: GenericArgument = input.parse()?; + args.push_value(value); + if input.peek(Token![>]) { + break; + } + let punct: Token![,] = input.parse()?; + args.push_punct(punct); + } + args + }, + gt_token: input.parse()?, + }) + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + impl Parse for AngleBracketedGenericArguments { + fn parse(input: ParseStream) -> Result<Self> { + let colon2_token: Option<Token![::]> = input.parse()?; + Self::do_parse(colon2_token, input) + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + impl Parse for ParenthesizedGenericArguments { + fn parse(input: ParseStream) -> Result<Self> { + let content; + Ok(ParenthesizedGenericArguments { + paren_token: parenthesized!(content in input), + inputs: content.parse_terminated(Type::parse, Token![,])?, + output: input.call(ReturnType::without_plus)?, + }) + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + impl Parse for PathSegment { + fn parse(input: ParseStream) -> Result<Self> { + Self::parse_helper(input, false) + } + } + + impl PathSegment { + fn parse_helper(input: ParseStream, expr_style: bool) -> Result<Self> { + if input.peek(Token![super]) + || input.peek(Token![self]) + || input.peek(Token![crate]) + || cfg!(feature = "full") && input.peek(Token![try]) + { + let ident = input.call(Ident::parse_any)?; + return Ok(PathSegment::from(ident)); + } + + let ident = if input.peek(Token![Self]) { + input.call(Ident::parse_any)? + } else { + input.parse()? 
+ }; + + if !expr_style + && input.peek(Token![<]) + && !input.peek(Token![<=]) + && !input.peek(Token![<<=]) + || input.peek(Token![::]) && input.peek3(Token![<]) + { + Ok(PathSegment { + ident, + arguments: PathArguments::AngleBracketed(input.parse()?), + }) + } else { + Ok(PathSegment::from(ident)) + } + } + } + + impl Path { + /// Parse a `Path` containing no path arguments on any of its segments. + /// + /// # Example + /// + /// ``` + /// use syn::{Path, Result, Token}; + /// use syn::parse::{Parse, ParseStream}; + /// + /// // A simplified single `use` statement like: + /// // + /// // use std::collections::HashMap; + /// // + /// // Note that generic parameters are not allowed in a `use` statement + /// // so the following must not be accepted. + /// // + /// // use a::<b>::c; + /// struct SingleUse { + /// use_token: Token![use], + /// path: Path, + /// } + /// + /// impl Parse for SingleUse { + /// fn parse(input: ParseStream) -> Result<Self> { + /// Ok(SingleUse { + /// use_token: input.parse()?, + /// path: input.call(Path::parse_mod_style)?, + /// }) + /// } + /// } + /// ``` + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + pub fn parse_mod_style(input: ParseStream) -> Result<Self> { + Ok(Path { + leading_colon: input.parse()?, + segments: { + let mut segments = Punctuated::new(); + loop { + if !input.peek(Ident) + && !input.peek(Token![super]) + && !input.peek(Token![self]) + && !input.peek(Token![Self]) + && !input.peek(Token![crate]) + { + break; + } + let ident = Ident::parse_any(input)?; + segments.push_value(PathSegment::from(ident)); + if !input.peek(Token![::]) { + break; + } + let punct = input.parse()?; + segments.push_punct(punct); + } + if segments.is_empty() { + return Err(input.parse::<Ident>().unwrap_err()); + } else if segments.trailing_punct() { + return Err(input.error("expected path segment after `::`")); + } + segments + }, + }) + } + + pub(crate) fn parse_helper(input: ParseStream, expr_style: bool) -> Result<Self> { + let mut path = Path { + leading_colon: input.parse()?, + segments: { + let mut segments = Punctuated::new(); + let value = PathSegment::parse_helper(input, expr_style)?; + segments.push_value(value); + segments + }, + }; + Path::parse_rest(input, &mut path, expr_style)?; + Ok(path) + } + + pub(crate) fn parse_rest( + input: ParseStream, + path: &mut Self, + expr_style: bool, + ) -> Result<()> { + while input.peek(Token![::]) && !input.peek3(token::Paren) { + let punct: Token![::] = input.parse()?; + path.segments.push_punct(punct); + let value = PathSegment::parse_helper(input, expr_style)?; + path.segments.push_value(value); + } + Ok(()) + } + + pub(crate) fn is_mod_style(&self) -> bool { + self.segments + .iter() + .all(|segment| segment.arguments.is_none()) + } + } + + pub(crate) fn qpath(input: ParseStream, expr_style: bool) -> Result<(Option<QSelf>, Path)> { + if input.peek(Token![<]) { + let lt_token: Token![<] = input.parse()?; + let this: Type = input.parse()?; + let path = if input.peek(Token![as]) { + let as_token: Token![as] = input.parse()?; + let path: Path = input.parse()?; + Some((as_token, path)) + } else { + None + }; + let gt_token: Token![>] = input.parse()?; + let colon2_token: Token![::] = input.parse()?; + let mut rest = Punctuated::new(); + loop { + let path = PathSegment::parse_helper(input, expr_style)?; + rest.push_value(path); + if !input.peek(Token![::]) { + break; + } + let punct: Token![::] = input.parse()?; + rest.push_punct(punct); + } + let (position, as_token, path) = match path { + Some((as_token, mut 
path)) => { + let pos = path.segments.len(); + path.segments.push_punct(colon2_token); + path.segments.extend(rest.into_pairs()); + (pos, Some(as_token), path) + } + None => { + let path = Path { + leading_colon: Some(colon2_token), + segments: rest, + }; + (0, None, path) + } + }; + let qself = QSelf { + lt_token, + ty: Box::new(this), + position, + as_token, + gt_token, + }; + Ok((Some(qself), path)) + } else { + let path = Path::parse_helper(input, expr_style)?; + Ok((None, path)) + } + } +} + +#[cfg(feature = "printing")] +pub(crate) mod printing { + use crate::generics; + use crate::path::{ + AngleBracketedGenericArguments, AssocConst, AssocType, Constraint, GenericArgument, + ParenthesizedGenericArguments, Path, PathArguments, PathSegment, QSelf, + }; + use crate::print::TokensOrDefault; + #[cfg(feature = "parsing")] + use crate::spanned::Spanned; + #[cfg(feature = "parsing")] + use proc_macro2::Span; + use proc_macro2::TokenStream; + use quote::ToTokens; + use std::cmp; + + pub(crate) enum PathStyle { + Expr, + Mod, + AsWritten, + } + + impl Copy for PathStyle {} + + impl Clone for PathStyle { + fn clone(&self) -> Self { + *self + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for Path { + fn to_tokens(&self, tokens: &mut TokenStream) { + print_path(tokens, self, PathStyle::AsWritten); + } + } + + pub(crate) fn print_path(tokens: &mut TokenStream, path: &Path, style: PathStyle) { + path.leading_colon.to_tokens(tokens); + for segment in path.segments.pairs() { + print_path_segment(tokens, segment.value(), style); + segment.punct().to_tokens(tokens); + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for PathSegment { + fn to_tokens(&self, tokens: &mut TokenStream) { + print_path_segment(tokens, self, PathStyle::AsWritten); + } + } + + fn print_path_segment(tokens: &mut TokenStream, segment: &PathSegment, style: PathStyle) { + segment.ident.to_tokens(tokens); + print_path_arguments(tokens, &segment.arguments, style); + } + + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for PathArguments { + fn to_tokens(&self, tokens: &mut TokenStream) { + print_path_arguments(tokens, self, PathStyle::AsWritten); + } + } + + fn print_path_arguments(tokens: &mut TokenStream, arguments: &PathArguments, style: PathStyle) { + match arguments { + PathArguments::None => {} + PathArguments::AngleBracketed(arguments) => { + print_angle_bracketed_generic_arguments(tokens, arguments, style); + } + PathArguments::Parenthesized(arguments) => { + print_parenthesized_generic_arguments(tokens, arguments, style); + } + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for GenericArgument { + #[allow(clippy::match_same_arms)] + fn to_tokens(&self, tokens: &mut TokenStream) { + match self { + GenericArgument::Lifetime(lt) => lt.to_tokens(tokens), + GenericArgument::Type(ty) => ty.to_tokens(tokens), + GenericArgument::Const(expr) => { + generics::printing::print_const_argument(expr, tokens); + } + GenericArgument::AssocType(assoc) => assoc.to_tokens(tokens), + GenericArgument::AssocConst(assoc) => assoc.to_tokens(tokens), + GenericArgument::Constraint(constraint) => constraint.to_tokens(tokens), + } + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for AngleBracketedGenericArguments { + fn to_tokens(&self, tokens: &mut TokenStream) { + print_angle_bracketed_generic_arguments(tokens, self, PathStyle::AsWritten); + } + } + + pub(crate) fn print_angle_bracketed_generic_arguments( + 
tokens: &mut TokenStream, + arguments: &AngleBracketedGenericArguments, + style: PathStyle, + ) { + if let PathStyle::Mod = style { + return; + } + + conditionally_print_turbofish(tokens, &arguments.colon2_token, style); + arguments.lt_token.to_tokens(tokens); + + // Print lifetimes before types/consts/bindings, regardless of their + // order in args. + let mut trailing_or_empty = true; + for param in arguments.args.pairs() { + match param.value() { + GenericArgument::Lifetime(_) => { + param.to_tokens(tokens); + trailing_or_empty = param.punct().is_some(); + } + GenericArgument::Type(_) + | GenericArgument::Const(_) + | GenericArgument::AssocType(_) + | GenericArgument::AssocConst(_) + | GenericArgument::Constraint(_) => {} + } + } + for param in arguments.args.pairs() { + match param.value() { + GenericArgument::Type(_) + | GenericArgument::Const(_) + | GenericArgument::AssocType(_) + | GenericArgument::AssocConst(_) + | GenericArgument::Constraint(_) => { + if !trailing_or_empty { + <Token![,]>::default().to_tokens(tokens); + } + param.to_tokens(tokens); + trailing_or_empty = param.punct().is_some(); + } + GenericArgument::Lifetime(_) => {} + } + } + + arguments.gt_token.to_tokens(tokens); + } + + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for AssocType { + fn to_tokens(&self, tokens: &mut TokenStream) { + self.ident.to_tokens(tokens); + self.generics.to_tokens(tokens); + self.eq_token.to_tokens(tokens); + self.ty.to_tokens(tokens); + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for AssocConst { + fn to_tokens(&self, tokens: &mut TokenStream) { + self.ident.to_tokens(tokens); + self.generics.to_tokens(tokens); + self.eq_token.to_tokens(tokens); + generics::printing::print_const_argument(&self.value, tokens); + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for Constraint { + fn to_tokens(&self, tokens: &mut TokenStream) { + self.ident.to_tokens(tokens); + self.generics.to_tokens(tokens); + self.colon_token.to_tokens(tokens); + self.bounds.to_tokens(tokens); + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for ParenthesizedGenericArguments { + fn to_tokens(&self, tokens: &mut TokenStream) { + print_parenthesized_generic_arguments(tokens, self, PathStyle::AsWritten); + } + } + + fn print_parenthesized_generic_arguments( + tokens: &mut TokenStream, + arguments: &ParenthesizedGenericArguments, + style: PathStyle, + ) { + if let PathStyle::Mod = style { + return; + } + + conditionally_print_turbofish(tokens, &None, style); + arguments.paren_token.surround(tokens, |tokens| { + arguments.inputs.to_tokens(tokens); + }); + arguments.output.to_tokens(tokens); + } + + pub(crate) fn print_qpath( + tokens: &mut TokenStream, + qself: &Option<QSelf>, + path: &Path, + style: PathStyle, + ) { + let qself = match qself { + Some(qself) => qself, + None => { + print_path(tokens, path, style); + return; + } + }; + qself.lt_token.to_tokens(tokens); + qself.ty.to_tokens(tokens); + + let pos = cmp::min(qself.position, path.segments.len()); + let mut segments = path.segments.pairs(); + if pos > 0 { + TokensOrDefault(&qself.as_token).to_tokens(tokens); + path.leading_colon.to_tokens(tokens); + for (i, segment) in segments.by_ref().take(pos).enumerate() { + print_path_segment(tokens, segment.value(), PathStyle::AsWritten); + if i + 1 == pos { + qself.gt_token.to_tokens(tokens); + } + segment.punct().to_tokens(tokens); + } + } else { + qself.gt_token.to_tokens(tokens); + 
path.leading_colon.to_tokens(tokens); + } + for segment in segments { + print_path_segment(tokens, segment.value(), style); + segment.punct().to_tokens(tokens); + } + } + + fn conditionally_print_turbofish( + tokens: &mut TokenStream, + colon2_token: &Option<Token![::]>, + style: PathStyle, + ) { + match style { + PathStyle::Expr => TokensOrDefault(colon2_token).to_tokens(tokens), + PathStyle::Mod => unreachable!(), + PathStyle::AsWritten => colon2_token.to_tokens(tokens), + } + } + + #[cfg(feature = "parsing")] + #[cfg_attr(docsrs, doc(cfg(all(feature = "parsing", feature = "printing"))))] + impl Spanned for QSelf { + fn span(&self) -> Span { + struct QSelfDelimiters<'a>(&'a QSelf); + + impl<'a> ToTokens for QSelfDelimiters<'a> { + fn to_tokens(&self, tokens: &mut TokenStream) { + self.0.lt_token.to_tokens(tokens); + self.0.gt_token.to_tokens(tokens); + } + } + + QSelfDelimiters(self).span() + } + } +} diff --git a/vendor/syn/src/precedence.rs b/vendor/syn/src/precedence.rs new file mode 100644 index 00000000000000..1891bfc202fda5 --- /dev/null +++ b/vendor/syn/src/precedence.rs @@ -0,0 +1,210 @@ +#[cfg(all(feature = "printing", feature = "full"))] +use crate::attr::{AttrStyle, Attribute}; +#[cfg(feature = "printing")] +use crate::expr::Expr; +#[cfg(all(feature = "printing", feature = "full"))] +use crate::expr::{ + ExprArray, ExprAsync, ExprAwait, ExprBlock, ExprBreak, ExprCall, ExprConst, ExprContinue, + ExprField, ExprForLoop, ExprGroup, ExprIf, ExprIndex, ExprInfer, ExprLit, ExprLoop, ExprMacro, + ExprMatch, ExprMethodCall, ExprParen, ExprPath, ExprRepeat, ExprReturn, ExprStruct, ExprTry, + ExprTryBlock, ExprTuple, ExprUnsafe, ExprWhile, ExprYield, +}; +use crate::op::BinOp; +#[cfg(all(feature = "printing", feature = "full"))] +use crate::ty::ReturnType; +use std::cmp::Ordering; + +// Reference: https://doc.rust-lang.org/reference/expressions.html#expression-precedence +pub(crate) enum Precedence { + // return, break, closures + Jump, + // = += -= *= /= %= &= |= ^= <<= >>= + Assign, + // .. ..= + Range, + // || + Or, + // && + And, + // let + #[cfg(feature = "printing")] + Let, + // == != < > <= >= + Compare, + // | + BitOr, + // ^ + BitXor, + // & + BitAnd, + // << >> + Shift, + // + - + Sum, + // * / % + Product, + // as + Cast, + // unary - * ! 
& &mut + #[cfg(feature = "printing")] + Prefix, + // paths, loops, function calls, array indexing, field expressions, method calls + #[cfg(feature = "printing")] + Unambiguous, +} + +impl Precedence { + pub(crate) const MIN: Self = Precedence::Jump; + + pub(crate) fn of_binop(op: &BinOp) -> Self { + match op { + BinOp::Add(_) | BinOp::Sub(_) => Precedence::Sum, + BinOp::Mul(_) | BinOp::Div(_) | BinOp::Rem(_) => Precedence::Product, + BinOp::And(_) => Precedence::And, + BinOp::Or(_) => Precedence::Or, + BinOp::BitXor(_) => Precedence::BitXor, + BinOp::BitAnd(_) => Precedence::BitAnd, + BinOp::BitOr(_) => Precedence::BitOr, + BinOp::Shl(_) | BinOp::Shr(_) => Precedence::Shift, + + BinOp::Eq(_) + | BinOp::Lt(_) + | BinOp::Le(_) + | BinOp::Ne(_) + | BinOp::Ge(_) + | BinOp::Gt(_) => Precedence::Compare, + + BinOp::AddAssign(_) + | BinOp::SubAssign(_) + | BinOp::MulAssign(_) + | BinOp::DivAssign(_) + | BinOp::RemAssign(_) + | BinOp::BitXorAssign(_) + | BinOp::BitAndAssign(_) + | BinOp::BitOrAssign(_) + | BinOp::ShlAssign(_) + | BinOp::ShrAssign(_) => Precedence::Assign, + } + } + + #[cfg(feature = "printing")] + pub(crate) fn of(e: &Expr) -> Self { + #[cfg(feature = "full")] + fn prefix_attrs(attrs: &[Attribute]) -> Precedence { + for attr in attrs { + if let AttrStyle::Outer = attr.style { + return Precedence::Prefix; + } + } + Precedence::Unambiguous + } + + match e { + #[cfg(feature = "full")] + Expr::Closure(e) => match e.output { + ReturnType::Default => Precedence::Jump, + ReturnType::Type(..) => prefix_attrs(&e.attrs), + }, + + #[cfg(feature = "full")] + Expr::Break(ExprBreak { expr, .. }) + | Expr::Return(ExprReturn { expr, .. }) + | Expr::Yield(ExprYield { expr, .. }) => match expr { + Some(_) => Precedence::Jump, + None => Precedence::Unambiguous, + }, + + Expr::Assign(_) => Precedence::Assign, + Expr::Range(_) => Precedence::Range, + Expr::Binary(e) => Precedence::of_binop(&e.op), + Expr::Let(_) => Precedence::Let, + Expr::Cast(_) => Precedence::Cast, + Expr::RawAddr(_) | Expr::Reference(_) | Expr::Unary(_) => Precedence::Prefix, + + #[cfg(feature = "full")] + Expr::Array(ExprArray { attrs, .. }) + | Expr::Async(ExprAsync { attrs, .. }) + | Expr::Await(ExprAwait { attrs, .. }) + | Expr::Block(ExprBlock { attrs, .. }) + | Expr::Call(ExprCall { attrs, .. }) + | Expr::Const(ExprConst { attrs, .. }) + | Expr::Continue(ExprContinue { attrs, .. }) + | Expr::Field(ExprField { attrs, .. }) + | Expr::ForLoop(ExprForLoop { attrs, .. }) + | Expr::Group(ExprGroup { attrs, .. }) + | Expr::If(ExprIf { attrs, .. }) + | Expr::Index(ExprIndex { attrs, .. }) + | Expr::Infer(ExprInfer { attrs, .. }) + | Expr::Lit(ExprLit { attrs, .. }) + | Expr::Loop(ExprLoop { attrs, .. }) + | Expr::Macro(ExprMacro { attrs, .. }) + | Expr::Match(ExprMatch { attrs, .. }) + | Expr::MethodCall(ExprMethodCall { attrs, .. }) + | Expr::Paren(ExprParen { attrs, .. }) + | Expr::Path(ExprPath { attrs, .. }) + | Expr::Repeat(ExprRepeat { attrs, .. }) + | Expr::Struct(ExprStruct { attrs, .. }) + | Expr::Try(ExprTry { attrs, .. }) + | Expr::TryBlock(ExprTryBlock { attrs, .. }) + | Expr::Tuple(ExprTuple { attrs, .. }) + | Expr::Unsafe(ExprUnsafe { attrs, .. }) + | Expr::While(ExprWhile { attrs, .. 
}) => prefix_attrs(attrs), + + #[cfg(not(feature = "full"))] + Expr::Array(_) + | Expr::Async(_) + | Expr::Await(_) + | Expr::Block(_) + | Expr::Call(_) + | Expr::Const(_) + | Expr::Continue(_) + | Expr::Field(_) + | Expr::ForLoop(_) + | Expr::Group(_) + | Expr::If(_) + | Expr::Index(_) + | Expr::Infer(_) + | Expr::Lit(_) + | Expr::Loop(_) + | Expr::Macro(_) + | Expr::Match(_) + | Expr::MethodCall(_) + | Expr::Paren(_) + | Expr::Path(_) + | Expr::Repeat(_) + | Expr::Struct(_) + | Expr::Try(_) + | Expr::TryBlock(_) + | Expr::Tuple(_) + | Expr::Unsafe(_) + | Expr::While(_) => Precedence::Unambiguous, + + Expr::Verbatim(_) => Precedence::Unambiguous, + + #[cfg(not(feature = "full"))] + Expr::Break(_) | Expr::Closure(_) | Expr::Return(_) | Expr::Yield(_) => unreachable!(), + } + } +} + +impl Copy for Precedence {} + +impl Clone for Precedence { + fn clone(&self) -> Self { + *self + } +} + +impl PartialEq for Precedence { + fn eq(&self, other: &Self) -> bool { + *self as u8 == *other as u8 + } +} + +impl PartialOrd for Precedence { + fn partial_cmp(&self, other: &Self) -> Option<Ordering> { + let this = *self as u8; + let other = *other as u8; + Some(this.cmp(&other)) + } +} diff --git a/vendor/syn/src/print.rs b/vendor/syn/src/print.rs new file mode 100644 index 00000000000000..07409932677bba --- /dev/null +++ b/vendor/syn/src/print.rs @@ -0,0 +1,16 @@ +use proc_macro2::TokenStream; +use quote::ToTokens; + +pub(crate) struct TokensOrDefault<'a, T: 'a>(pub &'a Option<T>); + +impl<'a, T> ToTokens for TokensOrDefault<'a, T> +where + T: ToTokens + Default, +{ + fn to_tokens(&self, tokens: &mut TokenStream) { + match self.0 { + Some(t) => t.to_tokens(tokens), + None => T::default().to_tokens(tokens), + } + } +} diff --git a/vendor/syn/src/punctuated.rs b/vendor/syn/src/punctuated.rs new file mode 100644 index 00000000000000..fdefc7d24bed93 --- /dev/null +++ b/vendor/syn/src/punctuated.rs @@ -0,0 +1,1169 @@ +//! A punctuated sequence of syntax tree nodes separated by punctuation. +//! +//! Lots of things in Rust are punctuated sequences. +//! +//! - The fields of a struct are `Punctuated<Field, Token![,]>`. +//! - The segments of a path are `Punctuated<PathSegment, Token![::]>`. +//! - The bounds on a generic parameter are `Punctuated<TypeParamBound, +//! Token![+]>`. +//! - The arguments to a function call are `Punctuated<Expr, Token![,]>`. +//! +//! This module provides a common representation for these punctuated sequences +//! in the form of the [`Punctuated<T, P>`] type. We store a vector of pairs of +//! syntax tree node + punctuation, where every node in the sequence is followed +//! by punctuation except for possibly the final one. +//! +//! [`Punctuated<T, P>`]: Punctuated +//! +//! ```text +//! a_function_call(arg1, arg2, arg3); +//! ~~~~^ ~~~~^ ~~~~ +//! 
``` + +use crate::drops::{NoDrop, TrivialDrop}; +#[cfg(feature = "parsing")] +use crate::error::Result; +#[cfg(feature = "parsing")] +use crate::parse::{Parse, ParseStream}; +#[cfg(feature = "parsing")] +use crate::token::Token; +#[cfg(all(feature = "fold", any(feature = "full", feature = "derive")))] +use std::collections::VecDeque; +#[cfg(feature = "extra-traits")] +use std::fmt::{self, Debug}; +#[cfg(feature = "extra-traits")] +use std::hash::{Hash, Hasher}; +#[cfg(any(feature = "full", feature = "derive"))] +use std::iter; +use std::ops::{Index, IndexMut}; +use std::option; +use std::slice; +use std::vec; + +/// **A punctuated sequence of syntax tree nodes of type `T` separated by +/// punctuation of type `P`.** +/// +/// Refer to the [module documentation] for details about punctuated sequences. +/// +/// [module documentation]: self +pub struct Punctuated<T, P> { + inner: Vec<(T, P)>, + last: Option<Box<T>>, +} + +impl<T, P> Punctuated<T, P> { + /// Creates an empty punctuated sequence. + pub const fn new() -> Self { + Punctuated { + inner: Vec::new(), + last: None, + } + } + + /// Determines whether this punctuated sequence is empty, meaning it + /// contains no syntax tree nodes or punctuation. + pub fn is_empty(&self) -> bool { + self.inner.len() == 0 && self.last.is_none() + } + + /// Returns the number of syntax tree nodes in this punctuated sequence. + /// + /// This is the number of nodes of type `T`, not counting the punctuation of + /// type `P`. + pub fn len(&self) -> usize { + self.inner.len() + if self.last.is_some() { 1 } else { 0 } + } + + /// Borrows the first element in this sequence. + pub fn first(&self) -> Option<&T> { + self.iter().next() + } + + /// Mutably borrows the first element in this sequence. + pub fn first_mut(&mut self) -> Option<&mut T> { + self.iter_mut().next() + } + + /// Borrows the last element in this sequence. + pub fn last(&self) -> Option<&T> { + self.iter().next_back() + } + + /// Mutably borrows the last element in this sequence. + pub fn last_mut(&mut self) -> Option<&mut T> { + self.iter_mut().next_back() + } + + /// Borrows the element at the given index. + pub fn get(&self, index: usize) -> Option<&T> { + if let Some((value, _punct)) = self.inner.get(index) { + Some(value) + } else if index == self.inner.len() { + self.last.as_deref() + } else { + None + } + } + + /// Mutably borrows the element at the given index. + pub fn get_mut(&mut self, index: usize) -> Option<&mut T> { + let inner_len = self.inner.len(); + if let Some((value, _punct)) = self.inner.get_mut(index) { + Some(value) + } else if index == inner_len { + self.last.as_deref_mut() + } else { + None + } + } + + /// Returns an iterator over borrowed syntax tree nodes of type `&T`. + pub fn iter(&self) -> Iter<T> { + Iter { + inner: Box::new(NoDrop::new(PrivateIter { + inner: self.inner.iter(), + last: self.last.as_ref().map(Box::as_ref).into_iter(), + })), + } + } + + /// Returns an iterator over mutably borrowed syntax tree nodes of type + /// `&mut T`. + pub fn iter_mut(&mut self) -> IterMut<T> { + IterMut { + inner: Box::new(NoDrop::new(PrivateIterMut { + inner: self.inner.iter_mut(), + last: self.last.as_mut().map(Box::as_mut).into_iter(), + })), + } + } + + /// Returns an iterator over the contents of this sequence as borrowed + /// punctuated pairs. 
+ pub fn pairs(&self) -> Pairs<T, P> { + Pairs { + inner: self.inner.iter(), + last: self.last.as_ref().map(Box::as_ref).into_iter(), + } + } + + /// Returns an iterator over the contents of this sequence as mutably + /// borrowed punctuated pairs. + pub fn pairs_mut(&mut self) -> PairsMut<T, P> { + PairsMut { + inner: self.inner.iter_mut(), + last: self.last.as_mut().map(Box::as_mut).into_iter(), + } + } + + /// Returns an iterator over the contents of this sequence as owned + /// punctuated pairs. + pub fn into_pairs(self) -> IntoPairs<T, P> { + IntoPairs { + inner: self.inner.into_iter(), + last: self.last.map(|t| *t).into_iter(), + } + } + + /// Appends a syntax tree node onto the end of this punctuated sequence. The + /// sequence must already have a trailing punctuation, or be empty. + /// + /// Use [`push`] instead if the punctuated sequence may or may not already + /// have trailing punctuation. + /// + /// [`push`]: Punctuated::push + /// + /// # Panics + /// + /// Panics if the sequence is nonempty and does not already have a trailing + /// punctuation. + pub fn push_value(&mut self, value: T) { + assert!( + self.empty_or_trailing(), + "Punctuated::push_value: cannot push value if Punctuated is missing trailing punctuation", + ); + + self.last = Some(Box::new(value)); + } + + /// Appends a trailing punctuation onto the end of this punctuated sequence. + /// The sequence must be non-empty and must not already have trailing + /// punctuation. + /// + /// # Panics + /// + /// Panics if the sequence is empty or already has a trailing punctuation. + pub fn push_punct(&mut self, punctuation: P) { + assert!( + self.last.is_some(), + "Punctuated::push_punct: cannot push punctuation if Punctuated is empty or already has trailing punctuation", + ); + + let last = self.last.take().unwrap(); + self.inner.push((*last, punctuation)); + } + + /// Removes the last punctuated pair from this sequence, or `None` if the + /// sequence is empty. + pub fn pop(&mut self) -> Option<Pair<T, P>> { + if self.last.is_some() { + self.last.take().map(|t| Pair::End(*t)) + } else { + self.inner.pop().map(|(t, p)| Pair::Punctuated(t, p)) + } + } + + /// Removes the trailing punctuation from this punctuated sequence, or + /// `None` if there isn't any. + pub fn pop_punct(&mut self) -> Option<P> { + if self.last.is_some() { + None + } else { + let (t, p) = self.inner.pop()?; + self.last = Some(Box::new(t)); + Some(p) + } + } + + /// Determines whether this punctuated sequence ends with a trailing + /// punctuation. + pub fn trailing_punct(&self) -> bool { + self.last.is_none() && !self.is_empty() + } + + /// Returns true if either this `Punctuated` is empty, or it has a trailing + /// punctuation. + /// + /// Equivalent to `punctuated.is_empty() || punctuated.trailing_punct()`. + pub fn empty_or_trailing(&self) -> bool { + self.last.is_none() + } + + /// Appends a syntax tree node onto the end of this punctuated sequence. + /// + /// If there is not a trailing punctuation in this sequence when this method + /// is called, the default value of punctuation type `P` is inserted before + /// the given value of type `T`. + pub fn push(&mut self, value: T) + where + P: Default, + { + if !self.empty_or_trailing() { + self.push_punct(Default::default()); + } + self.push_value(value); + } + + /// Inserts an element at position `index`. + /// + /// # Panics + /// + /// Panics if `index` is greater than the number of elements previously in + /// this punctuated sequence. 
+ pub fn insert(&mut self, index: usize, value: T) + where + P: Default, + { + assert!( + index <= self.len(), + "Punctuated::insert: index out of range", + ); + + if index == self.len() { + self.push(value); + } else { + self.inner.insert(index, (value, Default::default())); + } + } + + /// Clears the sequence of all values and punctuation, making it empty. + pub fn clear(&mut self) { + self.inner.clear(); + self.last = None; + } + + /// Parses zero or more occurrences of `T` separated by punctuation of type + /// `P`, with optional trailing punctuation. + /// + /// Parsing continues until the end of this parse stream. The entire content + /// of this parse stream must consist of `T` and `P`. + #[cfg(feature = "parsing")] + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + pub fn parse_terminated(input: ParseStream) -> Result<Self> + where + T: Parse, + P: Parse, + { + Self::parse_terminated_with(input, T::parse) + } + + /// Parses zero or more occurrences of `T` using the given parse function, + /// separated by punctuation of type `P`, with optional trailing + /// punctuation. + /// + /// Like [`parse_terminated`], the entire content of this stream is expected + /// to be parsed. + /// + /// [`parse_terminated`]: Punctuated::parse_terminated + #[cfg(feature = "parsing")] + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + pub fn parse_terminated_with<'a>( + input: ParseStream<'a>, + parser: fn(ParseStream<'a>) -> Result<T>, + ) -> Result<Self> + where + P: Parse, + { + let mut punctuated = Punctuated::new(); + + loop { + if input.is_empty() { + break; + } + let value = parser(input)?; + punctuated.push_value(value); + if input.is_empty() { + break; + } + let punct = input.parse()?; + punctuated.push_punct(punct); + } + + Ok(punctuated) + } + + /// Parses one or more occurrences of `T` separated by punctuation of type + /// `P`, not accepting trailing punctuation. + /// + /// Parsing continues as long as punctuation `P` is present at the head of + /// the stream. This method returns upon parsing a `T` and observing that it + /// is not followed by a `P`, even if there are remaining tokens in the + /// stream. + #[cfg(feature = "parsing")] + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + pub fn parse_separated_nonempty(input: ParseStream) -> Result<Self> + where + T: Parse, + P: Token + Parse, + { + Self::parse_separated_nonempty_with(input, T::parse) + } + + /// Parses one or more occurrences of `T` using the given parse function, + /// separated by punctuation of type `P`, not accepting trailing + /// punctuation. + /// + /// Like [`parse_separated_nonempty`], may complete early without parsing + /// the entire content of this stream. 
+ /// + /// [`parse_separated_nonempty`]: Punctuated::parse_separated_nonempty + #[cfg(feature = "parsing")] + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + pub fn parse_separated_nonempty_with<'a>( + input: ParseStream<'a>, + parser: fn(ParseStream<'a>) -> Result<T>, + ) -> Result<Self> + where + P: Token + Parse, + { + let mut punctuated = Punctuated::new(); + + loop { + let value = parser(input)?; + punctuated.push_value(value); + if !P::peek(input.cursor()) { + break; + } + let punct = input.parse()?; + punctuated.push_punct(punct); + } + + Ok(punctuated) + } +} + +#[cfg(feature = "clone-impls")] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl<T, P> Clone for Punctuated<T, P> +where + T: Clone, + P: Clone, +{ + fn clone(&self) -> Self { + Punctuated { + inner: self.inner.clone(), + last: self.last.clone(), + } + } + + fn clone_from(&mut self, other: &Self) { + self.inner.clone_from(&other.inner); + self.last.clone_from(&other.last); + } +} + +#[cfg(feature = "extra-traits")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl<T, P> Eq for Punctuated<T, P> +where + T: Eq, + P: Eq, +{ +} + +#[cfg(feature = "extra-traits")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl<T, P> PartialEq for Punctuated<T, P> +where + T: PartialEq, + P: PartialEq, +{ + fn eq(&self, other: &Self) -> bool { + let Punctuated { inner, last } = self; + *inner == other.inner && *last == other.last + } +} + +#[cfg(feature = "extra-traits")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl<T, P> Hash for Punctuated<T, P> +where + T: Hash, + P: Hash, +{ + fn hash<H: Hasher>(&self, state: &mut H) { + let Punctuated { inner, last } = self; + inner.hash(state); + last.hash(state); + } +} + +#[cfg(feature = "extra-traits")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl<T: Debug, P: Debug> Debug for Punctuated<T, P> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let mut list = f.debug_list(); + for (t, p) in &self.inner { + list.entry(t); + list.entry(p); + } + if let Some(last) = &self.last { + list.entry(last); + } + list.finish() + } +} + +impl<T, P> FromIterator<T> for Punctuated<T, P> +where + P: Default, +{ + fn from_iter<I: IntoIterator<Item = T>>(i: I) -> Self { + let mut ret = Punctuated::new(); + ret.extend(i); + ret + } +} + +impl<T, P> Extend<T> for Punctuated<T, P> +where + P: Default, +{ + fn extend<I: IntoIterator<Item = T>>(&mut self, i: I) { + for value in i { + self.push(value); + } + } +} + +impl<T, P> FromIterator<Pair<T, P>> for Punctuated<T, P> { + fn from_iter<I: IntoIterator<Item = Pair<T, P>>>(i: I) -> Self { + let mut ret = Punctuated::new(); + do_extend(&mut ret, i.into_iter()); + ret + } +} + +impl<T, P> Extend<Pair<T, P>> for Punctuated<T, P> +where + P: Default, +{ + fn extend<I: IntoIterator<Item = Pair<T, P>>>(&mut self, i: I) { + if !self.empty_or_trailing() { + self.push_punct(P::default()); + } + do_extend(self, i.into_iter()); + } +} + +fn do_extend<T, P, I>(punctuated: &mut Punctuated<T, P>, i: I) +where + I: Iterator<Item = Pair<T, P>>, +{ + let mut nomore = false; + for pair in i { + if nomore { + panic!("punctuated extended with items after a Pair::End"); + } + match pair { + Pair::Punctuated(a, b) => punctuated.inner.push((a, b)), + Pair::End(a) => { + punctuated.last = Some(Box::new(a)); + nomore = true; + } + } + } +} + +impl<T, P> IntoIterator for Punctuated<T, P> { + type Item = T; + type IntoIter = IntoIter<T>; + + fn into_iter(self) -> Self::IntoIter { + let mut elements = 
Vec::with_capacity(self.len()); + + for (t, _) in self.inner { + elements.push(t); + } + if let Some(t) = self.last { + elements.push(*t); + } + + IntoIter { + inner: elements.into_iter(), + } + } +} + +impl<'a, T, P> IntoIterator for &'a Punctuated<T, P> { + type Item = &'a T; + type IntoIter = Iter<'a, T>; + + fn into_iter(self) -> Self::IntoIter { + Punctuated::iter(self) + } +} + +impl<'a, T, P> IntoIterator for &'a mut Punctuated<T, P> { + type Item = &'a mut T; + type IntoIter = IterMut<'a, T>; + + fn into_iter(self) -> Self::IntoIter { + Punctuated::iter_mut(self) + } +} + +impl<T, P> Default for Punctuated<T, P> { + fn default() -> Self { + Punctuated::new() + } +} + +/// An iterator over borrowed pairs of type `Pair<&T, &P>`. +/// +/// Refer to the [module documentation] for details about punctuated sequences. +/// +/// [module documentation]: self +pub struct Pairs<'a, T: 'a, P: 'a> { + inner: slice::Iter<'a, (T, P)>, + last: option::IntoIter<&'a T>, +} + +impl<'a, T, P> Iterator for Pairs<'a, T, P> { + type Item = Pair<&'a T, &'a P>; + + fn next(&mut self) -> Option<Self::Item> { + self.inner + .next() + .map(|(t, p)| Pair::Punctuated(t, p)) + .or_else(|| self.last.next().map(Pair::End)) + } + + fn size_hint(&self) -> (usize, Option<usize>) { + (self.len(), Some(self.len())) + } +} + +impl<'a, T, P> DoubleEndedIterator for Pairs<'a, T, P> { + fn next_back(&mut self) -> Option<Self::Item> { + self.last + .next() + .map(Pair::End) + .or_else(|| self.inner.next_back().map(|(t, p)| Pair::Punctuated(t, p))) + } +} + +impl<'a, T, P> ExactSizeIterator for Pairs<'a, T, P> { + fn len(&self) -> usize { + self.inner.len() + self.last.len() + } +} + +// No Clone bound on T or P. +impl<'a, T, P> Clone for Pairs<'a, T, P> { + fn clone(&self) -> Self { + Pairs { + inner: self.inner.clone(), + last: self.last.clone(), + } + } +} + +/// An iterator over mutably borrowed pairs of type `Pair<&mut T, &mut P>`. +/// +/// Refer to the [module documentation] for details about punctuated sequences. +/// +/// [module documentation]: self +pub struct PairsMut<'a, T: 'a, P: 'a> { + inner: slice::IterMut<'a, (T, P)>, + last: option::IntoIter<&'a mut T>, +} + +impl<'a, T, P> Iterator for PairsMut<'a, T, P> { + type Item = Pair<&'a mut T, &'a mut P>; + + fn next(&mut self) -> Option<Self::Item> { + self.inner + .next() + .map(|(t, p)| Pair::Punctuated(t, p)) + .or_else(|| self.last.next().map(Pair::End)) + } + + fn size_hint(&self) -> (usize, Option<usize>) { + (self.len(), Some(self.len())) + } +} + +impl<'a, T, P> DoubleEndedIterator for PairsMut<'a, T, P> { + fn next_back(&mut self) -> Option<Self::Item> { + self.last + .next() + .map(Pair::End) + .or_else(|| self.inner.next_back().map(|(t, p)| Pair::Punctuated(t, p))) + } +} + +impl<'a, T, P> ExactSizeIterator for PairsMut<'a, T, P> { + fn len(&self) -> usize { + self.inner.len() + self.last.len() + } +} + +/// An iterator over owned pairs of type `Pair<T, P>`. +/// +/// Refer to the [module documentation] for details about punctuated sequences. 
+/// +/// [module documentation]: self +pub struct IntoPairs<T, P> { + inner: vec::IntoIter<(T, P)>, + last: option::IntoIter<T>, +} + +impl<T, P> Iterator for IntoPairs<T, P> { + type Item = Pair<T, P>; + + fn next(&mut self) -> Option<Self::Item> { + self.inner + .next() + .map(|(t, p)| Pair::Punctuated(t, p)) + .or_else(|| self.last.next().map(Pair::End)) + } + + fn size_hint(&self) -> (usize, Option<usize>) { + (self.len(), Some(self.len())) + } +} + +impl<T, P> DoubleEndedIterator for IntoPairs<T, P> { + fn next_back(&mut self) -> Option<Self::Item> { + self.last + .next() + .map(Pair::End) + .or_else(|| self.inner.next_back().map(|(t, p)| Pair::Punctuated(t, p))) + } +} + +impl<T, P> ExactSizeIterator for IntoPairs<T, P> { + fn len(&self) -> usize { + self.inner.len() + self.last.len() + } +} + +impl<T, P> Clone for IntoPairs<T, P> +where + T: Clone, + P: Clone, +{ + fn clone(&self) -> Self { + IntoPairs { + inner: self.inner.clone(), + last: self.last.clone(), + } + } +} + +/// An iterator over owned values of type `T`. +/// +/// Refer to the [module documentation] for details about punctuated sequences. +/// +/// [module documentation]: self +pub struct IntoIter<T> { + inner: vec::IntoIter<T>, +} + +impl<T> Iterator for IntoIter<T> { + type Item = T; + + fn next(&mut self) -> Option<Self::Item> { + self.inner.next() + } + + fn size_hint(&self) -> (usize, Option<usize>) { + (self.len(), Some(self.len())) + } +} + +impl<T> DoubleEndedIterator for IntoIter<T> { + fn next_back(&mut self) -> Option<Self::Item> { + self.inner.next_back() + } +} + +impl<T> ExactSizeIterator for IntoIter<T> { + fn len(&self) -> usize { + self.inner.len() + } +} + +impl<T> Clone for IntoIter<T> +where + T: Clone, +{ + fn clone(&self) -> Self { + IntoIter { + inner: self.inner.clone(), + } + } +} + +/// An iterator over borrowed values of type `&T`. +/// +/// Refer to the [module documentation] for details about punctuated sequences. +/// +/// [module documentation]: self +pub struct Iter<'a, T: 'a> { + inner: Box<NoDrop<dyn IterTrait<'a, T> + 'a>>, +} + +trait IterTrait<'a, T: 'a>: Iterator<Item = &'a T> + DoubleEndedIterator + ExactSizeIterator { + fn clone_box(&self) -> Box<NoDrop<dyn IterTrait<'a, T> + 'a>>; +} + +struct PrivateIter<'a, T: 'a, P: 'a> { + inner: slice::Iter<'a, (T, P)>, + last: option::IntoIter<&'a T>, +} + +impl<'a, T, P> TrivialDrop for PrivateIter<'a, T, P> +where + slice::Iter<'a, (T, P)>: TrivialDrop, + option::IntoIter<&'a T>: TrivialDrop, +{ +} + +#[cfg(any(feature = "full", feature = "derive"))] +pub(crate) fn empty_punctuated_iter<'a, T>() -> Iter<'a, T> { + Iter { + inner: Box::new(NoDrop::new(iter::empty())), + } +} + +// No Clone bound on T. 
+impl<'a, T> Clone for Iter<'a, T> { + fn clone(&self) -> Self { + Iter { + inner: self.inner.clone_box(), + } + } +} + +impl<'a, T> Iterator for Iter<'a, T> { + type Item = &'a T; + + fn next(&mut self) -> Option<Self::Item> { + self.inner.next() + } + + fn size_hint(&self) -> (usize, Option<usize>) { + (self.len(), Some(self.len())) + } +} + +impl<'a, T> DoubleEndedIterator for Iter<'a, T> { + fn next_back(&mut self) -> Option<Self::Item> { + self.inner.next_back() + } +} + +impl<'a, T> ExactSizeIterator for Iter<'a, T> { + fn len(&self) -> usize { + self.inner.len() + } +} + +impl<'a, T, P> Iterator for PrivateIter<'a, T, P> { + type Item = &'a T; + + fn next(&mut self) -> Option<Self::Item> { + self.inner + .next() + .map(|pair| &pair.0) + .or_else(|| self.last.next()) + } +} + +impl<'a, T, P> DoubleEndedIterator for PrivateIter<'a, T, P> { + fn next_back(&mut self) -> Option<Self::Item> { + self.last + .next() + .or_else(|| self.inner.next_back().map(|pair| &pair.0)) + } +} + +impl<'a, T, P> ExactSizeIterator for PrivateIter<'a, T, P> { + fn len(&self) -> usize { + self.inner.len() + self.last.len() + } +} + +// No Clone bound on T or P. +impl<'a, T, P> Clone for PrivateIter<'a, T, P> { + fn clone(&self) -> Self { + PrivateIter { + inner: self.inner.clone(), + last: self.last.clone(), + } + } +} + +impl<'a, T, I> IterTrait<'a, T> for I +where + T: 'a, + I: DoubleEndedIterator<Item = &'a T> + + ExactSizeIterator<Item = &'a T> + + Clone + + TrivialDrop + + 'a, +{ + fn clone_box(&self) -> Box<NoDrop<dyn IterTrait<'a, T> + 'a>> { + Box::new(NoDrop::new(self.clone())) + } +} + +/// An iterator over mutably borrowed values of type `&mut T`. +/// +/// Refer to the [module documentation] for details about punctuated sequences. +/// +/// [module documentation]: self +pub struct IterMut<'a, T: 'a> { + inner: Box<NoDrop<dyn IterMutTrait<'a, T, Item = &'a mut T> + 'a>>, +} + +trait IterMutTrait<'a, T: 'a>: + DoubleEndedIterator<Item = &'a mut T> + ExactSizeIterator<Item = &'a mut T> +{ +} + +struct PrivateIterMut<'a, T: 'a, P: 'a> { + inner: slice::IterMut<'a, (T, P)>, + last: option::IntoIter<&'a mut T>, +} + +impl<'a, T, P> TrivialDrop for PrivateIterMut<'a, T, P> +where + slice::IterMut<'a, (T, P)>: TrivialDrop, + option::IntoIter<&'a mut T>: TrivialDrop, +{ +} + +#[cfg(any(feature = "full", feature = "derive"))] +pub(crate) fn empty_punctuated_iter_mut<'a, T>() -> IterMut<'a, T> { + IterMut { + inner: Box::new(NoDrop::new(iter::empty())), + } +} + +impl<'a, T> Iterator for IterMut<'a, T> { + type Item = &'a mut T; + + fn next(&mut self) -> Option<Self::Item> { + self.inner.next() + } + + fn size_hint(&self) -> (usize, Option<usize>) { + (self.len(), Some(self.len())) + } +} + +impl<'a, T> DoubleEndedIterator for IterMut<'a, T> { + fn next_back(&mut self) -> Option<Self::Item> { + self.inner.next_back() + } +} + +impl<'a, T> ExactSizeIterator for IterMut<'a, T> { + fn len(&self) -> usize { + self.inner.len() + } +} + +impl<'a, T, P> Iterator for PrivateIterMut<'a, T, P> { + type Item = &'a mut T; + + fn next(&mut self) -> Option<Self::Item> { + self.inner + .next() + .map(|pair| &mut pair.0) + .or_else(|| self.last.next()) + } +} + +impl<'a, T, P> DoubleEndedIterator for PrivateIterMut<'a, T, P> { + fn next_back(&mut self) -> Option<Self::Item> { + self.last + .next() + .or_else(|| self.inner.next_back().map(|pair| &mut pair.0)) + } +} + +impl<'a, T, P> ExactSizeIterator for PrivateIterMut<'a, T, P> { + fn len(&self) -> usize { + self.inner.len() + self.last.len() + } +} + +impl<'a, T, I> 
IterMutTrait<'a, T> for I +where + T: 'a, + I: DoubleEndedIterator<Item = &'a mut T> + ExactSizeIterator<Item = &'a mut T> + 'a, +{ +} + +/// A single syntax tree node of type `T` followed by its trailing punctuation +/// of type `P` if any. +/// +/// Refer to the [module documentation] for details about punctuated sequences. +/// +/// [module documentation]: self +pub enum Pair<T, P> { + Punctuated(T, P), + End(T), +} + +impl<T, P> Pair<T, P> { + /// Extracts the syntax tree node from this punctuated pair, discarding the + /// following punctuation. + pub fn into_value(self) -> T { + match self { + Pair::Punctuated(t, _) | Pair::End(t) => t, + } + } + + /// Borrows the syntax tree node from this punctuated pair. + pub fn value(&self) -> &T { + match self { + Pair::Punctuated(t, _) | Pair::End(t) => t, + } + } + + /// Mutably borrows the syntax tree node from this punctuated pair. + pub fn value_mut(&mut self) -> &mut T { + match self { + Pair::Punctuated(t, _) | Pair::End(t) => t, + } + } + + /// Borrows the punctuation from this punctuated pair, unless this pair is + /// the final one and there is no trailing punctuation. + pub fn punct(&self) -> Option<&P> { + match self { + Pair::Punctuated(_, p) => Some(p), + Pair::End(_) => None, + } + } + + /// Mutably borrows the punctuation from this punctuated pair, unless the + /// pair is the final one and there is no trailing punctuation. + /// + /// # Example + /// + /// ``` + /// # use proc_macro2::Span; + /// # use syn::punctuated::Punctuated; + /// # use syn::{parse_quote, Token, TypeParamBound}; + /// # + /// # let mut punctuated = Punctuated::<TypeParamBound, Token![+]>::new(); + /// # let span = Span::call_site(); + /// # + /// punctuated.insert(0, parse_quote!('lifetime)); + /// if let Some(punct) = punctuated.pairs_mut().next().unwrap().punct_mut() { + /// punct.span = span; + /// } + /// ``` + pub fn punct_mut(&mut self) -> Option<&mut P> { + match self { + Pair::Punctuated(_, p) => Some(p), + Pair::End(_) => None, + } + } + + /// Creates a punctuated pair out of a syntax tree node and an optional + /// following punctuation. + pub fn new(t: T, p: Option<P>) -> Self { + match p { + Some(p) => Pair::Punctuated(t, p), + None => Pair::End(t), + } + } + + /// Produces this punctuated pair as a tuple of syntax tree node and + /// optional following punctuation. 
+ pub fn into_tuple(self) -> (T, Option<P>) { + match self { + Pair::Punctuated(t, p) => (t, Some(p)), + Pair::End(t) => (t, None), + } + } +} + +#[cfg(feature = "clone-impls")] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl<T, P> Pair<&T, &P> { + pub fn cloned(self) -> Pair<T, P> + where + T: Clone, + P: Clone, + { + match self { + Pair::Punctuated(t, p) => Pair::Punctuated(t.clone(), p.clone()), + Pair::End(t) => Pair::End(t.clone()), + } + } +} + +#[cfg(feature = "clone-impls")] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl<T, P> Clone for Pair<T, P> +where + T: Clone, + P: Clone, +{ + fn clone(&self) -> Self { + match self { + Pair::Punctuated(t, p) => Pair::Punctuated(t.clone(), p.clone()), + Pair::End(t) => Pair::End(t.clone()), + } + } +} + +#[cfg(feature = "clone-impls")] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl<T, P> Copy for Pair<T, P> +where + T: Copy, + P: Copy, +{ +} + +impl<T, P> Index<usize> for Punctuated<T, P> { + type Output = T; + + fn index(&self, index: usize) -> &Self::Output { + if index.checked_add(1) == Some(self.len()) { + match &self.last { + Some(t) => t, + None => &self.inner[index].0, + } + } else { + &self.inner[index].0 + } + } +} + +impl<T, P> IndexMut<usize> for Punctuated<T, P> { + fn index_mut(&mut self, index: usize) -> &mut Self::Output { + if index.checked_add(1) == Some(self.len()) { + match &mut self.last { + Some(t) => t, + None => &mut self.inner[index].0, + } + } else { + &mut self.inner[index].0 + } + } +} + +#[cfg(all(feature = "fold", any(feature = "full", feature = "derive")))] +pub(crate) fn fold<T, P, V, F>( + punctuated: Punctuated<T, P>, + fold: &mut V, + mut f: F, +) -> Punctuated<T, P> +where + V: ?Sized, + F: FnMut(&mut V, T) -> T, +{ + let Punctuated { inner, last } = punctuated; + + // Convert into VecDeque to prevent needing to allocate a new Vec<(T, P)> + // for the folded elements. + let mut inner = VecDeque::from(inner); + for _ in 0..inner.len() { + if let Some((t, p)) = inner.pop_front() { + inner.push_back((f(fold, t), p)); + } + } + + Punctuated { + inner: Vec::from(inner), + last: match last { + Some(t) => Some(Box::new(f(fold, *t))), + None => None, + }, + } +} + +#[cfg(feature = "printing")] +mod printing { + use crate::punctuated::{Pair, Punctuated}; + use proc_macro2::TokenStream; + use quote::{ToTokens, TokenStreamExt as _}; + + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl<T, P> ToTokens for Punctuated<T, P> + where + T: ToTokens, + P: ToTokens, + { + fn to_tokens(&self, tokens: &mut TokenStream) { + tokens.append_all(self.pairs()); + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl<T, P> ToTokens for Pair<T, P> + where + T: ToTokens, + P: ToTokens, + { + fn to_tokens(&self, tokens: &mut TokenStream) { + match self { + Pair::Punctuated(a, b) => { + a.to_tokens(tokens); + b.to_tokens(tokens); + } + Pair::End(a) => a.to_tokens(tokens), + } + } + } +} diff --git a/vendor/syn/src/restriction.rs b/vendor/syn/src/restriction.rs new file mode 100644 index 00000000000000..6e6758f3cd7369 --- /dev/null +++ b/vendor/syn/src/restriction.rs @@ -0,0 +1,178 @@ +use crate::path::Path; +use crate::token; + +ast_enum! { + /// The visibility level of an item: inherited or `pub` or + /// `pub(restricted)`. + /// + /// # Syntax tree enum + /// + /// This type is a [syntax tree enum]. 
+ /// + /// [syntax tree enum]: crate::expr::Expr#syntax-tree-enums + #[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] + pub enum Visibility { + /// A public visibility level: `pub`. + Public(Token![pub]), + + /// A visibility level restricted to some path: `pub(self)` or + /// `pub(super)` or `pub(crate)` or `pub(in some::module)`. + Restricted(VisRestricted), + + /// An inherited visibility, which usually means private. + Inherited, + } +} + +ast_struct! { + /// A visibility level restricted to some path: `pub(self)` or + /// `pub(super)` or `pub(crate)` or `pub(in some::module)`. + #[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] + pub struct VisRestricted { + pub pub_token: Token![pub], + pub paren_token: token::Paren, + pub in_token: Option<Token![in]>, + pub path: Box<Path>, + } +} + +ast_enum! { + /// Unused, but reserved for RFC 3323 restrictions. + #[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] + #[non_exhaustive] + pub enum FieldMutability { + None, + + // TODO: https://rust-lang.github.io/rfcs/3323-restrictions.html + // + // FieldMutability::Restricted(MutRestricted) + // + // pub struct MutRestricted { + // pub mut_token: Token![mut], + // pub paren_token: token::Paren, + // pub in_token: Option<Token![in]>, + // pub path: Box<Path>, + // } + } +} + +#[cfg(feature = "parsing")] +pub(crate) mod parsing { + use crate::error::Result; + use crate::ext::IdentExt as _; + use crate::ident::Ident; + use crate::parse::discouraged::Speculative as _; + use crate::parse::{Parse, ParseStream}; + use crate::path::Path; + use crate::restriction::{VisRestricted, Visibility}; + use crate::token; + + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + impl Parse for Visibility { + fn parse(input: ParseStream) -> Result<Self> { + // Recognize an empty None-delimited group, as produced by a $:vis + // matcher that matched no tokens. + if input.peek(token::Group) { + let ahead = input.fork(); + let group = crate::group::parse_group(&ahead)?; + if group.content.is_empty() { + input.advance_to(&ahead); + return Ok(Visibility::Inherited); + } + } + + if input.peek(Token![pub]) { + Self::parse_pub(input) + } else { + Ok(Visibility::Inherited) + } + } + } + + impl Visibility { + fn parse_pub(input: ParseStream) -> Result<Self> { + let pub_token = input.parse::<Token![pub]>()?; + + if input.peek(token::Paren) { + let ahead = input.fork(); + + let content; + let paren_token = parenthesized!(content in ahead); + if content.peek(Token![crate]) + || content.peek(Token![self]) + || content.peek(Token![super]) + { + let path = content.call(Ident::parse_any)?; + + // Ensure there are no additional tokens within `content`. + // Without explicitly checking, we may misinterpret a tuple + // field as a restricted visibility, causing a parse error. + // e.g. `pub (crate::A, crate::B)` (Issue #720). 
+ if content.is_empty() { + input.advance_to(&ahead); + return Ok(Visibility::Restricted(VisRestricted { + pub_token, + paren_token, + in_token: None, + path: Box::new(Path::from(path)), + })); + } + } else if content.peek(Token![in]) { + let in_token: Token![in] = content.parse()?; + let path = content.call(Path::parse_mod_style)?; + + input.advance_to(&ahead); + return Ok(Visibility::Restricted(VisRestricted { + pub_token, + paren_token, + in_token: Some(in_token), + path: Box::new(path), + })); + } + } + + Ok(Visibility::Public(pub_token)) + } + + #[cfg(feature = "full")] + pub(crate) fn is_some(&self) -> bool { + match self { + Visibility::Inherited => false, + _ => true, + } + } + } +} + +#[cfg(feature = "printing")] +mod printing { + use crate::path; + use crate::path::printing::PathStyle; + use crate::restriction::{VisRestricted, Visibility}; + use proc_macro2::TokenStream; + use quote::ToTokens; + + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for Visibility { + fn to_tokens(&self, tokens: &mut TokenStream) { + match self { + Visibility::Public(pub_token) => pub_token.to_tokens(tokens), + Visibility::Restricted(vis_restricted) => vis_restricted.to_tokens(tokens), + Visibility::Inherited => {} + } + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for VisRestricted { + fn to_tokens(&self, tokens: &mut TokenStream) { + self.pub_token.to_tokens(tokens); + self.paren_token.surround(tokens, |tokens| { + // TODO: If we have a path which is not "self" or "super" or + // "crate", automatically add the "in" token. + self.in_token.to_tokens(tokens); + path::printing::print_path(tokens, &self.path, PathStyle::Mod); + }); + } + } +} diff --git a/vendor/syn/src/scan_expr.rs b/vendor/syn/src/scan_expr.rs new file mode 100644 index 00000000000000..a3a0416cb09731 --- /dev/null +++ b/vendor/syn/src/scan_expr.rs @@ -0,0 +1,268 @@ +use self::{Action::*, Input::*}; +use proc_macro2::{Delimiter, Ident, Spacing, TokenTree}; +use syn::parse::{ParseStream, Result}; +#[allow(unused_imports)] +//#[cfg_attr(not(test), expect(unused_imports))] // Rust 1.81+ +use syn::Token; +use syn::{AngleBracketedGenericArguments, BinOp, Expr, ExprPath, Lifetime, Lit, Type}; + +enum Input { + Keyword(&'static str), + Punct(&'static str), + ConsumeAny, + ConsumeBinOp, + ConsumeBrace, + ConsumeDelimiter, + ConsumeIdent, + ConsumeLifetime, + ConsumeLiteral, + ConsumeNestedBrace, + ExpectPath, + ExpectTurbofish, + ExpectType, + CanBeginExpr, + Otherwise, + Empty, +} + +enum Action { + SetState(&'static [(Input, Action)]), + IncDepth, + DecDepth, + Finish, +} + +static INIT: [(Input, Action); 28] = [ + (ConsumeDelimiter, SetState(&POSTFIX)), + (Keyword("async"), SetState(&ASYNC)), + (Keyword("break"), SetState(&BREAK_LABEL)), + (Keyword("const"), SetState(&CONST)), + (Keyword("continue"), SetState(&CONTINUE)), + (Keyword("for"), SetState(&FOR)), + (Keyword("if"), IncDepth), + (Keyword("let"), SetState(&PATTERN)), + (Keyword("loop"), SetState(&BLOCK)), + (Keyword("match"), IncDepth), + (Keyword("move"), SetState(&CLOSURE)), + (Keyword("return"), SetState(&RETURN)), + (Keyword("static"), SetState(&CLOSURE)), + (Keyword("unsafe"), SetState(&BLOCK)), + (Keyword("while"), IncDepth), + (Keyword("yield"), SetState(&RETURN)), + (Keyword("_"), SetState(&POSTFIX)), + (Punct("!"), SetState(&INIT)), + (Punct("#"), SetState(&[(ConsumeDelimiter, SetState(&INIT))])), + (Punct("&"), SetState(&REFERENCE)), + (Punct("*"), SetState(&INIT)), + (Punct("-"), SetState(&INIT)), + (Punct("..="), 
SetState(&INIT)), + (Punct(".."), SetState(&RANGE)), + (Punct("|"), SetState(&CLOSURE_ARGS)), + (ConsumeLifetime, SetState(&[(Punct(":"), SetState(&INIT))])), + (ConsumeLiteral, SetState(&POSTFIX)), + (ExpectPath, SetState(&PATH)), +]; + +static POSTFIX: [(Input, Action); 10] = [ + (Keyword("as"), SetState(&[(ExpectType, SetState(&POSTFIX))])), + (Punct("..="), SetState(&INIT)), + (Punct(".."), SetState(&RANGE)), + (Punct("."), SetState(&DOT)), + (Punct("?"), SetState(&POSTFIX)), + (ConsumeBinOp, SetState(&INIT)), + (Punct("="), SetState(&INIT)), + (ConsumeNestedBrace, SetState(&IF_THEN)), + (ConsumeDelimiter, SetState(&POSTFIX)), + (Empty, Finish), +]; + +static ASYNC: [(Input, Action); 3] = [ + (Keyword("move"), SetState(&ASYNC)), + (Punct("|"), SetState(&CLOSURE_ARGS)), + (ConsumeBrace, SetState(&POSTFIX)), +]; + +static BLOCK: [(Input, Action); 1] = [(ConsumeBrace, SetState(&POSTFIX))]; + +static BREAK_LABEL: [(Input, Action); 2] = [ + (ConsumeLifetime, SetState(&BREAK_VALUE)), + (Otherwise, SetState(&BREAK_VALUE)), +]; + +static BREAK_VALUE: [(Input, Action); 3] = [ + (ConsumeNestedBrace, SetState(&IF_THEN)), + (CanBeginExpr, SetState(&INIT)), + (Otherwise, SetState(&POSTFIX)), +]; + +static CLOSURE: [(Input, Action); 7] = [ + (Keyword("async"), SetState(&CLOSURE)), + (Keyword("move"), SetState(&CLOSURE)), + (Punct(","), SetState(&CLOSURE)), + (Punct(">"), SetState(&CLOSURE)), + (Punct("|"), SetState(&CLOSURE_ARGS)), + (ConsumeLifetime, SetState(&CLOSURE)), + (ConsumeIdent, SetState(&CLOSURE)), +]; + +static CLOSURE_ARGS: [(Input, Action); 2] = [ + (Punct("|"), SetState(&CLOSURE_RET)), + (ConsumeAny, SetState(&CLOSURE_ARGS)), +]; + +static CLOSURE_RET: [(Input, Action); 2] = [ + (Punct("->"), SetState(&[(ExpectType, SetState(&BLOCK))])), + (Otherwise, SetState(&INIT)), +]; + +static CONST: [(Input, Action); 2] = [ + (Punct("|"), SetState(&CLOSURE_ARGS)), + (ConsumeBrace, SetState(&POSTFIX)), +]; + +static CONTINUE: [(Input, Action); 2] = [ + (ConsumeLifetime, SetState(&POSTFIX)), + (Otherwise, SetState(&POSTFIX)), +]; + +static DOT: [(Input, Action); 3] = [ + (Keyword("await"), SetState(&POSTFIX)), + (ConsumeIdent, SetState(&METHOD)), + (ConsumeLiteral, SetState(&POSTFIX)), +]; + +static FOR: [(Input, Action); 2] = [ + (Punct("<"), SetState(&CLOSURE)), + (Otherwise, SetState(&PATTERN)), +]; + +static IF_ELSE: [(Input, Action); 2] = [(Keyword("if"), SetState(&INIT)), (ConsumeBrace, DecDepth)]; +static IF_THEN: [(Input, Action); 2] = + [(Keyword("else"), SetState(&IF_ELSE)), (Otherwise, DecDepth)]; + +static METHOD: [(Input, Action); 1] = [(ExpectTurbofish, SetState(&POSTFIX))]; + +static PATH: [(Input, Action); 4] = [ + (Punct("!="), SetState(&INIT)), + (Punct("!"), SetState(&INIT)), + (ConsumeNestedBrace, SetState(&IF_THEN)), + (Otherwise, SetState(&POSTFIX)), +]; + +static PATTERN: [(Input, Action); 15] = [ + (ConsumeDelimiter, SetState(&PATTERN)), + (Keyword("box"), SetState(&PATTERN)), + (Keyword("in"), IncDepth), + (Keyword("mut"), SetState(&PATTERN)), + (Keyword("ref"), SetState(&PATTERN)), + (Keyword("_"), SetState(&PATTERN)), + (Punct("!"), SetState(&PATTERN)), + (Punct("&"), SetState(&PATTERN)), + (Punct("..="), SetState(&PATTERN)), + (Punct(".."), SetState(&PATTERN)), + (Punct("="), SetState(&INIT)), + (Punct("@"), SetState(&PATTERN)), + (Punct("|"), SetState(&PATTERN)), + (ConsumeLiteral, SetState(&PATTERN)), + (ExpectPath, SetState(&PATTERN)), +]; + +static RANGE: [(Input, Action); 6] = [ + (Punct("..="), SetState(&INIT)), + (Punct(".."), SetState(&RANGE)), + (Punct("."), 
SetState(&DOT)), + (ConsumeNestedBrace, SetState(&IF_THEN)), + (Empty, Finish), + (Otherwise, SetState(&INIT)), +]; + +static RAW: [(Input, Action); 3] = [ + (Keyword("const"), SetState(&INIT)), + (Keyword("mut"), SetState(&INIT)), + (Otherwise, SetState(&POSTFIX)), +]; + +static REFERENCE: [(Input, Action); 3] = [ + (Keyword("mut"), SetState(&INIT)), + (Keyword("raw"), SetState(&RAW)), + (Otherwise, SetState(&INIT)), +]; + +static RETURN: [(Input, Action); 2] = [ + (CanBeginExpr, SetState(&INIT)), + (Otherwise, SetState(&POSTFIX)), +]; + +pub(crate) fn scan_expr(input: ParseStream) -> Result<()> { + let mut state = INIT.as_slice(); + let mut depth = 0usize; + 'table: loop { + for rule in state { + if match rule.0 { + Input::Keyword(expected) => input.step(|cursor| match cursor.ident() { + Some((ident, rest)) if ident == expected => Ok((true, rest)), + _ => Ok((false, *cursor)), + })?, + Input::Punct(expected) => input.step(|cursor| { + let begin = *cursor; + let mut cursor = begin; + for (i, ch) in expected.chars().enumerate() { + match cursor.punct() { + Some((punct, _)) if punct.as_char() != ch => break, + Some((_, rest)) if i == expected.len() - 1 => { + return Ok((true, rest)); + } + Some((punct, rest)) if punct.spacing() == Spacing::Joint => { + cursor = rest; + } + _ => break, + } + } + Ok((false, begin)) + })?, + Input::ConsumeAny => input.parse::<Option<TokenTree>>()?.is_some(), + Input::ConsumeBinOp => input.parse::<BinOp>().is_ok(), + Input::ConsumeBrace | Input::ConsumeNestedBrace => { + (matches!(rule.0, Input::ConsumeBrace) || depth > 0) + && input.step(|cursor| match cursor.group(Delimiter::Brace) { + Some((_inside, _span, rest)) => Ok((true, rest)), + None => Ok((false, *cursor)), + })? + } + Input::ConsumeDelimiter => input.step(|cursor| match cursor.any_group() { + Some((_inside, _delimiter, _span, rest)) => Ok((true, rest)), + None => Ok((false, *cursor)), + })?, + Input::ConsumeIdent => input.parse::<Option<Ident>>()?.is_some(), + Input::ConsumeLifetime => input.parse::<Option<Lifetime>>()?.is_some(), + Input::ConsumeLiteral => input.parse::<Option<Lit>>()?.is_some(), + Input::ExpectPath => { + input.parse::<ExprPath>()?; + true + } + Input::ExpectTurbofish => { + if input.peek(Token![::]) { + input.parse::<AngleBracketedGenericArguments>()?; + } + true + } + Input::ExpectType => { + Type::without_plus(input)?; + true + } + Input::CanBeginExpr => Expr::peek(input), + Input::Otherwise => true, + Input::Empty => input.is_empty() || input.peek(Token![,]), + } { + state = match rule.1 { + Action::SetState(next) => next, + Action::IncDepth => (depth += 1, &INIT).1, + Action::DecDepth => (depth -= 1, &POSTFIX).1, + Action::Finish => return if depth == 0 { Ok(()) } else { break }, + }; + continue 'table; + } + } + return Err(input.error("unsupported expression")); + } +} diff --git a/vendor/syn/src/sealed.rs b/vendor/syn/src/sealed.rs new file mode 100644 index 00000000000000..dc804742d12db0 --- /dev/null +++ b/vendor/syn/src/sealed.rs @@ -0,0 +1,4 @@ +#[cfg(feature = "parsing")] +pub(crate) mod lookahead { + pub trait Sealed: Copy {} +} diff --git a/vendor/syn/src/span.rs b/vendor/syn/src/span.rs new file mode 100644 index 00000000000000..eb2779479aaac5 --- /dev/null +++ b/vendor/syn/src/span.rs @@ -0,0 +1,63 @@ +use proc_macro2::extra::DelimSpan; +use proc_macro2::{Delimiter, Group, Span, TokenStream}; + +#[doc(hidden)] +pub trait IntoSpans<S> { + fn into_spans(self) -> S; +} + +impl IntoSpans<Span> for Span { + fn into_spans(self) -> Span { + self + } +} + +impl 
IntoSpans<[Span; 1]> for Span { + fn into_spans(self) -> [Span; 1] { + [self] + } +} + +impl IntoSpans<[Span; 2]> for Span { + fn into_spans(self) -> [Span; 2] { + [self, self] + } +} + +impl IntoSpans<[Span; 3]> for Span { + fn into_spans(self) -> [Span; 3] { + [self, self, self] + } +} + +impl IntoSpans<[Span; 1]> for [Span; 1] { + fn into_spans(self) -> [Span; 1] { + self + } +} + +impl IntoSpans<[Span; 2]> for [Span; 2] { + fn into_spans(self) -> [Span; 2] { + self + } +} + +impl IntoSpans<[Span; 3]> for [Span; 3] { + fn into_spans(self) -> [Span; 3] { + self + } +} + +impl IntoSpans<DelimSpan> for Span { + fn into_spans(self) -> DelimSpan { + let mut group = Group::new(Delimiter::None, TokenStream::new()); + group.set_span(self); + group.delim_span() + } +} + +impl IntoSpans<DelimSpan> for DelimSpan { + fn into_spans(self) -> DelimSpan { + self + } +} diff --git a/vendor/syn/src/spanned.rs b/vendor/syn/src/spanned.rs new file mode 100644 index 00000000000000..17b69e9f5b2847 --- /dev/null +++ b/vendor/syn/src/spanned.rs @@ -0,0 +1,118 @@ +//! A trait that can provide the `Span` of the complete contents of a syntax +//! tree node. +//! +//! <br> +//! +//! # Example +//! +//! Suppose in a procedural macro we have a [`Type`] that we want to assert +//! implements the [`Sync`] trait. Maybe this is the type of one of the fields +//! of a struct for which we are deriving a trait implementation, and we need to +//! be able to pass a reference to one of those fields across threads. +//! +//! [`Type`]: crate::Type +//! [`Sync`]: std::marker::Sync +//! +//! If the field type does *not* implement `Sync` as required, we want the +//! compiler to report an error pointing out exactly which type it was. +//! +//! The following macro code takes a variable `ty` of type `Type` and produces a +//! static assertion that `Sync` is implemented for that type. +//! +//! ``` +//! # extern crate proc_macro; +//! # +//! use proc_macro::TokenStream; +//! use proc_macro2::Span; +//! use quote::quote_spanned; +//! use syn::Type; +//! use syn::spanned::Spanned; +//! +//! # const IGNORE_TOKENS: &str = stringify! { +//! #[proc_macro_derive(MyMacro)] +//! # }; +//! pub fn my_macro(input: TokenStream) -> TokenStream { +//! # let ty = get_a_type(); +//! /* ... */ +//! +//! let assert_sync = quote_spanned! {ty.span()=> +//! struct _AssertSync where #ty: Sync; +//! }; +//! +//! /* ... */ +//! # input +//! } +//! # +//! # fn get_a_type() -> Type { +//! # unimplemented!() +//! # } +//! ``` +//! +//! By inserting this `assert_sync` fragment into the output code generated by +//! our macro, the user's code will fail to compile if `ty` does not implement +//! `Sync`. The errors they would see look like the following. +//! +//! ```text +//! error[E0277]: the trait bound `*const i32: std::marker::Sync` is not satisfied +//! --> src/main.rs:10:21 +//! | +//! 10 | bad_field: *const i32, +//! | ^^^^^^^^^^ `*const i32` cannot be shared between threads safely +//! ``` +//! +//! In this technique, using the `Type`'s span for the error message makes the +//! error appear in the correct place underlining the right type. +//! +//! <br> +//! +//! # Limitations +//! +//! The underlying [`proc_macro::Span::join`] method is nightly-only. When +//! called from within a procedural macro in a nightly compiler, `Spanned` will +//! use `join` to produce the intended span. When not using a nightly compiler, +//! only the span of the *first token* of the syntax tree node is returned. +//! +//! 
In the common case of wanting to use the joined span as the span of a +//! `syn::Error`, consider instead using [`syn::Error::new_spanned`] which is +//! able to span the error correctly under the complete syntax tree node without +//! needing the unstable `join`. +//! +//! [`syn::Error::new_spanned`]: crate::Error::new_spanned + +use proc_macro2::Span; +use quote::spanned::Spanned as ToTokens; + +/// A trait that can provide the `Span` of the complete contents of a syntax +/// tree node. +/// +/// This trait is automatically implemented for all types that implement +/// [`ToTokens`] from the `quote` crate, as well as for `Span` itself. +/// +/// [`ToTokens`]: quote::ToTokens +/// +/// See the [module documentation] for an example. +/// +/// [module documentation]: self +pub trait Spanned: private::Sealed { + /// Returns a `Span` covering the complete contents of this syntax tree + /// node, or [`Span::call_site()`] if this node is empty. + /// + /// [`Span::call_site()`]: proc_macro2::Span::call_site + fn span(&self) -> Span; +} + +impl<T: ?Sized + ToTokens> Spanned for T { + fn span(&self) -> Span { + self.__span() + } +} + +mod private { + use crate::spanned::ToTokens; + + pub trait Sealed {} + impl<T: ?Sized + ToTokens> Sealed for T {} + + #[cfg(any(feature = "full", feature = "derive"))] + impl Sealed for crate::QSelf {} +} diff --git a/vendor/syn/src/stmt.rs b/vendor/syn/src/stmt.rs new file mode 100644 index 00000000000000..970bc13dc25a1d --- /dev/null +++ b/vendor/syn/src/stmt.rs @@ -0,0 +1,484 @@ +use crate::attr::Attribute; +use crate::expr::Expr; +use crate::item::Item; +use crate::mac::Macro; +use crate::pat::Pat; +use crate::token; + +ast_struct! { + /// A braced block containing Rust statements. + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + pub struct Block { + pub brace_token: token::Brace, + /// Statements in a block + pub stmts: Vec<Stmt>, + } +} + +ast_enum! { + /// A statement, usually ending in a semicolon. + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + pub enum Stmt { + /// A local (let) binding. + Local(Local), + + /// An item definition. + Item(Item), + + /// Expression, with or without trailing semicolon. + Expr(Expr, Option<Token![;]>), + + /// A macro invocation in statement position. + /// + /// Syntactically it's ambiguous which other kind of statement this + /// macro would expand to. It can be any of local variable (`let`), + /// item, or expression. + Macro(StmtMacro), + } +} + +ast_struct! { + /// A local `let` binding: `let x: u64 = s.parse()?;`. + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + pub struct Local { + pub attrs: Vec<Attribute>, + pub let_token: Token![let], + pub pat: Pat, + pub init: Option<LocalInit>, + pub semi_token: Token![;], + } +} + +ast_struct! { + /// The expression assigned in a local `let` binding, including optional + /// diverging `else` block. + /// + /// `LocalInit` represents `= s.parse()?` in `let x: u64 = s.parse()?` and + /// `= r else { return }` in `let Ok(x) = r else { return }`. + #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + pub struct LocalInit { + pub eq_token: Token![=], + pub expr: Box<Expr>, + pub diverge: Option<(Token![else], Box<Expr>)>, + } +} + +ast_struct! { + /// A macro invocation in statement position. + /// + /// Syntactically it's ambiguous which other kind of statement this macro + /// would expand to. It can be any of local variable (`let`), item, or + /// expression. 
+ #[cfg_attr(docsrs, doc(cfg(feature = "full")))] + pub struct StmtMacro { + pub attrs: Vec<Attribute>, + pub mac: Macro, + pub semi_token: Option<Token![;]>, + } +} + +#[cfg(feature = "parsing")] +pub(crate) mod parsing { + use crate::attr::Attribute; + use crate::classify; + use crate::error::Result; + use crate::expr::{Expr, ExprBlock, ExprMacro}; + use crate::ident::Ident; + use crate::item; + use crate::mac::{self, Macro}; + use crate::parse::discouraged::Speculative as _; + use crate::parse::{Parse, ParseStream}; + use crate::pat::{Pat, PatType}; + use crate::path::Path; + use crate::stmt::{Block, Local, LocalInit, Stmt, StmtMacro}; + use crate::token; + use crate::ty::Type; + use proc_macro2::TokenStream; + + struct AllowNoSemi(bool); + + impl Block { + /// Parse the body of a block as zero or more statements, possibly + /// including one trailing expression. + /// + /// # Example + /// + /// ``` + /// use syn::{braced, token, Attribute, Block, Ident, Result, Stmt, Token}; + /// use syn::parse::{Parse, ParseStream}; + /// + /// // Parse a function with no generics or parameter list. + /// // + /// // fn playground { + /// // let mut x = 1; + /// // x += 1; + /// // println!("{}", x); + /// // } + /// struct MiniFunction { + /// attrs: Vec<Attribute>, + /// fn_token: Token![fn], + /// name: Ident, + /// brace_token: token::Brace, + /// stmts: Vec<Stmt>, + /// } + /// + /// impl Parse for MiniFunction { + /// fn parse(input: ParseStream) -> Result<Self> { + /// let outer_attrs = input.call(Attribute::parse_outer)?; + /// let fn_token: Token![fn] = input.parse()?; + /// let name: Ident = input.parse()?; + /// + /// let content; + /// let brace_token = braced!(content in input); + /// let inner_attrs = content.call(Attribute::parse_inner)?; + /// let stmts = content.call(Block::parse_within)?; + /// + /// Ok(MiniFunction { + /// attrs: { + /// let mut attrs = outer_attrs; + /// attrs.extend(inner_attrs); + /// attrs + /// }, + /// fn_token, + /// name, + /// brace_token, + /// stmts, + /// }) + /// } + /// } + /// ``` + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + pub fn parse_within(input: ParseStream) -> Result<Vec<Stmt>> { + let mut stmts = Vec::new(); + loop { + while let semi @ Some(_) = input.parse()? 
{ + stmts.push(Stmt::Expr(Expr::Verbatim(TokenStream::new()), semi)); + } + if input.is_empty() { + break; + } + let stmt = parse_stmt(input, AllowNoSemi(true))?; + let requires_semicolon = match &stmt { + Stmt::Expr(stmt, None) => classify::requires_semi_to_be_stmt(stmt), + Stmt::Macro(stmt) => { + stmt.semi_token.is_none() && !stmt.mac.delimiter.is_brace() + } + Stmt::Local(_) | Stmt::Item(_) | Stmt::Expr(_, Some(_)) => false, + }; + stmts.push(stmt); + if input.is_empty() { + break; + } else if requires_semicolon { + return Err(input.error("unexpected token, expected `;`")); + } + } + Ok(stmts) + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + impl Parse for Block { + fn parse(input: ParseStream) -> Result<Self> { + let content; + Ok(Block { + brace_token: braced!(content in input), + stmts: content.call(Block::parse_within)?, + }) + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + impl Parse for Stmt { + fn parse(input: ParseStream) -> Result<Self> { + let allow_nosemi = AllowNoSemi(false); + parse_stmt(input, allow_nosemi) + } + } + + fn parse_stmt(input: ParseStream, allow_nosemi: AllowNoSemi) -> Result<Stmt> { + let begin = input.fork(); + let attrs = input.call(Attribute::parse_outer)?; + + // brace-style macros; paren and bracket macros get parsed as + // expression statements. + let ahead = input.fork(); + let mut is_item_macro = false; + if let Ok(path) = ahead.call(Path::parse_mod_style) { + if ahead.peek(Token![!]) { + if ahead.peek2(Ident) || ahead.peek2(Token![try]) { + is_item_macro = true; + } else if ahead.peek2(token::Brace) + && !(ahead.peek3(Token![.]) && !ahead.peek3(Token![..]) + || ahead.peek3(Token![?])) + { + input.advance_to(&ahead); + return stmt_mac(input, attrs, path).map(Stmt::Macro); + } + } + } + + if input.peek(Token![let]) && !input.peek(token::Group) { + stmt_local(input, attrs).map(Stmt::Local) + } else if input.peek(Token![pub]) + || input.peek(Token![crate]) && !input.peek2(Token![::]) + || input.peek(Token![extern]) + || input.peek(Token![use]) + || input.peek(Token![static]) + && (input.peek2(Token![mut]) + || input.peek2(Ident) + && !(input.peek2(Token![async]) + && (input.peek3(Token![move]) || input.peek3(Token![|])))) + || input.peek(Token![const]) + && !(input.peek2(token::Brace) + || input.peek2(Token![static]) + || input.peek2(Token![async]) + && !(input.peek3(Token![unsafe]) + || input.peek3(Token![extern]) + || input.peek3(Token![fn])) + || input.peek2(Token![move]) + || input.peek2(Token![|])) + || input.peek(Token![unsafe]) && !input.peek2(token::Brace) + || input.peek(Token![async]) + && (input.peek2(Token![unsafe]) + || input.peek2(Token![extern]) + || input.peek2(Token![fn])) + || input.peek(Token![fn]) + || input.peek(Token![mod]) + || input.peek(Token![type]) + || input.peek(Token![struct]) + || input.peek(Token![enum]) + || input.peek(Token![union]) && input.peek2(Ident) + || input.peek(Token![auto]) && input.peek2(Token![trait]) + || input.peek(Token![trait]) + || input.peek(Token![default]) + && (input.peek2(Token![unsafe]) || input.peek2(Token![impl])) + || input.peek(Token![impl]) + || input.peek(Token![macro]) + || is_item_macro + { + let item = item::parsing::parse_rest_of_item(begin, attrs, input)?; + Ok(Stmt::Item(item)) + } else { + stmt_expr(input, allow_nosemi, attrs) + } + } + + fn stmt_mac(input: ParseStream, attrs: Vec<Attribute>, path: Path) -> Result<StmtMacro> { + let bang_token: Token![!] 
= input.parse()?; + let (delimiter, tokens) = mac::parse_delimiter(input)?; + let semi_token: Option<Token![;]> = input.parse()?; + + Ok(StmtMacro { + attrs, + mac: Macro { + path, + bang_token, + delimiter, + tokens, + }, + semi_token, + }) + } + + fn stmt_local(input: ParseStream, attrs: Vec<Attribute>) -> Result<Local> { + let let_token: Token![let] = input.parse()?; + + let mut pat = Pat::parse_single(input)?; + if input.peek(Token![:]) { + let colon_token: Token![:] = input.parse()?; + let ty: Type = input.parse()?; + pat = Pat::Type(PatType { + attrs: Vec::new(), + pat: Box::new(pat), + colon_token, + ty: Box::new(ty), + }); + } + + let init = if let Some(eq_token) = input.parse()? { + let eq_token: Token![=] = eq_token; + let expr: Expr = input.parse()?; + + let diverge = if !classify::expr_trailing_brace(&expr) && input.peek(Token![else]) { + let else_token: Token![else] = input.parse()?; + let diverge = ExprBlock { + attrs: Vec::new(), + label: None, + block: input.parse()?, + }; + Some((else_token, Box::new(Expr::Block(diverge)))) + } else { + None + }; + + Some(LocalInit { + eq_token, + expr: Box::new(expr), + diverge, + }) + } else { + None + }; + + let semi_token: Token![;] = input.parse()?; + + Ok(Local { + attrs, + let_token, + pat, + init, + semi_token, + }) + } + + fn stmt_expr( + input: ParseStream, + allow_nosemi: AllowNoSemi, + mut attrs: Vec<Attribute>, + ) -> Result<Stmt> { + let mut e = Expr::parse_with_earlier_boundary_rule(input)?; + + let mut attr_target = &mut e; + loop { + attr_target = match attr_target { + Expr::Assign(e) => &mut e.left, + Expr::Binary(e) => &mut e.left, + Expr::Cast(e) => &mut e.expr, + Expr::Array(_) + | Expr::Async(_) + | Expr::Await(_) + | Expr::Block(_) + | Expr::Break(_) + | Expr::Call(_) + | Expr::Closure(_) + | Expr::Const(_) + | Expr::Continue(_) + | Expr::Field(_) + | Expr::ForLoop(_) + | Expr::Group(_) + | Expr::If(_) + | Expr::Index(_) + | Expr::Infer(_) + | Expr::Let(_) + | Expr::Lit(_) + | Expr::Loop(_) + | Expr::Macro(_) + | Expr::Match(_) + | Expr::MethodCall(_) + | Expr::Paren(_) + | Expr::Path(_) + | Expr::Range(_) + | Expr::RawAddr(_) + | Expr::Reference(_) + | Expr::Repeat(_) + | Expr::Return(_) + | Expr::Struct(_) + | Expr::Try(_) + | Expr::TryBlock(_) + | Expr::Tuple(_) + | Expr::Unary(_) + | Expr::Unsafe(_) + | Expr::While(_) + | Expr::Yield(_) + | Expr::Verbatim(_) => break, + }; + } + attrs.extend(attr_target.replace_attrs(Vec::new())); + attr_target.replace_attrs(attrs); + + let semi_token: Option<Token![;]> = input.parse()?; + + match e { + Expr::Macro(ExprMacro { attrs, mac }) + if semi_token.is_some() || mac.delimiter.is_brace() => + { + return Ok(Stmt::Macro(StmtMacro { + attrs, + mac, + semi_token, + })); + } + _ => {} + } + + if semi_token.is_some() { + Ok(Stmt::Expr(e, semi_token)) + } else if allow_nosemi.0 || !classify::requires_semi_to_be_stmt(&e) { + Ok(Stmt::Expr(e, None)) + } else { + Err(input.error("expected semicolon")) + } + } +} + +#[cfg(feature = "printing")] +pub(crate) mod printing { + use crate::classify; + use crate::expr::{self, Expr}; + use crate::fixup::FixupContext; + use crate::stmt::{Block, Local, Stmt, StmtMacro}; + use crate::token; + use proc_macro2::TokenStream; + use quote::{ToTokens, TokenStreamExt as _}; + + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for Block { + fn to_tokens(&self, tokens: &mut TokenStream) { + self.brace_token.surround(tokens, |tokens| { + tokens.append_all(&self.stmts); + }); + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = 
"printing")))] + impl ToTokens for Stmt { + fn to_tokens(&self, tokens: &mut TokenStream) { + match self { + Stmt::Local(local) => local.to_tokens(tokens), + Stmt::Item(item) => item.to_tokens(tokens), + Stmt::Expr(expr, semi) => { + expr::printing::print_expr(expr, tokens, FixupContext::new_stmt()); + semi.to_tokens(tokens); + } + Stmt::Macro(mac) => mac.to_tokens(tokens), + } + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for Local { + fn to_tokens(&self, tokens: &mut TokenStream) { + expr::printing::outer_attrs_to_tokens(&self.attrs, tokens); + self.let_token.to_tokens(tokens); + self.pat.to_tokens(tokens); + if let Some(init) = &self.init { + init.eq_token.to_tokens(tokens); + expr::printing::print_subexpression( + &init.expr, + init.diverge.is_some() && classify::expr_trailing_brace(&init.expr), + tokens, + FixupContext::NONE, + ); + if let Some((else_token, diverge)) = &init.diverge { + else_token.to_tokens(tokens); + match &**diverge { + Expr::Block(diverge) => diverge.to_tokens(tokens), + _ => token::Brace::default().surround(tokens, |tokens| { + expr::printing::print_expr(diverge, tokens, FixupContext::new_stmt()); + }), + } + } + } + self.semi_token.to_tokens(tokens); + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for StmtMacro { + fn to_tokens(&self, tokens: &mut TokenStream) { + expr::printing::outer_attrs_to_tokens(&self.attrs, tokens); + self.mac.to_tokens(tokens); + self.semi_token.to_tokens(tokens); + } + } +} diff --git a/vendor/syn/src/thread.rs b/vendor/syn/src/thread.rs new file mode 100644 index 00000000000000..b33d248afc6063 --- /dev/null +++ b/vendor/syn/src/thread.rs @@ -0,0 +1,60 @@ +use std::fmt::{self, Debug}; +use std::thread::{self, ThreadId}; + +/// ThreadBound is a Sync-maker and Send-maker that allows accessing a value +/// of type T only from the original thread on which the ThreadBound was +/// constructed. +pub(crate) struct ThreadBound<T> { + value: T, + thread_id: ThreadId, +} + +unsafe impl<T> Sync for ThreadBound<T> {} + +// Send bound requires Copy, as otherwise Drop could run in the wrong place. +// +// Today Copy and Drop are mutually exclusive so `T: Copy` implies `T: !Drop`. +// This impl needs to be revisited if that restriction is relaxed in the future. +unsafe impl<T: Copy> Send for ThreadBound<T> {} + +impl<T> ThreadBound<T> { + pub(crate) fn new(value: T) -> Self { + ThreadBound { + value, + thread_id: thread::current().id(), + } + } + + pub(crate) fn get(&self) -> Option<&T> { + if thread::current().id() == self.thread_id { + Some(&self.value) + } else { + None + } + } +} + +impl<T: Debug> Debug for ThreadBound<T> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + match self.get() { + Some(value) => Debug::fmt(value, formatter), + None => formatter.write_str("unknown"), + } + } +} + +// Copy the bytes of T, even if the currently running thread is the "wrong" +// thread. This is fine as long as the original thread is not simultaneously +// mutating this value via interior mutability, which would be a data race. +// +// Currently `T: Copy` is sufficient to guarantee that T contains no interior +// mutability, because _all_ interior mutability in Rust is built on +// std::cell::UnsafeCell, which has no Copy impl. This impl needs to be +// revisited if that restriction is relaxed in the future. 
+impl<T: Copy> Copy for ThreadBound<T> {} + +impl<T: Copy> Clone for ThreadBound<T> { + fn clone(&self) -> Self { + *self + } +} diff --git a/vendor/syn/src/token.rs b/vendor/syn/src/token.rs new file mode 100644 index 00000000000000..52321fc6c70611 --- /dev/null +++ b/vendor/syn/src/token.rs @@ -0,0 +1,1093 @@ +//! Tokens representing Rust punctuation, keywords, and delimiters. +//! +//! The type names in this module can be difficult to keep straight, so we +//! prefer to use the [`Token!`] macro instead. This is a type-macro that +//! expands to the token type of the given token. +//! +//! [`Token!`]: crate::Token +//! +//! # Example +//! +//! The [`ItemStatic`] syntax tree node is defined like this. +//! +//! [`ItemStatic`]: crate::ItemStatic +//! +//! ``` +//! # use syn::{Attribute, Expr, Ident, Token, Type, Visibility}; +//! # +//! pub struct ItemStatic { +//! pub attrs: Vec<Attribute>, +//! pub vis: Visibility, +//! pub static_token: Token![static], +//! pub mutability: Option<Token![mut]>, +//! pub ident: Ident, +//! pub colon_token: Token![:], +//! pub ty: Box<Type>, +//! pub eq_token: Token![=], +//! pub expr: Box<Expr>, +//! pub semi_token: Token![;], +//! } +//! ``` +//! +//! # Parsing +//! +//! Keywords and punctuation can be parsed through the [`ParseStream::parse`] +//! method. Delimiter tokens are parsed using the [`parenthesized!`], +//! [`bracketed!`] and [`braced!`] macros. +//! +//! [`ParseStream::parse`]: crate::parse::ParseBuffer::parse() +//! [`parenthesized!`]: crate::parenthesized! +//! [`bracketed!`]: crate::bracketed! +//! [`braced!`]: crate::braced! +//! +//! ``` +//! use syn::{Attribute, Result}; +//! use syn::parse::{Parse, ParseStream}; +//! # +//! # enum ItemStatic {} +//! +//! // Parse the ItemStatic struct shown above. +//! impl Parse for ItemStatic { +//! fn parse(input: ParseStream) -> Result<Self> { +//! # use syn::ItemStatic; +//! # fn parse(input: ParseStream) -> Result<ItemStatic> { +//! Ok(ItemStatic { +//! attrs: input.call(Attribute::parse_outer)?, +//! vis: input.parse()?, +//! static_token: input.parse()?, +//! mutability: input.parse()?, +//! ident: input.parse()?, +//! colon_token: input.parse()?, +//! ty: input.parse()?, +//! eq_token: input.parse()?, +//! expr: input.parse()?, +//! semi_token: input.parse()?, +//! }) +//! # } +//! # unimplemented!() +//! } +//! } +//! ``` +//! +//! # Other operations +//! +//! Every keyword and punctuation token supports the following operations. +//! +//! - [Peeking] — `input.peek(Token![...])` +//! +//! - [Parsing] — `input.parse::<Token![...]>()?` +//! +//! - [Printing] — `quote!( ... #the_token ... )` +//! +//! - Construction from a [`Span`] — `let the_token = Token![...](sp)` +//! +//! - Field access to its span — `let sp = the_token.span` +//! +//! [Peeking]: crate::parse::ParseBuffer::peek() +//! [Parsing]: crate::parse::ParseBuffer::parse() +//! [Printing]: https://docs.rs/quote/1.0/quote/trait.ToTokens.html +//! 
[`Span`]: https://docs.rs/proc-macro2/1.0/proc_macro2/struct.Span.html + +#[cfg(feature = "parsing")] +pub(crate) use self::private::CustomToken; +use self::private::WithSpan; +#[cfg(feature = "parsing")] +use crate::buffer::Cursor; +#[cfg(feature = "parsing")] +use crate::error::Result; +#[cfg(feature = "parsing")] +use crate::lifetime::Lifetime; +#[cfg(feature = "parsing")] +use crate::parse::{Parse, ParseStream}; +use crate::span::IntoSpans; +use proc_macro2::extra::DelimSpan; +use proc_macro2::Span; +#[cfg(feature = "printing")] +use proc_macro2::TokenStream; +#[cfg(any(feature = "parsing", feature = "printing"))] +use proc_macro2::{Delimiter, Ident}; +#[cfg(feature = "parsing")] +use proc_macro2::{Literal, Punct, TokenTree}; +#[cfg(feature = "printing")] +use quote::{ToTokens, TokenStreamExt as _}; +#[cfg(feature = "extra-traits")] +use std::cmp; +#[cfg(feature = "extra-traits")] +use std::fmt::{self, Debug}; +#[cfg(feature = "extra-traits")] +use std::hash::{Hash, Hasher}; +use std::ops::{Deref, DerefMut}; + +/// Marker trait for types that represent single tokens. +/// +/// This trait is sealed and cannot be implemented for types outside of Syn. +#[cfg(feature = "parsing")] +pub trait Token: private::Sealed { + // Not public API. + #[doc(hidden)] + fn peek(cursor: Cursor) -> bool; + + // Not public API. + #[doc(hidden)] + fn display() -> &'static str; +} + +pub(crate) mod private { + #[cfg(feature = "parsing")] + use crate::buffer::Cursor; + use proc_macro2::Span; + + #[cfg(feature = "parsing")] + pub trait Sealed {} + + /// Support writing `token.span` rather than `token.spans[0]` on tokens that + /// hold a single span. + #[repr(transparent)] + #[allow(unknown_lints, repr_transparent_non_zst_fields)] // False positive: https://github.com/rust-lang/rust/issues/115922 + pub struct WithSpan { + pub span: Span, + } + + // Not public API. + #[doc(hidden)] + #[cfg(feature = "parsing")] + pub trait CustomToken { + fn peek(cursor: Cursor) -> bool; + fn display() -> &'static str; + } +} + +#[cfg(feature = "parsing")] +impl private::Sealed for Ident {} + +macro_rules! impl_low_level_token { + ($display:literal $($path:ident)::+ $get:ident) => { + #[cfg(feature = "parsing")] + impl Token for $($path)::+ { + fn peek(cursor: Cursor) -> bool { + cursor.$get().is_some() + } + + fn display() -> &'static str { + $display + } + } + + #[cfg(feature = "parsing")] + impl private::Sealed for $($path)::+ {} + }; +} + +impl_low_level_token!("punctuation token" Punct punct); +impl_low_level_token!("literal" Literal literal); +impl_low_level_token!("token" TokenTree token_tree); +impl_low_level_token!("group token" proc_macro2::Group any_group); +impl_low_level_token!("lifetime" Lifetime lifetime); + +#[cfg(feature = "parsing")] +impl<T: CustomToken> private::Sealed for T {} + +#[cfg(feature = "parsing")] +impl<T: CustomToken> Token for T { + fn peek(cursor: Cursor) -> bool { + <Self as CustomToken>::peek(cursor) + } + + fn display() -> &'static str { + <Self as CustomToken>::display() + } +} + +macro_rules! define_keywords { + ($($token:literal pub struct $name:ident)*) => { + $( + #[doc = concat!('`', $token, '`')] + /// + /// Don't try to remember the name of this type — use the + /// [`Token!`] macro instead. 
+ /// + /// [`Token!`]: crate::token + pub struct $name { + pub span: Span, + } + + #[doc(hidden)] + #[allow(non_snake_case)] + pub fn $name<S: IntoSpans<Span>>(span: S) -> $name { + $name { + span: span.into_spans(), + } + } + + impl std::default::Default for $name { + fn default() -> Self { + $name { + span: Span::call_site(), + } + } + } + + #[cfg(feature = "clone-impls")] + #[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] + impl Copy for $name {} + + #[cfg(feature = "clone-impls")] + #[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] + impl Clone for $name { + fn clone(&self) -> Self { + *self + } + } + + #[cfg(feature = "extra-traits")] + #[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] + impl Debug for $name { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.write_str(stringify!($name)) + } + } + + #[cfg(feature = "extra-traits")] + #[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] + impl cmp::Eq for $name {} + + #[cfg(feature = "extra-traits")] + #[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] + impl PartialEq for $name { + fn eq(&self, _other: &$name) -> bool { + true + } + } + + #[cfg(feature = "extra-traits")] + #[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] + impl Hash for $name { + fn hash<H: Hasher>(&self, _state: &mut H) {} + } + + #[cfg(feature = "printing")] + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for $name { + fn to_tokens(&self, tokens: &mut TokenStream) { + printing::keyword($token, self.span, tokens); + } + } + + #[cfg(feature = "parsing")] + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + impl Parse for $name { + fn parse(input: ParseStream) -> Result<Self> { + Ok($name { + span: parsing::keyword(input, $token)?, + }) + } + } + + #[cfg(feature = "parsing")] + impl Token for $name { + fn peek(cursor: Cursor) -> bool { + parsing::peek_keyword(cursor, $token) + } + + fn display() -> &'static str { + concat!("`", $token, "`") + } + } + + #[cfg(feature = "parsing")] + impl private::Sealed for $name {} + )* + }; +} + +macro_rules! impl_deref_if_len_is_1 { + ($name:ident/1) => { + impl Deref for $name { + type Target = WithSpan; + + fn deref(&self) -> &Self::Target { + unsafe { &*(self as *const Self).cast::<WithSpan>() } + } + } + + impl DerefMut for $name { + fn deref_mut(&mut self) -> &mut Self::Target { + unsafe { &mut *(self as *mut Self).cast::<WithSpan>() } + } + } + }; + + ($name:ident/$len:literal) => {}; +} + +macro_rules! define_punctuation_structs { + ($($token:literal pub struct $name:ident/$len:tt #[doc = $usage:literal])*) => { + $( + #[cfg_attr(not(doc), repr(transparent))] + #[allow(unknown_lints, repr_transparent_non_zst_fields)] // False positive: https://github.com/rust-lang/rust/issues/115922 + #[doc = concat!('`', $token, '`')] + /// + /// Usage: + #[doc = concat!($usage, '.')] + /// + /// Don't try to remember the name of this type — use the + /// [`Token!`] macro instead. 
+ /// + /// [`Token!`]: crate::token + pub struct $name { + pub spans: [Span; $len], + } + + #[doc(hidden)] + #[allow(non_snake_case)] + pub fn $name<S: IntoSpans<[Span; $len]>>(spans: S) -> $name { + $name { + spans: spans.into_spans(), + } + } + + impl std::default::Default for $name { + fn default() -> Self { + $name { + spans: [Span::call_site(); $len], + } + } + } + + #[cfg(feature = "clone-impls")] + #[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] + impl Copy for $name {} + + #[cfg(feature = "clone-impls")] + #[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] + impl Clone for $name { + fn clone(&self) -> Self { + *self + } + } + + #[cfg(feature = "extra-traits")] + #[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] + impl Debug for $name { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.write_str(stringify!($name)) + } + } + + #[cfg(feature = "extra-traits")] + #[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] + impl cmp::Eq for $name {} + + #[cfg(feature = "extra-traits")] + #[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] + impl PartialEq for $name { + fn eq(&self, _other: &$name) -> bool { + true + } + } + + #[cfg(feature = "extra-traits")] + #[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] + impl Hash for $name { + fn hash<H: Hasher>(&self, _state: &mut H) {} + } + + impl_deref_if_len_is_1!($name/$len); + )* + }; +} + +macro_rules! define_punctuation { + ($($token:literal pub struct $name:ident/$len:tt #[doc = $usage:literal])*) => { + $( + define_punctuation_structs! { + $token pub struct $name/$len #[doc = $usage] + } + + #[cfg(feature = "printing")] + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for $name { + fn to_tokens(&self, tokens: &mut TokenStream) { + printing::punct($token, &self.spans, tokens); + } + } + + #[cfg(feature = "parsing")] + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + impl Parse for $name { + fn parse(input: ParseStream) -> Result<Self> { + Ok($name { + spans: parsing::punct(input, $token)?, + }) + } + } + + #[cfg(feature = "parsing")] + impl Token for $name { + fn peek(cursor: Cursor) -> bool { + parsing::peek_punct(cursor, $token) + } + + fn display() -> &'static str { + concat!("`", $token, "`") + } + } + + #[cfg(feature = "parsing")] + impl private::Sealed for $name {} + )* + }; +} + +macro_rules! 
define_delimiters { + ($($delim:ident pub struct $name:ident #[$doc:meta])*) => { + $( + #[$doc] + pub struct $name { + pub span: DelimSpan, + } + + #[doc(hidden)] + #[allow(non_snake_case)] + pub fn $name<S: IntoSpans<DelimSpan>>(span: S) -> $name { + $name { + span: span.into_spans(), + } + } + + impl std::default::Default for $name { + fn default() -> Self { + $name(Span::call_site()) + } + } + + #[cfg(feature = "clone-impls")] + #[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] + impl Copy for $name {} + + #[cfg(feature = "clone-impls")] + #[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] + impl Clone for $name { + fn clone(&self) -> Self { + *self + } + } + + #[cfg(feature = "extra-traits")] + #[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] + impl Debug for $name { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.write_str(stringify!($name)) + } + } + + #[cfg(feature = "extra-traits")] + #[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] + impl cmp::Eq for $name {} + + #[cfg(feature = "extra-traits")] + #[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] + impl PartialEq for $name { + fn eq(&self, _other: &$name) -> bool { + true + } + } + + #[cfg(feature = "extra-traits")] + #[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] + impl Hash for $name { + fn hash<H: Hasher>(&self, _state: &mut H) {} + } + + impl $name { + #[cfg(feature = "printing")] + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + pub fn surround<F>(&self, tokens: &mut TokenStream, f: F) + where + F: FnOnce(&mut TokenStream), + { + let mut inner = TokenStream::new(); + f(&mut inner); + printing::delim(Delimiter::$delim, self.span.join(), tokens, inner); + } + } + + #[cfg(feature = "parsing")] + impl private::Sealed for $name {} + )* + }; +} + +define_punctuation_structs! 
{ + "_" pub struct Underscore/1 /// wildcard patterns, inferred types, unnamed items in constants, extern crates, use declarations, and destructuring assignment +} + +#[cfg(feature = "printing")] +#[cfg_attr(docsrs, doc(cfg(feature = "printing")))] +impl ToTokens for Underscore { + fn to_tokens(&self, tokens: &mut TokenStream) { + tokens.append(Ident::new("_", self.span)); + } +} + +#[cfg(feature = "parsing")] +#[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] +impl Parse for Underscore { + fn parse(input: ParseStream) -> Result<Self> { + input.step(|cursor| { + if let Some((ident, rest)) = cursor.ident() { + if ident == "_" { + return Ok((Underscore(ident.span()), rest)); + } + } + if let Some((punct, rest)) = cursor.punct() { + if punct.as_char() == '_' { + return Ok((Underscore(punct.span()), rest)); + } + } + Err(cursor.error("expected `_`")) + }) + } +} + +#[cfg(feature = "parsing")] +impl Token for Underscore { + fn peek(cursor: Cursor) -> bool { + if let Some((ident, _rest)) = cursor.ident() { + return ident == "_"; + } + if let Some((punct, _rest)) = cursor.punct() { + return punct.as_char() == '_'; + } + false + } + + fn display() -> &'static str { + "`_`" + } +} + +#[cfg(feature = "parsing")] +impl private::Sealed for Underscore {} + +/// None-delimited group +pub struct Group { + pub span: Span, +} + +#[doc(hidden)] +#[allow(non_snake_case)] +pub fn Group<S: IntoSpans<Span>>(span: S) -> Group { + Group { + span: span.into_spans(), + } +} + +impl std::default::Default for Group { + fn default() -> Self { + Group { + span: Span::call_site(), + } + } +} + +#[cfg(feature = "clone-impls")] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Copy for Group {} + +#[cfg(feature = "clone-impls")] +#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] +impl Clone for Group { + fn clone(&self) -> Self { + *self + } +} + +#[cfg(feature = "extra-traits")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Debug for Group { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.write_str("Group") + } +} + +#[cfg(feature = "extra-traits")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl cmp::Eq for Group {} + +#[cfg(feature = "extra-traits")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl PartialEq for Group { + fn eq(&self, _other: &Group) -> bool { + true + } +} + +#[cfg(feature = "extra-traits")] +#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] +impl Hash for Group { + fn hash<H: Hasher>(&self, _state: &mut H) {} +} + +impl Group { + #[cfg(feature = "printing")] + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + pub fn surround<F>(&self, tokens: &mut TokenStream, f: F) + where + F: FnOnce(&mut TokenStream), + { + let mut inner = TokenStream::new(); + f(&mut inner); + printing::delim(Delimiter::None, self.span, tokens, inner); + } +} + +#[cfg(feature = "parsing")] +impl private::Sealed for Group {} + +#[cfg(feature = "parsing")] +impl Token for Paren { + fn peek(cursor: Cursor) -> bool { + cursor.group(Delimiter::Parenthesis).is_some() + } + + fn display() -> &'static str { + "parentheses" + } +} + +#[cfg(feature = "parsing")] +impl Token for Brace { + fn peek(cursor: Cursor) -> bool { + cursor.group(Delimiter::Brace).is_some() + } + + fn display() -> &'static str { + "curly braces" + } +} + +#[cfg(feature = "parsing")] +impl Token for Bracket { + fn peek(cursor: Cursor) -> bool { + cursor.group(Delimiter::Bracket).is_some() + } + + fn display() -> &'static str { + "square brackets" + } +} + +#[cfg(feature = 
"parsing")] +impl Token for Group { + fn peek(cursor: Cursor) -> bool { + cursor.group(Delimiter::None).is_some() + } + + fn display() -> &'static str { + "invisible group" + } +} + +define_keywords! { + "abstract" pub struct Abstract + "as" pub struct As + "async" pub struct Async + "auto" pub struct Auto + "await" pub struct Await + "become" pub struct Become + "box" pub struct Box + "break" pub struct Break + "const" pub struct Const + "continue" pub struct Continue + "crate" pub struct Crate + "default" pub struct Default + "do" pub struct Do + "dyn" pub struct Dyn + "else" pub struct Else + "enum" pub struct Enum + "extern" pub struct Extern + "final" pub struct Final + "fn" pub struct Fn + "for" pub struct For + "if" pub struct If + "impl" pub struct Impl + "in" pub struct In + "let" pub struct Let + "loop" pub struct Loop + "macro" pub struct Macro + "match" pub struct Match + "mod" pub struct Mod + "move" pub struct Move + "mut" pub struct Mut + "override" pub struct Override + "priv" pub struct Priv + "pub" pub struct Pub + "raw" pub struct Raw + "ref" pub struct Ref + "return" pub struct Return + "Self" pub struct SelfType + "self" pub struct SelfValue + "static" pub struct Static + "struct" pub struct Struct + "super" pub struct Super + "trait" pub struct Trait + "try" pub struct Try + "type" pub struct Type + "typeof" pub struct Typeof + "union" pub struct Union + "unsafe" pub struct Unsafe + "unsized" pub struct Unsized + "use" pub struct Use + "virtual" pub struct Virtual + "where" pub struct Where + "while" pub struct While + "yield" pub struct Yield +} + +define_punctuation! { + "&" pub struct And/1 /// bitwise and logical AND, borrow, references, reference patterns + "&&" pub struct AndAnd/2 /// lazy AND, borrow, references, reference patterns + "&=" pub struct AndEq/2 /// bitwise AND assignment + "@" pub struct At/1 /// subpattern binding + "^" pub struct Caret/1 /// bitwise and logical XOR + "^=" pub struct CaretEq/2 /// bitwise XOR assignment + ":" pub struct Colon/1 /// various separators + "," pub struct Comma/1 /// various separators + "$" pub struct Dollar/1 /// macros + "." pub struct Dot/1 /// field access, tuple index + ".." pub struct DotDot/2 /// range, struct expressions, patterns, range patterns + "..." pub struct DotDotDot/3 /// variadic functions, range patterns + "..=" pub struct DotDotEq/3 /// inclusive range, range patterns + "=" pub struct Eq/1 /// assignment, attributes, various type definitions + "==" pub struct EqEq/2 /// equal + "=>" pub struct FatArrow/2 /// match arms, macros + ">=" pub struct Ge/2 /// greater than or equal to, generics + ">" pub struct Gt/1 /// greater than, generics, paths + "<-" pub struct LArrow/2 /// unused + "<=" pub struct Le/2 /// less than or equal to + "<" pub struct Lt/1 /// less than, generics, paths + "-" pub struct Minus/1 /// subtraction, negation + "-=" pub struct MinusEq/2 /// subtraction assignment + "!=" pub struct Ne/2 /// not equal + "!" 
pub struct Not/1 /// bitwise and logical NOT, macro calls, inner attributes, never type, negative impls + "|" pub struct Or/1 /// bitwise and logical OR, closures, patterns in match, if let, and while let + "|=" pub struct OrEq/2 /// bitwise OR assignment + "||" pub struct OrOr/2 /// lazy OR, closures + "::" pub struct PathSep/2 /// path separator + "%" pub struct Percent/1 /// remainder + "%=" pub struct PercentEq/2 /// remainder assignment + "+" pub struct Plus/1 /// addition, trait bounds, macro Kleene matcher + "+=" pub struct PlusEq/2 /// addition assignment + "#" pub struct Pound/1 /// attributes + "?" pub struct Question/1 /// question mark operator, questionably sized, macro Kleene matcher + "->" pub struct RArrow/2 /// function return type, closure return type, function pointer type + ";" pub struct Semi/1 /// terminator for various items and statements, array types + "<<" pub struct Shl/2 /// shift left, nested generics + "<<=" pub struct ShlEq/3 /// shift left assignment + ">>" pub struct Shr/2 /// shift right, nested generics + ">>=" pub struct ShrEq/3 /// shift right assignment, nested generics + "/" pub struct Slash/1 /// division + "/=" pub struct SlashEq/2 /// division assignment + "*" pub struct Star/1 /// multiplication, dereference, raw pointers, macro Kleene matcher, use wildcards + "*=" pub struct StarEq/2 /// multiplication assignment + "~" pub struct Tilde/1 /// unused since before Rust 1.0 +} + +define_delimiters! { + Brace pub struct Brace /// `{`…`}` + Bracket pub struct Bracket /// `[`…`]` + Parenthesis pub struct Paren /// `(`…`)` +} + +/// A type-macro that expands to the name of the Rust type representation of a +/// given token. +/// +/// As a type, `Token!` is commonly used in the type of struct fields, the type +/// of a `let` statement, or in turbofish for a `parse` function. +/// +/// ``` +/// use syn::{Ident, Token}; +/// use syn::parse::{Parse, ParseStream, Result}; +/// +/// // `struct Foo;` +/// pub struct UnitStruct { +/// struct_token: Token![struct], +/// ident: Ident, +/// semi_token: Token![;], +/// } +/// +/// impl Parse for UnitStruct { +/// fn parse(input: ParseStream) -> Result<Self> { +/// let struct_token: Token![struct] = input.parse()?; +/// let ident: Ident = input.parse()?; +/// let semi_token = input.parse::<Token![;]>()?; +/// Ok(UnitStruct { struct_token, ident, semi_token }) +/// } +/// } +/// ``` +/// +/// As an expression, `Token!` is used for peeking tokens or instantiating +/// tokens from a span. +/// +/// ``` +/// # use syn::{Ident, Token}; +/// # use syn::parse::{Parse, ParseStream, Result}; +/// # +/// # struct UnitStruct { +/// # struct_token: Token![struct], +/// # ident: Ident, +/// # semi_token: Token![;], +/// # } +/// # +/// # impl Parse for UnitStruct { +/// # fn parse(input: ParseStream) -> Result<Self> { +/// # unimplemented!() +/// # } +/// # } +/// # +/// fn make_unit_struct(name: Ident) -> UnitStruct { +/// let span = name.span(); +/// UnitStruct { +/// struct_token: Token![struct](span), +/// ident: name, +/// semi_token: Token![;](span), +/// } +/// } +/// +/// # fn parse(input: ParseStream) -> Result<()> { +/// if input.peek(Token![struct]) { +/// let unit_struct: UnitStruct = input.parse()?; +/// /* ... */ +/// } +/// # Ok(()) +/// # } +/// ``` +/// +/// See the [token module] documentation for details and examples. +/// +/// [token module]: crate::token +#[macro_export] +macro_rules! 
Token { + [abstract] => { $crate::token::Abstract }; + [as] => { $crate::token::As }; + [async] => { $crate::token::Async }; + [auto] => { $crate::token::Auto }; + [await] => { $crate::token::Await }; + [become] => { $crate::token::Become }; + [box] => { $crate::token::Box }; + [break] => { $crate::token::Break }; + [const] => { $crate::token::Const }; + [continue] => { $crate::token::Continue }; + [crate] => { $crate::token::Crate }; + [default] => { $crate::token::Default }; + [do] => { $crate::token::Do }; + [dyn] => { $crate::token::Dyn }; + [else] => { $crate::token::Else }; + [enum] => { $crate::token::Enum }; + [extern] => { $crate::token::Extern }; + [final] => { $crate::token::Final }; + [fn] => { $crate::token::Fn }; + [for] => { $crate::token::For }; + [if] => { $crate::token::If }; + [impl] => { $crate::token::Impl }; + [in] => { $crate::token::In }; + [let] => { $crate::token::Let }; + [loop] => { $crate::token::Loop }; + [macro] => { $crate::token::Macro }; + [match] => { $crate::token::Match }; + [mod] => { $crate::token::Mod }; + [move] => { $crate::token::Move }; + [mut] => { $crate::token::Mut }; + [override] => { $crate::token::Override }; + [priv] => { $crate::token::Priv }; + [pub] => { $crate::token::Pub }; + [raw] => { $crate::token::Raw }; + [ref] => { $crate::token::Ref }; + [return] => { $crate::token::Return }; + [Self] => { $crate::token::SelfType }; + [self] => { $crate::token::SelfValue }; + [static] => { $crate::token::Static }; + [struct] => { $crate::token::Struct }; + [super] => { $crate::token::Super }; + [trait] => { $crate::token::Trait }; + [try] => { $crate::token::Try }; + [type] => { $crate::token::Type }; + [typeof] => { $crate::token::Typeof }; + [union] => { $crate::token::Union }; + [unsafe] => { $crate::token::Unsafe }; + [unsized] => { $crate::token::Unsized }; + [use] => { $crate::token::Use }; + [virtual] => { $crate::token::Virtual }; + [where] => { $crate::token::Where }; + [while] => { $crate::token::While }; + [yield] => { $crate::token::Yield }; + [&] => { $crate::token::And }; + [&&] => { $crate::token::AndAnd }; + [&=] => { $crate::token::AndEq }; + [@] => { $crate::token::At }; + [^] => { $crate::token::Caret }; + [^=] => { $crate::token::CaretEq }; + [:] => { $crate::token::Colon }; + [,] => { $crate::token::Comma }; + [$] => { $crate::token::Dollar }; + [.] => { $crate::token::Dot }; + [..] => { $crate::token::DotDot }; + [...] => { $crate::token::DotDotDot }; + [..=] => { $crate::token::DotDotEq }; + [=] => { $crate::token::Eq }; + [==] => { $crate::token::EqEq }; + [=>] => { $crate::token::FatArrow }; + [>=] => { $crate::token::Ge }; + [>] => { $crate::token::Gt }; + [<-] => { $crate::token::LArrow }; + [<=] => { $crate::token::Le }; + [<] => { $crate::token::Lt }; + [-] => { $crate::token::Minus }; + [-=] => { $crate::token::MinusEq }; + [!=] => { $crate::token::Ne }; + [!] => { $crate::token::Not }; + [|] => { $crate::token::Or }; + [|=] => { $crate::token::OrEq }; + [||] => { $crate::token::OrOr }; + [::] => { $crate::token::PathSep }; + [%] => { $crate::token::Percent }; + [%=] => { $crate::token::PercentEq }; + [+] => { $crate::token::Plus }; + [+=] => { $crate::token::PlusEq }; + [#] => { $crate::token::Pound }; + [?] 
=> { $crate::token::Question }; + [->] => { $crate::token::RArrow }; + [;] => { $crate::token::Semi }; + [<<] => { $crate::token::Shl }; + [<<=] => { $crate::token::ShlEq }; + [>>] => { $crate::token::Shr }; + [>>=] => { $crate::token::ShrEq }; + [/] => { $crate::token::Slash }; + [/=] => { $crate::token::SlashEq }; + [*] => { $crate::token::Star }; + [*=] => { $crate::token::StarEq }; + [~] => { $crate::token::Tilde }; + [_] => { $crate::token::Underscore }; +} + +// Not public API. +#[doc(hidden)] +#[cfg(feature = "parsing")] +pub(crate) mod parsing { + use crate::buffer::Cursor; + use crate::error::{Error, Result}; + use crate::parse::ParseStream; + use proc_macro2::{Spacing, Span}; + + pub(crate) fn keyword(input: ParseStream, token: &str) -> Result<Span> { + input.step(|cursor| { + if let Some((ident, rest)) = cursor.ident() { + if ident == token { + return Ok((ident.span(), rest)); + } + } + Err(cursor.error(format!("expected `{}`", token))) + }) + } + + pub(crate) fn peek_keyword(cursor: Cursor, token: &str) -> bool { + if let Some((ident, _rest)) = cursor.ident() { + ident == token + } else { + false + } + } + + #[doc(hidden)] + pub fn punct<const N: usize>(input: ParseStream, token: &str) -> Result<[Span; N]> { + let mut spans = [input.span(); N]; + punct_helper(input, token, &mut spans)?; + Ok(spans) + } + + fn punct_helper(input: ParseStream, token: &str, spans: &mut [Span]) -> Result<()> { + input.step(|cursor| { + let mut cursor = *cursor; + assert_eq!(token.len(), spans.len()); + + for (i, ch) in token.chars().enumerate() { + match cursor.punct() { + Some((punct, rest)) => { + spans[i] = punct.span(); + if punct.as_char() != ch { + break; + } else if i == token.len() - 1 { + return Ok(((), rest)); + } else if punct.spacing() != Spacing::Joint { + break; + } + cursor = rest; + } + None => break, + } + } + + Err(Error::new(spans[0], format!("expected `{}`", token))) + }) + } + + #[doc(hidden)] + pub fn peek_punct(mut cursor: Cursor, token: &str) -> bool { + for (i, ch) in token.chars().enumerate() { + match cursor.punct() { + Some((punct, rest)) => { + if punct.as_char() != ch { + break; + } else if i == token.len() - 1 { + return true; + } else if punct.spacing() != Spacing::Joint { + break; + } + cursor = rest; + } + None => break, + } + } + false + } +} + +// Not public API. 
+#[doc(hidden)] +#[cfg(feature = "printing")] +pub(crate) mod printing { + use crate::ext::PunctExt as _; + use proc_macro2::{Delimiter, Group, Ident, Punct, Spacing, Span, TokenStream}; + use quote::TokenStreamExt as _; + + #[doc(hidden)] + pub fn punct(s: &str, spans: &[Span], tokens: &mut TokenStream) { + assert_eq!(s.len(), spans.len()); + + let mut chars = s.chars(); + let mut spans = spans.iter(); + let ch = chars.next_back().unwrap(); + let span = spans.next_back().unwrap(); + for (ch, span) in chars.zip(spans) { + tokens.append(Punct::new_spanned(ch, Spacing::Joint, *span)); + } + + tokens.append(Punct::new_spanned(ch, Spacing::Alone, *span)); + } + + pub(crate) fn keyword(s: &str, span: Span, tokens: &mut TokenStream) { + tokens.append(Ident::new(s, span)); + } + + pub(crate) fn delim( + delim: Delimiter, + span: Span, + tokens: &mut TokenStream, + inner: TokenStream, + ) { + let mut g = Group::new(delim, inner); + g.set_span(span); + tokens.append(g); + } +} diff --git a/vendor/syn/src/tt.rs b/vendor/syn/src/tt.rs new file mode 100644 index 00000000000000..2a9843e1a1c8ad --- /dev/null +++ b/vendor/syn/src/tt.rs @@ -0,0 +1,96 @@ +use proc_macro2::{Delimiter, Spacing, TokenStream, TokenTree}; +use std::hash::{Hash, Hasher}; + +pub(crate) struct TokenTreeHelper<'a>(pub &'a TokenTree); + +impl<'a> PartialEq for TokenTreeHelper<'a> { + fn eq(&self, other: &Self) -> bool { + match (self.0, other.0) { + (TokenTree::Group(g1), TokenTree::Group(g2)) => { + match (g1.delimiter(), g2.delimiter()) { + (Delimiter::Parenthesis, Delimiter::Parenthesis) + | (Delimiter::Brace, Delimiter::Brace) + | (Delimiter::Bracket, Delimiter::Bracket) + | (Delimiter::None, Delimiter::None) => {} + _ => return false, + } + + TokenStreamHelper(&g1.stream()) == TokenStreamHelper(&g2.stream()) + } + (TokenTree::Punct(o1), TokenTree::Punct(o2)) => { + o1.as_char() == o2.as_char() + && match (o1.spacing(), o2.spacing()) { + (Spacing::Alone, Spacing::Alone) | (Spacing::Joint, Spacing::Joint) => true, + _ => false, + } + } + (TokenTree::Literal(l1), TokenTree::Literal(l2)) => l1.to_string() == l2.to_string(), + (TokenTree::Ident(s1), TokenTree::Ident(s2)) => s1 == s2, + _ => false, + } + } +} + +impl<'a> Hash for TokenTreeHelper<'a> { + fn hash<H: Hasher>(&self, h: &mut H) { + match self.0 { + TokenTree::Group(g) => { + 0u8.hash(h); + match g.delimiter() { + Delimiter::Parenthesis => 0u8.hash(h), + Delimiter::Brace => 1u8.hash(h), + Delimiter::Bracket => 2u8.hash(h), + Delimiter::None => 3u8.hash(h), + } + + for item in g.stream() { + TokenTreeHelper(&item).hash(h); + } + 0xFFu8.hash(h); // terminator w/ a variant we don't normally hash + } + TokenTree::Punct(op) => { + 1u8.hash(h); + op.as_char().hash(h); + match op.spacing() { + Spacing::Alone => 0u8.hash(h), + Spacing::Joint => 1u8.hash(h), + } + } + TokenTree::Literal(lit) => (2u8, lit.to_string()).hash(h), + TokenTree::Ident(word) => (3u8, word).hash(h), + } + } +} + +pub(crate) struct TokenStreamHelper<'a>(pub &'a TokenStream); + +impl<'a> PartialEq for TokenStreamHelper<'a> { + fn eq(&self, other: &Self) -> bool { + let left = self.0.clone().into_iter(); + let mut right = other.0.clone().into_iter(); + + for item1 in left { + let item2 = match right.next() { + Some(item) => item, + None => return false, + }; + if TokenTreeHelper(&item1) != TokenTreeHelper(&item2) { + return false; + } + } + + right.next().is_none() + } +} + +impl<'a> Hash for TokenStreamHelper<'a> { + fn hash<H: Hasher>(&self, state: &mut H) { + let tokens = self.0.clone().into_iter(); + + 
tokens.clone().count().hash(state); + + for tt in tokens { + TokenTreeHelper(&tt).hash(state); + } + } +} diff --git a/vendor/syn/src/ty.rs b/vendor/syn/src/ty.rs new file mode 100644 index 00000000000000..5b4177f6875418 --- /dev/null +++ b/vendor/syn/src/ty.rs @@ -0,0 +1,1271 @@ +use crate::attr::Attribute; +use crate::expr::Expr; +use crate::generics::{BoundLifetimes, TypeParamBound}; +use crate::ident::Ident; +use crate::lifetime::Lifetime; +use crate::lit::LitStr; +use crate::mac::Macro; +use crate::path::{Path, QSelf}; +use crate::punctuated::Punctuated; +use crate::token; +use proc_macro2::TokenStream; + +ast_enum_of_structs! { + /// The possible types that a Rust value could have. + /// + /// # Syntax tree enum + /// + /// This type is a [syntax tree enum]. + /// + /// [syntax tree enum]: crate::expr::Expr#syntax-tree-enums + #[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] + #[non_exhaustive] + pub enum Type { + /// A fixed size array type: `[T; n]`. + Array(TypeArray), + + /// A bare function type: `fn(usize) -> bool`. + BareFn(TypeBareFn), + + /// A type contained within invisible delimiters. + Group(TypeGroup), + + /// An `impl Bound1 + Bound2 + Bound3` type where `Bound` is a trait or + /// a lifetime. + ImplTrait(TypeImplTrait), + + /// Indication that a type should be inferred by the compiler: `_`. + Infer(TypeInfer), + + /// A macro in the type position. + Macro(TypeMacro), + + /// The never type: `!`. + Never(TypeNever), + + /// A parenthesized type equivalent to the inner type. + Paren(TypeParen), + + /// A path like `std::slice::Iter`, optionally qualified with a + /// self-type as in `<Vec<T> as SomeTrait>::Associated`. + Path(TypePath), + + /// A raw pointer type: `*const T` or `*mut T`. + Ptr(TypePtr), + + /// A reference type: `&'a T` or `&'a mut T`. + Reference(TypeReference), + + /// A dynamically sized slice type: `[T]`. + Slice(TypeSlice), + + /// A trait object type `dyn Bound1 + Bound2 + Bound3` where `Bound` is a + /// trait or a lifetime. + TraitObject(TypeTraitObject), + + /// A tuple type: `(A, B, C, String)`. + Tuple(TypeTuple), + + /// Tokens in type position not interpreted by Syn. + Verbatim(TokenStream), + + // For testing exhaustiveness in downstream code, use the following idiom: + // + // match ty { + // #![cfg_attr(test, deny(non_exhaustive_omitted_patterns))] + // + // Type::Array(ty) => {...} + // Type::BareFn(ty) => {...} + // ... + // Type::Verbatim(ty) => {...} + // + // _ => { /* some sane fallback */ } + // } + // + // This way we fail your tests but don't break your library when adding + // a variant. You will be notified by a test failure when a variant is + // added, so that you can add code to handle it, but your library will + // continue to compile and work for downstream users in the interim. + } +} + +ast_struct! { + /// A fixed size array type: `[T; n]`. + #[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] + pub struct TypeArray { + pub bracket_token: token::Bracket, + pub elem: Box<Type>, + pub semi_token: Token![;], + pub len: Expr, + } +} + +ast_struct! { + /// A bare function type: `fn(usize) -> bool`. 
+ #[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] + pub struct TypeBareFn { + pub lifetimes: Option<BoundLifetimes>, + pub unsafety: Option<Token![unsafe]>, + pub abi: Option<Abi>, + pub fn_token: Token![fn], + pub paren_token: token::Paren, + pub inputs: Punctuated<BareFnArg, Token![,]>, + pub variadic: Option<BareVariadic>, + pub output: ReturnType, + } +} + +ast_struct! { + /// A type contained within invisible delimiters. + #[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] + pub struct TypeGroup { + pub group_token: token::Group, + pub elem: Box<Type>, + } +} + +ast_struct! { + /// An `impl Bound1 + Bound2 + Bound3` type where `Bound` is a trait or + /// a lifetime. + #[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] + pub struct TypeImplTrait { + pub impl_token: Token![impl], + pub bounds: Punctuated<TypeParamBound, Token![+]>, + } +} + +ast_struct! { + /// Indication that a type should be inferred by the compiler: `_`. + #[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] + pub struct TypeInfer { + pub underscore_token: Token![_], + } +} + +ast_struct! { + /// A macro in the type position. + #[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] + pub struct TypeMacro { + pub mac: Macro, + } +} + +ast_struct! { + /// The never type: `!`. + #[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] + pub struct TypeNever { + pub bang_token: Token![!], + } +} + +ast_struct! { + /// A parenthesized type equivalent to the inner type. + #[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] + pub struct TypeParen { + pub paren_token: token::Paren, + pub elem: Box<Type>, + } +} + +ast_struct! { + /// A path like `std::slice::Iter`, optionally qualified with a + /// self-type as in `<Vec<T> as SomeTrait>::Associated`. + #[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] + pub struct TypePath { + pub qself: Option<QSelf>, + pub path: Path, + } +} + +ast_struct! { + /// A raw pointer type: `*const T` or `*mut T`. + #[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] + pub struct TypePtr { + pub star_token: Token![*], + pub const_token: Option<Token![const]>, + pub mutability: Option<Token![mut]>, + pub elem: Box<Type>, + } +} + +ast_struct! { + /// A reference type: `&'a T` or `&'a mut T`. + #[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] + pub struct TypeReference { + pub and_token: Token![&], + pub lifetime: Option<Lifetime>, + pub mutability: Option<Token![mut]>, + pub elem: Box<Type>, + } +} + +ast_struct! { + /// A dynamically sized slice type: `[T]`. + #[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] + pub struct TypeSlice { + pub bracket_token: token::Bracket, + pub elem: Box<Type>, + } +} + +ast_struct! { + /// A trait object type `dyn Bound1 + Bound2 + Bound3` where `Bound` is a + /// trait or a lifetime. + #[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] + pub struct TypeTraitObject { + pub dyn_token: Option<Token![dyn]>, + pub bounds: Punctuated<TypeParamBound, Token![+]>, + } +} + +ast_struct! { + /// A tuple type: `(A, B, C, String)`. + #[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] + pub struct TypeTuple { + pub paren_token: token::Paren, + pub elems: Punctuated<Type, Token![,]>, + } +} + +ast_struct! { + /// The binary interface of a function: `extern "C"`. 
+ #[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] + pub struct Abi { + pub extern_token: Token![extern], + pub name: Option<LitStr>, + } +} + +ast_struct! { + /// An argument in a function type: the `usize` in `fn(usize) -> bool`. + #[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] + pub struct BareFnArg { + pub attrs: Vec<Attribute>, + pub name: Option<(Ident, Token![:])>, + pub ty: Type, + } +} + +ast_struct! { + /// The variadic argument of a function pointer like `fn(usize, ...)`. + #[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] + pub struct BareVariadic { + pub attrs: Vec<Attribute>, + pub name: Option<(Ident, Token![:])>, + pub dots: Token![...], + pub comma: Option<Token![,]>, + } +} + +ast_enum! { + /// Return type of a function signature. + #[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] + pub enum ReturnType { + /// Return type is not specified. + /// + /// Functions default to `()` and closures default to type inference. + Default, + /// A particular type is returned. + Type(Token![->], Box<Type>), + } +} + +#[cfg(feature = "parsing")] +pub(crate) mod parsing { + use crate::attr::Attribute; + use crate::error::{self, Result}; + use crate::ext::IdentExt as _; + use crate::generics::{BoundLifetimes, TraitBound, TraitBoundModifier, TypeParamBound}; + use crate::ident::Ident; + use crate::lifetime::Lifetime; + use crate::mac::{self, Macro}; + use crate::parse::{Parse, ParseStream}; + use crate::path; + use crate::path::{Path, PathArguments, QSelf}; + use crate::punctuated::Punctuated; + use crate::token; + use crate::ty::{ + Abi, BareFnArg, BareVariadic, ReturnType, Type, TypeArray, TypeBareFn, TypeGroup, + TypeImplTrait, TypeInfer, TypeMacro, TypeNever, TypeParen, TypePath, TypePtr, + TypeReference, TypeSlice, TypeTraitObject, TypeTuple, + }; + use crate::verbatim; + use proc_macro2::Span; + + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + impl Parse for Type { + fn parse(input: ParseStream) -> Result<Self> { + let allow_plus = true; + let allow_group_generic = true; + ambig_ty(input, allow_plus, allow_group_generic) + } + } + + impl Type { + /// In some positions, types may not contain the `+` character, to + /// disambiguate them. For example in the expression `1 as T`, T may not + /// contain a `+` character. + /// + /// This parser does not allow a `+`, while the default parser does. 
+ #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + pub fn without_plus(input: ParseStream) -> Result<Self> { + let allow_plus = false; + let allow_group_generic = true; + ambig_ty(input, allow_plus, allow_group_generic) + } + } + + pub(crate) fn ambig_ty( + input: ParseStream, + allow_plus: bool, + allow_group_generic: bool, + ) -> Result<Type> { + let begin = input.fork(); + + if input.peek(token::Group) { + let mut group: TypeGroup = input.parse()?; + if input.peek(Token![::]) && input.peek3(Ident::peek_any) { + if let Type::Path(mut ty) = *group.elem { + Path::parse_rest(input, &mut ty.path, false)?; + return Ok(Type::Path(ty)); + } else { + return Ok(Type::Path(TypePath { + qself: Some(QSelf { + lt_token: Token![<](group.group_token.span), + position: 0, + as_token: None, + gt_token: Token![>](group.group_token.span), + ty: group.elem, + }), + path: Path::parse_helper(input, false)?, + })); + } + } else if input.peek(Token![<]) && allow_group_generic + || input.peek(Token![::]) && input.peek3(Token![<]) + { + if let Type::Path(mut ty) = *group.elem { + let arguments = &mut ty.path.segments.last_mut().unwrap().arguments; + if arguments.is_none() { + *arguments = PathArguments::AngleBracketed(input.parse()?); + Path::parse_rest(input, &mut ty.path, false)?; + return Ok(Type::Path(ty)); + } else { + *group.elem = Type::Path(ty); + } + } + } + return Ok(Type::Group(group)); + } + + let mut lifetimes = None::<BoundLifetimes>; + let mut lookahead = input.lookahead1(); + if lookahead.peek(Token![for]) { + lifetimes = input.parse()?; + lookahead = input.lookahead1(); + if !lookahead.peek(Ident) + && !lookahead.peek(Token![fn]) + && !lookahead.peek(Token![unsafe]) + && !lookahead.peek(Token![extern]) + && !lookahead.peek(Token![super]) + && !lookahead.peek(Token![self]) + && !lookahead.peek(Token![Self]) + && !lookahead.peek(Token![crate]) + || input.peek(Token![dyn]) + { + return Err(lookahead.error()); + } + } + + if lookahead.peek(token::Paren) { + let content; + let paren_token = parenthesized!(content in input); + if content.is_empty() { + return Ok(Type::Tuple(TypeTuple { + paren_token, + elems: Punctuated::new(), + })); + } + if content.peek(Lifetime) { + return Ok(Type::Paren(TypeParen { + paren_token, + elem: Box::new(Type::TraitObject(content.parse()?)), + })); + } + if content.peek(Token![?]) { + return Ok(Type::TraitObject(TypeTraitObject { + dyn_token: None, + bounds: { + let mut bounds = Punctuated::new(); + bounds.push_value(TypeParamBound::Trait(TraitBound { + paren_token: Some(paren_token), + ..content.parse()? + })); + while let Some(plus) = input.parse()? { + bounds.push_punct(plus); + bounds.push_value({ + let allow_precise_capture = false; + let allow_const = false; + TypeParamBound::parse_single( + input, + allow_precise_capture, + allow_const, + )? 
+ }); + } + bounds + }, + })); + } + let mut first: Type = content.parse()?; + if content.peek(Token![,]) { + return Ok(Type::Tuple(TypeTuple { + paren_token, + elems: { + let mut elems = Punctuated::new(); + elems.push_value(first); + elems.push_punct(content.parse()?); + while !content.is_empty() { + elems.push_value(content.parse()?); + if content.is_empty() { + break; + } + elems.push_punct(content.parse()?); + } + elems + }, + })); + } + if allow_plus && input.peek(Token![+]) { + loop { + let first = match first { + Type::Path(TypePath { qself: None, path }) => { + TypeParamBound::Trait(TraitBound { + paren_token: Some(paren_token), + modifier: TraitBoundModifier::None, + lifetimes: None, + path, + }) + } + Type::TraitObject(TypeTraitObject { + dyn_token: None, + bounds, + }) => { + if bounds.len() > 1 || bounds.trailing_punct() { + first = Type::TraitObject(TypeTraitObject { + dyn_token: None, + bounds, + }); + break; + } + match bounds.into_iter().next().unwrap() { + TypeParamBound::Trait(trait_bound) => { + TypeParamBound::Trait(TraitBound { + paren_token: Some(paren_token), + ..trait_bound + }) + } + other @ (TypeParamBound::Lifetime(_) + | TypeParamBound::PreciseCapture(_) + | TypeParamBound::Verbatim(_)) => other, + } + } + _ => break, + }; + return Ok(Type::TraitObject(TypeTraitObject { + dyn_token: None, + bounds: { + let mut bounds = Punctuated::new(); + bounds.push_value(first); + while let Some(plus) = input.parse()? { + bounds.push_punct(plus); + bounds.push_value({ + let allow_precise_capture = false; + let allow_const = false; + TypeParamBound::parse_single( + input, + allow_precise_capture, + allow_const, + )? + }); + } + bounds + }, + })); + } + } + Ok(Type::Paren(TypeParen { + paren_token, + elem: Box::new(first), + })) + } else if lookahead.peek(Token![fn]) + || lookahead.peek(Token![unsafe]) + || lookahead.peek(Token![extern]) + { + let mut bare_fn: TypeBareFn = input.parse()?; + bare_fn.lifetimes = lifetimes; + Ok(Type::BareFn(bare_fn)) + } else if lookahead.peek(Ident) + || input.peek(Token![super]) + || input.peek(Token![self]) + || input.peek(Token![Self]) + || input.peek(Token![crate]) + || lookahead.peek(Token![::]) + || lookahead.peek(Token![<]) + { + let ty: TypePath = input.parse()?; + if ty.qself.is_some() { + return Ok(Type::Path(ty)); + } + + if input.peek(Token![!]) && !input.peek(Token![!=]) && ty.path.is_mod_style() { + let bang_token: Token![!] = input.parse()?; + let (delimiter, tokens) = mac::parse_delimiter(input)?; + return Ok(Type::Macro(TypeMacro { + mac: Macro { + path: ty.path, + bang_token, + delimiter, + tokens, + }, + })); + } + + if lifetimes.is_some() || allow_plus && input.peek(Token![+]) { + let mut bounds = Punctuated::new(); + bounds.push_value(TypeParamBound::Trait(TraitBound { + paren_token: None, + modifier: TraitBoundModifier::None, + lifetimes, + path: ty.path, + })); + if allow_plus { + while input.peek(Token![+]) { + bounds.push_punct(input.parse()?); + if !(input.peek(Ident::peek_any) + || input.peek(Token![::]) + || input.peek(Token![?]) + || input.peek(Lifetime) + || input.peek(token::Paren)) + { + break; + } + bounds.push_value({ + let allow_precise_capture = false; + let allow_const = false; + TypeParamBound::parse_single(input, allow_precise_capture, allow_const)? 
+ }); + } + } + return Ok(Type::TraitObject(TypeTraitObject { + dyn_token: None, + bounds, + })); + } + + Ok(Type::Path(ty)) + } else if lookahead.peek(Token![dyn]) { + let dyn_token: Token![dyn] = input.parse()?; + let dyn_span = dyn_token.span; + let star_token: Option<Token![*]> = input.parse()?; + let bounds = TypeTraitObject::parse_bounds(dyn_span, input, allow_plus)?; + Ok(if star_token.is_some() { + Type::Verbatim(verbatim::between(&begin, input)) + } else { + Type::TraitObject(TypeTraitObject { + dyn_token: Some(dyn_token), + bounds, + }) + }) + } else if lookahead.peek(token::Bracket) { + let content; + let bracket_token = bracketed!(content in input); + let elem: Type = content.parse()?; + if content.peek(Token![;]) { + Ok(Type::Array(TypeArray { + bracket_token, + elem: Box::new(elem), + semi_token: content.parse()?, + len: content.parse()?, + })) + } else { + Ok(Type::Slice(TypeSlice { + bracket_token, + elem: Box::new(elem), + })) + } + } else if lookahead.peek(Token![*]) { + input.parse().map(Type::Ptr) + } else if lookahead.peek(Token![&]) { + input.parse().map(Type::Reference) + } else if lookahead.peek(Token![!]) && !input.peek(Token![=]) { + input.parse().map(Type::Never) + } else if lookahead.peek(Token![impl]) { + TypeImplTrait::parse(input, allow_plus).map(Type::ImplTrait) + } else if lookahead.peek(Token![_]) { + input.parse().map(Type::Infer) + } else if lookahead.peek(Lifetime) { + input.parse().map(Type::TraitObject) + } else { + Err(lookahead.error()) + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + impl Parse for TypeSlice { + fn parse(input: ParseStream) -> Result<Self> { + let content; + Ok(TypeSlice { + bracket_token: bracketed!(content in input), + elem: content.parse()?, + }) + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + impl Parse for TypeArray { + fn parse(input: ParseStream) -> Result<Self> { + let content; + Ok(TypeArray { + bracket_token: bracketed!(content in input), + elem: content.parse()?, + semi_token: content.parse()?, + len: content.parse()?, + }) + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + impl Parse for TypePtr { + fn parse(input: ParseStream) -> Result<Self> { + let star_token: Token![*] = input.parse()?; + + let lookahead = input.lookahead1(); + let (const_token, mutability) = if lookahead.peek(Token![const]) { + (Some(input.parse()?), None) + } else if lookahead.peek(Token![mut]) { + (None, Some(input.parse()?)) + } else { + return Err(lookahead.error()); + }; + + Ok(TypePtr { + star_token, + const_token, + mutability, + elem: Box::new(input.call(Type::without_plus)?), + }) + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + impl Parse for TypeReference { + fn parse(input: ParseStream) -> Result<Self> { + Ok(TypeReference { + and_token: input.parse()?, + lifetime: input.parse()?, + mutability: input.parse()?, + // & binds tighter than +, so we don't allow + here. 
+ elem: Box::new(input.call(Type::without_plus)?), + }) + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + impl Parse for TypeBareFn { + fn parse(input: ParseStream) -> Result<Self> { + let args; + let mut variadic = None; + + Ok(TypeBareFn { + lifetimes: input.parse()?, + unsafety: input.parse()?, + abi: input.parse()?, + fn_token: input.parse()?, + paren_token: parenthesized!(args in input), + inputs: { + let mut inputs = Punctuated::new(); + + while !args.is_empty() { + let attrs = args.call(Attribute::parse_outer)?; + + if inputs.empty_or_trailing() + && (args.peek(Token![...]) + || (args.peek(Ident) || args.peek(Token![_])) + && args.peek2(Token![:]) + && args.peek3(Token![...])) + { + variadic = Some(parse_bare_variadic(&args, attrs)?); + break; + } + + let allow_self = inputs.is_empty(); + let arg = parse_bare_fn_arg(&args, allow_self)?; + inputs.push_value(BareFnArg { attrs, ..arg }); + if args.is_empty() { + break; + } + + let comma = args.parse()?; + inputs.push_punct(comma); + } + + inputs + }, + variadic, + output: input.call(ReturnType::without_plus)?, + }) + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + impl Parse for TypeNever { + fn parse(input: ParseStream) -> Result<Self> { + Ok(TypeNever { + bang_token: input.parse()?, + }) + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + impl Parse for TypeInfer { + fn parse(input: ParseStream) -> Result<Self> { + Ok(TypeInfer { + underscore_token: input.parse()?, + }) + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + impl Parse for TypeTuple { + fn parse(input: ParseStream) -> Result<Self> { + let content; + let paren_token = parenthesized!(content in input); + + if content.is_empty() { + return Ok(TypeTuple { + paren_token, + elems: Punctuated::new(), + }); + } + + let first: Type = content.parse()?; + Ok(TypeTuple { + paren_token, + elems: { + let mut elems = Punctuated::new(); + elems.push_value(first); + elems.push_punct(content.parse()?); + while !content.is_empty() { + elems.push_value(content.parse()?); + if content.is_empty() { + break; + } + elems.push_punct(content.parse()?); + } + elems + }, + }) + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + impl Parse for TypeMacro { + fn parse(input: ParseStream) -> Result<Self> { + Ok(TypeMacro { + mac: input.parse()?, + }) + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + impl Parse for TypePath { + fn parse(input: ParseStream) -> Result<Self> { + let expr_style = false; + let (qself, path) = path::parsing::qpath(input, expr_style)?; + Ok(TypePath { qself, path }) + } + } + + impl ReturnType { + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + pub fn without_plus(input: ParseStream) -> Result<Self> { + let allow_plus = false; + Self::parse(input, allow_plus) + } + + pub(crate) fn parse(input: ParseStream, allow_plus: bool) -> Result<Self> { + if input.peek(Token![->]) { + let arrow = input.parse()?; + let allow_group_generic = true; + let ty = ambig_ty(input, allow_plus, allow_group_generic)?; + Ok(ReturnType::Type(arrow, Box::new(ty))) + } else { + Ok(ReturnType::Default) + } + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + impl Parse for ReturnType { + fn parse(input: ParseStream) -> Result<Self> { + let allow_plus = true; + Self::parse(input, allow_plus) + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + impl Parse for TypeTraitObject { + fn parse(input: ParseStream) -> Result<Self> { + let allow_plus = true; + Self::parse(input, allow_plus) + } + } + + 
impl TypeTraitObject { + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + pub fn without_plus(input: ParseStream) -> Result<Self> { + let allow_plus = false; + Self::parse(input, allow_plus) + } + + // Only allow multiple trait references if allow_plus is true. + pub(crate) fn parse(input: ParseStream, allow_plus: bool) -> Result<Self> { + let dyn_token: Option<Token![dyn]> = input.parse()?; + let dyn_span = match &dyn_token { + Some(token) => token.span, + None => input.span(), + }; + let bounds = Self::parse_bounds(dyn_span, input, allow_plus)?; + Ok(TypeTraitObject { dyn_token, bounds }) + } + + fn parse_bounds( + dyn_span: Span, + input: ParseStream, + allow_plus: bool, + ) -> Result<Punctuated<TypeParamBound, Token![+]>> { + let allow_precise_capture = false; + let allow_const = false; + let bounds = TypeParamBound::parse_multiple( + input, + allow_plus, + allow_precise_capture, + allow_const, + )?; + let mut last_lifetime_span = None; + let mut at_least_one_trait = false; + for bound in &bounds { + match bound { + TypeParamBound::Trait(_) => { + at_least_one_trait = true; + break; + } + TypeParamBound::Lifetime(lifetime) => { + last_lifetime_span = Some(lifetime.ident.span()); + } + TypeParamBound::PreciseCapture(_) | TypeParamBound::Verbatim(_) => { + unreachable!() + } + } + } + // Just lifetimes like `'a + 'b` is not a TraitObject. + if !at_least_one_trait { + let msg = "at least one trait is required for an object type"; + return Err(error::new2(dyn_span, last_lifetime_span.unwrap(), msg)); + } + Ok(bounds) + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + impl Parse for TypeImplTrait { + fn parse(input: ParseStream) -> Result<Self> { + let allow_plus = true; + Self::parse(input, allow_plus) + } + } + + impl TypeImplTrait { + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + pub fn without_plus(input: ParseStream) -> Result<Self> { + let allow_plus = false; + Self::parse(input, allow_plus) + } + + pub(crate) fn parse(input: ParseStream, allow_plus: bool) -> Result<Self> { + let impl_token: Token![impl] = input.parse()?; + let allow_precise_capture = true; + let allow_const = true; + let bounds = TypeParamBound::parse_multiple( + input, + allow_plus, + allow_precise_capture, + allow_const, + )?; + let mut last_nontrait_span = None; + let mut at_least_one_trait = false; + for bound in &bounds { + match bound { + TypeParamBound::Trait(_) => { + at_least_one_trait = true; + break; + } + TypeParamBound::Lifetime(lifetime) => { + last_nontrait_span = Some(lifetime.ident.span()); + } + TypeParamBound::PreciseCapture(precise_capture) => { + #[cfg(feature = "full")] + { + last_nontrait_span = Some(precise_capture.gt_token.span); + } + #[cfg(not(feature = "full"))] + { + _ = precise_capture; + unreachable!(); + } + } + TypeParamBound::Verbatim(_) => { + // `[const] Trait` + at_least_one_trait = true; + break; + } + } + } + if !at_least_one_trait { + let msg = "at least one trait must be specified"; + return Err(error::new2( + impl_token.span, + last_nontrait_span.unwrap(), + msg, + )); + } + Ok(TypeImplTrait { impl_token, bounds }) + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + impl Parse for TypeGroup { + fn parse(input: ParseStream) -> Result<Self> { + let group = crate::group::parse_group(input)?; + Ok(TypeGroup { + group_token: group.token, + elem: group.content.parse()?, + }) + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + impl Parse for TypeParen { + fn parse(input: ParseStream) -> Result<Self> { + let allow_plus = false; + 
Self::parse(input, allow_plus) + } + } + + impl TypeParen { + fn parse(input: ParseStream, allow_plus: bool) -> Result<Self> { + let content; + Ok(TypeParen { + paren_token: parenthesized!(content in input), + elem: Box::new({ + let allow_group_generic = true; + ambig_ty(&content, allow_plus, allow_group_generic)? + }), + }) + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + impl Parse for BareFnArg { + fn parse(input: ParseStream) -> Result<Self> { + let allow_self = false; + parse_bare_fn_arg(input, allow_self) + } + } + + fn parse_bare_fn_arg(input: ParseStream, allow_self: bool) -> Result<BareFnArg> { + let attrs = input.call(Attribute::parse_outer)?; + + let begin = input.fork(); + + let has_mut_self = allow_self && input.peek(Token![mut]) && input.peek2(Token![self]); + if has_mut_self { + input.parse::<Token![mut]>()?; + } + + let mut has_self = false; + let mut name = if (input.peek(Ident) || input.peek(Token![_]) || { + has_self = allow_self && input.peek(Token![self]); + has_self + }) && input.peek2(Token![:]) + && !input.peek2(Token![::]) + { + let name = input.call(Ident::parse_any)?; + let colon: Token![:] = input.parse()?; + Some((name, colon)) + } else { + has_self = false; + None + }; + + let ty = if allow_self && !has_self && input.peek(Token![mut]) && input.peek2(Token![self]) + { + input.parse::<Token![mut]>()?; + input.parse::<Token![self]>()?; + None + } else if has_mut_self && name.is_none() { + input.parse::<Token![self]>()?; + None + } else { + Some(input.parse()?) + }; + + let ty = match ty { + Some(ty) if !has_mut_self => ty, + _ => { + name = None; + Type::Verbatim(verbatim::between(&begin, input)) + } + }; + + Ok(BareFnArg { attrs, name, ty }) + } + + fn parse_bare_variadic(input: ParseStream, attrs: Vec<Attribute>) -> Result<BareVariadic> { + Ok(BareVariadic { + attrs, + name: if input.peek(Ident) || input.peek(Token![_]) { + let name = input.call(Ident::parse_any)?; + let colon: Token![:] = input.parse()?; + Some((name, colon)) + } else { + None + }, + dots: input.parse()?, + comma: input.parse()?, + }) + } + + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + impl Parse for Abi { + fn parse(input: ParseStream) -> Result<Self> { + Ok(Abi { + extern_token: input.parse()?, + name: input.parse()?, + }) + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] + impl Parse for Option<Abi> { + fn parse(input: ParseStream) -> Result<Self> { + if input.peek(Token![extern]) { + input.parse().map(Some) + } else { + Ok(None) + } + } + } +} + +#[cfg(feature = "printing")] +mod printing { + use crate::attr::FilterAttrs; + use crate::path; + use crate::path::printing::PathStyle; + use crate::print::TokensOrDefault; + use crate::ty::{ + Abi, BareFnArg, BareVariadic, ReturnType, TypeArray, TypeBareFn, TypeGroup, TypeImplTrait, + TypeInfer, TypeMacro, TypeNever, TypeParen, TypePath, TypePtr, TypeReference, TypeSlice, + TypeTraitObject, TypeTuple, + }; + use proc_macro2::TokenStream; + use quote::{ToTokens, TokenStreamExt as _}; + + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for TypeSlice { + fn to_tokens(&self, tokens: &mut TokenStream) { + self.bracket_token.surround(tokens, |tokens| { + self.elem.to_tokens(tokens); + }); + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for TypeArray { + fn to_tokens(&self, tokens: &mut TokenStream) { + self.bracket_token.surround(tokens, |tokens| { + self.elem.to_tokens(tokens); + self.semi_token.to_tokens(tokens); + self.len.to_tokens(tokens); + }); + } + } + + 
#[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for TypePtr { + fn to_tokens(&self, tokens: &mut TokenStream) { + self.star_token.to_tokens(tokens); + match &self.mutability { + Some(tok) => tok.to_tokens(tokens), + None => { + TokensOrDefault(&self.const_token).to_tokens(tokens); + } + } + self.elem.to_tokens(tokens); + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for TypeReference { + fn to_tokens(&self, tokens: &mut TokenStream) { + self.and_token.to_tokens(tokens); + self.lifetime.to_tokens(tokens); + self.mutability.to_tokens(tokens); + self.elem.to_tokens(tokens); + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for TypeBareFn { + fn to_tokens(&self, tokens: &mut TokenStream) { + self.lifetimes.to_tokens(tokens); + self.unsafety.to_tokens(tokens); + self.abi.to_tokens(tokens); + self.fn_token.to_tokens(tokens); + self.paren_token.surround(tokens, |tokens| { + self.inputs.to_tokens(tokens); + if let Some(variadic) = &self.variadic { + if !self.inputs.empty_or_trailing() { + let span = variadic.dots.spans[0]; + Token![,](span).to_tokens(tokens); + } + variadic.to_tokens(tokens); + } + }); + self.output.to_tokens(tokens); + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for TypeNever { + fn to_tokens(&self, tokens: &mut TokenStream) { + self.bang_token.to_tokens(tokens); + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for TypeTuple { + fn to_tokens(&self, tokens: &mut TokenStream) { + self.paren_token.surround(tokens, |tokens| { + self.elems.to_tokens(tokens); + // If we only have one argument, we need a trailing comma to + // distinguish TypeTuple from TypeParen. + if self.elems.len() == 1 && !self.elems.trailing_punct() { + <Token![,]>::default().to_tokens(tokens); + } + }); + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for TypePath { + fn to_tokens(&self, tokens: &mut TokenStream) { + path::printing::print_qpath(tokens, &self.qself, &self.path, PathStyle::AsWritten); + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for TypeTraitObject { + fn to_tokens(&self, tokens: &mut TokenStream) { + self.dyn_token.to_tokens(tokens); + self.bounds.to_tokens(tokens); + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for TypeImplTrait { + fn to_tokens(&self, tokens: &mut TokenStream) { + self.impl_token.to_tokens(tokens); + self.bounds.to_tokens(tokens); + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for TypeGroup { + fn to_tokens(&self, tokens: &mut TokenStream) { + self.group_token.surround(tokens, |tokens| { + self.elem.to_tokens(tokens); + }); + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for TypeParen { + fn to_tokens(&self, tokens: &mut TokenStream) { + self.paren_token.surround(tokens, |tokens| { + self.elem.to_tokens(tokens); + }); + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for TypeInfer { + fn to_tokens(&self, tokens: &mut TokenStream) { + self.underscore_token.to_tokens(tokens); + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for TypeMacro { + fn to_tokens(&self, tokens: &mut TokenStream) { + self.mac.to_tokens(tokens); + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for ReturnType { + fn to_tokens(&self, tokens: &mut TokenStream) { + match self { + ReturnType::Default => {} + 
ReturnType::Type(arrow, ty) => { + arrow.to_tokens(tokens); + ty.to_tokens(tokens); + } + } + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for BareFnArg { + fn to_tokens(&self, tokens: &mut TokenStream) { + tokens.append_all(self.attrs.outer()); + if let Some((name, colon)) = &self.name { + name.to_tokens(tokens); + colon.to_tokens(tokens); + } + self.ty.to_tokens(tokens); + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for BareVariadic { + fn to_tokens(&self, tokens: &mut TokenStream) { + tokens.append_all(self.attrs.outer()); + if let Some((name, colon)) = &self.name { + name.to_tokens(tokens); + colon.to_tokens(tokens); + } + self.dots.to_tokens(tokens); + self.comma.to_tokens(tokens); + } + } + + #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] + impl ToTokens for Abi { + fn to_tokens(&self, tokens: &mut TokenStream) { + self.extern_token.to_tokens(tokens); + self.name.to_tokens(tokens); + } + } +} diff --git a/vendor/syn/src/verbatim.rs b/vendor/syn/src/verbatim.rs new file mode 100644 index 00000000000000..4a7ea2e1bb2388 --- /dev/null +++ b/vendor/syn/src/verbatim.rs @@ -0,0 +1,33 @@ +use crate::ext::TokenStreamExt as _; +use crate::parse::ParseStream; +use proc_macro2::{Delimiter, TokenStream}; +use std::cmp::Ordering; + +pub(crate) fn between<'a>(begin: ParseStream<'a>, end: ParseStream<'a>) -> TokenStream { + let end = end.cursor(); + let mut cursor = begin.cursor(); + assert!(crate::buffer::same_buffer(end, cursor)); + + let mut tokens = TokenStream::new(); + while cursor != end { + let (tt, next) = cursor.token_tree().unwrap(); + + if crate::buffer::cmp_assuming_same_buffer(end, next) == Ordering::Less { + // A syntax node can cross the boundary of a None-delimited group + // due to such groups being transparent to the parser in most cases. + // Any time this occurs the group is known to be semantically + // irrelevant. 
https://github.com/dtolnay/syn/issues/1235 + if let Some((inside, _span, after)) = cursor.group(Delimiter::None) { + assert!(next == after); + cursor = inside; + continue; + } else { + panic!("verbatim end must not be inside a delimited group"); + } + } + + tokens.append(tt); + cursor = next; + } + tokens +} diff --git a/vendor/syn/src/whitespace.rs b/vendor/syn/src/whitespace.rs new file mode 100644 index 00000000000000..a50b5069a68b92 --- /dev/null +++ b/vendor/syn/src/whitespace.rs @@ -0,0 +1,65 @@ +pub(crate) fn skip(mut s: &str) -> &str { + 'skip: while !s.is_empty() { + let byte = s.as_bytes()[0]; + if byte == b'/' { + if s.starts_with("//") + && (!s.starts_with("///") || s.starts_with("////")) + && !s.starts_with("//!") + { + if let Some(i) = s.find('\n') { + s = &s[i + 1..]; + continue; + } else { + return ""; + } + } else if s.starts_with("/**/") { + s = &s[4..]; + continue; + } else if s.starts_with("/*") + && (!s.starts_with("/**") || s.starts_with("/***")) + && !s.starts_with("/*!") + { + let mut depth = 0; + let bytes = s.as_bytes(); + let mut i = 0; + let upper = bytes.len() - 1; + while i < upper { + if bytes[i] == b'/' && bytes[i + 1] == b'*' { + depth += 1; + i += 1; // eat '*' + } else if bytes[i] == b'*' && bytes[i + 1] == b'/' { + depth -= 1; + if depth == 0 { + s = &s[i + 2..]; + continue 'skip; + } + i += 1; // eat '/' + } + i += 1; + } + return s; + } + } + match byte { + b' ' | 0x09..=0x0D => { + s = &s[1..]; + continue; + } + b if b <= 0x7F => {} + _ => { + let ch = s.chars().next().unwrap(); + if is_whitespace(ch) { + s = &s[ch.len_utf8()..]; + continue; + } + } + } + return s; + } + s +} + +fn is_whitespace(ch: char) -> bool { + // Rust treats left-to-right mark and right-to-left mark as whitespace + ch.is_whitespace() || ch == '\u{200e}' || ch == '\u{200f}' +} diff --git a/vendor/syn/tests/common/eq.rs b/vendor/syn/tests/common/eq.rs new file mode 100644 index 00000000000000..6bf4a58169fb73 --- /dev/null +++ b/vendor/syn/tests/common/eq.rs @@ -0,0 +1,898 @@ +#![allow(unused_macro_rules)] + +extern crate rustc_ast; +extern crate rustc_data_structures; +extern crate rustc_driver; +extern crate rustc_span; +extern crate thin_vec; + +use rustc_ast::ast::AngleBracketedArg; +use rustc_ast::ast::AngleBracketedArgs; +use rustc_ast::ast::AnonConst; +use rustc_ast::ast::Arm; +use rustc_ast::ast::AsmMacro; +use rustc_ast::ast::AssignOpKind; +use rustc_ast::ast::AssocItemConstraint; +use rustc_ast::ast::AssocItemConstraintKind; +use rustc_ast::ast::AssocItemKind; +use rustc_ast::ast::AttrArgs; +use rustc_ast::ast::AttrId; +use rustc_ast::ast::AttrItem; +use rustc_ast::ast::AttrKind; +use rustc_ast::ast::AttrStyle; +use rustc_ast::ast::Attribute; +use rustc_ast::ast::BinOpKind; +use rustc_ast::ast::BindingMode; +use rustc_ast::ast::Block; +use rustc_ast::ast::BlockCheckMode; +use rustc_ast::ast::BorrowKind; +use rustc_ast::ast::BoundAsyncness; +use rustc_ast::ast::BoundConstness; +use rustc_ast::ast::BoundPolarity; +use rustc_ast::ast::ByRef; +use rustc_ast::ast::CaptureBy; +use rustc_ast::ast::Closure; +use rustc_ast::ast::ClosureBinder; +use rustc_ast::ast::Const; +use rustc_ast::ast::ConstItem; +use rustc_ast::ast::ConstItemRhs; +use rustc_ast::ast::CoroutineKind; +use rustc_ast::ast::Crate; +use rustc_ast::ast::Defaultness; +use rustc_ast::ast::Delegation; +use rustc_ast::ast::DelegationMac; +use rustc_ast::ast::DelimArgs; +use rustc_ast::ast::EnumDef; +use rustc_ast::ast::Expr; +use rustc_ast::ast::ExprField; +use rustc_ast::ast::ExprKind; +use rustc_ast::ast::Extern; 
+use rustc_ast::ast::FieldDef; +use rustc_ast::ast::FloatTy; +use rustc_ast::ast::Fn; +use rustc_ast::ast::FnContract; +use rustc_ast::ast::FnDecl; +use rustc_ast::ast::FnHeader; +use rustc_ast::ast::FnPtrTy; +use rustc_ast::ast::FnRetTy; +use rustc_ast::ast::FnSig; +use rustc_ast::ast::ForLoopKind; +use rustc_ast::ast::ForeignItemKind; +use rustc_ast::ast::ForeignMod; +use rustc_ast::ast::FormatAlignment; +use rustc_ast::ast::FormatArgPosition; +use rustc_ast::ast::FormatArgPositionKind; +use rustc_ast::ast::FormatArgs; +use rustc_ast::ast::FormatArgsPiece; +use rustc_ast::ast::FormatArgument; +use rustc_ast::ast::FormatArgumentKind; +use rustc_ast::ast::FormatArguments; +use rustc_ast::ast::FormatCount; +use rustc_ast::ast::FormatDebugHex; +use rustc_ast::ast::FormatOptions; +use rustc_ast::ast::FormatPlaceholder; +use rustc_ast::ast::FormatSign; +use rustc_ast::ast::FormatTrait; +use rustc_ast::ast::GenBlockKind; +use rustc_ast::ast::GenericArg; +use rustc_ast::ast::GenericArgs; +use rustc_ast::ast::GenericBound; +use rustc_ast::ast::GenericParam; +use rustc_ast::ast::GenericParamKind; +use rustc_ast::ast::Generics; +use rustc_ast::ast::Impl; +use rustc_ast::ast::ImplPolarity; +use rustc_ast::ast::Inline; +use rustc_ast::ast::InlineAsm; +use rustc_ast::ast::InlineAsmOperand; +use rustc_ast::ast::InlineAsmOptions; +use rustc_ast::ast::InlineAsmRegOrRegClass; +use rustc_ast::ast::InlineAsmSym; +use rustc_ast::ast::InlineAsmTemplatePiece; +use rustc_ast::ast::IntTy; +use rustc_ast::ast::IsAuto; +use rustc_ast::ast::Item; +use rustc_ast::ast::ItemKind; +use rustc_ast::ast::Label; +use rustc_ast::ast::Lifetime; +use rustc_ast::ast::LitFloatType; +use rustc_ast::ast::LitIntType; +use rustc_ast::ast::LitKind; +use rustc_ast::ast::Local; +use rustc_ast::ast::LocalKind; +use rustc_ast::ast::MacCall; +use rustc_ast::ast::MacCallStmt; +use rustc_ast::ast::MacStmtStyle; +use rustc_ast::ast::MacroDef; +use rustc_ast::ast::MatchKind; +use rustc_ast::ast::MetaItem; +use rustc_ast::ast::MetaItemInner; +use rustc_ast::ast::MetaItemKind; +use rustc_ast::ast::MetaItemLit; +use rustc_ast::ast::MethodCall; +use rustc_ast::ast::ModKind; +use rustc_ast::ast::ModSpans; +use rustc_ast::ast::Movability; +use rustc_ast::ast::MutTy; +use rustc_ast::ast::Mutability; +use rustc_ast::ast::NodeId; +use rustc_ast::ast::NormalAttr; +use rustc_ast::ast::Param; +use rustc_ast::ast::Parens; +use rustc_ast::ast::ParenthesizedArgs; +use rustc_ast::ast::Pat; +use rustc_ast::ast::PatField; +use rustc_ast::ast::PatFieldsRest; +use rustc_ast::ast::PatKind; +use rustc_ast::ast::Path; +use rustc_ast::ast::PathSegment; +use rustc_ast::ast::Pinnedness; +use rustc_ast::ast::PolyTraitRef; +use rustc_ast::ast::PreciseCapturingArg; +use rustc_ast::ast::QSelf; +use rustc_ast::ast::RangeEnd; +use rustc_ast::ast::RangeLimits; +use rustc_ast::ast::RangeSyntax; +use rustc_ast::ast::Recovered; +use rustc_ast::ast::Safety; +use rustc_ast::ast::StaticItem; +use rustc_ast::ast::Stmt; +use rustc_ast::ast::StmtKind; +use rustc_ast::ast::StrLit; +use rustc_ast::ast::StrStyle; +use rustc_ast::ast::StructExpr; +use rustc_ast::ast::StructRest; +use rustc_ast::ast::Term; +use rustc_ast::ast::Trait; +use rustc_ast::ast::TraitAlias; +use rustc_ast::ast::TraitBoundModifiers; +use rustc_ast::ast::TraitImplHeader; +use rustc_ast::ast::TraitObjectSyntax; +use rustc_ast::ast::TraitRef; +use rustc_ast::ast::Ty; +use rustc_ast::ast::TyAlias; +use rustc_ast::ast::TyKind; +use rustc_ast::ast::TyPat; +use rustc_ast::ast::TyPatKind; +use rustc_ast::ast::UintTy; 
+use rustc_ast::ast::UnOp; +use rustc_ast::ast::UnsafeBinderCastKind; +use rustc_ast::ast::UnsafeBinderTy; +use rustc_ast::ast::UnsafeSource; +use rustc_ast::ast::UseTree; +use rustc_ast::ast::UseTreeKind; +use rustc_ast::ast::Variant; +use rustc_ast::ast::VariantData; +use rustc_ast::ast::Visibility; +use rustc_ast::ast::VisibilityKind; +use rustc_ast::ast::WhereBoundPredicate; +use rustc_ast::ast::WhereClause; +use rustc_ast::ast::WhereEqPredicate; +use rustc_ast::ast::WherePredicate; +use rustc_ast::ast::WherePredicateKind; +use rustc_ast::ast::WhereRegionPredicate; +use rustc_ast::ast::YieldKind; +use rustc_ast::token::{self, CommentKind, Delimiter, IdentIsRaw, Lit, Token, TokenKind}; +use rustc_ast::tokenstream::{ + AttrTokenStream, AttrTokenTree, AttrsTarget, DelimSpacing, DelimSpan, LazyAttrTokenStream, + Spacing, TokenStream, TokenTree, +}; +use rustc_data_structures::packed::Pu128; +use rustc_span::source_map::Spanned; +use rustc_span::symbol::{sym, ByteSymbol, Ident, Symbol}; +use rustc_span::{ErrorGuaranteed, Span, SyntaxContext, DUMMY_SP}; +use std::borrow::Cow; +use std::collections::HashMap; +use std::hash::{BuildHasher, Hash}; +use std::sync::Arc; +use thin_vec::ThinVec; + +pub trait SpanlessEq { + fn eq(&self, other: &Self) -> bool; +} + +impl<T: ?Sized + SpanlessEq> SpanlessEq for Box<T> { + fn eq(&self, other: &Self) -> bool { + SpanlessEq::eq(&**self, &**other) + } +} + +impl<T: ?Sized + SpanlessEq> SpanlessEq for Arc<T> { + fn eq(&self, other: &Self) -> bool { + SpanlessEq::eq(&**self, &**other) + } +} + +impl<T: SpanlessEq> SpanlessEq for Option<T> { + fn eq(&self, other: &Self) -> bool { + match (self, other) { + (None, None) => true, + (Some(this), Some(other)) => SpanlessEq::eq(this, other), + _ => false, + } + } +} + +impl<T: SpanlessEq, E: SpanlessEq> SpanlessEq for Result<T, E> { + fn eq(&self, other: &Self) -> bool { + match (self, other) { + (Ok(this), Ok(other)) => SpanlessEq::eq(this, other), + (Err(this), Err(other)) => SpanlessEq::eq(this, other), + _ => false, + } + } +} + +impl<T: SpanlessEq> SpanlessEq for [T] { + fn eq(&self, other: &Self) -> bool { + self.len() == other.len() && self.iter().zip(other).all(|(a, b)| SpanlessEq::eq(a, b)) + } +} + +impl<T: SpanlessEq> SpanlessEq for Vec<T> { + fn eq(&self, other: &Self) -> bool { + <[T] as SpanlessEq>::eq(self, other) + } +} + +impl<T: SpanlessEq> SpanlessEq for ThinVec<T> { + fn eq(&self, other: &Self) -> bool { + self.len() == other.len() + && self + .iter() + .zip(other.iter()) + .all(|(a, b)| SpanlessEq::eq(a, b)) + } +} + +impl<K: Eq + Hash, V: SpanlessEq, S: BuildHasher> SpanlessEq for HashMap<K, V, S> { + fn eq(&self, other: &Self) -> bool { + self.len() == other.len() + && self.iter().all(|(key, this_v)| { + other + .get(key) + .map_or(false, |other_v| SpanlessEq::eq(this_v, other_v)) + }) + } +} + +impl<'a, T: ?Sized + ToOwned + SpanlessEq> SpanlessEq for Cow<'a, T> { + fn eq(&self, other: &Self) -> bool { + <T as SpanlessEq>::eq(self, other) + } +} + +impl<T: SpanlessEq> SpanlessEq for Spanned<T> { + fn eq(&self, other: &Self) -> bool { + SpanlessEq::eq(&self.node, &other.node) + } +} + +impl<A: SpanlessEq, B: SpanlessEq> SpanlessEq for (A, B) { + fn eq(&self, other: &Self) -> bool { + SpanlessEq::eq(&self.0, &other.0) && SpanlessEq::eq(&self.1, &other.1) + } +} + +impl<A: SpanlessEq, B: SpanlessEq, C: SpanlessEq> SpanlessEq for (A, B, C) { + fn eq(&self, other: &Self) -> bool { + SpanlessEq::eq(&self.0, &other.0) + && SpanlessEq::eq(&self.1, &other.1) + && SpanlessEq::eq(&self.2, &other.2) + } 
+} + +macro_rules! spanless_eq_true { + ($name:ty) => { + impl SpanlessEq for $name { + fn eq(&self, _other: &Self) -> bool { + true + } + } + }; +} + +spanless_eq_true!(Span); +spanless_eq_true!(DelimSpan); +spanless_eq_true!(AttrId); +spanless_eq_true!(NodeId); +spanless_eq_true!(SyntaxContext); +spanless_eq_true!(Spacing); + +macro_rules! spanless_eq_partial_eq { + ($name:ty) => { + impl SpanlessEq for $name { + fn eq(&self, other: &Self) -> bool { + PartialEq::eq(self, other) + } + } + }; +} + +spanless_eq_partial_eq!(()); +spanless_eq_partial_eq!(bool); +spanless_eq_partial_eq!(u8); +spanless_eq_partial_eq!(u16); +spanless_eq_partial_eq!(u32); +spanless_eq_partial_eq!(u128); +spanless_eq_partial_eq!(usize); +spanless_eq_partial_eq!(char); +spanless_eq_partial_eq!(str); +spanless_eq_partial_eq!(String); +spanless_eq_partial_eq!(Pu128); +spanless_eq_partial_eq!(Symbol); +spanless_eq_partial_eq!(ByteSymbol); +spanless_eq_partial_eq!(CommentKind); +spanless_eq_partial_eq!(Delimiter); +spanless_eq_partial_eq!(InlineAsmOptions); +spanless_eq_partial_eq!(token::LitKind); +spanless_eq_partial_eq!(ErrorGuaranteed); + +macro_rules! spanless_eq_struct { + { + $($name:ident)::+ $(<$param:ident>)? + $([$field:tt $this:ident $other:ident])* + $(![$ignore:tt])*; + } => { + impl $(<$param: SpanlessEq>)* SpanlessEq for $($name)::+ $(<$param>)* { + fn eq(&self, other: &Self) -> bool { + let $($name)::+ { $($field: $this,)* $($ignore: _,)* } = self; + let $($name)::+ { $($field: $other,)* $($ignore: _,)* } = other; + true $(&& SpanlessEq::eq($this, $other))* + } + } + }; + + { + $($name:ident)::+ $(<$param:ident>)? + $([$field:tt $this:ident $other:ident])* + $(![$ignore:tt])*; + !$next:tt + $($rest:tt)* + } => { + spanless_eq_struct! { + $($name)::+ $(<$param>)* + $([$field $this $other])* + $(![$ignore])* + ![$next]; + $($rest)* + } + }; + + { + $($name:ident)::+ $(<$param:ident>)? + $([$field:tt $this:ident $other:ident])* + $(![$ignore:tt])*; + $next:tt + $($rest:tt)* + } => { + spanless_eq_struct! { + $($name)::+ $(<$param>)* + $([$field $this $other])* + [$next this other] + $(![$ignore])*; + $($rest)* + } + }; +} + +macro_rules! spanless_eq_enum { + { + $($name:ident)::+; + $([$($variant:ident)::+; $([$field:tt $this:ident $other:ident])* $(![$ignore:tt])*])* + } => { + impl SpanlessEq for $($name)::+ { + fn eq(&self, other: &Self) -> bool { + match self { + $( + $($variant)::+ { .. } => {} + )* + } + #[allow(unreachable_patterns)] + match (self, other) { + $( + ( + $($variant)::+ { $($field: $this,)* $($ignore: _,)* }, + $($variant)::+ { $($field: $other,)* $($ignore: _,)* }, + ) => { + true $(&& SpanlessEq::eq($this, $other))* + } + )* + _ => false, + } + } + } + }; + + { + $($name:ident)::+; + $([$($variant:ident)::+; $($fields:tt)*])* + $next:ident [$([$($named:tt)*])* $(![$ignore:tt])*] (!$i:tt $($field:tt)*) + $($rest:tt)* + } => { + spanless_eq_enum! { + $($name)::+; + $([$($variant)::+; $($fields)*])* + $next [$([$($named)*])* $(![$ignore])* ![$i]] ($($field)*) + $($rest)* + } + }; + + { + $($name:ident)::+; + $([$($variant:ident)::+; $($fields:tt)*])* + $next:ident [$([$($named:tt)*])* $(![$ignore:tt])*] ($i:tt $($field:tt)*) + $($rest:tt)* + } => { + spanless_eq_enum! { + $($name)::+; + $([$($variant)::+; $($fields)*])* + $next [$([$($named)*])* [$i this other] $(![$ignore])*] ($($field)*) + $($rest)* + } + }; + + { + $($name:ident)::+; + $([$($variant:ident)::+; $($fields:tt)*])* + $next:ident [$($named:tt)*] () + $($rest:tt)* + } => { + spanless_eq_enum! 
{ + $($name)::+; + $([$($variant)::+; $($fields)*])* + [$($name)::+::$next; $($named)*] + $($rest)* + } + }; + + { + $($name:ident)::+; + $([$($variant:ident)::+; $($fields:tt)*])* + $next:ident ($($field:tt)*) + $($rest:tt)* + } => { + spanless_eq_enum! { + $($name)::+; + $([$($variant)::+; $($fields)*])* + $next [] ($($field)*) + $($rest)* + } + }; + + { + $($name:ident)::+; + $([$($variant:ident)::+; $($fields:tt)*])* + $next:ident + $($rest:tt)* + } => { + spanless_eq_enum! { + $($name)::+; + $([$($variant)::+; $($fields)*])* + [$($name)::+::$next;] + $($rest)* + } + }; +} + +spanless_eq_struct!(AngleBracketedArgs; span args); +spanless_eq_struct!(AnonConst; id value); +spanless_eq_struct!(Arm; attrs pat guard body span id is_placeholder); +spanless_eq_struct!(AssocItemConstraint; id ident gen_args kind span); +spanless_eq_struct!(AttrItem; unsafety path args tokens); +spanless_eq_struct!(AttrTokenStream; 0); +spanless_eq_struct!(Attribute; kind id style span); +spanless_eq_struct!(AttrsTarget; attrs tokens); +spanless_eq_struct!(BindingMode; 0 1); +spanless_eq_struct!(Block; stmts id rules span tokens); +spanless_eq_struct!(Closure; binder capture_clause constness coroutine_kind movability fn_decl body !fn_decl_span !fn_arg_span); +spanless_eq_struct!(ConstItem; defaultness ident generics ty rhs define_opaque); +spanless_eq_struct!(Crate; attrs items spans id is_placeholder); +spanless_eq_struct!(Delegation; id qself path ident rename body from_glob); +spanless_eq_struct!(DelegationMac; qself prefix suffixes body); +spanless_eq_struct!(DelimArgs; dspan delim tokens); +spanless_eq_struct!(DelimSpacing; open close); +spanless_eq_struct!(EnumDef; variants); +spanless_eq_struct!(Expr; id kind span attrs !tokens); +spanless_eq_struct!(ExprField; attrs id span ident expr is_shorthand is_placeholder); +spanless_eq_struct!(FieldDef; attrs id span vis safety ident ty default is_placeholder); +spanless_eq_struct!(Fn; defaultness ident generics sig contract define_opaque body); +spanless_eq_struct!(FnContract; declarations requires ensures); +spanless_eq_struct!(FnDecl; inputs output); +spanless_eq_struct!(FnHeader; constness coroutine_kind safety ext); +spanless_eq_struct!(FnPtrTy; safety ext generic_params decl decl_span); +spanless_eq_struct!(FnSig; header decl span); +spanless_eq_struct!(ForeignMod; extern_span safety abi items); +spanless_eq_struct!(FormatArgPosition; index kind span); +spanless_eq_struct!(FormatArgs; span template arguments uncooked_fmt_str is_source_literal); +spanless_eq_struct!(FormatArgument; kind expr); +spanless_eq_struct!(FormatOptions; width precision alignment fill sign alternate zero_pad debug_hex); +spanless_eq_struct!(FormatPlaceholder; argument span format_trait format_options); +spanless_eq_struct!(GenericParam; id ident attrs bounds is_placeholder kind !colon_span); +spanless_eq_struct!(Generics; params where_clause span); +spanless_eq_struct!(Impl; generics of_trait self_ty items); +spanless_eq_struct!(InlineAsm; asm_macro template template_strs operands clobber_abis options line_spans); +spanless_eq_struct!(InlineAsmSym; id qself path); +spanless_eq_struct!(Item<K>; attrs id span vis kind !tokens); +spanless_eq_struct!(Label; ident); +spanless_eq_struct!(Lifetime; id ident); +spanless_eq_struct!(Lit; kind symbol suffix); +spanless_eq_struct!(Local; id super_ pat ty kind span colon_sp attrs !tokens); +spanless_eq_struct!(MacCall; path args); +spanless_eq_struct!(MacCallStmt; mac style attrs tokens); +spanless_eq_struct!(MacroDef; body macro_rules); 
+spanless_eq_struct!(MetaItem; unsafety path kind span); +spanless_eq_struct!(MetaItemLit; symbol suffix kind span); +spanless_eq_struct!(MethodCall; seg receiver args !span); +spanless_eq_struct!(ModSpans; !inner_span !inject_use_span); +spanless_eq_struct!(MutTy; ty mutbl); +spanless_eq_struct!(NormalAttr; item tokens); +spanless_eq_struct!(ParenthesizedArgs; span inputs inputs_span output); +spanless_eq_struct!(Pat; id kind span tokens); +spanless_eq_struct!(PatField; ident pat is_shorthand attrs id span is_placeholder); +spanless_eq_struct!(Path; span segments tokens); +spanless_eq_struct!(PathSegment; ident id args); +spanless_eq_struct!(PolyTraitRef; bound_generic_params modifiers trait_ref span parens); +spanless_eq_struct!(QSelf; ty path_span position); +spanless_eq_struct!(StaticItem; ident ty safety mutability expr define_opaque); +spanless_eq_struct!(Stmt; id kind span); +spanless_eq_struct!(StrLit; symbol suffix symbol_unescaped style span); +spanless_eq_struct!(StructExpr; qself path fields rest); +spanless_eq_struct!(Token; kind span); +spanless_eq_struct!(Trait; constness safety is_auto ident generics bounds items); +spanless_eq_struct!(TraitAlias; constness ident generics bounds); +spanless_eq_struct!(TraitBoundModifiers; constness asyncness polarity); +spanless_eq_struct!(TraitImplHeader; defaultness safety constness polarity trait_ref); +spanless_eq_struct!(TraitRef; path ref_id); +spanless_eq_struct!(Ty; id kind span tokens); +spanless_eq_struct!(TyAlias; defaultness ident generics after_where_clause bounds ty); +spanless_eq_struct!(TyPat; id kind span tokens); +spanless_eq_struct!(UnsafeBinderTy; generic_params inner_ty); +spanless_eq_struct!(UseTree; prefix kind span); +spanless_eq_struct!(Variant; attrs id span !vis ident data disr_expr is_placeholder); +spanless_eq_struct!(Visibility; kind span tokens); +spanless_eq_struct!(WhereBoundPredicate; bound_generic_params bounded_ty bounds); +spanless_eq_struct!(WhereClause; has_where_token predicates span); +spanless_eq_struct!(WhereEqPredicate; lhs_ty rhs_ty); +spanless_eq_struct!(WherePredicate; attrs kind id span is_placeholder); +spanless_eq_struct!(WhereRegionPredicate; lifetime bounds); +spanless_eq_enum!(AngleBracketedArg; Arg(0) Constraint(0)); +spanless_eq_enum!(AsmMacro; Asm GlobalAsm NakedAsm); +spanless_eq_enum!(AssocItemConstraintKind; Equality(term) Bound(bounds)); +spanless_eq_enum!(AssocItemKind; Const(0) Fn(0) Type(0) MacCall(0) Delegation(0) DelegationMac(0)); +spanless_eq_enum!(AttrArgs; Empty Delimited(0) Eq(eq_span expr)); +spanless_eq_enum!(AttrStyle; Outer Inner); +spanless_eq_enum!(AttrTokenTree; Token(0 1) Delimited(0 1 2 3) AttrsTarget(0)); +spanless_eq_enum!(BinOpKind; Add Sub Mul Div Rem And Or BitXor BitAnd BitOr Shl Shr Eq Lt Le Ne Ge Gt); +spanless_eq_enum!(BlockCheckMode; Default Unsafe(0)); +spanless_eq_enum!(BorrowKind; Ref Raw Pin); +spanless_eq_enum!(BoundAsyncness; Normal Async(0)); +spanless_eq_enum!(BoundConstness; Never Always(0) Maybe(0)); +spanless_eq_enum!(BoundPolarity; Positive Negative(0) Maybe(0)); +spanless_eq_enum!(ByRef; Yes(0 1) No); +spanless_eq_enum!(CaptureBy; Value(move_kw) Ref Use(use_kw)); +spanless_eq_enum!(ClosureBinder; NotPresent For(span generic_params)); +spanless_eq_enum!(Const; Yes(0) No); +spanless_eq_enum!(ConstItemRhs; TypeConst(0) Body(0)); +spanless_eq_enum!(Defaultness; Default(0) Final); +spanless_eq_enum!(Extern; None Implicit(0) Explicit(0 1)); +spanless_eq_enum!(FloatTy; F16 F32 F64 F128); +spanless_eq_enum!(FnRetTy; Default(0) Ty(0)); 
+spanless_eq_enum!(ForLoopKind; For ForAwait); +spanless_eq_enum!(ForeignItemKind; Static(0) Fn(0) TyAlias(0) MacCall(0)); +spanless_eq_enum!(FormatAlignment; Left Right Center); +spanless_eq_enum!(FormatArgPositionKind; Implicit Number Named); +spanless_eq_enum!(FormatArgsPiece; Literal(0) Placeholder(0)); +spanless_eq_enum!(FormatArgumentKind; Normal Named(0) Captured(0)); +spanless_eq_enum!(FormatCount; Literal(0) Argument(0)); +spanless_eq_enum!(FormatDebugHex; Lower Upper); +spanless_eq_enum!(FormatSign; Plus Minus); +spanless_eq_enum!(FormatTrait; Display Debug LowerExp UpperExp Octal Pointer Binary LowerHex UpperHex); +spanless_eq_enum!(GenBlockKind; Async Gen AsyncGen); +spanless_eq_enum!(GenericArg; Lifetime(0) Type(0) Const(0)); +spanless_eq_enum!(GenericArgs; AngleBracketed(0) Parenthesized(0) ParenthesizedElided(0)); +spanless_eq_enum!(GenericBound; Trait(0) Outlives(0) Use(0 1)); +spanless_eq_enum!(GenericParamKind; Lifetime Type(default) Const(ty span default)); +spanless_eq_enum!(ImplPolarity; Positive Negative(0)); +spanless_eq_enum!(Inline; Yes No(had_parse_error)); +spanless_eq_enum!(InlineAsmRegOrRegClass; Reg(0) RegClass(0)); +spanless_eq_enum!(InlineAsmTemplatePiece; String(0) Placeholder(operand_idx modifier span)); +spanless_eq_enum!(IntTy; Isize I8 I16 I32 I64 I128); +spanless_eq_enum!(IsAuto; Yes No); +spanless_eq_enum!(LitFloatType; Suffixed(0) Unsuffixed); +spanless_eq_enum!(LitIntType; Signed(0) Unsigned(0) Unsuffixed); +spanless_eq_enum!(LocalKind; Decl Init(0) InitElse(0 1)); +spanless_eq_enum!(MacStmtStyle; Semicolon Braces NoBraces); +spanless_eq_enum!(MatchKind; Prefix Postfix); +spanless_eq_enum!(MetaItemKind; Word List(0) NameValue(0)); +spanless_eq_enum!(MetaItemInner; MetaItem(0) Lit(0)); +spanless_eq_enum!(ModKind; Loaded(0 1 2) Unloaded); +spanless_eq_enum!(Movability; Static Movable); +spanless_eq_enum!(Mutability; Mut Not); +spanless_eq_enum!(Parens; Yes No); +spanless_eq_enum!(PatFieldsRest; Rest(0) Recovered(0) None); +spanless_eq_enum!(Pinnedness; Not Pinned); +spanless_eq_enum!(PreciseCapturingArg; Lifetime(0) Arg(0 1)); +spanless_eq_enum!(RangeEnd; Included(0) Excluded); +spanless_eq_enum!(RangeLimits; HalfOpen Closed); +spanless_eq_enum!(Recovered; No Yes(0)); +spanless_eq_enum!(Safety; Unsafe(0) Safe(0) Default); +spanless_eq_enum!(StmtKind; Let(0) Item(0) Expr(0) Semi(0) Empty MacCall(0)); +spanless_eq_enum!(StrStyle; Cooked Raw(0)); +spanless_eq_enum!(StructRest; Base(0) Rest(0) None); +spanless_eq_enum!(Term; Ty(0) Const(0)); +spanless_eq_enum!(TokenTree; Token(0 1) Delimited(0 1 2 3)); +spanless_eq_enum!(TraitObjectSyntax; Dyn None); +spanless_eq_enum!(TyPatKind; Range(0 1 2) NotNull Or(0) Err(0)); +spanless_eq_enum!(UintTy; Usize U8 U16 U32 U64 U128); +spanless_eq_enum!(UnOp; Deref Not Neg); +spanless_eq_enum!(UnsafeBinderCastKind; Wrap Unwrap); +spanless_eq_enum!(UnsafeSource; CompilerGenerated UserProvided); +spanless_eq_enum!(UseTreeKind; Simple(0) Nested(items span) Glob); +spanless_eq_enum!(VariantData; Struct(fields recovered) Tuple(0 1) Unit(0)); +spanless_eq_enum!(VisibilityKind; Public Restricted(path id shorthand) Inherited); +spanless_eq_enum!(WherePredicateKind; BoundPredicate(0) RegionPredicate(0) EqPredicate(0)); +spanless_eq_enum!(YieldKind; Prefix(0) Postfix(0)); +spanless_eq_enum!(AssignOpKind; AddAssign SubAssign MulAssign DivAssign + RemAssign BitXorAssign BitAndAssign BitOrAssign ShlAssign ShrAssign); +spanless_eq_enum!(CoroutineKind; Async(span closure_id return_impl_trait_id) + Gen(span closure_id 
return_impl_trait_id) + AsyncGen(span closure_id return_impl_trait_id)); +spanless_eq_enum!(ExprKind; Array(0) ConstBlock(0) Call(0 1) MethodCall(0) + Tup(0) Binary(0 1 2) Unary(0 1) Lit(0) Cast(0 1) Type(0 1) Let(0 1 2 3) + If(0 1 2) While(0 1 2) ForLoop(pat iter body label kind) Loop(0 1 2) + Match(0 1 2) Closure(0) Block(0 1) Gen(0 1 2 3) Await(0 1) Use(0 1) + TryBlock(0) Assign(0 1 2) AssignOp(0 1 2) Field(0 1) Index(0 1 2) Underscore + Range(0 1 2) Path(0 1) AddrOf(0 1 2) Break(0 1) Continue(0) Ret(0) + InlineAsm(0) OffsetOf(0 1) MacCall(0) Struct(0) Repeat(0 1) Paren(0) Try(0) + Yield(0) Yeet(0) Become(0) IncludedBytes(0) FormatArgs(0) + UnsafeBinderCast(0 1 2) Err(0) Dummy); +spanless_eq_enum!(InlineAsmOperand; In(reg expr) Out(reg late expr) + InOut(reg late expr) SplitInOut(reg late in_expr out_expr) Const(anon_const) + Sym(sym) Label(block)); +spanless_eq_enum!(ItemKind; ExternCrate(0 1) Use(0) Static(0) Const(0) Fn(0) + Mod(0 1 2) ForeignMod(0) GlobalAsm(0) TyAlias(0) Enum(0 1 2) Struct(0 1 2) + Union(0 1 2) Trait(0) TraitAlias(0) Impl(0) MacCall(0) MacroDef(0 1) + Delegation(0) DelegationMac(0)); +spanless_eq_enum!(LitKind; Str(0 1) ByteStr(0 1) CStr(0 1) Byte(0) Char(0) + Int(0 1) Float(0 1) Bool(0) Err(0)); +spanless_eq_enum!(PatKind; Missing Wild Ident(0 1 2) Struct(0 1 2 3) + TupleStruct(0 1 2) Or(0) Path(0 1) Tuple(0) Box(0) Deref(0) Ref(0 1) Expr(0) + Range(0 1 2) Slice(0) Rest Never Guard(0 1) Paren(0) MacCall(0) Err(0)); +spanless_eq_enum!(TyKind; Slice(0) Array(0 1) Ptr(0) Ref(0 1) PinnedRef(0 1) + FnPtr(0) UnsafeBinder(0) Never Tup(0) Path(0 1) TraitObject(0 1) + ImplTrait(0 1) Paren(0) Typeof(0) Infer ImplicitSelf MacCall(0) CVarArgs + Pat(0 1) Dummy Err(0)); + +impl SpanlessEq for Ident { + fn eq(&self, other: &Self) -> bool { + self.as_str() == other.as_str() + } +} + +impl SpanlessEq for RangeSyntax { + fn eq(&self, _other: &Self) -> bool { + match self { + RangeSyntax::DotDotDot | RangeSyntax::DotDotEq => true, + } + } +} + +impl SpanlessEq for Param { + fn eq(&self, other: &Self) -> bool { + let Param { + attrs, + ty, + pat, + id, + span: _, + is_placeholder, + } = self; + let Param { + attrs: attrs2, + ty: ty2, + pat: pat2, + id: id2, + span: _, + is_placeholder: is_placeholder2, + } = other; + SpanlessEq::eq(id, id2) + && SpanlessEq::eq(is_placeholder, is_placeholder2) + && (matches!(ty.kind, TyKind::Err(_)) + || matches!(ty2.kind, TyKind::Err(_)) + || SpanlessEq::eq(attrs, attrs2) + && SpanlessEq::eq(ty, ty2) + && SpanlessEq::eq(pat, pat2)) + } +} + +impl SpanlessEq for TokenKind { + fn eq(&self, other: &Self) -> bool { + match (self, other) { + (TokenKind::Literal(this), TokenKind::Literal(other)) => SpanlessEq::eq(this, other), + (TokenKind::DotDotEq | TokenKind::DotDotDot, _) => match other { + TokenKind::DotDotEq | TokenKind::DotDotDot => true, + _ => false, + }, + _ => self == other, + } + } +} + +impl SpanlessEq for TokenStream { + fn eq(&self, other: &Self) -> bool { + let mut this_trees = self.iter(); + let mut other_trees = other.iter(); + loop { + let Some(this) = this_trees.next() else { + return other_trees.next().is_none(); + }; + let Some(other) = other_trees.next() else { + return false; + }; + if SpanlessEq::eq(this, other) { + continue; + } + if let (TokenTree::Token(this, _), TokenTree::Token(other, _)) = (this, other) { + if match (&this.kind, &other.kind) { + (TokenKind::Literal(this), TokenKind::Literal(other)) => { + SpanlessEq::eq(this, other) + } + (TokenKind::DocComment(_kind, style, symbol), TokenKind::Pound) => { + 
doc_comment(*style, *symbol, &mut other_trees) + } + (TokenKind::Pound, TokenKind::DocComment(_kind, style, symbol)) => { + doc_comment(*style, *symbol, &mut this_trees) + } + _ => false, + } { + continue; + } + } + return false; + } + } +} + +fn doc_comment<'a>( + style: AttrStyle, + unescaped: Symbol, + trees: &mut impl Iterator<Item = &'a TokenTree>, +) -> bool { + if match style { + AttrStyle::Outer => false, + AttrStyle::Inner => true, + } { + match trees.next() { + Some(TokenTree::Token( + Token { + kind: TokenKind::Bang, + span: _, + }, + _spacing, + )) => {} + _ => return false, + } + } + let Some(TokenTree::Delimited(_span, _spacing, Delimiter::Bracket, stream)) = trees.next() + else { + return false; + }; + let mut trees = stream.iter(); + match trees.next() { + Some(TokenTree::Token( + Token { + kind: TokenKind::Ident(symbol, IdentIsRaw::No), + span: _, + }, + _spacing, + )) if *symbol == sym::doc => {} + _ => return false, + } + match trees.next() { + Some(TokenTree::Token( + Token { + kind: TokenKind::Eq, + span: _, + }, + _spacing, + )) => {} + _ => return false, + } + match trees.next() { + Some(TokenTree::Token(token, _spacing)) => { + is_escaped_literal_token(token, unescaped) && trees.next().is_none() + } + _ => false, + } +} + +fn is_escaped_literal_token(token: &Token, unescaped: Symbol) -> bool { + match token { + Token { + kind: TokenKind::Literal(lit), + span: _, + } => match MetaItemLit::from_token_lit(*lit, DUMMY_SP) { + Ok(lit) => is_escaped_literal_meta_item_lit(&lit, unescaped), + Err(_) => false, + }, + _ => false, + } +} + +fn is_escaped_literal_meta_item_lit(lit: &MetaItemLit, unescaped: Symbol) -> bool { + match lit { + MetaItemLit { + symbol: _, + suffix: None, + kind, + span: _, + } => is_escaped_lit_kind(kind, unescaped), + _ => false, + } +} + +fn is_escaped_lit(lit: &Lit, unescaped: Symbol) -> bool { + match lit { + Lit { + kind: token::LitKind::Str, + symbol: _, + suffix: None, + } => match LitKind::from_token_lit(*lit) { + Ok(lit_kind) => is_escaped_lit_kind(&lit_kind, unescaped), + _ => false, + }, + _ => false, + } +} + +fn is_escaped_lit_kind(kind: &LitKind, unescaped: Symbol) -> bool { + match kind { + LitKind::Str(symbol, StrStyle::Cooked) => { + symbol.as_str().replace('\r', "") == unescaped.as_str().replace('\r', "") + } + _ => false, + } +} + +impl SpanlessEq for LazyAttrTokenStream { + fn eq(&self, other: &Self) -> bool { + let this = self.to_attr_token_stream(); + let other = other.to_attr_token_stream(); + SpanlessEq::eq(&this, &other) + } +} + +impl SpanlessEq for AttrKind { + fn eq(&self, other: &Self) -> bool { + match (self, other) { + (AttrKind::Normal(normal), AttrKind::Normal(normal2)) => { + SpanlessEq::eq(normal, normal2) + } + (AttrKind::DocComment(kind, symbol), AttrKind::DocComment(kind2, symbol2)) => { + SpanlessEq::eq(kind, kind2) && SpanlessEq::eq(symbol, symbol2) + } + (AttrKind::DocComment(kind, unescaped), AttrKind::Normal(normal2)) => { + match kind { + CommentKind::Line | CommentKind::Block => {} + } + let path = Path::from_ident(Ident::with_dummy_span(sym::doc)); + SpanlessEq::eq(&path, &normal2.item.path) + && match &normal2.item.args { + AttrArgs::Empty | AttrArgs::Delimited(_) => false, + AttrArgs::Eq { eq_span: _, expr } => match &expr.kind { + ExprKind::Lit(lit) => is_escaped_lit(lit, *unescaped), + _ => false, + }, + } + } + (AttrKind::Normal(_), AttrKind::DocComment(..)) => SpanlessEq::eq(other, self), + } + } +} + +impl SpanlessEq for FormatArguments { + fn eq(&self, other: &Self) -> bool { + 
SpanlessEq::eq(self.all_args(), other.all_args()) + } +} diff --git a/vendor/syn/tests/common/mod.rs b/vendor/syn/tests/common/mod.rs new file mode 100644 index 00000000000000..ead830f811656a --- /dev/null +++ b/vendor/syn/tests/common/mod.rs @@ -0,0 +1,6 @@ +#![allow(dead_code)] +#![allow(clippy::module_name_repetitions, clippy::shadow_unrelated)] + +pub mod eq; +pub mod parse; +pub mod visit; diff --git a/vendor/syn/tests/common/parse.rs b/vendor/syn/tests/common/parse.rs new file mode 100644 index 00000000000000..81ae357c1d1e9e --- /dev/null +++ b/vendor/syn/tests/common/parse.rs @@ -0,0 +1,52 @@ +extern crate rustc_ast; +extern crate rustc_driver; +extern crate rustc_expand; +extern crate rustc_parse; +extern crate rustc_session; +extern crate rustc_span; + +use rustc_ast::ast; +use rustc_parse::lexer::StripTokens; +use rustc_session::parse::ParseSess; +use rustc_span::FileName; +use std::panic; + +pub fn librustc_expr(input: &str) -> Option<Box<ast::Expr>> { + match panic::catch_unwind(|| { + let locale_resources = rustc_driver::DEFAULT_LOCALE_RESOURCES.to_vec(); + let sess = ParseSess::new(locale_resources); + let name = FileName::Custom("test_precedence".to_string()); + let mut parser = rustc_parse::new_parser_from_source_str( + &sess, + name, + input.to_string(), + StripTokens::ShebangAndFrontmatter, + ) + .unwrap(); + let presult = parser.parse_expr(); + match presult { + Ok(expr) => Some(expr), + Err(diagnostic) => { + diagnostic.emit(); + None + } + } + }) { + Ok(Some(e)) => Some(e), + Ok(None) => None, + Err(_) => { + errorf!("librustc panicked\n"); + None + } + } +} + +pub fn syn_expr(input: &str) -> Option<syn::Expr> { + match syn::parse_str(input) { + Ok(e) => Some(e), + Err(msg) => { + errorf!("syn failed to parse\n{:?}\n", msg); + None + } + } +} diff --git a/vendor/syn/tests/common/visit.rs b/vendor/syn/tests/common/visit.rs new file mode 100644 index 00000000000000..2d2a6c5382d53b --- /dev/null +++ b/vendor/syn/tests/common/visit.rs @@ -0,0 +1,119 @@ +use proc_macro2::{Delimiter, Group, TokenStream, TokenTree}; +use std::mem; +use syn::visit_mut::{self, VisitMut}; +use syn::{Expr, File, Generics, LifetimeParam, MacroDelimiter, Stmt, StmtMacro, TypeParam}; + +pub struct FlattenParens { + discard_paren_attrs: bool, +} + +impl FlattenParens { + pub fn discard_attrs() -> Self { + FlattenParens { + discard_paren_attrs: true, + } + } + + pub fn combine_attrs() -> Self { + FlattenParens { + discard_paren_attrs: false, + } + } + + pub fn visit_token_stream_mut(tokens: &mut TokenStream) { + *tokens = mem::take(tokens) + .into_iter() + .flat_map(|tt| { + if let TokenTree::Group(group) = tt { + let delimiter = group.delimiter(); + let mut content = group.stream(); + Self::visit_token_stream_mut(&mut content); + if let Delimiter::Parenthesis = delimiter { + content + } else { + TokenStream::from(TokenTree::Group(Group::new(delimiter, content))) + } + } else { + TokenStream::from(tt) + } + }) + .collect(); + } +} + +impl VisitMut for FlattenParens { + fn visit_expr_mut(&mut self, e: &mut Expr) { + while let Expr::Paren(paren) = e { + let paren_attrs = mem::take(&mut paren.attrs); + *e = mem::replace(&mut *paren.expr, Expr::PLACEHOLDER); + if !paren_attrs.is_empty() && !self.discard_paren_attrs { + let nested_attrs = match e { + Expr::Assign(e) => &mut e.attrs, + Expr::Binary(e) => &mut e.attrs, + Expr::Cast(e) => &mut e.attrs, + _ => unimplemented!(), + }; + assert!(nested_attrs.is_empty()); + *nested_attrs = paren_attrs; + } + } + visit_mut::visit_expr_mut(self, e); + } +} + +pub 
struct AsIfPrinted; + +impl VisitMut for AsIfPrinted { + fn visit_file_mut(&mut self, file: &mut File) { + file.shebang = None; + visit_mut::visit_file_mut(self, file); + } + + fn visit_generics_mut(&mut self, generics: &mut Generics) { + if generics.params.is_empty() { + generics.lt_token = None; + generics.gt_token = None; + } + if let Some(where_clause) = &generics.where_clause { + if where_clause.predicates.is_empty() { + generics.where_clause = None; + } + } + visit_mut::visit_generics_mut(self, generics); + } + + fn visit_lifetime_param_mut(&mut self, param: &mut LifetimeParam) { + if param.bounds.is_empty() { + param.colon_token = None; + } + visit_mut::visit_lifetime_param_mut(self, param); + } + + fn visit_stmt_mut(&mut self, stmt: &mut Stmt) { + if let Stmt::Expr(expr, semi) = stmt { + if let Expr::Macro(e) = expr { + if match e.mac.delimiter { + MacroDelimiter::Brace(_) => true, + MacroDelimiter::Paren(_) | MacroDelimiter::Bracket(_) => semi.is_some(), + } { + let Expr::Macro(expr) = mem::replace(expr, Expr::PLACEHOLDER) else { + unreachable!(); + }; + *stmt = Stmt::Macro(StmtMacro { + attrs: expr.attrs, + mac: expr.mac, + semi_token: *semi, + }); + } + } + } + visit_mut::visit_stmt_mut(self, stmt); + } + + fn visit_type_param_mut(&mut self, param: &mut TypeParam) { + if param.bounds.is_empty() { + param.colon_token = None; + } + visit_mut::visit_type_param_mut(self, param); + } +} diff --git a/vendor/syn/tests/debug/gen.rs b/vendor/syn/tests/debug/gen.rs new file mode 100644 index 00000000000000..f91977a6769dac --- /dev/null +++ b/vendor/syn/tests/debug/gen.rs @@ -0,0 +1,5239 @@ +// This file is @generated by syn-internal-codegen. +// It is not intended for manual editing. + +#![allow(repr_transparent_non_zst_fields)] +#![allow(clippy::match_wildcard_for_single_variants)] +use super::{Lite, Present}; +use ref_cast::RefCast; +use std::fmt::{self, Debug, Display}; +impl Debug for Lite<syn::Abi> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("Abi"); + if let Some(val) = &self.value.name { + #[derive(RefCast)] + #[repr(transparent)] + struct Print(syn::LitStr); + impl Debug for Print { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Some(")?; + Debug::fmt(Lite(&self.0), formatter)?; + formatter.write_str(")")?; + Ok(()) + } + } + formatter.field("name", Print::ref_cast(val)); + } + formatter.finish() + } +} +impl Debug for Lite<syn::AngleBracketedGenericArguments> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("AngleBracketedGenericArguments"); + if self.value.colon2_token.is_some() { + formatter.field("colon2_token", &Present); + } + if !self.value.args.is_empty() { + formatter.field("args", Lite(&self.value.args)); + } + formatter.finish() + } +} +impl Debug for Lite<syn::Arm> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("Arm"); + if !self.value.attrs.is_empty() { + formatter.field("attrs", Lite(&self.value.attrs)); + } + formatter.field("pat", Lite(&self.value.pat)); + if let Some(val) = &self.value.guard { + #[derive(RefCast)] + #[repr(transparent)] + struct Print((syn::token::If, Box<syn::Expr>)); + impl Debug for Print { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Some(")?; + Debug::fmt(Lite(&self.0.1), formatter)?; + formatter.write_str(")")?; + Ok(()) + } + } + formatter.field("guard", 
Print::ref_cast(val)); + } + formatter.field("body", Lite(&self.value.body)); + if self.value.comma.is_some() { + formatter.field("comma", &Present); + } + formatter.finish() + } +} +impl Debug for Lite<syn::AssocConst> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("AssocConst"); + formatter.field("ident", Lite(&self.value.ident)); + if let Some(val) = &self.value.generics { + #[derive(RefCast)] + #[repr(transparent)] + struct Print(syn::AngleBracketedGenericArguments); + impl Debug for Print { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Some(")?; + Debug::fmt(Lite(&self.0), formatter)?; + formatter.write_str(")")?; + Ok(()) + } + } + formatter.field("generics", Print::ref_cast(val)); + } + formatter.field("value", Lite(&self.value.value)); + formatter.finish() + } +} +impl Debug for Lite<syn::AssocType> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("AssocType"); + formatter.field("ident", Lite(&self.value.ident)); + if let Some(val) = &self.value.generics { + #[derive(RefCast)] + #[repr(transparent)] + struct Print(syn::AngleBracketedGenericArguments); + impl Debug for Print { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Some(")?; + Debug::fmt(Lite(&self.0), formatter)?; + formatter.write_str(")")?; + Ok(()) + } + } + formatter.field("generics", Print::ref_cast(val)); + } + formatter.field("ty", Lite(&self.value.ty)); + formatter.finish() + } +} +impl Debug for Lite<syn::AttrStyle> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + match &self.value { + syn::AttrStyle::Outer => formatter.write_str("AttrStyle::Outer"), + syn::AttrStyle::Inner(_val) => { + formatter.write_str("AttrStyle::Inner")?; + Ok(()) + } + } + } +} +impl Debug for Lite<syn::Attribute> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("Attribute"); + formatter.field("style", Lite(&self.value.style)); + formatter.field("meta", Lite(&self.value.meta)); + formatter.finish() + } +} +impl Debug for Lite<syn::BareFnArg> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("BareFnArg"); + if !self.value.attrs.is_empty() { + formatter.field("attrs", Lite(&self.value.attrs)); + } + if let Some(val) = &self.value.name { + #[derive(RefCast)] + #[repr(transparent)] + struct Print((proc_macro2::Ident, syn::token::Colon)); + impl Debug for Print { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Some(")?; + Debug::fmt(Lite(&self.0.0), formatter)?; + formatter.write_str(")")?; + Ok(()) + } + } + formatter.field("name", Print::ref_cast(val)); + } + formatter.field("ty", Lite(&self.value.ty)); + formatter.finish() + } +} +impl Debug for Lite<syn::BareVariadic> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("BareVariadic"); + if !self.value.attrs.is_empty() { + formatter.field("attrs", Lite(&self.value.attrs)); + } + if let Some(val) = &self.value.name { + #[derive(RefCast)] + #[repr(transparent)] + struct Print((proc_macro2::Ident, syn::token::Colon)); + impl Debug for Print { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Some(")?; + Debug::fmt(Lite(&self.0.0), formatter)?; + formatter.write_str(")")?; + Ok(()) + } + } + 
formatter.field("name", Print::ref_cast(val)); + } + if self.value.comma.is_some() { + formatter.field("comma", &Present); + } + formatter.finish() + } +} +impl Debug for Lite<syn::BinOp> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + match &self.value { + syn::BinOp::Add(_val) => { + formatter.write_str("BinOp::Add")?; + Ok(()) + } + syn::BinOp::Sub(_val) => { + formatter.write_str("BinOp::Sub")?; + Ok(()) + } + syn::BinOp::Mul(_val) => { + formatter.write_str("BinOp::Mul")?; + Ok(()) + } + syn::BinOp::Div(_val) => { + formatter.write_str("BinOp::Div")?; + Ok(()) + } + syn::BinOp::Rem(_val) => { + formatter.write_str("BinOp::Rem")?; + Ok(()) + } + syn::BinOp::And(_val) => { + formatter.write_str("BinOp::And")?; + Ok(()) + } + syn::BinOp::Or(_val) => { + formatter.write_str("BinOp::Or")?; + Ok(()) + } + syn::BinOp::BitXor(_val) => { + formatter.write_str("BinOp::BitXor")?; + Ok(()) + } + syn::BinOp::BitAnd(_val) => { + formatter.write_str("BinOp::BitAnd")?; + Ok(()) + } + syn::BinOp::BitOr(_val) => { + formatter.write_str("BinOp::BitOr")?; + Ok(()) + } + syn::BinOp::Shl(_val) => { + formatter.write_str("BinOp::Shl")?; + Ok(()) + } + syn::BinOp::Shr(_val) => { + formatter.write_str("BinOp::Shr")?; + Ok(()) + } + syn::BinOp::Eq(_val) => { + formatter.write_str("BinOp::Eq")?; + Ok(()) + } + syn::BinOp::Lt(_val) => { + formatter.write_str("BinOp::Lt")?; + Ok(()) + } + syn::BinOp::Le(_val) => { + formatter.write_str("BinOp::Le")?; + Ok(()) + } + syn::BinOp::Ne(_val) => { + formatter.write_str("BinOp::Ne")?; + Ok(()) + } + syn::BinOp::Ge(_val) => { + formatter.write_str("BinOp::Ge")?; + Ok(()) + } + syn::BinOp::Gt(_val) => { + formatter.write_str("BinOp::Gt")?; + Ok(()) + } + syn::BinOp::AddAssign(_val) => { + formatter.write_str("BinOp::AddAssign")?; + Ok(()) + } + syn::BinOp::SubAssign(_val) => { + formatter.write_str("BinOp::SubAssign")?; + Ok(()) + } + syn::BinOp::MulAssign(_val) => { + formatter.write_str("BinOp::MulAssign")?; + Ok(()) + } + syn::BinOp::DivAssign(_val) => { + formatter.write_str("BinOp::DivAssign")?; + Ok(()) + } + syn::BinOp::RemAssign(_val) => { + formatter.write_str("BinOp::RemAssign")?; + Ok(()) + } + syn::BinOp::BitXorAssign(_val) => { + formatter.write_str("BinOp::BitXorAssign")?; + Ok(()) + } + syn::BinOp::BitAndAssign(_val) => { + formatter.write_str("BinOp::BitAndAssign")?; + Ok(()) + } + syn::BinOp::BitOrAssign(_val) => { + formatter.write_str("BinOp::BitOrAssign")?; + Ok(()) + } + syn::BinOp::ShlAssign(_val) => { + formatter.write_str("BinOp::ShlAssign")?; + Ok(()) + } + syn::BinOp::ShrAssign(_val) => { + formatter.write_str("BinOp::ShrAssign")?; + Ok(()) + } + _ => unreachable!(), + } + } +} +impl Debug for Lite<syn::Block> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("Block"); + formatter.field("stmts", Lite(&self.value.stmts)); + formatter.finish() + } +} +impl Debug for Lite<syn::BoundLifetimes> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("BoundLifetimes"); + if !self.value.lifetimes.is_empty() { + formatter.field("lifetimes", Lite(&self.value.lifetimes)); + } + formatter.finish() + } +} +impl Debug for Lite<syn::CapturedParam> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + match &self.value { + syn::CapturedParam::Lifetime(_val) => { + formatter.write_str("CapturedParam::Lifetime")?; + formatter.write_str("(")?; + Debug::fmt(Lite(_val), formatter)?; + formatter.write_str(")")?; + 
Ok(()) + } + syn::CapturedParam::Ident(_val) => { + formatter.write_str("CapturedParam::Ident")?; + formatter.write_str("(")?; + Debug::fmt(Lite(_val), formatter)?; + formatter.write_str(")")?; + Ok(()) + } + _ => unreachable!(), + } + } +} +impl Debug for Lite<syn::ConstParam> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("ConstParam"); + if !self.value.attrs.is_empty() { + formatter.field("attrs", Lite(&self.value.attrs)); + } + formatter.field("ident", Lite(&self.value.ident)); + formatter.field("ty", Lite(&self.value.ty)); + if self.value.eq_token.is_some() { + formatter.field("eq_token", &Present); + } + if let Some(val) = &self.value.default { + #[derive(RefCast)] + #[repr(transparent)] + struct Print(syn::Expr); + impl Debug for Print { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Some(")?; + Debug::fmt(Lite(&self.0), formatter)?; + formatter.write_str(")")?; + Ok(()) + } + } + formatter.field("default", Print::ref_cast(val)); + } + formatter.finish() + } +} +impl Debug for Lite<syn::Constraint> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("Constraint"); + formatter.field("ident", Lite(&self.value.ident)); + if let Some(val) = &self.value.generics { + #[derive(RefCast)] + #[repr(transparent)] + struct Print(syn::AngleBracketedGenericArguments); + impl Debug for Print { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Some(")?; + Debug::fmt(Lite(&self.0), formatter)?; + formatter.write_str(")")?; + Ok(()) + } + } + formatter.field("generics", Print::ref_cast(val)); + } + if !self.value.bounds.is_empty() { + formatter.field("bounds", Lite(&self.value.bounds)); + } + formatter.finish() + } +} +impl Debug for Lite<syn::Data> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + match &self.value { + syn::Data::Struct(_val) => { + let mut formatter = formatter.debug_struct("Data::Struct"); + formatter.field("fields", Lite(&_val.fields)); + if _val.semi_token.is_some() { + formatter.field("semi_token", &Present); + } + formatter.finish() + } + syn::Data::Enum(_val) => { + let mut formatter = formatter.debug_struct("Data::Enum"); + if !_val.variants.is_empty() { + formatter.field("variants", Lite(&_val.variants)); + } + formatter.finish() + } + syn::Data::Union(_val) => { + let mut formatter = formatter.debug_struct("Data::Union"); + formatter.field("fields", Lite(&_val.fields)); + formatter.finish() + } + } + } +} +impl Debug for Lite<syn::DataEnum> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("DataEnum"); + if !self.value.variants.is_empty() { + formatter.field("variants", Lite(&self.value.variants)); + } + formatter.finish() + } +} +impl Debug for Lite<syn::DataStruct> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("DataStruct"); + formatter.field("fields", Lite(&self.value.fields)); + if self.value.semi_token.is_some() { + formatter.field("semi_token", &Present); + } + formatter.finish() + } +} +impl Debug for Lite<syn::DataUnion> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("DataUnion"); + formatter.field("fields", Lite(&self.value.fields)); + formatter.finish() + } +} +impl Debug for Lite<syn::DeriveInput> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> 
fmt::Result { + let mut formatter = formatter.debug_struct("DeriveInput"); + if !self.value.attrs.is_empty() { + formatter.field("attrs", Lite(&self.value.attrs)); + } + formatter.field("vis", Lite(&self.value.vis)); + formatter.field("ident", Lite(&self.value.ident)); + formatter.field("generics", Lite(&self.value.generics)); + formatter.field("data", Lite(&self.value.data)); + formatter.finish() + } +} +impl Debug for Lite<syn::Expr> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + match &self.value { + syn::Expr::Array(_val) => { + let mut formatter = formatter.debug_struct("Expr::Array"); + if !_val.attrs.is_empty() { + formatter.field("attrs", Lite(&_val.attrs)); + } + if !_val.elems.is_empty() { + formatter.field("elems", Lite(&_val.elems)); + } + formatter.finish() + } + syn::Expr::Assign(_val) => { + let mut formatter = formatter.debug_struct("Expr::Assign"); + if !_val.attrs.is_empty() { + formatter.field("attrs", Lite(&_val.attrs)); + } + formatter.field("left", Lite(&_val.left)); + formatter.field("right", Lite(&_val.right)); + formatter.finish() + } + syn::Expr::Async(_val) => { + let mut formatter = formatter.debug_struct("Expr::Async"); + if !_val.attrs.is_empty() { + formatter.field("attrs", Lite(&_val.attrs)); + } + if _val.capture.is_some() { + formatter.field("capture", &Present); + } + formatter.field("block", Lite(&_val.block)); + formatter.finish() + } + syn::Expr::Await(_val) => { + let mut formatter = formatter.debug_struct("Expr::Await"); + if !_val.attrs.is_empty() { + formatter.field("attrs", Lite(&_val.attrs)); + } + formatter.field("base", Lite(&_val.base)); + formatter.finish() + } + syn::Expr::Binary(_val) => { + let mut formatter = formatter.debug_struct("Expr::Binary"); + if !_val.attrs.is_empty() { + formatter.field("attrs", Lite(&_val.attrs)); + } + formatter.field("left", Lite(&_val.left)); + formatter.field("op", Lite(&_val.op)); + formatter.field("right", Lite(&_val.right)); + formatter.finish() + } + syn::Expr::Block(_val) => { + let mut formatter = formatter.debug_struct("Expr::Block"); + if !_val.attrs.is_empty() { + formatter.field("attrs", Lite(&_val.attrs)); + } + if let Some(val) = &_val.label { + #[derive(RefCast)] + #[repr(transparent)] + struct Print(syn::Label); + impl Debug for Print { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Some(")?; + Debug::fmt(Lite(&self.0), formatter)?; + formatter.write_str(")")?; + Ok(()) + } + } + formatter.field("label", Print::ref_cast(val)); + } + formatter.field("block", Lite(&_val.block)); + formatter.finish() + } + syn::Expr::Break(_val) => { + let mut formatter = formatter.debug_struct("Expr::Break"); + if !_val.attrs.is_empty() { + formatter.field("attrs", Lite(&_val.attrs)); + } + if let Some(val) = &_val.label { + #[derive(RefCast)] + #[repr(transparent)] + struct Print(syn::Lifetime); + impl Debug for Print { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Some(")?; + Debug::fmt(Lite(&self.0), formatter)?; + formatter.write_str(")")?; + Ok(()) + } + } + formatter.field("label", Print::ref_cast(val)); + } + if let Some(val) = &_val.expr { + #[derive(RefCast)] + #[repr(transparent)] + struct Print(Box<syn::Expr>); + impl Debug for Print { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Some(")?; + Debug::fmt(Lite(&self.0), formatter)?; + formatter.write_str(")")?; + Ok(()) + } + } + formatter.field("expr", Print::ref_cast(val)); + } + formatter.finish() + } + 
syn::Expr::Call(_val) => { + let mut formatter = formatter.debug_struct("Expr::Call"); + if !_val.attrs.is_empty() { + formatter.field("attrs", Lite(&_val.attrs)); + } + formatter.field("func", Lite(&_val.func)); + if !_val.args.is_empty() { + formatter.field("args", Lite(&_val.args)); + } + formatter.finish() + } + syn::Expr::Cast(_val) => { + let mut formatter = formatter.debug_struct("Expr::Cast"); + if !_val.attrs.is_empty() { + formatter.field("attrs", Lite(&_val.attrs)); + } + formatter.field("expr", Lite(&_val.expr)); + formatter.field("ty", Lite(&_val.ty)); + formatter.finish() + } + syn::Expr::Closure(_val) => { + let mut formatter = formatter.debug_struct("Expr::Closure"); + if !_val.attrs.is_empty() { + formatter.field("attrs", Lite(&_val.attrs)); + } + if let Some(val) = &_val.lifetimes { + #[derive(RefCast)] + #[repr(transparent)] + struct Print(syn::BoundLifetimes); + impl Debug for Print { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Some(")?; + Debug::fmt(Lite(&self.0), formatter)?; + formatter.write_str(")")?; + Ok(()) + } + } + formatter.field("lifetimes", Print::ref_cast(val)); + } + if _val.constness.is_some() { + formatter.field("constness", &Present); + } + if _val.movability.is_some() { + formatter.field("movability", &Present); + } + if _val.asyncness.is_some() { + formatter.field("asyncness", &Present); + } + if _val.capture.is_some() { + formatter.field("capture", &Present); + } + if !_val.inputs.is_empty() { + formatter.field("inputs", Lite(&_val.inputs)); + } + formatter.field("output", Lite(&_val.output)); + formatter.field("body", Lite(&_val.body)); + formatter.finish() + } + syn::Expr::Const(_val) => { + let mut formatter = formatter.debug_struct("Expr::Const"); + if !_val.attrs.is_empty() { + formatter.field("attrs", Lite(&_val.attrs)); + } + formatter.field("block", Lite(&_val.block)); + formatter.finish() + } + syn::Expr::Continue(_val) => { + let mut formatter = formatter.debug_struct("Expr::Continue"); + if !_val.attrs.is_empty() { + formatter.field("attrs", Lite(&_val.attrs)); + } + if let Some(val) = &_val.label { + #[derive(RefCast)] + #[repr(transparent)] + struct Print(syn::Lifetime); + impl Debug for Print { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Some(")?; + Debug::fmt(Lite(&self.0), formatter)?; + formatter.write_str(")")?; + Ok(()) + } + } + formatter.field("label", Print::ref_cast(val)); + } + formatter.finish() + } + syn::Expr::Field(_val) => { + let mut formatter = formatter.debug_struct("Expr::Field"); + if !_val.attrs.is_empty() { + formatter.field("attrs", Lite(&_val.attrs)); + } + formatter.field("base", Lite(&_val.base)); + formatter.field("member", Lite(&_val.member)); + formatter.finish() + } + syn::Expr::ForLoop(_val) => { + let mut formatter = formatter.debug_struct("Expr::ForLoop"); + if !_val.attrs.is_empty() { + formatter.field("attrs", Lite(&_val.attrs)); + } + if let Some(val) = &_val.label { + #[derive(RefCast)] + #[repr(transparent)] + struct Print(syn::Label); + impl Debug for Print { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Some(")?; + Debug::fmt(Lite(&self.0), formatter)?; + formatter.write_str(")")?; + Ok(()) + } + } + formatter.field("label", Print::ref_cast(val)); + } + formatter.field("pat", Lite(&_val.pat)); + formatter.field("expr", Lite(&_val.expr)); + formatter.field("body", Lite(&_val.body)); + formatter.finish() + } + syn::Expr::Group(_val) => { + let mut formatter = 
formatter.debug_struct("Expr::Group"); + if !_val.attrs.is_empty() { + formatter.field("attrs", Lite(&_val.attrs)); + } + formatter.field("expr", Lite(&_val.expr)); + formatter.finish() + } + syn::Expr::If(_val) => { + let mut formatter = formatter.debug_struct("Expr::If"); + if !_val.attrs.is_empty() { + formatter.field("attrs", Lite(&_val.attrs)); + } + formatter.field("cond", Lite(&_val.cond)); + formatter.field("then_branch", Lite(&_val.then_branch)); + if let Some(val) = &_val.else_branch { + #[derive(RefCast)] + #[repr(transparent)] + struct Print((syn::token::Else, Box<syn::Expr>)); + impl Debug for Print { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Some(")?; + Debug::fmt(Lite(&self.0.1), formatter)?; + formatter.write_str(")")?; + Ok(()) + } + } + formatter.field("else_branch", Print::ref_cast(val)); + } + formatter.finish() + } + syn::Expr::Index(_val) => { + let mut formatter = formatter.debug_struct("Expr::Index"); + if !_val.attrs.is_empty() { + formatter.field("attrs", Lite(&_val.attrs)); + } + formatter.field("expr", Lite(&_val.expr)); + formatter.field("index", Lite(&_val.index)); + formatter.finish() + } + syn::Expr::Infer(_val) => { + let mut formatter = formatter.debug_struct("Expr::Infer"); + if !_val.attrs.is_empty() { + formatter.field("attrs", Lite(&_val.attrs)); + } + formatter.finish() + } + syn::Expr::Let(_val) => { + let mut formatter = formatter.debug_struct("Expr::Let"); + if !_val.attrs.is_empty() { + formatter.field("attrs", Lite(&_val.attrs)); + } + formatter.field("pat", Lite(&_val.pat)); + formatter.field("expr", Lite(&_val.expr)); + formatter.finish() + } + syn::Expr::Lit(_val) => { + let mut formatter = formatter.debug_struct("Expr::Lit"); + if !_val.attrs.is_empty() { + formatter.field("attrs", Lite(&_val.attrs)); + } + formatter.field("lit", Lite(&_val.lit)); + formatter.finish() + } + syn::Expr::Loop(_val) => { + let mut formatter = formatter.debug_struct("Expr::Loop"); + if !_val.attrs.is_empty() { + formatter.field("attrs", Lite(&_val.attrs)); + } + if let Some(val) = &_val.label { + #[derive(RefCast)] + #[repr(transparent)] + struct Print(syn::Label); + impl Debug for Print { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Some(")?; + Debug::fmt(Lite(&self.0), formatter)?; + formatter.write_str(")")?; + Ok(()) + } + } + formatter.field("label", Print::ref_cast(val)); + } + formatter.field("body", Lite(&_val.body)); + formatter.finish() + } + syn::Expr::Macro(_val) => { + let mut formatter = formatter.debug_struct("Expr::Macro"); + if !_val.attrs.is_empty() { + formatter.field("attrs", Lite(&_val.attrs)); + } + formatter.field("mac", Lite(&_val.mac)); + formatter.finish() + } + syn::Expr::Match(_val) => { + let mut formatter = formatter.debug_struct("Expr::Match"); + if !_val.attrs.is_empty() { + formatter.field("attrs", Lite(&_val.attrs)); + } + formatter.field("expr", Lite(&_val.expr)); + if !_val.arms.is_empty() { + formatter.field("arms", Lite(&_val.arms)); + } + formatter.finish() + } + syn::Expr::MethodCall(_val) => { + let mut formatter = formatter.debug_struct("Expr::MethodCall"); + if !_val.attrs.is_empty() { + formatter.field("attrs", Lite(&_val.attrs)); + } + formatter.field("receiver", Lite(&_val.receiver)); + formatter.field("method", Lite(&_val.method)); + if let Some(val) = &_val.turbofish { + #[derive(RefCast)] + #[repr(transparent)] + struct Print(syn::AngleBracketedGenericArguments); + impl Debug for Print { + fn fmt(&self, formatter: &mut 
fmt::Formatter) -> fmt::Result { + formatter.write_str("Some(")?; + Debug::fmt(Lite(&self.0), formatter)?; + formatter.write_str(")")?; + Ok(()) + } + } + formatter.field("turbofish", Print::ref_cast(val)); + } + if !_val.args.is_empty() { + formatter.field("args", Lite(&_val.args)); + } + formatter.finish() + } + syn::Expr::Paren(_val) => { + let mut formatter = formatter.debug_struct("Expr::Paren"); + if !_val.attrs.is_empty() { + formatter.field("attrs", Lite(&_val.attrs)); + } + formatter.field("expr", Lite(&_val.expr)); + formatter.finish() + } + syn::Expr::Path(_val) => { + let mut formatter = formatter.debug_struct("Expr::Path"); + if !_val.attrs.is_empty() { + formatter.field("attrs", Lite(&_val.attrs)); + } + if let Some(val) = &_val.qself { + #[derive(RefCast)] + #[repr(transparent)] + struct Print(syn::QSelf); + impl Debug for Print { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Some(")?; + Debug::fmt(Lite(&self.0), formatter)?; + formatter.write_str(")")?; + Ok(()) + } + } + formatter.field("qself", Print::ref_cast(val)); + } + formatter.field("path", Lite(&_val.path)); + formatter.finish() + } + syn::Expr::Range(_val) => { + let mut formatter = formatter.debug_struct("Expr::Range"); + if !_val.attrs.is_empty() { + formatter.field("attrs", Lite(&_val.attrs)); + } + if let Some(val) = &_val.start { + #[derive(RefCast)] + #[repr(transparent)] + struct Print(Box<syn::Expr>); + impl Debug for Print { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Some(")?; + Debug::fmt(Lite(&self.0), formatter)?; + formatter.write_str(")")?; + Ok(()) + } + } + formatter.field("start", Print::ref_cast(val)); + } + formatter.field("limits", Lite(&_val.limits)); + if let Some(val) = &_val.end { + #[derive(RefCast)] + #[repr(transparent)] + struct Print(Box<syn::Expr>); + impl Debug for Print { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Some(")?; + Debug::fmt(Lite(&self.0), formatter)?; + formatter.write_str(")")?; + Ok(()) + } + } + formatter.field("end", Print::ref_cast(val)); + } + formatter.finish() + } + syn::Expr::RawAddr(_val) => { + let mut formatter = formatter.debug_struct("Expr::RawAddr"); + if !_val.attrs.is_empty() { + formatter.field("attrs", Lite(&_val.attrs)); + } + formatter.field("mutability", Lite(&_val.mutability)); + formatter.field("expr", Lite(&_val.expr)); + formatter.finish() + } + syn::Expr::Reference(_val) => { + let mut formatter = formatter.debug_struct("Expr::Reference"); + if !_val.attrs.is_empty() { + formatter.field("attrs", Lite(&_val.attrs)); + } + if _val.mutability.is_some() { + formatter.field("mutability", &Present); + } + formatter.field("expr", Lite(&_val.expr)); + formatter.finish() + } + syn::Expr::Repeat(_val) => { + let mut formatter = formatter.debug_struct("Expr::Repeat"); + if !_val.attrs.is_empty() { + formatter.field("attrs", Lite(&_val.attrs)); + } + formatter.field("expr", Lite(&_val.expr)); + formatter.field("len", Lite(&_val.len)); + formatter.finish() + } + syn::Expr::Return(_val) => { + let mut formatter = formatter.debug_struct("Expr::Return"); + if !_val.attrs.is_empty() { + formatter.field("attrs", Lite(&_val.attrs)); + } + if let Some(val) = &_val.expr { + #[derive(RefCast)] + #[repr(transparent)] + struct Print(Box<syn::Expr>); + impl Debug for Print { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Some(")?; + Debug::fmt(Lite(&self.0), formatter)?; + 
formatter.write_str(")")?; + Ok(()) + } + } + formatter.field("expr", Print::ref_cast(val)); + } + formatter.finish() + } + syn::Expr::Struct(_val) => { + let mut formatter = formatter.debug_struct("Expr::Struct"); + if !_val.attrs.is_empty() { + formatter.field("attrs", Lite(&_val.attrs)); + } + if let Some(val) = &_val.qself { + #[derive(RefCast)] + #[repr(transparent)] + struct Print(syn::QSelf); + impl Debug for Print { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Some(")?; + Debug::fmt(Lite(&self.0), formatter)?; + formatter.write_str(")")?; + Ok(()) + } + } + formatter.field("qself", Print::ref_cast(val)); + } + formatter.field("path", Lite(&_val.path)); + if !_val.fields.is_empty() { + formatter.field("fields", Lite(&_val.fields)); + } + if _val.dot2_token.is_some() { + formatter.field("dot2_token", &Present); + } + if let Some(val) = &_val.rest { + #[derive(RefCast)] + #[repr(transparent)] + struct Print(Box<syn::Expr>); + impl Debug for Print { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Some(")?; + Debug::fmt(Lite(&self.0), formatter)?; + formatter.write_str(")")?; + Ok(()) + } + } + formatter.field("rest", Print::ref_cast(val)); + } + formatter.finish() + } + syn::Expr::Try(_val) => { + let mut formatter = formatter.debug_struct("Expr::Try"); + if !_val.attrs.is_empty() { + formatter.field("attrs", Lite(&_val.attrs)); + } + formatter.field("expr", Lite(&_val.expr)); + formatter.finish() + } + syn::Expr::TryBlock(_val) => { + let mut formatter = formatter.debug_struct("Expr::TryBlock"); + if !_val.attrs.is_empty() { + formatter.field("attrs", Lite(&_val.attrs)); + } + formatter.field("block", Lite(&_val.block)); + formatter.finish() + } + syn::Expr::Tuple(_val) => { + let mut formatter = formatter.debug_struct("Expr::Tuple"); + if !_val.attrs.is_empty() { + formatter.field("attrs", Lite(&_val.attrs)); + } + if !_val.elems.is_empty() { + formatter.field("elems", Lite(&_val.elems)); + } + formatter.finish() + } + syn::Expr::Unary(_val) => { + let mut formatter = formatter.debug_struct("Expr::Unary"); + if !_val.attrs.is_empty() { + formatter.field("attrs", Lite(&_val.attrs)); + } + formatter.field("op", Lite(&_val.op)); + formatter.field("expr", Lite(&_val.expr)); + formatter.finish() + } + syn::Expr::Unsafe(_val) => { + let mut formatter = formatter.debug_struct("Expr::Unsafe"); + if !_val.attrs.is_empty() { + formatter.field("attrs", Lite(&_val.attrs)); + } + formatter.field("block", Lite(&_val.block)); + formatter.finish() + } + syn::Expr::Verbatim(_val) => { + formatter.write_str("Expr::Verbatim")?; + formatter.write_str("(`")?; + Display::fmt(_val, formatter)?; + formatter.write_str("`)")?; + Ok(()) + } + syn::Expr::While(_val) => { + let mut formatter = formatter.debug_struct("Expr::While"); + if !_val.attrs.is_empty() { + formatter.field("attrs", Lite(&_val.attrs)); + } + if let Some(val) = &_val.label { + #[derive(RefCast)] + #[repr(transparent)] + struct Print(syn::Label); + impl Debug for Print { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Some(")?; + Debug::fmt(Lite(&self.0), formatter)?; + formatter.write_str(")")?; + Ok(()) + } + } + formatter.field("label", Print::ref_cast(val)); + } + formatter.field("cond", Lite(&_val.cond)); + formatter.field("body", Lite(&_val.body)); + formatter.finish() + } + syn::Expr::Yield(_val) => { + let mut formatter = formatter.debug_struct("Expr::Yield"); + if !_val.attrs.is_empty() { + 
formatter.field("attrs", Lite(&_val.attrs)); + } + if let Some(val) = &_val.expr { + #[derive(RefCast)] + #[repr(transparent)] + struct Print(Box<syn::Expr>); + impl Debug for Print { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Some(")?; + Debug::fmt(Lite(&self.0), formatter)?; + formatter.write_str(")")?; + Ok(()) + } + } + formatter.field("expr", Print::ref_cast(val)); + } + formatter.finish() + } + _ => unreachable!(), + } + } +} +impl Debug for Lite<syn::ExprArray> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("ExprArray"); + if !self.value.attrs.is_empty() { + formatter.field("attrs", Lite(&self.value.attrs)); + } + if !self.value.elems.is_empty() { + formatter.field("elems", Lite(&self.value.elems)); + } + formatter.finish() + } +} +impl Debug for Lite<syn::ExprAssign> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("ExprAssign"); + if !self.value.attrs.is_empty() { + formatter.field("attrs", Lite(&self.value.attrs)); + } + formatter.field("left", Lite(&self.value.left)); + formatter.field("right", Lite(&self.value.right)); + formatter.finish() + } +} +impl Debug for Lite<syn::ExprAsync> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("ExprAsync"); + if !self.value.attrs.is_empty() { + formatter.field("attrs", Lite(&self.value.attrs)); + } + if self.value.capture.is_some() { + formatter.field("capture", &Present); + } + formatter.field("block", Lite(&self.value.block)); + formatter.finish() + } +} +impl Debug for Lite<syn::ExprAwait> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("ExprAwait"); + if !self.value.attrs.is_empty() { + formatter.field("attrs", Lite(&self.value.attrs)); + } + formatter.field("base", Lite(&self.value.base)); + formatter.finish() + } +} +impl Debug for Lite<syn::ExprBinary> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("ExprBinary"); + if !self.value.attrs.is_empty() { + formatter.field("attrs", Lite(&self.value.attrs)); + } + formatter.field("left", Lite(&self.value.left)); + formatter.field("op", Lite(&self.value.op)); + formatter.field("right", Lite(&self.value.right)); + formatter.finish() + } +} +impl Debug for Lite<syn::ExprBlock> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("ExprBlock"); + if !self.value.attrs.is_empty() { + formatter.field("attrs", Lite(&self.value.attrs)); + } + if let Some(val) = &self.value.label { + #[derive(RefCast)] + #[repr(transparent)] + struct Print(syn::Label); + impl Debug for Print { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Some(")?; + Debug::fmt(Lite(&self.0), formatter)?; + formatter.write_str(")")?; + Ok(()) + } + } + formatter.field("label", Print::ref_cast(val)); + } + formatter.field("block", Lite(&self.value.block)); + formatter.finish() + } +} +impl Debug for Lite<syn::ExprBreak> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("ExprBreak"); + if !self.value.attrs.is_empty() { + formatter.field("attrs", Lite(&self.value.attrs)); + } + if let Some(val) = &self.value.label { + #[derive(RefCast)] + #[repr(transparent)] + struct Print(syn::Lifetime); + impl Debug for Print { + fn 
fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Some(")?; + Debug::fmt(Lite(&self.0), formatter)?; + formatter.write_str(")")?; + Ok(()) + } + } + formatter.field("label", Print::ref_cast(val)); + } + if let Some(val) = &self.value.expr { + #[derive(RefCast)] + #[repr(transparent)] + struct Print(Box<syn::Expr>); + impl Debug for Print { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Some(")?; + Debug::fmt(Lite(&self.0), formatter)?; + formatter.write_str(")")?; + Ok(()) + } + } + formatter.field("expr", Print::ref_cast(val)); + } + formatter.finish() + } +} +impl Debug for Lite<syn::ExprCall> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("ExprCall"); + if !self.value.attrs.is_empty() { + formatter.field("attrs", Lite(&self.value.attrs)); + } + formatter.field("func", Lite(&self.value.func)); + if !self.value.args.is_empty() { + formatter.field("args", Lite(&self.value.args)); + } + formatter.finish() + } +} +impl Debug for Lite<syn::ExprCast> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("ExprCast"); + if !self.value.attrs.is_empty() { + formatter.field("attrs", Lite(&self.value.attrs)); + } + formatter.field("expr", Lite(&self.value.expr)); + formatter.field("ty", Lite(&self.value.ty)); + formatter.finish() + } +} +impl Debug for Lite<syn::ExprClosure> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("ExprClosure"); + if !self.value.attrs.is_empty() { + formatter.field("attrs", Lite(&self.value.attrs)); + } + if let Some(val) = &self.value.lifetimes { + #[derive(RefCast)] + #[repr(transparent)] + struct Print(syn::BoundLifetimes); + impl Debug for Print { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Some(")?; + Debug::fmt(Lite(&self.0), formatter)?; + formatter.write_str(")")?; + Ok(()) + } + } + formatter.field("lifetimes", Print::ref_cast(val)); + } + if self.value.constness.is_some() { + formatter.field("constness", &Present); + } + if self.value.movability.is_some() { + formatter.field("movability", &Present); + } + if self.value.asyncness.is_some() { + formatter.field("asyncness", &Present); + } + if self.value.capture.is_some() { + formatter.field("capture", &Present); + } + if !self.value.inputs.is_empty() { + formatter.field("inputs", Lite(&self.value.inputs)); + } + formatter.field("output", Lite(&self.value.output)); + formatter.field("body", Lite(&self.value.body)); + formatter.finish() + } +} +impl Debug for Lite<syn::ExprConst> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("ExprConst"); + if !self.value.attrs.is_empty() { + formatter.field("attrs", Lite(&self.value.attrs)); + } + formatter.field("block", Lite(&self.value.block)); + formatter.finish() + } +} +impl Debug for Lite<syn::ExprContinue> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("ExprContinue"); + if !self.value.attrs.is_empty() { + formatter.field("attrs", Lite(&self.value.attrs)); + } + if let Some(val) = &self.value.label { + #[derive(RefCast)] + #[repr(transparent)] + struct Print(syn::Lifetime); + impl Debug for Print { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Some(")?; + Debug::fmt(Lite(&self.0), formatter)?; + 
formatter.write_str(")")?; + Ok(()) + } + } + formatter.field("label", Print::ref_cast(val)); + } + formatter.finish() + } +} +impl Debug for Lite<syn::ExprField> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("ExprField"); + if !self.value.attrs.is_empty() { + formatter.field("attrs", Lite(&self.value.attrs)); + } + formatter.field("base", Lite(&self.value.base)); + formatter.field("member", Lite(&self.value.member)); + formatter.finish() + } +} +impl Debug for Lite<syn::ExprForLoop> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("ExprForLoop"); + if !self.value.attrs.is_empty() { + formatter.field("attrs", Lite(&self.value.attrs)); + } + if let Some(val) = &self.value.label { + #[derive(RefCast)] + #[repr(transparent)] + struct Print(syn::Label); + impl Debug for Print { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Some(")?; + Debug::fmt(Lite(&self.0), formatter)?; + formatter.write_str(")")?; + Ok(()) + } + } + formatter.field("label", Print::ref_cast(val)); + } + formatter.field("pat", Lite(&self.value.pat)); + formatter.field("expr", Lite(&self.value.expr)); + formatter.field("body", Lite(&self.value.body)); + formatter.finish() + } +} +impl Debug for Lite<syn::ExprGroup> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("ExprGroup"); + if !self.value.attrs.is_empty() { + formatter.field("attrs", Lite(&self.value.attrs)); + } + formatter.field("expr", Lite(&self.value.expr)); + formatter.finish() + } +} +impl Debug for Lite<syn::ExprIf> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("ExprIf"); + if !self.value.attrs.is_empty() { + formatter.field("attrs", Lite(&self.value.attrs)); + } + formatter.field("cond", Lite(&self.value.cond)); + formatter.field("then_branch", Lite(&self.value.then_branch)); + if let Some(val) = &self.value.else_branch { + #[derive(RefCast)] + #[repr(transparent)] + struct Print((syn::token::Else, Box<syn::Expr>)); + impl Debug for Print { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Some(")?; + Debug::fmt(Lite(&self.0.1), formatter)?; + formatter.write_str(")")?; + Ok(()) + } + } + formatter.field("else_branch", Print::ref_cast(val)); + } + formatter.finish() + } +} +impl Debug for Lite<syn::ExprIndex> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("ExprIndex"); + if !self.value.attrs.is_empty() { + formatter.field("attrs", Lite(&self.value.attrs)); + } + formatter.field("expr", Lite(&self.value.expr)); + formatter.field("index", Lite(&self.value.index)); + formatter.finish() + } +} +impl Debug for Lite<syn::ExprInfer> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("ExprInfer"); + if !self.value.attrs.is_empty() { + formatter.field("attrs", Lite(&self.value.attrs)); + } + formatter.finish() + } +} +impl Debug for Lite<syn::ExprLet> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("ExprLet"); + if !self.value.attrs.is_empty() { + formatter.field("attrs", Lite(&self.value.attrs)); + } + formatter.field("pat", Lite(&self.value.pat)); + formatter.field("expr", Lite(&self.value.expr)); + formatter.finish() + } +} +impl Debug for 
Lite<syn::ExprLit> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("ExprLit"); + if !self.value.attrs.is_empty() { + formatter.field("attrs", Lite(&self.value.attrs)); + } + formatter.field("lit", Lite(&self.value.lit)); + formatter.finish() + } +} +impl Debug for Lite<syn::ExprLoop> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("ExprLoop"); + if !self.value.attrs.is_empty() { + formatter.field("attrs", Lite(&self.value.attrs)); + } + if let Some(val) = &self.value.label { + #[derive(RefCast)] + #[repr(transparent)] + struct Print(syn::Label); + impl Debug for Print { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Some(")?; + Debug::fmt(Lite(&self.0), formatter)?; + formatter.write_str(")")?; + Ok(()) + } + } + formatter.field("label", Print::ref_cast(val)); + } + formatter.field("body", Lite(&self.value.body)); + formatter.finish() + } +} +impl Debug for Lite<syn::ExprMacro> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("ExprMacro"); + if !self.value.attrs.is_empty() { + formatter.field("attrs", Lite(&self.value.attrs)); + } + formatter.field("mac", Lite(&self.value.mac)); + formatter.finish() + } +} +impl Debug for Lite<syn::ExprMatch> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("ExprMatch"); + if !self.value.attrs.is_empty() { + formatter.field("attrs", Lite(&self.value.attrs)); + } + formatter.field("expr", Lite(&self.value.expr)); + if !self.value.arms.is_empty() { + formatter.field("arms", Lite(&self.value.arms)); + } + formatter.finish() + } +} +impl Debug for Lite<syn::ExprMethodCall> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("ExprMethodCall"); + if !self.value.attrs.is_empty() { + formatter.field("attrs", Lite(&self.value.attrs)); + } + formatter.field("receiver", Lite(&self.value.receiver)); + formatter.field("method", Lite(&self.value.method)); + if let Some(val) = &self.value.turbofish { + #[derive(RefCast)] + #[repr(transparent)] + struct Print(syn::AngleBracketedGenericArguments); + impl Debug for Print { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Some(")?; + Debug::fmt(Lite(&self.0), formatter)?; + formatter.write_str(")")?; + Ok(()) + } + } + formatter.field("turbofish", Print::ref_cast(val)); + } + if !self.value.args.is_empty() { + formatter.field("args", Lite(&self.value.args)); + } + formatter.finish() + } +} +impl Debug for Lite<syn::ExprParen> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("ExprParen"); + if !self.value.attrs.is_empty() { + formatter.field("attrs", Lite(&self.value.attrs)); + } + formatter.field("expr", Lite(&self.value.expr)); + formatter.finish() + } +} +impl Debug for Lite<syn::ExprPath> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("ExprPath"); + if !self.value.attrs.is_empty() { + formatter.field("attrs", Lite(&self.value.attrs)); + } + if let Some(val) = &self.value.qself { + #[derive(RefCast)] + #[repr(transparent)] + struct Print(syn::QSelf); + impl Debug for Print { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Some(")?; + Debug::fmt(Lite(&self.0), formatter)?; 
+ formatter.write_str(")")?; + Ok(()) + } + } + formatter.field("qself", Print::ref_cast(val)); + } + formatter.field("path", Lite(&self.value.path)); + formatter.finish() + } +} +impl Debug for Lite<syn::ExprRange> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("ExprRange"); + if !self.value.attrs.is_empty() { + formatter.field("attrs", Lite(&self.value.attrs)); + } + if let Some(val) = &self.value.start { + #[derive(RefCast)] + #[repr(transparent)] + struct Print(Box<syn::Expr>); + impl Debug for Print { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Some(")?; + Debug::fmt(Lite(&self.0), formatter)?; + formatter.write_str(")")?; + Ok(()) + } + } + formatter.field("start", Print::ref_cast(val)); + } + formatter.field("limits", Lite(&self.value.limits)); + if let Some(val) = &self.value.end { + #[derive(RefCast)] + #[repr(transparent)] + struct Print(Box<syn::Expr>); + impl Debug for Print { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Some(")?; + Debug::fmt(Lite(&self.0), formatter)?; + formatter.write_str(")")?; + Ok(()) + } + } + formatter.field("end", Print::ref_cast(val)); + } + formatter.finish() + } +} +impl Debug for Lite<syn::ExprRawAddr> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("ExprRawAddr"); + if !self.value.attrs.is_empty() { + formatter.field("attrs", Lite(&self.value.attrs)); + } + formatter.field("mutability", Lite(&self.value.mutability)); + formatter.field("expr", Lite(&self.value.expr)); + formatter.finish() + } +} +impl Debug for Lite<syn::ExprReference> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("ExprReference"); + if !self.value.attrs.is_empty() { + formatter.field("attrs", Lite(&self.value.attrs)); + } + if self.value.mutability.is_some() { + formatter.field("mutability", &Present); + } + formatter.field("expr", Lite(&self.value.expr)); + formatter.finish() + } +} +impl Debug for Lite<syn::ExprRepeat> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("ExprRepeat"); + if !self.value.attrs.is_empty() { + formatter.field("attrs", Lite(&self.value.attrs)); + } + formatter.field("expr", Lite(&self.value.expr)); + formatter.field("len", Lite(&self.value.len)); + formatter.finish() + } +} +impl Debug for Lite<syn::ExprReturn> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("ExprReturn"); + if !self.value.attrs.is_empty() { + formatter.field("attrs", Lite(&self.value.attrs)); + } + if let Some(val) = &self.value.expr { + #[derive(RefCast)] + #[repr(transparent)] + struct Print(Box<syn::Expr>); + impl Debug for Print { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Some(")?; + Debug::fmt(Lite(&self.0), formatter)?; + formatter.write_str(")")?; + Ok(()) + } + } + formatter.field("expr", Print::ref_cast(val)); + } + formatter.finish() + } +} +impl Debug for Lite<syn::ExprStruct> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("ExprStruct"); + if !self.value.attrs.is_empty() { + formatter.field("attrs", Lite(&self.value.attrs)); + } + if let Some(val) = &self.value.qself { + #[derive(RefCast)] + #[repr(transparent)] + struct Print(syn::QSelf); + impl Debug for Print { 
+ fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Some(")?; + Debug::fmt(Lite(&self.0), formatter)?; + formatter.write_str(")")?; + Ok(()) + } + } + formatter.field("qself", Print::ref_cast(val)); + } + formatter.field("path", Lite(&self.value.path)); + if !self.value.fields.is_empty() { + formatter.field("fields", Lite(&self.value.fields)); + } + if self.value.dot2_token.is_some() { + formatter.field("dot2_token", &Present); + } + if let Some(val) = &self.value.rest { + #[derive(RefCast)] + #[repr(transparent)] + struct Print(Box<syn::Expr>); + impl Debug for Print { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Some(")?; + Debug::fmt(Lite(&self.0), formatter)?; + formatter.write_str(")")?; + Ok(()) + } + } + formatter.field("rest", Print::ref_cast(val)); + } + formatter.finish() + } +} +impl Debug for Lite<syn::ExprTry> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("ExprTry"); + if !self.value.attrs.is_empty() { + formatter.field("attrs", Lite(&self.value.attrs)); + } + formatter.field("expr", Lite(&self.value.expr)); + formatter.finish() + } +} +impl Debug for Lite<syn::ExprTryBlock> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("ExprTryBlock"); + if !self.value.attrs.is_empty() { + formatter.field("attrs", Lite(&self.value.attrs)); + } + formatter.field("block", Lite(&self.value.block)); + formatter.finish() + } +} +impl Debug for Lite<syn::ExprTuple> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("ExprTuple"); + if !self.value.attrs.is_empty() { + formatter.field("attrs", Lite(&self.value.attrs)); + } + if !self.value.elems.is_empty() { + formatter.field("elems", Lite(&self.value.elems)); + } + formatter.finish() + } +} +impl Debug for Lite<syn::ExprUnary> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("ExprUnary"); + if !self.value.attrs.is_empty() { + formatter.field("attrs", Lite(&self.value.attrs)); + } + formatter.field("op", Lite(&self.value.op)); + formatter.field("expr", Lite(&self.value.expr)); + formatter.finish() + } +} +impl Debug for Lite<syn::ExprUnsafe> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("ExprUnsafe"); + if !self.value.attrs.is_empty() { + formatter.field("attrs", Lite(&self.value.attrs)); + } + formatter.field("block", Lite(&self.value.block)); + formatter.finish() + } +} +impl Debug for Lite<syn::ExprWhile> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("ExprWhile"); + if !self.value.attrs.is_empty() { + formatter.field("attrs", Lite(&self.value.attrs)); + } + if let Some(val) = &self.value.label { + #[derive(RefCast)] + #[repr(transparent)] + struct Print(syn::Label); + impl Debug for Print { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Some(")?; + Debug::fmt(Lite(&self.0), formatter)?; + formatter.write_str(")")?; + Ok(()) + } + } + formatter.field("label", Print::ref_cast(val)); + } + formatter.field("cond", Lite(&self.value.cond)); + formatter.field("body", Lite(&self.value.body)); + formatter.finish() + } +} +impl Debug for Lite<syn::ExprYield> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = 
formatter.debug_struct("ExprYield"); + if !self.value.attrs.is_empty() { + formatter.field("attrs", Lite(&self.value.attrs)); + } + if let Some(val) = &self.value.expr { + #[derive(RefCast)] + #[repr(transparent)] + struct Print(Box<syn::Expr>); + impl Debug for Print { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Some(")?; + Debug::fmt(Lite(&self.0), formatter)?; + formatter.write_str(")")?; + Ok(()) + } + } + formatter.field("expr", Print::ref_cast(val)); + } + formatter.finish() + } +} +impl Debug for Lite<syn::Field> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("Field"); + if !self.value.attrs.is_empty() { + formatter.field("attrs", Lite(&self.value.attrs)); + } + formatter.field("vis", Lite(&self.value.vis)); + match self.value.mutability { + syn::FieldMutability::None => {} + _ => { + formatter.field("mutability", Lite(&self.value.mutability)); + } + } + if let Some(val) = &self.value.ident { + #[derive(RefCast)] + #[repr(transparent)] + struct Print(proc_macro2::Ident); + impl Debug for Print { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Some(")?; + Debug::fmt(Lite(&self.0), formatter)?; + formatter.write_str(")")?; + Ok(()) + } + } + formatter.field("ident", Print::ref_cast(val)); + } + if self.value.colon_token.is_some() { + formatter.field("colon_token", &Present); + } + formatter.field("ty", Lite(&self.value.ty)); + formatter.finish() + } +} +impl Debug for Lite<syn::FieldMutability> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + match &self.value { + syn::FieldMutability::None => formatter.write_str("FieldMutability::None"), + _ => unreachable!(), + } + } +} +impl Debug for Lite<syn::FieldPat> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("FieldPat"); + if !self.value.attrs.is_empty() { + formatter.field("attrs", Lite(&self.value.attrs)); + } + formatter.field("member", Lite(&self.value.member)); + if self.value.colon_token.is_some() { + formatter.field("colon_token", &Present); + } + formatter.field("pat", Lite(&self.value.pat)); + formatter.finish() + } +} +impl Debug for Lite<syn::FieldValue> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("FieldValue"); + if !self.value.attrs.is_empty() { + formatter.field("attrs", Lite(&self.value.attrs)); + } + formatter.field("member", Lite(&self.value.member)); + if self.value.colon_token.is_some() { + formatter.field("colon_token", &Present); + } + formatter.field("expr", Lite(&self.value.expr)); + formatter.finish() + } +} +impl Debug for Lite<syn::Fields> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + match &self.value { + syn::Fields::Named(_val) => { + let mut formatter = formatter.debug_struct("Fields::Named"); + if !_val.named.is_empty() { + formatter.field("named", Lite(&_val.named)); + } + formatter.finish() + } + syn::Fields::Unnamed(_val) => { + let mut formatter = formatter.debug_struct("Fields::Unnamed"); + if !_val.unnamed.is_empty() { + formatter.field("unnamed", Lite(&_val.unnamed)); + } + formatter.finish() + } + syn::Fields::Unit => formatter.write_str("Fields::Unit"), + } + } +} +impl Debug for Lite<syn::FieldsNamed> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("FieldsNamed"); + if !self.value.named.is_empty() { + 
formatter.field("named", Lite(&self.value.named)); + } + formatter.finish() + } +} +impl Debug for Lite<syn::FieldsUnnamed> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("FieldsUnnamed"); + if !self.value.unnamed.is_empty() { + formatter.field("unnamed", Lite(&self.value.unnamed)); + } + formatter.finish() + } +} +impl Debug for Lite<syn::File> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("File"); + if let Some(val) = &self.value.shebang { + #[derive(RefCast)] + #[repr(transparent)] + struct Print(String); + impl Debug for Print { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Some(")?; + Debug::fmt(Lite(&self.0), formatter)?; + formatter.write_str(")")?; + Ok(()) + } + } + formatter.field("shebang", Print::ref_cast(val)); + } + if !self.value.attrs.is_empty() { + formatter.field("attrs", Lite(&self.value.attrs)); + } + if !self.value.items.is_empty() { + formatter.field("items", Lite(&self.value.items)); + } + formatter.finish() + } +} +impl Debug for Lite<syn::FnArg> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + match &self.value { + syn::FnArg::Receiver(_val) => { + formatter.write_str("FnArg::Receiver")?; + formatter.write_str("(")?; + Debug::fmt(Lite(_val), formatter)?; + formatter.write_str(")")?; + Ok(()) + } + syn::FnArg::Typed(_val) => { + formatter.write_str("FnArg::Typed")?; + formatter.write_str("(")?; + Debug::fmt(Lite(_val), formatter)?; + formatter.write_str(")")?; + Ok(()) + } + } + } +} +impl Debug for Lite<syn::ForeignItem> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + match &self.value { + syn::ForeignItem::Fn(_val) => { + let mut formatter = formatter.debug_struct("ForeignItem::Fn"); + if !_val.attrs.is_empty() { + formatter.field("attrs", Lite(&_val.attrs)); + } + formatter.field("vis", Lite(&_val.vis)); + formatter.field("sig", Lite(&_val.sig)); + formatter.finish() + } + syn::ForeignItem::Static(_val) => { + let mut formatter = formatter.debug_struct("ForeignItem::Static"); + if !_val.attrs.is_empty() { + formatter.field("attrs", Lite(&_val.attrs)); + } + formatter.field("vis", Lite(&_val.vis)); + match _val.mutability { + syn::StaticMutability::None => {} + _ => { + formatter.field("mutability", Lite(&_val.mutability)); + } + } + formatter.field("ident", Lite(&_val.ident)); + formatter.field("ty", Lite(&_val.ty)); + formatter.finish() + } + syn::ForeignItem::Type(_val) => { + let mut formatter = formatter.debug_struct("ForeignItem::Type"); + if !_val.attrs.is_empty() { + formatter.field("attrs", Lite(&_val.attrs)); + } + formatter.field("vis", Lite(&_val.vis)); + formatter.field("ident", Lite(&_val.ident)); + formatter.field("generics", Lite(&_val.generics)); + formatter.finish() + } + syn::ForeignItem::Macro(_val) => { + let mut formatter = formatter.debug_struct("ForeignItem::Macro"); + if !_val.attrs.is_empty() { + formatter.field("attrs", Lite(&_val.attrs)); + } + formatter.field("mac", Lite(&_val.mac)); + if _val.semi_token.is_some() { + formatter.field("semi_token", &Present); + } + formatter.finish() + } + syn::ForeignItem::Verbatim(_val) => { + formatter.write_str("ForeignItem::Verbatim")?; + formatter.write_str("(`")?; + Display::fmt(_val, formatter)?; + formatter.write_str("`)")?; + Ok(()) + } + _ => unreachable!(), + } + } +} +impl Debug for Lite<syn::ForeignItemFn> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let 
mut formatter = formatter.debug_struct("ForeignItemFn"); + if !self.value.attrs.is_empty() { + formatter.field("attrs", Lite(&self.value.attrs)); + } + formatter.field("vis", Lite(&self.value.vis)); + formatter.field("sig", Lite(&self.value.sig)); + formatter.finish() + } +} +impl Debug for Lite<syn::ForeignItemMacro> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("ForeignItemMacro"); + if !self.value.attrs.is_empty() { + formatter.field("attrs", Lite(&self.value.attrs)); + } + formatter.field("mac", Lite(&self.value.mac)); + if self.value.semi_token.is_some() { + formatter.field("semi_token", &Present); + } + formatter.finish() + } +} +impl Debug for Lite<syn::ForeignItemStatic> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("ForeignItemStatic"); + if !self.value.attrs.is_empty() { + formatter.field("attrs", Lite(&self.value.attrs)); + } + formatter.field("vis", Lite(&self.value.vis)); + match self.value.mutability { + syn::StaticMutability::None => {} + _ => { + formatter.field("mutability", Lite(&self.value.mutability)); + } + } + formatter.field("ident", Lite(&self.value.ident)); + formatter.field("ty", Lite(&self.value.ty)); + formatter.finish() + } +} +impl Debug for Lite<syn::ForeignItemType> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("ForeignItemType"); + if !self.value.attrs.is_empty() { + formatter.field("attrs", Lite(&self.value.attrs)); + } + formatter.field("vis", Lite(&self.value.vis)); + formatter.field("ident", Lite(&self.value.ident)); + formatter.field("generics", Lite(&self.value.generics)); + formatter.finish() + } +} +impl Debug for Lite<syn::GenericArgument> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + match &self.value { + syn::GenericArgument::Lifetime(_val) => { + formatter.write_str("GenericArgument::Lifetime")?; + formatter.write_str("(")?; + Debug::fmt(Lite(_val), formatter)?; + formatter.write_str(")")?; + Ok(()) + } + syn::GenericArgument::Type(_val) => { + formatter.write_str("GenericArgument::Type")?; + formatter.write_str("(")?; + Debug::fmt(Lite(_val), formatter)?; + formatter.write_str(")")?; + Ok(()) + } + syn::GenericArgument::Const(_val) => { + formatter.write_str("GenericArgument::Const")?; + formatter.write_str("(")?; + Debug::fmt(Lite(_val), formatter)?; + formatter.write_str(")")?; + Ok(()) + } + syn::GenericArgument::AssocType(_val) => { + formatter.write_str("GenericArgument::AssocType")?; + formatter.write_str("(")?; + Debug::fmt(Lite(_val), formatter)?; + formatter.write_str(")")?; + Ok(()) + } + syn::GenericArgument::AssocConst(_val) => { + formatter.write_str("GenericArgument::AssocConst")?; + formatter.write_str("(")?; + Debug::fmt(Lite(_val), formatter)?; + formatter.write_str(")")?; + Ok(()) + } + syn::GenericArgument::Constraint(_val) => { + formatter.write_str("GenericArgument::Constraint")?; + formatter.write_str("(")?; + Debug::fmt(Lite(_val), formatter)?; + formatter.write_str(")")?; + Ok(()) + } + _ => unreachable!(), + } + } +} +impl Debug for Lite<syn::GenericParam> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + match &self.value { + syn::GenericParam::Lifetime(_val) => { + formatter.write_str("GenericParam::Lifetime")?; + formatter.write_str("(")?; + Debug::fmt(Lite(_val), formatter)?; + formatter.write_str(")")?; + Ok(()) + } + syn::GenericParam::Type(_val) => { + 
formatter.write_str("GenericParam::Type")?; + formatter.write_str("(")?; + Debug::fmt(Lite(_val), formatter)?; + formatter.write_str(")")?; + Ok(()) + } + syn::GenericParam::Const(_val) => { + formatter.write_str("GenericParam::Const")?; + formatter.write_str("(")?; + Debug::fmt(Lite(_val), formatter)?; + formatter.write_str(")")?; + Ok(()) + } + } + } +} +impl Debug for Lite<syn::Generics> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("Generics"); + if self.value.lt_token.is_some() { + formatter.field("lt_token", &Present); + } + if !self.value.params.is_empty() { + formatter.field("params", Lite(&self.value.params)); + } + if self.value.gt_token.is_some() { + formatter.field("gt_token", &Present); + } + if let Some(val) = &self.value.where_clause { + #[derive(RefCast)] + #[repr(transparent)] + struct Print(syn::WhereClause); + impl Debug for Print { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Some(")?; + Debug::fmt(Lite(&self.0), formatter)?; + formatter.write_str(")")?; + Ok(()) + } + } + formatter.field("where_clause", Print::ref_cast(val)); + } + formatter.finish() + } +} +impl Debug for Lite<syn::ImplItem> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + match &self.value { + syn::ImplItem::Const(_val) => { + let mut formatter = formatter.debug_struct("ImplItem::Const"); + if !_val.attrs.is_empty() { + formatter.field("attrs", Lite(&_val.attrs)); + } + formatter.field("vis", Lite(&_val.vis)); + if _val.defaultness.is_some() { + formatter.field("defaultness", &Present); + } + formatter.field("ident", Lite(&_val.ident)); + formatter.field("generics", Lite(&_val.generics)); + formatter.field("ty", Lite(&_val.ty)); + formatter.field("expr", Lite(&_val.expr)); + formatter.finish() + } + syn::ImplItem::Fn(_val) => { + let mut formatter = formatter.debug_struct("ImplItem::Fn"); + if !_val.attrs.is_empty() { + formatter.field("attrs", Lite(&_val.attrs)); + } + formatter.field("vis", Lite(&_val.vis)); + if _val.defaultness.is_some() { + formatter.field("defaultness", &Present); + } + formatter.field("sig", Lite(&_val.sig)); + formatter.field("block", Lite(&_val.block)); + formatter.finish() + } + syn::ImplItem::Type(_val) => { + let mut formatter = formatter.debug_struct("ImplItem::Type"); + if !_val.attrs.is_empty() { + formatter.field("attrs", Lite(&_val.attrs)); + } + formatter.field("vis", Lite(&_val.vis)); + if _val.defaultness.is_some() { + formatter.field("defaultness", &Present); + } + formatter.field("ident", Lite(&_val.ident)); + formatter.field("generics", Lite(&_val.generics)); + formatter.field("ty", Lite(&_val.ty)); + formatter.finish() + } + syn::ImplItem::Macro(_val) => { + let mut formatter = formatter.debug_struct("ImplItem::Macro"); + if !_val.attrs.is_empty() { + formatter.field("attrs", Lite(&_val.attrs)); + } + formatter.field("mac", Lite(&_val.mac)); + if _val.semi_token.is_some() { + formatter.field("semi_token", &Present); + } + formatter.finish() + } + syn::ImplItem::Verbatim(_val) => { + formatter.write_str("ImplItem::Verbatim")?; + formatter.write_str("(`")?; + Display::fmt(_val, formatter)?; + formatter.write_str("`)")?; + Ok(()) + } + _ => unreachable!(), + } + } +} +impl Debug for Lite<syn::ImplItemConst> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("ImplItemConst"); + if !self.value.attrs.is_empty() { + formatter.field("attrs", Lite(&self.value.attrs)); + } + 
formatter.field("vis", Lite(&self.value.vis)); + if self.value.defaultness.is_some() { + formatter.field("defaultness", &Present); + } + formatter.field("ident", Lite(&self.value.ident)); + formatter.field("generics", Lite(&self.value.generics)); + formatter.field("ty", Lite(&self.value.ty)); + formatter.field("expr", Lite(&self.value.expr)); + formatter.finish() + } +} +impl Debug for Lite<syn::ImplItemFn> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("ImplItemFn"); + if !self.value.attrs.is_empty() { + formatter.field("attrs", Lite(&self.value.attrs)); + } + formatter.field("vis", Lite(&self.value.vis)); + if self.value.defaultness.is_some() { + formatter.field("defaultness", &Present); + } + formatter.field("sig", Lite(&self.value.sig)); + formatter.field("block", Lite(&self.value.block)); + formatter.finish() + } +} +impl Debug for Lite<syn::ImplItemMacro> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("ImplItemMacro"); + if !self.value.attrs.is_empty() { + formatter.field("attrs", Lite(&self.value.attrs)); + } + formatter.field("mac", Lite(&self.value.mac)); + if self.value.semi_token.is_some() { + formatter.field("semi_token", &Present); + } + formatter.finish() + } +} +impl Debug for Lite<syn::ImplItemType> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("ImplItemType"); + if !self.value.attrs.is_empty() { + formatter.field("attrs", Lite(&self.value.attrs)); + } + formatter.field("vis", Lite(&self.value.vis)); + if self.value.defaultness.is_some() { + formatter.field("defaultness", &Present); + } + formatter.field("ident", Lite(&self.value.ident)); + formatter.field("generics", Lite(&self.value.generics)); + formatter.field("ty", Lite(&self.value.ty)); + formatter.finish() + } +} +impl Debug for Lite<syn::ImplRestriction> { + fn fmt(&self, _formatter: &mut fmt::Formatter) -> fmt::Result { + unreachable!() + } +} +impl Debug for Lite<syn::Index> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("Index"); + formatter.field("index", Lite(&self.value.index)); + formatter.finish() + } +} +impl Debug for Lite<syn::Item> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + match &self.value { + syn::Item::Const(_val) => { + let mut formatter = formatter.debug_struct("Item::Const"); + if !_val.attrs.is_empty() { + formatter.field("attrs", Lite(&_val.attrs)); + } + formatter.field("vis", Lite(&_val.vis)); + formatter.field("ident", Lite(&_val.ident)); + formatter.field("generics", Lite(&_val.generics)); + formatter.field("ty", Lite(&_val.ty)); + formatter.field("expr", Lite(&_val.expr)); + formatter.finish() + } + syn::Item::Enum(_val) => { + let mut formatter = formatter.debug_struct("Item::Enum"); + if !_val.attrs.is_empty() { + formatter.field("attrs", Lite(&_val.attrs)); + } + formatter.field("vis", Lite(&_val.vis)); + formatter.field("ident", Lite(&_val.ident)); + formatter.field("generics", Lite(&_val.generics)); + if !_val.variants.is_empty() { + formatter.field("variants", Lite(&_val.variants)); + } + formatter.finish() + } + syn::Item::ExternCrate(_val) => { + let mut formatter = formatter.debug_struct("Item::ExternCrate"); + if !_val.attrs.is_empty() { + formatter.field("attrs", Lite(&_val.attrs)); + } + formatter.field("vis", Lite(&_val.vis)); + formatter.field("ident", Lite(&_val.ident)); + if let Some(val) = 
&_val.rename { + #[derive(RefCast)] + #[repr(transparent)] + struct Print((syn::token::As, proc_macro2::Ident)); + impl Debug for Print { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Some(")?; + Debug::fmt(Lite(&self.0.1), formatter)?; + formatter.write_str(")")?; + Ok(()) + } + } + formatter.field("rename", Print::ref_cast(val)); + } + formatter.finish() + } + syn::Item::Fn(_val) => { + let mut formatter = formatter.debug_struct("Item::Fn"); + if !_val.attrs.is_empty() { + formatter.field("attrs", Lite(&_val.attrs)); + } + formatter.field("vis", Lite(&_val.vis)); + formatter.field("sig", Lite(&_val.sig)); + formatter.field("block", Lite(&_val.block)); + formatter.finish() + } + syn::Item::ForeignMod(_val) => { + let mut formatter = formatter.debug_struct("Item::ForeignMod"); + if !_val.attrs.is_empty() { + formatter.field("attrs", Lite(&_val.attrs)); + } + if _val.unsafety.is_some() { + formatter.field("unsafety", &Present); + } + formatter.field("abi", Lite(&_val.abi)); + if !_val.items.is_empty() { + formatter.field("items", Lite(&_val.items)); + } + formatter.finish() + } + syn::Item::Impl(_val) => { + let mut formatter = formatter.debug_struct("Item::Impl"); + if !_val.attrs.is_empty() { + formatter.field("attrs", Lite(&_val.attrs)); + } + if _val.defaultness.is_some() { + formatter.field("defaultness", &Present); + } + if _val.unsafety.is_some() { + formatter.field("unsafety", &Present); + } + formatter.field("generics", Lite(&_val.generics)); + if let Some(val) = &_val.trait_ { + #[derive(RefCast)] + #[repr(transparent)] + struct Print((Option<syn::token::Not>, syn::Path, syn::token::For)); + impl Debug for Print { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Some(")?; + Debug::fmt( + &( + &super::Option { + present: self.0.0.is_some(), + }, + Lite(&self.0.1), + ), + formatter, + )?; + formatter.write_str(")")?; + Ok(()) + } + } + formatter.field("trait_", Print::ref_cast(val)); + } + formatter.field("self_ty", Lite(&_val.self_ty)); + if !_val.items.is_empty() { + formatter.field("items", Lite(&_val.items)); + } + formatter.finish() + } + syn::Item::Macro(_val) => { + let mut formatter = formatter.debug_struct("Item::Macro"); + if !_val.attrs.is_empty() { + formatter.field("attrs", Lite(&_val.attrs)); + } + if let Some(val) = &_val.ident { + #[derive(RefCast)] + #[repr(transparent)] + struct Print(proc_macro2::Ident); + impl Debug for Print { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Some(")?; + Debug::fmt(Lite(&self.0), formatter)?; + formatter.write_str(")")?; + Ok(()) + } + } + formatter.field("ident", Print::ref_cast(val)); + } + formatter.field("mac", Lite(&_val.mac)); + if _val.semi_token.is_some() { + formatter.field("semi_token", &Present); + } + formatter.finish() + } + syn::Item::Mod(_val) => { + let mut formatter = formatter.debug_struct("Item::Mod"); + if !_val.attrs.is_empty() { + formatter.field("attrs", Lite(&_val.attrs)); + } + formatter.field("vis", Lite(&_val.vis)); + if _val.unsafety.is_some() { + formatter.field("unsafety", &Present); + } + formatter.field("ident", Lite(&_val.ident)); + if let Some(val) = &_val.content { + #[derive(RefCast)] + #[repr(transparent)] + struct Print((syn::token::Brace, Vec<syn::Item>)); + impl Debug for Print { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Some(")?; + Debug::fmt(Lite(&self.0.1), formatter)?; + formatter.write_str(")")?; + Ok(()) + } + } + 
formatter.field("content", Print::ref_cast(val)); + } + if _val.semi.is_some() { + formatter.field("semi", &Present); + } + formatter.finish() + } + syn::Item::Static(_val) => { + let mut formatter = formatter.debug_struct("Item::Static"); + if !_val.attrs.is_empty() { + formatter.field("attrs", Lite(&_val.attrs)); + } + formatter.field("vis", Lite(&_val.vis)); + match _val.mutability { + syn::StaticMutability::None => {} + _ => { + formatter.field("mutability", Lite(&_val.mutability)); + } + } + formatter.field("ident", Lite(&_val.ident)); + formatter.field("ty", Lite(&_val.ty)); + formatter.field("expr", Lite(&_val.expr)); + formatter.finish() + } + syn::Item::Struct(_val) => { + let mut formatter = formatter.debug_struct("Item::Struct"); + if !_val.attrs.is_empty() { + formatter.field("attrs", Lite(&_val.attrs)); + } + formatter.field("vis", Lite(&_val.vis)); + formatter.field("ident", Lite(&_val.ident)); + formatter.field("generics", Lite(&_val.generics)); + formatter.field("fields", Lite(&_val.fields)); + if _val.semi_token.is_some() { + formatter.field("semi_token", &Present); + } + formatter.finish() + } + syn::Item::Trait(_val) => { + let mut formatter = formatter.debug_struct("Item::Trait"); + if !_val.attrs.is_empty() { + formatter.field("attrs", Lite(&_val.attrs)); + } + formatter.field("vis", Lite(&_val.vis)); + if _val.unsafety.is_some() { + formatter.field("unsafety", &Present); + } + if _val.auto_token.is_some() { + formatter.field("auto_token", &Present); + } + if let Some(val) = &_val.restriction { + #[derive(RefCast)] + #[repr(transparent)] + struct Print(syn::ImplRestriction); + impl Debug for Print { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Some(")?; + Debug::fmt(Lite(&self.0), formatter)?; + formatter.write_str(")")?; + Ok(()) + } + } + formatter.field("restriction", Print::ref_cast(val)); + } + formatter.field("ident", Lite(&_val.ident)); + formatter.field("generics", Lite(&_val.generics)); + if _val.colon_token.is_some() { + formatter.field("colon_token", &Present); + } + if !_val.supertraits.is_empty() { + formatter.field("supertraits", Lite(&_val.supertraits)); + } + if !_val.items.is_empty() { + formatter.field("items", Lite(&_val.items)); + } + formatter.finish() + } + syn::Item::TraitAlias(_val) => { + let mut formatter = formatter.debug_struct("Item::TraitAlias"); + if !_val.attrs.is_empty() { + formatter.field("attrs", Lite(&_val.attrs)); + } + formatter.field("vis", Lite(&_val.vis)); + formatter.field("ident", Lite(&_val.ident)); + formatter.field("generics", Lite(&_val.generics)); + if !_val.bounds.is_empty() { + formatter.field("bounds", Lite(&_val.bounds)); + } + formatter.finish() + } + syn::Item::Type(_val) => { + let mut formatter = formatter.debug_struct("Item::Type"); + if !_val.attrs.is_empty() { + formatter.field("attrs", Lite(&_val.attrs)); + } + formatter.field("vis", Lite(&_val.vis)); + formatter.field("ident", Lite(&_val.ident)); + formatter.field("generics", Lite(&_val.generics)); + formatter.field("ty", Lite(&_val.ty)); + formatter.finish() + } + syn::Item::Union(_val) => { + let mut formatter = formatter.debug_struct("Item::Union"); + if !_val.attrs.is_empty() { + formatter.field("attrs", Lite(&_val.attrs)); + } + formatter.field("vis", Lite(&_val.vis)); + formatter.field("ident", Lite(&_val.ident)); + formatter.field("generics", Lite(&_val.generics)); + formatter.field("fields", Lite(&_val.fields)); + formatter.finish() + } + syn::Item::Use(_val) => { + let mut formatter = 
formatter.debug_struct("Item::Use"); + if !_val.attrs.is_empty() { + formatter.field("attrs", Lite(&_val.attrs)); + } + formatter.field("vis", Lite(&_val.vis)); + if _val.leading_colon.is_some() { + formatter.field("leading_colon", &Present); + } + formatter.field("tree", Lite(&_val.tree)); + formatter.finish() + } + syn::Item::Verbatim(_val) => { + formatter.write_str("Item::Verbatim")?; + formatter.write_str("(`")?; + Display::fmt(_val, formatter)?; + formatter.write_str("`)")?; + Ok(()) + } + _ => unreachable!(), + } + } +} +impl Debug for Lite<syn::ItemConst> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("ItemConst"); + if !self.value.attrs.is_empty() { + formatter.field("attrs", Lite(&self.value.attrs)); + } + formatter.field("vis", Lite(&self.value.vis)); + formatter.field("ident", Lite(&self.value.ident)); + formatter.field("generics", Lite(&self.value.generics)); + formatter.field("ty", Lite(&self.value.ty)); + formatter.field("expr", Lite(&self.value.expr)); + formatter.finish() + } +} +impl Debug for Lite<syn::ItemEnum> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("ItemEnum"); + if !self.value.attrs.is_empty() { + formatter.field("attrs", Lite(&self.value.attrs)); + } + formatter.field("vis", Lite(&self.value.vis)); + formatter.field("ident", Lite(&self.value.ident)); + formatter.field("generics", Lite(&self.value.generics)); + if !self.value.variants.is_empty() { + formatter.field("variants", Lite(&self.value.variants)); + } + formatter.finish() + } +} +impl Debug for Lite<syn::ItemExternCrate> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("ItemExternCrate"); + if !self.value.attrs.is_empty() { + formatter.field("attrs", Lite(&self.value.attrs)); + } + formatter.field("vis", Lite(&self.value.vis)); + formatter.field("ident", Lite(&self.value.ident)); + if let Some(val) = &self.value.rename { + #[derive(RefCast)] + #[repr(transparent)] + struct Print((syn::token::As, proc_macro2::Ident)); + impl Debug for Print { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Some(")?; + Debug::fmt(Lite(&self.0.1), formatter)?; + formatter.write_str(")")?; + Ok(()) + } + } + formatter.field("rename", Print::ref_cast(val)); + } + formatter.finish() + } +} +impl Debug for Lite<syn::ItemFn> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("ItemFn"); + if !self.value.attrs.is_empty() { + formatter.field("attrs", Lite(&self.value.attrs)); + } + formatter.field("vis", Lite(&self.value.vis)); + formatter.field("sig", Lite(&self.value.sig)); + formatter.field("block", Lite(&self.value.block)); + formatter.finish() + } +} +impl Debug for Lite<syn::ItemForeignMod> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("ItemForeignMod"); + if !self.value.attrs.is_empty() { + formatter.field("attrs", Lite(&self.value.attrs)); + } + if self.value.unsafety.is_some() { + formatter.field("unsafety", &Present); + } + formatter.field("abi", Lite(&self.value.abi)); + if !self.value.items.is_empty() { + formatter.field("items", Lite(&self.value.items)); + } + formatter.finish() + } +} +impl Debug for Lite<syn::ItemImpl> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("ItemImpl"); + if 
!self.value.attrs.is_empty() { + formatter.field("attrs", Lite(&self.value.attrs)); + } + if self.value.defaultness.is_some() { + formatter.field("defaultness", &Present); + } + if self.value.unsafety.is_some() { + formatter.field("unsafety", &Present); + } + formatter.field("generics", Lite(&self.value.generics)); + if let Some(val) = &self.value.trait_ { + #[derive(RefCast)] + #[repr(transparent)] + struct Print((Option<syn::token::Not>, syn::Path, syn::token::For)); + impl Debug for Print { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Some(")?; + Debug::fmt( + &( + &super::Option { + present: self.0.0.is_some(), + }, + Lite(&self.0.1), + ), + formatter, + )?; + formatter.write_str(")")?; + Ok(()) + } + } + formatter.field("trait_", Print::ref_cast(val)); + } + formatter.field("self_ty", Lite(&self.value.self_ty)); + if !self.value.items.is_empty() { + formatter.field("items", Lite(&self.value.items)); + } + formatter.finish() + } +} +impl Debug for Lite<syn::ItemMacro> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("ItemMacro"); + if !self.value.attrs.is_empty() { + formatter.field("attrs", Lite(&self.value.attrs)); + } + if let Some(val) = &self.value.ident { + #[derive(RefCast)] + #[repr(transparent)] + struct Print(proc_macro2::Ident); + impl Debug for Print { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Some(")?; + Debug::fmt(Lite(&self.0), formatter)?; + formatter.write_str(")")?; + Ok(()) + } + } + formatter.field("ident", Print::ref_cast(val)); + } + formatter.field("mac", Lite(&self.value.mac)); + if self.value.semi_token.is_some() { + formatter.field("semi_token", &Present); + } + formatter.finish() + } +} +impl Debug for Lite<syn::ItemMod> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("ItemMod"); + if !self.value.attrs.is_empty() { + formatter.field("attrs", Lite(&self.value.attrs)); + } + formatter.field("vis", Lite(&self.value.vis)); + if self.value.unsafety.is_some() { + formatter.field("unsafety", &Present); + } + formatter.field("ident", Lite(&self.value.ident)); + if let Some(val) = &self.value.content { + #[derive(RefCast)] + #[repr(transparent)] + struct Print((syn::token::Brace, Vec<syn::Item>)); + impl Debug for Print { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Some(")?; + Debug::fmt(Lite(&self.0.1), formatter)?; + formatter.write_str(")")?; + Ok(()) + } + } + formatter.field("content", Print::ref_cast(val)); + } + if self.value.semi.is_some() { + formatter.field("semi", &Present); + } + formatter.finish() + } +} +impl Debug for Lite<syn::ItemStatic> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("ItemStatic"); + if !self.value.attrs.is_empty() { + formatter.field("attrs", Lite(&self.value.attrs)); + } + formatter.field("vis", Lite(&self.value.vis)); + match self.value.mutability { + syn::StaticMutability::None => {} + _ => { + formatter.field("mutability", Lite(&self.value.mutability)); + } + } + formatter.field("ident", Lite(&self.value.ident)); + formatter.field("ty", Lite(&self.value.ty)); + formatter.field("expr", Lite(&self.value.expr)); + formatter.finish() + } +} +impl Debug for Lite<syn::ItemStruct> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("ItemStruct"); + if 
!self.value.attrs.is_empty() { + formatter.field("attrs", Lite(&self.value.attrs)); + } + formatter.field("vis", Lite(&self.value.vis)); + formatter.field("ident", Lite(&self.value.ident)); + formatter.field("generics", Lite(&self.value.generics)); + formatter.field("fields", Lite(&self.value.fields)); + if self.value.semi_token.is_some() { + formatter.field("semi_token", &Present); + } + formatter.finish() + } +} +impl Debug for Lite<syn::ItemTrait> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("ItemTrait"); + if !self.value.attrs.is_empty() { + formatter.field("attrs", Lite(&self.value.attrs)); + } + formatter.field("vis", Lite(&self.value.vis)); + if self.value.unsafety.is_some() { + formatter.field("unsafety", &Present); + } + if self.value.auto_token.is_some() { + formatter.field("auto_token", &Present); + } + if let Some(val) = &self.value.restriction { + #[derive(RefCast)] + #[repr(transparent)] + struct Print(syn::ImplRestriction); + impl Debug for Print { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Some(")?; + Debug::fmt(Lite(&self.0), formatter)?; + formatter.write_str(")")?; + Ok(()) + } + } + formatter.field("restriction", Print::ref_cast(val)); + } + formatter.field("ident", Lite(&self.value.ident)); + formatter.field("generics", Lite(&self.value.generics)); + if self.value.colon_token.is_some() { + formatter.field("colon_token", &Present); + } + if !self.value.supertraits.is_empty() { + formatter.field("supertraits", Lite(&self.value.supertraits)); + } + if !self.value.items.is_empty() { + formatter.field("items", Lite(&self.value.items)); + } + formatter.finish() + } +} +impl Debug for Lite<syn::ItemTraitAlias> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("ItemTraitAlias"); + if !self.value.attrs.is_empty() { + formatter.field("attrs", Lite(&self.value.attrs)); + } + formatter.field("vis", Lite(&self.value.vis)); + formatter.field("ident", Lite(&self.value.ident)); + formatter.field("generics", Lite(&self.value.generics)); + if !self.value.bounds.is_empty() { + formatter.field("bounds", Lite(&self.value.bounds)); + } + formatter.finish() + } +} +impl Debug for Lite<syn::ItemType> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("ItemType"); + if !self.value.attrs.is_empty() { + formatter.field("attrs", Lite(&self.value.attrs)); + } + formatter.field("vis", Lite(&self.value.vis)); + formatter.field("ident", Lite(&self.value.ident)); + formatter.field("generics", Lite(&self.value.generics)); + formatter.field("ty", Lite(&self.value.ty)); + formatter.finish() + } +} +impl Debug for Lite<syn::ItemUnion> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("ItemUnion"); + if !self.value.attrs.is_empty() { + formatter.field("attrs", Lite(&self.value.attrs)); + } + formatter.field("vis", Lite(&self.value.vis)); + formatter.field("ident", Lite(&self.value.ident)); + formatter.field("generics", Lite(&self.value.generics)); + formatter.field("fields", Lite(&self.value.fields)); + formatter.finish() + } +} +impl Debug for Lite<syn::ItemUse> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("ItemUse"); + if !self.value.attrs.is_empty() { + formatter.field("attrs", Lite(&self.value.attrs)); + } + formatter.field("vis", 
Lite(&self.value.vis)); + if self.value.leading_colon.is_some() { + formatter.field("leading_colon", &Present); + } + formatter.field("tree", Lite(&self.value.tree)); + formatter.finish() + } +} +impl Debug for Lite<syn::Label> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("Label"); + formatter.field("name", Lite(&self.value.name)); + formatter.finish() + } +} +impl Debug for Lite<syn::Lifetime> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("Lifetime"); + formatter.field("ident", Lite(&self.value.ident)); + formatter.finish() + } +} +impl Debug for Lite<syn::LifetimeParam> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("LifetimeParam"); + if !self.value.attrs.is_empty() { + formatter.field("attrs", Lite(&self.value.attrs)); + } + formatter.field("lifetime", Lite(&self.value.lifetime)); + if self.value.colon_token.is_some() { + formatter.field("colon_token", &Present); + } + if !self.value.bounds.is_empty() { + formatter.field("bounds", Lite(&self.value.bounds)); + } + formatter.finish() + } +} +impl Debug for Lite<syn::Lit> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + match &self.value { + syn::Lit::Str(_val) => write!(formatter, "{:?}", _val.value()), + syn::Lit::ByteStr(_val) => write!(formatter, "{:?}", _val.value()), + syn::Lit::CStr(_val) => write!(formatter, "{:?}", _val.value()), + syn::Lit::Byte(_val) => write!(formatter, "{:?}", _val.value()), + syn::Lit::Char(_val) => write!(formatter, "{:?}", _val.value()), + syn::Lit::Int(_val) => write!(formatter, "{}", _val), + syn::Lit::Float(_val) => write!(formatter, "{}", _val), + syn::Lit::Bool(_val) => { + let mut formatter = formatter.debug_struct("Lit::Bool"); + formatter.field("value", Lite(&_val.value)); + formatter.finish() + } + syn::Lit::Verbatim(_val) => { + formatter.write_str("Lit::Verbatim")?; + formatter.write_str("(`")?; + Display::fmt(_val, formatter)?; + formatter.write_str("`)")?; + Ok(()) + } + _ => unreachable!(), + } + } +} +impl Debug for Lite<syn::LitBool> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("LitBool"); + formatter.field("value", Lite(&self.value.value)); + formatter.finish() + } +} +impl Debug for Lite<syn::LitByte> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + write!(formatter, "{:?}", self.value.value()) + } +} +impl Debug for Lite<syn::LitByteStr> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + write!(formatter, "{:?}", self.value.value()) + } +} +impl Debug for Lite<syn::LitCStr> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + write!(formatter, "{:?}", self.value.value()) + } +} +impl Debug for Lite<syn::LitChar> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + write!(formatter, "{:?}", self.value.value()) + } +} +impl Debug for Lite<syn::LitFloat> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + write!(formatter, "{}", & self.value) + } +} +impl Debug for Lite<syn::LitInt> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + write!(formatter, "{}", & self.value) + } +} +impl Debug for Lite<syn::LitStr> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + write!(formatter, "{:?}", self.value.value()) + } +} +impl Debug for Lite<syn::Local> { + fn fmt(&self, formatter: &mut 
fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("Local"); + if !self.value.attrs.is_empty() { + formatter.field("attrs", Lite(&self.value.attrs)); + } + formatter.field("pat", Lite(&self.value.pat)); + if let Some(val) = &self.value.init { + #[derive(RefCast)] + #[repr(transparent)] + struct Print(syn::LocalInit); + impl Debug for Print { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Some(")?; + Debug::fmt(Lite(&self.0), formatter)?; + formatter.write_str(")")?; + Ok(()) + } + } + formatter.field("init", Print::ref_cast(val)); + } + formatter.finish() + } +} +impl Debug for Lite<syn::LocalInit> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("LocalInit"); + formatter.field("expr", Lite(&self.value.expr)); + if let Some(val) = &self.value.diverge { + #[derive(RefCast)] + #[repr(transparent)] + struct Print((syn::token::Else, Box<syn::Expr>)); + impl Debug for Print { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Some(")?; + Debug::fmt(Lite(&self.0.1), formatter)?; + formatter.write_str(")")?; + Ok(()) + } + } + formatter.field("diverge", Print::ref_cast(val)); + } + formatter.finish() + } +} +impl Debug for Lite<syn::Macro> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("Macro"); + formatter.field("path", Lite(&self.value.path)); + formatter.field("delimiter", Lite(&self.value.delimiter)); + formatter.field("tokens", Lite(&self.value.tokens)); + formatter.finish() + } +} +impl Debug for Lite<syn::MacroDelimiter> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + match &self.value { + syn::MacroDelimiter::Paren(_val) => { + formatter.write_str("MacroDelimiter::Paren")?; + Ok(()) + } + syn::MacroDelimiter::Brace(_val) => { + formatter.write_str("MacroDelimiter::Brace")?; + Ok(()) + } + syn::MacroDelimiter::Bracket(_val) => { + formatter.write_str("MacroDelimiter::Bracket")?; + Ok(()) + } + } + } +} +impl Debug for Lite<syn::Member> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + match &self.value { + syn::Member::Named(_val) => { + formatter.write_str("Member::Named")?; + formatter.write_str("(")?; + Debug::fmt(Lite(_val), formatter)?; + formatter.write_str(")")?; + Ok(()) + } + syn::Member::Unnamed(_val) => { + formatter.write_str("Member::Unnamed")?; + formatter.write_str("(")?; + Debug::fmt(Lite(_val), formatter)?; + formatter.write_str(")")?; + Ok(()) + } + } + } +} +impl Debug for Lite<syn::Meta> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + match &self.value { + syn::Meta::Path(_val) => { + let mut formatter = formatter.debug_struct("Meta::Path"); + if _val.leading_colon.is_some() { + formatter.field("leading_colon", &Present); + } + if !_val.segments.is_empty() { + formatter.field("segments", Lite(&_val.segments)); + } + formatter.finish() + } + syn::Meta::List(_val) => { + let mut formatter = formatter.debug_struct("Meta::List"); + formatter.field("path", Lite(&_val.path)); + formatter.field("delimiter", Lite(&_val.delimiter)); + formatter.field("tokens", Lite(&_val.tokens)); + formatter.finish() + } + syn::Meta::NameValue(_val) => { + let mut formatter = formatter.debug_struct("Meta::NameValue"); + formatter.field("path", Lite(&_val.path)); + formatter.field("value", Lite(&_val.value)); + formatter.finish() + } + } + } +} +impl Debug for Lite<syn::MetaList> { + fn fmt(&self, 
formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("MetaList"); + formatter.field("path", Lite(&self.value.path)); + formatter.field("delimiter", Lite(&self.value.delimiter)); + formatter.field("tokens", Lite(&self.value.tokens)); + formatter.finish() + } +} +impl Debug for Lite<syn::MetaNameValue> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("MetaNameValue"); + formatter.field("path", Lite(&self.value.path)); + formatter.field("value", Lite(&self.value.value)); + formatter.finish() + } +} +impl Debug for Lite<syn::ParenthesizedGenericArguments> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("ParenthesizedGenericArguments"); + if !self.value.inputs.is_empty() { + formatter.field("inputs", Lite(&self.value.inputs)); + } + formatter.field("output", Lite(&self.value.output)); + formatter.finish() + } +} +impl Debug for Lite<syn::Pat> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + match &self.value { + syn::Pat::Const(_val) => { + formatter.write_str("Pat::Const")?; + formatter.write_str("(")?; + Debug::fmt(Lite(_val), formatter)?; + formatter.write_str(")")?; + Ok(()) + } + syn::Pat::Ident(_val) => { + let mut formatter = formatter.debug_struct("Pat::Ident"); + if !_val.attrs.is_empty() { + formatter.field("attrs", Lite(&_val.attrs)); + } + if _val.by_ref.is_some() { + formatter.field("by_ref", &Present); + } + if _val.mutability.is_some() { + formatter.field("mutability", &Present); + } + formatter.field("ident", Lite(&_val.ident)); + if let Some(val) = &_val.subpat { + #[derive(RefCast)] + #[repr(transparent)] + struct Print((syn::token::At, Box<syn::Pat>)); + impl Debug for Print { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Some(")?; + Debug::fmt(Lite(&self.0.1), formatter)?; + formatter.write_str(")")?; + Ok(()) + } + } + formatter.field("subpat", Print::ref_cast(val)); + } + formatter.finish() + } + syn::Pat::Lit(_val) => { + formatter.write_str("Pat::Lit")?; + formatter.write_str("(")?; + Debug::fmt(Lite(_val), formatter)?; + formatter.write_str(")")?; + Ok(()) + } + syn::Pat::Macro(_val) => { + formatter.write_str("Pat::Macro")?; + formatter.write_str("(")?; + Debug::fmt(Lite(_val), formatter)?; + formatter.write_str(")")?; + Ok(()) + } + syn::Pat::Or(_val) => { + let mut formatter = formatter.debug_struct("Pat::Or"); + if !_val.attrs.is_empty() { + formatter.field("attrs", Lite(&_val.attrs)); + } + if _val.leading_vert.is_some() { + formatter.field("leading_vert", &Present); + } + if !_val.cases.is_empty() { + formatter.field("cases", Lite(&_val.cases)); + } + formatter.finish() + } + syn::Pat::Paren(_val) => { + let mut formatter = formatter.debug_struct("Pat::Paren"); + if !_val.attrs.is_empty() { + formatter.field("attrs", Lite(&_val.attrs)); + } + formatter.field("pat", Lite(&_val.pat)); + formatter.finish() + } + syn::Pat::Path(_val) => { + formatter.write_str("Pat::Path")?; + formatter.write_str("(")?; + Debug::fmt(Lite(_val), formatter)?; + formatter.write_str(")")?; + Ok(()) + } + syn::Pat::Range(_val) => { + formatter.write_str("Pat::Range")?; + formatter.write_str("(")?; + Debug::fmt(Lite(_val), formatter)?; + formatter.write_str(")")?; + Ok(()) + } + syn::Pat::Reference(_val) => { + let mut formatter = formatter.debug_struct("Pat::Reference"); + if !_val.attrs.is_empty() { + formatter.field("attrs", Lite(&_val.attrs)); + } + if 
_val.mutability.is_some() { + formatter.field("mutability", &Present); + } + formatter.field("pat", Lite(&_val.pat)); + formatter.finish() + } + syn::Pat::Rest(_val) => { + let mut formatter = formatter.debug_struct("Pat::Rest"); + if !_val.attrs.is_empty() { + formatter.field("attrs", Lite(&_val.attrs)); + } + formatter.finish() + } + syn::Pat::Slice(_val) => { + let mut formatter = formatter.debug_struct("Pat::Slice"); + if !_val.attrs.is_empty() { + formatter.field("attrs", Lite(&_val.attrs)); + } + if !_val.elems.is_empty() { + formatter.field("elems", Lite(&_val.elems)); + } + formatter.finish() + } + syn::Pat::Struct(_val) => { + let mut formatter = formatter.debug_struct("Pat::Struct"); + if !_val.attrs.is_empty() { + formatter.field("attrs", Lite(&_val.attrs)); + } + if let Some(val) = &_val.qself { + #[derive(RefCast)] + #[repr(transparent)] + struct Print(syn::QSelf); + impl Debug for Print { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Some(")?; + Debug::fmt(Lite(&self.0), formatter)?; + formatter.write_str(")")?; + Ok(()) + } + } + formatter.field("qself", Print::ref_cast(val)); + } + formatter.field("path", Lite(&_val.path)); + if !_val.fields.is_empty() { + formatter.field("fields", Lite(&_val.fields)); + } + if let Some(val) = &_val.rest { + #[derive(RefCast)] + #[repr(transparent)] + struct Print(syn::PatRest); + impl Debug for Print { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Some(")?; + Debug::fmt(Lite(&self.0), formatter)?; + formatter.write_str(")")?; + Ok(()) + } + } + formatter.field("rest", Print::ref_cast(val)); + } + formatter.finish() + } + syn::Pat::Tuple(_val) => { + let mut formatter = formatter.debug_struct("Pat::Tuple"); + if !_val.attrs.is_empty() { + formatter.field("attrs", Lite(&_val.attrs)); + } + if !_val.elems.is_empty() { + formatter.field("elems", Lite(&_val.elems)); + } + formatter.finish() + } + syn::Pat::TupleStruct(_val) => { + let mut formatter = formatter.debug_struct("Pat::TupleStruct"); + if !_val.attrs.is_empty() { + formatter.field("attrs", Lite(&_val.attrs)); + } + if let Some(val) = &_val.qself { + #[derive(RefCast)] + #[repr(transparent)] + struct Print(syn::QSelf); + impl Debug for Print { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Some(")?; + Debug::fmt(Lite(&self.0), formatter)?; + formatter.write_str(")")?; + Ok(()) + } + } + formatter.field("qself", Print::ref_cast(val)); + } + formatter.field("path", Lite(&_val.path)); + if !_val.elems.is_empty() { + formatter.field("elems", Lite(&_val.elems)); + } + formatter.finish() + } + syn::Pat::Type(_val) => { + let mut formatter = formatter.debug_struct("Pat::Type"); + if !_val.attrs.is_empty() { + formatter.field("attrs", Lite(&_val.attrs)); + } + formatter.field("pat", Lite(&_val.pat)); + formatter.field("ty", Lite(&_val.ty)); + formatter.finish() + } + syn::Pat::Verbatim(_val) => { + formatter.write_str("Pat::Verbatim")?; + formatter.write_str("(`")?; + Display::fmt(_val, formatter)?; + formatter.write_str("`)")?; + Ok(()) + } + syn::Pat::Wild(_val) => { + let mut formatter = formatter.debug_struct("Pat::Wild"); + if !_val.attrs.is_empty() { + formatter.field("attrs", Lite(&_val.attrs)); + } + formatter.finish() + } + _ => unreachable!(), + } + } +} +impl Debug for Lite<syn::PatIdent> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("PatIdent"); + if !self.value.attrs.is_empty() { + 
formatter.field("attrs", Lite(&self.value.attrs)); + } + if self.value.by_ref.is_some() { + formatter.field("by_ref", &Present); + } + if self.value.mutability.is_some() { + formatter.field("mutability", &Present); + } + formatter.field("ident", Lite(&self.value.ident)); + if let Some(val) = &self.value.subpat { + #[derive(RefCast)] + #[repr(transparent)] + struct Print((syn::token::At, Box<syn::Pat>)); + impl Debug for Print { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Some(")?; + Debug::fmt(Lite(&self.0.1), formatter)?; + formatter.write_str(")")?; + Ok(()) + } + } + formatter.field("subpat", Print::ref_cast(val)); + } + formatter.finish() + } +} +impl Debug for Lite<syn::PatOr> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("PatOr"); + if !self.value.attrs.is_empty() { + formatter.field("attrs", Lite(&self.value.attrs)); + } + if self.value.leading_vert.is_some() { + formatter.field("leading_vert", &Present); + } + if !self.value.cases.is_empty() { + formatter.field("cases", Lite(&self.value.cases)); + } + formatter.finish() + } +} +impl Debug for Lite<syn::PatParen> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("PatParen"); + if !self.value.attrs.is_empty() { + formatter.field("attrs", Lite(&self.value.attrs)); + } + formatter.field("pat", Lite(&self.value.pat)); + formatter.finish() + } +} +impl Debug for Lite<syn::PatReference> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("PatReference"); + if !self.value.attrs.is_empty() { + formatter.field("attrs", Lite(&self.value.attrs)); + } + if self.value.mutability.is_some() { + formatter.field("mutability", &Present); + } + formatter.field("pat", Lite(&self.value.pat)); + formatter.finish() + } +} +impl Debug for Lite<syn::PatRest> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("PatRest"); + if !self.value.attrs.is_empty() { + formatter.field("attrs", Lite(&self.value.attrs)); + } + formatter.finish() + } +} +impl Debug for Lite<syn::PatSlice> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("PatSlice"); + if !self.value.attrs.is_empty() { + formatter.field("attrs", Lite(&self.value.attrs)); + } + if !self.value.elems.is_empty() { + formatter.field("elems", Lite(&self.value.elems)); + } + formatter.finish() + } +} +impl Debug for Lite<syn::PatStruct> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("PatStruct"); + if !self.value.attrs.is_empty() { + formatter.field("attrs", Lite(&self.value.attrs)); + } + if let Some(val) = &self.value.qself { + #[derive(RefCast)] + #[repr(transparent)] + struct Print(syn::QSelf); + impl Debug for Print { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Some(")?; + Debug::fmt(Lite(&self.0), formatter)?; + formatter.write_str(")")?; + Ok(()) + } + } + formatter.field("qself", Print::ref_cast(val)); + } + formatter.field("path", Lite(&self.value.path)); + if !self.value.fields.is_empty() { + formatter.field("fields", Lite(&self.value.fields)); + } + if let Some(val) = &self.value.rest { + #[derive(RefCast)] + #[repr(transparent)] + struct Print(syn::PatRest); + impl Debug for Print { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result 
{ + formatter.write_str("Some(")?; + Debug::fmt(Lite(&self.0), formatter)?; + formatter.write_str(")")?; + Ok(()) + } + } + formatter.field("rest", Print::ref_cast(val)); + } + formatter.finish() + } +} +impl Debug for Lite<syn::PatTuple> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("PatTuple"); + if !self.value.attrs.is_empty() { + formatter.field("attrs", Lite(&self.value.attrs)); + } + if !self.value.elems.is_empty() { + formatter.field("elems", Lite(&self.value.elems)); + } + formatter.finish() + } +} +impl Debug for Lite<syn::PatTupleStruct> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("PatTupleStruct"); + if !self.value.attrs.is_empty() { + formatter.field("attrs", Lite(&self.value.attrs)); + } + if let Some(val) = &self.value.qself { + #[derive(RefCast)] + #[repr(transparent)] + struct Print(syn::QSelf); + impl Debug for Print { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Some(")?; + Debug::fmt(Lite(&self.0), formatter)?; + formatter.write_str(")")?; + Ok(()) + } + } + formatter.field("qself", Print::ref_cast(val)); + } + formatter.field("path", Lite(&self.value.path)); + if !self.value.elems.is_empty() { + formatter.field("elems", Lite(&self.value.elems)); + } + formatter.finish() + } +} +impl Debug for Lite<syn::PatType> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("PatType"); + if !self.value.attrs.is_empty() { + formatter.field("attrs", Lite(&self.value.attrs)); + } + formatter.field("pat", Lite(&self.value.pat)); + formatter.field("ty", Lite(&self.value.ty)); + formatter.finish() + } +} +impl Debug for Lite<syn::PatWild> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("PatWild"); + if !self.value.attrs.is_empty() { + formatter.field("attrs", Lite(&self.value.attrs)); + } + formatter.finish() + } +} +impl Debug for Lite<syn::Path> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("Path"); + if self.value.leading_colon.is_some() { + formatter.field("leading_colon", &Present); + } + if !self.value.segments.is_empty() { + formatter.field("segments", Lite(&self.value.segments)); + } + formatter.finish() + } +} +impl Debug for Lite<syn::PathArguments> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + match &self.value { + syn::PathArguments::None => formatter.write_str("PathArguments::None"), + syn::PathArguments::AngleBracketed(_val) => { + let mut formatter = formatter + .debug_struct("PathArguments::AngleBracketed"); + if _val.colon2_token.is_some() { + formatter.field("colon2_token", &Present); + } + if !_val.args.is_empty() { + formatter.field("args", Lite(&_val.args)); + } + formatter.finish() + } + syn::PathArguments::Parenthesized(_val) => { + let mut formatter = formatter + .debug_struct("PathArguments::Parenthesized"); + if !_val.inputs.is_empty() { + formatter.field("inputs", Lite(&_val.inputs)); + } + formatter.field("output", Lite(&_val.output)); + formatter.finish() + } + } + } +} +impl Debug for Lite<syn::PathSegment> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("PathSegment"); + formatter.field("ident", Lite(&self.value.ident)); + match self.value.arguments { + syn::PathArguments::None => {} + _ => { + 
formatter.field("arguments", Lite(&self.value.arguments)); + } + } + formatter.finish() + } +} +impl Debug for Lite<syn::PointerMutability> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + match &self.value { + syn::PointerMutability::Const(_val) => { + formatter.write_str("PointerMutability::Const")?; + Ok(()) + } + syn::PointerMutability::Mut(_val) => { + formatter.write_str("PointerMutability::Mut")?; + Ok(()) + } + } + } +} +impl Debug for Lite<syn::PreciseCapture> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("PreciseCapture"); + if !self.value.params.is_empty() { + formatter.field("params", Lite(&self.value.params)); + } + formatter.finish() + } +} +impl Debug for Lite<syn::PredicateLifetime> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("PredicateLifetime"); + formatter.field("lifetime", Lite(&self.value.lifetime)); + if !self.value.bounds.is_empty() { + formatter.field("bounds", Lite(&self.value.bounds)); + } + formatter.finish() + } +} +impl Debug for Lite<syn::PredicateType> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("PredicateType"); + if let Some(val) = &self.value.lifetimes { + #[derive(RefCast)] + #[repr(transparent)] + struct Print(syn::BoundLifetimes); + impl Debug for Print { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Some(")?; + Debug::fmt(Lite(&self.0), formatter)?; + formatter.write_str(")")?; + Ok(()) + } + } + formatter.field("lifetimes", Print::ref_cast(val)); + } + formatter.field("bounded_ty", Lite(&self.value.bounded_ty)); + if !self.value.bounds.is_empty() { + formatter.field("bounds", Lite(&self.value.bounds)); + } + formatter.finish() + } +} +impl Debug for Lite<syn::QSelf> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("QSelf"); + formatter.field("ty", Lite(&self.value.ty)); + formatter.field("position", Lite(&self.value.position)); + if self.value.as_token.is_some() { + formatter.field("as_token", &Present); + } + formatter.finish() + } +} +impl Debug for Lite<syn::RangeLimits> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + match &self.value { + syn::RangeLimits::HalfOpen(_val) => { + formatter.write_str("RangeLimits::HalfOpen")?; + Ok(()) + } + syn::RangeLimits::Closed(_val) => { + formatter.write_str("RangeLimits::Closed")?; + Ok(()) + } + } + } +} +impl Debug for Lite<syn::Receiver> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("Receiver"); + if !self.value.attrs.is_empty() { + formatter.field("attrs", Lite(&self.value.attrs)); + } + if let Some(val) = &self.value.reference { + #[derive(RefCast)] + #[repr(transparent)] + struct Print((syn::token::And, Option<syn::Lifetime>)); + impl Debug for Print { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Some(")?; + Debug::fmt( + { + #[derive(RefCast)] + #[repr(transparent)] + struct Print(Option<syn::Lifetime>); + impl Debug for Print { + fn fmt( + &self, + formatter: &mut fmt::Formatter, + ) -> fmt::Result { + match &self.0 { + Some(_val) => { + formatter.write_str("Some(")?; + Debug::fmt(Lite(_val), formatter)?; + formatter.write_str(")")?; + Ok(()) + } + None => formatter.write_str("None"), + } + } + } + Print::ref_cast(&self.0.1) + }, + formatter, + )?; + 
formatter.write_str(")")?; + Ok(()) + } + } + formatter.field("reference", Print::ref_cast(val)); + } + if self.value.mutability.is_some() { + formatter.field("mutability", &Present); + } + if self.value.colon_token.is_some() { + formatter.field("colon_token", &Present); + } + formatter.field("ty", Lite(&self.value.ty)); + formatter.finish() + } +} +impl Debug for Lite<syn::ReturnType> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + match &self.value { + syn::ReturnType::Default => formatter.write_str("ReturnType::Default"), + syn::ReturnType::Type(_v0, _v1) => { + let mut formatter = formatter.debug_tuple("ReturnType::Type"); + formatter.field(Lite(_v1)); + formatter.finish() + } + } + } +} +impl Debug for Lite<syn::Signature> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("Signature"); + if self.value.constness.is_some() { + formatter.field("constness", &Present); + } + if self.value.asyncness.is_some() { + formatter.field("asyncness", &Present); + } + if self.value.unsafety.is_some() { + formatter.field("unsafety", &Present); + } + if let Some(val) = &self.value.abi { + #[derive(RefCast)] + #[repr(transparent)] + struct Print(syn::Abi); + impl Debug for Print { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Some(")?; + Debug::fmt(Lite(&self.0), formatter)?; + formatter.write_str(")")?; + Ok(()) + } + } + formatter.field("abi", Print::ref_cast(val)); + } + formatter.field("ident", Lite(&self.value.ident)); + formatter.field("generics", Lite(&self.value.generics)); + if !self.value.inputs.is_empty() { + formatter.field("inputs", Lite(&self.value.inputs)); + } + if let Some(val) = &self.value.variadic { + #[derive(RefCast)] + #[repr(transparent)] + struct Print(syn::Variadic); + impl Debug for Print { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Some(")?; + Debug::fmt(Lite(&self.0), formatter)?; + formatter.write_str(")")?; + Ok(()) + } + } + formatter.field("variadic", Print::ref_cast(val)); + } + formatter.field("output", Lite(&self.value.output)); + formatter.finish() + } +} +impl Debug for Lite<syn::StaticMutability> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + match &self.value { + syn::StaticMutability::Mut(_val) => { + formatter.write_str("StaticMutability::Mut")?; + Ok(()) + } + syn::StaticMutability::None => formatter.write_str("StaticMutability::None"), + _ => unreachable!(), + } + } +} +impl Debug for Lite<syn::Stmt> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + match &self.value { + syn::Stmt::Local(_val) => { + let mut formatter = formatter.debug_struct("Stmt::Local"); + if !_val.attrs.is_empty() { + formatter.field("attrs", Lite(&_val.attrs)); + } + formatter.field("pat", Lite(&_val.pat)); + if let Some(val) = &_val.init { + #[derive(RefCast)] + #[repr(transparent)] + struct Print(syn::LocalInit); + impl Debug for Print { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Some(")?; + Debug::fmt(Lite(&self.0), formatter)?; + formatter.write_str(")")?; + Ok(()) + } + } + formatter.field("init", Print::ref_cast(val)); + } + formatter.finish() + } + syn::Stmt::Item(_val) => { + formatter.write_str("Stmt::Item")?; + formatter.write_str("(")?; + Debug::fmt(Lite(_val), formatter)?; + formatter.write_str(")")?; + Ok(()) + } + syn::Stmt::Expr(_v0, _v1) => { + let mut formatter = formatter.debug_tuple("Stmt::Expr"); + 
formatter.field(Lite(_v0)); + formatter + .field( + &super::Option { + present: _v1.is_some(), + }, + ); + formatter.finish() + } + syn::Stmt::Macro(_val) => { + let mut formatter = formatter.debug_struct("Stmt::Macro"); + if !_val.attrs.is_empty() { + formatter.field("attrs", Lite(&_val.attrs)); + } + formatter.field("mac", Lite(&_val.mac)); + if _val.semi_token.is_some() { + formatter.field("semi_token", &Present); + } + formatter.finish() + } + } + } +} +impl Debug for Lite<syn::StmtMacro> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("StmtMacro"); + if !self.value.attrs.is_empty() { + formatter.field("attrs", Lite(&self.value.attrs)); + } + formatter.field("mac", Lite(&self.value.mac)); + if self.value.semi_token.is_some() { + formatter.field("semi_token", &Present); + } + formatter.finish() + } +} +impl Debug for Lite<syn::TraitBound> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("TraitBound"); + if self.value.paren_token.is_some() { + formatter.field("paren_token", &Present); + } + match self.value.modifier { + syn::TraitBoundModifier::None => {} + _ => { + formatter.field("modifier", Lite(&self.value.modifier)); + } + } + if let Some(val) = &self.value.lifetimes { + #[derive(RefCast)] + #[repr(transparent)] + struct Print(syn::BoundLifetimes); + impl Debug for Print { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Some(")?; + Debug::fmt(Lite(&self.0), formatter)?; + formatter.write_str(")")?; + Ok(()) + } + } + formatter.field("lifetimes", Print::ref_cast(val)); + } + formatter.field("path", Lite(&self.value.path)); + formatter.finish() + } +} +impl Debug for Lite<syn::TraitBoundModifier> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + match &self.value { + syn::TraitBoundModifier::None => { + formatter.write_str("TraitBoundModifier::None") + } + syn::TraitBoundModifier::Maybe(_val) => { + formatter.write_str("TraitBoundModifier::Maybe")?; + Ok(()) + } + } + } +} +impl Debug for Lite<syn::TraitItem> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + match &self.value { + syn::TraitItem::Const(_val) => { + let mut formatter = formatter.debug_struct("TraitItem::Const"); + if !_val.attrs.is_empty() { + formatter.field("attrs", Lite(&_val.attrs)); + } + formatter.field("ident", Lite(&_val.ident)); + formatter.field("generics", Lite(&_val.generics)); + formatter.field("ty", Lite(&_val.ty)); + if let Some(val) = &_val.default { + #[derive(RefCast)] + #[repr(transparent)] + struct Print((syn::token::Eq, syn::Expr)); + impl Debug for Print { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Some(")?; + Debug::fmt(Lite(&self.0.1), formatter)?; + formatter.write_str(")")?; + Ok(()) + } + } + formatter.field("default", Print::ref_cast(val)); + } + formatter.finish() + } + syn::TraitItem::Fn(_val) => { + let mut formatter = formatter.debug_struct("TraitItem::Fn"); + if !_val.attrs.is_empty() { + formatter.field("attrs", Lite(&_val.attrs)); + } + formatter.field("sig", Lite(&_val.sig)); + if let Some(val) = &_val.default { + #[derive(RefCast)] + #[repr(transparent)] + struct Print(syn::Block); + impl Debug for Print { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Some(")?; + Debug::fmt(Lite(&self.0), formatter)?; + formatter.write_str(")")?; + Ok(()) + } + } + formatter.field("default", Print::ref_cast(val)); 
+ } + if _val.semi_token.is_some() { + formatter.field("semi_token", &Present); + } + formatter.finish() + } + syn::TraitItem::Type(_val) => { + let mut formatter = formatter.debug_struct("TraitItem::Type"); + if !_val.attrs.is_empty() { + formatter.field("attrs", Lite(&_val.attrs)); + } + formatter.field("ident", Lite(&_val.ident)); + formatter.field("generics", Lite(&_val.generics)); + if _val.colon_token.is_some() { + formatter.field("colon_token", &Present); + } + if !_val.bounds.is_empty() { + formatter.field("bounds", Lite(&_val.bounds)); + } + if let Some(val) = &_val.default { + #[derive(RefCast)] + #[repr(transparent)] + struct Print((syn::token::Eq, syn::Type)); + impl Debug for Print { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Some(")?; + Debug::fmt(Lite(&self.0.1), formatter)?; + formatter.write_str(")")?; + Ok(()) + } + } + formatter.field("default", Print::ref_cast(val)); + } + formatter.finish() + } + syn::TraitItem::Macro(_val) => { + let mut formatter = formatter.debug_struct("TraitItem::Macro"); + if !_val.attrs.is_empty() { + formatter.field("attrs", Lite(&_val.attrs)); + } + formatter.field("mac", Lite(&_val.mac)); + if _val.semi_token.is_some() { + formatter.field("semi_token", &Present); + } + formatter.finish() + } + syn::TraitItem::Verbatim(_val) => { + formatter.write_str("TraitItem::Verbatim")?; + formatter.write_str("(`")?; + Display::fmt(_val, formatter)?; + formatter.write_str("`)")?; + Ok(()) + } + _ => unreachable!(), + } + } +} +impl Debug for Lite<syn::TraitItemConst> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("TraitItemConst"); + if !self.value.attrs.is_empty() { + formatter.field("attrs", Lite(&self.value.attrs)); + } + formatter.field("ident", Lite(&self.value.ident)); + formatter.field("generics", Lite(&self.value.generics)); + formatter.field("ty", Lite(&self.value.ty)); + if let Some(val) = &self.value.default { + #[derive(RefCast)] + #[repr(transparent)] + struct Print((syn::token::Eq, syn::Expr)); + impl Debug for Print { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Some(")?; + Debug::fmt(Lite(&self.0.1), formatter)?; + formatter.write_str(")")?; + Ok(()) + } + } + formatter.field("default", Print::ref_cast(val)); + } + formatter.finish() + } +} +impl Debug for Lite<syn::TraitItemFn> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("TraitItemFn"); + if !self.value.attrs.is_empty() { + formatter.field("attrs", Lite(&self.value.attrs)); + } + formatter.field("sig", Lite(&self.value.sig)); + if let Some(val) = &self.value.default { + #[derive(RefCast)] + #[repr(transparent)] + struct Print(syn::Block); + impl Debug for Print { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Some(")?; + Debug::fmt(Lite(&self.0), formatter)?; + formatter.write_str(")")?; + Ok(()) + } + } + formatter.field("default", Print::ref_cast(val)); + } + if self.value.semi_token.is_some() { + formatter.field("semi_token", &Present); + } + formatter.finish() + } +} +impl Debug for Lite<syn::TraitItemMacro> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("TraitItemMacro"); + if !self.value.attrs.is_empty() { + formatter.field("attrs", Lite(&self.value.attrs)); + } + formatter.field("mac", Lite(&self.value.mac)); + if self.value.semi_token.is_some() { + 
formatter.field("semi_token", &Present); + } + formatter.finish() + } +} +impl Debug for Lite<syn::TraitItemType> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("TraitItemType"); + if !self.value.attrs.is_empty() { + formatter.field("attrs", Lite(&self.value.attrs)); + } + formatter.field("ident", Lite(&self.value.ident)); + formatter.field("generics", Lite(&self.value.generics)); + if self.value.colon_token.is_some() { + formatter.field("colon_token", &Present); + } + if !self.value.bounds.is_empty() { + formatter.field("bounds", Lite(&self.value.bounds)); + } + if let Some(val) = &self.value.default { + #[derive(RefCast)] + #[repr(transparent)] + struct Print((syn::token::Eq, syn::Type)); + impl Debug for Print { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Some(")?; + Debug::fmt(Lite(&self.0.1), formatter)?; + formatter.write_str(")")?; + Ok(()) + } + } + formatter.field("default", Print::ref_cast(val)); + } + formatter.finish() + } +} +impl Debug for Lite<syn::Type> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + match &self.value { + syn::Type::Array(_val) => { + let mut formatter = formatter.debug_struct("Type::Array"); + formatter.field("elem", Lite(&_val.elem)); + formatter.field("len", Lite(&_val.len)); + formatter.finish() + } + syn::Type::BareFn(_val) => { + let mut formatter = formatter.debug_struct("Type::BareFn"); + if let Some(val) = &_val.lifetimes { + #[derive(RefCast)] + #[repr(transparent)] + struct Print(syn::BoundLifetimes); + impl Debug for Print { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Some(")?; + Debug::fmt(Lite(&self.0), formatter)?; + formatter.write_str(")")?; + Ok(()) + } + } + formatter.field("lifetimes", Print::ref_cast(val)); + } + if _val.unsafety.is_some() { + formatter.field("unsafety", &Present); + } + if let Some(val) = &_val.abi { + #[derive(RefCast)] + #[repr(transparent)] + struct Print(syn::Abi); + impl Debug for Print { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Some(")?; + Debug::fmt(Lite(&self.0), formatter)?; + formatter.write_str(")")?; + Ok(()) + } + } + formatter.field("abi", Print::ref_cast(val)); + } + if !_val.inputs.is_empty() { + formatter.field("inputs", Lite(&_val.inputs)); + } + if let Some(val) = &_val.variadic { + #[derive(RefCast)] + #[repr(transparent)] + struct Print(syn::BareVariadic); + impl Debug for Print { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Some(")?; + Debug::fmt(Lite(&self.0), formatter)?; + formatter.write_str(")")?; + Ok(()) + } + } + formatter.field("variadic", Print::ref_cast(val)); + } + formatter.field("output", Lite(&_val.output)); + formatter.finish() + } + syn::Type::Group(_val) => { + let mut formatter = formatter.debug_struct("Type::Group"); + formatter.field("elem", Lite(&_val.elem)); + formatter.finish() + } + syn::Type::ImplTrait(_val) => { + let mut formatter = formatter.debug_struct("Type::ImplTrait"); + if !_val.bounds.is_empty() { + formatter.field("bounds", Lite(&_val.bounds)); + } + formatter.finish() + } + syn::Type::Infer(_val) => { + let mut formatter = formatter.debug_struct("Type::Infer"); + formatter.finish() + } + syn::Type::Macro(_val) => { + let mut formatter = formatter.debug_struct("Type::Macro"); + formatter.field("mac", Lite(&_val.mac)); + formatter.finish() + } + syn::Type::Never(_val) => { + let mut formatter = 
formatter.debug_struct("Type::Never"); + formatter.finish() + } + syn::Type::Paren(_val) => { + let mut formatter = formatter.debug_struct("Type::Paren"); + formatter.field("elem", Lite(&_val.elem)); + formatter.finish() + } + syn::Type::Path(_val) => { + let mut formatter = formatter.debug_struct("Type::Path"); + if let Some(val) = &_val.qself { + #[derive(RefCast)] + #[repr(transparent)] + struct Print(syn::QSelf); + impl Debug for Print { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Some(")?; + Debug::fmt(Lite(&self.0), formatter)?; + formatter.write_str(")")?; + Ok(()) + } + } + formatter.field("qself", Print::ref_cast(val)); + } + formatter.field("path", Lite(&_val.path)); + formatter.finish() + } + syn::Type::Ptr(_val) => { + let mut formatter = formatter.debug_struct("Type::Ptr"); + if _val.const_token.is_some() { + formatter.field("const_token", &Present); + } + if _val.mutability.is_some() { + formatter.field("mutability", &Present); + } + formatter.field("elem", Lite(&_val.elem)); + formatter.finish() + } + syn::Type::Reference(_val) => { + let mut formatter = formatter.debug_struct("Type::Reference"); + if let Some(val) = &_val.lifetime { + #[derive(RefCast)] + #[repr(transparent)] + struct Print(syn::Lifetime); + impl Debug for Print { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Some(")?; + Debug::fmt(Lite(&self.0), formatter)?; + formatter.write_str(")")?; + Ok(()) + } + } + formatter.field("lifetime", Print::ref_cast(val)); + } + if _val.mutability.is_some() { + formatter.field("mutability", &Present); + } + formatter.field("elem", Lite(&_val.elem)); + formatter.finish() + } + syn::Type::Slice(_val) => { + let mut formatter = formatter.debug_struct("Type::Slice"); + formatter.field("elem", Lite(&_val.elem)); + formatter.finish() + } + syn::Type::TraitObject(_val) => { + let mut formatter = formatter.debug_struct("Type::TraitObject"); + if _val.dyn_token.is_some() { + formatter.field("dyn_token", &Present); + } + if !_val.bounds.is_empty() { + formatter.field("bounds", Lite(&_val.bounds)); + } + formatter.finish() + } + syn::Type::Tuple(_val) => { + let mut formatter = formatter.debug_struct("Type::Tuple"); + if !_val.elems.is_empty() { + formatter.field("elems", Lite(&_val.elems)); + } + formatter.finish() + } + syn::Type::Verbatim(_val) => { + formatter.write_str("Type::Verbatim")?; + formatter.write_str("(`")?; + Display::fmt(_val, formatter)?; + formatter.write_str("`)")?; + Ok(()) + } + _ => unreachable!(), + } + } +} +impl Debug for Lite<syn::TypeArray> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("TypeArray"); + formatter.field("elem", Lite(&self.value.elem)); + formatter.field("len", Lite(&self.value.len)); + formatter.finish() + } +} +impl Debug for Lite<syn::TypeBareFn> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("TypeBareFn"); + if let Some(val) = &self.value.lifetimes { + #[derive(RefCast)] + #[repr(transparent)] + struct Print(syn::BoundLifetimes); + impl Debug for Print { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Some(")?; + Debug::fmt(Lite(&self.0), formatter)?; + formatter.write_str(")")?; + Ok(()) + } + } + formatter.field("lifetimes", Print::ref_cast(val)); + } + if self.value.unsafety.is_some() { + formatter.field("unsafety", &Present); + } + if let Some(val) = &self.value.abi { + 
#[derive(RefCast)] + #[repr(transparent)] + struct Print(syn::Abi); + impl Debug for Print { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Some(")?; + Debug::fmt(Lite(&self.0), formatter)?; + formatter.write_str(")")?; + Ok(()) + } + } + formatter.field("abi", Print::ref_cast(val)); + } + if !self.value.inputs.is_empty() { + formatter.field("inputs", Lite(&self.value.inputs)); + } + if let Some(val) = &self.value.variadic { + #[derive(RefCast)] + #[repr(transparent)] + struct Print(syn::BareVariadic); + impl Debug for Print { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Some(")?; + Debug::fmt(Lite(&self.0), formatter)?; + formatter.write_str(")")?; + Ok(()) + } + } + formatter.field("variadic", Print::ref_cast(val)); + } + formatter.field("output", Lite(&self.value.output)); + formatter.finish() + } +} +impl Debug for Lite<syn::TypeGroup> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("TypeGroup"); + formatter.field("elem", Lite(&self.value.elem)); + formatter.finish() + } +} +impl Debug for Lite<syn::TypeImplTrait> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("TypeImplTrait"); + if !self.value.bounds.is_empty() { + formatter.field("bounds", Lite(&self.value.bounds)); + } + formatter.finish() + } +} +impl Debug for Lite<syn::TypeInfer> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("TypeInfer"); + formatter.finish() + } +} +impl Debug for Lite<syn::TypeMacro> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("TypeMacro"); + formatter.field("mac", Lite(&self.value.mac)); + formatter.finish() + } +} +impl Debug for Lite<syn::TypeNever> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("TypeNever"); + formatter.finish() + } +} +impl Debug for Lite<syn::TypeParam> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("TypeParam"); + if !self.value.attrs.is_empty() { + formatter.field("attrs", Lite(&self.value.attrs)); + } + formatter.field("ident", Lite(&self.value.ident)); + if self.value.colon_token.is_some() { + formatter.field("colon_token", &Present); + } + if !self.value.bounds.is_empty() { + formatter.field("bounds", Lite(&self.value.bounds)); + } + if self.value.eq_token.is_some() { + formatter.field("eq_token", &Present); + } + if let Some(val) = &self.value.default { + #[derive(RefCast)] + #[repr(transparent)] + struct Print(syn::Type); + impl Debug for Print { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Some(")?; + Debug::fmt(Lite(&self.0), formatter)?; + formatter.write_str(")")?; + Ok(()) + } + } + formatter.field("default", Print::ref_cast(val)); + } + formatter.finish() + } +} +impl Debug for Lite<syn::TypeParamBound> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + match &self.value { + syn::TypeParamBound::Trait(_val) => { + formatter.write_str("TypeParamBound::Trait")?; + formatter.write_str("(")?; + Debug::fmt(Lite(_val), formatter)?; + formatter.write_str(")")?; + Ok(()) + } + syn::TypeParamBound::Lifetime(_val) => { + let mut formatter = formatter.debug_struct("TypeParamBound::Lifetime"); + formatter.field("ident", Lite(&_val.ident)); + formatter.finish() 
+ } + syn::TypeParamBound::PreciseCapture(_val) => { + formatter.write_str("TypeParamBound::PreciseCapture")?; + formatter.write_str("(")?; + Debug::fmt(Lite(_val), formatter)?; + formatter.write_str(")")?; + Ok(()) + } + syn::TypeParamBound::Verbatim(_val) => { + formatter.write_str("TypeParamBound::Verbatim")?; + formatter.write_str("(`")?; + Display::fmt(_val, formatter)?; + formatter.write_str("`)")?; + Ok(()) + } + _ => unreachable!(), + } + } +} +impl Debug for Lite<syn::TypeParen> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("TypeParen"); + formatter.field("elem", Lite(&self.value.elem)); + formatter.finish() + } +} +impl Debug for Lite<syn::TypePath> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("TypePath"); + if let Some(val) = &self.value.qself { + #[derive(RefCast)] + #[repr(transparent)] + struct Print(syn::QSelf); + impl Debug for Print { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Some(")?; + Debug::fmt(Lite(&self.0), formatter)?; + formatter.write_str(")")?; + Ok(()) + } + } + formatter.field("qself", Print::ref_cast(val)); + } + formatter.field("path", Lite(&self.value.path)); + formatter.finish() + } +} +impl Debug for Lite<syn::TypePtr> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("TypePtr"); + if self.value.const_token.is_some() { + formatter.field("const_token", &Present); + } + if self.value.mutability.is_some() { + formatter.field("mutability", &Present); + } + formatter.field("elem", Lite(&self.value.elem)); + formatter.finish() + } +} +impl Debug for Lite<syn::TypeReference> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("TypeReference"); + if let Some(val) = &self.value.lifetime { + #[derive(RefCast)] + #[repr(transparent)] + struct Print(syn::Lifetime); + impl Debug for Print { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Some(")?; + Debug::fmt(Lite(&self.0), formatter)?; + formatter.write_str(")")?; + Ok(()) + } + } + formatter.field("lifetime", Print::ref_cast(val)); + } + if self.value.mutability.is_some() { + formatter.field("mutability", &Present); + } + formatter.field("elem", Lite(&self.value.elem)); + formatter.finish() + } +} +impl Debug for Lite<syn::TypeSlice> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("TypeSlice"); + formatter.field("elem", Lite(&self.value.elem)); + formatter.finish() + } +} +impl Debug for Lite<syn::TypeTraitObject> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("TypeTraitObject"); + if self.value.dyn_token.is_some() { + formatter.field("dyn_token", &Present); + } + if !self.value.bounds.is_empty() { + formatter.field("bounds", Lite(&self.value.bounds)); + } + formatter.finish() + } +} +impl Debug for Lite<syn::TypeTuple> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("TypeTuple"); + if !self.value.elems.is_empty() { + formatter.field("elems", Lite(&self.value.elems)); + } + formatter.finish() + } +} +impl Debug for Lite<syn::UnOp> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + match &self.value { + syn::UnOp::Deref(_val) => { + formatter.write_str("UnOp::Deref")?; + 
Ok(()) + } + syn::UnOp::Not(_val) => { + formatter.write_str("UnOp::Not")?; + Ok(()) + } + syn::UnOp::Neg(_val) => { + formatter.write_str("UnOp::Neg")?; + Ok(()) + } + _ => unreachable!(), + } + } +} +impl Debug for Lite<syn::UseGlob> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("UseGlob"); + formatter.finish() + } +} +impl Debug for Lite<syn::UseGroup> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("UseGroup"); + if !self.value.items.is_empty() { + formatter.field("items", Lite(&self.value.items)); + } + formatter.finish() + } +} +impl Debug for Lite<syn::UseName> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("UseName"); + formatter.field("ident", Lite(&self.value.ident)); + formatter.finish() + } +} +impl Debug for Lite<syn::UsePath> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("UsePath"); + formatter.field("ident", Lite(&self.value.ident)); + formatter.field("tree", Lite(&self.value.tree)); + formatter.finish() + } +} +impl Debug for Lite<syn::UseRename> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("UseRename"); + formatter.field("ident", Lite(&self.value.ident)); + formatter.field("rename", Lite(&self.value.rename)); + formatter.finish() + } +} +impl Debug for Lite<syn::UseTree> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + match &self.value { + syn::UseTree::Path(_val) => { + formatter.write_str("UseTree::Path")?; + formatter.write_str("(")?; + Debug::fmt(Lite(_val), formatter)?; + formatter.write_str(")")?; + Ok(()) + } + syn::UseTree::Name(_val) => { + formatter.write_str("UseTree::Name")?; + formatter.write_str("(")?; + Debug::fmt(Lite(_val), formatter)?; + formatter.write_str(")")?; + Ok(()) + } + syn::UseTree::Rename(_val) => { + formatter.write_str("UseTree::Rename")?; + formatter.write_str("(")?; + Debug::fmt(Lite(_val), formatter)?; + formatter.write_str(")")?; + Ok(()) + } + syn::UseTree::Glob(_val) => { + formatter.write_str("UseTree::Glob")?; + formatter.write_str("(")?; + Debug::fmt(Lite(_val), formatter)?; + formatter.write_str(")")?; + Ok(()) + } + syn::UseTree::Group(_val) => { + formatter.write_str("UseTree::Group")?; + formatter.write_str("(")?; + Debug::fmt(Lite(_val), formatter)?; + formatter.write_str(")")?; + Ok(()) + } + } + } +} +impl Debug for Lite<syn::Variadic> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("Variadic"); + if !self.value.attrs.is_empty() { + formatter.field("attrs", Lite(&self.value.attrs)); + } + if let Some(val) = &self.value.pat { + #[derive(RefCast)] + #[repr(transparent)] + struct Print((Box<syn::Pat>, syn::token::Colon)); + impl Debug for Print { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Some(")?; + Debug::fmt(Lite(&self.0.0), formatter)?; + formatter.write_str(")")?; + Ok(()) + } + } + formatter.field("pat", Print::ref_cast(val)); + } + if self.value.comma.is_some() { + formatter.field("comma", &Present); + } + formatter.finish() + } +} +impl Debug for Lite<syn::Variant> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("Variant"); + if !self.value.attrs.is_empty() { + formatter.field("attrs", 
Lite(&self.value.attrs)); + } + formatter.field("ident", Lite(&self.value.ident)); + formatter.field("fields", Lite(&self.value.fields)); + if let Some(val) = &self.value.discriminant { + #[derive(RefCast)] + #[repr(transparent)] + struct Print((syn::token::Eq, syn::Expr)); + impl Debug for Print { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Some(")?; + Debug::fmt(Lite(&self.0.1), formatter)?; + formatter.write_str(")")?; + Ok(()) + } + } + formatter.field("discriminant", Print::ref_cast(val)); + } + formatter.finish() + } +} +impl Debug for Lite<syn::VisRestricted> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("VisRestricted"); + if self.value.in_token.is_some() { + formatter.field("in_token", &Present); + } + formatter.field("path", Lite(&self.value.path)); + formatter.finish() + } +} +impl Debug for Lite<syn::Visibility> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + match &self.value { + syn::Visibility::Public(_val) => { + formatter.write_str("Visibility::Public")?; + Ok(()) + } + syn::Visibility::Restricted(_val) => { + let mut formatter = formatter.debug_struct("Visibility::Restricted"); + if _val.in_token.is_some() { + formatter.field("in_token", &Present); + } + formatter.field("path", Lite(&_val.path)); + formatter.finish() + } + syn::Visibility::Inherited => formatter.write_str("Visibility::Inherited"), + } + } +} +impl Debug for Lite<syn::WhereClause> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut formatter = formatter.debug_struct("WhereClause"); + if !self.value.predicates.is_empty() { + formatter.field("predicates", Lite(&self.value.predicates)); + } + formatter.finish() + } +} +impl Debug for Lite<syn::WherePredicate> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + match &self.value { + syn::WherePredicate::Lifetime(_val) => { + formatter.write_str("WherePredicate::Lifetime")?; + formatter.write_str("(")?; + Debug::fmt(Lite(_val), formatter)?; + formatter.write_str(")")?; + Ok(()) + } + syn::WherePredicate::Type(_val) => { + formatter.write_str("WherePredicate::Type")?; + formatter.write_str("(")?; + Debug::fmt(Lite(_val), formatter)?; + formatter.write_str(")")?; + Ok(()) + } + _ => unreachable!(), + } + } +} +impl Debug for Lite<syn::token::Abstract> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Token![abstract]") + } +} +impl Debug for Lite<syn::token::And> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Token![&]") + } +} +impl Debug for Lite<syn::token::AndAnd> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Token![&&]") + } +} +impl Debug for Lite<syn::token::AndEq> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Token![&=]") + } +} +impl Debug for Lite<syn::token::As> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Token![as]") + } +} +impl Debug for Lite<syn::token::Async> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Token![async]") + } +} +impl Debug for Lite<syn::token::At> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Token![@]") + } +} +impl Debug for Lite<syn::token::Auto> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Token![auto]") + } +} 
+impl Debug for Lite<syn::token::Await> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Token![await]") + } +} +impl Debug for Lite<syn::token::Become> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Token![become]") + } +} +impl Debug for Lite<syn::token::Box> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Token![box]") + } +} +impl Debug for Lite<syn::token::Break> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Token![break]") + } +} +impl Debug for Lite<syn::token::Caret> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Token![^]") + } +} +impl Debug for Lite<syn::token::CaretEq> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Token![^=]") + } +} +impl Debug for Lite<syn::token::Colon> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Token![:]") + } +} +impl Debug for Lite<syn::token::Comma> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Token![,]") + } +} +impl Debug for Lite<syn::token::Const> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Token![const]") + } +} +impl Debug for Lite<syn::token::Continue> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Token![continue]") + } +} +impl Debug for Lite<syn::token::Crate> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Token![crate]") + } +} +impl Debug for Lite<syn::token::Default> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Token![default]") + } +} +impl Debug for Lite<syn::token::Do> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Token![do]") + } +} +impl Debug for Lite<syn::token::Dollar> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Token![$]") + } +} +impl Debug for Lite<syn::token::Dot> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Token![.]") + } +} +impl Debug for Lite<syn::token::DotDot> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Token![..]") + } +} +impl Debug for Lite<syn::token::DotDotDot> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Token![...]") + } +} +impl Debug for Lite<syn::token::DotDotEq> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Token![..=]") + } +} +impl Debug for Lite<syn::token::Dyn> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Token![dyn]") + } +} +impl Debug for Lite<syn::token::Else> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Token![else]") + } +} +impl Debug for Lite<syn::token::Enum> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Token![enum]") + } +} +impl Debug for Lite<syn::token::Eq> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Token![=]") + } +} +impl Debug for Lite<syn::token::EqEq> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Token![==]") + } +} +impl Debug for Lite<syn::token::Extern> { + fn fmt(&self, formatter: 
&mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Token![extern]") + } +} +impl Debug for Lite<syn::token::FatArrow> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Token![=>]") + } +} +impl Debug for Lite<syn::token::Final> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Token![final]") + } +} +impl Debug for Lite<syn::token::Fn> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Token![fn]") + } +} +impl Debug for Lite<syn::token::For> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Token![for]") + } +} +impl Debug for Lite<syn::token::Ge> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Token![>=]") + } +} +impl Debug for Lite<syn::token::Gt> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Token![>]") + } +} +impl Debug for Lite<syn::token::If> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Token![if]") + } +} +impl Debug for Lite<syn::token::Impl> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Token![impl]") + } +} +impl Debug for Lite<syn::token::In> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Token![in]") + } +} +impl Debug for Lite<syn::token::LArrow> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Token![<-]") + } +} +impl Debug for Lite<syn::token::Le> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Token![<=]") + } +} +impl Debug for Lite<syn::token::Let> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Token![let]") + } +} +impl Debug for Lite<syn::token::Loop> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Token![loop]") + } +} +impl Debug for Lite<syn::token::Lt> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Token![<]") + } +} +impl Debug for Lite<syn::token::Macro> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Token![macro]") + } +} +impl Debug for Lite<syn::token::Match> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Token![match]") + } +} +impl Debug for Lite<syn::token::Minus> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Token![-]") + } +} +impl Debug for Lite<syn::token::MinusEq> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Token![-=]") + } +} +impl Debug for Lite<syn::token::Mod> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Token![mod]") + } +} +impl Debug for Lite<syn::token::Move> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Token![move]") + } +} +impl Debug for Lite<syn::token::Mut> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Token![mut]") + } +} +impl Debug for Lite<syn::token::Ne> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Token![!=]") + } +} +impl Debug for Lite<syn::token::Not> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Token![!]") + } +} +impl Debug for 
Lite<syn::token::Or> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Token![|]") + } +} +impl Debug for Lite<syn::token::OrEq> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Token![|=]") + } +} +impl Debug for Lite<syn::token::OrOr> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Token![||]") + } +} +impl Debug for Lite<syn::token::Override> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Token![override]") + } +} +impl Debug for Lite<syn::token::PathSep> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Token![::]") + } +} +impl Debug for Lite<syn::token::Percent> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Token![%]") + } +} +impl Debug for Lite<syn::token::PercentEq> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Token![%=]") + } +} +impl Debug for Lite<syn::token::Plus> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Token![+]") + } +} +impl Debug for Lite<syn::token::PlusEq> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Token![+=]") + } +} +impl Debug for Lite<syn::token::Pound> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Token![#]") + } +} +impl Debug for Lite<syn::token::Priv> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Token![priv]") + } +} +impl Debug for Lite<syn::token::Pub> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Token![pub]") + } +} +impl Debug for Lite<syn::token::Question> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Token![?]") + } +} +impl Debug for Lite<syn::token::RArrow> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Token![->]") + } +} +impl Debug for Lite<syn::token::Raw> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Token![raw]") + } +} +impl Debug for Lite<syn::token::Ref> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Token![ref]") + } +} +impl Debug for Lite<syn::token::Return> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Token![return]") + } +} +impl Debug for Lite<syn::token::SelfType> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Token![Self]") + } +} +impl Debug for Lite<syn::token::SelfValue> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Token![self]") + } +} +impl Debug for Lite<syn::token::Semi> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Token![;]") + } +} +impl Debug for Lite<syn::token::Shl> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Token![<<]") + } +} +impl Debug for Lite<syn::token::ShlEq> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Token![<<=]") + } +} +impl Debug for Lite<syn::token::Shr> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Token![>>]") + } +} +impl Debug for Lite<syn::token::ShrEq> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> 
fmt::Result { + formatter.write_str("Token![>>=]") + } +} +impl Debug for Lite<syn::token::Slash> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Token![/]") + } +} +impl Debug for Lite<syn::token::SlashEq> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Token![/=]") + } +} +impl Debug for Lite<syn::token::Star> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Token![*]") + } +} +impl Debug for Lite<syn::token::StarEq> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Token![*=]") + } +} +impl Debug for Lite<syn::token::Static> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Token![static]") + } +} +impl Debug for Lite<syn::token::Struct> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Token![struct]") + } +} +impl Debug for Lite<syn::token::Super> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Token![super]") + } +} +impl Debug for Lite<syn::token::Tilde> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Token![~]") + } +} +impl Debug for Lite<syn::token::Trait> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Token![trait]") + } +} +impl Debug for Lite<syn::token::Try> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Token![try]") + } +} +impl Debug for Lite<syn::token::Type> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Token![type]") + } +} +impl Debug for Lite<syn::token::Typeof> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Token![typeof]") + } +} +impl Debug for Lite<syn::token::Underscore> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Token![_]") + } +} +impl Debug for Lite<syn::token::Union> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Token![union]") + } +} +impl Debug for Lite<syn::token::Unsafe> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Token![unsafe]") + } +} +impl Debug for Lite<syn::token::Unsized> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Token![unsized]") + } +} +impl Debug for Lite<syn::token::Use> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Token![use]") + } +} +impl Debug for Lite<syn::token::Virtual> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Token![virtual]") + } +} +impl Debug for Lite<syn::token::Where> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Token![where]") + } +} +impl Debug for Lite<syn::token::While> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Token![while]") + } +} +impl Debug for Lite<syn::token::Yield> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Token![yield]") + } +} diff --git a/vendor/syn/tests/debug/mod.rs b/vendor/syn/tests/debug/mod.rs new file mode 100644 index 00000000000000..7ab2b795d5d350 --- /dev/null +++ b/vendor/syn/tests/debug/mod.rs @@ -0,0 +1,147 @@ +#![allow( + clippy::no_effect_underscore_binding, + clippy::too_many_lines, + 
clippy::used_underscore_binding +)] + +#[rustfmt::skip] +mod gen; + +use proc_macro2::{Ident, Literal, TokenStream}; +use ref_cast::RefCast; +use std::fmt::{self, Debug}; +use std::ops::Deref; +use syn::punctuated::Punctuated; + +#[derive(RefCast)] +#[repr(transparent)] +pub struct Lite<T: ?Sized> { + value: T, +} + +#[allow(non_snake_case)] +pub fn Lite<T: ?Sized>(value: &T) -> &Lite<T> { + Lite::ref_cast(value) +} + +impl<T: ?Sized> Deref for Lite<T> { + type Target = T; + + fn deref(&self) -> &Self::Target { + &self.value + } +} + +impl Debug for Lite<bool> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + write!(formatter, "{}", self.value) + } +} + +impl Debug for Lite<u32> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + write!(formatter, "{}", self.value) + } +} + +impl Debug for Lite<usize> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + write!(formatter, "{}", self.value) + } +} + +impl Debug for Lite<String> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + write!(formatter, "{:?}", self.value) + } +} + +impl Debug for Lite<Ident> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + write!(formatter, "{:?}", self.value.to_string()) + } +} + +impl Debug for Lite<Literal> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + write!(formatter, "{}", self.value) + } +} + +impl Debug for Lite<TokenStream> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let string = self.value.to_string(); + if string.len() <= 80 { + write!(formatter, "TokenStream(`{}`)", self.value) + } else { + formatter + .debug_tuple("TokenStream") + .field(&format_args!("`{}`", string)) + .finish() + } + } +} + +impl<T> Debug for Lite<&T> +where + Lite<T>: Debug, +{ + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + Debug::fmt(Lite(self.value), formatter) + } +} + +impl<T> Debug for Lite<Box<T>> +where + Lite<T>: Debug, +{ + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + Debug::fmt(Lite(&*self.value), formatter) + } +} + +impl<T> Debug for Lite<Vec<T>> +where + Lite<T>: Debug, +{ + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter + .debug_list() + .entries(self.value.iter().map(Lite)) + .finish() + } +} + +impl<T, P> Debug for Lite<Punctuated<T, P>> +where + Lite<T>: Debug, + Lite<P>: Debug, +{ + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut list = formatter.debug_list(); + for pair in self.pairs() { + let (node, punct) = pair.into_tuple(); + list.entry(Lite(node)); + list.entries(punct.map(Lite)); + } + list.finish() + } +} + +struct Present; + +impl Debug for Present { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Some") + } +} + +struct Option { + present: bool, +} + +impl Debug for Option { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str(if self.present { "Some" } else { "None" }) + } +} diff --git a/vendor/syn/tests/macros/mod.rs b/vendor/syn/tests/macros/mod.rs new file mode 100644 index 00000000000000..9c9a957f71c27f --- /dev/null +++ b/vendor/syn/tests/macros/mod.rs @@ -0,0 +1,7 @@ +macro_rules! 
errorf { + ($($tt:tt)*) => {{ + use ::std::io::Write; + let stderr = ::std::io::stderr(); + write!(stderr.lock(), $($tt)*).unwrap(); + }}; +} diff --git a/vendor/syn/tests/regression.rs b/vendor/syn/tests/regression.rs new file mode 100644 index 00000000000000..5c7fcddc8da9a6 --- /dev/null +++ b/vendor/syn/tests/regression.rs @@ -0,0 +1,5 @@ +#![allow(clippy::let_underscore_untyped, clippy::uninlined_format_args)] + +mod regression { + automod::dir!("tests/regression"); +} diff --git a/vendor/syn/tests/regression/issue1108.rs b/vendor/syn/tests/regression/issue1108.rs new file mode 100644 index 00000000000000..11a82adaadb0e7 --- /dev/null +++ b/vendor/syn/tests/regression/issue1108.rs @@ -0,0 +1,5 @@ +#[test] +fn issue1108() { + let data = "impl<x<>>::x for"; + let _ = syn::parse_file(data); +} diff --git a/vendor/syn/tests/regression/issue1235.rs b/vendor/syn/tests/regression/issue1235.rs new file mode 100644 index 00000000000000..8836030664b8b7 --- /dev/null +++ b/vendor/syn/tests/regression/issue1235.rs @@ -0,0 +1,32 @@ +use proc_macro2::{Delimiter, Group}; +use quote::quote; + +#[test] +fn main() { + // Okay. Rustc allows top-level `static` with no value syntactically, but + // not semantically. Syn parses as Item::Verbatim. + let tokens = quote! { + pub static FOO: usize; + pub static BAR: usize; + }; + let file = syn::parse2::<syn::File>(tokens).unwrap(); + println!("{:#?}", file); + + // Okay. + let inner = Group::new( + Delimiter::None, + quote!(static FOO: usize = 0; pub static BAR: usize = 0), + ); + let tokens = quote!(pub #inner;); + let file = syn::parse2::<syn::File>(tokens).unwrap(); + println!("{:#?}", file); + + // Formerly parser crash. + let inner = Group::new( + Delimiter::None, + quote!(static FOO: usize; pub static BAR: usize), + ); + let tokens = quote!(pub #inner;); + let file = syn::parse2::<syn::File>(tokens).unwrap(); + println!("{:#?}", file); +} diff --git a/vendor/syn/tests/repo/mod.rs b/vendor/syn/tests/repo/mod.rs new file mode 100644 index 00000000000000..8cbb83bf8e79fd --- /dev/null +++ b/vendor/syn/tests/repo/mod.rs @@ -0,0 +1,630 @@ +#![allow(clippy::manual_assert)] + +mod progress; + +use self::progress::Progress; +use anyhow::Result; +use flate2::read::GzDecoder; +use rayon::iter::{IntoParallelRefIterator, ParallelIterator}; +use rayon::ThreadPoolBuilder; +use std::collections::BTreeSet; +use std::env; +use std::ffi::OsStr; +use std::fs; +use std::path::{Path, PathBuf}; +use tar::Archive; +use walkdir::{DirEntry, WalkDir}; + +// nightly-2025-08-14 +const REVISION: &str = "3672a55b7cfd0a12e7097197b6242872473ffaa7"; + +#[rustfmt::skip] +static EXCLUDE_FILES: &[&str] = &[ + // TODO: const traits: `pub const trait Trait {}` + // https://github.com/dtolnay/syn/issues/1887 + "src/tools/clippy/tests/ui/assign_ops.rs", + "src/tools/clippy/tests/ui/missing_const_for_fn/const_trait.rs", + "src/tools/clippy/tests/ui/trait_duplication_in_bounds.rs", + "src/tools/rust-analyzer/crates/test-utils/src/minicore.rs", + + // TODO: unsafe binders: `unsafe<'a> &'a T` + // https://github.com/dtolnay/syn/issues/1791 + "src/tools/rustfmt/tests/source/unsafe-binders.rs", + "src/tools/rustfmt/tests/target/unsafe-binders.rs", + "tests/mir-opt/gvn_on_unsafe_binder.rs", + "tests/rustdoc/auxiliary/unsafe-binder-dep.rs", + "tests/rustdoc/unsafe-binder.rs", + "tests/ui/unsafe-binders/cat-projection.rs", + + // TODO: unsafe fields: `struct S { unsafe field: T }` + // https://github.com/dtolnay/syn/issues/1792 + "src/tools/clippy/tests/ui/derive.rs", + 
"src/tools/rust-analyzer/crates/parser/test_data/parser/inline/ok/record_field_list.rs", + "src/tools/rustfmt/tests/source/unsafe-field.rs", + "src/tools/rustfmt/tests/target/unsafe-field.rs", + "tests/ui/unsafe-fields/auxiliary/unsafe-fields-crate-dep.rs", + + // TODO: guard patterns: `match expr { (A if f()) | (B if g()) => {} }` + // https://github.com/dtolnay/syn/issues/1793 + "src/tools/rustfmt/tests/target/guard_patterns.rs", + "tests/ui/pattern/rfc-3637-guard-patterns/only-gather-locals-once.rs", + + // TODO: struct field default: `struct S { field: i32 = 1 }` + // https://github.com/dtolnay/syn/issues/1774 + "compiler/rustc_errors/src/markdown/parse.rs", + "compiler/rustc_session/src/config.rs", + "src/tools/clippy/tests/ui/exhaustive_items.rs", + "src/tools/rust-analyzer/crates/parser/test_data/parser/inline/ok/record_field_default_values.rs", + "src/tools/rustfmt/tests/source/default-field-values.rs", + "src/tools/rustfmt/tests/target/default-field-values.rs", + "tests/ui/structs/default-field-values/auxiliary/struct_field_default.rs", + "tests/ui/structs/default-field-values/const-trait-default-field-value.rs", + "tests/ui/structs/default-field-values/field-references-param.rs", + "tests/ui/structs/default-field-values/support.rs", + "tests/ui/structs/default-field-values/use-normalized-ty-for-default-struct-value.rs", + + // TODO: return type notation: `where T: Trait<method(): Send>` and `where T::method(..): Send` + // https://github.com/dtolnay/syn/issues/1434 + "src/tools/rust-analyzer/crates/parser/test_data/parser/inline/ok/return_type_syntax_in_path.rs", + "src/tools/rustfmt/tests/target/return-type-notation.rs", + "tests/rustdoc-json/return-type-notation.rs", + "tests/rustdoc/return-type-notation.rs", + "tests/ui/associated-type-bounds/all-generics-lookup.rs", + "tests/ui/associated-type-bounds/implied-from-self-where-clause.rs", + "tests/ui/associated-type-bounds/return-type-notation/basic.rs", + "tests/ui/associated-type-bounds/return-type-notation/higher-ranked-bound-works.rs", + "tests/ui/associated-type-bounds/return-type-notation/namespace-conflict.rs", + "tests/ui/associated-type-bounds/return-type-notation/path-constrained-in-method.rs", + "tests/ui/associated-type-bounds/return-type-notation/path-self-qself.rs", + "tests/ui/associated-type-bounds/return-type-notation/path-works.rs", + "tests/ui/associated-type-bounds/return-type-notation/unpretty-parenthesized.rs", + "tests/ui/async-await/return-type-notation/issue-110963-late.rs", + "tests/ui/async-await/return-type-notation/normalizing-self-auto-trait-issue-109924.rs", + "tests/ui/async-await/return-type-notation/rtn-implied-in-supertrait.rs", + "tests/ui/async-await/return-type-notation/super-method-bound.rs", + "tests/ui/async-await/return-type-notation/supertrait-bound.rs", + "tests/ui/borrowck/alias-liveness/rtn-static.rs", + "tests/ui/feature-gates/feature-gate-return_type_notation.rs", + + // TODO: lazy type alias syntax with where-clause in trailing position + // https://github.com/dtolnay/syn/issues/1525 + "src/tools/rust-analyzer/crates/parser/test_data/parser/inline/ok/type_item_where_clause.rs", + "src/tools/rustfmt/tests/source/type-alias-where-clauses-with-comments.rs", + "src/tools/rustfmt/tests/source/type-alias-where-clauses.rs", + "src/tools/rustfmt/tests/target/type-alias-where-clauses-with-comments.rs", + "src/tools/rustfmt/tests/target/type-alias-where-clauses.rs", + "tests/rustdoc/typedef-inner-variants-lazy_type_alias.rs", + + // TODO: gen blocks and functions + // 
https://github.com/dtolnay/syn/issues/1526 + "compiler/rustc_codegen_cranelift/example/gen_block_iterate.rs", + "compiler/rustc_hir_analysis/src/collect/resolve_bound_vars.rs", + "compiler/rustc_metadata/src/rmeta/decoder.rs", + "compiler/rustc_middle/src/ty/closure.rs", + "compiler/rustc_middle/src/ty/context.rs", + "src/tools/rust-analyzer/crates/parser/test_data/parser/inline/ok/gen_blocks.rs", + "tests/ui/async-await/async-drop/assign-incompatible-types.rs", + "tests/ui/coroutine/async-gen-deduce-yield.rs", + "tests/ui/coroutine/async-gen-yield-ty-is-unit.rs", + "tests/ui/coroutine/async_gen_fn_iter.rs", + "tests/ui/coroutine/gen_block_is_fused_iter.rs", + "tests/ui/coroutine/gen_block_is_iter.rs", + "tests/ui/coroutine/gen_block_iterate.rs", + "tests/ui/coroutine/gen_fn_iter.rs", + "tests/ui/coroutine/gen_fn_lifetime_capture.rs", + "tests/ui/coroutine/other-attribute-on-gen.rs", + "tests/ui/coroutine/return-types-diverge.rs", + "tests/ui/higher-ranked/builtin-closure-like-bounds.rs", + "tests/ui/sanitizer/cfi/coroutine.rs", + + // TODO: postfix yield + // https://github.com/dtolnay/syn/issues/1890 + "tests/pretty/postfix-yield.rs", + "tests/ui/coroutine/postfix-yield.rs", + + // TODO: `!` as a pattern + // https://github.com/dtolnay/syn/issues/1546 + "tests/mir-opt/building/match/never_patterns.rs", + "tests/pretty/never-pattern.rs", + "tests/ui/rfcs/rfc-0000-never_patterns/always-read-in-closure-capture.rs", + "tests/ui/rfcs/rfc-0000-never_patterns/diverges.rs", + "tests/ui/rfcs/rfc-0000-never_patterns/use-bindings.rs", + + // TODO: async trait bounds: `impl async Fn()` + // https://github.com/dtolnay/syn/issues/1628 + "src/tools/miri/tests/pass/async-closure-captures.rs", + "src/tools/miri/tests/pass/async-closure-drop.rs", + "src/tools/rust-analyzer/crates/parser/test_data/parser/inline/ok/async_trait_bound.rs", + "src/tools/rust-analyzer/crates/parser/test_data/parser/inline/ok/for_binder_bound.rs", + "src/tools/rustfmt/tests/target/asyncness.rs", + "tests/coverage/async_closure.rs", + "tests/ui/async-await/async-closures/async-fn-mut-for-async-fn.rs", + "tests/ui/async-await/async-closures/async-fn-once-for-async-fn.rs", + "tests/ui/async-await/async-closures/auxiliary/foreign.rs", + "tests/ui/async-await/async-closures/body-check-on-non-fnmut.rs", + "tests/ui/async-await/async-closures/box-deref-in-debuginfo.rs", + "tests/ui/async-await/async-closures/brand.rs", + "tests/ui/async-await/async-closures/captures.rs", + "tests/ui/async-await/async-closures/clone-closure.rs", + "tests/ui/async-await/async-closures/constrained-but-no-upvars-yet.rs", + "tests/ui/async-await/async-closures/debuginfo-by-move-body.rs", + "tests/ui/async-await/async-closures/drop.rs", + "tests/ui/async-await/async-closures/force-move-due-to-inferred-kind.rs", + "tests/ui/async-await/async-closures/foreign.rs", + "tests/ui/async-await/async-closures/inline-body.rs", + "tests/ui/async-await/async-closures/mangle.rs", + "tests/ui/async-await/async-closures/moro-example.rs", + "tests/ui/async-await/async-closures/move-is-async-fn.rs", + "tests/ui/async-await/async-closures/mut-ref-reborrow.rs", + "tests/ui/async-await/async-closures/no-borrow-from-env.rs", + "tests/ui/async-await/async-closures/non-copy-arg-does-not-force-inner-move.rs", + "tests/ui/async-await/async-closures/overlapping-projs.rs", + "tests/ui/async-await/async-closures/precise-captures.rs", + "tests/ui/async-await/async-closures/refd.rs", + "tests/ui/async-await/async-closures/signature-deduction.rs", + 
"tests/ui/async-await/async-fn/edition-2015-not-async-bound.rs", + "tests/ui/async-await/async-fn/higher-ranked-async-fn.rs", + "tests/ui/async-await/async-fn/impl-trait.rs", + "tests/ui/async-await/async-fn/project.rs", + "tests/ui/async-await/async-fn/sugar.rs", + + // TODO: mutable by-reference bindings (mut ref) + // https://github.com/dtolnay/syn/issues/1629 + "src/tools/rustfmt/tests/source/mut_ref.rs", + "src/tools/rustfmt/tests/target/mut_ref.rs", + "tests/ui/mut/mut-ref.rs", + + // TODO: postfix match + // https://github.com/dtolnay/syn/issues/1630 + "src/tools/clippy/tests/ui/unnecessary_semicolon.rs", + "src/tools/rustfmt/tests/source/postfix-match/pf-match.rs", + "src/tools/rustfmt/tests/target/postfix-match/pf-match.rs", + "tests/pretty/postfix-match/simple-matches.rs", + "tests/ui/match/postfix-match/no-unused-parens.rs", + "tests/ui/match/postfix-match/pf-match-chain.rs", + "tests/ui/match/postfix-match/postfix-match.rs", + + // TODO: delegation: `reuse Trait::bar { Box::new(self.0) }` + // https://github.com/dtolnay/syn/issues/1580 + "tests/pretty/delegation.rs", + "tests/pretty/hir-delegation.rs", + "tests/ui/delegation/body-identity-glob.rs", + "tests/ui/delegation/body-identity-list.rs", + "tests/ui/delegation/explicit-paths-in-traits-pass.rs", + "tests/ui/delegation/explicit-paths-pass.rs", + "tests/ui/delegation/explicit-paths-signature-pass.rs", + "tests/ui/delegation/fn-header.rs", + "tests/ui/delegation/generics/free-fn-to-free-fn-pass.rs", + "tests/ui/delegation/generics/free-fn-to-trait-method-pass.rs", + "tests/ui/delegation/generics/impl-to-free-fn-pass.rs", + "tests/ui/delegation/generics/impl-trait-to-trait-method-pass.rs", + "tests/ui/delegation/generics/inherent-impl-to-trait-method-pass.rs", + "tests/ui/delegation/generics/trait-method-to-other-pass.rs", + "tests/ui/delegation/glob-glob.rs", + "tests/ui/delegation/glob-override.rs", + "tests/ui/delegation/glob.rs", + "tests/ui/delegation/impl-trait.rs", + "tests/ui/delegation/list.rs", + "tests/ui/delegation/macro-inside-glob.rs", + "tests/ui/delegation/macro-inside-list.rs", + "tests/ui/delegation/method-call-priority.rs", + "tests/ui/delegation/parse.rs", + "tests/ui/delegation/rename.rs", + "tests/ui/delegation/self-coercion.rs", + + // TODO: for await + // https://github.com/dtolnay/syn/issues/1631 + "tests/ui/async-await/for-await-2015.rs", + "tests/ui/async-await/for-await-passthrough.rs", + "tests/ui/async-await/for-await.rs", + + // TODO: unparenthesized half-open range pattern inside slice pattern: `[1..]` + // https://github.com/dtolnay/syn/issues/1769 + "src/tools/rust-analyzer/crates/parser/test_data/parser/inline/ok/range_pat.rs", + "tests/ui/consts/miri_unleashed/const_refers_to_static_cross_crate.rs", + + // TODO: pinned type sugar: `&pin const Self` + // https://github.com/dtolnay/syn/issues/1770 + "src/tools/rustfmt/tests/source/pin_sugar.rs", + "src/tools/rustfmt/tests/target/pin_sugar.rs", + "tests/pretty/pin-ergonomics-hir.rs", + "tests/pretty/pin-ergonomics.rs", + "tests/ui/pin-ergonomics/borrow.rs", + "tests/ui/pin-ergonomics/sugar-self.rs", + "tests/ui/pin-ergonomics/sugar.rs", + + // TODO: attributes on where-predicates + // https://github.com/dtolnay/syn/issues/1705 + "src/tools/rustfmt/tests/target/cfg_attribute_in_where.rs", + + // TODO: super let + // https://github.com/dtolnay/syn/issues/1889 + "src/tools/rust-analyzer/crates/parser/test_data/parser/inline/ok/let_stmt.rs", + + // TODO: "ergonomic clones": `f(obj.use)`, `thread::spawn(use || f(obj))`, `async use` + // 
https://github.com/dtolnay/syn/issues/1802 + "tests/codegen-llvm/ergonomic-clones/closure.rs", + "tests/mir-opt/ergonomic-clones/closure.rs", + "tests/ui/ergonomic-clones/async/basic.rs", + "tests/ui/ergonomic-clones/closure/basic.rs", + "tests/ui/ergonomic-clones/closure/const-closure.rs", + "tests/ui/ergonomic-clones/closure/mutation.rs", + "tests/ui/ergonomic-clones/closure/nested.rs", + "tests/ui/ergonomic-clones/closure/once-move-out-on-heap.rs", + "tests/ui/ergonomic-clones/closure/with-binders.rs", + "tests/ui/ergonomic-clones/dotuse/basic.rs", + "tests/ui/ergonomic-clones/dotuse/block.rs", + + // TODO: contracts + // https://github.com/dtolnay/syn/issues/1892 + "tests/ui/contracts/internal_machinery/contract-ast-extensions-nest.rs", + "tests/ui/contracts/internal_machinery/contract-ast-extensions-tail.rs", + "tests/ui/contracts/internal_machinery/contracts-lowering-ensures-is-not-inherited-when-nesting.rs", + "tests/ui/contracts/internal_machinery/contracts-lowering-requires-is-not-inherited-when-nesting.rs", + + // TODO: frontmatter + // https://github.com/dtolnay/syn/issues/1893 + "tests/ui/frontmatter/auxiliary/lib.rs", + "tests/ui/frontmatter/dot-in-infostring-non-leading.rs", + "tests/ui/frontmatter/escape.rs", + "tests/ui/frontmatter/frontmatter-inner-hyphens-1.rs", + "tests/ui/frontmatter/frontmatter-inner-hyphens-2.rs", + "tests/ui/frontmatter/frontmatter-non-lexible-tokens.rs", + "tests/ui/frontmatter/frontmatter-whitespace-3.rs", + "tests/ui/frontmatter/frontmatter-whitespace-4.rs", + "tests/ui/frontmatter/shebang.rs", + "tests/ui/unpretty/frontmatter.rs", + + // TODO: `|| .. .method()` + "src/tools/rust-analyzer/crates/parser/test_data/parser/inline/ok/closure_range_method_call.rs", + "src/tools/rustfmt/tests/source/issue-4808.rs", + + // Negative inherent impl: `impl !Box<JoinHandle> {}` + "src/tools/rustfmt/tests/source/negative-impl.rs", + "src/tools/rustfmt/tests/target/negative-impl.rs", + + // Compile-fail expr parameter in const generic position: `f::<1 + 2>()` + "tests/ui/const-generics/early/closing-args-token.rs", + "tests/ui/const-generics/early/const-expression-parameter.rs", + + // Compile-fail variadics in not the last position of a function parameter list + "src/tools/rust-analyzer/crates/parser/test_data/parser/inline/ok/fn_def_param.rs", + "src/tools/rust-analyzer/crates/parser/test_data/parser/inline/ok/param_list_vararg.rs", + "tests/ui/parser/variadic-ffi-syntactic-pass.rs", + + // Need at least one trait in impl Trait, no such type as impl 'static + "tests/ui/type-alias-impl-trait/generic_type_does_not_live_long_enough.rs", + + // Negative polarity trait bound: `where T: !Copy` + "src/tools/rustfmt/tests/target/negative-bounds.rs", + "tests/ui/traits/negative-bounds/supertrait.rs", + + // Const impl that is not a trait impl: `impl ~const T {}` + "tests/ui/traits/const-traits/syntax.rs", + + // Lifetimes and types out of order in angle bracketed path arguments + "tests/ui/parser/constraints-before-generic-args-syntactic-pass.rs", + + // Deprecated anonymous parameter syntax in traits + "src/tools/rustfmt/tests/source/trait.rs", + "src/tools/rustfmt/tests/target/trait.rs", + "tests/pretty/hir-fn-params.rs", + "tests/rustdoc/anon-fn-params.rs", + "tests/rustdoc/auxiliary/ext-anon-fn-params.rs", + "tests/ui/fn/anonymous-parameters-trait-13105.rs", + "tests/ui/issues/issue-34074.rs", + "tests/ui/proc-macro/trait-fn-args-2015.rs", + "tests/ui/trait-bounds/anonymous-parameters-13775.rs", + + // Deprecated where-clause location + 
"src/tools/rustfmt/tests/source/issue_4257.rs", + "src/tools/rustfmt/tests/source/issue_4911.rs", + "src/tools/rustfmt/tests/target/issue_4257.rs", + "src/tools/rustfmt/tests/target/issue_4911.rs", + "tests/pretty/gat-bounds.rs", + "tests/rustdoc/generic-associated-types/gats.rs", + + // Deprecated trait object syntax with parenthesized generic arguments and no dyn keyword + "src/tools/rust-analyzer/crates/parser/test_data/parser/inline/ok/path_fn_trait_args.rs", + "src/tools/rust-analyzer/crates/parser/test_data/parser/inline/ok/typepathfn_with_coloncolon.rs", + "src/tools/rustfmt/tests/source/attrib.rs", + "src/tools/rustfmt/tests/source/closure.rs", + "src/tools/rustfmt/tests/source/existential_type.rs", + "src/tools/rustfmt/tests/source/fn-simple.rs", + "src/tools/rustfmt/tests/source/fn_args_layout-vertical.rs", + "src/tools/rustfmt/tests/source/issue-4689/one.rs", + "src/tools/rustfmt/tests/source/issue-4689/two.rs", + "src/tools/rustfmt/tests/source/paths.rs", + "src/tools/rustfmt/tests/source/structs.rs", + "src/tools/rustfmt/tests/target/attrib.rs", + "src/tools/rustfmt/tests/target/closure.rs", + "src/tools/rustfmt/tests/target/existential_type.rs", + "src/tools/rustfmt/tests/target/fn-simple.rs", + "src/tools/rustfmt/tests/target/fn.rs", + "src/tools/rustfmt/tests/target/fn_args_layout-vertical.rs", + "src/tools/rustfmt/tests/target/issue-4689/one.rs", + "src/tools/rustfmt/tests/target/issue-4689/two.rs", + "src/tools/rustfmt/tests/target/paths.rs", + "src/tools/rustfmt/tests/target/structs.rs", + "tests/codegen-units/item-collection/non-generic-closures.rs", + "tests/debuginfo/recursive-enum.rs", + "tests/pretty/closure-reform-pretty.rs", + "tests/run-make/reproducible-build-2/reproducible-build.rs", + "tests/run-make/reproducible-build/reproducible-build.rs", + "tests/ui/impl-trait/generic-with-implicit-hrtb-without-dyn.rs", + "tests/ui/lifetimes/auxiliary/lifetime_bound_will_change_warning_lib.rs", + "tests/ui/lifetimes/bare-trait-object-borrowck.rs", + "tests/ui/lifetimes/bare-trait-object.rs", + "tests/ui/parser/bounds-obj-parens.rs", + + // Various extensions to Rust syntax made up by rust-analyzer + "src/tools/rust-analyzer/crates/parser/test_data/parser/inline/ok/assoc_type_bound.rs", + "src/tools/rust-analyzer/crates/parser/test_data/parser/inline/ok/const_param_default_path.rs", + "src/tools/rust-analyzer/crates/parser/test_data/parser/inline/ok/field_expr.rs", + "src/tools/rust-analyzer/crates/parser/test_data/parser/inline/ok/generic_arg_bounds.rs", + "src/tools/rust-analyzer/crates/parser/test_data/parser/inline/ok/global_asm.rs", + "src/tools/rust-analyzer/crates/parser/test_data/parser/inline/ok/question_for_type_trait_bound.rs", + "src/tools/rust-analyzer/crates/parser/test_data/parser/inline/ok/ref_expr.rs", + "src/tools/rust-analyzer/crates/parser/test_data/parser/inline/ok/use_tree_abs_star.rs", + "src/tools/rust-analyzer/crates/parser/test_data/parser/ok/0015_use_tree.rs", + "src/tools/rust-analyzer/crates/parser/test_data/parser/ok/0029_range_forms.rs", + "src/tools/rust-analyzer/crates/parser/test_data/parser/ok/0051_parameter_attrs.rs", + "src/tools/rust-analyzer/crates/parser/test_data/parser/ok/0055_dot_dot_dot.rs", + "src/tools/rust-analyzer/crates/parser/test_data/parser/ok/0068_item_modifiers.rs", + "src/tools/rust-analyzer/crates/syntax/test_data/parser/validation/0031_block_inner_attrs.rs", + "src/tools/rust-analyzer/crates/syntax/test_data/parser/validation/0038_endless_inclusive_range.rs", + 
"src/tools/rust-analyzer/crates/syntax/test_data/parser/validation/0045_ambiguous_trait_object.rs", + "src/tools/rust-analyzer/crates/syntax/test_data/parser/validation/0046_mutable_const_item.rs", + "src/tools/rust-analyzer/crates/syntax/test_data/parser/validation/0224_dangling_dyn.rs", + "src/tools/rust-analyzer/crates/syntax/test_data/parser/validation/0261_dangling_impl_undeclared_lifetime.rs", + "src/tools/rust-analyzer/crates/syntax/test_data/parser/validation/dangling_impl.rs", + "src/tools/rust-analyzer/crates/syntax/test_data/parser/validation/dangling_impl_reference.rs", + "src/tools/rust-analyzer/crates/syntax/test_data/parser/validation/impl_trait_lifetime_only.rs", + + // Placeholder syntax for "throw expressions" + "compiler/rustc_errors/src/translation.rs", + "compiler/rustc_expand/src/module.rs", + "compiler/rustc_trait_selection/src/error_reporting/infer/need_type_info.rs", + "src/tools/clippy/tests/ui/needless_return.rs", + "src/tools/rust-analyzer/crates/parser/test_data/parser/inline/ok/yeet_expr.rs", + "tests/pretty/yeet-expr.rs", + "tests/ui/try-trait/yeet-for-option.rs", + "tests/ui/try-trait/yeet-for-result.rs", + + // Edition 2015 code using identifiers that are now keywords + // TODO: some of these we should probably parse + "src/tools/rust-analyzer/crates/parser/test_data/parser/inline/ok/dyn_trait_type_weak.rs", + "src/tools/rustfmt/tests/source/configs/indent_style/block_call.rs", + "src/tools/rustfmt/tests/source/configs/use_try_shorthand/false.rs", + "src/tools/rustfmt/tests/source/configs/use_try_shorthand/true.rs", + "src/tools/rustfmt/tests/source/issue_1306.rs", + "src/tools/rustfmt/tests/source/try-conversion.rs", + "src/tools/rustfmt/tests/target/configs/indent_style/block_call.rs", + "src/tools/rustfmt/tests/target/configs/use_try_shorthand/false.rs", + "src/tools/rustfmt/tests/target/issue-1681.rs", + "src/tools/rustfmt/tests/target/issue_1306.rs", + "tests/ui/dyn-keyword/dyn-2015-no-warnings-without-lints.rs", + "tests/ui/editions/edition-keywords-2015-2015.rs", + "tests/ui/editions/edition-keywords-2015-2018.rs", + "tests/ui/lint/keyword-idents/auxiliary/multi_file_submod.rs", + "tests/ui/lint/lint_pre_expansion_extern_module_aux.rs", + "tests/ui/macros/macro-comma-support-rpass.rs", + "tests/ui/macros/try-macro.rs", + "tests/ui/parser/extern-crate-async.rs", + "tests/ui/try-block/try-is-identifier-edition2015.rs", + + // Excessive nesting + "tests/ui/issues/issue-74564-if-expr-stack-overflow.rs", + + // Testing tools on invalid syntax + "src/tools/clippy/tests/ui/non_expressive_names_error_recovery.rs", + "src/tools/rustfmt/tests/coverage/target/comments.rs", + "src/tools/rustfmt/tests/parser/issue-4126/invalid.rs", + "src/tools/rustfmt/tests/parser/issue_4418.rs", + "src/tools/rustfmt/tests/parser/stashed-diag.rs", + "src/tools/rustfmt/tests/parser/stashed-diag2.rs", + "src/tools/rustfmt/tests/parser/unclosed-delims/issue_4466.rs", + "src/tools/rustfmt/tests/source/configs/disable_all_formatting/true.rs", + "src/tools/rustfmt/tests/source/configs/spaces_around_ranges/false.rs", + "src/tools/rustfmt/tests/source/configs/spaces_around_ranges/true.rs", + "src/tools/rustfmt/tests/source/type.rs", + "src/tools/rustfmt/tests/target/configs/spaces_around_ranges/false.rs", + "src/tools/rustfmt/tests/target/configs/spaces_around_ranges/true.rs", + "src/tools/rustfmt/tests/target/type.rs", + "src/tools/rustfmt/tests/target/unsafe_extern_blocks.rs", + "tests/run-make/translation/test.rs", + "tests/ui/generics/issue-94432-garbage-ice.rs", + + // Generated 
file containing a top-level expression, used with `include!` + "compiler/rustc_codegen_gcc/src/intrinsic/archs.rs", + + // Not actually test cases + "tests/ui/lint/expansion-time-include.rs", + "tests/ui/macros/auxiliary/macro-comma-support.rs", + "tests/ui/macros/auxiliary/macro-include-items-expr.rs", + "tests/ui/macros/include-single-expr-helper.rs", + "tests/ui/macros/include-single-expr-helper-1.rs", + "tests/ui/parser/issues/auxiliary/issue-21146-inc.rs", +]; + +#[rustfmt::skip] +static EXCLUDE_DIRS: &[&str] = &[ + // Inputs that intentionally do not parse + "src/tools/rust-analyzer/crates/parser/test_data/parser/err", + "src/tools/rust-analyzer/crates/parser/test_data/parser/inline/err", + + // Inputs that lex but do not necessarily parse + "src/tools/rust-analyzer/crates/parser/test_data/lexer", + + // Inputs that used to crash rust-analyzer, but aren't necessarily supposed to parse + "src/tools/rust-analyzer/crates/syntax/test_data/parser/fuzz-failures", + "src/tools/rust-analyzer/crates/syntax/test_data/reparse/fuzz-failures", + + // Inputs that crash rustc, making no claim about whether they are valid Rust + "tests/crashes", +]; + +// Directories in which a .stderr implies the corresponding .rs is not expected +// to work. +static UI_TEST_DIRS: &[&str] = &["tests/ui", "tests/rustdoc-ui"]; + +pub fn for_each_rust_file(for_each: impl Fn(&Path) + Sync + Send) { + let mut rs_files = BTreeSet::new(); + + let repo_dir = Path::new("tests/rust"); + for entry in WalkDir::new(repo_dir) + .into_iter() + .filter_entry(base_dir_filter) + { + let entry = entry.unwrap(); + if !entry.file_type().is_dir() { + rs_files.insert(entry.into_path()); + } + } + + for ui_test_dir in UI_TEST_DIRS { + for entry in WalkDir::new(repo_dir.join(ui_test_dir)) { + let mut path = entry.unwrap().into_path(); + if path.extension() == Some(OsStr::new("stderr")) { + loop { + rs_files.remove(&path.with_extension("rs")); + path = path.with_extension(""); + if path.extension().is_none() { + break; + } + } + } + } + } + + rs_files.par_iter().map(PathBuf::as_path).for_each(for_each); +} + +pub fn base_dir_filter(entry: &DirEntry) -> bool { + let path = entry.path(); + + let mut path_string = path.to_string_lossy(); + if cfg!(windows) { + path_string = path_string.replace('\\', "/").into(); + } + let path_string = if path_string == "tests/rust" { + return true; + } else if let Some(path) = path_string.strip_prefix("tests/rust/") { + path + } else { + panic!("unexpected path in Rust dist: {}", path_string); + }; + + if path.is_dir() { + return !EXCLUDE_DIRS.contains(&path_string); + } + + if path.extension() != Some(OsStr::new("rs")) { + return false; + } + + !EXCLUDE_FILES.contains(&path_string) +} + +#[allow(dead_code)] +pub fn edition(path: &Path) -> &'static str { + if path.ends_with("dyn-2015-no-warnings-without-lints.rs") { + "2015" + } else { + "2021" + } +} + +#[allow(dead_code)] +pub fn abort_after() -> usize { + match env::var("ABORT_AFTER_FAILURE") { + Ok(s) => s.parse().expect("failed to parse ABORT_AFTER_FAILURE"), + Err(_) => usize::MAX, + } +} + +pub fn rayon_init() { + let stack_size = match env::var("RUST_MIN_STACK") { + Ok(s) => s.parse().expect("failed to parse RUST_MIN_STACK"), + Err(_) => 1024 * 1024 * if cfg!(debug_assertions) { 40 } else { 20 }, + }; + ThreadPoolBuilder::new() + .stack_size(stack_size) + .build_global() + .unwrap(); +} + +pub fn clone_rust() { + let needs_clone = match fs::read_to_string("tests/rust/COMMIT") { + Err(_) => true, + Ok(contents) => contents.trim() != REVISION, + }; + if 
needs_clone { + download_and_unpack().unwrap(); + } + + let mut missing = String::new(); + let test_src = Path::new("tests/rust"); + + let mut exclude_files_set = BTreeSet::new(); + for exclude in EXCLUDE_FILES { + if !exclude_files_set.insert(exclude) { + panic!("duplicate path in EXCLUDE_FILES: {}", exclude); + } + for dir in EXCLUDE_DIRS { + if Path::new(exclude).starts_with(dir) { + panic!("excluded file {} is inside an excluded dir", exclude); + } + } + if !test_src.join(exclude).is_file() { + missing += "\ntests/rust/"; + missing += exclude; + } + } + + let mut exclude_dirs_set = BTreeSet::new(); + for exclude in EXCLUDE_DIRS { + if !exclude_dirs_set.insert(exclude) { + panic!("duplicate path in EXCLUDE_DIRS: {}", exclude); + } + if !test_src.join(exclude).is_dir() { + missing += "\ntests/rust/"; + missing += exclude; + missing += "/"; + } + } + + if !missing.is_empty() { + panic!("excluded test file does not exist:{}\n", missing); + } +} + +fn download_and_unpack() -> Result<()> { + let url = format!("https://github.com/rust-lang/rust/archive/{REVISION}.tar.gz"); + errorf!("downloading {url}\n"); + + let response = reqwest::blocking::get(url)?.error_for_status()?; + let progress = Progress::new(response); + let decoder = GzDecoder::new(progress); + let mut archive = Archive::new(decoder); + let prefix = format!("rust-{}", REVISION); + + let tests_rust = Path::new("tests/rust"); + if tests_rust.exists() { + fs::remove_dir_all(tests_rust)?; + } + + for entry in archive.entries()? { + let mut entry = entry?; + let path = entry.path()?; + if path == Path::new("pax_global_header") { + continue; + } + let relative = path.strip_prefix(&prefix)?; + let out = tests_rust.join(relative); + entry.unpack(&out)?; + } + + fs::write("tests/rust/COMMIT", REVISION)?; + Ok(()) +} diff --git a/vendor/syn/tests/repo/progress.rs b/vendor/syn/tests/repo/progress.rs new file mode 100644 index 00000000000000..28c8a44b1298a8 --- /dev/null +++ b/vendor/syn/tests/repo/progress.rs @@ -0,0 +1,37 @@ +use std::io::{Read, Result}; +use std::time::{Duration, Instant}; + +pub struct Progress<R> { + bytes: usize, + tick: Instant, + stream: R, +} + +impl<R> Progress<R> { + pub fn new(stream: R) -> Self { + Progress { + bytes: 0, + tick: Instant::now() + Duration::from_millis(2000), + stream, + } + } +} + +impl<R: Read> Read for Progress<R> { + fn read(&mut self, buf: &mut [u8]) -> Result<usize> { + let num = self.stream.read(buf)?; + self.bytes += num; + let now = Instant::now(); + if now > self.tick { + self.tick = now + Duration::from_millis(500); + errorf!("downloading... {} bytes\n", self.bytes); + } + Ok(num) + } +} + +impl<R> Drop for Progress<R> { + fn drop(&mut self) { + errorf!("done ({} bytes)\n", self.bytes); + } +} diff --git a/vendor/syn/tests/snapshot/mod.rs b/vendor/syn/tests/snapshot/mod.rs new file mode 100644 index 00000000000000..98d2aebc9d303e --- /dev/null +++ b/vendor/syn/tests/snapshot/mod.rs @@ -0,0 +1,68 @@ +#![allow(unused_macros, unused_macro_rules)] + +use std::str::FromStr; +use syn::parse::Result; + +macro_rules! snapshot { + ($($args:tt)*) => { + snapshot_impl!(() $($args)*) + }; +} + +macro_rules! 
snapshot_impl { + (($expr:ident) as $t:ty, @$snapshot:literal) => { + let tokens = crate::snapshot::TryIntoTokens::try_into_tokens($expr).unwrap(); + let $expr: $t = syn::parse_quote!(#tokens); + let debug = crate::debug::Lite(&$expr); + if !cfg!(miri) { + #[allow(clippy::needless_raw_string_hashes)] // https://github.com/mitsuhiko/insta/issues/389 + { + insta::assert_debug_snapshot!(debug, @$snapshot); + } + } + }; + (($($expr:tt)*) as $t:ty, @$snapshot:literal) => {{ + let tokens = crate::snapshot::TryIntoTokens::try_into_tokens($($expr)*).unwrap(); + let syntax_tree: $t = syn::parse_quote!(#tokens); + let debug = crate::debug::Lite(&syntax_tree); + if !cfg!(miri) { + #[allow(clippy::needless_raw_string_hashes)] + { + insta::assert_debug_snapshot!(debug, @$snapshot); + } + } + syntax_tree + }}; + (($($expr:tt)*) , @$snapshot:literal) => {{ + let syntax_tree = $($expr)*; + let debug = crate::debug::Lite(&syntax_tree); + if !cfg!(miri) { + #[allow(clippy::needless_raw_string_hashes)] + { + insta::assert_debug_snapshot!(debug, @$snapshot); + } + } + syntax_tree + }}; + (($($expr:tt)*) $next:tt $($rest:tt)*) => { + snapshot_impl!(($($expr)* $next) $($rest)*) + }; +} + +pub trait TryIntoTokens { + #[allow(dead_code)] + fn try_into_tokens(self) -> Result<proc_macro2::TokenStream>; +} + +impl TryIntoTokens for &str { + fn try_into_tokens(self) -> Result<proc_macro2::TokenStream> { + let tokens = proc_macro2::TokenStream::from_str(self)?; + Ok(tokens) + } +} + +impl TryIntoTokens for proc_macro2::TokenStream { + fn try_into_tokens(self) -> Result<proc_macro2::TokenStream> { + Ok(self) + } +} diff --git a/vendor/syn/tests/test_asyncness.rs b/vendor/syn/tests/test_asyncness.rs new file mode 100644 index 00000000000000..c7aee3285bb29a --- /dev/null +++ b/vendor/syn/tests/test_asyncness.rs @@ -0,0 +1,49 @@ +#![allow( + clippy::elidable_lifetime_names, + clippy::needless_lifetimes, + clippy::uninlined_format_args +)] + +#[macro_use] +mod snapshot; + +mod debug; + +use syn::{Expr, Item}; + +#[test] +fn test_async_fn() { + let input = "async fn process() {}"; + + snapshot!(input as Item, @r#" + Item::Fn { + vis: Visibility::Inherited, + sig: Signature { + asyncness: Some, + ident: "process", + generics: Generics, + output: ReturnType::Default, + }, + block: Block { + stmts: [], + }, + } + "#); +} + +#[test] +fn test_async_closure() { + let input = "async || {}"; + + snapshot!(input as Expr, @r#" + Expr::Closure { + asyncness: Some, + output: ReturnType::Default, + body: Expr::Block { + block: Block { + stmts: [], + }, + }, + } + "#); +} diff --git a/vendor/syn/tests/test_attribute.rs b/vendor/syn/tests/test_attribute.rs new file mode 100644 index 00000000000000..81c485e6b28fc7 --- /dev/null +++ b/vendor/syn/tests/test_attribute.rs @@ -0,0 +1,231 @@ +#![allow( + clippy::elidable_lifetime_names, + clippy::needless_lifetimes, + clippy::uninlined_format_args +)] + +#[macro_use] +mod snapshot; + +mod debug; + +use syn::parse::Parser; +use syn::{Attribute, Meta}; + +#[test] +fn test_meta_item_word() { + let meta = test("#[foo]"); + + snapshot!(meta, @r#" + Meta::Path { + segments: [ + PathSegment { + ident: "foo", + }, + ], + } + "#); +} + +#[test] +fn test_meta_item_name_value() { + let meta = test("#[foo = 5]"); + + snapshot!(meta, @r#" + Meta::NameValue { + path: Path { + segments: [ + PathSegment { + ident: "foo", + }, + ], + }, + value: Expr::Lit { + lit: 5, + }, + } + "#); +} + +#[test] +fn test_meta_item_bool_value() { + let meta = test("#[foo = true]"); + + snapshot!(meta, @r#" + Meta::NameValue { + 
path: Path { + segments: [ + PathSegment { + ident: "foo", + }, + ], + }, + value: Expr::Lit { + lit: Lit::Bool { + value: true, + }, + }, + } + "#); + + let meta = test("#[foo = false]"); + + snapshot!(meta, @r#" + Meta::NameValue { + path: Path { + segments: [ + PathSegment { + ident: "foo", + }, + ], + }, + value: Expr::Lit { + lit: Lit::Bool { + value: false, + }, + }, + } + "#); +} + +#[test] +fn test_meta_item_list_lit() { + let meta = test("#[foo(5)]"); + + snapshot!(meta, @r#" + Meta::List { + path: Path { + segments: [ + PathSegment { + ident: "foo", + }, + ], + }, + delimiter: MacroDelimiter::Paren, + tokens: TokenStream(`5`), + } + "#); +} + +#[test] +fn test_meta_item_list_word() { + let meta = test("#[foo(bar)]"); + + snapshot!(meta, @r#" + Meta::List { + path: Path { + segments: [ + PathSegment { + ident: "foo", + }, + ], + }, + delimiter: MacroDelimiter::Paren, + tokens: TokenStream(`bar`), + } + "#); +} + +#[test] +fn test_meta_item_list_name_value() { + let meta = test("#[foo(bar = 5)]"); + + snapshot!(meta, @r#" + Meta::List { + path: Path { + segments: [ + PathSegment { + ident: "foo", + }, + ], + }, + delimiter: MacroDelimiter::Paren, + tokens: TokenStream(`bar = 5`), + } + "#); +} + +#[test] +fn test_meta_item_list_bool_value() { + let meta = test("#[foo(bar = true)]"); + + snapshot!(meta, @r#" + Meta::List { + path: Path { + segments: [ + PathSegment { + ident: "foo", + }, + ], + }, + delimiter: MacroDelimiter::Paren, + tokens: TokenStream(`bar = true`), + } + "#); +} + +#[test] +fn test_meta_item_multiple() { + let meta = test("#[foo(word, name = 5, list(name2 = 6), word2)]"); + + snapshot!(meta, @r#" + Meta::List { + path: Path { + segments: [ + PathSegment { + ident: "foo", + }, + ], + }, + delimiter: MacroDelimiter::Paren, + tokens: TokenStream(`word , name = 5 , list (name2 = 6) , word2`), + } + "#); +} + +#[test] +fn test_bool_lit() { + let meta = test("#[foo(true)]"); + + snapshot!(meta, @r#" + Meta::List { + path: Path { + segments: [ + PathSegment { + ident: "foo", + }, + ], + }, + delimiter: MacroDelimiter::Paren, + tokens: TokenStream(`true`), + } + "#); +} + +#[test] +fn test_negative_lit() { + let meta = test("#[form(min = -1, max = 200)]"); + + snapshot!(meta, @r#" + Meta::List { + path: Path { + segments: [ + PathSegment { + ident: "form", + }, + ], + }, + delimiter: MacroDelimiter::Paren, + tokens: TokenStream(`min = - 1 , max = 200`), + } + "#); +} + +fn test(input: &str) -> Meta { + let attrs = Attribute::parse_outer.parse_str(input).unwrap(); + + assert_eq!(attrs.len(), 1); + let attr = attrs.into_iter().next().unwrap(); + + attr.meta +} diff --git a/vendor/syn/tests/test_derive_input.rs b/vendor/syn/tests/test_derive_input.rs new file mode 100644 index 00000000000000..790e2792adb3a7 --- /dev/null +++ b/vendor/syn/tests/test_derive_input.rs @@ -0,0 +1,785 @@ +#![allow( + clippy::assertions_on_result_states, + clippy::elidable_lifetime_names, + clippy::manual_let_else, + clippy::needless_lifetimes, + clippy::too_many_lines, + clippy::uninlined_format_args +)] + +#[macro_use] +mod snapshot; + +mod debug; + +use quote::quote; +use syn::{Data, DeriveInput}; + +#[test] +fn test_unit() { + let input = quote! { + struct Unit; + }; + + snapshot!(input as DeriveInput, @r#" + DeriveInput { + vis: Visibility::Inherited, + ident: "Unit", + generics: Generics, + data: Data::Struct { + fields: Fields::Unit, + semi_token: Some, + }, + } + "#); +} + +#[test] +fn test_struct() { + let input = quote! 
{ + #[derive(Debug, Clone)] + pub struct Item { + pub ident: Ident, + pub attrs: Vec<Attribute> + } + }; + + snapshot!(input as DeriveInput, @r#" + DeriveInput { + attrs: [ + Attribute { + style: AttrStyle::Outer, + meta: Meta::List { + path: Path { + segments: [ + PathSegment { + ident: "derive", + }, + ], + }, + delimiter: MacroDelimiter::Paren, + tokens: TokenStream(`Debug , Clone`), + }, + }, + ], + vis: Visibility::Public, + ident: "Item", + generics: Generics, + data: Data::Struct { + fields: Fields::Named { + named: [ + Field { + vis: Visibility::Public, + ident: Some("ident"), + colon_token: Some, + ty: Type::Path { + path: Path { + segments: [ + PathSegment { + ident: "Ident", + }, + ], + }, + }, + }, + Token![,], + Field { + vis: Visibility::Public, + ident: Some("attrs"), + colon_token: Some, + ty: Type::Path { + path: Path { + segments: [ + PathSegment { + ident: "Vec", + arguments: PathArguments::AngleBracketed { + args: [ + GenericArgument::Type(Type::Path { + path: Path { + segments: [ + PathSegment { + ident: "Attribute", + }, + ], + }, + }), + ], + }, + }, + ], + }, + }, + }, + ], + }, + }, + } + "#); + + snapshot!(&input.attrs[0].meta, @r#" + Meta::List { + path: Path { + segments: [ + PathSegment { + ident: "derive", + }, + ], + }, + delimiter: MacroDelimiter::Paren, + tokens: TokenStream(`Debug , Clone`), + } + "#); +} + +#[test] +fn test_union() { + let input = quote! { + union MaybeUninit<T> { + uninit: (), + value: T + } + }; + + snapshot!(input as DeriveInput, @r#" + DeriveInput { + vis: Visibility::Inherited, + ident: "MaybeUninit", + generics: Generics { + lt_token: Some, + params: [ + GenericParam::Type(TypeParam { + ident: "T", + }), + ], + gt_token: Some, + }, + data: Data::Union { + fields: FieldsNamed { + named: [ + Field { + vis: Visibility::Inherited, + ident: Some("uninit"), + colon_token: Some, + ty: Type::Tuple, + }, + Token![,], + Field { + vis: Visibility::Inherited, + ident: Some("value"), + colon_token: Some, + ty: Type::Path { + path: Path { + segments: [ + PathSegment { + ident: "T", + }, + ], + }, + }, + }, + ], + }, + }, + } + "#); +} + +#[test] +#[cfg(feature = "full")] +fn test_enum() { + let input = quote! { + /// See the std::result module documentation for details. 
+ #[must_use] + pub enum Result<T, E> { + Ok(T), + Err(E), + Surprise = 0isize, + + // Smuggling data into a proc_macro_derive, + // in the style of https://github.com/dtolnay/proc-macro-hack + ProcMacroHack = (0, "data").0 + } + }; + + snapshot!(input as DeriveInput, @r#" + DeriveInput { + attrs: [ + Attribute { + style: AttrStyle::Outer, + meta: Meta::NameValue { + path: Path { + segments: [ + PathSegment { + ident: "doc", + }, + ], + }, + value: Expr::Lit { + lit: " See the std::result module documentation for details.", + }, + }, + }, + Attribute { + style: AttrStyle::Outer, + meta: Meta::Path { + segments: [ + PathSegment { + ident: "must_use", + }, + ], + }, + }, + ], + vis: Visibility::Public, + ident: "Result", + generics: Generics { + lt_token: Some, + params: [ + GenericParam::Type(TypeParam { + ident: "T", + }), + Token![,], + GenericParam::Type(TypeParam { + ident: "E", + }), + ], + gt_token: Some, + }, + data: Data::Enum { + variants: [ + Variant { + ident: "Ok", + fields: Fields::Unnamed { + unnamed: [ + Field { + vis: Visibility::Inherited, + ty: Type::Path { + path: Path { + segments: [ + PathSegment { + ident: "T", + }, + ], + }, + }, + }, + ], + }, + }, + Token![,], + Variant { + ident: "Err", + fields: Fields::Unnamed { + unnamed: [ + Field { + vis: Visibility::Inherited, + ty: Type::Path { + path: Path { + segments: [ + PathSegment { + ident: "E", + }, + ], + }, + }, + }, + ], + }, + }, + Token![,], + Variant { + ident: "Surprise", + fields: Fields::Unit, + discriminant: Some(Expr::Lit { + lit: 0isize, + }), + }, + Token![,], + Variant { + ident: "ProcMacroHack", + fields: Fields::Unit, + discriminant: Some(Expr::Field { + base: Expr::Tuple { + elems: [ + Expr::Lit { + lit: 0, + }, + Token![,], + Expr::Lit { + lit: "data", + }, + ], + }, + member: Member::Unnamed(Index { + index: 0, + }), + }), + }, + ], + }, + } + "#); + + let meta_items: Vec<_> = input.attrs.into_iter().map(|attr| attr.meta).collect(); + + snapshot!(meta_items, @r#" + [ + Meta::NameValue { + path: Path { + segments: [ + PathSegment { + ident: "doc", + }, + ], + }, + value: Expr::Lit { + lit: " See the std::result module documentation for details.", + }, + }, + Meta::Path { + segments: [ + PathSegment { + ident: "must_use", + }, + ], + }, + ] + "#); +} + +#[test] +fn test_attr_with_non_mod_style_path() { + let input = quote! { + #[inert <T>] + struct S; + }; + + syn::parse2::<DeriveInput>(input).unwrap_err(); +} + +#[test] +fn test_attr_with_mod_style_path_with_self() { + let input = quote! { + #[foo::self] + struct S; + }; + + snapshot!(input as DeriveInput, @r#" + DeriveInput { + attrs: [ + Attribute { + style: AttrStyle::Outer, + meta: Meta::Path { + segments: [ + PathSegment { + ident: "foo", + }, + Token![::], + PathSegment { + ident: "self", + }, + ], + }, + }, + ], + vis: Visibility::Inherited, + ident: "S", + generics: Generics, + data: Data::Struct { + fields: Fields::Unit, + semi_token: Some, + }, + } + "#); + + snapshot!(&input.attrs[0].meta, @r#" + Meta::Path { + segments: [ + PathSegment { + ident: "foo", + }, + Token![::], + PathSegment { + ident: "self", + }, + ], + } + "#); +} + +#[test] +fn test_pub_restricted() { + // Taken from tests/rust/src/test/ui/resolve/auxiliary/privacy-struct-ctor.rs + let input = quote! 
{ + pub(in m) struct Z(pub(in m::n) u8); + }; + + snapshot!(input as DeriveInput, @r#" + DeriveInput { + vis: Visibility::Restricted { + in_token: Some, + path: Path { + segments: [ + PathSegment { + ident: "m", + }, + ], + }, + }, + ident: "Z", + generics: Generics, + data: Data::Struct { + fields: Fields::Unnamed { + unnamed: [ + Field { + vis: Visibility::Restricted { + in_token: Some, + path: Path { + segments: [ + PathSegment { + ident: "m", + }, + Token![::], + PathSegment { + ident: "n", + }, + ], + }, + }, + ty: Type::Path { + path: Path { + segments: [ + PathSegment { + ident: "u8", + }, + ], + }, + }, + }, + ], + }, + semi_token: Some, + }, + } + "#); +} + +#[test] +fn test_pub_restricted_crate() { + let input = quote! { + pub(crate) struct S; + }; + + snapshot!(input as DeriveInput, @r#" + DeriveInput { + vis: Visibility::Restricted { + path: Path { + segments: [ + PathSegment { + ident: "crate", + }, + ], + }, + }, + ident: "S", + generics: Generics, + data: Data::Struct { + fields: Fields::Unit, + semi_token: Some, + }, + } + "#); +} + +#[test] +fn test_pub_restricted_super() { + let input = quote! { + pub(super) struct S; + }; + + snapshot!(input as DeriveInput, @r#" + DeriveInput { + vis: Visibility::Restricted { + path: Path { + segments: [ + PathSegment { + ident: "super", + }, + ], + }, + }, + ident: "S", + generics: Generics, + data: Data::Struct { + fields: Fields::Unit, + semi_token: Some, + }, + } + "#); +} + +#[test] +fn test_pub_restricted_in_super() { + let input = quote! { + pub(in super) struct S; + }; + + snapshot!(input as DeriveInput, @r#" + DeriveInput { + vis: Visibility::Restricted { + in_token: Some, + path: Path { + segments: [ + PathSegment { + ident: "super", + }, + ], + }, + }, + ident: "S", + generics: Generics, + data: Data::Struct { + fields: Fields::Unit, + semi_token: Some, + }, + } + "#); +} + +#[test] +fn test_fields_on_unit_struct() { + let input = quote! { + struct S; + }; + + snapshot!(input as DeriveInput, @r#" + DeriveInput { + vis: Visibility::Inherited, + ident: "S", + generics: Generics, + data: Data::Struct { + fields: Fields::Unit, + semi_token: Some, + }, + } + "#); + + let data = match input.data { + Data::Struct(data) => data, + _ => panic!("expected a struct"), + }; + + assert_eq!(0, data.fields.iter().count()); +} + +#[test] +fn test_fields_on_named_struct() { + let input = quote! 
{ + struct S { + foo: i32, + pub bar: String, + } + }; + + snapshot!(input as DeriveInput, @r#" + DeriveInput { + vis: Visibility::Inherited, + ident: "S", + generics: Generics, + data: Data::Struct { + fields: Fields::Named { + named: [ + Field { + vis: Visibility::Inherited, + ident: Some("foo"), + colon_token: Some, + ty: Type::Path { + path: Path { + segments: [ + PathSegment { + ident: "i32", + }, + ], + }, + }, + }, + Token![,], + Field { + vis: Visibility::Public, + ident: Some("bar"), + colon_token: Some, + ty: Type::Path { + path: Path { + segments: [ + PathSegment { + ident: "String", + }, + ], + }, + }, + }, + Token![,], + ], + }, + }, + } + "#); + + let data = match input.data { + Data::Struct(data) => data, + _ => panic!("expected a struct"), + }; + + snapshot!(data.fields.into_iter().collect::<Vec<_>>(), @r#" + [ + Field { + vis: Visibility::Inherited, + ident: Some("foo"), + colon_token: Some, + ty: Type::Path { + path: Path { + segments: [ + PathSegment { + ident: "i32", + }, + ], + }, + }, + }, + Field { + vis: Visibility::Public, + ident: Some("bar"), + colon_token: Some, + ty: Type::Path { + path: Path { + segments: [ + PathSegment { + ident: "String", + }, + ], + }, + }, + }, + ] + "#); +} + +#[test] +fn test_fields_on_tuple_struct() { + let input = quote! { + struct S(i32, pub String); + }; + + snapshot!(input as DeriveInput, @r#" + DeriveInput { + vis: Visibility::Inherited, + ident: "S", + generics: Generics, + data: Data::Struct { + fields: Fields::Unnamed { + unnamed: [ + Field { + vis: Visibility::Inherited, + ty: Type::Path { + path: Path { + segments: [ + PathSegment { + ident: "i32", + }, + ], + }, + }, + }, + Token![,], + Field { + vis: Visibility::Public, + ty: Type::Path { + path: Path { + segments: [ + PathSegment { + ident: "String", + }, + ], + }, + }, + }, + ], + }, + semi_token: Some, + }, + } + "#); + + let data = match input.data { + Data::Struct(data) => data, + _ => panic!("expected a struct"), + }; + + snapshot!(data.fields.iter().collect::<Vec<_>>(), @r#" + [ + Field { + vis: Visibility::Inherited, + ty: Type::Path { + path: Path { + segments: [ + PathSegment { + ident: "i32", + }, + ], + }, + }, + }, + Field { + vis: Visibility::Public, + ty: Type::Path { + path: Path { + segments: [ + PathSegment { + ident: "String", + }, + ], + }, + }, + }, + ] + "#); +} + +#[test] +fn test_ambiguous_crate() { + let input = quote! { + // The field type is `(crate::X)` not `crate (::X)`. 
+ struct S(crate::X); + }; + + snapshot!(input as DeriveInput, @r#" + DeriveInput { + vis: Visibility::Inherited, + ident: "S", + generics: Generics, + data: Data::Struct { + fields: Fields::Unnamed { + unnamed: [ + Field { + vis: Visibility::Inherited, + ty: Type::Path { + path: Path { + segments: [ + PathSegment { + ident: "crate", + }, + Token![::], + PathSegment { + ident: "X", + }, + ], + }, + }, + }, + ], + }, + semi_token: Some, + }, + } + "#); +} diff --git a/vendor/syn/tests/test_expr.rs b/vendor/syn/tests/test_expr.rs new file mode 100644 index 00000000000000..e21373cf96d84c --- /dev/null +++ b/vendor/syn/tests/test_expr.rs @@ -0,0 +1,1702 @@ +#![cfg(not(miri))] +#![recursion_limit = "1024"] +#![feature(rustc_private)] +#![allow( + clippy::elidable_lifetime_names, + clippy::match_like_matches_macro, + clippy::needless_lifetimes, + clippy::single_element_loop, + clippy::too_many_lines, + clippy::uninlined_format_args, + clippy::unreadable_literal +)] + +#[macro_use] +mod macros; +#[macro_use] +mod snapshot; + +mod common; +mod debug; + +use crate::common::visit::{AsIfPrinted, FlattenParens}; +use proc_macro2::{Delimiter, Group, Ident, Span, TokenStream}; +use quote::{quote, ToTokens as _}; +use std::process::ExitCode; +use syn::punctuated::Punctuated; +use syn::visit_mut::VisitMut as _; +use syn::{ + parse_quote, token, AngleBracketedGenericArguments, Arm, BinOp, Block, Expr, ExprArray, + ExprAssign, ExprAsync, ExprAwait, ExprBinary, ExprBlock, ExprBreak, ExprCall, ExprCast, + ExprClosure, ExprConst, ExprContinue, ExprField, ExprForLoop, ExprIf, ExprIndex, ExprLet, + ExprLit, ExprLoop, ExprMacro, ExprMatch, ExprMethodCall, ExprPath, ExprRange, ExprRawAddr, + ExprReference, ExprReturn, ExprStruct, ExprTry, ExprTryBlock, ExprTuple, ExprUnary, ExprUnsafe, + ExprWhile, ExprYield, GenericArgument, Label, Lifetime, Lit, LitInt, Macro, MacroDelimiter, + Member, Pat, PatWild, Path, PathArguments, PathSegment, PointerMutability, QSelf, RangeLimits, + ReturnType, Stmt, Token, Type, TypePath, UnOp, +}; + +#[test] +fn test_expr_parse() { + let tokens = quote!(..100u32); + snapshot!(tokens as Expr, @r#" + Expr::Range { + limits: RangeLimits::HalfOpen, + end: Some(Expr::Lit { + lit: 100u32, + }), + } + "#); + + let tokens = quote!(..100u32); + snapshot!(tokens as ExprRange, @r#" + ExprRange { + limits: RangeLimits::HalfOpen, + end: Some(Expr::Lit { + lit: 100u32, + }), + } + "#); +} + +#[test] +fn test_await() { + // Must not parse as Expr::Field. + let tokens = quote!(fut.await); + + snapshot!(tokens as Expr, @r#" + Expr::Await { + base: Expr::Path { + path: Path { + segments: [ + PathSegment { + ident: "fut", + }, + ], + }, + }, + } + "#); +} + +#[rustfmt::skip] +#[test] +fn test_tuple_multi_index() { + let expected = snapshot!("tuple.0.0" as Expr, @r#" + Expr::Field { + base: Expr::Field { + base: Expr::Path { + path: Path { + segments: [ + PathSegment { + ident: "tuple", + }, + ], + }, + }, + member: Member::Unnamed(Index { + index: 0, + }), + }, + member: Member::Unnamed(Index { + index: 0, + }), + } + "#); + + for &input in &[ + "tuple .0.0", + "tuple. 0.0", + "tuple.0 .0", + "tuple.0. 0", + "tuple . 0 . 0", + ] { + assert_eq!(expected, syn::parse_str(input).unwrap()); + } + + for tokens in [ + quote!(tuple.0.0), + quote!(tuple .0.0), + quote!(tuple. 0.0), + quote!(tuple.0 .0), + quote!(tuple.0. 0), + quote!(tuple . 0 . 
0), + ] { + assert_eq!(expected, syn::parse2(tokens).unwrap()); + } +} + +#[test] +fn test_macro_variable_func() { + // mimics the token stream corresponding to `$fn()` + let path = Group::new(Delimiter::None, quote!(f)); + let tokens = quote!(#path()); + + snapshot!(tokens as Expr, @r#" + Expr::Call { + func: Expr::Group { + expr: Expr::Path { + path: Path { + segments: [ + PathSegment { + ident: "f", + }, + ], + }, + }, + }, + } + "#); + + let path = Group::new(Delimiter::None, quote! { #[inside] f }); + let tokens = quote!(#[outside] #path()); + + snapshot!(tokens as Expr, @r#" + Expr::Call { + attrs: [ + Attribute { + style: AttrStyle::Outer, + meta: Meta::Path { + segments: [ + PathSegment { + ident: "outside", + }, + ], + }, + }, + ], + func: Expr::Group { + expr: Expr::Path { + attrs: [ + Attribute { + style: AttrStyle::Outer, + meta: Meta::Path { + segments: [ + PathSegment { + ident: "inside", + }, + ], + }, + }, + ], + path: Path { + segments: [ + PathSegment { + ident: "f", + }, + ], + }, + }, + }, + } + "#); +} + +#[test] +fn test_macro_variable_macro() { + // mimics the token stream corresponding to `$macro!()` + let mac = Group::new(Delimiter::None, quote!(m)); + let tokens = quote!(#mac!()); + + snapshot!(tokens as Expr, @r#" + Expr::Macro { + mac: Macro { + path: Path { + segments: [ + PathSegment { + ident: "m", + }, + ], + }, + delimiter: MacroDelimiter::Paren, + tokens: TokenStream(``), + }, + } + "#); +} + +#[test] +fn test_macro_variable_struct() { + // mimics the token stream corresponding to `$struct {}` + let s = Group::new(Delimiter::None, quote! { S }); + let tokens = quote!(#s {}); + + snapshot!(tokens as Expr, @r#" + Expr::Struct { + path: Path { + segments: [ + PathSegment { + ident: "S", + }, + ], + }, + } + "#); +} + +#[test] +fn test_macro_variable_unary() { + // mimics the token stream corresponding to `$expr.method()` where expr is `&self` + let inner = Group::new(Delimiter::None, quote!(&self)); + let tokens = quote!(#inner.method()); + snapshot!(tokens as Expr, @r#" + Expr::MethodCall { + receiver: Expr::Group { + expr: Expr::Reference { + expr: Expr::Path { + path: Path { + segments: [ + PathSegment { + ident: "self", + }, + ], + }, + }, + }, + }, + method: "method", + } + "#); +} + +#[test] +fn test_macro_variable_match_arm() { + // mimics the token stream corresponding to `match v { _ => $expr }` + let expr = Group::new(Delimiter::None, quote! 
{ #[a] () }); + let tokens = quote!(match v { _ => #expr }); + snapshot!(tokens as Expr, @r#" + Expr::Match { + expr: Expr::Path { + path: Path { + segments: [ + PathSegment { + ident: "v", + }, + ], + }, + }, + arms: [ + Arm { + pat: Pat::Wild, + body: Expr::Group { + expr: Expr::Tuple { + attrs: [ + Attribute { + style: AttrStyle::Outer, + meta: Meta::Path { + segments: [ + PathSegment { + ident: "a", + }, + ], + }, + }, + ], + }, + }, + }, + ], + } + "#); + + let expr = Group::new(Delimiter::None, quote!(loop {} + 1)); + let tokens = quote!(match v { _ => #expr }); + snapshot!(tokens as Expr, @r#" + Expr::Match { + expr: Expr::Path { + path: Path { + segments: [ + PathSegment { + ident: "v", + }, + ], + }, + }, + arms: [ + Arm { + pat: Pat::Wild, + body: Expr::Group { + expr: Expr::Binary { + left: Expr::Loop { + body: Block { + stmts: [], + }, + }, + op: BinOp::Add, + right: Expr::Lit { + lit: 1, + }, + }, + }, + }, + ], + } + "#); +} + +// https://github.com/dtolnay/syn/issues/1019 +#[test] +fn test_closure_vs_rangefull() { + #[rustfmt::skip] // rustfmt bug: https://github.com/rust-lang/rustfmt/issues/4808 + let tokens = quote!(|| .. .method()); + snapshot!(tokens as Expr, @r#" + Expr::MethodCall { + receiver: Expr::Closure { + output: ReturnType::Default, + body: Expr::Range { + limits: RangeLimits::HalfOpen, + }, + }, + method: "method", + } + "#); +} + +#[test] +fn test_postfix_operator_after_cast() { + syn::parse_str::<Expr>("|| &x as T[0]").unwrap_err(); + syn::parse_str::<Expr>("|| () as ()()").unwrap_err(); +} + +#[test] +fn test_range_kinds() { + syn::parse_str::<Expr>("..").unwrap(); + syn::parse_str::<Expr>("..hi").unwrap(); + syn::parse_str::<Expr>("lo..").unwrap(); + syn::parse_str::<Expr>("lo..hi").unwrap(); + + syn::parse_str::<Expr>("..=").unwrap_err(); + syn::parse_str::<Expr>("..=hi").unwrap(); + syn::parse_str::<Expr>("lo..=").unwrap_err(); + syn::parse_str::<Expr>("lo..=hi").unwrap(); + + syn::parse_str::<Expr>("...").unwrap_err(); + syn::parse_str::<Expr>("...hi").unwrap_err(); + syn::parse_str::<Expr>("lo...").unwrap_err(); + syn::parse_str::<Expr>("lo...hi").unwrap_err(); +} + +#[test] +fn test_range_precedence() { + snapshot!(".. .." as Expr, @r#" + Expr::Range { + limits: RangeLimits::HalfOpen, + end: Some(Expr::Range { + limits: RangeLimits::HalfOpen, + }), + } + "#); + + snapshot!(".. .. ()" as Expr, @r#" + Expr::Range { + limits: RangeLimits::HalfOpen, + end: Some(Expr::Range { + limits: RangeLimits::HalfOpen, + end: Some(Expr::Tuple), + }), + } + "#); + + snapshot!("() .. .." as Expr, @r#" + Expr::Range { + start: Some(Expr::Tuple), + limits: RangeLimits::HalfOpen, + end: Some(Expr::Range { + limits: RangeLimits::HalfOpen, + }), + } + "#); + + snapshot!("() = .. + ()" as Expr, @r" + Expr::Binary { + left: Expr::Assign { + left: Expr::Tuple, + right: Expr::Range { + limits: RangeLimits::HalfOpen, + }, + }, + op: BinOp::Add, + right: Expr::Tuple, + } + "); + + // A range with a lower bound cannot be the upper bound of another range, + // and a range with an upper bound cannot be the lower bound of another + // range. + syn::parse_str::<Expr>(".. x ..").unwrap_err(); + syn::parse_str::<Expr>("x .. x ..").unwrap_err(); +} + +#[test] +fn test_range_attrs() { + // Attributes are not allowed on range expressions starting with `..` + syn::parse_str::<Expr>("#[allow()] ..").unwrap_err(); + syn::parse_str::<Expr>("#[allow()] .. hi").unwrap_err(); + + snapshot!("#[allow()] lo .. 
hi" as Expr, @r#" + Expr::Range { + start: Some(Expr::Path { + attrs: [ + Attribute { + style: AttrStyle::Outer, + meta: Meta::List { + path: Path { + segments: [ + PathSegment { + ident: "allow", + }, + ], + }, + delimiter: MacroDelimiter::Paren, + tokens: TokenStream(``), + }, + }, + ], + path: Path { + segments: [ + PathSegment { + ident: "lo", + }, + ], + }, + }), + limits: RangeLimits::HalfOpen, + end: Some(Expr::Path { + path: Path { + segments: [ + PathSegment { + ident: "hi", + }, + ], + }, + }), + } + "#); +} + +#[test] +fn test_ranges_bailout() { + syn::parse_str::<Expr>(".. ?").unwrap_err(); + syn::parse_str::<Expr>(".. .field").unwrap_err(); + + snapshot!("return .. ?" as Expr, @r" + Expr::Try { + expr: Expr::Return { + expr: Some(Expr::Range { + limits: RangeLimits::HalfOpen, + }), + }, + } + "); + + snapshot!("break .. ?" as Expr, @r" + Expr::Try { + expr: Expr::Break { + expr: Some(Expr::Range { + limits: RangeLimits::HalfOpen, + }), + }, + } + "); + + snapshot!("|| .. ?" as Expr, @r" + Expr::Try { + expr: Expr::Closure { + output: ReturnType::Default, + body: Expr::Range { + limits: RangeLimits::HalfOpen, + }, + }, + } + "); + + snapshot!("return .. .field" as Expr, @r#" + Expr::Field { + base: Expr::Return { + expr: Some(Expr::Range { + limits: RangeLimits::HalfOpen, + }), + }, + member: Member::Named("field"), + } + "#); + + snapshot!("break .. .field" as Expr, @r#" + Expr::Field { + base: Expr::Break { + expr: Some(Expr::Range { + limits: RangeLimits::HalfOpen, + }), + }, + member: Member::Named("field"), + } + "#); + + snapshot!("|| .. .field" as Expr, @r#" + Expr::Field { + base: Expr::Closure { + output: ReturnType::Default, + body: Expr::Range { + limits: RangeLimits::HalfOpen, + }, + }, + member: Member::Named("field"), + } + "#); + + snapshot!("return .. = ()" as Expr, @r" + Expr::Assign { + left: Expr::Return { + expr: Some(Expr::Range { + limits: RangeLimits::HalfOpen, + }), + }, + right: Expr::Tuple, + } + "); + + snapshot!("return .. += ()" as Expr, @r" + Expr::Binary { + left: Expr::Return { + expr: Some(Expr::Range { + limits: RangeLimits::HalfOpen, + }), + }, + op: BinOp::AddAssign, + right: Expr::Tuple, + } + "); +} + +#[test] +fn test_ambiguous_label() { + for stmt in [ + quote! { + return 'label: loop { break 'label 42; }; + }, + quote! { + break ('label: loop { break 'label 42; }); + }, + quote! { + break 1 + 'label: loop { break 'label 42; }; + }, + quote! { + break 'outer 'inner: loop { break 'inner 42; }; + }, + ] { + syn::parse2::<Stmt>(stmt).unwrap(); + } + + for stmt in [ + // Parentheses required. See https://github.com/rust-lang/rust/pull/87026. + quote! 
{ + break 'label: loop { break 'label 42; }; + }, + ] { + syn::parse2::<Stmt>(stmt).unwrap_err(); + } +} + +#[test] +fn test_extended_interpolated_path() { + let path = Group::new(Delimiter::None, quote!(a::b)); + + let tokens = quote!(if #path {}); + snapshot!(tokens as Expr, @r#" + Expr::If { + cond: Expr::Group { + expr: Expr::Path { + path: Path { + segments: [ + PathSegment { + ident: "a", + }, + Token![::], + PathSegment { + ident: "b", + }, + ], + }, + }, + }, + then_branch: Block { + stmts: [], + }, + } + "#); + + let tokens = quote!(#path {}); + snapshot!(tokens as Expr, @r#" + Expr::Struct { + path: Path { + segments: [ + PathSegment { + ident: "a", + }, + Token![::], + PathSegment { + ident: "b", + }, + ], + }, + } + "#); + + let tokens = quote!(#path :: c); + snapshot!(tokens as Expr, @r#" + Expr::Path { + path: Path { + segments: [ + PathSegment { + ident: "a", + }, + Token![::], + PathSegment { + ident: "b", + }, + Token![::], + PathSegment { + ident: "c", + }, + ], + }, + } + "#); + + let nested = Group::new(Delimiter::None, quote!(a::b || true)); + let tokens = quote!(if #nested && false {}); + snapshot!(tokens as Expr, @r#" + Expr::If { + cond: Expr::Binary { + left: Expr::Group { + expr: Expr::Binary { + left: Expr::Path { + path: Path { + segments: [ + PathSegment { + ident: "a", + }, + Token![::], + PathSegment { + ident: "b", + }, + ], + }, + }, + op: BinOp::Or, + right: Expr::Lit { + lit: Lit::Bool { + value: true, + }, + }, + }, + }, + op: BinOp::And, + right: Expr::Lit { + lit: Lit::Bool { + value: false, + }, + }, + }, + then_branch: Block { + stmts: [], + }, + } + "#); +} + +#[test] +fn test_tuple_comma() { + let mut expr = ExprTuple { + attrs: Vec::new(), + paren_token: token::Paren::default(), + elems: Punctuated::new(), + }; + snapshot!(expr.to_token_stream() as Expr, @"Expr::Tuple"); + + expr.elems.push_value(parse_quote!(continue)); + // Must not parse to Expr::Paren + snapshot!(expr.to_token_stream() as Expr, @r#" + Expr::Tuple { + elems: [ + Expr::Continue, + Token![,], + ], + } + "#); + + expr.elems.push_punct(<Token![,]>::default()); + snapshot!(expr.to_token_stream() as Expr, @r#" + Expr::Tuple { + elems: [ + Expr::Continue, + Token![,], + ], + } + "#); + + expr.elems.push_value(parse_quote!(continue)); + snapshot!(expr.to_token_stream() as Expr, @r#" + Expr::Tuple { + elems: [ + Expr::Continue, + Token![,], + Expr::Continue, + ], + } + "#); + + expr.elems.push_punct(<Token![,]>::default()); + snapshot!(expr.to_token_stream() as Expr, @r#" + Expr::Tuple { + elems: [ + Expr::Continue, + Token![,], + Expr::Continue, + Token![,], + ], + } + "#); +} + +#[test] +fn test_binop_associativity() { + // Left to right. + snapshot!("() + () + ()" as Expr, @r#" + Expr::Binary { + left: Expr::Binary { + left: Expr::Tuple, + op: BinOp::Add, + right: Expr::Tuple, + }, + op: BinOp::Add, + right: Expr::Tuple, + } + "#); + + // Right to left. + snapshot!("() += () += ()" as Expr, @r#" + Expr::Binary { + left: Expr::Tuple, + op: BinOp::AddAssign, + right: Expr::Binary { + left: Expr::Tuple, + op: BinOp::AddAssign, + right: Expr::Tuple, + }, + } + "#); + + // Parenthesization is required. + syn::parse_str::<Expr>("() == () == ()").unwrap_err(); +} + +#[test] +fn test_assign_range_precedence() { + // Range has higher precedence as the right-hand of an assignment, but + // ambiguous precedence as the left-hand of an assignment. + snapshot!("() = () .. 
()" as Expr, @r#" + Expr::Assign { + left: Expr::Tuple, + right: Expr::Range { + start: Some(Expr::Tuple), + limits: RangeLimits::HalfOpen, + end: Some(Expr::Tuple), + }, + } + "#); + + snapshot!("() += () .. ()" as Expr, @r#" + Expr::Binary { + left: Expr::Tuple, + op: BinOp::AddAssign, + right: Expr::Range { + start: Some(Expr::Tuple), + limits: RangeLimits::HalfOpen, + end: Some(Expr::Tuple), + }, + } + "#); + + syn::parse_str::<Expr>("() .. () = ()").unwrap_err(); + syn::parse_str::<Expr>("() .. () += ()").unwrap_err(); +} + +#[test] +fn test_chained_comparison() { + // https://github.com/dtolnay/syn/issues/1738 + let _ = syn::parse_str::<Expr>("a = a < a <"); + let _ = syn::parse_str::<Expr>("a = a .. a .."); + let _ = syn::parse_str::<Expr>("a = a .. a +="); + + let err = syn::parse_str::<Expr>("a < a < a").unwrap_err(); + assert_eq!("comparison operators cannot be chained", err.to_string()); + + let err = syn::parse_str::<Expr>("a .. a .. a").unwrap_err(); + assert_eq!("unexpected token", err.to_string()); + + let err = syn::parse_str::<Expr>("a .. a += a").unwrap_err(); + assert_eq!("unexpected token", err.to_string()); +} + +#[test] +fn test_fixup() { + for tokens in [ + quote! { 2 * (1 + 1) }, + quote! { 0 + (0 + 0) }, + quote! { (a = b) = c }, + quote! { (x as i32) < 0 }, + quote! { 1 + (x as i32) < 0 }, + quote! { (1 + 1).abs() }, + quote! { (lo..hi)[..] }, + quote! { (a..b)..(c..d) }, + quote! { (x > ..) > x }, + quote! { (&mut fut).await }, + quote! { &mut (x as i32) }, + quote! { -(x as i32) }, + quote! { if (S {}) == 1 {} }, + quote! { { (m! {}) - 1 } }, + quote! { match m { _ => ({}) - 1 } }, + quote! { if let _ = (a && b) && c {} }, + quote! { if let _ = (S {}) {} }, + quote! { if (S {}) == 0 && let Some(_) = x {} }, + quote! { break ('a: loop { break 'a 1 } + 1) }, + quote! { a + (|| b) + c }, + quote! { if let _ = ((break) - 1 || true) {} }, + quote! { if let _ = (break + 1 || true) {} }, + quote! { if break (break) {} }, + quote! { if break break {} {} }, + quote! { if return (..) {} }, + quote! { if return .. {} {} }, + quote! { if || (Struct {}) {} }, + quote! { if || (Struct {}).await {} }, + quote! { if break || Struct {}.await {} }, + quote! { if break 'outer 'block: {} {} }, + quote! { if ..'block: {} {} }, + quote! { if break ({}).await {} }, + quote! { (break)() }, + quote! { (..) = () }, + quote! { (..) += () }, + quote! { (1 < 2) == (3 < 4) }, + quote! { { (let _ = ()) } }, + quote! { (#[attr] thing).field }, + quote! { #[attr] (1 + 1) }, + quote! { #[attr] (x = 1) }, + quote! { #[attr] (x += 1) }, + quote! { #[attr] (1 as T) }, + quote! { (return #[attr] (x + ..)).field }, + quote! { (self.f)() }, + quote! { (return)..=return }, + quote! { 1 + (return)..=1 + return }, + quote! { .. .. .. .. .. .. .. .. .. .. .. .. .. .. .. .. .. .. .. .. .. .. .. .. .. .. .. .. .. .. .. 
}, + ] { + let original: Expr = syn::parse2(tokens).unwrap(); + + let mut flat = original.clone(); + FlattenParens::combine_attrs().visit_expr_mut(&mut flat); + let reconstructed: Expr = match syn::parse2(flat.to_token_stream()) { + Ok(reconstructed) => reconstructed, + Err(err) => panic!("failed to parse `{}`: {}", flat.to_token_stream(), err), + }; + + assert!( + original == reconstructed, + "original: {}\n{:#?}\nreconstructed: {}\n{:#?}", + original.to_token_stream(), + crate::debug::Lite(&original), + reconstructed.to_token_stream(), + crate::debug::Lite(&reconstructed), + ); + } +} + +#[test] +fn test_permutations() -> ExitCode { + fn iter(depth: usize, f: &mut dyn FnMut(Expr)) { + let span = Span::call_site(); + + // Expr::Path + f(Expr::Path(ExprPath { + // `x` + attrs: Vec::new(), + qself: None, + path: Path::from(Ident::new("x", span)), + })); + if false { + f(Expr::Path(ExprPath { + // `x::<T>` + attrs: Vec::new(), + qself: None, + path: Path { + leading_colon: None, + segments: Punctuated::from_iter([PathSegment { + ident: Ident::new("x", span), + arguments: PathArguments::AngleBracketed(AngleBracketedGenericArguments { + colon2_token: Some(Token![::](span)), + lt_token: Token![<](span), + args: Punctuated::from_iter([GenericArgument::Type(Type::Path( + TypePath { + qself: None, + path: Path::from(Ident::new("T", span)), + }, + ))]), + gt_token: Token![>](span), + }), + }]), + }, + })); + f(Expr::Path(ExprPath { + // `<T as Trait>::CONST` + attrs: Vec::new(), + qself: Some(QSelf { + lt_token: Token![<](span), + ty: Box::new(Type::Path(TypePath { + qself: None, + path: Path::from(Ident::new("T", span)), + })), + position: 1, + as_token: Some(Token![as](span)), + gt_token: Token![>](span), + }), + path: Path { + leading_colon: None, + segments: Punctuated::from_iter([ + PathSegment::from(Ident::new("Trait", span)), + PathSegment::from(Ident::new("CONST", span)), + ]), + }, + })); + } + + let Some(depth) = depth.checked_sub(1) else { + return; + }; + + // Expr::Assign + iter(depth, &mut |expr| { + iter(0, &mut |simple| { + f(Expr::Assign(ExprAssign { + // `x = $expr` + attrs: Vec::new(), + left: Box::new(simple.clone()), + eq_token: Token![=](span), + right: Box::new(expr.clone()), + })); + f(Expr::Assign(ExprAssign { + // `$expr = x` + attrs: Vec::new(), + left: Box::new(expr.clone()), + eq_token: Token![=](span), + right: Box::new(simple), + })); + }); + }); + + // Expr::Binary + iter(depth, &mut |expr| { + iter(0, &mut |simple| { + for op in [ + BinOp::Add(Token![+](span)), + //BinOp::Sub(Token![-](span)), + //BinOp::Mul(Token![*](span)), + //BinOp::Div(Token![/](span)), + //BinOp::Rem(Token![%](span)), + //BinOp::And(Token![&&](span)), + //BinOp::Or(Token![||](span)), + //BinOp::BitXor(Token![^](span)), + //BinOp::BitAnd(Token![&](span)), + //BinOp::BitOr(Token![|](span)), + //BinOp::Shl(Token![<<](span)), + //BinOp::Shr(Token![>>](span)), + //BinOp::Eq(Token![==](span)), + BinOp::Lt(Token![<](span)), + //BinOp::Le(Token![<=](span)), + //BinOp::Ne(Token![!=](span)), + //BinOp::Ge(Token![>=](span)), + //BinOp::Gt(Token![>](span)), + BinOp::ShlAssign(Token![<<=](span)), + ] { + f(Expr::Binary(ExprBinary { + // `x + $expr` + attrs: Vec::new(), + left: Box::new(simple.clone()), + op, + right: Box::new(expr.clone()), + })); + f(Expr::Binary(ExprBinary { + // `$expr + x` + attrs: Vec::new(), + left: Box::new(expr.clone()), + op, + right: Box::new(simple.clone()), + })); + } + }); + }); + + // Expr::Block + f(Expr::Block(ExprBlock { + // `{}` + attrs: Vec::new(), + label: None, + 
block: Block { + brace_token: token::Brace(span), + stmts: Vec::new(), + }, + })); + + // Expr::Break + f(Expr::Break(ExprBreak { + // `break` + attrs: Vec::new(), + break_token: Token![break](span), + label: None, + expr: None, + })); + iter(depth, &mut |expr| { + f(Expr::Break(ExprBreak { + // `break $expr` + attrs: Vec::new(), + break_token: Token![break](span), + label: None, + expr: Some(Box::new(expr)), + })); + }); + + // Expr::Call + iter(depth, &mut |expr| { + f(Expr::Call(ExprCall { + // `$expr()` + attrs: Vec::new(), + func: Box::new(expr), + paren_token: token::Paren(span), + args: Punctuated::new(), + })); + }); + + // Expr::Cast + iter(depth, &mut |expr| { + f(Expr::Cast(ExprCast { + // `$expr as T` + attrs: Vec::new(), + expr: Box::new(expr), + as_token: Token![as](span), + ty: Box::new(Type::Path(TypePath { + qself: None, + path: Path::from(Ident::new("T", span)), + })), + })); + }); + + // Expr::Closure + iter(depth, &mut |expr| { + f(Expr::Closure(ExprClosure { + // `|| $expr` + attrs: Vec::new(), + lifetimes: None, + constness: None, + movability: None, + asyncness: None, + capture: None, + or1_token: Token![|](span), + inputs: Punctuated::new(), + or2_token: Token![|](span), + output: ReturnType::Default, + body: Box::new(expr), + })); + }); + + // Expr::Field + iter(depth, &mut |expr| { + f(Expr::Field(ExprField { + // `$expr.field` + attrs: Vec::new(), + base: Box::new(expr), + dot_token: Token![.](span), + member: Member::Named(Ident::new("field", span)), + })); + }); + + // Expr::If + iter(depth, &mut |expr| { + f(Expr::If(ExprIf { + // `if $expr {}` + attrs: Vec::new(), + if_token: Token![if](span), + cond: Box::new(expr), + then_branch: Block { + brace_token: token::Brace(span), + stmts: Vec::new(), + }, + else_branch: None, + })); + }); + + // Expr::Let + iter(depth, &mut |expr| { + f(Expr::Let(ExprLet { + attrs: Vec::new(), + let_token: Token![let](span), + pat: Box::new(Pat::Wild(PatWild { + attrs: Vec::new(), + underscore_token: Token![_](span), + })), + eq_token: Token![=](span), + expr: Box::new(expr), + })); + }); + + // Expr::Range + f(Expr::Range(ExprRange { + // `..` + attrs: Vec::new(), + start: None, + limits: RangeLimits::HalfOpen(Token![..](span)), + end: None, + })); + iter(depth, &mut |expr| { + f(Expr::Range(ExprRange { + // `..$expr` + attrs: Vec::new(), + start: None, + limits: RangeLimits::HalfOpen(Token![..](span)), + end: Some(Box::new(expr.clone())), + })); + f(Expr::Range(ExprRange { + // `$expr..` + attrs: Vec::new(), + start: Some(Box::new(expr)), + limits: RangeLimits::HalfOpen(Token![..](span)), + end: None, + })); + }); + + // Expr::Reference + iter(depth, &mut |expr| { + f(Expr::Reference(ExprReference { + // `&$expr` + attrs: Vec::new(), + and_token: Token![&](span), + mutability: None, + expr: Box::new(expr), + })); + }); + + // Expr::Return + f(Expr::Return(ExprReturn { + // `return` + attrs: Vec::new(), + return_token: Token![return](span), + expr: None, + })); + iter(depth, &mut |expr| { + f(Expr::Return(ExprReturn { + // `return $expr` + attrs: Vec::new(), + return_token: Token![return](span), + expr: Some(Box::new(expr)), + })); + }); + + // Expr::Try + iter(depth, &mut |expr| { + f(Expr::Try(ExprTry { + // `$expr?` + attrs: Vec::new(), + expr: Box::new(expr), + question_token: Token![?](span), + })); + }); + + // Expr::Unary + iter(depth, &mut |expr| { + for op in [ + UnOp::Deref(Token![*](span)), + //UnOp::Not(Token![!](span)), + //UnOp::Neg(Token![-](span)), + ] { + f(Expr::Unary(ExprUnary { + // `*$expr` + attrs: 
Vec::new(), + op, + expr: Box::new(expr.clone()), + })); + } + }); + + if false { + // Expr::Array + f(Expr::Array(ExprArray { + // `[]` + attrs: Vec::new(), + bracket_token: token::Bracket(span), + elems: Punctuated::new(), + })); + + // Expr::Async + f(Expr::Async(ExprAsync { + // `async {}` + attrs: Vec::new(), + async_token: Token![async](span), + capture: None, + block: Block { + brace_token: token::Brace(span), + stmts: Vec::new(), + }, + })); + + // Expr::Await + iter(depth, &mut |expr| { + f(Expr::Await(ExprAwait { + // `$expr.await` + attrs: Vec::new(), + base: Box::new(expr), + dot_token: Token![.](span), + await_token: Token![await](span), + })); + }); + + // Expr::Block + f(Expr::Block(ExprBlock { + // `'a: {}` + attrs: Vec::new(), + label: Some(Label { + name: Lifetime::new("'a", span), + colon_token: Token![:](span), + }), + block: Block { + brace_token: token::Brace(span), + stmts: Vec::new(), + }, + })); + iter(depth, &mut |expr| { + f(Expr::Block(ExprBlock { + // `{ $expr }` + attrs: Vec::new(), + label: None, + block: Block { + brace_token: token::Brace(span), + stmts: Vec::from([Stmt::Expr(expr.clone(), None)]), + }, + })); + f(Expr::Block(ExprBlock { + // `{ $expr; }` + attrs: Vec::new(), + label: None, + block: Block { + brace_token: token::Brace(span), + stmts: Vec::from([Stmt::Expr(expr, Some(Token![;](span)))]), + }, + })); + }); + + // Expr::Break + f(Expr::Break(ExprBreak { + // `break 'a` + attrs: Vec::new(), + break_token: Token![break](span), + label: Some(Lifetime::new("'a", span)), + expr: None, + })); + iter(depth, &mut |expr| { + f(Expr::Break(ExprBreak { + // `break 'a $expr` + attrs: Vec::new(), + break_token: Token![break](span), + label: Some(Lifetime::new("'a", span)), + expr: Some(Box::new(expr)), + })); + }); + + // Expr::Closure + f(Expr::Closure(ExprClosure { + // `|| -> T {}` + attrs: Vec::new(), + lifetimes: None, + constness: None, + movability: None, + asyncness: None, + capture: None, + or1_token: Token![|](span), + inputs: Punctuated::new(), + or2_token: Token![|](span), + output: ReturnType::Type( + Token![->](span), + Box::new(Type::Path(TypePath { + qself: None, + path: Path::from(Ident::new("T", span)), + })), + ), + body: Box::new(Expr::Block(ExprBlock { + attrs: Vec::new(), + label: None, + block: Block { + brace_token: token::Brace(span), + stmts: Vec::new(), + }, + })), + })); + + // Expr::Const + f(Expr::Const(ExprConst { + // `const {}` + attrs: Vec::new(), + const_token: Token![const](span), + block: Block { + brace_token: token::Brace(span), + stmts: Vec::new(), + }, + })); + + // Expr::Continue + f(Expr::Continue(ExprContinue { + // `continue` + attrs: Vec::new(), + continue_token: Token![continue](span), + label: None, + })); + f(Expr::Continue(ExprContinue { + // `continue 'a` + attrs: Vec::new(), + continue_token: Token![continue](span), + label: Some(Lifetime::new("'a", span)), + })); + + // Expr::ForLoop + iter(depth, &mut |expr| { + f(Expr::ForLoop(ExprForLoop { + // `for _ in $expr {}` + attrs: Vec::new(), + label: None, + for_token: Token![for](span), + pat: Box::new(Pat::Wild(PatWild { + attrs: Vec::new(), + underscore_token: Token![_](span), + })), + in_token: Token![in](span), + expr: Box::new(expr.clone()), + body: Block { + brace_token: token::Brace(span), + stmts: Vec::new(), + }, + })); + f(Expr::ForLoop(ExprForLoop { + // `'a: for _ in $expr {}` + attrs: Vec::new(), + label: Some(Label { + name: Lifetime::new("'a", span), + colon_token: Token![:](span), + }), + for_token: Token![for](span), + pat: 
Box::new(Pat::Wild(PatWild { + attrs: Vec::new(), + underscore_token: Token![_](span), + })), + in_token: Token![in](span), + expr: Box::new(expr), + body: Block { + brace_token: token::Brace(span), + stmts: Vec::new(), + }, + })); + }); + + // Expr::Index + iter(depth, &mut |expr| { + f(Expr::Index(ExprIndex { + // `$expr[0]` + attrs: Vec::new(), + expr: Box::new(expr), + bracket_token: token::Bracket(span), + index: Box::new(Expr::Lit(ExprLit { + attrs: Vec::new(), + lit: Lit::Int(LitInt::new("0", span)), + })), + })); + }); + + // Expr::Loop + f(Expr::Loop(ExprLoop { + // `loop {}` + attrs: Vec::new(), + label: None, + loop_token: Token![loop](span), + body: Block { + brace_token: token::Brace(span), + stmts: Vec::new(), + }, + })); + f(Expr::Loop(ExprLoop { + // `'a: loop {}` + attrs: Vec::new(), + label: Some(Label { + name: Lifetime::new("'a", span), + colon_token: Token![:](span), + }), + loop_token: Token![loop](span), + body: Block { + brace_token: token::Brace(span), + stmts: Vec::new(), + }, + })); + + // Expr::Macro + f(Expr::Macro(ExprMacro { + // `m!()` + attrs: Vec::new(), + mac: Macro { + path: Path::from(Ident::new("m", span)), + bang_token: Token![!](span), + delimiter: MacroDelimiter::Paren(token::Paren(span)), + tokens: TokenStream::new(), + }, + })); + f(Expr::Macro(ExprMacro { + // `m! {}` + attrs: Vec::new(), + mac: Macro { + path: Path::from(Ident::new("m", span)), + bang_token: Token![!](span), + delimiter: MacroDelimiter::Brace(token::Brace(span)), + tokens: TokenStream::new(), + }, + })); + + // Expr::Match + iter(depth, &mut |expr| { + f(Expr::Match(ExprMatch { + // `match $expr {}` + attrs: Vec::new(), + match_token: Token![match](span), + expr: Box::new(expr.clone()), + brace_token: token::Brace(span), + arms: Vec::new(), + })); + f(Expr::Match(ExprMatch { + // `match x { _ => $expr }` + attrs: Vec::new(), + match_token: Token![match](span), + expr: Box::new(Expr::Path(ExprPath { + attrs: Vec::new(), + qself: None, + path: Path::from(Ident::new("x", span)), + })), + brace_token: token::Brace(span), + arms: Vec::from([Arm { + attrs: Vec::new(), + pat: Pat::Wild(PatWild { + attrs: Vec::new(), + underscore_token: Token![_](span), + }), + guard: None, + fat_arrow_token: Token![=>](span), + body: Box::new(expr.clone()), + comma: None, + }]), + })); + f(Expr::Match(ExprMatch { + // `match x { _ if $expr => {} }` + attrs: Vec::new(), + match_token: Token![match](span), + expr: Box::new(Expr::Path(ExprPath { + attrs: Vec::new(), + qself: None, + path: Path::from(Ident::new("x", span)), + })), + brace_token: token::Brace(span), + arms: Vec::from([Arm { + attrs: Vec::new(), + pat: Pat::Wild(PatWild { + attrs: Vec::new(), + underscore_token: Token![_](span), + }), + guard: Some((Token![if](span), Box::new(expr))), + fat_arrow_token: Token![=>](span), + body: Box::new(Expr::Block(ExprBlock { + attrs: Vec::new(), + label: None, + block: Block { + brace_token: token::Brace(span), + stmts: Vec::new(), + }, + })), + comma: None, + }]), + })); + }); + + // Expr::MethodCall + iter(depth, &mut |expr| { + f(Expr::MethodCall(ExprMethodCall { + // `$expr.method()` + attrs: Vec::new(), + receiver: Box::new(expr.clone()), + dot_token: Token![.](span), + method: Ident::new("method", span), + turbofish: None, + paren_token: token::Paren(span), + args: Punctuated::new(), + })); + f(Expr::MethodCall(ExprMethodCall { + // `$expr.method::<T>()` + attrs: Vec::new(), + receiver: Box::new(expr), + dot_token: Token![.](span), + method: Ident::new("method", span), + turbofish: 
Some(AngleBracketedGenericArguments { + colon2_token: Some(Token![::](span)), + lt_token: Token![<](span), + args: Punctuated::from_iter([GenericArgument::Type(Type::Path( + TypePath { + qself: None, + path: Path::from(Ident::new("T", span)), + }, + ))]), + gt_token: Token![>](span), + }), + paren_token: token::Paren(span), + args: Punctuated::new(), + })); + }); + + // Expr::RawAddr + iter(depth, &mut |expr| { + f(Expr::RawAddr(ExprRawAddr { + // `&raw const $expr` + attrs: Vec::new(), + and_token: Token![&](span), + raw: Token![raw](span), + mutability: PointerMutability::Const(Token![const](span)), + expr: Box::new(expr), + })); + }); + + // Expr::Struct + f(Expr::Struct(ExprStruct { + // `Struct {}` + attrs: Vec::new(), + qself: None, + path: Path::from(Ident::new("Struct", span)), + brace_token: token::Brace(span), + fields: Punctuated::new(), + dot2_token: None, + rest: None, + })); + + // Expr::TryBlock + f(Expr::TryBlock(ExprTryBlock { + // `try {}` + attrs: Vec::new(), + try_token: Token![try](span), + block: Block { + brace_token: token::Brace(span), + stmts: Vec::new(), + }, + })); + + // Expr::Unsafe + f(Expr::Unsafe(ExprUnsafe { + // `unsafe {}` + attrs: Vec::new(), + unsafe_token: Token![unsafe](span), + block: Block { + brace_token: token::Brace(span), + stmts: Vec::new(), + }, + })); + + // Expr::While + iter(depth, &mut |expr| { + f(Expr::While(ExprWhile { + // `while $expr {}` + attrs: Vec::new(), + label: None, + while_token: Token![while](span), + cond: Box::new(expr.clone()), + body: Block { + brace_token: token::Brace(span), + stmts: Vec::new(), + }, + })); + f(Expr::While(ExprWhile { + // `'a: while $expr {}` + attrs: Vec::new(), + label: Some(Label { + name: Lifetime::new("'a", span), + colon_token: Token![:](span), + }), + while_token: Token![while](span), + cond: Box::new(expr), + body: Block { + brace_token: token::Brace(span), + stmts: Vec::new(), + }, + })); + }); + + // Expr::Yield + f(Expr::Yield(ExprYield { + // `yield` + attrs: Vec::new(), + yield_token: Token![yield](span), + expr: None, + })); + iter(depth, &mut |expr| { + f(Expr::Yield(ExprYield { + // `yield $expr` + attrs: Vec::new(), + yield_token: Token![yield](span), + expr: Some(Box::new(expr)), + })); + }); + } + } + + let mut failures = 0; + macro_rules! 
fail { + ($($message:tt)*) => {{ + eprintln!($($message)*); + failures += 1; + return; + }}; + } + let mut assert = |mut original: Expr| { + let tokens = original.to_token_stream(); + let Ok(mut parsed) = syn::parse2::<Expr>(tokens.clone()) else { + fail!( + "failed to parse: {}\n{:#?}", + tokens, + crate::debug::Lite(&original), + ); + }; + AsIfPrinted.visit_expr_mut(&mut original); + FlattenParens::combine_attrs().visit_expr_mut(&mut parsed); + if original != parsed { + fail!( + "before: {}\n{:#?}\nafter: {}\n{:#?}", + tokens, + crate::debug::Lite(&original), + parsed.to_token_stream(), + crate::debug::Lite(&parsed), + ); + } + let mut tokens_no_paren = tokens.clone(); + FlattenParens::visit_token_stream_mut(&mut tokens_no_paren); + if tokens.to_string() != tokens_no_paren.to_string() { + if let Ok(mut parsed2) = syn::parse2::<Expr>(tokens_no_paren) { + FlattenParens::combine_attrs().visit_expr_mut(&mut parsed2); + if original == parsed2 { + fail!("redundant parens: {}", tokens); + } + } + } + }; + + iter(4, &mut assert); + if failures > 0 { + eprintln!("FAILURES: {failures}"); + ExitCode::FAILURE + } else { + ExitCode::SUCCESS + } +} diff --git a/vendor/syn/tests/test_generics.rs b/vendor/syn/tests/test_generics.rs new file mode 100644 index 00000000000000..2cb05251c16e8c --- /dev/null +++ b/vendor/syn/tests/test_generics.rs @@ -0,0 +1,345 @@ +#![allow( + clippy::elidable_lifetime_names, + clippy::manual_let_else, + clippy::needless_lifetimes, + clippy::too_many_lines, + clippy::uninlined_format_args +)] + +#[macro_use] +mod snapshot; + +mod debug; + +use quote::quote; +use syn::{ + parse_quote, DeriveInput, GenericParam, Generics, ItemFn, Lifetime, LifetimeParam, + TypeParamBound, WhereClause, WherePredicate, +}; + +#[test] +fn test_split_for_impl() { + let input = quote! { + struct S<'a, 'b: 'a, #[may_dangle] T: 'a = ()> where T: Debug; + }; + + snapshot!(input as DeriveInput, @r#" + DeriveInput { + vis: Visibility::Inherited, + ident: "S", + generics: Generics { + lt_token: Some, + params: [ + GenericParam::Lifetime(LifetimeParam { + lifetime: Lifetime { + ident: "a", + }, + }), + Token![,], + GenericParam::Lifetime(LifetimeParam { + lifetime: Lifetime { + ident: "b", + }, + colon_token: Some, + bounds: [ + Lifetime { + ident: "a", + }, + ], + }), + Token![,], + GenericParam::Type(TypeParam { + attrs: [ + Attribute { + style: AttrStyle::Outer, + meta: Meta::Path { + segments: [ + PathSegment { + ident: "may_dangle", + }, + ], + }, + }, + ], + ident: "T", + colon_token: Some, + bounds: [ + TypeParamBound::Lifetime { + ident: "a", + }, + ], + eq_token: Some, + default: Some(Type::Tuple), + }), + ], + gt_token: Some, + where_clause: Some(WhereClause { + predicates: [ + WherePredicate::Type(PredicateType { + bounded_ty: Type::Path { + path: Path { + segments: [ + PathSegment { + ident: "T", + }, + ], + }, + }, + bounds: [ + TypeParamBound::Trait(TraitBound { + path: Path { + segments: [ + PathSegment { + ident: "Debug", + }, + ], + }, + }), + ], + }), + ], + }), + }, + data: Data::Struct { + fields: Fields::Unit, + semi_token: Some, + }, + } + "#); + + let generics = input.generics; + let (impl_generics, ty_generics, where_clause) = generics.split_for_impl(); + + let generated = quote! { + impl #impl_generics MyTrait for Test #ty_generics #where_clause {} + }; + let expected = quote! 
{ + impl<'a, 'b: 'a, #[may_dangle] T: 'a> MyTrait + for Test<'a, 'b, T> + where + T: Debug + {} + }; + assert_eq!(generated.to_string(), expected.to_string()); + + let turbofish = ty_generics.as_turbofish(); + let generated = quote! { + Test #turbofish + }; + let expected = quote! { + Test::<'a, 'b, T> + }; + assert_eq!(generated.to_string(), expected.to_string()); +} + +#[test] +fn test_type_param_bound() { + let tokens = quote!('a); + snapshot!(tokens as TypeParamBound, @r#" + TypeParamBound::Lifetime { + ident: "a", + } + "#); + + let tokens = quote!('_); + snapshot!(tokens as TypeParamBound, @r#" + TypeParamBound::Lifetime { + ident: "_", + } + "#); + + let tokens = quote!(Debug); + snapshot!(tokens as TypeParamBound, @r#" + TypeParamBound::Trait(TraitBound { + path: Path { + segments: [ + PathSegment { + ident: "Debug", + }, + ], + }, + }) + "#); + + let tokens = quote!(?Sized); + snapshot!(tokens as TypeParamBound, @r#" + TypeParamBound::Trait(TraitBound { + modifier: TraitBoundModifier::Maybe, + path: Path { + segments: [ + PathSegment { + ident: "Sized", + }, + ], + }, + }) + "#); + + let tokens = quote!(for<'a> Trait); + snapshot!(tokens as TypeParamBound, @r#" + TypeParamBound::Trait(TraitBound { + lifetimes: Some(BoundLifetimes { + lifetimes: [ + GenericParam::Lifetime(LifetimeParam { + lifetime: Lifetime { + ident: "a", + }, + }), + ], + }), + path: Path { + segments: [ + PathSegment { + ident: "Trait", + }, + ], + }, + }) + "#); + + let tokens = quote!(for<> ?Trait); + let err = syn::parse2::<TypeParamBound>(tokens).unwrap_err(); + assert_eq!( + "`for<...>` binder not allowed with `?` trait polarity modifier", + err.to_string(), + ); + + let tokens = quote!(?for<> Trait); + let err = syn::parse2::<TypeParamBound>(tokens).unwrap_err(); + assert_eq!( + "`for<...>` binder not allowed with `?` trait polarity modifier", + err.to_string(), + ); +} + +#[test] +fn test_fn_precedence_in_where_clause() { + // This should parse as two separate bounds, `FnOnce() -> i32` and `Send` - not + // `FnOnce() -> (i32 + Send)`. + let input = quote! 
{ + fn f<G>() + where + G: FnOnce() -> i32 + Send, + { + } + }; + + snapshot!(input as ItemFn, @r#" + ItemFn { + vis: Visibility::Inherited, + sig: Signature { + ident: "f", + generics: Generics { + lt_token: Some, + params: [ + GenericParam::Type(TypeParam { + ident: "G", + }), + ], + gt_token: Some, + where_clause: Some(WhereClause { + predicates: [ + WherePredicate::Type(PredicateType { + bounded_ty: Type::Path { + path: Path { + segments: [ + PathSegment { + ident: "G", + }, + ], + }, + }, + bounds: [ + TypeParamBound::Trait(TraitBound { + path: Path { + segments: [ + PathSegment { + ident: "FnOnce", + arguments: PathArguments::Parenthesized { + output: ReturnType::Type( + Type::Path { + path: Path { + segments: [ + PathSegment { + ident: "i32", + }, + ], + }, + }, + ), + }, + }, + ], + }, + }), + Token![+], + TypeParamBound::Trait(TraitBound { + path: Path { + segments: [ + PathSegment { + ident: "Send", + }, + ], + }, + }), + ], + }), + Token![,], + ], + }), + }, + output: ReturnType::Default, + }, + block: Block { + stmts: [], + }, + } + "#); + + let where_clause = input.sig.generics.where_clause.as_ref().unwrap(); + assert_eq!(where_clause.predicates.len(), 1); + + let predicate = match &where_clause.predicates[0] { + WherePredicate::Type(pred) => pred, + _ => panic!("wrong predicate kind"), + }; + + assert_eq!(predicate.bounds.len(), 2, "{:#?}", predicate.bounds); + + let first_bound = &predicate.bounds[0]; + assert_eq!(quote!(#first_bound).to_string(), "FnOnce () -> i32"); + + let second_bound = &predicate.bounds[1]; + assert_eq!(quote!(#second_bound).to_string(), "Send"); +} + +#[test] +fn test_where_clause_at_end_of_input() { + let input = quote! { + where + }; + + snapshot!(input as WhereClause, @"WhereClause"); + + assert_eq!(input.predicates.len(), 0); +} + +// Regression test for https://github.com/dtolnay/syn/issues/1718 +#[test] +#[allow(clippy::map_unwrap_or)] +fn no_opaque_drop() { + let mut generics = Generics::default(); + + let _ = generics + .lifetimes() + .next() + .map(|param| param.lifetime.clone()) + .unwrap_or_else(|| { + let lifetime: Lifetime = parse_quote!('a); + generics.params.insert( + 0, + GenericParam::Lifetime(LifetimeParam::new(lifetime.clone())), + ); + lifetime + }); +} diff --git a/vendor/syn/tests/test_grouping.rs b/vendor/syn/tests/test_grouping.rs new file mode 100644 index 00000000000000..b466c7e7217e09 --- /dev/null +++ b/vendor/syn/tests/test_grouping.rs @@ -0,0 +1,59 @@ +#![allow( + clippy::elidable_lifetime_names, + clippy::needless_lifetimes, + clippy::uninlined_format_args +)] + +#[macro_use] +mod snapshot; + +mod debug; + +use proc_macro2::{Delimiter, Group, Literal, Punct, Spacing, TokenStream, TokenTree}; +use syn::Expr; + +#[test] +fn test_grouping() { + let tokens: TokenStream = TokenStream::from_iter([ + TokenTree::Literal(Literal::i32_suffixed(1)), + TokenTree::Punct(Punct::new('+', Spacing::Alone)), + TokenTree::Group(Group::new( + Delimiter::None, + TokenStream::from_iter([ + TokenTree::Literal(Literal::i32_suffixed(2)), + TokenTree::Punct(Punct::new('+', Spacing::Alone)), + TokenTree::Literal(Literal::i32_suffixed(3)), + ]), + )), + TokenTree::Punct(Punct::new('*', Spacing::Alone)), + TokenTree::Literal(Literal::i32_suffixed(4)), + ]); + + assert_eq!(tokens.to_string(), "1i32 + 2i32 + 3i32 * 4i32"); + + snapshot!(tokens as Expr, @r#" + Expr::Binary { + left: Expr::Lit { + lit: 1i32, + }, + op: BinOp::Add, + right: Expr::Binary { + left: Expr::Group { + expr: Expr::Binary { + left: Expr::Lit { + lit: 2i32, + }, + op: BinOp::Add, 
+ right: Expr::Lit { + lit: 3i32, + }, + }, + }, + op: BinOp::Mul, + right: Expr::Lit { + lit: 4i32, + }, + }, + } + "#); +} diff --git a/vendor/syn/tests/test_ident.rs b/vendor/syn/tests/test_ident.rs new file mode 100644 index 00000000000000..10df0ad56c2ad6 --- /dev/null +++ b/vendor/syn/tests/test_ident.rs @@ -0,0 +1,87 @@ +use proc_macro2::{Ident, Span, TokenStream}; +use std::str::FromStr; +use syn::Result; + +#[track_caller] +fn parse(s: &str) -> Result<Ident> { + syn::parse2(TokenStream::from_str(s).unwrap()) +} + +#[track_caller] +fn new(s: &str) -> Ident { + Ident::new(s, Span::call_site()) +} + +#[test] +fn ident_parse() { + parse("String").unwrap(); +} + +#[test] +fn ident_parse_keyword() { + parse("abstract").unwrap_err(); +} + +#[test] +fn ident_parse_empty() { + parse("").unwrap_err(); +} + +#[test] +fn ident_parse_lifetime() { + parse("'static").unwrap_err(); +} + +#[test] +fn ident_parse_underscore() { + parse("_").unwrap_err(); +} + +#[test] +fn ident_parse_number() { + parse("255").unwrap_err(); +} + +#[test] +fn ident_parse_invalid() { + parse("a#").unwrap_err(); +} + +#[test] +fn ident_new() { + new("String"); +} + +#[test] +fn ident_new_keyword() { + new("abstract"); +} + +#[test] +#[should_panic(expected = "use Option<Ident>")] +fn ident_new_empty() { + new(""); +} + +#[test] +#[should_panic(expected = "not a valid Ident")] +fn ident_new_lifetime() { + new("'static"); +} + +#[test] +fn ident_new_underscore() { + new("_"); +} + +#[test] +#[should_panic(expected = "use Literal instead")] +fn ident_new_number() { + new("255"); +} + +#[test] +#[should_panic(expected = "\"a#\" is not a valid Ident")] +fn ident_new_invalid() { + new("a#"); +} diff --git a/vendor/syn/tests/test_item.rs b/vendor/syn/tests/test_item.rs new file mode 100644 index 00000000000000..d9a7b5b6b08b60 --- /dev/null +++ b/vendor/syn/tests/test_item.rs @@ -0,0 +1,316 @@ +#![allow( + clippy::elidable_lifetime_names, + clippy::needless_lifetimes, + clippy::uninlined_format_args +)] + +#[macro_use] +mod snapshot; + +mod debug; + +use proc_macro2::{Delimiter, Group, Ident, Span, TokenStream, TokenTree}; +use quote::quote; +use syn::{Item, ItemTrait}; + +#[test] +fn test_macro_variable_attr() { + // mimics the token stream corresponding to `$attr fn f() {}` + let tokens = TokenStream::from_iter([ + TokenTree::Group(Group::new(Delimiter::None, quote! { #[test] })), + TokenTree::Ident(Ident::new("fn", Span::call_site())), + TokenTree::Ident(Ident::new("f", Span::call_site())), + TokenTree::Group(Group::new(Delimiter::Parenthesis, TokenStream::new())), + TokenTree::Group(Group::new(Delimiter::Brace, TokenStream::new())), + ]); + + snapshot!(tokens as Item, @r#" + Item::Fn { + attrs: [ + Attribute { + style: AttrStyle::Outer, + meta: Meta::Path { + segments: [ + PathSegment { + ident: "test", + }, + ], + }, + }, + ], + vis: Visibility::Inherited, + sig: Signature { + ident: "f", + generics: Generics, + output: ReturnType::Default, + }, + block: Block { + stmts: [], + }, + } + "#); +} + +#[test] +fn test_negative_impl() { + #[cfg(any())] + impl ! {} + let tokens = quote! { + impl ! {} + }; + snapshot!(tokens as Item, @r#" + Item::Impl { + generics: Generics, + self_ty: Type::Never, + } + "#); + + let tokens = quote! { + impl !Trait {} + }; + let err = syn::parse2::<Item>(tokens).unwrap_err(); + assert_eq!(err.to_string(), "inherent impls cannot be negative"); + + #[cfg(any())] + impl !Trait for T {} + let tokens = quote! 
{ + impl !Trait for T {} + }; + snapshot!(tokens as Item, @r#" + Item::Impl { + generics: Generics, + trait_: Some(( + Some, + Path { + segments: [ + PathSegment { + ident: "Trait", + }, + ], + }, + )), + self_ty: Type::Path { + path: Path { + segments: [ + PathSegment { + ident: "T", + }, + ], + }, + }, + } + "#); +} + +#[test] +fn test_macro_variable_impl() { + // mimics the token stream corresponding to `impl $trait for $ty {}` + let tokens = TokenStream::from_iter([ + TokenTree::Ident(Ident::new("impl", Span::call_site())), + TokenTree::Group(Group::new(Delimiter::None, quote!(Trait))), + TokenTree::Ident(Ident::new("for", Span::call_site())), + TokenTree::Group(Group::new(Delimiter::None, quote!(Type))), + TokenTree::Group(Group::new(Delimiter::Brace, TokenStream::new())), + ]); + + snapshot!(tokens as Item, @r#" + Item::Impl { + generics: Generics, + trait_: Some(( + None, + Path { + segments: [ + PathSegment { + ident: "Trait", + }, + ], + }, + )), + self_ty: Type::Group { + elem: Type::Path { + path: Path { + segments: [ + PathSegment { + ident: "Type", + }, + ], + }, + }, + }, + } + "#); +} + +#[test] +fn test_supertraits() { + // Rustc parses all of the following. + + #[rustfmt::skip] + let tokens = quote!(trait Trait where {}); + snapshot!(tokens as ItemTrait, @r#" + ItemTrait { + vis: Visibility::Inherited, + ident: "Trait", + generics: Generics { + where_clause: Some(WhereClause), + }, + } + "#); + + #[rustfmt::skip] + let tokens = quote!(trait Trait: where {}); + snapshot!(tokens as ItemTrait, @r#" + ItemTrait { + vis: Visibility::Inherited, + ident: "Trait", + generics: Generics { + where_clause: Some(WhereClause), + }, + colon_token: Some, + } + "#); + + #[rustfmt::skip] + let tokens = quote!(trait Trait: Sized where {}); + snapshot!(tokens as ItemTrait, @r#" + ItemTrait { + vis: Visibility::Inherited, + ident: "Trait", + generics: Generics { + where_clause: Some(WhereClause), + }, + colon_token: Some, + supertraits: [ + TypeParamBound::Trait(TraitBound { + path: Path { + segments: [ + PathSegment { + ident: "Sized", + }, + ], + }, + }), + ], + } + "#); + + #[rustfmt::skip] + let tokens = quote!(trait Trait: Sized + where {}); + snapshot!(tokens as ItemTrait, @r#" + ItemTrait { + vis: Visibility::Inherited, + ident: "Trait", + generics: Generics { + where_clause: Some(WhereClause), + }, + colon_token: Some, + supertraits: [ + TypeParamBound::Trait(TraitBound { + path: Path { + segments: [ + PathSegment { + ident: "Sized", + }, + ], + }, + }), + Token![+], + ], + } + "#); +} + +#[test] +fn test_type_empty_bounds() { + #[rustfmt::skip] + let tokens = quote! { + trait Foo { + type Bar: ; + } + }; + + snapshot!(tokens as ItemTrait, @r#" + ItemTrait { + vis: Visibility::Inherited, + ident: "Foo", + generics: Generics, + items: [ + TraitItem::Type { + ident: "Bar", + generics: Generics, + colon_token: Some, + }, + ], + } + "#); +} + +#[test] +fn test_impl_visibility() { + let tokens = quote! { + pub default unsafe impl union {} + }; + + snapshot!(tokens as Item, @"Item::Verbatim(`pub default unsafe impl union { }`)"); +} + +#[test] +fn test_impl_type_parameter_defaults() { + #[cfg(any())] + impl<T = ()> () {} + let tokens = quote! 
{ + impl<T = ()> () {} + }; + snapshot!(tokens as Item, @r#" + Item::Impl { + generics: Generics { + lt_token: Some, + params: [ + GenericParam::Type(TypeParam { + ident: "T", + eq_token: Some, + default: Some(Type::Tuple), + }), + ], + gt_token: Some, + }, + self_ty: Type::Tuple, + } + "#); +} + +#[test] +fn test_impl_trait_trailing_plus() { + let tokens = quote! { + fn f() -> impl Sized + {} + }; + + snapshot!(tokens as Item, @r#" + Item::Fn { + vis: Visibility::Inherited, + sig: Signature { + ident: "f", + generics: Generics, + output: ReturnType::Type( + Type::ImplTrait { + bounds: [ + TypeParamBound::Trait(TraitBound { + path: Path { + segments: [ + PathSegment { + ident: "Sized", + }, + ], + }, + }), + Token![+], + ], + }, + ), + }, + block: Block { + stmts: [], + }, + } + "#); +} diff --git a/vendor/syn/tests/test_lit.rs b/vendor/syn/tests/test_lit.rs new file mode 100644 index 00000000000000..f2367b44165daf --- /dev/null +++ b/vendor/syn/tests/test_lit.rs @@ -0,0 +1,335 @@ +#![allow( + clippy::elidable_lifetime_names, + clippy::float_cmp, + clippy::needless_lifetimes, + clippy::needless_raw_string_hashes, + clippy::non_ascii_literal, + clippy::single_match_else, + clippy::uninlined_format_args +)] + +#[macro_use] +mod snapshot; + +mod debug; + +use proc_macro2::{Delimiter, Group, Literal, Span, TokenStream, TokenTree}; +use quote::ToTokens; +use std::ffi::CStr; +use std::str::FromStr; +use syn::{Lit, LitFloat, LitInt, LitStr}; + +#[track_caller] +fn lit(s: &str) -> Lit { + let mut tokens = TokenStream::from_str(s).unwrap().into_iter(); + match tokens.next().unwrap() { + TokenTree::Literal(lit) => { + assert!(tokens.next().is_none()); + Lit::new(lit) + } + wrong => panic!("{:?}", wrong), + } +} + +#[test] +fn strings() { + #[track_caller] + fn test_string(s: &str, value: &str) { + let s = s.trim(); + match lit(s) { + Lit::Str(lit) => { + assert_eq!(lit.value(), value); + let again = lit.into_token_stream().to_string(); + if again != s { + test_string(&again, value); + } + } + wrong => panic!("{:?}", wrong), + } + } + + test_string(r#" "" "#, ""); + test_string(r#" "a" "#, "a"); + test_string(r#" "\n" "#, "\n"); + test_string(r#" "\r" "#, "\r"); + test_string(r#" "\t" "#, "\t"); + test_string(r#" "🐕" "#, "🐕"); // NOTE: This is an emoji + test_string(r#" "\"" "#, "\""); + test_string(r#" "'" "#, "'"); + test_string(r#" "\u{1F415}" "#, "\u{1F415}"); + test_string(r#" "\u{1_2__3_}" "#, "\u{123}"); + test_string( + "\"contains\nnewlines\\\nescaped newlines\"", + "contains\nnewlinesescaped newlines", + ); + test_string( + "\"escaped newline\\\n \x0C unsupported whitespace\"", + "escaped newline\x0C unsupported whitespace", + ); + test_string("r\"raw\nstring\\\nhere\"", "raw\nstring\\\nhere"); + test_string("\"...\"q", "..."); + test_string("r\"...\"q", "..."); + test_string("r##\"...\"##q", "..."); +} + +#[test] +fn byte_strings() { + #[track_caller] + fn test_byte_string(s: &str, value: &[u8]) { + let s = s.trim(); + match lit(s) { + Lit::ByteStr(lit) => { + assert_eq!(lit.value(), value); + let again = lit.into_token_stream().to_string(); + if again != s { + test_byte_string(&again, value); + } + } + wrong => panic!("{:?}", wrong), + } + } + + test_byte_string(r#" b"" "#, b""); + test_byte_string(r#" b"a" "#, b"a"); + test_byte_string(r#" b"\n" "#, b"\n"); + test_byte_string(r#" b"\r" "#, b"\r"); + test_byte_string(r#" b"\t" "#, b"\t"); + test_byte_string(r#" b"\"" "#, b"\""); + test_byte_string(r#" b"'" "#, b"'"); + test_byte_string( + "b\"contains\nnewlines\\\nescaped newlines\"", + 
b"contains\nnewlinesescaped newlines", + ); + test_byte_string("br\"raw\nstring\\\nhere\"", b"raw\nstring\\\nhere"); + test_byte_string("b\"...\"q", b"..."); + test_byte_string("br\"...\"q", b"..."); + test_byte_string("br##\"...\"##q", b"..."); +} + +#[test] +fn c_strings() { + #[track_caller] + fn test_c_string(s: &str, value: &CStr) { + let s = s.trim(); + match lit(s) { + Lit::CStr(lit) => { + assert_eq!(*lit.value(), *value); + let again = lit.into_token_stream().to_string(); + if again != s { + test_c_string(&again, value); + } + } + wrong => panic!("{:?}", wrong), + } + } + + test_c_string(r#" c"" "#, c""); + test_c_string(r#" c"a" "#, c"a"); + test_c_string(r#" c"\n" "#, c"\n"); + test_c_string(r#" c"\r" "#, c"\r"); + test_c_string(r#" c"\t" "#, c"\t"); + test_c_string(r#" c"\\" "#, c"\\"); + test_c_string(r#" c"\'" "#, c"'"); + test_c_string(r#" c"\"" "#, c"\""); + test_c_string( + "c\"contains\nnewlines\\\nescaped newlines\"", + c"contains\nnewlinesescaped newlines", + ); + test_c_string("cr\"raw\nstring\\\nhere\"", c"raw\nstring\\\nhere"); + test_c_string("c\"...\"q", c"..."); + test_c_string("cr\"...\"", c"..."); + test_c_string("cr##\"...\"##", c"..."); + test_c_string( + r#" c"hello\x80我叫\u{1F980}" "#, // from the RFC + c"hello\x80我叫\u{1F980}", + ); +} + +#[test] +fn bytes() { + #[track_caller] + fn test_byte(s: &str, value: u8) { + let s = s.trim(); + match lit(s) { + Lit::Byte(lit) => { + assert_eq!(lit.value(), value); + let again = lit.into_token_stream().to_string(); + assert_eq!(again, s); + } + wrong => panic!("{:?}", wrong), + } + } + + test_byte(r#" b'a' "#, b'a'); + test_byte(r#" b'\n' "#, b'\n'); + test_byte(r#" b'\r' "#, b'\r'); + test_byte(r#" b'\t' "#, b'\t'); + test_byte(r#" b'\'' "#, b'\''); + test_byte(r#" b'"' "#, b'"'); + test_byte(r#" b'a'q "#, b'a'); +} + +#[test] +fn chars() { + #[track_caller] + fn test_char(s: &str, value: char) { + let s = s.trim(); + match lit(s) { + Lit::Char(lit) => { + assert_eq!(lit.value(), value); + let again = lit.into_token_stream().to_string(); + if again != s { + test_char(&again, value); + } + } + wrong => panic!("{:?}", wrong), + } + } + + test_char(r#" 'a' "#, 'a'); + test_char(r#" '\n' "#, '\n'); + test_char(r#" '\r' "#, '\r'); + test_char(r#" '\t' "#, '\t'); + test_char(r#" '🐕' "#, '🐕'); // NOTE: This is an emoji + test_char(r#" '\'' "#, '\''); + test_char(r#" '"' "#, '"'); + test_char(r#" '\u{1F415}' "#, '\u{1F415}'); + test_char(r#" 'a'q "#, 'a'); +} + +#[test] +fn ints() { + #[track_caller] + fn test_int(s: &str, value: u64, suffix: &str) { + match lit(s) { + Lit::Int(lit) => { + assert_eq!(lit.base10_digits().parse::<u64>().unwrap(), value); + assert_eq!(lit.suffix(), suffix); + let again = lit.into_token_stream().to_string(); + if again != s { + test_int(&again, value, suffix); + } + } + wrong => panic!("{:?}", wrong), + } + } + + test_int("5", 5, ""); + test_int("5u32", 5, "u32"); + test_int("0E", 0, "E"); + test_int("0ECMA", 0, "ECMA"); + test_int("0o0A", 0, "A"); + test_int("5_0", 50, ""); + test_int("5_____0_____", 50, ""); + test_int("0x7f", 127, ""); + test_int("0x7F", 127, ""); + test_int("0b1001", 9, ""); + test_int("0o73", 59, ""); + test_int("0x7Fu8", 127, "u8"); + test_int("0b1001i8", 9, "i8"); + test_int("0o73u32", 59, "u32"); + test_int("0x__7___f_", 127, ""); + test_int("0x__7___F_", 127, ""); + test_int("0b_1_0__01", 9, ""); + test_int("0o_7__3", 59, ""); + test_int("0x_7F__u8", 127, "u8"); + test_int("0b__10__0_1i8", 9, "i8"); + test_int("0o__7__________________3u32", 59, "u32"); + 
test_int("0e1\u{5c5}", 0, "e1\u{5c5}"); +} + +#[test] +fn floats() { + #[track_caller] + fn test_float(s: &str, value: f64, suffix: &str) { + match lit(s) { + Lit::Float(lit) => { + assert_eq!(lit.base10_digits().parse::<f64>().unwrap(), value); + assert_eq!(lit.suffix(), suffix); + let again = lit.into_token_stream().to_string(); + if again != s { + test_float(&again, value, suffix); + } + } + wrong => panic!("{:?}", wrong), + } + } + + test_float("5.5", 5.5, ""); + test_float("5.5E12", 5.5e12, ""); + test_float("5.5e12", 5.5e12, ""); + test_float("1.0__3e-12", 1.03e-12, ""); + test_float("1.03e+12", 1.03e12, ""); + test_float("9e99e99", 9e99, "e99"); + test_float("1e_0", 1.0, ""); + test_float("0.0ECMA", 0.0, "ECMA"); +} + +#[test] +fn negative() { + let span = Span::call_site(); + assert_eq!("-1", LitInt::new("-1", span).to_string()); + assert_eq!("-1i8", LitInt::new("-1i8", span).to_string()); + assert_eq!("-1i16", LitInt::new("-1i16", span).to_string()); + assert_eq!("-1i32", LitInt::new("-1i32", span).to_string()); + assert_eq!("-1i64", LitInt::new("-1i64", span).to_string()); + assert_eq!("-1.5", LitFloat::new("-1.5", span).to_string()); + assert_eq!("-1.5f32", LitFloat::new("-1.5f32", span).to_string()); + assert_eq!("-1.5f64", LitFloat::new("-1.5f64", span).to_string()); +} + +#[test] +fn suffix() { + #[track_caller] + fn get_suffix(token: &str) -> String { + let lit = syn::parse_str::<Lit>(token).unwrap(); + match lit { + Lit::Str(lit) => lit.suffix().to_owned(), + Lit::ByteStr(lit) => lit.suffix().to_owned(), + Lit::CStr(lit) => lit.suffix().to_owned(), + Lit::Byte(lit) => lit.suffix().to_owned(), + Lit::Char(lit) => lit.suffix().to_owned(), + Lit::Int(lit) => lit.suffix().to_owned(), + Lit::Float(lit) => lit.suffix().to_owned(), + _ => unimplemented!(), + } + } + + assert_eq!(get_suffix("\"\"s"), "s"); + assert_eq!(get_suffix("r\"\"r"), "r"); + assert_eq!(get_suffix("r#\"\"#r"), "r"); + assert_eq!(get_suffix("b\"\"b"), "b"); + assert_eq!(get_suffix("br\"\"br"), "br"); + assert_eq!(get_suffix("br#\"\"#br"), "br"); + assert_eq!(get_suffix("c\"\"c"), "c"); + assert_eq!(get_suffix("cr\"\"cr"), "cr"); + assert_eq!(get_suffix("cr#\"\"#cr"), "cr"); + assert_eq!(get_suffix("'c'c"), "c"); + assert_eq!(get_suffix("b'b'b"), "b"); + assert_eq!(get_suffix("1i32"), "i32"); + assert_eq!(get_suffix("1_i32"), "i32"); + assert_eq!(get_suffix("1.0f32"), "f32"); + assert_eq!(get_suffix("1.0_f32"), "f32"); +} + +#[test] +fn test_deep_group_empty() { + let tokens = TokenStream::from_iter([TokenTree::Group(Group::new( + Delimiter::None, + TokenStream::from_iter([TokenTree::Group(Group::new( + Delimiter::None, + TokenStream::from_iter([TokenTree::Literal(Literal::string("hi"))]), + ))]), + ))]); + + snapshot!(tokens as Lit, @r#""hi""# ); +} + +#[test] +fn test_error() { + let err = syn::parse_str::<LitStr>("...").unwrap_err(); + assert_eq!("expected string literal", err.to_string()); + + let err = syn::parse_str::<LitStr>("5").unwrap_err(); + assert_eq!("expected string literal", err.to_string()); +} diff --git a/vendor/syn/tests/test_meta.rs b/vendor/syn/tests/test_meta.rs new file mode 100644 index 00000000000000..4e1f9caf38e0c1 --- /dev/null +++ b/vendor/syn/tests/test_meta.rs @@ -0,0 +1,180 @@ +#![allow( + clippy::elidable_lifetime_names, + clippy::needless_lifetimes, + clippy::shadow_unrelated, + clippy::too_many_lines, + clippy::uninlined_format_args +)] + +#[macro_use] +mod snapshot; + +mod debug; + +use quote::quote; +use syn::parse::{ParseStream, Parser as _, Result}; +use syn::{Meta, 
MetaList, MetaNameValue, Token}; + +#[test] +fn test_parse_meta_item_word() { + let input = "hello"; + + snapshot!(input as Meta, @r#" + Meta::Path { + segments: [ + PathSegment { + ident: "hello", + }, + ], + } + "#); +} + +#[test] +fn test_parse_meta_name_value() { + let input = "foo = 5"; + let (inner, meta) = (input, input); + + snapshot!(inner as MetaNameValue, @r#" + MetaNameValue { + path: Path { + segments: [ + PathSegment { + ident: "foo", + }, + ], + }, + value: Expr::Lit { + lit: 5, + }, + } + "#); + + snapshot!(meta as Meta, @r#" + Meta::NameValue { + path: Path { + segments: [ + PathSegment { + ident: "foo", + }, + ], + }, + value: Expr::Lit { + lit: 5, + }, + } + "#); + + assert_eq!(meta, Meta::NameValue(inner)); +} + +#[test] +fn test_parse_meta_item_list_lit() { + let input = "foo(5)"; + let (inner, meta) = (input, input); + + snapshot!(inner as MetaList, @r#" + MetaList { + path: Path { + segments: [ + PathSegment { + ident: "foo", + }, + ], + }, + delimiter: MacroDelimiter::Paren, + tokens: TokenStream(`5`), + } + "#); + + snapshot!(meta as Meta, @r#" + Meta::List { + path: Path { + segments: [ + PathSegment { + ident: "foo", + }, + ], + }, + delimiter: MacroDelimiter::Paren, + tokens: TokenStream(`5`), + } + "#); + + assert_eq!(meta, Meta::List(inner)); +} + +#[test] +fn test_parse_meta_item_multiple() { + let input = "foo(word, name = 5, list(name2 = 6), word2)"; + let (inner, meta) = (input, input); + + snapshot!(inner as MetaList, @r#" + MetaList { + path: Path { + segments: [ + PathSegment { + ident: "foo", + }, + ], + }, + delimiter: MacroDelimiter::Paren, + tokens: TokenStream(`word , name = 5 , list (name2 = 6) , word2`), + } + "#); + + snapshot!(meta as Meta, @r#" + Meta::List { + path: Path { + segments: [ + PathSegment { + ident: "foo", + }, + ], + }, + delimiter: MacroDelimiter::Paren, + tokens: TokenStream(`word , name = 5 , list (name2 = 6) , word2`), + } + "#); + + assert_eq!(meta, Meta::List(inner)); +} + +#[test] +fn test_parse_path() { + let input = "::serde::Serialize"; + snapshot!(input as Meta, @r#" + Meta::Path { + leading_colon: Some, + segments: [ + PathSegment { + ident: "serde", + }, + Token![::], + PathSegment { + ident: "Serialize", + }, + ], + } + "#); +} + +#[test] +fn test_fat_arrow_after_meta() { + fn parse(input: ParseStream) -> Result<()> { + while !input.is_empty() { + let _: Meta = input.parse()?; + let _: Token![=>] = input.parse()?; + let brace; + syn::braced!(brace in input); + } + Ok(()) + } + + let input = quote! 
{ + target_os = "linux" => {} + windows => {} + }; + + parse.parse2(input).unwrap(); +} diff --git a/vendor/syn/tests/test_parse_buffer.rs b/vendor/syn/tests/test_parse_buffer.rs new file mode 100644 index 00000000000000..62abc6d2825407 --- /dev/null +++ b/vendor/syn/tests/test_parse_buffer.rs @@ -0,0 +1,103 @@ +#![allow(clippy::non_ascii_literal)] + +use proc_macro2::{Delimiter, Group, Ident, Punct, Spacing, TokenStream, TokenTree}; +use std::panic; +use syn::parse::discouraged::Speculative as _; +use syn::parse::{Parse, ParseStream, Parser, Result}; +use syn::{parenthesized, Token}; + +#[test] +#[should_panic(expected = "fork was not derived from the advancing parse stream")] +fn smuggled_speculative_cursor_between_sources() { + struct BreakRules; + impl Parse for BreakRules { + fn parse(input1: ParseStream) -> Result<Self> { + let nested = |input2: ParseStream| { + input1.advance_to(input2); + Ok(Self) + }; + nested.parse_str("") + } + } + + syn::parse_str::<BreakRules>("").unwrap(); +} + +#[test] +#[should_panic(expected = "fork was not derived from the advancing parse stream")] +fn smuggled_speculative_cursor_between_brackets() { + struct BreakRules; + impl Parse for BreakRules { + fn parse(input: ParseStream) -> Result<Self> { + let a; + let b; + parenthesized!(a in input); + parenthesized!(b in input); + a.advance_to(&b); + Ok(Self) + } + } + + syn::parse_str::<BreakRules>("()()").unwrap(); +} + +#[test] +#[should_panic(expected = "fork was not derived from the advancing parse stream")] +fn smuggled_speculative_cursor_into_brackets() { + struct BreakRules; + impl Parse for BreakRules { + fn parse(input: ParseStream) -> Result<Self> { + let a; + parenthesized!(a in input); + input.advance_to(&a); + Ok(Self) + } + } + + syn::parse_str::<BreakRules>("()").unwrap(); +} + +#[test] +fn trailing_empty_none_group() { + fn parse(input: ParseStream) -> Result<()> { + input.parse::<Token![+]>()?; + + let content; + parenthesized!(content in input); + content.parse::<Token![+]>()?; + + Ok(()) + } + + // `+ ( + «∅ ∅» ) «∅ «∅ ∅» ∅»` + let tokens = TokenStream::from_iter([ + TokenTree::Punct(Punct::new('+', Spacing::Alone)), + TokenTree::Group(Group::new( + Delimiter::Parenthesis, + TokenStream::from_iter([ + TokenTree::Punct(Punct::new('+', Spacing::Alone)), + TokenTree::Group(Group::new(Delimiter::None, TokenStream::new())), + ]), + )), + TokenTree::Group(Group::new(Delimiter::None, TokenStream::new())), + TokenTree::Group(Group::new( + Delimiter::None, + TokenStream::from_iter([TokenTree::Group(Group::new( + Delimiter::None, + TokenStream::new(), + ))]), + )), + ]); + + parse.parse2(tokens).unwrap(); +} + +#[test] +fn test_unwind_safe() { + fn parse(input: ParseStream) -> Result<Ident> { + let thread_result = panic::catch_unwind(|| input.parse()); + thread_result.unwrap() + } + + parse.parse_str("throw").unwrap(); +} diff --git a/vendor/syn/tests/test_parse_quote.rs b/vendor/syn/tests/test_parse_quote.rs new file mode 100644 index 00000000000000..600870bab58a43 --- /dev/null +++ b/vendor/syn/tests/test_parse_quote.rs @@ -0,0 +1,172 @@ +#![allow( + clippy::elidable_lifetime_names, + clippy::needless_lifetimes, + clippy::uninlined_format_args +)] + +#[macro_use] +mod snapshot; + +mod debug; + +use syn::punctuated::Punctuated; +use syn::{parse_quote, Attribute, Field, Lit, Pat, Stmt, Token}; + +#[test] +fn test_attribute() { + let attr: Attribute = parse_quote!(#[test]); + snapshot!(attr, @r#" + Attribute { + style: AttrStyle::Outer, + meta: Meta::Path { + segments: [ + PathSegment { + ident: 
"test", + }, + ], + }, + } + "#); + + let attr: Attribute = parse_quote!(#![no_std]); + snapshot!(attr, @r#" + Attribute { + style: AttrStyle::Inner, + meta: Meta::Path { + segments: [ + PathSegment { + ident: "no_std", + }, + ], + }, + } + "#); +} + +#[test] +fn test_field() { + let field: Field = parse_quote!(pub enabled: bool); + snapshot!(field, @r#" + Field { + vis: Visibility::Public, + ident: Some("enabled"), + colon_token: Some, + ty: Type::Path { + path: Path { + segments: [ + PathSegment { + ident: "bool", + }, + ], + }, + }, + } + "#); + + let field: Field = parse_quote!(primitive::bool); + snapshot!(field, @r#" + Field { + vis: Visibility::Inherited, + ty: Type::Path { + path: Path { + segments: [ + PathSegment { + ident: "primitive", + }, + Token![::], + PathSegment { + ident: "bool", + }, + ], + }, + }, + } + "#); +} + +#[test] +fn test_pat() { + let pat: Pat = parse_quote!(Some(false) | None); + snapshot!(&pat, @r#" + Pat::Or { + cases: [ + Pat::TupleStruct { + path: Path { + segments: [ + PathSegment { + ident: "Some", + }, + ], + }, + elems: [ + Pat::Lit(ExprLit { + lit: Lit::Bool { + value: false, + }, + }), + ], + }, + Token![|], + Pat::Ident { + ident: "None", + }, + ], + } + "#); + + let boxed_pat: Box<Pat> = parse_quote!(Some(false) | None); + assert_eq!(*boxed_pat, pat); +} + +#[test] +fn test_punctuated() { + let punctuated: Punctuated<Lit, Token![|]> = parse_quote!(true | true); + snapshot!(punctuated, @r#" + [ + Lit::Bool { + value: true, + }, + Token![|], + Lit::Bool { + value: true, + }, + ] + "#); + + let punctuated: Punctuated<Lit, Token![|]> = parse_quote!(true | true |); + snapshot!(punctuated, @r#" + [ + Lit::Bool { + value: true, + }, + Token![|], + Lit::Bool { + value: true, + }, + Token![|], + ] + "#); +} + +#[test] +fn test_vec_stmt() { + let stmts: Vec<Stmt> = parse_quote! 
{ + let _; + true + }; + snapshot!(stmts, @r#" + [ + Stmt::Local { + pat: Pat::Wild, + }, + Stmt::Expr( + Expr::Lit { + lit: Lit::Bool { + value: true, + }, + }, + None, + ), + ] + "#); +} diff --git a/vendor/syn/tests/test_parse_stream.rs b/vendor/syn/tests/test_parse_stream.rs new file mode 100644 index 00000000000000..a650fc85346c25 --- /dev/null +++ b/vendor/syn/tests/test_parse_stream.rs @@ -0,0 +1,187 @@ +#![allow(clippy::items_after_statements, clippy::let_underscore_untyped)] + +use proc_macro2::{Delimiter, Group, Punct, Spacing, Span, TokenStream, TokenTree}; +use quote::quote; +use syn::ext::IdentExt as _; +use syn::parse::discouraged::AnyDelimiter; +use syn::parse::{ParseStream, Parser as _, Result}; +use syn::{parenthesized, token, Ident, Lifetime, Token}; + +#[test] +fn test_peek_punct() { + let tokens = quote!(+= + =); + + fn assert(input: ParseStream) -> Result<()> { + assert!(input.peek(Token![+])); + assert!(input.peek(Token![+=])); + + let _: Token![+] = input.parse()?; + + assert!(input.peek(Token![=])); + assert!(!input.peek(Token![==])); + assert!(!input.peek(Token![+])); + + let _: Token![=] = input.parse()?; + + assert!(input.peek(Token![+])); + assert!(!input.peek(Token![+=])); + + let _: Token![+] = input.parse()?; + let _: Token![=] = input.parse()?; + Ok(()) + } + + assert.parse2(tokens).unwrap(); +} + +#[test] +fn test_peek_lifetime() { + // 'static ; + let tokens = TokenStream::from_iter([ + TokenTree::Punct(Punct::new('\'', Spacing::Joint)), + TokenTree::Ident(Ident::new("static", Span::call_site())), + TokenTree::Punct(Punct::new(';', Spacing::Alone)), + ]); + + fn assert(input: ParseStream) -> Result<()> { + assert!(input.peek(Lifetime)); + assert!(input.peek2(Token![;])); + assert!(!input.peek2(Token![static])); + + let _: Lifetime = input.parse()?; + + assert!(input.peek(Token![;])); + + let _: Token![;] = input.parse()?; + Ok(()) + } + + assert.parse2(tokens).unwrap(); +} + +#[test] +fn test_peek_not_lifetime() { + // ' static + let tokens = TokenStream::from_iter([ + TokenTree::Punct(Punct::new('\'', Spacing::Alone)), + TokenTree::Ident(Ident::new("static", Span::call_site())), + ]); + + fn assert(input: ParseStream) -> Result<()> { + assert!(!input.peek(Lifetime)); + assert!(input.parse::<Option<Punct>>()?.is_none()); + + let _: TokenTree = input.parse()?; + + assert!(input.peek(Token![static])); + + let _: Token![static] = input.parse()?; + Ok(()) + } + + assert.parse2(tokens).unwrap(); +} + +#[test] +fn test_peek_ident() { + let tokens = quote!(static var); + + fn assert(input: ParseStream) -> Result<()> { + assert!(!input.peek(Ident)); + assert!(input.peek(Ident::peek_any)); + assert!(input.peek(Token![static])); + + let _: Token![static] = input.parse()?; + + assert!(input.peek(Ident)); + assert!(input.peek(Ident::peek_any)); + + let _: Ident = input.parse()?; + Ok(()) + } + + assert.parse2(tokens).unwrap(); +} + +#[test] +fn test_peek_groups() { + // pub ( :: ) «∅ ! 
= ∅» static + let tokens = TokenStream::from_iter([ + TokenTree::Ident(Ident::new("pub", Span::call_site())), + TokenTree::Group(Group::new( + Delimiter::Parenthesis, + TokenStream::from_iter([ + TokenTree::Punct(Punct::new(':', Spacing::Joint)), + TokenTree::Punct(Punct::new(':', Spacing::Alone)), + ]), + )), + TokenTree::Group(Group::new( + Delimiter::None, + TokenStream::from_iter([ + TokenTree::Punct(Punct::new('!', Spacing::Alone)), + TokenTree::Punct(Punct::new('=', Spacing::Alone)), + ]), + )), + TokenTree::Ident(Ident::new("static", Span::call_site())), + ]); + + fn assert(input: ParseStream) -> Result<()> { + assert!(input.peek2(token::Paren)); + assert!(input.peek3(token::Group)); + assert!(input.peek3(Token![!])); + + let _: Token![pub] = input.parse()?; + + assert!(input.peek(token::Paren)); + assert!(!input.peek(Token![::])); + assert!(!input.peek2(Token![::])); + assert!(input.peek2(Token![!])); + assert!(input.peek2(token::Group)); + assert!(input.peek3(Token![=])); + assert!(!input.peek3(Token![static])); + + let content; + parenthesized!(content in input); + + assert!(content.peek(Token![::])); + assert!(content.peek2(Token![:])); + assert!(!content.peek3(token::Group)); + assert!(!content.peek3(Token![!])); + + assert!(input.peek(token::Group)); + assert!(input.peek(Token![!])); + + let _: Token![::] = content.parse()?; + + assert!(input.peek(token::Group)); + assert!(input.peek(Token![!])); + assert!(input.peek2(Token![=])); + assert!(input.peek3(Token![static])); + assert!(!input.peek2(Token![static])); + + let implicit = input.fork(); + let explicit = input.fork(); + + let _: Token![!] = implicit.parse()?; + assert!(implicit.peek(Token![=])); + assert!(implicit.peek2(Token![static])); + let _: Token![=] = implicit.parse()?; + assert!(implicit.peek(Token![static])); + + let (delimiter, _span, grouped) = explicit.parse_any_delimiter()?; + assert_eq!(delimiter, Delimiter::None); + assert!(grouped.peek(Token![!])); + assert!(grouped.peek2(Token![=])); + assert!(!grouped.peek3(Token![static])); + let _: Token![!] 
= grouped.parse()?; + assert!(grouped.peek(Token![=])); + assert!(!grouped.peek2(Token![static])); + let _: Token![=] = grouped.parse()?; + assert!(!grouped.peek(Token![static])); + + let _: TokenStream = input.parse()?; + Ok(()) + } + + assert.parse2(tokens).unwrap(); +} diff --git a/vendor/syn/tests/test_pat.rs b/vendor/syn/tests/test_pat.rs new file mode 100644 index 00000000000000..f778928bc99341 --- /dev/null +++ b/vendor/syn/tests/test_pat.rs @@ -0,0 +1,158 @@ +#![allow( + clippy::elidable_lifetime_names, + clippy::needless_lifetimes, + clippy::uninlined_format_args +)] + +#[macro_use] +mod snapshot; + +mod debug; + +use proc_macro2::{Delimiter, Group, TokenStream, TokenTree}; +use quote::{quote, ToTokens as _}; +use syn::parse::Parser; +use syn::punctuated::Punctuated; +use syn::{parse_quote, token, Item, Pat, PatTuple, Stmt, Token}; + +#[test] +fn test_pat_ident() { + match Pat::parse_single.parse2(quote!(self)).unwrap() { + Pat::Ident(_) => (), + value => panic!("expected PatIdent, got {:?}", value), + } +} + +#[test] +fn test_pat_path() { + match Pat::parse_single.parse2(quote!(self::CONST)).unwrap() { + Pat::Path(_) => (), + value => panic!("expected PatPath, got {:?}", value), + } +} + +#[test] +fn test_leading_vert() { + // https://github.com/rust-lang/rust/blob/1.43.0/src/test/ui/or-patterns/remove-leading-vert.rs + + syn::parse_str::<Item>("fn f() {}").unwrap(); + syn::parse_str::<Item>("fn fun1(| A: E) {}").unwrap_err(); + syn::parse_str::<Item>("fn fun2(|| A: E) {}").unwrap_err(); + + syn::parse_str::<Stmt>("let | () = ();").unwrap_err(); + syn::parse_str::<Stmt>("let (| A): E;").unwrap(); + syn::parse_str::<Stmt>("let (|| A): (E);").unwrap_err(); + syn::parse_str::<Stmt>("let (| A,): (E,);").unwrap(); + syn::parse_str::<Stmt>("let [| A]: [E; 1];").unwrap(); + syn::parse_str::<Stmt>("let [|| A]: [E; 1];").unwrap_err(); + syn::parse_str::<Stmt>("let TS(| A): TS;").unwrap(); + syn::parse_str::<Stmt>("let TS(|| A): TS;").unwrap_err(); + syn::parse_str::<Stmt>("let NS { f: | A }: NS;").unwrap(); + syn::parse_str::<Stmt>("let NS { f: || A }: NS;").unwrap_err(); +} + +#[test] +fn test_group() { + let group = Group::new(Delimiter::None, quote!(Some(_))); + let tokens = TokenStream::from_iter([TokenTree::Group(group)]); + let pat = Pat::parse_single.parse2(tokens).unwrap(); + + snapshot!(pat, @r#" + Pat::TupleStruct { + path: Path { + segments: [ + PathSegment { + ident: "Some", + }, + ], + }, + elems: [ + Pat::Wild, + ], + } + "#); +} + +#[test] +fn test_ranges() { + Pat::parse_single.parse_str("..").unwrap(); + Pat::parse_single.parse_str("..hi").unwrap(); + Pat::parse_single.parse_str("lo..").unwrap(); + Pat::parse_single.parse_str("lo..hi").unwrap(); + + Pat::parse_single.parse_str("..=").unwrap_err(); + Pat::parse_single.parse_str("..=hi").unwrap(); + Pat::parse_single.parse_str("lo..=").unwrap_err(); + Pat::parse_single.parse_str("lo..=hi").unwrap(); + + Pat::parse_single.parse_str("...").unwrap_err(); + Pat::parse_single.parse_str("...hi").unwrap_err(); + Pat::parse_single.parse_str("lo...").unwrap_err(); + Pat::parse_single.parse_str("lo...hi").unwrap(); + + Pat::parse_single.parse_str("[lo..]").unwrap_err(); + Pat::parse_single.parse_str("[..=hi]").unwrap_err(); + Pat::parse_single.parse_str("[(lo..)]").unwrap(); + Pat::parse_single.parse_str("[(..=hi)]").unwrap(); + Pat::parse_single.parse_str("[lo..=hi]").unwrap(); + + Pat::parse_single.parse_str("[_, lo.., _]").unwrap_err(); + Pat::parse_single.parse_str("[_, ..=hi, _]").unwrap_err(); + Pat::parse_single.parse_str("[_, 
(lo..), _]").unwrap(); + Pat::parse_single.parse_str("[_, (..=hi), _]").unwrap(); + Pat::parse_single.parse_str("[_, lo..=hi, _]").unwrap(); +} + +#[test] +fn test_tuple_comma() { + let mut expr = PatTuple { + attrs: Vec::new(), + paren_token: token::Paren::default(), + elems: Punctuated::new(), + }; + snapshot!(expr.to_token_stream() as Pat, @"Pat::Tuple"); + + expr.elems.push_value(parse_quote!(_)); + // Must not parse to Pat::Paren + snapshot!(expr.to_token_stream() as Pat, @r#" + Pat::Tuple { + elems: [ + Pat::Wild, + Token![,], + ], + } + "#); + + expr.elems.push_punct(<Token![,]>::default()); + snapshot!(expr.to_token_stream() as Pat, @r#" + Pat::Tuple { + elems: [ + Pat::Wild, + Token![,], + ], + } + "#); + + expr.elems.push_value(parse_quote!(_)); + snapshot!(expr.to_token_stream() as Pat, @r#" + Pat::Tuple { + elems: [ + Pat::Wild, + Token![,], + Pat::Wild, + ], + } + "#); + + expr.elems.push_punct(<Token![,]>::default()); + snapshot!(expr.to_token_stream() as Pat, @r#" + Pat::Tuple { + elems: [ + Pat::Wild, + Token![,], + Pat::Wild, + Token![,], + ], + } + "#); +} diff --git a/vendor/syn/tests/test_path.rs b/vendor/syn/tests/test_path.rs new file mode 100644 index 00000000000000..7f9e515d26963e --- /dev/null +++ b/vendor/syn/tests/test_path.rs @@ -0,0 +1,116 @@ +#![allow( + clippy::elidable_lifetime_names, + clippy::needless_lifetimes, + clippy::uninlined_format_args +)] + +#[macro_use] +mod snapshot; + +mod debug; + +use proc_macro2::{Delimiter, Group, Ident, Punct, Spacing, Span, TokenStream, TokenTree}; +use quote::{quote, ToTokens}; +use syn::{parse_quote, Expr, Type, TypePath}; + +#[test] +fn parse_interpolated_leading_component() { + // mimics the token stream corresponding to `$mod::rest` + let tokens = TokenStream::from_iter([ + TokenTree::Group(Group::new(Delimiter::None, quote! 
{ first })), + TokenTree::Punct(Punct::new(':', Spacing::Joint)), + TokenTree::Punct(Punct::new(':', Spacing::Alone)), + TokenTree::Ident(Ident::new("rest", Span::call_site())), + ]); + + snapshot!(tokens.clone() as Expr, @r#" + Expr::Path { + path: Path { + segments: [ + PathSegment { + ident: "first", + }, + Token![::], + PathSegment { + ident: "rest", + }, + ], + }, + } + "#); + + snapshot!(tokens as Type, @r#" + Type::Path { + path: Path { + segments: [ + PathSegment { + ident: "first", + }, + Token![::], + PathSegment { + ident: "rest", + }, + ], + }, + } + "#); +} + +#[test] +fn print_incomplete_qpath() { + // qpath with `as` token + let mut ty: TypePath = parse_quote!(<Self as A>::Q); + snapshot!(ty.to_token_stream(), @"TokenStream(`< Self as A > :: Q`)"); + assert!(ty.path.segments.pop().is_some()); + snapshot!(ty.to_token_stream(), @"TokenStream(`< Self as A > ::`)"); + assert!(ty.path.segments.pop().is_some()); + snapshot!(ty.to_token_stream(), @"TokenStream(`< Self >`)"); + assert!(ty.path.segments.pop().is_none()); + + // qpath without `as` token + let mut ty: TypePath = parse_quote!(<Self>::A::B); + snapshot!(ty.to_token_stream(), @"TokenStream(`< Self > :: A :: B`)"); + assert!(ty.path.segments.pop().is_some()); + snapshot!(ty.to_token_stream(), @"TokenStream(`< Self > :: A ::`)"); + assert!(ty.path.segments.pop().is_some()); + snapshot!(ty.to_token_stream(), @"TokenStream(`< Self > ::`)"); + assert!(ty.path.segments.pop().is_none()); + + // normal path + let mut ty: TypePath = parse_quote!(Self::A::B); + snapshot!(ty.to_token_stream(), @"TokenStream(`Self :: A :: B`)"); + assert!(ty.path.segments.pop().is_some()); + snapshot!(ty.to_token_stream(), @"TokenStream(`Self :: A ::`)"); + assert!(ty.path.segments.pop().is_some()); + snapshot!(ty.to_token_stream(), @"TokenStream(`Self ::`)"); + assert!(ty.path.segments.pop().is_some()); + snapshot!(ty.to_token_stream(), @"TokenStream(``)"); + assert!(ty.path.segments.pop().is_none()); +} + +#[test] +fn parse_parenthesized_path_arguments_with_disambiguator() { + #[rustfmt::skip] + let tokens = quote!(dyn FnOnce::() -> !); + snapshot!(tokens as Type, @r#" + Type::TraitObject { + dyn_token: Some, + bounds: [ + TypeParamBound::Trait(TraitBound { + path: Path { + segments: [ + PathSegment { + ident: "FnOnce", + arguments: PathArguments::Parenthesized { + output: ReturnType::Type( + Type::Never, + ), + }, + }, + ], + }, + }), + ], + } + "#); +} diff --git a/vendor/syn/tests/test_precedence.rs b/vendor/syn/tests/test_precedence.rs new file mode 100644 index 00000000000000..eb193a5aef0db9 --- /dev/null +++ b/vendor/syn/tests/test_precedence.rs @@ -0,0 +1,558 @@ +// This test does the following for every file in the rust-lang/rust repo: +// +// 1. Parse the file using syn into a syn::File. +// 2. Extract every syn::Expr from the file. +// 3. Print each expr to a string of source code. +// 4. Parse the source code using librustc_parse into a rustc_ast::Expr. +// 5. For both the syn::Expr and rustc_ast::Expr, crawl the syntax tree to +// insert parentheses surrounding every subexpression. +// 6. Serialize the fully parenthesized syn::Expr to a string of source code. +// 7. Parse the fully parenthesized source code using librustc_parse. +// 8. Compare the rustc_ast::Expr resulting from parenthesizing using rustc data +// structures vs syn data structures, ignoring spans. If they agree, rustc's +// parser and syn's parser have identical handling of expression precedence. 
+ +#![cfg(not(syn_disable_nightly_tests))] +#![cfg(not(miri))] +#![recursion_limit = "1024"] +#![feature(rustc_private)] +#![allow( + clippy::blocks_in_conditions, + clippy::doc_markdown, + clippy::elidable_lifetime_names, + clippy::explicit_deref_methods, + clippy::let_underscore_untyped, + clippy::manual_assert, + clippy::manual_let_else, + clippy::match_like_matches_macro, + clippy::match_wildcard_for_single_variants, + clippy::needless_lifetimes, + clippy::too_many_lines, + clippy::uninlined_format_args, + clippy::unnecessary_box_returns +)] + +extern crate rustc_ast; +extern crate rustc_ast_pretty; +extern crate rustc_data_structures; +extern crate rustc_driver; +extern crate rustc_span; +extern crate smallvec; +extern crate thin_vec; + +use crate::common::eq::SpanlessEq; +use crate::common::parse; +use quote::ToTokens; +use rustc_ast::ast; +use rustc_ast_pretty::pprust; +use rustc_span::edition::Edition; +use std::fs; +use std::mem; +use std::path::Path; +use std::process; +use std::sync::atomic::{AtomicUsize, Ordering}; +use syn::parse::Parser as _; + +#[macro_use] +mod macros; + +mod common; +mod repo; + +#[path = "../src/scan_expr.rs"] +mod scan_expr; + +#[test] +fn test_rustc_precedence() { + repo::rayon_init(); + repo::clone_rust(); + let abort_after = repo::abort_after(); + if abort_after == 0 { + panic!("skipping all precedence tests"); + } + + let passed = AtomicUsize::new(0); + let failed = AtomicUsize::new(0); + + repo::for_each_rust_file(|path| { + let content = fs::read_to_string(path).unwrap(); + + let (l_passed, l_failed) = match syn::parse_file(&content) { + Ok(file) => { + let edition = repo::edition(path).parse().unwrap(); + let exprs = collect_exprs(file); + let (l_passed, l_failed) = test_expressions(path, edition, exprs); + errorf!( + "=== {}: {} passed | {} failed\n", + path.display(), + l_passed, + l_failed, + ); + (l_passed, l_failed) + } + Err(msg) => { + errorf!("\nFAIL {} - syn failed to parse: {}\n", path.display(), msg); + (0, 1) + } + }; + + passed.fetch_add(l_passed, Ordering::Relaxed); + let prev_failed = failed.fetch_add(l_failed, Ordering::Relaxed); + + if prev_failed + l_failed >= abort_after { + process::exit(1); + } + }); + + let passed = passed.into_inner(); + let failed = failed.into_inner(); + + errorf!("\n===== Precedence Test Results =====\n"); + errorf!("{} passed | {} failed\n", passed, failed); + + if failed > 0 { + panic!("{} failures", failed); + } +} + +fn test_expressions(path: &Path, edition: Edition, exprs: Vec<syn::Expr>) -> (usize, usize) { + let mut passed = 0; + let mut failed = 0; + + rustc_span::create_session_if_not_set_then(edition, |_| { + for expr in exprs { + let expr_tokens = expr.to_token_stream(); + let source_code = expr_tokens.to_string(); + let librustc_ast = if let Some(e) = librustc_parse_and_rewrite(&source_code) { + e + } else { + failed += 1; + errorf!( + "\nFAIL {} - librustc failed to parse original\n", + path.display(), + ); + continue; + }; + + let syn_parenthesized_code = + syn_parenthesize(expr.clone()).to_token_stream().to_string(); + let syn_ast = if let Some(e) = parse::librustc_expr(&syn_parenthesized_code) { + e + } else { + failed += 1; + errorf!( + "\nFAIL {} - librustc failed to parse parenthesized\n", + path.display(), + ); + continue; + }; + + if !SpanlessEq::eq(&syn_ast, &librustc_ast) { + failed += 1; + let syn_pretty = pprust::expr_to_string(&syn_ast); + let librustc_pretty = pprust::expr_to_string(&librustc_ast); + errorf!( + "\nFAIL {}\n{}\nsyn != rustc\n{}\n", + path.display(), + syn_pretty, 
+ librustc_pretty, + ); + continue; + } + + let expr_invisible = make_parens_invisible(expr); + let Ok(reparsed_expr_invisible) = syn::parse2(expr_invisible.to_token_stream()) else { + failed += 1; + errorf!( + "\nFAIL {} - syn failed to parse invisible delimiters\n{}\n", + path.display(), + source_code, + ); + continue; + }; + if expr_invisible != reparsed_expr_invisible { + failed += 1; + errorf!( + "\nFAIL {} - mismatch after parsing invisible delimiters\n{}\n", + path.display(), + source_code, + ); + continue; + } + + if scan_expr::scan_expr.parse2(expr_tokens).is_err() { + failed += 1; + errorf!( + "\nFAIL {} - failed to scan expr\n{}\n", + path.display(), + source_code, + ); + continue; + } + + passed += 1; + } + }); + + (passed, failed) +} + +fn librustc_parse_and_rewrite(input: &str) -> Option<Box<ast::Expr>> { + parse::librustc_expr(input).map(librustc_parenthesize) +} + +fn librustc_parenthesize(mut librustc_expr: Box<ast::Expr>) -> Box<ast::Expr> { + use rustc_ast::ast::{ + AssocItem, AssocItemKind, Attribute, BinOpKind, Block, BoundConstness, Expr, ExprField, + ExprKind, GenericArg, GenericBound, Local, LocalKind, Pat, PolyTraitRef, Stmt, StmtKind, + StructExpr, StructRest, TraitBoundModifiers, Ty, + }; + use rustc_ast::mut_visit::{walk_flat_map_assoc_item, MutVisitor}; + use rustc_ast::visit::{AssocCtxt, BoundKind}; + use rustc_data_structures::flat_map_in_place::FlatMapInPlace; + use rustc_span::DUMMY_SP; + use smallvec::SmallVec; + use std::ops::DerefMut; + use thin_vec::ThinVec; + + struct FullyParenthesize; + + fn contains_let_chain(expr: &Expr) -> bool { + match &expr.kind { + ExprKind::Let(..) => true, + ExprKind::Binary(binop, left, right) => { + binop.node == BinOpKind::And + && (contains_let_chain(left) || contains_let_chain(right)) + } + _ => false, + } + } + + fn flat_map_field<T: MutVisitor>(mut f: ExprField, vis: &mut T) -> Vec<ExprField> { + if f.is_shorthand { + noop_visit_expr(&mut f.expr, vis); + } else { + vis.visit_expr(&mut f.expr); + } + vec![f] + } + + fn flat_map_stmt<T: MutVisitor>(stmt: Stmt, vis: &mut T) -> Vec<Stmt> { + let kind = match stmt.kind { + // Don't wrap toplevel expressions in statements. + StmtKind::Expr(mut e) => { + noop_visit_expr(&mut e, vis); + StmtKind::Expr(e) + } + StmtKind::Semi(mut e) => { + noop_visit_expr(&mut e, vis); + StmtKind::Semi(e) + } + s => s, + }; + + vec![Stmt { kind, ..stmt }] + } + + fn noop_visit_expr<T: MutVisitor>(e: &mut Expr, vis: &mut T) { + match &mut e.kind { + ExprKind::Become(..) => {} + ExprKind::Struct(expr) => { + let StructExpr { + qself, + path, + fields, + rest, + } = expr.deref_mut(); + if let Some(qself) = qself { + vis.visit_qself(qself); + } + vis.visit_path(path); + fields.flat_map_in_place(|field| flat_map_field(field, vis)); + if let StructRest::Base(rest) = rest { + vis.visit_expr(rest); + } + } + _ => rustc_ast::mut_visit::walk_expr(vis, e), + } + } + + impl MutVisitor for FullyParenthesize { + fn visit_expr(&mut self, e: &mut Expr) { + noop_visit_expr(e, self); + match e.kind { + ExprKind::Block(..) | ExprKind::If(..) | ExprKind::Let(..) => {} + ExprKind::Binary(..) 
if contains_let_chain(e) => {} + _ => { + let inner = mem::replace(e, Expr::dummy()); + *e = Expr { + id: ast::DUMMY_NODE_ID, + kind: ExprKind::Paren(Box::new(inner)), + span: DUMMY_SP, + attrs: ThinVec::new(), + tokens: None, + }; + } + } + } + + fn visit_generic_arg(&mut self, arg: &mut GenericArg) { + match arg { + GenericArg::Lifetime(_lifetime) => {} + GenericArg::Type(arg) => self.visit_ty(arg), + // Don't wrap unbraced const generic arg as that's invalid syntax. + GenericArg::Const(anon_const) => { + if let ExprKind::Block(..) = &mut anon_const.value.kind { + noop_visit_expr(&mut anon_const.value, self); + } + } + } + } + + fn visit_param_bound(&mut self, bound: &mut GenericBound, _ctxt: BoundKind) { + match bound { + GenericBound::Trait(PolyTraitRef { + modifiers: + TraitBoundModifiers { + constness: BoundConstness::Maybe(_), + .. + }, + .. + }) + | GenericBound::Outlives(..) + | GenericBound::Use(..) => {} + GenericBound::Trait(ty) => self.visit_poly_trait_ref(ty), + } + } + + fn visit_block(&mut self, block: &mut Block) { + self.visit_id(&mut block.id); + block + .stmts + .flat_map_in_place(|stmt| flat_map_stmt(stmt, self)); + self.visit_span(&mut block.span); + } + + fn visit_local(&mut self, local: &mut Local) { + match &mut local.kind { + LocalKind::Decl => {} + LocalKind::Init(init) => { + self.visit_expr(init); + } + LocalKind::InitElse(init, els) => { + self.visit_expr(init); + self.visit_block(els); + } + } + } + + fn flat_map_assoc_item( + &mut self, + item: Box<AssocItem>, + ctxt: AssocCtxt, + ) -> SmallVec<[Box<AssocItem>; 1]> { + match &item.kind { + AssocItemKind::Const(const_item) + if !const_item.generics.params.is_empty() + || !const_item.generics.where_clause.predicates.is_empty() => + { + SmallVec::from([item]) + } + _ => walk_flat_map_assoc_item(self, item, ctxt), + } + } + + // We don't want to look at expressions that might appear in patterns or + // types yet. We'll look into comparing those in the future. For now + // focus on expressions appearing in other places. 
+ fn visit_pat(&mut self, pat: &mut Pat) { + let _ = pat; + } + + fn visit_ty(&mut self, ty: &mut Ty) { + let _ = ty; + } + + fn visit_attribute(&mut self, attr: &mut Attribute) { + let _ = attr; + } + } + + let mut folder = FullyParenthesize; + folder.visit_expr(&mut librustc_expr); + librustc_expr +} + +fn syn_parenthesize(syn_expr: syn::Expr) -> syn::Expr { + use syn::fold::{fold_expr, fold_generic_argument, Fold}; + use syn::{ + token, BinOp, Expr, ExprParen, GenericArgument, Lit, MetaNameValue, Pat, Stmt, Type, + }; + + struct FullyParenthesize; + + fn parenthesize(expr: Expr) -> Expr { + Expr::Paren(ExprParen { + attrs: Vec::new(), + expr: Box::new(expr), + paren_token: token::Paren::default(), + }) + } + + fn needs_paren(expr: &Expr) -> bool { + match expr { + Expr::Group(_) => unreachable!(), + Expr::If(_) | Expr::Unsafe(_) | Expr::Block(_) | Expr::Let(_) => false, + Expr::Binary(_) => !contains_let_chain(expr), + _ => true, + } + } + + fn contains_let_chain(expr: &Expr) -> bool { + match expr { + Expr::Let(_) => true, + Expr::Binary(expr) => { + matches!(expr.op, BinOp::And(_)) + && (contains_let_chain(&expr.left) || contains_let_chain(&expr.right)) + } + _ => false, + } + } + + impl Fold for FullyParenthesize { + fn fold_expr(&mut self, expr: Expr) -> Expr { + let needs_paren = needs_paren(&expr); + let folded = fold_expr(self, expr); + if needs_paren { + parenthesize(folded) + } else { + folded + } + } + + fn fold_generic_argument(&mut self, arg: GenericArgument) -> GenericArgument { + match arg { + GenericArgument::Const(arg) => GenericArgument::Const(match arg { + Expr::Block(_) => fold_expr(self, arg), + // Don't wrap unbraced const generic arg as that's invalid syntax. + _ => arg, + }), + _ => fold_generic_argument(self, arg), + } + } + + fn fold_stmt(&mut self, stmt: Stmt) -> Stmt { + match stmt { + // Don't wrap toplevel expressions in statements. + Stmt::Expr(Expr::Verbatim(_), Some(_)) => stmt, + Stmt::Expr(e, semi) => Stmt::Expr(fold_expr(self, e), semi), + s => s, + } + } + + fn fold_meta_name_value(&mut self, meta: MetaNameValue) -> MetaNameValue { + // Don't turn #[p = "..."] into #[p = ("...")]. + meta + } + + // We don't want to look at expressions that might appear in patterns or + // types yet. We'll look into comparing those in the future. For now + // focus on expressions appearing in other places. 
+ fn fold_pat(&mut self, pat: Pat) -> Pat { + pat + } + + fn fold_type(&mut self, ty: Type) -> Type { + ty + } + + fn fold_lit(&mut self, lit: Lit) -> Lit { + if let Lit::Verbatim(lit) = &lit { + panic!("unexpected verbatim literal: {lit}"); + } + lit + } + } + + let mut folder = FullyParenthesize; + folder.fold_expr(syn_expr) +} + +fn make_parens_invisible(expr: syn::Expr) -> syn::Expr { + use syn::fold::{fold_expr, fold_stmt, Fold}; + use syn::{token, Expr, ExprGroup, ExprParen, Stmt}; + + struct MakeParensInvisible; + + impl Fold for MakeParensInvisible { + fn fold_expr(&mut self, mut expr: Expr) -> Expr { + if let Expr::Paren(paren) = expr { + expr = Expr::Group(ExprGroup { + attrs: paren.attrs, + group_token: token::Group(paren.paren_token.span.join()), + expr: paren.expr, + }); + } + fold_expr(self, expr) + } + + fn fold_stmt(&mut self, stmt: Stmt) -> Stmt { + if let Stmt::Expr(expr @ (Expr::Binary(_) | Expr::Call(_) | Expr::Cast(_)), None) = stmt + { + Stmt::Expr( + Expr::Paren(ExprParen { + attrs: Vec::new(), + paren_token: token::Paren::default(), + expr: Box::new(fold_expr(self, expr)), + }), + None, + ) + } else { + fold_stmt(self, stmt) + } + } + } + + let mut folder = MakeParensInvisible; + folder.fold_expr(expr) +} + +/// Walk through a crate collecting all expressions we can find in it. +fn collect_exprs(file: syn::File) -> Vec<syn::Expr> { + use syn::fold::Fold; + use syn::punctuated::Punctuated; + use syn::{token, ConstParam, Expr, ExprTuple, Pat, Path}; + + struct CollectExprs(Vec<Expr>); + impl Fold for CollectExprs { + fn fold_expr(&mut self, expr: Expr) -> Expr { + match expr { + Expr::Verbatim(_) => {} + _ => self.0.push(expr), + } + + Expr::Tuple(ExprTuple { + attrs: vec![], + elems: Punctuated::new(), + paren_token: token::Paren::default(), + }) + } + + fn fold_pat(&mut self, pat: Pat) -> Pat { + pat + } + + fn fold_path(&mut self, path: Path) -> Path { + // Skip traversing into const generic path arguments + path + } + + fn fold_const_param(&mut self, const_param: ConstParam) -> ConstParam { + const_param + } + } + + let mut folder = CollectExprs(vec![]); + folder.fold_file(file); + folder.0 +} diff --git a/vendor/syn/tests/test_punctuated.rs b/vendor/syn/tests/test_punctuated.rs new file mode 100644 index 00000000000000..14ea96c7717221 --- /dev/null +++ b/vendor/syn/tests/test_punctuated.rs @@ -0,0 +1,92 @@ +#![allow( + clippy::elidable_lifetime_names, + clippy::needless_lifetimes, + clippy::uninlined_format_args +)] + +use syn::punctuated::{Pair, Punctuated}; +use syn::Token; + +macro_rules! punctuated { + ($($e:expr,)+) => {{ + let mut seq = ::syn::punctuated::Punctuated::new(); + $( + seq.push($e); + )+ + seq + }}; + + ($($e:expr),+) => { + punctuated!($($e,)+) + }; +} + +macro_rules! 
check_exact_size_iterator { + ($iter:expr) => {{ + let iter = $iter; + let size_hint = iter.size_hint(); + let len = iter.len(); + let count = iter.count(); + assert_eq!(len, count); + assert_eq!(size_hint, (count, Some(count))); + }}; +} + +#[test] +fn pairs() { + let mut p: Punctuated<_, Token![,]> = punctuated!(2, 3, 4); + + check_exact_size_iterator!(p.pairs()); + check_exact_size_iterator!(p.pairs_mut()); + check_exact_size_iterator!(p.into_pairs()); + + let mut p: Punctuated<_, Token![,]> = punctuated!(2, 3, 4); + + assert_eq!(p.pairs().next_back().map(Pair::into_value), Some(&4)); + assert_eq!( + p.pairs_mut().next_back().map(Pair::into_value), + Some(&mut 4) + ); + assert_eq!(p.into_pairs().next_back().map(Pair::into_value), Some(4)); +} + +#[test] +fn iter() { + let mut p: Punctuated<_, Token![,]> = punctuated!(2, 3, 4); + + check_exact_size_iterator!(p.iter()); + check_exact_size_iterator!(p.iter_mut()); + check_exact_size_iterator!(p.into_iter()); + + let mut p: Punctuated<_, Token![,]> = punctuated!(2, 3, 4); + + assert_eq!(p.iter().next_back(), Some(&4)); + assert_eq!(p.iter_mut().next_back(), Some(&mut 4)); + assert_eq!(p.into_iter().next_back(), Some(4)); +} + +#[test] +fn may_dangle() { + let p: Punctuated<_, Token![,]> = punctuated!(2, 3, 4); + for element in &p { + if *element == 2 { + drop(p); + break; + } + } + + let mut p: Punctuated<_, Token![,]> = punctuated!(2, 3, 4); + for element in &mut p { + if *element == 2 { + drop(p); + break; + } + } +} + +#[test] +#[should_panic = "index out of bounds: the len is 0 but the index is 0"] +fn index_out_of_bounds() { + let p = Punctuated::<syn::Ident, Token![,]>::new(); + let _ = p[0].clone(); +} diff --git a/vendor/syn/tests/test_receiver.rs b/vendor/syn/tests/test_receiver.rs new file mode 100644 index 00000000000000..98194101fdac0d --- /dev/null +++ b/vendor/syn/tests/test_receiver.rs @@ -0,0 +1,327 @@ +#![allow( + clippy::elidable_lifetime_names, + clippy::needless_lifetimes, + clippy::uninlined_format_args +)] + +#[macro_use] +mod snapshot; + +mod debug; + +use syn::{parse_quote, TraitItemFn}; + +#[test] +fn test_by_value() { + let TraitItemFn { sig, .. } = parse_quote! { + fn by_value(self: Self); + }; + snapshot!(&sig.inputs[0], @r#" + FnArg::Receiver(Receiver { + colon_token: Some, + ty: Type::Path { + path: Path { + segments: [ + PathSegment { + ident: "Self", + }, + ], + }, + }, + }) + "#); +} + +#[test] +fn test_by_mut_value() { + let TraitItemFn { sig, .. } = parse_quote! { + fn by_mut(mut self: Self); + }; + snapshot!(&sig.inputs[0], @r#" + FnArg::Receiver(Receiver { + mutability: Some, + colon_token: Some, + ty: Type::Path { + path: Path { + segments: [ + PathSegment { + ident: "Self", + }, + ], + }, + }, + }) + "#); +} + +#[test] +fn test_by_ref() { + let TraitItemFn { sig, .. } = parse_quote! { + fn by_ref(self: &Self); + }; + snapshot!(&sig.inputs[0], @r#" + FnArg::Receiver(Receiver { + colon_token: Some, + ty: Type::Reference { + elem: Type::Path { + path: Path { + segments: [ + PathSegment { + ident: "Self", + }, + ], + }, + }, + }, + }) + "#); +} + +#[test] +fn test_by_box() { + let TraitItemFn { sig, .. } = parse_quote! 
{ + fn by_box(self: Box<Self>); + }; + snapshot!(&sig.inputs[0], @r#" + FnArg::Receiver(Receiver { + colon_token: Some, + ty: Type::Path { + path: Path { + segments: [ + PathSegment { + ident: "Box", + arguments: PathArguments::AngleBracketed { + args: [ + GenericArgument::Type(Type::Path { + path: Path { + segments: [ + PathSegment { + ident: "Self", + }, + ], + }, + }), + ], + }, + }, + ], + }, + }, + }) + "#); +} + +#[test] +fn test_by_pin() { + let TraitItemFn { sig, .. } = parse_quote! { + fn by_pin(self: Pin<Self>); + }; + snapshot!(&sig.inputs[0], @r#" + FnArg::Receiver(Receiver { + colon_token: Some, + ty: Type::Path { + path: Path { + segments: [ + PathSegment { + ident: "Pin", + arguments: PathArguments::AngleBracketed { + args: [ + GenericArgument::Type(Type::Path { + path: Path { + segments: [ + PathSegment { + ident: "Self", + }, + ], + }, + }), + ], + }, + }, + ], + }, + }, + }) + "#); +} + +#[test] +fn test_explicit_type() { + let TraitItemFn { sig, .. } = parse_quote! { + fn explicit_type(self: Pin<MyType>); + }; + snapshot!(&sig.inputs[0], @r#" + FnArg::Receiver(Receiver { + colon_token: Some, + ty: Type::Path { + path: Path { + segments: [ + PathSegment { + ident: "Pin", + arguments: PathArguments::AngleBracketed { + args: [ + GenericArgument::Type(Type::Path { + path: Path { + segments: [ + PathSegment { + ident: "MyType", + }, + ], + }, + }), + ], + }, + }, + ], + }, + }, + }) + "#); +} + +#[test] +fn test_value_shorthand() { + let TraitItemFn { sig, .. } = parse_quote! { + fn value_shorthand(self); + }; + snapshot!(&sig.inputs[0], @r#" + FnArg::Receiver(Receiver { + ty: Type::Path { + path: Path { + segments: [ + PathSegment { + ident: "Self", + }, + ], + }, + }, + }) + "#); +} + +#[test] +fn test_mut_value_shorthand() { + let TraitItemFn { sig, .. } = parse_quote! { + fn mut_value_shorthand(mut self); + }; + snapshot!(&sig.inputs[0], @r#" + FnArg::Receiver(Receiver { + mutability: Some, + ty: Type::Path { + path: Path { + segments: [ + PathSegment { + ident: "Self", + }, + ], + }, + }, + }) + "#); +} + +#[test] +fn test_ref_shorthand() { + let TraitItemFn { sig, .. } = parse_quote! { + fn ref_shorthand(&self); + }; + snapshot!(&sig.inputs[0], @r#" + FnArg::Receiver(Receiver { + reference: Some(None), + ty: Type::Reference { + elem: Type::Path { + path: Path { + segments: [ + PathSegment { + ident: "Self", + }, + ], + }, + }, + }, + }) + "#); +} + +#[test] +fn test_ref_shorthand_with_lifetime() { + let TraitItemFn { sig, .. } = parse_quote! { + fn ref_shorthand(&'a self); + }; + snapshot!(&sig.inputs[0], @r#" + FnArg::Receiver(Receiver { + reference: Some(Some(Lifetime { + ident: "a", + })), + ty: Type::Reference { + lifetime: Some(Lifetime { + ident: "a", + }), + elem: Type::Path { + path: Path { + segments: [ + PathSegment { + ident: "Self", + }, + ], + }, + }, + }, + }) + "#); +} + +#[test] +fn test_ref_mut_shorthand() { + let TraitItemFn { sig, .. } = parse_quote! { + fn ref_mut_shorthand(&mut self); + }; + snapshot!(&sig.inputs[0], @r#" + FnArg::Receiver(Receiver { + reference: Some(None), + mutability: Some, + ty: Type::Reference { + mutability: Some, + elem: Type::Path { + path: Path { + segments: [ + PathSegment { + ident: "Self", + }, + ], + }, + }, + }, + }) + "#); +} + +#[test] +fn test_ref_mut_shorthand_with_lifetime() { + let TraitItemFn { sig, .. } = parse_quote! 
{ + fn ref_mut_shorthand(&'a mut self); + }; + snapshot!(&sig.inputs[0], @r#" + FnArg::Receiver(Receiver { + reference: Some(Some(Lifetime { + ident: "a", + })), + mutability: Some, + ty: Type::Reference { + lifetime: Some(Lifetime { + ident: "a", + }), + mutability: Some, + elem: Type::Path { + path: Path { + segments: [ + PathSegment { + ident: "Self", + }, + ], + }, + }, + }, + }) + "#); +} diff --git a/vendor/syn/tests/test_round_trip.rs b/vendor/syn/tests/test_round_trip.rs new file mode 100644 index 00000000000000..5b1b833a6a95a4 --- /dev/null +++ b/vendor/syn/tests/test_round_trip.rs @@ -0,0 +1,256 @@ +#![cfg(not(syn_disable_nightly_tests))] +#![cfg(not(miri))] +#![recursion_limit = "1024"] +#![feature(rustc_private)] +#![allow( + clippy::blocks_in_conditions, + clippy::elidable_lifetime_names, + clippy::manual_assert, + clippy::manual_let_else, + clippy::match_like_matches_macro, + clippy::needless_lifetimes, + clippy::uninlined_format_args +)] +#![allow(mismatched_lifetime_syntaxes)] + +extern crate rustc_ast; +extern crate rustc_ast_pretty; +extern crate rustc_data_structures; +extern crate rustc_driver; +extern crate rustc_error_messages; +extern crate rustc_errors; +extern crate rustc_expand; +extern crate rustc_parse; +extern crate rustc_session; +extern crate rustc_span; + +use crate::common::eq::SpanlessEq; +use quote::quote; +use rustc_ast::ast::{ + AngleBracketedArg, Crate, GenericArg, GenericArgs, GenericParamKind, Generics, +}; +use rustc_ast::mut_visit::{self, MutVisitor}; +use rustc_ast_pretty::pprust; +use rustc_data_structures::flat_map_in_place::FlatMapInPlace; +use rustc_error_messages::{DiagMessage, LazyFallbackBundle}; +use rustc_errors::{translation, Diag, PResult}; +use rustc_parse::lexer::StripTokens; +use rustc_session::parse::ParseSess; +use rustc_span::FileName; +use std::borrow::Cow; +use std::fs; +use std::panic; +use std::path::Path; +use std::process; +use std::sync::atomic::{AtomicUsize, Ordering}; +use std::time::Instant; + +#[macro_use] +mod macros; + +mod common; +mod repo; + +#[test] +fn test_round_trip() { + repo::rayon_init(); + repo::clone_rust(); + let abort_after = repo::abort_after(); + if abort_after == 0 { + panic!("skipping all round_trip tests"); + } + + let failed = AtomicUsize::new(0); + + repo::for_each_rust_file(|path| test(path, &failed, abort_after)); + + let failed = failed.into_inner(); + if failed > 0 { + panic!("{} failures", failed); + } +} + +fn test(path: &Path, failed: &AtomicUsize, abort_after: usize) { + let failed = || { + let prev_failed = failed.fetch_add(1, Ordering::Relaxed); + if prev_failed + 1 >= abort_after { + process::exit(1); + } + }; + + let content = fs::read_to_string(path).unwrap(); + + let (back, elapsed) = match panic::catch_unwind(|| { + let start = Instant::now(); + let result = syn::parse_file(&content); + let elapsed = start.elapsed(); + result.map(|krate| (quote!(#krate).to_string(), elapsed)) + }) { + Err(_) => { + errorf!("=== {}: syn panic\n", path.display()); + failed(); + return; + } + Ok(Err(msg)) => { + errorf!("=== {}: syn failed to parse\n{:?}\n", path.display(), msg); + failed(); + return; + } + Ok(Ok(result)) => result, + }; + + let edition = repo::edition(path).parse().unwrap(); + + rustc_span::create_session_if_not_set_then(edition, |_| { + let equal = match panic::catch_unwind(|| { + let locale_resources = rustc_driver::DEFAULT_LOCALE_RESOURCES.to_vec(); + let sess = ParseSess::new(locale_resources); + let before = match librustc_parse(content, &sess) { + Ok(before) => before, + 
Err(diagnostic) => { + errorf!( + "=== {}: ignore - librustc failed to parse original content: {}\n", + path.display(), + translate_message(&diagnostic), + ); + diagnostic.cancel(); + return Err(true); + } + }; + let after = match librustc_parse(back, &sess) { + Ok(after) => after, + Err(diagnostic) => { + errorf!("=== {}: librustc failed to parse", path.display()); + diagnostic.emit(); + return Err(false); + } + }; + Ok((before, after)) + }) { + Err(_) => { + errorf!("=== {}: ignoring librustc panic\n", path.display()); + true + } + Ok(Err(equal)) => equal, + Ok(Ok((mut before, mut after))) => { + normalize(&mut before); + normalize(&mut after); + if SpanlessEq::eq(&before, &after) { + errorf!( + "=== {}: pass in {}ms\n", + path.display(), + elapsed.as_secs() * 1000 + u64::from(elapsed.subsec_nanos()) / 1_000_000 + ); + true + } else { + errorf!( + "=== {}: FAIL\n{}\n!=\n{}\n", + path.display(), + pprust::crate_to_string_for_macros(&before), + pprust::crate_to_string_for_macros(&after), + ); + false + } + } + }; + if !equal { + failed(); + } + }); +} + +fn librustc_parse(content: String, sess: &ParseSess) -> PResult<Crate> { + static COUNTER: AtomicUsize = AtomicUsize::new(0); + let counter = COUNTER.fetch_add(1, Ordering::Relaxed); + let name = FileName::Custom(format!("test_round_trip{}", counter)); + let mut parser = rustc_parse::new_parser_from_source_str( + sess, + name, + content, + StripTokens::ShebangAndFrontmatter, + ) + .unwrap(); + parser.parse_crate_mod() +} + +fn translate_message(diagnostic: &Diag) -> Cow<'static, str> { + thread_local! { + static FLUENT_BUNDLE: LazyFallbackBundle = { + let locale_resources = rustc_driver::DEFAULT_LOCALE_RESOURCES.to_vec(); + let with_directionality_markers = false; + rustc_error_messages::fallback_fluent_bundle(locale_resources, with_directionality_markers) + }; + } + + let message = &diagnostic.messages[0].0; + let args = translation::to_fluent_args(diagnostic.args.iter()); + + let (identifier, attr) = match message { + DiagMessage::Str(msg) | DiagMessage::Translated(msg) => return msg.clone(), + DiagMessage::FluentIdentifier(identifier, attr) => (identifier, attr), + }; + + FLUENT_BUNDLE.with(|fluent_bundle| { + let message = fluent_bundle + .get_message(identifier) + .expect("missing diagnostic in fluent bundle"); + let value = match attr { + Some(attr) => message + .get_attribute(attr) + .expect("missing attribute in fluent message") + .value(), + None => message.value().expect("missing value in fluent message"), + }; + + let mut err = Vec::new(); + let translated = fluent_bundle.format_pattern(value, Some(&args), &mut err); + assert!(err.is_empty()); + Cow::Owned(translated.into_owned()) + }) +} + +fn normalize(krate: &mut Crate) { + struct NormalizeVisitor; + + impl MutVisitor for NormalizeVisitor { + fn visit_generic_args(&mut self, e: &mut GenericArgs) { + if let GenericArgs::AngleBracketed(e) = e { + #[derive(Ord, PartialOrd, Eq, PartialEq)] + enum Group { + Lifetimes, + TypesAndConsts, + Constraints, + } + e.args.sort_by_key(|arg| match arg { + AngleBracketedArg::Arg(arg) => match arg { + GenericArg::Lifetime(_) => Group::Lifetimes, + GenericArg::Type(_) | GenericArg::Const(_) => Group::TypesAndConsts, + }, + AngleBracketedArg::Constraint(_) => Group::Constraints, + }); + } + mut_visit::walk_generic_args(self, e); + } + + fn visit_generics(&mut self, e: &mut Generics) { + #[derive(Ord, PartialOrd, Eq, PartialEq)] + enum Group { + Lifetimes, + TypesAndConsts, + } + e.params.sort_by_key(|param| match param.kind { + 
GenericParamKind::Lifetime => Group::Lifetimes, + GenericParamKind::Type { .. } | GenericParamKind::Const { .. } => { + Group::TypesAndConsts + } + }); + e.params + .flat_map_in_place(|param| self.flat_map_generic_param(param)); + if e.where_clause.predicates.is_empty() { + e.where_clause.has_where_token = false; + } + } + } + + NormalizeVisitor.visit_crate(krate); +} diff --git a/vendor/syn/tests/test_shebang.rs b/vendor/syn/tests/test_shebang.rs new file mode 100644 index 00000000000000..3b55ddfdd59d03 --- /dev/null +++ b/vendor/syn/tests/test_shebang.rs @@ -0,0 +1,73 @@ +#![allow( + clippy::elidable_lifetime_names, + clippy::needless_lifetimes, + clippy::uninlined_format_args +)] + +#[macro_use] +mod snapshot; + +mod debug; + +#[test] +fn test_basic() { + let content = "#!/usr/bin/env rustx\nfn main() {}"; + let file = syn::parse_file(content).unwrap(); + snapshot!(file, @r##" + File { + shebang: Some("#!/usr/bin/env rustx"), + items: [ + Item::Fn { + vis: Visibility::Inherited, + sig: Signature { + ident: "main", + generics: Generics, + output: ReturnType::Default, + }, + block: Block { + stmts: [], + }, + }, + ], + } + "##); +} + +#[test] +fn test_comment() { + let content = "#!//am/i/a/comment\n[allow(dead_code)] fn main() {}"; + let file = syn::parse_file(content).unwrap(); + snapshot!(file, @r#" + File { + attrs: [ + Attribute { + style: AttrStyle::Inner, + meta: Meta::List { + path: Path { + segments: [ + PathSegment { + ident: "allow", + }, + ], + }, + delimiter: MacroDelimiter::Paren, + tokens: TokenStream(`dead_code`), + }, + }, + ], + items: [ + Item::Fn { + vis: Visibility::Inherited, + sig: Signature { + ident: "main", + generics: Generics, + output: ReturnType::Default, + }, + block: Block { + stmts: [], + }, + }, + ], + } + "#); +} diff --git a/vendor/syn/tests/test_size.rs b/vendor/syn/tests/test_size.rs new file mode 100644 index 00000000000000..29fd43589d427a --- /dev/null +++ b/vendor/syn/tests/test_size.rs @@ -0,0 +1,54 @@ +// Assumes proc-macro2's "span-locations" feature is off. 
+ +use std::mem; +use syn::{Expr, Item, Lit, Pat, Type}; + +#[rustversion::attr(before(2022-11-24), ignore = "requires nightly-2022-11-24 or newer")] +#[rustversion::attr( + since(2022-11-24), + cfg_attr(not(target_pointer_width = "64"), ignore = "only applicable to 64-bit") +)] +#[test] +fn test_expr_size() { + assert_eq!(mem::size_of::<Expr>(), 176); +} + +#[rustversion::attr(before(2022-09-09), ignore = "requires nightly-2022-09-09 or newer")] +#[rustversion::attr( + since(2022-09-09), + cfg_attr(not(target_pointer_width = "64"), ignore = "only applicable to 64-bit") +)] +#[test] +fn test_item_size() { + assert_eq!(mem::size_of::<Item>(), 352); +} + +#[rustversion::attr(before(2023-04-29), ignore = "requires nightly-2023-04-29 or newer")] +#[rustversion::attr( + since(2023-04-29), + cfg_attr(not(target_pointer_width = "64"), ignore = "only applicable to 64-bit") +)] +#[test] +fn test_type_size() { + assert_eq!(mem::size_of::<Type>(), 224); +} + +#[rustversion::attr(before(2023-04-29), ignore = "requires nightly-2023-04-29 or newer")] +#[rustversion::attr( + since(2023-04-29), + cfg_attr(not(target_pointer_width = "64"), ignore = "only applicable to 64-bit") +)] +#[test] +fn test_pat_size() { + assert_eq!(mem::size_of::<Pat>(), 184); +} + +#[rustversion::attr(before(2023-12-20), ignore = "requires nightly-2023-12-20 or newer")] +#[rustversion::attr( + since(2023-12-20), + cfg_attr(not(target_pointer_width = "64"), ignore = "only applicable to 64-bit") +)] +#[test] +fn test_lit_size() { + assert_eq!(mem::size_of::<Lit>(), 24); +} diff --git a/vendor/syn/tests/test_stmt.rs b/vendor/syn/tests/test_stmt.rs new file mode 100644 index 00000000000000..101c1b1c906d2f --- /dev/null +++ b/vendor/syn/tests/test_stmt.rs @@ -0,0 +1,337 @@ +#![allow( + clippy::assertions_on_result_states, + clippy::elidable_lifetime_names, + clippy::needless_lifetimes, + clippy::non_ascii_literal, + clippy::uninlined_format_args +)] + +#[macro_use] +mod snapshot; + +mod debug; + +use proc_macro2::{Delimiter, Group, Ident, Span, TokenStream, TokenTree}; +use quote::{quote, ToTokens as _}; +use syn::parse::Parser as _; +use syn::{Block, Stmt}; + +#[test] +fn test_raw_operator() { + let stmt = syn::parse_str::<Stmt>("let _ = &raw const x;").unwrap(); + + snapshot!(stmt, @r#" + Stmt::Local { + pat: Pat::Wild, + init: Some(LocalInit { + expr: Expr::RawAddr { + mutability: PointerMutability::Const, + expr: Expr::Path { + path: Path { + segments: [ + PathSegment { + ident: "x", + }, + ], + }, + }, + }, + }), + } + "#); +} + +#[test] +fn test_raw_variable() { + let stmt = syn::parse_str::<Stmt>("let _ = &raw;").unwrap(); + + snapshot!(stmt, @r#" + Stmt::Local { + pat: Pat::Wild, + init: Some(LocalInit { + expr: Expr::Reference { + expr: Expr::Path { + path: Path { + segments: [ + PathSegment { + ident: "raw", + }, + ], + }, + }, + }, + }), + } + "#); +} + +#[test] +fn test_raw_invalid() { + assert!(syn::parse_str::<Stmt>("let _ = &raw x;").is_err()); +} + +#[test] +fn test_none_group() { + // «∅ async fn f() {} ∅» + let tokens = TokenStream::from_iter([TokenTree::Group(Group::new( + Delimiter::None, + TokenStream::from_iter([ + TokenTree::Ident(Ident::new("async", Span::call_site())), + TokenTree::Ident(Ident::new("fn", Span::call_site())), + TokenTree::Ident(Ident::new("f", Span::call_site())), + TokenTree::Group(Group::new(Delimiter::Parenthesis, TokenStream::new())), + TokenTree::Group(Group::new(Delimiter::Brace, TokenStream::new())), + ]), + ))]); + snapshot!(tokens as Stmt, @r#" + Stmt::Item(Item::Fn { + vis: 
Visibility::Inherited, + sig: Signature { + asyncness: Some, + ident: "f", + generics: Generics, + output: ReturnType::Default, + }, + block: Block { + stmts: [], + }, + }) + "#); + + let tokens = Group::new(Delimiter::None, quote!(let None = None)).to_token_stream(); + let stmts = Block::parse_within.parse2(tokens).unwrap(); + snapshot!(stmts, @r#" + [ + Stmt::Expr( + Expr::Group { + expr: Expr::Let { + pat: Pat::Ident { + ident: "None", + }, + expr: Expr::Path { + path: Path { + segments: [ + PathSegment { + ident: "None", + }, + ], + }, + }, + }, + }, + None, + ), + ] + "#); +} + +#[test] +fn test_let_dot_dot() { + let tokens = quote! { + let .. = 10; + }; + + snapshot!(tokens as Stmt, @r#" + Stmt::Local { + pat: Pat::Rest, + init: Some(LocalInit { + expr: Expr::Lit { + lit: 10, + }, + }), + } + "#); +} + +#[test] +fn test_let_else() { + let tokens = quote! { + let Some(x) = None else { return 0; }; + }; + + snapshot!(tokens as Stmt, @r#" + Stmt::Local { + pat: Pat::TupleStruct { + path: Path { + segments: [ + PathSegment { + ident: "Some", + }, + ], + }, + elems: [ + Pat::Ident { + ident: "x", + }, + ], + }, + init: Some(LocalInit { + expr: Expr::Path { + path: Path { + segments: [ + PathSegment { + ident: "None", + }, + ], + }, + }, + diverge: Some(Expr::Block { + block: Block { + stmts: [ + Stmt::Expr( + Expr::Return { + expr: Some(Expr::Lit { + lit: 0, + }), + }, + Some, + ), + ], + }, + }), + }), + } + "#); +} + +#[test] +fn test_macros() { + let tokens = quote! { + fn main() { + macro_rules! mac {} + thread_local! { static FOO } + println!(""); + vec![] + } + }; + + snapshot!(tokens as Stmt, @r#" + Stmt::Item(Item::Fn { + vis: Visibility::Inherited, + sig: Signature { + ident: "main", + generics: Generics, + output: ReturnType::Default, + }, + block: Block { + stmts: [ + Stmt::Item(Item::Macro { + ident: Some("mac"), + mac: Macro { + path: Path { + segments: [ + PathSegment { + ident: "macro_rules", + }, + ], + }, + delimiter: MacroDelimiter::Brace, + tokens: TokenStream(``), + }, + }), + Stmt::Macro { + mac: Macro { + path: Path { + segments: [ + PathSegment { + ident: "thread_local", + }, + ], + }, + delimiter: MacroDelimiter::Brace, + tokens: TokenStream(`static FOO`), + }, + }, + Stmt::Macro { + mac: Macro { + path: Path { + segments: [ + PathSegment { + ident: "println", + }, + ], + }, + delimiter: MacroDelimiter::Paren, + tokens: TokenStream(`""`), + }, + semi_token: Some, + }, + Stmt::Expr( + Expr::Macro { + mac: Macro { + path: Path { + segments: [ + PathSegment { + ident: "vec", + }, + ], + }, + delimiter: MacroDelimiter::Bracket, + tokens: TokenStream(``), + }, + }, + None, + ), + ], + }, + }) + "#); +} + +#[test] +fn test_early_parse_loop() { + // The following is an Expr::Loop followed by Expr::Tuple. It is not an + // Expr::Call. + let tokens = quote! { + loop {} + () + }; + + let stmts = Block::parse_within.parse2(tokens).unwrap(); + + snapshot!(stmts, @r#" + [ + Stmt::Expr( + Expr::Loop { + body: Block { + stmts: [], + }, + }, + None, + ), + Stmt::Expr( + Expr::Tuple, + None, + ), + ] + "#); + + let tokens = quote! 
{ + 'a: loop {} + () + }; + + let stmts = Block::parse_within.parse2(tokens).unwrap(); + + snapshot!(stmts, @r#" + [ + Stmt::Expr( + Expr::Loop { + label: Some(Label { + name: Lifetime { + ident: "a", + }, + }), + body: Block { + stmts: [], + }, + }, + None, + ), + Stmt::Expr( + Expr::Tuple, + None, + ), + ] + "#); +} diff --git a/vendor/syn/tests/test_token_trees.rs b/vendor/syn/tests/test_token_trees.rs new file mode 100644 index 00000000000000..1b473858cd1b28 --- /dev/null +++ b/vendor/syn/tests/test_token_trees.rs @@ -0,0 +1,38 @@ +#![allow( + clippy::elidable_lifetime_names, + clippy::needless_lifetimes, + clippy::uninlined_format_args +)] + +#[macro_use] +mod snapshot; + +mod debug; + +use proc_macro2::TokenStream; +use quote::quote; +use syn::Lit; + +#[test] +fn test_struct() { + let input = " + #[derive(Debug, Clone)] + pub struct Item { + pub ident: Ident, + pub attrs: Vec<Attribute>, + } + "; + + snapshot!(input as TokenStream, @r##" + TokenStream( + `# [derive (Debug , Clone)] pub struct Item { pub ident : Ident , pub attrs : Vec < Attribute >, }`, + ) + "##); +} + +#[test] +fn test_literal_mangling() { + let code = "0_4"; + let parsed: Lit = syn::parse_str(code).unwrap(); + assert_eq!(code, quote!(#parsed).to_string()); +} diff --git a/vendor/syn/tests/test_ty.rs b/vendor/syn/tests/test_ty.rs new file mode 100644 index 00000000000000..5f29220114781a --- /dev/null +++ b/vendor/syn/tests/test_ty.rs @@ -0,0 +1,471 @@ +#![allow( + clippy::elidable_lifetime_names, + clippy::needless_lifetimes, + clippy::uninlined_format_args +)] + +#[macro_use] +mod snapshot; + +mod debug; + +use proc_macro2::{Delimiter, Group, Ident, Punct, Spacing, Span, TokenStream, TokenTree}; +use quote::{quote, ToTokens as _}; +use syn::punctuated::Punctuated; +use syn::{parse_quote, token, Token, Type, TypeTuple}; + +#[test] +fn test_mut_self() { + syn::parse_str::<Type>("fn(mut self)").unwrap(); + syn::parse_str::<Type>("fn(mut self,)").unwrap(); + syn::parse_str::<Type>("fn(mut self: ())").unwrap(); + syn::parse_str::<Type>("fn(mut self: ...)").unwrap_err(); + syn::parse_str::<Type>("fn(mut self: mut self)").unwrap_err(); + syn::parse_str::<Type>("fn(mut self::T)").unwrap_err(); +} + +#[test] +fn test_macro_variable_type() { + // mimics the token stream corresponding to `$ty<T>` + let tokens = TokenStream::from_iter([ + TokenTree::Group(Group::new(Delimiter::None, quote! { ty })), + TokenTree::Punct(Punct::new('<', Spacing::Alone)), + TokenTree::Ident(Ident::new("T", Span::call_site())), + TokenTree::Punct(Punct::new('>', Spacing::Alone)), + ]); + + snapshot!(tokens as Type, @r#" + Type::Path { + path: Path { + segments: [ + PathSegment { + ident: "ty", + arguments: PathArguments::AngleBracketed { + args: [ + GenericArgument::Type(Type::Path { + path: Path { + segments: [ + PathSegment { + ident: "T", + }, + ], + }, + }), + ], + }, + }, + ], + }, + } + "#); + + // mimics the token stream corresponding to `$ty::<T>` + let tokens = TokenStream::from_iter([ + TokenTree::Group(Group::new(Delimiter::None, quote! 
{ ty })), + TokenTree::Punct(Punct::new(':', Spacing::Joint)), + TokenTree::Punct(Punct::new(':', Spacing::Alone)), + TokenTree::Punct(Punct::new('<', Spacing::Alone)), + TokenTree::Ident(Ident::new("T", Span::call_site())), + TokenTree::Punct(Punct::new('>', Spacing::Alone)), + ]); + + snapshot!(tokens as Type, @r#" + Type::Path { + path: Path { + segments: [ + PathSegment { + ident: "ty", + arguments: PathArguments::AngleBracketed { + colon2_token: Some, + args: [ + GenericArgument::Type(Type::Path { + path: Path { + segments: [ + PathSegment { + ident: "T", + }, + ], + }, + }), + ], + }, + }, + ], + }, + } + "#); +} + +#[test] +fn test_group_angle_brackets() { + // mimics the token stream corresponding to `Option<$ty>` + let tokens = TokenStream::from_iter([ + TokenTree::Ident(Ident::new("Option", Span::call_site())), + TokenTree::Punct(Punct::new('<', Spacing::Alone)), + TokenTree::Group(Group::new(Delimiter::None, quote! { Vec<u8> })), + TokenTree::Punct(Punct::new('>', Spacing::Alone)), + ]); + + snapshot!(tokens as Type, @r#" + Type::Path { + path: Path { + segments: [ + PathSegment { + ident: "Option", + arguments: PathArguments::AngleBracketed { + args: [ + GenericArgument::Type(Type::Group { + elem: Type::Path { + path: Path { + segments: [ + PathSegment { + ident: "Vec", + arguments: PathArguments::AngleBracketed { + args: [ + GenericArgument::Type(Type::Path { + path: Path { + segments: [ + PathSegment { + ident: "u8", + }, + ], + }, + }), + ], + }, + }, + ], + }, + }, + }), + ], + }, + }, + ], + }, + } + "#); +} + +#[test] +fn test_group_colons() { + // mimics the token stream corresponding to `$ty::Item` + let tokens = TokenStream::from_iter([ + TokenTree::Group(Group::new(Delimiter::None, quote! { Vec<u8> })), + TokenTree::Punct(Punct::new(':', Spacing::Joint)), + TokenTree::Punct(Punct::new(':', Spacing::Alone)), + TokenTree::Ident(Ident::new("Item", Span::call_site())), + ]); + + snapshot!(tokens as Type, @r#" + Type::Path { + path: Path { + segments: [ + PathSegment { + ident: "Vec", + arguments: PathArguments::AngleBracketed { + args: [ + GenericArgument::Type(Type::Path { + path: Path { + segments: [ + PathSegment { + ident: "u8", + }, + ], + }, + }), + ], + }, + }, + Token![::], + PathSegment { + ident: "Item", + }, + ], + }, + } + "#); + + let tokens = TokenStream::from_iter([ + TokenTree::Group(Group::new(Delimiter::None, quote! 
{ [T] })), + TokenTree::Punct(Punct::new(':', Spacing::Joint)), + TokenTree::Punct(Punct::new(':', Spacing::Alone)), + TokenTree::Ident(Ident::new("Element", Span::call_site())), + ]); + + snapshot!(tokens as Type, @r#" + Type::Path { + qself: Some(QSelf { + ty: Type::Slice { + elem: Type::Path { + path: Path { + segments: [ + PathSegment { + ident: "T", + }, + ], + }, + }, + }, + position: 0, + }), + path: Path { + leading_colon: Some, + segments: [ + PathSegment { + ident: "Element", + }, + ], + }, + } + "#); +} + +#[test] +fn test_trait_object() { + let tokens = quote!(dyn for<'a> Trait<'a> + 'static); + snapshot!(tokens as Type, @r#" + Type::TraitObject { + dyn_token: Some, + bounds: [ + TypeParamBound::Trait(TraitBound { + lifetimes: Some(BoundLifetimes { + lifetimes: [ + GenericParam::Lifetime(LifetimeParam { + lifetime: Lifetime { + ident: "a", + }, + }), + ], + }), + path: Path { + segments: [ + PathSegment { + ident: "Trait", + arguments: PathArguments::AngleBracketed { + args: [ + GenericArgument::Lifetime(Lifetime { + ident: "a", + }), + ], + }, + }, + ], + }, + }), + Token![+], + TypeParamBound::Lifetime { + ident: "static", + }, + ], + } + "#); + + let tokens = quote!(dyn 'a + Trait); + snapshot!(tokens as Type, @r#" + Type::TraitObject { + dyn_token: Some, + bounds: [ + TypeParamBound::Lifetime { + ident: "a", + }, + Token![+], + TypeParamBound::Trait(TraitBound { + path: Path { + segments: [ + PathSegment { + ident: "Trait", + }, + ], + }, + }), + ], + } + "#); + + // None of the following are valid Rust types. + syn::parse_str::<Type>("for<'a> dyn Trait<'a>").unwrap_err(); + syn::parse_str::<Type>("dyn for<'a> 'a + Trait").unwrap_err(); +} + +#[test] +fn test_trailing_plus() { + #[rustfmt::skip] + let tokens = quote!(impl Trait +); + snapshot!(tokens as Type, @r#" + Type::ImplTrait { + bounds: [ + TypeParamBound::Trait(TraitBound { + path: Path { + segments: [ + PathSegment { + ident: "Trait", + }, + ], + }, + }), + Token![+], + ], + } + "#); + + #[rustfmt::skip] + let tokens = quote!(dyn Trait +); + snapshot!(tokens as Type, @r#" + Type::TraitObject { + dyn_token: Some, + bounds: [ + TypeParamBound::Trait(TraitBound { + path: Path { + segments: [ + PathSegment { + ident: "Trait", + }, + ], + }, + }), + Token![+], + ], + } + "#); + + #[rustfmt::skip] + let tokens = quote!(Trait +); + snapshot!(tokens as Type, @r#" + Type::TraitObject { + bounds: [ + TypeParamBound::Trait(TraitBound { + path: Path { + segments: [ + PathSegment { + ident: "Trait", + }, + ], + }, + }), + Token![+], + ], + } + "#); +} + +#[test] +fn test_tuple_comma() { + let mut expr = TypeTuple { + paren_token: token::Paren::default(), + elems: Punctuated::new(), + }; + snapshot!(expr.to_token_stream() as Type, @"Type::Tuple"); + + expr.elems.push_value(parse_quote!(_)); + // Must not parse to Type::Paren + snapshot!(expr.to_token_stream() as Type, @r#" + Type::Tuple { + elems: [ + Type::Infer, + Token![,], + ], + } + "#); + + expr.elems.push_punct(<Token![,]>::default()); + snapshot!(expr.to_token_stream() as Type, @r#" + Type::Tuple { + elems: [ + Type::Infer, + Token![,], + ], + } + "#); + + expr.elems.push_value(parse_quote!(_)); + snapshot!(expr.to_token_stream() as Type, @r#" + Type::Tuple { + elems: [ + Type::Infer, + Token![,], + Type::Infer, + ], + } + "#); + + expr.elems.push_punct(<Token![,]>::default()); + snapshot!(expr.to_token_stream() as Type, @r#" + Type::Tuple { + elems: [ + Type::Infer, + Token![,], + Type::Infer, + Token![,], + ], + } + "#); +} + +#[test] +fn test_impl_trait_use() { + let 
tokens = quote! { + impl Sized + use<'_, 'a, A, Test> + }; + + snapshot!(tokens as Type, @r#" + Type::ImplTrait { + bounds: [ + TypeParamBound::Trait(TraitBound { + path: Path { + segments: [ + PathSegment { + ident: "Sized", + }, + ], + }, + }), + Token![+], + TypeParamBound::PreciseCapture(PreciseCapture { + params: [ + CapturedParam::Lifetime(Lifetime { + ident: "_", + }), + Token![,], + CapturedParam::Lifetime(Lifetime { + ident: "a", + }), + Token![,], + CapturedParam::Ident("A"), + Token![,], + CapturedParam::Ident("Test"), + ], + }), + ], + } + "#); + + let trailing = quote! { + impl Sized + use<'_,> + }; + + snapshot!(trailing as Type, @r#" + Type::ImplTrait { + bounds: [ + TypeParamBound::Trait(TraitBound { + path: Path { + segments: [ + PathSegment { + ident: "Sized", + }, + ], + }, + }), + Token![+], + TypeParamBound::PreciseCapture(PreciseCapture { + params: [ + CapturedParam::Lifetime(Lifetime { + ident: "_", + }), + Token![,], + ], + }), + ], + } + "#); +} diff --git a/vendor/syn/tests/test_unparenthesize.rs b/vendor/syn/tests/test_unparenthesize.rs new file mode 100644 index 00000000000000..5fa2e59e14be63 --- /dev/null +++ b/vendor/syn/tests/test_unparenthesize.rs @@ -0,0 +1,70 @@ +#![cfg(not(miri))] +#![recursion_limit = "1024"] +#![feature(rustc_private)] +#![allow( + clippy::elidable_lifetime_names, + clippy::manual_assert, + clippy::match_like_matches_macro, + clippy::needless_lifetimes, + clippy::uninlined_format_args +)] + +use crate::common::visit::{AsIfPrinted, FlattenParens}; +use quote::ToTokens as _; +use std::fs; +use std::panic; +use std::path::Path; +use std::sync::atomic::{AtomicUsize, Ordering}; +use syn::visit_mut::VisitMut as _; + +#[macro_use] +mod macros; + +mod common; +mod repo; + +#[test] +fn test_unparenthesize() { + repo::rayon_init(); + repo::clone_rust(); + + let failed = AtomicUsize::new(0); + + repo::for_each_rust_file(|path| test(path, &failed)); + + let failed = failed.into_inner(); + if failed > 0 { + panic!("{} failures", failed); + } +} + +fn test(path: &Path, failed: &AtomicUsize) { + let content = fs::read_to_string(path).unwrap(); + + match panic::catch_unwind(|| -> syn::Result<()> { + let mut before = syn::parse_file(&content)?; + FlattenParens::discard_attrs().visit_file_mut(&mut before); + let printed = before.to_token_stream(); + let mut after = syn::parse2::<syn::File>(printed.clone())?; + FlattenParens::discard_attrs().visit_file_mut(&mut after); + // Normalize features that we expect Syn not to print. 
+ AsIfPrinted.visit_file_mut(&mut before); + if before != after { + errorf!("=== {}\n", path.display()); + if failed.fetch_add(1, Ordering::Relaxed) == 0 { + errorf!("BEFORE:\n{:#?}\nAFTER:\n{:#?}\n", before, after); + } + } + Ok(()) + }) { + Err(_) => { + errorf!("=== {}: syn panic\n", path.display()); + failed.fetch_add(1, Ordering::Relaxed); + } + Ok(Err(msg)) => { + errorf!("=== {}: syn failed to parse\n{:?}\n", path.display(), msg); + failed.fetch_add(1, Ordering::Relaxed); + } + Ok(Ok(())) => {} + } +} diff --git a/vendor/syn/tests/test_visibility.rs b/vendor/syn/tests/test_visibility.rs new file mode 100644 index 00000000000000..cf15574b510299 --- /dev/null +++ b/vendor/syn/tests/test_visibility.rs @@ -0,0 +1,191 @@ +#![allow( + clippy::elidable_lifetime_names, + clippy::needless_lifetimes, + clippy::uninlined_format_args +)] + +#[macro_use] +mod snapshot; + +mod debug; + +use proc_macro2::{Delimiter, Group, Ident, Punct, Spacing, Span, TokenStream, TokenTree}; +use quote::quote; +use syn::parse::{Parse, ParseStream}; +use syn::{DeriveInput, Result, Visibility}; + +#[derive(Debug)] +struct VisRest { + vis: Visibility, + rest: TokenStream, +} + +impl Parse for VisRest { + fn parse(input: ParseStream) -> Result<Self> { + Ok(VisRest { + vis: input.parse()?, + rest: input.parse()?, + }) + } +} + +macro_rules! assert_vis_parse { + ($input:expr, Ok($p:pat)) => { + assert_vis_parse!($input, Ok($p) + ""); + }; + + ($input:expr, Ok($p:pat) + $rest:expr) => { + let expected = $rest.parse::<TokenStream>().unwrap(); + let parse: VisRest = syn::parse_str($input).unwrap(); + + match parse.vis { + $p => {} + _ => panic!("expected {}, got {:?}", stringify!($p), parse.vis), + } + + // NOTE: Round-trips through `to_string` to avoid potential whitespace + // diffs. 
+ assert_eq!(parse.rest.to_string(), expected.to_string()); + }; + + ($input:expr, Err) => { + syn::parse2::<VisRest>($input.parse().unwrap()).unwrap_err(); + }; +} + +#[test] +fn test_pub() { + assert_vis_parse!("pub", Ok(Visibility::Public(_))); +} + +#[test] +fn test_inherited() { + assert_vis_parse!("", Ok(Visibility::Inherited)); +} + +#[test] +fn test_in() { + assert_vis_parse!("pub(in foo::bar)", Ok(Visibility::Restricted(_))); +} + +#[test] +fn test_pub_crate() { + assert_vis_parse!("pub(crate)", Ok(Visibility::Restricted(_))); +} + +#[test] +fn test_pub_self() { + assert_vis_parse!("pub(self)", Ok(Visibility::Restricted(_))); +} + +#[test] +fn test_pub_super() { + assert_vis_parse!("pub(super)", Ok(Visibility::Restricted(_))); +} + +#[test] +fn test_missing_in() { + assert_vis_parse!("pub(foo::bar)", Ok(Visibility::Public(_)) + "(foo::bar)"); +} + +#[test] +fn test_missing_in_path() { + assert_vis_parse!("pub(in)", Err); +} + +#[test] +fn test_crate_path() { + assert_vis_parse!( + "pub(crate::A, crate::B)", + Ok(Visibility::Public(_)) + "(crate::A, crate::B)" + ); +} + +#[test] +fn test_junk_after_in() { + assert_vis_parse!("pub(in some::path @@garbage)", Err); +} + +#[test] +fn test_inherited_vis_named_field() { + // mimics `struct S { $vis $field: () }` where $vis is empty + let tokens = TokenStream::from_iter([ + TokenTree::Ident(Ident::new("struct", Span::call_site())), + TokenTree::Ident(Ident::new("S", Span::call_site())), + TokenTree::Group(Group::new( + Delimiter::Brace, + TokenStream::from_iter([ + TokenTree::Group(Group::new(Delimiter::None, TokenStream::new())), + TokenTree::Group(Group::new(Delimiter::None, quote!(f))), + TokenTree::Punct(Punct::new(':', Spacing::Alone)), + TokenTree::Group(Group::new(Delimiter::Parenthesis, TokenStream::new())), + ]), + )), + ]); + + snapshot!(tokens as DeriveInput, @r#" + DeriveInput { + vis: Visibility::Inherited, + ident: "S", + generics: Generics, + data: Data::Struct { + fields: Fields::Named { + named: [ + Field { + vis: Visibility::Inherited, + ident: Some("f"), + colon_token: Some, + ty: Type::Tuple, + }, + ], + }, + }, + } + "#); +} + +#[test] +fn test_inherited_vis_unnamed_field() { + // mimics `struct S($vis $ty);` where $vis is empty + let tokens = TokenStream::from_iter([ + TokenTree::Ident(Ident::new("struct", Span::call_site())), + TokenTree::Ident(Ident::new("S", Span::call_site())), + TokenTree::Group(Group::new( + Delimiter::Parenthesis, + TokenStream::from_iter([ + TokenTree::Group(Group::new(Delimiter::None, TokenStream::new())), + TokenTree::Group(Group::new(Delimiter::None, quote!(str))), + ]), + )), + TokenTree::Punct(Punct::new(';', Spacing::Alone)), + ]); + + snapshot!(tokens as DeriveInput, @r#" + DeriveInput { + vis: Visibility::Inherited, + ident: "S", + generics: Generics, + data: Data::Struct { + fields: Fields::Unnamed { + unnamed: [ + Field { + vis: Visibility::Inherited, + ty: Type::Group { + elem: Type::Path { + path: Path { + segments: [ + PathSegment { + ident: "str", + }, + ], + }, + }, + }, + }, + ], + }, + semi_token: Some, + }, + } + "#); +} diff --git a/vendor/syn/tests/zzz_stable.rs b/vendor/syn/tests/zzz_stable.rs new file mode 100644 index 00000000000000..a1a670d9edeea1 --- /dev/null +++ b/vendor/syn/tests/zzz_stable.rs @@ -0,0 +1,33 @@ +#![cfg(syn_disable_nightly_tests)] + +use std::io::{self, Write}; +use termcolor::{Color, ColorChoice, ColorSpec, StandardStream, WriteColor}; + +const MSG: &str = "\ +‖ +‖ WARNING: +‖ This is not a nightly compiler so not all tests were able to +‖ run. 
Syn includes tests that compare Syn's parser against the +‖ compiler's parser, which requires access to unstable librustc +‖ data structures and a nightly compiler. +‖ +"; + +#[test] +fn notice() -> io::Result<()> { + let header = "WARNING"; + let index_of_header = MSG.find(header).unwrap(); + let before = &MSG[..index_of_header]; + let after = &MSG[index_of_header + header.len()..]; + + let mut stderr = StandardStream::stderr(ColorChoice::Auto); + stderr.set_color(ColorSpec::new().set_fg(Some(Color::Yellow)))?; + write!(&mut stderr, "{}", before)?; + stderr.set_color(ColorSpec::new().set_bold(true).set_fg(Some(Color::Yellow)))?; + write!(&mut stderr, "{}", header)?; + stderr.set_color(ColorSpec::new().set_fg(Some(Color::Yellow)))?; + write!(&mut stderr, "{}", after)?; + stderr.reset()?; + + Ok(()) +} diff --git a/vendor/unicode-ident/.cargo-checksum.json b/vendor/unicode-ident/.cargo-checksum.json new file mode 100644 index 00000000000000..1eb9ea6340947c --- /dev/null +++ b/vendor/unicode-ident/.cargo-checksum.json @@ -0,0 +1 @@ +{"files":{".cargo_vcs_info.json":"776471b17558966a9555dd89a2b08f260fe63b13f6da824632175db9f6522ae8",".github/FUNDING.yml":"b017158736b3c9751a2d21edfce7fe61c8954e2fced8da8dd3013c2f3e295bd9",".github/workflows/ci.yml":"173314c15e4d92ca66030916da40438384de3125400464d2e1957826bf5c01a6","Cargo.lock":"9e475b8f7b444cf44cfe12774442b59716d5c16ea03914340ea3b43500d80aa0","Cargo.toml":"ed14e5fd17333842d830900782e2b4e38aaf3967f540b7635a45c09451fffa01","Cargo.toml.orig":"88664595aa541de6ba77d30d7d71b7f4a533c15c2bb9da5d4ab9bc5e46ef0d56","LICENSE-APACHE":"62c7a1e35f56406896d7aa7ca52d0cc0d272ac022b5d2796e7d6905db8a3636a","LICENSE-MIT":"23f18e03dc49df91622fe2a76176497404e46ced8a715d9d2b67a7446571cca3","LICENSE-UNICODE":"f7db81051789b729fea528a63ec4c938fdcb93d9d61d97dc8cc2e9df6d47f2a1","README.md":"2fd3a0c6c9abd2c042fb319221f9ef2c95728a6e3725b0f067e4d315c5dbfe5a","benches/xid.rs":"7eb058c1140a253f7177af8868a95aabb1b92c52dc7eee5abbfeadb507d7845d","src/lib.rs":"3254e755eaf22e9e7f347439df7e3fc882e582a874cd5832e7f49478dd973799","src/tables.rs":"96d345fb3df2dc6718a6fe15cac1fcd21b7f006d5fb531dac7a5d0a0711e16bb","tests/compare.rs":"f2311271aa1db7380e5bf153ef83ee99777e14579e4f28c2b1a3e21877ffe715","tests/fst/.gitignore":"2cd419079c0a08bb15766520880998651dd1c72c55347a31f43357595b16ac10","tests/fst/mod.rs":"69a3aaf59acd8bca962ecc6234be56be8c0934ab79b253162f10eb881523901f","tests/fst/xid_continue.fst":"b58be4f0c498253e7a5ac664046096f15f249da66131347d4b822097623549a2","tests/fst/xid_start.fst":"aec7eecdacfce308d2e6210f47d28ed3aad5c8b048efbfbc11fc22e60fbf435b","tests/roaring/mod.rs":"f5c6d55463a7f53e92a493cf046d717149250fbafc0e0fe94bdb531377bf8b11","tests/static_size.rs":"52763dc203f211561d2ef150e8f970650459820bdc20ae18c6c046876b99cd2a","tests/tables/mod.rs":"e6949172d10fc4b2431ce7546269bfd4f9146454c8c3e31faf5e5d80c16a8ab6","tests/tables/tables.rs":"302d87306100b6280f8db93e167dc70c47f724045cf1312b7354683656c3f36b","tests/trie/mod.rs":"d4acbb716bcbaf80660039797f45e138ed8bbd66749fa3b19b1a971574679cc9","tests/trie/trie.rs":"f7b9edc1e8a98e3be42b653bba27bb4eb5fc48a559d6d8d1c6a4db4b5425b0d5"},"package":"9312f7c4f6ff9069b165498234ce8be658059c6728633667c526e27dc2cf1df5"} \ No newline at end of file diff --git a/vendor/unicode-ident/.cargo_vcs_info.json b/vendor/unicode-ident/.cargo_vcs_info.json new file mode 100644 index 00000000000000..b80f22af9dd1ad --- /dev/null +++ b/vendor/unicode-ident/.cargo_vcs_info.json @@ -0,0 +1,6 @@ +{ + "git": { + "sha1": "10d5e534c9e06fffcdc6896d4779ffb25641659b" + }, + 
"path_in_vcs": "" +} \ No newline at end of file diff --git a/vendor/unicode-ident/.github/FUNDING.yml b/vendor/unicode-ident/.github/FUNDING.yml new file mode 100644 index 00000000000000..750707701cdae9 --- /dev/null +++ b/vendor/unicode-ident/.github/FUNDING.yml @@ -0,0 +1 @@ +github: dtolnay diff --git a/vendor/unicode-ident/.github/workflows/ci.yml b/vendor/unicode-ident/.github/workflows/ci.yml new file mode 100644 index 00000000000000..dc92f2ebafa3df --- /dev/null +++ b/vendor/unicode-ident/.github/workflows/ci.yml @@ -0,0 +1,110 @@ +name: CI + +on: + push: + pull_request: + workflow_dispatch: + schedule: [cron: "40 1 * * *"] + +permissions: + contents: read + +env: + RUSTFLAGS: -Dwarnings + +jobs: + pre_ci: + uses: dtolnay/.github/.github/workflows/pre_ci.yml@master + + unicode: + name: latest Unicode + runs-on: ubuntu-latest + if: github.event_name != 'pull_request' + timeout-minutes: 45 + steps: + - uses: actions/checkout@v5 + - uses: dtolnay/rust-toolchain@stable + - id: ucd-generate + run: echo "version=$(grep 'ucd-generate [0-9]\+\.[0-9]\+\.[0-9]\+' tests/tables/tables.rs --only-matching)" >> $GITHUB_OUTPUT + - run: cargo install ucd-generate + - run: curl https://www.unicode.org/Public/latest/ucd/UCD.zip --location --remote-name --silent --show-error --fail --retry 2 + - run: unzip UCD.zip -d UCD + - run: ucd-generate property-bool UCD --include XID_Start,XID_Continue > tests/tables/tables.rs + - run: ucd-generate property-bool UCD --include XID_Start,XID_Continue --fst-dir tests/fst + - run: ucd-generate property-bool UCD --include XID_Start,XID_Continue --trie-set > tests/trie/trie.rs + - run: cargo run --manifest-path generate/Cargo.toml + - run: sed --in-place 's/ucd-generate [0-9]\+\.[0-9]\+\.[0-9]\+/${{steps.ucd-generate.outputs.version}}/' tests/tables/tables.rs tests/trie/trie.rs + - run: git diff --exit-code + + test: + name: Rust ${{matrix.rust}} + needs: pre_ci + if: needs.pre_ci.outputs.continue + runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + rust: [nightly, beta, stable, 1.81.0] + timeout-minutes: 45 + steps: + - uses: actions/checkout@v5 + - uses: dtolnay/rust-toolchain@master + with: + toolchain: ${{matrix.rust}} + - name: Enable type layout randomization + run: echo RUSTFLAGS=${RUSTFLAGS}\ -Zrandomize-layout >> $GITHUB_ENV + if: matrix.rust == 'nightly' + - run: cargo test + - run: cargo check --benches + - uses: actions/upload-artifact@v4 + if: matrix.rust == 'nightly' && always() + with: + name: Cargo.lock + path: Cargo.lock + continue-on-error: true + + msrv: + name: Rust 1.31.0 + needs: pre_ci + if: needs.pre_ci.outputs.continue + runs-on: ubuntu-latest + timeout-minutes: 45 + steps: + - uses: actions/checkout@v5 + - uses: dtolnay/rust-toolchain@1.31.0 + - run: cargo check --manifest-path tests/crate/Cargo.toml + + doc: + name: Documentation + needs: pre_ci + if: needs.pre_ci.outputs.continue + runs-on: ubuntu-latest + timeout-minutes: 45 + env: + RUSTDOCFLAGS: -Dwarnings + steps: + - uses: actions/checkout@v5 + - uses: dtolnay/rust-toolchain@nightly + - uses: dtolnay/install@cargo-docs-rs + - run: cargo docs-rs + + clippy: + name: Clippy + runs-on: ubuntu-latest + if: github.event_name != 'pull_request' + timeout-minutes: 45 + steps: + - uses: actions/checkout@v5 + - uses: dtolnay/rust-toolchain@clippy + - run: cargo clippy --tests --benches --workspace -- -Dclippy::all -Dclippy::pedantic + + outdated: + name: Outdated + runs-on: ubuntu-latest + if: github.event_name != 'pull_request' + timeout-minutes: 45 + steps: + - uses: 
actions/checkout@v5 + - uses: dtolnay/rust-toolchain@stable + - uses: dtolnay/install@cargo-outdated + - run: cargo outdated --workspace --exit-code 1 diff --git a/vendor/unicode-ident/Cargo.lock b/vendor/unicode-ident/Cargo.lock new file mode 100644 index 00000000000000..e0bdfda23dd9e2 --- /dev/null +++ b/vendor/unicode-ident/Cargo.lock @@ -0,0 +1,499 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. +[[package]] +name = "aho-corasick" +version = "1.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "memchr 2.7.6 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "anes" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "anstyle" +version = "1.0.13" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "autocfg" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "bytemuck" +version = "1.24.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "byteorder" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "cast" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "cfg-if" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "ciborium" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "ciborium-io 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", + "ciborium-ll 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.228 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "ciborium-io" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "ciborium-ll" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "ciborium-io 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", + "half 2.7.1 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "clap" +version = "4.5.51" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "clap_builder 4.5.51 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "clap_builder" +version = "4.5.51" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "anstyle 1.0.13 (registry+https://github.com/rust-lang/crates.io-index)", + "clap_lex 0.7.6 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "clap_lex" +version = "0.7.6" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "criterion" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "anes 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", + "cast 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", + "ciborium 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", + "clap 4.5.51 (registry+https://github.com/rust-lang/crates.io-index)", + "criterion-plot 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)", + "itertools 0.13.0 (registry+https://github.com/rust-lang/crates.io-index)", + "num-traits 0.2.19 
(registry+https://github.com/rust-lang/crates.io-index)", + "oorandom 11.1.5 (registry+https://github.com/rust-lang/crates.io-index)", + "regex 1.12.2 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.228 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_json 1.0.145 (registry+https://github.com/rust-lang/crates.io-index)", + "tinytemplate 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)", + "walkdir 2.5.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "criterion-plot" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "cast 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", + "itertools 0.13.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "crunchy" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "either" +version = "1.15.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "fst" +version = "0.4.7" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "getrandom" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "cfg-if 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.177 (registry+https://github.com/rust-lang/crates.io-index)", + "r-efi 5.3.0 (registry+https://github.com/rust-lang/crates.io-index)", + "wasip2 1.0.1+wasi-0.2.4 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "half" +version = "2.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "cfg-if 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", + "crunchy 0.2.4 (registry+https://github.com/rust-lang/crates.io-index)", + "zerocopy 0.8.27 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "itertools" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "either 1.15.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "itoa" +version = "1.0.15" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "libc" +version = "0.2.177" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "memchr" +version = "2.7.6" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "num-traits" +version = "0.2.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "autocfg 1.5.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "oorandom" +version = "11.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "ppv-lite86" +version = "0.2.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "zerocopy 0.8.27 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "proc-macro2" +version = "1.0.103" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "unicode-ident 1.0.20 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "quote" +version = "1.0.41" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "proc-macro2 1.0.103 (registry+https://github.com/rust-lang/crates.io-index)", +] + 
+[[package]] +name = "r-efi" +version = "5.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "rand" +version = "0.9.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "rand_chacha 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", + "rand_core 0.9.3 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "rand_chacha" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "ppv-lite86 0.2.21 (registry+https://github.com/rust-lang/crates.io-index)", + "rand_core 0.9.3 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "rand_core" +version = "0.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "getrandom 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "regex" +version = "1.12.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "aho-corasick 1.1.4 (registry+https://github.com/rust-lang/crates.io-index)", + "memchr 2.7.6 (registry+https://github.com/rust-lang/crates.io-index)", + "regex-automata 0.4.13 (registry+https://github.com/rust-lang/crates.io-index)", + "regex-syntax 0.8.8 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "regex-automata" +version = "0.4.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "aho-corasick 1.1.4 (registry+https://github.com/rust-lang/crates.io-index)", + "memchr 2.7.6 (registry+https://github.com/rust-lang/crates.io-index)", + "regex-syntax 0.8.8 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "regex-syntax" +version = "0.8.8" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "roaring" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "bytemuck 1.24.0 (registry+https://github.com/rust-lang/crates.io-index)", + "byteorder 1.5.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "ryu" +version = "1.0.20" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "same-file" +version = "1.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "winapi-util 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "serde" +version = "1.0.228" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "serde_core 1.0.228 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_derive 1.0.228 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "serde_core" +version = "1.0.228" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "serde_derive 1.0.228 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "serde_derive" +version = "1.0.228" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "proc-macro2 1.0.103 (registry+https://github.com/rust-lang/crates.io-index)", + "quote 1.0.41 (registry+https://github.com/rust-lang/crates.io-index)", + "syn 2.0.108 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "serde_json" +version = "1.0.145" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ 
+ "itoa 1.0.15 (registry+https://github.com/rust-lang/crates.io-index)", + "memchr 2.7.6 (registry+https://github.com/rust-lang/crates.io-index)", + "ryu 1.0.20 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.228 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_core 1.0.228 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "syn" +version = "2.0.108" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "proc-macro2 1.0.103 (registry+https://github.com/rust-lang/crates.io-index)", + "quote 1.0.41 (registry+https://github.com/rust-lang/crates.io-index)", + "unicode-ident 1.0.20 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "tinytemplate" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "serde 1.0.228 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_json 1.0.145 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "ucd-trie" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "unicode-ident" +version = "1.0.20" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "unicode-ident" +version = "1.0.22" +dependencies = [ + "criterion 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", + "fst 0.4.7 (registry+https://github.com/rust-lang/crates.io-index)", + "rand 0.9.2 (registry+https://github.com/rust-lang/crates.io-index)", + "roaring 0.11.2 (registry+https://github.com/rust-lang/crates.io-index)", + "ucd-trie 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", + "unicode-xid 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "unicode-xid" +version = "0.2.6" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "walkdir" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "same-file 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi-util 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "wasip2" +version = "1.0.1+wasi-0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "wit-bindgen 0.46.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "winapi-util" +version = "0.1.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "windows-sys 0.61.2 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "windows-link" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "windows-sys" +version = "0.61.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "windows-link 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "wit-bindgen" +version = "0.46.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "zerocopy" +version = "0.8.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "zerocopy-derive 0.8.27 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "zerocopy-derive" +version = "0.8.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "proc-macro2 
1.0.103 (registry+https://github.com/rust-lang/crates.io-index)", + "quote 1.0.41 (registry+https://github.com/rust-lang/crates.io-index)", + "syn 2.0.108 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[metadata] +"checksum aho-corasick 1.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "ddd31a130427c27518df266943a5308ed92d4b226cc639f5a8f1002816174301" +"checksum anes 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)" = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299" +"checksum anstyle 1.0.13 (registry+https://github.com/rust-lang/crates.io-index)" = "5192cca8006f1fd4f7237516f40fa183bb07f8fbdfedaa0036de5ea9b0b45e78" +"checksum autocfg 1.5.0 (registry+https://github.com/rust-lang/crates.io-index)" = "c08606f8c3cbf4ce6ec8e28fb0014a2c086708fe954eaa885384a6165172e7e8" +"checksum bytemuck 1.24.0 (registry+https://github.com/rust-lang/crates.io-index)" = "1fbdf580320f38b612e485521afda1ee26d10cc9884efaaa750d383e13e3c5f4" +"checksum byteorder 1.5.0 (registry+https://github.com/rust-lang/crates.io-index)" = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" +"checksum cast 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" +"checksum cfg-if 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)" = "9330f8b2ff13f34540b44e946ef35111825727b38d33286ef986142615121801" +"checksum ciborium 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "42e69ffd6f0917f5c029256a24d0161db17cea3997d185db0d35926308770f0e" +"checksum ciborium-io 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "05afea1e0a06c9be33d539b876f1ce3692f4afea2cb41f740e7743225ed1c757" +"checksum ciborium-ll 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "57663b653d948a338bfb3eeba9bb2fd5fcfaecb9e199e87e1eda4d9e8b240fd9" +"checksum clap 4.5.51 (registry+https://github.com/rust-lang/crates.io-index)" = "4c26d721170e0295f191a69bd9a1f93efcdb0aff38684b61ab5750468972e5f5" +"checksum clap_builder 4.5.51 (registry+https://github.com/rust-lang/crates.io-index)" = "75835f0c7bf681bfd05abe44e965760fea999a5286c6eb2d59883634fd02011a" +"checksum clap_lex 0.7.6 (registry+https://github.com/rust-lang/crates.io-index)" = "a1d728cc89cf3aee9ff92b05e62b19ee65a02b5702cff7d5a377e32c6ae29d8d" +"checksum criterion 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e1c047a62b0cc3e145fa84415a3191f628e980b194c2755aa12300a4e6cbd928" +"checksum criterion-plot 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = "9b1bcc0dc7dfae599d84ad0b1a55f80cde8af3725da8313b528da95ef783e338" +"checksum crunchy 0.2.4 (registry+https://github.com/rust-lang/crates.io-index)" = "460fbee9c2c2f33933d720630a6a0bac33ba7053db5344fac858d4b8952d77d5" +"checksum either 1.15.0 (registry+https://github.com/rust-lang/crates.io-index)" = "48c757948c5ede0e46177b7add2e67155f70e33c07fea8284df6576da70b3719" +"checksum fst 0.4.7 (registry+https://github.com/rust-lang/crates.io-index)" = "7ab85b9b05e3978cc9a9cf8fea7f01b494e1a09ed3037e16ba39edc7a29eb61a" +"checksum getrandom 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "899def5c37c4fd7b2664648c28120ecec138e4d395b459e5ca34f9cce2dd77fd" +"checksum half 2.7.1 (registry+https://github.com/rust-lang/crates.io-index)" = "6ea2d84b969582b4b1864a92dc5d27cd2b77b622a8d79306834f1be5ba20d84b" +"checksum itertools 0.13.0 (registry+https://github.com/rust-lang/crates.io-index)" = 
"413ee7dfc52ee1a4949ceeb7dbc8a33f2d6c088194d9f922fb8318faf1f01186" +"checksum itoa 1.0.15 (registry+https://github.com/rust-lang/crates.io-index)" = "4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c" +"checksum libc 0.2.177 (registry+https://github.com/rust-lang/crates.io-index)" = "2874a2af47a2325c2001a6e6fad9b16a53b802102b528163885171cf92b15976" +"checksum memchr 2.7.6 (registry+https://github.com/rust-lang/crates.io-index)" = "f52b00d39961fc5b2736ea853c9cc86238e165017a493d1d5c8eac6bdc4cc273" +"checksum num-traits 0.2.19 (registry+https://github.com/rust-lang/crates.io-index)" = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841" +"checksum oorandom 11.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "d6790f58c7ff633d8771f42965289203411a5e5c68388703c06e14f24770b41e" +"checksum ppv-lite86 0.2.21 (registry+https://github.com/rust-lang/crates.io-index)" = "85eae3c4ed2f50dcfe72643da4befc30deadb458a9b590d720cde2f2b1e97da9" +"checksum proc-macro2 1.0.103 (registry+https://github.com/rust-lang/crates.io-index)" = "5ee95bc4ef87b8d5ba32e8b7714ccc834865276eab0aed5c9958d00ec45f49e8" +"checksum quote 1.0.41 (registry+https://github.com/rust-lang/crates.io-index)" = "ce25767e7b499d1b604768e7cde645d14cc8584231ea6b295e9c9eb22c02e1d1" +"checksum r-efi 5.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "69cdb34c158ceb288df11e18b4bd39de994f6657d83847bdffdbd7f346754b0f" +"checksum rand 0.9.2 (registry+https://github.com/rust-lang/crates.io-index)" = "6db2770f06117d490610c7488547d543617b21bfa07796d7a12f6f1bd53850d1" +"checksum rand_chacha 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d3022b5f1df60f26e1ffddd6c66e8aa15de382ae63b3a0c1bfc0e4d3e3f325cb" +"checksum rand_core 0.9.3 (registry+https://github.com/rust-lang/crates.io-index)" = "99d9a13982dcf210057a8a78572b2217b667c3beacbf3a0d8b454f6f82837d38" +"checksum regex 1.12.2 (registry+https://github.com/rust-lang/crates.io-index)" = "843bc0191f75f3e22651ae5f1e72939ab2f72a4bc30fa80a066bd66edefc24d4" +"checksum regex-automata 0.4.13 (registry+https://github.com/rust-lang/crates.io-index)" = "5276caf25ac86c8d810222b3dbb938e512c55c6831a10f3e6ed1c93b84041f1c" +"checksum regex-syntax 0.8.8 (registry+https://github.com/rust-lang/crates.io-index)" = "7a2d987857b319362043e95f5353c0535c1f58eec5336fdfcf626430af7def58" +"checksum roaring 0.11.2 (registry+https://github.com/rust-lang/crates.io-index)" = "f08d6a905edb32d74a5d5737a0c9d7e950c312f3c46cb0ca0a2ca09ea11878a0" +"checksum ryu 1.0.20 (registry+https://github.com/rust-lang/crates.io-index)" = "28d3b2b1366ec20994f1fd18c3c594f05c5dd4bc44d8bb0c1c632c8d6829481f" +"checksum same-file 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)" = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502" +"checksum serde 1.0.228 (registry+https://github.com/rust-lang/crates.io-index)" = "9a8e94ea7f378bd32cbbd37198a4a91436180c5bb472411e48b5ec2e2124ae9e" +"checksum serde_core 1.0.228 (registry+https://github.com/rust-lang/crates.io-index)" = "41d385c7d4ca58e59fc732af25c3983b67ac852c1a25000afe1175de458b67ad" +"checksum serde_derive 1.0.228 (registry+https://github.com/rust-lang/crates.io-index)" = "d540f220d3187173da220f885ab66608367b6574e925011a9353e4badda91d79" +"checksum serde_json 1.0.145 (registry+https://github.com/rust-lang/crates.io-index)" = "402a6f66d8c709116cf22f558eab210f5a50187f702eb4d7e5ef38d9a7f1c79c" +"checksum syn 2.0.108 (registry+https://github.com/rust-lang/crates.io-index)" = 
"da58917d35242480a05c2897064da0a80589a2a0476c9a3f2fdc83b53502e917" +"checksum tinytemplate 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "be4d6b5f19ff7664e8c98d03e2139cb510db9b0a60b55f8e8709b689d939b6bc" +"checksum ucd-trie 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)" = "2896d95c02a80c6d6a5d6e953d479f5ddf2dfdb6a244441010e373ac0fb88971" +"checksum unicode-ident 1.0.20 (registry+https://github.com/rust-lang/crates.io-index)" = "462eeb75aeb73aea900253ce739c8e18a67423fadf006037cd3ff27e82748a06" +"checksum unicode-xid 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)" = "ebc1c04c71510c7f702b52b7c350734c9ff1295c464a03335b00bb84fc54f853" +"checksum walkdir 2.5.0 (registry+https://github.com/rust-lang/crates.io-index)" = "29790946404f91d9c5d06f9874efddea1dc06c5efe94541a7d6863108e3a5e4b" +"checksum wasip2 1.0.1+wasi-0.2.4 (registry+https://github.com/rust-lang/crates.io-index)" = "0562428422c63773dad2c345a1882263bbf4d65cf3f42e90921f787ef5ad58e7" +"checksum winapi-util 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)" = "c2a7b1c03c876122aa43f3020e6c3c3ee5c05081c9a00739faf7503aeba10d22" +"checksum windows-link 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "f0805222e57f7521d6a62e36fa9163bc891acd422f971defe97d64e70d0a4fe5" +"checksum windows-sys 0.61.2 (registry+https://github.com/rust-lang/crates.io-index)" = "ae137229bcbd6cdf0f7b80a31df61766145077ddf49416a728b02cb3921ff3fc" +"checksum wit-bindgen 0.46.0 (registry+https://github.com/rust-lang/crates.io-index)" = "f17a85883d4e6d00e8a97c586de764dabcc06133f7f1d55dce5cdc070ad7fe59" +"checksum zerocopy 0.8.27 (registry+https://github.com/rust-lang/crates.io-index)" = "0894878a5fa3edfd6da3f88c4805f4c8558e2b996227a3d864f47fe11e38282c" +"checksum zerocopy-derive 0.8.27 (registry+https://github.com/rust-lang/crates.io-index)" = "88d2b8d9c68ad2b9e4340d7832716a4d21a22a1154777ad56ea55c51a9cf3831" diff --git a/vendor/unicode-ident/Cargo.toml b/vendor/unicode-ident/Cargo.toml new file mode 100644 index 00000000000000..3bdaced0399275 --- /dev/null +++ b/vendor/unicode-ident/Cargo.toml @@ -0,0 +1,84 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies. +# +# If you are reading this file be aware that the original Cargo.toml +# will likely look very different (and much more reasonable). +# See Cargo.toml.orig for the original contents. 
+ +[package] +edition = "2018" +rust-version = "1.31" +name = "unicode-ident" +version = "1.0.22" +authors = ["David Tolnay <dtolnay@gmail.com>"] +build = false +autolib = false +autobins = false +autoexamples = false +autotests = false +autobenches = false +description = "Determine whether characters have the XID_Start or XID_Continue properties according to Unicode Standard Annex #31" +documentation = "https://docs.rs/unicode-ident" +readme = "README.md" +keywords = [ + "unicode", + "xid", +] +categories = [ + "development-tools::procedural-macro-helpers", + "no-std", + "no-std::no-alloc", +] +license = "(MIT OR Apache-2.0) AND Unicode-3.0" +repository = "https://github.com/dtolnay/unicode-ident" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] +rustdoc-args = [ + "--generate-link-to-definition", + "--generate-macro-expansion", + "--extern-html-root-url=core=https://doc.rust-lang.org", + "--extern-html-root-url=alloc=https://doc.rust-lang.org", + "--extern-html-root-url=std=https://doc.rust-lang.org", +] + +[lib] +name = "unicode_ident" +path = "src/lib.rs" + +[[test]] +name = "compare" +path = "tests/compare.rs" + +[[test]] +name = "static_size" +path = "tests/static_size.rs" + +[[bench]] +name = "xid" +path = "benches/xid.rs" +harness = false + +[dev-dependencies.criterion] +version = "0.7" +default-features = false + +[dev-dependencies.fst] +version = "0.4" + +[dev-dependencies.rand] +version = "0.9" + +[dev-dependencies.roaring] +version = "0.11" + +[dev-dependencies.ucd-trie] +version = "0.1" +default-features = false + +[dev-dependencies.unicode-xid] +version = "0.2.6" diff --git a/vendor/unicode-ident/LICENSE-APACHE b/vendor/unicode-ident/LICENSE-APACHE new file mode 100644 index 00000000000000..1b5ec8b78e237b --- /dev/null +++ b/vendor/unicode-ident/LICENSE-APACHE @@ -0,0 +1,176 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS diff --git a/vendor/unicode-ident/LICENSE-MIT b/vendor/unicode-ident/LICENSE-MIT new file mode 100644 index 00000000000000..31aa79387f27e7 --- /dev/null +++ b/vendor/unicode-ident/LICENSE-MIT @@ -0,0 +1,23 @@ +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. diff --git a/vendor/unicode-ident/LICENSE-UNICODE b/vendor/unicode-ident/LICENSE-UNICODE new file mode 100644 index 00000000000000..11f2842a303a79 --- /dev/null +++ b/vendor/unicode-ident/LICENSE-UNICODE @@ -0,0 +1,39 @@ +UNICODE LICENSE V3 + +COPYRIGHT AND PERMISSION NOTICE + +Copyright © 1991-2023 Unicode, Inc. + +NOTICE TO USER: Carefully read the following legal agreement. BY +DOWNLOADING, INSTALLING, COPYING OR OTHERWISE USING DATA FILES, AND/OR +SOFTWARE, YOU UNEQUIVOCALLY ACCEPT, AND AGREE TO BE BOUND BY, ALL OF THE +TERMS AND CONDITIONS OF THIS AGREEMENT. IF YOU DO NOT AGREE, DO NOT +DOWNLOAD, INSTALL, COPY, DISTRIBUTE OR USE THE DATA FILES OR SOFTWARE. 
+ +Permission is hereby granted, free of charge, to any person obtaining a +copy of data files and any associated documentation (the "Data Files") or +software and any associated documentation (the "Software") to deal in the +Data Files or Software without restriction, including without limitation +the rights to use, copy, modify, merge, publish, distribute, and/or sell +copies of the Data Files or Software, and to permit persons to whom the +Data Files or Software are furnished to do so, provided that either (a) +this copyright and permission notice appear with all copies of the Data +Files or Software, or (b) this copyright and permission notice appear in +associated Documentation. + +THE DATA FILES AND SOFTWARE ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY +KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF +THIRD PARTY RIGHTS. + +IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS INCLUDED IN THIS NOTICE +BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, +OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, +ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THE DATA +FILES OR SOFTWARE. + +Except as contained in this notice, the name of a copyright holder shall +not be used in advertising or otherwise to promote the sale, use or other +dealings in these Data Files or Software without prior written +authorization of the copyright holder. diff --git a/vendor/unicode-ident/README.md b/vendor/unicode-ident/README.md new file mode 100644 index 00000000000000..2e4668ef62528c --- /dev/null +++ b/vendor/unicode-ident/README.md @@ -0,0 +1,274 @@ +Unicode ident +============= + +[<img alt="github" src="https://img.shields.io/badge/github-dtolnay/unicode--ident-8da0cb?style=for-the-badge&labelColor=555555&logo=github" height="20">](https://github.com/dtolnay/unicode-ident) +[<img alt="crates.io" src="https://img.shields.io/crates/v/unicode-ident.svg?style=for-the-badge&color=fc8d62&logo=rust" height="20">](https://crates.io/crates/unicode-ident) +[<img alt="docs.rs" src="https://img.shields.io/badge/docs.rs-unicode--ident-66c2a5?style=for-the-badge&labelColor=555555&logo=docs.rs" height="20">](https://docs.rs/unicode-ident) +[<img alt="build status" src="https://img.shields.io/github/actions/workflow/status/dtolnay/unicode-ident/ci.yml?branch=master&style=for-the-badge" height="20">](https://github.com/dtolnay/unicode-ident/actions?query=branch%3Amaster) + +Implementation of [Unicode Standard Annex #31][tr31] for determining which +`char` values are valid in programming language identifiers. + +[tr31]: https://www.unicode.org/reports/tr31/ + +This crate is a better optimized implementation of the older `unicode-xid` +crate. This crate uses less static storage, and is able to classify both ASCII +and non-ASCII codepoints with better performance, 6× faster than +`unicode-xid`. + +<br> + +## Comparison of performance + +The following table shows a comparison between five Unicode identifier +implementations. + +- `unicode-ident` is this crate; +- [`unicode-xid`] is a widely used crate run by the "unicode-rs" org; +- `ucd-trie` and `fst` are two data structures supported by the [`ucd-generate`] tool; +- [`roaring`] is a Rust implementation of Roaring bitmap. 
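Before the measurements, a minimal usage sketch (an editorial addition, not part of the vendored README) built only on the two functions that `src/lib.rs` exports later in this patch, `is_xid_start` and `is_xid_continue`; the `is_ident` helper and its leading-underscore rule are illustrative assumptions, not crate API:

```rust
// Sketch: classify a whole string as a UAX #31 identifier with the crate's
// two exported predicates. Accepting a leading '_' is a common language-level
// convention; '_' itself has XID_Continue but not XID_Start.
fn is_ident(s: &str) -> bool {
    let mut chars = s.chars();
    match chars.next() {
        Some(first) => {
            (first == '_' || unicode_ident::is_xid_start(first))
                && chars.all(unicode_ident::is_xid_continue)
        }
        None => false,
    }
}

fn main() {
    assert!(is_ident("Löwe_123")); // non-ASCII start; digits and '_' may continue
    assert!(!is_ident("1abc"));    // digits are XID_Continue but not XID_Start
}
```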
+ +The *static storage* column shows the total size of `static` tables that the +crate bakes into your binary, measured in 1000s of bytes. + +The remaining columns show the **cost per call** to evaluate whether a single +`char` has the XID\_Start or XID\_Continue Unicode property, comparing across +different ratios of ASCII to non-ASCII codepoints in the input data. + +[`unicode-xid`]: https://github.com/unicode-rs/unicode-xid +[`ucd-generate`]: https://github.com/BurntSushi/ucd-generate +[`roaring`]: https://github.com/RoaringBitmap/roaring-rs + +| | static storage | 0% nonascii | 1% | 10% | 100% nonascii | +|---|---|---|---|---|---| +| **`unicode-ident`** | 10.3 K | 0.41 ns | 0.44 ns | 0.44 ns | 0.93 ns | +| **`unicode-xid`** | 12.0 K | 2.43 ns | 2.50 ns | 2.85 ns | 8.65 ns | +| **`ucd-trie`** | 10.4 K | 1.28 ns | 1.25 ns | 1.20 ns | 1.97 ns | +| **`fst`** | 144 K | 50.9 ns | 51.0 ns | 48.5 ns | 26.7 ns | +| **`roaring`** | 66.1 K | 4.28 ns | 4.22 ns | 4.25 ns | 4.61 ns | + +Source code for the benchmark is provided in the *bench* directory of this repo +and may be repeated by running `cargo criterion`. + +<br> + +## Comparison of data structures + +#### unicode-xid + +They use a sorted array of character ranges, and do a binary search to look up +whether a given character lands inside one of those ranges. + +```rust +static XID_Continue_table: [(char, char); 763] = [ + ('\u{30}', '\u{39}'), // 0-9 + ('\u{41}', '\u{5a}'), // A-Z + … + ('\u{e0100}', '\u{e01ef}'), +]; +``` + +The static storage used by this data structure scales with the number of +contiguous ranges of identifier codepoints in Unicode. Every table entry +consumes 8 bytes, because it consists of a pair of 32-bit `char` values. + +In some ranges of the Unicode codepoint space, this is quite a sparse +representation – there are some ranges where tens of thousands of adjacent +codepoints are all valid identifier characters. In other places, the +representation is quite inefficient. A characater like `µ` (U+00B5) which is +surrounded by non-identifier codepoints consumes 64 bits in the table, while it +would be just 1 bit in a dense bitmap. + +On a system with 64-byte cache lines, binary searching the table touches 7 cache +lines on average. Each cache line fits only 8 table entries. Additionally, the +branching performed during the binary search is probably mostly unpredictable to +the branch predictor. + +Overall, the crate ends up being about 6× slower on non-ASCII input +compared to the fastest crate. + +A potential improvement would be to pack the table entries more compactly. +Rust's `char` type is a 21-bit integer padded to 32 bits, which means every +table entry is holding 22 bits of wasted space, adding up to 3.9 K. They could +instead fit every table entry into 6 bytes, leaving out some of the padding, for +a 25% improvement in space used. With some cleverness it may be possible to fit +in 5 bytes or even 4 bytes by storing a low char and an extent, instead of low +char and high char. I don't expect that performance would improve much but this +could be the most efficient for space across all the libraries, needing only +about 7 K to store. + +#### ucd-trie + +Their data structure is a compressed trie set specifically tailored for Unicode +codepoints. The design is credited to Raph Levien in [rust-lang/rust#33098]. 
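Before the `TrieSet` layout shown next, an illustrative sketch (an editorial addition, not upstream `unicode-xid` code) of the range-table lookup described above: a binary search over a sorted array of non-overlapping inclusive `(lo, hi)` ranges, with a three-entry stand-in for the real 763-entry `XID_Continue_table`:

```rust
use std::cmp::Ordering;

// A hit is any range whose bounds bracket the queried char; the comparator
// orders each 8-byte (char, char) entry relative to the target.
fn in_ranges(table: &[(char, char)], ch: char) -> bool {
    table
        .binary_search_by(|&(lo, hi)| {
            if hi < ch {
                Ordering::Less // whole range lies below ch
            } else if lo > ch {
                Ordering::Greater // whole range lies above ch
            } else {
                Ordering::Equal // lo <= ch <= hi
            }
        })
        .is_ok()
}

fn main() {
    let table = [('0', '9'), ('A', 'Z'), ('a', 'z')]; // stand-in, not the real table
    assert!(in_ranges(&table, 'q'));
    assert!(!in_ranges(&table, '-'));
}
```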
+ +[rust-lang/rust#33098]: https://github.com/rust-lang/rust/pull/33098 + +```rust +pub struct TrieSet { + tree1_level1: &'static [u64; 32], + tree2_level1: &'static [u8; 992], + tree2_level2: &'static [u64], + tree3_level1: &'static [u8; 256], + tree3_level2: &'static [u8], + tree3_level3: &'static [u64], +} +``` + +It represents codepoint sets using a trie to achieve prefix compression. The +final states of the trie are embedded in leaves or "chunks", where each chunk is +a 64-bit integer. Each bit position of the integer corresponds to whether a +particular codepoint is in the set or not. These chunks are not just a compact +representation of the final states of the trie, but are also a form of suffix +compression. In particular, if multiple ranges of 64 contiguous codepoints have +the same Unicode properties, then they all map to the same chunk in the final +level of the trie. + +Being tailored for Unicode codepoints, this trie is partitioned into three +disjoint sets: tree1, tree2, tree3. The first set corresponds to codepoints \[0, +0x800), the second \[0x800, 0x10000) and the third \[0x10000, 0x110000). These +partitions conveniently correspond to the space of 1 or 2 byte UTF-8 encoded +codepoints, 3 byte UTF-8 encoded codepoints and 4 byte UTF-8 encoded codepoints, +respectively. + +Lookups in this data structure are significantly more efficient than binary +search. A lookup touches either 1, 2, or 3 cache lines based on which of the +trie partitions is being accessed. + +One possible performance improvement would be for this crate to expose a way to +query based on a UTF-8 encoded string, returning the Unicode property +corresponding to the first character in the string. Without such an API, the +caller is required to tokenize their UTF-8 encoded input data into `char`, hand +the `char` into `ucd-trie`, only for `ucd-trie` to undo that work by converting +back into the variable-length representation for trie traversal. + +#### fst + +Uses a [finite state transducer][fst]. This representation is built into +[ucd-generate] but I am not aware of any advantage over the `ucd-trie` +representation. In particular `ucd-trie` is optimized for storing Unicode +properties while `fst` is not. + +[fst]: https://github.com/BurntSushi/fst +[ucd-generate]: https://github.com/BurntSushi/ucd-generate + +As far as I can tell, the main thing that causes `fst` to have large size and +slow lookups for this use case relative to `ucd-trie` is that it does not +specialize for the fact that only 21 of the 32 bits in a `char` are meaningful. +There are some dense arrays in the structure with large ranges that could never +possibly be used. + +#### roaring + +This crate is a pure-Rust implementation of [Roaring Bitmap], a data structure +designed for storing sets of 32-bit unsigned integers. + +[Roaring Bitmap]: https://roaringbitmap.org/about/ + +Roaring bitmaps are compressed bitmaps which tend to outperform conventional +compressed bitmaps such as WAH, EWAH or Concise. In some instances, they can be +hundreds of times faster and they often offer significantly better compression. + +In this use case the performance was reasonably competitive but still +substantially slower than the Unicode-optimized crates. Meanwhile the +compression was significantly worse, requiring 6× as much storage for the +data structure. + +I also benchmarked the [`croaring`] crate which is an FFI wrapper around the C +reference implementation of Roaring Bitmap. 
This crate was consistently about +15% slower than pure-Rust `roaring`, which could just be FFI overhead. I did not +investigate further. + +[`croaring`]: https://crates.io/crates/croaring + +#### unicode-ident + +This crate is most similar to the `ucd-trie` library, in that it's based on +bitmaps stored in the leafs of a trie representation, achieving both prefix +compression and suffix compression. + +The key differences are: + +- Uses a single 2-level trie, rather than 3 disjoint partitions of different + depth each. +- Uses significantly larger chunks: 512 bits rather than 64 bits. +- Compresses the XID\_Start and XID\_Continue properties together + simultaneously, rather than duplicating identical trie leaf chunks across the + two. + +The following diagram show the XID\_Start and XID\_Continue Unicode boolean +properties in uncompressed form, in row-major order: + +<table> +<tr><th>XID_Start</th><th>XID_Continue</th></tr> +<tr> +<td><img alt="XID_Start bitmap" width="256" src="https://user-images.githubusercontent.com/1940490/168647353-c6eeb922-afec-49b2-9ef5-c03e9d1e0760.png"></td> +<td><img alt="XID_Continue bitmap" width="256" src="https://user-images.githubusercontent.com/1940490/168647367-f447cca7-2362-4d7d-8cd7-d21c011d329b.png"></td> +</tr> +</table> + +Uncompressed, these would take 140 K to store, which is beyond what would be +reasonable. However, as you can see there is a large degree of similarity +between the two bitmaps and across the rows, which lends well to compression. + +This crate stores one 512-bit "row" of the above bitmaps in the leaf level of a +trie, and a single additional level to index into the leafs. It turns out there +are 124 unique 512-bit chunks across the two bitmaps so 7 bits are sufficient to +index them. + +The chunk size of 512 bits is selected as the size that minimizes the total size +of the data structure. A smaller chunk, like 256 or 128 bits, would achieve +better deduplication but require a larger index. A larger chunk would increase +redundancy in the leaf bitmaps. 512 bit chunks are the optimum for total size of +the index plus leaf bitmaps. + +In fact since there are only 124 unique chunks, we can use an 8-bit index with a +spare bit to index at the half-chunk level. This achieves an additional 8.5% +compression by eliminating redundancies between the second half of any chunk and +the first half of any other chunk. Note that this is not the same as using +chunks which are half the size, because it does not necessitate raising the size +of the trie's first level. + +In contrast to binary search or the `ucd-trie` crate, performing lookups in this +data structure is straight-line code with no need for branching. + +```asm +is_xid_start: + mov eax, edi + mov ecx, offset unicode_ident::ZERO + shr eax, 9 + cmp edi, 210432 + lea rax, [rax + unicode_ident::tables::TRIE_START] + cmovb rcx, rax + movzx eax, byte ptr [rcx] + mov ecx, 1539 + bextr ecx, edi, ecx + and edi, 7 + shl eax, 5 + movzx eax, byte ptr [rax + rcx + unicode_ident::tables::LEAF] + bt eax, edi + setb al + ret +``` + +<br> + +## License + +Use of the Unicode Character Database, as this crate does, is governed by the <a +href="LICENSE-UNICODE">Unicode license</a>. + +All intellectual property within this crate that is **not generated** using the +Unicode Character Database as input is licensed under either of <a +href="LICENSE-APACHE">Apache License, Version 2.0</a> or <a +href="LICENSE-MIT">MIT license</a> at your option. 
+ +The **generated** files incorporate tabular data derived from the Unicode +Character Database, together with intellectual property from the original source +code content of the crate. One must comply with the terms of both the Unicode +License Agreement and either of the Apache license or MIT license when those +generated files are involved. + +Unless you explicitly state otherwise, any contribution intentionally submitted +for inclusion in this crate by you, as defined in the Apache-2.0 license, shall +be licensed as just described, without any additional terms or conditions. diff --git a/vendor/unicode-ident/benches/xid.rs b/vendor/unicode-ident/benches/xid.rs new file mode 100644 index 00000000000000..fc7b48e44b7334 --- /dev/null +++ b/vendor/unicode-ident/benches/xid.rs @@ -0,0 +1,126 @@ +// To run: `cargo criterion` +// +// This benchmarks each of the different libraries at several ratios of ASCII to +// non-ASCII content. There is one additional benchmark labeled "baseline" which +// just iterates over characters in a string, converting UTF-8 to 32-bit chars. +// +// Criterion will show a time in milliseconds. The non-baseline bench functions +// each make one million function calls (2 calls per character, 500K characters +// in the strings created by gen_string). The "time per call" listed in our +// readme is computed by subtracting this baseline from the other bench +// functions' time, then dividing by one million (ms -> ns). + +#![allow( + clippy::incompatible_msrv, // https://github.com/rust-lang/rust-clippy/issues/12257 + clippy::needless_pass_by_value, +)] + +#[path = "../tests/fst/mod.rs"] +mod fst; +#[path = "../tests/roaring/mod.rs"] +mod roaring; +#[path = "../tests/trie/mod.rs"] +mod trie; + +use criterion::{criterion_group, criterion_main, Criterion}; +use rand::distr::{Bernoulli, Distribution, Uniform}; +use rand::rngs::SmallRng; +use rand::SeedableRng; +use std::hint::black_box; +use std::time::Duration; + +fn gen_string(p_nonascii: u32) -> String { + let mut rng = SmallRng::from_seed([b'!'; 32]); + let pick_nonascii = Bernoulli::from_ratio(p_nonascii, 100).unwrap(); + let ascii = Uniform::new_inclusive('\0', '\x7f').unwrap(); + let nonascii = Uniform::new_inclusive(0x80 as char, char::MAX).unwrap(); + + let mut string = String::new(); + for _ in 0..500_000 { + let distribution = if pick_nonascii.sample(&mut rng) { + nonascii + } else { + ascii + }; + string.push(distribution.sample(&mut rng)); + } + + string +} + +fn bench(c: &mut Criterion, group_name: &str, string: String) { + let mut group = c.benchmark_group(group_name); + group.measurement_time(Duration::from_secs(10)); + group.bench_function("baseline", |b| { + b.iter(|| { + for ch in string.chars() { + black_box(ch); + } + }); + }); + group.bench_function("unicode-ident", |b| { + b.iter(|| { + for ch in string.chars() { + black_box(unicode_ident::is_xid_start(ch)); + black_box(unicode_ident::is_xid_continue(ch)); + } + }); + }); + group.bench_function("unicode-xid", |b| { + b.iter(|| { + for ch in string.chars() { + black_box(unicode_xid::UnicodeXID::is_xid_start(ch)); + black_box(unicode_xid::UnicodeXID::is_xid_continue(ch)); + } + }); + }); + group.bench_function("ucd-trie", |b| { + b.iter(|| { + for ch in string.chars() { + black_box(trie::XID_START.contains_char(ch)); + black_box(trie::XID_CONTINUE.contains_char(ch)); + } + }); + }); + group.bench_function("fst", |b| { + let xid_start_fst = fst::xid_start_fst(); + let xid_continue_fst = fst::xid_continue_fst(); + b.iter(|| { + for ch in string.chars() { 
+ let ch_bytes = (ch as u32).to_be_bytes(); + black_box(xid_start_fst.contains(ch_bytes)); + black_box(xid_continue_fst.contains(ch_bytes)); + } + }); + }); + group.bench_function("roaring", |b| { + let xid_start_bitmap = roaring::xid_start_bitmap(); + let xid_continue_bitmap = roaring::xid_continue_bitmap(); + b.iter(|| { + for ch in string.chars() { + black_box(xid_start_bitmap.contains(ch as u32)); + black_box(xid_continue_bitmap.contains(ch as u32)); + } + }); + }); + group.finish(); +} + +fn bench0(c: &mut Criterion) { + bench(c, "0%-nonascii", gen_string(0)); +} + +fn bench1(c: &mut Criterion) { + bench(c, "1%-nonascii", gen_string(1)); +} + +fn bench10(c: &mut Criterion) { + bench(c, "10%-nonascii", gen_string(10)); +} + +fn bench100(c: &mut Criterion) { + bench(c, "100%-nonascii", gen_string(100)); +} + +criterion_group!(benches, bench0, bench1, bench10, bench100); +criterion_main!(benches); diff --git a/vendor/unicode-ident/src/lib.rs b/vendor/unicode-ident/src/lib.rs new file mode 100644 index 00000000000000..2df5244f86981d --- /dev/null +++ b/vendor/unicode-ident/src/lib.rs @@ -0,0 +1,281 @@ +//! [![github]](https://github.com/dtolnay/unicode-ident) [![crates-io]](https://crates.io/crates/unicode-ident) [![docs-rs]](https://docs.rs/unicode-ident) +//! +//! [github]: https://img.shields.io/badge/github-8da0cb?style=for-the-badge&labelColor=555555&logo=github +//! [crates-io]: https://img.shields.io/badge/crates.io-fc8d62?style=for-the-badge&labelColor=555555&logo=rust +//! [docs-rs]: https://img.shields.io/badge/docs.rs-66c2a5?style=for-the-badge&labelColor=555555&logo=docs.rs +//! +//! <br> +//! +//! Implementation of [Unicode Standard Annex #31][tr31] for determining which +//! `char` values are valid in programming language identifiers. +//! +//! [tr31]: https://www.unicode.org/reports/tr31/ +//! +//! This crate is a better optimized implementation of the older `unicode-xid` +//! crate. This crate uses less static storage, and is able to classify both +//! ASCII and non-ASCII codepoints with better performance, 6× faster than +//! `unicode-xid`. +//! +//! <br> +//! +//! ## Comparison of performance +//! +//! The following table shows a comparison between five Unicode identifier +//! implementations. +//! +//! - `unicode-ident` is this crate; +//! - [`unicode-xid`] is a widely used crate run by the "unicode-rs" org; +//! - `ucd-trie` and `fst` are two data structures supported by the +//! [`ucd-generate`] tool; +//! - [`roaring`] is a Rust implementation of Roaring bitmap. +//! +//! The *static storage* column shows the total size of `static` tables that the +//! crate bakes into your binary, measured in 1000s of bytes. +//! +//! The remaining columns show the **cost per call** to evaluate whether a +//! single `char` has the XID\_Start or XID\_Continue Unicode property, +//! comparing across different ratios of ASCII to non-ASCII codepoints in the +//! input data. +//! +//! [`unicode-xid`]: https://github.com/unicode-rs/unicode-xid +//! [`ucd-generate`]: https://github.com/BurntSushi/ucd-generate +//! [`roaring`]: https://github.com/RoaringBitmap/roaring-rs +//! +//! | | static storage | 0% nonascii | 1% | 10% | 100% nonascii | +//! |---|---|---|---|---|---| +//! | **`unicode-ident`** | 10.3 K | 0.41 ns | 0.44 ns | 0.44 ns | 0.93 ns | +//! | **`unicode-xid`** | 12.0 K | 2.43 ns | 2.50 ns | 2.85 ns | 8.65 ns | +//! | **`ucd-trie`** | 10.4 K | 1.28 ns | 1.25 ns | 1.20 ns | 1.97 ns | +//! | **`fst`** | 144 K | 50.9 ns | 51.0 ns | 48.5 ns | 26.7 ns | +//! 
| **`roaring`** | 66.1 K | 4.28 ns | 4.22 ns | 4.25 ns | 4.61 ns | +//! +//! Source code for the benchmark is provided in the *bench* directory of this +//! repo and may be repeated by running `cargo criterion`. +//! +//! <br> +//! +//! ## Comparison of data structures +//! +//! #### unicode-xid +//! +//! They use a sorted array of character ranges, and do a binary search to look +//! up whether a given character lands inside one of those ranges. +//! +//! ```rust +//! # const _: &str = stringify! { +//! static XID_Continue_table: [(char, char); 763] = [ +//! ('\u{30}', '\u{39}'), // 0-9 +//! ('\u{41}', '\u{5a}'), // A-Z +//! # " +//! … +//! # " +//! ('\u{e0100}', '\u{e01ef}'), +//! ]; +//! # }; +//! ``` +//! +//! The static storage used by this data structure scales with the number of +//! contiguous ranges of identifier codepoints in Unicode. Every table entry +//! consumes 8 bytes, because it consists of a pair of 32-bit `char` values. +//! +//! In some ranges of the Unicode codepoint space, this is quite a sparse +//! representation – there are some ranges where tens of thousands of +//! adjacent codepoints are all valid identifier characters. In other places, +//! the representation is quite inefficient. A characater like `µ` (U+00B5) +//! which is surrounded by non-identifier codepoints consumes 64 bits in the +//! table, while it would be just 1 bit in a dense bitmap. +//! +//! On a system with 64-byte cache lines, binary searching the table touches 7 +//! cache lines on average. Each cache line fits only 8 table entries. +//! Additionally, the branching performed during the binary search is probably +//! mostly unpredictable to the branch predictor. +//! +//! Overall, the crate ends up being about 6× slower on non-ASCII input +//! compared to the fastest crate. +//! +//! A potential improvement would be to pack the table entries more compactly. +//! Rust's `char` type is a 21-bit integer padded to 32 bits, which means every +//! table entry is holding 22 bits of wasted space, adding up to 3.9 K. They +//! could instead fit every table entry into 6 bytes, leaving out some of the +//! padding, for a 25% improvement in space used. With some cleverness it may be +//! possible to fit in 5 bytes or even 4 bytes by storing a low char and an +//! extent, instead of low char and high char. I don't expect that performance +//! would improve much but this could be the most efficient for space across all +//! the libraries, needing only about 7 K to store. +//! +//! #### ucd-trie +//! +//! Their data structure is a compressed trie set specifically tailored for +//! Unicode codepoints. The design is credited to Raph Levien in +//! [rust-lang/rust#33098]. +//! +//! [rust-lang/rust#33098]: https://github.com/rust-lang/rust/pull/33098 +//! +//! ```rust +//! pub struct TrieSet { +//! tree1_level1: &'static [u64; 32], +//! tree2_level1: &'static [u8; 992], +//! tree2_level2: &'static [u64], +//! tree3_level1: &'static [u8; 256], +//! tree3_level2: &'static [u8], +//! tree3_level3: &'static [u64], +//! } +//! ``` +//! +//! It represents codepoint sets using a trie to achieve prefix compression. The +//! final states of the trie are embedded in leaves or "chunks", where each +//! chunk is a 64-bit integer. Each bit position of the integer corresponds to +//! whether a particular codepoint is in the set or not. These chunks are not +//! just a compact representation of the final states of the trie, but are also +//! a form of suffix compression. In particular, if multiple ranges of 64 +//! 
contiguous codepoints have the same Unicode properties, then they all map to +//! the same chunk in the final level of the trie. +//! +//! Being tailored for Unicode codepoints, this trie is partitioned into three +//! disjoint sets: tree1, tree2, tree3. The first set corresponds to codepoints +//! \[0, 0x800), the second \[0x800, 0x10000) and the third \[0x10000, +//! 0x110000). These partitions conveniently correspond to the space of 1 or 2 +//! byte UTF-8 encoded codepoints, 3 byte UTF-8 encoded codepoints and 4 byte +//! UTF-8 encoded codepoints, respectively. +//! +//! Lookups in this data structure are significantly more efficient than binary +//! search. A lookup touches either 1, 2, or 3 cache lines based on which of the +//! trie partitions is being accessed. +//! +//! One possible performance improvement would be for this crate to expose a way +//! to query based on a UTF-8 encoded string, returning the Unicode property +//! corresponding to the first character in the string. Without such an API, the +//! caller is required to tokenize their UTF-8 encoded input data into `char`, +//! hand the `char` into `ucd-trie`, only for `ucd-trie` to undo that work by +//! converting back into the variable-length representation for trie traversal. +//! +//! #### fst +//! +//! Uses a [finite state transducer][fst]. This representation is built into +//! [ucd-generate] but I am not aware of any advantage over the `ucd-trie` +//! representation. In particular `ucd-trie` is optimized for storing Unicode +//! properties while `fst` is not. +//! +//! [fst]: https://github.com/BurntSushi/fst +//! [ucd-generate]: https://github.com/BurntSushi/ucd-generate +//! +//! As far as I can tell, the main thing that causes `fst` to have large size +//! and slow lookups for this use case relative to `ucd-trie` is that it does +//! not specialize for the fact that only 21 of the 32 bits in a `char` are +//! meaningful. There are some dense arrays in the structure with large ranges +//! that could never possibly be used. +//! +//! #### roaring +//! +//! This crate is a pure-Rust implementation of [Roaring Bitmap], a data +//! structure designed for storing sets of 32-bit unsigned integers. +//! +//! [Roaring Bitmap]: https://roaringbitmap.org/about/ +//! +//! Roaring bitmaps are compressed bitmaps which tend to outperform conventional +//! compressed bitmaps such as WAH, EWAH or Concise. In some instances, they can +//! be hundreds of times faster and they often offer significantly better +//! compression. +//! +//! In this use case the performance was reasonably competitive but still +//! substantially slower than the Unicode-optimized crates. Meanwhile the +//! compression was significantly worse, requiring 6× as much storage for +//! the data structure. +//! +//! I also benchmarked the [`croaring`] crate which is an FFI wrapper around the +//! C reference implementation of Roaring Bitmap. This crate was consistently +//! about 15% slower than pure-Rust `roaring`, which could just be FFI overhead. +//! I did not investigate further. +//! +//! [`croaring`]: https://crates.io/crates/croaring +//! +//! #### unicode-ident +//! +//! This crate is most similar to the `ucd-trie` library, in that it's based on +//! bitmaps stored in the leafs of a trie representation, achieving both prefix +//! compression and suffix compression. +//! +//! The key differences are: +//! +//! - Uses a single 2-level trie, rather than 3 disjoint partitions of different +//! depth each. +//! 
- Uses significantly larger chunks: 512 bits rather than 64 bits. +//! - Compresses the XID\_Start and XID\_Continue properties together +//! simultaneously, rather than duplicating identical trie leaf chunks across +//! the two. +//! +//! The following diagram show the XID\_Start and XID\_Continue Unicode boolean +//! properties in uncompressed form, in row-major order: +//! +//! <table> +//! <tr><th>XID_Start</th><th>XID_Continue</th></tr> +//! <tr> +//! <td><img alt="XID_Start bitmap" width="256" src="https://user-images.githubusercontent.com/1940490/168647353-c6eeb922-afec-49b2-9ef5-c03e9d1e0760.png"></td> +//! <td><img alt="XID_Continue bitmap" width="256" src="https://user-images.githubusercontent.com/1940490/168647367-f447cca7-2362-4d7d-8cd7-d21c011d329b.png"></td> +//! </tr> +//! </table> +//! +//! Uncompressed, these would take 140 K to store, which is beyond what would be +//! reasonable. However, as you can see there is a large degree of similarity +//! between the two bitmaps and across the rows, which lends well to +//! compression. +//! +//! This crate stores one 512-bit "row" of the above bitmaps in the leaf level +//! of a trie, and a single additional level to index into the leafs. It turns +//! out there are 124 unique 512-bit chunks across the two bitmaps so 7 bits are +//! sufficient to index them. +//! +//! The chunk size of 512 bits is selected as the size that minimizes the total +//! size of the data structure. A smaller chunk, like 256 or 128 bits, would +//! achieve better deduplication but require a larger index. A larger chunk +//! would increase redundancy in the leaf bitmaps. 512 bit chunks are the +//! optimum for total size of the index plus leaf bitmaps. +//! +//! In fact since there are only 124 unique chunks, we can use an 8-bit index +//! with a spare bit to index at the half-chunk level. This achieves an +//! additional 8.5% compression by eliminating redundancies between the second +//! half of any chunk and the first half of any other chunk. Note that this is +//! not the same as using chunks which are half the size, because it does not +//! necessitate raising the size of the trie's first level. +//! +//! In contrast to binary search or the `ucd-trie` crate, performing lookups in +//! this data structure is straight-line code with no need for branching. + +#![no_std] +#![doc(html_root_url = "https://docs.rs/unicode-ident/1.0.22")] +#![allow( + clippy::doc_markdown, + clippy::must_use_candidate, + clippy::unreadable_literal +)] + +#[rustfmt::skip] +mod tables; + +pub use crate::tables::UNICODE_VERSION; +use crate::tables::{ASCII_CONTINUE, ASCII_START, CHUNK, LEAF, TRIE_CONTINUE, TRIE_START}; + +static ZERO: u8 = 0; + +/// Whether the character has the Unicode property XID\_Start. +pub fn is_xid_start(ch: char) -> bool { + if ch.is_ascii() { + return ASCII_START & (1 << ch as u128) != 0; + } + let chunk = *TRIE_START.0.get(ch as usize / 8 / CHUNK).unwrap_or(&ZERO); + let offset = chunk as usize * CHUNK / 2 + ch as usize / 8 % CHUNK; + unsafe { LEAF.0.get_unchecked(offset) }.wrapping_shr(ch as u32 % 8) & 1 != 0 +} + +/// Whether the character has the Unicode property XID\_Continue. 
+pub fn is_xid_continue(ch: char) -> bool { + if ch.is_ascii() { + return ASCII_CONTINUE & (1 << ch as u128) != 0; + } + let chunk = *TRIE_CONTINUE + .0 + .get(ch as usize / 8 / CHUNK) + .unwrap_or(&ZERO); + let offset = chunk as usize * CHUNK / 2 + ch as usize / 8 % CHUNK; + unsafe { LEAF.0.get_unchecked(offset) }.wrapping_shr(ch as u32 % 8) & 1 != 0 +} diff --git a/vendor/unicode-ident/src/tables.rs b/vendor/unicode-ident/src/tables.rs new file mode 100644 index 00000000000000..59634efe78b82b --- /dev/null +++ b/vendor/unicode-ident/src/tables.rs @@ -0,0 +1,663 @@ +// @generated by ../generate. To regenerate, run the following in the repo root: +// +// $ curl -LO https://www.unicode.org/Public/17.0.0/ucd/UCD.zip +// $ unzip UCD.zip -d UCD +// $ cargo run --manifest-path generate/Cargo.toml + +#[repr(C, align(8))] +pub(crate) struct Align8<T>(pub(crate) T); +#[repr(C, align(64))] +pub(crate) struct Align64<T>(pub(crate) T); + +pub const UNICODE_VERSION: (u8, u8, u8) = (17, 0, 0); + +pub(crate) const ASCII_START: u128 = 0x7fffffe07fffffe0000000000000000; +pub(crate) const ASCII_CONTINUE: u128 = 0x7fffffe87fffffe03ff000000000000; + +pub(crate) const CHUNK: usize = 64; + +pub(crate) static TRIE_START: Align8<[u8; 411]> = Align8([ + 0x04, 0x0B, 0x0F, 0x13, 0x17, 0x1B, 0x1F, 0x23, 0x27, 0x2D, 0x31, 0x34, 0x38, 0x3C, 0x40, 0x02, + 0x45, 0x00, 0x00, 0x00, 0x00, 0x00, 0x49, 0x00, 0x4D, 0x00, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, + 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x06, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, + 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, + 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, + 0x05, 0x05, 0x51, 0x54, 0x58, 0x5C, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, + 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x09, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x60, 0x64, 0x66, + 0x6A, 0x6E, 0x72, 0x28, 0x76, 0x78, 0x7C, 0x80, 0x84, 0x88, 0x8C, 0x90, 0x94, 0x98, 0x9C, 0xA0, + 0x05, 0x2B, 0xA4, 0x00, 0x00, 0x00, 0x00, 0xA6, 0x05, 0x05, 0xA8, 0x05, 0x05, 0x05, 0x05, 0x05, + 0x05, 0x32, 0x05, 0xAD, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0xAE, 0x00, 0x00, 0x00, 0x05, 0xB2, 0xB6, 0xBA, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, + 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0xBE, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xC0, 0x43, 0xC2, 0x00, 0x00, 0x00, 0x00, 0xC5, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xD1, 0xD3, 0x00, 0x00, 0x00, 0xC9, + 0xD9, 0xDD, 0xE1, 0xE5, 0xE9, 0x00, 0x00, 0xED, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, + 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, + 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, + 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, + 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, + 0x05, 0x05, 0x05, 0xEF, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0xF1, 0x05, 0x05, 0x05, + 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0xF3, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, + 0x05, 0x05, 0x05, 0x05, 0x05, 0x52, 0x05, 0xF5, 
0x00, 0x00, 0x00, 0x00, 0x05, 0xAF, 0x00, 0x00, + 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0xA9, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, + 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0xF7, +]); + +pub(crate) static TRIE_CONTINUE: Align8<[u8; 1793]> = Align8([ + 0x08, 0x0D, 0x11, 0x15, 0x19, 0x1D, 0x21, 0x25, 0x2A, 0x2F, 0x31, 0x36, 0x3A, 0x3E, 0x42, 0x02, + 0x47, 0x00, 0x00, 0x00, 0x00, 0x00, 0x4B, 0x00, 0x4F, 0x00, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, + 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x06, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, + 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, + 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, + 0x05, 0x05, 0x51, 0x56, 0x5A, 0x5E, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, + 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x09, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x62, 0x64, 0x68, + 0x6C, 0x70, 0x74, 0x28, 0x76, 0x7A, 0x7E, 0x82, 0x86, 0x8A, 0x8E, 0x92, 0x96, 0x9A, 0x9E, 0xA2, + 0x05, 0x2B, 0xA4, 0x00, 0x00, 0x00, 0x00, 0xA6, 0x05, 0x05, 0xAB, 0x05, 0x05, 0x05, 0x05, 0x05, + 0x05, 0x32, 0x05, 0xAD, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0xB0, 0x00, 0x00, 0x00, 0x05, 0xB4, 0xB8, 0xBC, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, + 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0xBE, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xC0, 0x43, 0xC2, 0x00, 0x00, 0x00, 0x00, 0xC8, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xCB, 0xC3, 0xC6, 0xCE, 0xD1, 0xD5, 0x00, 0xD7, 0x00, 0xC9, + 0xDB, 0xDF, 0xE3, 0xE7, 0xEB, 0x00, 0x00, 0xED, 0x00, 0x00, 0x00, 0x00, 0x00, 0xCC, 0x00, 0x00, + 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, + 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, + 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, + 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, + 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, + 0x05, 0x05, 0x05, 0xEF, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0xF1, 0x05, 0x05, 0x05, + 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0xF3, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, + 0x05, 0x05, 0x05, 0x05, 0x05, 0x52, 0x05, 0xF5, 0x00, 0x00, 0x00, 0x00, 0x05, 0xAF, 0x00, 0x00, + 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0xA9, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, + 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0xF7, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0xCF, +]); + +pub(crate) static LEAF: Align64<[u8; 7968]> = Align64([ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0x3F, 0x3F, 0xFF, 0xFF, 0xFF, 0xFF, 0x3F, 0x3F, 0xFF, 0xAA, 0xFF, 0xFF, 0xFF, 0x3F, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xDF, 0x5F, 0xDC, 0x1F, 0xCF, 0x0F, 0xFF, 0x1F, 0xDC, 0x1F, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x20, 0x04, 0xFF, 0xFF, 0x7F, 0xFF, 0xFF, 0xFF, 0x7F, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0xA0, 0x04, 0xFF, 0xFF, 0x7F, 0xFF, 0xFF, 0xFF, 0x7F, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0x0F, 0x00, 0xFF, 0xFF, 0x7F, 0xF8, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0F, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xC3, 0xFF, 0x03, 0x00, 0x1F, 0x50, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xDF, 0xB8, + 0x40, 0xD7, 0xFF, 0xFF, 0xFB, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xBF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xC3, 0xFF, 0x03, 0x00, 0x1F, 0x50, 0x00, 0x00, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xDF, 0xB8, + 0xC0, 0xD7, 0xFF, 0xFF, 0xFB, 
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xBF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0x03, 0xFC, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFE, 0xFF, 0xFF, 0xFF, 0x7F, 0x02, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0x87, 0x07, 0x00, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFB, 0xFC, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFE, 0xFF, 0xFF, 0xFF, 0x7F, 0x02, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0x01, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xBF, 0xB6, 0x00, 0xFF, 0xFF, 0xFF, 0x87, 0x07, 0x00, + 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x07, 0x00, 0x00, 0x00, 0xC0, 0xFE, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x2F, 0x00, 0x60, 0xC0, 0x00, 0x9C, + 0x00, 0x00, 0xFD, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0xE0, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0x3F, 0x00, 0x02, 0x00, 0x00, 0xFC, 0xFF, 0xFF, 0xFF, 0x07, 0x30, 0x04, + 0x00, 0x00, 0xFF, 0x07, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xC3, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xEF, 0x9F, 0xFF, 0xFD, 0xFF, 0x9F, + 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xE7, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x03, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x3F, 0x24, + 0xFF, 0xFF, 0x3F, 0x04, 0x10, 0x01, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0x01, 0xFF, 0x07, 0xFF, 0xFF, + 0xFF, 0xFE, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0xF0, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x23, 0x00, 0x00, 0x01, 0xFF, 0x03, 0x00, 0xFE, 0xFF, + 0xE1, 0x9F, 0xF9, 0xFF, 0xFF, 0xFD, 0xC5, 0x23, 0x00, 0x40, 0x00, 0xB0, 0x03, 0x00, 0x03, 0x10, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x3F, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0x0F, 0xFF, 0x07, 0xFF, 0xFF, + 0xFF, 0xFE, 0x80, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFB, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xCF, 0xFF, 0xFE, 0xFF, + 0xEF, 0x9F, 0xF9, 0xFF, 0xFF, 0xFD, 0xC5, 0xF3, 0x9F, 0x79, 0x80, 0xB0, 0xCF, 0xFF, 0x03, 0x50, + 0xE0, 0x87, 0xF9, 0xFF, 0xFF, 0xFD, 0x6D, 0x03, 0x00, 0x00, 0x00, 0x5E, 0x00, 0x00, 0x1C, 0x00, + 0xE0, 0xBF, 0xFB, 0xFF, 0xFF, 0xFD, 0xED, 0x23, 0x00, 0x00, 0x01, 0x00, 0x03, 0x00, 0x00, 0x02, + 0xE0, 0x9F, 0xF9, 0xFF, 0xFF, 0xFD, 0xED, 0x23, 0x00, 0x00, 0x00, 0xB0, 0x03, 0x00, 0x02, 0x00, + 0xE8, 0xC7, 0x3D, 0xD6, 0x18, 0xC7, 0xFF, 0x03, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, + 0xEE, 0x87, 0xF9, 0xFF, 0xFF, 0xFD, 0x6D, 0xD3, 0x87, 0x39, 0x02, 0x5E, 0xC0, 0xFF, 0x3F, 0x00, + 0xEE, 0xBF, 0xFB, 0xFF, 0xFF, 0xFD, 0xED, 0xF3, 0xBF, 0x3B, 0x01, 0x00, 0xCF, 0xFF, 0x00, 0xFE, + 0xEE, 0x9F, 0xF9, 0xFF, 0xFF, 0xFD, 0xED, 0xF3, 0x9F, 0x39, 0xE0, 0xB0, 0xCF, 0xFF, 0x02, 0x00, + 0xEC, 0xC7, 0x3D, 0xD6, 0x18, 0xC7, 0xFF, 0xC3, 0xC7, 0x3D, 0x81, 0x00, 0xC0, 0xFF, 0x00, 0x00, + 0xE0, 0xDF, 0xFD, 0xFF, 0xFF, 0xFD, 0xFF, 0x23, 0x00, 0x00, 0x00, 0x37, 0x03, 0x00, 0x00, 0x00, + 0xE1, 0xDF, 0xFD, 0xFF, 0xFF, 0xFD, 0xEF, 0x23, 0x00, 0x00, 0x00, 0x70, 0x03, 0x00, 0x06, 0x00, + 0xF0, 0xDF, 0xFD, 0xFF, 0xFF, 0xFF, 0xFF, 0x27, 0x00, 0x40, 0x70, 0x80, 0x03, 0x00, 0x00, 0xFC, + 0xE0, 0xFF, 0x7F, 0xFC, 0xFF, 0xFF, 0xFB, 0x2F, 0x7F, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0xFF, 0xDF, 0xFD, 0xFF, 0xFF, 0xFD, 0xFF, 0xF3, 0xDF, 0x3D, 0x60, 0x37, 0xCF, 0xFF, 0x00, 0x00, + 0xEF, 0xDF, 0xFD, 0xFF, 0xFF, 0xFD, 0xEF, 0xF3, 0xDF, 0x3D, 0x60, 0x70, 0xCF, 0xFF, 0x0E, 0x00, + 0xFF, 0xDF, 0xFD, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xDF, 0x7D, 0xF0, 0x80, 0xCF, 0xFF, 0x00, 0xFC, + 0xEE, 0xFF, 0x7F, 0xFC, 0xFF, 0xFF, 0xFB, 0x2F, 0x7F, 0x84, 0x5F, 0xFF, 0xC0, 0xFF, 0x0C, 0x00, + 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x05, 0x00, 0x7F, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0xD6, 0xF7, 0xFF, 0xFF, 0xAF, 0xFF, 0x05, 0x20, 0x5F, 0x00, 0x00, 0xF0, 0x00, 0x00, 0x00, 0x00, + 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFE, 0xFF, 0xFF, 0xFF, 0x1F, 0x00, 0x00, + 0x00, 0x1F, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x07, 0xFF, 0x7F, 0xFF, 0x03, 0x00, 0x00, 0x00, 0x00, + 0xD6, 0xF7, 0xFF, 0xFF, 0xAF, 0xFF, 0xFF, 0x3F, 0x5F, 0x7F, 0xFF, 0xF3, 0x00, 0x00, 0x00, 0x00, + 0x01, 0x00, 0x00, 0x03, 0xFF, 0x03, 0xA0, 0xC2, 0xFF, 0xFE, 0xFF, 0xFF, 0xFF, 0x1F, 0xFE, 0xFF, + 0xDF, 0xFF, 0xFF, 0xFE, 0xFF, 0xFF, 0xFF, 0x1F, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x07, 0x00, 0x80, 0x00, 0x00, 0x3F, 0x3C, 0x62, 0xC0, 0xE1, 0xFF, + 0x03, 0x40, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xBF, 0x20, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xF7, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x7F, 0x00, 0xFF, 0xFF, 0x3F, 0x00, 0xFF, 0x00, 0x00, 0x00, + 0xBF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFD, 0x07, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x03, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0x3F, 0xFF, 0xFF, 0xFF, 0xFF, 0xBF, 0x20, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xF7, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x3D, 0x7F, 0x3D, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0x3D, 0xFF, 0xFF, 0xFF, 0xFF, 0x3D, 0x7F, 0x3D, 0xFF, 0x7F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0x3D, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x07, 0x00, 0x00, 0x00, 0x00, + 0xFF, 0xFF, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x3F, 0x3F, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x3D, 0x7F, 0x3D, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0x3D, 0xFF, 0xFF, 0xFF, 0xFF, 0x3D, 0x7F, 0x3D, 0xFF, 0x7F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0x3D, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xE7, 0x00, 0xFE, 0x03, 0x00, + 0xFF, 0xFF, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x3F, 0x3F, + 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 
0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x07, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x9F, 0xFF, 0xFF, + 0xFE, 0xFF, 0xFF, 0x07, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xC7, 0xFF, 0x01, + 0xFF, 0xFF, 0x03, 0x80, 0xFF, 0xFF, 0x03, 0x00, 0xFF, 0xFF, 0x03, 0x00, 0xFF, 0xDF, 0x01, 0x00, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0F, 0x00, 0x00, 0x00, 0x80, 0x10, 0x00, 0x00, 0x00, 0x00, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x9F, 0xFF, 0xFF, + 0xFE, 0xFF, 0xFF, 0x07, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xC7, 0xFF, 0x01, + 0xFF, 0xFF, 0x3F, 0x80, 0xFF, 0xFF, 0x1F, 0x00, 0xFF, 0xFF, 0x0F, 0x00, 0xFF, 0xDF, 0x0D, 0x00, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x8F, 0x30, 0xFF, 0x03, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x01, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x05, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x3F, 0x00, + 0xFF, 0xFF, 0xFF, 0x7F, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0x3F, 0x1F, 0x00, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0F, 0xFF, 0xFF, 0xFF, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0xB8, 0xFF, 0x03, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x01, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x07, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x3F, 0x00, + 0xFF, 0xFF, 0xFF, 0x7F, 0xFF, 0x0F, 0xFF, 0x0F, 0xC0, 0xFF, 0xFF, 0xFF, 0xFF, 0x3F, 0x1F, 0x00, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0F, 0xFF, 0xFF, 0xFF, 0x03, 0xFF, 0x07, 0x00, 0x00, 0x00, 0x00, + 0xFF, 0xFF, 0x7F, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x1F, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0xE0, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0F, 0x00, 0xE0, 0x1F, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0xF8, 0xFF, 0xFF, 0xFF, 0x01, 0xC0, 0x00, 0xFC, 0xFF, 0xFF, 0xFF, 0xFF, 0x3F, 0x00, 0x00, 0x00, + 0xFF, 0xFF, 0xFF, 0x0F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x7F, 0xFF, 0xFF, 0xFF, 0x9F, + 0xFF, 0x03, 0xFF, 0x03, 0x80, 0x00, 0xFF, 0xBF, 0xFF, 0xFF, 0xFF, 0x3F, 0xFF, 0x0F, 0x00, 0x00, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x1F, 0xFF, 0x03, 0x00, 0xF8, 0x0F, 0x00, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0F, 0x00, + 0xFF, 0xFF, 0xFF, 0xFF, 0x0F, 0x00, 0x00, 0x00, 0x00, 0xE0, 0x00, 0xFC, 0xFF, 0xFF, 0xFF, 0x3F, + 0xFF, 0x07, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xE7, 0x00, 0x00, 0x00, 0x00, 0x00, 0xDE, 0x6F, 0x04, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0xFF, 0xE3, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x3F, + 0xFF, 0x07, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xE7, 0x00, 0x00, 0xF7, 0xFF, 0xFF, 0xFF, 0xFF, 0x07, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0x07, 0x00, 0x04, 0x00, 0x00, 0x00, 0x27, 0x00, 0xF0, 0x00, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x80, + 0x00, 0x00, 0xFF, 0x1F, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x84, 0xFC, 0x2F, 0x3F, 0x50, 0xFD, 0xFF, 0xF3, 0xE0, 0x43, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x30, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x01, 0x00, 0x10, 0x00, 0x00, 0x00, 0x02, 0x80, + 0x00, 0x00, 0xFF, 0x1F, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0x1F, 0xE2, 0xFF, 0x01, 0x00, + 0x84, 0xFC, 0x2F, 0x3F, 0x50, 0xFD, 0xFF, 0xF3, 0xE0, 0x43, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x1F, 0x78, 0x0C, 0x00, + 0xFF, 0xFF, 0xFF, 0xFF, 0xBF, 0x20, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x80, 0x00, 0x00, + 0xFF, 0xFF, 0x7F, 0x00, 0x7F, 0x7F, 0x7F, 0x7F, 0x7F, 0x7F, 0x7F, 0x7F, 0x00, 0x00, 0x00, 0x00, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x1F, 0xF8, 0x0F, 0x00, + 0xFF, 0xFF, 0xFF, 0xFF, 0xBF, 0x20, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x80, 0x00, 0x80, + 0xFF, 0xFF, 0x7F, 0x00, 0x7F, 0x7F, 0x7F, 0x7F, 0x7F, 0x7F, 0x7F, 0x7F, 0xFF, 0xFF, 0xFF, 0xFF, + 0xE0, 0x00, 0x00, 0x00, 0xFE, 0x03, 0x3E, 0x1F, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0x7F, 0xE0, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xF7, + 0xE0, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0x7F, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, + 0xE0, 0x00, 0x00, 0x00, 0xFE, 0xFF, 0x3E, 0x1F, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0x7F, 0xE6, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xE0, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0x7F, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0x1F, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x3F, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x01, 0x00, 0xFF, 0xFF, + 0xFF, 0x1F, 0xFF, 0xFF, 0x00, 0x0C, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x7F, 0x00, 0x80, + 0xFF, 0xFF, 0xFF, 0x3F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, + 0x00, 0x00, 0x80, 0xFF, 0xFC, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xF9, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x1F, 0x00, 0x00, 0xFE, 0xFF, + 0xFF, 0x1F, 0xFF, 0xFF, 0xFF, 0x0F, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xF0, 0xBF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x03, 0x00, + 0x00, 0x00, 0x80, 0xFF, 0xFC, 
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xF9, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x1F, 0x00, 0x00, 0xFE, 0xFF, + 0xBB, 0xF7, 0xFF, 0xFF, 0x07, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0F, 0x00, + 0xFC, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0F, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFC, 0x68, + 0x00, 0xFC, 0xFF, 0xFF, 0x3F, 0x00, 0xFF, 0xFF, 0x7F, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0x1F, + 0xF0, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x07, 0x00, 0x00, 0x80, 0x00, 0x00, 0xDF, 0xFF, 0x00, 0x7C, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x10, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0F, 0x00, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x3F, 0x00, 0xFF, 0x03, 0xFF, 0xFF, 0xFF, 0xE8, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x3F, 0xFF, 0xFF, 0xFF, 0xFF, 0x0F, 0x00, 0xFF, 0xFF, 0xFF, 0x1F, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x01, 0x80, 0xFF, 0x03, 0xFF, 0xFF, 0xFF, 0x7F, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x01, 0x00, 0x00, 0xF7, 0x0F, 0x00, 0x00, 0xFF, 0xFF, 0x7F, 0xC4, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x62, 0x3E, 0x05, 0x00, 0x00, 0x38, 0xFF, 0x07, 0x1C, 0x00, + 0x7E, 0x7E, 0x7E, 0x00, 0x7F, 0x7F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xF7, 0xFF, 0x03, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x07, 0x00, 0x00, 0x00, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x7F, 0x00, 0xFF, 0x3F, 0xFF, 0x03, 0xFF, 0xFF, 0x7F, 0xFC, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x07, 0x00, 0x00, 0x38, 0xFF, 0xFF, 0x7C, 0x00, + 0x7E, 0x7E, 0x7E, 0x00, 0x7F, 0x7F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xF7, 0xFF, 0x03, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x37, 0xFF, 0x03, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x3F, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x03, 0x00, 0x00, 0x00, 0x00, + 0x7F, 0x00, 0xF8, 0xA0, 0xFF, 0xFD, 0x7F, 0x5F, 0xDB, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x03, 0x00, 0x00, 0x00, 0xF8, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x3F, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x03, 0x00, 0x00, 0x00, 0x00, + 0x7F, 0x00, 0xF8, 0xE0, 0xFF, 0xFD, 0x7F, 0x5F, 0xDB, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x03, 0x00, 0x00, 0x00, 0xF8, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x3F, 0xF0, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x3F, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFC, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0x03, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8A, 0xAA, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x1F, + 0x00, 0x00, 0x00, 0x00, 0xFE, 0xFF, 0xFF, 0x07, 0xFE, 0xFF, 0xFF, 0x07, 0xC0, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0x3F, 0xFF, 0xFF, 0xFF, 0x7F, 0xFC, 0xFC, 0xFC, 0x1C, 0x00, 0x00, 0x00, 0x00, + 0xFF, 0xFF, 0x00, 0x00, 0xFF, 0xFF, 0x18, 0x00, 0x00, 0xE0, 0x00, 0x00, 0x00, 0x00, 0x8A, 0xAA, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x1F, + 0x00, 0x00, 0xFF, 0x03, 0xFE, 0xFF, 0xFF, 0x87, 0xFE, 
0xFF, 0xFF, 0x07, 0xE0, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x7F, 0xFC, 0xFC, 0xFC, 0x1C, 0x00, 0x00, 0x00, 0x00, + 0xFF, 0xEF, 0xFF, 0xFF, 0x7F, 0xFF, 0xFF, 0xB7, 0xFF, 0x3F, 0xFF, 0x3F, 0x00, 0x00, 0x00, 0x00, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x07, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x1F, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0xFF, 0xEF, 0xFF, 0xFF, 0x7F, 0xFF, 0xFF, 0xB7, 0xFF, 0x3F, 0xFF, 0x3F, 0x00, 0x00, 0x00, 0x00, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x07, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x1F, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0xFF, 0xFF, 0xFF, 0x1F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, + 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0xE0, 0xFF, 0xFF, 0xFF, 0x07, 0xFF, 0xFF, 0xFF, 0xFF, 0x3F, 0x00, + 0xFF, 0xFF, 0xFF, 0x3F, 0xFF, 0xFF, 0xFF, 0xFF, 0x0F, 0xFF, 0x3E, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0xFF, 0xFF, 0xFF, 0x1F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x01, 0x00, 0x01, 0x00, 0x00, 0x00, + 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0xE0, 0xFF, 0xFF, 0xFF, 0x07, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x07, + 0xFF, 0xFF, 0xFF, 0x3F, 0xFF, 0xFF, 0xFF, 0xFF, 0x0F, 0xFF, 0x3E, 0x00, 0x00, 0x00, 0x00, 0x00, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0x3F, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0x0F, 0xFF, 0xFF, 0xFF, 0xFF, 0x0F, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0F, 0x00, 0xFF, 0xF7, + 0xFF, 0xF7, 0xB7, 0xFF, 0xFB, 0xFF, 0xFB, 0x1B, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0F, 0x00, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0x3F, 0xFF, 0x03, 0xFF, 0xFF, 0xFF, 0xFF, 0x0F, 0xFF, 0xFF, 0xFF, 0xFF, 0x0F, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0F, 0x00, 0xFF, 0xF7, + 0xFF, 0xF7, 0xB7, 0xFF, 0xFB, 0xFF, 0xFB, 0x1B, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0F, 0x00, + 0x3F, 0xFD, 0xFF, 0xFF, 0xFF, 0xFF, 0xBF, 0x91, 0xFF, 0xFF, 0x3F, 0x00, 0xFF, 0xFF, 0x7F, 0x00, + 0xFF, 0xFF, 0xFF, 0x7F, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0x37, 0x00, + 0xFF, 0xFF, 0x3F, 0x00, 0xFF, 0xFF, 0xFF, 0x03, 0xFF, 0xFF, 0xFF, 0x03, 0x00, 0x00, 0x00, 0x00, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xC0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x01, 0x00, 0xEF, 0xFE, 0xFF, 0xFF, 0x3F, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0x1F, + 0xFF, 0xFF, 0xFF, 0x1F, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFE, 0xFF, 0xFF, 0x1F, 0x00, 0x00, 0x00, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x3F, 0x00, 0xFF, 0xFF, 0x3F, 0x00, 0xFF, 0xFF, 0x07, 0x00, + 0xFF, 0xFF, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x6F, 0xF0, 0xEF, 0xFE, 0xFF, 0xFF, 0x3F, 0x87, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0x1F, + 0xFF, 0xFF, 0xFF, 0x1F, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFE, 0xFF, 0xFF, 0x7F, 0x00, 0x00, 0x00, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x3F, 0x00, 0xFF, 0xFF, 0x3F, 0x00, 0xFF, 
0xFF, 0x07, 0x00, + 0xFF, 0xFF, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x07, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x07, 0x00, + 0xFF, 0xFF, 0xFF, 0xFF, 0x0F, 0x00, 0x00, 0x00, 0x00, 0xFC, 0xFF, 0xFF, 0x3F, 0x80, 0xFF, 0xFF, + 0x3F, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x07, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x07, 0x00, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0xFF, 0x03, 0xFF, 0xFF, 0xFF, 0xFF, 0x3F, 0xBE, 0xFF, 0xFF, + 0x3F, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x03, 0x03, 0x00, 0xFC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0xFF, 0xFF, 0xFF, 0x1F, 0x80, 0x00, 0xFF, 0xFF, 0x3F, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, + 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0x1F, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0x7F, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x1B, 0x03, 0x00, 0xFC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFC, + 0xFF, 0xFF, 0xFF, 0x1F, 0x80, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0x01, 0x00, 0x00, 0x00, 0xFF, 0xFF, + 0x3F, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0x1F, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0x7F, 0x00, + 0xF8, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x26, 0x00, + 0xF8, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0x01, 0x00, 0x00, + 0xF8, 0xFF, 0xFF, 0xFF, 0x7F, 0x00, 0x00, 0x00, 0x90, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0x47, 0x00, + 0xF8, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x07, 0x00, 0x1E, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x00, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x7F, 0x00, 0x00, 0x00, 0xC0, 0xFF, 0x3F, 0x80, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x07, 0x04, 0x00, 0xFF, 0xFF, 0xFF, 0x01, 0xFF, 0x03, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xDF, 0xFF, 0xF0, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0x4F, 0x00, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x1F, 0xDE, 0xFF, 0x17, 0x00, 0x00, 0x00, 0x00, + 0xFF, 0xFF, 0xFB, 0xFF, 0xFF, 0x0F, 0x00, 0x80, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x7F, 0xBD, 0xFF, 0xBF, 0xFF, 0x01, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x7F, 0x00, 0x00, 0x00, 0x00, + 0xE0, 0x9F, 0xF9, 0xFF, 0xFF, 0xFD, 0xED, 0x23, 0x00, 0x00, 0x01, 0xE0, 0x03, 0x00, 0x00, 0x00, + 0xFF, 0x4B, 0xFF, 0xFF, 0xFF, 0xFF, 0xBF, 0x00, 0x00, 0x00, 0x0A, 0x00, 0x00, 0x00, 0x00, 0x00, + 0xFF, 0xFF, 0xFB, 0xFF, 0xFF, 0xFF, 0xFF, 0xC0, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x7F, 0xBD, 0xFF, 0xBF, 0xFF, 0x01, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x07, 0xFF, 0x03, + 0xEF, 0x9F, 0xF9, 0xFF, 0xFF, 0xFD, 0xED, 0xFB, 0x9F, 0x39, 0x81, 0xE0, 0xCF, 0x1F, 0x1F, 0x00, + 0xFF, 0x4B, 0xFF, 0xFF, 0xFF, 0xFF, 0xBF, 0xFF, 0xA5, 0xF7, 0x0F, 0x00, 0x06, 0x00, 0x00, 0x00, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x1F, 0x00, 0x80, 0x07, 0x00, 0x80, 0x03, 0x00, 0x00, 0x00, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0xB0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0xFF, 
0xFF, 0xFF, 0xFF, 0xFF, 0x7F, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0F, 0x00, 0x00, 0x00, 0x00, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x07, 0xFF, 0xC3, 0x03, 0x00, 0x00, 0x00, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xBF, 0x00, 0xFF, 0x03, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x3F, 0xFF, 0x01, 0x00, 0x00, 0x3F, 0x00, 0x00, 0x00, 0x00, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x07, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0xFF, 0xFF, 0xFF, 0x07, 0x00, 0x00, 0x00, 0x00, 0x7F, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x11, 0x00, 0xFF, 0x03, 0x00, 0x00, 0x00, 0x00, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x01, 0xFF, 0x03, 0xFF, 0xFF, 0x0F, 0x00, 0x00, 0x00, + 0xFF, 0xFF, 0xFF, 0xE7, 0xFF, 0x0F, 0xFF, 0x03, 0x7F, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0F, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x80, + 0x7F, 0xF2, 0x6F, 0xFF, 0xFF, 0xFF, 0x00, 0x80, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFC, 0xFF, 0xFF, 0xFF, 0xFF, 0x01, 0x00, 0x0A, 0x00, 0x00, 0x00, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x07, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x03, 0x00, 0x80, + 0x7F, 0xF2, 0x6F, 0xFF, 0xFF, 0xFF, 0xBF, 0xF9, 0x0F, 0x00, 0xFF, 0x03, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFC, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFC, 0x1B, 0x00, 0x00, 0x00, + 0x01, 0xF8, 0xFF, 0xFF, 0xFF, 0xFF, 0x07, 0x04, 0x00, 0x00, 0x01, 0xF0, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0x03, 0x00, 0x20, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x01, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0x01, 0x00, 0x00, 0x00, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x7F, 0x80, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0x23, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x01, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0x01, 0x00, 0xFF, 0x03, + 0xFF, 0xFD, 0xFF, 0xFF, 0xFF, 0x7F, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFC, 0xFF, + 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x7F, 0xFB, 0xFF, 0xFF, 0xFF, 0xFF, 0x01, 0x00, 0x40, 0x00, 0x00, 0x00, 0xBF, 0xFD, 0xFF, 0xFF, + 0xFF, 0x03, 0x00, 0x01, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0F, 0x00, 0x00, 0x00, 0x00, + 0xFF, 0xFD, 0xFF, 0xFF, 0xFF, 0xFF, 0x7F, 0xFF, 0x01, 0x00, 0xFF, 0x03, 0x00, 0x00, 0xFC, 0xFF, + 0xFF, 0xFF, 0xFC, 0xFF, 0xFF, 0xFE, 0x7F, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x7F, 0xFB, 0xFF, 0xFF, 0xFF, 0xFF, 0x7F, 0xB4, 0xFF, 0x00, 0xFF, 0x03, 0xBF, 0xFD, 0xFF, 0xFF, + 0xFF, 0x7F, 0xFB, 0x01, 0xFF, 
0x03, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0F, 0xFF, 0x03, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0x07, 0x00, + 0xF4, 0xFF, 0xFD, 0xFF, 0xFF, 0xFF, 0x0F, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0x7F, 0x00, + 0xFF, 0xFF, 0xFD, 0xFF, 0xFF, 0xFF, 0xFF, 0xC7, 0x07, 0x00, 0xFF, 0x07, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x7F, 0x00, 0x00, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0F, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x01, 0x00, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x7E, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x07, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0xFF, 0xFF, 0x3F, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x7F, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0xFF, 0xFF, 0xFF, 0x3F, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x01, 0xFF, 0xFF, 0xFF, 0x7F, 0x00, 0x00, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x7F, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0x3F, 0x00, 0x00, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x0F, 0x00, 0x00, 0x00, 0xF8, 0xFF, 0xFF, 0xE0, + 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x01, 0xFF, 0xFF, 0xFF, 0x7F, 0xFF, 0x03, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x7F, 0xFF, 0x03, 0xFF, 0xFF, 0xFF, 0x3F, 0x1F, 0x00, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x7F, 0x00, 0x0F, 0x00, 0xFF, 0x03, 0xF8, 0xFF, 0xFF, 0xE0, + 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x1F, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x1F, 0xFF, 0x03, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xF9, 0xFF, 0xFF, 0x0F, 0x00, 0x00, 0x00, 0x00, 0x00, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x07, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0xF8, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0B, 0x00, 0x7C, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xF9, 0xFF, 0xFF, 0x0F, 0x00, 0x00, 0x00, 0x00, 0x00, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x87, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0x80, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1B, 0x00, 0x7F, 0x00, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x3F, 0x00, 0x00, 0x00, 0x00, 0x80, + 0xFF, 0xFF, 0xFF, 0x7F, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x07, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xEF, 0x6F, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0F, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x3F, 0xFF, 0xFF, 0x7F, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x07, 0xFF, 0x1F, + 0xFF, 0x01, 0xFF, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE0, 0xE3, 0x07, 0xF8, + 0xE7, 0x0F, 0x00, 0x00, 0x00, 0x3C, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x07, 0xFF, 0x1F, + 0xFF, 0x01, 0xFF, 0x63, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0xFF, 0xFF, 0xFF, 0x7F, 0xE0, 0x07, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0x03, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0x03, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1C, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xDF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xDF, 0x64, 0xDE, 0xFF, 0xEB, 0xEF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xBF, 0xE7, 0xDF, 0xDF, 0xFF, 0xFF, 0xFF, 0x7B, 0x5F, 0xFC, 0xFD, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0x3F, 0xFF, 0xFF, 0xFF, 0xFD, 0xFF, 0xFF, 0xF7, 0xFF, 0xFF, 0xFF, 0xF7, + 0xFF, 0xFF, 0xDF, 0xFF, 0xFF, 0xFF, 0xDF, 0xFF, 0xFF, 0x7F, 0xFF, 0xFF, 0xFF, 0x7F, 0xFF, 0xFF, + 0xFF, 0xFD, 0xFF, 0xFF, 0xFF, 0xFD, 0xFF, 0xFF, 0xF7, 0x0F, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0x3F, 0xFF, 0xFF, 0xFF, 0xFD, 0xFF, 0xFF, 0xF7, 0xFF, 0xFF, 0xFF, 0xF7, + 0xFF, 
0xFF, 0xDF, 0xFF, 0xFF, 0xFF, 0xDF, 0xFF, 0xFF, 0x7F, 0xFF, 0xFF, 0xFF, 0x7F, 0xFF, 0xFF, + 0xFF, 0xFD, 0xFF, 0xFF, 0xFF, 0xFD, 0xFF, 0xFF, 0xF7, 0xCF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x7F, 0xF8, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x1F, 0x20, 0x00, + 0x10, 0x00, 0x00, 0xF8, 0xFE, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x3F, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x1F, 0x80, 0x3F, 0x00, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x7F, 0xFF, 0xFF, 0xF9, 0xDB, 0x07, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x3F, 0x00, 0x00, + 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x1F, 0xFF, 0x3F, 0xFF, 0x43, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0x3F, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0F, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0x7F, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x03, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0x0F, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0x3F, 0x01, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x03, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x07, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0x7F, 0xB7, 0x3F, 0x1F, 0xC0, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7F, 0x6F, 0xFF, 0x7F, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0x7F, 0xFF, 0xFF, 0x3F, 0xC0, + 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7F, 0x6F, 0xFF, 0x7F, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x1F, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0F, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x1F, 0x00, 0x7F, 0x00, 0x00, 0x00, 0x00, 0x00, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0F, 0xFF, 0x03, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0xEF, 0xFF, 0xFF, 0xFF, 0x96, 0xFE, 0xF7, 0x0A, 0x84, 0xEA, 0x96, 0xAA, 0x96, 0xF7, 0xF7, 0x5E, + 0xFF, 0xFB, 0xFF, 0x0F, 0xEE, 0xFB, 0xFF, 0x0F, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0x3F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x3F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x3F, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x03, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +]); diff --git a/vendor/unicode-ident/tests/compare.rs b/vendor/unicode-ident/tests/compare.rs new file mode 100644 index 00000000000000..7ed13fa9999c7e --- /dev/null +++ b/vendor/unicode-ident/tests/compare.rs @@ -0,0 +1,68 @@ +#![allow( + 
clippy::incompatible_msrv, // https://github.com/rust-lang/rust-clippy/issues/12257 +)] + +mod fst; +mod roaring; +mod trie; + +#[test] +fn compare_all_implementations() { + let xid_start_fst = fst::xid_start_fst(); + let xid_continue_fst = fst::xid_continue_fst(); + let xid_start_roaring = roaring::xid_start_bitmap(); + let xid_continue_roaring = roaring::xid_continue_bitmap(); + + for ch in '\0'..=char::MAX { + let thought_to_be_start = unicode_ident::is_xid_start(ch); + let thought_to_be_continue = unicode_ident::is_xid_continue(ch); + + // unicode-xid + assert_eq!( + thought_to_be_start, + unicode_xid::UnicodeXID::is_xid_start(ch), + "{ch:?}", + ); + assert_eq!( + thought_to_be_continue, + unicode_xid::UnicodeXID::is_xid_continue(ch), + "{ch:?}", + ); + + // ucd-trie + assert_eq!( + thought_to_be_start, + trie::XID_START.contains_char(ch), + "{ch:?}", + ); + assert_eq!( + thought_to_be_continue, + trie::XID_CONTINUE.contains_char(ch), + "{ch:?}", + ); + + // fst + assert_eq!( + thought_to_be_start, + xid_start_fst.contains((ch as u32).to_be_bytes()), + "{ch:?}", + ); + assert_eq!( + thought_to_be_continue, + xid_continue_fst.contains((ch as u32).to_be_bytes()), + "{ch:?}", + ); + + // roaring + assert_eq!( + thought_to_be_start, + xid_start_roaring.contains(ch as u32), + "{ch:?}", + ); + assert_eq!( + thought_to_be_continue, + xid_continue_roaring.contains(ch as u32), + "{ch:?}", + ); + } +} diff --git a/vendor/unicode-ident/tests/fst/.gitignore b/vendor/unicode-ident/tests/fst/.gitignore new file mode 100644 index 00000000000000..0ebd2add95d418 --- /dev/null +++ b/vendor/unicode-ident/tests/fst/.gitignore @@ -0,0 +1 @@ +/prop_list.rs diff --git a/vendor/unicode-ident/tests/fst/mod.rs b/vendor/unicode-ident/tests/fst/mod.rs new file mode 100644 index 00000000000000..5195efb13d2106 --- /dev/null +++ b/vendor/unicode-ident/tests/fst/mod.rs @@ -0,0 +1,11 @@ +#![allow(clippy::module_name_repetitions)] + +pub fn xid_start_fst() -> fst::Set<&'static [u8]> { + let data = include_bytes!("xid_start.fst"); + fst::Set::from(fst::raw::Fst::new(data.as_slice()).unwrap()) +} + +pub fn xid_continue_fst() -> fst::Set<&'static [u8]> { + let data = include_bytes!("xid_continue.fst"); + fst::Set::from(fst::raw::Fst::new(data.as_slice()).unwrap()) +} diff --git a/vendor/unicode-ident/tests/fst/xid_continue.fst b/vendor/unicode-ident/tests/fst/xid_continue.fst new file mode 100644 index 0000000000000000000000000000000000000000..cc15f2d5dc8d10036bcc13cc6785f161bef921d3 GIT binary patch literal 76143 zcmeHQ2cQ+j6<v^SVQsO|L=Z&?T|^=90RcM-7E};oLkub)AlLxGh6U^ryJ8nx3>py7 z1W}{0gBpW?iY6j9j7G(qJAHO`X4~8M|9f9v3~%e~{+;_~@7#0lxtn<<_JLnE{`}LA z|NG&;-);Ezo3Fq6a{U+UKL5|ZKl^m;Cm(;bX7#EMSN`jRf4=|TyUX8s`>i+Mc>T41 zy!y)DUw-MuWiKrK+h3o5ZpmNfKlR|`N#{(QF#hauW5=9z<{6{^aQdjzMxJ`g$tRuo z`w=G`JLH(7k2-Sjpd$_+c-Wzb9DLA$2Mp-nZ~wl1dRKJYr|aHbI(O>Wp*^Jhcr|Kn zR;zZMy7lVw6B}&4#g<z&^taw-+wHdh%?>;6wDT^F8aHX$Y}e+ykpo&l%iUY;v1jYO z+O+*`yY?MAcIw<^@2>lF+jqb2J${$+%01Qby`Tc1H(l@3cmIC<2OMzVK?fgl=wSm7 zKVs0}Bab@zm?3cNamNoGcEa!xzd!M$lTSHy<Y}W$|HJ4r&OB?(*l}l%pD^(pm^As^ z^UlBE!YTgL60?ZPza3$N-^Uo?3WpUgUi9q31%G+w>G^+t>Q7HT@%Uqp&U@tHhaP<3 z{`>Bod(Yi>-Fe6Dx7~WnALrbB(~UP=f8Dj$%${}iRaagy^YY6sz2xE<(=VDfb;^Yo zoPXZAZZjT!!my#oALq8^UOj);qx*jQDwA&a+qP}?YQ5(kt#)tOVz=hIHf!3Xaid*! 
z-f71jezW~{+itV9-*Brfx7fTv{d#rl)ULHz%^IFEX}2x6aGP<@UKPC^Rv2T1!Um^K zyJ-51i!ZtKvdd>)aphH4&zgPBwbxyL!;LrHJm-(M+<M#Xcief`-S^DB_rCidc<`Zz zADQ>)V~;=a<e#4U^Zciu`OAWZ&n{ZL<hken`nRPoEPL^#m;e6CtN(cI^*7#p>+N@z zuXy*p_y769zgB*@YW13rKK^9wr=R`%KcBDrV*Qt2ef`b18@~JR_dopa$De-Q_zU>> z2&&^}{W>~^>ci%9OEwv6RU0cAwsnJ5!#Y`_=F2y|1#bs;;+p?Xbu`uJzex_z(N039 z@!2*!UqJ`w3vpDQIKk#vRL>C^#!o=|nt0A6n9Qt>ItVZYY{N4YVSENTK>J@u8$@;A zD-FI<Jre4VY;s4YOa<zX091`f;%}^m=1t~ymU*2RvX?FW+w;#YPUS`?cAtmXr+}v0 zTx_oyjM<;VZ3Z}sjlMXEOXfba!vUt)#%b3u4UfRQN8z!@;R$higiB2hA%2NClz#hf zRzov2{v@EWiQv3Y#U^NE0xj4UnaJPGmSK&8y$@|F80b-4(WZiQmov7JdOk>+qmhYZ zMxDx-gL6U<lR^MBEd<NF5MY(R+UW7>v%2xeY~FIE#4l7N&K!o@$Bo0(F13F&lTB-_ z03;0O7>G#}EgxIeZWb4_MKeen%K=v41n}MJW22V8B+~U&0b$`dm=WQ`<1P=vxngiC ziab0j^j6G3ThMICS}207nJ(F{1%b(`bG!u}38n`D(1370u{ekf#`zyuI=tLB{qnO8 zTTsyAUmvU>K*fv8C`ds}k_3mJ;DKR#TzJ;iA|`AJ2`gX&VU2>{|D_NQ22ijBXt;zh zLm6}cxa@KOdt4ZJU>NR4OD4f#0O<+<{CPeA2@!KEhlOO}*+tAm0rZHmI0WEzcmvAF zv*n|ug3XBiy^gkr7#V}eg!Mi3WVHPuEFn<|v_KP38%7X_!x2Lp(7$&DzO%b`+h^}C zojUTjwH<2k8#0gqF$UsT0>jP7v##F&!2YrZ#Lb9r3481bt@na9ZE@5C3Y?MiI(Gp; z_zai2kyZQ<APoePrVWR5C}fAwe-Ro3fsGvth>d|A920{foO>P+cx)=LU;qsT0HPCs z@C3j$TGM2a;1vK0v5>pDh5GlG-Y}<(W(BdBml0J(NOkcd9mqly>z=!BzwMSeHxuXz zkt+sUT{LZsF(eH=9+6gR9t2*cP8|iLVogLKQ9&bN1_EG5R)Ly`un>GJ0!U<)m;``A zSJ5U&XY}p^6etByP$>+O-NY9L9|_Fu!*zsDf#9dOc;ZQLGMsWME{!}5|3N4f{|%t2 zxK2kd{6?dpgBQRIT*J@B>`VNsf=XhG`1WU)o{=QGBZTgiHIX3*Vf*FpaH9&m1aTM= z6Ab%%kQkZ%CJfry(BlyYrG|DOLis^cqi{Z(J=n*iZ4mhs<`xD%iLHSR#>e3ZOb8!9 zfcQND;ZGp=C-27o1Bi*i!G{14KpxbvbDZdC@l6nV5D-1u)+h*q^x|d61CnwKx_TCX z#7F@_QXnWPZ}rZ*=mH|9Kp;~91)2h)rsCS<+n=ATdrS7HZ@*DyC#fQgDe^L>h&(Hi zd0}@ZFds(c@%_gz`JnNgK*%JNN21$aa^yX;`I*)yZI7ECPzDI?SwaKhT};^^0E`bp zX0m)cezA8Salybs4dG)MdHWf_yl!;>9Gu7YsL3&m9v+Ye$F8TadVqz`s(V25TU4@! z6}&NGTJOCprktFE^a35%93ktuF5U-ye@>q^RWXx-#H3!?c^rE1&hA`s-zrQ;qP;>n zNy>%NUzB>Uxt9kG2LApe4wluwf^ovZV(cSjVBE)tS;EFZ(Ld0@$Z!_(@$U`xT2-#S zkc1Ev<^$R2Ksuw%HTC1HHgvnXb?u^vKRfN{d5(vM?l}E$E*lbmv;((<t)L-USFb}* zv^IC>mz-h?r)jT1VHeu;&ocYtW<lDcpG(C;>TmzDsx*VDxHg#*BB3$}T7(&uVl;bI zn<vUwrS#F%yE0!2n<QeU4*wR9yvA%xi=pKJSAaH(MUmKG5xx3ifD~jlAZ#}O%c?N4 zD!gW7fY?nLCee^3oTy1?nz1ZR5)Da=F%yC&&PyoOm=KKx&$x8Vo4(EgI7t}rRgE)U zfW#074TIohR!v{!3?#tP1VYmU;6zP?mkF*6V30K-DO=NDUxlVtoJ%vpdp2+`4&E@S z#gY?`9O4`p#DqT_Pag)tg$FR$@NT!k<}$EY9ROl*@|`c8O1XtDs6aCFB!YW$g*P;G z1Avi@(Mfu}ICfG#ttK6kh0l>0v<pwwZ633k2jcRY^BSZJn0Og;E)|>PX&m_g(Agm8 z!~}XD%Ewf5xl_%#j^MPVwQ_fcNGL%h$(%4R>#p4NkYHS$#ndf<DDHdEp<cxaqCD(G zX(HI3mMOwtB<1OXs2Lxk(Zyh>jT?*OY`_pLd3L?DpuCDpG)<F-Bwy#j<T~L_$lZWo zVVEGH?IwYGm8V2JVIbjo^^y3XbTmxYB0W<ATfdiF;XZ_@Rr->;roQAqnak)@ijCys z?WV}7bLE5<><egVbeKh_b)Q1G>ytD(w#S|rDs0zgumK|tq;v*+0e<0d^j=~{M(7?v zYy9MsSjR7pU*$?h!<1s-jH+y6b8?j?hm)oW1+tGLEKP@u^hC|V@-%@a9dp|vU8Lt6 zt1i>QBQ+d`Ee$C8eWE%sQOH*0aFWy9d7P|K1x)@)yNY|!f2tIRwk-Fgr?|5OcVUeo zr!fdH-KVmMwfy18?BN2FhqERQlWr7=@EONDCyGQ<*a6N_V;wUZl|>vdqASH(Ftr$B za*II{$q9{Iw4s0d%d&KS$)6J?#Nf2k0(0P{e1if;b-jktiKCJ!I@*95FLi^U%aS*) z?-4AIkPw5iVP(BJ<-qIL_unpgv&(ja385|&55d=a3Xuj)pCPq6gF?52%1B&NLtdzs z=yI+w!pLvhtVfxxv{H*@0x*#fHvJ}Tx~Jfw**Bf%i2D%VDBA!DbsOj@{_<?rP!?zL zq%st|325CAotqWQ$yA+d+#q7Li1P?C>59bI?a<?f2z4B$jYE}jVn<r0C6aCi&!+B4 z9RkAEBxFqhp=$#J0qMIC6BGsD;x_5VREk(3UIp{e^w3CwGnR0T2#gTUIEnicyHxpP zm`+AE0BVf@7aPGf|NWE(s0$vg7Wssfz!p%9xp2(jUOl(eDH<4arq@sNPtDCNN7u4! 
zlbdSGC9+T_u!burf}|8<%$*6=1A<B9_+9b;jQ`CzN=6(0Z&N<r<cB9ldJm<8JCWQm z1$Xw?y#@bmD3P7INVB079PBNj8`WTQz@B8xX7&ee3){i=V8*uqh;kG?g33pcrFQHD z?cZcs#T4t+&Z@jce@OC9s1`>`?>Qh~RxH0c1Pufw&?15m$wvE9$Q3F)3R|Fd6#KPE z1u6Z<Q=Z^J^bai#&JAkv2|6rGH{#Pku7t*g(6|no3%xy)>-D8J*0!zk6gY$3X~(t# z%ssN|^(k(w1E=MNF<dtgTy|>kiQ~AxA`~2i?g;RBh0xj&0Sn-2Xo6@Bae8R}8_^wJ zwtM3&RpbO@9DPx^Z_dpwt1V2lj|%ot1xjOTh@yPdEC`(7`Y6&a^&)^uPXbW!Npens zaW@D2Mcix3rXd<pTs>0px9u64nUwp=dGcpkq=_k*+K|PSf;4O^N(qwMS_&<N0J@MA zQ*i5%ng$gz{WxvN&<Qac6d22dZ~SM<b`%sh9%pNtbVwayxTf*N*_vrPT!%Eq;$nCE zOGHnW<2R$VCQUmKDW0Xk->Y1Plb$zhGrU+ci9aA6et==C1?6aP3GP`}<_HRkXY>f; zwVYMZRV9y7HS*Zmjm`uxr`-{0%OaE%k{_q2tIb%_&TdE>Do+5j;cbHAHd9V#heEb1 z5M*Uq&cVN;oc%+W(HeeZAQT+p1Z2Tl69+g8QyZ6t8|*T<COu3d0+KRIH5*wPK#Jby zW~1`Y(H3+`XB11UPIGQF&YxT_c15I8dNWaZgmF%&Bm!s(TyZAZCU2#C(ye=FeJd8z zj+|uF6z7s}ds4IpwkE<^Rf{|jM3s-oR;hkuh~wj~r6lFbf5|oeRD<ojl1RLvzE-dZ zek9?_MuRp>G!$1iE=+?Cvr3f_=?WzxJzIOl<AJJdNX8LVzZjtEtki^HJ0jlvBnc%w ztXi_xO!d5#W(o62kpW1Bmx5G3Gg1^ca55ywUI}NoVpTDyUkC>9A~-4HL}B3^yG9e2 zXbJ?<r5Oa{MF6UV4Ld|DWTUg5ai^t}O*Wg9_hKv%0)Suu$oe9@V1qogniBu?pN&!w z_+nj-NlwYESe0}Nsvf6EVdXV&cnKU<`G(cKVO4K<(jKxJD2oArRdJGC%*#0eujd4$ zpi@E~D+FsXu_o07s?z6A@b)_>9xKb?u>0{>>AhX;mr0E=muFx_iMV`}8B=xX`fRG_ zghj<tjKuPHh6|3`PUMgn&mdgiiJauGtG4E>W?Q)g1vHYYNhC>{%TkUs60j-G76r;& zj)L7uKINK6W%;2Qku06z7-mGdH2GATi>YW~+G+AAYu`Q<y?#e=_kh={YemuudBLi9 zdhc;h0XOL-A3?AA$m=~qYs5#PEtjs<YCgttNGx>hr11g$R;|mas_FSJS~~aFvMH4j zTMCn!ZbP=ooN|M8D=+6_kjyNt(tt^=Je3irFA3@YSmrja{9I8bH=QvSI^b4iEz<C) z=hhZVR^202>Wjt;2;Yf)i>hPF_Lq$&D*j3=#gUZigmqG-m2OUFl$N5o=bt-i;<zzb z*ST{iGkg`7O=Wh4lDT9mm$<(fNky=JFbrVbu5WBfxuhs!52#{aWTsRcSq1OQf1u39 zG=x7HK+k45Y#_^mdPYoev{$M-tklyj$E$=O^qwcn664IXM0oKWN4#TJXc>T&S_WXn zmZ^%L`Nvt-{dryuZ+Ok8YIUx=SA#`cR7@Jvuwqg%{&(;;4Y$8zhog61*JyUrib=hi z7vmbmYV(;E>09pJ>hV2?x2fIk;|@1>`o1$ce_q$2`_}7mbI&@xpXuAYf5oI42i81f z?qL;^rXO+gk&};pX~=-%pBc7tc!v|;J^9*^{Z8Nfj3>@|bli0lDkk+kk9)L5<GmC{ zhYn;Nc4*(WO&(Rtm8mx04WLC!mQWQyB!WPMf*~9LL~iIqL@unlYsV5Va^9?3lKRtz zX>;@_g7c~%PS&$WcgvfAgA0=L!cYbxkhwXld=mo80)c}-+yGsMu*^Sn+t(~n7pU@u z8m<l_(KaV@bQ3WT6yLzbG*}uCcBOaWzm?JZ*qEvIw)U52T2ycE6#S%2S1Q6uSRcvX zJLBVaown-rw6W$u?y@<bEW%%%s>#Uy#g-a3(RGw`4JF6R!nnF9zhP{atNvlG2VL^p z-yxal!WjOVfU^)S8b8?{-?Ejsi>?=H8Gzk{Sqoi3_6=Ex272kCDS`_0_|vdrl$-XQ ztj;)Th>L17O>L}hm-OR5UGk=t_YEe&dy}YQ7OMaP#w|)$-zb?%Op1Ul0M@4x#Tu#A zqDZ{3W@q7Xa^NeLy3MOruhCRm*MCWl^S{d5f=Z-4DJPJEgtKq~ui<CFYj>*jh3l`o zhQ-v32#Gc(+s5HIvn>V@mvnEIjBIkA_-MRFv*HzS;sTa?1g+u~TprA5<uQ`mv3@f) znnh_?+za_GxfBMej1^lKL;7<wXzrHC-DkBgO#28Rt(pLEVM>=S2TeX<yQe01OZ7Ij zCAh>YzeMW~2G}kzLlnjcHk5JAzp6w=pYUybx8a*`R@H6Mj-G>*YB!ml-S_EA6XRnm z!Lu-bz1rF=n}`$zREYu-LtzRZbn1*<#Hz}6v*eek{&Tz&<pCOF+D4a_$7Nlxr~|?c zH>%eBo5Fon1zQ#`n$NXA(n8*fqHGQ#rqGfaN7AW~iHBthEvdi@JHjogBwC36&RRl> zwL0yh=+@)+`qQhR<Kr&5HZuQGK-H0vgUGB4g49F~8_4Yux?FW?W3^$L^$WF_+6E&5 zqiQz_cGL1T&f+y#kDAmQ#)`vn9nrB<1n&n=j*KzKHIDnml+smEk^3*smi%kt`Yiwb zVH|g?EKxKv+Legx+#M-%Yn!d(ixZLBM9$6?i8Jb%VYP9~dqM5Ta;s)Ps-3tLR=f!8 zF8Yc;CjU+>Zi^Q8B92R7DOVVmXHa_8Q8Jo>Ly9VjMi{u#d9=#CSu7y3PC(#%|Iy-_ zgQIme`3R9gDQh`dy1nu$ia%*(Pb~6<AV(2AmLfK;WYNIWuhbR)Y)5A#+05`~_MU(I zv4<a0fwj1wYNh^-mWxFK1_87HTJMGDw`prO!d@%p*7$YiKluA5dA>zGhNdlsr+u^n zwn^l&|1t-fPQ95C+p1C4N&}75gKR7oV=)|YpyS)lws9P6*=#KxYdKDAasUKkH~3SF z+P_6?T+vZK`_wT9cwPpdjCkc;TENmJ!#CaliTjmQLW=A_`$B&qa6RyFT6{_#6{XG| z7kB54Tq_$R3)fwL!;K-Sb`pc<M=3m?ZSdz8(IzUT=?}CTKd=3RNUB?htF&Q?2}uMh zNP4vz;OsTfiV*^ZE^@v?x{-`xyni98)Q^~?0Guighg=F(kk*K01r<tpv1@rL>w>?y zQmmxli>UD}1wdoIGw;8TOXS{e1X5`c>WkScwE{_UiRwutnFTY=P^uUjp%%(|hD?F2 zeUy0Pe%*Tz%p7`CGX!JU15_vH>CwE!sM$-*goU=Cazt2z{@jB5x%dsNSuFr*LTX$3 zyizG4^~&R4jaSW7C86j`kr!v=INs3%5pf>Szi*%3#E>vcgHC`tb>v}oJq=E|VtzwG 
zq|hQ}Dq|i<yv!0&L9!(cs<I@wcI`WWn)?8B?*V}308qhthmBEufRg+++(%!E>=^p5 zW+$kzJOYx*IfAH3HfuIq18^;U7lL>hYRv<~_5xc1EB;-@w}RZRpi+gG<)h3r0u}63 zkUpA`(<VyHE-pR8W>HC{7f638`317Z>fDnh*C!uo2Dn|cFPIMZKOnY5fBO&S)9I{| zoE$cijT#cp<!_njTP^<H<g;y^$PEy1d@Q%gW^|Sv#wLINd^)4Jt$eg*^(r$estQ>? zT{s5UH^9j6)UiWCG`4;{6foBSSplrr-xHC)YU~*Pf}mZbs#qC4=2*2v=7padT{V@r zClURt*oLbu<t!bSrknk{3o!vcja5NPV?f<mr5~jjP62iadVAzf)$fS?IUKcOD`ohv zzh5B_KEHDzzhp>D?m^x`YYW4wQu>}x4b-o@(BEzFu4nic=hvf!wDj>uV#H3Df+c2# z%JY&Ad>l_k0p9h4V=`9m0uZ&kz~4G%G6?9uD+gYGzN+)|t=X4F0+wE1Q_c+|CWQd! z@@XMlG(F<-{)|Gpuw>6or)(9GGIeWyx-0$36til^En=0z2zrWzumUEHxF)eKVOyMn zg)Lq#T2<dcwkS)em7M1!Y-hoA=1pcR*s7vYA@@+^Z3wE3U|Xm^p*Tyzp)XGBY8-RU z8jTAp4PUmy?^Hs+>mYYdjQv4`)SR&eL2&h+kRE(05^mUkPbbvvoQNq%&FB!h8ck)u z;IuU+jl6A*wr%-k%J$PO_%ln$F0$Q<azI&aS(?8Uo6V<?`I~MesN3vWiHa1BJeBb- zlmQBnS&}@P#s=%xGu)NB^-|d&_H!3BLy*iZ(eV}1%Vb+vpa{c^ag}9RU0EzR53KGL zZ*Yp#jAI*&l!3^X069acbn^y{FLyy02O#h7kj+r6XFaCLV$hgz;<tG`3on{uDQI{k zrh|`!^0EVuMvWy01h(h3a$sH!@pk-!tWUT!Hs<H^#c@1Lot{GTHltHDQ)FpICum-x z00hB{%h^60&G~am!$K<WE~;90uX<O{4VjrOQ^k^K;-HQw&dB_yrBu362IY-6d3E%# zRPVYk*85-PVp=jHlK2gk1R_sD6ql$2eItdQZy9ay`{vRWk&Q%a!R8>NS!&N)%zZ_y z%{fO^lhUZ95y=r2bP2Q4us<U5`B$HvRsEg_&bgDOuoT<>G0oqe$fl0!XAp4|zak>m zYDB}*%z4y&XUTI(-V^@X>SM9#|B@<{!iqERxPn}Y`&gM-TfvucUjZhBGqjT7zzP7i zLBDCGu%g_cFTeWwn{PLK_uub-_}`E8*XjS1-dyZwlocIXjZCGs<oCBf-b7m!h+pg% zfEw^i?Ez-HpAtxYNz}tx586P~UA|cNc?5M>v3hdWzg3l$h6R-m%_z=3iAcw}<=)~b z&W**Ac&n2GSehIysmM-9Mh6WCbLJ?zFcZFI%0wDzowXlbj0WI6GRM^n-Cvp2F0!3x z0fr{pq&HNAo=lw6MCKi;mU=aeps1yiR^UUEd^{$p+(IR=zQAb%dQmv8$toMJqy>L; zwodXqrFzE++R5@&3;nxN%|-EIZDka`&@1P2zTY+L1J8b*Amn56(2D{$r@0$WhGn6i zbD_^FRWGY9mMRBv*LTt=^b+Alg}b;m_5J?X?7dY<XL9U~5W0fEJc?Uf!dcX*&7gGY z^omVyO=@*yt|BtZyLGxbkJ7t4%kuR%z>PO)CvuooZt?G^Btxk5%Rn*!KW$25YGeIX zX;&)c$}}u4QgSolXSu(k(k&_ef*E93%&f+=c34<OY{I9V{V<p6pnP{a-;J3Qi~=`n z>NWkck&E1tNu<AJe3zE)5v3N+C)~&*k2ri_$L4v|p=ciHMgS~E5%IyC7=Tv60|+@^ zY-wZh+VZzNL3D7;ja3Er1^&X)wW~_Fvm`>(=@|cte;0Bu6A|%LU}sa9K8sGTs+4UC zO>1HLpUBVA|G5$jEX5^m)A;+Bim6CIHE~q6G!8m61|;@zi`3_(djTX@lTd_bUct8( z@5{eZ>p8{C_Zw)NvoAVuowh|xyr#;R9aJJe({w;eZSkIAkz0<<^?l^whaTjW-KR~R zLgz_Luaf?Z{3h6csHfm>mc1X#_sjx(HeNbJsMJ$!3w+WK+&lNK+i#1MfK5JC9XtsA zHxtSzNnDyL6l9(vC`S!$xfSaykRpBnq}Bqj;wQiic!CvLfNWGfic$L;%GX{Cxbp%` zJr~WX<W^54)w$L8qe?GQ=*1yI#id@h<I>0k;L+3ff#mN4h)P%@Q51G!g;K2qY$`&9 zmCF#(0g{Ku$@)<0Y@@ZGe)jMGe2%QtU$d$6UkS1J82(W}UhP$)uf`z~DL`VXP|UsO z?j(wSIaZRa65R@wWt2H|9*UWV)cXMLz0VPkgvBm)9lT2+>XjclCN@9_NcnM(3K(&# z<ka-TyfAuaDKsVQW-XB+Cu!ygACrgR48S`^`NqaDFGRiELM;%Zkq%C6ds$q?0T)T# z`m@LL$~z)~Qx#FU%F4Kddvtn+7J^y`&n{v{4-hOsnS^!ce_timrd`wsl+&=!^zo_K z0o1dtd@4u!*teL{ohBC8_o(2iSfi7AIBN_`z~obUu`{o4|3a-VA{JZI!XtU06bwqp z>;6ls9lvG76U=Yd)l}(SmW)Un>7-rRVmBV$4^@u{<sa7<9xzJ^0^l?!Xxa>DRUz63 zA+}ShpGRyro)mjT&IkA>=hR_`9w6&21XV$b7p}rb5d=y=tE5bx)BZU>tl~H4_sgj} zEX}9@<(KQD38|)z$UG7xb7L08e=O;)5JQj(HI^OqWli}*EKl3DDS0o1?z7A{#B__+ z3;eORc0EJO_L!NhE{;*=k1=%n$)9U$dal2h<@qJq1iySo+3e=<v688hx**Wd4VT%5 znCFZBLAdB3tvN{l&MtdLvsy6<qvOl#zaW_cyo!R~C`q@J%Q4FtwXF)mrDcK=IA+tL z2wZ3~ZL%&y5hQ2)NX(?=FOg5F<$BzL|4NapP9-iJRGNI*gUF*%IVy1vsx<cdM-{2N zGB&%wEC)lKOAH<VagHHXqx9}CAS?%x?83_l5}w0JzaMeJ(Bp<2J@|;j5eGczfd0Mr z>qb(Ul$LR;7R`5!(4YxP-dplq=mZd;IqW92jOn}s%R_c#-P?(LbtZ~bfYU(~GlK2_ z1R3cGy-?t|cOO6ious+|*FOVN3<d_{4<3#Ig$shgsvkR!wvRu2m`DO65Gstou3y-{ z$YOZBnTN$Po7D7P-OL+V$GfYJw@p27RDEwr18@8mUahUXHyV14xAvNB>oxq1S8pfp z+g-esO}w>DycSK#xyzb)A2;_tZ0R+wUE|n2ya{`Fx3(tBx&L=)TW@wd@0<?aXC1wz zo%toEclCbUhcD0EH^p_Q_Xr$3sF$~6Z}09t-t>NI`+@=9-~+ws2YPi6@tPgxT{_U~ zG|209q<7fS-m+sn|2Xf~<Gl^Ty!}Ram!9a=KgF9o(z|e!_uc8<s?lD>S>Enry+h9S zj+x-iJcqRT;AC&e`Cj)a-lNmJfz!Q3GrXVHGX*yy_WeHbpx%7rwO1ArxSZ4lQPyup 
zkIy_~^y&Bu7~vp^v}@U7w|t0=X!O#&T)_`{Kxl!BbP{|pzC2CnY;yxCz$~|oluK*@ zqm85B^gqDpGtN8<U<}zvLW;chJ6S!C{3AZ+lZEydWE{ZC=QLA7-kjSP%ILNjX8Ndh z6^r_mvW|eTSJ$_{s(fs*D0iA3($kUb=bTWsU70mp-MH9400Hc!DTFS{p992JNz`-} zvlcZ^Ray4Y$8=OrfO_d6J%VHZ^z%mlm!cR|`81lh`A3I(E&1mG@-ez4n?`@<JRATE z<!v(m0)Jel#mCB^Z*p;@C~^_$N-@Evs1e@qZC0=O=;Kc`s-FLKfff^YdbBu7NivFM ztk8INCAjJji+TRh9**AxLlX<OuYv#d6brx9;9J=Cy#D>DR-w(Vshg#R)eW+omDgXr zshui$0*F|qgyA>6$4dM?7JPBZp%}^Lbh-UEF#}<>-fg;VCW>5YfvVDkpQs^~rWY5q zeqHdgO8MqWi|f}jIh8|Cl`i;`392-qX^o=(_d|{Bmx$rgX9>AE0&Z2>5&2%ceV1ze zp0fOLrT(faiyT-vV;xp<+RWzf!~e>wv80B#w&uh2cW!j<eOnQhK(pH3l27{8q}Fll Oo;a@IXYPW<qyG>8?~OSC literal 0 HcmV?d00001 diff --git a/vendor/unicode-ident/tests/fst/xid_start.fst b/vendor/unicode-ident/tests/fst/xid_start.fst new file mode 100644 index 0000000000000000000000000000000000000000..771dc459c6868e86a080eb6c08c4502a0df1dbf7 GIT binary patch literal 67370 zcmeHQ37}3z8@?o4%1ER{$kw&*OSo4qk_ctX63LQ_qEO1#qMa7~zbGoHKc!IGNsH9q zo}xvelvKYhTPp8A+c{@u&dgc9?|fU`Pu*{3&di+i&Aeykop*WOW3tlV14sY*^N%CH zA3k_s|8M*D?%BQTf4~0n^Uj}k{P@H6@4wr&b<4NkZ2tPIFTdFM`DYtG{p911KK$VQ z_x}6tJL}(G_tu+lyuSA3RZFK_bM@p&6R)~*!uTsLA2)VP{^-j_jT|xj(qWejy{Lb` zzI}RMctNk8J<jjm?Yyq%b~&eWr;d3Y&TbF!ezFP`KBmaAMT-?LQS!J_$DdFdl{vBO zN#)9)T;UYf^Qje2t5o^)DpjkUQN2dZTD8y2u2Z*O{RRyiHEt5}&P|>A%}BC2&9`XT zs`XiI+P2HdZGU!$ypEkZpVQ^suIF`w?&tUD+3SJ}d-v(vum42@1`Zm0@sOdH47+sr zh>@c%8=XG}#*VxEit!V!yb4WBgPBk5BUp5I$cOMY87S|yHLG8J<>i-NeBt?3&po?x z#qwvKUiQ?JOP~19<Bu(Qbnzn(KeTA!gAXj2fB(FH|LeZF_ug~&U2|sNdB^Ry-FnN- zvu?WahU;g}m_BXlb=R6{cgV$q2Mruxrr_M1c5U07)w)&77R{SAZPK_=!v^*1)vc3# zX6;%vYg9j@TGcA2SFUtg#ZynIaB}%_CzU<13@UxX@uiL{S)zEcqQ@3Frf{JwGX<Mz zw`sHH2FmjxJd5roPMUo6HB+v=ZtAq@GiF|Y!;Lr1y7`t{Z@c}DJ7>?i>+XB*oqONE z{yp#h`3oL+aN(kd9)4u;qe~up{69}Dee$VgPd~GK#mZ-&TlM@4FTV8hE3d9z^V-_i z-+1$_b#JeK=iUFl_x=YTe)RDtpKkc<^NpLn`0}f-H-Gc(maW^q`+oZmKkoQx=g+_V z`oCSf_w3#G+x`Ow4;}vf$RB_HbrcXDg1_Tu*ROQl6qm}^*A@_~xJ;&kSjn<25-eTD z?LU-!d2R!IW|vzD^m)oj)m|^X_vx6HOWkUD2VN^@O~-5G<VkdIw}v8F-*kP1V~gVZ z;awg|vilwfc(;er;OzI#Y6<FAAHi}7_IinCq?`m&^#C^MC3UMV*rI35v|hH<Y}M#+ zswgh#eaV4Eb6Bw!!NAO3fBi8<W%3fu;{iFK_^(<3eHdZrPZ=6!&!VF^zdd+H(lg;3 zr8}TSK_@c#^#GrGcklY~htGA#+KBnJ%f~rKUzMg@2M1{cHJK0x2Tg0Ws7_tr2V;M_ zGn{HQ^rjLD+@;+2?t6H_y>p!_`1AoV58a=9VhD4`BM0_-EICX-vLaz;Q12xOIfKbk zSzr{DP{v8EFXgrJlPJhUxXaZ-T1xsJQ|;RG7m(xwG>;w(GnEOf>xjtw))Fq5Fv%qx zaoP}*G%dGw^;5$L8OTVLr^7}5oQ(gkVCUq-DGqlnGuOPDuv^g*mqd&o^?*yn<lX^X z5;EgjqP7f{N3#+*#o@aCbMGE3P5ox`hpUOqY9*If>BUud&9OvPs+el1Etl%iHa6K4 z8MqKzh%Gm5O2nT``YDZl3!ZZ%qS9pm_2f@3TQX9Biw2me0I-~t$gAKQ+<?fe@Ygib zSo};EGk_Up%qdGnaMLU&e=|;^Tf)s-9JPPnw)JB=+pC`So^LFjw1t!SusxUv9$U3+ z-mFPu&K>Bbil{V`C(<9t=9DCh76Wk0#mSotTqF)PYC^5raAr1bY5qed9~x+`V?I?a zwfR=Sa&b@u<LWXna1c<@IPPSa+7ZB|O40BUN`aLf$r5N(9FyeN^kY%PjOELZE?%_o zf%*3np)!^!GlB9A<MDerXdrtdeJ9Nl1Jj4MUPUrx0ID*BEx*9^7;Mh)q~MnItufGl zH)sfMU5B8{;H{bg6DZ@XHETH8ArCr2r_MNaP8a+KYc}xTZi3VfXEmSxnli5<bdlfK zC*=CRz1y~8rIz(#TABm>N=)Y1UW<ng88i@U1Y8TR{_Q*wi~RLdjbBbsv{u$W3#!5j z51{K+a|RW^ba(~<TDEe^bg9;)&{A08`6biKR93>Q5{(r&Dl4EmE8K33u1BW~Z`p9g zcqdO&6RwmB&{YYU%HW-Hb>(VXV`NRcsxWeEi<qJazu;I#(7kP7jgK(<MaWoKFM4sn zDoY~86m*u>?5mTH*((65b`(d`RzX^YSQTj{)o;d-(NaNAX)VWK@ntSP%+^#+gh^lt zLDkL!=h3v_GZjruqPBjUYN%IRTQ8pJB__onUB+*`ekRjS=UUrtU~Qf=yDojSer}<m zG<aO>Bb9~9;02&Hd%;0gNjvX1^aDpHEpEGbyQK<h7_Gf!{+O}jv=@Xzba@QZM%Pu* zUclyU6{`VmG?1H>CEPNWq;(c1ub}T6=y3xoG4Z&bs7t7E48xq<CPY_&tx1Av^*>n8 z>`43$$+IPG05&ukF?=Y|&ok?ioW$teb<<c!Eo62jq?8<n@fTU1TeKtv_=C+v$d_a) zLJF)8;jiQmu_JL_TCEug-EnCvyzTN+lCQV~YxRMs_0hx0C5V*YEvWWNu`?lbxt80Q z5DODu0~2zB3#@P;wA+s>9f)O=Wf<iv9Zs5A2@s1Wo9%}SDXRd&7UGySX@xfABz_Zs 
zuYm~=3ljitV*)FISeXDQb|w(i&;;#DIl)Eo0L&&Y0jhp%8Kqi6MPTGaq>ug<PC$(G z0H~Rso-GQd|LW6MUNQ|ib?sEEQIk9^?Z%^S1z@&OSkm-X6qJz3eFR;4Eq$$OUFydb zq`8e>=u<&Z@>eN4F_av)jUR$cCf*pHU0{2CRfNG6L}nvamkn!>(^UGg8q{{$hvx-H zB4OFEg4u|bF7**wvf};(V^+SAq?*;(yeSsDXMA>&sKjNPpR2KD4r0D6ZLgL!zOc<p z*RcjdvdYI=W%;^f=M31yUcL-^@uefZWCr*0?yhv9%U(+guhU&o%Fxsp)`v805)GTV z)F453o8FuJMNIOaz$=zAN3N_bgAF4831b$+@8rnWd=E#oMKlKZ&d)^erVWLex3@4( z8r1Sw@+orBNPKP<&e#P^dE3MQP=UM9m#Y1?G~XKUps}PGcM#GzY6zW%NJg#heq86y z<tw!a=L2K^JnYUTY#{C}L)s)PH!Qn#WC^1FOvndyONaMWcJ%W;i#_wSw_}y%<QLK- zijHo`)!8*8)V<HeNVuvQ3beFji(;ilz56)Xz$UoZB+_Z6rqB)PKnU_P965M^IfN2_ zP<#>(;cF+j*#&X3!_J{mdCaK;Ys_PFHs_kkit1<qKoHE+rXkE>0kF57b3dOHamxed zmj|=w;L6N;7TqmRPzL5QT%eQjPMbc2=X)8g%|N0H`pDD`{?I|}1}}NQ6Yuv;8;O?y z_k72Sp*7s~I3~|ypCxh7fc}>LoB7OBk9l7fi+LM|GXp06@~2h=F!{>Mw%(R0Cov0S zT1w~vJ)swv9p35rJCu&n@jRxAA*>dHr!iS#_haDx$2dn^kO7zzGND<?5t$SK_C^No zj*N5YE0qQ$*YN4%QE9b8Wck1<!R{0tfw+~chP}0GR_C+pIFc7Dj7^HjFD(S%Kp{XW z0A^W4E&-H<lb{^1X?7+VfCyh`1&WJ6Qf@2Hz`|(SQS$$w{*ld526Xh>J_5`kL2`I= z>Y~|d0WyLSl0%jdCJz~by)OhG0jGBUEWSSImkgjs`4MH17>*IcZ7#VD1&8V2C><Q6 z!)<Lzu{kd105UFNj{+m(+7#>6iV}PS*b$)?fCwNXZ;IaX9X_o?#OWnz5fnzCK)V_T z=>U$>VOdcC$LRnL)ImKiF;WL`s1D#*9d<va1X?c7U#90mTAhW|Gh;QgflyrvI2i!R zkA5kD426%eEln{z44pJA3^7d}*vLn`1~p{_YF$x|6sh~YOVy$~-wLN&{lS*qj2MG5 z3?q$*GHxsxAYd}+gUlsI)#AbLz?)c5B5!9=NvTI?H-rQZ1jn!hfD$oc6aM<46bV%W zB~?@-^it*$T~-z|7Yk~1YdN$O;9#jZSb<2#yHYqPEqpe8C6fs+E6W5J80+c82emES z2Af+aTQ!)qT(aCCY<ON(uN-HpD^+&VPRd&w=<<ek8nRS)n-{amOCW0(^#Rc8%qBkF z(<@srMX!vN7_|n5fh9WeK?vG}t4<ov+nY56a%Tx+&S0=(0PN=%!rj0T<#~Vz>VBMv z(}t1R&?mZSPG#BAzy4r*ZFmDL4NR)vDIwQ@CP|#XF_t8Ok|ZKPjFnc5Iv46Zn{>l$ zm+4?q{Z6@dW10vrUXj~}%`vY6-0BW|BnC7mmBe7YHwD&pwnr+v`zRz*sV77!_f}Mq zNWDZL&OqDChsGAlspSZbr9-4sK}p#t0FuX(r7mEVJt1-hX^15RQEnk-r;8bqroclO z^TC2Ob*@u@Q$TZ#)0OfISn(uS4P)5lH`Y>!+viuU2oG*MMGwh5!~$VHyM3;~ZCyGR zy168kMG|3_oza$37=0PM;Fk?+4ep~W{@=F2Frg~DNQT0MOrJrO?$iefZkC=$$Y6;D zfPjC?FJTy5p;EddQiY^k0o6NFzDTh8P~~DaBskscLO2|8P-Tv(%*@CbkTQc&<}MZl z^n^g30C%jnCy(191GgiykTe1re8>_p)}?_nik>fWcp&??=o0AtkdvR;)d^62P$>r$ z@^T%7Duy%R02Ba(Th0~14w@oK8o<IymC%6Uq!IcxvuV}i>4`y!KAwU~sNpw$*}BBr zcG^#!7D9d;j!_WeF&H*~dGXaQyI#i9acd$JRC_{@{vY&($E0V}B}Is03=2Z(*o38% zh~Nu(9opx%qk1zb?t;V1R-yqH<RA;;%yB*e7nKskeIS{dGbC|16DZ>fEO1Edgle|w zcay33sM>U>=nQCmMp0d~4;zYxHL-1?UW>)(IGG$<gps8Z+QxMV3&ubgn1;{{gB)Q5 z9f_8blHQxc<ywK3!V#ki@t~f^L{h|onp_gdRED;3uX<{jB_Fkkixe?#XiVX5+)tt} zal+!T>u62V2}XBAZqFQzcV}oubn3O&OukZu1=NpUl%*K(8h2M1jsYwX=TZTkKs*+i zcUUPmeh_q{Zf4S|3@gakX@prtu?5E`xR#HS))-pLEcr3xD*94~S(43~nq3Hmu4Coy zFg&Jfy^&TMd=DmH1Yi-i09cGIAaGp56PS|dp{zn#d4<m}@?p_+CHj=g&M#3qJ3kTs zwwIZH(sSjvRLH8hq*8W%wJM3YN22<i*eHC@3#y?S%W9$Qopl%1UsFFje`2G<O)537 z+49X+8_&vbH!3$fzjKFkI&C;7JAY-@L)`|Q->zrd3u^b?(r0DAbr*d$u*}7shTc4E z%803>vh(YW<Gg0Fj`e=~x7WeGQNwz5qX<vu&}+3b0H2tcSAi2~G1fQA=Y0}{^|1u2 zbDr9e;?eA?q@B_^icKS|_*97W8IA#UgFeV79w{1VZcf`an)TltNYNBW(F0bOy1XdF ziP0HtP-KpdOVO?c{6zzi2*oMHVxmuhHpE64x`3^&bAY;z2){d~q<O10Z_;km@|bQN zNh8!Ow!t>Fa>dj2d&?xhHT9Gi$xn=<6VKVj{fesMKoa1P&w{#}#yOqKsUpDgNFXtl z{b<0O`PN2acD>l%@u+Omvv9Z_whUF8H}8`cts3lr@APU1RMxWIK%qx%6A(jWThf~q z-4<wgTc>}&6nZwD2`a*83!-lg5<ktGWW!3Q)Llt6fJ$$*RTymm5%#-@F-UM`(#o+R zFb8NR_0W$Il1w^v<rUE0bX0rw74A8!XtjhGx%bWG@x&F27a^LsPN?cfwoyk8CsRv4 zu{bZXmh7((9D>CJaZYH!%>`iUG|N|!th=Cu6bBwt9I&e*Sk8*72aac4aq)T3#&n`F z{46#yjxR>I&9Si<^p=||L$O!sLsD?s4uXjaEN_pn-Z{{|YzG2zO5_wX(}Mv5+33I+ z9an3|TvP5S#vGcRX5`^K&Vl_TU@dXv7YscYdT)*fJ5D{x<g{tog8GxXqKfKSy5iV= zyf85!ClC_Yh>nEX4iFlH`4Rw$PD8@e82ta7`4bM*;&Iuo;LXo@;AYu9cqcSCi$G$H zS|2penWGe@UR(15_mU5*ZRu|e4XI*@IO|9_s2FRg%)Z5}h8Z=)>cRUvSojOKy(Q~4 z1jv@*G15a}<Aw8KX@osR0iJ#Xo+|`4pCKqSu2?%{i6AZ}hR}jmWI@BQx{E#09C39| 
zU<G$~u9Ay~Fpve6WswXVU9FsgR6z2%rKS_cW4vbBh)=)rd(LSEY7>QLwKTJxE{;(K zgoU5kP6yYLI)kKN`JL+;+kNLEPFa)WFbLA+glEwB6fXBM-CDyykXUC7q)cy98q($H zhU;g}2#mO_XDhH{T3iICA*&ckAqcd*B1Dqzbe-xU?(g0$wMWmyG|K@Ru@i(#bTa%> z^U*W8Ae(Z)XdzNGc8j*h$Plc~zMk|1>KkC})hcXDUa?#aCTTW+P5SRrZs;khm*n)U zAvT0nK2F>-<wBrrDk9k()LTN)D3PknT?#ixRb_e<##*OUJoS{=wpVAIDp^U+L@BJ1 zb=Cjdt#bEoBi7Icg%v9(X~P-cu7xJ3Z%UH$KkfKYk8U06fHiT#_!hAUWAHcxf6oNn zv^{=d6C{X+6_1^@b)vF+^3#kD^Sd355;~g1162^93=NYX_SHpRo4+EjcUTT+l7{V8 z!5lj0ne7uCj1fFCK}f(bfULMW05+~+AIS?_?`(}LN>L@lo~W%`vF|BZPA8Y}92WR$ z%jH5e!!;#IFYsZH)urDFW1<M=Xn@CAjPJ_Lsbh*c8swA(n)o<u)t85l=IYed6ox6{ z&>zqgnqkhG#3U8H^KCTW9zA)dUNI`vS0CFt`aTMpnZ!BY@W~4#T&JSF!C?<oESK50 z-)bYdPz(4TEU<eMEBdqJQzXV1L8Yy?oBnR=?FK9t0np1%Ew5v!RU4??18!O0J!%pJ zFO{RY>xd;}GTB^04nQun53s}!9|0r9KBV+0xJ=6XWYFj&PAk0?4i+40<HjdhJhQ~s zp=ro~6tPQ3+=E76I(M-Z8(4FN7m07F1-b17o}ue$%vkV^-mPm#%3_M@okc_u9Od<K z4)dzMdbS#jucoF4NLIP^5(iX%1b!?Mkt9fE)Fh#A(u+%jA58NSZ9JhHkA}L!fMtzj z$$~+kK*O|1Jr0y|v1ibr0CUHYdSW{&270!N45=qj-E^#kss$Lv=&FF9&{X9L)qTgv z{>3R}PsQ9=*gS6WfmR^kI`#WJ^B6QXx{1^E^!>JgcC^-h)IP_w$}!~^QInLACD;JP z2#d!UJV0fP9K13Z+2E3#DK+BET9XG5aQf8y4FJ!jFwpv@o0X)Ng+qn$aBFzbITW~L z>xv?dNCbMcZmbEHDxMdt^*~gpQge#{S~osbEL4X&Z`ndYI51|Rp_S}!C7f?vghI{f z4+pj)@v7WN1~yOu?-DKcr--e7%d+!$nVItCG<|*+s_H+8A!?|MxJ>f79DU2%Fww2+ z3x-GP`lXs9v8#?;U`711&cQhn_b1m)d@enrIcCuVn8~_TWozd7Eq9|vGL2%VU>0R1 ziYwcu8In}-3((GwBodKoFgqh`dIo+XZmAjcle2bCi!fwZ?(9Y$33QY0HPmSDQCVF@ z&$8wI!VJmMgNBNd_8d5&G%911LDJpC<F_UJQatBD-NT_qp5vL)i)J0MycoL;fmDrz zYCrt<b&Jr=ar9+qeo6@=1vgJMVv>fz9$<!ya7BMI9qOXBezT?SGPhx{aTZLQT@<Ql zQ6<=jINjX27yBol@*s#`eZBdcZ?|mS_TBf}NzAdGKmURelhE!+XsiRq5>AJ;#x=ef zRDkj#nF`NNT%HGhhb@&^RApeQSU=sy+xM_lwb~h8R%+5|Lic_vKNt=59RJL{nCgAO zj}v0z^+YfK%4`GhGl;947sR|#BxzqrJ3qRlz91=tLh&Q#e=cKh%M2GM1{m@PeKpY^ zQBj&90!N}(BDV0j))&)4l=kJQv(JRE@Yg0j9#!^fo*9>Aw@{Ti^FicOc`<Lv?)WRP z?!xP6)iNf7SKE~|fNnztfZ;@{7^kUbFD_CR{YizhR;*j7tACHsRTy5uRUOc}gc6%( zS|{r~gvpsK(W{X9<H@R|mMM9yijvmRIO-^E6P`r_!%f|o{Lyk;6R#I4-lq?BmWlUf z322Y%g&W)`Y8#PhUNrex$IF?;J8+?9SrgIE@BYRUEr3x0;@78mJsA!k)Q#u+ZCj-s zuZK6FRoz6H>sZzo+J&vlxpZtu5UV&pk{@lHec_pg0dgM{s>3e&+ln!=K<^+MR?el_ zggyQ33(j+o#BiRo>CrE-5p9ZMw}`k8jjC5==`GMd3(+B7I@FmRkszV6O5OzV;y283 zUk@29IV{S*PJJRhGrC8fVjRVl34sVq7&>0Qm|3}W6VMW1tKPkO_GoMfU26jHHDFLk z(+Gg!raeAbq7P6@u0SBrtP-CsJ3!zmgIS2T6`XMeM6YHRtwsE_K_DiBS~K}6s0udj zsKa*$fta-Q&Aa#P-S^x60|yTs{{6@w<aI=UModg`b>~>(*nWW$TNmQApAPEqypDma z_vx}I`E=tF9(vjK?R1QMY4wEvLAS>>lbPB$j%}O+M_b^f)c^IVCzn3)*pf#UE%by6 z3oi4x@B+N0XM~j*1dbK0;~MX|ckX>GwgeFP-Gu;)9s+n6A7WsM1sRPIqhSTCe3r(3 z0#EGHjhnvsQfq%h*F&^FQ>its=Z=HawSX;&-TFDzcgiDj^>#Qqni*E{^vWC(<|G26 zhdqhN2~0#=qd}=0S8>ac$O=)dm_;)acfoNV+Tqogo1Q_S6<%80?UmhG(vx9o_D?2t z{+<wu_c1!ap3{0(*BBFE$)g3KtfPuV>BIw|;Q-ju&Q^AKZSCuNRu{b1cFgMnVRoZ4 zIEZV_jl1!C+;Pqrq{6{Y(V`mLe6`AR_!$0nSHcC66_^GL@_}`*mS799!;I<x<;=jg zRIh0e@OM4W#=|zS9On3N4(|1AZHx*q+Q7i-HH;u*VZ?)Ks2RRZ6f#97_>1LP)4mO= z5#LKCL%W7m6oeg~_|%JG@0R%bs9j8>-<>ZXiPH)?Ki$G!a!P0_oq}2^k7me58Tv|u zzxPv$EF-5_<Gx~e*pwA}zhUmj(jz|VHyd|rEI~Wh^9}GCXmpxq)2ysL-Ueytbf5N` z%W!l*QMT}of7t%rwyi$sY`PzvNoC#yQ5o>w2snNwcS8V>eJ>j}@&-!9M+#AXzUzNv z!a3xn@BC`!Ck=~IaN5rhA(w&Sbi;Fc%eWrEVTd0h!Yxs@wSr5wM8Q@ZuU$}{(~thT zrkSr<UL0eRvZkn`-!Lcl`YU$)Bo5k0w|gwtSTNsF$xzju16DANXl97zD@ST2x|vHa z8G7-c0sZ@4*sCWNQg`jr`Rub=5i@27p`}*!YE?ZD>#a~YPz1-hu&{m<KY6@hLsfuk z;CPM!;qjmmaHuzG1B;<Hp*dh&3TS;6v;k;~#dbLaqY=lo08BMRHv`<}6$qvukl^wd zoxm}}gJG&YGo_fTCQgFMM2ZJ4Z2BlD3f+mLjaM%G<S{LaPAK|Cu?od|72jE+_i-Ca zoqoboXi3?-%3piR+=_E5&)ZS5YR#(M&X~6&wtq)!yjg4MnMLcoUYE4*-r$Y~<hxQM zuinZxwXUvfep}0Otq-<g`Fc5b=f0Ht_SwVo9`CrP^Rh0tb^Z9fJG&n^e`C*O7p&~P zs_%3C7YrygXyxFtLl0k4Xn3iSIhW1bF*yJDahtE`c;)u1dQ952lfpz}UStcAzP(Cx 
zCj{%&p*B@qfkSc-+uxxk_m3LYqk`HeVYT_k1pRAWcYhN}G5!!?G4S}tW-wq@*W)jt zM*H$8J0&MB!|>3&grG#jc&czFC6ncfI9KKB<gPO*xSQYMCSHp#4Pl0|O3xrYbxO1t zE}B&e(AE?XRXW`=v($Lu8GCQXGw;J4K$z3Ld1Y4F0L%eU2Eeud`^TnvX1R{BU-$O< zcis(z7$I5k{P`C;nohKsf@g`E{Jn);1?Fq<n6Ckg@-3?Mj9NV(r5-h4`)88BO7Xsk zjRh=DAXt7U0v=<TlefYvER75iw0jTQo8WcT+YvdvIaFor(lCn#XI0J_b!gO~LRsYR z+Ct=SEdQ1i3Et|_V*Yg(m-MewP$TFFV8bFMtaQQAGfr?P7Ctt0>$1EM+e-^(!D+h6 zz67t!Y2CfbhNwZ+(~bJm**#fuE_US4F7b1koI6u$On1%ITDXl%XaFfsM3wYrWV~!B z?WHHB2{Z*OCbAl36-wxThYQ7i3m;>|a>1<;eT)$^&v?j8ISrc?%35Fa*b1ZWCEB+t ZMY7iZ(5f)qddd~U^Nl|x?|**F{{g3x<S_sM literal 0 HcmV?d00001 diff --git a/vendor/unicode-ident/tests/roaring/mod.rs b/vendor/unicode-ident/tests/roaring/mod.rs new file mode 100644 index 00000000000000..9396996b746b34 --- /dev/null +++ b/vendor/unicode-ident/tests/roaring/mod.rs @@ -0,0 +1,23 @@ +#![allow(clippy::incompatible_msrv)] + +use roaring::RoaringBitmap; + +pub fn xid_start_bitmap() -> RoaringBitmap { + let mut bitmap = RoaringBitmap::new(); + for ch in '\0'..=char::MAX { + if unicode_ident::is_xid_start(ch) { + bitmap.insert(ch as u32); + } + } + bitmap +} + +pub fn xid_continue_bitmap() -> RoaringBitmap { + let mut bitmap = RoaringBitmap::new(); + for ch in '\0'..=char::MAX { + if unicode_ident::is_xid_continue(ch) { + bitmap.insert(ch as u32); + } + } + bitmap +} diff --git a/vendor/unicode-ident/tests/static_size.rs b/vendor/unicode-ident/tests/static_size.rs new file mode 100644 index 00000000000000..4b6f16f1981095 --- /dev/null +++ b/vendor/unicode-ident/tests/static_size.rs @@ -0,0 +1,95 @@ +#![allow(clippy::let_underscore_untyped, clippy::unreadable_literal)] + +use std::mem::size_of_val; + +#[test] +fn test_size() { + #[allow(dead_code)] + #[path = "../src/tables.rs"] + mod tables; + + let size = size_of_val(&tables::ASCII_START) + + size_of_val(&tables::ASCII_CONTINUE) + + size_of_val(&tables::TRIE_START) + + size_of_val(&tables::TRIE_CONTINUE) + + size_of_val(&tables::LEAF); + assert_eq!(10248, size); +} + +#[test] +fn test_xid_size() { + #[deny(dead_code)] + #[path = "tables/mod.rs"] + mod tables; + + let size = size_of_val(tables::XID_START) + size_of_val(tables::XID_CONTINUE); + assert_eq!(11976, size); + + let _ = tables::BY_NAME; +} + +#[cfg(target_pointer_width = "64")] +#[test] +fn test_trieset_size() { + #[deny(dead_code)] + #[allow(clippy::redundant_static_lifetimes)] + #[path = "trie/trie.rs"] + mod trie; + + let ucd_trie::TrieSet { + tree1_level1, + tree2_level1, + tree2_level2, + tree3_level1, + tree3_level2, + tree3_level3, + } = *trie::XID_START; + + let start_size = size_of_val(trie::XID_START) + + size_of_val(tree1_level1) + + size_of_val(tree2_level1) + + size_of_val(tree2_level2) + + size_of_val(tree3_level1) + + size_of_val(tree3_level2) + + size_of_val(tree3_level3); + + let ucd_trie::TrieSet { + tree1_level1, + tree2_level1, + tree2_level2, + tree3_level1, + tree3_level2, + tree3_level3, + } = *trie::XID_CONTINUE; + + let continue_size = size_of_val(trie::XID_CONTINUE) + + size_of_val(tree1_level1) + + size_of_val(tree2_level1) + + size_of_val(tree2_level2) + + size_of_val(tree3_level1) + + size_of_val(tree3_level2) + + size_of_val(tree3_level3); + + assert_eq!(10392, start_size + continue_size); + + let _ = trie::BY_NAME; +} + +#[test] +fn test_fst_size() { + let xid_start_fst = include_bytes!("fst/xid_start.fst"); + let xid_continue_fst = include_bytes!("fst/xid_continue.fst"); + let size = xid_start_fst.len() + xid_continue_fst.len(); + assert_eq!(143513, size); +} + +#[test] +fn test_roaring_size() { + #[path = 
"roaring/mod.rs"] + mod roaring; + + let xid_start_bitmap = roaring::xid_start_bitmap(); + let xid_continue_bitmap = roaring::xid_continue_bitmap(); + let size = xid_start_bitmap.serialized_size() + xid_continue_bitmap.serialized_size(); + assert_eq!(66104, size); +} diff --git a/vendor/unicode-ident/tests/tables/mod.rs b/vendor/unicode-ident/tests/tables/mod.rs new file mode 100644 index 00000000000000..72bfd8bd7b9507 --- /dev/null +++ b/vendor/unicode-ident/tests/tables/mod.rs @@ -0,0 +1,7 @@ +#![allow(clippy::module_inception)] + +#[allow(clippy::redundant_static_lifetimes)] +#[rustfmt::skip] +mod tables; + +pub(crate) use self::tables::*; diff --git a/vendor/unicode-ident/tests/tables/tables.rs b/vendor/unicode-ident/tests/tables/tables.rs new file mode 100644 index 00000000000000..9db6fe9589160b --- /dev/null +++ b/vendor/unicode-ident/tests/tables/tables.rs @@ -0,0 +1,361 @@ +// DO NOT EDIT THIS FILE. IT WAS AUTOMATICALLY GENERATED BY: +// +// ucd-generate property-bool UCD --include XID_Start,XID_Continue +// +// Unicode version: 17.0.0. +// +// ucd-generate 0.3.1 is available on crates.io. + +pub const BY_NAME: &'static [(&'static str, &'static [(u32, u32)])] = &[ + ("XID_Continue", XID_CONTINUE), ("XID_Start", XID_START), +]; + +pub const XID_CONTINUE: &'static [(u32, u32)] = &[ + (48, 57), (65, 90), (95, 95), (97, 122), (170, 170), (181, 181), (183, 183), + (186, 186), (192, 214), (216, 246), (248, 705), (710, 721), (736, 740), + (748, 748), (750, 750), (768, 884), (886, 887), (891, 893), (895, 895), + (902, 906), (908, 908), (910, 929), (931, 1013), (1015, 1153), (1155, 1159), + (1162, 1327), (1329, 1366), (1369, 1369), (1376, 1416), (1425, 1469), + (1471, 1471), (1473, 1474), (1476, 1477), (1479, 1479), (1488, 1514), + (1519, 1522), (1552, 1562), (1568, 1641), (1646, 1747), (1749, 1756), + (1759, 1768), (1770, 1788), (1791, 1791), (1808, 1866), (1869, 1969), + (1984, 2037), (2042, 2042), (2045, 2045), (2048, 2093), (2112, 2139), + (2144, 2154), (2160, 2183), (2185, 2191), (2199, 2273), (2275, 2403), + (2406, 2415), (2417, 2435), (2437, 2444), (2447, 2448), (2451, 2472), + (2474, 2480), (2482, 2482), (2486, 2489), (2492, 2500), (2503, 2504), + (2507, 2510), (2519, 2519), (2524, 2525), (2527, 2531), (2534, 2545), + (2556, 2556), (2558, 2558), (2561, 2563), (2565, 2570), (2575, 2576), + (2579, 2600), (2602, 2608), (2610, 2611), (2613, 2614), (2616, 2617), + (2620, 2620), (2622, 2626), (2631, 2632), (2635, 2637), (2641, 2641), + (2649, 2652), (2654, 2654), (2662, 2677), (2689, 2691), (2693, 2701), + (2703, 2705), (2707, 2728), (2730, 2736), (2738, 2739), (2741, 2745), + (2748, 2757), (2759, 2761), (2763, 2765), (2768, 2768), (2784, 2787), + (2790, 2799), (2809, 2815), (2817, 2819), (2821, 2828), (2831, 2832), + (2835, 2856), (2858, 2864), (2866, 2867), (2869, 2873), (2876, 2884), + (2887, 2888), (2891, 2893), (2901, 2903), (2908, 2909), (2911, 2915), + (2918, 2927), (2929, 2929), (2946, 2947), (2949, 2954), (2958, 2960), + (2962, 2965), (2969, 2970), (2972, 2972), (2974, 2975), (2979, 2980), + (2984, 2986), (2990, 3001), (3006, 3010), (3014, 3016), (3018, 3021), + (3024, 3024), (3031, 3031), (3046, 3055), (3072, 3084), (3086, 3088), + (3090, 3112), (3114, 3129), (3132, 3140), (3142, 3144), (3146, 3149), + (3157, 3158), (3160, 3162), (3164, 3165), (3168, 3171), (3174, 3183), + (3200, 3203), (3205, 3212), (3214, 3216), (3218, 3240), (3242, 3251), + (3253, 3257), (3260, 3268), (3270, 3272), (3274, 3277), (3285, 3286), + (3292, 3294), (3296, 3299), (3302, 3311), (3313, 3315), (3328, 
3340), + (3342, 3344), (3346, 3396), (3398, 3400), (3402, 3406), (3412, 3415), + (3423, 3427), (3430, 3439), (3450, 3455), (3457, 3459), (3461, 3478), + (3482, 3505), (3507, 3515), (3517, 3517), (3520, 3526), (3530, 3530), + (3535, 3540), (3542, 3542), (3544, 3551), (3558, 3567), (3570, 3571), + (3585, 3642), (3648, 3662), (3664, 3673), (3713, 3714), (3716, 3716), + (3718, 3722), (3724, 3747), (3749, 3749), (3751, 3773), (3776, 3780), + (3782, 3782), (3784, 3790), (3792, 3801), (3804, 3807), (3840, 3840), + (3864, 3865), (3872, 3881), (3893, 3893), (3895, 3895), (3897, 3897), + (3902, 3911), (3913, 3948), (3953, 3972), (3974, 3991), (3993, 4028), + (4038, 4038), (4096, 4169), (4176, 4253), (4256, 4293), (4295, 4295), + (4301, 4301), (4304, 4346), (4348, 4680), (4682, 4685), (4688, 4694), + (4696, 4696), (4698, 4701), (4704, 4744), (4746, 4749), (4752, 4784), + (4786, 4789), (4792, 4798), (4800, 4800), (4802, 4805), (4808, 4822), + (4824, 4880), (4882, 4885), (4888, 4954), (4957, 4959), (4969, 4977), + (4992, 5007), (5024, 5109), (5112, 5117), (5121, 5740), (5743, 5759), + (5761, 5786), (5792, 5866), (5870, 5880), (5888, 5909), (5919, 5940), + (5952, 5971), (5984, 5996), (5998, 6000), (6002, 6003), (6016, 6099), + (6103, 6103), (6108, 6109), (6112, 6121), (6155, 6157), (6159, 6169), + (6176, 6264), (6272, 6314), (6320, 6389), (6400, 6430), (6432, 6443), + (6448, 6459), (6470, 6509), (6512, 6516), (6528, 6571), (6576, 6601), + (6608, 6618), (6656, 6683), (6688, 6750), (6752, 6780), (6783, 6793), + (6800, 6809), (6823, 6823), (6832, 6845), (6847, 6877), (6880, 6891), + (6912, 6988), (6992, 7001), (7019, 7027), (7040, 7155), (7168, 7223), + (7232, 7241), (7245, 7293), (7296, 7306), (7312, 7354), (7357, 7359), + (7376, 7378), (7380, 7418), (7424, 7957), (7960, 7965), (7968, 8005), + (8008, 8013), (8016, 8023), (8025, 8025), (8027, 8027), (8029, 8029), + (8031, 8061), (8064, 8116), (8118, 8124), (8126, 8126), (8130, 8132), + (8134, 8140), (8144, 8147), (8150, 8155), (8160, 8172), (8178, 8180), + (8182, 8188), (8204, 8205), (8255, 8256), (8276, 8276), (8305, 8305), + (8319, 8319), (8336, 8348), (8400, 8412), (8417, 8417), (8421, 8432), + (8450, 8450), (8455, 8455), (8458, 8467), (8469, 8469), (8472, 8477), + (8484, 8484), (8486, 8486), (8488, 8488), (8490, 8505), (8508, 8511), + (8517, 8521), (8526, 8526), (8544, 8584), (11264, 11492), (11499, 11507), + (11520, 11557), (11559, 11559), (11565, 11565), (11568, 11623), + (11631, 11631), (11647, 11670), (11680, 11686), (11688, 11694), + (11696, 11702), (11704, 11710), (11712, 11718), (11720, 11726), + (11728, 11734), (11736, 11742), (11744, 11775), (12293, 12295), + (12321, 12335), (12337, 12341), (12344, 12348), (12353, 12438), + (12441, 12442), (12445, 12447), (12449, 12543), (12549, 12591), + (12593, 12686), (12704, 12735), (12784, 12799), (13312, 19903), + (19968, 42124), (42192, 42237), (42240, 42508), (42512, 42539), + (42560, 42607), (42612, 42621), (42623, 42737), (42775, 42783), + (42786, 42888), (42891, 42972), (42993, 43047), (43052, 43052), + (43072, 43123), (43136, 43205), (43216, 43225), (43232, 43255), + (43259, 43259), (43261, 43309), (43312, 43347), (43360, 43388), + (43392, 43456), (43471, 43481), (43488, 43518), (43520, 43574), + (43584, 43597), (43600, 43609), (43616, 43638), (43642, 43714), + (43739, 43741), (43744, 43759), (43762, 43766), (43777, 43782), + (43785, 43790), (43793, 43798), (43808, 43814), (43816, 43822), + (43824, 43866), (43868, 43881), (43888, 44010), (44012, 44013), + (44016, 44025), (44032, 55203), (55216, 
55238), (55243, 55291), + (63744, 64109), (64112, 64217), (64256, 64262), (64275, 64279), + (64285, 64296), (64298, 64310), (64312, 64316), (64318, 64318), + (64320, 64321), (64323, 64324), (64326, 64433), (64467, 64605), + (64612, 64829), (64848, 64911), (64914, 64967), (65008, 65017), + (65024, 65039), (65056, 65071), (65075, 65076), (65101, 65103), + (65137, 65137), (65139, 65139), (65143, 65143), (65145, 65145), + (65147, 65147), (65149, 65149), (65151, 65276), (65296, 65305), + (65313, 65338), (65343, 65343), (65345, 65370), (65381, 65470), + (65474, 65479), (65482, 65487), (65490, 65495), (65498, 65500), + (65536, 65547), (65549, 65574), (65576, 65594), (65596, 65597), + (65599, 65613), (65616, 65629), (65664, 65786), (65856, 65908), + (66045, 66045), (66176, 66204), (66208, 66256), (66272, 66272), + (66304, 66335), (66349, 66378), (66384, 66426), (66432, 66461), + (66464, 66499), (66504, 66511), (66513, 66517), (66560, 66717), + (66720, 66729), (66736, 66771), (66776, 66811), (66816, 66855), + (66864, 66915), (66928, 66938), (66940, 66954), (66956, 66962), + (66964, 66965), (66967, 66977), (66979, 66993), (66995, 67001), + (67003, 67004), (67008, 67059), (67072, 67382), (67392, 67413), + (67424, 67431), (67456, 67461), (67463, 67504), (67506, 67514), + (67584, 67589), (67592, 67592), (67594, 67637), (67639, 67640), + (67644, 67644), (67647, 67669), (67680, 67702), (67712, 67742), + (67808, 67826), (67828, 67829), (67840, 67861), (67872, 67897), + (67904, 67929), (67968, 68023), (68030, 68031), (68096, 68099), + (68101, 68102), (68108, 68115), (68117, 68119), (68121, 68149), + (68152, 68154), (68159, 68159), (68192, 68220), (68224, 68252), + (68288, 68295), (68297, 68326), (68352, 68405), (68416, 68437), + (68448, 68466), (68480, 68497), (68608, 68680), (68736, 68786), + (68800, 68850), (68864, 68903), (68912, 68921), (68928, 68965), + (68969, 68973), (68975, 68997), (69248, 69289), (69291, 69292), + (69296, 69297), (69314, 69319), (69370, 69404), (69415, 69415), + (69424, 69456), (69488, 69509), (69552, 69572), (69600, 69622), + (69632, 69702), (69734, 69749), (69759, 69818), (69826, 69826), + (69840, 69864), (69872, 69881), (69888, 69940), (69942, 69951), + (69956, 69959), (69968, 70003), (70006, 70006), (70016, 70084), + (70089, 70092), (70094, 70106), (70108, 70108), (70144, 70161), + (70163, 70199), (70206, 70209), (70272, 70278), (70280, 70280), + (70282, 70285), (70287, 70301), (70303, 70312), (70320, 70378), + (70384, 70393), (70400, 70403), (70405, 70412), (70415, 70416), + (70419, 70440), (70442, 70448), (70450, 70451), (70453, 70457), + (70459, 70468), (70471, 70472), (70475, 70477), (70480, 70480), + (70487, 70487), (70493, 70499), (70502, 70508), (70512, 70516), + (70528, 70537), (70539, 70539), (70542, 70542), (70544, 70581), + (70583, 70592), (70594, 70594), (70597, 70597), (70599, 70602), + (70604, 70611), (70625, 70626), (70656, 70730), (70736, 70745), + (70750, 70753), (70784, 70853), (70855, 70855), (70864, 70873), + (71040, 71093), (71096, 71104), (71128, 71133), (71168, 71232), + (71236, 71236), (71248, 71257), (71296, 71352), (71360, 71369), + (71376, 71395), (71424, 71450), (71453, 71467), (71472, 71481), + (71488, 71494), (71680, 71738), (71840, 71913), (71935, 71942), + (71945, 71945), (71948, 71955), (71957, 71958), (71960, 71989), + (71991, 71992), (71995, 72003), (72016, 72025), (72096, 72103), + (72106, 72151), (72154, 72161), (72163, 72164), (72192, 72254), + (72263, 72263), (72272, 72345), (72349, 72349), (72368, 72440), + (72544, 72551), (72640, 
72672), (72688, 72697), (72704, 72712), + (72714, 72758), (72760, 72768), (72784, 72793), (72818, 72847), + (72850, 72871), (72873, 72886), (72960, 72966), (72968, 72969), + (72971, 73014), (73018, 73018), (73020, 73021), (73023, 73031), + (73040, 73049), (73056, 73061), (73063, 73064), (73066, 73102), + (73104, 73105), (73107, 73112), (73120, 73129), (73136, 73179), + (73184, 73193), (73440, 73462), (73472, 73488), (73490, 73530), + (73534, 73538), (73552, 73562), (73648, 73648), (73728, 74649), + (74752, 74862), (74880, 75075), (77712, 77808), (77824, 78895), + (78912, 78933), (78944, 82938), (82944, 83526), (90368, 90425), + (92160, 92728), (92736, 92766), (92768, 92777), (92784, 92862), + (92864, 92873), (92880, 92909), (92912, 92916), (92928, 92982), + (92992, 92995), (93008, 93017), (93027, 93047), (93053, 93071), + (93504, 93548), (93552, 93561), (93760, 93823), (93856, 93880), + (93883, 93907), (93952, 94026), (94031, 94087), (94095, 94111), + (94176, 94177), (94179, 94180), (94192, 94198), (94208, 101589), + (101631, 101662), (101760, 101874), (110576, 110579), (110581, 110587), + (110589, 110590), (110592, 110882), (110898, 110898), (110928, 110930), + (110933, 110933), (110948, 110951), (110960, 111355), (113664, 113770), + (113776, 113788), (113792, 113800), (113808, 113817), (113821, 113822), + (118000, 118009), (118528, 118573), (118576, 118598), (119141, 119145), + (119149, 119154), (119163, 119170), (119173, 119179), (119210, 119213), + (119362, 119364), (119808, 119892), (119894, 119964), (119966, 119967), + (119970, 119970), (119973, 119974), (119977, 119980), (119982, 119993), + (119995, 119995), (119997, 120003), (120005, 120069), (120071, 120074), + (120077, 120084), (120086, 120092), (120094, 120121), (120123, 120126), + (120128, 120132), (120134, 120134), (120138, 120144), (120146, 120485), + (120488, 120512), (120514, 120538), (120540, 120570), (120572, 120596), + (120598, 120628), (120630, 120654), (120656, 120686), (120688, 120712), + (120714, 120744), (120746, 120770), (120772, 120779), (120782, 120831), + (121344, 121398), (121403, 121452), (121461, 121461), (121476, 121476), + (121499, 121503), (121505, 121519), (122624, 122654), (122661, 122666), + (122880, 122886), (122888, 122904), (122907, 122913), (122915, 122916), + (122918, 122922), (122928, 122989), (123023, 123023), (123136, 123180), + (123184, 123197), (123200, 123209), (123214, 123214), (123536, 123566), + (123584, 123641), (124112, 124153), (124368, 124410), (124608, 124638), + (124640, 124661), (124670, 124671), (124896, 124902), (124904, 124907), + (124909, 124910), (124912, 124926), (124928, 125124), (125136, 125142), + (125184, 125259), (125264, 125273), (126464, 126467), (126469, 126495), + (126497, 126498), (126500, 126500), (126503, 126503), (126505, 126514), + (126516, 126519), (126521, 126521), (126523, 126523), (126530, 126530), + (126535, 126535), (126537, 126537), (126539, 126539), (126541, 126543), + (126545, 126546), (126548, 126548), (126551, 126551), (126553, 126553), + (126555, 126555), (126557, 126557), (126559, 126559), (126561, 126562), + (126564, 126564), (126567, 126570), (126572, 126578), (126580, 126583), + (126585, 126588), (126590, 126590), (126592, 126601), (126603, 126619), + (126625, 126627), (126629, 126633), (126635, 126651), (130032, 130041), + (131072, 173791), (173824, 178205), (178208, 183981), (183984, 191456), + (191472, 192093), (194560, 195101), (196608, 201546), (201552, 210041), + (917760, 917999), +]; + +pub const XID_START: &'static [(u32, u32)] = &[ + (65, 
90), (97, 122), (170, 170), (181, 181), (186, 186), (192, 214), + (216, 246), (248, 705), (710, 721), (736, 740), (748, 748), (750, 750), + (880, 884), (886, 887), (891, 893), (895, 895), (902, 902), (904, 906), + (908, 908), (910, 929), (931, 1013), (1015, 1153), (1162, 1327), + (1329, 1366), (1369, 1369), (1376, 1416), (1488, 1514), (1519, 1522), + (1568, 1610), (1646, 1647), (1649, 1747), (1749, 1749), (1765, 1766), + (1774, 1775), (1786, 1788), (1791, 1791), (1808, 1808), (1810, 1839), + (1869, 1957), (1969, 1969), (1994, 2026), (2036, 2037), (2042, 2042), + (2048, 2069), (2074, 2074), (2084, 2084), (2088, 2088), (2112, 2136), + (2144, 2154), (2160, 2183), (2185, 2191), (2208, 2249), (2308, 2361), + (2365, 2365), (2384, 2384), (2392, 2401), (2417, 2432), (2437, 2444), + (2447, 2448), (2451, 2472), (2474, 2480), (2482, 2482), (2486, 2489), + (2493, 2493), (2510, 2510), (2524, 2525), (2527, 2529), (2544, 2545), + (2556, 2556), (2565, 2570), (2575, 2576), (2579, 2600), (2602, 2608), + (2610, 2611), (2613, 2614), (2616, 2617), (2649, 2652), (2654, 2654), + (2674, 2676), (2693, 2701), (2703, 2705), (2707, 2728), (2730, 2736), + (2738, 2739), (2741, 2745), (2749, 2749), (2768, 2768), (2784, 2785), + (2809, 2809), (2821, 2828), (2831, 2832), (2835, 2856), (2858, 2864), + (2866, 2867), (2869, 2873), (2877, 2877), (2908, 2909), (2911, 2913), + (2929, 2929), (2947, 2947), (2949, 2954), (2958, 2960), (2962, 2965), + (2969, 2970), (2972, 2972), (2974, 2975), (2979, 2980), (2984, 2986), + (2990, 3001), (3024, 3024), (3077, 3084), (3086, 3088), (3090, 3112), + (3114, 3129), (3133, 3133), (3160, 3162), (3164, 3165), (3168, 3169), + (3200, 3200), (3205, 3212), (3214, 3216), (3218, 3240), (3242, 3251), + (3253, 3257), (3261, 3261), (3292, 3294), (3296, 3297), (3313, 3314), + (3332, 3340), (3342, 3344), (3346, 3386), (3389, 3389), (3406, 3406), + (3412, 3414), (3423, 3425), (3450, 3455), (3461, 3478), (3482, 3505), + (3507, 3515), (3517, 3517), (3520, 3526), (3585, 3632), (3634, 3634), + (3648, 3654), (3713, 3714), (3716, 3716), (3718, 3722), (3724, 3747), + (3749, 3749), (3751, 3760), (3762, 3762), (3773, 3773), (3776, 3780), + (3782, 3782), (3804, 3807), (3840, 3840), (3904, 3911), (3913, 3948), + (3976, 3980), (4096, 4138), (4159, 4159), (4176, 4181), (4186, 4189), + (4193, 4193), (4197, 4198), (4206, 4208), (4213, 4225), (4238, 4238), + (4256, 4293), (4295, 4295), (4301, 4301), (4304, 4346), (4348, 4680), + (4682, 4685), (4688, 4694), (4696, 4696), (4698, 4701), (4704, 4744), + (4746, 4749), (4752, 4784), (4786, 4789), (4792, 4798), (4800, 4800), + (4802, 4805), (4808, 4822), (4824, 4880), (4882, 4885), (4888, 4954), + (4992, 5007), (5024, 5109), (5112, 5117), (5121, 5740), (5743, 5759), + (5761, 5786), (5792, 5866), (5870, 5880), (5888, 5905), (5919, 5937), + (5952, 5969), (5984, 5996), (5998, 6000), (6016, 6067), (6103, 6103), + (6108, 6108), (6176, 6264), (6272, 6312), (6314, 6314), (6320, 6389), + (6400, 6430), (6480, 6509), (6512, 6516), (6528, 6571), (6576, 6601), + (6656, 6678), (6688, 6740), (6823, 6823), (6917, 6963), (6981, 6988), + (7043, 7072), (7086, 7087), (7098, 7141), (7168, 7203), (7245, 7247), + (7258, 7293), (7296, 7306), (7312, 7354), (7357, 7359), (7401, 7404), + (7406, 7411), (7413, 7414), (7418, 7418), (7424, 7615), (7680, 7957), + (7960, 7965), (7968, 8005), (8008, 8013), (8016, 8023), (8025, 8025), + (8027, 8027), (8029, 8029), (8031, 8061), (8064, 8116), (8118, 8124), + (8126, 8126), (8130, 8132), (8134, 8140), (8144, 8147), (8150, 8155), + (8160, 8172), (8178, 8180), 
(8182, 8188), (8305, 8305), (8319, 8319), + (8336, 8348), (8450, 8450), (8455, 8455), (8458, 8467), (8469, 8469), + (8472, 8477), (8484, 8484), (8486, 8486), (8488, 8488), (8490, 8505), + (8508, 8511), (8517, 8521), (8526, 8526), (8544, 8584), (11264, 11492), + (11499, 11502), (11506, 11507), (11520, 11557), (11559, 11559), + (11565, 11565), (11568, 11623), (11631, 11631), (11648, 11670), + (11680, 11686), (11688, 11694), (11696, 11702), (11704, 11710), + (11712, 11718), (11720, 11726), (11728, 11734), (11736, 11742), + (12293, 12295), (12321, 12329), (12337, 12341), (12344, 12348), + (12353, 12438), (12445, 12447), (12449, 12538), (12540, 12543), + (12549, 12591), (12593, 12686), (12704, 12735), (12784, 12799), + (13312, 19903), (19968, 42124), (42192, 42237), (42240, 42508), + (42512, 42527), (42538, 42539), (42560, 42606), (42623, 42653), + (42656, 42735), (42775, 42783), (42786, 42888), (42891, 42972), + (42993, 43009), (43011, 43013), (43015, 43018), (43020, 43042), + (43072, 43123), (43138, 43187), (43250, 43255), (43259, 43259), + (43261, 43262), (43274, 43301), (43312, 43334), (43360, 43388), + (43396, 43442), (43471, 43471), (43488, 43492), (43494, 43503), + (43514, 43518), (43520, 43560), (43584, 43586), (43588, 43595), + (43616, 43638), (43642, 43642), (43646, 43695), (43697, 43697), + (43701, 43702), (43705, 43709), (43712, 43712), (43714, 43714), + (43739, 43741), (43744, 43754), (43762, 43764), (43777, 43782), + (43785, 43790), (43793, 43798), (43808, 43814), (43816, 43822), + (43824, 43866), (43868, 43881), (43888, 44002), (44032, 55203), + (55216, 55238), (55243, 55291), (63744, 64109), (64112, 64217), + (64256, 64262), (64275, 64279), (64285, 64285), (64287, 64296), + (64298, 64310), (64312, 64316), (64318, 64318), (64320, 64321), + (64323, 64324), (64326, 64433), (64467, 64605), (64612, 64829), + (64848, 64911), (64914, 64967), (65008, 65017), (65137, 65137), + (65139, 65139), (65143, 65143), (65145, 65145), (65147, 65147), + (65149, 65149), (65151, 65276), (65313, 65338), (65345, 65370), + (65382, 65437), (65440, 65470), (65474, 65479), (65482, 65487), + (65490, 65495), (65498, 65500), (65536, 65547), (65549, 65574), + (65576, 65594), (65596, 65597), (65599, 65613), (65616, 65629), + (65664, 65786), (65856, 65908), (66176, 66204), (66208, 66256), + (66304, 66335), (66349, 66378), (66384, 66421), (66432, 66461), + (66464, 66499), (66504, 66511), (66513, 66517), (66560, 66717), + (66736, 66771), (66776, 66811), (66816, 66855), (66864, 66915), + (66928, 66938), (66940, 66954), (66956, 66962), (66964, 66965), + (66967, 66977), (66979, 66993), (66995, 67001), (67003, 67004), + (67008, 67059), (67072, 67382), (67392, 67413), (67424, 67431), + (67456, 67461), (67463, 67504), (67506, 67514), (67584, 67589), + (67592, 67592), (67594, 67637), (67639, 67640), (67644, 67644), + (67647, 67669), (67680, 67702), (67712, 67742), (67808, 67826), + (67828, 67829), (67840, 67861), (67872, 67897), (67904, 67929), + (67968, 68023), (68030, 68031), (68096, 68096), (68112, 68115), + (68117, 68119), (68121, 68149), (68192, 68220), (68224, 68252), + (68288, 68295), (68297, 68324), (68352, 68405), (68416, 68437), + (68448, 68466), (68480, 68497), (68608, 68680), (68736, 68786), + (68800, 68850), (68864, 68899), (68938, 68965), (68975, 68997), + (69248, 69289), (69296, 69297), (69314, 69319), (69376, 69404), + (69415, 69415), (69424, 69445), (69488, 69505), (69552, 69572), + (69600, 69622), (69635, 69687), (69745, 69746), (69749, 69749), + (69763, 69807), (69840, 69864), (69891, 69926), (69956, 
69956), + (69959, 69959), (69968, 70002), (70006, 70006), (70019, 70066), + (70081, 70084), (70106, 70106), (70108, 70108), (70144, 70161), + (70163, 70187), (70207, 70208), (70272, 70278), (70280, 70280), + (70282, 70285), (70287, 70301), (70303, 70312), (70320, 70366), + (70405, 70412), (70415, 70416), (70419, 70440), (70442, 70448), + (70450, 70451), (70453, 70457), (70461, 70461), (70480, 70480), + (70493, 70497), (70528, 70537), (70539, 70539), (70542, 70542), + (70544, 70581), (70583, 70583), (70609, 70609), (70611, 70611), + (70656, 70708), (70727, 70730), (70751, 70753), (70784, 70831), + (70852, 70853), (70855, 70855), (71040, 71086), (71128, 71131), + (71168, 71215), (71236, 71236), (71296, 71338), (71352, 71352), + (71424, 71450), (71488, 71494), (71680, 71723), (71840, 71903), + (71935, 71942), (71945, 71945), (71948, 71955), (71957, 71958), + (71960, 71983), (71999, 71999), (72001, 72001), (72096, 72103), + (72106, 72144), (72161, 72161), (72163, 72163), (72192, 72192), + (72203, 72242), (72250, 72250), (72272, 72272), (72284, 72329), + (72349, 72349), (72368, 72440), (72640, 72672), (72704, 72712), + (72714, 72750), (72768, 72768), (72818, 72847), (72960, 72966), + (72968, 72969), (72971, 73008), (73030, 73030), (73056, 73061), + (73063, 73064), (73066, 73097), (73112, 73112), (73136, 73179), + (73440, 73458), (73474, 73474), (73476, 73488), (73490, 73523), + (73648, 73648), (73728, 74649), (74752, 74862), (74880, 75075), + (77712, 77808), (77824, 78895), (78913, 78918), (78944, 82938), + (82944, 83526), (90368, 90397), (92160, 92728), (92736, 92766), + (92784, 92862), (92880, 92909), (92928, 92975), (92992, 92995), + (93027, 93047), (93053, 93071), (93504, 93548), (93760, 93823), + (93856, 93880), (93883, 93907), (93952, 94026), (94032, 94032), + (94099, 94111), (94176, 94177), (94179, 94179), (94194, 94198), + (94208, 101589), (101631, 101662), (101760, 101874), (110576, 110579), + (110581, 110587), (110589, 110590), (110592, 110882), (110898, 110898), + (110928, 110930), (110933, 110933), (110948, 110951), (110960, 111355), + (113664, 113770), (113776, 113788), (113792, 113800), (113808, 113817), + (119808, 119892), (119894, 119964), (119966, 119967), (119970, 119970), + (119973, 119974), (119977, 119980), (119982, 119993), (119995, 119995), + (119997, 120003), (120005, 120069), (120071, 120074), (120077, 120084), + (120086, 120092), (120094, 120121), (120123, 120126), (120128, 120132), + (120134, 120134), (120138, 120144), (120146, 120485), (120488, 120512), + (120514, 120538), (120540, 120570), (120572, 120596), (120598, 120628), + (120630, 120654), (120656, 120686), (120688, 120712), (120714, 120744), + (120746, 120770), (120772, 120779), (122624, 122654), (122661, 122666), + (122928, 122989), (123136, 123180), (123191, 123197), (123214, 123214), + (123536, 123565), (123584, 123627), (124112, 124139), (124368, 124397), + (124400, 124400), (124608, 124638), (124640, 124642), (124644, 124645), + (124647, 124653), (124656, 124660), (124670, 124671), (124896, 124902), + (124904, 124907), (124909, 124910), (124912, 124926), (124928, 125124), + (125184, 125251), (125259, 125259), (126464, 126467), (126469, 126495), + (126497, 126498), (126500, 126500), (126503, 126503), (126505, 126514), + (126516, 126519), (126521, 126521), (126523, 126523), (126530, 126530), + (126535, 126535), (126537, 126537), (126539, 126539), (126541, 126543), + (126545, 126546), (126548, 126548), (126551, 126551), (126553, 126553), + (126555, 126555), (126557, 126557), (126559, 126559), (126561, 
126562), + (126564, 126564), (126567, 126570), (126572, 126578), (126580, 126583), + (126585, 126588), (126590, 126590), (126592, 126601), (126603, 126619), + (126625, 126627), (126629, 126633), (126635, 126651), (131072, 173791), + (173824, 178205), (178208, 183981), (183984, 191456), (191472, 192093), + (194560, 195101), (196608, 201546), (201552, 210041), +]; diff --git a/vendor/unicode-ident/tests/trie/mod.rs b/vendor/unicode-ident/tests/trie/mod.rs new file mode 100644 index 00000000000000..3e31c5cc5789c8 --- /dev/null +++ b/vendor/unicode-ident/tests/trie/mod.rs @@ -0,0 +1,7 @@ +#![allow(clippy::module_inception)] + +#[allow(dead_code, clippy::redundant_static_lifetimes, clippy::unreadable_literal)] +#[rustfmt::skip] +mod trie; + +pub(crate) use self::trie::*; diff --git a/vendor/unicode-ident/tests/trie/trie.rs b/vendor/unicode-ident/tests/trie/trie.rs new file mode 100644 index 00000000000000..0cca9ecce648a8 --- /dev/null +++ b/vendor/unicode-ident/tests/trie/trie.rs @@ -0,0 +1,453 @@ +// DO NOT EDIT THIS FILE. IT WAS AUTOMATICALLY GENERATED BY: +// +// ucd-generate property-bool UCD --include XID_Start,XID_Continue --trie-set +// +// Unicode version: 17.0.0. +// +// ucd-generate 0.3.1 is available on crates.io. + +pub const BY_NAME: &'static [(&'static str, &'static ::ucd_trie::TrieSet)] = &[ + ("XID_Continue", XID_CONTINUE), ("XID_Start", XID_START), +]; + +pub const XID_CONTINUE: &'static ::ucd_trie::TrieSet = &::ucd_trie::TrieSet { + tree1_level1: &[ + 0x3FF000000000000, 0x7FFFFFE87FFFFFE, 0x4A0040000000000, + 0xFF7FFFFFFF7FFFFF, 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF, + 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF, + 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF, 0x501F0003FFC3, + 0xFFFFFFFFFFFFFFFF, 0xB8DFFFFFFFFFFFFF, 0xFFFFFFFBFFFFD7C0, + 0xFFBFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF, + 0xFFFFFFFFFFFFFCFB, 0xFFFFFFFFFFFFFFFF, 0xFFFEFFFFFFFFFFFF, + 0xFFFFFFFF027FFFFF, 0xBFFFFFFFFFFE01FF, 0x787FFFFFF00B6, + 0xFFFFFFFF07FF0000, 0xFFFFC3FFFFFFFFFF, 0xFFFFFFFFFFFFFFFF, + 0x9FFFFDFF9FEFFFFF, 0xFFFFFFFFFFFF0000, 0xFFFFFFFFFFFFE7FF, + 0x3FFFFFFFFFFFF, 0x243FFFFFFFFFFFFF, + ], + tree2_level1: &[ + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, + 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 4, 32, 33, 34, 4, 4, 4, 4, 4, + 35, 36, 37, 38, 39, 40, 41, 42, 4, 4, 4, 4, 4, 4, 4, 4, 43, 44, 45, 46, + 47, 4, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 4, 61, 4, 62, + 63, 64, 65, 66, 4, 4, 4, 4, 4, 4, 4, 4, 67, 68, 69, 70, 71, 72, 73, 74, + 75, 76, 77, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, + 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, + 78, 78, 78, 78, 78, 78, 78, 78, 4, 4, 4, 79, 80, 81, 82, 83, 78, 78, 78, + 78, 78, 78, 78, 78, 84, 42, 85, 4, 86, 4, 87, 88, 78, 78, 78, 78, 78, 78, + 78, 78, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 78, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 89, 90, 4, 4, 4, 4, 91, 92, 4, 93, 94, 4, 95, 96, 97, 62, 4, + 98, 99, 100, 4, 101, 102, 103, 4, 104, 105, 106, 4, 107, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 108, 109, 78, 78, + 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, + 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, + 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, + 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, + 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, + 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, + 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, + 78, 78, 78, 78, 4, 4, 4, 4, 4, 99, 4, 110, 111, 112, 93, 113, 4, 114, 4, + 4, 115, 116, 117, 118, 119, 120, 4, 121, 122, 123, 124, 125, + ], + tree2_level2: &[ + 0x3FFFFFFFFFFF, 0xFFFF07FF0FFFFFFF, 0xFFFFFFFFFF80FEFF, + 0xFFFFFFFBFFFFFFFF, 0xFFFFFFFFFFFFFFFF, 0xFFFEFFCFFFFFFFFF, + 0xF3C5FDFFFFF99FEF, 0x5003FFCFB080799F, 0xD36DFDFFFFF987EE, + 0x3FFFC05E023987, 0xF3EDFDFFFFFBBFEE, 0xFE00FFCF00013BBF, + 0xF3EDFDFFFFF99FEE, 0x2FFCFB0E0399F, 0xC3FFC718D63DC7EC, 0xFFC000813DC7, + 0xF3FFFDFFFFFDDFFF, 0xFFCF37603DDF, 0xF3EFFDFFFFFDDFEF, 0xEFFCF70603DDF, + 0xFFFFFFFFFFFDDFFF, 0xFC00FFCF80F07DDF, 0x2FFBFFFFFC7FFFEE, + 0xCFFC0FF5F847F, 0x7FFFFFFFFFFFFFE, 0x3FF7FFF, 0x3FFFFFAFFFFFF7D6, + 0xF3FF7F5F, 0xC2A003FF03000001, 0xFFFE1FFFFFFFFEFF, 0x1FFFFFFFFEFFFFDF, + 0x40, 0xFFFFFFFFFFFF03FF, 0xFFFFFFFF3FFFFFFF, 0xF7FFFFFFFFFF20BF, + 0xFFFFFFFF3D7F3DFF, 0x7F3DFFFFFFFF3DFF, 0xFFFFFFFFFF7FFF3D, + 0xFFFFFFFFFF3DFFFF, 0x3FE00E7FFFFFF, 0xFFFFFFFF0000FFFF, + 0x3F3FFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFE, 0xFFFF9FFFFFFFFFFF, + 0xFFFFFFFF07FFFFFE, 0x1FFC7FFFFFFFFFF, 0x1FFFFF803FFFFF, 0xDDFFF000FFFFF, + 0x3FF308FFFFF, 0xFFFFFFFF03FFB800, 0x1FFFFFFFFFFFFFF, 0xFFFF07FFFFFFFFFF, + 0x3FFFFFFFFFFFFF, 0xFFF0FFF7FFFFFFF, 0x1F3FFFFFFFFFC0, 0xFFFF0FFFFFFFFFFF, + 0x7FF03FF, 0xFFFFFFFF0FFFFFFF, 0x9FFFFFFF7FFFFFFF, 0xBFFF008003FF03FF, + 0xFFF3FFFFFFF, 0xFF80003FF1FFF, 0xFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFF, + 0x3FFFFFFFFFFFE3FF, 0xE7FFFFFFFFFF07FF, 0x7FFFFFFFFF70000, + 0xFFFFFFFF3F3FFFFF, 0x3FFFFFFFAAFF3F3F, 0x5FDFFFFFFFFFFFFF, + 0x1FDC1FFF0FCF1FDC, 0x8000000000003000, 0x8002000000100001, 0x1FFF0000, + 0x1FFE21FFF0000, 0xF3FFFD503F2FFC84, 0xFFFFFFFF000043E0, 0x1FF, 0, + 0xFF81FFFFFFFFF, 
0xFFFF20BFFFFFFFFF, 0x800080FFFFFFFFFF, + 0x7F7F7F7F007FFFFF, 0xFFFFFFFF7F7F7F7F, 0x1F3EFFFE000000E0, + 0xFFFFFFFEE67FFFFF, 0xFFFEFFFFFFFFFFE0, 0xFFFFFFFF00007FFF, + 0xFFFF000000000000, 0x1FFF, 0x3FFFFFFFFFFF0000, 0xFFFFFFF1FFF, + 0xBFF0FFFFFFFFFFFF, 0x3FFFFFFFFFFFF, 0xFFFFFFFCFF800000, + 0xFFFFFFFFFFFFF9FF, 0xFFFE00001FFFFFFF, 0x10FFFFFFFFFF, + 0xE8FFFFFF03FF003F, 0xFFFF3FFFFFFFFFFF, 0x1FFFFFFF000FFFFF, + 0x7FFFFFFF03FF8001, 0x7FFFFFFFFFFFFF, 0xFC7FFFFF03FF3FFF, + 0x7CFFFF38000007, 0xFFFF7F7F007E7E7E, 0xFFFF03FFF7FFFFFF, + 0x3FF37FFFFFFFFFF, 0xFFFF000FFFFFFFFF, 0xFFFFFFFFFFFF87F, 0x3FFFFFF, + 0x5F7FFDFFE0F8007F, 0xFFFFFFFFFFFFFFDB, 0xFFFFFFFFFFF80000, + 0xFFFFFFF03FFFFFFF, 0x3FFFFFFFFFFFFFFF, 0xFFFFFFFFFFFF0000, + 0xFFFFFFFFFFFCFFFF, 0x3FF0000000000FF, 0x18FFFF0000FFFF, + 0xAA8A00000000E000, 0x1FFFFFFFFFFFFFFF, 0x87FFFFFE03FF0000, + 0xFFFFFFE007FFFFFE, 0x7FFFFFFFFFFFFFFF, 0x1CFCFCFC, + ], + tree3_level1: &[ + 0, 1, 2, 3, 4, 5, 6, 7, 8, 5, 9, 10, 11, 12, 13, 14, 7, 7, 7, 7, 7, 7, 7, + 7, 7, 7, 15, 16, 17, 7, 18, 19, 7, 20, 7, 21, 5, 5, 5, 5, 5, 5, 5, 5, 5, + 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 22, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + ], + tree3_level2: &[ + 0, 1, 2, 3, 4, 5, 4, 6, 4, 4, 7, 8, 9, 10, 11, 12, 2, 2, 13, 14, 15, 16, + 17, 18, 2, 2, 2, 2, 19, 20, 21, 4, 22, 23, 24, 25, 26, 27, 28, 4, 29, 30, + 31, 32, 33, 34, 35, 4, 2, 36, 37, 37, 38, 39, 40, 4, 4, 4, 41, 42, 43, 44, + 45, 46, 2, 47, 3, 48, 49, 50, 2, 51, 52, 53, 54, 55, 56, 57, 58, 59, 2, + 60, 2, 61, 4, 4, 62, 63, 2, 64, 65, 66, 67, 68, 4, 4, 3, 4, 69, 70, 71, + 72, 73, 74, 75, 76, 77, 65, 4, 78, 4, 79, 80, 81, 82, 4, 83, 84, 85, 86, + 4, 4, 4, 87, 88, 89, 90, 4, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 27, + 4, 2, 91, 2, 2, 2, 92, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 93, + 94, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 95, 96, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 68, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 97, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 2, 2, 2, 2, 2, 2, 2, 2, 65, 98, 75, 99, 19, 100, 101, 4, 4, 4, + 4, 4, 4, 102, 4, 4, 4, 2, 103, 104, 2, 105, 106, 107, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 
2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 108, 24, 4, 2, 37, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 109, 2, 2, 2, 2, 110, 111, 2, 2, 2, 2, 2, + 112, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 2, 113, 114, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 115, 4, 4, 4, 4, 4, 4, 4, 4, 116, 68, 4, 4, 4, 4, 4, + 4, 4, 117, 118, 4, 4, 119, 4, 4, 4, 4, 4, 4, 2, 120, 121, 122, 123, 124, + 2, 2, 2, 2, 125, 126, 127, 128, 129, 130, 4, 4, 4, 4, 4, 4, 4, 4, 131, + 132, 133, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 134, 4, 4, 4, + 135, 136, 137, 4, 138, 139, 4, 4, 4, 4, 140, 97, 4, 4, 4, 4, 4, 4, 4, 141, + 4, 4, 4, 142, 4, 4, 4, 143, 4, 4, 4, 144, 2, 2, 2, 145, 2, 146, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 147, 148, 149, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 115, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 150, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 11, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 116, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 151, 2, 2, 2, 2, 2, 2, 2, 2, 2, 152, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 2, 2, 2, 2, 2, 2, 2, 2, 152, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 153, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 97, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 2, 2, 2, 95, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + ], + tree3_level3: &[ + 0xB7FFFF7FFFFFEFFF, 0x3FFF3FFF, 0xFFFFFFFFFFFFFFFF, 0x7FFFFFFFFFFFFFF, 0, + 0x1FFFFFFFFFFFFF, 0x2000000000000000, 0xFFFFFFFF1FFFFFFF, 0x10001FFFF, + 0xFFFFE000FFFFFFFF, 0x7FFFFFFFFFF07FF, 0xFFFFFFFF3FFFFFFF, 0x3EFF0F, + 0xFFFF03FF3FFFFFFF, 0xFFFFFFFFF0FFFFF, 0xFFFF00FFFFFFFFFF, + 0xF7FF000FFFFFFFFF, 0x1BFBFFFBFFB7F7FF, 0xFFFFFFFFFFFFF, 0x7FFFFFFFFFFFFF, + 0xFF003FFFFF, 0x7FDFFFFFFFFFFBF, 0x91BFFFFFFFFFFD3F, 0x7FFFFF003FFFFF, + 0x7FFFFFFF, 0x37FFFF00000000, 0x3FFFFFF003FFFFF, 0x3FFFFFF, + 0xC0FFFFFFFFFFFFFF, 0x873FFFFFFEEFF06F, 0x1FFFFFFF00000000, 0x1FFFFFFF, + 
0x7FFFFFFEFF, 0x3FFFFFFFFFFFFF, 0x7FFFF003FFFFF, 0x3FFFF, 0x1FF, + 0x7FFFFFFFFFFFF, 0x3FF00FFFFFFFFFF, 0xFFFFBE3FFFFFFFFF, 0x3F, + 0x31BFFFFFFFFFF, 0xFC000000000000FC, 0xFFFF00801FFFFFFF, + 0xFFFF00000001FFFF, 0xFFFF00000000003F, 0x7FFFFF0000001F, + 0x803FFFC00000007F, 0x3FF01FFFFFF0004, 0xFFDFFFFFFFFFFFFF, + 0x4FFFFFFFFF00F0, 0x17FFDE1F, 0xC0FFFFFFFFFBFFFF, 0x3, 0xFFFF01FFBFFFBD7F, + 0x3FF07FFFFFFFFFF, 0xFBEDFDFFFFF99FEF, 0x1F1FCFE081399F, + 0xFFBFFFFFFFFF4BFF, 0x6000FF7A5, 0x3C3FF07FF, 0x3FF00BF, + 0xFF3FFFFFFFFFFFFF, 0x3F000001, 0x3FF0011, 0x1FFFFFFFFFFFFFF, 0xFFFFF03FF, + 0x3FF0FFFE7FFFFFF, 0x7F, 0xFFFFFFFF00000000, 0x800003FFFFFFFFFF, + 0xF9BFFFFFFF6FF27F, 0x3FF000F, 0xFFFFFCFF00000000, 0x1BFCFFFFFF, + 0x7FFFFFFFFFFFFFFF, 0xFFFFFFFFFFFF0080, 0xFFFF000023FFFFFF, 0xFF00000000, + 0x3FF0001FFFFFFFF, 0xFF7FFFFFFFFFFDFF, 0xFFFC000003FF0001, + 0x7FFEFFFFFCFFFF, 0xB47FFFFFFFFFFB7F, 0xFFFFFDBF03FF00FF, + 0xFFFF03FF01FB7FFF, 0x3FF0FFFFFFF, 0x7FFFFF00000000, 0xC7FFFFFFFFFDFFFF, + 0x7FF0007, 0x1000000000000, 0x7FFFFFFFFFFF, 0xF, 0xFFFFFFFFFFFF0000, + 0x1FFFFFFFFFFFF, 0xFFFFFFFFFFFF, 0xFFFFFFFF003FFFFF, 0x3FFFFFFFFFFFFFF, + 0xFFFF03FF7FFFFFFF, 0x1F3FFFFFFF03FF, 0xE0FFFFF803FF000F, 0xFFFF, + 0x3FF1FFFFFFFFFFF, 0xF9FFFFFF00000000, 0xFFFFF, 0xFFFFFFFFFFFF87FF, + 0xFFFF80FF, 0x7F001B00000000, 0x80000000003FFFFF, 0x6FEF000000000000, + 0x40007FFFFFFFF, 0xFFFF00F000270000, 0xFFFFFFFFFFFFFFF, + 0x1FFF07FFFFFFFFFF, 0x63FF01FF, 0x3FF000000000000, 0xFFFF3FFFFFFFFFFF, + 0xF807E3E000000000, 0x3C0000000FE7, 0x1C, 0xFFFFFFFFFFDFFFFF, + 0xEBFFDE64DFFFFFFF, 0xFFFFFFFFFFFFFFEF, 0x7BFFFFFFDFDFE7BF, + 0xFFFFFFFFFFFDFC5F, 0xFFFFFF3FFFFFFFFF, 0xF7FFFFFFF7FFFFFD, + 0xFFDFFFFFFFDFFFFF, 0xFFFF7FFFFFFF7FFF, 0xFFFFFDFFFFFFFDFF, + 0xFFFFFFFFFFFFCFF7, 0xF87FFFFFFFFFFFFF, 0x201FFFFFFFFFFF, 0xFFFEF8000010, + 0x7E07FFFFFFF, 0xFFFF07DBF9FFFF7F, 0x3FFFFFFFFFFF, 0x8000, + 0x3FFF1FFFFFFFFFFF, 0x43FF, 0x7FFFFFFF0000, 0x3FFFFFFFFFF0000, + 0x7FFFFFFFFFF0000, 0xC03FFFFF7FFFFFFF, 0x7FFF6F7F00000000, 0x7F001F, + 0x3FF0FFF, 0xAF7FE96FFFFFFEF, 0x5EF7F796AA96EA84, 0xFFFFBEE0FFFFBFF, + 0xFFFFFFFF, 0xFFFF0001FFFFFFFF, 0x3FFFFFFF, 0xFFFFFFFFFFFF07FF, + ], +}; + +pub const XID_START: &'static ::ucd_trie::TrieSet = &::ucd_trie::TrieSet { + tree1_level1: &[ + 0, 0x7FFFFFE07FFFFFE, 0x420040000000000, 0xFF7FFFFFFF7FFFFF, + 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF, + 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF, + 0xFFFFFFFFFFFFFFFF, 0x501F0003FFC3, 0, 0xB8DF000000000000, + 0xFFFFFFFBFFFFD740, 0xFFBFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF, + 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFC03, 0xFFFFFFFFFFFFFFFF, + 0xFFFEFFFFFFFFFFFF, 0xFFFFFFFF027FFFFF, 0x1FF, 0x787FFFFFF0000, + 0xFFFFFFFF00000000, 0xFFFEC000000007FF, 0xFFFFFFFFFFFFFFFF, + 0x9C00C060002FFFFF, 0xFFFFFFFD0000, 0xFFFFFFFFFFFFE000, 0x2003FFFFFFFFF, + 0x43007FFFFFFFC00, + ], + tree2_level1: &[ + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, + 21, 22, 23, 24, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 35, 35, + 35, 35, 36, 37, 38, 39, 40, 41, 42, 43, 35, 35, 35, 35, 35, 35, 35, 35, + 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 3, 58, 59, 60, 30, + 61, 62, 63, 64, 65, 66, 67, 68, 35, 35, 35, 30, 35, 35, 35, 35, 69, 70, + 71, 72, 30, 73, 74, 30, 75, 76, 77, 30, 30, 30, 30, 30, 30, 30, 30, 30, + 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, + 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 35, 35, 35, 78, + 79, 80, 81, 82, 30, 30, 30, 30, 30, 30, 30, 30, 83, 43, 84, 85, 86, 35, + 87, 88, 30, 30, 30, 
30, 30, 30, 30, 30, 35, 35, 35, 35, 35, 35, 35, 35, + 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, + 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, + 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, + 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, + 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, + 35, 35, 35, 35, 35, 30, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, + 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, + 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, + 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, + 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, + 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, + 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, + 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, + 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, + 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, + 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, + 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, + 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, + 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, + 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, + 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, + 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, + 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, + 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, + 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 89, 90, 35, 35, 35, 35, 91, 92, + 93, 94, 95, 35, 96, 97, 98, 49, 99, 100, 101, 102, 103, 104, 105, 106, + 107, 108, 109, 110, 35, 111, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, + 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, + 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, + 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, + 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, + 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, + 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, + 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, + 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, + 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, + 35, 112, 113, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, + 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, + 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, + 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, + 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, + 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, + 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, + 30, 30, 30, 30, 30, 30, 30, 30, 30, 35, 35, 35, 35, 35, 114, 35, 115, 116, + 117, 118, 119, 35, 120, 35, 35, 121, 122, 123, 124, 30, 125, 35, 126, 127, + 128, 129, 130, + ], + tree2_level2: &[ + 0x110043FFFFF, 0xFFFF07FF01FFFFFF, 0xFFFFFFFF0000FEFF, 0x3FF, + 0x23FFFFFFFFFFFFF0, 0xFFFE0003FF010000, 
0x23C5FDFFFFF99FE1, + 0x10030003B0004000, 0x36DFDFFFFF987E0, 0x1C00005E000000, + 0x23EDFDFFFFFBBFE0, 0x200000300010000, 0x23EDFDFFFFF99FE0, + 0x20003B0000000, 0x3FFC718D63DC7E8, 0x10000, 0x23FFFDFFFFFDDFE0, + 0x337000000, 0x23EFFDFFFFFDDFE1, 0x6000370000000, 0x27FFFFFFFFFDDFF0, + 0xFC00000380704000, 0x2FFBFFFFFC7FFFE0, 0x7F, 0x5FFFFFFFFFFFE, + 0x2005FFAFFFFFF7D6, 0xF000005F, 0x1, 0x1FFFFFFFFEFF, 0x1F00, 0, + 0x800007FFFFFFFFFF, 0xFFE1C0623C3F0000, 0xFFFFFFFF00004003, + 0xF7FFFFFFFFFF20BF, 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFF3D7F3DFF, + 0x7F3DFFFFFFFF3DFF, 0xFFFFFFFFFF7FFF3D, 0xFFFFFFFFFF3DFFFF, 0x7FFFFFF, + 0xFFFFFFFF0000FFFF, 0x3F3FFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFE, + 0xFFFF9FFFFFFFFFFF, 0xFFFFFFFF07FFFFFE, 0x1FFC7FFFFFFFFFF, + 0x3FFFF8003FFFF, 0x1DFFF0003FFFF, 0xFFFFFFFFFFFFF, 0x10800000, + 0xFFFFFFFF00000000, 0x1FFFFFFFFFFFFFF, 0xFFFF05FFFFFFFFFF, + 0x3FFFFFFFFFFFFF, 0x7FFFFFFF, 0x1F3FFFFFFF0000, 0xFFFF0FFFFFFFFFFF, + 0xFFFFFFFF007FFFFF, 0x1FFFFF, 0x8000000000, 0xFFFFFFFFFFFE0, 0x1FE0, + 0xFC00C001FFFFFFF8, 0x3FFFFFFFFF, 0xFFFFFFFFF, 0x3FFFFFFFFC00E000, + 0xE7FFFFFFFFFF07FF, 0x46FDE0000000000, 0xFFFFFFFF3F3FFFFF, + 0x3FFFFFFFAAFF3F3F, 0x5FDFFFFFFFFFFFFF, 0x1FDC1FFF0FCF1FDC, + 0x8002000000000000, 0x1FFF0000, 0xF3FFFD503F2FFC84, 0xFFFFFFFF000043E0, + 0x1FF, 0xC781FFFFFFFFF, 0xFFFF20BFFFFFFFFF, 0x80FFFFFFFFFF, + 0x7F7F7F7F007FFFFF, 0x7F7F7F7F, 0x1F3E03FE000000E0, 0xFFFFFFFEE07FFFFF, + 0xF7FFFFFFFFFFFFFF, 0xFFFEFFFFFFFFFFE0, 0xFFFFFFFF00007FFF, + 0xFFFF000000000000, 0x1FFF, 0x3FFFFFFFFFFF0000, 0xC00FFFF1FFF, + 0x80007FFFFFFFFFFF, 0xFFFFFFFF3FFFFFFF, 0xFFFFFFFFFFFF, + 0xFFFFFFFCFF800000, 0xFFFFFFFFFFFFF9FF, 0xFFFE00001FFFFFFF, 0x7FFFFF7BB, + 0xFFFFFFFFFFFFC, 0x68FC000000000000, 0xFFFF003FFFFFFC00, + 0x1FFFFFFF0000007F, 0x7FFFFFFFFFFF0, 0x7C00FFDF00008000, 0x1FFFFFFFFFF, + 0xC47FFFFF00000FF7, 0x3E62FFFFFFFFFFFF, 0x1C07FF38000005, + 0xFFFF7F7F007E7E7E, 0xFFFF03FFF7FFFFFF, 0x7FFFFFFFF, 0xFFFF000FFFFFFFFF, + 0xFFFFFFFFFFFF87F, 0xFFFF3FFFFFFFFFFF, 0x3FFFFFF, 0x5F7FFDFFA0F8007F, + 0xFFFFFFFFFFFFFFDB, 0x3FFFFFFFFFFFF, 0xFFFFFFFFFFF80000, + 0xFFFFFFF03FFFFFFF, 0x3FFFFFFFFFFFFFFF, 0xFFFFFFFFFFFF0000, + 0xFFFFFFFFFFFCFFFF, 0x3FF0000000000FF, 0xAA8A000000000000, + 0x1FFFFFFFFFFFFFFF, 0x7FFFFFE00000000, 0xFFFFFFC007FFFFFE, + 0x7FFFFFFF3FFFFFFF, 0x1CFCFCFC, + ], + tree3_level1: &[ + 0, 1, 2, 3, 4, 5, 6, 7, 8, 5, 9, 10, 5, 11, 12, 5, 7, 7, 7, 7, 7, 7, 7, 7, + 7, 7, 13, 14, 15, 7, 16, 17, 7, 18, 7, 19, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + ], + tree3_level2: &[ + 0, 1, 2, 3, 4, 5, 4, 4, 4, 4, 6, 7, 8, 9, 10, 11, 2, 2, 12, 13, 14, 15, + 16, 17, 2, 2, 2, 2, 18, 19, 20, 4, 21, 22, 23, 24, 25, 26, 27, 4, 28, 29, + 30, 31, 32, 33, 34, 4, 2, 35, 36, 36, 37, 38, 39, 4, 4, 4, 40, 41, 42, 43, + 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 23, 57, 58, 59, 60, 5, + 61, 62, 63, 4, 4, 64, 65, 62, 66, 67, 4, 68, 69, 4, 4, 70, 4, 71, 72, 73, + 74, 75, 76, 77, 78, 79, 80, 4, 4, 4, 81, 82, 83, 
84, 4, 85, 86, 87, 88, 4, + 4, 4, 89, 90, 4, 91, 4, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 26, 4, + 2, 64, 2, 2, 2, 92, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 93, 94, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 62, 95, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 69, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 96, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 2, 2, 2, 2, 2, 2, 2, 2, 80, 97, 98, 99, 62, 100, 84, 4, 4, 4, 4, 4, + 4, 101, 4, 4, 4, 2, 102, 103, 2, 104, 105, 106, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 107, 23, 4, 2, 36, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 108, 2, 2, 2, 2, 109, 110, 2, 2, 2, 2, 2, 111, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 2, 112, 113, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 2, 114, 115, + 116, 117, 118, 2, 2, 2, 2, 119, 120, 121, 122, 123, 124, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 125, 4, + 4, 4, 126, 127, 4, 4, 128, 129, 4, 4, 4, 4, 99, 70, 4, 4, 4, 4, 4, 4, 4, + 130, 4, 4, 4, 131, 4, 4, 4, 132, 4, 4, 4, 133, 2, 2, 2, 134, 2, 135, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 136, 137, 138, 4, 4, 4, 4, + 4, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 139, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 10, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 140, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 141, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 96, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 2, 2, 2, 2, 2, 2, + 2, 2, 96, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 142, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 
2, 2, 2, 2, 143, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, + ], + tree3_level3: &[ + 0xB7FFFF7FFFFFEFFF, 0x3FFF3FFF, 0xFFFFFFFFFFFFFFFF, 0x7FFFFFFFFFFFFFF, 0, + 0x1FFFFFFFFFFFFF, 0xFFFFFFFF1FFFFFFF, 0x1FFFF, 0xFFFFE000FFFFFFFF, + 0x3FFFFFFFFF07FF, 0xFFFFFFFF3FFFFFFF, 0x3EFF0F, 0xFFFF00003FFFFFFF, + 0xFFFFFFFFF0FFFFF, 0xFFFF00FFFFFFFFFF, 0xF7FF000FFFFFFFFF, + 0x1BFBFFFBFFB7F7FF, 0xFFFFFFFFFFFFF, 0x7FFFFFFFFFFFFF, 0xFF003FFFFF, + 0x7FDFFFFFFFFFFBF, 0x91BFFFFFFFFFFD3F, 0x7FFFFF003FFFFF, 0x7FFFFFFF, + 0x37FFFF00000000, 0x3FFFFFF003FFFFF, 0x3FFFFFF, 0xC0FFFFFFFFFFFFFF, + 0x3FFFFFFEEF0001, 0x1FFFFFFF00000000, 0x1FFFFFFF, 0x1FFFFFFEFF, + 0x3FFFFFFFFFFFFF, 0x7FFFF003FFFFF, 0x3FFFF, 0x1FF, 0x7FFFFFFFFFFFF, + 0xFFFFFFFFF, 0xFFFF803FFFFFFC00, 0x3F, 0x303FFFFFFFFFF, 0xFC, + 0xFFFF00801FFFFFFF, 0xFFFF00000000003F, 0xFFFF000000000003, + 0x7FFFFF0000001F, 0xFFFFFFFFFFFFF8, 0x26000000000000, 0xFFFFFFFFFFF8, + 0x1FFFFFF0000, 0x7FFFFFFFF8, 0x47FFFFFFFF0090, 0x7FFFFFFFFFFF8, + 0x1400001E, 0x80000FFFFFFBFFFF, 0x1, 0xFFFF01FFBFFFBD7F, + 0x23EDFDFFFFF99FE0, 0x3E0010000, 0xBFFFFFFFFF4BFF, 0xA0000, 0x380000780, + 0xFFFFFFFFFFFF, 0xB0, 0x7FFFFFFFFFFF, 0xF000000, 0x10, 0x10007FFFFFFFFFF, + 0x7FFFFFF, 0x7F, 0xFFFFFFFFFFF, 0xFFFFFFFF00000000, 0x80000000FFFFFFFF, + 0x8000FFFFFF6FF27F, 0x2, 0xFFFFFCFF00000000, 0xA0001FFFF, + 0x407FFFFFFFFF801, 0xFFFFFFFFF0010000, 0xFFFF0000200003FF, + 0x1FFFFFFFFFFFFFF, 0x1FFFFFFFF, 0x7FFFFFFFFDFF, 0xFFFC000000000001, + 0xFFFF, 0x1FFFFFFFFFB7F, 0xFFFFFDBF00000040, 0xFFFF0000010003FF, + 0xFFFFFFF, 0x7FFFF00000000, 0xFFFFFFFFDFFF4, 0x1000000000000, 0xF, + 0xFFFFFFFFFFFF0000, 0x1FFFFFFFFFFFF, 0xFFFFFFFF0000007E, 0x3FFFFFFF, + 0xFFFF00007FFFFFFF, 0x7FFFFFFFFFFFFFFF, 0x3FFFFFFF0000, + 0xE0FFFFF80000000F, 0x1FFFFFFFFFFF, 0xF9FFFFFF00000000, 0xFFFFF, 0x107FF, + 0xFFF80000, 0x7C000B00000000, 0x80000000003FFFFF, 0x6FEF000000000000, + 0x40007FFFFFFFF, 0xFFFF00F000270000, 0xFFFFFFFFFFFFFFF, + 0x1FFF07FFFFFFFFFF, 0x3FF01FF, 0xFFFFFFFFFFDFFFFF, 0xEBFFDE64DFFFFFFF, + 0xFFFFFFFFFFFFFFEF, 0x7BFFFFFFDFDFE7BF, 0xFFFFFFFFFFFDFC5F, + 0xFFFFFF3FFFFFFFFF, 0xF7FFFFFFF7FFFFFD, 0xFFDFFFFFFFDFFFFF, + 0xFFFF7FFFFFFF7FFF, 0xFFFFFDFFFFFFFDFF, 0xFF7, 0x7E07FFFFFFF, + 0xFFFF000000000000, 0x3FFFFFFFFFFF, 0x3F801FFFFFFFFFFF, 0x4000, + 0xFFFFFFF0000, 0x13FFFFFFF0000, 0xC01F3FB77FFFFFFF, 0x7FFF6F7F00000000, + 0x1F, 0x80F, 0xAF7FE96FFFFFFEF, 0x5EF7F796AA96EA84, 0xFFFFBEE0FFFFBFF, + 0xFFFFFFFF, 0xFFFF3FFFFFFFFFFF, 0xFFFF0001FFFFFFFF, 0xFFFFFFFFFFFF07FF, + 0x3FFFFFFFFFFFFFF, + ], +}; diff --git a/vendor/windows-link/.cargo-checksum.json b/vendor/windows-link/.cargo-checksum.json new file mode 100644 index 00000000000000..4b8b7b4079bbef --- /dev/null +++ b/vendor/windows-link/.cargo-checksum.json @@ -0,0 +1 @@ 
+{"files":{".cargo_vcs_info.json":"02f3a04d4359721839ae43d0bc753d834717cb0bf600ee0b9d88d5299b94c121","Cargo.lock":"f33a3dccb85342cd5cb58d165dc6c0421e93aeaca9ea1cd82b81f0c204d316a8","Cargo.toml":"abf0b74b168ec7d7c600f44eb90502c47e44480a199b9adc9ec74ea990605707","Cargo.toml.orig":"4a4fb4a85656696687cf1f2a8725309930e39378616aa687737c38c0e28dfad1","license-apache-2.0":"c16f8dcf1a368b83be78d826ea23de4079fe1b4469a0ab9ee20563f37ff3d44b","license-mit":"c2cfccb812fe482101a8f04597dfc5a9991a6b2748266c47ac91b6a5aae15383","readme.md":"4bbe7714285567006b5b068dfc93cb3b633afae20766c9bf1fce2444874261fb","src/lib.rs":"ca9cf5a2a97cf72d855c677c936355b6d29e41682e5abfa505f28b3d216b5333"},"package":"f0805222e57f7521d6a62e36fa9163bc891acd422f971defe97d64e70d0a4fe5"} \ No newline at end of file diff --git a/vendor/windows-link/.cargo_vcs_info.json b/vendor/windows-link/.cargo_vcs_info.json new file mode 100644 index 00000000000000..0ca517613e6a9c --- /dev/null +++ b/vendor/windows-link/.cargo_vcs_info.json @@ -0,0 +1,6 @@ +{ + "git": { + "sha1": "d468916ac27a36fb8a12bafc1bf5c0ec2fe92238" + }, + "path_in_vcs": "crates/libs/link" +} \ No newline at end of file diff --git a/vendor/windows-link/Cargo.lock b/vendor/windows-link/Cargo.lock new file mode 100644 index 00000000000000..1fc750d4f4c9b9 --- /dev/null +++ b/vendor/windows-link/Cargo.lock @@ -0,0 +1,7 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. +version = 3 + +[[package]] +name = "windows-link" +version = "0.2.1" diff --git a/vendor/windows-link/Cargo.toml b/vendor/windows-link/Cargo.toml new file mode 100644 index 00000000000000..6b29fd6b128c76 --- /dev/null +++ b/vendor/windows-link/Cargo.toml @@ -0,0 +1,39 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies. +# +# If you are reading this file be aware that the original Cargo.toml +# will likely look very different (and much more reasonable). +# See Cargo.toml.orig for the original contents. + +[package] +edition = "2021" +rust-version = "1.71" +name = "windows-link" +version = "0.2.1" +build = false +autolib = false +autobins = false +autoexamples = false +autotests = false +autobenches = false +description = "Linking for Windows" +readme = "readme.md" +categories = ["os::windows-apis"] +license = "MIT OR Apache-2.0" +repository = "https://github.com/microsoft/windows-rs" + +[lib] +name = "windows_link" +path = "src/lib.rs" + +[lints.rust] +missing_unsafe_on_extern = "warn" + +[lints.rust.unexpected_cfgs] +level = "warn" +priority = 0 +check-cfg = ["cfg(windows_raw_dylib, windows_slim_errors)"] diff --git a/vendor/windows-link/license-apache-2.0 b/vendor/windows-link/license-apache-2.0 new file mode 100644 index 00000000000000..b5ed4ecec27b39 --- /dev/null +++ b/vendor/windows-link/license-apache-2.0 @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright (c) Microsoft Corporation. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/windows-link/license-mit b/vendor/windows-link/license-mit new file mode 100644 index 00000000000000..9e841e7a26e4eb --- /dev/null +++ b/vendor/windows-link/license-mit @@ -0,0 +1,21 @@ + MIT License + + Copyright (c) Microsoft Corporation. + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in all + copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + SOFTWARE diff --git a/vendor/windows-link/readme.md b/vendor/windows-link/readme.md new file mode 100644 index 00000000000000..f6c343f667968a --- /dev/null +++ b/vendor/windows-link/readme.md @@ -0,0 +1,26 @@ +## Linking for Windows + +The [windows-link](https://crates.io/crates/windows-link) crate provides the `link` macro that simplifies linking. The `link` macro is much the same as the one provided by [windows-targets](https://crates.io/crates/windows-targets) but uses `raw-dylib` and thus does not require import lib files. + +* [Getting started](https://kennykerr.ca/rust-getting-started/) +* [Samples](https://github.com/microsoft/windows-rs/tree/master/crates/samples) +* [Releases](https://github.com/microsoft/windows-rs/releases) + +Start by adding the following to your Cargo.toml file: + +```toml +[dependencies.windows-link] +version = "0.2" +``` + +Use the `link` macro to define the external functions you wish to call: + +```rust +windows_link::link!("kernel32.dll" "system" fn SetLastError(code: u32)); +windows_link::link!("kernel32.dll" "system" fn GetLastError() -> u32); + +unsafe { + SetLastError(1234); + assert_eq!(GetLastError(), 1234); +} +``` diff --git a/vendor/windows-link/src/lib.rs b/vendor/windows-link/src/lib.rs new file mode 100644 index 00000000000000..dbecf9f3b5e4fe --- /dev/null +++ b/vendor/windows-link/src/lib.rs @@ -0,0 +1,39 @@ +#![doc = include_str!("../readme.md")] +#![no_std] + +/// Defines an external function to import. +#[cfg(all(windows, target_arch = "x86"))] +#[macro_export] +macro_rules! link { + ($library:literal $abi:literal $($link_name:literal)? fn $($function:tt)*) => ( + #[link(name = $library, kind = "raw-dylib", modifiers = "+verbatim", import_name_type = "undecorated")] + extern $abi { + $(#[link_name=$link_name])? + pub fn $($function)*; + } + ) +} + +/// Defines an external function to import. +#[cfg(all(windows, not(target_arch = "x86")))] +#[macro_export] +macro_rules! link { + ($library:literal $abi:literal $($link_name:literal)? fn $($function:tt)*) => ( + #[link(name = $library, kind = "raw-dylib", modifiers = "+verbatim")] + extern $abi { + $(#[link_name=$link_name])? + pub fn $($function)*; + } + ) +} + +/// Defines an external function to import. 
+#[cfg(not(windows))] +#[macro_export] +macro_rules! link { + ($library:literal $abi:literal $($link_name:literal)? fn $($function:tt)*) => ( + extern $abi { + pub fn $($function)*; + } + ) +} From 81d46e3c670b1534f24692e588c6030995fc0b0c Mon Sep 17 00:00:00 2001 From: Emma Harper Smith <emma@emmatyping.dev> Date: Fri, 14 Nov 2025 15:44:35 -0800 Subject: [PATCH 03/20] Remove wrapper.h, it should be generated --- Modules/cpython-sys/.gitignore | 1 + Modules/cpython-sys/wrapper.h | 221 --------------------------------- 2 files changed, 1 insertion(+), 221 deletions(-) create mode 100644 Modules/cpython-sys/.gitignore delete mode 100644 Modules/cpython-sys/wrapper.h diff --git a/Modules/cpython-sys/.gitignore b/Modules/cpython-sys/.gitignore new file mode 100644 index 00000000000000..3536502b83a7c0 --- /dev/null +++ b/Modules/cpython-sys/.gitignore @@ -0,0 +1 @@ +/wrapper.h \ No newline at end of file diff --git a/Modules/cpython-sys/wrapper.h b/Modules/cpython-sys/wrapper.h deleted file mode 100644 index 1b0176a905b962..00000000000000 --- a/Modules/cpython-sys/wrapper.h +++ /dev/null @@ -1,221 +0,0 @@ -#define Py_BUILD_CORE -#include "Modules/expat/expat.h" -#include "Python.h" -#include "abstract.h" -#include "audit.h" -#include "bltinmodule.h" -#include "boolobject.h" -#include "bytearrayobject.h" -#include "bytesobject.h" -#include "ceval.h" -#include "codecs.h" -#include "compile.h" -#include "complexobject.h" -#include "critical_section.h" -#include "descrobject.h" -#include "dictobject.h" -#include "dynamic_annotations.h" -#include "enumobject.h" -#include "errcode.h" -#include "exports.h" -#include "fileobject.h" -#include "fileutils.h" -#include "floatobject.h" -#include "frameobject.h" -#include "genericaliasobject.h" -#include "import.h" -#include "intrcheck.h" -#include "iterobject.h" -#include "listobject.h" -#include "longobject.h" -#include "marshal.h" -#include "memoryobject.h" -#include "methodobject.h" -#include "modsupport.h" -#include "moduleobject.h" -#include "object.h" -#include "objimpl.h" -#include "opcode.h" -#include "opcode_ids.h" -#include "osdefs.h" -#include "osmodule.h" -#include "patchlevel.h" -#include "pyatomic.h" -#include "pybuffer.h" -#include "pycapsule.h" -#include "pydtrace.h" -#include "pyerrors.h" -#include "pyexpat.h" -#include "pyframe.h" -#include "pyhash.h" -#include "pylifecycle.h" -#include "pymacconfig.h" -#include "pymacro.h" -#include "pymath.h" -#include "pymem.h" -#include "pyport.h" -#include "pystate.h" -#include "pystats.h" -#include "pystrcmp.h" -#include "pystrtod.h" -#include "pythonrun.h" -#include "pythread.h" -#include "pytypedefs.h" -#include "rangeobject.h" -#include "refcount.h" -#include "setobject.h" -#include "sliceobject.h" -#include "structmember.h" -#include "structseq.h" -#include "sysmodule.h" -#include "traceback.h" -#include "tupleobject.h" -#include "typeslots.h" -#include "unicodeobject.h" -#include "warnings.h" -#include "weakrefobject.h" -#include "pyconfig.h" -#include "internal/pycore_parser.h" -#include "internal/pycore_mimalloc.h" -#include "internal/mimalloc/mimalloc.h" -#include "internal/mimalloc/mimalloc/atomic.h" -#include "internal/mimalloc/mimalloc/internal.h" -#include "internal/mimalloc/mimalloc/prim.h" -#include "internal/mimalloc/mimalloc/track.h" -#include "internal/mimalloc/mimalloc/types.h" -#include "internal/pycore_abstract.h" -#include "internal/pycore_asdl.h" -#include "internal/pycore_ast.h" -#include "internal/pycore_ast_state.h" -#include "internal/pycore_atexit.h" -#include 
"internal/pycore_audit.h" -#include "internal/pycore_backoff.h" -#include "internal/pycore_bitutils.h" -#include "internal/pycore_blocks_output_buffer.h" -#include "internal/pycore_brc.h" -#include "internal/pycore_bytes_methods.h" -#include "internal/pycore_bytesobject.h" -#include "internal/pycore_call.h" -#include "internal/pycore_capsule.h" -#include "internal/pycore_cell.h" -#include "internal/pycore_ceval.h" -#include "internal/pycore_ceval_state.h" -#include "internal/pycore_code.h" -#include "internal/pycore_codecs.h" -#include "internal/pycore_compile.h" -#include "internal/pycore_complexobject.h" -#include "internal/pycore_condvar.h" -#include "internal/pycore_context.h" -#include "internal/pycore_critical_section.h" -#include "internal/pycore_crossinterp.h" -#include "internal/pycore_debug_offsets.h" -#include "internal/pycore_descrobject.h" -#include "internal/pycore_dict.h" -#include "internal/pycore_dict_state.h" -#include "internal/pycore_dtoa.h" -#include "internal/pycore_exceptions.h" -#include "internal/pycore_faulthandler.h" -#include "internal/pycore_fileutils.h" -#include "internal/pycore_floatobject.h" -#include "internal/pycore_flowgraph.h" -#include "internal/pycore_format.h" -#include "internal/pycore_frame.h" -#include "internal/pycore_freelist.h" -#include "internal/pycore_freelist_state.h" -#include "internal/pycore_function.h" -#include "internal/pycore_gc.h" -#include "internal/pycore_genobject.h" -#include "internal/pycore_getopt.h" -#include "internal/pycore_gil.h" -#include "internal/pycore_global_objects.h" -#include "internal/pycore_global_objects_fini_generated.h" -#include "internal/pycore_global_strings.h" -#include "internal/pycore_hamt.h" -#include "internal/pycore_hashtable.h" -#include "internal/pycore_import.h" -#include "internal/pycore_importdl.h" -#include "internal/pycore_index_pool.h" -#include "internal/pycore_initconfig.h" -#include "internal/pycore_instruments.h" -#include "internal/pycore_instruction_sequence.h" -#include "internal/pycore_interp.h" -#include "internal/pycore_interp_structs.h" -#include "internal/pycore_interpframe.h" -#include "internal/pycore_interpframe_structs.h" -#include "internal/pycore_interpolation.h" -#include "internal/pycore_intrinsics.h" -#include "internal/pycore_jit.h" -#include "internal/pycore_list.h" -#include "internal/pycore_llist.h" -#include "internal/pycore_lock.h" -#include "internal/pycore_long.h" -#include "internal/pycore_memoryobject.h" -#include "internal/pycore_mimalloc.h" -#include "internal/pycore_modsupport.h" -#include "internal/pycore_moduleobject.h" -#include "internal/pycore_namespace.h" -#include "internal/pycore_object.h" -#include "internal/pycore_object_alloc.h" -#include "internal/pycore_object_deferred.h" -#include "internal/pycore_object_stack.h" -#include "internal/pycore_object_state.h" -#include "internal/pycore_obmalloc.h" -#include "internal/pycore_obmalloc_init.h" -#include "internal/pycore_opcode_metadata.h" -#include "internal/pycore_opcode_utils.h" -#include "internal/pycore_optimizer.h" -#include "internal/pycore_parking_lot.h" -#include "internal/pycore_parser.h" -#include "internal/pycore_pathconfig.h" -#include "internal/pycore_pyarena.h" -#include "internal/pycore_pyatomic_ft_wrappers.h" -#include "internal/pycore_pybuffer.h" -#include "internal/pycore_pyerrors.h" -#include "internal/pycore_pyhash.h" -#include "internal/pycore_pylifecycle.h" -#include "internal/pycore_pymath.h" -#include "internal/pycore_pymem.h" -#include "internal/pycore_pymem_init.h" -#include 
"internal/pycore_pystate.h" -#include "internal/pycore_pystats.h" -#include "internal/pycore_pythonrun.h" -#include "internal/pycore_pythread.h" -#include "internal/pycore_qsbr.h" -#include "internal/pycore_range.h" -#include "internal/pycore_runtime.h" -#include "internal/pycore_runtime_init.h" -#include "internal/pycore_runtime_init_generated.h" -#include "internal/pycore_runtime_structs.h" -#include "internal/pycore_semaphore.h" -#include "internal/pycore_setobject.h" -#include "internal/pycore_signal.h" -#include "internal/pycore_sliceobject.h" -#include "internal/pycore_stats.h" -#include "internal/pycore_strhex.h" -#include "internal/pycore_stackref.h" -#include "internal/pycore_structs.h" -#include "internal/pycore_structseq.h" -#include "internal/pycore_symtable.h" -#include "internal/pycore_sysmodule.h" -#include "internal/pycore_template.h" -#include "internal/pycore_time.h" -#include "internal/pycore_token.h" -#include "internal/pycore_traceback.h" -#include "internal/pycore_tracemalloc.h" -#include "internal/pycore_tstate.h" -#include "internal/pycore_tuple.h" -#include "internal/pycore_typedefs.h" -#include "internal/pycore_typeobject.h" -#include "internal/pycore_typevarobject.h" -#include "internal/pycore_ucnhash.h" -#include "internal/pycore_unicodectype.h" -#include "internal/pycore_unicodeobject.h" -#include "internal/pycore_unicodeobject_generated.h" -#include "internal/pycore_unionobject.h" -#include "internal/pycore_uniqueid.h" -#include "internal/pycore_uop.h" -#include "internal/pycore_uop_ids.h" -#include "internal/pycore_uop_metadata.h" -#include "internal/pycore_warnings.h" -#include "internal/pycore_weakref.h" -#include "Python/stdlib_module_names.h" From 3dc0283a1cc139ccfebd85872f96469479723e0c Mon Sep 17 00:00:00 2001 From: Kirill Podoprigora <kirill.bast9@mail.ru> Date: Sun, 16 Nov 2025 00:57:46 +0200 Subject: [PATCH 04/20] Make Rust extensions work on macOS (#12) --- Modules/cpython-sys/build.rs | 3 ++- Modules/makesetup | 11 ++++++++++- Python/remote_debug.h | 1 + Tools/build/regen-rust-wrapper-h.py | 11 ++++++++++- 4 files changed, 23 insertions(+), 3 deletions(-) diff --git a/Modules/cpython-sys/build.rs b/Modules/cpython-sys/build.rs index b55f03c5b066b5..c45ccc0b2684c7 100644 --- a/Modules/cpython-sys/build.rs +++ b/Modules/cpython-sys/build.rs @@ -8,6 +8,7 @@ fn main() { .header("wrapper.h") .clang_arg(format!("-I{}", srcdir.as_os_str().to_str().unwrap())) .clang_arg(format!("-I{}/Include", srcdir.as_os_str().to_str().unwrap())) + .clang_arg(format!("-I{}/Include/internal", srcdir.as_os_str().to_str().unwrap())) .allowlist_function("Py.*") .allowlist_function("_Py.*") .allowlist_type("Py.*") @@ -24,4 +25,4 @@ fn main() { bindings .write_to_file(out_path.join("bindings.rs")) .expect("Couldn't write bindings!"); -} \ No newline at end of file +} diff --git a/Modules/makesetup b/Modules/makesetup index b701a61a548bae..773de9117f4a22 100755 --- a/Modules/makesetup +++ b/Modules/makesetup @@ -83,6 +83,8 @@ case $makepre in '') makepre=Makefile.pre;; esac +UNAME_SYSTEM=`uname -s 2>/dev/null || echo unknown` + # Newline for sed i and a commands NL='\ ' @@ -289,7 +291,14 @@ sed -e 's/[ ]*#.*//' -e '/^[ ]*$/d' | echo "$rule" >>$rulesf for mod in $mods do - custom_ldflags="-Wl,--defsym=PyInit_$mod=PyInit_$mod" + case $UNAME_SYSTEM in + Darwin*) + custom_ldflags="$custom_ldflags -Wl,-u,_PyInit_$mod" + ;; + *) + custom_ldflags="$custom_ldflags -Wl,--defsym=PyInit_$mod=PyInit_$mod" + ;; + esac done fi case $doconfig in diff --git a/Python/remote_debug.h 
b/Python/remote_debug.h index e7676013197fa9..eac7f2aee132eb 100644 --- a/Python/remote_debug.h +++ b/Python/remote_debug.h @@ -29,6 +29,7 @@ extern "C" { #include "pyconfig.h" #include "internal/pycore_ceval.h" +#include "internal/pycore_debug_offsets.h" #ifdef __linux__ # include <elf.h> diff --git a/Tools/build/regen-rust-wrapper-h.py b/Tools/build/regen-rust-wrapper-h.py index 763bf1133d4ecb..998d808ea40ac2 100644 --- a/Tools/build/regen-rust-wrapper-h.py +++ b/Tools/build/regen-rust-wrapper-h.py @@ -3,8 +3,11 @@ from pathlib import Path ROOT = Path(__file__).resolve().parents[2] -INCLUDE = ROOT / "Include" WRAPPER_H = ROOT / "Modules" / "cpython-sys" / "wrapper.h" +SKIP_PREFIXES = ("cpython/",) +SKIP_EXACT = { + "internal/pycore_crossinterp_data_registry.h", +} def normalize_path(header: str) -> str: return re.sub(r'(:?\.\/)(:?Include\/)?', '', header) @@ -18,7 +21,13 @@ def main(output: str = WRAPPER_H) -> None: f.write("#include \"Modules/expat/expat.h\"\n") for header in headers.split(): normalized_path = normalize_path(header) + if normalized_path.startswith(SKIP_PREFIXES): + continue + if normalized_path in SKIP_EXACT: + continue f.write(f"#include \"{normalized_path}\"\n") + if normalized_path == "Python/remote_debug.h": + f.write("#undef UNUSED\n") if __name__ == "__main__": import sys From b8d6ea3de98904f9ffae9fa7b4c5ea6f3638f6a0 Mon Sep 17 00:00:00 2001 From: Kirill Podoprigora <kirill.bast9@mail.ru> Date: Sun, 16 Nov 2025 04:38:14 +0200 Subject: [PATCH 05/20] Faster `_base.b64encode` with custom implementation (#13) --- Cargo.lock | 7 -- Modules/_base64/Cargo.toml | 3 +- Modules/_base64/src/lib.rs | 186 ++++++++++++++++++++++++++++++++----- 3 files changed, 166 insertions(+), 30 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 5acffd0fac3baa..afc3744abd1cb6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6,7 +6,6 @@ version = 4 name = "_base64" version = "0.1.0" dependencies = [ - "base64", "cpython-sys", ] @@ -19,12 +18,6 @@ dependencies = [ "memchr", ] -[[package]] -name = "base64" -version = "0.22.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6" - [[package]] name = "bindgen" version = "0.72.1" diff --git a/Modules/_base64/Cargo.toml b/Modules/_base64/Cargo.toml index 038ec4bb02eb02..0810b787ab2773 100644 --- a/Modules/_base64/Cargo.toml +++ b/Modules/_base64/Cargo.toml @@ -4,9 +4,8 @@ version = "0.1.0" edition = "2024" [dependencies] -base64 = "0.22.1" cpython-sys ={ path = "../cpython-sys" } [lib] name = "_base64" -crate-type = ["staticlib"] \ No newline at end of file +crate-type = ["staticlib"] diff --git a/Modules/_base64/src/lib.rs b/Modules/_base64/src/lib.rs index f9c122314a3a32..330e00de3d2525 100644 --- a/Modules/_base64/src/lib.rs +++ b/Modules/_base64/src/lib.rs @@ -1,39 +1,183 @@ use std::cell::UnsafeCell; - -use std::ffi::CStr; -use std::ffi::CString; -use std::ffi::c_char; -use std::ffi::c_int; -use std::ffi::c_void; +use std::ffi::{c_char, c_int, c_void}; +use std::mem::MaybeUninit; +use std::ptr; +use std::slice; use cpython_sys::METH_FASTCALL; -use cpython_sys::Py_ssize_t; use cpython_sys::PyBytes_AsString; -use cpython_sys::PyBytes_FromString; +use cpython_sys::PyBytes_FromStringAndSize; +use cpython_sys::PyBuffer_Release; use cpython_sys::PyMethodDef; use cpython_sys::PyMethodDefFuncPointer; use cpython_sys::PyModuleDef; use cpython_sys::PyModuleDef_HEAD_INIT; use cpython_sys::PyModuleDef_Init; use cpython_sys::PyObject; +use 
cpython_sys::PyObject_GetBuffer; +use cpython_sys::Py_DecRef; +use cpython_sys::PyErr_NoMemory; +use cpython_sys::PyErr_SetString; +use cpython_sys::PyExc_TypeError; +use cpython_sys::Py_buffer; +use cpython_sys::Py_ssize_t; + +const PYBUF_SIMPLE: c_int = 0; +const PAD_BYTE: u8 = b'='; +const ENCODE_TABLE: [u8; 64] = + *b"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"; + +#[inline] +fn encoded_output_len(input_len: usize) -> Option<usize> { + input_len + .checked_add(2) + .map(|n| n / 3) + .and_then(|blocks| blocks.checked_mul(4)) +} + +#[inline] +fn encode_into(input: &[u8], output: &mut [u8]) -> usize { + let mut src_index = 0; + let mut dst_index = 0; + let len = input.len(); + + while src_index + 3 <= len { + let chunk = (u32::from(input[src_index]) << 16) + | (u32::from(input[src_index + 1]) << 8) + | u32::from(input[src_index + 2]); + output[dst_index] = ENCODE_TABLE[((chunk >> 18) & 0x3f) as usize]; + output[dst_index + 1] = ENCODE_TABLE[((chunk >> 12) & 0x3f) as usize]; + output[dst_index + 2] = ENCODE_TABLE[((chunk >> 6) & 0x3f) as usize]; + output[dst_index + 3] = ENCODE_TABLE[(chunk & 0x3f) as usize]; + src_index += 3; + dst_index += 4; + } -use base64::prelude::*; + match len - src_index { + 0 => {} + 1 => { + let chunk = u32::from(input[src_index]) << 16; + output[dst_index] = ENCODE_TABLE[((chunk >> 18) & 0x3f) as usize]; + output[dst_index + 1] = ENCODE_TABLE[((chunk >> 12) & 0x3f) as usize]; + output[dst_index + 2] = PAD_BYTE; + output[dst_index + 3] = PAD_BYTE; + dst_index += 4; + } + 2 => { + let chunk = (u32::from(input[src_index]) << 16) + | (u32::from(input[src_index + 1]) << 8); + output[dst_index] = ENCODE_TABLE[((chunk >> 18) & 0x3f) as usize]; + output[dst_index + 1] = ENCODE_TABLE[((chunk >> 12) & 0x3f) as usize]; + output[dst_index + 2] = ENCODE_TABLE[((chunk >> 6) & 0x3f) as usize]; + output[dst_index + 3] = PAD_BYTE; + dst_index += 4; + } + _ => unreachable!("len - src_index cannot exceed 2"), + } + + dst_index +} + +struct BorrowedBuffer { + view: Py_buffer, +} + +impl BorrowedBuffer { + unsafe fn from_object(obj: *mut PyObject) -> Result<Self, ()> { + let mut view = MaybeUninit::<Py_buffer>::uninit(); + if unsafe { PyObject_GetBuffer(obj, view.as_mut_ptr(), PYBUF_SIMPLE) } != 0 { + return Err(()); + } + Ok(Self { + view: unsafe { view.assume_init() }, + }) + } + + fn len(&self) -> Py_ssize_t { + self.view.len + } + + fn as_ptr(&self) -> *const u8 { + self.view.buf.cast::<u8>() as *const u8 + } +} + +impl Drop for BorrowedBuffer { + fn drop(&mut self) { + unsafe { + PyBuffer_Release(&mut self.view); + } + } +} #[unsafe(no_mangle)] -pub unsafe extern "C" fn standard_b64encode( +pub unsafe extern "C" fn b64encode( _module: *mut PyObject, args: *mut *mut PyObject, - _nargs: Py_ssize_t, + nargs: Py_ssize_t, ) -> *mut PyObject { - let buff = unsafe { *args }; - let ptr = unsafe { PyBytes_AsString(buff) }; - if ptr.is_null() { - // Error handling omitted for now - unimplemented!("Error handling goes here...") + if nargs != 1 { + unsafe { + PyErr_SetString( + PyExc_TypeError, + c"b64encode() takes exactly one argument".as_ptr(), + ); + } + return ptr::null_mut(); + } + + let source = unsafe { *args }; + let buffer = match unsafe { BorrowedBuffer::from_object(source) } { + Ok(buf) => buf, + Err(_) => return ptr::null_mut(), + }; + + let view_len = buffer.len(); + if view_len < 0 { + unsafe { + PyErr_SetString( + PyExc_TypeError, + c"b64encode() argument has negative length".as_ptr(), + ); + } + return ptr::null_mut(); } - let cdata = unsafe { 
CStr::from_ptr(ptr) }; - let res = BASE64_STANDARD.encode(cdata.to_bytes()); - unsafe { PyBytes_FromString(CString::new(res).unwrap().as_ptr()) } + let input_len = view_len as usize; + let input = unsafe { slice::from_raw_parts(buffer.as_ptr(), input_len) }; + + let Some(output_len) = encoded_output_len(input_len) else { + unsafe { + PyErr_NoMemory(); + } + return ptr::null_mut(); + }; + + if output_len > isize::MAX as usize { + unsafe { + PyErr_NoMemory(); + } + return ptr::null_mut(); + } + + let result = unsafe { + PyBytes_FromStringAndSize(ptr::null(), output_len as Py_ssize_t) + }; + if result.is_null() { + return ptr::null_mut(); + } + + let dest_ptr = unsafe { PyBytes_AsString(result) }; + if dest_ptr.is_null() { + unsafe { + Py_DecRef(result); + } + return ptr::null_mut(); + } + let dest = unsafe { slice::from_raw_parts_mut(dest_ptr.cast::<u8>(), output_len) }; + + let written = encode_into(input, dest); + debug_assert_eq!(written, output_len); + result } #[unsafe(no_mangle)] @@ -62,9 +206,9 @@ unsafe impl Sync for ModuleDef {} pub static _BASE64_MODULE_METHODS: [PyMethodDef; 2] = { [ PyMethodDef { - ml_name: c"standard_b64encode".as_ptr() as *mut c_char, + ml_name: c"b64encode".as_ptr() as *mut c_char, ml_meth: PyMethodDefFuncPointer { - PyCFunctionFast: standard_b64encode, + PyCFunctionFast: b64encode, }, ml_flags: METH_FASTCALL, ml_doc: c"Demo for the _base64 module".as_ptr() as *mut c_char, From 69f4d16c19cb6ffe8f45174c1b5169149f2436c3 Mon Sep 17 00:00:00 2001 From: Emma Harper Smith <emma@emmatyping.dev> Date: Sun, 16 Nov 2025 02:47:16 -0800 Subject: [PATCH 06/20] Remove vendored code --- vendor/aho-corasick/.cargo-checksum.json | 1 - vendor/aho-corasick/.cargo_vcs_info.json | 6 - vendor/aho-corasick/.github/FUNDING.yml | 1 - vendor/aho-corasick/.github/workflows/ci.yml | 148 - vendor/aho-corasick/.vim/coc-settings.json | 12 - vendor/aho-corasick/COPYING | 3 - vendor/aho-corasick/Cargo.lock | 39 - vendor/aho-corasick/Cargo.toml | 80 - vendor/aho-corasick/DESIGN.md | 481 - vendor/aho-corasick/LICENSE-MIT | 21 - vendor/aho-corasick/README.md | 174 - vendor/aho-corasick/UNLICENSE | 24 - vendor/aho-corasick/rustfmt.toml | 2 - vendor/aho-corasick/src/ahocorasick.rs | 2789 ---- vendor/aho-corasick/src/automaton.rs | 1608 -- vendor/aho-corasick/src/dfa.rs | 835 -- vendor/aho-corasick/src/lib.rs | 326 - vendor/aho-corasick/src/macros.rs | 18 - vendor/aho-corasick/src/nfa/contiguous.rs | 1141 -- vendor/aho-corasick/src/nfa/mod.rs | 40 - vendor/aho-corasick/src/nfa/noncontiguous.rs | 1762 --- vendor/aho-corasick/src/packed/api.rs | 687 - vendor/aho-corasick/src/packed/ext.rs | 39 - vendor/aho-corasick/src/packed/mod.rs | 120 - vendor/aho-corasick/src/packed/pattern.rs | 480 - vendor/aho-corasick/src/packed/rabinkarp.rs | 168 - .../aho-corasick/src/packed/teddy/README.md | 386 - .../aho-corasick/src/packed/teddy/builder.rs | 792 - .../aho-corasick/src/packed/teddy/generic.rs | 1382 -- vendor/aho-corasick/src/packed/teddy/mod.rs | 9 - vendor/aho-corasick/src/packed/tests.rs | 583 - vendor/aho-corasick/src/packed/vector.rs | 1757 --- vendor/aho-corasick/src/tests.rs | 1664 --- vendor/aho-corasick/src/transducer.rs | 270 - vendor/aho-corasick/src/util/alphabet.rs | 409 - vendor/aho-corasick/src/util/buffer.rs | 124 - .../aho-corasick/src/util/byte_frequencies.rs | 258 - vendor/aho-corasick/src/util/debug.rs | 26 - vendor/aho-corasick/src/util/error.rs | 259 - vendor/aho-corasick/src/util/int.rs | 278 - vendor/aho-corasick/src/util/mod.rs | 12 - vendor/aho-corasick/src/util/prefilter.rs | 
924 -- vendor/aho-corasick/src/util/primitives.rs | 759 - vendor/aho-corasick/src/util/remapper.rs | 214 - vendor/aho-corasick/src/util/search.rs | 1148 -- vendor/aho-corasick/src/util/special.rs | 42 - vendor/base64/.cargo-checksum.json | 1 - vendor/base64/.cargo_vcs_info.json | 6 - vendor/base64/.circleci/config.yml | 135 - .../ISSUE_TEMPLATE/general-purpose-issue.md | 21 - vendor/base64/Cargo.lock | 1515 -- vendor/base64/Cargo.toml | 85 - vendor/base64/LICENSE-APACHE | 201 - vendor/base64/LICENSE-MIT | 21 - vendor/base64/README.md | 154 - vendor/base64/RELEASE-NOTES.md | 271 - vendor/base64/benches/benchmarks.rs | 238 - vendor/base64/clippy.toml | 1 - vendor/base64/examples/base64.rs | 81 - vendor/base64/icon_CLion.svg | 34 - vendor/base64/src/alphabet.rs | 285 - vendor/base64/src/chunked_encoder.rs | 172 - vendor/base64/src/decode.rs | 386 - vendor/base64/src/display.rs | 88 - vendor/base64/src/encode.rs | 492 - .../src/engine/general_purpose/decode.rs | 357 - .../engine/general_purpose/decode_suffix.rs | 162 - .../base64/src/engine/general_purpose/mod.rs | 352 - vendor/base64/src/engine/mod.rs | 478 - vendor/base64/src/engine/naive.rs | 195 - vendor/base64/src/engine/tests.rs | 1579 -- vendor/base64/src/lib.rs | 277 - vendor/base64/src/prelude.rs | 20 - vendor/base64/src/read/decoder.rs | 335 - vendor/base64/src/read/decoder_tests.rs | 487 - vendor/base64/src/read/mod.rs | 6 - vendor/base64/src/tests.rs | 117 - vendor/base64/src/write/encoder.rs | 407 - .../base64/src/write/encoder_string_writer.rs | 207 - vendor/base64/src/write/encoder_tests.rs | 554 - vendor/base64/src/write/mod.rs | 11 - vendor/base64/tests/encode.rs | 77 - vendor/base64/tests/tests.rs | 161 - vendor/bindgen/.cargo-checksum.json | 1 - vendor/bindgen/.cargo_vcs_info.json | 6 - vendor/bindgen/Cargo.lock | 485 - vendor/bindgen/Cargo.toml | 189 - vendor/bindgen/LICENSE | 29 - vendor/bindgen/README.md | 89 - vendor/bindgen/build.rs | 29 - vendor/bindgen/callbacks.rs | 317 - vendor/bindgen/clang.rs | 2448 ---- vendor/bindgen/codegen/bitfield_unit.rs | 112 - .../codegen/bitfield_unit_raw_ref_macros.rs | 191 - vendor/bindgen/codegen/bitfield_unit_tests.rs | 260 - vendor/bindgen/codegen/dyngen.rs | 258 - vendor/bindgen/codegen/error.rs | 52 - vendor/bindgen/codegen/helpers.rs | 395 - vendor/bindgen/codegen/impl_debug.rs | 243 - vendor/bindgen/codegen/impl_partialeq.rs | 142 - vendor/bindgen/codegen/mod.rs | 5991 -------- .../postprocessing/merge_extern_blocks.rs | 72 - vendor/bindgen/codegen/postprocessing/mod.rs | 57 - .../postprocessing/sort_semantically.rs | 46 - vendor/bindgen/codegen/serialize.rs | 443 - vendor/bindgen/codegen/struct_layout.rs | 458 - vendor/bindgen/deps.rs | 61 - vendor/bindgen/diagnostics.rs | 146 - vendor/bindgen/extra_assertions.rs | 17 - vendor/bindgen/features.rs | 570 - vendor/bindgen/ir/analysis/derive.rs | 726 - vendor/bindgen/ir/analysis/has_destructor.rs | 175 - vendor/bindgen/ir/analysis/has_float.rs | 248 - .../ir/analysis/has_type_param_in_array.rs | 242 - vendor/bindgen/ir/analysis/has_vtable.rs | 235 - vendor/bindgen/ir/analysis/mod.rs | 395 - vendor/bindgen/ir/analysis/sizedness.rs | 353 - vendor/bindgen/ir/analysis/template_params.rs | 601 - vendor/bindgen/ir/annotations.rs | 259 - vendor/bindgen/ir/comment.rs | 100 - vendor/bindgen/ir/comp.rs | 1921 --- vendor/bindgen/ir/context.rs | 3107 ---- vendor/bindgen/ir/derive.rs | 130 - vendor/bindgen/ir/dot.rs | 85 - vendor/bindgen/ir/enum_ty.rs | 321 - vendor/bindgen/ir/function.rs | 838 -- vendor/bindgen/ir/int.rs | 128 - 
vendor/bindgen/ir/item.rs | 1994 --- vendor/bindgen/ir/item_kind.rs | 135 - vendor/bindgen/ir/layout.rs | 126 - vendor/bindgen/ir/mod.rs | 25 - vendor/bindgen/ir/module.rs | 96 - vendor/bindgen/ir/objc.rs | 343 - vendor/bindgen/ir/template.rs | 335 - vendor/bindgen/ir/traversal.rs | 478 - vendor/bindgen/ir/ty.rs | 1256 -- vendor/bindgen/ir/var.rs | 523 - vendor/bindgen/lib.rs | 1422 -- vendor/bindgen/log_stubs.rs | 38 - vendor/bindgen/options/as_args.rs | 52 - vendor/bindgen/options/cli.rs | 1151 -- vendor/bindgen/options/helpers.rs | 43 - vendor/bindgen/options/mod.rs | 2286 --- vendor/bindgen/parse.rs | 41 - vendor/bindgen/regex_set.rs | 199 - vendor/bindgen/time.rs | 52 - vendor/bitflags/.cargo-checksum.json | 1 - vendor/bitflags/.cargo_vcs_info.json | 6 - vendor/bitflags/CHANGELOG.md | 636 - vendor/bitflags/CODE_OF_CONDUCT.md | 73 - vendor/bitflags/CONTRIBUTING.md | 9 - vendor/bitflags/Cargo.lock | 325 - vendor/bitflags/Cargo.toml | 120 - vendor/bitflags/LICENSE-APACHE | 201 - vendor/bitflags/LICENSE-MIT | 25 - vendor/bitflags/README.md | 88 - vendor/bitflags/SECURITY.md | 13 - vendor/bitflags/benches/parse.rs | 96 - vendor/bitflags/examples/custom_bits_type.rs | 97 - vendor/bitflags/examples/custom_derive.rs | 23 - vendor/bitflags/examples/fmt.rs | 49 - vendor/bitflags/examples/macro_free.rs | 61 - vendor/bitflags/examples/serde.rs | 39 - vendor/bitflags/spec.md | 556 - vendor/bitflags/src/example_generated.rs | 65 - vendor/bitflags/src/external.rs | 262 - vendor/bitflags/src/external/arbitrary.rs | 33 - vendor/bitflags/src/external/bytemuck.rs | 19 - vendor/bitflags/src/external/serde.rs | 94 - vendor/bitflags/src/internal.rs | 125 - vendor/bitflags/src/iter.rs | 182 - vendor/bitflags/src/lib.rs | 997 -- vendor/bitflags/src/parser.rs | 332 - vendor/bitflags/src/public.rs | 580 - vendor/bitflags/src/tests.rs | 135 - vendor/bitflags/src/tests/all.rs | 23 - vendor/bitflags/src/tests/bitflags_match.rs | 93 - vendor/bitflags/src/tests/bits.rs | 36 - vendor/bitflags/src/tests/clear.rs | 27 - vendor/bitflags/src/tests/complement.rs | 53 - vendor/bitflags/src/tests/contains.rs | 108 - vendor/bitflags/src/tests/difference.rs | 92 - vendor/bitflags/src/tests/empty.rs | 23 - vendor/bitflags/src/tests/eq.rs | 10 - vendor/bitflags/src/tests/extend.rs | 42 - vendor/bitflags/src/tests/flags.rs | 46 - vendor/bitflags/src/tests/fmt.rs | 97 - vendor/bitflags/src/tests/from_bits.rs | 45 - vendor/bitflags/src/tests/from_bits_retain.rs | 38 - .../bitflags/src/tests/from_bits_truncate.rs | 42 - vendor/bitflags/src/tests/from_name.rs | 42 - vendor/bitflags/src/tests/insert.rs | 91 - vendor/bitflags/src/tests/intersection.rs | 79 - vendor/bitflags/src/tests/intersects.rs | 91 - vendor/bitflags/src/tests/is_all.rs | 32 - vendor/bitflags/src/tests/is_empty.rs | 31 - vendor/bitflags/src/tests/iter.rs | 299 - vendor/bitflags/src/tests/parser.rs | 332 - vendor/bitflags/src/tests/remove.rs | 100 - .../src/tests/symmetric_difference.rs | 110 - vendor/bitflags/src/tests/truncate.rs | 29 - vendor/bitflags/src/tests/union.rs | 71 - vendor/bitflags/src/tests/unknown.rs | 40 - vendor/bitflags/src/traits.rs | 457 - vendor/cexpr/.cargo-checksum.json | 1 - vendor/cexpr/.cargo_vcs_info.json | 5 - vendor/cexpr/.github/workflows/ci.yml | 31 - vendor/cexpr/Cargo.toml | 29 - vendor/cexpr/LICENSE-APACHE | 201 - vendor/cexpr/LICENSE-MIT | 25 - vendor/cexpr/bors.toml | 3 - vendor/cexpr/rustfmt.toml | 1 - vendor/cexpr/src/expr.rs | 610 - vendor/cexpr/src/lib.rs | 149 - vendor/cexpr/src/literal.rs | 361 - vendor/cexpr/src/token.rs 
| 44 - vendor/cexpr/tests/clang.rs | 339 - vendor/cexpr/tests/input/chars.h | 3 - vendor/cexpr/tests/input/fail.h | 9 - vendor/cexpr/tests/input/floats.h | 8 - vendor/cexpr/tests/input/int_signed.h | 3 - vendor/cexpr/tests/input/int_unsigned.h | 29 - vendor/cexpr/tests/input/strings.h | 17 - vendor/cexpr/tests/input/test_llvm_bug_9069.h | 4 - vendor/cfg-if/.cargo-checksum.json | 1 - vendor/cfg-if/.cargo_vcs_info.json | 6 - vendor/cfg-if/.github/dependabot.yml | 14 - vendor/cfg-if/.github/workflows/main.yaml | 48 - vendor/cfg-if/.github/workflows/publish.yaml | 25 - vendor/cfg-if/CHANGELOG.md | 29 - vendor/cfg-if/Cargo.lock | 16 - vendor/cfg-if/Cargo.toml | 47 - vendor/cfg-if/LICENSE-APACHE | 201 - vendor/cfg-if/LICENSE-MIT | 25 - vendor/cfg-if/README.md | 56 - vendor/cfg-if/src/lib.rs | 212 - vendor/cfg-if/tests/xcrate.rs | 16 - vendor/clang-sys/.cargo-checksum.json | 1 - vendor/clang-sys/.cargo_vcs_info.json | 6 - vendor/clang-sys/.github/workflows/ci.yml | 56 - vendor/clang-sys/.github/workflows/ssh.yml | 40 - vendor/clang-sys/CHANGELOG.md | 552 - vendor/clang-sys/Cargo.toml | 77 - vendor/clang-sys/LICENSE.txt | 202 - vendor/clang-sys/README.md | 116 - vendor/clang-sys/build.rs | 77 - vendor/clang-sys/build/common.rs | 355 - vendor/clang-sys/build/dynamic.rs | 276 - vendor/clang-sys/build/macros.rs | 49 - vendor/clang-sys/build/static.rs | 146 - vendor/clang-sys/clippy.toml | 1 - vendor/clang-sys/src/lib.rs | 2433 ---- vendor/clang-sys/src/link.rs | 322 - vendor/clang-sys/src/support.rs | 238 - vendor/clang-sys/tests/build.rs | 356 - vendor/clang-sys/tests/header.h | 6 - vendor/clang-sys/tests/lib.rs | 52 - vendor/either/.cargo-checksum.json | 1 - vendor/either/.cargo_vcs_info.json | 6 - vendor/either/.github/workflows/ci.yml | 83 - vendor/either/Cargo.lock | 96 - vendor/either/Cargo.toml | 70 - vendor/either/LICENSE-APACHE | 201 - vendor/either/LICENSE-MIT | 25 - vendor/either/README-crates.io.md | 10 - vendor/either/README.rst | 204 - vendor/either/src/into_either.rs | 64 - vendor/either/src/iterator.rs | 315 - vendor/either/src/lib.rs | 1561 -- vendor/either/src/serde_untagged.rs | 69 - vendor/either/src/serde_untagged_optional.rs | 74 - vendor/glob/.cargo-checksum.json | 1 - vendor/glob/.cargo_vcs_info.json | 6 - vendor/glob/.github/dependabot.yml | 13 - vendor/glob/.github/workflows/publish.yml | 27 - vendor/glob/.github/workflows/rust.yml | 99 - vendor/glob/CHANGELOG.md | 44 - vendor/glob/Cargo.lock | 107 - vendor/glob/Cargo.toml | 45 - vendor/glob/LICENSE-APACHE | 201 - vendor/glob/LICENSE-MIT | 25 - vendor/glob/README.md | 38 - vendor/glob/src/lib.rs | 1511 -- vendor/glob/tests/glob-std.rs | 477 - vendor/glob/triagebot.toml | 1 - vendor/itertools/.cargo-checksum.json | 1 - vendor/itertools/.cargo_vcs_info.json | 6 - vendor/itertools/.codecov.yml | 7 - vendor/itertools/.github/dependabot.yml | 6 - vendor/itertools/.github/workflows/ci.yml | 85 - .../itertools/.github/workflows/coverage.yml | 34 - vendor/itertools/CHANGELOG.md | 539 - vendor/itertools/CONTRIBUTING.md | 189 - vendor/itertools/Cargo.lock | 740 - vendor/itertools/Cargo.toml | 105 - vendor/itertools/LICENSE-APACHE | 201 - vendor/itertools/LICENSE-MIT | 25 - vendor/itertools/README.md | 33 - vendor/itertools/benches/bench1.rs | 767 - vendor/itertools/benches/combinations.rs | 117 - .../benches/combinations_with_replacement.rs | 40 - .../itertools/benches/fold_specialization.rs | 75 - vendor/itertools/benches/powerset.rs | 97 - vendor/itertools/benches/specializations.rs | 667 - vendor/itertools/benches/tree_reduce.rs | 
150 - .../itertools/benches/tuple_combinations.rs | 113 - vendor/itertools/benches/tuples.rs | 208 - vendor/itertools/examples/iris.data | 150 - vendor/itertools/examples/iris.rs | 140 - vendor/itertools/src/adaptors/coalesce.rs | 286 - vendor/itertools/src/adaptors/map.rs | 130 - vendor/itertools/src/adaptors/mod.rs | 1208 -- .../itertools/src/adaptors/multi_product.rs | 231 - vendor/itertools/src/combinations.rs | 243 - .../src/combinations_with_replacement.rs | 192 - vendor/itertools/src/concat_impl.rs | 30 - vendor/itertools/src/cons_tuples_impl.rs | 58 - vendor/itertools/src/diff.rs | 104 - vendor/itertools/src/duplicates_impl.rs | 216 - vendor/itertools/src/either_or_both.rs | 514 - vendor/itertools/src/exactly_one_err.rs | 125 - vendor/itertools/src/extrema_set.rs | 50 - vendor/itertools/src/flatten_ok.rs | 205 - vendor/itertools/src/format.rs | 178 - vendor/itertools/src/free.rs | 317 - vendor/itertools/src/group_map.rs | 32 - vendor/itertools/src/groupbylazy.rs | 613 - vendor/itertools/src/grouping_map.rs | 614 - vendor/itertools/src/impl_macros.rs | 34 - vendor/itertools/src/intersperse.rs | 142 - vendor/itertools/src/iter_index.rs | 116 - vendor/itertools/src/k_smallest.rs | 98 - vendor/itertools/src/kmerge_impl.rs | 240 - vendor/itertools/src/lazy_buffer.rs | 75 - vendor/itertools/src/lib.rs | 4365 ------ vendor/itertools/src/merge_join.rs | 347 - vendor/itertools/src/minmax.rs | 116 - vendor/itertools/src/multipeek_impl.rs | 116 - vendor/itertools/src/pad_tail.rs | 124 - vendor/itertools/src/peek_nth.rs | 178 - vendor/itertools/src/peeking_take_while.rs | 201 - vendor/itertools/src/permutations.rs | 186 - vendor/itertools/src/powerset.rs | 131 - vendor/itertools/src/process_results_impl.rs | 108 - vendor/itertools/src/put_back_n_impl.rs | 71 - vendor/itertools/src/rciter_impl.rs | 102 - vendor/itertools/src/repeatn.rs | 83 - vendor/itertools/src/size_hint.rs | 94 - vendor/itertools/src/sources.rs | 153 - vendor/itertools/src/take_while_inclusive.rs | 96 - vendor/itertools/src/tee.rs | 93 - vendor/itertools/src/tuple_impl.rs | 401 - vendor/itertools/src/unique_impl.rs | 188 - vendor/itertools/src/unziptuple.rs | 80 - vendor/itertools/src/with_position.rs | 124 - vendor/itertools/src/zip_eq_impl.rs | 64 - vendor/itertools/src/zip_longest.rs | 139 - vendor/itertools/src/ziptuple.rs | 137 - vendor/itertools/tests/adaptors_no_collect.rs | 51 - vendor/itertools/tests/flatten_ok.rs | 76 - vendor/itertools/tests/laziness.rs | 283 - vendor/itertools/tests/macros_hygiene.rs | 14 - vendor/itertools/tests/merge_join.rs | 101 - vendor/itertools/tests/peeking_take_while.rs | 69 - vendor/itertools/tests/quick.rs | 1967 --- vendor/itertools/tests/specializations.rs | 582 - vendor/itertools/tests/test_core.rs | 374 - vendor/itertools/tests/test_std.rs | 1523 -- vendor/itertools/tests/tuples.rs | 86 - vendor/itertools/tests/zip.rs | 56 - vendor/libc/.cargo-checksum.json | 1 - vendor/libc/.cargo_vcs_info.json | 6 - vendor/libc/.editorconfig | 7 - vendor/libc/.git-blame-ignore-revs | 6 - vendor/libc/.release-plz.toml | 49 - vendor/libc/CHANGELOG.md | 747 - vendor/libc/CONTRIBUTING.md | 126 - vendor/libc/Cargo.lock | 16 - vendor/libc/Cargo.toml | 201 - vendor/libc/LICENSE-APACHE | 176 - vendor/libc/LICENSE-MIT | 25 - vendor/libc/README.md | 117 - vendor/libc/build.rs | 298 - vendor/libc/cherry-pick-stable.sh | 150 - vendor/libc/rustfmt.toml | 4 - vendor/libc/src/fuchsia/aarch64.rs | 69 - vendor/libc/src/fuchsia/mod.rs | 4322 ------ vendor/libc/src/fuchsia/riscv64.rs | 46 - 
vendor/libc/src/fuchsia/x86_64.rs | 142 - vendor/libc/src/hermit.rs | 561 - vendor/libc/src/lib.rs | 159 - vendor/libc/src/macros.rs | 446 - vendor/libc/src/new/bionic/mod.rs | 2 - vendor/libc/src/new/bionic/sys/mod.rs | 2 - vendor/libc/src/new/bionic/sys/socket.rs | 51 - vendor/libc/src/new/linux_uapi/linux/can.rs | 136 - .../src/new/linux_uapi/linux/can/j1939.rs | 60 - .../libc/src/new/linux_uapi/linux/can/raw.rs | 15 - vendor/libc/src/new/linux_uapi/linux/mod.rs | 4 - vendor/libc/src/new/linux_uapi/mod.rs | 4 - vendor/libc/src/new/mod.rs | 15 - vendor/libc/src/primitives.rs | 95 - vendor/libc/src/psp.rs | 4131 ------ vendor/libc/src/sgx.rs | 15 - vendor/libc/src/solid/aarch64.rs | 1 - vendor/libc/src/solid/arm.rs | 1 - vendor/libc/src/solid/mod.rs | 876 -- vendor/libc/src/switch.rs | 16 - vendor/libc/src/teeos/mod.rs | 1355 -- vendor/libc/src/trusty.rs | 72 - vendor/libc/src/types.rs | 39 - vendor/libc/src/unix/aix/mod.rs | 3382 ----- vendor/libc/src/unix/aix/powerpc64.rs | 477 - vendor/libc/src/unix/bsd/apple/b32/mod.rs | 135 - .../src/unix/bsd/apple/b64/aarch64/mod.rs | 53 - vendor/libc/src/unix/bsd/apple/b64/mod.rs | 141 - .../libc/src/unix/bsd/apple/b64/x86_64/mod.rs | 179 - vendor/libc/src/unix/bsd/apple/mod.rs | 6245 -------- .../unix/bsd/freebsdlike/dragonfly/errno.rs | 17 - .../src/unix/bsd/freebsdlike/dragonfly/mod.rs | 1635 --- .../unix/bsd/freebsdlike/freebsd/aarch64.rs | 110 - .../src/unix/bsd/freebsdlike/freebsd/arm.rs | 53 - .../bsd/freebsdlike/freebsd/freebsd11/b32.rs | 37 - .../bsd/freebsdlike/freebsd/freebsd11/b64.rs | 36 - .../bsd/freebsdlike/freebsd/freebsd11/mod.rs | 449 - .../bsd/freebsdlike/freebsd/freebsd12/mod.rs | 487 - .../freebsdlike/freebsd/freebsd12/x86_64.rs | 7 - .../bsd/freebsdlike/freebsd/freebsd13/mod.rs | 531 - .../freebsdlike/freebsd/freebsd13/x86_64.rs | 7 - .../bsd/freebsdlike/freebsd/freebsd14/mod.rs | 532 - .../freebsdlike/freebsd/freebsd14/x86_64.rs | 14 - .../bsd/freebsdlike/freebsd/freebsd15/mod.rs | 534 - .../freebsdlike/freebsd/freebsd15/x86_64.rs | 14 - .../src/unix/bsd/freebsdlike/freebsd/mod.rs | 5659 -------- .../unix/bsd/freebsdlike/freebsd/powerpc.rs | 62 - .../unix/bsd/freebsdlike/freebsd/powerpc64.rs | 63 - .../unix/bsd/freebsdlike/freebsd/riscv64.rs | 116 - .../src/unix/bsd/freebsdlike/freebsd/x86.rs | 134 - .../bsd/freebsdlike/freebsd/x86_64/mod.rs | 346 - vendor/libc/src/unix/bsd/freebsdlike/mod.rs | 2009 --- vendor/libc/src/unix/bsd/mod.rs | 969 -- vendor/libc/src/unix/bsd/netbsdlike/mod.rs | 905 -- .../src/unix/bsd/netbsdlike/netbsd/aarch64.rs | 132 - .../src/unix/bsd/netbsdlike/netbsd/arm.rs | 70 - .../src/unix/bsd/netbsdlike/netbsd/mips.rs | 11 - .../src/unix/bsd/netbsdlike/netbsd/mod.rs | 3007 ---- .../src/unix/bsd/netbsdlike/netbsd/powerpc.rs | 10 - .../src/unix/bsd/netbsdlike/netbsd/riscv64.rs | 77 - .../src/unix/bsd/netbsdlike/netbsd/sparc64.rs | 7 - .../src/unix/bsd/netbsdlike/netbsd/x86.rs | 5 - .../src/unix/bsd/netbsdlike/netbsd/x86_64.rs | 56 - .../unix/bsd/netbsdlike/openbsd/aarch64.rs | 20 - .../src/unix/bsd/netbsdlike/openbsd/arm.rs | 5 - .../src/unix/bsd/netbsdlike/openbsd/mips64.rs | 4 - .../src/unix/bsd/netbsdlike/openbsd/mod.rs | 2149 --- .../unix/bsd/netbsdlike/openbsd/powerpc.rs | 5 - .../unix/bsd/netbsdlike/openbsd/powerpc64.rs | 5 - .../unix/bsd/netbsdlike/openbsd/riscv64.rs | 25 - .../unix/bsd/netbsdlike/openbsd/sparc64.rs | 4 - .../src/unix/bsd/netbsdlike/openbsd/x86.rs | 5 - .../src/unix/bsd/netbsdlike/openbsd/x86_64.rs | 109 - vendor/libc/src/unix/cygwin/mod.rs | 2477 ---- 
vendor/libc/src/unix/haiku/b32.rs | 18 - vendor/libc/src/unix/haiku/b64.rs | 18 - vendor/libc/src/unix/haiku/bsd.rs | 151 - vendor/libc/src/unix/haiku/mod.rs | 2097 --- vendor/libc/src/unix/haiku/native.rs | 1388 -- vendor/libc/src/unix/haiku/x86_64.rs | 208 - vendor/libc/src/unix/hurd/b32.rs | 92 - vendor/libc/src/unix/hurd/b64.rs | 94 - vendor/libc/src/unix/hurd/mod.rs | 4623 ------ .../src/unix/linux_like/android/b32/arm.rs | 532 - .../src/unix/linux_like/android/b32/mod.rs | 239 - .../unix/linux_like/android/b32/x86/mod.rs | 604 - .../linux_like/android/b64/aarch64/mod.rs | 473 - .../src/unix/linux_like/android/b64/mod.rs | 292 - .../linux_like/android/b64/riscv64/mod.rs | 384 - .../unix/linux_like/android/b64/x86_64/mod.rs | 748 - .../libc/src/unix/linux_like/android/mod.rs | 4157 ------ .../src/unix/linux_like/emscripten/lfs64.rs | 211 - .../src/unix/linux_like/emscripten/mod.rs | 1589 -- .../unix/linux_like/linux/arch/generic/mod.rs | 334 - .../unix/linux_like/linux/arch/mips/mod.rs | 333 - .../src/unix/linux_like/linux/arch/mod.rs | 20 - .../unix/linux_like/linux/arch/powerpc/mod.rs | 280 - .../unix/linux_like/linux/arch/sparc/mod.rs | 247 - .../unix/linux_like/linux/gnu/b32/arm/mod.rs | 928 -- .../unix/linux_like/linux/gnu/b32/csky/mod.rs | 745 - .../unix/linux_like/linux/gnu/b32/m68k/mod.rs | 863 -- .../unix/linux_like/linux/gnu/b32/mips/mod.rs | 925 -- .../src/unix/linux_like/linux/gnu/b32/mod.rs | 491 - .../unix/linux_like/linux/gnu/b32/powerpc.rs | 892 -- .../linux_like/linux/gnu/b32/riscv32/mod.rs | 808 -- .../linux_like/linux/gnu/b32/sparc/mod.rs | 865 -- .../unix/linux_like/linux/gnu/b32/x86/mod.rs | 1098 -- .../linux_like/linux/gnu/b64/aarch64/ilp32.rs | 54 - .../linux_like/linux/gnu/b64/aarch64/lp64.rs | 57 - .../linux_like/linux/gnu/b64/aarch64/mod.rs | 973 -- .../linux/gnu/b64/loongarch64/mod.rs | 922 -- .../linux_like/linux/gnu/b64/mips64/mod.rs | 930 -- .../src/unix/linux_like/linux/gnu/b64/mod.rs | 213 - .../linux_like/linux/gnu/b64/powerpc64/mod.rs | 974 -- .../linux_like/linux/gnu/b64/riscv64/mod.rs | 910 -- .../unix/linux_like/linux/gnu/b64/s390x.rs | 955 -- .../linux_like/linux/gnu/b64/sparc64/mod.rs | 930 -- .../linux_like/linux/gnu/b64/x86_64/mod.rs | 809 -- .../linux/gnu/b64/x86_64/not_x32.rs | 446 - .../linux_like/linux/gnu/b64/x86_64/x32.rs | 398 - .../libc/src/unix/linux_like/linux/gnu/mod.rs | 1382 -- vendor/libc/src/unix/linux_like/linux/mod.rs | 6830 --------- .../unix/linux_like/linux/musl/b32/arm/mod.rs | 792 - .../unix/linux_like/linux/musl/b32/hexagon.rs | 621 - .../linux_like/linux/musl/b32/mips/mod.rs | 775 - .../src/unix/linux_like/linux/musl/b32/mod.rs | 65 - .../unix/linux_like/linux/musl/b32/powerpc.rs | 766 - .../linux_like/linux/musl/b32/riscv32/mod.rs | 655 - .../unix/linux_like/linux/musl/b32/x86/mod.rs | 889 -- .../linux_like/linux/musl/b64/aarch64/mod.rs | 712 - .../linux/musl/b64/loongarch64/mod.rs | 667 - .../unix/linux_like/linux/musl/b64/mips64.rs | 708 - .../src/unix/linux_like/linux/musl/b64/mod.rs | 116 - .../linux_like/linux/musl/b64/powerpc64.rs | 752 - .../linux_like/linux/musl/b64/riscv64/mod.rs | 672 - .../unix/linux_like/linux/musl/b64/s390x.rs | 732 - .../linux_like/linux/musl/b64/wasm32/mod.rs | 688 - .../linux_like/linux/musl/b64/wasm32/wali.rs | 441 - .../linux_like/linux/musl/b64/x86_64/mod.rs | 915 -- .../src/unix/linux_like/linux/musl/lfs64.rs | 239 - .../src/unix/linux_like/linux/musl/mod.rs | 1006 -- .../unix/linux_like/linux/uclibc/arm/mod.rs | 925 -- .../linux/uclibc/mips/mips32/mod.rs | 695 - 
.../linux/uclibc/mips/mips64/mod.rs | 204 - .../unix/linux_like/linux/uclibc/mips/mod.rs | 312 - .../src/unix/linux_like/linux/uclibc/mod.rs | 517 - .../linux_like/linux/uclibc/x86_64/l4re.rs | 53 - .../linux_like/linux/uclibc/x86_64/mod.rs | 355 - .../linux_like/linux/uclibc/x86_64/other.rs | 7 - vendor/libc/src/unix/linux_like/mod.rs | 2214 --- vendor/libc/src/unix/mod.rs | 1901 --- vendor/libc/src/unix/newlib/aarch64/mod.rs | 52 - vendor/libc/src/unix/newlib/arm/mod.rs | 54 - vendor/libc/src/unix/newlib/espidf/mod.rs | 120 - vendor/libc/src/unix/newlib/generic.rs | 39 - vendor/libc/src/unix/newlib/horizon/mod.rs | 278 - vendor/libc/src/unix/newlib/mod.rs | 997 -- vendor/libc/src/unix/newlib/powerpc/mod.rs | 14 - vendor/libc/src/unix/newlib/rtems/mod.rs | 146 - vendor/libc/src/unix/newlib/vita/mod.rs | 235 - vendor/libc/src/unix/nto/aarch64.rs | 35 - vendor/libc/src/unix/nto/mod.rs | 3406 ----- vendor/libc/src/unix/nto/neutrino.rs | 1270 -- vendor/libc/src/unix/nto/x86_64.rs | 111 - vendor/libc/src/unix/nuttx/mod.rs | 597 - vendor/libc/src/unix/redox/mod.rs | 1496 -- vendor/libc/src/unix/solarish/compat.rs | 218 - vendor/libc/src/unix/solarish/illumos.rs | 343 - vendor/libc/src/unix/solarish/mod.rs | 3240 ----- vendor/libc/src/unix/solarish/solaris.rs | 239 - vendor/libc/src/unix/solarish/x86.rs | 31 - vendor/libc/src/unix/solarish/x86_64.rs | 170 - vendor/libc/src/unix/solarish/x86_common.rs | 69 - vendor/libc/src/vxworks/aarch64.rs | 1 - vendor/libc/src/vxworks/arm.rs | 1 - vendor/libc/src/vxworks/mod.rs | 2018 --- vendor/libc/src/vxworks/powerpc.rs | 1 - vendor/libc/src/vxworks/powerpc64.rs | 1 - vendor/libc/src/vxworks/riscv32.rs | 1 - vendor/libc/src/vxworks/riscv64.rs | 1 - vendor/libc/src/vxworks/x86.rs | 1 - vendor/libc/src/vxworks/x86_64.rs | 1 - vendor/libc/src/wasi/mod.rs | 853 -- vendor/libc/src/wasi/p2.rs | 188 - vendor/libc/src/windows/gnu/mod.rs | 36 - vendor/libc/src/windows/mod.rs | 611 - vendor/libc/src/windows/msvc/mod.rs | 17 - vendor/libc/src/xous.rs | 18 - vendor/libc/tests/const_fn.rs | 3 - vendor/libloading/.cargo-checksum.json | 1 - vendor/libloading/.cargo_vcs_info.json | 6 - .../.github/workflows/libloading.yml | 126 - vendor/libloading/Cargo.lock | 47 - vendor/libloading/Cargo.toml | 90 - vendor/libloading/LICENSE | 12 - vendor/libloading/README.mkd | 16 - vendor/libloading/src/changelog.rs | 405 - vendor/libloading/src/error.rs | 146 - vendor/libloading/src/lib.rs | 81 - vendor/libloading/src/os/mod.rs | 27 - vendor/libloading/src/os/unix/consts.rs | 265 - vendor/libloading/src/os/unix/mod.rs | 485 - vendor/libloading/src/os/windows/mod.rs | 590 - vendor/libloading/src/safe.rs | 318 - vendor/libloading/src/test_helpers.rs | 37 - vendor/libloading/src/util.rs | 34 - vendor/libloading/tests/constants.rs | 13 - vendor/libloading/tests/functions.rs | 312 - vendor/libloading/tests/library_filename.rs | 17 - vendor/libloading/tests/markers.rs | 96 - vendor/libloading/tests/windows.rs | 71 - vendor/log/.cargo-checksum.json | 1 - vendor/log/.cargo_vcs_info.json | 6 - vendor/log/.github/workflows/main.yml | 134 - vendor/log/CHANGELOG.md | 410 - vendor/log/Cargo.lock | 270 - vendor/log/Cargo.toml | 151 - vendor/log/LICENSE-APACHE | 201 - vendor/log/LICENSE-MIT | 25 - vendor/log/README.md | 134 - vendor/log/benches/value.rs | 27 - vendor/log/src/__private_api.rs | 151 - vendor/log/src/kv/error.rs | 94 - vendor/log/src/kv/key.rs | 163 - vendor/log/src/kv/mod.rs | 265 - vendor/log/src/kv/source.rs | 514 - vendor/log/src/kv/value.rs | 1395 -- vendor/log/src/lib.rs | 2005 
--- vendor/log/src/macros.rs | 579 - vendor/log/src/serde.rs | 397 - vendor/log/tests/integration.rs | 101 - vendor/log/tests/macros.rs | 429 - vendor/log/triagebot.toml | 1 - vendor/memchr/.cargo-checksum.json | 1 - vendor/memchr/.cargo_vcs_info.json | 6 - vendor/memchr/.ignore | 1 - vendor/memchr/.vim/coc-settings.json | 16 - vendor/memchr/COPYING | 3 - vendor/memchr/Cargo.lock | 80 - vendor/memchr/Cargo.toml | 89 - vendor/memchr/LICENSE-MIT | 21 - vendor/memchr/README.md | 196 - vendor/memchr/UNLICENSE | 24 - vendor/memchr/rustfmt.toml | 2 - vendor/memchr/src/arch/aarch64/memchr.rs | 137 - vendor/memchr/src/arch/aarch64/mod.rs | 7 - vendor/memchr/src/arch/aarch64/neon/memchr.rs | 1031 -- vendor/memchr/src/arch/aarch64/neon/mod.rs | 6 - .../src/arch/aarch64/neon/packedpair.rs | 236 - vendor/memchr/src/arch/all/memchr.rs | 1022 -- vendor/memchr/src/arch/all/mod.rs | 234 - .../src/arch/all/packedpair/default_rank.rs | 258 - vendor/memchr/src/arch/all/packedpair/mod.rs | 359 - vendor/memchr/src/arch/all/rabinkarp.rs | 390 - vendor/memchr/src/arch/all/shiftor.rs | 89 - vendor/memchr/src/arch/all/twoway.rs | 877 -- vendor/memchr/src/arch/generic/memchr.rs | 1214 -- vendor/memchr/src/arch/generic/mod.rs | 14 - vendor/memchr/src/arch/generic/packedpair.rs | 317 - vendor/memchr/src/arch/mod.rs | 16 - vendor/memchr/src/arch/wasm32/memchr.rs | 124 - vendor/memchr/src/arch/wasm32/mod.rs | 7 - .../memchr/src/arch/wasm32/simd128/memchr.rs | 1020 -- vendor/memchr/src/arch/wasm32/simd128/mod.rs | 6 - .../src/arch/wasm32/simd128/packedpair.rs | 228 - vendor/memchr/src/arch/x86_64/avx2/memchr.rs | 1352 -- vendor/memchr/src/arch/x86_64/avx2/mod.rs | 6 - .../memchr/src/arch/x86_64/avx2/packedpair.rs | 272 - vendor/memchr/src/arch/x86_64/memchr.rs | 335 - vendor/memchr/src/arch/x86_64/mod.rs | 8 - vendor/memchr/src/arch/x86_64/sse2/memchr.rs | 1077 -- vendor/memchr/src/arch/x86_64/sse2/mod.rs | 6 - .../memchr/src/arch/x86_64/sse2/packedpair.rs | 232 - vendor/memchr/src/cow.rs | 107 - vendor/memchr/src/ext.rs | 54 - vendor/memchr/src/lib.rs | 221 - vendor/memchr/src/macros.rs | 20 - vendor/memchr/src/memchr.rs | 903 -- vendor/memchr/src/memmem/mod.rs | 737 - vendor/memchr/src/memmem/searcher.rs | 1030 -- vendor/memchr/src/tests/memchr/mod.rs | 307 - vendor/memchr/src/tests/memchr/naive.rs | 33 - vendor/memchr/src/tests/memchr/prop.rs | 323 - vendor/memchr/src/tests/mod.rs | 15 - vendor/memchr/src/tests/packedpair.rs | 216 - vendor/memchr/src/tests/substring/mod.rs | 232 - vendor/memchr/src/tests/substring/naive.rs | 45 - vendor/memchr/src/tests/substring/prop.rs | 126 - vendor/memchr/src/vector.rs | 501 - vendor/minimal-lexical/.cargo-checksum.json | 1 - vendor/minimal-lexical/.cargo_vcs_info.json | 5 - .../.github/ISSUE_TEMPLATE/bug_report.md | 41 - .../.github/ISSUE_TEMPLATE/custom.md | 21 - .../.github/ISSUE_TEMPLATE/documentation.md | 16 - .../.github/ISSUE_TEMPLATE/feature_request.md | 27 - .../.github/ISSUE_TEMPLATE/question.md | 11 - .../.github/PULL_REQUEST_TEMPLATE/bug_fix.md | 27 - .../.github/PULL_REQUEST_TEMPLATE/custom.md | 22 - .../PULL_REQUEST_TEMPLATE/documentation.md | 21 - .../.github/workflows/Cross.yml | 90 - .../.github/workflows/Features.yml | 23 - .../minimal-lexical/.github/workflows/OSX.yml | 40 - .../.github/workflows/Simple.yml | 41 - .../.github/workflows/Valgrind.yml | 24 - vendor/minimal-lexical/.gitmodules | 4 - vendor/minimal-lexical/CHANGELOG | 38 - vendor/minimal-lexical/CODE_OF_CONDUCT.md | 141 - vendor/minimal-lexical/Cargo.toml | 33 - vendor/minimal-lexical/LICENSE-APACHE 
| 201 - vendor/minimal-lexical/LICENSE-MIT | 23 - vendor/minimal-lexical/LICENSE.md | 37 - vendor/minimal-lexical/README.md | 102 - vendor/minimal-lexical/clippy.toml | 1 - vendor/minimal-lexical/rustfmt.toml | 16 - vendor/minimal-lexical/src/bellerophon.rs | 391 - vendor/minimal-lexical/src/bigint.rs | 788 - vendor/minimal-lexical/src/extended_float.rs | 24 - vendor/minimal-lexical/src/fpu.rs | 98 - vendor/minimal-lexical/src/heapvec.rs | 190 - vendor/minimal-lexical/src/lemire.rs | 225 - vendor/minimal-lexical/src/lib.rs | 68 - vendor/minimal-lexical/src/libm.rs | 1238 -- vendor/minimal-lexical/src/mask.rs | 60 - vendor/minimal-lexical/src/num.rs | 308 - vendor/minimal-lexical/src/number.rs | 83 - vendor/minimal-lexical/src/parse.rs | 201 - vendor/minimal-lexical/src/rounding.rs | 131 - vendor/minimal-lexical/src/slow.rs | 403 - vendor/minimal-lexical/src/stackvec.rs | 308 - vendor/minimal-lexical/src/table.rs | 11 - .../minimal-lexical/src/table_bellerophon.rs | 119 - vendor/minimal-lexical/src/table_lemire.rs | 676 - vendor/minimal-lexical/src/table_small.rs | 90 - vendor/minimal-lexical/tests/bellerophon.rs | 59 - .../tests/bellerophon_tests.rs | 231 - .../tests/integration_tests.rs | 228 - vendor/minimal-lexical/tests/lemire_tests.rs | 378 - vendor/minimal-lexical/tests/libm_tests.rs | 289 - vendor/minimal-lexical/tests/mask_tests.rs | 16 - vendor/minimal-lexical/tests/number_tests.rs | 88 - vendor/minimal-lexical/tests/parse_tests.rs | 189 - .../minimal-lexical/tests/rounding_tests.rs | 64 - vendor/minimal-lexical/tests/slow_tests.rs | 337 - vendor/minimal-lexical/tests/stackvec.rs | 32 - vendor/minimal-lexical/tests/vec_tests.rs | 395 - vendor/nom/.cargo-checksum.json | 1 - vendor/nom/.cargo_vcs_info.json | 6 - vendor/nom/CHANGELOG.md | 1551 -- vendor/nom/Cargo.lock | 282 - vendor/nom/Cargo.toml | 168 - vendor/nom/LICENSE | 20 - vendor/nom/README.md | 331 - vendor/nom/doc/nom_recipes.md | 395 - vendor/nom/src/bits/complete.rs | 197 - vendor/nom/src/bits/mod.rs | 179 - vendor/nom/src/bits/streaming.rs | 170 - vendor/nom/src/branch/mod.rs | 267 - vendor/nom/src/branch/tests.rs | 142 - vendor/nom/src/bytes/complete.rs | 756 - vendor/nom/src/bytes/mod.rs | 6 - vendor/nom/src/bytes/streaming.rs | 700 - vendor/nom/src/bytes/tests.rs | 636 - vendor/nom/src/character/complete.rs | 1227 -- vendor/nom/src/character/mod.rs | 116 - vendor/nom/src/character/streaming.rs | 1182 -- vendor/nom/src/character/tests.rs | 62 - vendor/nom/src/combinator/mod.rs | 809 -- vendor/nom/src/combinator/tests.rs | 275 - vendor/nom/src/error.rs | 831 -- vendor/nom/src/internal.rs | 489 - vendor/nom/src/lib.rs | 464 - vendor/nom/src/macros.rs | 23 - vendor/nom/src/multi/mod.rs | 1049 -- vendor/nom/src/multi/tests.rs | 534 - vendor/nom/src/number/complete.rs | 2126 --- vendor/nom/src/number/mod.rs | 15 - vendor/nom/src/number/streaming.rs | 2206 --- vendor/nom/src/sequence/mod.rs | 279 - vendor/nom/src/sequence/tests.rs | 290 - vendor/nom/src/str.rs | 536 - vendor/nom/src/traits.rs | 1441 -- vendor/nom/tests/arithmetic.rs | 94 - vendor/nom/tests/arithmetic_ast.rs | 161 - vendor/nom/tests/css.rs | 45 - vendor/nom/tests/custom_errors.rs | 48 - vendor/nom/tests/escaped.rs | 28 - vendor/nom/tests/float.rs | 46 - vendor/nom/tests/fnmut.rs | 39 - vendor/nom/tests/ini.rs | 207 - vendor/nom/tests/ini_str.rs | 217 - vendor/nom/tests/issues.rs | 242 - vendor/nom/tests/json.rs | 236 - vendor/nom/tests/mp4.rs | 320 - vendor/nom/tests/multiline.rs | 31 - vendor/nom/tests/overflow.rs | 145 - vendor/nom/tests/reborrow_fold.rs | 
31 - vendor/prettyplease/.cargo-checksum.json | 1 - vendor/prettyplease/.cargo_vcs_info.json | 6 - vendor/prettyplease/.github/FUNDING.yml | 1 - vendor/prettyplease/.github/workflows/ci.yml | 123 - vendor/prettyplease/Cargo.lock | 54 - vendor/prettyplease/Cargo.toml | 90 - vendor/prettyplease/LICENSE-APACHE | 176 - vendor/prettyplease/LICENSE-MIT | 23 - vendor/prettyplease/README.md | 312 - vendor/prettyplease/build.rs | 21 - vendor/prettyplease/examples/.tokeignore | 1 - vendor/prettyplease/examples/input.rs | 1 - .../examples/output.prettyplease.rs | 593 - vendor/prettyplease/examples/output.rustc.rs | 506 - .../prettyplease/examples/output.rustfmt.rs | 552 - vendor/prettyplease/src/algorithm.rs | 386 - vendor/prettyplease/src/attr.rs | 288 - vendor/prettyplease/src/classify.rs | 324 - vendor/prettyplease/src/convenience.rs | 98 - vendor/prettyplease/src/data.rs | 79 - vendor/prettyplease/src/expr.rs | 1533 -- vendor/prettyplease/src/file.rs | 17 - vendor/prettyplease/src/fixup.rs | 676 - vendor/prettyplease/src/generics.rs | 426 - vendor/prettyplease/src/item.rs | 1813 --- vendor/prettyplease/src/iter.rs | 46 - vendor/prettyplease/src/lib.rs | 385 - vendor/prettyplease/src/lifetime.rs | 9 - vendor/prettyplease/src/lit.rs | 57 - vendor/prettyplease/src/mac.rs | 706 - vendor/prettyplease/src/pat.rs | 254 - vendor/prettyplease/src/path.rs | 194 - vendor/prettyplease/src/precedence.rs | 148 - vendor/prettyplease/src/ring.rs | 81 - vendor/prettyplease/src/stmt.rs | 221 - vendor/prettyplease/src/token.rs | 80 - vendor/prettyplease/src/ty.rs | 326 - vendor/prettyplease/tests/test.rs | 51 - vendor/prettyplease/tests/test_precedence.rs | 900 -- vendor/proc-macro2/.cargo-checksum.json | 1 - vendor/proc-macro2/.cargo_vcs_info.json | 6 - vendor/proc-macro2/.github/FUNDING.yml | 1 - vendor/proc-macro2/.github/workflows/ci.yml | 232 - vendor/proc-macro2/Cargo.lock | 326 - vendor/proc-macro2/Cargo.toml | 105 - vendor/proc-macro2/LICENSE-APACHE | 176 - vendor/proc-macro2/LICENSE-MIT | 23 - vendor/proc-macro2/README.md | 94 - vendor/proc-macro2/build.rs | 267 - vendor/proc-macro2/rust-toolchain.toml | 2 - vendor/proc-macro2/src/detection.rs | 75 - vendor/proc-macro2/src/extra.rs | 151 - vendor/proc-macro2/src/fallback.rs | 1256 -- vendor/proc-macro2/src/lib.rs | 1495 -- vendor/proc-macro2/src/location.rs | 29 - vendor/proc-macro2/src/marker.rs | 17 - vendor/proc-macro2/src/num.rs | 17 - vendor/proc-macro2/src/parse.rs | 995 -- vendor/proc-macro2/src/probe.rs | 10 - .../proc-macro2/src/probe/proc_macro_span.rs | 51 - .../src/probe/proc_macro_span_file.rs | 14 - .../src/probe/proc_macro_span_location.rs | 21 - vendor/proc-macro2/src/rcvec.rs | 146 - .../proc-macro2/src/rustc_literal_escaper.rs | 701 - vendor/proc-macro2/src/wrapper.rs | 984 -- vendor/proc-macro2/tests/comments.rs | 105 - vendor/proc-macro2/tests/features.rs | 10 - vendor/proc-macro2/tests/marker.rs | 97 - vendor/proc-macro2/tests/test.rs | 1094 -- vendor/proc-macro2/tests/test_fmt.rs | 28 - vendor/proc-macro2/tests/test_size.rs | 81 - vendor/quote/.cargo-checksum.json | 1 - vendor/quote/.cargo_vcs_info.json | 6 - vendor/quote/.github/FUNDING.yml | 1 - vendor/quote/.github/workflows/ci.yml | 112 - vendor/quote/Cargo.lock | 256 - vendor/quote/Cargo.toml | 70 - vendor/quote/LICENSE-APACHE | 176 - vendor/quote/LICENSE-MIT | 23 - vendor/quote/README.md | 271 - vendor/quote/build.rs | 32 - vendor/quote/rust-toolchain.toml | 2 - vendor/quote/src/ext.rs | 136 - vendor/quote/src/format.rs | 168 - vendor/quote/src/ident_fragment.rs | 88 - 
vendor/quote/src/lib.rs | 1455 -- vendor/quote/src/runtime.rs | 503 - vendor/quote/src/spanned.rs | 49 - vendor/quote/src/to_tokens.rs | 271 - vendor/quote/tests/compiletest.rs | 7 - vendor/quote/tests/test.rs | 568 - .../ui/does-not-have-iter-interpolated-dup.rs | 9 - ...does-not-have-iter-interpolated-dup.stderr | 13 - .../ui/does-not-have-iter-interpolated.rs | 9 - .../ui/does-not-have-iter-interpolated.stderr | 13 - .../tests/ui/does-not-have-iter-separated.rs | 5 - .../ui/does-not-have-iter-separated.stderr | 13 - vendor/quote/tests/ui/does-not-have-iter.rs | 5 - .../quote/tests/ui/does-not-have-iter.stderr | 13 - vendor/quote/tests/ui/not-quotable.rs | 7 - vendor/quote/tests/ui/not-quotable.stderr | 20 - vendor/quote/tests/ui/not-repeatable.rs | 8 - vendor/quote/tests/ui/not-repeatable.stderr | 42 - vendor/quote/tests/ui/wrong-type-span.rs | 7 - vendor/quote/tests/ui/wrong-type-span.stderr | 10 - vendor/regex-automata/.cargo-checksum.json | 1 - vendor/regex-automata/.cargo_vcs_info.json | 6 - vendor/regex-automata/Cargo.lock | 372 - vendor/regex-automata/Cargo.toml | 200 - vendor/regex-automata/LICENSE-APACHE | 201 - vendor/regex-automata/LICENSE-MIT | 25 - vendor/regex-automata/README.md | 117 - vendor/regex-automata/src/dfa/accel.rs | 517 - vendor/regex-automata/src/dfa/automaton.rs | 2260 --- vendor/regex-automata/src/dfa/dense.rs | 5260 ------- vendor/regex-automata/src/dfa/determinize.rs | 599 - vendor/regex-automata/src/dfa/minimize.rs | 463 - vendor/regex-automata/src/dfa/mod.rs | 360 - vendor/regex-automata/src/dfa/onepass.rs | 3192 ---- vendor/regex-automata/src/dfa/regex.rs | 870 -- vendor/regex-automata/src/dfa/remapper.rs | 242 - vendor/regex-automata/src/dfa/search.rs | 644 - vendor/regex-automata/src/dfa/sparse.rs | 2655 ---- vendor/regex-automata/src/dfa/special.rs | 494 - vendor/regex-automata/src/dfa/start.rs | 74 - vendor/regex-automata/src/hybrid/dfa.rs | 4434 ------ vendor/regex-automata/src/hybrid/error.rs | 241 - vendor/regex-automata/src/hybrid/id.rs | 354 - vendor/regex-automata/src/hybrid/mod.rs | 144 - vendor/regex-automata/src/hybrid/regex.rs | 895 -- vendor/regex-automata/src/hybrid/search.rs | 802 - vendor/regex-automata/src/lib.rs | 651 - vendor/regex-automata/src/macros.rs | 20 - vendor/regex-automata/src/meta/error.rs | 241 - vendor/regex-automata/src/meta/limited.rs | 251 - vendor/regex-automata/src/meta/literal.rs | 81 - vendor/regex-automata/src/meta/mod.rs | 62 - vendor/regex-automata/src/meta/regex.rs | 3706 ----- .../regex-automata/src/meta/reverse_inner.rs | 220 - vendor/regex-automata/src/meta/stopat.rs | 212 - vendor/regex-automata/src/meta/strategy.rs | 1905 --- vendor/regex-automata/src/meta/wrappers.rs | 1336 -- vendor/regex-automata/src/nfa/mod.rs | 55 - .../src/nfa/thompson/backtrack.rs | 1908 --- .../src/nfa/thompson/builder.rs | 1337 -- .../src/nfa/thompson/compiler.rs | 2368 --- .../regex-automata/src/nfa/thompson/error.rs | 182 - .../src/nfa/thompson/literal_trie.rs | 528 - vendor/regex-automata/src/nfa/thompson/map.rs | 296 - vendor/regex-automata/src/nfa/thompson/mod.rs | 81 - vendor/regex-automata/src/nfa/thompson/nfa.rs | 2098 --- .../regex-automata/src/nfa/thompson/pikevm.rs | 2359 --- .../src/nfa/thompson/range_trie.rs | 1051 -- vendor/regex-automata/src/util/alphabet.rs | 1139 -- vendor/regex-automata/src/util/captures.rs | 2551 ---- .../src/util/determinize/mod.rs | 682 - .../src/util/determinize/state.rs | 907 -- vendor/regex-automata/src/util/empty.rs | 265 - vendor/regex-automata/src/util/escape.rs | 84 - 
vendor/regex-automata/src/util/int.rs | 246 - vendor/regex-automata/src/util/interpolate.rs | 576 - vendor/regex-automata/src/util/iter.rs | 1022 -- vendor/regex-automata/src/util/lazy.rs | 461 - vendor/regex-automata/src/util/look.rs | 2547 ---- vendor/regex-automata/src/util/memchr.rs | 93 - vendor/regex-automata/src/util/mod.rs | 57 - vendor/regex-automata/src/util/pool.rs | 1199 -- .../src/util/prefilter/aho_corasick.rs | 149 - .../src/util/prefilter/byteset.rs | 58 - .../src/util/prefilter/memchr.rs | 186 - .../src/util/prefilter/memmem.rs | 88 - .../regex-automata/src/util/prefilter/mod.rs | 719 - .../src/util/prefilter/teddy.rs | 160 - vendor/regex-automata/src/util/primitives.rs | 776 - vendor/regex-automata/src/util/search.rs | 1988 --- vendor/regex-automata/src/util/sparse_set.rs | 239 - vendor/regex-automata/src/util/start.rs | 479 - vendor/regex-automata/src/util/syntax.rs | 482 - .../src/util/unicode_data/mod.rs | 17 - .../src/util/unicode_data/perl_word.rs | 806 - vendor/regex-automata/src/util/utf8.rs | 191 - vendor/regex-automata/src/util/wire.rs | 947 -- vendor/regex-automata/test | 95 - vendor/regex-automata/tests/dfa/api.rs | 162 - vendor/regex-automata/tests/dfa/mod.rs | 8 - .../regex-automata/tests/dfa/onepass/mod.rs | 2 - .../regex-automata/tests/dfa/onepass/suite.rs | 197 - vendor/regex-automata/tests/dfa/regression.rs | 48 - vendor/regex-automata/tests/dfa/suite.rs | 443 - vendor/regex-automata/tests/fuzz/dense.rs | 52 - vendor/regex-automata/tests/fuzz/mod.rs | 2 - vendor/regex-automata/tests/fuzz/sparse.rs | 132 - ...h-9486fb7c8a93b12c12a62166b43d31640c0208a9 | Bin 1894 -> 0 bytes ...m-9486fb7c8a93b12c12a62166b43d31640c0208a9 | Bin 1882 -> 0 bytes ...h-0da59c0434eaf35e5a6b470fa9244bb79c72b000 | Bin 941 -> 0 bytes ...h-18cfc246f2ddfc3dfc92b0c7893178c7cf65efa9 | Bin 924 -> 0 bytes ...h-61fd8e3003bf9d99f6c1e5a8488727eefd234b98 | Bin 933 -> 0 bytes ...h-a1b839d899ced76d5d7d0f78f9edb7a421505838 | Bin 802 -> 0 bytes ...h-c383ae07ec5e191422eadc492117439011816570 | Bin 924 -> 0 bytes ...h-d07703ceb94b10dcd9e4acb809f2051420449e2b | Bin 922 -> 0 bytes ...h-dbb8172d3984e7e7d03f4b5f8bb86ecd1460eff9 | Bin 728 -> 0 bytes vendor/regex-automata/tests/gen/README.md | 65 - vendor/regex-automata/tests/gen/dense/mod.rs | 22 - .../tests/gen/dense/multi_pattern_v2.rs | 43 - .../dense/multi_pattern_v2_fwd.bigendian.dfa | Bin 11100 -> 0 bytes .../multi_pattern_v2_fwd.littleendian.dfa | Bin 11100 -> 0 bytes .../dense/multi_pattern_v2_rev.bigendian.dfa | Bin 7584 -> 0 bytes .../multi_pattern_v2_rev.littleendian.dfa | Bin 7584 -> 0 bytes vendor/regex-automata/tests/gen/mod.rs | 2 - vendor/regex-automata/tests/gen/sparse/mod.rs | 22 - .../tests/gen/sparse/multi_pattern_v2.rs | 37 - .../sparse/multi_pattern_v2_fwd.bigendian.dfa | Bin 3476 -> 0 bytes .../multi_pattern_v2_fwd.littleendian.dfa | Bin 3476 -> 0 bytes .../sparse/multi_pattern_v2_rev.bigendian.dfa | Bin 1920 -> 0 bytes .../multi_pattern_v2_rev.littleendian.dfa | Bin 1920 -> 0 bytes vendor/regex-automata/tests/hybrid/api.rs | 171 - vendor/regex-automata/tests/hybrid/mod.rs | 3 - vendor/regex-automata/tests/hybrid/suite.rs | 347 - vendor/regex-automata/tests/lib.rs | 115 - vendor/regex-automata/tests/meta/mod.rs | 2 - vendor/regex-automata/tests/meta/suite.rs | 200 - vendor/regex-automata/tests/nfa/mod.rs | 1 - .../tests/nfa/thompson/backtrack/mod.rs | 2 - .../tests/nfa/thompson/backtrack/suite.rs | 213 - .../regex-automata/tests/nfa/thompson/mod.rs | 4 - .../tests/nfa/thompson/pikevm/mod.rs | 2 - 
.../tests/nfa/thompson/pikevm/suite.rs | 162 - vendor/regex-syntax/.cargo-checksum.json | 1 - vendor/regex-syntax/.cargo_vcs_info.json | 6 - vendor/regex-syntax/Cargo.lock | 65 - vendor/regex-syntax/Cargo.toml | 81 - vendor/regex-syntax/LICENSE-APACHE | 201 - vendor/regex-syntax/LICENSE-MIT | 25 - vendor/regex-syntax/README.md | 96 - vendor/regex-syntax/benches/bench.rs | 63 - vendor/regex-syntax/src/ast/mod.rs | 1807 --- vendor/regex-syntax/src/ast/parse.rs | 6377 -------- vendor/regex-syntax/src/ast/print.rs | 577 - vendor/regex-syntax/src/ast/visitor.rs | 522 - vendor/regex-syntax/src/debug.rs | 107 - vendor/regex-syntax/src/either.rs | 8 - vendor/regex-syntax/src/error.rs | 311 - vendor/regex-syntax/src/hir/interval.rs | 564 - vendor/regex-syntax/src/hir/literal.rs | 3214 ---- vendor/regex-syntax/src/hir/mod.rs | 3873 ----- vendor/regex-syntax/src/hir/print.rs | 608 - vendor/regex-syntax/src/hir/translate.rs | 3740 ----- vendor/regex-syntax/src/hir/visitor.rs | 215 - vendor/regex-syntax/src/lib.rs | 433 - vendor/regex-syntax/src/parser.rs | 254 - vendor/regex-syntax/src/rank.rs | 258 - vendor/regex-syntax/src/unicode.rs | 1041 -- .../src/unicode_tables/LICENSE-UNICODE | 57 - vendor/regex-syntax/src/unicode_tables/age.rs | 1846 --- .../src/unicode_tables/case_folding_simple.rs | 2948 ---- .../src/unicode_tables/general_category.rs | 6717 --------- .../unicode_tables/grapheme_cluster_break.rs | 1420 -- vendor/regex-syntax/src/unicode_tables/mod.rs | 57 - .../src/unicode_tables/perl_decimal.rs | 84 - .../src/unicode_tables/perl_space.rs | 23 - .../src/unicode_tables/perl_word.rs | 806 - .../src/unicode_tables/property_bool.rs | 12095 ---------------- .../src/unicode_tables/property_names.rs | 281 - .../src/unicode_tables/property_values.rs | 956 -- .../regex-syntax/src/unicode_tables/script.rs | 1300 -- .../src/unicode_tables/script_extension.rs | 1718 --- .../src/unicode_tables/sentence_break.rs | 2530 ---- .../src/unicode_tables/word_break.rs | 1152 -- vendor/regex-syntax/src/utf8.rs | 592 - vendor/regex-syntax/test | 30 - vendor/regex/.cargo-checksum.json | 1 - vendor/regex/.cargo_vcs_info.json | 6 - vendor/regex/.vim/coc-settings.json | 6 - vendor/regex/CHANGELOG.md | 1742 --- vendor/regex/Cargo.lock | 383 - vendor/regex/Cargo.toml | 207 - vendor/regex/Cross.toml | 7 - vendor/regex/LICENSE-APACHE | 201 - vendor/regex/LICENSE-MIT | 25 - vendor/regex/README.md | 336 - vendor/regex/UNICODE.md | 258 - vendor/regex/bench/README.md | 2 - vendor/regex/rustfmt.toml | 2 - vendor/regex/src/builders.rs | 2539 ---- vendor/regex/src/bytes.rs | 91 - vendor/regex/src/error.rs | 101 - vendor/regex/src/find_byte.rs | 17 - vendor/regex/src/lib.rs | 1353 -- vendor/regex/src/pattern.rs | 67 - vendor/regex/src/regex/bytes.rs | 2722 ---- vendor/regex/src/regex/mod.rs | 2 - vendor/regex/src/regex/string.rs | 2625 ---- vendor/regex/src/regexset/bytes.rs | 728 - vendor/regex/src/regexset/mod.rs | 2 - vendor/regex/src/regexset/string.rs | 724 - vendor/regex/test | 46 - vendor/regex/testdata/README.md | 22 - vendor/regex/testdata/anchored.toml | 127 - vendor/regex/testdata/bytes.toml | 235 - vendor/regex/testdata/crazy.toml | 315 - vendor/regex/testdata/crlf.toml | 117 - vendor/regex/testdata/earliest.toml | 52 - vendor/regex/testdata/empty.toml | 113 - vendor/regex/testdata/expensive.toml | 23 - vendor/regex/testdata/flags.toml | 68 - vendor/regex/testdata/fowler/basic.toml | 1611 -- vendor/regex/testdata/fowler/dat/README | 25 - vendor/regex/testdata/fowler/dat/basic.dat | 223 - 
.../regex/testdata/fowler/dat/nullsubexpr.dat | 74 - .../regex/testdata/fowler/dat/repetition.dat | 169 - vendor/regex/testdata/fowler/nullsubexpr.toml | 405 - vendor/regex/testdata/fowler/repetition.toml | 746 - vendor/regex/testdata/iter.toml | 143 - vendor/regex/testdata/leftmost-all.toml | 25 - vendor/regex/testdata/line-terminator.toml | 109 - vendor/regex/testdata/misc.toml | 99 - vendor/regex/testdata/multiline.toml | 845 -- vendor/regex/testdata/no-unicode.toml | 222 - vendor/regex/testdata/overlapping.toml | 280 - vendor/regex/testdata/regex-lite.toml | 98 - vendor/regex/testdata/regression.toml | 830 -- vendor/regex/testdata/set.toml | 641 - vendor/regex/testdata/substring.toml | 36 - vendor/regex/testdata/unicode.toml | 517 - vendor/regex/testdata/utf8.toml | 399 - .../regex/testdata/word-boundary-special.toml | 687 - vendor/regex/testdata/word-boundary.toml | 781 - vendor/regex/tests/lib.rs | 58 - vendor/regex/tests/misc.rs | 143 - vendor/regex/tests/regression.rs | 94 - vendor/regex/tests/regression_fuzz.rs | 61 - vendor/regex/tests/replace.rs | 183 - vendor/regex/tests/searcher.rs | 93 - vendor/regex/tests/suite_bytes.rs | 108 - vendor/regex/tests/suite_bytes_set.rs | 71 - vendor/regex/tests/suite_string.rs | 113 - vendor/regex/tests/suite_string_set.rs | 78 - vendor/rustc-hash/.cargo-checksum.json | 1 - vendor/rustc-hash/.cargo_vcs_info.json | 6 - vendor/rustc-hash/.github/workflows/rust.yml | 73 - vendor/rustc-hash/CHANGELOG.md | 32 - vendor/rustc-hash/CODE_OF_CONDUCT.md | 3 - vendor/rustc-hash/Cargo.lock | 75 - vendor/rustc-hash/Cargo.toml | 49 - vendor/rustc-hash/LICENSE-APACHE | 176 - vendor/rustc-hash/LICENSE-MIT | 23 - vendor/rustc-hash/README.md | 42 - vendor/rustc-hash/src/lib.rs | 459 - vendor/rustc-hash/src/random_state.rs | 101 - vendor/rustc-hash/src/seeded_state.rs | 76 - vendor/shlex/.cargo-checksum.json | 1 - vendor/shlex/.cargo_vcs_info.json | 6 - vendor/shlex/.github/workflows/test.yml | 36 - vendor/shlex/CHANGELOG.md | 21 - vendor/shlex/Cargo.toml | 35 - vendor/shlex/LICENSE-APACHE | 13 - vendor/shlex/LICENSE-MIT | 21 - vendor/shlex/README.md | 39 - vendor/shlex/src/bytes.rs | 576 - vendor/shlex/src/lib.rs | 358 - vendor/shlex/src/quoting_warning.md | 365 - vendor/syn/.cargo-checksum.json | 1 - vendor/syn/.cargo_vcs_info.json | 6 - vendor/syn/Cargo.lock | 1819 --- vendor/syn/Cargo.toml | 272 - vendor/syn/LICENSE-APACHE | 176 - vendor/syn/LICENSE-MIT | 23 - vendor/syn/README.md | 284 - vendor/syn/benches/file.rs | 59 - vendor/syn/benches/rust.rs | 194 - vendor/syn/src/attr.rs | 836 -- vendor/syn/src/bigint.rs | 66 - vendor/syn/src/buffer.rs | 435 - vendor/syn/src/classify.rs | 311 - vendor/syn/src/custom_keyword.rs | 260 - vendor/syn/src/custom_punctuation.rs | 305 - vendor/syn/src/data.rs | 424 - vendor/syn/src/derive.rs | 259 - vendor/syn/src/discouraged.rs | 225 - vendor/syn/src/drops.rs | 58 - vendor/syn/src/error.rs | 468 - vendor/syn/src/export.rs | 73 - vendor/syn/src/expr.rs | 4173 ------ vendor/syn/src/ext.rs | 179 - vendor/syn/src/file.rs | 125 - vendor/syn/src/fixup.rs | 773 - vendor/syn/src/gen/clone.rs | 2267 --- vendor/syn/src/gen/debug.rs | 3238 ----- vendor/syn/src/gen/eq.rs | 2306 --- vendor/syn/src/gen/fold.rs | 3902 ----- vendor/syn/src/gen/hash.rs | 2876 ---- vendor/syn/src/gen/token.css | 737 - vendor/syn/src/gen/visit.rs | 3941 ----- vendor/syn/src/gen/visit_mut.rs | 3759 ----- vendor/syn/src/generics.rs | 1477 -- vendor/syn/src/group.rs | 291 - vendor/syn/src/ident.rs | 108 - vendor/syn/src/item.rs | 3490 ----- vendor/syn/src/lib.rs | 
1009 -- vendor/syn/src/lifetime.rs | 155 - vendor/syn/src/lit.rs | 1918 --- vendor/syn/src/lookahead.rs | 348 - vendor/syn/src/mac.rs | 225 - vendor/syn/src/macros.rs | 182 - vendor/syn/src/meta.rs | 427 - vendor/syn/src/op.rs | 219 - vendor/syn/src/parse.rs | 1419 -- vendor/syn/src/parse_macro_input.rs | 128 - vendor/syn/src/parse_quote.rs | 240 - vendor/syn/src/pat.rs | 955 -- vendor/syn/src/path.rs | 966 -- vendor/syn/src/precedence.rs | 210 - vendor/syn/src/print.rs | 16 - vendor/syn/src/punctuated.rs | 1169 -- vendor/syn/src/restriction.rs | 178 - vendor/syn/src/scan_expr.rs | 268 - vendor/syn/src/sealed.rs | 4 - vendor/syn/src/span.rs | 63 - vendor/syn/src/spanned.rs | 118 - vendor/syn/src/stmt.rs | 484 - vendor/syn/src/thread.rs | 60 - vendor/syn/src/token.rs | 1093 -- vendor/syn/src/tt.rs | 96 - vendor/syn/src/ty.rs | 1271 -- vendor/syn/src/verbatim.rs | 33 - vendor/syn/src/whitespace.rs | 65 - vendor/syn/tests/common/eq.rs | 898 -- vendor/syn/tests/common/mod.rs | 6 - vendor/syn/tests/common/parse.rs | 52 - vendor/syn/tests/common/visit.rs | 119 - vendor/syn/tests/debug/gen.rs | 5239 ------- vendor/syn/tests/debug/mod.rs | 147 - vendor/syn/tests/macros/mod.rs | 7 - vendor/syn/tests/regression.rs | 5 - vendor/syn/tests/regression/issue1108.rs | 5 - vendor/syn/tests/regression/issue1235.rs | 32 - vendor/syn/tests/repo/mod.rs | 630 - vendor/syn/tests/repo/progress.rs | 37 - vendor/syn/tests/snapshot/mod.rs | 68 - vendor/syn/tests/test_asyncness.rs | 49 - vendor/syn/tests/test_attribute.rs | 231 - vendor/syn/tests/test_derive_input.rs | 785 - vendor/syn/tests/test_expr.rs | 1702 --- vendor/syn/tests/test_generics.rs | 345 - vendor/syn/tests/test_grouping.rs | 59 - vendor/syn/tests/test_ident.rs | 87 - vendor/syn/tests/test_item.rs | 316 - vendor/syn/tests/test_lit.rs | 335 - vendor/syn/tests/test_meta.rs | 180 - vendor/syn/tests/test_parse_buffer.rs | 103 - vendor/syn/tests/test_parse_quote.rs | 172 - vendor/syn/tests/test_parse_stream.rs | 187 - vendor/syn/tests/test_pat.rs | 158 - vendor/syn/tests/test_path.rs | 116 - vendor/syn/tests/test_precedence.rs | 558 - vendor/syn/tests/test_punctuated.rs | 92 - vendor/syn/tests/test_receiver.rs | 327 - vendor/syn/tests/test_round_trip.rs | 256 - vendor/syn/tests/test_shebang.rs | 73 - vendor/syn/tests/test_size.rs | 54 - vendor/syn/tests/test_stmt.rs | 337 - vendor/syn/tests/test_token_trees.rs | 38 - vendor/syn/tests/test_ty.rs | 471 - vendor/syn/tests/test_unparenthesize.rs | 70 - vendor/syn/tests/test_visibility.rs | 191 - vendor/syn/tests/zzz_stable.rs | 33 - vendor/unicode-ident/.cargo-checksum.json | 1 - vendor/unicode-ident/.cargo_vcs_info.json | 6 - vendor/unicode-ident/.github/FUNDING.yml | 1 - vendor/unicode-ident/.github/workflows/ci.yml | 110 - vendor/unicode-ident/Cargo.lock | 499 - vendor/unicode-ident/Cargo.toml | 84 - vendor/unicode-ident/LICENSE-APACHE | 176 - vendor/unicode-ident/LICENSE-MIT | 23 - vendor/unicode-ident/LICENSE-UNICODE | 39 - vendor/unicode-ident/README.md | 274 - vendor/unicode-ident/benches/xid.rs | 126 - vendor/unicode-ident/src/lib.rs | 281 - vendor/unicode-ident/src/tables.rs | 663 - vendor/unicode-ident/tests/compare.rs | 68 - vendor/unicode-ident/tests/fst/.gitignore | 1 - vendor/unicode-ident/tests/fst/mod.rs | 11 - .../unicode-ident/tests/fst/xid_continue.fst | Bin 76143 -> 0 bytes vendor/unicode-ident/tests/fst/xid_start.fst | Bin 67370 -> 0 bytes vendor/unicode-ident/tests/roaring/mod.rs | 23 - vendor/unicode-ident/tests/static_size.rs | 95 - vendor/unicode-ident/tests/tables/mod.rs | 7 - 
vendor/unicode-ident/tests/tables/tables.rs | 361 - vendor/unicode-ident/tests/trie/mod.rs | 7 - vendor/unicode-ident/tests/trie/trie.rs | 453 - vendor/windows-link/.cargo-checksum.json | 1 - vendor/windows-link/.cargo_vcs_info.json | 6 - vendor/windows-link/Cargo.lock | 7 - vendor/windows-link/Cargo.toml | 39 - vendor/windows-link/license-apache-2.0 | 201 - vendor/windows-link/license-mit | 21 - vendor/windows-link/readme.md | 26 - vendor/windows-link/src/lib.rs | 39 - 1282 files changed, 546705 deletions(-) delete mode 100644 vendor/aho-corasick/.cargo-checksum.json delete mode 100644 vendor/aho-corasick/.cargo_vcs_info.json delete mode 100644 vendor/aho-corasick/.github/FUNDING.yml delete mode 100644 vendor/aho-corasick/.github/workflows/ci.yml delete mode 100644 vendor/aho-corasick/.vim/coc-settings.json delete mode 100644 vendor/aho-corasick/COPYING delete mode 100644 vendor/aho-corasick/Cargo.lock delete mode 100644 vendor/aho-corasick/Cargo.toml delete mode 100644 vendor/aho-corasick/DESIGN.md delete mode 100644 vendor/aho-corasick/LICENSE-MIT delete mode 100644 vendor/aho-corasick/README.md delete mode 100644 vendor/aho-corasick/UNLICENSE delete mode 100644 vendor/aho-corasick/rustfmt.toml delete mode 100644 vendor/aho-corasick/src/ahocorasick.rs delete mode 100644 vendor/aho-corasick/src/automaton.rs delete mode 100644 vendor/aho-corasick/src/dfa.rs delete mode 100644 vendor/aho-corasick/src/lib.rs delete mode 100644 vendor/aho-corasick/src/macros.rs delete mode 100644 vendor/aho-corasick/src/nfa/contiguous.rs delete mode 100644 vendor/aho-corasick/src/nfa/mod.rs delete mode 100644 vendor/aho-corasick/src/nfa/noncontiguous.rs delete mode 100644 vendor/aho-corasick/src/packed/api.rs delete mode 100644 vendor/aho-corasick/src/packed/ext.rs delete mode 100644 vendor/aho-corasick/src/packed/mod.rs delete mode 100644 vendor/aho-corasick/src/packed/pattern.rs delete mode 100644 vendor/aho-corasick/src/packed/rabinkarp.rs delete mode 100644 vendor/aho-corasick/src/packed/teddy/README.md delete mode 100644 vendor/aho-corasick/src/packed/teddy/builder.rs delete mode 100644 vendor/aho-corasick/src/packed/teddy/generic.rs delete mode 100644 vendor/aho-corasick/src/packed/teddy/mod.rs delete mode 100644 vendor/aho-corasick/src/packed/tests.rs delete mode 100644 vendor/aho-corasick/src/packed/vector.rs delete mode 100644 vendor/aho-corasick/src/tests.rs delete mode 100644 vendor/aho-corasick/src/transducer.rs delete mode 100644 vendor/aho-corasick/src/util/alphabet.rs delete mode 100644 vendor/aho-corasick/src/util/buffer.rs delete mode 100644 vendor/aho-corasick/src/util/byte_frequencies.rs delete mode 100644 vendor/aho-corasick/src/util/debug.rs delete mode 100644 vendor/aho-corasick/src/util/error.rs delete mode 100644 vendor/aho-corasick/src/util/int.rs delete mode 100644 vendor/aho-corasick/src/util/mod.rs delete mode 100644 vendor/aho-corasick/src/util/prefilter.rs delete mode 100644 vendor/aho-corasick/src/util/primitives.rs delete mode 100644 vendor/aho-corasick/src/util/remapper.rs delete mode 100644 vendor/aho-corasick/src/util/search.rs delete mode 100644 vendor/aho-corasick/src/util/special.rs delete mode 100644 vendor/base64/.cargo-checksum.json delete mode 100644 vendor/base64/.cargo_vcs_info.json delete mode 100644 vendor/base64/.circleci/config.yml delete mode 100644 vendor/base64/.github/ISSUE_TEMPLATE/general-purpose-issue.md delete mode 100644 vendor/base64/Cargo.lock delete mode 100644 vendor/base64/Cargo.toml delete mode 100644 vendor/base64/LICENSE-APACHE delete mode 
100644 vendor/base64/LICENSE-MIT delete mode 100644 vendor/base64/README.md delete mode 100644 vendor/base64/RELEASE-NOTES.md delete mode 100644 vendor/base64/benches/benchmarks.rs delete mode 100644 vendor/base64/clippy.toml delete mode 100644 vendor/base64/examples/base64.rs delete mode 100644 vendor/base64/icon_CLion.svg delete mode 100644 vendor/base64/src/alphabet.rs delete mode 100644 vendor/base64/src/chunked_encoder.rs delete mode 100644 vendor/base64/src/decode.rs delete mode 100644 vendor/base64/src/display.rs delete mode 100644 vendor/base64/src/encode.rs delete mode 100644 vendor/base64/src/engine/general_purpose/decode.rs delete mode 100644 vendor/base64/src/engine/general_purpose/decode_suffix.rs delete mode 100644 vendor/base64/src/engine/general_purpose/mod.rs delete mode 100644 vendor/base64/src/engine/mod.rs delete mode 100644 vendor/base64/src/engine/naive.rs delete mode 100644 vendor/base64/src/engine/tests.rs delete mode 100644 vendor/base64/src/lib.rs delete mode 100644 vendor/base64/src/prelude.rs delete mode 100644 vendor/base64/src/read/decoder.rs delete mode 100644 vendor/base64/src/read/decoder_tests.rs delete mode 100644 vendor/base64/src/read/mod.rs delete mode 100644 vendor/base64/src/tests.rs delete mode 100644 vendor/base64/src/write/encoder.rs delete mode 100644 vendor/base64/src/write/encoder_string_writer.rs delete mode 100644 vendor/base64/src/write/encoder_tests.rs delete mode 100644 vendor/base64/src/write/mod.rs delete mode 100644 vendor/base64/tests/encode.rs delete mode 100644 vendor/base64/tests/tests.rs delete mode 100644 vendor/bindgen/.cargo-checksum.json delete mode 100644 vendor/bindgen/.cargo_vcs_info.json delete mode 100644 vendor/bindgen/Cargo.lock delete mode 100644 vendor/bindgen/Cargo.toml delete mode 100644 vendor/bindgen/LICENSE delete mode 100644 vendor/bindgen/README.md delete mode 100644 vendor/bindgen/build.rs delete mode 100644 vendor/bindgen/callbacks.rs delete mode 100644 vendor/bindgen/clang.rs delete mode 100644 vendor/bindgen/codegen/bitfield_unit.rs delete mode 100644 vendor/bindgen/codegen/bitfield_unit_raw_ref_macros.rs delete mode 100644 vendor/bindgen/codegen/bitfield_unit_tests.rs delete mode 100644 vendor/bindgen/codegen/dyngen.rs delete mode 100644 vendor/bindgen/codegen/error.rs delete mode 100644 vendor/bindgen/codegen/helpers.rs delete mode 100644 vendor/bindgen/codegen/impl_debug.rs delete mode 100644 vendor/bindgen/codegen/impl_partialeq.rs delete mode 100644 vendor/bindgen/codegen/mod.rs delete mode 100644 vendor/bindgen/codegen/postprocessing/merge_extern_blocks.rs delete mode 100644 vendor/bindgen/codegen/postprocessing/mod.rs delete mode 100644 vendor/bindgen/codegen/postprocessing/sort_semantically.rs delete mode 100644 vendor/bindgen/codegen/serialize.rs delete mode 100644 vendor/bindgen/codegen/struct_layout.rs delete mode 100644 vendor/bindgen/deps.rs delete mode 100644 vendor/bindgen/diagnostics.rs delete mode 100644 vendor/bindgen/extra_assertions.rs delete mode 100644 vendor/bindgen/features.rs delete mode 100644 vendor/bindgen/ir/analysis/derive.rs delete mode 100644 vendor/bindgen/ir/analysis/has_destructor.rs delete mode 100644 vendor/bindgen/ir/analysis/has_float.rs delete mode 100644 vendor/bindgen/ir/analysis/has_type_param_in_array.rs delete mode 100644 vendor/bindgen/ir/analysis/has_vtable.rs delete mode 100644 vendor/bindgen/ir/analysis/mod.rs delete mode 100644 vendor/bindgen/ir/analysis/sizedness.rs delete mode 100644 vendor/bindgen/ir/analysis/template_params.rs delete mode 100644 
vendor/bindgen/ir/annotations.rs delete mode 100644 vendor/bindgen/ir/comment.rs delete mode 100644 vendor/bindgen/ir/comp.rs delete mode 100644 vendor/bindgen/ir/context.rs delete mode 100644 vendor/bindgen/ir/derive.rs delete mode 100644 vendor/bindgen/ir/dot.rs delete mode 100644 vendor/bindgen/ir/enum_ty.rs delete mode 100644 vendor/bindgen/ir/function.rs delete mode 100644 vendor/bindgen/ir/int.rs delete mode 100644 vendor/bindgen/ir/item.rs delete mode 100644 vendor/bindgen/ir/item_kind.rs delete mode 100644 vendor/bindgen/ir/layout.rs delete mode 100644 vendor/bindgen/ir/mod.rs delete mode 100644 vendor/bindgen/ir/module.rs delete mode 100644 vendor/bindgen/ir/objc.rs delete mode 100644 vendor/bindgen/ir/template.rs delete mode 100644 vendor/bindgen/ir/traversal.rs delete mode 100644 vendor/bindgen/ir/ty.rs delete mode 100644 vendor/bindgen/ir/var.rs delete mode 100644 vendor/bindgen/lib.rs delete mode 100644 vendor/bindgen/log_stubs.rs delete mode 100644 vendor/bindgen/options/as_args.rs delete mode 100644 vendor/bindgen/options/cli.rs delete mode 100644 vendor/bindgen/options/helpers.rs delete mode 100644 vendor/bindgen/options/mod.rs delete mode 100644 vendor/bindgen/parse.rs delete mode 100644 vendor/bindgen/regex_set.rs delete mode 100644 vendor/bindgen/time.rs delete mode 100644 vendor/bitflags/.cargo-checksum.json delete mode 100644 vendor/bitflags/.cargo_vcs_info.json delete mode 100644 vendor/bitflags/CHANGELOG.md delete mode 100644 vendor/bitflags/CODE_OF_CONDUCT.md delete mode 100644 vendor/bitflags/CONTRIBUTING.md delete mode 100644 vendor/bitflags/Cargo.lock delete mode 100644 vendor/bitflags/Cargo.toml delete mode 100644 vendor/bitflags/LICENSE-APACHE delete mode 100644 vendor/bitflags/LICENSE-MIT delete mode 100644 vendor/bitflags/README.md delete mode 100644 vendor/bitflags/SECURITY.md delete mode 100644 vendor/bitflags/benches/parse.rs delete mode 100644 vendor/bitflags/examples/custom_bits_type.rs delete mode 100644 vendor/bitflags/examples/custom_derive.rs delete mode 100644 vendor/bitflags/examples/fmt.rs delete mode 100644 vendor/bitflags/examples/macro_free.rs delete mode 100644 vendor/bitflags/examples/serde.rs delete mode 100644 vendor/bitflags/spec.md delete mode 100644 vendor/bitflags/src/example_generated.rs delete mode 100644 vendor/bitflags/src/external.rs delete mode 100644 vendor/bitflags/src/external/arbitrary.rs delete mode 100644 vendor/bitflags/src/external/bytemuck.rs delete mode 100644 vendor/bitflags/src/external/serde.rs delete mode 100644 vendor/bitflags/src/internal.rs delete mode 100644 vendor/bitflags/src/iter.rs delete mode 100644 vendor/bitflags/src/lib.rs delete mode 100644 vendor/bitflags/src/parser.rs delete mode 100644 vendor/bitflags/src/public.rs delete mode 100644 vendor/bitflags/src/tests.rs delete mode 100644 vendor/bitflags/src/tests/all.rs delete mode 100644 vendor/bitflags/src/tests/bitflags_match.rs delete mode 100644 vendor/bitflags/src/tests/bits.rs delete mode 100644 vendor/bitflags/src/tests/clear.rs delete mode 100644 vendor/bitflags/src/tests/complement.rs delete mode 100644 vendor/bitflags/src/tests/contains.rs delete mode 100644 vendor/bitflags/src/tests/difference.rs delete mode 100644 vendor/bitflags/src/tests/empty.rs delete mode 100644 vendor/bitflags/src/tests/eq.rs delete mode 100644 vendor/bitflags/src/tests/extend.rs delete mode 100644 vendor/bitflags/src/tests/flags.rs delete mode 100644 vendor/bitflags/src/tests/fmt.rs delete mode 100644 vendor/bitflags/src/tests/from_bits.rs delete mode 100644 
vendor/bitflags/src/tests/from_bits_retain.rs delete mode 100644 vendor/bitflags/src/tests/from_bits_truncate.rs delete mode 100644 vendor/bitflags/src/tests/from_name.rs delete mode 100644 vendor/bitflags/src/tests/insert.rs delete mode 100644 vendor/bitflags/src/tests/intersection.rs delete mode 100644 vendor/bitflags/src/tests/intersects.rs delete mode 100644 vendor/bitflags/src/tests/is_all.rs delete mode 100644 vendor/bitflags/src/tests/is_empty.rs delete mode 100644 vendor/bitflags/src/tests/iter.rs delete mode 100644 vendor/bitflags/src/tests/parser.rs delete mode 100644 vendor/bitflags/src/tests/remove.rs delete mode 100644 vendor/bitflags/src/tests/symmetric_difference.rs delete mode 100644 vendor/bitflags/src/tests/truncate.rs delete mode 100644 vendor/bitflags/src/tests/union.rs delete mode 100644 vendor/bitflags/src/tests/unknown.rs delete mode 100644 vendor/bitflags/src/traits.rs delete mode 100644 vendor/cexpr/.cargo-checksum.json delete mode 100644 vendor/cexpr/.cargo_vcs_info.json delete mode 100644 vendor/cexpr/.github/workflows/ci.yml delete mode 100644 vendor/cexpr/Cargo.toml delete mode 100644 vendor/cexpr/LICENSE-APACHE delete mode 100644 vendor/cexpr/LICENSE-MIT delete mode 100644 vendor/cexpr/bors.toml delete mode 100644 vendor/cexpr/rustfmt.toml delete mode 100644 vendor/cexpr/src/expr.rs delete mode 100644 vendor/cexpr/src/lib.rs delete mode 100644 vendor/cexpr/src/literal.rs delete mode 100644 vendor/cexpr/src/token.rs delete mode 100644 vendor/cexpr/tests/clang.rs delete mode 100644 vendor/cexpr/tests/input/chars.h delete mode 100644 vendor/cexpr/tests/input/fail.h delete mode 100644 vendor/cexpr/tests/input/floats.h delete mode 100644 vendor/cexpr/tests/input/int_signed.h delete mode 100644 vendor/cexpr/tests/input/int_unsigned.h delete mode 100644 vendor/cexpr/tests/input/strings.h delete mode 100644 vendor/cexpr/tests/input/test_llvm_bug_9069.h delete mode 100644 vendor/cfg-if/.cargo-checksum.json delete mode 100644 vendor/cfg-if/.cargo_vcs_info.json delete mode 100644 vendor/cfg-if/.github/dependabot.yml delete mode 100644 vendor/cfg-if/.github/workflows/main.yaml delete mode 100644 vendor/cfg-if/.github/workflows/publish.yaml delete mode 100644 vendor/cfg-if/CHANGELOG.md delete mode 100644 vendor/cfg-if/Cargo.lock delete mode 100644 vendor/cfg-if/Cargo.toml delete mode 100644 vendor/cfg-if/LICENSE-APACHE delete mode 100644 vendor/cfg-if/LICENSE-MIT delete mode 100644 vendor/cfg-if/README.md delete mode 100644 vendor/cfg-if/src/lib.rs delete mode 100644 vendor/cfg-if/tests/xcrate.rs delete mode 100644 vendor/clang-sys/.cargo-checksum.json delete mode 100644 vendor/clang-sys/.cargo_vcs_info.json delete mode 100644 vendor/clang-sys/.github/workflows/ci.yml delete mode 100644 vendor/clang-sys/.github/workflows/ssh.yml delete mode 100644 vendor/clang-sys/CHANGELOG.md delete mode 100644 vendor/clang-sys/Cargo.toml delete mode 100644 vendor/clang-sys/LICENSE.txt delete mode 100644 vendor/clang-sys/README.md delete mode 100644 vendor/clang-sys/build.rs delete mode 100644 vendor/clang-sys/build/common.rs delete mode 100644 vendor/clang-sys/build/dynamic.rs delete mode 100644 vendor/clang-sys/build/macros.rs delete mode 100644 vendor/clang-sys/build/static.rs delete mode 100644 vendor/clang-sys/clippy.toml delete mode 100644 vendor/clang-sys/src/lib.rs delete mode 100644 vendor/clang-sys/src/link.rs delete mode 100644 vendor/clang-sys/src/support.rs delete mode 100644 vendor/clang-sys/tests/build.rs delete mode 100644 vendor/clang-sys/tests/header.h delete mode 100644 
vendor/clang-sys/tests/lib.rs delete mode 100644 vendor/either/.cargo-checksum.json delete mode 100644 vendor/either/.cargo_vcs_info.json delete mode 100644 vendor/either/.github/workflows/ci.yml delete mode 100644 vendor/either/Cargo.lock delete mode 100644 vendor/either/Cargo.toml delete mode 100644 vendor/either/LICENSE-APACHE delete mode 100644 vendor/either/LICENSE-MIT delete mode 100644 vendor/either/README-crates.io.md delete mode 100644 vendor/either/README.rst delete mode 100644 vendor/either/src/into_either.rs delete mode 100644 vendor/either/src/iterator.rs delete mode 100644 vendor/either/src/lib.rs delete mode 100644 vendor/either/src/serde_untagged.rs delete mode 100644 vendor/either/src/serde_untagged_optional.rs delete mode 100644 vendor/glob/.cargo-checksum.json delete mode 100644 vendor/glob/.cargo_vcs_info.json delete mode 100644 vendor/glob/.github/dependabot.yml delete mode 100644 vendor/glob/.github/workflows/publish.yml delete mode 100644 vendor/glob/.github/workflows/rust.yml delete mode 100644 vendor/glob/CHANGELOG.md delete mode 100644 vendor/glob/Cargo.lock delete mode 100644 vendor/glob/Cargo.toml delete mode 100644 vendor/glob/LICENSE-APACHE delete mode 100644 vendor/glob/LICENSE-MIT delete mode 100644 vendor/glob/README.md delete mode 100644 vendor/glob/src/lib.rs delete mode 100644 vendor/glob/tests/glob-std.rs delete mode 100644 vendor/glob/triagebot.toml delete mode 100644 vendor/itertools/.cargo-checksum.json delete mode 100644 vendor/itertools/.cargo_vcs_info.json delete mode 100644 vendor/itertools/.codecov.yml delete mode 100644 vendor/itertools/.github/dependabot.yml delete mode 100644 vendor/itertools/.github/workflows/ci.yml delete mode 100644 vendor/itertools/.github/workflows/coverage.yml delete mode 100644 vendor/itertools/CHANGELOG.md delete mode 100644 vendor/itertools/CONTRIBUTING.md delete mode 100644 vendor/itertools/Cargo.lock delete mode 100644 vendor/itertools/Cargo.toml delete mode 100644 vendor/itertools/LICENSE-APACHE delete mode 100644 vendor/itertools/LICENSE-MIT delete mode 100644 vendor/itertools/README.md delete mode 100644 vendor/itertools/benches/bench1.rs delete mode 100644 vendor/itertools/benches/combinations.rs delete mode 100644 vendor/itertools/benches/combinations_with_replacement.rs delete mode 100644 vendor/itertools/benches/fold_specialization.rs delete mode 100644 vendor/itertools/benches/powerset.rs delete mode 100644 vendor/itertools/benches/specializations.rs delete mode 100644 vendor/itertools/benches/tree_reduce.rs delete mode 100644 vendor/itertools/benches/tuple_combinations.rs delete mode 100644 vendor/itertools/benches/tuples.rs delete mode 100644 vendor/itertools/examples/iris.data delete mode 100644 vendor/itertools/examples/iris.rs delete mode 100644 vendor/itertools/src/adaptors/coalesce.rs delete mode 100644 vendor/itertools/src/adaptors/map.rs delete mode 100644 vendor/itertools/src/adaptors/mod.rs delete mode 100644 vendor/itertools/src/adaptors/multi_product.rs delete mode 100644 vendor/itertools/src/combinations.rs delete mode 100644 vendor/itertools/src/combinations_with_replacement.rs delete mode 100644 vendor/itertools/src/concat_impl.rs delete mode 100644 vendor/itertools/src/cons_tuples_impl.rs delete mode 100644 vendor/itertools/src/diff.rs delete mode 100644 vendor/itertools/src/duplicates_impl.rs delete mode 100644 vendor/itertools/src/either_or_both.rs delete mode 100644 vendor/itertools/src/exactly_one_err.rs delete mode 100644 vendor/itertools/src/extrema_set.rs delete mode 100644 
vendor/itertools/src/flatten_ok.rs delete mode 100644 vendor/itertools/src/format.rs delete mode 100644 vendor/itertools/src/free.rs delete mode 100644 vendor/itertools/src/group_map.rs delete mode 100644 vendor/itertools/src/groupbylazy.rs delete mode 100644 vendor/itertools/src/grouping_map.rs delete mode 100644 vendor/itertools/src/impl_macros.rs delete mode 100644 vendor/itertools/src/intersperse.rs delete mode 100644 vendor/itertools/src/iter_index.rs delete mode 100644 vendor/itertools/src/k_smallest.rs delete mode 100644 vendor/itertools/src/kmerge_impl.rs delete mode 100644 vendor/itertools/src/lazy_buffer.rs delete mode 100644 vendor/itertools/src/lib.rs delete mode 100644 vendor/itertools/src/merge_join.rs delete mode 100644 vendor/itertools/src/minmax.rs delete mode 100644 vendor/itertools/src/multipeek_impl.rs delete mode 100644 vendor/itertools/src/pad_tail.rs delete mode 100644 vendor/itertools/src/peek_nth.rs delete mode 100644 vendor/itertools/src/peeking_take_while.rs delete mode 100644 vendor/itertools/src/permutations.rs delete mode 100644 vendor/itertools/src/powerset.rs delete mode 100644 vendor/itertools/src/process_results_impl.rs delete mode 100644 vendor/itertools/src/put_back_n_impl.rs delete mode 100644 vendor/itertools/src/rciter_impl.rs delete mode 100644 vendor/itertools/src/repeatn.rs delete mode 100644 vendor/itertools/src/size_hint.rs delete mode 100644 vendor/itertools/src/sources.rs delete mode 100644 vendor/itertools/src/take_while_inclusive.rs delete mode 100644 vendor/itertools/src/tee.rs delete mode 100644 vendor/itertools/src/tuple_impl.rs delete mode 100644 vendor/itertools/src/unique_impl.rs delete mode 100644 vendor/itertools/src/unziptuple.rs delete mode 100644 vendor/itertools/src/with_position.rs delete mode 100644 vendor/itertools/src/zip_eq_impl.rs delete mode 100644 vendor/itertools/src/zip_longest.rs delete mode 100644 vendor/itertools/src/ziptuple.rs delete mode 100644 vendor/itertools/tests/adaptors_no_collect.rs delete mode 100644 vendor/itertools/tests/flatten_ok.rs delete mode 100644 vendor/itertools/tests/laziness.rs delete mode 100644 vendor/itertools/tests/macros_hygiene.rs delete mode 100644 vendor/itertools/tests/merge_join.rs delete mode 100644 vendor/itertools/tests/peeking_take_while.rs delete mode 100644 vendor/itertools/tests/quick.rs delete mode 100644 vendor/itertools/tests/specializations.rs delete mode 100644 vendor/itertools/tests/test_core.rs delete mode 100644 vendor/itertools/tests/test_std.rs delete mode 100644 vendor/itertools/tests/tuples.rs delete mode 100644 vendor/itertools/tests/zip.rs delete mode 100644 vendor/libc/.cargo-checksum.json delete mode 100644 vendor/libc/.cargo_vcs_info.json delete mode 100644 vendor/libc/.editorconfig delete mode 100644 vendor/libc/.git-blame-ignore-revs delete mode 100644 vendor/libc/.release-plz.toml delete mode 100644 vendor/libc/CHANGELOG.md delete mode 100644 vendor/libc/CONTRIBUTING.md delete mode 100644 vendor/libc/Cargo.lock delete mode 100644 vendor/libc/Cargo.toml delete mode 100644 vendor/libc/LICENSE-APACHE delete mode 100644 vendor/libc/LICENSE-MIT delete mode 100644 vendor/libc/README.md delete mode 100644 vendor/libc/build.rs delete mode 100755 vendor/libc/cherry-pick-stable.sh delete mode 100644 vendor/libc/rustfmt.toml delete mode 100644 vendor/libc/src/fuchsia/aarch64.rs delete mode 100644 vendor/libc/src/fuchsia/mod.rs delete mode 100644 vendor/libc/src/fuchsia/riscv64.rs delete mode 100644 vendor/libc/src/fuchsia/x86_64.rs delete mode 100644 
vendor/libc/src/hermit.rs delete mode 100644 vendor/libc/src/lib.rs delete mode 100644 vendor/libc/src/macros.rs delete mode 100644 vendor/libc/src/new/bionic/mod.rs delete mode 100644 vendor/libc/src/new/bionic/sys/mod.rs delete mode 100644 vendor/libc/src/new/bionic/sys/socket.rs delete mode 100644 vendor/libc/src/new/linux_uapi/linux/can.rs delete mode 100644 vendor/libc/src/new/linux_uapi/linux/can/j1939.rs delete mode 100644 vendor/libc/src/new/linux_uapi/linux/can/raw.rs delete mode 100644 vendor/libc/src/new/linux_uapi/linux/mod.rs delete mode 100644 vendor/libc/src/new/linux_uapi/mod.rs delete mode 100644 vendor/libc/src/new/mod.rs delete mode 100644 vendor/libc/src/primitives.rs delete mode 100644 vendor/libc/src/psp.rs delete mode 100644 vendor/libc/src/sgx.rs delete mode 100644 vendor/libc/src/solid/aarch64.rs delete mode 100644 vendor/libc/src/solid/arm.rs delete mode 100644 vendor/libc/src/solid/mod.rs delete mode 100644 vendor/libc/src/switch.rs delete mode 100644 vendor/libc/src/teeos/mod.rs delete mode 100644 vendor/libc/src/trusty.rs delete mode 100644 vendor/libc/src/types.rs delete mode 100644 vendor/libc/src/unix/aix/mod.rs delete mode 100644 vendor/libc/src/unix/aix/powerpc64.rs delete mode 100644 vendor/libc/src/unix/bsd/apple/b32/mod.rs delete mode 100644 vendor/libc/src/unix/bsd/apple/b64/aarch64/mod.rs delete mode 100644 vendor/libc/src/unix/bsd/apple/b64/mod.rs delete mode 100644 vendor/libc/src/unix/bsd/apple/b64/x86_64/mod.rs delete mode 100644 vendor/libc/src/unix/bsd/apple/mod.rs delete mode 100644 vendor/libc/src/unix/bsd/freebsdlike/dragonfly/errno.rs delete mode 100644 vendor/libc/src/unix/bsd/freebsdlike/dragonfly/mod.rs delete mode 100644 vendor/libc/src/unix/bsd/freebsdlike/freebsd/aarch64.rs delete mode 100644 vendor/libc/src/unix/bsd/freebsdlike/freebsd/arm.rs delete mode 100644 vendor/libc/src/unix/bsd/freebsdlike/freebsd/freebsd11/b32.rs delete mode 100644 vendor/libc/src/unix/bsd/freebsdlike/freebsd/freebsd11/b64.rs delete mode 100644 vendor/libc/src/unix/bsd/freebsdlike/freebsd/freebsd11/mod.rs delete mode 100644 vendor/libc/src/unix/bsd/freebsdlike/freebsd/freebsd12/mod.rs delete mode 100644 vendor/libc/src/unix/bsd/freebsdlike/freebsd/freebsd12/x86_64.rs delete mode 100644 vendor/libc/src/unix/bsd/freebsdlike/freebsd/freebsd13/mod.rs delete mode 100644 vendor/libc/src/unix/bsd/freebsdlike/freebsd/freebsd13/x86_64.rs delete mode 100644 vendor/libc/src/unix/bsd/freebsdlike/freebsd/freebsd14/mod.rs delete mode 100644 vendor/libc/src/unix/bsd/freebsdlike/freebsd/freebsd14/x86_64.rs delete mode 100644 vendor/libc/src/unix/bsd/freebsdlike/freebsd/freebsd15/mod.rs delete mode 100644 vendor/libc/src/unix/bsd/freebsdlike/freebsd/freebsd15/x86_64.rs delete mode 100644 vendor/libc/src/unix/bsd/freebsdlike/freebsd/mod.rs delete mode 100644 vendor/libc/src/unix/bsd/freebsdlike/freebsd/powerpc.rs delete mode 100644 vendor/libc/src/unix/bsd/freebsdlike/freebsd/powerpc64.rs delete mode 100644 vendor/libc/src/unix/bsd/freebsdlike/freebsd/riscv64.rs delete mode 100644 vendor/libc/src/unix/bsd/freebsdlike/freebsd/x86.rs delete mode 100644 vendor/libc/src/unix/bsd/freebsdlike/freebsd/x86_64/mod.rs delete mode 100644 vendor/libc/src/unix/bsd/freebsdlike/mod.rs delete mode 100644 vendor/libc/src/unix/bsd/mod.rs delete mode 100644 vendor/libc/src/unix/bsd/netbsdlike/mod.rs delete mode 100644 vendor/libc/src/unix/bsd/netbsdlike/netbsd/aarch64.rs delete mode 100644 vendor/libc/src/unix/bsd/netbsdlike/netbsd/arm.rs delete mode 100644 
vendor/libc/src/unix/bsd/netbsdlike/netbsd/mips.rs delete mode 100644 vendor/libc/src/unix/bsd/netbsdlike/netbsd/mod.rs delete mode 100644 vendor/libc/src/unix/bsd/netbsdlike/netbsd/powerpc.rs delete mode 100644 vendor/libc/src/unix/bsd/netbsdlike/netbsd/riscv64.rs delete mode 100644 vendor/libc/src/unix/bsd/netbsdlike/netbsd/sparc64.rs delete mode 100644 vendor/libc/src/unix/bsd/netbsdlike/netbsd/x86.rs delete mode 100644 vendor/libc/src/unix/bsd/netbsdlike/netbsd/x86_64.rs delete mode 100644 vendor/libc/src/unix/bsd/netbsdlike/openbsd/aarch64.rs delete mode 100644 vendor/libc/src/unix/bsd/netbsdlike/openbsd/arm.rs delete mode 100644 vendor/libc/src/unix/bsd/netbsdlike/openbsd/mips64.rs delete mode 100644 vendor/libc/src/unix/bsd/netbsdlike/openbsd/mod.rs delete mode 100644 vendor/libc/src/unix/bsd/netbsdlike/openbsd/powerpc.rs delete mode 100644 vendor/libc/src/unix/bsd/netbsdlike/openbsd/powerpc64.rs delete mode 100644 vendor/libc/src/unix/bsd/netbsdlike/openbsd/riscv64.rs delete mode 100644 vendor/libc/src/unix/bsd/netbsdlike/openbsd/sparc64.rs delete mode 100644 vendor/libc/src/unix/bsd/netbsdlike/openbsd/x86.rs delete mode 100644 vendor/libc/src/unix/bsd/netbsdlike/openbsd/x86_64.rs delete mode 100644 vendor/libc/src/unix/cygwin/mod.rs delete mode 100644 vendor/libc/src/unix/haiku/b32.rs delete mode 100644 vendor/libc/src/unix/haiku/b64.rs delete mode 100644 vendor/libc/src/unix/haiku/bsd.rs delete mode 100644 vendor/libc/src/unix/haiku/mod.rs delete mode 100644 vendor/libc/src/unix/haiku/native.rs delete mode 100644 vendor/libc/src/unix/haiku/x86_64.rs delete mode 100644 vendor/libc/src/unix/hurd/b32.rs delete mode 100644 vendor/libc/src/unix/hurd/b64.rs delete mode 100644 vendor/libc/src/unix/hurd/mod.rs delete mode 100644 vendor/libc/src/unix/linux_like/android/b32/arm.rs delete mode 100644 vendor/libc/src/unix/linux_like/android/b32/mod.rs delete mode 100644 vendor/libc/src/unix/linux_like/android/b32/x86/mod.rs delete mode 100644 vendor/libc/src/unix/linux_like/android/b64/aarch64/mod.rs delete mode 100644 vendor/libc/src/unix/linux_like/android/b64/mod.rs delete mode 100644 vendor/libc/src/unix/linux_like/android/b64/riscv64/mod.rs delete mode 100644 vendor/libc/src/unix/linux_like/android/b64/x86_64/mod.rs delete mode 100644 vendor/libc/src/unix/linux_like/android/mod.rs delete mode 100644 vendor/libc/src/unix/linux_like/emscripten/lfs64.rs delete mode 100644 vendor/libc/src/unix/linux_like/emscripten/mod.rs delete mode 100644 vendor/libc/src/unix/linux_like/linux/arch/generic/mod.rs delete mode 100644 vendor/libc/src/unix/linux_like/linux/arch/mips/mod.rs delete mode 100644 vendor/libc/src/unix/linux_like/linux/arch/mod.rs delete mode 100644 vendor/libc/src/unix/linux_like/linux/arch/powerpc/mod.rs delete mode 100644 vendor/libc/src/unix/linux_like/linux/arch/sparc/mod.rs delete mode 100644 vendor/libc/src/unix/linux_like/linux/gnu/b32/arm/mod.rs delete mode 100644 vendor/libc/src/unix/linux_like/linux/gnu/b32/csky/mod.rs delete mode 100644 vendor/libc/src/unix/linux_like/linux/gnu/b32/m68k/mod.rs delete mode 100644 vendor/libc/src/unix/linux_like/linux/gnu/b32/mips/mod.rs delete mode 100644 vendor/libc/src/unix/linux_like/linux/gnu/b32/mod.rs delete mode 100644 vendor/libc/src/unix/linux_like/linux/gnu/b32/powerpc.rs delete mode 100644 vendor/libc/src/unix/linux_like/linux/gnu/b32/riscv32/mod.rs delete mode 100644 vendor/libc/src/unix/linux_like/linux/gnu/b32/sparc/mod.rs delete mode 100644 vendor/libc/src/unix/linux_like/linux/gnu/b32/x86/mod.rs delete mode 100644 
vendor/libc/src/unix/linux_like/linux/gnu/b64/aarch64/ilp32.rs delete mode 100644 vendor/libc/src/unix/linux_like/linux/gnu/b64/aarch64/lp64.rs delete mode 100644 vendor/libc/src/unix/linux_like/linux/gnu/b64/aarch64/mod.rs delete mode 100644 vendor/libc/src/unix/linux_like/linux/gnu/b64/loongarch64/mod.rs delete mode 100644 vendor/libc/src/unix/linux_like/linux/gnu/b64/mips64/mod.rs delete mode 100644 vendor/libc/src/unix/linux_like/linux/gnu/b64/mod.rs delete mode 100644 vendor/libc/src/unix/linux_like/linux/gnu/b64/powerpc64/mod.rs delete mode 100644 vendor/libc/src/unix/linux_like/linux/gnu/b64/riscv64/mod.rs delete mode 100644 vendor/libc/src/unix/linux_like/linux/gnu/b64/s390x.rs delete mode 100644 vendor/libc/src/unix/linux_like/linux/gnu/b64/sparc64/mod.rs delete mode 100644 vendor/libc/src/unix/linux_like/linux/gnu/b64/x86_64/mod.rs delete mode 100644 vendor/libc/src/unix/linux_like/linux/gnu/b64/x86_64/not_x32.rs delete mode 100644 vendor/libc/src/unix/linux_like/linux/gnu/b64/x86_64/x32.rs delete mode 100644 vendor/libc/src/unix/linux_like/linux/gnu/mod.rs delete mode 100644 vendor/libc/src/unix/linux_like/linux/mod.rs delete mode 100644 vendor/libc/src/unix/linux_like/linux/musl/b32/arm/mod.rs delete mode 100644 vendor/libc/src/unix/linux_like/linux/musl/b32/hexagon.rs delete mode 100644 vendor/libc/src/unix/linux_like/linux/musl/b32/mips/mod.rs delete mode 100644 vendor/libc/src/unix/linux_like/linux/musl/b32/mod.rs delete mode 100644 vendor/libc/src/unix/linux_like/linux/musl/b32/powerpc.rs delete mode 100644 vendor/libc/src/unix/linux_like/linux/musl/b32/riscv32/mod.rs delete mode 100644 vendor/libc/src/unix/linux_like/linux/musl/b32/x86/mod.rs delete mode 100644 vendor/libc/src/unix/linux_like/linux/musl/b64/aarch64/mod.rs delete mode 100644 vendor/libc/src/unix/linux_like/linux/musl/b64/loongarch64/mod.rs delete mode 100644 vendor/libc/src/unix/linux_like/linux/musl/b64/mips64.rs delete mode 100644 vendor/libc/src/unix/linux_like/linux/musl/b64/mod.rs delete mode 100644 vendor/libc/src/unix/linux_like/linux/musl/b64/powerpc64.rs delete mode 100644 vendor/libc/src/unix/linux_like/linux/musl/b64/riscv64/mod.rs delete mode 100644 vendor/libc/src/unix/linux_like/linux/musl/b64/s390x.rs delete mode 100644 vendor/libc/src/unix/linux_like/linux/musl/b64/wasm32/mod.rs delete mode 100644 vendor/libc/src/unix/linux_like/linux/musl/b64/wasm32/wali.rs delete mode 100644 vendor/libc/src/unix/linux_like/linux/musl/b64/x86_64/mod.rs delete mode 100644 vendor/libc/src/unix/linux_like/linux/musl/lfs64.rs delete mode 100644 vendor/libc/src/unix/linux_like/linux/musl/mod.rs delete mode 100644 vendor/libc/src/unix/linux_like/linux/uclibc/arm/mod.rs delete mode 100644 vendor/libc/src/unix/linux_like/linux/uclibc/mips/mips32/mod.rs delete mode 100644 vendor/libc/src/unix/linux_like/linux/uclibc/mips/mips64/mod.rs delete mode 100644 vendor/libc/src/unix/linux_like/linux/uclibc/mips/mod.rs delete mode 100644 vendor/libc/src/unix/linux_like/linux/uclibc/mod.rs delete mode 100644 vendor/libc/src/unix/linux_like/linux/uclibc/x86_64/l4re.rs delete mode 100644 vendor/libc/src/unix/linux_like/linux/uclibc/x86_64/mod.rs delete mode 100644 vendor/libc/src/unix/linux_like/linux/uclibc/x86_64/other.rs delete mode 100644 vendor/libc/src/unix/linux_like/mod.rs delete mode 100644 vendor/libc/src/unix/mod.rs delete mode 100644 vendor/libc/src/unix/newlib/aarch64/mod.rs delete mode 100644 vendor/libc/src/unix/newlib/arm/mod.rs delete mode 100644 vendor/libc/src/unix/newlib/espidf/mod.rs delete mode 100644 
vendor/libc/src/unix/newlib/generic.rs delete mode 100644 vendor/libc/src/unix/newlib/horizon/mod.rs delete mode 100644 vendor/libc/src/unix/newlib/mod.rs delete mode 100644 vendor/libc/src/unix/newlib/powerpc/mod.rs delete mode 100644 vendor/libc/src/unix/newlib/rtems/mod.rs delete mode 100644 vendor/libc/src/unix/newlib/vita/mod.rs delete mode 100644 vendor/libc/src/unix/nto/aarch64.rs delete mode 100644 vendor/libc/src/unix/nto/mod.rs delete mode 100644 vendor/libc/src/unix/nto/neutrino.rs delete mode 100644 vendor/libc/src/unix/nto/x86_64.rs delete mode 100644 vendor/libc/src/unix/nuttx/mod.rs delete mode 100644 vendor/libc/src/unix/redox/mod.rs delete mode 100644 vendor/libc/src/unix/solarish/compat.rs delete mode 100644 vendor/libc/src/unix/solarish/illumos.rs delete mode 100644 vendor/libc/src/unix/solarish/mod.rs delete mode 100644 vendor/libc/src/unix/solarish/solaris.rs delete mode 100644 vendor/libc/src/unix/solarish/x86.rs delete mode 100644 vendor/libc/src/unix/solarish/x86_64.rs delete mode 100644 vendor/libc/src/unix/solarish/x86_common.rs delete mode 100644 vendor/libc/src/vxworks/aarch64.rs delete mode 100644 vendor/libc/src/vxworks/arm.rs delete mode 100644 vendor/libc/src/vxworks/mod.rs delete mode 100644 vendor/libc/src/vxworks/powerpc.rs delete mode 100644 vendor/libc/src/vxworks/powerpc64.rs delete mode 100644 vendor/libc/src/vxworks/riscv32.rs delete mode 100644 vendor/libc/src/vxworks/riscv64.rs delete mode 100644 vendor/libc/src/vxworks/x86.rs delete mode 100644 vendor/libc/src/vxworks/x86_64.rs delete mode 100644 vendor/libc/src/wasi/mod.rs delete mode 100644 vendor/libc/src/wasi/p2.rs delete mode 100644 vendor/libc/src/windows/gnu/mod.rs delete mode 100644 vendor/libc/src/windows/mod.rs delete mode 100644 vendor/libc/src/windows/msvc/mod.rs delete mode 100644 vendor/libc/src/xous.rs delete mode 100644 vendor/libc/tests/const_fn.rs delete mode 100644 vendor/libloading/.cargo-checksum.json delete mode 100644 vendor/libloading/.cargo_vcs_info.json delete mode 100644 vendor/libloading/.github/workflows/libloading.yml delete mode 100644 vendor/libloading/Cargo.lock delete mode 100644 vendor/libloading/Cargo.toml delete mode 100644 vendor/libloading/LICENSE delete mode 100644 vendor/libloading/README.mkd delete mode 100644 vendor/libloading/src/changelog.rs delete mode 100644 vendor/libloading/src/error.rs delete mode 100644 vendor/libloading/src/lib.rs delete mode 100644 vendor/libloading/src/os/mod.rs delete mode 100644 vendor/libloading/src/os/unix/consts.rs delete mode 100644 vendor/libloading/src/os/unix/mod.rs delete mode 100644 vendor/libloading/src/os/windows/mod.rs delete mode 100644 vendor/libloading/src/safe.rs delete mode 100644 vendor/libloading/src/test_helpers.rs delete mode 100644 vendor/libloading/src/util.rs delete mode 100644 vendor/libloading/tests/constants.rs delete mode 100644 vendor/libloading/tests/functions.rs delete mode 100644 vendor/libloading/tests/library_filename.rs delete mode 100644 vendor/libloading/tests/markers.rs delete mode 100644 vendor/libloading/tests/windows.rs delete mode 100644 vendor/log/.cargo-checksum.json delete mode 100644 vendor/log/.cargo_vcs_info.json delete mode 100644 vendor/log/.github/workflows/main.yml delete mode 100644 vendor/log/CHANGELOG.md delete mode 100644 vendor/log/Cargo.lock delete mode 100644 vendor/log/Cargo.toml delete mode 100644 vendor/log/LICENSE-APACHE delete mode 100644 vendor/log/LICENSE-MIT delete mode 100644 vendor/log/README.md delete mode 100644 vendor/log/benches/value.rs delete mode 
100644 vendor/log/src/__private_api.rs delete mode 100644 vendor/log/src/kv/error.rs delete mode 100644 vendor/log/src/kv/key.rs delete mode 100644 vendor/log/src/kv/mod.rs delete mode 100644 vendor/log/src/kv/source.rs delete mode 100644 vendor/log/src/kv/value.rs delete mode 100644 vendor/log/src/lib.rs delete mode 100644 vendor/log/src/macros.rs delete mode 100644 vendor/log/src/serde.rs delete mode 100644 vendor/log/tests/integration.rs delete mode 100644 vendor/log/tests/macros.rs delete mode 100644 vendor/log/triagebot.toml delete mode 100644 vendor/memchr/.cargo-checksum.json delete mode 100644 vendor/memchr/.cargo_vcs_info.json delete mode 100644 vendor/memchr/.ignore delete mode 100644 vendor/memchr/.vim/coc-settings.json delete mode 100644 vendor/memchr/COPYING delete mode 100644 vendor/memchr/Cargo.lock delete mode 100644 vendor/memchr/Cargo.toml delete mode 100644 vendor/memchr/LICENSE-MIT delete mode 100644 vendor/memchr/README.md delete mode 100644 vendor/memchr/UNLICENSE delete mode 100644 vendor/memchr/rustfmt.toml delete mode 100644 vendor/memchr/src/arch/aarch64/memchr.rs delete mode 100644 vendor/memchr/src/arch/aarch64/mod.rs delete mode 100644 vendor/memchr/src/arch/aarch64/neon/memchr.rs delete mode 100644 vendor/memchr/src/arch/aarch64/neon/mod.rs delete mode 100644 vendor/memchr/src/arch/aarch64/neon/packedpair.rs delete mode 100644 vendor/memchr/src/arch/all/memchr.rs delete mode 100644 vendor/memchr/src/arch/all/mod.rs delete mode 100644 vendor/memchr/src/arch/all/packedpair/default_rank.rs delete mode 100644 vendor/memchr/src/arch/all/packedpair/mod.rs delete mode 100644 vendor/memchr/src/arch/all/rabinkarp.rs delete mode 100644 vendor/memchr/src/arch/all/shiftor.rs delete mode 100644 vendor/memchr/src/arch/all/twoway.rs delete mode 100644 vendor/memchr/src/arch/generic/memchr.rs delete mode 100644 vendor/memchr/src/arch/generic/mod.rs delete mode 100644 vendor/memchr/src/arch/generic/packedpair.rs delete mode 100644 vendor/memchr/src/arch/mod.rs delete mode 100644 vendor/memchr/src/arch/wasm32/memchr.rs delete mode 100644 vendor/memchr/src/arch/wasm32/mod.rs delete mode 100644 vendor/memchr/src/arch/wasm32/simd128/memchr.rs delete mode 100644 vendor/memchr/src/arch/wasm32/simd128/mod.rs delete mode 100644 vendor/memchr/src/arch/wasm32/simd128/packedpair.rs delete mode 100644 vendor/memchr/src/arch/x86_64/avx2/memchr.rs delete mode 100644 vendor/memchr/src/arch/x86_64/avx2/mod.rs delete mode 100644 vendor/memchr/src/arch/x86_64/avx2/packedpair.rs delete mode 100644 vendor/memchr/src/arch/x86_64/memchr.rs delete mode 100644 vendor/memchr/src/arch/x86_64/mod.rs delete mode 100644 vendor/memchr/src/arch/x86_64/sse2/memchr.rs delete mode 100644 vendor/memchr/src/arch/x86_64/sse2/mod.rs delete mode 100644 vendor/memchr/src/arch/x86_64/sse2/packedpair.rs delete mode 100644 vendor/memchr/src/cow.rs delete mode 100644 vendor/memchr/src/ext.rs delete mode 100644 vendor/memchr/src/lib.rs delete mode 100644 vendor/memchr/src/macros.rs delete mode 100644 vendor/memchr/src/memchr.rs delete mode 100644 vendor/memchr/src/memmem/mod.rs delete mode 100644 vendor/memchr/src/memmem/searcher.rs delete mode 100644 vendor/memchr/src/tests/memchr/mod.rs delete mode 100644 vendor/memchr/src/tests/memchr/naive.rs delete mode 100644 vendor/memchr/src/tests/memchr/prop.rs delete mode 100644 vendor/memchr/src/tests/mod.rs delete mode 100644 vendor/memchr/src/tests/packedpair.rs delete mode 100644 vendor/memchr/src/tests/substring/mod.rs delete mode 100644 
vendor/memchr/src/tests/substring/naive.rs delete mode 100644 vendor/memchr/src/tests/substring/prop.rs delete mode 100644 vendor/memchr/src/vector.rs delete mode 100644 vendor/minimal-lexical/.cargo-checksum.json delete mode 100644 vendor/minimal-lexical/.cargo_vcs_info.json delete mode 100644 vendor/minimal-lexical/.github/ISSUE_TEMPLATE/bug_report.md delete mode 100644 vendor/minimal-lexical/.github/ISSUE_TEMPLATE/custom.md delete mode 100644 vendor/minimal-lexical/.github/ISSUE_TEMPLATE/documentation.md delete mode 100644 vendor/minimal-lexical/.github/ISSUE_TEMPLATE/feature_request.md delete mode 100644 vendor/minimal-lexical/.github/ISSUE_TEMPLATE/question.md delete mode 100644 vendor/minimal-lexical/.github/PULL_REQUEST_TEMPLATE/bug_fix.md delete mode 100644 vendor/minimal-lexical/.github/PULL_REQUEST_TEMPLATE/custom.md delete mode 100644 vendor/minimal-lexical/.github/PULL_REQUEST_TEMPLATE/documentation.md delete mode 100644 vendor/minimal-lexical/.github/workflows/Cross.yml delete mode 100644 vendor/minimal-lexical/.github/workflows/Features.yml delete mode 100644 vendor/minimal-lexical/.github/workflows/OSX.yml delete mode 100644 vendor/minimal-lexical/.github/workflows/Simple.yml delete mode 100644 vendor/minimal-lexical/.github/workflows/Valgrind.yml delete mode 100644 vendor/minimal-lexical/.gitmodules delete mode 100644 vendor/minimal-lexical/CHANGELOG delete mode 100644 vendor/minimal-lexical/CODE_OF_CONDUCT.md delete mode 100644 vendor/minimal-lexical/Cargo.toml delete mode 100644 vendor/minimal-lexical/LICENSE-APACHE delete mode 100644 vendor/minimal-lexical/LICENSE-MIT delete mode 100644 vendor/minimal-lexical/LICENSE.md delete mode 100644 vendor/minimal-lexical/README.md delete mode 100644 vendor/minimal-lexical/clippy.toml delete mode 100644 vendor/minimal-lexical/rustfmt.toml delete mode 100644 vendor/minimal-lexical/src/bellerophon.rs delete mode 100644 vendor/minimal-lexical/src/bigint.rs delete mode 100644 vendor/minimal-lexical/src/extended_float.rs delete mode 100644 vendor/minimal-lexical/src/fpu.rs delete mode 100644 vendor/minimal-lexical/src/heapvec.rs delete mode 100644 vendor/minimal-lexical/src/lemire.rs delete mode 100644 vendor/minimal-lexical/src/lib.rs delete mode 100644 vendor/minimal-lexical/src/libm.rs delete mode 100644 vendor/minimal-lexical/src/mask.rs delete mode 100644 vendor/minimal-lexical/src/num.rs delete mode 100644 vendor/minimal-lexical/src/number.rs delete mode 100644 vendor/minimal-lexical/src/parse.rs delete mode 100644 vendor/minimal-lexical/src/rounding.rs delete mode 100644 vendor/minimal-lexical/src/slow.rs delete mode 100644 vendor/minimal-lexical/src/stackvec.rs delete mode 100644 vendor/minimal-lexical/src/table.rs delete mode 100644 vendor/minimal-lexical/src/table_bellerophon.rs delete mode 100644 vendor/minimal-lexical/src/table_lemire.rs delete mode 100644 vendor/minimal-lexical/src/table_small.rs delete mode 100644 vendor/minimal-lexical/tests/bellerophon.rs delete mode 100644 vendor/minimal-lexical/tests/bellerophon_tests.rs delete mode 100644 vendor/minimal-lexical/tests/integration_tests.rs delete mode 100644 vendor/minimal-lexical/tests/lemire_tests.rs delete mode 100644 vendor/minimal-lexical/tests/libm_tests.rs delete mode 100644 vendor/minimal-lexical/tests/mask_tests.rs delete mode 100644 vendor/minimal-lexical/tests/number_tests.rs delete mode 100644 vendor/minimal-lexical/tests/parse_tests.rs delete mode 100644 vendor/minimal-lexical/tests/rounding_tests.rs delete mode 100644 
vendor/minimal-lexical/tests/slow_tests.rs delete mode 100644 vendor/minimal-lexical/tests/stackvec.rs delete mode 100644 vendor/minimal-lexical/tests/vec_tests.rs delete mode 100644 vendor/nom/.cargo-checksum.json delete mode 100644 vendor/nom/.cargo_vcs_info.json delete mode 100644 vendor/nom/CHANGELOG.md delete mode 100644 vendor/nom/Cargo.lock delete mode 100644 vendor/nom/Cargo.toml delete mode 100644 vendor/nom/LICENSE delete mode 100644 vendor/nom/README.md delete mode 100644 vendor/nom/doc/nom_recipes.md delete mode 100644 vendor/nom/src/bits/complete.rs delete mode 100644 vendor/nom/src/bits/mod.rs delete mode 100644 vendor/nom/src/bits/streaming.rs delete mode 100644 vendor/nom/src/branch/mod.rs delete mode 100644 vendor/nom/src/branch/tests.rs delete mode 100644 vendor/nom/src/bytes/complete.rs delete mode 100644 vendor/nom/src/bytes/mod.rs delete mode 100644 vendor/nom/src/bytes/streaming.rs delete mode 100644 vendor/nom/src/bytes/tests.rs delete mode 100644 vendor/nom/src/character/complete.rs delete mode 100644 vendor/nom/src/character/mod.rs delete mode 100644 vendor/nom/src/character/streaming.rs delete mode 100644 vendor/nom/src/character/tests.rs delete mode 100644 vendor/nom/src/combinator/mod.rs delete mode 100644 vendor/nom/src/combinator/tests.rs delete mode 100644 vendor/nom/src/error.rs delete mode 100644 vendor/nom/src/internal.rs delete mode 100644 vendor/nom/src/lib.rs delete mode 100644 vendor/nom/src/macros.rs delete mode 100644 vendor/nom/src/multi/mod.rs delete mode 100644 vendor/nom/src/multi/tests.rs delete mode 100644 vendor/nom/src/number/complete.rs delete mode 100644 vendor/nom/src/number/mod.rs delete mode 100644 vendor/nom/src/number/streaming.rs delete mode 100644 vendor/nom/src/sequence/mod.rs delete mode 100644 vendor/nom/src/sequence/tests.rs delete mode 100644 vendor/nom/src/str.rs delete mode 100644 vendor/nom/src/traits.rs delete mode 100644 vendor/nom/tests/arithmetic.rs delete mode 100644 vendor/nom/tests/arithmetic_ast.rs delete mode 100644 vendor/nom/tests/css.rs delete mode 100644 vendor/nom/tests/custom_errors.rs delete mode 100644 vendor/nom/tests/escaped.rs delete mode 100644 vendor/nom/tests/float.rs delete mode 100644 vendor/nom/tests/fnmut.rs delete mode 100644 vendor/nom/tests/ini.rs delete mode 100644 vendor/nom/tests/ini_str.rs delete mode 100644 vendor/nom/tests/issues.rs delete mode 100644 vendor/nom/tests/json.rs delete mode 100644 vendor/nom/tests/mp4.rs delete mode 100644 vendor/nom/tests/multiline.rs delete mode 100644 vendor/nom/tests/overflow.rs delete mode 100644 vendor/nom/tests/reborrow_fold.rs delete mode 100644 vendor/prettyplease/.cargo-checksum.json delete mode 100644 vendor/prettyplease/.cargo_vcs_info.json delete mode 100644 vendor/prettyplease/.github/FUNDING.yml delete mode 100644 vendor/prettyplease/.github/workflows/ci.yml delete mode 100644 vendor/prettyplease/Cargo.lock delete mode 100644 vendor/prettyplease/Cargo.toml delete mode 100644 vendor/prettyplease/LICENSE-APACHE delete mode 100644 vendor/prettyplease/LICENSE-MIT delete mode 100644 vendor/prettyplease/README.md delete mode 100644 vendor/prettyplease/build.rs delete mode 100644 vendor/prettyplease/examples/.tokeignore delete mode 100644 vendor/prettyplease/examples/input.rs delete mode 100644 vendor/prettyplease/examples/output.prettyplease.rs delete mode 100644 vendor/prettyplease/examples/output.rustc.rs delete mode 100644 vendor/prettyplease/examples/output.rustfmt.rs delete mode 100644 vendor/prettyplease/src/algorithm.rs delete mode 100644 
vendor/prettyplease/src/attr.rs delete mode 100644 vendor/prettyplease/src/classify.rs delete mode 100644 vendor/prettyplease/src/convenience.rs delete mode 100644 vendor/prettyplease/src/data.rs delete mode 100644 vendor/prettyplease/src/expr.rs delete mode 100644 vendor/prettyplease/src/file.rs delete mode 100644 vendor/prettyplease/src/fixup.rs delete mode 100644 vendor/prettyplease/src/generics.rs delete mode 100644 vendor/prettyplease/src/item.rs delete mode 100644 vendor/prettyplease/src/iter.rs delete mode 100644 vendor/prettyplease/src/lib.rs delete mode 100644 vendor/prettyplease/src/lifetime.rs delete mode 100644 vendor/prettyplease/src/lit.rs delete mode 100644 vendor/prettyplease/src/mac.rs delete mode 100644 vendor/prettyplease/src/pat.rs delete mode 100644 vendor/prettyplease/src/path.rs delete mode 100644 vendor/prettyplease/src/precedence.rs delete mode 100644 vendor/prettyplease/src/ring.rs delete mode 100644 vendor/prettyplease/src/stmt.rs delete mode 100644 vendor/prettyplease/src/token.rs delete mode 100644 vendor/prettyplease/src/ty.rs delete mode 100644 vendor/prettyplease/tests/test.rs delete mode 100644 vendor/prettyplease/tests/test_precedence.rs delete mode 100644 vendor/proc-macro2/.cargo-checksum.json delete mode 100644 vendor/proc-macro2/.cargo_vcs_info.json delete mode 100644 vendor/proc-macro2/.github/FUNDING.yml delete mode 100644 vendor/proc-macro2/.github/workflows/ci.yml delete mode 100644 vendor/proc-macro2/Cargo.lock delete mode 100644 vendor/proc-macro2/Cargo.toml delete mode 100644 vendor/proc-macro2/LICENSE-APACHE delete mode 100644 vendor/proc-macro2/LICENSE-MIT delete mode 100644 vendor/proc-macro2/README.md delete mode 100644 vendor/proc-macro2/build.rs delete mode 100644 vendor/proc-macro2/rust-toolchain.toml delete mode 100644 vendor/proc-macro2/src/detection.rs delete mode 100644 vendor/proc-macro2/src/extra.rs delete mode 100644 vendor/proc-macro2/src/fallback.rs delete mode 100644 vendor/proc-macro2/src/lib.rs delete mode 100644 vendor/proc-macro2/src/location.rs delete mode 100644 vendor/proc-macro2/src/marker.rs delete mode 100644 vendor/proc-macro2/src/num.rs delete mode 100644 vendor/proc-macro2/src/parse.rs delete mode 100644 vendor/proc-macro2/src/probe.rs delete mode 100644 vendor/proc-macro2/src/probe/proc_macro_span.rs delete mode 100644 vendor/proc-macro2/src/probe/proc_macro_span_file.rs delete mode 100644 vendor/proc-macro2/src/probe/proc_macro_span_location.rs delete mode 100644 vendor/proc-macro2/src/rcvec.rs delete mode 100644 vendor/proc-macro2/src/rustc_literal_escaper.rs delete mode 100644 vendor/proc-macro2/src/wrapper.rs delete mode 100644 vendor/proc-macro2/tests/comments.rs delete mode 100644 vendor/proc-macro2/tests/features.rs delete mode 100644 vendor/proc-macro2/tests/marker.rs delete mode 100644 vendor/proc-macro2/tests/test.rs delete mode 100644 vendor/proc-macro2/tests/test_fmt.rs delete mode 100644 vendor/proc-macro2/tests/test_size.rs delete mode 100644 vendor/quote/.cargo-checksum.json delete mode 100644 vendor/quote/.cargo_vcs_info.json delete mode 100644 vendor/quote/.github/FUNDING.yml delete mode 100644 vendor/quote/.github/workflows/ci.yml delete mode 100644 vendor/quote/Cargo.lock delete mode 100644 vendor/quote/Cargo.toml delete mode 100644 vendor/quote/LICENSE-APACHE delete mode 100644 vendor/quote/LICENSE-MIT delete mode 100644 vendor/quote/README.md delete mode 100644 vendor/quote/build.rs delete mode 100644 vendor/quote/rust-toolchain.toml delete mode 100644 vendor/quote/src/ext.rs delete mode 100644 
vendor/quote/src/format.rs delete mode 100644 vendor/quote/src/ident_fragment.rs delete mode 100644 vendor/quote/src/lib.rs delete mode 100644 vendor/quote/src/runtime.rs delete mode 100644 vendor/quote/src/spanned.rs delete mode 100644 vendor/quote/src/to_tokens.rs delete mode 100644 vendor/quote/tests/compiletest.rs delete mode 100644 vendor/quote/tests/test.rs delete mode 100644 vendor/quote/tests/ui/does-not-have-iter-interpolated-dup.rs delete mode 100644 vendor/quote/tests/ui/does-not-have-iter-interpolated-dup.stderr delete mode 100644 vendor/quote/tests/ui/does-not-have-iter-interpolated.rs delete mode 100644 vendor/quote/tests/ui/does-not-have-iter-interpolated.stderr delete mode 100644 vendor/quote/tests/ui/does-not-have-iter-separated.rs delete mode 100644 vendor/quote/tests/ui/does-not-have-iter-separated.stderr delete mode 100644 vendor/quote/tests/ui/does-not-have-iter.rs delete mode 100644 vendor/quote/tests/ui/does-not-have-iter.stderr delete mode 100644 vendor/quote/tests/ui/not-quotable.rs delete mode 100644 vendor/quote/tests/ui/not-quotable.stderr delete mode 100644 vendor/quote/tests/ui/not-repeatable.rs delete mode 100644 vendor/quote/tests/ui/not-repeatable.stderr delete mode 100644 vendor/quote/tests/ui/wrong-type-span.rs delete mode 100644 vendor/quote/tests/ui/wrong-type-span.stderr delete mode 100644 vendor/regex-automata/.cargo-checksum.json delete mode 100644 vendor/regex-automata/.cargo_vcs_info.json delete mode 100644 vendor/regex-automata/Cargo.lock delete mode 100644 vendor/regex-automata/Cargo.toml delete mode 100644 vendor/regex-automata/LICENSE-APACHE delete mode 100644 vendor/regex-automata/LICENSE-MIT delete mode 100644 vendor/regex-automata/README.md delete mode 100644 vendor/regex-automata/src/dfa/accel.rs delete mode 100644 vendor/regex-automata/src/dfa/automaton.rs delete mode 100644 vendor/regex-automata/src/dfa/dense.rs delete mode 100644 vendor/regex-automata/src/dfa/determinize.rs delete mode 100644 vendor/regex-automata/src/dfa/minimize.rs delete mode 100644 vendor/regex-automata/src/dfa/mod.rs delete mode 100644 vendor/regex-automata/src/dfa/onepass.rs delete mode 100644 vendor/regex-automata/src/dfa/regex.rs delete mode 100644 vendor/regex-automata/src/dfa/remapper.rs delete mode 100644 vendor/regex-automata/src/dfa/search.rs delete mode 100644 vendor/regex-automata/src/dfa/sparse.rs delete mode 100644 vendor/regex-automata/src/dfa/special.rs delete mode 100644 vendor/regex-automata/src/dfa/start.rs delete mode 100644 vendor/regex-automata/src/hybrid/dfa.rs delete mode 100644 vendor/regex-automata/src/hybrid/error.rs delete mode 100644 vendor/regex-automata/src/hybrid/id.rs delete mode 100644 vendor/regex-automata/src/hybrid/mod.rs delete mode 100644 vendor/regex-automata/src/hybrid/regex.rs delete mode 100644 vendor/regex-automata/src/hybrid/search.rs delete mode 100644 vendor/regex-automata/src/lib.rs delete mode 100644 vendor/regex-automata/src/macros.rs delete mode 100644 vendor/regex-automata/src/meta/error.rs delete mode 100644 vendor/regex-automata/src/meta/limited.rs delete mode 100644 vendor/regex-automata/src/meta/literal.rs delete mode 100644 vendor/regex-automata/src/meta/mod.rs delete mode 100644 vendor/regex-automata/src/meta/regex.rs delete mode 100644 vendor/regex-automata/src/meta/reverse_inner.rs delete mode 100644 vendor/regex-automata/src/meta/stopat.rs delete mode 100644 vendor/regex-automata/src/meta/strategy.rs delete mode 100644 vendor/regex-automata/src/meta/wrappers.rs delete mode 100644 
vendor/regex-automata/src/nfa/mod.rs delete mode 100644 vendor/regex-automata/src/nfa/thompson/backtrack.rs delete mode 100644 vendor/regex-automata/src/nfa/thompson/builder.rs delete mode 100644 vendor/regex-automata/src/nfa/thompson/compiler.rs delete mode 100644 vendor/regex-automata/src/nfa/thompson/error.rs delete mode 100644 vendor/regex-automata/src/nfa/thompson/literal_trie.rs delete mode 100644 vendor/regex-automata/src/nfa/thompson/map.rs delete mode 100644 vendor/regex-automata/src/nfa/thompson/mod.rs delete mode 100644 vendor/regex-automata/src/nfa/thompson/nfa.rs delete mode 100644 vendor/regex-automata/src/nfa/thompson/pikevm.rs delete mode 100644 vendor/regex-automata/src/nfa/thompson/range_trie.rs delete mode 100644 vendor/regex-automata/src/util/alphabet.rs delete mode 100644 vendor/regex-automata/src/util/captures.rs delete mode 100644 vendor/regex-automata/src/util/determinize/mod.rs delete mode 100644 vendor/regex-automata/src/util/determinize/state.rs delete mode 100644 vendor/regex-automata/src/util/empty.rs delete mode 100644 vendor/regex-automata/src/util/escape.rs delete mode 100644 vendor/regex-automata/src/util/int.rs delete mode 100644 vendor/regex-automata/src/util/interpolate.rs delete mode 100644 vendor/regex-automata/src/util/iter.rs delete mode 100644 vendor/regex-automata/src/util/lazy.rs delete mode 100644 vendor/regex-automata/src/util/look.rs delete mode 100644 vendor/regex-automata/src/util/memchr.rs delete mode 100644 vendor/regex-automata/src/util/mod.rs delete mode 100644 vendor/regex-automata/src/util/pool.rs delete mode 100644 vendor/regex-automata/src/util/prefilter/aho_corasick.rs delete mode 100644 vendor/regex-automata/src/util/prefilter/byteset.rs delete mode 100644 vendor/regex-automata/src/util/prefilter/memchr.rs delete mode 100644 vendor/regex-automata/src/util/prefilter/memmem.rs delete mode 100644 vendor/regex-automata/src/util/prefilter/mod.rs delete mode 100644 vendor/regex-automata/src/util/prefilter/teddy.rs delete mode 100644 vendor/regex-automata/src/util/primitives.rs delete mode 100644 vendor/regex-automata/src/util/search.rs delete mode 100644 vendor/regex-automata/src/util/sparse_set.rs delete mode 100644 vendor/regex-automata/src/util/start.rs delete mode 100644 vendor/regex-automata/src/util/syntax.rs delete mode 100644 vendor/regex-automata/src/util/unicode_data/mod.rs delete mode 100644 vendor/regex-automata/src/util/unicode_data/perl_word.rs delete mode 100644 vendor/regex-automata/src/util/utf8.rs delete mode 100644 vendor/regex-automata/src/util/wire.rs delete mode 100755 vendor/regex-automata/test delete mode 100644 vendor/regex-automata/tests/dfa/api.rs delete mode 100644 vendor/regex-automata/tests/dfa/mod.rs delete mode 100644 vendor/regex-automata/tests/dfa/onepass/mod.rs delete mode 100644 vendor/regex-automata/tests/dfa/onepass/suite.rs delete mode 100644 vendor/regex-automata/tests/dfa/regression.rs delete mode 100644 vendor/regex-automata/tests/dfa/suite.rs delete mode 100644 vendor/regex-automata/tests/fuzz/dense.rs delete mode 100644 vendor/regex-automata/tests/fuzz/mod.rs delete mode 100644 vendor/regex-automata/tests/fuzz/sparse.rs delete mode 100644 vendor/regex-automata/tests/fuzz/testdata/deserialize_dense_crash-9486fb7c8a93b12c12a62166b43d31640c0208a9 delete mode 100644 vendor/regex-automata/tests/fuzz/testdata/deserialize_dense_minimized-from-9486fb7c8a93b12c12a62166b43d31640c0208a9 delete mode 100644 
vendor/regex-automata/tests/fuzz/testdata/deserialize_sparse_crash-0da59c0434eaf35e5a6b470fa9244bb79c72b000 delete mode 100644 vendor/regex-automata/tests/fuzz/testdata/deserialize_sparse_crash-18cfc246f2ddfc3dfc92b0c7893178c7cf65efa9 delete mode 100644 vendor/regex-automata/tests/fuzz/testdata/deserialize_sparse_crash-61fd8e3003bf9d99f6c1e5a8488727eefd234b98 delete mode 100644 vendor/regex-automata/tests/fuzz/testdata/deserialize_sparse_crash-a1b839d899ced76d5d7d0f78f9edb7a421505838 delete mode 100644 vendor/regex-automata/tests/fuzz/testdata/deserialize_sparse_crash-c383ae07ec5e191422eadc492117439011816570 delete mode 100644 vendor/regex-automata/tests/fuzz/testdata/deserialize_sparse_crash-d07703ceb94b10dcd9e4acb809f2051420449e2b delete mode 100644 vendor/regex-automata/tests/fuzz/testdata/deserialize_sparse_crash-dbb8172d3984e7e7d03f4b5f8bb86ecd1460eff9 delete mode 100644 vendor/regex-automata/tests/gen/README.md delete mode 100644 vendor/regex-automata/tests/gen/dense/mod.rs delete mode 100644 vendor/regex-automata/tests/gen/dense/multi_pattern_v2.rs delete mode 100644 vendor/regex-automata/tests/gen/dense/multi_pattern_v2_fwd.bigendian.dfa delete mode 100644 vendor/regex-automata/tests/gen/dense/multi_pattern_v2_fwd.littleendian.dfa delete mode 100644 vendor/regex-automata/tests/gen/dense/multi_pattern_v2_rev.bigendian.dfa delete mode 100644 vendor/regex-automata/tests/gen/dense/multi_pattern_v2_rev.littleendian.dfa delete mode 100644 vendor/regex-automata/tests/gen/mod.rs delete mode 100644 vendor/regex-automata/tests/gen/sparse/mod.rs delete mode 100644 vendor/regex-automata/tests/gen/sparse/multi_pattern_v2.rs delete mode 100644 vendor/regex-automata/tests/gen/sparse/multi_pattern_v2_fwd.bigendian.dfa delete mode 100644 vendor/regex-automata/tests/gen/sparse/multi_pattern_v2_fwd.littleendian.dfa delete mode 100644 vendor/regex-automata/tests/gen/sparse/multi_pattern_v2_rev.bigendian.dfa delete mode 100644 vendor/regex-automata/tests/gen/sparse/multi_pattern_v2_rev.littleendian.dfa delete mode 100644 vendor/regex-automata/tests/hybrid/api.rs delete mode 100644 vendor/regex-automata/tests/hybrid/mod.rs delete mode 100644 vendor/regex-automata/tests/hybrid/suite.rs delete mode 100644 vendor/regex-automata/tests/lib.rs delete mode 100644 vendor/regex-automata/tests/meta/mod.rs delete mode 100644 vendor/regex-automata/tests/meta/suite.rs delete mode 100644 vendor/regex-automata/tests/nfa/mod.rs delete mode 100644 vendor/regex-automata/tests/nfa/thompson/backtrack/mod.rs delete mode 100644 vendor/regex-automata/tests/nfa/thompson/backtrack/suite.rs delete mode 100644 vendor/regex-automata/tests/nfa/thompson/mod.rs delete mode 100644 vendor/regex-automata/tests/nfa/thompson/pikevm/mod.rs delete mode 100644 vendor/regex-automata/tests/nfa/thompson/pikevm/suite.rs delete mode 100644 vendor/regex-syntax/.cargo-checksum.json delete mode 100644 vendor/regex-syntax/.cargo_vcs_info.json delete mode 100644 vendor/regex-syntax/Cargo.lock delete mode 100644 vendor/regex-syntax/Cargo.toml delete mode 100644 vendor/regex-syntax/LICENSE-APACHE delete mode 100644 vendor/regex-syntax/LICENSE-MIT delete mode 100644 vendor/regex-syntax/README.md delete mode 100644 vendor/regex-syntax/benches/bench.rs delete mode 100644 vendor/regex-syntax/src/ast/mod.rs delete mode 100644 vendor/regex-syntax/src/ast/parse.rs delete mode 100644 vendor/regex-syntax/src/ast/print.rs delete mode 100644 vendor/regex-syntax/src/ast/visitor.rs delete mode 100644 vendor/regex-syntax/src/debug.rs delete mode 100644 
vendor/regex-syntax/src/either.rs delete mode 100644 vendor/regex-syntax/src/error.rs delete mode 100644 vendor/regex-syntax/src/hir/interval.rs delete mode 100644 vendor/regex-syntax/src/hir/literal.rs delete mode 100644 vendor/regex-syntax/src/hir/mod.rs delete mode 100644 vendor/regex-syntax/src/hir/print.rs delete mode 100644 vendor/regex-syntax/src/hir/translate.rs delete mode 100644 vendor/regex-syntax/src/hir/visitor.rs delete mode 100644 vendor/regex-syntax/src/lib.rs delete mode 100644 vendor/regex-syntax/src/parser.rs delete mode 100644 vendor/regex-syntax/src/rank.rs delete mode 100644 vendor/regex-syntax/src/unicode.rs delete mode 100644 vendor/regex-syntax/src/unicode_tables/LICENSE-UNICODE delete mode 100644 vendor/regex-syntax/src/unicode_tables/age.rs delete mode 100644 vendor/regex-syntax/src/unicode_tables/case_folding_simple.rs delete mode 100644 vendor/regex-syntax/src/unicode_tables/general_category.rs delete mode 100644 vendor/regex-syntax/src/unicode_tables/grapheme_cluster_break.rs delete mode 100644 vendor/regex-syntax/src/unicode_tables/mod.rs delete mode 100644 vendor/regex-syntax/src/unicode_tables/perl_decimal.rs delete mode 100644 vendor/regex-syntax/src/unicode_tables/perl_space.rs delete mode 100644 vendor/regex-syntax/src/unicode_tables/perl_word.rs delete mode 100644 vendor/regex-syntax/src/unicode_tables/property_bool.rs delete mode 100644 vendor/regex-syntax/src/unicode_tables/property_names.rs delete mode 100644 vendor/regex-syntax/src/unicode_tables/property_values.rs delete mode 100644 vendor/regex-syntax/src/unicode_tables/script.rs delete mode 100644 vendor/regex-syntax/src/unicode_tables/script_extension.rs delete mode 100644 vendor/regex-syntax/src/unicode_tables/sentence_break.rs delete mode 100644 vendor/regex-syntax/src/unicode_tables/word_break.rs delete mode 100644 vendor/regex-syntax/src/utf8.rs delete mode 100755 vendor/regex-syntax/test delete mode 100644 vendor/regex/.cargo-checksum.json delete mode 100644 vendor/regex/.cargo_vcs_info.json delete mode 100644 vendor/regex/.vim/coc-settings.json delete mode 100644 vendor/regex/CHANGELOG.md delete mode 100644 vendor/regex/Cargo.lock delete mode 100644 vendor/regex/Cargo.toml delete mode 100644 vendor/regex/Cross.toml delete mode 100644 vendor/regex/LICENSE-APACHE delete mode 100644 vendor/regex/LICENSE-MIT delete mode 100644 vendor/regex/README.md delete mode 100644 vendor/regex/UNICODE.md delete mode 100644 vendor/regex/bench/README.md delete mode 100644 vendor/regex/rustfmt.toml delete mode 100644 vendor/regex/src/builders.rs delete mode 100644 vendor/regex/src/bytes.rs delete mode 100644 vendor/regex/src/error.rs delete mode 100644 vendor/regex/src/find_byte.rs delete mode 100644 vendor/regex/src/lib.rs delete mode 100644 vendor/regex/src/pattern.rs delete mode 100644 vendor/regex/src/regex/bytes.rs delete mode 100644 vendor/regex/src/regex/mod.rs delete mode 100644 vendor/regex/src/regex/string.rs delete mode 100644 vendor/regex/src/regexset/bytes.rs delete mode 100644 vendor/regex/src/regexset/mod.rs delete mode 100644 vendor/regex/src/regexset/string.rs delete mode 100755 vendor/regex/test delete mode 100644 vendor/regex/testdata/README.md delete mode 100644 vendor/regex/testdata/anchored.toml delete mode 100644 vendor/regex/testdata/bytes.toml delete mode 100644 vendor/regex/testdata/crazy.toml delete mode 100644 vendor/regex/testdata/crlf.toml delete mode 100644 vendor/regex/testdata/earliest.toml delete mode 100644 vendor/regex/testdata/empty.toml delete mode 100644 
vendor/regex/testdata/expensive.toml delete mode 100644 vendor/regex/testdata/flags.toml delete mode 100644 vendor/regex/testdata/fowler/basic.toml delete mode 100644 vendor/regex/testdata/fowler/dat/README delete mode 100644 vendor/regex/testdata/fowler/dat/basic.dat delete mode 100644 vendor/regex/testdata/fowler/dat/nullsubexpr.dat delete mode 100644 vendor/regex/testdata/fowler/dat/repetition.dat delete mode 100644 vendor/regex/testdata/fowler/nullsubexpr.toml delete mode 100644 vendor/regex/testdata/fowler/repetition.toml delete mode 100644 vendor/regex/testdata/iter.toml delete mode 100644 vendor/regex/testdata/leftmost-all.toml delete mode 100644 vendor/regex/testdata/line-terminator.toml delete mode 100644 vendor/regex/testdata/misc.toml delete mode 100644 vendor/regex/testdata/multiline.toml delete mode 100644 vendor/regex/testdata/no-unicode.toml delete mode 100644 vendor/regex/testdata/overlapping.toml delete mode 100644 vendor/regex/testdata/regex-lite.toml delete mode 100644 vendor/regex/testdata/regression.toml delete mode 100644 vendor/regex/testdata/set.toml delete mode 100644 vendor/regex/testdata/substring.toml delete mode 100644 vendor/regex/testdata/unicode.toml delete mode 100644 vendor/regex/testdata/utf8.toml delete mode 100644 vendor/regex/testdata/word-boundary-special.toml delete mode 100644 vendor/regex/testdata/word-boundary.toml delete mode 100644 vendor/regex/tests/lib.rs delete mode 100644 vendor/regex/tests/misc.rs delete mode 100644 vendor/regex/tests/regression.rs delete mode 100644 vendor/regex/tests/regression_fuzz.rs delete mode 100644 vendor/regex/tests/replace.rs delete mode 100644 vendor/regex/tests/searcher.rs delete mode 100644 vendor/regex/tests/suite_bytes.rs delete mode 100644 vendor/regex/tests/suite_bytes_set.rs delete mode 100644 vendor/regex/tests/suite_string.rs delete mode 100644 vendor/regex/tests/suite_string_set.rs delete mode 100644 vendor/rustc-hash/.cargo-checksum.json delete mode 100644 vendor/rustc-hash/.cargo_vcs_info.json delete mode 100644 vendor/rustc-hash/.github/workflows/rust.yml delete mode 100644 vendor/rustc-hash/CHANGELOG.md delete mode 100644 vendor/rustc-hash/CODE_OF_CONDUCT.md delete mode 100644 vendor/rustc-hash/Cargo.lock delete mode 100644 vendor/rustc-hash/Cargo.toml delete mode 100644 vendor/rustc-hash/LICENSE-APACHE delete mode 100644 vendor/rustc-hash/LICENSE-MIT delete mode 100644 vendor/rustc-hash/README.md delete mode 100644 vendor/rustc-hash/src/lib.rs delete mode 100644 vendor/rustc-hash/src/random_state.rs delete mode 100644 vendor/rustc-hash/src/seeded_state.rs delete mode 100644 vendor/shlex/.cargo-checksum.json delete mode 100644 vendor/shlex/.cargo_vcs_info.json delete mode 100644 vendor/shlex/.github/workflows/test.yml delete mode 100644 vendor/shlex/CHANGELOG.md delete mode 100644 vendor/shlex/Cargo.toml delete mode 100644 vendor/shlex/LICENSE-APACHE delete mode 100644 vendor/shlex/LICENSE-MIT delete mode 100644 vendor/shlex/README.md delete mode 100644 vendor/shlex/src/bytes.rs delete mode 100644 vendor/shlex/src/lib.rs delete mode 100644 vendor/shlex/src/quoting_warning.md delete mode 100644 vendor/syn/.cargo-checksum.json delete mode 100644 vendor/syn/.cargo_vcs_info.json delete mode 100644 vendor/syn/Cargo.lock delete mode 100644 vendor/syn/Cargo.toml delete mode 100644 vendor/syn/LICENSE-APACHE delete mode 100644 vendor/syn/LICENSE-MIT delete mode 100644 vendor/syn/README.md delete mode 100644 vendor/syn/benches/file.rs delete mode 100644 vendor/syn/benches/rust.rs delete mode 100644 
vendor/syn/src/attr.rs delete mode 100644 vendor/syn/src/bigint.rs delete mode 100644 vendor/syn/src/buffer.rs delete mode 100644 vendor/syn/src/classify.rs delete mode 100644 vendor/syn/src/custom_keyword.rs delete mode 100644 vendor/syn/src/custom_punctuation.rs delete mode 100644 vendor/syn/src/data.rs delete mode 100644 vendor/syn/src/derive.rs delete mode 100644 vendor/syn/src/discouraged.rs delete mode 100644 vendor/syn/src/drops.rs delete mode 100644 vendor/syn/src/error.rs delete mode 100644 vendor/syn/src/export.rs delete mode 100644 vendor/syn/src/expr.rs delete mode 100644 vendor/syn/src/ext.rs delete mode 100644 vendor/syn/src/file.rs delete mode 100644 vendor/syn/src/fixup.rs delete mode 100644 vendor/syn/src/gen/clone.rs delete mode 100644 vendor/syn/src/gen/debug.rs delete mode 100644 vendor/syn/src/gen/eq.rs delete mode 100644 vendor/syn/src/gen/fold.rs delete mode 100644 vendor/syn/src/gen/hash.rs delete mode 100644 vendor/syn/src/gen/token.css delete mode 100644 vendor/syn/src/gen/visit.rs delete mode 100644 vendor/syn/src/gen/visit_mut.rs delete mode 100644 vendor/syn/src/generics.rs delete mode 100644 vendor/syn/src/group.rs delete mode 100644 vendor/syn/src/ident.rs delete mode 100644 vendor/syn/src/item.rs delete mode 100644 vendor/syn/src/lib.rs delete mode 100644 vendor/syn/src/lifetime.rs delete mode 100644 vendor/syn/src/lit.rs delete mode 100644 vendor/syn/src/lookahead.rs delete mode 100644 vendor/syn/src/mac.rs delete mode 100644 vendor/syn/src/macros.rs delete mode 100644 vendor/syn/src/meta.rs delete mode 100644 vendor/syn/src/op.rs delete mode 100644 vendor/syn/src/parse.rs delete mode 100644 vendor/syn/src/parse_macro_input.rs delete mode 100644 vendor/syn/src/parse_quote.rs delete mode 100644 vendor/syn/src/pat.rs delete mode 100644 vendor/syn/src/path.rs delete mode 100644 vendor/syn/src/precedence.rs delete mode 100644 vendor/syn/src/print.rs delete mode 100644 vendor/syn/src/punctuated.rs delete mode 100644 vendor/syn/src/restriction.rs delete mode 100644 vendor/syn/src/scan_expr.rs delete mode 100644 vendor/syn/src/sealed.rs delete mode 100644 vendor/syn/src/span.rs delete mode 100644 vendor/syn/src/spanned.rs delete mode 100644 vendor/syn/src/stmt.rs delete mode 100644 vendor/syn/src/thread.rs delete mode 100644 vendor/syn/src/token.rs delete mode 100644 vendor/syn/src/tt.rs delete mode 100644 vendor/syn/src/ty.rs delete mode 100644 vendor/syn/src/verbatim.rs delete mode 100644 vendor/syn/src/whitespace.rs delete mode 100644 vendor/syn/tests/common/eq.rs delete mode 100644 vendor/syn/tests/common/mod.rs delete mode 100644 vendor/syn/tests/common/parse.rs delete mode 100644 vendor/syn/tests/common/visit.rs delete mode 100644 vendor/syn/tests/debug/gen.rs delete mode 100644 vendor/syn/tests/debug/mod.rs delete mode 100644 vendor/syn/tests/macros/mod.rs delete mode 100644 vendor/syn/tests/regression.rs delete mode 100644 vendor/syn/tests/regression/issue1108.rs delete mode 100644 vendor/syn/tests/regression/issue1235.rs delete mode 100644 vendor/syn/tests/repo/mod.rs delete mode 100644 vendor/syn/tests/repo/progress.rs delete mode 100644 vendor/syn/tests/snapshot/mod.rs delete mode 100644 vendor/syn/tests/test_asyncness.rs delete mode 100644 vendor/syn/tests/test_attribute.rs delete mode 100644 vendor/syn/tests/test_derive_input.rs delete mode 100644 vendor/syn/tests/test_expr.rs delete mode 100644 vendor/syn/tests/test_generics.rs delete mode 100644 vendor/syn/tests/test_grouping.rs delete mode 100644 vendor/syn/tests/test_ident.rs delete mode 100644 
vendor/syn/tests/test_item.rs delete mode 100644 vendor/syn/tests/test_lit.rs delete mode 100644 vendor/syn/tests/test_meta.rs delete mode 100644 vendor/syn/tests/test_parse_buffer.rs delete mode 100644 vendor/syn/tests/test_parse_quote.rs delete mode 100644 vendor/syn/tests/test_parse_stream.rs delete mode 100644 vendor/syn/tests/test_pat.rs delete mode 100644 vendor/syn/tests/test_path.rs delete mode 100644 vendor/syn/tests/test_precedence.rs delete mode 100644 vendor/syn/tests/test_punctuated.rs delete mode 100644 vendor/syn/tests/test_receiver.rs delete mode 100644 vendor/syn/tests/test_round_trip.rs delete mode 100644 vendor/syn/tests/test_shebang.rs delete mode 100644 vendor/syn/tests/test_size.rs delete mode 100644 vendor/syn/tests/test_stmt.rs delete mode 100644 vendor/syn/tests/test_token_trees.rs delete mode 100644 vendor/syn/tests/test_ty.rs delete mode 100644 vendor/syn/tests/test_unparenthesize.rs delete mode 100644 vendor/syn/tests/test_visibility.rs delete mode 100644 vendor/syn/tests/zzz_stable.rs delete mode 100644 vendor/unicode-ident/.cargo-checksum.json delete mode 100644 vendor/unicode-ident/.cargo_vcs_info.json delete mode 100644 vendor/unicode-ident/.github/FUNDING.yml delete mode 100644 vendor/unicode-ident/.github/workflows/ci.yml delete mode 100644 vendor/unicode-ident/Cargo.lock delete mode 100644 vendor/unicode-ident/Cargo.toml delete mode 100644 vendor/unicode-ident/LICENSE-APACHE delete mode 100644 vendor/unicode-ident/LICENSE-MIT delete mode 100644 vendor/unicode-ident/LICENSE-UNICODE delete mode 100644 vendor/unicode-ident/README.md delete mode 100644 vendor/unicode-ident/benches/xid.rs delete mode 100644 vendor/unicode-ident/src/lib.rs delete mode 100644 vendor/unicode-ident/src/tables.rs delete mode 100644 vendor/unicode-ident/tests/compare.rs delete mode 100644 vendor/unicode-ident/tests/fst/.gitignore delete mode 100644 vendor/unicode-ident/tests/fst/mod.rs delete mode 100644 vendor/unicode-ident/tests/fst/xid_continue.fst delete mode 100644 vendor/unicode-ident/tests/fst/xid_start.fst delete mode 100644 vendor/unicode-ident/tests/roaring/mod.rs delete mode 100644 vendor/unicode-ident/tests/static_size.rs delete mode 100644 vendor/unicode-ident/tests/tables/mod.rs delete mode 100644 vendor/unicode-ident/tests/tables/tables.rs delete mode 100644 vendor/unicode-ident/tests/trie/mod.rs delete mode 100644 vendor/unicode-ident/tests/trie/trie.rs delete mode 100644 vendor/windows-link/.cargo-checksum.json delete mode 100644 vendor/windows-link/.cargo_vcs_info.json delete mode 100644 vendor/windows-link/Cargo.lock delete mode 100644 vendor/windows-link/Cargo.toml delete mode 100644 vendor/windows-link/license-apache-2.0 delete mode 100644 vendor/windows-link/license-mit delete mode 100644 vendor/windows-link/readme.md delete mode 100644 vendor/windows-link/src/lib.rs diff --git a/vendor/aho-corasick/.cargo-checksum.json b/vendor/aho-corasick/.cargo-checksum.json deleted file mode 100644 index 720dfa888c8cbc..00000000000000 --- a/vendor/aho-corasick/.cargo-checksum.json +++ /dev/null @@ -1 +0,0 @@ 
-{"files":{".cargo_vcs_info.json":"ace303bf25937bf488919e5461f3994d1b88a19a40465ec8342c63af89cbebf7",".github/FUNDING.yml":"0c65f392d32a8639ba7986bbb42ca124505b462122382f314c89d84c95dd27f1",".github/workflows/ci.yml":"0605d9327a4633916dc789008d5686c692656bb3e1ee57f821f8537e9ad7d7b4",".vim/coc-settings.json":"8237c8f41db352b0d83f1bb10a60bc2f60f56f3234afbf696b4075c8d4d62d9b","COPYING":"01c266bced4a434da0051174d6bee16a4c82cf634e2679b6155d40d75012390f","Cargo.lock":"395d3e76f284190cef50c807ab2f00b9a5d388fde7a7bf88b73b02ed9fd346d1","Cargo.toml":"9384d7c725c5c2ebc8adc602081e7cbce8b214693e9e27edef1c40f33e925810","Cargo.toml.orig":"05304eb8b8821d48c0c4d2e991b9ed0f1a0b68cb70afb8881b81c4c317969663","DESIGN.md":"59c960e1b73b1d7fb41e4df6c0c1b1fcf44dd2ebc8a349597a7d0595f8cb5130","LICENSE-MIT":"0f96a83840e146e43c0ec96a22ec1f392e0680e6c1226e6f3ba87e0740af850f","README.md":"afc4d559a98cf190029af0bf320fc0022725e349cd2a303aac860254e28f3c53","UNLICENSE":"7e12e5df4bae12cb21581ba157ced20e1986a0508dd10d0e8a4ab9a4cf94e85c","rustfmt.toml":"1ca600239a27401c4a43f363cf3f38183a212affc1f31bff3ae93234bbaec228","src/ahocorasick.rs":"c699c07df70be45c666e128509ad571a7649d2073e4ae16ac1efd6793c9c6890","src/automaton.rs":"22258a3e118672413119f8f543a9b912cce954e63524575c0ebfdf9011f9c2dd","src/dfa.rs":"197075923eb9d760a552f4e8652310fd4f657736613a9b1444ae05ef5d525da3","src/lib.rs":"66dea84d227f269b2f14ecc8109a97e96245b56c22eef0e8ce03b2343b8d6e66","src/macros.rs":"c6c52ae05b24433cffaca7b78b3645d797862c5d5feffddf9f54909095ed6e05","src/nfa/contiguous.rs":"f435c131ce84927e5600109722d006533ea21442dddaf18e03286d8caed82389","src/nfa/mod.rs":"ee7b3109774d14bbad5239c16bb980dd6b8185ec136d94fbaf2f0dc27d5ffa15","src/nfa/noncontiguous.rs":"de94f02b04efd8744fb096759a8897c22012b0e0ca3ace161fd87c71befefe04","src/packed/api.rs":"2197077ff7d7c731ae03a72bed0ae52d89fee56c5564be076313c9a573ce5013","src/packed/ext.rs":"66be06fde8558429da23a290584d4b9fae665bf64c2578db4fe5f5f3ee864869","src/packed/mod.rs":"0020cd6f07ba5c8955923a9516d7f758864260eda53a6b6f629131c45ddeec62","src/packed/pattern.rs":"0e4bca57d4b941495d31fc8246ad32904eed0cd89e3cda732ad35f4deeba3bef","src/packed/rabinkarp.rs":"403146eb1d838a84601d171393542340513cd1ee7ff750f2372161dd47746586","src/packed/teddy/README.md":"3a43194b64e221543d885176aba3beb1224a927385a20eca842daf6b0ea2f342","src/packed/teddy/builder.rs":"08ec116a4a842a2bb1221d296a2515ef3672c54906bed588fb733364c07855d3","src/packed/teddy/generic.rs":"ea252ab05b32cea7dd9d71e332071d243db7dd0362e049252a27e5881ba2bf39","src/packed/teddy/mod.rs":"17d741f7e2fb9dbac5ba7d1bd4542cf1e35e9f146ace728e23fe6bbed20028b2","src/packed/tests.rs":"8e2f56eb3890ed3876ecb47d3121996e416563127b6430110d7b516df3f83b4b","src/packed/vector.rs":"70c325cfa6f7c5c4c9a6af7b133b75a29e65990a7fe0b9a4c4ce3c3d5a0fe587","src/tests.rs":"c68192ab97b6161d0d6ee96fefd80cc7d14e4486ddcd8d1f82b5c92432c24ed5","src/transducer.rs":"02daa33a5d6dac41dcfd67f51df7c0d4a91c5131c781fb54c4de3520c585a6e1","src/util/alphabet.rs":"6dc22658a38deddc0279892035b18870d4585069e35ba7c7e649a24509acfbcc","src/util/buffer.rs":"f9e37f662c46c6ecd734458dedbe76c3bb0e84a93b6b0117c0d4ad3042413891","src/util/byte_frequencies.rs":"2fb85b381c038c1e44ce94294531cdcd339dca48b1e61f41455666e802cbbc9e","src/util/debug.rs":"ab301ad59aa912529cb97233a54a05914dd3cb2ec43e6fec7334170b97ac5998","src/util/error.rs":"ecccd60e7406305023efcc6adcc826eeeb083ab8f7fbfe3d97469438cd4c4e5c","src/util/int.rs":"e264e6abebf5622b59f6500210773db36048371c4e509c930263334095959a52","src/util/mod.rs":"7ab28d11323ecdbd982087f32eb8bceeee84f1a25
83f3aae27039c36d58cf12c","src/util/prefilter.rs":"183e32aa9951d9957f89062e4a6ae7235df7060722a3c91995a3d36db5a98111","src/util/primitives.rs":"f89f3fa1d8db4e37de9ca767c6d05e346404837cade6d063bba68972fafa610b","src/util/remapper.rs":"9f12d911583a325c11806eeceb46d0dfec863cfcfa241aed84d31af73da746e5","src/util/search.rs":"6af803e08b8b8c8a33db100623f1621b0d741616524ce40893d8316897f27ffe","src/util/special.rs":"7d2f9cb9dd9771f59816e829b2d96b1239996f32939ba98764e121696c52b146"},"package":"ddd31a130427c27518df266943a5308ed92d4b226cc639f5a8f1002816174301"} \ No newline at end of file diff --git a/vendor/aho-corasick/.cargo_vcs_info.json b/vendor/aho-corasick/.cargo_vcs_info.json deleted file mode 100644 index 51b411079c4244..00000000000000 --- a/vendor/aho-corasick/.cargo_vcs_info.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "git": { - "sha1": "17f8b32e3b7c845ef3c5429b823804f552f14ec9" - }, - "path_in_vcs": "" -} \ No newline at end of file diff --git a/vendor/aho-corasick/.github/FUNDING.yml b/vendor/aho-corasick/.github/FUNDING.yml deleted file mode 100644 index 2869fec98f72fb..00000000000000 --- a/vendor/aho-corasick/.github/FUNDING.yml +++ /dev/null @@ -1 +0,0 @@ -github: [BurntSushi] diff --git a/vendor/aho-corasick/.github/workflows/ci.yml b/vendor/aho-corasick/.github/workflows/ci.yml deleted file mode 100644 index f1b34cf80418b5..00000000000000 --- a/vendor/aho-corasick/.github/workflows/ci.yml +++ /dev/null @@ -1,148 +0,0 @@ -name: ci -on: - pull_request: - push: - branches: - - master - schedule: - - cron: '00 01 * * *' - -# The section is needed to drop write-all permissions that are granted on -# `schedule` event. By specifying any permission explicitly all others are set -# to none. By using the principle of least privilege the damage a compromised -# workflow can do (because of an injection or compromised third party tool or -# action) is restricted. Currently the worklow doesn't need any additional -# permission except for pulling the code. Adding labels to issues, commenting -# on pull-requests, etc. may need additional permissions: -# -# Syntax for this section: -# https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#permissions -# -# Reference for how to assign permissions on a job-by-job basis: -# https://docs.github.com/en/actions/using-jobs/assigning-permissions-to-jobs -# -# Reference for available permissions that we can enable if needed: -# https://docs.github.com/en/actions/security-guides/automatic-token-authentication#permissions-for-the-github_token -permissions: - # to fetch code (actions/checkout) - contents: read - -jobs: - test: - name: test - env: - # For some builds, we use cross to test on 32-bit and big-endian - # systems. - CARGO: cargo - # When CARGO is set to CROSS, TARGET is set to `--target matrix.target`. - # Note that we only use cross on Linux, so setting a target on a - # different OS will just use normal cargo. - TARGET: - # Bump this as appropriate. We pin to a version to make sure CI - # continues to work as cross releases in the past have broken things - # in subtle ways. 
- CROSS_VERSION: v0.2.5 - runs-on: ${{ matrix.os }} - strategy: - fail-fast: false - matrix: - include: - - build: pinned - os: ubuntu-latest - rust: 1.60.0 - - build: stable - os: ubuntu-latest - rust: stable - - build: stable-x86 - os: ubuntu-latest - rust: stable - target: i686-unknown-linux-gnu - - build: stable-aarch64 - os: ubuntu-latest - rust: stable - target: aarch64-unknown-linux-gnu - - build: stable-powerpc64 - os: ubuntu-latest - rust: stable - target: powerpc64-unknown-linux-gnu - - build: stable-s390x - os: ubuntu-latest - rust: stable - target: s390x-unknown-linux-gnu - - build: beta - os: ubuntu-latest - rust: beta - - build: nightly - os: ubuntu-latest - rust: nightly - - build: macos - os: macos-latest - rust: stable - - build: win-msvc - os: windows-latest - rust: stable - - build: win-gnu - os: windows-latest - rust: stable-x86_64-gnu - steps: - - name: Checkout repository - uses: actions/checkout@v3 - - name: Install Rust - uses: dtolnay/rust-toolchain@master - with: - toolchain: ${{ matrix.rust }} - - name: Install and configure Cross - if: matrix.os == 'ubuntu-latest' && matrix.target != '' - run: | - # In the past, new releases of 'cross' have broken CI. So for now, we - # pin it. We also use their pre-compiled binary releases because cross - # has over 100 dependencies and takes a bit to compile. - dir="$RUNNER_TEMP/cross-download" - mkdir "$dir" - echo "$dir" >> $GITHUB_PATH - cd "$dir" - curl -LO "https://github.com/cross-rs/cross/releases/download/$CROSS_VERSION/cross-x86_64-unknown-linux-musl.tar.gz" - tar xf cross-x86_64-unknown-linux-musl.tar.gz - - # We used to install 'cross' from master, but it kept failing. So now - # we build from a known-good version until 'cross' becomes more stable - # or we find an alternative. Notably, between v0.2.1 and current - # master (2022-06-14), the number of Cross's dependencies has doubled. 
- echo "CARGO=cross" >> $GITHUB_ENV - echo "TARGET=--target ${{ matrix.target }}" >> $GITHUB_ENV - - name: Show command used for Cargo - run: | - echo "cargo command is: ${{ env.CARGO }}" - echo "target flag is: ${{ env.TARGET }}" - - name: Show CPU info for debugging - if: matrix.os == 'ubuntu-latest' - run: lscpu - # See: https://github.com/rust-lang/regex/blob/a2887636930156023172e4b376a6febad4e49120/.github/workflows/ci.yml#L145-L163 - - name: Pin memchr to 2.6.2 - if: matrix.build == 'pinned' - run: cargo update -p memchr --precise 2.6.2 - - run: ${{ env.CARGO }} build --verbose $TARGET - - run: ${{ env.CARGO }} doc --verbose $TARGET - - run: ${{ env.CARGO }} test --verbose $TARGET - - run: ${{ env.CARGO }} test --lib --verbose --no-default-features --features std,perf-literal $TARGET - - run: ${{ env.CARGO }} test --lib --verbose --no-default-features $TARGET - - run: ${{ env.CARGO }} test --lib --verbose --no-default-features --features std $TARGET - - run: ${{ env.CARGO }} test --lib --verbose --no-default-features --features perf-literal $TARGET - - run: ${{ env.CARGO }} test --lib --verbose --no-default-features --features std,perf-literal,logging $TARGET - - if: matrix.build == 'nightly' - run: ${{ env.CARGO }} build --manifest-path aho-corasick-debug/Cargo.toml $TARGET - - rustfmt: - name: rustfmt - runs-on: ubuntu-latest - steps: - - name: Checkout repository - uses: actions/checkout@v3 - - name: Install Rust - uses: dtolnay/rust-toolchain@master - with: - toolchain: stable - components: rustfmt - - name: Check formatting - run: | - cargo fmt --all -- --check diff --git a/vendor/aho-corasick/.vim/coc-settings.json b/vendor/aho-corasick/.vim/coc-settings.json deleted file mode 100644 index 887eb6fab6f5e8..00000000000000 --- a/vendor/aho-corasick/.vim/coc-settings.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "rust-analyzer.linkedProjects": [ - "aho-corasick-debug/Cargo.toml", - "benchmarks/engines/rust-aho-corasick/Cargo.toml", - "benchmarks/engines/rust-daachorse/Cargo.toml", - "benchmarks/engines/rust-jetscii/Cargo.toml", - "benchmarks/engines/naive/Cargo.toml", - "benchmarks/shared/Cargo.toml", - "fuzz/Cargo.toml", - "Cargo.toml" - ] -} diff --git a/vendor/aho-corasick/COPYING b/vendor/aho-corasick/COPYING deleted file mode 100644 index bb9c20a094e41b..00000000000000 --- a/vendor/aho-corasick/COPYING +++ /dev/null @@ -1,3 +0,0 @@ -This project is dual-licensed under the Unlicense and MIT licenses. - -You may use this code under the terms of either license. diff --git a/vendor/aho-corasick/Cargo.lock b/vendor/aho-corasick/Cargo.lock deleted file mode 100644 index 597fa43801cfe7..00000000000000 --- a/vendor/aho-corasick/Cargo.lock +++ /dev/null @@ -1,39 +0,0 @@ -# This file is automatically @generated by Cargo. -# It is not intended for manual editing. 
-version = 3 - -[[package]] -name = "aho-corasick" -version = "1.1.4" -dependencies = [ - "doc-comment", - "log", - "memchr", -] - -[[package]] -name = "cfg-if" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" - -[[package]] -name = "doc-comment" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fea41bba32d969b513997752735605054bc0dfa92b4c56bf1189f2e174be7a10" - -[[package]] -name = "log" -version = "0.4.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "abb12e687cfb44aa40f41fc3978ef76448f9b6038cad6aef4259d3c095a2382e" -dependencies = [ - "cfg-if", -] - -[[package]] -name = "memchr" -version = "2.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d" diff --git a/vendor/aho-corasick/Cargo.toml b/vendor/aho-corasick/Cargo.toml deleted file mode 100644 index c4492a0170b083..00000000000000 --- a/vendor/aho-corasick/Cargo.toml +++ /dev/null @@ -1,80 +0,0 @@ -# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO -# -# When uploading crates to the registry Cargo will automatically -# "normalize" Cargo.toml files for maximal compatibility -# with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g., crates.io) dependencies. -# -# If you are reading this file be aware that the original Cargo.toml -# will likely look very different (and much more reasonable). -# See Cargo.toml.orig for the original contents. - -[package] -edition = "2021" -rust-version = "1.60.0" -name = "aho-corasick" -version = "1.1.4" -authors = ["Andrew Gallant <jamslam@gmail.com>"] -build = false -exclude = [ - "/aho-corasick-debug", - "/benchmarks", - "/tmp", -] -autolib = false -autobins = false -autoexamples = false -autotests = false -autobenches = false -description = "Fast multiple substring searching." -homepage = "https://github.com/BurntSushi/aho-corasick" -readme = "README.md" -keywords = [ - "string", - "search", - "text", - "pattern", - "multi", -] -categories = ["text-processing"] -license = "Unlicense OR MIT" -repository = "https://github.com/BurntSushi/aho-corasick" - -[package.metadata.docs.rs] -all-features = true -rustdoc-args = [ - "--cfg", - "docsrs", - "--generate-link-to-definition", -] - -[features] -default = [ - "std", - "perf-literal", -] -logging = ["dep:log"] -perf-literal = ["dep:memchr"] -std = ["memchr?/std"] - -[lib] -name = "aho_corasick" -path = "src/lib.rs" - -[dependencies.log] -version = "0.4.17" -optional = true - -[dependencies.memchr] -version = "2.4.0" -optional = true -default-features = false - -[dev-dependencies.doc-comment] -version = "0.3.3" - -[profile.bench] -debug = 2 - -[profile.release] -debug = 2 diff --git a/vendor/aho-corasick/DESIGN.md b/vendor/aho-corasick/DESIGN.md deleted file mode 100644 index f911f0c3ada977..00000000000000 --- a/vendor/aho-corasick/DESIGN.md +++ /dev/null @@ -1,481 +0,0 @@ -This document describes the internal design of this crate, which is an object -lesson in what happens when you take a fairly simple old algorithm like -Aho-Corasick and make it fast and production ready. - -The target audience of this document is Rust programmers that have some -familiarity with string searching, however, one does not need to know the -Aho-Corasick algorithm in order to read this (it is explained below). One -should, however, know what a trie is. 
(If you don't, go read its Wikipedia -article.) - -The center-piece of this crate is an implementation of Aho-Corasick. On its -own, Aho-Corasick isn't that complicated. The complex pieces come from the -different variants of Aho-Corasick implemented in this crate. Specifically, -they are: - -* Aho-Corasick as a noncontiguous NFA. States have their transitions - represented sparsely, and each state puts its transitions in its own separate - allocation. Hence the same "noncontiguous." -* Aho-Corasick as a contiguous NFA. This NFA uses a single allocation to - represent the transitions of all states. That is, transitions are laid out - contiguously in memory. Moreover, states near the starting state are - represented densely, such that finding the next state ID takes a constant - number of instructions. -* Aho-Corasick as a DFA. In this case, all states are represented densely in - a transition table that uses one allocation. -* Supporting "standard" match semantics, along with its overlapping variant, - in addition to leftmost-first and leftmost-longest semantics. The "standard" - semantics are typically what you see in a textbook description of - Aho-Corasick. However, Aho-Corasick is also useful as an optimization in - regex engines, which often use leftmost-first or leftmost-longest semantics. - Thus, it is useful to implement those semantics here. The "standard" and - "leftmost" search algorithms are subtly different, and also require slightly - different construction algorithms. -* Support for ASCII case insensitive matching. -* Support for accelerating searches when the patterns all start with a small - number of fixed bytes. Or alternatively, when the patterns all contain a - small number of rare bytes. (Searching for these bytes uses SIMD vectorized - code courtesy of `memchr`.) -* Transparent support for alternative SIMD vectorized search routines for - smaller number of literals, such as the Teddy algorithm. We called these - "packed" search routines because they use SIMD. They can often be an order of - magnitude faster than just Aho-Corasick, but don't scale as well. -* Support for searching streams. This can reuse most of the underlying code, - but does require careful buffering support. -* Support for anchored searches, which permit efficient "is prefix" checks for - a large number of patterns. - -When you combine all of this together along with trying to make everything as -fast as possible, what you end up with is enitrely too much code with too much -`unsafe`. Alas, I was not smart enough to figure out how to reduce it. Instead, -we will explain it. - - -# Basics - -The fundamental problem this crate is trying to solve is to determine the -occurrences of possibly many patterns in a haystack. The naive way to solve -this is to look for a match for each pattern at each position in the haystack: - - for i in 0..haystack.len(): - for p in patterns.iter(): - if haystack[i..].starts_with(p.bytes()): - return Match(p.id(), i, i + p.bytes().len()) - -Those four lines are effectively all this crate does. The problem with those -four lines is that they are very slow, especially when you're searching for a -large number of patterns. - -While there are many different algorithms available to solve this, a popular -one is Aho-Corasick. It's a common solution because it's not too hard to -implement, scales quite well even when searching for thousands of patterns and -is generally pretty fast. 
Aho-Corasick does well here because, regardless of -the number of patterns you're searching for, it always visits each byte in the -haystack exactly once. This means, generally speaking, adding more patterns to -an Aho-Corasick automaton does not make it slower. (Strictly speaking, however, -this is not true, since a larger automaton will make less effective use of the -CPU's cache.) - -Aho-Corasick can be succinctly described as a trie with state transitions -between some of the nodes that efficiently instruct the search algorithm to -try matching alternative keys in the trie. The trick is that these state -transitions are arranged such that each byte of input needs to be inspected -only once. These state transitions are typically called "failure transitions," -because they instruct the searcher (the thing traversing the automaton while -reading from the haystack) what to do when a byte in the haystack does not -correspond to a valid transition in the current state of the trie. - -More formally, a failure transition points to a state in the automaton that may -lead to a match whose prefix is a proper suffix of the path traversed through -the trie so far. (If no such proper suffix exists, then the failure transition -points back to the start state of the trie, effectively restarting the search.) -This is perhaps simpler to explain pictorally. For example, let's say we built -an Aho-Corasick automaton with the following patterns: 'abcd' and 'cef'. The -trie looks like this: - - a - S1 - b - S2 - c - S3 - d - S4* - / - S0 - c - S5 - e - S6 - f - S7* - -where states marked with a `*` are match states (meaning, the search algorithm -should stop and report a match to the caller). - -So given this trie, it should be somewhat straight-forward to see how it can -be used to determine whether any particular haystack *starts* with either -`abcd` or `cef`. It's easy to express this in code: - - fn has_prefix(trie: &Trie, haystack: &[u8]) -> bool { - let mut state_id = trie.start(); - // If the empty pattern is in trie, then state_id is a match state. - if trie.is_match(state_id) { - return true; - } - for (i, &b) in haystack.iter().enumerate() { - state_id = match trie.next_state(state_id, b) { - Some(id) => id, - // If there was no transition for this state and byte, then we know - // the haystack does not start with one of the patterns in our trie. - None => return false, - }; - if trie.is_match(state_id) { - return true; - } - } - false - } - -And that's pretty much it. All we do is move through the trie starting with the -bytes at the beginning of the haystack. If we find ourselves in a position -where we can't move, or if we've looked through the entire haystack without -seeing a match state, then we know the haystack does not start with any of the -patterns in the trie. - -The meat of the Aho-Corasick algorithm is in how we add failure transitions to -our trie to keep searching efficient. Specifically, it permits us to not only -check whether a haystack *starts* with any one of a number of patterns, but -rather, whether the haystack contains any of a number of patterns *anywhere* in -the haystack. - -As mentioned before, failure transitions connect a proper suffix of the path -traversed through the trie before, with a path that leads to a match that has a -prefix corresponding to that proper suffix. So in our case, for patterns `abcd` -and `cef`, with a haystack `abcef`, we want to transition to state `S5` (from -the diagram above) from `S3` upon seeing that the byte following `c` is not -`d`. 
Namely, the proper suffix in this example is `c`, which is a prefix of -`cef`. So the modified diagram looks like this: - - - a - S1 - b - S2 - c - S3 - d - S4* - / / - / ---------------- - / / - S0 - c - S5 - e - S6 - f - S7* - -One thing that isn't shown in this diagram is that *all* states have a failure -transition, but only `S3` has a *non-trivial* failure transition. That is, all -other states have a failure transition back to the start state. So if our -haystack was `abzabcd`, then the searcher would transition back to `S0` after -seeing `z`, which effectively restarts the search. (Because there is no pattern -in our trie that has a prefix of `bz` or `z`.) - -The code for traversing this *automaton* or *finite state machine* (it is no -longer just a trie) is not that much different from the `has_prefix` code -above: - - fn contains(fsm: &FiniteStateMachine, haystack: &[u8]) -> bool { - let mut state_id = fsm.start(); - // If the empty pattern is in fsm, then state_id is a match state. - if fsm.is_match(state_id) { - return true; - } - for (i, &b) in haystack.iter().enumerate() { - // While the diagram above doesn't show this, we may wind up needing - // to follow multiple failure transitions before we land on a state - // in which we can advance. Therefore, when searching for the next - // state, we need to loop until we don't see a failure transition. - // - // This loop terminates because the start state has no empty - // transitions. Every transition from the start state either points to - // another state, or loops back to the start state. - loop { - match fsm.next_state(state_id, b) { - Some(id) => { - state_id = id; - break; - } - // Unlike our code above, if there was no transition for this - // state, then we don't quit. Instead, we look for this state's - // failure transition and follow that instead. - None => { - state_id = fsm.next_fail_state(state_id); - } - }; - } - if fsm.is_match(state_id) { - return true; - } - } - false - } - -Other than the complication around traversing failure transitions, this code -is still roughly "traverse the automaton with bytes from the haystack, and quit -when a match is seen." - -And that concludes our section on the basics. While we didn't go deep into how -the automaton is built (see `src/nfa/noncontiguous.rs`, which has detailed -comments about that), the basic structure of Aho-Corasick should be reasonably -clear. - - -# NFAs and DFAs - -There are generally two types of finite automata: non-deterministic finite -automata (NFA) and deterministic finite automata (DFA). The difference between -them is, principally, that an NFA can be in multiple states at once. This is -typically accomplished by things called _epsilon_ transitions, where one could -move to a new state without consuming any bytes from the input. (The other -mechanism by which NFAs can be in more than one state is where the same byte in -a particular state transitions to multiple distinct states.) In contrast, a DFA -can only ever be in one state at a time. A DFA has no epsilon transitions, and -for any given state, a byte transitions to at most one other state. - -By this formulation, the Aho-Corasick automaton described in the previous -section is an NFA. This is because failure transitions are, effectively, -epsilon transitions. That is, whenever the automaton is in state `S`, it is -actually in the set of states that are reachable by recursively following -failure transitions from `S` until you reach the start state. 
(This means -that, for example, the start state is always active since the start state is -reachable via failure transitions from any state in the automaton.) - -NFAs have a lot of nice properties. They tend to be easier to construct, and -also tend to use less memory. However, their primary downside is that they are -typically slower to execute a search with. For example, the code above showing -how to search with an Aho-Corasick automaton needs to potentially iterate -through many failure transitions for every byte of input. While this is a -fairly small amount of overhead, this can add up, especially if the automaton -has a lot of overlapping patterns with a lot of failure transitions. - -A DFA's search code, by contrast, looks like this: - - fn contains(dfa: &DFA, haystack: &[u8]) -> bool { - let mut state_id = dfa.start(); - // If the empty pattern is in dfa, then state_id is a match state. - if dfa.is_match(state_id) { - return true; - } - for (i, &b) in haystack.iter().enumerate() { - // An Aho-Corasick DFA *never* has a missing state that requires - // failure transitions to be followed. One byte of input advances the - // automaton by one state. Always. - state_id = dfa.next_state(state_id, b); - if dfa.is_match(state_id) { - return true; - } - } - false - } - -The search logic here is much simpler than for the NFA, and this tends to -translate into significant performance benefits as well, since there's a lot -less work being done for each byte in the haystack. How is this accomplished? -It's done by pre-following all failure transitions for all states for all bytes -in the alphabet, and then building a single state transition table. Building -this DFA can be much more costly than building the NFA, and use much more -memory, but the better performance can be worth it. - -Users of this crate can actually choose between using one of two possible NFAs -(noncontiguous or contiguous) or a DFA. By default, a contiguous NFA is used, -in most circumstances, but if the number of patterns is small enough a DFA will -be used. A contiguous NFA is chosen because it uses orders of magnitude less -memory than a DFA, takes only a little longer to build than a noncontiguous -NFA and usually gets pretty close to the search speed of a DFA. (Callers can -override this automatic selection via the `AhoCorasickBuilder::start_kind` -configuration.) - - -# More DFA tricks - -As described in the previous section, one of the downsides of using a DFA -is that it uses more memory and can take longer to build. One small way of -mitigating these concerns is to map the alphabet used by the automaton into -a smaller space. Typically, the alphabet of a DFA has 256 elements in it: -one element for each possible value that fits into a byte. However, in many -cases, one does not need the full alphabet. For example, if all patterns in an -Aho-Corasick automaton are ASCII letters, then this only uses up 52 distinct -bytes. As far as the automaton is concerned, the rest of the 204 bytes are -indistinguishable from one another: they will never disrciminate between a -match or a non-match. Therefore, in cases like that, the alphabet can be shrunk -to just 53 elements. One for each ASCII letter, and then another to serve as a -placeholder for every other unused byte. - -In practice, this library doesn't quite compute the optimal set of equivalence -classes, but it's close enough in most cases. The key idea is that this then -allows the transition table for the DFA to be potentially much smaller. 
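As a rough standalone sketch of the byte-class idea described above, and not the crate's actual implementation (which lives in `src/util/alphabet.rs` and computes classes more carefully), one might give every byte that occurs in some pattern its own class and lump all remaining bytes into a single placeholder class:

```rust
/// Toy byte-class map: each byte used by some pattern gets its own class and
/// every other byte shares class 0, so a DFA row needs `classes` columns
/// instead of 256. (Assumes fewer than 256 distinct pattern bytes.)
fn byte_classes(patterns: &[&[u8]]) -> ([u8; 256], usize) {
    let mut used = [false; 256];
    for p in patterns {
        for &b in *p {
            used[b as usize] = true;
        }
    }
    let mut map = [0u8; 256];
    let mut next = 1u8; // class 0 is the shared "never part of a match" class
    for b in 0..256usize {
        if used[b] {
            map[b] = next;
            next += 1;
        }
    }
    (map, next as usize)
}

fn main() {
    let pats: [&[u8]; 2] = [b"abcd", b"cef"];
    let (map, classes) = byte_classes(&pats);
    assert_eq!(classes, 7); // a, b, c, d, e, f plus the shared class
    assert_eq!(map[b'z' as usize], map[b'!' as usize]); // both unused bytes
    assert_ne!(map[b'a' as usize], map[b'z' as usize]);
    println!("{classes} byte classes");
}
```

With the `abcd`/`cef` patterns used throughout this document, a DFA row would then need 7 columns instead of 256.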
The -downside of doing this, however, is that since the transition table is defined -in terms of this smaller alphabet space, every byte in the haystack must be -re-mapped to this smaller space. This requires an additional 256-byte table. -In practice, this can lead to a small search time hit, but it can be difficult -to measure. Moreover, it can sometimes lead to faster search times for bigger -automata, since it could be difference between more parts of the automaton -staying in the CPU cache or not. - -One other trick for DFAs employed by this crate is the notion of premultiplying -state identifiers. Specifically, the normal way to compute the next transition -in a DFA is via the following (assuming that the transition table is laid out -sequentially in memory, in row-major order, where the rows are states): - - next_state_id = dfa.transitions[current_state_id * 256 + current_byte] - -However, since the value `256` is a fixed constant, we can actually premultiply -the state identifiers in the table when we build the table initially. Then, the -next transition computation simply becomes: - - next_state_id = dfa.transitions[current_state_id + current_byte] - -This doesn't seem like much, but when this is being executed for every byte of -input that you're searching, saving that extra multiplication instruction can -add up. - -The same optimization works even when equivalence classes are enabled, as -described above. The only difference is that the premultiplication is by the -total number of equivalence classes instead of 256. - -There isn't much downside to premultiplying state identifiers, other than it -imposes a smaller limit on the total number of states in the DFA. Namely, with -premultiplied state identifiers, you run out of room in your state identifier -representation more rapidly than if the identifiers are just state indices. - -Both equivalence classes and premultiplication are always enabled. There is a -`AhoCorasickBuilder::byte_classes` configuration, but disabling this just makes -it so there are always 256 equivalence classes, i.e., every class corresponds -to precisely one byte. When it's disabled, the equivalence class map itself is -still used. The purpose of disabling it is when one is debugging the underlying -automaton. It can be easier to comprehend when it uses actual byte values for -its transitions instead of equivalence classes. - - -# Match semantics - -One of the more interesting things about this implementation of Aho-Corasick -that (as far as this author knows) separates it from other implementations, is -that it natively supports leftmost-first and leftmost-longest match semantics. -Briefly, match semantics refer to the decision procedure by which searching -will disambiguate matches when there are multiple to choose from: - -* **standard** match semantics emits matches as soon as they are detected by - the automaton. This is typically equivalent to the textbook non-overlapping - formulation of Aho-Corasick. -* **leftmost-first** match semantics means that 1) the next match is the match - starting at the leftmost position and 2) among multiple matches starting at - the same leftmost position, the match corresponding to the pattern provided - first by the caller is reported. -* **leftmost-longest** is like leftmost-first, except when there are multiple - matches starting at the same leftmost position, the pattern corresponding to - the longest match is returned. 
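The difference is observable directly through the crate's public builder API; the following is an illustrative sketch against the vendored aho-corasick 1.1 API, with patterns and a haystack chosen only for demonstration:

```rust
use aho_corasick::{AhoCorasick, MatchKind};

fn main() {
    let patterns = &["Sam", "Samwise"];
    let haystack = "Samwise";

    // Standard semantics report the first match the automaton detects: "Sam".
    let ac = AhoCorasick::new(patterns).unwrap();
    let mat = ac.find(haystack).unwrap();
    assert_eq!("Sam", &haystack[mat.start()..mat.end()]);

    // Leftmost-longest semantics report the longest match at the leftmost
    // starting position, regardless of the order the patterns were given in.
    let ac = AhoCorasick::builder()
        .match_kind(MatchKind::LeftmostLongest)
        .build(patterns)
        .unwrap();
    let mat = ac.find(haystack).unwrap();
    assert_eq!("Samwise", &haystack[mat.start()..mat.end()]);
}
```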
- -(The crate API documentation discusses these differences, with examples, in -more depth on the `MatchKind` type.) - -The reason why supporting these match semantics is important is because it -gives the user more control over the match procedure. For example, -leftmost-first permits users to implement match priority by simply putting the -higher priority patterns first. Leftmost-longest, on the other hand, permits -finding the longest possible match, which might be useful when trying to find -words matching a dictionary. Additionally, regex engines often want to use -Aho-Corasick as an optimization when searching for an alternation of literals. -In order to preserve correct match semantics, regex engines typically can't use -the standard textbook definition directly, since regex engines will implement -either leftmost-first (Perl-like) or leftmost-longest (POSIX) match semantics. - -Supporting leftmost semantics requires a couple key changes: - -* Constructing the Aho-Corasick automaton changes a bit in both how the trie is - constructed and how failure transitions are found. Namely, only a subset - of the failure transitions are added. Specifically, only the failure - transitions that either do not occur after a match or do occur after a match - but preserve that match are kept. (More details on this can be found in - `src/nfa/noncontiguous.rs`.) -* The search algorithm changes slightly. Since we are looking for the leftmost - match, we cannot quit as soon as a match is detected. Instead, after a match - is detected, we must keep searching until either the end of the input or - until a dead state is seen. (Dead states are not used for standard match - semantics. Dead states mean that searching should stop after a match has been - found.) - -Most other implementations of Aho-Corasick do support leftmost match semantics, -but they do it with more overhead at search time, or even worse, with a queue -of matches and sophisticated hijinks to disambiguate the matches. While our -construction algorithm becomes a bit more complicated, the correct match -semantics fall out from the structure of the automaton itself. - - -# Overlapping matches - -One of the nice properties of an Aho-Corasick automaton is that it can report -all possible matches, even when they overlap with one another. In this mode, -the match semantics don't matter, since all possible matches are reported. -Overlapping searches work just like regular searches, except the state -identifier at which the previous search left off is carried over to the next -search, so that it can pick up where it left off. If there are additional -matches at that state, then they are reported before resuming the search. - -Enabling leftmost-first or leftmost-longest match semantics causes the -automaton to use a subset of all failure transitions, which means that -overlapping searches cannot be used. Therefore, if leftmost match semantics are -used, attempting to do an overlapping search will return an error (or panic -when using the infallible APIs). Thus, to get overlapping searches, the caller -must use the default standard match semantics. This behavior was chosen because -there are only two alternatives, which were deemed worse: - -* Compile two automatons internally, one for standard semantics and one for - the semantics requested by the caller (if not standard). -* Create a new type, distinct from the `AhoCorasick` type, which has different - capabilities based on the configuration options. 
- -The first is untenable because of the amount of memory used by the automaton. -The second increases the complexity of the API too much by adding too many -types that do similar things. It is conceptually much simpler to keep all -searching isolated to a single type. - - -# Stream searching - -Since Aho-Corasick is an automaton, it is possible to do partial searches on -partial parts of the haystack, and then resume that search on subsequent pieces -of the haystack. This is useful when the haystack you're trying to search is -not stored contiguously in memory, or if one does not want to read the entire -haystack into memory at once. - -Currently, only standard semantics are supported for stream searching. This is -some of the more complicated code in this crate, and is something I would very -much like to improve. In particular, it currently has the restriction that it -must buffer at least enough of the haystack in memory in order to fit the -longest possible match. The difficulty in getting stream searching right is -that the implementation choices (such as the buffer size) often impact what the -API looks like and what it's allowed to do. - - -# Prefilters - -In some cases, Aho-Corasick is not the fastest way to find matches containing -multiple patterns. Sometimes, the search can be accelerated using highly -optimized SIMD routines. For example, consider searching the following -patterns: - - Sherlock - Moriarty - Watson - -It is plausible that it would be much faster to quickly look for occurrences of -the leading bytes, `S`, `M` or `W`, before trying to start searching via the -automaton. Indeed, this is exactly what this crate will do. - -When there are more than three distinct starting bytes, then this crate will -look for three distinct bytes occurring at any position in the patterns, while -preferring bytes that are heuristically determined to be rare over others. For -example: - - Abuzz - Sanchez - Vasquez - Topaz - Waltz - -Here, we have more than 3 distinct starting bytes, but all of the patterns -contain `z`, which is typically a rare byte. In this case, the prefilter will -scan for `z`, back up a bit, and then execute the Aho-Corasick automaton. - -If all of that fails, then a packed multiple substring algorithm will be -attempted. Currently, the only algorithm available for this is Teddy, but more -may be added in the future. Teddy is unlike the above prefilters in that it -confirms its own matches, so when Teddy is active, it might not be necessary -for Aho-Corasick to run at all. However, the current Teddy implementation -only works in `x86_64` when SSSE3 or AVX2 are available or in `aarch64` -(using NEON), and moreover, only works _well_ when there are a small number -of patterns (say, less than 100). Teddy also requires the haystack to be of a -certain length (more than 16-34 bytes). When the haystack is shorter than that, -Rabin-Karp is used instead. (See `src/packed/rabinkarp.rs`.) - -There is a more thorough description of Teddy at -[`src/packed/teddy/README.md`](src/packed/teddy/README.md). 
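The rare-byte idea sketched above can be illustrated with a few lines of plain Rust. This is only a toy: it uses naive confirmation in place of the automaton, while the crate's real prefilters (in `src/util/prefilter.rs` and the Teddy code under `src/packed/`) are far more sophisticated:

```rust
/// Toy rare-byte prefilter: every pattern below contains 'z', so scan for
/// 'z', back up by the longest pattern length, and only run the (naive)
/// multi-pattern confirmation over that small window.
fn prefiltered_find(patterns: &[&[u8]], rare: u8, haystack: &[u8]) -> Option<(usize, usize)> {
    let max_len = patterns.iter().map(|p| p.len()).max()?;
    let mut at = 0;
    while let Some(i) = haystack[at..].iter().position(|&b| b == rare) {
        let hit = at + i;
        // A match containing the byte at `hit` can start at most
        // max_len - 1 positions earlier.
        let start = hit.saturating_sub(max_len.saturating_sub(1));
        for s in start..=hit {
            for p in patterns {
                if haystack[s..].starts_with(p) {
                    return Some((s, s + p.len()));
                }
            }
        }
        at = hit + 1;
    }
    None
}

fn main() {
    let pats: [&[u8]; 5] = [b"Abuzz", b"Sanchez", b"Vasquez", b"Topaz", b"Waltz"];
    let hay = b"the committee chose Vasquez over Sanchez";
    assert_eq!(prefiltered_find(&pats, b'z', hay), Some((20, 27)));
    println!("ok");
}
```

The point of the back-up step is that any match containing the located `z` can begin at most `max_len - 1` bytes before it, so only a small window around each rare-byte hit ever needs to be confirmed.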
diff --git a/vendor/aho-corasick/LICENSE-MIT b/vendor/aho-corasick/LICENSE-MIT deleted file mode 100644 index 3b0a5dc09c1e16..00000000000000 --- a/vendor/aho-corasick/LICENSE-MIT +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2015 Andrew Gallant - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/vendor/aho-corasick/README.md b/vendor/aho-corasick/README.md deleted file mode 100644 index c0f525fdc6be62..00000000000000 --- a/vendor/aho-corasick/README.md +++ /dev/null @@ -1,174 +0,0 @@ -aho-corasick -============ -A library for finding occurrences of many patterns at once with SIMD -acceleration in some cases. This library provides multiple pattern -search principally through an implementation of the -[Aho-Corasick algorithm](https://en.wikipedia.org/wiki/Aho%E2%80%93Corasick_algorithm), -which builds a finite state machine for executing searches in linear time. -Features include case insensitive matching, overlapping matches, fast searching -via SIMD and optional full DFA construction and search & replace in streams. - -[![Build status](https://github.com/BurntSushi/aho-corasick/workflows/ci/badge.svg)](https://github.com/BurntSushi/aho-corasick/actions) -[![crates.io](https://img.shields.io/crates/v/aho-corasick.svg)](https://crates.io/crates/aho-corasick) - -Dual-licensed under MIT or the [UNLICENSE](https://unlicense.org/). - - -### Documentation - -https://docs.rs/aho-corasick - - -### Usage - -Run `cargo add aho-corasick` to automatically add this crate as a dependency -in your `Cargo.toml` file. - - -### Example: basic searching - -This example shows how to search for occurrences of multiple patterns -simultaneously. Each match includes the pattern that matched along with the -byte offsets of the match. 
- -```rust -use aho_corasick::{AhoCorasick, PatternID}; - -let patterns = &["apple", "maple", "Snapple"]; -let haystack = "Nobody likes maple in their apple flavored Snapple."; - -let ac = AhoCorasick::new(patterns).unwrap(); -let mut matches = vec![]; -for mat in ac.find_iter(haystack) { - matches.push((mat.pattern(), mat.start(), mat.end())); -} -assert_eq!(matches, vec![ - (PatternID::must(1), 13, 18), - (PatternID::must(0), 28, 33), - (PatternID::must(2), 43, 50), -]); -``` - - -### Example: ASCII case insensitivity - -This is like the previous example, but matches `Snapple` case insensitively -using `AhoCorasickBuilder`: - -```rust -use aho_corasick::{AhoCorasick, PatternID}; - -let patterns = &["apple", "maple", "snapple"]; -let haystack = "Nobody likes maple in their apple flavored Snapple."; - -let ac = AhoCorasick::builder() - .ascii_case_insensitive(true) - .build(patterns) - .unwrap(); -let mut matches = vec![]; -for mat in ac.find_iter(haystack) { - matches.push((mat.pattern(), mat.start(), mat.end())); -} -assert_eq!(matches, vec![ - (PatternID::must(1), 13, 18), - (PatternID::must(0), 28, 33), - (PatternID::must(2), 43, 50), -]); -``` - - -### Example: replacing matches in a stream - -This example shows how to execute a search and replace on a stream without -loading the entire stream into memory first. - -```rust,ignore -use aho_corasick::AhoCorasick; - -let patterns = &["fox", "brown", "quick"]; -let replace_with = &["sloth", "grey", "slow"]; - -// In a real example, these might be `std::fs::File`s instead. All you need to -// do is supply a pair of `std::io::Read` and `std::io::Write` implementations. -let rdr = "The quick brown fox."; -let mut wtr = vec![]; - -let ac = AhoCorasick::new(patterns).unwrap(); -ac.stream_replace_all(rdr.as_bytes(), &mut wtr, replace_with) - .expect("stream_replace_all failed"); -assert_eq!(b"The slow grey sloth.".to_vec(), wtr); -``` - - -### Example: finding the leftmost first match - -In the textbook description of Aho-Corasick, its formulation is typically -structured such that it reports all possible matches, even when they overlap -with another. In many cases, overlapping matches may not be desired, such as -the case of finding all successive non-overlapping matches like you might with -a standard regular expression. - -Unfortunately the "obvious" way to modify the Aho-Corasick algorithm to do -this doesn't always work in the expected way, since it will report matches as -soon as they are seen. For example, consider matching the regex `Samwise|Sam` -against the text `Samwise`. Most regex engines (that are Perl-like, or -non-POSIX) will report `Samwise` as a match, but the standard Aho-Corasick -algorithm modified for reporting non-overlapping matches will report `Sam`. - -A novel contribution of this library is the ability to change the match -semantics of Aho-Corasick (without additional search time overhead) such that -`Samwise` is reported instead. 
For example, here's the standard approach: - -```rust -use aho_corasick::AhoCorasick; - -let patterns = &["Samwise", "Sam"]; -let haystack = "Samwise"; - -let ac = AhoCorasick::new(patterns).unwrap(); -let mat = ac.find(haystack).expect("should have a match"); -assert_eq!("Sam", &haystack[mat.start()..mat.end()]); -``` - -And now here's the leftmost-first version, which matches how a Perl-like -regex will work: - -```rust -use aho_corasick::{AhoCorasick, MatchKind}; - -let patterns = &["Samwise", "Sam"]; -let haystack = "Samwise"; - -let ac = AhoCorasick::builder() - .match_kind(MatchKind::LeftmostFirst) - .build(patterns) - .unwrap(); -let mat = ac.find(haystack).expect("should have a match"); -assert_eq!("Samwise", &haystack[mat.start()..mat.end()]); -``` - -In addition to leftmost-first semantics, this library also supports -leftmost-longest semantics, which match the POSIX behavior of a regular -expression alternation. See `MatchKind` in the docs for more details. - - -### Minimum Rust version policy - -This crate's minimum supported `rustc` version is `1.60.0`. - -The current policy is that the minimum Rust version required to use this crate -can be increased in minor version updates. For example, if `crate 1.0` requires -Rust 1.20.0, then `crate 1.0.z` for all values of `z` will also require Rust -1.20.0 or newer. However, `crate 1.y` for `y > 0` may require a newer minimum -version of Rust. - -In general, this crate will be conservative with respect to the minimum -supported version of Rust. - - -### FFI bindings - -* [G-Research/ahocorasick_rs](https://github.com/G-Research/ahocorasick_rs/) -is a Python wrapper for this library. -* [tmikus/ahocorasick_rs](https://github.com/tmikus/ahocorasick_rs) is a Go - wrapper for this library. diff --git a/vendor/aho-corasick/UNLICENSE b/vendor/aho-corasick/UNLICENSE deleted file mode 100644 index 68a49daad8ff7e..00000000000000 --- a/vendor/aho-corasick/UNLICENSE +++ /dev/null @@ -1,24 +0,0 @@ -This is free and unencumbered software released into the public domain. - -Anyone is free to copy, modify, publish, use, compile, sell, or -distribute this software, either in source code form or as a compiled -binary, for any purpose, commercial or non-commercial, and by any -means. - -In jurisdictions that recognize copyright laws, the author or authors -of this software dedicate any and all copyright interest in the -software to the public domain. We make this dedication for the benefit -of the public at large and to the detriment of our heirs and -successors. We intend this dedication to be an overt act of -relinquishment in perpetuity of all present and future rights to this -software under copyright law. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR -OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, -ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR -OTHER DEALINGS IN THE SOFTWARE. 
- -For more information, please refer to <http://unlicense.org/> diff --git a/vendor/aho-corasick/rustfmt.toml b/vendor/aho-corasick/rustfmt.toml deleted file mode 100644 index aa37a218b97e5f..00000000000000 --- a/vendor/aho-corasick/rustfmt.toml +++ /dev/null @@ -1,2 +0,0 @@ -max_width = 79 -use_small_heuristics = "max" diff --git a/vendor/aho-corasick/src/ahocorasick.rs b/vendor/aho-corasick/src/ahocorasick.rs deleted file mode 100644 index 2947627704d3d0..00000000000000 --- a/vendor/aho-corasick/src/ahocorasick.rs +++ /dev/null @@ -1,2789 +0,0 @@ -use core::{ - fmt::Debug, - panic::{RefUnwindSafe, UnwindSafe}, -}; - -use alloc::{string::String, sync::Arc, vec::Vec}; - -use crate::{ - automaton::{self, Automaton, OverlappingState}, - dfa, - nfa::{contiguous, noncontiguous}, - util::{ - error::{BuildError, MatchError}, - prefilter::Prefilter, - primitives::{PatternID, StateID}, - search::{Anchored, Input, Match, MatchKind, StartKind}, - }, -}; - -/// An automaton for searching multiple strings in linear time. -/// -/// The `AhoCorasick` type supports a few basic ways of constructing an -/// automaton, with the default being [`AhoCorasick::new`]. However, there -/// are a fair number of configurable options that can be set by using -/// [`AhoCorasickBuilder`] instead. Such options include, but are not limited -/// to, how matches are determined, simple case insensitivity, whether to use a -/// DFA or not and various knobs for controlling the space-vs-time trade offs -/// taken when building the automaton. -/// -/// # Resource usage -/// -/// Aho-Corasick automatons are always constructed in `O(p)` time, where -/// `p` is the combined length of all patterns being searched. With that -/// said, building an automaton can be fairly costly because of high constant -/// factors, particularly when enabling the [DFA](AhoCorasickKind::DFA) option -/// with [`AhoCorasickBuilder::kind`]. For this reason, it's generally a good -/// idea to build an automaton once and reuse it as much as possible. -/// -/// Aho-Corasick automatons can also use a fair bit of memory. To get -/// a concrete idea of how much memory is being used, try using the -/// [`AhoCorasick::memory_usage`] method. -/// -/// To give a quick idea of the differences between Aho-Corasick -/// implementations and their resource usage, here's a sample of construction -/// times and heap memory used after building an automaton from 100,000 -/// randomly selected titles from Wikipedia: -/// -/// * 99MB for a [`noncontiguous::NFA`] in 240ms. -/// * 21MB for a [`contiguous::NFA`] in 275ms. -/// * 1.6GB for a [`dfa::DFA`] in 1.88s. -/// -/// (Note that the memory usage above reflects the size of each automaton and -/// not peak memory usage. For example, building a contiguous NFA requires -/// first building a noncontiguous NFA. Once the contiguous NFA is built, the -/// noncontiguous NFA is freed.) -/// -/// This experiment very strongly argues that a contiguous NFA is often the -/// best balance in terms of resource usage. It takes a little longer to build, -/// but its memory usage is quite small. Its search speed (not listed) is -/// also often faster than a noncontiguous NFA, but a little slower than a -/// DFA. Indeed, when no specific [`AhoCorasickKind`] is used (which is the -/// default), a contiguous NFA is used in most cases. -/// -/// The only "catch" to using a contiguous NFA is that, because of its variety -/// of compression tricks, it may not be able to support automatons as large as -/// what the noncontiguous NFA supports. 
In which case, building a contiguous -/// NFA will fail and (by default) `AhoCorasick` will automatically fall -/// back to a noncontiguous NFA. (This typically only happens when building -/// automatons from millions of patterns.) Otherwise, the small additional time -/// for building a contiguous NFA is almost certainly worth it. -/// -/// # Cloning -/// -/// The `AhoCorasick` type uses thread safe reference counting internally. It -/// is guaranteed that it is cheap to clone. -/// -/// # Search configuration -/// -/// Most of the search routines accept anything that can be cheaply converted -/// to an [`Input`]. This includes `&[u8]`, `&str` and `Input` itself. -/// -/// # Construction failure -/// -/// It is generally possible for building an Aho-Corasick automaton to fail. -/// Construction can fail in generally one way: when the inputs provided are -/// too big. Whether that's a pattern that is too long, too many patterns -/// or some combination of both. A first approximation for the scale at which -/// construction can fail is somewhere around "millions of patterns." -/// -/// For that reason, if you're building an Aho-Corasick automaton from -/// untrusted input (or input that doesn't have any reasonable bounds on its -/// size), then it is strongly recommended to handle the possibility of an -/// error. -/// -/// If you're constructing an Aho-Corasick automaton from static or trusted -/// data, then it is likely acceptable to panic (by calling `unwrap()` or -/// `expect()`) if construction fails. -/// -/// # Fallibility -/// -/// The `AhoCorasick` type provides a number of methods for searching, as one -/// might expect. Depending on how the Aho-Corasick automaton was built and -/// depending on the search configuration, it is possible for a search to -/// return an error. Since an error is _never_ dependent on the actual contents -/// of the haystack, this type provides both infallible and fallible methods -/// for searching. The infallible methods panic if an error occurs, and can be -/// used for convenience and when you know the search will never return an -/// error. -/// -/// For example, the [`AhoCorasick::find_iter`] method is the infallible -/// version of the [`AhoCorasick::try_find_iter`] method. -/// -/// Examples of errors that can occur: -/// -/// * Running a search that requires [`MatchKind::Standard`] semantics (such -/// as a stream or overlapping search) with an automaton that was built with -/// [`MatchKind::LeftmostFirst`] or [`MatchKind::LeftmostLongest`] semantics. -/// * Running an anchored search with an automaton that only supports -/// unanchored searches. (By default, `AhoCorasick` only supports unanchored -/// searches. But this can be toggled with [`AhoCorasickBuilder::start_kind`].) -/// * Running an unanchored search with an automaton that only supports -/// anchored searches. -/// -/// The common thread between the different types of errors is that they are -/// all rooted in the automaton construction and search configurations. If -/// those configurations are a static property of your program, then it is -/// reasonable to call infallible routines since you know an error will never -/// occur. And if one _does_ occur, then it's a bug in your program. -/// -/// To re-iterate, if the patterns, build or search configuration come from -/// user or untrusted data, then you should handle errors at build or search -/// time. 
If only the haystack comes from user or untrusted data, then there -/// should be no need to handle errors anywhere and it is generally encouraged -/// to `unwrap()` (or `expect()`) both build and search time calls. -/// -/// # Examples -/// -/// This example shows how to search for occurrences of multiple patterns -/// simultaneously in a case insensitive fashion. Each match includes the -/// pattern that matched along with the byte offsets of the match. -/// -/// ``` -/// use aho_corasick::{AhoCorasick, PatternID}; -/// -/// let patterns = &["apple", "maple", "snapple"]; -/// let haystack = "Nobody likes maple in their apple flavored Snapple."; -/// -/// let ac = AhoCorasick::builder() -/// .ascii_case_insensitive(true) -/// .build(patterns) -/// .unwrap(); -/// let mut matches = vec![]; -/// for mat in ac.find_iter(haystack) { -/// matches.push((mat.pattern(), mat.start(), mat.end())); -/// } -/// assert_eq!(matches, vec![ -/// (PatternID::must(1), 13, 18), -/// (PatternID::must(0), 28, 33), -/// (PatternID::must(2), 43, 50), -/// ]); -/// ``` -/// -/// This example shows how to replace matches with some other string: -/// -/// ``` -/// use aho_corasick::AhoCorasick; -/// -/// let patterns = &["fox", "brown", "quick"]; -/// let haystack = "The quick brown fox."; -/// let replace_with = &["sloth", "grey", "slow"]; -/// -/// let ac = AhoCorasick::new(patterns).unwrap(); -/// let result = ac.replace_all(haystack, replace_with); -/// assert_eq!(result, "The slow grey sloth."); -/// ``` -#[derive(Clone)] -pub struct AhoCorasick { - /// The underlying Aho-Corasick automaton. It's one of - /// nfa::noncontiguous::NFA, nfa::contiguous::NFA or dfa::DFA. - aut: Arc<dyn AcAutomaton>, - /// The specific Aho-Corasick kind chosen. This makes it possible to - /// inspect any `AhoCorasick` and know what kind of search strategy it - /// uses. - kind: AhoCorasickKind, - /// The start kind of this automaton as configured by the caller. - /// - /// We don't really *need* to put this here, since the underlying automaton - /// will correctly return errors if the caller requests an unsupported - /// search type. But we do keep this here for API behavior consistency. - /// Namely, the NFAs in this crate support both unanchored and anchored - /// searches unconditionally. There's no way to disable one or the other. - /// They always both work. But the DFA in this crate specifically only - /// supports both unanchored and anchored searches if it's configured to - /// do so. Why? Because for the DFA, supporting both essentially requires - /// two copies of the transition table: one generated by following failure - /// transitions from the original NFA and one generated by not following - /// those failure transitions. - /// - /// So why record the start kind here? Well, consider what happens - /// when no specific 'AhoCorasickKind' is selected by the caller and - /// 'StartKind::Unanchored' is used (both are the default). It *might* - /// result in using a DFA or it might pick an NFA. If it picks an NFA, the - /// caller would then be able to run anchored searches, even though the - /// caller only asked for support for unanchored searches. Maybe that's - /// fine, but what if the DFA was chosen instead? Oops, the caller would - /// get an error. - /// - /// Basically, it seems bad to return an error or not based on some - /// internal implementation choice. 
So we smooth things out and ensure - /// anchored searches *always* report an error when only unanchored support - /// was asked for (and vice versa), even if the underlying automaton - /// supports it. - start_kind: StartKind, -} - -/// Convenience constructors for an Aho-Corasick searcher. To configure the -/// searcher, use an [`AhoCorasickBuilder`] instead. -impl AhoCorasick { - /// Create a new Aho-Corasick automaton using the default configuration. - /// - /// The default configuration optimizes for less space usage, but at the - /// expense of longer search times. To change the configuration, use - /// [`AhoCorasickBuilder`]. - /// - /// This uses the default [`MatchKind::Standard`] match semantics, which - /// reports a match as soon as it is found. This corresponds to the - /// standard match semantics supported by textbook descriptions of the - /// Aho-Corasick algorithm. - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// use aho_corasick::{AhoCorasick, PatternID}; - /// - /// let ac = AhoCorasick::new(&["foo", "bar", "baz"]).unwrap(); - /// assert_eq!( - /// Some(PatternID::must(1)), - /// ac.find("xxx bar xxx").map(|m| m.pattern()), - /// ); - /// ``` - pub fn new<I, P>(patterns: I) -> Result<AhoCorasick, BuildError> - where - I: IntoIterator<Item = P>, - P: AsRef<[u8]>, - { - AhoCorasickBuilder::new().build(patterns) - } - - /// A convenience method for returning a new Aho-Corasick builder. - /// - /// This usually permits one to just import the `AhoCorasick` type. - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// use aho_corasick::{AhoCorasick, Match, MatchKind}; - /// - /// let ac = AhoCorasick::builder() - /// .match_kind(MatchKind::LeftmostFirst) - /// .build(&["samwise", "sam"]) - /// .unwrap(); - /// assert_eq!(Some(Match::must(0, 0..7)), ac.find("samwise")); - /// ``` - pub fn builder() -> AhoCorasickBuilder { - AhoCorasickBuilder::new() - } -} - -/// Infallible search routines. These APIs panic when the underlying search -/// would otherwise fail. Infallible routines are useful because the errors are -/// a result of both search-time configuration and what configuration is used -/// to build the Aho-Corasick searcher. Both of these things are not usually -/// the result of user input, and thus, an error is typically indicative of a -/// programmer error. In cases where callers want errors instead of panics, use -/// the corresponding `try` method in the section below. -impl AhoCorasick { - /// Returns true if and only if this automaton matches the haystack at any - /// position. - /// - /// `input` may be any type that is cheaply convertible to an `Input`. This - /// includes, but is not limited to, `&str` and `&[u8]`. - /// - /// Aside from convenience, when `AhoCorasick` was built with - /// leftmost-first or leftmost-longest semantics, this might result in a - /// search that visits less of the haystack than [`AhoCorasick::find`] - /// would otherwise. (For standard semantics, matches are always - /// immediately returned once they are seen, so there is no way for this to - /// do less work in that case.) - /// - /// Note that there is no corresponding fallible routine for this method. - /// If you need a fallible version of this, then [`AhoCorasick::try_find`] - /// can be used with [`Input::earliest`] enabled. 
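The removed doc comment above notes that `is_match` has no fallible counterpart and points to `try_find` with `Input::earliest` instead. A minimal sketch of that combination, with illustrative patterns, assuming the aho-corasick 1.x API documented in the deleted file:

```rust
use aho_corasick::{AhoCorasick, Input};

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let ac = AhoCorasick::new(&["foo", "bar", "quux", "baz"])?;

    // `earliest(true)` stops the search as soon as a match is known to
    // exist, which is the same work `is_match` does, but a configuration
    // error is returned instead of causing a panic.
    let hit = ac.try_find(Input::new("xxx bar xxx").earliest(true))?;
    assert!(hit.is_some());

    let miss = ac.try_find(Input::new("xxx qux xxx").earliest(true))?;
    assert!(miss.is_none());
    Ok(())
}
```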
- /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// use aho_corasick::AhoCorasick; - /// - /// let ac = AhoCorasick::new(&[ - /// "foo", "bar", "quux", "baz", - /// ]).unwrap(); - /// assert!(ac.is_match("xxx bar xxx")); - /// assert!(!ac.is_match("xxx qux xxx")); - /// ``` - pub fn is_match<'h, I: Into<Input<'h>>>(&self, input: I) -> bool { - self.aut - .try_find(&input.into().earliest(true)) - .expect("AhoCorasick::try_find is not expected to fail") - .is_some() - } - - /// Returns the location of the first match according to the match - /// semantics that this automaton was constructed with. - /// - /// `input` may be any type that is cheaply convertible to an `Input`. This - /// includes, but is not limited to, `&str` and `&[u8]`. - /// - /// This is the infallible version of [`AhoCorasick::try_find`]. - /// - /// # Panics - /// - /// This panics when [`AhoCorasick::try_find`] would return an error. - /// - /// # Examples - /// - /// Basic usage, with standard semantics: - /// - /// ``` - /// use aho_corasick::{AhoCorasick, MatchKind}; - /// - /// let patterns = &["b", "abc", "abcd"]; - /// let haystack = "abcd"; - /// - /// let ac = AhoCorasick::builder() - /// .match_kind(MatchKind::Standard) // default, not necessary - /// .build(patterns) - /// .unwrap(); - /// let mat = ac.find(haystack).expect("should have a match"); - /// assert_eq!("b", &haystack[mat.start()..mat.end()]); - /// ``` - /// - /// Now with leftmost-first semantics: - /// - /// ``` - /// use aho_corasick::{AhoCorasick, MatchKind}; - /// - /// let patterns = &["b", "abc", "abcd"]; - /// let haystack = "abcd"; - /// - /// let ac = AhoCorasick::builder() - /// .match_kind(MatchKind::LeftmostFirst) - /// .build(patterns) - /// .unwrap(); - /// let mat = ac.find(haystack).expect("should have a match"); - /// assert_eq!("abc", &haystack[mat.start()..mat.end()]); - /// ``` - /// - /// And finally, leftmost-longest semantics: - /// - /// ``` - /// use aho_corasick::{AhoCorasick, MatchKind}; - /// - /// let patterns = &["b", "abc", "abcd"]; - /// let haystack = "abcd"; - /// - /// let ac = AhoCorasick::builder() - /// .match_kind(MatchKind::LeftmostLongest) - /// .build(patterns) - /// .unwrap(); - /// let mat = ac.find(haystack).expect("should have a match"); - /// ``` - /// - /// # Example: configuring a search - /// - /// Because this method accepts anything that can be turned into an - /// [`Input`], it's possible to provide an `Input` directly in order to - /// configure the search. In this example, we show how to use the - /// `earliest` option to force the search to return as soon as it knows - /// a match has occurred. - /// - /// ``` - /// use aho_corasick::{AhoCorasick, Input, MatchKind}; - /// - /// let patterns = &["b", "abc", "abcd"]; - /// let haystack = "abcd"; - /// - /// let ac = AhoCorasick::builder() - /// .match_kind(MatchKind::LeftmostLongest) - /// .build(patterns) - /// .unwrap(); - /// let mat = ac.find(Input::new(haystack).earliest(true)) - /// .expect("should have a match"); - /// // The correct leftmost-longest match here is 'abcd', but since we - /// // told the search to quit as soon as it knows a match has occurred, - /// // we get a different match back. 
- /// assert_eq!("b", &haystack[mat.start()..mat.end()]); - /// ``` - pub fn find<'h, I: Into<Input<'h>>>(&self, input: I) -> Option<Match> { - self.try_find(input) - .expect("AhoCorasick::try_find is not expected to fail") - } - - /// Returns the location of the first overlapping match in the given - /// input with respect to the current state of the underlying searcher. - /// - /// `input` may be any type that is cheaply convertible to an `Input`. This - /// includes, but is not limited to, `&str` and `&[u8]`. - /// - /// Overlapping searches do not report matches in their return value. - /// Instead, matches can be accessed via [`OverlappingState::get_match`] - /// after a search call. - /// - /// This is the infallible version of - /// [`AhoCorasick::try_find_overlapping`]. - /// - /// # Panics - /// - /// This panics when [`AhoCorasick::try_find_overlapping`] would - /// return an error. For example, when the Aho-Corasick searcher - /// doesn't support overlapping searches. (Only searchers built with - /// [`MatchKind::Standard`] semantics support overlapping searches.) - /// - /// # Example - /// - /// This shows how we can repeatedly call an overlapping search without - /// ever needing to explicitly re-slice the haystack. Overlapping search - /// works this way because searches depend on state saved during the - /// previous search. - /// - /// ``` - /// use aho_corasick::{ - /// automaton::OverlappingState, - /// AhoCorasick, Input, Match, - /// }; - /// - /// let patterns = &["append", "appendage", "app"]; - /// let haystack = "append the app to the appendage"; - /// - /// let ac = AhoCorasick::new(patterns).unwrap(); - /// let mut state = OverlappingState::start(); - /// - /// ac.find_overlapping(haystack, &mut state); - /// assert_eq!(Some(Match::must(2, 0..3)), state.get_match()); - /// - /// ac.find_overlapping(haystack, &mut state); - /// assert_eq!(Some(Match::must(0, 0..6)), state.get_match()); - /// - /// ac.find_overlapping(haystack, &mut state); - /// assert_eq!(Some(Match::must(2, 11..14)), state.get_match()); - /// - /// ac.find_overlapping(haystack, &mut state); - /// assert_eq!(Some(Match::must(2, 22..25)), state.get_match()); - /// - /// ac.find_overlapping(haystack, &mut state); - /// assert_eq!(Some(Match::must(0, 22..28)), state.get_match()); - /// - /// ac.find_overlapping(haystack, &mut state); - /// assert_eq!(Some(Match::must(1, 22..31)), state.get_match()); - /// - /// // No more match matches to be found. - /// ac.find_overlapping(haystack, &mut state); - /// assert_eq!(None, state.get_match()); - /// ``` - pub fn find_overlapping<'h, I: Into<Input<'h>>>( - &self, - input: I, - state: &mut OverlappingState, - ) { - self.try_find_overlapping(input, state).expect( - "AhoCorasick::try_find_overlapping is not expected to fail", - ) - } - - /// Returns an iterator of non-overlapping matches, using the match - /// semantics that this automaton was constructed with. - /// - /// `input` may be any type that is cheaply convertible to an `Input`. This - /// includes, but is not limited to, `&str` and `&[u8]`. - /// - /// This is the infallible version of [`AhoCorasick::try_find_iter`]. - /// - /// # Panics - /// - /// This panics when [`AhoCorasick::try_find_iter`] would return an error. 
- /// - /// # Examples - /// - /// Basic usage, with standard semantics: - /// - /// ``` - /// use aho_corasick::{AhoCorasick, MatchKind, PatternID}; - /// - /// let patterns = &["append", "appendage", "app"]; - /// let haystack = "append the app to the appendage"; - /// - /// let ac = AhoCorasick::builder() - /// .match_kind(MatchKind::Standard) // default, not necessary - /// .build(patterns) - /// .unwrap(); - /// let matches: Vec<PatternID> = ac - /// .find_iter(haystack) - /// .map(|mat| mat.pattern()) - /// .collect(); - /// assert_eq!(vec![ - /// PatternID::must(2), - /// PatternID::must(2), - /// PatternID::must(2), - /// ], matches); - /// ``` - /// - /// Now with leftmost-first semantics: - /// - /// ``` - /// use aho_corasick::{AhoCorasick, MatchKind, PatternID}; - /// - /// let patterns = &["append", "appendage", "app"]; - /// let haystack = "append the app to the appendage"; - /// - /// let ac = AhoCorasick::builder() - /// .match_kind(MatchKind::LeftmostFirst) - /// .build(patterns) - /// .unwrap(); - /// let matches: Vec<PatternID> = ac - /// .find_iter(haystack) - /// .map(|mat| mat.pattern()) - /// .collect(); - /// assert_eq!(vec![ - /// PatternID::must(0), - /// PatternID::must(2), - /// PatternID::must(0), - /// ], matches); - /// ``` - /// - /// And finally, leftmost-longest semantics: - /// - /// ``` - /// use aho_corasick::{AhoCorasick, MatchKind, PatternID}; - /// - /// let patterns = &["append", "appendage", "app"]; - /// let haystack = "append the app to the appendage"; - /// - /// let ac = AhoCorasick::builder() - /// .match_kind(MatchKind::LeftmostLongest) - /// .build(patterns) - /// .unwrap(); - /// let matches: Vec<PatternID> = ac - /// .find_iter(haystack) - /// .map(|mat| mat.pattern()) - /// .collect(); - /// assert_eq!(vec![ - /// PatternID::must(0), - /// PatternID::must(2), - /// PatternID::must(1), - /// ], matches); - /// ``` - pub fn find_iter<'a, 'h, I: Into<Input<'h>>>( - &'a self, - input: I, - ) -> FindIter<'a, 'h> { - self.try_find_iter(input) - .expect("AhoCorasick::try_find_iter is not expected to fail") - } - - /// Returns an iterator of overlapping matches. Stated differently, this - /// returns an iterator of all possible matches at every position. - /// - /// `input` may be any type that is cheaply convertible to an `Input`. This - /// includes, but is not limited to, `&str` and `&[u8]`. - /// - /// This is the infallible version of - /// [`AhoCorasick::try_find_overlapping_iter`]. - /// - /// # Panics - /// - /// This panics when `AhoCorasick::try_find_overlapping_iter` would return - /// an error. For example, when the Aho-Corasick searcher is built with - /// either leftmost-first or leftmost-longest match semantics. Stated - /// differently, overlapping searches require one to build the searcher - /// with [`MatchKind::Standard`] (it is the default). 
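Since the doc comment above ties overlapping iteration to `MatchKind::Standard`, here is a small sketch (illustrative patterns, same assumed aho-corasick 1.x API) of how the fallible constructor surfaces that restriction as an error rather than a panic:

```rust
use aho_corasick::{AhoCorasick, Input, MatchKind};

fn main() {
    let patterns = &["append", "appendage", "app"];
    let haystack = "append the app to the appendage";

    // Leftmost-first semantics cannot support overlapping searches, so
    // building the overlapping iterator reports an error.
    let leftmost = AhoCorasick::builder()
        .match_kind(MatchKind::LeftmostFirst)
        .build(patterns)
        .unwrap();
    assert!(leftmost.try_find_overlapping_iter(Input::new(haystack)).is_err());

    // The default standard semantics accept the same call.
    let standard = AhoCorasick::new(patterns).unwrap();
    assert!(standard.try_find_overlapping_iter(Input::new(haystack)).is_ok());
}
```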
- /// - /// # Example: basic usage - /// - /// ``` - /// use aho_corasick::{AhoCorasick, PatternID}; - /// - /// let patterns = &["append", "appendage", "app"]; - /// let haystack = "append the app to the appendage"; - /// - /// let ac = AhoCorasick::new(patterns).unwrap(); - /// let matches: Vec<PatternID> = ac - /// .find_overlapping_iter(haystack) - /// .map(|mat| mat.pattern()) - /// .collect(); - /// assert_eq!(vec![ - /// PatternID::must(2), - /// PatternID::must(0), - /// PatternID::must(2), - /// PatternID::must(2), - /// PatternID::must(0), - /// PatternID::must(1), - /// ], matches); - /// ``` - pub fn find_overlapping_iter<'a, 'h, I: Into<Input<'h>>>( - &'a self, - input: I, - ) -> FindOverlappingIter<'a, 'h> { - self.try_find_overlapping_iter(input).expect( - "AhoCorasick::try_find_overlapping_iter is not expected to fail", - ) - } - - /// Replace all matches with a corresponding value in the `replace_with` - /// slice given. Matches correspond to the same matches as reported by - /// [`AhoCorasick::find_iter`]. - /// - /// Replacements are determined by the index of the matching pattern. - /// For example, if the pattern with index `2` is found, then it is - /// replaced by `replace_with[2]`. - /// - /// This is the infallible version of [`AhoCorasick::try_replace_all`]. - /// - /// # Panics - /// - /// This panics when [`AhoCorasick::try_replace_all`] would return an - /// error. - /// - /// This also panics when `replace_with.len()` does not equal - /// [`AhoCorasick::patterns_len`]. - /// - /// # Example: basic usage - /// - /// ``` - /// use aho_corasick::{AhoCorasick, MatchKind}; - /// - /// let patterns = &["append", "appendage", "app"]; - /// let haystack = "append the app to the appendage"; - /// - /// let ac = AhoCorasick::builder() - /// .match_kind(MatchKind::LeftmostFirst) - /// .build(patterns) - /// .unwrap(); - /// let result = ac.replace_all(haystack, &["x", "y", "z"]); - /// assert_eq!("x the z to the xage", result); - /// ``` - pub fn replace_all<B>(&self, haystack: &str, replace_with: &[B]) -> String - where - B: AsRef<str>, - { - self.try_replace_all(haystack, replace_with) - .expect("AhoCorasick::try_replace_all is not expected to fail") - } - - /// Replace all matches using raw bytes with a corresponding value in the - /// `replace_with` slice given. Matches correspond to the same matches as - /// reported by [`AhoCorasick::find_iter`]. - /// - /// Replacements are determined by the index of the matching pattern. - /// For example, if the pattern with index `2` is found, then it is - /// replaced by `replace_with[2]`. - /// - /// This is the infallible version of - /// [`AhoCorasick::try_replace_all_bytes`]. - /// - /// # Panics - /// - /// This panics when [`AhoCorasick::try_replace_all_bytes`] would return an - /// error. - /// - /// This also panics when `replace_with.len()` does not equal - /// [`AhoCorasick::patterns_len`]. 
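Because both `replace_all` and `replace_all_bytes` panic when the replacement slice length differs from the pattern count, a caller that assembles the two slices separately can check against `patterns_len` first. A short sketch under the same assumptions as the removed examples:

```rust
use aho_corasick::{AhoCorasick, MatchKind};

fn main() {
    let patterns = &["append", "appendage", "app"];
    let replacements: &[&[u8]] = &[b"x", b"y", b"z"];

    let ac = AhoCorasick::builder()
        .match_kind(MatchKind::LeftmostFirst)
        .build(patterns)
        .unwrap();

    // `replace_all_bytes` panics if these lengths disagree, so verify the
    // invariant up front when the slices come from different places.
    assert_eq!(ac.patterns_len(), replacements.len());

    let result = ac.replace_all_bytes(b"append the app to the appendage", replacements);
    assert_eq!(b"x the z to the xage".to_vec(), result);
}
```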
- /// - /// # Example: basic usage - /// - /// ``` - /// use aho_corasick::{AhoCorasick, MatchKind}; - /// - /// let patterns = &["append", "appendage", "app"]; - /// let haystack = b"append the app to the appendage"; - /// - /// let ac = AhoCorasick::builder() - /// .match_kind(MatchKind::LeftmostFirst) - /// .build(patterns) - /// .unwrap(); - /// let result = ac.replace_all_bytes(haystack, &["x", "y", "z"]); - /// assert_eq!(b"x the z to the xage".to_vec(), result); - /// ``` - pub fn replace_all_bytes<B>( - &self, - haystack: &[u8], - replace_with: &[B], - ) -> Vec<u8> - where - B: AsRef<[u8]>, - { - self.try_replace_all_bytes(haystack, replace_with) - .expect("AhoCorasick::try_replace_all_bytes should not fail") - } - - /// Replace all matches using a closure called on each match. - /// Matches correspond to the same matches as reported by - /// [`AhoCorasick::find_iter`]. - /// - /// The closure accepts three parameters: the match found, the text of - /// the match and a string buffer with which to write the replaced text - /// (if any). If the closure returns `true`, then it continues to the next - /// match. If the closure returns `false`, then searching is stopped. - /// - /// Note that any matches with boundaries that don't fall on a valid UTF-8 - /// boundary are silently skipped. - /// - /// This is the infallible version of - /// [`AhoCorasick::try_replace_all_with`]. - /// - /// # Panics - /// - /// This panics when [`AhoCorasick::try_replace_all_with`] would return an - /// error. - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// use aho_corasick::{AhoCorasick, MatchKind}; - /// - /// let patterns = &["append", "appendage", "app"]; - /// let haystack = "append the app to the appendage"; - /// - /// let ac = AhoCorasick::builder() - /// .match_kind(MatchKind::LeftmostFirst) - /// .build(patterns) - /// .unwrap(); - /// let mut result = String::new(); - /// ac.replace_all_with(haystack, &mut result, |mat, _, dst| { - /// dst.push_str(&mat.pattern().as_usize().to_string()); - /// true - /// }); - /// assert_eq!("0 the 2 to the 0age", result); - /// ``` - /// - /// Stopping the replacement by returning `false` (continued from the - /// example above): - /// - /// ``` - /// # use aho_corasick::{AhoCorasick, MatchKind, PatternID}; - /// # let patterns = &["append", "appendage", "app"]; - /// # let haystack = "append the app to the appendage"; - /// # let ac = AhoCorasick::builder() - /// # .match_kind(MatchKind::LeftmostFirst) - /// # .build(patterns) - /// # .unwrap(); - /// let mut result = String::new(); - /// ac.replace_all_with(haystack, &mut result, |mat, _, dst| { - /// dst.push_str(&mat.pattern().as_usize().to_string()); - /// mat.pattern() != PatternID::must(2) - /// }); - /// assert_eq!("0 the 2 to the appendage", result); - /// ``` - pub fn replace_all_with<F>( - &self, - haystack: &str, - dst: &mut String, - replace_with: F, - ) where - F: FnMut(&Match, &str, &mut String) -> bool, - { - self.try_replace_all_with(haystack, dst, replace_with) - .expect("AhoCorasick::try_replace_all_with should not fail") - } - - /// Replace all matches using raw bytes with a closure called on each - /// match. Matches correspond to the same matches as reported by - /// [`AhoCorasick::find_iter`]. - /// - /// The closure accepts three parameters: the match found, the text of - /// the match and a byte buffer with which to write the replaced text - /// (if any). If the closure returns `true`, then it continues to the next - /// match. 
If the closure returns `false`, then searching is stopped. - /// - /// This is the infallible version of - /// [`AhoCorasick::try_replace_all_with_bytes`]. - /// - /// # Panics - /// - /// This panics when [`AhoCorasick::try_replace_all_with_bytes`] would - /// return an error. - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// use aho_corasick::{AhoCorasick, MatchKind}; - /// - /// let patterns = &["append", "appendage", "app"]; - /// let haystack = b"append the app to the appendage"; - /// - /// let ac = AhoCorasick::builder() - /// .match_kind(MatchKind::LeftmostFirst) - /// .build(patterns) - /// .unwrap(); - /// let mut result = vec![]; - /// ac.replace_all_with_bytes(haystack, &mut result, |mat, _, dst| { - /// dst.extend(mat.pattern().as_usize().to_string().bytes()); - /// true - /// }); - /// assert_eq!(b"0 the 2 to the 0age".to_vec(), result); - /// ``` - /// - /// Stopping the replacement by returning `false` (continued from the - /// example above): - /// - /// ``` - /// # use aho_corasick::{AhoCorasick, MatchKind, PatternID}; - /// # let patterns = &["append", "appendage", "app"]; - /// # let haystack = b"append the app to the appendage"; - /// # let ac = AhoCorasick::builder() - /// # .match_kind(MatchKind::LeftmostFirst) - /// # .build(patterns) - /// # .unwrap(); - /// let mut result = vec![]; - /// ac.replace_all_with_bytes(haystack, &mut result, |mat, _, dst| { - /// dst.extend(mat.pattern().as_usize().to_string().bytes()); - /// mat.pattern() != PatternID::must(2) - /// }); - /// assert_eq!(b"0 the 2 to the appendage".to_vec(), result); - /// ``` - pub fn replace_all_with_bytes<F>( - &self, - haystack: &[u8], - dst: &mut Vec<u8>, - replace_with: F, - ) where - F: FnMut(&Match, &[u8], &mut Vec<u8>) -> bool, - { - self.try_replace_all_with_bytes(haystack, dst, replace_with) - .expect("AhoCorasick::try_replace_all_with_bytes should not fail") - } - - /// Returns an iterator of non-overlapping matches in the given - /// stream. Matches correspond to the same matches as reported by - /// [`AhoCorasick::find_iter`]. - /// - /// The matches yielded by this iterator use absolute position offsets in - /// the stream given, where the first byte has index `0`. Matches are - /// yieled until the stream is exhausted. - /// - /// Each item yielded by the iterator is an `Result<Match, - /// std::io::Error>`, where an error is yielded if there was a problem - /// reading from the reader given. - /// - /// When searching a stream, an internal buffer is used. Therefore, callers - /// should avoiding providing a buffered reader, if possible. - /// - /// This is the infallible version of - /// [`AhoCorasick::try_stream_find_iter`]. Note that both methods return - /// iterators that produce `Result` values. The difference is that this - /// routine panics if _construction_ of the iterator failed. The `Result` - /// values yield by the iterator come from whether the given reader returns - /// an error or not during the search. - /// - /// # Memory usage - /// - /// In general, searching streams will use a constant amount of memory for - /// its internal buffer. The one requirement is that the internal buffer - /// must be at least the size of the longest possible match. In most use - /// cases, the default buffer size will be much larger than any individual - /// match. - /// - /// # Panics - /// - /// This panics when [`AhoCorasick::try_stream_find_iter`] would return - /// an error. For example, when the Aho-Corasick searcher doesn't support - /// stream searches. 
(Only searchers built with [`MatchKind::Standard`] - /// semantics support stream searches.) - /// - /// # Example: basic usage - /// - /// ``` - /// use aho_corasick::{AhoCorasick, PatternID}; - /// - /// let patterns = &["append", "appendage", "app"]; - /// let haystack = "append the app to the appendage"; - /// - /// let ac = AhoCorasick::new(patterns).unwrap(); - /// let mut matches = vec![]; - /// for result in ac.stream_find_iter(haystack.as_bytes()) { - /// let mat = result?; - /// matches.push(mat.pattern()); - /// } - /// assert_eq!(vec![ - /// PatternID::must(2), - /// PatternID::must(2), - /// PatternID::must(2), - /// ], matches); - /// - /// # Ok::<(), Box<dyn std::error::Error>>(()) - /// ``` - #[cfg(feature = "std")] - pub fn stream_find_iter<'a, R: std::io::Read>( - &'a self, - rdr: R, - ) -> StreamFindIter<'a, R> { - self.try_stream_find_iter(rdr) - .expect("AhoCorasick::try_stream_find_iter should not fail") - } -} - -/// Fallible search routines. These APIs return an error in cases where the -/// infallible routines would panic. -impl AhoCorasick { - /// Returns the location of the first match according to the match - /// semantics that this automaton was constructed with, and according - /// to the given `Input` configuration. - /// - /// This is the fallible version of [`AhoCorasick::find`]. - /// - /// # Errors - /// - /// This returns an error when this Aho-Corasick searcher does not support - /// the given `Input` configuration. - /// - /// For example, if the Aho-Corasick searcher only supports anchored - /// searches or only supports unanchored searches, then providing an - /// `Input` that requests an anchored (or unanchored) search when it isn't - /// supported would result in an error. - /// - /// # Example: leftmost-first searching - /// - /// Basic usage with leftmost-first semantics: - /// - /// ``` - /// use aho_corasick::{AhoCorasick, MatchKind, Input}; - /// - /// let patterns = &["b", "abc", "abcd"]; - /// let haystack = "foo abcd"; - /// - /// let ac = AhoCorasick::builder() - /// .match_kind(MatchKind::LeftmostFirst) - /// .build(patterns) - /// .unwrap(); - /// let mat = ac.try_find(haystack)?.expect("should have a match"); - /// assert_eq!("abc", &haystack[mat.span()]); - /// - /// # Ok::<(), Box<dyn std::error::Error>>(()) - /// ``` - /// - /// # Example: anchored leftmost-first searching - /// - /// This shows how to anchor the search, so that even if the haystack - /// contains a match somewhere, a match won't be reported unless one can - /// be found that starts at the beginning of the search: - /// - /// ``` - /// use aho_corasick::{AhoCorasick, Anchored, Input, MatchKind, StartKind}; - /// - /// let patterns = &["b", "abc", "abcd"]; - /// let haystack = "foo abcd"; - /// - /// let ac = AhoCorasick::builder() - /// .match_kind(MatchKind::LeftmostFirst) - /// .start_kind(StartKind::Anchored) - /// .build(patterns) - /// .unwrap(); - /// let input = Input::new(haystack).anchored(Anchored::Yes); - /// assert_eq!(None, ac.try_find(input)?); - /// - /// # Ok::<(), Box<dyn std::error::Error>>(()) - /// ``` - /// - /// If the beginning of the search is changed to where a match begins, then - /// it will be found: - /// - /// ``` - /// use aho_corasick::{AhoCorasick, Anchored, Input, MatchKind, StartKind}; - /// - /// let patterns = &["b", "abc", "abcd"]; - /// let haystack = "foo abcd"; - /// - /// let ac = AhoCorasick::builder() - /// .match_kind(MatchKind::LeftmostFirst) - /// .start_kind(StartKind::Anchored) - /// .build(patterns) - /// .unwrap(); - 
/// let input = Input::new(haystack).range(4..).anchored(Anchored::Yes); - /// let mat = ac.try_find(input)?.expect("should have a match"); - /// assert_eq!("abc", &haystack[mat.span()]); - /// - /// # Ok::<(), Box<dyn std::error::Error>>(()) - /// ``` - /// - /// # Example: earliest leftmost-first searching - /// - /// This shows how to run an "earliest" search even when the Aho-Corasick - /// searcher was compiled with leftmost-first match semantics. In this - /// case, the search is stopped as soon as it is known that a match has - /// occurred, even if it doesn't correspond to the leftmost-first match. - /// - /// ``` - /// use aho_corasick::{AhoCorasick, Input, MatchKind}; - /// - /// let patterns = &["b", "abc", "abcd"]; - /// let haystack = "foo abcd"; - /// - /// let ac = AhoCorasick::builder() - /// .match_kind(MatchKind::LeftmostFirst) - /// .build(patterns) - /// .unwrap(); - /// let input = Input::new(haystack).earliest(true); - /// let mat = ac.try_find(input)?.expect("should have a match"); - /// assert_eq!("b", &haystack[mat.span()]); - /// - /// # Ok::<(), Box<dyn std::error::Error>>(()) - /// ``` - pub fn try_find<'h, I: Into<Input<'h>>>( - &self, - input: I, - ) -> Result<Option<Match>, MatchError> { - let input = input.into(); - enforce_anchored_consistency(self.start_kind, input.get_anchored())?; - self.aut.try_find(&input) - } - - /// Returns the location of the first overlapping match in the given - /// input with respect to the current state of the underlying searcher. - /// - /// Overlapping searches do not report matches in their return value. - /// Instead, matches can be accessed via [`OverlappingState::get_match`] - /// after a search call. - /// - /// This is the fallible version of [`AhoCorasick::find_overlapping`]. - /// - /// # Errors - /// - /// This returns an error when this Aho-Corasick searcher does not support - /// the given `Input` configuration or if overlapping search is not - /// supported. - /// - /// One example is that only Aho-Corasicker searchers built with - /// [`MatchKind::Standard`] semantics support overlapping searches. Using - /// any other match semantics will result in this returning an error. - /// - /// # Example: basic usage - /// - /// This shows how we can repeatedly call an overlapping search without - /// ever needing to explicitly re-slice the haystack. Overlapping search - /// works this way because searches depend on state saved during the - /// previous search. 
- /// - /// ``` - /// use aho_corasick::{ - /// automaton::OverlappingState, - /// AhoCorasick, Input, Match, - /// }; - /// - /// let patterns = &["append", "appendage", "app"]; - /// let haystack = "append the app to the appendage"; - /// - /// let ac = AhoCorasick::new(patterns).unwrap(); - /// let mut state = OverlappingState::start(); - /// - /// ac.try_find_overlapping(haystack, &mut state)?; - /// assert_eq!(Some(Match::must(2, 0..3)), state.get_match()); - /// - /// ac.try_find_overlapping(haystack, &mut state)?; - /// assert_eq!(Some(Match::must(0, 0..6)), state.get_match()); - /// - /// ac.try_find_overlapping(haystack, &mut state)?; - /// assert_eq!(Some(Match::must(2, 11..14)), state.get_match()); - /// - /// ac.try_find_overlapping(haystack, &mut state)?; - /// assert_eq!(Some(Match::must(2, 22..25)), state.get_match()); - /// - /// ac.try_find_overlapping(haystack, &mut state)?; - /// assert_eq!(Some(Match::must(0, 22..28)), state.get_match()); - /// - /// ac.try_find_overlapping(haystack, &mut state)?; - /// assert_eq!(Some(Match::must(1, 22..31)), state.get_match()); - /// - /// // No more match matches to be found. - /// ac.try_find_overlapping(haystack, &mut state)?; - /// assert_eq!(None, state.get_match()); - /// - /// # Ok::<(), Box<dyn std::error::Error>>(()) - /// ``` - /// - /// # Example: implementing your own overlapping iteration - /// - /// The previous example can be easily adapted to implement your own - /// iteration by repeatedly calling `try_find_overlapping` until either - /// an error occurs or no more matches are reported. - /// - /// This is effectively equivalent to the iterator returned by - /// [`AhoCorasick::try_find_overlapping_iter`], with the only difference - /// being that the iterator checks for errors before construction and - /// absolves the caller of needing to check for errors on every search - /// call. (Indeed, if the first `try_find_overlapping` call succeeds and - /// the same `Input` is given to subsequent calls, then all subsequent - /// calls are guaranteed to succeed.) - /// - /// ``` - /// use aho_corasick::{ - /// automaton::OverlappingState, - /// AhoCorasick, Input, Match, - /// }; - /// - /// let patterns = &["append", "appendage", "app"]; - /// let haystack = "append the app to the appendage"; - /// - /// let ac = AhoCorasick::new(patterns).unwrap(); - /// let mut state = OverlappingState::start(); - /// let mut matches = vec![]; - /// - /// loop { - /// ac.try_find_overlapping(haystack, &mut state)?; - /// let mat = match state.get_match() { - /// None => break, - /// Some(mat) => mat, - /// }; - /// matches.push(mat); - /// } - /// let expected = vec![ - /// Match::must(2, 0..3), - /// Match::must(0, 0..6), - /// Match::must(2, 11..14), - /// Match::must(2, 22..25), - /// Match::must(0, 22..28), - /// Match::must(1, 22..31), - /// ]; - /// assert_eq!(expected, matches); - /// - /// # Ok::<(), Box<dyn std::error::Error>>(()) - /// ``` - /// - /// # Example: anchored iteration - /// - /// The previous example can also be adapted to implement - /// iteration over all anchored matches. In particular, - /// [`AhoCorasick::try_find_overlapping_iter`] does not support this - /// because it isn't totally clear what the match semantics ought to be. - /// - /// In this example, we will find all overlapping matches that start at - /// the beginning of our search. 
- /// - /// ``` - /// use aho_corasick::{ - /// automaton::OverlappingState, - /// AhoCorasick, Anchored, Input, Match, StartKind, - /// }; - /// - /// let patterns = &["append", "appendage", "app"]; - /// let haystack = "append the app to the appendage"; - /// - /// let ac = AhoCorasick::builder() - /// .start_kind(StartKind::Anchored) - /// .build(patterns) - /// .unwrap(); - /// let input = Input::new(haystack).anchored(Anchored::Yes); - /// let mut state = OverlappingState::start(); - /// let mut matches = vec![]; - /// - /// loop { - /// ac.try_find_overlapping(input.clone(), &mut state)?; - /// let mat = match state.get_match() { - /// None => break, - /// Some(mat) => mat, - /// }; - /// matches.push(mat); - /// } - /// let expected = vec![ - /// Match::must(2, 0..3), - /// Match::must(0, 0..6), - /// ]; - /// assert_eq!(expected, matches); - /// - /// # Ok::<(), Box<dyn std::error::Error>>(()) - /// ``` - pub fn try_find_overlapping<'h, I: Into<Input<'h>>>( - &self, - input: I, - state: &mut OverlappingState, - ) -> Result<(), MatchError> { - let input = input.into(); - enforce_anchored_consistency(self.start_kind, input.get_anchored())?; - self.aut.try_find_overlapping(&input, state) - } - - /// Returns an iterator of non-overlapping matches, using the match - /// semantics that this automaton was constructed with. - /// - /// This is the fallible version of [`AhoCorasick::find_iter`]. - /// - /// Note that the error returned by this method occurs during construction - /// of the iterator. The iterator itself yields `Match` values. That is, - /// once the iterator is constructed, the iteration itself will never - /// report an error. - /// - /// # Errors - /// - /// This returns an error when this Aho-Corasick searcher does not support - /// the given `Input` configuration. - /// - /// For example, if the Aho-Corasick searcher only supports anchored - /// searches or only supports unanchored searches, then providing an - /// `Input` that requests an anchored (or unanchored) search when it isn't - /// supported would result in an error. - /// - /// # Example: leftmost-first searching - /// - /// Basic usage with leftmost-first semantics: - /// - /// ``` - /// use aho_corasick::{AhoCorasick, Input, MatchKind, PatternID}; - /// - /// let patterns = &["append", "appendage", "app"]; - /// let haystack = "append the app to the appendage"; - /// - /// let ac = AhoCorasick::builder() - /// .match_kind(MatchKind::LeftmostFirst) - /// .build(patterns) - /// .unwrap(); - /// let matches: Vec<PatternID> = ac - /// .try_find_iter(Input::new(haystack))? - /// .map(|mat| mat.pattern()) - /// .collect(); - /// assert_eq!(vec![ - /// PatternID::must(0), - /// PatternID::must(2), - /// PatternID::must(0), - /// ], matches); - /// - /// # Ok::<(), Box<dyn std::error::Error>>(()) - /// ``` - /// - /// # Example: anchored leftmost-first searching - /// - /// This shows how to anchor the search, such that all matches must begin - /// at the starting location of the search. For an iterator, an anchored - /// search implies that all matches are adjacent. 
- /// - /// ``` - /// use aho_corasick::{ - /// AhoCorasick, Anchored, Input, MatchKind, PatternID, StartKind, - /// }; - /// - /// let patterns = &["foo", "bar", "quux"]; - /// let haystack = "fooquuxbar foo"; - /// - /// let ac = AhoCorasick::builder() - /// .match_kind(MatchKind::LeftmostFirst) - /// .start_kind(StartKind::Anchored) - /// .build(patterns) - /// .unwrap(); - /// let matches: Vec<PatternID> = ac - /// .try_find_iter(Input::new(haystack).anchored(Anchored::Yes))? - /// .map(|mat| mat.pattern()) - /// .collect(); - /// assert_eq!(vec![ - /// PatternID::must(0), - /// PatternID::must(2), - /// PatternID::must(1), - /// // The final 'foo' is not found because it is not adjacent to the - /// // 'bar' match. It needs to be adjacent because our search is - /// // anchored. - /// ], matches); - /// - /// # Ok::<(), Box<dyn std::error::Error>>(()) - /// ``` - pub fn try_find_iter<'a, 'h, I: Into<Input<'h>>>( - &'a self, - input: I, - ) -> Result<FindIter<'a, 'h>, MatchError> { - let input = input.into(); - enforce_anchored_consistency(self.start_kind, input.get_anchored())?; - Ok(FindIter(self.aut.try_find_iter(input)?)) - } - - /// Returns an iterator of overlapping matches. - /// - /// This is the fallible version of [`AhoCorasick::find_overlapping_iter`]. - /// - /// Note that the error returned by this method occurs during construction - /// of the iterator. The iterator itself yields `Match` values. That is, - /// once the iterator is constructed, the iteration itself will never - /// report an error. - /// - /// # Errors - /// - /// This returns an error when this Aho-Corasick searcher does not support - /// the given `Input` configuration or does not support overlapping - /// searches. - /// - /// One example is that only Aho-Corasicker searchers built with - /// [`MatchKind::Standard`] semantics support overlapping searches. Using - /// any other match semantics will result in this returning an error. - /// - /// # Example: basic usage - /// - /// ``` - /// use aho_corasick::{AhoCorasick, Input, PatternID}; - /// - /// let patterns = &["append", "appendage", "app"]; - /// let haystack = "append the app to the appendage"; - /// - /// let ac = AhoCorasick::new(patterns).unwrap(); - /// let matches: Vec<PatternID> = ac - /// .try_find_overlapping_iter(Input::new(haystack))? - /// .map(|mat| mat.pattern()) - /// .collect(); - /// assert_eq!(vec![ - /// PatternID::must(2), - /// PatternID::must(0), - /// PatternID::must(2), - /// PatternID::must(2), - /// PatternID::must(0), - /// PatternID::must(1), - /// ], matches); - /// - /// # Ok::<(), Box<dyn std::error::Error>>(()) - /// ``` - /// - /// # Example: anchored overlapping search returns an error - /// - /// It isn't clear what the match semantics for anchored overlapping - /// iterators *ought* to be, so currently an error is returned. Callers - /// may use [`AhoCorasick::try_find_overlapping`] to implement their own - /// semantics if desired. 
- /// - /// ``` - /// use aho_corasick::{AhoCorasick, Anchored, Input, StartKind}; - /// - /// let patterns = &["append", "appendage", "app"]; - /// let haystack = "appendappendage app"; - /// - /// let ac = AhoCorasick::builder() - /// .start_kind(StartKind::Anchored) - /// .build(patterns) - /// .unwrap(); - /// let input = Input::new(haystack).anchored(Anchored::Yes); - /// assert!(ac.try_find_overlapping_iter(input).is_err()); - /// - /// # Ok::<(), Box<dyn std::error::Error>>(()) - /// ``` - pub fn try_find_overlapping_iter<'a, 'h, I: Into<Input<'h>>>( - &'a self, - input: I, - ) -> Result<FindOverlappingIter<'a, 'h>, MatchError> { - let input = input.into(); - enforce_anchored_consistency(self.start_kind, input.get_anchored())?; - Ok(FindOverlappingIter(self.aut.try_find_overlapping_iter(input)?)) - } - - /// Replace all matches with a corresponding value in the `replace_with` - /// slice given. Matches correspond to the same matches as reported by - /// [`AhoCorasick::try_find_iter`]. - /// - /// Replacements are determined by the index of the matching pattern. - /// For example, if the pattern with index `2` is found, then it is - /// replaced by `replace_with[2]`. - /// - /// # Panics - /// - /// This panics when `replace_with.len()` does not equal - /// [`AhoCorasick::patterns_len`]. - /// - /// # Errors - /// - /// This returns an error when this Aho-Corasick searcher does not support - /// the default `Input` configuration. More specifically, this occurs only - /// when the Aho-Corasick searcher does not support unanchored searches - /// since this replacement routine always does an unanchored search. - /// - /// # Example: basic usage - /// - /// ``` - /// use aho_corasick::{AhoCorasick, MatchKind}; - /// - /// let patterns = &["append", "appendage", "app"]; - /// let haystack = "append the app to the appendage"; - /// - /// let ac = AhoCorasick::builder() - /// .match_kind(MatchKind::LeftmostFirst) - /// .build(patterns) - /// .unwrap(); - /// let result = ac.try_replace_all(haystack, &["x", "y", "z"])?; - /// assert_eq!("x the z to the xage", result); - /// - /// # Ok::<(), Box<dyn std::error::Error>>(()) - /// ``` - pub fn try_replace_all<B>( - &self, - haystack: &str, - replace_with: &[B], - ) -> Result<String, MatchError> - where - B: AsRef<str>, - { - enforce_anchored_consistency(self.start_kind, Anchored::No)?; - self.aut.try_replace_all(haystack, replace_with) - } - - /// Replace all matches using raw bytes with a corresponding value in the - /// `replace_with` slice given. Matches correspond to the same matches as - /// reported by [`AhoCorasick::try_find_iter`]. - /// - /// Replacements are determined by the index of the matching pattern. - /// For example, if the pattern with index `2` is found, then it is - /// replaced by `replace_with[2]`. - /// - /// This is the fallible version of [`AhoCorasick::replace_all_bytes`]. - /// - /// # Panics - /// - /// This panics when `replace_with.len()` does not equal - /// [`AhoCorasick::patterns_len`]. - /// - /// # Errors - /// - /// This returns an error when this Aho-Corasick searcher does not support - /// the default `Input` configuration. More specifically, this occurs only - /// when the Aho-Corasick searcher does not support unanchored searches - /// since this replacement routine always does an unanchored search. 
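The error condition spelled out above, that replacement always runs an unanchored search, can be observed directly with the fallible API. A minimal sketch, with illustrative patterns, assuming the same aho-corasick 1.x API:

```rust
use aho_corasick::{AhoCorasick, StartKind};

fn main() {
    let patterns = &["append", "appendage", "app"];
    let replacements: &[&[u8]] = &[b"x", b"y", b"z"];
    let haystack = b"append the app to the appendage";

    // A searcher built for anchored searches only cannot run the unanchored
    // search that `try_replace_all_bytes` performs, so it returns an error.
    let anchored_only = AhoCorasick::builder()
        .start_kind(StartKind::Anchored)
        .build(patterns)
        .unwrap();
    assert!(anchored_only.try_replace_all_bytes(haystack, replacements).is_err());

    // The default searcher supports unanchored searches and succeeds.
    let unanchored = AhoCorasick::new(patterns).unwrap();
    assert!(unanchored.try_replace_all_bytes(haystack, replacements).is_ok());
}
```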
- /// - /// # Example: basic usage - /// - /// ``` - /// use aho_corasick::{AhoCorasick, MatchKind}; - /// - /// let patterns = &["append", "appendage", "app"]; - /// let haystack = b"append the app to the appendage"; - /// - /// let ac = AhoCorasick::builder() - /// .match_kind(MatchKind::LeftmostFirst) - /// .build(patterns) - /// .unwrap(); - /// let result = ac.try_replace_all_bytes(haystack, &["x", "y", "z"])?; - /// assert_eq!(b"x the z to the xage".to_vec(), result); - /// - /// # Ok::<(), Box<dyn std::error::Error>>(()) - /// ``` - pub fn try_replace_all_bytes<B>( - &self, - haystack: &[u8], - replace_with: &[B], - ) -> Result<Vec<u8>, MatchError> - where - B: AsRef<[u8]>, - { - enforce_anchored_consistency(self.start_kind, Anchored::No)?; - self.aut.try_replace_all_bytes(haystack, replace_with) - } - - /// Replace all matches using a closure called on each match. - /// Matches correspond to the same matches as reported by - /// [`AhoCorasick::try_find_iter`]. - /// - /// The closure accepts three parameters: the match found, the text of - /// the match and a string buffer with which to write the replaced text - /// (if any). If the closure returns `true`, then it continues to the next - /// match. If the closure returns `false`, then searching is stopped. - /// - /// Note that any matches with boundaries that don't fall on a valid UTF-8 - /// boundary are silently skipped. - /// - /// This is the fallible version of [`AhoCorasick::replace_all_with`]. - /// - /// # Errors - /// - /// This returns an error when this Aho-Corasick searcher does not support - /// the default `Input` configuration. More specifically, this occurs only - /// when the Aho-Corasick searcher does not support unanchored searches - /// since this replacement routine always does an unanchored search. 
- /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// use aho_corasick::{AhoCorasick, MatchKind}; - /// - /// let patterns = &["append", "appendage", "app"]; - /// let haystack = "append the app to the appendage"; - /// - /// let ac = AhoCorasick::builder() - /// .match_kind(MatchKind::LeftmostFirst) - /// .build(patterns) - /// .unwrap(); - /// let mut result = String::new(); - /// ac.try_replace_all_with(haystack, &mut result, |mat, _, dst| { - /// dst.push_str(&mat.pattern().as_usize().to_string()); - /// true - /// })?; - /// assert_eq!("0 the 2 to the 0age", result); - /// - /// # Ok::<(), Box<dyn std::error::Error>>(()) - /// ``` - /// - /// Stopping the replacement by returning `false` (continued from the - /// example above): - /// - /// ``` - /// # use aho_corasick::{AhoCorasick, MatchKind, PatternID}; - /// # let patterns = &["append", "appendage", "app"]; - /// # let haystack = "append the app to the appendage"; - /// # let ac = AhoCorasick::builder() - /// # .match_kind(MatchKind::LeftmostFirst) - /// # .build(patterns) - /// # .unwrap(); - /// let mut result = String::new(); - /// ac.try_replace_all_with(haystack, &mut result, |mat, _, dst| { - /// dst.push_str(&mat.pattern().as_usize().to_string()); - /// mat.pattern() != PatternID::must(2) - /// })?; - /// assert_eq!("0 the 2 to the appendage", result); - /// - /// # Ok::<(), Box<dyn std::error::Error>>(()) - /// ``` - pub fn try_replace_all_with<F>( - &self, - haystack: &str, - dst: &mut String, - replace_with: F, - ) -> Result<(), MatchError> - where - F: FnMut(&Match, &str, &mut String) -> bool, - { - enforce_anchored_consistency(self.start_kind, Anchored::No)?; - self.aut.try_replace_all_with(haystack, dst, replace_with) - } - - /// Replace all matches using raw bytes with a closure called on each - /// match. Matches correspond to the same matches as reported by - /// [`AhoCorasick::try_find_iter`]. - /// - /// The closure accepts three parameters: the match found, the text of - /// the match and a byte buffer with which to write the replaced text - /// (if any). If the closure returns `true`, then it continues to the next - /// match. If the closure returns `false`, then searching is stopped. - /// - /// This is the fallible version of - /// [`AhoCorasick::replace_all_with_bytes`]. - /// - /// # Errors - /// - /// This returns an error when this Aho-Corasick searcher does not support - /// the default `Input` configuration. More specifically, this occurs only - /// when the Aho-Corasick searcher does not support unanchored searches - /// since this replacement routine always does an unanchored search. 
-    ///
-    /// # Examples
-    ///
-    /// Basic usage:
-    ///
-    /// ```
-    /// use aho_corasick::{AhoCorasick, MatchKind};
-    ///
-    /// let patterns = &["append", "appendage", "app"];
-    /// let haystack = b"append the app to the appendage";
-    ///
-    /// let ac = AhoCorasick::builder()
-    ///     .match_kind(MatchKind::LeftmostFirst)
-    ///     .build(patterns)
-    ///     .unwrap();
-    /// let mut result = vec![];
-    /// ac.try_replace_all_with_bytes(haystack, &mut result, |mat, _, dst| {
-    ///     dst.extend(mat.pattern().as_usize().to_string().bytes());
-    ///     true
-    /// })?;
-    /// assert_eq!(b"0 the 2 to the 0age".to_vec(), result);
-    ///
-    /// # Ok::<(), Box<dyn std::error::Error>>(())
-    /// ```
-    ///
-    /// Stopping the replacement by returning `false` (continued from the
-    /// example above):
-    ///
-    /// ```
-    /// # use aho_corasick::{AhoCorasick, MatchKind, PatternID};
-    /// # let patterns = &["append", "appendage", "app"];
-    /// # let haystack = b"append the app to the appendage";
-    /// # let ac = AhoCorasick::builder()
-    /// #     .match_kind(MatchKind::LeftmostFirst)
-    /// #     .build(patterns)
-    /// #     .unwrap();
-    /// let mut result = vec![];
-    /// ac.try_replace_all_with_bytes(haystack, &mut result, |mat, _, dst| {
-    ///     dst.extend(mat.pattern().as_usize().to_string().bytes());
-    ///     mat.pattern() != PatternID::must(2)
-    /// })?;
-    /// assert_eq!(b"0 the 2 to the appendage".to_vec(), result);
-    ///
-    /// # Ok::<(), Box<dyn std::error::Error>>(())
-    /// ```
-    pub fn try_replace_all_with_bytes<F>(
-        &self,
-        haystack: &[u8],
-        dst: &mut Vec<u8>,
-        replace_with: F,
-    ) -> Result<(), MatchError>
-    where
-        F: FnMut(&Match, &[u8], &mut Vec<u8>) -> bool,
-    {
-        enforce_anchored_consistency(self.start_kind, Anchored::No)?;
-        self.aut.try_replace_all_with_bytes(haystack, dst, replace_with)
-    }
-
-    /// Returns an iterator of non-overlapping matches in the given
-    /// stream. Matches correspond to the same matches as reported by
-    /// [`AhoCorasick::try_find_iter`].
-    ///
-    /// The matches yielded by this iterator use absolute position offsets in
-    /// the stream given, where the first byte has index `0`. Matches are
-    /// yielded until the stream is exhausted.
-    ///
-    /// Each item yielded by the iterator is a `Result<Match,
-    /// std::io::Error>`, where an error is yielded if there was a problem
-    /// reading from the reader given.
-    ///
-    /// When searching a stream, an internal buffer is used. Therefore, callers
-    /// should avoid providing a buffered reader, if possible.
-    ///
-    /// This is the fallible version of [`AhoCorasick::stream_find_iter`].
-    /// Note that both methods return iterators that produce `Result` values.
-    /// The difference is that this routine returns an error if _construction_
-    /// of the iterator failed. The `Result` values yielded by the iterator
-    /// come from whether the given reader returns an error or not during the
-    /// search.
-    ///
-    /// # Memory usage
-    ///
-    /// In general, searching streams will use a constant amount of memory for
-    /// its internal buffer. The one requirement is that the internal buffer
-    /// must be at least the size of the longest possible match. In most use
-    /// cases, the default buffer size will be much larger than any individual
-    /// match.
-    ///
-    /// # Errors
-    ///
-    /// This returns an error when this Aho-Corasick searcher does not support
-    /// the default `Input` configuration.
More specifically, this occurs only - /// when the Aho-Corasick searcher does not support unanchored searches - /// since this stream searching routine always does an unanchored search. - /// - /// This also returns an error if the searcher does not support stream - /// searches. Only searchers built with [`MatchKind::Standard`] semantics - /// support stream searches. - /// - /// # Example: basic usage - /// - /// ``` - /// use aho_corasick::{AhoCorasick, PatternID}; - /// - /// let patterns = &["append", "appendage", "app"]; - /// let haystack = "append the app to the appendage"; - /// - /// let ac = AhoCorasick::new(patterns).unwrap(); - /// let mut matches = vec![]; - /// for result in ac.try_stream_find_iter(haystack.as_bytes())? { - /// let mat = result?; - /// matches.push(mat.pattern()); - /// } - /// assert_eq!(vec![ - /// PatternID::must(2), - /// PatternID::must(2), - /// PatternID::must(2), - /// ], matches); - /// - /// # Ok::<(), Box<dyn std::error::Error>>(()) - /// ``` - #[cfg(feature = "std")] - pub fn try_stream_find_iter<'a, R: std::io::Read>( - &'a self, - rdr: R, - ) -> Result<StreamFindIter<'a, R>, MatchError> { - enforce_anchored_consistency(self.start_kind, Anchored::No)?; - self.aut.try_stream_find_iter(rdr).map(StreamFindIter) - } - - /// Search for and replace all matches of this automaton in - /// the given reader, and write the replacements to the given - /// writer. Matches correspond to the same matches as reported by - /// [`AhoCorasick::try_find_iter`]. - /// - /// Replacements are determined by the index of the matching pattern. For - /// example, if the pattern with index `2` is found, then it is replaced by - /// `replace_with[2]`. - /// - /// After all matches are replaced, the writer is _not_ flushed. - /// - /// If there was a problem reading from the given reader or writing to the - /// given writer, then the corresponding `io::Error` is returned and all - /// replacement is stopped. - /// - /// When searching a stream, an internal buffer is used. Therefore, callers - /// should avoiding providing a buffered reader, if possible. However, - /// callers may want to provide a buffered writer. - /// - /// Note that there is currently no infallible version of this routine. - /// - /// # Memory usage - /// - /// In general, searching streams will use a constant amount of memory for - /// its internal buffer. The one requirement is that the internal buffer - /// must be at least the size of the longest possible match. In most use - /// cases, the default buffer size will be much larger than any individual - /// match. - /// - /// # Panics - /// - /// This panics when `replace_with.len()` does not equal - /// [`AhoCorasick::patterns_len`]. - /// - /// # Errors - /// - /// This returns an error when this Aho-Corasick searcher does not support - /// the default `Input` configuration. More specifically, this occurs only - /// when the Aho-Corasick searcher does not support unanchored searches - /// since this stream searching routine always does an unanchored search. - /// - /// This also returns an error if the searcher does not support stream - /// searches. Only searchers built with [`MatchKind::Standard`] semantics - /// support stream searches. 
- /// - /// # Example: basic usage - /// - /// ``` - /// use aho_corasick::AhoCorasick; - /// - /// let patterns = &["fox", "brown", "quick"]; - /// let haystack = "The quick brown fox."; - /// let replace_with = &["sloth", "grey", "slow"]; - /// - /// let ac = AhoCorasick::new(patterns).unwrap(); - /// let mut result = vec![]; - /// ac.try_stream_replace_all( - /// haystack.as_bytes(), - /// &mut result, - /// replace_with, - /// )?; - /// assert_eq!(b"The slow grey sloth.".to_vec(), result); - /// - /// # Ok::<(), Box<dyn std::error::Error>>(()) - /// ``` - #[cfg(feature = "std")] - pub fn try_stream_replace_all<R, W, B>( - &self, - rdr: R, - wtr: W, - replace_with: &[B], - ) -> Result<(), std::io::Error> - where - R: std::io::Read, - W: std::io::Write, - B: AsRef<[u8]>, - { - enforce_anchored_consistency(self.start_kind, Anchored::No) - .map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e))?; - self.aut.try_stream_replace_all(rdr, wtr, replace_with) - } - - /// Search the given reader and replace all matches of this automaton - /// using the given closure. The result is written to the given - /// writer. Matches correspond to the same matches as reported by - /// [`AhoCorasick::try_find_iter`]. - /// - /// The closure accepts three parameters: the match found, the text of - /// the match and the writer with which to write the replaced text (if any). - /// - /// After all matches are replaced, the writer is _not_ flushed. - /// - /// If there was a problem reading from the given reader or writing to the - /// given writer, then the corresponding `io::Error` is returned and all - /// replacement is stopped. - /// - /// When searching a stream, an internal buffer is used. Therefore, callers - /// should avoiding providing a buffered reader, if possible. However, - /// callers may want to provide a buffered writer. - /// - /// Note that there is currently no infallible version of this routine. - /// - /// # Memory usage - /// - /// In general, searching streams will use a constant amount of memory for - /// its internal buffer. The one requirement is that the internal buffer - /// must be at least the size of the longest possible match. In most use - /// cases, the default buffer size will be much larger than any individual - /// match. - /// - /// # Errors - /// - /// This returns an error when this Aho-Corasick searcher does not support - /// the default `Input` configuration. More specifically, this occurs only - /// when the Aho-Corasick searcher does not support unanchored searches - /// since this stream searching routine always does an unanchored search. - /// - /// This also returns an error if the searcher does not support stream - /// searches. Only searchers built with [`MatchKind::Standard`] semantics - /// support stream searches. 
- /// - /// # Example: basic usage - /// - /// ``` - /// use std::io::Write; - /// use aho_corasick::AhoCorasick; - /// - /// let patterns = &["fox", "brown", "quick"]; - /// let haystack = "The quick brown fox."; - /// - /// let ac = AhoCorasick::new(patterns).unwrap(); - /// let mut result = vec![]; - /// ac.try_stream_replace_all_with( - /// haystack.as_bytes(), - /// &mut result, - /// |mat, _, wtr| { - /// wtr.write_all(mat.pattern().as_usize().to_string().as_bytes()) - /// }, - /// )?; - /// assert_eq!(b"The 2 1 0.".to_vec(), result); - /// - /// # Ok::<(), Box<dyn std::error::Error>>(()) - /// ``` - #[cfg(feature = "std")] - pub fn try_stream_replace_all_with<R, W, F>( - &self, - rdr: R, - wtr: W, - replace_with: F, - ) -> Result<(), std::io::Error> - where - R: std::io::Read, - W: std::io::Write, - F: FnMut(&Match, &[u8], &mut W) -> Result<(), std::io::Error>, - { - enforce_anchored_consistency(self.start_kind, Anchored::No) - .map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e))?; - self.aut.try_stream_replace_all_with(rdr, wtr, replace_with) - } -} - -/// Routines for querying information about the Aho-Corasick automaton. -impl AhoCorasick { - /// Returns the kind of the Aho-Corasick automaton used by this searcher. - /// - /// Knowing the Aho-Corasick kind is principally useful for diagnostic - /// purposes. In particular, if no specific kind was given to - /// [`AhoCorasickBuilder::kind`], then one is automatically chosen and - /// this routine will report which one. - /// - /// Note that the heuristics used for choosing which `AhoCorasickKind` - /// may be changed in a semver compatible release. - /// - /// # Examples - /// - /// ``` - /// use aho_corasick::{AhoCorasick, AhoCorasickKind}; - /// - /// let ac = AhoCorasick::new(&["foo", "bar", "quux", "baz"]).unwrap(); - /// // The specific Aho-Corasick kind chosen is not guaranteed! - /// assert_eq!(AhoCorasickKind::DFA, ac.kind()); - /// ``` - pub fn kind(&self) -> AhoCorasickKind { - self.kind - } - - /// Returns the type of starting search configuration supported by this - /// Aho-Corasick automaton. - /// - /// # Examples - /// - /// ``` - /// use aho_corasick::{AhoCorasick, StartKind}; - /// - /// let ac = AhoCorasick::new(&["foo", "bar", "quux", "baz"]).unwrap(); - /// assert_eq!(StartKind::Unanchored, ac.start_kind()); - /// ``` - pub fn start_kind(&self) -> StartKind { - self.start_kind - } - - /// Returns the match kind used by this automaton. - /// - /// The match kind is important because it determines what kinds of - /// matches are returned. Also, some operations (such as overlapping - /// search and stream searching) are only supported when using the - /// [`MatchKind::Standard`] match kind. - /// - /// # Examples - /// - /// ``` - /// use aho_corasick::{AhoCorasick, MatchKind}; - /// - /// let ac = AhoCorasick::new(&["foo", "bar", "quux", "baz"]).unwrap(); - /// assert_eq!(MatchKind::Standard, ac.match_kind()); - /// ``` - pub fn match_kind(&self) -> MatchKind { - self.aut.match_kind() - } - - /// Returns the length of the shortest pattern matched by this automaton. 
- /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// use aho_corasick::AhoCorasick; - /// - /// let ac = AhoCorasick::new(&["foo", "bar", "quux", "baz"]).unwrap(); - /// assert_eq!(3, ac.min_pattern_len()); - /// ``` - /// - /// Note that an `AhoCorasick` automaton has a minimum length of `0` if - /// and only if it can match the empty string: - /// - /// ``` - /// use aho_corasick::AhoCorasick; - /// - /// let ac = AhoCorasick::new(&["foo", "", "quux", "baz"]).unwrap(); - /// assert_eq!(0, ac.min_pattern_len()); - /// ``` - pub fn min_pattern_len(&self) -> usize { - self.aut.min_pattern_len() - } - - /// Returns the length of the longest pattern matched by this automaton. - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// use aho_corasick::AhoCorasick; - /// - /// let ac = AhoCorasick::new(&["foo", "bar", "quux", "baz"]).unwrap(); - /// assert_eq!(4, ac.max_pattern_len()); - /// ``` - pub fn max_pattern_len(&self) -> usize { - self.aut.max_pattern_len() - } - - /// Return the total number of patterns matched by this automaton. - /// - /// This includes patterns that may never participate in a match. For - /// example, if [`MatchKind::LeftmostFirst`] match semantics are used, and - /// the patterns `Sam` and `Samwise` were used to build the automaton (in - /// that order), then `Samwise` can never participate in a match because - /// `Sam` will always take priority. - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// use aho_corasick::AhoCorasick; - /// - /// let ac = AhoCorasick::new(&["foo", "bar", "baz"]).unwrap(); - /// assert_eq!(3, ac.patterns_len()); - /// ``` - pub fn patterns_len(&self) -> usize { - self.aut.patterns_len() - } - - /// Returns the approximate total amount of heap used by this automaton, in - /// units of bytes. - /// - /// # Examples - /// - /// This example shows the difference in heap usage between a few - /// configurations: - /// - /// ``` - /// # if !cfg!(target_pointer_width = "64") { return; } - /// use aho_corasick::{AhoCorasick, AhoCorasickKind, MatchKind}; - /// - /// let ac = AhoCorasick::builder() - /// .kind(None) // default - /// .build(&["foobar", "bruce", "triskaidekaphobia", "springsteen"]) - /// .unwrap(); - /// assert_eq!(5_632, ac.memory_usage()); - /// - /// let ac = AhoCorasick::builder() - /// .kind(None) // default - /// .ascii_case_insensitive(true) - /// .build(&["foobar", "bruce", "triskaidekaphobia", "springsteen"]) - /// .unwrap(); - /// assert_eq!(11_136, ac.memory_usage()); - /// - /// let ac = AhoCorasick::builder() - /// .kind(Some(AhoCorasickKind::NoncontiguousNFA)) - /// .ascii_case_insensitive(true) - /// .build(&["foobar", "bruce", "triskaidekaphobia", "springsteen"]) - /// .unwrap(); - /// assert_eq!(10_879, ac.memory_usage()); - /// - /// let ac = AhoCorasick::builder() - /// .kind(Some(AhoCorasickKind::ContiguousNFA)) - /// .ascii_case_insensitive(true) - /// .build(&["foobar", "bruce", "triskaidekaphobia", "springsteen"]) - /// .unwrap(); - /// assert_eq!(2_584, ac.memory_usage()); - /// - /// let ac = AhoCorasick::builder() - /// .kind(Some(AhoCorasickKind::DFA)) - /// .ascii_case_insensitive(true) - /// .build(&["foobar", "bruce", "triskaidekaphobia", "springsteen"]) - /// .unwrap(); - /// // While this shows the DFA being the biggest here by a small margin, - /// // don't let the difference fool you. 
With such a small number of - /// // patterns, the difference is small, but a bigger number of patterns - /// // will reveal that the rate of growth of the DFA is far bigger than - /// // the NFAs above. For a large number of patterns, it is easy for the - /// // DFA to take an order of magnitude more heap space (or more!). - /// assert_eq!(11_136, ac.memory_usage()); - /// ``` - pub fn memory_usage(&self) -> usize { - self.aut.memory_usage() - } -} - -// We provide a manual debug impl so that we don't include the 'start_kind', -// principally because it's kind of weird to do so and because it screws with -// the carefully curated debug output for the underlying automaton. -impl core::fmt::Debug for AhoCorasick { - fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { - f.debug_tuple("AhoCorasick").field(&self.aut).finish() - } -} - -/// An iterator of non-overlapping matches in a particular haystack. -/// -/// This iterator yields matches according to the [`MatchKind`] used by this -/// automaton. -/// -/// This iterator is constructed via the [`AhoCorasick::find_iter`] and -/// [`AhoCorasick::try_find_iter`] methods. -/// -/// The lifetime `'a` refers to the lifetime of the `AhoCorasick` automaton. -/// -/// The lifetime `'h` refers to the lifetime of the haystack being searched. -#[derive(Debug)] -pub struct FindIter<'a, 'h>(automaton::FindIter<'a, 'h, Arc<dyn AcAutomaton>>); - -impl<'a, 'h> Iterator for FindIter<'a, 'h> { - type Item = Match; - - #[inline] - fn next(&mut self) -> Option<Match> { - self.0.next() - } -} - -/// An iterator of overlapping matches in a particular haystack. -/// -/// This iterator will report all possible matches in a particular haystack, -/// even when the matches overlap. -/// -/// This iterator is constructed via the [`AhoCorasick::find_overlapping_iter`] -/// and [`AhoCorasick::try_find_overlapping_iter`] methods. -/// -/// The lifetime `'a` refers to the lifetime of the `AhoCorasick` automaton. -/// -/// The lifetime `'h` refers to the lifetime of the haystack being searched. -#[derive(Debug)] -pub struct FindOverlappingIter<'a, 'h>( - automaton::FindOverlappingIter<'a, 'h, Arc<dyn AcAutomaton>>, -); - -impl<'a, 'h> Iterator for FindOverlappingIter<'a, 'h> { - type Item = Match; - - #[inline] - fn next(&mut self) -> Option<Match> { - self.0.next() - } -} - -/// An iterator that reports Aho-Corasick matches in a stream. -/// -/// This iterator yields elements of type `Result<Match, std::io::Error>`, -/// where an error is reported if there was a problem reading from the -/// underlying stream. The iterator terminates only when the underlying stream -/// reaches `EOF`. -/// -/// This iterator is constructed via the [`AhoCorasick::stream_find_iter`] and -/// [`AhoCorasick::try_stream_find_iter`] methods. -/// -/// The type variable `R` refers to the `io::Read` stream that is being read -/// from. -/// -/// The lifetime `'a` refers to the lifetime of the corresponding -/// [`AhoCorasick`] searcher. -#[cfg(feature = "std")] -#[derive(Debug)] -pub struct StreamFindIter<'a, R>( - automaton::StreamFindIter<'a, Arc<dyn AcAutomaton>, R>, -); - -#[cfg(feature = "std")] -impl<'a, R: std::io::Read> Iterator for StreamFindIter<'a, R> { - type Item = Result<Match, std::io::Error>; - - fn next(&mut self) -> Option<Result<Match, std::io::Error>> { - self.0.next() - } -} - -/// A builder for configuring an Aho-Corasick automaton. 
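[Illustrative sketch, added for review only; it is not part of the vendored file or of this patch's hunks. It shows how the builder options documented below (match_kind, start_kind, kind) are typically combined; the pattern list and the asserted pattern index are made up for the example.]

```rust
use aho_corasick::{AhoCorasick, AhoCorasickKind, MatchKind, StartKind};

fn main() {
    // Match the way a backtracking regex alternation would (leftmost-first),
    // support both unanchored and anchored searches, and force the DFA for
    // search speed at the cost of memory (doubled here, since StartKind::Both
    // keeps two copies of the DFA's transition table).
    let ac = AhoCorasick::builder()
        .match_kind(MatchKind::LeftmostFirst)
        .start_kind(StartKind::Both)
        .kind(Some(AhoCorasickKind::DFA))
        .build(&["Samwise", "Sam"])
        .unwrap();
    let mat = ac.find("Samwise Gamgee").expect("should match");
    // "Samwise" is listed first, so leftmost-first reports pattern 0.
    assert_eq!(0, mat.pattern().as_usize());
}
```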
-/// -/// # Quick advice -/// -/// * Use [`AhoCorasickBuilder::match_kind`] to configure your searcher -/// with [`MatchKind::LeftmostFirst`] if you want to match how backtracking -/// regex engines execute searches for `pat1|pat2|..|patN`. Use -/// [`MatchKind::LeftmostLongest`] if you want to match how POSIX regex engines -/// do it. -/// * If you need an anchored search, use [`AhoCorasickBuilder::start_kind`] to -/// set the [`StartKind::Anchored`] mode since [`StartKind::Unanchored`] is the -/// default. Or just use [`StartKind::Both`] to support both types of searches. -/// * You might want to use [`AhoCorasickBuilder::kind`] to set your searcher -/// to always use a [`AhoCorasickKind::DFA`] if search speed is critical and -/// memory usage isn't a concern. Otherwise, not setting a kind will probably -/// make the right choice for you. Beware that if you use [`StartKind::Both`] -/// to build a searcher that supports both unanchored and anchored searches -/// _and_ you set [`AhoCorasickKind::DFA`], then the DFA will essentially be -/// duplicated to support both simultaneously. This results in very high memory -/// usage. -/// * For all other options, their defaults are almost certainly what you want. -#[derive(Clone, Debug, Default)] -pub struct AhoCorasickBuilder { - nfa_noncontiguous: noncontiguous::Builder, - nfa_contiguous: contiguous::Builder, - dfa: dfa::Builder, - kind: Option<AhoCorasickKind>, - start_kind: StartKind, -} - -impl AhoCorasickBuilder { - /// Create a new builder for configuring an Aho-Corasick automaton. - /// - /// The builder provides a way to configure a number of things, including - /// ASCII case insensitivity and what kind of match semantics are used. - pub fn new() -> AhoCorasickBuilder { - AhoCorasickBuilder::default() - } - - /// Build an Aho-Corasick automaton using the configuration set on this - /// builder. - /// - /// A builder may be reused to create more automatons. - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// use aho_corasick::{AhoCorasickBuilder, PatternID}; - /// - /// let patterns = &["foo", "bar", "baz"]; - /// let ac = AhoCorasickBuilder::new().build(patterns).unwrap(); - /// assert_eq!( - /// Some(PatternID::must(1)), - /// ac.find("xxx bar xxx").map(|m| m.pattern()), - /// ); - /// ``` - pub fn build<I, P>(&self, patterns: I) -> Result<AhoCorasick, BuildError> - where - I: IntoIterator<Item = P>, - P: AsRef<[u8]>, - { - let nfa = self.nfa_noncontiguous.build(patterns)?; - let (aut, kind): (Arc<dyn AcAutomaton>, AhoCorasickKind) = - match self.kind { - None => { - debug!( - "asked for automatic Aho-Corasick implementation, \ - criteria: <patterns: {:?}, max pattern len: {:?}, \ - start kind: {:?}>", - nfa.patterns_len(), - nfa.max_pattern_len(), - self.start_kind, - ); - self.build_auto(nfa) - } - Some(AhoCorasickKind::NoncontiguousNFA) => { - debug!("forcefully chose noncontiguous NFA"); - (Arc::new(nfa), AhoCorasickKind::NoncontiguousNFA) - } - Some(AhoCorasickKind::ContiguousNFA) => { - debug!("forcefully chose contiguous NFA"); - let cnfa = - self.nfa_contiguous.build_from_noncontiguous(&nfa)?; - (Arc::new(cnfa), AhoCorasickKind::ContiguousNFA) - } - Some(AhoCorasickKind::DFA) => { - debug!("forcefully chose DFA"); - let dfa = self.dfa.build_from_noncontiguous(&nfa)?; - (Arc::new(dfa), AhoCorasickKind::DFA) - } - }; - Ok(AhoCorasick { aut, kind, start_kind: self.start_kind }) - } - - /// Implements the automatic selection logic for the Aho-Corasick - /// implementation to use. 
Since all Aho-Corasick automatons are built - /// from a non-contiguous NFA, the caller is responsible for building - /// that first. - fn build_auto( - &self, - nfa: noncontiguous::NFA, - ) -> (Arc<dyn AcAutomaton>, AhoCorasickKind) { - // We try to build a DFA if we have a very small number of patterns, - // otherwise the memory usage just gets too crazy. We also only do it - // when the start kind is unanchored or anchored, but not both, because - // both implies two full copies of the transition table. - let try_dfa = !matches!(self.start_kind, StartKind::Both) - && nfa.patterns_len() <= 100; - if try_dfa { - match self.dfa.build_from_noncontiguous(&nfa) { - Ok(dfa) => { - debug!("chose a DFA"); - return (Arc::new(dfa), AhoCorasickKind::DFA); - } - Err(_err) => { - debug!( - "failed to build DFA, trying something else: {}", - _err - ); - } - } - } - // We basically always want a contiguous NFA if the limited - // circumstances in which we use a DFA are not true. It is quite fast - // and has excellent memory usage. The only way we don't use it is if - // there are so many states that it can't fit in a contiguous NFA. - // And the only way to know that is to try to build it. Building a - // contiguous NFA is mostly just reshuffling data from a noncontiguous - // NFA, so it isn't too expensive, especially relative to building a - // noncontiguous NFA in the first place. - match self.nfa_contiguous.build_from_noncontiguous(&nfa) { - Ok(nfa) => { - debug!("chose contiguous NFA"); - return (Arc::new(nfa), AhoCorasickKind::ContiguousNFA); - } - #[allow(unused_variables)] // unused when 'logging' is disabled - Err(_err) => { - debug!( - "failed to build contiguous NFA, \ - trying something else: {}", - _err - ); - } - } - debug!("chose non-contiguous NFA"); - (Arc::new(nfa), AhoCorasickKind::NoncontiguousNFA) - } - - /// Set the desired match semantics. - /// - /// The default is [`MatchKind::Standard`], which corresponds to the match - /// semantics supported by the standard textbook description of the - /// Aho-Corasick algorithm. Namely, matches are reported as soon as they - /// are found. Moreover, this is the only way to get overlapping matches - /// or do stream searching. - /// - /// The other kinds of match semantics that are supported are - /// [`MatchKind::LeftmostFirst`] and [`MatchKind::LeftmostLongest`]. The - /// former corresponds to the match you would get if you were to try to - /// match each pattern at each position in the haystack in the same order - /// that you give to the automaton. That is, it returns the leftmost match - /// corresponding to the earliest pattern given to the automaton. The - /// latter corresponds to finding the longest possible match among all - /// leftmost matches. - /// - /// For more details on match semantics, see the [documentation for - /// `MatchKind`](MatchKind). - /// - /// Note that setting this to [`MatchKind::LeftmostFirst`] or - /// [`MatchKind::LeftmostLongest`] will cause some search routines on - /// [`AhoCorasick`] to return an error (or panic if you're using the - /// infallible API). Notably, this includes stream and overlapping - /// searches. - /// - /// # Examples - /// - /// In these examples, we demonstrate the differences between match - /// semantics for a particular set of patterns in a specific order: - /// `b`, `abc`, `abcd`. 
- /// - /// Standard semantics: - /// - /// ``` - /// use aho_corasick::{AhoCorasick, MatchKind}; - /// - /// let patterns = &["b", "abc", "abcd"]; - /// let haystack = "abcd"; - /// - /// let ac = AhoCorasick::builder() - /// .match_kind(MatchKind::Standard) // default, not necessary - /// .build(patterns) - /// .unwrap(); - /// let mat = ac.find(haystack).expect("should have a match"); - /// assert_eq!("b", &haystack[mat.start()..mat.end()]); - /// ``` - /// - /// Leftmost-first semantics: - /// - /// ``` - /// use aho_corasick::{AhoCorasick, MatchKind}; - /// - /// let patterns = &["b", "abc", "abcd"]; - /// let haystack = "abcd"; - /// - /// let ac = AhoCorasick::builder() - /// .match_kind(MatchKind::LeftmostFirst) - /// .build(patterns) - /// .unwrap(); - /// let mat = ac.find(haystack).expect("should have a match"); - /// assert_eq!("abc", &haystack[mat.start()..mat.end()]); - /// ``` - /// - /// Leftmost-longest semantics: - /// - /// ``` - /// use aho_corasick::{AhoCorasick, MatchKind}; - /// - /// let patterns = &["b", "abc", "abcd"]; - /// let haystack = "abcd"; - /// - /// let ac = AhoCorasick::builder() - /// .match_kind(MatchKind::LeftmostLongest) - /// .build(patterns) - /// .unwrap(); - /// let mat = ac.find(haystack).expect("should have a match"); - /// assert_eq!("abcd", &haystack[mat.start()..mat.end()]); - /// ``` - pub fn match_kind(&mut self, kind: MatchKind) -> &mut AhoCorasickBuilder { - self.nfa_noncontiguous.match_kind(kind); - self.nfa_contiguous.match_kind(kind); - self.dfa.match_kind(kind); - self - } - - /// Sets the starting state configuration for the automaton. - /// - /// Every Aho-Corasick automaton is capable of having two start states: one - /// that is used for unanchored searches and one that is used for anchored - /// searches. Some automatons, like the NFAs, support this with almost zero - /// additional cost. Other automatons, like the DFA, require two copies of - /// the underlying transition table to support both simultaneously. - /// - /// Because there may be an added non-trivial cost to supporting both, it - /// is possible to configure which starting state configuration is needed. - /// - /// Indeed, since anchored searches tend to be somewhat more rare, - /// _only_ unanchored searches are supported by default. Thus, - /// [`StartKind::Unanchored`] is the default. - /// - /// Note that when this is set to [`StartKind::Unanchored`], then - /// running an anchored search will result in an error (or a panic - /// if using the infallible APIs). Similarly, when this is set to - /// [`StartKind::Anchored`], then running an unanchored search will - /// result in an error (or a panic if using the infallible APIs). When - /// [`StartKind::Both`] is used, then both unanchored and anchored searches - /// are always supported. - /// - /// Also note that even if an `AhoCorasick` searcher is using an NFA - /// internally (which always supports both unanchored and anchored - /// searches), an error will still be reported for a search that isn't - /// supported by the configuration set via this method. This means, - /// for example, that an error is never dependent on which internal - /// implementation of Aho-Corasick is used. 
- /// - /// # Example: anchored search - /// - /// This shows how to build a searcher that only supports anchored - /// searches: - /// - /// ``` - /// use aho_corasick::{ - /// AhoCorasick, Anchored, Input, Match, MatchKind, StartKind, - /// }; - /// - /// let ac = AhoCorasick::builder() - /// .match_kind(MatchKind::LeftmostFirst) - /// .start_kind(StartKind::Anchored) - /// .build(&["b", "abc", "abcd"]) - /// .unwrap(); - /// - /// // An unanchored search is not supported! An error here is guaranteed - /// // given the configuration above regardless of which kind of - /// // Aho-Corasick implementation ends up being used internally. - /// let input = Input::new("foo abcd").anchored(Anchored::No); - /// assert!(ac.try_find(input).is_err()); - /// - /// let input = Input::new("foo abcd").anchored(Anchored::Yes); - /// assert_eq!(None, ac.try_find(input)?); - /// - /// let input = Input::new("abcd").anchored(Anchored::Yes); - /// assert_eq!(Some(Match::must(1, 0..3)), ac.try_find(input)?); - /// - /// # Ok::<(), Box<dyn std::error::Error>>(()) - /// ``` - /// - /// # Example: unanchored and anchored searches - /// - /// This shows how to build a searcher that supports both unanchored and - /// anchored searches: - /// - /// ``` - /// use aho_corasick::{ - /// AhoCorasick, Anchored, Input, Match, MatchKind, StartKind, - /// }; - /// - /// let ac = AhoCorasick::builder() - /// .match_kind(MatchKind::LeftmostFirst) - /// .start_kind(StartKind::Both) - /// .build(&["b", "abc", "abcd"]) - /// .unwrap(); - /// - /// let input = Input::new("foo abcd").anchored(Anchored::No); - /// assert_eq!(Some(Match::must(1, 4..7)), ac.try_find(input)?); - /// - /// let input = Input::new("foo abcd").anchored(Anchored::Yes); - /// assert_eq!(None, ac.try_find(input)?); - /// - /// let input = Input::new("abcd").anchored(Anchored::Yes); - /// assert_eq!(Some(Match::must(1, 0..3)), ac.try_find(input)?); - /// - /// # Ok::<(), Box<dyn std::error::Error>>(()) - /// ``` - pub fn start_kind(&mut self, kind: StartKind) -> &mut AhoCorasickBuilder { - self.dfa.start_kind(kind); - self.start_kind = kind; - self - } - - /// Enable ASCII-aware case insensitive matching. - /// - /// When this option is enabled, searching will be performed without - /// respect to case for ASCII letters (`a-z` and `A-Z`) only. - /// - /// Enabling this option does not change the search algorithm, but it may - /// increase the size of the automaton. - /// - /// **NOTE:** It is unlikely that support for Unicode case folding will - /// be added in the future. The ASCII case works via a simple hack to the - /// underlying automaton, but full Unicode handling requires a fair bit of - /// sophistication. If you do need Unicode handling, you might consider - /// using the [`regex` crate](https://docs.rs/regex) or the lower level - /// [`regex-automata` crate](https://docs.rs/regex-automata). 
- /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// use aho_corasick::AhoCorasick; - /// - /// let patterns = &["FOO", "bAr", "BaZ"]; - /// let haystack = "foo bar baz"; - /// - /// let ac = AhoCorasick::builder() - /// .ascii_case_insensitive(true) - /// .build(patterns) - /// .unwrap(); - /// assert_eq!(3, ac.find_iter(haystack).count()); - /// ``` - pub fn ascii_case_insensitive( - &mut self, - yes: bool, - ) -> &mut AhoCorasickBuilder { - self.nfa_noncontiguous.ascii_case_insensitive(yes); - self.nfa_contiguous.ascii_case_insensitive(yes); - self.dfa.ascii_case_insensitive(yes); - self - } - - /// Choose the type of underlying automaton to use. - /// - /// Currently, there are four choices: - /// - /// * [`AhoCorasickKind::NoncontiguousNFA`] instructs the searcher to - /// use a [`noncontiguous::NFA`]. A noncontiguous NFA is the fastest to - /// be built, has moderate memory usage and is typically the slowest to - /// execute a search. - /// * [`AhoCorasickKind::ContiguousNFA`] instructs the searcher to use a - /// [`contiguous::NFA`]. A contiguous NFA is a little slower to build than - /// a noncontiguous NFA, has excellent memory usage and is typically a - /// little slower than a DFA for a search. - /// * [`AhoCorasickKind::DFA`] instructs the searcher to use a - /// [`dfa::DFA`]. A DFA is very slow to build, uses exorbitant amounts of - /// memory, but will typically execute searches the fastest. - /// * `None` (the default) instructs the searcher to choose the "best" - /// Aho-Corasick implementation. This choice is typically based primarily - /// on the number of patterns. - /// - /// Setting this configuration does not change the time complexity for - /// constructing the Aho-Corasick automaton (which is `O(p)` where `p` - /// is the total number of patterns being compiled). Setting this to - /// [`AhoCorasickKind::DFA`] does however reduce the time complexity of - /// non-overlapping searches from `O(n + p)` to `O(n)`, where `n` is the - /// length of the haystack. - /// - /// In general, you should probably stick to the default unless you have - /// some kind of reason to use a specific Aho-Corasick implementation. For - /// example, you might choose `AhoCorasickKind::DFA` if you don't care - /// about memory usage and want the fastest possible search times. - /// - /// Setting this guarantees that the searcher returned uses the chosen - /// implementation. If that implementation could not be constructed, then - /// an error will be returned. In contrast, when `None` is used, it is - /// possible for it to attempt to construct, for example, a contiguous - /// NFA and have it fail. In which case, it will fall back to using a - /// noncontiguous NFA. - /// - /// If `None` is given, then one may use [`AhoCorasick::kind`] to determine - /// which Aho-Corasick implementation was chosen. - /// - /// Note that the heuristics used for choosing which `AhoCorasickKind` - /// may be changed in a semver compatible release. - pub fn kind( - &mut self, - kind: Option<AhoCorasickKind>, - ) -> &mut AhoCorasickBuilder { - self.kind = kind; - self - } - - /// Enable heuristic prefilter optimizations. - /// - /// When enabled, searching will attempt to quickly skip to match - /// candidates using specialized literal search routines. A prefilter - /// cannot always be used, and is generally treated as a heuristic. It - /// can be useful to disable this if the prefilter is observed to be - /// sub-optimal for a particular workload. 
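[Illustrative sketch, added for review only; it is not part of the vendored file or of this patch's hunks. It exercises the kind() option described above: forcing an implementation guarantees that choice (or a build error), while the default lets the crate pick and kind() merely reports the result. The pattern list is made up for the example.]

```rust
use aho_corasick::{AhoCorasick, AhoCorasickKind};

fn main() {
    // Force a contiguous NFA; construction fails outright if that
    // implementation cannot be built, so the assertion below is guaranteed.
    let ac = AhoCorasick::builder()
        .kind(Some(AhoCorasickKind::ContiguousNFA))
        .build(&["foo", "bar", "quux", "baz"])
        .unwrap();
    assert_eq!(AhoCorasickKind::ContiguousNFA, ac.kind());

    // With the default (None), the crate chooses heuristically; kind() only
    // reports which implementation was picked, and that choice may change
    // across releases.
    let ac = AhoCorasick::builder()
        .kind(None)
        .build(&["foo", "bar", "quux", "baz"])
        .unwrap();
    println!("automatically selected: {:?}", ac.kind());
}
```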
-    ///
-    /// Currently, prefilters are typically only active when building searchers
-    /// with a small (less than 100) number of patterns.
-    ///
-    /// This is enabled by default.
-    pub fn prefilter(&mut self, yes: bool) -> &mut AhoCorasickBuilder {
-        self.nfa_noncontiguous.prefilter(yes);
-        self.nfa_contiguous.prefilter(yes);
-        self.dfa.prefilter(yes);
-        self
-    }
-
-    /// Set the limit on how many states use a dense representation for their
-    /// transitions. Other states will generally use a sparse representation.
-    ///
-    /// A dense representation uses more memory but is generally faster, since
-    /// the next transition in a dense representation can be computed in a
-    /// constant number of instructions. A sparse representation uses less
-    /// memory but is generally slower, since the next transition in a sparse
-    /// representation requires executing a variable number of instructions.
-    ///
-    /// This setting is only used when an Aho-Corasick implementation is used
-    /// that supports the dense versus sparse representation trade off. Not all
-    /// do.
-    ///
-    /// This limit is expressed in terms of the depth of a state, i.e., the
-    /// number of transitions from the starting state of the automaton. The
-    /// idea is that most of the time searching will be spent near the starting
-    /// state of the automaton, so states near the start state should use a
-    /// dense representation. States further away from the start state would
-    /// then use a sparse representation.
-    ///
-    /// By default, this is set to a low but non-zero number. Setting this to
-    /// `0` is almost never what you want, since it is likely to make searches
-    /// very slow due to the start state itself being forced to use a sparse
-    /// representation. However, it is unlikely that increasing this number
-    /// will help things much, since the most active states have a small depth.
-    /// More to the point, the memory usage increases superlinearly as this
-    /// number increases.
-    pub fn dense_depth(&mut self, depth: usize) -> &mut AhoCorasickBuilder {
-        self.nfa_noncontiguous.dense_depth(depth);
-        self.nfa_contiguous.dense_depth(depth);
-        self
-    }
-
-    /// A debug setting for whether to attempt to shrink the size of the
-    /// automaton's alphabet or not.
-    ///
-    /// This option is enabled by default and should never be disabled unless
-    /// one is debugging the underlying automaton.
-    ///
-    /// When enabled, some (but not all) Aho-Corasick automatons will use a map
-    /// from all possible bytes to their corresponding equivalence class. Each
-    /// equivalence class represents a set of bytes that does not discriminate
-    /// between a match and a non-match in the automaton.
-    ///
-    /// The advantage of this map is that the size of the transition table can
-    /// be reduced drastically from `#states * 256 * sizeof(u32)` to
-    /// `#states * k * sizeof(u32)` where `k` is the number of equivalence
-    /// classes (rounded up to the nearest power of 2). As a result, total
-    /// space usage can decrease substantially. Moreover, since a smaller
-    /// alphabet is used, automaton compilation becomes faster as well.
-    ///
-    /// **WARNING:** This is only useful for debugging automatons. Disabling
-    /// this does not yield any speed advantages. Namely, even when this is
-    /// disabled, a byte class map is still used while searching. The only
-    /// difference is that every byte will be forced into its own distinct
-    /// equivalence class. This is useful for debugging the actual generated
-    /// transitions because it lets one see the transitions defined on actual
-    /// bytes instead of the equivalence classes.
-    pub fn byte_classes(&mut self, yes: bool) -> &mut AhoCorasickBuilder {
-        self.nfa_contiguous.byte_classes(yes);
-        self.dfa.byte_classes(yes);
-        self
-    }
-}
-
-/// The type of Aho-Corasick implementation to use in an [`AhoCorasick`]
-/// searcher.
-///
-/// This is principally used as an input to the
-/// [`AhoCorasickBuilder::kind`] method. Its documentation goes into more
-/// detail about each choice.
-#[non_exhaustive]
-#[derive(Clone, Copy, Debug, Eq, PartialEq)]
-pub enum AhoCorasickKind {
-    /// Use a noncontiguous NFA.
-    NoncontiguousNFA,
-    /// Use a contiguous NFA.
-    ContiguousNFA,
-    /// Use a DFA. Warning: DFAs typically use a large amount of memory.
-    DFA,
-}
-
-/// A trait that effectively gives us practical dynamic dispatch over anything
-/// that impls `Automaton`, but without needing to add a bunch of bounds to
-/// the core `Automaton` trait. Basically, we provide all of the marker traits
-/// that our automatons have, in addition to `Debug` impls and requiring that
-/// there is no borrowed data. Without these, the main `AhoCorasick` type would
-/// not be able to meaningfully impl `Debug` or the marker traits without also
-/// requiring that all impls of `Automaton` do so, which would be not great.
-trait AcAutomaton:
-    Automaton + Debug + Send + Sync + UnwindSafe + RefUnwindSafe + 'static
-{
-}
-
-impl<A> AcAutomaton for A where
-    A: Automaton + Debug + Send + Sync + UnwindSafe + RefUnwindSafe + 'static
-{
-}
-
-impl crate::automaton::private::Sealed for Arc<dyn AcAutomaton> {}
-
-// I'm not sure why this trait impl shows up in the docs, as the AcAutomaton
-// trait is not exported. So we forcefully hide it.
-//
-// SAFETY: This just defers to the underlying 'AcAutomaton' and thus inherits
-// its safety properties.
-#[doc(hidden)] -unsafe impl Automaton for Arc<dyn AcAutomaton> { - #[inline(always)] - fn start_state(&self, anchored: Anchored) -> Result<StateID, MatchError> { - (**self).start_state(anchored) - } - - #[inline(always)] - fn next_state( - &self, - anchored: Anchored, - sid: StateID, - byte: u8, - ) -> StateID { - (**self).next_state(anchored, sid, byte) - } - - #[inline(always)] - fn is_special(&self, sid: StateID) -> bool { - (**self).is_special(sid) - } - - #[inline(always)] - fn is_dead(&self, sid: StateID) -> bool { - (**self).is_dead(sid) - } - - #[inline(always)] - fn is_match(&self, sid: StateID) -> bool { - (**self).is_match(sid) - } - - #[inline(always)] - fn is_start(&self, sid: StateID) -> bool { - (**self).is_start(sid) - } - - #[inline(always)] - fn match_kind(&self) -> MatchKind { - (**self).match_kind() - } - - #[inline(always)] - fn match_len(&self, sid: StateID) -> usize { - (**self).match_len(sid) - } - - #[inline(always)] - fn match_pattern(&self, sid: StateID, index: usize) -> PatternID { - (**self).match_pattern(sid, index) - } - - #[inline(always)] - fn patterns_len(&self) -> usize { - (**self).patterns_len() - } - - #[inline(always)] - fn pattern_len(&self, pid: PatternID) -> usize { - (**self).pattern_len(pid) - } - - #[inline(always)] - fn min_pattern_len(&self) -> usize { - (**self).min_pattern_len() - } - - #[inline(always)] - fn max_pattern_len(&self) -> usize { - (**self).max_pattern_len() - } - - #[inline(always)] - fn memory_usage(&self) -> usize { - (**self).memory_usage() - } - - #[inline(always)] - fn prefilter(&self) -> Option<&Prefilter> { - (**self).prefilter() - } - - // Even though 'try_find' and 'try_find_overlapping' each have their - // own default impls, we explicitly define them here to fix a perf bug. - // Without these explicit definitions, the default impl will wind up using - // dynamic dispatch for all 'Automaton' method calls, including things like - // 'next_state' that absolutely must get inlined or else perf is trashed. - // Defining them explicitly here like this still requires dynamic dispatch - // to call 'try_find' itself, but all uses of 'Automaton' within 'try_find' - // are monomorphized. - // - // We don't need to explicitly impl any other methods, I think, because - // they are all implemented themselves in terms of 'try_find' and - // 'try_find_overlapping'. We still might wind up with an extra virtual - // call here or there, but that's okay since it's outside of any perf - // critical areas. - - #[inline(always)] - fn try_find( - &self, - input: &Input<'_>, - ) -> Result<Option<Match>, MatchError> { - (**self).try_find(input) - } - - #[inline(always)] - fn try_find_overlapping( - &self, - input: &Input<'_>, - state: &mut OverlappingState, - ) -> Result<(), MatchError> { - (**self).try_find_overlapping(input, state) - } -} - -/// Returns an error if the start state configuration does not support the -/// desired search configuration. See the internal 'AhoCorasick::start_kind' -/// field docs for more details. 
-fn enforce_anchored_consistency(
-    have: StartKind,
-    want: Anchored,
-) -> Result<(), MatchError> {
-    match have {
-        StartKind::Both => Ok(()),
-        StartKind::Unanchored if !want.is_anchored() => Ok(()),
-        StartKind::Unanchored => Err(MatchError::invalid_input_anchored()),
-        StartKind::Anchored if want.is_anchored() => Ok(()),
-        StartKind::Anchored => Err(MatchError::invalid_input_unanchored()),
-    }
-}
diff --git a/vendor/aho-corasick/src/automaton.rs b/vendor/aho-corasick/src/automaton.rs
deleted file mode 100644
index c41dc6e1db305e..00000000000000
--- a/vendor/aho-corasick/src/automaton.rs
+++ /dev/null
@@ -1,1608 +0,0 @@
-/*!
-Provides [`Automaton`] trait for abstracting over Aho-Corasick automata.
-
-The `Automaton` trait provides a way to write generic code over any
-Aho-Corasick automaton. It also provides access to lower level APIs that
-permit walking the state transitions of an Aho-Corasick automaton manually.
-*/
-
-use alloc::{string::String, vec::Vec};
-
-use crate::util::{
-    error::MatchError,
-    primitives::PatternID,
-    search::{Anchored, Input, Match, MatchKind, Span},
-};
-
-pub use crate::util::{
-    prefilter::{Candidate, Prefilter},
-    primitives::{StateID, StateIDError},
-};
-
-/// We seal the `Automaton` trait for now. It's a big trait, and it's
-/// conceivable that I might want to add new required methods, and sealing the
-/// trait permits doing that in a backwards compatible fashion. On the other
-/// hand, if you have a solid use case for implementing the trait yourself,
-/// please file an issue and we can discuss it. This was *mostly* done as a
-/// conservative step.
-pub(crate) mod private {
-    pub trait Sealed {}
-}
-impl private::Sealed for crate::nfa::noncontiguous::NFA {}
-impl private::Sealed for crate::nfa::contiguous::NFA {}
-impl private::Sealed for crate::dfa::DFA {}
-
-impl<'a, T: private::Sealed + ?Sized> private::Sealed for &'a T {}
-
-/// A trait that abstracts over Aho-Corasick automata.
-///
-/// This trait primarily exists for niche use cases such as:
-///
-/// * Using an NFA or DFA directly, bypassing the top-level
-/// [`AhoCorasick`](crate::AhoCorasick) searcher. Currently, these include
-/// [`noncontiguous::NFA`](crate::nfa::noncontiguous::NFA),
-/// [`contiguous::NFA`](crate::nfa::contiguous::NFA) and
-/// [`dfa::DFA`](crate::dfa::DFA).
-/// * Implementing your own custom search routine by walking the automaton
-/// yourself. This might be useful for implementing search on non-contiguous
-/// strings or streams.
-///
-/// For most use cases, it is not expected that users will need
-/// to use or even know about this trait. Indeed, the top level
-/// [`AhoCorasick`](crate::AhoCorasick) searcher does not expose any details
-/// about this trait, nor does it implement it itself.
-///
-/// Note that this trait defines a number of default methods, such as
-/// [`Automaton::try_find`] and [`Automaton::try_find_iter`], which implement
-/// higher level search routines in terms of the lower level automata API.
-///
-/// # Sealed
-///
-/// Currently, this trait is sealed. That means users of this crate can write
-/// generic routines over this trait but cannot implement it themselves. This
-/// restriction may be lifted in the future, but sealing the trait permits
-/// adding new required methods in a backwards compatible fashion.
-///
-/// # Special states
-///
-/// This trait encodes a notion of "special" states in an automaton. Namely,
-/// a state is treated as special if it is a dead, match or start state:
-///
-/// * A dead state is a state that cannot be left once entered. All transitions
-/// on a dead state lead back to itself. The dead state is meant to be treated
-/// as a sentinel indicating that the search should stop and return a match if
-/// one has been found, and nothing otherwise.
-/// * A match state is a state that indicates one or more patterns have
-/// matched. Depending on the [`MatchKind`] of the automaton, a search may
-/// stop once a match is seen, or it may continue looking for matches until
-/// it enters a dead state or sees the end of the haystack.
-/// * A start state is a state that a search begins in. It is useful to know
-/// when a search enters a start state because it may mean that a prefilter can
-/// be used to skip ahead and quickly look for candidate matches. Unlike dead
-/// and match states, it is never necessary to explicitly handle start states
-/// for correctness. Indeed, in this crate, implementations of `Automaton`
-/// will only treat start states as "special" when a prefilter is enabled and
-/// active. Otherwise, treating it as special has no purpose and winds up
-/// slowing down the overall search because it results in ping-ponging between
-/// the main state transition and the "special" state logic.
-///
-/// Since checking whether a state is special by doing three different
-/// checks would be too expensive inside a fast search loop, the
-/// [`Automaton::is_special`] method is provided for quickly checking whether
-/// the state is special. The `Automaton::is_dead`, `Automaton::is_match` and
-/// `Automaton::is_start` predicates can then be used to determine which kind
-/// of special state it is.
-///
-/// # Panics
-///
-/// Most of the APIs on this trait should panic or give incorrect results
-/// if invalid inputs are given to it. For example, `Automaton::next_state`
-/// has unspecified behavior if the state ID given to it is not a valid
-/// state ID for the underlying automaton. Valid state IDs can only be
-/// retrieved in one of two ways: calling `Automaton::start_state` or calling
-/// `Automaton::next_state` with a valid state ID.
-///
-/// # Safety
-///
-/// This trait is not safe to implement so that code may rely on the
-/// correctness of implementations of this trait to avoid undefined behavior.
-/// The primary correctness guarantees are:
-///
-/// * `Automaton::start_state` always returns a valid state ID or an error or
-/// panics.
-/// * `Automaton::next_state`, when given a valid state ID, always returns
-/// a valid state ID for all values of `anchored` and `byte`, or otherwise
-/// panics.
-///
-/// In general, the rest of the methods on `Automaton` need to uphold their
-/// contracts as well. For example, `Automaton::is_dead` should only return
-/// true if the given state ID is actually a dead state.
-///
-/// Note that currently this crate does not rely on the safety property defined
-/// here to avoid undefined behavior. Instead, this was done to make it
-/// _possible_ to do in the future.
-///
-/// # Example
-///
-/// This example shows how one might implement a basic but correct search
-/// routine. We keep things simple by not using prefilters or worrying about
-/// anchored searches, but do make sure our search is correct for all possible
-/// [`MatchKind`] semantics. (The comments in the code below note the parts
-/// that are needed to support certain `MatchKind` semantics.)
-/// -/// ``` -/// use aho_corasick::{ -/// automaton::Automaton, -/// nfa::noncontiguous::NFA, -/// Anchored, Match, MatchError, MatchKind, -/// }; -/// -/// // Run an unanchored search for 'aut' in 'haystack'. Return the first match -/// // seen according to the automaton's match semantics. This returns an error -/// // if the given automaton does not support unanchored searches. -/// fn find<A: Automaton>( -/// aut: A, -/// haystack: &[u8], -/// ) -> Result<Option<Match>, MatchError> { -/// let mut sid = aut.start_state(Anchored::No)?; -/// let mut at = 0; -/// let mut mat = None; -/// let get_match = |sid, at| { -/// let pid = aut.match_pattern(sid, 0); -/// let len = aut.pattern_len(pid); -/// Match::new(pid, (at - len)..at) -/// }; -/// // Start states can be match states! -/// if aut.is_match(sid) { -/// mat = Some(get_match(sid, at)); -/// // Standard semantics require matches to be reported as soon as -/// // they're seen. Otherwise, we continue until we see a dead state -/// // or the end of the haystack. -/// if matches!(aut.match_kind(), MatchKind::Standard) { -/// return Ok(mat); -/// } -/// } -/// while at < haystack.len() { -/// sid = aut.next_state(Anchored::No, sid, haystack[at]); -/// if aut.is_special(sid) { -/// if aut.is_dead(sid) { -/// return Ok(mat); -/// } else if aut.is_match(sid) { -/// mat = Some(get_match(sid, at + 1)); -/// // As above, standard semantics require that we return -/// // immediately once a match is found. -/// if matches!(aut.match_kind(), MatchKind::Standard) { -/// return Ok(mat); -/// } -/// } -/// } -/// at += 1; -/// } -/// Ok(mat) -/// } -/// -/// // Show that it works for standard searches. -/// let nfa = NFA::new(&["samwise", "sam"]).unwrap(); -/// assert_eq!(Some(Match::must(1, 0..3)), find(&nfa, b"samwise")?); -/// -/// // But also works when using leftmost-first. Notice how the match result -/// // has changed! -/// let nfa = NFA::builder() -/// .match_kind(MatchKind::LeftmostFirst) -/// .build(&["samwise", "sam"]) -/// .unwrap(); -/// assert_eq!(Some(Match::must(0, 0..7)), find(&nfa, b"samwise")?); -/// -/// # Ok::<(), Box<dyn std::error::Error>>(()) -/// ``` -pub unsafe trait Automaton: private::Sealed { - /// Returns the starting state for the given anchor mode. - /// - /// Upon success, the state ID returned is guaranteed to be valid for - /// this automaton. - /// - /// # Errors - /// - /// This returns an error when the given search configuration is not - /// supported by the underlying automaton. For example, if the underlying - /// automaton only supports unanchored searches but the given configuration - /// was set to an anchored search, then this must return an error. - fn start_state(&self, anchored: Anchored) -> Result<StateID, MatchError>; - - /// Performs a state transition from `sid` for `byte` and returns the next - /// state. - /// - /// `anchored` should be [`Anchored::Yes`] when executing an anchored - /// search and [`Anchored::No`] otherwise. For some implementations of - /// `Automaton`, it is required to know whether the search is anchored - /// or not in order to avoid following failure transitions. Other - /// implementations may ignore `anchored` altogether and depend on - /// `Automaton::start_state` returning a state that walks a different path - /// through the automaton depending on whether the search is anchored or - /// not. - /// - /// # Panics - /// - /// This routine may panic or return incorrect results when the given state - /// ID is invalid. A state ID is valid if and only if: - /// - /// 1. 
It came from a call to `Automaton::start_state`, or - /// 2. It came from a previous call to `Automaton::next_state` with a - /// valid state ID. - /// - /// Implementations must treat all possible values of `byte` as valid. - /// - /// Implementations may panic on unsupported values of `anchored`, but are - /// not required to do so. - fn next_state( - &self, - anchored: Anchored, - sid: StateID, - byte: u8, - ) -> StateID; - - /// Returns true if the given ID represents a "special" state. A special - /// state is a dead, match or start state. - /// - /// Note that implementations may choose to return false when the given ID - /// corresponds to a start state. Namely, it always correct to treat start - /// states as non-special. Implementations must return true for states that - /// are dead or contain matches. - /// - /// This has unspecified behavior when given an invalid state ID. - fn is_special(&self, sid: StateID) -> bool; - - /// Returns true if the given ID represents a dead state. - /// - /// A dead state is a type of "sink" in a finite state machine. It - /// corresponds to a state whose transitions all loop back to itself. That - /// is, once entered, it can never be left. In practice, it serves as a - /// sentinel indicating that the search should terminate. - /// - /// This has unspecified behavior when given an invalid state ID. - fn is_dead(&self, sid: StateID) -> bool; - - /// Returns true if the given ID represents a match state. - /// - /// A match state is always associated with one or more pattern IDs that - /// matched at the position in the haystack when the match state was - /// entered. When a match state is entered, the match semantics dictate - /// whether it should be returned immediately (for `MatchKind::Standard`) - /// or if the search should continue (for `MatchKind::LeftmostFirst` and - /// `MatchKind::LeftmostLongest`) until a dead state is seen or the end of - /// the haystack has been reached. - /// - /// This has unspecified behavior when given an invalid state ID. - fn is_match(&self, sid: StateID) -> bool; - - /// Returns true if the given ID represents a start state. - /// - /// While it is never incorrect to ignore start states during a search - /// (except for the start of the search of course), knowing whether one has - /// entered a start state can be useful for certain classes of performance - /// optimizations. For example, if one is in a start state, it may be legal - /// to try to skip ahead and look for match candidates more quickly than - /// would otherwise be accomplished by walking the automaton. - /// - /// Implementations of `Automaton` in this crate "unspecialize" start - /// states when a prefilter is not active or enabled. In this case, it - /// is possible for `Automaton::is_special(sid)` to return false while - /// `Automaton::is_start(sid)` returns true. - /// - /// This has unspecified behavior when given an invalid state ID. - fn is_start(&self, sid: StateID) -> bool; - - /// Returns the match semantics that this automaton was built with. - fn match_kind(&self) -> MatchKind; - - /// Returns the total number of matches for the given state ID. - /// - /// This has unspecified behavior if the given ID does not refer to a match - /// state. - fn match_len(&self, sid: StateID) -> usize; - - /// Returns the pattern ID for the match state given by `sid` at the - /// `index` given. - /// - /// Typically, `index` is only ever greater than `0` when implementing an - /// overlapping search. 
Otherwise, it's likely that your search only cares - /// about reporting the first pattern ID in a match state. - /// - /// This has unspecified behavior if the given ID does not refer to a match - /// state, or if the index is greater than or equal to the total number of - /// matches in this match state. - fn match_pattern(&self, sid: StateID, index: usize) -> PatternID; - - /// Returns the total number of patterns compiled into this automaton. - fn patterns_len(&self) -> usize; - - /// Returns the length of the pattern for the given ID. - /// - /// This has unspecified behavior when given an invalid pattern - /// ID. A pattern ID is valid if and only if it is less than - /// `Automaton::patterns_len`. - fn pattern_len(&self, pid: PatternID) -> usize; - - /// Returns the length, in bytes, of the shortest pattern in this - /// automaton. - fn min_pattern_len(&self) -> usize; - - /// Returns the length, in bytes, of the longest pattern in this automaton. - fn max_pattern_len(&self) -> usize; - - /// Returns the heap memory usage, in bytes, used by this automaton. - fn memory_usage(&self) -> usize; - - /// Returns a prefilter, if available, that can be used to accelerate - /// searches for this automaton. - /// - /// The typical way this is used is when the start state is entered during - /// a search. When that happens, one can use a prefilter to skip ahead and - /// look for candidate matches without having to walk the automaton on the - /// bytes between candidates. - /// - /// Typically a prefilter is only available when there are a small (<100) - /// number of patterns built into the automaton. - fn prefilter(&self) -> Option<&Prefilter>; - - /// Executes a non-overlapping search with this automaton using the given - /// configuration. - /// - /// See - /// [`AhoCorasick::try_find`](crate::AhoCorasick::try_find) - /// for more documentation and examples. - fn try_find( - &self, - input: &Input<'_>, - ) -> Result<Option<Match>, MatchError> { - try_find_fwd(&self, input) - } - - /// Executes a overlapping search with this automaton using the given - /// configuration. - /// - /// See - /// [`AhoCorasick::try_find_overlapping`](crate::AhoCorasick::try_find_overlapping) - /// for more documentation and examples. - fn try_find_overlapping( - &self, - input: &Input<'_>, - state: &mut OverlappingState, - ) -> Result<(), MatchError> { - try_find_overlapping_fwd(&self, input, state) - } - - /// Returns an iterator of non-overlapping matches with this automaton - /// using the given configuration. - /// - /// See - /// [`AhoCorasick::try_find_iter`](crate::AhoCorasick::try_find_iter) - /// for more documentation and examples. - fn try_find_iter<'a, 'h>( - &'a self, - input: Input<'h>, - ) -> Result<FindIter<'a, 'h, Self>, MatchError> - where - Self: Sized, - { - FindIter::new(self, input) - } - - /// Returns an iterator of overlapping matches with this automaton - /// using the given configuration. - /// - /// See - /// [`AhoCorasick::try_find_overlapping_iter`](crate::AhoCorasick::try_find_overlapping_iter) - /// for more documentation and examples. - fn try_find_overlapping_iter<'a, 'h>( - &'a self, - input: Input<'h>, - ) -> Result<FindOverlappingIter<'a, 'h, Self>, MatchError> - where - Self: Sized, - { - if !self.match_kind().is_standard() { - return Err(MatchError::unsupported_overlapping( - self.match_kind(), - )); - } - // We might consider lifting this restriction. The reason why I added - // it was to ban the combination of "anchored search" and "overlapping - // iteration." 
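A minimal sketch of the restriction described above (overlapping iteration is refused unless the automaton uses standard match semantics), using only the `aho_corasick` items already quoted in these docs:

```rust
use aho_corasick::{automaton::Automaton, nfa::noncontiguous::NFA, Input, MatchKind};

// Leftmost-first semantics: overlapping iteration is reported as an error.
let nfa = NFA::builder()
    .match_kind(MatchKind::LeftmostFirst)
    .build(&["samwise", "sam"])
    .unwrap();
assert!(nfa.try_find_overlapping_iter(Input::new("samwise")).is_err());

// Default (standard) semantics: every overlapping match is yielded.
let nfa = NFA::new(&["samwise", "sam"]).unwrap();
let count = nfa.try_find_overlapping_iter(Input::new("samwise")).unwrap().count();
assert_eq!(2, count);
```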
The match semantics aren't totally clear in that case. - // Should we allow *any* matches that are adjacent to *any* previous - // match? Or only following the most recent one? Or only matches - // that start at the beginning of the search? We might also elect to - // just keep this restriction in place, as callers should be able to - // implement it themselves if they want to. - if input.get_anchored().is_anchored() { - return Err(MatchError::invalid_input_anchored()); - } - let _ = self.start_state(input.get_anchored())?; - let state = OverlappingState::start(); - Ok(FindOverlappingIter { aut: self, input, state }) - } - - /// Replaces all non-overlapping matches in `haystack` with - /// strings from `replace_with` depending on the pattern that - /// matched. The `replace_with` slice must have length equal to - /// `Automaton::patterns_len`. - /// - /// See - /// [`AhoCorasick::try_replace_all`](crate::AhoCorasick::try_replace_all) - /// for more documentation and examples. - fn try_replace_all<B>( - &self, - haystack: &str, - replace_with: &[B], - ) -> Result<String, MatchError> - where - Self: Sized, - B: AsRef<str>, - { - assert_eq!( - replace_with.len(), - self.patterns_len(), - "replace_all requires a replacement for every pattern \ - in the automaton" - ); - let mut dst = String::with_capacity(haystack.len()); - self.try_replace_all_with(haystack, &mut dst, |mat, _, dst| { - dst.push_str(replace_with[mat.pattern()].as_ref()); - true - })?; - Ok(dst) - } - - /// Replaces all non-overlapping matches in `haystack` with - /// strings from `replace_with` depending on the pattern that - /// matched. The `replace_with` slice must have length equal to - /// `Automaton::patterns_len`. - /// - /// See - /// [`AhoCorasick::try_replace_all_bytes`](crate::AhoCorasick::try_replace_all_bytes) - /// for more documentation and examples. - fn try_replace_all_bytes<B>( - &self, - haystack: &[u8], - replace_with: &[B], - ) -> Result<Vec<u8>, MatchError> - where - Self: Sized, - B: AsRef<[u8]>, - { - assert_eq!( - replace_with.len(), - self.patterns_len(), - "replace_all requires a replacement for every pattern \ - in the automaton" - ); - let mut dst = Vec::with_capacity(haystack.len()); - self.try_replace_all_with_bytes(haystack, &mut dst, |mat, _, dst| { - dst.extend(replace_with[mat.pattern()].as_ref()); - true - })?; - Ok(dst) - } - - /// Replaces all non-overlapping matches in `haystack` by calling the - /// `replace_with` closure given. - /// - /// See - /// [`AhoCorasick::try_replace_all_with`](crate::AhoCorasick::try_replace_all_with) - /// for more documentation and examples. - fn try_replace_all_with<F>( - &self, - haystack: &str, - dst: &mut String, - mut replace_with: F, - ) -> Result<(), MatchError> - where - Self: Sized, - F: FnMut(&Match, &str, &mut String) -> bool, - { - let mut last_match = 0; - for m in self.try_find_iter(Input::new(haystack))? { - // Since there are no restrictions on what kinds of patterns are - // in an Aho-Corasick automaton, we might get matches that split - // a codepoint, or even matches of a partial codepoint. When that - // happens, we just skip the match. 
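A small sketch of the replacement helpers defined above, assuming a noncontiguous NFA built with the default (standard) semantics; as the comment above notes, matches that would split a UTF-8 codepoint are simply skipped:

```rust
use aho_corasick::{automaton::Automaton, nfa::noncontiguous::NFA};

// One replacement string per pattern, in the same order the patterns were given.
let nfa = NFA::new(&["tea", "coffee"]).unwrap();
let replaced = nfa.try_replace_all("tea or coffee", &["TEA", "COFFEE"]).unwrap();
assert_eq!("TEA or COFFEE", replaced);
```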
- if !haystack.is_char_boundary(m.start()) - || !haystack.is_char_boundary(m.end()) - { - continue; - } - dst.push_str(&haystack[last_match..m.start()]); - last_match = m.end(); - if !replace_with(&m, &haystack[m.start()..m.end()], dst) { - break; - }; - } - dst.push_str(&haystack[last_match..]); - Ok(()) - } - - /// Replaces all non-overlapping matches in `haystack` by calling the - /// `replace_with` closure given. - /// - /// See - /// [`AhoCorasick::try_replace_all_with_bytes`](crate::AhoCorasick::try_replace_all_with_bytes) - /// for more documentation and examples. - fn try_replace_all_with_bytes<F>( - &self, - haystack: &[u8], - dst: &mut Vec<u8>, - mut replace_with: F, - ) -> Result<(), MatchError> - where - Self: Sized, - F: FnMut(&Match, &[u8], &mut Vec<u8>) -> bool, - { - let mut last_match = 0; - for m in self.try_find_iter(Input::new(haystack))? { - dst.extend(&haystack[last_match..m.start()]); - last_match = m.end(); - if !replace_with(&m, &haystack[m.start()..m.end()], dst) { - break; - }; - } - dst.extend(&haystack[last_match..]); - Ok(()) - } - - /// Returns an iterator of non-overlapping matches with this automaton - /// from the stream given. - /// - /// See - /// [`AhoCorasick::try_stream_find_iter`](crate::AhoCorasick::try_stream_find_iter) - /// for more documentation and examples. - #[cfg(feature = "std")] - fn try_stream_find_iter<'a, R: std::io::Read>( - &'a self, - rdr: R, - ) -> Result<StreamFindIter<'a, Self, R>, MatchError> - where - Self: Sized, - { - Ok(StreamFindIter { it: StreamChunkIter::new(self, rdr)? }) - } - - /// Replaces all non-overlapping matches in `rdr` with strings from - /// `replace_with` depending on the pattern that matched, and writes the - /// result to `wtr`. The `replace_with` slice must have length equal to - /// `Automaton::patterns_len`. - /// - /// See - /// [`AhoCorasick::try_stream_replace_all`](crate::AhoCorasick::try_stream_replace_all) - /// for more documentation and examples. - #[cfg(feature = "std")] - fn try_stream_replace_all<R, W, B>( - &self, - rdr: R, - wtr: W, - replace_with: &[B], - ) -> std::io::Result<()> - where - Self: Sized, - R: std::io::Read, - W: std::io::Write, - B: AsRef<[u8]>, - { - assert_eq!( - replace_with.len(), - self.patterns_len(), - "streaming replace_all requires a replacement for every pattern \ - in the automaton", - ); - self.try_stream_replace_all_with(rdr, wtr, |mat, _, wtr| { - wtr.write_all(replace_with[mat.pattern()].as_ref()) - }) - } - - /// Replaces all non-overlapping matches in `rdr` by calling the - /// `replace_with` closure given and writing the result to `wtr`. - /// - /// See - /// [`AhoCorasick::try_stream_replace_all_with`](crate::AhoCorasick::try_stream_replace_all_with) - /// for more documentation and examples. - #[cfg(feature = "std")] - fn try_stream_replace_all_with<R, W, F>( - &self, - rdr: R, - mut wtr: W, - mut replace_with: F, - ) -> std::io::Result<()> - where - Self: Sized, - R: std::io::Read, - W: std::io::Write, - F: FnMut(&Match, &[u8], &mut W) -> std::io::Result<()>, - { - let mut it = StreamChunkIter::new(self, rdr).map_err(|e| { - let kind = std::io::ErrorKind::Other; - std::io::Error::new(kind, e) - })?; - while let Some(result) = it.next() { - let chunk = result?; - match chunk { - StreamChunk::NonMatch { bytes, .. 
} => { - wtr.write_all(bytes)?; - } - StreamChunk::Match { bytes, mat } => { - replace_with(&mat, bytes, &mut wtr)?; - } - } - } - Ok(()) - } -} - -// SAFETY: This just defers to the underlying 'AcAutomaton' and thus inherits -// its safety properties. -unsafe impl<'a, A: Automaton + ?Sized> Automaton for &'a A { - #[inline(always)] - fn start_state(&self, anchored: Anchored) -> Result<StateID, MatchError> { - (**self).start_state(anchored) - } - - #[inline(always)] - fn next_state( - &self, - anchored: Anchored, - sid: StateID, - byte: u8, - ) -> StateID { - (**self).next_state(anchored, sid, byte) - } - - #[inline(always)] - fn is_special(&self, sid: StateID) -> bool { - (**self).is_special(sid) - } - - #[inline(always)] - fn is_dead(&self, sid: StateID) -> bool { - (**self).is_dead(sid) - } - - #[inline(always)] - fn is_match(&self, sid: StateID) -> bool { - (**self).is_match(sid) - } - - #[inline(always)] - fn is_start(&self, sid: StateID) -> bool { - (**self).is_start(sid) - } - - #[inline(always)] - fn match_kind(&self) -> MatchKind { - (**self).match_kind() - } - - #[inline(always)] - fn match_len(&self, sid: StateID) -> usize { - (**self).match_len(sid) - } - - #[inline(always)] - fn match_pattern(&self, sid: StateID, index: usize) -> PatternID { - (**self).match_pattern(sid, index) - } - - #[inline(always)] - fn patterns_len(&self) -> usize { - (**self).patterns_len() - } - - #[inline(always)] - fn pattern_len(&self, pid: PatternID) -> usize { - (**self).pattern_len(pid) - } - - #[inline(always)] - fn min_pattern_len(&self) -> usize { - (**self).min_pattern_len() - } - - #[inline(always)] - fn max_pattern_len(&self) -> usize { - (**self).max_pattern_len() - } - - #[inline(always)] - fn memory_usage(&self) -> usize { - (**self).memory_usage() - } - - #[inline(always)] - fn prefilter(&self) -> Option<&Prefilter> { - (**self).prefilter() - } -} - -/// Represents the current state of an overlapping search. -/// -/// This is used for overlapping searches since they need to know something -/// about the previous search. For example, when multiple patterns match at the -/// same position, this state tracks the last reported pattern so that the next -/// search knows whether to report another matching pattern or continue with -/// the search at the next position. Additionally, it also tracks which state -/// the last search call terminated in and the current offset of the search -/// in the haystack. -/// -/// This type provides limited introspection capabilities. The only thing a -/// caller can do is construct it and pass it around to permit search routines -/// to use it to track state, and to ask whether a match has been found. -/// -/// Callers should always provide a fresh state constructed via -/// [`OverlappingState::start`] when starting a new search. That same state -/// should be reused for subsequent searches on the same `Input`. The state -/// given will advance through the haystack itself. Callers can detect the end -/// of a search when neither an error nor a match is returned. -/// -/// # Example -/// -/// This example shows how to manually iterate over all overlapping matches. If -/// you need this, you might consider using -/// [`AhoCorasick::find_overlapping_iter`](crate::AhoCorasick::find_overlapping_iter) -/// instead, but this shows how to correctly use an `OverlappingState`. 
-/// -/// ``` -/// use aho_corasick::{ -/// automaton::OverlappingState, -/// AhoCorasick, Input, Match, -/// }; -/// -/// let patterns = &["append", "appendage", "app"]; -/// let haystack = "append the app to the appendage"; -/// -/// let ac = AhoCorasick::new(patterns).unwrap(); -/// let mut state = OverlappingState::start(); -/// let mut matches = vec![]; -/// -/// loop { -/// ac.find_overlapping(haystack, &mut state); -/// let mat = match state.get_match() { -/// None => break, -/// Some(mat) => mat, -/// }; -/// matches.push(mat); -/// } -/// let expected = vec![ -/// Match::must(2, 0..3), -/// Match::must(0, 0..6), -/// Match::must(2, 11..14), -/// Match::must(2, 22..25), -/// Match::must(0, 22..28), -/// Match::must(1, 22..31), -/// ]; -/// assert_eq!(expected, matches); -/// ``` -#[derive(Clone, Debug)] -pub struct OverlappingState { - /// The match reported by the most recent overlapping search to use this - /// state. - /// - /// If a search does not find any matches, then it is expected to clear - /// this value. - mat: Option<Match>, - /// The state ID of the state at which the search was in when the call - /// terminated. When this is a match state, `last_match` must be set to a - /// non-None value. - /// - /// A `None` value indicates the start state of the corresponding - /// automaton. We cannot use the actual ID, since any one automaton may - /// have many start states, and which one is in use depends on search-time - /// factors (such as whether the search is anchored or not). - id: Option<StateID>, - /// The position of the search. - /// - /// When `id` is None (i.e., we are starting a search), this is set to - /// the beginning of the search as given by the caller regardless of its - /// current value. Subsequent calls to an overlapping search pick up at - /// this offset. - at: usize, - /// The index into the matching patterns of the next match to report if the - /// current state is a match state. Note that this may be 1 greater than - /// the total number of matches to report for the current match state. (In - /// which case, no more matches should be reported at the current position - /// and the search should advance to the next position.) - next_match_index: Option<usize>, -} - -impl OverlappingState { - /// Create a new overlapping state that begins at the start state. - pub fn start() -> OverlappingState { - OverlappingState { mat: None, id: None, at: 0, next_match_index: None } - } - - /// Return the match result of the most recent search to execute with this - /// state. - /// - /// Every search will clear this result automatically, such that if no - /// match is found, this will always correctly report `None`. - pub fn get_match(&self) -> Option<Match> { - self.mat - } -} - -/// An iterator of non-overlapping matches in a particular haystack. -/// -/// This iterator yields matches according to the [`MatchKind`] used by this -/// automaton. -/// -/// This iterator is constructed via the [`Automaton::try_find_iter`] method. -/// -/// The type variable `A` refers to the implementation of the [`Automaton`] -/// trait used to execute the search. -/// -/// The lifetime `'a` refers to the lifetime of the [`Automaton`] -/// implementation. -/// -/// The lifetime `'h` refers to the lifetime of the haystack being searched. -#[derive(Debug)] -pub struct FindIter<'a, 'h, A> { - /// The automaton used to drive the search. - aut: &'a A, - /// The input parameters to give to each search call. - /// - /// The start position of the search is mutated during iteration. 
- input: Input<'h>, - /// Records the end offset of the most recent match. This is necessary to - /// handle a corner case for preventing empty matches from overlapping with - /// the ending bounds of a prior match. - last_match_end: Option<usize>, -} - -impl<'a, 'h, A: Automaton> FindIter<'a, 'h, A> { - /// Creates a new non-overlapping iterator. If the given automaton would - /// return an error on a search with the given input configuration, then - /// that error is returned here. - fn new( - aut: &'a A, - input: Input<'h>, - ) -> Result<FindIter<'a, 'h, A>, MatchError> { - // The only way this search can fail is if we cannot retrieve the start - // state. e.g., Asking for an anchored search when only unanchored - // searches are supported. - let _ = aut.start_state(input.get_anchored())?; - Ok(FindIter { aut, input, last_match_end: None }) - } - - /// Executes a search and returns a match if one is found. - /// - /// This does not advance the input forward. It just executes a search - /// based on the current configuration/offsets. - fn search(&self) -> Option<Match> { - // The unwrap is OK here because we check at iterator construction time - // that no subsequent search call (using the same configuration) will - // ever return an error. - self.aut - .try_find(&self.input) - .expect("already checked that no match error can occur") - } - - /// Handles the special case of an empty match by ensuring that 1) the - /// iterator always advances and 2) empty matches never overlap with other - /// matches. - /// - /// (1) is necessary because we principally make progress by setting the - /// starting location of the next search to the ending location of the last - /// match. But if a match is empty, then this results in a search that does - /// not advance and thus does not terminate. - /// - /// (2) is not strictly necessary, but makes intuitive sense and matches - /// the presiding behavior of most general purpose regex engines. - /// (Obviously this crate isn't a regex engine, but we choose to match - /// their semantics.) The "intuitive sense" here is that we want to report - /// NON-overlapping matches. So for example, given the patterns 'a' and - /// '' (an empty string) against the haystack 'a', without the special - /// handling, you'd get the matches [0, 1) and [1, 1), where the latter - /// overlaps with the end bounds of the former. - /// - /// Note that we mark this cold and forcefully prevent inlining because - /// handling empty matches like this is extremely rare and does require - /// quite a bit of code, comparatively. Keeping this code out of the main - /// iterator function keeps it smaller and more amenable to inlining - /// itself. - #[cold] - #[inline(never)] - fn handle_overlapping_empty_match( - &mut self, - mut m: Match, - ) -> Option<Match> { - assert!(m.is_empty()); - if Some(m.end()) == self.last_match_end { - self.input.set_start(self.input.start().checked_add(1).unwrap()); - m = self.search()?; - } - Some(m) - } -} - -impl<'a, 'h, A: Automaton> Iterator for FindIter<'a, 'h, A> { - type Item = Match; - - #[inline(always)] - fn next(&mut self) -> Option<Match> { - let mut m = self.search()?; - if m.is_empty() { - m = self.handle_overlapping_empty_match(m)?; - } - self.input.set_start(m.end()); - self.last_match_end = Some(m.end()); - Some(m) - } -} - -/// An iterator of overlapping matches in a particular haystack. -/// -/// This iterator will report all possible matches in a particular haystack, -/// even when the matches overlap. 
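A hedged sketch of the empty-match handling described for `FindIter` above, assuming the default builder accepts an empty pattern: the iterator always advances and never reports an empty match overlapping the end of the previous match.

```rust
use aho_corasick::{automaton::Automaton, nfa::noncontiguous::NFA, Input};

// An automaton containing only the empty pattern matches at every position,
// but never twice at the same offset, so a two-byte haystack yields exactly
// three matches: [0,0), [1,1) and [2,2).
let nfa = NFA::new(&[""]).unwrap();
let ends: Vec<usize> = nfa
    .try_find_iter(Input::new("ab"))
    .unwrap()
    .map(|m| m.end())
    .collect();
assert_eq!(vec![0, 1, 2], ends);
```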
-/// -/// This iterator is constructed via the -/// [`Automaton::try_find_overlapping_iter`] method. -/// -/// The type variable `A` refers to the implementation of the [`Automaton`] -/// trait used to execute the search. -/// -/// The lifetime `'a` refers to the lifetime of the [`Automaton`] -/// implementation. -/// -/// The lifetime `'h` refers to the lifetime of the haystack being searched. -#[derive(Debug)] -pub struct FindOverlappingIter<'a, 'h, A> { - aut: &'a A, - input: Input<'h>, - state: OverlappingState, -} - -impl<'a, 'h, A: Automaton> Iterator for FindOverlappingIter<'a, 'h, A> { - type Item = Match; - - #[inline(always)] - fn next(&mut self) -> Option<Match> { - self.aut - .try_find_overlapping(&self.input, &mut self.state) - .expect("already checked that no match error can occur here"); - self.state.get_match() - } -} - -/// An iterator that reports matches in a stream. -/// -/// This iterator yields elements of type `io::Result<Match>`, where an error -/// is reported if there was a problem reading from the underlying stream. -/// The iterator terminates only when the underlying stream reaches `EOF`. -/// -/// This iterator is constructed via the [`Automaton::try_stream_find_iter`] -/// method. -/// -/// The type variable `A` refers to the implementation of the [`Automaton`] -/// trait used to execute the search. -/// -/// The type variable `R` refers to the `io::Read` stream that is being read -/// from. -/// -/// The lifetime `'a` refers to the lifetime of the [`Automaton`] -/// implementation. -#[cfg(feature = "std")] -#[derive(Debug)] -pub struct StreamFindIter<'a, A, R> { - it: StreamChunkIter<'a, A, R>, -} - -#[cfg(feature = "std")] -impl<'a, A: Automaton, R: std::io::Read> Iterator - for StreamFindIter<'a, A, R> -{ - type Item = std::io::Result<Match>; - - fn next(&mut self) -> Option<std::io::Result<Match>> { - loop { - match self.it.next() { - None => return None, - Some(Err(err)) => return Some(Err(err)), - Some(Ok(StreamChunk::NonMatch { .. })) => {} - Some(Ok(StreamChunk::Match { mat, .. })) => { - return Some(Ok(mat)); - } - } - } - } -} - -/// An iterator that reports matches in a stream. -/// -/// (This doesn't actually implement the `Iterator` trait because it returns -/// something with a lifetime attached to a buffer it owns, but that's OK. It -/// still has a `next` method and is iterator-like enough to be fine.) -/// -/// This iterator yields elements of type `io::Result<StreamChunk>`, where -/// an error is reported if there was a problem reading from the underlying -/// stream. The iterator terminates only when the underlying stream reaches -/// `EOF`. -/// -/// The idea here is that each chunk represents either a match or a non-match, -/// and if you concatenated all of the chunks together, you'd reproduce the -/// entire contents of the stream, byte-for-byte. -/// -/// This chunk machinery is a bit complicated and it isn't strictly required -/// for a stream searcher that just reports matches. But we do need something -/// like this to deal with the "replacement" API, which needs to know which -/// chunks it can copy and which it needs to replace. -#[cfg(feature = "std")] -#[derive(Debug)] -struct StreamChunkIter<'a, A, R> { - /// The underlying automaton to do the search. - aut: &'a A, - /// The source of bytes we read from. - rdr: R, - /// A roll buffer for managing bytes from `rdr`. Basically, this is used - /// to handle the case of a match that is split by two different - /// calls to `rdr.read()`. 
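The chunk machinery sketched here is what backs the high-level stream methods defined on the trait above. A minimal usage sketch of the replacement entry point, assuming the `std` feature and the default, standard-semantics `AhoCorasick` searcher:

```rust
use aho_corasick::AhoCorasick;

// Non-match chunks are copied through to the writer; match chunks are
// swapped for the replacement associated with the matching pattern.
let ac = AhoCorasick::new(&["fox", "dog"]).unwrap();
let rdr = "the quick brown fox".as_bytes();
let mut wtr = vec![];
ac.try_stream_replace_all(rdr, &mut wtr, &["cat", "wolf"]).unwrap();
assert_eq!(b"the quick brown cat".to_vec(), wtr);
```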
This isn't strictly needed if all we needed to - /// do was report matches, but here we are reporting chunks of non-matches - /// and matches and in order to do that, we really just cannot treat our - /// stream as non-overlapping blocks of bytes. We need to permit some - /// overlap while we retain bytes from a previous `read` call in memory. - buf: crate::util::buffer::Buffer, - /// The unanchored starting state of this automaton. - start: StateID, - /// The state of the automaton. - sid: StateID, - /// The absolute position over the entire stream. - absolute_pos: usize, - /// The position we're currently at within `buf`. - buffer_pos: usize, - /// The buffer position of the end of the bytes that we last returned - /// to the caller. Basically, whenever we find a match, we look to see if - /// there is a difference between where the match started and the position - /// of the last byte we returned to the caller. If there's a difference, - /// then we need to return a 'NonMatch' chunk. - buffer_reported_pos: usize, -} - -#[cfg(feature = "std")] -impl<'a, A: Automaton, R: std::io::Read> StreamChunkIter<'a, A, R> { - fn new( - aut: &'a A, - rdr: R, - ) -> Result<StreamChunkIter<'a, A, R>, MatchError> { - // This restriction is a carry-over from older versions of this crate. - // I didn't have the bandwidth to think through how to handle, say, - // leftmost-first or leftmost-longest matching, but... it should be - // possible? The main problem is that once you see a match state in - // leftmost-first semantics, you can't just stop at that point and - // report a match. You have to keep going until you either hit a dead - // state or EOF. So how do you know when you'll hit a dead state? Well, - // you don't. With Aho-Corasick, I believe you can put a bound on it - // and say, "once a match has been seen, you'll need to scan forward at - // most N bytes" where N=aut.max_pattern_len(). - // - // Which is fine, but it does mean that state about whether we're still - // looking for a dead state or not needs to persist across buffer - // refills. Which this code doesn't really handle. It does preserve - // *some* state across buffer refills, basically ensuring that a match - // span is always in memory. - if !aut.match_kind().is_standard() { - return Err(MatchError::unsupported_stream(aut.match_kind())); - } - // This is kind of a cop-out, but empty matches are SUPER annoying. - // If we know they can't happen (which is what we enforce here), then - // it makes a lot of logic much simpler. With that said, I'm open to - // supporting this case, but we need to define proper semantics for it - // first. It wasn't totally clear to me what it should do at the time - // of writing, so I decided to just be conservative. - // - // It also seems like a very weird case to support anyway. Why search a - // stream if you're just going to get a match at every position? - // - // ¯\_(ツ)_/¯ - if aut.min_pattern_len() == 0 { - return Err(MatchError::unsupported_empty()); - } - let start = aut.start_state(Anchored::No)?; - Ok(StreamChunkIter { - aut, - rdr, - buf: crate::util::buffer::Buffer::new(aut.max_pattern_len()), - start, - sid: start, - absolute_pos: 0, - buffer_pos: 0, - buffer_reported_pos: 0, - }) - } - - fn next(&mut self) -> Option<std::io::Result<StreamChunk>> { - // This code is pretty gnarly. 
It IS simpler than the equivalent code - // in the previous aho-corasick release, in part because we inline - // automaton traversal here and also in part because we have abdicated - // support for automatons that contain an empty pattern. - // - // I suspect this code could be made a bit simpler by designing a - // better buffer abstraction. - // - // But in general, this code is basically write-only. So you'll need - // to go through it step-by-step to grok it. One of the key bits of - // complexity is tracking a few different offsets. 'buffer_pos' is - // where we are in the buffer for search. 'buffer_reported_pos' is the - // position immediately following the last byte in the buffer that - // we've returned to the caller. And 'absolute_pos' is the overall - // current absolute position of the search in the entire stream, and - // this is what match spans are reported in terms of. - loop { - if self.aut.is_match(self.sid) { - let mat = self.get_match(); - if let Some(r) = self.get_non_match_chunk(mat) { - self.buffer_reported_pos += r.len(); - let bytes = &self.buf.buffer()[r]; - return Some(Ok(StreamChunk::NonMatch { bytes })); - } - self.sid = self.start; - let r = self.get_match_chunk(mat); - self.buffer_reported_pos += r.len(); - let bytes = &self.buf.buffer()[r]; - return Some(Ok(StreamChunk::Match { bytes, mat })); - } - if self.buffer_pos >= self.buf.buffer().len() { - if let Some(r) = self.get_pre_roll_non_match_chunk() { - self.buffer_reported_pos += r.len(); - let bytes = &self.buf.buffer()[r]; - return Some(Ok(StreamChunk::NonMatch { bytes })); - } - if self.buf.buffer().len() >= self.buf.min_buffer_len() { - self.buffer_pos = self.buf.min_buffer_len(); - self.buffer_reported_pos -= - self.buf.buffer().len() - self.buf.min_buffer_len(); - self.buf.roll(); - } - match self.buf.fill(&mut self.rdr) { - Err(err) => return Some(Err(err)), - Ok(true) => {} - Ok(false) => { - // We've hit EOF, but if there are still some - // unreported bytes remaining, return them now. - if let Some(r) = self.get_eof_non_match_chunk() { - self.buffer_reported_pos += r.len(); - let bytes = &self.buf.buffer()[r]; - return Some(Ok(StreamChunk::NonMatch { bytes })); - } - // We've reported everything! - return None; - } - } - } - let start = self.absolute_pos; - for &byte in self.buf.buffer()[self.buffer_pos..].iter() { - self.sid = self.aut.next_state(Anchored::No, self.sid, byte); - self.absolute_pos += 1; - if self.aut.is_match(self.sid) { - break; - } - } - self.buffer_pos += self.absolute_pos - start; - } - } - - /// Return a match chunk for the given match. It is assumed that the match - /// ends at the current `buffer_pos`. - fn get_match_chunk(&self, mat: Match) -> core::ops::Range<usize> { - let start = self.buffer_pos - mat.len(); - let end = self.buffer_pos; - start..end - } - - /// Return a non-match chunk, if necessary, just before reporting a match. - /// This returns `None` if there is nothing to report. Otherwise, this - /// assumes that the given match ends at the current `buffer_pos`. - fn get_non_match_chunk( - &self, - mat: Match, - ) -> Option<core::ops::Range<usize>> { - let buffer_mat_start = self.buffer_pos - mat.len(); - if buffer_mat_start > self.buffer_reported_pos { - let start = self.buffer_reported_pos; - let end = buffer_mat_start; - return Some(start..end); - } - None - } - - /// Look for any bytes that should be reported as a non-match just before - /// rolling the buffer. 
- /// - /// Note that this only reports bytes up to `buffer.len() - - /// min_buffer_len`, as it's not possible to know whether the bytes - /// following that will participate in a match or not. - fn get_pre_roll_non_match_chunk(&self) -> Option<core::ops::Range<usize>> { - let end = - self.buf.buffer().len().saturating_sub(self.buf.min_buffer_len()); - if self.buffer_reported_pos < end { - return Some(self.buffer_reported_pos..end); - } - None - } - - /// Return any unreported bytes as a non-match up to the end of the buffer. - /// - /// This should only be called when the entire contents of the buffer have - /// been searched and EOF has been hit when trying to fill the buffer. - fn get_eof_non_match_chunk(&self) -> Option<core::ops::Range<usize>> { - if self.buffer_reported_pos < self.buf.buffer().len() { - return Some(self.buffer_reported_pos..self.buf.buffer().len()); - } - None - } - - /// Return the match at the current position for the current state. - /// - /// This panics if `self.aut.is_match(self.sid)` isn't true. - fn get_match(&self) -> Match { - get_match(self.aut, self.sid, 0, self.absolute_pos) - } -} - -/// A single chunk yielded by the stream chunk iterator. -/// -/// The `'r` lifetime refers to the lifetime of the stream chunk iterator. -#[cfg(feature = "std")] -#[derive(Debug)] -enum StreamChunk<'r> { - /// A chunk that does not contain any matches. - NonMatch { bytes: &'r [u8] }, - /// A chunk that precisely contains a match. - Match { bytes: &'r [u8], mat: Match }, -} - -#[inline(never)] -pub(crate) fn try_find_fwd<A: Automaton + ?Sized>( - aut: &A, - input: &Input<'_>, -) -> Result<Option<Match>, MatchError> { - if input.is_done() { - return Ok(None); - } - let earliest = aut.match_kind().is_standard() || input.get_earliest(); - if input.get_anchored().is_anchored() { - try_find_fwd_imp(aut, input, None, Anchored::Yes, earliest) - } else if let Some(pre) = aut.prefilter() { - if earliest { - try_find_fwd_imp(aut, input, Some(pre), Anchored::No, true) - } else { - try_find_fwd_imp(aut, input, Some(pre), Anchored::No, false) - } - } else { - if earliest { - try_find_fwd_imp(aut, input, None, Anchored::No, true) - } else { - try_find_fwd_imp(aut, input, None, Anchored::No, false) - } - } -} - -#[inline(always)] -fn try_find_fwd_imp<A: Automaton + ?Sized>( - aut: &A, - input: &Input<'_>, - pre: Option<&Prefilter>, - anchored: Anchored, - earliest: bool, -) -> Result<Option<Match>, MatchError> { - let mut sid = aut.start_state(input.get_anchored())?; - let mut at = input.start(); - let mut mat = None; - if aut.is_match(sid) { - mat = Some(get_match(aut, sid, 0, at)); - if earliest { - return Ok(mat); - } - } - if let Some(pre) = pre { - match pre.find_in(input.haystack(), input.get_span()) { - Candidate::None => return Ok(None), - Candidate::Match(m) => return Ok(Some(m)), - Candidate::PossibleStartOfMatch(i) => { - at = i; - } - } - } - while at < input.end() { - // I've tried unrolling this loop and eliding bounds checks, but no - // matter what I did, I could not observe a consistent improvement on - // any benchmark I could devise. (If someone wants to re-litigate this, - // the way to do it is to add an 'next_state_unchecked' method to the - // 'Automaton' trait with a default impl that uses 'next_state'. Then - // use 'aut.next_state_unchecked' here and implement it on DFA using - // unchecked slice index acces.) 
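A small sketch of the anchored-versus-unanchored dispatch handled by `try_find_fwd` above, assuming `Input::anchored` is how a caller requests an anchored search (the NFAs in this crate support both modes):

```rust
use aho_corasick::{automaton::Automaton, nfa::noncontiguous::NFA, Anchored, Input, Match};

let nfa = NFA::new(&["bcd"]).unwrap();

// Unanchored: the match may start anywhere in the haystack.
let unanchored = Input::new("abcd");
assert_eq!(Some(Match::must(0, 1..4)), nfa.try_find(&unanchored).unwrap());

// Anchored: only matches beginning at the start of the search are reported.
let anchored = Input::new("abcd").anchored(Anchored::Yes);
assert_eq!(None, nfa.try_find(&anchored).unwrap());
```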
- sid = aut.next_state(anchored, sid, input.haystack()[at]); - if aut.is_special(sid) { - if aut.is_dead(sid) { - return Ok(mat); - } else if aut.is_match(sid) { - // We use 'at + 1' here because the match state is entered - // at the last byte of the pattern. Since we use half-open - // intervals, the end of the range of the match is one past the - // last byte. - let m = get_match(aut, sid, 0, at + 1); - // For the automata in this crate, we make a size trade off - // where we reuse the same automaton for both anchored and - // unanchored searches. We achieve this, principally, by simply - // not following failure transitions while computing the next - // state. Instead, if we fail to find the next state, we return - // a dead state, which instructs the search to stop. (This - // is why 'next_state' needs to know whether the search is - // anchored or not.) In addition, we have different start - // states for anchored and unanchored searches. The latter has - // a self-loop where as the former does not. - // - // In this way, we can use the same trie to execute both - // anchored and unanchored searches. There is a catch though. - // When building an Aho-Corasick automaton for unanchored - // searches, we copy matches from match states to other states - // (which would otherwise not be match states) if they are - // reachable via a failure transition. In the case of an - // anchored search, we *specifically* do not want to report - // these matches because they represent matches that start past - // the beginning of the search. - // - // Now we could tweak the automaton somehow to differentiate - // anchored from unanchored match states, but this would make - // 'aut.is_match' and potentially 'aut.is_special' slower. And - // also make the automaton itself more complex. - // - // Instead, we insert a special hack: if the search is - // anchored, we simply ignore matches that don't begin at - // the start of the search. This is not quite ideal, but we - // do specialize this function in such a way that unanchored - // searches don't pay for this additional branch. While this - // might cause a search to continue on for more than it - // otherwise optimally would, it will be no more than the - // longest pattern in the automaton. The reason for this is - // that we ensure we don't follow failure transitions during - // an anchored search. Combined with using a different anchored - // starting state with no self-loop, we guarantee that we'll - // at worst move through a number of transitions equal to the - // longest pattern. - // - // Now for DFAs, the whole point of them is to eliminate - // failure transitions entirely. So there is no way to say "if - // it's an anchored search don't follow failure transitions." - // Instead, we actually have to build two entirely separate - // automatons into the transition table. One with failure - // transitions built into it and another that is effectively - // just an encoding of the base trie into a transition table. - // DFAs still need this check though, because the match states - // still carry matches only reachable via a failure transition. - // Why? Because removing them seems difficult, although I - // haven't given it a lot of thought. - if !(anchored.is_anchored() && m.start() > input.start()) { - mat = Some(m); - if earliest { - return Ok(mat); - } - } - } else if let Some(pre) = pre { - // If we're here, we know it's a special state that is not a - // dead or a match state AND that a prefilter is active. 
Thus, - // it must be a start state. - debug_assert!(aut.is_start(sid)); - // We don't care about 'Candidate::Match' here because if such - // a match were possible, it would have been returned above - // when we run the prefilter before walking the automaton. - let span = Span::from(at..input.end()); - match pre.find_in(input.haystack(), span).into_option() { - None => return Ok(None), - Some(i) => { - if i > at { - at = i; - continue; - } - } - } - } else { - // When pre.is_none(), then starting states should not be - // treated as special. That is, without a prefilter, is_special - // should only return true when the state is a dead or a match - // state. - // - // It is possible to execute a search without a prefilter even - // when the underlying searcher has one: an anchored search. - // But in this case, the automaton makes it impossible to move - // back to the start state by construction, and thus, we should - // never reach this branch. - debug_assert!(false, "unreachable"); - } - } - at += 1; - } - Ok(mat) -} - -#[inline(never)] -fn try_find_overlapping_fwd<A: Automaton + ?Sized>( - aut: &A, - input: &Input<'_>, - state: &mut OverlappingState, -) -> Result<(), MatchError> { - state.mat = None; - if input.is_done() { - return Ok(()); - } - // Searching with a pattern ID is always anchored, so we should only ever - // use a prefilter when no pattern ID is given. - if aut.prefilter().is_some() && !input.get_anchored().is_anchored() { - let pre = aut.prefilter().unwrap(); - try_find_overlapping_fwd_imp(aut, input, Some(pre), state) - } else { - try_find_overlapping_fwd_imp(aut, input, None, state) - } -} - -#[inline(always)] -fn try_find_overlapping_fwd_imp<A: Automaton + ?Sized>( - aut: &A, - input: &Input<'_>, - pre: Option<&Prefilter>, - state: &mut OverlappingState, -) -> Result<(), MatchError> { - let mut sid = match state.id { - None => { - let sid = aut.start_state(input.get_anchored())?; - // Handle the case where the start state is a match state. That is, - // the empty string is in our automaton. We report every match we - // can here before moving on and updating 'state.at' and 'state.id' - // to find more matches in other parts of the haystack. - if aut.is_match(sid) { - let i = state.next_match_index.unwrap_or(0); - let len = aut.match_len(sid); - if i < len { - state.next_match_index = Some(i + 1); - state.mat = Some(get_match(aut, sid, i, input.start())); - return Ok(()); - } - } - state.at = input.start(); - state.id = Some(sid); - state.next_match_index = None; - state.mat = None; - sid - } - Some(sid) => { - // If we still have matches left to report in this state then - // report them until we've exhausted them. Only after that do we - // advance to the next offset in the haystack. - if let Some(i) = state.next_match_index { - let len = aut.match_len(sid); - if i < len { - state.next_match_index = Some(i + 1); - state.mat = Some(get_match(aut, sid, i, state.at + 1)); - return Ok(()); - } - // Once we've reported all matches at a given position, we need - // to advance the search to the next position. 
- state.at += 1; - state.next_match_index = None; - state.mat = None; - } - sid - } - }; - while state.at < input.end() { - sid = aut.next_state( - input.get_anchored(), - sid, - input.haystack()[state.at], - ); - if aut.is_special(sid) { - state.id = Some(sid); - if aut.is_dead(sid) { - return Ok(()); - } else if aut.is_match(sid) { - state.next_match_index = Some(1); - state.mat = Some(get_match(aut, sid, 0, state.at + 1)); - return Ok(()); - } else if let Some(pre) = pre { - // If we're here, we know it's a special state that is not a - // dead or a match state AND that a prefilter is active. Thus, - // it must be a start state. - debug_assert!(aut.is_start(sid)); - let span = Span::from(state.at..input.end()); - match pre.find_in(input.haystack(), span).into_option() { - None => return Ok(()), - Some(i) => { - if i > state.at { - state.at = i; - continue; - } - } - } - } else { - // When pre.is_none(), then starting states should not be - // treated as special. That is, without a prefilter, is_special - // should only return true when the state is a dead or a match - // state. - // - // ... except for one special case: in stream searching, we - // currently call overlapping search with a 'None' prefilter, - // regardless of whether one exists or not, because stream - // searching can't currently deal with prefilters correctly in - // all cases. - } - } - state.at += 1; - } - state.id = Some(sid); - Ok(()) -} - -#[inline(always)] -fn get_match<A: Automaton + ?Sized>( - aut: &A, - sid: StateID, - index: usize, - at: usize, -) -> Match { - let pid = aut.match_pattern(sid, index); - let len = aut.pattern_len(pid); - Match::new(pid, (at - len)..at) -} - -/// Write a prefix "state" indicator for fmt::Debug impls. It always writes -/// exactly two printable bytes to the given formatter. -/// -/// Specifically, this tries to succinctly distinguish the different types of -/// states: dead states, start states and match states. It even accounts for -/// the possible overlappings of different state types. (The only possible -/// overlapping is that of match and start states.) -pub(crate) fn fmt_state_indicator<A: Automaton>( - f: &mut core::fmt::Formatter<'_>, - aut: A, - id: StateID, -) -> core::fmt::Result { - if aut.is_dead(id) { - write!(f, "D ")?; - } else if aut.is_match(id) { - if aut.is_start(id) { - write!(f, "*>")?; - } else { - write!(f, "* ")?; - } - } else if aut.is_start(id) { - write!(f, " >")?; - } else { - write!(f, " ")?; - } - Ok(()) -} - -/// Return an iterator of transitions in a sparse format given an iterator -/// of all explicitly defined transitions. The iterator yields ranges of -/// transitions, such that any adjacent transitions mapped to the same -/// state are combined into a single range. 
-pub(crate) fn sparse_transitions<'a>( - mut it: impl Iterator<Item = (u8, StateID)> + 'a, -) -> impl Iterator<Item = (u8, u8, StateID)> + 'a { - let mut cur: Option<(u8, u8, StateID)> = None; - core::iter::from_fn(move || { - while let Some((class, next)) = it.next() { - let (prev_start, prev_end, prev_next) = match cur { - Some(x) => x, - None => { - cur = Some((class, class, next)); - continue; - } - }; - if prev_next == next { - cur = Some((prev_start, class, prev_next)); - } else { - cur = Some((class, class, next)); - return Some((prev_start, prev_end, prev_next)); - } - } - if let Some((start, end, next)) = cur.take() { - return Some((start, end, next)); - } - None - }) -} diff --git a/vendor/aho-corasick/src/dfa.rs b/vendor/aho-corasick/src/dfa.rs deleted file mode 100644 index 1aa4f0e5cff1df..00000000000000 --- a/vendor/aho-corasick/src/dfa.rs +++ /dev/null @@ -1,835 +0,0 @@ -/*! -Provides direct access to a DFA implementation of Aho-Corasick. - -This is a low-level API that generally only needs to be used in niche -circumstances. When possible, prefer using [`AhoCorasick`](crate::AhoCorasick) -instead of a DFA directly. Using an `DFA` directly is typically only necessary -when one needs access to the [`Automaton`] trait implementation. -*/ - -use alloc::{vec, vec::Vec}; - -use crate::{ - automaton::Automaton, - nfa::noncontiguous, - util::{ - alphabet::ByteClasses, - error::{BuildError, MatchError}, - int::{Usize, U32}, - prefilter::Prefilter, - primitives::{IteratorIndexExt, PatternID, SmallIndex, StateID}, - search::{Anchored, MatchKind, StartKind}, - special::Special, - }, -}; - -/// A DFA implementation of Aho-Corasick. -/// -/// When possible, prefer using [`AhoCorasick`](crate::AhoCorasick) instead of -/// this type directly. Using a `DFA` directly is typically only necessary when -/// one needs access to the [`Automaton`] trait implementation. -/// -/// This DFA can only be built by first constructing a [`noncontiguous::NFA`]. -/// Both [`DFA::new`] and [`Builder::build`] do this for you automatically, but -/// [`Builder::build_from_noncontiguous`] permits doing it explicitly. -/// -/// A DFA provides the best possible search performance (in this crate) via two -/// mechanisms: -/// -/// * All states use a dense representation for their transitions. -/// * All failure transitions are pre-computed such that they are never -/// explicitly handled at search time. -/// -/// These two facts combined mean that every state transition is performed -/// using a constant number of instructions. However, this comes at -/// great cost. The memory usage of a DFA can be quite exorbitant. -/// It is potentially multiple orders of magnitude greater than a -/// [`contiguous::NFA`](crate::nfa::contiguous::NFA) for example. In exchange, -/// a DFA will typically have better search speed than a `contiguous::NFA`, but -/// not by orders of magnitude. -/// -/// Unless you have a small number of patterns or memory usage is not a concern -/// and search performance is critical, a DFA is usually not the best choice. -/// -/// Moreover, unlike the NFAs in this crate, it is costly for a DFA to -/// support for anchored and unanchored search configurations. Namely, -/// since failure transitions are pre-computed, supporting both anchored -/// and unanchored searches requires a duplication of the transition table, -/// making the memory usage of such a DFA ever bigger. 
(The NFAs in this crate -/// unconditionally support both anchored and unanchored searches because there -/// is essentially no added cost for doing so.) It is for this reason that -/// a DFA's support for anchored and unanchored searches can be configured -/// via [`Builder::start_kind`]. By default, a DFA only supports unanchored -/// searches. -/// -/// # Example -/// -/// This example shows how to build an `DFA` directly and use it to execute -/// [`Automaton::try_find`]: -/// -/// ``` -/// use aho_corasick::{ -/// automaton::Automaton, -/// dfa::DFA, -/// Input, Match, -/// }; -/// -/// let patterns = &["b", "abc", "abcd"]; -/// let haystack = "abcd"; -/// -/// let nfa = DFA::new(patterns).unwrap(); -/// assert_eq!( -/// Some(Match::must(0, 1..2)), -/// nfa.try_find(&Input::new(haystack))?, -/// ); -/// # Ok::<(), Box<dyn std::error::Error>>(()) -/// ``` -/// -/// It is also possible to implement your own version of `try_find`. See the -/// [`Automaton`] documentation for an example. -#[derive(Clone)] -pub struct DFA { - /// The DFA transition table. IDs in this table are pre-multiplied. So - /// instead of the IDs being 0, 1, 2, 3, ..., they are 0*stride, 1*stride, - /// 2*stride, 3*stride, ... - trans: Vec<StateID>, - /// The matches for every match state in this DFA. This is first indexed by - /// state index (so that's `sid >> stride2`) and then by order in which the - /// matches are meant to occur. - matches: Vec<Vec<PatternID>>, - /// The amount of heap memory used, in bytes, by the inner Vecs of - /// 'matches'. - matches_memory_usage: usize, - /// The length of each pattern. This is used to compute the start offset - /// of a match. - pattern_lens: Vec<SmallIndex>, - /// A prefilter for accelerating searches, if one exists. - prefilter: Option<Prefilter>, - /// The match semantics built into this DFA. - match_kind: MatchKind, - /// The total number of states in this DFA. - state_len: usize, - /// The alphabet size, or total number of equivalence classes, for this - /// DFA. Note that the actual number of transitions in each state is - /// stride=2^stride2, where stride is the smallest power of 2 greater than - /// or equal to alphabet_len. We do things this way so that we can use - /// bitshifting to go from a state ID to an index into 'matches'. - alphabet_len: usize, - /// The exponent with a base 2, such that stride=2^stride2. Given a state - /// index 'i', its state identifier is 'i << stride2'. Given a state - /// identifier 'sid', its state index is 'sid >> stride2'. - stride2: usize, - /// The equivalence classes for this DFA. All transitions are defined on - /// equivalence classes and not on the 256 distinct byte values. - byte_classes: ByteClasses, - /// The length of the shortest pattern in this automaton. - min_pattern_len: usize, - /// The length of the longest pattern in this automaton. - max_pattern_len: usize, - /// The information required to deduce which states are "special" in this - /// DFA. - special: Special, -} - -impl DFA { - /// Create a new Aho-Corasick DFA using the default configuration. - /// - /// Use a [`Builder`] if you want to change the configuration. - pub fn new<I, P>(patterns: I) -> Result<DFA, BuildError> - where - I: IntoIterator<Item = P>, - P: AsRef<[u8]>, - { - DFA::builder().build(patterns) - } - - /// A convenience method for returning a new Aho-Corasick DFA builder. - /// - /// This usually permits one to just import the `DFA` type. 
- pub fn builder() -> Builder { - Builder::new() - } -} - -impl DFA { - /// A sentinel state ID indicating that a search should stop once it has - /// entered this state. When a search stops, it returns a match if one has - /// been found, otherwise no match. A DFA always has an actual dead state - /// at this ID. - /// - /// N.B. DFAs, unlike NFAs, do not have any notion of a FAIL state. - /// Namely, the whole point of a DFA is that the FAIL state is completely - /// compiled away. That is, DFA construction involves pre-computing the - /// failure transitions everywhere, such that failure transitions are no - /// longer used at search time. This, combined with its uniformly dense - /// representation, are the two most important factors in why it's faster - /// than the NFAs in this crate. - const DEAD: StateID = StateID::new_unchecked(0); - - /// Adds the given pattern IDs as matches to the given state and also - /// records the added memory usage. - fn set_matches( - &mut self, - sid: StateID, - pids: impl Iterator<Item = PatternID>, - ) { - let index = (sid.as_usize() >> self.stride2).checked_sub(2).unwrap(); - let mut at_least_one = false; - for pid in pids { - self.matches[index].push(pid); - self.matches_memory_usage += PatternID::SIZE; - at_least_one = true; - } - assert!(at_least_one, "match state must have non-empty pids"); - } -} - -// SAFETY: 'start_state' always returns a valid state ID, 'next_state' always -// returns a valid state ID given a valid state ID. We otherwise claim that -// all other methods are correct as well. -unsafe impl Automaton for DFA { - #[inline(always)] - fn start_state(&self, anchored: Anchored) -> Result<StateID, MatchError> { - // Either of the start state IDs can be DEAD, in which case, support - // for that type of search is not provided by this DFA. Which start - // state IDs are inactive depends on the 'StartKind' configuration at - // DFA construction time. 
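A short sketch of the behavior described in the comment above: with the default configuration only the unanchored start state exists, so requesting the anchored one reports an error rather than a state ID.

```rust
use aho_corasick::{automaton::Automaton, dfa::DFA, Anchored};

let dfa = DFA::new(&["foo", "bar"]).unwrap();
assert!(dfa.start_state(Anchored::No).is_ok());
assert!(dfa.start_state(Anchored::Yes).is_err());
```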
- match anchored { - Anchored::No => { - let start = self.special.start_unanchored_id; - if start == DFA::DEAD { - Err(MatchError::invalid_input_unanchored()) - } else { - Ok(start) - } - } - Anchored::Yes => { - let start = self.special.start_anchored_id; - if start == DFA::DEAD { - Err(MatchError::invalid_input_anchored()) - } else { - Ok(start) - } - } - } - } - - #[inline(always)] - fn next_state( - &self, - _anchored: Anchored, - sid: StateID, - byte: u8, - ) -> StateID { - let class = self.byte_classes.get(byte); - self.trans[(sid.as_u32() + u32::from(class)).as_usize()] - } - - #[inline(always)] - fn is_special(&self, sid: StateID) -> bool { - sid <= self.special.max_special_id - } - - #[inline(always)] - fn is_dead(&self, sid: StateID) -> bool { - sid == DFA::DEAD - } - - #[inline(always)] - fn is_match(&self, sid: StateID) -> bool { - !self.is_dead(sid) && sid <= self.special.max_match_id - } - - #[inline(always)] - fn is_start(&self, sid: StateID) -> bool { - sid == self.special.start_unanchored_id - || sid == self.special.start_anchored_id - } - - #[inline(always)] - fn match_kind(&self) -> MatchKind { - self.match_kind - } - - #[inline(always)] - fn patterns_len(&self) -> usize { - self.pattern_lens.len() - } - - #[inline(always)] - fn pattern_len(&self, pid: PatternID) -> usize { - self.pattern_lens[pid].as_usize() - } - - #[inline(always)] - fn min_pattern_len(&self) -> usize { - self.min_pattern_len - } - - #[inline(always)] - fn max_pattern_len(&self) -> usize { - self.max_pattern_len - } - - #[inline(always)] - fn match_len(&self, sid: StateID) -> usize { - debug_assert!(self.is_match(sid)); - let offset = (sid.as_usize() >> self.stride2) - 2; - self.matches[offset].len() - } - - #[inline(always)] - fn match_pattern(&self, sid: StateID, index: usize) -> PatternID { - debug_assert!(self.is_match(sid)); - let offset = (sid.as_usize() >> self.stride2) - 2; - self.matches[offset][index] - } - - #[inline(always)] - fn memory_usage(&self) -> usize { - use core::mem::size_of; - - (self.trans.len() * size_of::<u32>()) - + (self.matches.len() * size_of::<Vec<PatternID>>()) - + self.matches_memory_usage - + (self.pattern_lens.len() * size_of::<SmallIndex>()) - + self.prefilter.as_ref().map_or(0, |p| p.memory_usage()) - } - - #[inline(always)] - fn prefilter(&self) -> Option<&Prefilter> { - self.prefilter.as_ref() - } -} - -impl core::fmt::Debug for DFA { - fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { - use crate::{ - automaton::{fmt_state_indicator, sparse_transitions}, - util::debug::DebugByte, - }; - - writeln!(f, "dfa::DFA(")?; - for index in 0..self.state_len { - let sid = StateID::new_unchecked(index << self.stride2); - // While we do currently include the FAIL state in the transition - // table (to simplify construction), it is never actually used. It - // poses problems with the code below because it gets treated as - // a match state incidentally when it is, of course, not. So we - // special case it. The fail state is always the first state after - // the dead state. - // - // If the construction is changed to remove the fail state (it - // probably should be), then this special case should be updated. 
- if index == 1 { - writeln!(f, "F {:06}:", sid.as_usize())?; - continue; - } - fmt_state_indicator(f, self, sid)?; - write!(f, "{:06}: ", sid.as_usize())?; - - let it = (0..self.byte_classes.alphabet_len()).map(|class| { - (class.as_u8(), self.trans[sid.as_usize() + class]) - }); - for (i, (start, end, next)) in sparse_transitions(it).enumerate() { - if i > 0 { - write!(f, ", ")?; - } - if start == end { - write!( - f, - "{:?} => {:?}", - DebugByte(start), - next.as_usize() - )?; - } else { - write!( - f, - "{:?}-{:?} => {:?}", - DebugByte(start), - DebugByte(end), - next.as_usize() - )?; - } - } - write!(f, "\n")?; - if self.is_match(sid) { - write!(f, " matches: ")?; - for i in 0..self.match_len(sid) { - if i > 0 { - write!(f, ", ")?; - } - let pid = self.match_pattern(sid, i); - write!(f, "{}", pid.as_usize())?; - } - write!(f, "\n")?; - } - } - writeln!(f, "match kind: {:?}", self.match_kind)?; - writeln!(f, "prefilter: {:?}", self.prefilter.is_some())?; - writeln!(f, "state length: {:?}", self.state_len)?; - writeln!(f, "pattern length: {:?}", self.patterns_len())?; - writeln!(f, "shortest pattern length: {:?}", self.min_pattern_len)?; - writeln!(f, "longest pattern length: {:?}", self.max_pattern_len)?; - writeln!(f, "alphabet length: {:?}", self.alphabet_len)?; - writeln!(f, "stride: {:?}", 1 << self.stride2)?; - writeln!(f, "byte classes: {:?}", self.byte_classes)?; - writeln!(f, "memory usage: {:?}", self.memory_usage())?; - writeln!(f, ")")?; - Ok(()) - } -} - -/// A builder for configuring an Aho-Corasick DFA. -/// -/// This builder has a subset of the options available to a -/// [`AhoCorasickBuilder`](crate::AhoCorasickBuilder). Of the shared options, -/// their behavior is identical. -#[derive(Clone, Debug)] -pub struct Builder { - noncontiguous: noncontiguous::Builder, - start_kind: StartKind, - byte_classes: bool, -} - -impl Default for Builder { - fn default() -> Builder { - Builder { - noncontiguous: noncontiguous::Builder::new(), - start_kind: StartKind::Unanchored, - byte_classes: true, - } - } -} - -impl Builder { - /// Create a new builder for configuring an Aho-Corasick DFA. - pub fn new() -> Builder { - Builder::default() - } - - /// Build an Aho-Corasick DFA from the given iterator of patterns. - /// - /// A builder may be reused to create more DFAs. - pub fn build<I, P>(&self, patterns: I) -> Result<DFA, BuildError> - where - I: IntoIterator<Item = P>, - P: AsRef<[u8]>, - { - let nnfa = self.noncontiguous.build(patterns)?; - self.build_from_noncontiguous(&nnfa) - } - - /// Build an Aho-Corasick DFA from the given noncontiguous NFA. - /// - /// Note that when this method is used, only the `start_kind` and - /// `byte_classes` settings on this builder are respected. The other - /// settings only apply to the initial construction of the Aho-Corasick - /// automaton. Since using this method requires that initial construction - /// has already completed, all settings impacting only initial construction - /// are no longer relevant. - pub fn build_from_noncontiguous( - &self, - nnfa: &noncontiguous::NFA, - ) -> Result<DFA, BuildError> { - debug!("building DFA"); - let byte_classes = if self.byte_classes { - nnfa.byte_classes().clone() - } else { - ByteClasses::singletons() - }; - let state_len = match self.start_kind { - StartKind::Unanchored | StartKind::Anchored => nnfa.states().len(), - StartKind::Both => { - // These unwraps are OK because we know that the number of - // NFA states is < StateID::LIMIT which is in turn less than - // i32::MAX. 
Thus, there is always room to multiply by 2. - // Finally, the number of states is always at least 4 in the - // NFA (DEAD, FAIL, START-UNANCHORED, START-ANCHORED), so the - // subtraction of 4 is okay. - // - // Note that we subtract 4 because the "anchored" part of - // the DFA duplicates the unanchored part (without failure - // transitions), but reuses the DEAD, FAIL and START states. - nnfa.states() - .len() - .checked_mul(2) - .unwrap() - .checked_sub(4) - .unwrap() - } - }; - let trans_len = - match state_len.checked_shl(byte_classes.stride2().as_u32()) { - Some(trans_len) => trans_len, - None => { - return Err(BuildError::state_id_overflow( - StateID::MAX.as_u64(), - usize::MAX.as_u64(), - )) - } - }; - StateID::new(trans_len.checked_sub(byte_classes.stride()).unwrap()) - .map_err(|e| { - BuildError::state_id_overflow( - StateID::MAX.as_u64(), - e.attempted(), - ) - })?; - let num_match_states = match self.start_kind { - StartKind::Unanchored | StartKind::Anchored => { - nnfa.special().max_match_id.as_usize().checked_sub(1).unwrap() - } - StartKind::Both => nnfa - .special() - .max_match_id - .as_usize() - .checked_sub(1) - .unwrap() - .checked_mul(2) - .unwrap(), - }; - let mut dfa = DFA { - trans: vec![DFA::DEAD; trans_len], - matches: vec![vec![]; num_match_states], - matches_memory_usage: 0, - pattern_lens: nnfa.pattern_lens_raw().to_vec(), - prefilter: nnfa.prefilter().cloned(), - match_kind: nnfa.match_kind(), - state_len, - alphabet_len: byte_classes.alphabet_len(), - stride2: byte_classes.stride2(), - byte_classes, - min_pattern_len: nnfa.min_pattern_len(), - max_pattern_len: nnfa.max_pattern_len(), - // The special state IDs are set later. - special: Special::zero(), - }; - match self.start_kind { - StartKind::Both => { - self.finish_build_both_starts(nnfa, &mut dfa); - } - StartKind::Unanchored => { - self.finish_build_one_start(Anchored::No, nnfa, &mut dfa); - } - StartKind::Anchored => { - self.finish_build_one_start(Anchored::Yes, nnfa, &mut dfa) - } - } - debug!( - "DFA built, <states: {:?}, size: {:?}, \ - alphabet len: {:?}, stride: {:?}>", - dfa.state_len, - dfa.memory_usage(), - dfa.byte_classes.alphabet_len(), - dfa.byte_classes.stride(), - ); - // The vectors can grow ~twice as big during construction because a - // Vec amortizes growth. But here, let's shrink things back down to - // what we actually need since we're never going to add more to it. - dfa.trans.shrink_to_fit(); - dfa.pattern_lens.shrink_to_fit(); - dfa.matches.shrink_to_fit(); - // TODO: We might also want to shrink each Vec inside of `dfa.matches`, - // or even better, convert it to one contiguous allocation. But I think - // I went with nested allocs for good reason (can't remember), so this - // may be tricky to do. I decided not to shrink them here because it - // might require a fair bit of work to do. It's unclear whether it's - // worth it. - Ok(dfa) - } - - /// Finishes building a DFA for either unanchored or anchored searches, - /// but NOT both. - fn finish_build_one_start( - &self, - anchored: Anchored, - nnfa: &noncontiguous::NFA, - dfa: &mut DFA, - ) { - // This function always succeeds because we check above that all of the - // states in the NFA can be mapped to DFA state IDs. 
- let stride2 = dfa.stride2; - let old2new = |oldsid: StateID| { - StateID::new_unchecked(oldsid.as_usize() << stride2) - }; - for (oldsid, state) in nnfa.states().iter().with_state_ids() { - let newsid = old2new(oldsid); - if state.is_match() { - dfa.set_matches(newsid, nnfa.iter_matches(oldsid)); - } - sparse_iter( - nnfa, - oldsid, - &dfa.byte_classes, - |byte, class, mut oldnextsid| { - if oldnextsid == noncontiguous::NFA::FAIL { - if anchored.is_anchored() { - oldnextsid = noncontiguous::NFA::DEAD; - } else if state.fail() == noncontiguous::NFA::DEAD { - // This is a special case that avoids following - // DEAD transitions in a non-contiguous NFA. - // Following these transitions is pretty slow - // because the non-contiguous NFA will always use - // a sparse representation for it (because the - // DEAD state is usually treated as a sentinel). - // The *vast* majority of failure states are DEAD - // states, so this winds up being pretty slow if - // we go through the non-contiguous NFA state - // transition logic. Instead, just do it ourselves. - oldnextsid = noncontiguous::NFA::DEAD; - } else { - oldnextsid = nnfa.next_state( - Anchored::No, - state.fail(), - byte, - ); - } - } - dfa.trans[newsid.as_usize() + usize::from(class)] = - old2new(oldnextsid); - }, - ); - } - // Now that we've remapped all the IDs in our states, all that's left - // is remapping the special state IDs. - let old = nnfa.special(); - let new = &mut dfa.special; - new.max_special_id = old2new(old.max_special_id); - new.max_match_id = old2new(old.max_match_id); - if anchored.is_anchored() { - new.start_unanchored_id = DFA::DEAD; - new.start_anchored_id = old2new(old.start_anchored_id); - } else { - new.start_unanchored_id = old2new(old.start_unanchored_id); - new.start_anchored_id = DFA::DEAD; - } - } - - /// Finishes building a DFA that supports BOTH unanchored and anchored - /// searches. It works by inter-leaving unanchored states with anchored - /// states in the same transition table. This way, we avoid needing to - /// re-shuffle states afterward to ensure that our states still look like - /// DEAD, MATCH, ..., START-UNANCHORED, START-ANCHORED, NON-MATCH, ... - /// - /// Honestly this is pretty inscrutable... Simplifications are most - /// welcome. 
- fn finish_build_both_starts( - &self, - nnfa: &noncontiguous::NFA, - dfa: &mut DFA, - ) { - let stride2 = dfa.stride2; - let stride = 1 << stride2; - let mut remap_unanchored = vec![DFA::DEAD; nnfa.states().len()]; - let mut remap_anchored = vec![DFA::DEAD; nnfa.states().len()]; - let mut is_anchored = vec![false; dfa.state_len]; - let mut newsid = DFA::DEAD; - let next_dfa_id = - |sid: StateID| StateID::new_unchecked(sid.as_usize() + stride); - for (oldsid, state) in nnfa.states().iter().with_state_ids() { - if oldsid == noncontiguous::NFA::DEAD - || oldsid == noncontiguous::NFA::FAIL - { - remap_unanchored[oldsid] = newsid; - remap_anchored[oldsid] = newsid; - newsid = next_dfa_id(newsid); - } else if oldsid == nnfa.special().start_unanchored_id - || oldsid == nnfa.special().start_anchored_id - { - if oldsid == nnfa.special().start_unanchored_id { - remap_unanchored[oldsid] = newsid; - remap_anchored[oldsid] = DFA::DEAD; - } else { - remap_unanchored[oldsid] = DFA::DEAD; - remap_anchored[oldsid] = newsid; - is_anchored[newsid.as_usize() >> stride2] = true; - } - if state.is_match() { - dfa.set_matches(newsid, nnfa.iter_matches(oldsid)); - } - sparse_iter( - nnfa, - oldsid, - &dfa.byte_classes, - |_, class, oldnextsid| { - let class = usize::from(class); - if oldnextsid == noncontiguous::NFA::FAIL { - dfa.trans[newsid.as_usize() + class] = DFA::DEAD; - } else { - dfa.trans[newsid.as_usize() + class] = oldnextsid; - } - }, - ); - newsid = next_dfa_id(newsid); - } else { - let unewsid = newsid; - newsid = next_dfa_id(newsid); - let anewsid = newsid; - newsid = next_dfa_id(newsid); - - remap_unanchored[oldsid] = unewsid; - remap_anchored[oldsid] = anewsid; - is_anchored[anewsid.as_usize() >> stride2] = true; - if state.is_match() { - dfa.set_matches(unewsid, nnfa.iter_matches(oldsid)); - dfa.set_matches(anewsid, nnfa.iter_matches(oldsid)); - } - sparse_iter( - nnfa, - oldsid, - &dfa.byte_classes, - |byte, class, oldnextsid| { - let class = usize::from(class); - if oldnextsid == noncontiguous::NFA::FAIL { - let oldnextsid = - if state.fail() == noncontiguous::NFA::DEAD { - noncontiguous::NFA::DEAD - } else { - nnfa.next_state( - Anchored::No, - state.fail(), - byte, - ) - }; - dfa.trans[unewsid.as_usize() + class] = oldnextsid; - } else { - dfa.trans[unewsid.as_usize() + class] = oldnextsid; - dfa.trans[anewsid.as_usize() + class] = oldnextsid; - } - }, - ); - } - } - for i in 0..dfa.state_len { - let sid = i << stride2; - if is_anchored[i] { - for next in dfa.trans[sid..][..stride].iter_mut() { - *next = remap_anchored[*next]; - } - } else { - for next in dfa.trans[sid..][..stride].iter_mut() { - *next = remap_unanchored[*next]; - } - } - } - // Now that we've remapped all the IDs in our states, all that's left - // is remapping the special state IDs. - let old = nnfa.special(); - let new = &mut dfa.special; - new.max_special_id = remap_anchored[old.max_special_id]; - new.max_match_id = remap_anchored[old.max_match_id]; - new.start_unanchored_id = remap_unanchored[old.start_unanchored_id]; - new.start_anchored_id = remap_anchored[old.start_anchored_id]; - } - - /// Set the desired match semantics. - /// - /// This only applies when using [`Builder::build`] and not - /// [`Builder::build_from_noncontiguous`]. - /// - /// See - /// [`AhoCorasickBuilder::match_kind`](crate::AhoCorasickBuilder::match_kind) - /// for more documentation and examples. 
- pub fn match_kind(&mut self, kind: MatchKind) -> &mut Builder { - self.noncontiguous.match_kind(kind); - self - } - - /// Enable ASCII-aware case insensitive matching. - /// - /// This only applies when using [`Builder::build`] and not - /// [`Builder::build_from_noncontiguous`]. - /// - /// See - /// [`AhoCorasickBuilder::ascii_case_insensitive`](crate::AhoCorasickBuilder::ascii_case_insensitive) - /// for more documentation and examples. - pub fn ascii_case_insensitive(&mut self, yes: bool) -> &mut Builder { - self.noncontiguous.ascii_case_insensitive(yes); - self - } - - /// Enable heuristic prefilter optimizations. - /// - /// This only applies when using [`Builder::build`] and not - /// [`Builder::build_from_noncontiguous`]. - /// - /// See - /// [`AhoCorasickBuilder::prefilter`](crate::AhoCorasickBuilder::prefilter) - /// for more documentation and examples. - pub fn prefilter(&mut self, yes: bool) -> &mut Builder { - self.noncontiguous.prefilter(yes); - self - } - - /// Sets the starting state configuration for the automaton. - /// - /// See - /// [`AhoCorasickBuilder::start_kind`](crate::AhoCorasickBuilder::start_kind) - /// for more documentation and examples. - pub fn start_kind(&mut self, kind: StartKind) -> &mut Builder { - self.start_kind = kind; - self - } - - /// A debug setting for whether to attempt to shrink the size of the - /// automaton's alphabet or not. - /// - /// This should never be enabled unless you're debugging an automaton. - /// Namely, disabling byte classes makes transitions easier to reason - /// about, since they use the actual bytes instead of equivalence classes. - /// Disabling this confers no performance benefit at search time. - /// - /// See - /// [`AhoCorasickBuilder::byte_classes`](crate::AhoCorasickBuilder::byte_classes) - /// for more documentation and examples. - pub fn byte_classes(&mut self, yes: bool) -> &mut Builder { - self.byte_classes = yes; - self - } -} - -/// Iterate over all possible equivalence class transitions in this state. -/// The closure is called for all transitions with a distinct equivalence -/// class, even those not explicitly represented in this sparse state. For -/// any implicitly defined transitions, the given closure is called with -/// the fail state ID. -/// -/// The closure is guaranteed to be called precisely -/// `byte_classes.alphabet_len()` times, once for every possible class in -/// ascending order. -fn sparse_iter<F: FnMut(u8, u8, StateID)>( - nnfa: &noncontiguous::NFA, - oldsid: StateID, - classes: &ByteClasses, - mut f: F, -) { - let mut prev_class = None; - let mut byte = 0usize; - for t in nnfa.iter_trans(oldsid) { - while byte < usize::from(t.byte()) { - let rep = byte.as_u8(); - let class = classes.get(rep); - byte += 1; - if prev_class != Some(class) { - f(rep, class, noncontiguous::NFA::FAIL); - prev_class = Some(class); - } - } - let rep = t.byte(); - let class = classes.get(rep); - byte += 1; - if prev_class != Some(class) { - f(rep, class, t.next()); - prev_class = Some(class); - } - } - for b in byte..=255 { - let rep = b.as_u8(); - let class = classes.get(rep); - if prev_class != Some(class) { - f(rep, class, noncontiguous::NFA::FAIL); - prev_class = Some(class); - } - } -} diff --git a/vendor/aho-corasick/src/lib.rs b/vendor/aho-corasick/src/lib.rs deleted file mode 100644 index 273dd3bfe95781..00000000000000 --- a/vendor/aho-corasick/src/lib.rs +++ /dev/null @@ -1,326 +0,0 @@ -/*! -A library for finding occurrences of many patterns at once. 
This library -provides multiple pattern search principally through an implementation of the -[Aho-Corasick algorithm](https://en.wikipedia.org/wiki/Aho%E2%80%93Corasick_algorithm), -which builds a fast finite state machine for executing searches in linear time. - -Additionally, this library provides a number of configuration options for -building the automaton that permit controlling the space versus time trade -off. Other features include simple ASCII case insensitive matching, finding -overlapping matches, replacements, searching streams and even searching and -replacing text in streams. - -Finally, unlike most other Aho-Corasick implementations, this one -supports enabling [leftmost-first](MatchKind::LeftmostFirst) or -[leftmost-longest](MatchKind::LeftmostLongest) match semantics, using a -(seemingly) novel alternative construction algorithm. For more details on what -match semantics means, see the [`MatchKind`] type. - -# Overview - -This section gives a brief overview of the primary types in this crate: - -* [`AhoCorasick`] is the primary type and represents an Aho-Corasick automaton. -This is the type you use to execute searches. -* [`AhoCorasickBuilder`] can be used to build an Aho-Corasick automaton, and -supports configuring a number of options. -* [`Match`] represents a single match reported by an Aho-Corasick automaton. -Each match has two pieces of information: the pattern that matched and the -start and end byte offsets corresponding to the position in the haystack at -which it matched. - -# Example: basic searching - -This example shows how to search for occurrences of multiple patterns -simultaneously. Each match includes the pattern that matched along with the -byte offsets of the match. - -``` -use aho_corasick::{AhoCorasick, PatternID}; - -let patterns = &["apple", "maple", "Snapple"]; -let haystack = "Nobody likes maple in their apple flavored Snapple."; - -let ac = AhoCorasick::new(patterns).unwrap(); -let mut matches = vec![]; -for mat in ac.find_iter(haystack) { - matches.push((mat.pattern(), mat.start(), mat.end())); -} -assert_eq!(matches, vec![ - (PatternID::must(1), 13, 18), - (PatternID::must(0), 28, 33), - (PatternID::must(2), 43, 50), -]); -``` - -# Example: case insensitivity - -This is like the previous example, but matches `Snapple` case insensitively -using `AhoCorasickBuilder`: - -``` -use aho_corasick::{AhoCorasick, PatternID}; - -let patterns = &["apple", "maple", "snapple"]; -let haystack = "Nobody likes maple in their apple flavored Snapple."; - -let ac = AhoCorasick::builder() - .ascii_case_insensitive(true) - .build(patterns) - .unwrap(); -let mut matches = vec![]; -for mat in ac.find_iter(haystack) { - matches.push((mat.pattern(), mat.start(), mat.end())); -} -assert_eq!(matches, vec![ - (PatternID::must(1), 13, 18), - (PatternID::must(0), 28, 33), - (PatternID::must(2), 43, 50), -]); -``` - -# Example: replacing matches in a stream - -This example shows how to execute a search and replace on a stream without -loading the entire stream into memory first. - -``` -# #[cfg(feature = "std")] { -use aho_corasick::AhoCorasick; - -# fn example() -> Result<(), std::io::Error> { -let patterns = &["fox", "brown", "quick"]; -let replace_with = &["sloth", "grey", "slow"]; - -// In a real example, these might be `std::fs::File`s instead. All you need to -// do is supply a pair of `std::io::Read` and `std::io::Write` implementations. 
-let rdr = "The quick brown fox."; -let mut wtr = vec![]; - -let ac = AhoCorasick::new(patterns).unwrap(); -ac.try_stream_replace_all(rdr.as_bytes(), &mut wtr, replace_with)?; -assert_eq!(b"The slow grey sloth.".to_vec(), wtr); -# Ok(()) }; example().unwrap() -# } -``` - -# Example: finding the leftmost first match - -In the textbook description of Aho-Corasick, its formulation is typically -structured such that it reports all possible matches, even when they overlap -with another. In many cases, overlapping matches may not be desired, such as -the case of finding all successive non-overlapping matches like you might with -a standard regular expression. - -Unfortunately the "obvious" way to modify the Aho-Corasick algorithm to do -this doesn't always work in the expected way, since it will report matches as -soon as they are seen. For example, consider matching the regex `Samwise|Sam` -against the text `Samwise`. Most regex engines (that are Perl-like, or -non-POSIX) will report `Samwise` as a match, but the standard Aho-Corasick -algorithm modified for reporting non-overlapping matches will report `Sam`. - -A novel contribution of this library is the ability to change the match -semantics of Aho-Corasick (without additional search time overhead) such that -`Samwise` is reported instead. For example, here's the standard approach: - -``` -use aho_corasick::AhoCorasick; - -let patterns = &["Samwise", "Sam"]; -let haystack = "Samwise"; - -let ac = AhoCorasick::new(patterns).unwrap(); -let mat = ac.find(haystack).expect("should have a match"); -assert_eq!("Sam", &haystack[mat.start()..mat.end()]); -``` - -And now here's the leftmost-first version, which matches how a Perl-like -regex will work: - -``` -use aho_corasick::{AhoCorasick, MatchKind}; - -let patterns = &["Samwise", "Sam"]; -let haystack = "Samwise"; - -let ac = AhoCorasick::builder() - .match_kind(MatchKind::LeftmostFirst) - .build(patterns) - .unwrap(); -let mat = ac.find(haystack).expect("should have a match"); -assert_eq!("Samwise", &haystack[mat.start()..mat.end()]); -``` - -In addition to leftmost-first semantics, this library also supports -leftmost-longest semantics, which match the POSIX behavior of a regular -expression alternation. See [`MatchKind`] for more details. - -# Prefilters - -While an Aho-Corasick automaton can perform admirably when compared to more -naive solutions, it is generally slower than more specialized algorithms that -are accelerated using vector instructions such as SIMD. - -For that reason, this library will internally use a "prefilter" to attempt -to accelerate searches when possible. Currently, this library has several -different algorithms it might use depending on the patterns provided. Once the -number of patterns gets too big, prefilters are no longer used. - -While a prefilter is generally good to have on by default since it works -well in the common case, it can lead to less predictable or even sub-optimal -performance in some cases. For that reason, prefilters can be explicitly -disabled via [`AhoCorasickBuilder::prefilter`]. - -# Lower level APIs - -This crate also provides several sub-modules that collectively expose many of -the implementation details of the main [`AhoCorasick`] type. Most users of this -library can completely ignore the submodules and their contents, but if you -needed finer grained control, some parts of them may be useful to you. 
Here is -a brief overview of each and why you might want to use them: - -* The [`packed`] sub-module contains a lower level API for using fast -vectorized routines for finding a small number of patterns in a haystack. -You might want to use this API when you want to completely side-step using -Aho-Corasick automata. Otherwise, the fast vectorized routines are used -automatically as prefilters for `AhoCorasick` searches whenever possible. -* The [`automaton`] sub-module provides a lower level finite state -machine interface that the various Aho-Corasick implementations in -this crate implement. This sub-module's main contribution is the -[`Automaton`](automaton::Automaton) trait, which permits manually walking the -state transitions of an Aho-Corasick automaton. -* The [`dfa`] and [`nfa`] sub-modules provide DFA and NFA implementations of -the aforementioned `Automaton` trait. The main reason one might want to use -these sub-modules is to get access to a type that implements the `Automaton` -trait. (The top-level `AhoCorasick` type does not implement the `Automaton` -trait.) - -As mentioned above, if you aren't sure whether you need these sub-modules, -you should be able to safely ignore them and just focus on the [`AhoCorasick`] -type. - -# Crate features - -This crate exposes a few features for controlling dependency usage and whether -this crate can be used without the standard library. - -* **std** - - Enables support for the standard library. This feature is enabled by - default. When disabled, only `core` and `alloc` are used. At an API - level, enabling `std` enables `std::error::Error` trait impls for the - various error types, and higher level stream search routines such as - [`AhoCorasick::try_stream_find_iter`]. But the `std` feature is also required - to enable vectorized prefilters. Prefilters can greatly accelerate searches, - but generally only apply when the number of patterns is small (less than - ~100). -* **perf-literal** - - Enables support for literal prefilters that use vectorized routines from - external crates. This feature is enabled by default. If you're only using - Aho-Corasick for large numbers of patterns or otherwise can abide lower - throughput when searching with a small number of patterns, then it is - reasonable to disable this feature. -* **logging** - - Enables a dependency on the `log` crate and emits messages to aide in - diagnostics. This feature is disabled by default. -*/ - -#![no_std] -#![deny(missing_docs)] -#![deny(rustdoc::broken_intra_doc_links)] -#![cfg_attr(docsrs, feature(doc_cfg))] - -extern crate alloc; -#[cfg(any(test, feature = "std"))] -extern crate std; - -#[cfg(doctest)] -doc_comment::doctest!("../README.md"); - -#[cfg(feature = "std")] -pub use crate::ahocorasick::StreamFindIter; -pub use crate::{ - ahocorasick::{ - AhoCorasick, AhoCorasickBuilder, AhoCorasickKind, FindIter, - FindOverlappingIter, - }, - util::{ - error::{BuildError, MatchError, MatchErrorKind}, - primitives::{PatternID, PatternIDError}, - search::{Anchored, Input, Match, MatchKind, Span, StartKind}, - }, -}; - -#[macro_use] -mod macros; - -mod ahocorasick; -pub mod automaton; -pub mod dfa; -pub mod nfa; -pub mod packed; -#[cfg(test)] -mod tests; -// I wrote out the module for implementing fst::Automaton only to later realize -// that this would make fst a public dependency and fst is not at 1.0 yet. I -// decided to just keep the code in tree, but build it only during tests. -// -// TODO: I think I've changed my mind again. 
I'm considering pushing it out -// into either a separate crate or into 'fst' directly as an optional feature. -// #[cfg(test)] -// #[allow(dead_code)] -// mod transducer; -pub(crate) mod util; - -#[cfg(test)] -mod testoibits { - use std::panic::{RefUnwindSafe, UnwindSafe}; - - use super::*; - - fn assert_all<T: Send + Sync + UnwindSafe + RefUnwindSafe>() {} - - #[test] - fn oibits_main() { - assert_all::<AhoCorasick>(); - assert_all::<AhoCorasickBuilder>(); - assert_all::<AhoCorasickKind>(); - assert_all::<FindIter>(); - assert_all::<FindOverlappingIter>(); - - assert_all::<BuildError>(); - assert_all::<MatchError>(); - assert_all::<MatchErrorKind>(); - - assert_all::<Anchored>(); - assert_all::<Input>(); - assert_all::<Match>(); - assert_all::<MatchKind>(); - assert_all::<Span>(); - assert_all::<StartKind>(); - } - - #[test] - fn oibits_automaton() { - use crate::{automaton, dfa::DFA}; - - assert_all::<automaton::FindIter<DFA>>(); - assert_all::<automaton::FindOverlappingIter<DFA>>(); - #[cfg(feature = "std")] - assert_all::<automaton::StreamFindIter<DFA, std::io::Stdin>>(); - assert_all::<automaton::OverlappingState>(); - - assert_all::<automaton::Prefilter>(); - assert_all::<automaton::Candidate>(); - } - - #[test] - fn oibits_packed() { - use crate::packed; - - assert_all::<packed::Config>(); - assert_all::<packed::Builder>(); - assert_all::<packed::Searcher>(); - assert_all::<packed::FindIter>(); - assert_all::<packed::MatchKind>(); - } -} diff --git a/vendor/aho-corasick/src/macros.rs b/vendor/aho-corasick/src/macros.rs deleted file mode 100644 index fc73e6eddd82ef..00000000000000 --- a/vendor/aho-corasick/src/macros.rs +++ /dev/null @@ -1,18 +0,0 @@ -#![allow(unused_macros)] - -macro_rules! log { - ($($tt:tt)*) => { - #[cfg(feature = "logging")] - { - $($tt)* - } - } -} - -macro_rules! debug { - ($($tt:tt)*) => { log!(log::debug!($($tt)*)) } -} - -macro_rules! trace { - ($($tt:tt)*) => { log!(log::trace!($($tt)*)) } -} diff --git a/vendor/aho-corasick/src/nfa/contiguous.rs b/vendor/aho-corasick/src/nfa/contiguous.rs deleted file mode 100644 index 6ea2a47f849ee2..00000000000000 --- a/vendor/aho-corasick/src/nfa/contiguous.rs +++ /dev/null @@ -1,1141 +0,0 @@ -/*! -Provides a contiguous NFA implementation of Aho-Corasick. - -This is a low-level API that generally only needs to be used in niche -circumstances. When possible, prefer using [`AhoCorasick`](crate::AhoCorasick) -instead of a contiguous NFA directly. Using an `NFA` directly is typically only -necessary when one needs access to the [`Automaton`] trait implementation. -*/ - -use alloc::{vec, vec::Vec}; - -use crate::{ - automaton::Automaton, - nfa::noncontiguous, - util::{ - alphabet::ByteClasses, - error::{BuildError, MatchError}, - int::{Usize, U16, U32}, - prefilter::Prefilter, - primitives::{IteratorIndexExt, PatternID, SmallIndex, StateID}, - search::{Anchored, MatchKind}, - special::Special, - }, -}; - -/// A contiguous NFA implementation of Aho-Corasick. -/// -/// When possible, prefer using [`AhoCorasick`](crate::AhoCorasick) instead of -/// this type directly. Using an `NFA` directly is typically only necessary -/// when one needs access to the [`Automaton`] trait implementation. -/// -/// This NFA can only be built by first constructing a [`noncontiguous::NFA`]. -/// Both [`NFA::new`] and [`Builder::build`] do this for you automatically, but -/// [`Builder::build_from_noncontiguous`] permits doing it explicitly. 
-/// -/// The main difference between a noncontiguous NFA and a contiguous NFA is -/// that the latter represents all of its states and transitions in a single -/// allocation, where as the former uses a separate allocation for each state. -/// Doing this at construction time while keeping a low memory footprint isn't -/// feasible, which is primarily why there are two different NFA types: one -/// that does the least amount of work possible to build itself, and another -/// that does a little extra work to compact itself and make state transitions -/// faster by making some states use a dense representation. -/// -/// Because a contiguous NFA uses a single allocation, there is a lot more -/// opportunity for compression tricks to reduce the heap memory used. Indeed, -/// it is not uncommon for a contiguous NFA to use an order of magnitude less -/// heap memory than a noncontiguous NFA. Since building a contiguous NFA -/// usually only takes a fraction of the time it takes to build a noncontiguous -/// NFA, the overall build time is not much slower. Thus, in most cases, a -/// contiguous NFA is the best choice. -/// -/// Since a contiguous NFA uses various tricks for compression and to achieve -/// faster state transitions, currently, its limit on the number of states -/// is somewhat smaller than what a noncontiguous NFA can achieve. Generally -/// speaking, you shouldn't expect to run into this limit if the number of -/// patterns is under 1 million. It is plausible that this limit will be -/// increased in the future. If the limit is reached, building a contiguous NFA -/// will return an error. Often, since building a contiguous NFA is relatively -/// cheap, it can make sense to always try it even if you aren't sure if it -/// will fail or not. If it does, you can always fall back to a noncontiguous -/// NFA. (Indeed, the main [`AhoCorasick`](crate::AhoCorasick) type employs a -/// strategy similar to this at construction time.) -/// -/// # Example -/// -/// This example shows how to build an `NFA` directly and use it to execute -/// [`Automaton::try_find`]: -/// -/// ``` -/// use aho_corasick::{ -/// automaton::Automaton, -/// nfa::contiguous::NFA, -/// Input, Match, -/// }; -/// -/// let patterns = &["b", "abc", "abcd"]; -/// let haystack = "abcd"; -/// -/// let nfa = NFA::new(patterns).unwrap(); -/// assert_eq!( -/// Some(Match::must(0, 1..2)), -/// nfa.try_find(&Input::new(haystack))?, -/// ); -/// # Ok::<(), Box<dyn std::error::Error>>(()) -/// ``` -/// -/// It is also possible to implement your own version of `try_find`. See the -/// [`Automaton`] documentation for an example. -#[derive(Clone)] -pub struct NFA { - /// The raw NFA representation. Each state is packed with a header - /// (containing the format of the state, the failure transition and, for - /// a sparse state, the number of transitions), its transitions and any - /// matching pattern IDs for match states. - repr: Vec<u32>, - /// The length of each pattern. This is used to compute the start offset - /// of a match. - pattern_lens: Vec<SmallIndex>, - /// The total number of states in this NFA. - state_len: usize, - /// A prefilter for accelerating searches, if one exists. - prefilter: Option<Prefilter>, - /// The match semantics built into this NFA. - match_kind: MatchKind, - /// The alphabet size, or total number of equivalence classes, for this - /// NFA. Dense states always have this many transitions. - alphabet_len: usize, - /// The equivalence classes for this NFA. 
All transitions, dense and - /// sparse, are defined on equivalence classes and not on the 256 distinct - /// byte values. - byte_classes: ByteClasses, - /// The length of the shortest pattern in this automaton. - min_pattern_len: usize, - /// The length of the longest pattern in this automaton. - max_pattern_len: usize, - /// The information required to deduce which states are "special" in this - /// NFA. - special: Special, -} - -impl NFA { - /// Create a new Aho-Corasick contiguous NFA using the default - /// configuration. - /// - /// Use a [`Builder`] if you want to change the configuration. - pub fn new<I, P>(patterns: I) -> Result<NFA, BuildError> - where - I: IntoIterator<Item = P>, - P: AsRef<[u8]>, - { - NFA::builder().build(patterns) - } - - /// A convenience method for returning a new Aho-Corasick contiguous NFA - /// builder. - /// - /// This usually permits one to just import the `NFA` type. - pub fn builder() -> Builder { - Builder::new() - } -} - -impl NFA { - /// A sentinel state ID indicating that a search should stop once it has - /// entered this state. When a search stops, it returns a match if one - /// has been found, otherwise no match. A contiguous NFA always has an - /// actual dead state at this ID. - const DEAD: StateID = StateID::new_unchecked(0); - /// Another sentinel state ID indicating that a search should move through - /// current state's failure transition. - /// - /// Note that unlike DEAD, this does not actually point to a valid state - /// in a contiguous NFA. (noncontiguous::NFA::FAIL does point to a valid - /// state.) Instead, this points to the position that is guaranteed to - /// never be a valid state ID (by making sure it points to a place in the - /// middle of the encoding of the DEAD state). Since we never need to - /// actually look at the FAIL state itself, this works out. - /// - /// By why do it this way? So that FAIL is a constant. I don't have any - /// concrete evidence that this materially helps matters, but it's easy to - /// do. The alternative would be making the FAIL ID point to the second - /// state, which could be made a constant but is a little trickier to do. - /// The easiest path is to just make the FAIL state a runtime value, but - /// since comparisons with FAIL occur in perf critical parts of the search, - /// we want it to be as tight as possible and not waste any registers. - /// - /// Very hand wavy... But the code complexity that results from this is - /// very mild. - const FAIL: StateID = StateID::new_unchecked(1); -} - -// SAFETY: 'start_state' always returns a valid state ID, 'next_state' always -// returns a valid state ID given a valid state ID. We otherwise claim that -// all other methods are correct as well. -unsafe impl Automaton for NFA { - #[inline(always)] - fn start_state(&self, anchored: Anchored) -> Result<StateID, MatchError> { - match anchored { - Anchored::No => Ok(self.special.start_unanchored_id), - Anchored::Yes => Ok(self.special.start_anchored_id), - } - } - - #[inline(always)] - fn next_state( - &self, - anchored: Anchored, - mut sid: StateID, - byte: u8, - ) -> StateID { - let repr = &self.repr; - let class = self.byte_classes.get(byte); - let u32tosid = StateID::from_u32_unchecked; - loop { - let o = sid.as_usize(); - let kind = repr[o] & 0xFF; - // I tried to encapsulate the "next transition" logic into its own - // function, but it seemed to always result in sub-optimal codegen - // that led to real and significant slowdowns. So we just inline - // the logic here. 
- // - // I've also tried a lot of different ways to speed up this - // routine, and most of them have failed. - if kind == State::KIND_DENSE { - let next = u32tosid(repr[o + 2 + usize::from(class)]); - if next != NFA::FAIL { - return next; - } - } else if kind == State::KIND_ONE { - if class == repr[o].low_u16().high_u8() { - return u32tosid(repr[o + 2]); - } - } else { - // NOTE: I tried a SWAR technique in the loop below, but found - // it slower. See the 'swar' test in the tests for this module. - let trans_len = kind.as_usize(); - let classes_len = u32_len(trans_len); - let trans_offset = o + 2 + classes_len; - for (i, &chunk) in - repr[o + 2..][..classes_len].iter().enumerate() - { - let classes = chunk.to_ne_bytes(); - if classes[0] == class { - return u32tosid(repr[trans_offset + i * 4]); - } - if classes[1] == class { - return u32tosid(repr[trans_offset + i * 4 + 1]); - } - if classes[2] == class { - return u32tosid(repr[trans_offset + i * 4 + 2]); - } - if classes[3] == class { - return u32tosid(repr[trans_offset + i * 4 + 3]); - } - } - } - // For an anchored search, we never follow failure transitions - // because failure transitions lead us down a path to matching - // a *proper* suffix of the path we were on. Thus, it can only - // produce matches that appear after the beginning of the search. - if anchored.is_anchored() { - return NFA::DEAD; - } - sid = u32tosid(repr[o + 1]); - } - } - - #[inline(always)] - fn is_special(&self, sid: StateID) -> bool { - sid <= self.special.max_special_id - } - - #[inline(always)] - fn is_dead(&self, sid: StateID) -> bool { - sid == NFA::DEAD - } - - #[inline(always)] - fn is_match(&self, sid: StateID) -> bool { - !self.is_dead(sid) && sid <= self.special.max_match_id - } - - #[inline(always)] - fn is_start(&self, sid: StateID) -> bool { - sid == self.special.start_unanchored_id - || sid == self.special.start_anchored_id - } - - #[inline(always)] - fn match_kind(&self) -> MatchKind { - self.match_kind - } - - #[inline(always)] - fn patterns_len(&self) -> usize { - self.pattern_lens.len() - } - - #[inline(always)] - fn pattern_len(&self, pid: PatternID) -> usize { - self.pattern_lens[pid].as_usize() - } - - #[inline(always)] - fn min_pattern_len(&self) -> usize { - self.min_pattern_len - } - - #[inline(always)] - fn max_pattern_len(&self) -> usize { - self.max_pattern_len - } - - #[inline(always)] - fn match_len(&self, sid: StateID) -> usize { - State::match_len(self.alphabet_len, &self.repr[sid.as_usize()..]) - } - - #[inline(always)] - fn match_pattern(&self, sid: StateID, index: usize) -> PatternID { - State::match_pattern( - self.alphabet_len, - &self.repr[sid.as_usize()..], - index, - ) - } - - #[inline(always)] - fn memory_usage(&self) -> usize { - use core::mem::size_of; - - (self.repr.len() * size_of::<u32>()) - + (self.pattern_lens.len() * size_of::<SmallIndex>()) - + self.prefilter.as_ref().map_or(0, |p| p.memory_usage()) - } - - #[inline(always)] - fn prefilter(&self) -> Option<&Prefilter> { - self.prefilter.as_ref() - } -} - -impl core::fmt::Debug for NFA { - fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { - use crate::automaton::fmt_state_indicator; - - writeln!(f, "contiguous::NFA(")?; - let mut sid = NFA::DEAD; // always the first state and always present - loop { - let raw = &self.repr[sid.as_usize()..]; - if raw.is_empty() { - break; - } - let is_match = self.is_match(sid); - let state = State::read(self.alphabet_len, is_match, raw); - fmt_state_indicator(f, self, sid)?; - write!( - f, - "{:06}({:06}): ", - 
sid.as_usize(), - state.fail.as_usize() - )?; - state.fmt(f)?; - write!(f, "\n")?; - if self.is_match(sid) { - write!(f, " matches: ")?; - for i in 0..state.match_len { - let pid = State::match_pattern(self.alphabet_len, raw, i); - if i > 0 { - write!(f, ", ")?; - } - write!(f, "{}", pid.as_usize())?; - } - write!(f, "\n")?; - } - // The FAIL state doesn't actually have space for a state allocated - // for it, so we have to treat it as a special case. write below - // the DEAD state. - if sid == NFA::DEAD { - writeln!(f, "F {:06}:", NFA::FAIL.as_usize())?; - } - let len = State::len(self.alphabet_len, is_match, raw); - sid = StateID::new(sid.as_usize().checked_add(len).unwrap()) - .unwrap(); - } - writeln!(f, "match kind: {:?}", self.match_kind)?; - writeln!(f, "prefilter: {:?}", self.prefilter.is_some())?; - writeln!(f, "state length: {:?}", self.state_len)?; - writeln!(f, "pattern length: {:?}", self.patterns_len())?; - writeln!(f, "shortest pattern length: {:?}", self.min_pattern_len)?; - writeln!(f, "longest pattern length: {:?}", self.max_pattern_len)?; - writeln!(f, "alphabet length: {:?}", self.alphabet_len)?; - writeln!(f, "byte classes: {:?}", self.byte_classes)?; - writeln!(f, "memory usage: {:?}", self.memory_usage())?; - writeln!(f, ")")?; - - Ok(()) - } -} - -/// The "in memory" representation a single dense or sparse state. -/// -/// A `State`'s in memory representation is not ever actually materialized -/// during a search with a contiguous NFA. Doing so would be too slow. (Indeed, -/// the only time a `State` is actually constructed is in `Debug` impls.) -/// Instead, a `State` exposes a number of static methods for reading certain -/// things from the raw binary encoding of the state. -#[derive(Clone)] -struct State<'a> { - /// The state to transition to when 'class_to_next' yields a transition - /// to the FAIL state. - fail: StateID, - /// The number of pattern IDs in this state. For a non-match state, this is - /// always zero. Otherwise it is always bigger than zero. - match_len: usize, - /// The sparse or dense representation of the transitions for this state. - trans: StateTrans<'a>, -} - -/// The underlying representation of sparse or dense transitions for a state. -/// -/// Note that like `State`, we don't typically construct values of this type -/// during a search since we don't always need all values and thus would -/// represent a lot of wasteful work. -#[derive(Clone)] -enum StateTrans<'a> { - /// A sparse representation of transitions for a state, where only non-FAIL - /// transitions are explicitly represented. - Sparse { - classes: &'a [u32], - /// The transitions for this state, where each transition is packed - /// into a u32. The low 8 bits correspond to the byte class for the - /// transition, and the high 24 bits correspond to the next state ID. - /// - /// This packing is why the max state ID allowed for a contiguous - /// NFA is 2^24-1. - nexts: &'a [u32], - }, - /// A "one transition" state that is never a match state. - /// - /// These are by far the most common state, so we use a specialized and - /// very compact representation for them. - One { - /// The element of this NFA's alphabet that this transition is - /// defined for. - class: u8, - /// The state this should transition to if the current symbol is - /// equal to 'class'. - next: u32, - }, - /// A dense representation of transitions for a state, where all - /// transitions are explicitly represented, including transitions to the - /// FAIL state. 
- Dense { - /// A dense set of transitions to other states. The transitions may - /// point to a FAIL state, in which case, the search should try the - /// same transition lookup at 'fail'. - /// - /// Note that this is indexed by byte equivalence classes and not - /// byte values. That means 'class_to_next[byte]' is wrong and - /// 'class_to_next[classes.get(byte)]' is correct. The number of - /// transitions is always equivalent to 'classes.alphabet_len()'. - class_to_next: &'a [u32], - }, -} - -impl<'a> State<'a> { - /// The offset of where the "kind" of a state is stored. If it isn't one - /// of the sentinel values below, then it's a sparse state and the kind - /// corresponds to the number of transitions in the state. - const KIND: usize = 0; - - /// A sentinel value indicating that the state uses a dense representation. - const KIND_DENSE: u32 = 0xFF; - /// A sentinel value indicating that the state uses a special "one - /// transition" encoding. In practice, non-match states with one transition - /// make up the overwhelming majority of all states in any given - /// Aho-Corasick automaton, so we can specialize them using a very compact - /// representation. - const KIND_ONE: u32 = 0xFE; - - /// The maximum number of transitions to encode as a sparse state. Usually - /// states with a lot of transitions are either very rare, or occur near - /// the start state. In the latter case, they are probably dense already - /// anyway. In the former case, making them dense is fine because they're - /// rare. - /// - /// This needs to be small enough to permit each of the sentinel values for - /// 'KIND' above. Namely, a sparse state embeds the number of transitions - /// into the 'KIND'. Basically, "sparse" is a state kind too, but it's the - /// "else" branch. - /// - /// N.B. There isn't anything particularly magical about 127 here. I - /// just picked it because I figured any sparse state with this many - /// transitions is going to be exceptionally rare, and if it did have this - /// many transitions, then it would be quite slow to do a linear scan on - /// the transitions during a search anyway. - const MAX_SPARSE_TRANSITIONS: usize = 127; - - /// Remap state IDs in-place. - /// - /// `state` should be the the raw binary encoding of a state. (The start - /// of the slice must correspond to the start of the state, but the slice - /// may extend past the end of the encoding of the state.) - fn remap( - alphabet_len: usize, - old_to_new: &[StateID], - state: &mut [u32], - ) -> Result<(), BuildError> { - let kind = State::kind(state); - if kind == State::KIND_DENSE { - state[1] = old_to_new[state[1].as_usize()].as_u32(); - for next in state[2..][..alphabet_len].iter_mut() { - *next = old_to_new[next.as_usize()].as_u32(); - } - } else if kind == State::KIND_ONE { - state[1] = old_to_new[state[1].as_usize()].as_u32(); - state[2] = old_to_new[state[2].as_usize()].as_u32(); - } else { - let trans_len = State::sparse_trans_len(state); - let classes_len = u32_len(trans_len); - state[1] = old_to_new[state[1].as_usize()].as_u32(); - for next in state[2 + classes_len..][..trans_len].iter_mut() { - *next = old_to_new[next.as_usize()].as_u32(); - } - } - Ok(()) - } - - /// Returns the length, in number of u32s, of this state. - /// - /// This is useful for reading states consecutively, e.g., in the Debug - /// impl without needing to store a separate map from state index to state - /// identifier. - /// - /// `state` should be the the raw binary encoding of a state. 
(The start - /// of the slice must correspond to the start of the state, but the slice - /// may extend past the end of the encoding of the state.) - fn len(alphabet_len: usize, is_match: bool, state: &[u32]) -> usize { - let kind_len = 1; - let fail_len = 1; - let kind = State::kind(state); - let (classes_len, trans_len) = if kind == State::KIND_DENSE { - (0, alphabet_len) - } else if kind == State::KIND_ONE { - (0, 1) - } else { - let trans_len = State::sparse_trans_len(state); - let classes_len = u32_len(trans_len); - (classes_len, trans_len) - }; - let match_len = if !is_match { - 0 - } else if State::match_len(alphabet_len, state) == 1 { - // This is a special case because when there is one pattern ID for - // a match state, it is represented by a single u32 with its high - // bit set (which is impossible for a valid pattern ID). - 1 - } else { - // We add 1 to include the u32 that indicates the number of - // pattern IDs that follow. - 1 + State::match_len(alphabet_len, state) - }; - kind_len + fail_len + classes_len + trans_len + match_len - } - - /// Returns the kind of this state. - /// - /// This only includes the low byte. - #[inline(always)] - fn kind(state: &[u32]) -> u32 { - state[State::KIND] & 0xFF - } - - /// Get the number of sparse transitions in this state. This can never - /// be more than State::MAX_SPARSE_TRANSITIONS, as all states with more - /// transitions are encoded as dense states. - /// - /// `state` should be the the raw binary encoding of a sparse state. (The - /// start of the slice must correspond to the start of the state, but the - /// slice may extend past the end of the encoding of the state.) If this - /// isn't a sparse state, then the return value is unspecified. - /// - /// Do note that this is only legal to call on a sparse state. So for - /// example, "one transition" state is not a sparse state, so it would not - /// be legal to call this method on such a state. - #[inline(always)] - fn sparse_trans_len(state: &[u32]) -> usize { - (state[State::KIND] & 0xFF).as_usize() - } - - /// Returns the total number of matching pattern IDs in this state. Calling - /// this on a state that isn't a match results in unspecified behavior. - /// Thus, the returned number is never 0 for all correct calls. - /// - /// `state` should be the the raw binary encoding of a state. (The start - /// of the slice must correspond to the start of the state, but the slice - /// may extend past the end of the encoding of the state.) - #[inline(always)] - fn match_len(alphabet_len: usize, state: &[u32]) -> usize { - // We don't need to handle KIND_ONE here because it can never be a - // match state. - let packed = if State::kind(state) == State::KIND_DENSE { - let start = 2 + alphabet_len; - state[start].as_usize() - } else { - let trans_len = State::sparse_trans_len(state); - let classes_len = u32_len(trans_len); - let start = 2 + classes_len + trans_len; - state[start].as_usize() - }; - if packed & (1 << 31) == 0 { - packed - } else { - 1 - } - } - - /// Returns the pattern ID corresponding to the given index for the state - /// given. The `index` provided must be less than the number of pattern IDs - /// in this state. - /// - /// `state` should be the the raw binary encoding of a state. (The start of - /// the slice must correspond to the start of the state, but the slice may - /// extend past the end of the encoding of the state.) - /// - /// If the given state is not a match state or if the index is out of - /// bounds, then this has unspecified behavior. 
- #[inline(always)] - fn match_pattern( - alphabet_len: usize, - state: &[u32], - index: usize, - ) -> PatternID { - // We don't need to handle KIND_ONE here because it can never be a - // match state. - let start = if State::kind(state) == State::KIND_DENSE { - 2 + alphabet_len - } else { - let trans_len = State::sparse_trans_len(state); - let classes_len = u32_len(trans_len); - 2 + classes_len + trans_len - }; - let packed = state[start]; - let pid = if packed & (1 << 31) == 0 { - state[start + 1 + index] - } else { - assert_eq!(0, index); - packed & !(1 << 31) - }; - PatternID::from_u32_unchecked(pid) - } - - /// Read a state's binary encoding to its in-memory representation. - /// - /// `alphabet_len` should be the total number of transitions defined for - /// dense states. - /// - /// `is_match` should be true if this state is a match state and false - /// otherwise. - /// - /// `state` should be the the raw binary encoding of a state. (The start - /// of the slice must correspond to the start of the state, but the slice - /// may extend past the end of the encoding of the state.) - fn read( - alphabet_len: usize, - is_match: bool, - state: &'a [u32], - ) -> State<'a> { - let kind = State::kind(state); - let match_len = - if !is_match { 0 } else { State::match_len(alphabet_len, state) }; - let (trans, fail) = if kind == State::KIND_DENSE { - let fail = StateID::from_u32_unchecked(state[1]); - let class_to_next = &state[2..][..alphabet_len]; - (StateTrans::Dense { class_to_next }, fail) - } else if kind == State::KIND_ONE { - let fail = StateID::from_u32_unchecked(state[1]); - let class = state[State::KIND].low_u16().high_u8(); - let next = state[2]; - (StateTrans::One { class, next }, fail) - } else { - let fail = StateID::from_u32_unchecked(state[1]); - let trans_len = State::sparse_trans_len(state); - let classes_len = u32_len(trans_len); - let classes = &state[2..][..classes_len]; - let nexts = &state[2 + classes_len..][..trans_len]; - (StateTrans::Sparse { classes, nexts }, fail) - }; - State { fail, match_len, trans } - } - - /// Encode the "old" state from a noncontiguous NFA to its binary - /// representation to the given `dst` slice. `classes` should be the byte - /// classes computed for the noncontiguous NFA that the given state came - /// from. - /// - /// This returns an error if `dst` became so big that `StateID`s can no - /// longer be created for new states. Otherwise, it returns the state ID of - /// the new state created. - /// - /// When `force_dense` is true, then the encoded state will always use a - /// dense format. Otherwise, the choice between dense and sparse will be - /// automatically chosen based on the old state. - fn write( - nnfa: &noncontiguous::NFA, - oldsid: StateID, - old: &noncontiguous::State, - classes: &ByteClasses, - dst: &mut Vec<u32>, - force_dense: bool, - ) -> Result<StateID, BuildError> { - let sid = StateID::new(dst.len()).map_err(|e| { - BuildError::state_id_overflow(StateID::MAX.as_u64(), e.attempted()) - })?; - let old_len = nnfa.iter_trans(oldsid).count(); - // For states with a lot of transitions, we might as well just make - // them dense. These kinds of hot states tend to be very rare, so we're - // okay with it. This also gives us more sentinels in the state's - // 'kind', which lets us create different state kinds to save on - // space. 
- let kind = if force_dense || old_len > State::MAX_SPARSE_TRANSITIONS { - State::KIND_DENSE - } else if old_len == 1 && !old.is_match() { - State::KIND_ONE - } else { - // For a sparse state, the kind is just the number of transitions. - u32::try_from(old_len).unwrap() - }; - if kind == State::KIND_DENSE { - dst.push(kind); - dst.push(old.fail().as_u32()); - State::write_dense_trans(nnfa, oldsid, classes, dst)?; - } else if kind == State::KIND_ONE { - let t = nnfa.iter_trans(oldsid).next().unwrap(); - let class = u32::from(classes.get(t.byte())); - dst.push(kind | (class << 8)); - dst.push(old.fail().as_u32()); - dst.push(t.next().as_u32()); - } else { - dst.push(kind); - dst.push(old.fail().as_u32()); - State::write_sparse_trans(nnfa, oldsid, classes, dst)?; - } - // Now finally write the number of matches and the matches themselves. - if old.is_match() { - let matches_len = nnfa.iter_matches(oldsid).count(); - if matches_len == 1 { - let pid = nnfa.iter_matches(oldsid).next().unwrap().as_u32(); - assert_eq!(0, pid & (1 << 31)); - dst.push((1 << 31) | pid); - } else { - assert_eq!(0, matches_len & (1 << 31)); - dst.push(matches_len.as_u32()); - dst.extend(nnfa.iter_matches(oldsid).map(|pid| pid.as_u32())); - } - } - Ok(sid) - } - - /// Encode the "old" state transitions from a noncontiguous NFA to its - /// binary sparse representation to the given `dst` slice. `classes` should - /// be the byte classes computed for the noncontiguous NFA that the given - /// state came from. - /// - /// This returns an error if `dst` became so big that `StateID`s can no - /// longer be created for new states. - fn write_sparse_trans( - nnfa: &noncontiguous::NFA, - oldsid: StateID, - classes: &ByteClasses, - dst: &mut Vec<u32>, - ) -> Result<(), BuildError> { - let (mut chunk, mut len) = ([0; 4], 0); - for t in nnfa.iter_trans(oldsid) { - chunk[len] = classes.get(t.byte()); - len += 1; - if len == 4 { - dst.push(u32::from_ne_bytes(chunk)); - chunk = [0; 4]; - len = 0; - } - } - if len > 0 { - // In the case where the number of transitions isn't divisible - // by 4, the last u32 chunk will have some left over room. In - // this case, we "just" repeat the last equivalence class. By - // doing this, we know the leftover faux transitions will never - // be followed because if they were, it would have been followed - // prior to it in the last equivalence class. This saves us some - // branching in the search time state transition code. - let repeat = chunk[len - 1]; - while len < 4 { - chunk[len] = repeat; - len += 1; - } - dst.push(u32::from_ne_bytes(chunk)); - } - for t in nnfa.iter_trans(oldsid) { - dst.push(t.next().as_u32()); - } - Ok(()) - } - - /// Encode the "old" state transitions from a noncontiguous NFA to its - /// binary dense representation to the given `dst` slice. `classes` should - /// be the byte classes computed for the noncontiguous NFA that the given - /// state came from. - /// - /// This returns an error if `dst` became so big that `StateID`s can no - /// longer be created for new states. - fn write_dense_trans( - nnfa: &noncontiguous::NFA, - oldsid: StateID, - classes: &ByteClasses, - dst: &mut Vec<u32>, - ) -> Result<(), BuildError> { - // Our byte classes let us shrink the size of our dense states to the - // number of equivalence classes instead of just fixing it to 256. - // Any non-explicitly defined transition is just a transition to the - // FAIL state, so we fill that in first and then overwrite them with - // explicitly defined transitions. 
(Most states probably only have one - // or two explicitly defined transitions.) - // - // N.B. Remember that while building the contiguous NFA, we use state - // IDs from the noncontiguous NFA. It isn't until we've added all - // states that we go back and map noncontiguous IDs to contiguous IDs. - let start = dst.len(); - dst.extend( - core::iter::repeat(noncontiguous::NFA::FAIL.as_u32()) - .take(classes.alphabet_len()), - ); - assert!(start < dst.len(), "equivalence classes are never empty"); - for t in nnfa.iter_trans(oldsid) { - dst[start + usize::from(classes.get(t.byte()))] = - t.next().as_u32(); - } - Ok(()) - } - - /// Return an iterator over every explicitly defined transition in this - /// state. - fn transitions(&self) -> impl Iterator<Item = (u8, StateID)> + '_ { - let mut i = 0; - core::iter::from_fn(move || match self.trans { - StateTrans::Sparse { classes, nexts } => { - if i >= nexts.len() { - return None; - } - let chunk = classes[i / 4]; - let class = chunk.to_ne_bytes()[i % 4]; - let next = StateID::from_u32_unchecked(nexts[i]); - i += 1; - Some((class, next)) - } - StateTrans::One { class, next } => { - if i == 0 { - i += 1; - Some((class, StateID::from_u32_unchecked(next))) - } else { - None - } - } - StateTrans::Dense { class_to_next } => { - if i >= class_to_next.len() { - return None; - } - let class = i.as_u8(); - let next = StateID::from_u32_unchecked(class_to_next[i]); - i += 1; - Some((class, next)) - } - }) - } -} - -impl<'a> core::fmt::Debug for State<'a> { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - use crate::{automaton::sparse_transitions, util::debug::DebugByte}; - - let it = sparse_transitions(self.transitions()) - // Writing out all FAIL transitions is quite noisy. Instead, we - // just require readers of the output to assume anything absent - // maps to the FAIL transition. - .filter(|&(_, _, sid)| sid != NFA::FAIL) - .enumerate(); - for (i, (start, end, sid)) in it { - if i > 0 { - write!(f, ", ")?; - } - if start == end { - write!(f, "{:?} => {:?}", DebugByte(start), sid.as_usize())?; - } else { - write!( - f, - "{:?}-{:?} => {:?}", - DebugByte(start), - DebugByte(end), - sid.as_usize() - )?; - } - } - Ok(()) - } -} - -/// A builder for configuring an Aho-Corasick contiguous NFA. -/// -/// This builder has a subset of the options available to a -/// [`AhoCorasickBuilder`](crate::AhoCorasickBuilder). Of the shared options, -/// their behavior is identical. -#[derive(Clone, Debug)] -pub struct Builder { - noncontiguous: noncontiguous::Builder, - dense_depth: usize, - byte_classes: bool, -} - -impl Default for Builder { - fn default() -> Builder { - Builder { - noncontiguous: noncontiguous::Builder::new(), - dense_depth: 2, - byte_classes: true, - } - } -} - -impl Builder { - /// Create a new builder for configuring an Aho-Corasick contiguous NFA. - pub fn new() -> Builder { - Builder::default() - } - - /// Build an Aho-Corasick contiguous NFA from the given iterator of - /// patterns. - /// - /// A builder may be reused to create more NFAs. - pub fn build<I, P>(&self, patterns: I) -> Result<NFA, BuildError> - where - I: IntoIterator<Item = P>, - P: AsRef<[u8]>, - { - let nnfa = self.noncontiguous.build(patterns)?; - self.build_from_noncontiguous(&nnfa) - } - - /// Build an Aho-Corasick contiguous NFA from the given noncontiguous NFA. - /// - /// Note that when this method is used, only the `dense_depth` and - /// `byte_classes` settings on this builder are respected. 
The other - /// settings only apply to the initial construction of the Aho-Corasick - /// automaton. Since using this method requires that initial construction - /// has already completed, all settings impacting only initial construction - /// are no longer relevant. - pub fn build_from_noncontiguous( - &self, - nnfa: &noncontiguous::NFA, - ) -> Result<NFA, BuildError> { - debug!("building contiguous NFA"); - let byte_classes = if self.byte_classes { - nnfa.byte_classes().clone() - } else { - ByteClasses::singletons() - }; - let mut index_to_state_id = vec![NFA::DEAD; nnfa.states().len()]; - let mut nfa = NFA { - repr: vec![], - pattern_lens: nnfa.pattern_lens_raw().to_vec(), - state_len: nnfa.states().len(), - prefilter: nnfa.prefilter().map(|p| p.clone()), - match_kind: nnfa.match_kind(), - alphabet_len: byte_classes.alphabet_len(), - byte_classes, - min_pattern_len: nnfa.min_pattern_len(), - max_pattern_len: nnfa.max_pattern_len(), - // The special state IDs are set later. - special: Special::zero(), - }; - for (oldsid, state) in nnfa.states().iter().with_state_ids() { - // We don't actually encode a fail state since it isn't necessary. - // But we still want to make sure any FAIL ids are mapped - // correctly. - if oldsid == noncontiguous::NFA::FAIL { - index_to_state_id[oldsid] = NFA::FAIL; - continue; - } - let force_dense = state.depth().as_usize() < self.dense_depth; - let newsid = State::write( - nnfa, - oldsid, - state, - &nfa.byte_classes, - &mut nfa.repr, - force_dense, - )?; - index_to_state_id[oldsid] = newsid; - } - for &newsid in index_to_state_id.iter() { - if newsid == NFA::FAIL { - continue; - } - let state = &mut nfa.repr[newsid.as_usize()..]; - State::remap(nfa.alphabet_len, &index_to_state_id, state)?; - } - // Now that we've remapped all the IDs in our states, all that's left - // is remapping the special state IDs. - let remap = &index_to_state_id; - let old = nnfa.special(); - let new = &mut nfa.special; - new.max_special_id = remap[old.max_special_id]; - new.max_match_id = remap[old.max_match_id]; - new.start_unanchored_id = remap[old.start_unanchored_id]; - new.start_anchored_id = remap[old.start_anchored_id]; - debug!( - "contiguous NFA built, <states: {:?}, size: {:?}, \ - alphabet len: {:?}>", - nfa.state_len, - nfa.memory_usage(), - nfa.byte_classes.alphabet_len(), - ); - // The vectors can grow ~twice as big during construction because a - // Vec amortizes growth. But here, let's shrink things back down to - // what we actually need since we're never going to add more to it. - nfa.repr.shrink_to_fit(); - nfa.pattern_lens.shrink_to_fit(); - Ok(nfa) - } - - /// Set the desired match semantics. - /// - /// This only applies when using [`Builder::build`] and not - /// [`Builder::build_from_noncontiguous`]. - /// - /// See - /// [`AhoCorasickBuilder::match_kind`](crate::AhoCorasickBuilder::match_kind) - /// for more documentation and examples. - pub fn match_kind(&mut self, kind: MatchKind) -> &mut Builder { - self.noncontiguous.match_kind(kind); - self - } - - /// Enable ASCII-aware case insensitive matching. - /// - /// This only applies when using [`Builder::build`] and not - /// [`Builder::build_from_noncontiguous`]. - /// - /// See - /// [`AhoCorasickBuilder::ascii_case_insensitive`](crate::AhoCorasickBuilder::ascii_case_insensitive) - /// for more documentation and examples. - pub fn ascii_case_insensitive(&mut self, yes: bool) -> &mut Builder { - self.noncontiguous.ascii_case_insensitive(yes); - self - } - - /// Enable heuristic prefilter optimizations. 
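A brief usage sketch of the builder described above, assuming the crate's public `nfa::contiguous` module and the `Automaton::try_find` entry point (mirroring the noncontiguous doc example later in this patch); `dense_depth(2)` simply restates the default.

```rust
use aho_corasick::{automaton::Automaton, nfa::contiguous, Input, Match};

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // dense_depth(2) restates the default: states within two bytes of the
    // start state get dense transition tables, the rest stay sparse.
    let nfa = contiguous::Builder::new()
        .dense_depth(2)
        .build(&["b", "abc", "abcd"])
        .unwrap();
    // Standard semantics report the first match seen: "b" at 1..2.
    assert_eq!(Some(Match::must(0, 1..2)), nfa.try_find(&Input::new("abcd"))?);
    Ok(())
}
```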
- /// - /// This only applies when using [`Builder::build`] and not - /// [`Builder::build_from_noncontiguous`]. - /// - /// See - /// [`AhoCorasickBuilder::prefilter`](crate::AhoCorasickBuilder::prefilter) - /// for more documentation and examples. - pub fn prefilter(&mut self, yes: bool) -> &mut Builder { - self.noncontiguous.prefilter(yes); - self - } - - /// Set the limit on how many states use a dense representation for their - /// transitions. Other states will generally use a sparse representation. - /// - /// See - /// [`AhoCorasickBuilder::dense_depth`](crate::AhoCorasickBuilder::dense_depth) - /// for more documentation and examples. - pub fn dense_depth(&mut self, depth: usize) -> &mut Builder { - self.dense_depth = depth; - self - } - - /// A debug setting for whether to attempt to shrink the size of the - /// automaton's alphabet or not. - /// - /// This should never be enabled unless you're debugging an automaton. - /// Namely, disabling byte classes makes transitions easier to reason - /// about, since they use the actual bytes instead of equivalence classes. - /// Disabling this confers no performance benefit at search time. - /// - /// See - /// [`AhoCorasickBuilder::byte_classes`](crate::AhoCorasickBuilder::byte_classes) - /// for more documentation and examples. - pub fn byte_classes(&mut self, yes: bool) -> &mut Builder { - self.byte_classes = yes; - self - } -} - -/// Computes the number of u32 values needed to represent one byte per the -/// number of transitions given. -fn u32_len(ntrans: usize) -> usize { - if ntrans % 4 == 0 { - ntrans >> 2 - } else { - (ntrans >> 2) + 1 - } -} - -#[cfg(test)] -mod tests { - // This test demonstrates a SWAR technique I tried in the sparse transition - // code inside of 'next_state'. Namely, sparse transitions work by - // iterating over u32 chunks, with each chunk containing up to 4 classes - // corresponding to 4 transitions. This SWAR technique lets us find a - // matching transition without converting the u32 to a [u8; 4]. - // - // It turned out to be a little slower unfortunately, which isn't too - // surprising, since this is likely a throughput oriented optimization. - // Loop unrolling doesn't really help us because the vast majority of - // states have very few transitions. - // - // Anyway, this code was a little tricky to write, so I converted it to a - // test in case someone figures out how to use it more effectively than - // I could. - // - // (This also only works on little endian. So big endian would need to be - // accounted for if we ever decided to use this I think.) 
- #[cfg(target_endian = "little")] - #[test] - fn swar() { - use super::*; - - fn has_zero_byte(x: u32) -> u32 { - const LO_U32: u32 = 0x01010101; - const HI_U32: u32 = 0x80808080; - - x.wrapping_sub(LO_U32) & !x & HI_U32 - } - - fn broadcast(b: u8) -> u32 { - (u32::from(b)) * (u32::MAX / 255) - } - - fn index_of(x: u32) -> usize { - let o = - (((x - 1) & 0x01010101).wrapping_mul(0x01010101) >> 24) - 1; - o.as_usize() - } - - let bytes: [u8; 4] = [b'1', b'A', b'a', b'z']; - let chunk = u32::from_ne_bytes(bytes); - - let needle = broadcast(b'1'); - assert_eq!(0, index_of(has_zero_byte(needle ^ chunk))); - let needle = broadcast(b'A'); - assert_eq!(1, index_of(has_zero_byte(needle ^ chunk))); - let needle = broadcast(b'a'); - assert_eq!(2, index_of(has_zero_byte(needle ^ chunk))); - let needle = broadcast(b'z'); - assert_eq!(3, index_of(has_zero_byte(needle ^ chunk))); - } -} diff --git a/vendor/aho-corasick/src/nfa/mod.rs b/vendor/aho-corasick/src/nfa/mod.rs deleted file mode 100644 index 93f4dc25c21f6c..00000000000000 --- a/vendor/aho-corasick/src/nfa/mod.rs +++ /dev/null @@ -1,40 +0,0 @@ -/*! -Provides direct access to NFA implementations of Aho-Corasick. - -The principle characteristic of an NFA in this crate is that it may -transition through multiple states per byte of haystack. In Aho-Corasick -parlance, NFAs follow failure transitions during a search. In contrast, -a [`DFA`](crate::dfa::DFA) pre-computes all failure transitions during -compilation at the expense of a much bigger memory footprint. - -Currently, there are two NFA implementations provided: noncontiguous and -contiguous. The names reflect their internal representation, and consequently, -the trade offs associated with them: - -* A [`noncontiguous::NFA`] uses a separate allocation for every NFA state to -represent its transitions in a sparse format. This is ideal for building an -NFA, since it cheaply permits different states to have a different number of -transitions. A noncontiguous NFA is where the main Aho-Corasick construction -algorithm is implemented. All other Aho-Corasick implementations are built by -first constructing a noncontiguous NFA. -* A [`contiguous::NFA`] is uses a single allocation to represent all states, -while still encoding most states as sparse states but permitting states near -the starting state to have a dense representation. The dense representation -uses more memory, but permits computing transitions during a search more -quickly. By only making the most active states dense (the states near the -starting state), a contiguous NFA better balances memory usage with search -speed. The single contiguous allocation also uses less overhead per state and -enables compression tricks where most states only use 8 bytes of heap memory. - -When given the choice between these two, you almost always want to pick a -contiguous NFA. It takes only a little longer to build, but both its memory -usage and search speed are typically much better than a noncontiguous NFA. A -noncontiguous NFA is useful when prioritizing build times, or when there are -so many patterns that a contiguous NFA could not be built. (Currently, because -of both memory and search speed improvements, a contiguous NFA has a smaller -internal limit on the total number of NFA states it can represent. But you -would likely need to have hundreds of thousands or even millions of patterns -before you hit this limit.) 
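For completeness, callers normally reach these NFAs through the crate's top-level API rather than building them directly; the sketch below assumes the `AhoCorasickKind` knob on `AhoCorasickBuilder` to request the contiguous NFA explicitly.

```rust
use aho_corasick::{AhoCorasick, AhoCorasickKind, MatchKind};

fn main() {
    // Ask the top-level builder for the contiguous NFA instead of letting it
    // pick an implementation heuristically.
    let ac = AhoCorasick::builder()
        .kind(Some(AhoCorasickKind::ContiguousNFA))
        .match_kind(MatchKind::LeftmostFirst)
        .build(["Samwise", "Sam"])
        .unwrap();
    let mat = ac.find("Samwise gamgee").unwrap();
    // Leftmost-first prefers the pattern listed first: "Samwise" at 0..7.
    assert_eq!(0, mat.pattern().as_usize());
    assert_eq!(0, mat.start());
    assert_eq!(7, mat.end());
}
```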
-*/ -pub mod contiguous; -pub mod noncontiguous; diff --git a/vendor/aho-corasick/src/nfa/noncontiguous.rs b/vendor/aho-corasick/src/nfa/noncontiguous.rs deleted file mode 100644 index af32617c900745..00000000000000 --- a/vendor/aho-corasick/src/nfa/noncontiguous.rs +++ /dev/null @@ -1,1762 +0,0 @@ -/*! -Provides a noncontiguous NFA implementation of Aho-Corasick. - -This is a low-level API that generally only needs to be used in niche -circumstances. When possible, prefer using [`AhoCorasick`](crate::AhoCorasick) -instead of a noncontiguous NFA directly. Using an `NFA` directly is typically -only necessary when one needs access to the [`Automaton`] trait implementation. -*/ - -use alloc::{ - collections::{BTreeSet, VecDeque}, - vec, - vec::Vec, -}; - -use crate::{ - automaton::Automaton, - util::{ - alphabet::{ByteClassSet, ByteClasses}, - error::{BuildError, MatchError}, - prefilter::{self, opposite_ascii_case, Prefilter}, - primitives::{IteratorIndexExt, PatternID, SmallIndex, StateID}, - remapper::Remapper, - search::{Anchored, MatchKind}, - special::Special, - }, -}; - -/// A noncontiguous NFA implementation of Aho-Corasick. -/// -/// When possible, prefer using [`AhoCorasick`](crate::AhoCorasick) instead of -/// this type directly. Using an `NFA` directly is typically only necessary -/// when one needs access to the [`Automaton`] trait implementation. -/// -/// This NFA represents the "core" implementation of Aho-Corasick in this -/// crate. Namely, constructing this NFA involving building a trie and then -/// filling in the failure transitions between states, similar to what is -/// described in any standard textbook description of Aho-Corasick. -/// -/// In order to minimize heap usage and to avoid additional construction costs, -/// this implementation represents the transitions of all states as distinct -/// sparse memory allocations. This is where it gets its name from. That is, -/// this NFA has no contiguous memory allocation for its transition table. Each -/// state gets its own allocation. -/// -/// While the sparse representation keeps memory usage to somewhat reasonable -/// levels, it is still quite large and also results in somewhat mediocre -/// search performance. For this reason, it is almost always a good idea to -/// use a [`contiguous::NFA`](crate::nfa::contiguous::NFA) instead. It is -/// marginally slower to build, but has higher throughput and can sometimes use -/// an order of magnitude less memory. The main reason to use a noncontiguous -/// NFA is when you need the fastest possible construction time, or when a -/// contiguous NFA does not have the desired capacity. (The total number of NFA -/// states it can have is fewer than a noncontiguous NFA.) -/// -/// # Example -/// -/// This example shows how to build an `NFA` directly and use it to execute -/// [`Automaton::try_find`]: -/// -/// ``` -/// use aho_corasick::{ -/// automaton::Automaton, -/// nfa::noncontiguous::NFA, -/// Input, Match, -/// }; -/// -/// let patterns = &["b", "abc", "abcd"]; -/// let haystack = "abcd"; -/// -/// let nfa = NFA::new(patterns).unwrap(); -/// assert_eq!( -/// Some(Match::must(0, 1..2)), -/// nfa.try_find(&Input::new(haystack))?, -/// ); -/// # Ok::<(), Box<dyn std::error::Error>>(()) -/// ``` -/// -/// It is also possible to implement your own version of `try_find`. See the -/// [`Automaton`] documentation for an example. -#[derive(Clone)] -pub struct NFA { - /// The match semantics built into this NFA. - match_kind: MatchKind, - /// A set of states. 
Each state defines its own transitions, a fail - /// transition and a set of indices corresponding to matches. - /// - /// The first state is always the fail state, which is used only as a - /// sentinel. Namely, in the final NFA, no transition into the fail state - /// exists. (Well, they do, but they aren't followed. Instead, the state's - /// failure transition is followed.) - /// - /// The second state (index 1) is always the dead state. Dead states are - /// in every automaton, but only used when leftmost-{first,longest} match - /// semantics are enabled. Specifically, they instruct search to stop - /// at specific points in order to report the correct match location. In - /// the standard Aho-Corasick construction, there are no transitions to - /// the dead state. - /// - /// The third state (index 2) is generally intended to be the starting or - /// "root" state. - states: Vec<State>, - /// Transitions stored in a sparse representation via a linked list. - /// - /// Each transition contains three pieces of information: the byte it - /// is defined for, the state it transitions to and a link to the next - /// transition in the same state (or `StateID::ZERO` if it is the last - /// transition). - /// - /// The first transition for each state is determined by `State::sparse`. - /// - /// Note that this contains a complete set of all transitions in this NFA, - /// including states that have a dense representation for transitions. - /// (Adding dense transitions for a state doesn't remove its sparse - /// transitions, since deleting transitions from this particular sparse - /// representation would be fairly expensive.) - sparse: Vec<Transition>, - /// Transitions stored in a dense representation. - /// - /// A state has a row in this table if and only if `State::dense` is - /// not equal to `StateID::ZERO`. When not zero, there are precisely - /// `NFA::byte_classes::alphabet_len()` entries beginning at `State::dense` - /// in this table. - /// - /// Generally a very small minority of states have a dense representation - /// since it uses so much memory. - dense: Vec<StateID>, - /// Matches stored in linked list for each state. - /// - /// Like sparse transitions, each match has a link to the next match in the - /// state. - /// - /// The first match for each state is determined by `State::matches`. - matches: Vec<Match>, - /// The length, in bytes, of each pattern in this NFA. This slice is - /// indexed by `PatternID`. - /// - /// The number of entries in this vector corresponds to the total number of - /// patterns in this automaton. - pattern_lens: Vec<SmallIndex>, - /// A prefilter for quickly skipping to candidate matches, if pertinent. - prefilter: Option<Prefilter>, - /// A set of equivalence classes in terms of bytes. We compute this while - /// building the NFA, but don't use it in the NFA's states. Instead, we - /// use this for building the DFA. We store it on the NFA since it's easy - /// to compute while visiting the patterns. - byte_classes: ByteClasses, - /// The length, in bytes, of the shortest pattern in this automaton. This - /// information is useful for detecting whether an automaton matches the - /// empty string or not. - min_pattern_len: usize, - /// The length, in bytes, of the longest pattern in this automaton. This - /// information is useful for keeping correct buffer sizes when searching - /// on streams. - max_pattern_len: usize, - /// The information required to deduce which states are "special" in this - /// NFA. 
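The index-linked-list layout described for `sparse` above can be sketched standalone. The names below are illustrative, not the crate's internal types; index 0 doubles as the "end of list" sentinel, exactly as described.

```rust
// Illustrative stand-alone types, not the crate's internals.
#[derive(Clone, Copy, Default)]
struct Transition {
    byte: u8,
    next_state: u32,
    link: usize, // index of the next transition for the same state; 0 = end
}

fn iter_state_transitions(
    sparse: &[Transition],
    head: usize,
) -> impl Iterator<Item = (u8, u32)> + '_ {
    let mut link = head;
    std::iter::from_fn(move || {
        if link == 0 {
            return None;
        }
        let t = sparse[link];
        link = t.link;
        Some((t.byte, t.next_state))
    })
}

fn main() {
    // Index 0 is a dummy entry so that a link of 0 can mean "end of list".
    let sparse = vec![
        Transition::default(),
        Transition { byte: b'a', next_state: 3, link: 2 },
        Transition { byte: b'b', next_state: 4, link: 0 },
    ];
    let got: Vec<_> = iter_state_transitions(&sparse, 1).collect();
    assert_eq!(vec![(b'a', 3), (b'b', 4)], got);
}
```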
- /// - /// Since the DEAD and FAIL states are always the first two states and - /// there are only ever two start states (which follow all of the match - /// states), it follows that we can determine whether a state is a fail, - /// dead, match or start with just a few comparisons on the ID itself: - /// - /// is_dead(sid): sid == NFA::DEAD - /// is_fail(sid): sid == NFA::FAIL - /// is_match(sid): NFA::FAIL < sid && sid <= max_match_id - /// is_start(sid): sid == start_unanchored_id || sid == start_anchored_id - /// - /// Note that this only applies to the NFA after it has been constructed. - /// During construction, the start states are the first ones added and the - /// match states are inter-leaved with non-match states. Once all of the - /// states have been added, the states are shuffled such that the above - /// predicates hold. - special: Special, -} - -impl NFA { - /// Create a new Aho-Corasick noncontiguous NFA using the default - /// configuration. - /// - /// Use a [`Builder`] if you want to change the configuration. - pub fn new<I, P>(patterns: I) -> Result<NFA, BuildError> - where - I: IntoIterator<Item = P>, - P: AsRef<[u8]>, - { - NFA::builder().build(patterns) - } - - /// A convenience method for returning a new Aho-Corasick noncontiguous NFA - /// builder. - /// - /// This usually permits one to just import the `NFA` type. - pub fn builder() -> Builder { - Builder::new() - } -} - -impl NFA { - /// The DEAD state is a sentinel state like the FAIL state. The DEAD state - /// instructs any search to stop and return any currently recorded match, - /// or no match otherwise. Generally speaking, it is impossible for an - /// unanchored standard search to enter a DEAD state. But an anchored - /// search can, and so to can a leftmost search. - /// - /// We put DEAD before FAIL so that DEAD is always 0. We repeat this - /// decision across the other Aho-Corasicm automata, so that DEAD - /// states there are always 0 too. It's not that we need all of the - /// implementations to agree, but rather, the contiguous NFA and the DFA - /// use a sort of "premultiplied" state identifier where the only state - /// whose ID is always known and constant is the first state. Subsequent - /// state IDs depend on how much space has already been used in the - /// transition table. - pub(crate) const DEAD: StateID = StateID::new_unchecked(0); - /// The FAIL state mostly just corresponds to the ID of any transition on a - /// state that isn't explicitly defined. When one transitions into the FAIL - /// state, one must follow the previous state's failure transition before - /// doing the next state lookup. In this way, FAIL is more of a sentinel - /// than a state that one actually transitions into. In particular, it is - /// never exposed in the `Automaton` interface. - pub(crate) const FAIL: StateID = StateID::new_unchecked(1); - - /// Returns the equivalence classes of bytes found while constructing - /// this NFA. - /// - /// Note that the NFA doesn't actually make use of these equivalence - /// classes. Instead, these are useful for building the DFA when desired. - pub(crate) fn byte_classes(&self) -> &ByteClasses { - &self.byte_classes - } - - /// Returns a slice containing the length of each pattern in this searcher. - /// It is indexed by `PatternID` and has length `NFA::patterns_len`. - /// - /// This is exposed for convenience when building a contiguous NFA. But it - /// can be reconstructed from the `Automaton` API if necessary. 
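A minimal sketch of the ID-range predicates described above, with illustrative constants and a made-up state layout (DEAD, FAIL, two match states, then the two start states); the real bookkeeping lives in the crate's `Special` type.

```rust
// Illustrative constants and layout, not the crate's Special type itself.
const DEAD: u32 = 0;
const FAIL: u32 = 1;

struct Special {
    max_special_id: u32,
    max_match_id: u32,
    start_unanchored_id: u32,
    start_anchored_id: u32,
}

impl Special {
    fn is_dead(&self, sid: u32) -> bool { sid == DEAD }
    fn is_fail(&self, sid: u32) -> bool { sid == FAIL }
    fn is_match(&self, sid: u32) -> bool { FAIL < sid && sid <= self.max_match_id }
    fn is_start(&self, sid: u32) -> bool {
        sid == self.start_unanchored_id || sid == self.start_anchored_id
    }
    fn is_special(&self, sid: u32) -> bool { sid <= self.max_special_id }
}

fn main() {
    // DEAD, FAIL, two match states (2, 3), then the two start states (4, 5).
    let special = Special {
        max_special_id: 5,
        max_match_id: 3,
        start_unanchored_id: 4,
        start_anchored_id: 5,
    };
    assert!(special.is_dead(0) && special.is_fail(1));
    assert!(special.is_match(2) && special.is_match(3) && !special.is_match(5));
    assert!(special.is_start(4) && special.is_special(5) && !special.is_special(6));
}
```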
- pub(crate) fn pattern_lens_raw(&self) -> &[SmallIndex] { - &self.pattern_lens - } - - /// Returns a slice of all states in this non-contiguous NFA. - pub(crate) fn states(&self) -> &[State] { - &self.states - } - - /// Returns the underlying "special" state information for this NFA. - pub(crate) fn special(&self) -> &Special { - &self.special - } - - /// Swaps the states at `id1` and `id2`. - /// - /// This does not update the transitions of any state to account for the - /// state swap. - pub(crate) fn swap_states(&mut self, id1: StateID, id2: StateID) { - self.states.swap(id1.as_usize(), id2.as_usize()); - } - - /// Re-maps all state IDs in this NFA according to the `map` function - /// given. - pub(crate) fn remap(&mut self, map: impl Fn(StateID) -> StateID) { - let alphabet_len = self.byte_classes.alphabet_len(); - for state in self.states.iter_mut() { - state.fail = map(state.fail); - let mut link = state.sparse; - while link != StateID::ZERO { - let t = &mut self.sparse[link]; - t.next = map(t.next); - link = t.link; - } - if state.dense != StateID::ZERO { - let start = state.dense.as_usize(); - for next in self.dense[start..][..alphabet_len].iter_mut() { - *next = map(*next); - } - } - } - } - - /// Iterate over all of the transitions for the given state ID. - pub(crate) fn iter_trans( - &self, - sid: StateID, - ) -> impl Iterator<Item = Transition> + '_ { - let mut link = self.states[sid].sparse; - core::iter::from_fn(move || { - if link == StateID::ZERO { - return None; - } - let t = self.sparse[link]; - link = t.link; - Some(t) - }) - } - - /// Iterate over all of the matches for the given state ID. - pub(crate) fn iter_matches( - &self, - sid: StateID, - ) -> impl Iterator<Item = PatternID> + '_ { - let mut link = self.states[sid].matches; - core::iter::from_fn(move || { - if link == StateID::ZERO { - return None; - } - let m = self.matches[link]; - link = m.link; - Some(m.pid) - }) - } - - /// Return the link following the one given. If the one given is the last - /// link for the given state, then return `None`. - /// - /// If no previous link is given, then this returns the first link in the - /// state, if one exists. - /// - /// This is useful for manually iterating over the transitions in a single - /// state without borrowing the NFA. This permits mutating other parts of - /// the NFA during iteration. Namely, one can access the transition pointed - /// to by the link via `self.sparse[link]`. - fn next_link( - &self, - sid: StateID, - prev: Option<StateID>, - ) -> Option<StateID> { - let link = - prev.map_or(self.states[sid].sparse, |p| self.sparse[p].link); - if link == StateID::ZERO { - None - } else { - Some(link) - } - } - - /// Follow the transition for the given byte in the given state. If no such - /// transition exists, then the FAIL state ID is returned. - #[inline(always)] - fn follow_transition(&self, sid: StateID, byte: u8) -> StateID { - let s = &self.states[sid]; - // This is a special case that targets starting states and states - // near a start state. Namely, after the initial trie is constructed, - // we look for states close to the start state to convert to a dense - // representation for their transitions. This winds up using a lot more - // memory per state in exchange for faster transition lookups. But - // since we only do this for a small number of states (by default), the - // memory usage is usually minimal. 
- // - // This has *massive* benefit when executing searches because the - // unanchored starting state is by far the hottest state and is - // frequently visited. Moreover, the 'for' loop below that works - // decently on an actually sparse state is disastrous on a state that - // is nearly or completely dense. - if s.dense == StateID::ZERO { - self.follow_transition_sparse(sid, byte) - } else { - let class = usize::from(self.byte_classes.get(byte)); - self.dense[s.dense.as_usize() + class] - } - } - - /// Like `follow_transition`, but always uses the sparse representation. - #[inline(always)] - fn follow_transition_sparse(&self, sid: StateID, byte: u8) -> StateID { - for t in self.iter_trans(sid) { - if byte <= t.byte { - if byte == t.byte { - return t.next; - } - break; - } - } - NFA::FAIL - } - - /// Set the transition for the given byte to the state ID given. - /// - /// Note that one should not set transitions to the FAIL state. It is not - /// technically incorrect, but it wastes space. If a transition is not - /// defined, then it is automatically assumed to lead to the FAIL state. - fn add_transition( - &mut self, - prev: StateID, - byte: u8, - next: StateID, - ) -> Result<(), BuildError> { - if self.states[prev].dense != StateID::ZERO { - let dense = self.states[prev].dense; - let class = usize::from(self.byte_classes.get(byte)); - self.dense[dense.as_usize() + class] = next; - } - - let head = self.states[prev].sparse; - if head == StateID::ZERO || byte < self.sparse[head].byte { - let new_link = self.alloc_transition()?; - self.sparse[new_link] = Transition { byte, next, link: head }; - self.states[prev].sparse = new_link; - return Ok(()); - } else if byte == self.sparse[head].byte { - self.sparse[head].next = next; - return Ok(()); - } - - // We handled the only cases where the beginning of the transition - // chain needs to change. At this point, we now know that there is - // at least one entry in the transition chain and the byte for that - // transition is less than the byte for the transition we're adding. - let (mut link_prev, mut link_next) = (head, self.sparse[head].link); - while link_next != StateID::ZERO && byte > self.sparse[link_next].byte - { - link_prev = link_next; - link_next = self.sparse[link_next].link; - } - if link_next == StateID::ZERO || byte < self.sparse[link_next].byte { - let link = self.alloc_transition()?; - self.sparse[link] = Transition { byte, next, link: link_next }; - self.sparse[link_prev].link = link; - } else { - assert_eq!(byte, self.sparse[link_next].byte); - self.sparse[link_next].next = next; - } - Ok(()) - } - - /// This sets every possible transition (all 255 of them) for the given - /// state to the name `next` value. - /// - /// This is useful for efficiently initializing start/dead states. - /// - /// # Panics - /// - /// This requires that the state has no transitions added to it already. - /// If it has any transitions, then this panics. It will also panic if - /// the state has been densified prior to calling this. 
- fn init_full_state( - &mut self, - prev: StateID, - next: StateID, - ) -> Result<(), BuildError> { - assert_eq!( - StateID::ZERO, - self.states[prev].dense, - "state must not be dense yet" - ); - assert_eq!( - StateID::ZERO, - self.states[prev].sparse, - "state must have zero transitions" - ); - let mut prev_link = StateID::ZERO; - for byte in 0..=255 { - let new_link = self.alloc_transition()?; - self.sparse[new_link] = - Transition { byte, next, link: StateID::ZERO }; - if prev_link == StateID::ZERO { - self.states[prev].sparse = new_link; - } else { - self.sparse[prev_link].link = new_link; - } - prev_link = new_link; - } - Ok(()) - } - - /// Add a match for the given pattern ID to the state for the given ID. - fn add_match( - &mut self, - sid: StateID, - pid: PatternID, - ) -> Result<(), BuildError> { - let head = self.states[sid].matches; - let mut link = head; - while self.matches[link].link != StateID::ZERO { - link = self.matches[link].link; - } - let new_match_link = self.alloc_match()?; - self.matches[new_match_link].pid = pid; - if link == StateID::ZERO { - self.states[sid].matches = new_match_link; - } else { - self.matches[link].link = new_match_link; - } - Ok(()) - } - - /// Copy matches from the `src` state to the `dst` state. This is useful - /// when a match state can be reached via a failure transition. In which - /// case, you'll want to copy the matches (if any) from the state reached - /// by the failure transition to the original state you were at. - fn copy_matches( - &mut self, - src: StateID, - dst: StateID, - ) -> Result<(), BuildError> { - let head_dst = self.states[dst].matches; - let mut link_dst = head_dst; - while self.matches[link_dst].link != StateID::ZERO { - link_dst = self.matches[link_dst].link; - } - let mut link_src = self.states[src].matches; - while link_src != StateID::ZERO { - let new_match_link = - StateID::new(self.matches.len()).map_err(|e| { - BuildError::state_id_overflow( - StateID::MAX.as_u64(), - e.attempted(), - ) - })?; - self.matches.push(Match { - pid: self.matches[link_src].pid, - link: StateID::ZERO, - }); - if link_dst == StateID::ZERO { - self.states[dst].matches = new_match_link; - } else { - self.matches[link_dst].link = new_match_link; - } - - link_dst = new_match_link; - link_src = self.matches[link_src].link; - } - Ok(()) - } - - /// Create a new entry in `NFA::trans`, if there's room, and return that - /// entry's ID. If there's no room, then an error is returned. - fn alloc_transition(&mut self) -> Result<StateID, BuildError> { - let id = StateID::new(self.sparse.len()).map_err(|e| { - BuildError::state_id_overflow(StateID::MAX.as_u64(), e.attempted()) - })?; - self.sparse.push(Transition::default()); - Ok(id) - } - - /// Create a new entry in `NFA::matches`, if there's room, and return that - /// entry's ID. If there's no room, then an error is returned. - fn alloc_match(&mut self) -> Result<StateID, BuildError> { - let id = StateID::new(self.matches.len()).map_err(|e| { - BuildError::state_id_overflow(StateID::MAX.as_u64(), e.attempted()) - })?; - self.matches.push(Match::default()); - Ok(id) - } - - /// Create a new set of `N` transitions in this NFA's dense transition - /// table. The ID return corresponds to the index at which the `N` - /// transitions begin. So `id+0` is the first transition and `id+(N-1)` is - /// the last. - /// - /// `N` is determined via `NFA::byte_classes::alphabet_len`. 
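The effect of `copy_matches` is easiest to see from the public API: when a state's failure transition lands on a match state, that state must also report the copied match. A small demonstration, assuming the crate's top-level `AhoCorasick` type and an overlapping search.

```rust
use aho_corasick::AhoCorasick;

fn main() {
    // The state reached after "abc" gets the "bc" match copied into it via
    // its failure transition, so an overlapping search reports both patterns.
    let ac = AhoCorasick::new(["abcd", "bc"]).unwrap();
    let found: Vec<usize> = ac
        .find_overlapping_iter("abcd")
        .map(|m| m.pattern().as_usize())
        .collect();
    assert_eq!(vec![1, 0], found); // "bc" ends at offset 3, "abcd" at 4
}
```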
- fn alloc_dense_state(&mut self) -> Result<StateID, BuildError> { - let id = StateID::new(self.dense.len()).map_err(|e| { - BuildError::state_id_overflow(StateID::MAX.as_u64(), e.attempted()) - })?; - // We use FAIL because it's the correct default. If a state doesn't - // have a transition defined for every possible byte value, then the - // transition function should return NFA::FAIL. - self.dense.extend( - core::iter::repeat(NFA::FAIL) - .take(self.byte_classes.alphabet_len()), - ); - Ok(id) - } - - /// Allocate and add a fresh state to the underlying NFA and return its - /// ID (guaranteed to be one more than the ID of the previously allocated - /// state). If the ID would overflow `StateID`, then this returns an error. - fn alloc_state(&mut self, depth: usize) -> Result<StateID, BuildError> { - // This is OK because we error when building the trie if we see a - // pattern whose length cannot fit into a 'SmallIndex', and the longest - // possible depth corresponds to the length of the longest pattern. - let depth = SmallIndex::new(depth) - .expect("patterns longer than SmallIndex::MAX are not allowed"); - let id = StateID::new(self.states.len()).map_err(|e| { - BuildError::state_id_overflow(StateID::MAX.as_u64(), e.attempted()) - })?; - self.states.push(State { - sparse: StateID::ZERO, - dense: StateID::ZERO, - matches: StateID::ZERO, - fail: self.special.start_unanchored_id, - depth, - }); - Ok(id) - } -} - -// SAFETY: 'start_state' always returns a valid state ID, 'next_state' always -// returns a valid state ID given a valid state ID. We otherwise claim that -// all other methods are correct as well. -unsafe impl Automaton for NFA { - #[inline(always)] - fn start_state(&self, anchored: Anchored) -> Result<StateID, MatchError> { - match anchored { - Anchored::No => Ok(self.special.start_unanchored_id), - Anchored::Yes => Ok(self.special.start_anchored_id), - } - } - - #[inline(always)] - fn next_state( - &self, - anchored: Anchored, - mut sid: StateID, - byte: u8, - ) -> StateID { - // This terminates since: - // - // 1. state.fail never points to the FAIL state. - // 2. All state.fail values point to a state closer to the start state. - // 3. The start state has no transitions to the FAIL state. - loop { - let next = self.follow_transition(sid, byte); - if next != NFA::FAIL { - return next; - } - // For an anchored search, we never follow failure transitions - // because failure transitions lead us down a path to matching - // a *proper* suffix of the path we were on. Thus, it can only - // produce matches that appear after the beginning of the search. - if anchored.is_anchored() { - return NFA::DEAD; - } - sid = self.states[sid].fail(); - } - } - - #[inline(always)] - fn is_special(&self, sid: StateID) -> bool { - sid <= self.special.max_special_id - } - - #[inline(always)] - fn is_dead(&self, sid: StateID) -> bool { - sid == NFA::DEAD - } - - #[inline(always)] - fn is_match(&self, sid: StateID) -> bool { - // N.B. This returns true when sid==NFA::FAIL but that's okay because - // NFA::FAIL is not actually a valid state ID from the perspective of - // the Automaton trait. Namely, it is never returned by 'start_state' - // or by 'next_state'. So we don't need to care about it here. 
- !self.is_dead(sid) && sid <= self.special.max_match_id - } - - #[inline(always)] - fn is_start(&self, sid: StateID) -> bool { - sid == self.special.start_unanchored_id - || sid == self.special.start_anchored_id - } - - #[inline(always)] - fn match_kind(&self) -> MatchKind { - self.match_kind - } - - #[inline(always)] - fn patterns_len(&self) -> usize { - self.pattern_lens.len() - } - - #[inline(always)] - fn pattern_len(&self, pid: PatternID) -> usize { - self.pattern_lens[pid].as_usize() - } - - #[inline(always)] - fn min_pattern_len(&self) -> usize { - self.min_pattern_len - } - - #[inline(always)] - fn max_pattern_len(&self) -> usize { - self.max_pattern_len - } - - #[inline(always)] - fn match_len(&self, sid: StateID) -> usize { - self.iter_matches(sid).count() - } - - #[inline(always)] - fn match_pattern(&self, sid: StateID, index: usize) -> PatternID { - self.iter_matches(sid).nth(index).unwrap() - } - - #[inline(always)] - fn memory_usage(&self) -> usize { - self.states.len() * core::mem::size_of::<State>() - + self.sparse.len() * core::mem::size_of::<Transition>() - + self.matches.len() * core::mem::size_of::<Match>() - + self.dense.len() * StateID::SIZE - + self.pattern_lens.len() * SmallIndex::SIZE - + self.prefilter.as_ref().map_or(0, |p| p.memory_usage()) - } - - #[inline(always)] - fn prefilter(&self) -> Option<&Prefilter> { - self.prefilter.as_ref() - } -} - -/// A representation of a sparse NFA state for an Aho-Corasick automaton. -/// -/// It contains the transitions to the next state, a failure transition for -/// cases where there exists no other transition for the current input byte -/// and the matches implied by visiting this state (if any). -#[derive(Clone, Debug)] -pub(crate) struct State { - /// A pointer to `NFA::trans` corresponding to the head of a linked list - /// containing all of the transitions for this state. - /// - /// This is `StateID::ZERO` if and only if this state has zero transitions. - sparse: StateID, - /// A pointer to a row of `N` transitions in `NFA::dense`. These - /// transitions correspond precisely to what is obtained by traversing - /// `sparse`, but permits constant time lookup. - /// - /// When this is zero (which is true for most states in the default - /// configuration), then this state has no dense representation. - /// - /// Note that `N` is equal to `NFA::byte_classes::alphabet_len()`. This is - /// typically much less than 256 (the maximum value). - dense: StateID, - /// A pointer to `NFA::matches` corresponding to the head of a linked list - /// containing all of the matches for this state. - /// - /// This is `StateID::ZERO` if and only if this state is not a match state. - matches: StateID, - /// The state that should be transitioned to if the current byte in the - /// haystack does not have a corresponding transition defined in this - /// state. - fail: StateID, - /// The depth of this state. Specifically, this is the distance from this - /// state to the starting state. (For the special sentinel states DEAD and - /// FAIL, their depth is always 0.) The depth of a starting state is 0. - /// - /// Note that depth is currently not used in this non-contiguous NFA. It - /// may in the future, but it is used in the contiguous NFA. Namely, it - /// permits an optimization where states near the starting state have their - /// transitions stored in a dense fashion, but all other states have their - /// transitions stored in a sparse fashion. (This non-contiguous NFA uses - /// a sparse representation for all states unconditionally.) 
In any case, - /// this is really the only convenient place to compute and store this - /// information, which we need when building the contiguous NFA. - depth: SmallIndex, -} - -impl State { - /// Return true if and only if this state is a match state. - pub(crate) fn is_match(&self) -> bool { - self.matches != StateID::ZERO - } - - /// Returns the failure transition for this state. - pub(crate) fn fail(&self) -> StateID { - self.fail - } - - /// Returns the depth of this state. That is, the number of transitions - /// this state is from the start state of the NFA. - pub(crate) fn depth(&self) -> SmallIndex { - self.depth - } -} - -/// A single transition in a non-contiguous NFA. -#[derive(Clone, Copy, Default)] -#[repr(packed)] -pub(crate) struct Transition { - byte: u8, - next: StateID, - link: StateID, -} - -impl Transition { - /// Return the byte for which this transition is defined. - pub(crate) fn byte(&self) -> u8 { - self.byte - } - - /// Return the ID of the state that this transition points to. - pub(crate) fn next(&self) -> StateID { - self.next - } - - /// Return the ID of the next transition. - fn link(&self) -> StateID { - self.link - } -} - -impl core::fmt::Debug for Transition { - fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { - write!( - f, - "Transition(byte: {:X?}, next: {:?}, link: {:?})", - self.byte, - self.next().as_usize(), - self.link().as_usize() - ) - } -} - -/// A single match in a non-contiguous NFA. -#[derive(Clone, Copy, Default)] -struct Match { - pid: PatternID, - link: StateID, -} - -impl Match { - /// Return the pattern ID for this match. - pub(crate) fn pattern(&self) -> PatternID { - self.pid - } - - /// Return the ID of the next match. - fn link(&self) -> StateID { - self.link - } -} - -impl core::fmt::Debug for Match { - fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { - write!( - f, - "Match(pid: {:?}, link: {:?})", - self.pattern().as_usize(), - self.link().as_usize() - ) - } -} - -/// A builder for configuring an Aho-Corasick noncontiguous NFA. -/// -/// This builder has a subset of the options available to a -/// [`AhoCorasickBuilder`](crate::AhoCorasickBuilder). Of the shared options, -/// their behavior is identical. -#[derive(Clone, Debug)] -pub struct Builder { - match_kind: MatchKind, - prefilter: bool, - ascii_case_insensitive: bool, - dense_depth: usize, -} - -impl Default for Builder { - fn default() -> Builder { - Builder { - match_kind: MatchKind::default(), - prefilter: true, - ascii_case_insensitive: false, - dense_depth: 3, - } - } -} - -impl Builder { - /// Create a new builder for configuring an Aho-Corasick noncontiguous NFA. - pub fn new() -> Builder { - Builder::default() - } - - /// Build an Aho-Corasick noncontiguous NFA from the given iterator of - /// patterns. - /// - /// A builder may be reused to create more NFAs. - pub fn build<I, P>(&self, patterns: I) -> Result<NFA, BuildError> - where - I: IntoIterator<Item = P>, - P: AsRef<[u8]>, - { - debug!("building non-contiguous NFA"); - let nfa = Compiler::new(self)?.compile(patterns)?; - debug!( - "non-contiguous NFA built, <states: {:?}, size: {:?}>", - nfa.states.len(), - nfa.memory_usage() - ); - Ok(nfa) - } - - /// Set the desired match semantics. - /// - /// See - /// [`AhoCorasickBuilder::match_kind`](crate::AhoCorasickBuilder::match_kind) - /// for more documentation and examples. 
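A short usage sketch of this builder via the public API, assuming leftmost-longest semantics so the longest pattern starting at the leftmost matching position wins.

```rust
use aho_corasick::{automaton::Automaton, nfa::noncontiguous::NFA, Input, Match, MatchKind};

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Leftmost-longest semantics: "abcd" beats the shorter "b" and "abc".
    let nfa = NFA::builder()
        .match_kind(MatchKind::LeftmostLongest)
        .build(&["b", "abc", "abcd"])
        .unwrap();
    assert_eq!(Some(Match::must(2, 0..4)), nfa.try_find(&Input::new("abcd"))?);
    Ok(())
}
```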
- pub fn match_kind(&mut self, kind: MatchKind) -> &mut Builder { - self.match_kind = kind; - self - } - - /// Enable ASCII-aware case insensitive matching. - /// - /// See - /// [`AhoCorasickBuilder::ascii_case_insensitive`](crate::AhoCorasickBuilder::ascii_case_insensitive) - /// for more documentation and examples. - pub fn ascii_case_insensitive(&mut self, yes: bool) -> &mut Builder { - self.ascii_case_insensitive = yes; - self - } - - /// Set the limit on how many states use a dense representation for their - /// transitions. Other states will generally use a sparse representation. - /// - /// See - /// [`AhoCorasickBuilder::dense_depth`](crate::AhoCorasickBuilder::dense_depth) - /// for more documentation and examples. - pub fn dense_depth(&mut self, depth: usize) -> &mut Builder { - self.dense_depth = depth; - self - } - - /// Enable heuristic prefilter optimizations. - /// - /// See - /// [`AhoCorasickBuilder::prefilter`](crate::AhoCorasickBuilder::prefilter) - /// for more documentation and examples. - pub fn prefilter(&mut self, yes: bool) -> &mut Builder { - self.prefilter = yes; - self - } -} - -/// A compiler uses a builder configuration and builds up the NFA formulation -/// of an Aho-Corasick automaton. This roughly corresponds to the standard -/// formulation described in textbooks, with some tweaks to support leftmost -/// searching. -#[derive(Debug)] -struct Compiler<'a> { - builder: &'a Builder, - prefilter: prefilter::Builder, - nfa: NFA, - byteset: ByteClassSet, -} - -impl<'a> Compiler<'a> { - fn new(builder: &'a Builder) -> Result<Compiler<'a>, BuildError> { - let prefilter = prefilter::Builder::new(builder.match_kind) - .ascii_case_insensitive(builder.ascii_case_insensitive); - Ok(Compiler { - builder, - prefilter, - nfa: NFA { - match_kind: builder.match_kind, - states: vec![], - sparse: vec![], - dense: vec![], - matches: vec![], - pattern_lens: vec![], - prefilter: None, - byte_classes: ByteClasses::singletons(), - min_pattern_len: usize::MAX, - max_pattern_len: 0, - special: Special::zero(), - }, - byteset: ByteClassSet::empty(), - }) - } - - fn compile<I, P>(mut self, patterns: I) -> Result<NFA, BuildError> - where - I: IntoIterator<Item = P>, - P: AsRef<[u8]>, - { - // Add dummy transition/match links, so that no valid link will point - // to another link at index 0. - self.nfa.sparse.push(Transition::default()); - self.nfa.matches.push(Match::default()); - // Add a dummy dense transition so that no states can have dense==0 - // represent a valid pointer to dense transitions. This permits - // dense==0 to be a sentinel indicating "no dense transitions." - self.nfa.dense.push(NFA::DEAD); - // the dead state, only used for leftmost and fixed to id==0 - self.nfa.alloc_state(0)?; - // the fail state, which is never entered and fixed to id==1 - self.nfa.alloc_state(0)?; - // unanchored start state, initially fixed to id==2 but later shuffled - // to appear after all non-start match states. - self.nfa.special.start_unanchored_id = self.nfa.alloc_state(0)?; - // anchored start state, initially fixed to id==3 but later shuffled - // to appear after unanchored start state. - self.nfa.special.start_anchored_id = self.nfa.alloc_state(0)?; - // Initialize the unanchored starting state in order to make it dense, - // and thus make transition lookups on this state faster. - self.init_unanchored_start_state()?; - // Set all transitions on the DEAD state to point to itself. This way, - // the DEAD state can never be escaped. 
It MUST be used as a sentinel - // in any correct search. - self.add_dead_state_loop()?; - // Build the base trie from the given patterns. - self.build_trie(patterns)?; - self.nfa.states.shrink_to_fit(); - // Turn our set of bytes into equivalent classes. This NFA - // implementation uses byte classes only for states that use a dense - // representation of transitions. (And that's why this comes before - // `self.densify()`, as the byte classes need to be set first.) - self.nfa.byte_classes = self.byteset.byte_classes(); - // Add transitions (and maybe matches) to the anchored starting state. - // The anchored starting state is used for anchored searches. The only - // mechanical difference between it and the unanchored start state is - // that missing transitions map to the DEAD state instead of the FAIL - // state. - self.set_anchored_start_state()?; - // Rewrite transitions to the FAIL state on the unanchored start state - // as self-transitions. This keeps the start state active at all times. - self.add_unanchored_start_state_loop(); - // Make some (possibly zero) states use a dense representation for - // transitions. It's important to do this right after the states - // and non-failure transitions are solidified. That way, subsequent - // accesses (particularly `fill_failure_transitions`) will benefit from - // the faster transition lookup in densified states. - self.densify()?; - // The meat of the Aho-Corasick algorithm: compute and write failure - // transitions. i.e., the state to move to when a transition isn't - // defined in the current state. These are epsilon transitions and thus - // make this formulation an NFA. - self.fill_failure_transitions()?; - // Handle a special case under leftmost semantics when at least one - // of the patterns is the empty string. - self.close_start_state_loop_for_leftmost(); - // Shuffle states so that we have DEAD, FAIL, MATCH, ..., START, START, - // NON-MATCH, ... This permits us to very quickly query the type of - // the state we're currently in during a search. - self.shuffle(); - self.nfa.prefilter = self.prefilter.build(); - // Store the maximum ID of all *relevant* special states. Start states - // are only relevant when we have a prefilter, otherwise, there is zero - // reason to care about whether a state is a start state or not during - // a search. Indeed, without a prefilter, we are careful to explicitly - // NOT care about start states, otherwise the search can ping pong - // between the unrolled loop and the handling of special-status states - // and destroy perf. - self.nfa.special.max_special_id = if self.nfa.prefilter.is_some() { - // Why the anchored starting state? Because we always put it - // after the unanchored starting state and it is therefore the - // maximum. Why put unanchored followed by anchored? No particular - // reason, but that's how the states are logically organized in the - // Thompson NFA implementation found in regex-automata. ¯\_(ツ)_/¯ - self.nfa.special.start_anchored_id - } else { - self.nfa.special.max_match_id - }; - self.nfa.sparse.shrink_to_fit(); - self.nfa.dense.shrink_to_fit(); - self.nfa.matches.shrink_to_fit(); - self.nfa.pattern_lens.shrink_to_fit(); - Ok(self.nfa) - } - - /// This sets up the initial prefix trie that makes up the Aho-Corasick - /// automaton. Effectively, it creates the basic structure of the - /// automaton, where every pattern given has a path from the start state to - /// the end of the pattern. 
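The trie-building step described above can be sketched with a toy `HashMap`-based trie. This is illustrative only; the real NFA uses the index-linked-list representation and additionally handles leftmost semantics, byte classes, and ASCII case folding.

```rust
use std::collections::HashMap;

// Toy trie, purely illustrative: each state maps bytes to child states and
// records the IDs of patterns that end there.
#[derive(Default)]
struct TrieState {
    next: HashMap<u8, usize>,
    matches: Vec<usize>,
}

fn build_trie(patterns: &[&[u8]]) -> Vec<TrieState> {
    let mut states = vec![TrieState::default()]; // state 0 is the root
    for (pid, pat) in patterns.iter().enumerate() {
        let mut cur = 0;
        for &b in *pat {
            let existing = states[cur].next.get(&b).copied();
            cur = match existing {
                Some(next) => next,
                None => {
                    let next = states.len();
                    states.push(TrieState::default());
                    states[cur].next.insert(b, next);
                    next
                }
            };
        }
        states[cur].matches.push(pid);
    }
    states
}

fn main() {
    let patterns: &[&[u8]] = &[b"abcd", b"cef"];
    let states = build_trie(patterns);
    // 1 root + 4 states for "abcd" + 3 for "cef" (no shared prefixes).
    assert_eq!(8, states.len());
    assert_eq!(vec![0], states[4].matches); // "abcd" ends in state 4
}
```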
- fn build_trie<I, P>(&mut self, patterns: I) -> Result<(), BuildError> - where - I: IntoIterator<Item = P>, - P: AsRef<[u8]>, - { - 'PATTERNS: for (i, pat) in patterns.into_iter().enumerate() { - let pid = PatternID::new(i).map_err(|e| { - BuildError::pattern_id_overflow( - PatternID::MAX.as_u64(), - e.attempted(), - ) - })?; - let pat = pat.as_ref(); - let patlen = SmallIndex::new(pat.len()) - .map_err(|_| BuildError::pattern_too_long(pid, pat.len()))?; - self.nfa.min_pattern_len = - core::cmp::min(self.nfa.min_pattern_len, pat.len()); - self.nfa.max_pattern_len = - core::cmp::max(self.nfa.max_pattern_len, pat.len()); - assert_eq!( - i, - self.nfa.pattern_lens.len(), - "expected number of patterns to match pattern ID" - ); - self.nfa.pattern_lens.push(patlen); - // We add the pattern to the prefilter here because the pattern - // ID in the prefilter is determined with respect to the patterns - // added to the prefilter. That is, it isn't the ID we have here, - // but the one determined by its own accounting of patterns. - // To ensure they line up, we add every pattern we see to the - // prefilter, even if some patterns ultimately are impossible to - // match (in leftmost-first semantics specifically). - // - // Another way of doing this would be to expose an API in the - // prefilter to permit setting your own pattern IDs. Or to just use - // our own map and go between them. But this case is sufficiently - // rare that we don't bother and just make sure they're in sync. - if self.builder.prefilter { - self.prefilter.add(pat); - } - - let mut prev = self.nfa.special.start_unanchored_id; - let mut saw_match = false; - for (depth, &b) in pat.iter().enumerate() { - // When leftmost-first match semantics are requested, we - // specifically stop adding patterns when a previously added - // pattern is a prefix of it. We avoid adding it because - // leftmost-first semantics imply that the pattern can never - // match. This is not just an optimization to save space! It - // is necessary for correctness. In fact, this is the only - // difference in the automaton between the implementations for - // leftmost-first and leftmost-longest. - saw_match = saw_match || self.nfa.states[prev].is_match(); - if self.builder.match_kind.is_leftmost_first() && saw_match { - // Skip to the next pattern immediately. This avoids - // incorrectly adding a match after this loop terminates. - continue 'PATTERNS; - } - - // Add this byte to our equivalence classes. These don't - // get used while building the trie, but other Aho-Corasick - // implementations may use them. - self.byteset.set_range(b, b); - if self.builder.ascii_case_insensitive { - let b = opposite_ascii_case(b); - self.byteset.set_range(b, b); - } - - // If the transition from prev using the current byte already - // exists, then just move through it. Otherwise, add a new - // state. We track the depth here so that we can determine - // how to represent transitions. States near the start state - // use a dense representation that uses more memory but is - // faster. Other states use a sparse representation that uses - // less memory but is slower. 
- let next = self.nfa.follow_transition(prev, b); - if next != NFA::FAIL { - prev = next; - } else { - let next = self.nfa.alloc_state(depth)?; - self.nfa.add_transition(prev, b, next)?; - if self.builder.ascii_case_insensitive { - let b = opposite_ascii_case(b); - self.nfa.add_transition(prev, b, next)?; - } - prev = next; - } - } - // Once the pattern has been added, log the match in the final - // state that it reached. - self.nfa.add_match(prev, pid)?; - } - Ok(()) - } - - /// This routine creates failure transitions according to the standard - /// textbook formulation of the Aho-Corasick algorithm, with a couple small - /// tweaks to support "leftmost" semantics. - /// - /// Building failure transitions is the most interesting part of building - /// the Aho-Corasick automaton, because they are what allow searches to - /// be performed in linear time. Specifically, a failure transition is - /// a single transition associated with each state that points back to - /// the longest proper suffix of the pattern being searched. The failure - /// transition is followed whenever there exists no transition on the - /// current state for the current input byte. If there is no other proper - /// suffix, then the failure transition points back to the starting state. - /// - /// For example, let's say we built an Aho-Corasick automaton with the - /// following patterns: 'abcd' and 'cef'. The trie looks like this: - /// - /// ```ignore - /// a - S1 - b - S2 - c - S3 - d - S4* - /// / - /// S0 - c - S5 - e - S6 - f - S7* - /// ``` - /// - /// At this point, it should be fairly straight-forward to see how this - /// trie can be used in a simplistic way. At any given position in the - /// text we're searching (called the "subject" string), all we need to do - /// is follow the transitions in the trie by consuming one transition for - /// each byte in the subject string. If we reach a match state, then we can - /// report that location as a match. - /// - /// The trick comes when searching a subject string like 'abcef'. We'll - /// initially follow the transition from S0 to S1 and wind up in S3 after - /// observng the 'c' byte. At this point, the next byte is 'e' but state - /// S3 has no transition for 'e', so the search fails. We then would need - /// to restart the search at the next position in 'abcef', which - /// corresponds to 'b'. The match would fail, but the next search starting - /// at 'c' would finally succeed. The problem with this approach is that - /// we wind up searching the subject string potentially many times. In - /// effect, this makes the algorithm have worst case `O(n * m)` complexity, - /// where `n ~ len(subject)` and `m ~ len(all patterns)`. We would instead - /// like to achieve a `O(n + m)` worst case complexity. - /// - /// This is where failure transitions come in. Instead of dying at S3 in - /// the first search, the automaton can instruct the search to move to - /// another part of the automaton that corresponds to a suffix of what - /// we've seen so far. Recall that we've seen 'abc' in the subject string, - /// and the automaton does indeed have a non-empty suffix, 'c', that could - /// potentially lead to another match. 
Thus, the actual Aho-Corasick - /// automaton for our patterns in this case looks like this: - /// - /// ```ignore - /// a - S1 - b - S2 - c - S3 - d - S4* - /// / / - /// / ---------------- - /// / / - /// S0 - c - S5 - e - S6 - f - S7* - /// ``` - /// - /// That is, we have a failure transition from S3 to S5, which is followed - /// exactly in cases when we are in state S3 but see any byte other than - /// 'd' (that is, we've "failed" to find a match in this portion of our - /// trie). We know we can transition back to S5 because we've already seen - /// a 'c' byte, so we don't need to re-scan it. We can then pick back up - /// with the search starting at S5 and complete our match. - /// - /// Adding failure transitions to a trie is fairly simple, but subtle. The - /// key issue is that you might have multiple failure transition that you - /// need to follow. For example, look at the trie for the patterns - /// 'abcd', 'b', 'bcd' and 'cd': - /// - /// ```ignore - /// - a - S1 - b - S2* - c - S3 - d - S4* - /// / / / - /// / ------- ------- - /// / / / - /// S0 --- b - S5* - c - S6 - d - S7* - /// \ / - /// \ -------- - /// \ / - /// - c - S8 - d - S9* - /// ``` - /// - /// The failure transitions for this trie are defined from S2 to S5, - /// S3 to S6 and S6 to S8. Moreover, state S2 needs to track that it - /// corresponds to a match, since its failure transition to S5 is itself - /// a match state. - /// - /// Perhaps simplest way to think about adding these failure transitions - /// is recursively. That is, if you know the failure transitions for every - /// possible previous state that could be visited (e.g., when computing the - /// failure transition for S3, you already know the failure transitions - /// for S0, S1 and S2), then you can simply follow the failure transition - /// of the previous state and check whether the incoming transition is - /// defined after following the failure transition. - /// - /// For example, when determining the failure state for S3, by our - /// assumptions, we already know that there is a failure transition from - /// S2 (the previous state) to S5. So we follow that transition and check - /// whether the transition connecting S2 to S3 is defined. Indeed, it is, - /// as there is a transition from S5 to S6 for the byte 'c'. If no such - /// transition existed, we could keep following the failure transitions - /// until we reach the start state, which is the failure transition for - /// every state that has no corresponding proper suffix. - /// - /// We don't actually use recursion to implement this, but instead, use a - /// breadth first search of the automaton. Our base case is the start - /// state, whose failure transition is just a transition to itself. - /// - /// When building a leftmost automaton, we proceed as above, but only - /// include a subset of failure transitions. Namely, we omit any failure - /// transitions that appear after a match state in the trie. This is - /// because failure transitions always point back to a proper suffix of - /// what has been seen so far. Thus, following a failure transition after - /// a match implies looking for a match that starts after the one that has - /// already been seen, which is of course therefore not the leftmost match. - /// - /// N.B. I came up with this algorithm on my own, and after scouring all of - /// the other AC implementations I know of (Perl, Snort, many on GitHub). - /// I couldn't find any that implement leftmost semantics like this. 
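The 'abcd'/'cef' example above can be exercised directly through the public API: a single left-to-right scan of "abcef" still finds "cef", which is precisely what the failure transition from the "abc" state to the "c" state buys.

```rust
use aho_corasick::AhoCorasick;

fn main() {
    let ac = AhoCorasick::new(["abcd", "cef"]).unwrap();
    // One pass over "abcef": after "abc" fails on 'e', the failure transition
    // to the "c" state keeps the scan going and "cef" is still found.
    let mat = ac.find("abcef").unwrap();
    assert_eq!(1, mat.pattern().as_usize());
    assert_eq!(2, mat.start());
    assert_eq!(5, mat.end());
}
```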
- /// Perl of course needs leftmost-first semantics, but they implement it - /// with a seeming hack at *search* time instead of encoding it into the - /// automaton. There are also a couple Java libraries that support leftmost - /// longest semantics, but they do it by building a queue of matches at - /// search time, which is even worse than what Perl is doing. ---AG - fn fill_failure_transitions(&mut self) -> Result<(), BuildError> { - let is_leftmost = self.builder.match_kind.is_leftmost(); - let start_uid = self.nfa.special.start_unanchored_id; - // Initialize the queue for breadth first search with all transitions - // out of the start state. We handle the start state specially because - // we only want to follow non-self transitions. If we followed self - // transitions, then this would never terminate. - let mut queue = VecDeque::new(); - let mut seen = self.queued_set(); - let mut prev_link = None; - while let Some(link) = self.nfa.next_link(start_uid, prev_link) { - prev_link = Some(link); - let t = self.nfa.sparse[link]; - - // Skip anything we've seen before and any self-transitions on the - // start state. - if start_uid == t.next() || seen.contains(t.next) { - continue; - } - queue.push_back(t.next); - seen.insert(t.next); - // Under leftmost semantics, if a state immediately following - // the start state is a match state, then we never want to - // follow its failure transition since the failure transition - // necessarily leads back to the start state, which we never - // want to do for leftmost matching after a match has been - // found. - // - // We apply the same logic to non-start states below as well. - if is_leftmost && self.nfa.states[t.next].is_match() { - self.nfa.states[t.next].fail = NFA::DEAD; - } - } - while let Some(id) = queue.pop_front() { - let mut prev_link = None; - while let Some(link) = self.nfa.next_link(id, prev_link) { - prev_link = Some(link); - let t = self.nfa.sparse[link]; - - if seen.contains(t.next) { - // The only way to visit a duplicate state in a transition - // list is when ASCII case insensitivity is enabled. In - // this case, we want to skip it since it's redundant work. - // But it would also end up duplicating matches, which - // results in reporting duplicate matches in some cases. - // See the 'acasei010' regression test. - continue; - } - queue.push_back(t.next); - seen.insert(t.next); - - // As above for start states, under leftmost semantics, once - // we see a match all subsequent states should have no failure - // transitions because failure transitions always imply looking - // for a match that is a suffix of what has been seen so far - // (where "seen so far" corresponds to the string formed by - // following the transitions from the start state to the - // current state). Under leftmost semantics, we specifically do - // not want to allow this to happen because we always want to - // report the match found at the leftmost position. - // - // The difference between leftmost-first and leftmost-longest - // occurs previously while we build the trie. For - // leftmost-first, we simply omit any entries that would - // otherwise require passing through a match state. - // - // Note that for correctness, the failure transition has to be - // set to the dead state for ALL states following a match, not - // just the match state itself. 
However, by setting the failure - // transition to the dead state on all match states, the dead - // state will automatically propagate to all subsequent states - // via the failure state computation below. - if is_leftmost && self.nfa.states[t.next].is_match() { - self.nfa.states[t.next].fail = NFA::DEAD; - continue; - } - let mut fail = self.nfa.states[id].fail; - while self.nfa.follow_transition(fail, t.byte) == NFA::FAIL { - fail = self.nfa.states[fail].fail; - } - fail = self.nfa.follow_transition(fail, t.byte); - self.nfa.states[t.next].fail = fail; - self.nfa.copy_matches(fail, t.next)?; - } - // If the start state is a match state, then this automaton can - // match the empty string. This implies all states are match states - // since every position matches the empty string, so copy the - // matches from the start state to every state. Strictly speaking, - // this is only necessary for overlapping matches since each - // non-empty non-start match state needs to report empty matches - // in addition to its own. For the non-overlapping case, such - // states only report the first match, which is never empty since - // it isn't a start state. - if !is_leftmost { - self.nfa - .copy_matches(self.nfa.special.start_unanchored_id, id)?; - } - } - Ok(()) - } - - /// Shuffle the states so that they appear in this sequence: - /// - /// DEAD, FAIL, MATCH..., START, START, NON-MATCH... - /// - /// The idea here is that if we know how special states are laid out in our - /// transition table, then we can determine what "kind" of state we're in - /// just by comparing our current state ID with a particular value. In this - /// way, we avoid doing extra memory lookups. - /// - /// Before shuffling begins, our states look something like this: - /// - /// DEAD, FAIL, START, START, (MATCH | NON-MATCH)... - /// - /// So all we need to do is move all of the MATCH states so that they - /// all appear before any NON-MATCH state, like so: - /// - /// DEAD, FAIL, START, START, MATCH... NON-MATCH... - /// - /// Then it's just a simple matter of swapping the two START states with - /// the last two MATCH states. - /// - /// (This is the same technique used for fully compiled DFAs in - /// regex-automata.) - fn shuffle(&mut self) { - let old_start_uid = self.nfa.special.start_unanchored_id; - let old_start_aid = self.nfa.special.start_anchored_id; - assert!(old_start_uid < old_start_aid); - assert_eq!( - 3, - old_start_aid.as_usize(), - "anchored start state should be at index 3" - ); - // We implement shuffling by a sequence of pairwise swaps of states. - // Since we have a number of things referencing states via their - // IDs and swapping them changes their IDs, we need to record every - // swap we make so that we can remap IDs. The remapper handles this - // book-keeping for us. - let mut remapper = Remapper::new(&self.nfa, 0); - // The way we proceed here is by moving all match states so that - // they directly follow the start states. So it will go: DEAD, FAIL, - // START-UNANCHORED, START-ANCHORED, MATCH, ..., NON-MATCH, ... - // - // To do that, we proceed forward through all states after - // START-ANCHORED and swap match states so that they appear before all - // non-match states. 
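
The construction described above has two phases: add each pattern to a trie, then run a breadth-first pass that fills in failure transitions and propagates matches along them. Below is a deliberately small, standard-semantics sketch of those two phases plus the resulting linear-time search. It is illustrative only: it uses a plain HashMap trie instead of the crate's sparse/dense states, omits the DEAD/FAIL sentinels, the anchored start state and the leftmost tweaks, and reuses the 'abcd'/'cef' example from the comments above.

```rust
use std::collections::{HashMap, VecDeque};

// One trie node: byte transitions, a failure link, and the IDs of patterns
// reported at this node.
#[derive(Default)]
struct Node {
    next: HashMap<u8, usize>,
    fail: usize,
    matches: Vec<usize>,
}

fn build(patterns: &[&[u8]]) -> Vec<Node> {
    let mut nodes = vec![Node::default()]; // index 0 is the start state
    // Phase 1: insert every pattern into the trie.
    for (pid, pat) in patterns.iter().enumerate() {
        let mut cur = 0;
        for &b in *pat {
            let existing = nodes[cur].next.get(&b).copied();
            cur = match existing {
                Some(next) => next,
                None => {
                    nodes.push(Node::default());
                    let next = nodes.len() - 1;
                    nodes[cur].next.insert(b, next);
                    next
                }
            };
        }
        nodes[cur].matches.push(pid);
    }
    // Phase 2: breadth-first pass. Children of the root fail back to the
    // root; deeper nodes follow the parent's failure link until some state
    // has a transition on the same byte, and inherit that state's matches.
    let mut queue: VecDeque<usize> = nodes[0].next.values().copied().collect();
    while let Some(id) = queue.pop_front() {
        let transitions: Vec<(u8, usize)> =
            nodes[id].next.iter().map(|(&b, &n)| (b, n)).collect();
        for (b, child) in transitions {
            let mut fail = nodes[id].fail;
            while fail != 0 && !nodes[fail].next.contains_key(&b) {
                fail = nodes[fail].fail;
            }
            let target = nodes[fail].next.get(&b).copied().unwrap_or(0);
            nodes[child].fail = target;
            let inherited = nodes[target].matches.clone();
            nodes[child].matches.extend(inherited);
            queue.push_back(child);
        }
    }
    nodes
}

// Linear-time search: follow failure links instead of restarting.
fn find_all(nodes: &[Node], haystack: &[u8]) -> Vec<(usize, usize)> {
    let (mut cur, mut out) = (0, Vec::new());
    for (i, &b) in haystack.iter().enumerate() {
        while cur != 0 && !nodes[cur].next.contains_key(&b) {
            cur = nodes[cur].fail;
        }
        cur = nodes[cur].next.get(&b).copied().unwrap_or(0);
        for &pid in &nodes[cur].matches {
            out.push((pid, i + 1)); // (pattern id, end offset)
        }
    }
    out
}

fn main() {
    let nodes = build(&[&b"abcd"[..], &b"cef"[..]]);
    // Searching "abcef" follows the S3 -> S5 failure link described above.
    assert_eq!(find_all(&nodes, b"abcef"), vec![(1, 5)]);
}
```
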
- let mut next_avail = StateID::from(4u8); - for i in next_avail.as_usize()..self.nfa.states.len() { - let sid = StateID::new(i).unwrap(); - if !self.nfa.states[sid].is_match() { - continue; - } - remapper.swap(&mut self.nfa, sid, next_avail); - // The key invariant here is that only non-match states exist - // between 'next_avail' and 'sid' (with them being potentially - // equivalent). Thus, incrementing 'next_avail' by 1 is guaranteed - // to land on the leftmost non-match state. (Unless 'next_avail' - // and 'sid' are equivalent, in which case, a swap will occur but - // it is a no-op.) - next_avail = StateID::new(next_avail.one_more()).unwrap(); - } - // Now we'd like to move the start states to immediately following the - // match states. (The start states may themselves be match states, but - // we'll handle that later.) We arrange the states this way so that we - // don't necessarily need to check whether a state is a start state or - // not before checking whether a state is a match state. For example, - // we'd like to be able to write this as our state machine loop: - // - // sid = start() - // for byte in haystack: - // sid = next(sid, byte) - // if sid <= nfa.max_start_id: - // if sid <= nfa.max_dead_id: - // # search complete - // elif sid <= nfa.max_match_id: - // # found match - // - // The important context here is that we might not want to look for - // start states at all. Namely, if a searcher doesn't have a prefilter, - // then there is no reason to care about whether we're in a start state - // or not. And indeed, if we did check for it, this very hot loop would - // ping pong between the special state handling and the main state - // transition logic. This in turn stalls the CPU by killing branch - // prediction. - // - // So essentially, we really want to be able to "forget" that start - // states even exist and this is why we put them at the end. - let new_start_aid = - StateID::new(next_avail.as_usize().checked_sub(1).unwrap()) - .unwrap(); - remapper.swap(&mut self.nfa, old_start_aid, new_start_aid); - let new_start_uid = - StateID::new(next_avail.as_usize().checked_sub(2).unwrap()) - .unwrap(); - remapper.swap(&mut self.nfa, old_start_uid, new_start_uid); - let new_max_match_id = - StateID::new(next_avail.as_usize().checked_sub(3).unwrap()) - .unwrap(); - self.nfa.special.max_match_id = new_max_match_id; - self.nfa.special.start_unanchored_id = new_start_uid; - self.nfa.special.start_anchored_id = new_start_aid; - // If one start state is a match state, then they both are. - if self.nfa.states[self.nfa.special.start_anchored_id].is_match() { - self.nfa.special.max_match_id = self.nfa.special.start_anchored_id; - } - remapper.remap(&mut self.nfa); - } - - /// Attempts to convert the transition representation of a subset of states - /// in this NFA from sparse to dense. This can greatly improve search - /// performance since states with a higher number of transitions tend to - /// correlate with very active states. - /// - /// We generally only densify states that are close to the start state. - /// These tend to be the most active states and thus benefit from a dense - /// representation more than other states. - /// - /// This tends to best balance between memory usage and performance. In - /// particular, the *vast majority* of all states in a typical Aho-Corasick - /// automaton have only 1 transition and are usually farther from the start - /// state and thus don't get densified. 
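
The payoff of this layout is the ID-comparison check sketched in the loop pseudocode above: the hot path pays for a single comparison unless a special state is entered. A minimal, hypothetical rendering of that check (the field names and concrete IDs below are invented for illustration; FAIL is a sentinel that a search never actually enters):

```rust
#[derive(Clone, Copy)]
struct Special {
    max_dead_id: u32,  // DEAD sits at the very front of the layout
    max_match_id: u32, // last match state
    max_start_id: u32, // last start state, i.e. the last special state
}

enum Step {
    Done,     // entered the dead state: stop searching
    Matched,  // entered a match state: report a match
    Continue, // ordinary or start state: keep transitioning
}

fn classify(special: Special, sid: u32) -> Step {
    if sid <= special.max_start_id {
        if sid <= special.max_dead_id {
            Step::Done
        } else if sid <= special.max_match_id {
            Step::Matched
        } else {
            Step::Continue
        }
    } else {
        Step::Continue
    }
}

fn main() {
    // Hypothetical layout after shuffling:
    // 0=DEAD, 1=FAIL, 2..=4 match states, 5..=6 start states, 7.. ordinary.
    let special = Special { max_dead_id: 0, max_match_id: 4, max_start_id: 6 };
    assert!(matches!(classify(special, 3), Step::Matched));
    assert!(matches!(classify(special, 9), Step::Continue));
}
```
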
- /// - /// Note that this doesn't remove the sparse representation of transitions - /// for states that are densified. It could be done, but actually removing - /// entries from `NFA::sparse` is likely more expensive than it's worth. - fn densify(&mut self) -> Result<(), BuildError> { - for i in 0..self.nfa.states.len() { - let sid = StateID::new(i).unwrap(); - // Don't bother densifying states that are only used as sentinels. - if sid == NFA::DEAD || sid == NFA::FAIL { - continue; - } - // Only densify states that are "close enough" to the start state. - if self.nfa.states[sid].depth.as_usize() - >= self.builder.dense_depth - { - continue; - } - let dense = self.nfa.alloc_dense_state()?; - let mut prev_link = None; - while let Some(link) = self.nfa.next_link(sid, prev_link) { - prev_link = Some(link); - let t = self.nfa.sparse[link]; - - let class = usize::from(self.nfa.byte_classes.get(t.byte)); - let index = dense.as_usize() + class; - self.nfa.dense[index] = t.next; - } - self.nfa.states[sid].dense = dense; - } - Ok(()) - } - - /// Returns a set that tracked queued states. - /// - /// This is only necessary when ASCII case insensitivity is enabled, since - /// it is the only way to visit the same state twice. Otherwise, this - /// returns an inert set that nevers adds anything and always reports - /// `false` for every member test. - fn queued_set(&self) -> QueuedSet { - if self.builder.ascii_case_insensitive { - QueuedSet::active() - } else { - QueuedSet::inert() - } - } - - /// Initializes the unanchored start state by making it dense. This is - /// achieved by explicitly setting every transition to the FAIL state. - /// This isn't necessary for correctness, since any missing transition is - /// automatically assumed to be mapped to the FAIL state. We do this to - /// make the unanchored starting state dense, and thus in turn make - /// transition lookups on it faster. (Which is worth doing because it's - /// the most active state.) - fn init_unanchored_start_state(&mut self) -> Result<(), BuildError> { - let start_uid = self.nfa.special.start_unanchored_id; - let start_aid = self.nfa.special.start_anchored_id; - self.nfa.init_full_state(start_uid, NFA::FAIL)?; - self.nfa.init_full_state(start_aid, NFA::FAIL)?; - Ok(()) - } - - /// Setup the anchored start state by copying all of the transitions and - /// matches from the unanchored starting state with one change: the failure - /// transition is changed to the DEAD state, so that for any undefined - /// transitions, the search will stop. - fn set_anchored_start_state(&mut self) -> Result<(), BuildError> { - let start_uid = self.nfa.special.start_unanchored_id; - let start_aid = self.nfa.special.start_anchored_id; - let (mut uprev_link, mut aprev_link) = (None, None); - loop { - let unext = self.nfa.next_link(start_uid, uprev_link); - let anext = self.nfa.next_link(start_aid, aprev_link); - let (ulink, alink) = match (unext, anext) { - (Some(ulink), Some(alink)) => (ulink, alink), - (None, None) => break, - _ => unreachable!(), - }; - uprev_link = Some(ulink); - aprev_link = Some(alink); - self.nfa.sparse[alink].next = self.nfa.sparse[ulink].next; - } - self.nfa.copy_matches(start_uid, start_aid)?; - // This is the main difference between the unanchored and anchored - // starting states. If a lookup on an anchored starting state fails, - // then the search should stop. - // - // N.B. This assumes that the loop on the unanchored starting state - // hasn't been created yet. 
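
In effect, a densified state answers a transition lookup with one indexed load keyed by the byte's equivalence class, while an un-densified state still scans its sparse transition list. A rough sketch of the two lookup shapes, using invented types rather than the crate's `sparse`/`dense`/`next_link` representation:

```rust
const FAIL: u32 = 1; // sentinel returned when no transition is defined

struct State {
    // (byte, next) pairs scanned in order; always present.
    sparse: Vec<(u8, u32)>,
    // One slot per byte equivalence class; only for densified states.
    dense: Option<Vec<u32>>,
}

fn next_state(state: &State, classes: &[u8; 256], byte: u8) -> u32 {
    if let Some(dense) = &state.dense {
        // Densified: a single indexed load.
        dense[usize::from(classes[usize::from(byte)])]
    } else {
        // Sparse: walk the transition list until the byte is found.
        state
            .sparse
            .iter()
            .find(|&&(b, _)| b == byte)
            .map(|&(_, next)| next)
            .unwrap_or(FAIL)
    }
}

fn main() {
    let classes = [0u8; 256]; // trivial classing: every byte in class 0
    let shallow = State { sparse: vec![(b'a', 7)], dense: Some(vec![7]) };
    let deep = State { sparse: vec![(b'a', 7)], dense: None };
    assert_eq!(next_state(&shallow, &classes, b'a'), 7);
    assert_eq!(next_state(&deep, &classes, b'z'), FAIL);
}
```
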
- self.nfa.states[start_aid].fail = NFA::DEAD; - Ok(()) - } - - /// Set the failure transitions on the start state to loop back to the - /// start state. This effectively permits the Aho-Corasick automaton to - /// match at any position. This is also required for finding the next - /// state to terminate, namely, finding the next state should never return - /// a fail_id. - /// - /// This must be done after building the initial trie, since trie - /// construction depends on transitions to `fail_id` to determine whether a - /// state already exists or not. - fn add_unanchored_start_state_loop(&mut self) { - let start_uid = self.nfa.special.start_unanchored_id; - let mut prev_link = None; - while let Some(link) = self.nfa.next_link(start_uid, prev_link) { - prev_link = Some(link); - if self.nfa.sparse[link].next() == NFA::FAIL { - self.nfa.sparse[link].next = start_uid; - } - } - } - - /// Remove the start state loop by rewriting any transitions on the start - /// state back to the start state with transitions to the dead state. - /// - /// The loop is only closed when two conditions are met: the start state - /// is a match state and the match kind is leftmost-first or - /// leftmost-longest. - /// - /// The reason for this is that under leftmost semantics, a start state - /// that is also a match implies that we should never restart the search - /// process. We allow normal transitions out of the start state, but if - /// none exist, we transition to the dead state, which signals that - /// searching should stop. - fn close_start_state_loop_for_leftmost(&mut self) { - let start_uid = self.nfa.special.start_unanchored_id; - let start = &mut self.nfa.states[start_uid]; - let dense = start.dense; - if self.builder.match_kind.is_leftmost() && start.is_match() { - let mut prev_link = None; - while let Some(link) = self.nfa.next_link(start_uid, prev_link) { - prev_link = Some(link); - if self.nfa.sparse[link].next() == start_uid { - self.nfa.sparse[link].next = NFA::DEAD; - if dense != StateID::ZERO { - let b = self.nfa.sparse[link].byte; - let class = usize::from(self.nfa.byte_classes.get(b)); - self.nfa.dense[dense.as_usize() + class] = NFA::DEAD; - } - } - } - } - } - - /// Sets all transitions on the dead state to point back to the dead state. - /// Normally, missing transitions map back to the failure state, but the - /// point of the dead state is to act as a sink that can never be escaped. - fn add_dead_state_loop(&mut self) -> Result<(), BuildError> { - self.nfa.init_full_state(NFA::DEAD, NFA::DEAD)?; - Ok(()) - } -} - -/// A set of state identifiers used to avoid revisiting the same state multiple -/// times when filling in failure transitions. -/// -/// This set has an "inert" and an "active" mode. When inert, the set never -/// stores anything and always returns `false` for every member test. This is -/// useful to avoid the performance and memory overhead of maintaining this -/// set when it is not needed. -#[derive(Debug)] -struct QueuedSet { - set: Option<BTreeSet<StateID>>, -} - -impl QueuedSet { - /// Return an inert set that returns `false` for every state ID membership - /// test. - fn inert() -> QueuedSet { - QueuedSet { set: None } - } - - /// Return an active set that tracks state ID membership. - fn active() -> QueuedSet { - QueuedSet { set: Some(BTreeSet::new()) } - } - - /// Inserts the given state ID into this set. (If the set is inert, then - /// this is a no-op.) 
- fn insert(&mut self, state_id: StateID) { - if let Some(ref mut set) = self.set { - set.insert(state_id); - } - } - - /// Returns true if and only if the given state ID is in this set. If the - /// set is inert, this always returns false. - fn contains(&self, state_id: StateID) -> bool { - match self.set { - None => false, - Some(ref set) => set.contains(&state_id), - } - } -} - -impl core::fmt::Debug for NFA { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - use crate::{ - automaton::{fmt_state_indicator, sparse_transitions}, - util::debug::DebugByte, - }; - - writeln!(f, "noncontiguous::NFA(")?; - for (sid, state) in self.states.iter().with_state_ids() { - // The FAIL state doesn't actually have space for a state allocated - // for it, so we have to treat it as a special case. - if sid == NFA::FAIL { - writeln!(f, "F {:06}:", sid.as_usize())?; - continue; - } - fmt_state_indicator(f, self, sid)?; - write!( - f, - "{:06}({:06}): ", - sid.as_usize(), - state.fail.as_usize() - )?; - - let it = sparse_transitions( - self.iter_trans(sid).map(|t| (t.byte, t.next)), - ) - .enumerate(); - for (i, (start, end, sid)) in it { - if i > 0 { - write!(f, ", ")?; - } - if start == end { - write!( - f, - "{:?} => {:?}", - DebugByte(start), - sid.as_usize() - )?; - } else { - write!( - f, - "{:?}-{:?} => {:?}", - DebugByte(start), - DebugByte(end), - sid.as_usize() - )?; - } - } - - write!(f, "\n")?; - if self.is_match(sid) { - write!(f, " matches: ")?; - for (i, pid) in self.iter_matches(sid).enumerate() { - if i > 0 { - write!(f, ", ")?; - } - write!(f, "{}", pid.as_usize())?; - } - write!(f, "\n")?; - } - } - writeln!(f, "match kind: {:?}", self.match_kind)?; - writeln!(f, "prefilter: {:?}", self.prefilter.is_some())?; - writeln!(f, "state length: {:?}", self.states.len())?; - writeln!(f, "pattern length: {:?}", self.patterns_len())?; - writeln!(f, "shortest pattern length: {:?}", self.min_pattern_len)?; - writeln!(f, "longest pattern length: {:?}", self.max_pattern_len)?; - writeln!(f, "memory usage: {:?}", self.memory_usage())?; - writeln!(f, ")")?; - Ok(()) - } -} diff --git a/vendor/aho-corasick/src/packed/api.rs b/vendor/aho-corasick/src/packed/api.rs deleted file mode 100644 index 35ebf7e334da30..00000000000000 --- a/vendor/aho-corasick/src/packed/api.rs +++ /dev/null @@ -1,687 +0,0 @@ -use alloc::sync::Arc; - -use crate::{ - packed::{pattern::Patterns, rabinkarp::RabinKarp, teddy}, - util::search::{Match, Span}, -}; - -/// This is a limit placed on the total number of patterns we're willing to try -/// and match at once. As more sophisticated algorithms are added, this number -/// may be increased. -const PATTERN_LIMIT: usize = 128; - -/// A knob for controlling the match semantics of a packed multiple string -/// searcher. -/// -/// This differs from the [`MatchKind`](crate::MatchKind) type in the top-level -/// crate module in that it doesn't support "standard" match semantics, -/// and instead only supports leftmost-first or leftmost-longest. Namely, -/// "standard" semantics cannot be easily supported by packed searchers. -/// -/// For more information on the distinction between leftmost-first and -/// leftmost-longest, see the docs on the top-level `MatchKind` type. -/// -/// Unlike the top-level `MatchKind` type, the default match semantics for this -/// type are leftmost-first. -#[derive(Clone, Copy, Debug, Eq, PartialEq)] -#[non_exhaustive] -pub enum MatchKind { - /// Use leftmost-first match semantics, which reports leftmost matches. 
- /// When there are multiple possible leftmost matches, the match - /// corresponding to the pattern that appeared earlier when constructing - /// the automaton is reported. - /// - /// This is the default. - LeftmostFirst, - /// Use leftmost-longest match semantics, which reports leftmost matches. - /// When there are multiple possible leftmost matches, the longest match - /// is chosen. - LeftmostLongest, -} - -impl Default for MatchKind { - fn default() -> MatchKind { - MatchKind::LeftmostFirst - } -} - -/// The configuration for a packed multiple pattern searcher. -/// -/// The configuration is currently limited only to being able to select the -/// match semantics (leftmost-first or leftmost-longest) of a searcher. In the -/// future, more knobs may be made available. -/// -/// A configuration produces a [`packed::Builder`](Builder), which in turn can -/// be used to construct a [`packed::Searcher`](Searcher) for searching. -/// -/// # Example -/// -/// This example shows how to use leftmost-longest semantics instead of the -/// default (leftmost-first). -/// -/// ``` -/// use aho_corasick::{packed::{Config, MatchKind}, PatternID}; -/// -/// # fn example() -> Option<()> { -/// let searcher = Config::new() -/// .match_kind(MatchKind::LeftmostLongest) -/// .builder() -/// .add("foo") -/// .add("foobar") -/// .build()?; -/// let matches: Vec<PatternID> = searcher -/// .find_iter("foobar") -/// .map(|mat| mat.pattern()) -/// .collect(); -/// assert_eq!(vec![PatternID::must(1)], matches); -/// # Some(()) } -/// # if cfg!(all(feature = "std", any( -/// # target_arch = "x86_64", target_arch = "aarch64", -/// # ))) { -/// # example().unwrap() -/// # } else { -/// # assert!(example().is_none()); -/// # } -/// ``` -#[derive(Clone, Debug)] -pub struct Config { - kind: MatchKind, - force: Option<ForceAlgorithm>, - only_teddy_fat: Option<bool>, - only_teddy_256bit: Option<bool>, - heuristic_pattern_limits: bool, -} - -/// An internal option for forcing the use of a particular packed algorithm. -/// -/// When an algorithm is forced, if a searcher could not be constructed for it, -/// then no searcher will be returned even if an alternative algorithm would -/// work. -#[derive(Clone, Debug)] -enum ForceAlgorithm { - Teddy, - RabinKarp, -} - -impl Default for Config { - fn default() -> Config { - Config::new() - } -} - -impl Config { - /// Create a new default configuration. A default configuration uses - /// leftmost-first match semantics. - pub fn new() -> Config { - Config { - kind: MatchKind::LeftmostFirst, - force: None, - only_teddy_fat: None, - only_teddy_256bit: None, - heuristic_pattern_limits: true, - } - } - - /// Create a packed builder from this configuration. The builder can be - /// used to accumulate patterns and create a [`Searcher`] from them. - pub fn builder(&self) -> Builder { - Builder::from_config(self.clone()) - } - - /// Set the match semantics for this configuration. - pub fn match_kind(&mut self, kind: MatchKind) -> &mut Config { - self.kind = kind; - self - } - - /// An undocumented method for forcing the use of the Teddy algorithm. - /// - /// This is only exposed for more precise testing and benchmarks. Callers - /// should not use it as it is not part of the API stability guarantees of - /// this crate. - #[doc(hidden)] - pub fn only_teddy(&mut self, yes: bool) -> &mut Config { - if yes { - self.force = Some(ForceAlgorithm::Teddy); - } else { - self.force = None; - } - self - } - - /// An undocumented method for forcing the use of the Fat Teddy algorithm. 
- /// - /// This is only exposed for more precise testing and benchmarks. Callers - /// should not use it as it is not part of the API stability guarantees of - /// this crate. - #[doc(hidden)] - pub fn only_teddy_fat(&mut self, yes: Option<bool>) -> &mut Config { - self.only_teddy_fat = yes; - self - } - - /// An undocumented method for forcing the use of SSE (`Some(false)`) or - /// AVX (`Some(true)`) algorithms. - /// - /// This is only exposed for more precise testing and benchmarks. Callers - /// should not use it as it is not part of the API stability guarantees of - /// this crate. - #[doc(hidden)] - pub fn only_teddy_256bit(&mut self, yes: Option<bool>) -> &mut Config { - self.only_teddy_256bit = yes; - self - } - - /// An undocumented method for forcing the use of the Rabin-Karp algorithm. - /// - /// This is only exposed for more precise testing and benchmarks. Callers - /// should not use it as it is not part of the API stability guarantees of - /// this crate. - #[doc(hidden)] - pub fn only_rabin_karp(&mut self, yes: bool) -> &mut Config { - if yes { - self.force = Some(ForceAlgorithm::RabinKarp); - } else { - self.force = None; - } - self - } - - /// Request that heuristic limitations on the number of patterns be - /// employed. This useful to disable for benchmarking where one wants to - /// explore how Teddy performs on large number of patterns even if the - /// heuristics would otherwise refuse construction. - /// - /// This is enabled by default. - pub fn heuristic_pattern_limits(&mut self, yes: bool) -> &mut Config { - self.heuristic_pattern_limits = yes; - self - } -} - -/// A builder for constructing a packed searcher from a collection of patterns. -/// -/// # Example -/// -/// This example shows how to use a builder to construct a searcher. By -/// default, leftmost-first match semantics are used. -/// -/// ``` -/// use aho_corasick::{packed::{Builder, MatchKind}, PatternID}; -/// -/// # fn example() -> Option<()> { -/// let searcher = Builder::new() -/// .add("foobar") -/// .add("foo") -/// .build()?; -/// let matches: Vec<PatternID> = searcher -/// .find_iter("foobar") -/// .map(|mat| mat.pattern()) -/// .collect(); -/// assert_eq!(vec![PatternID::ZERO], matches); -/// # Some(()) } -/// # if cfg!(all(feature = "std", any( -/// # target_arch = "x86_64", target_arch = "aarch64", -/// # ))) { -/// # example().unwrap() -/// # } else { -/// # assert!(example().is_none()); -/// # } -/// ``` -#[derive(Clone, Debug)] -pub struct Builder { - /// The configuration of this builder and subsequent matcher. - config: Config, - /// Set to true if the builder detects that a matcher cannot be built. - inert: bool, - /// The patterns provided by the caller. - patterns: Patterns, -} - -impl Builder { - /// Create a new builder for constructing a multi-pattern searcher. This - /// constructor uses the default configuration. - pub fn new() -> Builder { - Builder::from_config(Config::new()) - } - - fn from_config(config: Config) -> Builder { - Builder { config, inert: false, patterns: Patterns::new() } - } - - /// Build a searcher from the patterns added to this builder so far. - pub fn build(&self) -> Option<Searcher> { - if self.inert || self.patterns.is_empty() { - return None; - } - let mut patterns = self.patterns.clone(); - patterns.set_match_kind(self.config.kind); - let patterns = Arc::new(patterns); - let rabinkarp = RabinKarp::new(&patterns); - // Effectively, we only want to return a searcher if we can use Teddy, - // since Teddy is our only fast packed searcher at the moment. 
- // Rabin-Karp is only used when searching haystacks smaller than what - // Teddy can support. Thus, the only way to get a Rabin-Karp searcher - // is to force it using undocumented APIs (for tests/benchmarks). - let (search_kind, minimum_len) = match self.config.force { - None | Some(ForceAlgorithm::Teddy) => { - debug!("trying to build Teddy packed matcher"); - let teddy = match self.build_teddy(Arc::clone(&patterns)) { - None => return None, - Some(teddy) => teddy, - }; - let minimum_len = teddy.minimum_len(); - (SearchKind::Teddy(teddy), minimum_len) - } - Some(ForceAlgorithm::RabinKarp) => { - debug!("using Rabin-Karp packed matcher"); - (SearchKind::RabinKarp, 0) - } - }; - Some(Searcher { patterns, rabinkarp, search_kind, minimum_len }) - } - - fn build_teddy(&self, patterns: Arc<Patterns>) -> Option<teddy::Searcher> { - teddy::Builder::new() - .only_256bit(self.config.only_teddy_256bit) - .only_fat(self.config.only_teddy_fat) - .heuristic_pattern_limits(self.config.heuristic_pattern_limits) - .build(patterns) - } - - /// Add the given pattern to this set to match. - /// - /// The order in which patterns are added is significant. Namely, when - /// using leftmost-first match semantics, then when multiple patterns can - /// match at a particular location, the pattern that was added first is - /// used as the match. - /// - /// If the number of patterns added exceeds the amount supported by packed - /// searchers, then the builder will stop accumulating patterns and render - /// itself inert. At this point, constructing a searcher will always return - /// `None`. - pub fn add<P: AsRef<[u8]>>(&mut self, pattern: P) -> &mut Builder { - if self.inert { - return self; - } else if self.patterns.len() >= PATTERN_LIMIT { - self.inert = true; - self.patterns.reset(); - return self; - } - // Just in case PATTERN_LIMIT increases beyond u16::MAX. - assert!(self.patterns.len() <= core::u16::MAX as usize); - - let pattern = pattern.as_ref(); - if pattern.is_empty() { - self.inert = true; - self.patterns.reset(); - return self; - } - self.patterns.add(pattern); - self - } - - /// Add the given iterator of patterns to this set to match. - /// - /// The iterator must yield elements that can be converted into a `&[u8]`. - /// - /// The order in which patterns are added is significant. Namely, when - /// using leftmost-first match semantics, then when multiple patterns can - /// match at a particular location, the pattern that was added first is - /// used as the match. - /// - /// If the number of patterns added exceeds the amount supported by packed - /// searchers, then the builder will stop accumulating patterns and render - /// itself inert. At this point, constructing a searcher will always return - /// `None`. - pub fn extend<I, P>(&mut self, patterns: I) -> &mut Builder - where - I: IntoIterator<Item = P>, - P: AsRef<[u8]>, - { - for p in patterns { - self.add(p); - } - self - } - - /// Returns the number of patterns added to this builder. - pub fn len(&self) -> usize { - self.patterns.len() - } - - /// Returns the length, in bytes, of the shortest pattern added. - pub fn minimum_len(&self) -> usize { - self.patterns.minimum_len() - } -} - -impl Default for Builder { - fn default() -> Builder { - Builder::new() - } -} - -/// A packed searcher for quickly finding occurrences of multiple patterns. 
-/// -/// If callers need more flexible construction, or if one wants to change the -/// match semantics (either leftmost-first or leftmost-longest), then one can -/// use the [`Config`] and/or [`Builder`] types for more fine grained control. -/// -/// # Example -/// -/// This example shows how to create a searcher from an iterator of patterns. -/// By default, leftmost-first match semantics are used. -/// -/// ``` -/// use aho_corasick::{packed::{MatchKind, Searcher}, PatternID}; -/// -/// # fn example() -> Option<()> { -/// let searcher = Searcher::new(["foobar", "foo"].iter().cloned())?; -/// let matches: Vec<PatternID> = searcher -/// .find_iter("foobar") -/// .map(|mat| mat.pattern()) -/// .collect(); -/// assert_eq!(vec![PatternID::ZERO], matches); -/// # Some(()) } -/// # if cfg!(all(feature = "std", any( -/// # target_arch = "x86_64", target_arch = "aarch64", -/// # ))) { -/// # example().unwrap() -/// # } else { -/// # assert!(example().is_none()); -/// # } -/// ``` -#[derive(Clone, Debug)] -pub struct Searcher { - patterns: Arc<Patterns>, - rabinkarp: RabinKarp, - search_kind: SearchKind, - minimum_len: usize, -} - -#[derive(Clone, Debug)] -enum SearchKind { - Teddy(teddy::Searcher), - RabinKarp, -} - -impl Searcher { - /// A convenience function for constructing a searcher from an iterator - /// of things that can be converted to a `&[u8]`. - /// - /// If a searcher could not be constructed (either because of an - /// unsupported CPU or because there are too many patterns), then `None` - /// is returned. - /// - /// # Example - /// - /// Basic usage: - /// - /// ``` - /// use aho_corasick::{packed::{MatchKind, Searcher}, PatternID}; - /// - /// # fn example() -> Option<()> { - /// let searcher = Searcher::new(["foobar", "foo"].iter().cloned())?; - /// let matches: Vec<PatternID> = searcher - /// .find_iter("foobar") - /// .map(|mat| mat.pattern()) - /// .collect(); - /// assert_eq!(vec![PatternID::ZERO], matches); - /// # Some(()) } - /// # if cfg!(all(feature = "std", any( - /// # target_arch = "x86_64", target_arch = "aarch64", - /// # ))) { - /// # example().unwrap() - /// # } else { - /// # assert!(example().is_none()); - /// # } - /// ``` - pub fn new<I, P>(patterns: I) -> Option<Searcher> - where - I: IntoIterator<Item = P>, - P: AsRef<[u8]>, - { - Builder::new().extend(patterns).build() - } - - /// A convenience function for calling `Config::new()`. - /// - /// This is useful for avoiding an additional import. - pub fn config() -> Config { - Config::new() - } - - /// A convenience function for calling `Builder::new()`. - /// - /// This is useful for avoiding an additional import. - pub fn builder() -> Builder { - Builder::new() - } - - /// Return the first occurrence of any of the patterns in this searcher, - /// according to its match semantics, in the given haystack. The `Match` - /// returned will include the identifier of the pattern that matched, which - /// corresponds to the index of the pattern (starting from `0`) in which it - /// was added. 
- /// - /// # Example - /// - /// Basic usage: - /// - /// ``` - /// use aho_corasick::{packed::{MatchKind, Searcher}, PatternID}; - /// - /// # fn example() -> Option<()> { - /// let searcher = Searcher::new(["foobar", "foo"].iter().cloned())?; - /// let mat = searcher.find("foobar")?; - /// assert_eq!(PatternID::ZERO, mat.pattern()); - /// assert_eq!(0, mat.start()); - /// assert_eq!(6, mat.end()); - /// # Some(()) } - /// # if cfg!(all(feature = "std", any( - /// # target_arch = "x86_64", target_arch = "aarch64", - /// # ))) { - /// # example().unwrap() - /// # } else { - /// # assert!(example().is_none()); - /// # } - /// ``` - #[inline] - pub fn find<B: AsRef<[u8]>>(&self, haystack: B) -> Option<Match> { - let haystack = haystack.as_ref(); - self.find_in(haystack, Span::from(0..haystack.len())) - } - - /// Return the first occurrence of any of the patterns in this searcher, - /// according to its match semantics, in the given haystack starting from - /// the given position. - /// - /// The `Match` returned will include the identifier of the pattern that - /// matched, which corresponds to the index of the pattern (starting from - /// `0`) in which it was added. The offsets in the `Match` will be relative - /// to the start of `haystack` (and not `at`). - /// - /// # Example - /// - /// Basic usage: - /// - /// ``` - /// use aho_corasick::{packed::{MatchKind, Searcher}, PatternID, Span}; - /// - /// # fn example() -> Option<()> { - /// let haystack = "foofoobar"; - /// let searcher = Searcher::new(["foobar", "foo"].iter().cloned())?; - /// let mat = searcher.find_in(haystack, Span::from(3..haystack.len()))?; - /// assert_eq!(PatternID::ZERO, mat.pattern()); - /// assert_eq!(3, mat.start()); - /// assert_eq!(9, mat.end()); - /// # Some(()) } - /// # if cfg!(all(feature = "std", any( - /// # target_arch = "x86_64", target_arch = "aarch64", - /// # ))) { - /// # example().unwrap() - /// # } else { - /// # assert!(example().is_none()); - /// # } - /// ``` - #[inline] - pub fn find_in<B: AsRef<[u8]>>( - &self, - haystack: B, - span: Span, - ) -> Option<Match> { - let haystack = haystack.as_ref(); - match self.search_kind { - SearchKind::Teddy(ref teddy) => { - if haystack[span].len() < teddy.minimum_len() { - return self.find_in_slow(haystack, span); - } - teddy.find(&haystack[..span.end], span.start) - } - SearchKind::RabinKarp => { - self.rabinkarp.find_at(&haystack[..span.end], span.start) - } - } - } - - /// Return an iterator of non-overlapping occurrences of the patterns in - /// this searcher, according to its match semantics, in the given haystack. 
- /// - /// # Example - /// - /// Basic usage: - /// - /// ``` - /// use aho_corasick::{packed::{MatchKind, Searcher}, PatternID}; - /// - /// # fn example() -> Option<()> { - /// let searcher = Searcher::new(["foobar", "foo"].iter().cloned())?; - /// let matches: Vec<PatternID> = searcher - /// .find_iter("foobar fooba foofoo") - /// .map(|mat| mat.pattern()) - /// .collect(); - /// assert_eq!(vec![ - /// PatternID::must(0), - /// PatternID::must(1), - /// PatternID::must(1), - /// PatternID::must(1), - /// ], matches); - /// # Some(()) } - /// # if cfg!(all(feature = "std", any( - /// # target_arch = "x86_64", target_arch = "aarch64", - /// # ))) { - /// # example().unwrap() - /// # } else { - /// # assert!(example().is_none()); - /// # } - /// ``` - #[inline] - pub fn find_iter<'a, 'b, B: ?Sized + AsRef<[u8]>>( - &'a self, - haystack: &'b B, - ) -> FindIter<'a, 'b> { - let haystack = haystack.as_ref(); - let span = Span::from(0..haystack.len()); - FindIter { searcher: self, haystack, span } - } - - /// Returns the match kind used by this packed searcher. - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// use aho_corasick::packed::{MatchKind, Searcher}; - /// - /// # fn example() -> Option<()> { - /// let searcher = Searcher::new(["foobar", "foo"].iter().cloned())?; - /// // leftmost-first is the default. - /// assert_eq!(&MatchKind::LeftmostFirst, searcher.match_kind()); - /// # Some(()) } - /// # if cfg!(all(feature = "std", any( - /// # target_arch = "x86_64", target_arch = "aarch64", - /// # ))) { - /// # example().unwrap() - /// # } else { - /// # assert!(example().is_none()); - /// # } - /// ``` - #[inline] - pub fn match_kind(&self) -> &MatchKind { - self.patterns.match_kind() - } - - /// Returns the minimum length of a haystack that is required in order for - /// packed searching to be effective. - /// - /// In some cases, the underlying packed searcher may not be able to search - /// very short haystacks. When that occurs, the implementation will defer - /// to a slower non-packed searcher (which is still generally faster than - /// Aho-Corasick for a small number of patterns). However, callers may - /// want to avoid ever using the slower variant, which one can do by - /// never passing a haystack shorter than the minimum length returned by - /// this method. - #[inline] - pub fn minimum_len(&self) -> usize { - self.minimum_len - } - - /// Returns the approximate total amount of heap used by this searcher, in - /// units of bytes. - #[inline] - pub fn memory_usage(&self) -> usize { - self.patterns.memory_usage() - + self.rabinkarp.memory_usage() - + self.search_kind.memory_usage() - } - - /// Use a slow (non-packed) searcher. - /// - /// This is useful when a packed searcher could be constructed, but could - /// not be used to search a specific haystack. For example, if Teddy was - /// built but the haystack is smaller than ~34 bytes, then Teddy might not - /// be able to run. - fn find_in_slow(&self, haystack: &[u8], span: Span) -> Option<Match> { - self.rabinkarp.find_at(&haystack[..span.end], span.start) - } -} - -impl SearchKind { - fn memory_usage(&self) -> usize { - match *self { - SearchKind::Teddy(ref ted) => ted.memory_usage(), - SearchKind::RabinKarp => 0, - } - } -} - -/// An iterator over non-overlapping matches from a packed searcher. -/// -/// The lifetime `'s` refers to the lifetime of the underlying [`Searcher`], -/// while the lifetime `'h` refers to the lifetime of the haystack being -/// searched. 
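
As the `minimum_len` documentation above notes, haystacks shorter than the packed searcher's minimum length are handed to the slower non-packed fallback. A caller who wants to stay on the vectorized path can gate on `minimum_len()`; a small sketch against the crate's public packed API (construction may return `None` on targets without packed support):

```rust
use aho_corasick::packed::Searcher;

// Only search when the haystack is long enough for the fast path; shorter
// haystacks are left to whatever strategy the caller prefers (here: None).
fn fast_path_only(searcher: &Searcher, haystack: &str) -> Option<(usize, usize)> {
    if haystack.len() < searcher.minimum_len() {
        return None;
    }
    searcher.find(haystack).map(|m| (m.start(), m.end()))
}

fn main() {
    // May be None on unsupported targets or with too many patterns.
    let searcher = Searcher::new(["foobar", "foo"]).expect("packed searcher unavailable");
    let haystack = format!("{}foobar", "x".repeat(64));
    // Leftmost-first: "foobar" was added first, so it wins.
    assert_eq!(fast_path_only(&searcher, &haystack), Some((64, 70)));
}
```
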
-#[derive(Debug)] -pub struct FindIter<'s, 'h> { - searcher: &'s Searcher, - haystack: &'h [u8], - span: Span, -} - -impl<'s, 'h> Iterator for FindIter<'s, 'h> { - type Item = Match; - - fn next(&mut self) -> Option<Match> { - if self.span.start > self.span.end { - return None; - } - match self.searcher.find_in(self.haystack, self.span) { - None => None, - Some(m) => { - self.span.start = m.end(); - Some(m) - } - } - } -} diff --git a/vendor/aho-corasick/src/packed/ext.rs b/vendor/aho-corasick/src/packed/ext.rs deleted file mode 100644 index b689642bca351b..00000000000000 --- a/vendor/aho-corasick/src/packed/ext.rs +++ /dev/null @@ -1,39 +0,0 @@ -/// A trait for adding some helper routines to pointers. -pub(crate) trait Pointer { - /// Returns the distance, in units of `T`, between `self` and `origin`. - /// - /// # Safety - /// - /// Same as `ptr::offset_from` in addition to `self >= origin`. - unsafe fn distance(self, origin: Self) -> usize; - - /// Casts this pointer to `usize`. - /// - /// Callers should not convert the `usize` back to a pointer if at all - /// possible. (And if you believe it's necessary, open an issue to discuss - /// why. Otherwise, it has the potential to violate pointer provenance.) - /// The purpose of this function is just to be able to do arithmetic, i.e., - /// computing offsets or alignments. - fn as_usize(self) -> usize; -} - -impl<T> Pointer for *const T { - unsafe fn distance(self, origin: *const T) -> usize { - // TODO: Replace with `ptr::sub_ptr` once stabilized. - usize::try_from(self.offset_from(origin)).unwrap_unchecked() - } - - fn as_usize(self) -> usize { - self as usize - } -} - -impl<T> Pointer for *mut T { - unsafe fn distance(self, origin: *mut T) -> usize { - (self as *const T).distance(origin as *const T) - } - - fn as_usize(self) -> usize { - (self as *const T).as_usize() - } -} diff --git a/vendor/aho-corasick/src/packed/mod.rs b/vendor/aho-corasick/src/packed/mod.rs deleted file mode 100644 index 3990bc9330f7fd..00000000000000 --- a/vendor/aho-corasick/src/packed/mod.rs +++ /dev/null @@ -1,120 +0,0 @@ -/*! -Provides packed multiple substring search, principally for a small number of -patterns. - -This sub-module provides vectorized routines for quickly finding -matches of a small number of patterns. In general, users of this crate -shouldn't need to interface with this module directly, as the primary -[`AhoCorasick`](crate::AhoCorasick) searcher will use these routines -automatically as a prefilter when applicable. However, in some cases, callers -may want to bypass the Aho-Corasick machinery entirely and use this vectorized -searcher directly. - -# Overview - -The primary types in this sub-module are: - -* [`Searcher`] executes the actual search algorithm to report matches in a -haystack. -* [`Builder`] accumulates patterns incrementally and can construct a -`Searcher`. -* [`Config`] permits tuning the searcher, and itself will produce a `Builder` -(which can then be used to build a `Searcher`). Currently, the only tuneable -knob are the match semantics, but this may be expanded in the future. - -# Examples - -This example shows how to create a searcher from an iterator of patterns. -By default, leftmost-first match semantics are used. (See the top-level -[`MatchKind`] type for more details about match semantics, which apply -similarly to packed substring search.) 
- -``` -use aho_corasick::{packed::{MatchKind, Searcher}, PatternID}; - -# fn example() -> Option<()> { -let searcher = Searcher::new(["foobar", "foo"].iter().cloned())?; -let matches: Vec<PatternID> = searcher - .find_iter("foobar") - .map(|mat| mat.pattern()) - .collect(); -assert_eq!(vec![PatternID::ZERO], matches); -# Some(()) } -# if cfg!(all(feature = "std", any( -# target_arch = "x86_64", target_arch = "aarch64", -# ))) { -# example().unwrap() -# } else { -# assert!(example().is_none()); -# } -``` - -This example shows how to use [`Config`] to change the match semantics to -leftmost-longest: - -``` -use aho_corasick::{packed::{Config, MatchKind}, PatternID}; - -# fn example() -> Option<()> { -let searcher = Config::new() - .match_kind(MatchKind::LeftmostLongest) - .builder() - .add("foo") - .add("foobar") - .build()?; -let matches: Vec<PatternID> = searcher - .find_iter("foobar") - .map(|mat| mat.pattern()) - .collect(); -assert_eq!(vec![PatternID::must(1)], matches); -# Some(()) } -# if cfg!(all(feature = "std", any( -# target_arch = "x86_64", target_arch = "aarch64", -# ))) { -# example().unwrap() -# } else { -# assert!(example().is_none()); -# } -``` - -# Packed substring searching - -Packed substring searching refers to the use of SIMD (Single Instruction, -Multiple Data) to accelerate the detection of matches in a haystack. Unlike -conventional algorithms, such as Aho-Corasick, SIMD algorithms for substring -search tend to do better with a small number of patterns, where as Aho-Corasick -generally maintains reasonably consistent performance regardless of the number -of patterns you give it. Because of this, the vectorized searcher in this -sub-module cannot be used as a general purpose searcher, since building the -searcher may fail even when given a small number of patterns. However, in -exchange, when searching for a small number of patterns, searching can be quite -a bit faster than Aho-Corasick (sometimes by an order of magnitude). - -The key take away here is that constructing a searcher from a list of patterns -is a fallible operation with no clear rules for when it will fail. While the -precise conditions under which building a searcher can fail is specifically an -implementation detail, here are some common reasons: - -* Too many patterns were given. Typically, the limit is on the order of 100 or - so, but this limit may fluctuate based on available CPU features. -* The available packed algorithms require CPU features that aren't available. - For example, currently, this crate only provides packed algorithms for - `x86_64` and `aarch64`. Therefore, constructing a packed searcher on any - other target will always fail. -* Zero patterns were given, or one of the patterns given was empty. Packed - searchers require at least one pattern and that all patterns are non-empty. -* Something else about the nature of the patterns (typically based on - heuristics) suggests that a packed searcher would perform very poorly, so - no searcher is built. 
-*/ - -pub use crate::packed::api::{Builder, Config, FindIter, MatchKind, Searcher}; - -mod api; -mod ext; -mod pattern; -mod rabinkarp; -mod teddy; -#[cfg(all(feature = "std", test))] -mod tests; -mod vector; diff --git a/vendor/aho-corasick/src/packed/pattern.rs b/vendor/aho-corasick/src/packed/pattern.rs deleted file mode 100644 index 14da87aabc9e04..00000000000000 --- a/vendor/aho-corasick/src/packed/pattern.rs +++ /dev/null @@ -1,480 +0,0 @@ -use core::{cmp, fmt, mem, u16, usize}; - -use alloc::{boxed::Box, string::String, vec, vec::Vec}; - -use crate::{ - packed::{api::MatchKind, ext::Pointer}, - PatternID, -}; - -/// A non-empty collection of non-empty patterns to search for. -/// -/// This collection of patterns is what is passed around to both execute -/// searches and to construct the searchers themselves. Namely, this permits -/// searches to avoid copying all of the patterns, and allows us to keep only -/// one copy throughout all packed searchers. -/// -/// Note that this collection is not a set. The same pattern can appear more -/// than once. -#[derive(Clone, Debug)] -pub(crate) struct Patterns { - /// The match semantics supported by this collection of patterns. - /// - /// The match semantics determines the order of the iterator over patterns. - /// For leftmost-first, patterns are provided in the same order as were - /// provided by the caller. For leftmost-longest, patterns are provided in - /// descending order of length, with ties broken by the order in which they - /// were provided by the caller. - kind: MatchKind, - /// The collection of patterns, indexed by their identifier. - by_id: Vec<Vec<u8>>, - /// The order of patterns defined for iteration, given by pattern - /// identifiers. The order of `by_id` and `order` is always the same for - /// leftmost-first semantics, but may be different for leftmost-longest - /// semantics. - order: Vec<PatternID>, - /// The length of the smallest pattern, in bytes. - minimum_len: usize, - /// The total number of pattern bytes across the entire collection. This - /// is used for reporting total heap usage in constant time. - total_pattern_bytes: usize, -} - -// BREADCRUMBS: I think we want to experiment with a different bucket -// representation. Basically, each bucket is just a Range<usize> to a single -// contiguous allocation? Maybe length-prefixed patterns or something? The -// idea is to try to get rid of the pointer chasing in verification. I don't -// know that that is the issue, but I suspect it is. - -impl Patterns { - /// Create a new collection of patterns for the given match semantics. The - /// ID of each pattern is the index of the pattern at which it occurs in - /// the `by_id` slice. - /// - /// If any of the patterns in the slice given are empty, then this panics. - /// Similarly, if the number of patterns given is zero, then this also - /// panics. - pub(crate) fn new() -> Patterns { - Patterns { - kind: MatchKind::default(), - by_id: vec![], - order: vec![], - minimum_len: usize::MAX, - total_pattern_bytes: 0, - } - } - - /// Add a pattern to this collection. - /// - /// This panics if the pattern given is empty. 
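
Because construction is fallible for the reasons listed in the module documentation above, callers who use the packed API directly usually keep the general-purpose automaton as a fallback (the top-level `AhoCorasick` does this wiring automatically when it uses these routines as a prefilter). A hedged sketch of that fallback pattern using the crate's public API:

```rust
use aho_corasick::{packed, AhoCorasick, MatchKind};

fn find_all(patterns: &[&str], haystack: &str) -> Vec<(usize, usize)> {
    // Try the vectorized packed searcher first; it may refuse construction.
    if let Some(searcher) = packed::Searcher::new(patterns.iter().copied()) {
        return searcher.find_iter(haystack).map(|m| (m.start(), m.end())).collect();
    }
    // Fall back to the general-purpose automaton, matching the packed
    // searcher's default semantics (leftmost-first) so results agree.
    let ac = AhoCorasick::builder()
        .match_kind(MatchKind::LeftmostFirst)
        .build(patterns)
        .expect("patterns should be valid");
    ac.find_iter(haystack).map(|m| (m.start(), m.end())).collect()
}

fn main() {
    assert_eq!(find_all(&["foobar", "foo"], "xfoobar"), vec![(1, 7)]);
}
```
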
- pub(crate) fn add(&mut self, bytes: &[u8]) { - assert!(!bytes.is_empty()); - assert!(self.by_id.len() <= u16::MAX as usize); - - let id = PatternID::new(self.by_id.len()).unwrap(); - self.order.push(id); - self.by_id.push(bytes.to_vec()); - self.minimum_len = cmp::min(self.minimum_len, bytes.len()); - self.total_pattern_bytes += bytes.len(); - } - - /// Set the match kind semantics for this collection of patterns. - /// - /// If the kind is not set, then the default is leftmost-first. - pub(crate) fn set_match_kind(&mut self, kind: MatchKind) { - self.kind = kind; - match self.kind { - MatchKind::LeftmostFirst => { - self.order.sort(); - } - MatchKind::LeftmostLongest => { - let (order, by_id) = (&mut self.order, &mut self.by_id); - order.sort_by(|&id1, &id2| { - by_id[id1].len().cmp(&by_id[id2].len()).reverse() - }); - } - } - } - - /// Return the number of patterns in this collection. - /// - /// This is guaranteed to be greater than zero. - pub(crate) fn len(&self) -> usize { - self.by_id.len() - } - - /// Returns true if and only if this collection of patterns is empty. - pub(crate) fn is_empty(&self) -> bool { - self.len() == 0 - } - - /// Returns the approximate total amount of heap used by these patterns, in - /// units of bytes. - pub(crate) fn memory_usage(&self) -> usize { - self.order.len() * mem::size_of::<PatternID>() - + self.by_id.len() * mem::size_of::<Vec<u8>>() - + self.total_pattern_bytes - } - - /// Clears all heap memory associated with this collection of patterns and - /// resets all state such that it is a valid empty collection. - pub(crate) fn reset(&mut self) { - self.kind = MatchKind::default(); - self.by_id.clear(); - self.order.clear(); - self.minimum_len = usize::MAX; - } - - /// Returns the length, in bytes, of the smallest pattern. - /// - /// This is guaranteed to be at least one. - pub(crate) fn minimum_len(&self) -> usize { - self.minimum_len - } - - /// Returns the match semantics used by these patterns. - pub(crate) fn match_kind(&self) -> &MatchKind { - &self.kind - } - - /// Return the pattern with the given identifier. If such a pattern does - /// not exist, then this panics. - pub(crate) fn get(&self, id: PatternID) -> Pattern<'_> { - Pattern(&self.by_id[id]) - } - - /// Return the pattern with the given identifier without performing bounds - /// checks. - /// - /// # Safety - /// - /// Callers must ensure that a pattern with the given identifier exists - /// before using this method. - pub(crate) unsafe fn get_unchecked(&self, id: PatternID) -> Pattern<'_> { - Pattern(self.by_id.get_unchecked(id.as_usize())) - } - - /// Return an iterator over all the patterns in this collection, in the - /// order in which they should be matched. - /// - /// Specifically, in a naive multi-pattern matcher, the following is - /// guaranteed to satisfy the match semantics of this collection of - /// patterns: - /// - /// ```ignore - /// for i in 0..haystack.len(): - /// for p in patterns.iter(): - /// if haystack[i..].starts_with(p.bytes()): - /// return Match(p.id(), i, i + p.bytes().len()) - /// ``` - /// - /// Namely, among the patterns in a collection, if they are matched in - /// the order provided by this iterator, then the result is guaranteed - /// to satisfy the correct match semantics. (Either leftmost-first or - /// leftmost-longest.) - pub(crate) fn iter(&self) -> PatternIter<'_> { - PatternIter { patterns: self, i: 0 } - } -} - -/// An iterator over the patterns in the `Patterns` collection. 
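
The pseudocode in the `iter` documentation above can be made concrete. The sketch below is a deliberately naive matcher over a plain slice of byte strings; it reports the correct leftmost match as long as the caller passes the patterns in the order the semantics dictate (insertion order for leftmost-first, length-descending for leftmost-longest), which is exactly the ordering `Patterns::iter` provides:

```rust
// Returns (index in the ordered list, start, end) of the first match, trying
// patterns at each position in the given order.
fn naive_find(ordered_patterns: &[&[u8]], haystack: &[u8]) -> Option<(usize, usize, usize)> {
    for i in 0..haystack.len() {
        for (idx, pat) in ordered_patterns.iter().enumerate() {
            if haystack[i..].starts_with(pat) {
                return Some((idx, i, i + pat.len()));
            }
        }
    }
    None
}

fn main() {
    // Leftmost-longest ordering: longer patterns first.
    let ordered: Vec<&[u8]> = vec![&b"foobar"[..], &b"foo"[..]];
    assert_eq!(naive_find(&ordered, b"xxfoobar"), Some((0, 2, 8)));
}
```
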
-/// -/// The order of the patterns provided by this iterator is consistent with the -/// match semantics of the originating collection of patterns. -/// -/// The lifetime `'p` corresponds to the lifetime of the collection of patterns -/// this is iterating over. -#[derive(Debug)] -pub(crate) struct PatternIter<'p> { - patterns: &'p Patterns, - i: usize, -} - -impl<'p> Iterator for PatternIter<'p> { - type Item = (PatternID, Pattern<'p>); - - fn next(&mut self) -> Option<(PatternID, Pattern<'p>)> { - if self.i >= self.patterns.len() { - return None; - } - let id = self.patterns.order[self.i]; - let p = self.patterns.get(id); - self.i += 1; - Some((id, p)) - } -} - -/// A pattern that is used in packed searching. -#[derive(Clone)] -pub(crate) struct Pattern<'a>(&'a [u8]); - -impl<'a> fmt::Debug for Pattern<'a> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("Pattern") - .field("lit", &String::from_utf8_lossy(self.0)) - .finish() - } -} - -impl<'p> Pattern<'p> { - /// Returns the length of this pattern, in bytes. - pub(crate) fn len(&self) -> usize { - self.0.len() - } - - /// Returns the bytes of this pattern. - pub(crate) fn bytes(&self) -> &[u8] { - self.0 - } - - /// Returns the first `len` low nybbles from this pattern. If this pattern - /// is shorter than `len`, then this panics. - pub(crate) fn low_nybbles(&self, len: usize) -> Box<[u8]> { - let mut nybs = vec![0; len].into_boxed_slice(); - for (i, byte) in self.bytes().iter().take(len).enumerate() { - nybs[i] = byte & 0xF; - } - nybs - } - - /// Returns true if this pattern is a prefix of the given bytes. - #[inline(always)] - pub(crate) fn is_prefix(&self, bytes: &[u8]) -> bool { - is_prefix(bytes, self.bytes()) - } - - /// Returns true if this pattern is a prefix of the haystack given by the - /// raw `start` and `end` pointers. - /// - /// # Safety - /// - /// * It must be the case that `start < end` and that the distance between - /// them is at least equal to `V::BYTES`. That is, it must always be valid - /// to do at least an unaligned load of `V` at `start`. - /// * Both `start` and `end` must be valid for reads. - /// * Both `start` and `end` must point to an initialized value. - /// * Both `start` and `end` must point to the same allocated object and - /// must either be in bounds or at most one byte past the end of the - /// allocated object. - /// * Both `start` and `end` must be _derived from_ a pointer to the same - /// object. - /// * The distance between `start` and `end` must not overflow `isize`. - /// * The distance being in bounds must not rely on "wrapping around" the - /// address space. - #[inline(always)] - pub(crate) unsafe fn is_prefix_raw( - &self, - start: *const u8, - end: *const u8, - ) -> bool { - let patlen = self.bytes().len(); - let haylen = end.distance(start); - if patlen > haylen { - return false; - } - // SAFETY: We've checked that the haystack has length at least equal - // to this pattern. All other safety concerns are the responsibility - // of the caller. - is_equal_raw(start, self.bytes().as_ptr(), patlen) - } -} - -/// Returns true if and only if `needle` is a prefix of `haystack`. -/// -/// This uses a latency optimized variant of `memcmp` internally which *might* -/// make this faster for very short strings. -/// -/// # Inlining -/// -/// This routine is marked `inline(always)`. 
If you want to call this function -/// in a way that is not always inlined, you'll need to wrap a call to it in -/// another function that is marked as `inline(never)` or just `inline`. -#[inline(always)] -fn is_prefix(haystack: &[u8], needle: &[u8]) -> bool { - if needle.len() > haystack.len() { - return false; - } - // SAFETY: Our pointers are derived directly from borrowed slices which - // uphold all of our safety guarantees except for length. We account for - // length with the check above. - unsafe { is_equal_raw(haystack.as_ptr(), needle.as_ptr(), needle.len()) } -} - -/// Compare corresponding bytes in `x` and `y` for equality. -/// -/// That is, this returns true if and only if `x.len() == y.len()` and -/// `x[i] == y[i]` for all `0 <= i < x.len()`. -/// -/// Note that this isn't used. We only use it in tests as a convenient way -/// of testing `is_equal_raw`. -/// -/// # Inlining -/// -/// This routine is marked `inline(always)`. If you want to call this function -/// in a way that is not always inlined, you'll need to wrap a call to it in -/// another function that is marked as `inline(never)` or just `inline`. -/// -/// # Motivation -/// -/// Why not use slice equality instead? Well, slice equality usually results in -/// a call out to the current platform's `libc` which might not be inlineable -/// or have other overhead. This routine isn't guaranteed to be a win, but it -/// might be in some cases. -#[cfg(test)] -#[inline(always)] -fn is_equal(x: &[u8], y: &[u8]) -> bool { - if x.len() != y.len() { - return false; - } - // SAFETY: Our pointers are derived directly from borrowed slices which - // uphold all of our safety guarantees except for length. We account for - // length with the check above. - unsafe { is_equal_raw(x.as_ptr(), y.as_ptr(), x.len()) } -} - -/// Compare `n` bytes at the given pointers for equality. -/// -/// This returns true if and only if `*x.add(i) == *y.add(i)` for all -/// `0 <= i < n`. -/// -/// # Inlining -/// -/// This routine is marked `inline(always)`. If you want to call this function -/// in a way that is not always inlined, you'll need to wrap a call to it in -/// another function that is marked as `inline(never)` or just `inline`. -/// -/// # Motivation -/// -/// Why not use slice equality instead? Well, slice equality usually results in -/// a call out to the current platform's `libc` which might not be inlineable -/// or have other overhead. This routine isn't guaranteed to be a win, but it -/// might be in some cases. -/// -/// # Safety -/// -/// * Both `x` and `y` must be valid for reads of up to `n` bytes. -/// * Both `x` and `y` must point to an initialized value. -/// * Both `x` and `y` must each point to an allocated object and -/// must either be in bounds or at most one byte past the end of the -/// allocated object. `x` and `y` do not need to point to the same allocated -/// object, but they may. -/// * Both `x` and `y` must be _derived from_ a pointer to their respective -/// allocated objects. -/// * The distance between `x` and `x+n` must not overflow `isize`. Similarly -/// for `y` and `y+n`. -/// * The distance being in bounds must not rely on "wrapping around" the -/// address space. -#[inline(always)] -unsafe fn is_equal_raw(mut x: *const u8, mut y: *const u8, n: usize) -> bool { - // If we don't have enough bytes to do 4-byte at a time loads, then - // handle each possible length specially. 
Note that I used to have a - // byte-at-a-time loop here and that turned out to be quite a bit slower - // for the memmem/pathological/defeat-simple-vector-alphabet benchmark. - if n < 4 { - return match n { - 0 => true, - 1 => x.read() == y.read(), - 2 => { - x.cast::<u16>().read_unaligned() - == y.cast::<u16>().read_unaligned() - } - // I also tried copy_nonoverlapping here and it looks like the - // codegen is the same. - 3 => x.cast::<[u8; 3]>().read() == y.cast::<[u8; 3]>().read(), - _ => unreachable!(), - }; - } - // When we have 4 or more bytes to compare, then proceed in chunks of 4 at - // a time using unaligned loads. - // - // Also, why do 4 byte loads instead of, say, 8 byte loads? The reason is - // that this particular version of memcmp is likely to be called with tiny - // needles. That means that if we do 8 byte loads, then a higher proportion - // of memcmp calls will use the slower variant above. With that said, this - // is a hypothesis and is only loosely supported by benchmarks. There's - // likely some improvement that could be made here. The main thing here - // though is to optimize for latency, not throughput. - - // SAFETY: The caller is responsible for ensuring the pointers we get are - // valid and readable for at least `n` bytes. We also do unaligned loads, - // so there's no need to ensure we're aligned. (This is justified by this - // routine being specifically for short strings.) - let xend = x.add(n.wrapping_sub(4)); - let yend = y.add(n.wrapping_sub(4)); - while x < xend { - let vx = x.cast::<u32>().read_unaligned(); - let vy = y.cast::<u32>().read_unaligned(); - if vx != vy { - return false; - } - x = x.add(4); - y = y.add(4); - } - let vx = xend.cast::<u32>().read_unaligned(); - let vy = yend.cast::<u32>().read_unaligned(); - vx == vy -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn equals_different_lengths() { - assert!(!is_equal(b"", b"a")); - assert!(!is_equal(b"a", b"")); - assert!(!is_equal(b"ab", b"a")); - assert!(!is_equal(b"a", b"ab")); - } - - #[test] - fn equals_mismatch() { - let one_mismatch = [ - (&b"a"[..], &b"x"[..]), - (&b"ab"[..], &b"ax"[..]), - (&b"abc"[..], &b"abx"[..]), - (&b"abcd"[..], &b"abcx"[..]), - (&b"abcde"[..], &b"abcdx"[..]), - (&b"abcdef"[..], &b"abcdex"[..]), - (&b"abcdefg"[..], &b"abcdefx"[..]), - (&b"abcdefgh"[..], &b"abcdefgx"[..]), - (&b"abcdefghi"[..], &b"abcdefghx"[..]), - (&b"abcdefghij"[..], &b"abcdefghix"[..]), - (&b"abcdefghijk"[..], &b"abcdefghijx"[..]), - (&b"abcdefghijkl"[..], &b"abcdefghijkx"[..]), - (&b"abcdefghijklm"[..], &b"abcdefghijklx"[..]), - (&b"abcdefghijklmn"[..], &b"abcdefghijklmx"[..]), - ]; - for (x, y) in one_mismatch { - assert_eq!(x.len(), y.len(), "lengths should match"); - assert!(!is_equal(x, y)); - assert!(!is_equal(y, x)); - } - } - - #[test] - fn equals_yes() { - assert!(is_equal(b"", b"")); - assert!(is_equal(b"a", b"a")); - assert!(is_equal(b"ab", b"ab")); - assert!(is_equal(b"abc", b"abc")); - assert!(is_equal(b"abcd", b"abcd")); - assert!(is_equal(b"abcde", b"abcde")); - assert!(is_equal(b"abcdef", b"abcdef")); - assert!(is_equal(b"abcdefg", b"abcdefg")); - assert!(is_equal(b"abcdefgh", b"abcdefgh")); - assert!(is_equal(b"abcdefghi", b"abcdefghi")); - } - - #[test] - fn prefix() { - assert!(is_prefix(b"", b"")); - assert!(is_prefix(b"a", b"")); - assert!(is_prefix(b"ab", b"")); - assert!(is_prefix(b"foo", b"foo")); - assert!(is_prefix(b"foobar", b"foo")); - - assert!(!is_prefix(b"foo", b"fob")); - assert!(!is_prefix(b"foobar", b"fob")); - } -} diff --git 
a/vendor/aho-corasick/src/packed/rabinkarp.rs b/vendor/aho-corasick/src/packed/rabinkarp.rs deleted file mode 100644 index fdd8a6f0b4d8fa..00000000000000 --- a/vendor/aho-corasick/src/packed/rabinkarp.rs +++ /dev/null @@ -1,168 +0,0 @@ -use alloc::{sync::Arc, vec, vec::Vec}; - -use crate::{packed::pattern::Patterns, util::search::Match, PatternID}; - -/// The type of the rolling hash used in the Rabin-Karp algorithm. -type Hash = usize; - -/// The number of buckets to store our patterns in. We don't want this to be -/// too big in order to avoid wasting memory, but we don't want it to be too -/// small either to avoid spending too much time confirming literals. -/// -/// The number of buckets MUST be a power of two. Otherwise, determining the -/// bucket from a hash will slow down the code considerably. Using a power -/// of two means `hash % NUM_BUCKETS` can compile down to a simple `and` -/// instruction. -const NUM_BUCKETS: usize = 64; - -/// An implementation of the Rabin-Karp algorithm. The main idea of this -/// algorithm is to maintain a rolling hash as it moves through the input, and -/// then check whether that hash corresponds to the same hash for any of the -/// patterns we're looking for. -/// -/// A draw back of naively scaling Rabin-Karp to multiple patterns is that -/// it requires all of the patterns to be the same length, which in turn -/// corresponds to the number of bytes to hash. We adapt this to work for -/// multiple patterns of varying size by fixing the number of bytes to hash -/// to be the length of the smallest pattern. We also split the patterns into -/// several buckets to hopefully make the confirmation step faster. -/// -/// Wikipedia has a decent explanation, if a bit heavy on the theory: -/// https://en.wikipedia.org/wiki/Rabin%E2%80%93Karp_algorithm -/// -/// But ESMAJ provides something a bit more concrete: -/// https://www-igm.univ-mlv.fr/~lecroq/string/node5.html -#[derive(Clone, Debug)] -pub(crate) struct RabinKarp { - /// The patterns we're searching for. - patterns: Arc<Patterns>, - /// The order of patterns in each bucket is significant. Namely, they are - /// arranged such that the first one to match is the correct match. This - /// may not necessarily correspond to the order provided by the caller. - /// For example, if leftmost-longest semantics are used, then the patterns - /// are sorted by their length in descending order. If leftmost-first - /// semantics are used, then the patterns are sorted by their pattern ID - /// in ascending order (which corresponds to the caller's order). - buckets: Vec<Vec<(Hash, PatternID)>>, - /// The length of the hashing window. Generally, this corresponds to the - /// length of the smallest pattern. - hash_len: usize, - /// The factor to subtract out of a hash before updating it with a new - /// byte. - hash_2pow: usize, -} - -impl RabinKarp { - /// Compile a new Rabin-Karp matcher from the patterns given. - /// - /// This panics if any of the patterns in the collection are empty, or if - /// the collection is itself empty. 
- pub(crate) fn new(patterns: &Arc<Patterns>) -> RabinKarp { - assert!(patterns.len() >= 1); - let hash_len = patterns.minimum_len(); - assert!(hash_len >= 1); - - let mut hash_2pow = 1usize; - for _ in 1..hash_len { - hash_2pow = hash_2pow.wrapping_shl(1); - } - - let mut rk = RabinKarp { - patterns: Arc::clone(patterns), - buckets: vec![vec![]; NUM_BUCKETS], - hash_len, - hash_2pow, - }; - for (id, pat) in patterns.iter() { - let hash = rk.hash(&pat.bytes()[..rk.hash_len]); - let bucket = hash % NUM_BUCKETS; - rk.buckets[bucket].push((hash, id)); - } - rk - } - - /// Return the first matching pattern in the given haystack, begining the - /// search at `at`. - pub(crate) fn find_at( - &self, - haystack: &[u8], - mut at: usize, - ) -> Option<Match> { - assert_eq!(NUM_BUCKETS, self.buckets.len()); - - if at + self.hash_len > haystack.len() { - return None; - } - let mut hash = self.hash(&haystack[at..at + self.hash_len]); - loop { - let bucket = &self.buckets[hash % NUM_BUCKETS]; - for &(phash, pid) in bucket { - if phash == hash { - if let Some(c) = self.verify(pid, haystack, at) { - return Some(c); - } - } - } - if at + self.hash_len >= haystack.len() { - return None; - } - hash = self.update_hash( - hash, - haystack[at], - haystack[at + self.hash_len], - ); - at += 1; - } - } - - /// Returns the approximate total amount of heap used by this searcher, in - /// units of bytes. - pub(crate) fn memory_usage(&self) -> usize { - self.buckets.len() * core::mem::size_of::<Vec<(Hash, PatternID)>>() - + self.patterns.len() * core::mem::size_of::<(Hash, PatternID)>() - } - - /// Verify whether the pattern with the given id matches at - /// `haystack[at..]`. - /// - /// We tag this function as `cold` because it helps improve codegen. - /// Intuitively, it would seem like inlining it would be better. However, - /// the only time this is called and a match is not found is when there - /// there is a hash collision, or when a prefix of a pattern matches but - /// the entire pattern doesn't match. This is hopefully fairly rare, and - /// if it does occur a lot, it's going to be slow no matter what we do. - #[cold] - fn verify( - &self, - id: PatternID, - haystack: &[u8], - at: usize, - ) -> Option<Match> { - let pat = self.patterns.get(id); - if pat.is_prefix(&haystack[at..]) { - Some(Match::new(id, at..at + pat.len())) - } else { - None - } - } - - /// Hash the given bytes. - fn hash(&self, bytes: &[u8]) -> Hash { - assert_eq!(self.hash_len, bytes.len()); - - let mut hash = 0usize; - for &b in bytes { - hash = hash.wrapping_shl(1).wrapping_add(b as usize); - } - hash - } - - /// Update the hash given based on removing `old_byte` at the beginning - /// of some byte string, and appending `new_byte` to the end of that same - /// byte string. - fn update_hash(&self, prev: Hash, old_byte: u8, new_byte: u8) -> Hash { - prev.wrapping_sub((old_byte as usize).wrapping_mul(self.hash_2pow)) - .wrapping_shl(1) - .wrapping_add(new_byte as usize) - } -} diff --git a/vendor/aho-corasick/src/packed/teddy/README.md b/vendor/aho-corasick/src/packed/teddy/README.md deleted file mode 100644 index f0928cbe5ceca2..00000000000000 --- a/vendor/aho-corasick/src/packed/teddy/README.md +++ /dev/null @@ -1,386 +0,0 @@ -Teddy is a SIMD accelerated multiple substring matching algorithm. The name -and the core ideas in the algorithm were learned from the [Hyperscan][1_u] -project. 
The implementation in this repository was mostly motivated for use in -accelerating regex searches by searching for small sets of required literals -extracted from the regex. - - -# Background - -The key idea of Teddy is to do *packed* substring matching. In the literature, -packed substring matching is the idea of examining multiple bytes in a haystack -at a time to detect matches. Implementations of, for example, memchr (which -detects matches of a single byte) have been doing this for years. Only -recently, with the introduction of various SIMD instructions, has this been -extended to substring matching. The PCMPESTRI instruction (and its relatives), -for example, implements substring matching in hardware. It is, however, limited -to substrings of length 16 bytes or fewer, but this restriction is fine in a -regex engine, since we rarely care about the performance difference between -searching for a 16 byte literal and a 16 + N literal; 16 is already long -enough. The key downside of the PCMPESTRI instruction, on current (2016) CPUs -at least, is its latency and throughput. As a result, it is often faster to -do substring search with a Boyer-Moore (or Two-Way) variant and a well placed -memchr to quickly skip through the haystack. - -There are fewer results from the literature on packed substring matching, -and even fewer for packed multiple substring matching. Ben-Kiki et al. [2] -describes use of PCMPESTRI for substring matching, but is mostly theoretical -and hand-waves performance. There is other theoretical work done by Bille [3] -as well. - -The rest of the work in the field, as far as I'm aware, is by Faro and Kulekci -and is generally focused on multiple pattern search. Their first paper [4a] -introduces the concept of a fingerprint, which is computed for every block of -N bytes in every pattern. The haystack is then scanned N bytes at a time and -a fingerprint is computed in the same way it was computed for blocks in the -patterns. If the fingerprint corresponds to one that was found in a pattern, -then a verification step follows to confirm that one of the substrings with the -corresponding fingerprint actually matches at the current location. Various -implementation tricks are employed to make sure the fingerprint lookup is fast; -typically by truncating the fingerprint. (This may, of course, provoke more -steps in the verification process, so a balance must be struck.) - -The main downside of [4a] is that the minimum substring length is 32 bytes, -presumably because of how the algorithm uses certain SIMD instructions. This -essentially makes it useless for general purpose regex matching, where a small -number of short patterns is far more likely. - -Faro and Kulekci published another paper [4b] that is conceptually very similar -to [4a]. The key difference is that it uses the CRC32 instruction (introduced -as part of SSE 4.2) to compute fingerprint values. This also enables the -algorithm to work effectively on substrings as short as 7 bytes with 4 byte -windows. 7 bytes is unfortunately still too long. The window could be -technically shrunk to 2 bytes, thereby reducing minimum length to 3, but the -small window size ends up negating most performance benefits—and it's likely -the common case in a general purpose regex engine. - -Faro and Kulekci also published [4c] that appears to be intended as a -replacement to using PCMPESTRI. 
In particular, it is specifically motivated by -the high throughput/latency time of PCMPESTRI and therefore chooses other SIMD -instructions that are faster. While this approach works for short substrings, -I personally couldn't see a way to generalize it to multiple substring search. - -Faro and Kulekci have another paper [4d] that I haven't been able to read -because it is behind a paywall. - - -# Teddy - -Finally, we get to Teddy. If the above literature review is complete, then it -appears that Teddy is a novel algorithm. More than that, in my experience, it -completely blows away the competition for short substrings, which is exactly -what we want in a general purpose regex engine. Again, the algorithm appears -to be developed by the authors of [Hyperscan][1_u]. Hyperscan was open sourced -late 2015, and no earlier history could be found. Therefore, tracking the exact -provenance of the algorithm with respect to the published literature seems -difficult. - -At a high level, Teddy works somewhat similarly to the fingerprint algorithms -published by Faro and Kulekci, but Teddy does it in a way that scales a bit -better. Namely: - -1. Teddy's core algorithm scans the haystack in 16 (for SSE, or 32 for AVX) - byte chunks. 16 (or 32) is significant because it corresponds to the number - of bytes in a SIMD vector. -2. Bitwise operations are performed on each chunk to discover if any region of - it matches a set of precomputed fingerprints from the patterns. If there are - matches, then a verification step is performed. In this implementation, our - verification step is naive. This can be improved upon. - -The details to make this work are quite clever. First, we must choose how to -pick our fingerprints. In Hyperscan's implementation, I *believe* they use the -last N bytes of each substring, where N must be at least the minimum length of -any substring in the set being searched. In this implementation, we use the -first N bytes of each substring. (The tradeoffs between these choices aren't -yet clear to me.) We then must figure out how to quickly test whether an -occurrence of any fingerprint from the set of patterns appears in a 16 byte -block from the haystack. To keep things simple, let's assume N = 1 and examine -some examples to motivate the approach. Here are our patterns: - -```ignore -foo -bar -baz -``` - -The corresponding fingerprints, for N = 1, are `f`, `b` and `b`. Now let's set -our 16 byte block to: - -```ignore -bat cat foo bump -xxxxxxxxxxxxxxxx -``` - -To cut to the chase, Teddy works by using bitsets. In particular, Teddy creates -a mask that allows us to quickly compute membership of a fingerprint in a 16 -byte block that also tells which pattern the fingerprint corresponds to. In -this case, our fingerprint is a single byte, so an appropriate abstraction is -a map from a single byte to a list of patterns that contain that fingerprint: - -```ignore -f |--> foo -b |--> bar, baz -``` - -Now, all we need to do is figure out how to represent this map in vector space -and use normal SIMD operations to perform a lookup. The first simplification -we can make is to represent our patterns as bit fields occupying a single -byte. This is important, because a single SIMD vector can store 16 bytes. - -```ignore -f |--> 00000001 -b |--> 00000010, 00000100 -``` - -How do we perform lookup though? It turns out that SSSE3 introduced a very cool -instruction called PSHUFB. The instruction takes two SIMD vectors, `A` and `B`, -and returns a third vector `C`. 
All vectors are treated as 16 8-bit integers. -`C` is formed by `C[i] = A[B[i]]`. (This is a bit of a simplification, but true -for the purposes of this algorithm. For full details, see [Intel's Intrinsics -Guide][5_u].) This essentially lets us use the values in `B` to lookup values -in `A`. - -If we could somehow cause `B` to contain our 16 byte block from the haystack, -and if `A` could contain our bitmasks, then we'd end up with something like -this for `A`: - -```ignore - 0x00 0x01 ... 0x62 ... 0x66 ... 0xFF -A = 0 0 00000110 00000001 0 -``` - -And if `B` contains our window from our haystack, we could use shuffle to take -the values from `B` and use them to look up our bitsets in `A`. But of course, -we can't do this because `A` in the above example contains 256 bytes, which -is much larger than the size of a SIMD vector. - -Nybbles to the rescue! A nybble is 4 bits. Instead of one mask to hold all of -our bitsets, we can use two masks, where one mask corresponds to the lower four -bits of our fingerprint and the other mask corresponds to the upper four bits. -So our map now looks like: - -```ignore -'f' & 0xF = 0x6 |--> 00000001 -'f' >> 4 = 0x6 |--> 00000111 -'b' & 0xF = 0x2 |--> 00000110 -'b' >> 4 = 0x6 |--> 00000111 -``` - -Notice that the bitsets for each nybble correspond to the union of all -fingerprints that contain that nybble. For example, both `f` and `b` have the -same upper 4 bits but differ on the lower 4 bits. Putting this together, we -have `A0`, `A1` and `B`, where `A0` is our mask for the lower nybble, `A1` is -our mask for the upper nybble and `B` is our 16 byte block from the haystack: - -```ignore - 0x00 0x01 0x02 0x03 ... 0x06 ... 0xF -A0 = 0 0 00000110 0 00000001 0 -A1 = 0 0 0 0 00000111 0 -B = b a t _ t p -B = 0x62 0x61 0x74 0x20 0x74 0x70 -``` - -But of course, we can't use `B` with `PSHUFB` yet, since its values are 8 bits, -and we need indexes that are at most 4 bits (corresponding to one of 16 -values). We can apply the same transformation to split `B` into lower and upper -nybbles as we did `A`. As before, `B0` corresponds to the lower nybbles and -`B1` corresponds to the upper nybbles: - -```ignore - b a t _ c a t _ f o o _ b u m p -B0 = 0x2 0x1 0x4 0x0 0x3 0x1 0x4 0x0 0x6 0xF 0xF 0x0 0x2 0x5 0xD 0x0 -B1 = 0x6 0x6 0x7 0x2 0x6 0x6 0x7 0x2 0x6 0x6 0x6 0x2 0x6 0x7 0x6 0x7 -``` - -And now we have a nice correspondence. `B0` can index `A0` and `B1` can index -`A1`. Here's what we get when we apply `C0 = PSHUFB(A0, B0)`: - -```ignore - b a ... f o ... p - A0[0x2] A0[0x1] A0[0x6] A0[0xF] A0[0x0] -C0 = 00000110 0 00000001 0 0 -``` - -And `C1 = PSHUFB(A1, B1)`: - -```ignore - b a ... f o ... p - A1[0x6] A1[0x6] A1[0x6] A1[0x6] A1[0x7] -C1 = 00000111 00000111 00000111 00000111 0 -``` - -Notice how neither one of `C0` or `C1` is guaranteed to report fully correct -results all on its own. For example, `C1` claims that `b` is a fingerprint for -the pattern `foo` (since `A1[0x6] = 00000111`), and that `o` is a fingerprint -for all of our patterns. But if we combined `C0` and `C1` with an `AND` -operation: - -```ignore - b a ... f o ... p -C = 00000110 0 00000001 0 0 -``` - -Then we now have that `C[i]` contains a bitset corresponding to the matching -fingerprints in a haystack's 16 byte block, where `i` is the `ith` byte in that -block. - -Once we have that, we can look for the position of the least significant bit -in `C`. (Least significant because we only target little endian here. 
Thus, -the least significant bytes correspond to bytes in our haystack at a lower -address.) That position, modulo `8`, gives us the pattern that the fingerprint -matches. That position, integer divided by `8`, also gives us the byte offset -that the fingerprint occurs in inside the 16 byte haystack block. Using those -two pieces of information, we can run a verification procedure that tries -to match all substrings containing that fingerprint at that position in the -haystack. - - -# Implementation notes - -The problem with the algorithm as described above is that it uses a single byte -for a fingerprint. This will work well if the fingerprints are rare in the -haystack (e.g., capital letters or special characters in normal English text), -but if the fingerprints are common, you'll wind up spending too much time in -the verification step, which effectively negates the performance benefits of -scanning 16 bytes at a time. Remember, the key to the performance of this -algorithm is to do as little work as possible per 16 (or 32) bytes. - -This algorithm can be extrapolated in a relatively straight-forward way to use -larger fingerprints. That is, instead of a single byte prefix, we might use a -two or three byte prefix. The implementation here implements N = {1, 2, 3} -and always picks the largest N possible. The rationale is that the bigger the -fingerprint, the fewer verification steps we'll do. Of course, if N is too -large, then we'll end up doing too much on each step. - -The way to extend it is: - -1. Add a mask for each byte in the fingerprint. (Remember that each mask is - composed of two SIMD vectors.) This results in a value of `C` for each byte - in the fingerprint while searching. -2. When testing each 16 (or 32) byte block, each value of `C` must be shifted - so that they are aligned. Once aligned, they should all be `AND`'d together. - This will give you only the bitsets corresponding to the full match of the - fingerprint. To do this, one needs to save the last byte (for N=2) or last - two bytes (for N=3) from the previous iteration, and then line them up with - the first one or two bytes of the next iteration. - -## Verification - -Verification generally follows the procedure outlined above. The tricky parts -are in the right formulation of operations to get our bits out of our vectors. -We have a limited set of operations available to us on SIMD vectors as 128-bit -or 256-bit numbers, so we wind up needing to rip out 2 (or 4) 64-bit integers -from our vectors, and then run our verification step on each of those. The -verification step looks at the least significant bit set, and from its -position, we can derive the byte offset and bucket. (Again, as described -above.) Once we know the bucket, we do a fairly naive exhaustive search for -every literal in that bucket. (Hyperscan is a bit smarter here and uses a hash -table, but I haven't had time to thoroughly explore that. A few initial -half-hearted attempts resulted in worse performance.) - -## AVX - -The AVX version of Teddy extrapolates almost perfectly from the SSE version. -The only hickup is that PALIGNR is used to align chunks in the 16-bit version, -and there is no equivalent instruction in AVX. AVX does have VPALIGNR, but it -only works within 128-bit lanes. So there's a bit of tomfoolery to get around -this by shuffling the vectors before calling VPALIGNR. 
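[Editorial aside, not part of the vendored README or of this patch: to make the nybble-mask construction and per-byte membership test described earlier in this file concrete, here is a minimal scalar sketch of the N = 1 case. The names (`build_masks`, `find`) are invented for the example; it ignores bucket ordering and the leftmost-first tie-breaking the real searcher handles, assumes non-empty patterns, and replaces the PSHUFB-based 16/32-byte vector step with a plain per-byte table lookup and AND.]

```rust
// Scalar model of Teddy's single-byte fingerprint test (no SIMD).
// `lo`/`hi` play the role of the A0/A1 shuffle tables described above;
// the per-byte AND is what PSHUFB + AND compute a whole chunk at a time.
fn build_masks(patterns: &[&[u8]]) -> ([u8; 16], [u8; 16]) {
    let (mut lo, mut hi) = ([0u8; 16], [0u8; 16]);
    for (i, pat) in patterns.iter().enumerate() {
        let bit = 1u8 << (i % 8); // bucket assignment; patterns may share a bucket
        let fp = pat[0];          // fingerprint: first byte (patterns assumed non-empty)
        lo[(fp & 0xF) as usize] |= bit;
        hi[(fp >> 4) as usize] |= bit;
    }
    (lo, hi)
}

fn find(patterns: &[&[u8]], haystack: &[u8]) -> Option<(usize, usize)> {
    let (lo, hi) = build_masks(patterns);
    for (pos, &b) in haystack.iter().enumerate() {
        // Buckets whose fingerprint could be the byte `b`.
        let cand = lo[(b & 0xF) as usize] & hi[(b >> 4) as usize];
        if cand == 0 {
            continue;
        }
        // Verification step: check each pattern in a candidate bucket.
        for (i, pat) in patterns.iter().enumerate() {
            if cand & (1u8 << (i % 8)) != 0 && haystack[pos..].starts_with(*pat) {
                return Some((i, pos)); // (pattern index, start offset)
            }
        }
    }
    None
}

fn main() {
    let pats: &[&[u8]] = &[b"foo", b"bar", b"baz"];
    // 'b' at offset 0 produces candidates (bar, baz) that fail verification;
    // the real match is "foo" starting at offset 8.
    assert_eq!(find(pats, b"bat cat foo bump"), Some((0, 8)));
    println!("ok");
}
```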
- -The only other aspect to AVX is that since our masks are still fundamentally -16-bytes (0x0-0xF), they are duplicated to 32-bytes, so that they can apply to -32-byte chunks. - -## Fat Teddy - -In the version of Teddy described above, 8 buckets are used to group patterns -that we want to search for. However, when AVX is available, we can extend the -number of buckets to 16 by permitting each byte in our masks to use 16-bits -instead of 8-bits to represent the buckets it belongs to. (This variant is also -in Hyperscan.) However, what we give up is the ability to scan 32 bytes at a -time, even though we're using AVX. Instead, we have to scan 16 bytes at a time. -What we gain, though, is (hopefully) less work in our verification routine. -It patterns are more spread out across more buckets, then there should overall -be fewer false positives. In general, Fat Teddy permits us to grow our capacity -a bit and search for more literals before Teddy gets overwhelmed. - -The tricky part of Fat Teddy is in how we adjust our masks and our verification -procedure. For the masks, we simply represent the first 8 buckets in each of -the low 16 bytes, and then the second 8 buckets in each of the high 16 bytes. -Then, in the search loop, instead of loading 32 bytes from the haystack, we -load the same 16 bytes from the haystack into both the low and high 16 byte -portions of our 256-bit vector. So for example, a mask might look like this: - - bits: 00100001 00000000 ... 11000000 00000000 00000001 ... 00000000 - byte: 31 30 16 15 14 0 - offset: 15 14 0 15 14 0 - buckets: 8-15 8-15 8-15 0-7 0-7 0-7 - -Where `byte` is the position in the vector (higher numbers corresponding to -more significant bits), `offset` is the corresponding position in the haystack -chunk, and `buckets` corresponds to the bucket assignments for that particular -byte. - -In particular, notice that the bucket assignments for offset `0` are spread -out between bytes `0` and `16`. This works well for the chunk-by-chunk search -procedure, but verification really wants to process all bucket assignments for -each offset at once. Otherwise, we might wind up finding a match at offset -`1` in one the first 8 buckets, when we really should have reported a match -at offset `0` in one of the second 8 buckets. (Because we want the leftmost -match.) - -Thus, for verification, we rearrange the above vector such that it is a -sequence of 16-bit integers, where the least significant 16-bit integer -corresponds to all of the bucket assignments for offset `0`. So with the -above vector, the least significant 16-bit integer would be - - 11000000 000000 - -which was taken from bytes `16` and `0`. Then the verification step pretty much -runs as described, except with 16 buckets instead of 8. - - -# References - -- **[1]** [Hyperscan on GitHub](https://github.com/intel/hyperscan), - [webpage](https://www.hyperscan.io/) -- **[2a]** Ben-Kiki, O., Bille, P., Breslauer, D., Gasieniec, L., Grossi, R., - & Weimann, O. (2011). - _Optimal packed string matching_. - In LIPIcs-Leibniz International Proceedings in Informatics (Vol. 13). - Schloss Dagstuhl-Leibniz-Zentrum fuer Informatik. - DOI: 10.4230/LIPIcs.FSTTCS.2011.423. - [PDF](https://drops.dagstuhl.de/opus/volltexte/2011/3355/pdf/37.pdf). -- **[2b]** Ben-Kiki, O., Bille, P., Breslauer, D., Ga̧sieniec, L., Grossi, R., - & Weimann, O. (2014). - _Towards optimal packed string matching_. - Theoretical Computer Science, 525, 111-129. - DOI: 10.1016/j.tcs.2013.06.013. 
- [PDF](https://www.cs.haifa.ac.il/~oren/Publications/bpsm.pdf). -- **[3]** Bille, P. (2011). - _Fast searching in packed strings_. - Journal of Discrete Algorithms, 9(1), 49-56. - DOI: 10.1016/j.jda.2010.09.003. - [PDF](https://www.sciencedirect.com/science/article/pii/S1570866710000353). -- **[4a]** Faro, S., & Külekci, M. O. (2012, October). - _Fast multiple string matching using streaming SIMD extensions technology_. - In String Processing and Information Retrieval (pp. 217-228). - Springer Berlin Heidelberg. - DOI: 10.1007/978-3-642-34109-0_23. - [PDF](https://www.dmi.unict.it/faro/papers/conference/faro32.pdf). -- **[4b]** Faro, S., & Külekci, M. O. (2013, September). - _Towards a Very Fast Multiple String Matching Algorithm for Short Patterns_. - In Stringology (pp. 78-91). - [PDF](https://www.dmi.unict.it/faro/papers/conference/faro36.pdf). -- **[4c]** Faro, S., & Külekci, M. O. (2013, January). - _Fast packed string matching for short patterns_. - In Proceedings of the Meeting on Algorithm Engineering & Expermiments - (pp. 113-121). - Society for Industrial and Applied Mathematics. - [PDF](https://arxiv.org/pdf/1209.6449.pdf). -- **[4d]** Faro, S., & Külekci, M. O. (2014). - _Fast and flexible packed string matching_. - Journal of Discrete Algorithms, 28, 61-72. - DOI: 10.1016/j.jda.2014.07.003. - -[1_u]: https://github.com/intel/hyperscan -[5_u]: https://software.intel.com/sites/landingpage/IntrinsicsGuide diff --git a/vendor/aho-corasick/src/packed/teddy/builder.rs b/vendor/aho-corasick/src/packed/teddy/builder.rs deleted file mode 100644 index e9bb68b299f081..00000000000000 --- a/vendor/aho-corasick/src/packed/teddy/builder.rs +++ /dev/null @@ -1,792 +0,0 @@ -use core::{ - fmt::Debug, - panic::{RefUnwindSafe, UnwindSafe}, -}; - -use alloc::sync::Arc; - -use crate::packed::{ext::Pointer, pattern::Patterns, teddy::generic::Match}; - -/// A builder for constructing a Teddy matcher. -/// -/// The builder primarily permits fine grained configuration of the Teddy -/// matcher. Most options are made only available for testing/benchmarking -/// purposes. In reality, options are automatically determined by the nature -/// and number of patterns given to the builder. -#[derive(Clone, Debug)] -pub(crate) struct Builder { - /// When none, this is automatically determined. Otherwise, `false` means - /// slim Teddy is used (8 buckets) and `true` means fat Teddy is used - /// (16 buckets). Fat Teddy requires AVX2, so if that CPU feature isn't - /// available and Fat Teddy was requested, no matcher will be built. - only_fat: Option<bool>, - /// When none, this is automatically determined. Otherwise, `false` means - /// that 128-bit vectors will be used (up to SSSE3 instructions) where as - /// `true` means that 256-bit vectors will be used. As with `fat`, if - /// 256-bit vectors are requested and they aren't available, then a - /// searcher will not be built. - only_256bit: Option<bool>, - /// When true (the default), the number of patterns will be used as a - /// heuristic for refusing construction of a Teddy searcher. The point here - /// is that too many patterns can overwhelm Teddy. But this can be disabled - /// in cases where the caller knows better. - heuristic_pattern_limits: bool, -} - -impl Default for Builder { - fn default() -> Builder { - Builder::new() - } -} - -impl Builder { - /// Create a new builder for configuring a Teddy matcher. 
- pub(crate) fn new() -> Builder { - Builder { - only_fat: None, - only_256bit: None, - heuristic_pattern_limits: true, - } - } - - /// Build a matcher for the set of patterns given. If a matcher could not - /// be built, then `None` is returned. - /// - /// Generally, a matcher isn't built if the necessary CPU features aren't - /// available, an unsupported target or if the searcher is believed to be - /// slower than standard techniques (i.e., if there are too many literals). - pub(crate) fn build(&self, patterns: Arc<Patterns>) -> Option<Searcher> { - self.build_imp(patterns) - } - - /// Require the use of Fat (true) or Slim (false) Teddy. Fat Teddy uses - /// 16 buckets where as Slim Teddy uses 8 buckets. More buckets are useful - /// for a larger set of literals. - /// - /// `None` is the default, which results in an automatic selection based - /// on the number of literals and available CPU features. - pub(crate) fn only_fat(&mut self, yes: Option<bool>) -> &mut Builder { - self.only_fat = yes; - self - } - - /// Request the use of 256-bit vectors (true) or 128-bit vectors (false). - /// Generally, a larger vector size is better since it either permits - /// matching more patterns or matching more bytes in the haystack at once. - /// - /// `None` is the default, which results in an automatic selection based on - /// the number of literals and available CPU features. - pub(crate) fn only_256bit(&mut self, yes: Option<bool>) -> &mut Builder { - self.only_256bit = yes; - self - } - - /// Request that heuristic limitations on the number of patterns be - /// employed. This useful to disable for benchmarking where one wants to - /// explore how Teddy performs on large number of patterns even if the - /// heuristics would otherwise refuse construction. - /// - /// This is enabled by default. - pub(crate) fn heuristic_pattern_limits( - &mut self, - yes: bool, - ) -> &mut Builder { - self.heuristic_pattern_limits = yes; - self - } - - fn build_imp(&self, patterns: Arc<Patterns>) -> Option<Searcher> { - let patlimit = self.heuristic_pattern_limits; - // There's no particular reason why we limit ourselves to little endian - // here, but it seems likely that some parts of Teddy as they are - // currently written (e.g., the uses of `trailing_zeros`) are likely - // wrong on non-little-endian targets. Such things are likely easy to - // fix, but at the time of writing (2023/09/18), I actually do not know - // how to test this code on a big-endian target. So for now, we're - // conservative and just bail out. - if !cfg!(target_endian = "little") { - debug!("skipping Teddy because target isn't little endian"); - return None; - } - // Too many patterns will overwhelm Teddy and likely lead to slow - // downs, typically in the verification step. 
- if patlimit && patterns.len() > 64 { - debug!("skipping Teddy because of too many patterns"); - return None; - } - - #[cfg(all(target_arch = "x86_64", target_feature = "sse2"))] - { - use self::x86_64::{FatAVX2, SlimAVX2, SlimSSSE3}; - - let mask_len = core::cmp::min(4, patterns.minimum_len()); - let beefy = patterns.len() > 32; - let has_avx2 = self::x86_64::is_available_avx2(); - let has_ssse3 = has_avx2 || self::x86_64::is_available_ssse3(); - let use_avx2 = if self.only_256bit == Some(true) { - if !has_avx2 { - debug!( - "skipping Teddy because avx2 was demanded but unavailable" - ); - return None; - } - true - } else if self.only_256bit == Some(false) { - if !has_ssse3 { - debug!( - "skipping Teddy because ssse3 was demanded but unavailable" - ); - return None; - } - false - } else if !has_ssse3 && !has_avx2 { - debug!( - "skipping Teddy because ssse3 and avx2 are unavailable" - ); - return None; - } else { - has_avx2 - }; - let fat = match self.only_fat { - None => use_avx2 && beefy, - Some(false) => false, - Some(true) if !use_avx2 => { - debug!( - "skipping Teddy because fat was demanded, but fat \ - Teddy requires avx2 which is unavailable" - ); - return None; - } - Some(true) => true, - }; - // Just like for aarch64, it's possible that too many patterns will - // overhwelm Teddy. Unlike aarch64 though, we have Fat teddy which - // helps things scale a bit more by spreading patterns over more - // buckets. - // - // These thresholds were determined by looking at the measurements - // for the rust/aho-corasick/packed/leftmost-first and - // rust/aho-corasick/dfa/leftmost-first engines on the `teddy/` - // benchmarks. - if patlimit && mask_len == 1 && patterns.len() > 16 { - debug!( - "skipping Teddy (mask len: 1) because there are \ - too many patterns", - ); - return None; - } - match (mask_len, use_avx2, fat) { - (1, false, _) => { - debug!("Teddy choice: 128-bit slim, 1 byte"); - SlimSSSE3::<1>::new(&patterns) - } - (1, true, false) => { - debug!("Teddy choice: 256-bit slim, 1 byte"); - SlimAVX2::<1>::new(&patterns) - } - (1, true, true) => { - debug!("Teddy choice: 256-bit fat, 1 byte"); - FatAVX2::<1>::new(&patterns) - } - (2, false, _) => { - debug!("Teddy choice: 128-bit slim, 2 bytes"); - SlimSSSE3::<2>::new(&patterns) - } - (2, true, false) => { - debug!("Teddy choice: 256-bit slim, 2 bytes"); - SlimAVX2::<2>::new(&patterns) - } - (2, true, true) => { - debug!("Teddy choice: 256-bit fat, 2 bytes"); - FatAVX2::<2>::new(&patterns) - } - (3, false, _) => { - debug!("Teddy choice: 128-bit slim, 3 bytes"); - SlimSSSE3::<3>::new(&patterns) - } - (3, true, false) => { - debug!("Teddy choice: 256-bit slim, 3 bytes"); - SlimAVX2::<3>::new(&patterns) - } - (3, true, true) => { - debug!("Teddy choice: 256-bit fat, 3 bytes"); - FatAVX2::<3>::new(&patterns) - } - (4, false, _) => { - debug!("Teddy choice: 128-bit slim, 4 bytes"); - SlimSSSE3::<4>::new(&patterns) - } - (4, true, false) => { - debug!("Teddy choice: 256-bit slim, 4 bytes"); - SlimAVX2::<4>::new(&patterns) - } - (4, true, true) => { - debug!("Teddy choice: 256-bit fat, 4 bytes"); - FatAVX2::<4>::new(&patterns) - } - _ => { - debug!("no supported Teddy configuration found"); - None - } - } - } - #[cfg(all( - target_arch = "aarch64", - target_feature = "neon", - target_endian = "little" - ))] - { - use self::aarch64::SlimNeon; - - let mask_len = core::cmp::min(4, patterns.minimum_len()); - if self.only_256bit == Some(true) { - debug!( - "skipping Teddy because 256-bits were demanded \ - but unavailable" - ); - return None; 
- } - if self.only_fat == Some(true) { - debug!( - "skipping Teddy because fat was demanded but unavailable" - ); - } - // Since we don't have Fat teddy in aarch64 (I think we'd want at - // least 256-bit vectors for that), we need to be careful not to - // allow too many patterns as it might overwhelm Teddy. Generally - // speaking, as the mask length goes up, the more patterns we can - // handle because the mask length results in fewer candidates - // generated. - // - // These thresholds were determined by looking at the measurements - // for the rust/aho-corasick/packed/leftmost-first and - // rust/aho-corasick/dfa/leftmost-first engines on the `teddy/` - // benchmarks. - match mask_len { - 1 => { - if patlimit && patterns.len() > 16 { - debug!( - "skipping Teddy (mask len: 1) because there are \ - too many patterns", - ); - } - debug!("Teddy choice: 128-bit slim, 1 byte"); - SlimNeon::<1>::new(&patterns) - } - 2 => { - if patlimit && patterns.len() > 32 { - debug!( - "skipping Teddy (mask len: 2) because there are \ - too many patterns", - ); - } - debug!("Teddy choice: 128-bit slim, 2 bytes"); - SlimNeon::<2>::new(&patterns) - } - 3 => { - if patlimit && patterns.len() > 48 { - debug!( - "skipping Teddy (mask len: 3) because there are \ - too many patterns", - ); - } - debug!("Teddy choice: 128-bit slim, 3 bytes"); - SlimNeon::<3>::new(&patterns) - } - 4 => { - debug!("Teddy choice: 128-bit slim, 4 bytes"); - SlimNeon::<4>::new(&patterns) - } - _ => { - debug!("no supported Teddy configuration found"); - None - } - } - } - #[cfg(not(any( - all(target_arch = "x86_64", target_feature = "sse2"), - all( - target_arch = "aarch64", - target_feature = "neon", - target_endian = "little" - ) - )))] - { - None - } - } -} - -/// A searcher that dispatches to one of several possible Teddy variants. -#[derive(Clone, Debug)] -pub(crate) struct Searcher { - /// The Teddy variant we use. We use dynamic dispatch under the theory that - /// it results in better codegen then a enum, although this is a specious - /// claim. - /// - /// This `Searcher` is essentially a wrapper for a `SearcherT` trait - /// object. We just make `memory_usage` and `minimum_len` available without - /// going through dynamic dispatch. - imp: Arc<dyn SearcherT>, - /// Total heap memory used by the Teddy variant. - memory_usage: usize, - /// The minimum haystack length this searcher can handle. It is intended - /// for callers to use some other search routine (such as Rabin-Karp) in - /// cases where the haystack (or remainer of the haystack) is too short. - minimum_len: usize, -} - -impl Searcher { - /// Look for the leftmost occurrence of any pattern in this search in the - /// given haystack starting at the given position. - /// - /// # Panics - /// - /// This panics when `haystack[at..].len()` is less than the minimum length - /// for this haystack. - #[inline(always)] - pub(crate) fn find( - &self, - haystack: &[u8], - at: usize, - ) -> Option<crate::Match> { - // SAFETY: The Teddy implementations all require a minimum haystack - // length, and this is required for safety. Therefore, we assert it - // here in order to make this method sound. - assert!(haystack[at..].len() >= self.minimum_len); - let hayptr = haystack.as_ptr(); - // SAFETY: Construction of the searcher guarantees that we are able - // to run it in the current environment (i.e., we won't get an AVX2 - // searcher on a x86-64 CPU without AVX2 support). Also, the pointers - // are valid as they are derived directly from a borrowed slice. 
- let teddym = unsafe { - self.imp.find(hayptr.add(at), hayptr.add(haystack.len()))? - }; - let start = teddym.start().as_usize().wrapping_sub(hayptr.as_usize()); - let end = teddym.end().as_usize().wrapping_sub(hayptr.as_usize()); - let span = crate::Span { start, end }; - // OK because we won't permit the construction of a searcher that - // could report a pattern ID bigger than what can fit in the crate-wide - // PatternID type. - let pid = crate::PatternID::new_unchecked(teddym.pattern().as_usize()); - let m = crate::Match::new(pid, span); - Some(m) - } - - /// Returns the approximate total amount of heap used by this type, in - /// units of bytes. - #[inline(always)] - pub(crate) fn memory_usage(&self) -> usize { - self.memory_usage - } - - /// Returns the minimum length, in bytes, that a haystack must be in order - /// to use it with this searcher. - #[inline(always)] - pub(crate) fn minimum_len(&self) -> usize { - self.minimum_len - } -} - -/// A trait that provides dynamic dispatch over the different possible Teddy -/// variants on the same algorithm. -/// -/// On `x86_64` for example, it isn't known until runtime which of 12 possible -/// variants will be used. One might use one of the four slim 128-bit vector -/// variants, or one of the four 256-bit vector variants or even one of the -/// four fat 256-bit vector variants. -/// -/// Since this choice is generally made when the Teddy searcher is constructed -/// and this choice is based on the patterns given and what the current CPU -/// supports, it follows that there must be some kind of indirection at search -/// time that "selects" the variant chosen at build time. -/// -/// There are a few different ways to go about this. One approach is to use an -/// enum. It works fine, but in my experiments, this generally results in worse -/// codegen. Another approach, which is what we use here, is dynamic dispatch -/// via a trait object. We basically implement this trait for each possible -/// variant, select the variant we want at build time and convert it to a -/// trait object for use at search time. -/// -/// Another approach is to use function pointers and stick each of the possible -/// variants into a union. This is essentially isomorphic to the dynamic -/// dispatch approach, but doesn't require any allocations. Since this crate -/// requires `alloc`, there's no real reason (AFAIK) to go down this path. (The -/// `memchr` crate does this.) -trait SearcherT: - Debug + Send + Sync + UnwindSafe + RefUnwindSafe + 'static -{ - /// Execute a search on the given haystack (identified by `start` and `end` - /// raw pointers). - /// - /// # Safety - /// - /// Essentially, the `start` and `end` pointers must be valid and point - /// to a haystack one can read. As long as you derive them from, for - /// example, a `&[u8]`, they should automatically satisfy all of the safety - /// obligations: - /// - /// * Both `start` and `end` must be valid for reads. - /// * Both `start` and `end` must point to an initialized value. - /// * Both `start` and `end` must point to the same allocated object and - /// must either be in bounds or at most one byte past the end of the - /// allocated object. - /// * Both `start` and `end` must be _derived from_ a pointer to the same - /// object. - /// * The distance between `start` and `end` must not overflow `isize`. - /// * The distance being in bounds must not rely on "wrapping around" the - /// address space. - /// * It must be the case that `start <= end`. 
- /// * `end - start` must be greater than the minimum length for this - /// searcher. - /// - /// Also, it is expected that implementations of this trait will tag this - /// method with a `target_feature` attribute. Callers must ensure that - /// they are executing this method in an environment where that attribute - /// is valid. - unsafe fn find(&self, start: *const u8, end: *const u8) -> Option<Match>; -} - -#[cfg(all(target_arch = "x86_64", target_feature = "sse2"))] -mod x86_64 { - use core::arch::x86_64::{__m128i, __m256i}; - - use alloc::sync::Arc; - - use crate::packed::{ - ext::Pointer, - pattern::Patterns, - teddy::generic::{self, Match}, - }; - - use super::{Searcher, SearcherT}; - - #[derive(Clone, Debug)] - pub(super) struct SlimSSSE3<const BYTES: usize> { - slim128: generic::Slim<__m128i, BYTES>, - } - - // Defines SlimSSSE3 wrapper functions for 1, 2, 3 and 4 bytes. - macro_rules! slim_ssse3 { - ($len:expr) => { - impl SlimSSSE3<$len> { - /// Creates a new searcher using "slim" Teddy with 128-bit - /// vectors. If SSSE3 is not available in the current - /// environment, then this returns `None`. - pub(super) fn new( - patterns: &Arc<Patterns>, - ) -> Option<Searcher> { - if !is_available_ssse3() { - return None; - } - Some(unsafe { SlimSSSE3::<$len>::new_unchecked(patterns) }) - } - - /// Creates a new searcher using "slim" Teddy with 256-bit - /// vectors without checking whether SSSE3 is available or not. - /// - /// # Safety - /// - /// Callers must ensure that SSSE3 is available in the current - /// environment. - #[target_feature(enable = "ssse3")] - unsafe fn new_unchecked(patterns: &Arc<Patterns>) -> Searcher { - let slim128 = generic::Slim::<__m128i, $len>::new( - Arc::clone(patterns), - ); - let memory_usage = slim128.memory_usage(); - let minimum_len = slim128.minimum_len(); - let imp = Arc::new(SlimSSSE3 { slim128 }); - Searcher { imp, memory_usage, minimum_len } - } - } - - impl SearcherT for SlimSSSE3<$len> { - #[target_feature(enable = "ssse3")] - #[inline] - unsafe fn find( - &self, - start: *const u8, - end: *const u8, - ) -> Option<Match> { - // SAFETY: All obligations except for `target_feature` are - // passed to the caller. Our use of `target_feature` is - // safe because construction of this type requires that the - // requisite target features are available. - self.slim128.find(start, end) - } - } - }; - } - - slim_ssse3!(1); - slim_ssse3!(2); - slim_ssse3!(3); - slim_ssse3!(4); - - #[derive(Clone, Debug)] - pub(super) struct SlimAVX2<const BYTES: usize> { - slim128: generic::Slim<__m128i, BYTES>, - slim256: generic::Slim<__m256i, BYTES>, - } - - // Defines SlimAVX2 wrapper functions for 1, 2, 3 and 4 bytes. - macro_rules! slim_avx2 { - ($len:expr) => { - impl SlimAVX2<$len> { - /// Creates a new searcher using "slim" Teddy with 256-bit - /// vectors. If AVX2 is not available in the current - /// environment, then this returns `None`. - pub(super) fn new( - patterns: &Arc<Patterns>, - ) -> Option<Searcher> { - if !is_available_avx2() { - return None; - } - Some(unsafe { SlimAVX2::<$len>::new_unchecked(patterns) }) - } - - /// Creates a new searcher using "slim" Teddy with 256-bit - /// vectors without checking whether AVX2 is available or not. - /// - /// # Safety - /// - /// Callers must ensure that AVX2 is available in the current - /// environment. 
- #[target_feature(enable = "avx2")] - unsafe fn new_unchecked(patterns: &Arc<Patterns>) -> Searcher { - let slim128 = generic::Slim::<__m128i, $len>::new( - Arc::clone(&patterns), - ); - let slim256 = generic::Slim::<__m256i, $len>::new( - Arc::clone(&patterns), - ); - let memory_usage = - slim128.memory_usage() + slim256.memory_usage(); - let minimum_len = slim128.minimum_len(); - let imp = Arc::new(SlimAVX2 { slim128, slim256 }); - Searcher { imp, memory_usage, minimum_len } - } - } - - impl SearcherT for SlimAVX2<$len> { - #[target_feature(enable = "avx2")] - #[inline] - unsafe fn find( - &self, - start: *const u8, - end: *const u8, - ) -> Option<Match> { - // SAFETY: All obligations except for `target_feature` are - // passed to the caller. Our use of `target_feature` is - // safe because construction of this type requires that the - // requisite target features are available. - let len = end.distance(start); - if len < self.slim256.minimum_len() { - self.slim128.find(start, end) - } else { - self.slim256.find(start, end) - } - } - } - }; - } - - slim_avx2!(1); - slim_avx2!(2); - slim_avx2!(3); - slim_avx2!(4); - - #[derive(Clone, Debug)] - pub(super) struct FatAVX2<const BYTES: usize> { - fat256: generic::Fat<__m256i, BYTES>, - } - - // Defines SlimAVX2 wrapper functions for 1, 2, 3 and 4 bytes. - macro_rules! fat_avx2 { - ($len:expr) => { - impl FatAVX2<$len> { - /// Creates a new searcher using "slim" Teddy with 256-bit - /// vectors. If AVX2 is not available in the current - /// environment, then this returns `None`. - pub(super) fn new( - patterns: &Arc<Patterns>, - ) -> Option<Searcher> { - if !is_available_avx2() { - return None; - } - Some(unsafe { FatAVX2::<$len>::new_unchecked(patterns) }) - } - - /// Creates a new searcher using "slim" Teddy with 256-bit - /// vectors without checking whether AVX2 is available or not. - /// - /// # Safety - /// - /// Callers must ensure that AVX2 is available in the current - /// environment. - #[target_feature(enable = "avx2")] - unsafe fn new_unchecked(patterns: &Arc<Patterns>) -> Searcher { - let fat256 = generic::Fat::<__m256i, $len>::new( - Arc::clone(&patterns), - ); - let memory_usage = fat256.memory_usage(); - let minimum_len = fat256.minimum_len(); - let imp = Arc::new(FatAVX2 { fat256 }); - Searcher { imp, memory_usage, minimum_len } - } - } - - impl SearcherT for FatAVX2<$len> { - #[target_feature(enable = "avx2")] - #[inline] - unsafe fn find( - &self, - start: *const u8, - end: *const u8, - ) -> Option<Match> { - // SAFETY: All obligations except for `target_feature` are - // passed to the caller. Our use of `target_feature` is - // safe because construction of this type requires that the - // requisite target features are available. 
- self.fat256.find(start, end) - } - } - }; - } - - fat_avx2!(1); - fat_avx2!(2); - fat_avx2!(3); - fat_avx2!(4); - - #[inline] - pub(super) fn is_available_ssse3() -> bool { - #[cfg(not(target_feature = "sse2"))] - { - false - } - #[cfg(target_feature = "sse2")] - { - #[cfg(target_feature = "ssse3")] - { - true - } - #[cfg(not(target_feature = "ssse3"))] - { - #[cfg(feature = "std")] - { - std::is_x86_feature_detected!("ssse3") - } - #[cfg(not(feature = "std"))] - { - false - } - } - } - } - - #[inline] - pub(super) fn is_available_avx2() -> bool { - #[cfg(not(target_feature = "sse2"))] - { - false - } - #[cfg(target_feature = "sse2")] - { - #[cfg(target_feature = "avx2")] - { - true - } - #[cfg(not(target_feature = "avx2"))] - { - #[cfg(feature = "std")] - { - std::is_x86_feature_detected!("avx2") - } - #[cfg(not(feature = "std"))] - { - false - } - } - } - } -} - -#[cfg(all( - target_arch = "aarch64", - target_feature = "neon", - target_endian = "little" -))] -mod aarch64 { - use core::arch::aarch64::uint8x16_t; - - use alloc::sync::Arc; - - use crate::packed::{ - pattern::Patterns, - teddy::generic::{self, Match}, - }; - - use super::{Searcher, SearcherT}; - - #[derive(Clone, Debug)] - pub(super) struct SlimNeon<const BYTES: usize> { - slim128: generic::Slim<uint8x16_t, BYTES>, - } - - // Defines SlimSSSE3 wrapper functions for 1, 2, 3 and 4 bytes. - macro_rules! slim_neon { - ($len:expr) => { - impl SlimNeon<$len> { - /// Creates a new searcher using "slim" Teddy with 128-bit - /// vectors. If SSSE3 is not available in the current - /// environment, then this returns `None`. - pub(super) fn new( - patterns: &Arc<Patterns>, - ) -> Option<Searcher> { - Some(unsafe { SlimNeon::<$len>::new_unchecked(patterns) }) - } - - /// Creates a new searcher using "slim" Teddy with 256-bit - /// vectors without checking whether SSSE3 is available or not. - /// - /// # Safety - /// - /// Callers must ensure that SSSE3 is available in the current - /// environment. - #[target_feature(enable = "neon")] - unsafe fn new_unchecked(patterns: &Arc<Patterns>) -> Searcher { - let slim128 = generic::Slim::<uint8x16_t, $len>::new( - Arc::clone(patterns), - ); - let memory_usage = slim128.memory_usage(); - let minimum_len = slim128.minimum_len(); - let imp = Arc::new(SlimNeon { slim128 }); - Searcher { imp, memory_usage, minimum_len } - } - } - - impl SearcherT for SlimNeon<$len> { - #[target_feature(enable = "neon")] - #[inline] - unsafe fn find( - &self, - start: *const u8, - end: *const u8, - ) -> Option<Match> { - // SAFETY: All obligations except for `target_feature` are - // passed to the caller. Our use of `target_feature` is - // safe because construction of this type requires that the - // requisite target features are available. - self.slim128.find(start, end) - } - } - }; - } - - slim_neon!(1); - slim_neon!(2); - slim_neon!(3); - slim_neon!(4); -} diff --git a/vendor/aho-corasick/src/packed/teddy/generic.rs b/vendor/aho-corasick/src/packed/teddy/generic.rs deleted file mode 100644 index 2aacd003576069..00000000000000 --- a/vendor/aho-corasick/src/packed/teddy/generic.rs +++ /dev/null @@ -1,1382 +0,0 @@ -use core::fmt::Debug; - -use alloc::{ - boxed::Box, collections::BTreeMap, format, sync::Arc, vec, vec::Vec, -}; - -use crate::{ - packed::{ - ext::Pointer, - pattern::Patterns, - vector::{FatVector, Vector}, - }, - util::int::U32, - PatternID, -}; - -/// A match type specialized to the Teddy implementations below. 
-/// -/// Essentially, instead of representing a match at byte offsets, we use -/// raw pointers. This is because the implementations below operate on raw -/// pointers, and so this is a more natural return type based on how the -/// implementation works. -/// -/// Also, the `PatternID` used here is a `u16`. -#[derive(Clone, Copy, Debug)] -pub(crate) struct Match { - pid: PatternID, - start: *const u8, - end: *const u8, -} - -impl Match { - /// Returns the ID of the pattern that matched. - pub(crate) fn pattern(&self) -> PatternID { - self.pid - } - - /// Returns a pointer into the haystack at which the match starts. - pub(crate) fn start(&self) -> *const u8 { - self.start - } - - /// Returns a pointer into the haystack at which the match ends. - pub(crate) fn end(&self) -> *const u8 { - self.end - } -} - -/// A "slim" Teddy implementation that is generic over both the vector type -/// and the minimum length of the patterns being searched for. -/// -/// Only 1, 2, 3 and 4 bytes are supported as minimum lengths. -#[derive(Clone, Debug)] -pub(crate) struct Slim<V, const BYTES: usize> { - /// A generic data structure for doing "slim" Teddy verification. - teddy: Teddy<8>, - /// The masks used as inputs to the shuffle operation to generate - /// candidates (which are fed into the verification routines). - masks: [Mask<V>; BYTES], -} - -impl<V: Vector, const BYTES: usize> Slim<V, BYTES> { - /// Create a new "slim" Teddy searcher for the given patterns. - /// - /// # Panics - /// - /// This panics when `BYTES` is any value other than 1, 2, 3 or 4. - /// - /// # Safety - /// - /// Callers must ensure that this is okay to call in the current target for - /// the current CPU. - #[inline(always)] - pub(crate) unsafe fn new(patterns: Arc<Patterns>) -> Slim<V, BYTES> { - assert!( - 1 <= BYTES && BYTES <= 4, - "only 1, 2, 3 or 4 bytes are supported" - ); - let teddy = Teddy::new(patterns); - let masks = SlimMaskBuilder::from_teddy(&teddy); - Slim { teddy, masks } - } - - /// Returns the approximate total amount of heap used by this type, in - /// units of bytes. - #[inline(always)] - pub(crate) fn memory_usage(&self) -> usize { - self.teddy.memory_usage() - } - - /// Returns the minimum length, in bytes, that a haystack must be in order - /// to use it with this searcher. - #[inline(always)] - pub(crate) fn minimum_len(&self) -> usize { - V::BYTES + (BYTES - 1) - } -} - -impl<V: Vector> Slim<V, 1> { - /// Look for an occurrences of the patterns in this finder in the haystack - /// given by the `start` and `end` pointers. - /// - /// If no match could be found, then `None` is returned. - /// - /// # Safety - /// - /// The given pointers representing the haystack must be valid to read - /// from. They must also point to a region of memory that is at least the - /// minimum length required by this searcher. - /// - /// Callers must ensure that this is okay to call in the current target for - /// the current CPU. - #[inline(always)] - pub(crate) unsafe fn find( - &self, - start: *const u8, - end: *const u8, - ) -> Option<Match> { - let len = end.distance(start); - debug_assert!(len >= self.minimum_len()); - let mut cur = start; - while cur <= end.sub(V::BYTES) { - if let Some(m) = self.find_one(cur, end) { - return Some(m); - } - cur = cur.add(V::BYTES); - } - if cur < end { - cur = end.sub(V::BYTES); - if let Some(m) = self.find_one(cur, end) { - return Some(m); - } - } - None - } - - /// Look for a match starting at the `V::BYTES` at and after `cur`. If - /// there isn't one, then `None` is returned. 
- /// - /// # Safety - /// - /// The given pointers representing the haystack must be valid to read - /// from. They must also point to a region of memory that is at least the - /// minimum length required by this searcher. - /// - /// Callers must ensure that this is okay to call in the current target for - /// the current CPU. - #[inline(always)] - unsafe fn find_one( - &self, - cur: *const u8, - end: *const u8, - ) -> Option<Match> { - let c = self.candidate(cur); - if !c.is_zero() { - if let Some(m) = self.teddy.verify(cur, end, c) { - return Some(m); - } - } - None - } - - /// Look for a candidate match (represented as a vector) starting at the - /// `V::BYTES` at and after `cur`. If there isn't one, then a vector with - /// all bits set to zero is returned. - /// - /// # Safety - /// - /// The given pointer representing the haystack must be valid to read - /// from. - /// - /// Callers must ensure that this is okay to call in the current target for - /// the current CPU. - #[inline(always)] - unsafe fn candidate(&self, cur: *const u8) -> V { - let chunk = V::load_unaligned(cur); - Mask::members1(chunk, self.masks) - } -} - -impl<V: Vector> Slim<V, 2> { - /// See Slim<V, 1>::find. - #[inline(always)] - pub(crate) unsafe fn find( - &self, - start: *const u8, - end: *const u8, - ) -> Option<Match> { - let len = end.distance(start); - debug_assert!(len >= self.minimum_len()); - let mut cur = start.add(1); - let mut prev0 = V::splat(0xFF); - while cur <= end.sub(V::BYTES) { - if let Some(m) = self.find_one(cur, end, &mut prev0) { - return Some(m); - } - cur = cur.add(V::BYTES); - } - if cur < end { - cur = end.sub(V::BYTES); - prev0 = V::splat(0xFF); - if let Some(m) = self.find_one(cur, end, &mut prev0) { - return Some(m); - } - } - None - } - - /// See Slim<V, 1>::find_one. - #[inline(always)] - unsafe fn find_one( - &self, - cur: *const u8, - end: *const u8, - prev0: &mut V, - ) -> Option<Match> { - let c = self.candidate(cur, prev0); - if !c.is_zero() { - if let Some(m) = self.teddy.verify(cur.sub(1), end, c) { - return Some(m); - } - } - None - } - - /// See Slim<V, 1>::candidate. - #[inline(always)] - unsafe fn candidate(&self, cur: *const u8, prev0: &mut V) -> V { - let chunk = V::load_unaligned(cur); - let (res0, res1) = Mask::members2(chunk, self.masks); - let res0prev0 = res0.shift_in_one_byte(*prev0); - let res = res0prev0.and(res1); - *prev0 = res0; - res - } -} - -impl<V: Vector> Slim<V, 3> { - /// See Slim<V, 1>::find. - #[inline(always)] - pub(crate) unsafe fn find( - &self, - start: *const u8, - end: *const u8, - ) -> Option<Match> { - let len = end.distance(start); - debug_assert!(len >= self.minimum_len()); - let mut cur = start.add(2); - let mut prev0 = V::splat(0xFF); - let mut prev1 = V::splat(0xFF); - while cur <= end.sub(V::BYTES) { - if let Some(m) = self.find_one(cur, end, &mut prev0, &mut prev1) { - return Some(m); - } - cur = cur.add(V::BYTES); - } - if cur < end { - cur = end.sub(V::BYTES); - prev0 = V::splat(0xFF); - prev1 = V::splat(0xFF); - if let Some(m) = self.find_one(cur, end, &mut prev0, &mut prev1) { - return Some(m); - } - } - None - } - - /// See Slim<V, 1>::find_one. - #[inline(always)] - unsafe fn find_one( - &self, - cur: *const u8, - end: *const u8, - prev0: &mut V, - prev1: &mut V, - ) -> Option<Match> { - let c = self.candidate(cur, prev0, prev1); - if !c.is_zero() { - if let Some(m) = self.teddy.verify(cur.sub(2), end, c) { - return Some(m); - } - } - None - } - - /// See Slim<V, 1>::candidate. 
- #[inline(always)] - unsafe fn candidate( - &self, - cur: *const u8, - prev0: &mut V, - prev1: &mut V, - ) -> V { - let chunk = V::load_unaligned(cur); - let (res0, res1, res2) = Mask::members3(chunk, self.masks); - let res0prev0 = res0.shift_in_two_bytes(*prev0); - let res1prev1 = res1.shift_in_one_byte(*prev1); - let res = res0prev0.and(res1prev1).and(res2); - *prev0 = res0; - *prev1 = res1; - res - } -} - -impl<V: Vector> Slim<V, 4> { - /// See Slim<V, 1>::find. - #[inline(always)] - pub(crate) unsafe fn find( - &self, - start: *const u8, - end: *const u8, - ) -> Option<Match> { - let len = end.distance(start); - debug_assert!(len >= self.minimum_len()); - let mut cur = start.add(3); - let mut prev0 = V::splat(0xFF); - let mut prev1 = V::splat(0xFF); - let mut prev2 = V::splat(0xFF); - while cur <= end.sub(V::BYTES) { - if let Some(m) = - self.find_one(cur, end, &mut prev0, &mut prev1, &mut prev2) - { - return Some(m); - } - cur = cur.add(V::BYTES); - } - if cur < end { - cur = end.sub(V::BYTES); - prev0 = V::splat(0xFF); - prev1 = V::splat(0xFF); - prev2 = V::splat(0xFF); - if let Some(m) = - self.find_one(cur, end, &mut prev0, &mut prev1, &mut prev2) - { - return Some(m); - } - } - None - } - - /// See Slim<V, 1>::find_one. - #[inline(always)] - unsafe fn find_one( - &self, - cur: *const u8, - end: *const u8, - prev0: &mut V, - prev1: &mut V, - prev2: &mut V, - ) -> Option<Match> { - let c = self.candidate(cur, prev0, prev1, prev2); - if !c.is_zero() { - if let Some(m) = self.teddy.verify(cur.sub(3), end, c) { - return Some(m); - } - } - None - } - - /// See Slim<V, 1>::candidate. - #[inline(always)] - unsafe fn candidate( - &self, - cur: *const u8, - prev0: &mut V, - prev1: &mut V, - prev2: &mut V, - ) -> V { - let chunk = V::load_unaligned(cur); - let (res0, res1, res2, res3) = Mask::members4(chunk, self.masks); - let res0prev0 = res0.shift_in_three_bytes(*prev0); - let res1prev1 = res1.shift_in_two_bytes(*prev1); - let res2prev2 = res2.shift_in_one_byte(*prev2); - let res = res0prev0.and(res1prev1).and(res2prev2).and(res3); - *prev0 = res0; - *prev1 = res1; - *prev2 = res2; - res - } -} - -/// A "fat" Teddy implementation that is generic over both the vector type -/// and the minimum length of the patterns being searched for. -/// -/// Only 1, 2, 3 and 4 bytes are supported as minimum lengths. -#[derive(Clone, Debug)] -pub(crate) struct Fat<V, const BYTES: usize> { - /// A generic data structure for doing "fat" Teddy verification. - teddy: Teddy<16>, - /// The masks used as inputs to the shuffle operation to generate - /// candidates (which are fed into the verification routines). - masks: [Mask<V>; BYTES], -} - -impl<V: FatVector, const BYTES: usize> Fat<V, BYTES> { - /// Create a new "fat" Teddy searcher for the given patterns. - /// - /// # Panics - /// - /// This panics when `BYTES` is any value other than 1, 2, 3 or 4. - /// - /// # Safety - /// - /// Callers must ensure that this is okay to call in the current target for - /// the current CPU. - #[inline(always)] - pub(crate) unsafe fn new(patterns: Arc<Patterns>) -> Fat<V, BYTES> { - assert!( - 1 <= BYTES && BYTES <= 4, - "only 1, 2, 3 or 4 bytes are supported" - ); - let teddy = Teddy::new(patterns); - let masks = FatMaskBuilder::from_teddy(&teddy); - Fat { teddy, masks } - } - - /// Returns the approximate total amount of heap used by this type, in - /// units of bytes. 
- #[inline(always)] - pub(crate) fn memory_usage(&self) -> usize { - self.teddy.memory_usage() - } - - /// Returns the minimum length, in bytes, that a haystack must be in order - /// to use it with this searcher. - #[inline(always)] - pub(crate) fn minimum_len(&self) -> usize { - V::Half::BYTES + (BYTES - 1) - } -} - -impl<V: FatVector> Fat<V, 1> { - /// Look for an occurrences of the patterns in this finder in the haystack - /// given by the `start` and `end` pointers. - /// - /// If no match could be found, then `None` is returned. - /// - /// # Safety - /// - /// The given pointers representing the haystack must be valid to read - /// from. They must also point to a region of memory that is at least the - /// minimum length required by this searcher. - /// - /// Callers must ensure that this is okay to call in the current target for - /// the current CPU. - #[inline(always)] - pub(crate) unsafe fn find( - &self, - start: *const u8, - end: *const u8, - ) -> Option<Match> { - let len = end.distance(start); - debug_assert!(len >= self.minimum_len()); - let mut cur = start; - while cur <= end.sub(V::Half::BYTES) { - if let Some(m) = self.find_one(cur, end) { - return Some(m); - } - cur = cur.add(V::Half::BYTES); - } - if cur < end { - cur = end.sub(V::Half::BYTES); - if let Some(m) = self.find_one(cur, end) { - return Some(m); - } - } - None - } - - /// Look for a match starting at the `V::BYTES` at and after `cur`. If - /// there isn't one, then `None` is returned. - /// - /// # Safety - /// - /// The given pointers representing the haystack must be valid to read - /// from. They must also point to a region of memory that is at least the - /// minimum length required by this searcher. - /// - /// Callers must ensure that this is okay to call in the current target for - /// the current CPU. - #[inline(always)] - unsafe fn find_one( - &self, - cur: *const u8, - end: *const u8, - ) -> Option<Match> { - let c = self.candidate(cur); - if !c.is_zero() { - if let Some(m) = self.teddy.verify(cur, end, c) { - return Some(m); - } - } - None - } - - /// Look for a candidate match (represented as a vector) starting at the - /// `V::BYTES` at and after `cur`. If there isn't one, then a vector with - /// all bits set to zero is returned. - /// - /// # Safety - /// - /// The given pointer representing the haystack must be valid to read - /// from. - /// - /// Callers must ensure that this is okay to call in the current target for - /// the current CPU. - #[inline(always)] - unsafe fn candidate(&self, cur: *const u8) -> V { - let chunk = V::load_half_unaligned(cur); - Mask::members1(chunk, self.masks) - } -} - -impl<V: FatVector> Fat<V, 2> { - /// See `Fat<V, 1>::find`. - #[inline(always)] - pub(crate) unsafe fn find( - &self, - start: *const u8, - end: *const u8, - ) -> Option<Match> { - let len = end.distance(start); - debug_assert!(len >= self.minimum_len()); - let mut cur = start.add(1); - let mut prev0 = V::splat(0xFF); - while cur <= end.sub(V::Half::BYTES) { - if let Some(m) = self.find_one(cur, end, &mut prev0) { - return Some(m); - } - cur = cur.add(V::Half::BYTES); - } - if cur < end { - cur = end.sub(V::Half::BYTES); - prev0 = V::splat(0xFF); - if let Some(m) = self.find_one(cur, end, &mut prev0) { - return Some(m); - } - } - None - } - - /// See `Fat<V, 1>::find_one`. 
- #[inline(always)] - unsafe fn find_one( - &self, - cur: *const u8, - end: *const u8, - prev0: &mut V, - ) -> Option<Match> { - let c = self.candidate(cur, prev0); - if !c.is_zero() { - if let Some(m) = self.teddy.verify(cur.sub(1), end, c) { - return Some(m); - } - } - None - } - - /// See `Fat<V, 1>::candidate`. - #[inline(always)] - unsafe fn candidate(&self, cur: *const u8, prev0: &mut V) -> V { - let chunk = V::load_half_unaligned(cur); - let (res0, res1) = Mask::members2(chunk, self.masks); - let res0prev0 = res0.half_shift_in_one_byte(*prev0); - let res = res0prev0.and(res1); - *prev0 = res0; - res - } -} - -impl<V: FatVector> Fat<V, 3> { - /// See `Fat<V, 1>::find`. - #[inline(always)] - pub(crate) unsafe fn find( - &self, - start: *const u8, - end: *const u8, - ) -> Option<Match> { - let len = end.distance(start); - debug_assert!(len >= self.minimum_len()); - let mut cur = start.add(2); - let mut prev0 = V::splat(0xFF); - let mut prev1 = V::splat(0xFF); - while cur <= end.sub(V::Half::BYTES) { - if let Some(m) = self.find_one(cur, end, &mut prev0, &mut prev1) { - return Some(m); - } - cur = cur.add(V::Half::BYTES); - } - if cur < end { - cur = end.sub(V::Half::BYTES); - prev0 = V::splat(0xFF); - prev1 = V::splat(0xFF); - if let Some(m) = self.find_one(cur, end, &mut prev0, &mut prev1) { - return Some(m); - } - } - None - } - - /// See `Fat<V, 1>::find_one`. - #[inline(always)] - unsafe fn find_one( - &self, - cur: *const u8, - end: *const u8, - prev0: &mut V, - prev1: &mut V, - ) -> Option<Match> { - let c = self.candidate(cur, prev0, prev1); - if !c.is_zero() { - if let Some(m) = self.teddy.verify(cur.sub(2), end, c) { - return Some(m); - } - } - None - } - - /// See `Fat<V, 1>::candidate`. - #[inline(always)] - unsafe fn candidate( - &self, - cur: *const u8, - prev0: &mut V, - prev1: &mut V, - ) -> V { - let chunk = V::load_half_unaligned(cur); - let (res0, res1, res2) = Mask::members3(chunk, self.masks); - let res0prev0 = res0.half_shift_in_two_bytes(*prev0); - let res1prev1 = res1.half_shift_in_one_byte(*prev1); - let res = res0prev0.and(res1prev1).and(res2); - *prev0 = res0; - *prev1 = res1; - res - } -} - -impl<V: FatVector> Fat<V, 4> { - /// See `Fat<V, 1>::find`. - #[inline(always)] - pub(crate) unsafe fn find( - &self, - start: *const u8, - end: *const u8, - ) -> Option<Match> { - let len = end.distance(start); - debug_assert!(len >= self.minimum_len()); - let mut cur = start.add(3); - let mut prev0 = V::splat(0xFF); - let mut prev1 = V::splat(0xFF); - let mut prev2 = V::splat(0xFF); - while cur <= end.sub(V::Half::BYTES) { - if let Some(m) = - self.find_one(cur, end, &mut prev0, &mut prev1, &mut prev2) - { - return Some(m); - } - cur = cur.add(V::Half::BYTES); - } - if cur < end { - cur = end.sub(V::Half::BYTES); - prev0 = V::splat(0xFF); - prev1 = V::splat(0xFF); - prev2 = V::splat(0xFF); - if let Some(m) = - self.find_one(cur, end, &mut prev0, &mut prev1, &mut prev2) - { - return Some(m); - } - } - None - } - - /// See `Fat<V, 1>::find_one`. - #[inline(always)] - unsafe fn find_one( - &self, - cur: *const u8, - end: *const u8, - prev0: &mut V, - prev1: &mut V, - prev2: &mut V, - ) -> Option<Match> { - let c = self.candidate(cur, prev0, prev1, prev2); - if !c.is_zero() { - if let Some(m) = self.teddy.verify(cur.sub(3), end, c) { - return Some(m); - } - } - None - } - - /// See `Fat<V, 1>::candidate`. 
- #[inline(always)] - unsafe fn candidate( - &self, - cur: *const u8, - prev0: &mut V, - prev1: &mut V, - prev2: &mut V, - ) -> V { - let chunk = V::load_half_unaligned(cur); - let (res0, res1, res2, res3) = Mask::members4(chunk, self.masks); - let res0prev0 = res0.half_shift_in_three_bytes(*prev0); - let res1prev1 = res1.half_shift_in_two_bytes(*prev1); - let res2prev2 = res2.half_shift_in_one_byte(*prev2); - let res = res0prev0.and(res1prev1).and(res2prev2).and(res3); - *prev0 = res0; - *prev1 = res1; - *prev2 = res2; - res - } -} - -/// The common elements of all "slim" and "fat" Teddy search implementations. -/// -/// Essentially, this contains the patterns and the buckets. Namely, it -/// contains enough to implement the verification step after candidates are -/// identified via the shuffle masks. -/// -/// It is generic over the number of buckets used. In general, the number of -/// buckets is either 8 (for "slim" Teddy) or 16 (for "fat" Teddy). The generic -/// parameter isn't really meant to be instantiated for any value other than -/// 8 or 16, although it is technically possible. The main hiccup is that there -/// is some bit-shifting done in the critical part of verification that could -/// be quite expensive if `N` is not a multiple of 2. -#[derive(Clone, Debug)] -struct Teddy<const BUCKETS: usize> { - /// The patterns we are searching for. - /// - /// A pattern string can be found by its `PatternID`. - patterns: Arc<Patterns>, - /// The allocation of patterns in buckets. This only contains the IDs of - /// patterns. In order to do full verification, callers must provide the - /// actual patterns when using Teddy. - buckets: [Vec<PatternID>; BUCKETS], - // N.B. The above representation is very simple, but it definitely results - // in ping-ponging between different allocations during verification. I've - // tried experimenting with other representations that flatten the pattern - // strings into a single allocation, but it doesn't seem to help much. - // Probably everything is small enough to fit into cache anyway, and so the - // pointer chasing isn't a big deal? - // - // One other avenue I haven't explored is some kind of hashing trick - // that let's us do another high-confidence check before launching into - // `memcmp`. -} - -impl<const BUCKETS: usize> Teddy<BUCKETS> { - /// Create a new generic data structure for Teddy verification. - fn new(patterns: Arc<Patterns>) -> Teddy<BUCKETS> { - assert_ne!(0, patterns.len(), "Teddy requires at least one pattern"); - assert_ne!( - 0, - patterns.minimum_len(), - "Teddy does not support zero-length patterns" - ); - assert!( - BUCKETS == 8 || BUCKETS == 16, - "Teddy only supports 8 or 16 buckets" - ); - // MSRV(1.63): Use core::array::from_fn below instead of allocating a - // superfluous outer Vec. Not a big deal (especially given the BTreeMap - // allocation below), but nice to not do it. - let buckets = - <[Vec<PatternID>; BUCKETS]>::try_from(vec![vec![]; BUCKETS]) - .unwrap(); - let mut t = Teddy { patterns, buckets }; - - let mut map: BTreeMap<Box<[u8]>, usize> = BTreeMap::new(); - for (id, pattern) in t.patterns.iter() { - // We try to be slightly clever in how we assign patterns into - // buckets. Generally speaking, we want patterns with the same - // prefix to be in the same bucket, since it minimizes the amount - // of time we spend churning through buckets in the verification - // step. 
- // - // So we could assign patterns with the same N-prefix (where N is - // the size of the mask, which is one of {1, 2, 3}) to the same - // bucket. However, case insensitive searches are fairly common, so - // we'd for example, ideally want to treat `abc` and `ABC` as if - // they shared the same prefix. ASCII has the nice property that - // the lower 4 bits of A and a are the same, so we therefore group - // patterns with the same low-nybble-N-prefix into the same bucket. - // - // MOREOVER, this is actually necessary for correctness! In - // particular, by grouping patterns with the same prefix into the - // same bucket, we ensure that we preserve correct leftmost-first - // and leftmost-longest match semantics. In addition to the fact - // that `patterns.iter()` iterates in the correct order, this - // guarantees that all possible ambiguous matches will occur in - // the same bucket. The verification routine could be adjusted to - // support correct leftmost match semantics regardless of bucket - // allocation, but that results in a performance hit. It's much - // nicer to be able to just stop as soon as a match is found. - let lonybs = pattern.low_nybbles(t.mask_len()); - if let Some(&bucket) = map.get(&lonybs) { - t.buckets[bucket].push(id); - } else { - // N.B. We assign buckets in reverse because it shouldn't have - // any influence on performance, but it does make it harder to - // get leftmost match semantics accidentally correct. - let bucket = (BUCKETS - 1) - (id.as_usize() % BUCKETS); - t.buckets[bucket].push(id); - map.insert(lonybs, bucket); - } - } - t - } - - /// Verify whether there are any matches starting at or after `cur` in the - /// haystack. The candidate chunk given should correspond to 8-bit bitsets - /// for N buckets. - /// - /// # Safety - /// - /// The given pointers representing the haystack must be valid to read - /// from. - #[inline(always)] - unsafe fn verify64( - &self, - cur: *const u8, - end: *const u8, - mut candidate_chunk: u64, - ) -> Option<Match> { - while candidate_chunk != 0 { - let bit = candidate_chunk.trailing_zeros().as_usize(); - candidate_chunk &= !(1 << bit); - - let cur = cur.add(bit / BUCKETS); - let bucket = bit % BUCKETS; - if let Some(m) = self.verify_bucket(cur, end, bucket) { - return Some(m); - } - } - None - } - - /// Verify whether there are any matches starting at `at` in the given - /// `haystack` corresponding only to patterns in the given bucket. - /// - /// # Safety - /// - /// The given pointers representing the haystack must be valid to read - /// from. - /// - /// The bucket index must be less than or equal to `self.buckets.len()`. - #[inline(always)] - unsafe fn verify_bucket( - &self, - cur: *const u8, - end: *const u8, - bucket: usize, - ) -> Option<Match> { - debug_assert!(bucket < self.buckets.len()); - // SAFETY: The caller must ensure that the bucket index is correct. - for pid in self.buckets.get_unchecked(bucket).iter().copied() { - // SAFETY: This is safe because we are guaranteed that every - // index in a Teddy bucket is a valid index into `pats`, by - // construction. - debug_assert!(pid.as_usize() < self.patterns.len()); - let pat = self.patterns.get_unchecked(pid); - if pat.is_prefix_raw(cur, end) { - let start = cur; - let end = start.add(pat.len()); - return Some(Match { pid, start, end }); - } - } - None - } - - /// Returns the total number of masks required by the patterns in this - /// Teddy searcher. 
- /// - /// Basically, the mask length corresponds to the type of Teddy searcher - /// to use: a 1-byte, 2-byte, 3-byte or 4-byte searcher. The bigger the - /// better, typically, since searching for longer substrings usually - /// decreases the rate of false positives. Therefore, the number of masks - /// needed is the length of the shortest pattern in this searcher. If the - /// length of the shortest pattern (in bytes) is bigger than 4, then the - /// mask length is 4 since there are no Teddy searchers for more than 4 - /// bytes. - fn mask_len(&self) -> usize { - core::cmp::min(4, self.patterns.minimum_len()) - } - - /// Returns the approximate total amount of heap used by this type, in - /// units of bytes. - fn memory_usage(&self) -> usize { - // This is an upper bound rather than a precise accounting. No - // particular reason, other than it's probably very close to actual - // memory usage in practice. - self.patterns.len() * core::mem::size_of::<PatternID>() - } -} - -impl Teddy<8> { - /// Runs the verification routine for "slim" Teddy. - /// - /// The candidate given should be a collection of 8-bit bitsets (one bitset - /// per lane), where the ith bit is set in the jth lane if and only if the - /// byte occurring at `at + j` in `cur` is in the bucket `i`. - /// - /// # Safety - /// - /// Callers must ensure that this is okay to call in the current target for - /// the current CPU. - /// - /// The given pointers must be valid to read from. - #[inline(always)] - unsafe fn verify<V: Vector>( - &self, - mut cur: *const u8, - end: *const u8, - candidate: V, - ) -> Option<Match> { - debug_assert!(!candidate.is_zero()); - // Convert the candidate into 64-bit chunks, and then verify each of - // those chunks. - candidate.for_each_64bit_lane( - #[inline(always)] - |_, chunk| { - let result = self.verify64(cur, end, chunk); - cur = cur.add(8); - result - }, - ) - } -} - -impl Teddy<16> { - /// Runs the verification routine for "fat" Teddy. - /// - /// The candidate given should be a collection of 8-bit bitsets (one bitset - /// per lane), where the ith bit is set in the jth lane if and only if the - /// byte occurring at `at + (j < 16 ? j : j - 16)` in `cur` is in the - /// bucket `j < 16 ? i : i + 8`. - /// - /// # Safety - /// - /// Callers must ensure that this is okay to call in the current target for - /// the current CPU. - /// - /// The given pointers must be valid to read from. - #[inline(always)] - unsafe fn verify<V: FatVector>( - &self, - mut cur: *const u8, - end: *const u8, - candidate: V, - ) -> Option<Match> { - // This is a bit tricky, but we basically want to convert our - // candidate, which looks like this (assuming a 256-bit vector): - // - // a31 a30 ... a17 a16 a15 a14 ... a01 a00 - // - // where each a(i) is an 8-bit bitset corresponding to the activated - // buckets, to this - // - // a31 a15 a30 a14 a29 a13 ... a18 a02 a17 a01 a16 a00 - // - // Namely, for Fat Teddy, the high 128-bits of the candidate correspond - // to the same bytes in the haystack in the low 128-bits (so we only - // scan 16 bytes at a time), but are for buckets 8-15 instead of 0-7. - // - // The verification routine wants to look at all potentially matching - // buckets before moving on to the next lane. So for example, both - // a16 and a00 both correspond to the first byte in our window; a00 - // contains buckets 0-7 and a16 contains buckets 8-15. Specifically, - // a16 should be checked before a01. 
So the transformation shown above - // allows us to use our normal verification procedure with one small - // change: we treat each bitset as 16 bits instead of 8 bits. - debug_assert!(!candidate.is_zero()); - - // Swap the 128-bit lanes in the candidate vector. - let swapped = candidate.swap_halves(); - // Interleave the bytes from the low 128-bit lanes, starting with - // cand first. - let r1 = candidate.interleave_low_8bit_lanes(swapped); - // Interleave the bytes from the high 128-bit lanes, starting with - // cand first. - let r2 = candidate.interleave_high_8bit_lanes(swapped); - // Now just take the 2 low 64-bit integers from both r1 and r2. We - // can drop the high 64-bit integers because they are a mirror image - // of the low 64-bit integers. All we care about are the low 128-bit - // lanes of r1 and r2. Combined, they contain all our 16-bit bitsets - // laid out in the desired order, as described above. - r1.for_each_low_64bit_lane( - r2, - #[inline(always)] - |_, chunk| { - let result = self.verify64(cur, end, chunk); - cur = cur.add(4); - result - }, - ) - } -} - -/// A vector generic mask for the low and high nybbles in a set of patterns. -/// Each 8-bit lane `j` in a vector corresponds to a bitset where the `i`th bit -/// is set if and only if the nybble `j` is in the bucket `i` at a particular -/// position. -/// -/// This is slightly tweaked dependending on whether Slim or Fat Teddy is being -/// used. For Slim Teddy, the bitsets in the lower half are the same as the -/// bitsets in the higher half, so that we can search `V::BYTES` bytes at a -/// time. (Remember, the nybbles in the haystack are used as indices into these -/// masks, and 256-bit shuffles only operate on 128-bit lanes.) -/// -/// For Fat Teddy, the bitsets are not repeated, but instead, the high half -/// bits correspond to an addition 8 buckets. So that a bitset `00100010` has -/// buckets 1 and 5 set if it's in the lower half, but has buckets 9 and 13 set -/// if it's in the higher half. -#[derive(Clone, Copy, Debug)] -struct Mask<V> { - lo: V, - hi: V, -} - -impl<V: Vector> Mask<V> { - /// Return a candidate for Teddy (fat or slim) that is searching for 1-byte - /// candidates. - /// - /// If a candidate is returned, it will be a collection of 8-bit bitsets - /// (one bitset per lane), where the ith bit is set in the jth lane if and - /// only if the byte occurring at the jth lane in `chunk` is in the bucket - /// `i`. If no candidate is found, then the vector returned will have all - /// lanes set to zero. - /// - /// `chunk` should correspond to a `V::BYTES` window of the haystack (where - /// the least significant byte corresponds to the start of the window). For - /// fat Teddy, the haystack window length should be `V::BYTES / 2`, with - /// the window repeated in each half of the vector. - /// - /// `mask1` should correspond to a low/high mask for the first byte of all - /// patterns that are being searched. - #[inline(always)] - unsafe fn members1(chunk: V, masks: [Mask<V>; 1]) -> V { - let lomask = V::splat(0xF); - let hlo = chunk.and(lomask); - let hhi = chunk.shift_8bit_lane_right::<4>().and(lomask); - let locand = masks[0].lo.shuffle_bytes(hlo); - let hicand = masks[0].hi.shuffle_bytes(hhi); - locand.and(hicand) - } - - /// Return a candidate for Teddy (fat or slim) that is searching for 2-byte - /// candidates. 
- /// - /// If candidates are returned, each will be a collection of 8-bit bitsets - /// (one bitset per lane), where the ith bit is set in the jth lane if and - /// only if the byte occurring at the jth lane in `chunk` is in the bucket - /// `i`. Each candidate returned corresponds to the first and second bytes - /// of the patterns being searched. If no candidate is found, then all of - /// the lanes will be set to zero in at least one of the vectors returned. - /// - /// `chunk` should correspond to a `V::BYTES` window of the haystack (where - /// the least significant byte corresponds to the start of the window). For - /// fat Teddy, the haystack window length should be `V::BYTES / 2`, with - /// the window repeated in each half of the vector. - /// - /// The masks should correspond to the masks computed for the first and - /// second bytes of all patterns that are being searched. - #[inline(always)] - unsafe fn members2(chunk: V, masks: [Mask<V>; 2]) -> (V, V) { - let lomask = V::splat(0xF); - let hlo = chunk.and(lomask); - let hhi = chunk.shift_8bit_lane_right::<4>().and(lomask); - - let locand1 = masks[0].lo.shuffle_bytes(hlo); - let hicand1 = masks[0].hi.shuffle_bytes(hhi); - let cand1 = locand1.and(hicand1); - - let locand2 = masks[1].lo.shuffle_bytes(hlo); - let hicand2 = masks[1].hi.shuffle_bytes(hhi); - let cand2 = locand2.and(hicand2); - - (cand1, cand2) - } - - /// Return a candidate for Teddy (fat or slim) that is searching for 3-byte - /// candidates. - /// - /// If candidates are returned, each will be a collection of 8-bit bitsets - /// (one bitset per lane), where the ith bit is set in the jth lane if and - /// only if the byte occurring at the jth lane in `chunk` is in the bucket - /// `i`. Each candidate returned corresponds to the first, second and third - /// bytes of the patterns being searched. If no candidate is found, then - /// all of the lanes will be set to zero in at least one of the vectors - /// returned. - /// - /// `chunk` should correspond to a `V::BYTES` window of the haystack (where - /// the least significant byte corresponds to the start of the window). For - /// fat Teddy, the haystack window length should be `V::BYTES / 2`, with - /// the window repeated in each half of the vector. - /// - /// The masks should correspond to the masks computed for the first, second - /// and third bytes of all patterns that are being searched. - #[inline(always)] - unsafe fn members3(chunk: V, masks: [Mask<V>; 3]) -> (V, V, V) { - let lomask = V::splat(0xF); - let hlo = chunk.and(lomask); - let hhi = chunk.shift_8bit_lane_right::<4>().and(lomask); - - let locand1 = masks[0].lo.shuffle_bytes(hlo); - let hicand1 = masks[0].hi.shuffle_bytes(hhi); - let cand1 = locand1.and(hicand1); - - let locand2 = masks[1].lo.shuffle_bytes(hlo); - let hicand2 = masks[1].hi.shuffle_bytes(hhi); - let cand2 = locand2.and(hicand2); - - let locand3 = masks[2].lo.shuffle_bytes(hlo); - let hicand3 = masks[2].hi.shuffle_bytes(hhi); - let cand3 = locand3.and(hicand3); - - (cand1, cand2, cand3) - } - - /// Return a candidate for Teddy (fat or slim) that is searching for 4-byte - /// candidates. - /// - /// If candidates are returned, each will be a collection of 8-bit bitsets - /// (one bitset per lane), where the ith bit is set in the jth lane if and - /// only if the byte occurring at the jth lane in `chunk` is in the bucket - /// `i`. Each candidate returned corresponds to the first, second, third - /// and fourth bytes of the patterns being searched. 
If no candidate is - /// found, then all of the lanes will be set to zero in at least one of the - /// vectors returned. - /// - /// `chunk` should correspond to a `V::BYTES` window of the haystack (where - /// the least significant byte corresponds to the start of the window). For - /// fat Teddy, the haystack window length should be `V::BYTES / 2`, with - /// the window repeated in each half of the vector. - /// - /// The masks should correspond to the masks computed for the first, - /// second, third and fourth bytes of all patterns that are being searched. - #[inline(always)] - unsafe fn members4(chunk: V, masks: [Mask<V>; 4]) -> (V, V, V, V) { - let lomask = V::splat(0xF); - let hlo = chunk.and(lomask); - let hhi = chunk.shift_8bit_lane_right::<4>().and(lomask); - - let locand1 = masks[0].lo.shuffle_bytes(hlo); - let hicand1 = masks[0].hi.shuffle_bytes(hhi); - let cand1 = locand1.and(hicand1); - - let locand2 = masks[1].lo.shuffle_bytes(hlo); - let hicand2 = masks[1].hi.shuffle_bytes(hhi); - let cand2 = locand2.and(hicand2); - - let locand3 = masks[2].lo.shuffle_bytes(hlo); - let hicand3 = masks[2].hi.shuffle_bytes(hhi); - let cand3 = locand3.and(hicand3); - - let locand4 = masks[3].lo.shuffle_bytes(hlo); - let hicand4 = masks[3].hi.shuffle_bytes(hhi); - let cand4 = locand4.and(hicand4); - - (cand1, cand2, cand3, cand4) - } -} - -/// Represents the low and high nybble masks that will be used during -/// search. Each mask is 32 bytes wide, although only the first 16 bytes are -/// used for 128-bit vectors. -/// -/// Each byte in the mask corresponds to a 8-bit bitset, where bit `i` is set -/// if and only if the corresponding nybble is in the ith bucket. The index of -/// the byte (0-15, inclusive) corresponds to the nybble. -/// -/// Each mask is used as the target of a shuffle, where the indices for the -/// shuffle are taken from the haystack. AND'ing the shuffles for both the -/// low and high masks together also results in 8-bit bitsets, but where bit -/// `i` is set if and only if the correspond *byte* is in the ith bucket. -#[derive(Clone, Default)] -struct SlimMaskBuilder { - lo: [u8; 32], - hi: [u8; 32], -} - -impl SlimMaskBuilder { - /// Update this mask by adding the given byte to the given bucket. The - /// given bucket must be in the range 0-7. - /// - /// # Panics - /// - /// When `bucket >= 8`. - fn add(&mut self, bucket: usize, byte: u8) { - assert!(bucket < 8); - - let bucket = u8::try_from(bucket).unwrap(); - let byte_lo = usize::from(byte & 0xF); - let byte_hi = usize::from((byte >> 4) & 0xF); - // When using 256-bit vectors, we need to set this bucket assignment in - // the low and high 128-bit portions of the mask. This allows us to - // process 32 bytes at a time. Namely, AVX2 shuffles operate on each - // of the 128-bit lanes, rather than the full 256-bit vector at once. - self.lo[byte_lo] |= 1 << bucket; - self.lo[byte_lo + 16] |= 1 << bucket; - self.hi[byte_hi] |= 1 << bucket; - self.hi[byte_hi + 16] |= 1 << bucket; - } - - /// Turn this builder into a vector mask. - /// - /// # Panics - /// - /// When `V` represents a vector bigger than what `MaskBytes` can contain. - /// - /// # Safety - /// - /// Callers must ensure that this is okay to call in the current target for - /// the current CPU. 
- #[inline(always)] - unsafe fn build<V: Vector>(&self) -> Mask<V> { - assert!(V::BYTES <= self.lo.len()); - assert!(V::BYTES <= self.hi.len()); - Mask { - lo: V::load_unaligned(self.lo[..].as_ptr()), - hi: V::load_unaligned(self.hi[..].as_ptr()), - } - } - - /// A convenience function for building `N` vector masks from a slim - /// `Teddy` value. - /// - /// # Panics - /// - /// When `V` represents a vector bigger than what `MaskBytes` can contain. - /// - /// # Safety - /// - /// Callers must ensure that this is okay to call in the current target for - /// the current CPU. - #[inline(always)] - unsafe fn from_teddy<const BYTES: usize, V: Vector>( - teddy: &Teddy<8>, - ) -> [Mask<V>; BYTES] { - // MSRV(1.63): Use core::array::from_fn to just build the array here - // instead of creating a vector and turning it into an array. - let mut mask_builders = vec![SlimMaskBuilder::default(); BYTES]; - for (bucket_index, bucket) in teddy.buckets.iter().enumerate() { - for pid in bucket.iter().copied() { - let pat = teddy.patterns.get(pid); - for (i, builder) in mask_builders.iter_mut().enumerate() { - builder.add(bucket_index, pat.bytes()[i]); - } - } - } - let array = - <[SlimMaskBuilder; BYTES]>::try_from(mask_builders).unwrap(); - array.map(|builder| builder.build()) - } -} - -impl Debug for SlimMaskBuilder { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - let (mut parts_lo, mut parts_hi) = (vec![], vec![]); - for i in 0..32 { - parts_lo.push(format!("{:02}: {:08b}", i, self.lo[i])); - parts_hi.push(format!("{:02}: {:08b}", i, self.hi[i])); - } - f.debug_struct("SlimMaskBuilder") - .field("lo", &parts_lo) - .field("hi", &parts_hi) - .finish() - } -} - -/// Represents the low and high nybble masks that will be used during "fat" -/// Teddy search. -/// -/// Each mask is 32 bytes wide, and at the time of writing, only 256-bit vectors -/// support fat Teddy. -/// -/// A fat Teddy mask is like a slim Teddy mask, except that instead of -/// repeating the bitsets in the high and low 128-bits in 256-bit vectors, the -/// high and low 128-bit halves each represent distinct buckets. (Bringing the -/// total to 16 instead of 8.) This permits spreading the patterns out a bit -/// more and thus putting less pressure on verification to be fast. -/// -/// Each byte in the mask corresponds to a 8-bit bitset, where bit `i` is set -/// if and only if the corresponding nybble is in the ith bucket. The index of -/// the byte (0-15, inclusive) corresponds to the nybble. -#[derive(Clone, Copy, Default)] -struct FatMaskBuilder { - lo: [u8; 32], - hi: [u8; 32], -} - -impl FatMaskBuilder { - /// Update this mask by adding the given byte to the given bucket. The - /// given bucket must be in the range 0-15. - /// - /// # Panics - /// - /// When `bucket >= 16`. - fn add(&mut self, bucket: usize, byte: u8) { - assert!(bucket < 16); - - let bucket = u8::try_from(bucket).unwrap(); - let byte_lo = usize::from(byte & 0xF); - let byte_hi = usize::from((byte >> 4) & 0xF); - // Unlike slim teddy, fat teddy only works with AVX2. For fat teddy, - // the high 128 bits of our mask correspond to buckets 8-15, while the - // low 128 bits correspond to buckets 0-7. - if bucket < 8 { - self.lo[byte_lo] |= 1 << bucket; - self.hi[byte_hi] |= 1 << bucket; - } else { - self.lo[byte_lo + 16] |= 1 << (bucket % 8); - self.hi[byte_hi + 16] |= 1 << (bucket % 8); - } - } - - /// Turn this builder into a vector mask. - /// - /// # Panics - /// - /// When `V` represents a vector bigger than what `MaskBytes` can contain. 
- /// - /// # Safety - /// - /// Callers must ensure that this is okay to call in the current target for - /// the current CPU. - #[inline(always)] - unsafe fn build<V: Vector>(&self) -> Mask<V> { - assert!(V::BYTES <= self.lo.len()); - assert!(V::BYTES <= self.hi.len()); - Mask { - lo: V::load_unaligned(self.lo[..].as_ptr()), - hi: V::load_unaligned(self.hi[..].as_ptr()), - } - } - - /// A convenience function for building `N` vector masks from a fat - /// `Teddy` value. - /// - /// # Panics - /// - /// When `V` represents a vector bigger than what `MaskBytes` can contain. - /// - /// # Safety - /// - /// Callers must ensure that this is okay to call in the current target for - /// the current CPU. - #[inline(always)] - unsafe fn from_teddy<const BYTES: usize, V: Vector>( - teddy: &Teddy<16>, - ) -> [Mask<V>; BYTES] { - // MSRV(1.63): Use core::array::from_fn to just build the array here - // instead of creating a vector and turning it into an array. - let mut mask_builders = vec![FatMaskBuilder::default(); BYTES]; - for (bucket_index, bucket) in teddy.buckets.iter().enumerate() { - for pid in bucket.iter().copied() { - let pat = teddy.patterns.get(pid); - for (i, builder) in mask_builders.iter_mut().enumerate() { - builder.add(bucket_index, pat.bytes()[i]); - } - } - } - let array = - <[FatMaskBuilder; BYTES]>::try_from(mask_builders).unwrap(); - array.map(|builder| builder.build()) - } -} - -impl Debug for FatMaskBuilder { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - let (mut parts_lo, mut parts_hi) = (vec![], vec![]); - for i in 0..32 { - parts_lo.push(format!("{:02}: {:08b}", i, self.lo[i])); - parts_hi.push(format!("{:02}: {:08b}", i, self.hi[i])); - } - f.debug_struct("FatMaskBuilder") - .field("lo", &parts_lo) - .field("hi", &parts_hi) - .finish() - } -} diff --git a/vendor/aho-corasick/src/packed/teddy/mod.rs b/vendor/aho-corasick/src/packed/teddy/mod.rs deleted file mode 100644 index 26cfcdc450ff02..00000000000000 --- a/vendor/aho-corasick/src/packed/teddy/mod.rs +++ /dev/null @@ -1,9 +0,0 @@ -// Regrettable, but Teddy stuff just isn't used on all targets. And for some -// targets, like aarch64, only "slim" Teddy is used and so "fat" Teddy gets a -// bunch of dead-code warnings. Just not worth trying to squash them. Blech. -#![allow(dead_code)] - -pub(crate) use self::builder::{Builder, Searcher}; - -mod builder; -mod generic; diff --git a/vendor/aho-corasick/src/packed/tests.rs b/vendor/aho-corasick/src/packed/tests.rs deleted file mode 100644 index 2b0d44ee6f83ca..00000000000000 --- a/vendor/aho-corasick/src/packed/tests.rs +++ /dev/null @@ -1,583 +0,0 @@ -use std::collections::HashMap; - -use alloc::{ - format, - string::{String, ToString}, - vec, - vec::Vec, -}; - -use crate::{ - packed::{Config, MatchKind}, - util::search::Match, -}; - -/// A description of a single test against a multi-pattern searcher. -/// -/// A single test may not necessarily pass on every configuration of a -/// searcher. The tests are categorized and grouped appropriately below. -#[derive(Clone, Debug, Eq, PartialEq)] -struct SearchTest { - /// The name of this test, for debugging. - name: &'static str, - /// The patterns to search for. - patterns: &'static [&'static str], - /// The text to search. - haystack: &'static str, - /// Each match is a triple of (pattern_index, start, end), where - /// pattern_index is an index into `patterns` and `start`/`end` are indices - /// into `haystack`. 
- matches: &'static [(usize, usize, usize)], -} - -struct SearchTestOwned { - offset: usize, - name: String, - patterns: Vec<String>, - haystack: String, - matches: Vec<(usize, usize, usize)>, -} - -impl SearchTest { - fn variations(&self) -> Vec<SearchTestOwned> { - let count = if cfg!(miri) { 1 } else { 261 }; - let mut tests = vec![]; - for i in 0..count { - tests.push(self.offset_prefix(i)); - tests.push(self.offset_suffix(i)); - tests.push(self.offset_both(i)); - } - tests - } - - fn offset_both(&self, off: usize) -> SearchTestOwned { - SearchTestOwned { - offset: off, - name: self.name.to_string(), - patterns: self.patterns.iter().map(|s| s.to_string()).collect(), - haystack: format!( - "{}{}{}", - "Z".repeat(off), - self.haystack, - "Z".repeat(off) - ), - matches: self - .matches - .iter() - .map(|&(id, s, e)| (id, s + off, e + off)) - .collect(), - } - } - - fn offset_prefix(&self, off: usize) -> SearchTestOwned { - SearchTestOwned { - offset: off, - name: self.name.to_string(), - patterns: self.patterns.iter().map(|s| s.to_string()).collect(), - haystack: format!("{}{}", "Z".repeat(off), self.haystack), - matches: self - .matches - .iter() - .map(|&(id, s, e)| (id, s + off, e + off)) - .collect(), - } - } - - fn offset_suffix(&self, off: usize) -> SearchTestOwned { - SearchTestOwned { - offset: off, - name: self.name.to_string(), - patterns: self.patterns.iter().map(|s| s.to_string()).collect(), - haystack: format!("{}{}", self.haystack, "Z".repeat(off)), - matches: self.matches.to_vec(), - } - } -} - -/// Short-hand constructor for SearchTest. We use it a lot below. -macro_rules! t { - ($name:ident, $patterns:expr, $haystack:expr, $matches:expr) => { - SearchTest { - name: stringify!($name), - patterns: $patterns, - haystack: $haystack, - matches: $matches, - } - }; -} - -/// A collection of test groups. -type TestCollection = &'static [&'static [SearchTest]]; - -// Define several collections corresponding to the different type of match -// semantics supported. These collections have some overlap, but each -// collection should have some tests that no other collection has. - -/// Tests for leftmost-first match semantics. -const PACKED_LEFTMOST_FIRST: TestCollection = - &[BASICS, LEFTMOST, LEFTMOST_FIRST, REGRESSION, TEDDY]; - -/// Tests for leftmost-longest match semantics. -const PACKED_LEFTMOST_LONGEST: TestCollection = - &[BASICS, LEFTMOST, LEFTMOST_LONGEST, REGRESSION, TEDDY]; - -// Now define the individual tests that make up the collections above. - -/// A collection of tests for the that should always be true regardless of -/// match semantics. That is, all combinations of leftmost-{first, longest} -/// should produce the same answer. 
-const BASICS: &'static [SearchTest] = &[ - t!(basic001, &["a"], "", &[]), - t!(basic010, &["a"], "a", &[(0, 0, 1)]), - t!(basic020, &["a"], "aa", &[(0, 0, 1), (0, 1, 2)]), - t!(basic030, &["a"], "aaa", &[(0, 0, 1), (0, 1, 2), (0, 2, 3)]), - t!(basic040, &["a"], "aba", &[(0, 0, 1), (0, 2, 3)]), - t!(basic050, &["a"], "bba", &[(0, 2, 3)]), - t!(basic060, &["a"], "bbb", &[]), - t!(basic070, &["a"], "bababbbba", &[(0, 1, 2), (0, 3, 4), (0, 8, 9)]), - t!(basic100, &["aa"], "", &[]), - t!(basic110, &["aa"], "aa", &[(0, 0, 2)]), - t!(basic120, &["aa"], "aabbaa", &[(0, 0, 2), (0, 4, 6)]), - t!(basic130, &["aa"], "abbab", &[]), - t!(basic140, &["aa"], "abbabaa", &[(0, 5, 7)]), - t!(basic150, &["aaa"], "aaa", &[(0, 0, 3)]), - t!(basic200, &["abc"], "abc", &[(0, 0, 3)]), - t!(basic210, &["abc"], "zazabzabcz", &[(0, 6, 9)]), - t!(basic220, &["abc"], "zazabczabcz", &[(0, 3, 6), (0, 7, 10)]), - t!(basic230, &["abcd"], "abcd", &[(0, 0, 4)]), - t!(basic240, &["abcd"], "zazabzabcdz", &[(0, 6, 10)]), - t!(basic250, &["abcd"], "zazabcdzabcdz", &[(0, 3, 7), (0, 8, 12)]), - t!(basic300, &["a", "b"], "", &[]), - t!(basic310, &["a", "b"], "z", &[]), - t!(basic320, &["a", "b"], "b", &[(1, 0, 1)]), - t!(basic330, &["a", "b"], "a", &[(0, 0, 1)]), - t!( - basic340, - &["a", "b"], - "abba", - &[(0, 0, 1), (1, 1, 2), (1, 2, 3), (0, 3, 4),] - ), - t!( - basic350, - &["b", "a"], - "abba", - &[(1, 0, 1), (0, 1, 2), (0, 2, 3), (1, 3, 4),] - ), - t!(basic360, &["abc", "bc"], "xbc", &[(1, 1, 3),]), - t!(basic400, &["foo", "bar"], "", &[]), - t!(basic410, &["foo", "bar"], "foobar", &[(0, 0, 3), (1, 3, 6),]), - t!(basic420, &["foo", "bar"], "barfoo", &[(1, 0, 3), (0, 3, 6),]), - t!(basic430, &["foo", "bar"], "foofoo", &[(0, 0, 3), (0, 3, 6),]), - t!(basic440, &["foo", "bar"], "barbar", &[(1, 0, 3), (1, 3, 6),]), - t!(basic450, &["foo", "bar"], "bafofoo", &[(0, 4, 7),]), - t!(basic460, &["bar", "foo"], "bafofoo", &[(1, 4, 7),]), - t!(basic470, &["foo", "bar"], "fobabar", &[(1, 4, 7),]), - t!(basic480, &["bar", "foo"], "fobabar", &[(0, 4, 7),]), - t!(basic700, &["yabcdef", "abcdezghi"], "yabcdefghi", &[(0, 0, 7),]), - t!(basic710, &["yabcdef", "abcdezghi"], "yabcdezghi", &[(1, 1, 10),]), - t!( - basic720, - &["yabcdef", "bcdeyabc", "abcdezghi"], - "yabcdezghi", - &[(2, 1, 10),] - ), - t!(basic810, &["abcd", "bcd", "cd"], "abcd", &[(0, 0, 4),]), - t!(basic820, &["bcd", "cd", "abcd"], "abcd", &[(2, 0, 4),]), - t!(basic830, &["abc", "bc"], "zazabcz", &[(0, 3, 6),]), - t!( - basic840, - &["ab", "ba"], - "abababa", - &[(0, 0, 2), (0, 2, 4), (0, 4, 6),] - ), - t!(basic850, &["foo", "foo"], "foobarfoo", &[(0, 0, 3), (0, 6, 9),]), -]; - -/// Tests for leftmost match semantics. These should pass for both -/// leftmost-first and leftmost-longest match kinds. Stated differently, among -/// ambiguous matches, the longest match and the match that appeared first when -/// constructing the automaton should always be the same. 
-const LEFTMOST: &'static [SearchTest] = &[ - t!(leftmost000, &["ab", "ab"], "abcd", &[(0, 0, 2)]), - t!(leftmost030, &["a", "ab"], "aa", &[(0, 0, 1), (0, 1, 2)]), - t!(leftmost031, &["ab", "a"], "aa", &[(1, 0, 1), (1, 1, 2)]), - t!(leftmost032, &["ab", "a"], "xayabbbz", &[(1, 1, 2), (0, 3, 5)]), - t!(leftmost300, &["abcd", "bce", "b"], "abce", &[(1, 1, 4)]), - t!(leftmost310, &["abcd", "ce", "bc"], "abce", &[(2, 1, 3)]), - t!(leftmost320, &["abcd", "bce", "ce", "b"], "abce", &[(1, 1, 4)]), - t!(leftmost330, &["abcd", "bce", "cz", "bc"], "abcz", &[(3, 1, 3)]), - t!(leftmost340, &["bce", "cz", "bc"], "bcz", &[(2, 0, 2)]), - t!(leftmost350, &["abc", "bd", "ab"], "abd", &[(2, 0, 2)]), - t!( - leftmost360, - &["abcdefghi", "hz", "abcdefgh"], - "abcdefghz", - &[(2, 0, 8),] - ), - t!( - leftmost370, - &["abcdefghi", "cde", "hz", "abcdefgh"], - "abcdefghz", - &[(3, 0, 8),] - ), - t!( - leftmost380, - &["abcdefghi", "hz", "abcdefgh", "a"], - "abcdefghz", - &[(2, 0, 8),] - ), - t!( - leftmost390, - &["b", "abcdefghi", "hz", "abcdefgh"], - "abcdefghz", - &[(3, 0, 8),] - ), - t!( - leftmost400, - &["h", "abcdefghi", "hz", "abcdefgh"], - "abcdefghz", - &[(3, 0, 8),] - ), - t!( - leftmost410, - &["z", "abcdefghi", "hz", "abcdefgh"], - "abcdefghz", - &[(3, 0, 8), (0, 8, 9),] - ), -]; - -/// Tests for non-overlapping leftmost-first match semantics. These tests -/// should generally be specific to leftmost-first, which means they should -/// generally fail under leftmost-longest semantics. -const LEFTMOST_FIRST: &'static [SearchTest] = &[ - t!(leftfirst000, &["ab", "abcd"], "abcd", &[(0, 0, 2)]), - t!(leftfirst020, &["abcd", "ab"], "abcd", &[(0, 0, 4)]), - t!(leftfirst030, &["ab", "ab"], "abcd", &[(0, 0, 2)]), - t!(leftfirst040, &["a", "ab"], "xayabbbz", &[(0, 1, 2), (0, 3, 4)]), - t!(leftfirst100, &["abcdefg", "bcde", "bcdef"], "abcdef", &[(1, 1, 5)]), - t!(leftfirst110, &["abcdefg", "bcdef", "bcde"], "abcdef", &[(1, 1, 6)]), - t!(leftfirst300, &["abcd", "b", "bce"], "abce", &[(1, 1, 2)]), - t!( - leftfirst310, - &["abcd", "b", "bce", "ce"], - "abce", - &[(1, 1, 2), (3, 2, 4),] - ), - t!( - leftfirst320, - &["a", "abcdefghi", "hz", "abcdefgh"], - "abcdefghz", - &[(0, 0, 1), (2, 7, 9),] - ), - t!(leftfirst330, &["a", "abab"], "abab", &[(0, 0, 1), (0, 2, 3)]), - t!( - leftfirst340, - &["abcdef", "x", "x", "x", "x", "x", "x", "abcde"], - "abcdef", - &[(0, 0, 6)] - ), -]; - -/// Tests for non-overlapping leftmost-longest match semantics. These tests -/// should generally be specific to leftmost-longest, which means they should -/// generally fail under leftmost-first semantics. 
-const LEFTMOST_LONGEST: &'static [SearchTest] = &[ - t!(leftlong000, &["ab", "abcd"], "abcd", &[(1, 0, 4)]), - t!(leftlong010, &["abcd", "bcd", "cd", "b"], "abcd", &[(0, 0, 4),]), - t!(leftlong040, &["a", "ab"], "a", &[(0, 0, 1)]), - t!(leftlong050, &["a", "ab"], "ab", &[(1, 0, 2)]), - t!(leftlong060, &["ab", "a"], "a", &[(1, 0, 1)]), - t!(leftlong070, &["ab", "a"], "ab", &[(0, 0, 2)]), - t!(leftlong100, &["abcdefg", "bcde", "bcdef"], "abcdef", &[(2, 1, 6)]), - t!(leftlong110, &["abcdefg", "bcdef", "bcde"], "abcdef", &[(1, 1, 6)]), - t!(leftlong300, &["abcd", "b", "bce"], "abce", &[(2, 1, 4)]), - t!( - leftlong310, - &["a", "abcdefghi", "hz", "abcdefgh"], - "abcdefghz", - &[(3, 0, 8),] - ), - t!(leftlong320, &["a", "abab"], "abab", &[(1, 0, 4)]), - t!(leftlong330, &["abcd", "b", "ce"], "abce", &[(1, 1, 2), (2, 2, 4),]), - t!(leftlong340, &["a", "ab"], "xayabbbz", &[(0, 1, 2), (1, 3, 5)]), -]; - -/// Regression tests that are applied to all combinations. -/// -/// If regression tests are needed for specific match semantics, then add them -/// to the appropriate group above. -const REGRESSION: &'static [SearchTest] = &[ - t!(regression010, &["inf", "ind"], "infind", &[(0, 0, 3), (1, 3, 6),]), - t!(regression020, &["ind", "inf"], "infind", &[(1, 0, 3), (0, 3, 6),]), - t!( - regression030, - &["libcore/", "libstd/"], - "libcore/char/methods.rs", - &[(0, 0, 8),] - ), - t!( - regression040, - &["libstd/", "libcore/"], - "libcore/char/methods.rs", - &[(1, 0, 8),] - ), - t!( - regression050, - &["\x00\x00\x01", "\x00\x00\x00"], - "\x00\x00\x00", - &[(1, 0, 3),] - ), - t!( - regression060, - &["\x00\x00\x00", "\x00\x00\x01"], - "\x00\x00\x00", - &[(0, 0, 3),] - ), -]; - -const TEDDY: &'static [SearchTest] = &[ - t!( - teddy010, - &["a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k"], - "abcdefghijk", - &[ - (0, 0, 1), - (1, 1, 2), - (2, 2, 3), - (3, 3, 4), - (4, 4, 5), - (5, 5, 6), - (6, 6, 7), - (7, 7, 8), - (8, 8, 9), - (9, 9, 10), - (10, 10, 11) - ] - ), - t!( - teddy020, - &["ab", "bc", "cd", "de", "ef", "fg", "gh", "hi", "ij", "jk", "kl"], - "abcdefghijk", - &[(0, 0, 2), (2, 2, 4), (4, 4, 6), (6, 6, 8), (8, 8, 10),] - ), - t!( - teddy030, - &["abc"], - "abcdefghijklmnopqrstuvwxyzabcdefghijk", - &[(0, 0, 3), (0, 26, 29)] - ), -]; - -// Now define a test for each combination of things above that we want to run. -// Since there are a few different combinations for each collection of tests, -// we define a couple of macros to avoid repetition drudgery. The testconfig -// macro constructs the automaton from a given match kind, and runs the search -// tests one-by-one over the given collection. The `with` parameter allows one -// to configure the config with additional parameters. The testcombo macro -// invokes testconfig in precisely this way: it sets up several tests where -// each one turns a different knob on Config. - -macro_rules! testconfig { - ($name:ident, $collection:expr, $with:expr) => { - #[test] - fn $name() { - run_search_tests($collection, |test| { - let mut config = Config::new(); - $with(&mut config); - let mut builder = config.builder(); - builder.extend(test.patterns.iter().map(|p| p.as_bytes())); - let searcher = match builder.build() { - Some(searcher) => searcher, - None => { - // For x86-64 and aarch64, not building a searcher is - // probably a bug, so be loud. 
- if cfg!(any( - target_arch = "x86_64", - target_arch = "aarch64" - )) { - panic!("failed to build packed searcher") - } - return None; - } - }; - Some(searcher.find_iter(&test.haystack).collect()) - }); - } - }; -} - -testconfig!( - search_default_leftmost_first, - PACKED_LEFTMOST_FIRST, - |_: &mut Config| {} -); - -testconfig!( - search_default_leftmost_longest, - PACKED_LEFTMOST_LONGEST, - |c: &mut Config| { - c.match_kind(MatchKind::LeftmostLongest); - } -); - -testconfig!( - search_teddy_leftmost_first, - PACKED_LEFTMOST_FIRST, - |c: &mut Config| { - c.only_teddy(true); - } -); - -testconfig!( - search_teddy_leftmost_longest, - PACKED_LEFTMOST_LONGEST, - |c: &mut Config| { - c.only_teddy(true).match_kind(MatchKind::LeftmostLongest); - } -); - -testconfig!( - search_teddy_ssse3_leftmost_first, - PACKED_LEFTMOST_FIRST, - |c: &mut Config| { - c.only_teddy(true); - #[cfg(target_arch = "x86_64")] - if std::is_x86_feature_detected!("ssse3") { - c.only_teddy_256bit(Some(false)); - } - } -); - -testconfig!( - search_teddy_ssse3_leftmost_longest, - PACKED_LEFTMOST_LONGEST, - |c: &mut Config| { - c.only_teddy(true).match_kind(MatchKind::LeftmostLongest); - #[cfg(target_arch = "x86_64")] - if std::is_x86_feature_detected!("ssse3") { - c.only_teddy_256bit(Some(false)); - } - } -); - -testconfig!( - search_teddy_avx2_leftmost_first, - PACKED_LEFTMOST_FIRST, - |c: &mut Config| { - c.only_teddy(true); - #[cfg(target_arch = "x86_64")] - if std::is_x86_feature_detected!("avx2") { - c.only_teddy_256bit(Some(true)); - } - } -); - -testconfig!( - search_teddy_avx2_leftmost_longest, - PACKED_LEFTMOST_LONGEST, - |c: &mut Config| { - c.only_teddy(true).match_kind(MatchKind::LeftmostLongest); - #[cfg(target_arch = "x86_64")] - if std::is_x86_feature_detected!("avx2") { - c.only_teddy_256bit(Some(true)); - } - } -); - -testconfig!( - search_teddy_fat_leftmost_first, - PACKED_LEFTMOST_FIRST, - |c: &mut Config| { - c.only_teddy(true); - #[cfg(target_arch = "x86_64")] - if std::is_x86_feature_detected!("avx2") { - c.only_teddy_fat(Some(true)); - } - } -); - -testconfig!( - search_teddy_fat_leftmost_longest, - PACKED_LEFTMOST_LONGEST, - |c: &mut Config| { - c.only_teddy(true).match_kind(MatchKind::LeftmostLongest); - #[cfg(target_arch = "x86_64")] - if std::is_x86_feature_detected!("avx2") { - c.only_teddy_fat(Some(true)); - } - } -); - -testconfig!( - search_rabinkarp_leftmost_first, - PACKED_LEFTMOST_FIRST, - |c: &mut Config| { - c.only_rabin_karp(true); - } -); - -testconfig!( - search_rabinkarp_leftmost_longest, - PACKED_LEFTMOST_LONGEST, - |c: &mut Config| { - c.only_rabin_karp(true).match_kind(MatchKind::LeftmostLongest); - } -); - -#[test] -fn search_tests_have_unique_names() { - let assert = |constname, tests: &[SearchTest]| { - let mut seen = HashMap::new(); // map from test name to position - for (i, test) in tests.iter().enumerate() { - if !seen.contains_key(test.name) { - seen.insert(test.name, i); - } else { - let last = seen[test.name]; - panic!( - "{} tests have duplicate names at positions {} and {}", - constname, last, i - ); - } - } - }; - assert("BASICS", BASICS); - assert("LEFTMOST", LEFTMOST); - assert("LEFTMOST_FIRST", LEFTMOST_FIRST); - assert("LEFTMOST_LONGEST", LEFTMOST_LONGEST); - assert("REGRESSION", REGRESSION); - assert("TEDDY", TEDDY); -} - -fn run_search_tests<F: FnMut(&SearchTestOwned) -> Option<Vec<Match>>>( - which: TestCollection, - mut f: F, -) { - let get_match_triples = - |matches: Vec<Match>| -> Vec<(usize, usize, usize)> { - matches - .into_iter() - .map(|m| 
(m.pattern().as_usize(), m.start(), m.end())) - .collect() - }; - for &tests in which { - for spec in tests { - for test in spec.variations() { - let results = match f(&test) { - None => continue, - Some(results) => results, - }; - assert_eq!( - test.matches, - get_match_triples(results).as_slice(), - "test: {}, patterns: {:?}, haystack(len={:?}): {:?}, \ - offset: {:?}", - test.name, - test.patterns, - test.haystack.len(), - test.haystack, - test.offset, - ); - } - } - } -} diff --git a/vendor/aho-corasick/src/packed/vector.rs b/vendor/aho-corasick/src/packed/vector.rs deleted file mode 100644 index 57c02ccf8f320a..00000000000000 --- a/vendor/aho-corasick/src/packed/vector.rs +++ /dev/null @@ -1,1757 +0,0 @@ -// NOTE: The descriptions for each of the vector methods on the traits below -// are pretty inscrutable. For this reason, there are tests for every method -// on for every trait impl below. If you're confused about what an op does, -// consult its test. (They probably should be doc tests, but I couldn't figure -// out how to write them in a non-annoying way.) - -use core::{ - fmt::Debug, - panic::{RefUnwindSafe, UnwindSafe}, -}; - -/// A trait for describing vector operations used by vectorized searchers. -/// -/// The trait is highly constrained to low level vector operations needed for -/// the specific algorithms used in this crate. In general, it was invented -/// mostly to be generic over x86's __m128i and __m256i types. At time of -/// writing, it also supports wasm and aarch64 128-bit vector types as well. -/// -/// # Safety -/// -/// All methods are not safe since they are intended to be implemented using -/// vendor intrinsics, which are also not safe. Callers must ensure that -/// the appropriate target features are enabled in the calling function, -/// and that the current CPU supports them. All implementations should -/// avoid marking the routines with `#[target_feature]` and instead mark -/// them as `#[inline(always)]` to ensure they get appropriately inlined. -/// (`inline(always)` cannot be used with target_feature.) -pub(crate) trait Vector: - Copy + Debug + Send + Sync + UnwindSafe + RefUnwindSafe -{ - /// The number of bits in the vector. - const BITS: usize; - /// The number of bytes in the vector. That is, this is the size of the - /// vector in memory. - const BYTES: usize; - - /// Create a vector with 8-bit lanes with the given byte repeated into each - /// lane. - /// - /// # Safety - /// - /// Callers must ensure that this is okay to call in the current target for - /// the current CPU. - unsafe fn splat(byte: u8) -> Self; - - /// Read a vector-size number of bytes from the given pointer. The pointer - /// does not need to be aligned. - /// - /// # Safety - /// - /// Callers must ensure that this is okay to call in the current target for - /// the current CPU. - /// - /// Callers must guarantee that at least `BYTES` bytes are readable from - /// `data`. - unsafe fn load_unaligned(data: *const u8) -> Self; - - /// Returns true if and only if this vector has zero in all of its lanes. - /// - /// # Safety - /// - /// Callers must ensure that this is okay to call in the current target for - /// the current CPU. - unsafe fn is_zero(self) -> bool; - - /// Do an 8-bit pairwise equality check. If lane `i` is equal in this - /// vector and the one given, then lane `i` in the resulting vector is set - /// to `0xFF`. Otherwise, it is set to `0x00`. 
- /// - /// # Safety - /// - /// Callers must ensure that this is okay to call in the current target for - /// the current CPU. - unsafe fn cmpeq(self, vector2: Self) -> Self; - - /// Perform a bitwise 'and' of this vector and the one given and return - /// the result. - /// - /// # Safety - /// - /// Callers must ensure that this is okay to call in the current target for - /// the current CPU. - unsafe fn and(self, vector2: Self) -> Self; - - /// Perform a bitwise 'or' of this vector and the one given and return - /// the result. - /// - /// # Safety - /// - /// Callers must ensure that this is okay to call in the current target for - /// the current CPU. - #[allow(dead_code)] // unused, but useful enough to keep around? - unsafe fn or(self, vector2: Self) -> Self; - - /// Shift each 8-bit lane in this vector to the right by the number of - /// bits indicated by the `BITS` type parameter. - /// - /// # Safety - /// - /// Callers must ensure that this is okay to call in the current target for - /// the current CPU. - unsafe fn shift_8bit_lane_right<const BITS: i32>(self) -> Self; - - /// Shift this vector to the left by one byte and shift the most - /// significant byte of `vector2` into the least significant position of - /// this vector. - /// - /// Stated differently, this behaves as if `self` and `vector2` were - /// concatenated into a `2 * Self::BITS` temporary buffer and then shifted - /// right by `Self::BYTES - 1` bytes. - /// - /// With respect to the Teddy algorithm, `vector2` is usually a previous - /// `Self::BYTES` chunk from the haystack and `self` is the chunk - /// immediately following it. This permits combining the last byte - /// from the previous chunk (`vector2`) with the first `Self::BYTES - 1` - /// bytes from the current chunk. This permits aligning the result of - /// various shuffles so that they can be and-ed together and a possible - /// candidate discovered. - /// - /// # Safety - /// - /// Callers must ensure that this is okay to call in the current target for - /// the current CPU. - unsafe fn shift_in_one_byte(self, vector2: Self) -> Self; - - /// Shift this vector to the left by two bytes and shift the two most - /// significant bytes of `vector2` into the least significant position of - /// this vector. - /// - /// Stated differently, this behaves as if `self` and `vector2` were - /// concatenated into a `2 * Self::BITS` temporary buffer and then shifted - /// right by `Self::BYTES - 2` bytes. - /// - /// With respect to the Teddy algorithm, `vector2` is usually a previous - /// `Self::BYTES` chunk from the haystack and `self` is the chunk - /// immediately following it. This permits combining the last two bytes - /// from the previous chunk (`vector2`) with the first `Self::BYTES - 2` - /// bytes from the current chunk. This permits aligning the result of - /// various shuffles so that they can be and-ed together and a possible - /// candidate discovered. - /// - /// # Safety - /// - /// Callers must ensure that this is okay to call in the current target for - /// the current CPU. - unsafe fn shift_in_two_bytes(self, vector2: Self) -> Self; - - /// Shift this vector to the left by three bytes and shift the three most - /// significant bytes of `vector2` into the least significant position of - /// this vector. - /// - /// Stated differently, this behaves as if `self` and `vector2` were - /// concatenated into a `2 * Self::BITS` temporary buffer and then shifted - /// right by `Self::BYTES - 3` bytes.
- /// - /// With respect to the Teddy algorithm, `vector2` is usually a previous - /// `Self::BYTES` chunk from the haystack and `self` is the chunk - /// immediately following it. This permits combining the last three bytes - /// from the previous chunk (`vector2`) with the first `Self::BYTES - 3` - /// bytes from the current chunk. This permits aligning the result of - /// various shuffles so that they can be and-ed together and a possible - /// candidate discovered. - /// - /// # Safety - /// - /// Callers must ensure that this is okay to call in the current target for - /// the current CPU. - unsafe fn shift_in_three_bytes(self, vector2: Self) -> Self; - - /// Shuffles the bytes in this vector according to the indices in each of - /// the corresponding lanes in `indices`. - /// - /// If `i` is the index of corresponding lanes, `A` is this vector, `B` is - /// indices and `C` is the resulting vector, then `C[i] = A[B[i]]`. - /// - /// # Safety - /// - /// Callers must ensure that this is okay to call in the current target for - /// the current CPU. - unsafe fn shuffle_bytes(self, indices: Self) -> Self; - - /// Call the provided function for each 64-bit lane in this vector. The - /// given function is provided the lane index and lane value as a `u64`. - /// - /// If `f` returns `Some`, then iteration over the lanes is stopped and the - /// value is returned. Otherwise, this returns `None`. - /// - /// # Notes - /// - /// Conceptually it would be nice if we could have a - /// `unpack64(self) -> [u64; BITS / 64]` method, but defining that is - /// tricky given Rust's [current support for const generics][support]. - /// And even if we could, it would be tricky to write generic code over - /// it. (Not impossible. We could introduce another layer that requires - /// `AsRef<[u64]>` or something.) - /// - /// [support]: https://github.com/rust-lang/rust/issues/60551 - /// - /// # Safety - /// - /// Callers must ensure that this is okay to call in the current target for - /// the current CPU. - unsafe fn for_each_64bit_lane<T>( - self, - f: impl FnMut(usize, u64) -> Option<T>, - ) -> Option<T>; -} - -/// This trait extends the `Vector` trait with additional operations to support -/// Fat Teddy. -/// -/// Fat Teddy uses 16 buckets instead of 8, but reads half as many bytes (as -/// the vector size) instead of the full size of a vector per iteration. For -/// example, when using a 256-bit vector, Slim Teddy reads 32 bytes at a time -/// but Fat Teddy reads 16 bytes at a time. -/// -/// Fat Teddy is useful when searching for a large number of literals. -/// The extra number of buckets spreads the literals out more and reduces -/// verification time. -/// -/// Currently we only implement this for AVX on x86_64. It would be nice to -/// implement this for SSE on x86_64 and NEON on aarch64, with the latter two -/// only reading 8 bytes at a time. It's not clear how well it would work, but -/// there are some tricky things to figure out in terms of implementation. The -/// `half_shift_in_{one,two,three}_bytes` methods in particular are probably -/// the trickiest of the bunch. For AVX2, these are implemented by taking -/// advantage of the fact that `_mm256_alignr_epi8` operates on each 128-bit -/// half instead of the full 256-bit vector. (Whereas `_mm_alignr_epi8` -/// operates on the full 128-bit vector and not on each 64-bit half.) I didn't -/// do a careful survey of NEON to see if it could easily support these -/// operations.
-pub(crate) trait FatVector: Vector { - type Half: Vector; - - /// Read a half-vector-size number of bytes from the given pointer, and - /// broadcast it across both halves of a full vector. The pointer does not - /// need to be aligned. - /// - /// # Safety - /// - /// Callers must ensure that this is okay to call in the current target for - /// the current CPU. - /// - /// Callers must guarantee that at least `Self::HALF::BYTES` bytes are - /// readable from `data`. - unsafe fn load_half_unaligned(data: *const u8) -> Self; - - /// Like `Vector::shift_in_one_byte`, except this is done for each half - /// of the vector instead. - /// - /// # Safety - /// - /// Callers must ensure that this is okay to call in the current target for - /// the current CPU. - unsafe fn half_shift_in_one_byte(self, vector2: Self) -> Self; - - /// Like `Vector::shift_in_two_bytes`, except this is done for each half - /// of the vector instead. - /// - /// # Safety - /// - /// Callers must ensure that this is okay to call in the current target for - /// the current CPU. - unsafe fn half_shift_in_two_bytes(self, vector2: Self) -> Self; - - /// Like `Vector::shift_in_three_bytes`, except this is done for each half - /// of the vector instead. - /// - /// # Safety - /// - /// Callers must ensure that this is okay to call in the current target for - /// the current CPU. - unsafe fn half_shift_in_three_bytes(self, vector2: Self) -> Self; - - /// Swap the 128-bit lanes in this vector. - /// - /// # Safety - /// - /// Callers must ensure that this is okay to call in the current target for - /// the current CPU. - unsafe fn swap_halves(self) -> Self; - - /// Unpack and interleave the 8-bit lanes from the low 128 bits of each - /// vector and return the result. - /// - /// # Safety - /// - /// Callers must ensure that this is okay to call in the current target for - /// the current CPU. - unsafe fn interleave_low_8bit_lanes(self, vector2: Self) -> Self; - - /// Unpack and interleave the 8-bit lanes from the high 128 bits of each - /// vector and return the result. - /// - /// # Safety - /// - /// Callers must ensure that this is okay to call in the current target for - /// the current CPU. - unsafe fn interleave_high_8bit_lanes(self, vector2: Self) -> Self; - - /// Call the provided function for each 64-bit lane in the lower half - /// of this vector and then in the other vector. The given function is - /// provided the lane index and lane value as a `u64`. (The high 128-bits - /// of each vector are ignored.) - /// - /// If `f` returns `Some`, then iteration over the lanes is stopped and the - /// value is returned. Otherwise, this returns `None`. - /// - /// # Safety - /// - /// Callers must ensure that this is okay to call in the current target for - /// the current CPU.
- unsafe fn for_each_low_64bit_lane<T>( - self, - vector2: Self, - f: impl FnMut(usize, u64) -> Option<T>, - ) -> Option<T>; -} - -#[cfg(all(target_arch = "x86_64", target_feature = "sse2"))] -mod x86_64_ssse3 { - use core::arch::x86_64::*; - - use crate::util::int::{I32, I8}; - - use super::Vector; - - impl Vector for __m128i { - const BITS: usize = 128; - const BYTES: usize = 16; - - #[inline(always)] - unsafe fn splat(byte: u8) -> __m128i { - _mm_set1_epi8(i8::from_bits(byte)) - } - - #[inline(always)] - unsafe fn load_unaligned(data: *const u8) -> __m128i { - _mm_loadu_si128(data.cast::<__m128i>()) - } - - #[inline(always)] - unsafe fn is_zero(self) -> bool { - let cmp = self.cmpeq(Self::splat(0)); - _mm_movemask_epi8(cmp).to_bits() == 0xFFFF - } - - #[inline(always)] - unsafe fn cmpeq(self, vector2: Self) -> __m128i { - _mm_cmpeq_epi8(self, vector2) - } - - #[inline(always)] - unsafe fn and(self, vector2: Self) -> __m128i { - _mm_and_si128(self, vector2) - } - - #[inline(always)] - unsafe fn or(self, vector2: Self) -> __m128i { - _mm_or_si128(self, vector2) - } - - #[inline(always)] - unsafe fn shift_8bit_lane_right<const BITS: i32>(self) -> Self { - // Apparently there is no _mm_srli_epi8, so we emulate it by - // shifting 16-bit integers and masking out the high nybble of each - // 8-bit lane (since that nybble will contain bits from the low - // nybble of the previous lane). - let lomask = Self::splat(0xF); - _mm_srli_epi16(self, BITS).and(lomask) - } - - #[inline(always)] - unsafe fn shift_in_one_byte(self, vector2: Self) -> Self { - _mm_alignr_epi8(self, vector2, 15) - } - - #[inline(always)] - unsafe fn shift_in_two_bytes(self, vector2: Self) -> Self { - _mm_alignr_epi8(self, vector2, 14) - } - - #[inline(always)] - unsafe fn shift_in_three_bytes(self, vector2: Self) -> Self { - _mm_alignr_epi8(self, vector2, 13) - } - - #[inline(always)] - unsafe fn shuffle_bytes(self, indices: Self) -> Self { - _mm_shuffle_epi8(self, indices) - } - - #[inline(always)] - unsafe fn for_each_64bit_lane<T>( - self, - mut f: impl FnMut(usize, u64) -> Option<T>, - ) -> Option<T> { - // We could just use _mm_extract_epi64 here, but that requires - // SSE 4.1. It isn't necessarily a problem to just require SSE 4.1, - // but everything else works with SSSE3 so we stick to that subset. 
- let lanes: [u64; 2] = core::mem::transmute(self); - if let Some(t) = f(0, lanes[0]) { - return Some(t); - } - if let Some(t) = f(1, lanes[1]) { - return Some(t); - } - None - } - } -} - -#[cfg(all(target_arch = "x86_64", target_feature = "sse2"))] -mod x86_64_avx2 { - use core::arch::x86_64::*; - - use crate::util::int::{I32, I64, I8}; - - use super::{FatVector, Vector}; - - impl Vector for __m256i { - const BITS: usize = 256; - const BYTES: usize = 32; - - #[inline(always)] - unsafe fn splat(byte: u8) -> __m256i { - _mm256_set1_epi8(i8::from_bits(byte)) - } - - #[inline(always)] - unsafe fn load_unaligned(data: *const u8) -> __m256i { - _mm256_loadu_si256(data.cast::<__m256i>()) - } - - #[inline(always)] - unsafe fn is_zero(self) -> bool { - let cmp = self.cmpeq(Self::splat(0)); - _mm256_movemask_epi8(cmp).to_bits() == 0xFFFFFFFF - } - - #[inline(always)] - unsafe fn cmpeq(self, vector2: Self) -> __m256i { - _mm256_cmpeq_epi8(self, vector2) - } - - #[inline(always)] - unsafe fn and(self, vector2: Self) -> __m256i { - _mm256_and_si256(self, vector2) - } - - #[inline(always)] - unsafe fn or(self, vector2: Self) -> __m256i { - _mm256_or_si256(self, vector2) - } - - #[inline(always)] - unsafe fn shift_8bit_lane_right<const BITS: i32>(self) -> Self { - let lomask = Self::splat(0xF); - _mm256_srli_epi16(self, BITS).and(lomask) - } - - #[inline(always)] - unsafe fn shift_in_one_byte(self, vector2: Self) -> Self { - // Credit goes to jneem for figuring this out: - // https://github.com/jneem/teddy/blob/9ab5e899ad6ef6911aecd3cf1033f1abe6e1f66c/src/x86/teddy_simd.rs#L145-L184 - // - // TL;DR avx2's PALIGNR instruction is actually just two 128-bit - // PALIGNR instructions, which is not what we want, so we need to - // do some extra shuffling. - let v = _mm256_permute2x128_si256(vector2, self, 0x21); - _mm256_alignr_epi8(self, v, 15) - } - - #[inline(always)] - unsafe fn shift_in_two_bytes(self, vector2: Self) -> Self { - // Credit goes to jneem for figuring this out: - // https://github.com/jneem/teddy/blob/9ab5e899ad6ef6911aecd3cf1033f1abe6e1f66c/src/x86/teddy_simd.rs#L145-L184 - // - // TL;DR avx2's PALIGNR instruction is actually just two 128-bit - // PALIGNR instructions, which is not what we want, so we need to - // do some extra shuffling. - let v = _mm256_permute2x128_si256(vector2, self, 0x21); - _mm256_alignr_epi8(self, v, 14) - } - - #[inline(always)] - unsafe fn shift_in_three_bytes(self, vector2: Self) -> Self { - // Credit goes to jneem for figuring this out: - // https://github.com/jneem/teddy/blob/9ab5e899ad6ef6911aecd3cf1033f1abe6e1f66c/src/x86/teddy_simd.rs#L145-L184 - // - // TL;DR avx2's PALIGNR instruction is actually just two 128-bit - // PALIGNR instructions, which is not what we want, so we need to - // do some extra shuffling. - let v = _mm256_permute2x128_si256(vector2, self, 0x21); - _mm256_alignr_epi8(self, v, 13) - } - - #[inline(always)] - unsafe fn shuffle_bytes(self, indices: Self) -> Self { - _mm256_shuffle_epi8(self, indices) - } - - #[inline(always)] - unsafe fn for_each_64bit_lane<T>( - self, - mut f: impl FnMut(usize, u64) -> Option<T>, - ) -> Option<T> { - // NOTE: At one point in the past, I used transmute to this to - // get a [u64; 4], but it turned out to lead to worse codegen IIRC. - // I've tried it more recently, and it looks like that's no longer - // the case. But since there's no difference, we stick with the - // slightly more complicated but transmute-free version. 
- let lane = _mm256_extract_epi64(self, 0).to_bits(); - if let Some(t) = f(0, lane) { - return Some(t); - } - let lane = _mm256_extract_epi64(self, 1).to_bits(); - if let Some(t) = f(1, lane) { - return Some(t); - } - let lane = _mm256_extract_epi64(self, 2).to_bits(); - if let Some(t) = f(2, lane) { - return Some(t); - } - let lane = _mm256_extract_epi64(self, 3).to_bits(); - if let Some(t) = f(3, lane) { - return Some(t); - } - None - } - } - - impl FatVector for __m256i { - type Half = __m128i; - - #[inline(always)] - unsafe fn load_half_unaligned(data: *const u8) -> Self { - let half = Self::Half::load_unaligned(data); - _mm256_broadcastsi128_si256(half) - } - - #[inline(always)] - unsafe fn half_shift_in_one_byte(self, vector2: Self) -> Self { - _mm256_alignr_epi8(self, vector2, 15) - } - - #[inline(always)] - unsafe fn half_shift_in_two_bytes(self, vector2: Self) -> Self { - _mm256_alignr_epi8(self, vector2, 14) - } - - #[inline(always)] - unsafe fn half_shift_in_three_bytes(self, vector2: Self) -> Self { - _mm256_alignr_epi8(self, vector2, 13) - } - - #[inline(always)] - unsafe fn swap_halves(self) -> Self { - _mm256_permute4x64_epi64(self, 0x4E) - } - - #[inline(always)] - unsafe fn interleave_low_8bit_lanes(self, vector2: Self) -> Self { - _mm256_unpacklo_epi8(self, vector2) - } - - #[inline(always)] - unsafe fn interleave_high_8bit_lanes(self, vector2: Self) -> Self { - _mm256_unpackhi_epi8(self, vector2) - } - - #[inline(always)] - unsafe fn for_each_low_64bit_lane<T>( - self, - vector2: Self, - mut f: impl FnMut(usize, u64) -> Option<T>, - ) -> Option<T> { - let lane = _mm256_extract_epi64(self, 0).to_bits(); - if let Some(t) = f(0, lane) { - return Some(t); - } - let lane = _mm256_extract_epi64(self, 1).to_bits(); - if let Some(t) = f(1, lane) { - return Some(t); - } - let lane = _mm256_extract_epi64(vector2, 0).to_bits(); - if let Some(t) = f(2, lane) { - return Some(t); - } - let lane = _mm256_extract_epi64(vector2, 1).to_bits(); - if let Some(t) = f(3, lane) { - return Some(t); - } - None - } - } -} - -#[cfg(all( - target_arch = "aarch64", - target_feature = "neon", - target_endian = "little" -))] -mod aarch64_neon { - use core::arch::aarch64::*; - - use super::Vector; - - impl Vector for uint8x16_t { - const BITS: usize = 128; - const BYTES: usize = 16; - - #[inline(always)] - unsafe fn splat(byte: u8) -> uint8x16_t { - vdupq_n_u8(byte) - } - - #[inline(always)] - unsafe fn load_unaligned(data: *const u8) -> uint8x16_t { - vld1q_u8(data) - } - - #[inline(always)] - unsafe fn is_zero(self) -> bool { - // Could also use vmaxvq_u8. - // ... I tried that and couldn't observe any meaningful difference - // in benchmarks. 
- let maxes = vreinterpretq_u64_u8(vpmaxq_u8(self, self)); - vgetq_lane_u64(maxes, 0) == 0 - } - - #[inline(always)] - unsafe fn cmpeq(self, vector2: Self) -> uint8x16_t { - vceqq_u8(self, vector2) - } - - #[inline(always)] - unsafe fn and(self, vector2: Self) -> uint8x16_t { - vandq_u8(self, vector2) - } - - #[inline(always)] - unsafe fn or(self, vector2: Self) -> uint8x16_t { - vorrq_u8(self, vector2) - } - - #[inline(always)] - unsafe fn shift_8bit_lane_right<const BITS: i32>(self) -> Self { - debug_assert!(BITS <= 7); - vshrq_n_u8(self, BITS) - } - - #[inline(always)] - unsafe fn shift_in_one_byte(self, vector2: Self) -> Self { - vextq_u8(vector2, self, 15) - } - - #[inline(always)] - unsafe fn shift_in_two_bytes(self, vector2: Self) -> Self { - vextq_u8(vector2, self, 14) - } - - #[inline(always)] - unsafe fn shift_in_three_bytes(self, vector2: Self) -> Self { - vextq_u8(vector2, self, 13) - } - - #[inline(always)] - unsafe fn shuffle_bytes(self, indices: Self) -> Self { - vqtbl1q_u8(self, indices) - } - - #[inline(always)] - unsafe fn for_each_64bit_lane<T>( - self, - mut f: impl FnMut(usize, u64) -> Option<T>, - ) -> Option<T> { - let this = vreinterpretq_u64_u8(self); - let lane = vgetq_lane_u64(this, 0); - if let Some(t) = f(0, lane) { - return Some(t); - } - let lane = vgetq_lane_u64(this, 1); - if let Some(t) = f(1, lane) { - return Some(t); - } - None - } - } -} - -#[cfg(all(test, target_arch = "x86_64", target_feature = "sse2"))] -mod tests_x86_64_ssse3 { - use core::arch::x86_64::*; - - use crate::util::int::{I32, U32}; - - use super::*; - - fn is_runnable() -> bool { - std::is_x86_feature_detected!("ssse3") - } - - #[target_feature(enable = "ssse3")] - unsafe fn load(lanes: [u8; 16]) -> __m128i { - __m128i::load_unaligned(&lanes as *const u8) - } - - #[target_feature(enable = "ssse3")] - unsafe fn unload(v: __m128i) -> [u8; 16] { - [ - _mm_extract_epi8(v, 0).to_bits().low_u8(), - _mm_extract_epi8(v, 1).to_bits().low_u8(), - _mm_extract_epi8(v, 2).to_bits().low_u8(), - _mm_extract_epi8(v, 3).to_bits().low_u8(), - _mm_extract_epi8(v, 4).to_bits().low_u8(), - _mm_extract_epi8(v, 5).to_bits().low_u8(), - _mm_extract_epi8(v, 6).to_bits().low_u8(), - _mm_extract_epi8(v, 7).to_bits().low_u8(), - _mm_extract_epi8(v, 8).to_bits().low_u8(), - _mm_extract_epi8(v, 9).to_bits().low_u8(), - _mm_extract_epi8(v, 10).to_bits().low_u8(), - _mm_extract_epi8(v, 11).to_bits().low_u8(), - _mm_extract_epi8(v, 12).to_bits().low_u8(), - _mm_extract_epi8(v, 13).to_bits().low_u8(), - _mm_extract_epi8(v, 14).to_bits().low_u8(), - _mm_extract_epi8(v, 15).to_bits().low_u8(), - ] - } - - #[test] - fn vector_splat() { - #[target_feature(enable = "ssse3")] - unsafe fn test() { - let v = __m128i::splat(0xAF); - assert_eq!( - unload(v), - [ - 0xAF, 0xAF, 0xAF, 0xAF, 0xAF, 0xAF, 0xAF, 0xAF, 0xAF, - 0xAF, 0xAF, 0xAF, 0xAF, 0xAF, 0xAF, 0xAF - ] - ); - } - if !is_runnable() { - return; - } - unsafe { test() } - } - - #[test] - fn vector_is_zero() { - #[target_feature(enable = "ssse3")] - unsafe fn test() { - let v = load([0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]); - assert!(!v.is_zero()); - let v = load([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]); - assert!(v.is_zero()); - } - if !is_runnable() { - return; - } - unsafe { test() } - } - - #[test] - fn vector_cmpeq() { - #[target_feature(enable = "ssse3")] - unsafe fn test() { - let v1 = - load([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 1]); - let v2 = - load([16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1]); - assert_eq!( - 
unload(v1.cmpeq(v2)), - [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xFF] - ); - } - if !is_runnable() { - return; - } - unsafe { test() } - } - - #[test] - fn vector_and() { - #[target_feature(enable = "ssse3")] - unsafe fn test() { - let v1 = - load([0, 0, 0, 0, 0, 0b1001, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]); - let v2 = - load([0, 0, 0, 0, 0, 0b1010, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]); - assert_eq!( - unload(v1.and(v2)), - [0, 0, 0, 0, 0, 0b1000, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] - ); - } - if !is_runnable() { - return; - } - unsafe { test() } - } - - #[test] - fn vector_or() { - #[target_feature(enable = "ssse3")] - unsafe fn test() { - let v1 = - load([0, 0, 0, 0, 0, 0b1001, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]); - let v2 = - load([0, 0, 0, 0, 0, 0b1010, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]); - assert_eq!( - unload(v1.or(v2)), - [0, 0, 0, 0, 0, 0b1011, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] - ); - } - if !is_runnable() { - return; - } - unsafe { test() } - } - - #[test] - fn vector_shift_8bit_lane_right() { - #[target_feature(enable = "ssse3")] - unsafe fn test() { - let v = load([ - 0, 0, 0, 0, 0b1011, 0b0101, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ]); - assert_eq!( - unload(v.shift_8bit_lane_right::<2>()), - [0, 0, 0, 0, 0b0010, 0b0001, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] - ); - } - if !is_runnable() { - return; - } - unsafe { test() } - } - - #[test] - fn vector_shift_in_one_byte() { - #[target_feature(enable = "ssse3")] - unsafe fn test() { - let v1 = - load([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]); - let v2 = load([ - 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, - ]); - assert_eq!( - unload(v1.shift_in_one_byte(v2)), - [32, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15], - ); - } - if !is_runnable() { - return; - } - unsafe { test() } - } - - #[test] - fn vector_shift_in_two_bytes() { - #[target_feature(enable = "ssse3")] - unsafe fn test() { - let v1 = - load([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]); - let v2 = load([ - 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, - ]); - assert_eq!( - unload(v1.shift_in_two_bytes(v2)), - [31, 32, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14], - ); - } - if !is_runnable() { - return; - } - unsafe { test() } - } - - #[test] - fn vector_shift_in_three_bytes() { - #[target_feature(enable = "ssse3")] - unsafe fn test() { - let v1 = - load([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]); - let v2 = load([ - 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, - ]); - assert_eq!( - unload(v1.shift_in_three_bytes(v2)), - [30, 31, 32, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13], - ); - } - if !is_runnable() { - return; - } - unsafe { test() } - } - - #[test] - fn vector_shuffle_bytes() { - #[target_feature(enable = "ssse3")] - unsafe fn test() { - let v1 = - load([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]); - let v2 = - load([0, 0, 0, 0, 4, 4, 4, 4, 8, 8, 8, 8, 12, 12, 12, 12]); - assert_eq!( - unload(v1.shuffle_bytes(v2)), - [1, 1, 1, 1, 5, 5, 5, 5, 9, 9, 9, 9, 13, 13, 13, 13], - ); - } - if !is_runnable() { - return; - } - unsafe { test() } - } - - #[test] - fn vector_for_each_64bit_lane() { - #[target_feature(enable = "ssse3")] - unsafe fn test() { - let v = load([ - 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, - 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x10, - ]); - let mut lanes = [0u64; 2]; - v.for_each_64bit_lane(|i, lane| { - lanes[i] = lane; - None::<()> - }); - assert_eq!(lanes, [0x0807060504030201, 0x100F0E0D0C0B0A09],); - } - if !is_runnable() { - return; - } - unsafe { test() } 
- } -} - -#[cfg(all(test, target_arch = "x86_64", target_feature = "sse2"))] -mod tests_x86_64_avx2 { - use core::arch::x86_64::*; - - use crate::util::int::{I32, U32}; - - use super::*; - - fn is_runnable() -> bool { - std::is_x86_feature_detected!("avx2") - } - - #[target_feature(enable = "avx2")] - unsafe fn load(lanes: [u8; 32]) -> __m256i { - __m256i::load_unaligned(&lanes as *const u8) - } - - #[target_feature(enable = "avx2")] - unsafe fn load_half(lanes: [u8; 16]) -> __m256i { - __m256i::load_half_unaligned(&lanes as *const u8) - } - - #[target_feature(enable = "avx2")] - unsafe fn unload(v: __m256i) -> [u8; 32] { - [ - _mm256_extract_epi8(v, 0).to_bits().low_u8(), - _mm256_extract_epi8(v, 1).to_bits().low_u8(), - _mm256_extract_epi8(v, 2).to_bits().low_u8(), - _mm256_extract_epi8(v, 3).to_bits().low_u8(), - _mm256_extract_epi8(v, 4).to_bits().low_u8(), - _mm256_extract_epi8(v, 5).to_bits().low_u8(), - _mm256_extract_epi8(v, 6).to_bits().low_u8(), - _mm256_extract_epi8(v, 7).to_bits().low_u8(), - _mm256_extract_epi8(v, 8).to_bits().low_u8(), - _mm256_extract_epi8(v, 9).to_bits().low_u8(), - _mm256_extract_epi8(v, 10).to_bits().low_u8(), - _mm256_extract_epi8(v, 11).to_bits().low_u8(), - _mm256_extract_epi8(v, 12).to_bits().low_u8(), - _mm256_extract_epi8(v, 13).to_bits().low_u8(), - _mm256_extract_epi8(v, 14).to_bits().low_u8(), - _mm256_extract_epi8(v, 15).to_bits().low_u8(), - _mm256_extract_epi8(v, 16).to_bits().low_u8(), - _mm256_extract_epi8(v, 17).to_bits().low_u8(), - _mm256_extract_epi8(v, 18).to_bits().low_u8(), - _mm256_extract_epi8(v, 19).to_bits().low_u8(), - _mm256_extract_epi8(v, 20).to_bits().low_u8(), - _mm256_extract_epi8(v, 21).to_bits().low_u8(), - _mm256_extract_epi8(v, 22).to_bits().low_u8(), - _mm256_extract_epi8(v, 23).to_bits().low_u8(), - _mm256_extract_epi8(v, 24).to_bits().low_u8(), - _mm256_extract_epi8(v, 25).to_bits().low_u8(), - _mm256_extract_epi8(v, 26).to_bits().low_u8(), - _mm256_extract_epi8(v, 27).to_bits().low_u8(), - _mm256_extract_epi8(v, 28).to_bits().low_u8(), - _mm256_extract_epi8(v, 29).to_bits().low_u8(), - _mm256_extract_epi8(v, 30).to_bits().low_u8(), - _mm256_extract_epi8(v, 31).to_bits().low_u8(), - ] - } - - #[test] - fn vector_splat() { - #[target_feature(enable = "avx2")] - unsafe fn test() { - let v = __m256i::splat(0xAF); - assert_eq!( - unload(v), - [ - 0xAF, 0xAF, 0xAF, 0xAF, 0xAF, 0xAF, 0xAF, 0xAF, 0xAF, - 0xAF, 0xAF, 0xAF, 0xAF, 0xAF, 0xAF, 0xAF, 0xAF, 0xAF, - 0xAF, 0xAF, 0xAF, 0xAF, 0xAF, 0xAF, 0xAF, 0xAF, 0xAF, - 0xAF, 0xAF, 0xAF, 0xAF, 0xAF, - ] - ); - } - if !is_runnable() { - return; - } - unsafe { test() } - } - - #[test] - fn vector_is_zero() { - #[target_feature(enable = "avx2")] - unsafe fn test() { - let v = load([ - 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ]); - assert!(!v.is_zero()); - let v = load([ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ]); - assert!(v.is_zero()); - } - if !is_runnable() { - return; - } - unsafe { test() } - } - - #[test] - fn vector_cmpeq() { - #[target_feature(enable = "avx2")] - unsafe fn test() { - let v1 = load([ - 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, - 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 1, - ]); - let v2 = load([ - 32, 31, 30, 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, - 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, - ]); - assert_eq!( - unload(v1.cmpeq(v2)), - [ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xFF - ] - ); - } - if !is_runnable() { - return; - } - unsafe { test() } - } - - #[test] - fn vector_and() { - #[target_feature(enable = "avx2")] - unsafe fn test() { - let v1 = load([ - 0, 0, 0, 0, 0, 0b1001, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ]); - let v2 = load([ - 0, 0, 0, 0, 0, 0b1010, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ]); - assert_eq!( - unload(v1.and(v2)), - [ - 0, 0, 0, 0, 0, 0b1000, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ] - ); - } - if !is_runnable() { - return; - } - unsafe { test() } - } - - #[test] - fn vector_or() { - #[target_feature(enable = "avx2")] - unsafe fn test() { - let v1 = load([ - 0, 0, 0, 0, 0, 0b1001, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ]); - let v2 = load([ - 0, 0, 0, 0, 0, 0b1010, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ]); - assert_eq!( - unload(v1.or(v2)), - [ - 0, 0, 0, 0, 0, 0b1011, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ] - ); - } - if !is_runnable() { - return; - } - unsafe { test() } - } - - #[test] - fn vector_shift_8bit_lane_right() { - #[target_feature(enable = "avx2")] - unsafe fn test() { - let v = load([ - 0, 0, 0, 0, 0b1011, 0b0101, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ]); - assert_eq!( - unload(v.shift_8bit_lane_right::<2>()), - [ - 0, 0, 0, 0, 0b0010, 0b0001, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ] - ); - } - if !is_runnable() { - return; - } - unsafe { test() } - } - - #[test] - fn vector_shift_in_one_byte() { - #[target_feature(enable = "avx2")] - unsafe fn test() { - let v1 = load([ - 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, - 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, - ]); - let v2 = load([ - 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, - 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, - 63, 64, - ]); - assert_eq!( - unload(v1.shift_in_one_byte(v2)), - [ - 64, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, - 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, - 31, - ], - ); - } - if !is_runnable() { - return; - } - unsafe { test() } - } - - #[test] - fn vector_shift_in_two_bytes() { - #[target_feature(enable = "avx2")] - unsafe fn test() { - let v1 = load([ - 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, - 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, - ]); - let v2 = load([ - 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, - 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, - 63, 64, - ]); - assert_eq!( - unload(v1.shift_in_two_bytes(v2)), - [ - 63, 64, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, - 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, - 30, - ], - ); - } - if !is_runnable() { - return; - } - unsafe { test() } - } - - #[test] - fn vector_shift_in_three_bytes() { - #[target_feature(enable = "avx2")] - unsafe fn test() { - let v1 = load([ - 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, - 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, - ]); - let v2 = load([ - 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, - 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, - 63, 64, - ]); - assert_eq!( - unload(v1.shift_in_three_bytes(v2)), - [ - 62, 63, 64, 1, 
2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, - 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, - 29, - ], - ); - } - if !is_runnable() { - return; - } - unsafe { test() } - } - - #[test] - fn vector_shuffle_bytes() { - #[target_feature(enable = "avx2")] - unsafe fn test() { - let v1 = load([ - 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, - 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, - ]); - let v2 = load([ - 0, 0, 0, 0, 4, 4, 4, 4, 8, 8, 8, 8, 12, 12, 12, 12, 16, 16, - 16, 16, 20, 20, 20, 20, 24, 24, 24, 24, 28, 28, 28, 28, - ]); - assert_eq!( - unload(v1.shuffle_bytes(v2)), - [ - 1, 1, 1, 1, 5, 5, 5, 5, 9, 9, 9, 9, 13, 13, 13, 13, 17, - 17, 17, 17, 21, 21, 21, 21, 25, 25, 25, 25, 29, 29, 29, - 29 - ], - ); - } - if !is_runnable() { - return; - } - unsafe { test() } - } - - #[test] - fn vector_for_each_64bit_lane() { - #[target_feature(enable = "avx2")] - unsafe fn test() { - let v = load([ - 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, - 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x10, 0x11, 0x12, 0x13, 0x14, - 0x15, 0x16, 0x17, 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, - 0x1F, 0x20, - ]); - let mut lanes = [0u64; 4]; - v.for_each_64bit_lane(|i, lane| { - lanes[i] = lane; - None::<()> - }); - assert_eq!( - lanes, - [ - 0x0807060504030201, - 0x100F0E0D0C0B0A09, - 0x1817161514131211, - 0x201F1E1D1C1B1A19 - ] - ); - } - if !is_runnable() { - return; - } - unsafe { test() } - } - - #[test] - fn fat_vector_half_shift_in_one_byte() { - #[target_feature(enable = "avx2")] - unsafe fn test() { - let v1 = load_half([ - 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, - ]); - let v2 = load_half([ - 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, - ]); - assert_eq!( - unload(v1.half_shift_in_one_byte(v2)), - [ - 32, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 32, - 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 - ], - ); - } - if !is_runnable() { - return; - } - unsafe { test() } - } - - #[test] - fn fat_vector_half_shift_in_two_bytes() { - #[target_feature(enable = "avx2")] - unsafe fn test() { - let v1 = load_half([ - 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, - ]); - let v2 = load_half([ - 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, - ]); - assert_eq!( - unload(v1.half_shift_in_two_bytes(v2)), - [ - 31, 32, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 31, - 32, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, - ], - ); - } - if !is_runnable() { - return; - } - unsafe { test() } - } - - #[test] - fn fat_vector_half_shift_in_three_bytes() { - #[target_feature(enable = "avx2")] - unsafe fn test() { - let v1 = load_half([ - 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, - ]); - let v2 = load_half([ - 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, - ]); - assert_eq!( - unload(v1.half_shift_in_three_bytes(v2)), - [ - 30, 31, 32, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 30, - 31, 32, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, - ], - ); - } - if !is_runnable() { - return; - } - unsafe { test() } - } - - #[test] - fn fat_vector_swap_halves() { - #[target_feature(enable = "avx2")] - unsafe fn test() { - let v = load([ - 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, - 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, - ]); - assert_eq!( - unload(v.swap_halves()), - [ - 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, - 31, 32, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, - 16, - ], - ); - } - if !is_runnable() { - return; - } - unsafe { test() } 
- } - - #[test] - fn fat_vector_interleave_low_8bit_lanes() { - #[target_feature(enable = "avx2")] - unsafe fn test() { - let v1 = load([ - 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, - 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, - ]); - let v2 = load([ - 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, - 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, - 63, 64, - ]); - assert_eq!( - unload(v1.interleave_low_8bit_lanes(v2)), - [ - 1, 33, 2, 34, 3, 35, 4, 36, 5, 37, 6, 38, 7, 39, 8, 40, - 17, 49, 18, 50, 19, 51, 20, 52, 21, 53, 22, 54, 23, 55, - 24, 56, - ], - ); - } - if !is_runnable() { - return; - } - unsafe { test() } - } - - #[test] - fn fat_vector_interleave_high_8bit_lanes() { - #[target_feature(enable = "avx2")] - unsafe fn test() { - let v1 = load([ - 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, - 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, - ]); - let v2 = load([ - 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, - 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, - 63, 64, - ]); - assert_eq!( - unload(v1.interleave_high_8bit_lanes(v2)), - [ - 9, 41, 10, 42, 11, 43, 12, 44, 13, 45, 14, 46, 15, 47, 16, - 48, 25, 57, 26, 58, 27, 59, 28, 60, 29, 61, 30, 62, 31, - 63, 32, 64, - ], - ); - } - if !is_runnable() { - return; - } - unsafe { test() } - } - - #[test] - fn fat_vector_for_each_low_64bit_lane() { - #[target_feature(enable = "avx2")] - unsafe fn test() { - let v1 = load([ - 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, - 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x10, 0x11, 0x12, 0x13, 0x14, - 0x15, 0x16, 0x17, 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, - 0x1F, 0x20, - ]); - let v2 = load([ - 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2A, - 0x2B, 0x2C, 0x2D, 0x2E, 0x2F, 0x30, 0x31, 0x32, 0x33, 0x34, - 0x35, 0x36, 0x37, 0x38, 0x39, 0x3A, 0x3B, 0x3C, 0x3D, 0x3E, - 0x3F, 0x40, - ]); - let mut lanes = [0u64; 4]; - v1.for_each_low_64bit_lane(v2, |i, lane| { - lanes[i] = lane; - None::<()> - }); - assert_eq!( - lanes, - [ - 0x0807060504030201, - 0x100F0E0D0C0B0A09, - 0x2827262524232221, - 0x302F2E2D2C2B2A29 - ] - ); - } - if !is_runnable() { - return; - } - unsafe { test() } - } -} - -#[cfg(all(test, target_arch = "aarch64", target_feature = "neon"))] -mod tests_aarch64_neon { - use core::arch::aarch64::*; - - use super::*; - - #[target_feature(enable = "neon")] - unsafe fn load(lanes: [u8; 16]) -> uint8x16_t { - uint8x16_t::load_unaligned(&lanes as *const u8) - } - - #[target_feature(enable = "neon")] - unsafe fn unload(v: uint8x16_t) -> [u8; 16] { - [ - vgetq_lane_u8(v, 0), - vgetq_lane_u8(v, 1), - vgetq_lane_u8(v, 2), - vgetq_lane_u8(v, 3), - vgetq_lane_u8(v, 4), - vgetq_lane_u8(v, 5), - vgetq_lane_u8(v, 6), - vgetq_lane_u8(v, 7), - vgetq_lane_u8(v, 8), - vgetq_lane_u8(v, 9), - vgetq_lane_u8(v, 10), - vgetq_lane_u8(v, 11), - vgetq_lane_u8(v, 12), - vgetq_lane_u8(v, 13), - vgetq_lane_u8(v, 14), - vgetq_lane_u8(v, 15), - ] - } - - // Example functions. These don't test the Vector traits, but rather, - // specific NEON instructions. They are basically little experiments I - // wrote to figure out what an instruction does since their descriptions - // are so dense. I decided to keep the experiments around as example tests - // in case they're useful.
- - #[test] - fn example_vmaxvq_u8_non_zero() { - #[target_feature(enable = "neon")] - unsafe fn example() { - let v = load([0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]); - assert_eq!(vmaxvq_u8(v), 1); - } - unsafe { example() } - } - - #[test] - fn example_vmaxvq_u8_zero() { - #[target_feature(enable = "neon")] - unsafe fn example() { - let v = load([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]); - assert_eq!(vmaxvq_u8(v), 0); - } - unsafe { example() } - } - - #[test] - fn example_vpmaxq_u8_non_zero() { - #[target_feature(enable = "neon")] - unsafe fn example() { - let v = load([0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]); - let r = vpmaxq_u8(v, v); - assert_eq!( - unload(r), - [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0] - ); - } - unsafe { example() } - } - - #[test] - fn example_vpmaxq_u8_self() { - #[target_feature(enable = "neon")] - unsafe fn example() { - let v = - load([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]); - let r = vpmaxq_u8(v, v); - assert_eq!( - unload(r), - [2, 4, 6, 8, 10, 12, 14, 16, 2, 4, 6, 8, 10, 12, 14, 16] - ); - } - unsafe { example() } - } - - #[test] - fn example_vpmaxq_u8_other() { - #[target_feature(enable = "neon")] - unsafe fn example() { - let v1 = - load([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]); - let v2 = load([ - 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, - ]); - let r = vpmaxq_u8(v1, v2); - assert_eq!( - unload(r), - [2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32] - ); - } - unsafe { example() } - } - - // Now we test the actual methods on the Vector trait. - - #[test] - fn vector_splat() { - #[target_feature(enable = "neon")] - unsafe fn test() { - let v = uint8x16_t::splat(0xAF); - assert_eq!( - unload(v), - [ - 0xAF, 0xAF, 0xAF, 0xAF, 0xAF, 0xAF, 0xAF, 0xAF, 0xAF, - 0xAF, 0xAF, 0xAF, 0xAF, 0xAF, 0xAF, 0xAF - ] - ); - } - unsafe { test() } - } - - #[test] - fn vector_is_zero() { - #[target_feature(enable = "neon")] - unsafe fn test() { - let v = load([0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]); - assert!(!v.is_zero()); - let v = load([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]); - assert!(v.is_zero()); - } - unsafe { test() } - } - - #[test] - fn vector_cmpeq() { - #[target_feature(enable = "neon")] - unsafe fn test() { - let v1 = - load([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 1]); - let v2 = - load([16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1]); - assert_eq!( - unload(v1.cmpeq(v2)), - [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xFF] - ); - } - unsafe { test() } - } - - #[test] - fn vector_and() { - #[target_feature(enable = "neon")] - unsafe fn test() { - let v1 = - load([0, 0, 0, 0, 0, 0b1001, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]); - let v2 = - load([0, 0, 0, 0, 0, 0b1010, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]); - assert_eq!( - unload(v1.and(v2)), - [0, 0, 0, 0, 0, 0b1000, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] - ); - } - unsafe { test() } - } - - #[test] - fn vector_or() { - #[target_feature(enable = "neon")] - unsafe fn test() { - let v1 = - load([0, 0, 0, 0, 0, 0b1001, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]); - let v2 = - load([0, 0, 0, 0, 0, 0b1010, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]); - assert_eq!( - unload(v1.or(v2)), - [0, 0, 0, 0, 0, 0b1011, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] - ); - } - unsafe { test() } - } - - #[test] - fn vector_shift_8bit_lane_right() { - #[target_feature(enable = "neon")] - unsafe fn test() { - let v = load([ - 0, 0, 0, 0, 0b1011, 0b0101, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ]); - assert_eq!( - unload(v.shift_8bit_lane_right::<2>()), - [0, 0, 0, 0, 
0b0010, 0b0001, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] - ); - } - unsafe { test() } - } - - #[test] - fn vector_shift_in_one_byte() { - #[target_feature(enable = "neon")] - unsafe fn test() { - let v1 = - load([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]); - let v2 = load([ - 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, - ]); - assert_eq!( - unload(v1.shift_in_one_byte(v2)), - [32, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15], - ); - } - unsafe { test() } - } - - #[test] - fn vector_shift_in_two_bytes() { - #[target_feature(enable = "neon")] - unsafe fn test() { - let v1 = - load([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]); - let v2 = load([ - 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, - ]); - assert_eq!( - unload(v1.shift_in_two_bytes(v2)), - [31, 32, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14], - ); - } - unsafe { test() } - } - - #[test] - fn vector_shift_in_three_bytes() { - #[target_feature(enable = "neon")] - unsafe fn test() { - let v1 = - load([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]); - let v2 = load([ - 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, - ]); - assert_eq!( - unload(v1.shift_in_three_bytes(v2)), - [30, 31, 32, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13], - ); - } - unsafe { test() } - } - - #[test] - fn vector_shuffle_bytes() { - #[target_feature(enable = "neon")] - unsafe fn test() { - let v1 = - load([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]); - let v2 = - load([0, 0, 0, 0, 4, 4, 4, 4, 8, 8, 8, 8, 12, 12, 12, 12]); - assert_eq!( - unload(v1.shuffle_bytes(v2)), - [1, 1, 1, 1, 5, 5, 5, 5, 9, 9, 9, 9, 13, 13, 13, 13], - ); - } - unsafe { test() } - } - - #[test] - fn vector_for_each_64bit_lane() { - #[target_feature(enable = "neon")] - unsafe fn test() { - let v = load([ - 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, - 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x10, - ]); - let mut lanes = [0u64; 2]; - v.for_each_64bit_lane(|i, lane| { - lanes[i] = lane; - None::<()> - }); - assert_eq!(lanes, [0x0807060504030201, 0x100F0E0D0C0B0A09],); - } - unsafe { test() } - } -} diff --git a/vendor/aho-corasick/src/tests.rs b/vendor/aho-corasick/src/tests.rs deleted file mode 100644 index a5276f85f6e0b6..00000000000000 --- a/vendor/aho-corasick/src/tests.rs +++ /dev/null @@ -1,1664 +0,0 @@ -use std::{collections::HashMap, format, string::String, vec::Vec}; - -use crate::{ - AhoCorasick, AhoCorasickBuilder, AhoCorasickKind, Anchored, Input, Match, - MatchKind, StartKind, -}; - -/// A description of a single test against an Aho-Corasick automaton. -/// -/// A single test may not necessarily pass on every configuration of an -/// Aho-Corasick automaton. The tests are categorized and grouped appropriately -/// below. -#[derive(Clone, Debug, Eq, PartialEq)] -struct SearchTest { - /// The name of this test, for debugging. - name: &'static str, - /// The patterns to search for. - patterns: &'static [&'static str], - /// The text to search. - haystack: &'static str, - /// Each match is a triple of (pattern_index, start, end), where - /// pattern_index is an index into `patterns` and `start`/`end` are indices - /// into `haystack`. - matches: &'static [(usize, usize, usize)], -} - -/// Short-hand constructor for SearchTest. We use it a lot below. -macro_rules! 
t { - ($name:ident, $patterns:expr, $haystack:expr, $matches:expr) => { - SearchTest { - name: stringify!($name), - patterns: $patterns, - haystack: $haystack, - matches: $matches, - } - }; -} - -/// A collection of test groups. -type TestCollection = &'static [&'static [SearchTest]]; - -// Define several collections corresponding to the different types of match -// semantics supported by Aho-Corasick. These collections have some overlap, -// but each collection should have some tests that no other collection has. - -/// Tests for Aho-Corasick's standard non-overlapping match semantics. -const AC_STANDARD_NON_OVERLAPPING: TestCollection = - &[BASICS, NON_OVERLAPPING, STANDARD, REGRESSION]; - -/// Tests for Aho-Corasick's anchored standard non-overlapping match semantics. -const AC_STANDARD_ANCHORED_NON_OVERLAPPING: TestCollection = - &[ANCHORED_BASICS, ANCHORED_NON_OVERLAPPING, STANDARD_ANCHORED]; - -/// Tests for Aho-Corasick's standard overlapping match semantics. -const AC_STANDARD_OVERLAPPING: TestCollection = - &[BASICS, OVERLAPPING, REGRESSION]; - -/* -Iterators of anchored overlapping searches were removed from the API -after 0.7, but we leave the tests commented out for posterity. -/// Tests for Aho-Corasick's anchored standard overlapping match semantics. -const AC_STANDARD_ANCHORED_OVERLAPPING: TestCollection = - &[ANCHORED_BASICS, ANCHORED_OVERLAPPING]; -*/ - -/// Tests for Aho-Corasick's leftmost-first match semantics. -const AC_LEFTMOST_FIRST: TestCollection = - &[BASICS, NON_OVERLAPPING, LEFTMOST, LEFTMOST_FIRST, REGRESSION]; - -/// Tests for Aho-Corasick's anchored leftmost-first match semantics. -const AC_LEFTMOST_FIRST_ANCHORED: TestCollection = &[ - ANCHORED_BASICS, - ANCHORED_NON_OVERLAPPING, - ANCHORED_LEFTMOST, - ANCHORED_LEFTMOST_FIRST, -]; - -/// Tests for Aho-Corasick's leftmost-longest match semantics. -const AC_LEFTMOST_LONGEST: TestCollection = - &[BASICS, NON_OVERLAPPING, LEFTMOST, LEFTMOST_LONGEST, REGRESSION]; - -/// Tests for Aho-Corasick's anchored leftmost-longest match semantics. -const AC_LEFTMOST_LONGEST_ANCHORED: TestCollection = &[ - ANCHORED_BASICS, - ANCHORED_NON_OVERLAPPING, - ANCHORED_LEFTMOST, - ANCHORED_LEFTMOST_LONGEST, -]; - -// Now define the individual tests that make up the collections above. - -/// A collection of tests for the Aho-Corasick algorithm that should always be -/// true regardless of match semantics. That is, all combinations of -/// leftmost-{shortest, first, longest} x {overlapping, non-overlapping} -/// should produce the same answer.
-const BASICS: &'static [SearchTest] = &[ - t!(basic000, &[], "", &[]), - t!(basic001, &[""], "a", &[(0, 0, 0), (0, 1, 1)]), - t!(basic002, &["a"], "", &[]), - t!(basic010, &["a"], "a", &[(0, 0, 1)]), - t!(basic020, &["a"], "aa", &[(0, 0, 1), (0, 1, 2)]), - t!(basic030, &["a"], "aaa", &[(0, 0, 1), (0, 1, 2), (0, 2, 3)]), - t!(basic040, &["a"], "aba", &[(0, 0, 1), (0, 2, 3)]), - t!(basic050, &["a"], "bba", &[(0, 2, 3)]), - t!(basic060, &["a"], "bbb", &[]), - t!(basic070, &["a"], "bababbbba", &[(0, 1, 2), (0, 3, 4), (0, 8, 9)]), - t!(basic100, &["aa"], "", &[]), - t!(basic110, &["aa"], "aa", &[(0, 0, 2)]), - t!(basic120, &["aa"], "aabbaa", &[(0, 0, 2), (0, 4, 6)]), - t!(basic130, &["aa"], "abbab", &[]), - t!(basic140, &["aa"], "abbabaa", &[(0, 5, 7)]), - t!(basic200, &["abc"], "abc", &[(0, 0, 3)]), - t!(basic210, &["abc"], "zazabzabcz", &[(0, 6, 9)]), - t!(basic220, &["abc"], "zazabczabcz", &[(0, 3, 6), (0, 7, 10)]), - t!(basic300, &["a", "b"], "", &[]), - t!(basic310, &["a", "b"], "z", &[]), - t!(basic320, &["a", "b"], "b", &[(1, 0, 1)]), - t!(basic330, &["a", "b"], "a", &[(0, 0, 1)]), - t!( - basic340, - &["a", "b"], - "abba", - &[(0, 0, 1), (1, 1, 2), (1, 2, 3), (0, 3, 4),] - ), - t!( - basic350, - &["b", "a"], - "abba", - &[(1, 0, 1), (0, 1, 2), (0, 2, 3), (1, 3, 4),] - ), - t!(basic360, &["abc", "bc"], "xbc", &[(1, 1, 3),]), - t!(basic400, &["foo", "bar"], "", &[]), - t!(basic410, &["foo", "bar"], "foobar", &[(0, 0, 3), (1, 3, 6),]), - t!(basic420, &["foo", "bar"], "barfoo", &[(1, 0, 3), (0, 3, 6),]), - t!(basic430, &["foo", "bar"], "foofoo", &[(0, 0, 3), (0, 3, 6),]), - t!(basic440, &["foo", "bar"], "barbar", &[(1, 0, 3), (1, 3, 6),]), - t!(basic450, &["foo", "bar"], "bafofoo", &[(0, 4, 7),]), - t!(basic460, &["bar", "foo"], "bafofoo", &[(1, 4, 7),]), - t!(basic470, &["foo", "bar"], "fobabar", &[(1, 4, 7),]), - t!(basic480, &["bar", "foo"], "fobabar", &[(0, 4, 7),]), - t!(basic600, &[""], "", &[(0, 0, 0)]), - t!(basic610, &[""], "a", &[(0, 0, 0), (0, 1, 1)]), - t!(basic620, &[""], "abc", &[(0, 0, 0), (0, 1, 1), (0, 2, 2), (0, 3, 3)]), - t!(basic700, &["yabcdef", "abcdezghi"], "yabcdefghi", &[(0, 0, 7),]), - t!(basic710, &["yabcdef", "abcdezghi"], "yabcdezghi", &[(1, 1, 10),]), - t!( - basic720, - &["yabcdef", "bcdeyabc", "abcdezghi"], - "yabcdezghi", - &[(2, 1, 10),] - ), -]; - -/// A collection of *anchored* tests for the Aho-Corasick algorithm that should -/// always be true regardless of match semantics. That is, all combinations of -/// leftmost-{shortest, first, longest} x {overlapping, non-overlapping} should -/// produce the same answer. -const ANCHORED_BASICS: &'static [SearchTest] = &[ - t!(abasic000, &[], "", &[]), - t!(abasic001, &[], "a", &[]), - t!(abasic002, &[], "abc", &[]), - t!(abasic010, &[""], "", &[(0, 0, 0)]), - t!(abasic020, &[""], "a", &[(0, 0, 0), (0, 1, 1)]), - t!(abasic030, &[""], "abc", &[(0, 0, 0), (0, 1, 1), (0, 2, 2), (0, 3, 3)]), - t!(abasic100, &["a"], "a", &[(0, 0, 1)]), - t!(abasic110, &["a"], "aa", &[(0, 0, 1), (0, 1, 2)]), - t!(abasic120, &["a", "b"], "ab", &[(0, 0, 1), (1, 1, 2)]), - t!(abasic130, &["a", "b"], "ba", &[(1, 0, 1), (0, 1, 2)]), - t!(abasic140, &["foo", "foofoo"], "foo", &[(0, 0, 3)]), - t!(abasic150, &["foofoo", "foo"], "foo", &[(1, 0, 3)]), - t!(abasic200, &["foo"], "foofoo foo", &[(0, 0, 3), (0, 3, 6)]), -]; - -/// Tests for non-overlapping standard match semantics. -/// -/// These tests generally shouldn't pass for leftmost-{first,longest}, although -/// some do in order to write clearer tests. 
For example, standard000 will -/// pass with leftmost-first semantics, but standard010 will not. We write -/// both to emphasize how the match semantics work. -const STANDARD: &'static [SearchTest] = &[ - t!(standard000, &["ab", "abcd"], "abcd", &[(0, 0, 2)]), - t!(standard010, &["abcd", "ab"], "abcd", &[(1, 0, 2)]), - t!(standard020, &["abcd", "ab", "abc"], "abcd", &[(1, 0, 2)]), - t!(standard030, &["abcd", "abc", "ab"], "abcd", &[(2, 0, 2)]), - t!(standard040, &["a", ""], "a", &[(1, 0, 0), (1, 1, 1)]), - t!( - standard400, - &["abcd", "bcd", "cd", "b"], - "abcd", - &[(3, 1, 2), (2, 2, 4),] - ), - t!(standard410, &["", "a"], "a", &[(0, 0, 0), (0, 1, 1),]), - t!(standard420, &["", "a"], "aa", &[(0, 0, 0), (0, 1, 1), (0, 2, 2),]), - t!(standard430, &["", "a", ""], "a", &[(0, 0, 0), (0, 1, 1),]), - t!(standard440, &["a", "", ""], "a", &[(1, 0, 0), (1, 1, 1),]), - t!(standard450, &["", "", "a"], "a", &[(0, 0, 0), (0, 1, 1),]), -]; - -/// Like STANDARD, but for anchored searches. -const STANDARD_ANCHORED: &'static [SearchTest] = &[ - t!(astandard000, &["ab", "abcd"], "abcd", &[(0, 0, 2)]), - t!(astandard010, &["abcd", "ab"], "abcd", &[(1, 0, 2)]), - t!(astandard020, &["abcd", "ab", "abc"], "abcd", &[(1, 0, 2)]), - t!(astandard030, &["abcd", "abc", "ab"], "abcd", &[(2, 0, 2)]), - t!(astandard040, &["a", ""], "a", &[(1, 0, 0), (1, 1, 1)]), - t!(astandard050, &["abcd", "bcd", "cd", "b"], "abcd", &[(0, 0, 4)]), - t!(astandard410, &["", "a"], "a", &[(0, 0, 0), (0, 1, 1)]), - t!(astandard420, &["", "a"], "aa", &[(0, 0, 0), (0, 1, 1), (0, 2, 2)]), - t!(astandard430, &["", "a", ""], "a", &[(0, 0, 0), (0, 1, 1)]), - t!(astandard440, &["a", "", ""], "a", &[(1, 0, 0), (1, 1, 1)]), - t!(astandard450, &["", "", "a"], "a", &[(0, 0, 0), (0, 1, 1)]), -]; - -/// Tests for non-overlapping leftmost match semantics. These should pass for -/// both leftmost-first and leftmost-longest match kinds. Stated differently, -/// among ambiguous matches, the longest match and the match that appeared -/// first when constructing the automaton should always be the same. 
-const LEFTMOST: &'static [SearchTest] = &[ - t!(leftmost000, &["ab", "ab"], "abcd", &[(0, 0, 2)]), - t!(leftmost010, &["a", ""], "a", &[(0, 0, 1)]), - t!(leftmost011, &["a", ""], "ab", &[(0, 0, 1), (1, 2, 2)]), - t!(leftmost020, &["", ""], "a", &[(0, 0, 0), (0, 1, 1)]), - t!(leftmost030, &["a", "ab"], "aa", &[(0, 0, 1), (0, 1, 2)]), - t!(leftmost031, &["ab", "a"], "aa", &[(1, 0, 1), (1, 1, 2)]), - t!(leftmost032, &["ab", "a"], "xayabbbz", &[(1, 1, 2), (0, 3, 5)]), - t!(leftmost300, &["abcd", "bce", "b"], "abce", &[(1, 1, 4)]), - t!(leftmost310, &["abcd", "ce", "bc"], "abce", &[(2, 1, 3)]), - t!(leftmost320, &["abcd", "bce", "ce", "b"], "abce", &[(1, 1, 4)]), - t!(leftmost330, &["abcd", "bce", "cz", "bc"], "abcz", &[(3, 1, 3)]), - t!(leftmost340, &["bce", "cz", "bc"], "bcz", &[(2, 0, 2)]), - t!(leftmost350, &["abc", "bd", "ab"], "abd", &[(2, 0, 2)]), - t!( - leftmost360, - &["abcdefghi", "hz", "abcdefgh"], - "abcdefghz", - &[(2, 0, 8),] - ), - t!( - leftmost370, - &["abcdefghi", "cde", "hz", "abcdefgh"], - "abcdefghz", - &[(3, 0, 8),] - ), - t!( - leftmost380, - &["abcdefghi", "hz", "abcdefgh", "a"], - "abcdefghz", - &[(2, 0, 8),] - ), - t!( - leftmost390, - &["b", "abcdefghi", "hz", "abcdefgh"], - "abcdefghz", - &[(3, 0, 8),] - ), - t!( - leftmost400, - &["h", "abcdefghi", "hz", "abcdefgh"], - "abcdefghz", - &[(3, 0, 8),] - ), - t!( - leftmost410, - &["z", "abcdefghi", "hz", "abcdefgh"], - "abcdefghz", - &[(3, 0, 8), (0, 8, 9),] - ), -]; - -/// Like LEFTMOST, but for anchored searches. -const ANCHORED_LEFTMOST: &'static [SearchTest] = &[ - t!(aleftmost000, &["ab", "ab"], "abcd", &[(0, 0, 2)]), - // We shouldn't allow an empty match immediately following a match, right? - t!(aleftmost010, &["a", ""], "a", &[(0, 0, 1)]), - t!(aleftmost020, &["", ""], "a", &[(0, 0, 0), (0, 1, 1)]), - t!(aleftmost030, &["a", "ab"], "aa", &[(0, 0, 1), (0, 1, 2)]), - t!(aleftmost031, &["ab", "a"], "aa", &[(1, 0, 1), (1, 1, 2)]), - t!(aleftmost032, &["ab", "a"], "xayabbbz", &[]), - t!(aleftmost300, &["abcd", "bce", "b"], "abce", &[]), - t!(aleftmost301, &["abcd", "bcd", "cd", "b"], "abcd", &[(0, 0, 4)]), - t!(aleftmost310, &["abcd", "ce", "bc"], "abce", &[]), - t!(aleftmost320, &["abcd", "bce", "ce", "b"], "abce", &[]), - t!(aleftmost330, &["abcd", "bce", "cz", "bc"], "abcz", &[]), - t!(aleftmost340, &["bce", "cz", "bc"], "bcz", &[(2, 0, 2)]), - t!(aleftmost350, &["abc", "bd", "ab"], "abd", &[(2, 0, 2)]), - t!( - aleftmost360, - &["abcdefghi", "hz", "abcdefgh"], - "abcdefghz", - &[(2, 0, 8),] - ), - t!( - aleftmost370, - &["abcdefghi", "cde", "hz", "abcdefgh"], - "abcdefghz", - &[(3, 0, 8),] - ), - t!( - aleftmost380, - &["abcdefghi", "hz", "abcdefgh", "a"], - "abcdefghz", - &[(2, 0, 8),] - ), - t!( - aleftmost390, - &["b", "abcdefghi", "hz", "abcdefgh"], - "abcdefghz", - &[(3, 0, 8),] - ), - t!( - aleftmost400, - &["h", "abcdefghi", "hz", "abcdefgh"], - "abcdefghz", - &[(3, 0, 8),] - ), - t!( - aleftmost410, - &["z", "abcdefghi", "hz", "abcdefgh"], - "abcdefghzyz", - &[(3, 0, 8), (0, 8, 9)] - ), -]; - -/// Tests for non-overlapping leftmost-first match semantics. These tests -/// should generally be specific to leftmost-first, which means they should -/// generally fail under leftmost-longest semantics. 
-const LEFTMOST_FIRST: &'static [SearchTest] = &[ - t!(leftfirst000, &["ab", "abcd"], "abcd", &[(0, 0, 2)]), - t!(leftfirst010, &["", "a"], "a", &[(0, 0, 0), (0, 1, 1)]), - t!(leftfirst011, &["", "a", ""], "a", &[(0, 0, 0), (0, 1, 1),]), - t!(leftfirst012, &["a", "", ""], "a", &[(0, 0, 1)]), - t!(leftfirst013, &["", "", "a"], "a", &[(0, 0, 0), (0, 1, 1)]), - t!(leftfirst014, &["a", ""], "a", &[(0, 0, 1)]), - t!(leftfirst015, &["a", ""], "ab", &[(0, 0, 1), (1, 2, 2)]), - t!(leftfirst020, &["abcd", "ab"], "abcd", &[(0, 0, 4)]), - t!(leftfirst030, &["ab", "ab"], "abcd", &[(0, 0, 2)]), - t!(leftfirst040, &["a", "ab"], "xayabbbz", &[(0, 1, 2), (0, 3, 4)]), - t!(leftfirst100, &["abcdefg", "bcde", "bcdef"], "abcdef", &[(1, 1, 5)]), - t!(leftfirst110, &["abcdefg", "bcdef", "bcde"], "abcdef", &[(1, 1, 6)]), - t!(leftfirst300, &["abcd", "b", "bce"], "abce", &[(1, 1, 2)]), - t!( - leftfirst310, - &["abcd", "b", "bce", "ce"], - "abce", - &[(1, 1, 2), (3, 2, 4),] - ), - t!( - leftfirst320, - &["a", "abcdefghi", "hz", "abcdefgh"], - "abcdefghz", - &[(0, 0, 1), (2, 7, 9),] - ), - t!(leftfirst330, &["a", "abab"], "abab", &[(0, 0, 1), (0, 2, 3)]), - t!(leftfirst400, &["amwix", "samwise", "sam"], "Zsamwix", &[(2, 1, 4)]), -]; - -/// Like LEFTMOST_FIRST, but for anchored searches. -const ANCHORED_LEFTMOST_FIRST: &'static [SearchTest] = &[ - t!(aleftfirst000, &["ab", "abcd"], "abcd", &[(0, 0, 2)]), - t!(aleftfirst010, &["", "a"], "a", &[(0, 0, 0), (0, 1, 1)]), - t!(aleftfirst011, &["", "a", ""], "a", &[(0, 0, 0), (0, 1, 1)]), - t!(aleftfirst012, &["a", "", ""], "a", &[(0, 0, 1)]), - t!(aleftfirst013, &["", "", "a"], "a", &[(0, 0, 0), (0, 1, 1)]), - t!(aleftfirst020, &["abcd", "ab"], "abcd", &[(0, 0, 4)]), - t!(aleftfirst030, &["ab", "ab"], "abcd", &[(0, 0, 2)]), - t!(aleftfirst040, &["a", "ab"], "xayabbbz", &[]), - t!(aleftfirst100, &["abcdefg", "bcde", "bcdef"], "abcdef", &[]), - t!(aleftfirst110, &["abcdefg", "bcdef", "bcde"], "abcdef", &[]), - t!(aleftfirst300, &["abcd", "b", "bce"], "abce", &[]), - t!(aleftfirst310, &["abcd", "b", "bce", "ce"], "abce", &[]), - t!( - aleftfirst320, - &["a", "abcdefghi", "hz", "abcdefgh"], - "abcdefghz", - &[(0, 0, 1)] - ), - t!(aleftfirst330, &["a", "abab"], "abab", &[(0, 0, 1)]), - t!(aleftfirst400, &["wise", "samwise", "sam"], "samwix", &[(2, 0, 3)]), -]; - -/// Tests for non-overlapping leftmost-longest match semantics. These tests -/// should generally be specific to leftmost-longest, which means they should -/// generally fail under leftmost-first semantics. 
-const LEFTMOST_LONGEST: &'static [SearchTest] = &[ - t!(leftlong000, &["ab", "abcd"], "abcd", &[(1, 0, 4)]), - t!(leftlong010, &["abcd", "bcd", "cd", "b"], "abcd", &[(0, 0, 4),]), - t!(leftlong020, &["", "a"], "a", &[(1, 0, 1)]), - t!(leftlong021, &["", "a", ""], "a", &[(1, 0, 1)]), - t!(leftlong022, &["a", "", ""], "a", &[(0, 0, 1)]), - t!(leftlong023, &["", "", "a"], "a", &[(2, 0, 1)]), - t!(leftlong024, &["", "a"], "ab", &[(1, 0, 1), (0, 2, 2)]), - t!(leftlong030, &["", "a"], "aa", &[(1, 0, 1), (1, 1, 2)]), - t!(leftlong040, &["a", "ab"], "a", &[(0, 0, 1)]), - t!(leftlong050, &["a", "ab"], "ab", &[(1, 0, 2)]), - t!(leftlong060, &["ab", "a"], "a", &[(1, 0, 1)]), - t!(leftlong070, &["ab", "a"], "ab", &[(0, 0, 2)]), - t!(leftlong100, &["abcdefg", "bcde", "bcdef"], "abcdef", &[(2, 1, 6)]), - t!(leftlong110, &["abcdefg", "bcdef", "bcde"], "abcdef", &[(1, 1, 6)]), - t!(leftlong300, &["abcd", "b", "bce"], "abce", &[(2, 1, 4)]), - t!( - leftlong310, - &["a", "abcdefghi", "hz", "abcdefgh"], - "abcdefghz", - &[(3, 0, 8),] - ), - t!(leftlong320, &["a", "abab"], "abab", &[(1, 0, 4)]), - t!(leftlong330, &["abcd", "b", "ce"], "abce", &[(1, 1, 2), (2, 2, 4),]), - t!(leftlong340, &["a", "ab"], "xayabbbz", &[(0, 1, 2), (1, 3, 5)]), -]; - -/// Like LEFTMOST_LONGEST, but for anchored searches. -const ANCHORED_LEFTMOST_LONGEST: &'static [SearchTest] = &[ - t!(aleftlong000, &["ab", "abcd"], "abcd", &[(1, 0, 4)]), - t!(aleftlong010, &["abcd", "bcd", "cd", "b"], "abcd", &[(0, 0, 4),]), - t!(aleftlong020, &["", "a"], "a", &[(1, 0, 1)]), - t!(aleftlong021, &["", "a", ""], "a", &[(1, 0, 1)]), - t!(aleftlong022, &["a", "", ""], "a", &[(0, 0, 1)]), - t!(aleftlong023, &["", "", "a"], "a", &[(2, 0, 1)]), - t!(aleftlong030, &["", "a"], "aa", &[(1, 0, 1), (1, 1, 2)]), - t!(aleftlong040, &["a", "ab"], "a", &[(0, 0, 1)]), - t!(aleftlong050, &["a", "ab"], "ab", &[(1, 0, 2)]), - t!(aleftlong060, &["ab", "a"], "a", &[(1, 0, 1)]), - t!(aleftlong070, &["ab", "a"], "ab", &[(0, 0, 2)]), - t!(aleftlong100, &["abcdefg", "bcde", "bcdef"], "abcdef", &[]), - t!(aleftlong110, &["abcdefg", "bcdef", "bcde"], "abcdef", &[]), - t!(aleftlong300, &["abcd", "b", "bce"], "abce", &[]), - t!( - aleftlong310, - &["a", "abcdefghi", "hz", "abcdefgh"], - "abcdefghz", - &[(3, 0, 8),] - ), - t!(aleftlong320, &["a", "abab"], "abab", &[(1, 0, 4)]), - t!(aleftlong330, &["abcd", "b", "ce"], "abce", &[]), - t!(aleftlong340, &["a", "ab"], "xayabbbz", &[]), -]; - -/// Tests for non-overlapping match semantics. -/// -/// Generally these tests shouldn't pass when using overlapping semantics. -/// These should pass for both standard and leftmost match semantics. -const NON_OVERLAPPING: &'static [SearchTest] = &[ - t!(nover010, &["abcd", "bcd", "cd"], "abcd", &[(0, 0, 4),]), - t!(nover020, &["bcd", "cd", "abcd"], "abcd", &[(2, 0, 4),]), - t!(nover030, &["abc", "bc"], "zazabcz", &[(0, 3, 6),]), - t!( - nover100, - &["ab", "ba"], - "abababa", - &[(0, 0, 2), (0, 2, 4), (0, 4, 6),] - ), - t!(nover200, &["foo", "foo"], "foobarfoo", &[(0, 0, 3), (0, 6, 9),]), - t!(nover300, &["", ""], "", &[(0, 0, 0),]), - t!(nover310, &["", ""], "a", &[(0, 0, 0), (0, 1, 1),]), -]; - -/// Like NON_OVERLAPPING, but for anchored searches. 
-const ANCHORED_NON_OVERLAPPING: &'static [SearchTest] = &[ - t!(anover010, &["abcd", "bcd", "cd"], "abcd", &[(0, 0, 4),]), - t!(anover020, &["bcd", "cd", "abcd"], "abcd", &[(2, 0, 4),]), - t!(anover030, &["abc", "bc"], "zazabcz", &[]), - t!( - anover100, - &["ab", "ba"], - "abababa", - &[(0, 0, 2), (0, 2, 4), (0, 4, 6)] - ), - t!(anover200, &["foo", "foo"], "foobarfoo", &[(0, 0, 3)]), - t!(anover300, &["", ""], "", &[(0, 0, 0)]), - t!(anover310, &["", ""], "a", &[(0, 0, 0), (0, 1, 1)]), -]; - -/// Tests for overlapping match semantics. -/// -/// This only supports standard match semantics, since leftmost-{first,longest} -/// do not support overlapping matches. -const OVERLAPPING: &'static [SearchTest] = &[ - t!( - over000, - &["abcd", "bcd", "cd", "b"], - "abcd", - &[(3, 1, 2), (0, 0, 4), (1, 1, 4), (2, 2, 4),] - ), - t!( - over010, - &["bcd", "cd", "b", "abcd"], - "abcd", - &[(2, 1, 2), (3, 0, 4), (0, 1, 4), (1, 2, 4),] - ), - t!( - over020, - &["abcd", "bcd", "cd"], - "abcd", - &[(0, 0, 4), (1, 1, 4), (2, 2, 4),] - ), - t!( - over030, - &["bcd", "abcd", "cd"], - "abcd", - &[(1, 0, 4), (0, 1, 4), (2, 2, 4),] - ), - t!( - over040, - &["bcd", "cd", "abcd"], - "abcd", - &[(2, 0, 4), (0, 1, 4), (1, 2, 4),] - ), - t!(over050, &["abc", "bc"], "zazabcz", &[(0, 3, 6), (1, 4, 6),]), - t!( - over100, - &["ab", "ba"], - "abababa", - &[(0, 0, 2), (1, 1, 3), (0, 2, 4), (1, 3, 5), (0, 4, 6), (1, 5, 7),] - ), - t!( - over200, - &["foo", "foo"], - "foobarfoo", - &[(0, 0, 3), (1, 0, 3), (0, 6, 9), (1, 6, 9),] - ), - t!(over300, &["", ""], "", &[(0, 0, 0), (1, 0, 0),]), - t!( - over310, - &["", ""], - "a", - &[(0, 0, 0), (1, 0, 0), (0, 1, 1), (1, 1, 1),] - ), - t!(over320, &["", "a"], "a", &[(0, 0, 0), (1, 0, 1), (0, 1, 1),]), - t!( - over330, - &["", "a", ""], - "a", - &[(0, 0, 0), (2, 0, 0), (1, 0, 1), (0, 1, 1), (2, 1, 1),] - ), - t!( - over340, - &["a", "", ""], - "a", - &[(1, 0, 0), (2, 0, 0), (0, 0, 1), (1, 1, 1), (2, 1, 1),] - ), - t!( - over350, - &["", "", "a"], - "a", - &[(0, 0, 0), (1, 0, 0), (2, 0, 1), (0, 1, 1), (1, 1, 1),] - ), - t!( - over360, - &["foo", "foofoo"], - "foofoo", - &[(0, 0, 3), (1, 0, 6), (0, 3, 6)] - ), -]; - -/* -Iterators of anchored overlapping searches were removed from the API in -after 0.7, but we leave the tests commented out for posterity. -/// Like OVERLAPPING, but for anchored searches. -const ANCHORED_OVERLAPPING: &'static [SearchTest] = &[ - t!(aover000, &["abcd", "bcd", "cd", "b"], "abcd", &[(0, 0, 4)]), - t!(aover010, &["bcd", "cd", "b", "abcd"], "abcd", &[(3, 0, 4)]), - t!(aover020, &["abcd", "bcd", "cd"], "abcd", &[(0, 0, 4)]), - t!(aover030, &["bcd", "abcd", "cd"], "abcd", &[(1, 0, 4)]), - t!(aover040, &["bcd", "cd", "abcd"], "abcd", &[(2, 0, 4)]), - t!(aover050, &["abc", "bc"], "zazabcz", &[]), - t!(aover100, &["ab", "ba"], "abababa", &[(0, 0, 2)]), - t!(aover200, &["foo", "foo"], "foobarfoo", &[(0, 0, 3), (1, 0, 3)]), - t!(aover300, &["", ""], "", &[(0, 0, 0), (1, 0, 0),]), - t!(aover310, &["", ""], "a", &[(0, 0, 0), (1, 0, 0)]), - t!(aover320, &["", "a"], "a", &[(0, 0, 0), (1, 0, 1)]), - t!(aover330, &["", "a", ""], "a", &[(0, 0, 0), (2, 0, 0), (1, 0, 1)]), - t!(aover340, &["a", "", ""], "a", &[(1, 0, 0), (2, 0, 0), (0, 0, 1)]), - t!(aover350, &["", "", "a"], "a", &[(0, 0, 0), (1, 0, 0), (2, 0, 1)]), - t!(aover360, &["foo", "foofoo"], "foofoo", &[(0, 0, 3), (1, 0, 6)]), -]; -*/ - -/// Tests for ASCII case insensitivity. -/// -/// These tests should all have the same behavior regardless of match semantics -/// or whether the search is overlapping. 
-const ASCII_CASE_INSENSITIVE: &'static [SearchTest] = &[ - t!(acasei000, &["a"], "A", &[(0, 0, 1)]), - t!(acasei010, &["Samwise"], "SAMWISE", &[(0, 0, 7)]), - t!(acasei011, &["Samwise"], "SAMWISE.abcd", &[(0, 0, 7)]), - t!(acasei020, &["fOoBaR"], "quux foobar baz", &[(0, 5, 11)]), -]; - -/// Like ASCII_CASE_INSENSITIVE, but specifically for non-overlapping tests. -const ASCII_CASE_INSENSITIVE_NON_OVERLAPPING: &'static [SearchTest] = &[ - t!(acasei000, &["foo", "FOO"], "fOo", &[(0, 0, 3)]), - t!(acasei000, &["FOO", "foo"], "fOo", &[(0, 0, 3)]), - t!(acasei010, &["abc", "def"], "abcdef", &[(0, 0, 3), (1, 3, 6)]), -]; - -/// Like ASCII_CASE_INSENSITIVE, but specifically for overlapping tests. -const ASCII_CASE_INSENSITIVE_OVERLAPPING: &'static [SearchTest] = &[ - t!(acasei000, &["foo", "FOO"], "fOo", &[(0, 0, 3), (1, 0, 3)]), - t!(acasei001, &["FOO", "foo"], "fOo", &[(0, 0, 3), (1, 0, 3)]), - // This is a regression test from: - // https://github.com/BurntSushi/aho-corasick/issues/68 - // Previously, it was reporting a duplicate (1, 3, 6) match. - t!( - acasei010, - &["abc", "def", "abcdef"], - "abcdef", - &[(0, 0, 3), (2, 0, 6), (1, 3, 6)] - ), -]; - -/// Regression tests that are applied to all Aho-Corasick combinations. -/// -/// If regression tests are needed for specific match semantics, then add them -/// to the appropriate group above. -const REGRESSION: &'static [SearchTest] = &[ - t!(regression010, &["inf", "ind"], "infind", &[(0, 0, 3), (1, 3, 6),]), - t!(regression020, &["ind", "inf"], "infind", &[(1, 0, 3), (0, 3, 6),]), - t!( - regression030, - &["libcore/", "libstd/"], - "libcore/char/methods.rs", - &[(0, 0, 8),] - ), - t!( - regression040, - &["libstd/", "libcore/"], - "libcore/char/methods.rs", - &[(1, 0, 8),] - ), - t!( - regression050, - &["\x00\x00\x01", "\x00\x00\x00"], - "\x00\x00\x00", - &[(1, 0, 3),] - ), - t!( - regression060, - &["\x00\x00\x00", "\x00\x00\x01"], - "\x00\x00\x00", - &[(0, 0, 3),] - ), -]; - -// Now define a test for each combination of things above that we want to run. -// Since there are a few different combinations for each collection of tests, -// we define a couple of macros to avoid repetition drudgery. The testconfig -// macro constructs the automaton from a given match kind, and runs the search -// tests one-by-one over the given collection. The `with` parameter allows one -// to configure the builder with additional parameters. The testcombo macro -// invokes testconfig in precisely this way: it sets up several tests where -// each one turns a different knob on AhoCorasickBuilder. - -macro_rules! 
testconfig { - (anchored, $name:ident, $collection:expr, $kind:ident, $with:expr) => { - #[test] - fn $name() { - run_search_tests($collection, |test| { - let mut builder = AhoCorasick::builder(); - $with(&mut builder); - let input = Input::new(test.haystack).anchored(Anchored::Yes); - builder - .match_kind(MatchKind::$kind) - .build(test.patterns) - .unwrap() - .try_find_iter(input) - .unwrap() - .collect() - }); - } - }; - (overlapping, $name:ident, $collection:expr, $kind:ident, $with:expr) => { - #[test] - fn $name() { - run_search_tests($collection, |test| { - let mut builder = AhoCorasick::builder(); - $with(&mut builder); - builder - .match_kind(MatchKind::$kind) - .build(test.patterns) - .unwrap() - .find_overlapping_iter(test.haystack) - .collect() - }); - } - }; - (stream, $name:ident, $collection:expr, $kind:ident, $with:expr) => { - #[test] - fn $name() { - run_stream_search_tests($collection, |test| { - let buf = std::io::BufReader::with_capacity( - 1, - test.haystack.as_bytes(), - ); - let mut builder = AhoCorasick::builder(); - $with(&mut builder); - builder - .match_kind(MatchKind::$kind) - .build(test.patterns) - .unwrap() - .stream_find_iter(buf) - .map(|result| result.unwrap()) - .collect() - }); - } - }; - ($name:ident, $collection:expr, $kind:ident, $with:expr) => { - #[test] - fn $name() { - run_search_tests($collection, |test| { - let mut builder = AhoCorasick::builder(); - $with(&mut builder); - builder - .match_kind(MatchKind::$kind) - .build(test.patterns) - .unwrap() - .find_iter(test.haystack) - .collect() - }); - } - }; -} - -macro_rules! testcombo { - ($name:ident, $collection:expr, $kind:ident) => { - mod $name { - use super::*; - - testconfig!(default, $collection, $kind, |_| ()); - testconfig!( - nfa_default, - $collection, - $kind, - |b: &mut AhoCorasickBuilder| { - b.kind(Some(AhoCorasickKind::NoncontiguousNFA)); - } - ); - testconfig!( - nfa_noncontig_no_prefilter, - $collection, - $kind, - |b: &mut AhoCorasickBuilder| { - b.kind(Some(AhoCorasickKind::NoncontiguousNFA)) - .prefilter(false); - } - ); - testconfig!( - nfa_noncontig_all_sparse, - $collection, - $kind, - |b: &mut AhoCorasickBuilder| { - b.kind(Some(AhoCorasickKind::NoncontiguousNFA)) - .dense_depth(0); - } - ); - testconfig!( - nfa_noncontig_all_dense, - $collection, - $kind, - |b: &mut AhoCorasickBuilder| { - b.kind(Some(AhoCorasickKind::NoncontiguousNFA)) - .dense_depth(usize::MAX); - } - ); - testconfig!( - nfa_contig_default, - $collection, - $kind, - |b: &mut AhoCorasickBuilder| { - b.kind(Some(AhoCorasickKind::ContiguousNFA)); - } - ); - testconfig!( - nfa_contig_no_prefilter, - $collection, - $kind, - |b: &mut AhoCorasickBuilder| { - b.kind(Some(AhoCorasickKind::ContiguousNFA)) - .prefilter(false); - } - ); - testconfig!( - nfa_contig_all_sparse, - $collection, - $kind, - |b: &mut AhoCorasickBuilder| { - b.kind(Some(AhoCorasickKind::ContiguousNFA)) - .dense_depth(0); - } - ); - testconfig!( - nfa_contig_all_dense, - $collection, - $kind, - |b: &mut AhoCorasickBuilder| { - b.kind(Some(AhoCorasickKind::ContiguousNFA)) - .dense_depth(usize::MAX); - } - ); - testconfig!( - nfa_contig_no_byte_class, - $collection, - $kind, - |b: &mut AhoCorasickBuilder| { - b.kind(Some(AhoCorasickKind::ContiguousNFA)) - .byte_classes(false); - } - ); - testconfig!( - dfa_default, - $collection, - $kind, - |b: &mut AhoCorasickBuilder| { - b.kind(Some(AhoCorasickKind::DFA)); - } - ); - testconfig!( - dfa_start_both, - $collection, - $kind, - |b: &mut AhoCorasickBuilder| { - b.kind(Some(AhoCorasickKind::DFA)) 
- .start_kind(StartKind::Both); - } - ); - testconfig!( - dfa_no_prefilter, - $collection, - $kind, - |b: &mut AhoCorasickBuilder| { - b.kind(Some(AhoCorasickKind::DFA)).prefilter(false); - } - ); - testconfig!( - dfa_start_both_no_prefilter, - $collection, - $kind, - |b: &mut AhoCorasickBuilder| { - b.kind(Some(AhoCorasickKind::DFA)) - .start_kind(StartKind::Both) - .prefilter(false); - } - ); - testconfig!( - dfa_no_byte_class, - $collection, - $kind, - |b: &mut AhoCorasickBuilder| { - b.kind(Some(AhoCorasickKind::DFA)).byte_classes(false); - } - ); - testconfig!( - dfa_start_both_no_byte_class, - $collection, - $kind, - |b: &mut AhoCorasickBuilder| { - b.kind(Some(AhoCorasickKind::DFA)) - .start_kind(StartKind::Both) - .byte_classes(false); - } - ); - } - }; -} - -// Write out the various combinations of match semantics given the variety of -// configurations tested by 'testcombo!'. -testcombo!(search_leftmost_longest, AC_LEFTMOST_LONGEST, LeftmostLongest); -testcombo!(search_leftmost_first, AC_LEFTMOST_FIRST, LeftmostFirst); -testcombo!( - search_standard_nonoverlapping, - AC_STANDARD_NON_OVERLAPPING, - Standard -); - -// Write out the overlapping combo by hand since there is only one of them. -testconfig!( - overlapping, - search_standard_overlapping_default, - AC_STANDARD_OVERLAPPING, - Standard, - |_| () -); -testconfig!( - overlapping, - search_standard_overlapping_nfa_noncontig_default, - AC_STANDARD_OVERLAPPING, - Standard, - |b: &mut AhoCorasickBuilder| { - b.kind(Some(AhoCorasickKind::NoncontiguousNFA)); - } -); -testconfig!( - overlapping, - search_standard_overlapping_nfa_noncontig_no_prefilter, - AC_STANDARD_OVERLAPPING, - Standard, - |b: &mut AhoCorasickBuilder| { - b.kind(Some(AhoCorasickKind::NoncontiguousNFA)).prefilter(false); - } -); -testconfig!( - overlapping, - search_standard_overlapping_nfa_contig_default, - AC_STANDARD_OVERLAPPING, - Standard, - |b: &mut AhoCorasickBuilder| { - b.kind(Some(AhoCorasickKind::ContiguousNFA)); - } -); -testconfig!( - overlapping, - search_standard_overlapping_nfa_contig_no_prefilter, - AC_STANDARD_OVERLAPPING, - Standard, - |b: &mut AhoCorasickBuilder| { - b.kind(Some(AhoCorasickKind::ContiguousNFA)).prefilter(false); - } -); -testconfig!( - overlapping, - search_standard_overlapping_nfa_contig_all_sparse, - AC_STANDARD_OVERLAPPING, - Standard, - |b: &mut AhoCorasickBuilder| { - b.kind(Some(AhoCorasickKind::ContiguousNFA)).dense_depth(0); - } -); -testconfig!( - overlapping, - search_standard_overlapping_nfa_contig_all_dense, - AC_STANDARD_OVERLAPPING, - Standard, - |b: &mut AhoCorasickBuilder| { - b.kind(Some(AhoCorasickKind::ContiguousNFA)).dense_depth(usize::MAX); - } -); -testconfig!( - overlapping, - search_standard_overlapping_dfa_default, - AC_STANDARD_OVERLAPPING, - Standard, - |b: &mut AhoCorasickBuilder| { - b.kind(Some(AhoCorasickKind::DFA)); - } -); -testconfig!( - overlapping, - search_standard_overlapping_dfa_start_both, - AC_STANDARD_OVERLAPPING, - Standard, - |b: &mut AhoCorasickBuilder| { - b.kind(Some(AhoCorasickKind::DFA)).start_kind(StartKind::Both); - } -); -testconfig!( - overlapping, - search_standard_overlapping_dfa_no_prefilter, - AC_STANDARD_OVERLAPPING, - Standard, - |b: &mut AhoCorasickBuilder| { - b.kind(Some(AhoCorasickKind::DFA)).prefilter(false); - } -); -testconfig!( - overlapping, - search_standard_overlapping_dfa_start_both_no_prefilter, - AC_STANDARD_OVERLAPPING, - Standard, - |b: &mut AhoCorasickBuilder| { - b.kind(Some(AhoCorasickKind::DFA)) - .start_kind(StartKind::Both) - .prefilter(false); - } 
-); -testconfig!( - overlapping, - search_standard_overlapping_dfa_no_byte_class, - AC_STANDARD_OVERLAPPING, - Standard, - |b: &mut AhoCorasickBuilder| { - b.kind(Some(AhoCorasickKind::DFA)).byte_classes(false); - } -); -testconfig!( - overlapping, - search_standard_overlapping_dfa_start_both_no_byte_class, - AC_STANDARD_OVERLAPPING, - Standard, - |b: &mut AhoCorasickBuilder| { - b.kind(Some(AhoCorasickKind::DFA)) - .start_kind(StartKind::Both) - .byte_classes(false); - } -); - -// Also write out tests manually for streams, since we only test the standard -// match semantics. We also don't bother testing different automaton -// configurations, since those are well covered by tests above. -#[cfg(feature = "std")] -testconfig!( - stream, - search_standard_stream_default, - AC_STANDARD_NON_OVERLAPPING, - Standard, - |_| () -); -#[cfg(feature = "std")] -testconfig!( - stream, - search_standard_stream_nfa_noncontig_default, - AC_STANDARD_NON_OVERLAPPING, - Standard, - |b: &mut AhoCorasickBuilder| { - b.kind(Some(AhoCorasickKind::NoncontiguousNFA)); - } -); -#[cfg(feature = "std")] -testconfig!( - stream, - search_standard_stream_nfa_contig_default, - AC_STANDARD_NON_OVERLAPPING, - Standard, - |b: &mut AhoCorasickBuilder| { - b.kind(Some(AhoCorasickKind::ContiguousNFA)); - } -); -#[cfg(feature = "std")] -testconfig!( - stream, - search_standard_stream_dfa_default, - AC_STANDARD_NON_OVERLAPPING, - Standard, - |b: &mut AhoCorasickBuilder| { - b.kind(Some(AhoCorasickKind::DFA)); - } -); - -// Same thing for anchored searches. Write them out manually. -testconfig!( - anchored, - search_standard_anchored_default, - AC_STANDARD_ANCHORED_NON_OVERLAPPING, - Standard, - |b: &mut AhoCorasickBuilder| { - b.start_kind(StartKind::Anchored); - } -); -testconfig!( - anchored, - search_standard_anchored_nfa_noncontig_default, - AC_STANDARD_ANCHORED_NON_OVERLAPPING, - Standard, - |b: &mut AhoCorasickBuilder| { - b.start_kind(StartKind::Anchored) - .kind(Some(AhoCorasickKind::NoncontiguousNFA)); - } -); -testconfig!( - anchored, - search_standard_anchored_nfa_contig_default, - AC_STANDARD_ANCHORED_NON_OVERLAPPING, - Standard, - |b: &mut AhoCorasickBuilder| { - b.start_kind(StartKind::Anchored) - .kind(Some(AhoCorasickKind::ContiguousNFA)); - } -); -testconfig!( - anchored, - search_standard_anchored_dfa_default, - AC_STANDARD_ANCHORED_NON_OVERLAPPING, - Standard, - |b: &mut AhoCorasickBuilder| { - b.start_kind(StartKind::Anchored).kind(Some(AhoCorasickKind::DFA)); - } -); -testconfig!( - anchored, - search_standard_anchored_dfa_start_both, - AC_STANDARD_ANCHORED_NON_OVERLAPPING, - Standard, - |b: &mut AhoCorasickBuilder| { - b.start_kind(StartKind::Both).kind(Some(AhoCorasickKind::DFA)); - } -); -testconfig!( - anchored, - search_leftmost_first_anchored_default, - AC_LEFTMOST_FIRST_ANCHORED, - LeftmostFirst, - |b: &mut AhoCorasickBuilder| { - b.start_kind(StartKind::Anchored); - } -); -testconfig!( - anchored, - search_leftmost_first_anchored_nfa_noncontig_default, - AC_LEFTMOST_FIRST_ANCHORED, - LeftmostFirst, - |b: &mut AhoCorasickBuilder| { - b.start_kind(StartKind::Anchored) - .kind(Some(AhoCorasickKind::NoncontiguousNFA)); - } -); -testconfig!( - anchored, - search_leftmost_first_anchored_nfa_contig_default, - AC_LEFTMOST_FIRST_ANCHORED, - LeftmostFirst, - |b: &mut AhoCorasickBuilder| { - b.start_kind(StartKind::Anchored) - .kind(Some(AhoCorasickKind::ContiguousNFA)); - } -); -testconfig!( - anchored, - search_leftmost_first_anchored_dfa_default, - AC_LEFTMOST_FIRST_ANCHORED, - LeftmostFirst, - |b: &mut 
AhoCorasickBuilder| { - b.start_kind(StartKind::Anchored).kind(Some(AhoCorasickKind::DFA)); - } -); -testconfig!( - anchored, - search_leftmost_first_anchored_dfa_start_both, - AC_LEFTMOST_FIRST_ANCHORED, - LeftmostFirst, - |b: &mut AhoCorasickBuilder| { - b.start_kind(StartKind::Both).kind(Some(AhoCorasickKind::DFA)); - } -); -testconfig!( - anchored, - search_leftmost_longest_anchored_default, - AC_LEFTMOST_LONGEST_ANCHORED, - LeftmostLongest, - |b: &mut AhoCorasickBuilder| { - b.start_kind(StartKind::Anchored); - } -); -testconfig!( - anchored, - search_leftmost_longest_anchored_nfa_noncontig_default, - AC_LEFTMOST_LONGEST_ANCHORED, - LeftmostLongest, - |b: &mut AhoCorasickBuilder| { - b.start_kind(StartKind::Anchored) - .kind(Some(AhoCorasickKind::NoncontiguousNFA)); - } -); -testconfig!( - anchored, - search_leftmost_longest_anchored_nfa_contig_default, - AC_LEFTMOST_LONGEST_ANCHORED, - LeftmostLongest, - |b: &mut AhoCorasickBuilder| { - b.start_kind(StartKind::Anchored) - .kind(Some(AhoCorasickKind::ContiguousNFA)); - } -); -testconfig!( - anchored, - search_leftmost_longest_anchored_dfa_default, - AC_LEFTMOST_LONGEST_ANCHORED, - LeftmostLongest, - |b: &mut AhoCorasickBuilder| { - b.start_kind(StartKind::Anchored).kind(Some(AhoCorasickKind::DFA)); - } -); -testconfig!( - anchored, - search_leftmost_longest_anchored_dfa_start_both, - AC_LEFTMOST_LONGEST_ANCHORED, - LeftmostLongest, - |b: &mut AhoCorasickBuilder| { - b.start_kind(StartKind::Both).kind(Some(AhoCorasickKind::DFA)); - } -); - -// And also write out the test combinations for ASCII case insensitivity. -testconfig!( - acasei_standard_default, - &[ASCII_CASE_INSENSITIVE], - Standard, - |b: &mut AhoCorasickBuilder| { - b.prefilter(false).ascii_case_insensitive(true); - } -); -testconfig!( - acasei_standard_nfa_noncontig_default, - &[ASCII_CASE_INSENSITIVE], - Standard, - |b: &mut AhoCorasickBuilder| { - b.kind(Some(AhoCorasickKind::NoncontiguousNFA)) - .prefilter(false) - .ascii_case_insensitive(true); - } -); -testconfig!( - acasei_standard_nfa_contig_default, - &[ASCII_CASE_INSENSITIVE], - Standard, - |b: &mut AhoCorasickBuilder| { - b.kind(Some(AhoCorasickKind::ContiguousNFA)) - .prefilter(false) - .ascii_case_insensitive(true); - } -); -testconfig!( - acasei_standard_dfa_default, - &[ASCII_CASE_INSENSITIVE, ASCII_CASE_INSENSITIVE_NON_OVERLAPPING], - Standard, - |b: &mut AhoCorasickBuilder| { - b.kind(Some(AhoCorasickKind::DFA)).ascii_case_insensitive(true); - } -); -testconfig!( - overlapping, - acasei_standard_overlapping_default, - &[ASCII_CASE_INSENSITIVE, ASCII_CASE_INSENSITIVE_OVERLAPPING], - Standard, - |b: &mut AhoCorasickBuilder| { - b.ascii_case_insensitive(true); - } -); -testconfig!( - overlapping, - acasei_standard_overlapping_nfa_noncontig_default, - &[ASCII_CASE_INSENSITIVE, ASCII_CASE_INSENSITIVE_OVERLAPPING], - Standard, - |b: &mut AhoCorasickBuilder| { - b.kind(Some(AhoCorasickKind::NoncontiguousNFA)) - .ascii_case_insensitive(true); - } -); -testconfig!( - overlapping, - acasei_standard_overlapping_nfa_contig_default, - &[ASCII_CASE_INSENSITIVE, ASCII_CASE_INSENSITIVE_OVERLAPPING], - Standard, - |b: &mut AhoCorasickBuilder| { - b.kind(Some(AhoCorasickKind::ContiguousNFA)) - .ascii_case_insensitive(true); - } -); -testconfig!( - overlapping, - acasei_standard_overlapping_dfa_default, - &[ASCII_CASE_INSENSITIVE, ASCII_CASE_INSENSITIVE_OVERLAPPING], - Standard, - |b: &mut AhoCorasickBuilder| { - b.kind(Some(AhoCorasickKind::DFA)).ascii_case_insensitive(true); - } -); -testconfig!( - 
acasei_leftmost_first_default, - &[ASCII_CASE_INSENSITIVE, ASCII_CASE_INSENSITIVE_NON_OVERLAPPING], - LeftmostFirst, - |b: &mut AhoCorasickBuilder| { - b.ascii_case_insensitive(true); - } -); -testconfig!( - acasei_leftmost_first_nfa_noncontig_default, - &[ASCII_CASE_INSENSITIVE, ASCII_CASE_INSENSITIVE_NON_OVERLAPPING], - LeftmostFirst, - |b: &mut AhoCorasickBuilder| { - b.kind(Some(AhoCorasickKind::NoncontiguousNFA)) - .ascii_case_insensitive(true); - } -); -testconfig!( - acasei_leftmost_first_nfa_contig_default, - &[ASCII_CASE_INSENSITIVE, ASCII_CASE_INSENSITIVE_NON_OVERLAPPING], - LeftmostFirst, - |b: &mut AhoCorasickBuilder| { - b.kind(Some(AhoCorasickKind::ContiguousNFA)) - .ascii_case_insensitive(true); - } -); -testconfig!( - acasei_leftmost_first_dfa_default, - &[ASCII_CASE_INSENSITIVE, ASCII_CASE_INSENSITIVE_NON_OVERLAPPING], - LeftmostFirst, - |b: &mut AhoCorasickBuilder| { - b.kind(Some(AhoCorasickKind::DFA)).ascii_case_insensitive(true); - } -); -testconfig!( - acasei_leftmost_longest_default, - &[ASCII_CASE_INSENSITIVE, ASCII_CASE_INSENSITIVE_NON_OVERLAPPING], - LeftmostLongest, - |b: &mut AhoCorasickBuilder| { - b.ascii_case_insensitive(true); - } -); -testconfig!( - acasei_leftmost_longest_nfa_noncontig_default, - &[ASCII_CASE_INSENSITIVE, ASCII_CASE_INSENSITIVE_NON_OVERLAPPING], - LeftmostLongest, - |b: &mut AhoCorasickBuilder| { - b.kind(Some(AhoCorasickKind::NoncontiguousNFA)) - .ascii_case_insensitive(true); - } -); -testconfig!( - acasei_leftmost_longest_nfa_contig_default, - &[ASCII_CASE_INSENSITIVE, ASCII_CASE_INSENSITIVE_NON_OVERLAPPING], - LeftmostLongest, - |b: &mut AhoCorasickBuilder| { - b.kind(Some(AhoCorasickKind::ContiguousNFA)) - .ascii_case_insensitive(true); - } -); -testconfig!( - acasei_leftmost_longest_dfa_default, - &[ASCII_CASE_INSENSITIVE, ASCII_CASE_INSENSITIVE_NON_OVERLAPPING], - LeftmostLongest, - |b: &mut AhoCorasickBuilder| { - b.kind(Some(AhoCorasickKind::DFA)).ascii_case_insensitive(true); - } -); - -fn run_search_tests<F: FnMut(&SearchTest) -> Vec<Match>>( - which: TestCollection, - mut f: F, -) { - let get_match_triples = - |matches: Vec<Match>| -> Vec<(usize, usize, usize)> { - matches - .into_iter() - .map(|m| (m.pattern().as_usize(), m.start(), m.end())) - .collect() - }; - for &tests in which { - for test in tests { - assert_eq!( - test.matches, - get_match_triples(f(&test)).as_slice(), - "test: {}, patterns: {:?}, haystack: {:?}", - test.name, - test.patterns, - test.haystack - ); - } - } -} - -// Like 'run_search_tests', but we skip any tests that contain the empty -// pattern because stream searching doesn't support it. 
-#[cfg(feature = "std")] -fn run_stream_search_tests<F: FnMut(&SearchTest) -> Vec<Match>>( - which: TestCollection, - mut f: F, -) { - let get_match_triples = - |matches: Vec<Match>| -> Vec<(usize, usize, usize)> { - matches - .into_iter() - .map(|m| (m.pattern().as_usize(), m.start(), m.end())) - .collect() - }; - for &tests in which { - for test in tests { - if test.patterns.iter().any(|p| p.is_empty()) { - continue; - } - assert_eq!( - test.matches, - get_match_triples(f(&test)).as_slice(), - "test: {}, patterns: {:?}, haystack: {:?}", - test.name, - test.patterns, - test.haystack - ); - } - } -} - -#[test] -fn search_tests_have_unique_names() { - let assert = |constname, tests: &[SearchTest]| { - let mut seen = HashMap::new(); // map from test name to position - for (i, test) in tests.iter().enumerate() { - if !seen.contains_key(test.name) { - seen.insert(test.name, i); - } else { - let last = seen[test.name]; - panic!( - "{} tests have duplicate names at positions {} and {}", - constname, last, i - ); - } - } - }; - assert("BASICS", BASICS); - assert("STANDARD", STANDARD); - assert("LEFTMOST", LEFTMOST); - assert("LEFTMOST_FIRST", LEFTMOST_FIRST); - assert("LEFTMOST_LONGEST", LEFTMOST_LONGEST); - assert("NON_OVERLAPPING", NON_OVERLAPPING); - assert("OVERLAPPING", OVERLAPPING); - assert("REGRESSION", REGRESSION); -} - -#[cfg(feature = "std")] -#[test] -#[should_panic] -fn stream_not_allowed_leftmost_first() { - let fsm = AhoCorasick::builder() - .match_kind(MatchKind::LeftmostFirst) - .build(None::<String>) - .unwrap(); - assert_eq!(fsm.stream_find_iter(&b""[..]).count(), 0); -} - -#[cfg(feature = "std")] -#[test] -#[should_panic] -fn stream_not_allowed_leftmost_longest() { - let fsm = AhoCorasick::builder() - .match_kind(MatchKind::LeftmostLongest) - .build(None::<String>) - .unwrap(); - assert_eq!(fsm.stream_find_iter(&b""[..]).count(), 0); -} - -#[test] -#[should_panic] -fn overlapping_not_allowed_leftmost_first() { - let fsm = AhoCorasick::builder() - .match_kind(MatchKind::LeftmostFirst) - .build(None::<String>) - .unwrap(); - assert_eq!(fsm.find_overlapping_iter("").count(), 0); -} - -#[test] -#[should_panic] -fn overlapping_not_allowed_leftmost_longest() { - let fsm = AhoCorasick::builder() - .match_kind(MatchKind::LeftmostLongest) - .build(None::<String>) - .unwrap(); - assert_eq!(fsm.find_overlapping_iter("").count(), 0); -} - -// This tests that if we build an AC matcher with an "unanchored" start kind, -// then we can't run an anchored search even if the underlying searcher -// supports it. -// -// The key bit here is that both of the NFAs in this crate unconditionally -// support both unanchored and anchored searches, but the DFA does not because -// of the added cost of doing so. To avoid the top-level AC matcher sometimes -// supporting anchored and sometimes not (depending on which searcher it -// chooses to use internally), we ensure that the given 'StartKind' is always -// respected. -#[test] -fn anchored_not_allowed_even_if_technically_available() { - let ac = AhoCorasick::builder() - .kind(Some(AhoCorasickKind::NoncontiguousNFA)) - .start_kind(StartKind::Unanchored) - .build(&["foo"]) - .unwrap(); - assert!(ac.try_find(Input::new("foo").anchored(Anchored::Yes)).is_err()); - - let ac = AhoCorasick::builder() - .kind(Some(AhoCorasickKind::ContiguousNFA)) - .start_kind(StartKind::Unanchored) - .build(&["foo"]) - .unwrap(); - assert!(ac.try_find(Input::new("foo").anchored(Anchored::Yes)).is_err()); - - // For completeness, check that the DFA returns an error too. 
- let ac = AhoCorasick::builder() - .kind(Some(AhoCorasickKind::DFA)) - .start_kind(StartKind::Unanchored) - .build(&["foo"]) - .unwrap(); - assert!(ac.try_find(Input::new("foo").anchored(Anchored::Yes)).is_err()); -} - -// This is like the test aboved, but with unanchored and anchored flipped. That -// is, we asked for an AC searcher with anchored support and we check that -// unanchored searches return an error even if the underlying searcher would -// technically support it. -#[test] -fn unanchored_not_allowed_even_if_technically_available() { - let ac = AhoCorasick::builder() - .kind(Some(AhoCorasickKind::NoncontiguousNFA)) - .start_kind(StartKind::Anchored) - .build(&["foo"]) - .unwrap(); - assert!(ac.try_find(Input::new("foo").anchored(Anchored::No)).is_err()); - - let ac = AhoCorasick::builder() - .kind(Some(AhoCorasickKind::ContiguousNFA)) - .start_kind(StartKind::Anchored) - .build(&["foo"]) - .unwrap(); - assert!(ac.try_find(Input::new("foo").anchored(Anchored::No)).is_err()); - - // For completeness, check that the DFA returns an error too. - let ac = AhoCorasick::builder() - .kind(Some(AhoCorasickKind::DFA)) - .start_kind(StartKind::Anchored) - .build(&["foo"]) - .unwrap(); - assert!(ac.try_find(Input::new("foo").anchored(Anchored::No)).is_err()); -} - -// This tests that a prefilter does not cause a search to report a match -// outside the bounds provided by the caller. -// -// This is a regression test for a bug I introduced during the rewrite of most -// of the crate after 0.7. It was never released. The tricky part here is -// ensuring we get a prefilter that can report matches on its own (such as the -// packed searcher). Otherwise, prefilters that report false positives might -// have searched past the bounds provided by the caller, but confirming the -// match would subsequently fail. -#[test] -fn prefilter_stays_in_bounds() { - let ac = AhoCorasick::builder() - .match_kind(MatchKind::LeftmostFirst) - .build(&["sam", "frodo", "pippin", "merry", "gandalf", "sauron"]) - .unwrap(); - let haystack = "foo gandalf"; - assert_eq!(None, ac.find(Input::new(haystack).range(0..10))); -} - -// See: https://github.com/BurntSushi/aho-corasick/issues/44 -// -// In short, this test ensures that enabling ASCII case insensitivity does not -// visit an exponential number of states when filling in failure transitions. -#[test] -fn regression_ascii_case_insensitive_no_exponential() { - let ac = AhoCorasick::builder() - .ascii_case_insensitive(true) - .build(&["Tsubaki House-Triple Shot Vol01校花三姐妹"]) - .unwrap(); - assert!(ac.find("").is_none()); -} - -// See: https://github.com/BurntSushi/aho-corasick/issues/53 -// -// This test ensures that the rare byte prefilter works in a particular corner -// case. In particular, the shift offset detected for '/' in the patterns below -// was incorrect, leading to a false negative. 
-#[test] -fn regression_rare_byte_prefilter() { - use crate::AhoCorasick; - - let ac = AhoCorasick::new(&["ab/j/", "x/"]).unwrap(); - assert!(ac.is_match("ab/j/")); -} - -#[test] -fn regression_case_insensitive_prefilter() { - for c in b'a'..b'z' { - for c2 in b'a'..b'z' { - let c = c as char; - let c2 = c2 as char; - let needle = format!("{}{}", c, c2).to_lowercase(); - let haystack = needle.to_uppercase(); - let ac = AhoCorasick::builder() - .ascii_case_insensitive(true) - .prefilter(true) - .build(&[&needle]) - .unwrap(); - assert_eq!( - 1, - ac.find_iter(&haystack).count(), - "failed to find {:?} in {:?}\n\nautomaton:\n{:?}", - needle, - haystack, - ac, - ); - } - } -} - -// See: https://github.com/BurntSushi/aho-corasick/issues/64 -// -// This occurs when the rare byte prefilter is active. -#[cfg(feature = "std")] -#[test] -fn regression_stream_rare_byte_prefilter() { - use std::io::Read; - - // NOTE: The test only fails if this ends with j. - const MAGIC: [u8; 5] = *b"1234j"; - - // NOTE: The test fails for value in 8188..=8191 These value put the string - // to search accross two call to read because the buffer size is 64KB by - // default. - const BEGIN: usize = 65_535; - - /// This is just a structure that implements Reader. The reader - /// implementation will simulate a file filled with 0, except for the MAGIC - /// string at offset BEGIN. - #[derive(Default)] - struct R { - read: usize, - } - - impl Read for R { - fn read(&mut self, buf: &mut [u8]) -> std::io::Result<usize> { - if self.read > 100000 { - return Ok(0); - } - let mut from = 0; - if self.read < BEGIN { - from = buf.len().min(BEGIN - self.read); - for x in 0..from { - buf[x] = 0; - } - self.read += from; - } - if self.read >= BEGIN && self.read <= BEGIN + MAGIC.len() { - let to = buf.len().min(BEGIN + MAGIC.len() - self.read + from); - if to > from { - buf[from..to].copy_from_slice( - &MAGIC - [self.read - BEGIN..self.read - BEGIN + to - from], - ); - self.read += to - from; - from = to; - } - } - for x in from..buf.len() { - buf[x] = 0; - self.read += 1; - } - Ok(buf.len()) - } - } - - fn run() -> std::io::Result<()> { - let aut = AhoCorasick::builder() - // Enable byte classes to make debugging the automaton easier. It - // should have no effect on the test result. - .byte_classes(false) - .build(&[&MAGIC]) - .unwrap(); - - // While reading from a vector, it works: - let mut buf = alloc::vec![]; - R::default().read_to_end(&mut buf)?; - let from_whole = aut.find_iter(&buf).next().unwrap().start(); - - // But using stream_find_iter fails! - let mut file = std::io::BufReader::new(R::default()); - let begin = aut - .stream_find_iter(&mut file) - .next() - .expect("NOT FOUND!!!!")? // Panic here - .start(); - assert_eq!(from_whole, begin); - Ok(()) - } - - run().unwrap() -} diff --git a/vendor/aho-corasick/src/transducer.rs b/vendor/aho-corasick/src/transducer.rs deleted file mode 100644 index 39bb240f4461ba..00000000000000 --- a/vendor/aho-corasick/src/transducer.rs +++ /dev/null @@ -1,270 +0,0 @@ -/*! -Provides implementations of `fst::Automaton` for Aho-Corasick automata. - -This works by providing two wrapper types, [`Anchored`] and [`Unanchored`]. -The former executes an anchored search on an FST while the latter executes -an unanchored search. Building these wrappers is fallible and will fail if -the underlying Aho-Corasick automaton does not support the type of search it -represents. 
-*/ - -use crate::{ - automaton::{Automaton, StateID}, - Anchored as AcAnchored, Input, MatchError, -}; - -/// Represents an unanchored Aho-Corasick search of a finite state transducer. -/// -/// Wrapping an Aho-Corasick automaton in `Unanchored` will fail if the -/// underlying automaton does not support unanchored searches. -/// -/// # Example -/// -/// This shows how to build an FST of keys and then run an unanchored search on -/// those keys using an Aho-Corasick automaton. -/// -/// ``` -/// use aho_corasick::{nfa::contiguous::NFA, transducer::Unanchored}; -/// use fst::{Automaton, IntoStreamer, Set, Streamer}; -/// -/// let set = Set::from_iter(&["abcd", "bc", "bcd", "xyz"]).unwrap(); -/// let nfa = NFA::new(&["bcd", "x"]).unwrap(); -/// // NFAs always support both unanchored and anchored searches. -/// let searcher = Unanchored::new(&nfa).unwrap(); -/// -/// let mut stream = set.search(searcher).into_stream(); -/// let mut results = vec![]; -/// while let Some(key) = stream.next() { -/// results.push(std::str::from_utf8(key).unwrap().to_string()); -/// } -/// assert_eq!(vec!["abcd", "bcd", "xyz"], results); -/// ``` -#[derive(Clone, Debug)] -pub struct Unanchored<A>(A); - -impl<A: Automaton> Unanchored<A> { - /// Create a new `Unanchored` implementation of the `fst::Automaton` trait. - /// - /// If the given Aho-Corasick automaton does not support unanchored - /// searches, then this returns an error. - pub fn new(aut: A) -> Result<Unanchored<A>, MatchError> { - let input = Input::new("").anchored(AcAnchored::No); - let _ = aut.start_state(&input)?; - Ok(Unanchored(aut)) - } - - /// Returns a borrow to the underlying automaton. - pub fn as_ref(&self) -> &A { - &self.0 - } - - /// Unwrap this value and return the inner automaton. - pub fn into_inner(self) -> A { - self.0 - } -} - -impl<A: Automaton> fst::Automaton for Unanchored<A> { - type State = StateID; - - #[inline] - fn start(&self) -> StateID { - let input = Input::new("").anchored(AcAnchored::No); - self.0.start_state(&input).expect("support for unanchored searches") - } - - #[inline] - fn is_match(&self, state: &StateID) -> bool { - self.0.is_match(*state) - } - - #[inline] - fn accept(&self, state: &StateID, byte: u8) -> StateID { - if fst::Automaton::is_match(self, state) { - return *state; - } - self.0.next_state(AcAnchored::No, *state, byte) - } - - #[inline] - fn can_match(&self, state: &StateID) -> bool { - !self.0.is_dead(*state) - } -} - -/// Represents an anchored Aho-Corasick search of a finite state transducer. -/// -/// Wrapping an Aho-Corasick automaton in `Unanchored` will fail if the -/// underlying automaton does not support unanchored searches. -/// -/// # Example -/// -/// This shows how to build an FST of keys and then run an anchored search on -/// those keys using an Aho-Corasick automaton. -/// -/// ``` -/// use aho_corasick::{nfa::contiguous::NFA, transducer::Anchored}; -/// use fst::{Automaton, IntoStreamer, Set, Streamer}; -/// -/// let set = Set::from_iter(&["abcd", "bc", "bcd", "xyz"]).unwrap(); -/// let nfa = NFA::new(&["bcd", "x"]).unwrap(); -/// // NFAs always support both unanchored and anchored searches. 
-/// let searcher = Anchored::new(&nfa).unwrap(); -/// -/// let mut stream = set.search(searcher).into_stream(); -/// let mut results = vec![]; -/// while let Some(key) = stream.next() { -/// results.push(std::str::from_utf8(key).unwrap().to_string()); -/// } -/// assert_eq!(vec!["bcd", "xyz"], results); -/// ``` -/// -/// This is like the example above, except we use an Aho-Corasick DFA, which -/// requires explicitly configuring it to support anchored searches. (NFAs -/// unconditionally support both unanchored and anchored searches.) -/// -/// ``` -/// use aho_corasick::{dfa::DFA, transducer::Anchored, StartKind}; -/// use fst::{Automaton, IntoStreamer, Set, Streamer}; -/// -/// let set = Set::from_iter(&["abcd", "bc", "bcd", "xyz"]).unwrap(); -/// let dfa = DFA::builder() -/// .start_kind(StartKind::Anchored) -/// .build(&["bcd", "x"]) -/// .unwrap(); -/// // We've explicitly configured our DFA to support anchored searches. -/// let searcher = Anchored::new(&dfa).unwrap(); -/// -/// let mut stream = set.search(searcher).into_stream(); -/// let mut results = vec![]; -/// while let Some(key) = stream.next() { -/// results.push(std::str::from_utf8(key).unwrap().to_string()); -/// } -/// assert_eq!(vec!["bcd", "xyz"], results); -/// ``` -#[derive(Clone, Debug)] -pub struct Anchored<A>(A); - -impl<A: Automaton> Anchored<A> { - /// Create a new `Anchored` implementation of the `fst::Automaton` trait. - /// - /// If the given Aho-Corasick automaton does not support anchored searches, - /// then this returns an error. - pub fn new(aut: A) -> Result<Anchored<A>, MatchError> { - let input = Input::new("").anchored(AcAnchored::Yes); - let _ = aut.start_state(&input)?; - Ok(Anchored(aut)) - } - - /// Returns a borrow to the underlying automaton. - pub fn as_ref(&self) -> &A { - &self.0 - } - - /// Unwrap this value and return the inner automaton. 
- pub fn into_inner(self) -> A { - self.0 - } -} - -impl<A: Automaton> fst::Automaton for Anchored<A> { - type State = StateID; - - #[inline] - fn start(&self) -> StateID { - let input = Input::new("").anchored(AcAnchored::Yes); - self.0.start_state(&input).expect("support for unanchored searches") - } - - #[inline] - fn is_match(&self, state: &StateID) -> bool { - self.0.is_match(*state) - } - - #[inline] - fn accept(&self, state: &StateID, byte: u8) -> StateID { - if fst::Automaton::is_match(self, state) { - return *state; - } - self.0.next_state(AcAnchored::Yes, *state, byte) - } - - #[inline] - fn can_match(&self, state: &StateID) -> bool { - !self.0.is_dead(*state) - } -} - -#[cfg(test)] -mod tests { - use alloc::{string::String, vec, vec::Vec}; - - use fst::{Automaton, IntoStreamer, Set, Streamer}; - - use crate::{ - dfa::DFA, - nfa::{contiguous, noncontiguous}, - StartKind, - }; - - use super::*; - - fn search<A: Automaton, D: AsRef<[u8]>>( - set: &Set<D>, - aut: A, - ) -> Vec<String> { - let mut stream = set.search(aut).into_stream(); - let mut results = vec![]; - while let Some(key) = stream.next() { - results.push(String::from(core::str::from_utf8(key).unwrap())); - } - results - } - - #[test] - fn unanchored() { - let set = - Set::from_iter(&["a", "bar", "baz", "wat", "xba", "xbax", "z"]) - .unwrap(); - let patterns = vec!["baz", "bax"]; - let expected = vec!["baz", "xbax"]; - - let aut = Unanchored(noncontiguous::NFA::new(&patterns).unwrap()); - let got = search(&set, &aut); - assert_eq!(got, expected); - - let aut = Unanchored(contiguous::NFA::new(&patterns).unwrap()); - let got = search(&set, &aut); - assert_eq!(got, expected); - - let aut = Unanchored(DFA::new(&patterns).unwrap()); - let got = search(&set, &aut); - assert_eq!(got, expected); - } - - #[test] - fn anchored() { - let set = - Set::from_iter(&["a", "bar", "baz", "wat", "xba", "xbax", "z"]) - .unwrap(); - let patterns = vec!["baz", "bax"]; - let expected = vec!["baz"]; - - let aut = Anchored(noncontiguous::NFA::new(&patterns).unwrap()); - let got = search(&set, &aut); - assert_eq!(got, expected); - - let aut = Anchored(contiguous::NFA::new(&patterns).unwrap()); - let got = search(&set, &aut); - assert_eq!(got, expected); - - let aut = Anchored( - DFA::builder() - .start_kind(StartKind::Anchored) - .build(&patterns) - .unwrap(), - ); - let got = search(&set, &aut); - assert_eq!(got, expected); - } -} diff --git a/vendor/aho-corasick/src/util/alphabet.rs b/vendor/aho-corasick/src/util/alphabet.rs deleted file mode 100644 index 69724fa3abe627..00000000000000 --- a/vendor/aho-corasick/src/util/alphabet.rs +++ /dev/null @@ -1,409 +0,0 @@ -use crate::util::int::Usize; - -/// A representation of byte oriented equivalence classes. -/// -/// This is used in finite state machines to reduce the size of the transition -/// table. This can have a particularly large impact not only on the total size -/// of an FSM, but also on FSM build times because it reduces the number of -/// transitions that need to be visited/set. -#[derive(Clone, Copy)] -pub(crate) struct ByteClasses([u8; 256]); - -impl ByteClasses { - /// Creates a new set of equivalence classes where all bytes are mapped to - /// the same class. - pub(crate) fn empty() -> ByteClasses { - ByteClasses([0; 256]) - } - - /// Creates a new set of equivalence classes where each byte belongs to - /// its own equivalence class. 
- pub(crate) fn singletons() -> ByteClasses { - let mut classes = ByteClasses::empty(); - for b in 0..=255 { - classes.set(b, b); - } - classes - } - - /// Set the equivalence class for the given byte. - #[inline] - pub(crate) fn set(&mut self, byte: u8, class: u8) { - self.0[usize::from(byte)] = class; - } - - /// Get the equivalence class for the given byte. - #[inline] - pub(crate) fn get(&self, byte: u8) -> u8 { - self.0[usize::from(byte)] - } - - /// Return the total number of elements in the alphabet represented by - /// these equivalence classes. Equivalently, this returns the total number - /// of equivalence classes. - #[inline] - pub(crate) fn alphabet_len(&self) -> usize { - // Add one since the number of equivalence classes is one bigger than - // the last one. - usize::from(self.0[255]) + 1 - } - - /// Returns the stride, as a base-2 exponent, required for these - /// equivalence classes. - /// - /// The stride is always the smallest power of 2 that is greater than or - /// equal to the alphabet length. This is done so that converting between - /// state IDs and indices can be done with shifts alone, which is much - /// faster than integer division. The "stride2" is the exponent. i.e., - /// `2^stride2 = stride`. - pub(crate) fn stride2(&self) -> usize { - let zeros = self.alphabet_len().next_power_of_two().trailing_zeros(); - usize::try_from(zeros).unwrap() - } - - /// Returns the stride for these equivalence classes, which corresponds - /// to the smallest power of 2 greater than or equal to the number of - /// equivalence classes. - pub(crate) fn stride(&self) -> usize { - 1 << self.stride2() - } - - /// Returns true if and only if every byte in this class maps to its own - /// equivalence class. Equivalently, there are 257 equivalence classes - /// and each class contains exactly one byte (plus the special EOI class). - #[inline] - pub(crate) fn is_singleton(&self) -> bool { - self.alphabet_len() == 256 - } - - /// Returns an iterator over all equivalence classes in this set. - pub(crate) fn iter(&self) -> ByteClassIter { - ByteClassIter { it: 0..self.alphabet_len() } - } - - /// Returns an iterator of the bytes in the given equivalence class. - pub(crate) fn elements(&self, class: u8) -> ByteClassElements { - ByteClassElements { classes: self, class, bytes: 0..=255 } - } - - /// Returns an iterator of byte ranges in the given equivalence class. - /// - /// That is, a sequence of contiguous ranges are returned. Typically, every - /// class maps to a single contiguous range. - fn element_ranges(&self, class: u8) -> ByteClassElementRanges { - ByteClassElementRanges { elements: self.elements(class), range: None } - } -} - -impl core::fmt::Debug for ByteClasses { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - if self.is_singleton() { - write!(f, "ByteClasses(<one-class-per-byte>)") - } else { - write!(f, "ByteClasses(")?; - for (i, class) in self.iter().enumerate() { - if i > 0 { - write!(f, ", ")?; - } - write!(f, "{:?} => [", class)?; - for (start, end) in self.element_ranges(class) { - if start == end { - write!(f, "{:?}", start)?; - } else { - write!(f, "{:?}-{:?}", start, end)?; - } - } - write!(f, "]")?; - } - write!(f, ")") - } - } -} - -/// An iterator over each equivalence class. 
-#[derive(Debug)] -pub(crate) struct ByteClassIter { - it: core::ops::Range<usize>, -} - -impl Iterator for ByteClassIter { - type Item = u8; - - fn next(&mut self) -> Option<u8> { - self.it.next().map(|class| class.as_u8()) - } -} - -/// An iterator over all elements in a specific equivalence class. -#[derive(Debug)] -pub(crate) struct ByteClassElements<'a> { - classes: &'a ByteClasses, - class: u8, - bytes: core::ops::RangeInclusive<u8>, -} - -impl<'a> Iterator for ByteClassElements<'a> { - type Item = u8; - - fn next(&mut self) -> Option<u8> { - while let Some(byte) = self.bytes.next() { - if self.class == self.classes.get(byte) { - return Some(byte); - } - } - None - } -} - -/// An iterator over all elements in an equivalence class expressed as a -/// sequence of contiguous ranges. -#[derive(Debug)] -pub(crate) struct ByteClassElementRanges<'a> { - elements: ByteClassElements<'a>, - range: Option<(u8, u8)>, -} - -impl<'a> Iterator for ByteClassElementRanges<'a> { - type Item = (u8, u8); - - fn next(&mut self) -> Option<(u8, u8)> { - loop { - let element = match self.elements.next() { - None => return self.range.take(), - Some(element) => element, - }; - match self.range.take() { - None => { - self.range = Some((element, element)); - } - Some((start, end)) => { - if usize::from(end) + 1 != usize::from(element) { - self.range = Some((element, element)); - return Some((start, end)); - } - self.range = Some((start, element)); - } - } - } - } -} - -/// A partitioning of bytes into equivalence classes. -/// -/// A byte class set keeps track of an *approximation* of equivalence classes -/// of bytes during NFA construction. That is, every byte in an equivalence -/// class cannot discriminate between a match and a non-match. -/// -/// Note that this may not compute the minimal set of equivalence classes. -/// Basically, any byte in a pattern given to the noncontiguous NFA builder -/// will automatically be treated as its own equivalence class. All other -/// bytes---any byte not in any pattern---will be treated as their own -/// equivalence classes. In theory, all bytes not in any pattern should -/// be part of a single equivalence class, but in practice, we only treat -/// contiguous ranges of bytes as an equivalence class. So the number of -/// classes computed may be bigger than necessary. This usually doesn't make -/// much of a difference, and keeps the implementation simple. -#[derive(Clone, Debug)] -pub(crate) struct ByteClassSet(ByteSet); - -impl Default for ByteClassSet { - fn default() -> ByteClassSet { - ByteClassSet::empty() - } -} - -impl ByteClassSet { - /// Create a new set of byte classes where all bytes are part of the same - /// equivalence class. - pub(crate) fn empty() -> Self { - ByteClassSet(ByteSet::empty()) - } - - /// Indicate the the range of byte given (inclusive) can discriminate a - /// match between it and all other bytes outside of the range. - pub(crate) fn set_range(&mut self, start: u8, end: u8) { - debug_assert!(start <= end); - if start > 0 { - self.0.add(start - 1); - } - self.0.add(end); - } - - /// Convert this boolean set to a map that maps all byte values to their - /// corresponding equivalence class. The last mapping indicates the largest - /// equivalence class identifier (which is never bigger than 255). 
- pub(crate) fn byte_classes(&self) -> ByteClasses { - let mut classes = ByteClasses::empty(); - let mut class = 0u8; - let mut b = 0u8; - loop { - classes.set(b, class); - if b == 255 { - break; - } - if self.0.contains(b) { - class = class.checked_add(1).unwrap(); - } - b = b.checked_add(1).unwrap(); - } - classes - } -} - -/// A simple set of bytes that is reasonably cheap to copy and allocation free. -#[derive(Clone, Copy, Debug, Default, Eq, PartialEq)] -pub(crate) struct ByteSet { - bits: BitSet, -} - -/// The representation of a byte set. Split out so that we can define a -/// convenient Debug impl for it while keeping "ByteSet" in the output. -#[derive(Clone, Copy, Default, Eq, PartialEq)] -struct BitSet([u128; 2]); - -impl ByteSet { - /// Create an empty set of bytes. - pub(crate) fn empty() -> ByteSet { - ByteSet { bits: BitSet([0; 2]) } - } - - /// Add a byte to this set. - /// - /// If the given byte already belongs to this set, then this is a no-op. - pub(crate) fn add(&mut self, byte: u8) { - let bucket = byte / 128; - let bit = byte % 128; - self.bits.0[usize::from(bucket)] |= 1 << bit; - } - - /// Return true if and only if the given byte is in this set. - pub(crate) fn contains(&self, byte: u8) -> bool { - let bucket = byte / 128; - let bit = byte % 128; - self.bits.0[usize::from(bucket)] & (1 << bit) > 0 - } -} - -impl core::fmt::Debug for BitSet { - fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { - let mut fmtd = f.debug_set(); - for b in 0u8..=255 { - if (ByteSet { bits: *self }).contains(b) { - fmtd.entry(&b); - } - } - fmtd.finish() - } -} - -#[cfg(test)] -mod tests { - use alloc::{vec, vec::Vec}; - - use super::*; - - #[test] - fn byte_classes() { - let mut set = ByteClassSet::empty(); - set.set_range(b'a', b'z'); - - let classes = set.byte_classes(); - assert_eq!(classes.get(0), 0); - assert_eq!(classes.get(1), 0); - assert_eq!(classes.get(2), 0); - assert_eq!(classes.get(b'a' - 1), 0); - assert_eq!(classes.get(b'a'), 1); - assert_eq!(classes.get(b'm'), 1); - assert_eq!(classes.get(b'z'), 1); - assert_eq!(classes.get(b'z' + 1), 2); - assert_eq!(classes.get(254), 2); - assert_eq!(classes.get(255), 2); - - let mut set = ByteClassSet::empty(); - set.set_range(0, 2); - set.set_range(4, 6); - let classes = set.byte_classes(); - assert_eq!(classes.get(0), 0); - assert_eq!(classes.get(1), 0); - assert_eq!(classes.get(2), 0); - assert_eq!(classes.get(3), 1); - assert_eq!(classes.get(4), 2); - assert_eq!(classes.get(5), 2); - assert_eq!(classes.get(6), 2); - assert_eq!(classes.get(7), 3); - assert_eq!(classes.get(255), 3); - } - - #[test] - fn full_byte_classes() { - let mut set = ByteClassSet::empty(); - for b in 0u8..=255 { - set.set_range(b, b); - } - assert_eq!(set.byte_classes().alphabet_len(), 256); - } - - #[test] - fn elements_typical() { - let mut set = ByteClassSet::empty(); - set.set_range(b'b', b'd'); - set.set_range(b'g', b'm'); - set.set_range(b'z', b'z'); - let classes = set.byte_classes(); - // class 0: \x00-a - // class 1: b-d - // class 2: e-f - // class 3: g-m - // class 4: n-y - // class 5: z-z - // class 6: \x7B-\xFF - assert_eq!(classes.alphabet_len(), 7); - - let elements = classes.elements(0).collect::<Vec<_>>(); - assert_eq!(elements.len(), 98); - assert_eq!(elements[0], b'\x00'); - assert_eq!(elements[97], b'a'); - - let elements = classes.elements(1).collect::<Vec<_>>(); - assert_eq!(elements, vec![b'b', b'c', b'd'],); - - let elements = classes.elements(2).collect::<Vec<_>>(); - assert_eq!(elements, vec![b'e', b'f'],); - - let 
elements = classes.elements(3).collect::<Vec<_>>(); - assert_eq!(elements, vec![b'g', b'h', b'i', b'j', b'k', b'l', b'm',],); - - let elements = classes.elements(4).collect::<Vec<_>>(); - assert_eq!(elements.len(), 12); - assert_eq!(elements[0], b'n'); - assert_eq!(elements[11], b'y'); - - let elements = classes.elements(5).collect::<Vec<_>>(); - assert_eq!(elements, vec![b'z']); - - let elements = classes.elements(6).collect::<Vec<_>>(); - assert_eq!(elements.len(), 133); - assert_eq!(elements[0], b'\x7B'); - assert_eq!(elements[132], b'\xFF'); - } - - #[test] - fn elements_singletons() { - let classes = ByteClasses::singletons(); - assert_eq!(classes.alphabet_len(), 256); - - let elements = classes.elements(b'a').collect::<Vec<_>>(); - assert_eq!(elements, vec![b'a']); - } - - #[test] - fn elements_empty() { - let classes = ByteClasses::empty(); - assert_eq!(classes.alphabet_len(), 1); - - let elements = classes.elements(0).collect::<Vec<_>>(); - assert_eq!(elements.len(), 256); - assert_eq!(elements[0], b'\x00'); - assert_eq!(elements[255], b'\xFF'); - } -} diff --git a/vendor/aho-corasick/src/util/buffer.rs b/vendor/aho-corasick/src/util/buffer.rs deleted file mode 100644 index e9e982af588592..00000000000000 --- a/vendor/aho-corasick/src/util/buffer.rs +++ /dev/null @@ -1,124 +0,0 @@ -use alloc::{vec, vec::Vec}; - -/// The default buffer capacity that we use for the stream buffer. -const DEFAULT_BUFFER_CAPACITY: usize = 64 * (1 << 10); // 64 KB - -/// A fairly simple roll buffer for supporting stream searches. -/// -/// This buffer acts as a temporary place to store a fixed amount of data when -/// reading from a stream. Its central purpose is to allow "rolling" some -/// suffix of the data to the beginning of the buffer before refilling it with -/// more data from the stream. For example, let's say we are trying to match -/// "foobar" on a stream. When we report the match, we'd like to not only -/// report the correct offsets at which the match occurs, but also the matching -/// bytes themselves. So let's say our stream is a file with the following -/// contents: `test test foobar test test`. Now assume that we happen to read -/// the aforementioned file in two chunks: `test test foo` and `bar test test`. -/// Naively, it would not be possible to report a single contiguous `foobar` -/// match, but this roll buffer allows us to do that. Namely, after the second -/// read, the contents of the buffer should be `st foobar test test`, where the -/// search should ultimately resume immediately after `foo`. (The prefix `st ` -/// is included because the roll buffer saves N bytes at the end of the buffer, -/// where N is the maximum possible length of a match.) -/// -/// A lot of the logic for dealing with this is unfortunately split out between -/// this roll buffer and the `StreamChunkIter`. -/// -/// Note also that this buffer is not actually required to just report matches. -/// Because a `Match` is just some offsets. But it *is* required for supporting -/// things like `try_stream_replace_all` because that needs some mechanism for -/// knowing which bytes in the stream correspond to a match and which don't. So -/// when a match occurs across two `read` calls, *something* needs to retain -/// the bytes from the previous `read` call because you don't know before the -/// second read call whether a match exists or not. -#[derive(Debug)] -pub(crate) struct Buffer { - /// The raw buffer contents. This has a fixed size and never increases. 
- buf: Vec<u8>, - /// The minimum size of the buffer, which is equivalent to the maximum - /// possible length of a match. This corresponds to the amount that we - /// roll - min: usize, - /// The end of the contents of this buffer. - end: usize, -} - -impl Buffer { - /// Create a new buffer for stream searching. The minimum buffer length - /// given should be the size of the maximum possible match length. - pub(crate) fn new(min_buffer_len: usize) -> Buffer { - let min = core::cmp::max(1, min_buffer_len); - // The minimum buffer amount is also the amount that we roll our - // buffer in order to support incremental searching. To this end, - // our actual capacity needs to be at least 1 byte bigger than our - // minimum amount, otherwise we won't have any overlap. In actuality, - // we want our buffer to be a bit bigger than that for performance - // reasons, so we set a lower bound of `8 * min`. - // - // TODO: It would be good to find a way to test the streaming - // implementation with the minimal buffer size. For now, we just - // uncomment out the next line and comment out the subsequent line. - // let capacity = 1 + min; - let capacity = core::cmp::max(min * 8, DEFAULT_BUFFER_CAPACITY); - Buffer { buf: vec![0; capacity], min, end: 0 } - } - - /// Return the contents of this buffer. - #[inline] - pub(crate) fn buffer(&self) -> &[u8] { - &self.buf[..self.end] - } - - /// Return the minimum size of the buffer. The only way a buffer may be - /// smaller than this is if the stream itself contains less than the - /// minimum buffer amount. - #[inline] - pub(crate) fn min_buffer_len(&self) -> usize { - self.min - } - - /// Return all free capacity in this buffer. - fn free_buffer(&mut self) -> &mut [u8] { - &mut self.buf[self.end..] - } - - /// Refill the contents of this buffer by reading as much as possible into - /// this buffer's free capacity. If no more bytes could be read, then this - /// returns false. Otherwise, this reads until it has filled the buffer - /// past the minimum amount. - pub(crate) fn fill<R: std::io::Read>( - &mut self, - mut rdr: R, - ) -> std::io::Result<bool> { - let mut readany = false; - loop { - let readlen = rdr.read(self.free_buffer())?; - if readlen == 0 { - return Ok(readany); - } - readany = true; - self.end += readlen; - if self.buffer().len() >= self.min { - return Ok(true); - } - } - } - - /// Roll the contents of the buffer so that the suffix of this buffer is - /// moved to the front and all other contents are dropped. The size of the - /// suffix corresponds precisely to the minimum buffer length. - /// - /// This should only be called when the entire contents of this buffer have - /// been searched. 
- pub(crate) fn roll(&mut self) { - let roll_start = self - .end - .checked_sub(self.min) - .expect("buffer capacity should be bigger than minimum amount"); - let roll_end = roll_start + self.min; - - assert!(roll_end <= self.end); - self.buf.copy_within(roll_start..roll_end, 0); - self.end = self.min; - } -} diff --git a/vendor/aho-corasick/src/util/byte_frequencies.rs b/vendor/aho-corasick/src/util/byte_frequencies.rs deleted file mode 100644 index c313b629db5d53..00000000000000 --- a/vendor/aho-corasick/src/util/byte_frequencies.rs +++ /dev/null @@ -1,258 +0,0 @@ -pub const BYTE_FREQUENCIES: [u8; 256] = [ - 55, // '\x00' - 52, // '\x01' - 51, // '\x02' - 50, // '\x03' - 49, // '\x04' - 48, // '\x05' - 47, // '\x06' - 46, // '\x07' - 45, // '\x08' - 103, // '\t' - 242, // '\n' - 66, // '\x0b' - 67, // '\x0c' - 229, // '\r' - 44, // '\x0e' - 43, // '\x0f' - 42, // '\x10' - 41, // '\x11' - 40, // '\x12' - 39, // '\x13' - 38, // '\x14' - 37, // '\x15' - 36, // '\x16' - 35, // '\x17' - 34, // '\x18' - 33, // '\x19' - 56, // '\x1a' - 32, // '\x1b' - 31, // '\x1c' - 30, // '\x1d' - 29, // '\x1e' - 28, // '\x1f' - 255, // ' ' - 148, // '!' - 164, // '"' - 149, // '#' - 136, // '$' - 160, // '%' - 155, // '&' - 173, // "'" - 221, // '(' - 222, // ')' - 134, // '*' - 122, // '+' - 232, // ',' - 202, // '-' - 215, // '.' - 224, // '/' - 208, // '0' - 220, // '1' - 204, // '2' - 187, // '3' - 183, // '4' - 179, // '5' - 177, // '6' - 168, // '7' - 178, // '8' - 200, // '9' - 226, // ':' - 195, // ';' - 154, // '<' - 184, // '=' - 174, // '>' - 126, // '?' - 120, // '@' - 191, // 'A' - 157, // 'B' - 194, // 'C' - 170, // 'D' - 189, // 'E' - 162, // 'F' - 161, // 'G' - 150, // 'H' - 193, // 'I' - 142, // 'J' - 137, // 'K' - 171, // 'L' - 176, // 'M' - 185, // 'N' - 167, // 'O' - 186, // 'P' - 112, // 'Q' - 175, // 'R' - 192, // 'S' - 188, // 'T' - 156, // 'U' - 140, // 'V' - 143, // 'W' - 123, // 'X' - 133, // 'Y' - 128, // 'Z' - 147, // '[' - 138, // '\\' - 146, // ']' - 114, // '^' - 223, // '_' - 151, // '`' - 249, // 'a' - 216, // 'b' - 238, // 'c' - 236, // 'd' - 253, // 'e' - 227, // 'f' - 218, // 'g' - 230, // 'h' - 247, // 'i' - 135, // 'j' - 180, // 'k' - 241, // 'l' - 233, // 'm' - 246, // 'n' - 244, // 'o' - 231, // 'p' - 139, // 'q' - 245, // 'r' - 243, // 's' - 251, // 't' - 235, // 'u' - 201, // 'v' - 196, // 'w' - 240, // 'x' - 214, // 'y' - 152, // 'z' - 182, // '{' - 205, // '|' - 181, // '}' - 127, // '~' - 27, // '\x7f' - 212, // '\x80' - 211, // '\x81' - 210, // '\x82' - 213, // '\x83' - 228, // '\x84' - 197, // '\x85' - 169, // '\x86' - 159, // '\x87' - 131, // '\x88' - 172, // '\x89' - 105, // '\x8a' - 80, // '\x8b' - 98, // '\x8c' - 96, // '\x8d' - 97, // '\x8e' - 81, // '\x8f' - 207, // '\x90' - 145, // '\x91' - 116, // '\x92' - 115, // '\x93' - 144, // '\x94' - 130, // '\x95' - 153, // '\x96' - 121, // '\x97' - 107, // '\x98' - 132, // '\x99' - 109, // '\x9a' - 110, // '\x9b' - 124, // '\x9c' - 111, // '\x9d' - 82, // '\x9e' - 108, // '\x9f' - 118, // '\xa0' - 141, // '¡' - 113, // '¢' - 129, // '£' - 119, // '¤' - 125, // '¥' - 165, // '¦' - 117, // '§' - 92, // '¨' - 106, // '©' - 83, // 'ª' - 72, // '«' - 99, // '¬' - 93, // '\xad' - 65, // '®' - 79, // '¯' - 166, // '°' - 237, // '±' - 163, // '²' - 199, // '³' - 190, // '´' - 225, // 'µ' - 209, // '¶' - 203, // '·' - 198, // '¸' - 217, // '¹' - 219, // 'º' - 206, // '»' - 234, // '¼' - 248, // '½' - 158, // '¾' - 239, // '¿' - 255, // 'À' - 255, // 'Á' - 255, // 'Â' - 255, // 'Ã' - 255, // 'Ä' - 255, // 'Å' - 255, // 'Æ' 
- 255, // 'Ç' - 255, // 'È' - 255, // 'É' - 255, // 'Ê' - 255, // 'Ë' - 255, // 'Ì' - 255, // 'Í' - 255, // 'Î' - 255, // 'Ï' - 255, // 'Ð' - 255, // 'Ñ' - 255, // 'Ò' - 255, // 'Ó' - 255, // 'Ô' - 255, // 'Õ' - 255, // 'Ö' - 255, // '×' - 255, // 'Ø' - 255, // 'Ù' - 255, // 'Ú' - 255, // 'Û' - 255, // 'Ü' - 255, // 'Ý' - 255, // 'Þ' - 255, // 'ß' - 255, // 'à' - 255, // 'á' - 255, // 'â' - 255, // 'ã' - 255, // 'ä' - 255, // 'å' - 255, // 'æ' - 255, // 'ç' - 255, // 'è' - 255, // 'é' - 255, // 'ê' - 255, // 'ë' - 255, // 'ì' - 255, // 'í' - 255, // 'î' - 255, // 'ï' - 255, // 'ð' - 255, // 'ñ' - 255, // 'ò' - 255, // 'ó' - 255, // 'ô' - 255, // 'õ' - 255, // 'ö' - 255, // '÷' - 255, // 'ø' - 255, // 'ù' - 255, // 'ú' - 255, // 'û' - 255, // 'ü' - 255, // 'ý' - 255, // 'þ' - 255, // 'ÿ' -]; diff --git a/vendor/aho-corasick/src/util/debug.rs b/vendor/aho-corasick/src/util/debug.rs deleted file mode 100644 index 22b5f2231f282b..00000000000000 --- a/vendor/aho-corasick/src/util/debug.rs +++ /dev/null @@ -1,26 +0,0 @@ -/// A type that wraps a single byte with a convenient fmt::Debug impl that -/// escapes the byte. -pub(crate) struct DebugByte(pub(crate) u8); - -impl core::fmt::Debug for DebugByte { - fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { - // Special case ASCII space. It's too hard to read otherwise, so - // put quotes around it. I sometimes wonder whether just '\x20' would - // be better... - if self.0 == b' ' { - return write!(f, "' '"); - } - // 10 bytes is enough to cover any output from ascii::escape_default. - let mut bytes = [0u8; 10]; - let mut len = 0; - for (i, mut b) in core::ascii::escape_default(self.0).enumerate() { - // capitalize \xab to \xAB - if i >= 2 && b'a' <= b && b <= b'f' { - b -= 32; - } - bytes[len] = b; - len += 1; - } - write!(f, "{}", core::str::from_utf8(&bytes[..len]).unwrap()) - } -} diff --git a/vendor/aho-corasick/src/util/error.rs b/vendor/aho-corasick/src/util/error.rs deleted file mode 100644 index 326d04657b2480..00000000000000 --- a/vendor/aho-corasick/src/util/error.rs +++ /dev/null @@ -1,259 +0,0 @@ -use crate::util::{ - primitives::{PatternID, SmallIndex}, - search::MatchKind, -}; - -/// An error that occurred during the construction of an Aho-Corasick -/// automaton. -/// -/// Build errors occur when some kind of limit has been exceeded, either in the -/// number of states, the number of patterns of the length of a pattern. These -/// limits aren't part of the public API, but they should generally be large -/// enough to handle most use cases. -/// -/// When the `std` feature is enabled, this implements the `std::error::Error` -/// trait. -#[derive(Clone, Debug)] -pub struct BuildError { - kind: ErrorKind, -} - -/// The kind of error that occurred. -#[derive(Clone, Debug)] -enum ErrorKind { - /// An error that occurs when allocating a new state would result in an - /// identifier that exceeds the capacity of a `StateID`. - StateIDOverflow { - /// The maximum possible id. - max: u64, - /// The maximum ID requested. - requested_max: u64, - }, - /// An error that occurs when adding a pattern to an Aho-Corasick - /// automaton would result in an identifier that exceeds the capacity of a - /// `PatternID`. - PatternIDOverflow { - /// The maximum possible id. - max: u64, - /// The maximum ID requested. - requested_max: u64, - }, - /// Occurs when a pattern string is given to the Aho-Corasick constructor - /// that is too long. - PatternTooLong { - /// The ID of the pattern that was too long. 
- pattern: PatternID, - /// The length that was too long. - len: usize, - }, -} - -impl BuildError { - pub(crate) fn state_id_overflow( - max: u64, - requested_max: u64, - ) -> BuildError { - BuildError { kind: ErrorKind::StateIDOverflow { max, requested_max } } - } - - pub(crate) fn pattern_id_overflow( - max: u64, - requested_max: u64, - ) -> BuildError { - BuildError { - kind: ErrorKind::PatternIDOverflow { max, requested_max }, - } - } - - pub(crate) fn pattern_too_long( - pattern: PatternID, - len: usize, - ) -> BuildError { - BuildError { kind: ErrorKind::PatternTooLong { pattern, len } } - } -} - -#[cfg(feature = "std")] -impl std::error::Error for BuildError {} - -impl core::fmt::Display for BuildError { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - match self.kind { - ErrorKind::StateIDOverflow { max, requested_max } => { - write!( - f, - "state identifier overflow: failed to create state ID \ - from {}, which exceeds the max of {}", - requested_max, max, - ) - } - ErrorKind::PatternIDOverflow { max, requested_max } => { - write!( - f, - "pattern identifier overflow: failed to create pattern ID \ - from {}, which exceeds the max of {}", - requested_max, max, - ) - } - ErrorKind::PatternTooLong { pattern, len } => { - write!( - f, - "pattern {} with length {} exceeds \ - the maximum pattern length of {}", - pattern.as_usize(), - len, - SmallIndex::MAX.as_usize(), - ) - } - } - } -} - -/// An error that occurred during an Aho-Corasick search. -/// -/// An error that occurs during a search is limited to some kind of -/// misconfiguration that resulted in an illegal call. Stated differently, -/// whether an error occurs is not dependent on the specific bytes in the -/// haystack. -/// -/// Examples of misconfiguration: -/// -/// * Executing a stream or overlapping search on a searcher that was built was -/// something other than [`MatchKind::Standard`](crate::MatchKind::Standard) -/// semantics. -/// * Requested an anchored or an unanchored search on a searcher that doesn't -/// support unanchored or anchored searches, respectively. -/// -/// When the `std` feature is enabled, this implements the `std::error::Error` -/// trait. -#[derive(Clone, Debug, Eq, PartialEq)] -pub struct MatchError(alloc::boxed::Box<MatchErrorKind>); - -impl MatchError { - /// Create a new error value with the given kind. - /// - /// This is a more verbose version of the kind-specific constructors, e.g., - /// `MatchError::unsupported_stream`. - pub fn new(kind: MatchErrorKind) -> MatchError { - MatchError(alloc::boxed::Box::new(kind)) - } - - /// Returns a reference to the underlying error kind. - pub fn kind(&self) -> &MatchErrorKind { - &self.0 - } - - /// Create a new "invalid anchored search" error. This occurs when the - /// caller requests an anchored search but where anchored searches aren't - /// supported. - /// - /// This is the same as calling `MatchError::new` with a - /// [`MatchErrorKind::InvalidInputAnchored`] kind. - pub fn invalid_input_anchored() -> MatchError { - MatchError::new(MatchErrorKind::InvalidInputAnchored) - } - - /// Create a new "invalid unanchored search" error. This occurs when the - /// caller requests an unanchored search but where unanchored searches - /// aren't supported. - /// - /// This is the same as calling `MatchError::new` with a - /// [`MatchErrorKind::InvalidInputUnanchored`] kind. 
- pub fn invalid_input_unanchored() -> MatchError { - MatchError::new(MatchErrorKind::InvalidInputUnanchored) - } - - /// Create a new "unsupported stream search" error. This occurs when the - /// caller requests a stream search while using an Aho-Corasick automaton - /// with a match kind other than [`MatchKind::Standard`]. - /// - /// The match kind given should be the match kind of the automaton. It - /// should never be `MatchKind::Standard`. - pub fn unsupported_stream(got: MatchKind) -> MatchError { - MatchError::new(MatchErrorKind::UnsupportedStream { got }) - } - - /// Create a new "unsupported overlapping search" error. This occurs when - /// the caller requests an overlapping search while using an Aho-Corasick - /// automaton with a match kind other than [`MatchKind::Standard`]. - /// - /// The match kind given should be the match kind of the automaton. It - /// should never be `MatchKind::Standard`. - pub fn unsupported_overlapping(got: MatchKind) -> MatchError { - MatchError::new(MatchErrorKind::UnsupportedOverlapping { got }) - } - - /// Create a new "unsupported empty pattern" error. This occurs when the - /// caller requests a search for which matching an automaton that contains - /// an empty pattern string is not supported. - pub fn unsupported_empty() -> MatchError { - MatchError::new(MatchErrorKind::UnsupportedEmpty) - } -} - -/// The underlying kind of a [`MatchError`]. -/// -/// This is a **non-exhaustive** enum. That means new variants may be added in -/// a semver-compatible release. -#[non_exhaustive] -#[derive(Clone, Debug, Eq, PartialEq)] -pub enum MatchErrorKind { - /// An error indicating that an anchored search was requested, but from a - /// searcher that was built without anchored support. - InvalidInputAnchored, - /// An error indicating that an unanchored search was requested, but from a - /// searcher that was built without unanchored support. - InvalidInputUnanchored, - /// An error indicating that a stream search was attempted on an - /// Aho-Corasick automaton with an unsupported `MatchKind`. - UnsupportedStream { - /// The match semantics for the automaton that was used. - got: MatchKind, - }, - /// An error indicating that an overlapping search was attempted on an - /// Aho-Corasick automaton with an unsupported `MatchKind`. - UnsupportedOverlapping { - /// The match semantics for the automaton that was used. - got: MatchKind, - }, - /// An error indicating that the operation requested doesn't support - /// automatons that contain an empty pattern string. 
- UnsupportedEmpty, -} - -#[cfg(feature = "std")] -impl std::error::Error for MatchError {} - -impl core::fmt::Display for MatchError { - fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { - match *self.kind() { - MatchErrorKind::InvalidInputAnchored => { - write!(f, "anchored searches are not supported or enabled") - } - MatchErrorKind::InvalidInputUnanchored => { - write!(f, "unanchored searches are not supported or enabled") - } - MatchErrorKind::UnsupportedStream { got } => { - write!( - f, - "match kind {:?} does not support stream searching", - got, - ) - } - MatchErrorKind::UnsupportedOverlapping { got } => { - write!( - f, - "match kind {:?} does not support overlapping searches", - got, - ) - } - MatchErrorKind::UnsupportedEmpty => { - write!( - f, - "matching with an empty pattern string is not \ - supported for this operation", - ) - } - } - } -} diff --git a/vendor/aho-corasick/src/util/int.rs b/vendor/aho-corasick/src/util/int.rs deleted file mode 100644 index 54762b66046893..00000000000000 --- a/vendor/aho-corasick/src/util/int.rs +++ /dev/null @@ -1,278 +0,0 @@ -/*! -This module provides several integer oriented traits for converting between -both fixed size integers and integers whose size varies based on the target -(like `usize`). - -The main design principle for this module is to centralize all uses of `as`. -The thinking here is that `as` makes it very easy to perform accidental lossy -conversions, and if we centralize all its uses here under more descriptive -higher level operations, its use and correctness becomes easier to audit. - -This was copied mostly wholesale from `regex-automata`. - -NOTE: for simplicity, we don't take target pointer width into account here for -`usize` conversions. Since we currently only panic in debug mode, skipping the -check when it can be proven it isn't needed at compile time doesn't really -matter. Now, if we wind up wanting to do as many checks as possible in release -mode, then we would want to skip those when we know the conversions are always -non-lossy. -*/ - -// We define a little more than what we need, but I'd rather just have -// everything via a consistent and uniform API then have holes. 
-#![allow(dead_code)] - -pub(crate) trait U8 { - fn as_usize(self) -> usize; -} - -impl U8 for u8 { - fn as_usize(self) -> usize { - usize::from(self) - } -} - -pub(crate) trait U16 { - fn as_usize(self) -> usize; - fn low_u8(self) -> u8; - fn high_u8(self) -> u8; -} - -impl U16 for u16 { - fn as_usize(self) -> usize { - usize::from(self) - } - - fn low_u8(self) -> u8 { - self as u8 - } - - fn high_u8(self) -> u8 { - (self >> 8) as u8 - } -} - -pub(crate) trait U32 { - fn as_usize(self) -> usize; - fn low_u8(self) -> u8; - fn low_u16(self) -> u16; - fn high_u16(self) -> u16; -} - -impl U32 for u32 { - #[inline] - fn as_usize(self) -> usize { - #[cfg(debug_assertions)] - { - usize::try_from(self).expect("u32 overflowed usize") - } - #[cfg(not(debug_assertions))] - { - self as usize - } - } - - fn low_u8(self) -> u8 { - self as u8 - } - - fn low_u16(self) -> u16 { - self as u16 - } - - fn high_u16(self) -> u16 { - (self >> 16) as u16 - } -} - -pub(crate) trait U64 { - fn as_usize(self) -> usize; - fn low_u8(self) -> u8; - fn low_u16(self) -> u16; - fn low_u32(self) -> u32; - fn high_u32(self) -> u32; -} - -impl U64 for u64 { - fn as_usize(self) -> usize { - #[cfg(debug_assertions)] - { - usize::try_from(self).expect("u64 overflowed usize") - } - #[cfg(not(debug_assertions))] - { - self as usize - } - } - - fn low_u8(self) -> u8 { - self as u8 - } - - fn low_u16(self) -> u16 { - self as u16 - } - - fn low_u32(self) -> u32 { - self as u32 - } - - fn high_u32(self) -> u32 { - (self >> 32) as u32 - } -} - -pub(crate) trait I8 { - fn as_usize(self) -> usize; - fn to_bits(self) -> u8; - fn from_bits(n: u8) -> i8; -} - -impl I8 for i8 { - fn as_usize(self) -> usize { - #[cfg(debug_assertions)] - { - usize::try_from(self).expect("i8 overflowed usize") - } - #[cfg(not(debug_assertions))] - { - self as usize - } - } - - fn to_bits(self) -> u8 { - self as u8 - } - - fn from_bits(n: u8) -> i8 { - n as i8 - } -} - -pub(crate) trait I32 { - fn as_usize(self) -> usize; - fn to_bits(self) -> u32; - fn from_bits(n: u32) -> i32; -} - -impl I32 for i32 { - fn as_usize(self) -> usize { - #[cfg(debug_assertions)] - { - usize::try_from(self).expect("i32 overflowed usize") - } - #[cfg(not(debug_assertions))] - { - self as usize - } - } - - fn to_bits(self) -> u32 { - self as u32 - } - - fn from_bits(n: u32) -> i32 { - n as i32 - } -} - -pub(crate) trait I64 { - fn as_usize(self) -> usize; - fn to_bits(self) -> u64; - fn from_bits(n: u64) -> i64; -} - -impl I64 for i64 { - fn as_usize(self) -> usize { - #[cfg(debug_assertions)] - { - usize::try_from(self).expect("i64 overflowed usize") - } - #[cfg(not(debug_assertions))] - { - self as usize - } - } - - fn to_bits(self) -> u64 { - self as u64 - } - - fn from_bits(n: u64) -> i64 { - n as i64 - } -} - -pub(crate) trait Usize { - fn as_u8(self) -> u8; - fn as_u16(self) -> u16; - fn as_u32(self) -> u32; - fn as_u64(self) -> u64; -} - -impl Usize for usize { - fn as_u8(self) -> u8 { - #[cfg(debug_assertions)] - { - u8::try_from(self).expect("usize overflowed u8") - } - #[cfg(not(debug_assertions))] - { - self as u8 - } - } - - fn as_u16(self) -> u16 { - #[cfg(debug_assertions)] - { - u16::try_from(self).expect("usize overflowed u16") - } - #[cfg(not(debug_assertions))] - { - self as u16 - } - } - - fn as_u32(self) -> u32 { - #[cfg(debug_assertions)] - { - u32::try_from(self).expect("usize overflowed u32") - } - #[cfg(not(debug_assertions))] - { - self as u32 - } - } - - fn as_u64(self) -> u64 { - #[cfg(debug_assertions)] - { - u64::try_from(self).expect("usize overflowed 
u64") - } - #[cfg(not(debug_assertions))] - { - self as u64 - } - } -} - -// Pointers aren't integers, but we convert pointers to integers to perform -// offset arithmetic in some places. (And no, we don't convert the integers -// back to pointers.) So add 'as_usize' conversions here too for completeness. -// -// These 'as' casts are actually okay because they're always non-lossy. But the -// idea here is to just try and remove as much 'as' as possible, particularly -// in this crate where we are being really paranoid about offsets and making -// sure we don't panic on inputs that might be untrusted. This way, the 'as' -// casts become easier to audit if they're all in one place, even when some of -// them are actually okay 100% of the time. - -pub(crate) trait Pointer { - fn as_usize(self) -> usize; -} - -impl<T> Pointer for *const T { - fn as_usize(self) -> usize { - self as usize - } -} diff --git a/vendor/aho-corasick/src/util/mod.rs b/vendor/aho-corasick/src/util/mod.rs deleted file mode 100644 index f7a1ddd07b8f85..00000000000000 --- a/vendor/aho-corasick/src/util/mod.rs +++ /dev/null @@ -1,12 +0,0 @@ -pub(crate) mod alphabet; -#[cfg(feature = "std")] -pub(crate) mod buffer; -pub(crate) mod byte_frequencies; -pub(crate) mod debug; -pub(crate) mod error; -pub(crate) mod int; -pub(crate) mod prefilter; -pub(crate) mod primitives; -pub(crate) mod remapper; -pub(crate) mod search; -pub(crate) mod special; diff --git a/vendor/aho-corasick/src/util/prefilter.rs b/vendor/aho-corasick/src/util/prefilter.rs deleted file mode 100644 index ec3171694f10cf..00000000000000 --- a/vendor/aho-corasick/src/util/prefilter.rs +++ /dev/null @@ -1,924 +0,0 @@ -use core::{ - cmp, - fmt::Debug, - panic::{RefUnwindSafe, UnwindSafe}, - u8, -}; - -use alloc::{sync::Arc, vec, vec::Vec}; - -use crate::{ - packed, - util::{ - alphabet::ByteSet, - search::{Match, MatchKind, Span}, - }, -}; - -/// A prefilter for accelerating a search. -/// -/// This crate uses prefilters in the core search implementations to accelerate -/// common cases. They typically only apply to cases where there are a small -/// number of patterns (less than 100 or so), but when they do, thoughput can -/// be boosted considerably, perhaps by an order of magnitude. When a prefilter -/// is active, it is used whenever a search enters an automaton's start state. -/// -/// Currently, prefilters cannot be constructed by -/// callers. A `Prefilter` can only be accessed via the -/// [`Automaton::prefilter`](crate::automaton::Automaton::prefilter) -/// method and used to execute a search. In other words, a prefilter can be -/// used to optimize your own search implementation if necessary, but cannot do -/// much else. If you have a use case for more APIs, please submit an issue. -#[derive(Clone, Debug)] -pub struct Prefilter { - finder: Arc<dyn PrefilterI>, - memory_usage: usize, -} - -impl Prefilter { - /// Execute a search in the haystack within the span given. If a match or - /// a possible match is returned, then it is guaranteed to occur within - /// the bounds of the span. - /// - /// If the span provided is invalid for the given haystack, then behavior - /// is unspecified. - #[inline] - pub fn find_in(&self, haystack: &[u8], span: Span) -> Candidate { - self.finder.find_in(haystack, span) - } - - #[inline] - pub(crate) fn memory_usage(&self) -> usize { - self.memory_usage - } -} - -/// A candidate is the result of running a prefilter on a haystack at a -/// particular position. 
-/// -/// The result is either no match, a confirmed match or a possible match. -/// -/// When no match is returned, the prefilter is guaranteeing that no possible -/// match can be found in the haystack, and the caller may trust this. That is, -/// all correct prefilters must never report false negatives. -/// -/// In some cases, a prefilter can confirm a match very quickly, in which case, -/// the caller may use this to stop what it's doing and report the match. In -/// this case, prefilter implementations must never report a false positive. -/// In other cases, the prefilter can only report a potential match, in which -/// case the callers must attempt to confirm the match. In this case, prefilter -/// implementations are permitted to return false positives. -#[derive(Clone, Debug)] -pub enum Candidate { - /// No match was found. Since false negatives are not possible, this means - /// the search can quit as it is guaranteed not to find another match. - None, - /// A confirmed match was found. Callers do not need to confirm it. - Match(Match), - /// The start of a possible match was found. Callers must confirm it before - /// reporting it as a match. - PossibleStartOfMatch(usize), -} - -impl Candidate { - /// Convert this candidate into an option. This is useful when callers - /// do not distinguish between true positives and false positives (i.e., - /// the caller must always confirm the match). - pub fn into_option(self) -> Option<usize> { - match self { - Candidate::None => None, - Candidate::Match(ref m) => Some(m.start()), - Candidate::PossibleStartOfMatch(start) => Some(start), - } - } -} - -/// A prefilter describes the behavior of fast literal scanners for quickly -/// skipping past bytes in the haystack that we know cannot possibly -/// participate in a match. -trait PrefilterI: - Send + Sync + RefUnwindSafe + UnwindSafe + Debug + 'static -{ - /// Returns the next possible match candidate. This may yield false - /// positives, so callers must confirm a match starting at the position - /// returned. This, however, must never produce false negatives. That is, - /// this must, at minimum, return the starting position of the next match - /// in the given haystack after or at the given position. - fn find_in(&self, haystack: &[u8], span: Span) -> Candidate; -} - -impl<P: PrefilterI + ?Sized> PrefilterI for Arc<P> { - #[inline(always)] - fn find_in(&self, haystack: &[u8], span: Span) -> Candidate { - (**self).find_in(haystack, span) - } -} - -/// A builder for constructing the best possible prefilter. When constructed, -/// this builder will heuristically select the best prefilter it can build, -/// if any, and discard the rest. -#[derive(Debug)] -pub(crate) struct Builder { - count: usize, - ascii_case_insensitive: bool, - start_bytes: StartBytesBuilder, - rare_bytes: RareBytesBuilder, - memmem: MemmemBuilder, - packed: Option<packed::Builder>, - // If we run across a condition that suggests we shouldn't use a prefilter - // at all (like an empty pattern), then disable prefilters entirely. - enabled: bool, -} - -impl Builder { - /// Create a new builder for constructing the best possible prefilter. 
- pub(crate) fn new(kind: MatchKind) -> Builder { - let pbuilder = kind - .as_packed() - .map(|kind| packed::Config::new().match_kind(kind).builder()); - Builder { - count: 0, - ascii_case_insensitive: false, - start_bytes: StartBytesBuilder::new(), - rare_bytes: RareBytesBuilder::new(), - memmem: MemmemBuilder::default(), - packed: pbuilder, - enabled: true, - } - } - - /// Enable ASCII case insensitivity. When set, byte strings added to this - /// builder will be interpreted without respect to ASCII case. - pub(crate) fn ascii_case_insensitive(mut self, yes: bool) -> Builder { - self.ascii_case_insensitive = yes; - self.start_bytes = self.start_bytes.ascii_case_insensitive(yes); - self.rare_bytes = self.rare_bytes.ascii_case_insensitive(yes); - self - } - - /// Return a prefilter suitable for quickly finding potential matches. - /// - /// All patterns added to an Aho-Corasick automaton should be added to this - /// builder before attempting to construct the prefilter. - pub(crate) fn build(&self) -> Option<Prefilter> { - if !self.enabled { - debug!("prefilter not enabled, skipping"); - return None; - } - // If we only have one pattern, then deferring to memmem is always - // the best choice. This is kind of a weird case, because, well, why - // use Aho-Corasick if you only have one pattern? But maybe you don't - // know exactly how many patterns you'll get up front, and you need to - // support the option of multiple patterns. So instead of relying on - // the caller to branch and use memmem explicitly, we just do it for - // them. - if !self.ascii_case_insensitive { - if let Some(pre) = self.memmem.build() { - debug!("using memmem prefilter"); - return Some(pre); - } - } - let (packed, patlen, minlen) = if self.ascii_case_insensitive { - (None, usize::MAX, 0) - } else { - let patlen = self.packed.as_ref().map_or(usize::MAX, |p| p.len()); - let minlen = self.packed.as_ref().map_or(0, |p| p.minimum_len()); - let packed = - self.packed.as_ref().and_then(|b| b.build()).map(|s| { - let memory_usage = s.memory_usage(); - debug!( - "built packed prefilter (len: {}, \ - minimum pattern len: {}, memory usage: {}) \ - for consideration", - patlen, minlen, memory_usage, - ); - Prefilter { finder: Arc::new(Packed(s)), memory_usage } - }); - (packed, patlen, minlen) - }; - match (self.start_bytes.build(), self.rare_bytes.build()) { - // If we could build both start and rare prefilters, then there are - // a few cases in which we'd want to use the start-byte prefilter - // over the rare-byte prefilter, since the former has lower - // overhead. - (prestart @ Some(_), prerare @ Some(_)) => { - debug!( - "both start (len={}, rank={}) and \ - rare (len={}, rank={}) byte prefilters \ - are available", - self.start_bytes.count, - self.start_bytes.rank_sum, - self.rare_bytes.count, - self.rare_bytes.rank_sum, - ); - if patlen <= 16 - && minlen >= 2 - && self.start_bytes.count >= 3 - && self.rare_bytes.count >= 3 - { - debug!( - "start and rare byte prefilters available, but \ - they're probably slower than packed so using \ - packed" - ); - return packed; - } - // If the start-byte prefilter can scan for a smaller number - // of bytes than the rare-byte prefilter, then it's probably - // faster. 
- let has_fewer_bytes = - self.start_bytes.count < self.rare_bytes.count; - // Otherwise, if the combined frequency rank of the detected - // bytes in the start-byte prefilter is "close" to the combined - // frequency rank of the rare-byte prefilter, then we pick - // the start-byte prefilter even if the rare-byte prefilter - // heuristically searches for rare bytes. This is because the - // rare-byte prefilter has higher constant costs, so we tend to - // prefer the start-byte prefilter when we can. - let has_rarer_bytes = - self.start_bytes.rank_sum <= self.rare_bytes.rank_sum + 50; - if has_fewer_bytes { - debug!( - "using start byte prefilter because it has fewer - bytes to search for than the rare byte prefilter", - ); - prestart - } else if has_rarer_bytes { - debug!( - "using start byte prefilter because its byte \ - frequency rank was determined to be \ - \"good enough\" relative to the rare byte prefilter \ - byte frequency rank", - ); - prestart - } else { - debug!("using rare byte prefilter"); - prerare - } - } - (prestart @ Some(_), None) => { - if patlen <= 16 && minlen >= 2 && self.start_bytes.count >= 3 { - debug!( - "start byte prefilter available, but \ - it's probably slower than packed so using \ - packed" - ); - return packed; - } - debug!( - "have start byte prefilter but not rare byte prefilter, \ - so using start byte prefilter", - ); - prestart - } - (None, prerare @ Some(_)) => { - if patlen <= 16 && minlen >= 2 && self.rare_bytes.count >= 3 { - debug!( - "rare byte prefilter available, but \ - it's probably slower than packed so using \ - packed" - ); - return packed; - } - debug!( - "have rare byte prefilter but not start byte prefilter, \ - so using rare byte prefilter", - ); - prerare - } - (None, None) if self.ascii_case_insensitive => { - debug!( - "no start or rare byte prefilter and ASCII case \ - insensitivity was enabled, so skipping prefilter", - ); - None - } - (None, None) => { - if packed.is_some() { - debug!("falling back to packed prefilter"); - } else { - debug!("no prefilter available"); - } - packed - } - } - } - - /// Add a literal string to this prefilter builder. - pub(crate) fn add(&mut self, bytes: &[u8]) { - if bytes.is_empty() { - self.enabled = false; - } - if !self.enabled { - return; - } - self.count += 1; - self.start_bytes.add(bytes); - self.rare_bytes.add(bytes); - self.memmem.add(bytes); - if let Some(ref mut pbuilder) = self.packed { - pbuilder.add(bytes); - } - } -} - -/// A type that wraps a packed searcher and implements the `Prefilter` -/// interface. -#[derive(Clone, Debug)] -struct Packed(packed::Searcher); - -impl PrefilterI for Packed { - fn find_in(&self, haystack: &[u8], span: Span) -> Candidate { - self.0 - .find_in(haystack, span) - .map_or(Candidate::None, Candidate::Match) - } -} - -/// A builder for constructing a prefilter that uses memmem. -#[derive(Debug, Default)] -struct MemmemBuilder { - /// The number of patterns that have been added. - count: usize, - /// The singular pattern to search for. This is only set when count==1. 
- one: Option<Vec<u8>>, -} - -impl MemmemBuilder { - fn build(&self) -> Option<Prefilter> { - #[cfg(all(feature = "std", feature = "perf-literal"))] - fn imp(builder: &MemmemBuilder) -> Option<Prefilter> { - let pattern = builder.one.as_ref()?; - assert_eq!(1, builder.count); - let finder = Arc::new(Memmem( - memchr::memmem::Finder::new(pattern).into_owned(), - )); - let memory_usage = pattern.len(); - Some(Prefilter { finder, memory_usage }) - } - - #[cfg(not(all(feature = "std", feature = "perf-literal")))] - fn imp(_: &MemmemBuilder) -> Option<Prefilter> { - None - } - - imp(self) - } - - fn add(&mut self, bytes: &[u8]) { - self.count += 1; - if self.count == 1 { - self.one = Some(bytes.to_vec()); - } else { - self.one = None; - } - } -} - -/// A type that wraps a SIMD accelerated single substring search from the -/// `memchr` crate for use as a prefilter. -/// -/// Currently, this prefilter is only active for Aho-Corasick searchers with -/// a single pattern. In theory, this could be extended to support searchers -/// that have a common prefix of more than one byte (for one byte, we would use -/// memchr), but it's not clear if it's worth it or not. -/// -/// Also, unfortunately, this currently also requires the 'std' feature to -/// be enabled. That's because memchr doesn't have a no-std-but-with-alloc -/// mode, and so APIs like Finder::into_owned aren't available when 'std' is -/// disabled. But there should be an 'alloc' feature that brings in APIs like -/// Finder::into_owned but doesn't use std-only features like runtime CPU -/// feature detection. -#[cfg(all(feature = "std", feature = "perf-literal"))] -#[derive(Clone, Debug)] -struct Memmem(memchr::memmem::Finder<'static>); - -#[cfg(all(feature = "std", feature = "perf-literal"))] -impl PrefilterI for Memmem { - fn find_in(&self, haystack: &[u8], span: Span) -> Candidate { - use crate::util::primitives::PatternID; - - self.0.find(&haystack[span]).map_or(Candidate::None, |i| { - let start = span.start + i; - let end = start + self.0.needle().len(); - // N.B. We can declare a match and use a fixed pattern ID here - // because a Memmem prefilter is only ever created for searchers - // with exactly one pattern. Thus, every match is always a match - // and it is always for the first and only pattern. - Candidate::Match(Match::new(PatternID::ZERO, start..end)) - }) - } -} - -/// A builder for constructing a rare byte prefilter. -/// -/// A rare byte prefilter attempts to pick out a small set of rare bytes that -/// occurr in the patterns, and then quickly scan to matches of those rare -/// bytes. -#[derive(Clone, Debug)] -struct RareBytesBuilder { - /// Whether this prefilter should account for ASCII case insensitivity or - /// not. - ascii_case_insensitive: bool, - /// A set of rare bytes, indexed by byte value. - rare_set: ByteSet, - /// A set of byte offsets associated with bytes in a pattern. An entry - /// corresponds to a particular bytes (its index) and is only non-zero if - /// the byte occurred at an offset greater than 0 in at least one pattern. - /// - /// If a byte's offset is not representable in 8 bits, then the rare bytes - /// prefilter becomes inert. - byte_offsets: RareByteOffsets, - /// Whether this is available as a prefilter or not. This can be set to - /// false during construction if a condition is seen that invalidates the - /// use of the rare-byte prefilter. - available: bool, - /// The number of bytes set to an active value in `byte_offsets`. 
- count: usize, - /// The sum of frequency ranks for the rare bytes detected. This is - /// intended to give a heuristic notion of how rare the bytes are. - rank_sum: u16, -} - -/// A set of byte offsets, keyed by byte. -#[derive(Clone, Copy)] -struct RareByteOffsets { - /// Each entry corresponds to the maximum offset of the corresponding - /// byte across all patterns seen. - set: [RareByteOffset; 256], -} - -impl RareByteOffsets { - /// Create a new empty set of rare byte offsets. - pub(crate) fn empty() -> RareByteOffsets { - RareByteOffsets { set: [RareByteOffset::default(); 256] } - } - - /// Add the given offset for the given byte to this set. If the offset is - /// greater than the existing offset, then it overwrites the previous - /// value and returns false. If there is no previous value set, then this - /// sets it and returns true. - pub(crate) fn set(&mut self, byte: u8, off: RareByteOffset) { - self.set[byte as usize].max = - cmp::max(self.set[byte as usize].max, off.max); - } -} - -impl core::fmt::Debug for RareByteOffsets { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - let mut offsets = vec![]; - for off in self.set.iter() { - if off.max > 0 { - offsets.push(off); - } - } - f.debug_struct("RareByteOffsets").field("set", &offsets).finish() - } -} - -/// Offsets associated with an occurrence of a "rare" byte in any of the -/// patterns used to construct a single Aho-Corasick automaton. -#[derive(Clone, Copy, Debug)] -struct RareByteOffset { - /// The maximum offset at which a particular byte occurs from the start - /// of any pattern. This is used as a shift amount. That is, when an - /// occurrence of this byte is found, the candidate position reported by - /// the prefilter is `position_of_byte - max`, such that the automaton - /// will begin its search at a position that is guaranteed to observe a - /// match. - /// - /// To avoid accidentally quadratic behavior, a prefilter is considered - /// ineffective when it is asked to start scanning from a position that it - /// has already scanned past. - /// - /// Using a `u8` here means that if we ever see a pattern that's longer - /// than 255 bytes, then the entire rare byte prefilter is disabled. - max: u8, -} - -impl Default for RareByteOffset { - fn default() -> RareByteOffset { - RareByteOffset { max: 0 } - } -} - -impl RareByteOffset { - /// Create a new rare byte offset. If the given offset is too big, then - /// None is returned. In that case, callers should render the rare bytes - /// prefilter inert. - fn new(max: usize) -> Option<RareByteOffset> { - if max > u8::MAX as usize { - None - } else { - Some(RareByteOffset { max: max as u8 }) - } - } -} - -impl RareBytesBuilder { - /// Create a new builder for constructing a rare byte prefilter. - fn new() -> RareBytesBuilder { - RareBytesBuilder { - ascii_case_insensitive: false, - rare_set: ByteSet::empty(), - byte_offsets: RareByteOffsets::empty(), - available: true, - count: 0, - rank_sum: 0, - } - } - - /// Enable ASCII case insensitivity. When set, byte strings added to this - /// builder will be interpreted without respect to ASCII case. - fn ascii_case_insensitive(mut self, yes: bool) -> RareBytesBuilder { - self.ascii_case_insensitive = yes; - self - } - - /// Build the rare bytes prefilter. - /// - /// If there are more than 3 distinct rare bytes found, or if heuristics - /// otherwise determine that this prefilter should not be used, then `None` - /// is returned. 
- fn build(&self) -> Option<Prefilter> { - #[cfg(feature = "perf-literal")] - fn imp(builder: &RareBytesBuilder) -> Option<Prefilter> { - if !builder.available || builder.count > 3 { - return None; - } - let (mut bytes, mut len) = ([0; 3], 0); - for b in 0..=255 { - if builder.rare_set.contains(b) { - bytes[len] = b; - len += 1; - } - } - let finder: Arc<dyn PrefilterI> = match len { - 0 => return None, - 1 => Arc::new(RareBytesOne { - byte1: bytes[0], - offset: builder.byte_offsets.set[bytes[0] as usize], - }), - 2 => Arc::new(RareBytesTwo { - offsets: builder.byte_offsets, - byte1: bytes[0], - byte2: bytes[1], - }), - 3 => Arc::new(RareBytesThree { - offsets: builder.byte_offsets, - byte1: bytes[0], - byte2: bytes[1], - byte3: bytes[2], - }), - _ => unreachable!(), - }; - Some(Prefilter { finder, memory_usage: 0 }) - } - - #[cfg(not(feature = "perf-literal"))] - fn imp(_: &RareBytesBuilder) -> Option<Prefilter> { - None - } - - imp(self) - } - - /// Add a byte string to this builder. - /// - /// All patterns added to an Aho-Corasick automaton should be added to this - /// builder before attempting to construct the prefilter. - fn add(&mut self, bytes: &[u8]) { - // If we've already given up, then do nothing. - if !self.available { - return; - } - // If we've already blown our budget, then don't waste time looking - // for more rare bytes. - if self.count > 3 { - self.available = false; - return; - } - // If the pattern is too long, then our offset table is bunk, so - // give up. - if bytes.len() >= 256 { - self.available = false; - return; - } - let mut rarest = match bytes.first() { - None => return, - Some(&b) => (b, freq_rank(b)), - }; - // The idea here is to look for the rarest byte in each pattern, and - // add that to our set. As a special exception, if we see a byte that - // we've already added, then we immediately stop and choose that byte, - // even if there's another rare byte in the pattern. This helps us - // apply the rare byte optimization in more cases by attempting to pick - // bytes that are in common between patterns. So for example, if we - // were searching for `Sherlock` and `lockjaw`, then this would pick - // `k` for both patterns, resulting in the use of `memchr` instead of - // `memchr2` for `k` and `j`. - let mut found = false; - for (pos, &b) in bytes.iter().enumerate() { - self.set_offset(pos, b); - if found { - continue; - } - if self.rare_set.contains(b) { - found = true; - continue; - } - let rank = freq_rank(b); - if rank < rarest.1 { - rarest = (b, rank); - } - } - if !found { - self.add_rare_byte(rarest.0); - } - } - - fn set_offset(&mut self, pos: usize, byte: u8) { - // This unwrap is OK because pos is never bigger than our max. - let offset = RareByteOffset::new(pos).unwrap(); - self.byte_offsets.set(byte, offset); - if self.ascii_case_insensitive { - self.byte_offsets.set(opposite_ascii_case(byte), offset); - } - } - - fn add_rare_byte(&mut self, byte: u8) { - self.add_one_rare_byte(byte); - if self.ascii_case_insensitive { - self.add_one_rare_byte(opposite_ascii_case(byte)); - } - } - - fn add_one_rare_byte(&mut self, byte: u8) { - if !self.rare_set.contains(byte) { - self.rare_set.add(byte); - self.count += 1; - self.rank_sum += freq_rank(byte) as u16; - } - } -} - -/// A prefilter for scanning for a single "rare" byte. 
-#[cfg(feature = "perf-literal")] -#[derive(Clone, Debug)] -struct RareBytesOne { - byte1: u8, - offset: RareByteOffset, -} - -#[cfg(feature = "perf-literal")] -impl PrefilterI for RareBytesOne { - fn find_in(&self, haystack: &[u8], span: Span) -> Candidate { - memchr::memchr(self.byte1, &haystack[span]) - .map(|i| { - let pos = span.start + i; - cmp::max( - span.start, - pos.saturating_sub(usize::from(self.offset.max)), - ) - }) - .map_or(Candidate::None, Candidate::PossibleStartOfMatch) - } -} - -/// A prefilter for scanning for two "rare" bytes. -#[cfg(feature = "perf-literal")] -#[derive(Clone, Debug)] -struct RareBytesTwo { - offsets: RareByteOffsets, - byte1: u8, - byte2: u8, -} - -#[cfg(feature = "perf-literal")] -impl PrefilterI for RareBytesTwo { - fn find_in(&self, haystack: &[u8], span: Span) -> Candidate { - memchr::memchr2(self.byte1, self.byte2, &haystack[span]) - .map(|i| { - let pos = span.start + i; - let offset = self.offsets.set[usize::from(haystack[pos])].max; - cmp::max(span.start, pos.saturating_sub(usize::from(offset))) - }) - .map_or(Candidate::None, Candidate::PossibleStartOfMatch) - } -} - -/// A prefilter for scanning for three "rare" bytes. -#[cfg(feature = "perf-literal")] -#[derive(Clone, Debug)] -struct RareBytesThree { - offsets: RareByteOffsets, - byte1: u8, - byte2: u8, - byte3: u8, -} - -#[cfg(feature = "perf-literal")] -impl PrefilterI for RareBytesThree { - fn find_in(&self, haystack: &[u8], span: Span) -> Candidate { - memchr::memchr3(self.byte1, self.byte2, self.byte3, &haystack[span]) - .map(|i| { - let pos = span.start + i; - let offset = self.offsets.set[usize::from(haystack[pos])].max; - cmp::max(span.start, pos.saturating_sub(usize::from(offset))) - }) - .map_or(Candidate::None, Candidate::PossibleStartOfMatch) - } -} - -/// A builder for constructing a starting byte prefilter. -/// -/// A starting byte prefilter is a simplistic prefilter that looks for possible -/// matches by reporting all positions corresponding to a particular byte. This -/// generally only takes affect when there are at most 3 distinct possible -/// starting bytes. e.g., the patterns `foo`, `bar`, and `baz` have two -/// distinct starting bytes (`f` and `b`), and this prefilter returns all -/// occurrences of either `f` or `b`. -/// -/// In some cases, a heuristic frequency analysis may determine that it would -/// be better not to use this prefilter even when there are 3 or fewer distinct -/// starting bytes. -#[derive(Clone, Debug)] -struct StartBytesBuilder { - /// Whether this prefilter should account for ASCII case insensitivity or - /// not. - ascii_case_insensitive: bool, - /// The set of starting bytes observed. - byteset: Vec<bool>, - /// The number of bytes set to true in `byteset`. - count: usize, - /// The sum of frequency ranks for the rare bytes detected. This is - /// intended to give a heuristic notion of how rare the bytes are. - rank_sum: u16, -} - -impl StartBytesBuilder { - /// Create a new builder for constructing a start byte prefilter. - fn new() -> StartBytesBuilder { - StartBytesBuilder { - ascii_case_insensitive: false, - byteset: vec![false; 256], - count: 0, - rank_sum: 0, - } - } - - /// Enable ASCII case insensitivity. When set, byte strings added to this - /// builder will be interpreted without respect to ASCII case. - fn ascii_case_insensitive(mut self, yes: bool) -> StartBytesBuilder { - self.ascii_case_insensitive = yes; - self - } - - /// Build the starting bytes prefilter. 
- /// - /// If there are more than 3 distinct starting bytes, or if heuristics - /// otherwise determine that this prefilter should not be used, then `None` - /// is returned. - fn build(&self) -> Option<Prefilter> { - #[cfg(feature = "perf-literal")] - fn imp(builder: &StartBytesBuilder) -> Option<Prefilter> { - if builder.count > 3 { - return None; - } - let (mut bytes, mut len) = ([0; 3], 0); - for b in 0..256 { - if !builder.byteset[b] { - continue; - } - // We don't handle non-ASCII bytes for now. Getting non-ASCII - // bytes right is trickier, since we generally don't want to put - // a leading UTF-8 code unit into a prefilter that isn't ASCII, - // since they can frequently. Instead, it would be better to use a - // continuation byte, but this requires more sophisticated analysis - // of the automaton and a richer prefilter API. - if b > 0x7F { - return None; - } - bytes[len] = b as u8; - len += 1; - } - let finder: Arc<dyn PrefilterI> = match len { - 0 => return None, - 1 => Arc::new(StartBytesOne { byte1: bytes[0] }), - 2 => Arc::new(StartBytesTwo { - byte1: bytes[0], - byte2: bytes[1], - }), - 3 => Arc::new(StartBytesThree { - byte1: bytes[0], - byte2: bytes[1], - byte3: bytes[2], - }), - _ => unreachable!(), - }; - Some(Prefilter { finder, memory_usage: 0 }) - } - - #[cfg(not(feature = "perf-literal"))] - fn imp(_: &StartBytesBuilder) -> Option<Prefilter> { - None - } - - imp(self) - } - - /// Add a byte string to this builder. - /// - /// All patterns added to an Aho-Corasick automaton should be added to this - /// builder before attempting to construct the prefilter. - fn add(&mut self, bytes: &[u8]) { - if self.count > 3 { - return; - } - if let Some(&byte) = bytes.first() { - self.add_one_byte(byte); - if self.ascii_case_insensitive { - self.add_one_byte(opposite_ascii_case(byte)); - } - } - } - - fn add_one_byte(&mut self, byte: u8) { - if !self.byteset[byte as usize] { - self.byteset[byte as usize] = true; - self.count += 1; - self.rank_sum += freq_rank(byte) as u16; - } - } -} - -/// A prefilter for scanning for a single starting byte. -#[cfg(feature = "perf-literal")] -#[derive(Clone, Debug)] -struct StartBytesOne { - byte1: u8, -} - -#[cfg(feature = "perf-literal")] -impl PrefilterI for StartBytesOne { - fn find_in(&self, haystack: &[u8], span: Span) -> Candidate { - memchr::memchr(self.byte1, &haystack[span]) - .map(|i| span.start + i) - .map_or(Candidate::None, Candidate::PossibleStartOfMatch) - } -} - -/// A prefilter for scanning for two starting bytes. -#[cfg(feature = "perf-literal")] -#[derive(Clone, Debug)] -struct StartBytesTwo { - byte1: u8, - byte2: u8, -} - -#[cfg(feature = "perf-literal")] -impl PrefilterI for StartBytesTwo { - fn find_in(&self, haystack: &[u8], span: Span) -> Candidate { - memchr::memchr2(self.byte1, self.byte2, &haystack[span]) - .map(|i| span.start + i) - .map_or(Candidate::None, Candidate::PossibleStartOfMatch) - } -} - -/// A prefilter for scanning for three starting bytes. -#[cfg(feature = "perf-literal")] -#[derive(Clone, Debug)] -struct StartBytesThree { - byte1: u8, - byte2: u8, - byte3: u8, -} - -#[cfg(feature = "perf-literal")] -impl PrefilterI for StartBytesThree { - fn find_in(&self, haystack: &[u8], span: Span) -> Candidate { - memchr::memchr3(self.byte1, self.byte2, self.byte3, &haystack[span]) - .map(|i| span.start + i) - .map_or(Candidate::None, Candidate::PossibleStartOfMatch) - } -} - -/// If the given byte is an ASCII letter, then return it in the opposite case. 
-/// e.g., Given `b'A'`, this returns `b'a'`, and given `b'a'`, this returns -/// `b'A'`. If a non-ASCII letter is given, then the given byte is returned. -pub(crate) fn opposite_ascii_case(b: u8) -> u8 { - if b'A' <= b && b <= b'Z' { - b.to_ascii_lowercase() - } else if b'a' <= b && b <= b'z' { - b.to_ascii_uppercase() - } else { - b - } -} - -/// Return the frequency rank of the given byte. The higher the rank, the more -/// common the byte (heuristically speaking). -fn freq_rank(b: u8) -> u8 { - use crate::util::byte_frequencies::BYTE_FREQUENCIES; - BYTE_FREQUENCIES[b as usize] -} diff --git a/vendor/aho-corasick/src/util/primitives.rs b/vendor/aho-corasick/src/util/primitives.rs deleted file mode 100644 index 784d3971713d10..00000000000000 --- a/vendor/aho-corasick/src/util/primitives.rs +++ /dev/null @@ -1,759 +0,0 @@ -/*! -Lower level primitive types that are useful in a variety of circumstances. - -# Overview - -This list represents the principle types in this module and briefly describes -when you might want to use them. - -* [`PatternID`] - A type that represents the identifier of a regex pattern. -This is probably the most widely used type in this module (which is why it's -also re-exported in the crate root). -* [`StateID`] - A type the represents the identifier of a finite automaton -state. This is used for both NFAs and DFAs, with the notable exception of -the hybrid NFA/DFA. (The hybrid NFA/DFA uses a special purpose "lazy" state -identifier.) -* [`SmallIndex`] - The internal representation of both a `PatternID` and a -`StateID`. Its purpose is to serve as a type that can index memory without -being as big as a `usize` on 64-bit targets. The main idea behind this type -is that there are many things in regex engines that will, in practice, never -overflow a 32-bit integer. (For example, like the number of patterns in a regex -or the number of states in an NFA.) Thus, a `SmallIndex` can be used to index -memory without peppering `as` casts everywhere. Moreover, it forces callers -to handle errors in the case where, somehow, the value would otherwise overflow -either a 32-bit integer or a `usize` (e.g., on 16-bit targets). -*/ - -// The macro we use to define some types below adds methods that we don't -// use on some of the types. There isn't much, so we just squash the warning. -#![allow(dead_code)] - -use alloc::vec::Vec; - -use crate::util::int::{Usize, U16, U32, U64}; - -/// A type that represents a "small" index. -/// -/// The main idea of this type is to provide something that can index memory, -/// but uses less memory than `usize` on 64-bit systems. Specifically, its -/// representation is always a `u32` and has `repr(transparent)` enabled. (So -/// it is safe to transmute between a `u32` and a `SmallIndex`.) -/// -/// A small index is typically useful in cases where there is no practical way -/// that the index will overflow a 32-bit integer. A good example of this is -/// an NFA state. If you could somehow build an NFA with `2^30` states, its -/// memory usage would be exorbitant and its runtime execution would be so -/// slow as to be completely worthless. Therefore, this crate generally deems -/// it acceptable to return an error if it would otherwise build an NFA that -/// requires a slice longer than what a 32-bit integer can index. In exchange, -/// we can use 32-bit indices instead of 64-bit indices in various places. -/// -/// This type ensures this by providing a constructor that will return an error -/// if its argument cannot fit into the type. 
This makes it much easier to -/// handle these sorts of boundary cases that are otherwise extremely subtle. -/// -/// On all targets, this type guarantees that its value will fit in a `u32`, -/// `i32`, `usize` and an `isize`. This means that on 16-bit targets, for -/// example, this type's maximum value will never overflow an `isize`, -/// which means it will never overflow a `i16` even though its internal -/// representation is still a `u32`. -/// -/// The purpose for making the type fit into even signed integer types like -/// `isize` is to guarantee that the difference between any two small indices -/// is itself also a small index. This is useful in certain contexts, e.g., -/// for delta encoding. -/// -/// # Other types -/// -/// The following types wrap `SmallIndex` to provide a more focused use case: -/// -/// * [`PatternID`] is for representing the identifiers of patterns. -/// * [`StateID`] is for representing the identifiers of states in finite -/// automata. It is used for both NFAs and DFAs. -/// -/// # Representation -/// -/// This type is always represented internally by a `u32` and is marked as -/// `repr(transparent)`. Thus, this type always has the same representation as -/// a `u32`. It is thus safe to transmute between a `u32` and a `SmallIndex`. -/// -/// # Indexing -/// -/// For convenience, callers may use a `SmallIndex` to index slices. -/// -/// # Safety -/// -/// While a `SmallIndex` is meant to guarantee that its value fits into `usize` -/// without using as much space as a `usize` on all targets, callers must -/// not rely on this property for safety. Callers may choose to rely on this -/// property for correctness however. For example, creating a `SmallIndex` with -/// an invalid value can be done in entirely safe code. This may in turn result -/// in panics or silent logical errors. -#[derive( - Clone, Copy, Debug, Default, Eq, Hash, PartialEq, PartialOrd, Ord, -)] -#[repr(transparent)] -pub(crate) struct SmallIndex(u32); - -impl SmallIndex { - /// The maximum index value. - #[cfg(any(target_pointer_width = "32", target_pointer_width = "64"))] - pub const MAX: SmallIndex = - // FIXME: Use as_usize() once const functions in traits are stable. - SmallIndex::new_unchecked(core::i32::MAX as usize - 1); - - /// The maximum index value. - #[cfg(target_pointer_width = "16")] - pub const MAX: SmallIndex = - SmallIndex::new_unchecked(core::isize::MAX - 1); - - /// The total number of values that can be represented as a small index. - pub const LIMIT: usize = SmallIndex::MAX.as_usize() + 1; - - /// The zero index value. - pub const ZERO: SmallIndex = SmallIndex::new_unchecked(0); - - /// The number of bytes that a single small index uses in memory. - pub const SIZE: usize = core::mem::size_of::<SmallIndex>(); - - /// Create a new small index. - /// - /// If the given index exceeds [`SmallIndex::MAX`], then this returns - /// an error. - #[inline] - pub fn new(index: usize) -> Result<SmallIndex, SmallIndexError> { - SmallIndex::try_from(index) - } - - /// Create a new small index without checking whether the given value - /// exceeds [`SmallIndex::MAX`]. - /// - /// Using this routine with an invalid index value will result in - /// unspecified behavior, but *not* undefined behavior. In particular, an - /// invalid index value is likely to cause panics or possibly even silent - /// logical errors. - /// - /// Callers must never rely on a `SmallIndex` to be within a certain range - /// for memory safety. 
- #[inline] - pub const fn new_unchecked(index: usize) -> SmallIndex { - // FIXME: Use as_u32() once const functions in traits are stable. - SmallIndex::from_u32_unchecked(index as u32) - } - - /// Create a new small index from a `u32` without checking whether the - /// given value exceeds [`SmallIndex::MAX`]. - /// - /// Using this routine with an invalid index value will result in - /// unspecified behavior, but *not* undefined behavior. In particular, an - /// invalid index value is likely to cause panics or possibly even silent - /// logical errors. - /// - /// Callers must never rely on a `SmallIndex` to be within a certain range - /// for memory safety. - #[inline] - pub const fn from_u32_unchecked(index: u32) -> SmallIndex { - SmallIndex(index) - } - - /// Like [`SmallIndex::new`], but panics if the given index is not valid. - #[inline] - pub fn must(index: usize) -> SmallIndex { - SmallIndex::new(index).expect("invalid small index") - } - - /// Return this small index as a `usize`. This is guaranteed to never - /// overflow `usize`. - #[inline] - pub const fn as_usize(&self) -> usize { - // FIXME: Use as_usize() once const functions in traits are stable. - self.0 as usize - } - - /// Return this small index as a `u64`. This is guaranteed to never - /// overflow. - #[inline] - pub const fn as_u64(&self) -> u64 { - // FIXME: Use u64::from() once const functions in traits are stable. - self.0 as u64 - } - - /// Return the internal `u32` of this small index. This is guaranteed to - /// never overflow `u32`. - #[inline] - pub const fn as_u32(&self) -> u32 { - self.0 - } - - /// Return the internal `u32` of this small index represented as an `i32`. - /// This is guaranteed to never overflow an `i32`. - #[inline] - pub const fn as_i32(&self) -> i32 { - // This is OK because we guarantee that our max value is <= i32::MAX. - self.0 as i32 - } - - /// Returns one more than this small index as a usize. - /// - /// Since a small index has constraints on its maximum value, adding `1` to - /// it will always fit in a `usize`, `isize`, `u32` and a `i32`. - #[inline] - pub fn one_more(&self) -> usize { - self.as_usize() + 1 - } - - /// Decode this small index from the bytes given using the native endian - /// byte order for the current target. - /// - /// If the decoded integer is not representable as a small index for the - /// current target, then this returns an error. - #[inline] - pub fn from_ne_bytes( - bytes: [u8; 4], - ) -> Result<SmallIndex, SmallIndexError> { - let id = u32::from_ne_bytes(bytes); - if id > SmallIndex::MAX.as_u32() { - return Err(SmallIndexError { attempted: u64::from(id) }); - } - Ok(SmallIndex::new_unchecked(id.as_usize())) - } - - /// Decode this small index from the bytes given using the native endian - /// byte order for the current target. - /// - /// This is analogous to [`SmallIndex::new_unchecked`] in that is does not - /// check whether the decoded integer is representable as a small index. - #[inline] - pub fn from_ne_bytes_unchecked(bytes: [u8; 4]) -> SmallIndex { - SmallIndex::new_unchecked(u32::from_ne_bytes(bytes).as_usize()) - } - - /// Return the underlying small index integer as raw bytes in native endian - /// format. 
- #[inline] - pub fn to_ne_bytes(&self) -> [u8; 4] { - self.0.to_ne_bytes() - } -} - -impl<T> core::ops::Index<SmallIndex> for [T] { - type Output = T; - - #[inline] - fn index(&self, index: SmallIndex) -> &T { - &self[index.as_usize()] - } -} - -impl<T> core::ops::IndexMut<SmallIndex> for [T] { - #[inline] - fn index_mut(&mut self, index: SmallIndex) -> &mut T { - &mut self[index.as_usize()] - } -} - -impl<T> core::ops::Index<SmallIndex> for Vec<T> { - type Output = T; - - #[inline] - fn index(&self, index: SmallIndex) -> &T { - &self[index.as_usize()] - } -} - -impl<T> core::ops::IndexMut<SmallIndex> for Vec<T> { - #[inline] - fn index_mut(&mut self, index: SmallIndex) -> &mut T { - &mut self[index.as_usize()] - } -} - -impl From<StateID> for SmallIndex { - fn from(sid: StateID) -> SmallIndex { - sid.0 - } -} - -impl From<PatternID> for SmallIndex { - fn from(pid: PatternID) -> SmallIndex { - pid.0 - } -} - -impl From<u8> for SmallIndex { - fn from(index: u8) -> SmallIndex { - SmallIndex::new_unchecked(usize::from(index)) - } -} - -impl TryFrom<u16> for SmallIndex { - type Error = SmallIndexError; - - fn try_from(index: u16) -> Result<SmallIndex, SmallIndexError> { - if u32::from(index) > SmallIndex::MAX.as_u32() { - return Err(SmallIndexError { attempted: u64::from(index) }); - } - Ok(SmallIndex::new_unchecked(index.as_usize())) - } -} - -impl TryFrom<u32> for SmallIndex { - type Error = SmallIndexError; - - fn try_from(index: u32) -> Result<SmallIndex, SmallIndexError> { - if index > SmallIndex::MAX.as_u32() { - return Err(SmallIndexError { attempted: u64::from(index) }); - } - Ok(SmallIndex::new_unchecked(index.as_usize())) - } -} - -impl TryFrom<u64> for SmallIndex { - type Error = SmallIndexError; - - fn try_from(index: u64) -> Result<SmallIndex, SmallIndexError> { - if index > SmallIndex::MAX.as_u64() { - return Err(SmallIndexError { attempted: index }); - } - Ok(SmallIndex::new_unchecked(index.as_usize())) - } -} - -impl TryFrom<usize> for SmallIndex { - type Error = SmallIndexError; - - fn try_from(index: usize) -> Result<SmallIndex, SmallIndexError> { - if index > SmallIndex::MAX.as_usize() { - return Err(SmallIndexError { attempted: index.as_u64() }); - } - Ok(SmallIndex::new_unchecked(index)) - } -} - -/// This error occurs when a small index could not be constructed. -/// -/// This occurs when given an integer exceeding the maximum small index value. -/// -/// When the `std` feature is enabled, this implements the `Error` trait. -#[derive(Clone, Debug, Eq, PartialEq)] -pub struct SmallIndexError { - attempted: u64, -} - -impl SmallIndexError { - /// Returns the value that could not be converted to a small index. - pub fn attempted(&self) -> u64 { - self.attempted - } -} - -#[cfg(feature = "std")] -impl std::error::Error for SmallIndexError {} - -impl core::fmt::Display for SmallIndexError { - fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { - write!( - f, - "failed to create small index from {:?}, which exceeds {:?}", - self.attempted(), - SmallIndex::MAX, - ) - } -} - -#[derive(Clone, Debug)] -pub(crate) struct SmallIndexIter { - rng: core::ops::Range<usize>, -} - -impl Iterator for SmallIndexIter { - type Item = SmallIndex; - - fn next(&mut self) -> Option<SmallIndex> { - if self.rng.start >= self.rng.end { - return None; - } - let next_id = self.rng.start + 1; - let id = core::mem::replace(&mut self.rng.start, next_id); - // new_unchecked is OK since we asserted that the number of - // elements in this iterator will fit in an ID at construction. 
- Some(SmallIndex::new_unchecked(id)) - } -} - -macro_rules! index_type_impls { - ($name:ident, $err:ident, $iter:ident, $withiter:ident) => { - impl $name { - /// The maximum value. - pub const MAX: $name = $name(SmallIndex::MAX); - - /// The total number of values that can be represented. - pub const LIMIT: usize = SmallIndex::LIMIT; - - /// The zero value. - pub const ZERO: $name = $name(SmallIndex::ZERO); - - /// The number of bytes that a single value uses in memory. - pub const SIZE: usize = SmallIndex::SIZE; - - /// Create a new value that is represented by a "small index." - /// - /// If the given index exceeds the maximum allowed value, then this - /// returns an error. - #[inline] - pub fn new(value: usize) -> Result<$name, $err> { - SmallIndex::new(value).map($name).map_err($err) - } - - /// Create a new value without checking whether the given argument - /// exceeds the maximum. - /// - /// Using this routine with an invalid value will result in - /// unspecified behavior, but *not* undefined behavior. In - /// particular, an invalid ID value is likely to cause panics or - /// possibly even silent logical errors. - /// - /// Callers must never rely on this type to be within a certain - /// range for memory safety. - #[inline] - pub const fn new_unchecked(value: usize) -> $name { - $name(SmallIndex::new_unchecked(value)) - } - - /// Create a new value from a `u32` without checking whether the - /// given value exceeds the maximum. - /// - /// Using this routine with an invalid value will result in - /// unspecified behavior, but *not* undefined behavior. In - /// particular, an invalid ID value is likely to cause panics or - /// possibly even silent logical errors. - /// - /// Callers must never rely on this type to be within a certain - /// range for memory safety. - #[inline] - pub const fn from_u32_unchecked(index: u32) -> $name { - $name(SmallIndex::from_u32_unchecked(index)) - } - - /// Like `new`, but panics if the given value is not valid. - #[inline] - pub fn must(value: usize) -> $name { - $name::new(value).expect(concat!( - "invalid ", - stringify!($name), - " value" - )) - } - - /// Return the internal value as a `usize`. This is guaranteed to - /// never overflow `usize`. - #[inline] - pub const fn as_usize(&self) -> usize { - self.0.as_usize() - } - - /// Return the internal value as a `u64`. This is guaranteed to - /// never overflow. - #[inline] - pub const fn as_u64(&self) -> u64 { - self.0.as_u64() - } - - /// Return the internal value as a `u32`. This is guaranteed to - /// never overflow `u32`. - #[inline] - pub const fn as_u32(&self) -> u32 { - self.0.as_u32() - } - - /// Return the internal value as a `i32`. This is guaranteed to - /// never overflow an `i32`. - #[inline] - pub const fn as_i32(&self) -> i32 { - self.0.as_i32() - } - - /// Returns one more than this value as a usize. - /// - /// Since values represented by a "small index" have constraints - /// on their maximum value, adding `1` to it will always fit in a - /// `usize`, `u32` and a `i32`. - #[inline] - pub fn one_more(&self) -> usize { - self.0.one_more() - } - - /// Decode this value from the bytes given using the native endian - /// byte order for the current target. - /// - /// If the decoded integer is not representable as a small index - /// for the current target, then this returns an error. 
- #[inline] - pub fn from_ne_bytes(bytes: [u8; 4]) -> Result<$name, $err> { - SmallIndex::from_ne_bytes(bytes).map($name).map_err($err) - } - - /// Decode this value from the bytes given using the native endian - /// byte order for the current target. - /// - /// This is analogous to `new_unchecked` in that is does not check - /// whether the decoded integer is representable as a small index. - #[inline] - pub fn from_ne_bytes_unchecked(bytes: [u8; 4]) -> $name { - $name(SmallIndex::from_ne_bytes_unchecked(bytes)) - } - - /// Return the underlying integer as raw bytes in native endian - /// format. - #[inline] - pub fn to_ne_bytes(&self) -> [u8; 4] { - self.0.to_ne_bytes() - } - - /// Returns an iterator over all values from 0 up to and not - /// including the given length. - /// - /// If the given length exceeds this type's limit, then this - /// panics. - pub(crate) fn iter(len: usize) -> $iter { - $iter::new(len) - } - } - - // We write our own Debug impl so that we get things like PatternID(5) - // instead of PatternID(SmallIndex(5)). - impl core::fmt::Debug for $name { - fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { - f.debug_tuple(stringify!($name)).field(&self.as_u32()).finish() - } - } - - impl<T> core::ops::Index<$name> for [T] { - type Output = T; - - #[inline] - fn index(&self, index: $name) -> &T { - &self[index.as_usize()] - } - } - - impl<T> core::ops::IndexMut<$name> for [T] { - #[inline] - fn index_mut(&mut self, index: $name) -> &mut T { - &mut self[index.as_usize()] - } - } - - impl<T> core::ops::Index<$name> for Vec<T> { - type Output = T; - - #[inline] - fn index(&self, index: $name) -> &T { - &self[index.as_usize()] - } - } - - impl<T> core::ops::IndexMut<$name> for Vec<T> { - #[inline] - fn index_mut(&mut self, index: $name) -> &mut T { - &mut self[index.as_usize()] - } - } - - impl From<SmallIndex> for $name { - fn from(index: SmallIndex) -> $name { - $name(index) - } - } - - impl From<u8> for $name { - fn from(value: u8) -> $name { - $name(SmallIndex::from(value)) - } - } - - impl TryFrom<u16> for $name { - type Error = $err; - - fn try_from(value: u16) -> Result<$name, $err> { - SmallIndex::try_from(value).map($name).map_err($err) - } - } - - impl TryFrom<u32> for $name { - type Error = $err; - - fn try_from(value: u32) -> Result<$name, $err> { - SmallIndex::try_from(value).map($name).map_err($err) - } - } - - impl TryFrom<u64> for $name { - type Error = $err; - - fn try_from(value: u64) -> Result<$name, $err> { - SmallIndex::try_from(value).map($name).map_err($err) - } - } - - impl TryFrom<usize> for $name { - type Error = $err; - - fn try_from(value: usize) -> Result<$name, $err> { - SmallIndex::try_from(value).map($name).map_err($err) - } - } - - /// This error occurs when an ID could not be constructed. - /// - /// This occurs when given an integer exceeding the maximum allowed - /// value. - /// - /// When the `std` feature is enabled, this implements the `Error` - /// trait. - #[derive(Clone, Debug, Eq, PartialEq)] - pub struct $err(SmallIndexError); - - impl $err { - /// Returns the value that could not be converted to an ID. 
- pub fn attempted(&self) -> u64 { - self.0.attempted() - } - } - - #[cfg(feature = "std")] - impl std::error::Error for $err {} - - impl core::fmt::Display for $err { - fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { - write!( - f, - "failed to create {} from {:?}, which exceeds {:?}", - stringify!($name), - self.attempted(), - $name::MAX, - ) - } - } - - #[derive(Clone, Debug)] - pub(crate) struct $iter(SmallIndexIter); - - impl $iter { - fn new(len: usize) -> $iter { - assert!( - len <= $name::LIMIT, - "cannot create iterator for {} when number of \ - elements exceed {:?}", - stringify!($name), - $name::LIMIT, - ); - $iter(SmallIndexIter { rng: 0..len }) - } - } - - impl Iterator for $iter { - type Item = $name; - - fn next(&mut self) -> Option<$name> { - self.0.next().map($name) - } - } - - /// An iterator adapter that is like std::iter::Enumerate, but attaches - /// small index values instead. It requires `ExactSizeIterator`. At - /// construction, it ensures that the index of each element in the - /// iterator is representable in the corresponding small index type. - #[derive(Clone, Debug)] - pub(crate) struct $withiter<I> { - it: I, - ids: $iter, - } - - impl<I: Iterator + ExactSizeIterator> $withiter<I> { - fn new(it: I) -> $withiter<I> { - let ids = $name::iter(it.len()); - $withiter { it, ids } - } - } - - impl<I: Iterator + ExactSizeIterator> Iterator for $withiter<I> { - type Item = ($name, I::Item); - - fn next(&mut self) -> Option<($name, I::Item)> { - let item = self.it.next()?; - // Number of elements in this iterator must match, according - // to contract of ExactSizeIterator. - let id = self.ids.next().unwrap(); - Some((id, item)) - } - } - }; -} - -/// The identifier of a pattern in an Aho-Corasick automaton. -/// -/// It is represented by a `u32` even on 64-bit systems in order to conserve -/// space. Namely, on all targets, this type guarantees that its value will -/// fit in a `u32`, `i32`, `usize` and an `isize`. This means that on 16-bit -/// targets, for example, this type's maximum value will never overflow an -/// `isize`, which means it will never overflow a `i16` even though its -/// internal representation is still a `u32`. -/// -/// # Safety -/// -/// While a `PatternID` is meant to guarantee that its value fits into `usize` -/// without using as much space as a `usize` on all targets, callers must -/// not rely on this property for safety. Callers may choose to rely on this -/// property for correctness however. For example, creating a `StateID` with an -/// invalid value can be done in entirely safe code. This may in turn result in -/// panics or silent logical errors. -#[derive(Clone, Copy, Default, Eq, Hash, PartialEq, PartialOrd, Ord)] -#[repr(transparent)] -pub struct PatternID(SmallIndex); - -/// The identifier of a finite automaton state. -/// -/// It is represented by a `u32` even on 64-bit systems in order to conserve -/// space. Namely, on all targets, this type guarantees that its value will -/// fit in a `u32`, `i32`, `usize` and an `isize`. This means that on 16-bit -/// targets, for example, this type's maximum value will never overflow an -/// `isize`, which means it will never overflow a `i16` even though its -/// internal representation is still a `u32`. -/// -/// # Safety -/// -/// While a `StateID` is meant to guarantee that its value fits into `usize` -/// without using as much space as a `usize` on all targets, callers must -/// not rely on this property for safety. 
Callers may choose to rely on this -/// property for correctness however. For example, creating a `StateID` with an -/// invalid value can be done in entirely safe code. This may in turn result in -/// panics or silent logical errors. -#[derive(Clone, Copy, Default, Eq, Hash, PartialEq, PartialOrd, Ord)] -#[repr(transparent)] -pub struct StateID(SmallIndex); - -index_type_impls!(PatternID, PatternIDError, PatternIDIter, WithPatternIDIter); -index_type_impls!(StateID, StateIDError, StateIDIter, WithStateIDIter); - -/// A utility trait that defines a couple of adapters for making it convenient -/// to access indices as "small index" types. We require ExactSizeIterator so -/// that iterator construction can do a single check to make sure the index of -/// each element is representable by its small index type. -pub(crate) trait IteratorIndexExt: Iterator { - fn with_pattern_ids(self) -> WithPatternIDIter<Self> - where - Self: Sized + ExactSizeIterator, - { - WithPatternIDIter::new(self) - } - - fn with_state_ids(self) -> WithStateIDIter<Self> - where - Self: Sized + ExactSizeIterator, - { - WithStateIDIter::new(self) - } -} - -impl<I: Iterator> IteratorIndexExt for I {} diff --git a/vendor/aho-corasick/src/util/remapper.rs b/vendor/aho-corasick/src/util/remapper.rs deleted file mode 100644 index 7c47a082cdd221..00000000000000 --- a/vendor/aho-corasick/src/util/remapper.rs +++ /dev/null @@ -1,214 +0,0 @@ -use alloc::vec::Vec; - -use crate::{nfa::noncontiguous, util::primitives::StateID}; - -/// Remappable is a tightly coupled abstraction that facilitates remapping -/// state identifiers in DFAs. -/// -/// The main idea behind remapping state IDs is that DFAs often need to check -/// if a certain state is a "special" state of some kind (like a match state) -/// during a search. Since this is extremely perf critical code, we want this -/// check to be as fast as possible. Partitioning state IDs into, for example, -/// into "non-match" and "match" states means one can tell if a state is a -/// match state via a simple comparison of the state ID. -/// -/// The issue is that during the DFA construction process, it's not -/// particularly easy to partition the states. Instead, the simplest thing is -/// to often just do a pass over all of the states and shuffle them into their -/// desired partitionings. To do that, we need a mechanism for swapping states. -/// Hence, this abstraction. -/// -/// Normally, for such little code, I would just duplicate it. But this is a -/// key optimization and the implementation is a bit subtle. So the abstraction -/// is basically a ham-fisted attempt at DRY. The only place we use this is in -/// the dense and one-pass DFAs. -/// -/// See also src/dfa/special.rs for a more detailed explanation of how dense -/// DFAs are partitioned. -pub(crate) trait Remappable: core::fmt::Debug { - /// Return the total number of states. - fn state_len(&self) -> usize; - - /// Swap the states pointed to by the given IDs. The underlying finite - /// state machine should be mutated such that all of the transitions in - /// `id1` are now in the memory region where the transitions for `id2` - /// were, and all of the transitions in `id2` are now in the memory region - /// where the transitions for `id1` were. - /// - /// Essentially, this "moves" `id1` to `id2` and `id2` to `id1`. 
- /// - /// It is expected that, after calling this, the underlying state machine - /// will be left in an inconsistent state, since any other transitions - /// pointing to, e.g., `id1` need to be updated to point to `id2`, since - /// that's where `id1` moved to. - /// - /// In order to "fix" the underlying inconsistent state, a `Remapper` - /// should be used to guarantee that `remap` is called at the appropriate - /// time. - fn swap_states(&mut self, id1: StateID, id2: StateID); - - /// This must remap every single state ID in the underlying value according - /// to the function given. For example, in a DFA, this should remap every - /// transition and every starting state ID. - fn remap(&mut self, map: impl Fn(StateID) -> StateID); -} - -/// Remapper is an abstraction the manages the remapping of state IDs in a -/// finite state machine. This is useful when one wants to shuffle states into -/// different positions in the machine. -/// -/// One of the key complexities this manages is the ability to correctly move -/// one state multiple times. -/// -/// Once shuffling is complete, `remap` must be called, which will rewrite -/// all pertinent transitions to updated state IDs. Neglecting to call `remap` -/// will almost certainly result in a corrupt machine. -#[derive(Debug)] -pub(crate) struct Remapper { - /// A map from the index of a state to its pre-multiplied identifier. - /// - /// When a state is swapped with another, then their corresponding - /// locations in this map are also swapped. Thus, its new position will - /// still point to its old pre-multiplied StateID. - /// - /// While there is a bit more to it, this then allows us to rewrite the - /// state IDs in a DFA's transition table in a single pass. This is done - /// by iterating over every ID in this map, then iterating over each - /// transition for the state at that ID and re-mapping the transition from - /// `old_id` to `map[dfa.to_index(old_id)]`. That is, we find the position - /// in this map where `old_id` *started*, and set it to where it ended up - /// after all swaps have been completed. - map: Vec<StateID>, - /// A way to map indices to state IDs (and back). - idx: IndexMapper, -} - -impl Remapper { - /// Create a new remapper from the given remappable implementation. The - /// remapper can then be used to swap states. The remappable value given - /// here must the same one given to `swap` and `remap`. - /// - /// The given stride should be the stride of the transition table expressed - /// as a power of 2. This stride is used to map between state IDs and state - /// indices. If state IDs and state indices are equivalent, then provide - /// a `stride2` of `0`, which acts as an identity. - pub(crate) fn new(r: &impl Remappable, stride2: usize) -> Remapper { - let idx = IndexMapper { stride2 }; - let map = (0..r.state_len()).map(|i| idx.to_state_id(i)).collect(); - Remapper { map, idx } - } - - /// Swap two states. Once this is called, callers must follow through to - /// call `remap`, or else it's possible for the underlying remappable - /// value to be in a corrupt state. - pub(crate) fn swap( - &mut self, - r: &mut impl Remappable, - id1: StateID, - id2: StateID, - ) { - if id1 == id2 { - return; - } - r.swap_states(id1, id2); - self.map.swap(self.idx.to_index(id1), self.idx.to_index(id2)); - } - - /// Complete the remapping process by rewriting all state IDs in the - /// remappable value according to the swaps performed. 
- pub(crate) fn remap(mut self, r: &mut impl Remappable) { - // Update the map to account for states that have been swapped - // multiple times. For example, if (A, C) and (C, G) are swapped, then - // transitions previously pointing to A should now point to G. But if - // we don't update our map, they will erroneously be set to C. All we - // do is follow the swaps in our map until we see our original state - // ID. - // - // The intuition here is to think about how changes are made to the - // map: only through pairwise swaps. That means that starting at any - // given state, it is always possible to find the loop back to that - // state by following the swaps represented in the map (which might be - // 0 swaps). - // - // We are also careful to clone the map before starting in order to - // freeze it. We use the frozen map to find our loops, since we need to - // update our map as well. Without freezing it, our updates could break - // the loops referenced above and produce incorrect results. - let oldmap = self.map.clone(); - for i in 0..r.state_len() { - let cur_id = self.idx.to_state_id(i); - let mut new_id = oldmap[i]; - if cur_id == new_id { - continue; - } - loop { - let id = oldmap[self.idx.to_index(new_id)]; - if cur_id == id { - self.map[i] = new_id; - break; - } - new_id = id; - } - } - r.remap(|sid| self.map[self.idx.to_index(sid)]); - } -} - -/// A simple type for mapping between state indices and state IDs. -/// -/// The reason why this exists is because state IDs are "premultiplied" in a -/// DFA. That is, in order to get to the transitions for a particular state, -/// one need only use the state ID as-is, instead of having to multiply it by -/// transition table's stride. -/// -/// The downside of this is that it's inconvenient to map between state IDs -/// using a dense map, e.g., Vec<StateID>. That's because state IDs look like -/// `0`, `stride`, `2*stride`, `3*stride`, etc., instead of `0`, `1`, `2`, `3`, -/// etc. -/// -/// Since our state IDs are premultiplied, we can convert back-and-forth -/// between IDs and indices by simply unmultiplying the IDs and multiplying the -/// indices. -/// -/// Note that for a sparse NFA, state IDs and indices are equivalent. In this -/// case, we set the stride of the index mapped to be `0`, which acts as an -/// identity. -#[derive(Debug)] -struct IndexMapper { - /// The power of 2 corresponding to the stride of the corresponding - /// transition table. 'id >> stride2' de-multiplies an ID while 'index << - /// stride2' pre-multiplies an index to an ID. - stride2: usize, -} - -impl IndexMapper { - /// Convert a state ID to a state index. - fn to_index(&self, id: StateID) -> usize { - id.as_usize() >> self.stride2 - } - - /// Convert a state index to a state ID. - fn to_state_id(&self, index: usize) -> StateID { - // CORRECTNESS: If the given index is not valid, then it is not - // required for this to panic or return a valid state ID. We'll "just" - // wind up with panics or silent logic errors at some other point. But - // this is OK because if Remappable::state_len is correct and so is - // 'to_index', then all inputs to 'to_state_id' should be valid indices - // and thus transform into valid state IDs. 
- StateID::new_unchecked(index << self.stride2) - } -} - -impl Remappable for noncontiguous::NFA { - fn state_len(&self) -> usize { - noncontiguous::NFA::states(self).len() - } - - fn swap_states(&mut self, id1: StateID, id2: StateID) { - noncontiguous::NFA::swap_states(self, id1, id2) - } - - fn remap(&mut self, map: impl Fn(StateID) -> StateID) { - noncontiguous::NFA::remap(self, map) - } -} diff --git a/vendor/aho-corasick/src/util/search.rs b/vendor/aho-corasick/src/util/search.rs deleted file mode 100644 index 59b7035e1ffd17..00000000000000 --- a/vendor/aho-corasick/src/util/search.rs +++ /dev/null @@ -1,1148 +0,0 @@ -use core::ops::{Range, RangeBounds}; - -use crate::util::primitives::PatternID; - -/// The configuration and the haystack to use for an Aho-Corasick search. -/// -/// When executing a search, there are a few parameters one might want to -/// configure: -/// -/// * The haystack to search, provided to the [`Input::new`] constructor. This -/// is the only required parameter. -/// * The span _within_ the haystack to limit a search to. (The default -/// is the entire haystack.) This is configured via [`Input::span`] or -/// [`Input::range`]. -/// * Whether to run an unanchored (matches can occur anywhere after the -/// start of the search) or anchored (matches can only occur beginning at -/// the start of the search) search. Unanchored search is the default. This is -/// configured via [`Input::anchored`]. -/// * Whether to quit the search as soon as a match has been found, regardless -/// of the [`MatchKind`] that the searcher was built with. This is configured -/// via [`Input::earliest`]. -/// -/// For most cases, the defaults for all optional parameters are appropriate. -/// The utility of this type is that it keeps the default or common case simple -/// while permitting tweaking parameters in more niche use cases while reusing -/// the same search APIs. -/// -/// # Valid bounds and search termination -/// -/// An `Input` permits setting the bounds of a search via either -/// [`Input::span`] or [`Input::range`]. The bounds set must be valid, or -/// else a panic will occur. Bounds are valid if and only if: -/// -/// * The bounds represent a valid range into the input's haystack. -/// * **or** the end bound is a valid ending bound for the haystack *and* -/// the start bound is exactly one greater than the end bound. -/// -/// In the latter case, [`Input::is_done`] will return true and indicates any -/// search receiving such an input should immediately return with no match. -/// -/// Other than representing "search is complete," the `Input::span` and -/// `Input::range` APIs are never necessary. Instead, callers can slice the -/// haystack instead, e.g., with `&haystack[start..end]`. With that said, they -/// can be more convenient than slicing because the match positions reported -/// when using `Input::span` or `Input::range` are in terms of the original -/// haystack. If you instead use `&haystack[start..end]`, then you'll need to -/// add `start` to any match position returned in order for it to be a correct -/// index into `haystack`. -/// -/// # Example: `&str` and `&[u8]` automatically convert to an `Input` -/// -/// There is a `From<&T> for Input` implementation for all `T: AsRef<[u8]>`. -/// Additionally, the [`AhoCorasick`](crate::AhoCorasick) search APIs accept -/// a `Into<Input>`. These two things combined together mean you can provide -/// things like `&str` and `&[u8]` to search APIs when the defaults are -/// suitable, but also an `Input` when they're not. 
For example: -/// -/// ``` -/// use aho_corasick::{AhoCorasick, Anchored, Input, Match, StartKind}; -/// -/// // Build a searcher that supports both unanchored and anchored modes. -/// let ac = AhoCorasick::builder() -/// .start_kind(StartKind::Both) -/// .build(&["abcd", "b"]) -/// .unwrap(); -/// let haystack = "abcd"; -/// -/// // A search using default parameters is unanchored. With standard -/// // semantics, this finds `b` first. -/// assert_eq!( -/// Some(Match::must(1, 1..2)), -/// ac.find(haystack), -/// ); -/// // Using the same 'find' routine, we can provide an 'Input' explicitly -/// // that is configured to do an anchored search. Since 'b' doesn't start -/// // at the beginning of the search, it is not reported as a match. -/// assert_eq!( -/// Some(Match::must(0, 0..4)), -/// ac.find(Input::new(haystack).anchored(Anchored::Yes)), -/// ); -/// ``` -#[derive(Clone)] -pub struct Input<'h> { - haystack: &'h [u8], - span: Span, - anchored: Anchored, - earliest: bool, -} - -impl<'h> Input<'h> { - /// Create a new search configuration for the given haystack. - #[inline] - pub fn new<H: ?Sized + AsRef<[u8]>>(haystack: &'h H) -> Input<'h> { - Input { - haystack: haystack.as_ref(), - span: Span { start: 0, end: haystack.as_ref().len() }, - anchored: Anchored::No, - earliest: false, - } - } - - /// Set the span for this search. - /// - /// This routine is generic over how a span is provided. While - /// a [`Span`] may be given directly, one may also provide a - /// `std::ops::Range<usize>`. To provide anything supported by range - /// syntax, use the [`Input::range`] method. - /// - /// The default span is the entire haystack. - /// - /// Note that [`Input::range`] overrides this method and vice versa. - /// - /// # Panics - /// - /// This panics if the given span does not correspond to valid bounds in - /// the haystack or the termination of a search. - /// - /// # Example - /// - /// This example shows how the span of the search can impact whether a - /// match is reported or not. - /// - /// ``` - /// use aho_corasick::{AhoCorasick, Input, MatchKind}; - /// - /// let patterns = &["b", "abcd", "abc"]; - /// let haystack = "abcd"; - /// - /// let ac = AhoCorasick::builder() - /// .match_kind(MatchKind::LeftmostFirst) - /// .build(patterns) - /// .unwrap(); - /// let input = Input::new(haystack).span(0..3); - /// let mat = ac.try_find(input)?.expect("should have a match"); - /// // Without the span stopping the search early, 'abcd' would be reported - /// // because it is the correct leftmost-first match. - /// assert_eq!("abc", &haystack[mat.span()]); - /// - /// # Ok::<(), Box<dyn std::error::Error>>(()) - /// ``` - #[inline] - pub fn span<S: Into<Span>>(mut self, span: S) -> Input<'h> { - self.set_span(span); - self - } - - /// Like `Input::span`, but accepts any range instead. - /// - /// The default range is the entire haystack. - /// - /// Note that [`Input::span`] overrides this method and vice versa. - /// - /// # Panics - /// - /// This routine will panic if the given range could not be converted - /// to a valid [`Range`]. For example, this would panic when given - /// `0..=usize::MAX` since it cannot be represented using a half-open - /// interval in terms of `usize`. - /// - /// This routine also panics if the given range does not correspond to - /// valid bounds in the haystack or the termination of a search. 
- /// - /// # Example - /// - /// ``` - /// use aho_corasick::Input; - /// - /// let input = Input::new("foobar"); - /// assert_eq!(0..6, input.get_range()); - /// - /// let input = Input::new("foobar").range(2..=4); - /// assert_eq!(2..5, input.get_range()); - /// ``` - #[inline] - pub fn range<R: RangeBounds<usize>>(mut self, range: R) -> Input<'h> { - self.set_range(range); - self - } - - /// Sets the anchor mode of a search. - /// - /// When a search is anchored (via [`Anchored::Yes`]), a match must begin - /// at the start of a search. When a search is not anchored (that's - /// [`Anchored::No`]), searchers will look for a match anywhere in the - /// haystack. - /// - /// By default, the anchored mode is [`Anchored::No`]. - /// - /// # Support for anchored searches - /// - /// Anchored or unanchored searches might not always be available, - /// depending on the type of searcher used and its configuration: - /// - /// * [`noncontiguous::NFA`](crate::nfa::noncontiguous::NFA) always - /// supports both unanchored and anchored searches. - /// * [`contiguous::NFA`](crate::nfa::contiguous::NFA) always supports both - /// unanchored and anchored searches. - /// * [`dfa::DFA`](crate::dfa::DFA) supports only unanchored - /// searches by default. - /// [`dfa::Builder::start_kind`](crate::dfa::Builder::start_kind) can - /// be used to change the default to supporting both kinds of searches - /// or even just anchored searches. - /// * [`AhoCorasick`](crate::AhoCorasick) inherits the same setup as a - /// `DFA`. Namely, it only supports unanchored searches by default, but - /// [`AhoCorasickBuilder::start_kind`](crate::AhoCorasickBuilder::start_kind) - /// can change this. - /// - /// If you try to execute a search using a `try_` ("fallible") method with - /// an unsupported anchor mode, then an error will be returned. For calls - /// to infallible search methods, a panic will result. - /// - /// # Example - /// - /// This demonstrates the differences between an anchored search and - /// an unanchored search. Notice that we build our `AhoCorasick` searcher - /// with [`StartKind::Both`] so that it supports both unanchored and - /// anchored searches simultaneously. - /// - /// ``` - /// use aho_corasick::{ - /// AhoCorasick, Anchored, Input, MatchKind, StartKind, - /// }; - /// - /// let patterns = &["bcd"]; - /// let haystack = "abcd"; - /// - /// let ac = AhoCorasick::builder() - /// .start_kind(StartKind::Both) - /// .build(patterns) - /// .unwrap(); - /// - /// // Note that 'Anchored::No' is the default, so it doesn't need to - /// // be explicitly specified here. - /// let input = Input::new(haystack); - /// let mat = ac.try_find(input)?.expect("should have a match"); - /// assert_eq!("bcd", &haystack[mat.span()]); - /// - /// // While 'bcd' occurs in the haystack, it does not begin where our - /// // search begins, so no match is found. - /// let input = Input::new(haystack).anchored(Anchored::Yes); - /// assert_eq!(None, ac.try_find(input)?); - /// - /// // However, if we start our search where 'bcd' starts, then we will - /// // find a match. - /// let input = Input::new(haystack).range(1..).anchored(Anchored::Yes); - /// let mat = ac.try_find(input)?.expect("should have a match"); - /// assert_eq!("bcd", &haystack[mat.span()]); - /// - /// # Ok::<(), Box<dyn std::error::Error>>(()) - /// ``` - #[inline] - pub fn anchored(mut self, mode: Anchored) -> Input<'h> { - self.set_anchored(mode); - self - } - - /// Whether to execute an "earliest" search or not. 
- /// - /// When running a non-overlapping search, an "earliest" search will - /// return the match location as early as possible. For example, given - /// the patterns `abc` and `b`, and a haystack of `abc`, a normal - /// leftmost-first search will return `abc` as a match. But an "earliest" - /// search will return as soon as it is known that a match occurs, which - /// happens once `b` is seen. - /// - /// Note that when using [`MatchKind::Standard`], the "earliest" option - /// has no effect since standard semantics are already "earliest." Note - /// also that this has no effect in overlapping searches, since overlapping - /// searches also use standard semantics and report all possible matches. - /// - /// This is disabled by default. - /// - /// # Example - /// - /// This example shows the difference between "earliest" searching and - /// normal leftmost searching. - /// - /// ``` - /// use aho_corasick::{AhoCorasick, Anchored, Input, MatchKind, StartKind}; - /// - /// let patterns = &["abc", "b"]; - /// let haystack = "abc"; - /// - /// let ac = AhoCorasick::builder() - /// .match_kind(MatchKind::LeftmostFirst) - /// .build(patterns) - /// .unwrap(); - /// - /// // The normal leftmost-first match. - /// let input = Input::new(haystack); - /// let mat = ac.try_find(input)?.expect("should have a match"); - /// assert_eq!("abc", &haystack[mat.span()]); - /// - /// // The "earliest" possible match, even if it isn't leftmost-first. - /// let input = Input::new(haystack).earliest(true); - /// let mat = ac.try_find(input)?.expect("should have a match"); - /// assert_eq!("b", &haystack[mat.span()]); - /// - /// # Ok::<(), Box<dyn std::error::Error>>(()) - /// ``` - #[inline] - pub fn earliest(mut self, yes: bool) -> Input<'h> { - self.set_earliest(yes); - self - } - - /// Set the span for this search configuration. - /// - /// This is like the [`Input::span`] method, except this mutates the - /// span in place. - /// - /// This routine is generic over how a span is provided. While - /// a [`Span`] may be given directly, one may also provide a - /// `std::ops::Range<usize>`. - /// - /// # Panics - /// - /// This panics if the given span does not correspond to valid bounds in - /// the haystack or the termination of a search. - /// - /// # Example - /// - /// ``` - /// use aho_corasick::Input; - /// - /// let mut input = Input::new("foobar"); - /// assert_eq!(0..6, input.get_range()); - /// input.set_span(2..4); - /// assert_eq!(2..4, input.get_range()); - /// ``` - #[inline] - pub fn set_span<S: Into<Span>>(&mut self, span: S) { - let span = span.into(); - assert!( - span.end <= self.haystack.len() - && span.start <= span.end.wrapping_add(1), - "invalid span {:?} for haystack of length {}", - span, - self.haystack.len(), - ); - self.span = span; - } - - /// Set the span for this search configuration given any range. - /// - /// This is like the [`Input::range`] method, except this mutates the - /// span in place. - /// - /// # Panics - /// - /// This routine will panic if the given range could not be converted - /// to a valid [`Range`]. For example, this would panic when given - /// `0..=usize::MAX` since it cannot be represented using a half-open - /// interval in terms of `usize`. - /// - /// This routine also panics if the given range does not correspond to - /// valid bounds in the haystack or the termination of a search. 
- /// - /// # Example - /// - /// ``` - /// use aho_corasick::Input; - /// - /// let mut input = Input::new("foobar"); - /// assert_eq!(0..6, input.get_range()); - /// input.set_range(2..=4); - /// assert_eq!(2..5, input.get_range()); - /// ``` - #[inline] - pub fn set_range<R: RangeBounds<usize>>(&mut self, range: R) { - use core::ops::Bound; - - // It's a little weird to convert ranges into spans, and then spans - // back into ranges when we actually slice the haystack. Because - // of that process, we always represent everything as a half-open - // internal. Therefore, handling things like m..=n is a little awkward. - let start = match range.start_bound() { - Bound::Included(&i) => i, - // Can this case ever happen? Range syntax doesn't support it... - Bound::Excluded(&i) => i.checked_add(1).unwrap(), - Bound::Unbounded => 0, - }; - let end = match range.end_bound() { - Bound::Included(&i) => i.checked_add(1).unwrap(), - Bound::Excluded(&i) => i, - Bound::Unbounded => self.haystack().len(), - }; - self.set_span(Span { start, end }); - } - - /// Set the starting offset for the span for this search configuration. - /// - /// This is a convenience routine for only mutating the start of a span - /// without having to set the entire span. - /// - /// # Panics - /// - /// This panics if the given span does not correspond to valid bounds in - /// the haystack or the termination of a search. - /// - /// # Example - /// - /// ``` - /// use aho_corasick::Input; - /// - /// let mut input = Input::new("foobar"); - /// assert_eq!(0..6, input.get_range()); - /// input.set_start(5); - /// assert_eq!(5..6, input.get_range()); - /// ``` - #[inline] - pub fn set_start(&mut self, start: usize) { - self.set_span(Span { start, ..self.get_span() }); - } - - /// Set the ending offset for the span for this search configuration. - /// - /// This is a convenience routine for only mutating the end of a span - /// without having to set the entire span. - /// - /// # Panics - /// - /// This panics if the given span does not correspond to valid bounds in - /// the haystack or the termination of a search. - /// - /// # Example - /// - /// ``` - /// use aho_corasick::Input; - /// - /// let mut input = Input::new("foobar"); - /// assert_eq!(0..6, input.get_range()); - /// input.set_end(5); - /// assert_eq!(0..5, input.get_range()); - /// ``` - #[inline] - pub fn set_end(&mut self, end: usize) { - self.set_span(Span { end, ..self.get_span() }); - } - - /// Set the anchor mode of a search. - /// - /// This is like [`Input::anchored`], except it mutates the search - /// configuration in place. - /// - /// # Example - /// - /// ``` - /// use aho_corasick::{Anchored, Input}; - /// - /// let mut input = Input::new("foobar"); - /// assert_eq!(Anchored::No, input.get_anchored()); - /// - /// input.set_anchored(Anchored::Yes); - /// assert_eq!(Anchored::Yes, input.get_anchored()); - /// ``` - #[inline] - pub fn set_anchored(&mut self, mode: Anchored) { - self.anchored = mode; - } - - /// Set whether the search should execute in "earliest" mode or not. - /// - /// This is like [`Input::earliest`], except it mutates the search - /// configuration in place. 
- /// - /// # Example - /// - /// ``` - /// use aho_corasick::Input; - /// - /// let mut input = Input::new("foobar"); - /// assert!(!input.get_earliest()); - /// input.set_earliest(true); - /// assert!(input.get_earliest()); - /// ``` - #[inline] - pub fn set_earliest(&mut self, yes: bool) { - self.earliest = yes; - } - - /// Return a borrow of the underlying haystack as a slice of bytes. - /// - /// # Example - /// - /// ``` - /// use aho_corasick::Input; - /// - /// let input = Input::new("foobar"); - /// assert_eq!(b"foobar", input.haystack()); - /// ``` - #[inline] - pub fn haystack(&self) -> &[u8] { - self.haystack - } - - /// Return the start position of this search. - /// - /// This is a convenience routine for `search.get_span().start()`. - /// - /// # Example - /// - /// ``` - /// use aho_corasick::Input; - /// - /// let input = Input::new("foobar"); - /// assert_eq!(0, input.start()); - /// - /// let input = Input::new("foobar").span(2..4); - /// assert_eq!(2, input.start()); - /// ``` - #[inline] - pub fn start(&self) -> usize { - self.get_span().start - } - - /// Return the end position of this search. - /// - /// This is a convenience routine for `search.get_span().end()`. - /// - /// # Example - /// - /// ``` - /// use aho_corasick::Input; - /// - /// let input = Input::new("foobar"); - /// assert_eq!(6, input.end()); - /// - /// let input = Input::new("foobar").span(2..4); - /// assert_eq!(4, input.end()); - /// ``` - #[inline] - pub fn end(&self) -> usize { - self.get_span().end - } - - /// Return the span for this search configuration. - /// - /// If one was not explicitly set, then the span corresponds to the entire - /// range of the haystack. - /// - /// # Example - /// - /// ``` - /// use aho_corasick::{Input, Span}; - /// - /// let input = Input::new("foobar"); - /// assert_eq!(Span { start: 0, end: 6 }, input.get_span()); - /// ``` - #[inline] - pub fn get_span(&self) -> Span { - self.span - } - - /// Return the span as a range for this search configuration. - /// - /// If one was not explicitly set, then the span corresponds to the entire - /// range of the haystack. - /// - /// # Example - /// - /// ``` - /// use aho_corasick::Input; - /// - /// let input = Input::new("foobar"); - /// assert_eq!(0..6, input.get_range()); - /// ``` - #[inline] - pub fn get_range(&self) -> Range<usize> { - self.get_span().range() - } - - /// Return the anchored mode for this search configuration. - /// - /// If no anchored mode was set, then it defaults to [`Anchored::No`]. - /// - /// # Example - /// - /// ``` - /// use aho_corasick::{Anchored, Input}; - /// - /// let mut input = Input::new("foobar"); - /// assert_eq!(Anchored::No, input.get_anchored()); - /// - /// input.set_anchored(Anchored::Yes); - /// assert_eq!(Anchored::Yes, input.get_anchored()); - /// ``` - #[inline] - pub fn get_anchored(&self) -> Anchored { - self.anchored - } - - /// Return whether this search should execute in "earliest" mode. - /// - /// # Example - /// - /// ``` - /// use aho_corasick::Input; - /// - /// let input = Input::new("foobar"); - /// assert!(!input.get_earliest()); - /// ``` - #[inline] - pub fn get_earliest(&self) -> bool { - self.earliest - } - - /// Return true if this input has been exhausted, which in turn means all - /// subsequent searches will return no matches. - /// - /// This occurs precisely when the start position of this search is greater - /// than the end position of the search. 
- /// - /// # Example - /// - /// ``` - /// use aho_corasick::Input; - /// - /// let mut input = Input::new("foobar"); - /// assert!(!input.is_done()); - /// input.set_start(6); - /// assert!(!input.is_done()); - /// input.set_start(7); - /// assert!(input.is_done()); - /// ``` - #[inline] - pub fn is_done(&self) -> bool { - self.get_span().start > self.get_span().end - } -} - -impl<'h> core::fmt::Debug for Input<'h> { - fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { - let mut fmter = f.debug_struct("Input"); - match core::str::from_utf8(self.haystack()) { - Ok(nice) => fmter.field("haystack", &nice), - Err(_) => fmter.field("haystack", &self.haystack()), - } - .field("span", &self.span) - .field("anchored", &self.anchored) - .field("earliest", &self.earliest) - .finish() - } -} - -impl<'h, H: ?Sized + AsRef<[u8]>> From<&'h H> for Input<'h> { - #[inline] - fn from(haystack: &'h H) -> Input<'h> { - Input::new(haystack) - } -} - -/// A representation of a range in a haystack. -/// -/// A span corresponds to the starting and ending _byte offsets_ of a -/// contiguous region of bytes. The starting offset is inclusive while the -/// ending offset is exclusive. That is, a span is a half-open interval. -/// -/// A span is used to report the offsets of a match, but it is also used to -/// convey which region of a haystack should be searched via routines like -/// [`Input::span`]. -/// -/// This is basically equivalent to a `std::ops::Range<usize>`, except this -/// type implements `Copy` which makes it more ergonomic to use in the context -/// of this crate. Indeed, `Span` exists only because `Range<usize>` does -/// not implement `Copy`. Like a range, this implements `Index` for `[u8]` -/// and `str`, and `IndexMut` for `[u8]`. For convenience, this also impls -/// `From<Range>`, which means things like `Span::from(5..10)` work. -/// -/// There are no constraints on the values of a span. It is, for example, legal -/// to create a span where `start > end`. -#[derive(Clone, Copy, Eq, Hash, PartialEq)] -pub struct Span { - /// The start offset of the span, inclusive. - pub start: usize, - /// The end offset of the span, exclusive. - pub end: usize, -} - -impl Span { - /// Returns this span as a range. - #[inline] - pub fn range(&self) -> Range<usize> { - Range::from(*self) - } - - /// Returns true when this span is empty. That is, when `start >= end`. - #[inline] - pub fn is_empty(&self) -> bool { - self.start >= self.end - } - - /// Returns the length of this span. - /// - /// This returns `0` in precisely the cases that `is_empty` returns `true`. - #[inline] - pub fn len(&self) -> usize { - self.end.saturating_sub(self.start) - } - - /// Returns true when the given offset is contained within this span. - /// - /// Note that an empty span contains no offsets and will always return - /// false. - #[inline] - pub fn contains(&self, offset: usize) -> bool { - !self.is_empty() && self.start <= offset && offset <= self.end - } - - /// Returns a new span with `offset` added to this span's `start` and `end` - /// values. 
- #[inline] - pub fn offset(&self, offset: usize) -> Span { - Span { start: self.start + offset, end: self.end + offset } - } -} - -impl core::fmt::Debug for Span { - fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { - write!(f, "{}..{}", self.start, self.end) - } -} - -impl core::ops::Index<Span> for [u8] { - type Output = [u8]; - - #[inline] - fn index(&self, index: Span) -> &[u8] { - &self[index.range()] - } -} - -impl core::ops::IndexMut<Span> for [u8] { - #[inline] - fn index_mut(&mut self, index: Span) -> &mut [u8] { - &mut self[index.range()] - } -} - -impl core::ops::Index<Span> for str { - type Output = str; - - #[inline] - fn index(&self, index: Span) -> &str { - &self[index.range()] - } -} - -impl From<Range<usize>> for Span { - #[inline] - fn from(range: Range<usize>) -> Span { - Span { start: range.start, end: range.end } - } -} - -impl From<Span> for Range<usize> { - #[inline] - fn from(span: Span) -> Range<usize> { - Range { start: span.start, end: span.end } - } -} - -impl PartialEq<Range<usize>> for Span { - #[inline] - fn eq(&self, range: &Range<usize>) -> bool { - self.start == range.start && self.end == range.end - } -} - -impl PartialEq<Span> for Range<usize> { - #[inline] - fn eq(&self, span: &Span) -> bool { - self.start == span.start && self.end == span.end - } -} - -/// The type of anchored search to perform. -/// -/// If an Aho-Corasick searcher does not support the anchored mode selected, -/// then the search will return an error or panic, depending on whether a -/// fallible or an infallible routine was called. -#[non_exhaustive] -#[derive(Clone, Copy, Debug, Eq, PartialEq)] -pub enum Anchored { - /// Run an unanchored search. This means a match may occur anywhere at or - /// after the start position of the search up until the end position of the - /// search. - No, - /// Run an anchored search. This means that a match must begin at the start - /// position of the search and end before the end position of the search. - Yes, -} - -impl Anchored { - /// Returns true if and only if this anchor mode corresponds to an anchored - /// search. - /// - /// # Example - /// - /// ``` - /// use aho_corasick::Anchored; - /// - /// assert!(!Anchored::No.is_anchored()); - /// assert!(Anchored::Yes.is_anchored()); - /// ``` - #[inline] - pub fn is_anchored(&self) -> bool { - matches!(*self, Anchored::Yes) - } -} - -/// A representation of a match reported by an Aho-Corasick searcher. -/// -/// A match has two essential pieces of information: the [`PatternID`] that -/// matches, and the [`Span`] of the match in a haystack. -/// -/// The pattern is identified by an ID, which corresponds to its position -/// (starting from `0`) relative to other patterns used to construct the -/// corresponding searcher. If only a single pattern is provided, then all -/// matches are guaranteed to have a pattern ID of `0`. -/// -/// Every match reported by a searcher guarantees that its span has its start -/// offset as less than or equal to its end offset. -#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)] -pub struct Match { - /// The pattern ID. - pattern: PatternID, - /// The underlying match span. - span: Span, -} - -impl Match { - /// Create a new match from a pattern ID and a span. - /// - /// This constructor is generic over how a span is provided. While - /// a [`Span`] may be given directly, one may also provide a - /// `std::ops::Range<usize>`. - /// - /// # Panics - /// - /// This panics if `end < start`. 
- /// - /// # Example - /// - /// This shows how to create a match for the first pattern in an - /// Aho-Corasick searcher using convenient range syntax. - /// - /// ``` - /// use aho_corasick::{Match, PatternID}; - /// - /// let m = Match::new(PatternID::ZERO, 5..10); - /// assert_eq!(0, m.pattern().as_usize()); - /// assert_eq!(5, m.start()); - /// assert_eq!(10, m.end()); - /// ``` - #[inline] - pub fn new<S: Into<Span>>(pattern: PatternID, span: S) -> Match { - let span = span.into(); - assert!(span.start <= span.end, "invalid match span"); - Match { pattern, span } - } - - /// Create a new match from a pattern ID and a byte offset span. - /// - /// This constructor is generic over how a span is provided. While - /// a [`Span`] may be given directly, one may also provide a - /// `std::ops::Range<usize>`. - /// - /// This is like [`Match::new`], but accepts a `usize` instead of a - /// [`PatternID`]. This panics if the given `usize` is not representable - /// as a `PatternID`. - /// - /// # Panics - /// - /// This panics if `end < start` or if `pattern > PatternID::MAX`. - /// - /// # Example - /// - /// This shows how to create a match for the third pattern in an - /// Aho-Corasick searcher using convenient range syntax. - /// - /// ``` - /// use aho_corasick::Match; - /// - /// let m = Match::must(3, 5..10); - /// assert_eq!(3, m.pattern().as_usize()); - /// assert_eq!(5, m.start()); - /// assert_eq!(10, m.end()); - /// ``` - #[inline] - pub fn must<S: Into<Span>>(pattern: usize, span: S) -> Match { - Match::new(PatternID::must(pattern), span) - } - - /// Returns the ID of the pattern that matched. - /// - /// The ID of a pattern is derived from the position in which it was - /// originally inserted into the corresponding searcher. The first pattern - /// has identifier `0`, and each subsequent pattern is `1`, `2` and so on. - #[inline] - pub fn pattern(&self) -> PatternID { - self.pattern - } - - /// The starting position of the match. - /// - /// This is a convenience routine for `Match::span().start`. - #[inline] - pub fn start(&self) -> usize { - self.span().start - } - - /// The ending position of the match. - /// - /// This is a convenience routine for `Match::span().end`. - #[inline] - pub fn end(&self) -> usize { - self.span().end - } - - /// Returns the match span as a range. - /// - /// This is a convenience routine for `Match::span().range()`. - #[inline] - pub fn range(&self) -> core::ops::Range<usize> { - self.span().range() - } - - /// Returns the span for this match. - #[inline] - pub fn span(&self) -> Span { - self.span - } - - /// Returns true when the span in this match is empty. - /// - /// An empty match can only be returned when empty pattern is in the - /// Aho-Corasick searcher. - #[inline] - pub fn is_empty(&self) -> bool { - self.span().is_empty() - } - - /// Returns the length of this match. - /// - /// This returns `0` in precisely the cases that `is_empty` returns `true`. - #[inline] - pub fn len(&self) -> usize { - self.span().len() - } - - /// Returns a new match with `offset` added to its span's `start` and `end` - /// values. - #[inline] - pub fn offset(&self, offset: usize) -> Match { - Match { - pattern: self.pattern, - span: Span { - start: self.start() + offset, - end: self.end() + offset, - }, - } - } -} - -/// A knob for controlling the match semantics of an Aho-Corasick automaton. -/// -/// There are two generally different ways that Aho-Corasick automatons can -/// report matches. 
The first way is the "standard" approach that results from -/// implementing most textbook explanations of Aho-Corasick. The second way is -/// to report only the leftmost non-overlapping matches. The leftmost approach -/// is in turn split into two different ways of resolving ambiguous matches: -/// leftmost-first and leftmost-longest. -/// -/// The `Standard` match kind is the default and is the only one that supports -/// overlapping matches and stream searching. (Trying to find overlapping or -/// streaming matches using leftmost match semantics will result in an error in -/// fallible APIs and a panic when using infallibe APIs.) The `Standard` match -/// kind will report matches as they are seen. When searching for overlapping -/// matches, then all possible matches are reported. When searching for -/// non-overlapping matches, the first match seen is reported. For example, for -/// non-overlapping matches, given the patterns `abcd` and `b` and the haystack -/// `abcdef`, only a match for `b` is reported since it is detected first. The -/// `abcd` match is never reported since it overlaps with the `b` match. -/// -/// In contrast, the leftmost match kind always prefers the leftmost match -/// among all possible matches. Given the same example as above with `abcd` and -/// `b` as patterns and `abcdef` as the haystack, the leftmost match is `abcd` -/// since it begins before the `b` match, even though the `b` match is detected -/// before the `abcd` match. In this case, the `b` match is not reported at all -/// since it overlaps with the `abcd` match. -/// -/// The difference between leftmost-first and leftmost-longest is in how they -/// resolve ambiguous matches when there are multiple leftmost matches to -/// choose from. Leftmost-first always chooses the pattern that was provided -/// earliest, where as leftmost-longest always chooses the longest matching -/// pattern. For example, given the patterns `a` and `ab` and the subject -/// string `ab`, the leftmost-first match is `a` but the leftmost-longest match -/// is `ab`. Conversely, if the patterns were given in reverse order, i.e., -/// `ab` and `a`, then both the leftmost-first and leftmost-longest matches -/// would be `ab`. Stated differently, the leftmost-first match depends on the -/// order in which the patterns were given to the Aho-Corasick automaton. -/// Because of that, when leftmost-first matching is used, if a pattern `A` -/// that appears before a pattern `B` is a prefix of `B`, then it is impossible -/// to ever observe a match of `B`. -/// -/// If you're not sure which match kind to pick, then stick with the standard -/// kind, which is the default. In particular, if you need overlapping or -/// streaming matches, then you _must_ use the standard kind. The leftmost -/// kinds are useful in specific circumstances. For example, leftmost-first can -/// be very useful as a way to implement match priority based on the order of -/// patterns given and leftmost-longest can be useful for dictionary searching -/// such that only the longest matching words are reported. -/// -/// # Relationship with regular expression alternations -/// -/// Understanding match semantics can be a little tricky, and one easy way -/// to conceptualize non-overlapping matches from an Aho-Corasick automaton -/// is to think about them as a simple alternation of literals in a regular -/// expression. For example, let's say we wanted to match the strings -/// `Sam` and `Samwise`, which would turn into the regex `Sam|Samwise`. 
It -/// turns out that regular expression engines have two different ways of -/// matching this alternation. The first way, leftmost-longest, is commonly -/// found in POSIX compatible implementations of regular expressions (such as -/// `grep`). The second way, leftmost-first, is commonly found in backtracking -/// implementations such as Perl. (Some regex engines, such as RE2 and Rust's -/// regex engine do not use backtracking, but still implement leftmost-first -/// semantics in an effort to match the behavior of dominant backtracking -/// regex engines such as those found in Perl, Ruby, Python, Javascript and -/// PHP.) -/// -/// That is, when matching `Sam|Samwise` against `Samwise`, a POSIX regex -/// will match `Samwise` because it is the longest possible match, but a -/// Perl-like regex will match `Sam` since it appears earlier in the -/// alternation. Indeed, the regex `Sam|Samwise` in a Perl-like regex engine -/// will never match `Samwise` since `Sam` will always have higher priority. -/// Conversely, matching the regex `Samwise|Sam` against `Samwise` will lead to -/// a match of `Samwise` in both POSIX and Perl-like regexes since `Samwise` is -/// still longest match, but it also appears earlier than `Sam`. -/// -/// The "standard" match semantics of Aho-Corasick generally don't correspond -/// to the match semantics of any large group of regex implementations, so -/// there's no direct analogy that can be made here. Standard match semantics -/// are generally useful for overlapping matches, or if you just want to see -/// matches as they are detected. -/// -/// The main conclusion to draw from this section is that the match semantics -/// can be tweaked to precisely match either Perl-like regex alternations or -/// POSIX regex alternations. -#[non_exhaustive] -#[derive(Clone, Copy, Debug, Eq, PartialEq)] -pub enum MatchKind { - /// Use standard match semantics, which support overlapping matches. When - /// used with non-overlapping matches, matches are reported as they are - /// seen. - Standard, - /// Use leftmost-first match semantics, which reports leftmost matches. - /// When there are multiple possible leftmost matches, the match - /// corresponding to the pattern that appeared earlier when constructing - /// the automaton is reported. - /// - /// This does **not** support overlapping matches or stream searching. If - /// this match kind is used, attempting to find overlapping matches or - /// stream matches will fail. - LeftmostFirst, - /// Use leftmost-longest match semantics, which reports leftmost matches. - /// When there are multiple possible leftmost matches, the longest match - /// is chosen. - /// - /// This does **not** support overlapping matches or stream searching. If - /// this match kind is used, attempting to find overlapping matches or - /// stream matches will fail. - LeftmostLongest, -} - -/// The default match kind is `MatchKind::Standard`. -impl Default for MatchKind { - fn default() -> MatchKind { - MatchKind::Standard - } -} - -impl MatchKind { - #[inline] - pub(crate) fn is_standard(&self) -> bool { - matches!(*self, MatchKind::Standard) - } - - #[inline] - pub(crate) fn is_leftmost(&self) -> bool { - matches!(*self, MatchKind::LeftmostFirst | MatchKind::LeftmostLongest) - } - - #[inline] - pub(crate) fn is_leftmost_first(&self) -> bool { - matches!(*self, MatchKind::LeftmostFirst) - } - - /// Convert this match kind into a packed match kind. 
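The leftmost-first versus leftmost-longest distinction explained above is easiest to see in code. Below is a minimal sketch using the crate's builder API, mirroring the `Sam`/`Samwise` example from the documentation; it assumes the aho-corasick 1.x API and collapses error handling with `unwrap` for brevity.

```rust
use aho_corasick::{AhoCorasick, MatchKind};

fn main() {
    // Patterns and haystack taken from the `Sam|Samwise` discussion above.
    let patterns = &["Sam", "Samwise"];
    let haystack = "Samwise";

    // Leftmost-first: pattern order breaks ties, so `Sam` (pattern 0) wins.
    let first = AhoCorasick::builder()
        .match_kind(MatchKind::LeftmostFirst)
        .build(patterns)
        .unwrap();
    let m = first.find(haystack).unwrap();
    assert_eq!((m.pattern().as_usize(), m.start(), m.end()), (0, 0, 3));

    // Leftmost-longest: the longest match wins, so `Samwise` (pattern 1) is reported.
    let longest = AhoCorasick::builder()
        .match_kind(MatchKind::LeftmostLongest)
        .build(patterns)
        .unwrap();
    let m = longest.find(haystack).unwrap();
    assert_eq!((m.pattern().as_usize(), m.start(), m.end()), (1, 0, 7));
}
```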
If this match kind - /// corresponds to standard semantics, then this returns None, since - /// packed searching does not support standard semantics. - #[inline] - pub(crate) fn as_packed(&self) -> Option<crate::packed::MatchKind> { - match *self { - MatchKind::Standard => None, - MatchKind::LeftmostFirst => { - Some(crate::packed::MatchKind::LeftmostFirst) - } - MatchKind::LeftmostLongest => { - Some(crate::packed::MatchKind::LeftmostLongest) - } - } - } -} - -/// The kind of anchored starting configurations to support in an Aho-Corasick -/// searcher. -/// -/// Depending on which searcher is used internally by -/// [`AhoCorasick`](crate::AhoCorasick), supporting both unanchored -/// and anchored searches can be quite costly. For this reason, -/// [`AhoCorasickBuilder::start_kind`](crate::AhoCorasickBuilder::start_kind) -/// can be used to configure whether your searcher supports unanchored, -/// anchored or both kinds of searches. -/// -/// This searcher configuration knob works in concert with the search time -/// configuration [`Input::anchored`]. Namely, if one requests an unsupported -/// anchored mode, then the search will either panic or return an error, -/// depending on whether you're using infallible or fallibe APIs, respectively. -/// -/// `AhoCorasick` by default only supports unanchored searches. -#[derive(Clone, Copy, Debug, Eq, PartialEq)] -pub enum StartKind { - /// Support both anchored and unanchored searches. - Both, - /// Support only unanchored searches. Requesting an anchored search will - /// return an error in fallible APIs and panic in infallible APIs. - Unanchored, - /// Support only anchored searches. Requesting an unanchored search will - /// return an error in fallible APIs and panic in infallible APIs. - Anchored, -} - -impl Default for StartKind { - fn default() -> StartKind { - StartKind::Unanchored - } -} diff --git a/vendor/aho-corasick/src/util/special.rs b/vendor/aho-corasick/src/util/special.rs deleted file mode 100644 index beeba40c893107..00000000000000 --- a/vendor/aho-corasick/src/util/special.rs +++ /dev/null @@ -1,42 +0,0 @@ -use crate::util::primitives::StateID; - -/// A collection of sentinel state IDs for Aho-Corasick automata. -/// -/// This specifically enables the technique by which we determine which states -/// are dead, matches or start states. Namely, by arranging states in a -/// particular order, we can determine the type of a state simply by looking at -/// its ID. -#[derive(Clone, Debug)] -pub(crate) struct Special { - /// The maximum ID of all the "special" states. This corresponds either to - /// start_anchored_id when a prefilter is active and max_match_id when a - /// prefilter is not active. The idea here is that if there is no prefilter, - /// then there is no point in treating start states as special. - pub(crate) max_special_id: StateID, - /// The maximum ID of all the match states. Any state ID bigger than this - /// is guaranteed to be a non-match ID. - /// - /// It is possible and legal for max_match_id to be equal to - /// start_anchored_id, which occurs precisely in the case where the empty - /// string is a pattern that was added to the underlying automaton. - pub(crate) max_match_id: StateID, - /// The state ID of the start state used for unanchored searches. - pub(crate) start_unanchored_id: StateID, - /// The state ID of the start state used for anchored searches. This is - /// always start_unanchored_id+1. 
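To tie the `StartKind` builder knob to the search-time `Input::anchored` setting described above, a brief sketch (again assuming the aho-corasick 1.x API; the pattern set and haystack are arbitrary):

```rust
use aho_corasick::{AhoCorasick, Anchored, Input, StartKind};

fn main() {
    // Sketch assuming the 1.x builder/try_find API referenced in the docs above.
    // Build a searcher that only supports anchored starts.
    let ac = AhoCorasick::builder()
        .start_kind(StartKind::Anchored)
        .build(&["foo", "bar"])
        .unwrap();

    // An anchored search must match at the start position of the search.
    let m = ac
        .try_find(Input::new("foobar").anchored(Anchored::Yes))
        .unwrap()
        .expect("foo matches at offset 0");
    assert_eq!((m.start(), m.end()), (0, 3));

    // Requesting an unanchored search from an anchored-only searcher is an
    // error in the fallible API (and would panic in the infallible one).
    let unanchored = ac.try_find(Input::new("foobar").anchored(Anchored::No));
    assert!(unanchored.is_err());
}
```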
- pub(crate) start_anchored_id: StateID, -} - -impl Special { - /// Create a new set of "special" state IDs with all IDs initialized to - /// zero. The general idea here is that they will be updated and set to - /// correct values later. - pub(crate) fn zero() -> Special { - Special { - max_special_id: StateID::ZERO, - max_match_id: StateID::ZERO, - start_unanchored_id: StateID::ZERO, - start_anchored_id: StateID::ZERO, - } - } -} diff --git a/vendor/base64/.cargo-checksum.json b/vendor/base64/.cargo-checksum.json deleted file mode 100644 index b0b083ea550910..00000000000000 --- a/vendor/base64/.cargo-checksum.json +++ /dev/null @@ -1 +0,0 @@ -{"files":{".cargo_vcs_info.json":"4d505b174c7ecd854ad5668b1750be31305ca69e4634131e7bb262e98f3cf50e",".circleci/config.yml":"c44defbad42a19f8c5fb8aeb9e71beaf1d0e920d615a06f42e4936c29e53547f",".github/ISSUE_TEMPLATE/general-purpose-issue.md":"9e89c069e50dc24a09ece40bd6d02618ab044b2b53d2e5221defd6c884c96964","Cargo.lock":"cee37732975a1ffc1f956d3d05b6edf1baec72841cfabc384a21b02b3bfa0275","Cargo.toml":"52bee6a418e14918d37058fd15fccfd0f417a06fe4f9668b6f97866bf7f991e3","Cargo.toml.orig":"ff2d361bc5f6ec9b4738c293b3dfa65278e93a2664040f75ef6c944441818afe","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"0dd882e53de11566d50f8e8e2d5a651bcf3fabee4987d70f306233cf39094ba7","README.md":"df01f5b4317d601e7de86743f9818aec9196abf9e298f5e47679b7a966ecd945","RELEASE-NOTES.md":"997a5193317a8bff266ecfe4f015ba070b782b6df7d3a1738b9b52584d57f9c6","benches/benchmarks.rs":"cebbcc8649e760e569c6be04f5e727aee2c2568ced7faab580fc0aa0d0426d26","clippy.toml":"b26be4d15ed059985ce6994f11817fd7562046f46e460a0dc64dbb71cfc246d1","examples/base64.rs":"b75ead2199a9b4389c69fe6f1ae988176a263b8fc84e7a4fea1d7e5a41592078","icon_CLion.svg":"cffa044ba75cb998ee3306991dc4a3755ec2f39ab95ddd4b74bc21988389020f","src/alphabet.rs":"5de2beb8fcccb078c61cac2c0477ebbde145122d6c10a0f7ea2e57e8159318e0","src/chunked_encoder.rs":"edfdbb9a4329b80fb2c769ada81e234e00839e0fa85faaa70bacf40ce12e951c","src/decode.rs":"b046a72d62eaac58dc42efcf7848d9d96d022f6594e851cf87074b77ce45c04a","src/display.rs":"31bf3e19274a0b80dd8948a81ea535944f756ef5b88736124c940f5fe1e8c71c","src/encode.rs":"44ddcc162f3fe9817b6e857dda0a3b9197b90a657e5f71c44aacabf5431ccf7d","src/engine/general_purpose/decode.rs":"d865b057e5788e7fefd189cf57ec913df263e6a0742dfa52513f587e14fa1a92","src/engine/general_purpose/decode_suffix.rs":"689688f7bf442b232d3b9f56a1b41c56d9393ace88556a165c224b93dd19b74e","src/engine/general_purpose/mod.rs":"901760a7f5721ec3bafad5fea6251f57de0f767ecb2e1e2fdfe64d661404ec34","src/engine/mod.rs":"5e4a6c0e86417f3b62350264ef383f91e9864390f7c315d786ecd8e9c920ee9f","src/engine/naive.rs":"70de29d909c3fe7918d2965782088b05047b8b6e30d1d2bf11ba073d3f8633ff","src/engine/tests.rs":"2cc8d1431f40f5b9c3ad8970e6fb73bba8be3f2317553dd026539f41908aaa19","src/lib.rs":"c4db7bd31ace78aec2ecd151cef3ad90dfdc76097ba12027bde79d3c82612f7c","src/prelude.rs":"c1587138e5301ac797c5c362cb3638649b33f79c20c16db6f38ad44330540752","src/read/decoder.rs":"00aaa0553a54fcf12762658c4e56663a9705cc30c07af30976291e6f69d78c3d","src/read/decoder_tests.rs":"66ec39bf6e86f21f4db1afd6c5cd63d4a4931ab896b9c38de25d99b803804bbf","src/read/mod.rs":"e0b714eda02d16b1ffa6f78fd09b2f963e01c881b1f7c17b39db4e904be5e746","src/tests.rs":"90cb9f8a1ccb7c4ddc4f8618208e0031fc97e0df0e5aa466d6a5cf45d25967d8","src/write/encoder.rs":"c889c853249220fe2ddaeb77ee6e2ee2945f7db88cd6658ef89ff71b81255ea8","src/write/encoder_string_writer.rs":"0326c9d120369b9bbc35
697b5b9b141bed24283374c93d5af1052eb042e47799","src/write/encoder_tests.rs":"28695a485b17cf5db73656aae5d90127f726e02c6d70efd83e5ab53a4cc17b38","src/write/mod.rs":"73cd98dadc9d712b3fefd9449d97e825e097397441b90588e0051e4d3b0911b9","tests/encode.rs":"5309f4538b1df611436f7bfba7409c725161b6f841b1bbf8d9890ae185de7d88","tests/tests.rs":"78efcf0dc4bb6ae52f7a91fcad89e44e4dce578224c36b4e6c1c306459be8500"},"package":"72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6"} \ No newline at end of file diff --git a/vendor/base64/.cargo_vcs_info.json b/vendor/base64/.cargo_vcs_info.json deleted file mode 100644 index 50adb81ec205f5..00000000000000 --- a/vendor/base64/.cargo_vcs_info.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "git": { - "sha1": "e14400697453bcc85997119b874bc03d9601d0af" - }, - "path_in_vcs": "" -} \ No newline at end of file diff --git a/vendor/base64/.circleci/config.yml b/vendor/base64/.circleci/config.yml deleted file mode 100644 index 4d2576dc824a49..00000000000000 --- a/vendor/base64/.circleci/config.yml +++ /dev/null @@ -1,135 +0,0 @@ -version: '2.1' - -workflows: - version: 2 - build: - jobs: - - build: - matrix: - parameters: - rust_img: [ - # Yes, a single-parameter axis, but means it can be referred to as a cache parameter easily without - # duplicating the magic version number throughout this file. - # The default rust images (not -slim or -alpine) are based on buildpack-deps. Hopefully this will - # be easier on the CI hosts since presumably those fat lower layers will already be cached, and - # therefore faster than a minimal, customized alpine. - # MSRV - 'rust:1.48.0' - ] - # a hacky scheme to work around CircleCI's inability to deal with mutable docker tags, forcing us to - # get a nightly or stable toolchain via rustup instead of a mutable docker tag - toolchain_override: [ - '__msrv__', # won't add any other toolchains, just uses what's in the docker image - '1.70.0', # minimum needed to build dev-dependencies - 'stable', - 'beta', - 'nightly' - ] - -jobs: - build: - parameters: - rust_img: - type: string - toolchain_override: - type: string - docker: - - image: << parameters.rust_img >> - steps: - - checkout - - restore_cache: - key: project-cache-v5-<< parameters.rust_img >>-<< parameters.toolchain_override >>-{{ checksum "Cargo.toml" }} - - run: - name: Setup toolchain - command: | - if [[ '<< parameters.toolchain_override >>' != '__msrv__' ]] - then - rustup toolchain add '<< parameters.toolchain_override >>' - rustup default '<< parameters.toolchain_override >>' - fi - - run: - name: Log rustc version - command: rustc --version - - run: - name: Build main target - # update first to select dependencies appropriate for this toolchain - command: | - cargo update - cargo build - - run: - name: Check formatting - command: | - rustup component add rustfmt - cargo fmt -- --check - - run: - name: Check clippy lints - # we only care about stable clippy -- nightly clippy is a bit wild - command: | - if [[ '<< parameters.toolchain_override >>' == 'stable' ]] - then - rustup component add clippy - cargo clippy --all-targets - fi - - run: - name: Build all targets - command: | - if [[ '<< parameters.toolchain_override >>' != '__msrv__' ]] - then - cargo build --all-targets - fi - - run: - name: Build without default features - command: | - cargo build --no-default-features - if [[ '<< parameters.toolchain_override >>' != '__msrv__' ]] - then - cargo build --no-default-features --all-targets - fi - - run: - name: Build with only alloc - command: | - cargo build 
--no-default-features --features alloc - if [[ '<< parameters.toolchain_override >>' != '__msrv__' ]] - then - cargo build --no-default-features --features alloc --all-targets - fi - - run: - name: Add arm toolchain - command: rustup target add thumbv6m-none-eabi - - run: - name: Build ARM without default features (no_std) - command: cargo build --target thumbv6m-none-eabi --no-default-features - - run: - name: Build ARM with only alloc feature - command: cargo build --target thumbv6m-none-eabi --no-default-features --features alloc - - run: - # dev dependencies can't build on 1.48.0 - name: Run tests - command: | - if [[ '<< parameters.toolchain_override >>' != '__msrv__' ]] - then - cargo test --no-default-features - cargo test - fi - - run: - name: Build docs - command: cargo doc --verbose - - run: - name: Confirm fuzzers can run - # TERM=dumb prevents cargo fuzz list from printing with color - environment: - TERM: dumb - command: | - if [[ '<< parameters.toolchain_override >>' = 'nightly' ]] - then - cargo install cargo-fuzz - cargo fuzz list | xargs -I FUZZER cargo fuzz run FUZZER -- -max_total_time=1 - fi - - - save_cache: - key: project-cache-v5-<< parameters.rust_img >>-<< parameters.toolchain_override >>-{{ checksum "Cargo.toml" }} - paths: - # rust docker img doesn't use $HOME/[.cargo,.rustup] - - /usr/local/cargo - - /usr/local/rustup - - ./target diff --git a/vendor/base64/.github/ISSUE_TEMPLATE/general-purpose-issue.md b/vendor/base64/.github/ISSUE_TEMPLATE/general-purpose-issue.md deleted file mode 100644 index b35b2f3eb65ead..00000000000000 --- a/vendor/base64/.github/ISSUE_TEMPLATE/general-purpose-issue.md +++ /dev/null @@ -1,21 +0,0 @@ ---- -name: General purpose issue -about: General purpose issue -title: Default issue -labels: '' -assignees: '' - ---- - -# Before you file an issue - -- Did you read the docs? -- Did you read the README? - -# The problem - -- - -# How I, the issue filer, am going to help solve it - -- diff --git a/vendor/base64/Cargo.lock b/vendor/base64/Cargo.lock deleted file mode 100644 index 84e188d12c4b89..00000000000000 --- a/vendor/base64/Cargo.lock +++ /dev/null @@ -1,1515 +0,0 @@ -# This file is automatically @generated by Cargo. -# It is not intended for manual editing. 
-version = 3 - -[[package]] -name = "anes" -version = "0.1.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299" - -[[package]] -name = "async-attributes" -version = "1.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a3203e79f4dd9bdda415ed03cf14dae5a2bf775c683a00f94e9cd1faf0f596e5" -dependencies = [ - "quote", - "syn 1.0.109", -] - -[[package]] -name = "async-channel" -version = "1.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81953c529336010edd6d8e358f886d9581267795c61b19475b71314bffa46d35" -dependencies = [ - "concurrent-queue", - "event-listener 2.5.3", - "futures-core", -] - -[[package]] -name = "async-channel" -version = "2.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f28243a43d821d11341ab73c80bed182dc015c514b951616cf79bd4af39af0c3" -dependencies = [ - "concurrent-queue", - "event-listener 5.2.0", - "event-listener-strategy 0.5.0", - "futures-core", - "pin-project-lite", -] - -[[package]] -name = "async-executor" -version = "1.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17ae5ebefcc48e7452b4987947920dac9450be1110cadf34d1b8c116bdbaf97c" -dependencies = [ - "async-lock 3.3.0", - "async-task", - "concurrent-queue", - "fastrand 2.0.1", - "futures-lite 2.2.0", - "slab", -] - -[[package]] -name = "async-global-executor" -version = "2.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05b1b633a2115cd122d73b955eadd9916c18c8f510ec9cd1686404c60ad1c29c" -dependencies = [ - "async-channel 2.2.0", - "async-executor", - "async-io 2.3.1", - "async-lock 3.3.0", - "blocking", - "futures-lite 2.2.0", - "once_cell", -] - -[[package]] -name = "async-io" -version = "1.13.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fc5b45d93ef0529756f812ca52e44c221b35341892d3dcc34132ac02f3dd2af" -dependencies = [ - "async-lock 2.8.0", - "autocfg", - "cfg-if", - "concurrent-queue", - "futures-lite 1.13.0", - "log", - "parking", - "polling 2.8.0", - "rustix 0.37.27", - "slab", - "socket2", - "waker-fn", -] - -[[package]] -name = "async-io" -version = "2.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f97ab0c5b00a7cdbe5a371b9a782ee7be1316095885c8a4ea1daf490eb0ef65" -dependencies = [ - "async-lock 3.3.0", - "cfg-if", - "concurrent-queue", - "futures-io", - "futures-lite 2.2.0", - "parking", - "polling 3.4.0", - "rustix 0.38.9", - "slab", - "tracing", - "windows-sys 0.52.0", -] - -[[package]] -name = "async-lock" -version = "2.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "287272293e9d8c41773cec55e365490fe034813a2f172f502d6ddcf75b2f582b" -dependencies = [ - "event-listener 2.5.3", -] - -[[package]] -name = "async-lock" -version = "3.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d034b430882f8381900d3fe6f0aaa3ad94f2cb4ac519b429692a1bc2dda4ae7b" -dependencies = [ - "event-listener 4.0.3", - "event-listener-strategy 0.4.0", - "pin-project-lite", -] - -[[package]] -name = "async-std" -version = "1.12.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62565bb4402e926b29953c785397c6dc0391b7b446e45008b0049eb43cec6f5d" -dependencies = [ - "async-attributes", - "async-channel 1.9.0", - "async-global-executor", - "async-io 1.13.0", - "async-lock 2.8.0", - "crossbeam-utils", - 
"futures-channel", - "futures-core", - "futures-io", - "futures-lite 1.13.0", - "gloo-timers", - "kv-log-macro", - "log", - "memchr", - "once_cell", - "pin-project-lite", - "pin-utils", - "slab", - "wasm-bindgen-futures", -] - -[[package]] -name = "async-task" -version = "4.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fbb36e985947064623dbd357f727af08ffd077f93d696782f3c56365fa2e2799" - -[[package]] -name = "atomic-waker" -version = "1.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1505bd5d3d116872e7271a6d4e16d81d0c8570876c8de68093a09ac269d8aac0" - -[[package]] -name = "atty" -version = "0.2.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8" -dependencies = [ - "hermit-abi 0.1.19", - "libc", - "winapi", -] - -[[package]] -name = "autocfg" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" - -[[package]] -name = "base64" -version = "0.22.1" -dependencies = [ - "clap", - "criterion", - "once_cell", - "rand", - "rstest", - "rstest_reuse", - "strum", -] - -[[package]] -name = "bitflags" -version = "1.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" - -[[package]] -name = "bitflags" -version = "2.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed570934406eb16438a4e976b1b4500774099c13b8cb96eec99f620f05090ddf" - -[[package]] -name = "blocking" -version = "1.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a37913e8dc4ddcc604f0c6d3bf2887c995153af3611de9e23c352b44c1b9118" -dependencies = [ - "async-channel 2.2.0", - "async-lock 3.3.0", - "async-task", - "fastrand 2.0.1", - "futures-io", - "futures-lite 2.2.0", - "piper", - "tracing", -] - -[[package]] -name = "bumpalo" -version = "3.15.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ea184aa71bb362a1157c896979544cc23974e08fd265f29ea96b59f0b4a555b" - -[[package]] -name = "cast" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" - -[[package]] -name = "cfg-if" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" - -[[package]] -name = "ciborium" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42e69ffd6f0917f5c029256a24d0161db17cea3997d185db0d35926308770f0e" -dependencies = [ - "ciborium-io", - "ciborium-ll", - "serde", -] - -[[package]] -name = "ciborium-io" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05afea1e0a06c9be33d539b876f1ce3692f4afea2cb41f740e7743225ed1c757" - -[[package]] -name = "ciborium-ll" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57663b653d948a338bfb3eeba9bb2fd5fcfaecb9e199e87e1eda4d9e8b240fd9" -dependencies = [ - "ciborium-io", - "half", -] - -[[package]] -name = "clap" -version = "3.2.25" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ea181bf566f71cb9a5d17a59e1871af638180a18fb0035c92ae62b705207123" -dependencies = [ - "atty", - "bitflags 
1.3.2", - "clap_derive", - "clap_lex", - "indexmap", - "once_cell", - "strsim", - "termcolor", - "textwrap", -] - -[[package]] -name = "clap_derive" -version = "3.2.25" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae6371b8bdc8b7d3959e9cf7b22d4435ef3e79e138688421ec654acf8c81b008" -dependencies = [ - "heck", - "proc-macro-error", - "proc-macro2", - "quote", - "syn 1.0.109", -] - -[[package]] -name = "clap_lex" -version = "0.2.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2850f2f5a82cbf437dd5af4d49848fbdfc27c157c3d010345776f952765261c5" -dependencies = [ - "os_str_bytes", -] - -[[package]] -name = "concurrent-queue" -version = "2.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d16048cd947b08fa32c24458a22f5dc5e835264f689f4f5653210c69fd107363" -dependencies = [ - "crossbeam-utils", -] - -[[package]] -name = "criterion" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7c76e09c1aae2bc52b3d2f29e13c6572553b30c4aa1b8a49fd70de6412654cb" -dependencies = [ - "anes", - "atty", - "cast", - "ciborium", - "clap", - "criterion-plot", - "itertools", - "lazy_static", - "num-traits", - "oorandom", - "plotters", - "rayon", - "regex", - "serde", - "serde_derive", - "serde_json", - "tinytemplate", - "walkdir", -] - -[[package]] -name = "criterion-plot" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b50826342786a51a89e2da3a28f1c32b06e387201bc2d19791f622c673706b1" -dependencies = [ - "cast", - "itertools", -] - -[[package]] -name = "crossbeam-deque" -version = "0.8.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "613f8cc01fe9cf1a3eb3d7f488fd2fa8388403e97039e2f73692932e291a770d" -dependencies = [ - "crossbeam-epoch", - "crossbeam-utils", -] - -[[package]] -name = "crossbeam-epoch" -version = "0.9.18" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b82ac4a3c2ca9c3460964f020e1402edd5753411d7737aa39c3714ad1b5420e" -dependencies = [ - "crossbeam-utils", -] - -[[package]] -name = "crossbeam-utils" -version = "0.8.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "248e3bacc7dc6baa3b21e405ee045c3047101a49145e7e9eca583ab4c2ca5345" - -[[package]] -name = "crunchy" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7" - -[[package]] -name = "ctor" -version = "0.1.26" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d2301688392eb071b0bf1a37be05c469d3cc4dbbd95df672fe28ab021e6a096" -dependencies = [ - "quote", - "syn 1.0.109", -] - -[[package]] -name = "either" -version = "1.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "11157ac094ffbdde99aa67b23417ebdd801842852b500e395a45a9c0aac03e4a" - -[[package]] -name = "errno" -version = "0.3.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a258e46cdc063eb8519c00b9fc845fc47bcfca4130e2f08e88665ceda8474245" -dependencies = [ - "libc", - "windows-sys 0.52.0", -] - -[[package]] -name = "event-listener" -version = "2.5.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0206175f82b8d6bf6652ff7d71a1e27fd2e4efde587fd368662814d6ec1d9ce0" - -[[package]] -name = "event-listener" -version = "4.0.3" -source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "67b215c49b2b248c855fb73579eb1f4f26c38ffdc12973e20e07b91d78d5646e" -dependencies = [ - "concurrent-queue", - "parking", - "pin-project-lite", -] - -[[package]] -name = "event-listener" -version = "5.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b5fb89194fa3cad959b833185b3063ba881dbfc7030680b314250779fb4cc91" -dependencies = [ - "concurrent-queue", - "parking", - "pin-project-lite", -] - -[[package]] -name = "event-listener-strategy" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "958e4d70b6d5e81971bebec42271ec641e7ff4e170a6fa605f2b8a8b65cb97d3" -dependencies = [ - "event-listener 4.0.3", - "pin-project-lite", -] - -[[package]] -name = "event-listener-strategy" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "feedafcaa9b749175d5ac357452a9d41ea2911da598fde46ce1fe02c37751291" -dependencies = [ - "event-listener 5.2.0", - "pin-project-lite", -] - -[[package]] -name = "fastrand" -version = "1.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e51093e27b0797c359783294ca4f0a911c270184cb10f85783b118614a1501be" -dependencies = [ - "instant", -] - -[[package]] -name = "fastrand" -version = "2.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "25cbce373ec4653f1a01a31e8a5e5ec0c622dc27ff9c4e6606eefef5cbbed4a5" - -[[package]] -name = "futures" -version = "0.3.30" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "645c6916888f6cb6350d2550b80fb63e734897a8498abe35cfb732b6487804b0" -dependencies = [ - "futures-channel", - "futures-core", - "futures-executor", - "futures-io", - "futures-sink", - "futures-task", - "futures-util", -] - -[[package]] -name = "futures-channel" -version = "0.3.30" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eac8f7d7865dcb88bd4373ab671c8cf4508703796caa2b1985a9ca867b3fcb78" -dependencies = [ - "futures-core", - "futures-sink", -] - -[[package]] -name = "futures-core" -version = "0.3.30" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dfc6580bb841c5a68e9ef15c77ccc837b40a7504914d52e47b8b0e9bbda25a1d" - -[[package]] -name = "futures-executor" -version = "0.3.30" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a576fc72ae164fca6b9db127eaa9a9dda0d61316034f33a0a0d4eda41f02b01d" -dependencies = [ - "futures-core", - "futures-task", - "futures-util", -] - -[[package]] -name = "futures-io" -version = "0.3.30" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a44623e20b9681a318efdd71c299b6b222ed6f231972bfe2f224ebad6311f0c1" - -[[package]] -name = "futures-lite" -version = "1.13.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49a9d51ce47660b1e808d3c990b4709f2f415d928835a17dfd16991515c46bce" -dependencies = [ - "fastrand 1.9.0", - "futures-core", - "futures-io", - "memchr", - "parking", - "pin-project-lite", - "waker-fn", -] - -[[package]] -name = "futures-lite" -version = "2.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "445ba825b27408685aaecefd65178908c36c6e96aaf6d8599419d46e624192ba" -dependencies = [ - "fastrand 2.0.1", - "futures-core", - "futures-io", - "parking", - "pin-project-lite", -] - -[[package]] -name = "futures-macro" -version = "0.3.30" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.52", -] - -[[package]] -name = "futures-sink" -version = "0.3.30" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9fb8e00e87438d937621c1c6269e53f536c14d3fbd6a042bb24879e57d474fb5" - -[[package]] -name = "futures-task" -version = "0.3.30" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38d84fa142264698cdce1a9f9172cf383a0c82de1bddcf3092901442c4097004" - -[[package]] -name = "futures-timer" -version = "3.0.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f288b0a4f20f9a56b5d1da57e2227c661b7b16168e2f72365f57b63326e29b24" - -[[package]] -name = "futures-util" -version = "0.3.30" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d6401deb83407ab3da39eba7e33987a73c3df0c82b4bb5813ee871c19c41d48" -dependencies = [ - "futures-channel", - "futures-core", - "futures-io", - "futures-macro", - "futures-sink", - "futures-task", - "memchr", - "pin-project-lite", - "pin-utils", - "slab", -] - -[[package]] -name = "getrandom" -version = "0.2.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "190092ea657667030ac6a35e305e62fc4dd69fd98ac98631e5d3a2b1575a12b5" -dependencies = [ - "cfg-if", - "libc", - "wasi", -] - -[[package]] -name = "gloo-timers" -version = "0.2.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b995a66bb87bebce9a0f4a95aed01daca4872c050bfcb21653361c03bc35e5c" -dependencies = [ - "futures-channel", - "futures-core", - "js-sys", - "wasm-bindgen", -] - -[[package]] -name = "half" -version = "2.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b5eceaaeec696539ddaf7b333340f1af35a5aa87ae3e4f3ead0532f72affab2e" -dependencies = [ - "cfg-if", - "crunchy", -] - -[[package]] -name = "hashbrown" -version = "0.12.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" - -[[package]] -name = "heck" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" - -[[package]] -name = "hermit-abi" -version = "0.1.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33" -dependencies = [ - "libc", -] - -[[package]] -name = "hermit-abi" -version = "0.3.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d231dfb89cfffdbc30e7fc41579ed6066ad03abda9e567ccafae602b97ec5024" - -[[package]] -name = "indexmap" -version = "1.9.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99" -dependencies = [ - "autocfg", - "hashbrown", -] - -[[package]] -name = "instant" -version = "0.1.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c" -dependencies = [ - "cfg-if", -] - -[[package]] -name = "io-lifetimes" -version = "1.0.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eae7b9aee968036d54dce06cebaefd919e4472e753296daccd6d344e3e2df0c2" -dependencies = [ - "hermit-abi 0.3.9", - "libc", - "windows-sys 0.48.0", -] - -[[package]] -name = "itertools" -version = 
"0.10.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473" -dependencies = [ - "either", -] - -[[package]] -name = "itoa" -version = "1.0.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1a46d1a171d865aa5f83f92695765caa047a9b4cbae2cbf37dbd613a793fd4c" - -[[package]] -name = "js-sys" -version = "0.3.68" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "406cda4b368d531c842222cf9d2600a9a4acce8d29423695379c6868a143a9ee" -dependencies = [ - "wasm-bindgen", -] - -[[package]] -name = "kv-log-macro" -version = "1.0.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0de8b303297635ad57c9f5059fd9cee7a47f8e8daa09df0fcd07dd39fb22977f" -dependencies = [ - "log", -] - -[[package]] -name = "lazy_static" -version = "1.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" - -[[package]] -name = "libc" -version = "0.2.153" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c198f91728a82281a64e1f4f9eeb25d82cb32a5de251c6bd1b5154d63a8e7bd" - -[[package]] -name = "linux-raw-sys" -version = "0.3.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef53942eb7bf7ff43a617b3e2c1c4a5ecf5944a7c1bc12d7ee39bbb15e5c1519" - -[[package]] -name = "linux-raw-sys" -version = "0.4.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01cda141df6706de531b6c46c3a33ecca755538219bd484262fa09410c13539c" - -[[package]] -name = "log" -version = "0.4.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "abb12e687cfb44aa40f41fc3978ef76448f9b6038cad6aef4259d3c095a2382e" -dependencies = [ - "cfg-if", - "value-bag", -] - -[[package]] -name = "memchr" -version = "2.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "523dc4f511e55ab87b694dc30d0f820d60906ef06413f93d4d7a1385599cc149" - -[[package]] -name = "num-traits" -version = "0.2.18" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da0df0e5185db44f69b44f26786fe401b6c293d1907744beaa7fa62b2e5a517a" -dependencies = [ - "autocfg", -] - -[[package]] -name = "once_cell" -version = "1.19.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92" - -[[package]] -name = "oorandom" -version = "11.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ab1bc2a289d34bd04a330323ac98a1b4bc82c9d9fcb1e66b63caa84da26b575" - -[[package]] -name = "os_str_bytes" -version = "6.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2355d85b9a3786f481747ced0e0ff2ba35213a1f9bd406ed906554d7af805a1" - -[[package]] -name = "parking" -version = "2.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb813b8af86854136c6922af0598d719255ecb2179515e6e7730d468f05c9cae" - -[[package]] -name = "pin-project-lite" -version = "0.2.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8afb450f006bf6385ca15ef45d71d2288452bc3683ce2e2cacc0d18e4be60b58" - -[[package]] -name = "pin-utils" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" - -[[package]] -name = 
"piper" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "668d31b1c4eba19242f2088b2bf3316b82ca31082a8335764db4e083db7485d4" -dependencies = [ - "atomic-waker", - "fastrand 2.0.1", - "futures-io", -] - -[[package]] -name = "plotters" -version = "0.3.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2c224ba00d7cadd4d5c660deaf2098e5e80e07846537c51f9cfa4be50c1fd45" -dependencies = [ - "num-traits", - "plotters-backend", - "plotters-svg", - "wasm-bindgen", - "web-sys", -] - -[[package]] -name = "plotters-backend" -version = "0.3.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e76628b4d3a7581389a35d5b6e2139607ad7c75b17aed325f210aa91f4a9609" - -[[package]] -name = "plotters-svg" -version = "0.3.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38f6d39893cca0701371e3c27294f09797214b86f1fb951b89ade8ec04e2abab" -dependencies = [ - "plotters-backend", -] - -[[package]] -name = "polling" -version = "2.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4b2d323e8ca7996b3e23126511a523f7e62924d93ecd5ae73b333815b0eb3dce" -dependencies = [ - "autocfg", - "bitflags 1.3.2", - "cfg-if", - "concurrent-queue", - "libc", - "log", - "pin-project-lite", - "windows-sys 0.48.0", -] - -[[package]] -name = "polling" -version = "3.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30054e72317ab98eddd8561db0f6524df3367636884b7b21b703e4b280a84a14" -dependencies = [ - "cfg-if", - "concurrent-queue", - "pin-project-lite", - "rustix 0.38.9", - "tracing", - "windows-sys 0.52.0", -] - -[[package]] -name = "ppv-lite86" -version = "0.2.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" - -[[package]] -name = "proc-macro-error" -version = "1.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" -dependencies = [ - "proc-macro-error-attr", - "proc-macro2", - "quote", - "syn 1.0.109", - "version_check", -] - -[[package]] -name = "proc-macro-error-attr" -version = "1.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" -dependencies = [ - "proc-macro2", - "quote", - "version_check", -] - -[[package]] -name = "proc-macro2" -version = "1.0.78" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2422ad645d89c99f8f3e6b88a9fdeca7fabeac836b1002371c4367c8f984aae" -dependencies = [ - "unicode-ident", -] - -[[package]] -name = "quote" -version = "1.0.35" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "291ec9ab5efd934aaf503a6466c5d5251535d108ee747472c3977cc5acc868ef" -dependencies = [ - "proc-macro2", -] - -[[package]] -name = "rand" -version = "0.8.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" -dependencies = [ - "libc", - "rand_chacha", - "rand_core", -] - -[[package]] -name = "rand_chacha" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" -dependencies = [ - "ppv-lite86", - "rand_core", -] - -[[package]] -name = "rand_core" -version = "0.6.4" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" -dependencies = [ - "getrandom", -] - -[[package]] -name = "rayon" -version = "1.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e4963ed1bc86e4f3ee217022bd855b297cef07fb9eac5dfa1f788b220b49b3bd" -dependencies = [ - "either", - "rayon-core", -] - -[[package]] -name = "rayon-core" -version = "1.12.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1465873a3dfdaa8ae7cb14b4383657caab0b3e8a0aa9ae8e04b044854c8dfce2" -dependencies = [ - "crossbeam-deque", - "crossbeam-utils", -] - -[[package]] -name = "regex" -version = "1.8.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d0ab3ca65655bb1e41f2a8c8cd662eb4fb035e67c3f78da1d61dffe89d07300f" -dependencies = [ - "regex-syntax", -] - -[[package]] -name = "regex-syntax" -version = "0.7.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dbb5fb1acd8a1a18b3dd5be62d25485eb770e05afb408a9627d14d451bae12da" - -[[package]] -name = "rstest" -version = "0.13.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b939295f93cb1d12bc1a83cf9ee963199b133fb8a79832dd51b68bb9f59a04dc" -dependencies = [ - "async-std", - "futures", - "futures-timer", - "rstest_macros", - "rustc_version", -] - -[[package]] -name = "rstest_macros" -version = "0.13.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f78aba848123782ba59340928ec7d876ebe745aa0365d6af8a630f19a5c16116" -dependencies = [ - "cfg-if", - "proc-macro2", - "quote", - "rustc_version", - "syn 1.0.109", -] - -[[package]] -name = "rstest_reuse" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "88530b681abe67924d42cca181d070e3ac20e0740569441a9e35a7cedd2b34a4" -dependencies = [ - "quote", - "rand", - "rustc_version", - "syn 2.0.52", -] - -[[package]] -name = "rustc_version" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" -dependencies = [ - "semver", -] - -[[package]] -name = "rustix" -version = "0.37.27" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fea8ca367a3a01fe35e6943c400addf443c0f57670e6ec51196f71a4b8762dd2" -dependencies = [ - "bitflags 1.3.2", - "errno", - "io-lifetimes", - "libc", - "linux-raw-sys 0.3.8", - "windows-sys 0.48.0", -] - -[[package]] -name = "rustix" -version = "0.38.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9bfe0f2582b4931a45d1fa608f8a8722e8b3c7ac54dd6d5f3b3212791fedef49" -dependencies = [ - "bitflags 2.4.2", - "errno", - "libc", - "linux-raw-sys 0.4.13", - "windows-sys 0.48.0", -] - -[[package]] -name = "rustversion" -version = "1.0.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ffc183a10b4478d04cbbbfc96d0873219d962dd5accaff2ffbd4ceb7df837f4" - -[[package]] -name = "ryu" -version = "1.0.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e86697c916019a8588c99b5fac3cead74ec0b4b819707a682fd4d23fa0ce1ba1" - -[[package]] -name = "same-file" -version = "1.0.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502" -dependencies = [ - "winapi-util", -] - -[[package]] -name = "semver" -version = "1.0.22" 
-source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92d43fe69e652f3df9bdc2b85b2854a0825b86e4fb76bc44d945137d053639ca" - -[[package]] -name = "serde" -version = "1.0.197" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3fb1c873e1b9b056a4dc4c0c198b24c3ffa059243875552b2bd0933b1aee4ce2" -dependencies = [ - "serde_derive", -] - -[[package]] -name = "serde_derive" -version = "1.0.197" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7eb0b34b42edc17f6b7cac84a52a1c5f0e1bb2227e997ca9011ea3dd34e8610b" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.52", -] - -[[package]] -name = "serde_json" -version = "1.0.114" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c5f09b1bd632ef549eaa9f60a1f8de742bdbc698e6cee2095fc84dde5f549ae0" -dependencies = [ - "itoa", - "ryu", - "serde", -] - -[[package]] -name = "slab" -version = "0.4.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f92a496fb766b417c996b9c5e57daf2f7ad3b0bebe1ccfca4856390e3d3bb67" -dependencies = [ - "autocfg", -] - -[[package]] -name = "socket2" -version = "0.4.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f7916fc008ca5542385b89a3d3ce689953c143e9304a9bf8beec1de48994c0d" -dependencies = [ - "libc", - "winapi", -] - -[[package]] -name = "strsim" -version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" - -[[package]] -name = "strum" -version = "0.25.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "290d54ea6f91c969195bdbcd7442c8c2a2ba87da8bf60a7ee86a235d4bc1e125" -dependencies = [ - "strum_macros", -] - -[[package]] -name = "strum_macros" -version = "0.25.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23dc1fa9ac9c169a78ba62f0b841814b7abae11bdd047b9c58f893439e309ea0" -dependencies = [ - "heck", - "proc-macro2", - "quote", - "rustversion", - "syn 2.0.52", -] - -[[package]] -name = "syn" -version = "1.0.109" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" -dependencies = [ - "proc-macro2", - "quote", - "unicode-ident", -] - -[[package]] -name = "syn" -version = "2.0.52" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b699d15b36d1f02c3e7c69f8ffef53de37aefae075d8488d4ba1a7788d574a07" -dependencies = [ - "proc-macro2", - "quote", - "unicode-ident", -] - -[[package]] -name = "termcolor" -version = "1.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06794f8f6c5c898b3275aebefa6b8a1cb24cd2c6c79397ab15774837a0bc5755" -dependencies = [ - "winapi-util", -] - -[[package]] -name = "textwrap" -version = "0.16.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23d434d3f8967a09480fb04132ebe0a3e088c173e6d0ee7897abbdf4eab0f8b9" - -[[package]] -name = "tinytemplate" -version = "1.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be4d6b5f19ff7664e8c98d03e2139cb510db9b0a60b55f8e8709b689d939b6bc" -dependencies = [ - "serde", - "serde_json", -] - -[[package]] -name = "tracing" -version = "0.1.40" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3523ab5a71916ccf420eebdf5521fcef02141234bbc0b8a49f2fdc4544364ef" -dependencies = [ - "pin-project-lite", - 
"tracing-core", -] - -[[package]] -name = "tracing-core" -version = "0.1.32" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c06d3da6113f116aaee68e4d601191614c9053067f9ab7f6edbcb161237daa54" - -[[package]] -name = "unicode-ident" -version = "1.0.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b" - -[[package]] -name = "value-bag" -version = "1.0.0-alpha.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2209b78d1249f7e6f3293657c9779fe31ced465df091bbd433a1cf88e916ec55" -dependencies = [ - "ctor", - "version_check", -] - -[[package]] -name = "version_check" -version = "0.9.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" - -[[package]] -name = "waker-fn" -version = "1.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f3c4517f54858c779bbcbf228f4fca63d121bf85fbecb2dc578cdf4a39395690" - -[[package]] -name = "walkdir" -version = "2.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "29790946404f91d9c5d06f9874efddea1dc06c5efe94541a7d6863108e3a5e4b" -dependencies = [ - "same-file", - "winapi-util", -] - -[[package]] -name = "wasi" -version = "0.11.0+wasi-snapshot-preview1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" - -[[package]] -name = "wasm-bindgen" -version = "0.2.91" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1e124130aee3fb58c5bdd6b639a0509486b0338acaaae0c84a5124b0f588b7f" -dependencies = [ - "cfg-if", - "wasm-bindgen-macro", -] - -[[package]] -name = "wasm-bindgen-backend" -version = "0.2.91" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c9e7e1900c352b609c8488ad12639a311045f40a35491fb69ba8c12f758af70b" -dependencies = [ - "bumpalo", - "log", - "once_cell", - "proc-macro2", - "quote", - "syn 2.0.52", - "wasm-bindgen-shared", -] - -[[package]] -name = "wasm-bindgen-futures" -version = "0.4.41" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "877b9c3f61ceea0e56331985743b13f3d25c406a7098d45180fb5f09bc19ed97" -dependencies = [ - "cfg-if", - "js-sys", - "wasm-bindgen", - "web-sys", -] - -[[package]] -name = "wasm-bindgen-macro" -version = "0.2.91" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b30af9e2d358182b5c7449424f017eba305ed32a7010509ede96cdc4696c46ed" -dependencies = [ - "quote", - "wasm-bindgen-macro-support", -] - -[[package]] -name = "wasm-bindgen-macro-support" -version = "0.2.91" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "642f325be6301eb8107a83d12a8ac6c1e1c54345a7ef1a9261962dfefda09e66" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.52", - "wasm-bindgen-backend", - "wasm-bindgen-shared", -] - -[[package]] -name = "wasm-bindgen-shared" -version = "0.2.91" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f186bd2dcf04330886ce82d6f33dd75a7bfcf69ecf5763b89fcde53b6ac9838" - -[[package]] -name = "web-sys" -version = "0.3.68" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96565907687f7aceb35bc5fc03770a8a0471d82e479f25832f54a0e3f4b28446" -dependencies = [ - "js-sys", - "wasm-bindgen", -] - -[[package]] -name = "winapi" -version = "0.3.9" 
-source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" -dependencies = [ - "winapi-i686-pc-windows-gnu", - "winapi-x86_64-pc-windows-gnu", -] - -[[package]] -name = "winapi-i686-pc-windows-gnu" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" - -[[package]] -name = "winapi-util" -version = "0.1.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f29e6f9198ba0d26b4c9f07dbe6f9ed633e1f3d5b8b414090084349e46a52596" -dependencies = [ - "winapi", -] - -[[package]] -name = "winapi-x86_64-pc-windows-gnu" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" - -[[package]] -name = "windows-sys" -version = "0.48.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" -dependencies = [ - "windows-targets 0.48.5", -] - -[[package]] -name = "windows-sys" -version = "0.52.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" -dependencies = [ - "windows-targets 0.52.4", -] - -[[package]] -name = "windows-targets" -version = "0.48.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a2fa6e2155d7247be68c096456083145c183cbbbc2764150dda45a87197940c" -dependencies = [ - "windows_aarch64_gnullvm 0.48.5", - "windows_aarch64_msvc 0.48.5", - "windows_i686_gnu 0.48.5", - "windows_i686_msvc 0.48.5", - "windows_x86_64_gnu 0.48.5", - "windows_x86_64_gnullvm 0.48.5", - "windows_x86_64_msvc 0.48.5", -] - -[[package]] -name = "windows-targets" -version = "0.52.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7dd37b7e5ab9018759f893a1952c9420d060016fc19a472b4bb20d1bdd694d1b" -dependencies = [ - "windows_aarch64_gnullvm 0.52.4", - "windows_aarch64_msvc 0.52.4", - "windows_i686_gnu 0.52.4", - "windows_i686_msvc 0.52.4", - "windows_x86_64_gnu 0.52.4", - "windows_x86_64_gnullvm 0.52.4", - "windows_x86_64_msvc 0.52.4", -] - -[[package]] -name = "windows_aarch64_gnullvm" -version = "0.48.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" - -[[package]] -name = "windows_aarch64_gnullvm" -version = "0.52.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bcf46cf4c365c6f2d1cc93ce535f2c8b244591df96ceee75d8e83deb70a9cac9" - -[[package]] -name = "windows_aarch64_msvc" -version = "0.48.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" - -[[package]] -name = "windows_aarch64_msvc" -version = "0.52.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da9f259dd3bcf6990b55bffd094c4f7235817ba4ceebde8e6d11cd0c5633b675" - -[[package]] -name = "windows_i686_gnu" -version = "0.48.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" - -[[package]] -name = "windows_i686_gnu" -version = "0.52.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"b474d8268f99e0995f25b9f095bc7434632601028cf86590aea5c8a5cb7801d3" - -[[package]] -name = "windows_i686_msvc" -version = "0.48.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" - -[[package]] -name = "windows_i686_msvc" -version = "0.52.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1515e9a29e5bed743cb4415a9ecf5dfca648ce85ee42e15873c3cd8610ff8e02" - -[[package]] -name = "windows_x86_64_gnu" -version = "0.48.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" - -[[package]] -name = "windows_x86_64_gnu" -version = "0.52.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5eee091590e89cc02ad514ffe3ead9eb6b660aedca2183455434b93546371a03" - -[[package]] -name = "windows_x86_64_gnullvm" -version = "0.48.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" - -[[package]] -name = "windows_x86_64_gnullvm" -version = "0.52.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77ca79f2451b49fa9e2af39f0747fe999fcda4f5e241b2898624dca97a1f2177" - -[[package]] -name = "windows_x86_64_msvc" -version = "0.48.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" - -[[package]] -name = "windows_x86_64_msvc" -version = "0.52.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32b752e52a2da0ddfbdbcc6fceadfeede4c939ed16d13e648833a61dfb611ed8" diff --git a/vendor/base64/Cargo.toml b/vendor/base64/Cargo.toml deleted file mode 100644 index e1b35fc46c869c..00000000000000 --- a/vendor/base64/Cargo.toml +++ /dev/null @@ -1,85 +0,0 @@ -# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO -# -# When uploading crates to the registry Cargo will automatically -# "normalize" Cargo.toml files for maximal compatibility -# with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g., crates.io) dependencies. -# -# If you are reading this file be aware that the original Cargo.toml -# will likely look very different (and much more reasonable). -# See Cargo.toml.orig for the original contents. 
- -[package] -edition = "2018" -rust-version = "1.48.0" -name = "base64" -version = "0.22.1" -authors = ["Marshall Pierce <marshall@mpierce.org>"] -description = "encodes and decodes base64 as bytes or utf8" -documentation = "https://docs.rs/base64" -readme = "README.md" -keywords = [ - "base64", - "utf8", - "encode", - "decode", - "no_std", -] -categories = ["encoding"] -license = "MIT OR Apache-2.0" -repository = "https://github.com/marshallpierce/rust-base64" - -[package.metadata.docs.rs] -rustdoc-args = ["--generate-link-to-definition"] - -[profile.bench] -debug = 2 - -[profile.test] -opt-level = 3 - -[[example]] -name = "base64" -required-features = ["std"] - -[[test]] -name = "tests" -required-features = ["alloc"] - -[[test]] -name = "encode" -required-features = ["alloc"] - -[[bench]] -name = "benchmarks" -harness = false -required-features = ["std"] - -[dev-dependencies.clap] -version = "3.2.25" -features = ["derive"] - -[dev-dependencies.criterion] -version = "0.4.0" - -[dev-dependencies.once_cell] -version = "1" - -[dev-dependencies.rand] -version = "0.8.5" -features = ["small_rng"] - -[dev-dependencies.rstest] -version = "0.13.0" - -[dev-dependencies.rstest_reuse] -version = "0.6.0" - -[dev-dependencies.strum] -version = "0.25" -features = ["derive"] - -[features] -alloc = [] -default = ["std"] -std = ["alloc"] diff --git a/vendor/base64/LICENSE-APACHE b/vendor/base64/LICENSE-APACHE deleted file mode 100644 index 16fe87b06e802f..00000000000000 --- a/vendor/base64/LICENSE-APACHE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. 
For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - -2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - -3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - -4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - -5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - -6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - -8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - -Copyright [yyyy] [name of copyright owner] - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. diff --git a/vendor/base64/LICENSE-MIT b/vendor/base64/LICENSE-MIT deleted file mode 100644 index 7bc10f80a0499e..00000000000000 --- a/vendor/base64/LICENSE-MIT +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2015 Alice Maz - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/vendor/base64/README.md b/vendor/base64/README.md deleted file mode 100644 index f566756d51fa49..00000000000000 --- a/vendor/base64/README.md +++ /dev/null @@ -1,154 +0,0 @@ -# [base64](https://crates.io/crates/base64) - -[![](https://img.shields.io/crates/v/base64.svg)](https://crates.io/crates/base64) [![Docs](https://docs.rs/base64/badge.svg)](https://docs.rs/base64) [![CircleCI](https://circleci.com/gh/marshallpierce/rust-base64/tree/master.svg?style=shield)](https://circleci.com/gh/marshallpierce/rust-base64/tree/master) [![codecov](https://codecov.io/gh/marshallpierce/rust-base64/branch/master/graph/badge.svg)](https://codecov.io/gh/marshallpierce/rust-base64) [![unsafe forbidden](https://img.shields.io/badge/unsafe-forbidden-success.svg)](https://github.com/rust-secure-code/safety-dance/) - -<a href="https://www.jetbrains.com/?from=rust-base64"><img src="/icon_CLion.svg" height="40px"/></a> - -Made with CLion. Thanks to JetBrains for supporting open source! - -It's base64. What more could anyone want? - -This library's goals are to be *correct* and *fast*. It's thoroughly tested and widely used. It exposes functionality at -multiple levels of abstraction so you can choose the level of convenience vs performance that you want, -e.g. `decode_engine_slice` decodes into an existing `&mut [u8]` and is pretty fast (2.6GiB/s for a 3 KiB input), -whereas `decode_engine` allocates a new `Vec<u8>` and returns it, which might be more convenient in some cases, but is -slower (although still fast enough for almost any purpose) at 2.1 GiB/s. - -See the [docs](https://docs.rs/base64) for all the details. - -## FAQ - -### I need to decode base64 with whitespace/null bytes/other random things interspersed in it. What should I do? - -Remove non-base64 characters from your input before decoding. - -If you have a `Vec` of base64, [retain](https://doc.rust-lang.org/std/vec/struct.Vec.html#method.retain) can be used to -strip out whatever you need removed. - -If you have a `Read` (e.g. reading a file or network socket), there are various approaches. - -- Use [iter_read](https://crates.io/crates/iter-read) together with `Read`'s `bytes()` to filter out unwanted bytes. -- Implement `Read` with a `read()` impl that delegates to your actual `Read`, and then drops any bytes you don't want. - -### I need to line-wrap base64, e.g. for MIME/PEM. - -[line-wrap](https://crates.io/crates/line-wrap) does just that. - -### I want canonical base64 encoding/decoding. - -First, don't do this. You should no more expect Base64 to be canonical than you should expect compression algorithms to -produce canonical output across all usage in the wild (hint: they don't). -However, [people are drawn to their own destruction like moths to a flame](https://eprint.iacr.org/2022/361), so here we -are. - -There are two opportunities for non-canonical encoding (and thus, detection of the same during decoding): the final bits -of the last encoded token in two or three token suffixes, and the `=` token used to inflate the suffix to a full four -tokens. - -The trailing bits issue is unavoidable: with 6 bits available in each encoded token, 1 input byte takes 2 tokens, -with the second one having some bits unused. 
Same for two input bytes: 16 bits, but 3 tokens have 18 bits. Unless we -decide to stop shipping whole bytes around, we're stuck with those extra bits that a sneaky or buggy encoder might set -to 1 instead of 0. - -The `=` pad bytes, on the other hand, are entirely a self-own by the Base64 standard. They do not affect decoding other -than to provide an opportunity to say "that padding is incorrect". Exabytes of storage and transfer have no doubt been -wasted on pointless `=` bytes. Somehow we all seem to be quite comfortable with, say, hex-encoded data just stopping -when it's done rather than requiring a confirmation that the author of the encoder could count to four. Anyway, there -are two ways to make pad bytes predictable: require canonical padding to the next multiple of four bytes as per the RFC, -or, if you control all producers and consumers, save a few bytes by requiring no padding (especially applicable to the -url-safe alphabet). - -All `Engine` implementations must at a minimum support treating non-canonical padding of both types as an error, and -optionally may allow other behaviors. - -## Rust version compatibility - -The minimum supported Rust version is 1.48.0. - -# Contributing - -Contributions are very welcome. However, because this library is used widely, and in security-sensitive contexts, all -PRs will be carefully scrutinized. Beyond that, this sort of low level library simply needs to be 100% correct. Nobody -wants to chase bugs in encoding of any sort. - -All this means that it takes me a fair amount of time to review each PR, so it might take quite a while to carve out the -free time to give each PR the attention it deserves. I will get to everyone eventually! - -## Developing - -Benchmarks are in `benches/`. - -```bash -cargo bench -``` - -## no_std - -This crate supports no_std. By default the crate targets std via the `std` feature. You can deactivate -the `default-features` to target `core` instead. In that case you lose out on all the functionality revolving -around `std::io`, `std::error::Error`, and heap allocations. There is an additional `alloc` feature that you can activate -to bring back the support for heap allocations. - -## Profiling - -On Linux, you can use [perf](https://perf.wiki.kernel.org/index.php/Main_Page) for profiling. Then compile the -benchmarks with `cargo bench --no-run`. - -Run the benchmark binary with `perf` (shown here filtering to one particular benchmark, which will make the results -easier to read). `perf` is only available to the root user on most systems as it fiddles with event counters in your -CPU, so use `sudo`. We need to run the actual benchmark binary, hence the path into `target`. You can see the actual -full path with `cargo bench -v`; it will print out the commands it runs. If you use the exact path -that `bench` outputs, make sure you get the one that's for the benchmarks, not the tests. You may also want -to `cargo clean` so you have only one `benchmarks-` binary (they tend to accumulate). - -```bash -sudo perf record target/release/deps/benchmarks-* --bench decode_10mib_reuse -``` - -Then analyze the results, again with perf: - -```bash -sudo perf annotate -l -``` - -You'll see a bunch of interleaved rust source and assembly like this. The section with `lib.rs:327` is telling us that -4.02% of samples saw the `movzbl` aka bit shift as the active instruction. However, this percentage is not as exact as -it seems due to a phenomenon called *skid*. 
Basically, a consequence of how fancy modern CPUs are is that this sort of -instruction profiling is inherently inaccurate, especially in branch-heavy code. - -```text - lib.rs:322 0.70 : 10698: mov %rdi,%rax - 2.82 : 1069b: shr $0x38,%rax - : if morsel == decode_tables::INVALID_VALUE { - : bad_byte_index = input_index; - : break; - : }; - : accum = (morsel as u64) << 58; - lib.rs:327 4.02 : 1069f: movzbl (%r9,%rax,1),%r15d - : // fast loop of 8 bytes at a time - : while input_index < length_of_full_chunks { - : let mut accum: u64; - : - : let input_chunk = BigEndian::read_u64(&input_bytes[input_index..(input_index + 8)]); - : morsel = decode_table[(input_chunk >> 56) as usize]; - lib.rs:322 3.68 : 106a4: cmp $0xff,%r15 - : if morsel == decode_tables::INVALID_VALUE { - 0.00 : 106ab: je 1090e <base64::decode_config_buf::hbf68a45fefa299c1+0x46e> -``` - -## Fuzzing - -This uses [cargo-fuzz](https://github.com/rust-fuzz/cargo-fuzz). See `fuzz/fuzzers` for the available fuzzing scripts. -To run, use an invocation like these: - -```bash -cargo +nightly fuzz run roundtrip -cargo +nightly fuzz run roundtrip_no_pad -cargo +nightly fuzz run roundtrip_random_config -- -max_len=10240 -cargo +nightly fuzz run decode_random -``` - -## License - -This project is dual-licensed under MIT and Apache 2.0. - diff --git a/vendor/base64/RELEASE-NOTES.md b/vendor/base64/RELEASE-NOTES.md deleted file mode 100644 index 91b68a6782095d..00000000000000 --- a/vendor/base64/RELEASE-NOTES.md +++ /dev/null @@ -1,271 +0,0 @@ -# 0.22.1 - -- Correct the symbols used for the predefined `alphabet::BIN_HEX`. - -# 0.22.0 - -- `DecodeSliceError::OutputSliceTooSmall` is now conservative rather than precise. That is, the error will only occur if the decoded output _cannot_ fit, meaning that `Engine::decode_slice` can now be used with exactly-sized output slices. As part of this, `Engine::internal_decode` now returns `DecodeSliceError` instead of `DecodeError`, but that is not expected to affect any external callers. -- `DecodeError::InvalidLength` now refers specifically to the _number of valid symbols_ being invalid (i.e. `len % 4 == 1`), rather than just the number of input bytes. This avoids confusing scenarios when based on interpretation you could make a case for either `InvalidLength` or `InvalidByte` being appropriate. 
-- Decoding is somewhat faster (5-10%) - -# 0.21.7 - -- Support getting an alphabet's contents as a str via `Alphabet::as_str()` - -# 0.21.6 - -- Improved introductory documentation and example - -# 0.21.5 - -- Add `Debug` and `Clone` impls for the general purpose Engine - -# 0.21.4 - -- Make `encoded_len` `const`, allowing the creation of arrays sized to encode compile-time-known data lengths - -# 0.21.3 - -- Implement `source` instead of `cause` on Error types -- Roll back MSRV to 1.48.0 so Debian can continue to live in a time warp -- Slightly faster chunked encoding for short inputs -- Decrease binary size - -# 0.21.2 - -- Rollback MSRV to 1.57.0 -- only dev dependencies need 1.60, not the main code - -# 0.21.1 - -- Remove the possibility of panicking during decoded length calculations -- `DecoderReader` no longer sometimes erroneously ignores - padding [#226](https://github.com/marshallpierce/rust-base64/issues/226) - -## Breaking changes - -- `Engine.internal_decode` return type changed -- Update MSRV to 1.60.0 - -# 0.21.0 - -## Migration - -### Functions - -| < 0.20 function | 0.21 equivalent | -|-------------------------|-------------------------------------------------------------------------------------| -| `encode()` | `engine::general_purpose::STANDARD.encode()` or `prelude::BASE64_STANDARD.encode()` | -| `encode_config()` | `engine.encode()` | -| `encode_config_buf()` | `engine.encode_string()` | -| `encode_config_slice()` | `engine.encode_slice()` | -| `decode()` | `engine::general_purpose::STANDARD.decode()` or `prelude::BASE64_STANDARD.decode()` | -| `decode_config()` | `engine.decode()` | -| `decode_config_buf()` | `engine.decode_vec()` | -| `decode_config_slice()` | `engine.decode_slice()` | - -The short-lived 0.20 functions were the 0.13 functions with `config` replaced with `engine`. - -### Padding - -If applicable, use the preset engines `engine::STANDARD`, `engine::STANDARD_NO_PAD`, `engine::URL_SAFE`, -or `engine::URL_SAFE_NO_PAD`. -The `NO_PAD` ones require that padding is absent when decoding, and the others require that -canonical padding is present . - -If you need the < 0.20 behavior that did not care about padding, or want to recreate < 0.20.0's predefined `Config`s -precisely, see the following table. - -| 0.13.1 Config | 0.20.0+ alphabet | `encode_padding` | `decode_padding_mode` | -|-----------------|------------------|------------------|-----------------------| -| STANDARD | STANDARD | true | Indifferent | -| STANDARD_NO_PAD | STANDARD | false | Indifferent | -| URL_SAFE | URL_SAFE | true | Indifferent | -| URL_SAFE_NO_PAD | URL_SAFE | false | Indifferent | - -# 0.21.0-rc.1 - -- Restore the ability to decode into a slice of precisely the correct length with `Engine.decode_slice_unchecked`. -- Add `Engine` as a `pub use` in `prelude`. - -# 0.21.0-beta.2 - -## Breaking changes - -- Re-exports of preconfigured engines in `engine` are removed in favor of `base64::prelude::...` that are better suited - to those who wish to `use` the entire path to a name. - -# 0.21.0-beta.1 - -## Breaking changes - -- `FastPortable` was only meant to be an interim name, and shouldn't have shipped in 0.20. It is now `GeneralPurpose` to - make its intended usage more clear. -- `GeneralPurpose` and its config are now `pub use`'d in the `engine` module for convenience. -- Change a few `from()` functions to be `new()`. 
`from()` causes confusing compiler errors because of confusion - with `From::from`, and is a little misleading because some of those invocations are not very cheap as one would - usually expect from a `from` call. -- `encode*` and `decode*` top level functions are now methods on `Engine`. -- `DEFAULT_ENGINE` was replaced by `engine::general_purpose::STANDARD` -- Predefined engine consts `engine::general_purpose::{STANDARD, STANDARD_NO_PAD, URL_SAFE, URL_SAFE_NO_PAD}` - - These are `pub use`d into `engine` as well -- The `*_slice` decode/encode functions now return an error instead of panicking when the output slice is too small - - As part of this, there isn't now a public way to decode into a slice _exactly_ the size needed for inputs that - aren't multiples of 4 tokens. If adding up to 2 bytes to always be a multiple of 3 bytes for the decode buffer is - a problem, file an issue. - -## Other changes - -- `decoded_len_estimate()` is provided to make it easy to size decode buffers correctly. - -# 0.20.0 - -## Breaking changes - -- Update MSRV to 1.57.0 -- Decoding can now either ignore padding, require correct padding, or require no padding. The default is to require - correct padding. - - The `NO_PAD` config now requires that padding be absent when decoding. - -## 0.20.0-alpha.1 - -### Breaking changes - -- Extended the `Config` concept into the `Engine` abstraction, allowing the user to pick different encoding / decoding - implementations. - - What was formerly the only algorithm is now the `FastPortable` engine, so named because it's portable (works on - any CPU) and relatively fast. - - This opens the door to a portable constant-time - implementation ([#153](https://github.com/marshallpierce/rust-base64/pull/153), - presumably `ConstantTimePortable`?) for security-sensitive applications that need side-channel resistance, and - CPU-specific SIMD implementations for more speed. - - Standard base64 per the RFC is available via `DEFAULT_ENGINE`. To use different alphabets or other settings ( - padding, etc), create your own engine instance. -- `CharacterSet` is now `Alphabet` (per the RFC), and allows creating custom alphabets. The corresponding tables that - were previously code-generated are now built dynamically. -- Since there are already multiple breaking changes, various functions are renamed to be more consistent and - discoverable. -- MSRV is now 1.47.0 to allow various things to use `const fn`. -- `DecoderReader` now owns its inner reader, and can expose it via `into_inner()`. For symmetry, `EncoderWriter` can do - the same with its writer. -- `encoded_len` is now public so you can size encode buffers precisely. - -# 0.13.1 - -- More precise decode buffer sizing, avoiding unnecessary allocation in `decode_config`. - -# 0.13.0 - -- Config methods are const -- Added `EncoderStringWriter` to allow encoding directly to a String -- `EncoderWriter` now owns its delegate writer rather than keeping a reference to it (though refs still work) - - As a consequence, it is now possible to extract the delegate writer from an `EncoderWriter` via `finish()`, which - returns `Result<W>` instead of `Result<()>`. If you were calling `finish()` explicitly, you will now need to - use `let _ = foo.finish()` instead of just `foo.finish()` to avoid a warning about the unused value. -- When decoding input that has both an invalid length and an invalid symbol as the last byte, `InvalidByte` will be - emitted instead of `InvalidLength` to make the problem more obvious. 
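
The `finish()` behaviour described in the last bullet above can be illustrated with a small sketch; it is not part of this patch and assumes the current engine-based `EncoderWriter` constructor rather than the 0.13-era `Config` one:

```rust
use std::io::Write;
use base64::{engine::general_purpose::STANDARD, write::EncoderWriter};

fn encode_to_vec(data: &[u8]) -> std::io::Result<Vec<u8>> {
    // The encoder owns its delegate writer (here a Vec<u8>).
    let mut enc = EncoderWriter::new(Vec::new(), &STANDARD);
    enc.write_all(data)?;
    // finish() flushes any buffered partial chunk and returns the delegate,
    // so binding the result avoids the unused-Result warning noted above.
    let encoded: Vec<u8> = enc.finish()?;
    Ok(encoded)
}
```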
-
-# 0.12.2
-
-- Add `BinHex` alphabet
-
-# 0.12.1
-
-- Add `Bcrypt` alphabet
-
-# 0.12.0
-
-- A `Read` implementation (`DecoderReader`) to let users transparently decode data from a b64 input source
-- IMAP's modified b64 alphabet
-- Relaxed type restrictions to just `AsRef<[u8]>` for main `encode*`/`decode*` functions
-- A minor performance improvement in encoding
-
-# 0.11.0
-
-- Minimum rust version 1.34.0
-- `no_std` is now supported via the two new features `alloc` and `std`.
-
-# 0.10.1
-
-- Minimum rust version 1.27.2
-- Fix bug in streaming encoding ([#90](https://github.com/marshallpierce/rust-base64/pull/90)): if the underlying writer
-  didn't write all the bytes given to it, the remaining bytes would not be retried later. See the docs
-  on `EncoderWriter::write`.
-- Make it configurable whether or not to return an error when decoding detects excess trailing bits.
-
-# 0.10.0
-
-- Remove line wrapping. Line wrapping was never a great conceptual fit in this library, and other features (streaming
-  encoding, etc) either couldn't support it or could support only special cases of it with a great increase in
-  complexity. Line wrapping has been pulled out into a [line-wrap](https://crates.io/crates/line-wrap) crate, so it's
-  still available if you need it.
-    - `Base64Display` creation no longer uses a `Result` because it can't fail, which means its helper methods for
-      common configs that `unwrap()` for you are no longer needed
-- Add a streaming encoder `Write` impl to transparently base64 as you write.
-- Remove the remaining `unsafe` code.
-- Remove whitespace stripping to simplify `no_std` support. No out of the box configs use it, and it's trivial to do
-  yourself if needed: `filter(|b| !b" \n\t\r\x0b\x0c".contains(b))`.
-- Detect invalid trailing symbols when decoding and return an error rather than silently ignoring them.
-
-# 0.9.3
-
-- Update safemem
-
-# 0.9.2
-
-- Derive `Clone` for `DecodeError`.
-
-# 0.9.1
-
-- Add support for `crypt(3)`'s base64 variant.
-
-# 0.9.0
-
-- `decode_config_slice` function for no-allocation decoding, analogous to `encode_config_slice`
-- Decode performance optimization
-
-# 0.8.0
-
-- `encode_config_slice` function for no-allocation encoding
-
-# 0.7.0
-
-- `STANDARD_NO_PAD` config
-- `Base64Display` heap-free wrapper for use in format strings, etc
-
-# 0.6.0
-
-- Decode performance improvements
-- Use `unsafe` in fewer places
-- Added fuzzers
-
-# 0.5.2
-
-- Avoid usize overflow when calculating length
-- Better line wrapping performance
-
-# 0.5.1
-
-- Temporarily disable line wrapping
-- Add Apache 2.0 license
-
-# 0.5.0
-
-- MIME support, including configurable line endings and line wrapping
-- Removed `decode_ws`
-- Renamed `Base64Error` to `DecodeError`
-
-# 0.4.1
-
-- Allow decoding an `AsRef<[u8]>` instead of just a `&str`
-
-# 0.4.0
-
-- Configurable padding
-- Encode performance improvements
-
-# 0.3.0
-
-- Added encode/decode functions that do not allocate their own storage
-- Decode performance improvements
-- Extraneous padding bytes are no longer ignored. Now, an error will be returned.
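
As a companion to the 0.21 migration table earlier in these notes, here is a minimal sketch of the engine-based calls that replaced the old free functions; the sample input and the choice of the `STANDARD` engine are illustrative, not something this patch depends on:

```rust
use base64::{engine::general_purpose::STANDARD, Engine as _};

fn roundtrip() -> Result<(), base64::DecodeError> {
    // encode()/encode_config() became a method on an Engine.
    let encoded = STANDARD.encode(b"foobar"); // "Zm9vYmFy"
    // decode()/decode_config() likewise became Engine::decode.
    let decoded = STANDARD.decode(&encoded)?;
    assert_eq!(decoded, b"foobar".to_vec());
    Ok(())
}
```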
diff --git a/vendor/base64/benches/benchmarks.rs b/vendor/base64/benches/benchmarks.rs deleted file mode 100644 index 8f041854e1085a..00000000000000 --- a/vendor/base64/benches/benchmarks.rs +++ /dev/null @@ -1,238 +0,0 @@ -#[macro_use] -extern crate criterion; - -use base64::{ - display, - engine::{general_purpose::STANDARD, Engine}, - write, -}; -use criterion::{black_box, Bencher, BenchmarkId, Criterion, Throughput}; -use rand::{Rng, SeedableRng}; -use std::io::{self, Read, Write}; - -fn do_decode_bench(b: &mut Bencher, &size: &usize) { - let mut v: Vec<u8> = Vec::with_capacity(size * 3 / 4); - fill(&mut v); - let encoded = STANDARD.encode(&v); - - b.iter(|| { - let orig = STANDARD.decode(&encoded); - black_box(&orig); - }); -} - -fn do_decode_bench_reuse_buf(b: &mut Bencher, &size: &usize) { - let mut v: Vec<u8> = Vec::with_capacity(size * 3 / 4); - fill(&mut v); - let encoded = STANDARD.encode(&v); - - let mut buf = Vec::new(); - b.iter(|| { - STANDARD.decode_vec(&encoded, &mut buf).unwrap(); - black_box(&buf); - buf.clear(); - }); -} - -fn do_decode_bench_slice(b: &mut Bencher, &size: &usize) { - let mut v: Vec<u8> = Vec::with_capacity(size * 3 / 4); - fill(&mut v); - let encoded = STANDARD.encode(&v); - - let mut buf = vec![0; size]; - b.iter(|| { - STANDARD.decode_slice(&encoded, &mut buf).unwrap(); - black_box(&buf); - }); -} - -fn do_decode_bench_stream(b: &mut Bencher, &size: &usize) { - let mut v: Vec<u8> = Vec::with_capacity(size * 3 / 4); - fill(&mut v); - let encoded = STANDARD.encode(&v); - - let mut buf = vec![0; size]; - buf.truncate(0); - - b.iter(|| { - let mut cursor = io::Cursor::new(&encoded[..]); - let mut decoder = base64::read::DecoderReader::new(&mut cursor, &STANDARD); - decoder.read_to_end(&mut buf).unwrap(); - buf.clear(); - black_box(&buf); - }); -} - -fn do_encode_bench(b: &mut Bencher, &size: &usize) { - let mut v: Vec<u8> = Vec::with_capacity(size); - fill(&mut v); - b.iter(|| { - let e = STANDARD.encode(&v); - black_box(&e); - }); -} - -fn do_encode_bench_display(b: &mut Bencher, &size: &usize) { - let mut v: Vec<u8> = Vec::with_capacity(size); - fill(&mut v); - b.iter(|| { - let e = format!("{}", display::Base64Display::new(&v, &STANDARD)); - black_box(&e); - }); -} - -fn do_encode_bench_reuse_buf(b: &mut Bencher, &size: &usize) { - let mut v: Vec<u8> = Vec::with_capacity(size); - fill(&mut v); - let mut buf = String::new(); - b.iter(|| { - STANDARD.encode_string(&v, &mut buf); - buf.clear(); - }); -} - -fn do_encode_bench_slice(b: &mut Bencher, &size: &usize) { - let mut v: Vec<u8> = Vec::with_capacity(size); - fill(&mut v); - // conservative estimate of encoded size - let mut buf = vec![0; v.len() * 2]; - b.iter(|| STANDARD.encode_slice(&v, &mut buf).unwrap()); -} - -fn do_encode_bench_stream(b: &mut Bencher, &size: &usize) { - let mut v: Vec<u8> = Vec::with_capacity(size); - fill(&mut v); - let mut buf = Vec::with_capacity(size * 2); - - b.iter(|| { - buf.clear(); - let mut stream_enc = write::EncoderWriter::new(&mut buf, &STANDARD); - stream_enc.write_all(&v).unwrap(); - stream_enc.flush().unwrap(); - }); -} - -fn do_encode_bench_string_stream(b: &mut Bencher, &size: &usize) { - let mut v: Vec<u8> = Vec::with_capacity(size); - fill(&mut v); - - b.iter(|| { - let mut stream_enc = write::EncoderStringWriter::new(&STANDARD); - stream_enc.write_all(&v).unwrap(); - stream_enc.flush().unwrap(); - let _ = stream_enc.into_inner(); - }); -} - -fn do_encode_bench_string_reuse_buf_stream(b: &mut Bencher, &size: &usize) { - let mut v: Vec<u8> = 
Vec::with_capacity(size); - fill(&mut v); - - let mut buf = String::new(); - b.iter(|| { - buf.clear(); - let mut stream_enc = write::EncoderStringWriter::from_consumer(&mut buf, &STANDARD); - stream_enc.write_all(&v).unwrap(); - stream_enc.flush().unwrap(); - let _ = stream_enc.into_inner(); - }); -} - -fn fill(v: &mut Vec<u8>) { - let cap = v.capacity(); - // weak randomness is plenty; we just want to not be completely friendly to the branch predictor - let mut r = rand::rngs::SmallRng::from_entropy(); - while v.len() < cap { - v.push(r.gen::<u8>()); - } -} - -const BYTE_SIZES: [usize; 5] = [3, 50, 100, 500, 3 * 1024]; - -// Benchmarks over these byte sizes take longer so we will run fewer samples to -// keep the benchmark runtime reasonable. -const LARGE_BYTE_SIZES: [usize; 3] = [3 * 1024 * 1024, 10 * 1024 * 1024, 30 * 1024 * 1024]; - -fn encode_benchmarks(c: &mut Criterion, label: &str, byte_sizes: &[usize]) { - let mut group = c.benchmark_group(label); - group - .warm_up_time(std::time::Duration::from_millis(500)) - .measurement_time(std::time::Duration::from_secs(3)); - - for size in byte_sizes { - group - .throughput(Throughput::Bytes(*size as u64)) - .bench_with_input(BenchmarkId::new("encode", size), size, do_encode_bench) - .bench_with_input( - BenchmarkId::new("encode_display", size), - size, - do_encode_bench_display, - ) - .bench_with_input( - BenchmarkId::new("encode_reuse_buf", size), - size, - do_encode_bench_reuse_buf, - ) - .bench_with_input( - BenchmarkId::new("encode_slice", size), - size, - do_encode_bench_slice, - ) - .bench_with_input( - BenchmarkId::new("encode_reuse_buf_stream", size), - size, - do_encode_bench_stream, - ) - .bench_with_input( - BenchmarkId::new("encode_string_stream", size), - size, - do_encode_bench_string_stream, - ) - .bench_with_input( - BenchmarkId::new("encode_string_reuse_buf_stream", size), - size, - do_encode_bench_string_reuse_buf_stream, - ); - } - - group.finish(); -} - -fn decode_benchmarks(c: &mut Criterion, label: &str, byte_sizes: &[usize]) { - let mut group = c.benchmark_group(label); - - for size in byte_sizes { - group - .warm_up_time(std::time::Duration::from_millis(500)) - .measurement_time(std::time::Duration::from_secs(3)) - .throughput(Throughput::Bytes(*size as u64)) - .bench_with_input(BenchmarkId::new("decode", size), size, do_decode_bench) - .bench_with_input( - BenchmarkId::new("decode_reuse_buf", size), - size, - do_decode_bench_reuse_buf, - ) - .bench_with_input( - BenchmarkId::new("decode_slice", size), - size, - do_decode_bench_slice, - ) - .bench_with_input( - BenchmarkId::new("decode_stream", size), - size, - do_decode_bench_stream, - ); - } - - group.finish(); -} - -fn bench(c: &mut Criterion) { - encode_benchmarks(c, "encode_small_input", &BYTE_SIZES[..]); - encode_benchmarks(c, "encode_large_input", &LARGE_BYTE_SIZES[..]); - decode_benchmarks(c, "decode_small_input", &BYTE_SIZES[..]); - decode_benchmarks(c, "decode_large_input", &LARGE_BYTE_SIZES[..]); -} - -criterion_group!(benches, bench); -criterion_main!(benches); diff --git a/vendor/base64/clippy.toml b/vendor/base64/clippy.toml deleted file mode 100644 index 11d46a73f3328a..00000000000000 --- a/vendor/base64/clippy.toml +++ /dev/null @@ -1 +0,0 @@ -msrv = "1.48.0" diff --git a/vendor/base64/examples/base64.rs b/vendor/base64/examples/base64.rs deleted file mode 100644 index 0c8aa3fe76c50f..00000000000000 --- a/vendor/base64/examples/base64.rs +++ /dev/null @@ -1,81 +0,0 @@ -use std::fs::File; -use std::io::{self, Read}; -use std::path::PathBuf; -use 
std::process; - -use base64::{alphabet, engine, read, write}; -use clap::Parser; - -#[derive(Clone, Debug, Parser, strum::EnumString, Default)] -#[strum(serialize_all = "kebab-case")] -enum Alphabet { - #[default] - Standard, - UrlSafe, -} - -/// Base64 encode or decode FILE (or standard input), to standard output. -#[derive(Debug, Parser)] -struct Opt { - /// Decode the base64-encoded input (default: encode the input as base64). - #[structopt(short = 'd', long = "decode")] - decode: bool, - - /// The encoding alphabet: "standard" (default) or "url-safe". - #[structopt(long = "alphabet")] - alphabet: Option<Alphabet>, - - /// Omit padding characters while encoding, and reject them while decoding. - #[structopt(short = 'p', long = "no-padding")] - no_padding: bool, - - /// The file to encode or decode. - #[structopt(name = "FILE", parse(from_os_str))] - file: Option<PathBuf>, -} - -fn main() { - let opt = Opt::parse(); - let stdin; - let mut input: Box<dyn Read> = match opt.file { - None => { - stdin = io::stdin(); - Box::new(stdin.lock()) - } - Some(ref f) if f.as_os_str() == "-" => { - stdin = io::stdin(); - Box::new(stdin.lock()) - } - Some(f) => Box::new(File::open(f).unwrap()), - }; - - let alphabet = opt.alphabet.unwrap_or_default(); - let engine = engine::GeneralPurpose::new( - &match alphabet { - Alphabet::Standard => alphabet::STANDARD, - Alphabet::UrlSafe => alphabet::URL_SAFE, - }, - match opt.no_padding { - true => engine::general_purpose::NO_PAD, - false => engine::general_purpose::PAD, - }, - ); - - let stdout = io::stdout(); - let mut stdout = stdout.lock(); - let r = if opt.decode { - let mut decoder = read::DecoderReader::new(&mut input, &engine); - io::copy(&mut decoder, &mut stdout) - } else { - let mut encoder = write::EncoderWriter::new(&mut stdout, &engine); - io::copy(&mut input, &mut encoder) - }; - if let Err(e) = r { - eprintln!( - "Base64 {} failed with {}", - if opt.decode { "decode" } else { "encode" }, - e - ); - process::exit(1); - } -} diff --git a/vendor/base64/icon_CLion.svg b/vendor/base64/icon_CLion.svg deleted file mode 100644 index e9edb0445ea387..00000000000000 --- a/vendor/base64/icon_CLion.svg +++ /dev/null @@ -1,34 +0,0 @@ -<svg id="Layer_1" data-name="Layer 1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" viewBox="0 0 128 128"> - <defs> - <linearGradient id="linear-gradient" x1="40.69" y1="-676.56" x2="83.48" y2="-676.56" gradientTransform="matrix(1, 0, 0, -1, 0, -648.86)" gradientUnits="userSpaceOnUse"> - <stop offset="0" stop-color="#ed358c"/> - <stop offset="0.16" stop-color="#e9388c"/> - <stop offset="0.3" stop-color="#de418c"/> - <stop offset="0.43" stop-color="#cc508c"/> - <stop offset="0.57" stop-color="#b2658d"/> - <stop offset="0.7" stop-color="#90808d"/> - <stop offset="0.83" stop-color="#67a18e"/> - <stop offset="0.95" stop-color="#37c78f"/> - <stop offset="1" stop-color="#22d88f"/> - </linearGradient> - <linearGradient id="linear-gradient-2" x1="32.58" y1="-665.27" x2="13.76" y2="-791.59" gradientTransform="matrix(1, 0, 0, -1, 0, -648.86)" gradientUnits="userSpaceOnUse"> - <stop offset="0.09" stop-color="#22d88f"/> - <stop offset="0.9" stop-color="#029de0"/> - </linearGradient> - <linearGradient id="linear-gradient-3" x1="116.68" y1="-660.66" x2="-12.09" y2="-796.66" xlink:href="#linear-gradient-2"/> - <linearGradient id="linear-gradient-4" x1="73.35" y1="-739.1" x2="122.29" y2="-746.06" xlink:href="#linear-gradient-2"/> - </defs> - <title>icon_CLion - - - - - - - - - - - - - diff --git 
a/vendor/base64/src/alphabet.rs b/vendor/base64/src/alphabet.rs deleted file mode 100644 index b07bfdfe65823a..00000000000000 --- a/vendor/base64/src/alphabet.rs +++ /dev/null @@ -1,285 +0,0 @@ -//! Provides [Alphabet] and constants for alphabets commonly used in the wild. - -use crate::PAD_BYTE; -use core::{convert, fmt}; -#[cfg(any(feature = "std", test))] -use std::error; - -const ALPHABET_SIZE: usize = 64; - -/// An alphabet defines the 64 ASCII characters (symbols) used for base64. -/// -/// Common alphabets are provided as constants, and custom alphabets -/// can be made via `from_str` or the `TryFrom` implementation. -/// -/// # Examples -/// -/// Building and using a custom Alphabet: -/// -/// ``` -/// let custom = base64::alphabet::Alphabet::new("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/").unwrap(); -/// -/// let engine = base64::engine::GeneralPurpose::new( -/// &custom, -/// base64::engine::general_purpose::PAD); -/// ``` -/// -/// Building a const: -/// -/// ``` -/// use base64::alphabet::Alphabet; -/// -/// static CUSTOM: Alphabet = { -/// // Result::unwrap() isn't const yet, but panic!() is OK -/// match Alphabet::new("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/") { -/// Ok(x) => x, -/// Err(_) => panic!("creation of alphabet failed"), -/// } -/// }; -/// ``` -/// -/// Building lazily: -/// -/// ``` -/// use base64::{ -/// alphabet::Alphabet, -/// engine::{general_purpose::GeneralPurpose, GeneralPurposeConfig}, -/// }; -/// use once_cell::sync::Lazy; -/// -/// static CUSTOM: Lazy = Lazy::new(|| -/// Alphabet::new("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/").unwrap() -/// ); -/// ``` -#[derive(Clone, Debug, Eq, PartialEq)] -pub struct Alphabet { - pub(crate) symbols: [u8; ALPHABET_SIZE], -} - -impl Alphabet { - /// Performs no checks so that it can be const. - /// Used only for known-valid strings. - const fn from_str_unchecked(alphabet: &str) -> Self { - let mut symbols = [0_u8; ALPHABET_SIZE]; - let source_bytes = alphabet.as_bytes(); - - // a way to copy that's allowed in const fn - let mut index = 0; - while index < ALPHABET_SIZE { - symbols[index] = source_bytes[index]; - index += 1; - } - - Self { symbols } - } - - /// Create an `Alphabet` from a string of 64 unique printable ASCII bytes. - /// - /// The `=` byte is not allowed as it is used for padding. - pub const fn new(alphabet: &str) -> Result { - let bytes = alphabet.as_bytes(); - if bytes.len() != ALPHABET_SIZE { - return Err(ParseAlphabetError::InvalidLength); - } - - { - let mut index = 0; - while index < ALPHABET_SIZE { - let byte = bytes[index]; - - // must be ascii printable. 127 (DEL) is commonly considered printable - // for some reason but clearly unsuitable for base64. - if !(byte >= 32_u8 && byte <= 126_u8) { - return Err(ParseAlphabetError::UnprintableByte(byte)); - } - // = is assumed to be padding, so cannot be used as a symbol - if byte == PAD_BYTE { - return Err(ParseAlphabetError::ReservedByte(byte)); - } - - // Check for duplicates while staying within what const allows. - // It's n^2, but only over 64 hot bytes, and only once, so it's likely in the single digit - // microsecond range. 
- - let mut probe_index = 0; - while probe_index < ALPHABET_SIZE { - if probe_index == index { - probe_index += 1; - continue; - } - - let probe_byte = bytes[probe_index]; - - if byte == probe_byte { - return Err(ParseAlphabetError::DuplicatedByte(byte)); - } - - probe_index += 1; - } - - index += 1; - } - } - - Ok(Self::from_str_unchecked(alphabet)) - } - - /// Create a `&str` from the symbols in the `Alphabet` - pub fn as_str(&self) -> &str { - core::str::from_utf8(&self.symbols).unwrap() - } -} - -impl convert::TryFrom<&str> for Alphabet { - type Error = ParseAlphabetError; - - fn try_from(value: &str) -> Result { - Self::new(value) - } -} - -/// Possible errors when constructing an [Alphabet] from a `str`. -#[derive(Debug, Eq, PartialEq)] -pub enum ParseAlphabetError { - /// Alphabets must be 64 ASCII bytes - InvalidLength, - /// All bytes must be unique - DuplicatedByte(u8), - /// All bytes must be printable (in the range `[32, 126]`). - UnprintableByte(u8), - /// `=` cannot be used - ReservedByte(u8), -} - -impl fmt::Display for ParseAlphabetError { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match self { - Self::InvalidLength => write!(f, "Invalid length - must be 64 bytes"), - Self::DuplicatedByte(b) => write!(f, "Duplicated byte: {:#04x}", b), - Self::UnprintableByte(b) => write!(f, "Unprintable byte: {:#04x}", b), - Self::ReservedByte(b) => write!(f, "Reserved byte: {:#04x}", b), - } - } -} - -#[cfg(any(feature = "std", test))] -impl error::Error for ParseAlphabetError {} - -/// The standard alphabet (with `+` and `/`) specified in [RFC 4648][]. -/// -/// [RFC 4648]: https://datatracker.ietf.org/doc/html/rfc4648#section-4 -pub const STANDARD: Alphabet = Alphabet::from_str_unchecked( - "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/", -); - -/// The URL-safe alphabet (with `-` and `_`) specified in [RFC 4648][]. -/// -/// [RFC 4648]: https://datatracker.ietf.org/doc/html/rfc4648#section-5 -pub const URL_SAFE: Alphabet = Alphabet::from_str_unchecked( - "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_", -); - -/// The `crypt(3)` alphabet (with `.` and `/` as the _first_ two characters). -/// -/// Not standardized, but folk wisdom on the net asserts that this alphabet is what crypt uses. -pub const CRYPT: Alphabet = Alphabet::from_str_unchecked( - "./0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz", -); - -/// The bcrypt alphabet. -pub const BCRYPT: Alphabet = Alphabet::from_str_unchecked( - "./ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789", -); - -/// The alphabet used in IMAP-modified UTF-7 (with `+` and `,`). -/// -/// See [RFC 3501](https://tools.ietf.org/html/rfc3501#section-5.1.3) -pub const IMAP_MUTF7: Alphabet = Alphabet::from_str_unchecked( - "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+,", -); - -/// The alphabet used in BinHex 4.0 files. 
-/// -/// See [BinHex 4.0 Definition](http://files.stairways.com/other/binhex-40-specs-info.txt) -pub const BIN_HEX: Alphabet = Alphabet::from_str_unchecked( - "!\"#$%&'()*+,-012345689@ABCDEFGHIJKLMNPQRSTUVXYZ[`abcdefhijklmpqr", -); - -#[cfg(test)] -mod tests { - use crate::alphabet::*; - use core::convert::TryFrom as _; - - #[test] - fn detects_duplicate_start() { - assert_eq!( - ParseAlphabetError::DuplicatedByte(b'A'), - Alphabet::new("AACDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/") - .unwrap_err() - ); - } - - #[test] - fn detects_duplicate_end() { - assert_eq!( - ParseAlphabetError::DuplicatedByte(b'/'), - Alphabet::new("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789//") - .unwrap_err() - ); - } - - #[test] - fn detects_duplicate_middle() { - assert_eq!( - ParseAlphabetError::DuplicatedByte(b'Z'), - Alphabet::new("ABCDEFGHIJKLMNOPQRSTUVWXYZZbcdefghijklmnopqrstuvwxyz0123456789+/") - .unwrap_err() - ); - } - - #[test] - fn detects_length() { - assert_eq!( - ParseAlphabetError::InvalidLength, - Alphabet::new( - "xxxxxxxxxABCDEFGHIJKLMNOPQRSTUVWXYZZbcdefghijklmnopqrstuvwxyz0123456789+/", - ) - .unwrap_err() - ); - } - - #[test] - fn detects_padding() { - assert_eq!( - ParseAlphabetError::ReservedByte(b'='), - Alphabet::new("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+=") - .unwrap_err() - ); - } - - #[test] - fn detects_unprintable() { - // form feed - assert_eq!( - ParseAlphabetError::UnprintableByte(0xc), - Alphabet::new("\x0cBCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/") - .unwrap_err() - ); - } - - #[test] - fn same_as_unchecked() { - assert_eq!( - STANDARD, - Alphabet::try_from("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/") - .unwrap() - ); - } - - #[test] - fn str_same_as_input() { - let alphabet = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"; - let a = Alphabet::try_from(alphabet).unwrap(); - assert_eq!(alphabet, a.as_str()) - } -} diff --git a/vendor/base64/src/chunked_encoder.rs b/vendor/base64/src/chunked_encoder.rs deleted file mode 100644 index 817b339f3b8151..00000000000000 --- a/vendor/base64/src/chunked_encoder.rs +++ /dev/null @@ -1,172 +0,0 @@ -use crate::{ - encode::add_padding, - engine::{Config, Engine}, -}; -#[cfg(any(feature = "alloc", test))] -use alloc::string::String; -#[cfg(any(feature = "alloc", test))] -use core::str; - -/// The output mechanism for ChunkedEncoder's encoded bytes. -pub trait Sink { - type Error; - - /// Handle a chunk of encoded base64 data (as UTF-8 bytes) - fn write_encoded_bytes(&mut self, encoded: &[u8]) -> Result<(), Self::Error>; -} - -/// A base64 encoder that emits encoded bytes in chunks without heap allocation. -pub struct ChunkedEncoder<'e, E: Engine + ?Sized> { - engine: &'e E, -} - -impl<'e, E: Engine + ?Sized> ChunkedEncoder<'e, E> { - pub fn new(engine: &'e E) -> ChunkedEncoder<'e, E> { - ChunkedEncoder { engine } - } - - pub fn encode(&self, bytes: &[u8], sink: &mut S) -> Result<(), S::Error> { - const BUF_SIZE: usize = 1024; - const CHUNK_SIZE: usize = BUF_SIZE / 4 * 3; - - let mut buf = [0; BUF_SIZE]; - for chunk in bytes.chunks(CHUNK_SIZE) { - let mut len = self.engine.internal_encode(chunk, &mut buf); - if chunk.len() != CHUNK_SIZE && self.engine.config().encode_padding() { - // Final, potentially partial, chunk. - // Only need to consider if padding is needed on a partial chunk since full chunk - // is a multiple of 3, which therefore won't be padded. 
- // Pad output to multiple of four bytes if required by config. - len += add_padding(len, &mut buf[len..]); - } - sink.write_encoded_bytes(&buf[..len])?; - } - - Ok(()) - } -} - -// A really simple sink that just appends to a string -#[cfg(any(feature = "alloc", test))] -pub(crate) struct StringSink<'a> { - string: &'a mut String, -} - -#[cfg(any(feature = "alloc", test))] -impl<'a> StringSink<'a> { - pub(crate) fn new(s: &mut String) -> StringSink { - StringSink { string: s } - } -} - -#[cfg(any(feature = "alloc", test))] -impl<'a> Sink for StringSink<'a> { - type Error = (); - - fn write_encoded_bytes(&mut self, s: &[u8]) -> Result<(), Self::Error> { - self.string.push_str(str::from_utf8(s).unwrap()); - - Ok(()) - } -} - -#[cfg(test)] -pub mod tests { - use rand::{ - distributions::{Distribution, Uniform}, - Rng, SeedableRng, - }; - - use crate::{ - alphabet::STANDARD, - engine::general_purpose::{GeneralPurpose, GeneralPurposeConfig, PAD}, - tests::random_engine, - }; - - use super::*; - - #[test] - fn chunked_encode_empty() { - assert_eq!("", chunked_encode_str(&[], PAD)); - } - - #[test] - fn chunked_encode_intermediate_fast_loop() { - // > 8 bytes input, will enter the pretty fast loop - assert_eq!("Zm9vYmFyYmF6cXV4", chunked_encode_str(b"foobarbazqux", PAD)); - } - - #[test] - fn chunked_encode_fast_loop() { - // > 32 bytes input, will enter the uber fast loop - assert_eq!( - "Zm9vYmFyYmF6cXV4cXV1eGNvcmdlZ3JhdWx0Z2FycGx5eg==", - chunked_encode_str(b"foobarbazquxquuxcorgegraultgarplyz", PAD) - ); - } - - #[test] - fn chunked_encode_slow_loop_only() { - // < 8 bytes input, slow loop only - assert_eq!("Zm9vYmFy", chunked_encode_str(b"foobar", PAD)); - } - - #[test] - fn chunked_encode_matches_normal_encode_random_string_sink() { - let helper = StringSinkTestHelper; - chunked_encode_matches_normal_encode_random(&helper); - } - - pub fn chunked_encode_matches_normal_encode_random(sink_test_helper: &S) { - let mut input_buf: Vec = Vec::new(); - let mut output_buf = String::new(); - let mut rng = rand::rngs::SmallRng::from_entropy(); - let input_len_range = Uniform::new(1, 10_000); - - for _ in 0..20_000 { - input_buf.clear(); - output_buf.clear(); - - let buf_len = input_len_range.sample(&mut rng); - for _ in 0..buf_len { - input_buf.push(rng.gen()); - } - - let engine = random_engine(&mut rng); - - let chunk_encoded_string = sink_test_helper.encode_to_string(&engine, &input_buf); - engine.encode_string(&input_buf, &mut output_buf); - - assert_eq!(output_buf, chunk_encoded_string, "input len={}", buf_len); - } - } - - fn chunked_encode_str(bytes: &[u8], config: GeneralPurposeConfig) -> String { - let mut s = String::new(); - - let mut sink = StringSink::new(&mut s); - let engine = GeneralPurpose::new(&STANDARD, config); - let encoder = ChunkedEncoder::new(&engine); - encoder.encode(bytes, &mut sink).unwrap(); - - s - } - - // An abstraction around sinks so that we can have tests that easily to any sink implementation - pub trait SinkTestHelper { - fn encode_to_string(&self, engine: &E, bytes: &[u8]) -> String; - } - - struct StringSinkTestHelper; - - impl SinkTestHelper for StringSinkTestHelper { - fn encode_to_string(&self, engine: &E, bytes: &[u8]) -> String { - let encoder = ChunkedEncoder::new(engine); - let mut s = String::new(); - let mut sink = StringSink::new(&mut s); - encoder.encode(bytes, &mut sink).unwrap(); - - s - } - } -} diff --git a/vendor/base64/src/decode.rs b/vendor/base64/src/decode.rs deleted file mode 100644 index 6df8abad2ca202..00000000000000 --- 
a/vendor/base64/src/decode.rs +++ /dev/null @@ -1,386 +0,0 @@ -use crate::engine::{general_purpose::STANDARD, DecodeEstimate, Engine}; -#[cfg(any(feature = "alloc", test))] -use alloc::vec::Vec; -use core::fmt; -#[cfg(any(feature = "std", test))] -use std::error; - -/// Errors that can occur while decoding. -#[derive(Clone, Debug, PartialEq, Eq)] -pub enum DecodeError { - /// An invalid byte was found in the input. The offset and offending byte are provided. - /// - /// Padding characters (`=`) interspersed in the encoded form are invalid, as they may only - /// be present as the last 0-2 bytes of input. - /// - /// This error may also indicate that extraneous trailing input bytes are present, causing - /// otherwise valid padding to no longer be the last bytes of input. - InvalidByte(usize, u8), - /// The length of the input, as measured in valid base64 symbols, is invalid. - /// There must be 2-4 symbols in the last input quad. - InvalidLength(usize), - /// The last non-padding input symbol's encoded 6 bits have nonzero bits that will be discarded. - /// This is indicative of corrupted or truncated Base64. - /// Unlike [DecodeError::InvalidByte], which reports symbols that aren't in the alphabet, - /// this error is for symbols that are in the alphabet but represent nonsensical encodings. - InvalidLastSymbol(usize, u8), - /// The nature of the padding was not as configured: absent or incorrect when it must be - /// canonical, or present when it must be absent, etc. - InvalidPadding, -} - -impl fmt::Display for DecodeError { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match *self { - Self::InvalidByte(index, byte) => { - write!(f, "Invalid symbol {}, offset {}.", byte, index) - } - Self::InvalidLength(len) => write!(f, "Invalid input length: {}", len), - Self::InvalidLastSymbol(index, byte) => { - write!(f, "Invalid last symbol {}, offset {}.", byte, index) - } - Self::InvalidPadding => write!(f, "Invalid padding"), - } - } -} - -#[cfg(any(feature = "std", test))] -impl error::Error for DecodeError {} - -/// Errors that can occur while decoding into a slice. -#[derive(Clone, Debug, PartialEq, Eq)] -pub enum DecodeSliceError { - /// A [DecodeError] occurred - DecodeError(DecodeError), - /// The provided slice is too small. - OutputSliceTooSmall, -} - -impl fmt::Display for DecodeSliceError { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match self { - Self::DecodeError(e) => write!(f, "DecodeError: {}", e), - Self::OutputSliceTooSmall => write!(f, "Output slice too small"), - } - } -} - -#[cfg(any(feature = "std", test))] -impl error::Error for DecodeSliceError { - fn source(&self) -> Option<&(dyn error::Error + 'static)> { - match self { - DecodeSliceError::DecodeError(e) => Some(e), - DecodeSliceError::OutputSliceTooSmall => None, - } - } -} - -impl From for DecodeSliceError { - fn from(e: DecodeError) -> Self { - DecodeSliceError::DecodeError(e) - } -} - -/// Decode base64 using the [`STANDARD` engine](STANDARD). -/// -/// See [Engine::decode]. -#[deprecated(since = "0.21.0", note = "Use Engine::decode")] -#[cfg(any(feature = "alloc", test))] -pub fn decode>(input: T) -> Result, DecodeError> { - STANDARD.decode(input) -} - -/// Decode from string reference as octets using the specified [Engine]. -/// -/// See [Engine::decode]. -///Returns a `Result` containing a `Vec`. 
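These free functions carry `#[deprecated]` attributes pointing callers at the equivalent `Engine` methods; a minimal before/after sketch using the crate's `STANDARD` engine (the literal values are only illustrative):

    use base64::{engine::general_purpose::STANDARD, Engine as _};

    fn main() {
        // Old style (deprecated since 0.21): base64::decode("aGVsbG8=")
        // New style: call decode/encode on an Engine value.
        let decoded = STANDARD.decode("aGVsbG8=").expect("valid base64");
        assert_eq!(decoded, b"hello");

        let encoded = STANDARD.encode(b"hello");
        assert_eq!(encoded, "aGVsbG8=");
    }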
-#[deprecated(since = "0.21.0", note = "Use Engine::decode")] -#[cfg(any(feature = "alloc", test))] -pub fn decode_engine>( - input: T, - engine: &E, -) -> Result, DecodeError> { - engine.decode(input) -} - -/// Decode from string reference as octets. -/// -/// See [Engine::decode_vec]. -#[cfg(any(feature = "alloc", test))] -#[deprecated(since = "0.21.0", note = "Use Engine::decode_vec")] -pub fn decode_engine_vec>( - input: T, - buffer: &mut Vec, - engine: &E, -) -> Result<(), DecodeError> { - engine.decode_vec(input, buffer) -} - -/// Decode the input into the provided output slice. -/// -/// See [Engine::decode_slice]. -#[deprecated(since = "0.21.0", note = "Use Engine::decode_slice")] -pub fn decode_engine_slice>( - input: T, - output: &mut [u8], - engine: &E, -) -> Result { - engine.decode_slice(input, output) -} - -/// Returns a conservative estimate of the decoded size of `encoded_len` base64 symbols (rounded up -/// to the next group of 3 decoded bytes). -/// -/// The resulting length will be a safe choice for the size of a decode buffer, but may have up to -/// 2 trailing bytes that won't end up being needed. -/// -/// # Examples -/// -/// ``` -/// use base64::decoded_len_estimate; -/// -/// assert_eq!(3, decoded_len_estimate(1)); -/// assert_eq!(3, decoded_len_estimate(2)); -/// assert_eq!(3, decoded_len_estimate(3)); -/// assert_eq!(3, decoded_len_estimate(4)); -/// // start of the next quad of encoded symbols -/// assert_eq!(6, decoded_len_estimate(5)); -/// ``` -pub fn decoded_len_estimate(encoded_len: usize) -> usize { - STANDARD - .internal_decoded_len_estimate(encoded_len) - .decoded_len_estimate() -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::{ - alphabet, - engine::{general_purpose, Config, GeneralPurpose}, - tests::{assert_encode_sanity, random_engine}, - }; - use rand::{ - distributions::{Distribution, Uniform}, - Rng, SeedableRng, - }; - - #[test] - fn decode_into_nonempty_vec_doesnt_clobber_existing_prefix() { - let mut orig_data = Vec::new(); - let mut encoded_data = String::new(); - let mut decoded_with_prefix = Vec::new(); - let mut decoded_without_prefix = Vec::new(); - let mut prefix = Vec::new(); - - let prefix_len_range = Uniform::new(0, 1000); - let input_len_range = Uniform::new(0, 1000); - - let mut rng = rand::rngs::SmallRng::from_entropy(); - - for _ in 0..10_000 { - orig_data.clear(); - encoded_data.clear(); - decoded_with_prefix.clear(); - decoded_without_prefix.clear(); - prefix.clear(); - - let input_len = input_len_range.sample(&mut rng); - - for _ in 0..input_len { - orig_data.push(rng.gen()); - } - - let engine = random_engine(&mut rng); - engine.encode_string(&orig_data, &mut encoded_data); - assert_encode_sanity(&encoded_data, engine.config().encode_padding(), input_len); - - let prefix_len = prefix_len_range.sample(&mut rng); - - // fill the buf with a prefix - for _ in 0..prefix_len { - prefix.push(rng.gen()); - } - - decoded_with_prefix.resize(prefix_len, 0); - decoded_with_prefix.copy_from_slice(&prefix); - - // decode into the non-empty buf - engine - .decode_vec(&encoded_data, &mut decoded_with_prefix) - .unwrap(); - // also decode into the empty buf - engine - .decode_vec(&encoded_data, &mut decoded_without_prefix) - .unwrap(); - - assert_eq!( - prefix_len + decoded_without_prefix.len(), - decoded_with_prefix.len() - ); - assert_eq!(orig_data, decoded_without_prefix); - - // append plain decode onto prefix - prefix.append(&mut decoded_without_prefix); - - assert_eq!(prefix, decoded_with_prefix); - } - } - - #[test] - fn 
decode_slice_doesnt_clobber_existing_prefix_or_suffix() { - do_decode_slice_doesnt_clobber_existing_prefix_or_suffix(|e, input, output| { - e.decode_slice(input, output).unwrap() - }) - } - - #[test] - fn decode_slice_unchecked_doesnt_clobber_existing_prefix_or_suffix() { - do_decode_slice_doesnt_clobber_existing_prefix_or_suffix(|e, input, output| { - e.decode_slice_unchecked(input, output).unwrap() - }) - } - - #[test] - fn decode_engine_estimation_works_for_various_lengths() { - let engine = GeneralPurpose::new(&alphabet::STANDARD, general_purpose::NO_PAD); - for num_prefix_quads in 0..100 { - for suffix in &["AA", "AAA", "AAAA"] { - let mut prefix = "AAAA".repeat(num_prefix_quads); - prefix.push_str(suffix); - // make sure no overflow (and thus a panic) occurs - let res = engine.decode(prefix); - assert!(res.is_ok()); - } - } - } - - #[test] - fn decode_slice_output_length_errors() { - for num_quads in 1..100 { - let input = "AAAA".repeat(num_quads); - let mut vec = vec![0; (num_quads - 1) * 3]; - assert_eq!( - DecodeSliceError::OutputSliceTooSmall, - STANDARD.decode_slice(&input, &mut vec).unwrap_err() - ); - vec.push(0); - assert_eq!( - DecodeSliceError::OutputSliceTooSmall, - STANDARD.decode_slice(&input, &mut vec).unwrap_err() - ); - vec.push(0); - assert_eq!( - DecodeSliceError::OutputSliceTooSmall, - STANDARD.decode_slice(&input, &mut vec).unwrap_err() - ); - vec.push(0); - // now it works - assert_eq!( - num_quads * 3, - STANDARD.decode_slice(&input, &mut vec).unwrap() - ); - } - } - - fn do_decode_slice_doesnt_clobber_existing_prefix_or_suffix< - F: Fn(&GeneralPurpose, &[u8], &mut [u8]) -> usize, - >( - call_decode: F, - ) { - let mut orig_data = Vec::new(); - let mut encoded_data = String::new(); - let mut decode_buf = Vec::new(); - let mut decode_buf_copy: Vec = Vec::new(); - - let input_len_range = Uniform::new(0, 1000); - - let mut rng = rand::rngs::SmallRng::from_entropy(); - - for _ in 0..10_000 { - orig_data.clear(); - encoded_data.clear(); - decode_buf.clear(); - decode_buf_copy.clear(); - - let input_len = input_len_range.sample(&mut rng); - - for _ in 0..input_len { - orig_data.push(rng.gen()); - } - - let engine = random_engine(&mut rng); - engine.encode_string(&orig_data, &mut encoded_data); - assert_encode_sanity(&encoded_data, engine.config().encode_padding(), input_len); - - // fill the buffer with random garbage, long enough to have some room before and after - for _ in 0..5000 { - decode_buf.push(rng.gen()); - } - - // keep a copy for later comparison - decode_buf_copy.extend(decode_buf.iter()); - - let offset = 1000; - - // decode into the non-empty buf - let decode_bytes_written = - call_decode(&engine, encoded_data.as_bytes(), &mut decode_buf[offset..]); - - assert_eq!(orig_data.len(), decode_bytes_written); - assert_eq!( - orig_data, - &decode_buf[offset..(offset + decode_bytes_written)] - ); - assert_eq!(&decode_buf_copy[0..offset], &decode_buf[0..offset]); - assert_eq!( - &decode_buf_copy[offset + decode_bytes_written..], - &decode_buf[offset + decode_bytes_written..] 
- ); - } - } -} - -#[allow(deprecated)] -#[cfg(test)] -mod coverage_gaming { - use super::*; - use std::error::Error; - - #[test] - fn decode_error() { - let _ = format!("{:?}", DecodeError::InvalidPadding.clone()); - let _ = format!( - "{} {} {} {}", - DecodeError::InvalidByte(0, 0), - DecodeError::InvalidLength(0), - DecodeError::InvalidLastSymbol(0, 0), - DecodeError::InvalidPadding, - ); - } - - #[test] - fn decode_slice_error() { - let _ = format!("{:?}", DecodeSliceError::OutputSliceTooSmall.clone()); - let _ = format!( - "{} {}", - DecodeSliceError::OutputSliceTooSmall, - DecodeSliceError::DecodeError(DecodeError::InvalidPadding) - ); - let _ = DecodeSliceError::OutputSliceTooSmall.source(); - let _ = DecodeSliceError::DecodeError(DecodeError::InvalidPadding).source(); - } - - #[test] - fn deprecated_fns() { - let _ = decode(""); - let _ = decode_engine("", &crate::prelude::BASE64_STANDARD); - let _ = decode_engine_vec("", &mut Vec::new(), &crate::prelude::BASE64_STANDARD); - let _ = decode_engine_slice("", &mut [], &crate::prelude::BASE64_STANDARD); - } - - #[test] - fn decoded_len_est() { - assert_eq!(3, decoded_len_estimate(4)); - } -} diff --git a/vendor/base64/src/display.rs b/vendor/base64/src/display.rs deleted file mode 100644 index fc292f1b00a66a..00000000000000 --- a/vendor/base64/src/display.rs +++ /dev/null @@ -1,88 +0,0 @@ -//! Enables base64'd output anywhere you might use a `Display` implementation, like a format string. -//! -//! ``` -//! use base64::{display::Base64Display, engine::general_purpose::STANDARD}; -//! -//! let data = vec![0x0, 0x1, 0x2, 0x3]; -//! let wrapper = Base64Display::new(&data, &STANDARD); -//! -//! assert_eq!("base64: AAECAw==", format!("base64: {}", wrapper)); -//! ``` - -use super::chunked_encoder::ChunkedEncoder; -use crate::engine::Engine; -use core::fmt::{Display, Formatter}; -use core::{fmt, str}; - -/// A convenience wrapper for base64'ing bytes into a format string without heap allocation. -pub struct Base64Display<'a, 'e, E: Engine> { - bytes: &'a [u8], - chunked_encoder: ChunkedEncoder<'e, E>, -} - -impl<'a, 'e, E: Engine> Base64Display<'a, 'e, E> { - /// Create a `Base64Display` with the provided engine. - pub fn new(bytes: &'a [u8], engine: &'e E) -> Base64Display<'a, 'e, E> { - Base64Display { - bytes, - chunked_encoder: ChunkedEncoder::new(engine), - } - } -} - -impl<'a, 'e, E: Engine> Display for Base64Display<'a, 'e, E> { - fn fmt(&self, formatter: &mut Formatter) -> Result<(), fmt::Error> { - let mut sink = FormatterSink { f: formatter }; - self.chunked_encoder.encode(self.bytes, &mut sink) - } -} - -struct FormatterSink<'a, 'b: 'a> { - f: &'a mut Formatter<'b>, -} - -impl<'a, 'b: 'a> super::chunked_encoder::Sink for FormatterSink<'a, 'b> { - type Error = fmt::Error; - - fn write_encoded_bytes(&mut self, encoded: &[u8]) -> Result<(), Self::Error> { - // Avoid unsafe. If max performance is needed, write your own display wrapper that uses - // unsafe here to gain about 10-15%. 
- self.f - .write_str(str::from_utf8(encoded).expect("base64 data was not utf8")) - } -} - -#[cfg(test)] -mod tests { - use super::super::chunked_encoder::tests::{ - chunked_encode_matches_normal_encode_random, SinkTestHelper, - }; - use super::*; - use crate::engine::general_purpose::STANDARD; - - #[test] - fn basic_display() { - assert_eq!( - "~$Zm9vYmFy#*", - format!("~${}#*", Base64Display::new(b"foobar", &STANDARD)) - ); - assert_eq!( - "~$Zm9vYmFyZg==#*", - format!("~${}#*", Base64Display::new(b"foobarf", &STANDARD)) - ); - } - - #[test] - fn display_encode_matches_normal_encode() { - let helper = DisplaySinkTestHelper; - chunked_encode_matches_normal_encode_random(&helper); - } - - struct DisplaySinkTestHelper; - - impl SinkTestHelper for DisplaySinkTestHelper { - fn encode_to_string(&self, engine: &E, bytes: &[u8]) -> String { - format!("{}", Base64Display::new(bytes, engine)) - } - } -} diff --git a/vendor/base64/src/encode.rs b/vendor/base64/src/encode.rs deleted file mode 100644 index ae6d79074d71f9..00000000000000 --- a/vendor/base64/src/encode.rs +++ /dev/null @@ -1,492 +0,0 @@ -#[cfg(any(feature = "alloc", test))] -use alloc::string::String; -use core::fmt; -#[cfg(any(feature = "std", test))] -use std::error; - -#[cfg(any(feature = "alloc", test))] -use crate::engine::general_purpose::STANDARD; -use crate::engine::{Config, Engine}; -use crate::PAD_BYTE; - -/// Encode arbitrary octets as base64 using the [`STANDARD` engine](STANDARD). -/// -/// See [Engine::encode]. -#[allow(unused)] -#[deprecated(since = "0.21.0", note = "Use Engine::encode")] -#[cfg(any(feature = "alloc", test))] -pub fn encode>(input: T) -> String { - STANDARD.encode(input) -} - -///Encode arbitrary octets as base64 using the provided `Engine` into a new `String`. -/// -/// See [Engine::encode]. -#[allow(unused)] -#[deprecated(since = "0.21.0", note = "Use Engine::encode")] -#[cfg(any(feature = "alloc", test))] -pub fn encode_engine>(input: T, engine: &E) -> String { - engine.encode(input) -} - -///Encode arbitrary octets as base64 into a supplied `String`. -/// -/// See [Engine::encode_string]. -#[allow(unused)] -#[deprecated(since = "0.21.0", note = "Use Engine::encode_string")] -#[cfg(any(feature = "alloc", test))] -pub fn encode_engine_string>( - input: T, - output_buf: &mut String, - engine: &E, -) { - engine.encode_string(input, output_buf) -} - -/// Encode arbitrary octets as base64 into a supplied slice. -/// -/// See [Engine::encode_slice]. -#[allow(unused)] -#[deprecated(since = "0.21.0", note = "Use Engine::encode_slice")] -pub fn encode_engine_slice>( - input: T, - output_buf: &mut [u8], - engine: &E, -) -> Result { - engine.encode_slice(input, output_buf) -} - -/// B64-encode and pad (if configured). -/// -/// This helper exists to avoid recalculating encoded_size, which is relatively expensive on short -/// inputs. -/// -/// `encoded_size` is the encoded size calculated for `input`. -/// -/// `output` must be of size `encoded_size`. -/// -/// All bytes in `output` will be written to since it is exactly the size of the output. 
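The length arithmetic shared by `encode_with_padding`, `encoded_len`, and `add_padding` is: every complete 3-byte input group yields 4 symbols, a trailing 1 or 2 bytes yield 2 or 3 symbols, and padding (when enabled) rounds the final group up to 4 symbols. A standalone sketch of that arithmetic (the overflow handling of the real `encoded_len` is omitted here):

    // Re-statement of the padded/unpadded output-length calculation.
    fn encoded_len(bytes_len: usize, padding: bool) -> usize {
        let complete = bytes_len / 3 * 4; // each full 3-byte group -> 4 symbols
        match (bytes_len % 3, padding) {
            (0, _) => complete,
            (_, true) => complete + 4,  // partial group padded out to 4 symbols
            (1, false) => complete + 2, // 1 trailing byte -> 2 symbols
            (_, false) => complete + 3, // 2 trailing bytes -> 3 symbols
        }
    }

    fn main() {
        assert_eq!(encoded_len(0, true), 0);
        assert_eq!(encoded_len(1, true), 4);   // "Zg=="
        assert_eq!(encoded_len(1, false), 2);  // "Zg"
        assert_eq!(encoded_len(5, true), 8);
        assert_eq!(encoded_len(5, false), 7);
        assert_eq!(encoded_len(57, true), 76); // matches the test table in encode.rs
    }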
-pub(crate) fn encode_with_padding( - input: &[u8], - output: &mut [u8], - engine: &E, - expected_encoded_size: usize, -) { - debug_assert_eq!(expected_encoded_size, output.len()); - - let b64_bytes_written = engine.internal_encode(input, output); - - let padding_bytes = if engine.config().encode_padding() { - add_padding(b64_bytes_written, &mut output[b64_bytes_written..]) - } else { - 0 - }; - - let encoded_bytes = b64_bytes_written - .checked_add(padding_bytes) - .expect("usize overflow when calculating b64 length"); - - debug_assert_eq!(expected_encoded_size, encoded_bytes); -} - -/// Calculate the base64 encoded length for a given input length, optionally including any -/// appropriate padding bytes. -/// -/// Returns `None` if the encoded length can't be represented in `usize`. This will happen for -/// input lengths in approximately the top quarter of the range of `usize`. -pub const fn encoded_len(bytes_len: usize, padding: bool) -> Option { - let rem = bytes_len % 3; - - let complete_input_chunks = bytes_len / 3; - // `?` is disallowed in const, and `let Some(_) = _ else` requires 1.65.0, whereas this - // messier syntax works on 1.48 - let complete_chunk_output = - if let Some(complete_chunk_output) = complete_input_chunks.checked_mul(4) { - complete_chunk_output - } else { - return None; - }; - - if rem > 0 { - if padding { - complete_chunk_output.checked_add(4) - } else { - let encoded_rem = match rem { - 1 => 2, - // only other possible remainder is 2 - // can't use a separate _ => unreachable!() in const fns in ancient rust versions - _ => 3, - }; - complete_chunk_output.checked_add(encoded_rem) - } - } else { - Some(complete_chunk_output) - } -} - -/// Write padding characters. -/// `unpadded_output_len` is the size of the unpadded but base64 encoded data. -/// `output` is the slice where padding should be written, of length at least 2. -/// -/// Returns the number of padding bytes written. -pub(crate) fn add_padding(unpadded_output_len: usize, output: &mut [u8]) -> usize { - let pad_bytes = (4 - (unpadded_output_len % 4)) % 4; - // for just a couple bytes, this has better performance than using - // .fill(), or iterating over mutable refs, which call memset() - #[allow(clippy::needless_range_loop)] - for i in 0..pad_bytes { - output[i] = PAD_BYTE; - } - - pad_bytes -} - -/// Errors that can occur while encoding into a slice. -#[derive(Clone, Debug, PartialEq, Eq)] -pub enum EncodeSliceError { - /// The provided slice is too small. 
- OutputSliceTooSmall, -} - -impl fmt::Display for EncodeSliceError { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match self { - Self::OutputSliceTooSmall => write!(f, "Output slice too small"), - } - } -} - -#[cfg(any(feature = "std", test))] -impl error::Error for EncodeSliceError {} - -#[cfg(test)] -mod tests { - use super::*; - - use crate::{ - alphabet, - engine::general_purpose::{GeneralPurpose, NO_PAD, STANDARD}, - tests::{assert_encode_sanity, random_config, random_engine}, - }; - use rand::{ - distributions::{Distribution, Uniform}, - Rng, SeedableRng, - }; - use std::str; - - const URL_SAFE_NO_PAD_ENGINE: GeneralPurpose = GeneralPurpose::new(&alphabet::URL_SAFE, NO_PAD); - - #[test] - fn encoded_size_correct_standard() { - assert_encoded_length(0, 0, &STANDARD, true); - - assert_encoded_length(1, 4, &STANDARD, true); - assert_encoded_length(2, 4, &STANDARD, true); - assert_encoded_length(3, 4, &STANDARD, true); - - assert_encoded_length(4, 8, &STANDARD, true); - assert_encoded_length(5, 8, &STANDARD, true); - assert_encoded_length(6, 8, &STANDARD, true); - - assert_encoded_length(7, 12, &STANDARD, true); - assert_encoded_length(8, 12, &STANDARD, true); - assert_encoded_length(9, 12, &STANDARD, true); - - assert_encoded_length(54, 72, &STANDARD, true); - - assert_encoded_length(55, 76, &STANDARD, true); - assert_encoded_length(56, 76, &STANDARD, true); - assert_encoded_length(57, 76, &STANDARD, true); - - assert_encoded_length(58, 80, &STANDARD, true); - } - - #[test] - fn encoded_size_correct_no_pad() { - assert_encoded_length(0, 0, &URL_SAFE_NO_PAD_ENGINE, false); - - assert_encoded_length(1, 2, &URL_SAFE_NO_PAD_ENGINE, false); - assert_encoded_length(2, 3, &URL_SAFE_NO_PAD_ENGINE, false); - assert_encoded_length(3, 4, &URL_SAFE_NO_PAD_ENGINE, false); - - assert_encoded_length(4, 6, &URL_SAFE_NO_PAD_ENGINE, false); - assert_encoded_length(5, 7, &URL_SAFE_NO_PAD_ENGINE, false); - assert_encoded_length(6, 8, &URL_SAFE_NO_PAD_ENGINE, false); - - assert_encoded_length(7, 10, &URL_SAFE_NO_PAD_ENGINE, false); - assert_encoded_length(8, 11, &URL_SAFE_NO_PAD_ENGINE, false); - assert_encoded_length(9, 12, &URL_SAFE_NO_PAD_ENGINE, false); - - assert_encoded_length(54, 72, &URL_SAFE_NO_PAD_ENGINE, false); - - assert_encoded_length(55, 74, &URL_SAFE_NO_PAD_ENGINE, false); - assert_encoded_length(56, 75, &URL_SAFE_NO_PAD_ENGINE, false); - assert_encoded_length(57, 76, &URL_SAFE_NO_PAD_ENGINE, false); - - assert_encoded_length(58, 78, &URL_SAFE_NO_PAD_ENGINE, false); - } - - #[test] - fn encoded_size_overflow() { - assert_eq!(None, encoded_len(usize::MAX, true)); - } - - #[test] - fn encode_engine_string_into_nonempty_buffer_doesnt_clobber_prefix() { - let mut orig_data = Vec::new(); - let mut prefix = String::new(); - let mut encoded_data_no_prefix = String::new(); - let mut encoded_data_with_prefix = String::new(); - let mut decoded = Vec::new(); - - let prefix_len_range = Uniform::new(0, 1000); - let input_len_range = Uniform::new(0, 1000); - - let mut rng = rand::rngs::SmallRng::from_entropy(); - - for _ in 0..10_000 { - orig_data.clear(); - prefix.clear(); - encoded_data_no_prefix.clear(); - encoded_data_with_prefix.clear(); - decoded.clear(); - - let input_len = input_len_range.sample(&mut rng); - - for _ in 0..input_len { - orig_data.push(rng.gen()); - } - - let prefix_len = prefix_len_range.sample(&mut rng); - for _ in 0..prefix_len { - // getting convenient random single-byte printable chars that aren't base64 is - // annoying - prefix.push('#'); - } - 
encoded_data_with_prefix.push_str(&prefix); - - let engine = random_engine(&mut rng); - engine.encode_string(&orig_data, &mut encoded_data_no_prefix); - engine.encode_string(&orig_data, &mut encoded_data_with_prefix); - - assert_eq!( - encoded_data_no_prefix.len() + prefix_len, - encoded_data_with_prefix.len() - ); - assert_encode_sanity( - &encoded_data_no_prefix, - engine.config().encode_padding(), - input_len, - ); - assert_encode_sanity( - &encoded_data_with_prefix[prefix_len..], - engine.config().encode_padding(), - input_len, - ); - - // append plain encode onto prefix - prefix.push_str(&encoded_data_no_prefix); - - assert_eq!(prefix, encoded_data_with_prefix); - - engine - .decode_vec(&encoded_data_no_prefix, &mut decoded) - .unwrap(); - assert_eq!(orig_data, decoded); - } - } - - #[test] - fn encode_engine_slice_into_nonempty_buffer_doesnt_clobber_suffix() { - let mut orig_data = Vec::new(); - let mut encoded_data = Vec::new(); - let mut encoded_data_original_state = Vec::new(); - let mut decoded = Vec::new(); - - let input_len_range = Uniform::new(0, 1000); - - let mut rng = rand::rngs::SmallRng::from_entropy(); - - for _ in 0..10_000 { - orig_data.clear(); - encoded_data.clear(); - encoded_data_original_state.clear(); - decoded.clear(); - - let input_len = input_len_range.sample(&mut rng); - - for _ in 0..input_len { - orig_data.push(rng.gen()); - } - - // plenty of existing garbage in the encoded buffer - for _ in 0..10 * input_len { - encoded_data.push(rng.gen()); - } - - encoded_data_original_state.extend_from_slice(&encoded_data); - - let engine = random_engine(&mut rng); - - let encoded_size = encoded_len(input_len, engine.config().encode_padding()).unwrap(); - - assert_eq!( - encoded_size, - engine.encode_slice(&orig_data, &mut encoded_data).unwrap() - ); - - assert_encode_sanity( - str::from_utf8(&encoded_data[0..encoded_size]).unwrap(), - engine.config().encode_padding(), - input_len, - ); - - assert_eq!( - &encoded_data[encoded_size..], - &encoded_data_original_state[encoded_size..] 
- ); - - engine - .decode_vec(&encoded_data[0..encoded_size], &mut decoded) - .unwrap(); - assert_eq!(orig_data, decoded); - } - } - - #[test] - fn encode_to_slice_random_valid_utf8() { - let mut input = Vec::new(); - let mut output = Vec::new(); - - let input_len_range = Uniform::new(0, 1000); - - let mut rng = rand::rngs::SmallRng::from_entropy(); - - for _ in 0..10_000 { - input.clear(); - output.clear(); - - let input_len = input_len_range.sample(&mut rng); - - for _ in 0..input_len { - input.push(rng.gen()); - } - - let config = random_config(&mut rng); - let engine = random_engine(&mut rng); - - // fill up the output buffer with garbage - let encoded_size = encoded_len(input_len, config.encode_padding()).unwrap(); - for _ in 0..encoded_size { - output.push(rng.gen()); - } - - let orig_output_buf = output.clone(); - - let bytes_written = engine.internal_encode(&input, &mut output); - - // make sure the part beyond bytes_written is the same garbage it was before - assert_eq!(orig_output_buf[bytes_written..], output[bytes_written..]); - - // make sure the encoded bytes are UTF-8 - let _ = str::from_utf8(&output[0..bytes_written]).unwrap(); - } - } - - #[test] - fn encode_with_padding_random_valid_utf8() { - let mut input = Vec::new(); - let mut output = Vec::new(); - - let input_len_range = Uniform::new(0, 1000); - - let mut rng = rand::rngs::SmallRng::from_entropy(); - - for _ in 0..10_000 { - input.clear(); - output.clear(); - - let input_len = input_len_range.sample(&mut rng); - - for _ in 0..input_len { - input.push(rng.gen()); - } - - let engine = random_engine(&mut rng); - - // fill up the output buffer with garbage - let encoded_size = encoded_len(input_len, engine.config().encode_padding()).unwrap(); - for _ in 0..encoded_size + 1000 { - output.push(rng.gen()); - } - - let orig_output_buf = output.clone(); - - encode_with_padding(&input, &mut output[0..encoded_size], &engine, encoded_size); - - // make sure the part beyond b64 is the same garbage it was before - assert_eq!(orig_output_buf[encoded_size..], output[encoded_size..]); - - // make sure the encoded bytes are UTF-8 - let _ = str::from_utf8(&output[0..encoded_size]).unwrap(); - } - } - - #[test] - fn add_padding_random_valid_utf8() { - let mut output = Vec::new(); - - let mut rng = rand::rngs::SmallRng::from_entropy(); - - // cover our bases for length % 4 - for unpadded_output_len in 0..20 { - output.clear(); - - // fill output with random - for _ in 0..100 { - output.push(rng.gen()); - } - - let orig_output_buf = output.clone(); - - let bytes_written = add_padding(unpadded_output_len, &mut output); - - // make sure the part beyond bytes_written is the same garbage it was before - assert_eq!(orig_output_buf[bytes_written..], output[bytes_written..]); - - // make sure the encoded bytes are UTF-8 - let _ = str::from_utf8(&output[0..bytes_written]).unwrap(); - } - } - - fn assert_encoded_length( - input_len: usize, - enc_len: usize, - engine: &E, - padded: bool, - ) { - assert_eq!(enc_len, encoded_len(input_len, padded).unwrap()); - - let mut bytes: Vec = Vec::new(); - let mut rng = rand::rngs::SmallRng::from_entropy(); - - for _ in 0..input_len { - bytes.push(rng.gen()); - } - - let encoded = engine.encode(&bytes); - assert_encode_sanity(&encoded, padded, input_len); - - assert_eq!(enc_len, encoded.len()); - } - - #[test] - fn encode_imap() { - assert_eq!( - &GeneralPurpose::new(&alphabet::IMAP_MUTF7, NO_PAD).encode(b"\xFB\xFF"), - &GeneralPurpose::new(&alphabet::STANDARD, NO_PAD) - .encode(b"\xFB\xFF") - .replace('/', 
",") - ); - } -} diff --git a/vendor/base64/src/engine/general_purpose/decode.rs b/vendor/base64/src/engine/general_purpose/decode.rs deleted file mode 100644 index b55d3fc5c8f7e5..00000000000000 --- a/vendor/base64/src/engine/general_purpose/decode.rs +++ /dev/null @@ -1,357 +0,0 @@ -use crate::{ - engine::{general_purpose::INVALID_VALUE, DecodeEstimate, DecodeMetadata, DecodePaddingMode}, - DecodeError, DecodeSliceError, PAD_BYTE, -}; - -#[doc(hidden)] -pub struct GeneralPurposeEstimate { - /// input len % 4 - rem: usize, - conservative_decoded_len: usize, -} - -impl GeneralPurposeEstimate { - pub(crate) fn new(encoded_len: usize) -> Self { - let rem = encoded_len % 4; - Self { - rem, - conservative_decoded_len: (encoded_len / 4 + (rem > 0) as usize) * 3, - } - } -} - -impl DecodeEstimate for GeneralPurposeEstimate { - fn decoded_len_estimate(&self) -> usize { - self.conservative_decoded_len - } -} - -/// Helper to avoid duplicating num_chunks calculation, which is costly on short inputs. -/// Returns the decode metadata, or an error. -// We're on the fragile edge of compiler heuristics here. If this is not inlined, slow. If this is -// inlined(always), a different slow. plain ol' inline makes the benchmarks happiest at the moment, -// but this is fragile and the best setting changes with only minor code modifications. -#[inline] -pub(crate) fn decode_helper( - input: &[u8], - estimate: GeneralPurposeEstimate, - output: &mut [u8], - decode_table: &[u8; 256], - decode_allow_trailing_bits: bool, - padding_mode: DecodePaddingMode, -) -> Result { - let input_complete_nonterminal_quads_len = - complete_quads_len(input, estimate.rem, output.len(), decode_table)?; - - const UNROLLED_INPUT_CHUNK_SIZE: usize = 32; - const UNROLLED_OUTPUT_CHUNK_SIZE: usize = UNROLLED_INPUT_CHUNK_SIZE / 4 * 3; - - let input_complete_quads_after_unrolled_chunks_len = - input_complete_nonterminal_quads_len % UNROLLED_INPUT_CHUNK_SIZE; - - let input_unrolled_loop_len = - input_complete_nonterminal_quads_len - input_complete_quads_after_unrolled_chunks_len; - - // chunks of 32 bytes - for (chunk_index, chunk) in input[..input_unrolled_loop_len] - .chunks_exact(UNROLLED_INPUT_CHUNK_SIZE) - .enumerate() - { - let input_index = chunk_index * UNROLLED_INPUT_CHUNK_SIZE; - let chunk_output = &mut output[chunk_index * UNROLLED_OUTPUT_CHUNK_SIZE - ..(chunk_index + 1) * UNROLLED_OUTPUT_CHUNK_SIZE]; - - decode_chunk_8( - &chunk[0..8], - input_index, - decode_table, - &mut chunk_output[0..6], - )?; - decode_chunk_8( - &chunk[8..16], - input_index + 8, - decode_table, - &mut chunk_output[6..12], - )?; - decode_chunk_8( - &chunk[16..24], - input_index + 16, - decode_table, - &mut chunk_output[12..18], - )?; - decode_chunk_8( - &chunk[24..32], - input_index + 24, - decode_table, - &mut chunk_output[18..24], - )?; - } - - // remaining quads, except for the last possibly partial one, as it may have padding - let output_unrolled_loop_len = input_unrolled_loop_len / 4 * 3; - let output_complete_quad_len = input_complete_nonterminal_quads_len / 4 * 3; - { - let output_after_unroll = &mut output[output_unrolled_loop_len..output_complete_quad_len]; - - for (chunk_index, chunk) in input - [input_unrolled_loop_len..input_complete_nonterminal_quads_len] - .chunks_exact(4) - .enumerate() - { - let chunk_output = &mut output_after_unroll[chunk_index * 3..chunk_index * 3 + 3]; - - decode_chunk_4( - chunk, - input_unrolled_loop_len + chunk_index * 4, - decode_table, - chunk_output, - )?; - } - } - - super::decode_suffix::decode_suffix( - input, - 
input_complete_nonterminal_quads_len, - output, - output_complete_quad_len, - decode_table, - decode_allow_trailing_bits, - padding_mode, - ) -} - -/// Returns the length of complete quads, except for the last one, even if it is complete. -/// -/// Returns an error if the output len is not big enough for decoding those complete quads, or if -/// the input % 4 == 1, and that last byte is an invalid value other than a pad byte. -/// -/// - `input` is the base64 input -/// - `input_len_rem` is input len % 4 -/// - `output_len` is the length of the output slice -pub(crate) fn complete_quads_len( - input: &[u8], - input_len_rem: usize, - output_len: usize, - decode_table: &[u8; 256], -) -> Result { - debug_assert!(input.len() % 4 == input_len_rem); - - // detect a trailing invalid byte, like a newline, as a user convenience - if input_len_rem == 1 { - let last_byte = input[input.len() - 1]; - // exclude pad bytes; might be part of padding that extends from earlier in the input - if last_byte != PAD_BYTE && decode_table[usize::from(last_byte)] == INVALID_VALUE { - return Err(DecodeError::InvalidByte(input.len() - 1, last_byte).into()); - } - }; - - // skip last quad, even if it's complete, as it may have padding - let input_complete_nonterminal_quads_len = input - .len() - .saturating_sub(input_len_rem) - // if rem was 0, subtract 4 to avoid padding - .saturating_sub((input_len_rem == 0) as usize * 4); - debug_assert!( - input.is_empty() || (1..=4).contains(&(input.len() - input_complete_nonterminal_quads_len)) - ); - - // check that everything except the last quad handled by decode_suffix will fit - if output_len < input_complete_nonterminal_quads_len / 4 * 3 { - return Err(DecodeSliceError::OutputSliceTooSmall); - }; - Ok(input_complete_nonterminal_quads_len) -} - -/// Decode 8 bytes of input into 6 bytes of output. -/// -/// `input` is the 8 bytes to decode. -/// `index_at_start_of_input` is the offset in the overall input (used for reporting errors -/// accurately) -/// `decode_table` is the lookup table for the particular base64 alphabet. 
-/// `output` will have its first 6 bytes overwritten -// yes, really inline (worth 30-50% speedup) -#[inline(always)] -fn decode_chunk_8( - input: &[u8], - index_at_start_of_input: usize, - decode_table: &[u8; 256], - output: &mut [u8], -) -> Result<(), DecodeError> { - let morsel = decode_table[usize::from(input[0])]; - if morsel == INVALID_VALUE { - return Err(DecodeError::InvalidByte(index_at_start_of_input, input[0])); - } - let mut accum = u64::from(morsel) << 58; - - let morsel = decode_table[usize::from(input[1])]; - if morsel == INVALID_VALUE { - return Err(DecodeError::InvalidByte( - index_at_start_of_input + 1, - input[1], - )); - } - accum |= u64::from(morsel) << 52; - - let morsel = decode_table[usize::from(input[2])]; - if morsel == INVALID_VALUE { - return Err(DecodeError::InvalidByte( - index_at_start_of_input + 2, - input[2], - )); - } - accum |= u64::from(morsel) << 46; - - let morsel = decode_table[usize::from(input[3])]; - if morsel == INVALID_VALUE { - return Err(DecodeError::InvalidByte( - index_at_start_of_input + 3, - input[3], - )); - } - accum |= u64::from(morsel) << 40; - - let morsel = decode_table[usize::from(input[4])]; - if morsel == INVALID_VALUE { - return Err(DecodeError::InvalidByte( - index_at_start_of_input + 4, - input[4], - )); - } - accum |= u64::from(morsel) << 34; - - let morsel = decode_table[usize::from(input[5])]; - if morsel == INVALID_VALUE { - return Err(DecodeError::InvalidByte( - index_at_start_of_input + 5, - input[5], - )); - } - accum |= u64::from(morsel) << 28; - - let morsel = decode_table[usize::from(input[6])]; - if morsel == INVALID_VALUE { - return Err(DecodeError::InvalidByte( - index_at_start_of_input + 6, - input[6], - )); - } - accum |= u64::from(morsel) << 22; - - let morsel = decode_table[usize::from(input[7])]; - if morsel == INVALID_VALUE { - return Err(DecodeError::InvalidByte( - index_at_start_of_input + 7, - input[7], - )); - } - accum |= u64::from(morsel) << 16; - - output[..6].copy_from_slice(&accum.to_be_bytes()[..6]); - - Ok(()) -} - -/// Like [decode_chunk_8] but for 4 bytes of input and 3 bytes of output. 
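A worked example of the 4-symbol-to-3-byte packing that `decode_chunk_4` performs, using hard-coded standard-alphabet values instead of a decode table:

    fn main() {
        // "Zm9v" decodes to "foo". Standard-alphabet 6-bit values:
        // 'Z' = 25, 'm' = 38, '9' = 61, 'v' = 47.
        let morsels = [25u32, 38, 61, 47];

        // Pack left to right with the 26/20/14/8 bit shifts used by decode_chunk_4.
        let accum =
            (morsels[0] << 26) | (morsels[1] << 20) | (morsels[2] << 14) | (morsels[3] << 8);

        // The top three bytes of the accumulator are the decoded output.
        let bytes = accum.to_be_bytes();
        assert_eq!(&bytes[..3], b"foo");
    }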
-#[inline(always)] -fn decode_chunk_4( - input: &[u8], - index_at_start_of_input: usize, - decode_table: &[u8; 256], - output: &mut [u8], -) -> Result<(), DecodeError> { - let morsel = decode_table[usize::from(input[0])]; - if morsel == INVALID_VALUE { - return Err(DecodeError::InvalidByte(index_at_start_of_input, input[0])); - } - let mut accum = u32::from(morsel) << 26; - - let morsel = decode_table[usize::from(input[1])]; - if morsel == INVALID_VALUE { - return Err(DecodeError::InvalidByte( - index_at_start_of_input + 1, - input[1], - )); - } - accum |= u32::from(morsel) << 20; - - let morsel = decode_table[usize::from(input[2])]; - if morsel == INVALID_VALUE { - return Err(DecodeError::InvalidByte( - index_at_start_of_input + 2, - input[2], - )); - } - accum |= u32::from(morsel) << 14; - - let morsel = decode_table[usize::from(input[3])]; - if morsel == INVALID_VALUE { - return Err(DecodeError::InvalidByte( - index_at_start_of_input + 3, - input[3], - )); - } - accum |= u32::from(morsel) << 8; - - output[..3].copy_from_slice(&accum.to_be_bytes()[..3]); - - Ok(()) -} - -#[cfg(test)] -mod tests { - use super::*; - - use crate::engine::general_purpose::STANDARD; - - #[test] - fn decode_chunk_8_writes_only_6_bytes() { - let input = b"Zm9vYmFy"; // "foobar" - let mut output = [0_u8, 1, 2, 3, 4, 5, 6, 7]; - - decode_chunk_8(&input[..], 0, &STANDARD.decode_table, &mut output).unwrap(); - assert_eq!(&vec![b'f', b'o', b'o', b'b', b'a', b'r', 6, 7], &output); - } - - #[test] - fn decode_chunk_4_writes_only_3_bytes() { - let input = b"Zm9v"; // "foobar" - let mut output = [0_u8, 1, 2, 3]; - - decode_chunk_4(&input[..], 0, &STANDARD.decode_table, &mut output).unwrap(); - assert_eq!(&vec![b'f', b'o', b'o', 3], &output); - } - - #[test] - fn estimate_short_lengths() { - for (range, decoded_len_estimate) in [ - (0..=0, 0), - (1..=4, 3), - (5..=8, 6), - (9..=12, 9), - (13..=16, 12), - (17..=20, 15), - ] { - for encoded_len in range { - let estimate = GeneralPurposeEstimate::new(encoded_len); - assert_eq!(decoded_len_estimate, estimate.decoded_len_estimate()); - } - } - } - - #[test] - fn estimate_via_u128_inflation() { - // cover both ends of usize - (0..1000) - .chain(usize::MAX - 1000..=usize::MAX) - .for_each(|encoded_len| { - // inflate to 128 bit type to be able to safely use the easy formulas - let len_128 = encoded_len as u128; - - let estimate = GeneralPurposeEstimate::new(encoded_len); - assert_eq!( - (len_128 + 3) / 4 * 3, - estimate.conservative_decoded_len as u128 - ); - }) - } -} diff --git a/vendor/base64/src/engine/general_purpose/decode_suffix.rs b/vendor/base64/src/engine/general_purpose/decode_suffix.rs deleted file mode 100644 index 02aaf5141e1a41..00000000000000 --- a/vendor/base64/src/engine/general_purpose/decode_suffix.rs +++ /dev/null @@ -1,162 +0,0 @@ -use crate::{ - engine::{general_purpose::INVALID_VALUE, DecodeMetadata, DecodePaddingMode}, - DecodeError, DecodeSliceError, PAD_BYTE, -}; - -/// Decode the last 0-4 bytes, checking for trailing set bits and padding per the provided -/// parameters. -/// -/// Returns the decode metadata representing the total number of bytes decoded, including the ones -/// indicated as already written by `output_index`. 
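The padding checks in `decode_suffix` are what give the three `DecodePaddingMode` settings their behaviour; a small usage sketch built on the `GeneralPurposeConfig` builder defined in engine/general_purpose/mod.rs:

    use base64::{
        alphabet,
        engine::{DecodePaddingMode, GeneralPurpose, GeneralPurposeConfig},
        Engine as _,
    };

    fn main() {
        let engine_for = |mode| {
            GeneralPurpose::new(
                &alphabet::STANDARD,
                GeneralPurposeConfig::new().with_decode_padding_mode(mode),
            )
        };

        // "Zg==" is canonically padded, "Zg" is unpadded; both decode to b"f".
        let canonical = engine_for(DecodePaddingMode::RequireCanonical);
        assert!(canonical.decode("Zg==").is_ok());
        assert!(canonical.decode("Zg").is_err()); // missing padding rejected

        let none = engine_for(DecodePaddingMode::RequireNone);
        assert!(none.decode("Zg").is_ok());
        assert!(none.decode("Zg==").is_err()); // padding rejected

        let indifferent = engine_for(DecodePaddingMode::Indifferent);
        assert!(indifferent.decode("Zg").is_ok() && indifferent.decode("Zg==").is_ok());
    }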
-pub(crate) fn decode_suffix( - input: &[u8], - input_index: usize, - output: &mut [u8], - mut output_index: usize, - decode_table: &[u8; 256], - decode_allow_trailing_bits: bool, - padding_mode: DecodePaddingMode, -) -> Result { - debug_assert!((input.len() - input_index) <= 4); - - // Decode any leftovers that might not be a complete input chunk of 4 bytes. - // Use a u32 as a stack-resident 4 byte buffer. - let mut morsels_in_leftover = 0; - let mut padding_bytes_count = 0; - // offset from input_index - let mut first_padding_offset: usize = 0; - let mut last_symbol = 0_u8; - let mut morsels = [0_u8; 4]; - - for (leftover_index, &b) in input[input_index..].iter().enumerate() { - // '=' padding - if b == PAD_BYTE { - // There can be bad padding bytes in a few ways: - // 1 - Padding with non-padding characters after it - // 2 - Padding after zero or one characters in the current quad (should only - // be after 2 or 3 chars) - // 3 - More than two characters of padding. If 3 or 4 padding chars - // are in the same quad, that implies it will be caught by #2. - // If it spreads from one quad to another, it will be an invalid byte - // in the first quad. - // 4 - Non-canonical padding -- 1 byte when it should be 2, etc. - // Per config, non-canonical but still functional non- or partially-padded base64 - // may be treated as an error condition. - - if leftover_index < 2 { - // Check for error #2. - // Either the previous byte was padding, in which case we would have already hit - // this case, or it wasn't, in which case this is the first such error. - debug_assert!( - leftover_index == 0 || (leftover_index == 1 && padding_bytes_count == 0) - ); - let bad_padding_index = input_index + leftover_index; - return Err(DecodeError::InvalidByte(bad_padding_index, b).into()); - } - - if padding_bytes_count == 0 { - first_padding_offset = leftover_index; - } - - padding_bytes_count += 1; - continue; - } - - // Check for case #1. - // To make '=' handling consistent with the main loop, don't allow - // non-suffix '=' in trailing chunk either. Report error as first - // erroneous padding. - if padding_bytes_count > 0 { - return Err( - DecodeError::InvalidByte(input_index + first_padding_offset, PAD_BYTE).into(), - ); - } - - last_symbol = b; - - // can use up to 8 * 6 = 48 bits of the u64, if last chunk has no padding. - // Pack the leftovers from left to right. - let morsel = decode_table[b as usize]; - if morsel == INVALID_VALUE { - return Err(DecodeError::InvalidByte(input_index + leftover_index, b).into()); - } - - morsels[morsels_in_leftover] = morsel; - morsels_in_leftover += 1; - } - - // If there was 1 trailing byte, and it was valid, and we got to this point without hitting - // an invalid byte, now we can report invalid length - if !input.is_empty() && morsels_in_leftover < 2 { - return Err(DecodeError::InvalidLength(input_index + morsels_in_leftover).into()); - } - - match padding_mode { - DecodePaddingMode::Indifferent => { /* everything we care about was already checked */ } - DecodePaddingMode::RequireCanonical => { - // allow empty input - if (padding_bytes_count + morsels_in_leftover) % 4 != 0 { - return Err(DecodeError::InvalidPadding.into()); - } - } - DecodePaddingMode::RequireNone => { - if padding_bytes_count > 0 { - // check at the end to make sure we let the cases of padding that should be InvalidByte - // get hit - return Err(DecodeError::InvalidPadding.into()); - } - } - } - - // When encoding 1 trailing byte (e.g. 0xFF), 2 base64 bytes ("/w") are needed. 
- // / is the symbol for 63 (0x3F, bottom 6 bits all set) and w is 48 (0x30, top 2 bits - // of bottom 6 bits set). - // When decoding two symbols back to one trailing byte, any final symbol higher than - // w would still decode to the original byte because we only care about the top two - // bits in the bottom 6, but would be a non-canonical encoding. So, we calculate a - // mask based on how many bits are used for just the canonical encoding, and optionally - // error if any other bits are set. In the example of one encoded byte -> 2 symbols, - // 2 symbols can technically encode 12 bits, but the last 4 are non-canonical, and - // useless since there are no more symbols to provide the necessary 4 additional bits - // to finish the second original byte. - - let leftover_bytes_to_append = morsels_in_leftover * 6 / 8; - // Put the up to 6 complete bytes as the high bytes. - // Gain a couple percent speedup from nudging these ORs to use more ILP with a two-way split. - let mut leftover_num = (u32::from(morsels[0]) << 26) - | (u32::from(morsels[1]) << 20) - | (u32::from(morsels[2]) << 14) - | (u32::from(morsels[3]) << 8); - - // if there are bits set outside the bits we care about, last symbol encodes trailing bits that - // will not be included in the output - let mask = !0_u32 >> (leftover_bytes_to_append * 8); - if !decode_allow_trailing_bits && (leftover_num & mask) != 0 { - // last morsel is at `morsels_in_leftover` - 1 - return Err(DecodeError::InvalidLastSymbol( - input_index + morsels_in_leftover - 1, - last_symbol, - ) - .into()); - } - - // Strangely, this approach benchmarks better than writing bytes one at a time, - // or copy_from_slice into output. - for _ in 0..leftover_bytes_to_append { - let hi_byte = (leftover_num >> 24) as u8; - leftover_num <<= 8; - *output - .get_mut(output_index) - .ok_or(DecodeSliceError::OutputSliceTooSmall)? = hi_byte; - output_index += 1; - } - - Ok(DecodeMetadata::new( - output_index, - if padding_bytes_count > 0 { - Some(input_index + first_padding_offset) - } else { - None - }, - )) -} diff --git a/vendor/base64/src/engine/general_purpose/mod.rs b/vendor/base64/src/engine/general_purpose/mod.rs deleted file mode 100644 index 6fe958097b2878..00000000000000 --- a/vendor/base64/src/engine/general_purpose/mod.rs +++ /dev/null @@ -1,352 +0,0 @@ -//! Provides the [GeneralPurpose] engine and associated config types. -use crate::{ - alphabet, - alphabet::Alphabet, - engine::{Config, DecodeMetadata, DecodePaddingMode}, - DecodeSliceError, -}; -use core::convert::TryInto; - -pub(crate) mod decode; -pub(crate) mod decode_suffix; - -pub use decode::GeneralPurposeEstimate; - -pub(crate) const INVALID_VALUE: u8 = 255; - -/// A general-purpose base64 engine. -/// -/// - It uses no vector CPU instructions, so it will work on any system. -/// - It is reasonably fast (~2-3GiB/s). -/// - It is not constant-time, though, so it is vulnerable to timing side-channel attacks. For loading cryptographic keys, etc, it is suggested to use the forthcoming constant-time implementation. - -#[derive(Debug, Clone)] -pub struct GeneralPurpose { - encode_table: [u8; 64], - decode_table: [u8; 256], - config: GeneralPurposeConfig, -} - -impl GeneralPurpose { - /// Create a `GeneralPurpose` engine from an [Alphabet]. - /// - /// While not very expensive to initialize, ideally these should be cached - /// if the engine will be used repeatedly. 
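As the note above suggests, engines are cheap to build but worth constructing once; since `new` is a `const fn`, a custom engine can live in a `const`. A sketch pairing the `IMAP_MUTF7` alphabet with `NO_PAD`, purely as an example:

    use base64::{alphabet, engine::general_purpose, Engine as _};

    // Built once at compile time, so there is no per-call setup cost.
    const IMAP_ENGINE: general_purpose::GeneralPurpose =
        general_purpose::GeneralPurpose::new(&alphabet::IMAP_MUTF7, general_purpose::NO_PAD);

    fn main() {
        // Same data through the standard alphabet would be "+/8";
        // IMAP-modified UTF-7 swaps '/' for ',' as the 63rd symbol.
        let encoded = IMAP_ENGINE.encode(b"\xFB\xFF");
        assert_eq!(encoded, "+,8");
    }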
- pub const fn new(alphabet: &Alphabet, config: GeneralPurposeConfig) -> Self { - Self { - encode_table: encode_table(alphabet), - decode_table: decode_table(alphabet), - config, - } - } -} - -impl super::Engine for GeneralPurpose { - type Config = GeneralPurposeConfig; - type DecodeEstimate = GeneralPurposeEstimate; - - fn internal_encode(&self, input: &[u8], output: &mut [u8]) -> usize { - let mut input_index: usize = 0; - - const BLOCKS_PER_FAST_LOOP: usize = 4; - const LOW_SIX_BITS: u64 = 0x3F; - - // we read 8 bytes at a time (u64) but only actually consume 6 of those bytes. Thus, we need - // 2 trailing bytes to be available to read.. - let last_fast_index = input.len().saturating_sub(BLOCKS_PER_FAST_LOOP * 6 + 2); - let mut output_index = 0; - - if last_fast_index > 0 { - while input_index <= last_fast_index { - // Major performance wins from letting the optimizer do the bounds check once, mostly - // on the output side - let input_chunk = - &input[input_index..(input_index + (BLOCKS_PER_FAST_LOOP * 6 + 2))]; - let output_chunk = - &mut output[output_index..(output_index + BLOCKS_PER_FAST_LOOP * 8)]; - - // Hand-unrolling for 32 vs 16 or 8 bytes produces yields performance about equivalent - // to unsafe pointer code on a Xeon E5-1650v3. 64 byte unrolling was slightly better for - // large inputs but significantly worse for 50-byte input, unsurprisingly. I suspect - // that it's a not uncommon use case to encode smallish chunks of data (e.g. a 64-byte - // SHA-512 digest), so it would be nice if that fit in the unrolled loop at least once. - // Plus, single-digit percentage performance differences might well be quite different - // on different hardware. - - let input_u64 = read_u64(&input_chunk[0..]); - - output_chunk[0] = self.encode_table[((input_u64 >> 58) & LOW_SIX_BITS) as usize]; - output_chunk[1] = self.encode_table[((input_u64 >> 52) & LOW_SIX_BITS) as usize]; - output_chunk[2] = self.encode_table[((input_u64 >> 46) & LOW_SIX_BITS) as usize]; - output_chunk[3] = self.encode_table[((input_u64 >> 40) & LOW_SIX_BITS) as usize]; - output_chunk[4] = self.encode_table[((input_u64 >> 34) & LOW_SIX_BITS) as usize]; - output_chunk[5] = self.encode_table[((input_u64 >> 28) & LOW_SIX_BITS) as usize]; - output_chunk[6] = self.encode_table[((input_u64 >> 22) & LOW_SIX_BITS) as usize]; - output_chunk[7] = self.encode_table[((input_u64 >> 16) & LOW_SIX_BITS) as usize]; - - let input_u64 = read_u64(&input_chunk[6..]); - - output_chunk[8] = self.encode_table[((input_u64 >> 58) & LOW_SIX_BITS) as usize]; - output_chunk[9] = self.encode_table[((input_u64 >> 52) & LOW_SIX_BITS) as usize]; - output_chunk[10] = self.encode_table[((input_u64 >> 46) & LOW_SIX_BITS) as usize]; - output_chunk[11] = self.encode_table[((input_u64 >> 40) & LOW_SIX_BITS) as usize]; - output_chunk[12] = self.encode_table[((input_u64 >> 34) & LOW_SIX_BITS) as usize]; - output_chunk[13] = self.encode_table[((input_u64 >> 28) & LOW_SIX_BITS) as usize]; - output_chunk[14] = self.encode_table[((input_u64 >> 22) & LOW_SIX_BITS) as usize]; - output_chunk[15] = self.encode_table[((input_u64 >> 16) & LOW_SIX_BITS) as usize]; - - let input_u64 = read_u64(&input_chunk[12..]); - - output_chunk[16] = self.encode_table[((input_u64 >> 58) & LOW_SIX_BITS) as usize]; - output_chunk[17] = self.encode_table[((input_u64 >> 52) & LOW_SIX_BITS) as usize]; - output_chunk[18] = self.encode_table[((input_u64 >> 46) & LOW_SIX_BITS) as usize]; - output_chunk[19] = self.encode_table[((input_u64 >> 40) & LOW_SIX_BITS) as usize]; - output_chunk[20] = 
self.encode_table[((input_u64 >> 34) & LOW_SIX_BITS) as usize]; - output_chunk[21] = self.encode_table[((input_u64 >> 28) & LOW_SIX_BITS) as usize]; - output_chunk[22] = self.encode_table[((input_u64 >> 22) & LOW_SIX_BITS) as usize]; - output_chunk[23] = self.encode_table[((input_u64 >> 16) & LOW_SIX_BITS) as usize]; - - let input_u64 = read_u64(&input_chunk[18..]); - - output_chunk[24] = self.encode_table[((input_u64 >> 58) & LOW_SIX_BITS) as usize]; - output_chunk[25] = self.encode_table[((input_u64 >> 52) & LOW_SIX_BITS) as usize]; - output_chunk[26] = self.encode_table[((input_u64 >> 46) & LOW_SIX_BITS) as usize]; - output_chunk[27] = self.encode_table[((input_u64 >> 40) & LOW_SIX_BITS) as usize]; - output_chunk[28] = self.encode_table[((input_u64 >> 34) & LOW_SIX_BITS) as usize]; - output_chunk[29] = self.encode_table[((input_u64 >> 28) & LOW_SIX_BITS) as usize]; - output_chunk[30] = self.encode_table[((input_u64 >> 22) & LOW_SIX_BITS) as usize]; - output_chunk[31] = self.encode_table[((input_u64 >> 16) & LOW_SIX_BITS) as usize]; - - output_index += BLOCKS_PER_FAST_LOOP * 8; - input_index += BLOCKS_PER_FAST_LOOP * 6; - } - } - - // Encode what's left after the fast loop. - - const LOW_SIX_BITS_U8: u8 = 0x3F; - - let rem = input.len() % 3; - let start_of_rem = input.len() - rem; - - // start at the first index not handled by fast loop, which may be 0. - - while input_index < start_of_rem { - let input_chunk = &input[input_index..(input_index + 3)]; - let output_chunk = &mut output[output_index..(output_index + 4)]; - - output_chunk[0] = self.encode_table[(input_chunk[0] >> 2) as usize]; - output_chunk[1] = self.encode_table - [((input_chunk[0] << 4 | input_chunk[1] >> 4) & LOW_SIX_BITS_U8) as usize]; - output_chunk[2] = self.encode_table - [((input_chunk[1] << 2 | input_chunk[2] >> 6) & LOW_SIX_BITS_U8) as usize]; - output_chunk[3] = self.encode_table[(input_chunk[2] & LOW_SIX_BITS_U8) as usize]; - - input_index += 3; - output_index += 4; - } - - if rem == 2 { - output[output_index] = self.encode_table[(input[start_of_rem] >> 2) as usize]; - output[output_index + 1] = - self.encode_table[((input[start_of_rem] << 4 | input[start_of_rem + 1] >> 4) - & LOW_SIX_BITS_U8) as usize]; - output[output_index + 2] = - self.encode_table[((input[start_of_rem + 1] << 2) & LOW_SIX_BITS_U8) as usize]; - output_index += 3; - } else if rem == 1 { - output[output_index] = self.encode_table[(input[start_of_rem] >> 2) as usize]; - output[output_index + 1] = - self.encode_table[((input[start_of_rem] << 4) & LOW_SIX_BITS_U8) as usize]; - output_index += 2; - } - - output_index - } - - fn internal_decoded_len_estimate(&self, input_len: usize) -> Self::DecodeEstimate { - GeneralPurposeEstimate::new(input_len) - } - - fn internal_decode( - &self, - input: &[u8], - output: &mut [u8], - estimate: Self::DecodeEstimate, - ) -> Result { - decode::decode_helper( - input, - estimate, - output, - &self.decode_table, - self.config.decode_allow_trailing_bits, - self.config.decode_padding_mode, - ) - } - - fn config(&self) -> &Self::Config { - &self.config - } -} - -/// Returns a table mapping a 6-bit index to the ASCII byte encoding of the index -pub(crate) const fn encode_table(alphabet: &Alphabet) -> [u8; 64] { - // the encode table is just the alphabet: - // 6-bit index lookup -> printable byte - let mut encode_table = [0_u8; 64]; - { - let mut index = 0; - while index < 64 { - encode_table[index] = alphabet.symbols[index]; - index += 1; - } - } - - encode_table -} - -/// Returns a table mapping base64 bytes as the 
lookup index to either: -/// - [INVALID_VALUE] for bytes that aren't members of the alphabet -/// - a byte whose lower 6 bits are the value that was encoded into the index byte -pub(crate) const fn decode_table(alphabet: &Alphabet) -> [u8; 256] { - let mut decode_table = [INVALID_VALUE; 256]; - - // Since the table is full of `INVALID_VALUE` already, we only need to overwrite - // the parts that are valid. - let mut index = 0; - while index < 64 { - // The index in the alphabet is the 6-bit value we care about. - // Since the index is in 0-63, it is safe to cast to u8. - decode_table[alphabet.symbols[index] as usize] = index as u8; - index += 1; - } - - decode_table -} - -#[inline] -fn read_u64(s: &[u8]) -> u64 { - u64::from_be_bytes(s[..8].try_into().unwrap()) -} - -/// Contains configuration parameters for base64 encoding and decoding. -/// -/// ``` -/// # use base64::engine::GeneralPurposeConfig; -/// let config = GeneralPurposeConfig::new() -/// .with_encode_padding(false); -/// // further customize using `.with_*` methods as needed -/// ``` -/// -/// The constants [PAD] and [NO_PAD] cover most use cases. -/// -/// To specify the characters used, see [Alphabet]. -#[derive(Clone, Copy, Debug)] -pub struct GeneralPurposeConfig { - encode_padding: bool, - decode_allow_trailing_bits: bool, - decode_padding_mode: DecodePaddingMode, -} - -impl GeneralPurposeConfig { - /// Create a new config with `padding` = `true`, `decode_allow_trailing_bits` = `false`, and - /// `decode_padding_mode = DecodePaddingMode::RequireCanonicalPadding`. - /// - /// This probably matches most people's expectations, but consider disabling padding to save - /// a few bytes unless you specifically need it for compatibility with some legacy system. - pub const fn new() -> Self { - Self { - // RFC states that padding must be applied by default - encode_padding: true, - decode_allow_trailing_bits: false, - decode_padding_mode: DecodePaddingMode::RequireCanonical, - } - } - - /// Create a new config based on `self` with an updated `padding` setting. - /// - /// If `padding` is `true`, encoding will append either 1 or 2 `=` padding characters as needed - /// to produce an output whose length is a multiple of 4. - /// - /// Padding is not needed for correct decoding and only serves to waste bytes, but it's in the - /// [spec](https://datatracker.ietf.org/doc/html/rfc4648#section-3.2). - /// - /// For new applications, consider not using padding if the decoders you're using don't require - /// padding to be present. - pub const fn with_encode_padding(self, padding: bool) -> Self { - Self { - encode_padding: padding, - ..self - } - } - - /// Create a new config based on `self` with an updated `decode_allow_trailing_bits` setting. - /// - /// Most users will not need to configure this. It's useful if you need to decode base64 - /// produced by a buggy encoder that has bits set in the unused space on the last base64 - /// character as per [forgiving-base64 decode](https://infra.spec.whatwg.org/#forgiving-base64-decode). - /// If invalid trailing bits are present and this is `true`, those bits will - /// be silently ignored, else `DecodeError::InvalidLastSymbol` will be emitted. - pub const fn with_decode_allow_trailing_bits(self, allow: bool) -> Self { - Self { - decode_allow_trailing_bits: allow, - ..self - } - } - - /// Create a new config based on `self` with an updated `decode_padding_mode` setting. 
- /// - /// Padding is not useful in terms of representing encoded data -- it makes no difference to - /// the decoder if padding is present or not, so if you have some un-padded input to decode, it - /// is perfectly fine to use `DecodePaddingMode::Indifferent` to prevent errors from being - /// emitted. - /// - /// However, since in practice - /// [people who learned nothing from BER vs DER seem to expect base64 to have one canonical encoding](https://eprint.iacr.org/2022/361), - /// the default setting is the stricter `DecodePaddingMode::RequireCanonicalPadding`. - /// - /// Or, if "canonical" in your circumstance means _no_ padding rather than padding to the - /// next multiple of four, there's `DecodePaddingMode::RequireNoPadding`. - pub const fn with_decode_padding_mode(self, mode: DecodePaddingMode) -> Self { - Self { - decode_padding_mode: mode, - ..self - } - } -} - -impl Default for GeneralPurposeConfig { - /// Delegates to [GeneralPurposeConfig::new]. - fn default() -> Self { - Self::new() - } -} - -impl Config for GeneralPurposeConfig { - fn encode_padding(&self) -> bool { - self.encode_padding - } -} - -/// A [GeneralPurpose] engine using the [alphabet::STANDARD] base64 alphabet and [PAD] config. -pub const STANDARD: GeneralPurpose = GeneralPurpose::new(&alphabet::STANDARD, PAD); - -/// A [GeneralPurpose] engine using the [alphabet::STANDARD] base64 alphabet and [NO_PAD] config. -pub const STANDARD_NO_PAD: GeneralPurpose = GeneralPurpose::new(&alphabet::STANDARD, NO_PAD); - -/// A [GeneralPurpose] engine using the [alphabet::URL_SAFE] base64 alphabet and [PAD] config. -pub const URL_SAFE: GeneralPurpose = GeneralPurpose::new(&alphabet::URL_SAFE, PAD); - -/// A [GeneralPurpose] engine using the [alphabet::URL_SAFE] base64 alphabet and [NO_PAD] config. -pub const URL_SAFE_NO_PAD: GeneralPurpose = GeneralPurpose::new(&alphabet::URL_SAFE, NO_PAD); - -/// Include padding bytes when encoding, and require that they be present when decoding. -/// -/// This is the standard per the base64 RFC, but consider using [NO_PAD] instead as padding serves -/// little purpose in practice. -pub const PAD: GeneralPurposeConfig = GeneralPurposeConfig::new(); - -/// Don't add padding when encoding, and require no padding when decoding. -pub const NO_PAD: GeneralPurposeConfig = GeneralPurposeConfig::new() - .with_encode_padding(false) - .with_decode_padding_mode(DecodePaddingMode::RequireNone); diff --git a/vendor/base64/src/engine/mod.rs b/vendor/base64/src/engine/mod.rs deleted file mode 100644 index f2cc33f607c12e..00000000000000 --- a/vendor/base64/src/engine/mod.rs +++ /dev/null @@ -1,478 +0,0 @@ -//! Provides the [Engine] abstraction and out of the box implementations. -#[cfg(any(feature = "alloc", test))] -use crate::chunked_encoder; -use crate::{ - encode::{encode_with_padding, EncodeSliceError}, - encoded_len, DecodeError, DecodeSliceError, -}; -#[cfg(any(feature = "alloc", test))] -use alloc::vec::Vec; - -#[cfg(any(feature = "alloc", test))] -use alloc::{string::String, vec}; - -pub mod general_purpose; - -#[cfg(test)] -mod naive; - -#[cfg(test)] -mod tests; - -pub use general_purpose::{GeneralPurpose, GeneralPurposeConfig}; - -/// An `Engine` provides low-level encoding and decoding operations that all other higher-level parts of the API use. Users of the library will generally not need to implement this. -/// -/// Different implementations offer different characteristics. 
The library currently ships with -/// [GeneralPurpose] that offers good speed and works on any CPU, with more choices -/// coming later, like a constant-time one when side channel resistance is called for, and vendor-specific vectorized ones for more speed. -/// -/// See [general_purpose::STANDARD_NO_PAD] if you just want standard base64. Otherwise, when possible, it's -/// recommended to store the engine in a `const` so that references to it won't pose any lifetime -/// issues, and to avoid repeating the cost of engine setup. -/// -/// Since almost nobody will need to implement `Engine`, docs for internal methods are hidden. -// When adding an implementation of Engine, include them in the engine test suite: -// - add an implementation of [engine::tests::EngineWrapper] -// - add the implementation to the `all_engines` macro -// All tests run on all engines listed in the macro. -pub trait Engine: Send + Sync { - /// The config type used by this engine - type Config: Config; - /// The decode estimate used by this engine - type DecodeEstimate: DecodeEstimate; - - /// This is not meant to be called directly; it is only for `Engine` implementors. - /// See the other `encode*` functions on this trait. - /// - /// Encode the `input` bytes into the `output` buffer based on the mapping in `encode_table`. - /// - /// `output` will be long enough to hold the encoded data. - /// - /// Returns the number of bytes written. - /// - /// No padding should be written; that is handled separately. - /// - /// Must not write any bytes into the output slice other than the encoded data. - #[doc(hidden)] - fn internal_encode(&self, input: &[u8], output: &mut [u8]) -> usize; - - /// This is not meant to be called directly; it is only for `Engine` implementors. - /// - /// As an optimization to prevent the decoded length from being calculated twice, it is - /// sometimes helpful to have a conservative estimate of the decoded size before doing the - /// decoding, so this calculation is done separately and passed to [Engine::decode()] as needed. - #[doc(hidden)] - fn internal_decoded_len_estimate(&self, input_len: usize) -> Self::DecodeEstimate; - - /// This is not meant to be called directly; it is only for `Engine` implementors. - /// See the other `decode*` functions on this trait. - /// - /// Decode `input` base64 bytes into the `output` buffer. - /// - /// `decode_estimate` is the result of [Engine::internal_decoded_len_estimate()], which is passed in to avoid - /// calculating it again (expensive on short inputs).` - /// - /// Each complete 4-byte chunk of encoded data decodes to 3 bytes of decoded data, but this - /// function must also handle the final possibly partial chunk. - /// If the input length is not a multiple of 4, or uses padding bytes to reach a multiple of 4, - /// the trailing 2 or 3 bytes must decode to 1 or 2 bytes, respectively, as per the - /// [RFC](https://tools.ietf.org/html/rfc4648#section-3.5). - /// - /// Decoding must not write any bytes into the output slice other than the decoded data. - /// - /// Non-canonical trailing bits in the final tokens or non-canonical padding must be reported as - /// errors unless the engine is configured otherwise. - #[doc(hidden)] - fn internal_decode( - &self, - input: &[u8], - output: &mut [u8], - decode_estimate: Self::DecodeEstimate, - ) -> Result; - - /// Returns the config for this engine. - fn config(&self) -> &Self::Config; - - /// Encode arbitrary octets as base64 using the provided `Engine`. - /// Returns a `String`. 
- /// - /// # Example - /// - /// ```rust - /// use base64::{Engine as _, engine::{self, general_purpose}, alphabet}; - /// - /// let b64 = general_purpose::STANDARD.encode(b"hello world~"); - /// println!("{}", b64); - /// - /// const CUSTOM_ENGINE: engine::GeneralPurpose = - /// engine::GeneralPurpose::new(&alphabet::URL_SAFE, general_purpose::NO_PAD); - /// - /// let b64_url = CUSTOM_ENGINE.encode(b"hello internet~"); - /// ``` - #[cfg(any(feature = "alloc", test))] - #[inline] - fn encode>(&self, input: T) -> String { - fn inner(engine: &E, input_bytes: &[u8]) -> String - where - E: Engine + ?Sized, - { - let encoded_size = encoded_len(input_bytes.len(), engine.config().encode_padding()) - .expect("integer overflow when calculating buffer size"); - - let mut buf = vec![0; encoded_size]; - - encode_with_padding(input_bytes, &mut buf[..], engine, encoded_size); - - String::from_utf8(buf).expect("Invalid UTF8") - } - - inner(self, input.as_ref()) - } - - /// Encode arbitrary octets as base64 into a supplied `String`. - /// Writes into the supplied `String`, which may allocate if its internal buffer isn't big enough. - /// - /// # Example - /// - /// ```rust - /// use base64::{Engine as _, engine::{self, general_purpose}, alphabet}; - /// const CUSTOM_ENGINE: engine::GeneralPurpose = - /// engine::GeneralPurpose::new(&alphabet::URL_SAFE, general_purpose::NO_PAD); - /// - /// fn main() { - /// let mut buf = String::new(); - /// general_purpose::STANDARD.encode_string(b"hello world~", &mut buf); - /// println!("{}", buf); - /// - /// buf.clear(); - /// CUSTOM_ENGINE.encode_string(b"hello internet~", &mut buf); - /// println!("{}", buf); - /// } - /// ``` - #[cfg(any(feature = "alloc", test))] - #[inline] - fn encode_string>(&self, input: T, output_buf: &mut String) { - fn inner(engine: &E, input_bytes: &[u8], output_buf: &mut String) - where - E: Engine + ?Sized, - { - let mut sink = chunked_encoder::StringSink::new(output_buf); - - chunked_encoder::ChunkedEncoder::new(engine) - .encode(input_bytes, &mut sink) - .expect("Writing to a String shouldn't fail"); - } - - inner(self, input.as_ref(), output_buf) - } - - /// Encode arbitrary octets as base64 into a supplied slice. - /// Writes into the supplied output buffer. - /// - /// This is useful if you wish to avoid allocation entirely (e.g. encoding into a stack-resident - /// or statically-allocated buffer). 
- /// - /// # Example - /// - #[cfg_attr(feature = "alloc", doc = "```")] - #[cfg_attr(not(feature = "alloc"), doc = "```ignore")] - /// use base64::{Engine as _, engine::general_purpose}; - /// let s = b"hello internet!"; - /// let mut buf = Vec::new(); - /// // make sure we'll have a slice big enough for base64 + padding - /// buf.resize(s.len() * 4 / 3 + 4, 0); - /// - /// let bytes_written = general_purpose::STANDARD.encode_slice(s, &mut buf).unwrap(); - /// - /// // shorten our vec down to just what was written - /// buf.truncate(bytes_written); - /// - /// assert_eq!(s, general_purpose::STANDARD.decode(&buf).unwrap().as_slice()); - /// ``` - #[inline] - fn encode_slice>( - &self, - input: T, - output_buf: &mut [u8], - ) -> Result { - fn inner( - engine: &E, - input_bytes: &[u8], - output_buf: &mut [u8], - ) -> Result - where - E: Engine + ?Sized, - { - let encoded_size = encoded_len(input_bytes.len(), engine.config().encode_padding()) - .expect("usize overflow when calculating buffer size"); - - if output_buf.len() < encoded_size { - return Err(EncodeSliceError::OutputSliceTooSmall); - } - - let b64_output = &mut output_buf[0..encoded_size]; - - encode_with_padding(input_bytes, b64_output, engine, encoded_size); - - Ok(encoded_size) - } - - inner(self, input.as_ref(), output_buf) - } - - /// Decode the input into a new `Vec`. - /// - /// # Example - /// - /// ```rust - /// use base64::{Engine as _, alphabet, engine::{self, general_purpose}}; - /// - /// let bytes = general_purpose::STANDARD - /// .decode("aGVsbG8gd29ybGR+Cg==").unwrap(); - /// println!("{:?}", bytes); - /// - /// // custom engine setup - /// let bytes_url = engine::GeneralPurpose::new( - /// &alphabet::URL_SAFE, - /// general_purpose::NO_PAD) - /// .decode("aGVsbG8gaW50ZXJuZXR-Cg").unwrap(); - /// println!("{:?}", bytes_url); - /// ``` - #[cfg(any(feature = "alloc", test))] - #[inline] - fn decode>(&self, input: T) -> Result, DecodeError> { - fn inner(engine: &E, input_bytes: &[u8]) -> Result, DecodeError> - where - E: Engine + ?Sized, - { - let estimate = engine.internal_decoded_len_estimate(input_bytes.len()); - let mut buffer = vec![0; estimate.decoded_len_estimate()]; - - let bytes_written = engine - .internal_decode(input_bytes, &mut buffer, estimate) - .map_err(|e| match e { - DecodeSliceError::DecodeError(e) => e, - DecodeSliceError::OutputSliceTooSmall => { - unreachable!("Vec is sized conservatively") - } - })? - .decoded_len; - - buffer.truncate(bytes_written); - - Ok(buffer) - } - - inner(self, input.as_ref()) - } - - /// Decode the `input` into the supplied `buffer`. - /// - /// Writes into the supplied `Vec`, which may allocate if its internal buffer isn't big enough. - /// Returns a `Result` containing an empty tuple, aka `()`. 
- /// - /// # Example - /// - /// ```rust - /// use base64::{Engine as _, alphabet, engine::{self, general_purpose}}; - /// const CUSTOM_ENGINE: engine::GeneralPurpose = - /// engine::GeneralPurpose::new(&alphabet::URL_SAFE, general_purpose::PAD); - /// - /// fn main() { - /// use base64::Engine; - /// let mut buffer = Vec::::new(); - /// // with the default engine - /// general_purpose::STANDARD - /// .decode_vec("aGVsbG8gd29ybGR+Cg==", &mut buffer,).unwrap(); - /// println!("{:?}", buffer); - /// - /// buffer.clear(); - /// - /// // with a custom engine - /// CUSTOM_ENGINE.decode_vec( - /// "aGVsbG8gaW50ZXJuZXR-Cg==", - /// &mut buffer, - /// ).unwrap(); - /// println!("{:?}", buffer); - /// } - /// ``` - #[cfg(any(feature = "alloc", test))] - #[inline] - fn decode_vec>( - &self, - input: T, - buffer: &mut Vec, - ) -> Result<(), DecodeError> { - fn inner(engine: &E, input_bytes: &[u8], buffer: &mut Vec) -> Result<(), DecodeError> - where - E: Engine + ?Sized, - { - let starting_output_len = buffer.len(); - let estimate = engine.internal_decoded_len_estimate(input_bytes.len()); - - let total_len_estimate = estimate - .decoded_len_estimate() - .checked_add(starting_output_len) - .expect("Overflow when calculating output buffer length"); - - buffer.resize(total_len_estimate, 0); - - let buffer_slice = &mut buffer.as_mut_slice()[starting_output_len..]; - - let bytes_written = engine - .internal_decode(input_bytes, buffer_slice, estimate) - .map_err(|e| match e { - DecodeSliceError::DecodeError(e) => e, - DecodeSliceError::OutputSliceTooSmall => { - unreachable!("Vec is sized conservatively") - } - })? - .decoded_len; - - buffer.truncate(starting_output_len + bytes_written); - - Ok(()) - } - - inner(self, input.as_ref(), buffer) - } - - /// Decode the input into the provided output slice. - /// - /// Returns the number of bytes written to the slice, or an error if `output` is smaller than - /// the estimated decoded length. - /// - /// This will not write any bytes past exactly what is decoded (no stray garbage bytes at the end). - /// - /// See [crate::decoded_len_estimate] for calculating buffer sizes. - /// - /// See [Engine::decode_slice_unchecked] for a version that panics instead of returning an error - /// if the output buffer is too small. - #[inline] - fn decode_slice>( - &self, - input: T, - output: &mut [u8], - ) -> Result { - fn inner( - engine: &E, - input_bytes: &[u8], - output: &mut [u8], - ) -> Result - where - E: Engine + ?Sized, - { - engine - .internal_decode( - input_bytes, - output, - engine.internal_decoded_len_estimate(input_bytes.len()), - ) - .map(|dm| dm.decoded_len) - } - - inner(self, input.as_ref(), output) - } - - /// Decode the input into the provided output slice. - /// - /// Returns the number of bytes written to the slice. - /// - /// This will not write any bytes past exactly what is decoded (no stray garbage bytes at the end). - /// - /// See [crate::decoded_len_estimate] for calculating buffer sizes. - /// - /// See [Engine::decode_slice] for a version that returns an error instead of panicking if the output - /// buffer is too small. - /// - /// # Panics - /// - /// Panics if the provided output buffer is too small for the decoded data. 
- #[inline] - fn decode_slice_unchecked>( - &self, - input: T, - output: &mut [u8], - ) -> Result { - fn inner(engine: &E, input_bytes: &[u8], output: &mut [u8]) -> Result - where - E: Engine + ?Sized, - { - engine - .internal_decode( - input_bytes, - output, - engine.internal_decoded_len_estimate(input_bytes.len()), - ) - .map(|dm| dm.decoded_len) - .map_err(|e| match e { - DecodeSliceError::DecodeError(e) => e, - DecodeSliceError::OutputSliceTooSmall => { - panic!("Output slice is too small") - } - }) - } - - inner(self, input.as_ref(), output) - } -} - -/// The minimal level of configuration that engines must support. -pub trait Config { - /// Returns `true` if padding should be added after the encoded output. - /// - /// Padding is added outside the engine's encode() since the engine may be used - /// to encode only a chunk of the overall output, so it can't always know when - /// the output is "done" and would therefore need padding (if configured). - // It could be provided as a separate parameter when encoding, but that feels like - // leaking an implementation detail to the user, and it's hopefully more convenient - // to have to only pass one thing (the engine) to any part of the API. - fn encode_padding(&self) -> bool; -} - -/// The decode estimate used by an engine implementation. Users do not need to interact with this; -/// it is only for engine implementors. -/// -/// Implementors may store relevant data here when constructing this to avoid having to calculate -/// them again during actual decoding. -pub trait DecodeEstimate { - /// Returns a conservative (err on the side of too big) estimate of the decoded length to use - /// for pre-allocating buffers, etc. - /// - /// The estimate must be no larger than the next largest complete triple of decoded bytes. - /// That is, the final quad of tokens to decode may be assumed to be complete with no padding. - fn decoded_len_estimate(&self) -> usize; -} - -/// Controls how pad bytes are handled when decoding. -/// -/// Each [Engine] must support at least the behavior indicated by -/// [DecodePaddingMode::RequireCanonical], and may support other modes. -#[derive(Clone, Copy, Debug, PartialEq, Eq)] -pub enum DecodePaddingMode { - /// Canonical padding is allowed, but any fewer padding bytes than that is also allowed. - Indifferent, - /// Padding must be canonical (0, 1, or 2 `=` as needed to produce a 4 byte suffix). - RequireCanonical, - /// Padding must be absent -- for when you want predictable padding, without any wasted bytes. 
- RequireNone, -} - -/// Metadata about the result of a decode operation -#[derive(PartialEq, Eq, Debug)] -pub struct DecodeMetadata { - /// Number of decoded bytes output - pub(crate) decoded_len: usize, - /// Offset of the first padding byte in the input, if any - pub(crate) padding_offset: Option, -} - -impl DecodeMetadata { - pub(crate) fn new(decoded_bytes: usize, padding_index: Option) -> Self { - Self { - decoded_len: decoded_bytes, - padding_offset: padding_index, - } - } -} diff --git a/vendor/base64/src/engine/naive.rs b/vendor/base64/src/engine/naive.rs deleted file mode 100644 index af509bfa56b6a5..00000000000000 --- a/vendor/base64/src/engine/naive.rs +++ /dev/null @@ -1,195 +0,0 @@ -use crate::{ - alphabet::Alphabet, - engine::{ - general_purpose::{self, decode_table, encode_table}, - Config, DecodeEstimate, DecodeMetadata, DecodePaddingMode, Engine, - }, - DecodeError, DecodeSliceError, -}; -use std::ops::{BitAnd, BitOr, Shl, Shr}; - -/// Comparatively simple implementation that can be used as something to compare against in tests -pub struct Naive { - encode_table: [u8; 64], - decode_table: [u8; 256], - config: NaiveConfig, -} - -impl Naive { - const ENCODE_INPUT_CHUNK_SIZE: usize = 3; - const DECODE_INPUT_CHUNK_SIZE: usize = 4; - - pub const fn new(alphabet: &Alphabet, config: NaiveConfig) -> Self { - Self { - encode_table: encode_table(alphabet), - decode_table: decode_table(alphabet), - config, - } - } - - fn decode_byte_into_u32(&self, offset: usize, byte: u8) -> Result { - let decoded = self.decode_table[byte as usize]; - - if decoded == general_purpose::INVALID_VALUE { - return Err(DecodeError::InvalidByte(offset, byte)); - } - - Ok(decoded as u32) - } -} - -impl Engine for Naive { - type Config = NaiveConfig; - type DecodeEstimate = NaiveEstimate; - - fn internal_encode(&self, input: &[u8], output: &mut [u8]) -> usize { - // complete chunks first - - const LOW_SIX_BITS: u32 = 0x3F; - - let rem = input.len() % Self::ENCODE_INPUT_CHUNK_SIZE; - // will never underflow - let complete_chunk_len = input.len() - rem; - - let mut input_index = 0_usize; - let mut output_index = 0_usize; - if let Some(last_complete_chunk_index) = - complete_chunk_len.checked_sub(Self::ENCODE_INPUT_CHUNK_SIZE) - { - while input_index <= last_complete_chunk_index { - let chunk = &input[input_index..input_index + Self::ENCODE_INPUT_CHUNK_SIZE]; - - // populate low 24 bits from 3 bytes - let chunk_int: u32 = - (chunk[0] as u32).shl(16) | (chunk[1] as u32).shl(8) | (chunk[2] as u32); - // encode 4x 6-bit output bytes - output[output_index] = self.encode_table[chunk_int.shr(18) as usize]; - output[output_index + 1] = - self.encode_table[chunk_int.shr(12_u8).bitand(LOW_SIX_BITS) as usize]; - output[output_index + 2] = - self.encode_table[chunk_int.shr(6_u8).bitand(LOW_SIX_BITS) as usize]; - output[output_index + 3] = - self.encode_table[chunk_int.bitand(LOW_SIX_BITS) as usize]; - - input_index += Self::ENCODE_INPUT_CHUNK_SIZE; - output_index += 4; - } - } - - // then leftovers - if rem == 2 { - let chunk = &input[input_index..input_index + 2]; - - // high six bits of chunk[0] - output[output_index] = self.encode_table[chunk[0].shr(2) as usize]; - // bottom 2 bits of [0], high 4 bits of [1] - output[output_index + 1] = - self.encode_table[(chunk[0].shl(4_u8).bitor(chunk[1].shr(4_u8)) as u32) - .bitand(LOW_SIX_BITS) as usize]; - // bottom 4 bits of [1], with the 2 bottom bits as zero - output[output_index + 2] = - self.encode_table[(chunk[1].shl(2_u8) as u32).bitand(LOW_SIX_BITS) as usize]; - - 
output_index += 3; - } else if rem == 1 { - let byte = input[input_index]; - output[output_index] = self.encode_table[byte.shr(2) as usize]; - output[output_index + 1] = - self.encode_table[(byte.shl(4_u8) as u32).bitand(LOW_SIX_BITS) as usize]; - output_index += 2; - } - - output_index - } - - fn internal_decoded_len_estimate(&self, input_len: usize) -> Self::DecodeEstimate { - NaiveEstimate::new(input_len) - } - - fn internal_decode( - &self, - input: &[u8], - output: &mut [u8], - estimate: Self::DecodeEstimate, - ) -> Result { - let complete_nonterminal_quads_len = general_purpose::decode::complete_quads_len( - input, - estimate.rem, - output.len(), - &self.decode_table, - )?; - - const BOTTOM_BYTE: u32 = 0xFF; - - for (chunk_index, chunk) in input[..complete_nonterminal_quads_len] - .chunks_exact(4) - .enumerate() - { - let input_index = chunk_index * 4; - let output_index = chunk_index * 3; - - let decoded_int: u32 = self.decode_byte_into_u32(input_index, chunk[0])?.shl(18) - | self - .decode_byte_into_u32(input_index + 1, chunk[1])? - .shl(12) - | self.decode_byte_into_u32(input_index + 2, chunk[2])?.shl(6) - | self.decode_byte_into_u32(input_index + 3, chunk[3])?; - - output[output_index] = decoded_int.shr(16_u8).bitand(BOTTOM_BYTE) as u8; - output[output_index + 1] = decoded_int.shr(8_u8).bitand(BOTTOM_BYTE) as u8; - output[output_index + 2] = decoded_int.bitand(BOTTOM_BYTE) as u8; - } - - general_purpose::decode_suffix::decode_suffix( - input, - complete_nonterminal_quads_len, - output, - complete_nonterminal_quads_len / 4 * 3, - &self.decode_table, - self.config.decode_allow_trailing_bits, - self.config.decode_padding_mode, - ) - } - - fn config(&self) -> &Self::Config { - &self.config - } -} - -pub struct NaiveEstimate { - /// remainder from dividing input by `Naive::DECODE_CHUNK_SIZE` - rem: usize, - /// Length of input that is in complete `Naive::DECODE_CHUNK_SIZE`-length chunks - complete_chunk_len: usize, -} - -impl NaiveEstimate { - fn new(input_len: usize) -> Self { - let rem = input_len % Naive::DECODE_INPUT_CHUNK_SIZE; - let complete_chunk_len = input_len - rem; - - Self { - rem, - complete_chunk_len, - } - } -} - -impl DecodeEstimate for NaiveEstimate { - fn decoded_len_estimate(&self) -> usize { - ((self.complete_chunk_len / 4) + ((self.rem > 0) as usize)) * 3 - } -} - -#[derive(Clone, Copy, Debug)] -pub struct NaiveConfig { - pub encode_padding: bool, - pub decode_allow_trailing_bits: bool, - pub decode_padding_mode: DecodePaddingMode, -} - -impl Config for NaiveConfig { - fn encode_padding(&self) -> bool { - self.encode_padding - } -} diff --git a/vendor/base64/src/engine/tests.rs b/vendor/base64/src/engine/tests.rs deleted file mode 100644 index 72bbf4bb046d29..00000000000000 --- a/vendor/base64/src/engine/tests.rs +++ /dev/null @@ -1,1579 +0,0 @@ -// rstest_reuse template functions have unused variables -#![allow(unused_variables)] - -use rand::{ - self, - distributions::{self, Distribution as _}, - rngs, Rng as _, SeedableRng as _, -}; -use rstest::rstest; -use rstest_reuse::{apply, template}; -use std::{collections, fmt, io::Read as _}; - -use crate::{ - alphabet::{Alphabet, STANDARD}, - encode::add_padding, - encoded_len, - engine::{ - general_purpose, naive, Config, DecodeEstimate, DecodeMetadata, DecodePaddingMode, Engine, - }, - read::DecoderReader, - tests::{assert_encode_sanity, random_alphabet, random_config}, - DecodeError, DecodeSliceError, PAD_BYTE, -}; - -// the case::foo syntax includes the "foo" in the generated test method names -#[template] 
-#[rstest(engine_wrapper, -case::general_purpose(GeneralPurposeWrapper {}), -case::naive(NaiveWrapper {}), -case::decoder_reader(DecoderReaderEngineWrapper {}), -)] -fn all_engines(engine_wrapper: E) {} - -/// Some decode tests don't make sense for use with `DecoderReader` as they are difficult to -/// reason about or otherwise inapplicable given how DecoderReader slice up its input along -/// chunk boundaries. -#[template] -#[rstest(engine_wrapper, -case::general_purpose(GeneralPurposeWrapper {}), -case::naive(NaiveWrapper {}), -)] -fn all_engines_except_decoder_reader(engine_wrapper: E) {} - -#[apply(all_engines)] -fn rfc_test_vectors_std_alphabet(engine_wrapper: E) { - let data = vec![ - ("", ""), - ("f", "Zg=="), - ("fo", "Zm8="), - ("foo", "Zm9v"), - ("foob", "Zm9vYg=="), - ("fooba", "Zm9vYmE="), - ("foobar", "Zm9vYmFy"), - ]; - - let engine = E::standard(); - let engine_no_padding = E::standard_unpadded(); - - for (orig, encoded) in &data { - let encoded_without_padding = encoded.trim_end_matches('='); - - // unpadded - { - let mut encode_buf = [0_u8; 8]; - let mut decode_buf = [0_u8; 6]; - - let encode_len = - engine_no_padding.internal_encode(orig.as_bytes(), &mut encode_buf[..]); - assert_eq!( - &encoded_without_padding, - &std::str::from_utf8(&encode_buf[0..encode_len]).unwrap() - ); - let decode_len = engine_no_padding - .decode_slice_unchecked(encoded_without_padding.as_bytes(), &mut decode_buf[..]) - .unwrap(); - assert_eq!(orig.len(), decode_len); - - assert_eq!( - orig, - &std::str::from_utf8(&decode_buf[0..decode_len]).unwrap() - ); - - // if there was any padding originally, the no padding engine won't decode it - if encoded.as_bytes().contains(&PAD_BYTE) { - assert_eq!( - Err(DecodeError::InvalidPadding), - engine_no_padding.decode(encoded) - ) - } - } - - // padded - { - let mut encode_buf = [0_u8; 8]; - let mut decode_buf = [0_u8; 6]; - - let encode_len = engine.internal_encode(orig.as_bytes(), &mut encode_buf[..]); - assert_eq!( - // doesn't have padding added yet - &encoded_without_padding, - &std::str::from_utf8(&encode_buf[0..encode_len]).unwrap() - ); - let pad_len = add_padding(encode_len, &mut encode_buf[encode_len..]); - assert_eq!(encoded.as_bytes(), &encode_buf[..encode_len + pad_len]); - - let decode_len = engine - .decode_slice_unchecked(encoded.as_bytes(), &mut decode_buf[..]) - .unwrap(); - assert_eq!(orig.len(), decode_len); - - assert_eq!( - orig, - &std::str::from_utf8(&decode_buf[0..decode_len]).unwrap() - ); - - // if there was (canonical) padding, and we remove it, the standard engine won't decode - if encoded.as_bytes().contains(&PAD_BYTE) { - assert_eq!( - Err(DecodeError::InvalidPadding), - engine.decode(encoded_without_padding) - ) - } - } - } -} - -#[apply(all_engines)] -fn roundtrip_random(engine_wrapper: E) { - let mut rng = seeded_rng(); - - let mut orig_data = Vec::::new(); - let mut encode_buf = Vec::::new(); - let mut decode_buf = Vec::::new(); - - let len_range = distributions::Uniform::new(1, 1_000); - - for _ in 0..10_000 { - let engine = E::random(&mut rng); - - orig_data.clear(); - encode_buf.clear(); - decode_buf.clear(); - - let (orig_len, _, encoded_len) = generate_random_encoded_data( - &engine, - &mut orig_data, - &mut encode_buf, - &mut rng, - &len_range, - ); - - // exactly the right size - decode_buf.resize(orig_len, 0); - - let dec_len = engine - .decode_slice_unchecked(&encode_buf[0..encoded_len], &mut decode_buf[..]) - .unwrap(); - - assert_eq!(orig_len, dec_len); - assert_eq!(&orig_data[..], &decode_buf[..dec_len]); - } -} 
- -#[apply(all_engines)] -fn encode_doesnt_write_extra_bytes(engine_wrapper: E) { - let mut rng = seeded_rng(); - - let mut orig_data = Vec::::new(); - let mut encode_buf = Vec::::new(); - let mut encode_buf_backup = Vec::::new(); - - let input_len_range = distributions::Uniform::new(0, 1000); - - for _ in 0..10_000 { - let engine = E::random(&mut rng); - let padded = engine.config().encode_padding(); - - orig_data.clear(); - encode_buf.clear(); - encode_buf_backup.clear(); - - let orig_len = fill_rand(&mut orig_data, &mut rng, &input_len_range); - - let prefix_len = 1024; - // plenty of prefix and suffix - fill_rand_len(&mut encode_buf, &mut rng, prefix_len * 2 + orig_len * 2); - encode_buf_backup.extend_from_slice(&encode_buf[..]); - - let expected_encode_len_no_pad = encoded_len(orig_len, false).unwrap(); - - let encoded_len_no_pad = - engine.internal_encode(&orig_data[..], &mut encode_buf[prefix_len..]); - assert_eq!(expected_encode_len_no_pad, encoded_len_no_pad); - - // no writes past what it claimed to write - assert_eq!(&encode_buf_backup[..prefix_len], &encode_buf[..prefix_len]); - assert_eq!( - &encode_buf_backup[(prefix_len + encoded_len_no_pad)..], - &encode_buf[(prefix_len + encoded_len_no_pad)..] - ); - - let encoded_data = &encode_buf[prefix_len..(prefix_len + encoded_len_no_pad)]; - assert_encode_sanity( - std::str::from_utf8(encoded_data).unwrap(), - // engines don't pad - false, - orig_len, - ); - - // pad so we can decode it in case our random engine requires padding - let pad_len = if padded { - add_padding( - encoded_len_no_pad, - &mut encode_buf[prefix_len + encoded_len_no_pad..], - ) - } else { - 0 - }; - - assert_eq!( - orig_data, - engine - .decode(&encode_buf[prefix_len..(prefix_len + encoded_len_no_pad + pad_len)],) - .unwrap() - ); - } -} - -#[apply(all_engines)] -fn encode_engine_slice_fits_into_precisely_sized_slice(engine_wrapper: E) { - let mut orig_data = Vec::new(); - let mut encoded_data = Vec::new(); - let mut decoded = Vec::new(); - - let input_len_range = distributions::Uniform::new(0, 1000); - - let mut rng = rngs::SmallRng::from_entropy(); - - for _ in 0..10_000 { - orig_data.clear(); - encoded_data.clear(); - decoded.clear(); - - let input_len = input_len_range.sample(&mut rng); - - for _ in 0..input_len { - orig_data.push(rng.gen()); - } - - let engine = E::random(&mut rng); - - let encoded_size = encoded_len(input_len, engine.config().encode_padding()).unwrap(); - - encoded_data.resize(encoded_size, 0); - - assert_eq!( - encoded_size, - engine.encode_slice(&orig_data, &mut encoded_data).unwrap() - ); - - assert_encode_sanity( - std::str::from_utf8(&encoded_data[0..encoded_size]).unwrap(), - engine.config().encode_padding(), - input_len, - ); - - engine - .decode_vec(&encoded_data[0..encoded_size], &mut decoded) - .unwrap(); - assert_eq!(orig_data, decoded); - } -} - -#[apply(all_engines)] -fn decode_doesnt_write_extra_bytes(engine_wrapper: E) -where - E: EngineWrapper, - <::Engine as Engine>::Config: fmt::Debug, -{ - let mut rng = seeded_rng(); - - let mut orig_data = Vec::::new(); - let mut encode_buf = Vec::::new(); - let mut decode_buf = Vec::::new(); - let mut decode_buf_backup = Vec::::new(); - - let len_range = distributions::Uniform::new(1, 1_000); - - for _ in 0..10_000 { - let engine = E::random(&mut rng); - - orig_data.clear(); - encode_buf.clear(); - decode_buf.clear(); - decode_buf_backup.clear(); - - let orig_len = fill_rand(&mut orig_data, &mut rng, &len_range); - encode_buf.resize(orig_len * 2 + 100, 0); - - let encoded_len = engine 
- .encode_slice(&orig_data[..], &mut encode_buf[..]) - .unwrap(); - encode_buf.truncate(encoded_len); - - // oversize decode buffer so we can easily tell if it writes anything more than - // just the decoded data - let prefix_len = 1024; - // plenty of prefix and suffix - fill_rand_len(&mut decode_buf, &mut rng, prefix_len * 2 + orig_len * 2); - decode_buf_backup.extend_from_slice(&decode_buf[..]); - - let dec_len = engine - .decode_slice_unchecked(&encode_buf, &mut decode_buf[prefix_len..]) - .unwrap(); - - assert_eq!(orig_len, dec_len); - assert_eq!( - &orig_data[..], - &decode_buf[prefix_len..prefix_len + dec_len] - ); - assert_eq!(&decode_buf_backup[..prefix_len], &decode_buf[..prefix_len]); - assert_eq!( - &decode_buf_backup[prefix_len + dec_len..], - &decode_buf[prefix_len + dec_len..] - ); - } -} - -#[apply(all_engines)] -fn decode_detect_invalid_last_symbol(engine_wrapper: E) { - // 0xFF -> "/w==", so all letters > w, 0-9, and '+', '/' should get InvalidLastSymbol - let engine = E::standard(); - - assert_eq!(Ok(vec![0x89, 0x85]), engine.decode("iYU=")); - assert_eq!(Ok(vec![0xFF]), engine.decode("/w==")); - - for (suffix, offset) in vec![ - // suffix, offset of bad byte from start of suffix - ("/x==", 1_usize), - ("/z==", 1_usize), - ("/0==", 1_usize), - ("/9==", 1_usize), - ("/+==", 1_usize), - ("//==", 1_usize), - // trailing 01 - ("iYV=", 2_usize), - // trailing 10 - ("iYW=", 2_usize), - // trailing 11 - ("iYX=", 2_usize), - ] { - for prefix_quads in 0..256 { - let mut encoded = "AAAA".repeat(prefix_quads); - encoded.push_str(suffix); - - assert_eq!( - Err(DecodeError::InvalidLastSymbol( - encoded.len() - 4 + offset, - suffix.as_bytes()[offset], - )), - engine.decode(encoded.as_str()) - ); - } - } -} - -#[apply(all_engines)] -fn decode_detect_1_valid_symbol_in_last_quad_invalid_length(engine_wrapper: E) { - for len in (0_usize..256).map(|len| len * 4 + 1) { - for mode in all_pad_modes() { - let mut input = vec![b'A'; len]; - - let engine = E::standard_with_pad_mode(true, mode); - - assert_eq!(Err(DecodeError::InvalidLength(len)), engine.decode(&input)); - // if we add padding, then the first pad byte in the quad is invalid because it should - // be the second symbol - for _ in 0..3 { - input.push(PAD_BYTE); - assert_eq!( - Err(DecodeError::InvalidByte(len, PAD_BYTE)), - engine.decode(&input) - ); - } - } - } -} - -#[apply(all_engines)] -fn decode_detect_1_invalid_byte_in_last_quad_invalid_byte(engine_wrapper: E) { - for prefix_len in (0_usize..256).map(|len| len * 4) { - for mode in all_pad_modes() { - let mut input = vec![b'A'; prefix_len]; - input.push(b'*'); - - let engine = E::standard_with_pad_mode(true, mode); - - assert_eq!( - Err(DecodeError::InvalidByte(prefix_len, b'*')), - engine.decode(&input) - ); - // adding padding doesn't matter - for _ in 0..3 { - input.push(PAD_BYTE); - assert_eq!( - Err(DecodeError::InvalidByte(prefix_len, b'*')), - engine.decode(&input) - ); - } - } - } -} - -#[apply(all_engines)] -fn decode_detect_invalid_last_symbol_every_possible_two_symbols( - engine_wrapper: E, -) { - let engine = E::standard(); - - let mut base64_to_bytes = collections::HashMap::new(); - - for b in 0_u8..=255 { - let mut b64 = vec![0_u8; 4]; - assert_eq!(2, engine.internal_encode(&[b], &mut b64[..])); - let _ = add_padding(2, &mut b64[2..]); - - assert!(base64_to_bytes.insert(b64, vec![b]).is_none()); - } - - // every possible combination of trailing symbols must either decode to 1 byte or get InvalidLastSymbol, with or without any leading chunks - - let mut prefix = 
Vec::new(); - for _ in 0..256 { - let mut clone = prefix.clone(); - - let mut symbols = [0_u8; 4]; - for &s1 in STANDARD.symbols.iter() { - symbols[0] = s1; - for &s2 in STANDARD.symbols.iter() { - symbols[1] = s2; - symbols[2] = PAD_BYTE; - symbols[3] = PAD_BYTE; - - // chop off previous symbols - clone.truncate(prefix.len()); - clone.extend_from_slice(&symbols[..]); - let decoded_prefix_len = prefix.len() / 4 * 3; - - match base64_to_bytes.get(&symbols[..]) { - Some(bytes) => { - let res = engine - .decode(&clone) - // remove prefix - .map(|decoded| decoded[decoded_prefix_len..].to_vec()); - - assert_eq!(Ok(bytes.clone()), res); - } - None => assert_eq!( - Err(DecodeError::InvalidLastSymbol(1, s2)), - engine.decode(&symbols[..]) - ), - } - } - } - - prefix.extend_from_slice(b"AAAA"); - } -} - -#[apply(all_engines)] -fn decode_detect_invalid_last_symbol_every_possible_three_symbols( - engine_wrapper: E, -) { - let engine = E::standard(); - - let mut base64_to_bytes = collections::HashMap::new(); - - let mut bytes = [0_u8; 2]; - for b1 in 0_u8..=255 { - bytes[0] = b1; - for b2 in 0_u8..=255 { - bytes[1] = b2; - let mut b64 = vec![0_u8; 4]; - assert_eq!(3, engine.internal_encode(&bytes, &mut b64[..])); - let _ = add_padding(3, &mut b64[3..]); - - let mut v = Vec::with_capacity(2); - v.extend_from_slice(&bytes[..]); - - assert!(base64_to_bytes.insert(b64, v).is_none()); - } - } - - // every possible combination of symbols must either decode to 2 bytes or get InvalidLastSymbol, with or without any leading chunks - - let mut prefix = Vec::new(); - let mut input = Vec::new(); - for _ in 0..256 { - input.clear(); - input.extend_from_slice(&prefix); - - let mut symbols = [0_u8; 4]; - for &s1 in STANDARD.symbols.iter() { - symbols[0] = s1; - for &s2 in STANDARD.symbols.iter() { - symbols[1] = s2; - for &s3 in STANDARD.symbols.iter() { - symbols[2] = s3; - symbols[3] = PAD_BYTE; - - // chop off previous symbols - input.truncate(prefix.len()); - input.extend_from_slice(&symbols[..]); - let decoded_prefix_len = prefix.len() / 4 * 3; - - match base64_to_bytes.get(&symbols[..]) { - Some(bytes) => { - let res = engine - .decode(&input) - // remove prefix - .map(|decoded| decoded[decoded_prefix_len..].to_vec()); - - assert_eq!(Ok(bytes.clone()), res); - } - None => assert_eq!( - Err(DecodeError::InvalidLastSymbol(2, s3)), - engine.decode(&symbols[..]) - ), - } - } - } - } - prefix.extend_from_slice(b"AAAA"); - } -} - -#[apply(all_engines)] -fn decode_invalid_trailing_bits_ignored_when_configured(engine_wrapper: E) { - let strict = E::standard(); - let forgiving = E::standard_allow_trailing_bits(); - - fn assert_tolerant_decode( - engine: &E, - input: &mut String, - b64_prefix_len: usize, - expected_decode_bytes: Vec, - data: &str, - ) { - let prefixed = prefixed_data(input, b64_prefix_len, data); - let decoded = engine.decode(prefixed); - // prefix is always complete chunks - let decoded_prefix_len = b64_prefix_len / 4 * 3; - assert_eq!( - Ok(expected_decode_bytes), - decoded.map(|v| v[decoded_prefix_len..].to_vec()) - ); - } - - let mut prefix = String::new(); - for _ in 0..256 { - let mut input = prefix.clone(); - - // example from https://github.com/marshallpierce/rust-base64/issues/75 - assert!(strict - .decode(prefixed_data(&mut input, prefix.len(), "/w==")) - .is_ok()); - assert!(strict - .decode(prefixed_data(&mut input, prefix.len(), "iYU=")) - .is_ok()); - // trailing 01 - assert_tolerant_decode(&forgiving, &mut input, prefix.len(), vec![255], "/x=="); - assert_tolerant_decode(&forgiving, &mut 
input, prefix.len(), vec![137, 133], "iYV="); - // trailing 10 - assert_tolerant_decode(&forgiving, &mut input, prefix.len(), vec![255], "/y=="); - assert_tolerant_decode(&forgiving, &mut input, prefix.len(), vec![137, 133], "iYW="); - // trailing 11 - assert_tolerant_decode(&forgiving, &mut input, prefix.len(), vec![255], "/z=="); - assert_tolerant_decode(&forgiving, &mut input, prefix.len(), vec![137, 133], "iYX="); - - prefix.push_str("AAAA"); - } -} - -#[apply(all_engines)] -fn decode_invalid_byte_error(engine_wrapper: E) { - let mut rng = seeded_rng(); - - let mut orig_data = Vec::::new(); - let mut encode_buf = Vec::::new(); - let mut decode_buf = Vec::::new(); - - let len_range = distributions::Uniform::new(1, 1_000); - - for _ in 0..100_000 { - let alphabet = random_alphabet(&mut rng); - let engine = E::random_alphabet(&mut rng, alphabet); - - orig_data.clear(); - encode_buf.clear(); - decode_buf.clear(); - - let (orig_len, encoded_len_just_data, encoded_len_with_padding) = - generate_random_encoded_data( - &engine, - &mut orig_data, - &mut encode_buf, - &mut rng, - &len_range, - ); - - // exactly the right size - decode_buf.resize(orig_len, 0); - - // replace one encoded byte with an invalid byte - let invalid_byte: u8 = loop { - let byte: u8 = rng.gen(); - - if alphabet.symbols.contains(&byte) || byte == PAD_BYTE { - continue; - } else { - break byte; - } - }; - - let invalid_range = distributions::Uniform::new(0, orig_len); - let invalid_index = invalid_range.sample(&mut rng); - encode_buf[invalid_index] = invalid_byte; - - assert_eq!( - Err(DecodeError::InvalidByte(invalid_index, invalid_byte)), - engine.decode_slice_unchecked( - &encode_buf[0..encoded_len_with_padding], - &mut decode_buf[..], - ) - ); - } -} - -/// Any amount of padding anywhere before the final non padding character = invalid byte at first -/// pad byte. -/// From this and [decode_padding_before_final_non_padding_char_error_invalid_byte_at_first_pad_non_canonical_padding_suffix_all_modes], -/// we know padding must extend contiguously to the end of the input. -#[apply(all_engines)] -fn decode_padding_before_final_non_padding_char_error_invalid_byte_at_first_pad_all_modes< - E: EngineWrapper, ->( - engine_wrapper: E, -) { - // Different amounts of padding, w/ offset from end for the last non-padding char. - // Only canonical padding, so Canonical mode will work. - let suffixes = &[("AA==", 2), ("AAA=", 1), ("AAAA", 0)]; - - for mode in pad_modes_allowing_padding() { - // We don't encode, so we don't care about encode padding. - let engine = E::standard_with_pad_mode(true, mode); - - decode_padding_before_final_non_padding_char_error_invalid_byte_at_first_pad( - engine, - suffixes.as_slice(), - ); - } -} - -/// See [decode_padding_before_final_non_padding_char_error_invalid_byte_at_first_pad_all_modes] -#[apply(all_engines)] -fn decode_padding_before_final_non_padding_char_error_invalid_byte_at_first_pad_non_canonical_padding_suffix< - E: EngineWrapper, ->( - engine_wrapper: E, -) { - // Different amounts of padding, w/ offset from end for the last non-padding char, and - // non-canonical padding. - let suffixes = [ - ("AA==", 2), - ("AA=", 1), - ("AA", 0), - ("AAA=", 1), - ("AAA", 0), - ("AAAA", 0), - ]; - - // We don't encode, so we don't care about encode padding. 
- // Decoding is indifferent so that we don't get caught by missing padding on the last quad - let engine = E::standard_with_pad_mode(true, DecodePaddingMode::Indifferent); - - decode_padding_before_final_non_padding_char_error_invalid_byte_at_first_pad( - engine, - suffixes.as_slice(), - ) -} - -fn decode_padding_before_final_non_padding_char_error_invalid_byte_at_first_pad( - engine: impl Engine, - suffixes: &[(&str, usize)], -) { - let mut rng = seeded_rng(); - - let prefix_quads_range = distributions::Uniform::from(0..=256); - - for _ in 0..100_000 { - for (suffix, suffix_offset) in suffixes.iter() { - let mut s = "AAAA".repeat(prefix_quads_range.sample(&mut rng)); - s.push_str(suffix); - let mut encoded = s.into_bytes(); - - // calculate a range to write padding into that leaves at least one non padding char - let last_non_padding_offset = encoded.len() - 1 - suffix_offset; - - // don't include last non padding char as it must stay not padding - let padding_end = rng.gen_range(0..last_non_padding_offset); - - // don't use more than 100 bytes of padding, but also use shorter lengths when - // padding_end is near the start of the encoded data to avoid biasing to padding - // the entire prefix on short lengths - let padding_len = rng.gen_range(1..=usize::min(100, padding_end + 1)); - let padding_start = padding_end.saturating_sub(padding_len); - - encoded[padding_start..=padding_end].fill(PAD_BYTE); - - // should still have non-padding before any final padding - assert_ne!(PAD_BYTE, encoded[last_non_padding_offset]); - assert_eq!( - Err(DecodeError::InvalidByte(padding_start, PAD_BYTE)), - engine.decode(&encoded), - "len: {}, input: {}", - encoded.len(), - String::from_utf8(encoded).unwrap() - ); - } - } -} - -/// Any amount of padding before final chunk that crosses over into final chunk with 1-4 bytes = -/// invalid byte at first pad byte. -/// From this we know the padding must start in the final chunk. 
-#[apply(all_engines)] -fn decode_padding_starts_before_final_chunk_error_invalid_byte_at_first_pad( - engine_wrapper: E, -) { - let mut rng = seeded_rng(); - - // must have at least one prefix quad - let prefix_quads_range = distributions::Uniform::from(1..256); - let suffix_pad_len_range = distributions::Uniform::from(1..=4); - // don't use no-padding mode, as the reader decode might decode a block that ends with - // valid padding, which should then be referenced when encountering the later invalid byte - for mode in pad_modes_allowing_padding() { - // we don't encode so we don't care about encode padding - let engine = E::standard_with_pad_mode(true, mode); - for _ in 0..100_000 { - let suffix_len = suffix_pad_len_range.sample(&mut rng); - // all 0 bits so we don't hit InvalidLastSymbol with the reader decoder - let mut encoded = "AAAA" - .repeat(prefix_quads_range.sample(&mut rng)) - .into_bytes(); - encoded.resize(encoded.len() + suffix_len, PAD_BYTE); - - // amount of padding must be long enough to extend back from suffix into previous - // quads - let padding_len = rng.gen_range(suffix_len + 1..encoded.len()); - // no non-padding after padding in this test, so padding goes to the end - let padding_start = encoded.len() - padding_len; - encoded[padding_start..].fill(PAD_BYTE); - - assert_eq!( - Err(DecodeError::InvalidByte(padding_start, PAD_BYTE)), - engine.decode(&encoded), - "suffix_len: {}, padding_len: {}, b64: {}", - suffix_len, - padding_len, - std::str::from_utf8(&encoded).unwrap() - ); - } - } -} - -/// 0-1 bytes of data before any amount of padding in final chunk = invalid byte, since padding -/// is not valid data (consistent with error for pad bytes in earlier chunks). -/// From this we know there must be 2-3 bytes of data before padding -#[apply(all_engines)] -fn decode_too_little_data_before_padding_error_invalid_byte(engine_wrapper: E) { - let mut rng = seeded_rng(); - - // want to test no prefix quad case, so start at 0 - let prefix_quads_range = distributions::Uniform::from(0_usize..256); - let suffix_data_len_range = distributions::Uniform::from(0_usize..=1); - for mode in all_pad_modes() { - // we don't encode so we don't care about encode padding - let engine = E::standard_with_pad_mode(true, mode); - for _ in 0..100_000 { - let suffix_data_len = suffix_data_len_range.sample(&mut rng); - let prefix_quad_len = prefix_quads_range.sample(&mut rng); - - // for all possible padding lengths - for padding_len in 1..=(4 - suffix_data_len) { - let mut encoded = "ABCD".repeat(prefix_quad_len).into_bytes(); - encoded.resize(encoded.len() + suffix_data_len, b'A'); - encoded.resize(encoded.len() + padding_len, PAD_BYTE); - - assert_eq!( - Err(DecodeError::InvalidByte( - prefix_quad_len * 4 + suffix_data_len, - PAD_BYTE, - )), - engine.decode(&encoded), - "input {} suffix data len {} pad len {}", - String::from_utf8(encoded).unwrap(), - suffix_data_len, - padding_len - ); - } - } - } -} - -// https://eprint.iacr.org/2022/361.pdf table 2, test 1 -#[apply(all_engines)] -fn decode_malleability_test_case_3_byte_suffix_valid(engine_wrapper: E) { - assert_eq!( - b"Hello".as_slice(), - &E::standard().decode("SGVsbG8=").unwrap() - ); -} - -// https://eprint.iacr.org/2022/361.pdf table 2, test 2 -#[apply(all_engines)] -fn decode_malleability_test_case_3_byte_suffix_invalid_trailing_symbol( - engine_wrapper: E, -) { - assert_eq!( - DecodeError::InvalidLastSymbol(6, 0x39), - E::standard().decode("SGVsbG9=").unwrap_err() - ); -} - -// https://eprint.iacr.org/2022/361.pdf table 2, test 3 
-#[apply(all_engines)] -fn decode_malleability_test_case_3_byte_suffix_no_padding(engine_wrapper: E) { - assert_eq!( - DecodeError::InvalidPadding, - E::standard().decode("SGVsbG9").unwrap_err() - ); -} - -// https://eprint.iacr.org/2022/361.pdf table 2, test 4 -#[apply(all_engines)] -fn decode_malleability_test_case_2_byte_suffix_valid_two_padding_symbols( - engine_wrapper: E, -) { - assert_eq!( - b"Hell".as_slice(), - &E::standard().decode("SGVsbA==").unwrap() - ); -} - -// https://eprint.iacr.org/2022/361.pdf table 2, test 5 -#[apply(all_engines)] -fn decode_malleability_test_case_2_byte_suffix_short_padding(engine_wrapper: E) { - assert_eq!( - DecodeError::InvalidPadding, - E::standard().decode("SGVsbA=").unwrap_err() - ); -} - -// https://eprint.iacr.org/2022/361.pdf table 2, test 6 -#[apply(all_engines)] -fn decode_malleability_test_case_2_byte_suffix_no_padding(engine_wrapper: E) { - assert_eq!( - DecodeError::InvalidPadding, - E::standard().decode("SGVsbA").unwrap_err() - ); -} - -// https://eprint.iacr.org/2022/361.pdf table 2, test 7 -// DecoderReader pseudo-engine gets InvalidByte at 8 (extra padding) since it decodes the first -// two complete quads correctly. -#[apply(all_engines_except_decoder_reader)] -fn decode_malleability_test_case_2_byte_suffix_too_much_padding( - engine_wrapper: E, -) { - assert_eq!( - DecodeError::InvalidByte(6, PAD_BYTE), - E::standard().decode("SGVsbA====").unwrap_err() - ); -} - -/// Requires canonical padding -> accepts 2 + 2, 3 + 1, 4 + 0 final quad configurations -#[apply(all_engines)] -fn decode_pad_mode_requires_canonical_accepts_canonical(engine_wrapper: E) { - assert_all_suffixes_ok( - E::standard_with_pad_mode(true, DecodePaddingMode::RequireCanonical), - vec!["/w==", "iYU=", "AAAA"], - ); -} - -/// Requires canonical padding -> rejects 2 + 0-1, 3 + 0 final chunk configurations -#[apply(all_engines)] -fn decode_pad_mode_requires_canonical_rejects_non_canonical(engine_wrapper: E) { - let engine = E::standard_with_pad_mode(true, DecodePaddingMode::RequireCanonical); - - let suffixes = ["/w", "/w=", "iYU"]; - for num_prefix_quads in 0..256 { - for &suffix in suffixes.iter() { - let mut encoded = "AAAA".repeat(num_prefix_quads); - encoded.push_str(suffix); - - let res = engine.decode(&encoded); - - assert_eq!(Err(DecodeError::InvalidPadding), res); - } - } -} - -/// Requires no padding -> accepts 2 + 0, 3 + 0, 4 + 0 final chunk configuration -#[apply(all_engines)] -fn decode_pad_mode_requires_no_padding_accepts_no_padding(engine_wrapper: E) { - assert_all_suffixes_ok( - E::standard_with_pad_mode(true, DecodePaddingMode::RequireNone), - vec!["/w", "iYU", "AAAA"], - ); -} - -/// Requires no padding -> rejects 2 + 1-2, 3 + 1 final chunk configuration -#[apply(all_engines)] -fn decode_pad_mode_requires_no_padding_rejects_any_padding(engine_wrapper: E) { - let engine = E::standard_with_pad_mode(true, DecodePaddingMode::RequireNone); - - let suffixes = ["/w=", "/w==", "iYU="]; - for num_prefix_quads in 0..256 { - for &suffix in suffixes.iter() { - let mut encoded = "AAAA".repeat(num_prefix_quads); - encoded.push_str(suffix); - - let res = engine.decode(&encoded); - - assert_eq!(Err(DecodeError::InvalidPadding), res); - } - } -} - -/// Indifferent padding accepts 2 + 0-2, 3 + 0-1, 4 + 0 final chunk configuration -#[apply(all_engines)] -fn decode_pad_mode_indifferent_padding_accepts_anything(engine_wrapper: E) { - assert_all_suffixes_ok( - E::standard_with_pad_mode(true, DecodePaddingMode::Indifferent), - vec!["/w", "/w=", "/w==", "iYU", "iYU=", 
"AAAA"], - ); -} - -/// 1 trailing byte that's not padding is detected as invalid byte even though there's padding -/// in the middle of the input. This is essentially mandating the eager check for 1 trailing byte -/// to catch the \n suffix case. -// DecoderReader pseudo-engine can't handle DecodePaddingMode::RequireNone since it will decode -// a complete quad with padding in it before encountering the stray byte that makes it an invalid -// length -#[apply(all_engines_except_decoder_reader)] -fn decode_invalid_trailing_bytes_all_pad_modes_invalid_byte(engine_wrapper: E) { - for mode in all_pad_modes() { - do_invalid_trailing_byte(E::standard_with_pad_mode(true, mode), mode); - } -} - -#[apply(all_engines)] -fn decode_invalid_trailing_bytes_invalid_byte(engine_wrapper: E) { - // excluding no padding mode because the DecoderWrapper pseudo-engine will fail with - // InvalidPadding because it will decode the last complete quad with padding first - for mode in pad_modes_allowing_padding() { - do_invalid_trailing_byte(E::standard_with_pad_mode(true, mode), mode); - } -} -fn do_invalid_trailing_byte(engine: impl Engine, mode: DecodePaddingMode) { - for last_byte in [b'*', b'\n'] { - for num_prefix_quads in 0..256 { - let mut s: String = "ABCD".repeat(num_prefix_quads); - s.push_str("Cg=="); - let mut input = s.into_bytes(); - input.push(last_byte); - - // The case of trailing newlines is common enough to warrant a test for a good error - // message. - assert_eq!( - Err(DecodeError::InvalidByte( - num_prefix_quads * 4 + 4, - last_byte - )), - engine.decode(&input), - "mode: {:?}, input: {}", - mode, - String::from_utf8(input).unwrap() - ); - } - } -} - -/// When there's 1 trailing byte, but it's padding, it's only InvalidByte if there isn't padding -/// earlier. 
-#[apply(all_engines)] -fn decode_invalid_trailing_padding_as_invalid_byte_at_first_pad_byte( - engine_wrapper: E, -) { - // excluding no padding mode because the DecoderWrapper pseudo-engine will fail with - // InvalidPadding because it will decode the last complete quad with padding first - for mode in pad_modes_allowing_padding() { - do_invalid_trailing_padding_as_invalid_byte_at_first_padding( - E::standard_with_pad_mode(true, mode), - mode, - ); - } -} - -// DecoderReader pseudo-engine can't handle DecodePaddingMode::RequireNone since it will decode -// a complete quad with padding in it before encountering the stray byte that makes it an invalid -// length -#[apply(all_engines_except_decoder_reader)] -fn decode_invalid_trailing_padding_as_invalid_byte_at_first_byte_all_modes( - engine_wrapper: E, -) { - for mode in all_pad_modes() { - do_invalid_trailing_padding_as_invalid_byte_at_first_padding( - E::standard_with_pad_mode(true, mode), - mode, - ); - } -} -fn do_invalid_trailing_padding_as_invalid_byte_at_first_padding( - engine: impl Engine, - mode: DecodePaddingMode, -) { - for num_prefix_quads in 0..256 { - for (suffix, pad_offset) in [("AA===", 2), ("AAA==", 3), ("AAAA=", 4)] { - let mut s: String = "ABCD".repeat(num_prefix_quads); - s.push_str(suffix); - - assert_eq!( - // pad after `g`, not the last one - Err(DecodeError::InvalidByte( - num_prefix_quads * 4 + pad_offset, - PAD_BYTE - )), - engine.decode(&s), - "mode: {:?}, input: {}", - mode, - s - ); - } - } -} - -#[apply(all_engines)] -fn decode_into_slice_fits_in_precisely_sized_slice(engine_wrapper: E) { - let mut orig_data = Vec::new(); - let mut encoded_data = String::new(); - let mut decode_buf = Vec::new(); - - let input_len_range = distributions::Uniform::new(0, 1000); - let mut rng = rngs::SmallRng::from_entropy(); - - for _ in 0..10_000 { - orig_data.clear(); - encoded_data.clear(); - decode_buf.clear(); - - let input_len = input_len_range.sample(&mut rng); - - for _ in 0..input_len { - orig_data.push(rng.gen()); - } - - let engine = E::random(&mut rng); - engine.encode_string(&orig_data, &mut encoded_data); - assert_encode_sanity(&encoded_data, engine.config().encode_padding(), input_len); - - decode_buf.resize(input_len, 0); - // decode into the non-empty buf - let decode_bytes_written = engine - .decode_slice_unchecked(encoded_data.as_bytes(), &mut decode_buf[..]) - .unwrap(); - assert_eq!(orig_data.len(), decode_bytes_written); - assert_eq!(orig_data, decode_buf); - - // same for checked variant - decode_buf.clear(); - decode_buf.resize(input_len, 0); - // decode into the non-empty buf - let decode_bytes_written = engine - .decode_slice(encoded_data.as_bytes(), &mut decode_buf[..]) - .unwrap(); - assert_eq!(orig_data.len(), decode_bytes_written); - assert_eq!(orig_data, decode_buf); - } -} - -#[apply(all_engines)] -fn inner_decode_reports_padding_position(engine_wrapper: E) { - let mut b64 = String::new(); - let mut decoded = Vec::new(); - let engine = E::standard(); - - for pad_position in 1..10_000 { - b64.clear(); - decoded.clear(); - // plenty of room for original data - decoded.resize(pad_position, 0); - - for _ in 0..pad_position { - b64.push('A'); - } - // finish the quad with padding - for _ in 0..(4 - (pad_position % 4)) { - b64.push('='); - } - - let decode_res = engine.internal_decode( - b64.as_bytes(), - &mut decoded[..], - engine.internal_decoded_len_estimate(b64.len()), - ); - if pad_position % 4 < 2 { - // impossible padding - assert_eq!( - 
Err(DecodeSliceError::DecodeError(DecodeError::InvalidByte( - pad_position, - PAD_BYTE - ))), - decode_res - ); - } else { - let decoded_bytes = pad_position / 4 * 3 - + match pad_position % 4 { - 0 => 0, - 2 => 1, - 3 => 2, - _ => unreachable!(), - }; - assert_eq!( - Ok(DecodeMetadata::new(decoded_bytes, Some(pad_position))), - decode_res - ); - } - } -} - -#[apply(all_engines)] -fn decode_length_estimate_delta(engine_wrapper: E) { - for engine in [E::standard(), E::standard_unpadded()] { - for &padding in &[true, false] { - for orig_len in 0..1000 { - let encoded_len = encoded_len(orig_len, padding).unwrap(); - - let decoded_estimate = engine - .internal_decoded_len_estimate(encoded_len) - .decoded_len_estimate(); - assert!(decoded_estimate >= orig_len); - assert!( - decoded_estimate - orig_len < 3, - "estimate: {}, encoded: {}, orig: {}", - decoded_estimate, - encoded_len, - orig_len - ); - } - } - } -} - -#[apply(all_engines)] -fn estimate_via_u128_inflation(engine_wrapper: E) { - // cover both ends of usize - (0..1000) - .chain(usize::MAX - 1000..=usize::MAX) - .for_each(|encoded_len| { - // inflate to 128 bit type to be able to safely use the easy formulas - let len_128 = encoded_len as u128; - - let estimate = E::standard() - .internal_decoded_len_estimate(encoded_len) - .decoded_len_estimate(); - - // This check is a little too strict: it requires using the (len + 3) / 4 * 3 formula - // or equivalent, but until other engines come along that use a different formula - // requiring that we think more carefully about what the allowable criteria are, this - // will do. - assert_eq!( - ((len_128 + 3) / 4 * 3) as usize, - estimate, - "enc len {}", - encoded_len - ); - }) -} - -#[apply(all_engines)] -fn decode_slice_checked_fails_gracefully_at_all_output_lengths( - engine_wrapper: E, -) { - let mut rng = seeded_rng(); - for original_len in 0..1000 { - let mut original = vec![0; original_len]; - rng.fill(&mut original[..]); - - for mode in all_pad_modes() { - let engine = E::standard_with_pad_mode( - match mode { - DecodePaddingMode::Indifferent | DecodePaddingMode::RequireCanonical => true, - DecodePaddingMode::RequireNone => false, - }, - mode, - ); - - let encoded = engine.encode(&original); - let mut decode_buf = Vec::with_capacity(original_len); - for decode_buf_len in 0..original_len { - decode_buf.resize(decode_buf_len, 0); - assert_eq!( - DecodeSliceError::OutputSliceTooSmall, - engine - .decode_slice(&encoded, &mut decode_buf[..]) - .unwrap_err(), - "original len: {}, encoded len: {}, buf len: {}, mode: {:?}", - original_len, - encoded.len(), - decode_buf_len, - mode - ); - // internal method works the same - assert_eq!( - DecodeSliceError::OutputSliceTooSmall, - engine - .internal_decode( - encoded.as_bytes(), - &mut decode_buf[..], - engine.internal_decoded_len_estimate(encoded.len()) - ) - .unwrap_err() - ); - } - - decode_buf.resize(original_len, 0); - rng.fill(&mut decode_buf[..]); - assert_eq!( - original_len, - engine.decode_slice(&encoded, &mut decode_buf[..]).unwrap() - ); - assert_eq!(original, decode_buf); - } - } -} - -/// Returns a tuple of the original data length, the encoded data length (just data), and the length including padding. -/// -/// Vecs provided should be empty. 
-fn generate_random_encoded_data>( - engine: &E, - orig_data: &mut Vec, - encode_buf: &mut Vec, - rng: &mut R, - length_distribution: &D, -) -> (usize, usize, usize) { - let padding: bool = engine.config().encode_padding(); - - let orig_len = fill_rand(orig_data, rng, length_distribution); - let expected_encoded_len = encoded_len(orig_len, padding).unwrap(); - encode_buf.resize(expected_encoded_len, 0); - - let base_encoded_len = engine.internal_encode(&orig_data[..], &mut encode_buf[..]); - - let enc_len_with_padding = if padding { - base_encoded_len + add_padding(base_encoded_len, &mut encode_buf[base_encoded_len..]) - } else { - base_encoded_len - }; - - assert_eq!(expected_encoded_len, enc_len_with_padding); - - (orig_len, base_encoded_len, enc_len_with_padding) -} - -// fill to a random length -fn fill_rand>( - vec: &mut Vec, - rng: &mut R, - length_distribution: &D, -) -> usize { - let len = length_distribution.sample(rng); - for _ in 0..len { - vec.push(rng.gen()); - } - - len -} - -fn fill_rand_len(vec: &mut Vec, rng: &mut R, len: usize) { - for _ in 0..len { - vec.push(rng.gen()); - } -} - -fn prefixed_data<'i>(input_with_prefix: &'i mut String, prefix_len: usize, data: &str) -> &'i str { - input_with_prefix.truncate(prefix_len); - input_with_prefix.push_str(data); - input_with_prefix.as_str() -} - -/// A wrapper to make using engines in rstest fixtures easier. -/// The functions don't need to be instance methods, but rstest does seem -/// to want an instance, so instances are passed to test functions and then ignored. -trait EngineWrapper { - type Engine: Engine; - - /// Return an engine configured for RFC standard base64 - fn standard() -> Self::Engine; - - /// Return an engine configured for RFC standard base64, except with no padding appended on - /// encode, and required no padding on decode. 
- fn standard_unpadded() -> Self::Engine; - - /// Return an engine configured for RFC standard alphabet with the provided encode and decode - /// pad settings - fn standard_with_pad_mode(encode_pad: bool, decode_pad_mode: DecodePaddingMode) - -> Self::Engine; - - /// Return an engine configured for RFC standard base64 that allows invalid trailing bits - fn standard_allow_trailing_bits() -> Self::Engine; - - /// Return an engine configured with a randomized alphabet and config - fn random(rng: &mut R) -> Self::Engine; - - /// Return an engine configured with the specified alphabet and randomized config - fn random_alphabet(rng: &mut R, alphabet: &Alphabet) -> Self::Engine; -} - -struct GeneralPurposeWrapper {} - -impl EngineWrapper for GeneralPurposeWrapper { - type Engine = general_purpose::GeneralPurpose; - - fn standard() -> Self::Engine { - general_purpose::GeneralPurpose::new(&STANDARD, general_purpose::PAD) - } - - fn standard_unpadded() -> Self::Engine { - general_purpose::GeneralPurpose::new(&STANDARD, general_purpose::NO_PAD) - } - - fn standard_with_pad_mode( - encode_pad: bool, - decode_pad_mode: DecodePaddingMode, - ) -> Self::Engine { - general_purpose::GeneralPurpose::new( - &STANDARD, - general_purpose::GeneralPurposeConfig::new() - .with_encode_padding(encode_pad) - .with_decode_padding_mode(decode_pad_mode), - ) - } - - fn standard_allow_trailing_bits() -> Self::Engine { - general_purpose::GeneralPurpose::new( - &STANDARD, - general_purpose::GeneralPurposeConfig::new().with_decode_allow_trailing_bits(true), - ) - } - - fn random(rng: &mut R) -> Self::Engine { - let alphabet = random_alphabet(rng); - - Self::random_alphabet(rng, alphabet) - } - - fn random_alphabet(rng: &mut R, alphabet: &Alphabet) -> Self::Engine { - general_purpose::GeneralPurpose::new(alphabet, random_config(rng)) - } -} - -struct NaiveWrapper {} - -impl EngineWrapper for NaiveWrapper { - type Engine = naive::Naive; - - fn standard() -> Self::Engine { - naive::Naive::new( - &STANDARD, - naive::NaiveConfig { - encode_padding: true, - decode_allow_trailing_bits: false, - decode_padding_mode: DecodePaddingMode::RequireCanonical, - }, - ) - } - - fn standard_unpadded() -> Self::Engine { - naive::Naive::new( - &STANDARD, - naive::NaiveConfig { - encode_padding: false, - decode_allow_trailing_bits: false, - decode_padding_mode: DecodePaddingMode::RequireNone, - }, - ) - } - - fn standard_with_pad_mode( - encode_pad: bool, - decode_pad_mode: DecodePaddingMode, - ) -> Self::Engine { - naive::Naive::new( - &STANDARD, - naive::NaiveConfig { - encode_padding: encode_pad, - decode_allow_trailing_bits: false, - decode_padding_mode: decode_pad_mode, - }, - ) - } - - fn standard_allow_trailing_bits() -> Self::Engine { - naive::Naive::new( - &STANDARD, - naive::NaiveConfig { - encode_padding: true, - decode_allow_trailing_bits: true, - decode_padding_mode: DecodePaddingMode::RequireCanonical, - }, - ) - } - - fn random(rng: &mut R) -> Self::Engine { - let alphabet = random_alphabet(rng); - - Self::random_alphabet(rng, alphabet) - } - - fn random_alphabet(rng: &mut R, alphabet: &Alphabet) -> Self::Engine { - let mode = rng.gen(); - - let config = naive::NaiveConfig { - encode_padding: match mode { - DecodePaddingMode::Indifferent => rng.gen(), - DecodePaddingMode::RequireCanonical => true, - DecodePaddingMode::RequireNone => false, - }, - decode_allow_trailing_bits: rng.gen(), - decode_padding_mode: mode, - }; - - naive::Naive::new(alphabet, config) - } -} - -/// A pseudo-Engine that routes all decoding through 
[DecoderReader] -struct DecoderReaderEngine { - engine: E, -} - -impl From for DecoderReaderEngine { - fn from(value: E) -> Self { - Self { engine: value } - } -} - -impl Engine for DecoderReaderEngine { - type Config = E::Config; - type DecodeEstimate = E::DecodeEstimate; - - fn internal_encode(&self, input: &[u8], output: &mut [u8]) -> usize { - self.engine.internal_encode(input, output) - } - - fn internal_decoded_len_estimate(&self, input_len: usize) -> Self::DecodeEstimate { - self.engine.internal_decoded_len_estimate(input_len) - } - - fn internal_decode( - &self, - input: &[u8], - output: &mut [u8], - decode_estimate: Self::DecodeEstimate, - ) -> Result { - let mut reader = DecoderReader::new(input, &self.engine); - let mut buf = vec![0; input.len()]; - // to avoid effects like not detecting invalid length due to progressively growing - // the output buffer in read_to_end etc, read into a big enough buffer in one go - // to make behavior more consistent with normal engines - let _ = reader - .read(&mut buf) - .and_then(|len| { - buf.truncate(len); - // make sure we got everything - reader.read_to_end(&mut buf) - }) - .map_err(|io_error| { - *io_error - .into_inner() - .and_then(|inner| inner.downcast::().ok()) - .unwrap() - })?; - if output.len() < buf.len() { - return Err(DecodeSliceError::OutputSliceTooSmall); - } - output[..buf.len()].copy_from_slice(&buf); - Ok(DecodeMetadata::new( - buf.len(), - input - .iter() - .enumerate() - .filter(|(_offset, byte)| **byte == PAD_BYTE) - .map(|(offset, _byte)| offset) - .next(), - )) - } - - fn config(&self) -> &Self::Config { - self.engine.config() - } -} - -struct DecoderReaderEngineWrapper {} - -impl EngineWrapper for DecoderReaderEngineWrapper { - type Engine = DecoderReaderEngine; - - fn standard() -> Self::Engine { - GeneralPurposeWrapper::standard().into() - } - - fn standard_unpadded() -> Self::Engine { - GeneralPurposeWrapper::standard_unpadded().into() - } - - fn standard_with_pad_mode( - encode_pad: bool, - decode_pad_mode: DecodePaddingMode, - ) -> Self::Engine { - GeneralPurposeWrapper::standard_with_pad_mode(encode_pad, decode_pad_mode).into() - } - - fn standard_allow_trailing_bits() -> Self::Engine { - GeneralPurposeWrapper::standard_allow_trailing_bits().into() - } - - fn random(rng: &mut R) -> Self::Engine { - GeneralPurposeWrapper::random(rng).into() - } - - fn random_alphabet(rng: &mut R, alphabet: &Alphabet) -> Self::Engine { - GeneralPurposeWrapper::random_alphabet(rng, alphabet).into() - } -} - -fn seeded_rng() -> impl rand::Rng { - rngs::SmallRng::from_entropy() -} - -fn all_pad_modes() -> Vec { - vec![ - DecodePaddingMode::Indifferent, - DecodePaddingMode::RequireCanonical, - DecodePaddingMode::RequireNone, - ] -} - -fn pad_modes_allowing_padding() -> Vec { - vec![ - DecodePaddingMode::Indifferent, - DecodePaddingMode::RequireCanonical, - ] -} - -fn assert_all_suffixes_ok(engine: E, suffixes: Vec<&str>) { - for num_prefix_quads in 0..256 { - for &suffix in suffixes.iter() { - let mut encoded = "AAAA".repeat(num_prefix_quads); - encoded.push_str(suffix); - - let res = &engine.decode(&encoded); - assert!(res.is_ok()); - } - } -} diff --git a/vendor/base64/src/lib.rs b/vendor/base64/src/lib.rs deleted file mode 100644 index 579a7225cb75ea..00000000000000 --- a/vendor/base64/src/lib.rs +++ /dev/null @@ -1,277 +0,0 @@ -//! Correct, fast, and configurable [base64][] decoding and encoding. Base64 -//! transports binary data efficiently in contexts where only plain text is -//! allowed. -//! -//! 
[base64]: https://developer.mozilla.org/en-US/docs/Glossary/Base64 -//! -//! # Usage -//! -//! Use an [`Engine`] to decode or encode base64, configured with the base64 -//! alphabet and padding behavior best suited to your application. -//! -//! ## Engine setup -//! -//! There is more than one way to encode a stream of bytes as “base64”. -//! Different applications use different encoding -//! [alphabets][alphabet::Alphabet] and -//! [padding behaviors][engine::general_purpose::GeneralPurposeConfig]. -//! -//! ### Encoding alphabet -//! -//! Almost all base64 [alphabets][alphabet::Alphabet] use `A-Z`, `a-z`, and -//! `0-9`, which gives nearly 64 characters (26 + 26 + 10 = 62), but they differ -//! in their choice of their final 2. -//! -//! Most applications use the [standard][alphabet::STANDARD] alphabet specified -//! in [RFC 4648][rfc-alphabet]. If that’s all you need, you can get started -//! quickly by using the pre-configured -//! [`STANDARD`][engine::general_purpose::STANDARD] engine, which is also available -//! in the [`prelude`] module as shown here, if you prefer a minimal `use` -//! footprint. -//! -#![cfg_attr(feature = "alloc", doc = "```")] -#![cfg_attr(not(feature = "alloc"), doc = "```ignore")] -//! use base64::prelude::*; -//! -//! # fn main() -> Result<(), base64::DecodeError> { -//! assert_eq!(BASE64_STANDARD.decode(b"+uwgVQA=")?, b"\xFA\xEC\x20\x55\0"); -//! assert_eq!(BASE64_STANDARD.encode(b"\xFF\xEC\x20\x55\0"), "/+wgVQA="); -//! # Ok(()) -//! # } -//! ``` -//! -//! [rfc-alphabet]: https://datatracker.ietf.org/doc/html/rfc4648#section-4 -//! -//! Other common alphabets are available in the [`alphabet`] module. -//! -//! #### URL-safe alphabet -//! -//! The standard alphabet uses `+` and `/` as its two non-alphanumeric tokens, -//! which cannot be safely used in URL’s without encoding them as `%2B` and -//! `%2F`. -//! -//! To avoid that, some applications use a [“URL-safe” alphabet][alphabet::URL_SAFE], -//! which uses `-` and `_` instead. To use that alternative alphabet, use the -//! [`URL_SAFE`][engine::general_purpose::URL_SAFE] engine. This example doesn't -//! use [`prelude`] to show what a more explicit `use` would look like. -//! -#![cfg_attr(feature = "alloc", doc = "```")] -#![cfg_attr(not(feature = "alloc"), doc = "```ignore")] -//! use base64::{engine::general_purpose::URL_SAFE, Engine as _}; -//! -//! # fn main() -> Result<(), base64::DecodeError> { -//! assert_eq!(URL_SAFE.decode(b"-uwgVQA=")?, b"\xFA\xEC\x20\x55\0"); -//! assert_eq!(URL_SAFE.encode(b"\xFF\xEC\x20\x55\0"), "_-wgVQA="); -//! # Ok(()) -//! # } -//! ``` -//! -//! ### Padding characters -//! -//! Each base64 character represents 6 bits (2⁶ = 64) of the original binary -//! data, and every 3 bytes of input binary data will encode to 4 base64 -//! characters (8 bits × 3 = 6 bits × 4 = 24 bits). -//! -//! When the input is not an even multiple of 3 bytes in length, [canonical][] -//! base64 encoders insert padding characters at the end, so that the output -//! length is always a multiple of 4: -//! -//! [canonical]: https://datatracker.ietf.org/doc/html/rfc4648#section-3.5 -//! -#![cfg_attr(feature = "alloc", doc = "```")] -#![cfg_attr(not(feature = "alloc"), doc = "```ignore")] -//! use base64::{engine::general_purpose::STANDARD, Engine as _}; -//! -//! assert_eq!(STANDARD.encode(b""), ""); -//! assert_eq!(STANDARD.encode(b"f"), "Zg=="); -//! assert_eq!(STANDARD.encode(b"fo"), "Zm8="); -//! assert_eq!(STANDARD.encode(b"foo"), "Zm9v"); -//! ``` -//! -//! 
Canonical encoding ensures that base64 encodings will be exactly the same, -//! byte-for-byte, regardless of input length. But the `=` padding characters -//! aren’t necessary for decoding, and they may be omitted by using a -//! [`NO_PAD`][engine::general_purpose::NO_PAD] configuration: -//! -#![cfg_attr(feature = "alloc", doc = "```")] -#![cfg_attr(not(feature = "alloc"), doc = "```ignore")] -//! use base64::{engine::general_purpose::STANDARD_NO_PAD, Engine as _}; -//! -//! assert_eq!(STANDARD_NO_PAD.encode(b""), ""); -//! assert_eq!(STANDARD_NO_PAD.encode(b"f"), "Zg"); -//! assert_eq!(STANDARD_NO_PAD.encode(b"fo"), "Zm8"); -//! assert_eq!(STANDARD_NO_PAD.encode(b"foo"), "Zm9v"); -//! ``` -//! -//! The pre-configured `NO_PAD` engines will reject inputs containing padding -//! `=` characters. To encode without padding and still accept padding while -//! decoding, create an [engine][engine::general_purpose::GeneralPurpose] with -//! that [padding mode][engine::DecodePaddingMode]. -//! -#![cfg_attr(feature = "alloc", doc = "```")] -#![cfg_attr(not(feature = "alloc"), doc = "```ignore")] -//! # use base64::{engine::general_purpose::STANDARD_NO_PAD, Engine as _}; -//! assert_eq!(STANDARD_NO_PAD.decode(b"Zm8="), Err(base64::DecodeError::InvalidPadding)); -//! ``` -//! -//! ### Further customization -//! -//! Decoding and encoding behavior can be customized by creating an -//! [engine][engine::GeneralPurpose] with an [alphabet][alphabet::Alphabet] and -//! [padding configuration][engine::GeneralPurposeConfig]: -//! -#![cfg_attr(feature = "alloc", doc = "```")] -#![cfg_attr(not(feature = "alloc"), doc = "```ignore")] -//! use base64::{engine, alphabet, Engine as _}; -//! -//! // bizarro-world base64: +/ as the first symbols instead of the last -//! let alphabet = -//! alphabet::Alphabet::new("+/ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789") -//! .unwrap(); -//! -//! // a very weird config that encodes with padding but requires no padding when decoding...? -//! let crazy_config = engine::GeneralPurposeConfig::new() -//! .with_decode_allow_trailing_bits(true) -//! .with_encode_padding(true) -//! .with_decode_padding_mode(engine::DecodePaddingMode::RequireNone); -//! -//! let crazy_engine = engine::GeneralPurpose::new(&alphabet, crazy_config); -//! -//! let encoded = crazy_engine.encode(b"abc 123"); -//! -//! ``` -//! -//! ## Memory allocation -//! -//! The [decode][Engine::decode()] and [encode][Engine::encode()] engine methods -//! allocate memory for their results – `decode` returns a `Vec` and -//! `encode` returns a `String`. To instead decode or encode into a buffer that -//! you allocated, use one of the alternative methods: -//! -//! #### Decoding -//! -//! | Method | Output | Allocates memory | -//! | -------------------------- | ----------------------------- | ----------------------------- | -//! | [`Engine::decode`] | returns a new `Vec` | always | -//! | [`Engine::decode_vec`] | appends to provided `Vec` | if `Vec` lacks capacity | -//! | [`Engine::decode_slice`] | writes to provided `&[u8]` | never -//! -//! #### Encoding -//! -//! | Method | Output | Allocates memory | -//! | -------------------------- | ---------------------------- | ------------------------------ | -//! | [`Engine::encode`] | returns a new `String` | always | -//! | [`Engine::encode_string`] | appends to provided `String` | if `String` lacks capacity | -//! | [`Engine::encode_slice`] | writes to provided `&[u8]` | never | -//! -//! ## Input and output -//! -//! 
The `base64` crate can [decode][Engine::decode()] and -//! [encode][Engine::encode()] values in memory, or -//! [`DecoderReader`][read::DecoderReader] and -//! [`EncoderWriter`][write::EncoderWriter] provide streaming decoding and -//! encoding for any [readable][std::io::Read] or [writable][std::io::Write] -//! byte stream. -//! -//! #### Decoding -//! -#![cfg_attr(feature = "std", doc = "```")] -#![cfg_attr(not(feature = "std"), doc = "```ignore")] -//! # use std::io; -//! use base64::{engine::general_purpose::STANDARD, read::DecoderReader}; -//! -//! # fn main() -> Result<(), Box> { -//! let mut input = io::stdin(); -//! let mut decoder = DecoderReader::new(&mut input, &STANDARD); -//! io::copy(&mut decoder, &mut io::stdout())?; -//! # Ok(()) -//! # } -//! ``` -//! -//! #### Encoding -//! -#![cfg_attr(feature = "std", doc = "```")] -#![cfg_attr(not(feature = "std"), doc = "```ignore")] -//! # use std::io; -//! use base64::{engine::general_purpose::STANDARD, write::EncoderWriter}; -//! -//! # fn main() -> Result<(), Box> { -//! let mut output = io::stdout(); -//! let mut encoder = EncoderWriter::new(&mut output, &STANDARD); -//! io::copy(&mut io::stdin(), &mut encoder)?; -//! # Ok(()) -//! # } -//! ``` -//! -//! #### Display -//! -//! If you only need a base64 representation for implementing the -//! [`Display`][std::fmt::Display] trait, use -//! [`Base64Display`][display::Base64Display]: -//! -//! ``` -//! use base64::{display::Base64Display, engine::general_purpose::STANDARD}; -//! -//! let value = Base64Display::new(b"\0\x01\x02\x03", &STANDARD); -//! assert_eq!("base64: AAECAw==", format!("base64: {}", value)); -//! ``` -//! -//! # Panics -//! -//! If length calculations result in overflowing `usize`, a panic will result. - -#![cfg_attr(feature = "cargo-clippy", allow(clippy::cast_lossless))] -#![deny( - missing_docs, - trivial_casts, - trivial_numeric_casts, - unused_extern_crates, - unused_import_braces, - unused_results, - variant_size_differences -)] -#![forbid(unsafe_code)] -// Allow globally until https://github.com/rust-lang/rust-clippy/issues/8768 is resolved. -// The desired state is to allow it only for the rstest_reuse import. -#![allow(clippy::single_component_path_imports)] -#![cfg_attr(not(any(feature = "std", test)), no_std)] - -#[cfg(any(feature = "alloc", test))] -extern crate alloc; - -// has to be included at top level because of the way rstest_reuse defines its macros -#[cfg(test)] -use rstest_reuse; - -mod chunked_encoder; -pub mod display; -#[cfg(any(feature = "std", test))] -pub mod read; -#[cfg(any(feature = "std", test))] -pub mod write; - -pub mod engine; -pub use engine::Engine; - -pub mod alphabet; - -mod encode; -#[allow(deprecated)] -#[cfg(any(feature = "alloc", test))] -pub use crate::encode::{encode, encode_engine, encode_engine_string}; -#[allow(deprecated)] -pub use crate::encode::{encode_engine_slice, encoded_len, EncodeSliceError}; - -mod decode; -#[allow(deprecated)] -#[cfg(any(feature = "alloc", test))] -pub use crate::decode::{decode, decode_engine, decode_engine_vec}; -#[allow(deprecated)] -pub use crate::decode::{decode_engine_slice, decoded_len_estimate, DecodeError, DecodeSliceError}; - -pub mod prelude; - -#[cfg(test)] -mod tests; - -const PAD_BYTE: u8 = b'='; diff --git a/vendor/base64/src/prelude.rs b/vendor/base64/src/prelude.rs deleted file mode 100644 index df5fdb497c6ce6..00000000000000 --- a/vendor/base64/src/prelude.rs +++ /dev/null @@ -1,20 +0,0 @@ -//! Preconfigured engines for common use cases. -//! -//! 
These are re-exports of `const` engines in [crate::engine::general_purpose], renamed with a `BASE64_` -//! prefix for those who prefer to `use` the entire path to a name. -//! -//! # Examples -//! -#![cfg_attr(feature = "alloc", doc = "```")] -#![cfg_attr(not(feature = "alloc"), doc = "```ignore")] -//! use base64::prelude::{Engine as _, BASE64_STANDARD_NO_PAD}; -//! -//! assert_eq!("c29tZSBieXRlcw", &BASE64_STANDARD_NO_PAD.encode(b"some bytes")); -//! ``` - -pub use crate::engine::Engine; - -pub use crate::engine::general_purpose::STANDARD as BASE64_STANDARD; -pub use crate::engine::general_purpose::STANDARD_NO_PAD as BASE64_STANDARD_NO_PAD; -pub use crate::engine::general_purpose::URL_SAFE as BASE64_URL_SAFE; -pub use crate::engine::general_purpose::URL_SAFE_NO_PAD as BASE64_URL_SAFE_NO_PAD; diff --git a/vendor/base64/src/read/decoder.rs b/vendor/base64/src/read/decoder.rs deleted file mode 100644 index 781f6f880e5537..00000000000000 --- a/vendor/base64/src/read/decoder.rs +++ /dev/null @@ -1,335 +0,0 @@ -use crate::{engine::Engine, DecodeError, DecodeSliceError, PAD_BYTE}; -use std::{cmp, fmt, io}; - -// This should be large, but it has to fit on the stack. -pub(crate) const BUF_SIZE: usize = 1024; - -// 4 bytes of base64 data encode 3 bytes of raw data (modulo padding). -const BASE64_CHUNK_SIZE: usize = 4; -const DECODED_CHUNK_SIZE: usize = 3; - -/// A `Read` implementation that decodes base64 data read from an underlying reader. -/// -/// # Examples -/// -/// ``` -/// use std::io::Read; -/// use std::io::Cursor; -/// use base64::engine::general_purpose; -/// -/// // use a cursor as the simplest possible `Read` -- in real code this is probably a file, etc. -/// let mut wrapped_reader = Cursor::new(b"YXNkZg=="); -/// let mut decoder = base64::read::DecoderReader::new( -/// &mut wrapped_reader, -/// &general_purpose::STANDARD); -/// -/// // handle errors as you normally would -/// let mut result = Vec::new(); -/// decoder.read_to_end(&mut result).unwrap(); -/// -/// assert_eq!(b"asdf", &result[..]); -/// -/// ``` -pub struct DecoderReader<'e, E: Engine, R: io::Read> { - engine: &'e E, - /// Where b64 data is read from - inner: R, - - /// Holds b64 data read from the delegate reader. - b64_buffer: [u8; BUF_SIZE], - /// The start of the pending buffered data in `b64_buffer`. - b64_offset: usize, - /// The amount of buffered b64 data after `b64_offset` in `b64_len`. - b64_len: usize, - /// Since the caller may provide us with a buffer of size 1 or 2 that's too small to copy a - /// decoded chunk in to, we have to be able to hang on to a few decoded bytes. - /// Technically we only need to hold 2 bytes, but then we'd need a separate temporary buffer to - /// decode 3 bytes into and then juggle copying one byte into the provided read buf and the rest - /// into here, which seems like a lot of complexity for 1 extra byte of storage. - decoded_chunk_buffer: [u8; DECODED_CHUNK_SIZE], - /// Index of start of decoded data in `decoded_chunk_buffer` - decoded_offset: usize, - /// Length of decoded data after `decoded_offset` in `decoded_chunk_buffer` - decoded_len: usize, - /// Input length consumed so far. 
- /// Used to provide accurate offsets in errors - input_consumed_len: usize, - /// offset of previously seen padding, if any - padding_offset: Option, -} - -// exclude b64_buffer as it's uselessly large -impl<'e, E: Engine, R: io::Read> fmt::Debug for DecoderReader<'e, E, R> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.debug_struct("DecoderReader") - .field("b64_offset", &self.b64_offset) - .field("b64_len", &self.b64_len) - .field("decoded_chunk_buffer", &self.decoded_chunk_buffer) - .field("decoded_offset", &self.decoded_offset) - .field("decoded_len", &self.decoded_len) - .field("input_consumed_len", &self.input_consumed_len) - .field("padding_offset", &self.padding_offset) - .finish() - } -} - -impl<'e, E: Engine, R: io::Read> DecoderReader<'e, E, R> { - /// Create a new decoder that will read from the provided reader `r`. - pub fn new(reader: R, engine: &'e E) -> Self { - DecoderReader { - engine, - inner: reader, - b64_buffer: [0; BUF_SIZE], - b64_offset: 0, - b64_len: 0, - decoded_chunk_buffer: [0; DECODED_CHUNK_SIZE], - decoded_offset: 0, - decoded_len: 0, - input_consumed_len: 0, - padding_offset: None, - } - } - - /// Write as much as possible of the decoded buffer into the target buffer. - /// Must only be called when there is something to write and space to write into. - /// Returns a Result with the number of (decoded) bytes copied. - fn flush_decoded_buf(&mut self, buf: &mut [u8]) -> io::Result { - debug_assert!(self.decoded_len > 0); - debug_assert!(!buf.is_empty()); - - let copy_len = cmp::min(self.decoded_len, buf.len()); - debug_assert!(copy_len > 0); - debug_assert!(copy_len <= self.decoded_len); - - buf[..copy_len].copy_from_slice( - &self.decoded_chunk_buffer[self.decoded_offset..self.decoded_offset + copy_len], - ); - - self.decoded_offset += copy_len; - self.decoded_len -= copy_len; - - debug_assert!(self.decoded_len < DECODED_CHUNK_SIZE); - - Ok(copy_len) - } - - /// Read into the remaining space in the buffer after the current contents. - /// Must only be called when there is space to read into in the buffer. - /// Returns the number of bytes read. - fn read_from_delegate(&mut self) -> io::Result { - debug_assert!(self.b64_offset + self.b64_len < BUF_SIZE); - - let read = self - .inner - .read(&mut self.b64_buffer[self.b64_offset + self.b64_len..])?; - self.b64_len += read; - - debug_assert!(self.b64_offset + self.b64_len <= BUF_SIZE); - - Ok(read) - } - - /// Decode the requested number of bytes from the b64 buffer into the provided buffer. It's the - /// caller's responsibility to choose the number of b64 bytes to decode correctly. - /// - /// Returns a Result with the number of decoded bytes written to `buf`. 
- /// - /// # Panics - /// - /// panics if `buf` is too small - fn decode_to_buf(&mut self, b64_len_to_decode: usize, buf: &mut [u8]) -> io::Result { - debug_assert!(self.b64_len >= b64_len_to_decode); - debug_assert!(self.b64_offset + self.b64_len <= BUF_SIZE); - debug_assert!(!buf.is_empty()); - - let b64_to_decode = &self.b64_buffer[self.b64_offset..self.b64_offset + b64_len_to_decode]; - let decode_metadata = self - .engine - .internal_decode( - b64_to_decode, - buf, - self.engine.internal_decoded_len_estimate(b64_len_to_decode), - ) - .map_err(|dse| match dse { - DecodeSliceError::DecodeError(de) => { - match de { - DecodeError::InvalidByte(offset, byte) => { - match (byte, self.padding_offset) { - // if there was padding in a previous block of decoding that happened to - // be correct, and we now find more padding that happens to be incorrect, - // to be consistent with non-reader decodes, record the error at the first - // padding - (PAD_BYTE, Some(first_pad_offset)) => { - DecodeError::InvalidByte(first_pad_offset, PAD_BYTE) - } - _ => { - DecodeError::InvalidByte(self.input_consumed_len + offset, byte) - } - } - } - DecodeError::InvalidLength(len) => { - DecodeError::InvalidLength(self.input_consumed_len + len) - } - DecodeError::InvalidLastSymbol(offset, byte) => { - DecodeError::InvalidLastSymbol(self.input_consumed_len + offset, byte) - } - DecodeError::InvalidPadding => DecodeError::InvalidPadding, - } - } - DecodeSliceError::OutputSliceTooSmall => { - unreachable!("buf is sized correctly in calling code") - } - }) - .map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?; - - if let Some(offset) = self.padding_offset { - // we've already seen padding - if decode_metadata.decoded_len > 0 { - // we read more after already finding padding; report error at first padding byte - return Err(io::Error::new( - io::ErrorKind::InvalidData, - DecodeError::InvalidByte(offset, PAD_BYTE), - )); - } - } - - self.padding_offset = self.padding_offset.or(decode_metadata - .padding_offset - .map(|offset| self.input_consumed_len + offset)); - self.input_consumed_len += b64_len_to_decode; - self.b64_offset += b64_len_to_decode; - self.b64_len -= b64_len_to_decode; - - debug_assert!(self.b64_offset + self.b64_len <= BUF_SIZE); - - Ok(decode_metadata.decoded_len) - } - - /// Unwraps this `DecoderReader`, returning the base reader which it reads base64 encoded - /// input from. - /// - /// Because `DecoderReader` performs internal buffering, the state of the inner reader is - /// unspecified. This function is mainly provided because the inner reader type may provide - /// additional functionality beyond the `Read` implementation which may still be useful. - pub fn into_inner(self) -> R { - self.inner - } -} - -impl<'e, E: Engine, R: io::Read> io::Read for DecoderReader<'e, E, R> { - /// Decode input from the wrapped reader. - /// - /// Under non-error circumstances, this returns `Ok` with the value being the number of bytes - /// written in `buf`. - /// - /// Where possible, this function buffers base64 to minimize the number of read() calls to the - /// delegate reader. - /// - /// # Errors - /// - /// Any errors emitted by the delegate reader are returned. Decoding errors due to invalid - /// base64 are also possible, and will have `io::ErrorKind::InvalidData`. 
- fn read(&mut self, buf: &mut [u8]) -> io::Result { - if buf.is_empty() { - return Ok(0); - } - - // offset == BUF_SIZE when we copied it all last time - debug_assert!(self.b64_offset <= BUF_SIZE); - debug_assert!(self.b64_offset + self.b64_len <= BUF_SIZE); - debug_assert!(if self.b64_offset == BUF_SIZE { - self.b64_len == 0 - } else { - self.b64_len <= BUF_SIZE - }); - - debug_assert!(if self.decoded_len == 0 { - // can be = when we were able to copy the complete chunk - self.decoded_offset <= DECODED_CHUNK_SIZE - } else { - self.decoded_offset < DECODED_CHUNK_SIZE - }); - - // We shouldn't ever decode into decoded_buffer when we can't immediately write at least one - // byte into the provided buf, so the effective length should only be 3 momentarily between - // when we decode and when we copy into the target buffer. - debug_assert!(self.decoded_len < DECODED_CHUNK_SIZE); - debug_assert!(self.decoded_len + self.decoded_offset <= DECODED_CHUNK_SIZE); - - if self.decoded_len > 0 { - // we have a few leftover decoded bytes; flush that rather than pull in more b64 - self.flush_decoded_buf(buf) - } else { - let mut at_eof = false; - while self.b64_len < BASE64_CHUNK_SIZE { - // Copy any bytes we have to the start of the buffer. - self.b64_buffer - .copy_within(self.b64_offset..self.b64_offset + self.b64_len, 0); - self.b64_offset = 0; - - // then fill in more data - let read = self.read_from_delegate()?; - if read == 0 { - // we never read into an empty buf, so 0 => we've hit EOF - at_eof = true; - break; - } - } - - if self.b64_len == 0 { - debug_assert!(at_eof); - // we must be at EOF, and we have no data left to decode - return Ok(0); - }; - - debug_assert!(if at_eof { - // if we are at eof, we may not have a complete chunk - self.b64_len > 0 - } else { - // otherwise, we must have at least one chunk - self.b64_len >= BASE64_CHUNK_SIZE - }); - - debug_assert_eq!(0, self.decoded_len); - - if buf.len() < DECODED_CHUNK_SIZE { - // caller requested an annoyingly short read - // have to write to a tmp buf first to avoid double mutable borrow - let mut decoded_chunk = [0_u8; DECODED_CHUNK_SIZE]; - // if we are at eof, could have less than BASE64_CHUNK_SIZE, in which case we have - // to assume that these last few tokens are, in fact, valid (i.e. must be 2-4 b64 - // tokens, not 1, since 1 token can't decode to 1 byte). 
- let to_decode = cmp::min(self.b64_len, BASE64_CHUNK_SIZE); - - let decoded = self.decode_to_buf(to_decode, &mut decoded_chunk[..])?; - self.decoded_chunk_buffer[..decoded].copy_from_slice(&decoded_chunk[..decoded]); - - self.decoded_offset = 0; - self.decoded_len = decoded; - - // can be less than 3 on last block due to padding - debug_assert!(decoded <= 3); - - self.flush_decoded_buf(buf) - } else { - let b64_bytes_that_can_decode_into_buf = (buf.len() / DECODED_CHUNK_SIZE) - .checked_mul(BASE64_CHUNK_SIZE) - .expect("too many chunks"); - debug_assert!(b64_bytes_that_can_decode_into_buf >= BASE64_CHUNK_SIZE); - - let b64_bytes_available_to_decode = if at_eof { - self.b64_len - } else { - // only use complete chunks - self.b64_len - self.b64_len % 4 - }; - - let actual_decode_len = cmp::min( - b64_bytes_that_can_decode_into_buf, - b64_bytes_available_to_decode, - ); - self.decode_to_buf(actual_decode_len, buf) - } - } - } -} diff --git a/vendor/base64/src/read/decoder_tests.rs b/vendor/base64/src/read/decoder_tests.rs deleted file mode 100644 index f343145744815b..00000000000000 --- a/vendor/base64/src/read/decoder_tests.rs +++ /dev/null @@ -1,487 +0,0 @@ -use std::{ - cmp, - io::{self, Read as _}, - iter, -}; - -use rand::{Rng as _, RngCore as _}; - -use super::decoder::{DecoderReader, BUF_SIZE}; -use crate::{ - alphabet, - engine::{general_purpose::STANDARD, Engine, GeneralPurpose}, - tests::{random_alphabet, random_config, random_engine}, - DecodeError, PAD_BYTE, -}; - -#[test] -fn simple() { - let tests: &[(&[u8], &[u8])] = &[ - (&b"0"[..], &b"MA=="[..]), - (b"01", b"MDE="), - (b"012", b"MDEy"), - (b"0123", b"MDEyMw=="), - (b"01234", b"MDEyMzQ="), - (b"012345", b"MDEyMzQ1"), - (b"0123456", b"MDEyMzQ1Ng=="), - (b"01234567", b"MDEyMzQ1Njc="), - (b"012345678", b"MDEyMzQ1Njc4"), - (b"0123456789", b"MDEyMzQ1Njc4OQ=="), - ][..]; - - for (text_expected, base64data) in tests.iter() { - // Read n bytes at a time. - for n in 1..base64data.len() + 1 { - let mut wrapped_reader = io::Cursor::new(base64data); - let mut decoder = DecoderReader::new(&mut wrapped_reader, &STANDARD); - - // handle errors as you normally would - let mut text_got = Vec::new(); - let mut buffer = vec![0u8; n]; - while let Ok(read) = decoder.read(&mut buffer[..]) { - if read == 0 { - break; - } - text_got.extend_from_slice(&buffer[..read]); - } - - assert_eq!( - text_got, - *text_expected, - "\nGot: {}\nExpected: {}", - String::from_utf8_lossy(&text_got[..]), - String::from_utf8_lossy(text_expected) - ); - } - } -} - -// Make sure we error out on trailing junk. -#[test] -fn trailing_junk() { - let tests: &[&[u8]] = &[&b"MDEyMzQ1Njc4*!@#$%^&"[..], b"MDEyMzQ1Njc4OQ== "][..]; - - for base64data in tests.iter() { - // Read n bytes at a time. 
- for n in 1..base64data.len() + 1 { - let mut wrapped_reader = io::Cursor::new(base64data); - let mut decoder = DecoderReader::new(&mut wrapped_reader, &STANDARD); - - // handle errors as you normally would - let mut buffer = vec![0u8; n]; - let mut saw_error = false; - loop { - match decoder.read(&mut buffer[..]) { - Err(_) => { - saw_error = true; - break; - } - Ok(0) => break, - Ok(_len) => (), - } - } - - assert!(saw_error); - } - } -} - -#[test] -fn handles_short_read_from_delegate() { - let mut rng = rand::thread_rng(); - let mut bytes = Vec::new(); - let mut b64 = String::new(); - let mut decoded = Vec::new(); - - for _ in 0..10_000 { - bytes.clear(); - b64.clear(); - decoded.clear(); - - let size = rng.gen_range(0..(10 * BUF_SIZE)); - bytes.extend(iter::repeat(0).take(size)); - bytes.truncate(size); - rng.fill_bytes(&mut bytes[..size]); - assert_eq!(size, bytes.len()); - - let engine = random_engine(&mut rng); - engine.encode_string(&bytes[..], &mut b64); - - let mut wrapped_reader = io::Cursor::new(b64.as_bytes()); - let mut short_reader = RandomShortRead { - delegate: &mut wrapped_reader, - rng: &mut rng, - }; - - let mut decoder = DecoderReader::new(&mut short_reader, &engine); - - let decoded_len = decoder.read_to_end(&mut decoded).unwrap(); - assert_eq!(size, decoded_len); - assert_eq!(&bytes[..], &decoded[..]); - } -} - -#[test] -fn read_in_short_increments() { - let mut rng = rand::thread_rng(); - let mut bytes = Vec::new(); - let mut b64 = String::new(); - let mut decoded = Vec::new(); - - for _ in 0..10_000 { - bytes.clear(); - b64.clear(); - decoded.clear(); - - let size = rng.gen_range(0..(10 * BUF_SIZE)); - bytes.extend(iter::repeat(0).take(size)); - // leave room to play around with larger buffers - decoded.extend(iter::repeat(0).take(size * 3)); - - rng.fill_bytes(&mut bytes[..]); - assert_eq!(size, bytes.len()); - - let engine = random_engine(&mut rng); - - engine.encode_string(&bytes[..], &mut b64); - - let mut wrapped_reader = io::Cursor::new(&b64[..]); - let mut decoder = DecoderReader::new(&mut wrapped_reader, &engine); - - consume_with_short_reads_and_validate(&mut rng, &bytes[..], &mut decoded, &mut decoder); - } -} - -#[test] -fn read_in_short_increments_with_short_delegate_reads() { - let mut rng = rand::thread_rng(); - let mut bytes = Vec::new(); - let mut b64 = String::new(); - let mut decoded = Vec::new(); - - for _ in 0..10_000 { - bytes.clear(); - b64.clear(); - decoded.clear(); - - let size = rng.gen_range(0..(10 * BUF_SIZE)); - bytes.extend(iter::repeat(0).take(size)); - // leave room to play around with larger buffers - decoded.extend(iter::repeat(0).take(size * 3)); - - rng.fill_bytes(&mut bytes[..]); - assert_eq!(size, bytes.len()); - - let engine = random_engine(&mut rng); - - engine.encode_string(&bytes[..], &mut b64); - - let mut base_reader = io::Cursor::new(&b64[..]); - let mut decoder = DecoderReader::new(&mut base_reader, &engine); - let mut short_reader = RandomShortRead { - delegate: &mut decoder, - rng: &mut rand::thread_rng(), - }; - - consume_with_short_reads_and_validate( - &mut rng, - &bytes[..], - &mut decoded, - &mut short_reader, - ); - } -} - -#[test] -fn reports_invalid_last_symbol_correctly() { - let mut rng = rand::thread_rng(); - let mut bytes = Vec::new(); - let mut b64 = String::new(); - let mut b64_bytes = Vec::new(); - let mut decoded = Vec::new(); - let mut bulk_decoded = Vec::new(); - - for _ in 0..1_000 { - bytes.clear(); - b64.clear(); - b64_bytes.clear(); - - let size = rng.gen_range(1..(10 * BUF_SIZE)); - 
bytes.extend(iter::repeat(0).take(size)); - decoded.extend(iter::repeat(0).take(size)); - rng.fill_bytes(&mut bytes[..]); - assert_eq!(size, bytes.len()); - - let config = random_config(&mut rng); - let alphabet = random_alphabet(&mut rng); - // changing padding will cause invalid padding errors when we twiddle the last byte - let engine = GeneralPurpose::new(alphabet, config.with_encode_padding(false)); - engine.encode_string(&bytes[..], &mut b64); - b64_bytes.extend(b64.bytes()); - assert_eq!(b64_bytes.len(), b64.len()); - - // change the last character to every possible symbol. Should behave the same as bulk - // decoding whether invalid or valid. - for &s1 in alphabet.symbols.iter() { - decoded.clear(); - bulk_decoded.clear(); - - // replace the last - *b64_bytes.last_mut().unwrap() = s1; - let bulk_res = engine.decode_vec(&b64_bytes[..], &mut bulk_decoded); - - let mut wrapped_reader = io::Cursor::new(&b64_bytes[..]); - let mut decoder = DecoderReader::new(&mut wrapped_reader, &engine); - - let stream_res = decoder.read_to_end(&mut decoded).map(|_| ()).map_err(|e| { - e.into_inner() - .and_then(|e| e.downcast::().ok()) - }); - - assert_eq!(bulk_res.map_err(|e| Some(Box::new(e))), stream_res); - } - } -} - -#[test] -fn reports_invalid_byte_correctly() { - let mut rng = rand::thread_rng(); - let mut bytes = Vec::new(); - let mut b64 = String::new(); - let mut stream_decoded = Vec::new(); - let mut bulk_decoded = Vec::new(); - - for _ in 0..10_000 { - bytes.clear(); - b64.clear(); - stream_decoded.clear(); - bulk_decoded.clear(); - - let size = rng.gen_range(1..(10 * BUF_SIZE)); - bytes.extend(iter::repeat(0).take(size)); - rng.fill_bytes(&mut bytes[..size]); - assert_eq!(size, bytes.len()); - - let engine = GeneralPurpose::new(&alphabet::STANDARD, random_config(&mut rng)); - - engine.encode_string(&bytes[..], &mut b64); - // replace one byte, somewhere, with '*', which is invalid - let bad_byte_pos = rng.gen_range(0..b64.len()); - let mut b64_bytes = b64.bytes().collect::>(); - b64_bytes[bad_byte_pos] = b'*'; - - let mut wrapped_reader = io::Cursor::new(b64_bytes.clone()); - let mut decoder = DecoderReader::new(&mut wrapped_reader, &engine); - - let read_decode_err = decoder - .read_to_end(&mut stream_decoded) - .map_err(|e| { - let kind = e.kind(); - let inner = e - .into_inner() - .and_then(|e| e.downcast::().ok()); - inner.map(|i| (*i, kind)) - }) - .err() - .and_then(|o| o); - - let bulk_decode_err = engine.decode_vec(&b64_bytes[..], &mut bulk_decoded).err(); - - // it's tricky to predict where the invalid data's offset will be since if it's in the last - // chunk it will be reported at the first padding location because it's treated as invalid - // padding. So, we just check that it's the same as it is for decoding all at once. 
- assert_eq!( - bulk_decode_err.map(|e| (e, io::ErrorKind::InvalidData)), - read_decode_err - ); - } -} - -#[test] -fn internal_padding_error_with_short_read_concatenated_texts_invalid_byte_error() { - let mut rng = rand::thread_rng(); - let mut bytes = Vec::new(); - let mut b64 = String::new(); - let mut reader_decoded = Vec::new(); - let mut bulk_decoded = Vec::new(); - - // encodes with padding, requires that padding be present so we don't get InvalidPadding - // just because padding is there at all - let engine = STANDARD; - - for _ in 0..10_000 { - bytes.clear(); - b64.clear(); - reader_decoded.clear(); - bulk_decoded.clear(); - - // at least 2 bytes so there can be a split point between bytes - let size = rng.gen_range(2..(10 * BUF_SIZE)); - bytes.resize(size, 0); - rng.fill_bytes(&mut bytes[..size]); - - // Concatenate two valid b64s, yielding padding in the middle. - // This avoids scenarios that are challenging to assert on, like random padding location - // that might be InvalidLastSymbol when decoded at certain buffer sizes but InvalidByte - // when done all at once. - let split = loop { - // find a split point that will produce padding on the first part - let s = rng.gen_range(1..size); - if s % 3 != 0 { - // short enough to need padding - break s; - }; - }; - - engine.encode_string(&bytes[..split], &mut b64); - assert!(b64.contains('='), "split: {}, b64: {}", split, b64); - let bad_byte_pos = b64.find('=').unwrap(); - engine.encode_string(&bytes[split..], &mut b64); - let b64_bytes = b64.as_bytes(); - - // short read to make it plausible for padding to happen on a read boundary - let read_len = rng.gen_range(1..10); - let mut wrapped_reader = ShortRead { - max_read_len: read_len, - delegate: io::Cursor::new(&b64_bytes), - }; - - let mut decoder = DecoderReader::new(&mut wrapped_reader, &engine); - - let read_decode_err = decoder - .read_to_end(&mut reader_decoded) - .map_err(|e| { - *e.into_inner() - .and_then(|e| e.downcast::().ok()) - .unwrap() - }) - .unwrap_err(); - - let bulk_decode_err = engine.decode_vec(b64_bytes, &mut bulk_decoded).unwrap_err(); - - assert_eq!( - bulk_decode_err, - read_decode_err, - "read len: {}, bad byte pos: {}, b64: {}", - read_len, - bad_byte_pos, - std::str::from_utf8(b64_bytes).unwrap() - ); - assert_eq!( - DecodeError::InvalidByte( - split / 3 * 4 - + match split % 3 { - 1 => 2, - 2 => 3, - _ => unreachable!(), - }, - PAD_BYTE - ), - read_decode_err - ); - } -} - -#[test] -fn internal_padding_anywhere_error() { - let mut rng = rand::thread_rng(); - let mut bytes = Vec::new(); - let mut b64 = String::new(); - let mut reader_decoded = Vec::new(); - - // encodes with padding, requires that padding be present so we don't get InvalidPadding - // just because padding is there at all - let engine = STANDARD; - - for _ in 0..10_000 { - bytes.clear(); - b64.clear(); - reader_decoded.clear(); - - bytes.resize(10 * BUF_SIZE, 0); - rng.fill_bytes(&mut bytes[..]); - - // Just shove a padding byte in there somewhere. - // The specific error to expect is challenging to predict precisely because it - // will vary based on the position of the padding in the quad and the read buffer - // length, but SOMETHING should go wrong. 
- - engine.encode_string(&bytes[..], &mut b64); - let mut b64_bytes = b64.as_bytes().to_vec(); - // put padding somewhere other than the last quad - b64_bytes[rng.gen_range(0..bytes.len() - 4)] = PAD_BYTE; - - // short read to make it plausible for padding to happen on a read boundary - let read_len = rng.gen_range(1..10); - let mut wrapped_reader = ShortRead { - max_read_len: read_len, - delegate: io::Cursor::new(&b64_bytes), - }; - - let mut decoder = DecoderReader::new(&mut wrapped_reader, &engine); - - let result = decoder.read_to_end(&mut reader_decoded); - assert!(result.is_err()); - } -} - -fn consume_with_short_reads_and_validate( - rng: &mut rand::rngs::ThreadRng, - expected_bytes: &[u8], - decoded: &mut [u8], - short_reader: &mut R, -) { - let mut total_read = 0_usize; - loop { - assert!( - total_read <= expected_bytes.len(), - "tr {} size {}", - total_read, - expected_bytes.len() - ); - if total_read == expected_bytes.len() { - assert_eq!(expected_bytes, &decoded[..total_read]); - // should be done - assert_eq!(0, short_reader.read(&mut *decoded).unwrap()); - // didn't write anything - assert_eq!(expected_bytes, &decoded[..total_read]); - - break; - } - let decode_len = rng.gen_range(1..cmp::max(2, expected_bytes.len() * 2)); - - let read = short_reader - .read(&mut decoded[total_read..total_read + decode_len]) - .unwrap(); - total_read += read; - } -} - -/// Limits how many bytes a reader will provide in each read call. -/// Useful for shaking out code that may work fine only with typical input sources that always fill -/// the buffer. -struct RandomShortRead<'a, 'b, R: io::Read, N: rand::Rng> { - delegate: &'b mut R, - rng: &'a mut N, -} - -impl<'a, 'b, R: io::Read, N: rand::Rng> io::Read for RandomShortRead<'a, 'b, R, N> { - fn read(&mut self, buf: &mut [u8]) -> Result { - // avoid 0 since it means EOF for non-empty buffers - let effective_len = cmp::min(self.rng.gen_range(1..20), buf.len()); - - self.delegate.read(&mut buf[..effective_len]) - } -} - -struct ShortRead { - delegate: R, - max_read_len: usize, -} - -impl io::Read for ShortRead { - fn read(&mut self, buf: &mut [u8]) -> io::Result { - let len = self.max_read_len.max(buf.len()); - self.delegate.read(&mut buf[..len]) - } -} diff --git a/vendor/base64/src/read/mod.rs b/vendor/base64/src/read/mod.rs deleted file mode 100644 index 856064481cba17..00000000000000 --- a/vendor/base64/src/read/mod.rs +++ /dev/null @@ -1,6 +0,0 @@ -//! Implementations of `io::Read` to transparently decode base64. 
-mod decoder; -pub use self::decoder::DecoderReader; - -#[cfg(test)] -mod decoder_tests; diff --git a/vendor/base64/src/tests.rs b/vendor/base64/src/tests.rs deleted file mode 100644 index 7083b5433fe2ef..00000000000000 --- a/vendor/base64/src/tests.rs +++ /dev/null @@ -1,117 +0,0 @@ -use std::str; - -use rand::{ - distributions, - distributions::{Distribution as _, Uniform}, - seq::SliceRandom, - Rng, SeedableRng, -}; - -use crate::{ - alphabet, - encode::encoded_len, - engine::{ - general_purpose::{GeneralPurpose, GeneralPurposeConfig}, - Config, DecodePaddingMode, Engine, - }, -}; - -#[test] -fn roundtrip_random_config_short() { - // exercise the slower encode/decode routines that operate on shorter buffers more vigorously - roundtrip_random_config(Uniform::new(0, 50), 10_000); -} - -#[test] -fn roundtrip_random_config_long() { - roundtrip_random_config(Uniform::new(0, 1000), 10_000); -} - -pub fn assert_encode_sanity(encoded: &str, padded: bool, input_len: usize) { - let input_rem = input_len % 3; - let expected_padding_len = if input_rem > 0 { - if padded { - 3 - input_rem - } else { - 0 - } - } else { - 0 - }; - - let expected_encoded_len = encoded_len(input_len, padded).unwrap(); - - assert_eq!(expected_encoded_len, encoded.len()); - - let padding_len = encoded.chars().filter(|&c| c == '=').count(); - - assert_eq!(expected_padding_len, padding_len); - - let _ = str::from_utf8(encoded.as_bytes()).expect("Base64 should be valid utf8"); -} - -fn roundtrip_random_config(input_len_range: Uniform, iterations: u32) { - let mut input_buf: Vec = Vec::new(); - let mut encoded_buf = String::new(); - let mut rng = rand::rngs::SmallRng::from_entropy(); - - for _ in 0..iterations { - input_buf.clear(); - encoded_buf.clear(); - - let input_len = input_len_range.sample(&mut rng); - - let engine = random_engine(&mut rng); - - for _ in 0..input_len { - input_buf.push(rng.gen()); - } - - engine.encode_string(&input_buf, &mut encoded_buf); - - assert_encode_sanity(&encoded_buf, engine.config().encode_padding(), input_len); - - assert_eq!(input_buf, engine.decode(&encoded_buf).unwrap()); - } -} - -pub fn random_config(rng: &mut R) -> GeneralPurposeConfig { - let mode = rng.gen(); - GeneralPurposeConfig::new() - .with_encode_padding(match mode { - DecodePaddingMode::Indifferent => rng.gen(), - DecodePaddingMode::RequireCanonical => true, - DecodePaddingMode::RequireNone => false, - }) - .with_decode_padding_mode(mode) - .with_decode_allow_trailing_bits(rng.gen()) -} - -impl distributions::Distribution for distributions::Standard { - fn sample(&self, rng: &mut R) -> DecodePaddingMode { - match rng.gen_range(0..=2) { - 0 => DecodePaddingMode::Indifferent, - 1 => DecodePaddingMode::RequireCanonical, - _ => DecodePaddingMode::RequireNone, - } - } -} - -pub fn random_alphabet(rng: &mut R) -> &'static alphabet::Alphabet { - ALPHABETS.choose(rng).unwrap() -} - -pub fn random_engine(rng: &mut R) -> GeneralPurpose { - let alphabet = random_alphabet(rng); - let config = random_config(rng); - GeneralPurpose::new(alphabet, config) -} - -const ALPHABETS: &[alphabet::Alphabet] = &[ - alphabet::URL_SAFE, - alphabet::STANDARD, - alphabet::CRYPT, - alphabet::BCRYPT, - alphabet::IMAP_MUTF7, - alphabet::BIN_HEX, -]; diff --git a/vendor/base64/src/write/encoder.rs b/vendor/base64/src/write/encoder.rs deleted file mode 100644 index 1c19bb42ab73a1..00000000000000 --- a/vendor/base64/src/write/encoder.rs +++ /dev/null @@ -1,407 +0,0 @@ -use crate::engine::Engine; -use std::{ - cmp, fmt, io, - io::{ErrorKind, Result}, -}; - 
-pub(crate) const BUF_SIZE: usize = 1024; -/// The most bytes whose encoding will fit in `BUF_SIZE` -const MAX_INPUT_LEN: usize = BUF_SIZE / 4 * 3; -// 3 bytes of input = 4 bytes of base64, always (because we don't allow line wrapping) -const MIN_ENCODE_CHUNK_SIZE: usize = 3; - -/// A `Write` implementation that base64 encodes data before delegating to the wrapped writer. -/// -/// Because base64 has special handling for the end of the input data (padding, etc), there's a -/// `finish()` method on this type that encodes any leftover input bytes and adds padding if -/// appropriate. It's called automatically when deallocated (see the `Drop` implementation), but -/// any error that occurs when invoking the underlying writer will be suppressed. If you want to -/// handle such errors, call `finish()` yourself. -/// -/// # Examples -/// -/// ``` -/// use std::io::Write; -/// use base64::engine::general_purpose; -/// -/// // use a vec as the simplest possible `Write` -- in real code this is probably a file, etc. -/// let mut enc = base64::write::EncoderWriter::new(Vec::new(), &general_purpose::STANDARD); -/// -/// // handle errors as you normally would -/// enc.write_all(b"asdf").unwrap(); -/// -/// // could leave this out to be called by Drop, if you don't care -/// // about handling errors or getting the delegate writer back -/// let delegate = enc.finish().unwrap(); -/// -/// // base64 was written to the writer -/// assert_eq!(b"YXNkZg==", &delegate[..]); -/// -/// ``` -/// -/// # Panics -/// -/// Calling `write()` (or related methods) or `finish()` after `finish()` has completed without -/// error is invalid and will panic. -/// -/// # Errors -/// -/// Base64 encoding itself does not generate errors, but errors from the wrapped writer will be -/// returned as per the contract of `Write`. -/// -/// # Performance -/// -/// It has some minor performance loss compared to encoding slices (a couple percent). -/// It does not do any heap allocation. -/// -/// # Limitations -/// -/// Owing to the specification of the `write` and `flush` methods on the `Write` trait and their -/// implications for a buffering implementation, these methods may not behave as expected. In -/// particular, calling `write_all` on this interface may fail with `io::ErrorKind::WriteZero`. -/// See the documentation of the `Write` trait implementation for further details. -pub struct EncoderWriter<'e, E: Engine, W: io::Write> { - engine: &'e E, - /// Where encoded data is written to. It's an Option as it's None immediately before Drop is - /// called so that finish() can return the underlying writer. None implies that finish() has - /// been called successfully. - delegate: Option, - /// Holds a partial chunk, if any, after the last `write()`, so that we may then fill the chunk - /// with the next `write()`, encode it, then proceed with the rest of the input normally. - extra_input: [u8; MIN_ENCODE_CHUNK_SIZE], - /// How much of `extra` is occupied, in `[0, MIN_ENCODE_CHUNK_SIZE]`. - extra_input_occupied_len: usize, - /// Buffer to encode into. May hold leftover encoded bytes from a previous write call that the underlying writer - /// did not write last time. 
- output: [u8; BUF_SIZE], - /// How much of `output` is occupied with encoded data that couldn't be written last time - output_occupied_len: usize, - /// panic safety: don't write again in destructor if writer panicked while we were writing to it - panicked: bool, -} - -impl<'e, E: Engine, W: io::Write> fmt::Debug for EncoderWriter<'e, E, W> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!( - f, - "extra_input: {:?} extra_input_occupied_len:{:?} output[..5]: {:?} output_occupied_len: {:?}", - self.extra_input, - self.extra_input_occupied_len, - &self.output[0..5], - self.output_occupied_len - ) - } -} - -impl<'e, E: Engine, W: io::Write> EncoderWriter<'e, E, W> { - /// Create a new encoder that will write to the provided delegate writer. - pub fn new(delegate: W, engine: &'e E) -> EncoderWriter<'e, E, W> { - EncoderWriter { - engine, - delegate: Some(delegate), - extra_input: [0u8; MIN_ENCODE_CHUNK_SIZE], - extra_input_occupied_len: 0, - output: [0u8; BUF_SIZE], - output_occupied_len: 0, - panicked: false, - } - } - - /// Encode all remaining buffered data and write it, including any trailing incomplete input - /// triples and associated padding. - /// - /// Once this succeeds, no further writes or calls to this method are allowed. - /// - /// This may write to the delegate writer multiple times if the delegate writer does not accept - /// all input provided to its `write` each invocation. - /// - /// If you don't care about error handling, it is not necessary to call this function, as the - /// equivalent finalization is done by the Drop impl. - /// - /// Returns the writer that this was constructed around. - /// - /// # Errors - /// - /// The first error that is not of `ErrorKind::Interrupted` will be returned. - pub fn finish(&mut self) -> Result { - // If we could consume self in finish(), we wouldn't have to worry about this case, but - // finish() is retryable in the face of I/O errors, so we can't consume here. - if self.delegate.is_none() { - panic!("Encoder has already had finish() called"); - }; - - self.write_final_leftovers()?; - - let writer = self.delegate.take().expect("Writer must be present"); - - Ok(writer) - } - - /// Write any remaining buffered data to the delegate writer. - fn write_final_leftovers(&mut self) -> Result<()> { - if self.delegate.is_none() { - // finish() has already successfully called this, and we are now in drop() with a None - // writer, so just no-op - return Ok(()); - } - - self.write_all_encoded_output()?; - - if self.extra_input_occupied_len > 0 { - let encoded_len = self - .engine - .encode_slice( - &self.extra_input[..self.extra_input_occupied_len], - &mut self.output[..], - ) - .expect("buffer is large enough"); - - self.output_occupied_len = encoded_len; - - self.write_all_encoded_output()?; - - // write succeeded, do not write the encoding of extra again if finish() is retried - self.extra_input_occupied_len = 0; - } - - Ok(()) - } - - /// Write as much of the encoded output to the delegate writer as it will accept, and store the - /// leftovers to be attempted at the next write() call. Updates `self.output_occupied_len`. - /// - /// # Errors - /// - /// Errors from the delegate writer are returned. In the case of an error, - /// `self.output_occupied_len` will not be updated, as errors from `write` are specified to mean - /// that no write took place. 
- fn write_to_delegate(&mut self, current_output_len: usize) -> Result<()> { - self.panicked = true; - let res = self - .delegate - .as_mut() - .expect("Writer must be present") - .write(&self.output[..current_output_len]); - self.panicked = false; - - res.map(|consumed| { - debug_assert!(consumed <= current_output_len); - - if consumed < current_output_len { - self.output_occupied_len = current_output_len.checked_sub(consumed).unwrap(); - // If we're blocking on I/O, the minor inefficiency of copying bytes to the - // start of the buffer is the least of our concerns... - // TODO Rotate moves more than we need to; copy_within now stable. - self.output.rotate_left(consumed); - } else { - self.output_occupied_len = 0; - } - }) - } - - /// Write all buffered encoded output. If this returns `Ok`, `self.output_occupied_len` is `0`. - /// - /// This is basically write_all for the remaining buffered data but without the undesirable - /// abort-on-`Ok(0)` behavior. - /// - /// # Errors - /// - /// Any error emitted by the delegate writer abort the write loop and is returned, unless it's - /// `Interrupted`, in which case the error is ignored and writes will continue. - fn write_all_encoded_output(&mut self) -> Result<()> { - while self.output_occupied_len > 0 { - let remaining_len = self.output_occupied_len; - match self.write_to_delegate(remaining_len) { - // try again on interrupts ala write_all - Err(ref e) if e.kind() == ErrorKind::Interrupted => {} - // other errors return - Err(e) => return Err(e), - // success no-ops because remaining length is already updated - Ok(_) => {} - }; - } - - debug_assert_eq!(0, self.output_occupied_len); - Ok(()) - } - - /// Unwraps this `EncoderWriter`, returning the base writer it writes base64 encoded output - /// to. - /// - /// Normally this method should not be needed, since `finish()` returns the inner writer if - /// it completes successfully. That will also ensure all data has been flushed, which the - /// `into_inner()` function does *not* do. - /// - /// Calling this method after `finish()` has completed successfully will panic, since the - /// writer has already been returned. - /// - /// This method may be useful if the writer implements additional APIs beyond the `Write` - /// trait. Note that the inner writer might be in an error state or have an incomplete - /// base64 string written to it. - pub fn into_inner(mut self) -> W { - self.delegate - .take() - .expect("Encoder has already had finish() called") - } -} - -impl<'e, E: Engine, W: io::Write> io::Write for EncoderWriter<'e, E, W> { - /// Encode input and then write to the delegate writer. - /// - /// Under non-error circumstances, this returns `Ok` with the value being the number of bytes - /// of `input` consumed. The value may be `0`, which interacts poorly with `write_all`, which - /// interprets `Ok(0)` as an error, despite it being allowed by the contract of `write`. See - /// for more on that. - /// - /// If the previous call to `write` provided more (encoded) data than the delegate writer could - /// accept in a single call to its `write`, the remaining data is buffered. As long as buffered - /// data is present, subsequent calls to `write` will try to write the remaining buffered data - /// to the delegate and return either `Ok(0)` -- and therefore not consume any of `input` -- or - /// an error. - /// - /// # Errors - /// - /// Any errors emitted by the delegate writer are returned. 
- fn write(&mut self, input: &[u8]) -> Result { - if self.delegate.is_none() { - panic!("Cannot write more after calling finish()"); - } - - if input.is_empty() { - return Ok(0); - } - - // The contract of `Write::write` places some constraints on this implementation: - // - a call to `write()` represents at most one call to a wrapped `Write`, so we can't - // iterate over the input and encode multiple chunks. - // - Errors mean that "no bytes were written to this writer", so we need to reset the - // internal state to what it was before the error occurred - - // before reading any input, write any leftover encoded output from last time - if self.output_occupied_len > 0 { - let current_len = self.output_occupied_len; - return self - .write_to_delegate(current_len) - // did not read any input - .map(|_| 0); - } - - debug_assert_eq!(0, self.output_occupied_len); - - // how many bytes, if any, were read into `extra` to create a triple to encode - let mut extra_input_read_len = 0; - let mut input = input; - - let orig_extra_len = self.extra_input_occupied_len; - - let mut encoded_size = 0; - // always a multiple of MIN_ENCODE_CHUNK_SIZE - let mut max_input_len = MAX_INPUT_LEN; - - // process leftover un-encoded input from last write - if self.extra_input_occupied_len > 0 { - debug_assert!(self.extra_input_occupied_len < 3); - if input.len() + self.extra_input_occupied_len >= MIN_ENCODE_CHUNK_SIZE { - // Fill up `extra`, encode that into `output`, and consume as much of the rest of - // `input` as possible. - // We could write just the encoding of `extra` by itself but then we'd have to - // return after writing only 4 bytes, which is inefficient if the underlying writer - // would make a syscall. - extra_input_read_len = MIN_ENCODE_CHUNK_SIZE - self.extra_input_occupied_len; - debug_assert!(extra_input_read_len > 0); - // overwrite only bytes that weren't already used. If we need to rollback extra_len - // (when the subsequent write errors), the old leading bytes will still be there. - self.extra_input[self.extra_input_occupied_len..MIN_ENCODE_CHUNK_SIZE] - .copy_from_slice(&input[0..extra_input_read_len]); - - let len = self.engine.internal_encode( - &self.extra_input[0..MIN_ENCODE_CHUNK_SIZE], - &mut self.output[..], - ); - debug_assert_eq!(4, len); - - input = &input[extra_input_read_len..]; - - // consider extra to be used up, since we encoded it - self.extra_input_occupied_len = 0; - // don't clobber where we just encoded to - encoded_size = 4; - // and don't read more than can be encoded - max_input_len = MAX_INPUT_LEN - MIN_ENCODE_CHUNK_SIZE; - - // fall through to normal encoding - } else { - // `extra` and `input` are non empty, but `|extra| + |input| < 3`, so there must be - // 1 byte in each. 
- debug_assert_eq!(1, input.len()); - debug_assert_eq!(1, self.extra_input_occupied_len); - - self.extra_input[self.extra_input_occupied_len] = input[0]; - self.extra_input_occupied_len += 1; - return Ok(1); - }; - } else if input.len() < MIN_ENCODE_CHUNK_SIZE { - // `extra` is empty, and `input` fits inside it - self.extra_input[0..input.len()].copy_from_slice(input); - self.extra_input_occupied_len = input.len(); - return Ok(input.len()); - }; - - // either 0 or 1 complete chunks encoded from extra - debug_assert!(encoded_size == 0 || encoded_size == 4); - debug_assert!( - // didn't encode extra input - MAX_INPUT_LEN == max_input_len - // encoded one triple - || MAX_INPUT_LEN == max_input_len + MIN_ENCODE_CHUNK_SIZE - ); - - // encode complete triples only - let input_complete_chunks_len = input.len() - (input.len() % MIN_ENCODE_CHUNK_SIZE); - let input_chunks_to_encode_len = cmp::min(input_complete_chunks_len, max_input_len); - debug_assert_eq!(0, max_input_len % MIN_ENCODE_CHUNK_SIZE); - debug_assert_eq!(0, input_chunks_to_encode_len % MIN_ENCODE_CHUNK_SIZE); - - encoded_size += self.engine.internal_encode( - &input[..(input_chunks_to_encode_len)], - &mut self.output[encoded_size..], - ); - - // not updating `self.output_occupied_len` here because if the below write fails, it should - // "never take place" -- the buffer contents we encoded are ignored and perhaps retried - // later, if the consumer chooses. - - self.write_to_delegate(encoded_size) - // no matter whether we wrote the full encoded buffer or not, we consumed the same - // input - .map(|_| extra_input_read_len + input_chunks_to_encode_len) - .map_err(|e| { - // in case we filled and encoded `extra`, reset extra_len - self.extra_input_occupied_len = orig_extra_len; - - e - }) - } - - /// Because this is usually treated as OK to call multiple times, it will *not* flush any - /// incomplete chunks of input or write padding. - /// # Errors - /// - /// The first error that is not of [`ErrorKind::Interrupted`] will be returned. - fn flush(&mut self) -> Result<()> { - self.write_all_encoded_output()?; - self.delegate - .as_mut() - .expect("Writer must be present") - .flush() - } -} - -impl<'e, E: Engine, W: io::Write> Drop for EncoderWriter<'e, E, W> { - fn drop(&mut self) { - if !self.panicked { - // like `BufWriter`, ignore errors during drop - let _ = self.write_final_leftovers(); - } - } -} diff --git a/vendor/base64/src/write/encoder_string_writer.rs b/vendor/base64/src/write/encoder_string_writer.rs deleted file mode 100644 index 9c02bcde84fb4d..00000000000000 --- a/vendor/base64/src/write/encoder_string_writer.rs +++ /dev/null @@ -1,207 +0,0 @@ -use super::encoder::EncoderWriter; -use crate::engine::Engine; -use std::io; - -/// A `Write` implementation that base64-encodes data using the provided config and accumulates the -/// resulting base64 utf8 `&str` in a [StrConsumer] implementation (typically `String`), which is -/// then exposed via `into_inner()`. 
-/// -/// # Examples -/// -/// Buffer base64 in a new String: -/// -/// ``` -/// use std::io::Write; -/// use base64::engine::general_purpose; -/// -/// let mut enc = base64::write::EncoderStringWriter::new(&general_purpose::STANDARD); -/// -/// enc.write_all(b"asdf").unwrap(); -/// -/// // get the resulting String -/// let b64_string = enc.into_inner(); -/// -/// assert_eq!("YXNkZg==", &b64_string); -/// ``` -/// -/// Or, append to an existing `String`, which implements `StrConsumer`: -/// -/// ``` -/// use std::io::Write; -/// use base64::engine::general_purpose; -/// -/// let mut buf = String::from("base64: "); -/// -/// let mut enc = base64::write::EncoderStringWriter::from_consumer( -/// &mut buf, -/// &general_purpose::STANDARD); -/// -/// enc.write_all(b"asdf").unwrap(); -/// -/// // release the &mut reference on buf -/// let _ = enc.into_inner(); -/// -/// assert_eq!("base64: YXNkZg==", &buf); -/// ``` -/// -/// # Performance -/// -/// Because it has to validate that the base64 is UTF-8, it is about 80% as fast as writing plain -/// bytes to a `io::Write`. -pub struct EncoderStringWriter<'e, E: Engine, S: StrConsumer> { - encoder: EncoderWriter<'e, E, Utf8SingleCodeUnitWriter>, -} - -impl<'e, E: Engine, S: StrConsumer> EncoderStringWriter<'e, E, S> { - /// Create a EncoderStringWriter that will append to the provided `StrConsumer`. - pub fn from_consumer(str_consumer: S, engine: &'e E) -> Self { - EncoderStringWriter { - encoder: EncoderWriter::new(Utf8SingleCodeUnitWriter { str_consumer }, engine), - } - } - - /// Encode all remaining buffered data, including any trailing incomplete input triples and - /// associated padding. - /// - /// Returns the base64-encoded form of the accumulated written data. - pub fn into_inner(mut self) -> S { - self.encoder - .finish() - .expect("Writing to a consumer should never fail") - .str_consumer - } -} - -impl<'e, E: Engine> EncoderStringWriter<'e, E, String> { - /// Create a EncoderStringWriter that will encode into a new `String` with the provided config. - pub fn new(engine: &'e E) -> Self { - EncoderStringWriter::from_consumer(String::new(), engine) - } -} - -impl<'e, E: Engine, S: StrConsumer> io::Write for EncoderStringWriter<'e, E, S> { - fn write(&mut self, buf: &[u8]) -> io::Result { - self.encoder.write(buf) - } - - fn flush(&mut self) -> io::Result<()> { - self.encoder.flush() - } -} - -/// An abstraction around consuming `str`s produced by base64 encoding. -pub trait StrConsumer { - /// Consume the base64 encoded data in `buf` - fn consume(&mut self, buf: &str); -} - -/// As for io::Write, `StrConsumer` is implemented automatically for `&mut S`. -impl StrConsumer for &mut S { - fn consume(&mut self, buf: &str) { - (**self).consume(buf); - } -} - -/// Pushes the str onto the end of the String -impl StrConsumer for String { - fn consume(&mut self, buf: &str) { - self.push_str(buf); - } -} - -/// A `Write` that only can handle bytes that are valid single-byte UTF-8 code units. -/// -/// This is safe because we only use it when writing base64, which is always valid UTF-8. 
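The `Utf8SingleCodeUnitWriter` declared just below relies on base64 output being pure ASCII, so `str::from_utf8` on any encoded buffer cannot fail. A quick standalone check of that invariant (engine and input chosen arbitrarily for illustration):

```rust
use base64::engine::{general_purpose::STANDARD, Engine as _};

fn main() {
    // Every output byte is drawn from the 64-character alphabet plus '=',
    // all of which are single-byte ASCII code units.
    let encoded = STANDARD.encode([0xFFu8, 0x00, 0x7F, 0x10]);
    assert!(encoded.is_ascii());
    assert!(std::str::from_utf8(encoded.as_bytes()).is_ok());
}
```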
-struct Utf8SingleCodeUnitWriter { - str_consumer: S, -} - -impl io::Write for Utf8SingleCodeUnitWriter { - fn write(&mut self, buf: &[u8]) -> io::Result { - // Because we expect all input to be valid utf-8 individual bytes, we can encode any buffer - // length - let s = std::str::from_utf8(buf).expect("Input must be valid UTF-8"); - - self.str_consumer.consume(s); - - Ok(buf.len()) - } - - fn flush(&mut self) -> io::Result<()> { - // no op - Ok(()) - } -} - -#[cfg(test)] -mod tests { - use crate::{ - engine::Engine, tests::random_engine, write::encoder_string_writer::EncoderStringWriter, - }; - use rand::Rng; - use std::cmp; - use std::io::Write; - - #[test] - fn every_possible_split_of_input() { - let mut rng = rand::thread_rng(); - let mut orig_data = Vec::::new(); - let mut normal_encoded = String::new(); - - let size = 5_000; - - for i in 0..size { - orig_data.clear(); - normal_encoded.clear(); - - orig_data.resize(size, 0); - rng.fill(&mut orig_data[..]); - - let engine = random_engine(&mut rng); - engine.encode_string(&orig_data, &mut normal_encoded); - - let mut stream_encoder = EncoderStringWriter::new(&engine); - // Write the first i bytes, then the rest - stream_encoder.write_all(&orig_data[0..i]).unwrap(); - stream_encoder.write_all(&orig_data[i..]).unwrap(); - - let stream_encoded = stream_encoder.into_inner(); - - assert_eq!(normal_encoded, stream_encoded); - } - } - #[test] - fn incremental_writes() { - let mut rng = rand::thread_rng(); - let mut orig_data = Vec::::new(); - let mut normal_encoded = String::new(); - - let size = 5_000; - - for _ in 0..size { - orig_data.clear(); - normal_encoded.clear(); - - orig_data.resize(size, 0); - rng.fill(&mut orig_data[..]); - - let engine = random_engine(&mut rng); - engine.encode_string(&orig_data, &mut normal_encoded); - - let mut stream_encoder = EncoderStringWriter::new(&engine); - // write small nibbles of data - let mut offset = 0; - while offset < size { - let nibble_size = cmp::min(rng.gen_range(0..=64), size - offset); - let len = stream_encoder - .write(&orig_data[offset..offset + nibble_size]) - .unwrap(); - offset += len; - } - - let stream_encoded = stream_encoder.into_inner(); - - assert_eq!(normal_encoded, stream_encoded); - } - } -} diff --git a/vendor/base64/src/write/encoder_tests.rs b/vendor/base64/src/write/encoder_tests.rs deleted file mode 100644 index 1f1a1650a6b47d..00000000000000 --- a/vendor/base64/src/write/encoder_tests.rs +++ /dev/null @@ -1,554 +0,0 @@ -use std::io::{Cursor, Write}; -use std::{cmp, io, str}; - -use rand::Rng; - -use crate::{ - alphabet::{STANDARD, URL_SAFE}, - engine::{ - general_purpose::{GeneralPurpose, NO_PAD, PAD}, - Engine, - }, - tests::random_engine, -}; - -use super::EncoderWriter; - -const URL_SAFE_ENGINE: GeneralPurpose = GeneralPurpose::new(&URL_SAFE, PAD); -const NO_PAD_ENGINE: GeneralPurpose = GeneralPurpose::new(&STANDARD, NO_PAD); - -#[test] -fn encode_three_bytes() { - let mut c = Cursor::new(Vec::new()); - { - let mut enc = EncoderWriter::new(&mut c, &URL_SAFE_ENGINE); - - let sz = enc.write(b"abc").unwrap(); - assert_eq!(sz, 3); - } - assert_eq!(&c.get_ref()[..], URL_SAFE_ENGINE.encode("abc").as_bytes()); -} - -#[test] -fn encode_nine_bytes_two_writes() { - let mut c = Cursor::new(Vec::new()); - { - let mut enc = EncoderWriter::new(&mut c, &URL_SAFE_ENGINE); - - let sz = enc.write(b"abcdef").unwrap(); - assert_eq!(sz, 6); - let sz = enc.write(b"ghi").unwrap(); - assert_eq!(sz, 3); - } - assert_eq!( - &c.get_ref()[..], - URL_SAFE_ENGINE.encode("abcdefghi").as_bytes() - 
); -} - -#[test] -fn encode_one_then_two_bytes() { - let mut c = Cursor::new(Vec::new()); - { - let mut enc = EncoderWriter::new(&mut c, &URL_SAFE_ENGINE); - - let sz = enc.write(b"a").unwrap(); - assert_eq!(sz, 1); - let sz = enc.write(b"bc").unwrap(); - assert_eq!(sz, 2); - } - assert_eq!(&c.get_ref()[..], URL_SAFE_ENGINE.encode("abc").as_bytes()); -} - -#[test] -fn encode_one_then_five_bytes() { - let mut c = Cursor::new(Vec::new()); - { - let mut enc = EncoderWriter::new(&mut c, &URL_SAFE_ENGINE); - - let sz = enc.write(b"a").unwrap(); - assert_eq!(sz, 1); - let sz = enc.write(b"bcdef").unwrap(); - assert_eq!(sz, 5); - } - assert_eq!( - &c.get_ref()[..], - URL_SAFE_ENGINE.encode("abcdef").as_bytes() - ); -} - -#[test] -fn encode_1_2_3_bytes() { - let mut c = Cursor::new(Vec::new()); - { - let mut enc = EncoderWriter::new(&mut c, &URL_SAFE_ENGINE); - - let sz = enc.write(b"a").unwrap(); - assert_eq!(sz, 1); - let sz = enc.write(b"bc").unwrap(); - assert_eq!(sz, 2); - let sz = enc.write(b"def").unwrap(); - assert_eq!(sz, 3); - } - assert_eq!( - &c.get_ref()[..], - URL_SAFE_ENGINE.encode("abcdef").as_bytes() - ); -} - -#[test] -fn encode_with_padding() { - let mut c = Cursor::new(Vec::new()); - { - let mut enc = EncoderWriter::new(&mut c, &URL_SAFE_ENGINE); - - enc.write_all(b"abcd").unwrap(); - - enc.flush().unwrap(); - } - assert_eq!(&c.get_ref()[..], URL_SAFE_ENGINE.encode("abcd").as_bytes()); -} - -#[test] -fn encode_with_padding_multiple_writes() { - let mut c = Cursor::new(Vec::new()); - { - let mut enc = EncoderWriter::new(&mut c, &URL_SAFE_ENGINE); - - assert_eq!(1, enc.write(b"a").unwrap()); - assert_eq!(2, enc.write(b"bc").unwrap()); - assert_eq!(3, enc.write(b"def").unwrap()); - assert_eq!(1, enc.write(b"g").unwrap()); - - enc.flush().unwrap(); - } - assert_eq!( - &c.get_ref()[..], - URL_SAFE_ENGINE.encode("abcdefg").as_bytes() - ); -} - -#[test] -fn finish_writes_extra_byte() { - let mut c = Cursor::new(Vec::new()); - { - let mut enc = EncoderWriter::new(&mut c, &URL_SAFE_ENGINE); - - assert_eq!(6, enc.write(b"abcdef").unwrap()); - - // will be in extra - assert_eq!(1, enc.write(b"g").unwrap()); - - // 1 trailing byte = 2 encoded chars - let _ = enc.finish().unwrap(); - } - assert_eq!( - &c.get_ref()[..], - URL_SAFE_ENGINE.encode("abcdefg").as_bytes() - ); -} - -#[test] -fn write_partial_chunk_encodes_partial_chunk() { - let mut c = Cursor::new(Vec::new()); - { - let mut enc = EncoderWriter::new(&mut c, &NO_PAD_ENGINE); - - // nothing encoded yet - assert_eq!(2, enc.write(b"ab").unwrap()); - // encoded here - let _ = enc.finish().unwrap(); - } - assert_eq!(&c.get_ref()[..], NO_PAD_ENGINE.encode("ab").as_bytes()); - assert_eq!(3, c.get_ref().len()); -} - -#[test] -fn write_1_chunk_encodes_complete_chunk() { - let mut c = Cursor::new(Vec::new()); - { - let mut enc = EncoderWriter::new(&mut c, &NO_PAD_ENGINE); - - assert_eq!(3, enc.write(b"abc").unwrap()); - let _ = enc.finish().unwrap(); - } - assert_eq!(&c.get_ref()[..], NO_PAD_ENGINE.encode("abc").as_bytes()); - assert_eq!(4, c.get_ref().len()); -} - -#[test] -fn write_1_chunk_and_partial_encodes_only_complete_chunk() { - let mut c = Cursor::new(Vec::new()); - { - let mut enc = EncoderWriter::new(&mut c, &NO_PAD_ENGINE); - - // "d" not consumed since it's not a full chunk - assert_eq!(3, enc.write(b"abcd").unwrap()); - let _ = enc.finish().unwrap(); - } - assert_eq!(&c.get_ref()[..], NO_PAD_ENGINE.encode("abc").as_bytes()); - assert_eq!(4, c.get_ref().len()); -} - -#[test] -fn 
write_2_partials_to_exactly_complete_chunk_encodes_complete_chunk() { - let mut c = Cursor::new(Vec::new()); - { - let mut enc = EncoderWriter::new(&mut c, &NO_PAD_ENGINE); - - assert_eq!(1, enc.write(b"a").unwrap()); - assert_eq!(2, enc.write(b"bc").unwrap()); - let _ = enc.finish().unwrap(); - } - assert_eq!(&c.get_ref()[..], NO_PAD_ENGINE.encode("abc").as_bytes()); - assert_eq!(4, c.get_ref().len()); -} - -#[test] -fn write_partial_then_enough_to_complete_chunk_but_not_complete_another_chunk_encodes_complete_chunk_without_consuming_remaining( -) { - let mut c = Cursor::new(Vec::new()); - { - let mut enc = EncoderWriter::new(&mut c, &NO_PAD_ENGINE); - - assert_eq!(1, enc.write(b"a").unwrap()); - // doesn't consume "d" - assert_eq!(2, enc.write(b"bcd").unwrap()); - let _ = enc.finish().unwrap(); - } - assert_eq!(&c.get_ref()[..], NO_PAD_ENGINE.encode("abc").as_bytes()); - assert_eq!(4, c.get_ref().len()); -} - -#[test] -fn write_partial_then_enough_to_complete_chunk_and_another_chunk_encodes_complete_chunks() { - let mut c = Cursor::new(Vec::new()); - { - let mut enc = EncoderWriter::new(&mut c, &NO_PAD_ENGINE); - - assert_eq!(1, enc.write(b"a").unwrap()); - // completes partial chunk, and another chunk - assert_eq!(5, enc.write(b"bcdef").unwrap()); - let _ = enc.finish().unwrap(); - } - assert_eq!(&c.get_ref()[..], NO_PAD_ENGINE.encode("abcdef").as_bytes()); - assert_eq!(8, c.get_ref().len()); -} - -#[test] -fn write_partial_then_enough_to_complete_chunk_and_another_chunk_and_another_partial_chunk_encodes_only_complete_chunks( -) { - let mut c = Cursor::new(Vec::new()); - { - let mut enc = EncoderWriter::new(&mut c, &NO_PAD_ENGINE); - - assert_eq!(1, enc.write(b"a").unwrap()); - // completes partial chunk, and another chunk, with one more partial chunk that's not - // consumed - assert_eq!(5, enc.write(b"bcdefe").unwrap()); - let _ = enc.finish().unwrap(); - } - assert_eq!(&c.get_ref()[..], NO_PAD_ENGINE.encode("abcdef").as_bytes()); - assert_eq!(8, c.get_ref().len()); -} - -#[test] -fn drop_calls_finish_for_you() { - let mut c = Cursor::new(Vec::new()); - { - let mut enc = EncoderWriter::new(&mut c, &NO_PAD_ENGINE); - assert_eq!(1, enc.write(b"a").unwrap()); - } - assert_eq!(&c.get_ref()[..], NO_PAD_ENGINE.encode("a").as_bytes()); - assert_eq!(2, c.get_ref().len()); -} - -#[test] -fn every_possible_split_of_input() { - let mut rng = rand::thread_rng(); - let mut orig_data = Vec::::new(); - let mut stream_encoded = Vec::::new(); - let mut normal_encoded = String::new(); - - let size = 5_000; - - for i in 0..size { - orig_data.clear(); - stream_encoded.clear(); - normal_encoded.clear(); - - for _ in 0..size { - orig_data.push(rng.gen()); - } - - let engine = random_engine(&mut rng); - engine.encode_string(&orig_data, &mut normal_encoded); - - { - let mut stream_encoder = EncoderWriter::new(&mut stream_encoded, &engine); - // Write the first i bytes, then the rest - stream_encoder.write_all(&orig_data[0..i]).unwrap(); - stream_encoder.write_all(&orig_data[i..]).unwrap(); - } - - assert_eq!(normal_encoded, str::from_utf8(&stream_encoded).unwrap()); - } -} - -#[test] -fn encode_random_config_matches_normal_encode_reasonable_input_len() { - // choose up to 2 * buf size, so ~half the time it'll use a full buffer - do_encode_random_config_matches_normal_encode(super::encoder::BUF_SIZE * 2); -} - -#[test] -fn encode_random_config_matches_normal_encode_tiny_input_len() { - do_encode_random_config_matches_normal_encode(10); -} - -#[test] -fn retrying_writes_that_error_with_interrupted_works() { - 
let mut rng = rand::thread_rng(); - let mut orig_data = Vec::::new(); - let mut stream_encoded = Vec::::new(); - let mut normal_encoded = String::new(); - - for _ in 0..1_000 { - orig_data.clear(); - stream_encoded.clear(); - normal_encoded.clear(); - - let orig_len: usize = rng.gen_range(100..20_000); - for _ in 0..orig_len { - orig_data.push(rng.gen()); - } - - // encode the normal way - let engine = random_engine(&mut rng); - engine.encode_string(&orig_data, &mut normal_encoded); - - // encode via the stream encoder - { - let mut interrupt_rng = rand::thread_rng(); - let mut interrupting_writer = InterruptingWriter { - w: &mut stream_encoded, - rng: &mut interrupt_rng, - fraction: 0.8, - }; - - let mut stream_encoder = EncoderWriter::new(&mut interrupting_writer, &engine); - let mut bytes_consumed = 0; - while bytes_consumed < orig_len { - // use short inputs since we want to use `extra` a lot as that's what needs rollback - // when errors occur - let input_len: usize = cmp::min(rng.gen_range(0..10), orig_len - bytes_consumed); - - retry_interrupted_write_all( - &mut stream_encoder, - &orig_data[bytes_consumed..bytes_consumed + input_len], - ) - .unwrap(); - - bytes_consumed += input_len; - } - - loop { - let res = stream_encoder.finish(); - match res { - Ok(_) => break, - Err(e) => match e.kind() { - io::ErrorKind::Interrupted => continue, - _ => panic!("{:?}", e), // bail - }, - } - } - - assert_eq!(orig_len, bytes_consumed); - } - - assert_eq!(normal_encoded, str::from_utf8(&stream_encoded).unwrap()); - } -} - -#[test] -fn writes_that_only_write_part_of_input_and_sometimes_interrupt_produce_correct_encoded_data() { - let mut rng = rand::thread_rng(); - let mut orig_data = Vec::::new(); - let mut stream_encoded = Vec::::new(); - let mut normal_encoded = String::new(); - - for _ in 0..1_000 { - orig_data.clear(); - stream_encoded.clear(); - normal_encoded.clear(); - - let orig_len: usize = rng.gen_range(100..20_000); - for _ in 0..orig_len { - orig_data.push(rng.gen()); - } - - // encode the normal way - let engine = random_engine(&mut rng); - engine.encode_string(&orig_data, &mut normal_encoded); - - // encode via the stream encoder - { - let mut partial_rng = rand::thread_rng(); - let mut partial_writer = PartialInterruptingWriter { - w: &mut stream_encoded, - rng: &mut partial_rng, - full_input_fraction: 0.1, - no_interrupt_fraction: 0.1, - }; - - let mut stream_encoder = EncoderWriter::new(&mut partial_writer, &engine); - let mut bytes_consumed = 0; - while bytes_consumed < orig_len { - // use at most medium-length inputs to exercise retry logic more aggressively - let input_len: usize = cmp::min(rng.gen_range(0..100), orig_len - bytes_consumed); - - let res = - stream_encoder.write(&orig_data[bytes_consumed..bytes_consumed + input_len]); - - // retry on interrupt - match res { - Ok(len) => bytes_consumed += len, - Err(e) => match e.kind() { - io::ErrorKind::Interrupted => continue, - _ => { - panic!("should not see other errors"); - } - }, - } - } - - let _ = stream_encoder.finish().unwrap(); - - assert_eq!(orig_len, bytes_consumed); - } - - assert_eq!(normal_encoded, str::from_utf8(&stream_encoded).unwrap()); - } -} - -/// Retry writes until all the data is written or an error that isn't Interrupted is returned. 
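The interrupt/partial-write tests above draw sizes and payloads from `thread_rng()`, so a failure is not replayable from a seed. Where reproducibility matters, a seeded RNG gives the same coverage deterministically; a sketch against the rand 0.8-style API these tests appear to use (the seed value is arbitrary):

```rust
use rand::{rngs::StdRng, Rng, SeedableRng};

fn main() {
    // A fixed seed makes every "random" length and payload identical across
    // runs, so a failing case can be replayed exactly.
    let mut rng = StdRng::seed_from_u64(0xBA5E64);
    let len: usize = rng.gen_range(100..20_000);
    let payload: Vec<u8> = (0..len).map(|_| rng.gen()).collect();
    assert_eq!(payload.len(), len);
}
```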
-fn retry_interrupted_write_all(w: &mut W, buf: &[u8]) -> io::Result<()> { - let mut bytes_consumed = 0; - - while bytes_consumed < buf.len() { - let res = w.write(&buf[bytes_consumed..]); - - match res { - Ok(len) => bytes_consumed += len, - Err(e) => match e.kind() { - io::ErrorKind::Interrupted => continue, - _ => return Err(e), - }, - } - } - - Ok(()) -} - -fn do_encode_random_config_matches_normal_encode(max_input_len: usize) { - let mut rng = rand::thread_rng(); - let mut orig_data = Vec::::new(); - let mut stream_encoded = Vec::::new(); - let mut normal_encoded = String::new(); - - for _ in 0..1_000 { - orig_data.clear(); - stream_encoded.clear(); - normal_encoded.clear(); - - let orig_len: usize = rng.gen_range(100..20_000); - for _ in 0..orig_len { - orig_data.push(rng.gen()); - } - - // encode the normal way - let engine = random_engine(&mut rng); - engine.encode_string(&orig_data, &mut normal_encoded); - - // encode via the stream encoder - { - let mut stream_encoder = EncoderWriter::new(&mut stream_encoded, &engine); - let mut bytes_consumed = 0; - while bytes_consumed < orig_len { - let input_len: usize = - cmp::min(rng.gen_range(0..max_input_len), orig_len - bytes_consumed); - - // write a little bit of the data - stream_encoder - .write_all(&orig_data[bytes_consumed..bytes_consumed + input_len]) - .unwrap(); - - bytes_consumed += input_len; - } - - let _ = stream_encoder.finish().unwrap(); - - assert_eq!(orig_len, bytes_consumed); - } - - assert_eq!(normal_encoded, str::from_utf8(&stream_encoded).unwrap()); - } -} - -/// A `Write` implementation that returns Interrupted some fraction of the time, randomly. -struct InterruptingWriter<'a, W: 'a + Write, R: 'a + Rng> { - w: &'a mut W, - rng: &'a mut R, - /// In [0, 1]. If a random number in [0, 1] is `<= threshold`, `Write` methods will return - /// an `Interrupted` error - fraction: f64, -} - -impl<'a, W: Write, R: Rng> Write for InterruptingWriter<'a, W, R> { - fn write(&mut self, buf: &[u8]) -> io::Result { - if self.rng.gen_range(0.0..1.0) <= self.fraction { - return Err(io::Error::new(io::ErrorKind::Interrupted, "interrupted")); - } - - self.w.write(buf) - } - - fn flush(&mut self) -> io::Result<()> { - if self.rng.gen_range(0.0..1.0) <= self.fraction { - return Err(io::Error::new(io::ErrorKind::Interrupted, "interrupted")); - } - - self.w.flush() - } -} - -/// A `Write` implementation that sometimes will only write part of its input. -struct PartialInterruptingWriter<'a, W: 'a + Write, R: 'a + Rng> { - w: &'a mut W, - rng: &'a mut R, - /// In [0, 1]. If a random number in [0, 1] is `<= threshold`, `write()` will write all its - /// input. 
Otherwise, it will write a random substring - full_input_fraction: f64, - no_interrupt_fraction: f64, -} - -impl<'a, W: Write, R: Rng> Write for PartialInterruptingWriter<'a, W, R> { - fn write(&mut self, buf: &[u8]) -> io::Result { - if self.rng.gen_range(0.0..1.0) > self.no_interrupt_fraction { - return Err(io::Error::new(io::ErrorKind::Interrupted, "interrupted")); - } - - if self.rng.gen_range(0.0..1.0) <= self.full_input_fraction || buf.is_empty() { - // pass through the buf untouched - self.w.write(buf) - } else { - // only use a prefix of it - self.w - .write(&buf[0..(self.rng.gen_range(0..(buf.len() - 1)))]) - } - } - - fn flush(&mut self) -> io::Result<()> { - self.w.flush() - } -} diff --git a/vendor/base64/src/write/mod.rs b/vendor/base64/src/write/mod.rs deleted file mode 100644 index 2a617db9de7b2f..00000000000000 --- a/vendor/base64/src/write/mod.rs +++ /dev/null @@ -1,11 +0,0 @@ -//! Implementations of `io::Write` to transparently handle base64. -mod encoder; -mod encoder_string_writer; - -pub use self::{ - encoder::EncoderWriter, - encoder_string_writer::{EncoderStringWriter, StrConsumer}, -}; - -#[cfg(test)] -mod encoder_tests; diff --git a/vendor/base64/tests/encode.rs b/vendor/base64/tests/encode.rs deleted file mode 100644 index 9d6944741aea4c..00000000000000 --- a/vendor/base64/tests/encode.rs +++ /dev/null @@ -1,77 +0,0 @@ -use base64::{ - alphabet::URL_SAFE, engine::general_purpose::PAD, engine::general_purpose::STANDARD, *, -}; - -fn compare_encode(expected: &str, target: &[u8]) { - assert_eq!(expected, STANDARD.encode(target)); -} - -#[test] -fn encode_all_ascii() { - let ascii: Vec = (0..=127).collect(); - - compare_encode( - "AAECAwQFBgcICQoLDA0ODxAREhMUFRYXGBkaGxwdHh8gISIjJCUmJygpKissLS4vMDEyMzQ1Njc4OTo7P\ - D0+P0BBQkNERUZHSElKS0xNTk9QUVJTVFVWV1hZWltcXV5fYGFiY2RlZmdoaWprbG1ub3BxcnN0dXZ3eHl6e3x9fn8\ - =", - &ascii, - ); -} - -#[test] -fn encode_all_bytes() { - let bytes: Vec = (0..=255).collect(); - - compare_encode( - "AAECAwQFBgcICQoLDA0ODxAREhMUFRYXGBkaGxwdHh8gISIjJCUmJygpKissLS4vMDEyMzQ1Njc4OTo7P\ - D0+P0BBQkNERUZHSElKS0xNTk9QUVJTVFVWV1hZWltcXV5fYGFiY2RlZmdoaWprbG1ub3BxcnN0dXZ3eHl6e3x9fn\ - +AgYKDhIWGh4iJiouMjY6PkJGSk5SVlpeYmZqbnJ2en6ChoqOkpaanqKmqq6ytrq+wsbKztLW2t7i5uru8vb6\ - /wMHCw8TFxsfIycrLzM3Oz9DR0tPU1dbX2Nna29zd3t/g4eLj5OXm5+jp6uvs7e7v8PHy8/T19vf4+fr7/P3+/w==", - &bytes, - ); -} - -#[test] -fn encode_all_bytes_url() { - let bytes: Vec = (0..=255).collect(); - - assert_eq!( - "AAECAwQFBgcICQoLDA0ODxAREhMUFRYXGBkaGxwdHh8gISIjJCUmJygpKissLS4vMDEyMzQ1Njc4OTo7PD0\ - -P0BBQkNERUZHSElKS0xNTk9QUVJTVFVWV1hZWltcXV5fYGFiY2RlZmdoaWprbG1ub3BxcnN0dXZ3eHl6e3x9fn\ - -AgYKDhIWGh4iJiouMjY6PkJGSk5SVlpeYmZqbnJ2en6ChoqOkpaanqKmqq6ytrq\ - -wsbKztLW2t7i5uru8vb6_wMHCw8TFxsfIycrLzM3Oz9DR0tPU1dbX2Nna29zd3t_g4eLj5OXm5-jp6uvs7e7v8PHy\ - 8_T19vf4-fr7_P3-_w==", - &engine::GeneralPurpose::new(&URL_SAFE, PAD).encode(bytes) - ); -} - -#[test] -fn encoded_len_unpadded() { - assert_eq!(0, encoded_len(0, false).unwrap()); - assert_eq!(2, encoded_len(1, false).unwrap()); - assert_eq!(3, encoded_len(2, false).unwrap()); - assert_eq!(4, encoded_len(3, false).unwrap()); - assert_eq!(6, encoded_len(4, false).unwrap()); - assert_eq!(7, encoded_len(5, false).unwrap()); - assert_eq!(8, encoded_len(6, false).unwrap()); - assert_eq!(10, encoded_len(7, false).unwrap()); -} - -#[test] -fn encoded_len_padded() { - assert_eq!(0, encoded_len(0, true).unwrap()); - assert_eq!(4, encoded_len(1, true).unwrap()); - assert_eq!(4, encoded_len(2, true).unwrap()); - assert_eq!(4, encoded_len(3, true).unwrap()); 
- assert_eq!(8, encoded_len(4, true).unwrap()); - assert_eq!(8, encoded_len(5, true).unwrap()); - assert_eq!(8, encoded_len(6, true).unwrap()); - assert_eq!(12, encoded_len(7, true).unwrap()); -} -#[test] -fn encoded_len_overflow() { - let max_size = usize::MAX / 4 * 3 + 2; - assert_eq!(2, max_size % 3); - assert_eq!(Some(usize::MAX), encoded_len(max_size, false)); - assert_eq!(None, encoded_len(max_size + 1, false)); -} diff --git a/vendor/base64/tests/tests.rs b/vendor/base64/tests/tests.rs deleted file mode 100644 index eceff40d6a33be..00000000000000 --- a/vendor/base64/tests/tests.rs +++ /dev/null @@ -1,161 +0,0 @@ -use rand::{Rng, SeedableRng}; - -use base64::engine::{general_purpose::STANDARD, Engine}; -use base64::*; - -use base64::engine::general_purpose::{GeneralPurpose, NO_PAD}; - -// generate random contents of the specified length and test encode/decode roundtrip -fn roundtrip_random( - byte_buf: &mut Vec, - str_buf: &mut String, - engine: &E, - byte_len: usize, - approx_values_per_byte: u8, - max_rounds: u64, -) { - // let the short ones be short but don't let it get too crazy large - let num_rounds = calculate_number_of_rounds(byte_len, approx_values_per_byte, max_rounds); - let mut r = rand::rngs::SmallRng::from_entropy(); - let mut decode_buf = Vec::new(); - - for _ in 0..num_rounds { - byte_buf.clear(); - str_buf.clear(); - decode_buf.clear(); - while byte_buf.len() < byte_len { - byte_buf.push(r.gen::()); - } - - engine.encode_string(&byte_buf, str_buf); - engine.decode_vec(&str_buf, &mut decode_buf).unwrap(); - - assert_eq!(byte_buf, &decode_buf); - } -} - -fn calculate_number_of_rounds(byte_len: usize, approx_values_per_byte: u8, max: u64) -> u64 { - // don't overflow - let mut prod = approx_values_per_byte as u64; - - for _ in 0..byte_len { - if prod > max { - return max; - } - - prod = prod.saturating_mul(prod); - } - - prod -} - -#[test] -fn roundtrip_random_short_standard() { - let mut byte_buf: Vec = Vec::new(); - let mut str_buf = String::new(); - - for input_len in 0..40 { - roundtrip_random(&mut byte_buf, &mut str_buf, &STANDARD, input_len, 4, 10000); - } -} - -#[test] -fn roundtrip_random_with_fast_loop_standard() { - let mut byte_buf: Vec = Vec::new(); - let mut str_buf = String::new(); - - for input_len in 40..100 { - roundtrip_random(&mut byte_buf, &mut str_buf, &STANDARD, input_len, 4, 1000); - } -} - -#[test] -fn roundtrip_random_short_no_padding() { - let mut byte_buf: Vec = Vec::new(); - let mut str_buf = String::new(); - - let engine = GeneralPurpose::new(&alphabet::STANDARD, NO_PAD); - for input_len in 0..40 { - roundtrip_random(&mut byte_buf, &mut str_buf, &engine, input_len, 4, 10000); - } -} - -#[test] -fn roundtrip_random_no_padding() { - let mut byte_buf: Vec = Vec::new(); - let mut str_buf = String::new(); - - let engine = GeneralPurpose::new(&alphabet::STANDARD, NO_PAD); - - for input_len in 40..100 { - roundtrip_random(&mut byte_buf, &mut str_buf, &engine, input_len, 4, 1000); - } -} - -#[test] -fn roundtrip_decode_trailing_10_bytes() { - // This is a special case because we decode 8 byte blocks of input at a time as much as we can, - // ideally unrolled to 32 bytes at a time, in stages 1 and 2. Since we also write a u64's worth - // of bytes (8) to the output, we always write 2 garbage bytes that then will be overwritten by - // the NEXT block. However, if the next block only contains 2 bytes, it will decode to 1 byte, - // and therefore be too short to cover up the trailing 2 garbage bytes. Thus, we have stage 3 - // to handle that case. 
- - for num_quads in 0..25 { - let mut s: String = "ABCD".repeat(num_quads); - s.push_str("EFGHIJKLZg"); - - let engine = GeneralPurpose::new(&alphabet::STANDARD, NO_PAD); - let decoded = engine.decode(&s).unwrap(); - assert_eq!(num_quads * 3 + 7, decoded.len()); - - assert_eq!(s, engine.encode(&decoded)); - } -} - -#[test] -fn display_wrapper_matches_normal_encode() { - let mut bytes = Vec::::with_capacity(256); - - for i in 0..255 { - bytes.push(i); - } - bytes.push(255); - - assert_eq!( - STANDARD.encode(&bytes), - format!("{}", display::Base64Display::new(&bytes, &STANDARD)) - ); -} - -#[test] -fn encode_engine_slice_error_when_buffer_too_small() { - for num_triples in 1..100 { - let input = "AAA".repeat(num_triples); - let mut vec = vec![0; (num_triples - 1) * 4]; - assert_eq!( - EncodeSliceError::OutputSliceTooSmall, - STANDARD.encode_slice(&input, &mut vec).unwrap_err() - ); - vec.push(0); - assert_eq!( - EncodeSliceError::OutputSliceTooSmall, - STANDARD.encode_slice(&input, &mut vec).unwrap_err() - ); - vec.push(0); - assert_eq!( - EncodeSliceError::OutputSliceTooSmall, - STANDARD.encode_slice(&input, &mut vec).unwrap_err() - ); - vec.push(0); - assert_eq!( - EncodeSliceError::OutputSliceTooSmall, - STANDARD.encode_slice(&input, &mut vec).unwrap_err() - ); - vec.push(0); - assert_eq!( - num_triples * 4, - STANDARD.encode_slice(&input, &mut vec).unwrap() - ); - } -} diff --git a/vendor/bindgen/.cargo-checksum.json b/vendor/bindgen/.cargo-checksum.json deleted file mode 100644 index 3fd0aa70ed81ad..00000000000000 --- a/vendor/bindgen/.cargo-checksum.json +++ /dev/null @@ -1 +0,0 @@ -{"files":{".cargo_vcs_info.json":"b5c3dc198affdf569150b54e8b50a92f1f8dbc4127f07bcd2728f55570394a15","Cargo.lock":"775138b42c9ceb7d012985ea43cb12cd32c325c9c5af2edd5d9d4913d7a44a07","Cargo.toml":"f72dfce465f8e986f51506cda6090d754057b55318712c6c13ef96230d9b1a42","Cargo.toml.orig":"26d7094dff93b9e475855e6e97ccc03f77844e5198eb933ce873049af9b6bca7","LICENSE":"c23953d9deb0a3312dbeaf6c128a657f3591acee45067612fa68405eaa4525db","README.md":"79b56b76b0e4c133c704f21776329b02228279a0d5b90f5aa401e51fb59b43bb","build.rs":"f7a10af0a21662e104e0058da7e3471a20be328eef6c7c41988525be90fdfe92","callbacks.rs":"de8bbe96753b6c5107984f2d26f13abf8fd2dd914ca688a989bf90c8e63c435f","clang.rs":"e991300ce9b1f0b9fb4a0b4bd32e899b6cfa546f034858f08bf57678b3d7044c","codegen/bitfield_unit.rs":"bcec32a8289eb8643bdc7aae0636aecac9f28e83895ebd330a4b0d1d3468bb4c","codegen/bitfield_unit_raw_ref_macros.rs":"cd9a02db7a0f0d2db79dc6b54c64eec2b438d2124928127266a5e30cf451696e","codegen/bitfield_unit_tests.rs":"9915cb19bf37fc1013fc72753bae153f7c249280daec94b25fb461f9936dffa4","codegen/dyngen.rs":"1b42f7fa9fb65ff2fa898b289a3b934d7b21c5ba6c1b3e37aa97f0faa87769a0","codegen/error.rs":"67680c4d171d63848d9eb4ddd5f100be7463564e8c1c9203fefc0e61a19dfdec","codegen/helpers.rs":"2f2873a8bf98a7583c30d42758f0c229ae7d7a6ee71b89bb33a79ffa73ab4ab2","codegen/impl_debug.rs":"dff9ca17a9397f327841f6321056cc0bdffe1f52106ec08879bcc74c22d4f383","codegen/impl_partialeq.rs":"60623e75c079ccadc4b928acd2cf78449db84591b720e242fc10ee1417678981","codegen/mod.rs":"a865463a58ef01a49079118f648b8e139eb63790b7d511d03588e0ac52bf35ee","codegen/postprocessing/merge_extern_blocks.rs":"3e244fe62abcadcb6dae069c37d21220f5351dc7b8c33c5576a5193731933c4c","codegen/postprocessing/mod.rs":"160a6d6701cabf2514e23570df1bd1b648c909cc27b7c583f21d98fe0c16722e","codegen/postprocessing/sort_semantically.rs":"5099e8fc134a92cb72b585bd95854a52ff81e2f5307c3b83617d83e7408302ee","codegen/serialize.rs":"d57eb31ba0fda82
5241f886336279573050b396e52badf3886e0bc76a15110ad","codegen/struct_layout.rs":"78b38cc064491d854516dcf36e268b45e549d2bd3150ea4cb390529f656b2132","deps.rs":"297dcc2be53af1a3ea4f77e16902a641f3e6f0baad09c06a6ea26050a0281c18","diagnostics.rs":"9c80043ac9fa8f683019577f311853a0d5929e41a95b3255736f80105914cdfa","extra_assertions.rs":"1596b7e7f031714dc699ebda135e795f1ecfc971ce9de6861a3c00e77fcef011","features.rs":"ad17b96bf6b97cecb33d2b4710261341b1e3828263531708157d2b206af65c77","ir/analysis/derive.rs":"f0bd1b6362ed9a8e8bc6933bf1af29cceabde84015dc34d76f34d3e4c56a4103","ir/analysis/has_destructor.rs":"64dc142ff8c56db94b464f0f03ecd25317c8c1d6cd2a7304d2d3a0ff0a0db890","ir/analysis/has_float.rs":"9fb88d05c5920e9000e5cb6e87775c0d1042a6b11205577e281ba281e6e6acdb","ir/analysis/has_type_param_in_array.rs":"0975d1ce43bcba97eb303635118e74d494d46ac67cf5ee53faf6f6584a556cef","ir/analysis/has_vtable.rs":"3e1a807feccb55d6148e81512417b56cd1d70a70783508b65a3ff2abde461d2b","ir/analysis/mod.rs":"93edca96d765dfa19ac231198027b0ba48c623502a8be1dbf799a241cd6b304b","ir/analysis/sizedness.rs":"0b78e70737e038ebdee2c3d195194a060c9287000b9059ded0686728a89b4ff2","ir/analysis/template_params.rs":"a8dfd3e02b1745a5b7c6faa16309bd0b8a88d76b7fedca27c322782cc9e77177","ir/annotations.rs":"8397ced62808fe99dfdde35792cd8b2389e7828d752a6c8aa3a70c1e14595e11","ir/comment.rs":"57863204d329ae82872ecc4829cc299969ff07da3a32a4a13d7d84429f55b84d","ir/comp.rs":"6dce8c17967a2219ccf8ac2bf11ca97a046ca28df258f47e1f2cc8c34e2237ed","ir/context.rs":"59b73ef695f98adda0b54828a820739121d0f2c869f06b75a0cbc1a84c3ca887","ir/derive.rs":"09860cffec0ebecce31da0c6c9ea0cf9a0d4784262ff4eb16ea459c0d0782ac9","ir/dot.rs":"8b8f6dd13e662fcb4114949025cb43467b34fa4998a3371c101db5dd82688f44","ir/enum_ty.rs":"5d7ae2e3de172d9812425e8cc6e30d559b0743620b3b09f7d72f3b05a7e1ce98","ir/function.rs":"a8296565624f1be38eaa01cea638e39eb1e2ee9de6859fc63a070f5a190c4c8c","ir/int.rs":"1bf1e4d87eca13ee2fc38ff4d56c266f303f188796f5c0d290f53162798d2d01","ir/item.rs":"d11623c01e1a9128063be4e4bde5c459a0a2f10fd9e323fced3cd4bc8d394b6c","ir/item_kind.rs":"799fab994b5ed35045786a68003c2c12b6601cf3b07e8ccc0b9acd6f921217e0","ir/layout.rs":"5b2958eb3d5e5d96bd85ce02925d936e89d3147c62cd225e3a0ae7f042b74fca","ir/mod.rs":"a3b98b1732111a980a795c72eaf1e09101e842ef2de76b4f2d4a7857f8d4cee4","ir/module.rs":"617867617ebd7e56157a9ba057441ce11a33c25138a1da64646f44ccaae7c762","ir/objc.rs":"092c7f32cec4191aa6235e4554420ab2053e0c7fec5ece016a7ed303763e8547","ir/template.rs":"1114c0924323f8b30bb32dbb3f6730dd7f5bc1f0771ad5099738e8e57111c07d","ir/traversal.rs":"3ebde94ead0fe69d51541ab61d700c7c1f6382574e4c110b8c7fe3e2c6218f19","ir/ty.rs":"e2516217fa439e65ef38dce2acd712a89879045e21e64b6f317a81b0d22927dc","ir/var.rs":"2a94decd3adfdccd3bd0015b460d180838e3c92b122b632eed44032b80cad120","lib.rs":"fe023071fb5e39938173ee7e160a4c7f6b735afc71c8d164c397842d69b055de","log_stubs.rs":"a636d59af2fd3745c2e416e1ab8a1e1a3888ea84cd4657a321ce22f15e0c5a87","options/as_args.rs":"a1a5e7f0dde82590371fc1a9ea5fde7f3e2252530ca74d98bb49a8ce06cc864f","options/cli.rs":"ce154bf2b5dfb6771e90526424b94169f6b0ac3a4ec035772a835827795eaaae","options/helpers.rs":"f4a7681e29b2dcc3be9249478c499d685b9e29d4f4ca4ae8bff7a91668cd8f15","options/mod.rs":"beaaccfdf79a309bc272fc72a9278568d9e0f4dff1edd71fceffc3bf1e9baec2","parse.rs":"fce3616e0464aa7414888e5d00d4df18c83bb3034a1c807d36a07a3c586e475a","regex_set.rs":"d8995adb9e5cecc2d738e662a62d5081150bf967cb67e1206070e22b7265578a","time.rs":"1429af446b2b38c70ceec82c4202d4822c618cad47ba502dce72dbdc4cbb425e"},"package":"99377
6b509cfb49c750f11b8f07a46fa23e0a1386ffc01fb1e7d343efc387895"} \ No newline at end of file diff --git a/vendor/bindgen/.cargo_vcs_info.json b/vendor/bindgen/.cargo_vcs_info.json deleted file mode 100644 index 786f0e3e412926..00000000000000 --- a/vendor/bindgen/.cargo_vcs_info.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "git": { - "sha1": "d874de8d646d9b8a3e7ba2db2bcd52f2fba8f1f5" - }, - "path_in_vcs": "bindgen" -} \ No newline at end of file diff --git a/vendor/bindgen/Cargo.lock b/vendor/bindgen/Cargo.lock deleted file mode 100644 index 0778c7fa950037..00000000000000 --- a/vendor/bindgen/Cargo.lock +++ /dev/null @@ -1,485 +0,0 @@ -# This file is automatically @generated by Cargo. -# It is not intended for manual editing. -version = 3 - -[[package]] -name = "aho-corasick" -version = "1.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916" -dependencies = [ - "memchr", -] - -[[package]] -name = "annotate-snippets" -version = "0.11.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24e35ed54e5ea7997c14ed4c70ba043478db1112e98263b3b035907aa197d991" -dependencies = [ - "anstyle", - "unicode-width", -] - -[[package]] -name = "anstyle" -version = "1.0.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "55cc3b69f167a1ef2e161439aa98aed94e6028e5f9a59be9a6ffb47aef1651f9" - -[[package]] -name = "bindgen" -version = "0.72.1" -dependencies = [ - "annotate-snippets", - "bitflags 2.2.1", - "cexpr", - "clang-sys", - "clap", - "clap_complete", - "itertools", - "log", - "prettyplease", - "proc-macro2", - "quote", - "regex", - "rustc-hash", - "shlex", - "syn 2.0.90", -] - -[[package]] -name = "bitflags" -version = "1.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" - -[[package]] -name = "bitflags" -version = "2.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24a6904aef64d73cf10ab17ebace7befb918b82164785cb89907993be7f83813" - -[[package]] -name = "cexpr" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6fac387a98bb7c37292057cffc56d62ecb629900026402633ae9160df93a8766" -dependencies = [ - "nom", -] - -[[package]] -name = "cfg-if" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" - -[[package]] -name = "clang-sys" -version = "1.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b023947811758c97c59bf9d1c188fd619ad4718dcaa767947df1cadb14f39f4" -dependencies = [ - "glob", - "libc", - "libloading", -] - -[[package]] -name = "clap" -version = "4.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f13b9c79b5d1dd500d20ef541215a6423c75829ef43117e1b4d17fd8af0b5d76" -dependencies = [ - "bitflags 1.3.2", - "clap_derive", - "clap_lex", - "is-terminal", - "once_cell", - "strsim", - "termcolor", -] - -[[package]] -name = "clap_complete" -version = "4.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01c22dcfb410883764b29953103d9ef7bb8fe21b3fa1158bc99986c2067294bd" -dependencies = [ - "clap", -] - -[[package]] -name = "clap_derive" -version = "4.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"684a277d672e91966334af371f1a7b5833f9aa00b07c84e92fbce95e00208ce8" -dependencies = [ - "heck", - "proc-macro-error", - "proc-macro2", - "quote", - "syn 1.0.109", -] - -[[package]] -name = "clap_lex" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "783fe232adfca04f90f56201b26d79682d4cd2625e0bc7290b95123afe558ade" -dependencies = [ - "os_str_bytes", -] - -[[package]] -name = "either" -version = "1.13.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60b1af1c220855b6ceac025d3f6ecdd2b7c4894bfe9cd9bda4fbb4bc7c0d4cf0" - -[[package]] -name = "glob" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b" - -[[package]] -name = "heck" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" - -[[package]] -name = "hermit-abi" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fbf6a919d6cf397374f7dfeeea91d974c7c0a7221d0d0f4f20d859d329e53fcc" - -[[package]] -name = "is-terminal" -version = "0.4.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "261f68e344040fbd0edea105bef17c66edf46f984ddb1115b775ce31be948f4b" -dependencies = [ - "hermit-abi", - "libc", - "windows-sys", -] - -[[package]] -name = "itertools" -version = "0.13.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "413ee7dfc52ee1a4949ceeb7dbc8a33f2d6c088194d9f922fb8318faf1f01186" -dependencies = [ - "either", -] - -[[package]] -name = "libc" -version = "0.2.167" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09d6582e104315a817dff97f75133544b2e094ee22447d2acf4a74e189ba06fc" - -[[package]] -name = "libloading" -version = "0.8.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc2f4eb4bc735547cfed7c0a4922cbd04a4655978c09b54f1f7b228750664c34" -dependencies = [ - "cfg-if", - "windows-targets", -] - -[[package]] -name = "log" -version = "0.4.22" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7a70ba024b9dc04c27ea2f0c0548feb474ec5c54bba33a7f72f873a39d07b24" - -[[package]] -name = "memchr" -version = "2.7.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3" - -[[package]] -name = "minimal-lexical" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" - -[[package]] -name = "nom" -version = "7.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d273983c5a657a70a3e8f2a01329822f3b8c8172b73826411a55751e404a0a4a" -dependencies = [ - "memchr", - "minimal-lexical", -] - -[[package]] -name = "once_cell" -version = "1.20.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1261fe7e33c73b354eab43b1273a57c8f967d0391e80353e51f764ac02cf6775" - -[[package]] -name = "os_str_bytes" -version = "6.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b7820b9daea5457c9f21c69448905d723fbd21136ccf521748f23fd49e723ee" - -[[package]] -name = "prettyplease" -version = "0.2.25" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"64d1ec885c64d0457d564db4ec299b2dae3f9c02808b8ad9c3a089c591b18033" -dependencies = [ - "proc-macro2", - "syn 2.0.90", -] - -[[package]] -name = "proc-macro-error" -version = "1.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" -dependencies = [ - "proc-macro-error-attr", - "proc-macro2", - "quote", - "syn 1.0.109", - "version_check", -] - -[[package]] -name = "proc-macro-error-attr" -version = "1.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" -dependencies = [ - "proc-macro2", - "quote", - "version_check", -] - -[[package]] -name = "proc-macro2" -version = "1.0.92" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37d3544b3f2748c54e147655edb5025752e2303145b5aefb3c3ea2c78b973bb0" -dependencies = [ - "unicode-ident", -] - -[[package]] -name = "quote" -version = "1.0.37" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b5b9d34b8991d19d98081b46eacdd8eb58c6f2b201139f7c5f643cc155a633af" -dependencies = [ - "proc-macro2", -] - -[[package]] -name = "regex" -version = "1.11.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b544ef1b4eac5dc2db33ea63606ae9ffcfac26c1416a2806ae0bf5f56b201191" -dependencies = [ - "aho-corasick", - "memchr", - "regex-automata", - "regex-syntax", -] - -[[package]] -name = "regex-automata" -version = "0.4.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "809e8dc61f6de73b46c85f4c96486310fe304c434cfa43669d7b40f711150908" -dependencies = [ - "aho-corasick", - "memchr", - "regex-syntax", -] - -[[package]] -name = "regex-syntax" -version = "0.8.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c" - -[[package]] -name = "rustc-hash" -version = "2.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c7fb8039b3032c191086b10f11f319a6e99e1e82889c5cc6046f515c9db1d497" - -[[package]] -name = "shlex" -version = "1.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" - -[[package]] -name = "strsim" -version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" - -[[package]] -name = "syn" -version = "1.0.109" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" -dependencies = [ - "proc-macro2", - "quote", - "unicode-ident", -] - -[[package]] -name = "syn" -version = "2.0.90" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "919d3b74a5dd0ccd15aeb8f93e7006bd9e14c295087c9896a110f490752bcf31" -dependencies = [ - "proc-macro2", - "quote", - "unicode-ident", -] - -[[package]] -name = "termcolor" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be55cf8942feac5c765c2c993422806843c9a9a45d4d5c407ad6dd2ea95eb9b6" -dependencies = [ - "winapi-util", -] - -[[package]] -name = "unicode-ident" -version = "1.0.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "adb9e6ca4f869e1180728b7950e35922a7fc6397f7b641499e8f3ef06e50dc83" - -[[package]] -name = "unicode-width" 
-version = "0.1.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7dd6e30e90baa6f72411720665d41d89b9a3d039dc45b8faea1ddd07f617f6af" - -[[package]] -name = "version_check" -version = "0.9.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" - -[[package]] -name = "winapi" -version = "0.3.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" -dependencies = [ - "winapi-i686-pc-windows-gnu", - "winapi-x86_64-pc-windows-gnu", -] - -[[package]] -name = "winapi-i686-pc-windows-gnu" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" - -[[package]] -name = "winapi-util" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70ec6ce85bb158151cae5e5c87f95a8e97d2c0c4b001223f33a334e3ce5de178" -dependencies = [ - "winapi", -] - -[[package]] -name = "winapi-x86_64-pc-windows-gnu" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" - -[[package]] -name = "windows-sys" -version = "0.52.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" -dependencies = [ - "windows-targets", -] - -[[package]] -name = "windows-targets" -version = "0.52.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973" -dependencies = [ - "windows_aarch64_gnullvm", - "windows_aarch64_msvc", - "windows_i686_gnu", - "windows_i686_gnullvm", - "windows_i686_msvc", - "windows_x86_64_gnu", - "windows_x86_64_gnullvm", - "windows_x86_64_msvc", -] - -[[package]] -name = "windows_aarch64_gnullvm" -version = "0.52.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" - -[[package]] -name = "windows_aarch64_msvc" -version = "0.52.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" - -[[package]] -name = "windows_i686_gnu" -version = "0.52.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" - -[[package]] -name = "windows_i686_gnullvm" -version = "0.52.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" - -[[package]] -name = "windows_i686_msvc" -version = "0.52.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" - -[[package]] -name = "windows_x86_64_gnu" -version = "0.52.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" - -[[package]] -name = "windows_x86_64_gnullvm" -version = "0.52.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" - -[[package]] -name = "windows_x86_64_msvc" -version = "0.52.6" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" diff --git a/vendor/bindgen/Cargo.toml b/vendor/bindgen/Cargo.toml deleted file mode 100644 index b26f28a7cf85cd..00000000000000 --- a/vendor/bindgen/Cargo.toml +++ /dev/null @@ -1,189 +0,0 @@ -# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO -# -# When uploading crates to the registry Cargo will automatically -# "normalize" Cargo.toml files for maximal compatibility -# with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g., crates.io) dependencies. -# -# If you are reading this file be aware that the original Cargo.toml -# will likely look very different (and much more reasonable). -# See Cargo.toml.orig for the original contents. - -[package] -edition = "2021" -rust-version = "1.70.0" -name = "bindgen" -version = "0.72.1" -authors = [ - "Jyun-Yan You ", - "Emilio Cobos Álvarez ", - "Nick Fitzgerald ", - "The Servo project developers", -] -build = "build.rs" -autolib = false -autobins = false -autoexamples = false -autotests = false -autobenches = false -description = "Automatically generates Rust FFI bindings to C and C++ libraries." -homepage = "https://rust-lang.github.io/rust-bindgen/" -documentation = "https://docs.rs/bindgen" -readme = "README.md" -keywords = [ - "bindings", - "ffi", - "code-generation", -] -categories = [ - "external-ffi-bindings", - "development-tools::ffi", -] -license = "BSD-3-Clause" -repository = "https://github.com/rust-lang/rust-bindgen" - -[package.metadata.docs.rs] -features = ["experimental"] - -[package.metadata.release] -pre-release-hook = [ - "../node_modules/doctoc/doctoc.js", - "../CHANGELOG.md", -] -release = true - -[[package.metadata.release.pre-release-replacements]] -file = "../CHANGELOG.md" -replace = """ -# Unreleased -## Added -## Changed -## Removed -## Fixed -## Security - -# {{version}} ({{date}})""" -search = "# Unreleased" - -[features] -__cli = [ - "dep:clap", - "dep:clap_complete", -] -__testing_only_extra_assertions = [] -__testing_only_libclang_16 = [] -__testing_only_libclang_9 = [] -default = [ - "logging", - "prettyplease", - "runtime", -] -experimental = ["dep:annotate-snippets"] -logging = ["dep:log"] -runtime = ["clang-sys/runtime"] -static = ["clang-sys/static"] - -[lib] -name = "bindgen" -path = "lib.rs" - -[dependencies.annotate-snippets] -version = "0.11.4" -optional = true - -[dependencies.bitflags] -version = "2.2.1" - -[dependencies.cexpr] -version = "0.6" - -[dependencies.clang-sys] -version = "1" -features = ["clang_11_0"] - -[dependencies.clap] -version = "4" -features = ["derive"] -optional = true - -[dependencies.clap_complete] -version = "4" -optional = true - -[dependencies.itertools] -version = ">=0.10,<0.14" -default-features = false - -[dependencies.log] -version = "0.4" -optional = true - -[dependencies.prettyplease] -version = "0.2.7" -features = ["verbatim"] -optional = true - -[dependencies.proc-macro2] -version = "1.0.80" - -[dependencies.quote] -version = "1" -default-features = false - -[dependencies.regex] -version = "1.5.3" -features = [ - "std", - "unicode-perl", -] -default-features = false - -[dependencies.rustc-hash] -version = "2.1.0" - -[dependencies.shlex] -version = "1" - -[dependencies.syn] -version = "2.0" -features = [ - "full", - "extra-traits", - "visit-mut", -] - -[lints.clippy] -cast_possible_truncation = "allow" -cast_possible_wrap = "allow" -cast_precision_loss = "allow" -cast_sign_loss = "allow" -default_trait_access = 
"allow" -enum_glob_use = "allow" -ignored_unit_patterns = "allow" -implicit_hasher = "allow" -items_after_statements = "allow" -match_same_arms = "allow" -maybe_infinite_iter = "allow" -missing_errors_doc = "allow" -missing_panics_doc = "allow" -module_name_repetitions = "allow" -must_use_candidate = "allow" -redundant_closure_for_method_calls = "allow" -return_self_not_must_use = "allow" -similar_names = "allow" -struct_excessive_bools = "allow" -struct_field_names = "allow" -too_many_lines = "allow" -trivially_copy_pass_by_ref = "allow" -unnecessary_wraps = "allow" -unreadable_literal = "allow" -unused_self = "allow" -used_underscore_binding = "allow" -wildcard_imports = "allow" - -[lints.clippy.pedantic] -level = "warn" -priority = -1 - -[lints.rust] -unused_qualifications = "warn" diff --git a/vendor/bindgen/LICENSE b/vendor/bindgen/LICENSE deleted file mode 100644 index 62f55f45a1d1f7..00000000000000 --- a/vendor/bindgen/LICENSE +++ /dev/null @@ -1,29 +0,0 @@ -BSD 3-Clause License - -Copyright (c) 2013, Jyun-Yan You -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -* Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. - -* Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -* Neither the name of the copyright holder nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/vendor/bindgen/README.md b/vendor/bindgen/README.md deleted file mode 100644 index b35dee3bef41f7..00000000000000 --- a/vendor/bindgen/README.md +++ /dev/null @@ -1,89 +0,0 @@ -[![crates.io](https://img.shields.io/crates/v/bindgen.svg)](https://crates.io/crates/bindgen) -[![docs.rs](https://docs.rs/bindgen/badge.svg)](https://docs.rs/bindgen/) - -# `bindgen` - -**`bindgen` automatically generates Rust FFI bindings to C (and some C++) libraries.** - -For example, given the C header `doggo.h`: - -```c -typedef struct Doggo { - int many; - char wow; -} Doggo; - -void eleven_out_of_ten_majestic_af(Doggo* pupper); -``` - -`bindgen` produces Rust FFI code allowing you to call into the `doggo` library's -functions and use its types: - -```rust -/* automatically generated by rust-bindgen 0.99.9 */ - -#[repr(C)] -pub struct Doggo { - pub many: ::std::os::raw::c_int, - pub wow: ::std::os::raw::c_char, -} - -extern "C" { - pub fn eleven_out_of_ten_majestic_af(pupper: *mut Doggo); -} -``` - -## Users Guide - -[📚 Read the `bindgen` users guide here! 📚](https://rust-lang.github.io/rust-bindgen) - -## MSRV - -The `bindgen` minimum supported Rust version is **1.70.0**. - -The `bindgen-cli` minimum supported Rust version is **1.70.0**. - -No MSRV bump policy has been established yet, so MSRV may increase in any release. - -The MSRV is the minimum Rust version that can be used to *compile* each crate. However, `bindgen` and `bindgen-cli` can generate bindings that are compatible with Rust versions below the current MSRV. - -Most of the time, the `bindgen-cli` crate will have a more recent MSRV than `bindgen` as crates such as `clap` require it. - -## API Reference - -[API reference documentation is on docs.rs](https://docs.rs/bindgen) - -## Environment Variables - -In addition to the [library API](https://docs.rs/bindgen) and [executable command-line API][bindgen-cmdline], -`bindgen` can be controlled through environment variables. - -End-users should set these environment variables to modify `bindgen`'s behavior without modifying the source code of direct consumers of `bindgen`. - -- `BINDGEN_EXTRA_CLANG_ARGS`: extra arguments to pass to `clang` - - Arguments are whitespace-separated - - Use shell-style quoting to pass through whitespace - - Examples: - - Specify alternate sysroot: `--sysroot=/path/to/sysroot` - - Add include search path with spaces: `-I"/path/with spaces"` -- `BINDGEN_EXTRA_CLANG_ARGS_`: similar to `BINDGEN_EXTRA_CLANG_ARGS`, - but used to set per-target arguments to pass to clang. Useful to set system include - directories in a target-specific way in cross-compilation environments with multiple targets. - Has precedence over `BINDGEN_EXTRA_CLANG_ARGS`. - -Additionally, `bindgen` uses `libclang` to parse C and C++ header files. -To modify how `bindgen` searches for `libclang`, see the [`clang-sys` documentation][clang-sys-env]. -For more details on how `bindgen` uses `libclang`, see the [`bindgen` users guide][bindgen-book-clang]. - -## Releases - -We don't follow a specific release calendar, but if you need a release please -file an issue requesting that (ping `@emilio` for increased effectiveness). 
- -## Contributing - -[See `CONTRIBUTING.md` for hacking on `bindgen`!](./CONTRIBUTING.md) - -[bindgen-cmdline]: https://rust-lang.github.io/rust-bindgen/command-line-usage.html -[clang-sys-env]: https://github.com/KyleMayes/clang-sys#environment-variables -[bindgen-book-clang]: https://rust-lang.github.io/rust-bindgen/requirements.html#clang diff --git a/vendor/bindgen/build.rs b/vendor/bindgen/build.rs deleted file mode 100644 index 4fb2d3075ecdbc..00000000000000 --- a/vendor/bindgen/build.rs +++ /dev/null @@ -1,29 +0,0 @@ -use std::env; -use std::fs::File; -use std::io::Write; -use std::path::{Path, PathBuf}; - -fn main() { - let out_dir = PathBuf::from(env::var("OUT_DIR").unwrap()); - - let mut dst = - File::create(Path::new(&out_dir).join("host-target.txt")).unwrap(); - dst.write_all(env::var("TARGET").unwrap().as_bytes()) - .unwrap(); - - // On behalf of clang_sys, rebuild ourselves if important configuration - // variables change, to ensure that bindings get rebuilt if the - // underlying libclang changes. - println!("cargo:rerun-if-env-changed=LLVM_CONFIG_PATH"); - println!("cargo:rerun-if-env-changed=LIBCLANG_PATH"); - println!("cargo:rerun-if-env-changed=LIBCLANG_STATIC_PATH"); - println!("cargo:rerun-if-env-changed=BINDGEN_EXTRA_CLANG_ARGS"); - println!( - "cargo:rerun-if-env-changed=BINDGEN_EXTRA_CLANG_ARGS_{}", - env::var("TARGET").unwrap() - ); - println!( - "cargo:rerun-if-env-changed=BINDGEN_EXTRA_CLANG_ARGS_{}", - env::var("TARGET").unwrap().replace('-', "_") - ); -} diff --git a/vendor/bindgen/callbacks.rs b/vendor/bindgen/callbacks.rs deleted file mode 100644 index 93005ce8e523ea..00000000000000 --- a/vendor/bindgen/callbacks.rs +++ /dev/null @@ -1,317 +0,0 @@ -//! A public API for more fine-grained customization of bindgen behavior. - -pub use crate::ir::analysis::DeriveTrait; -pub use crate::ir::derive::CanDerive as ImplementsTrait; -pub use crate::ir::enum_ty::{EnumVariantCustomBehavior, EnumVariantValue}; -pub use crate::ir::int::IntKind; -pub use cexpr::token::Kind as TokenKind; -pub use cexpr::token::Token; -use std::fmt; - -/// An enum to allow ignoring parsing of macros. -#[derive(Copy, Clone, Debug, PartialEq, Eq, Default)] -pub enum MacroParsingBehavior { - /// Ignore the macro, generating no code for it, or anything that depends on - /// it. - Ignore, - /// The default behavior bindgen would have otherwise. - #[default] - Default, -} - -/// A trait to allow configuring different kinds of types in different -/// situations. -pub trait ParseCallbacks: fmt::Debug { - #[cfg(feature = "__cli")] - #[doc(hidden)] - fn cli_args(&self) -> Vec { - vec![] - } - - /// This function will be run on every macro that is identified. - fn will_parse_macro(&self, _name: &str) -> MacroParsingBehavior { - MacroParsingBehavior::Default - } - - /// This function will run for every extern variable and function. The returned value determines - /// the name visible in the bindings. - fn generated_name_override( - &self, - _item_info: ItemInfo<'_>, - ) -> Option { - None - } - - /// This function will run for every extern variable and function. The returned value determines - /// the link name in the bindings. - fn generated_link_name_override( - &self, - _item_info: ItemInfo<'_>, - ) -> Option { - None - } - - /// Modify the contents of a macro - fn modify_macro(&self, _name: &str, _tokens: &mut Vec) {} - - /// The integer kind an integer macro should have, given a name and the - /// value of that macro, or `None` if you want the default to be chosen. 
- fn int_macro(&self, _name: &str, _value: i64) -> Option { - None - } - - /// This will be run on every string macro. The callback cannot influence the further - /// treatment of the macro, but may use the value to generate additional code or configuration. - fn str_macro(&self, _name: &str, _value: &[u8]) {} - - /// This will be run on every function-like macro. The callback cannot - /// influence the further treatment of the macro, but may use the value to - /// generate additional code or configuration. - /// - /// The first parameter represents the name and argument list (including the - /// parentheses) of the function-like macro. The second parameter represents - /// the expansion of the macro as a sequence of tokens. - fn func_macro(&self, _name: &str, _value: &[&[u8]]) {} - - /// This function should return whether, given an enum variant - /// name, and value, this enum variant will forcibly be a constant. - fn enum_variant_behavior( - &self, - _enum_name: Option<&str>, - _original_variant_name: &str, - _variant_value: EnumVariantValue, - ) -> Option { - None - } - - /// Allows to rename an enum variant, replacing `_original_variant_name`. - fn enum_variant_name( - &self, - _enum_name: Option<&str>, - _original_variant_name: &str, - _variant_value: EnumVariantValue, - ) -> Option { - None - } - - /// Allows to rename an item, replacing `_item_info.name`. - fn item_name(&self, _item_info: ItemInfo) -> Option { - None - } - - /// This will be called on every header filename passed to (`Builder::header`)[`crate::Builder::header`]. - fn header_file(&self, _filename: &str) {} - - /// This will be called on every file inclusion, with the full path of the included file. - fn include_file(&self, _filename: &str) {} - - /// This will be called every time `bindgen` reads an environment variable whether it has any - /// content or not. - fn read_env_var(&self, _key: &str) {} - - /// This will be called to determine whether a particular blocklisted type - /// implements a trait or not. This will be used to implement traits on - /// other types containing the blocklisted type. - /// - /// * `None`: use the default behavior - /// * `Some(ImplementsTrait::Yes)`: `_name` implements `_derive_trait` - /// * `Some(ImplementsTrait::Manually)`: any type including `_name` can't - /// derive `_derive_trait` but can implemented it manually - /// * `Some(ImplementsTrait::No)`: `_name` doesn't implement `_derive_trait` - fn blocklisted_type_implements_trait( - &self, - _name: &str, - _derive_trait: DeriveTrait, - ) -> Option { - None - } - - /// Provide a list of custom derive attributes. - /// - /// If no additional attributes are wanted, this function should return an - /// empty `Vec`. - fn add_derives(&self, _info: &DeriveInfo<'_>) -> Vec { - vec![] - } - - /// Provide a list of custom attributes. - /// - /// If no additional attributes are wanted, this function should return an - /// empty `Vec`. - fn add_attributes(&self, _info: &AttributeInfo<'_>) -> Vec { - vec![] - } - - /// Process a source code comment. - fn process_comment(&self, _comment: &str) -> Option { - None - } - - /// Potentially override the visibility of a composite type field. - /// - /// Caution: This allows overriding standard C++ visibility inferred by - /// `respect_cxx_access_specs`. - fn field_visibility( - &self, - _info: FieldInfo<'_>, - ) -> Option { - None - } - - /// Process a function name that as exactly one `va_list` argument - /// to be wrapped as a variadic function with the wrapped static function - /// feature. 
- /// - /// The returned string is new function name. - #[cfg(feature = "experimental")] - fn wrap_as_variadic_fn(&self, _name: &str) -> Option { - None - } - - /// This will get called everytime an item (currently struct, union, and alias) is found with some information about it - fn new_item_found(&self, _id: DiscoveredItemId, _item: DiscoveredItem) {} - - // TODO add callback for ResolvedTypeRef -} - -/// An identifier for a discovered item. Used to identify an aliased type (see [`DiscoveredItem::Alias`]) -#[derive(Ord, PartialOrd, PartialEq, Eq, Hash, Debug, Clone, Copy)] -pub struct DiscoveredItemId(usize); - -impl DiscoveredItemId { - /// Constructor - pub fn new(value: usize) -> Self { - Self(value) - } -} - -/// Struct passed to [`ParseCallbacks::new_item_found`] containing information about discovered -/// items (struct, union, and alias) -#[derive(Debug, Hash, Clone, Ord, PartialOrd, Eq, PartialEq)] -pub enum DiscoveredItem { - /// Represents a struct with its original name in C and its generated binding name - Struct { - /// The original name (learnt from C) of the structure - /// Can be None if the union is anonymous. - original_name: Option, - - /// The name of the generated binding - final_name: String, - }, - - /// Represents a union with its original name in C and its generated binding name - Union { - /// The original name (learnt from C) of the structure. - /// Can be None if the union is anonymous. - original_name: Option, - - /// The name of the generated binding - final_name: String, - }, - - /// Represents an alias like a typedef - /// ```c - /// typedef struct MyStruct { - /// ... - /// } StructAlias; - /// ``` - /// Here, the name of the alias is `StructAlias` and it's an alias for `MyStruct` - Alias { - /// The name of the alias in C (`StructAlias`) - alias_name: String, - - /// The identifier of the discovered type - alias_for: DiscoveredItemId, - }, - - /// Represents an enum. - Enum { - /// The final name of the generated binding - final_name: String, - }, - - /// A function or method. - Function { - /// The final name used. - final_name: String, - }, - - /// A method. - Method { - /// The final name used. - final_name: String, - - /// Type to which this method belongs. - parent: DiscoveredItemId, - }, // modules, etc. -} - -/// Relevant information about a type to which new derive attributes will be added using -/// [`ParseCallbacks::add_derives`]. -#[derive(Debug)] -#[non_exhaustive] -pub struct DeriveInfo<'a> { - /// The name of the type. - pub name: &'a str, - /// The kind of the type. - pub kind: TypeKind, -} - -/// Relevant information about a type to which new attributes will be added using -/// [`ParseCallbacks::add_attributes`]. -#[derive(Debug)] -#[non_exhaustive] -pub struct AttributeInfo<'a> { - /// The name of the type. - pub name: &'a str, - /// The kind of the type. - pub kind: TypeKind, -} - -#[derive(Debug, Clone, Copy, PartialEq, Eq)] -/// The kind of the current type. -pub enum TypeKind { - /// The type is a Rust `struct`. - Struct, - /// The type is a Rust `enum`. - Enum, - /// The type is a Rust `union`. - Union, -} - -/// A struct providing information about the item being passed to [`ParseCallbacks::generated_name_override`]. -#[derive(Clone, Copy)] -#[non_exhaustive] -pub struct ItemInfo<'a> { - /// The name of the item - pub name: &'a str, - /// The kind of item - pub kind: ItemKind, -} - -/// An enum indicating the kind of item for an `ItemInfo`. 
-#[derive(Clone, Copy)] -#[non_exhaustive] -pub enum ItemKind { - /// A module - Module, - /// A type - Type, - /// A Function - Function, - /// A Variable - Var, -} - -/// Relevant information about a field for which visibility can be determined using -/// [`ParseCallbacks::field_visibility`]. -#[derive(Debug)] -#[non_exhaustive] -pub struct FieldInfo<'a> { - /// The name of the type. - pub type_name: &'a str, - /// The name of the field. - pub field_name: &'a str, - /// The name of the type of the field. - pub field_type_name: Option<&'a str>, -} diff --git a/vendor/bindgen/clang.rs b/vendor/bindgen/clang.rs deleted file mode 100644 index 1e8326ed82082f..00000000000000 --- a/vendor/bindgen/clang.rs +++ /dev/null @@ -1,2448 +0,0 @@ -//! A higher level Clang API built on top of the generated bindings in the -//! `clang_sys` module. - -#![allow(non_upper_case_globals, dead_code)] -#![deny(clippy::missing_docs_in_private_items)] - -use crate::ir::context::BindgenContext; -use clang_sys::*; -use std::cmp; - -use std::ffi::{CStr, CString}; -use std::fmt; -use std::fs::OpenOptions; -use std::hash::Hash; -use std::hash::Hasher; -use std::os::raw::{c_char, c_int, c_longlong, c_uint, c_ulong, c_ulonglong}; -use std::sync::OnceLock; -use std::{mem, ptr, slice}; - -/// Type representing a clang attribute. -/// -/// Values of this type can be used to check for different attributes using the `has_attrs` -/// function. -pub(crate) struct Attribute { - name: &'static [u8], - kind: Option, - token_kind: CXTokenKind, -} - -impl Attribute { - /// A `warn_unused_result` attribute. - pub(crate) const MUST_USE: Self = Self { - name: b"warn_unused_result", - // FIXME(emilio): clang-sys doesn't expose `CXCursor_WarnUnusedResultAttr` (from clang 9). - kind: Some(440), - token_kind: CXToken_Identifier, - }; - - /// A `_Noreturn` attribute. - pub(crate) const NO_RETURN: Self = Self { - name: b"_Noreturn", - kind: None, - token_kind: CXToken_Keyword, - }; - - /// A `[[noreturn]]` attribute. - pub(crate) const NO_RETURN_CPP: Self = Self { - name: b"noreturn", - kind: None, - token_kind: CXToken_Identifier, - }; -} - -/// A cursor into the Clang AST, pointing to an AST node. -/// -/// We call the AST node pointed to by the cursor the cursor's "referent". -#[derive(Copy, Clone)] -pub(crate) struct Cursor { - x: CXCursor, -} - -impl fmt::Debug for Cursor { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - write!( - fmt, - "Cursor({} kind: {}, loc: {}, usr: {:?})", - self.spelling(), - kind_to_str(self.kind()), - self.location(), - self.usr() - ) - } -} - -impl Cursor { - /// Get the Unified Symbol Resolution for this cursor's referent, if - /// available. - /// - /// The USR can be used to compare entities across translation units. - pub(crate) fn usr(&self) -> Option { - let s = unsafe { cxstring_into_string(clang_getCursorUSR(self.x)) }; - if s.is_empty() { - None - } else { - Some(s) - } - } - - /// Is this cursor's referent a declaration? - pub(crate) fn is_declaration(&self) -> bool { - unsafe { clang_isDeclaration(self.kind()) != 0 } - } - - /// Is this cursor's referent an anonymous record or so? - pub(crate) fn is_anonymous(&self) -> bool { - unsafe { clang_Cursor_isAnonymous(self.x) != 0 } - } - - /// Get this cursor's referent's spelling. - pub(crate) fn spelling(&self) -> String { - unsafe { cxstring_into_string(clang_getCursorSpelling(self.x)) } - } - - /// Get this cursor's referent's display name. - /// - /// This is not necessarily a valid identifier. 
It includes extra - /// information, such as parameters for a function, etc. - pub(crate) fn display_name(&self) -> String { - unsafe { cxstring_into_string(clang_getCursorDisplayName(self.x)) } - } - - /// Get the mangled name of this cursor's referent. - pub(crate) fn mangling(&self) -> String { - unsafe { cxstring_into_string(clang_Cursor_getMangling(self.x)) } - } - - /// Gets the C++ manglings for this cursor, or an error if the manglings - /// are not available. - pub(crate) fn cxx_manglings(&self) -> Result, ()> { - use clang_sys::*; - unsafe { - let manglings = clang_Cursor_getCXXManglings(self.x); - if manglings.is_null() { - return Err(()); - } - let count = (*manglings).Count as usize; - - let mut result = Vec::with_capacity(count); - for i in 0..count { - let string_ptr = (*manglings).Strings.add(i); - result.push(cxstring_to_string_leaky(*string_ptr)); - } - clang_disposeStringSet(manglings); - Ok(result) - } - } - - /// Returns whether the cursor refers to a built-in definition. - pub(crate) fn is_builtin(&self) -> bool { - let (file, _, _, _) = self.location().location(); - file.name().is_none() - } - - /// Get the `Cursor` for this cursor's referent's lexical parent. - /// - /// The lexical parent is the parent of the definition. The semantic parent - /// is the parent of the declaration. Generally, the lexical parent doesn't - /// have any effect on semantics, while the semantic parent does. - /// - /// In the following snippet, the `Foo` class would be the semantic parent - /// of the out-of-line `method` definition, while the lexical parent is the - /// translation unit. - /// - /// ```c++ - /// class Foo { - /// void method(); - /// }; - /// - /// void Foo::method() { /* ... */ } - /// ``` - pub(crate) fn lexical_parent(&self) -> Cursor { - unsafe { - Cursor { - x: clang_getCursorLexicalParent(self.x), - } - } - } - - /// Get the referent's semantic parent, if one is available. - /// - /// See documentation for `lexical_parent` for details on semantic vs - /// lexical parents. - pub(crate) fn fallible_semantic_parent(&self) -> Option { - let sp = unsafe { - Cursor { - x: clang_getCursorSemanticParent(self.x), - } - }; - if sp == *self || !sp.is_valid() { - return None; - } - Some(sp) - } - - /// Get the referent's semantic parent. - /// - /// See documentation for `lexical_parent` for details on semantic vs - /// lexical parents. - pub(crate) fn semantic_parent(&self) -> Cursor { - self.fallible_semantic_parent().unwrap() - } - - /// Return the number of template arguments used by this cursor's referent, - /// if the referent is either a template instantiation. Returns `None` - /// otherwise. - /// - /// NOTE: This may not return `Some` for partial template specializations, - /// see #193 and #194. - pub(crate) fn num_template_args(&self) -> Option { - // XXX: `clang_Type_getNumTemplateArguments` is sort of reliable, while - // `clang_Cursor_getNumTemplateArguments` is totally unreliable. - // Therefore, try former first, and only fallback to the latter if we - // have to. - self.cur_type() - .num_template_args() - .or_else(|| { - let n: c_int = - unsafe { clang_Cursor_getNumTemplateArguments(self.x) }; - - if n >= 0 { - Some(n as u32) - } else { - debug_assert_eq!(n, -1); - None - } - }) - .or_else(|| { - let canonical = self.canonical(); - if canonical == *self { - None - } else { - canonical.num_template_args() - } - }) - } - - /// Get a cursor pointing to this referent's containing translation unit. 
- /// - /// Note that we shouldn't create a `TranslationUnit` struct here, because - /// bindgen assumes there will only be one of them alive at a time, and - /// disposes it on drop. That can change if this would be required, but I - /// think we can survive fine without it. - pub(crate) fn translation_unit(&self) -> Cursor { - assert!(self.is_valid()); - unsafe { - let tu = clang_Cursor_getTranslationUnit(self.x); - let cursor = Cursor { - x: clang_getTranslationUnitCursor(tu), - }; - assert!(cursor.is_valid()); - cursor - } - } - - /// Is the referent a top level construct? - pub(crate) fn is_toplevel(&self) -> bool { - let mut semantic_parent = self.fallible_semantic_parent(); - - while semantic_parent.is_some() && - (semantic_parent.unwrap().kind() == CXCursor_Namespace || - semantic_parent.unwrap().kind() == - CXCursor_NamespaceAlias || - semantic_parent.unwrap().kind() == CXCursor_NamespaceRef) - { - semantic_parent = - semantic_parent.unwrap().fallible_semantic_parent(); - } - - let tu = self.translation_unit(); - // Yes, this can happen with, e.g., macro definitions. - semantic_parent == tu.fallible_semantic_parent() - } - - /// There are a few kinds of types that we need to treat specially, mainly - /// not tracking the type declaration but the location of the cursor, given - /// clang doesn't expose a proper declaration for these types. - pub(crate) fn is_template_like(&self) -> bool { - matches!( - self.kind(), - CXCursor_ClassTemplate | - CXCursor_ClassTemplatePartialSpecialization | - CXCursor_TypeAliasTemplateDecl - ) - } - - /// Is this Cursor pointing to a function-like macro definition? - pub(crate) fn is_macro_function_like(&self) -> bool { - unsafe { clang_Cursor_isMacroFunctionLike(self.x) != 0 } - } - - /// Get the kind of referent this cursor is pointing to. - pub(crate) fn kind(&self) -> CXCursorKind { - self.x.kind - } - - /// Returns true if the cursor is a definition - pub(crate) fn is_definition(&self) -> bool { - unsafe { clang_isCursorDefinition(self.x) != 0 } - } - - /// Is the referent a template specialization? - pub(crate) fn is_template_specialization(&self) -> bool { - self.specialized().is_some() - } - - /// Is the referent a fully specialized template specialization without any - /// remaining free template arguments? - pub(crate) fn is_fully_specialized_template(&self) -> bool { - self.is_template_specialization() && - self.kind() != CXCursor_ClassTemplatePartialSpecialization && - self.num_template_args().unwrap_or(0) > 0 - } - - /// Is the referent a template specialization that still has remaining free - /// template arguments? - pub(crate) fn is_in_non_fully_specialized_template(&self) -> bool { - if self.is_toplevel() { - return false; - } - - let parent = self.semantic_parent(); - if parent.is_fully_specialized_template() { - return false; - } - - if !parent.is_template_like() { - return parent.is_in_non_fully_specialized_template(); - } - - true - } - - /// Is the referent any kind of template parameter? - pub(crate) fn is_template_parameter(&self) -> bool { - matches!( - self.kind(), - CXCursor_TemplateTemplateParameter | - CXCursor_TemplateTypeParameter | - CXCursor_NonTypeTemplateParameter - ) - } - - /// Does the referent's type or value depend on a template parameter? - pub(crate) fn is_dependent_on_template_parameter(&self) -> bool { - fn visitor( - found_template_parameter: &mut bool, - cur: Cursor, - ) -> CXChildVisitResult { - // If we found a template parameter, it is dependent. 
- if cur.is_template_parameter() { - *found_template_parameter = true; - return CXChildVisit_Break; - } - - // Get the referent and traverse it as well. - if let Some(referenced) = cur.referenced() { - if referenced.is_template_parameter() { - *found_template_parameter = true; - return CXChildVisit_Break; - } - - referenced - .visit(|next| visitor(found_template_parameter, next)); - if *found_template_parameter { - return CXChildVisit_Break; - } - } - - // Continue traversing the AST at the original cursor. - CXChildVisit_Recurse - } - - if self.is_template_parameter() { - return true; - } - - let mut found_template_parameter = false; - self.visit(|next| visitor(&mut found_template_parameter, next)); - - found_template_parameter - } - - /// Is this cursor pointing a valid referent? - pub(crate) fn is_valid(&self) -> bool { - unsafe { clang_isInvalid(self.kind()) == 0 } - } - - /// Get the source location for the referent. - pub(crate) fn location(&self) -> SourceLocation { - unsafe { - SourceLocation { - x: clang_getCursorLocation(self.x), - } - } - } - - /// Get the source location range for the referent. - pub(crate) fn extent(&self) -> CXSourceRange { - unsafe { clang_getCursorExtent(self.x) } - } - - /// Get the raw declaration comment for this referent, if one exists. - pub(crate) fn raw_comment(&self) -> Option { - let s = unsafe { - cxstring_into_string(clang_Cursor_getRawCommentText(self.x)) - }; - if s.is_empty() { - None - } else { - Some(s) - } - } - - /// Get the referent's parsed comment. - pub(crate) fn comment(&self) -> Comment { - unsafe { - Comment { - x: clang_Cursor_getParsedComment(self.x), - } - } - } - - /// Get the referent's type. - pub(crate) fn cur_type(&self) -> Type { - unsafe { - Type { - x: clang_getCursorType(self.x), - } - } - } - - /// Given that this cursor's referent is a reference to another type, or is - /// a declaration, get the cursor pointing to the referenced type or type of - /// the declared thing. - pub(crate) fn definition(&self) -> Option { - unsafe { - let ret = Cursor { - x: clang_getCursorDefinition(self.x), - }; - - if ret.is_valid() && ret.kind() != CXCursor_NoDeclFound { - Some(ret) - } else { - None - } - } - } - - /// Given that this cursor's referent is reference type, get the cursor - /// pointing to the referenced type. - pub(crate) fn referenced(&self) -> Option { - unsafe { - let ret = Cursor { - x: clang_getCursorReferenced(self.x), - }; - - if ret.is_valid() { - Some(ret) - } else { - None - } - } - } - - /// Get the canonical cursor for this referent. - /// - /// Many types can be declared multiple times before finally being properly - /// defined. This method allows us to get the canonical cursor for the - /// referent type. - pub(crate) fn canonical(&self) -> Cursor { - unsafe { - Cursor { - x: clang_getCanonicalCursor(self.x), - } - } - } - - /// Given that this cursor points to either a template specialization or a - /// template instantiation, get a cursor pointing to the template definition - /// that is being specialized. - pub(crate) fn specialized(&self) -> Option { - unsafe { - let ret = Cursor { - x: clang_getSpecializedCursorTemplate(self.x), - }; - if ret.is_valid() { - Some(ret) - } else { - None - } - } - } - - /// Assuming that this cursor's referent is a template declaration, get the - /// kind of cursor that would be generated for its specializations. 
- pub(crate) fn template_kind(&self) -> CXCursorKind { - unsafe { clang_getTemplateCursorKind(self.x) } - } - - /// Traverse this cursor's referent and its children. - /// - /// Call the given function on each AST node traversed. - pub(crate) fn visit(&self, mut visitor: Visitor) - where - Visitor: FnMut(Cursor) -> CXChildVisitResult, - { - let data = ptr::addr_of_mut!(visitor); - unsafe { - clang_visitChildren(self.x, visit_children::, data.cast()); - } - } - - /// Traverse all of this cursor's children, sorted by where they appear in source code. - /// - /// Call the given function on each AST node traversed. - pub(crate) fn visit_sorted( - &self, - ctx: &mut BindgenContext, - mut visitor: Visitor, - ) where - Visitor: FnMut(&mut BindgenContext, Cursor), - { - // FIXME(#2556): The current source order stuff doesn't account well for different levels - // of includes, or includes that show up at the same byte offset because they are passed in - // via CLI. - const SOURCE_ORDER_ENABLED: bool = false; - if !SOURCE_ORDER_ENABLED { - return self.visit(|c| { - visitor(ctx, c); - CXChildVisit_Continue - }); - } - - let mut children = self.collect_children(); - for child in &children { - if child.kind() == CXCursor_InclusionDirective { - if let Some(included_file) = child.get_included_file_name() { - let location = child.location(); - let (source_file, _, _, offset) = location.location(); - - if let Some(source_file) = source_file.name() { - ctx.add_include(source_file, included_file, offset); - } - } - } - } - children - .sort_by(|child1, child2| child1.cmp_by_source_order(child2, ctx)); - for child in children { - visitor(ctx, child); - } - } - - /// Compare source order of two cursors, considering `#include` directives. - /// - /// Built-in items provided by the compiler (which don't have a source file), - /// are sorted first. Remaining files are sorted by their position in the source file. - /// If the items' source files differ, they are sorted by the position of the first - /// `#include` for their source file. If no source files are included, `None` is returned. - fn cmp_by_source_order( - &self, - other: &Self, - ctx: &BindgenContext, - ) -> cmp::Ordering { - let (file, _, _, offset) = self.location().location(); - let (other_file, _, _, other_offset) = other.location().location(); - - let (file, other_file) = match (file.name(), other_file.name()) { - (Some(file), Some(other_file)) => (file, other_file), - // Built-in definitions should come first. - (Some(_), None) => return cmp::Ordering::Greater, - (None, Some(_)) => return cmp::Ordering::Less, - (None, None) => return cmp::Ordering::Equal, - }; - - if file == other_file { - // Both items are in the same source file, compare by byte offset. - return offset.cmp(&other_offset); - } - - let include_location = ctx.included_file_location(&file); - let other_include_location = ctx.included_file_location(&other_file); - match (include_location, other_include_location) { - (Some((file2, offset2)), _) if file2 == other_file => { - offset2.cmp(&other_offset) - } - (Some(_), None) => cmp::Ordering::Greater, - (_, Some((other_file2, other_offset2))) if file == other_file2 => { - offset.cmp(&other_offset2) - } - (None, Some(_)) => cmp::Ordering::Less, - (Some((file2, offset2)), Some((other_file2, other_offset2))) => { - if file2 == other_file2 { - offset2.cmp(&other_offset2) - } else { - cmp::Ordering::Equal - } - } - (None, None) => cmp::Ordering::Equal, - } - } - - /// Collect all of this cursor's children into a vec and return them. 
- pub(crate) fn collect_children(&self) -> Vec { - let mut children = vec![]; - self.visit(|c| { - children.push(c); - CXChildVisit_Continue - }); - children - } - - /// Does this cursor have any children? - pub(crate) fn has_children(&self) -> bool { - let mut has_children = false; - self.visit(|_| { - has_children = true; - CXChildVisit_Break - }); - has_children - } - - /// Does this cursor have at least `n` children? - pub(crate) fn has_at_least_num_children(&self, n: usize) -> bool { - assert!(n > 0); - let mut num_left = n; - self.visit(|_| { - num_left -= 1; - if num_left == 0 { - CXChildVisit_Break - } else { - CXChildVisit_Continue - } - }); - num_left == 0 - } - - /// Returns whether the given location contains a cursor with the given - /// kind in the first level of nesting underneath (doesn't look - /// recursively). - pub(crate) fn contains_cursor(&self, kind: CXCursorKind) -> bool { - let mut found = false; - - self.visit(|c| { - if c.kind() == kind { - found = true; - CXChildVisit_Break - } else { - CXChildVisit_Continue - } - }); - - found - } - - /// Is the referent an inlined function? - pub(crate) fn is_inlined_function(&self) -> bool { - unsafe { clang_Cursor_isFunctionInlined(self.x) != 0 } - } - - /// Is the referent a defaulted function? - pub(crate) fn is_defaulted_function(&self) -> bool { - unsafe { clang_CXXMethod_isDefaulted(self.x) != 0 } - } - - /// Is the referent a deleted function? - pub(crate) fn is_deleted_function(&self) -> bool { - // Unfortunately, libclang doesn't yet have an API for checking if a - // member function is deleted, but the following should be a good - // enough approximation. - // Deleted functions are implicitly inline according to paragraph 4 of - // [dcl.fct.def.delete] in the C++ standard. Normal inline functions - // have a definition in the same translation unit, so if this is an - // inline function without a definition, and it's not a defaulted - // function, we can reasonably safely conclude that it's a deleted - // function. - self.is_inlined_function() && - self.definition().is_none() && - !self.is_defaulted_function() - } - - /// Is the referent a bit field declaration? - pub(crate) fn is_bit_field(&self) -> bool { - unsafe { clang_Cursor_isBitField(self.x) != 0 } - } - - /// Get a cursor to the bit field's width expression, or `None` if it's not - /// a bit field. - pub(crate) fn bit_width_expr(&self) -> Option { - if !self.is_bit_field() { - return None; - } - - let mut result = None; - self.visit(|cur| { - // The first child may or may not be a TypeRef, depending on whether - // the field's type is builtin. Skip it. - if cur.kind() == CXCursor_TypeRef { - return CXChildVisit_Continue; - } - - // The next expression or literal is the bit width. - result = Some(cur); - - CXChildVisit_Break - }); - - result - } - - /// Get the width of this cursor's referent bit field, or `None` if the - /// referent is not a bit field or if the width could not be evaluated. - pub(crate) fn bit_width(&self) -> Option { - // It is not safe to check the bit width without ensuring it doesn't - // depend on a template parameter. See - // https://github.com/rust-lang/rust-bindgen/issues/2239 - if self.bit_width_expr()?.is_dependent_on_template_parameter() { - return None; - } - - unsafe { - let w = clang_getFieldDeclBitWidth(self.x); - if w == -1 { - None - } else { - Some(w as u32) - } - } - } - - /// Get the integer representation type used to hold this cursor's referent - /// enum type. 
- pub(crate) fn enum_type(&self) -> Option { - unsafe { - let t = Type { - x: clang_getEnumDeclIntegerType(self.x), - }; - if t.is_valid() { - Some(t) - } else { - None - } - } - } - - /// Get the boolean constant value for this cursor's enum variant referent. - /// - /// Returns None if the cursor's referent is not an enum variant. - pub(crate) fn enum_val_boolean(&self) -> Option { - unsafe { - if self.kind() == CXCursor_EnumConstantDecl { - Some(clang_getEnumConstantDeclValue(self.x) != 0) - } else { - None - } - } - } - - /// Get the signed constant value for this cursor's enum variant referent. - /// - /// Returns None if the cursor's referent is not an enum variant. - pub(crate) fn enum_val_signed(&self) -> Option { - unsafe { - if self.kind() == CXCursor_EnumConstantDecl { - #[allow(clippy::unnecessary_cast)] - Some(clang_getEnumConstantDeclValue(self.x) as i64) - } else { - None - } - } - } - - /// Get the unsigned constant value for this cursor's enum variant referent. - /// - /// Returns None if the cursor's referent is not an enum variant. - pub(crate) fn enum_val_unsigned(&self) -> Option { - unsafe { - if self.kind() == CXCursor_EnumConstantDecl { - #[allow(clippy::unnecessary_cast)] - Some(clang_getEnumConstantDeclUnsignedValue(self.x) as u64) - } else { - None - } - } - } - - /// Does this cursor have the given attributes? - pub(crate) fn has_attrs( - &self, - attrs: &[Attribute; N], - ) -> [bool; N] { - let mut found_attrs = [false; N]; - let mut found_count = 0; - - self.visit(|cur| { - let kind = cur.kind(); - for (idx, attr) in attrs.iter().enumerate() { - let found_attr = &mut found_attrs[idx]; - if !*found_attr { - // `attr.name` and` attr.token_kind` are checked against unexposed attributes only. - if attr.kind == Some(kind) || - (kind == CXCursor_UnexposedAttr && - cur.tokens().iter().any(|t| { - t.kind == attr.token_kind && - t.spelling() == attr.name - })) - { - *found_attr = true; - found_count += 1; - - if found_count == N { - return CXChildVisit_Break; - } - } - } - } - - CXChildVisit_Continue - }); - - found_attrs - } - - /// Given that this cursor's referent is a `typedef`, get the `Type` that is - /// being aliased. - pub(crate) fn typedef_type(&self) -> Option { - let inner = Type { - x: unsafe { clang_getTypedefDeclUnderlyingType(self.x) }, - }; - - if inner.is_valid() { - Some(inner) - } else { - None - } - } - - /// Get the linkage kind for this cursor's referent. - /// - /// This only applies to functions and variables. - pub(crate) fn linkage(&self) -> CXLinkageKind { - unsafe { clang_getCursorLinkage(self.x) } - } - - /// Get the visibility of this cursor's referent. - pub(crate) fn visibility(&self) -> CXVisibilityKind { - unsafe { clang_getCursorVisibility(self.x) } - } - - /// Given that this cursor's referent is a function, return cursors to its - /// parameters. - /// - /// Returns None if the cursor's referent is not a function/method call or - /// declaration. - pub(crate) fn args(&self) -> Option> { - // match self.kind() { - // CXCursor_FunctionDecl | - // CXCursor_CXXMethod => { - self.num_args().ok().map(|num| { - (0..num) - .map(|i| Cursor { - x: unsafe { clang_Cursor_getArgument(self.x, i as c_uint) }, - }) - .collect() - }) - } - - /// Given that this cursor's referent is a function/method call or - /// declaration, return the number of arguments it takes. - /// - /// Returns Err if the cursor's referent is not a function/method call or - /// declaration. 
- pub(crate) fn num_args(&self) -> Result { - unsafe { - let w = clang_Cursor_getNumArguments(self.x); - if w == -1 { - Err(()) - } else { - Ok(w as u32) - } - } - } - - /// Get the access specifier for this cursor's referent. - pub(crate) fn access_specifier(&self) -> CX_CXXAccessSpecifier { - unsafe { clang_getCXXAccessSpecifier(self.x) } - } - - /// Is the cursor's referent publicly accessible in C++? - /// - /// Returns true if `self.access_specifier()` is `CX_CXXPublic` or - /// `CX_CXXInvalidAccessSpecifier`. - pub(crate) fn public_accessible(&self) -> bool { - let access = self.access_specifier(); - access == CX_CXXPublic || access == CX_CXXInvalidAccessSpecifier - } - - /// Is this cursor's referent a field declaration that is marked as - /// `mutable`? - pub(crate) fn is_mutable_field(&self) -> bool { - unsafe { clang_CXXField_isMutable(self.x) != 0 } - } - - /// Get the offset of the field represented by the Cursor. - pub(crate) fn offset_of_field(&self) -> Result { - let offset = unsafe { clang_Cursor_getOffsetOfField(self.x) }; - - if offset < 0 { - Err(LayoutError::from(offset as i32)) - } else { - Ok(offset as usize) - } - } - - /// Is this cursor's referent a member function that is declared `static`? - pub(crate) fn method_is_static(&self) -> bool { - unsafe { clang_CXXMethod_isStatic(self.x) != 0 } - } - - /// Is this cursor's referent a member function that is declared `const`? - pub(crate) fn method_is_const(&self) -> bool { - unsafe { clang_CXXMethod_isConst(self.x) != 0 } - } - - /// Is this cursor's referent a member function that is virtual? - pub(crate) fn method_is_virtual(&self) -> bool { - unsafe { clang_CXXMethod_isVirtual(self.x) != 0 } - } - - /// Is this cursor's referent a member function that is pure virtual? - pub(crate) fn method_is_pure_virtual(&self) -> bool { - unsafe { clang_CXXMethod_isPureVirtual(self.x) != 0 } - } - - /// Is this cursor's referent a struct or class with virtual members? - pub(crate) fn is_virtual_base(&self) -> bool { - unsafe { clang_isVirtualBase(self.x) != 0 } - } - - /// Try to evaluate this cursor. - pub(crate) fn evaluate(&self) -> Option { - EvalResult::new(*self) - } - - /// Return the result type for this cursor - pub(crate) fn ret_type(&self) -> Option { - let rt = Type { - x: unsafe { clang_getCursorResultType(self.x) }, - }; - if rt.is_valid() { - Some(rt) - } else { - None - } - } - - /// Gets the tokens that correspond to that cursor. - pub(crate) fn tokens(&self) -> RawTokens<'_> { - RawTokens::new(self) - } - - /// Gets the tokens that correspond to that cursor as `cexpr` tokens. - pub(crate) fn cexpr_tokens(self) -> Vec { - self.tokens() - .iter() - .filter_map(|token| token.as_cexpr_token()) - .collect() - } - - /// Obtain the real path name of a cursor of `InclusionDirective` kind. - /// - /// Returns None if the cursor does not include a file, otherwise the file's full name - pub(crate) fn get_included_file_name(&self) -> Option { - let file = unsafe { clang_getIncludedFile(self.x) }; - if file.is_null() { - None - } else { - Some(unsafe { cxstring_into_string(clang_getFileName(file)) }) - } - } - - /// Is this cursor's referent a namespace that is inline? - pub(crate) fn is_inline_namespace(&self) -> bool { - unsafe { clang_Cursor_isInlineNamespace(self.x) != 0 } - } -} - -/// A struct that owns the tokenizer result from a given cursor. 
-pub(crate) struct RawTokens<'a> { - cursor: &'a Cursor, - tu: CXTranslationUnit, - tokens: *mut CXToken, - token_count: c_uint, -} - -impl<'a> RawTokens<'a> { - fn new(cursor: &'a Cursor) -> Self { - let mut tokens = ptr::null_mut(); - let mut token_count = 0; - let range = cursor.extent(); - let tu = unsafe { clang_Cursor_getTranslationUnit(cursor.x) }; - unsafe { clang_tokenize(tu, range, &mut tokens, &mut token_count) }; - Self { - cursor, - tu, - tokens, - token_count, - } - } - - fn as_slice(&self) -> &[CXToken] { - if self.tokens.is_null() { - return &[]; - } - unsafe { slice::from_raw_parts(self.tokens, self.token_count as usize) } - } - - /// Get an iterator over these tokens. - pub(crate) fn iter(&self) -> ClangTokenIterator<'_> { - ClangTokenIterator { - tu: self.tu, - raw: self.as_slice().iter(), - } - } -} - -impl Drop for RawTokens<'_> { - fn drop(&mut self) { - if !self.tokens.is_null() { - unsafe { - clang_disposeTokens( - self.tu, - self.tokens, - self.token_count as c_uint, - ); - } - } - } -} - -/// A raw clang token, that exposes only kind, spelling, and extent. This is a -/// slightly more convenient version of `CXToken` which owns the spelling -/// string and extent. -#[derive(Debug)] -pub(crate) struct ClangToken { - spelling: CXString, - /// The extent of the token. This is the same as the relevant member from - /// `CXToken`. - pub(crate) extent: CXSourceRange, - /// The kind of the token. This is the same as the relevant member from - /// `CXToken`. - pub(crate) kind: CXTokenKind, -} - -impl ClangToken { - /// Get the token spelling, without being converted to utf-8. - pub(crate) fn spelling(&self) -> &[u8] { - let c_str = unsafe { CStr::from_ptr(clang_getCString(self.spelling)) }; - c_str.to_bytes() - } - - /// Converts a `ClangToken` to a `cexpr` token if possible. - pub(crate) fn as_cexpr_token(&self) -> Option { - use cexpr::token; - - let kind = match self.kind { - CXToken_Punctuation => token::Kind::Punctuation, - CXToken_Literal => token::Kind::Literal, - CXToken_Identifier => token::Kind::Identifier, - CXToken_Keyword => token::Kind::Keyword, - // NB: cexpr is not too happy about comments inside - // expressions, so we strip them down here. - CXToken_Comment => return None, - _ => { - warn!("Found unexpected token kind: {self:?}"); - return None; - } - }; - - Some(token::Token { - kind, - raw: self.spelling().to_vec().into_boxed_slice(), - }) - } -} - -impl Drop for ClangToken { - fn drop(&mut self) { - unsafe { clang_disposeString(self.spelling) } - } -} - -/// An iterator over a set of Tokens. -pub(crate) struct ClangTokenIterator<'a> { - tu: CXTranslationUnit, - raw: slice::Iter<'a, CXToken>, -} - -impl Iterator for ClangTokenIterator<'_> { - type Item = ClangToken; - - fn next(&mut self) -> Option { - let raw = self.raw.next()?; - unsafe { - let kind = clang_getTokenKind(*raw); - let spelling = clang_getTokenSpelling(self.tu, *raw); - let extent = clang_getTokenExtent(self.tu, *raw); - Some(ClangToken { - spelling, - extent, - kind, - }) - } - } -} - -/// Checks whether the name looks like an identifier, i.e. is alphanumeric -/// (including '_') and does not start with a digit. 
-pub(crate) fn is_valid_identifier(name: &str) -> bool { - let mut chars = name.chars(); - let first_valid = - chars.next().is_some_and(|c| c.is_alphabetic() || c == '_'); - - first_valid && chars.all(|c| c.is_alphanumeric() || c == '_') -} - -extern "C" fn visit_children( - cur: CXCursor, - _parent: CXCursor, - data: CXClientData, -) -> CXChildVisitResult -where - Visitor: FnMut(Cursor) -> CXChildVisitResult, -{ - let func: &mut Visitor = unsafe { &mut *data.cast::() }; - let child = Cursor { x: cur }; - - (*func)(child) -} - -impl PartialEq for Cursor { - fn eq(&self, other: &Cursor) -> bool { - unsafe { clang_equalCursors(self.x, other.x) == 1 } - } -} - -impl Eq for Cursor {} - -impl Hash for Cursor { - fn hash(&self, state: &mut H) { - unsafe { clang_hashCursor(self.x) }.hash(state); - } -} - -/// The type of a node in clang's AST. -#[derive(Clone, Copy)] -pub(crate) struct Type { - x: CXType, -} - -impl PartialEq for Type { - fn eq(&self, other: &Self) -> bool { - unsafe { clang_equalTypes(self.x, other.x) != 0 } - } -} - -impl Eq for Type {} - -impl fmt::Debug for Type { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - write!( - fmt, - "Type({}, kind: {}, cconv: {}, decl: {:?}, canon: {:?})", - self.spelling(), - type_to_str(self.kind()), - self.call_conv(), - self.declaration(), - self.declaration().canonical() - ) - } -} - -/// An error about the layout of a struct, class, or type. -#[derive(Debug, Copy, Clone, Eq, PartialEq, Hash)] -pub(crate) enum LayoutError { - /// Asked for the layout of an invalid type. - Invalid, - /// Asked for the layout of an incomplete type. - Incomplete, - /// Asked for the layout of a dependent type. - Dependent, - /// Asked for the layout of a type that does not have constant size. - NotConstantSize, - /// Asked for the layout of a field in a type that does not have such a - /// field. - InvalidFieldName, - /// An unknown layout error. - Unknown, -} - -impl ::std::convert::From for LayoutError { - fn from(val: i32) -> Self { - use self::LayoutError::*; - - match val { - CXTypeLayoutError_Invalid => Invalid, - CXTypeLayoutError_Incomplete => Incomplete, - CXTypeLayoutError_Dependent => Dependent, - CXTypeLayoutError_NotConstantSize => NotConstantSize, - CXTypeLayoutError_InvalidFieldName => InvalidFieldName, - _ => Unknown, - } - } -} - -impl Type { - /// Get this type's kind. - pub(crate) fn kind(&self) -> CXTypeKind { - self.x.kind - } - - /// Get a cursor pointing to this type's declaration. - pub(crate) fn declaration(&self) -> Cursor { - let decl = Cursor { - x: unsafe { clang_getTypeDeclaration(self.x) }, - }; - // Prior to clang 22, the declaration pointed to the definition. - decl.definition().unwrap_or(decl) - } - - /// Get the canonical declaration of this type, if it is available. - pub(crate) fn canonical_declaration( - &self, - location: Option<&Cursor>, - ) -> Option { - let mut declaration = self.declaration(); - if !declaration.is_valid() { - if let Some(location) = location { - let mut location = *location; - if let Some(referenced) = location.referenced() { - location = referenced; - } - if location.is_template_like() { - declaration = location; - } - } - } - - let canonical = declaration.canonical(); - if canonical.is_valid() && canonical.kind() != CXCursor_NoDeclFound { - Some(CanonicalTypeDeclaration(*self, canonical)) - } else { - None - } - } - - /// Get a raw display name for this type. 
- pub(crate) fn spelling(&self) -> String { - let s = unsafe { cxstring_into_string(clang_getTypeSpelling(self.x)) }; - // Clang 5.0 introduced changes in the spelling API so it returned the - // full qualified name. Let's undo that here. - if s.split("::").all(is_valid_identifier) { - if let Some(s) = s.split("::").last() { - return s.to_owned(); - } - } - - s - } - - /// Is this type const qualified? - pub(crate) fn is_const(&self) -> bool { - unsafe { clang_isConstQualifiedType(self.x) != 0 } - } - - #[inline] - fn is_non_deductible_auto_type(&self) -> bool { - debug_assert_eq!(self.kind(), CXType_Auto); - self.canonical_type() == *self - } - - #[inline] - fn clang_size_of(&self, ctx: &BindgenContext) -> c_longlong { - match self.kind() { - // Work-around https://bugs.llvm.org/show_bug.cgi?id=40975 - CXType_RValueReference | CXType_LValueReference => { - ctx.target_pointer_size() as c_longlong - } - // Work-around https://bugs.llvm.org/show_bug.cgi?id=40813 - CXType_Auto if self.is_non_deductible_auto_type() => -6, - _ => unsafe { clang_Type_getSizeOf(self.x) }, - } - } - - #[inline] - fn clang_align_of(&self, ctx: &BindgenContext) -> c_longlong { - match self.kind() { - // Work-around https://bugs.llvm.org/show_bug.cgi?id=40975 - CXType_RValueReference | CXType_LValueReference => { - ctx.target_pointer_size() as c_longlong - } - // Work-around https://bugs.llvm.org/show_bug.cgi?id=40813 - CXType_Auto if self.is_non_deductible_auto_type() => -6, - _ => unsafe { clang_Type_getAlignOf(self.x) }, - } - } - - /// What is the size of this type? Paper over invalid types by returning `0` - /// for them. - pub(crate) fn size(&self, ctx: &BindgenContext) -> usize { - let val = self.clang_size_of(ctx); - if val < 0 { - 0 - } else { - val as usize - } - } - - /// What is the size of this type? - pub(crate) fn fallible_size( - &self, - ctx: &BindgenContext, - ) -> Result { - let val = self.clang_size_of(ctx); - if val < 0 { - Err(LayoutError::from(val as i32)) - } else { - Ok(val as usize) - } - } - - /// What is the alignment of this type? Paper over invalid types by - /// returning `0`. - pub(crate) fn align(&self, ctx: &BindgenContext) -> usize { - let val = self.clang_align_of(ctx); - if val < 0 { - 0 - } else { - val as usize - } - } - - /// What is the alignment of this type? - pub(crate) fn fallible_align( - &self, - ctx: &BindgenContext, - ) -> Result { - let val = self.clang_align_of(ctx); - if val < 0 { - Err(LayoutError::from(val as i32)) - } else { - Ok(val as usize) - } - } - - /// Get the layout for this type, or an error describing why it does not - /// have a valid layout. - pub(crate) fn fallible_layout( - &self, - ctx: &BindgenContext, - ) -> Result { - use crate::ir::layout::Layout; - let size = self.fallible_size(ctx)?; - let align = self.fallible_align(ctx)?; - Ok(Layout::new(size, align)) - } - - /// Get the number of template arguments this type has, or `None` if it is - /// not some kind of template. - pub(crate) fn num_template_args(&self) -> Option { - let n = unsafe { clang_Type_getNumTemplateArguments(self.x) }; - if n >= 0 { - Some(n as u32) - } else { - debug_assert_eq!(n, -1); - None - } - } - - /// If this type is a class template specialization, return its - /// template arguments. Otherwise, return None. - pub(crate) fn template_args(&self) -> Option { - self.num_template_args().map(|n| TypeTemplateArgIterator { - x: self.x, - length: n, - index: 0, - }) - } - - /// Given that this type is a function prototype, return the types of its parameters. 
- /// - /// Returns None if the type is not a function prototype. - pub(crate) fn args(&self) -> Option> { - self.num_args().ok().map(|num| { - (0..num) - .map(|i| Type { - x: unsafe { clang_getArgType(self.x, i as c_uint) }, - }) - .collect() - }) - } - - /// Given that this type is a function prototype, return the number of arguments it takes. - /// - /// Returns Err if the type is not a function prototype. - pub(crate) fn num_args(&self) -> Result { - unsafe { - let w = clang_getNumArgTypes(self.x); - if w == -1 { - Err(()) - } else { - Ok(w as u32) - } - } - } - - /// Given that this type is a pointer type, return the type that it points - /// to. - pub(crate) fn pointee_type(&self) -> Option { - match self.kind() { - CXType_Pointer | - CXType_RValueReference | - CXType_LValueReference | - CXType_MemberPointer | - CXType_BlockPointer | - CXType_ObjCObjectPointer => { - let ret = Type { - x: unsafe { clang_getPointeeType(self.x) }, - }; - debug_assert!(ret.is_valid()); - Some(ret) - } - _ => None, - } - } - - /// Given that this type is an array, vector, or complex type, return the - /// type of its elements. - pub(crate) fn elem_type(&self) -> Option { - let current_type = Type { - x: unsafe { clang_getElementType(self.x) }, - }; - if current_type.is_valid() { - Some(current_type) - } else { - None - } - } - - /// Given that this type is an array or vector type, return its number of - /// elements. - pub(crate) fn num_elements(&self) -> Option { - let num_elements_returned = unsafe { clang_getNumElements(self.x) }; - if num_elements_returned == -1 { - None - } else { - Some(num_elements_returned as usize) - } - } - - /// Get the canonical version of this type. This sees through `typedef`s and - /// aliases to get the underlying, canonical type. - pub(crate) fn canonical_type(&self) -> Type { - unsafe { - Type { - x: clang_getCanonicalType(self.x), - } - } - } - - /// Is this type a variadic function type? - pub(crate) fn is_variadic(&self) -> bool { - unsafe { clang_isFunctionTypeVariadic(self.x) != 0 } - } - - /// Given that this type is a function type, get the type of its return - /// value. - pub(crate) fn ret_type(&self) -> Option { - let rt = Type { - x: unsafe { clang_getResultType(self.x) }, - }; - if rt.is_valid() { - Some(rt) - } else { - None - } - } - - /// Given that this type is a function type, get its calling convention. If - /// this is not a function type, `CXCallingConv_Invalid` is returned. - pub(crate) fn call_conv(&self) -> CXCallingConv { - unsafe { clang_getFunctionTypeCallingConv(self.x) } - } - - /// For elaborated types (types which use `class`, `struct`, or `union` to - /// disambiguate types from local bindings), get the underlying type. - pub(crate) fn named(&self) -> Type { - unsafe { - Type { - x: clang_Type_getNamedType(self.x), - } - } - } - - /// For atomic types, get the underlying type. - pub(crate) fn atomic_value_type(&self) -> Type { - unsafe { - Type { - x: clang_Type_getValueType(self.x), - } - } - } - - /// Is this a valid type? - pub(crate) fn is_valid(&self) -> bool { - self.kind() != CXType_Invalid - } - - /// Is this a valid and exposed type? - pub(crate) fn is_valid_and_exposed(&self) -> bool { - self.is_valid() && self.kind() != CXType_Unexposed - } - - /// Is this type a fully instantiated template? - pub(crate) fn is_fully_instantiated_template(&self) -> bool { - // Yep, the spelling of this containing type-parameter is extremely - // nasty... But can happen in . 
Unfortunately I couldn't - // reduce it enough :( - self.template_args().is_some_and(|args| args.len() > 0) && - !matches!( - self.declaration().kind(), - CXCursor_ClassTemplatePartialSpecialization | - CXCursor_TypeAliasTemplateDecl | - CXCursor_TemplateTemplateParameter - ) - } - - /// Is this type an associated template type? Eg `T::Associated` in - /// this example: - /// - /// ```c++ - /// template - /// class Foo { - /// typename T::Associated member; - /// }; - /// ``` - pub(crate) fn is_associated_type(&self) -> bool { - // This is terrible :( - fn hacky_parse_associated_type>(spelling: S) -> bool { - static ASSOC_TYPE_RE: OnceLock = OnceLock::new(); - ASSOC_TYPE_RE - .get_or_init(|| { - regex::Regex::new(r"typename type\-parameter\-\d+\-\d+::.+") - .unwrap() - }) - .is_match(spelling.as_ref()) - } - - self.kind() == CXType_Unexposed && - (hacky_parse_associated_type(self.spelling()) || - hacky_parse_associated_type( - self.canonical_type().spelling(), - )) - } -} - -/// The `CanonicalTypeDeclaration` type exists as proof-by-construction that its -/// cursor is the canonical declaration for its type. If you have a -/// `CanonicalTypeDeclaration` instance, you know for sure that the type and -/// cursor match up in a canonical declaration relationship, and it simply -/// cannot be otherwise. -#[derive(Debug, Clone, Copy, PartialEq, Eq)] -pub(crate) struct CanonicalTypeDeclaration(Type, Cursor); - -impl CanonicalTypeDeclaration { - /// Get the type. - pub(crate) fn ty(&self) -> &Type { - &self.0 - } - - /// Get the type's canonical declaration cursor. - pub(crate) fn cursor(&self) -> &Cursor { - &self.1 - } -} - -/// An iterator for a type's template arguments. -pub(crate) struct TypeTemplateArgIterator { - x: CXType, - length: u32, - index: u32, -} - -impl Iterator for TypeTemplateArgIterator { - type Item = Type; - fn next(&mut self) -> Option { - if self.index < self.length { - let idx = self.index as c_uint; - self.index += 1; - Some(Type { - x: unsafe { clang_Type_getTemplateArgumentAsType(self.x, idx) }, - }) - } else { - None - } - } -} - -impl ExactSizeIterator for TypeTemplateArgIterator { - fn len(&self) -> usize { - assert!(self.index <= self.length); - (self.length - self.index) as usize - } -} - -/// A `SourceLocation` is a file, line, column, and byte offset location for -/// some source text. -pub(crate) struct SourceLocation { - x: CXSourceLocation, -} - -impl SourceLocation { - /// Get the (file, line, column, byte offset) tuple for this source - /// location. - pub(crate) fn location(&self) -> (File, usize, usize, usize) { - unsafe { - let mut file = mem::zeroed(); - let mut line = 0; - let mut col = 0; - let mut off = 0; - clang_getFileLocation( - self.x, &mut file, &mut line, &mut col, &mut off, - ); - (File { x: file }, line as usize, col as usize, off as usize) - } - } -} - -impl fmt::Display for SourceLocation { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - let (file, line, col, _) = self.location(); - if let Some(name) = file.name() { - write!(f, "{name}:{line}:{col}") - } else { - "builtin definitions".fmt(f) - } - } -} - -impl fmt::Debug for SourceLocation { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "{self}") - } -} - -/// A comment in the source text. -/// -/// Comments are sort of parsed by Clang, and have a tree structure. -pub(crate) struct Comment { - x: CXComment, -} - -impl Comment { - /// What kind of comment is this? 
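// Self-contained sketch of the index/length iterator pattern used by
// `TypeTemplateArgIterator` above: libclang exposes a "get count" call plus a
// "get nth item" call, which maps naturally onto an `ExactSizeIterator`.
// `fake_get_nth` stands in for a libclang getter such as
// `clang_Type_getTemplateArgumentAsType`; it is illustrative only.
struct NthIter {
    length: u32,
    index: u32,
}

fn fake_get_nth(i: u32) -> String {
    format!("item #{i}")
}

impl Iterator for NthIter {
    type Item = String;
    fn next(&mut self) -> Option<String> {
        if self.index < self.length {
            let idx = self.index;
            self.index += 1;
            Some(fake_get_nth(idx))
        } else {
            None
        }
    }
}

impl ExactSizeIterator for NthIter {
    fn len(&self) -> usize {
        // Remaining items; mirrors the length/index bookkeeping above.
        (self.length - self.index) as usize
    }
}

fn main() {
    let it = NthIter { length: 3, index: 0 };
    assert_eq!(it.len(), 3);
    assert_eq!(it.collect::<Vec<_>>(), ["item #0", "item #1", "item #2"]);
}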
- pub(crate) fn kind(&self) -> CXCommentKind { - unsafe { clang_Comment_getKind(self.x) } - } - - /// Get this comment's children comment - pub(crate) fn get_children(&self) -> CommentChildrenIterator { - CommentChildrenIterator { - parent: self.x, - length: unsafe { clang_Comment_getNumChildren(self.x) }, - index: 0, - } - } - - /// Given that this comment is the start or end of an HTML tag, get its tag - /// name. - pub(crate) fn get_tag_name(&self) -> String { - unsafe { cxstring_into_string(clang_HTMLTagComment_getTagName(self.x)) } - } - - /// Given that this comment is an HTML start tag, get its attributes. - pub(crate) fn get_tag_attrs(&self) -> CommentAttributesIterator { - CommentAttributesIterator { - x: self.x, - length: unsafe { clang_HTMLStartTag_getNumAttrs(self.x) }, - index: 0, - } - } -} - -/// An iterator for a comment's children -pub(crate) struct CommentChildrenIterator { - parent: CXComment, - length: c_uint, - index: c_uint, -} - -impl Iterator for CommentChildrenIterator { - type Item = Comment; - fn next(&mut self) -> Option { - if self.index < self.length { - let idx = self.index; - self.index += 1; - Some(Comment { - x: unsafe { clang_Comment_getChild(self.parent, idx) }, - }) - } else { - None - } - } -} - -/// An HTML start tag comment attribute -pub(crate) struct CommentAttribute { - /// HTML start tag attribute name - pub(crate) name: String, - /// HTML start tag attribute value - pub(crate) value: String, -} - -/// An iterator for a comment's attributes -pub(crate) struct CommentAttributesIterator { - x: CXComment, - length: c_uint, - index: c_uint, -} - -impl Iterator for CommentAttributesIterator { - type Item = CommentAttribute; - fn next(&mut self) -> Option { - if self.index < self.length { - let idx = self.index; - self.index += 1; - Some(CommentAttribute { - name: unsafe { - cxstring_into_string(clang_HTMLStartTag_getAttrName( - self.x, idx, - )) - }, - value: unsafe { - cxstring_into_string(clang_HTMLStartTag_getAttrValue( - self.x, idx, - )) - }, - }) - } else { - None - } - } -} - -/// A source file. -pub(crate) struct File { - x: CXFile, -} - -impl File { - /// Get the name of this source file. - pub(crate) fn name(&self) -> Option { - if self.x.is_null() { - return None; - } - Some(unsafe { cxstring_into_string(clang_getFileName(self.x)) }) - } -} - -fn cxstring_to_string_leaky(s: CXString) -> String { - if s.data.is_null() { - return String::new(); - } - let c_str = unsafe { CStr::from_ptr(clang_getCString(s)) }; - c_str.to_string_lossy().into_owned() -} - -fn cxstring_into_string(s: CXString) -> String { - let ret = cxstring_to_string_leaky(s); - unsafe { clang_disposeString(s) }; - ret -} - -/// An `Index` is an environment for a set of translation units that will -/// typically end up linked together in one final binary. -pub(crate) struct Index { - x: CXIndex, -} - -impl Index { - /// Construct a new `Index`. - /// - /// The `pch` parameter controls whether declarations in pre-compiled - /// headers are included when enumerating a translation unit's "locals". - /// - /// The `diag` parameter controls whether debugging diagnostics are enabled. 
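// Minimal sketch of the C-string handling behind `cxstring_into_string`
// above: read a NUL-terminated buffer through `CStr`, copy it into an owned
// Rust `String` (lossily, since it may not be valid UTF-8), and only then let
// the C-side storage be released (the real code calls `clang_disposeString`).
// The byte literal below stands in for memory owned by libclang.
use std::ffi::CStr;
use std::os::raw::c_char;

fn c_buffer_to_string(ptr: *const c_char) -> String {
    if ptr.is_null() {
        return String::new();
    }
    let c_str = unsafe { CStr::from_ptr(ptr) };
    c_str.to_string_lossy().into_owned()
}

fn main() {
    let raw = b"hello from C\0";
    let s = c_buffer_to_string(raw.as_ptr().cast::<c_char>());
    assert_eq!(s, "hello from C");
    assert_eq!(c_buffer_to_string(std::ptr::null()), "");
}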
- pub(crate) fn new(pch: bool, diag: bool) -> Index { - unsafe { - Index { - x: clang_createIndex(c_int::from(pch), c_int::from(diag)), - } - } - } -} - -impl fmt::Debug for Index { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - write!(fmt, "Index {{ }}") - } -} - -impl Drop for Index { - fn drop(&mut self) { - unsafe { - clang_disposeIndex(self.x); - } - } -} - -/// A translation unit (or "compilation unit"). -pub(crate) struct TranslationUnit { - x: CXTranslationUnit, -} - -impl fmt::Debug for TranslationUnit { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - write!(fmt, "TranslationUnit {{ }}") - } -} - -impl TranslationUnit { - /// Parse a source file into a translation unit. - pub(crate) fn parse( - ix: &Index, - file: &str, - cmd_args: &[Box], - unsaved: &[UnsavedFile], - opts: CXTranslationUnit_Flags, - ) -> Option { - let fname = CString::new(file).unwrap(); - let _c_args: Vec = cmd_args - .iter() - .map(|s| CString::new(s.as_bytes()).unwrap()) - .collect(); - let c_args: Vec<*const c_char> = - _c_args.iter().map(|s| s.as_ptr()).collect(); - let mut c_unsaved: Vec = - unsaved.iter().map(|f| f.x).collect(); - let tu = unsafe { - clang_parseTranslationUnit( - ix.x, - fname.as_ptr(), - c_args.as_ptr(), - c_args.len() as c_int, - c_unsaved.as_mut_ptr(), - c_unsaved.len() as c_uint, - opts, - ) - }; - if tu.is_null() { - None - } else { - Some(TranslationUnit { x: tu }) - } - } - - /// Get the Clang diagnostic information associated with this translation - /// unit. - pub(crate) fn diags(&self) -> Vec { - unsafe { - let num = clang_getNumDiagnostics(self.x) as usize; - let mut diags = vec![]; - for i in 0..num { - diags.push(Diagnostic { - x: clang_getDiagnostic(self.x, i as c_uint), - }); - } - diags - } - } - - /// Get a cursor pointing to the root of this translation unit's AST. - pub(crate) fn cursor(&self) -> Cursor { - unsafe { - Cursor { - x: clang_getTranslationUnitCursor(self.x), - } - } - } - - /// Save a translation unit to the given file. - pub(crate) fn save(&mut self, file: &str) -> Result<(), CXSaveError> { - let Ok(file) = CString::new(file) else { - return Err(CXSaveError_Unknown); - }; - let ret = unsafe { - clang_saveTranslationUnit( - self.x, - file.as_ptr(), - clang_defaultSaveOptions(self.x), - ) - }; - if ret != 0 { - Err(ret) - } else { - Ok(()) - } - } - - /// Is this the null translation unit? - pub(crate) fn is_null(&self) -> bool { - self.x.is_null() - } -} - -impl Drop for TranslationUnit { - fn drop(&mut self) { - unsafe { - clang_disposeTranslationUnit(self.x); - } - } -} - -/// Translation unit used for macro fallback parsing -pub(crate) struct FallbackTranslationUnit { - file_path: String, - pch_path: String, - idx: Box, - tu: TranslationUnit, -} - -impl fmt::Debug for FallbackTranslationUnit { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - write!(fmt, "FallbackTranslationUnit {{ }}") - } -} - -impl FallbackTranslationUnit { - /// Create a new fallback translation unit - pub(crate) fn new( - file: String, - pch_path: String, - c_args: &[Box], - ) -> Option { - // Create empty file - OpenOptions::new() - .write(true) - .create(true) - .truncate(true) - .open(&file) - .ok()?; - - let f_index = Box::new(Index::new(true, false)); - let f_translation_unit = TranslationUnit::parse( - &f_index, - &file, - c_args, - &[], - CXTranslationUnit_None, - )?; - Some(FallbackTranslationUnit { - file_path: file, - pch_path, - tu: f_translation_unit, - idx: f_index, - }) - } - - /// Get reference to underlying translation unit. 
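// Self-contained sketch of the argv-building pattern used by
// `TranslationUnit::parse` above: the owned `CString`s must stay alive for as
// long as the raw pointer array derived from them is in use, otherwise the
// pointers dangle. `fake_c_call` stands in for the actual FFI call
// (`clang_parseTranslationUnit` in the original).
use std::ffi::CString;
use std::os::raw::{c_char, c_int};

fn fake_c_call(argv: *const *const c_char, argc: c_int) -> c_int {
    // A real binding would hand these pointers to C; here we only report argc.
    let _ = argv;
    argc
}

fn main() {
    let args = ["-x", "c", "-std=c11"];
    // Keep the CStrings in a binding so they outlive the pointer vector.
    let owned: Vec<CString> =
        args.iter().map(|s| CString::new(*s).unwrap()).collect();
    let ptrs: Vec<*const c_char> = owned.iter().map(|s| s.as_ptr()).collect();
    let n = fake_c_call(ptrs.as_ptr(), ptrs.len() as c_int);
    assert_eq!(n, 3);
}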
- pub(crate) fn translation_unit(&self) -> &TranslationUnit { - &self.tu - } - - /// Reparse a translation unit. - pub(crate) fn reparse( - &mut self, - unsaved_contents: &str, - ) -> Result<(), CXErrorCode> { - let unsaved = &[UnsavedFile::new(&self.file_path, unsaved_contents)]; - let mut c_unsaved: Vec = - unsaved.iter().map(|f| f.x).collect(); - let ret = unsafe { - clang_reparseTranslationUnit( - self.tu.x, - unsaved.len() as c_uint, - c_unsaved.as_mut_ptr(), - clang_defaultReparseOptions(self.tu.x), - ) - }; - if ret != 0 { - Err(ret) - } else { - Ok(()) - } - } -} - -impl Drop for FallbackTranslationUnit { - fn drop(&mut self) { - let _ = std::fs::remove_file(&self.file_path); - let _ = std::fs::remove_file(&self.pch_path); - } -} - -/// A diagnostic message generated while parsing a translation unit. -pub(crate) struct Diagnostic { - x: CXDiagnostic, -} - -impl Diagnostic { - /// Format this diagnostic message as a string, using the given option bit - /// flags. - pub(crate) fn format(&self) -> String { - unsafe { - let opts = clang_defaultDiagnosticDisplayOptions(); - cxstring_into_string(clang_formatDiagnostic(self.x, opts)) - } - } - - /// What is the severity of this diagnostic message? - pub(crate) fn severity(&self) -> CXDiagnosticSeverity { - unsafe { clang_getDiagnosticSeverity(self.x) } - } -} - -impl Drop for Diagnostic { - /// Destroy this diagnostic message. - fn drop(&mut self) { - unsafe { - clang_disposeDiagnostic(self.x); - } - } -} - -/// A file which has not been saved to disk. -pub(crate) struct UnsavedFile { - x: CXUnsavedFile, - /// The name of the unsaved file. Kept here to avoid leaving dangling pointers in - /// `CXUnsavedFile`. - pub(crate) name: CString, - contents: CString, -} - -impl UnsavedFile { - /// Construct a new unsaved file with the given `name` and `contents`. - pub(crate) fn new(name: &str, contents: &str) -> UnsavedFile { - let name = CString::new(name.as_bytes()).unwrap(); - let contents = CString::new(contents.as_bytes()).unwrap(); - let x = CXUnsavedFile { - Filename: name.as_ptr(), - Contents: contents.as_ptr(), - Length: contents.as_bytes().len() as c_ulong, - }; - UnsavedFile { x, name, contents } - } -} - -impl fmt::Debug for UnsavedFile { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - write!( - fmt, - "UnsavedFile(name: {:?}, contents: {:?})", - self.name, self.contents - ) - } -} - -/// Convert a cursor kind into a static string. -pub(crate) fn kind_to_str(x: CXCursorKind) -> String { - unsafe { cxstring_into_string(clang_getCursorKindSpelling(x)) } -} - -/// Convert a type kind to a static string. -pub(crate) fn type_to_str(x: CXTypeKind) -> String { - unsafe { cxstring_into_string(clang_getTypeKindSpelling(x)) } -} - -/// Dump the Clang AST to stdout for debugging purposes. -pub(crate) fn ast_dump(c: &Cursor, depth: isize) -> CXChildVisitResult { - fn print_indent>(depth: isize, s: S) { - for _ in 0..depth { - print!(" "); - } - println!("{}", s.as_ref()); - } - - fn print_cursor>(depth: isize, prefix: S, c: &Cursor) { - let prefix = prefix.as_ref(); - print_indent( - depth, - format!(" {prefix}kind = {}", kind_to_str(c.kind())), - ); - print_indent( - depth, - format!(" {prefix}spelling = \"{}\"", c.spelling()), - ); - print_indent(depth, format!(" {prefix}location = {}", c.location())); - print_indent( - depth, - format!(" {prefix}is-definition? {}", c.is_definition()), - ); - print_indent( - depth, - format!(" {prefix}is-declaration? 
{}", c.is_declaration()), - ); - print_indent( - depth, - format!( - " {prefix}is-inlined-function? {}", - c.is_inlined_function() - ), - ); - - let templ_kind = c.template_kind(); - if templ_kind != CXCursor_NoDeclFound { - print_indent( - depth, - format!(" {prefix}template-kind = {}", kind_to_str(templ_kind)), - ); - } - if let Some(usr) = c.usr() { - print_indent(depth, format!(" {prefix}usr = \"{usr}\"")); - } - if let Ok(num) = c.num_args() { - print_indent(depth, format!(" {prefix}number-of-args = {num}")); - } - if let Some(num) = c.num_template_args() { - print_indent( - depth, - format!(" {prefix}number-of-template-args = {num}"), - ); - } - - if c.is_bit_field() { - let width = match c.bit_width() { - Some(w) => w.to_string(), - None => "".to_string(), - }; - print_indent(depth, format!(" {prefix}bit-width = {width}")); - } - - if let Some(ty) = c.enum_type() { - print_indent( - depth, - format!(" {prefix}enum-type = {}", type_to_str(ty.kind())), - ); - } - if let Some(val) = c.enum_val_signed() { - print_indent(depth, format!(" {prefix}enum-val = {val}")); - } - if let Some(ty) = c.typedef_type() { - print_indent( - depth, - format!(" {prefix}typedef-type = {}", type_to_str(ty.kind())), - ); - } - if let Some(ty) = c.ret_type() { - print_indent( - depth, - format!(" {prefix}ret-type = {}", type_to_str(ty.kind())), - ); - } - - if let Some(refd) = c.referenced() { - if refd != *c { - println!(); - print_cursor( - depth, - String::from(prefix) + "referenced.", - &refd, - ); - } - } - - let canonical = c.canonical(); - if canonical != *c { - println!(); - print_cursor( - depth, - String::from(prefix) + "canonical.", - &canonical, - ); - } - - if let Some(specialized) = c.specialized() { - if specialized != *c { - println!(); - print_cursor( - depth, - String::from(prefix) + "specialized.", - &specialized, - ); - } - } - - if let Some(parent) = c.fallible_semantic_parent() { - println!(); - print_cursor( - depth, - String::from(prefix) + "semantic-parent.", - &parent, - ); - } - } - - fn print_type>(depth: isize, prefix: S, ty: &Type) { - let prefix = prefix.as_ref(); - - let kind = ty.kind(); - print_indent(depth, format!(" {prefix}kind = {}", type_to_str(kind))); - if kind == CXType_Invalid { - return; - } - - print_indent(depth, format!(" {prefix}cconv = {}", ty.call_conv())); - - print_indent( - depth, - format!(" {prefix}spelling = \"{}\"", ty.spelling()), - ); - let num_template_args = - unsafe { clang_Type_getNumTemplateArguments(ty.x) }; - if num_template_args >= 0 { - print_indent( - depth, - format!( - " {prefix}number-of-template-args = {num_template_args}" - ), - ); - } - if let Some(num) = ty.num_elements() { - print_indent(depth, format!(" {prefix}number-of-elements = {num}")); - } - print_indent( - depth, - format!(" {prefix}is-variadic? 
{}", ty.is_variadic()), - ); - - let canonical = ty.canonical_type(); - if canonical != *ty { - println!(); - print_type(depth, String::from(prefix) + "canonical.", &canonical); - } - - if let Some(pointee) = ty.pointee_type() { - if pointee != *ty { - println!(); - print_type(depth, String::from(prefix) + "pointee.", &pointee); - } - } - - if let Some(elem) = ty.elem_type() { - if elem != *ty { - println!(); - print_type(depth, String::from(prefix) + "elements.", &elem); - } - } - - if let Some(ret) = ty.ret_type() { - if ret != *ty { - println!(); - print_type(depth, String::from(prefix) + "return.", &ret); - } - } - - let named = ty.named(); - if named != *ty && named.is_valid() { - println!(); - print_type(depth, String::from(prefix) + "named.", &named); - } - } - - print_indent(depth, "("); - print_cursor(depth, "", c); - - println!(); - let ty = c.cur_type(); - print_type(depth, "type.", &ty); - - let declaration = ty.declaration(); - if declaration != *c && declaration.kind() != CXCursor_NoDeclFound { - println!(); - print_cursor(depth, "type.declaration.", &declaration); - } - - // Recurse. - let mut found_children = false; - c.visit(|s| { - if !found_children { - println!(); - found_children = true; - } - ast_dump(&s, depth + 1) - }); - - print_indent(depth, ")"); - - CXChildVisit_Continue -} - -/// Try to extract the clang version to a string -pub(crate) fn extract_clang_version() -> String { - unsafe { cxstring_into_string(clang_getClangVersion()) } -} - -/// A wrapper for the result of evaluating an expression. -#[derive(Debug)] -pub(crate) struct EvalResult { - x: CXEvalResult, - ty: Type, -} - -impl EvalResult { - /// Evaluate `cursor` and return the result. - pub(crate) fn new(cursor: Cursor) -> Option { - // Work around https://bugs.llvm.org/show_bug.cgi?id=42532, see: - // * https://github.com/rust-lang/rust-bindgen/issues/283 - // * https://github.com/rust-lang/rust-bindgen/issues/1590 - { - let mut found_cant_eval = false; - cursor.visit(|c| { - if c.kind() == CXCursor_TypeRef && - c.cur_type().canonical_type().kind() == CXType_Unexposed - { - found_cant_eval = true; - return CXChildVisit_Break; - } - - CXChildVisit_Recurse - }); - - if found_cant_eval { - return None; - } - } - Some(EvalResult { - x: unsafe { clang_Cursor_Evaluate(cursor.x) }, - ty: cursor.cur_type().canonical_type(), - }) - } - - fn kind(&self) -> CXEvalResultKind { - unsafe { clang_EvalResult_getKind(self.x) } - } - - /// Try to get back the result as a double. - pub(crate) fn as_double(&self) -> Option { - match self.kind() { - CXEval_Float => { - Some(unsafe { clang_EvalResult_getAsDouble(self.x) }) - } - _ => None, - } - } - - /// Try to get back the result as an integer. - pub(crate) fn as_int(&self) -> Option { - if self.kind() != CXEval_Int { - return None; - } - - if unsafe { clang_EvalResult_isUnsignedInt(self.x) } != 0 { - let value = unsafe { clang_EvalResult_getAsUnsigned(self.x) }; - if value > i64::MAX as c_ulonglong { - return None; - } - - return Some(value as i64); - } - - let value = unsafe { clang_EvalResult_getAsLongLong(self.x) }; - if value > i64::MAX as c_longlong { - return None; - } - if value < i64::MIN as c_longlong { - return None; - } - #[allow(clippy::unnecessary_cast)] - Some(value as i64) - } - - /// Evaluates the expression as a literal string, that may or may not be - /// valid utf-8. 
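// Minimal sketch of the range check performed by `EvalResult::as_int` above:
// libclang may report the result as an unsigned 64-bit value, which has to be
// rejected when it does not fit into `i64`. `i64::try_from` expresses the same
// comparison against `i64::MAX` as the hand-written check.
fn narrow_unsigned(value: u64) -> Option<i64> {
    i64::try_from(value).ok()
}

fn main() {
    assert_eq!(narrow_unsigned(42), Some(42));
    assert_eq!(narrow_unsigned(i64::MAX as u64), Some(i64::MAX));
    assert_eq!(narrow_unsigned(u64::MAX), None);
}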
- pub(crate) fn as_literal_string(&self) -> Option> { - if self.kind() != CXEval_StrLiteral { - return None; - } - - let char_ty = self.ty.pointee_type().or_else(|| self.ty.elem_type())?; - match char_ty.kind() { - CXType_Char_S | CXType_SChar | CXType_Char_U | CXType_UChar => { - let ret = unsafe { - CStr::from_ptr(clang_EvalResult_getAsStr(self.x)) - }; - Some(ret.to_bytes().to_vec()) - } - // FIXME: Support generating these. - CXType_Char16 => None, - CXType_Char32 => None, - CXType_WChar => None, - _ => None, - } - } -} - -impl Drop for EvalResult { - fn drop(&mut self) { - unsafe { clang_EvalResult_dispose(self.x) }; - } -} -/// ABI kinds as defined in -/// -#[derive(Debug, Eq, PartialEq, Copy, Clone)] -pub(crate) enum ABIKind { - /// All the regular targets like Linux, Mac, WASM, etc. implement the Itanium ABI - GenericItanium, - /// The ABI used when compiling for the MSVC target - Microsoft, -} - -/// Target information obtained from libclang. -#[derive(Debug)] -pub(crate) struct TargetInfo { - /// The target triple. - pub(crate) triple: String, - /// The width of the pointer _in bits_. - pub(crate) pointer_width: usize, - /// The ABI of the target - pub(crate) abi: ABIKind, -} - -impl TargetInfo { - /// Tries to obtain target information from libclang. - pub(crate) fn new(tu: &TranslationUnit) -> Self { - let triple; - let pointer_width; - unsafe { - let ti = clang_getTranslationUnitTargetInfo(tu.x); - triple = cxstring_into_string(clang_TargetInfo_getTriple(ti)); - pointer_width = clang_TargetInfo_getPointerWidth(ti); - clang_TargetInfo_dispose(ti); - } - assert!(pointer_width > 0); - assert_eq!(pointer_width % 8, 0); - - let abi = if triple.contains("msvc") { - ABIKind::Microsoft - } else { - ABIKind::GenericItanium - }; - - TargetInfo { - triple, - pointer_width: pointer_width as usize, - abi, - } - } -} diff --git a/vendor/bindgen/codegen/bitfield_unit.rs b/vendor/bindgen/codegen/bitfield_unit.rs deleted file mode 100644 index 59c66f8cb733b4..00000000000000 --- a/vendor/bindgen/codegen/bitfield_unit.rs +++ /dev/null @@ -1,112 +0,0 @@ -#[repr(C)] -#[derive(Copy, Clone, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)] -pub struct __BindgenBitfieldUnit { - storage: Storage, -} - -impl __BindgenBitfieldUnit { - #[inline] - pub const fn new(storage: Storage) -> Self { - Self { storage } - } -} - -impl __BindgenBitfieldUnit -where - Storage: AsRef<[u8]> + AsMut<[u8]>, -{ - #[inline] - fn extract_bit(byte: u8, index: usize) -> bool { - let bit_index = if cfg!(target_endian = "big") { - 7 - (index % 8) - } else { - index % 8 - }; - - let mask = 1 << bit_index; - - byte & mask == mask - } - - #[inline] - pub fn get_bit(&self, index: usize) -> bool { - debug_assert!(index / 8 < self.storage.as_ref().len()); - - let byte_index = index / 8; - let byte = self.storage.as_ref()[byte_index]; - - Self::extract_bit(byte, index) - } - - #[inline] - fn change_bit(byte: u8, index: usize, val: bool) -> u8 { - let bit_index = if cfg!(target_endian = "big") { - 7 - (index % 8) - } else { - index % 8 - }; - - let mask = 1 << bit_index; - if val { - byte | mask - } else { - byte & !mask - } - } - - #[inline] - pub fn set_bit(&mut self, index: usize, val: bool) { - debug_assert!(index / 8 < self.storage.as_ref().len()); - - let byte_index = index / 8; - let byte = &mut self.storage.as_mut()[byte_index]; - - *byte = Self::change_bit(*byte, index, val); - } - - #[inline] - pub fn get(&self, bit_offset: usize, bit_width: u8) -> u64 { - debug_assert!(bit_width <= 64); - debug_assert!(bit_offset / 8 < 
self.storage.as_ref().len()); - debug_assert!( - (bit_offset + (bit_width as usize)) / 8 <= - self.storage.as_ref().len() - ); - - let mut val = 0; - - for i in 0..(bit_width as usize) { - if self.get_bit(i + bit_offset) { - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i - } else { - i - }; - val |= 1 << index; - } - } - - val - } - - #[inline] - pub fn set(&mut self, bit_offset: usize, bit_width: u8, val: u64) { - debug_assert!(bit_width <= 64); - debug_assert!(bit_offset / 8 < self.storage.as_ref().len()); - debug_assert!( - (bit_offset + (bit_width as usize)) / 8 <= - self.storage.as_ref().len() - ); - - for i in 0..(bit_width as usize) { - let mask = 1 << i; - let val_bit_is_set = val & mask == mask; - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i - } else { - i - }; - self.set_bit(index + bit_offset, val_bit_is_set); - } - } -} diff --git a/vendor/bindgen/codegen/bitfield_unit_raw_ref_macros.rs b/vendor/bindgen/codegen/bitfield_unit_raw_ref_macros.rs deleted file mode 100644 index 0c864c7369ebe5..00000000000000 --- a/vendor/bindgen/codegen/bitfield_unit_raw_ref_macros.rs +++ /dev/null @@ -1,191 +0,0 @@ -#[repr(C)] -#[derive(Copy, Clone, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)] -pub struct __BindgenBitfieldUnit { - storage: Storage, -} - -impl __BindgenBitfieldUnit { - #[inline] - pub const fn new(storage: Storage) -> Self { - Self { storage } - } -} - -impl __BindgenBitfieldUnit -where - Storage: AsRef<[u8]> + AsMut<[u8]>, -{ - #[inline] - fn extract_bit(byte: u8, index: usize) -> bool { - let bit_index = if cfg!(target_endian = "big") { - 7 - (index % 8) - } else { - index % 8 - }; - - let mask = 1 << bit_index; - - byte & mask == mask - } - - #[inline] - pub fn get_bit(&self, index: usize) -> bool { - debug_assert!(index / 8 < self.storage.as_ref().len()); - - let byte_index = index / 8; - let byte = self.storage.as_ref()[byte_index]; - - Self::extract_bit(byte, index) - } - - #[inline] - pub unsafe fn raw_get_bit(this: *const Self, index: usize) -> bool { - debug_assert!(index / 8 < core::mem::size_of::()); - - let byte_index = index / 8; - let byte = unsafe { *(core::ptr::addr_of!((*this).storage) as *const u8) - .offset(byte_index as isize) }; - - Self::extract_bit(byte, index) - } - - #[inline] - fn change_bit(byte: u8, index: usize, val: bool) -> u8 { - let bit_index = if cfg!(target_endian = "big") { - 7 - (index % 8) - } else { - index % 8 - }; - - let mask = 1 << bit_index; - if val { - byte | mask - } else { - byte & !mask - } - } - - #[inline] - pub fn set_bit(&mut self, index: usize, val: bool) { - debug_assert!(index / 8 < self.storage.as_ref().len()); - - let byte_index = index / 8; - let byte = &mut self.storage.as_mut()[byte_index]; - - *byte = Self::change_bit(*byte, index, val); - } - - #[inline] - pub unsafe fn raw_set_bit(this: *mut Self, index: usize, val: bool) { - debug_assert!(index / 8 < core::mem::size_of::()); - - let byte_index = index / 8; - let byte = unsafe { - (core::ptr::addr_of_mut!((*this).storage) as *mut u8) - .offset(byte_index as isize) - }; - - unsafe { *byte = Self::change_bit(*byte, index, val) }; - } - - #[inline] - pub fn get(&self, bit_offset: usize, bit_width: u8) -> u64 { - debug_assert!(bit_width <= 64); - debug_assert!(bit_offset / 8 < self.storage.as_ref().len()); - debug_assert!( - (bit_offset + (bit_width as usize)) / 8 <= - self.storage.as_ref().len() - ); - - let mut val = 0; - - for i in 0..(bit_width as usize) { - if self.get_bit(i + bit_offset) { - let index = 
if cfg!(target_endian = "big") { - bit_width as usize - 1 - i - } else { - i - }; - val |= 1 << index; - } - } - - val - } - - #[inline] - pub unsafe fn raw_get( - this: *const Self, - bit_offset: usize, - bit_width: u8, - ) -> u64 { - debug_assert!(bit_width <= 64); - debug_assert!(bit_offset / 8 < core::mem::size_of::()); - debug_assert!( - (bit_offset + (bit_width as usize)) / 8 <= - core::mem::size_of::() - ); - - let mut val = 0; - - for i in 0..(bit_width as usize) { - if unsafe { Self::raw_get_bit(this, i + bit_offset) } { - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i - } else { - i - }; - val |= 1 << index; - } - } - - val - } - - #[inline] - pub fn set(&mut self, bit_offset: usize, bit_width: u8, val: u64) { - debug_assert!(bit_width <= 64); - debug_assert!(bit_offset / 8 < self.storage.as_ref().len()); - debug_assert!( - (bit_offset + (bit_width as usize)) / 8 <= - self.storage.as_ref().len() - ); - - for i in 0..(bit_width as usize) { - let mask = 1 << i; - let val_bit_is_set = val & mask == mask; - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i - } else { - i - }; - self.set_bit(index + bit_offset, val_bit_is_set); - } - } - - #[inline] - pub unsafe fn raw_set( - this: *mut Self, - bit_offset: usize, - bit_width: u8, - val: u64, - ) { - debug_assert!(bit_width <= 64); - debug_assert!(bit_offset / 8 < core::mem::size_of::()); - debug_assert!( - (bit_offset + (bit_width as usize)) / 8 <= - core::mem::size_of::() - ); - - for i in 0..(bit_width as usize) { - let mask = 1 << i; - let val_bit_is_set = val & mask == mask; - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i - } else { - i - }; - unsafe { Self::raw_set_bit(this, index + bit_offset, val_bit_is_set) }; - } - } -} diff --git a/vendor/bindgen/codegen/bitfield_unit_tests.rs b/vendor/bindgen/codegen/bitfield_unit_tests.rs deleted file mode 100644 index ead0ffec0c2d76..00000000000000 --- a/vendor/bindgen/codegen/bitfield_unit_tests.rs +++ /dev/null @@ -1,260 +0,0 @@ -//! Tests for `__BindgenBitfieldUnit`. -//! -//! Note that bit-fields are allocated right to left (least to most significant -//! bits). -//! -//! From the x86 PS ABI: -//! -//! ```c -//! struct { -//! int j : 5; -//! int k : 6; -//! int m : 7; -//! }; -//! ``` -//! -//! ```ignore -//! +------------------------------------------------------------+ -//! | | | | | -//! | padding | m | k | j | -//! |31 18|17 11|10 5|4 0| -//! +------------------------------------------------------------+ -//! ``` - -use super::bitfield_unit::__BindgenBitfieldUnit; - -#[test] -fn bitfield_unit_get_bit() { - let unit = __BindgenBitfieldUnit::<[u8; 2]>::new([0b10011101, 0b00011101]); - - let mut bits = vec![]; - for i in 0..16 { - bits.push(unit.get_bit(i)); - } - - println!(); - println!("bits = {bits:?}"); - assert_eq!( - bits, - &[ - // 0b10011101 - true, false, true, true, true, false, false, true, - // 0b00011101 - true, false, true, true, true, false, false, false - ] - ); -} - -#[test] -fn bitfield_unit_set_bit() { - let mut unit = - __BindgenBitfieldUnit::<[u8; 2]>::new([0b00000000, 0b00000000]); - - for i in 0..16 { - if i % 3 == 0 { - unit.set_bit(i, true); - } - } - - for i in 0..16 { - assert_eq!(unit.get_bit(i), i % 3 == 0); - } - - let mut unit = - __BindgenBitfieldUnit::<[u8; 2]>::new([0b11111111, 0b11111111]); - - for i in 0..16 { - if i % 3 == 0 { - unit.set_bit(i, false); - } - } - - for i in 0..16 { - assert_eq!(unit.get_bit(i), i % 3 != 0); - } -} - -macro_rules! 
bitfield_unit_get { - ( - $( - With $storage:expr , then get($start:expr, $len:expr) is $expected:expr; - )* - ) => { - #[test] - fn bitfield_unit_get() { - $({ - let expected = $expected; - let unit = __BindgenBitfieldUnit::<_>::new($storage); - let actual = unit.get($start, $len); - - println!(); - println!("expected = {expected:064b}"); - println!("actual = {actual:064b}"); - - assert_eq!(expected, actual); - })* - } - } -} - -bitfield_unit_get! { - // Let's just exhaustively test getting the bits from a single byte, since - // there are few enough combinations... - - With [0b11100010], then get(0, 1) is 0; - With [0b11100010], then get(1, 1) is 1; - With [0b11100010], then get(2, 1) is 0; - With [0b11100010], then get(3, 1) is 0; - With [0b11100010], then get(4, 1) is 0; - With [0b11100010], then get(5, 1) is 1; - With [0b11100010], then get(6, 1) is 1; - With [0b11100010], then get(7, 1) is 1; - - With [0b11100010], then get(0, 2) is 0b10; - With [0b11100010], then get(1, 2) is 0b01; - With [0b11100010], then get(2, 2) is 0b00; - With [0b11100010], then get(3, 2) is 0b00; - With [0b11100010], then get(4, 2) is 0b10; - With [0b11100010], then get(5, 2) is 0b11; - With [0b11100010], then get(6, 2) is 0b11; - - With [0b11100010], then get(0, 3) is 0b010; - With [0b11100010], then get(1, 3) is 0b001; - With [0b11100010], then get(2, 3) is 0b000; - With [0b11100010], then get(3, 3) is 0b100; - With [0b11100010], then get(4, 3) is 0b110; - With [0b11100010], then get(5, 3) is 0b111; - - With [0b11100010], then get(0, 4) is 0b0010; - With [0b11100010], then get(1, 4) is 0b0001; - With [0b11100010], then get(2, 4) is 0b1000; - With [0b11100010], then get(3, 4) is 0b1100; - With [0b11100010], then get(4, 4) is 0b1110; - - With [0b11100010], then get(0, 5) is 0b00010; - With [0b11100010], then get(1, 5) is 0b10001; - With [0b11100010], then get(2, 5) is 0b11000; - With [0b11100010], then get(3, 5) is 0b11100; - - With [0b11100010], then get(0, 6) is 0b100010; - With [0b11100010], then get(1, 6) is 0b110001; - With [0b11100010], then get(2, 6) is 0b111000; - - With [0b11100010], then get(0, 7) is 0b1100010; - With [0b11100010], then get(1, 7) is 0b1110001; - - With [0b11100010], then get(0, 8) is 0b11100010; - - // OK. Now let's test getting bits from across byte boundaries. - - With [0b01010101, 0b11111111, 0b00000000, 0b11111111], - then get(0, 16) is 0b1111111101010101; - - With [0b01010101, 0b11111111, 0b00000000, 0b11111111], - then get(1, 16) is 0b0111111110101010; - - With [0b01010101, 0b11111111, 0b00000000, 0b11111111], - then get(2, 16) is 0b0011111111010101; - - With [0b01010101, 0b11111111, 0b00000000, 0b11111111], - then get(3, 16) is 0b0001111111101010; - - With [0b01010101, 0b11111111, 0b00000000, 0b11111111], - then get(4, 16) is 0b0000111111110101; - - With [0b01010101, 0b11111111, 0b00000000, 0b11111111], - then get(5, 16) is 0b0000011111111010; - - With [0b01010101, 0b11111111, 0b00000000, 0b11111111], - then get(6, 16) is 0b0000001111111101; - - With [0b01010101, 0b11111111, 0b00000000, 0b11111111], - then get(7, 16) is 0b0000000111111110; - - With [0b01010101, 0b11111111, 0b00000000, 0b11111111], - then get(8, 16) is 0b0000000011111111; -} - -macro_rules! 
bitfield_unit_set { - ( - $( - set($start:expr, $len:expr, $val:expr) is $expected:expr; - )* - ) => { - #[test] - fn bitfield_unit_set() { - $( - let mut unit = __BindgenBitfieldUnit::<[u8; 4]>::new([0, 0, 0, 0]); - unit.set($start, $len, $val); - let actual = unit.get(0, 32); - - println!(); - println!("set({}, {}, {:032b}", $start, $len, $val); - println!("expected = {:064b}", $expected); - println!("actual = {actual:064b}"); - - assert_eq!($expected, actual); - )* - } - } -} - -bitfield_unit_set! { - // Once again, let's exhaustively test single byte combinations. - - set(0, 1, 0b11111111) is 0b00000001; - set(1, 1, 0b11111111) is 0b00000010; - set(2, 1, 0b11111111) is 0b00000100; - set(3, 1, 0b11111111) is 0b00001000; - set(4, 1, 0b11111111) is 0b00010000; - set(5, 1, 0b11111111) is 0b00100000; - set(6, 1, 0b11111111) is 0b01000000; - set(7, 1, 0b11111111) is 0b10000000; - - set(0, 2, 0b11111111) is 0b00000011; - set(1, 2, 0b11111111) is 0b00000110; - set(2, 2, 0b11111111) is 0b00001100; - set(3, 2, 0b11111111) is 0b00011000; - set(4, 2, 0b11111111) is 0b00110000; - set(5, 2, 0b11111111) is 0b01100000; - set(6, 2, 0b11111111) is 0b11000000; - - set(0, 3, 0b11111111) is 0b00000111; - set(1, 3, 0b11111111) is 0b00001110; - set(2, 3, 0b11111111) is 0b00011100; - set(3, 3, 0b11111111) is 0b00111000; - set(4, 3, 0b11111111) is 0b01110000; - set(5, 3, 0b11111111) is 0b11100000; - - set(0, 4, 0b11111111) is 0b00001111; - set(1, 4, 0b11111111) is 0b00011110; - set(2, 4, 0b11111111) is 0b00111100; - set(3, 4, 0b11111111) is 0b01111000; - set(4, 4, 0b11111111) is 0b11110000; - - set(0, 5, 0b11111111) is 0b00011111; - set(1, 5, 0b11111111) is 0b00111110; - set(2, 5, 0b11111111) is 0b01111100; - set(3, 5, 0b11111111) is 0b11111000; - - set(0, 6, 0b11111111) is 0b00111111; - set(1, 6, 0b11111111) is 0b01111110; - set(2, 6, 0b11111111) is 0b11111100; - - set(0, 7, 0b11111111) is 0b01111111; - set(1, 7, 0b11111111) is 0b11111110; - - set(0, 8, 0b11111111) is 0b11111111; - - // And, now let's cross byte boundaries. - - set(0, 16, 0b1111111111111111) is 0b00000000000000001111111111111111; - set(1, 16, 0b1111111111111111) is 0b00000000000000011111111111111110; - set(2, 16, 0b1111111111111111) is 0b00000000000000111111111111111100; - set(3, 16, 0b1111111111111111) is 0b00000000000001111111111111111000; - set(4, 16, 0b1111111111111111) is 0b00000000000011111111111111110000; - set(5, 16, 0b1111111111111111) is 0b00000000000111111111111111100000; - set(6, 16, 0b1111111111111111) is 0b00000000001111111111111111000000; - set(7, 16, 0b1111111111111111) is 0b00000000011111111111111110000000; - set(8, 16, 0b1111111111111111) is 0b00000000111111111111111100000000; -} diff --git a/vendor/bindgen/codegen/dyngen.rs b/vendor/bindgen/codegen/dyngen.rs deleted file mode 100644 index 76f3805795326a..00000000000000 --- a/vendor/bindgen/codegen/dyngen.rs +++ /dev/null @@ -1,258 +0,0 @@ -use crate::codegen; -use crate::ir::context::BindgenContext; -use crate::ir::function::ClangAbi; -use proc_macro2::{Ident, TokenStream}; - -/// Used to build the output tokens for dynamic bindings. -#[derive(Default)] -pub(crate) struct DynamicItems { - /// Tracks the tokens that will appears inside the library struct -- e.g.: - /// ```ignore - /// struct Lib { - /// __library: ::libloading::Library, - /// pub x: Result, // <- tracks these - /// ... - /// } - /// ``` - struct_members: Vec, - - /// Tracks the tokens that will appear inside the library struct's implementation, e.g.: - /// - /// ```ignore - /// impl Lib { - /// ... 
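// Self-contained sketch of the bit addressing used by `__BindgenBitfieldUnit`
// and exercised by the tests above: bit `index` lives in byte `index / 8`, and
// within that byte the bit position is mirrored on big-endian targets so the
// generated accessors match the C bit-field layout. Free functions are used
// here instead of the generic storage wrapper for brevity.
fn get_bit(storage: &[u8], index: usize) -> bool {
    let byte = storage[index / 8];
    let bit_index = if cfg!(target_endian = "big") {
        7 - (index % 8)
    } else {
        index % 8
    };
    byte & (1 << bit_index) != 0
}

fn set_bit(storage: &mut [u8], index: usize, val: bool) {
    let bit_index = if cfg!(target_endian = "big") {
        7 - (index % 8)
    } else {
        index % 8
    };
    let mask = 1u8 << bit_index;
    if val {
        storage[index / 8] |= mask;
    } else {
        storage[index / 8] &= !mask;
    }
}

fn main() {
    let mut unit = [0u8; 2];
    set_bit(&mut unit, 0, true);
    set_bit(&mut unit, 9, true);
    assert!(get_bit(&unit, 0));
    assert!(get_bit(&unit, 9));
    assert!(!get_bit(&unit, 1));
}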
- /// pub unsafe fn foo(&self, ...) { // <- tracks these - /// ... - /// } - /// } - /// ``` - struct_implementation: Vec, - - /// Tracks the initialization of the fields inside the `::new` constructor of the library - /// struct, e.g.: - /// ```ignore - /// impl Lib { - /// - /// pub unsafe fn new
<P>
(path: P) -> Result - /// where - /// P: AsRef<::std::ffi::OsStr>, - /// { - /// ... - /// let foo = __library.get(...) ...; // <- tracks these - /// ... - /// } - /// - /// ... - /// } - /// ``` - constructor_inits: Vec, - - /// Tracks the information that is passed to the library struct at the end of the `::new` - /// constructor, e.g.: - /// ```ignore - /// impl LibFoo { - /// pub unsafe fn new
<P>
(path: P) -> Result - /// where - /// P: AsRef<::std::ffi::OsStr>, - /// { - /// ... - /// Ok(LibFoo { - /// __library: __library, - /// foo, - /// bar, // <- tracks these - /// ... - /// }) - /// } - /// } - /// ``` - init_fields: Vec, -} - -impl DynamicItems { - pub(crate) fn new() -> Self { - Self::default() - } - - pub(crate) fn get_tokens( - &self, - lib_ident: &Ident, - ctx: &BindgenContext, - ) -> TokenStream { - let struct_members = &self.struct_members; - let constructor_inits = &self.constructor_inits; - let init_fields = &self.init_fields; - let struct_implementation = &self.struct_implementation; - - let library_new = if ctx.options().wrap_unsafe_ops { - quote!(unsafe { ::libloading::Library::new(path) }) - } else { - quote!(::libloading::Library::new(path)) - }; - - let from_library = if ctx.options().wrap_unsafe_ops { - quote!(unsafe { Self::from_library(library) }) - } else { - quote!(Self::from_library(library)) - }; - - quote! { - pub struct #lib_ident { - __library: ::libloading::Library, - #(#struct_members)* - } - - impl #lib_ident { - pub unsafe fn new

*/ - /// Baz = 0, - /// }; - /// ``` - /// - /// In that case, bindgen will generate a constant for `Bar` instead of - /// `Baz`. - constify_enum_variant: bool, - /// List of explicit derives for this type. - derives: Vec, - /// List of explicit attributes for this type. - attributes: Vec, -} - -fn parse_accessor(s: &str) -> FieldAccessorKind { - match s { - "false" => FieldAccessorKind::None, - "unsafe" => FieldAccessorKind::Unsafe, - "immutable" => FieldAccessorKind::Immutable, - _ => FieldAccessorKind::Regular, - } -} - -impl Annotations { - /// Construct new annotations for the given cursor and its bindgen comments - /// (if any). - pub(crate) fn new(cursor: &clang::Cursor) -> Option { - let mut anno = Annotations::default(); - let mut matched_one = false; - anno.parse(&cursor.comment(), &mut matched_one); - - if matched_one { - Some(anno) - } else { - None - } - } - - /// Should this type be hidden? - pub(crate) fn hide(&self) -> bool { - self.hide - } - - /// Should this type be opaque? - pub(crate) fn opaque(&self) -> bool { - self.opaque - } - - /// For a given type, indicates the type it should replace. - /// - /// For example, in the following code: - /// - /// ```cpp - /// - /// /**
*/ - /// struct Foo { int x; }; - /// - /// struct Bar { char foo; }; - /// ``` - /// - /// the generated code would look something like: - /// - /// ``` - /// /**
*/ - /// struct Bar { - /// x: ::std::os::raw::c_int, - /// }; - /// ``` - /// - /// That is, code for `Foo` is used to generate `Bar`. - pub(crate) fn use_instead_of(&self) -> Option<&[String]> { - self.use_instead_of.as_deref() - } - - /// The list of derives that have been specified in this annotation. - pub(crate) fn derives(&self) -> &[String] { - &self.derives - } - - /// The list of attributes that have been specified in this annotation. - pub(crate) fn attributes(&self) -> &[String] { - &self.attributes - } - - /// Should we avoid implementing the `Copy` trait? - pub(crate) fn disallow_copy(&self) -> bool { - self.disallow_copy - } - - /// Should we avoid implementing the `Debug` trait? - pub(crate) fn disallow_debug(&self) -> bool { - self.disallow_debug - } - - /// Should we avoid implementing the `Default` trait? - pub(crate) fn disallow_default(&self) -> bool { - self.disallow_default - } - - /// Should this type get a `#[must_use]` annotation? - pub(crate) fn must_use_type(&self) -> bool { - self.must_use_type - } - - /// What kind of accessors should we provide for this type's fields? - pub(crate) fn visibility_kind(&self) -> Option { - self.visibility_kind - } - - /// What kind of accessors should we provide for this type's fields? - pub(crate) fn accessor_kind(&self) -> Option { - self.accessor_kind - } - - fn parse(&mut self, comment: &clang::Comment, matched: &mut bool) { - use clang_sys::CXComment_HTMLStartTag; - if comment.kind() == CXComment_HTMLStartTag && - comment.get_tag_name() == "div" && - comment - .get_tag_attrs() - .next() - .is_some_and(|attr| attr.name == "rustbindgen") - { - *matched = true; - for attr in comment.get_tag_attrs() { - match attr.name.as_str() { - "opaque" => self.opaque = true, - "hide" => self.hide = true, - "nocopy" => self.disallow_copy = true, - "nodebug" => self.disallow_debug = true, - "nodefault" => self.disallow_default = true, - "mustusetype" => self.must_use_type = true, - "replaces" => { - self.use_instead_of = Some( - attr.value.split("::").map(Into::into).collect(), - ); - } - "derive" => self.derives.push(attr.value), - "attribute" => self.attributes.push(attr.value), - "private" => { - self.visibility_kind = if attr.value == "false" { - Some(FieldVisibilityKind::Public) - } else { - Some(FieldVisibilityKind::Private) - }; - } - "accessor" => { - self.accessor_kind = Some(parse_accessor(&attr.value)); - } - "constant" => self.constify_enum_variant = true, - _ => {} - } - } - } - - for child in comment.get_children() { - self.parse(&child, matched); - } - } - - /// Returns whether we've parsed a "constant" attribute. - pub(crate) fn constify_enum_variant(&self) -> bool { - self.constify_enum_variant - } -} diff --git a/vendor/bindgen/ir/comment.rs b/vendor/bindgen/ir/comment.rs deleted file mode 100644 index a4ba3201867bc8..00000000000000 --- a/vendor/bindgen/ir/comment.rs +++ /dev/null @@ -1,100 +0,0 @@ -//! Utilities for manipulating C/C++ comments. - -/// The type of a comment. -#[derive(Debug, PartialEq, Eq)] -enum Kind { - /// A `///` comment, or something of the like. - /// All lines in a comment should start with the same symbol. - SingleLines, - /// A `/**` comment, where each other line can start with `*` and the - /// entire block ends with `*/`. - MultiLine, -} - -/// Preprocesses a C/C++ comment so that it is a valid Rust comment. 
-pub(crate) fn preprocess(comment: &str) -> String { - match kind(comment) { - Some(Kind::SingleLines) => preprocess_single_lines(comment), - Some(Kind::MultiLine) => preprocess_multi_line(comment), - None => comment.to_owned(), - } -} - -/// Gets the kind of the doc comment, if it is one. -fn kind(comment: &str) -> Option { - if comment.starts_with("/*") { - Some(Kind::MultiLine) - } else if comment.starts_with("//") { - Some(Kind::SingleLines) - } else { - None - } -} - -/// Preprocesses multiple single line comments. -/// -/// Handles lines starting with both `//` and `///`. -fn preprocess_single_lines(comment: &str) -> String { - debug_assert!(comment.starts_with("//"), "comment is not single line"); - - let lines: Vec<_> = comment - .lines() - .map(|l| l.trim().trim_start_matches('/')) - .collect(); - lines.join("\n") -} - -fn preprocess_multi_line(comment: &str) -> String { - let comment = comment - .trim_start_matches('/') - .trim_end_matches('/') - .trim_end_matches('*'); - - // Strip any potential `*` characters preceding each line. - let mut lines: Vec<_> = comment - .lines() - .map(|line| line.trim().trim_start_matches('*').trim_start_matches('!')) - .skip_while(|line| line.trim().is_empty()) // Skip the first empty lines. - .collect(); - - // Remove the trailing line corresponding to the `*/`. - if lines.last().is_some_and(|l| l.trim().is_empty()) { - lines.pop(); - } - - lines.join("\n") -} - -#[cfg(test)] -mod test { - use super::*; - - #[test] - fn picks_up_single_and_multi_line_doc_comments() { - assert_eq!(kind("/// hello"), Some(Kind::SingleLines)); - assert_eq!(kind("/** world */"), Some(Kind::MultiLine)); - } - - #[test] - fn processes_single_lines_correctly() { - assert_eq!(preprocess("///"), ""); - assert_eq!(preprocess("/// hello"), " hello"); - assert_eq!(preprocess("// hello"), " hello"); - assert_eq!(preprocess("// hello"), " hello"); - } - - #[test] - fn processes_multi_lines_correctly() { - assert_eq!(preprocess("/**/"), ""); - - assert_eq!( - preprocess("/** hello \n * world \n * foo \n */"), - " hello\n world\n foo" - ); - - assert_eq!( - preprocess("/**\nhello\n*world\n*foo\n*/"), - "hello\nworld\nfoo" - ); - } -} diff --git a/vendor/bindgen/ir/comp.rs b/vendor/bindgen/ir/comp.rs deleted file mode 100644 index 655e0f1fa5d939..00000000000000 --- a/vendor/bindgen/ir/comp.rs +++ /dev/null @@ -1,1921 +0,0 @@ -//! Compound types (unions and structs) in our intermediate representation. - -use itertools::Itertools; - -use super::analysis::Sizedness; -use super::annotations::Annotations; -use super::context::{BindgenContext, FunctionId, ItemId, TypeId, VarId}; -use super::dot::DotAttributes; -use super::item::{IsOpaque, Item}; -use super::layout::Layout; -use super::template::TemplateParameters; -use super::traversal::{EdgeKind, Trace, Tracer}; -use super::ty::RUST_DERIVE_IN_ARRAY_LIMIT; -use crate::clang; -use crate::codegen::struct_layout::{align_to, bytes_from_bits_pow2}; -use crate::ir::derive::CanDeriveCopy; -use crate::parse::ParseError; -use crate::HashMap; -use crate::NonCopyUnionStyle; -use std::cmp; -use std::io; -use std::mem; - -/// The kind of compound type. -#[derive(Debug, Copy, Clone, PartialEq, Eq)] -pub(crate) enum CompKind { - /// A struct. - Struct, - /// A union. - Union, -} - -/// The kind of C++ method. -#[derive(Debug, Copy, Clone, PartialEq, Eq)] -pub(crate) enum MethodKind { - /// A constructor. We represent it as method for convenience, to avoid code - /// duplication. - Constructor, - /// A destructor. 
- Destructor, - /// A virtual destructor. - VirtualDestructor { - /// Whether it's pure virtual. - pure_virtual: bool, - }, - /// A static method. - Static, - /// A normal method. - Normal, - /// A virtual method. - Virtual { - /// Whether it's pure virtual. - pure_virtual: bool, - }, -} - -impl MethodKind { - /// Is this a destructor method? - pub(crate) fn is_destructor(self) -> bool { - matches!( - self, - MethodKind::Destructor | MethodKind::VirtualDestructor { .. } - ) - } - - /// Is this a pure virtual method? - pub(crate) fn is_pure_virtual(self) -> bool { - match self { - MethodKind::Virtual { pure_virtual } | - MethodKind::VirtualDestructor { pure_virtual } => pure_virtual, - _ => false, - } - } -} - -/// A struct representing a C++ method, either static, normal, or virtual. -#[derive(Debug)] -pub(crate) struct Method { - kind: MethodKind, - /// The signature of the method. Take into account this is not a `Type` - /// item, but a `Function` one. - /// - /// This is tricky and probably this field should be renamed. - signature: FunctionId, - is_const: bool, -} - -impl Method { - /// Construct a new `Method`. - pub(crate) fn new( - kind: MethodKind, - signature: FunctionId, - is_const: bool, - ) -> Self { - Method { - kind, - signature, - is_const, - } - } - - /// What kind of method is this? - pub(crate) fn kind(&self) -> MethodKind { - self.kind - } - - /// Is this a constructor? - pub(crate) fn is_constructor(&self) -> bool { - self.kind == MethodKind::Constructor - } - - /// Is this a virtual method? - pub(crate) fn is_virtual(&self) -> bool { - matches!( - self.kind, - MethodKind::Virtual { .. } | MethodKind::VirtualDestructor { .. } - ) - } - - /// Is this a static method? - pub(crate) fn is_static(&self) -> bool { - self.kind == MethodKind::Static - } - - /// Get the ID for the `Function` signature for this method. - pub(crate) fn signature(&self) -> FunctionId { - self.signature - } - - /// Is this a const qualified method? - pub(crate) fn is_const(&self) -> bool { - self.is_const - } -} - -/// Methods common to the various field types. -pub(crate) trait FieldMethods { - /// Get the name of this field. - fn name(&self) -> Option<&str>; - - /// Get the type of this field. - fn ty(&self) -> TypeId; - - /// Get the comment for this field. - fn comment(&self) -> Option<&str>; - - /// If this is a bitfield, how many bits does it need? - fn bitfield_width(&self) -> Option; - - /// Is this field declared public? - fn is_public(&self) -> bool; - - /// Get the annotations for this field. - fn annotations(&self) -> &Annotations; - - /// The offset of the field (in bits) - fn offset(&self) -> Option; -} - -/// A contiguous set of logical bitfields that live within the same physical -/// allocation unit. See 9.2.4 [class.bit] in the C++ standard and [section -/// 2.4.II.1 in the Itanium C++ -/// ABI](http://itanium-cxx-abi.github.io/cxx-abi/abi.html#class-types). -#[derive(Debug)] -pub(crate) struct BitfieldUnit { - nth: usize, - layout: Layout, - bitfields: Vec, -} - -impl BitfieldUnit { - /// Get the 1-based index of this bitfield unit within its containing - /// struct. Useful for generating a Rust struct's field name for this unit - /// of bitfields. - pub(crate) fn nth(&self) -> usize { - self.nth - } - - /// Get the layout within which these bitfields reside. - pub(crate) fn layout(&self) -> Layout { - self.layout - } - - /// Get the bitfields within this unit. - pub(crate) fn bitfields(&self) -> &[Bitfield] { - &self.bitfields - } -} - -/// A struct representing a C++ field. 
-#[derive(Debug)] -pub(crate) enum Field { - /// A normal data member. - DataMember(FieldData), - - /// A physical allocation unit containing many logical bitfields. - Bitfields(BitfieldUnit), -} - -impl Field { - /// Get this field's layout. - pub(crate) fn layout(&self, ctx: &BindgenContext) -> Option { - match *self { - Field::Bitfields(BitfieldUnit { layout, .. }) => Some(layout), - Field::DataMember(ref data) => { - ctx.resolve_type(data.ty).layout(ctx) - } - } - } -} - -impl Trace for Field { - type Extra = (); - - fn trace(&self, _: &BindgenContext, tracer: &mut T, _: &()) - where - T: Tracer, - { - match *self { - Field::DataMember(ref data) => { - tracer.visit_kind(data.ty.into(), EdgeKind::Field); - } - Field::Bitfields(BitfieldUnit { ref bitfields, .. }) => { - for bf in bitfields { - tracer.visit_kind(bf.ty().into(), EdgeKind::Field); - } - } - } - } -} - -impl DotAttributes for Field { - fn dot_attributes( - &self, - ctx: &BindgenContext, - out: &mut W, - ) -> io::Result<()> - where - W: io::Write, - { - match *self { - Field::DataMember(ref data) => data.dot_attributes(ctx, out), - Field::Bitfields(BitfieldUnit { - layout, - ref bitfields, - .. - }) => { - writeln!( - out, - r#" - bitfield unit - - - - - - - - - "#, - layout.size, layout.align - )?; - for bf in bitfields { - bf.dot_attributes(ctx, out)?; - } - writeln!(out, "
unit.size{}
unit.align{}
") - } - } - } -} - -impl DotAttributes for FieldData { - fn dot_attributes( - &self, - _ctx: &BindgenContext, - out: &mut W, - ) -> io::Result<()> - where - W: io::Write, - { - writeln!( - out, - "{}{:?}", - self.name().unwrap_or("(anonymous)"), - self.ty() - ) - } -} - -impl DotAttributes for Bitfield { - fn dot_attributes( - &self, - _ctx: &BindgenContext, - out: &mut W, - ) -> io::Result<()> - where - W: io::Write, - { - writeln!( - out, - "{} : {}{:?}", - self.name().unwrap_or("(anonymous)"), - self.width(), - self.ty() - ) - } -} - -/// A logical bitfield within some physical bitfield allocation unit. -#[derive(Debug)] -pub(crate) struct Bitfield { - /// Index of the bit within this bitfield's allocation unit where this - /// bitfield's bits begin. - offset_into_unit: usize, - - /// The field data for this bitfield. - data: FieldData, - - /// Name of the generated Rust getter for this bitfield. - /// - /// Should be assigned before codegen. - getter_name: Option, - - /// Name of the generated Rust setter for this bitfield. - /// - /// Should be assigned before codegen. - setter_name: Option, -} - -impl Bitfield { - /// Construct a new bitfield. - fn new(offset_into_unit: usize, raw: RawField) -> Bitfield { - assert!(raw.bitfield_width().is_some()); - - Bitfield { - offset_into_unit, - data: raw.0, - getter_name: None, - setter_name: None, - } - } - - /// Get the index of the bit within this bitfield's allocation unit where - /// this bitfield begins. - pub(crate) fn offset_into_unit(&self) -> usize { - self.offset_into_unit - } - - /// Get the bit width of this bitfield. - pub(crate) fn width(&self) -> u32 { - self.data.bitfield_width().unwrap() - } - - /// Name of the generated Rust getter for this bitfield. - /// - /// Panics if called before assigning bitfield accessor names or if - /// this bitfield have no name. - pub(crate) fn getter_name(&self) -> &str { - assert!( - self.name().is_some(), - "`Bitfield::getter_name` called on anonymous field" - ); - self.getter_name.as_ref().expect( - "`Bitfield::getter_name` should only be called after\ - assigning bitfield accessor names", - ) - } - - /// Name of the generated Rust setter for this bitfield. - /// - /// Panics if called before assigning bitfield accessor names or if - /// this bitfield have no name. - pub(crate) fn setter_name(&self) -> &str { - assert!( - self.name().is_some(), - "`Bitfield::setter_name` called on anonymous field" - ); - self.setter_name.as_ref().expect( - "`Bitfield::setter_name` should only be called\ - after assigning bitfield accessor names", - ) - } -} - -impl FieldMethods for Bitfield { - fn name(&self) -> Option<&str> { - self.data.name() - } - - fn ty(&self) -> TypeId { - self.data.ty() - } - - fn comment(&self) -> Option<&str> { - self.data.comment() - } - - fn bitfield_width(&self) -> Option { - self.data.bitfield_width() - } - - fn is_public(&self) -> bool { - self.data.is_public() - } - - fn annotations(&self) -> &Annotations { - self.data.annotations() - } - - fn offset(&self) -> Option { - self.data.offset() - } -} - -/// A raw field might be either of a plain data member or a bitfield within a -/// bitfield allocation unit, but we haven't processed it and determined which -/// yet (which would involve allocating it into a bitfield unit if it is a -/// bitfield). -#[derive(Debug)] -struct RawField(FieldData); - -impl RawField { - /// Construct a new `RawField`. 
- fn new( - name: Option, - ty: TypeId, - comment: Option, - annotations: Option, - bitfield_width: Option, - public: bool, - offset: Option, - ) -> RawField { - RawField(FieldData { - name, - ty, - comment, - annotations: annotations.unwrap_or_default(), - bitfield_width, - public, - offset, - }) - } -} - -impl FieldMethods for RawField { - fn name(&self) -> Option<&str> { - self.0.name() - } - - fn ty(&self) -> TypeId { - self.0.ty() - } - - fn comment(&self) -> Option<&str> { - self.0.comment() - } - - fn bitfield_width(&self) -> Option { - self.0.bitfield_width() - } - - fn is_public(&self) -> bool { - self.0.is_public() - } - - fn annotations(&self) -> &Annotations { - self.0.annotations() - } - - fn offset(&self) -> Option { - self.0.offset() - } -} - -/// Convert the given ordered set of raw fields into a list of either plain data -/// members, and/or bitfield units containing multiple bitfields. -/// -/// If we do not have the layout for a bitfield's type, then we can't reliably -/// compute its allocation unit. In such cases, we return an error. -fn raw_fields_to_fields_and_bitfield_units( - ctx: &BindgenContext, - raw_fields: I, - packed: bool, -) -> Result<(Vec, bool), ()> -where - I: IntoIterator, -{ - let mut raw_fields = raw_fields.into_iter().fuse().peekable(); - let mut fields = vec![]; - let mut bitfield_unit_count = 0; - - loop { - // While we have plain old data members, just keep adding them to our - // resulting fields. We introduce a scope here so that we can use - // `raw_fields` again after the `by_ref` iterator adaptor is dropped. - { - let non_bitfields = raw_fields - .by_ref() - .peeking_take_while(|f| f.bitfield_width().is_none()) - .map(|f| Field::DataMember(f.0)); - fields.extend(non_bitfields); - } - - // Now gather all the consecutive bitfields. Only consecutive bitfields - // may potentially share a bitfield allocation unit with each other in - // the Itanium C++ ABI. - let mut bitfields = raw_fields - .by_ref() - .peeking_take_while(|f| f.bitfield_width().is_some()) - .peekable(); - - if bitfields.peek().is_none() { - break; - } - - bitfields_to_allocation_units( - ctx, - &mut bitfield_unit_count, - &mut fields, - bitfields, - packed, - )?; - } - - assert!( - raw_fields.next().is_none(), - "The above loop should consume all items in `raw_fields`" - ); - - Ok((fields, bitfield_unit_count != 0)) -} - -/// Given a set of contiguous raw bitfields, group and allocate them into -/// (potentially multiple) bitfield units. -fn bitfields_to_allocation_units( - ctx: &BindgenContext, - bitfield_unit_count: &mut usize, - fields: &mut E, - raw_bitfields: I, - packed: bool, -) -> Result<(), ()> -where - E: Extend, - I: IntoIterator, -{ - assert!(ctx.collected_typerefs()); - - // NOTE: What follows is reverse-engineered from LLVM's - // lib/AST/RecordLayoutBuilder.cpp - // - // FIXME(emilio): There are some differences between Microsoft and the - // Itanium ABI, but we'll ignore those and stick to Itanium for now. - // - // Also, we need to handle packed bitfields and stuff. - // - // TODO(emilio): Take into account C++'s wide bitfields, and - // packing, sigh. 
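// Greatly simplified, self-contained sketch of the Itanium-style packing done
// by the surrounding `bitfields_to_allocation_units`: each bitfield gets a bit
// offset inside the unit, and a bitfield that would straddle its own type's
// storage is bumped to the next aligned boundary. Packed structs, zero-width
// bitfields, and MSVC rules are ignored here; the `(width, size, align)`
// tuples are illustrative inputs, not bindgen's real data structures.
fn align_to(value: usize, align: usize) -> usize {
    if align == 0 {
        return value;
    }
    let rem = value % align;
    if rem == 0 {
        value
    } else {
        value + align - rem
    }
}

/// Input: (bit width, type size in bytes, type alignment in bytes).
/// Output: per-bitfield bit offsets plus the unit size rounded up to bytes.
fn layout_unit(bitfields: &[(usize, usize, usize)]) -> (Vec<usize>, usize) {
    let mut offsets = Vec::new();
    let mut unit_size_in_bits = 0;
    for &(width, size, align) in bitfields {
        let mut offset = unit_size_in_bits;
        // Would this bitfield cross the storage of its own type? If so,
        // start it at the next `align`-byte boundary.
        if offset != 0 && (offset % (align * 8)) + width > size * 8 {
            offset = align_to(offset, align * 8);
        }
        offsets.push(offset);
        unit_size_in_bits = offset + width;
    }
    (offsets, align_to(unit_size_in_bits, 8) / 8)
}

fn main() {
    // struct { int j : 5; int k : 6; int m : 7; } -> j, k, m share one unit.
    let (offsets, bytes) = layout_unit(&[(5, 4, 4), (6, 4, 4), (7, 4, 4)]);
    assert_eq!(offsets, [0, 5, 11]);
    assert_eq!(bytes, 3); // 18 bits of data, rounded up to whole bytes here.
}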
- - fn flush_allocation_unit( - fields: &mut E, - bitfield_unit_count: &mut usize, - unit_size_in_bits: usize, - unit_align_in_bits: usize, - bitfields: Vec, - packed: bool, - ) where - E: Extend, - { - *bitfield_unit_count += 1; - let align = if packed { - 1 - } else { - bytes_from_bits_pow2(unit_align_in_bits) - }; - let size = align_to(unit_size_in_bits, 8) / 8; - let layout = Layout::new(size, align); - fields.extend(Some(Field::Bitfields(BitfieldUnit { - nth: *bitfield_unit_count, - layout, - bitfields, - }))); - } - - let mut max_align = 0; - let mut unfilled_bits_in_unit = 0; - let mut unit_size_in_bits = 0; - let mut unit_align = 0; - let mut bitfields_in_unit = vec![]; - - // TODO(emilio): Determine this from attributes or pragma ms_struct - // directives. Also, perhaps we should check if the target is MSVC? - const is_ms_struct: bool = false; - - for bitfield in raw_bitfields { - let bitfield_width = bitfield.bitfield_width().unwrap() as usize; - let bitfield_layout = - ctx.resolve_type(bitfield.ty()).layout(ctx).ok_or(())?; - let bitfield_size = bitfield_layout.size; - let bitfield_align = bitfield_layout.align; - - let mut offset = unit_size_in_bits; - if !packed { - if is_ms_struct { - if unit_size_in_bits != 0 && - (bitfield_width == 0 || - bitfield_width > unfilled_bits_in_unit) - { - // We've reached the end of this allocation unit, so flush it - // and its bitfields. - unit_size_in_bits = - align_to(unit_size_in_bits, unit_align * 8); - flush_allocation_unit( - fields, - bitfield_unit_count, - unit_size_in_bits, - unit_align, - mem::take(&mut bitfields_in_unit), - packed, - ); - - // Now we're working on a fresh bitfield allocation unit, so reset - // the current unit size and alignment. - offset = 0; - unit_align = 0; - } - } else if offset != 0 && - (bitfield_width == 0 || - (offset & (bitfield_align * 8 - 1)) + bitfield_width > - bitfield_size * 8) - { - offset = align_to(offset, bitfield_align * 8); - } - } - - // According to the x86[-64] ABI spec: "Unnamed bit-fields’ types do not - // affect the alignment of a structure or union". This makes sense: such - // bit-fields are only used for padding, and we can't perform an - // un-aligned read of something we can't read because we can't even name - // it. - if bitfield.name().is_some() { - max_align = cmp::max(max_align, bitfield_align); - - // NB: The `bitfield_width` here is completely, absolutely - // intentional. Alignment of the allocation unit is based on the - // maximum bitfield width, not (directly) on the bitfields' types' - // alignment. - unit_align = cmp::max(unit_align, bitfield_width); - } - - // Always keep all bitfields around. While unnamed bitifields are used - // for padding (and usually not needed hereafter), large unnamed - // bitfields over their types size cause weird allocation size behavior from clang. - // Therefore, all bitfields needed to be kept around in order to check for this - // and make the struct opaque in this case - bitfields_in_unit.push(Bitfield::new(offset, bitfield)); - - unit_size_in_bits = offset + bitfield_width; - - // Compute what the physical unit's final size would be given what we - // have seen so far, and use that to compute how many bits are still - // available in the unit. - let data_size = align_to(unit_size_in_bits, bitfield_align * 8); - unfilled_bits_in_unit = data_size - unit_size_in_bits; - } - - if unit_size_in_bits != 0 { - // Flush the last allocation unit and its bitfields. 
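// --- Editor's note (illustrative sketch; not part of the deleted bindgen
// source or of this patch): the allocation loop above rounds bit offsets and
// unit sizes up with an `align_to` helper defined elsewhere in this file; the
// version below is a plausible stand-in (assumption), shown with the
// arithmetic the loop performs for two hypothetical bitfields
// `unsigned a : 3; unsigned b : 9;` whose underlying type is a 4-byte,
// 4-byte-aligned `unsigned int`.
fn align_to(size: usize, align: usize) -> usize {
    if align == 0 {
        return size;
    }
    let rem = size % align;
    if rem == 0 { size } else { size + align - rem }
}

fn main() {
    let (width_a, width_b) = (3usize, 9usize);
    let (bf_size, bf_align) = (4usize, 4usize); // layout of `unsigned int`

    // `a` starts the unit at bit 0; `b` follows at bit 3 and, since
    // 3 + 9 <= 32 still fits the underlying type, needs no realignment.
    let offset_b = if (width_a & (bf_align * 8 - 1)) + width_b > bf_size * 8 {
        align_to(width_a, bf_align * 8)
    } else {
        width_a
    };
    let unit_size_in_bits = offset_b + width_b;                     // 12 bits used
    let unit_size_in_bytes = align_to(unit_size_in_bits, 8) / 8;    // rounded to 2 bytes
    let unfilled_bits = align_to(unit_size_in_bits, bf_align * 8) - unit_size_in_bits;

    assert_eq!(
        (offset_b, unit_size_in_bits, unit_size_in_bytes, unfilled_bits),
        (3, 12, 2, 20)
    );
    println!("b at bit {offset_b}, {unit_size_in_bits} bits used ({unit_size_in_bytes} bytes), {unfilled_bits} bits spare");
}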
- flush_allocation_unit( - fields, - bitfield_unit_count, - unit_size_in_bits, - unit_align, - bitfields_in_unit, - packed, - ); - } - - Ok(()) -} - -/// A compound structure's fields are initially raw, and have bitfields that -/// have not been grouped into allocation units. During this time, the fields -/// are mutable and we build them up during parsing. -/// -/// Then, once resolving typerefs is completed, we compute all structs' fields' -/// bitfield allocation units, and they remain frozen and immutable forever -/// after. -#[derive(Debug)] -enum CompFields { - Before(Vec), - After { - fields: Vec, - has_bitfield_units: bool, - }, - Error, -} - -impl Default for CompFields { - fn default() -> CompFields { - CompFields::Before(vec![]) - } -} - -impl CompFields { - fn append_raw_field(&mut self, raw: RawField) { - match *self { - CompFields::Before(ref mut raws) => { - raws.push(raw); - } - _ => { - panic!( - "Must not append new fields after computing bitfield allocation units" - ); - } - } - } - - fn compute_bitfield_units(&mut self, ctx: &BindgenContext, packed: bool) { - let raws = match *self { - CompFields::Before(ref mut raws) => mem::take(raws), - _ => { - panic!("Already computed bitfield units"); - } - }; - - let result = raw_fields_to_fields_and_bitfield_units(ctx, raws, packed); - - match result { - Ok((fields, has_bitfield_units)) => { - *self = CompFields::After { - fields, - has_bitfield_units, - }; - } - Err(()) => { - *self = CompFields::Error; - } - } - } - - fn deanonymize_fields(&mut self, ctx: &BindgenContext, methods: &[Method]) { - let fields = match *self { - CompFields::After { ref mut fields, .. } => fields, - // Nothing to do here. - CompFields::Error => return, - CompFields::Before(_) => { - panic!("Not yet computed bitfield units."); - } - }; - - fn has_method( - methods: &[Method], - ctx: &BindgenContext, - name: &str, - ) -> bool { - methods.iter().any(|method| { - let method_name = ctx.resolve_func(method.signature()).name(); - method_name == name || ctx.rust_mangle(method_name) == name - }) - } - - struct AccessorNamesPair { - getter: String, - setter: String, - } - - let mut accessor_names: HashMap = fields - .iter() - .flat_map(|field| match *field { - Field::Bitfields(ref bu) => &*bu.bitfields, - Field::DataMember(_) => &[], - }) - .filter_map(|bitfield| bitfield.name()) - .map(|bitfield_name| { - let bitfield_name = bitfield_name.to_string(); - let getter = { - let mut getter = - ctx.rust_mangle(&bitfield_name).to_string(); - if has_method(methods, ctx, &getter) { - getter.push_str("_bindgen_bitfield"); - } - getter - }; - let setter = { - let setter = format!("set_{bitfield_name}"); - let mut setter = ctx.rust_mangle(&setter).to_string(); - if has_method(methods, ctx, &setter) { - setter.push_str("_bindgen_bitfield"); - } - setter - }; - (bitfield_name, AccessorNamesPair { getter, setter }) - }) - .collect(); - - let mut anon_field_counter = 0; - for field in fields.iter_mut() { - match *field { - Field::DataMember(FieldData { ref mut name, .. 
}) => { - if name.is_some() { - continue; - } - - anon_field_counter += 1; - *name = Some(format!( - "{}{anon_field_counter}", - ctx.options().anon_fields_prefix, - )); - } - Field::Bitfields(ref mut bu) => { - for bitfield in &mut bu.bitfields { - if bitfield.name().is_none() { - continue; - } - - if let Some(AccessorNamesPair { getter, setter }) = - accessor_names.remove(bitfield.name().unwrap()) - { - bitfield.getter_name = Some(getter); - bitfield.setter_name = Some(setter); - } - } - } - } - } - } - - /// Return the flex array member for the struct/class, if any. - fn flex_array_member(&self, ctx: &BindgenContext) -> Option { - let fields = match self { - CompFields::Before(_) => panic!("raw fields"), - CompFields::After { fields, .. } => fields, - CompFields::Error => return None, // panic? - }; - - match fields.last()? { - Field::Bitfields(..) => None, - Field::DataMember(FieldData { ty, .. }) => ctx - .resolve_type(*ty) - .is_incomplete_array(ctx) - .map(|item| item.expect_type_id(ctx)), - } - } -} - -impl Trace for CompFields { - type Extra = (); - - fn trace(&self, context: &BindgenContext, tracer: &mut T, _: &()) - where - T: Tracer, - { - match *self { - CompFields::Error => {} - CompFields::Before(ref fields) => { - for f in fields { - tracer.visit_kind(f.ty().into(), EdgeKind::Field); - } - } - CompFields::After { ref fields, .. } => { - for f in fields { - f.trace(context, tracer, &()); - } - } - } - } -} - -/// Common data shared across different field types. -#[derive(Clone, Debug)] -pub(crate) struct FieldData { - /// The name of the field, empty if it's an unnamed bitfield width. - name: Option, - - /// The inner type. - ty: TypeId, - - /// The doc comment on the field if any. - comment: Option, - - /// Annotations for this field, or the default. - annotations: Annotations, - - /// If this field is a bitfield, and how many bits does it contain if it is. - bitfield_width: Option, - - /// If the C++ field is declared `public` - public: bool, - - /// The offset of the field (in bits) - offset: Option, -} - -impl FieldMethods for FieldData { - fn name(&self) -> Option<&str> { - self.name.as_deref() - } - - fn ty(&self) -> TypeId { - self.ty - } - - fn comment(&self) -> Option<&str> { - self.comment.as_deref() - } - - fn bitfield_width(&self) -> Option { - self.bitfield_width - } - - fn is_public(&self) -> bool { - self.public - } - - fn annotations(&self) -> &Annotations { - &self.annotations - } - - fn offset(&self) -> Option { - self.offset - } -} - -/// The kind of inheritance a base class is using. -#[derive(Clone, Debug, PartialEq, Eq)] -pub(crate) enum BaseKind { - /// Normal inheritance, like: - /// - /// ```cpp - /// class A : public B {}; - /// ``` - Normal, - /// Virtual inheritance, like: - /// - /// ```cpp - /// class A: public virtual B {}; - /// ``` - Virtual, -} - -/// A base class. -#[derive(Clone, Debug)] -pub(crate) struct Base { - /// The type of this base class. - pub(crate) ty: TypeId, - /// The kind of inheritance we're doing. - pub(crate) kind: BaseKind, - /// Name of the field in which this base should be stored. - pub(crate) field_name: String, - /// Whether this base is inherited from publicly. - pub(crate) is_pub: bool, -} - -impl Base { - /// Whether this base class is inheriting virtually. - pub(crate) fn is_virtual(&self) -> bool { - self.kind == BaseKind::Virtual - } - - /// Whether this base class should have it's own field for storage. 
- pub(crate) fn requires_storage(&self, ctx: &BindgenContext) -> bool { - // Virtual bases are already taken into account by the vtable - // pointer. - // - // FIXME(emilio): Is this always right? - if self.is_virtual() { - return false; - } - - // NB: We won't include zero-sized types in our base chain because they - // would contribute to our size given the dummy field we insert for - // zero-sized types. - if self.ty.is_zero_sized(ctx) { - return false; - } - - true - } - - /// Whether this base is inherited from publicly. - pub(crate) fn is_public(&self) -> bool { - self.is_pub - } -} - -/// A compound type. -/// -/// Either a struct or union, a compound type is built up from the combination -/// of fields which also are associated with their own (potentially compound) -/// type. -#[derive(Debug)] -pub(crate) struct CompInfo { - /// Whether this is a struct or a union. - kind: CompKind, - - /// The members of this struct or union. - fields: CompFields, - - /// The abstract template parameters of this class. Note that these are NOT - /// concrete template arguments, and should always be a - /// `Type(TypeKind::TypeParam(name))`. For concrete template arguments, see - /// `TypeKind::TemplateInstantiation`. - template_params: Vec, - - /// The method declarations inside this class, if in C++ mode. - methods: Vec, - - /// The different constructors this struct or class contains. - constructors: Vec, - - /// The destructor of this type. The bool represents whether this destructor - /// is virtual. - destructor: Option<(MethodKind, FunctionId)>, - - /// Vector of classes this one inherits from. - base_members: Vec, - - /// The inner types that were declared inside this class, in something like: - /// - /// ```c++ - /// class Foo { - /// typedef int FooTy; - /// struct Bar { - /// int baz; - /// }; - /// } - /// - /// static Foo::Bar const = {3}; - /// ``` - inner_types: Vec, - - /// Set of static constants declared inside this class. - inner_vars: Vec, - - /// Whether this type should generate an vtable (TODO: Should be able to - /// look at the virtual methods and ditch this field). - has_own_virtual_method: bool, - - /// Whether this type has destructor. - has_destructor: bool, - - /// Whether this type has a base type with more than one member. - /// - /// TODO: We should be able to compute this. - has_nonempty_base: bool, - - /// If this type has a template parameter which is not a type (e.g.: a - /// `size_t`) - has_non_type_template_params: bool, - - /// Whether this type has a bit field member whose width couldn't be - /// evaluated (e.g. if it depends on a template parameter). We generate an - /// opaque type in this case. - has_unevaluable_bit_field_width: bool, - - /// Whether we saw `__attribute__((packed))` on or within this type. - packed_attr: bool, - - /// Used to know if we've found an opaque attribute that could cause us to - /// generate a type with invalid layout. This is explicitly used to avoid us - /// generating bad alignments when parsing types like `max_align_t`. - /// - /// It's not clear what the behavior should be here, if generating the item - /// and pray, or behave as an opaque type. - found_unknown_attr: bool, - - /// Used to indicate when a struct has been forward declared. Usually used - /// in headers so that APIs can't modify them directly. - is_forward_declaration: bool, -} - -impl CompInfo { - /// Construct a new compound type. 
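// --- Editor's note (illustrative sketch; not part of the deleted bindgen
// source or of this patch): for unions whose layout clang fails to report,
// the `layout` method defined below falls back to the largest member size and
// the largest member alignment (never less than 1). A standalone version of
// that fallback, over plain hypothetical `(size, align)` pairs:
fn union_fallback_layout(member_layouts: &[(usize, usize)]) -> Option<(usize, usize)> {
    if member_layouts.is_empty() {
        return None; // empty union: nothing to compute from
    }
    let mut max_size = 0;
    let mut max_align = 1; // don't allow align(0)
    for &(size, align) in member_layouts {
        max_size = max_size.max(size);
        max_align = max_align.max(align);
    }
    Some((max_size, max_align))
}

fn main() {
    // union { char c; int i; double d; } on a typical 64-bit target.
    let members = [(1, 1), (4, 4), (8, 8)];
    assert_eq!(union_fallback_layout(&members), Some((8, 8)));
}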
- pub(crate) fn new(kind: CompKind) -> Self { - CompInfo { - kind, - fields: CompFields::default(), - template_params: vec![], - methods: vec![], - constructors: vec![], - destructor: None, - base_members: vec![], - inner_types: vec![], - inner_vars: vec![], - has_own_virtual_method: false, - has_destructor: false, - has_nonempty_base: false, - has_non_type_template_params: false, - has_unevaluable_bit_field_width: false, - packed_attr: false, - found_unknown_attr: false, - is_forward_declaration: false, - } - } - - /// Compute the layout of this type. - /// - /// This is called as a fallback under some circumstances where LLVM doesn't - /// give us the correct layout. - /// - /// If we're a union without known layout, we try to compute it from our - /// members. This is not ideal, but clang fails to report the size for these - /// kind of unions, see `test/headers/template_union.hpp` - pub(crate) fn layout(&self, ctx: &BindgenContext) -> Option { - // We can't do better than clang here, sorry. - if self.kind == CompKind::Struct { - return None; - } - - // By definition, we don't have the right layout information here if - // we're a forward declaration. - if self.is_forward_declaration() { - return None; - } - - // empty union case - if !self.has_fields() { - return None; - } - - let mut max_size = 0; - // Don't allow align(0) - let mut max_align = 1; - self.each_known_field_layout(ctx, |layout| { - max_size = cmp::max(max_size, layout.size); - max_align = cmp::max(max_align, layout.align); - }); - - Some(Layout::new(max_size, max_align)) - } - - /// Get this type's set of fields. - pub(crate) fn fields(&self) -> &[Field] { - match self.fields { - CompFields::Error => &[], - CompFields::After { ref fields, .. } => fields, - CompFields::Before(..) => { - panic!("Should always have computed bitfield units first"); - } - } - } - - /// Return the flex array member and its element type if any - pub(crate) fn flex_array_member( - &self, - ctx: &BindgenContext, - ) -> Option { - self.fields.flex_array_member(ctx) - } - - fn has_fields(&self) -> bool { - match self.fields { - CompFields::Error => false, - CompFields::After { ref fields, .. } => !fields.is_empty(), - CompFields::Before(ref raw_fields) => !raw_fields.is_empty(), - } - } - - fn each_known_field_layout( - &self, - ctx: &BindgenContext, - mut callback: impl FnMut(Layout), - ) { - match self.fields { - CompFields::Error => {} - CompFields::After { ref fields, .. } => { - for field in fields { - if let Some(layout) = field.layout(ctx) { - callback(layout); - } - } - } - CompFields::Before(ref raw_fields) => { - for field in raw_fields { - let field_ty = ctx.resolve_type(field.0.ty); - if let Some(layout) = field_ty.layout(ctx) { - callback(layout); - } - } - } - } - } - - fn has_bitfields(&self) -> bool { - match self.fields { - CompFields::Error => false, - CompFields::After { - has_bitfield_units, .. - } => has_bitfield_units, - CompFields::Before(_) => { - panic!("Should always have computed bitfield units first"); - } - } - } - - /// Returns whether we have a too large bitfield unit, in which case we may - /// not be able to derive some of the things we should be able to normally - /// derive. - pub(crate) fn has_too_large_bitfield_unit(&self) -> bool { - if !self.has_bitfields() { - return false; - } - self.fields().iter().any(|field| match *field { - Field::DataMember(..) 
=> false, - Field::Bitfields(ref unit) => { - unit.layout.size > RUST_DERIVE_IN_ARRAY_LIMIT - } - }) - } - - /// Does this type have any template parameters that aren't types - /// (e.g. int)? - pub(crate) fn has_non_type_template_params(&self) -> bool { - self.has_non_type_template_params - } - - /// Do we see a virtual function during parsing? - /// Get the `has_own_virtual_method` boolean. - pub(crate) fn has_own_virtual_method(&self) -> bool { - self.has_own_virtual_method - } - - /// Did we see a destructor when parsing this type? - pub(crate) fn has_own_destructor(&self) -> bool { - self.has_destructor - } - - /// Get this type's set of methods. - pub(crate) fn methods(&self) -> &[Method] { - &self.methods - } - - /// Get this type's set of constructors. - pub(crate) fn constructors(&self) -> &[FunctionId] { - &self.constructors - } - - /// Get this type's destructor. - pub(crate) fn destructor(&self) -> Option<(MethodKind, FunctionId)> { - self.destructor - } - - /// What kind of compound type is this? - pub(crate) fn kind(&self) -> CompKind { - self.kind - } - - /// Is this a union? - pub(crate) fn is_union(&self) -> bool { - self.kind() == CompKind::Union - } - - /// The set of types that this one inherits from. - pub(crate) fn base_members(&self) -> &[Base] { - &self.base_members - } - - /// Construct a new compound type from a Clang type. - pub(crate) fn from_ty( - potential_id: ItemId, - ty: &clang::Type, - location: Option, - ctx: &mut BindgenContext, - ) -> Result { - use clang_sys::*; - assert!( - ty.template_args().is_none(), - "We handle template instantiations elsewhere" - ); - - let mut cursor = ty.declaration(); - let mut kind = Self::kind_from_cursor(&cursor); - if kind.is_err() { - if let Some(location) = location { - kind = Self::kind_from_cursor(&location); - cursor = location; - } - } - - let kind = kind?; - - debug!("CompInfo::from_ty({kind:?}, {cursor:?})"); - - let mut ci = CompInfo::new(kind); - ci.is_forward_declaration = - location.map_or(true, |cur| match cur.kind() { - CXCursor_ParmDecl => true, - CXCursor_StructDecl | CXCursor_UnionDecl | - CXCursor_ClassDecl => !cur.is_definition(), - _ => false, - }); - - let mut maybe_anonymous_struct_field = None; - cursor.visit(|cur| { - if cur.kind() != CXCursor_FieldDecl { - if let Some((ty, clang_ty, public, offset)) = - maybe_anonymous_struct_field.take() - { - if cur.kind() == CXCursor_TypedefDecl && - cur.typedef_type().unwrap().canonical_type() == - clang_ty - { - // Typedefs of anonymous structs appear later in the ast - // than the struct itself, that would otherwise be an - // anonymous field. Detect that case here, and do - // nothing. - } else { - let field = RawField::new( - None, ty, None, None, None, public, offset, - ); - ci.fields.append_raw_field(field); - } - } - } - - match cur.kind() { - CXCursor_FieldDecl => { - if let Some((ty, clang_ty, public, offset)) = - maybe_anonymous_struct_field.take() - { - let mut used = false; - cur.visit(|child| { - if child.cur_type() == clang_ty { - used = true; - } - CXChildVisit_Continue - }); - - if !used { - let field = RawField::new( - None, ty, None, None, None, public, offset, - ); - ci.fields.append_raw_field(field); - } - } - - let bit_width = if cur.is_bit_field() { - let width = cur.bit_width(); - - // Make opaque type if the bit width couldn't be - // evaluated. 
- if width.is_none() { - ci.has_unevaluable_bit_field_width = true; - return CXChildVisit_Break; - } - - width - } else { - None - }; - - let field_type = Item::from_ty_or_ref( - cur.cur_type(), - cur, - Some(potential_id), - ctx, - ); - - let comment = cur.raw_comment(); - let annotations = Annotations::new(&cur); - let name = cur.spelling(); - let is_public = cur.public_accessible(); - let offset = cur.offset_of_field().ok(); - - // Name can be empty if there are bitfields, for example, - // see tests/headers/struct_with_bitfields.h - assert!( - !name.is_empty() || bit_width.is_some(), - "Empty field name?" - ); - - let name = if name.is_empty() { None } else { Some(name) }; - - let field = RawField::new( - name, - field_type, - comment, - annotations, - bit_width, - is_public, - offset, - ); - ci.fields.append_raw_field(field); - - // No we look for things like attributes and stuff. - cur.visit(|cur| { - if cur.kind() == CXCursor_UnexposedAttr { - ci.found_unknown_attr = true; - } - CXChildVisit_Continue - }); - } - CXCursor_UnexposedAttr => { - ci.found_unknown_attr = true; - } - CXCursor_EnumDecl | - CXCursor_TypeAliasDecl | - CXCursor_TypeAliasTemplateDecl | - CXCursor_TypedefDecl | - CXCursor_StructDecl | - CXCursor_UnionDecl | - CXCursor_ClassTemplate | - CXCursor_ClassDecl => { - // We can find non-semantic children here, clang uses a - // StructDecl to note incomplete structs that haven't been - // forward-declared before, see [1]. - // - // Also, clang seems to scope struct definitions inside - // unions, and other named struct definitions inside other - // structs to the whole translation unit. - // - // Let's just assume that if the cursor we've found is a - // definition, it's a valid inner type. - // - // [1]: https://github.com/rust-lang/rust-bindgen/issues/482 - let is_inner_struct = - cur.semantic_parent() == cursor || cur.is_definition(); - if !is_inner_struct { - return CXChildVisit_Continue; - } - - // Even if this is a definition, we may not be the semantic - // parent, see #1281. - let inner = Item::parse(cur, Some(potential_id), ctx) - .expect("Inner ClassDecl"); - - // If we avoided recursion parsing this type (in - // `Item::from_ty_with_id()`), then this might not be a - // valid type ID, so check and gracefully handle this. - if ctx.resolve_item_fallible(inner).is_some() { - let inner = inner.expect_type_id(ctx); - - ci.inner_types.push(inner); - - // A declaration of an union or a struct without name - // could also be an unnamed field, unfortunately. 
- if cur.is_anonymous() && cur.kind() != CXCursor_EnumDecl - { - let ty = cur.cur_type(); - let public = cur.public_accessible(); - let offset = cur.offset_of_field().ok(); - - maybe_anonymous_struct_field = - Some((inner, ty, public, offset)); - } - } - } - CXCursor_PackedAttr => { - ci.packed_attr = true; - } - CXCursor_TemplateTypeParameter => { - let param = Item::type_param(None, cur, ctx).expect( - "Item::type_param shouldn't fail when pointing \ - at a TemplateTypeParameter", - ); - ci.template_params.push(param); - } - CXCursor_CXXBaseSpecifier => { - let is_virtual_base = cur.is_virtual_base(); - ci.has_own_virtual_method |= is_virtual_base; - - let kind = if is_virtual_base { - BaseKind::Virtual - } else { - BaseKind::Normal - }; - - let field_name = match ci.base_members.len() { - 0 => "_base".into(), - n => format!("_base_{n}"), - }; - let type_id = - Item::from_ty_or_ref(cur.cur_type(), cur, None, ctx); - ci.base_members.push(Base { - ty: type_id, - kind, - field_name, - is_pub: cur.access_specifier() == CX_CXXPublic, - }); - } - CXCursor_Constructor | CXCursor_Destructor | - CXCursor_CXXMethod => { - let is_virtual = cur.method_is_virtual(); - let is_static = cur.method_is_static(); - debug_assert!(!(is_static && is_virtual), "How?"); - - ci.has_destructor |= cur.kind() == CXCursor_Destructor; - ci.has_own_virtual_method |= is_virtual; - - // This used to not be here, but then I tried generating - // stylo bindings with this (without path filters), and - // cried a lot with a method in gfx/Point.h - // (ToUnknownPoint), that somehow was causing the same type - // to be inserted in the map two times. - // - // I couldn't make a reduced test case, but anyway... - // Methods of template functions not only used to be inlined, - // but also instantiated, and we wouldn't be able to call - // them, so just bail out. - if !ci.template_params.is_empty() { - return CXChildVisit_Continue; - } - - // NB: This gets us an owned `Function`, not a - // `FunctionSig`. 
- let signature = - match Item::parse(cur, Some(potential_id), ctx) { - Ok(item) - if ctx - .resolve_item(item) - .kind() - .is_function() => - { - item - } - _ => return CXChildVisit_Continue, - }; - - let signature = signature.expect_function_id(ctx); - - match cur.kind() { - CXCursor_Constructor => { - ci.constructors.push(signature); - } - CXCursor_Destructor => { - let kind = if is_virtual { - MethodKind::VirtualDestructor { - pure_virtual: cur.method_is_pure_virtual(), - } - } else { - MethodKind::Destructor - }; - ci.destructor = Some((kind, signature)); - } - CXCursor_CXXMethod => { - let is_const = cur.method_is_const(); - let method_kind = if is_static { - MethodKind::Static - } else if is_virtual { - MethodKind::Virtual { - pure_virtual: cur.method_is_pure_virtual(), - } - } else { - MethodKind::Normal - }; - - let method = - Method::new(method_kind, signature, is_const); - - ci.methods.push(method); - } - _ => unreachable!("How can we see this here?"), - } - } - CXCursor_NonTypeTemplateParameter => { - ci.has_non_type_template_params = true; - } - CXCursor_VarDecl => { - let linkage = cur.linkage(); - if linkage != CXLinkage_External && - linkage != CXLinkage_UniqueExternal - { - return CXChildVisit_Continue; - } - - let visibility = cur.visibility(); - if visibility != CXVisibility_Default { - return CXChildVisit_Continue; - } - - if let Ok(item) = Item::parse(cur, Some(potential_id), ctx) - { - ci.inner_vars.push(item.as_var_id_unchecked()); - } - } - // Intentionally not handled - CXCursor_CXXAccessSpecifier | - CXCursor_CXXFinalAttr | - CXCursor_FunctionTemplate | - CXCursor_ConversionFunction => {} - _ => { - warn!( - "unhandled comp member `{}` (kind {:?}) in `{}` ({})", - cur.spelling(), - clang::kind_to_str(cur.kind()), - cursor.spelling(), - cur.location() - ); - } - } - CXChildVisit_Continue - }); - - if let Some((ty, _, public, offset)) = maybe_anonymous_struct_field { - let field = - RawField::new(None, ty, None, None, None, public, offset); - ci.fields.append_raw_field(field); - } - - Ok(ci) - } - - fn kind_from_cursor( - cursor: &clang::Cursor, - ) -> Result { - use clang_sys::*; - Ok(match cursor.kind() { - CXCursor_UnionDecl => CompKind::Union, - CXCursor_ClassDecl | CXCursor_StructDecl => CompKind::Struct, - CXCursor_CXXBaseSpecifier | - CXCursor_ClassTemplatePartialSpecialization | - CXCursor_ClassTemplate => match cursor.template_kind() { - CXCursor_UnionDecl => CompKind::Union, - _ => CompKind::Struct, - }, - _ => { - warn!("Unknown kind for comp type: {cursor:?}"); - return Err(ParseError::Continue); - } - }) - } - - /// Get the set of types that were declared within this compound type - /// (e.g. nested class definitions). - pub(crate) fn inner_types(&self) -> &[TypeId] { - &self.inner_types - } - - /// Get the set of static variables declared within this compound type. - pub(crate) fn inner_vars(&self) -> &[VarId] { - &self.inner_vars - } - - /// Have we found a field with an opaque type that could potentially mess up - /// the layout of this compound type? - pub(crate) fn found_unknown_attr(&self) -> bool { - self.found_unknown_attr - } - - /// Is this compound type packed? - pub(crate) fn is_packed( - &self, - ctx: &BindgenContext, - layout: Option<&Layout>, - ) -> bool { - if self.packed_attr { - return true; - } - - // Even though `libclang` doesn't expose `#pragma packed(...)`, we can - // detect it through its effects. 
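// --- Editor's note (illustrative sketch; not part of the deleted bindgen
// source or of this patch): the check that follows infers packing from its
// effects: if any field's natural alignment is larger than the alignment
// clang reports for the whole struct, the struct must have been packed.
// A standalone sketch over `(size, align)` pairs, with hypothetical values:
fn looks_packed(struct_align: usize, field_layouts: &[(usize, usize)]) -> bool {
    field_layouts
        .iter()
        .any(|&(_, field_align)| field_align > struct_align)
}

fn main() {
    // Under #pragma pack(1), struct { char c; int i; } has alignment 1 even
    // though `int` naturally wants alignment 4, so it is detected as packed.
    assert!(looks_packed(1, &[(1, 1), (4, 4)]));
    // The same struct without packing has alignment 4 and is not flagged.
    assert!(!looks_packed(4, &[(1, 1), (4, 4)]));
}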
- if let Some(parent_layout) = layout { - let mut packed = false; - self.each_known_field_layout(ctx, |layout| { - packed = packed || layout.align > parent_layout.align; - }); - if packed { - info!("Found a struct that was defined within `#pragma packed(...)`"); - return true; - } - - if self.has_own_virtual_method && parent_layout.align == 1 { - return true; - } - } - - false - } - - /// Return true if a compound type is "naturally packed". This means we can exclude the - /// "packed" attribute without changing the layout. - /// This is useful for types that need an "align(N)" attribute since rustc won't compile - /// structs that have both of those attributes. - pub(crate) fn already_packed(&self, ctx: &BindgenContext) -> Option { - let mut total_size: usize = 0; - - for field in self.fields() { - let layout = field.layout(ctx)?; - - if layout.align != 0 && total_size % layout.align != 0 { - return Some(false); - } - - total_size += layout.size; - } - - Some(true) - } - - /// Returns true if compound type has been forward declared - pub(crate) fn is_forward_declaration(&self) -> bool { - self.is_forward_declaration - } - - /// Compute this compound structure's bitfield allocation units. - pub(crate) fn compute_bitfield_units( - &mut self, - ctx: &BindgenContext, - layout: Option<&Layout>, - ) { - let packed = self.is_packed(ctx, layout); - self.fields.compute_bitfield_units(ctx, packed); - } - - /// Assign for each anonymous field a generated name. - pub(crate) fn deanonymize_fields(&mut self, ctx: &BindgenContext) { - self.fields.deanonymize_fields(ctx, &self.methods); - } - - /// Returns whether the current union can be represented as a Rust `union` - /// - /// Requirements: - /// 1. Current `RustTarget` allows for `untagged_union` - /// 2. Each field can derive `Copy` or we use `ManuallyDrop`. - /// 3. It's not zero-sized. - /// - /// Second boolean returns whether all fields can be copied (and thus - /// `ManuallyDrop` is not needed). 
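// --- Editor's note (illustrative sketch; not part of the deleted bindgen
// source or of this patch): the `is_rust_union` method that follows boils
// down to a small decision table; the sketch below mirrors it with plain
// booleans (the real method additionally checks that the type is a union and
// matches the wrapper-style regexes, which is elided here). The second flag
// reports whether every field is `Copy`, i.e. whether `ManuallyDrop<T>`
// wrappers can be skipped.
#[derive(PartialEq)]
enum NonCopyUnionStyle {
    BindgenWrapper,
    ManuallyDrop,
}

fn rust_union_decision(
    untagged_union_allowed: bool,
    is_forward_declaration: bool,
    all_fields_can_copy: bool,
    non_copy_style: NonCopyUnionStyle,
    size: usize,
) -> (bool, bool) {
    if !untagged_union_allowed || is_forward_declaration || size == 0 {
        return (false, false);
    }
    if !all_fields_can_copy && non_copy_style == NonCopyUnionStyle::BindgenWrapper {
        return (false, false);
    }
    (true, all_fields_can_copy)
}

fn main() {
    // A non-Copy field is acceptable as long as the ManuallyDrop style is chosen.
    assert_eq!(
        rust_union_decision(true, false, false, NonCopyUnionStyle::ManuallyDrop, 8),
        (true, false)
    );
    // Zero-sized unions never become Rust unions.
    assert_eq!(
        rust_union_decision(true, false, true, NonCopyUnionStyle::BindgenWrapper, 0),
        (false, false)
    );
}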
- pub(crate) fn is_rust_union( - &self, - ctx: &BindgenContext, - layout: Option<&Layout>, - name: &str, - ) -> (bool, bool) { - if !self.is_union() { - return (false, false); - } - - if !ctx.options().untagged_union { - return (false, false); - } - - if self.is_forward_declaration() { - return (false, false); - } - - let union_style = if ctx.options().bindgen_wrapper_union.matches(name) { - NonCopyUnionStyle::BindgenWrapper - } else if ctx.options().manually_drop_union.matches(name) { - NonCopyUnionStyle::ManuallyDrop - } else { - ctx.options().default_non_copy_union_style - }; - - let all_can_copy = self.fields().iter().all(|f| match *f { - Field::DataMember(ref field_data) => { - field_data.ty().can_derive_copy(ctx) - } - Field::Bitfields(_) => true, - }); - - if !all_can_copy && union_style == NonCopyUnionStyle::BindgenWrapper { - return (false, false); - } - - if layout.is_some_and(|l| l.size == 0) { - return (false, false); - } - - (true, all_can_copy) - } -} - -impl DotAttributes for CompInfo { - fn dot_attributes( - &self, - ctx: &BindgenContext, - out: &mut W, - ) -> io::Result<()> - where - W: io::Write, - { - writeln!(out, "CompKind{:?}", self.kind)?; - - if self.has_own_virtual_method { - writeln!(out, "has_vtabletrue")?; - } - - if self.has_destructor { - writeln!(out, "has_destructortrue")?; - } - - if self.has_nonempty_base { - writeln!(out, "has_nonempty_basetrue")?; - } - - if self.has_non_type_template_params { - writeln!( - out, - "has_non_type_template_paramstrue" - )?; - } - - if self.packed_attr { - writeln!(out, "packed_attrtrue")?; - } - - if self.is_forward_declaration { - writeln!( - out, - "is_forward_declarationtrue" - )?; - } - - if !self.fields().is_empty() { - writeln!(out, r#"fields"#)?; - for field in self.fields() { - field.dot_attributes(ctx, out)?; - } - writeln!(out, "
")?; - } - - Ok(()) - } -} - -impl IsOpaque for CompInfo { - type Extra = Option; - - fn is_opaque(&self, ctx: &BindgenContext, layout: &Option) -> bool { - if self.has_non_type_template_params || - self.has_unevaluable_bit_field_width - { - return true; - } - - // When we do not have the layout for a bitfield's type (for example, it - // is a type parameter), then we can't compute bitfield units. We are - // left with no choice but to make the whole struct opaque, or else we - // might generate structs with incorrect sizes and alignments. - if let CompFields::Error = self.fields { - return true; - } - - // Bitfields with a width that is larger than their unit's width have - // some strange things going on, and the best we can do is make the - // whole struct opaque. - if self.fields().iter().any(|f| match *f { - Field::DataMember(_) => false, - Field::Bitfields(ref unit) => unit.bitfields().iter().any(|bf| { - let bitfield_layout = ctx - .resolve_type(bf.ty()) - .layout(ctx) - .expect("Bitfield without layout? Gah!"); - bf.width() / 8 > bitfield_layout.size as u32 - }), - }) { - return true; - } - - if !ctx.options().rust_features().repr_packed_n { - // If we don't have `#[repr(packed(N)]`, the best we can - // do is make this struct opaque. - // - // See https://github.com/rust-lang/rust-bindgen/issues/537 and - // https://github.com/rust-lang/rust/issues/33158 - if self.is_packed(ctx, layout.as_ref()) && - layout.is_some_and(|l| l.align > 1) - { - warn!("Found a type that is both packed and aligned to greater than \ - 1; Rust before version 1.33 doesn't have `#[repr(packed(N))]`, so we \ - are treating it as opaque. You may wish to set bindgen's rust target \ - version to 1.33 or later to enable `#[repr(packed(N))]` support."); - return true; - } - } - - false - } -} - -impl TemplateParameters for CompInfo { - fn self_template_params(&self, _ctx: &BindgenContext) -> Vec { - self.template_params.clone() - } -} - -impl Trace for CompInfo { - type Extra = Item; - - fn trace(&self, context: &BindgenContext, tracer: &mut T, item: &Item) - where - T: Tracer, - { - for p in item.all_template_params(context) { - tracer.visit_kind(p.into(), EdgeKind::TemplateParameterDefinition); - } - - for ty in self.inner_types() { - tracer.visit_kind(ty.into(), EdgeKind::InnerType); - } - - for &var in self.inner_vars() { - tracer.visit_kind(var.into(), EdgeKind::InnerVar); - } - - for method in self.methods() { - tracer.visit_kind(method.signature.into(), EdgeKind::Method); - } - - if let Some((_kind, signature)) = self.destructor() { - tracer.visit_kind(signature.into(), EdgeKind::Destructor); - } - - for ctor in self.constructors() { - tracer.visit_kind(ctor.into(), EdgeKind::Constructor); - } - - // Base members and fields are not generated for opaque types (but all - // of the above things are) so stop here. - if item.is_opaque(context, &()) { - return; - } - - for base in self.base_members() { - tracer.visit_kind(base.ty.into(), EdgeKind::BaseMember); - } - - self.fields.trace(context, tracer, &()); - } -} diff --git a/vendor/bindgen/ir/context.rs b/vendor/bindgen/ir/context.rs deleted file mode 100644 index c0201a114b7a46..00000000000000 --- a/vendor/bindgen/ir/context.rs +++ /dev/null @@ -1,3107 +0,0 @@ -//! Common context that is passed around during parsing and codegen. 
- -use super::super::time::Timer; -use super::analysis::{ - analyze, as_cannot_derive_set, CannotDerive, DeriveTrait, - HasDestructorAnalysis, HasFloat, HasTypeParameterInArray, - HasVtableAnalysis, HasVtableResult, SizednessAnalysis, SizednessResult, - UsedTemplateParameters, -}; -use super::derive::{ - CanDerive, CanDeriveCopy, CanDeriveDebug, CanDeriveDefault, CanDeriveEq, - CanDeriveHash, CanDeriveOrd, CanDerivePartialEq, CanDerivePartialOrd, -}; -use super::function::Function; -use super::int::IntKind; -use super::item::{IsOpaque, Item, ItemAncestors, ItemSet}; -use super::item_kind::ItemKind; -use super::module::{Module, ModuleKind}; -use super::template::{TemplateInstantiation, TemplateParameters}; -use super::traversal::{self, Edge, ItemTraversal}; -use super::ty::{FloatKind, Type, TypeKind}; -use crate::clang::{self, ABIKind, Cursor}; -use crate::codegen::CodegenError; -use crate::BindgenOptions; -use crate::{Entry, HashMap, HashSet}; - -use proc_macro2::{Ident, Span, TokenStream}; -use quote::ToTokens; -use std::borrow::Cow; -use std::cell::{Cell, RefCell}; -use std::collections::{BTreeSet, HashMap as StdHashMap}; -use std::mem; -use std::path::Path; - -/// An identifier for some kind of IR item. -#[derive(Debug, Copy, Clone, Eq, PartialOrd, Ord, Hash)] -pub(crate) struct ItemId(usize); - -/// Declare a newtype around `ItemId` with conversion methods. -macro_rules! item_id_newtype { - ( - $( #[$attr:meta] )* - pub(crate) struct $name:ident(ItemId) - where - $( #[$checked_attr:meta] )* - checked = $checked:ident with $check_method:ident, - $( #[$expected_attr:meta] )* - expected = $expected:ident, - $( #[$unchecked_attr:meta] )* - unchecked = $unchecked:ident; - ) => { - $( #[$attr] )* - #[derive(Debug, Copy, Clone, Eq, PartialOrd, Ord, Hash)] - pub(crate) struct $name(ItemId); - - impl $name { - /// Create an `ItemResolver` from this ID. - #[allow(dead_code)] - pub(crate) fn into_resolver(self) -> ItemResolver { - let id: ItemId = self.into(); - id.into() - } - } - - impl ::std::cmp::PartialEq for $name - where - T: Copy + Into - { - fn eq(&self, rhs: &T) -> bool { - let rhs: ItemId = (*rhs).into(); - self.0 == rhs - } - } - - impl From<$name> for ItemId { - fn from(id: $name) -> ItemId { - id.0 - } - } - - impl<'a> From<&'a $name> for ItemId { - fn from(id: &'a $name) -> ItemId { - id.0 - } - } - - #[allow(dead_code)] - impl ItemId { - $( #[$checked_attr] )* - pub(crate) fn $checked(&self, ctx: &BindgenContext) -> Option<$name> { - if ctx.resolve_item(*self).kind().$check_method() { - Some($name(*self)) - } else { - None - } - } - - $( #[$expected_attr] )* - pub(crate) fn $expected(&self, ctx: &BindgenContext) -> $name { - self.$checked(ctx) - .expect(concat!( - stringify!($expected), - " called with ItemId that points to the wrong ItemKind" - )) - } - - $( #[$unchecked_attr] )* - pub(crate) fn $unchecked(&self) -> $name { - $name(*self) - } - } - } -} - -item_id_newtype! { - /// An identifier for an `Item` whose `ItemKind` is known to be - /// `ItemKind::Type`. - pub(crate) struct TypeId(ItemId) - where - /// Convert this `ItemId` into a `TypeId` if its associated item is a type, - /// otherwise return `None`. - checked = as_type_id with is_type, - - /// Convert this `ItemId` into a `TypeId`. - /// - /// If this `ItemId` does not point to a type, then panic. - expected = expect_type_id, - - /// Convert this `ItemId` into a `TypeId` without actually checking whether - /// this ID actually points to a `Type`. - unchecked = as_type_id_unchecked; -} - -item_id_newtype! 
{ - /// An identifier for an `Item` whose `ItemKind` is known to be - /// `ItemKind::Module`. - pub(crate) struct ModuleId(ItemId) - where - /// Convert this `ItemId` into a `ModuleId` if its associated item is a - /// module, otherwise return `None`. - checked = as_module_id with is_module, - - /// Convert this `ItemId` into a `ModuleId`. - /// - /// If this `ItemId` does not point to a module, then panic. - expected = expect_module_id, - - /// Convert this `ItemId` into a `ModuleId` without actually checking - /// whether this ID actually points to a `Module`. - unchecked = as_module_id_unchecked; -} - -item_id_newtype! { - /// An identifier for an `Item` whose `ItemKind` is known to be - /// `ItemKind::Var`. - pub(crate) struct VarId(ItemId) - where - /// Convert this `ItemId` into a `VarId` if its associated item is a var, - /// otherwise return `None`. - checked = as_var_id with is_var, - - /// Convert this `ItemId` into a `VarId`. - /// - /// If this `ItemId` does not point to a var, then panic. - expected = expect_var_id, - - /// Convert this `ItemId` into a `VarId` without actually checking whether - /// this ID actually points to a `Var`. - unchecked = as_var_id_unchecked; -} - -item_id_newtype! { - /// An identifier for an `Item` whose `ItemKind` is known to be - /// `ItemKind::Function`. - pub(crate) struct FunctionId(ItemId) - where - /// Convert this `ItemId` into a `FunctionId` if its associated item is a function, - /// otherwise return `None`. - checked = as_function_id with is_function, - - /// Convert this `ItemId` into a `FunctionId`. - /// - /// If this `ItemId` does not point to a function, then panic. - expected = expect_function_id, - - /// Convert this `ItemId` into a `FunctionId` without actually checking whether - /// this ID actually points to a `Function`. - unchecked = as_function_id_unchecked; -} - -impl From for usize { - fn from(id: ItemId) -> usize { - id.0 - } -} - -impl ItemId { - /// Get a numeric representation of this ID. 
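// --- Editor's note (illustrative sketch; not part of the deleted bindgen
// source or of this patch): `item_id_newtype!` above stamps out thin wrappers
// (`TypeId`, `ModuleId`, `VarId`, `FunctionId`) around `ItemId` so that IDs
// of different item kinds cannot be mixed up at compile time, while still
// converting freely back to the untyped ID. A minimal standalone version of
// the same pattern, with hypothetical names:
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
struct ItemId(usize);

#[derive(Debug, Copy, Clone, PartialEq, Eq)]
struct TypeId(ItemId);

impl From<TypeId> for ItemId {
    fn from(id: TypeId) -> ItemId {
        id.0
    }
}

impl ItemId {
    /// "Unchecked" conversion: wrap without verifying the item kind.
    fn as_type_id_unchecked(self) -> TypeId {
        TypeId(self)
    }
}

fn main() {
    let raw = ItemId(7);
    let ty = raw.as_type_id_unchecked();
    let back: ItemId = ty.into();
    assert_eq!(raw, back);
    // A function taking `TypeId` can no longer be handed a bare `ItemId` by
    // accident, even though both have the same in-memory representation.
}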
- pub(crate) fn as_usize(self) -> usize { - self.into() - } -} - -impl ::std::cmp::PartialEq for ItemId -where - T: Copy + Into, -{ - fn eq(&self, rhs: &T) -> bool { - let rhs: ItemId = (*rhs).into(); - self.0 == rhs.0 - } -} - -impl CanDeriveDebug for T -where - T: Copy + Into, -{ - fn can_derive_debug(&self, ctx: &BindgenContext) -> bool { - ctx.options().derive_debug && ctx.lookup_can_derive_debug(*self) - } -} - -impl CanDeriveDefault for T -where - T: Copy + Into, -{ - fn can_derive_default(&self, ctx: &BindgenContext) -> bool { - ctx.options().derive_default && ctx.lookup_can_derive_default(*self) - } -} - -impl CanDeriveCopy for T -where - T: Copy + Into, -{ - fn can_derive_copy(&self, ctx: &BindgenContext) -> bool { - ctx.options().derive_copy && ctx.lookup_can_derive_copy(*self) - } -} - -impl CanDeriveHash for T -where - T: Copy + Into, -{ - fn can_derive_hash(&self, ctx: &BindgenContext) -> bool { - ctx.options().derive_hash && ctx.lookup_can_derive_hash(*self) - } -} - -impl CanDerivePartialOrd for T -where - T: Copy + Into, -{ - fn can_derive_partialord(&self, ctx: &BindgenContext) -> bool { - ctx.options().derive_partialord && - ctx.lookup_can_derive_partialeq_or_partialord(*self) == - CanDerive::Yes - } -} - -impl CanDerivePartialEq for T -where - T: Copy + Into, -{ - fn can_derive_partialeq(&self, ctx: &BindgenContext) -> bool { - ctx.options().derive_partialeq && - ctx.lookup_can_derive_partialeq_or_partialord(*self) == - CanDerive::Yes - } -} - -impl CanDeriveEq for T -where - T: Copy + Into, -{ - fn can_derive_eq(&self, ctx: &BindgenContext) -> bool { - ctx.options().derive_eq && - ctx.lookup_can_derive_partialeq_or_partialord(*self) == - CanDerive::Yes && - !ctx.lookup_has_float(*self) - } -} - -impl CanDeriveOrd for T -where - T: Copy + Into, -{ - fn can_derive_ord(&self, ctx: &BindgenContext) -> bool { - ctx.options().derive_ord && - ctx.lookup_can_derive_partialeq_or_partialord(*self) == - CanDerive::Yes && - !ctx.lookup_has_float(*self) - } -} - -/// A key used to index a resolved type, so we only process it once. -/// -/// This is almost always a USR string (an unique identifier generated by -/// clang), but it can also be the canonical declaration if the type is unnamed, -/// in which case clang may generate the same USR for multiple nested unnamed -/// types. -#[derive(Eq, PartialEq, Hash, Debug)] -enum TypeKey { - Usr(String), - Declaration(Cursor), -} - -/// A context used during parsing and generation of structs. -#[derive(Debug)] -pub(crate) struct BindgenContext { - /// The map of all the items parsed so far, keyed off `ItemId`. - items: Vec>, - - /// Clang USR to type map. This is needed to be able to associate types with - /// item ids during parsing. - types: HashMap, - - /// Maps from a cursor to the item ID of the named template type parameter - /// for that cursor. - type_params: HashMap, - - /// A cursor to module map. Similar reason than above. - modules: HashMap, - - /// The root module, this is guaranteed to be an item of kind Module. - root_module: ModuleId, - - /// Current module being traversed. - current_module: ModuleId, - - /// A `HashMap` keyed on a type definition, and whose value is the parent ID - /// of the declaration. - /// - /// This is used to handle the cases where the semantic and the lexical - /// parents of the cursor differ, like when a nested class is defined - /// outside of the parent class. - semantic_parents: HashMap, - - /// A stack with the current type declarations and types we're parsing. 
This - /// is needed to avoid infinite recursion when parsing a type like: - /// - /// struct c { struct c* next; }; - /// - /// This means effectively, that a type has a potential ID before knowing if - /// it's a correct type. But that's not important in practice. - /// - /// We could also use the `types` `HashMap`, but my intention with it is that - /// only valid types and declarations end up there, and this could - /// potentially break that assumption. - currently_parsed_types: Vec, - - /// A map with all the already parsed macro names. This is done to avoid - /// hard errors while parsing duplicated macros, as well to allow macro - /// expression parsing. - /// - /// This needs to be an `std::HashMap` because the `cexpr` API requires it. - parsed_macros: StdHashMap, cexpr::expr::EvalResult>, - - /// A map with all include locations. - /// - /// This is needed so that items are created in the order they are defined in. - /// - /// The key is the included file, the value is a pair of the source file and - /// the position of the `#include` directive in the source file. - includes: StdHashMap, - - /// A set of all the included filenames. - deps: BTreeSet>, - - /// The active replacements collected from replaces="xxx" annotations. - replacements: HashMap, ItemId>, - - collected_typerefs: bool, - - in_codegen: bool, - - /// The translation unit for parsing. - translation_unit: clang::TranslationUnit, - - /// The translation unit for macro fallback parsing. - fallback_tu: Option, - - /// Target information that can be useful for some stuff. - target_info: clang::TargetInfo, - - /// The options given by the user via cli or other medium. - options: BindgenOptions, - - /// Whether an opaque array was generated - generated_opaque_array: Cell, - - /// Whether a bindgen complex was generated - generated_bindgen_complex: Cell, - - /// Whether a bindgen float16 was generated - generated_bindgen_float16: Cell, - - /// The set of `ItemId`s that are allowlisted. This the very first thing - /// computed after parsing our IR, and before running any of our analyses. - allowlisted: Option, - - /// Cache for calls to `ParseCallbacks::blocklisted_type_implements_trait` - blocklisted_types_implement_traits: - RefCell>>, - - /// The set of `ItemId`s that are allowlisted for code generation _and_ that - /// we should generate accounting for the codegen options. - /// - /// It's computed right after computing the allowlisted items. - codegen_items: Option, - - /// Map from an item's ID to the set of template parameter items that it - /// uses. See `ir::named` for more details. Always `Some` during the codegen - /// phase. - used_template_parameters: Option>, - - /// The set of `TypeKind::Comp` items found during parsing that need their - /// bitfield allocation units computed. Drained in `compute_bitfield_units`. - need_bitfield_allocation: Vec, - - /// The set of enums that are defined by a pair of `enum` and `typedef`, - /// which is legal in C (but not C++). - /// - /// ```c++ - /// // in either order - /// enum Enum { Variants... }; - /// typedef int16_t Enum; - /// ``` - /// - /// The stored `ItemId` is that of the `TypeKind::Enum`, not of the - /// `TypeKind::Alias`. - /// - /// This is populated when we enter codegen by `compute_enum_typedef_combos` - /// and is always `None` before that and `Some` after. - enum_typedef_combos: Option>, - - /// The set of (`ItemId`s of) types that can't derive debug. 
- /// - /// This is populated when we enter codegen by `compute_cannot_derive_debug` - /// and is always `None` before that and `Some` after. - cannot_derive_debug: Option>, - - /// The set of (`ItemId`s of) types that can't derive default. - /// - /// This is populated when we enter codegen by `compute_cannot_derive_default` - /// and is always `None` before that and `Some` after. - cannot_derive_default: Option>, - - /// The set of (`ItemId`s of) types that can't derive copy. - /// - /// This is populated when we enter codegen by `compute_cannot_derive_copy` - /// and is always `None` before that and `Some` after. - cannot_derive_copy: Option>, - - /// The set of (`ItemId`s of) types that can't derive hash. - /// - /// This is populated when we enter codegen by `compute_can_derive_hash` - /// and is always `None` before that and `Some` after. - cannot_derive_hash: Option>, - - /// The map why specified `ItemId`s of) types that can't derive hash. - /// - /// This is populated when we enter codegen by - /// `compute_cannot_derive_partialord_partialeq_or_eq` and is always `None` - /// before that and `Some` after. - cannot_derive_partialeq_or_partialord: Option>, - - /// The sizedness of types. - /// - /// This is populated by `compute_sizedness` and is always `None` before - /// that function is invoked and `Some` afterwards. - sizedness: Option>, - - /// The set of (`ItemId's of`) types that has vtable. - /// - /// Populated when we enter codegen by `compute_has_vtable`; always `None` - /// before that and `Some` after. - have_vtable: Option>, - - /// The set of (`ItemId's of`) types that has destructor. - /// - /// Populated when we enter codegen by `compute_has_destructor`; always `None` - /// before that and `Some` after. - have_destructor: Option>, - - /// The set of (`ItemId's of`) types that has array. - /// - /// Populated when we enter codegen by `compute_has_type_param_in_array`; always `None` - /// before that and `Some` after. - has_type_param_in_array: Option>, - - /// The set of (`ItemId's of`) types that has float. - /// - /// Populated when we enter codegen by `compute_has_float`; always `None` - /// before that and `Some` after. - has_float: Option>, -} - -/// A traversal of allowlisted items. -struct AllowlistedItemsTraversal<'ctx> { - ctx: &'ctx BindgenContext, - traversal: ItemTraversal<'ctx, ItemSet, Vec>, -} - -impl Iterator for AllowlistedItemsTraversal<'_> { - type Item = ItemId; - - fn next(&mut self) -> Option { - loop { - let id = self.traversal.next()?; - - if self.ctx.resolve_item(id).is_blocklisted(self.ctx) { - continue; - } - - return Some(id); - } - } -} - -impl<'ctx> AllowlistedItemsTraversal<'ctx> { - /// Construct a new allowlisted items traversal. - pub(crate) fn new( - ctx: &'ctx BindgenContext, - roots: R, - predicate: for<'a> fn(&'a BindgenContext, Edge) -> bool, - ) -> Self - where - R: IntoIterator, - { - AllowlistedItemsTraversal { - ctx, - traversal: ItemTraversal::new(ctx, roots, predicate), - } - } -} - -impl BindgenContext { - /// Construct the context for the given `options`. - pub(crate) fn new( - options: BindgenOptions, - input_unsaved_files: &[clang::UnsavedFile], - ) -> Self { - // TODO(emilio): Use the CXTargetInfo here when available. 
- // - // see: https://reviews.llvm.org/D32389 - let index = clang::Index::new(false, true); - - let parse_options = - clang_sys::CXTranslationUnit_DetailedPreprocessingRecord; - - let translation_unit = { - let _t = - Timer::new("translation_unit").with_output(options.time_phases); - - clang::TranslationUnit::parse( - &index, - "", - &options.clang_args, - input_unsaved_files, - parse_options, - ).expect("libclang error; possible causes include: -- Invalid flag syntax -- Unrecognized flags -- Invalid flag arguments -- File I/O errors -- Host vs. target architecture mismatch -If you encounter an error missing from this list, please file an issue or a PR!") - }; - - let target_info = clang::TargetInfo::new(&translation_unit); - let root_module = Self::build_root_module(ItemId(0)); - let root_module_id = root_module.id().as_module_id_unchecked(); - - // depfiles need to include the explicitly listed headers too - let deps = options.input_headers.iter().cloned().collect(); - - BindgenContext { - items: vec![Some(root_module)], - includes: Default::default(), - deps, - types: Default::default(), - type_params: Default::default(), - modules: Default::default(), - root_module: root_module_id, - current_module: root_module_id, - semantic_parents: Default::default(), - currently_parsed_types: vec![], - parsed_macros: Default::default(), - replacements: Default::default(), - collected_typerefs: false, - in_codegen: false, - translation_unit, - fallback_tu: None, - target_info, - options, - generated_bindgen_complex: Cell::new(false), - generated_bindgen_float16: Cell::new(false), - generated_opaque_array: Cell::new(false), - allowlisted: None, - blocklisted_types_implement_traits: Default::default(), - codegen_items: None, - used_template_parameters: None, - need_bitfield_allocation: Default::default(), - enum_typedef_combos: None, - cannot_derive_debug: None, - cannot_derive_default: None, - cannot_derive_copy: None, - cannot_derive_hash: None, - cannot_derive_partialeq_or_partialord: None, - sizedness: None, - have_vtable: None, - have_destructor: None, - has_type_param_in_array: None, - has_float: None, - } - } - - /// Returns `true` if the target architecture is wasm32 - pub(crate) fn is_target_wasm32(&self) -> bool { - self.target_info.triple.starts_with("wasm32-") - } - - /// Creates a timer for the current bindgen phase. If `time_phases` is `true`, - /// the timer will print to stderr when it is dropped, otherwise it will do - /// nothing. - pub(crate) fn timer<'a>(&self, name: &'a str) -> Timer<'a> { - Timer::new(name).with_output(self.options.time_phases) - } - - /// Returns the pointer width to use for the target for the current - /// translation. - pub(crate) fn target_pointer_size(&self) -> usize { - self.target_info.pointer_width / 8 - } - - /// Returns the ABI, which is mostly useful for determining the mangling kind. - pub(crate) fn abi_kind(&self) -> ABIKind { - self.target_info.abi - } - - /// Get the stack of partially parsed types that we are in the middle of - /// parsing. - pub(crate) fn currently_parsed_types(&self) -> &[PartialType] { - &self.currently_parsed_types[..] - } - - /// Begin parsing the given partial type, and push it onto the - /// `currently_parsed_types` stack so that we won't infinite recurse if we - /// run into a reference to it while parsing it. 
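// --- Editor's note (illustrative sketch; not part of the deleted bindgen
// source or of this patch): `begin_parsing` / `finish_parsing` below maintain
// the `currently_parsed_types` stack so that a self-referential type such as
// `struct c { struct c *next; };` gets a provisional ID up front, and a later
// reference to it resolves against the stack instead of recursing forever.
// A standalone sketch of that guard, keyed by name for simplicity (bindgen
// itself keys off cursors and USRs):
struct Parser {
    in_progress: Vec<(String, usize)>, // (type name, provisional id)
    next_id: usize,
}

impl Parser {
    fn parse_struct(&mut self, name: &str, field_types: &[&str]) -> usize {
        // A reference to a type we are already parsing reuses its provisional id.
        if let Some(&(_, id)) = self.in_progress.iter().find(|(n, _)| n == name) {
            return id;
        }
        let id = self.next_id;
        self.next_id += 1;
        self.in_progress.push((name.to_owned(), id)); // begin_parsing
        for &field in field_types {
            // Recursing into `struct c` from inside `struct c` terminates
            // immediately thanks to the stack lookup above.
            self.parse_struct(field, &[]);
        }
        self.in_progress.pop(); // finish_parsing
        id
    }
}

fn main() {
    let mut p = Parser { in_progress: Vec::new(), next_id: 0 };
    // struct c { struct c *next; };
    let id = p.parse_struct("c", &["c"]);
    assert_eq!(id, 0);
    assert!(p.in_progress.is_empty());
}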
- pub(crate) fn begin_parsing(&mut self, partial_ty: PartialType) { - self.currently_parsed_types.push(partial_ty); - } - - /// Finish parsing the current partial type, pop it off the - /// `currently_parsed_types` stack, and return it. - pub(crate) fn finish_parsing(&mut self) -> PartialType { - self.currently_parsed_types.pop().expect( - "should have been parsing a type, if we finished parsing a type", - ) - } - - /// Add the location of the `#include` directive for the `included_file`. - pub(crate) fn add_include( - &mut self, - source_file: String, - included_file: String, - offset: usize, - ) { - self.includes - .entry(included_file) - .or_insert((source_file, offset)); - } - - /// Get the location of the first `#include` directive for the `included_file`. - pub(crate) fn included_file_location( - &self, - included_file: &str, - ) -> Option<(String, usize)> { - self.includes.get(included_file).cloned() - } - - /// Add an included file. - pub(crate) fn add_dep(&mut self, dep: Box) { - self.deps.insert(dep); - } - - /// Get any included files. - pub(crate) fn deps(&self) -> &BTreeSet> { - &self.deps - } - - /// Define a new item. - /// - /// This inserts it into the internal items set, and its type into the - /// internal types set. - pub(crate) fn add_item( - &mut self, - item: Item, - declaration: Option, - location: Option, - ) { - debug!("BindgenContext::add_item({item:?}, declaration: {declaration:?}, loc: {location:?}"); - debug_assert!( - declaration.is_some() || - !item.kind().is_type() || - item.kind().expect_type().is_builtin_or_type_param() || - item.kind().expect_type().is_opaque(self, &item) || - item.kind().expect_type().is_unresolved_ref(), - "Adding a type without declaration?" - ); - - let id = item.id(); - let is_type = item.kind().is_type(); - let is_unnamed = is_type && item.expect_type().name().is_none(); - let is_template_instantiation = - is_type && item.expect_type().is_template_instantiation(); - - if item.id() != self.root_module { - self.add_item_to_module(&item); - } - - if is_type && item.expect_type().is_comp() { - self.need_bitfield_allocation.push(id); - } - - let old_item = mem::replace(&mut self.items[id.0], Some(item)); - assert!( - old_item.is_none(), - "should not have already associated an item with the given id" - ); - - // Unnamed items can have an USR, but they can't be referenced from - // other sites explicitly and the USR can match if the unnamed items are - // nested, so don't bother tracking them. - if !is_type || is_template_instantiation { - return; - } - if let Some(mut declaration) = declaration { - if !declaration.is_valid() { - if let Some(location) = location { - if location.is_template_like() { - declaration = location; - } - } - } - declaration = declaration.canonical(); - if !declaration.is_valid() { - // This could happen, for example, with types like `int*` or - // similar. - // - // Fortunately, we don't care about those types being - // duplicated, so we can just ignore them. 
- debug!( - "Invalid declaration {declaration:?} found for type {:?}", - self.resolve_item_fallible(id) - .unwrap() - .kind() - .expect_type() - ); - return; - } - - let key = if is_unnamed { - TypeKey::Declaration(declaration) - } else if let Some(usr) = declaration.usr() { - TypeKey::Usr(usr) - } else { - warn!("Valid declaration with no USR: {declaration:?}, {location:?}"); - TypeKey::Declaration(declaration) - }; - - let old = self.types.insert(key, id.as_type_id_unchecked()); - debug_assert_eq!(old, None); - } - } - - /// Ensure that every item (other than the root module) is in a module's - /// children list. This is to make sure that every allowlisted item get's - /// codegen'd, even if its parent is not allowlisted. See issue #769 for - /// details. - fn add_item_to_module(&mut self, item: &Item) { - assert_ne!(item.id(), self.root_module); - assert!(self.resolve_item_fallible(item.id()).is_none()); - - if let Some(ref mut parent) = self.items[item.parent_id().0] { - if let Some(module) = parent.as_module_mut() { - debug!( - "add_item_to_module: adding {:?} as child of parent module {:?}", - item.id(), - item.parent_id() - ); - - module.children_mut().insert(item.id()); - return; - } - } - - debug!( - "add_item_to_module: adding {:?} as child of current module {:?}", - item.id(), - self.current_module - ); - - self.items[self.current_module.0 .0] - .as_mut() - .expect("Should always have an item for self.current_module") - .as_module_mut() - .expect("self.current_module should always be a module") - .children_mut() - .insert(item.id()); - } - - /// Add a new named template type parameter to this context's item set. - pub(crate) fn add_type_param(&mut self, item: Item, definition: Cursor) { - debug!("BindgenContext::add_type_param: item = {item:?}; definition = {definition:?}"); - - assert!( - item.expect_type().is_type_param(), - "Should directly be a named type, not a resolved reference or anything" - ); - assert_eq!( - definition.kind(), - clang_sys::CXCursor_TemplateTypeParameter - ); - - self.add_item_to_module(&item); - - let id = item.id(); - let old_item = mem::replace(&mut self.items[id.0], Some(item)); - assert!( - old_item.is_none(), - "should not have already associated an item with the given id" - ); - - let old_named_ty = self - .type_params - .insert(definition, id.as_type_id_unchecked()); - assert!( - old_named_ty.is_none(), - "should not have already associated a named type with this id" - ); - } - - /// Get the named type defined at the given cursor location, if we've - /// already added one. - pub(crate) fn get_type_param(&self, definition: &Cursor) -> Option { - assert_eq!( - definition.kind(), - clang_sys::CXCursor_TemplateTypeParameter - ); - self.type_params.get(definition).copied() - } - - // TODO: Move all this syntax crap to other part of the code. - - /// Mangles a name so it doesn't conflict with any keyword. 
- #[rustfmt::skip] - pub(crate) fn rust_mangle<'a>(&self, name: &'a str) -> Cow<'a, str> { - if name.contains('@') || - name.contains('?') || - name.contains('$') || - matches!( - name, - "abstract" | "alignof" | "as" | "async" | "await" | "become" | - "box" | "break" | "const" | "continue" | "crate" | "do" | - "dyn" | "else" | "enum" | "extern" | "false" | "final" | - "fn" | "for" | "gen" | "if" | "impl" | "in" | "let" | "loop" | - "macro" | "match" | "mod" | "move" | "mut" | "offsetof" | - "override" | "priv" | "proc" | "pub" | "pure" | "ref" | - "return" | "Self" | "self" | "sizeof" | "static" | - "struct" | "super" | "trait" | "true" | "try" | "type" | "typeof" | - "unsafe" | "unsized" | "use" | "virtual" | "where" | - "while" | "yield" | "str" | "bool" | "f32" | "f64" | - "usize" | "isize" | "u128" | "i128" | "u64" | "i64" | - "u32" | "i32" | "u16" | "i16" | "u8" | "i8" | "_" - ) - { - let mut s = name.to_owned(); - s = s.replace('@', "_"); - s = s.replace('?', "_"); - s = s.replace('$', "_"); - s.push('_'); - return Cow::Owned(s); - } - Cow::Borrowed(name) - } - - /// Returns a mangled name as a rust identifier. - pub(crate) fn rust_ident(&self, name: S) -> Ident - where - S: AsRef, - { - self.rust_ident_raw(self.rust_mangle(name.as_ref())) - } - - /// Returns a mangled name as a rust identifier. - pub(crate) fn rust_ident_raw(&self, name: T) -> Ident - where - T: AsRef, - { - Ident::new(name.as_ref(), Span::call_site()) - } - - /// Iterate over all items that have been defined. - pub(crate) fn items(&self) -> impl Iterator { - self.items.iter().enumerate().filter_map(|(index, item)| { - let item = item.as_ref()?; - Some((ItemId(index), item)) - }) - } - - /// Have we collected all unresolved type references yet? - pub(crate) fn collected_typerefs(&self) -> bool { - self.collected_typerefs - } - - /// Gather all the unresolved type references. - fn collect_typerefs( - &mut self, - ) -> Vec<(ItemId, clang::Type, Cursor, Option)> { - debug_assert!(!self.collected_typerefs); - self.collected_typerefs = true; - let mut typerefs = vec![]; - - for (id, item) in self.items() { - let kind = item.kind(); - let Some(ty) = kind.as_type() else { continue }; - - if let TypeKind::UnresolvedTypeRef(ref ty, loc, parent_id) = - *ty.kind() - { - typerefs.push((id, *ty, loc, parent_id)); - } - } - typerefs - } - - /// Collect all of our unresolved type references and resolve them. - fn resolve_typerefs(&mut self) { - let _t = self.timer("resolve_typerefs"); - - let typerefs = self.collect_typerefs(); - - for (id, ty, loc, parent_id) in typerefs { - let _resolved = - { - let resolved = Item::from_ty(&ty, loc, parent_id, self) - .unwrap_or_else(|_| { - warn!("Could not resolve type reference, falling back \ - to opaque blob"); - Item::new_opaque_type(self.next_item_id(), &ty, self) - }); - - let item = self.items[id.0].as_mut().unwrap(); - *item.kind_mut().as_type_mut().unwrap().kind_mut() = - TypeKind::ResolvedTypeRef(resolved); - resolved - }; - - // Something in the STL is trolling me. I don't need this assertion - // right now, but worth investigating properly once this lands. - // - // debug_assert!(self.items.get(&resolved).is_some(), "How?"); - // - // if let Some(parent_id) = parent_id { - // assert_eq!(self.items[&resolved].parent_id(), parent_id); - // } - } - } - - /// Temporarily loan `Item` with the given `ItemId`. This provides means to - /// mutably borrow `Item` while having a reference to `BindgenContext`. 
- /// - /// `Item` with the given `ItemId` is removed from the context, given - /// closure is executed and then `Item` is placed back. - /// - /// # Panics - /// - /// Panics if attempt to resolve given `ItemId` inside the given - /// closure is made. - fn with_loaned_item(&mut self, id: ItemId, f: F) -> T - where - F: (FnOnce(&BindgenContext, &mut Item) -> T), - { - let mut item = self.items[id.0].take().unwrap(); - - let result = f(self, &mut item); - - let existing = mem::replace(&mut self.items[id.0], Some(item)); - assert!(existing.is_none()); - - result - } - - /// Compute the bitfield allocation units for all `TypeKind::Comp` items we - /// parsed. - fn compute_bitfield_units(&mut self) { - let _t = self.timer("compute_bitfield_units"); - - assert!(self.collected_typerefs()); - - let need_bitfield_allocation = - mem::take(&mut self.need_bitfield_allocation); - for id in need_bitfield_allocation { - self.with_loaned_item(id, |ctx, item| { - let ty = item.kind_mut().as_type_mut().unwrap(); - let layout = ty.layout(ctx); - ty.as_comp_mut() - .unwrap() - .compute_bitfield_units(ctx, layout.as_ref()); - }); - } - } - - /// Assign a new generated name for each anonymous field. - fn deanonymize_fields(&mut self) { - let _t = self.timer("deanonymize_fields"); - - let comp_item_ids: Vec = self - .items() - .filter_map(|(id, item)| { - if item.kind().as_type()?.is_comp() { - return Some(id); - } - None - }) - .collect(); - - for id in comp_item_ids { - self.with_loaned_item(id, |ctx, item| { - item.kind_mut() - .as_type_mut() - .unwrap() - .as_comp_mut() - .unwrap() - .deanonymize_fields(ctx); - }); - } - } - - /// Iterate over all items and replace any item that has been named in a - /// `replaces="SomeType"` annotation with the replacement type. - fn process_replacements(&mut self) { - let _t = self.timer("process_replacements"); - if self.replacements.is_empty() { - debug!("No replacements to process"); - return; - } - - // FIXME: This is linear, but the replaces="xxx" annotation was already - // there, and for better or worse it's useful, sigh... - // - // We leverage the ResolvedTypeRef thing, though, which is cool :P. - - let mut replacements = vec![]; - - for (id, item) in self.items() { - if item.annotations().use_instead_of().is_some() { - continue; - } - - // Calls to `canonical_name` are expensive, so eagerly filter out - // items that cannot be replaced. - let Some(ty) = item.kind().as_type() else { - continue; - }; - - match *ty.kind() { - TypeKind::Comp(..) | - TypeKind::TemplateAlias(..) | - TypeKind::Enum(..) | - TypeKind::Alias(..) => {} - _ => continue, - } - - let path = item.path_for_allowlisting(self); - let replacement = self.replacements.get(&path[1..]); - - if let Some(replacement) = replacement { - if *replacement != id { - // We set this just after parsing the annotation. It's - // very unlikely, but this can happen. - if self.resolve_item_fallible(*replacement).is_some() { - replacements.push(( - id.expect_type_id(self), - replacement.expect_type_id(self), - )); - } - } - } - } - - for (id, replacement_id) in replacements { - debug!("Replacing {id:?} with {replacement_id:?}"); - let new_parent = { - let item_id: ItemId = id.into(); - let item = self.items[item_id.0].as_mut().unwrap(); - *item.kind_mut().as_type_mut().unwrap().kind_mut() = - TypeKind::ResolvedTypeRef(replacement_id); - item.parent_id() - }; - - // Relocate the replacement item from where it was declared, to - // where the thing it is replacing was declared. 
- // - // First, we'll make sure that its parent ID is correct. - - let old_parent = self.resolve_item(replacement_id).parent_id(); - if new_parent == old_parent { - // Same parent and therefore also same containing - // module. Nothing to do here. - continue; - } - - let replacement_item_id: ItemId = replacement_id.into(); - self.items[replacement_item_id.0] - .as_mut() - .unwrap() - .set_parent_for_replacement(new_parent); - - // Second, make sure that it is in the correct module's children - // set. - - let old_module = { - let immut_self = &*self; - old_parent - .ancestors(immut_self) - .chain(Some(immut_self.root_module.into())) - .find(|id| { - let item = immut_self.resolve_item(*id); - item.as_module().is_some_and(|m| { - m.children().contains(&replacement_id.into()) - }) - }) - }; - let old_module = old_module - .expect("Every replacement item should be in a module"); - - let new_module = { - let immut_self = &*self; - new_parent - .ancestors(immut_self) - .find(|id| immut_self.resolve_item(*id).is_module()) - }; - let new_module = - new_module.unwrap_or_else(|| self.root_module.into()); - - if new_module == old_module { - // Already in the correct module. - continue; - } - - self.items[old_module.0] - .as_mut() - .unwrap() - .as_module_mut() - .unwrap() - .children_mut() - .remove(&replacement_id.into()); - - self.items[new_module.0] - .as_mut() - .unwrap() - .as_module_mut() - .unwrap() - .children_mut() - .insert(replacement_id.into()); - } - } - - /// Enter the code generation phase, invoke the given callback `cb`, and - /// leave the code generation phase. - pub(crate) fn gen( - mut self, - cb: F, - ) -> Result<(Out, BindgenOptions), CodegenError> - where - F: FnOnce(&Self) -> Result, - { - self.in_codegen = true; - - self.resolve_typerefs(); - self.compute_bitfield_units(); - self.process_replacements(); - - self.deanonymize_fields(); - - self.assert_no_dangling_references(); - - // Compute the allowlisted set after processing replacements and - // resolving type refs, as those are the final mutations of the IR - // graph, and their completion means that the IR graph is now frozen. - self.compute_allowlisted_and_codegen_items(); - - // Make sure to do this after processing replacements, since that messes - // with the parentage and module children, and we want to assert that it - // messes with them correctly. - self.assert_every_item_in_a_module(); - - self.compute_has_vtable(); - self.compute_sizedness(); - self.compute_has_destructor(); - self.find_used_template_parameters(); - self.compute_enum_typedef_combos(); - self.compute_cannot_derive_debug(); - self.compute_cannot_derive_default(); - self.compute_cannot_derive_copy(); - self.compute_has_type_param_in_array(); - self.compute_has_float(); - self.compute_cannot_derive_hash(); - self.compute_cannot_derive_partialord_partialeq_or_eq(); - - let ret = cb(&self)?; - Ok((ret, self.options)) - } - - /// When the `__testing_only_extra_assertions` feature is enabled, this - /// function walks the IR graph and asserts that we do not have any edges - /// referencing an `ItemId` for which we do not have an associated IR item. - fn assert_no_dangling_references(&self) { - if cfg!(feature = "__testing_only_extra_assertions") { - for _ in self.assert_no_dangling_item_traversal() { - // The iterator's next method does the asserting for us. 
- } - } - } - - fn assert_no_dangling_item_traversal( - &self, - ) -> traversal::AssertNoDanglingItemsTraversal<'_> { - assert!(self.in_codegen_phase()); - assert_eq!(self.current_module, self.root_module); - - let roots = self.items().map(|(id, _)| id); - traversal::AssertNoDanglingItemsTraversal::new( - self, - roots, - traversal::all_edges, - ) - } - - /// When the `__testing_only_extra_assertions` feature is enabled, walk over - /// every item and ensure that it is in the children set of one of its - /// module ancestors. - fn assert_every_item_in_a_module(&self) { - if cfg!(feature = "__testing_only_extra_assertions") { - assert!(self.in_codegen_phase()); - assert_eq!(self.current_module, self.root_module); - - for (id, _item) in self.items() { - if id == self.root_module { - continue; - } - - assert!( - { - let id = id - .into_resolver() - .through_type_refs() - .through_type_aliases() - .resolve(self) - .id(); - id.ancestors(self) - .chain(Some(self.root_module.into())) - .any(|ancestor| { - debug!("Checking if {id:?} is a child of {ancestor:?}"); - self.resolve_item(ancestor) - .as_module() - .is_some_and(|m| m.children().contains(&id)) - }) - }, - "{id:?} should be in some ancestor module's children set" - ); - } - } - } - - /// Compute for every type whether it is sized or not, and whether it is - /// sized or not as a base class. - fn compute_sizedness(&mut self) { - let _t = self.timer("compute_sizedness"); - assert!(self.sizedness.is_none()); - self.sizedness = Some(analyze::(self)); - } - - /// Look up whether the type with the given ID is sized or not. - pub(crate) fn lookup_sizedness(&self, id: TypeId) -> SizednessResult { - assert!( - self.in_codegen_phase(), - "We only compute sizedness after we've entered codegen" - ); - - self.sizedness - .as_ref() - .unwrap() - .get(&id) - .copied() - .unwrap_or(SizednessResult::ZeroSized) - } - - /// Compute whether the type has vtable. - fn compute_has_vtable(&mut self) { - let _t = self.timer("compute_has_vtable"); - assert!(self.have_vtable.is_none()); - self.have_vtable = Some(analyze::(self)); - } - - /// Look up whether the item with `id` has vtable or not. - pub(crate) fn lookup_has_vtable(&self, id: TypeId) -> HasVtableResult { - assert!( - self.in_codegen_phase(), - "We only compute vtables when we enter codegen" - ); - - // Look up the computed value for whether the item with `id` has a - // vtable or not. - self.have_vtable - .as_ref() - .unwrap() - .get(&id.into()) - .copied() - .unwrap_or(HasVtableResult::No) - } - - /// Compute whether the type has a destructor. - fn compute_has_destructor(&mut self) { - let _t = self.timer("compute_has_destructor"); - assert!(self.have_destructor.is_none()); - self.have_destructor = Some(analyze::(self)); - } - - /// Look up whether the item with `id` has a destructor. - pub(crate) fn lookup_has_destructor(&self, id: TypeId) -> bool { - assert!( - self.in_codegen_phase(), - "We only compute destructors when we enter codegen" - ); - - self.have_destructor.as_ref().unwrap().contains(&id.into()) - } - - fn find_used_template_parameters(&mut self) { - let _t = self.timer("find_used_template_parameters"); - if self.options.allowlist_recursively { - let used_params = analyze::(self); - self.used_template_parameters = Some(used_params); - } else { - // If you aren't recursively allowlisting, then we can't really make - // any sense of template parameter usage, and you're on your own. 
- let mut used_params = HashMap::default(); - for &id in self.allowlisted_items() { - used_params.entry(id).or_insert_with(|| { - id.self_template_params(self) - .into_iter() - .map(|p| p.into()) - .collect() - }); - } - self.used_template_parameters = Some(used_params); - } - } - - /// Return `true` if `item` uses the given `template_param`, `false` - /// otherwise. - /// - /// This method may only be called during the codegen phase, because the - /// template usage information is only computed as we enter the codegen - /// phase. - /// - /// If the item is blocklisted, then we say that it always uses the template - /// parameter. This is a little subtle. The template parameter usage - /// analysis only considers allowlisted items, and if any blocklisted item - /// shows up in the generated bindings, it is the user's responsibility to - /// manually provide a definition for them. To give them the most - /// flexibility when doing that, we assume that they use every template - /// parameter and always pass template arguments through in instantiations. - pub(crate) fn uses_template_parameter( - &self, - item: ItemId, - template_param: TypeId, - ) -> bool { - assert!( - self.in_codegen_phase(), - "We only compute template parameter usage as we enter codegen" - ); - - if self.resolve_item(item).is_blocklisted(self) { - return true; - } - - let template_param = template_param - .into_resolver() - .through_type_refs() - .through_type_aliases() - .resolve(self) - .id(); - - self.used_template_parameters - .as_ref() - .expect("should have found template parameter usage if we're in codegen") - .get(&item).is_some_and(|items_used_params| items_used_params.contains(&template_param)) - } - - /// Return `true` if `item` uses any unbound, generic template parameters, - /// `false` otherwise. - /// - /// Has the same restrictions that `uses_template_parameter` has. - pub(crate) fn uses_any_template_parameters(&self, item: ItemId) -> bool { - assert!( - self.in_codegen_phase(), - "We only compute template parameter usage as we enter codegen" - ); - - self.used_template_parameters - .as_ref() - .expect( - "should have template parameter usage info in codegen phase", - ) - .get(&item) - .is_some_and(|used| !used.is_empty()) - } - - // This deserves a comment. Builtin types don't get a valid declaration, so - // we can't add it to the cursor->type map. - // - // That being said, they're not generated anyway, and are few, so the - // duplication and special-casing is fine. - // - // If at some point we care about the memory here, probably a map TypeKind - // -> builtin type ItemId would be the best to improve that. - fn add_builtin_item(&mut self, item: Item) { - debug!("add_builtin_item: item = {item:?}"); - debug_assert!(item.kind().is_type()); - self.add_item_to_module(&item); - let id = item.id(); - let old_item = mem::replace(&mut self.items[id.0], Some(item)); - assert!(old_item.is_none(), "Inserted type twice?"); - } - - fn build_root_module(id: ItemId) -> Item { - let module = Module::new(Some("root".into()), ModuleKind::Normal); - Item::new(id, None, None, id, ItemKind::Module(module), None) - } - - /// Get the root module. - pub(crate) fn root_module(&self) -> ModuleId { - self.root_module - } - - /// Resolve a type with the given ID. - /// - /// Panics if there is no item for the given `TypeId` or if the resolved - /// item is not a `Type`. 
- pub(crate) fn resolve_type(&self, type_id: TypeId) -> &Type { - self.resolve_item(type_id).kind().expect_type() - } - - /// Resolve a function with the given ID. - /// - /// Panics if there is no item for the given `FunctionId` or if the resolved - /// item is not a `Function`. - pub(crate) fn resolve_func(&self, func_id: FunctionId) -> &Function { - self.resolve_item(func_id).kind().expect_function() - } - - /// Resolve the given `ItemId` as a type, or `None` if there is no item with - /// the given ID. - /// - /// Panics if the ID resolves to an item that is not a type. - pub(crate) fn safe_resolve_type(&self, type_id: TypeId) -> Option<&Type> { - self.resolve_item_fallible(type_id) - .map(|t| t.kind().expect_type()) - } - - /// Resolve the given `ItemId` into an `Item`, or `None` if no such item - /// exists. - pub(crate) fn resolve_item_fallible>( - &self, - id: Id, - ) -> Option<&Item> { - self.items.get(id.into().0)?.as_ref() - } - - /// Resolve the given `ItemId` into an `Item`. - /// - /// Panics if the given ID does not resolve to any item. - pub(crate) fn resolve_item>(&self, item_id: Id) -> &Item { - let item_id = item_id.into(); - match self.resolve_item_fallible(item_id) { - Some(item) => item, - None => panic!("Not an item: {item_id:?}"), - } - } - - /// Get the current module. - pub(crate) fn current_module(&self) -> ModuleId { - self.current_module - } - - /// Add a semantic parent for a given type definition. - /// - /// We do this from the type declaration, in order to be able to find the - /// correct type definition afterwards. - /// - /// TODO(emilio): We could consider doing this only when - /// `declaration.lexical_parent() != definition.lexical_parent()`, but it's - /// not sure it's worth it. - pub(crate) fn add_semantic_parent( - &mut self, - definition: Cursor, - parent_id: ItemId, - ) { - self.semantic_parents.insert(definition, parent_id); - } - - /// Returns a known semantic parent for a given definition. - pub(crate) fn known_semantic_parent( - &self, - definition: Cursor, - ) -> Option { - self.semantic_parents.get(&definition).copied() - } - - /// Given a cursor pointing to the location of a template instantiation, - /// return a tuple of the form `(declaration_cursor, declaration_id, - /// num_expected_template_args)`. - /// - /// Note that `declaration_id` is not guaranteed to be in the context's item - /// set! It is possible that it is a partial type that we are still in the - /// middle of parsing. - fn get_declaration_info_for_template_instantiation( - &self, - instantiation: &Cursor, - ) -> Option<(Cursor, ItemId, usize)> { - instantiation - .cur_type() - .canonical_declaration(Some(instantiation)) - .and_then(|canon_decl| { - self.get_resolved_type(&canon_decl).and_then( - |template_decl_id| { - let num_params = - template_decl_id.num_self_template_params(self); - if num_params == 0 { - None - } else { - Some(( - *canon_decl.cursor(), - template_decl_id.into(), - num_params, - )) - } - }, - ) - }) - .or_else(|| { - // If we haven't already parsed the declaration of - // the template being instantiated, then it *must* - // be on the stack of types we are currently - // parsing. If it wasn't then clang would have - // already errored out before we started - // constructing our IR because you can't instantiate - // a template until it is fully defined. 
- instantiation - .referenced() - .and_then(|referenced| { - self.currently_parsed_types() - .iter() - .find(|partial_ty| *partial_ty.decl() == referenced) - }) - .and_then(|template_decl| { - let num_template_params = - template_decl.num_self_template_params(self); - if num_template_params == 0 { - None - } else { - Some(( - *template_decl.decl(), - template_decl.id(), - num_template_params, - )) - } - }) - }) - } - - /// Parse a template instantiation, eg `Foo`. - /// - /// This is surprisingly difficult to do with libclang, due to the fact that - /// it doesn't provide explicit template argument information, except for - /// function template declarations(!?!??!). - /// - /// The only way to do this is manually inspecting the AST and looking for - /// `TypeRefs` and `TemplateRefs` inside. This, unfortunately, doesn't work for - /// more complex cases, see the comment on the assertion below. - /// - /// To add insult to injury, the AST itself has structure that doesn't make - /// sense. Sometimes `Foo>` has an AST with nesting like you might - /// expect: `(Foo (Bar (int)))`. Other times, the AST we get is completely - /// flat: `(Foo Bar int)`. - /// - /// To see an example of what this method handles: - /// - /// ```c++ - /// template - /// class Incomplete { - /// T p; - /// }; - /// - /// template - /// class Foo { - /// Incomplete bar; - /// }; - /// ``` - /// - /// Finally, template instantiations are always children of the current - /// module. They use their template's definition for their name, so the - /// parent is only useful for ensuring that their layout tests get - /// codegen'd. - fn instantiate_template( - &mut self, - with_id: ItemId, - template: TypeId, - ty: &clang::Type, - location: Cursor, - ) -> Option { - let num_expected_args = - self.resolve_type(template).num_self_template_params(self); - if num_expected_args == 0 { - warn!( - "Tried to instantiate a template for which we could not \ - determine any template parameters" - ); - return None; - } - - let mut args = vec![]; - let mut found_const_arg = false; - let mut children = location.collect_children(); - - if children.iter().all(|c| !c.has_children()) { - // This is insanity... If clang isn't giving us a properly nested - // AST for which template arguments belong to which template we are - // instantiating, we'll need to construct it ourselves. However, - // there is an extra `NamespaceRef, NamespaceRef, ..., TemplateRef` - // representing a reference to the outermost template declaration - // that we need to filter out of the children. We need to do this - // filtering because we already know which template declaration is - // being specialized via the `location`'s type, and if we do not - // filter it out, we'll add an extra layer of template instantiation - // on accident. - let idx = children - .iter() - .position(|c| c.kind() == clang_sys::CXCursor_TemplateRef); - if let Some(idx) = idx { - if children - .iter() - .take(idx) - .all(|c| c.kind() == clang_sys::CXCursor_NamespaceRef) - { - children = children.into_iter().skip(idx + 1).collect(); - } - } - } - - for child in children.iter().rev() { - match child.kind() { - clang_sys::CXCursor_TypeRef | - clang_sys::CXCursor_TypedefDecl | - clang_sys::CXCursor_TypeAliasDecl => { - // The `with_id` ID will potentially end up unused if we give up - // on this type (for example, because it has const value - // template args), so if we pass `with_id` as the parent, it is - // potentially a dangling reference. 
Instead, use the canonical - // template declaration as the parent. It is already parsed and - // has a known-resolvable `ItemId`. - let ty = Item::from_ty_or_ref( - child.cur_type(), - *child, - Some(template.into()), - self, - ); - args.push(ty); - } - clang_sys::CXCursor_TemplateRef => { - let ( - template_decl_cursor, - template_decl_id, - num_expected_template_args, - ) = self.get_declaration_info_for_template_instantiation( - child, - )?; - - if num_expected_template_args == 0 || - child.has_at_least_num_children( - num_expected_template_args, - ) - { - // Do a happy little parse. See comment in the TypeRef - // match arm about parent IDs. - let ty = Item::from_ty_or_ref( - child.cur_type(), - *child, - Some(template.into()), - self, - ); - args.push(ty); - } else { - // This is the case mentioned in the doc comment where - // clang gives us a flattened AST and we have to - // reconstruct which template arguments go to which - // instantiation :( - let args_len = args.len(); - if args_len < num_expected_template_args { - warn!( - "Found a template instantiation without \ - enough template arguments" - ); - return None; - } - - let mut sub_args: Vec<_> = args - .drain(args_len - num_expected_template_args..) - .collect(); - sub_args.reverse(); - - let sub_name = Some(template_decl_cursor.spelling()); - let sub_inst = TemplateInstantiation::new( - // This isn't guaranteed to be a type that we've - // already finished parsing yet. - template_decl_id.as_type_id_unchecked(), - sub_args, - ); - let sub_kind = - TypeKind::TemplateInstantiation(sub_inst); - let sub_ty = Type::new( - sub_name, - template_decl_cursor - .cur_type() - .fallible_layout(self) - .ok(), - sub_kind, - false, - ); - let sub_id = self.next_item_id(); - let sub_item = Item::new( - sub_id, - None, - None, - self.current_module.into(), - ItemKind::Type(sub_ty), - Some(child.location()), - ); - - // Bypass all the validations in add_item explicitly. - debug!( - "instantiate_template: inserting nested \ - instantiation item: {:?}", - sub_item - ); - self.add_item_to_module(&sub_item); - debug_assert_eq!(sub_id, sub_item.id()); - self.items[sub_id.0] = Some(sub_item); - args.push(sub_id.as_type_id_unchecked()); - } - } - _ => { - warn!( - "Found template arg cursor we can't handle: {child:?}" - ); - found_const_arg = true; - } - } - } - - if found_const_arg { - // This is a dependently typed template instantiation. That is, an - // instantiation of a template with one or more const values as - // template arguments, rather than only types as template - // arguments. For example, `Foo` versus `Bar`. - // We can't handle these instantiations, so just punt in this - // situation... - warn!( - "Found template instantiated with a const value; \ - bindgen can't handle this kind of template instantiation!" - ); - return None; - } - - if args.len() != num_expected_args { - warn!( - "Found a template with an unexpected number of template \ - arguments" - ); - return None; - } - - args.reverse(); - let type_kind = TypeKind::TemplateInstantiation( - TemplateInstantiation::new(template, args), - ); - let name = ty.spelling(); - let name = if name.is_empty() { None } else { Some(name) }; - let ty = Type::new( - name, - ty.fallible_layout(self).ok(), - type_kind, - ty.is_const(), - ); - let item = Item::new( - with_id, - None, - None, - self.current_module.into(), - ItemKind::Type(ty), - Some(location.location()), - ); - - // Bypass all the validations in add_item explicitly. 
- debug!("instantiate_template: inserting item: {item:?}"); - self.add_item_to_module(&item); - debug_assert_eq!(with_id, item.id()); - self.items[with_id.0] = Some(item); - Some(with_id.as_type_id_unchecked()) - } - - /// If we have already resolved the type for the given type declaration, - /// return its `ItemId`. Otherwise, return `None`. - pub(crate) fn get_resolved_type( - &self, - decl: &clang::CanonicalTypeDeclaration, - ) -> Option { - self.types - .get(&TypeKey::Declaration(*decl.cursor())) - .or_else(|| { - decl.cursor() - .usr() - .and_then(|usr| self.types.get(&TypeKey::Usr(usr))) - }) - .copied() - } - - /// Looks up for an already resolved type, either because it's builtin, or - /// because we already have it in the map. - pub(crate) fn builtin_or_resolved_ty( - &mut self, - with_id: ItemId, - parent_id: Option, - ty: &clang::Type, - location: Option, - ) -> Option { - use clang_sys::{CXCursor_TypeAliasTemplateDecl, CXCursor_TypeRef}; - debug!("builtin_or_resolved_ty: {ty:?}, {location:?}, {with_id:?}, {parent_id:?}"); - - if let Some(decl) = ty.canonical_declaration(location.as_ref()) { - if let Some(id) = self.get_resolved_type(&decl) { - debug!( - "Already resolved ty {id:?}, {decl:?}, {ty:?} {location:?}" - ); - // If the declaration already exists, then either: - // - // * the declaration is a template declaration of some sort, - // and we are looking at an instantiation or specialization - // of it, or - // * we have already parsed and resolved this type, and - // there's nothing left to do. - if let Some(location) = location { - if decl.cursor().is_template_like() && - *ty != decl.cursor().cur_type() - { - // For specialized type aliases, there's no way to get the - // template parameters as of this writing (for a struct - // specialization we wouldn't be in this branch anyway). - // - // Explicitly return `None` if there aren't any - // unspecialized parameters (contains any `TypeRef`) so we - // resolve the canonical type if there is one and it's - // exposed. - // - // This is _tricky_, I know :( - if decl.cursor().kind() == - CXCursor_TypeAliasTemplateDecl && - !location.contains_cursor(CXCursor_TypeRef) && - ty.canonical_type().is_valid_and_exposed() - { - return None; - } - - return self - .instantiate_template(with_id, id, ty, location) - .or(Some(id)); - } - } - - return Some(self.build_ty_wrapper(with_id, id, parent_id, ty)); - } - } - - debug!("Not resolved, maybe builtin?"); - self.build_builtin_ty(ty) - } - - /// Make a new item that is a resolved type reference to the `wrapped_id`. - /// - /// This is unfortunately a lot of bloat, but is needed to properly track - /// constness et al. - /// - /// We should probably make the constness tracking separate, so it doesn't - /// bloat that much, but hey, we already bloat the heck out of builtin - /// types. - pub(crate) fn build_ty_wrapper( - &mut self, - with_id: ItemId, - wrapped_id: TypeId, - parent_id: Option, - ty: &clang::Type, - ) -> TypeId { - self.build_wrapper(with_id, wrapped_id, parent_id, ty, ty.is_const()) - } - - /// A wrapper over a type that adds a const qualifier explicitly. - /// - /// Needed to handle const methods in C++, wrapping the type . 
- pub(crate) fn build_const_wrapper( - &mut self, - with_id: ItemId, - wrapped_id: TypeId, - parent_id: Option, - ty: &clang::Type, - ) -> TypeId { - self.build_wrapper( - with_id, wrapped_id, parent_id, ty, /* is_const = */ true, - ) - } - - fn build_wrapper( - &mut self, - with_id: ItemId, - wrapped_id: TypeId, - parent_id: Option, - ty: &clang::Type, - is_const: bool, - ) -> TypeId { - let spelling = ty.spelling(); - let layout = ty.fallible_layout(self).ok(); - let location = ty.declaration().location(); - let type_kind = TypeKind::ResolvedTypeRef(wrapped_id); - let ty = Type::new(Some(spelling), layout, type_kind, is_const); - let item = Item::new( - with_id, - None, - None, - parent_id.unwrap_or_else(|| self.current_module.into()), - ItemKind::Type(ty), - Some(location), - ); - self.add_builtin_item(item); - with_id.as_type_id_unchecked() - } - - /// Returns the next item ID to be used for an item. - pub(crate) fn next_item_id(&mut self) -> ItemId { - let ret = ItemId(self.items.len()); - self.items.push(None); - ret - } - - fn build_builtin_ty(&mut self, ty: &clang::Type) -> Option { - use clang_sys::*; - let type_kind = match ty.kind() { - CXType_NullPtr => TypeKind::NullPtr, - CXType_Void => TypeKind::Void, - CXType_Bool => TypeKind::Int(IntKind::Bool), - CXType_Int => TypeKind::Int(IntKind::Int), - CXType_UInt => TypeKind::Int(IntKind::UInt), - CXType_Char_S => TypeKind::Int(IntKind::Char { is_signed: true }), - CXType_Char_U => TypeKind::Int(IntKind::Char { is_signed: false }), - CXType_SChar => TypeKind::Int(IntKind::SChar), - CXType_UChar => TypeKind::Int(IntKind::UChar), - CXType_Short => TypeKind::Int(IntKind::Short), - CXType_UShort => TypeKind::Int(IntKind::UShort), - CXType_WChar => TypeKind::Int(IntKind::WChar), - CXType_Char16 if self.options().use_distinct_char16_t => { - TypeKind::Int(IntKind::Char16) - } - CXType_Char16 => TypeKind::Int(IntKind::U16), - CXType_Char32 => TypeKind::Int(IntKind::U32), - CXType_Long => TypeKind::Int(IntKind::Long), - CXType_ULong => TypeKind::Int(IntKind::ULong), - CXType_LongLong => TypeKind::Int(IntKind::LongLong), - CXType_ULongLong => TypeKind::Int(IntKind::ULongLong), - CXType_Int128 => TypeKind::Int(IntKind::I128), - CXType_UInt128 => TypeKind::Int(IntKind::U128), - CXType_Float16 | CXType_Half => TypeKind::Float(FloatKind::Float16), - CXType_Float => TypeKind::Float(FloatKind::Float), - CXType_Double => TypeKind::Float(FloatKind::Double), - CXType_LongDouble => TypeKind::Float(FloatKind::LongDouble), - CXType_Float128 => TypeKind::Float(FloatKind::Float128), - CXType_Complex => { - let float_type = - ty.elem_type().expect("Not able to resolve complex type?"); - let float_kind = match float_type.kind() { - CXType_Float16 | CXType_Half => FloatKind::Float16, - CXType_Float => FloatKind::Float, - CXType_Double => FloatKind::Double, - CXType_LongDouble => FloatKind::LongDouble, - CXType_Float128 => FloatKind::Float128, - _ => panic!( - "Non floating-type complex? 
{ty:?}, {float_type:?}", - ), - }; - TypeKind::Complex(float_kind) - } - _ => return None, - }; - - let spelling = ty.spelling(); - let is_const = ty.is_const(); - let layout = ty.fallible_layout(self).ok(); - let location = ty.declaration().location(); - let ty = Type::new(Some(spelling), layout, type_kind, is_const); - let id = self.next_item_id(); - let item = Item::new( - id, - None, - None, - self.root_module.into(), - ItemKind::Type(ty), - Some(location), - ); - self.add_builtin_item(item); - Some(id.as_type_id_unchecked()) - } - - /// Get the current Clang translation unit that is being processed. - pub(crate) fn translation_unit(&self) -> &clang::TranslationUnit { - &self.translation_unit - } - - /// Initialize fallback translation unit if it does not exist and - /// then return a mutable reference to the fallback translation unit. - pub(crate) fn try_ensure_fallback_translation_unit( - &mut self, - ) -> Option<&mut clang::FallbackTranslationUnit> { - if self.fallback_tu.is_none() { - let file = format!( - "{}/.macro_eval.c", - match self.options().clang_macro_fallback_build_dir { - Some(ref path) => path.as_os_str().to_str()?, - None => ".", - } - ); - - let index = clang::Index::new(false, false); - - let mut header_names_to_compile = Vec::new(); - let mut header_paths = Vec::new(); - let mut header_includes = Vec::new(); - let single_header = self.options().input_headers.last().cloned()?; - for input_header in &self.options.input_headers - [..self.options.input_headers.len() - 1] - { - let path = Path::new(input_header.as_ref()); - if let Some(header_path) = path.parent() { - if header_path == Path::new("") { - header_paths.push("."); - } else { - header_paths.push(header_path.as_os_str().to_str()?); - } - } else { - header_paths.push("."); - } - let header_name = path.file_name()?.to_str()?; - header_includes.push(header_name.to_string()); - header_names_to_compile - .push(header_name.split(".h").next()?.to_string()); - } - let pch = format!( - "{}/{}", - match self.options().clang_macro_fallback_build_dir { - Some(ref path) => path.as_os_str().to_str()?, - None => ".", - }, - header_names_to_compile.join("-") + "-precompile.h.pch" - ); - - let mut c_args = self.options.fallback_clang_args.clone(); - c_args.push("-x".to_string().into_boxed_str()); - c_args.push("c-header".to_string().into_boxed_str()); - for header_path in header_paths { - c_args.push(format!("-I{header_path}").into_boxed_str()); - } - for header_include in header_includes { - c_args.push("-include".to_string().into_boxed_str()); - c_args.push(header_include.into_boxed_str()); - } - let mut tu = clang::TranslationUnit::parse( - &index, - &single_header, - &c_args, - &[], - clang_sys::CXTranslationUnit_ForSerialization, - )?; - tu.save(&pch).ok()?; - - let mut c_args = vec![ - "-include-pch".to_string().into_boxed_str(), - pch.clone().into_boxed_str(), - ]; - let mut skip_next = false; - for arg in &self.options.fallback_clang_args { - if arg.as_ref() == "-include" { - skip_next = true; - } else if skip_next { - skip_next = false; - } else { - c_args.push(arg.clone()); - } - } - self.fallback_tu = - Some(clang::FallbackTranslationUnit::new(file, pch, &c_args)?); - } - - self.fallback_tu.as_mut() - } - - /// Have we parsed the macro named `macro_name` already? - pub(crate) fn parsed_macro(&self, macro_name: &[u8]) -> bool { - self.parsed_macros.contains_key(macro_name) - } - - /// Get the currently parsed macros. 
- pub(crate) fn parsed_macros( - &self, - ) -> &StdHashMap, cexpr::expr::EvalResult> { - debug_assert!(!self.in_codegen_phase()); - &self.parsed_macros - } - - /// Mark the macro named `macro_name` as parsed. - pub(crate) fn note_parsed_macro( - &mut self, - id: Vec, - value: cexpr::expr::EvalResult, - ) { - self.parsed_macros.insert(id, value); - } - - /// Are we in the codegen phase? - pub(crate) fn in_codegen_phase(&self) -> bool { - self.in_codegen - } - - /// Mark the type with the given `name` as replaced by the type with ID - /// `potential_ty`. - /// - /// Replacement types are declared using the `replaces="xxx"` annotation, - /// and implies that the original type is hidden. - pub(crate) fn replace(&mut self, name: &[String], potential_ty: ItemId) { - match self.replacements.entry(name.into()) { - Entry::Vacant(entry) => { - debug!("Defining replacement for {name:?} as {potential_ty:?}"); - entry.insert(potential_ty); - } - Entry::Occupied(occupied) => { - warn!( - "Replacement for {name:?} already defined as {:?}; \ - ignoring duplicate replacement definition as {potential_ty:?}", - occupied.get(), - ); - } - } - } - - /// Has the item with the given `name` and `id` been replaced by another - /// type? - pub(crate) fn is_replaced_type>( - &self, - path: &[String], - id: Id, - ) -> bool { - let id = id.into(); - matches!(self.replacements.get(path), Some(replaced_by) if *replaced_by != id) - } - - /// Is the type with the given `name` marked as opaque? - pub(crate) fn opaque_by_name(&self, path: &[String]) -> bool { - debug_assert!( - self.in_codegen_phase(), - "You're not supposed to call this yet" - ); - self.options.opaque_types.matches(path[1..].join("::")) - } - - /// Get the options used to configure this bindgen context. - pub(crate) fn options(&self) -> &BindgenOptions { - &self.options - } - - /// Tokenizes a namespace cursor in order to get the name and kind of the - /// namespace. - fn tokenize_namespace( - &self, - cursor: &Cursor, - ) -> (Option, ModuleKind) { - assert_eq!( - cursor.kind(), - ::clang_sys::CXCursor_Namespace, - "Be a nice person" - ); - - let mut module_name = None; - let spelling = cursor.spelling(); - if !spelling.is_empty() { - module_name = Some(spelling); - } - - let mut kind = ModuleKind::Normal; - let mut looking_for_name = false; - for token in cursor.tokens().iter() { - match token.spelling() { - b"inline" => { - debug_assert!( - kind != ModuleKind::Inline, - "Multiple inline keywords?" - ); - kind = ModuleKind::Inline; - // When hitting a nested inline namespace we get a spelling - // that looks like ["inline", "foo"]. Deal with it properly. - looking_for_name = true; - } - // The double colon allows us to handle nested namespaces like - // namespace foo::bar { } - // - // libclang still gives us two namespace cursors, which is cool, - // but the tokenization of the second begins with the double - // colon. That's ok, so we only need to handle the weird - // tokenization here. - b"namespace" | b"::" => { - looking_for_name = true; - } - b"{" => { - // This should be an anonymous namespace. - assert!(looking_for_name); - break; - } - name => { - if looking_for_name { - if module_name.is_none() { - module_name = Some( - String::from_utf8_lossy(name).into_owned(), - ); - } - break; - } - // This is _likely_, but not certainly, a macro that's - // been placed just before the namespace keyword. - // Unfortunately, clang tokens don't let us easily see - // through the ifdef tokens, so we don't know what this - // token should really be. 
Instead of panicking though, - // we warn the user that we assumed the token was blank, - // and then move on. - // - // See also https://github.com/rust-lang/rust-bindgen/issues/1676. - warn!("Ignored unknown namespace prefix '{}' at {token:?} in {cursor:?}", String::from_utf8_lossy(name)); - } - } - } - - if cursor.is_inline_namespace() { - kind = ModuleKind::Inline; - } - - (module_name, kind) - } - - /// Given a `CXCursor_Namespace` cursor, return the item ID of the - /// corresponding module, or create one on the fly. - pub(crate) fn module(&mut self, cursor: Cursor) -> ModuleId { - use clang_sys::*; - assert_eq!(cursor.kind(), CXCursor_Namespace, "Be a nice person"); - let cursor = cursor.canonical(); - if let Some(id) = self.modules.get(&cursor) { - return *id; - } - - let (module_name, kind) = self.tokenize_namespace(&cursor); - - let module_id = self.next_item_id(); - let module = Module::new(module_name, kind); - let module = Item::new( - module_id, - None, - None, - self.current_module.into(), - ItemKind::Module(module), - Some(cursor.location()), - ); - - let module_id = module.id().as_module_id_unchecked(); - self.modules.insert(cursor, module_id); - - self.add_item(module, None, None); - - module_id - } - - /// Start traversing the module with the given `module_id`, invoke the - /// callback `cb`, and then return to traversing the original module. - pub(crate) fn with_module(&mut self, module_id: ModuleId, cb: F) - where - F: FnOnce(&mut Self), - { - debug_assert!(self.resolve_item(module_id).kind().is_module(), "Wat"); - - let previous_id = self.current_module; - self.current_module = module_id; - - cb(self); - - self.current_module = previous_id; - } - - /// Iterate over all (explicitly or transitively) allowlisted items. - /// - /// If no items are explicitly allowlisted, then all items are considered - /// allowlisted. - pub(crate) fn allowlisted_items(&self) -> &ItemSet { - assert!(self.in_codegen_phase()); - assert_eq!(self.current_module, self.root_module); - - self.allowlisted.as_ref().unwrap() - } - - /// Check whether a particular blocklisted type implements a trait or not. - /// Results may be cached. - pub(crate) fn blocklisted_type_implements_trait( - &self, - item: &Item, - derive_trait: DeriveTrait, - ) -> CanDerive { - assert!(self.in_codegen_phase()); - assert_eq!(self.current_module, self.root_module); - - *self - .blocklisted_types_implement_traits - .borrow_mut() - .entry(derive_trait) - .or_default() - .entry(item.id()) - .or_insert_with(|| { - item.expect_type() - .name() - .and_then(|name| { - if self.options.parse_callbacks.is_empty() { - // Sized integer types from get mapped to Rust primitive - // types regardless of whether they are blocklisted, so ensure that - // standard traits are considered derivable for them too. - if self.is_stdint_type(name) { - Some(CanDerive::Yes) - } else { - Some(CanDerive::No) - } - } else { - self.options.last_callback(|cb| { - cb.blocklisted_type_implements_trait( - name, - derive_trait, - ) - }) - } - }) - .unwrap_or(CanDerive::No) - }) - } - - /// Is the given type a type from that corresponds to a Rust primitive type? - pub(crate) fn is_stdint_type(&self, name: &str) -> bool { - match name { - "int8_t" | "uint8_t" | "int16_t" | "uint16_t" | "int32_t" | - "uint32_t" | "int64_t" | "uint64_t" | "uintptr_t" | - "intptr_t" | "ptrdiff_t" => true, - "size_t" | "ssize_t" => self.options.size_t_is_usize, - _ => false, - } - } - - /// Get a reference to the set of items we should generate. 
- pub(crate) fn codegen_items(&self) -> &ItemSet { - assert!(self.in_codegen_phase()); - assert_eq!(self.current_module, self.root_module); - self.codegen_items.as_ref().unwrap() - } - - /// Compute the allowlisted items set and populate `self.allowlisted`. - fn compute_allowlisted_and_codegen_items(&mut self) { - assert!(self.in_codegen_phase()); - assert_eq!(self.current_module, self.root_module); - assert!(self.allowlisted.is_none()); - let _t = self.timer("compute_allowlisted_and_codegen_items"); - - let roots = { - let mut roots = self - .items() - // Only consider roots that are enabled for codegen. - .filter(|&(_, item)| item.is_enabled_for_codegen(self)) - .filter(|&(_, item)| { - // If nothing is explicitly allowlisted, then everything is fair - // game. - if self.options().allowlisted_types.is_empty() && - self.options().allowlisted_functions.is_empty() && - self.options().allowlisted_vars.is_empty() && - self.options().allowlisted_files.is_empty() && - self.options().allowlisted_items.is_empty() - { - return true; - } - - // If this is a type that explicitly replaces another, we assume - // you know what you're doing. - if item.annotations().use_instead_of().is_some() { - return true; - } - - // Items with a source location in an explicitly allowlisted file - // are always included. - if !self.options().allowlisted_files.is_empty() { - if let Some(location) = item.location() { - let (file, _, _, _) = location.location(); - if let Some(filename) = file.name() { - if self - .options() - .allowlisted_files - .matches(filename) - { - return true; - } - } - } - } - - let name = item.path_for_allowlisting(self)[1..].join("::"); - debug!("allowlisted_items: testing {name:?}"); - - if self.options().allowlisted_items.matches(&name) { - return true; - } - - match *item.kind() { - ItemKind::Module(..) => true, - ItemKind::Function(_) => { - self.options().allowlisted_functions.matches(&name) - } - ItemKind::Var(_) => { - self.options().allowlisted_vars.matches(&name) - } - ItemKind::Type(ref ty) => { - if self.options().allowlisted_types.matches(&name) { - return true; - } - - // Auto-allowlist types that don't need code - // generation if not allowlisting recursively, to - // make the #[derive] analysis not be lame. - if !self.options().allowlist_recursively { - match *ty.kind() { - TypeKind::Void | - TypeKind::NullPtr | - TypeKind::Int(..) | - TypeKind::Float(..) | - TypeKind::Complex(..) | - TypeKind::Array(..) | - TypeKind::Vector(..) | - TypeKind::Pointer(..) | - TypeKind::Reference(..) | - TypeKind::Function(..) | - TypeKind::ResolvedTypeRef(..) | - TypeKind::Opaque | - TypeKind::TypeParam => return true, - _ => {} - } - if self.is_stdint_type(&name) { - return true; - } - } - - // Unnamed top-level enums are special and we - // allowlist them via the `allowlisted_vars` filter, - // since they're effectively top-level constants, - // and there's no way for them to be referenced - // consistently. 
- let parent = self.resolve_item(item.parent_id()); - if !parent.is_module() { - return false; - } - - let TypeKind::Enum(ref enum_) = *ty.kind() else { - return false; - }; - - if ty.name().is_some() { - return false; - } - - let mut prefix_path = - parent.path_for_allowlisting(self).clone(); - enum_.variants().iter().any(|variant| { - prefix_path.push( - variant.name_for_allowlisting().into(), - ); - let name = prefix_path[1..].join("::"); - prefix_path.pop().unwrap(); - self.options().allowlisted_vars.matches(&name) - || self - .options() - .allowlisted_items - .matches(name) - }) - } - } - }) - .map(|(id, _)| id) - .collect::>(); - - // The reversal preserves the expected ordering of traversal, - // resulting in more stable-ish bindgen-generated names for - // anonymous types (like unions). - roots.reverse(); - roots - }; - - let allowlisted_items_predicate = - if self.options().allowlist_recursively { - traversal::all_edges - } else { - // Only follow InnerType edges from the allowlisted roots. - // Such inner types (e.g. anonymous structs/unions) are - // always emitted by codegen, and they need to be allowlisted - // to make sure they are processed by e.g. the derive analysis. - traversal::only_inner_type_edges - }; - - let allowlisted = AllowlistedItemsTraversal::new( - self, - roots.clone(), - allowlisted_items_predicate, - ) - .collect::(); - - let codegen_items = if self.options().allowlist_recursively { - AllowlistedItemsTraversal::new( - self, - roots, - traversal::codegen_edges, - ) - .collect::() - } else { - allowlisted.clone() - }; - - self.allowlisted = Some(allowlisted); - self.codegen_items = Some(codegen_items); - - for item in self.options().allowlisted_functions.unmatched_items() { - unused_regex_diagnostic(item, "--allowlist-function", self); - } - - for item in self.options().allowlisted_vars.unmatched_items() { - unused_regex_diagnostic(item, "--allowlist-var", self); - } - - for item in self.options().allowlisted_types.unmatched_items() { - unused_regex_diagnostic(item, "--allowlist-type", self); - } - - for item in self.options().allowlisted_items.unmatched_items() { - unused_regex_diagnostic(item, "--allowlist-items", self); - } - } - - /// Convenient method for getting the prefix to use for most traits in - /// codegen depending on the `use_core` option. - pub(crate) fn trait_prefix(&self) -> Ident { - if self.options().use_core { - self.rust_ident_raw("core") - } else { - self.rust_ident_raw("std") - } - } - - /// Call if an opaque array is generated - pub(crate) fn generated_opaque_array(&self) { - self.generated_opaque_array.set(true); - } - - /// Whether we need to generate the opaque array type - pub(crate) fn need_opaque_array_type(&self) -> bool { - self.generated_opaque_array.get() - } - - /// Call if a bindgen complex is generated - pub(crate) fn generated_bindgen_complex(&self) { - self.generated_bindgen_complex.set(true); - } - - /// Whether we need to generate the bindgen complex type - pub(crate) fn need_bindgen_complex_type(&self) -> bool { - self.generated_bindgen_complex.get() - } - - /// Call if a bindgen float16 is generated - pub(crate) fn generated_bindgen_float16(&self) { - self.generated_bindgen_float16.set(true); - } - - /// Whether we need to generate the bindgen float16 type - pub(crate) fn need_bindgen_float16_type(&self) -> bool { - self.generated_bindgen_float16.get() - } - - /// Compute which `enum`s have an associated `typedef` definition. 
- fn compute_enum_typedef_combos(&mut self) { - let _t = self.timer("compute_enum_typedef_combos"); - assert!(self.enum_typedef_combos.is_none()); - - let mut enum_typedef_combos = HashSet::default(); - for item in &self.items { - if let Some(ItemKind::Module(module)) = - item.as_ref().map(Item::kind) - { - // Find typedefs in this module, and build set of their names. - let mut names_of_typedefs = HashSet::default(); - for child_id in module.children() { - if let Some(ItemKind::Type(ty)) = - self.items[child_id.0].as_ref().map(Item::kind) - { - if let (Some(name), TypeKind::Alias(type_id)) = - (ty.name(), ty.kind()) - { - // We disregard aliases that refer to the enum - // itself, such as in `typedef enum { ... } Enum;`. - if type_id - .into_resolver() - .through_type_refs() - .through_type_aliases() - .resolve(self) - .expect_type() - .is_int() - { - names_of_typedefs.insert(name); - } - } - } - } - - // Find enums in this module, and record the ID of each one that - // has a typedef. - for child_id in module.children() { - if let Some(ItemKind::Type(ty)) = - self.items[child_id.0].as_ref().map(Item::kind) - { - if let (Some(name), true) = (ty.name(), ty.is_enum()) { - if names_of_typedefs.contains(name) { - enum_typedef_combos.insert(*child_id); - } - } - } - } - } - } - - self.enum_typedef_combos = Some(enum_typedef_combos); - } - - /// Look up whether `id` refers to an `enum` whose underlying type is - /// defined by a `typedef`. - pub(crate) fn is_enum_typedef_combo(&self, id: ItemId) -> bool { - assert!( - self.in_codegen_phase(), - "We only compute enum_typedef_combos when we enter codegen", - ); - self.enum_typedef_combos.as_ref().unwrap().contains(&id) - } - - /// Compute whether we can derive debug. - fn compute_cannot_derive_debug(&mut self) { - let _t = self.timer("compute_cannot_derive_debug"); - assert!(self.cannot_derive_debug.is_none()); - if self.options.derive_debug { - self.cannot_derive_debug = - Some(as_cannot_derive_set(analyze::(( - self, - DeriveTrait::Debug, - )))); - } - } - - /// Look up whether the item with `id` can - /// derive debug or not. - pub(crate) fn lookup_can_derive_debug>( - &self, - id: Id, - ) -> bool { - let id = id.into(); - assert!( - self.in_codegen_phase(), - "We only compute can_derive_debug when we enter codegen" - ); - - // Look up the computed value for whether the item with `id` can - // derive debug or not. - !self.cannot_derive_debug.as_ref().unwrap().contains(&id) - } - - /// Compute whether we can derive default. - fn compute_cannot_derive_default(&mut self) { - let _t = self.timer("compute_cannot_derive_default"); - assert!(self.cannot_derive_default.is_none()); - if self.options.derive_default { - self.cannot_derive_default = - Some(as_cannot_derive_set(analyze::(( - self, - DeriveTrait::Default, - )))); - } - } - - /// Look up whether the item with `id` can - /// derive default or not. - pub(crate) fn lookup_can_derive_default>( - &self, - id: Id, - ) -> bool { - let id = id.into(); - assert!( - self.in_codegen_phase(), - "We only compute can_derive_default when we enter codegen" - ); - - // Look up the computed value for whether the item with `id` can - // derive default or not. - !self.cannot_derive_default.as_ref().unwrap().contains(&id) - } - - /// Compute whether we can derive copy. 
- fn compute_cannot_derive_copy(&mut self) { - let _t = self.timer("compute_cannot_derive_copy"); - assert!(self.cannot_derive_copy.is_none()); - self.cannot_derive_copy = - Some(as_cannot_derive_set(analyze::(( - self, - DeriveTrait::Copy, - )))); - } - - /// Compute whether we can derive hash. - fn compute_cannot_derive_hash(&mut self) { - let _t = self.timer("compute_cannot_derive_hash"); - assert!(self.cannot_derive_hash.is_none()); - if self.options.derive_hash { - self.cannot_derive_hash = - Some(as_cannot_derive_set(analyze::(( - self, - DeriveTrait::Hash, - )))); - } - } - - /// Look up whether the item with `id` can - /// derive hash or not. - pub(crate) fn lookup_can_derive_hash>( - &self, - id: Id, - ) -> bool { - let id = id.into(); - assert!( - self.in_codegen_phase(), - "We only compute can_derive_debug when we enter codegen" - ); - - // Look up the computed value for whether the item with `id` can - // derive hash or not. - !self.cannot_derive_hash.as_ref().unwrap().contains(&id) - } - - /// Compute whether we can derive `PartialOrd`, `PartialEq` or `Eq`. - fn compute_cannot_derive_partialord_partialeq_or_eq(&mut self) { - let _t = self.timer("compute_cannot_derive_partialord_partialeq_or_eq"); - assert!(self.cannot_derive_partialeq_or_partialord.is_none()); - if self.options.derive_partialord || - self.options.derive_partialeq || - self.options.derive_eq - { - self.cannot_derive_partialeq_or_partialord = - Some(analyze::(( - self, - DeriveTrait::PartialEqOrPartialOrd, - ))); - } - } - - /// Look up whether the item with `id` can derive `Partial{Eq,Ord}`. - pub(crate) fn lookup_can_derive_partialeq_or_partialord< - Id: Into, - >( - &self, - id: Id, - ) -> CanDerive { - let id = id.into(); - assert!( - self.in_codegen_phase(), - "We only compute can_derive_partialeq_or_partialord when we enter codegen" - ); - - // Look up the computed value for whether the item with `id` can - // derive partialeq or not. - self.cannot_derive_partialeq_or_partialord - .as_ref() - .unwrap() - .get(&id) - .copied() - .unwrap_or(CanDerive::Yes) - } - - /// Look up whether the item with `id` can derive `Copy` or not. - pub(crate) fn lookup_can_derive_copy>( - &self, - id: Id, - ) -> bool { - assert!( - self.in_codegen_phase(), - "We only compute can_derive_debug when we enter codegen" - ); - - // Look up the computed value for whether the item with `id` can - // derive `Copy` or not. - let id = id.into(); - - !self.lookup_has_type_param_in_array(id) && - !self.cannot_derive_copy.as_ref().unwrap().contains(&id) - } - - /// Compute whether the type has type parameter in array. - fn compute_has_type_param_in_array(&mut self) { - let _t = self.timer("compute_has_type_param_in_array"); - assert!(self.has_type_param_in_array.is_none()); - self.has_type_param_in_array = - Some(analyze::(self)); - } - - /// Look up whether the item with `id` has type parameter in array or not. - pub(crate) fn lookup_has_type_param_in_array>( - &self, - id: Id, - ) -> bool { - assert!( - self.in_codegen_phase(), - "We only compute has array when we enter codegen" - ); - - // Look up the computed value for whether the item with `id` has - // type parameter in array or not. - self.has_type_param_in_array - .as_ref() - .unwrap() - .contains(&id.into()) - } - - /// Compute whether the type has float. 
- fn compute_has_float(&mut self) { - let _t = self.timer("compute_has_float"); - assert!(self.has_float.is_none()); - if self.options.derive_eq || self.options.derive_ord { - self.has_float = Some(analyze::(self)); - } - } - - /// Look up whether the item with `id` has array or not. - pub(crate) fn lookup_has_float>(&self, id: Id) -> bool { - assert!( - self.in_codegen_phase(), - "We only compute has float when we enter codegen" - ); - - // Look up the computed value for whether the item with `id` has - // float or not. - self.has_float.as_ref().unwrap().contains(&id.into()) - } - - /// Check if `--no-partialeq` flag is enabled for this item. - pub(crate) fn no_partialeq_by_name(&self, item: &Item) -> bool { - let name = item.path_for_allowlisting(self)[1..].join("::"); - self.options().no_partialeq_types.matches(name) - } - - /// Check if `--no-copy` flag is enabled for this item. - pub(crate) fn no_copy_by_name(&self, item: &Item) -> bool { - let name = item.path_for_allowlisting(self)[1..].join("::"); - self.options().no_copy_types.matches(name) - } - - /// Check if `--no-debug` flag is enabled for this item. - pub(crate) fn no_debug_by_name(&self, item: &Item) -> bool { - let name = item.path_for_allowlisting(self)[1..].join("::"); - self.options().no_debug_types.matches(name) - } - - /// Check if `--no-default` flag is enabled for this item. - pub(crate) fn no_default_by_name(&self, item: &Item) -> bool { - let name = item.path_for_allowlisting(self)[1..].join("::"); - self.options().no_default_types.matches(name) - } - - /// Check if `--no-hash` flag is enabled for this item. - pub(crate) fn no_hash_by_name(&self, item: &Item) -> bool { - let name = item.path_for_allowlisting(self)[1..].join("::"); - self.options().no_hash_types.matches(name) - } - - /// Check if `--must-use-type` flag is enabled for this item. - pub(crate) fn must_use_type_by_name(&self, item: &Item) -> bool { - let name = item.path_for_allowlisting(self)[1..].join("::"); - self.options().must_use_types.matches(name) - } - - /// Wrap some tokens in an `unsafe` block if the `--wrap-unsafe-ops` option is enabled. - pub(crate) fn wrap_unsafe_ops(&self, tokens: impl ToTokens) -> TokenStream { - if self.options.wrap_unsafe_ops { - quote!(unsafe { #tokens }) - } else { - tokens.into_token_stream() - } - } - - /// Get the suffix to be added to `static` functions if the `--wrap-static-fns` option is - /// enabled. - pub(crate) fn wrap_static_fns_suffix(&self) -> &str { - self.options() - .wrap_static_fns_suffix - .as_deref() - .unwrap_or(crate::DEFAULT_NON_EXTERN_FNS_SUFFIX) - } -} - -/// A builder struct for configuring item resolution options. -#[derive(Debug, Copy, Clone)] -pub(crate) struct ItemResolver { - id: ItemId, - through_type_refs: bool, - through_type_aliases: bool, -} - -impl ItemId { - /// Create an `ItemResolver` from this item ID. - pub(crate) fn into_resolver(self) -> ItemResolver { - self.into() - } -} - -impl From for ItemResolver -where - T: Into, -{ - fn from(id: T) -> ItemResolver { - ItemResolver::new(id) - } -} - -impl ItemResolver { - /// Construct a new `ItemResolver` from the given ID. - pub(crate) fn new>(id: Id) -> ItemResolver { - let id = id.into(); - ItemResolver { - id, - through_type_refs: false, - through_type_aliases: false, - } - } - - /// Keep resolving through `Type::TypeRef` items. - pub(crate) fn through_type_refs(mut self) -> ItemResolver { - self.through_type_refs = true; - self - } - - /// Keep resolving through `Type::Alias` items. 
- pub(crate) fn through_type_aliases(mut self) -> ItemResolver { - self.through_type_aliases = true; - self - } - - /// Finish configuring and perform the actual item resolution. - pub(crate) fn resolve(self, ctx: &BindgenContext) -> &Item { - assert!(ctx.collected_typerefs()); - - let mut id = self.id; - let mut seen_ids = HashSet::default(); - loop { - let item = ctx.resolve_item(id); - - // Detect cycles and bail out. These can happen in certain cases - // involving incomplete qualified dependent types (#2085). - if !seen_ids.insert(id) { - return item; - } - - let ty_kind = item.as_type().map(|t| t.kind()); - match ty_kind { - Some(&TypeKind::ResolvedTypeRef(next_id)) - if self.through_type_refs => - { - id = next_id.into(); - } - // We intentionally ignore template aliases here, as they are - // more complicated, and don't represent a simple renaming of - // some type. - Some(&TypeKind::Alias(next_id)) - if self.through_type_aliases => - { - id = next_id.into(); - } - _ => return item, - } - } - } -} - -/// A type that we are in the middle of parsing. -#[derive(Clone, Copy, Debug, PartialEq, Eq)] -pub(crate) struct PartialType { - decl: Cursor, - // Just an ItemId, and not a TypeId, because we haven't finished this type - // yet, so there's still time for things to go wrong. - id: ItemId, -} - -impl PartialType { - /// Construct a new `PartialType`. - pub(crate) fn new(decl: Cursor, id: ItemId) -> PartialType { - // assert!(decl == decl.canonical()); - PartialType { decl, id } - } - - /// The cursor pointing to this partial type's declaration location. - pub(crate) fn decl(&self) -> &Cursor { - &self.decl - } - - /// The item ID allocated for this type. This is *NOT* a key for an entry in - /// the context's item set yet! - pub(crate) fn id(&self) -> ItemId { - self.id - } -} - -impl TemplateParameters for PartialType { - fn self_template_params(&self, _ctx: &BindgenContext) -> Vec { - // Maybe at some point we will eagerly parse named types, but for now we - // don't and this information is unavailable. - vec![] - } - - fn num_self_template_params(&self, _ctx: &BindgenContext) -> usize { - // Wouldn't it be nice if libclang would reliably give us this - // information‽ - match self.decl().kind() { - clang_sys::CXCursor_ClassTemplate | - clang_sys::CXCursor_FunctionTemplate | - clang_sys::CXCursor_TypeAliasTemplateDecl => { - let mut num_params = 0; - self.decl().visit(|c| { - match c.kind() { - clang_sys::CXCursor_TemplateTypeParameter | - clang_sys::CXCursor_TemplateTemplateParameter | - clang_sys::CXCursor_NonTypeTemplateParameter => { - num_params += 1; - } - _ => {} - } - clang_sys::CXChildVisit_Continue - }); - num_params - } - _ => 0, - } - } -} - -fn unused_regex_diagnostic(item: &str, name: &str, _ctx: &BindgenContext) { - warn!("unused option: {name} {item}"); - - #[cfg(feature = "experimental")] - if _ctx.options().emit_diagnostics { - use crate::diagnostics::{Diagnostic, Level}; - - Diagnostic::default() - .with_title( - format!("Unused regular expression: `{item}`."), - Level::Warning, - ) - .add_annotation( - format!("This regular expression was passed to `{name}`."), - Level::Note, - ) - .display(); - } -} diff --git a/vendor/bindgen/ir/derive.rs b/vendor/bindgen/ir/derive.rs deleted file mode 100644 index 3ee6476af9a76d..00000000000000 --- a/vendor/bindgen/ir/derive.rs +++ /dev/null @@ -1,130 +0,0 @@ -//! Traits for determining whether we can derive traits for a thing or not. -//! -//! These traits tend to come in pairs: -//! -//! 1. 
A "trivial" version, whose implementations aren't allowed to recursively -//! look at other types or the results of fix point analyses. -//! -//! 2. A "normal" version, whose implementations simply query the results of a -//! fix point analysis. -//! -//! The former is used by the analyses when creating the results queried by the -//! second. - -use super::context::BindgenContext; - -use std::cmp; -use std::ops; - -/// A trait that encapsulates the logic for whether or not we can derive `Debug` -/// for a given thing. -pub(crate) trait CanDeriveDebug { - /// Return `true` if `Debug` can be derived for this thing, `false` - /// otherwise. - fn can_derive_debug(&self, ctx: &BindgenContext) -> bool; -} - -/// A trait that encapsulates the logic for whether or not we can derive `Copy` -/// for a given thing. -pub(crate) trait CanDeriveCopy { - /// Return `true` if `Copy` can be derived for this thing, `false` - /// otherwise. - fn can_derive_copy(&self, ctx: &BindgenContext) -> bool; -} - -/// A trait that encapsulates the logic for whether or not we can derive -/// `Default` for a given thing. -pub(crate) trait CanDeriveDefault { - /// Return `true` if `Default` can be derived for this thing, `false` - /// otherwise. - fn can_derive_default(&self, ctx: &BindgenContext) -> bool; -} - -/// A trait that encapsulates the logic for whether or not we can derive `Hash` -/// for a given thing. -pub(crate) trait CanDeriveHash { - /// Return `true` if `Hash` can be derived for this thing, `false` - /// otherwise. - fn can_derive_hash(&self, ctx: &BindgenContext) -> bool; -} - -/// A trait that encapsulates the logic for whether or not we can derive -/// `PartialEq` for a given thing. -pub(crate) trait CanDerivePartialEq { - /// Return `true` if `PartialEq` can be derived for this thing, `false` - /// otherwise. - fn can_derive_partialeq(&self, ctx: &BindgenContext) -> bool; -} - -/// A trait that encapsulates the logic for whether or not we can derive -/// `PartialOrd` for a given thing. -pub(crate) trait CanDerivePartialOrd { - /// Return `true` if `PartialOrd` can be derived for this thing, `false` - /// otherwise. - fn can_derive_partialord(&self, ctx: &BindgenContext) -> bool; -} - -/// A trait that encapsulates the logic for whether or not we can derive `Eq` -/// for a given thing. -pub(crate) trait CanDeriveEq { - /// Return `true` if `Eq` can be derived for this thing, `false` otherwise. - fn can_derive_eq(&self, ctx: &BindgenContext) -> bool; -} - -/// A trait that encapsulates the logic for whether or not we can derive `Ord` -/// for a given thing. -pub(crate) trait CanDeriveOrd { - /// Return `true` if `Ord` can be derived for this thing, `false` otherwise. - fn can_derive_ord(&self, ctx: &BindgenContext) -> bool; -} - -/// Whether it is possible or not to automatically derive trait for an item. -/// -/// ```ignore -/// No -/// ^ -/// | -/// Manually -/// ^ -/// | -/// Yes -/// ``` -/// -/// Initially we assume that we can derive trait for all types and then -/// update our understanding as we learn more about each type. -#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Default)] -pub enum CanDerive { - /// Yes, we can derive automatically. - #[default] - Yes, - - /// The only thing that stops us from automatically deriving is that - /// array with more than maximum number of elements is used. - /// - /// This means we probably can "manually" implement such trait. - Manually, - - /// No, we cannot. - No, -} - -impl CanDerive { - /// Take the least upper bound of `self` and `rhs`. 
- pub(crate) fn join(self, rhs: Self) -> Self { - cmp::max(self, rhs) - } -} - -impl ops::BitOr for CanDerive { - type Output = Self; - - fn bitor(self, rhs: Self) -> Self::Output { - self.join(rhs) - } -} - -impl ops::BitOrAssign for CanDerive { - fn bitor_assign(&mut self, rhs: Self) { - *self = self.join(rhs); - } -} diff --git a/vendor/bindgen/ir/dot.rs b/vendor/bindgen/ir/dot.rs deleted file mode 100644 index 9bfc559f41fa88..00000000000000 --- a/vendor/bindgen/ir/dot.rs +++ /dev/null @@ -1,85 +0,0 @@ -//! Generating Graphviz `dot` files from our IR. - -use super::context::{BindgenContext, ItemId}; -use super::traversal::Trace; -use std::fs::File; -use std::io::{self, Write}; -use std::path::Path; - -/// A trait for anything that can write attributes as `` rows to a dot -/// file. -pub(crate) trait DotAttributes { - /// Write this thing's attributes to the given output. Each attribute must - /// be its own `...`. - fn dot_attributes( - &self, - ctx: &BindgenContext, - out: &mut W, - ) -> io::Result<()> - where - W: Write; -} - -/// Write a graphviz dot file containing our IR. -pub(crate) fn write_dot_file
<P>
(ctx: &BindgenContext, path: P) -> io::Result<()> -where - P: AsRef, -{ - let file = File::create(path)?; - let mut dot_file = io::BufWriter::new(file); - writeln!(&mut dot_file, "digraph {{")?; - - let mut err: Option> = None; - - for (id, item) in ctx.items() { - let is_allowlisted = ctx.allowlisted_items().contains(&id); - - writeln!( - &mut dot_file, - r#"{} [fontname="courier", color={}, label=<
<table border="0" align="left">
"#, - id.as_usize(), - if is_allowlisted { "black" } else { "gray" } - )?; - item.dot_attributes(ctx, &mut dot_file)?; - writeln!(&mut dot_file, "
>];")?; - - item.trace( - ctx, - &mut |sub_id: ItemId, edge_kind| { - if err.is_some() { - return; - } - - match writeln!( - &mut dot_file, - "{} -> {} [label={edge_kind:?}, color={}];", - id.as_usize(), - sub_id.as_usize(), - if is_allowlisted { "black" } else { "gray" } - ) { - Ok(_) => {} - Err(e) => err = Some(Err(e)), - } - }, - &(), - ); - - if let Some(err) = err { - return err; - } - - if let Some(module) = item.as_module() { - for child in module.children() { - writeln!( - &mut dot_file, - "{} -> {} [style=dotted, color=gray]", - item.id().as_usize(), - child.as_usize() - )?; - } - } - } - - writeln!(&mut dot_file, "}}")?; - Ok(()) -} diff --git a/vendor/bindgen/ir/enum_ty.rs b/vendor/bindgen/ir/enum_ty.rs deleted file mode 100644 index 9b08da3bce108e..00000000000000 --- a/vendor/bindgen/ir/enum_ty.rs +++ /dev/null @@ -1,321 +0,0 @@ -//! Intermediate representation for C/C++ enumerations. - -use super::super::codegen::EnumVariation; -use super::context::{BindgenContext, TypeId}; -use super::item::Item; -use super::ty::{Type, TypeKind}; -use crate::clang; -use crate::ir::annotations::Annotations; -use crate::parse::ParseError; -use crate::regex_set::RegexSet; - -/// An enum representing custom handling that can be given to a variant. -#[derive(Copy, Clone, Debug, PartialEq, Eq)] -pub enum EnumVariantCustomBehavior { - /// This variant will be a module containing constants. - ModuleConstify, - /// This variant will be constified, that is, forced to generate a constant. - Constify, - /// This variant will be hidden entirely from the resulting enum. - Hide, -} - -/// A C/C++ enumeration. -#[derive(Debug)] -pub(crate) struct Enum { - /// The representation used for this enum; it should be an `IntKind` type or - /// an alias to one. - /// - /// It's `None` if the enum is a forward declaration and isn't defined - /// anywhere else, see `tests/headers/func_ptr_in_struct.h`. - repr: Option, - - /// The different variants, with explicit values. - variants: Vec, -} - -impl Enum { - /// Construct a new `Enum` with the given representation and variants. - pub(crate) fn new( - repr: Option, - variants: Vec, - ) -> Self { - Enum { repr, variants } - } - - /// Get this enumeration's representation. - pub(crate) fn repr(&self) -> Option { - self.repr - } - - /// Get this enumeration's variants. - pub(crate) fn variants(&self) -> &[EnumVariant] { - &self.variants - } - - /// Construct an enumeration from the given Clang type. - pub(crate) fn from_ty( - ty: &clang::Type, - ctx: &mut BindgenContext, - ) -> Result { - use clang_sys::*; - debug!("Enum::from_ty {ty:?}"); - - if ty.kind() != CXType_Enum { - return Err(ParseError::Continue); - } - - let declaration = ty.declaration().canonical(); - let repr = declaration - .enum_type() - .and_then(|et| Item::from_ty(&et, declaration, None, ctx).ok()); - let mut variants = vec![]; - - let variant_ty = - repr.and_then(|r| ctx.resolve_type(r).safe_canonical_type(ctx)); - let is_bool = variant_ty.is_some_and(Type::is_bool); - - // Assume signedness since the default type by the C standard is an int. - let is_signed = variant_ty.map_or(true, |ty| match *ty.kind() { - TypeKind::Int(ref int_kind) => int_kind.is_signed(), - ref other => { - panic!("Since when enums can be non-integers? 
{other:?}") - } - }); - - let type_name = ty.spelling(); - let type_name = if type_name.is_empty() { - None - } else { - Some(type_name) - }; - let type_name = type_name.as_deref(); - - let definition = declaration.definition().unwrap_or(declaration); - definition.visit(|cursor| { - if cursor.kind() == CXCursor_EnumConstantDecl { - let value = if is_bool { - cursor.enum_val_boolean().map(EnumVariantValue::Boolean) - } else if is_signed { - cursor.enum_val_signed().map(EnumVariantValue::Signed) - } else { - cursor.enum_val_unsigned().map(EnumVariantValue::Unsigned) - }; - if let Some(val) = value { - let name = cursor.spelling(); - let annotations = Annotations::new(&cursor); - let custom_behavior = ctx - .options() - .last_callback(|callbacks| { - callbacks - .enum_variant_behavior(type_name, &name, val) - }) - .or_else(|| { - let annotations = annotations.as_ref()?; - if annotations.hide() { - Some(EnumVariantCustomBehavior::Hide) - } else if annotations.constify_enum_variant() { - Some(EnumVariantCustomBehavior::Constify) - } else { - None - } - }); - - let new_name = ctx - .options() - .last_callback(|callbacks| { - callbacks.enum_variant_name(type_name, &name, val) - }) - .or_else(|| { - annotations - .as_ref()? - .use_instead_of()? - .last() - .cloned() - }) - .unwrap_or_else(|| name.clone()); - - let comment = cursor.raw_comment(); - variants.push(EnumVariant::new( - new_name, - name, - comment, - val, - custom_behavior, - )); - } - } - CXChildVisit_Continue - }); - Ok(Enum::new(repr, variants)) - } - - fn is_matching_enum( - &self, - ctx: &BindgenContext, - enums: &RegexSet, - item: &Item, - ) -> bool { - let path = item.path_for_allowlisting(ctx); - let enum_ty = item.expect_type(); - - if enums.matches(path[1..].join("::")) { - return true; - } - - // Test the variants if the enum is anonymous. - if enum_ty.name().is_some() { - return false; - } - - self.variants().iter().any(|v| enums.matches(v.name())) - } - - /// Returns the final representation of the enum. - pub(crate) fn computed_enum_variation( - &self, - ctx: &BindgenContext, - item: &Item, - ) -> EnumVariation { - // ModuleConsts has higher precedence before Rust in order to avoid - // problems with overlapping match patterns. - if self.is_matching_enum( - ctx, - &ctx.options().constified_enum_modules, - item, - ) { - EnumVariation::ModuleConsts - } else if self.is_matching_enum( - ctx, - &ctx.options().bitfield_enums, - item, - ) { - EnumVariation::NewType { - is_bitfield: true, - is_global: false, - } - } else if self.is_matching_enum(ctx, &ctx.options().newtype_enums, item) - { - EnumVariation::NewType { - is_bitfield: false, - is_global: false, - } - } else if self.is_matching_enum( - ctx, - &ctx.options().newtype_global_enums, - item, - ) { - EnumVariation::NewType { - is_bitfield: false, - is_global: true, - } - } else if self.is_matching_enum( - ctx, - &ctx.options().rustified_enums, - item, - ) { - EnumVariation::Rust { - non_exhaustive: false, - } - } else if self.is_matching_enum( - ctx, - &ctx.options().rustified_non_exhaustive_enums, - item, - ) { - EnumVariation::Rust { - non_exhaustive: true, - } - } else if self.is_matching_enum( - ctx, - &ctx.options().constified_enums, - item, - ) { - EnumVariation::Consts - } else { - ctx.options().default_enum_style - } - } -} - -/// A single enum variant, to be contained only in an enum. -#[derive(Debug)] -pub(crate) struct EnumVariant { - /// The name of the variant. 
- name: String, - - /// The original name of the variant (without user mangling) - name_for_allowlisting: String, - - /// An optional doc comment. - comment: Option, - - /// The integer value of the variant. - val: EnumVariantValue, - - /// The custom behavior this variant may have, if any. - custom_behavior: Option, -} - -/// A constant value assigned to an enumeration variant. -#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] -pub enum EnumVariantValue { - /// A boolean constant. - Boolean(bool), - - /// A signed constant. - Signed(i64), - - /// An unsigned constant. - Unsigned(u64), -} - -impl EnumVariant { - /// Construct a new enumeration variant from the given parts. - pub(crate) fn new( - name: String, - name_for_allowlisting: String, - comment: Option, - val: EnumVariantValue, - custom_behavior: Option, - ) -> Self { - EnumVariant { - name, - name_for_allowlisting, - comment, - val, - custom_behavior, - } - } - - /// Get this variant's name. - pub(crate) fn name(&self) -> &str { - &self.name - } - - /// Get this variant's name. - pub(crate) fn name_for_allowlisting(&self) -> &str { - &self.name_for_allowlisting - } - - /// Get this variant's value. - pub(crate) fn val(&self) -> EnumVariantValue { - self.val - } - - /// Get this variant's documentation. - pub(crate) fn comment(&self) -> Option<&str> { - self.comment.as_deref() - } - - /// Returns whether this variant should be enforced to be a constant by code - /// generation. - pub(crate) fn force_constification(&self) -> bool { - self.custom_behavior == Some(EnumVariantCustomBehavior::Constify) - } - - /// Returns whether the current variant should be hidden completely from the - /// resulting rust enum. - pub(crate) fn hidden(&self) -> bool { - self.custom_behavior == Some(EnumVariantCustomBehavior::Hide) - } -} diff --git a/vendor/bindgen/ir/function.rs b/vendor/bindgen/ir/function.rs deleted file mode 100644 index 65a12d4bb2dbfb..00000000000000 --- a/vendor/bindgen/ir/function.rs +++ /dev/null @@ -1,838 +0,0 @@ -//! Intermediate representation for C/C++ functions and methods. - -use super::comp::MethodKind; -use super::context::{BindgenContext, TypeId}; -use super::dot::DotAttributes; -use super::item::Item; -use super::traversal::{EdgeKind, Trace, Tracer}; -use super::ty::TypeKind; -use crate::callbacks::{ItemInfo, ItemKind}; -use crate::clang::{self, ABIKind, Attribute}; -use crate::parse::{ClangSubItemParser, ParseError, ParseResult}; -use clang_sys::CXCallingConv; - -use quote::TokenStreamExt; -use std::io; -use std::str::FromStr; - -const RUST_DERIVE_FUNPTR_LIMIT: usize = 12; - -/// What kind of function are we looking at? -#[derive(Debug, Copy, Clone, PartialEq, Eq)] -pub(crate) enum FunctionKind { - /// A plain, free function. - Function, - /// A method of some kind. - Method(MethodKind), -} - -impl FunctionKind { - /// Given a clang cursor, return the kind of function it represents, or - /// `None` otherwise. - pub(crate) fn from_cursor(cursor: &clang::Cursor) -> Option { - // FIXME(emilio): Deduplicate logic with `ir::comp`. 
- Some(match cursor.kind() { - clang_sys::CXCursor_FunctionDecl => FunctionKind::Function, - clang_sys::CXCursor_Constructor => { - FunctionKind::Method(MethodKind::Constructor) - } - clang_sys::CXCursor_Destructor => { - FunctionKind::Method(if cursor.method_is_virtual() { - MethodKind::VirtualDestructor { - pure_virtual: cursor.method_is_pure_virtual(), - } - } else { - MethodKind::Destructor - }) - } - clang_sys::CXCursor_CXXMethod => { - if cursor.method_is_virtual() { - FunctionKind::Method(MethodKind::Virtual { - pure_virtual: cursor.method_is_pure_virtual(), - }) - } else if cursor.method_is_static() { - FunctionKind::Method(MethodKind::Static) - } else { - FunctionKind::Method(MethodKind::Normal) - } - } - _ => return None, - }) - } -} - -/// The style of linkage -#[derive(Debug, Clone, Copy)] -pub(crate) enum Linkage { - /// Externally visible and can be linked against - External, - /// Not exposed externally. 'static inline' functions will have this kind of linkage - Internal, -} - -/// A function declaration, with a signature, arguments, and argument names. -/// -/// The argument names vector must be the same length as the ones in the -/// signature. -#[derive(Debug)] -pub(crate) struct Function { - /// The name of this function. - name: String, - - /// The mangled name, that is, the symbol. - mangled_name: Option, - - /// The link name. If specified, overwrite `mangled_name`. - link_name: Option, - - /// The ID pointing to the current function signature. - signature: TypeId, - - /// The kind of function this is. - kind: FunctionKind, - - /// The linkage of the function. - linkage: Linkage, -} - -impl Function { - /// Construct a new function. - pub(crate) fn new( - name: String, - mangled_name: Option, - link_name: Option, - signature: TypeId, - kind: FunctionKind, - linkage: Linkage, - ) -> Self { - Function { - name, - mangled_name, - link_name, - signature, - kind, - linkage, - } - } - - /// Get this function's name. - pub(crate) fn name(&self) -> &str { - &self.name - } - - /// Get this function's name. - pub(crate) fn mangled_name(&self) -> Option<&str> { - self.mangled_name.as_deref() - } - - /// Get this function's link name. - pub fn link_name(&self) -> Option<&str> { - self.link_name.as_deref() - } - - /// Get this function's signature type. - pub(crate) fn signature(&self) -> TypeId { - self.signature - } - - /// Get this function's kind. - pub(crate) fn kind(&self) -> FunctionKind { - self.kind - } - - /// Get this function's linkage. - pub(crate) fn linkage(&self) -> Linkage { - self.linkage - } -} - -impl DotAttributes for Function { - fn dot_attributes( - &self, - _ctx: &BindgenContext, - out: &mut W, - ) -> io::Result<()> - where - W: io::Write, - { - if let Some(ref mangled) = self.mangled_name { - let mangled: String = - mangled.chars().flat_map(|c| c.escape_default()).collect(); - writeln!(out, "mangled name{mangled}")?; - } - - Ok(()) - } -} - -/// A valid rust ABI. -#[derive(Debug, Copy, Clone, Hash, Eq, PartialEq)] -pub enum Abi { - /// The default C ABI. - C, - /// The "stdcall" ABI. - Stdcall, - /// The "efiapi" ABI. - EfiApi, - /// The "fastcall" ABI. - Fastcall, - /// The "thiscall" ABI. - ThisCall, - /// The "vectorcall" ABI. - Vectorcall, - /// The "aapcs" ABI. - Aapcs, - /// The "win64" ABI. - Win64, - /// The "C-unwind" ABI. - CUnwind, - /// The "system" ABI. 
- System, -} - -impl FromStr for Abi { - type Err = String; - - fn from_str(s: &str) -> Result { - match s { - "C" => Ok(Self::C), - "stdcall" => Ok(Self::Stdcall), - "efiapi" => Ok(Self::EfiApi), - "fastcall" => Ok(Self::Fastcall), - "thiscall" => Ok(Self::ThisCall), - "vectorcall" => Ok(Self::Vectorcall), - "aapcs" => Ok(Self::Aapcs), - "win64" => Ok(Self::Win64), - "C-unwind" => Ok(Self::CUnwind), - "system" => Ok(Self::System), - _ => Err(format!("Invalid or unknown ABI {s:?}")), - } - } -} - -impl std::fmt::Display for Abi { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - let s = match *self { - Self::C => "C", - Self::Stdcall => "stdcall", - Self::EfiApi => "efiapi", - Self::Fastcall => "fastcall", - Self::ThisCall => "thiscall", - Self::Vectorcall => "vectorcall", - Self::Aapcs => "aapcs", - Self::Win64 => "win64", - Self::CUnwind => "C-unwind", - Abi::System => "system", - }; - - s.fmt(f) - } -} - -impl quote::ToTokens for Abi { - fn to_tokens(&self, tokens: &mut proc_macro2::TokenStream) { - let abi = self.to_string(); - tokens.append_all(quote! { #abi }); - } -} - -/// An ABI extracted from a clang cursor. -#[derive(Debug, Copy, Clone)] -pub(crate) enum ClangAbi { - /// An ABI known by Rust. - Known(Abi), - /// An unknown or invalid ABI. - Unknown(CXCallingConv), -} - -impl ClangAbi { - /// Returns whether this Abi is known or not. - fn is_unknown(self) -> bool { - matches!(self, ClangAbi::Unknown(..)) - } -} - -impl quote::ToTokens for ClangAbi { - fn to_tokens(&self, tokens: &mut proc_macro2::TokenStream) { - match *self { - Self::Known(abi) => abi.to_tokens(tokens), - Self::Unknown(cc) => panic!( - "Cannot turn unknown calling convention to tokens: {cc:?}" - ), - } - } -} - -/// A function signature. -#[derive(Debug)] -pub(crate) struct FunctionSig { - /// The name of this function signature. - name: String, - - /// The return type of the function. - return_type: TypeId, - - /// The type of the arguments, optionally with the name of the argument when - /// declared. - argument_types: Vec<(Option, TypeId)>, - - /// Whether this function is variadic. - is_variadic: bool, - is_divergent: bool, - - /// Whether this function's return value must be used. - must_use: bool, - - /// The ABI of this function. - abi: ClangAbi, -} - -fn get_abi(cc: CXCallingConv) -> ClangAbi { - use clang_sys::*; - match cc { - CXCallingConv_Default | CXCallingConv_C => ClangAbi::Known(Abi::C), - CXCallingConv_X86StdCall => ClangAbi::Known(Abi::Stdcall), - CXCallingConv_X86FastCall => ClangAbi::Known(Abi::Fastcall), - CXCallingConv_X86ThisCall => ClangAbi::Known(Abi::ThisCall), - CXCallingConv_X86VectorCall | CXCallingConv_AArch64VectorCall => { - ClangAbi::Known(Abi::Vectorcall) - } - CXCallingConv_AAPCS => ClangAbi::Known(Abi::Aapcs), - CXCallingConv_X86_64Win64 => ClangAbi::Known(Abi::Win64), - other => ClangAbi::Unknown(other), - } -} - -/// Get the mangled name for the cursor's referent. -pub(crate) fn cursor_mangling( - ctx: &BindgenContext, - cursor: &clang::Cursor, -) -> Option { - if !ctx.options().enable_mangling { - return None; - } - - // We early return here because libclang may crash in some case - // if we pass in a variable inside a partial specialized template. - // See rust-lang/rust-bindgen#67, and rust-lang/rust-bindgen#462. 
- if cursor.is_in_non_fully_specialized_template() { - return None; - } - - let is_itanium_abi = ctx.abi_kind() == ABIKind::GenericItanium; - let is_destructor = cursor.kind() == clang_sys::CXCursor_Destructor; - if let Ok(mut manglings) = cursor.cxx_manglings() { - while let Some(m) = manglings.pop() { - // Only generate the destructor group 1, see below. - if is_itanium_abi && is_destructor && !m.ends_with("D1Ev") { - continue; - } - - return Some(m); - } - } - - let mut mangling = cursor.mangling(); - if mangling.is_empty() { - return None; - } - - if is_itanium_abi && is_destructor { - // With old (3.8-) libclang versions, and the Itanium ABI, clang returns - // the "destructor group 0" symbol, which means that it'll try to free - // memory, which definitely isn't what we want. - // - // Explicitly force the destructor group 1 symbol. - // - // See http://refspecs.linuxbase.org/cxxabi-1.83.html#mangling-special - // for the reference, and http://stackoverflow.com/a/6614369/1091587 for - // a more friendly explanation. - // - // We don't need to do this for constructors since clang seems to always - // have returned the C1 constructor. - // - // FIXME(emilio): Can a legit symbol in other ABIs end with this string? - // I don't think so, but if it can this would become a linker error - // anyway, not an invalid free at runtime. - // - // TODO(emilio, #611): Use cpp_demangle if this becomes nastier with - // time. - if mangling.ends_with("D0Ev") { - let new_len = mangling.len() - 4; - mangling.truncate(new_len); - mangling.push_str("D1Ev"); - } - } - - Some(mangling) -} - -fn args_from_ty_and_cursor( - ty: &clang::Type, - cursor: &clang::Cursor, - ctx: &mut BindgenContext, -) -> Vec<(Option, TypeId)> { - let cursor_args = cursor.args().unwrap_or_default().into_iter(); - let type_args = ty.args().unwrap_or_default().into_iter(); - - // Argument types can be found in either the cursor or the type, but argument names may only be - // found on the cursor. We often have access to both a type and a cursor for each argument, but - // in some cases we may only have one. - // - // Prefer using the type as the source of truth for the argument's type, but fall back to - // inspecting the cursor (this happens for Objective C interfaces). - // - // Prefer using the cursor for the argument's type, but fall back to using the parent's cursor - // (this happens for function pointer return types). - cursor_args - .map(Some) - .chain(std::iter::repeat(None)) - .zip(type_args.map(Some).chain(std::iter::repeat(None))) - .take_while(|(cur, ty)| cur.is_some() || ty.is_some()) - .map(|(arg_cur, arg_ty)| { - let name = arg_cur.map(|a| a.spelling()).and_then(|name| { - if name.is_empty() { - None - } else { - Some(name) - } - }); - - let cursor = arg_cur.unwrap_or(*cursor); - let ty = arg_ty.unwrap_or_else(|| cursor.cur_type()); - (name, Item::from_ty_or_ref(ty, cursor, None, ctx)) - }) - .collect() -} - -impl FunctionSig { - /// Get the function name. - pub(crate) fn name(&self) -> &str { - &self.name - } - - /// Construct a new function signature from the given Clang type. 
- pub(crate) fn from_ty( - ty: &clang::Type, - cursor: &clang::Cursor, - ctx: &mut BindgenContext, - ) -> Result { - use clang_sys::*; - debug!("FunctionSig::from_ty {ty:?} {cursor:?}"); - - // Skip function templates - let kind = cursor.kind(); - if kind == CXCursor_FunctionTemplate { - return Err(ParseError::Continue); - } - - let spelling = cursor.spelling(); - - // Don't parse operatorxx functions in C++ - let is_operator = |spelling: &str| { - spelling.starts_with("operator") && - !clang::is_valid_identifier(spelling) - }; - if is_operator(&spelling) && !ctx.options().represent_cxx_operators { - return Err(ParseError::Continue); - } - - // Constructors of non-type template parameter classes for some reason - // include the template parameter in their name. Just skip them, since - // we don't handle well non-type template parameters anyway. - if (kind == CXCursor_Constructor || kind == CXCursor_Destructor) && - spelling.contains('<') - { - return Err(ParseError::Continue); - } - - let cursor = if cursor.is_valid() { - *cursor - } else { - ty.declaration() - }; - - let mut args = match kind { - CXCursor_FunctionDecl | - CXCursor_Constructor | - CXCursor_CXXMethod | - CXCursor_ObjCInstanceMethodDecl | - CXCursor_ObjCClassMethodDecl => { - args_from_ty_and_cursor(ty, &cursor, ctx) - } - _ => { - // For non-CXCursor_FunctionDecl, visiting the cursor's children - // is the only reliable way to get parameter names. - let mut args = vec![]; - cursor.visit(|c| { - if c.kind() == CXCursor_ParmDecl { - let ty = - Item::from_ty_or_ref(c.cur_type(), c, None, ctx); - let name = c.spelling(); - let name = - if name.is_empty() { None } else { Some(name) }; - args.push((name, ty)); - } - CXChildVisit_Continue - }); - - if args.is_empty() { - // FIXME(emilio): Sometimes libclang doesn't expose the - // right AST for functions tagged as stdcall and such... - // - // https://bugs.llvm.org/show_bug.cgi?id=45919 - args_from_ty_and_cursor(ty, &cursor, ctx) - } else { - args - } - } - }; - - let (must_use, mut is_divergent) = - if ctx.options().enable_function_attribute_detection { - let [must_use, no_return, no_return_cpp] = cursor.has_attrs(&[ - Attribute::MUST_USE, - Attribute::NO_RETURN, - Attribute::NO_RETURN_CPP, - ]); - (must_use, no_return || no_return_cpp) - } else { - Default::default() - }; - - // Check if the type contains __attribute__((noreturn)) outside of parentheses. This is - // somewhat fragile, but it seems to be the only way to get at this information as of - // libclang 9. - let ty_spelling = ty.spelling(); - let has_attribute_noreturn = ty_spelling - .match_indices("__attribute__((noreturn))") - .any(|(i, _)| { - let depth = ty_spelling[..i] - .bytes() - .filter_map(|ch| match ch { - b'(' => Some(1), - b')' => Some(-1), - _ => None, - }) - .sum::(); - depth == 0 - }); - is_divergent = is_divergent || has_attribute_noreturn; - - let is_method = kind == CXCursor_CXXMethod; - let is_constructor = kind == CXCursor_Constructor; - let is_destructor = kind == CXCursor_Destructor; - if (is_constructor || is_destructor || is_method) && - cursor.lexical_parent() != cursor.semantic_parent() - { - // Only parse constructors once. 
- return Err(ParseError::Continue); - } - - if is_method || is_constructor || is_destructor { - let is_const = is_method && cursor.method_is_const(); - let is_virtual = is_method && cursor.method_is_virtual(); - let is_static = is_method && cursor.method_is_static(); - if !is_static && - (!is_virtual || - ctx.options().use_specific_virtual_function_receiver) - { - let parent = cursor.semantic_parent(); - let class = Item::parse(parent, None, ctx) - .expect("Expected to parse the class"); - // The `class` most likely is not finished parsing yet, so use - // the unchecked variant. - let class = class.as_type_id_unchecked(); - - let class = if is_const { - let const_class_id = ctx.next_item_id(); - ctx.build_const_wrapper( - const_class_id, - class, - None, - &parent.cur_type(), - ) - } else { - class - }; - - let ptr = - Item::builtin_type(TypeKind::Pointer(class), false, ctx); - args.insert(0, (Some("this".into()), ptr)); - } else if is_virtual { - let void = Item::builtin_type(TypeKind::Void, false, ctx); - let ptr = - Item::builtin_type(TypeKind::Pointer(void), false, ctx); - args.insert(0, (Some("this".into()), ptr)); - } - } - - let ty_ret_type = if kind == CXCursor_ObjCInstanceMethodDecl || - kind == CXCursor_ObjCClassMethodDecl - { - ty.ret_type() - .or_else(|| cursor.ret_type()) - .ok_or(ParseError::Continue)? - } else { - ty.ret_type().ok_or(ParseError::Continue)? - }; - - let ret = if is_constructor && ctx.is_target_wasm32() { - // Constructors in Clang wasm32 target return a pointer to the object - // being constructed. - let void = Item::builtin_type(TypeKind::Void, false, ctx); - Item::builtin_type(TypeKind::Pointer(void), false, ctx) - } else { - Item::from_ty_or_ref(ty_ret_type, cursor, None, ctx) - }; - - // Clang plays with us at "find the calling convention", see #549 and - // co. This seems to be a better fix than that commit. - let mut call_conv = ty.call_conv(); - if let Some(ty) = cursor.cur_type().canonical_type().pointee_type() { - let cursor_call_conv = ty.call_conv(); - if cursor_call_conv != CXCallingConv_Invalid { - call_conv = cursor_call_conv; - } - } - - let abi = get_abi(call_conv); - - if abi.is_unknown() { - warn!("Unknown calling convention: {call_conv:?}"); - } - - Ok(Self { - name: spelling, - return_type: ret, - argument_types: args, - is_variadic: ty.is_variadic(), - is_divergent, - must_use, - abi, - }) - } - - /// Get this function signature's return type. - pub(crate) fn return_type(&self) -> TypeId { - self.return_type - } - - /// Get this function signature's argument (name, type) pairs. - pub(crate) fn argument_types(&self) -> &[(Option, TypeId)] { - &self.argument_types - } - - /// Get this function signature's ABI. - pub(crate) fn abi( - &self, - ctx: &BindgenContext, - name: Option<&str>, - ) -> crate::codegen::error::Result { - // FIXME (pvdrz): Try to do this check lazily instead. Maybe store the ABI inside `ctx` - // instead?. 
- let abi = if let Some(name) = name { - if let Some((abi, _)) = ctx - .options() - .abi_overrides - .iter() - .find(|(_, regex_set)| regex_set.matches(name)) - { - ClangAbi::Known(*abi) - } else { - self.abi - } - } else if let Some((abi, _)) = ctx - .options() - .abi_overrides - .iter() - .find(|(_, regex_set)| regex_set.matches(&self.name)) - { - ClangAbi::Known(*abi) - } else { - self.abi - }; - - match abi { - ClangAbi::Known(Abi::ThisCall) - if !ctx.options().rust_features().thiscall_abi => - { - Err(crate::codegen::error::Error::UnsupportedAbi("thiscall")) - } - ClangAbi::Known(Abi::Vectorcall) - if !ctx.options().rust_features().vectorcall_abi => - { - Err(crate::codegen::error::Error::UnsupportedAbi("vectorcall")) - } - ClangAbi::Known(Abi::CUnwind) - if !ctx.options().rust_features().c_unwind_abi => - { - Err(crate::codegen::error::Error::UnsupportedAbi("C-unwind")) - } - ClangAbi::Known(Abi::EfiApi) - if !ctx.options().rust_features().abi_efiapi => - { - Err(crate::codegen::error::Error::UnsupportedAbi("efiapi")) - } - ClangAbi::Known(Abi::Win64) if self.is_variadic() => { - Err(crate::codegen::error::Error::UnsupportedAbi("Win64")) - } - abi => Ok(abi), - } - } - - /// Is this function signature variadic? - pub(crate) fn is_variadic(&self) -> bool { - // Clang reports some functions as variadic when they *might* be - // variadic. We do the argument check because rust doesn't codegen well - // variadic functions without an initial argument. - self.is_variadic && !self.argument_types.is_empty() - } - - /// Must this function's return value be used? - pub(crate) fn must_use(&self) -> bool { - self.must_use - } - - /// Are function pointers with this signature able to derive Rust traits? - /// Rust only supports deriving traits for function pointers with a limited - /// number of parameters and a couple ABIs. - /// - /// For more details, see: - /// - /// * , - /// * , - /// * and - pub(crate) fn function_pointers_can_derive(&self) -> bool { - if self.argument_types.len() > RUST_DERIVE_FUNPTR_LIMIT { - return false; - } - - matches!(self.abi, ClangAbi::Known(Abi::C) | ClangAbi::Unknown(..)) - } - - /// Whether this function has attributes marking it as divergent. 
- pub(crate) fn is_divergent(&self) -> bool { - self.is_divergent - } -} - -impl ClangSubItemParser for Function { - fn parse( - cursor: clang::Cursor, - context: &mut BindgenContext, - ) -> Result, ParseError> { - use clang_sys::*; - - let kind = match FunctionKind::from_cursor(&cursor) { - None => return Err(ParseError::Continue), - Some(k) => k, - }; - - debug!("Function::parse({cursor:?}, {:?})", cursor.cur_type()); - let visibility = cursor.visibility(); - if visibility != CXVisibility_Default { - return Err(ParseError::Continue); - } - if cursor.access_specifier() == CX_CXXPrivate && - !context.options().generate_private_functions - { - return Err(ParseError::Continue); - } - - let linkage = cursor.linkage(); - let linkage = match linkage { - CXLinkage_External | CXLinkage_UniqueExternal => Linkage::External, - CXLinkage_Internal => Linkage::Internal, - _ => return Err(ParseError::Continue), - }; - - if cursor.is_inlined_function() || - cursor.definition().is_some_and(|x| x.is_inlined_function()) - { - if !context.options().generate_inline_functions && - !context.options().wrap_static_fns - { - return Err(ParseError::Continue); - } - - if cursor.is_deleted_function() && - !context.options().generate_deleted_functions - { - return Err(ParseError::Continue); - } - - // We cannot handle `inline` functions that are not `static`. - if context.options().wrap_static_fns && - cursor.is_inlined_function() && - matches!(linkage, Linkage::External) - { - return Err(ParseError::Continue); - } - } - - // Grab the signature using Item::from_ty. - let sig = Item::from_ty(&cursor.cur_type(), cursor, None, context)?; - - let mut name = cursor.spelling(); - assert!(!name.is_empty(), "Empty function name?"); - - if cursor.kind() == CXCursor_Destructor { - // Remove the leading `~`. The alternative to this is special-casing - // code-generation for destructor functions, which seems less than - // ideal. - if name.starts_with('~') { - name.remove(0); - } - - // Add a suffix to avoid colliding with constructors. This would be - // technically fine (since we handle duplicated functions/methods), - // but seems easy enough to handle it here. - name.push_str("_destructor"); - } - if let Some(nm) = context.options().last_callback(|callbacks| { - callbacks.generated_name_override(ItemInfo { - name: name.as_str(), - kind: ItemKind::Function, - }) - }) { - name = nm; - } - assert!(!name.is_empty(), "Empty function name."); - - let mangled_name = cursor_mangling(context, &cursor); - - let link_name = context.options().last_callback(|callbacks| { - callbacks.generated_link_name_override(ItemInfo { - name: name.as_str(), - kind: ItemKind::Function, - }) - }); - - let function = Self::new( - name.clone(), - mangled_name, - link_name, - sig, - kind, - linkage, - ); - - Ok(ParseResult::New(function, Some(cursor))) - } -} - -impl Trace for FunctionSig { - type Extra = (); - - fn trace(&self, _: &BindgenContext, tracer: &mut T, _: &()) - where - T: Tracer, - { - tracer.visit_kind(self.return_type().into(), EdgeKind::FunctionReturn); - - for &(_, ty) in self.argument_types() { - tracer.visit_kind(ty.into(), EdgeKind::FunctionParameter); - } - } -} diff --git a/vendor/bindgen/ir/int.rs b/vendor/bindgen/ir/int.rs deleted file mode 100644 index ed18a999492ba5..00000000000000 --- a/vendor/bindgen/ir/int.rs +++ /dev/null @@ -1,128 +0,0 @@ -//! Intermediate representation for integral types. - -/// Which integral type are we dealing with? 
-#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash, PartialOrd, Ord)] -pub enum IntKind { - /// A `bool`. - Bool, - - /// A `signed char`. - SChar, - - /// An `unsigned char`. - UChar, - - /// A `wchar_t`. - WChar, - - /// A platform-dependent `char` type, with the signedness support. - Char { - /// Whether the char is signed for the target platform. - is_signed: bool, - }, - - /// A `short`. - Short, - - /// An `unsigned short`. - UShort, - - /// An `int`. - Int, - - /// An `unsigned int`. - UInt, - - /// A `long`. - Long, - - /// An `unsigned long`. - ULong, - - /// A `long long`. - LongLong, - - /// An `unsigned long long`. - ULongLong, - - /// A 8-bit signed integer. - I8, - - /// A 8-bit unsigned integer. - U8, - - /// A 16-bit signed integer. - I16, - - /// A 16-bit integer, used only for enum size representation. - U16, - - /// The C++ type `char16_t`, which is its own type (unlike in C). - Char16, - - /// A 32-bit signed integer. - I32, - - /// A 32-bit unsigned integer. - U32, - - /// A 64-bit signed integer. - I64, - - /// A 64-bit unsigned integer. - U64, - - /// An `int128_t` - I128, - - /// A `uint128_t`. - U128, - - /// A custom integer type, used to allow custom macro types depending on - /// range. - Custom { - /// The name of the type, which would be used without modification. - name: &'static str, - /// Whether the type is signed or not. - is_signed: bool, - }, -} - -impl IntKind { - /// Is this integral type signed? - pub(crate) fn is_signed(&self) -> bool { - use self::IntKind::*; - match *self { - // TODO(emilio): wchar_t can in theory be signed, but we have no way - // to know whether it is or not right now (unlike char, there's no - // WChar_S / WChar_U). - Bool | UChar | UShort | UInt | ULong | ULongLong | U8 | U16 | - Char16 | WChar | U32 | U64 | U128 => false, - - SChar | Short | Int | Long | LongLong | I8 | I16 | I32 | I64 | - I128 => true, - - Char { is_signed } | Custom { is_signed, .. } => is_signed, - } - } - - /// If this type has a known size, return it (in bytes). This is to - /// alleviate libclang sometimes not giving us a layout (like in the case - /// when an enum is defined inside a class with template parameters). - pub(crate) fn known_size(&self) -> Option { - use self::IntKind::*; - Some(match *self { - Bool | UChar | SChar | U8 | I8 | Char { .. } => 1, - U16 | I16 | Char16 => 2, - U32 | I32 => 4, - U64 | I64 => 8, - I128 | U128 => 16, - _ => return None, - }) - } - - /// Whether this type's signedness matches the value. - pub(crate) fn signedness_matches(&self, val: i64) -> bool { - val >= 0 || self.is_signed() - } -} diff --git a/vendor/bindgen/ir/item.rs b/vendor/bindgen/ir/item.rs deleted file mode 100644 index d38879f390c9f6..00000000000000 --- a/vendor/bindgen/ir/item.rs +++ /dev/null @@ -1,1994 +0,0 @@ -//! Bindgen's core intermediate representation type. 
- -use super::super::codegen::{EnumVariation, CONSTIFIED_ENUM_MODULE_REPR_NAME}; -use super::analysis::{HasVtable, HasVtableResult, Sizedness, SizednessResult}; -use super::annotations::Annotations; -use super::comp::{CompKind, MethodKind}; -use super::context::{BindgenContext, ItemId, PartialType, TypeId}; -use super::derive::{ - CanDeriveCopy, CanDeriveDebug, CanDeriveDefault, CanDeriveEq, - CanDeriveHash, CanDeriveOrd, CanDerivePartialEq, CanDerivePartialOrd, -}; -use super::dot::DotAttributes; -use super::function::{Function, FunctionKind}; -use super::item_kind::ItemKind; -use super::layout::Opaque; -use super::module::Module; -use super::template::{AsTemplateParam, TemplateParameters}; -use super::traversal::{EdgeKind, Trace, Tracer}; -use super::ty::{Type, TypeKind}; -use crate::callbacks::ItemInfo; -use crate::clang; -use crate::parse::{ClangSubItemParser, ParseError, ParseResult}; - -use std::cell::{Cell, OnceCell}; -use std::collections::BTreeSet; -use std::fmt::Write; -use std::io; -use std::iter; -use std::sync::OnceLock; - -/// A trait to get the canonical name from an item. -/// -/// This is the trait that will eventually isolate all the logic related to name -/// mangling and that kind of stuff. -/// -/// This assumes no nested paths, at some point I'll have to make it a more -/// complex thing. -/// -/// This name is required to be safe for Rust, that is, is not expected to -/// return any rust keyword from here. -pub(crate) trait ItemCanonicalName { - /// Get the canonical name for this item. - fn canonical_name(&self, ctx: &BindgenContext) -> String; -} - -/// The same, but specifies the path that needs to be followed to reach an item. -/// -/// To contrast with `canonical_name`, here's an example: -/// -/// ```c++ -/// namespace foo { -/// const BAR = 3; -/// } -/// ``` -/// -/// For bar, the canonical path is `vec!["foo", "BAR"]`, while the canonical -/// name is just `"BAR"`. -pub(crate) trait ItemCanonicalPath { - /// Get the namespace-aware canonical path for this item. This means that if - /// namespaces are disabled, you'll get a single item, and otherwise you get - /// the whole path. - fn namespace_aware_canonical_path( - &self, - ctx: &BindgenContext, - ) -> Vec; - - /// Get the canonical path for this item. - fn canonical_path(&self, ctx: &BindgenContext) -> Vec; -} - -/// A trait for determining if some IR thing is opaque or not. -pub(crate) trait IsOpaque { - /// Extra context the IR thing needs to determine if it is opaque or not. - type Extra; - - /// Returns `true` if the thing is opaque, and `false` otherwise. - /// - /// May only be called when `ctx` is in the codegen phase. - fn is_opaque(&self, ctx: &BindgenContext, extra: &Self::Extra) -> bool; -} - -/// A trait for determining if some IR thing has type parameter in array or not. -pub(crate) trait HasTypeParamInArray { - /// Returns `true` if the thing has Array, and `false` otherwise. - fn has_type_param_in_array(&self, ctx: &BindgenContext) -> bool; -} - -/// A trait for iterating over an item and its parents and up its ancestor chain -/// up to (but not including) the implicit root module. -pub(crate) trait ItemAncestors { - /// Get an iterable over this item's ancestors. 
- fn ancestors<'a>(&self, ctx: &'a BindgenContext) -> ItemAncestorsIter<'a>; -} - -#[cfg(feature = "__testing_only_extra_assertions")] -type DebugOnlyItemSet = ItemSet; - -#[cfg(not(feature = "__testing_only_extra_assertions"))] -struct DebugOnlyItemSet; - -#[cfg(not(feature = "__testing_only_extra_assertions"))] -impl DebugOnlyItemSet { - fn new() -> Self { - DebugOnlyItemSet - } - - #[allow(clippy::trivially_copy_pass_by_ref)] - fn contains(&self, _id: &ItemId) -> bool { - false - } - - fn insert(&mut self, _id: ItemId) {} -} - -/// An iterator over an item and its ancestors. -pub(crate) struct ItemAncestorsIter<'a> { - item: ItemId, - ctx: &'a BindgenContext, - seen: DebugOnlyItemSet, -} - -impl<'a> ItemAncestorsIter<'a> { - fn new>(ctx: &'a BindgenContext, id: Id) -> Self { - ItemAncestorsIter { - item: id.into(), - ctx, - seen: DebugOnlyItemSet::new(), - } - } -} - -impl Iterator for ItemAncestorsIter<'_> { - type Item = ItemId; - - fn next(&mut self) -> Option { - let item = self.ctx.resolve_item(self.item); - - if item.parent_id() == self.item { - None - } else { - self.item = item.parent_id(); - - extra_assert!(!self.seen.contains(&item.id())); - self.seen.insert(item.id()); - - Some(item.id()) - } - } -} - -impl AsTemplateParam for T -where - T: Copy + Into, -{ - type Extra = (); - - fn as_template_param( - &self, - ctx: &BindgenContext, - _: &(), - ) -> Option { - ctx.resolve_item((*self).into()).as_template_param(ctx, &()) - } -} - -impl AsTemplateParam for Item { - type Extra = (); - - fn as_template_param( - &self, - ctx: &BindgenContext, - _: &(), - ) -> Option { - self.kind.as_template_param(ctx, self) - } -} - -impl AsTemplateParam for ItemKind { - type Extra = Item; - - fn as_template_param( - &self, - ctx: &BindgenContext, - item: &Item, - ) -> Option { - match *self { - ItemKind::Type(ref ty) => ty.as_template_param(ctx, item), - ItemKind::Module(..) | - ItemKind::Function(..) | - ItemKind::Var(..) => None, - } - } -} - -impl ItemCanonicalName for T -where - T: Copy + Into, -{ - fn canonical_name(&self, ctx: &BindgenContext) -> String { - debug_assert!( - ctx.in_codegen_phase(), - "You're not supposed to call this yet" - ); - ctx.resolve_item(*self).canonical_name(ctx) - } -} - -impl ItemCanonicalPath for T -where - T: Copy + Into, -{ - fn namespace_aware_canonical_path( - &self, - ctx: &BindgenContext, - ) -> Vec { - debug_assert!( - ctx.in_codegen_phase(), - "You're not supposed to call this yet" - ); - ctx.resolve_item(*self).namespace_aware_canonical_path(ctx) - } - - fn canonical_path(&self, ctx: &BindgenContext) -> Vec { - debug_assert!( - ctx.in_codegen_phase(), - "You're not supposed to call this yet" - ); - ctx.resolve_item(*self).canonical_path(ctx) - } -} - -impl ItemAncestors for T -where - T: Copy + Into, -{ - fn ancestors<'a>(&self, ctx: &'a BindgenContext) -> ItemAncestorsIter<'a> { - ItemAncestorsIter::new(ctx, *self) - } -} - -impl ItemAncestors for Item { - fn ancestors<'a>(&self, ctx: &'a BindgenContext) -> ItemAncestorsIter<'a> { - self.id().ancestors(ctx) - } -} - -impl Trace for Id -where - Id: Copy + Into, -{ - type Extra = (); - - fn trace(&self, ctx: &BindgenContext, tracer: &mut T, extra: &()) - where - T: Tracer, - { - ctx.resolve_item(*self).trace(ctx, tracer, extra); - } -} - -impl Trace for Item { - type Extra = (); - - fn trace(&self, ctx: &BindgenContext, tracer: &mut T, _extra: &()) - where - T: Tracer, - { - // Even if this item is blocklisted/hidden, we want to trace it. 
It is - // traversal iterators' consumers' responsibility to filter items as - // needed. Generally, this filtering happens in the implementation of - // `Iterator` for `allowlistedItems`. Fully tracing blocklisted items is - // necessary for things like the template parameter usage analysis to - // function correctly. - - match *self.kind() { - ItemKind::Type(ref ty) => { - // There are some types, like resolved type references, where we - // don't want to stop collecting types even though they may be - // opaque. - if ty.should_be_traced_unconditionally() || - !self.is_opaque(ctx, &()) - { - ty.trace(ctx, tracer, self); - } - } - ItemKind::Function(ref fun) => { - // Just the same way, it has not real meaning for a function to - // be opaque, so we trace across it. - tracer.visit(fun.signature().into()); - } - ItemKind::Var(ref var) => { - tracer.visit_kind(var.ty().into(), EdgeKind::VarType); - } - ItemKind::Module(_) => { - // Module -> children edges are "weak", and we do not want to - // trace them. If we did, then allowlisting wouldn't work as - // expected: everything in every module would end up - // allowlisted. - // - // TODO: make a new edge kind for module -> children edges and - // filter them during allowlisting traversals. - } - } - } -} - -impl CanDeriveDebug for Item { - fn can_derive_debug(&self, ctx: &BindgenContext) -> bool { - self.id().can_derive_debug(ctx) - } -} - -impl CanDeriveDefault for Item { - fn can_derive_default(&self, ctx: &BindgenContext) -> bool { - self.id().can_derive_default(ctx) - } -} - -impl CanDeriveCopy for Item { - fn can_derive_copy(&self, ctx: &BindgenContext) -> bool { - self.id().can_derive_copy(ctx) - } -} - -impl CanDeriveHash for Item { - fn can_derive_hash(&self, ctx: &BindgenContext) -> bool { - self.id().can_derive_hash(ctx) - } -} - -impl CanDerivePartialOrd for Item { - fn can_derive_partialord(&self, ctx: &BindgenContext) -> bool { - self.id().can_derive_partialord(ctx) - } -} - -impl CanDerivePartialEq for Item { - fn can_derive_partialeq(&self, ctx: &BindgenContext) -> bool { - self.id().can_derive_partialeq(ctx) - } -} - -impl CanDeriveEq for Item { - fn can_derive_eq(&self, ctx: &BindgenContext) -> bool { - self.id().can_derive_eq(ctx) - } -} - -impl CanDeriveOrd for Item { - fn can_derive_ord(&self, ctx: &BindgenContext) -> bool { - self.id().can_derive_ord(ctx) - } -} - -/// An item is the base of the bindgen representation, it can be either a -/// module, a type, a function, or a variable (see `ItemKind` for more -/// information). -/// -/// Items refer to each other by `ItemId`. Every item has its parent's -/// ID. Depending on the kind of item this is, it may also refer to other items, -/// such as a compound type item referring to other types. Collectively, these -/// references form a graph. -/// -/// The entry-point to this graph is the "root module": a meta-item used to hold -/// all top-level items. -/// -/// An item may have a comment, and annotations (see the `annotations` module). -/// -/// Note that even though we parse all the types of annotations in comments, not -/// all of them apply to every item. Those rules are described in the -/// `annotations` module. -#[derive(Debug)] -pub(crate) struct Item { - /// This item's ID. - id: ItemId, - - /// The item's local ID, unique only amongst its siblings. Only used for - /// anonymous items. - /// - /// Lazily initialized in `local_id()`. - /// - /// Note that only structs, unions, and enums get a local type ID. In any - /// case this is an implementation detail. 
- local_id: OnceCell, - - /// The next local ID to use for a child or template instantiation. - next_child_local_id: Cell, - - /// A cached copy of the canonical name, as returned by `canonical_name`. - /// - /// This is a fairly used operation during codegen so this makes bindgen - /// considerably faster in those cases. - canonical_name: OnceCell, - - /// The path to use for allowlisting and other name-based checks, as - /// returned by `path_for_allowlisting`, lazily constructed. - path_for_allowlisting: OnceCell>, - - /// A doc comment over the item, if any. - comment: Option, - /// Annotations extracted from the doc comment, or the default ones - /// otherwise. - annotations: Annotations, - /// An item's parent ID. This will most likely be a class where this item - /// was declared, or a module, etc. - /// - /// All the items have a parent, except the root module, in which case the - /// parent ID is its own ID. - parent_id: ItemId, - /// The item kind. - kind: ItemKind, - /// The source location of the item. - location: Option, -} - -impl AsRef for Item { - fn as_ref(&self) -> &ItemId { - &self.id - } -} - -impl Item { - /// Construct a new `Item`. - pub(crate) fn new( - id: ItemId, - comment: Option, - annotations: Option, - parent_id: ItemId, - kind: ItemKind, - location: Option, - ) -> Self { - debug_assert!(id != parent_id || kind.is_module()); - Item { - id, - local_id: OnceCell::new(), - next_child_local_id: Cell::new(1), - canonical_name: OnceCell::new(), - path_for_allowlisting: OnceCell::new(), - parent_id, - comment, - annotations: annotations.unwrap_or_default(), - kind, - location, - } - } - - /// Construct a new opaque item type. - pub(crate) fn new_opaque_type( - with_id: ItemId, - ty: &clang::Type, - ctx: &mut BindgenContext, - ) -> TypeId { - let location = ty.declaration().location(); - let ty = Opaque::from_clang_ty(ty, ctx); - let kind = ItemKind::Type(ty); - let parent = ctx.root_module().into(); - ctx.add_item( - Item::new(with_id, None, None, parent, kind, Some(location)), - None, - None, - ); - with_id.as_type_id_unchecked() - } - - /// Get this `Item`'s identifier. - pub(crate) fn id(&self) -> ItemId { - self.id - } - - /// Get this `Item`'s parent's identifier. - /// - /// For the root module, the parent's ID is its own ID. - pub(crate) fn parent_id(&self) -> ItemId { - self.parent_id - } - - /// Set this item's parent ID. - /// - /// This is only used so replacements get generated in the proper module. - pub(crate) fn set_parent_for_replacement>( - &mut self, - id: Id, - ) { - self.parent_id = id.into(); - } - - /// Returns the depth this item is indented to. - /// - /// FIXME(emilio): This may need fixes for the enums within modules stuff. - pub(crate) fn codegen_depth(&self, ctx: &BindgenContext) -> usize { - if !ctx.options().enable_cxx_namespaces { - return 0; - } - - self.ancestors(ctx) - .filter(|id| { - ctx.resolve_item(*id).as_module().is_some_and(|module| { - !module.is_inline() || - ctx.options().conservative_inline_namespaces - }) - }) - .count() + - 1 - } - - /// Get this `Item`'s comment, if it has any, already preprocessed and with - /// the right indentation. - pub(crate) fn comment(&self, ctx: &BindgenContext) -> Option { - if !ctx.options().generate_comments { - return None; - } - - self.comment - .as_ref() - .map(|comment| ctx.options().process_comment(comment)) - } - - /// What kind of item is this? - pub(crate) fn kind(&self) -> &ItemKind { - &self.kind - } - - /// Get a mutable reference to this item's kind. 
- pub(crate) fn kind_mut(&mut self) -> &mut ItemKind { - &mut self.kind - } - - /// Where in the source is this item located? - pub(crate) fn location(&self) -> Option<&clang::SourceLocation> { - self.location.as_ref() - } - - /// Get an identifier that differentiates this item from its siblings. - /// - /// This should stay relatively stable in the face of code motion outside or - /// below this item's lexical scope, meaning that this can be useful for - /// generating relatively stable identifiers within a scope. - pub(crate) fn local_id(&self, ctx: &BindgenContext) -> usize { - *self.local_id.get_or_init(|| { - let parent = ctx.resolve_item(self.parent_id); - parent.next_child_local_id() - }) - } - - /// Get an identifier that differentiates a child of this item of other - /// related items. - /// - /// This is currently used for anonymous items, and template instantiation - /// tests, in both cases in order to reduce noise when system headers are at - /// place. - pub(crate) fn next_child_local_id(&self) -> usize { - let local_id = self.next_child_local_id.get(); - self.next_child_local_id.set(local_id + 1); - local_id - } - - /// Returns whether this item is a top-level item, from the point of view of - /// bindgen. - /// - /// This point of view changes depending on whether namespaces are enabled - /// or not. That way, in the following example: - /// - /// ```c++ - /// namespace foo { - /// static int var; - /// } - /// ``` - /// - /// `var` would be a toplevel item if namespaces are disabled, but won't if - /// they aren't. - /// - /// This function is used to determine when the codegen phase should call - /// `codegen` on an item, since any item that is not top-level will be - /// generated by its parent. - pub(crate) fn is_toplevel(&self, ctx: &BindgenContext) -> bool { - // FIXME: Workaround for some types falling behind when parsing weird - // stl classes, for example. - if ctx.options().enable_cxx_namespaces && - self.kind().is_module() && - self.id() != ctx.root_module() - { - return false; - } - - let mut parent = self.parent_id; - loop { - let Some(parent_item) = ctx.resolve_item_fallible(parent) else { - return false; - }; - - if parent_item.id() == ctx.root_module() { - return true; - } else if ctx.options().enable_cxx_namespaces || - !parent_item.kind().is_module() - { - return false; - } - - parent = parent_item.parent_id(); - } - } - - /// Get a reference to this item's underlying `Type`. Panic if this is some - /// other kind of item. - pub(crate) fn expect_type(&self) -> &Type { - self.kind().expect_type() - } - - /// Get a reference to this item's underlying `Type`, or `None` if this is - /// some other kind of item. - pub(crate) fn as_type(&self) -> Option<&Type> { - self.kind().as_type() - } - - /// Get a reference to this item's underlying `Function`. Panic if this is - /// some other kind of item. - pub(crate) fn expect_function(&self) -> &Function { - self.kind().expect_function() - } - - /// Is this item a module? - pub(crate) fn is_module(&self) -> bool { - matches!(self.kind, ItemKind::Module(..)) - } - - /// Get this item's annotations. - pub(crate) fn annotations(&self) -> &Annotations { - &self.annotations - } - - /// Whether this item should be blocklisted. - /// - /// This may be due to either annotations or to other kind of configuration. 
- pub(crate) fn is_blocklisted(&self, ctx: &BindgenContext) -> bool { - debug_assert!( - ctx.in_codegen_phase(), - "You're not supposed to call this yet" - ); - if self.annotations.hide() { - return true; - } - - if !ctx.options().blocklisted_files.is_empty() { - if let Some(location) = &self.location { - let (file, _, _, _) = location.location(); - if let Some(filename) = file.name() { - if ctx.options().blocklisted_files.matches(filename) { - return true; - } - } - } - } - - let path = self.path_for_allowlisting(ctx); - let name = path[1..].join("::"); - ctx.options().blocklisted_items.matches(&name) || - match self.kind { - ItemKind::Type(..) => { - ctx.options().blocklisted_types.matches(&name) || - ctx.is_replaced_type(path, self.id) - } - ItemKind::Function(..) => { - ctx.options().blocklisted_functions.matches(&name) - } - ItemKind::Var(..) => { - ctx.options().blocklisted_vars.matches(&name) - } - // TODO: Add namespace blocklisting? - ItemKind::Module(..) => false, - } - } - - /// Take out item `NameOptions` - pub(crate) fn name<'a>( - &'a self, - ctx: &'a BindgenContext, - ) -> NameOptions<'a> { - NameOptions::new(self, ctx) - } - - /// Get the target item ID for name generation. - fn name_target(&self, ctx: &BindgenContext) -> ItemId { - let mut targets_seen = DebugOnlyItemSet::new(); - let mut item = self; - - loop { - extra_assert!(!targets_seen.contains(&item.id())); - targets_seen.insert(item.id()); - - if self.annotations().use_instead_of().is_some() { - return self.id(); - } - - match *item.kind() { - ItemKind::Type(ref ty) => match *ty.kind() { - TypeKind::ResolvedTypeRef(inner) => { - item = ctx.resolve_item(inner); - } - TypeKind::TemplateInstantiation(ref inst) => { - item = ctx.resolve_item(inst.template_definition()); - } - _ => return item.id(), - }, - _ => return item.id(), - } - } - } - - /// Create a fully disambiguated name for an item, including template - /// parameters if it is a type - pub(crate) fn full_disambiguated_name( - &self, - ctx: &BindgenContext, - ) -> String { - let mut s = String::new(); - let level = 0; - self.push_disambiguated_name(ctx, &mut s, level); - s - } - - /// Helper function for `full_disambiguated_name` - fn push_disambiguated_name( - &self, - ctx: &BindgenContext, - to: &mut String, - level: u8, - ) { - to.push_str(&self.canonical_name(ctx)); - if let ItemKind::Type(ref ty) = *self.kind() { - if let TypeKind::TemplateInstantiation(ref inst) = *ty.kind() { - let _ = write!(to, "_open{level}_"); - for arg in inst.template_arguments() { - arg.into_resolver() - .through_type_refs() - .resolve(ctx) - .push_disambiguated_name(ctx, to, level + 1); - to.push('_'); - } - let _ = write!(to, "close{level}"); - } - } - } - - /// Get this function item's name, or `None` if this item is not a function. - fn func_name(&self) -> Option<&str> { - match *self.kind() { - ItemKind::Function(ref func) => Some(func.name()), - _ => None, - } - } - - /// Get the overload index for this method. If this is not a method, return - /// `None`. - fn overload_index(&self, ctx: &BindgenContext) -> Option { - self.func_name().and_then(|func_name| { - let parent = ctx.resolve_item(self.parent_id()); - if let ItemKind::Type(ref ty) = *parent.kind() { - if let TypeKind::Comp(ref ci) = *ty.kind() { - // All the constructors have the same name, so no need to - // resolve and check. 
- return ci - .constructors() - .iter() - .position(|c| *c == self.id()) - .or_else(|| { - ci.methods() - .iter() - .filter(|m| { - let item = ctx.resolve_item(m.signature()); - let func = item.expect_function(); - func.name() == func_name - }) - .position(|m| m.signature() == self.id()) - }); - } - } - - None - }) - } - - /// Get this item's base name (aka non-namespaced name). - fn base_name(&self, ctx: &BindgenContext) -> String { - if let Some(path) = self.annotations().use_instead_of() { - return path.last().unwrap().clone(); - } - - match *self.kind() { - ItemKind::Var(ref var) => var.name().to_owned(), - ItemKind::Module(ref module) => module.name().map_or_else( - || format!("_bindgen_mod_{}", self.exposed_id(ctx)), - ToOwned::to_owned, - ), - ItemKind::Type(ref ty) => ty.sanitized_name(ctx).map_or_else( - || format!("_bindgen_ty_{}", self.exposed_id(ctx)), - Into::into, - ), - ItemKind::Function(ref fun) => { - let mut name = fun.name().to_owned(); - - if let Some(idx) = self.overload_index(ctx) { - if idx > 0 { - write!(&mut name, "{idx}").unwrap(); - } - } - - name - } - } - } - - fn is_anon(&self) -> bool { - match self.kind() { - ItemKind::Module(module) => module.name().is_none(), - ItemKind::Type(ty) => ty.name().is_none(), - ItemKind::Function(_) => false, - ItemKind::Var(_) => false, - } - } - - /// Get the canonical name without taking into account the replaces - /// annotation. - /// - /// This is the base logic used to implement hiding and replacing via - /// annotations, and also to implement proper name mangling. - /// - /// The idea is that each generated type in the same "level" (read: module - /// or namespace) has a unique canonical name. - /// - /// This name should be derived from the immutable state contained in the - /// type and the parent chain, since it should be consistent. - /// - /// If `BindgenOptions::disable_nested_struct_naming` is true then returned - /// name is the inner most non-anonymous name plus all the anonymous base names - /// that follows. - pub(crate) fn real_canonical_name( - &self, - ctx: &BindgenContext, - opt: &NameOptions, - ) -> String { - let target = ctx.resolve_item(self.name_target(ctx)); - - // Short-circuit if the target has an override, and just use that. - if let Some(path) = target.annotations.use_instead_of() { - if ctx.options().enable_cxx_namespaces { - return path.last().unwrap().clone(); - } - return path.join("_"); - } - - let base_name = target.base_name(ctx); - - // Named template type arguments are never namespaced, and never - // mangled. - if target.is_template_param(ctx, &()) { - return base_name; - } - - // Ancestors' ID iter - let mut ids_iter = target - .parent_id() - .ancestors(ctx) - .filter(|id| *id != ctx.root_module()) - .take_while(|id| { - // Stop iterating ancestors once we reach a non-inline namespace - // when opt.within_namespaces is set. - !opt.within_namespaces || !ctx.resolve_item(*id).is_module() - }) - .filter(|id| { - if !ctx.options().conservative_inline_namespaces { - if let ItemKind::Module(ref module) = - *ctx.resolve_item(*id).kind() - { - return !module.is_inline(); - } - } - - true - }); - - let ids: Vec<_> = if ctx.options().disable_nested_struct_naming { - let mut ids = Vec::new(); - - // If target is anonymous we need find its first named ancestor. - if target.is_anon() { - for id in ids_iter.by_ref() { - ids.push(id); - - if !ctx.resolve_item(id).is_anon() { - break; - } - } - } - - ids - } else { - ids_iter.collect() - }; - - // Concatenate this item's ancestors' names together. 
- let mut names: Vec<_> = ids - .into_iter() - .map(|id| { - let item = ctx.resolve_item(id); - let target = ctx.resolve_item(item.name_target(ctx)); - target.base_name(ctx) - }) - .filter(|name| !name.is_empty()) - .collect(); - - names.reverse(); - - if !base_name.is_empty() { - names.push(base_name); - } - - if ctx.options().c_naming { - if let Some(prefix) = self.c_naming_prefix() { - names.insert(0, prefix.to_string()); - } - } - - let name = names.join("_"); - - let name = if opt.user_mangled == UserMangled::Yes { - let item_info = ItemInfo { - name: &name, - kind: match self.kind() { - ItemKind::Module(..) => crate::callbacks::ItemKind::Module, - ItemKind::Type(..) => crate::callbacks::ItemKind::Type, - ItemKind::Function(..) => { - crate::callbacks::ItemKind::Function - } - ItemKind::Var(..) => crate::callbacks::ItemKind::Var, - }, - }; - ctx.options() - .last_callback(|callbacks| callbacks.item_name(item_info)) - .unwrap_or(name) - } else { - name - }; - - ctx.rust_mangle(&name).into_owned() - } - - /// The exposed ID that represents an unique ID among the siblings of a - /// given item. - pub(crate) fn exposed_id(&self, ctx: &BindgenContext) -> String { - // Only use local ids for enums, classes, structs and union types. All - // other items use their global ID. - let ty_kind = self.kind().as_type().map(|t| t.kind()); - if let Some( - TypeKind::Comp(..) | - TypeKind::TemplateInstantiation(..) | - TypeKind::Enum(..), - ) = ty_kind - { - return self.local_id(ctx).to_string(); - } - - // Note that this `id_` prefix prevents (really unlikely) collisions - // between the global ID and the local ID of an item with the same - // parent. - format!("id_{}", self.id().as_usize()) - } - - /// Get a reference to this item's `Module`, or `None` if this is not a - /// `Module` item. - pub(crate) fn as_module(&self) -> Option<&Module> { - match self.kind { - ItemKind::Module(ref module) => Some(module), - _ => None, - } - } - - /// Get a mutable reference to this item's `Module`, or `None` if this is - /// not a `Module` item. - pub(crate) fn as_module_mut(&mut self) -> Option<&mut Module> { - match self.kind { - ItemKind::Module(ref mut module) => Some(module), - _ => None, - } - } - - /// Returns whether the item is a constified module enum - fn is_constified_enum_module(&self, ctx: &BindgenContext) -> bool { - // Do not jump through aliases, except for aliases that point to a type - // with the same name, since we dont generate coe for them. - let item = self.id.into_resolver().through_type_refs().resolve(ctx); - let ItemKind::Type(ref type_) = *item.kind() else { - return false; - }; - - match *type_.kind() { - TypeKind::Enum(ref enum_) => { - enum_.computed_enum_variation(ctx, self) == - EnumVariation::ModuleConsts - } - TypeKind::Alias(inner_id) => { - // TODO(emilio): Make this "hop through type aliases that aren't - // really generated" an option in `ItemResolver`? - let inner_item = ctx.resolve_item(inner_id); - let name = item.canonical_name(ctx); - - if inner_item.canonical_name(ctx) == name { - inner_item.is_constified_enum_module(ctx) - } else { - false - } - } - _ => false, - } - } - - /// Is this item of a kind that is enabled for code generation? - pub(crate) fn is_enabled_for_codegen(&self, ctx: &BindgenContext) -> bool { - let cc = &ctx.options().codegen_config; - match *self.kind() { - ItemKind::Module(..) 
=> true, - ItemKind::Var(_) => cc.vars(), - ItemKind::Type(_) => cc.types(), - ItemKind::Function(ref f) => match f.kind() { - FunctionKind::Function => cc.functions(), - FunctionKind::Method(MethodKind::Constructor) => { - cc.constructors() - } - FunctionKind::Method( - MethodKind::Destructor | - MethodKind::VirtualDestructor { .. }, - ) => cc.destructors(), - FunctionKind::Method( - MethodKind::Static | - MethodKind::Normal | - MethodKind::Virtual { .. }, - ) => cc.methods(), - }, - } - } - - /// Returns the path we should use for allowlisting / blocklisting, which - /// doesn't include user-mangling. - pub(crate) fn path_for_allowlisting( - &self, - ctx: &BindgenContext, - ) -> &Vec { - self.path_for_allowlisting - .get_or_init(|| self.compute_path(ctx, UserMangled::No)) - } - - fn compute_path( - &self, - ctx: &BindgenContext, - mangled: UserMangled, - ) -> Vec { - if let Some(path) = self.annotations().use_instead_of() { - let mut ret = - vec![ctx.resolve_item(ctx.root_module()).name(ctx).get()]; - ret.extend_from_slice(path); - return ret; - } - - let target = ctx.resolve_item(self.name_target(ctx)); - let mut path: Vec<_> = target - .ancestors(ctx) - .chain(iter::once(ctx.root_module().into())) - .map(|id| ctx.resolve_item(id)) - .filter(|item| { - item.id() == target.id() || - item.as_module().is_some_and(|module| { - !module.is_inline() || - ctx.options().conservative_inline_namespaces - }) - }) - .map(|item| { - ctx.resolve_item(item.name_target(ctx)) - .name(ctx) - .within_namespaces() - .user_mangled(mangled) - .get() - }) - .collect(); - path.reverse(); - path - } - - /// Returns a prefix for the canonical name when C naming is enabled. - fn c_naming_prefix(&self) -> Option<&str> { - let ItemKind::Type(ref ty) = self.kind else { - return None; - }; - - Some(match ty.kind() { - TypeKind::Comp(ref ci) => match ci.kind() { - CompKind::Struct => "struct", - CompKind::Union => "union", - }, - TypeKind::Enum(..) => "enum", - _ => return None, - }) - } - - /// Whether this is a `#[must_use]` type. 
- pub(crate) fn must_use(&self, ctx: &BindgenContext) -> bool { - self.annotations().must_use_type() || ctx.must_use_type_by_name(self) - } -} - -impl IsOpaque for T -where - T: Copy + Into, -{ - type Extra = (); - - fn is_opaque(&self, ctx: &BindgenContext, _: &()) -> bool { - debug_assert!( - ctx.in_codegen_phase(), - "You're not supposed to call this yet" - ); - ctx.resolve_item((*self).into()).is_opaque(ctx, &()) - } -} - -impl IsOpaque for Item { - type Extra = (); - - fn is_opaque(&self, ctx: &BindgenContext, _: &()) -> bool { - debug_assert!( - ctx.in_codegen_phase(), - "You're not supposed to call this yet" - ); - self.annotations.opaque() || - self.as_type().is_some_and(|ty| ty.is_opaque(ctx, self)) || - ctx.opaque_by_name(self.path_for_allowlisting(ctx)) - } -} - -impl HasVtable for T -where - T: Copy + Into, -{ - fn has_vtable(&self, ctx: &BindgenContext) -> bool { - let id: ItemId = (*self).into(); - id.as_type_id(ctx).is_some_and(|id| { - !matches!(ctx.lookup_has_vtable(id), HasVtableResult::No) - }) - } - - fn has_vtable_ptr(&self, ctx: &BindgenContext) -> bool { - let id: ItemId = (*self).into(); - id.as_type_id(ctx).is_some_and(|id| { - matches!(ctx.lookup_has_vtable(id), HasVtableResult::SelfHasVtable) - }) - } -} - -impl HasVtable for Item { - fn has_vtable(&self, ctx: &BindgenContext) -> bool { - self.id().has_vtable(ctx) - } - - fn has_vtable_ptr(&self, ctx: &BindgenContext) -> bool { - self.id().has_vtable_ptr(ctx) - } -} - -impl Sizedness for T -where - T: Copy + Into, -{ - fn sizedness(&self, ctx: &BindgenContext) -> SizednessResult { - let id: ItemId = (*self).into(); - id.as_type_id(ctx) - .map_or(SizednessResult::default(), |id| ctx.lookup_sizedness(id)) - } -} - -impl Sizedness for Item { - fn sizedness(&self, ctx: &BindgenContext) -> SizednessResult { - self.id().sizedness(ctx) - } -} - -impl HasTypeParamInArray for T -where - T: Copy + Into, -{ - fn has_type_param_in_array(&self, ctx: &BindgenContext) -> bool { - debug_assert!( - ctx.in_codegen_phase(), - "You're not supposed to call this yet" - ); - ctx.lookup_has_type_param_in_array(*self) - } -} - -impl HasTypeParamInArray for Item { - fn has_type_param_in_array(&self, ctx: &BindgenContext) -> bool { - debug_assert!( - ctx.in_codegen_phase(), - "You're not supposed to call this yet" - ); - ctx.lookup_has_type_param_in_array(self.id()) - } -} - -/// A set of items. -pub(crate) type ItemSet = BTreeSet; - -impl DotAttributes for Item { - fn dot_attributes( - &self, - ctx: &BindgenContext, - out: &mut W, - ) -> io::Result<()> - where - W: io::Write, - { - writeln!( - out, - "{:?} - name{}", - self.id, - self.name(ctx).get() - )?; - - if self.is_opaque(ctx, &()) { - writeln!(out, "opaquetrue")?; - } - - self.kind.dot_attributes(ctx, out) - } -} - -impl TemplateParameters for T -where - T: Copy + Into, -{ - fn self_template_params(&self, ctx: &BindgenContext) -> Vec { - ctx.resolve_item_fallible(*self) - .map_or(vec![], |item| item.self_template_params(ctx)) - } -} - -impl TemplateParameters for Item { - fn self_template_params(&self, ctx: &BindgenContext) -> Vec { - self.kind.self_template_params(ctx) - } -} - -impl TemplateParameters for ItemKind { - fn self_template_params(&self, ctx: &BindgenContext) -> Vec { - match *self { - ItemKind::Type(ref ty) => ty.self_template_params(ctx), - // If we start emitting bindings to explicitly instantiated - // functions, then we'll need to check ItemKind::Function for - // template params. 
- ItemKind::Function(_) | ItemKind::Module(_) | ItemKind::Var(_) => { - vec![] - } - } - } -} - -// An utility function to handle recursing inside nested types. -fn visit_child( - cur: clang::Cursor, - id: ItemId, - ty: &clang::Type, - parent_id: Option, - ctx: &mut BindgenContext, - result: &mut Result, -) -> clang_sys::CXChildVisitResult { - use clang_sys::*; - if result.is_ok() { - return CXChildVisit_Break; - } - - *result = Item::from_ty_with_id(id, ty, cur, parent_id, ctx); - - match *result { - Ok(..) => CXChildVisit_Break, - Err(ParseError::Recurse) => { - cur.visit(|c| visit_child(c, id, ty, parent_id, ctx, result)); - CXChildVisit_Continue - } - Err(ParseError::Continue) => CXChildVisit_Continue, - } -} - -impl Item { - /// Create a builtin type. - pub(crate) fn builtin_type( - kind: TypeKind, - is_const: bool, - ctx: &mut BindgenContext, - ) -> TypeId { - // Feel free to add more here, I'm just lazy. - match kind { - TypeKind::Void | - TypeKind::Int(..) | - TypeKind::Pointer(..) | - TypeKind::Float(..) => {} - _ => panic!("Unsupported builtin type"), - } - - let ty = Type::new(None, None, kind, is_const); - let id = ctx.next_item_id(); - let module = ctx.root_module().into(); - ctx.add_item( - Item::new(id, None, None, module, ItemKind::Type(ty), None), - None, - None, - ); - id.as_type_id_unchecked() - } - - /// Parse this item from the given Clang cursor. - pub(crate) fn parse( - cursor: clang::Cursor, - parent_id: Option, - ctx: &mut BindgenContext, - ) -> Result { - use crate::ir::var::Var; - use clang_sys::*; - - if !cursor.is_valid() { - return Err(ParseError::Continue); - } - - let comment = cursor.raw_comment(); - let annotations = Annotations::new(&cursor); - - let current_module = ctx.current_module().into(); - let relevant_parent_id = parent_id.unwrap_or(current_module); - - #[allow(clippy::missing_docs_in_private_items)] - macro_rules! try_parse { - ($what:ident) => { - match $what::parse(cursor, ctx) { - Ok(ParseResult::New(item, declaration)) => { - let id = ctx.next_item_id(); - - ctx.add_item( - Item::new( - id, - comment, - annotations, - relevant_parent_id, - ItemKind::$what(item), - Some(cursor.location()), - ), - declaration, - Some(cursor), - ); - return Ok(id); - } - Ok(ParseResult::AlreadyResolved(id)) => { - return Ok(id); - } - Err(ParseError::Recurse) => return Err(ParseError::Recurse), - Err(ParseError::Continue) => {} - } - }; - } - - try_parse!(Module); - - // NOTE: Is extremely important to parse functions and vars **before** - // types. Otherwise we can parse a function declaration as a type - // (which is legal), and lose functions to generate. - // - // In general, I'm not totally confident this split between - // ItemKind::Function and TypeKind::FunctionSig is totally worth it, but - // I guess we can try. - try_parse!(Function); - try_parse!(Var); - - // Types are sort of special, so to avoid parsing template classes - // twice, handle them separately. 
- { - let definition = cursor.definition(); - let applicable_cursor = definition.unwrap_or(cursor); - - let relevant_parent_id = match definition { - Some(definition) => { - if definition != cursor { - ctx.add_semantic_parent(definition, relevant_parent_id); - return Ok(Item::from_ty_or_ref( - applicable_cursor.cur_type(), - cursor, - parent_id, - ctx, - ) - .into()); - } - ctx.known_semantic_parent(definition) - .or(parent_id) - .unwrap_or_else(|| ctx.current_module().into()) - } - None => relevant_parent_id, - }; - - match Item::from_ty( - &applicable_cursor.cur_type(), - applicable_cursor, - Some(relevant_parent_id), - ctx, - ) { - Ok(ty) => return Ok(ty.into()), - Err(ParseError::Recurse) => return Err(ParseError::Recurse), - Err(ParseError::Continue) => {} - } - } - - match cursor.kind() { - // On Clang 18+, extern "C" is reported accurately as a LinkageSpec. - // Older LLVM treat it as UnexposedDecl. - CXCursor_LinkageSpec | CXCursor_UnexposedDecl => { - Err(ParseError::Recurse) - } - - // We allowlist cursors here known to be unhandled, to prevent being - // too noisy about this. - CXCursor_MacroDefinition | - CXCursor_MacroExpansion | - CXCursor_UsingDeclaration | - CXCursor_UsingDirective | - CXCursor_StaticAssert | - CXCursor_FunctionTemplate => { - debug!("Unhandled cursor kind {:?}: {cursor:?}", cursor.kind()); - Err(ParseError::Continue) - } - - CXCursor_InclusionDirective => { - let file = cursor.get_included_file_name(); - match file { - None => { - warn!("Inclusion of a nameless file in {cursor:?}"); - } - Some(included_file) => { - for cb in &ctx.options().parse_callbacks { - cb.include_file(&included_file); - } - - ctx.add_dep(included_file.into_boxed_str()); - } - } - Err(ParseError::Continue) - } - - _ => { - // ignore toplevel operator overloads - let spelling = cursor.spelling(); - if !spelling.starts_with("operator") { - warn!( - "Unhandled cursor kind {:?}: {cursor:?}", - cursor.kind(), - ); - } - Err(ParseError::Continue) - } - } - } - - /// Parse this item from the given Clang type, or if we haven't resolved all - /// the other items this one depends on, an unresolved reference. - pub(crate) fn from_ty_or_ref( - ty: clang::Type, - location: clang::Cursor, - parent_id: Option, - ctx: &mut BindgenContext, - ) -> TypeId { - let id = ctx.next_item_id(); - Self::from_ty_or_ref_with_id(id, ty, location, parent_id, ctx) - } - - /// Parse a C++ type. If we find a reference to a type that has not been - /// defined yet, use `UnresolvedTypeRef` as a placeholder. - /// - /// This logic is needed to avoid parsing items with the incorrect parent - /// and it's sort of complex to explain, so I'll just point to - /// `tests/headers/typeref.hpp` to see the kind of constructs that forced - /// this. - /// - /// Typerefs are resolved once parsing is completely done, see - /// `BindgenContext::resolve_typerefs`. 
- pub(crate) fn from_ty_or_ref_with_id( - potential_id: ItemId, - ty: clang::Type, - location: clang::Cursor, - parent_id: Option, - ctx: &mut BindgenContext, - ) -> TypeId { - debug!("from_ty_or_ref_with_id: {potential_id:?} {ty:?}, {location:?}, {parent_id:?}"); - - if ctx.collected_typerefs() { - debug!("refs already collected, resolving directly"); - return Item::from_ty_with_id( - potential_id, - &ty, - location, - parent_id, - ctx, - ) - .unwrap_or_else(|_| Item::new_opaque_type(potential_id, &ty, ctx)); - } - - if let Some(ty) = ctx.builtin_or_resolved_ty( - potential_id, - parent_id, - &ty, - Some(location), - ) { - debug!("{ty:?} already resolved: {location:?}"); - return ty; - } - - debug!("New unresolved type reference: {ty:?}, {location:?}"); - - let is_const = ty.is_const(); - let kind = TypeKind::UnresolvedTypeRef(ty, location, parent_id); - let current_module = ctx.current_module(); - - ctx.add_item( - Item::new( - potential_id, - None, - None, - parent_id.unwrap_or_else(|| current_module.into()), - ItemKind::Type(Type::new(None, None, kind, is_const)), - Some(location.location()), - ), - None, - None, - ); - potential_id.as_type_id_unchecked() - } - - /// Parse this item from the given Clang type. See [`Item::from_ty_with_id`]. - pub(crate) fn from_ty( - ty: &clang::Type, - location: clang::Cursor, - parent_id: Option, - ctx: &mut BindgenContext, - ) -> Result { - let id = ctx.next_item_id(); - Item::from_ty_with_id(id, ty, location, parent_id, ctx) - } - - /// This is one of the trickiest methods you'll find (probably along with - /// some of the ones that handle templates in `BindgenContext`). - /// - /// This method parses a type, given the potential ID of that type (if - /// parsing it was correct), an optional location we're scanning, which is - /// critical some times to obtain information, an optional parent item ID, - /// that will, if it's `None`, become the current module ID, and the - /// context. - pub(crate) fn from_ty_with_id( - id: ItemId, - ty: &clang::Type, - location: clang::Cursor, - parent_id: Option, - ctx: &mut BindgenContext, - ) -> Result { - use clang_sys::*; - - debug!( - "Item::from_ty_with_id: {id:?}\n\ - \tty = {ty:?},\n\ - \tlocation = {location:?}", - ); - - if ty.kind() == CXType_Unexposed || - location.cur_type().kind() == CXType_Unexposed - { - if ty.is_associated_type() || - location.cur_type().is_associated_type() - { - return Ok(Item::new_opaque_type(id, ty, ctx)); - } - - if let Some(param_id) = Item::type_param(None, location, ctx) { - return Ok(ctx.build_ty_wrapper(id, param_id, None, ty)); - } - } - - // Treat all types that are declared inside functions as opaque. The Rust binding - // won't be able to do anything with them anyway. - // - // (If we don't do this check here, we can have subtle logic bugs because we generally - // ignore function bodies. See issue #2036.) 
- if let Some(ref parent) = ty.declaration().fallible_semantic_parent() { - if FunctionKind::from_cursor(parent).is_some() { - debug!("Skipping type declared inside function: {ty:?}"); - return Ok(Item::new_opaque_type(id, ty, ctx)); - } - } - - let decl = { - let canonical_def = ty.canonical_type().declaration().definition(); - canonical_def.unwrap_or_else(|| ty.declaration()) - }; - - let comment = location.raw_comment().or_else(|| decl.raw_comment()); - - let annotations = - Annotations::new(&decl).or_else(|| Annotations::new(&location)); - - if let Some(ref annotations) = annotations { - if let Some(replaced) = annotations.use_instead_of() { - ctx.replace(replaced, id); - } - } - - if let Some(ty) = - ctx.builtin_or_resolved_ty(id, parent_id, ty, Some(location)) - { - return Ok(ty); - } - - // First, check we're not recursing. - let mut valid_decl = decl.kind() != CXCursor_NoDeclFound; - let declaration_to_look_for = if valid_decl { - decl.canonical() - } else if location.kind() == CXCursor_ClassTemplate { - valid_decl = true; - location - } else { - decl - }; - - if valid_decl { - if let Some(partial) = ctx - .currently_parsed_types() - .iter() - .find(|ty| *ty.decl() == declaration_to_look_for) - { - debug!("Avoiding recursion parsing type: {ty:?}"); - // Unchecked because we haven't finished this type yet. - return Ok(partial.id().as_type_id_unchecked()); - } - } - - let current_module = ctx.current_module().into(); - let partial_ty = PartialType::new(declaration_to_look_for, id); - if valid_decl { - ctx.begin_parsing(partial_ty); - } - - let result = Type::from_clang_ty(id, ty, location, parent_id, ctx); - let relevant_parent_id = parent_id.unwrap_or(current_module); - let ret = match result { - Ok(ParseResult::AlreadyResolved(ty)) => { - Ok(ty.as_type_id_unchecked()) - } - Ok(ParseResult::New(item, declaration)) => { - ctx.add_item( - Item::new( - id, - comment, - annotations, - relevant_parent_id, - ItemKind::Type(item), - Some(location.location()), - ), - declaration, - Some(location), - ); - Ok(id.as_type_id_unchecked()) - } - Err(ParseError::Continue) => Err(ParseError::Continue), - Err(ParseError::Recurse) => { - debug!("Item::from_ty recursing in the ast"); - let mut result = Err(ParseError::Recurse); - - // Need to pop here, otherwise we'll get stuck. - // - // TODO: Find a nicer interface, really. Also, the - // declaration_to_look_for suspiciously shares a lot of - // logic with ir::context, so we should refactor that. - if valid_decl { - let finished = ctx.finish_parsing(); - assert_eq!(*finished.decl(), declaration_to_look_for); - } - - location.visit(|cur| { - visit_child(cur, id, ty, parent_id, ctx, &mut result) - }); - - if valid_decl { - let partial_ty = - PartialType::new(declaration_to_look_for, id); - ctx.begin_parsing(partial_ty); - } - - // If we have recursed into the AST all we know, and we still - // haven't found what we've got, let's just try and make a named - // type. - // - // This is what happens with some template members, for example. - if let Err(ParseError::Recurse) = result { - warn!( - "Unknown type, assuming named template type: \ - id = {:?}; spelling = {}", - id, - ty.spelling() - ); - Item::type_param(Some(id), location, ctx) - .ok_or(ParseError::Recurse) - } else { - result - } - } - }; - - if valid_decl { - let partial_ty = ctx.finish_parsing(); - assert_eq!(*partial_ty.decl(), declaration_to_look_for); - } - - ret - } - - /// A named type is a template parameter, e.g., the `T` in `Foo`. 
They're always local so - /// it's the only exception when there's no declaration for a type. - pub(crate) fn type_param( - with_id: Option, - location: clang::Cursor, - ctx: &mut BindgenContext, - ) -> Option { - let ty = location.cur_type(); - - debug!( - "Item::type_param:\n\ - \twith_id = {:?},\n\ - \tty = {} {:?},\n\ - \tlocation: {:?}", - with_id, - ty.spelling(), - ty, - location - ); - - if ty.kind() != clang_sys::CXType_Unexposed { - // If the given cursor's type's kind is not Unexposed, then we - // aren't looking at a template parameter. This check may need to be - // updated in the future if they start properly exposing template - // type parameters. - return None; - } - - let ty_spelling = ty.spelling(); - - // Clang does not expose any information about template type parameters - // via their clang::Type, nor does it give us their canonical cursors - // the straightforward way. However, there are three situations from - // which we can find the definition of the template type parameter, if - // the cursor is indeed looking at some kind of a template type - // parameter or use of one: - // - // 1. The cursor is pointing at the template type parameter's - // definition. This is the trivial case. - // - // (kind = TemplateTypeParameter, ...) - // - // 2. The cursor is pointing at a TypeRef whose referenced() cursor is - // situation (1). - // - // (kind = TypeRef, - // referenced = (kind = TemplateTypeParameter, ...), - // ...) - // - // 3. The cursor is pointing at some use of a template type parameter - // (for example, in a FieldDecl), and this cursor has a child cursor - // whose spelling is the same as the parent's type's spelling, and whose - // kind is a TypeRef of the situation (2) variety. - // - // (kind = FieldDecl, - // type = (kind = Unexposed, - // spelling = "T", - // ...), - // children = - // (kind = TypeRef, - // spelling = "T", - // referenced = (kind = TemplateTypeParameter, - // spelling = "T", - // ...), - // ...) - // ...) - // - // TODO: The alternative to this hacky pattern matching would be to - // maintain proper scopes of template parameters while parsing and use - // de Brujin indices to access template parameters, which clang exposes - // in the cursor's type's canonical type's spelling: - // "type-parameter-x-y". That is probably a better approach long-term, - // but maintaining these scopes properly would require more changes to - // the whole libclang -> IR parsing code. - - fn is_template_with_spelling( - refd: &clang::Cursor, - spelling: &str, - ) -> bool { - static ANON_TYPE_PARAM_RE: OnceLock = OnceLock::new(); - let anon_type_param_re = ANON_TYPE_PARAM_RE.get_or_init(|| { - regex::Regex::new(r"^type\-parameter\-\d+\-\d+$").unwrap() - }); - - if refd.kind() != clang_sys::CXCursor_TemplateTypeParameter { - return false; - } - - let refd_spelling = refd.spelling(); - refd_spelling == spelling || - // Allow for anonymous template parameters. 
- (refd_spelling.is_empty() && anon_type_param_re.is_match(spelling.as_ref())) - } - - let definition = if is_template_with_spelling(&location, &ty_spelling) { - // Situation (1) - location - } else if location.kind() == clang_sys::CXCursor_TypeRef { - // Situation (2) - match location.referenced() { - Some(refd) - if is_template_with_spelling(&refd, &ty_spelling) => - { - refd - } - _ => return None, - } - } else { - // Situation (3) - let mut definition = None; - - location.visit(|child| { - let child_ty = child.cur_type(); - if child_ty.kind() == clang_sys::CXCursor_TypeRef && - child_ty.spelling() == ty_spelling - { - match child.referenced() { - Some(refd) - if is_template_with_spelling( - &refd, - &ty_spelling, - ) => - { - definition = Some(refd); - return clang_sys::CXChildVisit_Break; - } - _ => {} - } - } - - clang_sys::CXChildVisit_Continue - }); - - definition? - }; - assert!(is_template_with_spelling(&definition, &ty_spelling)); - - // Named types are always parented to the root module. They are never - // referenced with namespace prefixes, and they can't inherit anything - // from their parent either, so it is simplest to just hang them off - // something we know will always exist. - let parent = ctx.root_module().into(); - - if let Some(id) = ctx.get_type_param(&definition) { - return Some(if let Some(with_id) = with_id { - ctx.build_ty_wrapper(with_id, id, Some(parent), &ty) - } else { - id - }); - } - - // See tests/headers/const_tparam.hpp and - // tests/headers/variadic_tname.hpp. - let name = ty_spelling.replace("const ", "").replace('.', ""); - - let id = with_id.unwrap_or_else(|| ctx.next_item_id()); - let item = Item::new( - id, - None, - None, - parent, - ItemKind::Type(Type::named(name)), - Some(location.location()), - ); - ctx.add_type_param(item, definition); - Some(id.as_type_id_unchecked()) - } -} - -impl ItemCanonicalName for Item { - fn canonical_name(&self, ctx: &BindgenContext) -> String { - debug_assert!( - ctx.in_codegen_phase(), - "You're not supposed to call this yet" - ); - self.canonical_name - .get_or_init(|| { - let in_namespace = ctx.options().enable_cxx_namespaces || - ctx.options().disable_name_namespacing; - - if in_namespace { - self.name(ctx).within_namespaces().get() - } else { - self.name(ctx).get() - } - }) - .clone() - } -} - -impl ItemCanonicalPath for Item { - fn namespace_aware_canonical_path( - &self, - ctx: &BindgenContext, - ) -> Vec { - let mut path = self.canonical_path(ctx); - - // ASSUMPTION: (disable_name_namespacing && cxx_namespaces) - // is equivalent to - // disable_name_namespacing - if ctx.options().disable_name_namespacing { - // Only keep the last item in path - let split_idx = path.len() - 1; - path = path.split_off(split_idx); - } else if !ctx.options().enable_cxx_namespaces { - // Ignore first item "root" - path = vec![path[1..].join("_")]; - } - - if self.is_constified_enum_module(ctx) { - path.push(CONSTIFIED_ENUM_MODULE_REPR_NAME.into()); - } - - path - } - - fn canonical_path(&self, ctx: &BindgenContext) -> Vec { - self.compute_path(ctx, UserMangled::Yes) - } -} - -/// Whether to use the user-mangled name (mangled by the `item_name` callback or -/// not. -/// -/// Most of the callers probably want just yes, but the ones dealing with -/// allowlisting and blocklisting don't. -#[derive(Copy, Clone, Debug, PartialEq)] -enum UserMangled { - No, - Yes, -} - -/// Builder struct for naming variations, which hold inside different -/// flags for naming options. 
-#[derive(Debug)] -pub(crate) struct NameOptions<'a> { - item: &'a Item, - ctx: &'a BindgenContext, - within_namespaces: bool, - user_mangled: UserMangled, -} - -impl<'a> NameOptions<'a> { - /// Construct a new `NameOptions` - pub(crate) fn new(item: &'a Item, ctx: &'a BindgenContext) -> Self { - NameOptions { - item, - ctx, - within_namespaces: false, - user_mangled: UserMangled::Yes, - } - } - - /// Construct the name without the item's containing C++ namespaces mangled - /// into it. In other words, the item's name within the item's namespace. - pub(crate) fn within_namespaces(&mut self) -> &mut Self { - self.within_namespaces = true; - self - } - - fn user_mangled(&mut self, user_mangled: UserMangled) -> &mut Self { - self.user_mangled = user_mangled; - self - } - - /// Construct a name `String` - pub(crate) fn get(&self) -> String { - self.item.real_canonical_name(self.ctx, self) - } -} diff --git a/vendor/bindgen/ir/item_kind.rs b/vendor/bindgen/ir/item_kind.rs deleted file mode 100644 index 9221b50579b523..00000000000000 --- a/vendor/bindgen/ir/item_kind.rs +++ /dev/null @@ -1,135 +0,0 @@ -//! Different variants of an `Item` in our intermediate representation. - -use super::context::BindgenContext; -use super::dot::DotAttributes; -use super::function::Function; -use super::module::Module; -use super::ty::Type; -use super::var::Var; -use std::io; - -/// A item we parse and translate. -#[derive(Debug)] -pub(crate) enum ItemKind { - /// A module, created implicitly once (the root module), or via C++ - /// namespaces. - Module(Module), - - /// A type declared in any of the multiple ways it can be declared. - Type(Type), - - /// A function or method declaration. - Function(Function), - - /// A variable declaration, most likely a static. - Var(Var), -} - -impl ItemKind { - /// Get a reference to this `ItemKind`'s underlying `Module`, or `None` if it - /// is some other kind. - pub(crate) fn as_module(&self) -> Option<&Module> { - match *self { - ItemKind::Module(ref module) => Some(module), - _ => None, - } - } - - /// Transform our `ItemKind` into a string. - pub(crate) fn kind_name(&self) -> &'static str { - match *self { - ItemKind::Module(..) => "Module", - ItemKind::Type(..) => "Type", - ItemKind::Function(..) => "Function", - ItemKind::Var(..) => "Var", - } - } - - /// Is this a module? - pub(crate) fn is_module(&self) -> bool { - self.as_module().is_some() - } - - /// Get a reference to this `ItemKind`'s underlying `Function`, or `None` if - /// it is some other kind. - pub(crate) fn as_function(&self) -> Option<&Function> { - match *self { - ItemKind::Function(ref func) => Some(func), - _ => None, - } - } - - /// Is this a function? - pub(crate) fn is_function(&self) -> bool { - self.as_function().is_some() - } - - /// Get a reference to this `ItemKind`'s underlying `Function`, or panic if - /// it is some other kind. - pub(crate) fn expect_function(&self) -> &Function { - self.as_function().expect("Not a function") - } - - /// Get a reference to this `ItemKind`'s underlying `Type`, or `None` if - /// it is some other kind. - pub(crate) fn as_type(&self) -> Option<&Type> { - match *self { - ItemKind::Type(ref ty) => Some(ty), - _ => None, - } - } - - /// Get a mutable reference to this `ItemKind`'s underlying `Type`, or `None` - /// if it is some other kind. - pub(crate) fn as_type_mut(&mut self) -> Option<&mut Type> { - match *self { - ItemKind::Type(ref mut ty) => Some(ty), - _ => None, - } - } - - /// Is this a type? 
- pub(crate) fn is_type(&self) -> bool { - self.as_type().is_some() - } - - /// Get a reference to this `ItemKind`'s underlying `Type`, or panic if it is - /// some other kind. - pub(crate) fn expect_type(&self) -> &Type { - self.as_type().expect("Not a type") - } - - /// Get a reference to this `ItemKind`'s underlying `Var`, or `None` if it is - /// some other kind. - pub(crate) fn as_var(&self) -> Option<&Var> { - match *self { - ItemKind::Var(ref v) => Some(v), - _ => None, - } - } - - /// Is this a variable? - pub(crate) fn is_var(&self) -> bool { - self.as_var().is_some() - } -} - -impl DotAttributes for ItemKind { - fn dot_attributes( - &self, - ctx: &BindgenContext, - out: &mut W, - ) -> io::Result<()> - where - W: io::Write, - { - writeln!(out, "kind{}", self.kind_name())?; - - match *self { - ItemKind::Module(ref module) => module.dot_attributes(ctx, out), - ItemKind::Type(ref ty) => ty.dot_attributes(ctx, out), - ItemKind::Function(ref func) => func.dot_attributes(ctx, out), - ItemKind::Var(ref var) => var.dot_attributes(ctx, out), - } - } -} diff --git a/vendor/bindgen/ir/layout.rs b/vendor/bindgen/ir/layout.rs deleted file mode 100644 index 905e47c732a225..00000000000000 --- a/vendor/bindgen/ir/layout.rs +++ /dev/null @@ -1,126 +0,0 @@ -//! Intermediate representation for the physical layout of some type. - -use super::derive::CanDerive; -use super::ty::{Type, TypeKind, RUST_DERIVE_IN_ARRAY_LIMIT}; -use crate::clang; -use crate::ir::context::BindgenContext; -use std::cmp; - -/// A type that represents the struct layout of a type. -#[derive(Debug, Clone, Copy, PartialEq, Eq)] -pub(crate) struct Layout { - /// The size (in bytes) of this layout. - pub(crate) size: usize, - /// The alignment (in bytes) of this layout. - pub(crate) align: usize, - /// Whether this layout's members are packed or not. - pub(crate) packed: bool, -} - -#[test] -fn test_layout_for_size() { - use std::mem::size_of; - let ptr_size = size_of::<*mut ()>(); - assert_eq!( - Layout::for_size_internal(ptr_size, ptr_size), - Layout::new(ptr_size, ptr_size) - ); - assert_eq!( - Layout::for_size_internal(ptr_size, 3 * ptr_size), - Layout::new(3 * ptr_size, ptr_size) - ); -} - -impl Layout { - /// Gets the integer type name for a given known size. - pub(crate) fn known_type_for_size(size: usize) -> Option { - Some(match size { - 16 => syn::parse_quote! { u128 }, - 8 => syn::parse_quote! { u64 }, - 4 => syn::parse_quote! { u32 }, - 2 => syn::parse_quote! { u16 }, - 1 => syn::parse_quote! { u8 }, - _ => return None, - }) - } - - /// Construct a new `Layout` with the given `size` and `align`. It is not - /// packed. - pub(crate) fn new(size: usize, align: usize) -> Self { - Layout { - size, - align, - packed: false, - } - } - - fn for_size_internal(ptr_size: usize, size: usize) -> Self { - let mut next_align = 2; - while size % next_align == 0 && next_align <= ptr_size { - next_align *= 2; - } - Layout { - size, - align: next_align / 2, - packed: false, - } - } - - /// Creates a non-packed layout for a given size, trying to use the maximum - /// alignment possible. - pub(crate) fn for_size(ctx: &BindgenContext, size: usize) -> Self { - Self::for_size_internal(ctx.target_pointer_size(), size) - } - - /// Get this layout as an opaque type. - pub(crate) fn opaque(&self) -> Opaque { - Opaque(*self) - } -} - -/// When we are treating a type as opaque, it is just a blob with a `Layout`. 
-#[derive(Clone, Debug, PartialEq, Eq)] -pub(crate) struct Opaque(pub(crate) Layout); - -impl Opaque { - /// Construct a new opaque type from the given clang type. - pub(crate) fn from_clang_ty( - ty: &clang::Type, - ctx: &BindgenContext, - ) -> Type { - let layout = Layout::new(ty.size(ctx), ty.align(ctx)); - let ty_kind = TypeKind::Opaque; - let is_const = ty.is_const(); - Type::new(None, Some(layout), ty_kind, is_const) - } - - /// Return the known rust type we should use to create a correctly-aligned - /// field with this layout. - pub(crate) fn known_rust_type_for_array(&self) -> Option { - Layout::known_type_for_size(self.0.align) - } - - /// Return the array size that an opaque type for this layout should have if - /// we know the correct type for it, or `None` otherwise. - pub(crate) fn array_size(&self) -> Option { - if self.known_rust_type_for_array().is_some() { - Some(self.0.size / cmp::max(self.0.align, 1)) - } else { - None - } - } - - /// Return `true` if this opaque layout's array size will fit within the - /// maximum number of array elements that Rust allows deriving traits - /// with. Return `false` otherwise. - pub(crate) fn array_size_within_derive_limit(&self) -> CanDerive { - if self - .array_size() - .is_some_and(|size| size <= RUST_DERIVE_IN_ARRAY_LIMIT) - { - CanDerive::Yes - } else { - CanDerive::Manually - } - } -} diff --git a/vendor/bindgen/ir/mod.rs b/vendor/bindgen/ir/mod.rs deleted file mode 100644 index acdb4896cda7cc..00000000000000 --- a/vendor/bindgen/ir/mod.rs +++ /dev/null @@ -1,25 +0,0 @@ -//! The ir module defines bindgen's intermediate representation. -//! -//! Parsing C/C++ generates the IR, while code generation outputs Rust code from -//! the IR. -#![deny(clippy::missing_docs_in_private_items)] - -pub(crate) mod analysis; -pub(crate) mod annotations; -pub(crate) mod comment; -pub(crate) mod comp; -pub(crate) mod context; -pub(crate) mod derive; -pub(crate) mod dot; -pub(crate) mod enum_ty; -pub(crate) mod function; -pub(crate) mod int; -pub(crate) mod item; -pub(crate) mod item_kind; -pub(crate) mod layout; -pub(crate) mod module; -pub(crate) mod objc; -pub(crate) mod template; -pub(crate) mod traversal; -pub(crate) mod ty; -pub(crate) mod var; diff --git a/vendor/bindgen/ir/module.rs b/vendor/bindgen/ir/module.rs deleted file mode 100644 index 4788cf4285fc17..00000000000000 --- a/vendor/bindgen/ir/module.rs +++ /dev/null @@ -1,96 +0,0 @@ -//! Intermediate representation for modules (AKA C++ namespaces). - -use super::context::BindgenContext; -use super::dot::DotAttributes; -use super::item::ItemSet; -use crate::clang; -use crate::parse::{ClangSubItemParser, ParseError, ParseResult}; -use crate::parse_one; - -use std::io; - -/// Whether this module is inline or not. -#[derive(Debug, Copy, Clone, PartialEq, Eq)] -pub(crate) enum ModuleKind { - /// This module is not inline. - Normal, - /// This module is inline, as in `inline namespace foo {}`. - Inline, -} - -/// A module, as in, a C++ namespace. -#[derive(Clone, Debug)] -pub(crate) struct Module { - /// The name of the module, or none if it's anonymous. - name: Option, - /// The kind of module this is. - kind: ModuleKind, - /// The children of this module, just here for convenience. - children: ItemSet, -} - -impl Module { - /// Construct a new `Module`. - pub(crate) fn new(name: Option, kind: ModuleKind) -> Self { - Module { - name, - kind, - children: ItemSet::new(), - } - } - - /// Get this module's name. 
- pub(crate) fn name(&self) -> Option<&str> { - self.name.as_deref() - } - - /// Get a mutable reference to this module's children. - pub(crate) fn children_mut(&mut self) -> &mut ItemSet { - &mut self.children - } - - /// Get this module's children. - pub(crate) fn children(&self) -> &ItemSet { - &self.children - } - - /// Whether this namespace is inline. - pub(crate) fn is_inline(&self) -> bool { - self.kind == ModuleKind::Inline - } -} - -impl DotAttributes for Module { - fn dot_attributes( - &self, - _ctx: &BindgenContext, - out: &mut W, - ) -> io::Result<()> - where - W: io::Write, - { - writeln!(out, "ModuleKind{:?}", self.kind) - } -} - -impl ClangSubItemParser for Module { - fn parse( - cursor: clang::Cursor, - ctx: &mut BindgenContext, - ) -> Result, ParseError> { - use clang_sys::*; - match cursor.kind() { - CXCursor_Namespace => { - let module_id = ctx.module(cursor); - ctx.with_module(module_id, |ctx| { - cursor.visit_sorted(ctx, |ctx, child| { - parse_one(ctx, child, Some(module_id.into())); - }); - }); - - Ok(ParseResult::AlreadyResolved(module_id.into())) - } - _ => Err(ParseError::Continue), - } - } -} diff --git a/vendor/bindgen/ir/objc.rs b/vendor/bindgen/ir/objc.rs deleted file mode 100644 index 6cdadb131d45a2..00000000000000 --- a/vendor/bindgen/ir/objc.rs +++ /dev/null @@ -1,343 +0,0 @@ -//! Objective C types - -use super::context::{BindgenContext, ItemId}; -use super::function::FunctionSig; -use super::item::Item; -use super::traversal::{Trace, Tracer}; -use super::ty::TypeKind; -use crate::clang; -use clang_sys::CXChildVisit_Continue; -use clang_sys::CXCursor_ObjCCategoryDecl; -use clang_sys::CXCursor_ObjCClassMethodDecl; -use clang_sys::CXCursor_ObjCClassRef; -use clang_sys::CXCursor_ObjCInstanceMethodDecl; -use clang_sys::CXCursor_ObjCProtocolDecl; -use clang_sys::CXCursor_ObjCProtocolRef; -use clang_sys::CXCursor_ObjCSuperClassRef; -use clang_sys::CXCursor_TemplateTypeParameter; -use proc_macro2::{Ident, Span, TokenStream}; - -/// Objective-C interface as used in `TypeKind` -/// -/// Also, protocols and categories are parsed as this type -#[derive(Debug)] -pub(crate) struct ObjCInterface { - /// The name - /// like, `NSObject` - name: String, - - category: Option, - - is_protocol: bool, - - /// The list of template names almost always, `ObjectType` or `KeyType` - pub(crate) template_names: Vec, - - /// The list of protocols that this interface conforms to. - pub(crate) conforms_to: Vec, - - /// The direct parent for this interface. - pub(crate) parent_class: Option, - - /// List of the methods defined in this interface - methods: Vec, - - class_methods: Vec, -} - -/// The objective c methods -#[derive(Debug)] -pub(crate) struct ObjCMethod { - /// The original method selector name - /// like, dataWithBytes:length: - name: String, - - /// Method name as converted to rust - /// like, `dataWithBytes_length`_ - rust_name: String, - - signature: FunctionSig, - - /// Is class method? 
- is_class_method: bool, -} - -impl ObjCInterface { - fn new(name: &str) -> ObjCInterface { - ObjCInterface { - name: name.to_owned(), - category: None, - is_protocol: false, - template_names: Vec::new(), - parent_class: None, - conforms_to: Vec::new(), - methods: Vec::new(), - class_methods: Vec::new(), - } - } - - /// The name - /// like, `NSObject` - pub(crate) fn name(&self) -> &str { - self.name.as_ref() - } - - /// Formats the name for rust - /// Can be like `NSObject`, but with categories might be like `NSObject_NSCoderMethods` - /// and protocols are like `PNSObject` - pub(crate) fn rust_name(&self) -> String { - if let Some(ref cat) = self.category { - format!("{}_{cat}", self.name()) - } else if self.is_protocol { - format!("P{}", self.name()) - } else { - format!("I{}", self.name().to_owned()) - } - } - - /// Is this a template interface? - pub(crate) fn is_template(&self) -> bool { - !self.template_names.is_empty() - } - - /// List of the methods defined in this interface - pub(crate) fn methods(&self) -> &Vec { - &self.methods - } - - /// Is this a protocol? - pub(crate) fn is_protocol(&self) -> bool { - self.is_protocol - } - - /// Is this a category? - pub(crate) fn is_category(&self) -> bool { - self.category.is_some() - } - - /// List of the class methods defined in this interface - pub(crate) fn class_methods(&self) -> &Vec { - &self.class_methods - } - - /// Parses the Objective C interface from the cursor - pub(crate) fn from_ty( - cursor: &clang::Cursor, - ctx: &mut BindgenContext, - ) -> Option { - let name = cursor.spelling(); - let mut interface = Self::new(&name); - - if cursor.kind() == CXCursor_ObjCProtocolDecl { - interface.is_protocol = true; - } - - cursor.visit(|c| { - match c.kind() { - CXCursor_ObjCClassRef => { - if cursor.kind() == CXCursor_ObjCCategoryDecl { - // We are actually a category extension, and we found the reference - // to the original interface, so name this interface appropriately - interface.name = c.spelling(); - interface.category = Some(cursor.spelling()); - } - } - CXCursor_ObjCProtocolRef => { - // Gather protocols this interface conforms to - let needle = format!("P{}", c.spelling()); - let items_map = ctx.items(); - debug!( - "Interface {} conforms to {needle}, find the item", - interface.name, - ); - - for (id, item) in items_map { - if let Some(ty) = item.as_type() { - if let TypeKind::ObjCInterface(ref protocol) = - *ty.kind() - { - if protocol.is_protocol { - debug!( - "Checking protocol {}, ty.name {:?}", - protocol.name, - ty.name() - ); - if Some(needle.as_ref()) == ty.name() { - debug!("Found conforming protocol {item:?}"); - interface.conforms_to.push(id); - break; - } - } - } - } - } - } - CXCursor_ObjCInstanceMethodDecl | - CXCursor_ObjCClassMethodDecl => { - let name = c.spelling(); - let signature = - FunctionSig::from_ty(&c.cur_type(), &c, ctx) - .expect("Invalid function sig"); - let is_class_method = - c.kind() == CXCursor_ObjCClassMethodDecl; - let method = - ObjCMethod::new(&name, signature, is_class_method); - interface.add_method(method); - } - CXCursor_TemplateTypeParameter => { - let name = c.spelling(); - interface.template_names.push(name); - } - CXCursor_ObjCSuperClassRef => { - let item = Item::from_ty_or_ref(c.cur_type(), c, None, ctx); - interface.parent_class = Some(item.into()); - } - _ => {} - } - CXChildVisit_Continue - }); - Some(interface) - } - - fn add_method(&mut self, method: ObjCMethod) { - if method.is_class_method { - self.class_methods.push(method); - } else { - self.methods.push(method); - 
} - } -} - -impl ObjCMethod { - fn new( - name: &str, - signature: FunctionSig, - is_class_method: bool, - ) -> ObjCMethod { - let split_name: Vec<&str> = name.split(':').collect(); - - let rust_name = split_name.join("_"); - - ObjCMethod { - name: name.to_owned(), - rust_name, - signature, - is_class_method, - } - } - - /// Method name as converted to rust - /// like, `dataWithBytes_length`_ - pub(crate) fn rust_name(&self) -> &str { - self.rust_name.as_ref() - } - - /// Returns the methods signature as `FunctionSig` - pub(crate) fn signature(&self) -> &FunctionSig { - &self.signature - } - - /// Is this a class method? - pub(crate) fn is_class_method(&self) -> bool { - self.is_class_method - } - - /// Formats the method call - pub(crate) fn format_method_call( - &self, - args: &[TokenStream], - ) -> TokenStream { - let split_name: Vec> = self - .name - .split(':') - .enumerate() - .map(|(idx, name)| { - if name.is_empty() { - None - } else if idx == 0 { - // Try to parse the method name as an identifier. Having a keyword is ok - // unless it is `crate`, `self`, `super` or `Self`, so we try to add the `_` - // suffix to it and parse it. - if ["crate", "self", "super", "Self"].contains(&name) { - Some(Ident::new(&format!("{name}_"), Span::call_site())) - } else { - Some(Ident::new(name, Span::call_site())) - } - } else { - // Try to parse the current joining name as an identifier. This might fail if the name - // is a keyword, so we try to "r#" to it and parse again, this could also fail - // if the name is `crate`, `self`, `super` or `Self`, so we try to add the `_` - // suffix to it and parse again. If this also fails, we panic with the first - // error. - Some( - syn::parse_str::(name) - .or_else(|err| { - syn::parse_str::(&format!("r#{name}")) - .map_err(|_| err) - }) - .or_else(|err| { - syn::parse_str::(&format!("{name}_")) - .map_err(|_| err) - }) - .expect("Invalid identifier"), - ) - } - }) - .collect(); - - // No arguments - if args.is_empty() && split_name.len() == 1 { - let name = &split_name[0]; - return quote! { - #name - }; - } - - // Check right amount of arguments - assert_eq!(args.len(), split_name.len() - 1, "Incorrect method name or arguments for objc method, {args:?} vs {split_name:?}"); - - // Get arguments without type signatures to pass to `msg_send!` - let mut args_without_types = vec![]; - for arg in args { - let arg = arg.to_string(); - let name_and_sig: Vec<&str> = arg.split(' ').collect(); - let name = name_and_sig[0]; - args_without_types.push(Ident::new(name, Span::call_site())); - } - - let args = split_name.into_iter().zip(args_without_types).map( - |(arg, arg_val)| { - if let Some(arg) = arg { - quote! { #arg: #arg_val } - } else { - quote! { #arg_val: #arg_val } - } - }, - ); - - quote! { - #( #args )* - } - } -} - -impl Trace for ObjCInterface { - type Extra = (); - - fn trace(&self, context: &BindgenContext, tracer: &mut T, _: &()) - where - T: Tracer, - { - for method in &self.methods { - method.signature.trace(context, tracer, &()); - } - - for class_method in &self.class_methods { - class_method.signature.trace(context, tracer, &()); - } - - for protocol in &self.conforms_to { - tracer.visit(*protocol); - } - } -} diff --git a/vendor/bindgen/ir/template.rs b/vendor/bindgen/ir/template.rs deleted file mode 100644 index 7f3667879d98eb..00000000000000 --- a/vendor/bindgen/ir/template.rs +++ /dev/null @@ -1,335 +0,0 @@ -//! Template declaration and instantiation related things. -//! -//! 
The nomenclature surrounding templates is often confusing, so here are a few -//! brief definitions: -//! -//! * "Template definition": a class/struct/alias/function definition that takes -//! generic template parameters. For example: -//! -//! ```c++ -//! template -//! class List { -//! // ... -//! }; -//! ``` -//! -//! * "Template instantiation": an instantiation is a use of a template with -//! concrete template arguments. For example, `List`. -//! -//! * "Template specialization": an alternative template definition providing a -//! custom definition for instantiations with the matching template -//! arguments. This C++ feature is unsupported by bindgen. For example: -//! -//! ```c++ -//! template<> -//! class List { -//! // Special layout for int lists... -//! }; -//! ``` - -use super::context::{BindgenContext, ItemId, TypeId}; -use super::item::{IsOpaque, Item, ItemAncestors}; -use super::traversal::{EdgeKind, Trace, Tracer}; -use crate::clang; - -/// Template declaration (and such declaration's template parameters) related -/// methods. -/// -/// This trait's methods distinguish between `None` and `Some([])` for -/// declarations that are not templates and template declarations with zero -/// parameters, in general. -/// -/// Consider this example: -/// -/// ```c++ -/// template -/// class Foo { -/// T use_of_t; -/// U use_of_u; -/// -/// template -/// using Bar = V*; -/// -/// class Inner { -/// T x; -/// U y; -/// Bar z; -/// }; -/// -/// template -/// class Lol { -/// // No use of W, but here's a use of T. -/// T t; -/// }; -/// -/// template -/// class Wtf { -/// // X is not used because W is not used. -/// Lol lololol; -/// }; -/// }; -/// -/// class Qux { -/// int y; -/// }; -/// ``` -/// -/// The following table depicts the results of each trait method when invoked on -/// each of the declarations above: -/// -/// |Decl. | self_template_params | num_self_template_params | all_template_parameters | -/// |------|----------------------|--------------------------|-------------------------| -/// |Foo | T, U | 2 | T, U | -/// |Bar | V | 1 | T, U, V | -/// |Inner | | 0 | T, U | -/// |Lol | W | 1 | T, U, W | -/// |Wtf | X | 1 | T, U, X | -/// |Qux | | 0 | | -/// -/// | Decl. | used_template_params | -/// |-------|----------------------| -/// | Foo | T, U | -/// | Bar | V | -/// | Inner | | -/// | Lol | T | -/// | Wtf | T | -/// | Qux | | -pub(crate) trait TemplateParameters: Sized { - /// Get the set of `ItemId`s that make up this template declaration's free - /// template parameters. - /// - /// Note that these might *not* all be named types: C++ allows - /// constant-value template parameters as well as template-template - /// parameters. Of course, Rust does not allow generic parameters to be - /// anything but types, so we must treat them as opaque, and avoid - /// instantiating them. - fn self_template_params(&self, ctx: &BindgenContext) -> Vec; - - /// Get the number of free template parameters this template declaration - /// has. - fn num_self_template_params(&self, ctx: &BindgenContext) -> usize { - self.self_template_params(ctx).len() - } - - /// Get the complete set of template parameters that can affect this - /// declaration. - /// - /// Note that this item doesn't need to be a template declaration itself for - /// `Some` to be returned here (in contrast to `self_template_params`). If - /// this item is a member of a template declaration, then the parent's - /// template parameters are included here. 
- /// - /// In the example above, `Inner` depends on both of the `T` and `U` type - /// parameters, even though it is not itself a template declaration and - /// therefore has no type parameters itself. Perhaps it helps to think about - /// how we would fully reference such a member type in C++: - /// `Foo::Inner`. `Foo` *must* be instantiated with template - /// arguments before we can gain access to the `Inner` member type. - fn all_template_params(&self, ctx: &BindgenContext) -> Vec - where - Self: ItemAncestors, - { - let mut ancestors: Vec<_> = self.ancestors(ctx).collect(); - ancestors.reverse(); - ancestors - .into_iter() - .flat_map(|id| id.self_template_params(ctx).into_iter()) - .collect() - } - - /// Get only the set of template parameters that this item uses. This is a - /// subset of `all_template_params` and does not necessarily contain any of - /// `self_template_params`. - fn used_template_params(&self, ctx: &BindgenContext) -> Vec - where - Self: AsRef, - { - assert!( - ctx.in_codegen_phase(), - "template parameter usage is not computed until codegen" - ); - - let id = *self.as_ref(); - ctx.resolve_item(id) - .all_template_params(ctx) - .into_iter() - .filter(|p| ctx.uses_template_parameter(id, *p)) - .collect() - } -} - -/// A trait for things which may or may not be a named template type parameter. -pub(crate) trait AsTemplateParam { - /// Any extra information the implementor might need to make this decision. - type Extra; - - /// Convert this thing to the item ID of a named template type parameter. - fn as_template_param( - &self, - ctx: &BindgenContext, - extra: &Self::Extra, - ) -> Option; - - /// Is this a named template type parameter? - fn is_template_param( - &self, - ctx: &BindgenContext, - extra: &Self::Extra, - ) -> bool { - self.as_template_param(ctx, extra).is_some() - } -} - -/// A concrete instantiation of a generic template. -#[derive(Clone, Debug)] -pub(crate) struct TemplateInstantiation { - /// The template definition which this is instantiating. - definition: TypeId, - /// The concrete template arguments, which will be substituted in the - /// definition for the generic template parameters. - args: Vec, -} - -impl TemplateInstantiation { - /// Construct a new template instantiation from the given parts. - pub(crate) fn new(definition: TypeId, args: I) -> TemplateInstantiation - where - I: IntoIterator, - { - TemplateInstantiation { - definition, - args: args.into_iter().collect(), - } - } - - /// Get the template definition for this instantiation. - pub(crate) fn template_definition(&self) -> TypeId { - self.definition - } - - /// Get the concrete template arguments used in this instantiation. - pub(crate) fn template_arguments(&self) -> &[TypeId] { - &self.args[..] - } - - /// Parse a `TemplateInstantiation` from a clang `Type`. 
- pub(crate) fn from_ty( - ty: &clang::Type, - ctx: &mut BindgenContext, - ) -> Option { - use clang_sys::*; - - let template_args = ty.template_args().map_or(vec![], |args| match ty - .canonical_type() - .template_args() - { - Some(canonical_args) => { - let arg_count = args.len(); - args.chain(canonical_args.skip(arg_count)) - .filter(|t| t.kind() != CXType_Invalid) - .map(|t| { - Item::from_ty_or_ref(t, t.declaration(), None, ctx) - }) - .collect() - } - None => args - .filter(|t| t.kind() != CXType_Invalid) - .map(|t| Item::from_ty_or_ref(t, t.declaration(), None, ctx)) - .collect(), - }); - - let declaration = ty.declaration(); - let definition = if declaration.kind() == CXCursor_TypeAliasTemplateDecl - { - Some(declaration) - } else { - declaration.specialized().or_else(|| { - let mut template_ref = None; - ty.declaration().visit(|child| { - if child.kind() == CXCursor_TemplateRef { - template_ref = Some(child); - return CXVisit_Break; - } - - // Instantiations of template aliases might have the - // TemplateRef to the template alias definition arbitrarily - // deep, so we need to recurse here and not only visit - // direct children. - CXChildVisit_Recurse - }); - - template_ref.and_then(|cur| cur.referenced()) - }) - }; - - let Some(definition) = definition else { - if !ty.declaration().is_builtin() { - warn!( - "Could not find template definition for template \ - instantiation" - ); - } - return None; - }; - - let template_definition = - Item::from_ty_or_ref(definition.cur_type(), definition, None, ctx); - - Some(TemplateInstantiation::new( - template_definition, - template_args, - )) - } -} - -impl IsOpaque for TemplateInstantiation { - type Extra = Item; - - /// Is this an opaque template instantiation? - fn is_opaque(&self, ctx: &BindgenContext, item: &Item) -> bool { - if self.template_definition().is_opaque(ctx, &()) { - return true; - } - - // TODO(#774): This doesn't properly handle opaque instantiations where - // an argument is itself an instantiation because `canonical_name` does - // not insert the template arguments into the name, ie it for nested - // template arguments it creates "Foo" instead of "Foo". The fully - // correct fix is to make `canonical_{name,path}` include template - // arguments properly. - - let mut path = item.path_for_allowlisting(ctx).clone(); - let args: Vec<_> = self - .template_arguments() - .iter() - .map(|arg| { - let arg_path = - ctx.resolve_item(*arg).path_for_allowlisting(ctx); - arg_path[1..].join("::") - }) - .collect(); - { - let last = path.last_mut().unwrap(); - last.push('<'); - last.push_str(&args.join(", ")); - last.push('>'); - } - - ctx.opaque_by_name(&path) - } -} - -impl Trace for TemplateInstantiation { - type Extra = (); - - fn trace(&self, _ctx: &BindgenContext, tracer: &mut T, _: &()) - where - T: Tracer, - { - tracer - .visit_kind(self.definition.into(), EdgeKind::TemplateDeclaration); - for arg in self.template_arguments() { - tracer.visit_kind(arg.into(), EdgeKind::TemplateArgument); - } - } -} diff --git a/vendor/bindgen/ir/traversal.rs b/vendor/bindgen/ir/traversal.rs deleted file mode 100644 index 01f3a8bd507f4a..00000000000000 --- a/vendor/bindgen/ir/traversal.rs +++ /dev/null @@ -1,478 +0,0 @@ -//! Traversal of the graph of IR items and types. 
- -use super::context::{BindgenContext, ItemId}; -use super::item::ItemSet; -use std::collections::{BTreeMap, VecDeque}; - -/// An outgoing edge in the IR graph is a reference from some item to another -/// item: -/// -/// from --> to -/// -/// The `from` is left implicit: it is the concrete `Trace` implementer which -/// yielded this outgoing edge. -#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] -pub(crate) struct Edge { - to: ItemId, - kind: EdgeKind, -} - -impl Edge { - /// Construct a new edge whose referent is `to` and is of the given `kind`. - pub(crate) fn new(to: ItemId, kind: EdgeKind) -> Edge { - Edge { to, kind } - } -} - -impl From for ItemId { - fn from(val: Edge) -> Self { - val.to - } -} - -/// The kind of edge reference. This is useful when we wish to only consider -/// certain kinds of edges for a particular traversal or analysis. -#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] -pub(crate) enum EdgeKind { - /// A generic, catch-all edge. - Generic, - - /// An edge from a template declaration, to the definition of a named type - /// parameter. For example, the edge from `Foo` to `T` in the following - /// snippet: - /// - /// ```C++ - /// template - /// class Foo { }; - /// ``` - TemplateParameterDefinition, - - /// An edge from a template instantiation to the template declaration that - /// is being instantiated. For example, the edge from `Foo` to - /// to `Foo`: - /// - /// ```C++ - /// template - /// class Foo { }; - /// - /// using Bar = Foo; - /// ``` - TemplateDeclaration, - - /// An edge from a template instantiation to its template argument. For - /// example, `Foo` to `Bar`: - /// - /// ```C++ - /// template - /// class Foo { }; - /// - /// class Bar { }; - /// - /// using FooBar = Foo; - /// ``` - TemplateArgument, - - /// An edge from a compound type to one of its base member types. For - /// example, the edge from `Bar` to `Foo`: - /// - /// ```C++ - /// class Foo { }; - /// - /// class Bar : public Foo { }; - /// ``` - BaseMember, - - /// An edge from a compound type to the types of one of its fields. For - /// example, the edge from `Foo` to `int`: - /// - /// ```C++ - /// class Foo { - /// int x; - /// }; - /// ``` - Field, - - /// An edge from an class or struct type to an inner type member. For - /// example, the edge from `Foo` to `Foo::Bar` here: - /// - /// ```C++ - /// class Foo { - /// struct Bar { }; - /// }; - /// ``` - InnerType, - - /// An edge from an class or struct type to an inner static variable. For - /// example, the edge from `Foo` to `Foo::BAR` here: - /// - /// ```C++ - /// class Foo { - /// static const char* BAR; - /// }; - /// ``` - InnerVar, - - /// An edge from a class or struct type to one of its method functions. For - /// example, the edge from `Foo` to `Foo::bar`: - /// - /// ```C++ - /// class Foo { - /// bool bar(int x, int y); - /// }; - /// ``` - Method, - - /// An edge from a class or struct type to one of its constructor - /// functions. For example, the edge from `Foo` to `Foo::Foo(int x, int y)`: - /// - /// ```C++ - /// class Foo { - /// int my_x; - /// int my_y; - /// - /// public: - /// Foo(int x, int y); - /// }; - /// ``` - Constructor, - - /// An edge from a class or struct type to its destructor function. For - /// example, the edge from `Doggo` to `Doggo::~Doggo()`: - /// - /// ```C++ - /// struct Doggo { - /// char* wow; - /// - /// public: - /// ~Doggo(); - /// }; - /// ``` - Destructor, - - /// An edge from a function declaration to its return type. 
For example, the - /// edge from `foo` to `int`: - /// - /// ```C++ - /// int foo(char* string); - /// ``` - FunctionReturn, - - /// An edge from a function declaration to one of its parameter types. For - /// example, the edge from `foo` to `char*`: - /// - /// ```C++ - /// int foo(char* string); - /// ``` - FunctionParameter, - - /// An edge from a static variable to its type. For example, the edge from - /// `FOO` to `const char*`: - /// - /// ```C++ - /// static const char* FOO; - /// ``` - VarType, - - /// An edge from a non-templated alias or typedef to the referenced type. - TypeReference, -} - -/// A predicate to allow visiting only sub-sets of the whole IR graph by -/// excluding certain edges from being followed by the traversal. -/// -/// The predicate must return true if the traversal should follow this edge -/// and visit everything that is reachable through it. -pub(crate) type TraversalPredicate = - for<'a> fn(&'a BindgenContext, Edge) -> bool; - -/// A `TraversalPredicate` implementation that follows all edges, and therefore -/// traversals using this predicate will see the whole IR graph reachable from -/// the traversal's roots. -pub(crate) fn all_edges(_: &BindgenContext, _: Edge) -> bool { - true -} - -/// A `TraversalPredicate` implementation that only follows -/// `EdgeKind::InnerType` edges, and therefore traversals using this predicate -/// will only visit the traversal's roots and their inner types. This is used -/// in no-recursive-allowlist mode, where inner types such as anonymous -/// structs/unions still need to be processed. -pub(crate) fn only_inner_type_edges(_: &BindgenContext, edge: Edge) -> bool { - edge.kind == EdgeKind::InnerType -} - -/// A `TraversalPredicate` implementation that only follows edges to items that -/// are enabled for code generation. This lets us skip considering items for -/// which are not reachable from code generation. -pub(crate) fn codegen_edges(ctx: &BindgenContext, edge: Edge) -> bool { - let cc = &ctx.options().codegen_config; - match edge.kind { - EdgeKind::Generic => { - ctx.resolve_item(edge.to).is_enabled_for_codegen(ctx) - } - - // We statically know the kind of item that non-generic edges can point - // to, so we don't need to actually resolve the item and check - // `Item::is_enabled_for_codegen`. - EdgeKind::TemplateParameterDefinition | - EdgeKind::TemplateArgument | - EdgeKind::TemplateDeclaration | - EdgeKind::BaseMember | - EdgeKind::Field | - EdgeKind::InnerType | - EdgeKind::FunctionReturn | - EdgeKind::FunctionParameter | - EdgeKind::VarType | - EdgeKind::TypeReference => cc.types(), - EdgeKind::InnerVar => cc.vars(), - EdgeKind::Method => cc.methods(), - EdgeKind::Constructor => cc.constructors(), - EdgeKind::Destructor => cc.destructors(), - } -} - -/// The storage for the set of items that have been seen (although their -/// outgoing edges might not have been fully traversed yet) in an active -/// traversal. -pub(crate) trait TraversalStorage<'ctx> { - /// Construct a new instance of this `TraversalStorage`, for a new traversal. - fn new(ctx: &'ctx BindgenContext) -> Self; - - /// Add the given item to the storage. If the item has never been seen - /// before, return `true`. Otherwise, return `false`. - /// - /// The `from` item is the item from which we discovered this item, or is - /// `None` if this item is a root. 
- fn add(&mut self, from: Option, item: ItemId) -> bool; -} - -impl<'ctx> TraversalStorage<'ctx> for ItemSet { - fn new(_: &'ctx BindgenContext) -> Self { - ItemSet::new() - } - - fn add(&mut self, _: Option, item: ItemId) -> bool { - self.insert(item) - } -} - -/// A `TraversalStorage` implementation that keeps track of how we first reached -/// each item. This is useful for providing debug assertions with meaningful -/// diagnostic messages about dangling items. -#[derive(Debug)] -pub(crate) struct Paths<'ctx>(BTreeMap, &'ctx BindgenContext); - -impl<'ctx> TraversalStorage<'ctx> for Paths<'ctx> { - fn new(ctx: &'ctx BindgenContext) -> Self { - Paths(BTreeMap::new(), ctx) - } - - fn add(&mut self, from: Option, item: ItemId) -> bool { - let newly_discovered = - self.0.insert(item, from.unwrap_or(item)).is_none(); - - if self.1.resolve_item_fallible(item).is_none() { - let mut path = vec![]; - let mut current = item; - loop { - let predecessor = *self.0.get(¤t).expect( - "We know we found this item id, so it must have a \ - predecessor", - ); - if predecessor == current { - break; - } - path.push(predecessor); - current = predecessor; - } - path.reverse(); - panic!( - "Found reference to dangling id = {item:?}\nvia path = {path:?}" - ); - } - - newly_discovered - } -} - -/// The queue of seen-but-not-yet-traversed items. -/// -/// Using a FIFO queue with a traversal will yield a breadth-first traversal, -/// while using a LIFO queue will result in a depth-first traversal of the IR -/// graph. -pub(crate) trait TraversalQueue: Default { - /// Add a newly discovered item to the queue. - fn push(&mut self, item: ItemId); - - /// Pop the next item to traverse, if any. - fn next(&mut self) -> Option; -} - -impl TraversalQueue for Vec { - fn push(&mut self, item: ItemId) { - self.push(item); - } - - fn next(&mut self) -> Option { - self.pop() - } -} - -impl TraversalQueue for VecDeque { - fn push(&mut self, item: ItemId) { - self.push_back(item); - } - - fn next(&mut self) -> Option { - self.pop_front() - } -} - -/// Something that can receive edges from a `Trace` implementation. -pub(crate) trait Tracer { - /// Note an edge between items. Called from within a `Trace` implementation. - fn visit_kind(&mut self, item: ItemId, kind: EdgeKind); - - /// A synonym for `tracer.visit_kind(item, EdgeKind::Generic)`. - fn visit(&mut self, item: ItemId) { - self.visit_kind(item, EdgeKind::Generic); - } -} - -impl Tracer for F -where - F: FnMut(ItemId, EdgeKind), -{ - fn visit_kind(&mut self, item: ItemId, kind: EdgeKind) { - (*self)(item, kind); - } -} - -/// Trace all of the outgoing edges to other items. Implementations should call -/// one of `tracer.visit(edge)` or `tracer.visit_kind(edge, EdgeKind::Whatever)` -/// for each of their outgoing edges. -pub(crate) trait Trace { - /// If a particular type needs extra information beyond what it has in - /// `self` and `context` to find its referenced items, its implementation - /// can define this associated type, forcing callers to pass the needed - /// information through. - type Extra; - - /// Trace all of this item's outgoing edges to other items. - fn trace( - &self, - context: &BindgenContext, - tracer: &mut T, - extra: &Self::Extra, - ) where - T: Tracer; -} - -/// An graph traversal of the transitive closure of references between items. -/// -/// See `BindgenContext::allowlisted_items` for more information. 
-pub(crate) struct ItemTraversal<'ctx, Storage, Queue> -where - Storage: TraversalStorage<'ctx>, - Queue: TraversalQueue, -{ - ctx: &'ctx BindgenContext, - - /// The set of items we have seen thus far in this traversal. - seen: Storage, - - /// The set of items that we have seen, but have yet to traverse. - queue: Queue, - - /// The predicate that determines which edges this traversal will follow. - predicate: TraversalPredicate, - - /// The item we are currently traversing. - currently_traversing: Option, -} - -impl<'ctx, Storage, Queue> ItemTraversal<'ctx, Storage, Queue> -where - Storage: TraversalStorage<'ctx>, - Queue: TraversalQueue, -{ - /// Begin a new traversal, starting from the given roots. - pub(crate) fn new( - ctx: &'ctx BindgenContext, - roots: R, - predicate: TraversalPredicate, - ) -> ItemTraversal<'ctx, Storage, Queue> - where - R: IntoIterator, - { - let mut seen = Storage::new(ctx); - let mut queue = Queue::default(); - - for id in roots { - seen.add(None, id); - queue.push(id); - } - - ItemTraversal { - ctx, - seen, - queue, - predicate, - currently_traversing: None, - } - } -} - -impl<'ctx, Storage, Queue> Tracer for ItemTraversal<'ctx, Storage, Queue> -where - Storage: TraversalStorage<'ctx>, - Queue: TraversalQueue, -{ - fn visit_kind(&mut self, item: ItemId, kind: EdgeKind) { - let edge = Edge::new(item, kind); - if !(self.predicate)(self.ctx, edge) { - return; - } - - let is_newly_discovered = - self.seen.add(self.currently_traversing, item); - if is_newly_discovered { - self.queue.push(item); - } - } -} - -impl<'ctx, Storage, Queue> Iterator for ItemTraversal<'ctx, Storage, Queue> -where - Storage: TraversalStorage<'ctx>, - Queue: TraversalQueue, -{ - type Item = ItemId; - - fn next(&mut self) -> Option { - let id = self.queue.next()?; - - let newly_discovered = self.seen.add(None, id); - debug_assert!( - !newly_discovered, - "should have already seen anything we get out of our queue" - ); - debug_assert!( - self.ctx.resolve_item_fallible(id).is_some(), - "should only get IDs of actual items in our context during traversal" - ); - - self.currently_traversing = Some(id); - id.trace(self.ctx, self, &()); - self.currently_traversing = None; - - Some(id) - } -} - -/// An iterator to find any dangling items. -/// -/// See `BindgenContext::assert_no_dangling_item_traversal` for more -/// information. -pub(crate) type AssertNoDanglingItemsTraversal<'ctx> = - ItemTraversal<'ctx, Paths<'ctx>, VecDeque>; diff --git a/vendor/bindgen/ir/ty.rs b/vendor/bindgen/ir/ty.rs deleted file mode 100644 index 38a7f6344a9d12..00000000000000 --- a/vendor/bindgen/ir/ty.rs +++ /dev/null @@ -1,1256 +0,0 @@ -//! Everything related to types in our intermediate representation. - -use super::comp::CompInfo; -use super::context::{BindgenContext, ItemId, TypeId}; -use super::dot::DotAttributes; -use super::enum_ty::Enum; -use super::function::FunctionSig; -use super::item::{IsOpaque, Item}; -use super::layout::{Layout, Opaque}; -use super::objc::ObjCInterface; -use super::template::{ - AsTemplateParam, TemplateInstantiation, TemplateParameters, -}; -use super::traversal::{EdgeKind, Trace, Tracer}; -use crate::clang::{self, Cursor}; -use crate::parse::{ParseError, ParseResult}; -use std::borrow::Cow; -use std::io; - -pub use super::int::IntKind; - -/// The base representation of a type in bindgen. 
-/// -/// A type has an optional name, which if present cannot be empty, a `layout` -/// (size, alignment and packedness) if known, a `Kind`, which determines which -/// kind of type it is, and whether the type is const. -#[derive(Debug)] -pub(crate) struct Type { - /// The name of the type, or None if it was an unnamed struct or union. - name: Option, - /// The layout of the type, if known. - layout: Option, - /// The inner kind of the type - kind: TypeKind, - /// Whether this type is const-qualified. - is_const: bool, -} - -/// The maximum number of items in an array for which Rust implements common -/// traits, and so if we have a type containing an array with more than this -/// many items, we won't be able to derive common traits on that type. -/// -pub(crate) const RUST_DERIVE_IN_ARRAY_LIMIT: usize = 32; - -impl Type { - /// Get the underlying `CompInfo` for this type as a mutable reference, or - /// `None` if this is some other kind of type. - pub(crate) fn as_comp_mut(&mut self) -> Option<&mut CompInfo> { - match self.kind { - TypeKind::Comp(ref mut ci) => Some(ci), - _ => None, - } - } - - /// Construct a new `Type`. - pub(crate) fn new( - name: Option, - layout: Option, - kind: TypeKind, - is_const: bool, - ) -> Self { - Type { - name, - layout, - kind, - is_const, - } - } - - /// Which kind of type is this? - pub(crate) fn kind(&self) -> &TypeKind { - &self.kind - } - - /// Get a mutable reference to this type's kind. - pub(crate) fn kind_mut(&mut self) -> &mut TypeKind { - &mut self.kind - } - - /// Get this type's name. - pub(crate) fn name(&self) -> Option<&str> { - self.name.as_deref() - } - - /// Whether this is a block pointer type. - pub(crate) fn is_block_pointer(&self) -> bool { - matches!(self.kind, TypeKind::BlockPointer(..)) - } - - /// Is this an integer type, including `bool` or `char`? - pub(crate) fn is_int(&self) -> bool { - matches!(self.kind, TypeKind::Int(_)) - } - - /// Is this a compound type? - pub(crate) fn is_comp(&self) -> bool { - matches!(self.kind, TypeKind::Comp(..)) - } - - /// Is this a union? - pub(crate) fn is_union(&self) -> bool { - match self.kind { - TypeKind::Comp(ref comp) => comp.is_union(), - _ => false, - } - } - - /// Is this type of kind `TypeKind::TypeParam`? - pub(crate) fn is_type_param(&self) -> bool { - matches!(self.kind, TypeKind::TypeParam) - } - - /// Is this a template instantiation type? - pub(crate) fn is_template_instantiation(&self) -> bool { - matches!(self.kind, TypeKind::TemplateInstantiation(..)) - } - - /// Is this a function type? - pub(crate) fn is_function(&self) -> bool { - matches!(self.kind, TypeKind::Function(..)) - } - - /// Is this an enum type? - pub(crate) fn is_enum(&self) -> bool { - matches!(self.kind, TypeKind::Enum(..)) - } - - /// Is this void? - pub(crate) fn is_void(&self) -> bool { - matches!(self.kind, TypeKind::Void) - } - /// Is this either a builtin or named type? - pub(crate) fn is_builtin_or_type_param(&self) -> bool { - matches!( - self.kind, - TypeKind::Void | - TypeKind::NullPtr | - TypeKind::Function(..) | - TypeKind::Array(..) | - TypeKind::Reference(..) | - TypeKind::Pointer(..) | - TypeKind::Int(..) | - TypeKind::Float(..) | - TypeKind::TypeParam - ) - } - - /// Creates a new named type, with name `name`. - pub(crate) fn named(name: String) -> Self { - let name = if name.is_empty() { None } else { Some(name) }; - Self::new(name, None, TypeKind::TypeParam, false) - } - - /// Is this a floating point type? 
- pub(crate) fn is_float(&self) -> bool { - matches!(self.kind, TypeKind::Float(..)) - } - - /// Is this a boolean type? - pub(crate) fn is_bool(&self) -> bool { - matches!(self.kind, TypeKind::Int(IntKind::Bool)) - } - - /// Is this an integer type? - pub(crate) fn is_integer(&self) -> bool { - matches!(self.kind, TypeKind::Int(..)) - } - - /// Cast this type to an integer kind, or `None` if it is not an integer - /// type. - pub(crate) fn as_integer(&self) -> Option { - match self.kind { - TypeKind::Int(int_kind) => Some(int_kind), - _ => None, - } - } - - /// Is this a `const` qualified type? - pub(crate) fn is_const(&self) -> bool { - self.is_const - } - - /// Is this an unresolved reference? - pub(crate) fn is_unresolved_ref(&self) -> bool { - matches!(self.kind, TypeKind::UnresolvedTypeRef(_, _, _)) - } - - /// Is this a incomplete array type? - pub(crate) fn is_incomplete_array( - &self, - ctx: &BindgenContext, - ) -> Option { - match self.kind { - TypeKind::Array(item, len) => { - if len == 0 { - Some(item.into()) - } else { - None - } - } - TypeKind::ResolvedTypeRef(inner) => { - ctx.resolve_type(inner).is_incomplete_array(ctx) - } - _ => None, - } - } - - /// What is the layout of this type? - pub(crate) fn layout(&self, ctx: &BindgenContext) -> Option { - self.layout.or_else(|| { - match self.kind { - TypeKind::Comp(ref ci) => ci.layout(ctx), - TypeKind::Array(inner, 0) => Some(Layout::new( - 0, - ctx.resolve_type(inner).layout(ctx)?.align, - )), - // FIXME(emilio): This is a hack for anonymous union templates. - // Use the actual pointer size! - TypeKind::Pointer(..) => Some(Layout::new( - ctx.target_pointer_size(), - ctx.target_pointer_size(), - )), - TypeKind::ResolvedTypeRef(inner) => { - ctx.resolve_type(inner).layout(ctx) - } - _ => None, - } - }) - } - - /// Whether this named type is an invalid C++ identifier. This is done to - /// avoid generating invalid code with some cases we can't handle, see: - /// - /// tests/headers/381-decltype-alias.hpp - pub(crate) fn is_invalid_type_param(&self) -> bool { - match self.kind { - TypeKind::TypeParam => { - let name = self.name().expect("Unnamed named type?"); - !clang::is_valid_identifier(name) - } - _ => false, - } - } - - /// Takes `name`, and returns a suitable identifier representation for it. - fn sanitize_name(name: &str) -> Cow<'_, str> { - if clang::is_valid_identifier(name) { - return Cow::Borrowed(name); - } - - let name = name.replace([' ', ':', '.'], "_"); - Cow::Owned(name) - } - - /// Get this type's sanitized name. - pub(crate) fn sanitized_name<'a>( - &'a self, - ctx: &BindgenContext, - ) -> Option> { - let name_info = match *self.kind() { - TypeKind::Pointer(inner) => Some((inner, Cow::Borrowed("ptr"))), - TypeKind::Reference(inner) => Some((inner, Cow::Borrowed("ref"))), - TypeKind::Array(inner, length) => { - Some((inner, format!("array{length}").into())) - } - _ => None, - }; - if let Some((inner, prefix)) = name_info { - ctx.resolve_item(inner) - .expect_type() - .sanitized_name(ctx) - .map(|name| format!("{prefix}_{name}").into()) - } else { - self.name().map(Self::sanitize_name) - } - } - - /// See [`Self::safe_canonical_type`]. - pub(crate) fn canonical_type<'tr>( - &'tr self, - ctx: &'tr BindgenContext, - ) -> &'tr Type { - self.safe_canonical_type(ctx) - .expect("Should have been resolved after parsing!") - } - - /// Returns the canonical type of this type, that is, the "inner type". 
- /// - /// For example, for a `typedef`, the canonical type would be the - /// `typedef`ed type, for a template instantiation, would be the template - /// its specializing, and so on. Return None if the type is unresolved. - pub(crate) fn safe_canonical_type<'tr>( - &'tr self, - ctx: &'tr BindgenContext, - ) -> Option<&'tr Type> { - match self.kind { - TypeKind::TypeParam | - TypeKind::Array(..) | - TypeKind::Vector(..) | - TypeKind::Comp(..) | - TypeKind::Opaque | - TypeKind::Int(..) | - TypeKind::Float(..) | - TypeKind::Complex(..) | - TypeKind::Function(..) | - TypeKind::Enum(..) | - TypeKind::Reference(..) | - TypeKind::Void | - TypeKind::NullPtr | - TypeKind::Pointer(..) | - TypeKind::BlockPointer(..) | - TypeKind::ObjCId | - TypeKind::ObjCSel | - TypeKind::ObjCInterface(..) => Some(self), - - TypeKind::ResolvedTypeRef(inner) | - TypeKind::Alias(inner) | - TypeKind::TemplateAlias(inner, _) => { - ctx.resolve_type(inner).safe_canonical_type(ctx) - } - TypeKind::TemplateInstantiation(ref inst) => ctx - .resolve_type(inst.template_definition()) - .safe_canonical_type(ctx), - - TypeKind::UnresolvedTypeRef(..) => None, - } - } - - /// There are some types we don't want to stop at when finding an opaque - /// item, so we can arrive to the proper item that needs to be generated. - pub(crate) fn should_be_traced_unconditionally(&self) -> bool { - matches!( - self.kind, - TypeKind::Comp(..) | - TypeKind::Function(..) | - TypeKind::Pointer(..) | - TypeKind::Array(..) | - TypeKind::Reference(..) | - TypeKind::TemplateInstantiation(..) | - TypeKind::ResolvedTypeRef(..) - ) - } -} - -impl IsOpaque for Type { - type Extra = Item; - - fn is_opaque(&self, ctx: &BindgenContext, item: &Item) -> bool { - match self.kind { - TypeKind::Opaque => true, - TypeKind::TemplateInstantiation(ref inst) => { - inst.is_opaque(ctx, item) - } - TypeKind::Comp(ref comp) => comp.is_opaque(ctx, &self.layout), - TypeKind::ResolvedTypeRef(to) => to.is_opaque(ctx, &()), - _ => false, - } - } -} - -impl AsTemplateParam for Type { - type Extra = Item; - - fn as_template_param( - &self, - ctx: &BindgenContext, - item: &Item, - ) -> Option { - self.kind.as_template_param(ctx, item) - } -} - -impl AsTemplateParam for TypeKind { - type Extra = Item; - - fn as_template_param( - &self, - ctx: &BindgenContext, - item: &Item, - ) -> Option { - match *self { - TypeKind::TypeParam => Some(item.id().expect_type_id(ctx)), - TypeKind::ResolvedTypeRef(id) => id.as_template_param(ctx, &()), - _ => None, - } - } -} - -impl DotAttributes for Type { - fn dot_attributes( - &self, - ctx: &BindgenContext, - out: &mut W, - ) -> io::Result<()> - where - W: io::Write, - { - if let Some(ref layout) = self.layout { - writeln!( - out, - "size{} - align{}", - layout.size, layout.align - )?; - if layout.packed { - writeln!(out, "packedtrue")?; - } - } - - if self.is_const { - writeln!(out, "consttrue")?; - } - - self.kind.dot_attributes(ctx, out) - } -} - -impl DotAttributes for TypeKind { - fn dot_attributes( - &self, - ctx: &BindgenContext, - out: &mut W, - ) -> io::Result<()> - where - W: io::Write, - { - writeln!( - out, - "type kind{}", - self.kind_name() - )?; - - if let TypeKind::Comp(ref comp) = *self { - comp.dot_attributes(ctx, out)?; - } - - Ok(()) - } -} - -impl TypeKind { - fn kind_name(&self) -> &'static str { - match *self { - TypeKind::Void => "Void", - TypeKind::NullPtr => "NullPtr", - TypeKind::Comp(..) => "Comp", - TypeKind::Opaque => "Opaque", - TypeKind::Int(..) => "Int", - TypeKind::Float(..) => "Float", - TypeKind::Complex(..) 
=> "Complex", - TypeKind::Alias(..) => "Alias", - TypeKind::TemplateAlias(..) => "TemplateAlias", - TypeKind::Array(..) => "Array", - TypeKind::Vector(..) => "Vector", - TypeKind::Function(..) => "Function", - TypeKind::Enum(..) => "Enum", - TypeKind::Pointer(..) => "Pointer", - TypeKind::BlockPointer(..) => "BlockPointer", - TypeKind::Reference(..) => "Reference", - TypeKind::TemplateInstantiation(..) => "TemplateInstantiation", - TypeKind::UnresolvedTypeRef(..) => "UnresolvedTypeRef", - TypeKind::ResolvedTypeRef(..) => "ResolvedTypeRef", - TypeKind::TypeParam => "TypeParam", - TypeKind::ObjCInterface(..) => "ObjCInterface", - TypeKind::ObjCId => "ObjCId", - TypeKind::ObjCSel => "ObjCSel", - } - } -} - -#[test] -fn is_invalid_type_param_valid() { - let ty = Type::new(Some("foo".into()), None, TypeKind::TypeParam, false); - assert!(!ty.is_invalid_type_param()); -} - -#[test] -fn is_invalid_type_param_valid_underscore_and_numbers() { - let ty = Type::new( - Some("_foo123456789_".into()), - None, - TypeKind::TypeParam, - false, - ); - assert!(!ty.is_invalid_type_param()); -} - -#[test] -fn is_invalid_type_param_valid_unnamed_kind() { - let ty = Type::new(Some("foo".into()), None, TypeKind::Void, false); - assert!(!ty.is_invalid_type_param()); -} - -#[test] -fn is_invalid_type_param_invalid_start() { - let ty = Type::new(Some("1foo".into()), None, TypeKind::TypeParam, false); - assert!(ty.is_invalid_type_param()); -} - -#[test] -fn is_invalid_type_param_invalid_remaining() { - let ty = Type::new(Some("foo-".into()), None, TypeKind::TypeParam, false); - assert!(ty.is_invalid_type_param()); -} - -#[test] -#[should_panic(expected = "Unnamed named type")] -fn is_invalid_type_param_unnamed() { - let ty = Type::new(None, None, TypeKind::TypeParam, false); - assert!(ty.is_invalid_type_param()); -} - -#[test] -fn is_invalid_type_param_empty_name() { - let ty = Type::new(Some(String::new()), None, TypeKind::TypeParam, false); - assert!(ty.is_invalid_type_param()); -} - -impl TemplateParameters for Type { - fn self_template_params(&self, ctx: &BindgenContext) -> Vec { - self.kind.self_template_params(ctx) - } -} - -impl TemplateParameters for TypeKind { - fn self_template_params(&self, ctx: &BindgenContext) -> Vec { - match *self { - TypeKind::ResolvedTypeRef(id) => { - ctx.resolve_type(id).self_template_params(ctx) - } - TypeKind::Comp(ref comp) => comp.self_template_params(ctx), - TypeKind::TemplateAlias(_, ref args) => args.clone(), - - TypeKind::Opaque | - TypeKind::TemplateInstantiation(..) | - TypeKind::Void | - TypeKind::NullPtr | - TypeKind::Int(_) | - TypeKind::Float(_) | - TypeKind::Complex(_) | - TypeKind::Array(..) | - TypeKind::Vector(..) | - TypeKind::Function(_) | - TypeKind::Enum(_) | - TypeKind::Pointer(_) | - TypeKind::BlockPointer(_) | - TypeKind::Reference(_) | - TypeKind::UnresolvedTypeRef(..) | - TypeKind::TypeParam | - TypeKind::Alias(_) | - TypeKind::ObjCId | - TypeKind::ObjCSel | - TypeKind::ObjCInterface(_) => vec![], - } - } -} - -/// The kind of float this type represents. -#[derive(Debug, Copy, Clone, PartialEq, Eq)] -pub(crate) enum FloatKind { - /// A half (`_Float16` or `__fp16`) - Float16, - /// A `float`. - Float, - /// A `double`. - Double, - /// A `long double`. - LongDouble, - /// A `__float128`. - Float128, -} - -/// The different kinds of types that we can parse. -#[derive(Debug)] -pub(crate) enum TypeKind { - /// The void type. - Void, - - /// The `nullptr_t` type. - NullPtr, - - /// A compound type, that is, a class, struct, or union. 
- Comp(CompInfo), - - /// An opaque type that we just don't understand. All usage of this should - /// result in an opaque blob of bytes generated from the containing type's - /// layout. - Opaque, - - /// An integer type, of a given kind. `bool` and `char` are also considered - /// integers. - Int(IntKind), - - /// A floating point type. - Float(FloatKind), - - /// A complex floating point type. - Complex(FloatKind), - - /// A type alias, with a name, that points to another type. - Alias(TypeId), - - /// A templated alias, pointing to an inner type, just as `Alias`, but with - /// template parameters. - TemplateAlias(TypeId, Vec), - - /// A packed vector type: element type, number of elements - Vector(TypeId, usize), - - /// An array of a type and a length. - Array(TypeId, usize), - - /// A function type, with a given signature. - Function(FunctionSig), - - /// An `enum` type. - Enum(Enum), - - /// A pointer to a type. The bool field represents whether it's const or - /// not. - Pointer(TypeId), - - /// A pointer to an Apple block. - BlockPointer(TypeId), - - /// A reference to a type, as in: int& `foo()`. - Reference(TypeId), - - /// An instantiation of an abstract template definition with a set of - /// concrete template arguments. - TemplateInstantiation(TemplateInstantiation), - - /// A reference to a yet-to-resolve type. This stores the clang cursor - /// itself, and postpones its resolution. - /// - /// These are gone in a phase after parsing where these are mapped to - /// already known types, and are converted to `ResolvedTypeRef`. - /// - /// see tests/headers/typeref.hpp to see somewhere where this is a problem. - UnresolvedTypeRef(clang::Type, Cursor, /* parent_id */ Option), - - /// An indirection to another type. - /// - /// These are generated after we resolve a forward declaration, or when we - /// replace one type with another. - ResolvedTypeRef(TypeId), - - /// A named type, that is, a template parameter. - TypeParam, - - /// Objective C interface. Always referenced through a pointer - ObjCInterface(ObjCInterface), - - /// Objective C 'id' type, points to any object - ObjCId, - - /// Objective C selector type - ObjCSel, -} - -impl Type { - /// This is another of the nasty methods. This one is the one that takes - /// care of the core logic of converting a clang type to a `Type`. - /// - /// It's sort of nasty and full of special-casing, but hopefully the - /// comments in every special case justify why they're there. 
- pub(crate) fn from_clang_ty( - potential_id: ItemId, - ty: &clang::Type, - location: Cursor, - parent_id: Option, - ctx: &mut BindgenContext, - ) -> Result, ParseError> { - use clang_sys::*; - { - let already_resolved = ctx.builtin_or_resolved_ty( - potential_id, - parent_id, - ty, - Some(location), - ); - if let Some(ty) = already_resolved { - debug!("{ty:?} already resolved: {location:?}"); - return Ok(ParseResult::AlreadyResolved(ty.into())); - } - } - - let layout = ty.fallible_layout(ctx).ok(); - let cursor = ty.declaration(); - let is_anonymous = cursor.is_anonymous(); - let mut name = if is_anonymous { - None - } else { - Some(cursor.spelling()).filter(|n| !n.is_empty()) - }; - - debug!( - "from_clang_ty: {potential_id:?}, ty: {ty:?}, loc: {location:?}" - ); - debug!("currently_parsed_types: {:?}", ctx.currently_parsed_types()); - - let canonical_ty = ty.canonical_type(); - - // Parse objc protocols as if they were interfaces - let mut ty_kind = ty.kind(); - match location.kind() { - CXCursor_ObjCProtocolDecl | CXCursor_ObjCCategoryDecl => { - ty_kind = CXType_ObjCInterface; - } - _ => {} - } - - // Objective C template type parameter - // FIXME: This is probably wrong, we are attempting to find the - // objc template params, which seem to manifest as a typedef. - // We are rewriting them as ID to suppress multiple conflicting - // typedefs at root level - if ty_kind == CXType_Typedef { - let is_template_type_param = - ty.declaration().kind() == CXCursor_TemplateTypeParameter; - let is_canonical_objcpointer = - canonical_ty.kind() == CXType_ObjCObjectPointer; - - // We have found a template type for objc interface - if is_canonical_objcpointer && is_template_type_param { - // Objective-C generics are just ids with fancy name. - // To keep it simple, just name them ids - name = Some("id".to_owned()); - } - } - - if location.kind() == CXCursor_ClassTemplatePartialSpecialization { - // Sorry! (Not sorry) - warn!( - "Found a partial template specialization; bindgen does not \ - support partial template specialization! Constructing \ - opaque type instead." - ); - return Ok(ParseResult::New( - Opaque::from_clang_ty(&canonical_ty, ctx), - None, - )); - } - - let kind = if location.kind() == CXCursor_TemplateRef || - (ty.template_args().is_some() && ty_kind != CXType_Typedef) - { - // This is a template instantiation. - match TemplateInstantiation::from_ty(ty, ctx) { - Some(inst) => TypeKind::TemplateInstantiation(inst), - None => TypeKind::Opaque, - } - } else { - match ty_kind { - CXType_Unexposed - if *ty != canonical_ty && - canonical_ty.kind() != CXType_Invalid && - ty.ret_type().is_none() && - // Sometime clang desugars some types more than - // what we need, specially with function - // pointers. - // - // We should also try the solution of inverting - // those checks instead of doing this, that is, - // something like: - // - // CXType_Unexposed if ty.ret_type().is_some() - // => { ... } - // - // etc. - !canonical_ty.spelling().contains("type-parameter") => - { - debug!("Looking for canonical type: {canonical_ty:?}"); - return Self::from_clang_ty( - potential_id, - &canonical_ty, - location, - parent_id, - ctx, - ); - } - CXType_Unexposed | CXType_Invalid => { - // For some reason Clang doesn't give us any hint in some - // situations where we should generate a function pointer (see - // tests/headers/func_ptr_in_struct.h), so we do a guess here - // trying to see if it has a valid return type. 
- if ty.ret_type().is_some() { - let signature = - FunctionSig::from_ty(ty, &location, ctx)?; - TypeKind::Function(signature) - // Same here, with template specialisations we can safely - // assume this is a Comp(..) - } else if ty.is_fully_instantiated_template() { - debug!("Template specialization: {ty:?}, {location:?} {canonical_ty:?}"); - let complex = CompInfo::from_ty( - potential_id, - ty, - Some(location), - ctx, - ) - .expect("C'mon"); - TypeKind::Comp(complex) - } else { - match location.kind() { - CXCursor_CXXBaseSpecifier | - CXCursor_ClassTemplate => { - if location.kind() == CXCursor_CXXBaseSpecifier - { - // In the case we're parsing a base specifier - // inside an unexposed or invalid type, it means - // that we're parsing one of two things: - // - // * A template parameter. - // * A complex class that isn't exposed. - // - // This means, unfortunately, that there's no - // good way to differentiate between them. - // - // Probably we could try to look at the - // declaration and complicate more this logic, - // but we'll keep it simple... if it's a valid - // C++ identifier, we'll consider it as a - // template parameter. - // - // This is because: - // - // * We expect every other base that is a - // proper identifier (that is, a simple - // struct/union declaration), to be exposed, - // so this path can't be reached in that - // case. - // - // * Quite conveniently, complex base - // specifiers preserve their full names (that - // is: Foo instead of Foo). We can take - // advantage of this. - // - // If we find some edge case where this doesn't - // work (which I guess is unlikely, see the - // different test cases[1][2][3][4]), we'd need - // to find more creative ways of differentiating - // these two cases. - // - // [1]: inherit_named.hpp - // [2]: forward-inherit-struct-with-fields.hpp - // [3]: forward-inherit-struct.hpp - // [4]: inherit-namespaced.hpp - if location.spelling().chars().all(|c| { - c.is_alphanumeric() || c == '_' - }) { - return Err(ParseError::Recurse); - } - } else { - name = Some(location.spelling()); - } - - let complex = CompInfo::from_ty( - potential_id, - ty, - Some(location), - ctx, - ); - if let Ok(complex) = complex { - TypeKind::Comp(complex) - } else { - warn!( - "Could not create complex type \ - from class template or base \ - specifier, using opaque blob" - ); - let opaque = Opaque::from_clang_ty(ty, ctx); - return Ok(ParseResult::New(opaque, None)); - } - } - CXCursor_TypeAliasTemplateDecl => { - debug!("TypeAliasTemplateDecl"); - - // We need to manually unwind this one. 
- let mut inner = Err(ParseError::Continue); - let mut args = vec![]; - - location.visit(|cur| { - match cur.kind() { - CXCursor_TypeAliasDecl => { - let current = cur.cur_type(); - - debug_assert_eq!( - current.kind(), - CXType_Typedef - ); - - name = Some(location.spelling()); - - let inner_ty = cur - .typedef_type() - .expect("Not valid Type?"); - inner = Ok(Item::from_ty_or_ref( - inner_ty, - cur, - Some(potential_id), - ctx, - )); - } - CXCursor_TemplateTypeParameter => { - let param = Item::type_param( - None, cur, ctx, - ) - .expect( - "Item::type_param shouldn't \ - ever fail if we are looking \ - at a TemplateTypeParameter", - ); - args.push(param); - } - _ => {} - } - CXChildVisit_Continue - }); - - let Ok(inner_type) = inner else { - warn!( - "Failed to parse template alias \ - {:?}", - location - ); - return Err(ParseError::Continue); - }; - - TypeKind::TemplateAlias(inner_type, args) - } - CXCursor_TemplateRef => { - let referenced = location.referenced().unwrap(); - let referenced_ty = referenced.cur_type(); - - debug!("TemplateRef: location = {location:?}; referenced = {referenced:?}; referenced_ty = {referenced_ty:?}"); - - return Self::from_clang_ty( - potential_id, - &referenced_ty, - referenced, - parent_id, - ctx, - ); - } - CXCursor_TypeRef => { - let referenced = location.referenced().unwrap(); - let referenced_ty = referenced.cur_type(); - let declaration = referenced_ty.declaration(); - - debug!("TypeRef: location = {location:?}; referenced = {referenced:?}; referenced_ty = {referenced_ty:?}"); - - let id = Item::from_ty_or_ref_with_id( - potential_id, - referenced_ty, - declaration, - parent_id, - ctx, - ); - return Ok(ParseResult::AlreadyResolved( - id.into(), - )); - } - CXCursor_NamespaceRef => { - return Err(ParseError::Continue); - } - _ => { - if ty.kind() == CXType_Unexposed { - warn!("Unexposed type {ty:?}, recursing inside, loc: {location:?}"); - return Err(ParseError::Recurse); - } - - warn!("invalid type {ty:?}"); - return Err(ParseError::Continue); - } - } - } - } - CXType_Auto => { - if canonical_ty == *ty { - debug!("Couldn't find deduced type: {ty:?}"); - return Err(ParseError::Continue); - } - - return Self::from_clang_ty( - potential_id, - &canonical_ty, - location, - parent_id, - ctx, - ); - } - // NOTE: We don't resolve pointers eagerly because the pointee type - // might not have been parsed, and if it contains templates or - // something else we might get confused, see the comment inside - // TypeRef. - // - // We might need to, though, if the context is already in the - // process of resolving them. - CXType_ObjCObjectPointer | - CXType_MemberPointer | - CXType_Pointer => { - let mut pointee = ty.pointee_type().unwrap(); - if *ty != canonical_ty { - let canonical_pointee = - canonical_ty.pointee_type().unwrap(); - // clang sometimes loses pointee constness here, see - // #2244. - if canonical_pointee.is_const() != pointee.is_const() { - pointee = canonical_pointee; - } - } - let inner = - Item::from_ty_or_ref(pointee, location, None, ctx); - TypeKind::Pointer(inner) - } - CXType_BlockPointer => { - let pointee = ty.pointee_type().expect("Not valid Type?"); - let inner = - Item::from_ty_or_ref(pointee, location, None, ctx); - TypeKind::BlockPointer(inner) - } - // XXX: RValueReference is most likely wrong, but I don't think we - // can even add bindings for that, so huh. 
- CXType_RValueReference | CXType_LValueReference => { - let inner = Item::from_ty_or_ref( - ty.pointee_type().unwrap(), - location, - None, - ctx, - ); - TypeKind::Reference(inner) - } - // XXX DependentSizedArray is wrong - CXType_VariableArray | CXType_DependentSizedArray => { - let inner = Item::from_ty( - ty.elem_type().as_ref().unwrap(), - location, - None, - ctx, - ) - .expect("Not able to resolve array element?"); - TypeKind::Pointer(inner) - } - CXType_IncompleteArray => { - let inner = Item::from_ty( - ty.elem_type().as_ref().unwrap(), - location, - None, - ctx, - ) - .expect("Not able to resolve array element?"); - TypeKind::Array(inner, 0) - } - CXType_FunctionNoProto | CXType_FunctionProto => { - let signature = FunctionSig::from_ty(ty, &location, ctx)?; - TypeKind::Function(signature) - } - CXType_Typedef => { - let inner = cursor.typedef_type().expect("Not valid Type?"); - let inner_id = - Item::from_ty_or_ref(inner, location, None, ctx); - if inner_id == potential_id { - warn!( - "Generating opaque type instead of self-referential \ - typedef"); - // This can happen if we bail out of recursive situations - // within the clang parsing. - TypeKind::Opaque - } else { - // Check if this type definition is an alias to a pointer of a `struct` / - // `union` / `enum` with the same name and add the `_ptr` suffix to it to - // avoid name collisions. - if let Some(ref mut name) = name { - if inner.kind() == CXType_Pointer && - !ctx.options().c_naming - { - let pointee = inner.pointee_type().unwrap(); - if pointee.kind() == CXType_Elaborated && - pointee.declaration().spelling() == *name - { - *name += "_ptr"; - } - } - } - TypeKind::Alias(inner_id) - } - } - CXType_Enum => { - let enum_ = Enum::from_ty(ty, ctx).expect("Not an enum?"); - - if !is_anonymous { - let pretty_name = ty.spelling(); - if clang::is_valid_identifier(&pretty_name) { - name = Some(pretty_name); - } - } - - TypeKind::Enum(enum_) - } - CXType_Record => { - let complex = CompInfo::from_ty( - potential_id, - ty, - Some(location), - ctx, - ) - .expect("Not a complex type?"); - - if !is_anonymous { - // The pretty-printed name may contain typedefed name, - // but may also be "struct (anonymous at .h:1)" - let pretty_name = ty.spelling(); - if clang::is_valid_identifier(&pretty_name) { - name = Some(pretty_name); - } - } - - TypeKind::Comp(complex) - } - CXType_Vector | CXType_ExtVector => { - let inner = Item::from_ty( - ty.elem_type().as_ref().unwrap(), - location, - None, - ctx, - )?; - TypeKind::Vector(inner, ty.num_elements().unwrap()) - } - CXType_ConstantArray => { - let inner = Item::from_ty( - ty.elem_type().as_ref().unwrap(), - location, - None, - ctx, - ) - .expect("Not able to resolve array element?"); - TypeKind::Array(inner, ty.num_elements().unwrap()) - } - CXType_Atomic => { - // TODO(emilio): Maybe we can preserve the "is atomic" bit somehow and generate - // something more useful... But for now this is better than panicking or - // generating nothing. 
- return Self::from_clang_ty( - potential_id, - &ty.atomic_value_type(), - location, - parent_id, - ctx, - ); - } - CXType_Elaborated => { - return Self::from_clang_ty( - potential_id, - &ty.named(), - location, - parent_id, - ctx, - ); - } - CXType_ObjCId => TypeKind::ObjCId, - CXType_ObjCSel => TypeKind::ObjCSel, - CXType_ObjCClass | CXType_ObjCInterface => { - let interface = ObjCInterface::from_ty(&location, ctx) - .expect("Not a valid objc interface?"); - if !is_anonymous { - name = Some(interface.rust_name()); - } - TypeKind::ObjCInterface(interface) - } - CXType_Dependent => { - return Err(ParseError::Continue); - } - _ => { - warn!( - "unsupported type: kind = {:?}; ty = {ty:?}; at {location:?}", - ty.kind(), - ); - return Err(ParseError::Continue); - } - } - }; - - name = name.filter(|n| !n.is_empty()); - - let is_const = ty.is_const() || - (ty.kind() == CXType_ConstantArray && - ty.elem_type().is_some_and(|element| element.is_const())); - - let ty = Type::new(name, layout, kind, is_const); - // TODO: maybe declaration.canonical()? - Ok(ParseResult::New(ty, Some(cursor.canonical()))) - } -} - -impl Trace for Type { - type Extra = Item; - - fn trace(&self, context: &BindgenContext, tracer: &mut T, item: &Item) - where - T: Tracer, - { - if self.name().is_some_and(|name| context.is_stdint_type(name)) { - // These types are special-cased in codegen and don't need to be traversed. - return; - } - match *self.kind() { - TypeKind::Pointer(inner) | - TypeKind::Reference(inner) | - TypeKind::Array(inner, _) | - TypeKind::Vector(inner, _) | - TypeKind::BlockPointer(inner) | - TypeKind::Alias(inner) | - TypeKind::ResolvedTypeRef(inner) => { - tracer.visit_kind(inner.into(), EdgeKind::TypeReference); - } - TypeKind::TemplateAlias(inner, ref template_params) => { - tracer.visit_kind(inner.into(), EdgeKind::TypeReference); - for param in template_params { - tracer.visit_kind( - param.into(), - EdgeKind::TemplateParameterDefinition, - ); - } - } - TypeKind::TemplateInstantiation(ref inst) => { - inst.trace(context, tracer, &()); - } - TypeKind::Comp(ref ci) => ci.trace(context, tracer, item), - TypeKind::Function(ref sig) => sig.trace(context, tracer, &()), - TypeKind::Enum(ref en) => { - if let Some(repr) = en.repr() { - tracer.visit(repr.into()); - } - } - TypeKind::UnresolvedTypeRef(_, _, Some(id)) => { - tracer.visit(id); - } - - TypeKind::ObjCInterface(ref interface) => { - interface.trace(context, tracer, &()); - } - - // None of these variants have edges to other items and types. - TypeKind::Opaque | - TypeKind::UnresolvedTypeRef(_, _, None) | - TypeKind::TypeParam | - TypeKind::Void | - TypeKind::NullPtr | - TypeKind::Int(_) | - TypeKind::Float(_) | - TypeKind::Complex(_) | - TypeKind::ObjCId | - TypeKind::ObjCSel => {} - } - } -} diff --git a/vendor/bindgen/ir/var.rs b/vendor/bindgen/ir/var.rs deleted file mode 100644 index 45f4ba1ba01a33..00000000000000 --- a/vendor/bindgen/ir/var.rs +++ /dev/null @@ -1,523 +0,0 @@ -//! Intermediate representation of variables. - -use super::super::codegen::MacroTypeVariation; -use super::context::{BindgenContext, TypeId}; -use super::dot::DotAttributes; -use super::function::cursor_mangling; -use super::int::IntKind; -use super::item::Item; -use super::ty::{FloatKind, TypeKind}; -use crate::callbacks::{ItemInfo, ItemKind, MacroParsingBehavior}; -use crate::clang; -use crate::clang::ClangToken; -use crate::parse::{ClangSubItemParser, ParseError, ParseResult}; - -use std::io; -use std::num::Wrapping; - -/// The type for a constant variable. 
-#[derive(Debug)] -pub(crate) enum VarType { - /// A boolean. - Bool(bool), - /// An integer. - Int(i64), - /// A floating point number. - Float(f64), - /// A character. - Char(u8), - /// A string, not necessarily well-formed utf-8. - String(Vec), -} - -/// A `Var` is our intermediate representation of a variable. -#[derive(Debug)] -pub(crate) struct Var { - /// The name of the variable. - name: String, - /// The mangled name of the variable. - mangled_name: Option, - /// The link name of the variable. - link_name: Option, - /// The type of the variable. - ty: TypeId, - /// The value of the variable, that needs to be suitable for `ty`. - val: Option, - /// Whether this variable is const. - is_const: bool, -} - -impl Var { - /// Construct a new `Var`. - pub(crate) fn new( - name: String, - mangled_name: Option, - link_name: Option, - ty: TypeId, - val: Option, - is_const: bool, - ) -> Var { - assert!(!name.is_empty()); - Var { - name, - mangled_name, - link_name, - ty, - val, - is_const, - } - } - - /// Is this variable `const` qualified? - pub(crate) fn is_const(&self) -> bool { - self.is_const - } - - /// The value of this constant variable, if any. - pub(crate) fn val(&self) -> Option<&VarType> { - self.val.as_ref() - } - - /// Get this variable's type. - pub(crate) fn ty(&self) -> TypeId { - self.ty - } - - /// Get this variable's name. - pub(crate) fn name(&self) -> &str { - &self.name - } - - /// Get this variable's mangled name. - pub(crate) fn mangled_name(&self) -> Option<&str> { - self.mangled_name.as_deref() - } - - /// Get this variable's link name. - pub fn link_name(&self) -> Option<&str> { - self.link_name.as_deref() - } -} - -impl DotAttributes for Var { - fn dot_attributes( - &self, - _ctx: &BindgenContext, - out: &mut W, - ) -> io::Result<()> - where - W: io::Write, - { - if self.is_const { - writeln!(out, "consttrue")?; - } - - if let Some(ref mangled) = self.mangled_name { - writeln!(out, "mangled name{mangled}")?; - } - - Ok(()) - } -} - -fn default_macro_constant_type(ctx: &BindgenContext, value: i64) -> IntKind { - if value < 0 || - ctx.options().default_macro_constant_type == - MacroTypeVariation::Signed - { - if value < i64::from(i32::MIN) || value > i64::from(i32::MAX) { - IntKind::I64 - } else if !ctx.options().fit_macro_constants || - value < i64::from(i16::MIN) || - value > i64::from(i16::MAX) - { - IntKind::I32 - } else if value < i64::from(i8::MIN) || value > i64::from(i8::MAX) { - IntKind::I16 - } else { - IntKind::I8 - } - } else if value > i64::from(u32::MAX) { - IntKind::U64 - } else if !ctx.options().fit_macro_constants || value > i64::from(u16::MAX) - { - IntKind::U32 - } else if value > i64::from(u8::MAX) { - IntKind::U16 - } else { - IntKind::U8 - } -} - -/// Parses tokens from a `CXCursor_MacroDefinition` pointing into a function-like -/// macro, and calls the `func_macro` callback. -fn handle_function_macro( - cursor: &clang::Cursor, - callbacks: &dyn crate::callbacks::ParseCallbacks, -) { - let is_closing_paren = |t: &ClangToken| { - // Test cheap token kind before comparing exact spellings. - t.kind == clang_sys::CXToken_Punctuation && t.spelling() == b")" - }; - let tokens: Vec<_> = cursor.tokens().iter().collect(); - if let Some(boundary) = tokens.iter().position(is_closing_paren) { - let mut spelled = tokens.iter().map(ClangToken::spelling); - // Add 1, to convert index to length. 
- let left = spelled.by_ref().take(boundary + 1); - let left = left.collect::>().concat(); - if let Ok(left) = String::from_utf8(left) { - let right: Vec<_> = spelled.collect(); - callbacks.func_macro(&left, &right); - } - } -} - -impl ClangSubItemParser for Var { - fn parse( - cursor: clang::Cursor, - ctx: &mut BindgenContext, - ) -> Result, ParseError> { - use cexpr::expr::EvalResult; - use cexpr::literal::CChar; - use clang_sys::*; - match cursor.kind() { - CXCursor_MacroDefinition => { - for callbacks in &ctx.options().parse_callbacks { - match callbacks.will_parse_macro(&cursor.spelling()) { - MacroParsingBehavior::Ignore => { - return Err(ParseError::Continue); - } - MacroParsingBehavior::Default => {} - } - - if cursor.is_macro_function_like() { - handle_function_macro(&cursor, callbacks.as_ref()); - // We handled the macro, skip macro processing below. - return Err(ParseError::Continue); - } - } - - let value = parse_macro(ctx, &cursor); - - let Some((id, value)) = value else { - return Err(ParseError::Continue); - }; - - assert!(!id.is_empty(), "Empty macro name?"); - - let previously_defined = ctx.parsed_macro(&id); - - // NB: It's important to "note" the macro even if the result is - // not an integer, otherwise we might loose other kind of - // derived macros. - ctx.note_parsed_macro(id.clone(), value.clone()); - - if previously_defined { - let name = String::from_utf8(id).unwrap(); - duplicated_macro_diagnostic(&name, cursor.location(), ctx); - return Err(ParseError::Continue); - } - - // NOTE: Unwrapping, here and above, is safe, because the - // identifier of a token comes straight from clang, and we - // enforce utf8 there, so we should have already panicked at - // this point. - let name = String::from_utf8(id).unwrap(); - let (type_kind, val) = match value { - EvalResult::Invalid => return Err(ParseError::Continue), - EvalResult::Float(f) => { - (TypeKind::Float(FloatKind::Double), VarType::Float(f)) - } - EvalResult::Char(c) => { - let c = match c { - CChar::Char(c) => { - assert_eq!(c.len_utf8(), 1); - c as u8 - } - CChar::Raw(c) => u8::try_from(c).unwrap(), - }; - - (TypeKind::Int(IntKind::U8), VarType::Char(c)) - } - EvalResult::Str(val) => { - let char_ty = Item::builtin_type( - TypeKind::Int(IntKind::U8), - true, - ctx, - ); - for callbacks in &ctx.options().parse_callbacks { - callbacks.str_macro(&name, &val); - } - (TypeKind::Pointer(char_ty), VarType::String(val)) - } - EvalResult::Int(Wrapping(value)) => { - let kind = ctx - .options() - .last_callback(|c| c.int_macro(&name, value)) - .unwrap_or_else(|| { - default_macro_constant_type(ctx, value) - }); - - (TypeKind::Int(kind), VarType::Int(value)) - } - }; - - let ty = Item::builtin_type(type_kind, true, ctx); - - Ok(ParseResult::New( - Var::new(name, None, None, ty, Some(val), true), - Some(cursor), - )) - } - CXCursor_VarDecl => { - let mut name = cursor.spelling(); - if cursor.linkage() == CXLinkage_External { - if let Some(nm) = ctx.options().last_callback(|callbacks| { - callbacks.generated_name_override(ItemInfo { - name: name.as_str(), - kind: ItemKind::Var, - }) - }) { - name = nm; - } - } - // No more changes to name - let name = name; - - if name.is_empty() { - warn!("Empty constant name?"); - return Err(ParseError::Continue); - } - - let link_name = ctx.options().last_callback(|callbacks| { - callbacks.generated_link_name_override(ItemInfo { - name: name.as_str(), - kind: ItemKind::Var, - }) - }); - - let ty = cursor.cur_type(); - - // TODO(emilio): do we have to special-case constant arrays in - // 
some other places? - let is_const = ty.is_const() || - ([CXType_ConstantArray, CXType_IncompleteArray] - .contains(&ty.kind()) && - ty.elem_type() - .is_some_and(|element| element.is_const())); - - let ty = match Item::from_ty(&ty, cursor, None, ctx) { - Ok(ty) => ty, - Err(e) => { - assert!( - matches!(ty.kind(), CXType_Auto | CXType_Unexposed), - "Couldn't resolve constant type, and it \ - wasn't an nondeductible auto type or unexposed \ - type: {ty:?}" - ); - return Err(e); - } - }; - - // Note: Ty might not be totally resolved yet, see - // tests/headers/inner_const.hpp - // - // That's fine because in that case we know it's not a literal. - let canonical_ty = ctx - .safe_resolve_type(ty) - .and_then(|t| t.safe_canonical_type(ctx)); - - let is_integer = canonical_ty.is_some_and(|t| t.is_integer()); - let is_float = canonical_ty.is_some_and(|t| t.is_float()); - - // TODO: We could handle `char` more gracefully. - // TODO: Strings, though the lookup is a bit more hard (we need - // to look at the canonical type of the pointee too, and check - // is char, u8, or i8 I guess). - let value = if is_integer { - let TypeKind::Int(kind) = *canonical_ty.unwrap().kind() - else { - unreachable!() - }; - - let mut val = cursor.evaluate().and_then(|v| v.as_int()); - if val.is_none() || !kind.signedness_matches(val.unwrap()) { - val = get_integer_literal_from_cursor(&cursor); - } - - val.map(|val| { - if kind == IntKind::Bool { - VarType::Bool(val != 0) - } else { - VarType::Int(val) - } - }) - } else if is_float { - cursor - .evaluate() - .and_then(|v| v.as_double()) - .map(VarType::Float) - } else { - cursor - .evaluate() - .and_then(|v| v.as_literal_string()) - .map(VarType::String) - }; - - let mangling = cursor_mangling(ctx, &cursor); - let var = - Var::new(name, mangling, link_name, ty, value, is_const); - - Ok(ParseResult::New(var, Some(cursor))) - } - _ => { - /* TODO */ - Err(ParseError::Continue) - } - } - } -} - -/// This function uses a [`FallbackTranslationUnit`][clang::FallbackTranslationUnit] to parse each -/// macro that cannot be parsed by the normal bindgen process for `#define`s. -/// -/// To construct the [`FallbackTranslationUnit`][clang::FallbackTranslationUnit], first precompiled -/// headers are generated for all input headers. An empty temporary `.c` file is generated to pass -/// to the translation unit. On the evaluation of each macro, a [`String`] is generated with the -/// new contents of the empty file and passed in for reparsing. The precompiled headers and -/// preservation of the [`FallbackTranslationUnit`][clang::FallbackTranslationUnit] across macro -/// evaluations are both optimizations that have significantly improved the performance. 
-fn parse_macro_clang_fallback( - ctx: &mut BindgenContext, - cursor: &clang::Cursor, -) -> Option<(Vec, cexpr::expr::EvalResult)> { - if !ctx.options().clang_macro_fallback { - return None; - } - - let ftu = ctx.try_ensure_fallback_translation_unit()?; - let contents = format!("int main() {{ {}; }}", cursor.spelling()); - ftu.reparse(&contents).ok()?; - // Children of root node of AST - let root_children = ftu.translation_unit().cursor().collect_children(); - // Last child in root is function declaration - // Should be FunctionDecl - let main_func = root_children.last()?; - // Children should all be statements in function declaration - let all_stmts = main_func.collect_children(); - // First child in all_stmts should be the statement containing the macro to evaluate - // Should be CompoundStmt - let macro_stmt = all_stmts.first()?; - // Children should all be expressions from the compound statement - let paren_exprs = macro_stmt.collect_children(); - // First child in all_exprs is the expression utilizing the given macro to be evaluated - // Should be ParenExpr - let paren = paren_exprs.first()?; - - Some(( - cursor.spelling().into_bytes(), - cexpr::expr::EvalResult::Int(Wrapping(paren.evaluate()?.as_int()?)), - )) -} - -/// Try and parse a macro using all the macros parsed until now. -fn parse_macro( - ctx: &mut BindgenContext, - cursor: &clang::Cursor, -) -> Option<(Vec, cexpr::expr::EvalResult)> { - use cexpr::expr; - - let mut cexpr_tokens = cursor.cexpr_tokens(); - - for callbacks in &ctx.options().parse_callbacks { - callbacks.modify_macro(&cursor.spelling(), &mut cexpr_tokens); - } - - let parser = expr::IdentifierParser::new(ctx.parsed_macros()); - - match parser.macro_definition(&cexpr_tokens) { - Ok((_, (id, val))) => Some((id.into(), val)), - _ => parse_macro_clang_fallback(ctx, cursor), - } -} - -fn parse_int_literal_tokens(cursor: &clang::Cursor) -> Option { - use cexpr::expr; - use cexpr::expr::EvalResult; - - let cexpr_tokens = cursor.cexpr_tokens(); - - // TODO(emilio): We can try to parse other kinds of literals. - match expr::expr(&cexpr_tokens) { - Ok((_, EvalResult::Int(Wrapping(val)))) => Some(val), - _ => None, - } -} - -fn get_integer_literal_from_cursor(cursor: &clang::Cursor) -> Option { - use clang_sys::*; - let mut value = None; - cursor.visit(|c| { - match c.kind() { - CXCursor_IntegerLiteral | CXCursor_UnaryOperator => { - value = parse_int_literal_tokens(&c); - } - CXCursor_UnexposedExpr => { - value = get_integer_literal_from_cursor(&c); - } - _ => (), - } - if value.is_some() { - CXChildVisit_Break - } else { - CXChildVisit_Continue - } - }); - value -} - -fn duplicated_macro_diagnostic( - macro_name: &str, - _location: clang::SourceLocation, - _ctx: &BindgenContext, -) { - warn!("Duplicated macro definition: {macro_name}"); - - #[cfg(feature = "experimental")] - // FIXME (pvdrz & amanjeev): This diagnostic message shows way too often to be actually - // useful. We have to change the logic where this function is called to be able to emit this - // message only when the duplication is an actual issue. - // - // If I understood correctly, `bindgen` ignores all `#undef` directives. Meaning that this: - // ```c - // #define FOO 1 - // #undef FOO - // #define FOO 2 - // ``` - // - // Will trigger this message even though there's nothing wrong with it. 
- #[allow(clippy::overly_complex_bool_expr)] - if false && _ctx.options().emit_diagnostics { - use crate::diagnostics::{get_line, Diagnostic, Level, Slice}; - use std::borrow::Cow; - - let mut slice = Slice::default(); - let mut source = Cow::from(macro_name); - - let (file, line, col, _) = _location.location(); - if let Some(filename) = file.name() { - if let Ok(Some(code)) = get_line(&filename, line) { - source = code.into(); - } - slice.with_location(filename, line, col); - } - - slice.with_source(source); - - Diagnostic::default() - .with_title("Duplicated macro definition.", Level::Warning) - .add_slice(slice) - .add_annotation("This macro had a duplicate.", Level::Note) - .display(); - } -} diff --git a/vendor/bindgen/lib.rs b/vendor/bindgen/lib.rs deleted file mode 100644 index b2fecc2c3b0e29..00000000000000 --- a/vendor/bindgen/lib.rs +++ /dev/null @@ -1,1422 +0,0 @@ -//! Generate Rust bindings for C and C++ libraries. -//! -//! Provide a C/C++ header file, receive Rust FFI code to call into C/C++ -//! functions and use types defined in the header. -//! -//! See the [`Builder`](./struct.Builder.html) struct for usage. -//! -//! See the [Users Guide](https://rust-lang.github.io/rust-bindgen/) for -//! additional documentation. -#![deny(missing_docs)] -#![deny(unused_extern_crates)] -#![deny(clippy::disallowed_methods)] -// To avoid rather annoying warnings when matching with CXCursor_xxx as a -// constant. -#![allow(non_upper_case_globals)] -// `quote!` nests quite deeply. -#![recursion_limit = "128"] - -#[macro_use] -extern crate bitflags; -#[macro_use] -extern crate quote; - -#[cfg(feature = "logging")] -#[macro_use] -extern crate log; - -#[cfg(not(feature = "logging"))] -#[macro_use] -mod log_stubs; - -#[macro_use] -mod extra_assertions; - -mod codegen; -mod deps; -mod options; -mod time; - -pub mod callbacks; - -mod clang; -#[cfg(feature = "experimental")] -mod diagnostics; -mod features; -mod ir; -mod parse; -mod regex_set; - -pub use codegen::{ - AliasVariation, EnumVariation, MacroTypeVariation, NonCopyUnionStyle, -}; -pub use features::{RustEdition, RustTarget, LATEST_STABLE_RUST}; -pub use ir::annotations::FieldVisibilityKind; -pub use ir::function::Abi; -#[cfg(feature = "__cli")] -pub use options::cli::builder_from_flags; - -use codegen::CodegenError; -use features::RustFeatures; -use ir::comment; -use ir::context::{BindgenContext, ItemId}; -use ir::item::Item; -use options::BindgenOptions; -use parse::ParseError; - -use std::borrow::Cow; -use std::collections::hash_map::Entry; -use std::env; -use std::ffi::OsStr; -use std::fs::{File, OpenOptions}; -use std::io::{self, Write}; -use std::mem::size_of; -use std::path::{Path, PathBuf}; -use std::process::{Command, Stdio}; -use std::rc::Rc; -use std::str::FromStr; - -// Some convenient typedefs for a fast hash map and hash set. -type HashMap = rustc_hash::FxHashMap; -type HashSet = rustc_hash::FxHashSet; - -/// Default prefix for the anon fields. 
-pub const DEFAULT_ANON_FIELDS_PREFIX: &str = "__bindgen_anon_"; - -const DEFAULT_NON_EXTERN_FNS_SUFFIX: &str = "__extern"; - -fn file_is_cpp(name_file: &str) -> bool { - Path::new(name_file).extension().is_some_and(|ext| { - ext.eq_ignore_ascii_case("hpp") || - ext.eq_ignore_ascii_case("hxx") || - ext.eq_ignore_ascii_case("hh") || - ext.eq_ignore_ascii_case("h++") - }) -} - -fn args_are_cpp(clang_args: &[Box]) -> bool { - for w in clang_args.windows(2) { - if w[0].as_ref() == "-xc++" || w[1].as_ref() == "-xc++" { - return true; - } - if w[0].as_ref() == "-x" && w[1].as_ref() == "c++" { - return true; - } - if w[0].as_ref() == "-include" && file_is_cpp(w[1].as_ref()) { - return true; - } - } - false -} - -bitflags! { - /// A type used to indicate which kind of items we have to generate. - #[derive(Copy, Clone, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] - pub struct CodegenConfig: u32 { - /// Whether to generate functions. - const FUNCTIONS = 1 << 0; - /// Whether to generate types. - const TYPES = 1 << 1; - /// Whether to generate constants. - const VARS = 1 << 2; - /// Whether to generate methods. - const METHODS = 1 << 3; - /// Whether to generate constructors - const CONSTRUCTORS = 1 << 4; - /// Whether to generate destructors. - const DESTRUCTORS = 1 << 5; - } -} - -impl CodegenConfig { - /// Returns true if functions should be generated. - pub fn functions(self) -> bool { - self.contains(CodegenConfig::FUNCTIONS) - } - - /// Returns true if types should be generated. - pub fn types(self) -> bool { - self.contains(CodegenConfig::TYPES) - } - - /// Returns true if constants should be generated. - pub fn vars(self) -> bool { - self.contains(CodegenConfig::VARS) - } - - /// Returns true if methods should be generated. - pub fn methods(self) -> bool { - self.contains(CodegenConfig::METHODS) - } - - /// Returns true if constructors should be generated. - pub fn constructors(self) -> bool { - self.contains(CodegenConfig::CONSTRUCTORS) - } - - /// Returns true if destructors should be generated. - pub fn destructors(self) -> bool { - self.contains(CodegenConfig::DESTRUCTORS) - } -} - -impl Default for CodegenConfig { - fn default() -> Self { - CodegenConfig::all() - } -} - -/// Formatting tools that can be used to format the bindings -#[derive(Debug, Clone, Copy, PartialEq, Eq)] -#[non_exhaustive] -pub enum Formatter { - /// Do not format the bindings. - None, - /// Use `rustfmt` to format the bindings. - Rustfmt, - #[cfg(feature = "prettyplease")] - /// Use `prettyplease` to format the bindings. - Prettyplease, -} - -impl Default for Formatter { - fn default() -> Self { - Self::Rustfmt - } -} - -impl FromStr for Formatter { - type Err = String; - - fn from_str(s: &str) -> Result { - match s { - "none" => Ok(Self::None), - "rustfmt" => Ok(Self::Rustfmt), - #[cfg(feature = "prettyplease")] - "prettyplease" => Ok(Self::Prettyplease), - _ => Err(format!("`{s}` is not a valid formatter")), - } - } -} - -impl std::fmt::Display for Formatter { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - let s = match self { - Self::None => "none", - Self::Rustfmt => "rustfmt", - #[cfg(feature = "prettyplease")] - Self::Prettyplease => "prettyplease", - }; - - std::fmt::Display::fmt(&s, f) - } -} - -/// Configure and generate Rust bindings for a C/C++ header. -/// -/// This is the main entry point to the library. -/// -/// ```ignore -/// use bindgen::builder; -/// -/// // Configure and generate bindings. 
-/// let bindings = builder().header("path/to/input/header") -/// .allowlist_type("SomeCoolClass") -/// .allowlist_function("do_some_cool_thing") -/// .generate()?; -/// -/// // Write the generated bindings to an output file. -/// bindings.write_to_file("path/to/output.rs")?; -/// ``` -/// -/// # Enums -/// -/// Bindgen can map C/C++ enums into Rust in different ways. The way bindgen maps enums depends on -/// the pattern passed to several methods: -/// -/// 1. [`constified_enum_module()`](#method.constified_enum_module) -/// 2. [`bitfield_enum()`](#method.bitfield_enum) -/// 3. [`newtype_enum()`](#method.newtype_enum) -/// 4. [`rustified_enum()`](#method.rustified_enum) -/// 5. [`rustified_non_exhaustive_enum()`](#method.rustified_non_exhaustive_enum) -/// -/// For each C enum, bindgen tries to match the pattern in the following order: -/// -/// 1. Constified enum module -/// 2. Bitfield enum -/// 3. Newtype enum -/// 4. Rustified enum -/// -/// If none of the above patterns match, then bindgen will generate a set of Rust constants. -/// -/// # Clang arguments -/// -/// Extra arguments can be passed to with clang: -/// 1. [`clang_arg()`](#method.clang_arg): takes a single argument -/// 2. [`clang_args()`](#method.clang_args): takes an iterator of arguments -/// 3. `BINDGEN_EXTRA_CLANG_ARGS` environment variable: whitespace separate -/// environment variable of arguments -/// -/// Clang arguments specific to your crate should be added via the -/// `clang_arg()`/`clang_args()` methods. -/// -/// End-users of the crate may need to set the `BINDGEN_EXTRA_CLANG_ARGS` environment variable to -/// add additional arguments. For example, to build against a different sysroot a user could set -/// `BINDGEN_EXTRA_CLANG_ARGS` to `--sysroot=/path/to/sysroot`. -/// -/// # Regular expression arguments -/// -/// Some [`Builder`] methods, such as `allowlist_*` and `blocklist_*`, allow regular -/// expressions as arguments. These regular expressions will be enclosed in parentheses and -/// anchored with `^` and `$`. So, if the argument passed is ``, the regular expression to be -/// stored will be `^()$`. -/// -/// As a consequence, regular expressions passed to `bindgen` will try to match the whole name of -/// an item instead of a section of it, which means that to match any items with the prefix -/// `prefix`, the `prefix.*` regular expression must be used. -/// -/// Certain methods, like [`Builder::allowlist_function`], use regular expressions over function -/// names. To match C++ methods, prefix the name of the type where they belong, followed by an -/// underscore. So, if the type `Foo` has a method `bar`, it can be matched with the `Foo_bar` -/// regular expression. -/// -/// Additionally, Objective-C interfaces can be matched by prefixing the regular expression with -/// `I`. For example, the `IFoo` regular expression matches the `Foo` interface, and the `IFoo_foo` -/// regular expression matches the `foo` method of the `Foo` interface. -/// -/// Releases of `bindgen` with a version lesser or equal to `0.62.0` used to accept the wildcard -/// pattern `*` as a valid regular expression. This behavior has been deprecated, and the `.*` -/// regular expression must be used instead. -#[derive(Debug, Default, Clone)] -pub struct Builder { - options: BindgenOptions, -} - -/// Construct a new [`Builder`](./struct.Builder.html). 
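As a concrete companion to the `Builder` documentation above, this is what a typical `build.rs` invocation looks like. The header path, allowlist patterns, and output file name are hypothetical placeholders, not something prescribed by this patch; the API calls themselves (`Builder::default`, `allowlist_*`, `parse_callbacks`, `generate`, `write_to_file`) are the ones documented in this file.

// build.rs (sketch): generate bindings for a hypothetical wrapper.h at build time.
use std::env;
use std::path::PathBuf;

fn main() {
    let bindings = bindgen::Builder::default()
        .header("wrapper.h") // hypothetical input header
        .allowlist_function("mylib_.*") // only expose this prefix
        .allowlist_type("mylib_.*")
        .parse_callbacks(Box::new(bindgen::CargoCallbacks::new()))
        .generate()
        .expect("unable to generate bindings");

    let out = PathBuf::from(env::var("OUT_DIR").unwrap()).join("bindings.rs");
    bindings
        .write_to_file(&out)
        .expect("unable to write bindings.rs");
}

Generating the bindings from `build.rs` rather than committing them keeps the FFI layer in sync with the headers, at the cost of requiring libclang at build time.
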
-pub fn builder() -> Builder { - Default::default() -} - -fn get_extra_clang_args( - parse_callbacks: &[Rc], -) -> Vec { - // Add any extra arguments from the environment to the clang command line. - let extra_clang_args = match get_target_dependent_env_var( - parse_callbacks, - "BINDGEN_EXTRA_CLANG_ARGS", - ) { - None => return vec![], - Some(s) => s, - }; - - // Try to parse it with shell quoting. If we fail, make it one single big argument. - if let Some(strings) = shlex::split(&extra_clang_args) { - return strings; - } - vec![extra_clang_args] -} - -impl Builder { - /// Generate the Rust bindings using the options built up thus far. - pub fn generate(mut self) -> Result { - // Keep rust_features synced with rust_target - self.options.rust_features = match self.options.rust_edition { - Some(edition) => { - if !edition.is_available(self.options.rust_target) { - return Err(BindgenError::UnsupportedEdition( - edition, - self.options.rust_target, - )); - } - RustFeatures::new(self.options.rust_target, edition) - } - None => { - RustFeatures::new_with_latest_edition(self.options.rust_target) - } - }; - - // Add any extra arguments from the environment to the clang command line. - self.options.clang_args.extend( - get_extra_clang_args(&self.options.parse_callbacks) - .into_iter() - .map(String::into_boxed_str), - ); - - for header in &self.options.input_headers { - self.options - .for_each_callback(|cb| cb.header_file(header.as_ref())); - } - - // Transform input headers to arguments on the clang command line. - self.options.fallback_clang_args = self - .options - .clang_args - .iter() - .filter(|arg| { - !arg.starts_with("-MMD") && - !arg.starts_with("-MD") && - !arg.starts_with("--write-user-dependencies") && - !arg.starts_with("--user-dependencies") - }) - .cloned() - .collect::>(); - self.options.clang_args.extend( - self.options.input_headers - [..self.options.input_headers.len().saturating_sub(1)] - .iter() - .flat_map(|header| ["-include".into(), header.clone()]), - ); - - let input_unsaved_files = - std::mem::take(&mut self.options.input_header_contents) - .into_iter() - .map(|(name, contents)| { - clang::UnsavedFile::new(name.as_ref(), contents.as_ref()) - }) - .collect::>(); - - Bindings::generate(self.options, &input_unsaved_files) - } - - /// Preprocess and dump the input header files to disk. - /// - /// This is useful when debugging bindgen, using C-Reduce, or when filing - /// issues. The resulting file will be named something like `__bindgen.i` or - /// `__bindgen.ii` - pub fn dump_preprocessed_input(&self) -> io::Result<()> { - let clang = - clang_sys::support::Clang::find(None, &[]).ok_or_else(|| { - io::Error::new( - io::ErrorKind::Other, - "Cannot find clang executable", - ) - })?; - - // The contents of a wrapper file that includes all the input header - // files. - let mut wrapper_contents = String::new(); - - // Whether we are working with C or C++ inputs. - let mut is_cpp = args_are_cpp(&self.options.clang_args); - - // For each input header, add `#include "$header"`. - for header in &self.options.input_headers { - is_cpp |= file_is_cpp(header); - - wrapper_contents.push_str("#include \""); - wrapper_contents.push_str(header); - wrapper_contents.push_str("\"\n"); - } - - // For each input header content, add a prefix line of `#line 0 "$name"` - // followed by the contents. 
- for (name, contents) in &self.options.input_header_contents { - is_cpp |= file_is_cpp(name); - - wrapper_contents.push_str("#line 0 \""); - wrapper_contents.push_str(name); - wrapper_contents.push_str("\"\n"); - wrapper_contents.push_str(contents); - } - - let wrapper_path = PathBuf::from(if is_cpp { - "__bindgen.cpp" - } else { - "__bindgen.c" - }); - - { - let mut wrapper_file = File::create(&wrapper_path)?; - wrapper_file.write_all(wrapper_contents.as_bytes())?; - } - - let mut cmd = Command::new(clang.path); - cmd.arg("-save-temps") - .arg("-E") - .arg("-C") - .arg("-c") - .arg(&wrapper_path) - .stdout(Stdio::piped()); - - for a in &self.options.clang_args { - cmd.arg(a.as_ref()); - } - - for a in get_extra_clang_args(&self.options.parse_callbacks) { - cmd.arg(a); - } - - let mut child = cmd.spawn()?; - - let mut preprocessed = child.stdout.take().unwrap(); - let mut file = File::create(if is_cpp { - "__bindgen.ii" - } else { - "__bindgen.i" - })?; - io::copy(&mut preprocessed, &mut file)?; - - if child.wait()?.success() { - Ok(()) - } else { - Err(io::Error::new( - io::ErrorKind::Other, - "clang exited with non-zero status", - )) - } - } -} - -impl BindgenOptions { - fn build(&mut self) { - const REGEX_SETS_LEN: usize = 29; - - let regex_sets: [_; REGEX_SETS_LEN] = [ - &mut self.blocklisted_types, - &mut self.blocklisted_functions, - &mut self.blocklisted_items, - &mut self.blocklisted_files, - &mut self.blocklisted_vars, - &mut self.opaque_types, - &mut self.allowlisted_vars, - &mut self.allowlisted_types, - &mut self.allowlisted_functions, - &mut self.allowlisted_files, - &mut self.allowlisted_items, - &mut self.bitfield_enums, - &mut self.constified_enums, - &mut self.constified_enum_modules, - &mut self.newtype_enums, - &mut self.newtype_global_enums, - &mut self.rustified_enums, - &mut self.rustified_non_exhaustive_enums, - &mut self.type_alias, - &mut self.new_type_alias, - &mut self.new_type_alias_deref, - &mut self.bindgen_wrapper_union, - &mut self.manually_drop_union, - &mut self.no_partialeq_types, - &mut self.no_copy_types, - &mut self.no_debug_types, - &mut self.no_default_types, - &mut self.no_hash_types, - &mut self.must_use_types, - ]; - - let record_matches = self.record_matches; - #[cfg(feature = "experimental")] - { - let sets_len = REGEX_SETS_LEN + self.abi_overrides.len(); - let names = if self.emit_diagnostics { - <[&str; REGEX_SETS_LEN]>::into_iter([ - "--blocklist-type", - "--blocklist-function", - "--blocklist-item", - "--blocklist-file", - "--blocklist-var", - "--opaque-type", - "--allowlist-type", - "--allowlist-function", - "--allowlist-var", - "--allowlist-file", - "--allowlist-item", - "--bitfield-enum", - "--newtype-enum", - "--newtype-global-enum", - "--rustified-enum", - "--rustified-enum-non-exhaustive", - "--constified-enum-module", - "--constified-enum", - "--type-alias", - "--new-type-alias", - "--new-type-alias-deref", - "--bindgen-wrapper-union", - "--manually-drop-union", - "--no-partialeq", - "--no-copy", - "--no-debug", - "--no-default", - "--no-hash", - "--must-use", - ]) - .chain((0..self.abi_overrides.len()).map(|_| "--override-abi")) - .map(Some) - .collect() - } else { - vec![None; sets_len] - }; - - for (regex_set, name) in - self.abi_overrides.values_mut().chain(regex_sets).zip(names) - { - regex_set.build_with_diagnostics(record_matches, name); - } - } - #[cfg(not(feature = "experimental"))] - for regex_set in self.abi_overrides.values_mut().chain(regex_sets) { - regex_set.build(record_matches); - } - } - - /// Update rust target 
version - pub fn set_rust_target(&mut self, rust_target: RustTarget) { - self.rust_target = rust_target; - } - - /// Get features supported by target Rust version - pub fn rust_features(&self) -> RustFeatures { - self.rust_features - } - - fn last_callback( - &self, - f: impl Fn(&dyn callbacks::ParseCallbacks) -> Option, - ) -> Option { - self.parse_callbacks - .iter() - .filter_map(|cb| f(cb.as_ref())) - .next_back() - } - - fn all_callbacks( - &self, - f: impl Fn(&dyn callbacks::ParseCallbacks) -> Vec, - ) -> Vec { - self.parse_callbacks - .iter() - .flat_map(|cb| f(cb.as_ref())) - .collect() - } - - fn for_each_callback(&self, f: impl Fn(&dyn callbacks::ParseCallbacks)) { - self.parse_callbacks.iter().for_each(|cb| f(cb.as_ref())); - } - - fn process_comment(&self, comment: &str) -> String { - let comment = comment::preprocess(comment); - self.last_callback(|cb| cb.process_comment(&comment)) - .unwrap_or(comment) - } -} - -#[cfg(feature = "runtime")] -fn ensure_libclang_is_loaded() { - use std::sync::{Arc, OnceLock}; - - if clang_sys::is_loaded() { - return; - } - - // XXX (issue #350): Ensure that our dynamically loaded `libclang` - // doesn't get dropped prematurely, nor is loaded multiple times - // across different threads. - - static LIBCLANG: OnceLock> = OnceLock::new(); - let libclang = LIBCLANG.get_or_init(|| { - clang_sys::load().expect("Unable to find libclang"); - clang_sys::get_library() - .expect("We just loaded libclang and it had better still be here!") - }); - - clang_sys::set_library(Some(libclang.clone())); -} - -#[cfg(not(feature = "runtime"))] -fn ensure_libclang_is_loaded() {} - -/// Error type for rust-bindgen. -#[derive(Debug, Clone, PartialEq, Eq, Hash)] -#[non_exhaustive] -pub enum BindgenError { - /// The header was a folder. - FolderAsHeader(PathBuf), - /// Permissions to read the header is insufficient. - InsufficientPermissions(PathBuf), - /// The header does not exist. - NotExist(PathBuf), - /// Clang diagnosed an error. - ClangDiagnostic(String), - /// Code generation reported an error. - Codegen(CodegenError), - /// The passed edition is not available on that Rust target. - UnsupportedEdition(RustEdition, RustTarget), -} - -impl std::fmt::Display for BindgenError { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match self { - BindgenError::FolderAsHeader(h) => { - write!(f, "'{}' is a folder", h.display()) - } - BindgenError::InsufficientPermissions(h) => { - write!(f, "insufficient permissions to read '{}'", h.display()) - } - BindgenError::NotExist(h) => { - write!(f, "header '{}' does not exist.", h.display()) - } - BindgenError::ClangDiagnostic(message) => { - write!(f, "clang diagnosed error: {message}") - } - BindgenError::Codegen(err) => { - write!(f, "codegen error: {err}") - } - BindgenError::UnsupportedEdition(edition, target) => { - write!(f, "edition {edition} is not available on Rust {target}") - } - } - } -} - -impl std::error::Error for BindgenError {} - -/// Generated Rust bindings. -#[derive(Debug)] -pub struct Bindings { - options: BindgenOptions, - module: proc_macro2::TokenStream, -} - -pub(crate) const HOST_TARGET: &str = - include_str!(concat!(env!("OUT_DIR"), "/host-target.txt")); - -// Some architecture triplets are different between rust and libclang, see #1211 -// and duplicates. 
-fn rust_to_clang_target(rust_target: &str) -> Box { - const TRIPLE_HYPHENS_MESSAGE: &str = "Target triple should contain hyphens"; - - let mut triple: Vec<&str> = rust_target.split_terminator('-').collect(); - - assert!(!triple.is_empty(), "{}", TRIPLE_HYPHENS_MESSAGE); - triple.resize(4, ""); - - // RISC-V - if triple[0].starts_with("riscv32") { - triple[0] = "riscv32"; - } else if triple[0].starts_with("riscv64") { - triple[0] = "riscv64"; - } - - // Apple - if triple[1] == "apple" { - if triple[0] == "aarch64" { - triple[0] = "arm64"; - } - if triple[3] == "sim" { - triple[3] = "simulator"; - } - } - - // ESP-IDF - if triple[2] == "espidf" { - triple[2] = "elf"; - } - - triple - .iter() - .skip(1) - .fold(triple[0].to_string(), |triple, part| { - if part.is_empty() { - triple - } else { - triple + "-" + part - } - }) - .into() -} - -/// Returns the effective target, and whether it was explicitly specified on the -/// clang flags. -fn find_effective_target(clang_args: &[Box]) -> (Box, bool) { - let mut args = clang_args.iter(); - while let Some(opt) = args.next() { - if opt.starts_with("--target=") { - let mut split = opt.split('='); - split.next(); - return (split.next().unwrap().into(), true); - } - - if opt.as_ref() == "-target" { - if let Some(target) = args.next() { - return (target.clone(), true); - } - } - } - - // If we're running from a build script, try to find the cargo target. - if let Ok(t) = env::var("TARGET") { - return (rust_to_clang_target(&t), false); - } - - (rust_to_clang_target(HOST_TARGET), false) -} - -impl Bindings { - /// Generate bindings for the given options. - pub(crate) fn generate( - mut options: BindgenOptions, - input_unsaved_files: &[clang::UnsavedFile], - ) -> Result { - ensure_libclang_is_loaded(); - - #[cfg(feature = "runtime")] - match clang_sys::get_library().unwrap().version() { - None => { - warn!("Could not detect a Clang version, make sure you are using libclang 9 or newer"); - } - Some(version) => { - if version < clang_sys::Version::V9_0 { - warn!("Detected Clang version {version:?} which is unsupported and can cause invalid code generation, use libclang 9 or newer"); - } - } - } - - #[cfg(feature = "runtime")] - debug!( - "Generating bindings, libclang at {}", - clang_sys::get_library().unwrap().path().display() - ); - #[cfg(not(feature = "runtime"))] - debug!("Generating bindings, libclang linked"); - - options.build(); - - let (effective_target, explicit_target) = - find_effective_target(&options.clang_args); - - let is_host_build = - rust_to_clang_target(HOST_TARGET) == effective_target; - - // NOTE: The is_host_build check wouldn't be sound normally in some - // cases if we were to call a binary (if you have a 32-bit clang and are - // building on a 64-bit system for example). But since we rely on - // opening libclang.so, it has to be the same architecture and thus the - // check is fine. - if !explicit_target && !is_host_build { - options.clang_args.insert( - 0, - format!("--target={effective_target}").into_boxed_str(), - ); - } - - fn detect_include_paths(options: &mut BindgenOptions) { - if !options.detect_include_paths { - return; - } - - // Filter out include paths and similar stuff, so we don't incorrectly - // promote them to `-isystem`. 
- let clang_args_for_clang_sys = { - let mut last_was_include_prefix = false; - options - .clang_args - .iter() - .filter(|arg| { - if last_was_include_prefix { - last_was_include_prefix = false; - return false; - } - - let arg = arg.as_ref(); - - // https://clang.llvm.org/docs/ClangCommandLineReference.html - // -isystem and -isystem-after are harmless. - if arg == "-I" || arg == "--include-directory" { - last_was_include_prefix = true; - return false; - } - - if arg.starts_with("-I") || - arg.starts_with("--include-directory=") - { - return false; - } - - true - }) - .map(|arg| arg.clone().into()) - .collect::>() - }; - - debug!( - "Trying to find clang with flags: {clang_args_for_clang_sys:?}" - ); - - let clang = match clang_sys::support::Clang::find( - None, - &clang_args_for_clang_sys, - ) { - None => return, - Some(clang) => clang, - }; - - debug!("Found clang: {clang:?}"); - - // Whether we are working with C or C++ inputs. - let is_cpp = args_are_cpp(&options.clang_args) || - options.input_headers.iter().any(|h| file_is_cpp(h)); - - let search_paths = if is_cpp { - clang.cpp_search_paths - } else { - clang.c_search_paths - }; - - if let Some(search_paths) = search_paths { - for path in search_paths { - if let Ok(path) = path.into_os_string().into_string() { - options.clang_args.push("-isystem".into()); - options.clang_args.push(path.into_boxed_str()); - } - } - } - } - - detect_include_paths(&mut options); - - #[cfg(unix)] - fn can_read(perms: &std::fs::Permissions) -> bool { - use std::os::unix::fs::PermissionsExt; - perms.mode() & 0o444 > 0 - } - - #[cfg(not(unix))] - fn can_read(_: &std::fs::Permissions) -> bool { - true - } - - if let Some(h) = options.input_headers.last() { - let path = Path::new(h.as_ref()); - if let Ok(md) = std::fs::metadata(path) { - if md.is_dir() { - return Err(BindgenError::FolderAsHeader(path.into())); - } - if !can_read(&md.permissions()) { - return Err(BindgenError::InsufficientPermissions( - path.into(), - )); - } - options.clang_args.push(h.clone()); - } else { - return Err(BindgenError::NotExist(path.into())); - } - } - - for (idx, f) in input_unsaved_files.iter().enumerate() { - if idx != 0 || !options.input_headers.is_empty() { - options.clang_args.push("-include".into()); - } - options.clang_args.push(f.name.to_str().unwrap().into()); - } - - debug!("Fixed-up options: {options:?}"); - - let time_phases = options.time_phases; - let mut context = BindgenContext::new(options, input_unsaved_files); - - if is_host_build { - debug_assert_eq!( - context.target_pointer_size(), - size_of::<*mut ()>(), - "{effective_target:?} {HOST_TARGET:?}" - ); - } - - { - let _t = time::Timer::new("parse").with_output(time_phases); - parse(&mut context)?; - } - - let (module, options) = - codegen::codegen(context).map_err(BindgenError::Codegen)?; - - Ok(Bindings { options, module }) - } - - /// Write these bindings as source text to a file. - pub fn write_to_file>(&self, path: P) -> io::Result<()> { - let file = OpenOptions::new() - .write(true) - .truncate(true) - .create(true) - .open(path.as_ref())?; - self.write(Box::new(file))?; - Ok(()) - } - - /// Write these bindings as source text to the given `Write`able. 
- pub fn write<'a>(&self, mut writer: Box) -> io::Result<()> { - const NL: &str = if cfg!(windows) { "\r\n" } else { "\n" }; - - if !self.options.disable_header_comment { - let version = - option_env!("CARGO_PKG_VERSION").unwrap_or("(unknown version)"); - write!( - writer, - "/* automatically generated by rust-bindgen {version} */{NL}{NL}", - )?; - } - - for line in &self.options.raw_lines { - writer.write_all(line.as_bytes())?; - writer.write_all(NL.as_bytes())?; - } - - if !self.options.raw_lines.is_empty() { - writer.write_all(NL.as_bytes())?; - } - - match self.format_tokens(&self.module) { - Ok(formatted_bindings) => { - writer.write_all(formatted_bindings.as_bytes())?; - } - Err(err) => { - eprintln!( - "Failed to run rustfmt: {err} (non-fatal, continuing)" - ); - writer.write_all(self.module.to_string().as_bytes())?; - } - } - Ok(()) - } - - /// Gets the rustfmt path to rustfmt the generated bindings. - fn rustfmt_path(&self) -> io::Result> { - debug_assert!(matches!(self.options.formatter, Formatter::Rustfmt)); - if let Some(ref p) = self.options.rustfmt_path { - return Ok(Cow::Borrowed(p)); - } - if let Ok(rustfmt) = env::var("RUSTFMT") { - return Ok(Cow::Owned(rustfmt.into())); - } - // No rustfmt binary was specified, so assume that the binary is called - // "rustfmt" and that it is in the user's PATH. - Ok(Cow::Owned("rustfmt".into())) - } - - /// Formats a token stream with the formatter set up in `BindgenOptions`. - fn format_tokens( - &self, - tokens: &proc_macro2::TokenStream, - ) -> io::Result { - let _t = time::Timer::new("rustfmt_generated_string") - .with_output(self.options.time_phases); - - match self.options.formatter { - Formatter::None => return Ok(tokens.to_string()), - #[cfg(feature = "prettyplease")] - Formatter::Prettyplease => { - return Ok(prettyplease::unparse(&syn::parse_quote!(#tokens))); - } - Formatter::Rustfmt => (), - } - - let rustfmt = self.rustfmt_path()?; - let mut cmd = Command::new(&*rustfmt); - - cmd.stdin(Stdio::piped()).stdout(Stdio::piped()); - - if let Some(path) = self - .options - .rustfmt_configuration_file - .as_ref() - .and_then(|f| f.to_str()) - { - cmd.args(["--config-path", path]); - } - - let edition = self - .options - .rust_edition - .unwrap_or_else(|| self.options.rust_target.latest_edition()); - cmd.args(["--edition", &format!("{edition}")]); - - let mut child = cmd.spawn()?; - let mut child_stdin = child.stdin.take().unwrap(); - let mut child_stdout = child.stdout.take().unwrap(); - - let source = tokens.to_string(); - - // Write to stdin in a new thread, so that we can read from stdout on this - // thread. This keeps the child from blocking on writing to its stdout which - // might block us from writing to its stdin. 
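That stdin-thread pattern is the standard deadlock-avoidance idiom for piping data through a child process, and it works outside bindgen with nothing but the standard library. The sketch below assumes only that a `rustfmt` binary is on `PATH`; it is not part of this patch and does not use bindgen's types.

use std::io::{self, Read, Write};
use std::process::{Command, Stdio};
use std::thread;

fn rustfmt_string(source: String) -> io::Result<String> {
    let mut child = Command::new("rustfmt")
        .args(["--edition", "2021"])
        .stdin(Stdio::piped())
        .stdout(Stdio::piped())
        .spawn()?;

    let mut stdin = child.stdin.take().unwrap();
    let mut stdout = child.stdout.take().unwrap();

    // Write on a separate thread so the child can never block on a full
    // stdout pipe while we are still blocked writing to its stdin.
    let writer = thread::spawn(move || {
        let _ = stdin.write_all(source.as_bytes());
    });

    let mut formatted = String::new();
    stdout.read_to_string(&mut formatted)?;
    writer.join().expect("stdin writer thread panicked");

    if child.wait()?.success() {
        Ok(formatted)
    } else {
        Err(io::Error::new(io::ErrorKind::Other, "rustfmt failed"))
    }
}

fn main() -> io::Result<()> {
    println!("{}", rustfmt_string("fn main(){println!(\"hi\");}".into())?);
    Ok(())
}
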
- let stdin_handle = ::std::thread::spawn(move || { - let _ = child_stdin.write_all(source.as_bytes()); - source - }); - - let mut output = vec![]; - io::copy(&mut child_stdout, &mut output)?; - - let status = child.wait()?; - let source = stdin_handle.join().expect( - "The thread writing to rustfmt's stdin doesn't do \ - anything that could panic", - ); - - match String::from_utf8(output) { - Ok(bindings) => match status.code() { - Some(0) => Ok(bindings), - Some(2) => Err(io::Error::new( - io::ErrorKind::Other, - "Rustfmt parsing errors.".to_string(), - )), - Some(3) => { - rustfmt_non_fatal_error_diagnostic( - "Rustfmt could not format some lines", - &self.options, - ); - Ok(bindings) - } - _ => Err(io::Error::new( - io::ErrorKind::Other, - "Internal rustfmt error".to_string(), - )), - }, - _ => Ok(source), - } - } -} - -fn rustfmt_non_fatal_error_diagnostic(msg: &str, _options: &BindgenOptions) { - warn!("{msg}"); - - #[cfg(feature = "experimental")] - if _options.emit_diagnostics { - use crate::diagnostics::{Diagnostic, Level}; - - Diagnostic::default() - .with_title(msg, Level::Warning) - .add_annotation( - "The bindings will be generated but not formatted.", - Level::Note, - ) - .display(); - } -} - -impl std::fmt::Display for Bindings { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - let mut bytes = vec![]; - self.write(Box::new(&mut bytes) as Box) - .expect("writing to a vec cannot fail"); - f.write_str( - std::str::from_utf8(&bytes) - .expect("we should only write bindings that are valid utf-8"), - ) - } -} - -/// Determines whether the given cursor is in any of the files matched by the -/// options. -fn filter_builtins(ctx: &BindgenContext, cursor: &clang::Cursor) -> bool { - ctx.options().builtins || !cursor.is_builtin() -} - -/// Parse one `Item` from the Clang cursor. -fn parse_one( - ctx: &mut BindgenContext, - cursor: clang::Cursor, - parent: Option, -) { - if !filter_builtins(ctx, &cursor) { - return; - } - - match Item::parse(cursor, parent, ctx) { - Ok(..) => {} - Err(ParseError::Continue) => {} - Err(ParseError::Recurse) => { - cursor - .visit_sorted(ctx, |ctx, child| parse_one(ctx, child, parent)); - } - } -} - -/// Parse the Clang AST into our `Item` internal representation. -fn parse(context: &mut BindgenContext) -> Result<(), BindgenError> { - use clang_sys::*; - - let mut error = None; - for d in &context.translation_unit().diags() { - let msg = d.format(); - let is_err = d.severity() >= CXDiagnostic_Error; - if is_err { - let error = error.get_or_insert_with(String::new); - error.push_str(&msg); - error.push('\n'); - } else { - eprintln!("clang diag: {msg}"); - } - } - - if let Some(message) = error { - return Err(BindgenError::ClangDiagnostic(message)); - } - - let cursor = context.translation_unit().cursor(); - - if context.options().emit_ast { - fn dump_if_not_builtin(cur: &clang::Cursor) -> CXChildVisitResult { - if cur.is_builtin() { - CXChildVisit_Continue - } else { - clang::ast_dump(cur, 0) - } - } - cursor.visit(|cur| dump_if_not_builtin(&cur)); - } - - let root = context.root_module(); - context.with_module(root, |ctx| { - cursor.visit_sorted(ctx, |ctx, child| parse_one(ctx, child, None)); - }); - - assert_eq!( - context.current_module(), - context.root_module(), - "How did this happen?" 
- ); - Ok(()) -} - -/// Extracted Clang version data -#[derive(Debug)] -pub struct ClangVersion { - /// Major and minor semver, if parsing was successful - pub parsed: Option<(u32, u32)>, - /// full version string - pub full: String, -} - -/// Get the major and the minor semver numbers of Clang's version -pub fn clang_version() -> ClangVersion { - ensure_libclang_is_loaded(); - - //Debian clang version 11.0.1-2 - let raw_v: String = clang::extract_clang_version(); - let split_v: Option> = raw_v - .split_whitespace() - .find(|t| t.chars().next().is_some_and(|v| v.is_ascii_digit())) - .map(|v| v.split('.').collect()); - if let Some(v) = split_v { - if v.len() >= 2 { - let maybe_major = v[0].parse::(); - let maybe_minor = v[1].parse::(); - if let (Ok(major), Ok(minor)) = (maybe_major, maybe_minor) { - return ClangVersion { - parsed: Some((major, minor)), - full: raw_v.clone(), - }; - } - } - } - ClangVersion { - parsed: None, - full: raw_v.clone(), - } -} - -fn env_var + AsRef>( - parse_callbacks: &[Rc], - key: K, -) -> Result { - for callback in parse_callbacks { - callback.read_env_var(key.as_ref()); - } - env::var(key) -} - -/// Looks for the env var `var_${TARGET}`, and falls back to just `var` when it is not found. -fn get_target_dependent_env_var( - parse_callbacks: &[Rc], - var: &str, -) -> Option { - if let Ok(target) = env_var(parse_callbacks, "TARGET") { - if let Ok(v) = env_var(parse_callbacks, format!("{var}_{target}")) { - return Some(v); - } - if let Ok(v) = env_var( - parse_callbacks, - format!("{var}_{}", target.replace('-', "_")), - ) { - return Some(v); - } - } - - env_var(parse_callbacks, var).ok() -} - -/// A `ParseCallbacks` implementation that will act on file includes by echoing a rerun-if-changed -/// line and on env variable usage by echoing a rerun-if-env-changed line -/// -/// When running inside a `build.rs` script, this can be used to make cargo invalidate the -/// generated bindings whenever any of the files included from the header change: -/// ``` -/// use bindgen::builder; -/// let bindings = builder() -/// .header("path/to/input/header") -/// .parse_callbacks(Box::new(bindgen::CargoCallbacks::new())) -/// .generate(); -/// ``` -#[derive(Debug)] -pub struct CargoCallbacks { - rerun_on_header_files: bool, -} - -/// Create a new `CargoCallbacks` value with [`CargoCallbacks::rerun_on_header_files`] disabled. -/// -/// This constructor has been deprecated in favor of [`CargoCallbacks::new`] where -/// [`CargoCallbacks::rerun_on_header_files`] is enabled by default. -#[deprecated = "Use `CargoCallbacks::new()` instead. Please, check the documentation for further information."] -pub const CargoCallbacks: CargoCallbacks = CargoCallbacks { - rerun_on_header_files: false, -}; - -impl CargoCallbacks { - /// Create a new `CargoCallbacks` value. - pub fn new() -> Self { - Self { - rerun_on_header_files: true, - } - } - - /// Whether Cargo should re-run the build script if any of the input header files has changed. - /// - /// This option is enabled by default unless the deprecated [`const@CargoCallbacks`] - /// constructor is used. 
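`CargoCallbacks` is only one implementation of the `ParseCallbacks` trait used throughout this file; a project can hook the same extension points with its own type. Below is a minimal, hypothetical example — the macro name is illustrative, not a vetted list, and only methods that appear in this file (`will_parse_macro`, `include_file`) are overridden.

use bindgen::callbacks::{MacroParsingBehavior, ParseCallbacks};

/// Hypothetical callbacks: skip a macro the cexpr-based parser cannot handle
/// for a particular project, and emit cargo rerun lines for included files.
#[derive(Debug)]
struct ProjectCallbacks;

impl ParseCallbacks for ProjectCallbacks {
    fn will_parse_macro(&self, name: &str) -> MacroParsingBehavior {
        // Illustrative name only; a real list depends on the headers.
        if name == "UNPARSEABLE_MACRO" {
            MacroParsingBehavior::Ignore
        } else {
            MacroParsingBehavior::Default
        }
    }

    fn include_file(&self, filename: &str) {
        println!("cargo:rerun-if-changed={filename}");
    }
}

// Wired up exactly like CargoCallbacks:
//     bindgen::Builder::default()
//         .header("wrapper.h")
//         .parse_callbacks(Box::new(ProjectCallbacks))
//         .generate();
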
- pub fn rerun_on_header_files(mut self, doit: bool) -> Self { - self.rerun_on_header_files = doit; - self - } -} - -impl Default for CargoCallbacks { - fn default() -> Self { - Self::new() - } -} - -impl callbacks::ParseCallbacks for CargoCallbacks { - fn header_file(&self, filename: &str) { - if self.rerun_on_header_files { - println!("cargo:rerun-if-changed={filename}"); - } - } - - fn include_file(&self, filename: &str) { - println!("cargo:rerun-if-changed={filename}"); - } - - fn read_env_var(&self, key: &str) { - println!("cargo:rerun-if-env-changed={key}"); - } -} - -/// Test `command_line_flag` function. -#[test] -fn commandline_flag_unit_test_function() { - //Test 1 - let bindings = builder(); - let command_line_flags = bindings.command_line_flags(); - - let test_cases = [ - "--rust-target", - "--no-derive-default", - "--generate", - "functions,types,vars,methods,constructors,destructors", - ] - .iter() - .map(|&x| x.into()) - .collect::>(); - - assert!(test_cases.iter().all(|x| command_line_flags.contains(x))); - - //Test 2 - let bindings = builder() - .header("input_header") - .allowlist_type("Distinct_Type") - .allowlist_function("safe_function"); - - let command_line_flags = bindings.command_line_flags(); - let test_cases = [ - "--rust-target", - "input_header", - "--no-derive-default", - "--generate", - "functions,types,vars,methods,constructors,destructors", - "--allowlist-type", - "Distinct_Type", - "--allowlist-function", - "safe_function", - ] - .iter() - .map(|&x| x.into()) - .collect::>(); - println!("{command_line_flags:?}"); - - assert!(test_cases.iter().all(|x| command_line_flags.contains(x))); -} - -#[test] -fn test_rust_to_clang_target() { - assert_eq!( - rust_to_clang_target("aarch64-apple-ios").as_ref(), - "arm64-apple-ios" - ); -} - -#[test] -fn test_rust_to_clang_target_riscv() { - assert_eq!( - rust_to_clang_target("riscv64gc-unknown-linux-gnu").as_ref(), - "riscv64-unknown-linux-gnu" - ); - assert_eq!( - rust_to_clang_target("riscv64imac-unknown-none-elf").as_ref(), - "riscv64-unknown-none-elf" - ); - assert_eq!( - rust_to_clang_target("riscv32imc-unknown-none-elf").as_ref(), - "riscv32-unknown-none-elf" - ); - assert_eq!( - rust_to_clang_target("riscv32imac-unknown-none-elf").as_ref(), - "riscv32-unknown-none-elf" - ); - assert_eq!( - rust_to_clang_target("riscv32imafc-unknown-none-elf").as_ref(), - "riscv32-unknown-none-elf" - ); - assert_eq!( - rust_to_clang_target("riscv32i-unknown-none-elf").as_ref(), - "riscv32-unknown-none-elf" - ); -} - -#[test] -fn test_rust_to_clang_target_espidf() { - assert_eq!( - rust_to_clang_target("riscv32imc-esp-espidf").as_ref(), - "riscv32-esp-elf" - ); - assert_eq!( - rust_to_clang_target("xtensa-esp32-espidf").as_ref(), - "xtensa-esp32-elf" - ); -} - -#[test] -fn test_rust_to_clang_target_simulator() { - assert_eq!( - rust_to_clang_target("aarch64-apple-ios-sim").as_ref(), - "arm64-apple-ios-simulator" - ); - assert_eq!( - rust_to_clang_target("aarch64-apple-tvos-sim").as_ref(), - "arm64-apple-tvos-simulator" - ); - assert_eq!( - rust_to_clang_target("aarch64-apple-watchos-sim").as_ref(), - "arm64-apple-watchos-simulator" - ); -} diff --git a/vendor/bindgen/log_stubs.rs b/vendor/bindgen/log_stubs.rs deleted file mode 100644 index 51d2f81fd1346b..00000000000000 --- a/vendor/bindgen/log_stubs.rs +++ /dev/null @@ -1,38 +0,0 @@ -#![allow(unused)] - -#[clippy::format_args] -macro_rules! 
log { - (target: $target:expr, $lvl:expr, $($arg:tt)+) => {{ - let _ = $target; - let _ = log!($lvl, $($arg)+); - }}; - ($lvl:expr, $($arg:tt)+) => {{ - let _ = $lvl; - let _ = format_args!($($arg)+); - }}; -} -#[clippy::format_args] -macro_rules! error { - (target: $target:expr, $($arg:tt)+) => { log!(target: $target, "", $($arg)+) }; - ($($arg:tt)+) => { log!("", $($arg)+) }; -} -#[clippy::format_args] -macro_rules! warn { - (target: $target:expr, $($arg:tt)*) => { log!(target: $target, "", $($arg)*) }; - ($($arg:tt)*) => { log!("", $($arg)*) }; -} -#[clippy::format_args] -macro_rules! info { - (target: $target:expr, $($arg:tt)+) => { log!(target: $target, "", $($arg)+) }; - ($($arg:tt)+) => { log!("", $($arg)+) }; -} -#[clippy::format_args] -macro_rules! debug { - (target: $target:expr, $($arg:tt)+) => { log!(target: $target, "", $($arg)+) }; - ($($arg:tt)+) => { log!("", $($arg)+) }; -} -#[clippy::format_args] -macro_rules! trace { - (target: $target:expr, $($arg:tt)+) => { log!(target: $target, "", $($arg)+) }; - ($($arg:tt)+) => { log!("", $($arg)+) }; -} diff --git a/vendor/bindgen/options/as_args.rs b/vendor/bindgen/options/as_args.rs deleted file mode 100644 index 83103fdaf48a40..00000000000000 --- a/vendor/bindgen/options/as_args.rs +++ /dev/null @@ -1,52 +0,0 @@ -use std::path::PathBuf; - -use crate::regex_set::RegexSet; - -/// Trait used to turn [`crate::BindgenOptions`] fields into CLI args. -pub(super) trait AsArgs { - fn as_args(&self, args: &mut Vec, flag: &str); -} - -/// If the `bool` is `true`, `flag` is pushed into `args`. -/// -/// be careful about the truth value of the field as some options, like `--no-layout-tests`, are -/// actually negations of the fields. -impl AsArgs for bool { - fn as_args(&self, args: &mut Vec, flag: &str) { - if *self { - args.push(flag.to_string()); - } - } -} - -/// Iterate over all the items of the `RegexSet` and push `flag` followed by the item into `args` -/// for each item. -impl AsArgs for RegexSet { - fn as_args(&self, args: &mut Vec, flag: &str) { - for item in self.get_items() { - args.extend_from_slice(&[flag.to_owned(), item.clone().into()]); - } - } -} - -/// If the `Option` is `Some(value)`, push `flag` followed by `value`. -impl AsArgs for Option { - fn as_args(&self, args: &mut Vec, flag: &str) { - if let Some(string) = self { - args.extend_from_slice(&[flag.to_owned(), string.clone()]); - } - } -} - -/// If the `Option` is `Some(path)`, push `flag` followed by the [`std::path::Path::display`] -/// representation of `path`. 
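The same pattern, boiled down to a standalone miniature (trait and impl names invented, no bindgen types involved), shows how a flat list of CLI flags falls out of per-field impls:

// Miniature of the option-to-flag pattern: each field type knows how to
// append zero or more CLI arguments for itself.
trait ToFlags {
    fn to_flags(&self, args: &mut Vec<String>, flag: &str);
}

impl ToFlags for bool {
    fn to_flags(&self, args: &mut Vec<String>, flag: &str) {
        if *self {
            args.push(flag.to_string());
        }
    }
}

impl ToFlags for Option<String> {
    fn to_flags(&self, args: &mut Vec<String>, flag: &str) {
        if let Some(value) = self {
            args.extend_from_slice(&[flag.to_string(), value.clone()]);
        }
    }
}

fn main() {
    let mut args = Vec::new();
    true.to_flags(&mut args, "--no-layout-tests");
    Some("core".to_string()).to_flags(&mut args, "--ctypes-prefix");
    None::<String>.to_flags(&mut args, "--output");
    assert_eq!(args, ["--no-layout-tests", "--ctypes-prefix", "core"]);
}
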
-impl AsArgs for Option { - fn as_args(&self, args: &mut Vec, flag: &str) { - if let Some(path) = self { - args.extend_from_slice(&[ - flag.to_owned(), - path.display().to_string(), - ]); - } - } -} diff --git a/vendor/bindgen/options/cli.rs b/vendor/bindgen/options/cli.rs deleted file mode 100644 index bce7faed35263d..00000000000000 --- a/vendor/bindgen/options/cli.rs +++ /dev/null @@ -1,1151 +0,0 @@ -#![allow(unused_qualifications)] // Clap somehow generates a lot of these - -use crate::{ - builder, - callbacks::{ - AttributeInfo, DeriveInfo, ItemInfo, ParseCallbacks, TypeKind, - }, - features::{RustEdition, EARLIEST_STABLE_RUST}, - regex_set::RegexSet, - Abi, AliasVariation, Builder, CodegenConfig, EnumVariation, - FieldVisibilityKind, Formatter, MacroTypeVariation, NonCopyUnionStyle, - RustTarget, -}; -use clap::{ - error::{Error, ErrorKind}, - CommandFactory, Parser, -}; -use proc_macro2::TokenStream; -use std::io; -use std::path::{Path, PathBuf}; -use std::str::FromStr; -use std::{fs::File, process::exit}; - -fn rust_target_help() -> String { - format!( - "Version of the Rust compiler to target. Any Rust version after {EARLIEST_STABLE_RUST} is supported. Defaults to {}.", - RustTarget::default() - ) -} - -fn rust_edition_help() -> String { - format!("Rust edition to target. Defaults to the latest edition supported by the chosen Rust target. Possible values: ({}). ", RustEdition::ALL.map(|e| e.to_string()).join("|")) -} - -fn parse_codegen_config( - what_to_generate: &str, -) -> Result { - let mut config = CodegenConfig::empty(); - for what in what_to_generate.split(',') { - match what { - "functions" => config.insert(CodegenConfig::FUNCTIONS), - "types" => config.insert(CodegenConfig::TYPES), - "vars" => config.insert(CodegenConfig::VARS), - "methods" => config.insert(CodegenConfig::METHODS), - "constructors" => config.insert(CodegenConfig::CONSTRUCTORS), - "destructors" => config.insert(CodegenConfig::DESTRUCTORS), - otherwise => { - return Err(Error::raw( - ErrorKind::InvalidValue, - format!("Unknown codegen item kind: {otherwise}"), - )); - } - } - } - - Ok(config) -} - -fn parse_rustfmt_config_path(path_str: &str) -> Result { - let path = Path::new(path_str); - - if !path.is_absolute() { - return Err(Error::raw( - ErrorKind::InvalidValue, - "--rustfmt-configuration-file needs to be an absolute path!", - )); - } - - if path.to_str().is_none() { - return Err(Error::raw( - ErrorKind::InvalidUtf8, - "--rustfmt-configuration-file contains non-valid UTF8 characters.", - )); - } - - Ok(path.to_path_buf()) -} - -fn parse_abi_override(abi_override: &str) -> Result<(Abi, String), Error> { - let (regex, abi_str) = abi_override - .rsplit_once('=') - .ok_or_else(|| Error::raw(ErrorKind::InvalidValue, "Missing `=`"))?; - - let abi = abi_str - .parse() - .map_err(|err| Error::raw(ErrorKind::InvalidValue, err))?; - - Ok((abi, regex.to_owned())) -} - -fn parse_custom_derive( - custom_derive: &str, -) -> Result<(Vec, String), Error> { - let (regex, derives) = custom_derive - .rsplit_once('=') - .ok_or_else(|| Error::raw(ErrorKind::InvalidValue, "Missing `=`"))?; - - let derives = derives.split(',').map(|s| s.to_owned()).collect(); - - Ok((derives, regex.to_owned())) -} - -fn parse_custom_attribute( - custom_attribute: &str, -) -> Result<(Vec, String), Error> { - let mut brace_level = 0; - let (regex, attributes) = custom_attribute - .rsplit_once(|c| { - match c { - ']' => brace_level += 1, - '[' => brace_level -= 1, - _ => {} - } - c == '=' && brace_level == 0 - }) - .ok_or_else(|| 
Error::raw(ErrorKind::InvalidValue, "Missing `=`"))?; - - let mut brace_level = 0; - let attributes = attributes - .split(|c| { - match c { - ']' => brace_level += 1, - '[' => brace_level -= 1, - _ => {} - } - c == ',' && brace_level == 0 - }) - .map(|s| s.to_owned()) - .collect::>(); - - for attribute in &attributes { - if let Err(err) = TokenStream::from_str(attribute) { - return Err(Error::raw(ErrorKind::InvalidValue, err)); - } - } - - Ok((attributes, regex.to_owned())) -} - -#[derive(Parser, Debug)] -#[clap( - about = "Generates Rust bindings from C/C++ headers.", - override_usage = "bindgen
-- ...", - trailing_var_arg = true -)] -#[allow(clippy::doc_markdown)] -struct BindgenCommand { - /// C or C++ header file. - header: Option, - /// Path to write depfile to. - #[arg(long)] - depfile: Option, - /// The default STYLE of code used to generate enums. - #[arg(long, value_name = "STYLE")] - default_enum_style: Option, - /// Mark any enum whose name matches REGEX as a set of bitfield flags. - #[arg(long, value_name = "REGEX")] - bitfield_enum: Vec, - /// Mark any enum whose name matches REGEX as a newtype. - #[arg(long, value_name = "REGEX")] - newtype_enum: Vec, - /// Mark any enum whose name matches REGEX as a global newtype. - #[arg(long, value_name = "REGEX")] - newtype_global_enum: Vec, - /// Mark any enum whose name matches REGEX as a Rust enum. - #[arg(long, value_name = "REGEX")] - rustified_enum: Vec, - /// Mark any enum whose name matches REGEX as a non-exhaustive Rust enum. - #[arg(long, value_name = "REGEX")] - rustified_non_exhaustive_enum: Vec, - /// Mark any enum whose name matches REGEX as a series of constants. - #[arg(long, value_name = "REGEX")] - constified_enum: Vec, - /// Mark any enum whose name matches REGEX as a module of constants. - #[arg(long, value_name = "REGEX")] - constified_enum_module: Vec, - /// The default signed/unsigned TYPE for C macro constants. - #[arg(long, value_name = "TYPE")] - default_macro_constant_type: Option, - /// The default STYLE of code used to generate typedefs. - #[arg(long, value_name = "STYLE")] - default_alias_style: Option, - /// Mark any typedef alias whose name matches REGEX to use normal type aliasing. - #[arg(long, value_name = "REGEX")] - normal_alias: Vec, - /// Mark any typedef alias whose name matches REGEX to have a new type generated for it. - #[arg(long, value_name = "REGEX")] - new_type_alias: Vec, - /// Mark any typedef alias whose name matches REGEX to have a new type with Deref and DerefMut to the inner type. - #[arg(long, value_name = "REGEX")] - new_type_alias_deref: Vec, - /// The default STYLE of code used to generate unions with non-Copy members. Note that ManuallyDrop was first stabilized in Rust 1.20.0. - #[arg(long, value_name = "STYLE")] - default_non_copy_union_style: Option, - /// Mark any union whose name matches REGEX and who has a non-Copy member to use a bindgen-generated wrapper for fields. - #[arg(long, value_name = "REGEX")] - bindgen_wrapper_union: Vec, - /// Mark any union whose name matches REGEX and who has a non-Copy member to use ManuallyDrop (stabilized in Rust 1.20.0) for fields. - #[arg(long, value_name = "REGEX")] - manually_drop_union: Vec, - /// Mark TYPE as hidden. - #[arg(long, value_name = "TYPE")] - blocklist_type: Vec, - /// Mark FUNCTION as hidden. - #[arg(long, value_name = "FUNCTION")] - blocklist_function: Vec, - /// Mark ITEM as hidden. - #[arg(long, value_name = "ITEM")] - blocklist_item: Vec, - /// Mark FILE as hidden. - #[arg(long, value_name = "FILE")] - blocklist_file: Vec, - /// Mark VAR as hidden. - #[arg(long, value_name = "VAR")] - blocklist_var: Vec, - /// Avoid generating layout tests for any type. - #[arg(long)] - no_layout_tests: bool, - /// Avoid deriving Copy on any type. - #[arg(long)] - no_derive_copy: bool, - /// Avoid deriving Debug on any type. - #[arg(long)] - no_derive_debug: bool, - /// Avoid deriving Default on any type. - #[arg(long, hide = true)] - no_derive_default: bool, - /// Create a Debug implementation if it cannot be derived automatically. 
- #[arg(long)] - impl_debug: bool, - /// Create a PartialEq implementation if it cannot be derived automatically. - #[arg(long)] - impl_partialeq: bool, - /// Derive Default on any type. - #[arg(long)] - with_derive_default: bool, - /// Derive Hash on any type. - #[arg(long)] - with_derive_hash: bool, - /// Derive PartialEq on any type. - #[arg(long)] - with_derive_partialeq: bool, - /// Derive PartialOrd on any type. - #[arg(long)] - with_derive_partialord: bool, - /// Derive Eq on any type. - #[arg(long)] - with_derive_eq: bool, - /// Derive Ord on any type. - #[arg(long)] - with_derive_ord: bool, - /// Avoid including doc comments in the output, see: - #[arg(long)] - no_doc_comments: bool, - /// Disable allowlisting types recursively. This will cause bindgen to emit Rust code that won't compile! See the `bindgen::Builder::allowlist_recursively` method's documentation for details. - #[arg(long)] - no_recursive_allowlist: bool, - /// Use extern crate instead of use for objc. - #[arg(long)] - objc_extern_crate: bool, - /// Generate block signatures instead of void pointers. - #[arg(long)] - generate_block: bool, - /// Generate string constants as `&CStr` instead of `&[u8]`. - #[arg(long)] - generate_cstr: bool, - /// Use extern crate instead of use for block. - #[arg(long)] - block_extern_crate: bool, - /// Do not trust the libclang-provided mangling - #[arg(long)] - distrust_clang_mangling: bool, - /// Output bindings for builtin definitions, e.g. __builtin_va_list. - #[arg(long)] - builtins: bool, - /// Use the given PREFIX before raw types instead of ::std::os::raw. - #[arg(long, value_name = "PREFIX")] - ctypes_prefix: Option, - /// Use the given PREFIX for anonymous fields. - #[arg(long, value_name = "PREFIX")] - anon_fields_prefix: Option, - /// Time the different bindgen phases and print to stderr - #[arg(long)] - time_phases: bool, - /// Output the Clang AST for debugging purposes. - #[arg(long)] - emit_clang_ast: bool, - /// Output our internal IR for debugging purposes. - #[arg(long)] - emit_ir: bool, - /// Dump a graphviz dot file to PATH. - #[arg(long, value_name = "PATH")] - emit_ir_graphviz: Option, - /// Enable support for C++ namespaces. - #[arg(long)] - enable_cxx_namespaces: bool, - /// Disable namespacing via mangling, causing bindgen to generate names like `Baz` instead of `foo_bar_Baz` for an input name `foo::bar::Baz`. - #[arg(long)] - disable_name_namespacing: bool, - /// Disable nested struct naming, causing bindgen to generate names like `bar` instead of `foo_bar` for a nested definition `struct foo { struct bar { } b; };`. - #[arg(long)] - disable_nested_struct_naming: bool, - /// Disable support for native Rust unions. - #[arg(long)] - disable_untagged_union: bool, - /// Suppress insertion of bindgen's version identifier into generated bindings. - #[arg(long)] - disable_header_comment: bool, - /// Do not generate bindings for functions or methods. This is useful when you only care about struct layouts. - #[arg(long)] - ignore_functions: bool, - /// Generate only given items, split by commas. Valid values are `functions`,`types`, `vars`, `methods`, `constructors` and `destructors`. - #[arg(long, value_parser = parse_codegen_config)] - generate: Option, - /// Do not generate bindings for methods. - #[arg(long)] - ignore_methods: bool, - /// Do not automatically convert floats to f32/f64. - #[arg(long)] - no_convert_floats: bool, - /// Do not prepend the enum name to constant or newtype variants. 
- #[arg(long)] - no_prepend_enum_name: bool, - /// Do not try to detect default include paths - #[arg(long)] - no_include_path_detection: bool, - /// Try to fit macro constants into types smaller than u32/i32 - #[arg(long)] - fit_macro_constant_types: bool, - /// Mark TYPE as opaque. - #[arg(long, value_name = "TYPE")] - opaque_type: Vec, - /// Write Rust bindings to OUTPUT. - #[arg(long, short, value_name = "OUTPUT")] - output: Option, - /// Add a raw line of Rust code at the beginning of output. - #[arg(long)] - raw_line: Vec, - /// Add a RAW_LINE of Rust code to a given module with name MODULE_NAME. - #[arg(long, number_of_values = 2, value_names = ["MODULE_NAME", "RAW_LINE"])] - module_raw_line: Vec, - #[arg(long, help = rust_target_help())] - rust_target: Option, - #[arg(long, value_name = "EDITION", help = rust_edition_help())] - rust_edition: Option, - /// Use types from Rust core instead of std. - #[arg(long)] - use_core: bool, - /// Conservatively generate inline namespaces to avoid name conflicts. - #[arg(long)] - conservative_inline_namespaces: bool, - /// Allowlist all the free-standing functions matching REGEX. Other non-allowlisted functions will not be generated. - #[arg(long, value_name = "REGEX")] - allowlist_function: Vec, - /// Generate inline functions. - #[arg(long)] - generate_inline_functions: bool, - /// Only generate types matching REGEX. Other non-allowlisted types will not be generated. - #[arg(long, value_name = "REGEX")] - allowlist_type: Vec, - /// Allowlist all the free-standing variables matching REGEX. Other non-allowlisted variables will not be generated. - #[arg(long, value_name = "REGEX")] - allowlist_var: Vec, - /// Allowlist all contents of PATH. - #[arg(long, value_name = "PATH")] - allowlist_file: Vec, - /// Allowlist all items matching REGEX. Other non-allowlisted items will not be generated. - #[arg(long, value_name = "REGEX")] - allowlist_item: Vec, - /// Print verbose error messages. - #[arg(long)] - verbose: bool, - /// Preprocess and dump the input header files to disk. Useful when debugging bindgen, using C-Reduce, or when filing issues. The resulting file will be named something like `__bindgen.i` or `__bindgen.ii`. - #[arg(long)] - dump_preprocessed_input: bool, - /// Do not record matching items in the regex sets. This disables reporting of unused items. - #[arg(long)] - no_record_matches: bool, - /// Do not bind size_t as usize (useful on platforms where those types are incompatible). - #[arg(long = "no-size_t-is-usize")] - no_size_t_is_usize: bool, - /// Do not format the generated bindings with rustfmt. This option is deprecated, please use - /// `--formatter=none` instead. - #[arg(long)] - no_rustfmt_bindings: bool, - /// Which FORMATTER should be used for the bindings - #[arg( - long, - value_name = "FORMATTER", - conflicts_with = "no_rustfmt_bindings" - )] - formatter: Option, - /// The absolute PATH to the rustfmt configuration file. The configuration file will be used for formatting the bindings. This parameter sets `formatter` to `rustfmt`. - #[arg(long, value_name = "PATH", conflicts_with = "no_rustfmt_bindings", value_parser=parse_rustfmt_config_path)] - rustfmt_configuration_file: Option, - /// Avoid deriving PartialEq for types matching REGEX. - #[arg(long, value_name = "REGEX")] - no_partialeq: Vec, - /// Avoid deriving Copy and Clone for types matching REGEX. - #[arg(long, value_name = "REGEX")] - no_copy: Vec, - /// Avoid deriving Debug for types matching REGEX. 
- #[arg(long, value_name = "REGEX")] - no_debug: Vec, - /// Avoid deriving/implementing Default for types matching REGEX. - #[arg(long, value_name = "REGEX")] - no_default: Vec, - /// Avoid deriving Hash for types matching REGEX. - #[arg(long, value_name = "REGEX")] - no_hash: Vec, - /// Add `#[must_use]` annotation to types matching REGEX. - #[arg(long, value_name = "REGEX")] - must_use_type: Vec, - /// Enables detecting unexposed attributes in functions (slow). Used to generate `#[must_use]` annotations. - #[arg(long)] - enable_function_attribute_detection: bool, - /// Use `*const [T; size]` instead of `*const T` for C arrays - #[arg(long)] - use_array_pointers_in_arguments: bool, - /// The NAME to be used in a #[link(wasm_import_module = ...)] statement - #[arg(long, value_name = "NAME")] - wasm_import_module_name: Option, - /// Use dynamic loading mode with the given library NAME. - #[arg(long, value_name = "NAME")] - dynamic_loading: Option, - /// Require successful linkage to all functions in the library. - #[arg(long)] - dynamic_link_require_all: bool, - /// Prefix the name of exported symbols. - #[arg(long)] - prefix_link_name: Option, - /// Makes generated bindings `pub` only for items if the items are publicly accessible in C++. - #[arg(long)] - respect_cxx_access_specs: bool, - /// Always translate enum integer types to native Rust integer types. - #[arg(long)] - translate_enum_integer_types: bool, - /// Generate types with C style naming. - #[arg(long)] - c_naming: bool, - /// Always output explicit padding fields. - #[arg(long)] - explicit_padding: bool, - /// Always be specific about the 'receiver' of a virtual function. - #[arg(long)] - use_specific_virtual_function_receiver: bool, - /// Use distinct char16_t - #[arg(long)] - use_distinct_char16_t: bool, - /// Output C++ overloaded operators - #[arg(long)] - represent_cxx_operators: bool, - /// Enables generation of vtable functions. - #[arg(long)] - vtable_generation: bool, - /// Enables sorting of code generation in a predefined manner. - #[arg(long)] - sort_semantically: bool, - /// Deduplicates extern blocks. - #[arg(long)] - merge_extern_blocks: bool, - /// Overrides the ABI of functions matching REGEX. The OVERRIDE value must be of the shape REGEX=ABI where ABI can be one of C, stdcall, efiapi, fastcall, thiscall, aapcs, win64 or C-unwind<.> - #[arg(long, value_name = "OVERRIDE", value_parser = parse_abi_override)] - override_abi: Vec<(Abi, String)>, - /// Wrap unsafe operations in unsafe blocks. - #[arg(long)] - wrap_unsafe_ops: bool, - /// Enable fallback for clang macro parsing. - #[arg(long)] - clang_macro_fallback: bool, - /// Set path for temporary files generated by fallback for clang macro parsing. - #[arg(long)] - clang_macro_fallback_build_dir: Option, - /// Use DSTs to represent structures with flexible array members. - #[arg(long)] - flexarray_dst: bool, - /// Derive custom traits on any kind of type. The CUSTOM value must be of the shape REGEX=DERIVE where DERIVE is a coma-separated list of derive macros. - #[arg(long, value_name = "CUSTOM", value_parser = parse_custom_derive)] - with_derive_custom: Vec<(Vec, String)>, - /// Derive custom traits on a `struct`. The CUSTOM value must be of the shape REGEX=DERIVE where DERIVE is a coma-separated list of derive macros. - #[arg(long, value_name = "CUSTOM", value_parser = parse_custom_derive)] - with_derive_custom_struct: Vec<(Vec, String)>, - /// Derive custom traits on an `enum`. 
The CUSTOM value must be of the shape REGEX=DERIVE where DERIVE is a coma-separated list of derive macros. - #[arg(long, value_name = "CUSTOM", value_parser = parse_custom_derive)] - with_derive_custom_enum: Vec<(Vec, String)>, - /// Derive custom traits on a `union`. The CUSTOM value must be of the shape REGEX=DERIVE where DERIVE is a coma-separated list of derive macros. - #[arg(long, value_name = "CUSTOM", value_parser = parse_custom_derive)] - with_derive_custom_union: Vec<(Vec, String)>, - /// Add custom attributes on any kind of type. The CUSTOM value must be of the shape REGEX=ATTRIBUTE where ATTRIBUTE is a coma-separated list of attributes. - #[arg(long, value_name = "CUSTOM", value_parser = parse_custom_attribute)] - with_attribute_custom: Vec<(Vec, String)>, - /// Add custom attributes on a `struct`. The CUSTOM value must be of the shape REGEX=ATTRIBUTE where ATTRIBUTE is a coma-separated list of attributes. - #[arg(long, value_name = "CUSTOM", value_parser = parse_custom_attribute)] - with_attribute_custom_struct: Vec<(Vec, String)>, - /// Add custom attributes on an `enum`. The CUSTOM value must be of the shape REGEX=ATTRIBUTE where ATTRIBUTE is a coma-separated list of attributes. - #[arg(long, value_name = "CUSTOM", value_parser = parse_custom_attribute)] - with_attribute_custom_enum: Vec<(Vec, String)>, - /// Add custom attributes on a `union`. The CUSTOM value must be of the shape REGEX=ATTRIBUTE where ATTRIBUTE is a coma-separated list of attributes. - #[arg(long, value_name = "CUSTOM", value_parser = parse_custom_attribute)] - with_attribute_custom_union: Vec<(Vec, String)>, - /// Generate wrappers for `static` and `static inline` functions. - #[arg(long)] - wrap_static_fns: bool, - /// Sets the PATH for the source file that must be created due to the presence of `static` and - /// `static inline` functions. - #[arg(long, value_name = "PATH")] - wrap_static_fns_path: Option, - /// Sets the SUFFIX added to the extern wrapper functions generated for `static` and `static - /// inline` functions. - #[arg(long, value_name = "SUFFIX")] - wrap_static_fns_suffix: Option, - /// Set the default VISIBILITY of fields, including bitfields and accessor methods for - /// bitfields. This flag is ignored if the `--respect-cxx-access-specs` flag is used. - #[arg(long, value_name = "VISIBILITY")] - default_visibility: Option, - /// Whether to generate C++ functions marked with "=delete" even though they - /// can't be called. - #[arg(long)] - generate_deleted_functions: bool, - /// Whether to generate C++ "pure virtual" functions even though they can't - /// be called. - #[arg(long)] - generate_pure_virtual_functions: bool, - /// Whether to generate C++ private functions even though they can't - /// be called. - #[arg(long)] - generate_private_functions: bool, - /// Whether to emit diagnostics or not. - #[cfg(feature = "experimental")] - #[arg(long, requires = "experimental")] - emit_diagnostics: bool, - /// Generates completions for the specified SHELL, sends them to `stdout` and exits. - #[arg(long, value_name = "SHELL")] - generate_shell_completions: Option, - /// Enables experimental features. - #[arg(long)] - experimental: bool, - /// Prints the version, and exits - #[arg(short = 'V', long)] - version: bool, - /// Arguments to be passed straight through to clang. - clang_args: Vec, -} - -/// Construct a new [`Builder`](./struct.Builder.html) from command line flags. 
-pub fn builder_from_flags( - args: I, -) -> Result<(Builder, Box, bool), io::Error> -where - I: Iterator, -{ - let command = BindgenCommand::parse_from(args); - - let BindgenCommand { - header, - depfile, - default_enum_style, - bitfield_enum, - newtype_enum, - newtype_global_enum, - rustified_enum, - rustified_non_exhaustive_enum, - constified_enum, - constified_enum_module, - default_macro_constant_type, - default_alias_style, - normal_alias, - new_type_alias, - new_type_alias_deref, - default_non_copy_union_style, - bindgen_wrapper_union, - manually_drop_union, - blocklist_type, - blocklist_function, - blocklist_item, - blocklist_file, - blocklist_var, - no_layout_tests, - no_derive_copy, - no_derive_debug, - no_derive_default, - impl_debug, - impl_partialeq, - with_derive_default, - with_derive_hash, - with_derive_partialeq, - with_derive_partialord, - with_derive_eq, - with_derive_ord, - no_doc_comments, - no_recursive_allowlist, - objc_extern_crate, - generate_block, - generate_cstr, - block_extern_crate, - distrust_clang_mangling, - builtins, - ctypes_prefix, - anon_fields_prefix, - time_phases, - emit_clang_ast, - emit_ir, - emit_ir_graphviz, - enable_cxx_namespaces, - disable_name_namespacing, - disable_nested_struct_naming, - disable_untagged_union, - disable_header_comment, - ignore_functions, - generate, - ignore_methods, - no_convert_floats, - no_prepend_enum_name, - no_include_path_detection, - fit_macro_constant_types, - opaque_type, - output, - raw_line, - module_raw_line, - rust_target, - rust_edition, - use_core, - conservative_inline_namespaces, - allowlist_function, - generate_inline_functions, - allowlist_type, - allowlist_var, - allowlist_file, - allowlist_item, - verbose, - dump_preprocessed_input, - no_record_matches, - no_size_t_is_usize, - no_rustfmt_bindings, - formatter, - rustfmt_configuration_file, - no_partialeq, - no_copy, - no_debug, - no_default, - no_hash, - must_use_type, - enable_function_attribute_detection, - use_array_pointers_in_arguments, - wasm_import_module_name, - dynamic_loading, - dynamic_link_require_all, - prefix_link_name, - respect_cxx_access_specs, - translate_enum_integer_types, - c_naming, - explicit_padding, - use_specific_virtual_function_receiver, - use_distinct_char16_t, - represent_cxx_operators, - vtable_generation, - sort_semantically, - merge_extern_blocks, - override_abi, - wrap_unsafe_ops, - clang_macro_fallback, - clang_macro_fallback_build_dir, - flexarray_dst, - with_derive_custom, - with_derive_custom_struct, - with_derive_custom_enum, - with_derive_custom_union, - with_attribute_custom, - with_attribute_custom_struct, - with_attribute_custom_enum, - with_attribute_custom_union, - wrap_static_fns, - wrap_static_fns_path, - wrap_static_fns_suffix, - default_visibility, - generate_deleted_functions, - generate_pure_virtual_functions, - generate_private_functions, - #[cfg(feature = "experimental")] - emit_diagnostics, - generate_shell_completions, - experimental: _, - version, - clang_args, - } = command; - - if let Some(shell) = generate_shell_completions { - clap_complete::generate( - shell, - &mut BindgenCommand::command(), - "bindgen", - &mut io::stdout(), - ); - - exit(0) - } - - if version { - println!( - "bindgen {}", - option_env!("CARGO_PKG_VERSION").unwrap_or("unknown") - ); - if verbose { - println!("Clang: {}", crate::clang_version().full); - } - - exit(0) - } - - if header.is_none() { - return Err(io::Error::new(io::ErrorKind::Other, "Header not found")); - } - - let mut builder = builder(); - - #[derive(Debug)] 
- struct PrefixLinkNameCallback { - prefix: String, - } - - impl ParseCallbacks for PrefixLinkNameCallback { - fn generated_link_name_override( - &self, - item_info: ItemInfo<'_>, - ) -> Option { - let mut prefix = self.prefix.clone(); - prefix.push_str(item_info.name); - Some(prefix) - } - } - - #[derive(Debug)] - struct CustomDeriveCallback { - derives: Vec, - kind: Option, - regex_set: RegexSet, - } - - impl ParseCallbacks for CustomDeriveCallback { - fn cli_args(&self) -> Vec { - let mut args = vec![]; - - let flag = match &self.kind { - None => "--with-derive-custom", - Some(TypeKind::Struct) => "--with-derive-custom-struct", - Some(TypeKind::Enum) => "--with-derive-custom-enum", - Some(TypeKind::Union) => "--with-derive-custom-union", - }; - - let derives = self.derives.join(","); - - for item in self.regex_set.get_items() { - args.extend_from_slice(&[ - flag.to_owned(), - format!("{item}={derives}"), - ]); - } - - args - } - - fn add_derives(&self, info: &DeriveInfo<'_>) -> Vec { - if self.kind.map_or(true, |kind| kind == info.kind) && - self.regex_set.matches(info.name) - { - return self.derives.clone(); - } - vec![] - } - } - - #[derive(Debug)] - struct CustomAttributeCallback { - attributes: Vec, - kind: Option, - regex_set: RegexSet, - } - - impl ParseCallbacks for CustomAttributeCallback { - fn cli_args(&self) -> Vec { - let mut args = vec![]; - - let flag = match &self.kind { - None => "--with-attribute-custom", - Some(TypeKind::Struct) => "--with-attribute-custom-struct", - Some(TypeKind::Enum) => "--with-attribute-custom-enum", - Some(TypeKind::Union) => "--with-attribute-custom-union", - }; - - let attributes = self.attributes.join(","); - - for item in self.regex_set.get_items() { - args.extend_from_slice(&[ - flag.to_owned(), - format!("{item}={attributes}"), - ]); - } - - args - } - - fn add_attributes(&self, info: &AttributeInfo<'_>) -> Vec { - if self.kind.map_or(true, |kind| kind == info.kind) && - self.regex_set.matches(info.name) - { - return self.attributes.clone(); - } - vec![] - } - } - - /// Macro used to apply CLI arguments to a builder. - /// - /// This is done by passing an identifier for each argument and a function to be applied over - /// the builder. For example: - /// ```rust,ignore - /// fn apply_arg(builder: Builder, arg_value: Value) -> Builder { - /// todo!() - /// } - /// - /// apply_args!( - /// builder { - /// arg => apply_arg, - /// } - /// ); - /// ``` - /// - /// If the identifier of the argument is the same as an already existing builder method then - /// you can omit the second part: - /// ```rust,ignore - /// apply_args!( - /// builder { - /// arg - /// } - /// ); - /// ``` - /// Which expands to the same code as: - /// ```rust,ignore - /// apply_args!( - /// builder { - /// arg => Builder::arg, - /// } - /// ); - /// ``` - macro_rules! 
apply_args { - ($builder:ident {}) => { $builder }; - ($builder:ident {$arg:ident => $function:expr, $($token:tt)*}) => { - { - $builder = CliArg::apply($arg, $builder, $function); - apply_args!($builder {$($token)*}) - } - }; - ($builder:ident {$arg:ident, $($token:tt)*}) => { - { - $builder = CliArg::apply($arg, $builder, Builder::$arg); - apply_args!($builder {$($token)*}) - } - } - } - - builder = apply_args!( - builder { - header, - rust_target, - rust_edition, - default_enum_style, - bitfield_enum, - newtype_enum, - newtype_global_enum, - rustified_enum, - rustified_non_exhaustive_enum, - constified_enum, - constified_enum_module, - default_macro_constant_type, - default_alias_style, - normal_alias => Builder::type_alias, - new_type_alias, - new_type_alias_deref, - default_non_copy_union_style, - bindgen_wrapper_union, - manually_drop_union, - blocklist_type, - blocklist_function, - blocklist_item, - blocklist_file, - blocklist_var, - builtins => |b, _| b.emit_builtins(), - no_layout_tests => |b, _| b.layout_tests(false), - no_derive_copy => |b, _| b.derive_copy(false), - no_derive_debug => |b, _| b.derive_debug(false), - impl_debug, - impl_partialeq, - with_derive_default => Builder::derive_default, - with_derive_hash => Builder::derive_hash, - with_derive_partialeq => Builder::derive_partialeq, - with_derive_partialord => Builder::derive_partialord, - with_derive_eq => Builder::derive_eq, - with_derive_ord => Builder::derive_ord, - no_derive_default => |b, _| b.derive_default(false), - no_prepend_enum_name => |b, _| b.prepend_enum_name(false), - no_include_path_detection => |b, _| b.detect_include_paths(false), - fit_macro_constant_types => Builder::fit_macro_constants, - time_phases, - use_array_pointers_in_arguments => Builder::array_pointers_in_arguments, - wasm_import_module_name, - ctypes_prefix, - anon_fields_prefix, - generate => Builder::with_codegen_config, - emit_clang_ast => |b, _| b.emit_clang_ast(), - emit_ir => |b, _| b.emit_ir(), - emit_ir_graphviz, - enable_cxx_namespaces => |b, _| b.enable_cxx_namespaces(), - enable_function_attribute_detection => |b, _| b.enable_function_attribute_detection(), - disable_name_namespacing => |b, _| b.disable_name_namespacing(), - disable_nested_struct_naming => |b, _| b.disable_nested_struct_naming(), - disable_untagged_union => |b, _| b.disable_untagged_union(), - disable_header_comment => |b, _| b.disable_header_comment(), - ignore_functions => |b, _| b.ignore_functions(), - ignore_methods => |b, _| b.ignore_methods(), - no_convert_floats => |b, _| b.no_convert_floats(), - no_doc_comments => |b, _| b.generate_comments(false), - no_recursive_allowlist => |b, _| b.allowlist_recursively(false), - objc_extern_crate, - generate_block, - generate_cstr, - block_extern_crate, - opaque_type, - raw_line, - use_core => |b, _| b.use_core(), - distrust_clang_mangling => |b, _| b.trust_clang_mangling(false), - conservative_inline_namespaces => |b, _| b.conservative_inline_namespaces(), - generate_inline_functions, - allowlist_function, - allowlist_type, - allowlist_var, - allowlist_file, - allowlist_item, - clang_args => Builder::clang_arg, - no_record_matches => |b, _| b.record_matches(false), - no_size_t_is_usize => |b, _| b.size_t_is_usize(false), - no_rustfmt_bindings => |b, _| b.formatter(Formatter::None), - formatter, - no_partialeq, - no_copy, - no_debug, - no_default, - no_hash, - must_use_type, - dynamic_loading => Builder::dynamic_library_name, - dynamic_link_require_all, - prefix_link_name => |b, prefix| 
b.parse_callbacks(Box::new(PrefixLinkNameCallback { prefix })), - respect_cxx_access_specs, - translate_enum_integer_types, - c_naming, - explicit_padding, - use_specific_virtual_function_receiver, - use_distinct_char16_t, - represent_cxx_operators, - vtable_generation, - sort_semantically, - merge_extern_blocks, - override_abi => |b, (abi, regex)| b.override_abi(abi, regex), - wrap_unsafe_ops, - clang_macro_fallback => |b, _| b.clang_macro_fallback(), - clang_macro_fallback_build_dir, - flexarray_dst, - wrap_static_fns, - wrap_static_fns_path, - wrap_static_fns_suffix, - default_visibility, - generate_deleted_functions, - generate_pure_virtual_functions, - generate_private_functions, - } - ); - - let mut values = module_raw_line.into_iter(); - while let Some(module) = values.next() { - let line = values.next().unwrap(); - builder = builder.module_raw_line(module, line); - } - - let output = if let Some(path) = &output { - let file = File::create(path)?; - if let Some(depfile) = depfile { - builder = builder.depfile(path, depfile); - } - Box::new(io::BufWriter::new(file)) as Box - } else { - if let Some(depfile) = depfile { - builder = builder.depfile("-", depfile); - } - Box::new(io::BufWriter::new(io::stdout())) as Box - }; - - if dump_preprocessed_input { - builder.dump_preprocessed_input()?; - } - - if let Some(path) = rustfmt_configuration_file { - builder = builder.rustfmt_configuration_file(Some(path)); - } - - for (custom_derives, kind, _name) in [ - (with_derive_custom, None, "--with-derive-custom"), - ( - with_derive_custom_struct, - Some(TypeKind::Struct), - "--with-derive-custom-struct", - ), - ( - with_derive_custom_enum, - Some(TypeKind::Enum), - "--with-derive-custom-enum", - ), - ( - with_derive_custom_union, - Some(TypeKind::Union), - "--with-derive-custom-union", - ), - ] { - #[cfg(feature = "experimental")] - let name = emit_diagnostics.then_some(_name); - - for (derives, regex) in custom_derives { - let mut regex_set = RegexSet::default(); - regex_set.insert(regex); - - #[cfg(feature = "experimental")] - regex_set.build_with_diagnostics(false, name); - #[cfg(not(feature = "experimental"))] - regex_set.build(false); - - builder = builder.parse_callbacks(Box::new(CustomDeriveCallback { - derives, - kind, - regex_set, - })); - } - } - - for (custom_attributes, kind, _name) in [ - (with_attribute_custom, None, "--with-attribute-custom"), - ( - with_attribute_custom_struct, - Some(TypeKind::Struct), - "--with-attribute-custom-struct", - ), - ( - with_attribute_custom_enum, - Some(TypeKind::Enum), - "--with-attribute-custom-enum", - ), - ( - with_attribute_custom_union, - Some(TypeKind::Union), - "--with-attribute-custom-union", - ), - ] { - #[cfg(feature = "experimental")] - let name = emit_diagnostics.then_some(_name); - - for (attributes, regex) in custom_attributes { - let mut regex_set = RegexSet::default(); - regex_set.insert(regex); - - #[cfg(feature = "experimental")] - regex_set.build_with_diagnostics(false, name); - #[cfg(not(feature = "experimental"))] - regex_set.build(false); - - builder = - builder.parse_callbacks(Box::new(CustomAttributeCallback { - attributes, - kind, - regex_set, - })); - } - } - - #[cfg(feature = "experimental")] - if emit_diagnostics { - builder = builder.emit_diagnostics(); - } - - Ok((builder, output, verbose)) -} - -/// Trait for CLI arguments that can be applied to a [`Builder`]. -trait CliArg { - /// The value of this argument. - type Value; - - /// Apply the current argument to the passed [`Builder`]. 
- fn apply( - self, - builder: Builder, - f: impl Fn(Builder, Self::Value) -> Builder, - ) -> Builder; -} - -/// Boolean arguments are applied when they evaluate to `true`. -impl CliArg for bool { - type Value = bool; - - fn apply( - self, - mut builder: Builder, - f: impl Fn(Builder, Self::Value) -> Builder, - ) -> Builder { - if self { - builder = f(builder, self); - } - - builder - } -} - -/// Optional arguments are applied when they are `Some`. -impl CliArg for Option { - type Value = T; - - fn apply( - self, - mut builder: Builder, - f: impl Fn(Builder, Self::Value) -> Builder, - ) -> Builder { - if let Some(value) = self { - builder = f(builder, value); - } - - builder - } -} - -/// Multiple valued arguments are applied once for each value. -impl CliArg for Vec { - type Value = T; - - fn apply( - self, - mut builder: Builder, - f: impl Fn(Builder, Self::Value) -> Builder, - ) -> Builder { - for value in self { - builder = f(builder, value); - } - - builder - } -} diff --git a/vendor/bindgen/options/helpers.rs b/vendor/bindgen/options/helpers.rs deleted file mode 100644 index 1816c72b572b04..00000000000000 --- a/vendor/bindgen/options/helpers.rs +++ /dev/null @@ -1,43 +0,0 @@ -/// Helper function that appends extra documentation to [`crate::Builder`] methods that support regular -/// expressions in their input. -macro_rules! regex_option { - ($(#[$attrs:meta])* pub fn $($tokens:tt)*) => { - $(#[$attrs])* - /// - /// Regular expressions are supported. Check the [regular expression - /// arguments](./struct.Builder.html#regular-expression-arguments) section and the - /// [regex](https://docs.rs/regex) crate documentation for further information. - pub fn $($tokens)* - }; -} - -/// Helper macro to set the default value of each option. -/// -/// This macro is an internal implementation detail of the `options` macro and should not be used -/// directly. -macro_rules! default { - () => { - Default::default() - }; - ($expr:expr) => { - $expr - }; -} - -/// Helper macro to set the conversion to CLI arguments for each option. -/// -/// This macro is an internal implementation detail of the `options` macro and should not be used -/// directly. -macro_rules! as_args { - ($flag:literal) => { - |field, args| AsArgs::as_args(field, args, $flag) - }; - ($expr:expr) => { - $expr - }; -} - -/// Helper function to ignore an option when converting it into CLI arguments. -/// -/// This function is only used inside `options` and should not be used in other contexts. -pub(super) fn ignore(_: &T, _: &mut Vec) {} diff --git a/vendor/bindgen/options/mod.rs b/vendor/bindgen/options/mod.rs deleted file mode 100644 index c9ef7c8b490da7..00000000000000 --- a/vendor/bindgen/options/mod.rs +++ /dev/null @@ -1,2286 +0,0 @@ -//! Declarations and setter methods for `bindgen` options. -//! -//! The main entry point of this module is the `options` macro. 
-#[macro_use] -mod helpers; -mod as_args; -#[cfg(feature = "__cli")] -pub(crate) mod cli; - -use crate::callbacks::ParseCallbacks; -use crate::codegen::{ - AliasVariation, EnumVariation, MacroTypeVariation, NonCopyUnionStyle, -}; -use crate::deps::DepfileSpec; -use crate::features::{RustEdition, RustFeatures, RustTarget}; -use crate::regex_set::RegexSet; -use crate::Abi; -use crate::Builder; -use crate::CodegenConfig; -use crate::FieldVisibilityKind; -use crate::Formatter; -use crate::HashMap; -use crate::DEFAULT_ANON_FIELDS_PREFIX; - -use std::env; -use std::path::{Path, PathBuf}; -use std::rc::Rc; - -use as_args::AsArgs; -use helpers::ignore; - -/// Macro used to generate the [`BindgenOptions`] type and the [`Builder`] setter methods for each -/// one of the fields of `BindgenOptions`. -/// -/// The input format of this macro resembles a `struct` pattern. Each field of the `BindgenOptions` -/// type is declared by adding the name of the field and its type using the `name: type` syntax and -/// a block of code with the following items: -/// -/// - `default`: The default value for the field. If this item is omitted, `Default::default()` is -/// used instead, meaning that the type of the field must implement `Default`. -/// - `methods`: A block of code containing methods for the `Builder` type. These methods should be -/// related to the field being declared. -/// - `as_args`: This item declares how the field should be converted into a valid CLI argument for -/// `bindgen` and is used in the [`Builder::command_line_flags`] method which is used to do a -/// roundtrip test of the CLI args in the `bindgen-test` crate. This item can take one of the -/// following: -/// - A string literal with the flag if the type of the field implements the [`AsArgs`] trait. -/// - A closure with the signature `|field, args: &mut Vec| -> ()` that pushes arguments -/// into the `args` buffer based on the value of the field. This is used if the field does not -/// implement `AsArgs` or if the implementation of the trait is not logically correct for the -/// option and a custom behavior must be taken into account. -/// - The `ignore` literal, which does not emit any CLI arguments for this field. This is useful -/// if the field cannot be used from the `bindgen` CLI. -/// -/// As an example, this would be the declaration of a `bool` field called `be_fun` whose default -/// value is `false` (the `Default` value for `bool`): -/// ```rust,ignore -/// be_fun: bool { -/// methods: { -/// /// Ask `bindgen` to be fun. This option is disabled by default. -/// fn be_fun(mut self) -> Self { -/// self.options.be_fun = true; -/// self -/// } -/// }, -/// as_args: "--be-fun", -/// } -/// ``` -/// -/// However, we could also set the `be_fun` field to `true` by default and use a `--not-fun` flag -/// instead. This means that we have to add the `default` item and use a closure in the `as_args` -/// item: -/// ```rust,ignore -/// be_fun: bool { -/// default: true, -/// methods: { -/// /// Ask `bindgen` to not be fun. `bindgen` is fun by default. -/// fn not_fun(mut self) -> Self { -/// self.options.be_fun = false; -/// self -/// } -/// }, -/// as_args: |be_fun, args| (!be_fun).as_args(args, "--not-fun"), -/// } -/// ``` -/// More complex examples can be found in the sole invocation of this macro. -macro_rules! options { - ($( - $(#[doc = $docs:literal])+ - $field:ident: $ty:ty { - $(default: $default:expr,)? - methods: {$($methods_tokens:tt)*}$(,)? - as_args: $as_args:expr$(,)? - }$(,)? 
- )*) => { - #[derive(Debug, Clone)] - pub(crate) struct BindgenOptions { - $($(#[doc = $docs])* pub(crate) $field: $ty,)* - } - - impl Default for BindgenOptions { - fn default() -> Self { - Self { - $($field: default!($($default)*),)* - } - } - } - - impl Builder { - /// Generates the command line flags used to create this [`Builder`]. - pub fn command_line_flags(&self) -> Vec { - let mut args = vec![]; - - let headers = match self.options.input_headers.split_last() { - Some((header, headers)) => { - // The last input header is passed as an argument in the first position. - args.push(header.clone().into()); - headers - }, - None => &[] - }; - - $({ - let func: fn(&$ty, &mut Vec) = as_args!($as_args); - func(&self.options.$field, &mut args); - })* - - // Add the `--experimental` flag if `bindgen` is built with the `experimental` - // feature. - if cfg!(feature = "experimental") { - args.push("--experimental".to_owned()); - } - - // Add all the clang arguments. - args.push("--".to_owned()); - - if !self.options.clang_args.is_empty() { - args.extend(self.options.clang_args.iter().map(|s| s.clone().into())); - } - - // We need to pass all but the last header via the `-include` clang argument. - for header in headers { - args.push("-include".to_owned()); - args.push(header.clone().into()); - } - - args - } - - $($($methods_tokens)*)* - } - }; -} - -options! { - /// Whether to specify the type of a virtual function receiver - use_specific_virtual_function_receiver: bool { - methods: { - /// Normally, virtual functions have void* as their 'this' type. - /// If this flag is enabled, override that behavior to indicate a - /// pointer of the specific type. - /// Disabled by default. - pub fn use_specific_virtual_function_receiver(mut self, doit: bool) -> Builder { - self.options.use_specific_virtual_function_receiver = doit; - self - } - }, - as_args: "--use-specific-virtual-function-receiver", - }, - - /// Whether we should distinguish between C++'s 'char16_t' and 'u16'. - /// The C++ type `char16_t` is its own special type; it's not a typedef - /// of some other integer (this differs from C). - /// As standard, bindgen represents C++ `char16_t` as `u16`. - /// Rust does not have a `std::os::raw::c_char16_t` type, and thus - /// we can't use a built-in Rust type in the generated bindings (and - /// nor would it be appropriate as it's a C++-specific type.) - /// But for some uses of bindgen, especially when downstream - /// post-processing occurs, it's important to distinguish `char16_t` - /// from normal `uint16_t`. When this option is enabled, bindgen - /// generates a fake type called `bindgen_cchar16_t`. Downstream - /// code post-processors should arrange to replace this with a - /// real type. - use_distinct_char16_t: bool { - methods: { - /// If this is true, denote 'char16_t' as a separate type from 'u16' - /// Disabled by default. - pub fn use_distinct_char16_t(mut self, doit: bool) -> Builder { - self.options.use_distinct_char16_t = doit; - self - } - }, - as_args: "--use-distinct-char16-t", - }, - /// Whether we should output C++ overloaded operators. By itself, - /// this option is not sufficient to produce valid output, because - /// such operators will have names that are not acceptable Rust - /// names (for example `operator=`). If you use this option, you'll also - /// have to rename the resulting functions - for example by using - /// [`ParseCallbacks::generated_name_override`]. 
- represent_cxx_operators: bool { - methods: { - /// If this is true, output existence of C++ overloaded operators. - /// At present, only operator= is noted. - /// Disabled by default. - pub fn represent_cxx_operators(mut self, doit: bool) -> Builder { - self.options.represent_cxx_operators = doit; - self - } - }, - as_args: "--represent-cxx-operators", - }, - - /// Types that have been blocklisted and should not appear anywhere in the generated code. - blocklisted_types: RegexSet { - methods: { - regex_option! { - /// Do not generate any bindings for the given type. - /// - /// This option is not recursive, meaning that it will only block types whose names - /// explicitly match the argument of this method. - pub fn blocklist_type>(mut self, arg: T) -> Builder { - self.options.blocklisted_types.insert(arg); - self - } - } - }, - as_args: "--blocklist-type", - }, - /// Functions that have been blocklisted and should not appear in the generated code. - blocklisted_functions: RegexSet { - methods: { - regex_option! { - /// Do not generate any bindings for the given function. - /// - /// This option is not recursive, meaning that it will only block functions whose - /// names explicitly match the argument of this method. - pub fn blocklist_function>(mut self, arg: T) -> Builder { - self.options.blocklisted_functions.insert(arg); - self - } - } - }, - as_args: "--blocklist-function", - }, - /// Items that have been blocklisted and should not appear in the generated code. - blocklisted_items: RegexSet { - methods: { - regex_option! { - /// Do not generate any bindings for the given item, regardless of whether it is a - /// type, function, module, etc. - /// - /// This option is not recursive, meaning that it will only block items whose names - /// explicitly match the argument of this method. - pub fn blocklist_item>(mut self, arg: T) -> Builder { - self.options.blocklisted_items.insert(arg); - self - } - } - }, - as_args: "--blocklist-item", - }, - /// Files whose contents should be blocklisted and should not appear in the generated code. - blocklisted_files: RegexSet { - methods: { - regex_option! { - /// Do not generate any bindings for the contents of the given file, regardless of - /// whether the contents of the file are types, functions, modules, etc. - /// - /// This option is not recursive, meaning that it will only block files whose names - /// explicitly match the argument of this method. - /// - /// This method will use the argument to match the complete path of the file - /// instead of a section of it. - pub fn blocklist_file>(mut self, arg: T) -> Builder { - self.options.blocklisted_files.insert(arg); - self - } - } - }, - as_args: "--blocklist-file", - }, - /// Variables that have been blocklisted and should not appear in the generated code. - blocklisted_vars: RegexSet { - methods: { - regex_option! { - /// Do not generate any bindings for the given variable. - /// - /// This option is not recursive, meaning that it will only block variables whose - /// names explicitly match the argument of this method. - pub fn blocklist_var>(mut self, arg: T) -> Builder { - self.options.blocklisted_vars.insert(arg); - self - } - } - }, - as_args: "--blocklist-var", - }, - /// Types that should be treated as opaque structures in the generated code. - opaque_types: RegexSet { - methods: { - regex_option! { - /// Treat the given type as opaque in the generated bindings. 
- /// - /// Opaque in this context means that none of the generated bindings will contain - /// information about the inner representation of the type and the type itself will - /// be represented as a chunk of bytes with the alignment and size of the type. - pub fn opaque_type>(mut self, arg: T) -> Builder { - self.options.opaque_types.insert(arg); - self - } - } - }, - as_args: "--opaque-type", - }, - /// The explicit `rustfmt` path. - rustfmt_path: Option { - methods: { - /// Set an explicit path to the `rustfmt` binary. - /// - /// This option only comes into effect if `rustfmt` is set to be the formatter used by - /// `bindgen`. Check the documentation of the [`Builder::formatter`] method for more - /// information. - pub fn with_rustfmt>(mut self, path: P) -> Self { - self.options.rustfmt_path = Some(path.into()); - self - } - }, - // This option cannot be set from the CLI. - as_args: ignore, - }, - /// The path to which we should write a Makefile-syntax depfile (if any). - depfile: Option { - methods: { - /// Add a depfile output which will be written alongside the generated bindings. - pub fn depfile, D: Into>( - mut self, - output_module: H, - depfile: D, - ) -> Builder { - self.options.depfile = Some(DepfileSpec { - output_module: output_module.into(), - depfile_path: depfile.into(), - }); - self - } - }, - as_args: |depfile, args| { - if let Some(depfile) = depfile { - args.push("--depfile".into()); - args.push(depfile.depfile_path.display().to_string()); - } - }, - }, - /// Types that have been allowlisted and should appear in the generated code. - allowlisted_types: RegexSet { - methods: { - regex_option! { - /// Generate bindings for the given type. - /// - /// This option is transitive by default. Check the documentation of the - /// [`Builder::allowlist_recursively`] method for further information. - pub fn allowlist_type>(mut self, arg: T) -> Builder { - self.options.allowlisted_types.insert(arg); - self - } - } - }, - as_args: "--allowlist-type", - }, - /// Functions that have been allowlisted and should appear in the generated code. - allowlisted_functions: RegexSet { - methods: { - regex_option! { - /// Generate bindings for the given function. - /// - /// This option is transitive by default. Check the documentation of the - /// [`Builder::allowlist_recursively`] method for further information. - pub fn allowlist_function>(mut self, arg: T) -> Builder { - self.options.allowlisted_functions.insert(arg); - self - } - } - }, - as_args: "--allowlist-function", - }, - /// Variables that have been allowlisted and should appear in the generated code. - allowlisted_vars: RegexSet { - methods: { - regex_option! { - /// Generate bindings for the given variable. - /// - /// This option is transitive by default. Check the documentation of the - /// [`Builder::allowlist_recursively`] method for further information. - pub fn allowlist_var>(mut self, arg: T) -> Builder { - self.options.allowlisted_vars.insert(arg); - self - } - } - }, - as_args: "--allowlist-var", - }, - /// Files whose contents have been allowlisted and should appear in the generated code. - allowlisted_files: RegexSet { - methods: { - regex_option! { - /// Generate bindings for the content of the given file. - /// - /// This option is transitive by default. Check the documentation of the - /// [`Builder::allowlist_recursively`] method for further information. - /// - /// This method will use the argument to match the complete path of the file - /// instead of a section of it. 
- pub fn allowlist_file>(mut self, arg: T) -> Builder { - self.options.allowlisted_files.insert(arg); - self - } - } - }, - as_args: "--allowlist-file", - }, - /// Items that have been allowlisted and should appear in the generated code. - allowlisted_items: RegexSet { - methods: { - regex_option! { - /// Generate bindings for the given item, regardless of whether it is a type, - /// function, module, etc. - /// - /// This option is transitive by default. Check the documentation of the - /// [`Builder::allowlist_recursively`] method for further information. - pub fn allowlist_item>(mut self, arg: T) -> Builder { - self.options.allowlisted_items.insert(arg); - self - } - } - }, - as_args: "--allowlist-item", - }, - /// The default style of for generated `enum`s. - default_enum_style: EnumVariation { - methods: { - /// Set the default style for generated `enum`s. - /// - /// If this method is not called, the [`EnumVariation::Consts`] style will be used by - /// default. - /// - /// To set the style for individual `enum`s, use [`Builder::bitfield_enum`], - /// [`Builder::newtype_enum`], [`Builder::newtype_global_enum`], - /// [`Builder::rustified_enum`], [`Builder::rustified_non_exhaustive_enum`], - /// [`Builder::constified_enum_module`] or [`Builder::constified_enum`]. - pub fn default_enum_style( - mut self, - arg: EnumVariation, - ) -> Builder { - self.options.default_enum_style = arg; - self - } - }, - as_args: |variation, args| { - if *variation != Default::default() { - args.push("--default-enum-style".to_owned()); - args.push(variation.to_string()); - } - }, - }, - /// `enum`s marked as bitfield-like. This is, newtypes with bitwise operations. - bitfield_enums: RegexSet { - methods: { - regex_option! { - /// Mark the given `enum` as being bitfield-like. - /// - /// This is similar to the [`Builder::newtype_enum`] style, but with the bitwise - /// operators implemented. - pub fn bitfield_enum>(mut self, arg: T) -> Builder { - self.options.bitfield_enums.insert(arg); - self - } - } - }, - as_args: "--bitfield-enum", - }, - /// `enum`s marked as newtypes. - newtype_enums: RegexSet { - methods: { - regex_option! { - /// Mark the given `enum` as a newtype. - /// - /// This means that an integer newtype will be declared to represent the `enum` - /// type and its variants will be represented as constants inside of this type's - /// `impl` block. - pub fn newtype_enum>(mut self, arg: T) -> Builder { - self.options.newtype_enums.insert(arg); - self - } - } - }, - as_args: "--newtype-enum", - }, - /// `enum`s marked as global newtypes . - newtype_global_enums: RegexSet { - methods: { - regex_option! { - /// Mark the given `enum` as a global newtype. - /// - /// This is similar to the [`Builder::newtype_enum`] style, but the constants for - /// each variant are free constants instead of being declared inside an `impl` - /// block for the newtype. - pub fn newtype_global_enum>(mut self, arg: T) -> Builder { - self.options.newtype_global_enums.insert(arg); - self - } - } - }, - as_args: "--newtype-global-enum", - }, - /// `enum`s marked as Rust `enum`s. - rustified_enums: RegexSet { - methods: { - regex_option! { - /// Mark the given `enum` as a Rust `enum`. - /// - /// This means that each variant of the `enum` will be represented as a Rust `enum` - /// variant. - /// - /// **Use this with caution**, creating an instance of a Rust `enum` with an - /// invalid value will cause undefined behaviour. To avoid this, use the - /// [`Builder::newtype_enum`] style instead. 
- pub fn rustified_enum>(mut self, arg: T) -> Builder { - self.options.rustified_enums.insert(arg); - self - } - } - }, - as_args: "--rustified-enum", - }, - /// `enum`s marked as non-exhaustive Rust `enum`s. - rustified_non_exhaustive_enums: RegexSet { - methods: { - regex_option! { - /// Mark the given `enum` as a non-exhaustive Rust `enum`. - /// - /// This is similar to the [`Builder::rustified_enum`] style, but the `enum` is - /// tagged with the `#[non_exhaustive]` attribute. - pub fn rustified_non_exhaustive_enum>(mut self, arg: T) -> Builder { - self.options.rustified_non_exhaustive_enums.insert(arg); - self - } - } - }, - as_args: "--rustified-non-exhaustive-enums", - }, - /// `enum`s marked as modules of constants. - constified_enum_modules: RegexSet { - methods: { - regex_option! { - /// Mark the given `enum` as a module with a set of integer constants. - pub fn constified_enum_module>(mut self, arg: T) -> Builder { - self.options.constified_enum_modules.insert(arg); - self - } - } - }, - as_args: "--constified-enum-module", - }, - /// `enum`s marked as a set of constants. - constified_enums: RegexSet { - methods: { - regex_option! { - /// Mark the given `enum` as a set of integer constants. - /// - /// This is similar to the [`Builder::constified_enum_module`] style, but the - /// constants are generated in the current module instead of in a new module. - pub fn constified_enum>(mut self, arg: T) -> Builder { - self.options.constified_enums.insert(arg); - self - } - } - }, - as_args: "--constified-enum", - }, - /// The default type signedness for C macro constants. - default_macro_constant_type: MacroTypeVariation { - methods: { - /// Set the default type signedness to be used for macro constants. - /// - /// If this method is not called, [`MacroTypeVariation::Unsigned`] is used by default. - /// - /// To set the type for individual macro constants, use the - /// [`ParseCallbacks::int_macro`] method. - pub fn default_macro_constant_type(mut self, arg: MacroTypeVariation) -> Builder { - self.options.default_macro_constant_type = arg; - self - } - - }, - as_args: |variation, args| { - if *variation != Default::default() { - args.push("--default-macro-constant-type".to_owned()); - args.push(variation.to_string()); - } - }, - }, - /// The default style of code generation for `typedef`s. - default_alias_style: AliasVariation { - methods: { - /// Set the default style of code generation for `typedef`s. - /// - /// If this method is not called, the [`AliasVariation::TypeAlias`] style is used by - /// default. - /// - /// To set the style for individual `typedefs`s, use [`Builder::type_alias`], - /// [`Builder::new_type_alias`] or [`Builder::new_type_alias_deref`]. - pub fn default_alias_style( - mut self, - arg: AliasVariation, - ) -> Builder { - self.options.default_alias_style = arg; - self - } - }, - as_args: |variation, args| { - if *variation != Default::default() { - args.push("--default-alias-style".to_owned()); - args.push(variation.to_string()); - } - }, - }, - /// `typedef` patterns that will use regular type aliasing. - type_alias: RegexSet { - methods: { - regex_option! { - /// Mark the given `typedef` as a regular Rust `type` alias. - /// - /// This is the default behavior, meaning that this method only comes into effect - /// if a style different from [`AliasVariation::TypeAlias`] was passed to the - /// [`Builder::default_alias_style`] method. 
- pub fn type_alias>(mut self, arg: T) -> Builder { - self.options.type_alias.insert(arg); - self - } - } - }, - as_args: "--type-alias", - }, - /// `typedef` patterns that will be aliased by creating a newtype. - new_type_alias: RegexSet { - methods: { - regex_option! { - /// Mark the given `typedef` as a Rust newtype by having the aliased - /// type be wrapped in a `struct` with `#[repr(transparent)]`. - /// - /// This method can be used to enforce stricter type checking. - pub fn new_type_alias>(mut self, arg: T) -> Builder { - self.options.new_type_alias.insert(arg); - self - } - } - }, - as_args: "--new-type-alias", - }, - /// `typedef` patterns that will be wrapped in a newtype implementing `Deref` and `DerefMut`. - new_type_alias_deref: RegexSet { - methods: { - regex_option! { - /// Mark the given `typedef` to be generated as a newtype that can be dereferenced. - /// - /// This is similar to the [`Builder::new_type_alias`] style, but the newtype - /// implements `Deref` and `DerefMut` with the aliased type as a target. - pub fn new_type_alias_deref>(mut self, arg: T) -> Builder { - self.options.new_type_alias_deref.insert(arg); - self - } - } - }, - as_args: "--new-type-alias-deref", - }, - /// The default style of code to generate for `union`s containing non-`Copy` members. - default_non_copy_union_style: NonCopyUnionStyle { - methods: { - /// Set the default style of code to generate for `union`s with non-`Copy` members. - /// - /// If this method is not called, the [`NonCopyUnionStyle::BindgenWrapper`] style is - /// used by default. - /// - /// To set the style for individual `union`s, use [`Builder::bindgen_wrapper_union`] or - /// [`Builder::manually_drop_union`]. - pub fn default_non_copy_union_style(mut self, arg: NonCopyUnionStyle) -> Self { - self.options.default_non_copy_union_style = arg; - self - } - }, - as_args: |style, args| { - if *style != Default::default() { - args.push("--default-non-copy-union-style".to_owned()); - args.push(style.to_string()); - } - }, - }, - /// The patterns marking non-`Copy` `union`s as using the `bindgen` generated wrapper. - bindgen_wrapper_union: RegexSet { - methods: { - regex_option! { - /// Mark the given `union` to use a `bindgen`-generated wrapper for its members if at - /// least one them is not `Copy`. - /// - /// This is the default behavior, meaning that this method only comes into effect - /// if a style different from [`NonCopyUnionStyle::BindgenWrapper`] was passed to - /// the [`Builder::default_non_copy_union_style`] method. - pub fn bindgen_wrapper_union>(mut self, arg: T) -> Self { - self.options.bindgen_wrapper_union.insert(arg); - self - } - } - }, - as_args: "--bindgen-wrapper-union", - }, - /// The patterns marking non-`Copy` `union`s as using the `ManuallyDrop` wrapper. - manually_drop_union: RegexSet { - methods: { - regex_option! { - /// Mark the given `union` to use [`::core::mem::ManuallyDrop`] for its members if - /// at least one of them is not `Copy`. - /// - /// The `ManuallyDrop` type was stabilized in Rust 1.20.0, do not use this option - /// if your target version is lower than this. - pub fn manually_drop_union>(mut self, arg: T) -> Self { - self.options.manually_drop_union.insert(arg); - self - } - } - - }, - as_args: "--manually-drop-union", - }, - - - /// Whether we should generate built-in definitions. - builtins: bool { - methods: { - /// Generate Rust bindings for built-in definitions (for example `__builtin_va_list`). - /// - /// Bindings for built-in definitions are not emitted by default. 
- pub fn emit_builtins(mut self) -> Builder { - self.options.builtins = true; - self - } - }, - as_args: "--builtins", - }, - /// Whether we should dump the Clang AST for debugging purposes. - emit_ast: bool { - methods: { - /// Emit the Clang AST to `stdout` for debugging purposes. - /// - /// The Clang AST is not emitted by default. - pub fn emit_clang_ast(mut self) -> Builder { - self.options.emit_ast = true; - self - } - }, - as_args: "--emit-clang-ast", - }, - /// Whether we should dump our IR for debugging purposes. - emit_ir: bool { - methods: { - /// Emit the `bindgen` internal representation to `stdout` for debugging purposes. - /// - /// This internal representation is not emitted by default. - pub fn emit_ir(mut self) -> Builder { - self.options.emit_ir = true; - self - } - }, - as_args: "--emit-ir", - }, - /// Output path for the `graphviz` DOT file. - emit_ir_graphviz: Option { - methods: { - /// Set the path for the file where the`bindgen` internal representation will be - /// emitted as a graph using the `graphviz` DOT language. - /// - /// This graph representation is not emitted by default. - pub fn emit_ir_graphviz>(mut self, path: T) -> Builder { - let path = path.into(); - self.options.emit_ir_graphviz = Some(path); - self - } - }, - as_args: "--emit-ir-graphviz", - }, - - /// Whether we should emulate C++ namespaces with Rust modules. - enable_cxx_namespaces: bool { - methods: { - /// Emulate C++ namespaces using Rust modules in the generated bindings. - /// - /// C++ namespaces are not emulated by default. - pub fn enable_cxx_namespaces(mut self) -> Builder { - self.options.enable_cxx_namespaces = true; - self - } - }, - as_args: "--enable-cxx-namespaces", - }, - /// Whether we should try to find unexposed attributes in functions. - enable_function_attribute_detection: bool { - methods: { - /// Enable detecting function attributes on C functions. - /// - /// This enables the following features: - /// - Add `#[must_use]` attributes to Rust items whose C counterparts are marked as so. - /// This feature also requires that the Rust target version supports the attribute. - /// - Set `!` as the return type for Rust functions whose C counterparts are marked as - /// diverging. - /// - /// This option can be quite slow in some cases (check [#1465]), so it is disabled by - /// default. - /// - /// [#1465]: https://github.com/rust-lang/rust-bindgen/issues/1465 - pub fn enable_function_attribute_detection(mut self) -> Self { - self.options.enable_function_attribute_detection = true; - self - } - - }, - as_args: "--enable-function-attribute-detection", - }, - /// Whether we should avoid mangling names with namespaces. - disable_name_namespacing: bool { - methods: { - /// Disable name auto-namespacing. - /// - /// By default, `bindgen` mangles names like `foo::bar::Baz` to look like `foo_bar_Baz` - /// instead of just `Baz`. This method disables that behavior. - /// - /// Note that this does not change the names used for allowlisting and blocklisting, - /// which should still be mangled with the namespaces. Additionally, this option may - /// cause `bindgen` to generate duplicate names. - pub fn disable_name_namespacing(mut self) -> Builder { - self.options.disable_name_namespacing = true; - self - } - }, - as_args: "--disable-name-namespacing", - }, - /// Whether we should avoid generating nested `struct` names. - disable_nested_struct_naming: bool { - methods: { - /// Disable nested `struct` naming. - /// - /// The following `struct`s have different names for C and C++. 
In C, they are visible - /// as `foo` and `bar`. In C++, they are visible as `foo` and `foo::bar`. - /// - /// ```c - /// struct foo { - /// struct bar { - /// } b; - /// }; - /// ``` - /// - /// `bindgen` tries to avoid duplicate names by default, so it follows the C++ naming - /// convention and it generates `foo` and `foo_bar` instead of just `foo` and `bar`. - /// - /// This method disables this behavior and it is indented to be used only for headers - /// that were written in C. - pub fn disable_nested_struct_naming(mut self) -> Builder { - self.options.disable_nested_struct_naming = true; - self - } - }, - as_args: "--disable-nested-struct-naming", - }, - /// Whether we should avoid embedding version identifiers into source code. - disable_header_comment: bool { - methods: { - /// Do not insert the `bindgen` version identifier into the generated bindings. - /// - /// This identifier is inserted by default. - pub fn disable_header_comment(mut self) -> Self { - self.options.disable_header_comment = true; - self - } - - }, - as_args: "--disable-header-comment", - }, - /// Whether we should generate layout tests for generated `struct`s. - layout_tests: bool { - default: true, - methods: { - /// Set whether layout tests should be generated. - /// - /// Layout tests are generated by default. - pub fn layout_tests(mut self, doit: bool) -> Self { - self.options.layout_tests = doit; - self - } - }, - as_args: |value, args| (!value).as_args(args, "--no-layout-tests"), - }, - /// Whether we should implement `Debug` for types that cannot derive it. - impl_debug: bool { - methods: { - /// Set whether `Debug` should be implemented for types that cannot derive it. - /// - /// This option is disabled by default. - pub fn impl_debug(mut self, doit: bool) -> Self { - self.options.impl_debug = doit; - self - } - - }, - as_args: "--impl-debug", - }, - /// Whether we should implement `PartialEq` types that cannot derive it. - impl_partialeq: bool { - methods: { - /// Set whether `PartialEq` should be implemented for types that cannot derive it. - /// - /// This option is disabled by default. - pub fn impl_partialeq(mut self, doit: bool) -> Self { - self.options.impl_partialeq = doit; - self - } - }, - as_args: "--impl-partialeq", - }, - /// Whether we should derive `Copy` when possible. - derive_copy: bool { - default: true, - methods: { - /// Set whether the `Copy` trait should be derived when possible. - /// - /// `Copy` is derived by default. - pub fn derive_copy(mut self, doit: bool) -> Self { - self.options.derive_copy = doit; - self - } - }, - as_args: |value, args| (!value).as_args(args, "--no-derive-copy"), - }, - - /// Whether we should derive `Debug` when possible. - derive_debug: bool { - default: true, - methods: { - /// Set whether the `Debug` trait should be derived when possible. - /// - /// The [`Builder::impl_debug`] method can be used to implement `Debug` for types that - /// cannot derive it. - /// - /// `Debug` is derived by default. - pub fn derive_debug(mut self, doit: bool) -> Self { - self.options.derive_debug = doit; - self - } - }, - as_args: |value, args| (!value).as_args(args, "--no-derive-debug"), - }, - - /// Whether we should derive `Default` when possible. - derive_default: bool { - methods: { - /// Set whether the `Default` trait should be derived when possible. - /// - /// `Default` is not derived by default. 
- pub fn derive_default(mut self, doit: bool) -> Self { - self.options.derive_default = doit; - self - } - }, - as_args: |&value, args| { - let arg = if value { - "--with-derive-default" - } else { - "--no-derive-default" - }; - - args.push(arg.to_owned()); - }, - }, - /// Whether we should derive `Hash` when possible. - derive_hash: bool { - methods: { - /// Set whether the `Hash` trait should be derived when possible. - /// - /// `Hash` is not derived by default. - pub fn derive_hash(mut self, doit: bool) -> Self { - self.options.derive_hash = doit; - self - } - }, - as_args: "--with-derive-hash", - }, - /// Whether we should derive `PartialOrd` when possible. - derive_partialord: bool { - methods: { - /// Set whether the `PartialOrd` trait should be derived when possible. - /// - /// Take into account that `Ord` cannot be derived for a type that does not implement - /// `PartialOrd`. For this reason, setting this method to `false` also sets - /// automatically [`Builder::derive_ord`] to `false`. - /// - /// `PartialOrd` is not derived by default. - pub fn derive_partialord(mut self, doit: bool) -> Self { - self.options.derive_partialord = doit; - if !doit { - self.options.derive_ord = false; - } - self - } - }, - as_args: "--with-derive-partialord", - }, - /// Whether we should derive `Ord` when possible. - derive_ord: bool { - methods: { - /// Set whether the `Ord` trait should be derived when possible. - /// - /// Take into account that `Ord` cannot be derived for a type that does not implement - /// `PartialOrd`. For this reason, the value set with this method will also be set - /// automatically for [`Builder::derive_partialord`]. - /// - /// `Ord` is not derived by default. - pub fn derive_ord(mut self, doit: bool) -> Self { - self.options.derive_ord = doit; - self.options.derive_partialord = doit; - self - } - }, - as_args: "--with-derive-ord", - }, - /// Whether we should derive `PartialEq` when possible. - derive_partialeq: bool { - methods: { - /// Set whether the `PartialEq` trait should be derived when possible. - /// - /// Take into account that `Eq` cannot be derived for a type that does not implement - /// `PartialEq`. For this reason, setting this method to `false` also sets - /// automatically [`Builder::derive_eq`] to `false`. - /// - /// The [`Builder::impl_partialeq`] method can be used to implement `PartialEq` for - /// types that cannot derive it. - /// - /// `PartialEq` is not derived by default. - pub fn derive_partialeq(mut self, doit: bool) -> Self { - self.options.derive_partialeq = doit; - if !doit { - self.options.derive_eq = false; - } - self - } - }, - as_args: "--with-derive-partialeq", - }, - /// Whether we should derive `Eq` when possible. - derive_eq: bool { - methods: { - /// Set whether the `Eq` trait should be derived when possible. - /// - /// Take into account that `Eq` cannot be derived for a type that does not implement - /// `PartialEq`. For this reason, the value set with this method will also be set - /// automatically for [`Builder::derive_partialeq`]. - /// - /// `Eq` is not derived by default. - pub fn derive_eq(mut self, doit: bool) -> Self { - self.options.derive_eq = doit; - if doit { - self.options.derive_partialeq = doit; - } - self - } - }, - as_args: "--with-derive-eq", - }, - /// Whether we should use `core` instead of `std`. - /// - /// If this option is enabled and the Rust target version is greater than 1.64, the prefix for - /// C platform-specific types will be `::core::ffi` instead of `::core::os::raw`. 
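A hedged usage sketch of the derive options documented above, including the coupling between the PartialEq/Eq pair; the header name is a placeholder and the snippet is not part of this patch.

fn derive_options() -> Result<bindgen::Bindings, bindgen::BindgenError> {
    bindgen::Builder::default()
        .header("wrapper.h")
        .derive_default(true)   // --with-derive-default
        .derive_hash(true)      // --with-derive-hash
        .derive_partialeq(true) // setting this to false would also clear derive_eq
        .derive_eq(true)        // setting this to true also re-enables derive_partialeq
        .generate()
}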
- use_core: bool { - methods: { - /// Use `core` instead of `std` in the generated bindings. - /// - /// `std` is used by default. - pub fn use_core(mut self) -> Builder { - self.options.use_core = true; - self - } - - }, - as_args: "--use-core", - }, - /// An optional prefix for the C platform-specific types. - ctypes_prefix: Option { - methods: { - /// Use the given prefix for the C platform-specific types instead of `::std::os::raw`. - /// - /// Alternatively, the [`Builder::use_core`] method can be used to set the prefix to - /// `::core::ffi` or `::core::os::raw`. - pub fn ctypes_prefix>(mut self, prefix: T) -> Builder { - self.options.ctypes_prefix = Some(prefix.into()); - self - } - }, - as_args: "--ctypes-prefix", - }, - /// The prefix for anonymous fields. - anon_fields_prefix: String { - default: DEFAULT_ANON_FIELDS_PREFIX.into(), - methods: { - /// Use the given prefix for the anonymous fields. - /// - /// An anonymous field, is a field of a C/C++ type that does not have a name. For - /// example, in the following C code: - /// ```c - /// struct integer { - /// struct { - /// int inner; - /// }; - /// } - /// ``` - /// - /// The only field of the `integer` `struct` is an anonymous field and its Rust - /// representation will be named using this prefix followed by an integer identifier. - /// - /// The default prefix is `__bindgen_anon_`. - pub fn anon_fields_prefix>(mut self, prefix: T) -> Builder { - self.options.anon_fields_prefix = prefix.into(); - self - } - }, - as_args: |prefix, args| { - if prefix != DEFAULT_ANON_FIELDS_PREFIX { - args.push("--anon-fields-prefix".to_owned()); - args.push(prefix.clone()); - } - }, - }, - /// Whether to measure the time for each one of the `bindgen` phases. - time_phases: bool { - methods: { - /// Set whether to measure the elapsed time for each one of the `bindgen` phases. This - /// information is printed to `stderr`. - /// - /// The elapsed time is not measured by default. - pub fn time_phases(mut self, doit: bool) -> Self { - self.options.time_phases = doit; - self - } - }, - as_args: "--time-phases", - }, - /// Whether to convert C float types to `f32` and `f64`. - convert_floats: bool { - default: true, - methods: { - /// Avoid converting C float types to `f32` and `f64`. - pub fn no_convert_floats(mut self) -> Self { - self.options.convert_floats = false; - self - } - }, - as_args: |value, args| (!value).as_args(args, "--no-convert-floats"), - }, - /// The set of raw lines to be prepended to the top-level module of the generated Rust code. - raw_lines: Vec> { - methods: { - /// Add a line of Rust code at the beginning of the generated bindings. The string is - /// passed through without any modification. - pub fn raw_line>(mut self, arg: T) -> Self { - self.options.raw_lines.push(arg.into().into_boxed_str()); - self - } - }, - as_args: |raw_lines, args| { - for line in raw_lines { - args.push("--raw-line".to_owned()); - args.push(line.clone().into()); - } - }, - }, - /// The set of raw lines to prepend to different modules. - module_lines: HashMap, Vec>> { - methods: { - /// Add a given line to the beginning of a given module. - /// - /// This option only comes into effect if the [`Builder::enable_cxx_namespaces`] method - /// is also being called. 
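A minimal no_std-flavoured sketch of the use_core, ctypes_prefix and raw_line options described above; the header name and the prepended line are illustrative only.

fn core_ffi_bindings() -> Result<bindgen::Bindings, bindgen::BindgenError> {
    bindgen::Builder::default()
        .header("wrapper.h")
        .use_core()                   // emit ::core paths instead of ::std
        .ctypes_prefix("::core::ffi") // prefix for C platform-specific types
        .raw_line("#![allow(non_camel_case_types)]") // prepended verbatim to the output
        .generate()
}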
- pub fn module_raw_line(mut self, module: T, line: U) -> Self - where - T: Into, - U: Into, - { - self.options - .module_lines - .entry(module.into().into_boxed_str()) - .or_default() - .push(line.into().into_boxed_str()); - self - } - }, - as_args: |module_lines, args| { - for (module, lines) in module_lines { - for line in lines { - args.push("--module-raw-line".to_owned()); - args.push(module.clone().into()); - args.push(line.clone().into()); - } - } - }, - }, - /// The input header files. - input_headers: Vec> { - methods: { - /// Add an input C/C++ header to generate bindings for. - /// - /// This can be used to generate bindings for a single header: - /// - /// ```ignore - /// let bindings = bindgen::Builder::default() - /// .header("input.h") - /// .generate() - /// .unwrap(); - /// ``` - /// - /// Or for multiple headers: - /// - /// ```ignore - /// let bindings = bindgen::Builder::default() - /// .header("first.h") - /// .header("second.h") - /// .header("third.h") - /// .generate() - /// .unwrap(); - /// ``` - pub fn header>(mut self, header: T) -> Builder { - self.options.input_headers.push(header.into().into_boxed_str()); - self - } - - /// Add input C/C++ header(s) to generate bindings for. - /// - /// This can be used to generate bindings for a single header: - /// - /// ```ignore - /// let bindings = bindgen::Builder::default() - /// .headers(["input.h"]) - /// .generate() - /// .unwrap(); - /// ``` - /// - /// Or for multiple headers: - /// - /// ```ignore - /// let bindings = bindgen::Builder::default() - /// .headers(["first.h", "second.h", "third.h"]) - /// .generate() - /// .unwrap(); - /// ``` - pub fn headers(mut self, headers: I) -> Builder - where - I::Item: Into, - { - self.options - .input_headers - .extend(headers.into_iter().map(Into::into).map(Into::into)); - self - } - }, - // This field is handled specially inside the macro. - as_args: ignore, - }, - /// The set of arguments to be passed straight through to Clang. - clang_args: Vec> { - methods: { - /// Add an argument to be passed straight through to Clang. - pub fn clang_arg>(self, arg: T) -> Builder { - self.clang_args([arg.into().into_boxed_str()]) - } - - /// Add several arguments to be passed straight through to Clang. - pub fn clang_args(mut self, args: I) -> Builder - where - I::Item: AsRef, - { - for arg in args { - self.options.clang_args.push(arg.as_ref().to_owned().into_boxed_str()); - } - self - } - }, - // This field is handled specially inside the macro. - as_args: ignore, - }, - /// The set of arguments to be passed straight through to Clang for the macro fallback code. - fallback_clang_args: Vec> { - methods: {}, - as_args: ignore, - }, - /// Tuples of unsaved file contents of the form (name, contents). - input_header_contents: Vec<(Box, Box)> { - methods: { - /// Add `contents` as an input C/C++ header named `name`. - /// - /// This can be used to inject additional C/C++ code as an input without having to - /// create additional header files. - pub fn header_contents(mut self, name: &str, contents: &str) -> Builder { - // Apparently clang relies on having virtual FS correspondent to - // the real one, so we need absolute paths here - let absolute_path = env::current_dir() - .expect("Cannot retrieve current directory") - .join(name) - .to_str() - .expect("Cannot convert current directory name to string") - .into(); - self.options - .input_header_contents - .push((absolute_path, contents.into())); - self - } - }, - // Header contents cannot be added from the CLI. 
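The input options above can be combined; a small sketch, with placeholder paths, of feeding several headers, an extra Clang flag and an in-memory header to the Builder.

fn input_headers() -> Result<bindgen::Bindings, bindgen::BindgenError> {
    bindgen::Builder::default()
        .headers(["first.h", "second.h"])                // same as chained .header() calls
        .clang_arg("-I./Include")                        // passed straight through to Clang
        .header_contents("extra.h", "#define EXTRA 1\n") // injected without a real file
        .generate()
}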
- as_args: ignore, - }, - /// A user-provided visitor to allow customizing different kinds of situations. - parse_callbacks: Vec> { - methods: { - /// Add a new [`ParseCallbacks`] instance to configure types in different situations. - /// - /// This can also be used with [`CargoCallbacks`](struct@crate::CargoCallbacks) to emit - /// `cargo:rerun-if-changed=...` for all `#include`d header files. - pub fn parse_callbacks(mut self, cb: Box) -> Self { - self.options.parse_callbacks.push(Rc::from(cb)); - self - } - }, - as_args: |_callbacks, _args| { - #[cfg(feature = "__cli")] - for cb in _callbacks { - _args.extend(cb.cli_args()); - } - }, - }, - /// Which kind of items should we generate. We generate all of them by default. - codegen_config: CodegenConfig { - default: CodegenConfig::all(), - methods: { - /// Do not generate any functions. - /// - /// Functions are generated by default. - pub fn ignore_functions(mut self) -> Builder { - self.options.codegen_config.remove(CodegenConfig::FUNCTIONS); - self - } - - /// Do not generate any methods. - /// - /// Methods are generated by default. - pub fn ignore_methods(mut self) -> Builder { - self.options.codegen_config.remove(CodegenConfig::METHODS); - self - } - - /// Choose what to generate using a [`CodegenConfig`]. - /// - /// This option overlaps with [`Builder::ignore_functions`] and - /// [`Builder::ignore_methods`]. - /// - /// All the items in `CodegenConfig` are generated by default. - pub fn with_codegen_config(mut self, config: CodegenConfig) -> Self { - self.options.codegen_config = config; - self - } - }, - as_args: |codegen_config, args| { - if !codegen_config.functions() { - args.push("--ignore-functions".to_owned()); - } - - args.push("--generate".to_owned()); - - //Temporary placeholder for the 4 options below. - let mut options: Vec = Vec::new(); - if codegen_config.functions() { - options.push("functions".to_owned()); - } - - if codegen_config.types() { - options.push("types".to_owned()); - } - - if codegen_config.vars() { - options.push("vars".to_owned()); - } - - if codegen_config.methods() { - options.push("methods".to_owned()); - } - - if codegen_config.constructors() { - options.push("constructors".to_owned()); - } - - if codegen_config.destructors() { - options.push("destructors".to_owned()); - } - - args.push(options.join(",")); - - if !codegen_config.methods() { - args.push("--ignore-methods".to_owned()); - } - }, - }, - /// Whether to treat inline namespaces conservatively. - conservative_inline_namespaces: bool { - methods: { - /// Treat inline namespaces conservatively. - /// - /// This is tricky, because in C++ is technically legal to override an item - /// defined in an inline namespace: - /// - /// ```cpp - /// inline namespace foo { - /// using Bar = int; - /// } - /// using Bar = long; - /// ``` - /// - /// Even though referencing `Bar` is a compiler error. - /// - /// We want to support this (arguably esoteric) use case, but we do not want to make - /// the rest of `bindgen` users pay an usability penalty for that. - /// - /// To support this, we need to keep all the inline namespaces around, but then using - /// `bindgen` becomes a bit more difficult, because you cannot reference paths like - /// `std::string` (you'd need to use the proper inline namespace). - /// - /// We could complicate a lot of the logic to detect name collisions and, in the - /// absence of collisions, generate a `pub use inline_ns::*` or something like that. 
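A sketch, assuming CodegenConfig is the bitflags type re-exported at the crate root, of restricting generation to functions, types and vars as the codegen_config docs above describe.

fn restricted_codegen() -> Result<bindgen::Bindings, bindgen::BindgenError> {
    let config = bindgen::CodegenConfig::FUNCTIONS
        | bindgen::CodegenConfig::TYPES
        | bindgen::CodegenConfig::VARS; // methods/constructors/destructors omitted
    bindgen::Builder::default()
        .header("wrapper.h")
        .with_codegen_config(config)
        .generate()
}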
- /// - /// That is probably something we can do to improve the usability of this option if we - /// realize it is needed way more often. Our guess is that this extra logic is not - /// going to be very useful. - /// - /// This option is disabled by default. - pub fn conservative_inline_namespaces(mut self) -> Builder { - self.options.conservative_inline_namespaces = true; - self - } - }, - as_args: "--conservative-inline-namespaces", - }, - /// Whether to keep documentation comments in the generated output. - generate_comments: bool { - default: true, - methods: { - /// Set whether the generated bindings should contain documentation comments. - /// - /// Documentation comments are included by default. - /// - /// Note that clang excludes comments from system headers by default, pass - /// `"-fretain-comments-from-system-headers"` to the [`Builder::clang_arg`] method to - /// include them. - /// - /// It is also possible to process all comments and not just documentation using the - /// `"-fparse-all-comments"` flag. Check [these slides on clang comment parsing]( - /// https://llvm.org/devmtg/2012-11/Gribenko_CommentParsing.pdf) for more information - /// and examples. - pub fn generate_comments(mut self, doit: bool) -> Self { - self.options.generate_comments = doit; - self - } - }, - as_args: |value, args| (!value).as_args(args, "--no-doc-comments"), - }, - /// Whether to generate inline functions. - generate_inline_functions: bool { - methods: { - /// Set whether to generate inline functions. - /// - /// This option is disabled by default. - /// - /// Note that they will usually not work. However you can use `-fkeep-inline-functions` - /// or `-fno-inline-functions` if you are responsible of compiling the library to make - /// them callable. - /// - /// Check the [`Builder::wrap_static_fns`] method for an alternative. - pub fn generate_inline_functions(mut self, doit: bool) -> Self { - self.options.generate_inline_functions = doit; - self - } - }, - as_args: "--generate-inline-functions", - }, - /// Whether to allowlist types recursively. - allowlist_recursively: bool { - default: true, - methods: { - /// Set whether to recursively allowlist items. - /// - /// Items are allowlisted recursively by default. - /// - /// Given that we have explicitly allowlisted the `initiate_dance_party` function in - /// this C header: - /// - /// ```c - /// typedef struct MoonBoots { - /// int bouncy_level; - /// } MoonBoots; - /// - /// void initiate_dance_party(MoonBoots* boots); - /// ``` - /// - /// We would normally generate bindings to both the `initiate_dance_party` function and - /// the `MoonBoots` type that it transitively references. If `false` is passed to this - /// method, `bindgen` will not emit bindings for anything except the explicitly - /// allowlisted items, meaning that the definition for `MoonBoots` would not be - /// generated. However, the `initiate_dance_party` function would still reference - /// `MoonBoots`! - /// - /// **Disabling this feature will almost certainly cause `bindgen` to emit bindings - /// that will not compile!** If you disable this feature, then it is *your* - /// responsibility to provide definitions for every type that is referenced from an - /// explicitly allowlisted item. One way to provide the missing definitions is by using - /// the [`Builder::raw_line`] method, another would be to define them in Rust and then - /// `include!(...)` the bindings immediately afterwards. 
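A sketch reusing the MoonBoots example from the doc comment above; allowlist_function is part of the same Builder API (defined elsewhere in this file), and the in-memory header stands in for a real one.

fn recursive_allowlisting() -> Result<bindgen::Bindings, bindgen::BindgenError> {
    bindgen::Builder::default()
        .header_contents(
            "dance.h",
            "typedef struct MoonBoots { int bouncy_level; } MoonBoots;\n\
             void initiate_dance_party(MoonBoots* boots);\n",
        )
        .allowlist_function("initiate_dance_party")
        // true is the default: MoonBoots is pulled in transitively. Passing
        // false would omit its definition and the output would not compile.
        .allowlist_recursively(true)
        .generate()
}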
- pub fn allowlist_recursively(mut self, doit: bool) -> Self { - self.options.allowlist_recursively = doit; - self - } - }, - as_args: |value, args| (!value).as_args(args, "--no-recursive-allowlist"), - }, - /// Whether to emit `#[macro_use] extern crate objc;` instead of `use objc;` in the prologue of - /// the files generated from objective-c files. - objc_extern_crate: bool { - methods: { - /// Emit `#[macro_use] extern crate objc;` instead of `use objc;` in the prologue of - /// the files generated from objective-c files. - /// - /// `use objc;` is emitted by default. - pub fn objc_extern_crate(mut self, doit: bool) -> Self { - self.options.objc_extern_crate = doit; - self - } - }, - as_args: "--objc-extern-crate", - }, - /// Whether to generate proper block signatures instead of `void` pointers. - generate_block: bool { - methods: { - /// Generate proper block signatures instead of `void` pointers. - /// - /// `void` pointers are used by default. - pub fn generate_block(mut self, doit: bool) -> Self { - self.options.generate_block = doit; - self - } - }, - as_args: "--generate-block", - }, - /// Whether to generate strings as `CStr`. - generate_cstr: bool { - methods: { - /// Set whether string constants should be generated as `&CStr` instead of `&[u8]`. - /// - /// A minimum Rust target of 1.59 is required for this to have any effect as support - /// for `CStr::from_bytes_with_nul_unchecked` in `const` contexts is needed. - /// - /// This option is disabled by default but will become enabled by default in a future - /// release, so enabling this is recommended. - pub fn generate_cstr(mut self, doit: bool) -> Self { - self.options.generate_cstr = doit; - self - } - }, - as_args: "--generate-cstr", - }, - /// Whether to emit `#[macro_use] extern crate block;` instead of `use block;` in the prologue - /// of the files generated from apple block files. - block_extern_crate: bool { - methods: { - /// Emit `#[macro_use] extern crate block;` instead of `use block;` in the prologue of - /// the files generated from apple block files. - /// - /// `use block;` is emitted by default. - pub fn block_extern_crate(mut self, doit: bool) -> Self { - self.options.block_extern_crate = doit; - self - } - }, - as_args: "--block-extern-crate", - }, - /// Whether to use the clang-provided name mangling. - enable_mangling: bool { - default: true, - methods: { - /// Set whether to use the clang-provided name mangling. This is probably needed for - /// C++ features. - /// - /// The mangling provided by clang is used by default. - /// - /// We allow disabling this option because some old `libclang` versions seem to return - /// incorrect results in some cases for non-mangled functions, check [#528] for more - /// information. - /// - /// [#528]: https://github.com/rust-lang/rust-bindgen/issues/528 - pub fn trust_clang_mangling(mut self, doit: bool) -> Self { - self.options.enable_mangling = doit; - self - } - - }, - as_args: |value, args| (!value).as_args(args, "--distrust-clang-mangling"), - }, - /// Whether to detect include paths using `clang_sys`. - detect_include_paths: bool { - default: true, - methods: { - /// Set whether to detect include paths using `clang_sys`. - /// - /// `clang_sys` is used to detect include paths by default. 
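A short sketch of the string-constant and mangling knobs documented above; all values are illustrative rather than taken from this patch.

fn cstr_and_mangling() -> Result<bindgen::Bindings, bindgen::BindgenError> {
    bindgen::Builder::default()
        .header("wrapper.h")
        .generate_cstr(true)         // string constants as &CStr instead of &[u8]
        .trust_clang_mangling(false) // --distrust-clang-mangling
        .detect_include_paths(false) // --no-include-path-detection
        .generate()
}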
- pub fn detect_include_paths(mut self, doit: bool) -> Self { - self.options.detect_include_paths = doit; - self - } - }, - as_args: |value, args| (!value).as_args(args, "--no-include-path-detection"), - }, - /// Whether we should try to fit macro constants into types smaller than `u32` and `i32`. - fit_macro_constants: bool { - methods: { - /// Set whether `bindgen` should try to fit macro constants into types smaller than `u32` - /// and `i32`. - /// - /// This option is disabled by default. - pub fn fit_macro_constants(mut self, doit: bool) -> Self { - self.options.fit_macro_constants = doit; - self - } - }, - as_args: "--fit-macro-constant-types", - }, - /// Whether to prepend the `enum` name to constant or newtype variants. - prepend_enum_name: bool { - default: true, - methods: { - /// Set whether to prepend the `enum` name to constant or newtype variants. - /// - /// The `enum` name is prepended by default. - pub fn prepend_enum_name(mut self, doit: bool) -> Self { - self.options.prepend_enum_name = doit; - self - } - }, - as_args: |value, args| (!value).as_args(args, "--no-prepend-enum-name"), - }, - /// Version of the Rust compiler to target. - rust_target: RustTarget { - methods: { - /// Specify the Rust target version. - /// - /// The default target is the latest stable Rust version. - pub fn rust_target(mut self, rust_target: RustTarget) -> Self { - self.options.set_rust_target(rust_target); - self - } - }, - as_args: |rust_target, args| { - args.push("--rust-target".to_owned()); - args.push(rust_target.to_string()); - }, - }, - /// The Rust edition to use for code generation. - rust_edition: Option { - methods: { - /// Specify the Rust target edition. - /// - /// The default edition is the latest edition supported by the chosen Rust target. - pub fn rust_edition(mut self, rust_edition: RustEdition) -> Self { - self.options.rust_edition = Some(rust_edition); - self - } - } - as_args: |edition, args| { - if let Some(edition) = edition { - args.push("--rust-edition".to_owned()); - args.push(edition.to_string()); - } - }, - }, - /// Features to be enabled. They are derived from `rust_target`. - rust_features: RustFeatures { - methods: {}, - // This field cannot be set from the CLI, - as_args: ignore, - }, - /// Enable support for native Rust unions if they are supported. - untagged_union: bool { - default: true, - methods: { - /// Disable support for native Rust unions, if supported. - /// - /// The default value of this option is set based on the value passed to - /// [`Builder::rust_target`]. - pub fn disable_untagged_union(mut self) -> Self { - self.options.untagged_union = false; - self - } - } - as_args: |value, args| (!value).as_args(args, "--disable-untagged-union"), - }, - /// Whether we should record which items in the regex sets did match any C items. - record_matches: bool { - default: true, - methods: { - /// Set whether we should record which items in our regex sets did match any C items. - /// - /// Matches are recorded by default. - pub fn record_matches(mut self, doit: bool) -> Self { - self.options.record_matches = doit; - self - } - - }, - as_args: |value, args| (!value).as_args(args, "--no-record-matches"), - }, - /// Whether `size_t` should be translated to `usize` automatically. - size_t_is_usize: bool { - default: true, - methods: { - /// Set whether `size_t` should be translated to `usize`. - /// - /// If `size_t` is translated to `usize`, type definitions for `size_t` will not be - /// emitted. - /// - /// `size_t` is translated to `usize` by default. 
- pub fn size_t_is_usize(mut self, is: bool) -> Self { - self.options.size_t_is_usize = is; - self - } - }, - as_args: |value, args| (!value).as_args(args, "--no-size_t-is-usize"), - }, - /// The tool that should be used to format the generated bindings. - formatter: Formatter { - methods: { - /// Set whether `rustfmt` should be used to format the generated bindings. - /// - /// `rustfmt` is used by default. - /// - /// This method overlaps in functionality with the more general [`Builder::formatter`]. - /// Thus, the latter should be preferred. - #[deprecated] - pub fn rustfmt_bindings(mut self, doit: bool) -> Self { - self.options.formatter = if doit { - Formatter::Rustfmt - } else { - Formatter::None - }; - self - } - - /// Set which tool should be used to format the generated bindings. - /// - /// The default formatter is [`Formatter::Rustfmt`]. - /// - /// To be able to use `prettyplease` as a formatter, the `"prettyplease"` feature for - /// `bindgen` must be enabled in the Cargo manifest. - pub fn formatter(mut self, formatter: Formatter) -> Self { - self.options.formatter = formatter; - self - } - }, - as_args: |formatter, args| { - if *formatter != Default::default() { - args.push("--formatter".to_owned()); - args.push(formatter.to_string()); - } - }, - }, - /// The absolute path to the `rustfmt` configuration file. - rustfmt_configuration_file: Option { - methods: { - /// Set the absolute path to the `rustfmt` configuration file. - /// - /// The default `rustfmt` options are used if `None` is passed to this method or if - /// this method is not called at all. - /// - /// Calling this method will set the [`Builder::rustfmt_bindings`] option to `true` - /// and the [`Builder::formatter`] option to [`Formatter::Rustfmt`]. - pub fn rustfmt_configuration_file(mut self, path: Option) -> Self { - self = self.formatter(Formatter::Rustfmt); - self.options.rustfmt_configuration_file = path; - self - } - }, - as_args: "--rustfmt-configuration-file", - }, - /// Types that should not derive `PartialEq`. - no_partialeq_types: RegexSet { - methods: { - regex_option! { - /// Do not derive `PartialEq` for a given type. - pub fn no_partialeq>(mut self, arg: T) -> Builder { - self.options.no_partialeq_types.insert(arg.into()); - self - } - } - }, - as_args: "--no-partialeq", - }, - /// Types that should not derive `Copy`. - no_copy_types: RegexSet { - methods: { - regex_option! { - /// Do not derive `Copy` and `Clone` for a given type. - pub fn no_copy>(mut self, arg: T) -> Self { - self.options.no_copy_types.insert(arg.into()); - self - } - } - }, - as_args: "--no-copy", - }, - /// Types that should not derive `Debug`. - no_debug_types: RegexSet { - methods: { - regex_option! { - /// Do not derive `Debug` for a given type. - pub fn no_debug>(mut self, arg: T) -> Self { - self.options.no_debug_types.insert(arg.into()); - self - } - } - }, - as_args: "--no-debug", - }, - /// Types that should not derive or implement `Default`. - no_default_types: RegexSet { - methods: { - regex_option! { - /// Do not derive or implement `Default` for a given type. - pub fn no_default>(mut self, arg: T) -> Self { - self.options.no_default_types.insert(arg.into()); - self - } - } - }, - as_args: "--no-default", - }, - /// Types that should not derive `Hash`. - no_hash_types: RegexSet { - methods: { - regex_option! { - /// Do not derive `Hash` for a given type. 
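A sketch of the per-type derive opt-outs documented above, assuming Formatter is the publicly re-exported enum taken by Builder::formatter; the regex patterns are placeholders.

fn per_type_opt_outs() -> Result<bindgen::Bindings, bindgen::BindgenError> {
    bindgen::Builder::default()
        .header("wrapper.h")
        .formatter(bindgen::Formatter::Rustfmt) // the default formatter
        .no_copy("mutex_.*")     // skip Copy/Clone for matching types
        .no_debug("mutex_.*")    // skip Debug for matching types
        .no_default(".*_opaque") // skip Default for matching types
        .generate()
}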
- pub fn no_hash>(mut self, arg: T) -> Builder { - self.options.no_hash_types.insert(arg.into()); - self - } - } - }, - as_args: "--no-hash", - }, - /// Types that should be annotated with `#[must_use]`. - must_use_types: RegexSet { - methods: { - regex_option! { - /// Annotate the given type with the `#[must_use]` attribute. - pub fn must_use_type>(mut self, arg: T) -> Builder { - self.options.must_use_types.insert(arg.into()); - self - } - } - }, - as_args: "--must-use-type", - }, - /// Whether C arrays should be regular pointers in rust or array pointers - array_pointers_in_arguments: bool { - methods: { - /// Translate arrays `T arr[size]` into array pointers `*mut [T; size]` instead of - /// translating them as `*mut T` which is the default. - /// - /// The same is done for `*const` pointers. - pub fn array_pointers_in_arguments(mut self, doit: bool) -> Self { - self.options.array_pointers_in_arguments = doit; - self - } - - }, - as_args: "--use-array-pointers-in-arguments", - }, - /// The name of the `wasm_import_module`. - wasm_import_module_name: Option { - methods: { - /// Adds the `#[link(wasm_import_module = import_name)]` attribute to all the `extern` - /// blocks generated by `bindgen`. - /// - /// This attribute is not added by default. - pub fn wasm_import_module_name>( - mut self, - import_name: T, - ) -> Self { - self.options.wasm_import_module_name = Some(import_name.into()); - self - } - }, - as_args: "--wasm-import-module-name", - }, - /// The name of the dynamic library (if we are generating bindings for a shared library). - dynamic_library_name: Option { - methods: { - /// Generate bindings for a shared library with the given name. - /// - /// This option is disabled by default. - pub fn dynamic_library_name>( - mut self, - dynamic_library_name: T, - ) -> Self { - self.options.dynamic_library_name = Some(dynamic_library_name.into()); - self - } - }, - as_args: "--dynamic-loading", - }, - /// Whether to require successful linkage for all routines in a shared library. - dynamic_link_require_all: bool { - methods: { - /// Set whether to require successful linkage for all routines in a shared library. - /// This allows us to optimize function calls by being able to safely assume function - /// pointers are valid. - /// - /// This option only comes into effect if the [`Builder::dynamic_library_name`] option - /// is set. - /// - /// This option is disabled by default. - pub fn dynamic_link_require_all(mut self, req: bool) -> Self { - self.options.dynamic_link_require_all = req; - self - } - }, - as_args: "--dynamic-link-require-all", - }, - /// Whether to only make generated bindings `pub` if the items would be publicly accessible by - /// C++. - respect_cxx_access_specs: bool { - methods: { - /// Set whether to respect the C++ access specifications. - /// - /// Passing `true` to this method will set the visibility of the generated Rust items - /// as `pub` only if the corresponding C++ items are publicly accessible instead of - /// marking all the items as public, which is the default. - pub fn respect_cxx_access_specs(mut self, doit: bool) -> Self { - self.options.respect_cxx_access_specs = doit; - self - } - - }, - as_args: "--respect-cxx-access-specs", - }, - /// Whether to translate `enum` integer types to native Rust integer types. - translate_enum_integer_types: bool { - methods: { - /// Set whether to always translate `enum` integer types to native Rust integer types. 
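A sketch of the must_use and dynamic-loading options above; the library and type names are placeholders.

fn dynamic_loading() -> Result<bindgen::Bindings, bindgen::BindgenError> {
    bindgen::Builder::default()
        .header("wrapper.h")
        .must_use_type("status_t")      // annotate the matching type with #[must_use]
        .dynamic_library_name("libfoo") // generate a runtime-loadable wrapper struct
        .dynamic_link_require_all(true) // require every routine to resolve at load time
        .generate()
}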
- /// - /// Passing `true` to this method will result in `enum`s having types such as `u32` and - /// `i16` instead of `c_uint` and `c_short` which is the default. The `#[repr]` types - /// of Rust `enum`s are always translated to Rust integer types. - pub fn translate_enum_integer_types(mut self, doit: bool) -> Self { - self.options.translate_enum_integer_types = doit; - self - } - }, - as_args: "--translate-enum-integer-types", - }, - /// Whether to generate types with C style naming. - c_naming: bool { - methods: { - /// Set whether to generate types with C style naming. - /// - /// Passing `true` to this method will add prefixes to the generated type names. For - /// example, instead of a `struct` with name `A` we will generate a `struct` with - /// `struct_A`. Currently applies to `struct`s, `union`s, and `enum`s. - pub fn c_naming(mut self, doit: bool) -> Self { - self.options.c_naming = doit; - self - } - }, - as_args: "--c-naming", - }, - /// Whether to always emit explicit padding fields. - force_explicit_padding: bool { - methods: { - /// Set whether to always emit explicit padding fields. - /// - /// This option should be enabled if a `struct` needs to be serialized in its native - /// format (padding bytes and all). This could be required if such `struct` will be - /// written to a file or sent over the network, as anything reading the padding bytes - /// of a struct may cause undefined behavior. - /// - /// Padding fields are not emitted by default. - pub fn explicit_padding(mut self, doit: bool) -> Self { - self.options.force_explicit_padding = doit; - self - } - }, - as_args: "--explicit-padding", - }, - /// Whether to emit vtable functions. - vtable_generation: bool { - methods: { - /// Set whether to enable experimental support to generate virtual table functions. - /// - /// This option should mostly work, though some edge cases are likely to be broken. - /// - /// Virtual table generation is disabled by default. - pub fn vtable_generation(mut self, doit: bool) -> Self { - self.options.vtable_generation = doit; - self - } - }, - as_args: "--vtable-generation", - }, - /// Whether to sort the generated Rust items. - sort_semantically: bool { - methods: { - /// Set whether to sort the generated Rust items in a predefined manner. - /// - /// Items are not ordered by default. - pub fn sort_semantically(mut self, doit: bool) -> Self { - self.options.sort_semantically = doit; - self - } - }, - as_args: "--sort-semantically", - }, - /// Whether to deduplicate `extern` blocks. - merge_extern_blocks: bool { - methods: { - /// Merge all extern blocks under the same module into a single one. - /// - /// Extern blocks are not merged by default. - pub fn merge_extern_blocks(mut self, doit: bool) -> Self { - self.options.merge_extern_blocks = doit; - self - } - }, - as_args: "--merge-extern-blocks", - }, - /// Whether to wrap unsafe operations in unsafe blocks. - wrap_unsafe_ops: bool { - methods: { - /// Wrap all unsafe operations in unsafe blocks. - /// - /// Unsafe operations are not wrapped by default. - pub fn wrap_unsafe_ops(mut self, doit: bool) -> Self { - self.options.wrap_unsafe_ops = doit; - self - } - }, - as_args: "--wrap-unsafe-ops", - }, - /// Use DSTs to represent structures with flexible array members. - flexarray_dst: bool { - methods: { - /// Use DSTs to represent structures with flexible array members. - /// - /// This option is disabled by default. 
- pub fn flexarray_dst(mut self, doit: bool) -> Self { - self.options.flexarray_dst = doit; - self - } - }, - as_args: "--flexarray-dst", - }, - /// Patterns for functions whose ABI should be overridden. - abi_overrides: HashMap { - methods: { - regex_option! { - /// Override the ABI of a given function. - pub fn override_abi>(mut self, abi: Abi, arg: T) -> Self { - self.options - .abi_overrides - .entry(abi) - .or_default() - .insert(arg.into()); - self - } - } - }, - as_args: |overrides, args| { - for (abi, set) in overrides { - for item in set.get_items() { - args.push("--override-abi".to_owned()); - args.push(format!("{item}={abi}")); - } - } - }, - }, - /// Whether to generate wrappers for `static` functions. - wrap_static_fns: bool { - methods: { - /// Set whether to generate wrappers for `static`` functions. - /// - /// Passing `true` to this method will generate a C source file with non-`static` - /// functions that call the `static` functions found in the input headers and can be - /// called from Rust once the source file is compiled. - /// - /// The path of this source file can be set using the [`Builder::wrap_static_fns_path`] - /// method. - pub fn wrap_static_fns(mut self, doit: bool) -> Self { - self.options.wrap_static_fns = doit; - self - } - }, - as_args: "--wrap-static-fns", - }, - /// The suffix to be added to the function wrappers for `static` functions. - wrap_static_fns_suffix: Option { - methods: { - /// Set the suffix added to the wrappers for `static` functions. - /// - /// This option only comes into effect if `true` is passed to the - /// [`Builder::wrap_static_fns`] method. - /// - /// The default suffix is `__extern`. - pub fn wrap_static_fns_suffix>(mut self, suffix: T) -> Self { - self.options.wrap_static_fns_suffix = Some(suffix.as_ref().to_owned()); - self - } - }, - as_args: "--wrap-static-fns-suffix", - }, - /// The path of the file where the wrappers for `static` functions will be emitted. - wrap_static_fns_path: Option { - methods: { - /// Set the path for the source code file that would be created if any wrapper - /// functions must be generated due to the presence of `static` functions. - /// - /// `bindgen` will automatically add the right extension to the header and source code - /// files. - /// - /// This option only comes into effect if `true` is passed to the - /// [`Builder::wrap_static_fns`] method. - /// - /// The default path is `temp_dir/bindgen/extern`, where `temp_dir` is the path - /// returned by [`std::env::temp_dir`] . - pub fn wrap_static_fns_path>(mut self, path: T) -> Self { - self.options.wrap_static_fns_path = Some(path.as_ref().to_owned()); - self - } - }, - as_args: "--wrap-static-fns-path", - }, - /// Default visibility of fields. - default_visibility: FieldVisibilityKind { - methods: { - /// Set the default visibility of fields, including bitfields and accessor methods for - /// bitfields. - /// - /// This option only comes into effect if the [`Builder::respect_cxx_access_specs`] - /// option is disabled. - pub fn default_visibility( - mut self, - visibility: FieldVisibilityKind, - ) -> Self { - self.options.default_visibility = visibility; - self - } - }, - as_args: |visibility, args| { - if *visibility != Default::default() { - args.push("--default-visibility".to_owned()); - args.push(visibility.to_string()); - } - }, - }, - /// Whether to emit diagnostics or not. - emit_diagnostics: bool { - methods: { - #[cfg(feature = "experimental")] - /// Emit diagnostics. 
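A sketch of the static-function wrapping flow documented above; the output path is a placeholder, and the generated C shim still has to be compiled and linked separately.

fn wrapped_static_fns() -> Result<bindgen::Bindings, bindgen::BindgenError> {
    bindgen::Builder::default()
        .header("wrapper.h")
        .wrap_static_fns(true)
        .wrap_static_fns_path("generated/extern") // bindgen appends the .c/.h extensions
        .generate()
}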
- /// - /// These diagnostics are emitted to `stderr` if you are using `bindgen-cli` or printed - /// using `cargo:warning=` if you are using `bindgen` as a `build-dependency`. - /// - /// Diagnostics are not emitted by default. - /// - /// The layout and contents of these diagnostic messages are not covered by versioning - /// and can change without notice. - pub fn emit_diagnostics(mut self) -> Self { - self.options.emit_diagnostics = true; - self - } - }, - as_args: "--emit-diagnostics", - }, - /// Whether to use Clang evaluation on temporary files as a fallback for macros that fail to - /// parse. - clang_macro_fallback: bool { - methods: { - /// Use Clang as a fallback for macros that fail to parse using `CExpr`. - /// - /// This uses a workaround to evaluate each macro in a temporary file. Because this - /// results in slower compilation, this option is opt-in. - pub fn clang_macro_fallback(mut self) -> Self { - self.options.clang_macro_fallback = true; - self - } - }, - as_args: "--clang-macro-fallback", - } - /// Path to use for temporary files created by clang macro fallback code like precompiled - /// headers. - clang_macro_fallback_build_dir: Option { - methods: { - /// Set a path to a directory to which `.c` and `.h.pch` files should be written for the - /// purpose of using clang to evaluate macros that can't be easily parsed. - /// - /// The default location for `.h.pch` files is the directory that the corresponding - /// `.h` file is located in. The default for the temporary `.c` file used for clang - /// parsing is the current working directory. Both of these defaults are overridden - /// by this option. - pub fn clang_macro_fallback_build_dir>(mut self, path: P) -> Self { - self.options.clang_macro_fallback_build_dir = Some(path.as_ref().to_owned()); - self - } - }, - as_args: "--clang-macro-fallback-build-dir", - } - /// Whether to always report C++ "deleted" functions. - generate_deleted_functions: bool { - methods: { - /// Set whether to generate C++ functions even marked "=deleted" - /// - /// Although not useful to call these functions, downstream code - /// generators may need to know whether they've been deleted in - /// order to determine the relocatability of a C++ type - /// (specifically by virtue of which constructors exist.) - pub fn generate_deleted_functions(mut self, doit: bool) -> Self { - self.options.generate_deleted_functions = doit; - self - } - - }, - as_args: "--generate-deleted-functions", - }, - /// Whether to always report C++ "pure virtual" functions. - generate_pure_virtual_functions: bool { - methods: { - /// Set whether to generate C++ functions that are pure virtual. - /// - /// These functions can't be called, so the only reason - /// to generate them is if downstream postprocessors - /// need to know of their existence. This is necessary, - /// for instance, to determine whether a type itself is - /// pure virtual and thus can't be allocated. - /// Downstream code generators may choose to make code to - /// allow types to be allocated but need to avoid doing so - /// if the type contains pure virtual functions. - pub fn generate_pure_virtual_functions(mut self, doit: bool) -> Self { - self.options.generate_pure_virtual_functions = doit; - self - } - - }, - as_args: "--generate-pure-virtual-functions", - }, - /// Whether to always report C++ "private" functions. - generate_private_functions: bool { - methods: { - /// Set whether to generate C++ functions that are private. 
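A sketch of the Clang macro-fallback options above; the build directory is a placeholder.

fn macro_fallback() -> Result<bindgen::Bindings, bindgen::BindgenError> {
    bindgen::Builder::default()
        .header("wrapper.h")
        .clang_macro_fallback() // re-evaluate macros CExpr cannot parse by invoking Clang
        .clang_macro_fallback_build_dir("target/bindgen-fallback") // where the .c/.h.pch files land
        .generate()
}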
- /// - /// These functions can't be called, so the only reason - /// to generate them is if downstream postprocessors - /// need to know of their existence. - pub fn generate_private_functions(mut self, doit: bool) -> Self { - self.options.generate_private_functions = doit; - self - } - - }, - as_args: "--generate-private-functions", - }, -} diff --git a/vendor/bindgen/parse.rs b/vendor/bindgen/parse.rs deleted file mode 100644 index d29b090fcb6a84..00000000000000 --- a/vendor/bindgen/parse.rs +++ /dev/null @@ -1,41 +0,0 @@ -//! Common traits and types related to parsing our IR from Clang cursors. -#![deny(clippy::missing_docs_in_private_items)] - -use crate::clang; -use crate::ir::context::{BindgenContext, ItemId}; - -/// Not so much an error in the traditional sense, but a control flow message -/// when walking over Clang's AST with a cursor. -#[derive(Debug)] -pub(crate) enum ParseError { - /// Recurse down the current AST node's children. - Recurse, - /// Continue on to the next sibling AST node, or back up to the parent's - /// siblings if we've exhausted all of this node's siblings (and so on). - Continue, -} - -/// The result of parsing a Clang AST node. -#[derive(Debug)] -pub(crate) enum ParseResult { - /// We've already resolved this item before, here is the extant `ItemId` for - /// it. - AlreadyResolved(ItemId), - - /// This is a newly parsed item. If the cursor is `Some`, it points to the - /// AST node where the new `T` was declared. - New(T, Option), -} - -/// An intermediate representation "sub-item" (i.e. one of the types contained -/// inside an `ItemKind` variant) that can be parsed from a Clang cursor. -pub(crate) trait ClangSubItemParser: Sized { - /// Attempt to parse this type from the given cursor. - /// - /// The fact that is a reference guarantees it's held by the context, and - /// allow returning already existing types. - fn parse( - cursor: clang::Cursor, - context: &mut BindgenContext, - ) -> Result, ParseError>; -} diff --git a/vendor/bindgen/regex_set.rs b/vendor/bindgen/regex_set.rs deleted file mode 100644 index 32279557b535a0..00000000000000 --- a/vendor/bindgen/regex_set.rs +++ /dev/null @@ -1,199 +0,0 @@ -//! A type that represents the union of a set of regular expressions. -#![deny(clippy::missing_docs_in_private_items)] - -use regex::RegexSet as RxSet; -use std::cell::Cell; - -/// A dynamic set of regular expressions. -#[derive(Clone, Debug, Default)] -pub(crate) struct RegexSet { - items: Vec>, - /// Whether any of the items in the set was ever matched. The length of this - /// vector is exactly the length of `items`. - matched: Vec>, - set: Option, - /// Whether we should record matching items in the `matched` vector or not. - record_matches: bool, -} - -impl RegexSet { - /// Is this set empty? - pub(crate) fn is_empty(&self) -> bool { - self.items.is_empty() - } - - /// Insert a new regex into this set. - pub(crate) fn insert(&mut self, string: S) - where - S: AsRef, - { - self.items.push(string.as_ref().to_owned().into_boxed_str()); - self.matched.push(Cell::new(false)); - self.set = None; - } - - /// Returns slice of String from its field 'items' - pub(crate) fn get_items(&self) -> &[Box] { - &self.items - } - - /// Returns an iterator over regexes in the set which didn't match any - /// strings yet. 
- pub(crate) fn unmatched_items(&self) -> impl Iterator { - self.items.iter().enumerate().filter_map(move |(i, item)| { - if !self.record_matches || self.matched[i].get() { - return None; - } - - Some(item.as_ref()) - }) - } - - /// Construct a `RegexSet` from the set of entries we've accumulated. - /// - /// Must be called before calling `matches()`, or it will always return - /// false. - #[inline] - #[allow(unused)] - pub(crate) fn build(&mut self, record_matches: bool) { - self.build_inner(record_matches, None); - } - - #[cfg(all(feature = "__cli", feature = "experimental"))] - /// Construct a `RegexSet` from the set of entries we've accumulated and emit diagnostics if the - /// name of the regex set is passed to it. - /// - /// Must be called before calling `matches()`, or it will always return - /// false. - #[inline] - pub(crate) fn build_with_diagnostics( - &mut self, - record_matches: bool, - name: Option<&'static str>, - ) { - self.build_inner(record_matches, name); - } - - #[cfg(all(not(feature = "__cli"), feature = "experimental"))] - /// Construct a RegexSet from the set of entries we've accumulated and emit diagnostics if the - /// name of the regex set is passed to it. - /// - /// Must be called before calling `matches()`, or it will always return - /// false. - #[inline] - pub(crate) fn build_with_diagnostics( - &mut self, - record_matches: bool, - name: Option<&'static str>, - ) { - self.build_inner(record_matches, name); - } - - fn build_inner( - &mut self, - record_matches: bool, - _name: Option<&'static str>, - ) { - let items = self.items.iter().map(|item| format!("^({item})$")); - self.record_matches = record_matches; - self.set = match RxSet::new(items) { - Ok(x) => Some(x), - Err(e) => { - warn!("Invalid regex in {:?}: {e:?}", self.items); - #[cfg(feature = "experimental")] - if let Some(name) = _name { - invalid_regex_warning(self, e, name); - } - None - } - } - } - - /// Does the given `string` match any of the regexes in this set? 
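A conceptual sketch of what build_inner and matches do, expressed with the regex crate directly: each pattern is anchored as ^(pattern)$ before the set is compiled, so only whole-string matches count.

fn anchored_regex_set() {
    use regex::RegexSet;

    let anchored: Vec<String> = ["foo.*", "bar"]
        .iter()
        .map(|item| format!("^({item})$"))
        .collect();
    let set = RegexSet::new(&anchored).expect("invalid regex");
    assert!(set.is_match("foobar"));      // the whole string matches ^(foo.*)$
    assert!(!set.is_match("prefix_bar")); // "bar" must match the whole string
}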
- pub(crate) fn matches(&self, string: S) -> bool - where - S: AsRef, - { - let s = string.as_ref(); - let Some(ref set) = self.set else { - return false; - }; - - if !self.record_matches { - return set.is_match(s); - } - - let matches = set.matches(s); - if !matches.matched_any() { - return false; - } - for i in &matches { - self.matched[i].set(true); - } - - true - } -} - -#[cfg(feature = "experimental")] -fn invalid_regex_warning( - set: &RegexSet, - err: regex::Error, - name: &'static str, -) { - use crate::diagnostics::{Diagnostic, Level, Slice}; - - let mut diagnostic = Diagnostic::default(); - - match err { - regex::Error::Syntax(string) => { - if string.starts_with("regex parse error:\n") { - let mut source = String::new(); - - let mut parsing_source = true; - - for line in string.lines().skip(1) { - if parsing_source { - if line.starts_with(' ') { - source.push_str(line); - source.push('\n'); - continue; - } - parsing_source = false; - } - let error = "error: "; - if line.starts_with(error) { - let (_, msg) = line.split_at(error.len()); - diagnostic.add_annotation(msg.to_owned(), Level::Error); - } else { - diagnostic.add_annotation(line.to_owned(), Level::Info); - } - } - let mut slice = Slice::default(); - slice.with_source(source); - diagnostic.add_slice(slice); - - diagnostic.with_title( - "Error while parsing a regular expression.", - Level::Warning, - ); - } else { - diagnostic.with_title(string, Level::Warning); - } - } - err => { - let err = err.to_string(); - diagnostic.with_title(err, Level::Warning); - } - } - - diagnostic.add_annotation( - format!("This regular expression was passed via `{name}`."), - Level::Note, - ); - - if set.items.iter().any(|item| item.as_ref() == "*") { - diagnostic.add_annotation("Wildcard patterns \"*\" are no longer considered valid. Use \".*\" instead.", Level::Help); - } - diagnostic.display(); -} diff --git a/vendor/bindgen/time.rs b/vendor/bindgen/time.rs deleted file mode 100644 index 2952e36f760c2a..00000000000000 --- a/vendor/bindgen/time.rs +++ /dev/null @@ -1,52 +0,0 @@ -use std::io::{self, Write}; -use std::time::{Duration, Instant}; - -/// RAII timer to measure how long phases take. -#[derive(Debug)] -pub struct Timer<'a> { - output: bool, - name: &'a str, - start: Instant, -} - -impl<'a> Timer<'a> { - /// Creates a Timer with the given name, and starts it. By default, - /// will print to stderr when it is `drop`'d - pub fn new(name: &'a str) -> Self { - Timer { - output: true, - name, - start: Instant::now(), - } - } - - /// Sets whether or not the Timer will print a message - /// when it is dropped. - pub fn with_output(mut self, output: bool) -> Self { - self.output = output; - self - } - - /// Returns the time elapsed since the timer's creation - pub fn elapsed(&self) -> Duration { - self.start.elapsed() - } - - fn print_elapsed(&mut self) { - if self.output { - let elapsed = self.elapsed(); - let time = (elapsed.as_secs() as f64) * 1e3 + - f64::from(elapsed.subsec_nanos()) / 1e6; - let stderr = io::stderr(); - // Arbitrary output format, subject to change. 
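A crate-internal usage sketch of the RAII Timer above, as driven by the time_phases option: the timer starts on construction and prints the elapsed time to stderr when it is dropped.

fn timed_phase() {
    let _timer = Timer::new("codegen").with_output(true);
    // ... do the work for this phase ...
} // Drop runs here and writes a line like " time:    12.345 ms.  codegen" to stderr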
- writeln!(stderr.lock(), " time: {time:>9.3} ms.\t{}", self.name) - .expect("timer write should not fail"); - } - } -} - -impl Drop for Timer<'_> { - fn drop(&mut self) { - self.print_elapsed(); - } -} diff --git a/vendor/bitflags/.cargo-checksum.json b/vendor/bitflags/.cargo-checksum.json deleted file mode 100644 index 734a5dc6fed4a9..00000000000000 --- a/vendor/bitflags/.cargo-checksum.json +++ /dev/null @@ -1 +0,0 @@ -{"files":{".cargo_vcs_info.json":"886ab20f7366d7decd3716d19aff27f3ddde9684f5c71acca1684d867e692235","CHANGELOG.md":"648bc400e8387d19c7170890bb6e45207d63cb3149f5591936b317ac7952bbb9","CODE_OF_CONDUCT.md":"42634d0f6d922f49857175af991802822f7f920487aefa2ee250a50d12251a66","CONTRIBUTING.md":"6c9f96eacb20af877ae2d16f024904f3038b93448a8488e9dbcac0df7f6439a5","Cargo.lock":"eb3583e00fadd27f10c93df9fb63695ca1889436cebb4c8b58414243cdda9d59","Cargo.toml":"1d496ea35bdd5b8e3ee00cfa6fd515d89842c793c3e86f450f8c963b5b3a84eb","Cargo.toml.orig":"25266ca314ead26f44356315628a4136adfefdcaf7bd86e15c3d55903bda7c6d","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"6485b8ed310d3f0340bf1ad1f47645069ce4069dcc6bb46c7d5c6faf41de1fdb","README.md":"b3d42f34936fd897dc891094f0dbbfb5f41f7255f48a3fe2470b1b420b6235d6","SECURITY.md":"68704c8128fa2e776ed7cbda741fbf61ad52f998a96350ee7ee4dbf64c6573bc","benches/parse.rs":"f1390d62322c6880d65bd931e183d49b313f287879a6bfaa36b1cb1921090b51","examples/custom_bits_type.rs":"e53b32051adc5d97860e0b48c8f3a301a041d73b4939c0d7caa5f0cfcc0b9739","examples/custom_derive.rs":"730589695eb68dda21d0d9f69e90cbdbf9823b13d6f16c5f22b0083c00981813","examples/fmt.rs":"87ba37a1fb8528570c74ea26d8e8948e1179c3d867b928bea1080880258e0a99","examples/macro_free.rs":"69e7f284b53b5214d51228a686e87f127b52a3b74711e45537ebfa5583a180e5","examples/serde.rs":"dfc7cd50232c6763f7cd05b4089ef9408db9368ee42c3fd5c116ff424810a2b0","spec.md":"f0657642c7cf470e6d6e55362aaab224b3df0f22cb7796b109bb41687acea8b1","src/example_generated.rs":"d018caf059f6ffc4c2403b771a6d76679fa5af03c329a91bd9252957df695e7f","src/external.rs":"59e962382560a5362953dd4396a09ba78e3f4446ef46c6a153a9cadb1c329506","src/external/arbitrary.rs":"43908bb4fe0a076078dcb3fa70c654aaed8c7b38aa66574414165a82037def83","src/external/bytemuck.rs":"3afcef382122867040fddd5e4153d633d1ed5596fe5d7dfac66a8e61c2513df5","src/external/serde.rs":"0f7339036f41cd93f29b21b954bc1f0fd747762f7a4f20d4ebfc848b20584dc8","src/internal.rs":"645b13af0c7302258df61239073a4b8203d09f27b6c17f8a6f1f8c3e427f5334","src/iter.rs":"18db983a501b02c71fda1301a9c020322bd684fe043ccccc8221a706a53c1f31","src/lib.rs":"bf13327fb5ee4fd149acb95c0f007dc1222b8e028217800ffbdf1dd70e176e13","src/parser.rs":"4e788b29f5d0542c409a8b43c703bcb4a6c2a57c181cadd17f565f0abb39681e","src/public.rs":"2a695651626cf7442cc83e52c410e01ceeb50902345e2a85988ad27c951287ac","src/tests.rs":"8e480dc78bd29bbb62cfaf62c3c8f779b39f96edc1e83f230a353296bfb4ffff","src/tests/all.rs":"e99a865cd4271a524c2fe95503e96d851b35990570aed6fb2e9dac7a14da31b6","src/tests/bitflags_match.rs":"601ad186930908b681f24312132000518fc927ba569d394e5c4440462f037aec","src/tests/bits.rs":"3840c34b2ea5d1802404b9ce5bcc1d3fa6ccd8dfba2e29e6d07c605f817d90df","src/tests/clear.rs":"6976fcda2f3367c8219485d33bd5d754da6769770cf164c12baace010ad7686d","src/tests/complement.rs":"d0e6d4c3daf49e0a7438c9f1c1ac91fad1b37f258c03593f6cd6a695ee626f5e","src/tests/contains.rs":"58bb3cb8c86550e775d11134da1d4aca85c83f943ea454e3a5f222772c674a24","src/tests/difference.rs":"d0d2b96bb52658b8ac019210da74ca75a53e76622f668855142ea6e97c28cb
0e","src/tests/empty.rs":"817d6e93ced7cb7576ff0e334aa1a44703f3f96871ff2c6bdcb8f207e6551f67","src/tests/eq.rs":"b816767680a029e9c163e37af074dd4e604c4a3e4936f829f0ca3774fd5f0e37","src/tests/extend.rs":"5fabb9fd0254c64da019149c24063fceff72da3eb4ad73b57c1cc4c04b008364","src/tests/flags.rs":"2f48d3a25db1cf66fe98c9959abc70875deb9f7b38b2c278dc70c46e0d4ec277","src/tests/fmt.rs":"a2d4148491f3202f030f63633eee941b741e3be29a68cf376f008dbe5cb11e5c","src/tests/from_bits.rs":"d94c65b88bf89961d0cfc1b3152a7f1acc285bae160a1628438effda11b8e2c1","src/tests/from_bits_retain.rs":"980591dfaf91e940f42d9a1ce890f237514dd59d458fc264abcf9ceabbc40677","src/tests/from_bits_truncate.rs":"d3406b5e107ebb6449b98a59eee6cc5d84f947d4aaee1ee7e80dc7202de179f0","src/tests/from_name.rs":"f4a055d1f3c86decef70ef8f3020cef5c4e229718c20b3d59d5a3abc3a8b1298","src/tests/insert.rs":"3fab5da800a6fc0654dfb5f859f95da65a507eb9fda8695083c2712266dff0b9","src/tests/intersection.rs":"baf1454c9e4eba552264870a556ee0032d9f2bb8cac361833d571235e0b52221","src/tests/intersects.rs":"c55e36179fd8bc636f04ea9bbce346dcaafe57915d13f1df28c5b83117dbd08e","src/tests/is_all.rs":"b2f11faa7c954bd85c8fb39999e0c37d983cf7895152bc13c7ddde106aa33b6d","src/tests/is_empty.rs":"11f21323cdca7ff92dd89e09de667dba69e8dce88e2d3e27ea68ace91d15d070","src/tests/iter.rs":"db96736e94686f4c66c012b20e4059fc3e61205feda8b4f1ad7aa16615071c18","src/tests/parser.rs":"fa2fb8dedcf16601af609a5e21d9c5840c7f96a1e3a587f7f2ea3dc8387f7628","src/tests/remove.rs":"6e75f8508d2dc1a2cba89ef691f4387a665a4fd13853bb1dd0fd80c783b89947","src/tests/symmetric_difference.rs":"0a89f084f9de1dd5b1932fe72c3b10a3c93cbaa16832b3a31b6a85e3bbd3ba6e","src/tests/truncate.rs":"683430af4a0e47ec73c737a6908fac5d851bed7c41d47c73a642e96d966aa5ae","src/tests/union.rs":"88f398ee4600bb1e59bf6d02d1f6ff33f5f853eab5a6c700bd8a683c6ee4651a","src/tests/unknown.rs":"fa9e8ee461f176c0d892cde487fef0fe66df2aa5906aaef21b093102e590f5f5","src/traits.rs":"c8757d4f5aa26ac2c2c154bd293f647d0722ac65e977fb9d19f41c83798cae40"},"package":"812e12b5285cc515a9c72a5c1d3b6d46a19dac5acfef5265968c166106e31dd3"} \ No newline at end of file diff --git a/vendor/bitflags/.cargo_vcs_info.json b/vendor/bitflags/.cargo_vcs_info.json deleted file mode 100644 index c4d1f9e9636e47..00000000000000 --- a/vendor/bitflags/.cargo_vcs_info.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "git": { - "sha1": "7cc8595e93d04d180d39e2f25242dca85dd71228" - }, - "path_in_vcs": "" -} \ No newline at end of file diff --git a/vendor/bitflags/CHANGELOG.md b/vendor/bitflags/CHANGELOG.md deleted file mode 100644 index b03810ae3a79a3..00000000000000 --- a/vendor/bitflags/CHANGELOG.md +++ /dev/null @@ -1,636 +0,0 @@ -# 2.10.0 - -## What's Changed -* Implement iterator for all named flags by @ssrlive in https://github.com/bitflags/bitflags/pull/465 -* Depend on serde_core instead of serde by @KodrAus in https://github.com/bitflags/bitflags/pull/467 - -## New Contributors -* @ssrlive made their first contribution in https://github.com/bitflags/bitflags/pull/465 - -**Full Changelog**: https://github.com/bitflags/bitflags/compare/2.9.4...2.10.0 - -# 2.9.4 - -## What's Changed -* Add Cargo features to readme by @KodrAus in https://github.com/bitflags/bitflags/pull/460 - -**Full Changelog**: https://github.com/bitflags/bitflags/compare/2.9.3...2.9.4 - -# 2.9.3 - -## What's Changed -* Streamline generated code by @nnethercote in https://github.com/bitflags/bitflags/pull/458 - -## New Contributors -* @nnethercote made their first contribution in https://github.com/bitflags/bitflags/pull/458 - -**Full 
Changelog**: https://github.com/bitflags/bitflags/compare/2.9.2...2.9.3 - -# 2.9.2 - -## What's Changed -* Fix difference in the spec by @KodrAus in https://github.com/bitflags/bitflags/pull/446 -* Fix up inaccurate docs on bitflags_match by @KodrAus in https://github.com/bitflags/bitflags/pull/453 -* Remove rustc internal crate feature by @KodrAus in https://github.com/bitflags/bitflags/pull/454 - - -**Full Changelog**: https://github.com/bitflags/bitflags/compare/2.9.1...2.9.2 - -# 2.9.1 - -## What's Changed -* Document Cargo features by @KodrAus in https://github.com/bitflags/bitflags/pull/444 - - -**Full Changelog**: https://github.com/bitflags/bitflags/compare/2.9.0...2.9.1 - -# 2.9.0 - -## What's Changed -* `Flags` trait: add `clear(&mut self)` method by @wysiwys in https://github.com/bitflags/bitflags/pull/437 -* Fix up UI tests by @KodrAus in https://github.com/bitflags/bitflags/pull/438 - - -**Full Changelog**: https://github.com/bitflags/bitflags/compare/2.8.0...2.9.0 - -# 2.8.0 - -## What's Changed -* feat(core): Add bitflags_match macro for bitflag matching by @YuniqueUnic in https://github.com/bitflags/bitflags/pull/423 -* Finalize bitflags_match by @KodrAus in https://github.com/bitflags/bitflags/pull/431 - -## New Contributors -* @YuniqueUnic made their first contribution in https://github.com/bitflags/bitflags/pull/423 - -**Full Changelog**: https://github.com/bitflags/bitflags/compare/2.7.0...2.8.0 - -# 2.7.0 - -## What's Changed -* Fix `clippy::doc_lazy_continuation` lints by @waywardmonkeys in https://github.com/bitflags/bitflags/pull/414 -* Run clippy on extra features in CI. by @waywardmonkeys in https://github.com/bitflags/bitflags/pull/415 -* Fix CI: trybuild refresh, allow some clippy restrictions. by @waywardmonkeys in https://github.com/bitflags/bitflags/pull/417 -* Update zerocopy version in example by @KodrAus in https://github.com/bitflags/bitflags/pull/422 -* Add method to check if unknown bits are set by @wysiwys in https://github.com/bitflags/bitflags/pull/426 -* Update error messages by @KodrAus in https://github.com/bitflags/bitflags/pull/427 -* Add `truncate(&mut self)` method to unset unknown bits by @wysiwys in https://github.com/bitflags/bitflags/pull/428 -* Update error messages by @KodrAus in https://github.com/bitflags/bitflags/pull/429 - -## New Contributors -* @wysiwys made their first contribution in https://github.com/bitflags/bitflags/pull/426 - -**Full Changelog**: https://github.com/bitflags/bitflags/compare/2.6.0...2.7.0 - -# 2.6.0 - -## What's Changed -* Sync CHANGELOG.md with github release notes by @dextero in https://github.com/bitflags/bitflags/pull/402 -* Update error messages and zerocopy by @KodrAus in https://github.com/bitflags/bitflags/pull/403 -* Bump minimum declared versions of dependencies by @dextero in https://github.com/bitflags/bitflags/pull/404 -* chore(deps): bump serde_derive and bytemuck versions by @joshka in https://github.com/bitflags/bitflags/pull/405 -* add OSFF Scorecard workflow by @KodrAus in https://github.com/bitflags/bitflags/pull/396 -* Update stderr messages by @KodrAus in https://github.com/bitflags/bitflags/pull/408 -* Fix typo by @waywardmonkeys in https://github.com/bitflags/bitflags/pull/410 -* Allow specifying outer attributes in impl mode by @KodrAus in https://github.com/bitflags/bitflags/pull/411 - -## New Contributors -* @dextero made their first contribution in https://github.com/bitflags/bitflags/pull/402 -* @joshka made their first contribution in https://github.com/bitflags/bitflags/pull/405 
-* @waywardmonkeys made their first contribution in https://github.com/bitflags/bitflags/pull/410 - -**Full Changelog**: https://github.com/bitflags/bitflags/compare/2.5.0...2.6.0 - -# 2.5.0 - -## What's Changed -* Derive `Debug` for `Flag` by @tgross35 in https://github.com/bitflags/bitflags/pull/398 -* Support truncating or strict-named variants of parsing and formatting by @KodrAus in https://github.com/bitflags/bitflags/pull/400 - -## New Contributors -* @tgross35 made their first contribution in https://github.com/bitflags/bitflags/pull/398 - -**Full Changelog**: https://github.com/bitflags/bitflags/compare/2.4.2...2.5.0 - -# 2.4.2 - -## What's Changed -* Cargo.toml: Anchor excludes to root of the package by @jamessan in https://github.com/bitflags/bitflags/pull/387 -* Update error messages by @KodrAus in https://github.com/bitflags/bitflags/pull/390 -* Add support for impl mode structs to be repr(packed) by @GnomedDev in https://github.com/bitflags/bitflags/pull/388 -* Remove old `unused_tuple_struct_fields` lint by @dtolnay in https://github.com/bitflags/bitflags/pull/393 -* Delete use of `local_inner_macros` by @dtolnay in https://github.com/bitflags/bitflags/pull/392 - -## New Contributors -* @jamessan made their first contribution in https://github.com/bitflags/bitflags/pull/387 -* @GnomedDev made their first contribution in https://github.com/bitflags/bitflags/pull/388 - -**Full Changelog**: https://github.com/bitflags/bitflags/compare/2.4.1...2.4.2 - -# 2.4.1 - -## What's Changed -* Allow some new pedantic clippy lints by @KodrAus in https://github.com/bitflags/bitflags/pull/380 - -**Full Changelog**: https://github.com/bitflags/bitflags/compare/2.4.0...2.4.1 - -# 2.4.0 - -## What's Changed -* Remove html_root_url by @eldruin in https://github.com/bitflags/bitflags/pull/368 -* Support unnamed flags by @KodrAus in https://github.com/bitflags/bitflags/pull/371 -* Update smoke test to verify all Clippy and rustc lints by @MitMaro in https://github.com/bitflags/bitflags/pull/374 -* Specify the behavior of bitflags by @KodrAus in https://github.com/bitflags/bitflags/pull/369 - -## New Contributors -* @eldruin made their first contribution in https://github.com/bitflags/bitflags/pull/368 -* @MitMaro made their first contribution in https://github.com/bitflags/bitflags/pull/374 - -**Full Changelog**: https://github.com/bitflags/bitflags/compare/2.3.3...2.4.0 - -# 2.3.3 - -## Changes to `-=` - -The `-=` operator was incorrectly changed to truncate bits that didn't correspond to valid flags in `2.3.0`. This has -been fixed up so it once again behaves the same as `-` and `difference`. - -## Changes to `!` - -The `!` operator previously called `Self::from_bits_truncate`, which would truncate any bits that only partially -overlapped with a valid flag. It will now use `bits & Self::all().bits()`, so any bits that overlap any bits -specified by any flag will be respected. This is unlikely to have any practical implications, but enables defining -a flag like `const ALL = !0` as a way to signal that any bit pattern is a known set of flags. - -## Changes to formatting - -Zero-valued flags will never be printed. You'll either get `0x0` for empty flags using debug formatting, or the -set of flags with zero-valued flags omitted for others. - -Composite flags will no longer be redundantly printed if there are extra bits to print at the end that don't correspond -to a valid flag. 
- -## What's Changed -* Fix up incorrect sub assign behavior and other cleanups by @KodrAus in https://github.com/bitflags/bitflags/pull/366 - -**Full Changelog**: https://github.com/bitflags/bitflags/compare/2.3.2...2.3.3 - -# 2.3.2 - -## What's Changed -* [doc] [src/lib.rs] delete redundant path prefix by @OccupyMars2025 in https://github.com/bitflags/bitflags/pull/361 - -## New Contributors -* @OccupyMars2025 made their first contribution in https://github.com/bitflags/bitflags/pull/361 - -**Full Changelog**: https://github.com/bitflags/bitflags/compare/2.3.1...2.3.2 - -# 2.3.1 - -## What's Changed -* Fix Self in flags value expressions by @KodrAus in https://github.com/bitflags/bitflags/pull/355 - -**Full Changelog**: https://github.com/bitflags/bitflags/compare/2.3.0...2.3.1 - -# 2.3.0 - -## Major changes - -### `BitFlags` trait deprecated in favor of `Flags` trait - -This release introduces the `Flags` trait and deprecates the `BitFlags` trait. These two traits are semver compatible so if you have public API code depending on `BitFlags` you can move to `Flags` without breaking end-users. This is possible because the `BitFlags` trait was never publicly implementable, so it now carries `Flags` as a supertrait. All implementations of `Flags` additionally implement `BitFlags`. - -The `Flags` trait is a publicly implementable version of the old `BitFlags` trait. The original `BitFlags` trait carried some macro baggage that made it difficult to implement, so a new `Flags` trait has been introduced as the _One True Trait_ for interacting with flags types generically. See the the `macro_free` and `custom_derive` examples for more details. - -### `Bits` trait publicly exposed - -The `Bits` trait for the underlying storage of flags values is also now publicly implementable. This lets you define your own exotic backing storage for flags. See the `custom_bits_type` example for more details. 
- -## What's Changed -* Use explicit hashes for actions steps by @KodrAus in https://github.com/bitflags/bitflags/pull/350 -* Support ejecting flags types from the bitflags macro by @KodrAus in https://github.com/bitflags/bitflags/pull/351 - -**Full Changelog**: https://github.com/bitflags/bitflags/compare/2.2.1...2.3.0 - -# 2.2.1 - -## What's Changed -* Refactor attribute filtering to apply per-flag by @KodrAus in https://github.com/bitflags/bitflags/pull/345 - -**Full Changelog**: https://github.com/bitflags/bitflags/compare/2.2.0...2.2.1 - -# 2.2.0 - -## What's Changed -* Create SECURITY.md by @KodrAus in https://github.com/bitflags/bitflags/pull/338 -* add docs to describe the behavior of multi-bit flags by @nicholasbishop in https://github.com/bitflags/bitflags/pull/340 -* Add support for bytemuck by @KodrAus in https://github.com/bitflags/bitflags/pull/336 -* Add a top-level macro for filtering attributes by @KodrAus in https://github.com/bitflags/bitflags/pull/341 - -## New Contributors -* @nicholasbishop made their first contribution in https://github.com/bitflags/bitflags/pull/340 - -**Full Changelog**: https://github.com/bitflags/bitflags/compare/2.1.0...2.2.0 - -# 2.1.0 - -## What's Changed -* Add docs for the internal Field0 and examples of formatting/parsing by @KodrAus in https://github.com/bitflags/bitflags/pull/328 -* Add support for arbitrary by @KodrAus in https://github.com/bitflags/bitflags/pull/324 -* Fix up missing docs for consts within consts by @KodrAus in https://github.com/bitflags/bitflags/pull/330 -* Ignore clippy lint in generated code by @Jake-Shadle in https://github.com/bitflags/bitflags/pull/331 - -## New Contributors -* @Jake-Shadle made their first contribution in https://github.com/bitflags/bitflags/pull/331 - -**Full Changelog**: https://github.com/bitflags/bitflags/compare/2.0.2...2.1.0 - -# 2.0.2 - -## What's Changed -* Fix up missing isize and usize Bits impls by @KodrAus in https://github.com/bitflags/bitflags/pull/321 - -**Full Changelog**: https://github.com/bitflags/bitflags/compare/2.0.1...2.0.2 - -# 2.0.1 - -## What's Changed -* Fix up some docs issues by @KodrAus in https://github.com/bitflags/bitflags/pull/309 -* Make empty_flag() const. by @tormeh in https://github.com/bitflags/bitflags/pull/313 -* Fix formatting of multi-bit flags with partial overlap by @KodrAus in https://github.com/bitflags/bitflags/pull/316 - -## New Contributors -* @tormeh made their first contribution in https://github.com/bitflags/bitflags/pull/313 - -**Full Changelog**: https://github.com/bitflags/bitflags/compare/2.0.0...2.0.1 - -# 2.0.0 - -## Major changes - -This release includes some major changes over `1.x`. If you use `bitflags!` types in your public API then upgrading this library may cause breakage in your downstream users. - -### ⚠️ Serialization - -You'll need to add the `serde` Cargo feature in order to `#[derive(Serialize, Deserialize)]` on your generated flags types: - -```rust -bitflags! { - #[derive(Serialize, Deserialize)] - #[serde(transparent)] - pub struct Flags: T { - .. - } -} -``` - -where `T` is the underlying bits type you're using, such as `u32`. - -The default serialization format with `serde` **has changed** if you `#[derive(Serialize, Deserialize)]` on your generated flags types. It will now use a formatted string for human-readable formats and the underlying bits type for compact formats. - -To keep the old format, see the https://github.com/KodrAus/bitflags-serde-legacy library. 
- -### ⚠️ Traits - -Generated flags types now derive fewer traits. If you need to maintain backwards compatibility, you can derive the following yourself: - -```rust -#[derive(PartialEq, Eq, PartialOrd, Ord, Hash, Debug, Clone, Copy)] -``` - -### ⚠️ Methods - -The unsafe `from_bits_unchecked` method is now a safe `from_bits_retain` method. - -You can add the following method to your generated types to keep them compatible: - -```rust -#[deprecated = "use the safe `from_bits_retain` method instead"] -pub unsafe fn from_bits_unchecked(bits: T) -> Self { - Self::from_bits_retain(bits) -} -``` - -where `T` is the underlying bits type you're using, such as `u32`. - -### ⚠️ `.bits` field - -You can now use the `.bits()` method instead of the old `.bits`. - -The representation of generated flags types has changed from a struct with the single field `bits` to a newtype. - -## What's Changed -* Fix a typo and call out MSRV bump by @KodrAus in https://github.com/bitflags/bitflags/pull/259 -* BitFlags trait by @arturoc in https://github.com/bitflags/bitflags/pull/220 -* Add a hidden trait to discourage manual impls of BitFlags by @KodrAus in https://github.com/bitflags/bitflags/pull/261 -* Sanitize `Ok` by @konsumlamm in https://github.com/bitflags/bitflags/pull/266 -* Fix bug in `Debug` implementation by @konsumlamm in https://github.com/bitflags/bitflags/pull/268 -* Fix a typo in the generated documentation by @wackbyte in https://github.com/bitflags/bitflags/pull/271 -* Use SPDX license format by @atouchet in https://github.com/bitflags/bitflags/pull/272 -* serde tests fail in CI by @arturoc in https://github.com/bitflags/bitflags/pull/277 -* Fix beta test output by @KodrAus in https://github.com/bitflags/bitflags/pull/279 -* Add example to the README.md file by @tiaanl in https://github.com/bitflags/bitflags/pull/270 -* Iterator over all the enabled options by @arturoc in https://github.com/bitflags/bitflags/pull/278 -* from_bits_(truncate) fail with composite flags by @arturoc in https://github.com/bitflags/bitflags/pull/276 -* Add more platform coverage to CI by @KodrAus in https://github.com/bitflags/bitflags/pull/280 -* rework the way cfgs are handled by @KodrAus in https://github.com/bitflags/bitflags/pull/281 -* Split generated code into two types by @KodrAus in https://github.com/bitflags/bitflags/pull/282 -* expose bitflags iters using nameable types by @KodrAus in https://github.com/bitflags/bitflags/pull/286 -* Support creating flags from their names by @KodrAus in https://github.com/bitflags/bitflags/pull/287 -* Update README.md by @KodrAus in https://github.com/bitflags/bitflags/pull/288 -* Prepare for 2.0.0-rc.1 release by @KodrAus in https://github.com/bitflags/bitflags/pull/289 -* Add missing "if" to contains doc-comment in traits.rs by @rusty-snake in https://github.com/bitflags/bitflags/pull/291 -* Forbid unsafe_code by @fintelia in https://github.com/bitflags/bitflags/pull/294 -* serde: enable no-std support by @nim65s in https://github.com/bitflags/bitflags/pull/296 -* Add a parser for flags formatted as bar-separated-values by @KodrAus in https://github.com/bitflags/bitflags/pull/297 -* Prepare for 2.0.0-rc.2 release by @KodrAus in https://github.com/bitflags/bitflags/pull/299 -* Use strip_prefix instead of starts_with + slice by @QuinnPainter in https://github.com/bitflags/bitflags/pull/301 -* Fix up some clippy lints by @KodrAus in https://github.com/bitflags/bitflags/pull/302 -* Prepare for 2.0.0-rc.3 release by @KodrAus in https://github.com/bitflags/bitflags/pull/303 -* 
feat: Add minimum permissions to rust.yml workflow by @gabibguti in https://github.com/bitflags/bitflags/pull/305 - -## New Contributors -* @wackbyte made their first contribution in https://github.com/bitflags/bitflags/pull/271 -* @atouchet made their first contribution in https://github.com/bitflags/bitflags/pull/272 -* @tiaanl made their first contribution in https://github.com/bitflags/bitflags/pull/270 -* @rusty-snake made their first contribution in https://github.com/bitflags/bitflags/pull/291 -* @fintelia made their first contribution in https://github.com/bitflags/bitflags/pull/294 -* @nim65s made their first contribution in https://github.com/bitflags/bitflags/pull/296 -* @QuinnPainter made their first contribution in https://github.com/bitflags/bitflags/pull/301 -* @gabibguti made their first contribution in https://github.com/bitflags/bitflags/pull/305 - -**Full Changelog**: https://github.com/bitflags/bitflags/compare/1.3.2...2.0.0 - -# 2.0.0-rc.3 - -## What's Changed -* Use strip_prefix instead of starts_with + slice by @QuinnPainter in https://github.com/bitflags/bitflags/pull/301 -* Fix up some clippy lints by @KodrAus in https://github.com/bitflags/bitflags/pull/302 - -## New Contributors -* @QuinnPainter made their first contribution in https://github.com/bitflags/bitflags/pull/301 - -**Full Changelog**: https://github.com/bitflags/bitflags/compare/2.0.0-rc.2...2.0.0-rc.3 - -# 2.0.0-rc.2 - -## Changes to `serde` serialization - -**⚠️ NOTE ⚠️** This release changes the default serialization you'll get if you `#[derive(Serialize, Deserialize)]` -on your generated flags types. It will now use a formatted string for human-readable formats and the underlying bits -type for compact formats. - -To keep the old behavior, see the [`bitflags-serde-legacy`](https://github.com/KodrAus/bitflags-serde-legacy) library. - -## What's Changed - -* Add missing "if" to contains doc-comment in traits.rs by @rusty-snake in https://github.com/bitflags/bitflags/pull/291 -* Forbid unsafe_code by @fintelia in https://github.com/bitflags/bitflags/pull/294 -* serde: enable no-std support by @nim65s in https://github.com/bitflags/bitflags/pull/296 -* Add a parser for flags formatted as bar-separated-values by @KodrAus in https://github.com/bitflags/bitflags/pull/297 - -## New Contributors -* @rusty-snake made their first contribution in https://github.com/bitflags/bitflags/pull/291 -* @fintelia made their first contribution in https://github.com/bitflags/bitflags/pull/294 -* @nim65s made their first contribution in https://github.com/bitflags/bitflags/pull/296 - -**Full Changelog**: https://github.com/bitflags/bitflags/compare/2.0.0-rc.1...2.0.0-rc.2 - -# 2.0.0-rc.1 - -This is a big release including a few years worth of work on a new `BitFlags` trait, iteration, and better macro organization for future extensibility. 
- -## What's Changed -* Fix a typo and call out MSRV bump by @KodrAus in https://github.com/bitflags/bitflags/pull/259 -* BitFlags trait by @arturoc in https://github.com/bitflags/bitflags/pull/220 -* Add a hidden trait to discourage manual impls of BitFlags by @KodrAus in https://github.com/bitflags/bitflags/pull/261 -* Sanitize `Ok` by @konsumlamm in https://github.com/bitflags/bitflags/pull/266 -* Fix bug in `Debug` implementation by @konsumlamm in https://github.com/bitflags/bitflags/pull/268 -* Fix a typo in the generated documentation by @wackbyte in https://github.com/bitflags/bitflags/pull/271 -* Use SPDX license format by @atouchet in https://github.com/bitflags/bitflags/pull/272 -* serde tests fail in CI by @arturoc in https://github.com/bitflags/bitflags/pull/277 -* Fix beta test output by @KodrAus in https://github.com/bitflags/bitflags/pull/279 -* Add example to the README.md file by @tiaanl in https://github.com/bitflags/bitflags/pull/270 -* Iterator over all the enabled options by @arturoc in https://github.com/bitflags/bitflags/pull/278 -* from_bits_(truncate) fail with composite flags by @arturoc in https://github.com/bitflags/bitflags/pull/276 -* Add more platform coverage to CI by @KodrAus in https://github.com/bitflags/bitflags/pull/280 -* rework the way cfgs are handled by @KodrAus in https://github.com/bitflags/bitflags/pull/281 -* Split generated code into two types by @KodrAus in https://github.com/bitflags/bitflags/pull/282 -* expose bitflags iters using nameable types by @KodrAus in https://github.com/bitflags/bitflags/pull/286 -* Support creating flags from their names by @KodrAus in https://github.com/bitflags/bitflags/pull/287 -* Update README.md by @KodrAus in https://github.com/bitflags/bitflags/pull/288 - -## New Contributors -* @wackbyte made their first contribution in https://github.com/bitflags/bitflags/pull/271 -* @atouchet made their first contribution in https://github.com/bitflags/bitflags/pull/272 -* @tiaanl made their first contribution in https://github.com/bitflags/bitflags/pull/270 - -**Full Changelog**: https://github.com/bitflags/bitflags/compare/1.3.2...2.0.0-rc.1 - -# 1.3.2 - -- Allow `non_snake_case` in generated flags types ([#256]) - -[#256]: https://github.com/bitflags/bitflags/pull/256 - -# 1.3.1 - -- Revert unconditional `#[repr(transparent)]` ([#252]) - -[#252]: https://github.com/bitflags/bitflags/pull/252 - -# 1.3.0 (yanked) - -**This release bumps the Minimum Supported Rust Version to `1.46.0`** - -- Add `#[repr(transparent)]` ([#187]) - -- End `empty` doc comment with full stop ([#202]) - -- Fix typo in crate root docs ([#206]) - -- Document from_bits_unchecked unsafety ([#207]) - -- Let `is_all` ignore extra bits ([#211]) - -- Allows empty flag definition ([#225]) - -- Making crate accessible from std ([#227]) - -- Make `from_bits` a const fn ([#229]) - -- Allow multiple bitflags structs in one macro invocation ([#235]) - -- Add named functions to perform set operations ([#244]) - -- Fix typos in method docs ([#245]) - -- Modernization of the `bitflags` macro to take advantage of newer features and 2018 idioms ([#246]) - -- Fix regression (in an unreleased feature) and simplify tests ([#247]) - -- Use `Self` and fix bug when overriding `stringify!` ([#249]) - -[#187]: https://github.com/bitflags/bitflags/pull/187 -[#202]: https://github.com/bitflags/bitflags/pull/202 -[#206]: https://github.com/bitflags/bitflags/pull/206 -[#207]: https://github.com/bitflags/bitflags/pull/207 -[#211]: https://github.com/bitflags/bitflags/pull/211 
-[#225]: https://github.com/bitflags/bitflags/pull/225 -[#227]: https://github.com/bitflags/bitflags/pull/227 -[#229]: https://github.com/bitflags/bitflags/pull/229 -[#235]: https://github.com/bitflags/bitflags/pull/235 -[#244]: https://github.com/bitflags/bitflags/pull/244 -[#245]: https://github.com/bitflags/bitflags/pull/245 -[#246]: https://github.com/bitflags/bitflags/pull/246 -[#247]: https://github.com/bitflags/bitflags/pull/247 -[#249]: https://github.com/bitflags/bitflags/pull/249 - -# 1.2.1 - -- Remove extraneous `#[inline]` attributes ([#194]) - -[#194]: https://github.com/bitflags/bitflags/pull/194 - -# 1.2.0 - -- Fix typo: {Lower, Upper}Exp - {Lower, Upper}Hex ([#183]) - -- Add support for "unknown" bits ([#188]) - -[#183]: https://github.com/rust-lang-nursery/bitflags/pull/183 -[#188]: https://github.com/rust-lang-nursery/bitflags/pull/188 - -# 1.1.0 - -This is a re-release of `1.0.5`, which was yanked due to a bug in the RLS. - -# 1.0.5 - -- Use compiletest_rs flags supported by stable toolchain ([#171]) - -- Put the user provided attributes first ([#173]) - -- Make bitflags methods `const` on newer compilers ([#175]) - -[#171]: https://github.com/rust-lang-nursery/bitflags/pull/171 -[#173]: https://github.com/rust-lang-nursery/bitflags/pull/173 -[#175]: https://github.com/rust-lang-nursery/bitflags/pull/175 - -# 1.0.4 - -- Support Rust 2018 style macro imports ([#165]) - - ```rust - use bitflags::bitflags; - ``` - -[#165]: https://github.com/rust-lang-nursery/bitflags/pull/165 - -# 1.0.3 - -- Improve zero value flag handling and documentation ([#157]) - -[#157]: https://github.com/rust-lang-nursery/bitflags/pull/157 - -# 1.0.2 - -- 30% improvement in compile time of bitflags crate ([#156]) - -- Documentation improvements ([#153]) - -- Implementation cleanup ([#149]) - -[#156]: https://github.com/rust-lang-nursery/bitflags/pull/156 -[#153]: https://github.com/rust-lang-nursery/bitflags/pull/153 -[#149]: https://github.com/rust-lang-nursery/bitflags/pull/149 - -# 1.0.1 -- Add support for `pub(restricted)` specifier on the bitflags struct ([#135]) -- Optimize performance of `all()` when called from a separate crate ([#136]) - -[#135]: https://github.com/rust-lang-nursery/bitflags/pull/135 -[#136]: https://github.com/rust-lang-nursery/bitflags/pull/136 - -# 1.0.0 -- **[breaking change]** Macro now generates [associated constants](https://doc.rust-lang.org/reference/items.html#associated-constants) ([#24]) - -- **[breaking change]** Minimum supported version is Rust **1.20**, due to usage of associated constants - -- After being broken in 0.9, the `#[deprecated]` attribute is now supported again ([#112]) - -- Other improvements to unit tests and documentation ([#106] and [#115]) - -[#24]: https://github.com/rust-lang-nursery/bitflags/pull/24 -[#106]: https://github.com/rust-lang-nursery/bitflags/pull/106 -[#112]: https://github.com/rust-lang-nursery/bitflags/pull/112 -[#115]: https://github.com/rust-lang-nursery/bitflags/pull/115 - -## How to update your code to use associated constants -Assuming the following structure definition: -```rust -bitflags! 
{ - struct Something: u8 { - const FOO = 0b01, - const BAR = 0b10 - } -} -``` -In 0.9 and older you could do: -```rust -let x = FOO.bits | BAR.bits; -``` -Now you must use: -```rust -let x = Something::FOO.bits | Something::BAR.bits; -``` - -# 0.9.1 -- Fix the implementation of `Formatting` traits when other formatting traits were present in scope ([#105]) - -[#105]: https://github.com/rust-lang-nursery/bitflags/pull/105 - -# 0.9.0 -- **[breaking change]** Use struct keyword instead of flags to define bitflag types ([#84]) - -- **[breaking change]** Terminate const items with semicolons instead of commas ([#87]) - -- Implement the `Hex`, `Octal`, and `Binary` formatting traits ([#86]) - -- Printing an empty flag value with the `Debug` trait now prints "(empty)" instead of nothing ([#85]) - -- The `bitflags!` macro can now be used inside of a fn body, to define a type local to that function ([#74]) - -[#74]: https://github.com/rust-lang-nursery/bitflags/pull/74 -[#84]: https://github.com/rust-lang-nursery/bitflags/pull/84 -[#85]: https://github.com/rust-lang-nursery/bitflags/pull/85 -[#86]: https://github.com/rust-lang-nursery/bitflags/pull/86 -[#87]: https://github.com/rust-lang-nursery/bitflags/pull/87 - -# 0.8.2 -- Update feature flag used when building bitflags as a dependency of the Rust toolchain - -# 0.8.1 -- Allow bitflags to be used as a dependency of the Rust toolchain - -# 0.8.0 -- Add support for the experimental `i128` and `u128` integer types ([#57]) -- Add set method: `flags.set(SOME_FLAG, true)` or `flags.set(SOME_FLAG, false)` ([#55]) - This may break code that defines its own set method - -[#55]: https://github.com/rust-lang-nursery/bitflags/pull/55 -[#57]: https://github.com/rust-lang-nursery/bitflags/pull/57 - -# 0.7.1 -*(yanked)* - -# 0.7.0 -- Implement the Extend trait ([#49]) -- Allow definitions inside the `bitflags!` macro to refer to items imported from other modules ([#51]) - -[#49]: https://github.com/rust-lang-nursery/bitflags/pull/49 -[#51]: https://github.com/rust-lang-nursery/bitflags/pull/51 - -# 0.6.0 -- The `no_std` feature was removed as it is now the default -- The `assignment_operators` feature was remove as it is now enabled by default -- Some clippy suggestions have been applied diff --git a/vendor/bitflags/CODE_OF_CONDUCT.md b/vendor/bitflags/CODE_OF_CONDUCT.md deleted file mode 100644 index f7add90ae35556..00000000000000 --- a/vendor/bitflags/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,73 +0,0 @@ -# Contributor Covenant Code of Conduct - -## Our Pledge - -In the interest of fostering an open and welcoming environment, we as -contributors and maintainers pledge to making participation in our project and -our community a harassment-free experience for everyone, regardless of age, body -size, disability, ethnicity, gender identity and expression, level of experience, -education, socio-economic status, nationality, personal appearance, race, -religion, or sexual identity and orientation. 
- -## Our Standards - -Examples of behavior that contributes to creating a positive environment -include: - -* Using welcoming and inclusive language -* Being respectful of differing viewpoints and experiences -* Gracefully accepting constructive criticism -* Focusing on what is best for the community -* Showing empathy towards other community members - -Examples of unacceptable behavior by participants include: - -* The use of sexualized language or imagery and unwelcome sexual attention or - advances -* Trolling, insulting/derogatory comments, and personal or political attacks -* Public or private harassment -* Publishing others' private information, such as a physical or electronic - address, without explicit permission -* Other conduct which could reasonably be considered inappropriate in a - professional setting - -## Our Responsibilities - -Project maintainers are responsible for clarifying the standards of acceptable -behavior and are expected to take appropriate and fair corrective action in -response to any instances of unacceptable behavior. - -Project maintainers have the right and responsibility to remove, edit, or -reject comments, commits, code, wiki edits, issues, and other contributions -that are not aligned to this Code of Conduct, or to ban temporarily or -permanently any contributor for other behaviors that they deem inappropriate, -threatening, offensive, or harmful. - -## Scope - -This Code of Conduct applies both within project spaces and in public spaces -when an individual is representing the project or its community. Examples of -representing a project or community include using an official project e-mail -address, posting via an official social media account, or acting as an appointed -representative at an online or offline event. Representation of a project may be -further defined and clarified by project maintainers. - -## Enforcement - -Instances of abusive, harassing, or otherwise unacceptable behavior may be -reported by contacting the project team at coc@senaite.org. All -complaints will be reviewed and investigated and will result in a response that -is deemed necessary and appropriate to the circumstances. The project team is -obligated to maintain confidentiality with regard to the reporter of an incident. -Further details of specific enforcement policies may be posted separately. - -Project maintainers who do not follow or enforce the Code of Conduct in good -faith may face temporary or permanent repercussions as determined by other -members of the project's leadership. - -## Attribution - -This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, -available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html - -[homepage]: https://www.contributor-covenant.org \ No newline at end of file diff --git a/vendor/bitflags/CONTRIBUTING.md b/vendor/bitflags/CONTRIBUTING.md deleted file mode 100644 index 588336398290c4..00000000000000 --- a/vendor/bitflags/CONTRIBUTING.md +++ /dev/null @@ -1,9 +0,0 @@ -# Updating compile-fail test outputs - -`bitflags` uses the `trybuild` crate to integration test its macros. Since Rust error messages change frequently enough that `nightly` builds produce spurious failures, we only check the compiler output in `beta` builds. If you run: - -``` -TRYBUILD=overwrite cargo +beta test --all -``` - -it will run the tests and update the `trybuild` output files. 
diff --git a/vendor/bitflags/Cargo.lock b/vendor/bitflags/Cargo.lock deleted file mode 100644 index 23ecff1378e608..00000000000000 --- a/vendor/bitflags/Cargo.lock +++ /dev/null @@ -1,325 +0,0 @@ -# This file is automatically @generated by Cargo. -# It is not intended for manual editing. -version = 3 - -[[package]] -name = "arbitrary" -version = "1.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3d036a3c4ab069c7b410a2ce876bd74808d2d0888a82667669f8e783a898bf1" -dependencies = [ - "derive_arbitrary", -] - -[[package]] -name = "bitflags" -version = "2.10.0" -dependencies = [ - "arbitrary", - "bytemuck", - "rustversion", - "serde", - "serde_core", - "serde_json", - "serde_test", - "trybuild", - "zerocopy", -] - -[[package]] -name = "bytemuck" -version = "1.24.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1fbdf580320f38b612e485521afda1ee26d10cc9884efaaa750d383e13e3c5f4" -dependencies = [ - "bytemuck_derive", -] - -[[package]] -name = "bytemuck_derive" -version = "1.10.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9abbd1bc6865053c427f7198e6af43bfdedc55ab791faed4fbd361d789575ff" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "derive_arbitrary" -version = "1.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e567bd82dcff979e4b03460c307b3cdc9e96fde3d73bed1496d2bc75d9dd62a" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "equivalent" -version = "1.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "877a4ace8713b0bcf2a4e7eec82529c029f1d0619886d18145fea96c3ffe5c0f" - -[[package]] -name = "glob" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0cc23270f6e1808e30a928bdc84dea0b9b4136a8bc82338574f23baf47bbd280" - -[[package]] -name = "hashbrown" -version = "0.16.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5419bdc4f6a9207fbeba6d11b604d481addf78ecd10c11ad51e76c2f6482748d" - -[[package]] -name = "indexmap" -version = "2.12.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6717a8d2a5a929a1a2eb43a12812498ed141a0bcfb7e8f7844fbdbe4303bba9f" -dependencies = [ - "equivalent", - "hashbrown", -] - -[[package]] -name = "itoa" -version = "1.0.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c" - -[[package]] -name = "memchr" -version = "2.7.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f52b00d39961fc5b2736ea853c9cc86238e165017a493d1d5c8eac6bdc4cc273" - -[[package]] -name = "proc-macro2" -version = "1.0.101" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89ae43fd86e4158d6db51ad8e2b80f313af9cc74f5c0e03ccb87de09998732de" -dependencies = [ - "unicode-ident", -] - -[[package]] -name = "quote" -version = "1.0.41" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce25767e7b499d1b604768e7cde645d14cc8584231ea6b295e9c9eb22c02e1d1" -dependencies = [ - "proc-macro2", -] - -[[package]] -name = "rustversion" -version = "1.0.22" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b39cdef0fa800fc44525c84ccb54a029961a8215f9619753635a9c0d2538d46d" - -[[package]] -name = "ryu" -version = "1.0.20" -source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "28d3b2b1366ec20994f1fd18c3c594f05c5dd4bc44d8bb0c1c632c8d6829481f" - -[[package]] -name = "serde" -version = "1.0.228" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a8e94ea7f378bd32cbbd37198a4a91436180c5bb472411e48b5ec2e2124ae9e" -dependencies = [ - "serde_core", - "serde_derive", -] - -[[package]] -name = "serde_core" -version = "1.0.228" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41d385c7d4ca58e59fc732af25c3983b67ac852c1a25000afe1175de458b67ad" -dependencies = [ - "serde_derive", -] - -[[package]] -name = "serde_derive" -version = "1.0.228" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d540f220d3187173da220f885ab66608367b6574e925011a9353e4badda91d79" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "serde_json" -version = "1.0.145" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "402a6f66d8c709116cf22f558eab210f5a50187f702eb4d7e5ef38d9a7f1c79c" -dependencies = [ - "itoa", - "memchr", - "ryu", - "serde", - "serde_core", -] - -[[package]] -name = "serde_spanned" -version = "1.0.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e24345aa0fe688594e73770a5f6d1b216508b4f93484c0026d521acd30134392" -dependencies = [ - "serde_core", -] - -[[package]] -name = "serde_test" -version = "1.0.177" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f901ee573cab6b3060453d2d5f0bae4e6d628c23c0a962ff9b5f1d7c8d4f1ed" -dependencies = [ - "serde", -] - -[[package]] -name = "syn" -version = "2.0.107" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a26dbd934e5451d21ef060c018dae56fc073894c5a7896f882928a76e6d081b" -dependencies = [ - "proc-macro2", - "quote", - "unicode-ident", -] - -[[package]] -name = "target-triple" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ac9aa371f599d22256307c24a9d748c041e548cbf599f35d890f9d365361790" - -[[package]] -name = "termcolor" -version = "1.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06794f8f6c5c898b3275aebefa6b8a1cb24cd2c6c79397ab15774837a0bc5755" -dependencies = [ - "winapi-util", -] - -[[package]] -name = "toml" -version = "0.9.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0dc8b1fb61449e27716ec0e1bdf0f6b8f3e8f6b05391e8497b8b6d7804ea6d8" -dependencies = [ - "indexmap", - "serde_core", - "serde_spanned", - "toml_datetime", - "toml_parser", - "toml_writer", - "winnow", -] - -[[package]] -name = "toml_datetime" -version = "0.7.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2cdb639ebbc97961c51720f858597f7f24c4fc295327923af55b74c3c724533" -dependencies = [ - "serde_core", -] - -[[package]] -name = "toml_parser" -version = "1.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0cbe268d35bdb4bb5a56a2de88d0ad0eb70af5384a99d648cd4b3d04039800e" -dependencies = [ - "winnow", -] - -[[package]] -name = "toml_writer" -version = "1.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df8b2b54733674ad286d16267dcfc7a71ed5c776e4ac7aa3c3e2561f7c637bf2" - -[[package]] -name = "trybuild" -version = "1.0.112" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d66678374d835fe847e0dc8348fde2ceb5be4a7ec204437d8367f0d8df266a5" -dependencies = [ - "glob", - "serde", - "serde_derive", - 
"serde_json", - "target-triple", - "termcolor", - "toml", -] - -[[package]] -name = "unicode-ident" -version = "1.0.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f63a545481291138910575129486daeaf8ac54aee4387fe7906919f7830c7d9d" - -[[package]] -name = "winapi-util" -version = "0.1.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c2a7b1c03c876122aa43f3020e6c3c3ee5c05081c9a00739faf7503aeba10d22" -dependencies = [ - "windows-sys", -] - -[[package]] -name = "windows-link" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0805222e57f7521d6a62e36fa9163bc891acd422f971defe97d64e70d0a4fe5" - -[[package]] -name = "windows-sys" -version = "0.61.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae137229bcbd6cdf0f7b80a31df61766145077ddf49416a728b02cb3921ff3fc" -dependencies = [ - "windows-link", -] - -[[package]] -name = "winnow" -version = "0.7.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21a0236b59786fed61e2a80582dd500fe61f18b5dca67a4a067d0bc9039339cf" - -[[package]] -name = "zerocopy" -version = "0.8.27" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0894878a5fa3edfd6da3f88c4805f4c8558e2b996227a3d864f47fe11e38282c" -dependencies = [ - "zerocopy-derive", -] - -[[package]] -name = "zerocopy-derive" -version = "0.8.27" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "88d2b8d9c68ad2b9e4340d7832716a4d21a22a1154777ad56ea55c51a9cf3831" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] diff --git a/vendor/bitflags/Cargo.toml b/vendor/bitflags/Cargo.toml deleted file mode 100644 index f950e7e3d7c4cb..00000000000000 --- a/vendor/bitflags/Cargo.toml +++ /dev/null @@ -1,120 +0,0 @@ -# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO -# -# When uploading crates to the registry Cargo will automatically -# "normalize" Cargo.toml files for maximal compatibility -# with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g., crates.io) dependencies. -# -# If you are reading this file be aware that the original Cargo.toml -# will likely look very different (and much more reasonable). -# See Cargo.toml.orig for the original contents. - -[package] -edition = "2021" -rust-version = "1.56.0" -name = "bitflags" -version = "2.10.0" -authors = ["The Rust Project Developers"] -build = false -exclude = [ - "/tests", - "/.github", -] -autolib = false -autobins = false -autoexamples = false -autotests = false -autobenches = false -description = """ -A macro to generate structures which behave like bitflags. 
-""" -homepage = "https://github.com/bitflags/bitflags" -documentation = "https://docs.rs/bitflags" -readme = "README.md" -keywords = [ - "bit", - "bitmask", - "bitflags", - "flags", -] -categories = ["no-std"] -license = "MIT OR Apache-2.0" -repository = "https://github.com/bitflags/bitflags" - -[package.metadata.docs.rs] -features = ["example_generated"] - -[features] -example_generated = [] -serde = ["serde_core"] -std = [] - -[lib] -name = "bitflags" -path = "src/lib.rs" - -[[example]] -name = "custom_bits_type" -path = "examples/custom_bits_type.rs" - -[[example]] -name = "custom_derive" -path = "examples/custom_derive.rs" - -[[example]] -name = "fmt" -path = "examples/fmt.rs" - -[[example]] -name = "macro_free" -path = "examples/macro_free.rs" - -[[example]] -name = "serde" -path = "examples/serde.rs" - -[[bench]] -name = "parse" -path = "benches/parse.rs" - -[dependencies.arbitrary] -version = "1.0" -optional = true - -[dependencies.bytemuck] -version = "1.12" -optional = true - -[dependencies.serde_core] -version = "1.0.228" -optional = true -default-features = false - -[dev-dependencies.arbitrary] -version = "1.0" -features = ["derive"] - -[dev-dependencies.bytemuck] -version = "1.12.2" -features = ["derive"] - -[dev-dependencies.rustversion] -version = "1.0" - -[dev-dependencies.serde_json] -version = "1.0" - -[dev-dependencies.serde_lib] -version = "1.0.103" -features = ["derive"] -package = "serde" - -[dev-dependencies.serde_test] -version = "1.0.19" - -[dev-dependencies.trybuild] -version = "1.0.18" - -[dev-dependencies.zerocopy] -version = "0.8" -features = ["derive"] diff --git a/vendor/bitflags/LICENSE-APACHE b/vendor/bitflags/LICENSE-APACHE deleted file mode 100644 index 16fe87b06e802f..00000000000000 --- a/vendor/bitflags/LICENSE-APACHE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). 
- - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - -2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - -3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - -4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - -5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - -6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - -8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - -Copyright [yyyy] [name of copyright owner] - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. diff --git a/vendor/bitflags/LICENSE-MIT b/vendor/bitflags/LICENSE-MIT deleted file mode 100644 index 39d4bdb5acd313..00000000000000 --- a/vendor/bitflags/LICENSE-MIT +++ /dev/null @@ -1,25 +0,0 @@ -Copyright (c) 2014 The Rust Project Developers - -Permission is hereby granted, free of charge, to any -person obtaining a copy of this software and associated -documentation files (the "Software"), to deal in the -Software without restriction, including without -limitation the rights to use, copy, modify, merge, -publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software -is furnished to do so, subject to the following -conditions: - -The above copyright notice and this permission notice -shall be included in all copies or substantial portions -of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -DEALINGS IN THE SOFTWARE. diff --git a/vendor/bitflags/README.md b/vendor/bitflags/README.md deleted file mode 100644 index 5f8a1be35804ff..00000000000000 --- a/vendor/bitflags/README.md +++ /dev/null @@ -1,88 +0,0 @@ -bitflags -======== - -[![Rust](https://github.com/bitflags/bitflags/workflows/Rust/badge.svg)](https://github.com/bitflags/bitflags/actions) -[![Latest version](https://img.shields.io/crates/v/bitflags.svg)](https://crates.io/crates/bitflags) -[![Documentation](https://docs.rs/bitflags/badge.svg)](https://docs.rs/bitflags) -![License](https://img.shields.io/crates/l/bitflags.svg) - -`bitflags` generates flags enums with well-defined semantics and ergonomic end-user APIs. - -You can use `bitflags` to: - -- provide more user-friendly bindings to C APIs where flags may or may not be fully known in advance. -- generate efficient options types with string parsing and formatting support. - -You can't use `bitflags` to: - -- guarantee only bits corresponding to defined flags will ever be set. `bitflags` allows access to the underlying bits type so arbitrary bits may be set. -- define bitfields. `bitflags` only generates types where set bits denote the presence of some combination of flags. - -- [Documentation](https://docs.rs/bitflags) -- [Specification](https://github.com/bitflags/bitflags/blob/main/spec.md) -- [Release notes](https://github.com/bitflags/bitflags/releases) - -## Usage - -Add this to your `Cargo.toml`: - -```toml -[dependencies] -bitflags = "2.10.0" -``` - -and this to your source code: - -```rust -use bitflags::bitflags; -``` - -## Example - -Generate a flags structure: - -```rust -use bitflags::bitflags; - -// The `bitflags!` macro generates `struct`s that manage a set of flags. -bitflags! { - /// Represents a set of flags. - #[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] - struct Flags: u32 { - /// The value `A`, at bit position `0`. - const A = 0b00000001; - /// The value `B`, at bit position `1`. - const B = 0b00000010; - /// The value `C`, at bit position `2`. - const C = 0b00000100; - - /// The combination of `A`, `B`, and `C`. - const ABC = Self::A.bits() | Self::B.bits() | Self::C.bits(); - } -} - -fn main() { - let e1 = Flags::A | Flags::C; - let e2 = Flags::B | Flags::C; - assert_eq!((e1 | e2), Flags::ABC); // union - assert_eq!((e1 & e2), Flags::C); // intersection - assert_eq!((e1 - e2), Flags::A); // set difference - assert_eq!(!e2, Flags::A); // set complement -} -``` - -## Cargo features - -The `bitflags` library defines a few Cargo features that you can opt-in to: - -- `std`: Implement the `Error` trait on error types used by `bitflags`. -- `serde`: Support deriving `serde` traits on generated flags types. -- `arbitrary`: Support deriving `arbitrary` traits on generated flags types. -- `bytemuck`: Support deriving `bytemuck` traits on generated flags types. - -Also see [`bitflags_derive`](https://github.com/bitflags/bitflags-derive) for other flags-aware traits. - -## Rust Version Support - -The minimum supported Rust version is documented in the `Cargo.toml` file. 
-This may be bumped in minor releases as necessary. diff --git a/vendor/bitflags/SECURITY.md b/vendor/bitflags/SECURITY.md deleted file mode 100644 index 790ac5b59debde..00000000000000 --- a/vendor/bitflags/SECURITY.md +++ /dev/null @@ -1,13 +0,0 @@ -# Security Policy - -## Supported Versions - -Security updates are applied only to the latest release. - -## Reporting a Vulnerability - -If you have discovered a security vulnerability in this project, please report it privately. **Do not disclose it as a public issue.** This gives us time to work with you to fix the issue before public exposure, reducing the chance that the exploit will be used before a patch is released. - -Please disclose it at [security advisory](https://github.com/bitflags/bitflags/security/advisories/new). - -This project is maintained by a team of volunteers on a reasonable-effort basis. As such, please give us at least 90 days to work on a fix before public exposure. diff --git a/vendor/bitflags/benches/parse.rs b/vendor/bitflags/benches/parse.rs deleted file mode 100644 index caa9203451a12e..00000000000000 --- a/vendor/bitflags/benches/parse.rs +++ /dev/null @@ -1,96 +0,0 @@ -#![feature(test)] - -extern crate test; - -use std::{ - fmt::{self, Display}, - str::FromStr, -}; - -bitflags::bitflags! { - struct Flags10: u32 { - const A = 0b0000_0000_0000_0001; - const B = 0b0000_0000_0000_0010; - const C = 0b0000_0000_0000_0100; - const D = 0b0000_0000_0000_1000; - const E = 0b0000_0000_0001_0000; - const F = 0b0000_0000_0010_0000; - const G = 0b0000_0000_0100_0000; - const H = 0b0000_0000_1000_0000; - const I = 0b0000_0001_0000_0000; - const J = 0b0000_0010_0000_0000; - } -} - -impl FromStr for Flags10 { - type Err = bitflags::parser::ParseError; - - fn from_str(flags: &str) -> Result { - Ok(Flags10(flags.parse()?)) - } -} - -impl Display for Flags10 { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - Display::fmt(&self.0, f) - } -} - -#[bench] -fn format_flags_1_present(b: &mut test::Bencher) { - b.iter(|| Flags10::J.to_string()) -} - -#[bench] -fn format_flags_5_present(b: &mut test::Bencher) { - b.iter(|| (Flags10::F | Flags10::G | Flags10::H | Flags10::I | Flags10::J).to_string()) -} - -#[bench] -fn format_flags_10_present(b: &mut test::Bencher) { - b.iter(|| { - (Flags10::A - | Flags10::B - | Flags10::C - | Flags10::D - | Flags10::E - | Flags10::F - | Flags10::G - | Flags10::H - | Flags10::I - | Flags10::J) - .to_string() - }) -} - -#[bench] -fn parse_flags_1_10(b: &mut test::Bencher) { - b.iter(|| { - let flags: Flags10 = "J".parse().unwrap(); - flags - }) -} - -#[bench] -fn parse_flags_5_10(b: &mut test::Bencher) { - b.iter(|| { - let flags: Flags10 = "F | G | H | I | J".parse().unwrap(); - flags - }) -} - -#[bench] -fn parse_flags_10_10(b: &mut test::Bencher) { - b.iter(|| { - let flags: Flags10 = "A | B | C | D | E | F | G | H | I | J".parse().unwrap(); - flags - }) -} - -#[bench] -fn parse_flags_1_10_hex(b: &mut test::Bencher) { - b.iter(|| { - let flags: Flags10 = "0xFF".parse().unwrap(); - flags - }) -} diff --git a/vendor/bitflags/examples/custom_bits_type.rs b/vendor/bitflags/examples/custom_bits_type.rs deleted file mode 100644 index 8924bfdf31a6e4..00000000000000 --- a/vendor/bitflags/examples/custom_bits_type.rs +++ /dev/null @@ -1,97 +0,0 @@ -use std::ops::{BitAnd, BitOr, BitXor, Not}; - -use bitflags::{Bits, Flag, Flags}; - -// Define a custom container that can be used in flags types -// Note custom bits types can't be used in `bitflags!` -// without making the trait impls `const`. 
This is currently -// unstable -#[derive(Clone, Copy, Debug)] -pub struct CustomBits([bool; 3]); - -impl Bits for CustomBits { - const EMPTY: Self = CustomBits([false; 3]); - - const ALL: Self = CustomBits([true; 3]); -} - -impl PartialEq for CustomBits { - fn eq(&self, other: &Self) -> bool { - self.0 == other.0 - } -} - -impl BitAnd for CustomBits { - type Output = Self; - - fn bitand(self, other: Self) -> Self { - CustomBits([ - self.0[0] & other.0[0], - self.0[1] & other.0[1], - self.0[2] & other.0[2], - ]) - } -} - -impl BitOr for CustomBits { - type Output = Self; - - fn bitor(self, other: Self) -> Self { - CustomBits([ - self.0[0] | other.0[0], - self.0[1] | other.0[1], - self.0[2] | other.0[2], - ]) - } -} - -impl BitXor for CustomBits { - type Output = Self; - - fn bitxor(self, other: Self) -> Self { - CustomBits([ - self.0[0] & other.0[0], - self.0[1] & other.0[1], - self.0[2] & other.0[2], - ]) - } -} - -impl Not for CustomBits { - type Output = Self; - - fn not(self) -> Self { - CustomBits([!self.0[0], !self.0[1], !self.0[2]]) - } -} - -#[derive(Clone, Copy, Debug)] -pub struct CustomFlags(CustomBits); - -impl CustomFlags { - pub const A: Self = CustomFlags(CustomBits([true, false, false])); - pub const B: Self = CustomFlags(CustomBits([false, true, false])); - pub const C: Self = CustomFlags(CustomBits([false, false, true])); -} - -impl Flags for CustomFlags { - const FLAGS: &'static [Flag] = &[ - Flag::new("A", Self::A), - Flag::new("B", Self::B), - Flag::new("C", Self::C), - ]; - - type Bits = CustomBits; - - fn bits(&self) -> Self::Bits { - self.0 - } - - fn from_bits_retain(bits: Self::Bits) -> Self { - CustomFlags(bits) - } -} - -fn main() { - println!("{:?}", CustomFlags::A.union(CustomFlags::C)); -} diff --git a/vendor/bitflags/examples/custom_derive.rs b/vendor/bitflags/examples/custom_derive.rs deleted file mode 100644 index ba26723f0c4c41..00000000000000 --- a/vendor/bitflags/examples/custom_derive.rs +++ /dev/null @@ -1,23 +0,0 @@ -//! An example of implementing the `BitFlags` trait manually for a flags type. - -use std::str; - -use bitflags::bitflags; - -// Define a flags type outside of the `bitflags` macro as a newtype -// It can accept custom derives for libraries `bitflags` doesn't support natively -#[derive(zerocopy::IntoBytes, zerocopy::FromBytes, zerocopy::KnownLayout, zerocopy::Immutable)] -#[repr(transparent)] -pub struct ManualFlags(u32); - -// Next: use `impl Flags` instead of `struct Flags` -bitflags! { - impl ManualFlags: u32 { - const A = 0b00000001; - const B = 0b00000010; - const C = 0b00000100; - const ABC = Self::A.bits() | Self::B.bits() | Self::C.bits(); - } -} - -fn main() {} diff --git a/vendor/bitflags/examples/fmt.rs b/vendor/bitflags/examples/fmt.rs deleted file mode 100644 index 724b2074cf0c85..00000000000000 --- a/vendor/bitflags/examples/fmt.rs +++ /dev/null @@ -1,49 +0,0 @@ -//! An example of implementing Rust's standard formatting and parsing traits for flags types. - -use core::{fmt, str}; - -bitflags::bitflags! { - // You can `#[derive]` the `Debug` trait, but implementing it manually - // can produce output like `A | B` instead of `Flags(A | B)`. 
- // #[derive(Debug)] - #[derive(PartialEq, Eq)] - pub struct Flags: u32 { - const A = 1; - const B = 2; - const C = 4; - const D = 8; - } -} - -impl fmt::Debug for Flags { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - bitflags::parser::to_writer(self, f) - } -} - -impl fmt::Display for Flags { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - bitflags::parser::to_writer(self, f) - } -} - -impl str::FromStr for Flags { - type Err = bitflags::parser::ParseError; - - fn from_str(flags: &str) -> Result { - bitflags::parser::from_str(flags) - } -} - -fn main() -> Result<(), bitflags::parser::ParseError> { - let flags = Flags::A | Flags::B; - - println!("{}", flags); - - let formatted = flags.to_string(); - let parsed: Flags = formatted.parse()?; - - assert_eq!(flags, parsed); - - Ok(()) -} diff --git a/vendor/bitflags/examples/macro_free.rs b/vendor/bitflags/examples/macro_free.rs deleted file mode 100644 index 7563379005c813..00000000000000 --- a/vendor/bitflags/examples/macro_free.rs +++ /dev/null @@ -1,61 +0,0 @@ -//! An example of implementing the `BitFlags` trait manually for a flags type. -//! -//! This example doesn't use any macros. - -use std::{fmt, str}; - -use bitflags::{Flag, Flags}; - -// First: Define your flags type. It just needs to be `Sized + 'static`. -pub struct ManualFlags(u32); - -// Not required: Define some constants for valid flags -impl ManualFlags { - pub const A: ManualFlags = ManualFlags(0b00000001); - pub const B: ManualFlags = ManualFlags(0b00000010); - pub const C: ManualFlags = ManualFlags(0b00000100); - pub const ABC: ManualFlags = ManualFlags(0b00000111); -} - -// Next: Implement the `BitFlags` trait, specifying your set of valid flags -// and iterators -impl Flags for ManualFlags { - const FLAGS: &'static [Flag] = &[ - Flag::new("A", Self::A), - Flag::new("B", Self::B), - Flag::new("C", Self::C), - ]; - - type Bits = u32; - - fn bits(&self) -> u32 { - self.0 - } - - fn from_bits_retain(bits: u32) -> Self { - Self(bits) - } -} - -// Not required: Add parsing support -impl str::FromStr for ManualFlags { - type Err = bitflags::parser::ParseError; - - fn from_str(input: &str) -> Result { - bitflags::parser::from_str(input) - } -} - -// Not required: Add formatting support -impl fmt::Display for ManualFlags { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - bitflags::parser::to_writer(self, f) - } -} - -fn main() { - println!( - "{}", - ManualFlags::A.union(ManualFlags::B).union(ManualFlags::C) - ); -} diff --git a/vendor/bitflags/examples/serde.rs b/vendor/bitflags/examples/serde.rs deleted file mode 100644 index 3b72e1a81ef422..00000000000000 --- a/vendor/bitflags/examples/serde.rs +++ /dev/null @@ -1,39 +0,0 @@ -//! An example of implementing `serde::Serialize` and `serde::Deserialize`. -//! The `#[serde(transparent)]` attribute is recommended to serialize directly -//! to the underlying bits type without wrapping it in a `serde` newtype. - -#[cfg(feature = "serde")] -fn main() { - use serde_lib::*; - - bitflags::bitflags! 
{ - #[derive(Serialize, Deserialize, Debug, PartialEq, Eq)] - #[serde(transparent)] - // NOTE: We alias the `serde` crate as `serde_lib` in this repository, - // but you don't need to do this - #[serde(crate = "serde_lib")] - pub struct Flags: u32 { - const A = 1; - const B = 2; - const C = 4; - const D = 8; - } - } - - let flags = Flags::A | Flags::B; - - let serialized = serde_json::to_string(&flags).unwrap(); - - println!("{:?} -> {}", flags, serialized); - - assert_eq!(serialized, r#""A | B""#); - - let deserialized: Flags = serde_json::from_str(&serialized).unwrap(); - - println!("{} -> {:?}", serialized, flags); - - assert_eq!(deserialized, flags); -} - -#[cfg(not(feature = "serde"))] -fn main() {} diff --git a/vendor/bitflags/spec.md b/vendor/bitflags/spec.md deleted file mode 100644 index 10a55db8dd0d3e..00000000000000 --- a/vendor/bitflags/spec.md +++ /dev/null @@ -1,556 +0,0 @@ -# Bitflags - -`bitflags` generates flags enums with well-defined semantics and ergonomic end-user APIs. - -You can use `bitflags` to: - -- provide more user-friendly bindings to C APIs where flags may or may not be fully known in advance. -- generate efficient options types with string parsing and formatting support. - -You can't use `bitflags` to: - -- guarantee only bits corresponding to defined flags will ever be set. `bitflags` allows access to the underlying bits type so arbitrary bits may be set. -- define bitfields. `bitflags` only generates types where set bits denote the presence of some combination of flags. - -## Definitions - -This section formally defines the terminology and semantics of `bitflags`. It's organized so more fundamental concepts are introduced before those that build on them. It may be helpful to start from the bottom of the section and refer back up to concepts defined earlier. - -Examples use `bitflags` syntax with `u8` as the bits type. - -### Bits type - -A type that defines a fixed number of bits at specific locations. - ----- - -Bits types are typically fixed-width unsigned integers. For example, `u8` is a bits type that defines 8 bits; bit-0 through bit-7. - -### Bits value - -An instance of a bits type where each bit may be set (`1`) or unset (`0`). - ----- - -Some examples of bits values for the bits type `u8` are: - -```rust -0b0000_0000 -0b1111_1111 -0b1010_0101 -``` - -#### Equality - -Two bits values are equal if their bits are in the same configuration; set bits in one are set in the other, and unset bits in one are unset in the other. - -#### Operations - -Bits values define the bitwise operators and (`&`), or (`|`), exclusive-or (`^`), and negation (`!`) that apply to each of their bits. - -### Flag - -A set of bits in a bits type that may have a unique name. - ----- - -Bits are not required to be exclusive to a flag. Bits are not required to be contiguous. - -The following is a flag for `u8` with the name `A` that includes bit-0: - -```rust -const A = 0b0000_0001; -``` - -The following is a flag for `u8` with the name `B` that includes bit-0, and bit-5: - -```rust -const B = 0b0010_0001; -``` - -#### Named flag - -A flag with a name. - ----- - -The following is a named flag, where the name is `A`: - -```rust -const A = 0b0000_0001; -``` - -#### Unnamed flag - -A flag without a name. - ----- - -The following is an unnamed flag: - -```rust -const _ = 0b0000_0001; -``` - -#### Zero-bit flag - -A flag with a set of zero bits. 
- ----- - -The following is a zero-bit flag: - -```rust -const ZERO = 0b0000_0000; -``` - -#### Single-bit flag - -A flag with a set of one bit. - ----- - -The following are single-bit flags: - -```rust -const A = 0b0000_0001; -const B = 0b0000_0010; -``` - -#### Multi-bit flag - -A flag with a set of more than one bit. - ----- - -The following are multi-bit flags: - -```rust -const A = 0b0000_0011; -const B = 0b1111_1111; -``` - -### Flags type - -A set of defined flags over a specific bits type. - -#### Known bit - -A bit in any defined flag. - ----- - -In the following flags type: - -```rust -struct Flags { - const A = 0b0000_0001; - const B = 0b0000_0010; - const C = 0b0000_0100; -} -``` - -the known bits are: - -```rust -0b0000_0111 -``` - -#### Unknown bit - -A bit not in any defined flag. - ----- - -In the following flags type: - -```rust -struct Flags { - const A = 0b0000_0001; - const B = 0b0000_0010; - const C = 0b0000_0100; -} -``` - -the unknown bits are: - -```rust -0b1111_1000 -``` - -### Flags value - -An instance of a flags type using its specific bits value for storage. - -The flags value of a flag is one where each of its bits is set, and all others are unset. - -#### Contains - -Whether all set bits in a source flags value are also set in a target flags value. - ----- - -Given the flags value: - -```rust -0b0000_0011 -``` - -the following flags values are contained: - -```rust -0b0000_0000 -0b0000_0010 -0b0000_0001 -0b0000_0011 -``` - -but the following flags values are not contained: - -```rust -0b0000_1000 -0b0000_0110 -``` - -#### Intersects - -Whether any set bits in a source flags value are also set in a target flags value. - ----- - -Given the flags value: - -```rust -0b0000_0011 -``` - -the following flags intersect: - -```rust -0b0000_0010 -0b0000_0001 -0b1111_1111 -``` - -but the following flags values do not intersect: - -```rust -0b0000_0000 -0b1111_0000 -``` - -#### Empty - -Whether all bits in a flags value are unset. - ----- - -The following flags value is empty: - -```rust -0b0000_0000 -``` - -The following flags values are not empty: - -```rust -0b0000_0001 -0b0110_0000 -``` - -#### All - -Whether all defined flags are contained in a flags value. - ----- - -Given a flags type: - -```rust -struct Flags { - const A = 0b0000_0001; - const B = 0b0000_0010; -} -``` - -the following flags values all satisfy all: - -```rust -0b0000_0011 -0b1000_0011 -0b1111_1111 -``` - -### Operations - -Examples in this section all use the given flags type: - -```rust -struct Flags { - const A = 0b0000_0001; - const B = 0b0000_0010; - const C = 0b0000_1100; -} -``` - -#### Truncate - -Unset all unknown bits in a flags value. - ----- - -Given the flags value: - -```rust -0b1111_1111 -``` - -the result of truncation will be: - -```rust -0b0000_1111 -``` - ----- - -Truncating doesn't guarantee that a non-empty result will contain any defined flags. Given the following flags type: - -```rust -struct Flags { - const A = 0b0000_0101; -} -``` - -and the following flags value: - -```rust -0b0000_1110; -``` - -The result of truncation will be: - -```rust -0b0000_0100; -``` - -which intersects the flag `A`, but doesn't contain it. - -This behavior is possible even when only operating with flags values containing defined flags. Given the following flags type: - -```rust -struct Flags { - const A = 0b0000_0101; - const B = 0b0000_0001; -} -``` - -The result of `A ^ B` is `0b0000_0100`, which also doesn't contain any defined flag. 
- ----- - -If all known bits are in the set of at least one defined single-bit flag, then all operations that produce non-empty results will always contain defined flags. - -#### Union - -The bitwise or (`|`) of the bits in two flags values. - ----- - -The following are examples of the result of unioning flags values: - -```rust -0b0000_0001 | 0b0000_0010 = 0b0000_0011 -0b0000_0000 | 0b1111_1111 = 0b1111_1111 -``` - -#### Intersection - -The bitwise and (`&`) of the bits in two flags values. - ----- - -The following are examples of the result of intersecting flags values: - -```rust -0b0000_0001 & 0b0000_0010 = 0b0000_0000 -0b1111_1100 & 0b1111_0111 = 0b1111_0100 -0b1111_1111 & 0b1111_1111 = 0b1111_1111 -``` - -#### Symmetric difference - -The bitwise exclusive-or (`^`) of the bits in two flags values. - ----- - -The following are examples of the symmetric difference between two flags values: - -```rust -0b0000_0001 ^ 0b0000_0010 = 0b0000_0011 -0b0000_1111 ^ 0b0000_0011 = 0b0000_1100 -0b1100_0000 ^ 0b0011_0000 = 0b1111_0000 -``` - -#### Complement - -The bitwise negation (`!`) of the bits in a flags value, truncating the result. - ----- - -The complement is the only operation that explicitly truncates its result, because it doesn't accept a second flags value as input and so is likely to set unknown bits. - ----- - -The following are examples of the complement of a flags value: - -```rust -!0b0000_0000 = 0b0000_1111 -!0b0000_1111 = 0b0000_0000 -!0b1111_1000 = 0b0000_0111 -``` - -#### Difference - -The bitwise intersection (`&`) of the bits in one flags value and the bitwise negation (`!`) of the bits in another. - ----- - -This operation is not equivalent to the intersection of one flags value with the complement of another (`&!`). -The former will truncate the result in the complement, where difference will not. - ----- - -The following are examples of the difference between two flags values: - -```rust -0b0000_0001 & !0b0000_0010 = 0b0000_0001 -0b0000_1101 & !0b0000_0011 = 0b0000_1100 -0b1111_1111 & !0b0000_0001 = 0b1111_1110 -``` - -### Iteration - -Yield the bits of a source flags value in a set of contained flags values. - ----- - -To be most useful, each yielded flags value should set exactly the bits of a defined flag contained in the source. Any known bits that aren't in the set of any contained flag should be yielded together as a final flags value. - ----- - -Given the following flags type: - -```rust -struct Flags { - const A = 0b0000_0001; - const B = 0b0000_0010; - const AB = 0b0000_0011; -} -``` - -and the following flags value: - -```rust -0b0000_1111 -``` - -When iterated it may yield a flags value for `A` and `B`, then a final flag with the unknown bits: - -```rust -0b0000_0001 -0b0000_0010 -0b0000_1100 -``` - -It may also yield a flags value for `AB`, then a final flag with the unknown bits: - -```rust -0b0000_0011 -0b0000_1100 -``` - ----- - -Given the following flags type: - -```rust -struct Flags { - const A = 0b0000_0011; -} -``` - -and the following flags value: - -```rust -0b0000_0001 -``` - -When iterated it will still yield a flags value for the known bit `0b0000_0001` even though it doesn't contain a flag. 
- -### Formatting - -Format and parse a flags value as text using the following grammar: - -- _Flags:_ (_Whitespace_ _Flag_ _Whitespace_)`|`* -- _Flag:_ _Name_ | _Hex Number_ -- _Name:_ The name of any defined flag -- _Hex Number_: `0x`([0-9a-fA-F])* -- _Whitespace_: (\s)* - -Flags values can be formatted as _Flags_ by iterating over them, formatting each yielded flags value as a _Flag_. Any yielded flags value that sets exactly the bits of a defined flag with a name should be formatted as a _Name_. Otherwise it must be formatted as a _Hex Number_. - -Formatting and parsing supports three modes: - -- **Retain**: Formatting and parsing roundtrips exactly the bits of the source flags value. This is the default behavior. -- **Truncate**: Flags values are truncated before formatting, and truncated after parsing. -- **Strict**: A _Flag_ may only be formatted and parsed as a _Name_. _Hex numbers_ are not allowed. A consequence of this is that unknown bits and any bits that aren't in a contained named flag will be ignored. This is recommended for flags values serialized across API boundaries, like web services. - -Text that is empty or whitespace is an empty flags value. - ----- - -Given the following flags type: - -```rust -struct Flags { - const A = 0b0000_0001; - const B = 0b0000_0010; - const AB = 0b0000_0011; - const C = 0b0000_1100; -} -``` - -The following are examples of how flags values can be formatted using any mode: - -```rust -0b0000_0000 = "" -0b0000_0001 = "A" -0b0000_0010 = "B" -0b0000_0011 = "A | B" -0b0000_0011 = "AB" -0b0000_1111 = "A | B | C" -``` - -Truncate mode will unset any unknown bits: - -```rust -0b1000_0000 = "" -0b1111_1111 = "A | B | C" -0b0000_1000 = "0x8" -``` - -Retain mode will include any unknown bits as a final _Flag_: - -```rust -0b1000_0000 = "0x80" -0b1111_1111 = "A | B | C | 0xf0" -0b0000_1000 = "0x8" -``` - -Strict mode will unset any unknown bits, as well as bits not contained in any defined named flags: - -```rust -0b1000_0000 = "" -0b1111_1111 = "A | B | C" -0b0000_1000 = "" -``` diff --git a/vendor/bitflags/src/example_generated.rs b/vendor/bitflags/src/example_generated.rs deleted file mode 100644 index abb1118fa14a41..00000000000000 --- a/vendor/bitflags/src/example_generated.rs +++ /dev/null @@ -1,65 +0,0 @@ -//! This module shows an example of code generated by the macro. **IT MUST NOT BE USED OUTSIDE THIS -//! CRATE**. -//! -//! Usually, when you call the `bitflags!` macro, only the `Flags` type would be visible. In this -//! example, the `Field0`, `Iter`, and `IterRaw` types are also exposed so that you can explore -//! their APIs. The `Field0` type can be accessed as `self.0` on an instance of `Flags`. - -__declare_public_bitflags! { - /// This is the same `Flags` struct defined in the [crate level example](../index.html#example). - /// Note that this struct is just for documentation purposes only, it must not be used outside - /// this crate. - pub struct Flags -} - -__declare_internal_bitflags! { - pub struct Field0: u32 -} - -__impl_internal_bitflags! { - Field0: u32, Flags { - // Field `A`. - /// - /// This flag has the value `0b00000001`. - const A = 0b00000001; - /// Field `B`. - /// - /// This flag has the value `0b00000010`. - const B = 0b00000010; - /// Field `C`. - /// - /// This flag has the value `0b00000100`. - const C = 0b00000100; - const ABC = Self::A.bits() | Self::B.bits() | Self::C.bits(); - } -} - -__impl_public_bitflags_forward! { - Flags: u32, Field0 -} - -__impl_public_bitflags_ops! 
{ - Flags -} - -__impl_public_bitflags_iter! { - Flags: u32, Flags -} - -__impl_public_bitflags_consts! { - Flags: u32 { - /// Field `A`. - /// - /// This flag has the value `0b00000001`. - const A = 0b00000001; - /// Field `B`. - /// - /// This flag has the value `0b00000010`. - const B = 0b00000010; - /// Field `C`. - /// - /// This flag has the value `0b00000100`. - const C = 0b00000100; - const ABC = Self::A.bits() | Self::B.bits() | Self::C.bits(); - } -} diff --git a/vendor/bitflags/src/external.rs b/vendor/bitflags/src/external.rs deleted file mode 100644 index a60abec4c08df8..00000000000000 --- a/vendor/bitflags/src/external.rs +++ /dev/null @@ -1,262 +0,0 @@ -//! Conditional trait implementations for external libraries. - -/* -How do I support a new external library? - -Let's say we want to add support for `my_library`. - -First, we create a module under `external`, like `serde` with any specialized code. -Ideally, any utilities in here should just work off the `Flags` trait and maybe a -few other assumed bounds. - -Next, re-export the library from the `__private` module here. - -Next, define a macro like so: - -```rust -#[macro_export] -#[doc(hidden)] -#[cfg(feature = "serde")] -macro_rules! __impl_external_bitflags_my_library { - ( - $InternalBitFlags:ident: $T:ty, $PublicBitFlags:ident { - $( - $(#[$inner:ident $($args:tt)*])* - const $Flag:tt; - )* - } - ) => { - // Implementation goes here - }; -} - -#[macro_export] -#[doc(hidden)] -#[cfg(not(feature = "my_library"))] -macro_rules! __impl_external_bitflags_my_library { - ( - $InternalBitFlags:ident: $T:ty, $PublicBitFlags:ident { - $( - $(#[$inner:ident $($args:tt)*])* - const $Flag:tt; - )* - } - ) => {}; -} -``` - -Note that the macro is actually defined twice; once for when the `my_library` feature -is available, and once for when it's not. This is because the `__impl_external_bitflags_my_library` -macro is called in an end-user's library, not in `bitflags`. In an end-user's library we don't -know whether or not a particular feature of `bitflags` is enabled, so we unconditionally call -the macro, where the body of that macro depends on the feature flag. - -Now, we add our macro call to the `__impl_external_bitflags` macro body: - -```rust -__impl_external_bitflags_my_library! { - $InternalBitFlags: $T, $PublicBitFlags { - $( - $(#[$inner $($args)*])* - const $Flag; - )* - } -} -``` -*/ - -pub(crate) mod __private { - #[cfg(feature = "serde")] - pub use serde_core as serde; - - #[cfg(feature = "arbitrary")] - pub use arbitrary; - - #[cfg(feature = "bytemuck")] - pub use bytemuck; -} - -/// Implements traits from external libraries for the internal bitflags type. -#[macro_export] -#[doc(hidden)] -macro_rules! __impl_external_bitflags { - ( - $InternalBitFlags:ident: $T:ty, $PublicBitFlags:ident { - $( - $(#[$inner:ident $($args:tt)*])* - const $Flag:tt; - )* - } - ) => { - // Any new library traits impls should be added here - // Use `serde` as an example: generate code when the feature is available, - // and a no-op when it isn't - - $crate::__impl_external_bitflags_serde! { - $InternalBitFlags: $T, $PublicBitFlags { - $( - $(#[$inner $($args)*])* - const $Flag; - )* - } - } - - $crate::__impl_external_bitflags_arbitrary! { - $InternalBitFlags: $T, $PublicBitFlags { - $( - $(#[$inner $($args)*])* - const $Flag; - )* - } - } - - $crate::__impl_external_bitflags_bytemuck! 
{ - $InternalBitFlags: $T, $PublicBitFlags { - $( - $(#[$inner $($args)*])* - const $Flag; - )* - } - } - }; -} - -#[cfg(feature = "serde")] -pub mod serde; - -/// Implement `Serialize` and `Deserialize` for the internal bitflags type. -#[macro_export] -#[doc(hidden)] -#[cfg(feature = "serde")] -macro_rules! __impl_external_bitflags_serde { - ( - $InternalBitFlags:ident: $T:ty, $PublicBitFlags:ident { - $( - $(#[$inner:ident $($args:tt)*])* - const $Flag:tt; - )* - } - ) => { - impl $crate::__private::serde::Serialize for $InternalBitFlags { - fn serialize( - &self, - serializer: S, - ) -> $crate::__private::core::result::Result { - $crate::serde::serialize( - &$PublicBitFlags::from_bits_retain(self.bits()), - serializer, - ) - } - } - - impl<'de> $crate::__private::serde::Deserialize<'de> for $InternalBitFlags { - fn deserialize>( - deserializer: D, - ) -> $crate::__private::core::result::Result { - let flags: $PublicBitFlags = $crate::serde::deserialize(deserializer)?; - - Ok(flags.0) - } - } - }; -} - -#[macro_export] -#[doc(hidden)] -#[cfg(not(feature = "serde"))] -macro_rules! __impl_external_bitflags_serde { - ( - $InternalBitFlags:ident: $T:ty, $PublicBitFlags:ident { - $( - $(#[$inner:ident $($args:tt)*])* - const $Flag:tt; - )* - } - ) => {}; -} - -#[cfg(feature = "arbitrary")] -pub mod arbitrary; - -#[cfg(feature = "bytemuck")] -mod bytemuck; - -/// Implement `Arbitrary` for the internal bitflags type. -#[macro_export] -#[doc(hidden)] -#[cfg(feature = "arbitrary")] -macro_rules! __impl_external_bitflags_arbitrary { - ( - $InternalBitFlags:ident: $T:ty, $PublicBitFlags:ident { - $( - $(#[$inner:ident $($args:tt)*])* - const $Flag:tt; - )* - } - ) => { - impl<'a> $crate::__private::arbitrary::Arbitrary<'a> for $InternalBitFlags { - fn arbitrary( - u: &mut $crate::__private::arbitrary::Unstructured<'a>, - ) -> $crate::__private::arbitrary::Result { - $crate::arbitrary::arbitrary::<$PublicBitFlags>(u).map(|flags| flags.0) - } - } - }; -} - -#[macro_export] -#[doc(hidden)] -#[cfg(not(feature = "arbitrary"))] -macro_rules! __impl_external_bitflags_arbitrary { - ( - $InternalBitFlags:ident: $T:ty, $PublicBitFlags:ident { - $( - $(#[$inner:ident $($args:tt)*])* - const $Flag:tt; - )* - } - ) => {}; -} - -/// Implement `Pod` and `Zeroable` for the internal bitflags type. -#[macro_export] -#[doc(hidden)] -#[cfg(feature = "bytemuck")] -macro_rules! __impl_external_bitflags_bytemuck { - ( - $InternalBitFlags:ident: $T:ty, $PublicBitFlags:ident { - $( - $(#[$inner:ident $($args:tt)*])* - const $Flag:tt; - )* - } - ) => { - // SAFETY: $InternalBitFlags is guaranteed to have the same ABI as $T, - // and $T implements Pod - unsafe impl $crate::__private::bytemuck::Pod for $InternalBitFlags where - $T: $crate::__private::bytemuck::Pod - { - } - - // SAFETY: $InternalBitFlags is guaranteed to have the same ABI as $T, - // and $T implements Zeroable - unsafe impl $crate::__private::bytemuck::Zeroable for $InternalBitFlags where - $T: $crate::__private::bytemuck::Zeroable - { - } - }; -} - -#[macro_export] -#[doc(hidden)] -#[cfg(not(feature = "bytemuck"))] -macro_rules! __impl_external_bitflags_bytemuck { - ( - $InternalBitFlags:ident: $T:ty, $PublicBitFlags:ident { - $( - $(#[$inner:ident $($args:tt)*])* - const $Flag:tt; - )* - } - ) => {}; -} diff --git a/vendor/bitflags/src/external/arbitrary.rs b/vendor/bitflags/src/external/arbitrary.rs deleted file mode 100644 index edde9b5ec4e0d8..00000000000000 --- a/vendor/bitflags/src/external/arbitrary.rs +++ /dev/null @@ -1,33 +0,0 @@ -//! 
Specialized fuzzing for flags types using `arbitrary`. - -use crate::Flags; - -/** -Generate some arbitrary flags value with only known bits set. -*/ -pub fn arbitrary<'a, B: Flags>(u: &mut arbitrary::Unstructured<'a>) -> arbitrary::Result -where - B::Bits: arbitrary::Arbitrary<'a>, -{ - B::from_bits(u.arbitrary()?).ok_or(arbitrary::Error::IncorrectFormat) -} - -#[cfg(test)] -mod tests { - use arbitrary::Arbitrary; - - bitflags! { - #[derive(Arbitrary)] - struct Color: u32 { - const RED = 0x1; - const GREEN = 0x2; - const BLUE = 0x4; - } - } - - #[test] - fn test_arbitrary() { - let mut unstructured = arbitrary::Unstructured::new(&[0_u8; 256]); - let _color = Color::arbitrary(&mut unstructured); - } -} diff --git a/vendor/bitflags/src/external/bytemuck.rs b/vendor/bitflags/src/external/bytemuck.rs deleted file mode 100644 index a0cd68c9d7e736..00000000000000 --- a/vendor/bitflags/src/external/bytemuck.rs +++ /dev/null @@ -1,19 +0,0 @@ -#[cfg(test)] -mod tests { - use bytemuck::{Pod, Zeroable}; - - bitflags! { - #[derive(Pod, Zeroable, Clone, Copy)] - #[repr(transparent)] - struct Color: u32 { - const RED = 0x1; - const GREEN = 0x2; - const BLUE = 0x4; - } - } - - #[test] - fn test_bytemuck() { - assert_eq!(0x1, bytemuck::cast::(Color::RED)); - } -} diff --git a/vendor/bitflags/src/external/serde.rs b/vendor/bitflags/src/external/serde.rs deleted file mode 100644 index ff327b4b32093c..00000000000000 --- a/vendor/bitflags/src/external/serde.rs +++ /dev/null @@ -1,94 +0,0 @@ -//! Specialized serialization for flags types using `serde`. - -use crate::{ - parser::{self, ParseHex, WriteHex}, - Flags, -}; -use core::{fmt, str}; -use serde_core::{ - de::{Error, Visitor}, - Deserialize, Deserializer, Serialize, Serializer, -}; - -/** -Serialize a set of flags as a human-readable string or their underlying bits. - -Any unknown bits will be retained. -*/ -pub fn serialize(flags: &B, serializer: S) -> Result -where - B::Bits: WriteHex + Serialize, -{ - // Serialize human-readable flags as a string like `"A | B"` - if serializer.is_human_readable() { - serializer.collect_str(&parser::AsDisplay(flags)) - } - // Serialize non-human-readable flags directly as the underlying bits - else { - flags.bits().serialize(serializer) - } -} - -/** -Deserialize a set of flags from a human-readable string or their underlying bits. - -Any unknown bits will be retained. -*/ -pub fn deserialize<'de, B: Flags, D: Deserializer<'de>>(deserializer: D) -> Result -where - B::Bits: ParseHex + Deserialize<'de>, -{ - if deserializer.is_human_readable() { - // Deserialize human-readable flags by parsing them from strings like `"A | B"` - struct FlagsVisitor(core::marker::PhantomData); - - impl<'de, B: Flags> Visitor<'de> for FlagsVisitor - where - B::Bits: ParseHex, - { - type Value = B; - - fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { - formatter.write_str("a string value of `|` separated flags") - } - - fn visit_str(self, flags: &str) -> Result { - parser::from_str(flags).map_err(|e| E::custom(e)) - } - } - - deserializer.deserialize_str(FlagsVisitor(Default::default())) - } else { - // Deserialize non-human-readable flags directly from the underlying bits - let bits = B::Bits::deserialize(deserializer)?; - - Ok(B::from_bits_retain(bits)) - } -} - -#[cfg(test)] -mod tests { - use serde_test::{assert_tokens, Configure, Token::*}; - - bitflags! 
{ - #[derive(serde_lib::Serialize, serde_lib::Deserialize, Debug, PartialEq, Eq)] - #[serde(crate = "serde_lib", transparent)] - struct SerdeFlags: u32 { - const A = 1; - const B = 2; - const C = 4; - const D = 8; - } - } - - #[test] - fn test_serde_bitflags_default() { - assert_tokens(&SerdeFlags::empty().readable(), &[Str("")]); - - assert_tokens(&SerdeFlags::empty().compact(), &[U32(0)]); - - assert_tokens(&(SerdeFlags::A | SerdeFlags::B).readable(), &[Str("A | B")]); - - assert_tokens(&(SerdeFlags::A | SerdeFlags::B).compact(), &[U32(1 | 2)]); - } -} diff --git a/vendor/bitflags/src/internal.rs b/vendor/bitflags/src/internal.rs deleted file mode 100644 index 87d01cc0cb5f55..00000000000000 --- a/vendor/bitflags/src/internal.rs +++ /dev/null @@ -1,125 +0,0 @@ -//! Generate the internal `bitflags`-facing flags type. -//! -//! The code generated here is owned by `bitflags`, but still part of its public API. -//! Changes to the types generated here need to be considered like any other public API change. - -/// Declare the `bitflags`-facing bitflags struct. -/// -/// This type is part of the `bitflags` crate's public API, but not part of the user's. -#[macro_export] -#[doc(hidden)] -macro_rules! __declare_internal_bitflags { - ( - $vis:vis struct $InternalBitFlags:ident: $T:ty - ) => { - // NOTE: The ABI of this type is _guaranteed_ to be the same as `T` - // This is relied on by some external libraries like `bytemuck` to make - // its `unsafe` trait impls sound. - #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] - #[repr(transparent)] - $vis struct $InternalBitFlags($T); - }; -} - -/// Implement functions on the private (bitflags-facing) bitflags type. -/// -/// Methods and trait implementations can be freely added here without breaking end-users. -/// If we want to expose new functionality to `#[derive]`, this is the place to do it. -#[macro_export] -#[doc(hidden)] -macro_rules! __impl_internal_bitflags { - ( - $InternalBitFlags:ident: $T:ty, $PublicBitFlags:ident { - $( - $(#[$inner:ident $($args:tt)*])* - const $Flag:tt = $value:expr; - )* - } - ) => { - // NOTE: This impl is also used to prevent using bits types from non-primitive types - // in the `bitflags` macro. If this approach is changed, this guard will need to be - // retained somehow - impl $crate::__private::PublicFlags for $PublicBitFlags { - type Primitive = $T; - type Internal = $InternalBitFlags; - } - - impl $crate::__private::core::default::Default for $InternalBitFlags { - #[inline] - fn default() -> Self { - $InternalBitFlags::empty() - } - } - - impl $crate::__private::core::fmt::Debug for $InternalBitFlags { - fn fmt(&self, f: &mut $crate::__private::core::fmt::Formatter<'_>) -> $crate::__private::core::fmt::Result { - if self.is_empty() { - // If no flags are set then write an empty hex flag to avoid - // writing an empty string. In some contexts, like serialization, - // an empty string is preferable, but it may be unexpected in - // others for a format not to produce any output. - // - // We can remove this `0x0` and remain compatible with `FromStr`, - // because an empty string will still parse to an empty set of flags, - // just like `0x0` does. 
- $crate::__private::core::write!(f, "{:#x}", <$T as $crate::Bits>::EMPTY) - } else { - $crate::__private::core::fmt::Display::fmt(self, f) - } - } - } - - impl $crate::__private::core::fmt::Display for $InternalBitFlags { - fn fmt(&self, f: &mut $crate::__private::core::fmt::Formatter<'_>) -> $crate::__private::core::fmt::Result { - $crate::parser::to_writer(&$PublicBitFlags(*self), f) - } - } - - impl $crate::__private::core::str::FromStr for $InternalBitFlags { - type Err = $crate::parser::ParseError; - - fn from_str(s: &str) -> $crate::__private::core::result::Result { - $crate::parser::from_str::<$PublicBitFlags>(s).map(|flags| flags.0) - } - } - - impl $crate::__private::core::convert::AsRef<$T> for $InternalBitFlags { - fn as_ref(&self) -> &$T { - &self.0 - } - } - - impl $crate::__private::core::convert::From<$T> for $InternalBitFlags { - fn from(bits: $T) -> Self { - Self::from_bits_retain(bits) - } - } - - // The internal flags type offers a similar API to the public one - - $crate::__impl_public_bitflags! { - $InternalBitFlags: $T, $PublicBitFlags { - $( - $(#[$inner $($args)*])* - const $Flag = $value; - )* - } - } - - $crate::__impl_public_bitflags_ops! { - $InternalBitFlags - } - - $crate::__impl_public_bitflags_iter! { - $InternalBitFlags: $T, $PublicBitFlags - } - - impl $InternalBitFlags { - /// Returns a mutable reference to the raw value of the flags currently stored. - #[inline] - pub fn bits_mut(&mut self) -> &mut $T { - &mut self.0 - } - } - }; -} diff --git a/vendor/bitflags/src/iter.rs b/vendor/bitflags/src/iter.rs deleted file mode 100644 index ae0efc930917fa..00000000000000 --- a/vendor/bitflags/src/iter.rs +++ /dev/null @@ -1,182 +0,0 @@ -/*! -Yield the bits of a source flags value in a set of contained flags values. -*/ - -use crate::{Flag, Flags}; - -/** -An iterator over flags values. - -This iterator will yield flags values for contained, defined flags first, with any remaining bits yielded -as a final flags value. -*/ -pub struct Iter { - inner: IterNames, - done: bool, -} - -impl Iter { - pub(crate) fn new(flags: &B) -> Self { - Iter { - inner: IterNames::new(flags), - done: false, - } - } -} - -impl Iter { - // Used by the `bitflags` macro - #[doc(hidden)] - pub const fn __private_const_new(flags: &'static [Flag], source: B, remaining: B) -> Self { - Iter { - inner: IterNames::__private_const_new(flags, source, remaining), - done: false, - } - } -} - -impl Iterator for Iter { - type Item = B; - - fn next(&mut self) -> Option { - match self.inner.next() { - Some((_, flag)) => Some(flag), - None if !self.done => { - self.done = true; - - // After iterating through valid names, if there are any bits left over - // then return one final value that includes them. This makes `into_iter` - // and `from_iter` roundtrip - if !self.inner.remaining().is_empty() { - Some(B::from_bits_retain(self.inner.remaining.bits())) - } else { - None - } - } - None => None, - } - } -} - -/** -An iterator over flags values. - -This iterator only yields flags values for contained, defined, named flags. Any remaining bits -won't be yielded, but can be found with the [`IterNames::remaining`] method. 
-*/ -pub struct IterNames { - flags: &'static [Flag], - idx: usize, - source: B, - remaining: B, -} - -impl IterNames { - pub(crate) fn new(flags: &B) -> Self { - IterNames { - flags: B::FLAGS, - idx: 0, - remaining: B::from_bits_retain(flags.bits()), - source: B::from_bits_retain(flags.bits()), - } - } -} - -impl IterNames { - // Used by the bitflags macro - #[doc(hidden)] - pub const fn __private_const_new(flags: &'static [Flag], source: B, remaining: B) -> Self { - IterNames { - flags, - idx: 0, - remaining, - source, - } - } - - /// Get a flags value of any remaining bits that haven't been yielded yet. - /// - /// Once the iterator has finished, this method can be used to - /// check whether or not there are any bits that didn't correspond - /// to a contained, defined, named flag remaining. - pub fn remaining(&self) -> &B { - &self.remaining - } -} - -impl Iterator for IterNames { - type Item = (&'static str, B); - - fn next(&mut self) -> Option { - while let Some(flag) = self.flags.get(self.idx) { - // Short-circuit if our state is empty - if self.remaining.is_empty() { - return None; - } - - self.idx += 1; - - // Skip unnamed flags - if flag.name().is_empty() { - continue; - } - - let bits = flag.value().bits(); - - // If the flag is set in the original source _and_ it has bits that haven't - // been covered by a previous flag yet then yield it. These conditions cover - // two cases for multi-bit flags: - // - // 1. When flags partially overlap, such as `0b00000001` and `0b00000101`, we'll - // yield both flags. - // 2. When flags fully overlap, such as in convenience flags that are a shorthand for others, - // we won't yield both flags. - if self.source.contains(B::from_bits_retain(bits)) - && self.remaining.intersects(B::from_bits_retain(bits)) - { - self.remaining.remove(B::from_bits_retain(bits)); - - return Some((flag.name(), B::from_bits_retain(bits))); - } - } - - None - } -} - -/** -An iterator over all defined named flags. - -This iterator will yield flags values for all defined named flags, regardless of -whether they are contained in a particular flags value. -*/ -pub struct IterDefinedNames { - flags: &'static [Flag], - idx: usize, -} - -impl IterDefinedNames { - pub(crate) fn new() -> Self { - IterDefinedNames { - flags: B::FLAGS, - idx: 0, - } - } -} - -impl Iterator for IterDefinedNames { - type Item = (&'static str, B); - - fn next(&mut self) -> Option { - while let Some(flag) = self.flags.get(self.idx) { - self.idx += 1; - - // Only yield named flags - if flag.is_named() { - return Some((flag.name(), B::from_bits_retain(flag.value().bits()))); - } - } - - None - } -} diff --git a/vendor/bitflags/src/lib.rs b/vendor/bitflags/src/lib.rs deleted file mode 100644 index b672ec066b8feb..00000000000000 --- a/vendor/bitflags/src/lib.rs +++ /dev/null @@ -1,997 +0,0 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -/*! -Generate types for C-style flags with ergonomic APIs. - -# Getting started - -Add `bitflags` to your `Cargo.toml`: - -```toml -[dependencies.bitflags] -version = "2.10.0" -``` - -## Crate features - -The `bitflags` library defines a few Cargo features that you can opt-in to: - -- `std`: Implement the `Error` trait on error types used by `bitflags`. 
-- `serde`: Support deriving `serde` traits on generated flags types. -- `arbitrary`: Support deriving `arbitrary` traits on generated flags types. -- `bytemuck`: Support deriving `bytemuck` traits on generated flags types. - -## Generating flags types - -Use the [`bitflags`] macro to generate flags types: - -```rust -use bitflags::bitflags; - -bitflags! { - pub struct Flags: u32 { - const A = 0b00000001; - const B = 0b00000010; - const C = 0b00000100; - } -} -``` - -See the docs for the `bitflags` macro for the full syntax. - -Also see the [`example_generated`](./example_generated/index.html) module for an example of what the `bitflags` macro generates for a flags type. - -### Externally defined flags - -If you're generating flags types for an external source, such as a C API, you can define -an extra unnamed flag as a mask of all bits the external source may ever set. Usually this would be all bits (`!0`): - -```rust -# use bitflags::bitflags; -bitflags! { - pub struct Flags: u32 { - const A = 0b00000001; - const B = 0b00000010; - const C = 0b00000100; - - // The source may set any bits - const _ = !0; - } -} -``` - -Why should you do this? Generated methods like `all` and truncating operators like `!` only consider -bits in defined flags. Adding an unnamed flag makes those methods consider additional bits, -without generating additional constants for them. It helps compatibility when the external source -may start setting additional bits at any time. The [known and unknown bits](#known-and-unknown-bits) -section has more details on this behavior. - -### Custom derives - -You can derive some traits on generated flags types if you enable Cargo features. The following -libraries are currently supported: - -- `serde`: Support `#[derive(Serialize, Deserialize)]`, using text for human-readable formats, - and a raw number for binary formats. -- `arbitrary`: Support `#[derive(Arbitrary)]`, only generating flags values with known bits. -- `bytemuck`: Support `#[derive(Pod, Zeroable)]`, for casting between flags values and their - underlying bits values. - -You can also define your own flags type outside of the [`bitflags`] macro and then use it to generate methods. -This can be useful if you need a custom `#[derive]` attribute for a library that `bitflags` doesn't -natively support: - -```rust -# use std::fmt::Debug as SomeTrait; -# use bitflags::bitflags; -#[derive(SomeTrait)] -pub struct Flags(u32); - -bitflags! { - impl Flags: u32 { - const A = 0b00000001; - const B = 0b00000010; - const C = 0b00000100; - } -} -``` - -### Adding custom methods - -The [`bitflags`] macro supports attributes on generated flags types within the macro itself, while -`impl` blocks can be added outside of it: - -```rust -# use bitflags::bitflags; -bitflags! { - // Attributes can be applied to flags types - #[repr(transparent)] - #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] - pub struct Flags: u32 { - const A = 0b00000001; - const B = 0b00000010; - const C = 0b00000100; - } -} - -// Impl blocks can be added to flags types -impl Flags { - pub fn as_u64(&self) -> u64 { - self.bits() as u64 - } -} -``` - -## Working with flags values - -Use generated constants and standard bitwise operators to interact with flags values: - -```rust -# use bitflags::bitflags; -# bitflags! 
{ -# #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)] -# pub struct Flags: u32 { -# const A = 0b00000001; -# const B = 0b00000010; -# const C = 0b00000100; -# } -# } -// union -let ab = Flags::A | Flags::B; - -// intersection -let a = ab & Flags::A; - -// difference -let b = ab - Flags::A; - -// complement -let c = !ab; -``` - -See the docs for the [`Flags`] trait for more details on operators and how they behave. - -# Formatting and parsing - -`bitflags` defines a text format that can be used to convert any flags value to and from strings. - -See the [`parser`] module for more details. - -# Specification - -The terminology and behavior of generated flags types is -[specified in the source repository](https://github.com/bitflags/bitflags/blob/main/spec.md). -Details are repeated in these docs where appropriate, but is exhaustively listed in the spec. Some -things are worth calling out explicitly here. - -## Flags types, flags values, flags - -The spec and these docs use consistent terminology to refer to things in the bitflags domain: - -- **Bits type**: A type that defines a fixed number of bits at specific locations. -- **Flag**: A set of bits in a bits type that may have a unique name. -- **Flags type**: A set of defined flags over a specific bits type. -- **Flags value**: An instance of a flags type using its specific bits value for storage. - -``` -# use bitflags::bitflags; -bitflags! { - struct FlagsType: u8 { -// -- Bits type -// --------- Flags type - const A = 1; -// ----- Flag - } -} - -let flag = FlagsType::A; -// ---- Flags value -``` - -## Known and unknown bits - -Any bits in a flag you define are called _known bits_. Any other bits are _unknown bits_. -In the following flags type: - -``` -# use bitflags::bitflags; -bitflags! { - struct Flags: u8 { - const A = 1; - const B = 1 << 1; - const C = 1 << 2; - } -} -``` - -The known bits are `0b0000_0111` and the unknown bits are `0b1111_1000`. - -`bitflags` doesn't guarantee that a flags value will only ever have known bits set, but some operators -will unset any unknown bits they encounter. In a future version of `bitflags`, all operators will -unset unknown bits. - -If you're using `bitflags` for flags types defined externally, such as from C, you probably want all -bits to be considered known, in case that external source changes. You can do this using an unnamed -flag, as described in [externally defined flags](#externally-defined-flags). - -## Zero-bit flags - -Flags with no bits set should be avoided because they interact strangely with [`Flags::contains`] -and [`Flags::intersects`]. A zero-bit flag is always contained, but is never intersected. The -names of zero-bit flags can be parsed, but are never formatted. - -## Multi-bit flags - -Flags that set multiple bits should be avoided unless each bit is also in a single-bit flag. -Take the following flags type as an example: - -``` -# use bitflags::bitflags; -bitflags! { - struct Flags: u8 { - const A = 1; - const B = 1 | 1 << 1; - } -} -``` - -The result of `Flags::A ^ Flags::B` is `0b0000_0010`, which doesn't correspond to either -`Flags::A` or `Flags::B` even though it's still a known bit. 
-*/ - -#![cfg_attr(not(any(feature = "std", test)), no_std)] -#![cfg_attr(not(test), forbid(unsafe_code))] -#![cfg_attr(test, allow(mixed_script_confusables))] - -#[doc(inline)] -pub use traits::{Bits, Flag, Flags}; - -pub mod iter; -pub mod parser; - -mod traits; - -#[doc(hidden)] -pub mod __private { - #[allow(unused_imports)] - // Easier than conditionally checking any optional external dependencies - pub use crate::{external::__private::*, traits::__private::*}; - - pub use core; -} - -#[allow(unused_imports)] -pub use external::*; - -#[allow(deprecated)] -pub use traits::BitFlags; - -/* -How does the bitflags crate work? - -This library generates a `struct` in the end-user's crate with a bunch of constants on it that represent flags. -The difference between `bitflags` and a lot of other libraries is that we don't actually control the generated `struct` in the end. -It's part of the end-user's crate, so it belongs to them. That makes it difficult to extend `bitflags` with new functionality -because we could end up breaking valid code that was already written. - -Our solution is to split the type we generate into two: the public struct owned by the end-user, and an internal struct owned by `bitflags` (us). -To give you an example, let's say we had a crate that called `bitflags!`: - -```rust -bitflags! { - pub struct MyFlags: u32 { - const A = 1; - const B = 2; - } -} -``` - -What they'd end up with looks something like this: - -```rust -pub struct MyFlags(::InternalBitFlags); - -const _: () = { - #[repr(transparent)] - #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] - pub struct MyInternalBitFlags { - bits: u32, - } - - impl PublicFlags for MyFlags { - type Internal = InternalBitFlags; - } -}; -``` - -If we want to expose something like a new trait impl for generated flags types, we add it to our generated `MyInternalBitFlags`, -and let `#[derive]` on `MyFlags` pick up that implementation, if an end-user chooses to add one. - -The public API is generated in the `__impl_public_flags!` macro, and the internal API is generated in -the `__impl_internal_flags!` macro. - -The macros are split into 3 modules: - -- `public`: where the user-facing flags types are generated. -- `internal`: where the `bitflags`-facing flags types are generated. -- `external`: where external library traits are implemented conditionally. -*/ - -/** -Generate a flags type. - -# `struct` mode - -A declaration that begins with `$vis struct` will generate a `struct` for a flags type, along with -methods and trait implementations for it. The body of the declaration defines flags as constants, -where each constant is a flags value of the generated flags type. - -## Examples - -Generate a flags type using `u8` as the bits type: - -``` -# use bitflags::bitflags; -bitflags! { - struct Flags: u8 { - const A = 1; - const B = 1 << 1; - const C = 0b0000_0100; - } -} -``` - -Flags types are private by default and accept standard visibility modifiers. Flags themselves -are always public: - -``` -# use bitflags::bitflags; -bitflags! { - pub struct Flags: u8 { - // Constants are always `pub` - const A = 1; - } -} -``` - -Flags may refer to other flags using their [`Flags::bits`] value: - -``` -# use bitflags::bitflags; -bitflags! { - struct Flags: u8 { - const A = 1; - const B = 1 << 1; - const AB = Flags::A.bits() | Flags::B.bits(); - } -} -``` - -A single `bitflags` invocation may include zero or more flags type declarations: - -``` -# use bitflags::bitflags; -bitflags! {} - -bitflags! 
{ - struct Flags1: u8 { - const A = 1; - } - - struct Flags2: u8 { - const A = 1; - } -} -``` - -# `impl` mode - -A declaration that begins with `impl` will only generate methods and trait implementations for the -`struct` defined outside of the `bitflags` macro. - -The struct itself must be a newtype using the bits type as its field. - -The syntax for `impl` mode is identical to `struct` mode besides the starting token. - -## Examples - -Implement flags methods and traits for a custom flags type using `u8` as its underlying bits type: - -``` -# use bitflags::bitflags; -struct Flags(u8); - -bitflags! { - impl Flags: u8 { - const A = 1; - const B = 1 << 1; - const C = 0b0000_0100; - } -} -``` - -# Named and unnamed flags - -Constants in the body of a declaration are flags. The identifier of the constant is the name of -the flag. If the identifier is `_`, then the flag is unnamed. Unnamed flags don't appear in the -generated API, but affect how bits are truncated. - -## Examples - -Adding an unnamed flag that makes all bits known: - -``` -# use bitflags::bitflags; -bitflags! { - struct Flags: u8 { - const A = 1; - const B = 1 << 1; - - const _ = !0; - } -} -``` - -Flags types may define multiple unnamed flags: - -``` -# use bitflags::bitflags; -bitflags! { - struct Flags: u8 { - const _ = 1; - const _ = 1 << 1; - } -} -``` -*/ -#[macro_export] -macro_rules! bitflags { - ( - $(#[$outer:meta])* - $vis:vis struct $BitFlags:ident: $T:ty { - $( - $(#[$inner:ident $($args:tt)*])* - const $Flag:tt = $value:expr; - )* - } - - $($t:tt)* - ) => { - // Declared in the scope of the `bitflags!` call - // This type appears in the end-user's API - $crate::__declare_public_bitflags! { - $(#[$outer])* - $vis struct $BitFlags - } - - // Workaround for: https://github.com/bitflags/bitflags/issues/320 - $crate::__impl_public_bitflags_consts! { - $BitFlags: $T { - $( - $(#[$inner $($args)*])* - const $Flag = $value; - )* - } - } - - #[allow( - dead_code, - deprecated, - unused_doc_comments, - unused_attributes, - unused_mut, - unused_imports, - non_upper_case_globals, - clippy::assign_op_pattern, - clippy::indexing_slicing, - clippy::same_name_method, - clippy::iter_without_into_iter, - )] - const _: () = { - // Declared in a "hidden" scope that can't be reached directly - // These types don't appear in the end-user's API - $crate::__declare_internal_bitflags! { - $vis struct InternalBitFlags: $T - } - - $crate::__impl_internal_bitflags! { - InternalBitFlags: $T, $BitFlags { - $( - $(#[$inner $($args)*])* - const $Flag = $value; - )* - } - } - - // This is where new library trait implementations can be added - $crate::__impl_external_bitflags! { - InternalBitFlags: $T, $BitFlags { - $( - $(#[$inner $($args)*])* - const $Flag; - )* - } - } - - $crate::__impl_public_bitflags_forward! { - $BitFlags: $T, InternalBitFlags - } - - $crate::__impl_public_bitflags_ops! { - $BitFlags - } - - $crate::__impl_public_bitflags_iter! { - $BitFlags: $T, $BitFlags - } - }; - - $crate::bitflags! { - $($t)* - } - }; - ( - $(#[$outer:meta])* - impl $BitFlags:ident: $T:ty { - $( - $(#[$inner:ident $($args:tt)*])* - const $Flag:tt = $value:expr; - )* - } - - $($t:tt)* - ) => { - $crate::__impl_public_bitflags_consts! 
{ - $BitFlags: $T { - $( - $(#[$inner $($args)*])* - const $Flag = $value; - )* - } - } - - #[allow( - dead_code, - deprecated, - unused_doc_comments, - unused_attributes, - unused_mut, - unused_imports, - non_upper_case_globals, - clippy::assign_op_pattern, - clippy::iter_without_into_iter, - )] - const _: () = { - $crate::__impl_public_bitflags! { - $(#[$outer])* - $BitFlags: $T, $BitFlags { - $( - $(#[$inner $($args)*])* - const $Flag = $value; - )* - } - } - - $crate::__impl_public_bitflags_ops! { - $BitFlags - } - - $crate::__impl_public_bitflags_iter! { - $BitFlags: $T, $BitFlags - } - }; - - $crate::bitflags! { - $($t)* - } - }; - () => {}; -} - -/// Implement functions on bitflags types. -/// -/// We need to be careful about adding new methods and trait implementations here because they -/// could conflict with items added by the end-user. -#[macro_export] -#[doc(hidden)] -macro_rules! __impl_bitflags { - ( - // These param names must be passed in to make the macro work. - // Just use `params: self, bits, name, other, value;`. - params: $self:ident, $bits:ident, $name:ident, $other:ident, $value:ident; - $(#[$outer:meta])* - $PublicBitFlags:ident: $T:ty { - fn empty() $empty_body:block - fn all() $all_body:block - fn bits(&self) $bits_body:block - fn from_bits(bits) $from_bits_body:block - fn from_bits_truncate(bits) $from_bits_truncate_body:block - fn from_bits_retain(bits) $from_bits_retain_body:block - fn from_name(name) $from_name_body:block - fn is_empty(&self) $is_empty_body:block - fn is_all(&self) $is_all_body:block - fn intersects(&self, other) $intersects_body:block - fn contains(&self, other) $contains_body:block - fn insert(&mut self, other) $insert_body:block - fn remove(&mut self, other) $remove_body:block - fn toggle(&mut self, other) $toggle_body:block - fn set(&mut self, other, value) $set_body:block - fn intersection(self, other) $intersection_body:block - fn union(self, other) $union_body:block - fn difference(self, other) $difference_body:block - fn symmetric_difference(self, other) $symmetric_difference_body:block - fn complement(self) $complement_body:block - } - ) => { - #[allow(dead_code, deprecated, unused_attributes)] - $(#[$outer])* - impl $PublicBitFlags { - /// Get a flags value with all bits unset. - #[inline] - pub const fn empty() -> Self - $empty_body - - /// Get a flags value with all known bits set. - #[inline] - pub const fn all() -> Self - $all_body - - /// Get the underlying bits value. - /// - /// The returned value is exactly the bits set in this flags value. - #[inline] - pub const fn bits(&$self) -> $T - $bits_body - - /// Convert from a bits value. - /// - /// This method will return `None` if any unknown bits are set. - #[inline] - pub const fn from_bits($bits: $T) -> $crate::__private::core::option::Option - $from_bits_body - - /// Convert from a bits value, unsetting any unknown bits. - #[inline] - pub const fn from_bits_truncate($bits: $T) -> Self - $from_bits_truncate_body - - /// Convert from a bits value exactly. - #[inline] - pub const fn from_bits_retain($bits: $T) -> Self - $from_bits_retain_body - - /// Get a flags value with the bits of a flag with the given name set. - /// - /// This method will return `None` if `name` is empty or doesn't - /// correspond to any named flag. - #[inline] - pub fn from_name($name: &str) -> $crate::__private::core::option::Option - $from_name_body - - /// Whether all bits in this flags value are unset. 
- #[inline] - pub const fn is_empty(&$self) -> bool - $is_empty_body - - /// Whether all known bits in this flags value are set. - #[inline] - pub const fn is_all(&$self) -> bool - $is_all_body - - /// Whether any set bits in a source flags value are also set in a target flags value. - #[inline] - pub const fn intersects(&$self, $other: Self) -> bool - $intersects_body - - /// Whether all set bits in a source flags value are also set in a target flags value. - #[inline] - pub const fn contains(&$self, $other: Self) -> bool - $contains_body - - /// The bitwise or (`|`) of the bits in two flags values. - #[inline] - pub fn insert(&mut $self, $other: Self) - $insert_body - - /// The intersection of a source flags value with the complement of a target flags - /// value (`&!`). - /// - /// This method is not equivalent to `self & !other` when `other` has unknown bits set. - /// `remove` won't truncate `other`, but the `!` operator will. - #[inline] - pub fn remove(&mut $self, $other: Self) - $remove_body - - /// The bitwise exclusive-or (`^`) of the bits in two flags values. - #[inline] - pub fn toggle(&mut $self, $other: Self) - $toggle_body - - /// Call `insert` when `value` is `true` or `remove` when `value` is `false`. - #[inline] - pub fn set(&mut $self, $other: Self, $value: bool) - $set_body - - /// The bitwise and (`&`) of the bits in two flags values. - #[inline] - #[must_use] - pub const fn intersection($self, $other: Self) -> Self - $intersection_body - - /// The bitwise or (`|`) of the bits in two flags values. - #[inline] - #[must_use] - pub const fn union($self, $other: Self) -> Self - $union_body - - /// The intersection of a source flags value with the complement of a target flags - /// value (`&!`). - /// - /// This method is not equivalent to `self & !other` when `other` has unknown bits set. - /// `difference` won't truncate `other`, but the `!` operator will. - #[inline] - #[must_use] - pub const fn difference($self, $other: Self) -> Self - $difference_body - - /// The bitwise exclusive-or (`^`) of the bits in two flags values. - #[inline] - #[must_use] - pub const fn symmetric_difference($self, $other: Self) -> Self - $symmetric_difference_body - - /// The bitwise negation (`!`) of the bits in a flags value, truncating the result. - #[inline] - #[must_use] - pub const fn complement($self) -> Self - $complement_body - } - }; -} - -/// A macro that matches flags values, similar to Rust's `match` statement. -/// -/// In a regular `match` statement, the syntax `Flag::A | Flag::B` is interpreted as an or-pattern, -/// instead of the bitwise-or of `Flag::A` and `Flag::B`. This can be surprising when combined with flags types -/// because `Flag::A | Flag::B` won't match the pattern `Flag::A | Flag::B`. This macro is an alternative to -/// `match` for flags values that doesn't have this issue. -/// -/// # Syntax -/// -/// ```ignore -/// bitflags_match!(expression, { -/// pattern1 => result1, -/// pattern2 => result2, -/// .. -/// _ => default_result, -/// }) -/// ``` -/// -/// The final `_ => default_result` arm is required, otherwise the macro will fail to compile. -/// -/// # Examples -/// -/// ```rust -/// use bitflags::{bitflags, bitflags_match}; -/// -/// bitflags! 
{ -/// #[derive(PartialEq)] -/// struct Flags: u8 { -/// const A = 1 << 0; -/// const B = 1 << 1; -/// const C = 1 << 2; -/// } -/// } -/// -/// let flags = Flags::A | Flags::B; -/// -/// // Prints `the value is A and B` -/// bitflags_match!(flags, { -/// Flags::A | Flags::B => println!("the value is A and B"), -/// _ => println!("the value is not A and B"), -/// }); -/// -/// // Prints `the value is not A` -/// bitflags_match!(flags, { -/// Flags::A => println!("the value is A"), -/// _ => println!("the value is not A"), -/// }); -/// ``` -/// -/// # How it works -/// -/// The macro expands to a series of `if` statements, **checking equality** between the input expression -/// and each pattern. This allows for correct matching of bitflag combinations, which is not possible -/// with a regular match expression due to the way bitflags are implemented. -/// -/// Patterns are evaluated in the order they appear in the macro. -#[macro_export] -macro_rules! bitflags_match { - ($operation:expr, { - $($t:tt)* - }) => { - // Expand to a closure so we can use `return` - // This makes it possible to apply attributes to the "match arms" - (|| { - $crate::__bitflags_match!($operation, { $($t)* }) - })() - }; -} - -/// Expand the `bitflags_match` macro -#[macro_export] -#[doc(hidden)] -macro_rules! __bitflags_match { - // Eat an optional `,` following a block match arm - ($operation:expr, { $pattern:expr => { $($body:tt)* } , $($t:tt)+ }) => { - $crate::__bitflags_match!($operation, { $pattern => { $($body)* } $($t)+ }) - }; - // Expand a block match arm `A => { .. }` - ($operation:expr, { $pattern:expr => { $($body:tt)* } $($t:tt)+ }) => { - { - if $operation == $pattern { - return { - $($body)* - }; - } - - $crate::__bitflags_match!($operation, { $($t)+ }) - } - }; - // Expand an expression match arm `A => x,` - ($operation:expr, { $pattern:expr => $body:expr , $($t:tt)+ }) => { - { - if $operation == $pattern { - return $body; - } - - $crate::__bitflags_match!($operation, { $($t)+ }) - } - }; - // Expand the default case - ($operation:expr, { _ => $default:expr $(,)? }) => { - $default - } -} - -/// A macro that processed the input to `bitflags!` and shuffles attributes around -/// based on whether or not they're "expression-safe". -/// -/// This macro is a token-tree muncher that works on 2 levels: -/// -/// For each attribute, we explicitly match on its identifier, like `cfg` to determine -/// whether or not it should be considered expression-safe. -/// -/// If you find yourself with an attribute that should be considered expression-safe -/// and isn't, it can be added here. -#[macro_export] -#[doc(hidden)] -macro_rules! __bitflags_expr_safe_attrs { - // Entrypoint: Move all flags and all attributes into `unprocessed` lists - // where they'll be munched one-at-a-time - ( - $(#[$inner:ident $($args:tt)*])* - { $e:expr } - ) => { - $crate::__bitflags_expr_safe_attrs! { - expr: { $e }, - attrs: { - // All attributes start here - unprocessed: [$(#[$inner $($args)*])*], - // Attributes that are safe on expressions go here - processed: [], - }, - } - }; - // Process the next attribute on the current flag - // `cfg`: The next flag should be propagated to expressions - // NOTE: You can copy this rules block and replace `cfg` with - // your attribute name that should be considered expression-safe - ( - expr: { $e:expr }, - attrs: { - unprocessed: [ - // cfg matched here - #[cfg $($args:tt)*] - $($attrs_rest:tt)* - ], - processed: [$($expr:tt)*], - }, - ) => { - $crate::__bitflags_expr_safe_attrs! 
{ - expr: { $e }, - attrs: { - unprocessed: [ - $($attrs_rest)* - ], - processed: [ - $($expr)* - // cfg added here - #[cfg $($args)*] - ], - }, - } - }; - // Process the next attribute on the current flag - // `$other`: The next flag should not be propagated to expressions - ( - expr: { $e:expr }, - attrs: { - unprocessed: [ - // $other matched here - #[$other:ident $($args:tt)*] - $($attrs_rest:tt)* - ], - processed: [$($expr:tt)*], - }, - ) => { - $crate::__bitflags_expr_safe_attrs! { - expr: { $e }, - attrs: { - unprocessed: [ - $($attrs_rest)* - ], - processed: [ - // $other not added here - $($expr)* - ], - }, - } - }; - // Once all attributes on all flags are processed, generate the actual code - ( - expr: { $e:expr }, - attrs: { - unprocessed: [], - processed: [$(#[$expr:ident $($exprargs:tt)*])*], - }, - ) => { - $(#[$expr $($exprargs)*])* - { $e } - } -} - -/// Implement a flag, which may be a wildcard `_`. -#[macro_export] -#[doc(hidden)] -macro_rules! __bitflags_flag { - ( - { - name: _, - named: { $($named:tt)* }, - unnamed: { $($unnamed:tt)* }, - } - ) => { - $($unnamed)* - }; - ( - { - name: $Flag:ident, - named: { $($named:tt)* }, - unnamed: { $($unnamed:tt)* }, - } - ) => { - $($named)* - }; -} - -#[macro_use] -mod public; -#[macro_use] -mod internal; -#[macro_use] -mod external; - -#[cfg(feature = "example_generated")] -pub mod example_generated; - -#[cfg(test)] -mod tests; diff --git a/vendor/bitflags/src/parser.rs b/vendor/bitflags/src/parser.rs deleted file mode 100644 index 34b432da39b8fd..00000000000000 --- a/vendor/bitflags/src/parser.rs +++ /dev/null @@ -1,332 +0,0 @@ -/*! -Parsing flags from text. - -Format and parse a flags value as text using the following grammar: - -- _Flags:_ (_Whitespace_ _Flag_ _Whitespace_)`|`* -- _Flag:_ _Name_ | _Hex Number_ -- _Name:_ The name of any defined flag -- _Hex Number_: `0x`([0-9a-fA-F])* -- _Whitespace_: (\s)* - -As an example, this is how `Flags::A | Flags::B | 0x0c` can be represented as text: - -```text -A | B | 0x0c -``` - -Alternatively, it could be represented without whitespace: - -```text -A|B|0x0C -``` - -Note that identifiers are *case-sensitive*, so the following is *not equivalent*: - -```text -a|b|0x0C -``` -*/ - -#![allow(clippy::let_unit_value)] - -use core::fmt::{self, Write}; - -use crate::{Bits, Flags}; - -/** -Write a flags value as text. - -Any bits that aren't part of a contained flag will be formatted as a hex number. -*/ -pub fn to_writer(flags: &B, mut writer: impl Write) -> Result<(), fmt::Error> -where - B::Bits: WriteHex, -{ - // A formatter for bitflags that produces text output like: - // - // A | B | 0xf6 - // - // The names of set flags are written in a bar-separated-format, - // followed by a hex number of any remaining bits that are set - // but don't correspond to any flags. 
- - // Iterate over known flag values - let mut first = true; - let mut iter = flags.iter_names(); - for (name, _) in &mut iter { - if !first { - writer.write_str(" | ")?; - } - - first = false; - writer.write_str(name)?; - } - - // Append any extra bits that correspond to flags to the end of the format - let remaining = iter.remaining().bits(); - if remaining != B::Bits::EMPTY { - if !first { - writer.write_str(" | ")?; - } - - writer.write_str("0x")?; - remaining.write_hex(writer)?; - } - - fmt::Result::Ok(()) -} - -#[cfg(feature = "serde")] -pub(crate) struct AsDisplay<'a, B>(pub(crate) &'a B); - -#[cfg(feature = "serde")] -impl<'a, B: Flags> fmt::Display for AsDisplay<'a, B> -where - B::Bits: WriteHex, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - to_writer(self.0, f) - } -} - -/** -Parse a flags value from text. - -This function will fail on any names that don't correspond to defined flags. -Unknown bits will be retained. -*/ -pub fn from_str(input: &str) -> Result -where - B::Bits: ParseHex, -{ - let mut parsed_flags = B::empty(); - - // If the input is empty then return an empty set of flags - if input.trim().is_empty() { - return Ok(parsed_flags); - } - - for flag in input.split('|') { - let flag = flag.trim(); - - // If the flag is empty then we've got missing input - if flag.is_empty() { - return Err(ParseError::empty_flag()); - } - - // If the flag starts with `0x` then it's a hex number - // Parse it directly to the underlying bits type - let parsed_flag = if let Some(flag) = flag.strip_prefix("0x") { - let bits = - ::parse_hex(flag).map_err(|_| ParseError::invalid_hex_flag(flag))?; - - B::from_bits_retain(bits) - } - // Otherwise the flag is a name - // The generated flags type will determine whether - // or not it's a valid identifier - else { - B::from_name(flag).ok_or_else(|| ParseError::invalid_named_flag(flag))? - }; - - parsed_flags.insert(parsed_flag); - } - - Ok(parsed_flags) -} - -/** -Write a flags value as text, ignoring any unknown bits. -*/ -pub fn to_writer_truncate(flags: &B, writer: impl Write) -> Result<(), fmt::Error> -where - B::Bits: WriteHex, -{ - to_writer(&B::from_bits_truncate(flags.bits()), writer) -} - -/** -Parse a flags value from text. - -This function will fail on any names that don't correspond to defined flags. -Unknown bits will be ignored. -*/ -pub fn from_str_truncate(input: &str) -> Result -where - B::Bits: ParseHex, -{ - Ok(B::from_bits_truncate(from_str::(input)?.bits())) -} - -/** -Write only the contained, defined, named flags in a flags value as text. -*/ -pub fn to_writer_strict(flags: &B, mut writer: impl Write) -> Result<(), fmt::Error> { - // This is a simplified version of `to_writer` that ignores - // any bits not corresponding to a named flag - - let mut first = true; - let mut iter = flags.iter_names(); - for (name, _) in &mut iter { - if !first { - writer.write_str(" | ")?; - } - - first = false; - writer.write_str(name)?; - } - - fmt::Result::Ok(()) -} - -/** -Parse a flags value from text. - -This function will fail on any names that don't correspond to defined flags. -This function will fail to parse hex values. 
-*/ -pub fn from_str_strict(input: &str) -> Result { - // This is a simplified version of `from_str` that ignores - // any bits not corresponding to a named flag - - let mut parsed_flags = B::empty(); - - // If the input is empty then return an empty set of flags - if input.trim().is_empty() { - return Ok(parsed_flags); - } - - for flag in input.split('|') { - let flag = flag.trim(); - - // If the flag is empty then we've got missing input - if flag.is_empty() { - return Err(ParseError::empty_flag()); - } - - // If the flag starts with `0x` then it's a hex number - // These aren't supported in the strict parser - if flag.starts_with("0x") { - return Err(ParseError::invalid_hex_flag("unsupported hex flag value")); - } - - let parsed_flag = B::from_name(flag).ok_or_else(|| ParseError::invalid_named_flag(flag))?; - - parsed_flags.insert(parsed_flag); - } - - Ok(parsed_flags) -} - -/** -Encode a value as a hex string. - -Implementors of this trait should not write the `0x` prefix. -*/ -pub trait WriteHex { - /// Write the value as hex. - fn write_hex(&self, writer: W) -> fmt::Result; -} - -/** -Parse a value from a hex string. -*/ -pub trait ParseHex { - /// Parse the value from hex. - fn parse_hex(input: &str) -> Result - where - Self: Sized; -} - -/// An error encountered while parsing flags from text. -#[derive(Debug)] -pub struct ParseError(ParseErrorKind); - -#[derive(Debug)] -#[allow(clippy::enum_variant_names)] -enum ParseErrorKind { - EmptyFlag, - InvalidNamedFlag { - #[cfg(not(feature = "std"))] - got: (), - #[cfg(feature = "std")] - got: String, - }, - InvalidHexFlag { - #[cfg(not(feature = "std"))] - got: (), - #[cfg(feature = "std")] - got: String, - }, -} - -impl ParseError { - /// An invalid hex flag was encountered. - pub fn invalid_hex_flag(flag: impl fmt::Display) -> Self { - let _flag = flag; - - let got = { - #[cfg(feature = "std")] - { - _flag.to_string() - } - }; - - ParseError(ParseErrorKind::InvalidHexFlag { got }) - } - - /// A named flag that doesn't correspond to any on the flags type was encountered. - pub fn invalid_named_flag(flag: impl fmt::Display) -> Self { - let _flag = flag; - - let got = { - #[cfg(feature = "std")] - { - _flag.to_string() - } - }; - - ParseError(ParseErrorKind::InvalidNamedFlag { got }) - } - - /// A hex or named flag wasn't found between separators. - pub const fn empty_flag() -> Self { - ParseError(ParseErrorKind::EmptyFlag) - } -} - -impl fmt::Display for ParseError { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match &self.0 { - ParseErrorKind::InvalidNamedFlag { got } => { - let _got = got; - - write!(f, "unrecognized named flag")?; - - #[cfg(feature = "std")] - { - write!(f, " `{}`", _got)?; - } - } - ParseErrorKind::InvalidHexFlag { got } => { - let _got = got; - - write!(f, "invalid hex flag")?; - - #[cfg(feature = "std")] - { - write!(f, " `{}`", _got)?; - } - } - ParseErrorKind::EmptyFlag => { - write!(f, "encountered empty flag")?; - } - } - - Ok(()) - } -} - -#[cfg(feature = "std")] -impl std::error::Error for ParseError {} diff --git a/vendor/bitflags/src/public.rs b/vendor/bitflags/src/public.rs deleted file mode 100644 index 1326a572a53205..00000000000000 --- a/vendor/bitflags/src/public.rs +++ /dev/null @@ -1,580 +0,0 @@ -//! Generate the user-facing flags type. -//! -//! The code here belongs to the end-user, so new trait implementations and methods can't be -//! added without potentially breaking users. - -/// Declare the user-facing bitflags struct. 
-/// -/// This type is guaranteed to be a newtype with a `bitflags`-facing type as its single field. -#[macro_export] -#[doc(hidden)] -macro_rules! __declare_public_bitflags { - ( - $(#[$outer:meta])* - $vis:vis struct $PublicBitFlags:ident - ) => { - $(#[$outer])* - $vis struct $PublicBitFlags(<$PublicBitFlags as $crate::__private::PublicFlags>::Internal); - }; -} - -/// Implement functions on the public (user-facing) bitflags type. -/// -/// We need to be careful about adding new methods and trait implementations here because they -/// could conflict with items added by the end-user. -#[macro_export] -#[doc(hidden)] -macro_rules! __impl_public_bitflags_forward { - ( - $(#[$outer:meta])* - $PublicBitFlags:ident: $T:ty, $InternalBitFlags:ident - ) => { - $crate::__impl_bitflags! { - params: self, bits, name, other, value; - $(#[$outer])* - $PublicBitFlags: $T { - fn empty() { - Self($InternalBitFlags::empty()) - } - - fn all() { - Self($InternalBitFlags::all()) - } - - fn bits(&self) { - self.0.bits() - } - - fn from_bits(bits) { - match $InternalBitFlags::from_bits(bits) { - $crate::__private::core::option::Option::Some(bits) => $crate::__private::core::option::Option::Some(Self(bits)), - $crate::__private::core::option::Option::None => $crate::__private::core::option::Option::None, - } - } - - fn from_bits_truncate(bits) { - Self($InternalBitFlags::from_bits_truncate(bits)) - } - - fn from_bits_retain(bits) { - Self($InternalBitFlags::from_bits_retain(bits)) - } - - fn from_name(name) { - match $InternalBitFlags::from_name(name) { - $crate::__private::core::option::Option::Some(bits) => $crate::__private::core::option::Option::Some(Self(bits)), - $crate::__private::core::option::Option::None => $crate::__private::core::option::Option::None, - } - } - - fn is_empty(&self) { - self.0.is_empty() - } - - fn is_all(&self) { - self.0.is_all() - } - - fn intersects(&self, other) { - self.0.intersects(other.0) - } - - fn contains(&self, other) { - self.0.contains(other.0) - } - - fn insert(&mut self, other) { - self.0.insert(other.0) - } - - fn remove(&mut self, other) { - self.0.remove(other.0) - } - - fn toggle(&mut self, other) { - self.0.toggle(other.0) - } - - fn set(&mut self, other, value) { - self.0.set(other.0, value) - } - - fn intersection(self, other) { - Self(self.0.intersection(other.0)) - } - - fn union(self, other) { - Self(self.0.union(other.0)) - } - - fn difference(self, other) { - Self(self.0.difference(other.0)) - } - - fn symmetric_difference(self, other) { - Self(self.0.symmetric_difference(other.0)) - } - - fn complement(self) { - Self(self.0.complement()) - } - } - } - }; -} - -/// Implement functions on the public (user-facing) bitflags type. -/// -/// We need to be careful about adding new methods and trait implementations here because they -/// could conflict with items added by the end-user. -#[macro_export] -#[doc(hidden)] -macro_rules! __impl_public_bitflags { - ( - $(#[$outer:meta])* - $BitFlags:ident: $T:ty, $PublicBitFlags:ident { - $( - $(#[$inner:ident $($args:tt)*])* - const $Flag:tt = $value:expr; - )* - } - ) => { - $crate::__impl_bitflags! 
{ - params: self, bits, name, other, value; - $(#[$outer])* - $BitFlags: $T { - fn empty() { - Self(<$T as $crate::Bits>::EMPTY) - } - - fn all() { - let mut truncated = <$T as $crate::Bits>::EMPTY; - let mut i = 0; - - $( - $crate::__bitflags_expr_safe_attrs!( - $(#[$inner $($args)*])* - {{ - let flag = <$PublicBitFlags as $crate::Flags>::FLAGS[i].value().bits(); - - truncated = truncated | flag; - i += 1; - }} - ); - )* - - let _ = i; - Self(truncated) - } - - fn bits(&self) { - self.0 - } - - fn from_bits(bits) { - let truncated = Self::from_bits_truncate(bits).0; - - if truncated == bits { - $crate::__private::core::option::Option::Some(Self(bits)) - } else { - $crate::__private::core::option::Option::None - } - } - - fn from_bits_truncate(bits) { - Self(bits & Self::all().0) - } - - fn from_bits_retain(bits) { - Self(bits) - } - - fn from_name(name) { - $( - $crate::__bitflags_flag!({ - name: $Flag, - named: { - $crate::__bitflags_expr_safe_attrs!( - $(#[$inner $($args)*])* - { - if name == $crate::__private::core::stringify!($Flag) { - return $crate::__private::core::option::Option::Some(Self($PublicBitFlags::$Flag.bits())); - } - } - ); - }, - unnamed: {}, - }); - )* - - let _ = name; - $crate::__private::core::option::Option::None - } - - fn is_empty(&self) { - self.0 == <$T as $crate::Bits>::EMPTY - } - - fn is_all(&self) { - // NOTE: We check against `Self::all` here, not `Self::Bits::ALL` - // because the set of all flags may not use all bits - Self::all().0 | self.0 == self.0 - } - - fn intersects(&self, other) { - self.0 & other.0 != <$T as $crate::Bits>::EMPTY - } - - fn contains(&self, other) { - self.0 & other.0 == other.0 - } - - fn insert(&mut self, other) { - *self = Self(self.0).union(other); - } - - fn remove(&mut self, other) { - *self = Self(self.0).difference(other); - } - - fn toggle(&mut self, other) { - *self = Self(self.0).symmetric_difference(other); - } - - fn set(&mut self, other, value) { - if value { - self.insert(other); - } else { - self.remove(other); - } - } - - fn intersection(self, other) { - Self(self.0 & other.0) - } - - fn union(self, other) { - Self(self.0 | other.0) - } - - fn difference(self, other) { - Self(self.0 & !other.0) - } - - fn symmetric_difference(self, other) { - Self(self.0 ^ other.0) - } - - fn complement(self) { - Self::from_bits_truncate(!self.0) - } - } - } - }; -} - -/// Implement iterators on the public (user-facing) bitflags type. -#[macro_export] -#[doc(hidden)] -macro_rules! __impl_public_bitflags_iter { - ( - $(#[$outer:meta])* - $BitFlags:ident: $T:ty, $PublicBitFlags:ident - ) => { - $(#[$outer])* - impl $BitFlags { - /// Yield a set of contained flags values. - /// - /// Each yielded flags value will correspond to a defined named flag. Any unknown bits - /// will be yielded together as a final flags value. - #[inline] - pub const fn iter(&self) -> $crate::iter::Iter<$PublicBitFlags> { - $crate::iter::Iter::__private_const_new( - <$PublicBitFlags as $crate::Flags>::FLAGS, - $PublicBitFlags::from_bits_retain(self.bits()), - $PublicBitFlags::from_bits_retain(self.bits()), - ) - } - - /// Yield a set of contained named flags values. - /// - /// This method is like [`iter`](#method.iter), except only yields bits in contained named flags. - /// Any unknown bits, or bits not corresponding to a contained flag will not be yielded. 
- #[inline] - pub const fn iter_names(&self) -> $crate::iter::IterNames<$PublicBitFlags> { - $crate::iter::IterNames::__private_const_new( - <$PublicBitFlags as $crate::Flags>::FLAGS, - $PublicBitFlags::from_bits_retain(self.bits()), - $PublicBitFlags::from_bits_retain(self.bits()), - ) - } - } - - $(#[$outer:meta])* - impl $crate::__private::core::iter::IntoIterator for $BitFlags { - type Item = $PublicBitFlags; - type IntoIter = $crate::iter::Iter<$PublicBitFlags>; - - fn into_iter(self) -> Self::IntoIter { - self.iter() - } - } - }; -} - -/// Implement traits on the public (user-facing) bitflags type. -#[macro_export] -#[doc(hidden)] -macro_rules! __impl_public_bitflags_ops { - ( - $(#[$outer:meta])* - $PublicBitFlags:ident - ) => { - - $(#[$outer])* - impl $crate::__private::core::fmt::Binary for $PublicBitFlags { - fn fmt( - &self, - f: &mut $crate::__private::core::fmt::Formatter, - ) -> $crate::__private::core::fmt::Result { - let inner = self.0; - $crate::__private::core::fmt::Binary::fmt(&inner, f) - } - } - - $(#[$outer])* - impl $crate::__private::core::fmt::Octal for $PublicBitFlags { - fn fmt( - &self, - f: &mut $crate::__private::core::fmt::Formatter, - ) -> $crate::__private::core::fmt::Result { - let inner = self.0; - $crate::__private::core::fmt::Octal::fmt(&inner, f) - } - } - - $(#[$outer])* - impl $crate::__private::core::fmt::LowerHex for $PublicBitFlags { - fn fmt( - &self, - f: &mut $crate::__private::core::fmt::Formatter, - ) -> $crate::__private::core::fmt::Result { - let inner = self.0; - $crate::__private::core::fmt::LowerHex::fmt(&inner, f) - } - } - - $(#[$outer])* - impl $crate::__private::core::fmt::UpperHex for $PublicBitFlags { - fn fmt( - &self, - f: &mut $crate::__private::core::fmt::Formatter, - ) -> $crate::__private::core::fmt::Result { - let inner = self.0; - $crate::__private::core::fmt::UpperHex::fmt(&inner, f) - } - } - - $(#[$outer])* - impl $crate::__private::core::ops::BitOr for $PublicBitFlags { - type Output = Self; - - /// The bitwise or (`|`) of the bits in two flags values. - #[inline] - fn bitor(self, other: $PublicBitFlags) -> Self { - self.union(other) - } - } - - $(#[$outer])* - impl $crate::__private::core::ops::BitOrAssign for $PublicBitFlags { - /// The bitwise or (`|`) of the bits in two flags values. - #[inline] - fn bitor_assign(&mut self, other: Self) { - self.insert(other); - } - } - - $(#[$outer])* - impl $crate::__private::core::ops::BitXor for $PublicBitFlags { - type Output = Self; - - /// The bitwise exclusive-or (`^`) of the bits in two flags values. - #[inline] - fn bitxor(self, other: Self) -> Self { - self.symmetric_difference(other) - } - } - - $(#[$outer])* - impl $crate::__private::core::ops::BitXorAssign for $PublicBitFlags { - /// The bitwise exclusive-or (`^`) of the bits in two flags values. - #[inline] - fn bitxor_assign(&mut self, other: Self) { - self.toggle(other); - } - } - - $(#[$outer])* - impl $crate::__private::core::ops::BitAnd for $PublicBitFlags { - type Output = Self; - - /// The bitwise and (`&`) of the bits in two flags values. - #[inline] - fn bitand(self, other: Self) -> Self { - self.intersection(other) - } - } - - $(#[$outer])* - impl $crate::__private::core::ops::BitAndAssign for $PublicBitFlags { - /// The bitwise and (`&`) of the bits in two flags values. 
- #[inline] - fn bitand_assign(&mut self, other: Self) { - *self = Self::from_bits_retain(self.bits()).intersection(other); - } - } - - $(#[$outer])* - impl $crate::__private::core::ops::Sub for $PublicBitFlags { - type Output = Self; - - /// The intersection of a source flags value with the complement of a target flags value (`&!`). - /// - /// This method is not equivalent to `self & !other` when `other` has unknown bits set. - /// `difference` won't truncate `other`, but the `!` operator will. - #[inline] - fn sub(self, other: Self) -> Self { - self.difference(other) - } - } - - $(#[$outer])* - impl $crate::__private::core::ops::SubAssign for $PublicBitFlags { - /// The intersection of a source flags value with the complement of a target flags value (`&!`). - /// - /// This method is not equivalent to `self & !other` when `other` has unknown bits set. - /// `difference` won't truncate `other`, but the `!` operator will. - #[inline] - fn sub_assign(&mut self, other: Self) { - self.remove(other); - } - } - - $(#[$outer])* - impl $crate::__private::core::ops::Not for $PublicBitFlags { - type Output = Self; - - /// The bitwise negation (`!`) of the bits in a flags value, truncating the result. - #[inline] - fn not(self) -> Self { - self.complement() - } - } - - $(#[$outer])* - impl $crate::__private::core::iter::Extend<$PublicBitFlags> for $PublicBitFlags { - /// The bitwise or (`|`) of the bits in each flags value. - fn extend>( - &mut self, - iterator: T, - ) { - for item in iterator { - self.insert(item) - } - } - } - - $(#[$outer])* - impl $crate::__private::core::iter::FromIterator<$PublicBitFlags> for $PublicBitFlags { - /// The bitwise or (`|`) of the bits in each flags value. - fn from_iter>( - iterator: T, - ) -> Self { - use $crate::__private::core::iter::Extend; - - let mut result = Self::empty(); - result.extend(iterator); - result - } - } - }; -} - -/// Implement constants on the public (user-facing) bitflags type. -#[macro_export] -#[doc(hidden)] -macro_rules! 
__impl_public_bitflags_consts { - ( - $(#[$outer:meta])* - $PublicBitFlags:ident: $T:ty { - $( - $(#[$inner:ident $($args:tt)*])* - const $Flag:tt = $value:expr; - )* - } - ) => { - $(#[$outer])* - impl $PublicBitFlags { - $( - $crate::__bitflags_flag!({ - name: $Flag, - named: { - $(#[$inner $($args)*])* - #[allow( - deprecated, - non_upper_case_globals, - )] - pub const $Flag: Self = Self::from_bits_retain($value); - }, - unnamed: {}, - }); - )* - } - - $(#[$outer])* - impl $crate::Flags for $PublicBitFlags { - const FLAGS: &'static [$crate::Flag<$PublicBitFlags>] = &[ - $( - $crate::__bitflags_flag!({ - name: $Flag, - named: { - $crate::__bitflags_expr_safe_attrs!( - $(#[$inner $($args)*])* - { - #[allow( - deprecated, - non_upper_case_globals, - )] - $crate::Flag::new($crate::__private::core::stringify!($Flag), $PublicBitFlags::$Flag) - } - ) - }, - unnamed: { - $crate::__bitflags_expr_safe_attrs!( - $(#[$inner $($args)*])* - { - #[allow( - deprecated, - non_upper_case_globals, - )] - $crate::Flag::new("", $PublicBitFlags::from_bits_retain($value)) - } - ) - }, - }), - )* - ]; - - type Bits = $T; - - fn bits(&self) -> $T { - $PublicBitFlags::bits(self) - } - - fn from_bits_retain(bits: $T) -> $PublicBitFlags { - $PublicBitFlags::from_bits_retain(bits) - } - } - }; -} diff --git a/vendor/bitflags/src/tests.rs b/vendor/bitflags/src/tests.rs deleted file mode 100644 index 0770e1b3f93487..00000000000000 --- a/vendor/bitflags/src/tests.rs +++ /dev/null @@ -1,135 +0,0 @@ -mod all; -mod bitflags_match; -mod bits; -mod clear; -mod complement; -mod contains; -mod difference; -mod empty; -mod eq; -mod extend; -mod flags; -mod fmt; -mod from_bits; -mod from_bits_retain; -mod from_bits_truncate; -mod from_name; -mod insert; -mod intersection; -mod intersects; -mod is_all; -mod is_empty; -mod iter; -mod parser; -mod remove; -mod symmetric_difference; -mod truncate; -mod union; -mod unknown; - -bitflags! 
{ - #[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Clone, Copy)] - pub struct TestFlags: u8 { - /// 1 - const A = 1; - - /// 1 << 1 - const B = 1 << 1; - - /// 1 << 2 - const C = 1 << 2; - - /// 1 | (1 << 1) | (1 << 2) - const ABC = Self::A.bits() | Self::B.bits() | Self::C.bits(); - } - - #[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Clone, Copy)] - pub struct TestFlagsInvert: u8 { - /// 1 | (1 << 1) | (1 << 2) - const ABC = Self::A.bits() | Self::B.bits() | Self::C.bits(); - - /// 1 - const A = 1; - - /// 1 << 1 - const B = 1 << 1; - - /// 1 << 2 - const C = 1 << 2; - } - - #[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Clone, Copy)] - pub struct TestZero: u8 { - /// 0 - const ZERO = 0; - } - - #[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Clone, Copy)] - pub struct TestZeroOne: u8 { - /// 0 - const ZERO = 0; - - /// 1 - const ONE = 1; - } - - #[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Clone, Copy)] - pub struct TestUnicode: u8 { - /// 1 - const 一 = 1; - - /// 2 - const 二 = 1 << 1; - } - - #[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Clone, Copy)] - pub struct TestEmpty: u8 {} - - #[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Clone, Copy)] - pub struct TestOverlapping: u8 { - /// 1 | (1 << 1) - const AB = 1 | (1 << 1); - - /// (1 << 1) | (1 << 2) - const BC = (1 << 1) | (1 << 2); - } - - #[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Clone, Copy)] - pub struct TestOverlappingFull: u8 { - /// 1 - const A = 1; - - /// 1 - const B = 1; - - /// 1 - const C = 1; - - /// 2 - const D = 1 << 1; - } - - #[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Clone, Copy)] - pub struct TestExternal: u8 { - /// 1 - const A = 1; - - /// 1 << 1 - const B = 1 << 1; - - /// 1 << 2 - const C = 1 << 2; - - /// 1 | (1 << 1) | (1 << 2) - const ABC = Self::A.bits() | Self::B.bits() | Self::C.bits(); - - /// External - const _ = !0; - } - - #[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Clone, Copy)] - pub struct TestExternalFull: u8 { - /// External - const _ = !0; - } -} diff --git a/vendor/bitflags/src/tests/all.rs b/vendor/bitflags/src/tests/all.rs deleted file mode 100644 index cceb93a4691b07..00000000000000 --- a/vendor/bitflags/src/tests/all.rs +++ /dev/null @@ -1,23 +0,0 @@ -use super::*; - -use crate::Flags; - -#[test] -fn cases() { - case(1 | 1 << 1 | 1 << 2, TestFlags::all); - - case(0, TestZero::all); - - case(0, TestEmpty::all); - - case(!0, TestExternal::all); -} - -#[track_caller] -fn case(expected: T::Bits, inherent: impl FnOnce() -> T) -where - ::Bits: std::fmt::Debug + PartialEq, -{ - assert_eq!(expected, inherent().bits(), "T::all()"); - assert_eq!(expected, T::all().bits(), "Flags::all()"); -} diff --git a/vendor/bitflags/src/tests/bitflags_match.rs b/vendor/bitflags/src/tests/bitflags_match.rs deleted file mode 100644 index 93190f8bb4ef64..00000000000000 --- a/vendor/bitflags/src/tests/bitflags_match.rs +++ /dev/null @@ -1,93 +0,0 @@ -bitflags! 
{ - #[derive(PartialEq)] - struct Flags: u8 { - const A = 1 << 0; - const B = 1 << 1; - const C = 1 << 2; - const D = 1 << 3; - } -} - -fn flag_to_string(flag: Flags) -> String { - bitflags_match!(flag, { - Flags::A => "A".to_string(), - Flags::B => { "B".to_string() } - Flags::C => "C".to_string(), - Flags::D => "D".to_string(), - Flags::A | Flags::B => "A or B".to_string(), - Flags::A & Flags::B => { "A and B | empty".to_string() }, - Flags::A ^ Flags::B => "A xor B".to_string(), - Flags::A | Flags::B | Flags::C => "A or B or C".to_string(), - Flags::A & Flags::B & Flags::C => "A and B and C".to_string(), - Flags::A ^ Flags::B ^ Flags::C => "A xor B xor C".to_string(), - Flags::A | Flags::B | Flags::C | Flags::D => "All flags".to_string(), - _ => "Unknown combination".to_string() - }) -} - -#[test] -fn test_single_flags() { - assert_eq!(flag_to_string(Flags::A), "A"); - assert_eq!(flag_to_string(Flags::B), "B"); - assert_eq!(flag_to_string(Flags::C), "C"); - assert_eq!(flag_to_string(Flags::D), "D"); -} - -#[test] -fn test_or_operations() { - assert_eq!(flag_to_string(Flags::A | Flags::B), "A or B"); - assert_eq!( - flag_to_string(Flags::A | Flags::B | Flags::C), - "A or B or C" - ); - assert_eq!( - flag_to_string(Flags::A | Flags::B | Flags::C | Flags::D), - "All flags" - ); -} - -#[test] -fn test_and_operations() { - assert_eq!(flag_to_string(Flags::A & Flags::A), "A"); - assert_eq!(flag_to_string(Flags::A & Flags::B), "A and B | empty"); - assert_eq!( - flag_to_string(Flags::A & Flags::B & Flags::C), - "A and B | empty" - ); // Since A, B, and C are mutually exclusive, the result of A & B & C is 0 ==> A & B & C = 0000 (i.e., empty). - // However, in the bitflags_match! statement (actually is if {..} else if {..} .. else {..}), - // the "A & B = 0000" condition is listed first, so 0000 will match "A & B" first, - // resulting in the output of the "A and B | empty" branch. 
- assert_eq!( - flag_to_string(Flags::A & Flags::B & Flags::C & Flags::D), - "A and B | empty" - ); -} - -#[test] -fn test_xor_operations() { - assert_eq!(flag_to_string(Flags::A ^ Flags::B), "A or B"); // A | B = A ^ B == 0011 - assert_eq!(flag_to_string(Flags::A ^ Flags::A), "A and B | empty"); - assert_eq!( - flag_to_string(Flags::A ^ Flags::B ^ Flags::C), - "A or B or C" - ); -} - -#[test] -fn test_complex_operations() { - assert_eq!(flag_to_string(Flags::A | (Flags::B & Flags::C)), "A"); - assert_eq!( - flag_to_string((Flags::A | Flags::B) & (Flags::B | Flags::C)), - "B" - ); - assert_eq!( - flag_to_string(Flags::A ^ (Flags::B | Flags::C)), - "A or B or C" - ); -} - -#[test] -fn test_empty_and_full_flags() { - assert_eq!(flag_to_string(Flags::empty()), "A and B | empty"); - assert_eq!(flag_to_string(Flags::all()), "All flags"); -} diff --git a/vendor/bitflags/src/tests/bits.rs b/vendor/bitflags/src/tests/bits.rs deleted file mode 100644 index 678f153e36b15a..00000000000000 --- a/vendor/bitflags/src/tests/bits.rs +++ /dev/null @@ -1,36 +0,0 @@ -use super::*; - -use crate::Flags; - -#[test] -fn cases() { - case(0, TestFlags::empty(), TestFlags::bits); - - case(1, TestFlags::A, TestFlags::bits); - case(1 | 1 << 1 | 1 << 2, TestFlags::ABC, TestFlags::bits); - - case(!0, TestFlags::from_bits_retain(u8::MAX), TestFlags::bits); - case(1 << 3, TestFlags::from_bits_retain(1 << 3), TestFlags::bits); - - case(1 << 3, TestZero::from_bits_retain(1 << 3), TestZero::bits); - - case(1 << 3, TestEmpty::from_bits_retain(1 << 3), TestEmpty::bits); - - case( - 1 << 4 | 1 << 6, - TestExternal::from_bits_retain(1 << 4 | 1 << 6), - TestExternal::bits, - ); -} - -#[track_caller] -fn case( - expected: T::Bits, - value: T, - inherent: impl FnOnce(&T) -> T::Bits, -) where - T::Bits: std::fmt::Debug + PartialEq, -{ - assert_eq!(expected, inherent(&value), "{:?}.bits()", value); - assert_eq!(expected, Flags::bits(&value), "Flags::bits({:?})", value); -} diff --git a/vendor/bitflags/src/tests/clear.rs b/vendor/bitflags/src/tests/clear.rs deleted file mode 100644 index 2d42cce4e3f98b..00000000000000 --- a/vendor/bitflags/src/tests/clear.rs +++ /dev/null @@ -1,27 +0,0 @@ -use super::*; - -use crate::Flags; - -#[test] -fn cases() { - case(TestFlags::from_bits_retain(0)); - - case(TestFlags::from_bits_retain(1 << 3)); - - case(TestFlags::ABC | TestFlags::from_bits_retain(1 << 3)); - - case(TestZero::empty()); - - case(TestZero::all()); - - case(TestFlags::from_bits_retain(1 << 3) | TestFlags::all()); -} - -#[track_caller] -fn case(mut flags: T) -where - T: std::fmt::Debug + PartialEq + Copy, -{ - flags.clear(); - assert_eq!(flags, T::empty(), "{:?}.clear()", flags); -} diff --git a/vendor/bitflags/src/tests/complement.rs b/vendor/bitflags/src/tests/complement.rs deleted file mode 100644 index ac7a421af0beb6..00000000000000 --- a/vendor/bitflags/src/tests/complement.rs +++ /dev/null @@ -1,53 +0,0 @@ -use super::*; - -use crate::Flags; - -#[test] -fn cases() { - case(0, TestFlags::all(), TestFlags::complement); - case(0, TestFlags::from_bits_retain(!0), TestFlags::complement); - - case(1 | 1 << 1, TestFlags::C, TestFlags::complement); - case( - 1 | 1 << 1, - TestFlags::C | TestFlags::from_bits_retain(1 << 3), - TestFlags::complement, - ); - - case( - 1 | 1 << 1 | 1 << 2, - TestFlags::empty(), - TestFlags::complement, - ); - case( - 1 | 1 << 1 | 1 << 2, - TestFlags::from_bits_retain(1 << 3), - TestFlags::complement, - ); - - case(0, TestZero::empty(), TestZero::complement); - - case(0, TestEmpty::empty(), 
TestEmpty::complement); - - case(1 << 2, TestOverlapping::AB, TestOverlapping::complement); - - case(!0, TestExternal::empty(), TestExternal::complement); -} - -#[track_caller] -fn case + Copy>( - expected: T::Bits, - value: T, - inherent: impl FnOnce(T) -> T, -) where - T::Bits: std::fmt::Debug + PartialEq, -{ - assert_eq!(expected, inherent(value).bits(), "{:?}.complement()", value); - assert_eq!( - expected, - Flags::complement(value).bits(), - "Flags::complement({:?})", - value - ); - assert_eq!(expected, (!value).bits(), "!{:?}", value); -} diff --git a/vendor/bitflags/src/tests/contains.rs b/vendor/bitflags/src/tests/contains.rs deleted file mode 100644 index 12428ddcb09c76..00000000000000 --- a/vendor/bitflags/src/tests/contains.rs +++ /dev/null @@ -1,108 +0,0 @@ -use super::*; - -use crate::Flags; - -#[test] -fn cases() { - case( - TestFlags::empty(), - &[ - (TestFlags::empty(), true), - (TestFlags::A, false), - (TestFlags::B, false), - (TestFlags::C, false), - (TestFlags::from_bits_retain(1 << 3), false), - ], - TestFlags::contains, - ); - - case( - TestFlags::A, - &[ - (TestFlags::empty(), true), - (TestFlags::A, true), - (TestFlags::B, false), - (TestFlags::C, false), - (TestFlags::ABC, false), - (TestFlags::from_bits_retain(1 << 3), false), - (TestFlags::from_bits_retain(1 | (1 << 3)), false), - ], - TestFlags::contains, - ); - - case( - TestFlags::ABC, - &[ - (TestFlags::empty(), true), - (TestFlags::A, true), - (TestFlags::B, true), - (TestFlags::C, true), - (TestFlags::ABC, true), - (TestFlags::from_bits_retain(1 << 3), false), - ], - TestFlags::contains, - ); - - case( - TestFlags::from_bits_retain(1 << 3), - &[ - (TestFlags::empty(), true), - (TestFlags::A, false), - (TestFlags::B, false), - (TestFlags::C, false), - (TestFlags::from_bits_retain(1 << 3), true), - ], - TestFlags::contains, - ); - - case( - TestZero::ZERO, - &[(TestZero::ZERO, true)], - TestZero::contains, - ); - - case( - TestOverlapping::AB, - &[ - (TestOverlapping::AB, true), - (TestOverlapping::BC, false), - (TestOverlapping::from_bits_retain(1 << 1), true), - ], - TestOverlapping::contains, - ); - - case( - TestExternal::all(), - &[ - (TestExternal::A, true), - (TestExternal::B, true), - (TestExternal::C, true), - (TestExternal::from_bits_retain(1 << 5 | 1 << 7), true), - ], - TestExternal::contains, - ); -} - -#[track_caller] -fn case( - value: T, - inputs: &[(T, bool)], - mut inherent: impl FnMut(&T, T) -> bool, -) { - for (input, expected) in inputs { - assert_eq!( - *expected, - inherent(&value, *input), - "{:?}.contains({:?})", - value, - input - ); - assert_eq!( - *expected, - Flags::contains(&value, *input), - "Flags::contains({:?}, {:?})", - value, - input - ); - } -} diff --git a/vendor/bitflags/src/tests/difference.rs b/vendor/bitflags/src/tests/difference.rs deleted file mode 100644 index 6ce9c0bf1981a3..00000000000000 --- a/vendor/bitflags/src/tests/difference.rs +++ /dev/null @@ -1,92 +0,0 @@ -use super::*; - -use crate::Flags; - -#[test] -fn cases() { - case( - TestFlags::A | TestFlags::B, - &[ - (TestFlags::A, 1 << 1), - (TestFlags::B, 1), - (TestFlags::from_bits_retain(1 << 3), 1 | 1 << 1), - ], - TestFlags::difference, - ); - - case( - TestFlags::from_bits_retain(1 | 1 << 3), - &[ - (TestFlags::A, 1 << 3), - (TestFlags::from_bits_retain(1 << 3), 1), - ], - TestFlags::difference, - ); - - case( - TestExternal::from_bits_retain(!0), - &[(TestExternal::A, 0b1111_1110)], - TestExternal::difference, - ); - - assert_eq!( - 0b1111_1110, - (TestExternal::from_bits_retain(!0) & 
!TestExternal::A).bits() - ); - - assert_eq!( - 0b1111_1110, - (TestFlags::from_bits_retain(!0).difference(TestFlags::A)).bits() - ); - - // The `!` operator unsets bits that don't correspond to known flags - assert_eq!( - 1 << 1 | 1 << 2, - (TestFlags::from_bits_retain(!0) & !TestFlags::A).bits() - ); -} - -#[track_caller] -fn case + std::ops::SubAssign + Copy>( - value: T, - inputs: &[(T, T::Bits)], - mut inherent: impl FnMut(T, T) -> T, -) where - T::Bits: std::fmt::Debug + PartialEq + Copy, -{ - for (input, expected) in inputs { - assert_eq!( - *expected, - inherent(value, *input).bits(), - "{:?}.difference({:?})", - value, - input - ); - assert_eq!( - *expected, - Flags::difference(value, *input).bits(), - "Flags::difference({:?}, {:?})", - value, - input - ); - assert_eq!( - *expected, - (value - *input).bits(), - "{:?} - {:?}", - value, - input - ); - assert_eq!( - *expected, - { - let mut value = value; - value -= *input; - value - } - .bits(), - "{:?} -= {:?}", - value, - input, - ); - } -} diff --git a/vendor/bitflags/src/tests/empty.rs b/vendor/bitflags/src/tests/empty.rs deleted file mode 100644 index 57fb1c7cf18789..00000000000000 --- a/vendor/bitflags/src/tests/empty.rs +++ /dev/null @@ -1,23 +0,0 @@ -use super::*; - -use crate::Flags; - -#[test] -fn cases() { - case(0, TestFlags::empty); - - case(0, TestZero::empty); - - case(0, TestEmpty::empty); - - case(0, TestExternal::empty); -} - -#[track_caller] -fn case(expected: T::Bits, inherent: impl FnOnce() -> T) -where - ::Bits: std::fmt::Debug + PartialEq, -{ - assert_eq!(expected, inherent().bits(), "T::empty()"); - assert_eq!(expected, T::empty().bits(), "Flags::empty()"); -} diff --git a/vendor/bitflags/src/tests/eq.rs b/vendor/bitflags/src/tests/eq.rs deleted file mode 100644 index 9779af7629a988..00000000000000 --- a/vendor/bitflags/src/tests/eq.rs +++ /dev/null @@ -1,10 +0,0 @@ -use super::*; - -#[test] -fn cases() { - assert_eq!(TestFlags::empty(), TestFlags::empty()); - assert_eq!(TestFlags::all(), TestFlags::all()); - - assert!(TestFlags::from_bits_retain(1) < TestFlags::from_bits_retain(2)); - assert!(TestFlags::from_bits_retain(2) > TestFlags::from_bits_retain(1)); -} diff --git a/vendor/bitflags/src/tests/extend.rs b/vendor/bitflags/src/tests/extend.rs deleted file mode 100644 index 869dc17fc81b61..00000000000000 --- a/vendor/bitflags/src/tests/extend.rs +++ /dev/null @@ -1,42 +0,0 @@ -use super::*; - -#[test] -fn cases() { - let mut flags = TestFlags::empty(); - - flags.extend(TestFlags::A); - - assert_eq!(TestFlags::A, flags); - - flags.extend(TestFlags::A | TestFlags::B | TestFlags::C); - - assert_eq!(TestFlags::ABC, flags); - - flags.extend(TestFlags::from_bits_retain(1 << 5)); - - assert_eq!(TestFlags::ABC | TestFlags::from_bits_retain(1 << 5), flags); -} - -mod external { - use super::*; - - #[test] - fn cases() { - let mut flags = TestExternal::empty(); - - flags.extend(TestExternal::A); - - assert_eq!(TestExternal::A, flags); - - flags.extend(TestExternal::A | TestExternal::B | TestExternal::C); - - assert_eq!(TestExternal::ABC, flags); - - flags.extend(TestExternal::from_bits_retain(1 << 5)); - - assert_eq!( - TestExternal::ABC | TestExternal::from_bits_retain(1 << 5), - flags - ); - } -} diff --git a/vendor/bitflags/src/tests/flags.rs b/vendor/bitflags/src/tests/flags.rs deleted file mode 100644 index 7a625b312c1ef0..00000000000000 --- a/vendor/bitflags/src/tests/flags.rs +++ /dev/null @@ -1,46 +0,0 @@ -use super::*; - -use crate::Flags; - -#[test] -fn cases() { - let flags = TestFlags::FLAGS - .iter() - 
.map(|flag| (flag.name(), flag.value().bits())) - .collect::>(); - - assert_eq!( - vec![ - ("A", 1u8), - ("B", 1 << 1), - ("C", 1 << 2), - ("ABC", 1 | 1 << 1 | 1 << 2), - ], - flags, - ); - - assert_eq!(0, TestEmpty::FLAGS.iter().count()); -} - -mod external { - use super::*; - - #[test] - fn cases() { - let flags = TestExternal::FLAGS - .iter() - .map(|flag| (flag.name(), flag.value().bits())) - .collect::>(); - - assert_eq!( - vec![ - ("A", 1u8), - ("B", 1 << 1), - ("C", 1 << 2), - ("ABC", 1 | 1 << 1 | 1 << 2), - ("", !0), - ], - flags, - ); - } -} diff --git a/vendor/bitflags/src/tests/fmt.rs b/vendor/bitflags/src/tests/fmt.rs deleted file mode 100644 index ed4571877dc44f..00000000000000 --- a/vendor/bitflags/src/tests/fmt.rs +++ /dev/null @@ -1,97 +0,0 @@ -use super::*; - -#[test] -fn cases() { - case(TestFlags::empty(), "TestFlags(0x0)", "0", "0", "0", "0"); - case(TestFlags::A, "TestFlags(A)", "1", "1", "1", "1"); - case( - TestFlags::all(), - "TestFlags(A | B | C)", - "7", - "7", - "7", - "111", - ); - case( - TestFlags::from_bits_retain(1 << 3), - "TestFlags(0x8)", - "8", - "8", - "10", - "1000", - ); - case( - TestFlags::A | TestFlags::from_bits_retain(1 << 3), - "TestFlags(A | 0x8)", - "9", - "9", - "11", - "1001", - ); - - case(TestZero::ZERO, "TestZero(0x0)", "0", "0", "0", "0"); - case( - TestZero::ZERO | TestZero::from_bits_retain(1), - "TestZero(0x1)", - "1", - "1", - "1", - "1", - ); - - case(TestZeroOne::ONE, "TestZeroOne(ONE)", "1", "1", "1", "1"); - - case( - TestOverlapping::from_bits_retain(1 << 1), - "TestOverlapping(0x2)", - "2", - "2", - "2", - "10", - ); - - case( - TestExternal::from_bits_retain(1 | 1 << 1 | 1 << 3), - "TestExternal(A | B | 0x8)", - "B", - "b", - "13", - "1011", - ); - - case( - TestExternal::all(), - "TestExternal(A | B | C | 0xf8)", - "FF", - "ff", - "377", - "11111111", - ); - - case( - TestExternalFull::all(), - "TestExternalFull(0xff)", - "FF", - "ff", - "377", - "11111111", - ); -} - -#[track_caller] -fn case< - T: std::fmt::Debug + std::fmt::UpperHex + std::fmt::LowerHex + std::fmt::Octal + std::fmt::Binary, ->( - value: T, - debug: &str, - uhex: &str, - lhex: &str, - oct: &str, - bin: &str, -) { - assert_eq!(debug, format!("{:?}", value)); - assert_eq!(uhex, format!("{:X}", value)); - assert_eq!(lhex, format!("{:x}", value)); - assert_eq!(oct, format!("{:o}", value)); - assert_eq!(bin, format!("{:b}", value)); -} diff --git a/vendor/bitflags/src/tests/from_bits.rs b/vendor/bitflags/src/tests/from_bits.rs deleted file mode 100644 index dada9aff82326c..00000000000000 --- a/vendor/bitflags/src/tests/from_bits.rs +++ /dev/null @@ -1,45 +0,0 @@ -use super::*; - -use crate::Flags; - -#[test] -fn cases() { - case(Some(0), 0, TestFlags::from_bits); - case(Some(1), 1, TestFlags::from_bits); - case( - Some(1 | 1 << 1 | 1 << 2), - 1 | 1 << 1 | 1 << 2, - TestFlags::from_bits, - ); - - case(None, 1 << 3, TestFlags::from_bits); - case(None, 1 | 1 << 3, TestFlags::from_bits); - - case(Some(1 | 1 << 1), 1 | 1 << 1, TestOverlapping::from_bits); - - case(Some(1 << 1), 1 << 1, TestOverlapping::from_bits); - - case(Some(1 << 5), 1 << 5, TestExternal::from_bits); -} - -#[track_caller] -fn case( - expected: Option, - input: T::Bits, - inherent: impl FnOnce(T::Bits) -> Option, -) where - ::Bits: std::fmt::Debug + PartialEq, -{ - assert_eq!( - expected, - inherent(input).map(|f| f.bits()), - "T::from_bits({:?})", - input - ); - assert_eq!( - expected, - T::from_bits(input).map(|f| f.bits()), - "Flags::from_bits({:?})", - input - ); -} diff --git 
a/vendor/bitflags/src/tests/from_bits_retain.rs b/vendor/bitflags/src/tests/from_bits_retain.rs deleted file mode 100644 index 1ae28a663fd62d..00000000000000 --- a/vendor/bitflags/src/tests/from_bits_retain.rs +++ /dev/null @@ -1,38 +0,0 @@ -use super::*; - -use crate::Flags; - -#[test] -fn cases() { - case(0, TestFlags::from_bits_retain); - case(1, TestFlags::from_bits_retain); - case(1 | 1 << 1 | 1 << 2, TestFlags::from_bits_retain); - - case(1 << 3, TestFlags::from_bits_retain); - case(1 | 1 << 3, TestFlags::from_bits_retain); - - case(1 | 1 << 1, TestOverlapping::from_bits_retain); - - case(1 << 1, TestOverlapping::from_bits_retain); - - case(1 << 5, TestExternal::from_bits_retain); -} - -#[track_caller] -fn case(input: T::Bits, inherent: impl FnOnce(T::Bits) -> T) -where - ::Bits: std::fmt::Debug + PartialEq, -{ - assert_eq!( - input, - inherent(input).bits(), - "T::from_bits_retain({:?})", - input - ); - assert_eq!( - input, - T::from_bits_retain(input).bits(), - "Flags::from_bits_retain({:?})", - input - ); -} diff --git a/vendor/bitflags/src/tests/from_bits_truncate.rs b/vendor/bitflags/src/tests/from_bits_truncate.rs deleted file mode 100644 index e4f3e537c4a3f1..00000000000000 --- a/vendor/bitflags/src/tests/from_bits_truncate.rs +++ /dev/null @@ -1,42 +0,0 @@ -use super::*; - -use crate::Flags; - -#[test] -fn cases() { - case(0, 0, TestFlags::from_bits_truncate); - case(1, 1, TestFlags::from_bits_truncate); - case( - 1 | 1 << 1 | 1 << 2, - 1 | 1 << 1 | 1 << 2, - TestFlags::from_bits_truncate, - ); - - case(0, 1 << 3, TestFlags::from_bits_truncate); - case(1, 1 | 1 << 3, TestFlags::from_bits_truncate); - - case(1 | 1 << 1, 1 | 1 << 1, TestOverlapping::from_bits_truncate); - - case(1 << 1, 1 << 1, TestOverlapping::from_bits_truncate); - - case(1 << 5, 1 << 5, TestExternal::from_bits_truncate); -} - -#[track_caller] -fn case(expected: T::Bits, input: T::Bits, inherent: impl FnOnce(T::Bits) -> T) -where - ::Bits: std::fmt::Debug + PartialEq, -{ - assert_eq!( - expected, - inherent(input).bits(), - "T::from_bits_truncate({:?})", - input - ); - assert_eq!( - expected, - T::from_bits_truncate(input).bits(), - "Flags::from_bits_truncate({:?})", - input - ); -} diff --git a/vendor/bitflags/src/tests/from_name.rs b/vendor/bitflags/src/tests/from_name.rs deleted file mode 100644 index 1d9a4e48b650b3..00000000000000 --- a/vendor/bitflags/src/tests/from_name.rs +++ /dev/null @@ -1,42 +0,0 @@ -use super::*; - -use crate::Flags; - -#[test] -fn cases() { - case(Some(1), "A", TestFlags::from_name); - case(Some(1 << 1), "B", TestFlags::from_name); - case(Some(1 | 1 << 1 | 1 << 2), "ABC", TestFlags::from_name); - - case(None, "", TestFlags::from_name); - case(None, "a", TestFlags::from_name); - case(None, "0x1", TestFlags::from_name); - case(None, "A | B", TestFlags::from_name); - - case(Some(0), "ZERO", TestZero::from_name); - - case(Some(2), "二", TestUnicode::from_name); - - case(None, "_", TestExternal::from_name); - - case(None, "", TestExternal::from_name); -} - -#[track_caller] -fn case(expected: Option, input: &str, inherent: impl FnOnce(&str) -> Option) -where - ::Bits: std::fmt::Debug + PartialEq, -{ - assert_eq!( - expected, - inherent(input).map(|f| f.bits()), - "T::from_name({:?})", - input - ); - assert_eq!( - expected, - T::from_name(input).map(|f| f.bits()), - "Flags::from_name({:?})", - input - ); -} diff --git a/vendor/bitflags/src/tests/insert.rs b/vendor/bitflags/src/tests/insert.rs deleted file mode 100644 index b18cd17235288c..00000000000000 --- 
a/vendor/bitflags/src/tests/insert.rs +++ /dev/null @@ -1,91 +0,0 @@ -use super::*; - -use crate::Flags; - -#[test] -fn cases() { - case( - TestFlags::empty(), - &[ - (TestFlags::A, 1), - (TestFlags::A | TestFlags::B, 1 | 1 << 1), - (TestFlags::empty(), 0), - (TestFlags::from_bits_retain(1 << 3), 1 << 3), - ], - TestFlags::insert, - TestFlags::set, - ); - - case( - TestFlags::A, - &[ - (TestFlags::A, 1), - (TestFlags::empty(), 1), - (TestFlags::B, 1 | 1 << 1), - ], - TestFlags::insert, - TestFlags::set, - ); -} - -#[track_caller] -fn case( - value: T, - inputs: &[(T, T::Bits)], - mut inherent_insert: impl FnMut(&mut T, T), - mut inherent_set: impl FnMut(&mut T, T, bool), -) where - T::Bits: std::fmt::Debug + PartialEq + Copy, -{ - for (input, expected) in inputs { - assert_eq!( - *expected, - { - let mut value = value; - inherent_insert(&mut value, *input); - value - } - .bits(), - "{:?}.insert({:?})", - value, - input - ); - assert_eq!( - *expected, - { - let mut value = value; - Flags::insert(&mut value, *input); - value - } - .bits(), - "Flags::insert({:?}, {:?})", - value, - input - ); - - assert_eq!( - *expected, - { - let mut value = value; - inherent_set(&mut value, *input, true); - value - } - .bits(), - "{:?}.set({:?}, true)", - value, - input - ); - assert_eq!( - *expected, - { - let mut value = value; - Flags::set(&mut value, *input, true); - value - } - .bits(), - "Flags::set({:?}, {:?}, true)", - value, - input - ); - } -} diff --git a/vendor/bitflags/src/tests/intersection.rs b/vendor/bitflags/src/tests/intersection.rs deleted file mode 100644 index 10a8ae9fb6b6a4..00000000000000 --- a/vendor/bitflags/src/tests/intersection.rs +++ /dev/null @@ -1,79 +0,0 @@ -use super::*; - -use crate::Flags; - -#[test] -fn cases() { - case( - TestFlags::empty(), - &[(TestFlags::empty(), 0), (TestFlags::all(), 0)], - TestFlags::intersection, - ); - - case( - TestFlags::all(), - &[ - (TestFlags::all(), 1 | 1 << 1 | 1 << 2), - (TestFlags::A, 1), - (TestFlags::from_bits_retain(1 << 3), 0), - ], - TestFlags::intersection, - ); - - case( - TestFlags::from_bits_retain(1 << 3), - &[(TestFlags::from_bits_retain(1 << 3), 1 << 3)], - TestFlags::intersection, - ); - - case( - TestOverlapping::AB, - &[(TestOverlapping::BC, 1 << 1)], - TestOverlapping::intersection, - ); -} - -#[track_caller] -fn case + std::ops::BitAndAssign + Copy>( - value: T, - inputs: &[(T, T::Bits)], - mut inherent: impl FnMut(T, T) -> T, -) where - T::Bits: std::fmt::Debug + PartialEq + Copy, -{ - for (input, expected) in inputs { - assert_eq!( - *expected, - inherent(value, *input).bits(), - "{:?}.intersection({:?})", - value, - input - ); - assert_eq!( - *expected, - Flags::intersection(value, *input).bits(), - "Flags::intersection({:?}, {:?})", - value, - input - ); - assert_eq!( - *expected, - (value & *input).bits(), - "{:?} & {:?}", - value, - input - ); - assert_eq!( - *expected, - { - let mut value = value; - value &= *input; - value - } - .bits(), - "{:?} &= {:?}", - value, - input, - ); - } -} diff --git a/vendor/bitflags/src/tests/intersects.rs b/vendor/bitflags/src/tests/intersects.rs deleted file mode 100644 index fe907981a2ad66..00000000000000 --- a/vendor/bitflags/src/tests/intersects.rs +++ /dev/null @@ -1,91 +0,0 @@ -use super::*; - -use crate::Flags; - -#[test] -fn cases() { - case( - TestFlags::empty(), - &[ - (TestFlags::empty(), false), - (TestFlags::A, false), - (TestFlags::B, false), - (TestFlags::C, false), - (TestFlags::from_bits_retain(1 << 3), false), - ], - TestFlags::intersects, - ); - - case( - 
TestFlags::A, - &[ - (TestFlags::empty(), false), - (TestFlags::A, true), - (TestFlags::B, false), - (TestFlags::C, false), - (TestFlags::ABC, true), - (TestFlags::from_bits_retain(1 << 3), false), - (TestFlags::from_bits_retain(1 | (1 << 3)), true), - ], - TestFlags::intersects, - ); - - case( - TestFlags::ABC, - &[ - (TestFlags::empty(), false), - (TestFlags::A, true), - (TestFlags::B, true), - (TestFlags::C, true), - (TestFlags::ABC, true), - (TestFlags::from_bits_retain(1 << 3), false), - ], - TestFlags::intersects, - ); - - case( - TestFlags::from_bits_retain(1 << 3), - &[ - (TestFlags::empty(), false), - (TestFlags::A, false), - (TestFlags::B, false), - (TestFlags::C, false), - (TestFlags::from_bits_retain(1 << 3), true), - ], - TestFlags::intersects, - ); - - case( - TestOverlapping::AB, - &[ - (TestOverlapping::AB, true), - (TestOverlapping::BC, true), - (TestOverlapping::from_bits_retain(1 << 1), true), - ], - TestOverlapping::intersects, - ); -} - -#[track_caller] -fn case( - value: T, - inputs: &[(T, bool)], - mut inherent: impl FnMut(&T, T) -> bool, -) { - for (input, expected) in inputs { - assert_eq!( - *expected, - inherent(&value, *input), - "{:?}.intersects({:?})", - value, - input - ); - assert_eq!( - *expected, - Flags::intersects(&value, *input), - "Flags::intersects({:?}, {:?})", - value, - input - ); - } -} diff --git a/vendor/bitflags/src/tests/is_all.rs b/vendor/bitflags/src/tests/is_all.rs deleted file mode 100644 index 382a458f610b0f..00000000000000 --- a/vendor/bitflags/src/tests/is_all.rs +++ /dev/null @@ -1,32 +0,0 @@ -use super::*; - -use crate::Flags; - -#[test] -fn cases() { - case(false, TestFlags::empty(), TestFlags::is_all); - case(false, TestFlags::A, TestFlags::is_all); - - case(true, TestFlags::ABC, TestFlags::is_all); - - case( - true, - TestFlags::ABC | TestFlags::from_bits_retain(1 << 3), - TestFlags::is_all, - ); - - case(true, TestZero::empty(), TestZero::is_all); - - case(true, TestEmpty::empty(), TestEmpty::is_all); -} - -#[track_caller] -fn case(expected: bool, value: T, inherent: impl FnOnce(&T) -> bool) { - assert_eq!(expected, inherent(&value), "{:?}.is_all()", value); - assert_eq!( - expected, - Flags::is_all(&value), - "Flags::is_all({:?})", - value - ); -} diff --git a/vendor/bitflags/src/tests/is_empty.rs b/vendor/bitflags/src/tests/is_empty.rs deleted file mode 100644 index 92165f18e36bf0..00000000000000 --- a/vendor/bitflags/src/tests/is_empty.rs +++ /dev/null @@ -1,31 +0,0 @@ -use super::*; - -use crate::Flags; - -#[test] -fn cases() { - case(true, TestFlags::empty(), TestFlags::is_empty); - - case(false, TestFlags::A, TestFlags::is_empty); - case(false, TestFlags::ABC, TestFlags::is_empty); - case( - false, - TestFlags::from_bits_retain(1 << 3), - TestFlags::is_empty, - ); - - case(true, TestZero::empty(), TestZero::is_empty); - - case(true, TestEmpty::empty(), TestEmpty::is_empty); -} - -#[track_caller] -fn case(expected: bool, value: T, inherent: impl FnOnce(&T) -> bool) { - assert_eq!(expected, inherent(&value), "{:?}.is_empty()", value); - assert_eq!( - expected, - Flags::is_empty(&value), - "Flags::is_empty({:?})", - value - ); -} diff --git a/vendor/bitflags/src/tests/iter.rs b/vendor/bitflags/src/tests/iter.rs deleted file mode 100644 index d4b2ea068915bc..00000000000000 --- a/vendor/bitflags/src/tests/iter.rs +++ /dev/null @@ -1,299 +0,0 @@ -use super::*; - -use crate::Flags; - -#[test] -#[cfg(not(miri))] // Very slow in miri -fn roundtrip() { - for a in 0u8..=255 { - for b in 0u8..=255 { - let f = 
TestFlags::from_bits_retain(a | b); - - assert_eq!(f, f.iter().collect::()); - assert_eq!( - TestFlags::from_bits_truncate(f.bits()), - f.iter_names().map(|(_, f)| f).collect::() - ); - - let f = TestExternal::from_bits_retain(a | b); - - assert_eq!(f, f.iter().collect::()); - } - } -} - -mod collect { - use super::*; - - #[test] - fn cases() { - assert_eq!(0, [].into_iter().collect::().bits()); - - assert_eq!(1, [TestFlags::A,].into_iter().collect::().bits()); - - assert_eq!( - 1 | 1 << 1 | 1 << 2, - [TestFlags::A, TestFlags::B | TestFlags::C,] - .into_iter() - .collect::() - .bits() - ); - - assert_eq!( - 1 | 1 << 3, - [ - TestFlags::from_bits_retain(1 << 3), - TestFlags::empty(), - TestFlags::A, - ] - .into_iter() - .collect::() - .bits() - ); - - assert_eq!( - 1 << 5 | 1 << 7, - [ - TestExternal::empty(), - TestExternal::from_bits_retain(1 << 5), - TestExternal::from_bits_retain(1 << 7), - ] - .into_iter() - .collect::() - .bits() - ); - } -} - -mod iter { - use super::*; - - #[test] - fn cases() { - case(&[], TestFlags::empty(), TestFlags::iter); - - case(&[1], TestFlags::A, TestFlags::iter); - case(&[1, 1 << 1], TestFlags::A | TestFlags::B, TestFlags::iter); - case( - &[1, 1 << 1, 1 << 3], - TestFlags::A | TestFlags::B | TestFlags::from_bits_retain(1 << 3), - TestFlags::iter, - ); - - case(&[1, 1 << 1, 1 << 2], TestFlags::ABC, TestFlags::iter); - case( - &[1, 1 << 1, 1 << 2, 1 << 3], - TestFlags::ABC | TestFlags::from_bits_retain(1 << 3), - TestFlags::iter, - ); - - case( - &[1 | 1 << 1 | 1 << 2], - TestFlagsInvert::ABC, - TestFlagsInvert::iter, - ); - - case(&[], TestZero::ZERO, TestZero::iter); - - case( - &[1, 1 << 1, 1 << 2, 0b1111_1000], - TestExternal::all(), - TestExternal::iter, - ); - } - - #[track_caller] - fn case + Copy>( - expected: &[T::Bits], - value: T, - inherent: impl FnOnce(&T) -> crate::iter::Iter, - ) where - T::Bits: std::fmt::Debug + PartialEq, - { - assert_eq!( - expected, - inherent(&value).map(|f| f.bits()).collect::>(), - "{:?}.iter()", - value - ); - assert_eq!( - expected, - Flags::iter(&value).map(|f| f.bits()).collect::>(), - "Flags::iter({:?})", - value - ); - assert_eq!( - expected, - value.into_iter().map(|f| f.bits()).collect::>(), - "{:?}.into_iter()", - value - ); - } -} - -mod iter_names { - use super::*; - - #[test] - fn cases() { - case(&[], TestFlags::empty(), TestFlags::iter_names); - - case(&[("A", 1)], TestFlags::A, TestFlags::iter_names); - case( - &[("A", 1), ("B", 1 << 1)], - TestFlags::A | TestFlags::B, - TestFlags::iter_names, - ); - case( - &[("A", 1), ("B", 1 << 1)], - TestFlags::A | TestFlags::B | TestFlags::from_bits_retain(1 << 3), - TestFlags::iter_names, - ); - - case( - &[("A", 1), ("B", 1 << 1), ("C", 1 << 2)], - TestFlags::ABC, - TestFlags::iter_names, - ); - case( - &[("A", 1), ("B", 1 << 1), ("C", 1 << 2)], - TestFlags::ABC | TestFlags::from_bits_retain(1 << 3), - TestFlags::iter_names, - ); - - case( - &[("ABC", 1 | 1 << 1 | 1 << 2)], - TestFlagsInvert::ABC, - TestFlagsInvert::iter_names, - ); - - case(&[], TestZero::ZERO, TestZero::iter_names); - - case( - &[("A", 1)], - TestOverlappingFull::A, - TestOverlappingFull::iter_names, - ); - case( - &[("A", 1), ("D", 1 << 1)], - TestOverlappingFull::A | TestOverlappingFull::D, - TestOverlappingFull::iter_names, - ); - } - - #[track_caller] - fn case( - expected: &[(&'static str, T::Bits)], - value: T, - inherent: impl FnOnce(&T) -> crate::iter::IterNames, - ) where - T::Bits: std::fmt::Debug + PartialEq, - { - assert_eq!( - expected, - inherent(&value) - .map(|(n, f)| (n, 
f.bits())) - .collect::>(), - "{:?}.iter_names()", - value - ); - assert_eq!( - expected, - Flags::iter_names(&value) - .map(|(n, f)| (n, f.bits())) - .collect::>(), - "Flags::iter_names({:?})", - value - ); - } -} - -mod iter_defined_names { - use crate::Flags; - - #[test] - fn test_defined_names() { - bitflags! { - #[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] - struct TestFlags: u32 { - const A = 0b00000001; - const ZERO = 0; - const B = 0b00000010; - const C = 0b00000100; - const CC = Self::C.bits(); - const D = 0b10000100; - const ABC = Self::A.bits() | Self::B.bits() | Self::C.bits(); - const AB = Self::A.bits() | Self::B.bits(); - const AC = Self::A.bits() | Self::C.bits(); - const CB = Self::B.bits() | Self::C.bits(); - } - } - - // Test all named flags produced by the iterator - let all_named: Vec<(&'static str, TestFlags)> = TestFlags::iter_defined_names().collect(); - - // Verify all named flags are included - let expected_flags = vec![ - ("A", TestFlags::A), - ("ZERO", TestFlags::ZERO), - ("B", TestFlags::B), - ("C", TestFlags::C), - // Note: CC and C have the same bit value, but both are named flags - ("CC", TestFlags::CC), - ("D", TestFlags::D), - ("ABC", TestFlags::ABC), - ("AB", TestFlags::AB), - ("AC", TestFlags::AC), - ("CB", TestFlags::CB), - ]; - - assert_eq!( - all_named.len(), - expected_flags.len(), - "Should have 10 named flags" - ); - - // Verify each expected flag is in the result - for expected_flag in &expected_flags { - assert!( - all_named.contains(expected_flag), - "Missing flag: {:?}", - expected_flag - ); - } - - // Test if iterator order is consistent with definition order - let flags_in_order: Vec<(&'static str, TestFlags)> = - TestFlags::iter_defined_names().collect(); - assert_eq!( - flags_in_order, expected_flags, - "Flag order should match definition order" - ); - - // Test that iterator can be used multiple times - let first_iteration: Vec<(&'static str, TestFlags)> = - TestFlags::iter_defined_names().collect(); - let second_iteration: Vec<(&'static str, TestFlags)> = - TestFlags::iter_defined_names().collect(); - assert_eq!( - first_iteration, second_iteration, - "Multiple iterations should produce the same result" - ); - - // Test consistency with FLAGS constant - let flags_from_iter: std::collections::HashSet = TestFlags::iter_defined_names() - .map(|(_, f)| f.bits()) - .collect(); - - let flags_from_const: std::collections::HashSet = TestFlags::FLAGS - .iter() - .filter(|f| f.is_named()) - .map(|f| f.value().bits()) - .collect(); - - assert_eq!( - flags_from_iter, flags_from_const, - "iter_defined_names() should be consistent with named flags in FLAGS" - ); - } -} diff --git a/vendor/bitflags/src/tests/parser.rs b/vendor/bitflags/src/tests/parser.rs deleted file mode 100644 index fb27225ecef604..00000000000000 --- a/vendor/bitflags/src/tests/parser.rs +++ /dev/null @@ -1,332 +0,0 @@ -use super::*; - -use crate::{parser::*, Flags}; - -#[test] -#[cfg(not(miri))] // Very slow in miri -fn roundtrip() { - let mut s = String::new(); - - for a in 0u8..=255 { - for b in 0u8..=255 { - let f = TestFlags::from_bits_retain(a | b); - - s.clear(); - to_writer(&f, &mut s).unwrap(); - - assert_eq!(f, from_str::(&s).unwrap()); - } - } -} - -#[test] -#[cfg(not(miri))] // Very slow in miri -fn roundtrip_truncate() { - let mut s = String::new(); - - for a in 0u8..=255 { - for b in 0u8..=255 { - let f = TestFlags::from_bits_retain(a | b); - - s.clear(); - to_writer_truncate(&f, &mut s).unwrap(); - - assert_eq!( - 
TestFlags::from_bits_truncate(f.bits()), - from_str_truncate::(&s).unwrap() - ); - } - } -} - -#[test] -#[cfg(not(miri))] // Very slow in miri -fn roundtrip_strict() { - let mut s = String::new(); - - for a in 0u8..=255 { - for b in 0u8..=255 { - let f = TestFlags::from_bits_retain(a | b); - - s.clear(); - to_writer_strict(&f, &mut s).unwrap(); - - let mut strict = TestFlags::empty(); - for (_, flag) in f.iter_names() { - strict |= flag; - } - let f = strict; - - if let Ok(s) = from_str_strict::(&s) { - assert_eq!(f, s); - } - } - } -} - -mod from_str { - use super::*; - - #[test] - fn valid() { - assert_eq!(0, from_str::("").unwrap().bits()); - - assert_eq!(1, from_str::("A").unwrap().bits()); - assert_eq!(1, from_str::(" A ").unwrap().bits()); - assert_eq!( - 1 | 1 << 1 | 1 << 2, - from_str::("A | B | C").unwrap().bits() - ); - assert_eq!( - 1 | 1 << 1 | 1 << 2, - from_str::("A\n|\tB\r\n| C ").unwrap().bits() - ); - assert_eq!( - 1 | 1 << 1 | 1 << 2, - from_str::("A|B|C").unwrap().bits() - ); - - assert_eq!(1 << 3, from_str::("0x8").unwrap().bits()); - assert_eq!(1 | 1 << 3, from_str::("A | 0x8").unwrap().bits()); - assert_eq!( - 1 | 1 << 1 | 1 << 3, - from_str::("0x1 | 0x8 | B").unwrap().bits() - ); - - assert_eq!( - 1 | 1 << 1, - from_str::("一 | 二").unwrap().bits() - ); - } - - #[test] - fn invalid() { - assert!(from_str::("a") - .unwrap_err() - .to_string() - .starts_with("unrecognized named flag")); - assert!(from_str::("A & B") - .unwrap_err() - .to_string() - .starts_with("unrecognized named flag")); - - assert!(from_str::("0xg") - .unwrap_err() - .to_string() - .starts_with("invalid hex flag")); - assert!(from_str::("0xffffffffffff") - .unwrap_err() - .to_string() - .starts_with("invalid hex flag")); - } -} - -mod to_writer { - use super::*; - - #[test] - fn cases() { - assert_eq!("", write(TestFlags::empty())); - assert_eq!("A", write(TestFlags::A)); - assert_eq!("A | B | C", write(TestFlags::all())); - assert_eq!("0x8", write(TestFlags::from_bits_retain(1 << 3))); - assert_eq!( - "A | 0x8", - write(TestFlags::A | TestFlags::from_bits_retain(1 << 3)) - ); - - assert_eq!("", write(TestZero::ZERO)); - - assert_eq!("ABC", write(TestFlagsInvert::all())); - - assert_eq!("0x1", write(TestOverlapping::from_bits_retain(1))); - - assert_eq!("A", write(TestOverlappingFull::C)); - assert_eq!( - "A | D", - write(TestOverlappingFull::C | TestOverlappingFull::D) - ); - } - - fn write(value: F) -> String - where - F::Bits: crate::parser::WriteHex, - { - let mut s = String::new(); - - to_writer(&value, &mut s).unwrap(); - s - } -} - -mod from_str_truncate { - use super::*; - - #[test] - fn valid() { - assert_eq!(0, from_str_truncate::("").unwrap().bits()); - - assert_eq!(1, from_str_truncate::("A").unwrap().bits()); - assert_eq!(1, from_str_truncate::(" A ").unwrap().bits()); - assert_eq!( - 1 | 1 << 1 | 1 << 2, - from_str_truncate::("A | B | C").unwrap().bits() - ); - assert_eq!( - 1 | 1 << 1 | 1 << 2, - from_str_truncate::("A\n|\tB\r\n| C ") - .unwrap() - .bits() - ); - assert_eq!( - 1 | 1 << 1 | 1 << 2, - from_str_truncate::("A|B|C").unwrap().bits() - ); - - assert_eq!(0, from_str_truncate::("0x8").unwrap().bits()); - assert_eq!(1, from_str_truncate::("A | 0x8").unwrap().bits()); - assert_eq!( - 1 | 1 << 1, - from_str_truncate::("0x1 | 0x8 | B") - .unwrap() - .bits() - ); - - assert_eq!( - 1 | 1 << 1, - from_str_truncate::("一 | 二").unwrap().bits() - ); - } -} - -mod to_writer_truncate { - use super::*; - - #[test] - fn cases() { - assert_eq!("", write(TestFlags::empty())); - assert_eq!("A", 
write(TestFlags::A)); - assert_eq!("A | B | C", write(TestFlags::all())); - assert_eq!("", write(TestFlags::from_bits_retain(1 << 3))); - assert_eq!( - "A", - write(TestFlags::A | TestFlags::from_bits_retain(1 << 3)) - ); - - assert_eq!("", write(TestZero::ZERO)); - - assert_eq!("ABC", write(TestFlagsInvert::all())); - - assert_eq!("0x1", write(TestOverlapping::from_bits_retain(1))); - - assert_eq!("A", write(TestOverlappingFull::C)); - assert_eq!( - "A | D", - write(TestOverlappingFull::C | TestOverlappingFull::D) - ); - } - - fn write(value: F) -> String - where - F::Bits: crate::parser::WriteHex, - { - let mut s = String::new(); - - to_writer_truncate(&value, &mut s).unwrap(); - s - } -} - -mod from_str_strict { - use super::*; - - #[test] - fn valid() { - assert_eq!(0, from_str_strict::("").unwrap().bits()); - - assert_eq!(1, from_str_strict::("A").unwrap().bits()); - assert_eq!(1, from_str_strict::(" A ").unwrap().bits()); - assert_eq!( - 1 | 1 << 1 | 1 << 2, - from_str_strict::("A | B | C").unwrap().bits() - ); - assert_eq!( - 1 | 1 << 1 | 1 << 2, - from_str_strict::("A\n|\tB\r\n| C ") - .unwrap() - .bits() - ); - assert_eq!( - 1 | 1 << 1 | 1 << 2, - from_str_strict::("A|B|C").unwrap().bits() - ); - - assert_eq!( - 1 | 1 << 1, - from_str_strict::("一 | 二").unwrap().bits() - ); - } - - #[test] - fn invalid() { - assert!(from_str_strict::("a") - .unwrap_err() - .to_string() - .starts_with("unrecognized named flag")); - assert!(from_str_strict::("A & B") - .unwrap_err() - .to_string() - .starts_with("unrecognized named flag")); - - assert!(from_str_strict::("0x1") - .unwrap_err() - .to_string() - .starts_with("invalid hex flag")); - assert!(from_str_strict::("0xg") - .unwrap_err() - .to_string() - .starts_with("invalid hex flag")); - assert!(from_str_strict::("0xffffffffffff") - .unwrap_err() - .to_string() - .starts_with("invalid hex flag")); - } -} - -mod to_writer_strict { - use super::*; - - #[test] - fn cases() { - assert_eq!("", write(TestFlags::empty())); - assert_eq!("A", write(TestFlags::A)); - assert_eq!("A | B | C", write(TestFlags::all())); - assert_eq!("", write(TestFlags::from_bits_retain(1 << 3))); - assert_eq!( - "A", - write(TestFlags::A | TestFlags::from_bits_retain(1 << 3)) - ); - - assert_eq!("", write(TestZero::ZERO)); - - assert_eq!("ABC", write(TestFlagsInvert::all())); - - assert_eq!("", write(TestOverlapping::from_bits_retain(1))); - - assert_eq!("A", write(TestOverlappingFull::C)); - assert_eq!( - "A | D", - write(TestOverlappingFull::C | TestOverlappingFull::D) - ); - } - - fn write(value: F) -> String - where - F::Bits: crate::parser::WriteHex, - { - let mut s = String::new(); - - to_writer_strict(&value, &mut s).unwrap(); - s - } -} diff --git a/vendor/bitflags/src/tests/remove.rs b/vendor/bitflags/src/tests/remove.rs deleted file mode 100644 index 574b1edbf228f2..00000000000000 --- a/vendor/bitflags/src/tests/remove.rs +++ /dev/null @@ -1,100 +0,0 @@ -use super::*; - -use crate::Flags; - -#[test] -fn cases() { - case( - TestFlags::empty(), - &[ - (TestFlags::A, 0), - (TestFlags::empty(), 0), - (TestFlags::from_bits_retain(1 << 3), 0), - ], - TestFlags::remove, - TestFlags::set, - ); - - case( - TestFlags::A, - &[ - (TestFlags::A, 0), - (TestFlags::empty(), 1), - (TestFlags::B, 1), - ], - TestFlags::remove, - TestFlags::set, - ); - - case( - TestFlags::ABC, - &[ - (TestFlags::A, 1 << 1 | 1 << 2), - (TestFlags::A | TestFlags::C, 1 << 1), - ], - TestFlags::remove, - TestFlags::set, - ); -} - -#[track_caller] -fn case( - value: T, - inputs: &[(T, T::Bits)], - 
mut inherent_remove: impl FnMut(&mut T, T), - mut inherent_set: impl FnMut(&mut T, T, bool), -) where - T::Bits: std::fmt::Debug + PartialEq + Copy, -{ - for (input, expected) in inputs { - assert_eq!( - *expected, - { - let mut value = value; - inherent_remove(&mut value, *input); - value - } - .bits(), - "{:?}.remove({:?})", - value, - input - ); - assert_eq!( - *expected, - { - let mut value = value; - Flags::remove(&mut value, *input); - value - } - .bits(), - "Flags::remove({:?}, {:?})", - value, - input - ); - - assert_eq!( - *expected, - { - let mut value = value; - inherent_set(&mut value, *input, false); - value - } - .bits(), - "{:?}.set({:?}, false)", - value, - input - ); - assert_eq!( - *expected, - { - let mut value = value; - Flags::set(&mut value, *input, false); - value - } - .bits(), - "Flags::set({:?}, {:?}, false)", - value, - input - ); - } -} diff --git a/vendor/bitflags/src/tests/symmetric_difference.rs b/vendor/bitflags/src/tests/symmetric_difference.rs deleted file mode 100644 index 75e9123ac5da42..00000000000000 --- a/vendor/bitflags/src/tests/symmetric_difference.rs +++ /dev/null @@ -1,110 +0,0 @@ -use super::*; - -use crate::Flags; - -#[test] -fn cases() { - case( - TestFlags::empty(), - &[ - (TestFlags::empty(), 0), - (TestFlags::all(), 1 | 1 << 1 | 1 << 2), - (TestFlags::from_bits_retain(1 << 3), 1 << 3), - ], - TestFlags::symmetric_difference, - TestFlags::toggle, - ); - - case( - TestFlags::A, - &[ - (TestFlags::empty(), 1), - (TestFlags::A, 0), - (TestFlags::all(), 1 << 1 | 1 << 2), - ], - TestFlags::symmetric_difference, - TestFlags::toggle, - ); - - case( - TestFlags::A | TestFlags::B | TestFlags::from_bits_retain(1 << 3), - &[ - (TestFlags::ABC, 1 << 2 | 1 << 3), - (TestFlags::from_bits_retain(1 << 3), 1 | 1 << 1), - ], - TestFlags::symmetric_difference, - TestFlags::toggle, - ); -} - -#[track_caller] -fn case + std::ops::BitXorAssign + Copy>( - value: T, - inputs: &[(T, T::Bits)], - mut inherent_sym_diff: impl FnMut(T, T) -> T, - mut inherent_toggle: impl FnMut(&mut T, T), -) where - T::Bits: std::fmt::Debug + PartialEq + Copy, -{ - for (input, expected) in inputs { - assert_eq!( - *expected, - inherent_sym_diff(value, *input).bits(), - "{:?}.symmetric_difference({:?})", - value, - input - ); - assert_eq!( - *expected, - Flags::symmetric_difference(value, *input).bits(), - "Flags::symmetric_difference({:?}, {:?})", - value, - input - ); - assert_eq!( - *expected, - (value ^ *input).bits(), - "{:?} ^ {:?}", - value, - input - ); - assert_eq!( - *expected, - { - let mut value = value; - value ^= *input; - value - } - .bits(), - "{:?} ^= {:?}", - value, - input, - ); - - assert_eq!( - *expected, - { - let mut value = value; - inherent_toggle(&mut value, *input); - value - } - .bits(), - "{:?}.toggle({:?})", - value, - input, - ); - - assert_eq!( - *expected, - { - let mut value = value; - Flags::toggle(&mut value, *input); - value - } - .bits(), - "{:?}.toggle({:?})", - value, - input, - ); - } -} diff --git a/vendor/bitflags/src/tests/truncate.rs b/vendor/bitflags/src/tests/truncate.rs deleted file mode 100644 index e38df48dc59870..00000000000000 --- a/vendor/bitflags/src/tests/truncate.rs +++ /dev/null @@ -1,29 +0,0 @@ -use super::*; - -use crate::Flags; - -#[test] -fn cases() { - case( - TestFlags::ABC | TestFlags::from_bits_retain(1 << 3), - TestFlags::ABC, - ); - - case(TestZero::empty(), TestZero::empty()); - - case(TestZero::all(), TestZero::all()); - - case( - TestFlags::from_bits_retain(1 << 3) | TestFlags::all(), - TestFlags::all(), - ); -} - 
-#[track_caller] -fn case(mut before: T, after: T) -where - T: std::fmt::Debug + PartialEq + Copy, -{ - before.truncate(); - assert_eq!(before, after, "{:?}.truncate()", before); -} diff --git a/vendor/bitflags/src/tests/union.rs b/vendor/bitflags/src/tests/union.rs deleted file mode 100644 index 6190681931cac1..00000000000000 --- a/vendor/bitflags/src/tests/union.rs +++ /dev/null @@ -1,71 +0,0 @@ -use super::*; - -use crate::Flags; - -#[test] -fn cases() { - case( - TestFlags::empty(), - &[ - (TestFlags::A, 1), - (TestFlags::all(), 1 | 1 << 1 | 1 << 2), - (TestFlags::empty(), 0), - (TestFlags::from_bits_retain(1 << 3), 1 << 3), - ], - TestFlags::union, - ); - - case( - TestFlags::A | TestFlags::C, - &[ - (TestFlags::A | TestFlags::B, 1 | 1 << 1 | 1 << 2), - (TestFlags::A, 1 | 1 << 2), - ], - TestFlags::union, - ); -} - -#[track_caller] -fn case + std::ops::BitOrAssign + Copy>( - value: T, - inputs: &[(T, T::Bits)], - mut inherent: impl FnMut(T, T) -> T, -) where - T::Bits: std::fmt::Debug + PartialEq + Copy, -{ - for (input, expected) in inputs { - assert_eq!( - *expected, - inherent(value, *input).bits(), - "{:?}.union({:?})", - value, - input - ); - assert_eq!( - *expected, - Flags::union(value, *input).bits(), - "Flags::union({:?}, {:?})", - value, - input - ); - assert_eq!( - *expected, - (value | *input).bits(), - "{:?} | {:?}", - value, - input - ); - assert_eq!( - *expected, - { - let mut value = value; - value |= *input; - value - } - .bits(), - "{:?} |= {:?}", - value, - input, - ); - } -} diff --git a/vendor/bitflags/src/tests/unknown.rs b/vendor/bitflags/src/tests/unknown.rs deleted file mode 100644 index 020f7e927e242d..00000000000000 --- a/vendor/bitflags/src/tests/unknown.rs +++ /dev/null @@ -1,40 +0,0 @@ -use super::*; - -use crate::Flags; - -#[test] -fn cases() { - case(false, TestFlags::empty(), TestFlags::contains_unknown_bits); - case(false, TestFlags::A, TestFlags::contains_unknown_bits); - - case( - true, - TestFlags::ABC | TestFlags::from_bits_retain(1 << 3), - TestFlags::contains_unknown_bits, - ); - - case( - true, - TestFlags::empty() | TestFlags::from_bits_retain(1 << 3), - TestFlags::contains_unknown_bits, - ); - - case(false, TestFlags::all(), TestFlags::contains_unknown_bits); - - case(false, TestZero::empty(), TestZero::contains_unknown_bits); -} -#[track_caller] -fn case(expected: bool, value: T, inherent: impl FnOnce(&T) -> bool) { - assert_eq!( - expected, - inherent(&value), - "{:?}.contains_unknown_bits()", - value - ); - assert_eq!( - expected, - Flags::contains_unknown_bits(&value), - "Flags::contains_unknown_bits({:?})", - value - ); -} diff --git a/vendor/bitflags/src/traits.rs b/vendor/bitflags/src/traits.rs deleted file mode 100644 index efb438739fb602..00000000000000 --- a/vendor/bitflags/src/traits.rs +++ /dev/null @@ -1,457 +0,0 @@ -use core::{ - fmt, - ops::{BitAnd, BitOr, BitXor, Not}, -}; - -use crate::{ - iter, - parser::{ParseError, ParseHex, WriteHex}, -}; - -/** -A defined flags value that may be named or unnamed. -*/ -#[derive(Debug)] -pub struct Flag { - name: &'static str, - value: B, -} - -impl Flag { - /** - Define a flag. - - If `name` is non-empty then the flag is named, otherwise it's unnamed. - */ - pub const fn new(name: &'static str, value: B) -> Self { - Flag { name, value } - } - - /** - Get the name of this flag. - - If the flag is unnamed then the returned string will be empty. - */ - pub const fn name(&self) -> &'static str { - self.name - } - - /** - Get the flags value of this flag. 
- */ - pub const fn value(&self) -> &B { - &self.value - } - - /** - Whether the flag is named. - - If [`Flag::name`] returns a non-empty string then this method will return `true`. - */ - pub const fn is_named(&self) -> bool { - !self.name.is_empty() - } - - /** - Whether the flag is unnamed. - - If [`Flag::name`] returns a non-empty string then this method will return `false`. - */ - pub const fn is_unnamed(&self) -> bool { - self.name.is_empty() - } -} - -/** -A set of defined flags using a bits type as storage. - -## Implementing `Flags` - -This trait is implemented by the [`bitflags`](macro.bitflags.html) macro: - -``` -use bitflags::bitflags; - -bitflags! { - struct MyFlags: u8 { - const A = 1; - const B = 1 << 1; - } -} -``` - -It can also be implemented manually: - -``` -use bitflags::{Flag, Flags}; - -struct MyFlags(u8); - -impl Flags for MyFlags { - const FLAGS: &'static [Flag] = &[ - Flag::new("A", MyFlags(1)), - Flag::new("B", MyFlags(1 << 1)), - ]; - - type Bits = u8; - - fn from_bits_retain(bits: Self::Bits) -> Self { - MyFlags(bits) - } - - fn bits(&self) -> Self::Bits { - self.0 - } -} -``` - -## Using `Flags` - -The `Flags` trait can be used generically to work with any flags types. In this example, -we can count the number of defined named flags: - -``` -# use bitflags::{bitflags, Flags}; -fn defined_flags() -> usize { - F::FLAGS.iter().filter(|f| f.is_named()).count() -} - -bitflags! { - struct MyFlags: u8 { - const A = 1; - const B = 1 << 1; - const C = 1 << 2; - - const _ = !0; - } -} - -assert_eq!(3, defined_flags::()); -``` -*/ -pub trait Flags: Sized + 'static { - /// The set of defined flags. - const FLAGS: &'static [Flag]; - - /// The underlying bits type. - type Bits: Bits; - - /// Get a flags value with all bits unset. - fn empty() -> Self { - Self::from_bits_retain(Self::Bits::EMPTY) - } - - /// Get a flags value with all known bits set. - fn all() -> Self { - let mut truncated = Self::Bits::EMPTY; - - for flag in Self::FLAGS.iter() { - truncated = truncated | flag.value().bits(); - } - - Self::from_bits_retain(truncated) - } - - /// This method will return `true` if any unknown bits are set. - fn contains_unknown_bits(&self) -> bool { - Self::all().bits() & self.bits() != self.bits() - } - - /// Get the underlying bits value. - /// - /// The returned value is exactly the bits set in this flags value. - fn bits(&self) -> Self::Bits; - - /// Convert from a bits value. - /// - /// This method will return `None` if any unknown bits are set. - fn from_bits(bits: Self::Bits) -> Option { - let truncated = Self::from_bits_truncate(bits); - - if truncated.bits() == bits { - Some(truncated) - } else { - None - } - } - - /// Convert from a bits value, unsetting any unknown bits. - fn from_bits_truncate(bits: Self::Bits) -> Self { - Self::from_bits_retain(bits & Self::all().bits()) - } - - /// Convert from a bits value exactly. - fn from_bits_retain(bits: Self::Bits) -> Self; - - /// Get a flags value with the bits of a flag with the given name set. - /// - /// This method will return `None` if `name` is empty or doesn't - /// correspond to any named flag. - fn from_name(name: &str) -> Option { - // Don't parse empty names as empty flags - if name.is_empty() { - return None; - } - - for flag in Self::FLAGS { - if flag.name() == name { - return Some(Self::from_bits_retain(flag.value().bits())); - } - } - - None - } - - /// Yield a set of contained flags values. - /// - /// Each yielded flags value will correspond to a defined named flag. 
Any unknown bits - /// will be yielded together as a final flags value. - fn iter(&self) -> iter::Iter { - iter::Iter::new(self) - } - - /// Yield a set of contained named flags values. - /// - /// This method is like [`Flags::iter`], except only yields bits in contained named flags. - /// Any unknown bits, or bits not corresponding to a contained flag will not be yielded. - fn iter_names(&self) -> iter::IterNames { - iter::IterNames::new(self) - } - - /// Yield a set of all named flags defined by [`Self::FLAGS`]. - fn iter_defined_names() -> iter::IterDefinedNames { - iter::IterDefinedNames::new() - } - - /// Whether all bits in this flags value are unset. - fn is_empty(&self) -> bool { - self.bits() == Self::Bits::EMPTY - } - - /// Whether all known bits in this flags value are set. - fn is_all(&self) -> bool { - // NOTE: We check against `Self::all` here, not `Self::Bits::ALL` - // because the set of all flags may not use all bits - Self::all().bits() | self.bits() == self.bits() - } - - /// Whether any set bits in a source flags value are also set in a target flags value. - fn intersects(&self, other: Self) -> bool - where - Self: Sized, - { - self.bits() & other.bits() != Self::Bits::EMPTY - } - - /// Whether all set bits in a source flags value are also set in a target flags value. - fn contains(&self, other: Self) -> bool - where - Self: Sized, - { - self.bits() & other.bits() == other.bits() - } - - /// Remove any unknown bits from the flags. - fn truncate(&mut self) - where - Self: Sized, - { - *self = Self::from_bits_truncate(self.bits()); - } - - /// The bitwise or (`|`) of the bits in two flags values. - fn insert(&mut self, other: Self) - where - Self: Sized, - { - *self = Self::from_bits_retain(self.bits()).union(other); - } - - /// The intersection of a source flags value with the complement of a target flags value (`&!`). - /// - /// This method is not equivalent to `self & !other` when `other` has unknown bits set. - /// `remove` won't truncate `other`, but the `!` operator will. - fn remove(&mut self, other: Self) - where - Self: Sized, - { - *self = Self::from_bits_retain(self.bits()).difference(other); - } - - /// The bitwise exclusive-or (`^`) of the bits in two flags values. - fn toggle(&mut self, other: Self) - where - Self: Sized, - { - *self = Self::from_bits_retain(self.bits()).symmetric_difference(other); - } - - /// Call [`Flags::insert`] when `value` is `true` or [`Flags::remove`] when `value` is `false`. - fn set(&mut self, other: Self, value: bool) - where - Self: Sized, - { - if value { - self.insert(other); - } else { - self.remove(other); - } - } - - /// Unsets all bits in the flags. - fn clear(&mut self) - where - Self: Sized, - { - *self = Self::empty(); - } - - /// The bitwise and (`&`) of the bits in two flags values. - #[must_use] - fn intersection(self, other: Self) -> Self { - Self::from_bits_retain(self.bits() & other.bits()) - } - - /// The bitwise or (`|`) of the bits in two flags values. - #[must_use] - fn union(self, other: Self) -> Self { - Self::from_bits_retain(self.bits() | other.bits()) - } - - /// The intersection of a source flags value with the complement of a target flags value (`&!`). - /// - /// This method is not equivalent to `self & !other` when `other` has unknown bits set. - /// `difference` won't truncate `other`, but the `!` operator will. - #[must_use] - fn difference(self, other: Self) -> Self { - Self::from_bits_retain(self.bits() & !other.bits()) - } - - /// The bitwise exclusive-or (`^`) of the bits in two flags values. 
- #[must_use] - fn symmetric_difference(self, other: Self) -> Self { - Self::from_bits_retain(self.bits() ^ other.bits()) - } - - /// The bitwise negation (`!`) of the bits in a flags value, truncating the result. - #[must_use] - fn complement(self) -> Self { - Self::from_bits_truncate(!self.bits()) - } -} - -/** -A bits type that can be used as storage for a flags type. -*/ -pub trait Bits: - Clone - + Copy - + PartialEq - + BitAnd - + BitOr - + BitXor - + Not - + Sized - + 'static -{ - /// A value with all bits unset. - const EMPTY: Self; - - /// A value with all bits set. - const ALL: Self; -} - -// Not re-exported: prevent custom `Bits` impls being used in the `bitflags!` macro, -// or they may fail to compile based on crate features -pub trait Primitive {} - -macro_rules! impl_bits { - ($($u:ty, $i:ty,)*) => { - $( - impl Bits for $u { - const EMPTY: $u = 0; - const ALL: $u = <$u>::MAX; - } - - impl Bits for $i { - const EMPTY: $i = 0; - const ALL: $i = <$u>::MAX as $i; - } - - impl ParseHex for $u { - fn parse_hex(input: &str) -> Result { - <$u>::from_str_radix(input, 16).map_err(|_| ParseError::invalid_hex_flag(input)) - } - } - - impl ParseHex for $i { - fn parse_hex(input: &str) -> Result { - <$i>::from_str_radix(input, 16).map_err(|_| ParseError::invalid_hex_flag(input)) - } - } - - impl WriteHex for $u { - fn write_hex(&self, mut writer: W) -> fmt::Result { - write!(writer, "{:x}", self) - } - } - - impl WriteHex for $i { - fn write_hex(&self, mut writer: W) -> fmt::Result { - write!(writer, "{:x}", self) - } - } - - impl Primitive for $i {} - impl Primitive for $u {} - )* - } -} - -impl_bits! { - u8, i8, - u16, i16, - u32, i32, - u64, i64, - u128, i128, - usize, isize, -} - -/// A trait for referencing the `bitflags`-owned internal type -/// without exposing it publicly. -pub trait PublicFlags { - /// The type of the underlying storage. - type Primitive: Primitive; - - /// The type of the internal field on the generated flags type. - type Internal; -} - -#[doc(hidden)] -#[deprecated(note = "use the `Flags` trait instead")] -pub trait BitFlags: ImplementedByBitFlagsMacro + Flags { - /// An iterator over enabled flags in an instance of the type. - type Iter: Iterator; - - /// An iterator over the raw names and bits for enabled flags in an instance of the type. - type IterNames: Iterator; -} - -#[allow(deprecated)] -impl BitFlags for B { - type Iter = iter::Iter; - type IterNames = iter::IterNames; -} - -impl ImplementedByBitFlagsMacro for B {} - -/// A marker trait that signals that an implementation of `BitFlags` came from the `bitflags!` macro. -/// -/// There's nothing stopping an end-user from implementing this trait, but we don't guarantee their -/// manual implementations won't break between non-breaking releases. 
-#[doc(hidden)] -pub trait ImplementedByBitFlagsMacro {} - -pub(crate) mod __private { - pub use super::{ImplementedByBitFlagsMacro, PublicFlags}; -} diff --git a/vendor/cexpr/.cargo-checksum.json b/vendor/cexpr/.cargo-checksum.json deleted file mode 100644 index 2c634bf62c7229..00000000000000 --- a/vendor/cexpr/.cargo-checksum.json +++ /dev/null @@ -1 +0,0 @@ -{"files":{".cargo_vcs_info.json":"f5fa050aba66bc280e9163f1c2e309f87dfb3a0dc4a511b1379d767dc42bf4d1",".github/workflows/ci.yml":"ed1d33f83f25e920c5ecaec59f51fd9209fcf1da912cbff4e5a6d7da6b737922","Cargo.toml":"3300e6f2f5fc184c613a78251df3d1333530c9b54193e137b75c88f6db5a6fa6","Cargo.toml.orig":"cb6c93b8f4c1b681296427608ca2e7c6772ba3a6b824df6d968ed027afb1d851","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"d9771b8c6cf4426d3846de54c1febe20907f1eeadf7adfb5ade89a83bd9ea77f","bors.toml":"1c81ede536a37edd30fe4e622ff0531b25372403ac9475a5d6c50f14156565a2","rustfmt.toml":"d8e7f616455a670ba75e3e94bf6f88f4c168c481664d12501820c7dfff5c3cc2","src/expr.rs":"dad9327dac3af9d2f5818937aac91000e02d835dc600da6185c06e9e12047b1e","src/lib.rs":"ff218b9b734ab2eaef813a6ee3a907cb5cd71d483dfaa28d44926824a5b6d804","src/literal.rs":"6fdefc0357b8a14444df21b05c90556861dc0466e63a57669786f3ef3a3dc1c3","src/token.rs":"cd1ba6315b0137de9a0711670dd1840ac76c41f3b88dcd1a93ad77e1800c703f","tests/clang.rs":"5bb9807f35f760065d15cb9dfb7d8b79c2f734aef7ba5fe3737154155ed8ee73","tests/input/chars.h":"69c8141870872b795b5174bad125b748732c2b01d0e98ffcfc37b19f3f791f69","tests/input/fail.h":"b0b6cffd2dd17410b5eb02ee79ab75754820480b960db8a9866cc9983bd36b65","tests/input/floats.h":"28ec664e793c494e1a31f3bc5b790014e9921fc741bf475a86319b9a9eee5915","tests/input/int_signed.h":"934199eded85dd7820ca08c0beb1381ee6d9339970d2720a69c23025571707ce","tests/input/int_unsigned.h":"7b8023ba468ec76b184912692bc40e8fbcdd92ad86ec5a7c0dbcb02f2b8d961d","tests/input/strings.h":"2dd11bc066f34e8cb1916a28353e9e9a3a21cd406651b2f94fc47e89c95d9cba","tests/input/test_llvm_bug_9069.h":"8d9ae1d1eadc8f6d5c14296f984547fe894d0f2ce5cd6d7aa8caad40a56bc5e1"},"package":"6fac387a98bb7c37292057cffc56d62ecb629900026402633ae9160df93a8766"} \ No newline at end of file diff --git a/vendor/cexpr/.cargo_vcs_info.json b/vendor/cexpr/.cargo_vcs_info.json deleted file mode 100644 index 72d29e1e7693d1..00000000000000 --- a/vendor/cexpr/.cargo_vcs_info.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "git": { - "sha1": "c7ccdfbc37b508cfda1171ab4f89afaeb72e82f3" - } -} diff --git a/vendor/cexpr/.github/workflows/ci.yml b/vendor/cexpr/.github/workflows/ci.yml deleted file mode 100644 index 8af3b706469e7c..00000000000000 --- a/vendor/cexpr/.github/workflows/ci.yml +++ /dev/null @@ -1,31 +0,0 @@ -name: CI -on: - push: - branches: - - master - pull_request: - branches: - - master - - -jobs: - build_and_test: - name: Build and Test - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 - - - name: Install LLVM and Clang - uses: KyleMayes/install-llvm-action@v1 - with: - version: "11.0" - directory: ${{ runner.temp }}/llvm-11.0 - - - uses: actions-rs/toolchain@v1 - with: - toolchain: stable - - - uses: actions-rs/cargo@v1 - with: - command: test - args: --verbose --all diff --git a/vendor/cexpr/Cargo.toml b/vendor/cexpr/Cargo.toml deleted file mode 100644 index 4956001cdda7af..00000000000000 --- a/vendor/cexpr/Cargo.toml +++ /dev/null @@ -1,29 +0,0 @@ -# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO -# -# When uploading crates to the registry Cargo will automatically -# "normalize" Cargo.toml 
files for maximal compatibility -# with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g., crates.io) dependencies. -# -# If you are reading this file be aware that the original Cargo.toml -# will likely look very different (and much more reasonable). -# See Cargo.toml.orig for the original contents. - -[package] -edition = "2018" -name = "cexpr" -version = "0.6.0" -authors = ["Jethro Beekman "] -description = "A C expression parser and evaluator" -documentation = "https://docs.rs/cexpr/" -keywords = ["C", "expression", "parser"] -license = "Apache-2.0/MIT" -repository = "https://github.com/jethrogb/rust-cexpr" -[dependencies.nom] -version = "7" -features = ["std"] -default-features = false -[dev-dependencies.clang-sys] -version = ">= 0.13.0, < 0.29.0" -[badges.travis-ci] -repository = "jethrogb/rust-cexpr" diff --git a/vendor/cexpr/LICENSE-APACHE b/vendor/cexpr/LICENSE-APACHE deleted file mode 100644 index 16fe87b06e802f..00000000000000 --- a/vendor/cexpr/LICENSE-APACHE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - -2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - -3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - -4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - -5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - -6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - -8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - -Copyright [yyyy] [name of copyright owner] - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. diff --git a/vendor/cexpr/LICENSE-MIT b/vendor/cexpr/LICENSE-MIT deleted file mode 100644 index ed958e7ade0fc4..00000000000000 --- a/vendor/cexpr/LICENSE-MIT +++ /dev/null @@ -1,25 +0,0 @@ -(C) Copyright 2016 Jethro G. Beekman - -Permission is hereby granted, free of charge, to any -person obtaining a copy of this software and associated -documentation files (the "Software"), to deal in the -Software without restriction, including without -limitation the rights to use, copy, modify, merge, -publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software -is furnished to do so, subject to the following -conditions: - -The above copyright notice and this permission notice -shall be included in all copies or substantial portions -of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT -SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -DEALINGS IN THE SOFTWARE. diff --git a/vendor/cexpr/bors.toml b/vendor/cexpr/bors.toml deleted file mode 100644 index ca08e818bf3e37..00000000000000 --- a/vendor/cexpr/bors.toml +++ /dev/null @@ -1,3 +0,0 @@ -status = [ - "continuous-integration/travis-ci/push", -] diff --git a/vendor/cexpr/rustfmt.toml b/vendor/cexpr/rustfmt.toml deleted file mode 100644 index 32a9786fa1c4a9..00000000000000 --- a/vendor/cexpr/rustfmt.toml +++ /dev/null @@ -1 +0,0 @@ -edition = "2018" diff --git a/vendor/cexpr/src/expr.rs b/vendor/cexpr/src/expr.rs deleted file mode 100644 index 7f7e458bd4639b..00000000000000 --- a/vendor/cexpr/src/expr.rs +++ /dev/null @@ -1,610 +0,0 @@ -// (C) Copyright 2016 Jethro G. Beekman -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. -//! Evaluating C expressions from tokens. -//! -//! Numerical operators are supported. All numerical values are treated as -//! `i64` or `f64`. Type casting is not supported. `i64` are converted to -//! `f64` when used in conjunction with a `f64`. Right shifts are always -//! arithmetic shifts. -//! -//! The `sizeof` operator is not supported. -//! -//! String concatenation is supported, but width prefixes are ignored; all -//! strings are treated as narrow strings. -//! -//! Use the `IdentifierParser` to substitute identifiers found in expressions. - -use std::collections::HashMap; -use std::num::Wrapping; -use std::ops::{ - AddAssign, BitAndAssign, BitOrAssign, BitXorAssign, DivAssign, MulAssign, RemAssign, ShlAssign, - ShrAssign, SubAssign, -}; - -use crate::literal::{self, CChar}; -use crate::token::{Kind as TokenKind, Token}; -use crate::ToCexprResult; -use nom::branch::alt; -use nom::combinator::{complete, map, map_opt}; -use nom::multi::{fold_many0, many0, separated_list0}; -use nom::sequence::{delimited, pair, preceded}; -use nom::*; - -/// Expression parser/evaluator that supports identifiers. -#[derive(Debug)] -pub struct IdentifierParser<'ident> { - identifiers: &'ident HashMap, EvalResult>, -} -#[derive(Copy, Clone)] -struct PRef<'a>(&'a IdentifierParser<'a>); - -/// A shorthand for the type of cexpr expression evaluation results. -pub type CResult<'a, R> = IResult<&'a [Token], R, crate::Error<&'a [Token]>>; - -/// The result of parsing a literal or evaluating an expression. -#[derive(Debug, Clone, PartialEq)] -#[allow(missing_docs)] -pub enum EvalResult { - Int(Wrapping), - Float(f64), - Char(CChar), - Str(Vec), - Invalid, -} - -macro_rules! 
result_opt ( - (fn $n:ident: $e:ident -> $t:ty) => ( - #[allow(dead_code)] - #[allow(clippy::wrong_self_convention)] - fn $n(self) -> Option<$t> { - if let EvalResult::$e(v) = self { - Some(v) - } else { - None - } - } - ); -); - -impl EvalResult { - result_opt!(fn as_int: Int -> Wrapping); - result_opt!(fn as_float: Float -> f64); - result_opt!(fn as_char: Char -> CChar); - result_opt!(fn as_str: Str -> Vec); - - #[allow(clippy::wrong_self_convention)] - fn as_numeric(self) -> Option { - match self { - EvalResult::Int(_) | EvalResult::Float(_) => Some(self), - _ => None, - } - } -} - -impl From> for EvalResult { - fn from(s: Vec) -> EvalResult { - EvalResult::Str(s) - } -} - -// =========================================== -// ============= Clang tokens ================ -// =========================================== - -macro_rules! exact_token ( - ($k:ident, $c:expr) => ({ - move |input: &[Token]| { - if input.is_empty() { - let res: CResult<'_, &[u8]> = Err(crate::nom::Err::Incomplete(Needed::new($c.len()))); - res - } else { - if input[0].kind==TokenKind::$k && &input[0].raw[..]==$c { - Ok((&input[1..], &input[0].raw[..])) - } else { - Err(crate::nom::Err::Error((input, crate::ErrorKind::ExactToken(TokenKind::$k,$c)).into())) - } - } - } - }); -); - -fn identifier_token(input: &[Token]) -> CResult<'_, &[u8]> { - if input.is_empty() { - let res: CResult<'_, &[u8]> = Err(nom::Err::Incomplete(Needed::new(1))); - res - } else { - if input[0].kind == TokenKind::Identifier { - Ok((&input[1..], &input[0].raw[..])) - } else { - Err(crate::nom::Err::Error((input, crate::ErrorKind::TypedToken(TokenKind::Identifier)).into())) - } - } -} - -fn p(c: &'static str) -> impl Fn(&[Token]) -> CResult<'_, &[u8]> { - exact_token!(Punctuation, c.as_bytes()) -} - -fn one_of_punctuation(c: &'static [&'static str]) -> impl Fn(&[Token]) -> CResult<'_, &[u8]> { - move |input| { - if input.is_empty() { - let min = c - .iter() - .map(|opt| opt.len()) - .min() - .expect("at least one option"); - Err(crate::nom::Err::Incomplete(Needed::new(min))) - } else if input[0].kind == TokenKind::Punctuation - && c.iter().any(|opt| opt.as_bytes() == &input[0].raw[..]) - { - Ok((&input[1..], &input[0].raw[..])) - } else { - Err(crate::nom::Err::Error( - ( - input, - crate::ErrorKind::ExactTokens(TokenKind::Punctuation, c), - ) - .into(), - )) - } - } -} - -// ================================================== -// ============= Numeric expressions ================ -// ================================================== - -impl<'a> AddAssign<&'a EvalResult> for EvalResult { - fn add_assign(&mut self, rhs: &'a EvalResult) { - use self::EvalResult::*; - *self = match (&*self, rhs) { - (&Int(a), &Int(b)) => Int(a + b), - (&Float(a), &Int(b)) => Float(a + (b.0 as f64)), - (&Int(a), &Float(b)) => Float(a.0 as f64 + b), - (&Float(a), &Float(b)) => Float(a + b), - _ => Invalid, - }; - } -} -impl<'a> BitAndAssign<&'a EvalResult> for EvalResult { - fn bitand_assign(&mut self, rhs: &'a EvalResult) { - use self::EvalResult::*; - *self = match (&*self, rhs) { - (&Int(a), &Int(b)) => Int(a & b), - _ => Invalid, - }; - } -} -impl<'a> BitOrAssign<&'a EvalResult> for EvalResult { - fn bitor_assign(&mut self, rhs: &'a EvalResult) { - use self::EvalResult::*; - *self = match (&*self, rhs) { - (&Int(a), &Int(b)) => Int(a | b), - _ => Invalid, - }; - } -} -impl<'a> BitXorAssign<&'a EvalResult> for EvalResult { - fn bitxor_assign(&mut self, rhs: &'a EvalResult) { - use self::EvalResult::*; - *self = match (&*self, rhs) { - (&Int(a), &Int(b)) => Int(a ^ 
b), - _ => Invalid, - }; - } -} -impl<'a> DivAssign<&'a EvalResult> for EvalResult { - fn div_assign(&mut self, rhs: &'a EvalResult) { - use self::EvalResult::*; - *self = match (&*self, rhs) { - (&Int(a), &Int(b)) => Int(a / b), - (&Float(a), &Int(b)) => Float(a / (b.0 as f64)), - (&Int(a), &Float(b)) => Float(a.0 as f64 / b), - (&Float(a), &Float(b)) => Float(a / b), - _ => Invalid, - }; - } -} -impl<'a> MulAssign<&'a EvalResult> for EvalResult { - fn mul_assign(&mut self, rhs: &'a EvalResult) { - use self::EvalResult::*; - *self = match (&*self, rhs) { - (&Int(a), &Int(b)) => Int(a * b), - (&Float(a), &Int(b)) => Float(a * (b.0 as f64)), - (&Int(a), &Float(b)) => Float(a.0 as f64 * b), - (&Float(a), &Float(b)) => Float(a * b), - _ => Invalid, - }; - } -} -impl<'a> RemAssign<&'a EvalResult> for EvalResult { - fn rem_assign(&mut self, rhs: &'a EvalResult) { - use self::EvalResult::*; - *self = match (&*self, rhs) { - (&Int(a), &Int(b)) => Int(a % b), - (&Float(a), &Int(b)) => Float(a % (b.0 as f64)), - (&Int(a), &Float(b)) => Float(a.0 as f64 % b), - (&Float(a), &Float(b)) => Float(a % b), - _ => Invalid, - }; - } -} -impl<'a> ShlAssign<&'a EvalResult> for EvalResult { - fn shl_assign(&mut self, rhs: &'a EvalResult) { - use self::EvalResult::*; - *self = match (&*self, rhs) { - (&Int(a), &Int(b)) => Int(a << (b.0 as usize)), - _ => Invalid, - }; - } -} -impl<'a> ShrAssign<&'a EvalResult> for EvalResult { - fn shr_assign(&mut self, rhs: &'a EvalResult) { - use self::EvalResult::*; - *self = match (&*self, rhs) { - (&Int(a), &Int(b)) => Int(a >> (b.0 as usize)), - _ => Invalid, - }; - } -} -impl<'a> SubAssign<&'a EvalResult> for EvalResult { - fn sub_assign(&mut self, rhs: &'a EvalResult) { - use self::EvalResult::*; - *self = match (&*self, rhs) { - (&Int(a), &Int(b)) => Int(a - b), - (&Float(a), &Int(b)) => Float(a - (b.0 as f64)), - (&Int(a), &Float(b)) => Float(a.0 as f64 - b), - (&Float(a), &Float(b)) => Float(a - b), - _ => Invalid, - }; - } -} - -fn unary_op(input: (&[u8], EvalResult)) -> Option { - use self::EvalResult::*; - assert_eq!(input.0.len(), 1); - match (input.0[0], input.1) { - (b'+', i) => Some(i), - (b'-', Int(i)) => Some(Int(Wrapping(i.0.wrapping_neg()))), // impl Neg for Wrapping not until rust 1.10... 
- (b'-', Float(i)) => Some(Float(-i)), - (b'-', _) => unreachable!("non-numeric unary op"), - (b'~', Int(i)) => Some(Int(!i)), - (b'~', Float(_)) => None, - (b'~', _) => unreachable!("non-numeric unary op"), - _ => unreachable!("invalid unary op"), - } -} - -fn numeric, F>( - f: F, -) -> impl FnMut(I) -> nom::IResult -where - F: FnMut(I) -> nom::IResult, -{ - nom::combinator::map_opt(f, EvalResult::as_numeric) -} - -impl<'a> PRef<'a> { - fn unary(self, input: &'_ [Token]) -> CResult<'_, EvalResult> { - alt(( - delimited(p("("), |i| self.numeric_expr(i), p(")")), - numeric(|i| self.literal(i)), - numeric(|i| self.identifier(i)), - map_opt( - pair(one_of_punctuation(&["+", "-", "~"][..]), |i| self.unary(i)), - unary_op, - ), - ))(input) - } - - fn mul_div_rem(self, input: &'_ [Token]) -> CResult<'_, EvalResult> { - let (input, acc) = self.unary(input)?; - fold_many0( - pair(complete(one_of_punctuation(&["*", "/", "%"][..])), |i| { - self.unary(i) - }), - move || acc.clone(), - |mut acc, (op, val): (&[u8], EvalResult)| { - match op[0] as char { - '*' => acc *= &val, - '/' => acc /= &val, - '%' => acc %= &val, - _ => unreachable!(), - }; - acc - }, - )(input) - } - - fn add_sub(self, input: &'_ [Token]) -> CResult<'_, EvalResult> { - let (input, acc) = self.mul_div_rem(input)?; - fold_many0( - pair(complete(one_of_punctuation(&["+", "-"][..])), |i| { - self.mul_div_rem(i) - }), - move || acc.clone(), - |mut acc, (op, val): (&[u8], EvalResult)| { - match op[0] as char { - '+' => acc += &val, - '-' => acc -= &val, - _ => unreachable!(), - }; - acc - }, - )(input) - } - - fn shl_shr(self, input: &'_ [Token]) -> CResult<'_, EvalResult> { - let (input, acc) = self.add_sub(input)?; - numeric(fold_many0( - pair(complete(one_of_punctuation(&["<<", ">>"][..])), |i| { - self.add_sub(i) - }), - move || acc.clone(), - |mut acc, (op, val): (&[u8], EvalResult)| { - match op { - b"<<" => acc <<= &val, - b">>" => acc >>= &val, - _ => unreachable!(), - }; - acc - }, - ))(input) - } - - fn and(self, input: &'_ [Token]) -> CResult<'_, EvalResult> { - let (input, acc) = self.shl_shr(input)?; - numeric(fold_many0( - preceded(complete(p("&")), |i| self.shl_shr(i)), - move || acc.clone(), - |mut acc, val: EvalResult| { - acc &= &val; - acc - }, - ))(input) - } - - fn xor(self, input: &'_ [Token]) -> CResult<'_, EvalResult> { - let (input, acc) = self.and(input)?; - numeric(fold_many0( - preceded(complete(p("^")), |i| self.and(i)), - move || acc.clone(), - |mut acc, val: EvalResult| { - acc ^= &val; - acc - }, - ))(input) - } - - fn or(self, input: &'_ [Token]) -> CResult<'_, EvalResult> { - let (input, acc) = self.xor(input)?; - numeric(fold_many0( - preceded(complete(p("|")), |i| self.xor(i)), - move || acc.clone(), - |mut acc, val: EvalResult| { - acc |= &val; - acc - }, - ))(input) - } - - #[inline(always)] - fn numeric_expr(self, input: &'_ [Token]) -> CResult<'_, EvalResult> { - self.or(input) - } -} - -// ======================================================= -// ============= Literals and identifiers ================ -// ======================================================= - -impl<'a> PRef<'a> { - fn identifier(self, input: &'_ [Token]) -> CResult<'_, EvalResult> { - match input.split_first() { - None => Err(Err::Incomplete(Needed::new(1))), - Some(( - &Token { - kind: TokenKind::Identifier, - ref raw, - }, - rest, - )) => { - if let Some(r) = self.identifiers.get(&raw[..]) { - Ok((rest, r.clone())) - } else { - Err(Err::Error( - (input, crate::ErrorKind::UnknownIdentifier).into(), - )) - } - } - Some(_) 
=> Err(Err::Error( - (input, crate::ErrorKind::TypedToken(TokenKind::Identifier)).into(), - )), - } - } - - fn literal(self, input: &'_ [Token]) -> CResult<'_, EvalResult> { - match input.split_first() { - None => Err(Err::Incomplete(Needed::new(1))), - Some(( - &Token { - kind: TokenKind::Literal, - ref raw, - }, - rest, - )) => match literal::parse(raw) { - Ok((_, result)) => Ok((rest, result)), - _ => Err(Err::Error((input, crate::ErrorKind::InvalidLiteral).into())), - }, - Some(_) => Err(Err::Error( - (input, crate::ErrorKind::TypedToken(TokenKind::Literal)).into(), - )), - } - } - - fn string(self, input: &'_ [Token]) -> CResult<'_, Vec> { - alt(( - map_opt(|i| self.literal(i), EvalResult::as_str), - map_opt(|i| self.identifier(i), EvalResult::as_str), - ))(input) - .to_cexpr_result() - } - - // "string1" "string2" etc... - fn concat_str(self, input: &'_ [Token]) -> CResult<'_, EvalResult> { - map( - pair(|i| self.string(i), many0(complete(|i| self.string(i)))), - |(first, v)| { - Vec::into_iter(v) - .fold(first, |mut s, elem| { - Vec::extend_from_slice(&mut s, Vec::::as_slice(&elem)); - s - }) - .into() - }, - )(input) - .to_cexpr_result() - } - - fn expr(self, input: &'_ [Token]) -> CResult<'_, EvalResult> { - alt(( - |i| self.numeric_expr(i), - delimited(p("("), |i| self.expr(i), p(")")), - |i| self.concat_str(i), - |i| self.literal(i), - |i| self.identifier(i), - ))(input) - .to_cexpr_result() - } - - fn macro_definition(self, input: &'_ [Token]) -> CResult<'_, (&'_ [u8], EvalResult)> { - pair(identifier_token, |i| self.expr(i))(input) - } -} - -impl<'a> ::std::ops::Deref for PRef<'a> { - type Target = IdentifierParser<'a>; - fn deref(&self) -> &IdentifierParser<'a> { - self.0 - } -} - -impl<'ident> IdentifierParser<'ident> { - fn as_ref(&self) -> PRef<'_> { - PRef(self) - } - - /// Create a new `IdentifierParser` with a set of known identifiers. When - /// a known identifier is encountered during parsing, it is substituted - /// for the value specified. - pub fn new(identifiers: &HashMap, EvalResult>) -> IdentifierParser<'_> { - IdentifierParser { identifiers } - } - - /// Parse and evaluate an expression of a list of tokens. - /// - /// Returns an error if the input is not a valid expression or if the token - /// stream contains comments, keywords or unknown identifiers. - pub fn expr<'a>(&self, input: &'a [Token]) -> CResult<'a, EvalResult> { - self.as_ref().expr(input) - } - - /// Parse and evaluate a macro definition from a list of tokens. - /// - /// Returns the identifier for the macro and its replacement evaluated as an - /// expression. The input should not include `#define`. - /// - /// Returns an error if the replacement is not a valid expression, if called - /// on most function-like macros, or if the token stream contains comments, - /// keywords or unknown identifiers. - /// - /// N.B. This is intended to fail on function-like macros, but if it the - /// macro takes a single argument, the argument name is defined as an - /// identifier, and the macro otherwise parses as an expression, it will - /// return a result even on function-like macros. - /// - /// ```c - /// // will evaluate into IDENTIFIER - /// #define DELETE(IDENTIFIER) - /// // will evaluate into IDENTIFIER-3 - /// #define NEGATIVE_THREE(IDENTIFIER) -3 - /// ``` - pub fn macro_definition<'a>(&self, input: &'a [Token]) -> CResult<'a, (&'a [u8], EvalResult)> { - crate::assert_full_parse(self.as_ref().macro_definition(input)) - } -} - -/// Parse and evaluate an expression of a list of tokens. 
-/// -/// Returns an error if the input is not a valid expression or if the token -/// stream contains comments, keywords or identifiers. -pub fn expr(input: &[Token]) -> CResult<'_, EvalResult> { - IdentifierParser::new(&HashMap::new()).expr(input) -} - -/// Parse and evaluate a macro definition from a list of tokens. -/// -/// Returns the identifier for the macro and its replacement evaluated as an -/// expression. The input should not include `#define`. -/// -/// Returns an error if the replacement is not a valid expression, if called -/// on a function-like macro, or if the token stream contains comments, -/// keywords or identifiers. -pub fn macro_definition(input: &[Token]) -> CResult<'_, (&'_ [u8], EvalResult)> { - IdentifierParser::new(&HashMap::new()).macro_definition(input) -} - -/// Parse a functional macro declaration from a list of tokens. -/// -/// Returns the identifier for the macro and the argument list (in order). The -/// input should not include `#define`. The actual definition is not parsed and -/// may be obtained from the unparsed data returned. -/// -/// Returns an error if the input is not a functional macro or if the token -/// stream contains comments. -/// -/// # Example -/// ``` -/// use cexpr::expr::{IdentifierParser, EvalResult, fn_macro_declaration}; -/// use cexpr::assert_full_parse; -/// use cexpr::token::Kind::*; -/// use cexpr::token::Token; -/// -/// // #define SUFFIX(arg) arg "suffix" -/// let tokens = vec![ -/// (Identifier, &b"SUFFIX"[..]).into(), -/// (Punctuation, &b"("[..]).into(), -/// (Identifier, &b"arg"[..]).into(), -/// (Punctuation, &b")"[..]).into(), -/// (Identifier, &b"arg"[..]).into(), -/// (Literal, &br#""suffix""#[..]).into(), -/// ]; -/// -/// // Try to parse the functional part -/// let (expr, (ident, args)) = fn_macro_declaration(&tokens).unwrap(); -/// assert_eq!(ident, b"SUFFIX"); -/// -/// // Create dummy arguments -/// let idents = args.into_iter().map(|arg| -/// (arg.to_owned(), EvalResult::Str(b"test".to_vec())) -/// ).collect(); -/// -/// // Evaluate the macro -/// let (_, evaluated) = assert_full_parse(IdentifierParser::new(&idents).expr(expr)).unwrap(); -/// assert_eq!(evaluated, EvalResult::Str(b"testsuffix".to_vec())); -/// ``` -pub fn fn_macro_declaration(input: &[Token]) -> CResult<'_, (&[u8], Vec<&[u8]>)> { - pair( - identifier_token, - delimited( - p("("), - separated_list0(p(","), identifier_token), - p(")"), - ), - )(input) -} diff --git a/vendor/cexpr/src/lib.rs b/vendor/cexpr/src/lib.rs deleted file mode 100644 index 5170f97d135c6c..00000000000000 --- a/vendor/cexpr/src/lib.rs +++ /dev/null @@ -1,149 +0,0 @@ -// (C) Copyright 2016 Jethro G. Beekman -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. -//! A C expression parser and evaluator. -//! -//! This crate provides methods for parsing and evaluating simple C expressions. In general, the -//! crate can handle most arithmetic expressions that would appear in macros or the definition of -//! constants, as well as string and character constants. -//! -//! The main entry point for is [`token::parse`], which parses a byte string and returns its -//! evaluated value. -#![warn(rust_2018_idioms)] -#![warn(missing_docs)] -#![allow(deprecated)] - -pub mod nom { - //! nom's result types, re-exported. 
- pub use nom::{error::ErrorKind, error::Error, Err, IResult, Needed}; -} -pub mod expr; -pub mod literal; -pub mod token; - -/// Parsing errors specific to C parsing -#[derive(Debug)] -pub enum ErrorKind { - /// Expected the specified token - ExactToken(token::Kind, &'static [u8]), - /// Expected one of the specified tokens - ExactTokens(token::Kind, &'static [&'static str]), - /// Expected a token of the specified kind - TypedToken(token::Kind), - /// An unknown identifier was encountered - UnknownIdentifier, - /// An invalid literal was encountered. - /// - /// When encountered, this generally means a bug exists in the data that - /// was passed in or the parsing logic. - InvalidLiteral, - /// A full parse was requested, but data was left over after parsing finished. - Partial, - /// An error occurred in an underlying nom parser. - Parser(nom::ErrorKind), -} - -impl From for ErrorKind { - fn from(k: nom::ErrorKind) -> Self { - ErrorKind::Parser(k) - } -} - -impl From for ErrorKind { - fn from(_: u32) -> Self { - ErrorKind::InvalidLiteral - } -} - -/// Parsing errors specific to C parsing. -/// -/// This is a superset of `(I, nom::ErrorKind)` that includes the additional errors specified by -/// [`ErrorKind`]. -#[derive(Debug)] -pub struct Error { - /// The remainder of the input stream at the time of the error. - pub input: I, - /// The error that occurred. - pub error: ErrorKind, -} - -impl From<(I, nom::ErrorKind)> for Error { - fn from(e: (I, nom::ErrorKind)) -> Self { - Self::from((e.0, ErrorKind::from(e.1))) - } -} - -impl From<(I, ErrorKind)> for Error { - fn from(e: (I, ErrorKind)) -> Self { - Self { - input: e.0, - error: e.1, - } - } -} - -impl From<::nom::error::Error> for Error { - fn from(e: ::nom::error::Error) -> Self { - Self { - input: e.input, - error: e.code.into(), - } - } -} - -impl ::nom::error::ParseError for Error { - fn from_error_kind(input: I, kind: nom::ErrorKind) -> Self { - Self { - input, - error: kind.into(), - } - } - - fn append(_: I, _: nom::ErrorKind, other: Self) -> Self { - other - } -} - -// in lieu of https://github.com/Geal/nom/issues/1010 -trait ToCexprResult { - fn to_cexpr_result(self) -> nom::IResult>; -} -impl ToCexprResult for nom::IResult -where - Error: From, -{ - fn to_cexpr_result(self) -> nom::IResult> { - match self { - Ok(v) => Ok(v), - Err(nom::Err::Incomplete(n)) => Err(nom::Err::Incomplete(n)), - Err(nom::Err::Error(e)) => Err(nom::Err::Error(e.into())), - Err(nom::Err::Failure(e)) => Err(nom::Err::Failure(e.into())), - } - } -} - -/// If the input result indicates a succesful parse, but there is data left, -/// return an `Error::Partial` instead. -pub fn assert_full_parse<'i, I: 'i, O, E>( - result: nom::IResult<&'i [I], O, E>, -) -> nom::IResult<&'i [I], O, Error<&'i [I]>> -where - Error<&'i [I]>: From, -{ - match result.to_cexpr_result() { - Ok((rem, output)) => { - if rem.is_empty() { - Ok((rem, output)) - } else { - Err(nom::Err::Error((rem, ErrorKind::Partial).into())) - } - } - Err(nom::Err::Incomplete(n)) => Err(nom::Err::Incomplete(n)), - Err(nom::Err::Failure(e)) => Err(nom::Err::Failure(e)), - Err(nom::Err::Error(e)) => Err(nom::Err::Error(e)), - } -} diff --git a/vendor/cexpr/src/literal.rs b/vendor/cexpr/src/literal.rs deleted file mode 100644 index 68e85c7dadbd0d..00000000000000 --- a/vendor/cexpr/src/literal.rs +++ /dev/null @@ -1,361 +0,0 @@ -// (C) Copyright 2016 Jethro G. Beekman -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. 
This file may not be copied, modified, or distributed -// except according to those terms. -//! Parsing C literals from byte slices. -//! -//! This will parse a representation of a C literal into a Rust type. -//! -//! # characters -//! Character literals are stored into the `CChar` type, which can hold values -//! that are not valid Unicode code points. ASCII characters are represented as -//! `char`, literal bytes with the high byte set are converted into the raw -//! representation. Escape sequences are supported. If hex and octal escapes -//! map to an ASCII character, that is used, otherwise, the raw encoding is -//! used, including for values over 255. Unicode escapes are checked for -//! validity and mapped to `char`. Character sequences are not supported. Width -//! prefixes are ignored. -//! -//! # strings -//! Strings are interpreted as byte vectors. Escape sequences are supported. If -//! hex and octal escapes map onto multi-byte characters, they are truncated to -//! one 8-bit character. Unicode escapes are converted into their UTF-8 -//! encoding. Width prefixes are ignored. -//! -//! # integers -//! Integers are read into `i64`. Binary, octal, decimal and hexadecimal are -//! all supported. If the literal value is between `i64::MAX` and `u64::MAX`, -//! it is bit-cast to `i64`. Values over `u64::MAX` cannot be parsed. Width and -//! sign suffixes are ignored. Sign prefixes are not supported. -//! -//! # real numbers -//! Reals are read into `f64`. Width suffixes are ignored. Sign prefixes are -//! not supported in the significand. Hexadecimal floating points are not -//! supported. - -use std::char; -use std::str::{self, FromStr}; - -use nom::branch::alt; -use nom::bytes::complete::is_not; -use nom::bytes::complete::tag; -use nom::character::complete::{char, one_of}; -use nom::combinator::{complete, map, map_opt, opt, recognize}; -use nom::multi::{fold_many0, many0, many1, many_m_n}; -use nom::sequence::{delimited, pair, preceded, terminated, tuple}; -use nom::*; - -use crate::expr::EvalResult; -use crate::ToCexprResult; - -#[derive(Debug, Copy, Clone, PartialEq, Eq)] -/// Representation of a C character -pub enum CChar { - /// A character that can be represented as a `char` - Char(char), - /// Any other character (8-bit characters, unicode surrogates, etc.) - Raw(u64), -} - -impl From for CChar { - fn from(i: u8) -> CChar { - match i { - 0..=0x7f => CChar::Char(i as u8 as char), - _ => CChar::Raw(i as u64), - } - } -} - -// A non-allocating version of this would be nice... -impl std::convert::Into> for CChar { - fn into(self) -> Vec { - match self { - CChar::Char(c) => { - let mut s = String::with_capacity(4); - s.extend(&[c]); - s.into_bytes() - } - CChar::Raw(i) => { - let mut v = Vec::with_capacity(1); - v.push(i as u8); - v - } - } - } -} - -/// ensures the child parser consumes the whole input -pub fn full( - f: F, -) -> impl Fn(I) -> nom::IResult -where - I: nom::InputLength, - F: Fn(I) -> nom::IResult, -{ - move |input| { - let res = f(input); - match res { - Ok((i, o)) => { - if i.input_len() == 0 { - Ok((i, o)) - } else { - Err(nom::Err::Error(nom::error::Error::new(i, nom::error::ErrorKind::Complete))) - } - } - r => r, - } - } -} - -// ================================= -// ======== matching digits ======== -// ================================= - -macro_rules! 
byte { - ($($p: pat)|* ) => {{ - fn parser(i: &[u8]) -> crate::nom::IResult<&[u8], u8> { - match i.split_first() { - $(Some((&c @ $p,rest)))|* => Ok((rest,c)), - Some(_) => Err(nom::Err::Error(nom::error::Error::new(i, nom::error::ErrorKind::OneOf))), - None => Err(nom::Err::Incomplete(Needed::new(1))), - } - } - - parser - }} -} - -fn binary(i: &[u8]) -> nom::IResult<&[u8], u8> { - byte!(b'0'..=b'1')(i) -} - -fn octal(i: &[u8]) -> nom::IResult<&[u8], u8> { - byte!(b'0'..=b'7')(i) -} - -fn decimal(i: &[u8]) -> nom::IResult<&[u8], u8> { - byte!(b'0'..=b'9')(i) -} - -fn hexadecimal(i: &[u8]) -> nom::IResult<&[u8], u8> { - byte!(b'0' ..= b'9' | b'a' ..= b'f' | b'A' ..= b'F')(i) -} - -// ======================================== -// ======== characters and strings ======== -// ======================================== - -fn escape2char(c: char) -> CChar { - CChar::Char(match c { - 'a' => '\x07', - 'b' => '\x08', - 'f' => '\x0c', - 'n' => '\n', - 'r' => '\r', - 't' => '\t', - 'v' => '\x0b', - _ => unreachable!("invalid escape {}", c), - }) -} - -fn c_raw_escape(n: Vec, radix: u32) -> Option { - str::from_utf8(&n) - .ok() - .and_then(|i| u64::from_str_radix(i, radix).ok()) - .map(|i| match i { - 0..=0x7f => CChar::Char(i as u8 as char), - _ => CChar::Raw(i), - }) -} - -fn c_unicode_escape(n: Vec) -> Option { - str::from_utf8(&n) - .ok() - .and_then(|i| u32::from_str_radix(i, 16).ok()) - .and_then(char::from_u32) - .map(CChar::Char) -} - -fn escaped_char(i: &[u8]) -> nom::IResult<&[u8], CChar> { - preceded( - char('\\'), - alt(( - map(one_of(r#"'"?\"#), CChar::Char), - map(one_of("abfnrtv"), escape2char), - map_opt(many_m_n(1, 3, octal), |v| c_raw_escape(v, 8)), - map_opt(preceded(char('x'), many1(hexadecimal)), |v| { - c_raw_escape(v, 16) - }), - map_opt( - preceded(char('u'), many_m_n(4, 4, hexadecimal)), - c_unicode_escape, - ), - map_opt( - preceded(char('U'), many_m_n(8, 8, hexadecimal)), - c_unicode_escape, - ), - )), - )(i) -} - -fn c_width_prefix(i: &[u8]) -> nom::IResult<&[u8], &[u8]> { - alt((tag("u8"), tag("u"), tag("U"), tag("L")))(i) -} - -fn c_char(i: &[u8]) -> nom::IResult<&[u8], CChar> { - delimited( - terminated(opt(c_width_prefix), char('\'')), - alt(( - escaped_char, - map(byte!(0 ..= 91 /* \=92 */ | 93 ..= 255), CChar::from), - )), - char('\''), - )(i) -} - -fn c_string(i: &[u8]) -> nom::IResult<&[u8], Vec> { - delimited( - alt((preceded(c_width_prefix, char('"')), char('"'))), - fold_many0( - alt(( - map(escaped_char, |c: CChar| c.into()), - map(is_not([b'\\', b'"']), |c: &[u8]| c.into()), - )), - Vec::new, - |mut v: Vec, res: Vec| { - v.extend_from_slice(&res); - v - }, - ), - char('"'), - )(i) -} - -// ================================ -// ======== parse integers ======== -// ================================ - -fn c_int_radix(n: Vec, radix: u32) -> Option { - str::from_utf8(&n) - .ok() - .and_then(|i| u64::from_str_radix(i, radix).ok()) -} - -fn take_ul(input: &[u8]) -> IResult<&[u8], &[u8]> { - let r = input.split_at_position(|c| c != b'u' && c != b'U' && c != b'l' && c != b'L'); - match r { - Err(Err::Incomplete(_)) => Ok((&input[input.len()..], input)), - res => res, - } -} - -fn c_int(i: &[u8]) -> nom::IResult<&[u8], i64> { - map( - terminated( - alt(( - map_opt(preceded(tag("0x"), many1(complete(hexadecimal))), |v| { - c_int_radix(v, 16) - }), - map_opt(preceded(tag("0X"), many1(complete(hexadecimal))), |v| { - c_int_radix(v, 16) - }), - map_opt(preceded(tag("0b"), many1(complete(binary))), |v| { - c_int_radix(v, 2) - }), - map_opt(preceded(tag("0B"), 
many1(complete(binary))), |v| { - c_int_radix(v, 2) - }), - map_opt(preceded(char('0'), many1(complete(octal))), |v| { - c_int_radix(v, 8) - }), - map_opt(many1(complete(decimal)), |v| c_int_radix(v, 10)), - |input| Err(crate::nom::Err::Error(nom::error::Error::new(input, crate::nom::ErrorKind::Fix))), - )), - opt(take_ul), - ), - |i| i as i64, - )(i) -} - -// ============================== -// ======== parse floats ======== -// ============================== - -fn float_width(i: &[u8]) -> nom::IResult<&[u8], u8> { - nom::combinator::complete(byte!(b'f' | b'l' | b'F' | b'L'))(i) -} - -fn float_exp(i: &[u8]) -> nom::IResult<&[u8], (Option, Vec)> { - preceded( - byte!(b'e' | b'E'), - pair(opt(byte!(b'-' | b'+')), many1(complete(decimal))), - )(i) -} - -fn c_float(i: &[u8]) -> nom::IResult<&[u8], f64> { - map_opt( - alt(( - terminated( - recognize(tuple(( - many1(complete(decimal)), - byte!(b'.'), - many0(complete(decimal)), - ))), - opt(float_width), - ), - terminated( - recognize(tuple(( - many0(complete(decimal)), - byte!(b'.'), - many1(complete(decimal)), - ))), - opt(float_width), - ), - terminated( - recognize(tuple(( - many0(complete(decimal)), - opt(byte!(b'.')), - many1(complete(decimal)), - float_exp, - ))), - opt(float_width), - ), - terminated( - recognize(tuple(( - many1(complete(decimal)), - opt(byte!(b'.')), - many0(complete(decimal)), - float_exp, - ))), - opt(float_width), - ), - terminated(recognize(many1(complete(decimal))), float_width), - )), - |v| str::from_utf8(v).ok().and_then(|i| f64::from_str(i).ok()), - )(i) -} - -// ================================ -// ======== main interface ======== -// ================================ - -fn one_literal(input: &[u8]) -> nom::IResult<&[u8], EvalResult, crate::Error<&[u8]>> { - alt(( - map(full(c_char), EvalResult::Char), - map(full(c_int), |i| EvalResult::Int(::std::num::Wrapping(i))), - map(full(c_float), EvalResult::Float), - map(full(c_string), EvalResult::Str), - ))(input) - .to_cexpr_result() -} - -/// Parse a C literal. -/// -/// The input must contain exactly the representation of a single literal -/// token, and in particular no whitespace or sign prefixes. -pub fn parse(input: &[u8]) -> IResult<&[u8], EvalResult, crate::Error<&[u8]>> { - crate::assert_full_parse(one_literal(input)) -} diff --git a/vendor/cexpr/src/token.rs b/vendor/cexpr/src/token.rs deleted file mode 100644 index dbc5949cd4fcb2..00000000000000 --- a/vendor/cexpr/src/token.rs +++ /dev/null @@ -1,44 +0,0 @@ -// (C) Copyright 2016 Jethro G. Beekman -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. -//! Representation of a C token -//! -//! This is designed to map onto a libclang CXToken. - -#[derive(Debug, Copy, Clone, PartialEq, Eq)] -#[allow(missing_docs)] -pub enum Kind { - Punctuation, - Keyword, - Identifier, - Literal, - Comment, -} - -/// A single token in a C expression. -#[derive(Debug, Clone, PartialEq, Eq)] -pub struct Token { - /// The type of this token. - pub kind: Kind, - /// The bytes that make up the token. 
- pub raw: Box<[u8]>, -} - -impl<'a> From<(Kind, &'a [u8])> for Token { - fn from((kind, value): (Kind, &'a [u8])) -> Token { - Token { - kind, - raw: value.to_owned().into_boxed_slice(), - } - } -} - -/// Remove all comment tokens from a vector of tokens -pub fn remove_comments(v: &mut Vec) -> &mut Vec { - v.retain(|t| t.kind != Kind::Comment); - v -} diff --git a/vendor/cexpr/tests/clang.rs b/vendor/cexpr/tests/clang.rs deleted file mode 100644 index b2484f0778288b..00000000000000 --- a/vendor/cexpr/tests/clang.rs +++ /dev/null @@ -1,339 +0,0 @@ -// (C) Copyright 2016 Jethro G. Beekman -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. -extern crate cexpr; -extern crate clang_sys; - -use std::collections::HashMap; -use std::io::Write; -use std::str::{self, FromStr}; -use std::{char, ffi, mem, ptr, slice}; - -use cexpr::assert_full_parse; -use cexpr::expr::{fn_macro_declaration, EvalResult, IdentifierParser}; -use cexpr::literal::CChar; -use cexpr::token::Token; -use clang_sys::*; - -// main testing routine -fn test_definition( - ident: Vec, - tokens: &[Token], - idents: &mut HashMap, EvalResult>, -) -> bool { - fn bytes_to_int(value: &[u8]) -> Option { - str::from_utf8(value) - .ok() - .map(|s| s.replace("n", "-")) - .map(|s| s.replace("_", "")) - .and_then(|v| i64::from_str(&v).ok()) - .map(::std::num::Wrapping) - .map(Int) - } - - use cexpr::expr::EvalResult::*; - - let display_name = String::from_utf8_lossy(&ident).into_owned(); - - let functional; - let test = { - // Split name such as Str_test_string into (Str,test_string) - let pos = ident - .iter() - .position(|c| *c == b'_') - .expect(&format!("Invalid definition in testcase: {}", display_name)); - let mut expected = &ident[..pos]; - let mut value = &ident[(pos + 1)..]; - - functional = expected == b"Fn"; - - if functional { - let ident = value; - let pos = ident - .iter() - .position(|c| *c == b'_') - .expect(&format!("Invalid definition in testcase: {}", display_name)); - expected = &ident[..pos]; - value = &ident[(pos + 1)..]; - } - - if expected == b"Str" { - let mut splits = value.split(|c| *c == b'U'); - let mut s = Vec::with_capacity(value.len()); - s.extend_from_slice(splits.next().unwrap()); - for split in splits { - let (chr, rest) = split.split_at(6); - let chr = u32::from_str_radix(str::from_utf8(chr).unwrap(), 16).unwrap(); - write!(s, "{}", char::from_u32(chr).unwrap()).unwrap(); - s.extend_from_slice(rest); - } - Some(Str(s)) - } else if expected == b"Int" { - bytes_to_int(value) - } else if expected == b"Float" { - str::from_utf8(value) - .ok() - .map(|s| s.replace("n", "-").replace("p", ".")) - .and_then(|v| f64::from_str(&v).ok()) - .map(Float) - } else if expected == b"CharRaw" { - str::from_utf8(value) - .ok() - .and_then(|v| u64::from_str(v).ok()) - .map(CChar::Raw) - .map(Char) - } else if expected == b"CharChar" { - str::from_utf8(value) - .ok() - .and_then(|v| u32::from_str(v).ok()) - .and_then(char::from_u32) - .map(CChar::Char) - .map(Char) - } else { - Some(Invalid) - } - .expect(&format!("Invalid definition in testcase: {}", display_name)) - }; - - let result = if functional { - let mut fnidents; - let expr_tokens; - match fn_macro_declaration(&tokens) { - Ok((rest, (_, args))) => { - fnidents = idents.clone(); - expr_tokens = rest; - for arg in args { - let val = match test { - Int(_) => bytes_to_int(&arg), - Str(_) => Some(Str(arg.to_owned())), - _ => 
unimplemented!(), - } - .expect(&format!( - "Invalid argument in functional macro testcase: {}", - display_name - )); - fnidents.insert(arg.to_owned(), val); - } - } - e => { - println!( - "Failed test for {}, unable to parse functional macro declaration: {:?}", - display_name, e - ); - return false; - } - } - assert_full_parse(IdentifierParser::new(&fnidents).expr(&expr_tokens)) - } else { - IdentifierParser::new(idents) - .macro_definition(&tokens) - .map(|(i, (_, val))| (i, val)) - }; - - match result { - Ok((_, val)) => { - if val == test { - if let Some(_) = idents.insert(ident, val) { - panic!("Duplicate definition for testcase: {}", display_name); - } - true - } else { - println!( - "Failed test for {}, expected {:?}, got {:?}", - display_name, test, val - ); - false - } - } - e => { - if test == Invalid { - true - } else { - println!( - "Failed test for {}, expected {:?}, got {:?}", - display_name, test, e - ); - false - } - } - } -} - -// support code for the clang lexer -unsafe fn clang_str_to_vec(s: CXString) -> Vec { - let vec = ffi::CStr::from_ptr(clang_getCString(s)) - .to_bytes() - .to_owned(); - clang_disposeString(s); - vec -} - -#[allow(non_upper_case_globals)] -unsafe fn token_clang_to_cexpr(tu: CXTranslationUnit, orig: &CXToken) -> Token { - Token { - kind: match clang_getTokenKind(*orig) { - CXToken_Comment => cexpr::token::Kind::Comment, - CXToken_Identifier => cexpr::token::Kind::Identifier, - CXToken_Keyword => cexpr::token::Kind::Keyword, - CXToken_Literal => cexpr::token::Kind::Literal, - CXToken_Punctuation => cexpr::token::Kind::Punctuation, - _ => panic!("invalid token kind: {:?}", *orig), - }, - raw: clang_str_to_vec(clang_getTokenSpelling(tu, *orig)).into_boxed_slice(), - } -} - -extern "C" fn visit_children_thunk( - cur: CXCursor, - parent: CXCursor, - closure: CXClientData, -) -> CXChildVisitResult -where - F: FnMut(CXCursor, CXCursor) -> CXChildVisitResult, -{ - unsafe { (&mut *(closure as *mut F))(cur, parent) } -} - -unsafe fn visit_children(cursor: CXCursor, mut f: F) -where - F: FnMut(CXCursor, CXCursor) -> CXChildVisitResult, -{ - clang_visitChildren( - cursor, - visit_children_thunk:: as _, - &mut f as *mut F as CXClientData, - ); -} - -unsafe fn location_in_scope(r: CXSourceRange) -> bool { - let start = clang_getRangeStart(r); - let mut file = ptr::null_mut(); - clang_getSpellingLocation( - start, - &mut file, - ptr::null_mut(), - ptr::null_mut(), - ptr::null_mut(), - ); - clang_Location_isFromMainFile(start) != 0 - && clang_Location_isInSystemHeader(start) == 0 - && file != ptr::null_mut() -} - -/// tokenize_range_adjust can be used to work around LLVM bug 9069 -/// https://bugs.llvm.org//show_bug.cgi?id=9069 -fn file_visit_macros, Vec)>( - file: &str, - tokenize_range_adjust: bool, - mut visitor: F, -) { - unsafe { - let tu = { - let index = clang_createIndex(true as _, false as _); - let cfile = ffi::CString::new(file).unwrap(); - let mut tu = mem::MaybeUninit::uninit(); - assert!( - clang_parseTranslationUnit2( - index, - cfile.as_ptr(), - [b"-std=c11\0".as_ptr() as *const ::std::os::raw::c_char].as_ptr(), - 1, - ptr::null_mut(), - 0, - CXTranslationUnit_DetailedPreprocessingRecord, - &mut *tu.as_mut_ptr() - ) == CXError_Success, - "Failure reading test case {}", - file - ); - tu.assume_init() - }; - visit_children(clang_getTranslationUnitCursor(tu), |cur, _parent| { - if cur.kind == CXCursor_MacroDefinition { - let mut range = clang_getCursorExtent(cur); - if !location_in_scope(range) { - return CXChildVisit_Continue; - } - range.end_int_data 
-= if tokenize_range_adjust { 1 } else { 0 }; - let mut token_ptr = ptr::null_mut(); - let mut num = 0; - clang_tokenize(tu, range, &mut token_ptr, &mut num); - if token_ptr != ptr::null_mut() { - let tokens = slice::from_raw_parts(token_ptr, num as usize); - let tokens: Vec<_> = tokens - .iter() - .filter_map(|t| { - if clang_getTokenKind(*t) != CXToken_Comment { - Some(token_clang_to_cexpr(tu, t)) - } else { - None - } - }) - .collect(); - clang_disposeTokens(tu, token_ptr, num); - visitor(clang_str_to_vec(clang_getCursorSpelling(cur)), tokens) - } - } - CXChildVisit_Continue - }); - clang_disposeTranslationUnit(tu); - }; -} - -fn test_file(file: &str) -> bool { - let mut idents = HashMap::new(); - let mut all_succeeded = true; - file_visit_macros(file, fix_bug_9069(), |ident, tokens| { - all_succeeded &= test_definition(ident, &tokens, &mut idents) - }); - all_succeeded -} - -fn fix_bug_9069() -> bool { - fn check_bug_9069() -> bool { - let mut token_sets = vec![]; - file_visit_macros( - "tests/input/test_llvm_bug_9069.h", - false, - |ident, tokens| { - assert_eq!(&ident, b"A"); - token_sets.push(tokens); - }, - ); - assert_eq!(token_sets.len(), 2); - token_sets[0] != token_sets[1] - } - - use std::sync::atomic::{AtomicBool, Ordering}; - use std::sync::Once; - - static CHECK_FIX: Once = Once::new(); - static FIX: AtomicBool = AtomicBool::new(false); - - CHECK_FIX.call_once(|| FIX.store(check_bug_9069(), Ordering::SeqCst)); - - FIX.load(Ordering::SeqCst) -} - -macro_rules! test_file { - ($f:ident) => { - #[test] - fn $f() { - assert!( - test_file(concat!("tests/input/", stringify!($f), ".h")), - "test_file" - ) - } - }; -} - -test_file!(floats); -test_file!(chars); -test_file!(strings); -test_file!(int_signed); -test_file!(int_unsigned); -test_file!(fail); diff --git a/vendor/cexpr/tests/input/chars.h b/vendor/cexpr/tests/input/chars.h deleted file mode 100644 index 45351d3259bd37..00000000000000 --- a/vendor/cexpr/tests/input/chars.h +++ /dev/null @@ -1,3 +0,0 @@ -#define CharChar_65 'A' -#define CharChar_127849 '\U0001f369' // 🍩 -#define CharRaw_255 U'\xff' diff --git a/vendor/cexpr/tests/input/fail.h b/vendor/cexpr/tests/input/fail.h deleted file mode 100644 index fd416bc7cb0f4d..00000000000000 --- a/vendor/cexpr/tests/input/fail.h +++ /dev/null @@ -1,9 +0,0 @@ -#define FAIL_function_like(x) 3 -#define FAIL_empty -#define FAIL_invalid_for_radix 0b2 -#define FAIL_shift_by_float 3<<1f -#define FAIL_unknown_identifier UNKNOWN -#define Int_0 0 -#define Str_str "str" -#define FAIL_concat_integer "test" Str_str Int_0 -#define FAIL_too_large_int 18446744073709551616 diff --git a/vendor/cexpr/tests/input/floats.h b/vendor/cexpr/tests/input/floats.h deleted file mode 100644 index 61942cf41fe3e5..00000000000000 --- a/vendor/cexpr/tests/input/floats.h +++ /dev/null @@ -1,8 +0,0 @@ -#define Float_0 0. 
-#define Float_1 1f -#define Float_p1 .1 -#define Float_2 2.0 -#define Float_1000 1e3 -#define Float_2000 2e+3 -#define Float_p001 1e-3 -#define Float_80 10.0*(1<<3) diff --git a/vendor/cexpr/tests/input/int_signed.h b/vendor/cexpr/tests/input/int_signed.h deleted file mode 100644 index 65854a63e30787..00000000000000 --- a/vendor/cexpr/tests/input/int_signed.h +++ /dev/null @@ -1,3 +0,0 @@ -#define Int_n3 -(-(-3)) -#define Int_n5 -3-2 -#define Int_n9223372036854775808 -9223372036854775808 diff --git a/vendor/cexpr/tests/input/int_unsigned.h b/vendor/cexpr/tests/input/int_unsigned.h deleted file mode 100644 index 6663dda3d6e5ac..00000000000000 --- a/vendor/cexpr/tests/input/int_unsigned.h +++ /dev/null @@ -1,29 +0,0 @@ -#define Int_456 456 -#define Int_0 0 -#define Int_1 0b1 -#define Int_2 0x2 -#define Int_3 3L -#define Int_4 0X4 -#define Int_5 0B101 -#define Int_63 077 -#define Int_123 123 -#define Int_124 124u -#define Int_125 125uL -#define Int_126 126LuL -#define Int_16 (((1)<<4ULL))/*comment*/ -#define Int_13 1|8^6&2<<1 - -#define Int_47 32|15 -#define Int_38 (32|15)^9 -#define Int_6 ((32|15)^9)&7 -#define Int_12 (((32|15)^9)&7)<<1 -#define Int_17 ((((32|15)^9)&7)<<1)+5 -#define Int_15 (((((32|15)^9)&7)<<1)+5)-2 -#define Int_60 ((((((32|15)^9)&7)<<1)+5)-2)*4 -#define Int_30 (((((((32|15)^9)&7)<<1)+5)-2)*4)/2 -#define Int_39 32|15^9&7<<1+5-2*4/2 - -#define Int_n1 18446744073709551615 /*2^64-1*/ -#define Int_n9223372036854775808 9223372036854775808 - -#define Fn_Int_9(_3) _3*3 diff --git a/vendor/cexpr/tests/input/strings.h b/vendor/cexpr/tests/input/strings.h deleted file mode 100644 index d01d409cbfc485..00000000000000 --- a/vendor/cexpr/tests/input/strings.h +++ /dev/null @@ -1,17 +0,0 @@ -#define Str_ "" -#define Str_str "str" -#define Str_unicode u"unicode" -#define Str_long L"long" -#define Str_concat u"con" L"cat" -#define Str_concat_parens ("concat" U"_parens") -#define Str_concat_identifier (Str_concat L"_identifier") -#define Str_hex_escape_all "\x68\x65\x78\x5f\x65\x73\x63\x61\x70\x65\x5f\x61\x6c\x6c" -#define Str_hex_escape_hex "h\x65x_\x65s\x63\x61p\x65_h\x65x" -#define Str_quote_U000022_escape "quote_\"_escape" -#define Str_Fly_away_in_my_space_U01F680_You_no_need_put_U01F4B5_in_my_pocket \ - u8"Fly_away_in_my_space_🚀_You_no_need_put_💵_in_my_pocket" -#define Fn_Str_no_args() "no_args" -#define Fn_Str_no_args_concat() "no_args_" Str_concat -#define Fn_Str_prepend_arg(arg) "prepend_" arg -#define Fn_Str_two_args(two, args) two "_" args -#define Fn_Str_three_args(three, _, args) three _ args diff --git a/vendor/cexpr/tests/input/test_llvm_bug_9069.h b/vendor/cexpr/tests/input/test_llvm_bug_9069.h deleted file mode 100644 index a92374efee136d..00000000000000 --- a/vendor/cexpr/tests/input/test_llvm_bug_9069.h +++ /dev/null @@ -1,4 +0,0 @@ -// The following two definitions should yield the same list of tokens. -// If https://bugs.llvm.org//show_bug.cgi?id=9069 is not fixed, they don't. 
-#define A 1 -#define A 1 diff --git a/vendor/cfg-if/.cargo-checksum.json b/vendor/cfg-if/.cargo-checksum.json deleted file mode 100644 index 78d3f3fbcf3746..00000000000000 --- a/vendor/cfg-if/.cargo-checksum.json +++ /dev/null @@ -1 +0,0 @@ -{"files":{".cargo_vcs_info.json":"65840ba044457862e25b2d7d53f4a2de232adf933bd31aba8f2bd6a1f51a6881",".github/dependabot.yml":"828e3ecefdc555a5210a5bdffd5621ef3625ceb35c7fc91a0b4faef6f9921b75",".github/workflows/main.yaml":"6612a51b1f1479eabac7d3bd14aa609811d4afd2df2d454b9a1f6d6f3748f5b2",".github/workflows/publish.yaml":"1417805078704eecbaeea8611c5a44df575bfe1908d6969d909224a6e5e26ca8","CHANGELOG.md":"08ba7340057565b338afaa29b36bd2a1c46f5495b043bc49d12230a6a82d5f76","Cargo.lock":"26922b9384045e5a3d496f21ec7c355da585d0caa1d13b887b634527d36fc450","Cargo.toml":"281d508beb1fe3927cf03d3f2f8c9a5117b1e4fe97ae21b9026cf318e8c35273","Cargo.toml.orig":"5a17ee17da78f5179373b8324a1180e71efe2bcf3e3c9ca18c1bdb1e3faa9792","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"378f5840b258e2779c39418f3f2d7b2ba96f1c7917dd6be0713f88305dbda397","README.md":"cd565d563a2c70d197bb6fee1678e122214e22af7bdb046b80f52c1d953cd72f","src/lib.rs":"c09723e0890d15810374009e96b20bf0eb2f65f383006516f34db36240835c85","tests/xcrate.rs":"bcec148e69db81b1a618bdd6f96a25d9a0442e6ecc692fe28f1206d9bffc006a"},"package":"9330f8b2ff13f34540b44e946ef35111825727b38d33286ef986142615121801"} \ No newline at end of file diff --git a/vendor/cfg-if/.cargo_vcs_info.json b/vendor/cfg-if/.cargo_vcs_info.json deleted file mode 100644 index d4bec315ac6e9d..00000000000000 --- a/vendor/cfg-if/.cargo_vcs_info.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "git": { - "sha1": "3510ca6abea34cbbc702509a4e50ea9709925eda" - }, - "path_in_vcs": "" -} \ No newline at end of file diff --git a/vendor/cfg-if/.github/dependabot.yml b/vendor/cfg-if/.github/dependabot.yml deleted file mode 100644 index 36e4ff06363a32..00000000000000 --- a/vendor/cfg-if/.github/dependabot.yml +++ /dev/null @@ -1,14 +0,0 @@ -version: 2 -updates: - - package-ecosystem: cargo - directory: "/" - schedule: - interval: daily - time: "08:00" - open-pull-requests-limit: 10 - - - package-ecosystem: github-actions - directory: "/" - schedule: - interval: weekly - open-pull-requests-limit: 3 diff --git a/vendor/cfg-if/.github/workflows/main.yaml b/vendor/cfg-if/.github/workflows/main.yaml deleted file mode 100644 index 7288a62d253538..00000000000000 --- a/vendor/cfg-if/.github/workflows/main.yaml +++ /dev/null @@ -1,48 +0,0 @@ -name: CI -on: [push, pull_request] - -permissions: - contents: read - -env: - RUSTDOCFLAGS: -Dwarnings - RUSTFLAGS: -Dwarnings - -jobs: - test: - name: Test - runs-on: ubuntu-latest - strategy: - matrix: - rust: - - "1.32" # msrv - - stable - - beta - - nightly - steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 - - name: Install Rust ${{ matrix.rust }} - run: | - rustup self update - rustup update ${{ matrix.rust }} - rustup default ${{ matrix.rust }} - rustc -vV - - name: Run tests - run: | - set -eux - # Remove `-Dwarnings` at the MSRV since lints may be different - [ "${{ matrix.rust }}" = "1.32" ] && export RUSTFLAGS="--cfg msrv_test" - cargo test - - rustfmt: - name: Rustfmt - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 - - name: Install Rust Stable - run: | - rustup update stable - rustup default stable - rustup component add rustfmt - - name: Run rustfmt - run: cargo fmt -- --check diff 
--git a/vendor/cfg-if/.github/workflows/publish.yaml b/vendor/cfg-if/.github/workflows/publish.yaml deleted file mode 100644 index 248e3ccdd9ad6e..00000000000000 --- a/vendor/cfg-if/.github/workflows/publish.yaml +++ /dev/null @@ -1,25 +0,0 @@ -name: Release-plz - -permissions: - pull-requests: write - contents: write - -on: - push: { branches: [main] } - -jobs: - release-plz: - name: Release-plz - runs-on: ubuntu-24.04 - steps: - - name: Checkout repository - uses: actions/checkout@v5 - with: - fetch-depth: 0 - - name: Install Rust (rustup) - run: rustup update nightly --no-self-update && rustup default nightly - - name: Run release-plz - uses: MarcoIeni/release-plz-action@v0.5 - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - CARGO_REGISTRY_TOKEN: ${{ secrets.CARGO_REGISTRY_TOKEN }} diff --git a/vendor/cfg-if/CHANGELOG.md b/vendor/cfg-if/CHANGELOG.md deleted file mode 100644 index 55b54ece74c2e9..00000000000000 --- a/vendor/cfg-if/CHANGELOG.md +++ /dev/null @@ -1,29 +0,0 @@ -# Changelog - -All notable changes to this project will be documented in this file. - -The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), -and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). - -## [Unreleased] - -## [1.0.4](https://github.com/rust-lang/cfg-if/compare/v1.0.3...v1.0.4) - 2025-10-15 - -- Support `cfg(true)` and `cfg(false)` ([#99](https://github.com/rust-lang/cfg-if/pull/99)) -- Set and test a MSRV of 1.32 -- Have a single top-level rule - -## [1.0.3](https://github.com/rust-lang/cfg-if/compare/v1.0.2...v1.0.3) - 2025-08-19 - -- Revert "Remove `@__identity` rule." - -## [1.0.2](https://github.com/rust-lang/cfg-if/compare/v1.0.1...v1.0.2) - 2025-08-19 - -- Remove `@__identity` rule. - -## [1.0.1](https://github.com/rust-lang/cfg-if/compare/v1.0.0...v1.0.1) - 2025-06-09 - -- Remove `compiler-builtins` from `rustc-dep-of-std` dependencies -- Remove redundant configuration from Cargo.toml -- More readable formatting and identifier names. ([#39](https://github.com/rust-lang/cfg-if/pull/39)) -- Add expanded example to readme ([#38](https://github.com/rust-lang/cfg-if/pull/38)) diff --git a/vendor/cfg-if/Cargo.lock b/vendor/cfg-if/Cargo.lock deleted file mode 100644 index 57166796745a7e..00000000000000 --- a/vendor/cfg-if/Cargo.lock +++ /dev/null @@ -1,16 +0,0 @@ -# This file is automatically @generated by Cargo. -# It is not intended for manual editing. -[[package]] -name = "cfg-if" -version = "1.0.4" -dependencies = [ - "rustc-std-workspace-core 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "rustc-std-workspace-core" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[metadata] -"checksum rustc-std-workspace-core 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "aa9c45b374136f52f2d6311062c7146bff20fec063c3f5d46a410bd937746955" diff --git a/vendor/cfg-if/Cargo.toml b/vendor/cfg-if/Cargo.toml deleted file mode 100644 index 450f7a2df1e06c..00000000000000 --- a/vendor/cfg-if/Cargo.toml +++ /dev/null @@ -1,47 +0,0 @@ -# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO -# -# When uploading crates to the registry Cargo will automatically -# "normalize" Cargo.toml files for maximal compatibility -# with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g., crates.io) dependencies. -# -# If you are reading this file be aware that the original Cargo.toml -# will likely look very different (and much more reasonable). 
-# See Cargo.toml.orig for the original contents. - -[package] -edition = "2018" -rust-version = "1.32" -name = "cfg-if" -version = "1.0.4" -authors = ["Alex Crichton "] -build = false -autolib = false -autobins = false -autoexamples = false -autotests = false -autobenches = false -description = """ -A macro to ergonomically define an item depending on a large number of #[cfg] -parameters. Structured like an if-else chain, the first matching branch is the -item that gets emitted. -""" -readme = "README.md" -license = "MIT OR Apache-2.0" -repository = "https://github.com/rust-lang/cfg-if" - -[features] -rustc-dep-of-std = ["core"] - -[lib] -name = "cfg_if" -path = "src/lib.rs" - -[[test]] -name = "xcrate" -path = "tests/xcrate.rs" - -[dependencies.core] -version = "1.0.0" -optional = true -package = "rustc-std-workspace-core" diff --git a/vendor/cfg-if/LICENSE-APACHE b/vendor/cfg-if/LICENSE-APACHE deleted file mode 100644 index 16fe87b06e802f..00000000000000 --- a/vendor/cfg-if/LICENSE-APACHE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - -2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - -3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - -4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - -5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - -6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - -8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - -Copyright [yyyy] [name of copyright owner] - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. diff --git a/vendor/cfg-if/LICENSE-MIT b/vendor/cfg-if/LICENSE-MIT deleted file mode 100644 index 39e0ed6602151f..00000000000000 --- a/vendor/cfg-if/LICENSE-MIT +++ /dev/null @@ -1,25 +0,0 @@ -Copyright (c) 2014 Alex Crichton - -Permission is hereby granted, free of charge, to any -person obtaining a copy of this software and associated -documentation files (the "Software"), to deal in the -Software without restriction, including without -limitation the rights to use, copy, modify, merge, -publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software -is furnished to do so, subject to the following -conditions: - -The above copyright notice and this permission notice -shall be included in all copies or substantial portions -of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -DEALINGS IN THE SOFTWARE. diff --git a/vendor/cfg-if/README.md b/vendor/cfg-if/README.md deleted file mode 100644 index d174b6eda69c5d..00000000000000 --- a/vendor/cfg-if/README.md +++ /dev/null @@ -1,56 +0,0 @@ -# cfg-if - -[Documentation](https://docs.rs/cfg-if) - -A macro to ergonomically define an item depending on a large number of #[cfg] -parameters. Structured like an if-else chain, the first matching branch is the -item that gets emitted. - -```toml -[dependencies] -cfg-if = "1.0" -``` - -## Example - -```rust -cfg_if::cfg_if! 
{ - if #[cfg(unix)] { - fn foo() { /* unix specific functionality */ } - } else if #[cfg(target_pointer_width = "32")] { - fn foo() { /* non-unix, 32-bit functionality */ } - } else { - fn foo() { /* fallback implementation */ } - } -} - -fn main() { - foo(); -} -``` -The `cfg_if!` block above is expanded to: -```rust -#[cfg(unix)] -fn foo() { /* unix specific functionality */ } -#[cfg(all(target_pointer_width = "32", not(unix)))] -fn foo() { /* non-unix, 32-bit functionality */ } -#[cfg(not(any(unix, target_pointer_width = "32")))] -fn foo() { /* fallback implementation */ } -``` - -# License - -This project is licensed under either of - - * Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or - https://www.apache.org/licenses/LICENSE-2.0) - * MIT license ([LICENSE-MIT](LICENSE-MIT) or - https://opensource.org/licenses/MIT) - -at your option. - -### Contribution - -Unless you explicitly state otherwise, any contribution intentionally submitted -for inclusion in `cfg-if` by you, as defined in the Apache-2.0 license, shall be -dual licensed as above, without any additional terms or conditions. diff --git a/vendor/cfg-if/src/lib.rs b/vendor/cfg-if/src/lib.rs deleted file mode 100644 index 2c7414eb81c1ea..00000000000000 --- a/vendor/cfg-if/src/lib.rs +++ /dev/null @@ -1,212 +0,0 @@ -//! A macro for defining `#[cfg]` if-else statements. -//! -//! The macro provided by this crate, `cfg_if`, is similar to the `if/elif` C -//! preprocessor macro by allowing definition of a cascade of `#[cfg]` cases, -//! emitting the implementation which matches first. -//! -//! This allows you to conveniently provide a long list `#[cfg]`'d blocks of code -//! without having to rewrite each clause multiple times. -//! -//! # Example -//! -//! ``` -//! cfg_if::cfg_if! { -//! if #[cfg(unix)] { -//! fn foo() { /* unix specific functionality */ } -//! } else if #[cfg(target_pointer_width = "32")] { -//! fn foo() { /* non-unix, 32-bit functionality */ } -//! } else { -//! fn foo() { /* fallback implementation */ } -//! } -//! } -//! -//! # fn main() {} -//! ``` - -#![no_std] -#![doc(html_root_url = "https://docs.rs/cfg-if")] -#![deny(missing_docs)] -#![cfg_attr(test, allow(unexpected_cfgs))] // we test with features that do not exist - -/// The main macro provided by this crate. See crate documentation for more -/// information. -#[macro_export] -macro_rules! cfg_if { - ( - if #[cfg( $($i_meta:tt)+ )] { $( $i_tokens:tt )* } - $( - else if #[cfg( $($ei_meta:tt)+ )] { $( $ei_tokens:tt )* } - )* - $( - else { $( $e_tokens:tt )* } - )? - ) => { - $crate::cfg_if! { - @__items () ; - (( $($i_meta)+ ) ( $( $i_tokens )* )), - $( - (( $($ei_meta)+ ) ( $( $ei_tokens )* )), - )* - $( - (() ( $( $e_tokens )* )), - )? - } - }; - - // Internal and recursive macro to emit all the items - // - // Collects all the previous cfgs in a list at the beginning, so they can be - // negated. After the semicolon are all the remaining items. - (@__items ( $( ($($_:tt)*) , )* ) ; ) => {}; - ( - @__items ( $( ($($no:tt)+) , )* ) ; - (( $( $($yes:tt)+ )? ) ( $( $tokens:tt )* )), - $( $rest:tt , )* - ) => { - // Emit all items within one block, applying an appropriate #[cfg]. The - // #[cfg] will require all `$yes` matchers specified and must also negate - // all previous matchers. - #[cfg(all( - $( $($yes)+ , )? - not(any( $( $($no)+ ),* )) - ))] - // Subtle: You might think we could put `$( $tokens )*` here. But if - // that contains multiple items then the `#[cfg(all(..))]` above would - // only apply to the first one. 
By wrapping `$( $tokens )*` in this - // macro call, we temporarily group the items into a single thing (the - // macro call) that will be included/excluded by the `#[cfg(all(..))]` - // as appropriate. If the `#[cfg(all(..))]` succeeds, the macro call - // will be included, and then evaluated, producing `$( $tokens )*`. See - // also the "issue #90" test below. - $crate::cfg_if! { @__temp_group $( $tokens )* } - - // Recurse to emit all other items in `$rest`, and when we do so add all - // our `$yes` matchers to the list of `$no` matchers as future emissions - // will have to negate everything we just matched as well. - $crate::cfg_if! { - @__items ( $( ($($no)+) , )* $( ($($yes)+) , )? ) ; - $( $rest , )* - } - }; - - // See the "Subtle" comment above. - (@__temp_group $( $tokens:tt )* ) => { - $( $tokens )* - }; -} - -#[cfg(test)] -mod tests { - cfg_if! { - if #[cfg(test)] { - use core::option::Option as Option2; - fn works1() -> Option2 { Some(1) } - } else { - fn works1() -> Option { None } - } - } - - cfg_if! { - if #[cfg(foo)] { - fn works2() -> bool { false } - } else if #[cfg(test)] { - fn works2() -> bool { true } - } else { - fn works2() -> bool { false } - } - } - - cfg_if! { - if #[cfg(foo)] { - fn works3() -> bool { false } - } else { - fn works3() -> bool { true } - } - } - - cfg_if! { - if #[cfg(test)] { - use core::option::Option as Option3; - fn works4() -> Option3 { Some(1) } - } - } - - cfg_if! { - if #[cfg(foo)] { - fn works5() -> bool { false } - } else if #[cfg(test)] { - fn works5() -> bool { true } - } - } - - // In issue #90 there was a bug that caused only the first item within a - // block to be annotated with the produced `#[cfg(...)]`. In this example, - // it meant that the first `type _B` wasn't being omitted as it should have - // been, which meant we had two `type _B`s, which caused an error. See also - // the "Subtle" comment above. - cfg_if!( - if #[cfg(target_os = "no-such-operating-system-good-sir!")] { - type _A = usize; - type _B = usize; - } else { - type _A = i32; - type _B = i32; - } - ); - - #[cfg(not(msrv_test))] - cfg_if! { - if #[cfg(false)] { - fn works6() -> bool { false } - } else if #[cfg(true)] { - fn works6() -> bool { true } - } else if #[cfg(false)] { - fn works6() -> bool { false } - } - } - - #[test] - fn it_works() { - assert!(works1().is_some()); - assert!(works2()); - assert!(works3()); - assert!(works4().is_some()); - assert!(works5()); - #[cfg(not(msrv_test))] - assert!(works6()); - } - - #[test] - #[allow(clippy::assertions_on_constants)] - fn test_usage_within_a_function() { - cfg_if! { - if #[cfg(debug_assertions)] { - // we want to put more than one thing here to make sure that they - // all get configured properly. - assert!(cfg!(debug_assertions)); - assert_eq!(4, 2 + 2); - } else { - assert!(works1().is_some()); - assert_eq!(10, 5 + 5); - } - } - } - - #[allow(dead_code)] - trait Trait { - fn blah(&self); - } - - #[allow(dead_code)] - struct Struct; - - impl Trait for Struct { - cfg_if! { - if #[cfg(feature = "blah")] { - fn blah(&self) { unimplemented!(); } - } else { - fn blah(&self) { unimplemented!(); } - } - } - } -} diff --git a/vendor/cfg-if/tests/xcrate.rs b/vendor/cfg-if/tests/xcrate.rs deleted file mode 100644 index 454e90f0dc891a..00000000000000 --- a/vendor/cfg-if/tests/xcrate.rs +++ /dev/null @@ -1,16 +0,0 @@ -#![allow(unexpected_cfgs)] // `foo` doesn't exist - -cfg_if::cfg_if! 
{ - if #[cfg(foo)] { - fn works() -> bool { false } - } else if #[cfg(test)] { - fn works() -> bool { true } - } else { - fn works() -> bool { false } - } -} - -#[test] -fn smoke() { - assert!(works()); -} diff --git a/vendor/clang-sys/.cargo-checksum.json b/vendor/clang-sys/.cargo-checksum.json deleted file mode 100644 index 623f070c77d279..00000000000000 --- a/vendor/clang-sys/.cargo-checksum.json +++ /dev/null @@ -1 +0,0 @@ -{"files":{".cargo_vcs_info.json":"a2221882ba4c52abe2344a74a26f535df2641864f5a49435975f23ae2fcd5b3a",".github/workflows/ci.yml":"b5dc986d9f7ed68b8f3022a7f9e71739d7f297b4b6719c2913c1c77e3b9d93c5",".github/workflows/ssh.yml":"d1b12ff03ea5cd5814d5d2c0563d5291e9e847de13be7bedaf411c7f97f20953","CHANGELOG.md":"9db56336c2fd1dddbacc861f42b697f218a9dccb663aaa1ad042cfe940a0c232","Cargo.toml":"db6730e270afa1f936b6f14264be0b0aaa506b88d91ab4805cf270595f3b568b","Cargo.toml.orig":"c6241039bc28f47561154b404d3fe28fe4b582977c8e6ca9288305171a7968f8","LICENSE.txt":"3ddf9be5c28fe27dad143a5dc76eea25222ad1dd68934a047064e56ed2fa40c5","README.md":"ca106237bdacd8aee43af3bc2ad94771b1c1fe029a7d6f622989c00d5a74f4eb","build.rs":"321ac62c88932a3831be9c96f526a21f65ea22df01639946bd0033d1bcf8900e","build/common.rs":"c827ffc2761c4b96952334e35ff443198adfc86fbe2822c309dfe5ea1bcc8cc0","build/dynamic.rs":"c28adab4ea893d12f47d8b229c38a134a6553c654a1d243f37f7f03ed82e5723","build/macros.rs":"41eef7020d4c28ce70c71036009be4be5844572b26e32b840f671b924174475e","build/static.rs":"51316c6274c15f939fff637499163a7312c97d95cea6959825f1ca52af35a726","clippy.toml":"fcf54943ba571514b244cc098ce08671b4117167733e8107e799d533a12a2195","src/lib.rs":"dc1707cf08d65b2bf8e0b9f836f5c2e74af399ea10476a36238056ad1dcc926b","src/link.rs":"d12eda4e3f76f00168615b4cba67b0b1fff8e6dbb06df80302561baa9472eec3","src/support.rs":"4f5f2e76f9352b6b02a1519857de773b6ab064c7bdfab15bf63d4f712f0c7b61","tests/build.rs":"b9bc3b4af58ab815e9ef56538b58448f19ede42591078ef02c6ff9f946456315","tests/header.h":"1b15a686d1c06561960045a26c25a34d840f26c8246f2f5e630f993b69c7492c","tests/lib.rs":"7ddd85162a682328b4eea499526a14f4a841c10ac673a5871f02050b428231d4"},"package":"0b023947811758c97c59bf9d1c188fd619ad4718dcaa767947df1cadb14f39f4"} \ No newline at end of file diff --git a/vendor/clang-sys/.cargo_vcs_info.json b/vendor/clang-sys/.cargo_vcs_info.json deleted file mode 100644 index deb83a612063b5..00000000000000 --- a/vendor/clang-sys/.cargo_vcs_info.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "git": { - "sha1": "cf3874b2480b9ca12f367a54a4835dd2920847de" - }, - "path_in_vcs": "" -} \ No newline at end of file diff --git a/vendor/clang-sys/.github/workflows/ci.yml b/vendor/clang-sys/.github/workflows/ci.yml deleted file mode 100644 index 08c2cba46f2a76..00000000000000 --- a/vendor/clang-sys/.github/workflows/ci.yml +++ /dev/null @@ -1,56 +0,0 @@ -name: CI - -on: - push: - branches: - - master - pull_request: - branches: - - master - -jobs: - test: - name: Test - runs-on: ${{ matrix.os }} - strategy: - matrix: - os: [macos-latest, ubuntu-latest, windows-latest] - clang: [["14.0", "clang_14_0"]] - rust: ["1.60.0"] - steps: - - name: Checkout Repository - uses: actions/checkout@v4 - # LLVM and Clang - - name: Install LLVM and Clang - uses: KyleMayes/install-llvm-action@v2.0.3 - with: - version: ${{ matrix.clang[0] }} - directory: ${{ runner.temp }}/llvm-${{ matrix.clang[0] }} - # Rust - - name: Install Rust - uses: dtolnay/rust-toolchain@stable - with: - toolchain: ${{ matrix.rust }} - # Test - - name: Cargo Test (Dynamic) - run: cargo test --verbose --features ${{ 
matrix.clang[1] }} -- --nocapture - - name: Cargo Test (Runtime) - run: cargo test --verbose --features "${{ matrix.clang[1] }} runtime" -- --nocapture - test-bindgen: - name: Test (bindgen) - runs-on: ubuntu-latest - steps: - - name: Checkout Repository - uses: actions/checkout@v4 - # LLVM and Clang - - name: Install LLVM and Clang - uses: KyleMayes/install-llvm-action@v2.0.3 - with: - version: 14 - directory: ${{ runner.temp }}/llvm - # Rust - - name: Install Rust - uses: dtolnay/rust-toolchain@stable - # Test - - name: Cargo Run (bindgen-test) - run: cargo run --manifest-path bindgen-test/Cargo.toml diff --git a/vendor/clang-sys/.github/workflows/ssh.yml b/vendor/clang-sys/.github/workflows/ssh.yml deleted file mode 100644 index 188fa3d349fed4..00000000000000 --- a/vendor/clang-sys/.github/workflows/ssh.yml +++ /dev/null @@ -1,40 +0,0 @@ -name: SSH - -on: - workflow_dispatch: - inputs: - os: - description: "Operating System" - required: true - default: "ubuntu-latest" - -jobs: - ssh: - name: SSH - runs-on: ${{ matrix.os }} - strategy: - matrix: - os: [macos-latest, ubuntu-latest, windows-latest] - clang: [["13.0", "clang_13_0"]] - rust: ["1.60.0"] - steps: - - name: Checkout Repository - uses: actions/checkout@v2 - if: github.event.inputs.os == matrix.os - # LLVM and Clang - - name: Install LLVM and Clang - uses: KyleMayes/install-llvm-action@v2.0.3 - if: github.event.inputs.os == matrix.os - with: - version: ${{ matrix.clang[0] }} - directory: ${{ runner.temp }}/llvm-${{ matrix.clang[0] }} - # Rust - - name: Install Rust - uses: dtolnay/rust-toolchain@stable - if: github.event.inputs.os == matrix.os - with: - toolchain: ${{ matrix.rust }} - # SSH - - name: Enable SSH - uses: mxschmitt/action-tmate@v3 - if: github.event.inputs.os == matrix.os diff --git a/vendor/clang-sys/CHANGELOG.md b/vendor/clang-sys/CHANGELOG.md deleted file mode 100644 index dc6c75e02dfd30..00000000000000 --- a/vendor/clang-sys/CHANGELOG.md +++ /dev/null @@ -1,552 +0,0 @@ -## [1.8.1] - 2024-05-28 - -### Added -- Added support for `clang` 18.0.x - -### Fixed -- Improve DLL search on Windows to take target architecture into account (e.g., ARM64 vs x86-64) -- Improved detection of `libclang` installed with Visual Studio on Windows - -## [1.8.0] - 2024-05-26 - -### Changed -- Bumped minimum supported Rust version (MSRV) to 1.60.0 -- Added error logging when `CLANG_PATH` set but it isn't a full path to an executable -- Removed reference to `libclang` 3.5 in error message for attempting to call an unsupported function - -### Added -- Added `libcpp` Cargo feature which enables linking to `libc++` instead of `libstdc++` when linking to `libclang` statically on Linux or Haiku - -### Fixed -- Fixed handling of paths that contain characters that have special meaning in -glob patterns (e.g., `[` or `]`) -- Fixed `Clang::find` to support both the `-target` and `--target` arguments -when using target-prefixed `clang` binaries - -## [1.7.0] - 2023-12-31 - -### Added -- Added support for `clang` 17.0.x - -## [1.6.1] - 2023-03-29 - -### Fixed -- Improved error message when calling a `libclang` function that is not supported by the loaded `libclang` instance (https://github.com/rust-lang/rust-bindgen/issues/2446) - -## [1.6.0] - 2023-02-18 - -### Changed -- MinGW directories are not searched for `libclang` instances on Windows when -compiling for an MSVC target -- Bumped minimum supported Rust version (MSRV) to 1.51.0 -- Changed Windows search directory preferences (`libclang` instances from -Visual Studio installs are now the 
lowest priority rather than the second -highest) - -## ~~[1.5.1] - 2023-02-05~~ (YANKED) - -### Changed -- MinGW directories are not searched for `libclang` instances on Windows when -compiling for an MSVC target - -## ~~[1.5.0] - 2023-02-05~~ (YANKED) - -### Changed -- Bumped minimum supported Rust version (MSRV) to 1.51.0 -- Changed Windows search directory preferences (`libclang` instances from -Visual Studio installs are now the lowest priority rather than the second -highest) - -### Added -- Added additional support for `clang` 16.0.x - -## [1.4.0] - 2022-09-22 - -### Changed -- The value of an `EntityKind` enum variant -(`EntityKind::CXCursor_TranslationUnit`) has been updated for Clang 15.0 and -later to match the -[breaking change made in `libclang`](https://github.com/llvm/llvm-project/commit/bb83f8e70bd1d56152f02307adacd718cd67e312#diff-674613a0e47f4e66cc19061e28e3296d39be2d124dceefb68237b30b8e241e7c) - -### Added -- Added support for `clang` 16.0.x -- Added support for `clang` 15.0.x -- Added support for `clang` 14.0.x - -## [1.3.3] - 2022-05-28 - -### Fixed -- Fixed `Clang::find` to check that `CLANG_PATH` is an executable file before -selecting it - -## [1.3.2] - 2022-05-18 - -### Added -- Added support for illumos and derivatives - -## [1.3.1] - 2022-02-03 - -### Added -- Added missing `clang_getToken` function - -## [1.3.0] - 2021-10-31 - -### Added -- Added support for `clang` 13.0.x -- Added support for `clang` 12.0.x -- Added support for the Haiku operating system - -## [1.2.2] - 2021-09-02 - -### Fixed -- Fixed handling of paths that contain characters that have special meaning in -glob patterns (e.g., `[` or `]`) - -## [1.2.1] - 2021-08-24 - -### Changed -- Updated build script to check the install location used by the -[Scoop](https://scoop.sh/) command-line installer on Windows - -### Fixed -- Updated build script to support environments where the `PATH` environment -variable is not set - -## [1.2.0] - 2021-04-08 - -### Changed -- Changed `Clang::find` to prefer target-prefixed binaries when a `-target` -argument is provided (e.g., if the arguments `-target` and -`x86_64-unknown-linux-gnu` are provided, a target-prefixed Clang executable -such as `x86_64-unknown-linux-gnu-clang` will be preferred over a non-target -prefixed Clang executable) - -### Fixed -- Fixed build script to split paths in environment variables (e.g., -`LD_LIBRARY_PATH`) using the appropriate separator for the platform (previously -`:` was used as the separator but some platforms such as Windows use `;`) - -## [1.1.1] - 2021-02-19 - -### Changed -- Bumped `libloading` version to `0.7` - -## [1.1.0] - 2021-02-09 - -### Changed -- Added Visual Studio LLVM component directory to search paths on Windows -([#121](https://github.com/KyleMayes/clang-sys/issues/121)) - -### Added -- Added support for `clang` 11.0.x - -## [1.0.3] - 2020-11-19 - -### Fixed -- Fixed `Clang::find` panicking when `llvm-config` or `xcode-build` don't output anything to `stdout` - -## [1.0.2] - 2020-11-17 - -### Fixed -- Fixed `Clang::find` to properly search directories returned by the -`llvm-config --bindir` and `xcodebuild -find clang` commands -- Improved version selection algorithm in the case where there are multiple -instances of `libclang` with the highest version found; previously the lowest -priority instance would be selected instead of the highest priority instance -(e.g., the versions found by searching the fallback directories were preferred -over the versions found by searching the `llvm-config --prefix` 
directory) - -## [1.0.1] - 2020-10-01 - -### Changed -- Improved panic error message when calling an unloaded function - -## [1.0.0] - 2020-07-14 - -### Changed -- Bumped `libloading` version to `0.6.0` -- Updated build script to not print warnings about failures to execute -`llvm-config` and `xcode-select` unless an instance of `libclang` is not found - -### Added -- Added support for `clang` 10.0.x - -### Removed -- Removed `gte_clang_*` Cargo features (these were an implementation detail) - -## [0.29.3] - 2020-03-31 - -### Added -- Added ability to determine version of runtime-linked instance of `libclang` - -## [0.29.2] - 2020-03-09 - -### Added -- Revert unnecessary increase of minimum version of `libc` and `libloading` - -## [0.29.2] - 2020-03-09 - -### Added -- Revert unnecessary increase of minimum version of `libc` and `libloading` - -## [0.29.1] - 2020-03-06 - -### Added -- Added support for finding instances of `libclang` matching `libclang-*.so.*` - -## [0.29.0] - 2020-02-17 - -### Changed -- Wrapped function pointer fields in `Option` in the `CXCursorAndRangeVisitor` -and `IndexerCallbacks` structs (to permit nullability and to avoid undefined -behavior caused by `Default` implementations for these structs which returns a -zeroed value) - -### Added -- Added support for `clang` 9.0.x -- Added missing `CXCallingConv_AArch64VectorCall` variant to `CXCallingConv` enum -- Added missing `clang_CompileCommand_getNumMappedSources` function - -## [0.28.1] - 2019-07-28 - -### Changed -- Bumped `glob` version to `0.3.0` -- Improved error message when an invocation of an executable is not successful -- Allowed `LIBCLANG_PATH` to refer to a specific `libclang` instance (e.g., - `/usr/local/lib/libclang.so.10`) - -### Fixed -- Fixed - [`libclang-cpp`](https://github.com/llvm-mirror/clang/commit/90d6722bdcbc2af52306f7e948c556ad6185ac48) - being linked instead of `libclang` - -## [0.28.0] - 2019-02-17 - -### Changed -- Changed `llvm-config` to be first search candidate on macOS - -### Added -- Added support for `clang` 8.0.x - -### Removed -- Removed `assert-minimum` feature -- Removed version detection for libraries without versions embedded in the filename - -## [0.27.0] - 2019-01-10 - -### Changed -- Added version detection for libraries without versions embedded in the filename - -### Added -- Added `assert-minimum` feature (see `README.md` for details) - -## [0.26.4] - 2018-12-29 - -### Changed -- Added shared library path to `SharedLibrary` struct - -## [0.26.3] - 2018-11-14 - -### Changed -- Disable default features of `libc` dependency - -## [0.26.2] - 2018-11-03 - -### Fixed -- Fixed dynamic linking on macOS - -## [0.26.1] - 2018-10-10 - -### Fixed -- Fixed support for finding libraries in `bin` directories on Windows - -## [0.26.0] - 2018-10-07 - -### Changed -- Added support for finding libraries with version suffixes on Linux when using runtime linking (e.g., `libclang.so.1`) - -## [0.25.0] - 2018-10-06 - -### Changed -- Added support for versioned libraries on BSDs - -## [0.24.0] - 2018-09-15 - -### Changed -- Reworked finding of libraries (see `README.md` for details) - -### Added -- Added support for `clang` 7.0.x - -## [0.23.0] - 2018-06-16 - -### Changed -- Changed `Clang::find` to skip dynamic libraries for an incorrect architecture on Windows - -## [0.22.0] - 2018-03-11 - -### Added -- Added support for `clang` 6.0.x -- Bumped `libc` version to `0.2.39` -- Bumped `libloading` version to `0.5.0` - -## [0.21.2] - 2018-02-17 - -### Changed -- Added original errors to error 
messages -- Added support for searching for libraries in `LD_LIBRARY_PATH` directories - -## [0.21.1] - 2017-11-24 - -### Changed -- Improved finding of versioned libraries (e.g., `libclang-3.9.so`) - -### Fixed -* Fixed compilation failures on the beta and nightly channels caused by a [compiler bug](https://github.com/KyleMayes/clang-sys/pull/69) - -## [0.21.0] - 2017-10-11 - -### Changed -* Replaced `bitflags` usage with constants which avoids crashes on 32-bit Linux platforms - -## [0.20.1] - 2017-09-16 - -### Fixed -- Fixed static linking - -## [0.20.0] - 2017-09-14 - -### Added -- Added support for `clang` 5.0.x -- Added `clang` as a link target of this package -- Added dummy implementations of `is_loaded` for builds with the `static` Cargo feature enabled - -## [0.19.0] - 2017-07-02 - -### Changed -- Bumped `bitflags` version to `0.9.1` -- Added `args` parameter to `Clang::new` function which passes arguments to the Clang executable - -## [0.18.0] - 2017-05-16 - -### Changed -- Improved finding of versioned libraries (e.g., `libclang.so.3.9`) - -## [0.17.0] - 2017-05-08 - -### Changed -- Changed storage type of include search paths from `Vec` to `Option>` - -## [0.16.0] - 2017-05-02 - -### Changed -- Bumped `libloading` version to `0.4.0` - -## [0.15.2] - 2017-04-28 - -### Fixed -- Fixed finding of `libclang.so.1` on Linux - -## [0.15.1] - 2017-03-29 - -### Fixed -- Fixed static linking when libraries are in [different directories](https://github.com/KyleMayes/clang-sys/issues/50) - -## [0.15.0] - 2017-03-13 - -### Added -- Added support for `clang` 4.0.x - -### Changed -- Changed functions in the `Functions` struct to be `unsafe` (`runtime` feature only) -- Changed `Clang::find` method to ignore directories and non-executable files -- Changed `Clang::find` to skip dynamic libraries for an incorrect architecture on FreeBSD and Linux -- Bumped `bitflags` version to `0.7.0` - -## [0.14.0] - 2017-01-30 - -### Changed -- Changed all enum types from tuple structs to raw integers to avoid - [segmentation faults](https://github.com/rust-lang/rust/issues/39394) on some platforms - -## [0.13.0] - 2017-01-29 - -### Changed -- Changed all opaque pointers types from tuple structs to raw pointers to avoid - [segmentation faults](https://github.com/rust-lang/rust/issues/39394) on some platforms - -## [0.12.0] - 2016-12-13 - -### Changed -- Altered the runtime linking API to allow for testing the presence of functions - -## [0.11.1] - 2016-12-07 - -### Added -- Added support for linking to Clang on Windows from unofficial LLVM sources such as MSYS and MinGW - -## [0.11.0] - 2016-10-07 - -### Changed -- Changed all enums from Rust enums to typed constants to avoid - [undefined behavior](https://github.com/KyleMayes/clang-sys/issues/42) - -## [0.10.1] - 2016-08-21 - -### Changed -- Changed static linking on FreeBSD and macOS to link against `libc++` instead of `libstd++` - -## [0.10.0] - 2016-08-01 - -### Changed -- Added `runtime` Cargo feature that links to `libclang` shared library at runtime -- Added `from_raw` method to `CXTypeLayoutError` enum -- Added implementations of `Deref` for opaque FFI structs -- Changed `Default` implementations for structs to zero out the struct - -## [0.9.0] - 2016-07-21 - -### Added -- Added documentation bindings - -## [0.8.1] - 2016-07-20 - -### Changed -- Added `CLANG_PATH` environment variable for providing a path to `clang` executable -- Added usage of `llvm-config` to search for `clang` -- Added usage of `xcodebuild` to search for `clang` on macOS - -## 
[0.8.0] - 2016-07-18 - -### Added -- Added support for `clang` 3.9.x - -### Changed -- Bumped `libc` version to `0.2.14` - -### Fixed -- Fixed `LIBCLANG_PATH` usage on Windows to search both the `bin` and `lib` directories -- Fixed search path parsing on macOS -- Fixed search path parsing on Windows -- Fixed default search path ordering on macOS - -## [0.7.2] - 2016-06-17 - -### Fixed -- Fixed finding of `clang` executables when system has executables matching `clang-*` - (e.g., `clang-format`) - -## [0.7.1] - 2016-06-10 - -### Changed -- Bumped `libc` version to `0.2.12` - -### Fixed -- Fixed finding of `clang` executables suffixed by their version (e.g., `clang-3.5`) - -## [0.7.0] - 2016-05-31 - -### Changed -- Changed `Clang` struct `version` field type to `Option` - -## [0.6.0] - 2016-05-26 - -### Added -- Added `support` module - -### Fixed -- Fixed `libclang` linking on FreeBSD -- Fixed `libclang` linking on Windows with the MSVC toolchain -- Improved `libclang` static linking - -## [0.5.4] - 20160-5-19 - -### Changed -- Added implementations of `Default` for FFI structs - -## [0.5.3] - 2016-05-17 - -### Changed -- Bumped `bitflags` version to `0.7.0` - -## [0.5.2] - 2016-05-12 - -### Fixed -- Fixed `libclang` static linking - -## [0.5.1] - 2016-05-10 - -### Fixed -- Fixed `libclang` linking on macOS -- Fixed `libclang` linking on Windows - -## [0.5.0] - 2016-05-10 - -### Removed -- Removed `rustc_version` dependency -- Removed support for `LIBCLANG_STATIC` environment variable - -### Changed -- Bumped `bitflags` version to `0.6.0` -- Bumped `libc` version to `0.2.11` -- Improved `libclang` search path -- Improved `libclang` static linking - -## [0.4.2] - 2016-04-20 - -### Changed -- Bumped `libc` version to `0.2.10` - -## [0.4.1] - 2016-04-02 - -### Changed -- Bumped `libc` version to `0.2.9` -- Bumped `rustc_version` version to `0.1.7` - -## [0.4.0] - 2016-03-28 - -### Removed -- Removed support for `clang` 3.4.x - -## [0.3.1] - 2016-03-21 - -### Added -- Added support for finding `libclang` - -## [0.3.0] - 2016-03-16 - -### Removed -- Removed build system types and functions - -### Added -- Added support for `clang` 3.4.x - -### Changed -- Bumped `bitflags` version to `0.5.0` -- Bumped `libc` version to `0.2.8` - -## [0.2.1] - 2016-02-13 - -### Changed -- Simplified internal usage of conditional compilation -- Bumped `bitflags` version to `0.4.0` -- Bumped `libc` version to `0.2.7` -- Bumped `rustc_version` version to `0.1.6` - -## [0.2.0] - 2016-02-12 - -### Added -- Added support for `clang` 3.8.x - -## [0.1.2] - 2015-12-29 - -### Added -- Added derivations of `Debug` for FFI structs - -## [0.1.1] - 2015-12-26 - -### Added -- Added derivations of `PartialOrd` and `Ord` for FFI enums - -## [0.1.0] - 2015-12-22 -- Initial release diff --git a/vendor/clang-sys/Cargo.toml b/vendor/clang-sys/Cargo.toml deleted file mode 100644 index ae9a8042b9cd0b..00000000000000 --- a/vendor/clang-sys/Cargo.toml +++ /dev/null @@ -1,77 +0,0 @@ -# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO -# -# When uploading crates to the registry Cargo will automatically -# "normalize" Cargo.toml files for maximal compatibility -# with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g., crates.io) dependencies. -# -# If you are reading this file be aware that the original Cargo.toml -# will likely look very different (and much more reasonable). -# See Cargo.toml.orig for the original contents. 
- -[package] -edition = "2021" -name = "clang-sys" -version = "1.8.1" -authors = ["Kyle Mayes "] -build = "build.rs" -links = "clang" -description = "Rust bindings for libclang." -documentation = "https://docs.rs/clang-sys" -readme = "README.md" -license = "Apache-2.0" -repository = "https://github.com/KyleMayes/clang-sys" - -[package.metadata.docs.rs] -features = [ - "clang_18_0", - "runtime", -] - -[dependencies.glob] -version = "0.3" - -[dependencies.libc] -version = "0.2.39" -default-features = false - -[dependencies.libloading] -version = "0.8" -optional = true - -[dev-dependencies.glob] -version = "0.3" - -[dev-dependencies.lazy_static] -version = "1" - -[dev-dependencies.tempfile] -version = ">=3.0.0, <3.7.0" - -[build-dependencies.glob] -version = "0.3" - -[features] -clang_10_0 = ["clang_9_0"] -clang_11_0 = ["clang_10_0"] -clang_12_0 = ["clang_11_0"] -clang_13_0 = ["clang_12_0"] -clang_14_0 = ["clang_13_0"] -clang_15_0 = ["clang_14_0"] -clang_16_0 = ["clang_15_0"] -clang_17_0 = ["clang_16_0"] -clang_18_0 = ["clang_17_0"] -clang_3_5 = [] -clang_3_6 = ["clang_3_5"] -clang_3_7 = ["clang_3_6"] -clang_3_8 = ["clang_3_7"] -clang_3_9 = ["clang_3_8"] -clang_4_0 = ["clang_3_9"] -clang_5_0 = ["clang_4_0"] -clang_6_0 = ["clang_5_0"] -clang_7_0 = ["clang_6_0"] -clang_8_0 = ["clang_7_0"] -clang_9_0 = ["clang_8_0"] -libcpp = [] -runtime = ["libloading"] -static = [] diff --git a/vendor/clang-sys/LICENSE.txt b/vendor/clang-sys/LICENSE.txt deleted file mode 100644 index 75b52484ea471f..00000000000000 --- a/vendor/clang-sys/LICENSE.txt +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. 
For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/clang-sys/README.md b/vendor/clang-sys/README.md deleted file mode 100644 index be86f940073764..00000000000000 --- a/vendor/clang-sys/README.md +++ /dev/null @@ -1,116 +0,0 @@ -# clang-sys - -[![Crate](https://img.shields.io/crates/v/clang-sys.svg)](https://crates.io/crates/clang-sys) -[![Documentation](https://docs.rs/clang-sys/badge.svg)](https://docs.rs/clang-sys) -[![CI](https://img.shields.io/github/actions/workflow/status/KyleMayes/clang-sys/ci.yml?branch=master)](https://github.com/KyleMayes/clang-sys/actions?query=workflow%3ACI) -![MSRV](https://img.shields.io/badge/MSRV-1.60.0-blue) - -Rust bindings for `libclang`. - -If you are interested in a somewhat idiomatic Rust wrapper for these bindings, see [`clang-rs`](https://github.com/KyleMayes/clang-rs). - -Released under the Apache License 2.0. 
- -## [Documentation](https://docs.rs/clang-sys) - -Note that the documentation on https://docs.rs for this crate assumes usage of the `runtime` Cargo feature as well as the Cargo feature for the latest supported version of `libclang` (e.g., `clang_16_0`), neither of which are enabled by default. - -Due to the usage of the `runtime` Cargo feature, this documentation will contain some additional types and functions to manage a dynamically loaded `libclang` instance at runtime. - -Due to the usage of the Cargo feature for the latest supported version of `libclang`, this documentation will contain constants and functions that are not available in the oldest supported version of `libclang` (3.5). All of these types and functions have a documentation comment which specifies the minimum `libclang` version required to use the item. - -## Supported Versions - -To target a version of `libclang`, enable a Cargo features such as one of the following: - -* `clang_3_5` - requires `libclang` 3.5 or later -* `clang_3_6` - requires `libclang` 3.6 or later -* etc... -* `clang_17_0` - requires `libclang` 17.0 or later -* `clang_18_0` - requires `libclang` 18.0 or later - -If you do not enable one of these features, the API provided by `libclang` 3.5 will be available by default. - -**Note:** If you are using Clang 15.0 or later, you should enable the `clang_15_0` feature or a more recent version feature. Clang 15.0 introduced [a breaking change to the `EntityKind` enum](https://github.com/llvm/llvm-project/commit/bb83f8e70bd1d56152f02307adacd718cd67e312#diff-674613a0e47f4e66cc19061e28e3296d39be2d124dceefb68237b30b8e241e7c) which resulted in a mismatch between the values returned by `libclang` and the values for `EntityKind` defined by this crate in previous versions. - -## Dependencies - -By default, this crate will attempt to link to `libclang` dynamically. In this case, this crate depends on the `libclang` shared library (`libclang.so` on Linux, `libclang.dylib` on macOS, `libclang.dll` on Windows). If you want to link to `libclang` statically instead, enable the `static` Cargo feature. In this case, this crate depends on the LLVM and Clang static libraries. If you don't want to link to `libclang` at compiletime but instead want to load it at runtime, enable the `runtime` Cargo feature. - -These libraries can be either be installed as a part of Clang or downloaded [here](http://llvm.org/releases/download.html). - -**Note:** The downloads for LLVM and Clang 3.8 and later do not include the `libclang.a` static library. This means you cannot link to any of these versions of `libclang` statically unless you build it from source. - -### Versioned Dependencies - -This crate supports finding versioned instances of `libclang.so` (e.g.,`libclang-3.9.so`). In the case where there are multiple instances to choose from, this crate will prefer instances with higher versions. For example, the following instances of `libclang.so` are listed in descending order of preference: - -1. `libclang-4.0.so` -2. `libclang-4.so` -3. `libclang-3.9.so` -4. `libclang-3.so` -5. `libclang.so` - -**Note:** On BSD distributions, versioned instances of `libclang.so` matching the pattern `libclang.so.*` (e.g., `libclang.so.7.0`) are also included. - -**Note:** On Linux distributions when the `runtime` features is enabled, versioned instances of `libclang.so` matching the pattern `libclang.so.*` (e.g., `libclang.so.1`) are also included. 
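
*(Editorial note, not part of the vendored crate:)* the preference order above amounts to a descending sort on the version number embedded in the filename. Below is a minimal sketch of that ranking; the filename parsing is a simplified assumption for illustration only, not clang-sys's actual logic in `build/dynamic.rs` (which appears later in this patch).

```rust
// Editorial sketch: rank candidate libclang filenames so that higher embedded
// versions win, mirroring the preference list above. The parsing rules here
// are simplified assumptions, not clang-sys's real parser.
fn version_key(filename: &str) -> Vec<u32> {
    // Accept names like `libclang-4.0.so`, `libclang-4.so`, and `libclang.so.7.0`.
    let version = filename
        .strip_prefix("libclang.so.")
        .or_else(|| {
            filename
                .strip_prefix("libclang-")
                .and_then(|rest| rest.strip_suffix(".so"))
        })
        .unwrap_or("");
    version.split('.').filter_map(|part| part.parse().ok()).collect()
}

fn main() {
    let mut candidates = vec![
        "libclang.so",
        "libclang-3.9.so",
        "libclang-4.so",
        "libclang-3.so",
        "libclang-4.0.so",
    ];
    // Highest version first; the unversioned `libclang.so` sorts last.
    candidates.sort_by(|a, b| version_key(b).cmp(&version_key(a)));
    assert_eq!(candidates[0], "libclang-4.0.so");
    println!("{candidates:?}");
}
```

This reproduces the ordering shown in the list above (`libclang-4.0.so` preferred over `libclang-4.so`, and so on down to the unversioned `libclang.so`).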
- -## Environment Variables - -The following environment variables, if set, are used by this crate to find the required libraries and executables: - -* `LLVM_CONFIG_PATH` **(compiletime)** - provides a full path to an `llvm-config` executable (including the executable itself [i.e., `/usr/local/bin/llvm-config-8.0`]) -* `LIBCLANG_PATH` **(compiletime)** - provides a path to a directory containing a `libclang` shared library or a full path to a specific `libclang` shared library -* `LIBCLANG_STATIC_PATH` **(compiletime)** - provides a path to a directory containing LLVM and Clang static libraries -* `CLANG_PATH` **(runtime)** - provides a path to a `clang` executable - -## Linking - -### Dynamic - -`libclang` shared libraries will be searched for in the following directories: - -* the directory provided by the `LIBCLANG_PATH` environment variable -* the `bin` and `lib` directories in the directory provided by `llvm-config --libdir` -* the directories provided by `LD_LIBRARY_PATH` environment variable -* a list of likely directories for the target platform (e.g., `/usr/local/lib` on Linux) -* **macOS only:** the toolchain directory in the directory provided by `xcode-select --print-path` - -On Linux, running an executable that has been dynamically linked to `libclang` may require you to add a path to `libclang.so` to the `LD_LIBRARY_PATH` environment variable. The same is true on OS X, except the `DYLD_LIBRARY_PATH` environment variable is used instead. - -On Windows, running an executable that has been dynamically linked to `libclang` requires that `libclang.dll` can be found by the executable at runtime. See [here](https://msdn.microsoft.com/en-us/library/7d83bc18.aspx) for more information. - -### Static - -The availability of `llvm-config` is not optional for static linking. Ensure that an instance of this executable can be found on your system's path or set the `LLVM_CONFIG_PATH` environment variable. The required LLVM and Clang static libraries will be searched for in the same way as shared libraries are searched for, except the `LIBCLANG_STATIC_PATH` environment variable is used in place of the `LIBCLANG_PATH` environment variable. - -**Note:** The `libcpp` Cargo feature can be used to enable linking to `libc++` instead of `libstd++` when linking to `libclang` statically on Linux or Haiku. - -#### Static Library Availability - -Linking to `libclang` statically on *nix systems requires that the `libclang.a` static library be available. -This library is usually *not* included in most distributions of LLVM and Clang (e.g., `libclang-dev` on Debian-based systems). -If you need to link to `libclang` statically then most likely the only consistent way to get your hands on `libclang.a` is to build it yourself. - -Here's an example of building the required static libraries and using them with `clang-sys`: - -```text -git clone git@github.com:llvm/llvm-project.git -cd llvm-project - -cmake -S llvm -B build -G Ninja -DLLVM_ENABLE_PROJECTS=clang -DLIBCLANG_BUILD_STATIC=ON -ninja -C build - -cd .. -git clone git@github.com:KyleMayes/clang-sys.git -cd clang-sys - -LLVM_CONFIG_PATH=../llvm-project/build/bin/llvm-config cargo test --features static -``` - -Linking to `libclang` statically requires linking a large number of big static libraries. -Using [`rust-lld` as a linker](https://blog.rust-lang.org/2024/05/17/enabling-rust-lld-on-linux.html) can greatly reduce linking times. 
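
*(Editorial note, not part of the vendored crate:)* to make the search order above concrete, here is a deliberately simplified, hypothetical build-script fragment that honours `LIBCLANG_PATH` first and then falls back to `llvm-config --libdir`. The real clang-sys build script (shown later in this patch) covers many more cases — versioned filenames, platform directory lists, static linking — so treat this strictly as a sketch.

```rust
// Editorial sketch only: a drastically simplified build-script fragment that
// mirrors the search order described above (LIBCLANG_PATH first, then
// `llvm-config --libdir`). Not clang-sys's actual build.rs.
use std::env;
use std::path::PathBuf;
use std::process::Command;

fn libclang_search_dir() -> Option<PathBuf> {
    // 1. Explicit override via the LIBCLANG_PATH environment variable.
    if let Ok(path) = env::var("LIBCLANG_PATH") {
        return Some(PathBuf::from(path));
    }
    // 2. Ask llvm-config (possibly overridden by LLVM_CONFIG_PATH) for its
    //    library directory.
    let llvm_config = env::var("LLVM_CONFIG_PATH").unwrap_or_else(|_| "llvm-config".into());
    let output = Command::new(llvm_config).arg("--libdir").output().ok()?;
    if !output.status.success() {
        return None;
    }
    let dir = String::from_utf8_lossy(&output.stdout).trim().to_string();
    Some(PathBuf::from(dir))
}

fn main() {
    if let Some(dir) = libclang_search_dir() {
        // Tell Cargo where to search and link against the shared library.
        println!("cargo:rustc-link-search=native={}", dir.display());
        println!("cargo:rustc-link-lib=dylib=clang");
    } else {
        println!("cargo:warning=could not locate a libclang directory");
    }
}
```

The `cargo:rustc-link-search` and `cargo:rustc-link-lib` directives are the standard Cargo mechanism for adding a library search path and linking a shared library from a build script.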
- -### Runtime - -The `clang_sys::load` function is used to load a `libclang` shared library for use in the thread in which it is called. The `clang_sys::unload` function will unload the `libclang` shared library. `clang_sys::load` searches for a `libclang` shared library in the same way one is searched for when linking to `libclang` dynamically at compiletime. diff --git a/vendor/clang-sys/build.rs b/vendor/clang-sys/build.rs deleted file mode 100644 index 4155b9781ec94f..00000000000000 --- a/vendor/clang-sys/build.rs +++ /dev/null @@ -1,77 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 - -//! Finds `libclang` static or shared libraries and links to them. -//! -//! # Environment Variables -//! -//! This build script can make use of several environment variables to help it -//! find the required static or shared libraries. -//! -//! * `LLVM_CONFIG_PATH` - provides a path to an `llvm-config` executable -//! * `LIBCLANG_PATH` - provides a path to a directory containing a `libclang` -//! shared library or a path to a specific `libclang` shared library -//! * `LIBCLANG_STATIC_PATH` - provides a path to a directory containing LLVM -//! and Clang static libraries - -#![allow(unused_attributes)] - -use std::path::Path; - -#[macro_use] -#[path = "build/macros.rs"] -pub mod macros; - -#[path = "build/common.rs"] -pub mod common; -#[path = "build/dynamic.rs"] -pub mod dynamic; -#[path = "build/static.rs"] -pub mod r#static; - -/// Copies a file. -#[cfg(feature = "runtime")] -fn copy(source: &str, destination: &Path) { - use std::fs::File; - use std::io::{Read, Write}; - - let mut string = String::new(); - File::open(source) - .unwrap() - .read_to_string(&mut string) - .unwrap(); - File::create(destination) - .unwrap() - .write_all(string.as_bytes()) - .unwrap(); -} - -/// Copies the code used to find and link to `libclang` shared libraries into -/// the build output directory so that it may be used when linking at runtime. -#[cfg(feature = "runtime")] -fn main() { - use std::env; - - if cfg!(feature = "static") { - panic!("`runtime` and `static` features can't be combined"); - } - - let out = env::var("OUT_DIR").unwrap(); - copy("build/macros.rs", &Path::new(&out).join("macros.rs")); - copy("build/common.rs", &Path::new(&out).join("common.rs")); - copy("build/dynamic.rs", &Path::new(&out).join("dynamic.rs")); -} - -/// Finds and links to the required libraries dynamically or statically. -#[cfg(not(feature = "runtime"))] -fn main() { - if cfg!(feature = "static") { - r#static::link(); - } else { - dynamic::link(); - } - - if let Some(output) = common::run_llvm_config(&["--includedir"]) { - let directory = Path::new(output.trim_end()); - println!("cargo:include={}", directory.display()); - } -} diff --git a/vendor/clang-sys/build/common.rs b/vendor/clang-sys/build/common.rs deleted file mode 100644 index 4d144cb2a9a938..00000000000000 --- a/vendor/clang-sys/build/common.rs +++ /dev/null @@ -1,355 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 - -use std::cell::RefCell; -use std::collections::HashMap; -use std::env; -use std::path::{Path, PathBuf}; -use std::process::Command; - -use glob::{MatchOptions, Pattern}; - -//================================================ -// Commands -//================================================ - -thread_local! { - /// The errors encountered by the build script while executing commands. - static COMMAND_ERRORS: RefCell>> = RefCell::default(); -} - -/// Adds an error encountered by the build script while executing a command. 
-fn add_command_error(name: &str, path: &str, arguments: &[&str], message: String) { - COMMAND_ERRORS.with(|e| { - e.borrow_mut() - .entry(name.into()) - .or_default() - .push(format!( - "couldn't execute `{} {}` (path={}) ({})", - name, - arguments.join(" "), - path, - message, - )) - }); -} - -/// A struct that prints the errors encountered by the build script while -/// executing commands when dropped (unless explictly discarded). -/// -/// This is handy because we only want to print these errors when the build -/// script fails to link to an instance of `libclang`. For example, if -/// `llvm-config` couldn't be executed but an instance of `libclang` was found -/// anyway we don't want to pollute the build output with irrelevant errors. -#[derive(Default)] -pub struct CommandErrorPrinter { - discard: bool, -} - -impl CommandErrorPrinter { - pub fn discard(mut self) { - self.discard = true; - } -} - -impl Drop for CommandErrorPrinter { - fn drop(&mut self) { - if self.discard { - return; - } - - let errors = COMMAND_ERRORS.with(|e| e.borrow().clone()); - - if let Some(errors) = errors.get("llvm-config") { - println!( - "cargo:warning=could not execute `llvm-config` one or more \ - times, if the LLVM_CONFIG_PATH environment variable is set to \ - a full path to valid `llvm-config` executable it will be used \ - to try to find an instance of `libclang` on your system: {}", - errors - .iter() - .map(|e| format!("\"{}\"", e)) - .collect::>() - .join("\n "), - ) - } - - if let Some(errors) = errors.get("xcode-select") { - println!( - "cargo:warning=could not execute `xcode-select` one or more \ - times, if a valid instance of this executable is on your PATH \ - it will be used to try to find an instance of `libclang` on \ - your system: {}", - errors - .iter() - .map(|e| format!("\"{}\"", e)) - .collect::>() - .join("\n "), - ) - } - } -} - -#[cfg(test)] -lazy_static::lazy_static! { - pub static ref RUN_COMMAND_MOCK: std::sync::Mutex< - Option Option + Send + Sync + 'static>>, - > = std::sync::Mutex::new(None); -} - -/// Executes a command and returns the `stdout` output if the command was -/// successfully executed (errors are added to `COMMAND_ERRORS`). -fn run_command(name: &str, path: &str, arguments: &[&str]) -> Option { - #[cfg(test)] - if let Some(command) = &*RUN_COMMAND_MOCK.lock().unwrap() { - return command(name, path, arguments); - } - - let output = match Command::new(path).args(arguments).output() { - Ok(output) => output, - Err(error) => { - let message = format!("error: {}", error); - add_command_error(name, path, arguments, message); - return None; - } - }; - - if output.status.success() { - Some(String::from_utf8_lossy(&output.stdout).into_owned()) - } else { - let message = format!("exit code: {}", output.status); - add_command_error(name, path, arguments, message); - None - } -} - -/// Executes the `llvm-config` command and returns the `stdout` output if the -/// command was successfully executed (errors are added to `COMMAND_ERRORS`). -pub fn run_llvm_config(arguments: &[&str]) -> Option { - let path = env::var("LLVM_CONFIG_PATH").unwrap_or_else(|_| "llvm-config".into()); - run_command("llvm-config", &path, arguments) -} - -/// Executes the `xcode-select` command and returns the `stdout` output if the -/// command was successfully executed (errors are added to `COMMAND_ERRORS`). 
-pub fn run_xcode_select(arguments: &[&str]) -> Option { - run_command("xcode-select", "xcode-select", arguments) -} - -//================================================ -// Search Directories -//================================================ -// These search directories are listed in order of -// preference, so if multiple `libclang` instances -// are found when searching matching directories, -// the `libclang` instances from earlier -// directories will be preferred (though version -// takes precedence over location). -//================================================ - -/// `libclang` directory patterns for Haiku. -const DIRECTORIES_HAIKU: &[&str] = &[ - "/boot/home/config/non-packaged/develop/lib", - "/boot/home/config/non-packaged/lib", - "/boot/system/non-packaged/develop/lib", - "/boot/system/non-packaged/lib", - "/boot/system/develop/lib", - "/boot/system/lib", -]; - -/// `libclang` directory patterns for Linux (and FreeBSD). -const DIRECTORIES_LINUX: &[&str] = &[ - "/usr/local/llvm*/lib*", - "/usr/local/lib*/*/*", - "/usr/local/lib*/*", - "/usr/local/lib*", - "/usr/lib*/*/*", - "/usr/lib*/*", - "/usr/lib*", -]; - -/// `libclang` directory patterns for macOS. -const DIRECTORIES_MACOS: &[&str] = &[ - "/usr/local/opt/llvm*/lib/llvm*/lib", - "/Library/Developer/CommandLineTools/usr/lib", - "/Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/lib", - "/usr/local/opt/llvm*/lib", -]; - -/// `libclang` directory patterns for Windows. -/// -/// The boolean indicates whether the directory pattern should be used when -/// compiling for an MSVC target environment. -const DIRECTORIES_WINDOWS: &[(&str, bool)] = &[ - // LLVM + Clang can be installed using Scoop (https://scoop.sh). - // Other Windows package managers install LLVM + Clang to other listed - // system-wide directories. - ("C:\\Users\\*\\scoop\\apps\\llvm\\current\\lib", true), - ("C:\\MSYS*\\MinGW*\\lib", false), - ("C:\\Program Files*\\LLVM\\lib", true), - ("C:\\LLVM\\lib", true), - // LLVM + Clang can be installed as a component of Visual Studio. - // https://github.com/KyleMayes/clang-sys/issues/121 - ("C:\\Program Files*\\Microsoft Visual Studio\\*\\VC\\Tools\\Llvm\\**\\lib", true), -]; - -/// `libclang` directory patterns for illumos -const DIRECTORIES_ILLUMOS: &[&str] = &[ - "/opt/ooce/llvm-*/lib", - "/opt/ooce/clang-*/lib", -]; - -//================================================ -// Searching -//================================================ - -/// Finds the files in a directory that match one or more filename glob patterns -/// and returns the paths to and filenames of those files. -fn search_directory(directory: &Path, filenames: &[String]) -> Vec<(PathBuf, String)> { - // Escape the specified directory in case it contains characters that have - // special meaning in glob patterns (e.g., `[` or `]`). - let directory = Pattern::escape(directory.to_str().unwrap()); - let directory = Path::new(&directory); - - // Join the escaped directory to the filename glob patterns to obtain - // complete glob patterns for the files being searched for. - let paths = filenames - .iter() - .map(|f| directory.join(f).to_str().unwrap().to_owned()); - - // Prevent wildcards from matching path separators to ensure that the search - // is limited to the specified directory. 
- let mut options = MatchOptions::new(); - options.require_literal_separator = true; - - paths - .map(|p| glob::glob_with(&p, options)) - .filter_map(Result::ok) - .flatten() - .filter_map(|p| { - let path = p.ok()?; - let filename = path.file_name()?.to_str().unwrap(); - - // The `libclang_shared` library has been renamed to `libclang-cpp` - // in Clang 10. This can cause instances of this library (e.g., - // `libclang-cpp.so.10`) to be matched by patterns looking for - // instances of `libclang`. - if filename.contains("-cpp.") { - return None; - } - - Some((path.parent().unwrap().to_owned(), filename.into())) - }) - .collect::>() -} - -/// Finds the files in a directory (and any relevant sibling directories) that -/// match one or more filename glob patterns and returns the paths to and -/// filenames of those files. -fn search_directories(directory: &Path, filenames: &[String]) -> Vec<(PathBuf, String)> { - let mut results = search_directory(directory, filenames); - - // On Windows, `libclang.dll` is usually found in the LLVM `bin` directory - // while `libclang.lib` is usually found in the LLVM `lib` directory. To - // keep things consistent with other platforms, only LLVM `lib` directories - // are included in the backup search directory globs so we need to search - // the LLVM `bin` directory here. - if target_os!("windows") && directory.ends_with("lib") { - let sibling = directory.parent().unwrap().join("bin"); - results.extend(search_directory(&sibling, filenames)); - } - - results -} - -/// Finds the `libclang` static or dynamic libraries matching one or more -/// filename glob patterns and returns the paths to and filenames of those files. -pub fn search_libclang_directories(filenames: &[String], variable: &str) -> Vec<(PathBuf, String)> { - // Search only the path indicated by the relevant environment variable - // (e.g., `LIBCLANG_PATH`) if it is set. - if let Ok(path) = env::var(variable).map(|d| Path::new(&d).to_path_buf()) { - // Check if the path is a matching file. - if let Some(parent) = path.parent() { - let filename = path.file_name().unwrap().to_str().unwrap(); - let libraries = search_directories(parent, filenames); - if libraries.iter().any(|(_, f)| f == filename) { - return vec![(parent.into(), filename.into())]; - } - } - - // Check if the path is directory containing a matching file. - return search_directories(&path, filenames); - } - - let mut found = vec![]; - - // Search the `bin` and `lib` directories in the directory returned by - // `llvm-config --prefix`. - if let Some(output) = run_llvm_config(&["--prefix"]) { - let directory = Path::new(output.lines().next().unwrap()).to_path_buf(); - found.extend(search_directories(&directory.join("bin"), filenames)); - found.extend(search_directories(&directory.join("lib"), filenames)); - found.extend(search_directories(&directory.join("lib64"), filenames)); - } - - // Search the toolchain directory in the directory returned by - // `xcode-select --print-path`. - if target_os!("macos") { - if let Some(output) = run_xcode_select(&["--print-path"]) { - let directory = Path::new(output.lines().next().unwrap()).to_path_buf(); - let directory = directory.join("Toolchains/XcodeDefault.xctoolchain/usr/lib"); - found.extend(search_directories(&directory, filenames)); - } - } - - // Search the directories in the `LD_LIBRARY_PATH` environment variable. 
- if let Ok(path) = env::var("LD_LIBRARY_PATH") { - for directory in env::split_paths(&path) { - found.extend(search_directories(&directory, filenames)); - } - } - - // Determine the `libclang` directory patterns. - let directories: Vec<&str> = if target_os!("haiku") { - DIRECTORIES_HAIKU.into() - } else if target_os!("linux") || target_os!("freebsd") { - DIRECTORIES_LINUX.into() - } else if target_os!("macos") { - DIRECTORIES_MACOS.into() - } else if target_os!("windows") { - let msvc = target_env!("msvc"); - DIRECTORIES_WINDOWS - .iter() - .filter(|d| d.1 || !msvc) - .map(|d| d.0) - .collect() - } else if target_os!("illumos") { - DIRECTORIES_ILLUMOS.into() - } else { - vec![] - }; - - // We use temporary directories when testing the build script so we'll - // remove the prefixes that make the directories absolute. - let directories = if test!() { - directories - .iter() - .map(|d| d.strip_prefix('/').or_else(|| d.strip_prefix("C:\\")).unwrap_or(d)) - .collect::>() - } else { - directories - }; - - // Search the directories provided by the `libclang` directory patterns. - let mut options = MatchOptions::new(); - options.case_sensitive = false; - options.require_literal_separator = true; - for directory in directories.iter() { - if let Ok(directories) = glob::glob_with(directory, options) { - for directory in directories.filter_map(Result::ok).filter(|p| p.is_dir()) { - found.extend(search_directories(&directory, filenames)); - } - } - } - - found -} diff --git a/vendor/clang-sys/build/dynamic.rs b/vendor/clang-sys/build/dynamic.rs deleted file mode 100644 index f3d5a626837b52..00000000000000 --- a/vendor/clang-sys/build/dynamic.rs +++ /dev/null @@ -1,276 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 - -use std::env; -use std::fs::File; -use std::io::{self, Error, ErrorKind, Read, Seek, SeekFrom}; -use std::path::{Path, PathBuf}; - -use super::common; - -//================================================ -// Validation -//================================================ - -/// Extracts the ELF class from the ELF header in a shared library. -fn parse_elf_header(path: &Path) -> io::Result { - let mut file = File::open(path)?; - let mut buffer = [0; 5]; - file.read_exact(&mut buffer)?; - if buffer[..4] == [127, 69, 76, 70] { - Ok(buffer[4]) - } else { - Err(Error::new(ErrorKind::InvalidData, "invalid ELF header")) - } -} - -/// Extracts the magic number and machine type from the PE header in a shared library. -fn parse_pe_header(path: &Path) -> io::Result<(u16, u16)> { - let mut file = File::open(path)?; - - // Extract the header offset. - let mut buffer = [0; 4]; - let start = SeekFrom::Start(0x3C); - file.seek(start)?; - file.read_exact(&mut buffer)?; - let offset = i32::from_le_bytes(buffer); - - // Check the validity of the header. - file.seek(SeekFrom::Start(offset as u64))?; - file.read_exact(&mut buffer)?; - if buffer != [80, 69, 0, 0] { - return Err(Error::new(ErrorKind::InvalidData, "invalid PE header")); - } - - // Extract the magic number. - let mut buffer = [0; 2]; - file.seek(SeekFrom::Current(20))?; - file.read_exact(&mut buffer)?; - let magic_number = u16::from_le_bytes(buffer); - - // Extract the machine type. - let mut buffer = [0; 2]; - file.seek(SeekFrom::Current(-22))?; - file.read_exact(&mut buffer)?; - let machine_type = u16::from_le_bytes(buffer); - - return Ok((magic_number, machine_type)); -} - -/// Checks that a `libclang` shared library matches the target platform. 
-fn validate_library(path: &Path) -> Result<(), String> { - if target_os!("linux") || target_os!("freebsd") { - let class = parse_elf_header(path).map_err(|e| e.to_string())?; - - if target_pointer_width!("32") && class != 1 { - return Err("invalid ELF class (64-bit)".into()); - } - - if target_pointer_width!("64") && class != 2 { - return Err("invalid ELF class (32-bit)".into()); - } - - Ok(()) - } else if target_os!("windows") { - let (magic, machine_type) = parse_pe_header(path).map_err(|e| e.to_string())?; - - if target_pointer_width!("32") && magic != 267 { - return Err("invalid DLL (64-bit)".into()); - } - - if target_pointer_width!("64") && magic != 523 { - return Err("invalid DLL (32-bit)".into()); - } - - let arch_mismatch = match machine_type { - 0x014C if !target_arch!("x86") => Some("x86"), - 0x8664 if !target_arch!("x86_64") => Some("x86-64"), - 0xAA64 if !target_arch!("aarch64") => Some("ARM64"), - _ => None, - }; - - if let Some(arch) = arch_mismatch { - Err(format!("invalid DLL ({arch})")) - } else { - Ok(()) - } - } else { - Ok(()) - } -} - -//================================================ -// Searching -//================================================ - -/// Extracts the version components in a `libclang` shared library filename. -fn parse_version(filename: &str) -> Vec { - let version = if let Some(version) = filename.strip_prefix("libclang.so.") { - version - } else if filename.starts_with("libclang-") { - &filename[9..filename.len() - 3] - } else { - return vec![]; - }; - - version.split('.').map(|s| s.parse().unwrap_or(0)).collect() -} - -/// Finds `libclang` shared libraries and returns the paths to, filenames of, -/// and versions of those shared libraries. -fn search_libclang_directories(runtime: bool) -> Result)>, String> { - let mut files = vec![format!( - "{}clang{}", - env::consts::DLL_PREFIX, - env::consts::DLL_SUFFIX - )]; - - if target_os!("linux") { - // Some Linux distributions don't create a `libclang.so` symlink, so we - // need to look for versioned files (e.g., `libclang-3.9.so`). - files.push("libclang-*.so".into()); - - // Some Linux distributions don't create a `libclang.so` symlink and - // don't have versioned files as described above, so we need to look for - // suffix versioned files (e.g., `libclang.so.1`). However, `ld` cannot - // link to these files, so this will only be included when linking at - // runtime. - if runtime { - files.push("libclang.so.*".into()); - files.push("libclang-*.so.*".into()); - } - } - - if target_os!("freebsd") || target_os!("haiku") || target_os!("netbsd") || target_os!("openbsd") { - // Some BSD distributions don't create a `libclang.so` symlink either, - // but use a different naming scheme for versioned files (e.g., - // `libclang.so.7.0`). - files.push("libclang.so.*".into()); - } - - if target_os!("windows") { - // The official LLVM build uses `libclang.dll` on Windows instead of - // `clang.dll`. However, unofficial builds such as MinGW use `clang.dll`. - files.push("libclang.dll".into()); - } - - // Find and validate `libclang` shared libraries and collect the versions. 
- let mut valid = vec![]; - let mut invalid = vec![]; - for (directory, filename) in common::search_libclang_directories(&files, "LIBCLANG_PATH") { - let path = directory.join(&filename); - match validate_library(&path) { - Ok(()) => { - let version = parse_version(&filename); - valid.push((directory, filename, version)) - } - Err(message) => invalid.push(format!("({}: {})", path.display(), message)), - } - } - - if !valid.is_empty() { - return Ok(valid); - } - - let message = format!( - "couldn't find any valid shared libraries matching: [{}], set the \ - `LIBCLANG_PATH` environment variable to a path where one of these files \ - can be found (invalid: [{}])", - files - .iter() - .map(|f| format!("'{}'", f)) - .collect::>() - .join(", "), - invalid.join(", "), - ); - - Err(message) -} - -/// Finds the "best" `libclang` shared library and returns the directory and -/// filename of that library. -pub fn find(runtime: bool) -> Result<(PathBuf, String), String> { - search_libclang_directories(runtime)? - .iter() - // We want to find the `libclang` shared library with the highest - // version number, hence `max_by_key` below. - // - // However, in the case where there are multiple such `libclang` shared - // libraries, we want to use the order in which they appeared in the - // list returned by `search_libclang_directories` as a tiebreaker since - // that function returns `libclang` shared libraries in descending order - // of preference by how they were found. - // - // `max_by_key`, perhaps surprisingly, returns the *last* item with the - // maximum key rather than the first which results in the opposite of - // the tiebreaking behavior we want. This is easily fixed by reversing - // the list first. - .rev() - .max_by_key(|f| &f.2) - .cloned() - .map(|(path, filename, _)| (path, filename)) - .ok_or_else(|| "unreachable".into()) -} - -//================================================ -// Linking -//================================================ - -/// Finds and links to a `libclang` shared library. -#[cfg(not(feature = "runtime"))] -pub fn link() { - let cep = common::CommandErrorPrinter::default(); - - use std::fs; - - let (directory, filename) = find(false).unwrap(); - println!("cargo:rustc-link-search={}", directory.display()); - - if cfg!(all(target_os = "windows", target_env = "msvc")) { - // Find the `libclang` stub static library required for the MSVC - // toolchain. - let lib = if !directory.ends_with("bin") { - directory - } else { - directory.parent().unwrap().join("lib") - }; - - if lib.join("libclang.lib").exists() { - println!("cargo:rustc-link-search={}", lib.display()); - } else if lib.join("libclang.dll.a").exists() { - // MSYS and MinGW use `libclang.dll.a` instead of `libclang.lib`. - // It is linkable with the MSVC linker, but Rust doesn't recognize - // the `.a` suffix, so we need to copy it with a different name. - // - // FIXME: Maybe we can just hardlink or symlink it? - let out = env::var("OUT_DIR").unwrap(); - fs::copy( - lib.join("libclang.dll.a"), - Path::new(&out).join("libclang.lib"), - ) - .unwrap(); - println!("cargo:rustc-link-search=native={}", out); - } else { - panic!( - "using '{}', so 'libclang.lib' or 'libclang.dll.a' must be \ - available in {}", - filename, - lib.display(), - ); - } - - println!("cargo:rustc-link-lib=dylib=libclang"); - } else { - let name = filename.trim_start_matches("lib"); - - // Strip extensions and trailing version numbers (e.g., the `.so.7.0` in - // `libclang.so.7.0`). 
- let name = match name.find(".dylib").or_else(|| name.find(".so")) { - Some(index) => &name[0..index], - None => name, - }; - - println!("cargo:rustc-link-lib=dylib={}", name); - } - - cep.discard(); -} diff --git a/vendor/clang-sys/build/macros.rs b/vendor/clang-sys/build/macros.rs deleted file mode 100644 index a766a6e27c4427..00000000000000 --- a/vendor/clang-sys/build/macros.rs +++ /dev/null @@ -1,49 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 - -macro_rules! test { - () => (cfg!(test) && ::std::env::var("_CLANG_SYS_TEST").is_ok()); -} - -macro_rules! target_os { - ($os:expr) => { - if cfg!(test) && ::std::env::var("_CLANG_SYS_TEST").is_ok() { - let var = ::std::env::var("_CLANG_SYS_TEST_OS"); - var.map_or(false, |v| v == $os) - } else { - cfg!(target_os = $os) - } - }; -} - -macro_rules! target_arch { - ($arch:expr) => { - if cfg!(test) && ::std::env::var("_CLANG_SYS_TEST").is_ok() { - let var = ::std::env::var("_CLANG_SYS_TEST_ARCH"); - var.map_or(false, |v| v == $arch) - } else { - cfg!(target_arch = $arch) - } - }; -} - -macro_rules! target_pointer_width { - ($pointer_width:expr) => { - if cfg!(test) && ::std::env::var("_CLANG_SYS_TEST").is_ok() { - let var = ::std::env::var("_CLANG_SYS_TEST_POINTER_WIDTH"); - var.map_or(false, |v| v == $pointer_width) - } else { - cfg!(target_pointer_width = $pointer_width) - } - }; -} - -macro_rules! target_env { - ($env:expr) => { - if cfg!(test) && ::std::env::var("_CLANG_SYS_TEST").is_ok() { - let var = ::std::env::var("_CLANG_SYS_TEST_ENV"); - var.map_or(false, |v| v == $env) - } else { - cfg!(target_env = $env) - } - }; -} diff --git a/vendor/clang-sys/build/static.rs b/vendor/clang-sys/build/static.rs deleted file mode 100644 index c1b70eb08b2654..00000000000000 --- a/vendor/clang-sys/build/static.rs +++ /dev/null @@ -1,146 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 - -use std::path::{Path, PathBuf}; - -use glob::Pattern; - -use super::common; - -//================================================ -// Searching -//================================================ - -/// Clang static libraries required to link to `libclang` 3.5 and later. -const CLANG_LIBRARIES: &[&str] = &[ - "clang", - "clangAST", - "clangAnalysis", - "clangBasic", - "clangDriver", - "clangEdit", - "clangFrontend", - "clangIndex", - "clangLex", - "clangParse", - "clangRewrite", - "clangSema", - "clangSerialization", -]; - -/// Gets the name of an LLVM or Clang static library from a path. -fn get_library_name(path: &Path) -> Option { - path.file_stem().map(|p| { - let string = p.to_string_lossy(); - if let Some(name) = string.strip_prefix("lib") { - name.to_owned() - } else { - string.to_string() - } - }) -} - -/// Gets the LLVM static libraries required to link to `libclang`. -fn get_llvm_libraries() -> Vec { - common::run_llvm_config(&["--libs"]) - .unwrap() - .split_whitespace() - .filter_map(|p| { - // Depending on the version of `llvm-config` in use, listed - // libraries may be in one of two forms, a full path to the library - // or simply prefixed with `-l`. - if let Some(path) = p.strip_prefix("-l") { - Some(path.into()) - } else { - get_library_name(Path::new(p)) - } - }) - .collect() -} - -/// Gets the Clang static libraries required to link to `libclang`. -fn get_clang_libraries>(directory: P) -> Vec { - // Escape the directory in case it contains characters that have special - // meaning in glob patterns (e.g., `[` or `]`). 
- let directory = Pattern::escape(directory.as_ref().to_str().unwrap()); - let directory = Path::new(&directory); - - let pattern = directory.join("libclang*.a").to_str().unwrap().to_owned(); - if let Ok(libraries) = glob::glob(&pattern) { - libraries - .filter_map(|l| l.ok().and_then(|l| get_library_name(&l))) - .collect() - } else { - CLANG_LIBRARIES.iter().map(|l| (*l).to_string()).collect() - } -} - -/// Finds a directory containing LLVM and Clang static libraries and returns the -/// path to that directory. -fn find() -> PathBuf { - let name = if target_os!("windows") { - "libclang.lib" - } else { - "libclang.a" - }; - - let files = common::search_libclang_directories(&[name.into()], "LIBCLANG_STATIC_PATH"); - if let Some((directory, _)) = files.into_iter().next() { - directory - } else { - panic!( - "could not find the required `{name}` static library, see the \ - README for more information on how to link to `libclang` statically: \ - https://github.com/KyleMayes/clang-sys?tab=readme-ov-file#static" - ); - } -} - -//================================================ -// Linking -//================================================ - -/// Finds and links to `libclang` static libraries. -pub fn link() { - let cep = common::CommandErrorPrinter::default(); - - let directory = find(); - - // Specify required Clang static libraries. - println!("cargo:rustc-link-search=native={}", directory.display()); - for library in get_clang_libraries(directory) { - println!("cargo:rustc-link-lib=static={}", library); - } - - // Determine the shared mode used by LLVM. - let mode = common::run_llvm_config(&["--shared-mode"]).map(|m| m.trim().to_owned()); - let prefix = if mode.map_or(false, |m| m == "static") { - "static=" - } else { - "" - }; - - // Specify required LLVM static libraries. - println!( - "cargo:rustc-link-search=native={}", - common::run_llvm_config(&["--libdir"]).unwrap().trim_end() - ); - for library in get_llvm_libraries() { - println!("cargo:rustc-link-lib={}{}", prefix, library); - } - - // Specify required system libraries. - // MSVC doesn't need this, as it tracks dependencies inside `.lib` files. - if cfg!(target_os = "freebsd") { - println!("cargo:rustc-flags=-l ffi -l ncursesw -l c++ -l z"); - } else if cfg!(any(target_os = "haiku", target_os = "linux")) { - if cfg!(feature = "libcpp") { - println!("cargo:rustc-flags=-l c++"); - } else { - println!("cargo:rustc-flags=-l ffi -l ncursesw -l stdc++ -l z"); - } - } else if cfg!(target_os = "macos") { - println!("cargo:rustc-flags=-l ffi -l ncurses -l c++ -l z"); - } - - cep.discard(); -} diff --git a/vendor/clang-sys/clippy.toml b/vendor/clang-sys/clippy.toml deleted file mode 100644 index 6f41284e10733b..00000000000000 --- a/vendor/clang-sys/clippy.toml +++ /dev/null @@ -1 +0,0 @@ -doc-valid-idents = ["FreeBSD"] diff --git a/vendor/clang-sys/src/lib.rs b/vendor/clang-sys/src/lib.rs deleted file mode 100644 index 5f5383b9fcc6ac..00000000000000 --- a/vendor/clang-sys/src/lib.rs +++ /dev/null @@ -1,2433 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 - -//! Rust bindings for `libclang`. -//! -//! ## [Documentation](https://docs.rs/clang-sys) -//! -//! Note that the documentation on https://docs.rs for this crate assumes usage -//! of the `runtime` Cargo feature as well as the Cargo feature for the latest -//! supported version of `libclang` (e.g., `clang_11_0`), neither of which are -//! enabled by default. -//! -//! Due to the usage of the `runtime` Cargo feature, this documentation will -//! 
contain some additional types and functions to manage a dynamically loaded -//! `libclang` instance at runtime. -//! -//! Due to the usage of the Cargo feature for the latest supported version of -//! `libclang`, this documentation will contain constants and functions that are -//! not available in the oldest supported version of `libclang` (3.5). All of -//! these types and functions have a documentation comment which specifies the -//! minimum `libclang` version required to use the item. - -#![allow(non_camel_case_types, non_snake_case, non_upper_case_globals)] -#![cfg_attr(feature = "cargo-clippy", allow(clippy::unreadable_literal))] - -pub mod support; - -#[macro_use] -mod link; - -use std::mem; - -use libc::*; - -pub type CXClientData = *mut c_void; -pub type CXCursorVisitor = extern "C" fn(CXCursor, CXCursor, CXClientData) -> CXChildVisitResult; -#[cfg(feature = "clang_3_7")] -pub type CXFieldVisitor = extern "C" fn(CXCursor, CXClientData) -> CXVisitorResult; -pub type CXInclusionVisitor = extern "C" fn(CXFile, *mut CXSourceLocation, c_uint, CXClientData); - -//================================================ -// Macros -//================================================ - -/// Defines a C enum as a series of constants. -macro_rules! cenum { - (#[repr($ty:ty)] $(#[$meta:meta])* enum $name:ident { - $($(#[$vmeta:meta])* const $variant:ident = $value:expr), +, - }) => ( - pub type $name = $ty; - - $($(#[$vmeta])* pub const $variant: $name = $value;)+ - ); - (#[repr($ty:ty)] $(#[$meta:meta])* enum $name:ident { - $($(#[$vmeta:meta])* const $variant:ident = $value:expr); +; - }) => ( - pub type $name = $ty; - - $($(#[$vmeta])* pub const $variant: $name = $value;)+ - ); - ($(#[$meta:meta])* enum $name:ident { - $($(#[$vmeta:meta])* const $variant:ident = $value:expr), +, - }) => ( - pub type $name = c_int; - - $($(#[$vmeta])* pub const $variant: $name = $value;)+ - ); - ($(#[$meta:meta])* enum $name:ident { - $($(#[$vmeta:meta])* const $variant:ident = $value:expr); +; - }) => ( - pub type $name = c_int; - - $($(#[$vmeta])* pub const $variant: $name = $value;)+ - ); -} - -/// Implements a zeroing implementation of `Default` for the supplied type. -macro_rules! default { - (#[$meta:meta] $ty:ty) => { - #[$meta] - impl Default for $ty { - fn default() -> $ty { - unsafe { mem::zeroed() } - } - } - }; - - ($ty:ty) => { - impl Default for $ty { - fn default() -> $ty { - unsafe { mem::zeroed() } - } - } - }; -} - -//================================================ -// Enums -//================================================ - -cenum! { - enum CXAvailabilityKind { - const CXAvailability_Available = 0, - const CXAvailability_Deprecated = 1, - const CXAvailability_NotAvailable = 2, - const CXAvailability_NotAccessible = 3, - } -} - -cenum! { - /// Only available on `libclang` 17.0 and later. 
- #[cfg(feature = "clang_17_0")] - enum CXBinaryOperatorKind { - const CXBinaryOperator_Invalid = 0, - const CXBinaryOperator_PtrMemD = 1, - const CXBinaryOperator_PtrMemI = 2, - const CXBinaryOperator_Mul = 3, - const CXBinaryOperator_Div = 4, - const CXBinaryOperator_Rem = 5, - const CXBinaryOperator_Add = 6, - const CXBinaryOperator_Sub = 7, - const CXBinaryOperator_Shl = 8, - const CXBinaryOperator_Shr = 9, - const CXBinaryOperator_Cmp = 10, - const CXBinaryOperator_LT = 11, - const CXBinaryOperator_GT = 12, - const CXBinaryOperator_LE = 13, - const CXBinaryOperator_GE = 14, - const CXBinaryOperator_EQ = 15, - const CXBinaryOperator_NE = 16, - const CXBinaryOperator_And = 17, - const CXBinaryOperator_Xor = 18, - const CXBinaryOperator_Or = 19, - const CXBinaryOperator_LAnd = 20, - const CXBinaryOperator_LOr = 21, - const CXBinaryOperator_Assign = 22, - const CXBinaryOperator_MulAssign = 23, - const CXBinaryOperator_DivAssign = 24, - const CXBinaryOperator_RemAssign = 25, - const CXBinaryOperator_AddAssign = 26, - const CXBinaryOperator_SubAssign = 27, - const CXBinaryOperator_ShlAssign = 28, - const CXBinaryOperator_ShrAssign = 29, - const CXBinaryOperator_AndAssign = 30, - const CXBinaryOperator_XorAssign = 31, - const CXBinaryOperator_OrAssign = 32, - const CXBinaryOperator_Comma = 33, - } -} - -cenum! { - enum CXCallingConv { - const CXCallingConv_Default = 0, - const CXCallingConv_C = 1, - const CXCallingConv_X86StdCall = 2, - const CXCallingConv_X86FastCall = 3, - const CXCallingConv_X86ThisCall = 4, - const CXCallingConv_X86Pascal = 5, - const CXCallingConv_AAPCS = 6, - const CXCallingConv_AAPCS_VFP = 7, - /// Only produced by `libclang` 4.0 and later. - const CXCallingConv_X86RegCall = 8, - const CXCallingConv_IntelOclBicc = 9, - const CXCallingConv_Win64 = 10, - const CXCallingConv_X86_64Win64 = 10, - const CXCallingConv_X86_64SysV = 11, - /// Only produced by `libclang` 3.6 and later. - const CXCallingConv_X86VectorCall = 12, - /// Only produced by `libclang` 3.9 and later. - const CXCallingConv_Swift = 13, - /// Only produced by `libclang` 3.9 and later. - const CXCallingConv_PreserveMost = 14, - /// Only produced by `libclang` 3.9 and later. - const CXCallingConv_PreserveAll = 15, - /// Only produced by `libclang` 8.0 and later. - const CXCallingConv_AArch64VectorCall = 16, - const CXCallingConv_Invalid = 100, - const CXCallingConv_Unexposed = 200, - /// Only produced by `libclang` 13.0 and later. - const CXCallingConv_SwiftAsync = 17, - /// Only produced by `libclang` 15.0 and later. - const CXCallingConv_AArch64SVEPCS = 18, - /// Only produced by `libclang` 18.0 and later. - const CXCallingConv_M68kRTD = 19, - } -} - -cenum! { - enum CXChildVisitResult { - const CXChildVisit_Break = 0, - const CXChildVisit_Continue = 1, - const CXChildVisit_Recurse = 2, - } -} - -cenum! { - #[repr(c_uchar)] - /// Only available on `libclang` 17.0 and later. - #[cfg(feature = "clang_17_0")] - enum CXChoice { - const CXChoice_Default = 0, - const CXChoice_Enabled = 1, - const CXChoice_Disabled = 2, - } -} - -cenum! { - enum CXCommentInlineCommandRenderKind { - const CXCommentInlineCommandRenderKind_Normal = 0, - const CXCommentInlineCommandRenderKind_Bold = 1, - const CXCommentInlineCommandRenderKind_Monospaced = 2, - const CXCommentInlineCommandRenderKind_Emphasized = 3, - } -} - -cenum! 
{ - enum CXCommentKind { - const CXComment_Null = 0, - const CXComment_Text = 1, - const CXComment_InlineCommand = 2, - const CXComment_HTMLStartTag = 3, - const CXComment_HTMLEndTag = 4, - const CXComment_Paragraph = 5, - const CXComment_BlockCommand = 6, - const CXComment_ParamCommand = 7, - const CXComment_TParamCommand = 8, - const CXComment_VerbatimBlockCommand = 9, - const CXComment_VerbatimBlockLine = 10, - const CXComment_VerbatimLine = 11, - const CXComment_FullComment = 12, - } -} - -cenum! { - enum CXCommentParamPassDirection { - const CXCommentParamPassDirection_In = 0, - const CXCommentParamPassDirection_Out = 1, - const CXCommentParamPassDirection_InOut = 2, - } -} - -cenum! { - enum CXCompilationDatabase_Error { - const CXCompilationDatabase_NoError = 0, - const CXCompilationDatabase_CanNotLoadDatabase = 1, - } -} - -cenum! { - enum CXCompletionChunkKind { - const CXCompletionChunk_Optional = 0, - const CXCompletionChunk_TypedText = 1, - const CXCompletionChunk_Text = 2, - const CXCompletionChunk_Placeholder = 3, - const CXCompletionChunk_Informative = 4, - const CXCompletionChunk_CurrentParameter = 5, - const CXCompletionChunk_LeftParen = 6, - const CXCompletionChunk_RightParen = 7, - const CXCompletionChunk_LeftBracket = 8, - const CXCompletionChunk_RightBracket = 9, - const CXCompletionChunk_LeftBrace = 10, - const CXCompletionChunk_RightBrace = 11, - const CXCompletionChunk_LeftAngle = 12, - const CXCompletionChunk_RightAngle = 13, - const CXCompletionChunk_Comma = 14, - const CXCompletionChunk_ResultType = 15, - const CXCompletionChunk_Colon = 16, - const CXCompletionChunk_SemiColon = 17, - const CXCompletionChunk_Equal = 18, - const CXCompletionChunk_HorizontalSpace = 19, - const CXCompletionChunk_VerticalSpace = 20, - } -} - -cenum! 
{ - enum CXCursorKind { - const CXCursor_UnexposedDecl = 1, - const CXCursor_StructDecl = 2, - const CXCursor_UnionDecl = 3, - const CXCursor_ClassDecl = 4, - const CXCursor_EnumDecl = 5, - const CXCursor_FieldDecl = 6, - const CXCursor_EnumConstantDecl = 7, - const CXCursor_FunctionDecl = 8, - const CXCursor_VarDecl = 9, - const CXCursor_ParmDecl = 10, - const CXCursor_ObjCInterfaceDecl = 11, - const CXCursor_ObjCCategoryDecl = 12, - const CXCursor_ObjCProtocolDecl = 13, - const CXCursor_ObjCPropertyDecl = 14, - const CXCursor_ObjCIvarDecl = 15, - const CXCursor_ObjCInstanceMethodDecl = 16, - const CXCursor_ObjCClassMethodDecl = 17, - const CXCursor_ObjCImplementationDecl = 18, - const CXCursor_ObjCCategoryImplDecl = 19, - const CXCursor_TypedefDecl = 20, - const CXCursor_CXXMethod = 21, - const CXCursor_Namespace = 22, - const CXCursor_LinkageSpec = 23, - const CXCursor_Constructor = 24, - const CXCursor_Destructor = 25, - const CXCursor_ConversionFunction = 26, - const CXCursor_TemplateTypeParameter = 27, - const CXCursor_NonTypeTemplateParameter = 28, - const CXCursor_TemplateTemplateParameter = 29, - const CXCursor_FunctionTemplate = 30, - const CXCursor_ClassTemplate = 31, - const CXCursor_ClassTemplatePartialSpecialization = 32, - const CXCursor_NamespaceAlias = 33, - const CXCursor_UsingDirective = 34, - const CXCursor_UsingDeclaration = 35, - const CXCursor_TypeAliasDecl = 36, - const CXCursor_ObjCSynthesizeDecl = 37, - const CXCursor_ObjCDynamicDecl = 38, - const CXCursor_CXXAccessSpecifier = 39, - const CXCursor_ObjCSuperClassRef = 40, - const CXCursor_ObjCProtocolRef = 41, - const CXCursor_ObjCClassRef = 42, - const CXCursor_TypeRef = 43, - const CXCursor_CXXBaseSpecifier = 44, - const CXCursor_TemplateRef = 45, - const CXCursor_NamespaceRef = 46, - const CXCursor_MemberRef = 47, - const CXCursor_LabelRef = 48, - const CXCursor_OverloadedDeclRef = 49, - const CXCursor_VariableRef = 50, - const CXCursor_InvalidFile = 70, - const CXCursor_NoDeclFound = 71, - const CXCursor_NotImplemented = 72, - const CXCursor_InvalidCode = 73, - const CXCursor_UnexposedExpr = 100, - const CXCursor_DeclRefExpr = 101, - const CXCursor_MemberRefExpr = 102, - const CXCursor_CallExpr = 103, - const CXCursor_ObjCMessageExpr = 104, - const CXCursor_BlockExpr = 105, - const CXCursor_IntegerLiteral = 106, - const CXCursor_FloatingLiteral = 107, - const CXCursor_ImaginaryLiteral = 108, - const CXCursor_StringLiteral = 109, - const CXCursor_CharacterLiteral = 110, - const CXCursor_ParenExpr = 111, - const CXCursor_UnaryOperator = 112, - const CXCursor_ArraySubscriptExpr = 113, - const CXCursor_BinaryOperator = 114, - const CXCursor_CompoundAssignOperator = 115, - const CXCursor_ConditionalOperator = 116, - const CXCursor_CStyleCastExpr = 117, - const CXCursor_CompoundLiteralExpr = 118, - const CXCursor_InitListExpr = 119, - const CXCursor_AddrLabelExpr = 120, - const CXCursor_StmtExpr = 121, - const CXCursor_GenericSelectionExpr = 122, - const CXCursor_GNUNullExpr = 123, - const CXCursor_CXXStaticCastExpr = 124, - const CXCursor_CXXDynamicCastExpr = 125, - const CXCursor_CXXReinterpretCastExpr = 126, - const CXCursor_CXXConstCastExpr = 127, - const CXCursor_CXXFunctionalCastExpr = 128, - const CXCursor_CXXTypeidExpr = 129, - const CXCursor_CXXBoolLiteralExpr = 130, - const CXCursor_CXXNullPtrLiteralExpr = 131, - const CXCursor_CXXThisExpr = 132, - const CXCursor_CXXThrowExpr = 133, - const CXCursor_CXXNewExpr = 134, - const CXCursor_CXXDeleteExpr = 135, - const CXCursor_UnaryExpr = 136, - const 
CXCursor_ObjCStringLiteral = 137, - const CXCursor_ObjCEncodeExpr = 138, - const CXCursor_ObjCSelectorExpr = 139, - const CXCursor_ObjCProtocolExpr = 140, - const CXCursor_ObjCBridgedCastExpr = 141, - const CXCursor_PackExpansionExpr = 142, - const CXCursor_SizeOfPackExpr = 143, - const CXCursor_LambdaExpr = 144, - const CXCursor_ObjCBoolLiteralExpr = 145, - const CXCursor_ObjCSelfExpr = 146, - /// Only produced by `libclang` 3.8 and later. - const CXCursor_OMPArraySectionExpr = 147, - /// Only produced by `libclang` 3.9 and later. - const CXCursor_ObjCAvailabilityCheckExpr = 148, - /// Only produced by `libclang` 7.0 and later. - const CXCursor_FixedPointLiteral = 149, - /// Only produced by `libclang` 12.0 and later. - const CXCursor_OMPArrayShapingExpr = 150, - /// Only produced by `libclang` 12.0 and later. - const CXCursor_OMPIteratorExpr = 151, - /// Only produced by `libclang` 12.0 and later. - const CXCursor_CXXAddrspaceCastExpr = 152, - /// Only produced by `libclang` 15.0 and later. - const CXCursor_ConceptSpecializationExpr = 153, - /// Only produced by `libclang` 15.0 and later. - const CXCursor_RequiresExpr = 154, - /// Only produced by `libclang` 16.0 and later. - const CXCursor_CXXParenListInitExpr = 155, - const CXCursor_UnexposedStmt = 200, - const CXCursor_LabelStmt = 201, - const CXCursor_CompoundStmt = 202, - const CXCursor_CaseStmt = 203, - const CXCursor_DefaultStmt = 204, - const CXCursor_IfStmt = 205, - const CXCursor_SwitchStmt = 206, - const CXCursor_WhileStmt = 207, - const CXCursor_DoStmt = 208, - const CXCursor_ForStmt = 209, - const CXCursor_GotoStmt = 210, - const CXCursor_IndirectGotoStmt = 211, - const CXCursor_ContinueStmt = 212, - const CXCursor_BreakStmt = 213, - const CXCursor_ReturnStmt = 214, - /// Duplicate of `CXCursor_GccAsmStmt`. - const CXCursor_AsmStmt = 215, - const CXCursor_ObjCAtTryStmt = 216, - const CXCursor_ObjCAtCatchStmt = 217, - const CXCursor_ObjCAtFinallyStmt = 218, - const CXCursor_ObjCAtThrowStmt = 219, - const CXCursor_ObjCAtSynchronizedStmt = 220, - const CXCursor_ObjCAutoreleasePoolStmt = 221, - const CXCursor_ObjCForCollectionStmt = 222, - const CXCursor_CXXCatchStmt = 223, - const CXCursor_CXXTryStmt = 224, - const CXCursor_CXXForRangeStmt = 225, - const CXCursor_SEHTryStmt = 226, - const CXCursor_SEHExceptStmt = 227, - const CXCursor_SEHFinallyStmt = 228, - const CXCursor_MSAsmStmt = 229, - const CXCursor_NullStmt = 230, - const CXCursor_DeclStmt = 231, - const CXCursor_OMPParallelDirective = 232, - const CXCursor_OMPSimdDirective = 233, - const CXCursor_OMPForDirective = 234, - const CXCursor_OMPSectionsDirective = 235, - const CXCursor_OMPSectionDirective = 236, - const CXCursor_OMPSingleDirective = 237, - const CXCursor_OMPParallelForDirective = 238, - const CXCursor_OMPParallelSectionsDirective = 239, - const CXCursor_OMPTaskDirective = 240, - const CXCursor_OMPMasterDirective = 241, - const CXCursor_OMPCriticalDirective = 242, - const CXCursor_OMPTaskyieldDirective = 243, - const CXCursor_OMPBarrierDirective = 244, - const CXCursor_OMPTaskwaitDirective = 245, - const CXCursor_OMPFlushDirective = 246, - const CXCursor_SEHLeaveStmt = 247, - /// Only produced by `libclang` 3.6 and later. - const CXCursor_OMPOrderedDirective = 248, - /// Only produced by `libclang` 3.6 and later. - const CXCursor_OMPAtomicDirective = 249, - /// Only produced by `libclang` 3.6 and later. - const CXCursor_OMPForSimdDirective = 250, - /// Only produced by `libclang` 3.6 and later. 
- const CXCursor_OMPParallelForSimdDirective = 251, - /// Only produced by `libclang` 3.6 and later. - const CXCursor_OMPTargetDirective = 252, - /// Only produced by `libclang` 3.6 and later. - const CXCursor_OMPTeamsDirective = 253, - /// Only produced by `libclang` 3.7 and later. - const CXCursor_OMPTaskgroupDirective = 254, - /// Only produced by `libclang` 3.7 and later. - const CXCursor_OMPCancellationPointDirective = 255, - /// Only produced by `libclang` 3.7 and later. - const CXCursor_OMPCancelDirective = 256, - /// Only produced by `libclang` 3.8 and later. - const CXCursor_OMPTargetDataDirective = 257, - /// Only produced by `libclang` 3.8 and later. - const CXCursor_OMPTaskLoopDirective = 258, - /// Only produced by `libclang` 3.8 and later. - const CXCursor_OMPTaskLoopSimdDirective = 259, - /// Only produced by `libclang` 3.8 and later. - const CXCursor_OMPDistributeDirective = 260, - /// Only produced by `libclang` 3.9 and later. - const CXCursor_OMPTargetEnterDataDirective = 261, - /// Only produced by `libclang` 3.9 and later. - const CXCursor_OMPTargetExitDataDirective = 262, - /// Only produced by `libclang` 3.9 and later. - const CXCursor_OMPTargetParallelDirective = 263, - /// Only produced by `libclang` 3.9 and later. - const CXCursor_OMPTargetParallelForDirective = 264, - /// Only produced by `libclang` 3.9 and later. - const CXCursor_OMPTargetUpdateDirective = 265, - /// Only produced by `libclang` 3.9 and later. - const CXCursor_OMPDistributeParallelForDirective = 266, - /// Only produced by `libclang` 3.9 and later. - const CXCursor_OMPDistributeParallelForSimdDirective = 267, - /// Only produced by `libclang` 3.9 and later. - const CXCursor_OMPDistributeSimdDirective = 268, - /// Only produced by `libclang` 3.9 and later. - const CXCursor_OMPTargetParallelForSimdDirective = 269, - /// Only produced by `libclang` 4.0 and later. - const CXCursor_OMPTargetSimdDirective = 270, - /// Only produced by `libclang` 4.0 and later. - const CXCursor_OMPTeamsDistributeDirective = 271, - /// Only produced by `libclang` 4.0 and later. - const CXCursor_OMPTeamsDistributeSimdDirective = 272, - /// Only produced by `libclang` 4.0 and later. - const CXCursor_OMPTeamsDistributeParallelForSimdDirective = 273, - /// Only produced by `libclang` 4.0 and later. - const CXCursor_OMPTeamsDistributeParallelForDirective = 274, - /// Only produced by `libclang` 4.0 and later. - const CXCursor_OMPTargetTeamsDirective = 275, - /// Only produced by `libclang` 4.0 and later. - const CXCursor_OMPTargetTeamsDistributeDirective = 276, - /// Only produced by `libclang` 4.0 and later. - const CXCursor_OMPTargetTeamsDistributeParallelForDirective = 277, - /// Only produced by `libclang` 4.0 and later. - const CXCursor_OMPTargetTeamsDistributeParallelForSimdDirective = 278, - /// Only producer by `libclang` 4.0 and later. - const CXCursor_OMPTargetTeamsDistributeSimdDirective = 279, - /// Only produced by 'libclang' 9.0 and later. - const CXCursor_BuiltinBitCastExpr = 280, - /// Only produced by `libclang` 10.0 and later. - const CXCursor_OMPMasterTaskLoopDirective = 281, - /// Only produced by `libclang` 10.0 and later. - const CXCursor_OMPParallelMasterTaskLoopDirective = 282, - /// Only produced by `libclang` 10.0 and later. - const CXCursor_OMPMasterTaskLoopSimdDirective = 283, - /// Only produced by `libclang` 10.0 and later. - const CXCursor_OMPParallelMasterTaskLoopSimdDirective = 284, - /// Only produced by `libclang` 10.0 and later. 
- const CXCursor_OMPParallelMasterDirective = 285, - /// Only produced by `libclang` 11.0 and later. - const CXCursor_OMPDepobjDirective = 286, - /// Only produced by `libclang` 11.0 and later. - const CXCursor_OMPScanDirective = 287, - /// Only produced by `libclang` 13.0 and later. - const CXCursor_OMPTileDirective = 288, - /// Only produced by `libclang` 13.0 and later. - const CXCursor_OMPCanonicalLoop = 289, - /// Only produced by `libclang` 13.0 and later. - const CXCursor_OMPInteropDirective = 290, - /// Only produced by `libclang` 13.0 and later. - const CXCursor_OMPDispatchDirective = 291, - /// Only produced by `libclang` 13.0 and later. - const CXCursor_OMPMaskedDirective = 292, - /// Only produced by `libclang` 13.0 and later. - const CXCursor_OMPUnrollDirective = 293, - /// Only produced by `libclang` 14.0 and later. - const CXCursor_OMPMetaDirective = 294, - /// Only produced by `libclang` 14.0 and later. - const CXCursor_OMPGenericLoopDirective = 295, - /// Only produced by `libclang` 15.0 and later. - const CXCursor_OMPTeamsGenericLoopDirective = 296, - /// Only produced by `libclang` 15.0 and later. - const CXCursor_OMPTargetTeamsGenericLoopDirective = 297, - /// Only produced by `libclang` 15.0 and later. - const CXCursor_OMPParallelGenericLoopDirective = 298, - /// Only produced by `libclang` 15.0 and later. - const CXCursor_OMPTargetParallelGenericLoopDirective = 299, - /// Only produced by `libclang` 15.0 and later. - const CXCursor_OMPParallelMaskedDirective = 300, - /// Only produced by `libclang` 15.0 and later. - const CXCursor_OMPMaskedTaskLoopDirective = 301, - /// Only produced by `libclang` 15.0 and later. - const CXCursor_OMPMaskedTaskLoopSimdDirective = 302, - /// Only produced by `libclang` 15.0 and later. - const CXCursor_OMPParallelMaskedTaskLoopDirective = 303, - /// Only produced by `libclang` 15.0 and later. - const CXCursor_OMPParallelMaskedTaskLoopSimdDirective = 304, - /// Only produced by `libclang` 16.0 and later. - const CXCursor_OMPErrorDirective = 305, - /// Only produced by `libclang` 18.0 and later. - const CXCursor_OMPScopeDirective = 306, - #[cfg(not(feature="clang_15_0"))] - const CXCursor_TranslationUnit = 300, - #[cfg(feature="clang_15_0")] - const CXCursor_TranslationUnit = 350, - const CXCursor_UnexposedAttr = 400, - const CXCursor_IBActionAttr = 401, - const CXCursor_IBOutletAttr = 402, - const CXCursor_IBOutletCollectionAttr = 403, - const CXCursor_CXXFinalAttr = 404, - const CXCursor_CXXOverrideAttr = 405, - const CXCursor_AnnotateAttr = 406, - const CXCursor_AsmLabelAttr = 407, - const CXCursor_PackedAttr = 408, - const CXCursor_PureAttr = 409, - const CXCursor_ConstAttr = 410, - const CXCursor_NoDuplicateAttr = 411, - const CXCursor_CUDAConstantAttr = 412, - const CXCursor_CUDADeviceAttr = 413, - const CXCursor_CUDAGlobalAttr = 414, - const CXCursor_CUDAHostAttr = 415, - /// Only produced by `libclang` 3.6 and later. - const CXCursor_CUDASharedAttr = 416, - /// Only produced by `libclang` 3.8 and later. - const CXCursor_VisibilityAttr = 417, - /// Only produced by `libclang` 3.8 and later. - const CXCursor_DLLExport = 418, - /// Only produced by `libclang` 3.8 and later. - const CXCursor_DLLImport = 419, - /// Only produced by `libclang` 8.0 and later. - const CXCursor_NSReturnsRetained = 420, - /// Only produced by `libclang` 8.0 and later. - const CXCursor_NSReturnsNotRetained = 421, - /// Only produced by `libclang` 8.0 and later. - const CXCursor_NSReturnsAutoreleased = 422, - /// Only produced by `libclang` 8.0 and later. 
- const CXCursor_NSConsumesSelf = 423, - /// Only produced by `libclang` 8.0 and later. - const CXCursor_NSConsumed = 424, - /// Only produced by `libclang` 8.0 and later. - const CXCursor_ObjCException = 425, - /// Only produced by `libclang` 8.0 and later. - const CXCursor_ObjCNSObject = 426, - /// Only produced by `libclang` 8.0 and later. - const CXCursor_ObjCIndependentClass = 427, - /// Only produced by `libclang` 8.0 and later. - const CXCursor_ObjCPreciseLifetime = 428, - /// Only produced by `libclang` 8.0 and later. - const CXCursor_ObjCReturnsInnerPointer = 429, - /// Only produced by `libclang` 8.0 and later. - const CXCursor_ObjCRequiresSuper = 430, - /// Only produced by `libclang` 8.0 and later. - const CXCursor_ObjCRootClass = 431, - /// Only produced by `libclang` 8.0 and later. - const CXCursor_ObjCSubclassingRestricted = 432, - /// Only produced by `libclang` 8.0 and later. - const CXCursor_ObjCExplicitProtocolImpl = 433, - /// Only produced by `libclang` 8.0 and later. - const CXCursor_ObjCDesignatedInitializer = 434, - /// Only produced by `libclang` 8.0 and later. - const CXCursor_ObjCRuntimeVisible = 435, - /// Only produced by `libclang` 8.0 and later. - const CXCursor_ObjCBoxable = 436, - /// Only produced by `libclang` 8.0 and later. - const CXCursor_FlagEnum = 437, - /// Only produced by `libclang` 9.0 and later. - const CXCursor_ConvergentAttr = 438, - /// Only produced by `libclang` 9.0 and later. - const CXCursor_WarnUnusedAttr = 439, - /// Only produced by `libclang` 9.0 and later. - const CXCursor_WarnUnusedResultAttr = 440, - /// Only produced by `libclang` 9.0 and later. - const CXCursor_AlignedAttr = 441, - const CXCursor_PreprocessingDirective = 500, - const CXCursor_MacroDefinition = 501, - /// Duplicate of `CXCursor_MacroInstantiation`. - const CXCursor_MacroExpansion = 502, - const CXCursor_InclusionDirective = 503, - const CXCursor_ModuleImportDecl = 600, - /// Only produced by `libclang` 3.8 and later. - const CXCursor_TypeAliasTemplateDecl = 601, - /// Only produced by `libclang` 3.9 and later. - const CXCursor_StaticAssert = 602, - /// Only produced by `libclang` 4.0 and later. - const CXCursor_FriendDecl = 603, - /// Only produced by `libclang` 15.0 and later. - const CXCursor_ConceptDecl = 604, - /// Only produced by `libclang` 3.7 and later. - const CXCursor_OverloadCandidate = 700, - } -} - -cenum! { - /// Only available on `libclang` 5.0 and later. - #[cfg(feature = "clang_5_0")] - enum CXCursor_ExceptionSpecificationKind { - const CXCursor_ExceptionSpecificationKind_None = 0, - const CXCursor_ExceptionSpecificationKind_DynamicNone = 1, - const CXCursor_ExceptionSpecificationKind_Dynamic = 2, - const CXCursor_ExceptionSpecificationKind_MSAny = 3, - const CXCursor_ExceptionSpecificationKind_BasicNoexcept = 4, - const CXCursor_ExceptionSpecificationKind_ComputedNoexcept = 5, - const CXCursor_ExceptionSpecificationKind_Unevaluated = 6, - const CXCursor_ExceptionSpecificationKind_Uninstantiated = 7, - const CXCursor_ExceptionSpecificationKind_Unparsed = 8, - /// Only available on `libclang` 9.0 and later. - #[cfg(feature = "clang_9_0")] - const CXCursor_ExceptionSpecificationKind_NoThrow = 9, - } -} - -cenum! { - enum CXDiagnosticSeverity { - const CXDiagnostic_Ignored = 0, - const CXDiagnostic_Note = 1, - const CXDiagnostic_Warning = 2, - const CXDiagnostic_Error = 3, - const CXDiagnostic_Fatal = 4, - } -} - -cenum! 
{ - enum CXErrorCode { - const CXError_Success = 0, - const CXError_Failure = 1, - const CXError_Crashed = 2, - const CXError_InvalidArguments = 3, - const CXError_ASTReadError = 4, - } -} - -cenum! { - enum CXEvalResultKind { - const CXEval_UnExposed = 0, - const CXEval_Int = 1 , - const CXEval_Float = 2, - const CXEval_ObjCStrLiteral = 3, - const CXEval_StrLiteral = 4, - const CXEval_CFStr = 5, - const CXEval_Other = 6, - } -} - -cenum! { - enum CXIdxAttrKind { - const CXIdxAttr_Unexposed = 0, - const CXIdxAttr_IBAction = 1, - const CXIdxAttr_IBOutlet = 2, - const CXIdxAttr_IBOutletCollection = 3, - } -} - -cenum! { - enum CXIdxEntityCXXTemplateKind { - const CXIdxEntity_NonTemplate = 0, - const CXIdxEntity_Template = 1, - const CXIdxEntity_TemplatePartialSpecialization = 2, - const CXIdxEntity_TemplateSpecialization = 3, - } -} - -cenum! { - enum CXIdxEntityKind { - const CXIdxEntity_Unexposed = 0, - const CXIdxEntity_Typedef = 1, - const CXIdxEntity_Function = 2, - const CXIdxEntity_Variable = 3, - const CXIdxEntity_Field = 4, - const CXIdxEntity_EnumConstant = 5, - const CXIdxEntity_ObjCClass = 6, - const CXIdxEntity_ObjCProtocol = 7, - const CXIdxEntity_ObjCCategory = 8, - const CXIdxEntity_ObjCInstanceMethod = 9, - const CXIdxEntity_ObjCClassMethod = 10, - const CXIdxEntity_ObjCProperty = 11, - const CXIdxEntity_ObjCIvar = 12, - const CXIdxEntity_Enum = 13, - const CXIdxEntity_Struct = 14, - const CXIdxEntity_Union = 15, - const CXIdxEntity_CXXClass = 16, - const CXIdxEntity_CXXNamespace = 17, - const CXIdxEntity_CXXNamespaceAlias = 18, - const CXIdxEntity_CXXStaticVariable = 19, - const CXIdxEntity_CXXStaticMethod = 20, - const CXIdxEntity_CXXInstanceMethod = 21, - const CXIdxEntity_CXXConstructor = 22, - const CXIdxEntity_CXXDestructor = 23, - const CXIdxEntity_CXXConversionFunction = 24, - const CXIdxEntity_CXXTypeAlias = 25, - const CXIdxEntity_CXXInterface = 26, - /// Only produced by `libclang` 15.0 and later. - const CXIdxEntity_CXXConcept = 27, - } -} - -cenum! { - enum CXIdxEntityLanguage { - const CXIdxEntityLang_None = 0, - const CXIdxEntityLang_C = 1, - const CXIdxEntityLang_ObjC = 2, - const CXIdxEntityLang_CXX = 3, - /// Only produced by `libclang` 5.0 and later. - const CXIdxEntityLang_Swift = 4, - } -} - -cenum! { - enum CXIdxEntityRefKind { - const CXIdxEntityRef_Direct = 1, - const CXIdxEntityRef_Implicit = 2, - } -} - -cenum! { - enum CXIdxObjCContainerKind { - const CXIdxObjCContainer_ForwardRef = 0, - const CXIdxObjCContainer_Interface = 1, - const CXIdxObjCContainer_Implementation = 2, - } -} - -cenum! { - enum CXLanguageKind { - const CXLanguage_Invalid = 0, - const CXLanguage_C = 1, - const CXLanguage_ObjC = 2, - const CXLanguage_CPlusPlus = 3, - } -} - -cenum! { - enum CXLinkageKind { - const CXLinkage_Invalid = 0, - const CXLinkage_NoLinkage = 1, - const CXLinkage_Internal = 2, - const CXLinkage_UniqueExternal = 3, - const CXLinkage_External = 4, - } -} - -cenum! { - enum CXLoadDiag_Error { - const CXLoadDiag_None = 0, - const CXLoadDiag_Unknown = 1, - const CXLoadDiag_CannotLoad = 2, - const CXLoadDiag_InvalidFile = 3, - } -} - -cenum! { - /// Only available on `libclang` 7.0 and later. 
- #[cfg(feature = "clang_7_0")] - enum CXPrintingPolicyProperty { - const CXPrintingPolicy_Indentation = 0, - const CXPrintingPolicy_SuppressSpecifiers = 1, - const CXPrintingPolicy_SuppressTagKeyword = 2, - const CXPrintingPolicy_IncludeTagDefinition = 3, - const CXPrintingPolicy_SuppressScope = 4, - const CXPrintingPolicy_SuppressUnwrittenScope = 5, - const CXPrintingPolicy_SuppressInitializers = 6, - const CXPrintingPolicy_ConstantArraySizeAsWritten = 7, - const CXPrintingPolicy_AnonymousTagLocations = 8, - const CXPrintingPolicy_SuppressStrongLifetime = 9, - const CXPrintingPolicy_SuppressLifetimeQualifiers = 10, - const CXPrintingPolicy_SuppressTemplateArgsInCXXConstructors = 11, - const CXPrintingPolicy_Bool = 12, - const CXPrintingPolicy_Restrict = 13, - const CXPrintingPolicy_Alignof = 14, - const CXPrintingPolicy_UnderscoreAlignof = 15, - const CXPrintingPolicy_UseVoidForZeroParams = 16, - const CXPrintingPolicy_TerseOutput = 17, - const CXPrintingPolicy_PolishForDeclaration = 18, - const CXPrintingPolicy_Half = 19, - const CXPrintingPolicy_MSWChar = 20, - const CXPrintingPolicy_IncludeNewlines = 21, - const CXPrintingPolicy_MSVCFormatting = 22, - const CXPrintingPolicy_ConstantsAsWritten = 23, - const CXPrintingPolicy_SuppressImplicitBase = 24, - const CXPrintingPolicy_FullyQualifiedName = 25, - } -} - -cenum! { - enum CXRefQualifierKind { - const CXRefQualifier_None = 0, - const CXRefQualifier_LValue = 1, - const CXRefQualifier_RValue = 2, - } -} - -cenum! { - enum CXResult { - const CXResult_Success = 0, - const CXResult_Invalid = 1, - const CXResult_VisitBreak = 2, - } -} - -cenum! { - enum CXSaveError { - const CXSaveError_None = 0, - const CXSaveError_Unknown = 1, - const CXSaveError_TranslationErrors = 2, - const CXSaveError_InvalidTU = 3, - } -} - -cenum! { - /// Only available on `libclang` 6.0 and later. - #[cfg(feature = "clang_6_0")] - enum CXTLSKind { - const CXTLS_None = 0, - const CXTLS_Dynamic = 1, - const CXTLS_Static = 2, - } -} - -cenum! { - enum CXTUResourceUsageKind { - const CXTUResourceUsage_AST = 1, - const CXTUResourceUsage_Identifiers = 2, - const CXTUResourceUsage_Selectors = 3, - const CXTUResourceUsage_GlobalCompletionResults = 4, - const CXTUResourceUsage_SourceManagerContentCache = 5, - const CXTUResourceUsage_AST_SideTables = 6, - const CXTUResourceUsage_SourceManager_Membuffer_Malloc = 7, - const CXTUResourceUsage_SourceManager_Membuffer_MMap = 8, - const CXTUResourceUsage_ExternalASTSource_Membuffer_Malloc = 9, - const CXTUResourceUsage_ExternalASTSource_Membuffer_MMap = 10, - const CXTUResourceUsage_Preprocessor = 11, - const CXTUResourceUsage_PreprocessingRecord = 12, - const CXTUResourceUsage_SourceManager_DataStructures = 13, - const CXTUResourceUsage_Preprocessor_HeaderSearch = 14, - } -} - -cenum! { - /// Only available on `libclang` 3.6 and later. - #[cfg(feature = "clang_3_6")] - enum CXTemplateArgumentKind { - const CXTemplateArgumentKind_Null = 0, - const CXTemplateArgumentKind_Type = 1, - const CXTemplateArgumentKind_Declaration = 2, - const CXTemplateArgumentKind_NullPtr = 3, - const CXTemplateArgumentKind_Integral = 4, - const CXTemplateArgumentKind_Template = 5, - const CXTemplateArgumentKind_TemplateExpansion = 6, - const CXTemplateArgumentKind_Expression = 7, - const CXTemplateArgumentKind_Pack = 8, - const CXTemplateArgumentKind_Invalid = 9, - } -} - -cenum! 
{ - enum CXTokenKind { - const CXToken_Punctuation = 0, - const CXToken_Keyword = 1, - const CXToken_Identifier = 2, - const CXToken_Literal = 3, - const CXToken_Comment = 4, - } -} - -cenum! { - enum CXTypeKind { - const CXType_Invalid = 0, - const CXType_Unexposed = 1, - const CXType_Void = 2, - const CXType_Bool = 3, - const CXType_Char_U = 4, - const CXType_UChar = 5, - const CXType_Char16 = 6, - const CXType_Char32 = 7, - const CXType_UShort = 8, - const CXType_UInt = 9, - const CXType_ULong = 10, - const CXType_ULongLong = 11, - const CXType_UInt128 = 12, - const CXType_Char_S = 13, - const CXType_SChar = 14, - const CXType_WChar = 15, - const CXType_Short = 16, - const CXType_Int = 17, - const CXType_Long = 18, - const CXType_LongLong = 19, - const CXType_Int128 = 20, - const CXType_Float = 21, - const CXType_Double = 22, - const CXType_LongDouble = 23, - const CXType_NullPtr = 24, - const CXType_Overload = 25, - const CXType_Dependent = 26, - const CXType_ObjCId = 27, - const CXType_ObjCClass = 28, - const CXType_ObjCSel = 29, - /// Only produced by `libclang` 3.9 and later. - const CXType_Float128 = 30, - /// Only produced by `libclang` 5.0 and later. - const CXType_Half = 31, - /// Only produced by `libclang` 6.0 and later. - const CXType_Float16 = 32, - /// Only produced by `libclang` 7.0 and later. - const CXType_ShortAccum = 33, - /// Only produced by `libclang` 7.0 and later. - const CXType_Accum = 34, - /// Only produced by `libclang` 7.0 and later. - const CXType_LongAccum = 35, - /// Only produced by `libclang` 7.0 and later. - const CXType_UShortAccum = 36, - /// Only produced by `libclang` 7.0 and later. - const CXType_UAccum = 37, - /// Only produced by `libclang` 7.0 and later. - const CXType_ULongAccum = 38, - /// Only produced by `libclang` 11.0 and later. - const CXType_BFloat16 = 39, - /// Only produced by `libclang` 14.0 and later. - const CXType_Ibm128 = 40, - const CXType_Complex = 100, - const CXType_Pointer = 101, - const CXType_BlockPointer = 102, - const CXType_LValueReference = 103, - const CXType_RValueReference = 104, - const CXType_Record = 105, - const CXType_Enum = 106, - const CXType_Typedef = 107, - const CXType_ObjCInterface = 108, - const CXType_ObjCObjectPointer = 109, - const CXType_FunctionNoProto = 110, - const CXType_FunctionProto = 111, - const CXType_ConstantArray = 112, - const CXType_Vector = 113, - const CXType_IncompleteArray = 114, - const CXType_VariableArray = 115, - const CXType_DependentSizedArray = 116, - const CXType_MemberPointer = 117, - /// Only produced by `libclang` 3.8 and later. - const CXType_Auto = 118, - /// Only produced by `libclang` 3.9 and later. - const CXType_Elaborated = 119, - /// Only produced by `libclang` 5.0 and later. - const CXType_Pipe = 120, - /// Only produced by `libclang` 5.0 and later. - const CXType_OCLImage1dRO = 121, - /// Only produced by `libclang` 5.0 and later. - const CXType_OCLImage1dArrayRO = 122, - /// Only produced by `libclang` 5.0 and later. - const CXType_OCLImage1dBufferRO = 123, - /// Only produced by `libclang` 5.0 and later. - const CXType_OCLImage2dRO = 124, - /// Only produced by `libclang` 5.0 and later. - const CXType_OCLImage2dArrayRO = 125, - /// Only produced by `libclang` 5.0 and later. - const CXType_OCLImage2dDepthRO = 126, - /// Only produced by `libclang` 5.0 and later. - const CXType_OCLImage2dArrayDepthRO = 127, - /// Only produced by `libclang` 5.0 and later. - const CXType_OCLImage2dMSAARO = 128, - /// Only produced by `libclang` 5.0 and later. 
- const CXType_OCLImage2dArrayMSAARO = 129, - /// Only produced by `libclang` 5.0 and later. - const CXType_OCLImage2dMSAADepthRO = 130, - /// Only produced by `libclang` 5.0 and later. - const CXType_OCLImage2dArrayMSAADepthRO = 131, - /// Only produced by `libclang` 5.0 and later. - const CXType_OCLImage3dRO = 132, - /// Only produced by `libclang` 5.0 and later. - const CXType_OCLImage1dWO = 133, - /// Only produced by `libclang` 5.0 and later. - const CXType_OCLImage1dArrayWO = 134, - /// Only produced by `libclang` 5.0 and later. - const CXType_OCLImage1dBufferWO = 135, - /// Only produced by `libclang` 5.0 and later. - const CXType_OCLImage2dWO = 136, - /// Only produced by `libclang` 5.0 and later. - const CXType_OCLImage2dArrayWO = 137, - /// Only produced by `libclang` 5.0 and later. - const CXType_OCLImage2dDepthWO = 138, - /// Only produced by `libclang` 5.0 and later. - const CXType_OCLImage2dArrayDepthWO = 139, - /// Only produced by `libclang` 5.0 and later. - const CXType_OCLImage2dMSAAWO = 140, - /// Only produced by `libclang` 5.0 and later. - const CXType_OCLImage2dArrayMSAAWO = 141, - /// Only produced by `libclang` 5.0 and later. - const CXType_OCLImage2dMSAADepthWO = 142, - /// Only produced by `libclang` 5.0 and later. - const CXType_OCLImage2dArrayMSAADepthWO = 143, - /// Only produced by `libclang` 5.0 and later. - const CXType_OCLImage3dWO = 144, - /// Only produced by `libclang` 5.0 and later. - const CXType_OCLImage1dRW = 145, - /// Only produced by `libclang` 5.0 and later. - const CXType_OCLImage1dArrayRW = 146, - /// Only produced by `libclang` 5.0 and later. - const CXType_OCLImage1dBufferRW = 147, - /// Only produced by `libclang` 5.0 and later. - const CXType_OCLImage2dRW = 148, - /// Only produced by `libclang` 5.0 and later. - const CXType_OCLImage2dArrayRW = 149, - /// Only produced by `libclang` 5.0 and later. - const CXType_OCLImage2dDepthRW = 150, - /// Only produced by `libclang` 5.0 and later. - const CXType_OCLImage2dArrayDepthRW = 151, - /// Only produced by `libclang` 5.0 and later. - const CXType_OCLImage2dMSAARW = 152, - /// Only produced by `libclang` 5.0 and later. - const CXType_OCLImage2dArrayMSAARW = 153, - /// Only produced by `libclang` 5.0 and later. - const CXType_OCLImage2dMSAADepthRW = 154, - /// Only produced by `libclang` 5.0 and later. - const CXType_OCLImage2dArrayMSAADepthRW = 155, - /// Only produced by `libclang` 5.0 and later. - const CXType_OCLImage3dRW = 156, - /// Only produced by `libclang` 5.0 and later. - const CXType_OCLSampler = 157, - /// Only produced by `libclang` 5.0 and later. - const CXType_OCLEvent = 158, - /// Only produced by `libclang` 5.0 and later. - const CXType_OCLQueue = 159, - /// Only produced by `libclang` 5.0 and later. - const CXType_OCLReserveID = 160, - /// Only produced by `libclang` 8.0 and later. - const CXType_ObjCObject = 161, - /// Only produced by `libclang` 8.0 and later. - const CXType_ObjCTypeParam = 162, - /// Only produced by `libclang` 8.0 and later. - const CXType_Attributed = 163, - /// Only produced by `libclang` 8.0 and later. - const CXType_OCLIntelSubgroupAVCMcePayload = 164, - /// Only produced by `libclang` 8.0 and later. - const CXType_OCLIntelSubgroupAVCImePayload = 165, - /// Only produced by `libclang` 8.0 and later. - const CXType_OCLIntelSubgroupAVCRefPayload = 166, - /// Only produced by `libclang` 8.0 and later. - const CXType_OCLIntelSubgroupAVCSicPayload = 167, - /// Only produced by `libclang` 8.0 and later. 
- const CXType_OCLIntelSubgroupAVCMceResult = 168, - /// Only produced by `libclang` 8.0 and later. - const CXType_OCLIntelSubgroupAVCImeResult = 169, - /// Only produced by `libclang` 8.0 and later. - const CXType_OCLIntelSubgroupAVCRefResult = 170, - /// Only produced by `libclang` 8.0 and later. - const CXType_OCLIntelSubgroupAVCSicResult = 171, - /// Only produced by `libclang` 8.0 and later. - const CXType_OCLIntelSubgroupAVCImeResultSingleRefStreamout = 172, - /// Only produced by `libclang` 8.0 and later. - const CXType_OCLIntelSubgroupAVCImeResultDualRefStreamout = 173, - /// Only produced by `libclang` 8.0 and later. - const CXType_OCLIntelSubgroupAVCImeSingleRefStreamin = 174, - /// Only produced by `libclang` 8.0 and later. - const CXType_OCLIntelSubgroupAVCImeDualRefStreamin = 175, - /// Only produced by `libclang` 9.0 and later. - const CXType_ExtVector = 176, - /// Only produced by `libclang` 11.0 and later. - const CXType_Atomic = 177, - /// Only produced by `libclang` 15.0 and later. - const CXType_BTFTagAttributed = 178, - } -} - -cenum! { - enum CXTypeLayoutError { - const CXTypeLayoutError_Invalid = -1, - const CXTypeLayoutError_Incomplete = -2, - const CXTypeLayoutError_Dependent = -3, - const CXTypeLayoutError_NotConstantSize = -4, - const CXTypeLayoutError_InvalidFieldName = -5, - /// Only produced by `libclang` 9.0 and later. - const CXTypeLayoutError_Undeduced = -6, - } -} - -cenum! { - /// Only available on `libclang` 3.8 and later. - #[cfg(feature = "clang_3_8")] - enum CXVisibilityKind { - const CXVisibility_Invalid = 0, - const CXVisibility_Hidden = 1, - const CXVisibility_Protected = 2, - const CXVisibility_Default = 3, - } -} - -cenum! { - /// Only available on `libclang` 8.0 and later. - #[cfg(feature = "clang_8_0")] - enum CXTypeNullabilityKind { - const CXTypeNullability_NonNull = 0, - const CXTypeNullability_Nullable = 1, - const CXTypeNullability_Unspecified = 2, - const CXTypeNullability_Invalid = 3, - /// Only produced by `libclang` 12.0 and later. - const CXTypeNullability_NullableResult = 4, - } -} - -cenum! { - /// Only available on `libclang` 17.0 and later. - #[cfg(feature = "clang_17_0")] - enum CXUnaryOperatorKind { - const CXUnaryOperator_Invalid = 0, - const CXUnaryOperator_PostInc = 1, - const CXUnaryOperator_PostDec = 2, - const CXUnaryOperator_PreInc = 3, - const CXUnaryOperator_PreDec = 4, - const CXUnaryOperator_AddrOf = 5, - const CXUnaryOperator_Deref = 6, - const CXUnaryOperator_Plus = 7, - const CXUnaryOperator_Minus = 8, - const CXUnaryOperator_Not = 9, - const CXUnaryOperator_LNot = 10, - const CXUnaryOperator_Real = 11, - const CXUnaryOperator_Imag = 12, - const CXUnaryOperator_Extension = 13, - const CXUnaryOperator_Coawait = 14, - } -} - -cenum! { - enum CXVisitorResult { - const CXVisit_Break = 0, - const CXVisit_Continue = 1, - } -} - -cenum! { - enum CX_CXXAccessSpecifier { - const CX_CXXInvalidAccessSpecifier = 0, - const CX_CXXPublic = 1, - const CX_CXXProtected = 2, - const CX_CXXPrivate = 3, - } -} - -cenum! { - /// Only available on `libclang` 3.6 and later. - #[cfg(feature = "clang_3_6")] - enum CX_StorageClass { - const CX_SC_Invalid = 0, - const CX_SC_None = 1, - const CX_SC_Extern = 2, - const CX_SC_Static = 3, - const CX_SC_PrivateExtern = 4, - const CX_SC_OpenCLWorkGroupLocal = 5, - const CX_SC_Auto = 6, - const CX_SC_Register = 7, - } -} - -//================================================ -// Flags -//================================================ - -cenum! 
{ - enum CXCodeComplete_Flags { - const CXCodeComplete_IncludeMacros = 1; - const CXCodeComplete_IncludeCodePatterns = 2; - const CXCodeComplete_IncludeBriefComments = 4; - const CXCodeComplete_SkipPreamble = 8; - const CXCodeComplete_IncludeCompletionsWithFixIts = 16; - } -} - -cenum! { - enum CXCompletionContext { - const CXCompletionContext_Unexposed = 0; - const CXCompletionContext_AnyType = 1; - const CXCompletionContext_AnyValue = 2; - const CXCompletionContext_ObjCObjectValue = 4; - const CXCompletionContext_ObjCSelectorValue = 8; - const CXCompletionContext_CXXClassTypeValue = 16; - const CXCompletionContext_DotMemberAccess = 32; - const CXCompletionContext_ArrowMemberAccess = 64; - const CXCompletionContext_ObjCPropertyAccess = 128; - const CXCompletionContext_EnumTag = 256; - const CXCompletionContext_UnionTag = 512; - const CXCompletionContext_StructTag = 1024; - const CXCompletionContext_ClassTag = 2048; - const CXCompletionContext_Namespace = 4096; - const CXCompletionContext_NestedNameSpecifier = 8192; - const CXCompletionContext_ObjCInterface = 16384; - const CXCompletionContext_ObjCProtocol = 32768; - const CXCompletionContext_ObjCCategory = 65536; - const CXCompletionContext_ObjCInstanceMessage = 131072; - const CXCompletionContext_ObjCClassMessage = 262144; - const CXCompletionContext_ObjCSelectorName = 524288; - const CXCompletionContext_MacroName = 1048576; - const CXCompletionContext_NaturalLanguage = 2097152; - const CXCompletionContext_IncludedFile = 4194304; - const CXCompletionContext_Unknown = 8388607; - } -} - -cenum! { - enum CXDiagnosticDisplayOptions { - const CXDiagnostic_DisplaySourceLocation = 1; - const CXDiagnostic_DisplayColumn = 2; - const CXDiagnostic_DisplaySourceRanges = 4; - const CXDiagnostic_DisplayOption = 8; - const CXDiagnostic_DisplayCategoryId = 16; - const CXDiagnostic_DisplayCategoryName = 32; - } -} - -cenum! { - enum CXGlobalOptFlags { - const CXGlobalOpt_None = 0; - const CXGlobalOpt_ThreadBackgroundPriorityForIndexing = 1; - const CXGlobalOpt_ThreadBackgroundPriorityForEditing = 2; - const CXGlobalOpt_ThreadBackgroundPriorityForAll = 3; - } -} - -cenum! { - enum CXIdxDeclInfoFlags { - const CXIdxDeclFlag_Skipped = 1; - } -} - -cenum! { - enum CXIndexOptFlags { - const CXIndexOptNone = 0; - const CXIndexOptSuppressRedundantRefs = 1; - const CXIndexOptIndexFunctionLocalSymbols = 2; - const CXIndexOptIndexImplicitTemplateInstantiations = 4; - const CXIndexOptSuppressWarnings = 8; - const CXIndexOptSkipParsedBodiesInSession = 16; - } -} - -/// Only available on `libclang` 17.0 and later. -#[cfg(feature = "clang_17_0")] -#[cfg(not(target_os = "windows"))] -pub type CXIndexOptions_Flags = c_ushort; - -/// Only available on `libclang` 17.0 and later. -#[cfg(feature = "clang_17_0")] -#[cfg(target_os = "windows")] -pub type CXIndexOptions_Flags = c_uint; - -/// Only available on `libclang` 17.0 and later. -#[cfg(feature = "clang_17_0")] -pub const CXIndexOptions_ExcludeDeclarationsFromPCH: CXIndexOptions_Flags = 1; - -/// Only available on `libclang` 17.0 and later. -#[cfg(feature = "clang_17_0")] -pub const CXIndexOptions_DisplayDiagnostics: CXIndexOptions_Flags = 2; - -/// Only available on `libclang` 17.0 and later. -#[cfg(feature = "clang_17_0")] -pub const CXIndexOptions_StorePreamblesInMemory: CXIndexOptions_Flags = 4; - -cenum! { - enum CXNameRefFlags { - const CXNameRange_WantQualifier = 1; - const CXNameRange_WantTemplateArgs = 2; - const CXNameRange_WantSinglePiece = 4; - } -} - -cenum! 
{ - enum CXObjCDeclQualifierKind { - const CXObjCDeclQualifier_None = 0; - const CXObjCDeclQualifier_In = 1; - const CXObjCDeclQualifier_Inout = 2; - const CXObjCDeclQualifier_Out = 4; - const CXObjCDeclQualifier_Bycopy = 8; - const CXObjCDeclQualifier_Byref = 16; - const CXObjCDeclQualifier_Oneway = 32; - } -} - -cenum! { - enum CXObjCPropertyAttrKind { - const CXObjCPropertyAttr_noattr = 0; - const CXObjCPropertyAttr_readonly = 1; - const CXObjCPropertyAttr_getter = 2; - const CXObjCPropertyAttr_assign = 4; - const CXObjCPropertyAttr_readwrite = 8; - const CXObjCPropertyAttr_retain = 16; - const CXObjCPropertyAttr_copy = 32; - const CXObjCPropertyAttr_nonatomic = 64; - const CXObjCPropertyAttr_setter = 128; - const CXObjCPropertyAttr_atomic = 256; - const CXObjCPropertyAttr_weak = 512; - const CXObjCPropertyAttr_strong = 1024; - const CXObjCPropertyAttr_unsafe_unretained = 2048; - /// Only available on `libclang` 3.9 and later. - #[cfg(feature = "clang_3_9")] - const CXObjCPropertyAttr_class = 4096; - } -} - -cenum! { - enum CXReparse_Flags { - const CXReparse_None = 0; - } -} - -cenum! { - enum CXSaveTranslationUnit_Flags { - const CXSaveTranslationUnit_None = 0; - } -} - -cenum! { - /// Only available on `libclang` 7.0 and later. - #[cfg(feature = "clang_7_0")] - enum CXSymbolRole { - const CXSymbolRole_None = 0; - const CXSymbolRole_Declaration = 1; - const CXSymbolRole_Definition = 2; - const CXSymbolRole_Reference = 4; - const CXSymbolRole_Read = 8; - const CXSymbolRole_Write = 16; - const CXSymbolRole_Call = 32; - const CXSymbolRole_Dynamic = 64; - const CXSymbolRole_AddressOf = 128; - const CXSymbolRole_Implicit = 256; - } -} - -cenum! { - enum CXTranslationUnit_Flags { - const CXTranslationUnit_None = 0; - const CXTranslationUnit_DetailedPreprocessingRecord = 1; - const CXTranslationUnit_Incomplete = 2; - const CXTranslationUnit_PrecompiledPreamble = 4; - const CXTranslationUnit_CacheCompletionResults = 8; - const CXTranslationUnit_ForSerialization = 16; - const CXTranslationUnit_CXXChainedPCH = 32; - const CXTranslationUnit_SkipFunctionBodies = 64; - const CXTranslationUnit_IncludeBriefCommentsInCodeCompletion = 128; - /// Only available on `libclang` 3.8 and later. - #[cfg(feature = "clang_3_8")] - const CXTranslationUnit_CreatePreambleOnFirstParse = 256; - /// Only available on `libclang` 3.9 and later. - #[cfg(feature = "clang_3_9")] - const CXTranslationUnit_KeepGoing = 512; - /// Only available on `libclang` 5.0 and later. - #[cfg(feature = "clang_5_0")] - const CXTranslationUnit_SingleFileParse = 1024; - /// Only available on `libclang` 7.0 and later. - #[cfg(feature = "clang_7_0")] - const CXTranslationUnit_LimitSkipFunctionBodiesToPreamble = 2048; - /// Only available on `libclang` 8.0 and later. - #[cfg(feature = "clang_8_0")] - const CXTranslationUnit_IncludeAttributedTypes = 4096; - /// Only available on `libclang` 8.0 and later. - #[cfg(feature = "clang_8_0")] - const CXTranslationUnit_VisitImplicitAttributes = 8192; - /// Only available on `libclang` 9.0 and later. - #[cfg(feature = "clang_9_0")] - const CXTranslationUnit_IgnoreNonErrorsFromIncludedFiles = 16384; - /// Only available on `libclang` 10.0 and later. - #[cfg(feature = "clang_10_0")] - const CXTranslationUnit_RetainExcludedConditionalBlocks = 32768; - } -} - -//================================================ -// Structs -//================================================ - -// Opaque ________________________________________ - -macro_rules! 
opaque { - ($name:ident) => { - pub type $name = *mut c_void; - }; -} - -opaque!(CXCompilationDatabase); -opaque!(CXCompileCommand); -opaque!(CXCompileCommands); -opaque!(CXCompletionString); -opaque!(CXCursorSet); -opaque!(CXDiagnostic); -opaque!(CXDiagnosticSet); -#[cfg(feature = "clang_3_9")] -opaque!(CXEvalResult); -opaque!(CXFile); -opaque!(CXIdxClientASTFile); -opaque!(CXIdxClientContainer); -opaque!(CXIdxClientEntity); -opaque!(CXIdxClientFile); -opaque!(CXIndex); -opaque!(CXIndexAction); -opaque!(CXModule); -#[cfg(feature = "clang_7_0")] -opaque!(CXPrintingPolicy); -opaque!(CXRemapping); -#[cfg(feature = "clang_5_0")] -opaque!(CXTargetInfo); -opaque!(CXTranslationUnit); - -// Transparent ___________________________________ - -#[derive(Copy, Clone, Debug)] -#[repr(C)] -pub struct CXCodeCompleteResults { - pub Results: *mut CXCompletionResult, - pub NumResults: c_uint, -} - -default!(CXCodeCompleteResults); - -#[derive(Copy, Clone, Debug)] -#[repr(C)] -pub struct CXComment { - pub ASTNode: *const c_void, - pub TranslationUnit: CXTranslationUnit, -} - -default!(CXComment); - -#[derive(Copy, Clone, Debug)] -#[repr(C)] -pub struct CXCompletionResult { - pub CursorKind: CXCursorKind, - pub CompletionString: CXCompletionString, -} - -default!(CXCompletionResult); - -#[derive(Copy, Clone, Debug)] -#[repr(C)] -pub struct CXCursor { - pub kind: CXCursorKind, - pub xdata: c_int, - pub data: [*const c_void; 3], -} - -default!(CXCursor); - -#[derive(Copy, Clone, Debug)] -#[repr(C)] -pub struct CXCursorAndRangeVisitor { - pub context: *mut c_void, - pub visit: Option CXVisitorResult>, -} - -default!(CXCursorAndRangeVisitor); - -#[derive(Copy, Clone, Debug)] -#[repr(C)] -pub struct CXFileUniqueID { - pub data: [c_ulonglong; 3], -} - -default!(CXFileUniqueID); - -#[derive(Copy, Clone, Debug)] -#[repr(C)] -pub struct CXIdxAttrInfo { - pub kind: CXIdxAttrKind, - pub cursor: CXCursor, - pub loc: CXIdxLoc, -} - -default!(CXIdxAttrInfo); - -#[derive(Copy, Clone, Debug)] -#[repr(C)] -pub struct CXIdxBaseClassInfo { - pub base: *const CXIdxEntityInfo, - pub cursor: CXCursor, - pub loc: CXIdxLoc, -} - -default!(CXIdxBaseClassInfo); - -#[derive(Copy, Clone, Debug)] -#[repr(C)] -pub struct CXIdxCXXClassDeclInfo { - pub declInfo: *const CXIdxDeclInfo, - pub bases: *const *const CXIdxBaseClassInfo, - pub numBases: c_uint, -} - -default!(CXIdxCXXClassDeclInfo); - -#[derive(Copy, Clone, Debug)] -#[repr(C)] -pub struct CXIdxContainerInfo { - pub cursor: CXCursor, -} - -default!(CXIdxContainerInfo); - -#[derive(Copy, Clone, Debug)] -#[repr(C)] -pub struct CXIdxDeclInfo { - pub entityInfo: *const CXIdxEntityInfo, - pub cursor: CXCursor, - pub loc: CXIdxLoc, - pub semanticContainer: *const CXIdxContainerInfo, - pub lexicalContainer: *const CXIdxContainerInfo, - pub isRedeclaration: c_int, - pub isDefinition: c_int, - pub isContainer: c_int, - pub declAsContainer: *const CXIdxContainerInfo, - pub isImplicit: c_int, - pub attributes: *const *const CXIdxAttrInfo, - pub numAttributes: c_uint, - pub flags: c_uint, -} - -default!(CXIdxDeclInfo); - -#[derive(Copy, Clone, Debug)] -#[repr(C)] -pub struct CXIdxEntityInfo { - pub kind: CXIdxEntityKind, - pub templateKind: CXIdxEntityCXXTemplateKind, - pub lang: CXIdxEntityLanguage, - pub name: *const c_char, - pub USR: *const c_char, - pub cursor: CXCursor, - pub attributes: *const *const CXIdxAttrInfo, - pub numAttributes: c_uint, -} - -default!(CXIdxEntityInfo); - -#[derive(Copy, Clone, Debug)] -#[repr(C)] -pub struct CXIdxEntityRefInfo { - pub kind: CXIdxEntityRefKind, - 
pub cursor: CXCursor, - pub loc: CXIdxLoc, - pub referencedEntity: *const CXIdxEntityInfo, - pub parentEntity: *const CXIdxEntityInfo, - pub container: *const CXIdxContainerInfo, - /// Only available on `libclang` 7.0 and later. - #[cfg(feature = "clang_7_0")] - pub role: CXSymbolRole, -} - -default!(CXIdxEntityRefInfo); - -#[derive(Copy, Clone, Debug)] -#[repr(C)] -pub struct CXIdxIBOutletCollectionAttrInfo { - pub attrInfo: *const CXIdxAttrInfo, - pub objcClass: *const CXIdxEntityInfo, - pub classCursor: CXCursor, - pub classLoc: CXIdxLoc, -} - -default!(CXIdxIBOutletCollectionAttrInfo); - -#[derive(Copy, Clone, Debug)] -#[repr(C)] -pub struct CXIdxImportedASTFileInfo { - pub file: CXFile, - pub module: CXModule, - pub loc: CXIdxLoc, - pub isImplicit: c_int, -} - -default!(CXIdxImportedASTFileInfo); - -#[derive(Copy, Clone, Debug)] -#[repr(C)] -pub struct CXIdxIncludedFileInfo { - pub hashLoc: CXIdxLoc, - pub filename: *const c_char, - pub file: CXFile, - pub isImport: c_int, - pub isAngled: c_int, - pub isModuleImport: c_int, -} - -default!(CXIdxIncludedFileInfo); - -#[derive(Copy, Clone, Debug)] -#[repr(C)] -pub struct CXIdxLoc { - pub ptr_data: [*mut c_void; 2], - pub int_data: c_uint, -} - -default!(CXIdxLoc); - -#[derive(Copy, Clone, Debug)] -#[repr(C)] -pub struct CXIdxObjCCategoryDeclInfo { - pub containerInfo: *const CXIdxObjCContainerDeclInfo, - pub objcClass: *const CXIdxEntityInfo, - pub classCursor: CXCursor, - pub classLoc: CXIdxLoc, - pub protocols: *const CXIdxObjCProtocolRefListInfo, -} - -default!(CXIdxObjCCategoryDeclInfo); - -#[derive(Copy, Clone, Debug)] -#[repr(C)] -pub struct CXIdxObjCContainerDeclInfo { - pub declInfo: *const CXIdxDeclInfo, - pub kind: CXIdxObjCContainerKind, -} - -default!(CXIdxObjCContainerDeclInfo); - -#[derive(Copy, Clone, Debug)] -#[repr(C)] -pub struct CXIdxObjCInterfaceDeclInfo { - pub containerInfo: *const CXIdxObjCContainerDeclInfo, - pub superInfo: *const CXIdxBaseClassInfo, - pub protocols: *const CXIdxObjCProtocolRefListInfo, -} - -default!(CXIdxObjCInterfaceDeclInfo); - -#[derive(Copy, Clone, Debug)] -#[repr(C)] -pub struct CXIdxObjCPropertyDeclInfo { - pub declInfo: *const CXIdxDeclInfo, - pub getter: *const CXIdxEntityInfo, - pub setter: *const CXIdxEntityInfo, -} - -default!(CXIdxObjCPropertyDeclInfo); - -#[derive(Copy, Clone, Debug)] -#[repr(C)] -pub struct CXIdxObjCProtocolRefInfo { - pub protocol: *const CXIdxEntityInfo, - pub cursor: CXCursor, - pub loc: CXIdxLoc, -} - -default!(CXIdxObjCProtocolRefInfo); - -#[derive(Copy, Clone, Debug)] -#[repr(C)] -pub struct CXIdxObjCProtocolRefListInfo { - pub protocols: *const *const CXIdxObjCProtocolRefInfo, - pub numProtocols: c_uint, -} - -default!(CXIdxObjCProtocolRefListInfo); - -#[cfg(feature = "clang_17_0")] -#[derive(Copy, Clone, Debug)] -#[repr(C)] -pub struct CXIndexOptions { - pub Size: c_uint, - pub ThreadBackgroundPriorityForIndexing: CXChoice, - pub ThreadBackgroundPriorityForEditing: CXChoice, - pub flags: CXIndexOptions_Flags, - pub PreambleStoragePath: *const c_char, - pub InvocationEmissionPath: *const c_char, -} - -#[cfg(feature = "clang_17_0")] -default!(CXIndexOptions); - -#[derive(Copy, Clone, Debug)] -#[repr(C)] -pub struct CXPlatformAvailability { - pub Platform: CXString, - pub Introduced: CXVersion, - pub Deprecated: CXVersion, - pub Obsoleted: CXVersion, - pub Unavailable: c_int, - pub Message: CXString, -} - -default!(CXPlatformAvailability); - -#[derive(Copy, Clone, Debug)] -#[repr(C)] -pub struct CXSourceLocation { - pub ptr_data: [*const c_void; 2], - pub 
int_data: c_uint,
-}
-
-default!(CXSourceLocation);
-
-#[derive(Copy, Clone, Debug)]
-#[repr(C)]
-pub struct CXSourceRange {
-    pub ptr_data: [*const c_void; 2],
-    pub begin_int_data: c_uint,
-    pub end_int_data: c_uint,
-}
-
-default!(CXSourceRange);
-
-#[derive(Copy, Clone, Debug)]
-#[repr(C)]
-pub struct CXSourceRangeList {
-    pub count: c_uint,
-    pub ranges: *mut CXSourceRange,
-}
-
-default!(CXSourceRangeList);
-
-#[derive(Copy, Clone, Debug)]
-#[repr(C)]
-pub struct CXString {
-    pub data: *const c_void,
-    pub private_flags: c_uint,
-}
-
-default!(CXString);
-
-#[cfg(feature = "clang_3_8")]
-#[derive(Copy, Clone, Debug)]
-#[repr(C)]
-pub struct CXStringSet {
-    pub Strings: *mut CXString,
-    pub Count: c_uint,
-}
-
-#[cfg(feature = "clang_3_8")]
-default!(CXStringSet);
-
-#[derive(Copy, Clone, Debug)]
-#[repr(C)]
-pub struct CXTUResourceUsage {
-    pub data: *mut c_void,
-    pub numEntries: c_uint,
-    pub entries: *mut CXTUResourceUsageEntry,
-}
-
-default!(CXTUResourceUsage);
-
-#[derive(Copy, Clone, Debug)]
-#[repr(C)]
-pub struct CXTUResourceUsageEntry {
-    pub kind: CXTUResourceUsageKind,
-    pub amount: c_ulong,
-}
-
-default!(CXTUResourceUsageEntry);
-
-#[derive(Copy, Clone, Debug)]
-#[repr(C)]
-pub struct CXToken {
-    pub int_data: [c_uint; 4],
-    pub ptr_data: *mut c_void,
-}
-
-default!(CXToken);
-
-#[derive(Copy, Clone, Debug)]
-#[repr(C)]
-pub struct CXType {
-    pub kind: CXTypeKind,
-    pub data: [*mut c_void; 2],
-}
-
-default!(CXType);
-
-#[derive(Copy, Clone, Debug)]
-#[repr(C)]
-pub struct CXUnsavedFile {
-    pub Filename: *const c_char,
-    pub Contents: *const c_char,
-    pub Length: c_ulong,
-}
-
-default!(CXUnsavedFile);
-
-#[derive(Copy, Clone, Debug)]
-#[repr(C)]
-pub struct CXVersion {
-    pub Major: c_int,
-    pub Minor: c_int,
-    pub Subminor: c_int,
-}
-
-default!(CXVersion);
-
-#[derive(Copy, Clone, Debug)]
-#[repr(C)]
-#[rustfmt::skip]
-pub struct IndexerCallbacks {
-    pub abortQuery: Option<extern fn(CXClientData, *mut c_void) -> c_int>,
-    pub diagnostic: Option<extern fn(CXClientData, CXDiagnosticSet, *mut c_void)>,
-    pub enteredMainFile: Option<extern fn(CXClientData, CXFile, *mut c_void) -> CXIdxClientFile>,
-    pub ppIncludedFile: Option<extern fn(CXClientData, *const CXIdxIncludedFileInfo) -> CXIdxClientFile>,
-    pub importedASTFile: Option<extern fn(CXClientData, *const CXIdxImportedASTFileInfo) -> CXIdxClientASTFile>,
-    pub startedTranslationUnit: Option<extern fn(CXClientData, *mut c_void) -> CXIdxClientContainer>,
-    pub indexDeclaration: Option<extern fn(CXClientData, *const CXIdxDeclInfo)>,
-    pub indexEntityReference: Option<extern fn(CXClientData, *const CXIdxEntityRefInfo)>,
-}
-
-default!(IndexerCallbacks);
-
-//================================================
-// Functions
-//================================================
-
-link! {
-    pub fn clang_CXCursorSet_contains(set: CXCursorSet, cursor: CXCursor) -> c_uint;
-    pub fn clang_CXCursorSet_insert(set: CXCursorSet, cursor: CXCursor) -> c_uint;
-    pub fn clang_CXIndex_getGlobalOptions(index: CXIndex) -> CXGlobalOptFlags;
-    pub fn clang_CXIndex_setGlobalOptions(index: CXIndex, flags: CXGlobalOptFlags);
-    /// Only available on `libclang` 6.0 and later.
-    #[cfg(feature = "clang_6_0")]
-    pub fn clang_CXIndex_setInvocationEmissionPathOption(index: CXIndex, path: *const c_char);
-    /// Only available on `libclang` 3.9 and later.
-    #[cfg(feature = "clang_3_9")]
-    pub fn clang_CXXConstructor_isConvertingConstructor(cursor: CXCursor) -> c_uint;
-    /// Only available on `libclang` 3.9 and later.
-    #[cfg(feature = "clang_3_9")]
-    pub fn clang_CXXConstructor_isCopyConstructor(cursor: CXCursor) -> c_uint;
-    /// Only available on `libclang` 3.9 and later.
-    #[cfg(feature = "clang_3_9")]
-    pub fn clang_CXXConstructor_isDefaultConstructor(cursor: CXCursor) -> c_uint;
-    /// Only available on `libclang` 3.9 and later.
- #[cfg(feature = "clang_3_9")] - pub fn clang_CXXConstructor_isMoveConstructor(cursor: CXCursor) -> c_uint; - /// Only available on `libclang` 3.8 and later. - #[cfg(feature = "clang_3_8")] - pub fn clang_CXXField_isMutable(cursor: CXCursor) -> c_uint; - pub fn clang_CXXMethod_isConst(cursor: CXCursor) -> c_uint; - /// Only available on `libclang` 16.0 and later. - #[cfg(feature = "clang_16_0")] - pub fn clang_CXXMethod_isCopyAssignmentOperator(cursor: CXCursor) -> c_uint; - /// Only available on `libclang` 3.9 and later. - #[cfg(feature = "clang_3_9")] - pub fn clang_CXXMethod_isDefaulted(cursor: CXCursor) -> c_uint; - /// Only available on `libclang` 16.0 and later. - #[cfg(feature = "clang_16_0")] - pub fn clang_CXXMethod_isDeleted(cursor: CXCursor) -> c_uint; - /// Only available on `libclang` 16.0 and later. - #[cfg(feature = "clang_16_0")] - pub fn clang_CXXMethod_isMoveAssignmentOperator(cursor: CXCursor) -> c_uint; - pub fn clang_CXXMethod_isPureVirtual(cursor: CXCursor) -> c_uint; - pub fn clang_CXXMethod_isStatic(cursor: CXCursor) -> c_uint; - pub fn clang_CXXMethod_isVirtual(cursor: CXCursor) -> c_uint; - /// Only available on `libclang` 17.0 and later. - #[cfg(feature = "clang_17_0")] - pub fn clang_CXXMethod_isExplicit(cursor: CXCursor) -> c_uint; - /// Only available on `libclang` 6.0 and later. - #[cfg(feature = "clang_6_0")] - pub fn clang_CXXRecord_isAbstract(cursor: CXCursor) -> c_uint; - pub fn clang_CompilationDatabase_dispose(database: CXCompilationDatabase); - pub fn clang_CompilationDatabase_fromDirectory(directory: *const c_char, error: *mut CXCompilationDatabase_Error) -> CXCompilationDatabase; - pub fn clang_CompilationDatabase_getAllCompileCommands(database: CXCompilationDatabase) -> CXCompileCommands; - pub fn clang_CompilationDatabase_getCompileCommands(database: CXCompilationDatabase, filename: *const c_char) -> CXCompileCommands; - pub fn clang_CompileCommand_getArg(command: CXCompileCommand, index: c_uint) -> CXString; - pub fn clang_CompileCommand_getDirectory(command: CXCompileCommand) -> CXString; - /// Only available on `libclang` 3.8 and later. - #[cfg(feature = "clang_3_8")] - pub fn clang_CompileCommand_getFilename(command: CXCompileCommand) -> CXString; - /// Only available on `libclang` 3.8 and later. - #[cfg(feature = "clang_3_8")] - pub fn clang_CompileCommand_getMappedSourceContent(command: CXCompileCommand, index: c_uint) -> CXString; - /// Only available on `libclang` 3.8 and later. - #[cfg(feature = "clang_3_8")] - pub fn clang_CompileCommand_getMappedSourcePath(command: CXCompileCommand, index: c_uint) -> CXString; - pub fn clang_CompileCommand_getNumArgs(command: CXCompileCommand) -> c_uint; - pub fn clang_CompileCommand_getNumMappedSources(command: CXCompileCommand) -> c_uint; - pub fn clang_CompileCommands_dispose(command: CXCompileCommands); - pub fn clang_CompileCommands_getCommand(command: CXCompileCommands, index: c_uint) -> CXCompileCommand; - pub fn clang_CompileCommands_getSize(command: CXCompileCommands) -> c_uint; - /// Only available on `libclang` 3.9 and later. - #[cfg(feature = "clang_3_9")] - pub fn clang_Cursor_Evaluate(cursor: CXCursor) -> CXEvalResult; - pub fn clang_Cursor_getArgument(cursor: CXCursor, index: c_uint) -> CXCursor; - pub fn clang_Cursor_getBriefCommentText(cursor: CXCursor) -> CXString; - /// Only available on `libclang` 3.8 and later. 
- #[cfg(feature = "clang_3_8")] - pub fn clang_Cursor_getCXXManglings(cursor: CXCursor) -> *mut CXStringSet; - pub fn clang_Cursor_getCommentRange(cursor: CXCursor) -> CXSourceRange; - /// Only available on `libclang` 3.6 and later. - #[cfg(feature = "clang_3_6")] - pub fn clang_Cursor_getMangling(cursor: CXCursor) -> CXString; - pub fn clang_Cursor_getModule(cursor: CXCursor) -> CXModule; - pub fn clang_Cursor_getNumArguments(cursor: CXCursor) -> c_int; - /// Only available on `libclang` 3.6 and later. - #[cfg(feature = "clang_3_6")] - pub fn clang_Cursor_getNumTemplateArguments(cursor: CXCursor) -> c_int; - pub fn clang_Cursor_getObjCDeclQualifiers(cursor: CXCursor) -> CXObjCDeclQualifierKind; - /// Only available on `libclang` 6.0 and later. - #[cfg(feature = "clang_6_0")] - pub fn clang_Cursor_getObjCManglings(cursor: CXCursor) -> *mut CXStringSet; - pub fn clang_Cursor_getObjCPropertyAttributes(cursor: CXCursor, reserved: c_uint) -> CXObjCPropertyAttrKind; - /// Only available on `libclang` 8.0 and later. - #[cfg(feature = "clang_8_0")] - pub fn clang_Cursor_getObjCPropertyGetterName(cursor: CXCursor) -> CXString; - /// Only available on `libclang` 8.0 and later. - #[cfg(feature = "clang_8_0")] - pub fn clang_Cursor_getObjCPropertySetterName(cursor: CXCursor) -> CXString; - pub fn clang_Cursor_getObjCSelectorIndex(cursor: CXCursor) -> c_int; - /// Only available on `libclang` 3.7 and later. - #[cfg(feature = "clang_3_7")] - pub fn clang_Cursor_getOffsetOfField(cursor: CXCursor) -> c_longlong; - pub fn clang_Cursor_getRawCommentText(cursor: CXCursor) -> CXString; - pub fn clang_Cursor_getReceiverType(cursor: CXCursor) -> CXType; - pub fn clang_Cursor_getSpellingNameRange(cursor: CXCursor, index: c_uint, reserved: c_uint) -> CXSourceRange; - /// Only available on `libclang` 3.6 and later. - #[cfg(feature = "clang_3_6")] - pub fn clang_Cursor_getStorageClass(cursor: CXCursor) -> CX_StorageClass; - /// Only available on `libclang` 3.6 and later. - #[cfg(feature = "clang_3_6")] - pub fn clang_Cursor_getTemplateArgumentKind(cursor: CXCursor, index: c_uint) -> CXTemplateArgumentKind; - /// Only available on `libclang` 3.6 and later. - #[cfg(feature = "clang_3_6")] - pub fn clang_Cursor_getTemplateArgumentType(cursor: CXCursor, index: c_uint) -> CXType; - /// Only available on `libclang` 3.6 and later. - #[cfg(feature = "clang_3_6")] - pub fn clang_Cursor_getTemplateArgumentUnsignedValue(cursor: CXCursor, index: c_uint) -> c_ulonglong; - /// Only available on `libclang` 3.6 and later. - #[cfg(feature = "clang_3_6")] - pub fn clang_Cursor_getTemplateArgumentValue(cursor: CXCursor, index: c_uint) -> c_longlong; - pub fn clang_Cursor_getTranslationUnit(cursor: CXCursor) -> CXTranslationUnit; - /// Only available on `libclang` 12.0 and later. - #[cfg(feature = "clang_12_0")] - pub fn clang_Cursor_getVarDeclInitializer(cursor: CXCursor) -> CXCursor; - /// Only available on `libclang` 3.9 and later. - #[cfg(feature = "clang_3_9")] - pub fn clang_Cursor_hasAttrs(cursor: CXCursor) -> c_uint; - /// Only available on `libclang` 12.0 and later. - #[cfg(feature = "clang_12_0")] - pub fn clang_Cursor_hasVarDeclGlobalStorage(cursor: CXCursor) -> c_uint; - /// Only available on `libclang` 12.0 and later. - #[cfg(feature = "clang_12_0")] - pub fn clang_Cursor_hasVarDeclExternalStorage(cursor: CXCursor) -> c_uint; - /// Only available on `libclang` 3.7 and later. - #[cfg(feature = "clang_3_7")] - pub fn clang_Cursor_isAnonymous(cursor: CXCursor) -> c_uint; - /// Only available on `libclang` 9.0 and later. 
- #[cfg(feature = "clang_9_0")] - pub fn clang_Cursor_isAnonymousRecordDecl(cursor: CXCursor) -> c_uint; - pub fn clang_Cursor_isBitField(cursor: CXCursor) -> c_uint; - pub fn clang_Cursor_isDynamicCall(cursor: CXCursor) -> c_int; - /// Only available on `libclang` 5.0 and later. - #[cfg(feature = "clang_5_0")] - pub fn clang_Cursor_isExternalSymbol(cursor: CXCursor, language: *mut CXString, from: *mut CXString, generated: *mut c_uint) -> c_uint; - /// Only available on `libclang` 3.9 and later. - #[cfg(feature = "clang_3_9")] - pub fn clang_Cursor_isFunctionInlined(cursor: CXCursor) -> c_uint; - /// Only available on `libclang` 9.0 and later. - #[cfg(feature = "clang_9_0")] - pub fn clang_Cursor_isInlineNamespace(cursor: CXCursor) -> c_uint; - /// Only available on `libclang` 3.9 and later. - #[cfg(feature = "clang_3_9")] - pub fn clang_Cursor_isMacroBuiltin(cursor: CXCursor) -> c_uint; - /// Only available on `libclang` 3.9 and later. - #[cfg(feature = "clang_3_9")] - pub fn clang_Cursor_isMacroFunctionLike(cursor: CXCursor) -> c_uint; - pub fn clang_Cursor_isNull(cursor: CXCursor) -> c_int; - pub fn clang_Cursor_isObjCOptional(cursor: CXCursor) -> c_uint; - pub fn clang_Cursor_isVariadic(cursor: CXCursor) -> c_uint; - /// Only available on `libclang` 5.0 and later. - #[cfg(feature = "clang_5_0")] - pub fn clang_EnumDecl_isScoped(cursor: CXCursor) -> c_uint; - /// Only available on `libclang` 3.9 and later. - #[cfg(feature = "clang_3_9")] - pub fn clang_EvalResult_dispose(result: CXEvalResult); - /// Only available on `libclang` 3.9 and later. - #[cfg(feature = "clang_3_9")] - pub fn clang_EvalResult_getAsDouble(result: CXEvalResult) -> libc::c_double; - /// Only available on `libclang` 3.9 and later. - #[cfg(feature = "clang_3_9")] - pub fn clang_EvalResult_getAsInt(result: CXEvalResult) -> c_int; - /// Only available on `libclang` 4.0 and later. - #[cfg(feature = "clang_4_0")] - pub fn clang_EvalResult_getAsLongLong(result: CXEvalResult) -> c_longlong; - /// Only available on `libclang` 3.9 and later. - #[cfg(feature = "clang_3_9")] - pub fn clang_EvalResult_getAsStr(result: CXEvalResult) -> *const c_char; - /// Only available on `libclang` 4.0 and later. - #[cfg(feature = "clang_4_0")] - pub fn clang_EvalResult_getAsUnsigned(result: CXEvalResult) -> c_ulonglong; - /// Only available on `libclang` 3.9 and later. - #[cfg(feature = "clang_3_9")] - pub fn clang_EvalResult_getKind(result: CXEvalResult) -> CXEvalResultKind; - /// Only available on `libclang` 4.0 and later. - #[cfg(feature = "clang_4_0")] - pub fn clang_EvalResult_isUnsignedInt(result: CXEvalResult) -> c_uint; - /// Only available on `libclang` 3.6 and later. - #[cfg(feature = "clang_3_6")] - pub fn clang_File_isEqual(left: CXFile, right: CXFile) -> c_int; - /// Only available on `libclang` 7.0 and later. 
- #[cfg(feature = "clang_7_0")] - pub fn clang_File_tryGetRealPathName(file: CXFile) -> CXString; - pub fn clang_IndexAction_create(index: CXIndex) -> CXIndexAction; - pub fn clang_IndexAction_dispose(index: CXIndexAction); - pub fn clang_Location_isFromMainFile(location: CXSourceLocation) -> c_int; - pub fn clang_Location_isInSystemHeader(location: CXSourceLocation) -> c_int; - pub fn clang_Module_getASTFile(module: CXModule) -> CXFile; - pub fn clang_Module_getFullName(module: CXModule) -> CXString; - pub fn clang_Module_getName(module: CXModule) -> CXString; - pub fn clang_Module_getNumTopLevelHeaders(tu: CXTranslationUnit, module: CXModule) -> c_uint; - pub fn clang_Module_getParent(module: CXModule) -> CXModule; - pub fn clang_Module_getTopLevelHeader(tu: CXTranslationUnit, module: CXModule, index: c_uint) -> CXFile; - pub fn clang_Module_isSystem(module: CXModule) -> c_int; - /// Only available on `libclang` 7.0 and later. - #[cfg(feature = "clang_7_0")] - pub fn clang_PrintingPolicy_dispose(policy: CXPrintingPolicy); - /// Only available on `libclang` 7.0 and later. - #[cfg(feature = "clang_7_0")] - pub fn clang_PrintingPolicy_getProperty(policy: CXPrintingPolicy, property: CXPrintingPolicyProperty) -> c_uint; - /// Only available on `libclang` 7.0 and later. - #[cfg(feature = "clang_7_0")] - pub fn clang_PrintingPolicy_setProperty(policy: CXPrintingPolicy, property: CXPrintingPolicyProperty, value: c_uint); - pub fn clang_Range_isNull(range: CXSourceRange) -> c_int; - /// Only available on `libclang` 5.0 and later. - #[cfg(feature = "clang_5_0")] - pub fn clang_TargetInfo_dispose(info: CXTargetInfo); - /// Only available on `libclang` 5.0 and later. - #[cfg(feature = "clang_5_0")] - pub fn clang_TargetInfo_getPointerWidth(info: CXTargetInfo) -> c_int; - /// Only available on `libclang` 5.0 and later. - #[cfg(feature = "clang_5_0")] - pub fn clang_TargetInfo_getTriple(info: CXTargetInfo) -> CXString; - pub fn clang_Type_getAlignOf(type_: CXType) -> c_longlong; - pub fn clang_Type_getCXXRefQualifier(type_: CXType) -> CXRefQualifierKind; - pub fn clang_Type_getClassType(type_: CXType) -> CXType; - /// Only available on `libclang` 8.0 and later. - #[cfg(feature = "clang_8_0")] - pub fn clang_Type_getModifiedType(type_: CXType) -> CXType; - /// Only available on `libclang` 3.9 and later. - #[cfg(feature = "clang_3_9")] - pub fn clang_Type_getNamedType(type_: CXType) -> CXType; - /// Only available on `libclang` 8.0 and later. - #[cfg(feature = "clang_8_0")] - pub fn clang_Type_getNullability(type_: CXType) -> CXTypeNullabilityKind; - /// Only available on `libclang` 8.0 and later. - #[cfg(feature = "clang_8_0")] - pub fn clang_Type_getNumObjCProtocolRefs(type_: CXType) -> c_uint; - /// Only available on `libclang` 8.0 and later. - #[cfg(feature = "clang_8_0")] - pub fn clang_Type_getNumObjCTypeArgs(type_: CXType) -> c_uint; - pub fn clang_Type_getNumTemplateArguments(type_: CXType) -> c_int; - /// Only available on `libclang` 3.9 and later. - #[cfg(feature = "clang_3_9")] - pub fn clang_Type_getObjCEncoding(type_: CXType) -> CXString; - /// Only available on `libclang` 8.0 and later. - #[cfg(feature = "clang_8_0")] - pub fn clang_Type_getObjCObjectBaseType(type_: CXType) -> CXType; - /// Only available on `libclang` 8.0 and later. - #[cfg(feature = "clang_8_0")] - pub fn clang_Type_getObjCProtocolDecl(type_: CXType, index: c_uint) -> CXCursor; - /// Only available on `libclang` 8.0 and later. 
- #[cfg(feature = "clang_8_0")] - pub fn clang_Type_getObjCTypeArg(type_: CXType, index: c_uint) -> CXType; - pub fn clang_Type_getOffsetOf(type_: CXType, field: *const c_char) -> c_longlong; - pub fn clang_Type_getSizeOf(type_: CXType) -> c_longlong; - pub fn clang_Type_getTemplateArgumentAsType(type_: CXType, index: c_uint) -> CXType; - /// Only available on `libclang` 11.0 and later. - #[cfg(feature = "clang_11_0")] - pub fn clang_Type_getValueType(type_: CXType) -> CXType; - /// Only available on `libclang` 5.0 and later. - #[cfg(feature = "clang_5_0")] - pub fn clang_Type_isTransparentTagTypedef(type_: CXType) -> c_uint; - /// Only available on `libclang` 3.7 and later. - #[cfg(feature = "clang_3_7")] - pub fn clang_Type_visitFields(type_: CXType, visitor: CXFieldVisitor, data: CXClientData) -> CXVisitorResult; - pub fn clang_annotateTokens(tu: CXTranslationUnit, tokens: *mut CXToken, n_tokens: c_uint, cursors: *mut CXCursor); - pub fn clang_codeCompleteAt(tu: CXTranslationUnit, file: *const c_char, line: c_uint, column: c_uint, unsaved: *mut CXUnsavedFile, n_unsaved: c_uint, flags: CXCodeComplete_Flags) -> *mut CXCodeCompleteResults; - pub fn clang_codeCompleteGetContainerKind(results: *mut CXCodeCompleteResults, incomplete: *mut c_uint) -> CXCursorKind; - pub fn clang_codeCompleteGetContainerUSR(results: *mut CXCodeCompleteResults) -> CXString; - pub fn clang_codeCompleteGetContexts(results: *mut CXCodeCompleteResults) -> c_ulonglong; - pub fn clang_codeCompleteGetDiagnostic(results: *mut CXCodeCompleteResults, index: c_uint) -> CXDiagnostic; - pub fn clang_codeCompleteGetNumDiagnostics(results: *mut CXCodeCompleteResults) -> c_uint; - pub fn clang_codeCompleteGetObjCSelector(results: *mut CXCodeCompleteResults) -> CXString; - pub fn clang_constructUSR_ObjCCategory(class: *const c_char, category: *const c_char) -> CXString; - pub fn clang_constructUSR_ObjCClass(class: *const c_char) -> CXString; - pub fn clang_constructUSR_ObjCIvar(name: *const c_char, usr: CXString) -> CXString; - pub fn clang_constructUSR_ObjCMethod(name: *const c_char, instance: c_uint, usr: CXString) -> CXString; - pub fn clang_constructUSR_ObjCProperty(property: *const c_char, usr: CXString) -> CXString; - pub fn clang_constructUSR_ObjCProtocol(protocol: *const c_char) -> CXString; - pub fn clang_createCXCursorSet() -> CXCursorSet; - pub fn clang_createIndex(exclude: c_int, display: c_int) -> CXIndex; - /// Only available on `libclang` 17.0 and later. 
- #[cfg(feature = "clang_17_0")] - pub fn clang_createIndexWithOptions(options: CXIndexOptions) -> CXIndex; - pub fn clang_createTranslationUnit(index: CXIndex, file: *const c_char) -> CXTranslationUnit; - pub fn clang_createTranslationUnit2(index: CXIndex, file: *const c_char, tu: *mut CXTranslationUnit) -> CXErrorCode; - pub fn clang_createTranslationUnitFromSourceFile(index: CXIndex, file: *const c_char, n_arguments: c_int, arguments: *const *const c_char, n_unsaved: c_uint, unsaved: *mut CXUnsavedFile) -> CXTranslationUnit; - pub fn clang_defaultCodeCompleteOptions() -> CXCodeComplete_Flags; - pub fn clang_defaultDiagnosticDisplayOptions() -> CXDiagnosticDisplayOptions; - pub fn clang_defaultEditingTranslationUnitOptions() -> CXTranslationUnit_Flags; - pub fn clang_defaultReparseOptions(tu: CXTranslationUnit) -> CXReparse_Flags; - pub fn clang_defaultSaveOptions(tu: CXTranslationUnit) -> CXSaveTranslationUnit_Flags; - pub fn clang_disposeCXCursorSet(set: CXCursorSet); - pub fn clang_disposeCXPlatformAvailability(availability: *mut CXPlatformAvailability); - pub fn clang_disposeCXTUResourceUsage(usage: CXTUResourceUsage); - pub fn clang_disposeCodeCompleteResults(results: *mut CXCodeCompleteResults); - pub fn clang_disposeDiagnostic(diagnostic: CXDiagnostic); - pub fn clang_disposeDiagnosticSet(diagnostic: CXDiagnosticSet); - pub fn clang_disposeIndex(index: CXIndex); - pub fn clang_disposeOverriddenCursors(cursors: *mut CXCursor); - pub fn clang_disposeSourceRangeList(list: *mut CXSourceRangeList); - pub fn clang_disposeString(string: CXString); - /// Only available on `libclang` 3.8 and later. - #[cfg(feature = "clang_3_8")] - pub fn clang_disposeStringSet(set: *mut CXStringSet); - pub fn clang_disposeTokens(tu: CXTranslationUnit, tokens: *mut CXToken, n_tokens: c_uint); - pub fn clang_disposeTranslationUnit(tu: CXTranslationUnit); - pub fn clang_enableStackTraces(); - pub fn clang_equalCursors(left: CXCursor, right: CXCursor) -> c_uint; - pub fn clang_equalLocations(left: CXSourceLocation, right: CXSourceLocation) -> c_uint; - pub fn clang_equalRanges(left: CXSourceRange, right: CXSourceRange) -> c_uint; - pub fn clang_equalTypes(left: CXType, right: CXType) -> c_uint; - pub fn clang_executeOnThread(function: extern fn(*mut c_void), data: *mut c_void, stack: c_uint); - pub fn clang_findIncludesInFile(tu: CXTranslationUnit, file: CXFile, cursor: CXCursorAndRangeVisitor) -> CXResult; - pub fn clang_findReferencesInFile(cursor: CXCursor, file: CXFile, visitor: CXCursorAndRangeVisitor) -> CXResult; - pub fn clang_formatDiagnostic(diagnostic: CXDiagnostic, flags: CXDiagnosticDisplayOptions) -> CXString; - /// Only available on `libclang` 3.7 and later. - #[cfg(feature = "clang_3_7")] - pub fn clang_free(buffer: *mut c_void); - /// Only available on `libclang` 5.0 and later. - #[cfg(feature = "clang_5_0")] - pub fn clang_getAddressSpace(type_: CXType) -> c_uint; - /// Only available on `libclang` 4.0 and later. - #[cfg(feature = "clang_4_0")] - pub fn clang_getAllSkippedRanges(tu: CXTranslationUnit) -> *mut CXSourceRangeList; - pub fn clang_getArgType(type_: CXType, index: c_uint) -> CXType; - pub fn clang_getArrayElementType(type_: CXType) -> CXType; - pub fn clang_getArraySize(type_: CXType) -> c_longlong; - /// Only available on `libclang` 17.0 and later. 
- #[cfg(feature = "clang_17_0")] - pub fn clang_getBinaryOperatorKindSpelling(kind: CXBinaryOperatorKind) -> CXString; - pub fn clang_getCString(string: CXString) -> *const c_char; - pub fn clang_getCXTUResourceUsage(tu: CXTranslationUnit) -> CXTUResourceUsage; - pub fn clang_getCXXAccessSpecifier(cursor: CXCursor) -> CX_CXXAccessSpecifier; - pub fn clang_getCanonicalCursor(cursor: CXCursor) -> CXCursor; - pub fn clang_getCanonicalType(type_: CXType) -> CXType; - pub fn clang_getChildDiagnostics(diagnostic: CXDiagnostic) -> CXDiagnosticSet; - pub fn clang_getClangVersion() -> CXString; - pub fn clang_getCompletionAnnotation(string: CXCompletionString, index: c_uint) -> CXString; - pub fn clang_getCompletionAvailability(string: CXCompletionString) -> CXAvailabilityKind; - pub fn clang_getCompletionBriefComment(string: CXCompletionString) -> CXString; - pub fn clang_getCompletionChunkCompletionString(string: CXCompletionString, index: c_uint) -> CXCompletionString; - pub fn clang_getCompletionChunkKind(string: CXCompletionString, index: c_uint) -> CXCompletionChunkKind; - pub fn clang_getCompletionChunkText(string: CXCompletionString, index: c_uint) -> CXString; - /// Only available on `libclang` 7.0 and later. - #[cfg(feature = "clang_7_0")] - pub fn clang_getCompletionFixIt(results: *mut CXCodeCompleteResults, completion_index: c_uint, fixit_index: c_uint, range: *mut CXSourceRange) -> CXString; - pub fn clang_getCompletionNumAnnotations(string: CXCompletionString) -> c_uint; - /// Only available on `libclang` 7.0 and later. - #[cfg(feature = "clang_7_0")] - pub fn clang_getCompletionNumFixIts(results: *mut CXCodeCompleteResults, completion_index: c_uint) -> c_uint; - pub fn clang_getCompletionParent(string: CXCompletionString, kind: *mut CXCursorKind) -> CXString; - pub fn clang_getCompletionPriority(string: CXCompletionString) -> c_uint; - pub fn clang_getCursor(tu: CXTranslationUnit, location: CXSourceLocation) -> CXCursor; - pub fn clang_getCursorAvailability(cursor: CXCursor) -> CXAvailabilityKind; - /// Only available on `libclang` 17.0 and later. - #[cfg(feature = "clang_17_0")] - pub fn clang_getCursorBinaryOperatorKind(cursor: CXCursor) -> CXBinaryOperatorKind; - pub fn clang_getCursorCompletionString(cursor: CXCursor) -> CXCompletionString; - pub fn clang_getCursorDefinition(cursor: CXCursor) -> CXCursor; - pub fn clang_getCursorDisplayName(cursor: CXCursor) -> CXString; - /// Only available on `libclang` 5.0 and later. - #[cfg(feature = "clang_5_0")] - pub fn clang_getCursorExceptionSpecificationType(cursor: CXCursor) -> CXCursor_ExceptionSpecificationKind; - pub fn clang_getCursorExtent(cursor: CXCursor) -> CXSourceRange; - pub fn clang_getCursorKind(cursor: CXCursor) -> CXCursorKind; - pub fn clang_getCursorKindSpelling(kind: CXCursorKind) -> CXString; - pub fn clang_getCursorLanguage(cursor: CXCursor) -> CXLanguageKind; - pub fn clang_getCursorLexicalParent(cursor: CXCursor) -> CXCursor; - pub fn clang_getCursorLinkage(cursor: CXCursor) -> CXLinkageKind; - pub fn clang_getCursorLocation(cursor: CXCursor) -> CXSourceLocation; - pub fn clang_getCursorPlatformAvailability(cursor: CXCursor, deprecated: *mut c_int, deprecated_message: *mut CXString, unavailable: *mut c_int, unavailable_message: *mut CXString, availability: *mut CXPlatformAvailability, n_availability: c_int) -> c_int; - /// Only available on `libclang` 7.0 and later. 
- #[cfg(feature = "clang_7_0")] - pub fn clang_getCursorPrettyPrinted(cursor: CXCursor, policy: CXPrintingPolicy) -> CXString; - /// Only available on `libclang` 7.0 and later. - #[cfg(feature = "clang_7_0")] - pub fn clang_getCursorPrintingPolicy(cursor: CXCursor) -> CXPrintingPolicy; - pub fn clang_getCursorReferenceNameRange(cursor: CXCursor, flags: CXNameRefFlags, index: c_uint) -> CXSourceRange; - pub fn clang_getCursorReferenced(cursor: CXCursor) -> CXCursor; - pub fn clang_getCursorResultType(cursor: CXCursor) -> CXType; - pub fn clang_getCursorSemanticParent(cursor: CXCursor) -> CXCursor; - pub fn clang_getCursorSpelling(cursor: CXCursor) -> CXString; - /// Only available on `libclang` 6.0 and later. - #[cfg(feature = "clang_6_0")] - pub fn clang_getCursorTLSKind(cursor: CXCursor) -> CXTLSKind; - pub fn clang_getCursorType(cursor: CXCursor) -> CXType; - /// Only available on `libclang` 17.0 and later. - #[cfg(feature = "clang_17_0")] - pub fn clang_getCursorUnaryOperatorKind(cursor: CXCursor) -> CXUnaryOperatorKind; - pub fn clang_getCursorUSR(cursor: CXCursor) -> CXString; - /// Only available on `libclang` 3.8 and later. - #[cfg(feature = "clang_3_8")] - pub fn clang_getCursorVisibility(cursor: CXCursor) -> CXVisibilityKind; - pub fn clang_getDeclObjCTypeEncoding(cursor: CXCursor) -> CXString; - pub fn clang_getDefinitionSpellingAndExtent(cursor: CXCursor, start: *mut *const c_char, end: *mut *const c_char, start_line: *mut c_uint, start_column: *mut c_uint, end_line: *mut c_uint, end_column: *mut c_uint); - pub fn clang_getDiagnostic(tu: CXTranslationUnit, index: c_uint) -> CXDiagnostic; - pub fn clang_getDiagnosticCategory(diagnostic: CXDiagnostic) -> c_uint; - pub fn clang_getDiagnosticCategoryName(category: c_uint) -> CXString; - pub fn clang_getDiagnosticCategoryText(diagnostic: CXDiagnostic) -> CXString; - pub fn clang_getDiagnosticFixIt(diagnostic: CXDiagnostic, index: c_uint, range: *mut CXSourceRange) -> CXString; - pub fn clang_getDiagnosticInSet(diagnostic: CXDiagnosticSet, index: c_uint) -> CXDiagnostic; - pub fn clang_getDiagnosticLocation(diagnostic: CXDiagnostic) -> CXSourceLocation; - pub fn clang_getDiagnosticNumFixIts(diagnostic: CXDiagnostic) -> c_uint; - pub fn clang_getDiagnosticNumRanges(diagnostic: CXDiagnostic) -> c_uint; - pub fn clang_getDiagnosticOption(diagnostic: CXDiagnostic, option: *mut CXString) -> CXString; - pub fn clang_getDiagnosticRange(diagnostic: CXDiagnostic, index: c_uint) -> CXSourceRange; - pub fn clang_getDiagnosticSetFromTU(tu: CXTranslationUnit) -> CXDiagnosticSet; - pub fn clang_getDiagnosticSeverity(diagnostic: CXDiagnostic) -> CXDiagnosticSeverity; - pub fn clang_getDiagnosticSpelling(diagnostic: CXDiagnostic) -> CXString; - pub fn clang_getElementType(type_: CXType) -> CXType; - pub fn clang_getEnumConstantDeclUnsignedValue(cursor: CXCursor) -> c_ulonglong; - pub fn clang_getEnumConstantDeclValue(cursor: CXCursor) -> c_longlong; - pub fn clang_getEnumDeclIntegerType(cursor: CXCursor) -> CXType; - /// Only available on `libclang` 5.0 and later. - #[cfg(feature = "clang_5_0")] - pub fn clang_getExceptionSpecificationType(type_: CXType) -> CXCursor_ExceptionSpecificationKind; - pub fn clang_getExpansionLocation(location: CXSourceLocation, file: *mut CXFile, line: *mut c_uint, column: *mut c_uint, offset: *mut c_uint); - pub fn clang_getFieldDeclBitWidth(cursor: CXCursor) -> c_int; - pub fn clang_getFile(tu: CXTranslationUnit, file: *const c_char) -> CXFile; - /// Only available on `libclang` 6.0 and later. 
- #[cfg(feature = "clang_6_0")] - pub fn clang_getFileContents(tu: CXTranslationUnit, file: CXFile, size: *mut size_t) -> *const c_char; - pub fn clang_getFileLocation(location: CXSourceLocation, file: *mut CXFile, line: *mut c_uint, column: *mut c_uint, offset: *mut c_uint); - pub fn clang_getFileName(file: CXFile) -> CXString; - pub fn clang_getFileTime(file: CXFile) -> time_t; - pub fn clang_getFileUniqueID(file: CXFile, id: *mut CXFileUniqueID) -> c_int; - pub fn clang_getFunctionTypeCallingConv(type_: CXType) -> CXCallingConv; - pub fn clang_getIBOutletCollectionType(cursor: CXCursor) -> CXType; - pub fn clang_getIncludedFile(cursor: CXCursor) -> CXFile; - pub fn clang_getInclusions(tu: CXTranslationUnit, visitor: CXInclusionVisitor, data: CXClientData); - pub fn clang_getInstantiationLocation(location: CXSourceLocation, file: *mut CXFile, line: *mut c_uint, column: *mut c_uint, offset: *mut c_uint); - pub fn clang_getLocation(tu: CXTranslationUnit, file: CXFile, line: c_uint, column: c_uint) -> CXSourceLocation; - pub fn clang_getLocationForOffset(tu: CXTranslationUnit, file: CXFile, offset: c_uint) -> CXSourceLocation; - pub fn clang_getModuleForFile(tu: CXTranslationUnit, file: CXFile) -> CXModule; - /// Only available on `libclang` 16.0 and later. - #[cfg(feature = "clang_16_0")] - pub fn clang_getNonReferenceType(type_: CXType) -> CXType; - pub fn clang_getNullCursor() -> CXCursor; - pub fn clang_getNullLocation() -> CXSourceLocation; - pub fn clang_getNullRange() -> CXSourceRange; - pub fn clang_getNumArgTypes(type_: CXType) -> c_int; - pub fn clang_getNumCompletionChunks(string: CXCompletionString) -> c_uint; - pub fn clang_getNumDiagnostics(tu: CXTranslationUnit) -> c_uint; - pub fn clang_getNumDiagnosticsInSet(diagnostic: CXDiagnosticSet) -> c_uint; - pub fn clang_getNumElements(type_: CXType) -> c_longlong; - pub fn clang_getNumOverloadedDecls(cursor: CXCursor) -> c_uint; - pub fn clang_getOverloadedDecl(cursor: CXCursor, index: c_uint) -> CXCursor; - pub fn clang_getOverriddenCursors(cursor: CXCursor, cursors: *mut *mut CXCursor, n_cursors: *mut c_uint); - pub fn clang_getPointeeType(type_: CXType) -> CXType; - pub fn clang_getPresumedLocation(location: CXSourceLocation, file: *mut CXString, line: *mut c_uint, column: *mut c_uint); - pub fn clang_getRange(start: CXSourceLocation, end: CXSourceLocation) -> CXSourceRange; - pub fn clang_getRangeEnd(range: CXSourceRange) -> CXSourceLocation; - pub fn clang_getRangeStart(range: CXSourceRange) -> CXSourceLocation; - pub fn clang_getRemappings(file: *const c_char) -> CXRemapping; - pub fn clang_getRemappingsFromFileList(files: *mut *const c_char, n_files: c_uint) -> CXRemapping; - pub fn clang_getResultType(type_: CXType) -> CXType; - pub fn clang_getSkippedRanges(tu: CXTranslationUnit, file: CXFile) -> *mut CXSourceRangeList; - pub fn clang_getSpecializedCursorTemplate(cursor: CXCursor) -> CXCursor; - pub fn clang_getSpellingLocation(location: CXSourceLocation, file: *mut CXFile, line: *mut c_uint, column: *mut c_uint, offset: *mut c_uint); - pub fn clang_getTUResourceUsageName(kind: CXTUResourceUsageKind) -> *const c_char; - pub fn clang_getTemplateCursorKind(cursor: CXCursor) -> CXCursorKind; - pub fn clang_getToken(tu: CXTranslationUnit, location: CXSourceLocation) -> *mut CXToken; - pub fn clang_getTokenExtent(tu: CXTranslationUnit, token: CXToken) -> CXSourceRange; - pub fn clang_getTokenKind(token: CXToken) -> CXTokenKind; - pub fn clang_getTokenLocation(tu: CXTranslationUnit, token: CXToken) -> CXSourceLocation; - pub fn 
clang_getTokenSpelling(tu: CXTranslationUnit, token: CXToken) -> CXString; - pub fn clang_getTranslationUnitCursor(tu: CXTranslationUnit) -> CXCursor; - pub fn clang_getTranslationUnitSpelling(tu: CXTranslationUnit) -> CXString; - /// Only available on `libclang` 5.0 and later. - #[cfg(feature = "clang_5_0")] - pub fn clang_getTranslationUnitTargetInfo(tu: CXTranslationUnit) -> CXTargetInfo; - /// Only available on `libclang` 17.0 and later. - #[cfg(feature = "clang_17_0")] - pub fn clang_getUnaryOperatorKindSpelling(kind: CXUnaryOperatorKind) -> CXString; - /// Only available on `libclang` 16.0 and later. - #[cfg(feature = "clang_16_0")] - pub fn clang_getUnqualifiedType(type_: CXType) -> CXType; - pub fn clang_getTypeDeclaration(type_: CXType) -> CXCursor; - pub fn clang_getTypeKindSpelling(type_: CXTypeKind) -> CXString; - pub fn clang_getTypeSpelling(type_: CXType) -> CXString; - pub fn clang_getTypedefDeclUnderlyingType(cursor: CXCursor) -> CXType; - /// Only available on `libclang` 5.0 and later. - #[cfg(feature = "clang_5_0")] - pub fn clang_getTypedefName(type_: CXType) -> CXString; - pub fn clang_hashCursor(cursor: CXCursor) -> c_uint; - pub fn clang_indexLoc_getCXSourceLocation(location: CXIdxLoc) -> CXSourceLocation; - pub fn clang_indexLoc_getFileLocation(location: CXIdxLoc, index_file: *mut CXIdxClientFile, file: *mut CXFile, line: *mut c_uint, column: *mut c_uint, offset: *mut c_uint); - pub fn clang_indexSourceFile(index: CXIndexAction, data: CXClientData, callbacks: *mut IndexerCallbacks, n_callbacks: c_uint, index_flags: CXIndexOptFlags, file: *const c_char, arguments: *const *const c_char, n_arguments: c_int, unsaved: *mut CXUnsavedFile, n_unsaved: c_uint, tu: *mut CXTranslationUnit, tu_flags: CXTranslationUnit_Flags) -> CXErrorCode; - /// Only available on `libclang` 3.8 and later. 
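// Illustrative sketch (not from the vendored clang-sys sources): how the
// `IndexerCallbacks` struct is typically wired into `clang_indexSourceFile`.
// The callback body and the way the index is obtained are assumptions made
// only for the example; callbacks left as `None` are simply skipped by libclang.

extern fn on_declaration(_client: CXClientData, _decl: *const CXIdxDeclInfo) {
    // A real callback would inspect (*_decl).entityInfo, (*_decl).isDefinition, etc.
}

unsafe fn index_file(index: CXIndex, path: &std::ffi::CStr) -> CXErrorCode {
    let action = clang_IndexAction_create(index);
    let mut callbacks = IndexerCallbacks::default();
    callbacks.indexDeclaration = Some(on_declaration);
    let result = clang_indexSourceFile(
        action,
        std::ptr::null_mut(),                               // client data passed back to callbacks
        &mut callbacks,
        std::mem::size_of::<IndexerCallbacks>() as c_uint,  // libclang expects the struct size here
        CXIndexOptNone,
        path.as_ptr(),
        std::ptr::null(), 0,                                // no extra compiler arguments
        std::ptr::null_mut(), 0,                            // no unsaved files
        std::ptr::null_mut(),                               // resulting translation unit not needed
        CXTranslationUnit_None,
    );
    clang_IndexAction_dispose(action);
    result
}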
- #[cfg(feature = "clang_3_8")] - pub fn clang_indexSourceFileFullArgv(index: CXIndexAction, data: CXClientData, callbacks: *mut IndexerCallbacks, n_callbacks: c_uint, index_flags: CXIndexOptFlags, file: *const c_char, arguments: *const *const c_char, n_arguments: c_int, unsaved: *mut CXUnsavedFile, n_unsaved: c_uint, tu: *mut CXTranslationUnit, tu_flags: CXTranslationUnit_Flags) -> CXErrorCode; - pub fn clang_indexTranslationUnit(index: CXIndexAction, data: CXClientData, callbacks: *mut IndexerCallbacks, n_callbacks: c_uint, flags: CXIndexOptFlags, tu: CXTranslationUnit) -> c_int; - pub fn clang_index_getCXXClassDeclInfo(info: *const CXIdxDeclInfo) -> *const CXIdxCXXClassDeclInfo; - pub fn clang_index_getClientContainer(info: *const CXIdxContainerInfo) -> CXIdxClientContainer; - pub fn clang_index_getClientEntity(info: *const CXIdxEntityInfo) -> CXIdxClientEntity; - pub fn clang_index_getIBOutletCollectionAttrInfo(info: *const CXIdxAttrInfo) -> *const CXIdxIBOutletCollectionAttrInfo; - pub fn clang_index_getObjCCategoryDeclInfo(info: *const CXIdxDeclInfo) -> *const CXIdxObjCCategoryDeclInfo; - pub fn clang_index_getObjCContainerDeclInfo(info: *const CXIdxDeclInfo) -> *const CXIdxObjCContainerDeclInfo; - pub fn clang_index_getObjCInterfaceDeclInfo(info: *const CXIdxDeclInfo) -> *const CXIdxObjCInterfaceDeclInfo; - pub fn clang_index_getObjCPropertyDeclInfo(info: *const CXIdxDeclInfo) -> *const CXIdxObjCPropertyDeclInfo; - pub fn clang_index_getObjCProtocolRefListInfo(info: *const CXIdxDeclInfo) -> *const CXIdxObjCProtocolRefListInfo; - pub fn clang_index_isEntityObjCContainerKind(info: CXIdxEntityKind) -> c_int; - pub fn clang_index_setClientContainer(info: *const CXIdxContainerInfo, container: CXIdxClientContainer); - pub fn clang_index_setClientEntity(info: *const CXIdxEntityInfo, entity: CXIdxClientEntity); - pub fn clang_isAttribute(kind: CXCursorKind) -> c_uint; - pub fn clang_isConstQualifiedType(type_: CXType) -> c_uint; - pub fn clang_isCursorDefinition(cursor: CXCursor) -> c_uint; - pub fn clang_isDeclaration(kind: CXCursorKind) -> c_uint; - pub fn clang_isExpression(kind: CXCursorKind) -> c_uint; - pub fn clang_isFileMultipleIncludeGuarded(tu: CXTranslationUnit, file: CXFile) -> c_uint; - pub fn clang_isFunctionTypeVariadic(type_: CXType) -> c_uint; - pub fn clang_isInvalid(kind: CXCursorKind) -> c_uint; - /// Only available on `libclang` 7.0 and later. 
- #[cfg(feature = "clang_7_0")] - pub fn clang_isInvalidDeclaration(cursor: CXCursor) -> c_uint; - pub fn clang_isPODType(type_: CXType) -> c_uint; - pub fn clang_isPreprocessing(kind: CXCursorKind) -> c_uint; - pub fn clang_isReference(kind: CXCursorKind) -> c_uint; - pub fn clang_isRestrictQualifiedType(type_: CXType) -> c_uint; - pub fn clang_isStatement(kind: CXCursorKind) -> c_uint; - pub fn clang_isTranslationUnit(kind: CXCursorKind) -> c_uint; - pub fn clang_isUnexposed(kind: CXCursorKind) -> c_uint; - pub fn clang_isVirtualBase(cursor: CXCursor) -> c_uint; - pub fn clang_isVolatileQualifiedType(type_: CXType) -> c_uint; - pub fn clang_loadDiagnostics(file: *const c_char, error: *mut CXLoadDiag_Error, message: *mut CXString) -> CXDiagnosticSet; - pub fn clang_parseTranslationUnit(index: CXIndex, file: *const c_char, arguments: *const *const c_char, n_arguments: c_int, unsaved: *mut CXUnsavedFile, n_unsaved: c_uint, flags: CXTranslationUnit_Flags) -> CXTranslationUnit; - pub fn clang_parseTranslationUnit2(index: CXIndex, file: *const c_char, arguments: *const *const c_char, n_arguments: c_int, unsaved: *mut CXUnsavedFile, n_unsaved: c_uint, flags: CXTranslationUnit_Flags, tu: *mut CXTranslationUnit) -> CXErrorCode; - /// Only available on `libclang` 3.8 and later. - #[cfg(feature = "clang_3_8")] - pub fn clang_parseTranslationUnit2FullArgv(index: CXIndex, file: *const c_char, arguments: *const *const c_char, n_arguments: c_int, unsaved: *mut CXUnsavedFile, n_unsaved: c_uint, flags: CXTranslationUnit_Flags, tu: *mut CXTranslationUnit) -> CXErrorCode; - pub fn clang_remap_dispose(remapping: CXRemapping); - pub fn clang_remap_getFilenames(remapping: CXRemapping, index: c_uint, original: *mut CXString, transformed: *mut CXString); - pub fn clang_remap_getNumFiles(remapping: CXRemapping) -> c_uint; - pub fn clang_reparseTranslationUnit(tu: CXTranslationUnit, n_unsaved: c_uint, unsaved: *mut CXUnsavedFile, flags: CXReparse_Flags) -> CXErrorCode; - pub fn clang_saveTranslationUnit(tu: CXTranslationUnit, file: *const c_char, options: CXSaveTranslationUnit_Flags) -> CXSaveError; - pub fn clang_sortCodeCompletionResults(results: *mut CXCompletionResult, n_results: c_uint); - /// Only available on `libclang` 5.0 and later. 
- #[cfg(feature = "clang_5_0")] - pub fn clang_suspendTranslationUnit(tu: CXTranslationUnit) -> c_uint; - pub fn clang_toggleCrashRecovery(recovery: c_uint); - pub fn clang_tokenize(tu: CXTranslationUnit, range: CXSourceRange, tokens: *mut *mut CXToken, n_tokens: *mut c_uint); - pub fn clang_visitChildren(cursor: CXCursor, visitor: CXCursorVisitor, data: CXClientData) -> c_uint; - - // Documentation - pub fn clang_BlockCommandComment_getArgText(comment: CXComment, index: c_uint) -> CXString; - pub fn clang_BlockCommandComment_getCommandName(comment: CXComment) -> CXString; - pub fn clang_BlockCommandComment_getNumArgs(comment: CXComment) -> c_uint; - pub fn clang_BlockCommandComment_getParagraph(comment: CXComment) -> CXComment; - pub fn clang_Comment_getChild(comment: CXComment, index: c_uint) -> CXComment; - pub fn clang_Comment_getKind(comment: CXComment) -> CXCommentKind; - pub fn clang_Comment_getNumChildren(comment: CXComment) -> c_uint; - pub fn clang_Comment_isWhitespace(comment: CXComment) -> c_uint; - pub fn clang_Cursor_getParsedComment(C: CXCursor) -> CXComment; - pub fn clang_FullComment_getAsHTML(comment: CXComment) -> CXString; - pub fn clang_FullComment_getAsXML(comment: CXComment) -> CXString; - pub fn clang_HTMLStartTag_getAttrName(comment: CXComment, index: c_uint) -> CXString; - pub fn clang_HTMLStartTag_getAttrValue(comment: CXComment, index: c_uint) -> CXString; - pub fn clang_HTMLStartTag_getNumAttrs(comment: CXComment) -> c_uint; - pub fn clang_HTMLStartTagComment_isSelfClosing(comment: CXComment) -> c_uint; - pub fn clang_HTMLTagComment_getAsString(comment: CXComment) -> CXString; - pub fn clang_HTMLTagComment_getTagName(comment: CXComment) -> CXString; - pub fn clang_InlineCommandComment_getArgText(comment: CXComment, index: c_uint) -> CXString; - pub fn clang_InlineCommandComment_getCommandName(comment: CXComment) -> CXString; - pub fn clang_InlineCommandComment_getNumArgs(comment: CXComment) -> c_uint; - pub fn clang_InlineCommandComment_getRenderKind(comment: CXComment) -> CXCommentInlineCommandRenderKind; - pub fn clang_InlineContentComment_hasTrailingNewline(comment: CXComment) -> c_uint; - pub fn clang_ParamCommandComment_getDirection(comment: CXComment) -> CXCommentParamPassDirection; - pub fn clang_ParamCommandComment_getParamIndex(comment: CXComment) -> c_uint; - pub fn clang_ParamCommandComment_getParamName(comment: CXComment) -> CXString; - pub fn clang_ParamCommandComment_isDirectionExplicit(comment: CXComment) -> c_uint; - pub fn clang_ParamCommandComment_isParamIndexValid(comment: CXComment) -> c_uint; - pub fn clang_TextComment_getText(comment: CXComment) -> CXString; - pub fn clang_TParamCommandComment_getDepth(comment: CXComment) -> c_uint; - pub fn clang_TParamCommandComment_getIndex(comment: CXComment, depth: c_uint) -> c_uint; - pub fn clang_TParamCommandComment_getParamName(comment: CXComment) -> CXString; - pub fn clang_TParamCommandComment_isParamPositionValid(comment: CXComment) -> c_uint; - pub fn clang_VerbatimBlockLineComment_getText(comment: CXComment) -> CXString; - pub fn clang_VerbatimLineComment_getText(comment: CXComment) -> CXString; -} diff --git a/vendor/clang-sys/src/link.rs b/vendor/clang-sys/src/link.rs deleted file mode 100644 index 1adb0957fd89b0..00000000000000 --- a/vendor/clang-sys/src/link.rs +++ /dev/null @@ -1,322 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 - -//================================================ -// Macros -//================================================ - -#[cfg(feature = "runtime")] 
-macro_rules! link { - ( - @LOAD: - $(#[doc=$doc:expr])* - #[cfg($cfg:meta)] - fn $name:ident($($pname:ident: $pty:ty), *) $(-> $ret:ty)* - ) => ( - $(#[doc=$doc])* - #[cfg($cfg)] - pub fn $name(library: &mut super::SharedLibrary) { - let symbol = unsafe { library.library.get(stringify!($name).as_bytes()) }.ok(); - library.functions.$name = match symbol { - Some(s) => *s, - None => None, - }; - } - - #[cfg(not($cfg))] - pub fn $name(_: &mut super::SharedLibrary) {} - ); - - ( - @LOAD: - fn $name:ident($($pname:ident: $pty:ty), *) $(-> $ret:ty)* - ) => ( - link!(@LOAD: #[cfg(feature = "runtime")] fn $name($($pname: $pty), *) $(-> $ret)*); - ); - - ( - $( - $(#[doc=$doc:expr] #[cfg($cfg:meta)])* - pub fn $name:ident($($pname:ident: $pty:ty), *) $(-> $ret:ty)*; - )+ - ) => ( - use std::cell::{RefCell}; - use std::fmt; - use std::sync::{Arc}; - use std::path::{Path, PathBuf}; - - /// The (minimum) version of a `libclang` shared library. - #[allow(missing_docs)] - #[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] - pub enum Version { - V3_5 = 35, - V3_6 = 36, - V3_7 = 37, - V3_8 = 38, - V3_9 = 39, - V4_0 = 40, - V5_0 = 50, - V6_0 = 60, - V7_0 = 70, - V8_0 = 80, - V9_0 = 90, - V11_0 = 110, - V12_0 = 120, - V16_0 = 160, - V17_0 = 170, - } - - impl fmt::Display for Version { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - use Version::*; - match self { - V3_5 => write!(f, "3.5.x"), - V3_6 => write!(f, "3.6.x"), - V3_7 => write!(f, "3.7.x"), - V3_8 => write!(f, "3.8.x"), - V3_9 => write!(f, "3.9.x"), - V4_0 => write!(f, "4.0.x"), - V5_0 => write!(f, "5.0.x"), - V6_0 => write!(f, "6.0.x"), - V7_0 => write!(f, "7.0.x"), - V8_0 => write!(f, "8.0.x"), - V9_0 => write!(f, "9.0.x - 10.0.x"), - V11_0 => write!(f, "11.0.x"), - V12_0 => write!(f, "12.0.x - 15.0.x"), - V16_0 => write!(f, "16.0.x"), - V17_0 => write!(f, "17.0.x or later"), - } - } - } - - /// The set of functions loaded dynamically. - #[derive(Debug, Default)] - pub struct Functions { - $( - $(#[doc=$doc] #[cfg($cfg)])* - pub $name: Option $ret)*>, - )+ - } - - /// A dynamically loaded instance of the `libclang` library. - #[derive(Debug)] - pub struct SharedLibrary { - library: libloading::Library, - path: PathBuf, - pub functions: Functions, - } - - impl SharedLibrary { - fn new(library: libloading::Library, path: PathBuf) -> Self { - Self { library, path, functions: Functions::default() } - } - - /// Returns the path to this `libclang` shared library. - pub fn path(&self) -> &Path { - &self.path - } - - /// Returns the (minimum) version of this `libclang` shared library. - /// - /// If this returns `None`, it indicates that the version is too old - /// to be supported by this crate (i.e., `3.4` or earlier). If the - /// version of this shared library is more recent than that fully - /// supported by this crate, the most recent fully supported version - /// will be returned. - pub fn version(&self) -> Option { - macro_rules! 
check { - ($fn:expr, $version:ident) => { - if self.library.get::($fn).is_ok() { - return Some(Version::$version); - } - }; - } - - unsafe { - check!(b"clang_CXXMethod_isExplicit", V17_0); - check!(b"clang_CXXMethod_isCopyAssignmentOperator", V16_0); - check!(b"clang_Cursor_getVarDeclInitializer", V12_0); - check!(b"clang_Type_getValueType", V11_0); - check!(b"clang_Cursor_isAnonymousRecordDecl", V9_0); - check!(b"clang_Cursor_getObjCPropertyGetterName", V8_0); - check!(b"clang_File_tryGetRealPathName", V7_0); - check!(b"clang_CXIndex_setInvocationEmissionPathOption", V6_0); - check!(b"clang_Cursor_isExternalSymbol", V5_0); - check!(b"clang_EvalResult_getAsLongLong", V4_0); - check!(b"clang_CXXConstructor_isConvertingConstructor", V3_9); - check!(b"clang_CXXField_isMutable", V3_8); - check!(b"clang_Cursor_getOffsetOfField", V3_7); - check!(b"clang_Cursor_getStorageClass", V3_6); - check!(b"clang_Type_getNumTemplateArguments", V3_5); - } - - None - } - } - - thread_local!(static LIBRARY: RefCell>> = RefCell::new(None)); - - /// Returns whether a `libclang` shared library is loaded on this thread. - pub fn is_loaded() -> bool { - LIBRARY.with(|l| l.borrow().is_some()) - } - - fn with_library(f: F) -> Option where F: FnOnce(&SharedLibrary) -> T { - LIBRARY.with(|l| { - match l.borrow().as_ref() { - Some(library) => Some(f(&library)), - _ => None, - } - }) - } - - $( - #[cfg_attr(feature="cargo-clippy", allow(clippy::missing_safety_doc))] - #[cfg_attr(feature="cargo-clippy", allow(clippy::too_many_arguments))] - $(#[doc=$doc] #[cfg($cfg)])* - pub unsafe fn $name($($pname: $pty), *) $(-> $ret)* { - let f = with_library(|library| { - if let Some(function) = library.functions.$name { - function - } else { - panic!( - r#" -A `libclang` function was called that is not supported by the loaded `libclang` instance. - - called function = `{0}` - loaded `libclang` instance = {1} - -The minimum `libclang` requirement for this particular function can be found here: -https://docs.rs/clang-sys/latest/clang_sys/{0}/index.html - -Instructions for installing `libclang` can be found here: -https://rust-lang.github.io/rust-bindgen/requirements.html -"#, - stringify!($name), - library - .version() - .map(|v| format!("{}", v)) - .unwrap_or_else(|| "unsupported version".into()), - ); - } - }).expect("a `libclang` shared library is not loaded on this thread"); - f($($pname), *) - } - - $(#[doc=$doc] #[cfg($cfg)])* - pub mod $name { - pub fn is_loaded() -> bool { - super::with_library(|l| l.functions.$name.is_some()).unwrap_or(false) - } - } - )+ - - mod load { - $(link!(@LOAD: $(#[cfg($cfg)])* fn $name($($pname: $pty), *) $(-> $ret)*);)+ - } - - /// Loads a `libclang` shared library and returns the library instance. - /// - /// This function does not attempt to load any functions from the shared library. The caller - /// is responsible for loading the functions they require. 
- /// - /// # Failures - /// - /// * a `libclang` shared library could not be found - /// * the `libclang` shared library could not be opened - pub fn load_manually() -> Result { - #[allow(dead_code)] - mod build { - include!(concat!(env!("OUT_DIR"), "/macros.rs")); - pub mod common { include!(concat!(env!("OUT_DIR"), "/common.rs")); } - pub mod dynamic { include!(concat!(env!("OUT_DIR"), "/dynamic.rs")); } - } - - let (directory, filename) = build::dynamic::find(true)?; - let path = directory.join(filename); - - unsafe { - let library = libloading::Library::new(&path).map_err(|e| { - format!( - "the `libclang` shared library at {} could not be opened: {}", - path.display(), - e, - ) - }); - - let mut library = SharedLibrary::new(library?, path); - $(load::$name(&mut library);)+ - Ok(library) - } - } - - /// Loads a `libclang` shared library for use in the current thread. - /// - /// This functions attempts to load all the functions in the shared library. Whether a - /// function has been loaded can be tested by calling the `is_loaded` function on the - /// module with the same name as the function (e.g., `clang_createIndex::is_loaded()` for - /// the `clang_createIndex` function). - /// - /// # Failures - /// - /// * a `libclang` shared library could not be found - /// * the `libclang` shared library could not be opened - #[allow(dead_code)] - pub fn load() -> Result<(), String> { - let library = Arc::new(load_manually()?); - LIBRARY.with(|l| *l.borrow_mut() = Some(library)); - Ok(()) - } - - /// Unloads the `libclang` shared library in use in the current thread. - /// - /// # Failures - /// - /// * a `libclang` shared library is not in use in the current thread - pub fn unload() -> Result<(), String> { - let library = set_library(None); - if library.is_some() { - Ok(()) - } else { - Err("a `libclang` shared library is not in use in the current thread".into()) - } - } - - /// Returns the library instance stored in TLS. - /// - /// This functions allows for sharing library instances between threads. - pub fn get_library() -> Option> { - LIBRARY.with(|l| l.borrow_mut().clone()) - } - - /// Sets the library instance stored in TLS and returns the previous library. - /// - /// This functions allows for sharing library instances between threads. - pub fn set_library(library: Option>) -> Option> { - LIBRARY.with(|l| mem::replace(&mut *l.borrow_mut(), library)) - } - ) -} - -#[cfg(not(feature = "runtime"))] -macro_rules! link { - ( - $( - $(#[doc=$doc:expr] #[cfg($cfg:meta)])* - pub fn $name:ident($($pname:ident: $pty:ty), *) $(-> $ret:ty)*; - )+ - ) => ( - extern { - $( - $(#[doc=$doc] #[cfg($cfg)])* - pub fn $name($($pname: $pty), *) $(-> $ret)*; - )+ - } - - $( - $(#[doc=$doc] #[cfg($cfg)])* - pub mod $name { - pub fn is_loaded() -> bool { true } - } - )+ - ) -} diff --git a/vendor/clang-sys/src/support.rs b/vendor/clang-sys/src/support.rs deleted file mode 100644 index bd20da6fe83a99..00000000000000 --- a/vendor/clang-sys/src/support.rs +++ /dev/null @@ -1,238 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 - -//! Provides helper functionality. - -use std::path::{Path, PathBuf}; -use std::process::Command; -use std::{env, io}; - -use glob::{self, Pattern}; - -use libc::c_int; - -use super::CXVersion; - -//================================================ -// Structs -//================================================ - -/// A `clang` executable. -#[derive(Clone, Debug)] -pub struct Clang { - /// The path to this `clang` executable. 
- pub path: PathBuf, - /// The version of this `clang` executable if it could be parsed. - pub version: Option, - /// The directories searched by this `clang` executable for C headers if - /// they could be parsed. - pub c_search_paths: Option>, - /// The directories searched by this `clang` executable for C++ headers if - /// they could be parsed. - pub cpp_search_paths: Option>, -} - -impl Clang { - fn new(path: impl AsRef, args: &[String]) -> Self { - Self { - path: path.as_ref().into(), - version: parse_version(path.as_ref()), - c_search_paths: parse_search_paths(path.as_ref(), "c", args), - cpp_search_paths: parse_search_paths(path.as_ref(), "c++", args), - } - } - - /// Returns a `clang` executable if one can be found. - /// - /// If the `CLANG_PATH` environment variable is set, that is the instance of - /// `clang` used. Otherwise, a series of directories are searched. First, if - /// a path is supplied, that is the first directory searched. Then, the - /// directory returned by `llvm-config --bindir` is searched. On macOS - /// systems, `xcodebuild -find clang` will next be queried. Last, the - /// directories in the system's `PATH` are searched. - /// - /// ## Cross-compilation - /// - /// If target arguments are provided (e.g., `--target` followed by a target - /// like `x86_64-unknown-linux-gnu`) then this method will prefer a - /// target-prefixed instance of `clang` (e.g., - /// `x86_64-unknown-linux-gnu-clang` for the above example). - pub fn find(path: Option<&Path>, args: &[String]) -> Option { - if let Ok(path) = env::var("CLANG_PATH") { - let p = Path::new(&path); - if p.is_file() && is_executable(p).unwrap_or(false) { - return Some(Clang::new(p, args)); - } else { - eprintln!("`CLANG_PATH` env var set but is not a full path to an executable"); - } - } - - // Determine the cross-compilation target, if any. - - let mut target = None; - for i in 0..args.len() { - if (args[i] == "-target" || args[i] == "-target") && i + 1 < args.len() { - target = Some(&args[i + 1]); - } - } - - // Collect the paths to search for a `clang` executable in. - - let mut paths = vec![]; - - if let Some(path) = path { - paths.push(path.into()); - } - - if let Ok(path) = run_llvm_config(&["--bindir"]) { - if let Some(line) = path.lines().next() { - paths.push(line.into()); - } - } - - if cfg!(target_os = "macos") { - if let Ok((path, _)) = run("xcodebuild", &["-find", "clang"]) { - if let Some(line) = path.lines().next() { - paths.push(line.into()); - } - } - } - - if let Ok(path) = env::var("PATH") { - paths.extend(env::split_paths(&path)); - } - - // First, look for a target-prefixed `clang` executable. - - if let Some(target) = target { - let default = format!("{}-clang{}", target, env::consts::EXE_SUFFIX); - let versioned = format!("{}-clang-[0-9]*{}", target, env::consts::EXE_SUFFIX); - let patterns = &[&default[..], &versioned[..]]; - for path in &paths { - if let Some(path) = find(path, patterns) { - return Some(Clang::new(path, args)); - } - } - } - - // Otherwise, look for any other `clang` executable. 
- - let default = format!("clang{}", env::consts::EXE_SUFFIX); - let versioned = format!("clang-[0-9]*{}", env::consts::EXE_SUFFIX); - let patterns = &[&default[..], &versioned[..]]; - for path in paths { - if let Some(path) = find(&path, patterns) { - return Some(Clang::new(path, args)); - } - } - - None - } -} - -//================================================ -// Functions -//================================================ - -/// Returns the first match to the supplied glob patterns in the supplied -/// directory if there are any matches. -fn find(directory: &Path, patterns: &[&str]) -> Option { - // Escape the directory in case it contains characters that have special - // meaning in glob patterns (e.g., `[` or `]`). - let directory = if let Some(directory) = directory.to_str() { - Path::new(&Pattern::escape(directory)).to_owned() - } else { - return None; - }; - - for pattern in patterns { - let pattern = directory.join(pattern).to_string_lossy().into_owned(); - if let Some(path) = glob::glob(&pattern).ok()?.filter_map(|p| p.ok()).next() { - if path.is_file() && is_executable(&path).unwrap_or(false) { - return Some(path); - } - } - } - - None -} - -#[cfg(unix)] -fn is_executable(path: &Path) -> io::Result { - use std::ffi::CString; - use std::os::unix::ffi::OsStrExt; - - let path = CString::new(path.as_os_str().as_bytes())?; - unsafe { Ok(libc::access(path.as_ptr(), libc::X_OK) == 0) } -} - -#[cfg(not(unix))] -fn is_executable(_: &Path) -> io::Result { - Ok(true) -} - -/// Attempts to run an executable, returning the `stdout` and `stderr` output if -/// successful. -fn run(executable: &str, arguments: &[&str]) -> Result<(String, String), String> { - Command::new(executable) - .args(arguments) - .output() - .map(|o| { - let stdout = String::from_utf8_lossy(&o.stdout).into_owned(); - let stderr = String::from_utf8_lossy(&o.stderr).into_owned(); - (stdout, stderr) - }) - .map_err(|e| format!("could not run executable `{}`: {}", executable, e)) -} - -/// Runs `clang`, returning the `stdout` and `stderr` output. -fn run_clang(path: &Path, arguments: &[&str]) -> (String, String) { - run(&path.to_string_lossy(), arguments).unwrap() -} - -/// Runs `llvm-config`, returning the `stdout` output if successful. -fn run_llvm_config(arguments: &[&str]) -> Result { - let config = env::var("LLVM_CONFIG_PATH").unwrap_or_else(|_| "llvm-config".to_string()); - run(&config, arguments).map(|(o, _)| o) -} - -/// Parses a version number if possible, ignoring trailing non-digit characters. -fn parse_version_number(number: &str) -> Option { - number - .chars() - .take_while(|c| c.is_ascii_digit()) - .collect::() - .parse() - .ok() -} - -/// Parses the version from the output of a `clang` executable if possible. -fn parse_version(path: &Path) -> Option { - let output = run_clang(path, &["--version"]).0; - let start = output.find("version ")? + 8; - let mut numbers = output[start..].split_whitespace().next()?.split('.'); - let major = numbers.next().and_then(parse_version_number)?; - let minor = numbers.next().and_then(parse_version_number)?; - let subminor = numbers.next().and_then(parse_version_number).unwrap_or(0); - Some(CXVersion { - Major: major, - Minor: minor, - Subminor: subminor, - }) -} - -/// Parses the search paths from the output of a `clang` executable if possible. 
-fn parse_search_paths(path: &Path, language: &str, args: &[String]) -> Option> { - let mut clang_args = vec!["-E", "-x", language, "-", "-v"]; - clang_args.extend(args.iter().map(|s| &**s)); - let output = run_clang(path, &clang_args).1; - let start = output.find("#include <...> search starts here:")? + 34; - let end = output.find("End of search list.")?; - let paths = output[start..end].replace("(framework directory)", ""); - Some( - paths - .lines() - .filter(|l| !l.is_empty()) - .map(|l| Path::new(l.trim()).into()) - .collect(), - ) -} diff --git a/vendor/clang-sys/tests/build.rs b/vendor/clang-sys/tests/build.rs deleted file mode 100644 index 1ac4e617046a48..00000000000000 --- a/vendor/clang-sys/tests/build.rs +++ /dev/null @@ -1,356 +0,0 @@ -#![allow(dead_code)] - -use core::fmt; -use std::collections::HashMap; -use std::env; -use std::fs; -use std::path::PathBuf; -use std::sync::Arc; -use std::sync::Mutex; - -use tempfile::TempDir; - -#[macro_use] -#[path = "../build/macros.rs"] -mod macros; - -#[path = "../build/common.rs"] -mod common; -#[path = "../build/dynamic.rs"] -mod dynamic; -#[path = "../build/static.rs"] -mod r#static; - -#[derive(Debug, Default)] -struct RunCommandMock { - invocations: Vec<(String, String, Vec)>, - responses: HashMap, String>, -} - - -#[derive(Copy, Clone, Debug)] -enum Arch { - ARM64, - X86, - X86_64, -} - -impl Arch { - fn pe_machine_type(self) -> u16 { - match self { - Arch::ARM64 => 0xAA64, - Arch::X86 => 0x014C, - Arch::X86_64 => 0x8664, - } - } -} - -impl fmt::Display for Arch { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match self { - Arch::ARM64 => write!(f, "aarch64"), - Arch::X86 => write!(f, "x86"), - Arch::X86_64 => write!(f, "x86_64"), - } - } -} - -#[derive(Debug)] -struct Env { - os: String, - arch: Arch, - pointer_width: String, - env: Option, - vars: HashMap, Option)>, - cwd: PathBuf, - tmp: TempDir, - files: Vec, - commands: Arc>, -} - -impl Env { - fn new(os: &str, arch: Arch, pointer_width: &str) -> Self { - Env { - os: os.into(), - arch, - pointer_width: pointer_width.into(), - env: None, - vars: HashMap::new(), - cwd: env::current_dir().unwrap(), - tmp: tempfile::Builder::new().prefix("clang_sys_test").tempdir().unwrap(), - files: vec![], - commands: Default::default(), - } - .var("CLANG_PATH", None) - .var("LD_LIBRARY_PATH", None) - .var("LIBCLANG_PATH", None) - .var("LIBCLANG_STATIC_PATH", None) - .var("LLVM_CONFIG_PATH", None) - .var("PATH", None) - } - - fn env(mut self, env: &str) -> Self { - self.env = Some(env.into()); - self - } - - fn var(mut self, name: &str, value: Option<&str>) -> Self { - let previous = env::var(name).ok(); - self.vars.insert(name.into(), (value.map(|v| v.into()), previous)); - self - } - - fn dir(mut self, path: &str) -> Self { - self.files.push(path.into()); - let path = self.tmp.path().join(path); - fs::create_dir_all(path).unwrap(); - self - } - - fn file(mut self, path: &str, contents: &[u8]) -> Self { - self.files.push(path.into()); - let path = self.tmp.path().join(path); - fs::create_dir_all(path.parent().unwrap()).unwrap(); - fs::write(self.tmp.path().join(path), contents).unwrap(); - self - } - - fn dll(self, path: &str, arch: Arch, pointer_width: &str) -> Self { - // PE header. 
- let mut contents = [0; 64]; - contents[0x3C..0x3C + 4].copy_from_slice(&i32::to_le_bytes(10)); - contents[10..14].copy_from_slice(&[b'P', b'E', 0, 0]); - contents[14..16].copy_from_slice(&u16::to_le_bytes(arch.pe_machine_type())); - let magic = if pointer_width == "64" { 523 } else { 267 }; - contents[34..36].copy_from_slice(&u16::to_le_bytes(magic)); - - self.file(path, &contents) - } - - fn so(self, path: &str, pointer_width: &str) -> Self { - // ELF header. - let class = if pointer_width == "64" { 2 } else { 1 }; - let contents = [127, 69, 76, 70, class]; - - self.file(path, &contents) - } - - fn command(self, command: &str, args: &[&str], response: &str) -> Self { - let command = command.to_string(); - let args = args.iter().map(|a| a.to_string()).collect::>(); - - let mut key = vec![command]; - key.extend(args); - self.commands.lock().unwrap().responses.insert(key, response.into()); - - self - } - - fn enable(self) -> Self { - env::set_var("_CLANG_SYS_TEST", "yep"); - env::set_var("_CLANG_SYS_TEST_OS", &self.os); - env::set_var("_CLANG_SYS_TEST_ARCH", &format!("{}", self.arch)); - env::set_var("_CLANG_SYS_TEST_POINTER_WIDTH", &self.pointer_width); - if let Some(env) = &self.env { - env::set_var("_CLANG_SYS_TEST_ENV", env); - } - - for (name, (value, _)) in &self.vars { - if let Some(value) = value { - env::set_var(name, value); - } else { - env::remove_var(name); - } - } - - env::set_current_dir(&self.tmp).unwrap(); - - let commands = self.commands.clone(); - let mock = &mut *common::RUN_COMMAND_MOCK.lock().unwrap(); - *mock = Some(Box::new(move |command, path, args| { - let command = command.to_string(); - let path = path.to_string(); - let args = args.iter().map(|a| a.to_string()).collect::>(); - - let mut commands = commands.lock().unwrap(); - commands.invocations.push((command.clone(), path, args.clone())); - - let mut key = vec![command]; - key.extend(args); - commands.responses.get(&key).cloned() - })); - - self - } -} - -impl Drop for Env { - fn drop(&mut self) { - env::remove_var("_CLANG_SYS_TEST"); - env::remove_var("_CLANG_SYS_TEST_OS"); - env::remove_var("_CLANG_SYS_TEST_ARCH"); - env::remove_var("_CLANG_SYS_TEST_POINTER_WIDTH"); - env::remove_var("_CLANG_SYS_TEST_ENV"); - - for (name, (_, previous)) in &self.vars { - if let Some(previous) = previous { - env::set_var(name, previous); - } else { - env::remove_var(name); - } - } - - if let Err(error) = env::set_current_dir(&self.cwd) { - println!("Failed to reset working directory: {:?}", error); - } - } -} - -#[test] -fn test_all() { - // Run tests serially since they alter the environment. - - test_linux_directory_preference(); - test_linux_version_preference(); - test_linux_directory_and_version_preference(); - - #[cfg(target_os = "windows")] - { - test_windows_bin_sibling(); - test_windows_mingw_gnu(); - test_windows_mingw_msvc(); - test_windows_arm64_on_x86_64(); - test_windows_x86_64_on_arm64(); - } -} - -macro_rules! assert_error { - ($result:expr, $contents:expr $(,)?) 
=> { - if let Err(error) = $result { - if !error.contains($contents) { - panic!("expected error to contain {:?}, received: {error:?}", $contents); - } - } else { - panic!("expected error, received: {:?}", $result); - } - }; -} - -//================================================ -// Dynamic -//================================================ - -// Linux ----------------------------------------- - -fn test_linux_directory_preference() { - let _env = Env::new("linux", Arch::X86_64, "64") - .so("usr/lib/libclang.so.1", "64") - .so("usr/local/lib/libclang.so.1", "64") - .enable(); - - assert_eq!( - dynamic::find(true), - Ok(("usr/local/lib".into(), "libclang.so.1".into())), - ); -} - -fn test_linux_version_preference() { - let _env = Env::new("linux", Arch::X86_64, "64") - .so("usr/lib/libclang-3.so", "64") - .so("usr/lib/libclang-3.5.so", "64") - .so("usr/lib/libclang-3.5.0.so", "64") - .enable(); - - assert_eq!( - dynamic::find(true), - Ok(("usr/lib".into(), "libclang-3.5.0.so".into())), - ); -} - -fn test_linux_directory_and_version_preference() { - let _env = Env::new("linux", Arch::X86_64, "64") - .so("usr/local/llvm/lib/libclang-3.so", "64") - .so("usr/local/lib/libclang-3.5.so", "64") - .so("usr/lib/libclang-3.5.0.so", "64") - .enable(); - - assert_eq!( - dynamic::find(true), - Ok(("usr/lib".into(), "libclang-3.5.0.so".into())), - ); -} - -// Windows --------------------------------------- - -#[cfg(target_os = "windows")] -fn test_windows_bin_sibling() { - let _env = Env::new("windows", Arch::X86_64, "64") - .dir("Program Files\\LLVM\\lib") - .dll("Program Files\\LLVM\\bin\\libclang.dll", Arch::X86_64, "64") - .enable(); - - assert_eq!( - dynamic::find(true), - Ok(("Program Files\\LLVM\\bin".into(), "libclang.dll".into())), - ); -} - -#[cfg(target_os = "windows")] -fn test_windows_mingw_gnu() { - let _env = Env::new("windows", Arch::X86_64, "64") - .env("gnu") - .dir("MSYS\\MinGW\\lib") - .dll("MSYS\\MinGW\\bin\\clang.dll", Arch::X86_64, "64") - .dir("Program Files\\LLVM\\lib") - .dll("Program Files\\LLVM\\bin\\libclang.dll", Arch::X86_64, "64") - .enable(); - - assert_eq!( - dynamic::find(true), - Ok(("MSYS\\MinGW\\bin".into(), "clang.dll".into())), - ); -} - -#[cfg(target_os = "windows")] -fn test_windows_mingw_msvc() { - let _env = Env::new("windows", Arch::X86_64, "64") - .env("msvc") - .dir("MSYS\\MinGW\\lib") - .dll("MSYS\\MinGW\\bin\\clang.dll", Arch::X86_64, "64") - .dir("Program Files\\LLVM\\lib") - .dll("Program Files\\LLVM\\bin\\libclang.dll", Arch::X86_64, "64") - .enable(); - - assert_eq!( - dynamic::find(true), - Ok(("Program Files\\LLVM\\bin".into(), "libclang.dll".into())), - ); -} - -#[cfg(target_os = "windows")] -fn test_windows_arm64_on_x86_64() { - let _env = Env::new("windows", Arch::X86_64, "64") - .env("msvc") - .dir("Program Files\\LLVM\\lib") - .dll("Program Files\\LLVM\\bin\\libclang.dll", Arch::ARM64, "64") - .enable(); - - assert_error!( - dynamic::find(true), - "invalid: [(Program Files\\LLVM\\bin\\libclang.dll: invalid DLL (ARM64)", - ); -} - -#[cfg(target_os = "windows")] -fn test_windows_x86_64_on_arm64() { - let _env = Env::new("windows", Arch::ARM64, "64") - .env("msvc") - .dir("Program Files\\LLVM\\lib") - .dll("Program Files\\LLVM\\bin\\libclang.dll", Arch::X86_64, "64") - .enable(); - - assert_error!( - dynamic::find(true), - "invalid: [(Program Files\\LLVM\\bin\\libclang.dll: invalid DLL (x86-64)", - ); -} diff --git a/vendor/clang-sys/tests/header.h b/vendor/clang-sys/tests/header.h deleted file mode 100644 index 5c392d31455a87..00000000000000 --- 
a/vendor/clang-sys/tests/header.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef HEADER_H_ -#define HEADER_H_ - -int add(int a, int b); - -#endif diff --git a/vendor/clang-sys/tests/lib.rs b/vendor/clang-sys/tests/lib.rs deleted file mode 100644 index 1f152f7883aabd..00000000000000 --- a/vendor/clang-sys/tests/lib.rs +++ /dev/null @@ -1,52 +0,0 @@ -use std::ptr; - -use clang_sys::*; - -use libc::c_char; - -fn parse() { - unsafe { - let index = clang_createIndex(0, 0); - assert!(!index.is_null()); - - let tu = clang_parseTranslationUnit( - index, - "tests/header.h\0".as_ptr() as *const c_char, - ptr::null_mut(), - 0, - ptr::null_mut(), - 0, - 0, - ); - assert!(!tu.is_null()); - } -} - -#[cfg(feature = "runtime")] -#[test] -fn test() { - load().unwrap(); - let library = get_library().unwrap(); - println!("{:?} ({:?})", library.version(), library.path()); - parse(); - unload().unwrap(); -} - -#[cfg(not(feature = "runtime"))] -#[test] -fn test() { - parse(); -} - -#[test] -fn test_support() { - let clang = support::Clang::find(None, &[]).unwrap(); - println!("{:?}", clang); -} - -#[test] -fn test_support_target() { - let args = &["--target".into(), "x86_64-unknown-linux-gnu".into()]; - let clang = support::Clang::find(None, args).unwrap(); - println!("{:?}", clang); -} diff --git a/vendor/either/.cargo-checksum.json b/vendor/either/.cargo-checksum.json deleted file mode 100644 index 0b95517a79697b..00000000000000 --- a/vendor/either/.cargo-checksum.json +++ /dev/null @@ -1 +0,0 @@ -{"files":{".cargo_vcs_info.json":"a476e926b135f5b5862629aa62aee044b0ea4b5328e4de5f46f43d3858d3bbfe",".github/workflows/ci.yml":"1980de2333ca92700b4cacc285e75dc8a3ee4f561ee8c962989469da6be1980d","Cargo.lock":"fa51302ea4d0f21da8621d0376bc26e2ffc28754fef88a80fa8d48897cbef662","Cargo.toml":"e88e6acce3b0cbf6734ab7c41cbc01a6b368843eb29d57f06e3b917ec9af03e0","Cargo.toml.orig":"e38380a7d61979d78ece13c1e45386b8f81b50375aad7887671c0ab026056687","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"7576269ea71f767b99297934c0b2367532690f8c4badc695edf8e04ab6a1e545","README-crates.io.md":"b775991a01ab4a0a8de6169f597775319d9ce8178f5c74ccdc634f13a286b20c","README.rst":"fb08fabe5268b1f350bf8772c240c93190f7b88ae856dd09e77248e65881eebf","src/into_either.rs":"0477f226bbba78ef017de08b87d421d3cd99fbc95b90ba4e6e3e803e3d15254e","src/iterator.rs":"eef042c8fa7d2d2cb002ed81dedf8c124ec36252ae8bd0368039c788f686edd8","src/lib.rs":"430b3125aa77ab51bca768e6be8d4a2cbdb9932338f9e5d90803a0a2fe99b371","src/serde_untagged.rs":"e826ee0ab31616e49c3e3f3711c8441001ee424b3e7a8c4c466cfcc4f8a7701a","src/serde_untagged_optional.rs":"86265f09d0795428bb2ce013b070d1badf1e2210217844a9ff3f04b2795868ab"},"package":"48c757948c5ede0e46177b7add2e67155f70e33c07fea8284df6576da70b3719"} \ No newline at end of file diff --git a/vendor/either/.cargo_vcs_info.json b/vendor/either/.cargo_vcs_info.json deleted file mode 100644 index 1cca00e9368565..00000000000000 --- a/vendor/either/.cargo_vcs_info.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "git": { - "sha1": "59ae1fce0cec62c886fcd486e06b7e219bc7ce48" - }, - "path_in_vcs": "" -} \ No newline at end of file diff --git a/vendor/either/.github/workflows/ci.yml b/vendor/either/.github/workflows/ci.yml deleted file mode 100644 index 2f3843b7a2ec6e..00000000000000 --- a/vendor/either/.github/workflows/ci.yml +++ /dev/null @@ -1,83 +0,0 @@ -on: - push: - branches: [ main ] - pull_request: - merge_group: - -name: CI - -jobs: - ci: - runs-on: ubuntu-latest - strategy: - fail-fast: false - matrix: - 
rust: - - 1.63.0 # MSRV - - stable - - beta - - nightly - features: - - "" - - "serde" - - steps: - - name: Checkout - uses: actions/checkout@v4 - - - name: Cache the registry - uses: actions/cache@v4 - if: startsWith(matrix.rust, '1') - with: - path: ~/.cargo/registry/index - key: cargo-${{ matrix.rust }}-git-index - - - name: Set up Rust - uses: dtolnay/rust-toolchain@master - with: - toolchain: ${{ matrix.rust }} - - - name: Build (no_std) - run: cargo build --no-default-features --features "${{ matrix.features }}" - - - name: Build - run: cargo build --features "${{ matrix.features }}" - - - name: Test - run: cargo test --features "${{ matrix.features }}" - - - name: Doc - run: cargo doc --features "${{ matrix.features }}" - - clippy: - name: Rustfmt and Clippy - runs-on: ubuntu-latest - steps: - - name: Checkout - uses: actions/checkout@v4 - - - name: Set up nightly Rust - uses: dtolnay/rust-toolchain@nightly - with: - components: rustfmt, clippy - - - name: Rustfmt - run: cargo fmt --all -- --check - - - name: Clippy - run: cargo clippy # -- -D warnings - - # One job that "summarizes" the success state of this pipeline. This can then be added to branch - # protection, rather than having to add each job separately. - success: - name: Success - runs-on: ubuntu-latest - needs: [ci, clippy] - # Github branch protection is exceedingly silly and treats "jobs skipped because a dependency - # failed" as success. So we have to do some contortions to ensure the job fails if any of its - # dependencies fails. - if: always() # make sure this is never "skipped" - steps: - # Manually check the status of all dependencies. `if: failure()` does not work. - - name: check if any dependency failed - run: jq --exit-status 'all(.result == "success")' <<< '${{ toJson(needs) }}' diff --git a/vendor/either/Cargo.lock b/vendor/either/Cargo.lock deleted file mode 100644 index 3c336a9a934e58..00000000000000 --- a/vendor/either/Cargo.lock +++ /dev/null @@ -1,96 +0,0 @@ -# This file is automatically @generated by Cargo. -# It is not intended for manual editing. 
-version = 3 - -[[package]] -name = "either" -version = "1.15.0" -dependencies = [ - "serde", - "serde_json", -] - -[[package]] -name = "itoa" -version = "1.0.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c" - -[[package]] -name = "memchr" -version = "2.7.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3" - -[[package]] -name = "proc-macro2" -version = "1.0.94" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a31971752e70b8b2686d7e46ec17fb38dad4051d94024c88df49b667caea9c84" -dependencies = [ - "unicode-ident", -] - -[[package]] -name = "quote" -version = "1.0.39" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1f1914ce909e1658d9907913b4b91947430c7d9be598b15a1912935b8c04801" -dependencies = [ - "proc-macro2", -] - -[[package]] -name = "ryu" -version = "1.0.20" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28d3b2b1366ec20994f1fd18c3c594f05c5dd4bc44d8bb0c1c632c8d6829481f" - -[[package]] -name = "serde" -version = "1.0.218" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8dfc9d19bdbf6d17e22319da49161d5d0108e4188e8b680aef6299eed22df60" -dependencies = [ - "serde_derive", -] - -[[package]] -name = "serde_derive" -version = "1.0.218" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f09503e191f4e797cb8aac08e9a4a4695c5edf6a2e70e376d961ddd5c969f82b" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "serde_json" -version = "1.0.140" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "20068b6e96dc6c9bd23e01df8827e6c7e1f2fddd43c21810382803c136b99373" -dependencies = [ - "itoa", - "memchr", - "ryu", - "serde", -] - -[[package]] -name = "syn" -version = "2.0.99" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e02e925281e18ffd9d640e234264753c43edc62d64b2d4cf898f1bc5e75f3fc2" -dependencies = [ - "proc-macro2", - "quote", - "unicode-ident", -] - -[[package]] -name = "unicode-ident" -version = "1.0.18" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a5f39404a5da50712a4c1eecf25e90dd62b613502b7e925fd4e4d19b5c96512" diff --git a/vendor/either/Cargo.toml b/vendor/either/Cargo.toml deleted file mode 100644 index 68b38fd8097e4f..00000000000000 --- a/vendor/either/Cargo.toml +++ /dev/null @@ -1,70 +0,0 @@ -# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO -# -# When uploading crates to the registry Cargo will automatically -# "normalize" Cargo.toml files for maximal compatibility -# with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g., crates.io) dependencies. -# -# If you are reading this file be aware that the original Cargo.toml -# will likely look very different (and much more reasonable). -# See Cargo.toml.orig for the original contents. - -[package] -edition = "2021" -rust-version = "1.63.0" -name = "either" -version = "1.15.0" -authors = ["bluss"] -build = false -autolib = false -autobins = false -autoexamples = false -autotests = false -autobenches = false -description = """ -The enum `Either` with variants `Left` and `Right` is a general purpose sum type with two cases. 
-""" -documentation = "https://docs.rs/either/1/" -readme = "README-crates.io.md" -keywords = [ - "data-structure", - "no_std", -] -categories = [ - "data-structures", - "no-std", -] -license = "MIT OR Apache-2.0" -repository = "https://github.com/rayon-rs/either" - -[package.metadata.docs.rs] -features = ["serde"] - -[package.metadata.playground] -features = ["serde"] - -[package.metadata.release] -allow-branch = ["main"] -sign-tag = true -tag-name = "{{version}}" - -[features] -default = ["std"] -std = [] -use_std = ["std"] - -[lib] -name = "either" -path = "src/lib.rs" - -[dependencies.serde] -version = "1.0.95" -features = [ - "alloc", - "derive", -] -optional = true -default-features = false - -[dev-dependencies.serde_json] -version = "1.0.0" diff --git a/vendor/either/LICENSE-APACHE b/vendor/either/LICENSE-APACHE deleted file mode 100644 index 16fe87b06e802f..00000000000000 --- a/vendor/either/LICENSE-APACHE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - -2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - -3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - -4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - -5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - -6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - -8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - -Copyright [yyyy] [name of copyright owner] - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. diff --git a/vendor/either/LICENSE-MIT b/vendor/either/LICENSE-MIT deleted file mode 100644 index 9203baa055d41d..00000000000000 --- a/vendor/either/LICENSE-MIT +++ /dev/null @@ -1,25 +0,0 @@ -Copyright (c) 2015 - -Permission is hereby granted, free of charge, to any -person obtaining a copy of this software and associated -documentation files (the "Software"), to deal in the -Software without restriction, including without -limitation the rights to use, copy, modify, merge, -publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software -is furnished to do so, subject to the following -conditions: - -The above copyright notice and this permission notice -shall be included in all copies or substantial portions -of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -DEALINGS IN THE SOFTWARE. diff --git a/vendor/either/README-crates.io.md b/vendor/either/README-crates.io.md deleted file mode 100644 index d36890278b328e..00000000000000 --- a/vendor/either/README-crates.io.md +++ /dev/null @@ -1,10 +0,0 @@ -The enum `Either` with variants `Left` and `Right` is a general purpose -sum type with two cases. - -Either has methods that are similar to Option and Result, and it also implements -traits like `Iterator`. - -Includes macros `try_left!()` and `try_right!()` to use for -short-circuiting logic, similar to how the `?` operator is used with `Result`. -Note that `Either` is general purpose. For describing success or error, use the -regular `Result`. diff --git a/vendor/either/README.rst b/vendor/either/README.rst deleted file mode 100644 index 7665f1ff0dd71a..00000000000000 --- a/vendor/either/README.rst +++ /dev/null @@ -1,204 +0,0 @@ - -Either -====== - -The enum ``Either`` with variants ``Left`` and ``Right`` and trait -implementations including Iterator, Read, Write. - -Either has methods that are similar to Option and Result. 
- -Includes convenience macros ``try_left!()`` and ``try_right!()`` to use for -short-circuiting logic. - -Please read the `API documentation here`__ - -__ https://docs.rs/either/ - -|build_status|_ |crates|_ - -.. |build_status| image:: https://github.com/rayon-rs/either/workflows/CI/badge.svg?branch=main -.. _build_status: https://github.com/rayon-rs/either/actions - -.. |crates| image:: https://img.shields.io/crates/v/either.svg -.. _crates: https://crates.io/crates/either - -How to use with cargo:: - - [dependencies] - either = "1" - - -Recent Changes --------------- - -- 1.15.0 - - - Fix ``serde`` support when building without ``std``, by @klkvr (#119) - - - Use a more common ``std`` feature for default enablement, deprecating - the ``use_std`` feature as a mere alias of the new name. - -- 1.14.0 - - - **MSRV**: ``either`` now requires Rust 1.63 or later. - - - Implement ``fmt::Write`` for ``Either``, by @yotamofek (#113) - - - Replace ``Into for Either`` with ``From for Result``, by @cuviper (#118) - -- 1.13.0 - - - Add new methods ``.cloned()`` and ``.copied()``, by @ColonelThirtyTwo (#107) - -- 1.12.0 - - - **MSRV**: ``either`` now requires Rust 1.37 or later. - - - Specialize ``nth_back`` for ``Either`` and ``IterEither``, by @cuviper (#106) - -- 1.11.0 - - - Add new trait ``IntoEither`` that is useful to convert to ``Either`` in method chains, - by @SFM61319 (#101) - -- 1.10.0 - - - Add new methods ``.factor_iter()``, ``.factor_iter_mut()``, and ``.factor_into_iter()`` - that return ``Either`` items, plus ``.iter()`` and ``.iter_mut()`` to convert to direct - reference iterators; by @aj-bagwell and @cuviper (#91) - -- 1.9.0 - - - Add new methods ``.map_either()`` and ``.map_either_with()``, by @nasadorian (#82) - -- 1.8.1 - - - Clarified that the multiple licenses are combined with OR. - -- 1.8.0 - - - **MSRV**: ``either`` now requires Rust 1.36 or later. - - - Add new methods ``.as_pin_ref()`` and ``.as_pin_mut()`` to project a - pinned ``Either`` as inner ``Pin`` variants, by @cuviper (#77) - - - Implement the ``Future`` trait, by @cuviper (#77) - - - Specialize more methods of the ``io`` traits, by @Kixunil and @cuviper (#75) - -- 1.7.0 - - - **MSRV**: ``either`` now requires Rust 1.31 or later. - - - Export the macro ``for_both!``, by @thomaseizinger (#58) - - - Implement the ``io::Seek`` trait, by @Kerollmops (#60) - - - Add new method ``.either_into()`` for ``Into`` conversion, by @TonalidadeHidrica (#63) - - - Add new methods ``.factor_ok()``, ``.factor_err()``, and ``.factor_none()``, - by @zachs18 (#67) - - - Specialize ``source`` in the ``Error`` implementation, by @thomaseizinger (#69) - - - Specialize more iterator methods and implement the ``FusedIterator`` trait, - by @Ten0 (#66) and @cuviper (#71) - - - Specialize ``Clone::clone_from``, by @cuviper (#72) - -- 1.6.1 - - - Add new methods ``.expect_left()``, ``.unwrap_left()``, - and equivalents on the right, by @spenserblack (#51) - -- 1.6.0 - - - Add new modules ``serde_untagged`` and ``serde_untagged_optional`` to customize - how ``Either`` fields are serialized in other types, by @MikailBag (#49) - -- 1.5.3 - - - Add new method ``.map()`` for ``Either`` by @nvzqz (#40). 
- -- 1.5.2 - - - Add new methods ``.left_or()``, ``.left_or_default()``, ``.left_or_else()``, - and equivalents on the right, by @DCjanus (#36) - -- 1.5.1 - - - Add ``AsRef`` and ``AsMut`` implementations for common unsized types: - ``str``, ``[T]``, ``CStr``, ``OsStr``, and ``Path``, by @mexus (#29) - -- 1.5.0 - - - Add new methods ``.factor_first()``, ``.factor_second()`` and ``.into_inner()`` - by @mathstuf (#19) - -- 1.4.0 - - - Add inherent method ``.into_iter()`` by @cuviper (#12) - -- 1.3.0 - - - Add opt-in serde support by @hcpl - -- 1.2.0 - - - Add method ``.either_with()`` by @Twey (#13) - -- 1.1.0 - - - Add methods ``left_and_then``, ``right_and_then`` by @rampantmonkey - - Include license files in the repository and released crate - -- 1.0.3 - - - Add crate categories - -- 1.0.2 - - - Forward more ``Iterator`` methods - - Implement ``Extend`` for ``Either`` if ``L, R`` do. - -- 1.0.1 - - - Fix ``Iterator`` impl for ``Either`` to forward ``.fold()``. - -- 1.0.0 - - - Add default crate feature ``use_std`` so that you can opt out of linking to - std. - -- 0.1.7 - - - Add methods ``.map_left()``, ``.map_right()`` and ``.either()``. - - Add more documentation - -- 0.1.3 - - - Implement Display, Error - -- 0.1.2 - - - Add macros ``try_left!`` and ``try_right!``. - -- 0.1.1 - - - Implement Deref, DerefMut - -- 0.1.0 - - - Initial release - - Support Iterator, Read, Write - -License -------- - -Dual-licensed to be compatible with the Rust project. - -Licensed under the Apache License, Version 2.0 -https://www.apache.org/licenses/LICENSE-2.0 or the MIT license -https://opensource.org/licenses/MIT, at your -option. This file may not be copied, modified, or distributed -except according to those terms. diff --git a/vendor/either/src/into_either.rs b/vendor/either/src/into_either.rs deleted file mode 100644 index 73746c80f11f15..00000000000000 --- a/vendor/either/src/into_either.rs +++ /dev/null @@ -1,64 +0,0 @@ -//! The trait [`IntoEither`] provides methods for converting a type `Self`, whose -//! size is constant and known at compile-time, into an [`Either`] variant. - -use super::{Either, Left, Right}; - -/// Provides methods for converting a type `Self` into either a [`Left`] or [`Right`] -/// variant of [`Either`](Either). -/// -/// The [`into_either`](IntoEither::into_either) method takes a [`bool`] to determine -/// whether to convert to [`Left`] or [`Right`]. -/// -/// The [`into_either_with`](IntoEither::into_either_with) method takes a -/// [predicate function](FnOnce) to determine whether to convert to [`Left`] or [`Right`]. -pub trait IntoEither: Sized { - /// Converts `self` into a [`Left`] variant of [`Either`](Either) - /// if `into_left` is `true`. - /// Converts `self` into a [`Right`] variant of [`Either`](Either) - /// otherwise. - /// - /// # Examples - /// - /// ``` - /// use either::{IntoEither, Left, Right}; - /// - /// let x = 0; - /// assert_eq!(x.into_either(true), Left(x)); - /// assert_eq!(x.into_either(false), Right(x)); - /// ``` - fn into_either(self, into_left: bool) -> Either { - if into_left { - Left(self) - } else { - Right(self) - } - } - - /// Converts `self` into a [`Left`] variant of [`Either`](Either) - /// if `into_left(&self)` returns `true`. - /// Converts `self` into a [`Right`] variant of [`Either`](Either) - /// otherwise. 
- /// - /// # Examples - /// - /// ``` - /// use either::{IntoEither, Left, Right}; - /// - /// fn is_even(x: &u8) -> bool { - /// x % 2 == 0 - /// } - /// - /// let x = 0; - /// assert_eq!(x.into_either_with(is_even), Left(x)); - /// assert_eq!(x.into_either_with(|x| !is_even(x)), Right(x)); - /// ``` - fn into_either_with(self, into_left: F) -> Either - where - F: FnOnce(&Self) -> bool, - { - let into_left = into_left(&self); - self.into_either(into_left) - } -} - -impl IntoEither for T {} diff --git a/vendor/either/src/iterator.rs b/vendor/either/src/iterator.rs deleted file mode 100644 index d54fab793d969a..00000000000000 --- a/vendor/either/src/iterator.rs +++ /dev/null @@ -1,315 +0,0 @@ -use super::{for_both, Either, Left, Right}; -use core::iter; - -macro_rules! wrap_either { - ($value:expr => $( $tail:tt )*) => { - match $value { - Left(inner) => inner.map(Left) $($tail)*, - Right(inner) => inner.map(Right) $($tail)*, - } - }; -} - -/// Iterator that maps left or right iterators to corresponding `Either`-wrapped items. -/// -/// This struct is created by the [`Either::factor_into_iter`], -/// [`factor_iter`][Either::factor_iter], -/// and [`factor_iter_mut`][Either::factor_iter_mut] methods. -#[derive(Clone, Debug)] -pub struct IterEither { - inner: Either, -} - -impl IterEither { - pub(crate) fn new(inner: Either) -> Self { - IterEither { inner } - } -} - -impl Extend for Either -where - L: Extend, - R: Extend, -{ - fn extend(&mut self, iter: T) - where - T: IntoIterator, - { - for_both!(self, inner => inner.extend(iter)) - } -} - -/// `Either` is an iterator if both `L` and `R` are iterators. -impl Iterator for Either -where - L: Iterator, - R: Iterator, -{ - type Item = L::Item; - - fn next(&mut self) -> Option { - for_both!(self, inner => inner.next()) - } - - fn size_hint(&self) -> (usize, Option) { - for_both!(self, inner => inner.size_hint()) - } - - fn fold(self, init: Acc, f: G) -> Acc - where - G: FnMut(Acc, Self::Item) -> Acc, - { - for_both!(self, inner => inner.fold(init, f)) - } - - fn for_each(self, f: F) - where - F: FnMut(Self::Item), - { - for_both!(self, inner => inner.for_each(f)) - } - - fn count(self) -> usize { - for_both!(self, inner => inner.count()) - } - - fn last(self) -> Option { - for_both!(self, inner => inner.last()) - } - - fn nth(&mut self, n: usize) -> Option { - for_both!(self, inner => inner.nth(n)) - } - - fn collect(self) -> B - where - B: iter::FromIterator, - { - for_both!(self, inner => inner.collect()) - } - - fn partition(self, f: F) -> (B, B) - where - B: Default + Extend, - F: FnMut(&Self::Item) -> bool, - { - for_both!(self, inner => inner.partition(f)) - } - - fn all(&mut self, f: F) -> bool - where - F: FnMut(Self::Item) -> bool, - { - for_both!(self, inner => inner.all(f)) - } - - fn any(&mut self, f: F) -> bool - where - F: FnMut(Self::Item) -> bool, - { - for_both!(self, inner => inner.any(f)) - } - - fn find
<P>
(&mut self, predicate: P) -> Option - where - P: FnMut(&Self::Item) -> bool, - { - for_both!(self, inner => inner.find(predicate)) - } - - fn find_map(&mut self, f: F) -> Option - where - F: FnMut(Self::Item) -> Option, - { - for_both!(self, inner => inner.find_map(f)) - } - - fn position
<P>
(&mut self, predicate: P) -> Option - where - P: FnMut(Self::Item) -> bool, - { - for_both!(self, inner => inner.position(predicate)) - } -} - -impl DoubleEndedIterator for Either -where - L: DoubleEndedIterator, - R: DoubleEndedIterator, -{ - fn next_back(&mut self) -> Option { - for_both!(self, inner => inner.next_back()) - } - - fn nth_back(&mut self, n: usize) -> Option { - for_both!(self, inner => inner.nth_back(n)) - } - - fn rfold(self, init: Acc, f: G) -> Acc - where - G: FnMut(Acc, Self::Item) -> Acc, - { - for_both!(self, inner => inner.rfold(init, f)) - } - - fn rfind
<P>
(&mut self, predicate: P) -> Option - where - P: FnMut(&Self::Item) -> bool, - { - for_both!(self, inner => inner.rfind(predicate)) - } -} - -impl ExactSizeIterator for Either -where - L: ExactSizeIterator, - R: ExactSizeIterator, -{ - fn len(&self) -> usize { - for_both!(self, inner => inner.len()) - } -} - -impl iter::FusedIterator for Either -where - L: iter::FusedIterator, - R: iter::FusedIterator, -{ -} - -impl Iterator for IterEither -where - L: Iterator, - R: Iterator, -{ - type Item = Either; - - fn next(&mut self) -> Option { - Some(map_either!(self.inner, ref mut inner => inner.next()?)) - } - - fn size_hint(&self) -> (usize, Option) { - for_both!(self.inner, ref inner => inner.size_hint()) - } - - fn fold(self, init: Acc, f: G) -> Acc - where - G: FnMut(Acc, Self::Item) -> Acc, - { - wrap_either!(self.inner => .fold(init, f)) - } - - fn for_each(self, f: F) - where - F: FnMut(Self::Item), - { - wrap_either!(self.inner => .for_each(f)) - } - - fn count(self) -> usize { - for_both!(self.inner, inner => inner.count()) - } - - fn last(self) -> Option { - Some(map_either!(self.inner, inner => inner.last()?)) - } - - fn nth(&mut self, n: usize) -> Option { - Some(map_either!(self.inner, ref mut inner => inner.nth(n)?)) - } - - fn collect(self) -> B - where - B: iter::FromIterator, - { - wrap_either!(self.inner => .collect()) - } - - fn partition(self, f: F) -> (B, B) - where - B: Default + Extend, - F: FnMut(&Self::Item) -> bool, - { - wrap_either!(self.inner => .partition(f)) - } - - fn all(&mut self, f: F) -> bool - where - F: FnMut(Self::Item) -> bool, - { - wrap_either!(&mut self.inner => .all(f)) - } - - fn any(&mut self, f: F) -> bool - where - F: FnMut(Self::Item) -> bool, - { - wrap_either!(&mut self.inner => .any(f)) - } - - fn find
<P>
(&mut self, predicate: P) -> Option - where - P: FnMut(&Self::Item) -> bool, - { - wrap_either!(&mut self.inner => .find(predicate)) - } - - fn find_map(&mut self, f: F) -> Option - where - F: FnMut(Self::Item) -> Option, - { - wrap_either!(&mut self.inner => .find_map(f)) - } - - fn position
<P>
(&mut self, predicate: P) -> Option - where - P: FnMut(Self::Item) -> bool, - { - wrap_either!(&mut self.inner => .position(predicate)) - } -} - -impl DoubleEndedIterator for IterEither -where - L: DoubleEndedIterator, - R: DoubleEndedIterator, -{ - fn next_back(&mut self) -> Option { - Some(map_either!(self.inner, ref mut inner => inner.next_back()?)) - } - - fn nth_back(&mut self, n: usize) -> Option { - Some(map_either!(self.inner, ref mut inner => inner.nth_back(n)?)) - } - - fn rfold(self, init: Acc, f: G) -> Acc - where - G: FnMut(Acc, Self::Item) -> Acc, - { - wrap_either!(self.inner => .rfold(init, f)) - } - - fn rfind
<P>
(&mut self, predicate: P) -> Option - where - P: FnMut(&Self::Item) -> bool, - { - wrap_either!(&mut self.inner => .rfind(predicate)) - } -} - -impl ExactSizeIterator for IterEither -where - L: ExactSizeIterator, - R: ExactSizeIterator, -{ - fn len(&self) -> usize { - for_both!(self.inner, ref inner => inner.len()) - } -} - -impl iter::FusedIterator for IterEither -where - L: iter::FusedIterator, - R: iter::FusedIterator, -{ -} diff --git a/vendor/either/src/lib.rs b/vendor/either/src/lib.rs deleted file mode 100644 index e2265eb7104057..00000000000000 --- a/vendor/either/src/lib.rs +++ /dev/null @@ -1,1561 +0,0 @@ -//! The enum [`Either`] with variants `Left` and `Right` is a general purpose -//! sum type with two cases. -//! -//! [`Either`]: enum.Either.html -//! -//! **Crate features:** -//! -//! * `"std"` -//! Enabled by default. Disable to make the library `#![no_std]`. -//! -//! * `"serde"` -//! Disabled by default. Enable to `#[derive(Serialize, Deserialize)]` for `Either` -//! - -#![doc(html_root_url = "https://docs.rs/either/1/")] -#![no_std] - -#[cfg(any(test, feature = "std"))] -extern crate std; - -#[cfg(feature = "serde")] -pub mod serde_untagged; - -#[cfg(feature = "serde")] -pub mod serde_untagged_optional; - -use core::convert::{AsMut, AsRef}; -use core::fmt; -use core::future::Future; -use core::ops::Deref; -use core::ops::DerefMut; -use core::pin::Pin; - -#[cfg(any(test, feature = "std"))] -use std::error::Error; -#[cfg(any(test, feature = "std"))] -use std::io::{self, BufRead, Read, Seek, SeekFrom, Write}; - -pub use crate::Either::{Left, Right}; - -/// The enum `Either` with variants `Left` and `Right` is a general purpose -/// sum type with two cases. -/// -/// The `Either` type is symmetric and treats its variants the same way, without -/// preference. -/// (For representing success or error, use the regular `Result` enum instead.) -#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] -#[derive(Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)] -pub enum Either { - /// A value of type `L`. - Left(L), - /// A value of type `R`. - Right(R), -} - -/// Evaluate the provided expression for both [`Either::Left`] and [`Either::Right`]. -/// -/// This macro is useful in cases where both sides of [`Either`] can be interacted with -/// in the same way even though the don't share the same type. -/// -/// Syntax: `either::for_both!(` *expression* `,` *pattern* `=>` *expression* `)` -/// -/// # Example -/// -/// ``` -/// use either::Either; -/// -/// fn length(owned_or_borrowed: Either) -> usize { -/// either::for_both!(owned_or_borrowed, s => s.len()) -/// } -/// -/// fn main() { -/// let borrowed = Either::Right("Hello world!"); -/// let owned = Either::Left("Hello world!".to_owned()); -/// -/// assert_eq!(length(borrowed), 12); -/// assert_eq!(length(owned), 12); -/// } -/// ``` -#[macro_export] -macro_rules! for_both { - ($value:expr, $pattern:pat => $result:expr) => { - match $value { - $crate::Either::Left($pattern) => $result, - $crate::Either::Right($pattern) => $result, - } - }; -} - -/// Macro for unwrapping the left side of an [`Either`], which fails early -/// with the opposite side. Can only be used in functions that return -/// `Either` because of the early return of `Right` that it provides. -/// -/// See also [`try_right!`] for its dual, which applies the same just to the -/// right side. 
-/// -/// # Example -/// -/// ``` -/// use either::{Either, Left, Right}; -/// -/// fn twice(wrapper: Either) -> Either { -/// let value = either::try_left!(wrapper); -/// Left(value * 2) -/// } -/// -/// fn main() { -/// assert_eq!(twice(Left(2)), Left(4)); -/// assert_eq!(twice(Right("ups")), Right("ups")); -/// } -/// ``` -#[macro_export] -macro_rules! try_left { - ($expr:expr) => { - match $expr { - $crate::Left(val) => val, - $crate::Right(err) => return $crate::Right(::core::convert::From::from(err)), - } - }; -} - -/// Dual to [`try_left!`], see its documentation for more information. -#[macro_export] -macro_rules! try_right { - ($expr:expr) => { - match $expr { - $crate::Left(err) => return $crate::Left(::core::convert::From::from(err)), - $crate::Right(val) => val, - } - }; -} - -macro_rules! map_either { - ($value:expr, $pattern:pat => $result:expr) => { - match $value { - Left($pattern) => Left($result), - Right($pattern) => Right($result), - } - }; -} - -mod iterator; -pub use self::iterator::IterEither; - -mod into_either; -pub use self::into_either::IntoEither; - -impl Clone for Either { - fn clone(&self) -> Self { - match self { - Left(inner) => Left(inner.clone()), - Right(inner) => Right(inner.clone()), - } - } - - fn clone_from(&mut self, source: &Self) { - match (self, source) { - (Left(dest), Left(source)) => dest.clone_from(source), - (Right(dest), Right(source)) => dest.clone_from(source), - (dest, source) => *dest = source.clone(), - } - } -} - -impl Either { - /// Return true if the value is the `Left` variant. - /// - /// ``` - /// use either::*; - /// - /// let values = [Left(1), Right("the right value")]; - /// assert_eq!(values[0].is_left(), true); - /// assert_eq!(values[1].is_left(), false); - /// ``` - pub fn is_left(&self) -> bool { - match self { - Left(_) => true, - Right(_) => false, - } - } - - /// Return true if the value is the `Right` variant. - /// - /// ``` - /// use either::*; - /// - /// let values = [Left(1), Right("the right value")]; - /// assert_eq!(values[0].is_right(), false); - /// assert_eq!(values[1].is_right(), true); - /// ``` - pub fn is_right(&self) -> bool { - !self.is_left() - } - - /// Convert the left side of `Either` to an `Option`. - /// - /// ``` - /// use either::*; - /// - /// let left: Either<_, ()> = Left("some value"); - /// assert_eq!(left.left(), Some("some value")); - /// - /// let right: Either<(), _> = Right(321); - /// assert_eq!(right.left(), None); - /// ``` - pub fn left(self) -> Option { - match self { - Left(l) => Some(l), - Right(_) => None, - } - } - - /// Convert the right side of `Either` to an `Option`. - /// - /// ``` - /// use either::*; - /// - /// let left: Either<_, ()> = Left("some value"); - /// assert_eq!(left.right(), None); - /// - /// let right: Either<(), _> = Right(321); - /// assert_eq!(right.right(), Some(321)); - /// ``` - pub fn right(self) -> Option { - match self { - Left(_) => None, - Right(r) => Some(r), - } - } - - /// Convert `&Either` to `Either<&L, &R>`. - /// - /// ``` - /// use either::*; - /// - /// let left: Either<_, ()> = Left("some value"); - /// assert_eq!(left.as_ref(), Left(&"some value")); - /// - /// let right: Either<(), _> = Right("some value"); - /// assert_eq!(right.as_ref(), Right(&"some value")); - /// ``` - pub fn as_ref(&self) -> Either<&L, &R> { - map_either!(self, inner => inner) - } - - /// Convert `&mut Either` to `Either<&mut L, &mut R>`. 
- /// - /// ``` - /// use either::*; - /// - /// fn mutate_left(value: &mut Either) { - /// if let Some(l) = value.as_mut().left() { - /// *l = 999; - /// } - /// } - /// - /// let mut left = Left(123); - /// let mut right = Right(123); - /// mutate_left(&mut left); - /// mutate_left(&mut right); - /// assert_eq!(left, Left(999)); - /// assert_eq!(right, Right(123)); - /// ``` - pub fn as_mut(&mut self) -> Either<&mut L, &mut R> { - map_either!(self, inner => inner) - } - - /// Convert `Pin<&Either>` to `Either, Pin<&R>>`, - /// pinned projections of the inner variants. - pub fn as_pin_ref(self: Pin<&Self>) -> Either, Pin<&R>> { - // SAFETY: We can use `new_unchecked` because the `inner` parts are - // guaranteed to be pinned, as they come from `self` which is pinned. - unsafe { map_either!(Pin::get_ref(self), inner => Pin::new_unchecked(inner)) } - } - - /// Convert `Pin<&mut Either>` to `Either, Pin<&mut R>>`, - /// pinned projections of the inner variants. - pub fn as_pin_mut(self: Pin<&mut Self>) -> Either, Pin<&mut R>> { - // SAFETY: `get_unchecked_mut` is fine because we don't move anything. - // We can use `new_unchecked` because the `inner` parts are guaranteed - // to be pinned, as they come from `self` which is pinned, and we never - // offer an unpinned `&mut L` or `&mut R` through `Pin<&mut Self>`. We - // also don't have an implementation of `Drop`, nor manual `Unpin`. - unsafe { map_either!(Pin::get_unchecked_mut(self), inner => Pin::new_unchecked(inner)) } - } - - /// Convert `Either` to `Either`. - /// - /// ``` - /// use either::*; - /// - /// let left: Either<_, ()> = Left(123); - /// assert_eq!(left.flip(), Right(123)); - /// - /// let right: Either<(), _> = Right("some value"); - /// assert_eq!(right.flip(), Left("some value")); - /// ``` - pub fn flip(self) -> Either { - match self { - Left(l) => Right(l), - Right(r) => Left(r), - } - } - - /// Apply the function `f` on the value in the `Left` variant if it is present rewrapping the - /// result in `Left`. - /// - /// ``` - /// use either::*; - /// - /// let left: Either<_, u32> = Left(123); - /// assert_eq!(left.map_left(|x| x * 2), Left(246)); - /// - /// let right: Either = Right(123); - /// assert_eq!(right.map_left(|x| x * 2), Right(123)); - /// ``` - pub fn map_left(self, f: F) -> Either - where - F: FnOnce(L) -> M, - { - match self { - Left(l) => Left(f(l)), - Right(r) => Right(r), - } - } - - /// Apply the function `f` on the value in the `Right` variant if it is present rewrapping the - /// result in `Right`. - /// - /// ``` - /// use either::*; - /// - /// let left: Either<_, u32> = Left(123); - /// assert_eq!(left.map_right(|x| x * 2), Left(123)); - /// - /// let right: Either = Right(123); - /// assert_eq!(right.map_right(|x| x * 2), Right(246)); - /// ``` - pub fn map_right(self, f: F) -> Either - where - F: FnOnce(R) -> S, - { - match self { - Left(l) => Left(l), - Right(r) => Right(f(r)), - } - } - - /// Apply the functions `f` and `g` to the `Left` and `Right` variants - /// respectively. This is equivalent to - /// [bimap](https://hackage.haskell.org/package/bifunctors-5/docs/Data-Bifunctor.html) - /// in functional programming. 
- /// - /// ``` - /// use either::*; - /// - /// let f = |s: String| s.len(); - /// let g = |u: u8| u.to_string(); - /// - /// let left: Either = Left("loopy".into()); - /// assert_eq!(left.map_either(f, g), Left(5)); - /// - /// let right: Either = Right(42); - /// assert_eq!(right.map_either(f, g), Right("42".into())); - /// ``` - pub fn map_either(self, f: F, g: G) -> Either - where - F: FnOnce(L) -> M, - G: FnOnce(R) -> S, - { - match self { - Left(l) => Left(f(l)), - Right(r) => Right(g(r)), - } - } - - /// Similar to [`map_either`][Self::map_either], with an added context `ctx` accessible to - /// both functions. - /// - /// ``` - /// use either::*; - /// - /// let mut sum = 0; - /// - /// // Both closures want to update the same value, so pass it as context. - /// let mut f = |sum: &mut usize, s: String| { *sum += s.len(); s.to_uppercase() }; - /// let mut g = |sum: &mut usize, u: usize| { *sum += u; u.to_string() }; - /// - /// let left: Either = Left("loopy".into()); - /// assert_eq!(left.map_either_with(&mut sum, &mut f, &mut g), Left("LOOPY".into())); - /// - /// let right: Either = Right(42); - /// assert_eq!(right.map_either_with(&mut sum, &mut f, &mut g), Right("42".into())); - /// - /// assert_eq!(sum, 47); - /// ``` - pub fn map_either_with(self, ctx: Ctx, f: F, g: G) -> Either - where - F: FnOnce(Ctx, L) -> M, - G: FnOnce(Ctx, R) -> S, - { - match self { - Left(l) => Left(f(ctx, l)), - Right(r) => Right(g(ctx, r)), - } - } - - /// Apply one of two functions depending on contents, unifying their result. If the value is - /// `Left(L)` then the first function `f` is applied; if it is `Right(R)` then the second - /// function `g` is applied. - /// - /// ``` - /// use either::*; - /// - /// fn square(n: u32) -> i32 { (n * n) as i32 } - /// fn negate(n: i32) -> i32 { -n } - /// - /// let left: Either = Left(4); - /// assert_eq!(left.either(square, negate), 16); - /// - /// let right: Either = Right(-4); - /// assert_eq!(right.either(square, negate), 4); - /// ``` - pub fn either(self, f: F, g: G) -> T - where - F: FnOnce(L) -> T, - G: FnOnce(R) -> T, - { - match self { - Left(l) => f(l), - Right(r) => g(r), - } - } - - /// Like [`either`][Self::either], but provide some context to whichever of the - /// functions ends up being called. - /// - /// ``` - /// // In this example, the context is a mutable reference - /// use either::*; - /// - /// let mut result = Vec::new(); - /// - /// let values = vec![Left(2), Right(2.7)]; - /// - /// for value in values { - /// value.either_with(&mut result, - /// |ctx, integer| ctx.push(integer), - /// |ctx, real| ctx.push(f64::round(real) as i32)); - /// } - /// - /// assert_eq!(result, vec![2, 3]); - /// ``` - pub fn either_with(self, ctx: Ctx, f: F, g: G) -> T - where - F: FnOnce(Ctx, L) -> T, - G: FnOnce(Ctx, R) -> T, - { - match self { - Left(l) => f(ctx, l), - Right(r) => g(ctx, r), - } - } - - /// Apply the function `f` on the value in the `Left` variant if it is present. - /// - /// ``` - /// use either::*; - /// - /// let left: Either<_, u32> = Left(123); - /// assert_eq!(left.left_and_then::<_,()>(|x| Right(x * 2)), Right(246)); - /// - /// let right: Either = Right(123); - /// assert_eq!(right.left_and_then(|x| Right::<(), _>(x * 2)), Right(123)); - /// ``` - pub fn left_and_then(self, f: F) -> Either - where - F: FnOnce(L) -> Either, - { - match self { - Left(l) => f(l), - Right(r) => Right(r), - } - } - - /// Apply the function `f` on the value in the `Right` variant if it is present. 
- /// - /// ``` - /// use either::*; - /// - /// let left: Either<_, u32> = Left(123); - /// assert_eq!(left.right_and_then(|x| Right(x * 2)), Left(123)); - /// - /// let right: Either = Right(123); - /// assert_eq!(right.right_and_then(|x| Right(x * 2)), Right(246)); - /// ``` - pub fn right_and_then(self, f: F) -> Either - where - F: FnOnce(R) -> Either, - { - match self { - Left(l) => Left(l), - Right(r) => f(r), - } - } - - /// Convert the inner value to an iterator. - /// - /// This requires the `Left` and `Right` iterators to have the same item type. - /// See [`factor_into_iter`][Either::factor_into_iter] to iterate different types. - /// - /// ``` - /// use either::*; - /// - /// let left: Either<_, Vec> = Left(vec![1, 2, 3, 4, 5]); - /// let mut right: Either, _> = Right(vec![]); - /// right.extend(left.into_iter()); - /// assert_eq!(right, Right(vec![1, 2, 3, 4, 5])); - /// ``` - #[allow(clippy::should_implement_trait)] - pub fn into_iter(self) -> Either - where - L: IntoIterator, - R: IntoIterator, - { - map_either!(self, inner => inner.into_iter()) - } - - /// Borrow the inner value as an iterator. - /// - /// This requires the `Left` and `Right` iterators to have the same item type. - /// See [`factor_iter`][Either::factor_iter] to iterate different types. - /// - /// ``` - /// use either::*; - /// - /// let left: Either<_, &[u32]> = Left(vec![2, 3]); - /// let mut right: Either, _> = Right(&[4, 5][..]); - /// let mut all = vec![1]; - /// all.extend(left.iter()); - /// all.extend(right.iter()); - /// assert_eq!(all, vec![1, 2, 3, 4, 5]); - /// ``` - pub fn iter(&self) -> Either<<&L as IntoIterator>::IntoIter, <&R as IntoIterator>::IntoIter> - where - for<'a> &'a L: IntoIterator, - for<'a> &'a R: IntoIterator::Item>, - { - map_either!(self, inner => inner.into_iter()) - } - - /// Mutably borrow the inner value as an iterator. - /// - /// This requires the `Left` and `Right` iterators to have the same item type. - /// See [`factor_iter_mut`][Either::factor_iter_mut] to iterate different types. - /// - /// ``` - /// use either::*; - /// - /// let mut left: Either<_, &mut [u32]> = Left(vec![2, 3]); - /// for l in left.iter_mut() { - /// *l *= *l - /// } - /// assert_eq!(left, Left(vec![4, 9])); - /// - /// let mut inner = [4, 5]; - /// let mut right: Either, _> = Right(&mut inner[..]); - /// for r in right.iter_mut() { - /// *r *= *r - /// } - /// assert_eq!(inner, [16, 25]); - /// ``` - pub fn iter_mut( - &mut self, - ) -> Either<<&mut L as IntoIterator>::IntoIter, <&mut R as IntoIterator>::IntoIter> - where - for<'a> &'a mut L: IntoIterator, - for<'a> &'a mut R: IntoIterator::Item>, - { - map_either!(self, inner => inner.into_iter()) - } - - /// Converts an `Either` of `Iterator`s to be an `Iterator` of `Either`s - /// - /// Unlike [`into_iter`][Either::into_iter], this does not require the - /// `Left` and `Right` iterators to have the same item type. 
- /// - /// ``` - /// use either::*; - /// let left: Either<_, Vec> = Left(&["hello"]); - /// assert_eq!(left.factor_into_iter().next(), Some(Left(&"hello"))); - /// - /// let right: Either<&[&str], _> = Right(vec![0, 1]); - /// assert_eq!(right.factor_into_iter().collect::>(), vec![Right(0), Right(1)]); - /// - /// ``` - // TODO(MSRV): doc(alias) was stabilized in Rust 1.48 - // #[doc(alias = "transpose")] - pub fn factor_into_iter(self) -> IterEither - where - L: IntoIterator, - R: IntoIterator, - { - IterEither::new(map_either!(self, inner => inner.into_iter())) - } - - /// Borrows an `Either` of `Iterator`s to be an `Iterator` of `Either`s - /// - /// Unlike [`iter`][Either::iter], this does not require the - /// `Left` and `Right` iterators to have the same item type. - /// - /// ``` - /// use either::*; - /// let left: Either<_, Vec> = Left(["hello"]); - /// assert_eq!(left.factor_iter().next(), Some(Left(&"hello"))); - /// - /// let right: Either<[&str; 2], _> = Right(vec![0, 1]); - /// assert_eq!(right.factor_iter().collect::>(), vec![Right(&0), Right(&1)]); - /// - /// ``` - pub fn factor_iter( - &self, - ) -> IterEither<<&L as IntoIterator>::IntoIter, <&R as IntoIterator>::IntoIter> - where - for<'a> &'a L: IntoIterator, - for<'a> &'a R: IntoIterator, - { - IterEither::new(map_either!(self, inner => inner.into_iter())) - } - - /// Mutably borrows an `Either` of `Iterator`s to be an `Iterator` of `Either`s - /// - /// Unlike [`iter_mut`][Either::iter_mut], this does not require the - /// `Left` and `Right` iterators to have the same item type. - /// - /// ``` - /// use either::*; - /// let mut left: Either<_, Vec> = Left(["hello"]); - /// left.factor_iter_mut().for_each(|x| *x.unwrap_left() = "goodbye"); - /// assert_eq!(left, Left(["goodbye"])); - /// - /// let mut right: Either<[&str; 2], _> = Right(vec![0, 1, 2]); - /// right.factor_iter_mut().for_each(|x| if let Right(r) = x { *r = -*r; }); - /// assert_eq!(right, Right(vec![0, -1, -2])); - /// - /// ``` - pub fn factor_iter_mut( - &mut self, - ) -> IterEither<<&mut L as IntoIterator>::IntoIter, <&mut R as IntoIterator>::IntoIter> - where - for<'a> &'a mut L: IntoIterator, - for<'a> &'a mut R: IntoIterator, - { - IterEither::new(map_either!(self, inner => inner.into_iter())) - } - - /// Return left value or given value - /// - /// Arguments passed to `left_or` are eagerly evaluated; if you are passing - /// the result of a function call, it is recommended to use - /// [`left_or_else`][Self::left_or_else], which is lazily evaluated. 
- /// - /// # Examples - /// - /// ``` - /// # use either::*; - /// let left: Either<&str, &str> = Left("left"); - /// assert_eq!(left.left_or("foo"), "left"); - /// - /// let right: Either<&str, &str> = Right("right"); - /// assert_eq!(right.left_or("left"), "left"); - /// ``` - pub fn left_or(self, other: L) -> L { - match self { - Either::Left(l) => l, - Either::Right(_) => other, - } - } - - /// Return left or a default - /// - /// # Examples - /// - /// ``` - /// # use either::*; - /// let left: Either = Left("left".to_string()); - /// assert_eq!(left.left_or_default(), "left"); - /// - /// let right: Either = Right(42); - /// assert_eq!(right.left_or_default(), String::default()); - /// ``` - pub fn left_or_default(self) -> L - where - L: Default, - { - match self { - Either::Left(l) => l, - Either::Right(_) => L::default(), - } - } - - /// Returns left value or computes it from a closure - /// - /// # Examples - /// - /// ``` - /// # use either::*; - /// let left: Either = Left("3".to_string()); - /// assert_eq!(left.left_or_else(|_| unreachable!()), "3"); - /// - /// let right: Either = Right(3); - /// assert_eq!(right.left_or_else(|x| x.to_string()), "3"); - /// ``` - pub fn left_or_else(self, f: F) -> L - where - F: FnOnce(R) -> L, - { - match self { - Either::Left(l) => l, - Either::Right(r) => f(r), - } - } - - /// Return right value or given value - /// - /// Arguments passed to `right_or` are eagerly evaluated; if you are passing - /// the result of a function call, it is recommended to use - /// [`right_or_else`][Self::right_or_else], which is lazily evaluated. - /// - /// # Examples - /// - /// ``` - /// # use either::*; - /// let right: Either<&str, &str> = Right("right"); - /// assert_eq!(right.right_or("foo"), "right"); - /// - /// let left: Either<&str, &str> = Left("left"); - /// assert_eq!(left.right_or("right"), "right"); - /// ``` - pub fn right_or(self, other: R) -> R { - match self { - Either::Left(_) => other, - Either::Right(r) => r, - } - } - - /// Return right or a default - /// - /// # Examples - /// - /// ``` - /// # use either::*; - /// let left: Either = Left("left".to_string()); - /// assert_eq!(left.right_or_default(), u32::default()); - /// - /// let right: Either = Right(42); - /// assert_eq!(right.right_or_default(), 42); - /// ``` - pub fn right_or_default(self) -> R - where - R: Default, - { - match self { - Either::Left(_) => R::default(), - Either::Right(r) => r, - } - } - - /// Returns right value or computes it from a closure - /// - /// # Examples - /// - /// ``` - /// # use either::*; - /// let left: Either = Left("3".to_string()); - /// assert_eq!(left.right_or_else(|x| x.parse().unwrap()), 3); - /// - /// let right: Either = Right(3); - /// assert_eq!(right.right_or_else(|_| unreachable!()), 3); - /// ``` - pub fn right_or_else(self, f: F) -> R - where - F: FnOnce(L) -> R, - { - match self { - Either::Left(l) => f(l), - Either::Right(r) => r, - } - } - - /// Returns the left value - /// - /// # Examples - /// - /// ``` - /// # use either::*; - /// let left: Either<_, ()> = Left(3); - /// assert_eq!(left.unwrap_left(), 3); - /// ``` - /// - /// # Panics - /// - /// When `Either` is a `Right` value - /// - /// ```should_panic - /// # use either::*; - /// let right: Either<(), _> = Right(3); - /// right.unwrap_left(); - /// ``` - pub fn unwrap_left(self) -> L - where - R: core::fmt::Debug, - { - match self { - Either::Left(l) => l, - Either::Right(r) => { - panic!("called `Either::unwrap_left()` on a `Right` value: {:?}", r) - } - } - } - - /// 
Returns the right value - /// - /// # Examples - /// - /// ``` - /// # use either::*; - /// let right: Either<(), _> = Right(3); - /// assert_eq!(right.unwrap_right(), 3); - /// ``` - /// - /// # Panics - /// - /// When `Either` is a `Left` value - /// - /// ```should_panic - /// # use either::*; - /// let left: Either<_, ()> = Left(3); - /// left.unwrap_right(); - /// ``` - pub fn unwrap_right(self) -> R - where - L: core::fmt::Debug, - { - match self { - Either::Right(r) => r, - Either::Left(l) => panic!("called `Either::unwrap_right()` on a `Left` value: {:?}", l), - } - } - - /// Returns the left value - /// - /// # Examples - /// - /// ``` - /// # use either::*; - /// let left: Either<_, ()> = Left(3); - /// assert_eq!(left.expect_left("value was Right"), 3); - /// ``` - /// - /// # Panics - /// - /// When `Either` is a `Right` value - /// - /// ```should_panic - /// # use either::*; - /// let right: Either<(), _> = Right(3); - /// right.expect_left("value was Right"); - /// ``` - pub fn expect_left(self, msg: &str) -> L - where - R: core::fmt::Debug, - { - match self { - Either::Left(l) => l, - Either::Right(r) => panic!("{}: {:?}", msg, r), - } - } - - /// Returns the right value - /// - /// # Examples - /// - /// ``` - /// # use either::*; - /// let right: Either<(), _> = Right(3); - /// assert_eq!(right.expect_right("value was Left"), 3); - /// ``` - /// - /// # Panics - /// - /// When `Either` is a `Left` value - /// - /// ```should_panic - /// # use either::*; - /// let left: Either<_, ()> = Left(3); - /// left.expect_right("value was Right"); - /// ``` - pub fn expect_right(self, msg: &str) -> R - where - L: core::fmt::Debug, - { - match self { - Either::Right(r) => r, - Either::Left(l) => panic!("{}: {:?}", msg, l), - } - } - - /// Convert the contained value into `T` - /// - /// # Examples - /// - /// ``` - /// # use either::*; - /// // Both u16 and u32 can be converted to u64. - /// let left: Either = Left(3u16); - /// assert_eq!(left.either_into::(), 3u64); - /// let right: Either = Right(7u32); - /// assert_eq!(right.either_into::(), 7u64); - /// ``` - pub fn either_into(self) -> T - where - L: Into, - R: Into, - { - for_both!(self, inner => inner.into()) - } -} - -impl Either, Option> { - /// Factors out `None` from an `Either` of [`Option`]. - /// - /// ``` - /// use either::*; - /// let left: Either<_, Option> = Left(Some(vec![0])); - /// assert_eq!(left.factor_none(), Some(Left(vec![0]))); - /// - /// let right: Either>, _> = Right(Some(String::new())); - /// assert_eq!(right.factor_none(), Some(Right(String::new()))); - /// ``` - // TODO(MSRV): doc(alias) was stabilized in Rust 1.48 - // #[doc(alias = "transpose")] - pub fn factor_none(self) -> Option> { - match self { - Left(l) => l.map(Either::Left), - Right(r) => r.map(Either::Right), - } - } -} - -impl Either, Result> { - /// Factors out a homogenous type from an `Either` of [`Result`]. - /// - /// Here, the homogeneous type is the `Err` type of the [`Result`]. 
- /// - /// ``` - /// use either::*; - /// let left: Either<_, Result> = Left(Ok(vec![0])); - /// assert_eq!(left.factor_err(), Ok(Left(vec![0]))); - /// - /// let right: Either, u32>, _> = Right(Ok(String::new())); - /// assert_eq!(right.factor_err(), Ok(Right(String::new()))); - /// ``` - // TODO(MSRV): doc(alias) was stabilized in Rust 1.48 - // #[doc(alias = "transpose")] - pub fn factor_err(self) -> Result, E> { - match self { - Left(l) => l.map(Either::Left), - Right(r) => r.map(Either::Right), - } - } -} - -impl Either, Result> { - /// Factors out a homogenous type from an `Either` of [`Result`]. - /// - /// Here, the homogeneous type is the `Ok` type of the [`Result`]. - /// - /// ``` - /// use either::*; - /// let left: Either<_, Result> = Left(Err(vec![0])); - /// assert_eq!(left.factor_ok(), Err(Left(vec![0]))); - /// - /// let right: Either>, _> = Right(Err(String::new())); - /// assert_eq!(right.factor_ok(), Err(Right(String::new()))); - /// ``` - // TODO(MSRV): doc(alias) was stabilized in Rust 1.48 - // #[doc(alias = "transpose")] - pub fn factor_ok(self) -> Result> { - match self { - Left(l) => l.map_err(Either::Left), - Right(r) => r.map_err(Either::Right), - } - } -} - -impl Either<(T, L), (T, R)> { - /// Factor out a homogeneous type from an either of pairs. - /// - /// Here, the homogeneous type is the first element of the pairs. - /// - /// ``` - /// use either::*; - /// let left: Either<_, (u32, String)> = Left((123, vec![0])); - /// assert_eq!(left.factor_first().0, 123); - /// - /// let right: Either<(u32, Vec), _> = Right((123, String::new())); - /// assert_eq!(right.factor_first().0, 123); - /// ``` - pub fn factor_first(self) -> (T, Either) { - match self { - Left((t, l)) => (t, Left(l)), - Right((t, r)) => (t, Right(r)), - } - } -} - -impl Either<(L, T), (R, T)> { - /// Factor out a homogeneous type from an either of pairs. - /// - /// Here, the homogeneous type is the second element of the pairs. - /// - /// ``` - /// use either::*; - /// let left: Either<_, (String, u32)> = Left((vec![0], 123)); - /// assert_eq!(left.factor_second().1, 123); - /// - /// let right: Either<(Vec, u32), _> = Right((String::new(), 123)); - /// assert_eq!(right.factor_second().1, 123); - /// ``` - pub fn factor_second(self) -> (Either, T) { - match self { - Left((l, t)) => (Left(l), t), - Right((r, t)) => (Right(r), t), - } - } -} - -impl Either { - /// Extract the value of an either over two equivalent types. - /// - /// ``` - /// use either::*; - /// - /// let left: Either<_, u32> = Left(123); - /// assert_eq!(left.into_inner(), 123); - /// - /// let right: Either = Right(123); - /// assert_eq!(right.into_inner(), 123); - /// ``` - pub fn into_inner(self) -> T { - for_both!(self, inner => inner) - } - - /// Map `f` over the contained value and return the result in the - /// corresponding variant. - /// - /// ``` - /// use either::*; - /// - /// let value: Either<_, i32> = Right(42); - /// - /// let other = value.map(|x| x * 2); - /// assert_eq!(other, Right(84)); - /// ``` - pub fn map(self, f: F) -> Either - where - F: FnOnce(T) -> M, - { - match self { - Left(l) => Left(f(l)), - Right(r) => Right(f(r)), - } - } -} - -impl Either<&L, &R> { - /// Maps an `Either<&L, &R>` to an `Either` by cloning the contents of - /// either branch. - pub fn cloned(self) -> Either - where - L: Clone, - R: Clone, - { - map_either!(self, inner => inner.clone()) - } - - /// Maps an `Either<&L, &R>` to an `Either` by copying the contents of - /// either branch. 
- pub fn copied(self) -> Either - where - L: Copy, - R: Copy, - { - map_either!(self, inner => *inner) - } -} - -impl Either<&mut L, &mut R> { - /// Maps an `Either<&mut L, &mut R>` to an `Either` by cloning the contents of - /// either branch. - pub fn cloned(self) -> Either - where - L: Clone, - R: Clone, - { - map_either!(self, inner => inner.clone()) - } - - /// Maps an `Either<&mut L, &mut R>` to an `Either` by copying the contents of - /// either branch. - pub fn copied(self) -> Either - where - L: Copy, - R: Copy, - { - map_either!(self, inner => *inner) - } -} - -/// Convert from `Result` to `Either` with `Ok => Right` and `Err => Left`. -impl From> for Either { - fn from(r: Result) -> Self { - match r { - Err(e) => Left(e), - Ok(o) => Right(o), - } - } -} - -/// Convert from `Either` to `Result` with `Right => Ok` and `Left => Err`. -impl From> for Result { - fn from(val: Either) -> Self { - match val { - Left(l) => Err(l), - Right(r) => Ok(r), - } - } -} - -/// `Either` is a future if both `L` and `R` are futures. -impl Future for Either -where - L: Future, - R: Future, -{ - type Output = L::Output; - - fn poll( - self: Pin<&mut Self>, - cx: &mut core::task::Context<'_>, - ) -> core::task::Poll { - for_both!(self.as_pin_mut(), inner => inner.poll(cx)) - } -} - -#[cfg(any(test, feature = "std"))] -/// `Either` implements `Read` if both `L` and `R` do. -/// -/// Requires crate feature `"std"` -impl Read for Either -where - L: Read, - R: Read, -{ - fn read(&mut self, buf: &mut [u8]) -> io::Result { - for_both!(self, inner => inner.read(buf)) - } - - fn read_exact(&mut self, buf: &mut [u8]) -> io::Result<()> { - for_both!(self, inner => inner.read_exact(buf)) - } - - fn read_to_end(&mut self, buf: &mut std::vec::Vec) -> io::Result { - for_both!(self, inner => inner.read_to_end(buf)) - } - - fn read_to_string(&mut self, buf: &mut std::string::String) -> io::Result { - for_both!(self, inner => inner.read_to_string(buf)) - } -} - -#[cfg(any(test, feature = "std"))] -/// `Either` implements `Seek` if both `L` and `R` do. -/// -/// Requires crate feature `"std"` -impl Seek for Either -where - L: Seek, - R: Seek, -{ - fn seek(&mut self, pos: SeekFrom) -> io::Result { - for_both!(self, inner => inner.seek(pos)) - } -} - -#[cfg(any(test, feature = "std"))] -/// Requires crate feature `"std"` -impl BufRead for Either -where - L: BufRead, - R: BufRead, -{ - fn fill_buf(&mut self) -> io::Result<&[u8]> { - for_both!(self, inner => inner.fill_buf()) - } - - fn consume(&mut self, amt: usize) { - for_both!(self, inner => inner.consume(amt)) - } - - fn read_until(&mut self, byte: u8, buf: &mut std::vec::Vec) -> io::Result { - for_both!(self, inner => inner.read_until(byte, buf)) - } - - fn read_line(&mut self, buf: &mut std::string::String) -> io::Result { - for_both!(self, inner => inner.read_line(buf)) - } -} - -#[cfg(any(test, feature = "std"))] -/// `Either` implements `Write` if both `L` and `R` do. 
-/// -/// Requires crate feature `"std"` -impl Write for Either -where - L: Write, - R: Write, -{ - fn write(&mut self, buf: &[u8]) -> io::Result { - for_both!(self, inner => inner.write(buf)) - } - - fn write_all(&mut self, buf: &[u8]) -> io::Result<()> { - for_both!(self, inner => inner.write_all(buf)) - } - - fn write_fmt(&mut self, fmt: fmt::Arguments<'_>) -> io::Result<()> { - for_both!(self, inner => inner.write_fmt(fmt)) - } - - fn flush(&mut self) -> io::Result<()> { - for_both!(self, inner => inner.flush()) - } -} - -impl AsRef for Either -where - L: AsRef, - R: AsRef, -{ - fn as_ref(&self) -> &Target { - for_both!(self, inner => inner.as_ref()) - } -} - -macro_rules! impl_specific_ref_and_mut { - ($t:ty, $($attr:meta),* ) => { - $(#[$attr])* - impl AsRef<$t> for Either - where L: AsRef<$t>, R: AsRef<$t> - { - fn as_ref(&self) -> &$t { - for_both!(self, inner => inner.as_ref()) - } - } - - $(#[$attr])* - impl AsMut<$t> for Either - where L: AsMut<$t>, R: AsMut<$t> - { - fn as_mut(&mut self) -> &mut $t { - for_both!(self, inner => inner.as_mut()) - } - } - }; -} - -impl_specific_ref_and_mut!(str,); -impl_specific_ref_and_mut!( - ::std::path::Path, - cfg(feature = "std"), - doc = "Requires crate feature `std`." -); -impl_specific_ref_and_mut!( - ::std::ffi::OsStr, - cfg(feature = "std"), - doc = "Requires crate feature `std`." -); -impl_specific_ref_and_mut!( - ::std::ffi::CStr, - cfg(feature = "std"), - doc = "Requires crate feature `std`." -); - -impl AsRef<[Target]> for Either -where - L: AsRef<[Target]>, - R: AsRef<[Target]>, -{ - fn as_ref(&self) -> &[Target] { - for_both!(self, inner => inner.as_ref()) - } -} - -impl AsMut for Either -where - L: AsMut, - R: AsMut, -{ - fn as_mut(&mut self) -> &mut Target { - for_both!(self, inner => inner.as_mut()) - } -} - -impl AsMut<[Target]> for Either -where - L: AsMut<[Target]>, - R: AsMut<[Target]>, -{ - fn as_mut(&mut self) -> &mut [Target] { - for_both!(self, inner => inner.as_mut()) - } -} - -impl Deref for Either -where - L: Deref, - R: Deref, -{ - type Target = L::Target; - - fn deref(&self) -> &Self::Target { - for_both!(self, inner => &**inner) - } -} - -impl DerefMut for Either -where - L: DerefMut, - R: DerefMut, -{ - fn deref_mut(&mut self) -> &mut Self::Target { - for_both!(self, inner => &mut *inner) - } -} - -#[cfg(any(test, feature = "std"))] -/// `Either` implements `Error` if *both* `L` and `R` implement it. 
-/// -/// Requires crate feature `"std"` -impl Error for Either -where - L: Error, - R: Error, -{ - fn source(&self) -> Option<&(dyn Error + 'static)> { - for_both!(self, inner => inner.source()) - } - - #[allow(deprecated)] - fn description(&self) -> &str { - for_both!(self, inner => inner.description()) - } - - #[allow(deprecated)] - fn cause(&self) -> Option<&dyn Error> { - for_both!(self, inner => inner.cause()) - } -} - -impl fmt::Display for Either -where - L: fmt::Display, - R: fmt::Display, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - for_both!(self, inner => inner.fmt(f)) - } -} - -impl fmt::Write for Either -where - L: fmt::Write, - R: fmt::Write, -{ - fn write_str(&mut self, s: &str) -> fmt::Result { - for_both!(self, inner => inner.write_str(s)) - } - - fn write_char(&mut self, c: char) -> fmt::Result { - for_both!(self, inner => inner.write_char(c)) - } - - fn write_fmt(&mut self, args: fmt::Arguments<'_>) -> fmt::Result { - for_both!(self, inner => inner.write_fmt(args)) - } -} - -#[test] -fn basic() { - let mut e = Left(2); - let r = Right(2); - assert_eq!(e, Left(2)); - e = r; - assert_eq!(e, Right(2)); - assert_eq!(e.left(), None); - assert_eq!(e.right(), Some(2)); - assert_eq!(e.as_ref().right(), Some(&2)); - assert_eq!(e.as_mut().right(), Some(&mut 2)); -} - -#[test] -fn macros() { - use std::string::String; - - fn a() -> Either { - let x: u32 = try_left!(Right(1337u32)); - Left(x * 2) - } - assert_eq!(a(), Right(1337)); - - fn b() -> Either { - Right(try_right!(Left("foo bar"))) - } - assert_eq!(b(), Left(String::from("foo bar"))); -} - -#[test] -fn deref() { - use std::string::String; - - fn is_str(_: &str) {} - let value: Either = Left(String::from("test")); - is_str(&value); -} - -#[test] -fn iter() { - let x = 3; - let mut iter = match x { - 3 => Left(0..10), - _ => Right(17..), - }; - - assert_eq!(iter.next(), Some(0)); - assert_eq!(iter.count(), 9); -} - -#[test] -fn seek() { - use std::io; - - let use_empty = false; - let mut mockdata = [0x00; 256]; - for (i, data) in mockdata.iter_mut().enumerate() { - *data = i as u8; - } - - let mut reader = if use_empty { - // Empty didn't impl Seek until Rust 1.51 - Left(io::Cursor::new([])) - } else { - Right(io::Cursor::new(&mockdata[..])) - }; - - let mut buf = [0u8; 16]; - assert_eq!(reader.read(&mut buf).unwrap(), buf.len()); - assert_eq!(buf, mockdata[..buf.len()]); - - // the first read should advance the cursor and return the next 16 bytes thus the `ne` - assert_eq!(reader.read(&mut buf).unwrap(), buf.len()); - assert_ne!(buf, mockdata[..buf.len()]); - - // if the seek operation fails it should read 16..31 instead of 0..15 - reader.seek(io::SeekFrom::Start(0)).unwrap(); - assert_eq!(reader.read(&mut buf).unwrap(), buf.len()); - assert_eq!(buf, mockdata[..buf.len()]); -} - -#[test] -fn read_write() { - use std::io; - - let use_stdio = false; - let mockdata = [0xff; 256]; - - let mut reader = if use_stdio { - Left(io::stdin()) - } else { - Right(&mockdata[..]) - }; - - let mut buf = [0u8; 16]; - assert_eq!(reader.read(&mut buf).unwrap(), buf.len()); - assert_eq!(&buf, &mockdata[..buf.len()]); - - let mut mockbuf = [0u8; 256]; - let mut writer = if use_stdio { - Left(io::stdout()) - } else { - Right(&mut mockbuf[..]) - }; - - let buf = [1u8; 16]; - assert_eq!(writer.write(&buf).unwrap(), buf.len()); -} - -#[test] -fn error() { - let invalid_utf8 = b"\xff"; - #[allow(invalid_from_utf8)] - let res = if let Err(error) = ::std::str::from_utf8(invalid_utf8) { - Err(Left(error)) - } else if let Err(error) 
= "x".parse::() { - Err(Right(error)) - } else { - Ok(()) - }; - assert!(res.is_err()); - #[allow(deprecated)] - res.unwrap_err().description(); // make sure this can be called -} - -/// A helper macro to check if AsRef and AsMut are implemented for a given type. -macro_rules! check_t { - ($t:ty) => {{ - fn check_ref>() {} - fn propagate_ref, T2: AsRef<$t>>() { - check_ref::>() - } - fn check_mut>() {} - fn propagate_mut, T2: AsMut<$t>>() { - check_mut::>() - } - }}; -} - -// This "unused" method is here to ensure that compilation doesn't fail on given types. -fn _unsized_ref_propagation() { - check_t!(str); - - fn check_array_ref, Item>() {} - fn check_array_mut, Item>() {} - - fn propagate_array_ref, T2: AsRef<[Item]>, Item>() { - check_array_ref::, _>() - } - - fn propagate_array_mut, T2: AsMut<[Item]>, Item>() { - check_array_mut::, _>() - } -} - -// This "unused" method is here to ensure that compilation doesn't fail on given types. -#[cfg(feature = "std")] -fn _unsized_std_propagation() { - check_t!(::std::path::Path); - check_t!(::std::ffi::OsStr); - check_t!(::std::ffi::CStr); -} diff --git a/vendor/either/src/serde_untagged.rs b/vendor/either/src/serde_untagged.rs deleted file mode 100644 index 72078c3ec8e88e..00000000000000 --- a/vendor/either/src/serde_untagged.rs +++ /dev/null @@ -1,69 +0,0 @@ -//! Untagged serialization/deserialization support for Either. -//! -//! `Either` uses default, externally-tagged representation. -//! However, sometimes it is useful to support several alternative types. -//! For example, we may have a field which is generally Map -//! but in typical cases Vec would suffice, too. -//! -//! ```rust -//! # fn main() -> Result<(), Box> { -//! use either::Either; -//! use std::collections::HashMap; -//! -//! #[derive(serde::Serialize, serde::Deserialize, Debug)] -//! #[serde(transparent)] -//! struct IntOrString { -//! #[serde(with = "either::serde_untagged")] -//! inner: Either, HashMap> -//! }; -//! -//! // serialization -//! let data = IntOrString { -//! inner: Either::Left(vec!["Hello".to_string()]) -//! }; -//! // notice: no tags are emitted. -//! assert_eq!(serde_json::to_string(&data)?, r#"["Hello"]"#); -//! -//! // deserialization -//! let data: IntOrString = serde_json::from_str( -//! r#"{"a": 0, "b": 14}"# -//! )?; -//! println!("found {:?}", data); -//! # Ok(()) -//! # } -//! ``` - -use serde::{Deserialize, Deserializer, Serialize, Serializer}; - -#[derive(serde::Serialize, serde::Deserialize)] -#[serde(untagged)] -enum Either { - Left(L), - Right(R), -} - -pub fn serialize(this: &super::Either, serializer: S) -> Result -where - S: Serializer, - L: Serialize, - R: Serialize, -{ - let untagged = match this { - super::Either::Left(left) => Either::Left(left), - super::Either::Right(right) => Either::Right(right), - }; - untagged.serialize(serializer) -} - -pub fn deserialize<'de, L, R, D>(deserializer: D) -> Result, D::Error> -where - D: Deserializer<'de>, - L: Deserialize<'de>, - R: Deserialize<'de>, -{ - match Either::deserialize(deserializer) { - Ok(Either::Left(left)) => Ok(super::Either::Left(left)), - Ok(Either::Right(right)) => Ok(super::Either::Right(right)), - Err(error) => Err(error), - } -} diff --git a/vendor/either/src/serde_untagged_optional.rs b/vendor/either/src/serde_untagged_optional.rs deleted file mode 100644 index fb3239ace1d5e4..00000000000000 --- a/vendor/either/src/serde_untagged_optional.rs +++ /dev/null @@ -1,74 +0,0 @@ -//! Untagged serialization/deserialization support for Option>. -//! -//! 
`Either` uses default, externally-tagged representation. -//! However, sometimes it is useful to support several alternative types. -//! For example, we may have a field which is generally Map -//! but in typical cases Vec would suffice, too. -//! -//! ```rust -//! # fn main() -> Result<(), Box> { -//! use either::Either; -//! use std::collections::HashMap; -//! -//! #[derive(serde::Serialize, serde::Deserialize, Debug)] -//! #[serde(transparent)] -//! struct IntOrString { -//! #[serde(with = "either::serde_untagged_optional")] -//! inner: Option, HashMap>> -//! }; -//! -//! // serialization -//! let data = IntOrString { -//! inner: Some(Either::Left(vec!["Hello".to_string()])) -//! }; -//! // notice: no tags are emitted. -//! assert_eq!(serde_json::to_string(&data)?, r#"["Hello"]"#); -//! -//! // deserialization -//! let data: IntOrString = serde_json::from_str( -//! r#"{"a": 0, "b": 14}"# -//! )?; -//! println!("found {:?}", data); -//! # Ok(()) -//! # } -//! ``` - -use serde::{Deserialize, Deserializer, Serialize, Serializer}; - -#[derive(Serialize, Deserialize)] -#[serde(untagged)] -enum Either { - Left(L), - Right(R), -} - -pub fn serialize( - this: &Option>, - serializer: S, -) -> Result -where - S: Serializer, - L: Serialize, - R: Serialize, -{ - let untagged = match this { - Some(super::Either::Left(left)) => Some(Either::Left(left)), - Some(super::Either::Right(right)) => Some(Either::Right(right)), - None => None, - }; - untagged.serialize(serializer) -} - -pub fn deserialize<'de, L, R, D>(deserializer: D) -> Result>, D::Error> -where - D: Deserializer<'de>, - L: Deserialize<'de>, - R: Deserialize<'de>, -{ - match Option::deserialize(deserializer) { - Ok(Some(Either::Left(left))) => Ok(Some(super::Either::Left(left))), - Ok(Some(Either::Right(right))) => Ok(Some(super::Either::Right(right))), - Ok(None) => Ok(None), - Err(error) => Err(error), - } -} diff --git a/vendor/glob/.cargo-checksum.json b/vendor/glob/.cargo-checksum.json deleted file mode 100644 index b5a6521b19a1ae..00000000000000 --- a/vendor/glob/.cargo-checksum.json +++ /dev/null @@ -1 +0,0 @@ -{"files":{".cargo_vcs_info.json":"861149153b886c52f0f5b13f9401c61e7cb7581d8a1e0874be0c57983d232c9e",".github/dependabot.yml":"8e93631a765d23b8eeabcf3c5da80c850f2cab429c4e5c2c8d81f562f522bb3c",".github/workflows/publish.yml":"1bfb8b9fb856e6dfeaf481d7a440071e3dc8248f32b5c63ef03cb285d7f10b6e",".github/workflows/rust.yml":"b8738c208278b79af3e540339461065596907b9508208974c3c5b68f1a9e13b9","CHANGELOG.md":"1cf3525be59a348ffcda444cac1f16eba48b5a9177587ecd8d55af5b5a097a73","Cargo.lock":"745d71fb944e4c1ff5fe99d4cc61c12be4d602509692ca3b662d8cf1d0131c48","Cargo.toml":"1962525cc2a684e334a07ad2996eb587b5dbf678e58eb65733471570b49c0b6c","Cargo.toml.orig":"c5dde6f8a5a9bfe170a059cd67fa3cde5897a91da1b56bb036d405475cb3dadb","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"6485b8ed310d3f0340bf1ad1f47645069ce4069dcc6bb46c7d5c6faf41de1fdb","README.md":"0ceda5714ffd02ecc88084521bcd258f90ce4b01eca51d0d1cb602aaf5c47288","src/lib.rs":"2d714448a69d329a6dc51da264555321d2d20e6e84842a22036e63b4509e87ec","tests/glob-std.rs":"720727be7dde4d11d581c00abc1ac48fff864aac6cfedc13858d4f13bb38ff79","triagebot.toml":"a135e10c777cd13459559bdf74fb704c1379af7c9b0f70bc49fa6f5a837daa81"},"package":"0cc23270f6e1808e30a928bdc84dea0b9b4136a8bc82338574f23baf47bbd280"} \ No newline at end of file diff --git a/vendor/glob/.cargo_vcs_info.json b/vendor/glob/.cargo_vcs_info.json deleted file mode 100644 index 
195cd9f9fe2262..00000000000000 --- a/vendor/glob/.cargo_vcs_info.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "git": { - "sha1": "952da299a3a98893805133ec852ab29877e64e98" - }, - "path_in_vcs": "" -} \ No newline at end of file diff --git a/vendor/glob/.github/dependabot.yml b/vendor/glob/.github/dependabot.yml deleted file mode 100644 index de9707038a5771..00000000000000 --- a/vendor/glob/.github/dependabot.yml +++ /dev/null @@ -1,13 +0,0 @@ -version: 2 -updates: - - package-ecosystem: "cargo" - directory: "/" - schedule: - interval: "monthly" - open-pull-requests-limit: 10 - ignore: - - dependency-name: "tempdir" - - package-ecosystem: "github-actions" - directory: "/" - schedule: - interval: "monthly" diff --git a/vendor/glob/.github/workflows/publish.yml b/vendor/glob/.github/workflows/publish.yml deleted file mode 100644 index e715c61871fdda..00000000000000 --- a/vendor/glob/.github/workflows/publish.yml +++ /dev/null @@ -1,27 +0,0 @@ -name: Release-plz - -permissions: - pull-requests: write - contents: write - -on: - push: - branches: - - master - -jobs: - release-plz: - name: Release-plz - runs-on: ubuntu-latest - steps: - - name: Checkout repository - uses: actions/checkout@v4 - with: - fetch-depth: 0 - - name: Install Rust (rustup) - run: rustup update nightly --no-self-update && rustup default nightly - - name: Run release-plz - uses: MarcoIeni/release-plz-action@v0.5 - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - CARGO_REGISTRY_TOKEN: ${{ secrets.CARGO_REGISTRY_TOKEN }} diff --git a/vendor/glob/.github/workflows/rust.yml b/vendor/glob/.github/workflows/rust.yml deleted file mode 100644 index e16d2a9a066f1b..00000000000000 --- a/vendor/glob/.github/workflows/rust.yml +++ /dev/null @@ -1,99 +0,0 @@ -name: CI - -env: - CARGO_TERM_VERBOSE: true - RUSTDOCFLAGS: -Dwarnings - RUSTFLAGS: -Dwarnings - -on: - pull_request: - push: - branches: - - master - -jobs: - test: - name: Tests - runs-on: ${{ matrix.os }} - strategy: - matrix: - channel: - - stable - - nightly - - 1.63.0 # MSRV of test dependencies - os: - - macos-13 # x86 MacOS - - macos-15 # Arm MacOS - - windows-2025 - - ubuntu-24.04 - include: - - channel: beta - os: ubuntu-24.04 - - steps: - - name: Checkout repository - uses: actions/checkout@v4 - - - name: Update rust - run: | - rustup default ${{ matrix.channel }} - rustup update --no-self-update - - - run: cargo test --all - - clippy: - name: Clippy - runs-on: ubuntu-24.04 - steps: - - name: Checkout repository - uses: actions/checkout@v4 - - - name: Update rust - run: | - # use beta since it gives us near-latest fixes but isn't as volatile as nightly - rustup default beta - rustup component add clippy - rustup update --no-self-update - - run: cargo clippy --all -- -Aclippy::while_let_loop - - msrv: - name: Check building with the MSRV - runs-on: ubuntu-24.04 - steps: - - name: Checkout repository - uses: actions/checkout@v4 - - - name: Update rust - run: | - rustup default 1.63.0 - rustup update --no-self-update - - - run: cargo build - - rustfmt: - name: Rustfmt - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@master - - name: Install Rust - run: | - rustup default nightly - rustup update --no-self-update - rustup component add rustfmt - - run: cargo fmt -- --check - - success: - needs: - - test - - clippy - - msrv - - rustfmt - runs-on: ubuntu-latest - # GitHub branch protection is exceedingly silly and treats "jobs skipped because a dependency - # failed" as success. 
So we have to do some contortions to ensure the job fails if any of its - # dependencies fails. - if: always() # make sure this is never "skipped" - steps: - # Manually check the status of all dependencies. `if: failure()` does not work. - - name: check if any dependency failed - run: jq --exit-status 'all(.result == "success")' <<< '${{ toJson(needs) }}' diff --git a/vendor/glob/CHANGELOG.md b/vendor/glob/CHANGELOG.md deleted file mode 100644 index 52d7c25af7fdeb..00000000000000 --- a/vendor/glob/CHANGELOG.md +++ /dev/null @@ -1,44 +0,0 @@ -# Changelog - -All notable changes to this project will be documented in this file. - -The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), -and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). - -## [Unreleased] - -## [0.3.3](https://github.com/rust-lang/glob/compare/v0.3.2...v0.3.3) - 2025-08-11 - -- Optimize memory allocations ([#147](https://github.com/rust-lang/glob/pull/147)) -- Bump the MSRV to 1.63 ([#172](https://github.com/rust-lang/glob/pull/172)) -- Fix spelling in pattern documentation ([#164](https://github.com/rust-lang/glob/pull/164)) -- Fix version numbers and some formatting ([#157](https://github.com/rust-lang/glob/pull/157)) -- Style fixes ([#137](https://github.com/rust-lang/glob/pull/137)) - -## [0.3.2](https://github.com/rust-lang/glob/compare/v0.3.1...v0.3.2) - 2024-12-28 - -## What's Changed -* Add fs::symlink_metadata to detect broken symlinks by @kyoheiu in https://github.com/rust-lang/glob/pull/105 -* Add support for windows verbatim disk paths by @nico-abram in https://github.com/rust-lang/glob/pull/112 -* Respect `require_literal_leading_dot` option in `glob_with` method for path components by @JohnTitor in https://github.com/rust-lang/glob/pull/128 -* Harden tests for symlink by @JohnTitor in https://github.com/rust-lang/glob/pull/127 -* Remove "extern crate" directions from README by @zmitchell in https://github.com/rust-lang/glob/pull/131 -* Add FIXME for tempdir by @JohnTitor in https://github.com/rust-lang/glob/pull/126 -* Cache information about file type by @Kobzol in https://github.com/rust-lang/glob/pull/135 -* Document the behaviour of ** with files by @Wilfred in https://github.com/rust-lang/glob/pull/138 -* Add dependabot by @oriontvv in https://github.com/rust-lang/glob/pull/139 -* Bump actions/checkout from 3 to 4 by @dependabot in https://github.com/rust-lang/glob/pull/140 -* Check only (no longer test) at the MSRV by @tgross35 in https://github.com/rust-lang/glob/pull/151 -* Add release-plz for automated releases by @tgross35 in https://github.com/rust-lang/glob/pull/150 - -## New Contributors -* @kyoheiu made their first contribution in https://github.com/rust-lang/glob/pull/105 -* @nico-abram made their first contribution in https://github.com/rust-lang/glob/pull/112 -* @zmitchell made their first contribution in https://github.com/rust-lang/glob/pull/131 -* @Kobzol made their first contribution in https://github.com/rust-lang/glob/pull/135 -* @Wilfred made their first contribution in https://github.com/rust-lang/glob/pull/138 -* @oriontvv made their first contribution in https://github.com/rust-lang/glob/pull/139 -* @dependabot made their first contribution in https://github.com/rust-lang/glob/pull/140 -* @tgross35 made their first contribution in https://github.com/rust-lang/glob/pull/151 - -**Full Changelog**: https://github.com/rust-lang/glob/compare/0.3.1...0.3.2 diff --git a/vendor/glob/Cargo.lock b/vendor/glob/Cargo.lock deleted file 
mode 100644 index d1da04baabfc16..00000000000000 --- a/vendor/glob/Cargo.lock +++ /dev/null @@ -1,107 +0,0 @@ -# This file is automatically @generated by Cargo. -# It is not intended for manual editing. -version = 3 - -[[package]] -name = "doc-comment" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fea41bba32d969b513997752735605054bc0dfa92b4c56bf1189f2e174be7a10" - -[[package]] -name = "fuchsia-cprng" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a06f77d526c1a601b7c4cdd98f54b5eaabffc14d5f2f0296febdc7f357c6d3ba" - -[[package]] -name = "glob" -version = "0.3.3" -dependencies = [ - "doc-comment", - "tempdir", -] - -[[package]] -name = "libc" -version = "0.2.175" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a82ae493e598baaea5209805c49bbf2ea7de956d50d7da0da1164f9c6d28543" - -[[package]] -name = "rand" -version = "0.4.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "552840b97013b1a26992c11eac34bdd778e464601a4c2054b5f0bff7c6761293" -dependencies = [ - "fuchsia-cprng", - "libc", - "rand_core 0.3.1", - "rdrand", - "winapi", -] - -[[package]] -name = "rand_core" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a6fdeb83b075e8266dcc8762c22776f6877a63111121f5f8c7411e5be7eed4b" -dependencies = [ - "rand_core 0.4.2", -] - -[[package]] -name = "rand_core" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c33a3c44ca05fa6f1807d8e6743f3824e8509beca625669633be0acbdf509dc" - -[[package]] -name = "rdrand" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "678054eb77286b51581ba43620cc911abf02758c91f93f479767aed0f90458b2" -dependencies = [ - "rand_core 0.3.1", -] - -[[package]] -name = "remove_dir_all" -version = "0.5.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3acd125665422973a33ac9d3dd2df85edad0f4ae9b00dafb1a05e43a9f5ef8e7" -dependencies = [ - "winapi", -] - -[[package]] -name = "tempdir" -version = "0.3.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "15f2b5fb00ccdf689e0149d1b1b3c03fead81c2b37735d812fa8bddbbf41b6d8" -dependencies = [ - "rand", - "remove_dir_all", -] - -[[package]] -name = "winapi" -version = "0.3.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" -dependencies = [ - "winapi-i686-pc-windows-gnu", - "winapi-x86_64-pc-windows-gnu", -] - -[[package]] -name = "winapi-i686-pc-windows-gnu" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" - -[[package]] -name = "winapi-x86_64-pc-windows-gnu" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" diff --git a/vendor/glob/Cargo.toml b/vendor/glob/Cargo.toml deleted file mode 100644 index c72d5c564c7b11..00000000000000 --- a/vendor/glob/Cargo.toml +++ /dev/null @@ -1,45 +0,0 @@ -# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO -# -# When uploading crates to the registry Cargo will automatically -# "normalize" Cargo.toml files for maximal compatibility -# with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g., crates.io) 
dependencies. -# -# If you are reading this file be aware that the original Cargo.toml -# will likely look very different (and much more reasonable). -# See Cargo.toml.orig for the original contents. - -[package] -rust-version = "1.63.0" -name = "glob" -version = "0.3.3" -authors = ["The Rust Project Developers"] -build = false -autolib = false -autobins = false -autoexamples = false -autotests = false -autobenches = false -description = """ -Support for matching file paths against Unix shell style patterns. -""" -homepage = "https://github.com/rust-lang/glob" -documentation = "https://docs.rs/glob" -readme = "README.md" -categories = ["filesystem"] -license = "MIT OR Apache-2.0" -repository = "https://github.com/rust-lang/glob" - -[lib] -name = "glob" -path = "src/lib.rs" - -[[test]] -name = "glob-std" -path = "tests/glob-std.rs" - -[dev-dependencies.doc-comment] -version = "0.3" - -[dev-dependencies.tempdir] -version = "0.3" diff --git a/vendor/glob/LICENSE-APACHE b/vendor/glob/LICENSE-APACHE deleted file mode 100644 index 16fe87b06e802f..00000000000000 --- a/vendor/glob/LICENSE-APACHE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - -2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - -3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - -4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - -5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - -6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - -8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - -Copyright [yyyy] [name of copyright owner] - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. diff --git a/vendor/glob/LICENSE-MIT b/vendor/glob/LICENSE-MIT deleted file mode 100644 index 39d4bdb5acd313..00000000000000 --- a/vendor/glob/LICENSE-MIT +++ /dev/null @@ -1,25 +0,0 @@ -Copyright (c) 2014 The Rust Project Developers - -Permission is hereby granted, free of charge, to any -person obtaining a copy of this software and associated -documentation files (the "Software"), to deal in the -Software without restriction, including without -limitation the rights to use, copy, modify, merge, -publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software -is furnished to do so, subject to the following -conditions: - -The above copyright notice and this permission notice -shall be included in all copies or substantial portions -of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT -SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -DEALINGS IN THE SOFTWARE. diff --git a/vendor/glob/README.md b/vendor/glob/README.md deleted file mode 100644 index 3ad9ff8b41fff2..00000000000000 --- a/vendor/glob/README.md +++ /dev/null @@ -1,38 +0,0 @@ -glob -==== - -Support for matching file paths against Unix shell style patterns. - -[![Continuous integration](https://github.com/rust-lang/glob/actions/workflows/rust.yml/badge.svg)](https://github.com/rust-lang/glob/actions/workflows/rust.yml) - -[Documentation](https://docs.rs/glob) - -## Usage - -To use `glob`, add this to your `Cargo.toml`: - -```toml -[dependencies] -glob = "0.3.2" -``` - -If you're using Rust 1.30 or earlier, or edition 2015, add this to your crate root: - -```rust -extern crate glob; -``` - -## Examples - -Print all jpg files in /media/ and all of its subdirectories. - -```rust -use glob::glob; - -for entry in glob("/media/**/*.jpg").expect("Failed to read glob pattern") { - match entry { - Ok(path) => println!("{:?}", path.display()), - Err(e) => println!("{:?}", e), - } -} -``` diff --git a/vendor/glob/src/lib.rs b/vendor/glob/src/lib.rs deleted file mode 100644 index 133a17a3343811..00000000000000 --- a/vendor/glob/src/lib.rs +++ /dev/null @@ -1,1511 +0,0 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! Support for matching file paths against Unix shell style patterns. -//! -//! The `glob` and `glob_with` functions allow querying the filesystem for all -//! files that match a particular pattern (similar to the libc `glob` function). -//! The methods on the `Pattern` type provide functionality for checking if -//! individual paths match a particular pattern (similar to the libc `fnmatch` -//! function). -//! -//! For consistency across platforms, and for Windows support, this module -//! is implemented entirely in Rust rather than deferring to the libc -//! `glob`/`fnmatch` functions. -//! -//! # Examples -//! -//! To print all jpg files in `/media/` and all of its subdirectories. -//! -//! ```rust,no_run -//! use glob::glob; -//! -//! for entry in glob("/media/**/*.jpg").expect("Failed to read glob pattern") { -//! match entry { -//! Ok(path) => println!("{:?}", path.display()), -//! Err(e) => println!("{:?}", e), -//! } -//! } -//! ``` -//! -//! To print all files containing the letter "a", case insensitive, in a `local` -//! directory relative to the current working directory. This ignores errors -//! instead of printing them. -//! -//! ```rust,no_run -//! use glob::glob_with; -//! use glob::MatchOptions; -//! -//! let options = MatchOptions { -//! case_sensitive: false, -//! require_literal_separator: false, -//! require_literal_leading_dot: false, -//! }; -//! for entry in glob_with("local/*a*", options).unwrap() { -//! if let Ok(path) = entry { -//! println!("{:?}", path.display()) -//! } -//! } -//! 
``` - -#![doc( - html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png", - html_favicon_url = "https://www.rust-lang.org/favicon.ico", - html_root_url = "https://docs.rs/glob/0.3.1" -)] -#![deny(missing_docs)] -#![allow(clippy::while_let_loop)] - -#[cfg(test)] -#[macro_use] -extern crate doc_comment; - -#[cfg(test)] -doctest!("../README.md"); - -use std::cmp; -use std::cmp::Ordering; -use std::error::Error; -use std::fmt; -use std::fs; -use std::fs::DirEntry; -use std::io; -use std::ops::Deref; -use std::path::{self, Component, Path, PathBuf}; -use std::str::FromStr; - -use CharSpecifier::{CharRange, SingleChar}; -use MatchResult::{EntirePatternDoesntMatch, Match, SubPatternDoesntMatch}; -use PatternToken::AnyExcept; -use PatternToken::{AnyChar, AnyRecursiveSequence, AnySequence, AnyWithin, Char}; - -/// An iterator that yields `Path`s from the filesystem that match a particular -/// pattern. -/// -/// Note that it yields `GlobResult` in order to report any `IoErrors` that may -/// arise during iteration. If a directory matches but is unreadable, -/// thereby preventing its contents from being checked for matches, a -/// `GlobError` is returned to express this. -/// -/// See the `glob` function for more details. -#[derive(Debug)] -pub struct Paths { - dir_patterns: Vec, - require_dir: bool, - options: MatchOptions, - todo: Vec>, - scope: Option, -} - -/// Return an iterator that produces all the `Path`s that match the given -/// pattern using default match options, which may be absolute or relative to -/// the current working directory. -/// -/// This may return an error if the pattern is invalid. -/// -/// This method uses the default match options and is equivalent to calling -/// `glob_with(pattern, MatchOptions::new())`. Use `glob_with` directly if you -/// want to use non-default match options. -/// -/// When iterating, each result is a `GlobResult` which expresses the -/// possibility that there was an `IoError` when attempting to read the contents -/// of the matched path. In other words, each item returned by the iterator -/// will either be an `Ok(Path)` if the path matched, or an `Err(GlobError)` if -/// the path (partially) matched _but_ its contents could not be read in order -/// to determine if its contents matched. -/// -/// See the `Paths` documentation for more information. -/// -/// # Examples -/// -/// Consider a directory `/media/pictures` containing only the files -/// `kittens.jpg`, `puppies.jpg` and `hamsters.gif`: -/// -/// ```rust,no_run -/// use glob::glob; -/// -/// for entry in glob("/media/pictures/*.jpg").unwrap() { -/// match entry { -/// Ok(path) => println!("{:?}", path.display()), -/// -/// // if the path matched but was unreadable, -/// // thereby preventing its contents from matching -/// Err(e) => println!("{:?}", e), -/// } -/// } -/// ``` -/// -/// The above code will print: -/// -/// ```ignore -/// /media/pictures/kittens.jpg -/// /media/pictures/puppies.jpg -/// ``` -/// -/// If you want to ignore unreadable paths, you can use something like -/// `filter_map`: -/// -/// ```rust -/// use glob::glob; -/// use std::result::Result; -/// -/// for path in glob("/media/pictures/*.jpg").unwrap().filter_map(Result::ok) { -/// println!("{}", path.display()); -/// } -/// ``` -/// Paths are yielded in alphabetical order. 
-pub fn glob(pattern: &str) -> Result { - glob_with(pattern, MatchOptions::new()) -} - -/// Return an iterator that produces all the `Path`s that match the given -/// pattern using the specified match options, which may be absolute or relative -/// to the current working directory. -/// -/// This may return an error if the pattern is invalid. -/// -/// This function accepts Unix shell style patterns as described by -/// `Pattern::new(..)`. The options given are passed through unchanged to -/// `Pattern::matches_with(..)` with the exception that -/// `require_literal_separator` is always set to `true` regardless of the value -/// passed to this function. -/// -/// Paths are yielded in alphabetical order. -pub fn glob_with(pattern: &str, options: MatchOptions) -> Result { - #[cfg(windows)] - fn check_windows_verbatim(p: &Path) -> bool { - match p.components().next() { - Some(Component::Prefix(ref p)) => { - // Allow VerbatimDisk paths. std canonicalize() generates them, and they work fine - p.kind().is_verbatim() - && if let std::path::Prefix::VerbatimDisk(_) = p.kind() { - false - } else { - true - } - } - _ => false, - } - } - #[cfg(not(windows))] - fn check_windows_verbatim(_: &Path) -> bool { - false - } - - #[cfg(windows)] - fn to_scope(p: &Path) -> PathBuf { - // FIXME handle volume relative paths here - p.to_path_buf() - } - #[cfg(not(windows))] - fn to_scope(p: &Path) -> PathBuf { - p.to_path_buf() - } - - // make sure that the pattern is valid first, else early return with error - let _ = Pattern::new(pattern)?; - - let mut components = Path::new(pattern).components().peekable(); - loop { - match components.peek() { - Some(&Component::Prefix(..)) | Some(&Component::RootDir) => { - components.next(); - } - _ => break, - } - } - let rest = components.map(|s| s.as_os_str()).collect::(); - let normalized_pattern = Path::new(pattern).iter().collect::(); - let root_len = normalized_pattern.to_str().unwrap().len() - rest.to_str().unwrap().len(); - let root = if root_len > 0 { - Some(Path::new(&pattern[..root_len])) - } else { - None - }; - - if root_len > 0 && check_windows_verbatim(root.unwrap()) { - // FIXME: How do we want to handle verbatim paths? I'm inclined to - // return nothing, since we can't very well find all UNC shares with a - // 1-letter server name. - return Ok(Paths { - dir_patterns: Vec::new(), - require_dir: false, - options, - todo: Vec::new(), - scope: None, - }); - } - - let scope = root.map_or_else(|| PathBuf::from("."), to_scope); - let scope = PathWrapper::from_path(scope); - - let mut dir_patterns = Vec::new(); - let components = - pattern[cmp::min(root_len, pattern.len())..].split_terminator(path::is_separator); - - for component in components { - dir_patterns.push(Pattern::new(component)?); - } - - if root_len == pattern.len() { - dir_patterns.push(Pattern { - original: "".to_string(), - tokens: Vec::new(), - is_recursive: false, - has_metachars: false, - }); - } - - let last_is_separator = pattern.chars().next_back().map(path::is_separator); - let require_dir = last_is_separator == Some(true); - let todo = Vec::new(); - - Ok(Paths { - dir_patterns, - require_dir, - options, - todo, - scope: Some(scope), - }) -} - -/// A glob iteration error. -/// -/// This is typically returned when a particular path cannot be read -/// to determine if its contents match the glob pattern. This is possible -/// if the program lacks the appropriate permissions, for example. 
-#[derive(Debug)] -pub struct GlobError { - path: PathBuf, - error: io::Error, -} - -impl GlobError { - /// The Path that the error corresponds to. - pub fn path(&self) -> &Path { - &self.path - } - - /// The error in question. - pub fn error(&self) -> &io::Error { - &self.error - } - - /// Consumes self, returning the _raw_ underlying `io::Error` - pub fn into_error(self) -> io::Error { - self.error - } -} - -impl Error for GlobError { - #[allow(deprecated)] - fn description(&self) -> &str { - self.error.description() - } - - #[allow(unknown_lints, bare_trait_objects)] - fn cause(&self) -> Option<&Error> { - Some(&self.error) - } -} - -impl fmt::Display for GlobError { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!( - f, - "attempting to read `{}` resulted in an error: {}", - self.path.display(), - self.error - ) - } -} - -#[derive(Debug)] -struct PathWrapper { - path: PathBuf, - is_directory: bool, -} - -impl PathWrapper { - fn from_dir_entry(path: PathBuf, e: DirEntry) -> Self { - let is_directory = e - .file_type() - .ok() - .and_then(|file_type| { - // We need to use fs::metadata to resolve the actual path - // if it's a symlink. - if file_type.is_symlink() { - None - } else { - Some(file_type.is_dir()) - } - }) - .or_else(|| fs::metadata(&path).map(|m| m.is_dir()).ok()) - .unwrap_or(false); - Self { path, is_directory } - } - fn from_path(path: PathBuf) -> Self { - let is_directory = fs::metadata(&path).map(|m| m.is_dir()).unwrap_or(false); - Self { path, is_directory } - } - - fn into_path(self) -> PathBuf { - self.path - } -} - -impl Deref for PathWrapper { - type Target = Path; - - fn deref(&self) -> &Self::Target { - self.path.deref() - } -} - -impl AsRef for PathWrapper { - fn as_ref(&self) -> &Path { - self.path.as_ref() - } -} - -/// An alias for a glob iteration result. -/// -/// This represents either a matched path or a glob iteration error, -/// such as failing to read a particular directory's contents. -pub type GlobResult = Result; - -impl Iterator for Paths { - type Item = GlobResult; - - fn next(&mut self) -> Option { - // the todo buffer hasn't been initialized yet, so it's done at this - // point rather than in glob() so that the errors are unified that is, - // failing to fill the buffer is an iteration error construction of the - // iterator (i.e. glob()) only fails if it fails to compile the Pattern - if let Some(scope) = self.scope.take() { - if !self.dir_patterns.is_empty() { - // Shouldn't happen, but we're using -1 as a special index. - assert!(self.dir_patterns.len() < usize::MAX); - - fill_todo(&mut self.todo, &self.dir_patterns, 0, &scope, self.options); - } - } - - loop { - if self.dir_patterns.is_empty() || self.todo.is_empty() { - return None; - } - - let (path, mut idx) = match self.todo.pop().unwrap() { - Ok(pair) => pair, - Err(e) => return Some(Err(e)), - }; - - // idx -1: was already checked by fill_todo, maybe path was '.' or - // '..' that we can't match here because of normalization. 
- if idx == usize::MAX { - if self.require_dir && !path.is_directory { - continue; - } - return Some(Ok(path.into_path())); - } - - if self.dir_patterns[idx].is_recursive { - let mut next = idx; - - // collapse consecutive recursive patterns - while (next + 1) < self.dir_patterns.len() - && self.dir_patterns[next + 1].is_recursive - { - next += 1; - } - - if path.is_directory { - // the path is a directory, so it's a match - - // push this directory's contents - fill_todo( - &mut self.todo, - &self.dir_patterns, - next, - &path, - self.options, - ); - - if next == self.dir_patterns.len() - 1 { - // pattern ends in recursive pattern, so return this - // directory as a result - return Some(Ok(path.into_path())); - } else { - // advanced to the next pattern for this path - idx = next + 1; - } - } else if next == self.dir_patterns.len() - 1 { - // not a directory and it's the last pattern, meaning no - // match - continue; - } else { - // advanced to the next pattern for this path - idx = next + 1; - } - } - - // not recursive, so match normally - if self.dir_patterns[idx].matches_with( - { - match path.file_name().and_then(|s| s.to_str()) { - // FIXME (#9639): How do we handle non-utf8 filenames? - // Ignore them for now; ideally we'd still match them - // against a * - None => continue, - Some(x) => x, - } - }, - self.options, - ) { - if idx == self.dir_patterns.len() - 1 { - // it is not possible for a pattern to match a directory - // *AND* its children so we don't need to check the - // children - - if !self.require_dir || path.is_directory { - return Some(Ok(path.into_path())); - } - } else { - fill_todo( - &mut self.todo, - &self.dir_patterns, - idx + 1, - &path, - self.options, - ); - } - } - } - } -} - -/// A pattern parsing error. -#[derive(Debug)] -#[allow(missing_copy_implementations)] -pub struct PatternError { - /// The approximate character index of where the error occurred. - pub pos: usize, - - /// A message describing the error. - pub msg: &'static str, -} - -impl Error for PatternError { - fn description(&self) -> &str { - self.msg - } -} - -impl fmt::Display for PatternError { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!( - f, - "Pattern syntax error near position {}: {}", - self.pos, self.msg - ) - } -} - -/// A compiled Unix shell style pattern. -/// -/// - `?` matches any single character. -/// -/// - `*` matches any (possibly empty) sequence of characters. -/// -/// - `**` matches the current directory and arbitrary -/// subdirectories. To match files in arbitrary subdirectories, use -/// `**/*`. -/// -/// This sequence **must** form a single path component, so both -/// `**a` and `b**` are invalid and will result in an error. A -/// sequence of more than two consecutive `*` characters is also -/// invalid. -/// -/// - `[...]` matches any character inside the brackets. Character sequences -/// can also specify ranges of characters, as ordered by Unicode, so e.g. -/// `[0-9]` specifies any character between 0 and 9 inclusive. An unclosed -/// bracket is invalid. -/// -/// - `[!...]` is the negation of `[...]`, i.e. it matches any characters -/// **not** in the brackets. -/// -/// - The metacharacters `?`, `*`, `[`, `]` can be matched by using brackets -/// (e.g. `[?]`). When a `]` occurs immediately following `[` or `[!` then it -/// is interpreted as being part of, rather then ending, the character set, so -/// `]` and NOT `]` can be matched by `[]]` and `[!]]` respectively. 
The `-` -/// character can be specified inside a character sequence pattern by placing -/// it at the start or the end, e.g. `[abc-]`. -#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Default, Debug)] -pub struct Pattern { - original: String, - tokens: Vec, - is_recursive: bool, - /// A bool value that indicates whether the pattern contains any metacharacters. - /// We use this information for some fast path optimizations. - has_metachars: bool, -} - -/// Show the original glob pattern. -impl fmt::Display for Pattern { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - self.original.fmt(f) - } -} - -impl FromStr for Pattern { - type Err = PatternError; - - fn from_str(s: &str) -> Result { - Self::new(s) - } -} - -#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)] -enum PatternToken { - Char(char), - AnyChar, - AnySequence, - AnyRecursiveSequence, - AnyWithin(Vec), - AnyExcept(Vec), -} - -#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)] -enum CharSpecifier { - SingleChar(char), - CharRange(char, char), -} - -#[derive(Copy, Clone, PartialEq)] -enum MatchResult { - Match, - SubPatternDoesntMatch, - EntirePatternDoesntMatch, -} - -const ERROR_WILDCARDS: &str = "wildcards are either regular `*` or recursive `**`"; -const ERROR_RECURSIVE_WILDCARDS: &str = "recursive wildcards must form a single path \ - component"; -const ERROR_INVALID_RANGE: &str = "invalid range pattern"; - -impl Pattern { - /// This function compiles Unix shell style patterns. - /// - /// An invalid glob pattern will yield a `PatternError`. - pub fn new(pattern: &str) -> Result { - let chars = pattern.chars().collect::>(); - let mut tokens = Vec::new(); - let mut is_recursive = false; - let mut has_metachars = false; - let mut i = 0; - - while i < chars.len() { - match chars[i] { - '?' => { - has_metachars = true; - tokens.push(AnyChar); - i += 1; - } - '*' => { - has_metachars = true; - - let old = i; - - while i < chars.len() && chars[i] == '*' { - i += 1; - } - - let count = i - old; - - match count.cmp(&2) { - Ordering::Greater => { - return Err(PatternError { - pos: old + 2, - msg: ERROR_WILDCARDS, - }) - } - Ordering::Equal => { - // ** can only be an entire path component - // i.e. a/**/b is valid, but a**/b or a/**b is not - // invalid matches are treated literally - let is_valid = if i == 2 || path::is_separator(chars[i - count - 1]) { - // it ends in a '/' - if i < chars.len() && path::is_separator(chars[i]) { - i += 1; - true - // or the pattern ends here - // this enables the existing globbing mechanism - } else if i == chars.len() { - true - // `**` ends in non-separator - } else { - return Err(PatternError { - pos: i, - msg: ERROR_RECURSIVE_WILDCARDS, - }); - } - // `**` begins with non-separator - } else { - return Err(PatternError { - pos: old - 1, - msg: ERROR_RECURSIVE_WILDCARDS, - }); - }; - - if is_valid { - // collapse consecutive AnyRecursiveSequence to a - // single one - - let tokens_len = tokens.len(); - - if !(tokens_len > 1 - && tokens[tokens_len - 1] == AnyRecursiveSequence) - { - is_recursive = true; - tokens.push(AnyRecursiveSequence); - } - } - } - Ordering::Less => tokens.push(AnySequence), - } - } - '[' => { - has_metachars = true; - - if i + 4 <= chars.len() && chars[i + 1] == '!' 
{ - match chars[i + 3..].iter().position(|x| *x == ']') { - None => (), - Some(j) => { - let chars = &chars[i + 2..i + 3 + j]; - let cs = parse_char_specifiers(chars); - tokens.push(AnyExcept(cs)); - i += j + 4; - continue; - } - } - } else if i + 3 <= chars.len() && chars[i + 1] != '!' { - match chars[i + 2..].iter().position(|x| *x == ']') { - None => (), - Some(j) => { - let cs = parse_char_specifiers(&chars[i + 1..i + 2 + j]); - tokens.push(AnyWithin(cs)); - i += j + 3; - continue; - } - } - } - - // if we get here then this is not a valid range pattern - return Err(PatternError { - pos: i, - msg: ERROR_INVALID_RANGE, - }); - } - c => { - tokens.push(Char(c)); - i += 1; - } - } - } - - Ok(Self { - tokens, - original: pattern.to_string(), - is_recursive, - has_metachars, - }) - } - - /// Escape metacharacters within the given string by surrounding them in - /// brackets. The resulting string will, when compiled into a `Pattern`, - /// match the input string and nothing else. - pub fn escape(s: &str) -> String { - let mut escaped = String::new(); - for c in s.chars() { - match c { - // note that ! does not need escaping because it is only special - // inside brackets - '?' | '*' | '[' | ']' => { - escaped.push('['); - escaped.push(c); - escaped.push(']'); - } - c => { - escaped.push(c); - } - } - } - escaped - } - - /// Return if the given `str` matches this `Pattern` using the default - /// match options (i.e. `MatchOptions::new()`). - /// - /// # Examples - /// - /// ```rust - /// use glob::Pattern; - /// - /// assert!(Pattern::new("c?t").unwrap().matches("cat")); - /// assert!(Pattern::new("k[!e]tteh").unwrap().matches("kitteh")); - /// assert!(Pattern::new("d*g").unwrap().matches("doog")); - /// ``` - pub fn matches(&self, str: &str) -> bool { - self.matches_with(str, MatchOptions::new()) - } - - /// Return if the given `Path`, when converted to a `str`, matches this - /// `Pattern` using the default match options (i.e. `MatchOptions::new()`). - pub fn matches_path(&self, path: &Path) -> bool { - // FIXME (#9639): This needs to handle non-utf8 paths - path.to_str().map_or(false, |s| self.matches(s)) - } - - /// Return if the given `str` matches this `Pattern` using the specified - /// match options. - pub fn matches_with(&self, str: &str, options: MatchOptions) -> bool { - self.matches_from(true, str.chars(), 0, options) == Match - } - - /// Return if the given `Path`, when converted to a `str`, matches this - /// `Pattern` using the specified match options. - pub fn matches_path_with(&self, path: &Path, options: MatchOptions) -> bool { - // FIXME (#9639): This needs to handle non-utf8 paths - path.to_str() - .map_or(false, |s| self.matches_with(s, options)) - } - - /// Access the original glob pattern. - pub fn as_str(&self) -> &str { - &self.original - } - - fn matches_from( - &self, - mut follows_separator: bool, - mut file: std::str::Chars, - i: usize, - options: MatchOptions, - ) -> MatchResult { - for (ti, token) in self.tokens[i..].iter().enumerate() { - match *token { - AnySequence | AnyRecursiveSequence => { - // ** must be at the start. - debug_assert!(match *token { - AnyRecursiveSequence => follows_separator, - _ => true, - }); - - // Empty match - match self.matches_from(follows_separator, file.clone(), i + ti + 1, options) { - SubPatternDoesntMatch => (), // keep trying - m => return m, - }; - - while let Some(c) = file.next() { - if follows_separator && options.require_literal_leading_dot && c == '.' 
{ - return SubPatternDoesntMatch; - } - follows_separator = path::is_separator(c); - match *token { - AnyRecursiveSequence if !follows_separator => continue, - AnySequence - if options.require_literal_separator && follows_separator => - { - return SubPatternDoesntMatch - } - _ => (), - } - match self.matches_from( - follows_separator, - file.clone(), - i + ti + 1, - options, - ) { - SubPatternDoesntMatch => (), // keep trying - m => return m, - } - } - } - _ => { - let c = match file.next() { - Some(c) => c, - None => return EntirePatternDoesntMatch, - }; - - let is_sep = path::is_separator(c); - - if !match *token { - AnyChar | AnyWithin(..) | AnyExcept(..) - if (options.require_literal_separator && is_sep) - || (follows_separator - && options.require_literal_leading_dot - && c == '.') => - { - false - } - AnyChar => true, - AnyWithin(ref specifiers) => in_char_specifiers(specifiers, c, options), - AnyExcept(ref specifiers) => !in_char_specifiers(specifiers, c, options), - Char(c2) => chars_eq(c, c2, options.case_sensitive), - AnySequence | AnyRecursiveSequence => unreachable!(), - } { - return SubPatternDoesntMatch; - } - follows_separator = is_sep; - } - } - } - - // Iter is fused. - if file.next().is_none() { - Match - } else { - SubPatternDoesntMatch - } - } -} - -// Fills `todo` with paths under `path` to be matched by `patterns[idx]`, -// special-casing patterns to match `.` and `..`, and avoiding `readdir()` -// calls when there are no metacharacters in the pattern. -fn fill_todo( - todo: &mut Vec>, - patterns: &[Pattern], - idx: usize, - path: &PathWrapper, - options: MatchOptions, -) { - let add = |todo: &mut Vec<_>, next_path: PathWrapper| { - if idx + 1 == patterns.len() { - // We know it's good, so don't make the iterator match this path - // against the pattern again. In particular, it can't match - // . or .. globs since these never show up as path components. - todo.push(Ok((next_path, usize::MAX))); - } else { - fill_todo(todo, patterns, idx + 1, &next_path, options); - } - }; - - let pattern = &patterns[idx]; - let is_dir = path.is_directory; - let curdir = path.as_ref() == Path::new("."); - match (pattern.has_metachars, is_dir) { - (false, _) => { - debug_assert!( - pattern - .tokens - .iter() - .all(|tok| matches!(tok, PatternToken::Char(_))), - "broken invariant: pattern has metachars but shouldn't" - ); - let s = pattern.as_str(); - - // This pattern component doesn't have any metacharacters, so we - // don't need to read the current directory to know where to - // continue. So instead of passing control back to the iterator, - // we can just check for that one entry and potentially recurse - // right away. - let special = "." == s || ".." 
== s; - let next_path = if curdir { - PathBuf::from(s) - } else { - path.join(s) - }; - let next_path = PathWrapper::from_path(next_path); - if (special && is_dir) - || (!special - && (fs::metadata(&next_path).is_ok() - || fs::symlink_metadata(&next_path).is_ok())) - { - add(todo, next_path); - } - } - (true, true) => { - let dirs = fs::read_dir(path).and_then(|d| { - d.map(|e| { - e.map(|e| { - let path = if curdir { - PathBuf::from(e.path().file_name().unwrap()) - } else { - e.path() - }; - PathWrapper::from_dir_entry(path, e) - }) - }) - .collect::, _>>() - }); - match dirs { - Ok(mut children) => { - if options.require_literal_leading_dot { - children - .retain(|x| !x.file_name().unwrap().to_str().unwrap().starts_with('.')); - } - children.sort_by(|p1, p2| p2.file_name().cmp(&p1.file_name())); - todo.extend(children.into_iter().map(|x| Ok((x, idx)))); - - // Matching the special directory entries . and .. that - // refer to the current and parent directory respectively - // requires that the pattern has a leading dot, even if the - // `MatchOptions` field `require_literal_leading_dot` is not - // set. - if !pattern.tokens.is_empty() && pattern.tokens[0] == Char('.') { - for &special in &[".", ".."] { - if pattern.matches_with(special, options) { - add(todo, PathWrapper::from_path(path.join(special))); - } - } - } - } - Err(e) => { - todo.push(Err(GlobError { - path: path.to_path_buf(), - error: e, - })); - } - } - } - (true, false) => { - // not a directory, nothing more to find - } - } -} - -fn parse_char_specifiers(s: &[char]) -> Vec { - let mut cs = Vec::new(); - let mut i = 0; - while i < s.len() { - if i + 3 <= s.len() && s[i + 1] == '-' { - cs.push(CharRange(s[i], s[i + 2])); - i += 3; - } else { - cs.push(SingleChar(s[i])); - i += 1; - } - } - cs -} - -fn in_char_specifiers(specifiers: &[CharSpecifier], c: char, options: MatchOptions) -> bool { - for &specifier in specifiers.iter() { - match specifier { - SingleChar(sc) => { - if chars_eq(c, sc, options.case_sensitive) { - return true; - } - } - CharRange(start, end) => { - // FIXME: work with non-ascii chars properly (issue #1347) - if !options.case_sensitive && c.is_ascii() && start.is_ascii() && end.is_ascii() { - let start = start.to_ascii_lowercase(); - let end = end.to_ascii_lowercase(); - - let start_up = start.to_uppercase().next().unwrap(); - let end_up = end.to_uppercase().next().unwrap(); - - // only allow case insensitive matching when - // both start and end are within a-z or A-Z - if start != start_up && end != end_up { - let c = c.to_ascii_lowercase(); - if c >= start && c <= end { - return true; - } - } - } - - if c >= start && c <= end { - return true; - } - } - } - } - - false -} - -/// A helper function to determine if two chars are (possibly case-insensitively) equal. -fn chars_eq(a: char, b: char, case_sensitive: bool) -> bool { - if cfg!(windows) && path::is_separator(a) && path::is_separator(b) { - true - } else if !case_sensitive && a.is_ascii() && b.is_ascii() { - // FIXME: work with non-ascii chars properly (issue #9084) - a.eq_ignore_ascii_case(&b) - } else { - a == b - } -} - -/// Configuration options to modify the behaviour of `Pattern::matches_with(..)`. -#[allow(missing_copy_implementations)] -#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] -pub struct MatchOptions { - /// Whether or not patterns should be matched in a case-sensitive manner. 
- /// This currently only considers upper/lower case relationships between - /// ASCII characters, but in future this might be extended to work with - /// Unicode. - pub case_sensitive: bool, - - /// Whether or not path-component separator characters (e.g. `/` on - /// Posix) must be matched by a literal `/`, rather than by `*` or `?` or - /// `[...]`. - pub require_literal_separator: bool, - - /// Whether or not paths that contain components that start with a `.` - /// will require that `.` appears literally in the pattern; `*`, `?`, `**`, - /// or `[...]` will not match. This is useful because such files are - /// conventionally considered hidden on Unix systems and it might be - /// desirable to skip them when listing files. - pub require_literal_leading_dot: bool, -} - -impl MatchOptions { - /// Constructs a new `MatchOptions` with default field values. This is used - /// when calling functions that do not take an explicit `MatchOptions` - /// parameter. - /// - /// This function always returns this value: - /// - /// ```rust,ignore - /// MatchOptions { - /// case_sensitive: true, - /// require_literal_separator: false, - /// require_literal_leading_dot: false - /// } - /// ``` - /// - /// # Note - /// The behavior of this method doesn't match `default()`'s. This returns - /// `case_sensitive` as `true` while `default()` does it as `false`. - // FIXME: Consider unity the behavior with `default()` in a next major release. - pub fn new() -> Self { - Self { - case_sensitive: true, - require_literal_separator: false, - require_literal_leading_dot: false, - } - } -} - -#[cfg(test)] -mod test { - use super::{glob, MatchOptions, Pattern}; - use std::path::Path; - - #[test] - fn test_pattern_from_str() { - assert!("a*b".parse::().unwrap().matches("a_b")); - assert!("a/**b".parse::().unwrap_err().pos == 4); - } - - #[test] - fn test_wildcard_errors() { - assert!(Pattern::new("a/**b").unwrap_err().pos == 4); - assert!(Pattern::new("a/bc**").unwrap_err().pos == 3); - assert!(Pattern::new("a/*****").unwrap_err().pos == 4); - assert!(Pattern::new("a/b**c**d").unwrap_err().pos == 2); - assert!(Pattern::new("a**b").unwrap_err().pos == 0); - } - - #[test] - fn test_unclosed_bracket_errors() { - assert!(Pattern::new("abc[def").unwrap_err().pos == 3); - assert!(Pattern::new("abc[!def").unwrap_err().pos == 3); - assert!(Pattern::new("abc[").unwrap_err().pos == 3); - assert!(Pattern::new("abc[!").unwrap_err().pos == 3); - assert!(Pattern::new("abc[d").unwrap_err().pos == 3); - assert!(Pattern::new("abc[!d").unwrap_err().pos == 3); - assert!(Pattern::new("abc[]").unwrap_err().pos == 3); - assert!(Pattern::new("abc[!]").unwrap_err().pos == 3); - } - - #[test] - fn test_glob_errors() { - assert!(glob("a/**b").err().unwrap().pos == 4); - assert!(glob("abc[def").err().unwrap().pos == 3); - } - - // this test assumes that there is a /root directory and that - // the user running this test is not root or otherwise doesn't - // have permission to read its contents - #[cfg(all(unix, not(target_os = "macos")))] - #[test] - fn test_iteration_errors() { - use std::io; - let mut iter = glob("/root/*").unwrap(); - - // GlobErrors shouldn't halt iteration - let next = iter.next(); - assert!(next.is_some()); - - let err = next.unwrap(); - assert!(err.is_err()); - - let err = err.err().unwrap(); - assert!(err.path() == Path::new("/root")); - assert!(err.error().kind() == io::ErrorKind::PermissionDenied); - } - - #[test] - fn test_absolute_pattern() { - assert!(glob("/").unwrap().next().is_some()); - 
assert!(glob("//").unwrap().next().is_some()); - - // assume that the filesystem is not empty! - assert!(glob("/*").unwrap().next().is_some()); - - #[cfg(not(windows))] - fn win() {} - - #[cfg(windows)] - fn win() { - use std::env::current_dir; - use std::path::Component; - - // check windows absolute paths with host/device components - let root_with_device = current_dir() - .ok() - .and_then(|p| match p.components().next().unwrap() { - Component::Prefix(prefix_component) => { - let path = Path::new(prefix_component.as_os_str()).join("*"); - Some(path.to_path_buf()) - } - _ => panic!("no prefix in this path"), - }) - .unwrap(); - // FIXME (#9639): This needs to handle non-utf8 paths - assert!(glob(root_with_device.as_os_str().to_str().unwrap()) - .unwrap() - .next() - .is_some()); - } - win() - } - - #[test] - fn test_wildcards() { - assert!(Pattern::new("a*b").unwrap().matches("a_b")); - assert!(Pattern::new("a*b*c").unwrap().matches("abc")); - assert!(!Pattern::new("a*b*c").unwrap().matches("abcd")); - assert!(Pattern::new("a*b*c").unwrap().matches("a_b_c")); - assert!(Pattern::new("a*b*c").unwrap().matches("a___b___c")); - assert!(Pattern::new("abc*abc*abc") - .unwrap() - .matches("abcabcabcabcabcabcabc")); - assert!(!Pattern::new("abc*abc*abc") - .unwrap() - .matches("abcabcabcabcabcabcabca")); - assert!(Pattern::new("a*a*a*a*a*a*a*a*a") - .unwrap() - .matches("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa")); - assert!(Pattern::new("a*b[xyz]c*d").unwrap().matches("abxcdbxcddd")); - } - - #[test] - fn test_recursive_wildcards() { - let pat = Pattern::new("some/**/needle.txt").unwrap(); - assert!(pat.matches("some/needle.txt")); - assert!(pat.matches("some/one/needle.txt")); - assert!(pat.matches("some/one/two/needle.txt")); - assert!(pat.matches("some/other/needle.txt")); - assert!(!pat.matches("some/other/notthis.txt")); - - // a single ** should be valid, for globs - // Should accept anything - let pat = Pattern::new("**").unwrap(); - assert!(pat.is_recursive); - assert!(pat.matches("abcde")); - assert!(pat.matches("")); - assert!(pat.matches(".asdf")); - assert!(pat.matches("/x/.asdf")); - - // collapse consecutive wildcards - let pat = Pattern::new("some/**/**/needle.txt").unwrap(); - assert!(pat.matches("some/needle.txt")); - assert!(pat.matches("some/one/needle.txt")); - assert!(pat.matches("some/one/two/needle.txt")); - assert!(pat.matches("some/other/needle.txt")); - assert!(!pat.matches("some/other/notthis.txt")); - - // ** can begin the pattern - let pat = Pattern::new("**/test").unwrap(); - assert!(pat.matches("one/two/test")); - assert!(pat.matches("one/test")); - assert!(pat.matches("test")); - - // /** can begin the pattern - let pat = Pattern::new("/**/test").unwrap(); - assert!(pat.matches("/one/two/test")); - assert!(pat.matches("/one/test")); - assert!(pat.matches("/test")); - assert!(!pat.matches("/one/notthis")); - assert!(!pat.matches("/notthis")); - - // Only start sub-patterns on start of path segment. 
- let pat = Pattern::new("**/.*").unwrap(); - assert!(pat.matches(".abc")); - assert!(pat.matches("abc/.abc")); - assert!(!pat.matches("ab.c")); - assert!(!pat.matches("abc/ab.c")); - } - - #[test] - fn test_lots_of_files() { - // this is a good test because it touches lots of differently named files - glob("/*/*/*/*").unwrap().skip(10000).next(); - } - - #[test] - fn test_range_pattern() { - let pat = Pattern::new("a[0-9]b").unwrap(); - for i in 0..10 { - assert!(pat.matches(&format!("a{}b", i))); - } - assert!(!pat.matches("a_b")); - - let pat = Pattern::new("a[!0-9]b").unwrap(); - for i in 0..10 { - assert!(!pat.matches(&format!("a{}b", i))); - } - assert!(pat.matches("a_b")); - - let pats = ["[a-z123]", "[1a-z23]", "[123a-z]"]; - for &p in pats.iter() { - let pat = Pattern::new(p).unwrap(); - for c in "abcdefghijklmnopqrstuvwxyz".chars() { - assert!(pat.matches(&c.to_string())); - } - for c in "ABCDEFGHIJKLMNOPQRSTUVWXYZ".chars() { - let options = MatchOptions { - case_sensitive: false, - ..MatchOptions::new() - }; - assert!(pat.matches_with(&c.to_string(), options)); - } - assert!(pat.matches("1")); - assert!(pat.matches("2")); - assert!(pat.matches("3")); - } - - let pats = ["[abc-]", "[-abc]", "[a-c-]"]; - for &p in pats.iter() { - let pat = Pattern::new(p).unwrap(); - assert!(pat.matches("a")); - assert!(pat.matches("b")); - assert!(pat.matches("c")); - assert!(pat.matches("-")); - assert!(!pat.matches("d")); - } - - let pat = Pattern::new("[2-1]").unwrap(); - assert!(!pat.matches("1")); - assert!(!pat.matches("2")); - - assert!(Pattern::new("[-]").unwrap().matches("-")); - assert!(!Pattern::new("[!-]").unwrap().matches("-")); - } - - #[test] - fn test_pattern_matches() { - let txt_pat = Pattern::new("*hello.txt").unwrap(); - assert!(txt_pat.matches("hello.txt")); - assert!(txt_pat.matches("gareth_says_hello.txt")); - assert!(txt_pat.matches("some/path/to/hello.txt")); - assert!(txt_pat.matches("some\\path\\to\\hello.txt")); - assert!(txt_pat.matches("/an/absolute/path/to/hello.txt")); - assert!(!txt_pat.matches("hello.txt-and-then-some")); - assert!(!txt_pat.matches("goodbye.txt")); - - let dir_pat = Pattern::new("*some/path/to/hello.txt").unwrap(); - assert!(dir_pat.matches("some/path/to/hello.txt")); - assert!(dir_pat.matches("a/bigger/some/path/to/hello.txt")); - assert!(!dir_pat.matches("some/path/to/hello.txt-and-then-some")); - assert!(!dir_pat.matches("some/other/path/to/hello.txt")); - } - - #[test] - fn test_pattern_escape() { - let s = "_[_]_?_*_!_"; - assert_eq!(Pattern::escape(s), "_[[]_[]]_[?]_[*]_!_".to_string()); - assert!(Pattern::new(&Pattern::escape(s)).unwrap().matches(s)); - } - - #[test] - fn test_pattern_matches_case_insensitive() { - let pat = Pattern::new("aBcDeFg").unwrap(); - let options = MatchOptions { - case_sensitive: false, - require_literal_separator: false, - require_literal_leading_dot: false, - }; - - assert!(pat.matches_with("aBcDeFg", options)); - assert!(pat.matches_with("abcdefg", options)); - assert!(pat.matches_with("ABCDEFG", options)); - assert!(pat.matches_with("AbCdEfG", options)); - } - - #[test] - fn test_pattern_matches_case_insensitive_range() { - let pat_within = Pattern::new("[a]").unwrap(); - let pat_except = Pattern::new("[!a]").unwrap(); - - let options_case_insensitive = MatchOptions { - case_sensitive: false, - require_literal_separator: false, - require_literal_leading_dot: false, - }; - let options_case_sensitive = MatchOptions { - case_sensitive: true, - require_literal_separator: false, - require_literal_leading_dot: 
false, - }; - - assert!(pat_within.matches_with("a", options_case_insensitive)); - assert!(pat_within.matches_with("A", options_case_insensitive)); - assert!(!pat_within.matches_with("A", options_case_sensitive)); - - assert!(!pat_except.matches_with("a", options_case_insensitive)); - assert!(!pat_except.matches_with("A", options_case_insensitive)); - assert!(pat_except.matches_with("A", options_case_sensitive)); - } - - #[test] - fn test_pattern_matches_require_literal_separator() { - let options_require_literal = MatchOptions { - case_sensitive: true, - require_literal_separator: true, - require_literal_leading_dot: false, - }; - let options_not_require_literal = MatchOptions { - case_sensitive: true, - require_literal_separator: false, - require_literal_leading_dot: false, - }; - - assert!(Pattern::new("abc/def") - .unwrap() - .matches_with("abc/def", options_require_literal)); - assert!(!Pattern::new("abc?def") - .unwrap() - .matches_with("abc/def", options_require_literal)); - assert!(!Pattern::new("abc*def") - .unwrap() - .matches_with("abc/def", options_require_literal)); - assert!(!Pattern::new("abc[/]def") - .unwrap() - .matches_with("abc/def", options_require_literal)); - - assert!(Pattern::new("abc/def") - .unwrap() - .matches_with("abc/def", options_not_require_literal)); - assert!(Pattern::new("abc?def") - .unwrap() - .matches_with("abc/def", options_not_require_literal)); - assert!(Pattern::new("abc*def") - .unwrap() - .matches_with("abc/def", options_not_require_literal)); - assert!(Pattern::new("abc[/]def") - .unwrap() - .matches_with("abc/def", options_not_require_literal)); - } - - #[test] - fn test_pattern_matches_require_literal_leading_dot() { - let options_require_literal_leading_dot = MatchOptions { - case_sensitive: true, - require_literal_separator: false, - require_literal_leading_dot: true, - }; - let options_not_require_literal_leading_dot = MatchOptions { - case_sensitive: true, - require_literal_separator: false, - require_literal_leading_dot: false, - }; - - let f = |options| { - Pattern::new("*.txt") - .unwrap() - .matches_with(".hello.txt", options) - }; - assert!(f(options_not_require_literal_leading_dot)); - assert!(!f(options_require_literal_leading_dot)); - - let f = |options| { - Pattern::new(".*.*") - .unwrap() - .matches_with(".hello.txt", options) - }; - assert!(f(options_not_require_literal_leading_dot)); - assert!(f(options_require_literal_leading_dot)); - - let f = |options| { - Pattern::new("aaa/bbb/*") - .unwrap() - .matches_with("aaa/bbb/.ccc", options) - }; - assert!(f(options_not_require_literal_leading_dot)); - assert!(!f(options_require_literal_leading_dot)); - - let f = |options| { - Pattern::new("aaa/bbb/*") - .unwrap() - .matches_with("aaa/bbb/c.c.c.", options) - }; - assert!(f(options_not_require_literal_leading_dot)); - assert!(f(options_require_literal_leading_dot)); - - let f = |options| { - Pattern::new("aaa/bbb/.*") - .unwrap() - .matches_with("aaa/bbb/.ccc", options) - }; - assert!(f(options_not_require_literal_leading_dot)); - assert!(f(options_require_literal_leading_dot)); - - let f = |options| { - Pattern::new("aaa/?bbb") - .unwrap() - .matches_with("aaa/.bbb", options) - }; - assert!(f(options_not_require_literal_leading_dot)); - assert!(!f(options_require_literal_leading_dot)); - - let f = |options| { - Pattern::new("aaa/[.]bbb") - .unwrap() - .matches_with("aaa/.bbb", options) - }; - assert!(f(options_not_require_literal_leading_dot)); - assert!(!f(options_require_literal_leading_dot)); - - let f = |options| 
Pattern::new("**/*").unwrap().matches_with(".bbb", options); - assert!(f(options_not_require_literal_leading_dot)); - assert!(!f(options_require_literal_leading_dot)); - } - - #[test] - fn test_matches_path() { - // on windows, (Path::new("a/b").as_str().unwrap() == "a\\b"), so this - // tests that / and \ are considered equivalent on windows - assert!(Pattern::new("a/b").unwrap().matches_path(Path::new("a/b"))); - } - - #[test] - fn test_path_join() { - let pattern = Path::new("one").join(Path::new("**/*.rs")); - assert!(Pattern::new(pattern.to_str().unwrap()).is_ok()); - } -} diff --git a/vendor/glob/tests/glob-std.rs b/vendor/glob/tests/glob-std.rs deleted file mode 100644 index ba12701e36f9f6..00000000000000 --- a/vendor/glob/tests/glob-std.rs +++ /dev/null @@ -1,477 +0,0 @@ -// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -// ignore-windows TempDir may cause IoError on windows: #10462 - -#![cfg_attr(test, deny(warnings))] - -extern crate glob; -extern crate tempdir; - -use glob::{glob, glob_with}; -use std::env; -use std::fs; -use std::path::PathBuf; -use tempdir::TempDir; - -#[test] -fn main() { - fn mk_file(path: &str, directory: bool) { - if directory { - fs::create_dir(path).unwrap(); - } else { - fs::File::create(path).unwrap(); - } - } - - fn mk_symlink_file(original: &str, link: &str) { - #[cfg(unix)] - { - use std::os::unix::fs::symlink; - symlink(original, link).unwrap(); - } - #[cfg(windows)] - { - use std::os::windows::fs::symlink_file; - symlink_file(original, link).unwrap(); - } - } - - fn mk_symlink_dir(original: &str, link: &str) { - #[cfg(unix)] - { - use std::os::unix::fs::symlink; - symlink(original, link).unwrap(); - } - #[cfg(windows)] - { - use std::os::windows::fs::symlink_dir; - symlink_dir(original, link).unwrap(); - } - } - - fn glob_vec(pattern: &str) -> Vec { - glob(pattern).unwrap().map(|r| r.unwrap()).collect() - } - - fn glob_with_vec(pattern: &str, options: glob::MatchOptions) -> Vec { - glob_with(pattern, options) - .unwrap() - .map(|r| r.unwrap()) - .collect() - } - - let root = TempDir::new("glob-tests"); - let root = root.ok().expect("Should have created a temp directory"); - assert!(env::set_current_dir(root.path()).is_ok()); - - mk_file("aaa", true); - mk_file("aaa/apple", true); - mk_file("aaa/orange", true); - mk_file("aaa/tomato", true); - mk_file("aaa/tomato/tomato.txt", false); - mk_file("aaa/tomato/tomoto.txt", false); - mk_file("bbb", true); - mk_file("bbb/specials", true); - mk_file("bbb/specials/!", false); - // a valid symlink - mk_symlink_file("aaa/apple", "aaa/green_apple"); - // a broken symlink - mk_symlink_file("aaa/setsuna", "aaa/kazusa"); - - // windows does not allow `*` or `?` characters to exist in filenames - if env::consts::FAMILY != "windows" { - mk_file("bbb/specials/*", false); - mk_file("bbb/specials/?", false); - } - - mk_file("bbb/specials/[", false); - mk_file("bbb/specials/]", false); - mk_file("ccc", true); - mk_file("xyz", true); - mk_file("xyz/x", false); - mk_file("xyz/y", false); - mk_file("xyz/z", false); - - mk_file("r", true); - mk_file("r/current_dir.md", false); - mk_file("r/one", true); - mk_file("r/one/a.md", false); - mk_file("r/one/another", true); - mk_file("r/one/another/a.md", false); - 
mk_file("r/one/another/deep", true); - mk_file("r/one/another/deep/spelunking.md", false); - mk_file("r/another", true); - mk_file("r/another/a.md", false); - mk_file("r/two", true); - mk_file("r/two/b.md", false); - mk_file("r/three", true); - mk_file("r/three/c.md", false); - - mk_file("dirsym", true); - mk_symlink_dir(root.path().join("r").to_str().unwrap(), "dirsym/link"); - - assert_eq!( - glob_vec("dirsym/**/*.md"), - vec!( - PathBuf::from("dirsym/link/another/a.md"), - PathBuf::from("dirsym/link/current_dir.md"), - PathBuf::from("dirsym/link/one/a.md"), - PathBuf::from("dirsym/link/one/another/a.md"), - PathBuf::from("dirsym/link/one/another/deep/spelunking.md"), - PathBuf::from("dirsym/link/three/c.md"), - PathBuf::from("dirsym/link/two/b.md") - ) - ); - - // all recursive entities - assert_eq!( - glob_vec("r/**"), - vec!( - PathBuf::from("r/another"), - PathBuf::from("r/one"), - PathBuf::from("r/one/another"), - PathBuf::from("r/one/another/deep"), - PathBuf::from("r/three"), - PathBuf::from("r/two") - ) - ); - - // std-canonicalized windows verbatim disk paths should work - if env::consts::FAMILY == "windows" { - let r_verbatim = PathBuf::from("r").canonicalize().unwrap(); - assert_eq!( - glob_vec(&format!("{}\\**", r_verbatim.display().to_string())) - .into_iter() - .map(|p| p.strip_prefix(&r_verbatim).unwrap().to_owned()) - .collect::>(), - vec!( - PathBuf::from("another"), - PathBuf::from("one"), - PathBuf::from("one\\another"), - PathBuf::from("one\\another\\deep"), - PathBuf::from("three"), - PathBuf::from("two") - ) - ); - } - - // collapse consecutive recursive patterns - assert_eq!( - glob_vec("r/**/**"), - vec!( - PathBuf::from("r/another"), - PathBuf::from("r/one"), - PathBuf::from("r/one/another"), - PathBuf::from("r/one/another/deep"), - PathBuf::from("r/three"), - PathBuf::from("r/two") - ) - ); - - assert_eq!( - glob_vec("r/**/*"), - vec!( - PathBuf::from("r/another"), - PathBuf::from("r/another/a.md"), - PathBuf::from("r/current_dir.md"), - PathBuf::from("r/one"), - PathBuf::from("r/one/a.md"), - PathBuf::from("r/one/another"), - PathBuf::from("r/one/another/a.md"), - PathBuf::from("r/one/another/deep"), - PathBuf::from("r/one/another/deep/spelunking.md"), - PathBuf::from("r/three"), - PathBuf::from("r/three/c.md"), - PathBuf::from("r/two"), - PathBuf::from("r/two/b.md") - ) - ); - - // followed by a wildcard - assert_eq!( - glob_vec("r/**/*.md"), - vec!( - PathBuf::from("r/another/a.md"), - PathBuf::from("r/current_dir.md"), - PathBuf::from("r/one/a.md"), - PathBuf::from("r/one/another/a.md"), - PathBuf::from("r/one/another/deep/spelunking.md"), - PathBuf::from("r/three/c.md"), - PathBuf::from("r/two/b.md") - ) - ); - - // followed by a precise pattern - assert_eq!( - glob_vec("r/one/**/a.md"), - vec!( - PathBuf::from("r/one/a.md"), - PathBuf::from("r/one/another/a.md") - ) - ); - - // followed by another recursive pattern - // collapses consecutive recursives into one - assert_eq!( - glob_vec("r/one/**/**/a.md"), - vec!( - PathBuf::from("r/one/a.md"), - PathBuf::from("r/one/another/a.md") - ) - ); - - // followed by two precise patterns - assert_eq!( - glob_vec("r/**/another/a.md"), - vec!( - PathBuf::from("r/another/a.md"), - PathBuf::from("r/one/another/a.md") - ) - ); - - assert_eq!(glob_vec(""), Vec::::new()); - assert_eq!(glob_vec("."), vec!(PathBuf::from("."))); - assert_eq!(glob_vec(".."), vec!(PathBuf::from(".."))); - - assert_eq!(glob_vec("aaa"), vec!(PathBuf::from("aaa"))); - assert_eq!(glob_vec("aaa/"), vec!(PathBuf::from("aaa"))); - 
assert_eq!(glob_vec("a"), Vec::::new()); - assert_eq!(glob_vec("aa"), Vec::::new()); - assert_eq!(glob_vec("aaaa"), Vec::::new()); - - assert_eq!(glob_vec("aaa/apple"), vec!(PathBuf::from("aaa/apple"))); - assert_eq!(glob_vec("aaa/apple/nope"), Vec::::new()); - - // windows should support both / and \ as directory separators - if env::consts::FAMILY == "windows" { - assert_eq!(glob_vec("aaa\\apple"), vec!(PathBuf::from("aaa/apple"))); - } - - assert_eq!( - glob_vec("???/"), - vec!( - PathBuf::from("aaa"), - PathBuf::from("bbb"), - PathBuf::from("ccc"), - PathBuf::from("xyz") - ) - ); - - assert_eq!( - glob_vec("aaa/tomato/tom?to.txt"), - vec!( - PathBuf::from("aaa/tomato/tomato.txt"), - PathBuf::from("aaa/tomato/tomoto.txt") - ) - ); - - assert_eq!( - glob_vec("xyz/?"), - vec!( - PathBuf::from("xyz/x"), - PathBuf::from("xyz/y"), - PathBuf::from("xyz/z") - ) - ); - - assert_eq!(glob_vec("a*"), vec!(PathBuf::from("aaa"))); - assert_eq!(glob_vec("*a*"), vec!(PathBuf::from("aaa"))); - assert_eq!(glob_vec("a*a"), vec!(PathBuf::from("aaa"))); - assert_eq!(glob_vec("aaa*"), vec!(PathBuf::from("aaa"))); - assert_eq!(glob_vec("*aaa"), vec!(PathBuf::from("aaa"))); - assert_eq!(glob_vec("*aaa*"), vec!(PathBuf::from("aaa"))); - assert_eq!(glob_vec("*a*a*a*"), vec!(PathBuf::from("aaa"))); - assert_eq!(glob_vec("aaa*/"), vec!(PathBuf::from("aaa"))); - - assert_eq!( - glob_vec("aaa/*"), - vec!( - PathBuf::from("aaa/apple"), - PathBuf::from("aaa/green_apple"), - PathBuf::from("aaa/kazusa"), - PathBuf::from("aaa/orange"), - PathBuf::from("aaa/tomato"), - ) - ); - - assert_eq!( - glob_vec("aaa/*a*"), - vec!( - PathBuf::from("aaa/apple"), - PathBuf::from("aaa/green_apple"), - PathBuf::from("aaa/kazusa"), - PathBuf::from("aaa/orange"), - PathBuf::from("aaa/tomato") - ) - ); - - assert_eq!( - glob_vec("*/*/*.txt"), - vec!( - PathBuf::from("aaa/tomato/tomato.txt"), - PathBuf::from("aaa/tomato/tomoto.txt") - ) - ); - - assert_eq!( - glob_vec("*/*/t[aob]m?to[.]t[!y]t"), - vec!( - PathBuf::from("aaa/tomato/tomato.txt"), - PathBuf::from("aaa/tomato/tomoto.txt") - ) - ); - - assert_eq!(glob_vec("./aaa"), vec!(PathBuf::from("aaa"))); - assert_eq!(glob_vec("./*"), glob_vec("*")); - assert_eq!(glob_vec("*/..").pop().unwrap(), PathBuf::from("xyz/..")); - assert_eq!(glob_vec("aaa/../bbb"), vec!(PathBuf::from("aaa/../bbb"))); - assert_eq!(glob_vec("nonexistent/../bbb"), Vec::::new()); - assert_eq!(glob_vec("aaa/tomato/tomato.txt/.."), Vec::::new()); - - assert_eq!(glob_vec("aaa/tomato/tomato.txt/"), Vec::::new()); - - // Ensure to find a broken symlink. 
- assert_eq!(glob_vec("aaa/kazusa"), vec!(PathBuf::from("aaa/kazusa"))); - - assert_eq!(glob_vec("aa[a]"), vec!(PathBuf::from("aaa"))); - assert_eq!(glob_vec("aa[abc]"), vec!(PathBuf::from("aaa"))); - assert_eq!(glob_vec("a[bca]a"), vec!(PathBuf::from("aaa"))); - assert_eq!(glob_vec("aa[b]"), Vec::::new()); - assert_eq!(glob_vec("aa[xyz]"), Vec::::new()); - assert_eq!(glob_vec("aa[]]"), Vec::::new()); - - assert_eq!(glob_vec("aa[!b]"), vec!(PathBuf::from("aaa"))); - assert_eq!(glob_vec("aa[!bcd]"), vec!(PathBuf::from("aaa"))); - assert_eq!(glob_vec("a[!bcd]a"), vec!(PathBuf::from("aaa"))); - assert_eq!(glob_vec("aa[!a]"), Vec::::new()); - assert_eq!(glob_vec("aa[!abc]"), Vec::::new()); - - assert_eq!( - glob_vec("bbb/specials/[[]"), - vec!(PathBuf::from("bbb/specials/[")) - ); - assert_eq!( - glob_vec("bbb/specials/!"), - vec!(PathBuf::from("bbb/specials/!")) - ); - assert_eq!( - glob_vec("bbb/specials/[]]"), - vec!(PathBuf::from("bbb/specials/]")) - ); - - mk_file("i", true); - mk_file("i/qwe", true); - mk_file("i/qwe/.aaa", false); - mk_file("i/qwe/.bbb", true); - mk_file("i/qwe/.bbb/ccc", false); - mk_file("i/qwe/.bbb/.ddd", false); - mk_file("i/qwe/eee", false); - - let options = glob::MatchOptions { - case_sensitive: false, - require_literal_separator: true, - require_literal_leading_dot: true, - }; - assert_eq!(glob_with_vec("i/**/*a*", options), Vec::::new()); - assert_eq!(glob_with_vec("i/**/*c*", options), Vec::::new()); - assert_eq!(glob_with_vec("i/**/*d*", options), Vec::::new()); - assert_eq!( - glob_with_vec("i/**/*e*", options), - vec!(PathBuf::from("i/qwe"), PathBuf::from("i/qwe/eee")) - ); - - if env::consts::FAMILY != "windows" { - assert_eq!( - glob_vec("bbb/specials/[*]"), - vec!(PathBuf::from("bbb/specials/*")) - ); - assert_eq!( - glob_vec("bbb/specials/[?]"), - vec!(PathBuf::from("bbb/specials/?")) - ); - } - - if env::consts::FAMILY == "windows" { - assert_eq!( - glob_vec("bbb/specials/[![]"), - vec!( - PathBuf::from("bbb/specials/!"), - PathBuf::from("bbb/specials/]") - ) - ); - - assert_eq!( - glob_vec("bbb/specials/[!]]"), - vec!( - PathBuf::from("bbb/specials/!"), - PathBuf::from("bbb/specials/[") - ) - ); - - assert_eq!( - glob_vec("bbb/specials/[!!]"), - vec!( - PathBuf::from("bbb/specials/["), - PathBuf::from("bbb/specials/]") - ) - ); - } else { - assert_eq!( - glob_vec("bbb/specials/[![]"), - vec!( - PathBuf::from("bbb/specials/!"), - PathBuf::from("bbb/specials/*"), - PathBuf::from("bbb/specials/?"), - PathBuf::from("bbb/specials/]") - ) - ); - - assert_eq!( - glob_vec("bbb/specials/[!]]"), - vec!( - PathBuf::from("bbb/specials/!"), - PathBuf::from("bbb/specials/*"), - PathBuf::from("bbb/specials/?"), - PathBuf::from("bbb/specials/[") - ) - ); - - assert_eq!( - glob_vec("bbb/specials/[!!]"), - vec!( - PathBuf::from("bbb/specials/*"), - PathBuf::from("bbb/specials/?"), - PathBuf::from("bbb/specials/["), - PathBuf::from("bbb/specials/]") - ) - ); - - assert_eq!( - glob_vec("bbb/specials/[!*]"), - vec!( - PathBuf::from("bbb/specials/!"), - PathBuf::from("bbb/specials/?"), - PathBuf::from("bbb/specials/["), - PathBuf::from("bbb/specials/]") - ) - ); - - assert_eq!( - glob_vec("bbb/specials/[!?]"), - vec!( - PathBuf::from("bbb/specials/!"), - PathBuf::from("bbb/specials/*"), - PathBuf::from("bbb/specials/["), - PathBuf::from("bbb/specials/]") - ) - ); - } -} diff --git a/vendor/glob/triagebot.toml b/vendor/glob/triagebot.toml deleted file mode 100644 index fa0824ac53c0a9..00000000000000 --- a/vendor/glob/triagebot.toml +++ /dev/null @@ -1 +0,0 @@ -[assign] diff 
--git a/vendor/itertools/.cargo-checksum.json b/vendor/itertools/.cargo-checksum.json deleted file mode 100644 index 2fc9929fb19a8e..00000000000000 --- a/vendor/itertools/.cargo-checksum.json +++ /dev/null @@ -1 +0,0 @@ -{"files":{".cargo_vcs_info.json":"a3852e6977ae2992b84eaa301ca1402cafaa705d968d18b4df1e7c1324961be7",".codecov.yml":"27b445dc39fefbcb3c232623d6ce77ec15133d3fad6bde481b8d140614993b2a",".github/dependabot.yml":"7ae793ed2cfbb3d571f46e4c6ed9cfd374af472c44d38d7e9be82e91fccafcd4",".github/workflows/ci.yml":"f7335e53804a94dbfb31d3215d5035461ac3de73b932b116db2c8102c56bc396",".github/workflows/coverage.yml":"6dfc476a71ffa247ff4a79dfb2e51afc927ea644f5c781afaf5c3cd03b552537","CHANGELOG.md":"ceee4376468a3f7647f3bf4649e195a86873dd3091f23e3f992d248bd143fba2","CONTRIBUTING.md":"d5787d0fd4df15481e2e09a37234ac5dec22c007c890826991f633d890efa29e","Cargo.lock":"fd2c9ca8e299f51d7ed2a0f3760c393f03c544c817743ab7341c1f22b8c1d869","Cargo.toml":"49abb2101a0dd9cb137df206454b6620d04929a4975921fab6682ba834435620","Cargo.toml.orig":"30713cac3a7479b71408e83c0247aef8c7fd716c8fd4ab490d55c36cea0bc0e2","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"7576269ea71f767b99297934c0b2367532690f8c4badc695edf8e04ab6a1e545","README.md":"fc812ab0d5756b62c2ae34f38365899204b53332d5e6a87a695b0fe15a466957","benches/bench1.rs":"d632c8b839d7b318d1cb7b81b9c62570c77dcdf0696b8ce3d52067c79c930f78","benches/combinations.rs":"5b3bd243336d6b6bdc111d66218f3f0a4ecdb10fb72e90db79959e3d8bb2cf6f","benches/combinations_with_replacement.rs":"11f29160652a2d90ce7ca4b1c339c4457888ab6867e2456ce1c62e3adf9be737","benches/fold_specialization.rs":"66ab13fd8576a662afb59ef72c5565f5c3d27f7f30a976450ee5a14958654fa2","benches/powerset.rs":"dc1fd729584147e5d8e4d19c6ca6f8706087d41c3c5beb7293d9ea43b4beab14","benches/specializations.rs":"d8320071a692147c1239881725079003be2f924f6124c3aa3bdf6a4596d66a66","benches/tree_reduce.rs":"fa4f22f042b76df89094ddf6e925ba42c4c3992f8195e719ed035f2e7cfa05bd","benches/tuple_combinations.rs":"16366158743307a0289fc1df423a3cec45009807d410a9fe9922d5b6f8b7d002","benches/tuples.rs":"5ab542aca40df4390de0ebf3819665df402d924a7dd6f4280e6ffc942bbd25c4","examples/iris.data":"596ffd580471ca4d4880f8e439c7281f3b50d8249a5960353cb200b1490f63a0","examples/iris.rs":"42c1b2fc148df52a050b013a57b577ad19911f1fe85b9525863df501979b5cd1","src/adaptors/coalesce.rs":"b57157c205ae077dd398740b61c7f49023aa80868abd8a071a6fe89ae6ecc9ad","src/adaptors/map.rs":"4952ee770cb54e98b2f649efd9c98f18951689358eb9b6bee10f139d056353ae","src/adaptors/mod.rs":"7064a1043baec815c02803d5043bd950e6a515f3a0247e44028ee080004dc225","src/adaptors/multi_product.rs":"ad501e8ae4e5089b9d2f2be1f9a4713da6a2103b14daa759e09918409f88e321","src/combinations.rs":"6c1cd55051eb59c595780b055ccabb07db72add134120dd8b2f5aa60c0f5fa6e","src/combinations_with_replacement.rs":"cad1885ca51e52a1dc324a0b06bd0d1d911f1dd58cf5d76bd9a9c78a09853b86","src/concat_impl.rs":"6094463eb57f77e115f6a3fe7f469992eef81c0c4caa9585b99a426d87f794fb","src/cons_tuples_impl.rs":"3ceee1ff0dbd4c3b43195a490b8f38b05de3a46e0fb691ba11fbbe1e7e3ad746","src/diff.rs":"046b3ac4a22036b9ec8741aba4e8f6729ae44bf14346b61c23192b88d9fc7c88","src/duplicates_impl.rs":"1be37249b4566edc8da611ed9766ec851a526e7513bd13d80fe97482dcfcf7f3","src/either_or_both.rs":"cac278666b5d3c1fd103d97d15ce4c40960ea459441aeae83c6502087fd2ad8d","src/exactly_one_err.rs":"90b6204551161d27394af72107765dbfe3b51a77f4770c2e506fa4938985a184","src/extrema_set.rs":"7e0d92ca1aafc1221e08d0297087b35373463d03228a0e65628cfd1734
273e90","src/flatten_ok.rs":"62c18e5221a27949a00de49414306d6dfd601515817c1c8ae6189e3275756dd3","src/format.rs":"94675a6ac4500ec52bbf8463b2241b870fea8b5dd6b113accb8a00b2c1174871","src/free.rs":"6f3597a5ccf8a9b0606da7df6803f7368152ebcf7b7bcfd31b17fcff3a286139","src/group_map.rs":"c9da201137c6bb479b9308bfc38398b76950e39905f4ce8bc435c5318371522c","src/groupbylazy.rs":"5862629719258703aad47977ba1060f20fff15e962e18e6142758ebf6cd4a61c","src/grouping_map.rs":"8dac807a6cbf1893fdc147b4160000c452bfb5e533e1c774ed6bd3af91cf46da","src/impl_macros.rs":"97fc5f39574805e0c220aa462cf1ae7dcac5c1082d6ee5500e7d71c120db5f88","src/intersperse.rs":"55031819e985c3184275e254c9600ecbe01e9fb49f198039c5da82a87ea5b90e","src/iter_index.rs":"1b0ff8376a4ad855d44db8c662450c777db84e0f4997b53ca575c65b107bb83b","src/k_smallest.rs":"6a665742f6665e350a54ae3ff821252e7c599b57aee3239a03fa56a9d1930467","src/kmerge_impl.rs":"2e425d4189898566c5146e8f5bd258045c246f6babbe3ac5fef10ca08ae2efd2","src/lazy_buffer.rs":"a065f73c228f156bdf901824977ea9375f912823af4f9b05378e3f633d3b20e4","src/lib.rs":"75903dcd21573a8a77a205cfb8d335c60c2939771481c6431c29a0918d8dbfb0","src/merge_join.rs":"bb1fccddcc647fe21da1895a8808c06596d49900f5cf60a69a9c9141fc12af11","src/minmax.rs":"0ec34b172ca8efc4aacb96f3e5771bdc5e8ac882876ee0f59d698c3924717c48","src/multipeek_impl.rs":"79eef0be49ad66f15d41808e72c03976c4f7cff5838b69d17975d3ece266f3f8","src/pad_tail.rs":"e6bb5b086478600b0dbb8726cae8364bf83ab36d989ef467e1264eea43933b50","src/peek_nth.rs":"093f1a157b1c917f041af5244a5a46311affa2922126e36dc0ee2c501c79b58c","src/peeking_take_while.rs":"6967ba212f045145da7683a192471b2dcfcedf90d23922d70a5b7e2a1b36622e","src/permutations.rs":"b316084ee14e9e138d22f177367b3bfa24cb3e5e90ab20b9b00a9a23d653496f","src/powerset.rs":"7ab24fefc914b339dd92a6c8e639d0cad34479e09293b3346078856d6bc02d34","src/process_results_impl.rs":"a6f91aec53c56b042e15ecb8f8ca489c81e3ee92347dc9fa8352a5baac44a247","src/put_back_n_impl.rs":"5a58d7a31c03029f0726e4d42de3be869580cf76b73c6d1ef70dd40c240b03a0","src/rciter_impl.rs":"9a50cdc0106587be8ee49c2af5fcf84436b74d353c2846b401eb638c23b4733c","src/repeatn.rs":"dd9a5bf5a63ef9cc6ec5c8a6137c7ffba80f13568b6d001e189daaa29ffbaf39","src/size_hint.rs":"6022c2327ddc6df7e7b939eb60a93ee66ea9aa4d3aab49b9952e663ff4bff10b","src/sources.rs":"ef942af209ca1effcd28a95abedad8c45b659ae2a15b66c2158cb604f6e325f8","src/take_while_inclusive.rs":"1973a9f5322b3dae3b5ccded5912a08a8e2e975b9a5eac666192b118b230d305","src/tee.rs":"dad50ca162627cf0a67786f0993ef27d06cdefc14d412463e58c07824ef409d8","src/tuple_impl.rs":"0213261109e7c65746ccc22425d19141907bf7ea1e3dd4c40e9f278e6148e272","src/unique_impl.rs":"1efc280226f13ddd7dd5f7eedeec0093b704596652c942f3a0b2f8c90fa2e2f7","src/unziptuple.rs":"f3f6a2ee2658fa07db7592f2c344c2e3b1263a21fc75e1325f2be32c9dc1e750","src/with_position.rs":"9ca1eb195d04690b0c3a62a6c0eea349b8042e11c4ca4b80744f54103e1c7355","src/zip_eq_impl.rs":"4e0d38266c26982ea8b8d055994cb1298e93b7749caadbd7f25d2b6e0c8ce0d7","src/zip_longest.rs":"5572699564dd5717cc074b7733333ed238c2e9f3e6819d45e33e3a2dbda74478","src/ziptuple.rs":"d3a12221d39c8a5514574adb3ad2ccd1803d514b1cb09fbcd9253e3ddd628310","tests/adaptors_no_collect.rs":"7e6240878b1fc13b6384fdde0317d5d7ccca3e417b10a201ba61eb5255400fda","tests/flatten_ok.rs":"b7894874132918b8229c7150b2637511d8e3e14197d8eeb9382d46b2a514efa2","tests/laziness.rs":"89e6caec10da3d7aeadf9e30d5caf03cda36d07cee8415ff134b5b8e2a2cf144","tests/macros_hygiene.rs":"c9e9f0546a8c12ea52311c0eadd77d75c782d4e10ae9e74d410ea2861d526c66","tests/merge_join.rs":"5fb506b989f4a331
d46cdec5775ea594656985134196099eaf8d3905bdddcdd5","tests/peeking_take_while.rs":"f834361c5520dda15eb9e9ebe87507c905462201412b21859d9f83dab91d0e0b","tests/quick.rs":"60b1ca6d820aa505545f20d6082fd08c1e0470b5326b711567ec1c93d07f9ced","tests/specializations.rs":"7c6a461850a2b4f783801ef23b2303ad985c58f2295c569001369b3c9d4c6e33","tests/test_core.rs":"482e077e0c5fe78ba0a8a126d8c0821162d820a21936855fadede713b1d4e70a","tests/test_std.rs":"f788573adc9ae19eb4bd2886c3967b273dd881982af407f6f5b6276434df0f00","tests/tuples.rs":"014e4da776174bfe923270e2a359cd9c95b372fce4b952b8138909d6e2c52762","tests/zip.rs":"2f68d531170fa2f106efafaf38ae854281d93305bf1b2b8d4bea833072518ecd"},"package":"413ee7dfc52ee1a4949ceeb7dbc8a33f2d6c088194d9f922fb8318faf1f01186"} \ No newline at end of file diff --git a/vendor/itertools/.cargo_vcs_info.json b/vendor/itertools/.cargo_vcs_info.json deleted file mode 100644 index 848cbe437ec807..00000000000000 --- a/vendor/itertools/.cargo_vcs_info.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "git": { - "sha1": "d5084d15e959b85d89a49e5cd33ad6267bc541a3" - }, - "path_in_vcs": "" -} \ No newline at end of file diff --git a/vendor/itertools/.codecov.yml b/vendor/itertools/.codecov.yml deleted file mode 100644 index d06394ae04a138..00000000000000 --- a/vendor/itertools/.codecov.yml +++ /dev/null @@ -1,7 +0,0 @@ -coverage: - status: - project: - default: - target: auto - # Allow a tiny drop of overall project coverage in PR to reduce spurious failures. - threshold: 0.25% diff --git a/vendor/itertools/.github/dependabot.yml b/vendor/itertools/.github/dependabot.yml deleted file mode 100644 index 71607d0c3c26d5..00000000000000 --- a/vendor/itertools/.github/dependabot.yml +++ /dev/null @@ -1,6 +0,0 @@ -version: 2 -updates: -- package-ecosystem: github-actions - directory: "/" - schedule: - interval: daily diff --git a/vendor/itertools/.github/workflows/ci.yml b/vendor/itertools/.github/workflows/ci.yml deleted file mode 100644 index 239ce2405f3989..00000000000000 --- a/vendor/itertools/.github/workflows/ci.yml +++ /dev/null @@ -1,85 +0,0 @@ -name: CI - -on: - pull_request: - paths-ignore: - - "**.md" - merge_group: - paths-ignore: - - "**.md" - -jobs: - check: - runs-on: ubuntu-latest - strategy: - matrix: - features: - [ - "", - "--no-default-features", - "--no-default-features --features use_alloc", - "--all-targets --all-features", - ] - steps: - - uses: actions/checkout@v4 - - uses: dtolnay/rust-toolchain@stable - with: - components: clippy - - run: RUSTFLAGS="--deny warnings" cargo clippy ${{ matrix.features }} - - doc: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4 - - uses: dtolnay/rust-toolchain@stable - - run: RUSTDOCFLAGS="-Dwarnings" cargo doc --all-features - - msrv: - runs-on: ubuntu-latest - env: - CARGO_NET_GIT_FETCH_WITH_CLI: true - steps: - - uses: actions/checkout@v4 - - uses: taiki-e/install-action@cargo-no-dev-deps - - uses: dtolnay/rust-toolchain@master - with: - # Here, it does not trigger a PR from dependabot. 
- toolchain: 1.43.1 - - run: cargo no-dev-deps check - - test: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4 - - uses: dtolnay/rust-toolchain@stable - - run: cargo test --all-features - - check-format: - name: check format - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4 - - uses: dtolnay/rust-toolchain@master - with: - toolchain: stable - components: rustfmt - - run: cargo fmt --check - - semver-checks: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4 - - uses: obi1kenobi/cargo-semver-checks-action@v2.4 - with: - rust-toolchain: stable - feature-group: all-features - - # Used to signal to branch protections that all other jobs have succeeded. - all-jobs-succeed: - name: All checks succeeded - if: success() - runs-on: ubuntu-latest - needs: [check, msrv, test, check-format, doc] - steps: - - name: Mark the job as successful - run: exit 0 diff --git a/vendor/itertools/.github/workflows/coverage.yml b/vendor/itertools/.github/workflows/coverage.yml deleted file mode 100644 index 5c08456590ebd9..00000000000000 --- a/vendor/itertools/.github/workflows/coverage.yml +++ /dev/null @@ -1,34 +0,0 @@ -on: - push: - branches: [master] - paths-ignore: - - "**.md" - pull_request: - paths-ignore: - - "**.md" - -name: Code Coverage - -jobs: - coverage: - name: coverage - runs-on: ubuntu-latest - steps: - - name: checkout source - uses: actions/checkout@v4 - - - name: Install nightly toolchain - uses: dtolnay/rust-toolchain@nightly - with: - components: llvm-tools-preview - - - name: Install cargo-llvm-cov - uses: taiki-e/install-action@cargo-llvm-cov - - - name: Run llvm-cov - run: cargo llvm-cov --all-features --doctests --workspace --lcov --output-path lcov.info - - - name: Upload coverage to Codecov - uses: codecov/codecov-action@v4 - with: - files: lcov.info diff --git a/vendor/itertools/CHANGELOG.md b/vendor/itertools/CHANGELOG.md deleted file mode 100644 index de9564c6a229f7..00000000000000 --- a/vendor/itertools/CHANGELOG.md +++ /dev/null @@ -1,539 +0,0 @@ -# Changelog - -## 0.13.0 - -### Breaking -- Removed implementation of `DoubleEndedIterator` for `ConsTuples` (#853) -- Made `MultiProduct` fused and fixed on an empty iterator (#835, #834) -- Changed `iproduct!` to return tuples for maxi one iterator too (#870) -- Changed `PutBack::put_back` to return the old value (#880) -- Removed deprecated `repeat_call, Itertools::{foreach, step, map_results, fold_results}` (#878) -- Removed `TakeWhileInclusive::new` (#912) - -### Added -- Added `Itertools::{smallest_by, smallest_by_key, largest, largest_by, largest_by_key}` (#654, #885) -- Added `Itertools::tail` (#899) -- Implemented `DoubleEndedIterator` for `ProcessResults` (#910) -- Implemented `Debug` for `FormatWith` (#931) -- Added `Itertools::get` (#891) - -### Changed -- Deprecated `Itertools::group_by` (renamed `chunk_by`) (#866, #879) -- Deprecated `unfold` (use `std::iter::from_fn` instead) (#871) -- Optimized `GroupingMapBy` (#873, #876) -- Relaxed `Fn` bounds to `FnMut` in `diff_with, Itertools::into_group_map_by` (#886) -- Relaxed `Debug/Clone` bounds for `MapInto` (#889) -- Documented the `use_alloc` feature (#887) -- Optimized `Itertools::set_from` (#888) -- Removed badges in `README.md` (#890) -- Added "no-std" categories in `Cargo.toml` (#894) -- Fixed `Itertools::k_smallest` on short unfused iterators (#900) -- Deprecated `Itertools::tree_fold1` (renamed `tree_reduce`) (#895) -- Deprecated `GroupingMap::fold_first` (renamed `reduce`) (#902) -- Fixed `Itertools::k_smallest(0)` to consume 
the iterator, optimized `Itertools::k_smallest(1)` (#909) -- Specialized `Combinations::nth` (#914) -- Specialized `MergeBy::fold` (#920) -- Specialized `CombinationsWithReplacement::nth` (#923) -- Specialized `FlattenOk::{fold, rfold}` (#927) -- Specialized `Powerset::nth` (#924) -- Documentation fixes (#882, #936) -- Fixed `assert_equal` for iterators longer than `i32::MAX` (#932) -- Updated the `must_use` message of non-lazy `KMergeBy` and `TupleCombinations` (#939) - -### Notable Internal Changes -- Tested iterator laziness (#792) -- Created `CONTRIBUTING.md` (#767) - -## 0.12.1 - -### Added -- Documented iteration order guarantee for `Itertools::[tuple_]combinations` (#822) -- Documented possible panic in `iterate` (#842) -- Implemented `Clone` and `Debug` for `Diff` (#845) -- Implemented `Debug` for `WithPosition` (#859) -- Implemented `Eq` for `MinMaxResult` (#838) -- Implemented `From>` for `Option>` (#843) -- Implemented `PeekingNext` for `RepeatN` (#855) - -### Changed -- Made `CoalesceBy` lazy (#801) -- Optimized `Filter[Map]Ok::next`, `Itertools::partition`, `Unique[By]::next[_back]` (#818) -- Optimized `Itertools::find_position` (#837) -- Optimized `Positions::next[_back]` (#816) -- Optimized `ZipLongest::fold` (#854) -- Relaxed `Debug` bounds for `GroupingMapBy` (#860) -- Specialized `ExactlyOneError::fold` (#826) -- Specialized `Interleave[Shortest]::fold` (#849) -- Specialized `MultiPeek::fold` (#820) -- Specialized `PadUsing::[r]fold` (#825) -- Specialized `PeekNth::fold` (#824) -- Specialized `Positions::[r]fold` (#813) -- Specialized `PutBackN::fold` (#823) -- Specialized `RepeatN::[r]fold` (#821) -- Specialized `TakeWhileInclusive::fold` (#851) -- Specialized `ZipLongest::rfold` (#848) - -### Notable Internal Changes -- Added test coverage in CI (#847, #856) -- Added semver check in CI (#784) -- Enforced `clippy` in CI (#740) -- Enforced `rustdoc` in CI (#840) -- Improved specialization tests (#807) -- More specialization benchmarks (#806) - -## 0.12.0 - -### Breaking -- Made `take_while_inclusive` consume iterator by value (#709) -- Added `Clone` bound to `Unique` (#777) - -### Added -- Added `Itertools::try_len` (#723) -- Added free function `sort_unstable` (#796) -- Added `GroupMap::fold_with` (#778, #785) -- Added `PeekNth::{peek_mut, peek_nth_mut}` (#716) -- Added `PeekNth::{next_if, next_if_eq}` (#734) -- Added conversion into `(Option,Option)` to `EitherOrBoth` (#713) -- Added conversion from `Either` to `EitherOrBoth` (#715) -- Implemented `ExactSizeIterator` for `Tuples` (#761) -- Implemented `ExactSizeIterator` for `(Circular)TupleWindows` (#752) -- Made `EitherOrBoth` a shorthand for `EitherOrBoth` (#719) - -### Changed -- Added missing `#[must_use]` annotations on iterator adaptors (#794) -- Made `Combinations` lazy (#795) -- Made `Intersperse(With)` lazy (#797) -- Made `Permutations` lazy (#793) -- Made `Product` lazy (#800) -- Made `TupleWindows` lazy (#602) -- Specialized `Combinations::{count, size_hint}` (#729) -- Specialized `CombinationsWithReplacement::{count, size_hint}` (#737) -- Specialized `Powerset::fold` (#765) -- Specialized `Powerset::count` (#735) -- Specialized `TupleCombinations::{count, size_hint}` (#763) -- Specialized `TupleCombinations::fold` (#775) -- Specialized `WhileSome::fold` (#780) -- Specialized `WithPosition::fold` (#772) -- Specialized `ZipLongest::fold` (#774) -- Changed `{min, max}_set*` operations require `alloc` feature, instead of `std` (#760) -- Improved documentation of `tree_fold1` (#787) -- Improved documentation of 
`permutations` (#724) -- Fixed typo in documentation of `multiunzip` (#770) - -### Notable Internal Changes -- Improved specialization tests (#799, #786, #782) -- Simplified implementation of `Permutations` (#739, #748, #790) -- Combined `Merge`/`MergeBy`/`MergeJoinBy` implementations (#736) -- Simplified `Permutations::size_hint` (#739) -- Fix wrapping arithmetic in benchmarks (#770) -- Enforced `rustfmt` in CI (#751) -- Disallowed compile warnings in CI (#720) -- Used `cargo hack` to check MSRV (#754) - -## 0.11.0 - -### Breaking -- Make `Itertools::merge_join_by` also accept functions returning bool (#704) -- Implement `PeekingNext` transitively over mutable references (#643) -- Change `with_position` to yield `(Position, Item)` instead of `Position` (#699) - -### Added -- Add `Itertools::take_while_inclusive` (#616) -- Implement `PeekingNext` for `PeekingTakeWhile` (#644) -- Add `EitherOrBoth::{just_left, just_right, into_left, into_right, as_deref, as_deref_mut, left_or_insert, right_or_insert, left_or_insert_with, right_or_insert_with, insert_left, insert_right, insert_both}` (#629) -- Implement `Clone` for `CircularTupleWindows` (#686) -- Implement `Clone` for `Chunks` (#683) -- Add `Itertools::process_results` (#680) - -### Changed -- Use `Cell` instead of `RefCell` in `Format` and `FormatWith` (#608) -- CI tweaks (#674, #675) -- Document and test the difference between stable and unstable sorts (#653) -- Fix documentation error on `Itertools::max_set_by_key` (#692) -- Move MSRV metadata to `Cargo.toml` (#672) -- Implement `equal` with `Iterator::eq` (#591) - -## 0.10.5 - - Maintenance - -## 0.10.4 - - Add `EitherOrBoth::or` and `EitherOrBoth::or_else` (#593) - - Add `min_set`, `max_set` et al. (#613, #323) - - Use `either/use_std` (#628) - - Documentation fixes (#612, #625, #632, #633, #634, #638) - - Code maintenance (#623, #624, #627, #630) - -## 0.10.3 - - Maintenance - -## 0.10.2 - - Add `Itertools::multiunzip` (#362, #565) - - Add `intersperse` and `intersperse_with` free functions (#555) - - Add `Itertools::sorted_by_cached_key` (#424, #575) - - Specialize `ProcessResults::fold` (#563) - - Fix subtraction overflow in `DuplicatesBy::size_hint` (#552) - - Fix specialization tests (#574) - - More `Debug` impls (#573) - - Deprecate `fold1` (use `reduce` instead) (#580) - - Documentation fixes (`HomogenousTuple`, `into_group_map`, `into_group_map_by`, `MultiPeek::peek`) (#543 et al.) 
- -## 0.10.1 - - Add `Itertools::contains` (#514) - - Add `Itertools::counts_by` (#515) - - Add `Itertools::partition_result` (#511) - - Add `Itertools::all_unique` (#241) - - Add `Itertools::duplicates` and `Itertools::duplicates_by` (#502) - - Add `chain!` (#525) - - Add `Itertools::at_most_one` (#523) - - Add `Itertools::flatten_ok` (#527) - - Add `EitherOrBoth::or_default` (#583) - - Add `Itertools::find_or_last` and `Itertools::find_or_first` (#535) - - Implement `FusedIterator` for `FilterOk`, `FilterMapOk`, `InterleaveShortest`, `KMergeBy`, `MergeBy`, `PadUsing`, `Positions`, `Product` , `RcIter`, `TupleWindows`, `Unique`, `UniqueBy`, `Update`, `WhileSome`, `Combinations`, `CombinationsWithReplacement`, `Powerset`, `RepeatN`, and `WithPosition` (#550) - - Implement `FusedIterator` for `Interleave`, `IntersperseWith`, and `ZipLongest` (#548) - -## 0.10.0 - - **Increase minimum supported Rust version to 1.32.0** - - Improve macro hygiene (#507) - - Add `Itertools::powerset` (#335) - - Add `Itertools::sorted_unstable`, `Itertools::sorted_unstable_by`, and `Itertools::sorted_unstable_by_key` (#494) - - Implement `Error` for `ExactlyOneError` (#484) - - Undeprecate `Itertools::fold_while` (#476) - - Tuple-related adapters work for tuples of arity up to 12 (#475) - - `use_alloc` feature for users who have `alloc`, but not `std` (#474) - - Add `Itertools::k_smallest` (#473) - - Add `Itertools::into_grouping_map` and `GroupingMap` (#465) - - Add `Itertools::into_grouping_map_by` and `GroupingMapBy` (#465) - - Add `Itertools::counts` (#468) - - Add implementation of `DoubleEndedIterator` for `Unique` (#442) - - Add implementation of `DoubleEndedIterator` for `UniqueBy` (#442) - - Add implementation of `DoubleEndedIterator` for `Zip` (#346) - - Add `Itertools::multipeek` (#435) - - Add `Itertools::dedup_with_count` and `DedupWithCount` (#423) - - Add `Itertools::dedup_by_with_count` and `DedupByWithCount` (#423) - - Add `Itertools::intersperse_with` and `IntersperseWith` (#381) - - Add `Itertools::filter_ok` and `FilterOk` (#377) - - Add `Itertools::filter_map_ok` and `FilterMapOk` (#377) - - Deprecate `Itertools::fold_results`, use `Itertools::fold_ok` instead (#377) - - Deprecate `Itertools::map_results`, use `Itertools::map_ok` instead (#377) - - Deprecate `FoldResults`, use `FoldOk` instead (#377) - - Deprecate `MapResults`, use `MapOk` instead (#377) - - Add `Itertools::circular_tuple_windows` and `CircularTupleWindows` (#350) - - Add `peek_nth` and `PeekNth` (#303) - -## 0.9.0 - - Fix potential overflow in `MergeJoinBy::size_hint` (#385) - - Add `derive(Clone)` where possible (#382) - - Add `try_collect` method (#394) - - Add `HomogeneousTuple` trait (#389) - - Fix `combinations(0)` and `combinations_with_replacement(0)` (#383) - - Don't require `ParitalEq` to the `Item` of `DedupBy` (#397) - - Implement missing specializations on the `PutBack` adaptor and on the `MergeJoinBy` iterator (#372) - - Add `position_*` methods (#412) - - Derive `Hash` for `EitherOrBoth` (#417) - - Increase minimum supported Rust version to 1.32.0 - -## 0.8.2 - - Use `slice::iter` instead of `into_iter` to avoid future breakage (#378, by @LukasKalbertodt) -## 0.8.1 - - Added a [`.exactly_one()`](https://docs.rs/itertools/0.8.1/itertools/trait.Itertools.html#method.exactly_one) iterator method that, on success, extracts the single value of an iterator ; by @Xaeroxe - - Added combinatory iterator adaptors: - - 
[`.permutations(k)`](https://docs.rs/itertools/0.8.1/itertools/trait.Itertools.html#method.permutations): - - `[0, 1, 2].iter().permutations(2)` yields - - ```rust - [ - vec![0, 1], - vec![0, 2], - vec![1, 0], - vec![1, 2], - vec![2, 0], - vec![2, 1], - ] - ``` - - ; by @tobz1000 - - - [`.combinations_with_replacement(k)`](https://docs.rs/itertools/0.8.1/itertools/trait.Itertools.html#method.combinations_with_replacement): - - `[0, 1, 2].iter().combinations_with_replacement(2)` yields - - ```rust - [ - vec![0, 0], - vec![0, 1], - vec![0, 2], - vec![1, 1], - vec![1, 2], - vec![2, 2], - ] - ``` - - ; by @tommilligan - - - For reference, these methods join the already existing [`.combinations(k)`](https://docs.rs/itertools/0.8.1/itertools/trait.Itertools.html#method.combinations): - - `[0, 1, 2].iter().combinations(2)` yields - - ```rust - [ - vec![0, 1], - vec![0, 2], - vec![1, 2], - ] - ``` - - - Improved the performance of [`.fold()`](https://docs.rs/itertools/0.8.1/itertools/trait.Itertools.html#method.fold)-based internal iteration for the [`.intersperse()`](https://docs.rs/itertools/0.8.1/itertools/trait.Itertools.html#method.intersperse) iterator ; by @jswrenn - - Added [`.dedup_by()`](https://docs.rs/itertools/0.8.1/itertools/trait.Itertools.html#method.dedup_by), [`.merge_by()`](https://docs.rs/itertools/0.8.1/itertools/trait.Itertools.html#method.merge_by) and [`.kmerge_by()`](https://docs.rs/itertools/0.8.1/itertools/trait.Itertools.html#method.kmerge_by) adaptors that work like [`.dedup()`](https://docs.rs/itertools/0.8.1/itertools/trait.Itertools.html#method.dedup), [`.merge()`](https://docs.rs/itertools/0.8.1/itertools/trait.Itertools.html#method.merge) and [`.kmerge()`](https://docs.rs/itertools/0.8.1/itertools/trait.Itertools.html#method.kmerge), but taking an additional custom comparison closure parameter. ; by @phimuemue - - Improved the performance of [`.all_equal()`](https://docs.rs/itertools/0.8.1/itertools/trait.Itertools.html#method.all_equal) ; by @fyrchik - - Loosened the bounds on [`.partition_map()`](https://docs.rs/itertools/0.8.1/itertools/trait.Itertools.html#method.partition_map) to take just a `FnMut` closure rather than a `Fn` closure, and made its implementation use internal iteration for better performance ; by @danielhenrymantilla - - Added convenience methods to [`EitherOrBoth`](https://docs.rs/itertools/0.8.1/itertools/enum.EitherOrBoth.html) elements yielded from the [`.zip_longest()`](https://docs.rs/itertools/0.8.1/itertools/trait.Itertools.html#method.zip_longest) iterator adaptor ; by @Avi-D-coder - - Added [`.sum1()`](https://docs.rs/itertools/0.8.1/itertools/trait.Itertools.html#method.sum1) and [`.product1()`](https://docs.rs/itertools/0.8.1/itertools/trait.Itertools.html#method.product1) iterator methods that respectively try to return the sum and the product of the elements of an iterator **when it is not empty**, otherwise they return `None` ; by @Emerentius -## 0.8.0 - - Added new adaptor `.map_into()` for conversions using `Into` by @vorner - - Improved `Itertools` docs by @JohnHeitmann - - The return type of `.sorted_by_by_key()` is now an iterator, not a Vec. - - The return type of the `izip!(x, y)` macro with exactly two arguments is now the usual `Iterator::zip`. 
- - Remove `.flatten()` in favour of std's `.flatten()` - - Deprecate `.foreach()` in favour of std's `.for_each()` - - Deprecate `.step()` in favour of std's `.step_by()` - - Deprecate `repeat_call` in favour of std's `repeat_with` - - Deprecate `.fold_while()` in favour of std's `.try_fold()` - - Require Rust 1.24 as minimal version. -## 0.7.11 - - Add convenience methods to `EitherOrBoth`, making it more similar to `Option` and `Either` by @jethrogb -## 0.7.10 - - No changes. -## 0.7.9 - - New inclusion policy: See the readme about suggesting features for std before accepting them in itertools. - - The `FoldWhile` type now implements `Eq` and `PartialEq` by @jturner314 -## 0.7.8 - - Add new iterator method `.tree_fold1()` which is like `.fold1()` except items are combined in a tree structure (see its docs). By @scottmcm - - Add more `Debug` impls by @phimuemue: KMerge, KMergeBy, MergeJoinBy, ConsTuples, Intersperse, ProcessResults, RcIter, Tee, TupleWindows, Tee, ZipLongest, ZipEq, Zip. -## 0.7.7 - - Add new iterator method `.into_group_map() -> HashMap>` which turns an iterator of `(K, V)` elements into such a hash table, where values are grouped by key. By @tobz1000 - - Add new free function `flatten` for the `.flatten()` adaptor. **NOTE:** recent Rust nightlies have `Iterator::flatten` and thus a clash with our flatten adaptor. One workaround is to use the itertools `flatten` free function. -## 0.7.6 - - Add new adaptor `.multi_cartesian_product()` which is an n-ary product iterator by @tobz1000 - - Add new method `.sorted_by_key()` by @Xion - - Provide simpler and faster `.count()` for `.unique()` and `.unique_by()` -## 0.7.5 - - `.multipeek()` now implements `PeekingNext`, by @nicopap. -## 0.7.4 - - Add new adaptor `.update()` by @lucasem; this adaptor is used to modify an element before passing it on in an iterator chain. -## 0.7.3 - - Add new method `.collect_tuple()` by @matklad; it makes a tuple out of the iterator's elements if the number of them matches **exactly**. - - Implement `fold` and `collect` for `.map_results()` which means it reuses the code of the standard `.map()` for these methods. -## 0.7.2 - - Add new adaptor `.merge_join_by` by @srijs; a heterogeneous merge join for two ordered sequences. -## 0.7.1 - - Iterator adaptors and iterators in itertools now use the same `must_use` reminder that the standard library adaptors do, by @matematikaedit and @bluss *“iterator adaptors are lazy and do nothing unless consumed”*. -## 0.7.0 - - Faster `izip!()` by @krdln - - `izip!()` is now a wrapper for repeated regular `.zip()` and a single `.map()`. This means it optimizes as well as the standard library `.zip()` it uses. **Note:** `multizip` and `izip!()` are now different! The former has a named type but the latter optimizes better. - - Faster `.unique()` - - `no_std` support, which is opt-in! - - Many lovable features are still there without std, like `izip!()` or `.format()` or `.merge()`, but not those that use collections. - - Trait bounds were required up front instead of just on the type: `group_by`'s `PartialEq` by @Phlosioneer and `repeat_call`'s `FnMut`. - - Removed deprecated constructor `Zip::new` — use `izip!()` or `multizip()` -## 0.6.5 - - Fix bug in `.cartesian_product()`'s fold (which only was visible for unfused iterators). -## 0.6.4 - - Add specific `fold` implementations for `.cartesian_product()` and `cons_tuples()`, which improves their performance in fold, foreach, and iterator consumers derived from them. 
-## 0.6.3 - - Add iterator adaptor `.positions(predicate)` by @tmccombs -## 0.6.2 - - Add function `process_results` which can “lift” a function of the regular values of an iterator so that it can process the `Ok` values from an iterator of `Results` instead, by @shepmaster - - Add iterator method `.concat()` which combines all iterator elements into a single collection using the `Extend` trait, by @srijs -## 0.6.1 - - Better size hint testing and subsequent size hint bugfixes by @rkarp. Fixes bugs in product, `interleave_shortest` size hints. - - New iterator method `.all_equal()` by @phimuemue -## 0.6.0 - - Deprecated names were removed in favour of their replacements - - `.flatten()` does not implement double ended iteration anymore - - `.fold_while()` uses `&mut self` and returns `FoldWhile`, for composability #168 - - `.foreach()` and `.fold1()` use `self`, like `.fold()` does. - - `.combinations(0)` now produces a single empty vector. #174 -## 0.5.10 - - Add itertools method `.kmerge_by()` (and corresponding free function) - - Relaxed trait requirement of `.kmerge()` and `.minmax()` to PartialOrd. -## 0.5.9 - - Add multipeek method `.reset_peek()` - - Add categories -## 0.5.8 - - Add iterator adaptor `.peeking_take_while()` and its trait `PeekingNext`. -## 0.5.7 - - Add iterator adaptor `.with_position()` - - Fix multipeek's performance for long peeks by using `VecDeque`. -## 0.5.6 - - Add `.map_results()` -## 0.5.5 - - Many more adaptors now implement `Debug` - - Add free function constructor `repeat_n`. `RepeatN::new` is now deprecated. -## 0.5.4 - - Add infinite generator function `iterate`, that takes a seed and a closure. -## 0.5.3 - - Special-cased `.fold()` for flatten and put back. `.foreach()` now uses fold on the iterator, to pick up any iterator specific loop implementation. - - `.combinations(n)` asserts up front that `n != 0`, instead of running into an error on the second iterator element. -## 0.5.2 - - Add `.tuples::()` that iterates by two, three or four elements at a time (where `T` is a tuple type). - - Add `.tuple_windows::()` that iterates using a window of the two, three or four most recent elements. - - Add `.next_tuple::()` method, that picks the next two, three or four elements in one go. - - `.interleave()` now has an accurate size hint. -## 0.5.1 - - Workaround module/function name clash that made racer crash on completing itertools. Only internal changes needed. 
-## 0.5.0 - - [Release announcement](https://bluss.github.io/rust/2016/09/26/itertools-0.5.0/) - - Renamed: - - `combinations` is now `tuple_combinations` - - `combinations_n` to `combinations` - - `group_by_lazy`, `chunks_lazy` to `group_by`, `chunks` - - `Unfold::new` to `unfold()` - - `RepeatCall::new` to `repeat_call()` - - `Zip::new` to `multizip` - - `PutBack::new`, `PutBackN::new` to `put_back`, `put_back_n` - - `PutBack::with_value` is now a builder setter, not a constructor - - `MultiPeek::new`, `.multipeek()` to `multipeek()` - - `format` to `format_with` and `format_default` to `format` - - `.into_rc()` to `rciter` - - `Partition` enum is now `Either` - - Module reorganization: - - All iterator structs are under `itertools::structs` but also reexported to the top level, for backwards compatibility - - All free functions are reexported at the root, `itertools::free` will be removed in the next version - - Removed: - - `ZipSlices`, use `.zip()` instead - - `.enumerate_from()`, `ZipTrusted`, due to being unstable - - `.mend_slices()`, moved to crate `odds` - - Stride, StrideMut, moved to crate `odds` - - `linspace()`, moved to crate `itertools-num` - - `.sort_by()`, use `.sorted_by()` - - `.is_empty_hint()`, use `.size_hint()` - - `.dropn()`, use `.dropping()` - - `.map_fn()`, use `.map()` - - `.slice()`, use `.take()` / `.skip()` - - helper traits in `misc` - - `new` constructors on iterator structs, use `Itertools` trait or free functions instead - - `itertools::size_hint` is now private - - Behaviour changes: - - `format` and `format_with` helpers now panic if you try to format them more than once. - - `repeat_call` is not double ended anymore - - New features: - - tuple flattening iterator is constructible with `cons_tuples` - - itertools reexports `Either` from the `either` crate. `Either` is an iterator when `L, R` are. - - `MinMaxResult` now implements `Copy` and `Clone` - - `tuple_combinations` supports 1-4 tuples of combinations (previously just 2) -## 0.4.19 - - Add `.minmax_by()` - - Add `itertools::free::cloned` - - Add `itertools::free::rciter` - - Improve `.step(n)` slightly to take advantage of specialized Fuse better. -## 0.4.18 - - Only changes related to the "unstable" crate feature. This feature is more or less deprecated. - - Use deprecated warnings when unstable is enabled. `.enumerate_from()` will be removed imminently since it's using a deprecated libstd trait. -## 0.4.17 - - Fix bug in `.kmerge()` that caused it to often produce the wrong order #134 -## 0.4.16 - - Improve precision of the `interleave_shortest` adaptor's size hint (it is now computed exactly when possible). -## 0.4.15 - - Fixup on top of the workaround in 0.4.14. A function in `itertools::free` was removed by mistake and now it is added back again. -## 0.4.14 - - Workaround an upstream regression in a Rust nightly build that broke compilation of of `itertools::free::{interleave, merge}` -## 0.4.13 - - Add `.minmax()` and `.minmax_by_key()`, iterator methods for finding both minimum and maximum in one scan. - - Add `.format_default()`, a simpler version of `.format()` (lazy formatting for iterators). -## 0.4.12 - - Add `.zip_eq()`, an adaptor like `.zip()` except it ensures iterators of inequal length don't pass silently (instead it panics). - - Add `.fold_while()`, an iterator method that is a fold that can short-circuit. - - Add `.partition_map()`, an iterator method that can separate elements into two collections. 
-## 0.4.11 - - Add `.get()` for `Stride{,Mut}` and `.get_mut()` for `StrideMut` -## 0.4.10 - - Improve performance of `.kmerge()` -## 0.4.9 - - Add k-ary merge adaptor `.kmerge()` - - Fix a bug in `.islice()` with ranges `a..b` where a `> b`. -## 0.4.8 - - Implement `Clone`, `Debug` for `Linspace` -## 0.4.7 - - Add function `diff_with()` that compares two iterators - - Add `.combinations_n()`, an n-ary combinations iterator - - Add methods `PutBack::with_value` and `PutBack::into_parts`. -## 0.4.6 - - Add method `.sorted()` - - Add module `itertools::free` with free function variants of common iterator adaptors and methods. For example `enumerate(iterable)`, `rev(iterable)`, and so on. -## 0.4.5 - - Add `.flatten()` -## 0.4.4 - - Allow composing `ZipSlices` with itself -## 0.4.3 - - Write `iproduct!()` as a single expression; this allows temporary values in its arguments. -## 0.4.2 - - Add `.fold_options()` - - Require Rust 1.1 or later -## 0.4.1 - - Update `.dropping()` to take advantage of `.nth()` -## 0.4.0 - - `.merge()`, `.unique()` and `.dedup()` now perform better due to not using function pointers - - Add free functions `enumerate()` and `rev()` - - Breaking changes: - - Return types of `.merge()` and `.merge_by()` renamed and changed - - Method `Merge::new` removed - - `.merge_by()` now takes a closure that returns bool. - - Return type of `.dedup()` changed - - Return type of `.mend_slices()` changed - - Return type of `.unique()` changed - - Removed function `times()`, struct `Times`: use a range instead - - Removed deprecated macro `icompr!()` - - Removed deprecated `FnMap` and method `.fn_map()`: use `.map_fn()` - - `.interleave_shortest()` is no longer guaranteed to act like fused -## 0.3.25 - - Rename `.sort_by()` to `.sorted_by()`. Old name is deprecated. - - Fix well-formedness warnings from RFC 1214, no user visible impact -## 0.3.24 - - Improve performance of `.merge()`'s ordering function slightly -## 0.3.23 - - Added `.chunks()`, similar to (and based on) `.group_by_lazy()`. - - Tweak linspace to match numpy.linspace and make it double ended. -## 0.3.22 - - Added `ZipSlices`, a fast zip for slices -## 0.3.21 - - Remove `Debug` impl for `Format`, it will have different use later -## 0.3.20 - - Optimize `.group_by_lazy()` -## 0.3.19 - - Added `.group_by_lazy()`, a possibly nonallocating group by - - Added `.format()`, a nonallocating formatting helper for iterators - - Remove uses of `RandomAccessIterator` since it has been deprecated in Rust. -## 0.3.17 - - Added (adopted) `Unfold` from Rust -## 0.3.16 - - Added adaptors `.unique()`, `.unique_by()` -## 0.3.15 - - Added method `.sort_by()` -## 0.3.14 - - Added adaptor `.while_some()` -## 0.3.13 - - Added adaptor `.interleave_shortest()` - - Added adaptor `.pad_using()` -## 0.3.11 - - Added `assert_equal` function -## 0.3.10 - - Bugfix `.combinations()` `size_hint`. -## 0.3.8 - - Added source `RepeatCall` -## 0.3.7 - - Added adaptor `PutBackN` - - Added adaptor `.combinations()` -## 0.3.6 - - Added `itertools::partition`, partition a sequence in place based on a predicate. - - Deprecate `icompr!()` with no replacement. -## 0.3.5 - - `.map_fn()` replaces deprecated `.fn_map()`. 
-## 0.3.4 - - `.take_while_ref()` *by-ref adaptor* - - `.coalesce()` *adaptor* - - `.mend_slices()` *adaptor* -## 0.3.3 - - `.dropping_back()` *method* - - `.fold1()` *method* - - `.is_empty_hint()` *method* diff --git a/vendor/itertools/CONTRIBUTING.md b/vendor/itertools/CONTRIBUTING.md deleted file mode 100644 index 1dbf6f59dd546b..00000000000000 --- a/vendor/itertools/CONTRIBUTING.md +++ /dev/null @@ -1,189 +0,0 @@ -# Contributing to itertools - -We use stable Rust only. -Please check the minimum version of Rust we use in `Cargo.toml`. - -_If you are proposing a major change to CI or a new iterator adaptor for this crate, -then **please first file an issue** describing your proposal._ -[Usual concerns about new methods](https://github.com/rust-itertools/itertools/issues/413#issuecomment-657670781). - -To pass CI tests successfully, your code must be free of "compiler warnings" and "clippy warnings" and be "rustfmt" formatted. - -Note that small PRs are easier to review and therefore are more easily merged. - -## Write a new method/adaptor for `Itertools` trait -In general, the code logic should be tested with [quickcheck](https://crates.io/crates/quickcheck) tests in `tests/quick.rs` -which allow us to test properties about the code with randomly generated inputs. - -### Behind `use_std`/`use_alloc` feature? -If it needs the "std" (such as using hashes) then it should be behind the `use_std` feature, -or if it requires heap allocation (such as using vectors) then it should be behind the `use_alloc` feature. -Otherwise it should be able to run in `no_std` context. - -This mostly applies to your new module, each import from it, and to your new `Itertools` method. - -### Pick the right receiver -`self`, `&mut self` or `&self`? From [#710](https://github.com/rust-itertools/itertools/pull/710): - -- Take by value when: - - It transfers ownership to another iterator type, such as `filter`, `map`... - - It consumes the iterator completely, such as `count`, `last`, `max`... -- Mutably borrow when it consumes only part of the iterator, such as `find`, `all`, `try_collect`... -- Immutably borrow when there is no change, such as `size_hint`. - -### Laziness -Iterators are [lazy](https://doc.rust-lang.org/std/iter/index.html#laziness): - -- structs of iterator adaptors should have `#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]` ; -- structs of iterators should have `#[must_use = "iterators are lazy and do nothing unless consumed"]`. - -Those behaviors are **tested** in `tests/laziness.rs`. - -## Specialize `Iterator` methods -It might be more performant to specialize some methods. -However, each specialization should be thoroughly tested. - -Correctly specializing methods can be difficult, and _we do not require that you do it on your initial PR_. - -Most of the time, we want specializations of: - -- [`size_hint`](https://doc.rust-lang.org/std/iter/trait.Iterator.html#method.size_hint): - It mostly allows allocation optimizations. - When always exact, it also enables to implement `ExactSizeIterator`. - See our private module `src/size_hint.rs` for helpers. -- [`fold`](https://doc.rust-lang.org/std/iter/trait.Iterator.html#method.fold) - might make iteration faster than calling `next` repeatedly. 
-- [`count`](https://doc.rust-lang.org/std/iter/trait.Iterator.html#method.count), - [`last`](https://doc.rust-lang.org/std/iter/trait.Iterator.html#method.last), - [`nth`](https://doc.rust-lang.org/std/iter/trait.Iterator.html#method.nth) - as we might be able to avoid iterating on every item with `next`. - -Additionally, - -- `for_each`, `reduce`, `max/min[_by[_key]]` and `partition` all rely on `fold` so you should specialize it instead. -- `all`, `any`, `find`, `find_map`, `cmp`, `partial_cmp`, `eq`, `ne`, `lt`, `le`, `gt`, `ge` and `position` all rely (by default) on `try_fold` - which we can not specialize on stable rust, so you might want to wait it stabilizes - or specialize each of them. -- `DoubleEndedIterator::{nth_back, rfold, rfind}`: similar reasoning. - -An adaptor might use the inner iterator specializations for its own specializations. - -They are **tested** in `tests/specializations.rs` and **benchmarked** in `benches/specializations.rs` -(build those benchmarks is slow so you might want to temporarily remove the ones you do not want to measure). - -## Additional implementations -### The [`Debug`](https://doc.rust-lang.org/std/fmt/trait.Debug.html) implementation -All our iterators should implement `Debug`. - -When one of the field is not debuggable (such as _functions_), you must not derive `Debug`. -Instead, manually implement it and _ignore this field_ in our helper macro `debug_fmt_fields`. - -
-<details>
-4 examples (click to expand) - -```rust -use std::fmt; - -/* ===== Simple derive. ===== */ -#[derive(Debug)] -struct Name1 { - iter: I, -} - -/* ===== With an unclonable field. ===== */ -struct Name2 { - iter: I, - func: F, -} - -// No `F: Debug` bound and the field `func` is ignored. -impl fmt::Debug for Name2 { - // it defines the `fmt` function from a struct name and the fields you want to debug. - debug_fmt_fields!(Name2, iter); -} - -/* ===== With an unclonable field, but another bound to add. ===== */ -struct Name3 { - iter: I, - item: Option, - func: F, -} - -// Same about `F` and `func`, similar about `I` but we must add the `I::Item: Debug` bound. -impl fmt::Debug for Name3 -where - I::Item: fmt::Debug, -{ - debug_fmt_fields!(Name3, iter, item); -} - -/* ===== With an unclonable field for which we can provide some information. ===== */ -struct Name4 { - iter: I, - func: Option, -} - -// If ignore a field is not good enough, implement Debug fully manually. -impl fmt::Debug for Name4 { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - let func = if self.func.is_some() { "Some(_)" } else { "None" }; - f.debug_struct("Name4") - .field("iter", &self.iter) - .field("func", &func) - .finish() - } -} -``` -
- -### When/How to implement [`Clone`](https://doc.rust-lang.org/std/clone/trait.Clone.html) -All our iterators should implement `Clone` when possible. - -Note that a mutable reference is never clonable so `struct Name<'a, I: 'a> { iter: &'a mut I }` can not implement `Clone`. - -Derive `Clone` on a generic struct adds the bound `Clone` on each generic parameter. -It might be an issue in which case you should manually implement it with our helper macro `clone_fields` (it defines the `clone` function calling `clone` on each field) and be careful about the bounds. - -### When to implement [`std::iter::FusedIterator`](https://doc.rust-lang.org/std/iter/trait.FusedIterator.html) -This trait should be implemented _by all iterators that always return `None` after returning `None` once_, because it allows to optimize `Iterator::fuse()`. - -The conditions on which it should be implemented are usually the ones from the `Iterator` implementation, eventually refined to ensure it behaves in a fused way. - -### When to implement [`ExactSizeIterator`](https://doc.rust-lang.org/std/iter/trait.ExactSizeIterator.html) -_When we are always able to return an exact non-overflowing length._ - -Therefore, we do not implement it on adaptors that makes the iterator longer as the resulting length could overflow. - -One should not override `ExactSizeIterator::len` method but rely on an exact `Iterator::size_hint` implementation, meaning it returns `(length, Some(length))` (unless you could make `len` more performant than the default). - -The conditions on which it should be implemented are usually the ones from the `Iterator` implementation, probably refined to ensure the size hint is exact. - -### When to implement [`DoubleEndedIterator`](https://doc.rust-lang.org/std/iter/trait.DoubleEndedIterator.html) -When the iterator structure allows to handle _iterating on both fronts simultaneously_. -The iteration might stop in the middle when both fronts meet. - -The conditions on which it should be implemented are usually the ones from the `Iterator` implementation, probably refined to ensure we can iterate on both fronts simultaneously. - -### When to implement [`itertools::PeekingNext`](https://docs.rs/itertools/latest/itertools/trait.PeekingNext.html) -TODO - -This is currently **tested** in `tests/test_std.rs`. - -## About lending iterators -TODO - - -## Other notes -No guideline about using `#[inline]` yet. - -### `.fold` / `.for_each` / `.try_fold` / `.try_for_each` -In the Rust standard library, it's quite common for `fold` to be implemented in terms of `try_fold`. But it's not something we do yet because we can not specialize `try_fold` methods yet (it uses the unstable `Try`). - -From [#781](https://github.com/rust-itertools/itertools/pull/781), the general rule to follow is something like this: - -- If you need to completely consume an iterator: - - Use `fold` if you need an _owned_ access to an accumulator. - - Use `for_each` otherwise. -- If you need to partly consume an iterator, the same applies with `try_` versions: - - Use `try_fold` if you need an _owned_ access to an accumulator. - - Use `try_for_each` otherwise. diff --git a/vendor/itertools/Cargo.lock b/vendor/itertools/Cargo.lock deleted file mode 100644 index d2183c2a4bc1cb..00000000000000 --- a/vendor/itertools/Cargo.lock +++ /dev/null @@ -1,740 +0,0 @@ -# This file is automatically @generated by Cargo. -# It is not intended for manual editing. 
-version = 3 - -[[package]] -name = "aho-corasick" -version = "1.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916" -dependencies = [ - "memchr", -] - -[[package]] -name = "anes" -version = "0.1.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299" - -[[package]] -name = "atty" -version = "0.2.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8" -dependencies = [ - "hermit-abi", - "libc", - "winapi", -] - -[[package]] -name = "autocfg" -version = "1.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c4b4d0bd25bd0b74681c0ad21497610ce1b7c91b1022cd21c80c6fbdd9476b0" - -[[package]] -name = "bitflags" -version = "1.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" - -[[package]] -name = "bumpalo" -version = "3.16.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "79296716171880943b8470b5f8d03aa55eb2e645a4874bdbb28adb49162e012c" - -[[package]] -name = "cast" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" - -[[package]] -name = "cfg-if" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" - -[[package]] -name = "ciborium" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42e69ffd6f0917f5c029256a24d0161db17cea3997d185db0d35926308770f0e" -dependencies = [ - "ciborium-io", - "ciborium-ll", - "serde", -] - -[[package]] -name = "ciborium-io" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05afea1e0a06c9be33d539b876f1ce3692f4afea2cb41f740e7743225ed1c757" - -[[package]] -name = "ciborium-ll" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57663b653d948a338bfb3eeba9bb2fd5fcfaecb9e199e87e1eda4d9e8b240fd9" -dependencies = [ - "ciborium-io", - "half", -] - -[[package]] -name = "clap" -version = "3.2.25" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ea181bf566f71cb9a5d17a59e1871af638180a18fb0035c92ae62b705207123" -dependencies = [ - "bitflags", - "clap_lex", - "indexmap", - "textwrap", -] - -[[package]] -name = "clap_lex" -version = "0.2.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2850f2f5a82cbf437dd5af4d49848fbdfc27c157c3d010345776f952765261c5" -dependencies = [ - "os_str_bytes", -] - -[[package]] -name = "criterion" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7c76e09c1aae2bc52b3d2f29e13c6572553b30c4aa1b8a49fd70de6412654cb" -dependencies = [ - "anes", - "atty", - "cast", - "ciborium", - "clap", - "criterion-plot", - "itertools 0.10.5", - "lazy_static", - "num-traits", - "oorandom", - "plotters", - "rayon", - "regex", - "serde", - "serde_derive", - "serde_json", - "tinytemplate", - "walkdir", -] - -[[package]] -name = "criterion-plot" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"6b50826342786a51a89e2da3a28f1c32b06e387201bc2d19791f622c673706b1" -dependencies = [ - "cast", - "itertools 0.10.5", -] - -[[package]] -name = "crossbeam-deque" -version = "0.8.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "613f8cc01fe9cf1a3eb3d7f488fd2fa8388403e97039e2f73692932e291a770d" -dependencies = [ - "crossbeam-epoch", - "crossbeam-utils", -] - -[[package]] -name = "crossbeam-epoch" -version = "0.9.18" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b82ac4a3c2ca9c3460964f020e1402edd5753411d7737aa39c3714ad1b5420e" -dependencies = [ - "crossbeam-utils", -] - -[[package]] -name = "crossbeam-utils" -version = "0.8.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "248e3bacc7dc6baa3b21e405ee045c3047101a49145e7e9eca583ab4c2ca5345" - -[[package]] -name = "crunchy" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7" - -[[package]] -name = "either" -version = "1.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a47c1c47d2f5964e29c61246e81db715514cd532db6b5116a25ea3c03d6780a2" - -[[package]] -name = "getrandom" -version = "0.1.16" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8fc3cb4d91f53b50155bdcfd23f6a4c39ae1969c2ae85982b135750cccaf5fce" -dependencies = [ - "cfg-if", - "libc", - "wasi", -] - -[[package]] -name = "half" -version = "2.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6dd08c532ae367adf81c312a4580bc67f1d0fe8bc9c460520283f4c0ff277888" -dependencies = [ - "cfg-if", - "crunchy", -] - -[[package]] -name = "hashbrown" -version = "0.12.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" - -[[package]] -name = "hermit-abi" -version = "0.1.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33" -dependencies = [ - "libc", -] - -[[package]] -name = "indexmap" -version = "1.9.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99" -dependencies = [ - "autocfg", - "hashbrown", -] - -[[package]] -name = "itertools" -version = "0.10.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473" -dependencies = [ - "either", -] - -[[package]] -name = "itertools" -version = "0.13.0" -dependencies = [ - "criterion", - "either", - "paste", - "permutohedron", - "quickcheck", - "rand", -] - -[[package]] -name = "itoa" -version = "1.0.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49f1f14873335454500d59611f1cf4a4b0f786f9ac11f4312a78e4cf2566695b" - -[[package]] -name = "js-sys" -version = "0.3.69" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "29c15563dc2726973df627357ce0c9ddddbea194836909d655df6a75d2cf296d" -dependencies = [ - "wasm-bindgen", -] - -[[package]] -name = "lazy_static" -version = "1.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" - -[[package]] -name = "libc" -version = "0.2.154" -source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "ae743338b92ff9146ce83992f766a31066a91a8c84a45e0e9f21e7cf6de6d346" - -[[package]] -name = "log" -version = "0.4.21" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90ed8c1e510134f979dbc4f070f87d4313098b704861a105fe34231c70a3901c" - -[[package]] -name = "memchr" -version = "2.7.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c8640c5d730cb13ebd907d8d04b52f55ac9a2eec55b440c8892f40d56c76c1d" - -[[package]] -name = "num-traits" -version = "0.2.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841" -dependencies = [ - "autocfg", -] - -[[package]] -name = "once_cell" -version = "1.19.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92" - -[[package]] -name = "oorandom" -version = "11.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ab1bc2a289d34bd04a330323ac98a1b4bc82c9d9fcb1e66b63caa84da26b575" - -[[package]] -name = "os_str_bytes" -version = "6.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2355d85b9a3786f481747ced0e0ff2ba35213a1f9bd406ed906554d7af805a1" - -[[package]] -name = "paste" -version = "1.0.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57c0d7b74b563b49d38dae00a0c37d4d6de9b432382b2892f0574ddcae73fd0a" - -[[package]] -name = "permutohedron" -version = "0.2.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b687ff7b5da449d39e418ad391e5e08da53ec334903ddbb921db208908fc372c" - -[[package]] -name = "plotters" -version = "0.3.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2c224ba00d7cadd4d5c660deaf2098e5e80e07846537c51f9cfa4be50c1fd45" -dependencies = [ - "num-traits", - "plotters-backend", - "plotters-svg", - "wasm-bindgen", - "web-sys", -] - -[[package]] -name = "plotters-backend" -version = "0.3.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e76628b4d3a7581389a35d5b6e2139607ad7c75b17aed325f210aa91f4a9609" - -[[package]] -name = "plotters-svg" -version = "0.3.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38f6d39893cca0701371e3c27294f09797214b86f1fb951b89ade8ec04e2abab" -dependencies = [ - "plotters-backend", -] - -[[package]] -name = "ppv-lite86" -version = "0.2.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" - -[[package]] -name = "proc-macro2" -version = "1.0.82" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ad3d49ab951a01fbaafe34f2ec74122942fe18a3f9814c3268f1bb72042131b" -dependencies = [ - "unicode-ident", -] - -[[package]] -name = "quickcheck" -version = "0.9.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a44883e74aa97ad63db83c4bf8ca490f02b2fc02f92575e720c8551e843c945f" -dependencies = [ - "rand", - "rand_core", -] - -[[package]] -name = "quote" -version = "1.0.36" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fa76aaf39101c457836aec0ce2316dbdc3ab723cdda1c6bd4e6ad4208acaca7" -dependencies = [ - "proc-macro2", -] - -[[package]] -name = "rand" -version = "0.7.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03" -dependencies = [ - "getrandom", - "libc", - "rand_chacha", - "rand_core", - "rand_hc", -] - -[[package]] -name = "rand_chacha" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4c8ed856279c9737206bf725bf36935d8666ead7aa69b52be55af369d193402" -dependencies = [ - "ppv-lite86", - "rand_core", -] - -[[package]] -name = "rand_core" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19" -dependencies = [ - "getrandom", -] - -[[package]] -name = "rand_hc" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca3129af7b92a17112d59ad498c6f81eaf463253766b90396d39ea7a39d6613c" -dependencies = [ - "rand_core", -] - -[[package]] -name = "rayon" -version = "1.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b418a60154510ca1a002a752ca9714984e21e4241e804d32555251faf8b78ffa" -dependencies = [ - "either", - "rayon-core", -] - -[[package]] -name = "rayon-core" -version = "1.12.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1465873a3dfdaa8ae7cb14b4383657caab0b3e8a0aa9ae8e04b044854c8dfce2" -dependencies = [ - "crossbeam-deque", - "crossbeam-utils", -] - -[[package]] -name = "regex" -version = "1.10.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c117dbdfde9c8308975b6a18d71f3f385c89461f7b3fb054288ecf2a2058ba4c" -dependencies = [ - "aho-corasick", - "memchr", - "regex-automata", - "regex-syntax", -] - -[[package]] -name = "regex-automata" -version = "0.4.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86b83b8b9847f9bf95ef68afb0b8e6cdb80f498442f5179a29fad448fcc1eaea" -dependencies = [ - "aho-corasick", - "memchr", - "regex-syntax", -] - -[[package]] -name = "regex-syntax" -version = "0.8.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "adad44e29e4c806119491a7f06f03de4d1af22c3a680dd47f1e6e179439d1f56" - -[[package]] -name = "ryu" -version = "1.0.18" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f3cb5ba0dc43242ce17de99c180e96db90b235b8a9fdc9543c96d2209116bd9f" - -[[package]] -name = "same-file" -version = "1.0.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502" -dependencies = [ - "winapi-util", -] - -[[package]] -name = "serde" -version = "1.0.202" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "226b61a0d411b2ba5ff6d7f73a476ac4f8bb900373459cd00fab8512828ba395" -dependencies = [ - "serde_derive", -] - -[[package]] -name = "serde_derive" -version = "1.0.202" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6048858004bcff69094cd972ed40a32500f153bd3be9f716b2eed2e8217c4838" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "serde_json" -version = "1.0.117" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "455182ea6142b14f93f4bc5320a2b31c1f266b66a4a5c858b013302a5d8cbfc3" -dependencies = [ - "itoa", - "ryu", - "serde", -] - -[[package]] -name = "syn" -version = "2.0.63" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf5be731623ca1a1fb7d8be6f261a3be6d3e2337b8a1f97be944d020c8fcb704" -dependencies = [ - 
"proc-macro2", - "quote", - "unicode-ident", -] - -[[package]] -name = "textwrap" -version = "0.16.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23d434d3f8967a09480fb04132ebe0a3e088c173e6d0ee7897abbdf4eab0f8b9" - -[[package]] -name = "tinytemplate" -version = "1.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be4d6b5f19ff7664e8c98d03e2139cb510db9b0a60b55f8e8709b689d939b6bc" -dependencies = [ - "serde", - "serde_json", -] - -[[package]] -name = "unicode-ident" -version = "1.0.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b" - -[[package]] -name = "walkdir" -version = "2.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "29790946404f91d9c5d06f9874efddea1dc06c5efe94541a7d6863108e3a5e4b" -dependencies = [ - "same-file", - "winapi-util", -] - -[[package]] -name = "wasi" -version = "0.9.0+wasi-snapshot-preview1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" - -[[package]] -name = "wasm-bindgen" -version = "0.2.92" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4be2531df63900aeb2bca0daaaddec08491ee64ceecbee5076636a3b026795a8" -dependencies = [ - "cfg-if", - "wasm-bindgen-macro", -] - -[[package]] -name = "wasm-bindgen-backend" -version = "0.2.92" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "614d787b966d3989fa7bb98a654e369c762374fd3213d212cfc0251257e747da" -dependencies = [ - "bumpalo", - "log", - "once_cell", - "proc-macro2", - "quote", - "syn", - "wasm-bindgen-shared", -] - -[[package]] -name = "wasm-bindgen-macro" -version = "0.2.92" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1f8823de937b71b9460c0c34e25f3da88250760bec0ebac694b49997550d726" -dependencies = [ - "quote", - "wasm-bindgen-macro-support", -] - -[[package]] -name = "wasm-bindgen-macro-support" -version = "0.2.92" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e94f17b526d0a461a191c78ea52bbce64071ed5c04c9ffe424dcb38f74171bb7" -dependencies = [ - "proc-macro2", - "quote", - "syn", - "wasm-bindgen-backend", - "wasm-bindgen-shared", -] - -[[package]] -name = "wasm-bindgen-shared" -version = "0.2.92" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af190c94f2773fdb3729c55b007a722abb5384da03bc0986df4c289bf5567e96" - -[[package]] -name = "web-sys" -version = "0.3.69" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77afa9a11836342370f4817622a2f0f418b134426d91a82dfb48f532d2ec13ef" -dependencies = [ - "js-sys", - "wasm-bindgen", -] - -[[package]] -name = "winapi" -version = "0.3.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" -dependencies = [ - "winapi-i686-pc-windows-gnu", - "winapi-x86_64-pc-windows-gnu", -] - -[[package]] -name = "winapi-i686-pc-windows-gnu" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" - -[[package]] -name = "winapi-util" -version = "0.1.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d4cc384e1e73b93bafa6fb4f1df8c41695c8a91cf9c4c64358067d15a7b6c6b" -dependencies = [ - "windows-sys", -] 
- -[[package]] -name = "winapi-x86_64-pc-windows-gnu" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" - -[[package]] -name = "windows-sys" -version = "0.52.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" -dependencies = [ - "windows-targets", -] - -[[package]] -name = "windows-targets" -version = "0.52.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f0713a46559409d202e70e28227288446bf7841d3211583a4b53e3f6d96e7eb" -dependencies = [ - "windows_aarch64_gnullvm", - "windows_aarch64_msvc", - "windows_i686_gnu", - "windows_i686_gnullvm", - "windows_i686_msvc", - "windows_x86_64_gnu", - "windows_x86_64_gnullvm", - "windows_x86_64_msvc", -] - -[[package]] -name = "windows_aarch64_gnullvm" -version = "0.52.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7088eed71e8b8dda258ecc8bac5fb1153c5cffaf2578fc8ff5d61e23578d3263" - -[[package]] -name = "windows_aarch64_msvc" -version = "0.52.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9985fd1504e250c615ca5f281c3f7a6da76213ebd5ccc9561496568a2752afb6" - -[[package]] -name = "windows_i686_gnu" -version = "0.52.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "88ba073cf16d5372720ec942a8ccbf61626074c6d4dd2e745299726ce8b89670" - -[[package]] -name = "windows_i686_gnullvm" -version = "0.52.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87f4261229030a858f36b459e748ae97545d6f1ec60e5e0d6a3d32e0dc232ee9" - -[[package]] -name = "windows_i686_msvc" -version = "0.52.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db3c2bf3d13d5b658be73463284eaf12830ac9a26a90c717b7f771dfe97487bf" - -[[package]] -name = "windows_x86_64_gnu" -version = "0.52.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e4246f76bdeff09eb48875a0fd3e2af6aada79d409d33011886d3e1581517d9" - -[[package]] -name = "windows_x86_64_gnullvm" -version = "0.52.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "852298e482cd67c356ddd9570386e2862b5673c85bd5f88df9ab6802b334c596" - -[[package]] -name = "windows_x86_64_msvc" -version = "0.52.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bec47e5bfd1bff0eeaf6d8b485cc1074891a197ab4225d504cb7a1ab88b02bf0" diff --git a/vendor/itertools/Cargo.toml b/vendor/itertools/Cargo.toml deleted file mode 100644 index 21896fed739fef..00000000000000 --- a/vendor/itertools/Cargo.toml +++ /dev/null @@ -1,105 +0,0 @@ -# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO -# -# When uploading crates to the registry Cargo will automatically -# "normalize" Cargo.toml files for maximal compatibility -# with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g., crates.io) dependencies. -# -# If you are reading this file be aware that the original Cargo.toml -# will likely look very different (and much more reasonable). -# See Cargo.toml.orig for the original contents. - -[package] -edition = "2018" -rust-version = "1.43.1" -name = "itertools" -version = "0.13.0" -authors = ["bluss"] -description = "Extra iterator adaptors, iterator methods, free functions, and macros." 
-documentation = "https://docs.rs/itertools/" -readme = "README.md" -keywords = [ - "iterator", - "data-structure", - "zip", - "product", -] -categories = [ - "algorithms", - "rust-patterns", - "no-std", - "no-std::no-alloc", -] -license = "MIT OR Apache-2.0" -repository = "https://github.com/rust-itertools/itertools" - -[profile.bench] -debug = 2 - -[lib] -test = false -bench = false - -[[bench]] -name = "tuple_combinations" -harness = false - -[[bench]] -name = "tuples" -harness = false - -[[bench]] -name = "fold_specialization" -harness = false - -[[bench]] -name = "combinations_with_replacement" -harness = false - -[[bench]] -name = "tree_reduce" -harness = false - -[[bench]] -name = "bench1" -harness = false - -[[bench]] -name = "combinations" -harness = false - -[[bench]] -name = "powerset" -harness = false - -[[bench]] -name = "specializations" -harness = false - -[dependencies.either] -version = "1.0" -default-features = false - -[dev-dependencies.criterion] -version = "0.4.0" - -[dev-dependencies.paste] -version = "1.0.0" - -[dev-dependencies.permutohedron] -version = "0.2" - -[dev-dependencies.quickcheck] -version = "0.9" -default_features = false - -[dev-dependencies.rand] -version = "0.7" - -[features] -default = ["use_std"] -use_alloc = [] -use_std = [ - "use_alloc", - "either/use_std", -] diff --git a/vendor/itertools/LICENSE-APACHE b/vendor/itertools/LICENSE-APACHE deleted file mode 100644 index 16fe87b06e802f..00000000000000 --- a/vendor/itertools/LICENSE-APACHE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. 
For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - -2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - -3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - -4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - -5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - -6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - -8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - -Copyright [yyyy] [name of copyright owner] - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. diff --git a/vendor/itertools/LICENSE-MIT b/vendor/itertools/LICENSE-MIT deleted file mode 100644 index 9203baa055d41d..00000000000000 --- a/vendor/itertools/LICENSE-MIT +++ /dev/null @@ -1,25 +0,0 @@ -Copyright (c) 2015 - -Permission is hereby granted, free of charge, to any -person obtaining a copy of this software and associated -documentation files (the "Software"), to deal in the -Software without restriction, including without -limitation the rights to use, copy, modify, merge, -publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software -is furnished to do so, subject to the following -conditions: - -The above copyright notice and this permission notice -shall be included in all copies or substantial portions -of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT -SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -DEALINGS IN THE SOFTWARE. diff --git a/vendor/itertools/README.md b/vendor/itertools/README.md deleted file mode 100644 index 982ef5dbe6b7e3..00000000000000 --- a/vendor/itertools/README.md +++ /dev/null @@ -1,33 +0,0 @@ -# Itertools - -Extra iterator adaptors, functions and macros. - -Please read the [API documentation here](https://docs.rs/itertools/). - -How to use with Cargo: - -```toml -[dependencies] -itertools = "0.13.0" -``` - -How to use in your crate: - -```rust -use itertools::Itertools; -``` - -## How to contribute -If you're not sure what to work on, try checking the [help wanted](https://github.com/rust-itertools/itertools/issues?q=is%3Aopen+is%3Aissue+label%3A%22help+wanted%22) label. - -See our [CONTRIBUTING.md](https://github.com/rust-itertools/itertools/blob/master/CONTRIBUTING.md) for a detailed guide. - -## License - -Dual-licensed to be compatible with the Rust project. - -Licensed under the Apache License, Version 2.0 -https://www.apache.org/licenses/LICENSE-2.0 or the MIT license -https://opensource.org/licenses/MIT, at your -option. This file may not be copied, modified, or distributed -except according to those terms. diff --git a/vendor/itertools/benches/bench1.rs b/vendor/itertools/benches/bench1.rs deleted file mode 100644 index 53e77b0da46a00..00000000000000 --- a/vendor/itertools/benches/bench1.rs +++ /dev/null @@ -1,767 +0,0 @@ -use criterion::{black_box, criterion_group, criterion_main, BatchSize, Criterion}; -use itertools::free::cloned; -use itertools::iproduct; -use itertools::Itertools; - -use std::cmp; -use std::iter::repeat; -use std::ops::{Add, Range}; - -fn slice_iter(c: &mut Criterion) { - let xs: Vec<_> = repeat(1i32).take(20).collect(); - - c.bench_function("slice iter", move |b| { - b.iter(|| { - for elt in xs.iter() { - black_box(elt); - } - }) - }); -} - -fn slice_iter_rev(c: &mut Criterion) { - let xs: Vec<_> = repeat(1i32).take(20).collect(); - - c.bench_function("slice iter rev", move |b| { - b.iter(|| { - for elt in xs.iter().rev() { - black_box(elt); - } - }) - }); -} - -fn zip_default_zip(c: &mut Criterion) { - let xs = vec![0; 1024]; - let ys = vec![0; 768]; - let xs = black_box(xs); - let ys = black_box(ys); - - c.bench_function("zip default zip", move |b| { - b.iter(|| { - for (&x, &y) in xs.iter().zip(&ys) { - black_box(x); - black_box(y); - } - }) - }); -} - -fn zipdot_i32_default_zip(c: &mut Criterion) { - let xs = vec![2; 1024]; - let ys = vec![2; 768]; - let xs = black_box(xs); - let ys = black_box(ys); - - c.bench_function("zipdot i32 default zip", move |b| { - b.iter(|| { - let mut s = 0; - for (&x, &y) in xs.iter().zip(&ys) { - s += x * y; - } - s - }) - }); -} - -fn zipdot_f32_default_zip(c: &mut Criterion) { - let xs = vec![2f32; 1024]; - let ys = vec![2f32; 768]; - let xs = black_box(xs); - let ys = black_box(ys); - - c.bench_function("zipdot f32 default zip", move |b| { - b.iter(|| { - let mut s = 0.; - for (&x, &y) in xs.iter().zip(&ys) { - s += x * y; - } - s - }) - }); -} - -fn zip_default_zip3(c: &mut Criterion) { - let xs = vec![0; 1024]; - let ys = vec![0; 768]; - let zs = vec![0; 766]; - let xs = black_box(xs); - let ys = black_box(ys); - let zs = black_box(zs); - - c.bench_function("zip default zip3", move |b| { - b.iter(|| { - for ((&x, &y), &z) in xs.iter().zip(&ys).zip(&zs) { - 
black_box(x); - black_box(y); - black_box(z); - } - }) - }); -} - -fn zip_slices_ziptuple(c: &mut Criterion) { - let xs = vec![0; 1024]; - let ys = vec![0; 768]; - - c.bench_function("zip slices ziptuple", move |b| { - b.iter(|| { - let xs = black_box(&xs); - let ys = black_box(&ys); - for (&x, &y) in itertools::multizip((xs, ys)) { - black_box(x); - black_box(y); - } - }) - }); -} - -fn zip_checked_counted_loop(c: &mut Criterion) { - let xs = vec![0; 1024]; - let ys = vec![0; 768]; - let xs = black_box(xs); - let ys = black_box(ys); - - c.bench_function("zip checked counted loop", move |b| { - b.iter(|| { - // Must slice to equal lengths, and then bounds checks are eliminated! - let len = cmp::min(xs.len(), ys.len()); - let xs = &xs[..len]; - let ys = &ys[..len]; - - for i in 0..len { - let x = xs[i]; - let y = ys[i]; - black_box(x); - black_box(y); - } - }) - }); -} - -fn zipdot_i32_checked_counted_loop(c: &mut Criterion) { - let xs = vec![2; 1024]; - let ys = vec![2; 768]; - let xs = black_box(xs); - let ys = black_box(ys); - - c.bench_function("zipdot i32 checked counted loop", move |b| { - b.iter(|| { - // Must slice to equal lengths, and then bounds checks are eliminated! - let len = cmp::min(xs.len(), ys.len()); - let xs = &xs[..len]; - let ys = &ys[..len]; - - let mut s = 0i32; - - for i in 0..len { - s += xs[i] * ys[i]; - } - s - }) - }); -} - -fn zipdot_f32_checked_counted_loop(c: &mut Criterion) { - let xs = vec![2f32; 1024]; - let ys = vec![2f32; 768]; - let xs = black_box(xs); - let ys = black_box(ys); - - c.bench_function("zipdot f32 checked counted loop", move |b| { - b.iter(|| { - // Must slice to equal lengths, and then bounds checks are eliminated! - let len = cmp::min(xs.len(), ys.len()); - let xs = &xs[..len]; - let ys = &ys[..len]; - - let mut s = 0.; - - for i in 0..len { - s += xs[i] * ys[i]; - } - s - }) - }); -} - -fn zipdot_f32_checked_counted_unrolled_loop(c: &mut Criterion) { - let xs = vec![2f32; 1024]; - let ys = vec![2f32; 768]; - let xs = black_box(xs); - let ys = black_box(ys); - - c.bench_function("zipdot f32 checked counted unrolled loop", move |b| { - b.iter(|| { - // Must slice to equal lengths, and then bounds checks are eliminated! 
- let len = cmp::min(xs.len(), ys.len()); - let mut xs = &xs[..len]; - let mut ys = &ys[..len]; - - let mut s = 0.; - let (mut p0, mut p1, mut p2, mut p3, mut p4, mut p5, mut p6, mut p7) = - (0., 0., 0., 0., 0., 0., 0., 0.); - - // how to unroll and have bounds checks eliminated (by cristicbz) - // split sum into eight parts to enable vectorization (by bluss) - while xs.len() >= 8 { - p0 += xs[0] * ys[0]; - p1 += xs[1] * ys[1]; - p2 += xs[2] * ys[2]; - p3 += xs[3] * ys[3]; - p4 += xs[4] * ys[4]; - p5 += xs[5] * ys[5]; - p6 += xs[6] * ys[6]; - p7 += xs[7] * ys[7]; - - xs = &xs[8..]; - ys = &ys[8..]; - } - s += p0 + p4; - s += p1 + p5; - s += p2 + p6; - s += p3 + p7; - - for i in 0..xs.len() { - s += xs[i] * ys[i]; - } - s - }) - }); -} - -fn zip_unchecked_counted_loop(c: &mut Criterion) { - let xs = vec![0; 1024]; - let ys = vec![0; 768]; - let xs = black_box(xs); - let ys = black_box(ys); - - c.bench_function("zip unchecked counted loop", move |b| { - b.iter(|| { - let len = cmp::min(xs.len(), ys.len()); - for i in 0..len { - unsafe { - let x = *xs.get_unchecked(i); - let y = *ys.get_unchecked(i); - black_box(x); - black_box(y); - } - } - }) - }); -} - -fn zipdot_i32_unchecked_counted_loop(c: &mut Criterion) { - let xs = vec![2; 1024]; - let ys = vec![2; 768]; - let xs = black_box(xs); - let ys = black_box(ys); - - c.bench_function("zipdot i32 unchecked counted loop", move |b| { - b.iter(|| { - let len = cmp::min(xs.len(), ys.len()); - let mut s = 0i32; - for i in 0..len { - unsafe { - let x = *xs.get_unchecked(i); - let y = *ys.get_unchecked(i); - s += x * y; - } - } - s - }) - }); -} - -fn zipdot_f32_unchecked_counted_loop(c: &mut Criterion) { - let xs = vec![2.; 1024]; - let ys = vec![2.; 768]; - let xs = black_box(xs); - let ys = black_box(ys); - - c.bench_function("zipdot f32 unchecked counted loop", move |b| { - b.iter(|| { - let len = cmp::min(xs.len(), ys.len()); - let mut s = 0f32; - for i in 0..len { - unsafe { - let x = *xs.get_unchecked(i); - let y = *ys.get_unchecked(i); - s += x * y; - } - } - s - }) - }); -} - -fn zip_unchecked_counted_loop3(c: &mut Criterion) { - let xs = vec![0; 1024]; - let ys = vec![0; 768]; - let zs = vec![0; 766]; - let xs = black_box(xs); - let ys = black_box(ys); - let zs = black_box(zs); - - c.bench_function("zip unchecked counted loop3", move |b| { - b.iter(|| { - let len = cmp::min(xs.len(), cmp::min(ys.len(), zs.len())); - for i in 0..len { - unsafe { - let x = *xs.get_unchecked(i); - let y = *ys.get_unchecked(i); - let z = *zs.get_unchecked(i); - black_box(x); - black_box(y); - black_box(z); - } - } - }) - }); -} - -fn chunk_by_lazy_1(c: &mut Criterion) { - let mut data = vec![0; 1024]; - for (index, elt) in data.iter_mut().enumerate() { - *elt = index / 10; - } - - let data = black_box(data); - - c.bench_function("chunk by lazy 1", move |b| { - b.iter(|| { - for (_key, chunk) in &data.iter().chunk_by(|elt| **elt) { - for elt in chunk { - black_box(elt); - } - } - }) - }); -} - -fn chunk_by_lazy_2(c: &mut Criterion) { - let mut data = vec![0; 1024]; - for (index, elt) in data.iter_mut().enumerate() { - *elt = index / 2; - } - - let data = black_box(data); - - c.bench_function("chunk by lazy 2", move |b| { - b.iter(|| { - for (_key, chunk) in &data.iter().chunk_by(|elt| **elt) { - for elt in chunk { - black_box(elt); - } - } - }) - }); -} - -fn slice_chunks(c: &mut Criterion) { - let data = vec![0; 1024]; - - let data = black_box(data); - let sz = black_box(10); - - c.bench_function("slice chunks", move |b| { - b.iter(|| { - for chunk in 
data.chunks(sz) { - for elt in chunk { - black_box(elt); - } - } - }) - }); -} - -fn chunks_lazy_1(c: &mut Criterion) { - let data = vec![0; 1024]; - - let data = black_box(data); - let sz = black_box(10); - - c.bench_function("chunks lazy 1", move |b| { - b.iter(|| { - for chunk in &data.iter().chunks(sz) { - for elt in chunk { - black_box(elt); - } - } - }) - }); -} - -fn equal(c: &mut Criterion) { - let data = vec![7; 1024]; - let l = data.len(); - let alpha = black_box(&data[1..]); - let beta = black_box(&data[..l - 1]); - - c.bench_function("equal", move |b| b.iter(|| itertools::equal(alpha, beta))); -} - -fn merge_default(c: &mut Criterion) { - let mut data1 = vec![0; 1024]; - let mut data2 = vec![0; 800]; - let mut x = 0; - - #[allow(clippy::explicit_counter_loop, clippy::unused_enumerate_index)] - for (_, elt) in data1.iter_mut().enumerate() { - *elt = x; - x += 1; - } - - let mut y = 0; - for (i, elt) in data2.iter_mut().enumerate() { - *elt += y; - if i % 3 == 0 { - y += 3; - } else { - y += 0; - } - } - let data1 = black_box(data1); - let data2 = black_box(data2); - - c.bench_function("merge default", move |b| { - b.iter(|| data1.iter().merge(&data2).count()) - }); -} - -fn merge_by_cmp(c: &mut Criterion) { - let mut data1 = vec![0; 1024]; - let mut data2 = vec![0; 800]; - let mut x = 0; - - #[allow(clippy::explicit_counter_loop, clippy::unused_enumerate_index)] - for (_, elt) in data1.iter_mut().enumerate() { - *elt = x; - x += 1; - } - - let mut y = 0; - for (i, elt) in data2.iter_mut().enumerate() { - *elt += y; - if i % 3 == 0 { - y += 3; - } else { - y += 0; - } - } - let data1 = black_box(data1); - let data2 = black_box(data2); - - c.bench_function("merge by cmp", move |b| { - b.iter(|| data1.iter().merge_by(&data2, PartialOrd::le).count()) - }); -} - -fn merge_by_lt(c: &mut Criterion) { - let mut data1 = vec![0; 1024]; - let mut data2 = vec![0; 800]; - let mut x = 0; - - #[allow(clippy::explicit_counter_loop, clippy::unused_enumerate_index)] - for (_, elt) in data1.iter_mut().enumerate() { - *elt = x; - x += 1; - } - - let mut y = 0; - for (i, elt) in data2.iter_mut().enumerate() { - *elt += y; - if i % 3 == 0 { - y += 3; - } else { - y += 0; - } - } - let data1 = black_box(data1); - let data2 = black_box(data2); - - c.bench_function("merge by lt", move |b| { - b.iter(|| data1.iter().merge_by(&data2, |a, b| a <= b).count()) - }); -} - -fn kmerge_default(c: &mut Criterion) { - let mut data1 = vec![0; 1024]; - let mut data2 = vec![0; 800]; - let mut x = 0; - - #[allow(clippy::explicit_counter_loop, clippy::unused_enumerate_index)] - for (_, elt) in data1.iter_mut().enumerate() { - *elt = x; - x += 1; - } - - let mut y = 0; - for (i, elt) in data2.iter_mut().enumerate() { - *elt += y; - if i % 3 == 0 { - y += 3; - } else { - y += 0; - } - } - let data1 = black_box(data1); - let data2 = black_box(data2); - let its = &[data1.iter(), data2.iter()]; - - c.bench_function("kmerge default", move |b| { - b.iter(|| its.iter().cloned().kmerge().count()) - }); -} - -fn kmerge_tenway(c: &mut Criterion) { - let mut data = vec![0; 10240]; - - let mut state = 1729u16; - fn rng(state: &mut u16) -> u16 { - let new = state.wrapping_mul(31421).wrapping_add(6927); - *state = new; - new - } - - for elt in &mut data { - *elt = rng(&mut state); - } - - let mut chunks = Vec::new(); - let mut rest = &mut data[..]; - while !rest.is_empty() { - let chunk_len = 1 + rng(&mut state) % 512; - let chunk_len = cmp::min(rest.len(), chunk_len as usize); - let (fst, tail) = { rest }.split_at_mut(chunk_len); - 
fst.sort(); - chunks.push(fst.iter().cloned()); - rest = tail; - } - - // println!("Chunk lengths: {}", chunks.iter().format_with(", ", |elt, f| f(&elt.len()))); - - c.bench_function("kmerge tenway", move |b| { - b.iter(|| chunks.iter().cloned().kmerge().count()) - }); -} - -fn fast_integer_sum(iter: I) -> I::Item -where - I: IntoIterator, - I::Item: Default + Add, -{ - iter.into_iter().fold(<_>::default(), |x, y| x + y) -} - -fn step_vec_2(c: &mut Criterion) { - let v = vec![0; 1024]; - - c.bench_function("step vec 2", move |b| { - b.iter(|| fast_integer_sum(cloned(v.iter().step_by(2)))) - }); -} - -fn step_vec_10(c: &mut Criterion) { - let v = vec![0; 1024]; - - c.bench_function("step vec 10", move |b| { - b.iter(|| fast_integer_sum(cloned(v.iter().step_by(10)))) - }); -} - -fn step_range_2(c: &mut Criterion) { - let v = black_box(0..1024); - - c.bench_function("step range 2", move |b| { - b.iter(|| fast_integer_sum(v.clone().step_by(2))) - }); -} - -fn step_range_10(c: &mut Criterion) { - let v = black_box(0..1024); - - c.bench_function("step range 10", move |b| { - b.iter(|| fast_integer_sum(v.clone().step_by(10))) - }); -} - -fn vec_iter_mut_partition(c: &mut Criterion) { - let data = std::iter::repeat(-1024i32..1024) - .take(256) - .flatten() - .collect_vec(); - c.bench_function("vec iter mut partition", move |b| { - b.iter_batched( - || data.clone(), - |mut data| { - black_box(itertools::partition(black_box(&mut data), |n| *n >= 0)); - }, - BatchSize::LargeInput, - ) - }); -} - -fn cartesian_product_iterator(c: &mut Criterion) { - let xs = vec![0; 16]; - - c.bench_function("cartesian product iterator", move |b| { - b.iter(|| { - let mut sum = 0; - for (&x, &y, &z) in iproduct!(&xs, &xs, &xs) { - sum += x; - sum += y; - sum += z; - } - sum - }) - }); -} - -fn multi_cartesian_product_iterator(c: &mut Criterion) { - let xs = [vec![0; 16], vec![0; 16], vec![0; 16]]; - - c.bench_function("multi cartesian product iterator", move |b| { - b.iter(|| { - let mut sum = 0; - for x in xs.iter().multi_cartesian_product() { - sum += x[0]; - sum += x[1]; - sum += x[2]; - } - sum - }) - }); -} - -fn cartesian_product_nested_for(c: &mut Criterion) { - let xs = vec![0; 16]; - - c.bench_function("cartesian product nested for", move |b| { - b.iter(|| { - let mut sum = 0; - for &x in &xs { - for &y in &xs { - for &z in &xs { - sum += x; - sum += y; - sum += z; - } - } - } - sum - }) - }); -} - -fn all_equal(c: &mut Criterion) { - let mut xs = vec![0; 5_000_000]; - xs.extend(vec![1; 5_000_000]); - - c.bench_function("all equal", move |b| b.iter(|| xs.iter().all_equal())); -} - -fn all_equal_for(c: &mut Criterion) { - let mut xs = vec![0; 5_000_000]; - xs.extend(vec![1; 5_000_000]); - - c.bench_function("all equal for", move |b| { - b.iter(|| { - for &x in &xs { - if x != xs[0] { - return false; - } - } - true - }) - }); -} - -fn all_equal_default(c: &mut Criterion) { - let mut xs = vec![0; 5_000_000]; - xs.extend(vec![1; 5_000_000]); - - c.bench_function("all equal default", move |b| { - b.iter(|| xs.iter().dedup().nth(1).is_none()) - }); -} - -const PERM_COUNT: usize = 6; - -fn permutations_iter(c: &mut Criterion) { - struct NewIterator(Range); - - impl Iterator for NewIterator { - type Item = usize; - - fn next(&mut self) -> Option { - self.0.next() - } - } - - c.bench_function("permutations iter", move |b| { - b.iter( - || { - for _ in NewIterator(0..PERM_COUNT).permutations(PERM_COUNT) {} - }, - ) - }); -} - -fn permutations_range(c: &mut Criterion) { - c.bench_function("permutations range", move |b| 
{ - b.iter(|| for _ in (0..PERM_COUNT).permutations(PERM_COUNT) {}) - }); -} - -fn permutations_slice(c: &mut Criterion) { - let v = (0..PERM_COUNT).collect_vec(); - - c.bench_function("permutations slice", move |b| { - b.iter(|| for _ in v.as_slice().iter().permutations(PERM_COUNT) {}) - }); -} - -criterion_group!( - benches, - slice_iter, - slice_iter_rev, - zip_default_zip, - zipdot_i32_default_zip, - zipdot_f32_default_zip, - zip_default_zip3, - zip_slices_ziptuple, - zip_checked_counted_loop, - zipdot_i32_checked_counted_loop, - zipdot_f32_checked_counted_loop, - zipdot_f32_checked_counted_unrolled_loop, - zip_unchecked_counted_loop, - zipdot_i32_unchecked_counted_loop, - zipdot_f32_unchecked_counted_loop, - zip_unchecked_counted_loop3, - chunk_by_lazy_1, - chunk_by_lazy_2, - slice_chunks, - chunks_lazy_1, - equal, - merge_default, - merge_by_cmp, - merge_by_lt, - kmerge_default, - kmerge_tenway, - step_vec_2, - step_vec_10, - step_range_2, - step_range_10, - vec_iter_mut_partition, - cartesian_product_iterator, - multi_cartesian_product_iterator, - cartesian_product_nested_for, - all_equal, - all_equal_for, - all_equal_default, - permutations_iter, - permutations_range, - permutations_slice, -); -criterion_main!(benches); diff --git a/vendor/itertools/benches/combinations.rs b/vendor/itertools/benches/combinations.rs deleted file mode 100644 index 42a452111ea8d8..00000000000000 --- a/vendor/itertools/benches/combinations.rs +++ /dev/null @@ -1,117 +0,0 @@ -use criterion::{black_box, criterion_group, criterion_main, Criterion}; -use itertools::Itertools; - -// approximate 100_000 iterations for each combination -const N1: usize = 100_000; -const N2: usize = 448; -const N3: usize = 86; -const N4: usize = 41; -const N14: usize = 21; - -fn comb_for1(c: &mut Criterion) { - c.bench_function("comb for1", move |b| { - b.iter(|| { - for i in 0..N1 { - black_box(vec![i]); - } - }) - }); -} - -fn comb_for2(c: &mut Criterion) { - c.bench_function("comb for2", move |b| { - b.iter(|| { - for i in 0..N2 { - for j in (i + 1)..N2 { - black_box(vec![i, j]); - } - } - }) - }); -} - -fn comb_for3(c: &mut Criterion) { - c.bench_function("comb for3", move |b| { - b.iter(|| { - for i in 0..N3 { - for j in (i + 1)..N3 { - for k in (j + 1)..N3 { - black_box(vec![i, j, k]); - } - } - } - }) - }); -} - -fn comb_for4(c: &mut Criterion) { - c.bench_function("comb for4", move |b| { - b.iter(|| { - for i in 0..N4 { - for j in (i + 1)..N4 { - for k in (j + 1)..N4 { - for l in (k + 1)..N4 { - black_box(vec![i, j, k, l]); - } - } - } - } - }) - }); -} - -fn comb_c1(c: &mut Criterion) { - c.bench_function("comb c1", move |b| { - b.iter(|| { - for combo in (0..N1).combinations(1) { - black_box(combo); - } - }) - }); -} - -fn comb_c2(c: &mut Criterion) { - c.bench_function("comb c2", move |b| { - b.iter(|| { - for combo in (0..N2).combinations(2) { - black_box(combo); - } - }) - }); -} - -fn comb_c3(c: &mut Criterion) { - c.bench_function("comb c3", move |b| { - b.iter(|| { - for combo in (0..N3).combinations(3) { - black_box(combo); - } - }) - }); -} - -fn comb_c4(c: &mut Criterion) { - c.bench_function("comb c4", move |b| { - b.iter(|| { - for combo in (0..N4).combinations(4) { - black_box(combo); - } - }) - }); -} - -fn comb_c14(c: &mut Criterion) { - c.bench_function("comb c14", move |b| { - b.iter(|| { - for combo in (0..N14).combinations(14) { - black_box(combo); - } - }) - }); -} - -criterion_group!( - benches, comb_for1, comb_for2, comb_for3, comb_for4, comb_c1, comb_c2, comb_c3, comb_c4, - comb_c14, -); 
-criterion_main!(benches); diff --git a/vendor/itertools/benches/combinations_with_replacement.rs b/vendor/itertools/benches/combinations_with_replacement.rs deleted file mode 100644 index 8e4fa3dc3b1bce..00000000000000 --- a/vendor/itertools/benches/combinations_with_replacement.rs +++ /dev/null @@ -1,40 +0,0 @@ -use criterion::{black_box, criterion_group, criterion_main, Criterion}; -use itertools::Itertools; - -fn comb_replacement_n10_k5(c: &mut Criterion) { - c.bench_function("comb replacement n10k5", move |b| { - b.iter(|| { - for i in (0..10).combinations_with_replacement(5) { - black_box(i); - } - }) - }); -} - -fn comb_replacement_n5_k10(c: &mut Criterion) { - c.bench_function("comb replacement n5 k10", move |b| { - b.iter(|| { - for i in (0..5).combinations_with_replacement(10) { - black_box(i); - } - }) - }); -} - -fn comb_replacement_n10_k10(c: &mut Criterion) { - c.bench_function("comb replacement n10 k10", move |b| { - b.iter(|| { - for i in (0..10).combinations_with_replacement(10) { - black_box(i); - } - }) - }); -} - -criterion_group!( - benches, - comb_replacement_n10_k5, - comb_replacement_n5_k10, - comb_replacement_n10_k10, -); -criterion_main!(benches); diff --git a/vendor/itertools/benches/fold_specialization.rs b/vendor/itertools/benches/fold_specialization.rs deleted file mode 100644 index b44f3472146307..00000000000000 --- a/vendor/itertools/benches/fold_specialization.rs +++ /dev/null @@ -1,75 +0,0 @@ -#![allow(unstable_name_collisions)] - -use criterion::{criterion_group, criterion_main, Criterion}; -use itertools::Itertools; - -struct Unspecialized(I); - -impl Iterator for Unspecialized -where - I: Iterator, -{ - type Item = I::Item; - - #[inline(always)] - fn next(&mut self) -> Option { - self.0.next() - } - - #[inline(always)] - fn size_hint(&self) -> (usize, Option) { - self.0.size_hint() - } -} - -mod specialization { - use super::*; - - pub mod intersperse { - use super::*; - - pub fn external(c: &mut Criterion) { - let arr = [1; 1024]; - - c.bench_function("external", move |b| { - b.iter(|| { - let mut sum = 0; - for &x in arr.iter().intersperse(&0) { - sum += x; - } - sum - }) - }); - } - - pub fn internal_specialized(c: &mut Criterion) { - let arr = [1; 1024]; - - c.bench_function("internal specialized", move |b| { - b.iter(|| { - #[allow(clippy::unnecessary_fold)] - arr.iter().intersperse(&0).fold(0, |acc, x| acc + x) - }) - }); - } - - pub fn internal_unspecialized(c: &mut Criterion) { - let arr = [1; 1024]; - - c.bench_function("internal unspecialized", move |b| { - b.iter(|| { - #[allow(clippy::unnecessary_fold)] - Unspecialized(arr.iter().intersperse(&0)).fold(0, |acc, x| acc + x) - }) - }); - } - } -} - -criterion_group!( - benches, - specialization::intersperse::external, - specialization::intersperse::internal_specialized, - specialization::intersperse::internal_unspecialized, -); -criterion_main!(benches); diff --git a/vendor/itertools/benches/powerset.rs b/vendor/itertools/benches/powerset.rs deleted file mode 100644 index 018333d316c1e6..00000000000000 --- a/vendor/itertools/benches/powerset.rs +++ /dev/null @@ -1,97 +0,0 @@ -use criterion::{black_box, criterion_group, criterion_main, Criterion}; -use itertools::Itertools; - -// Keep aggregate generated elements the same, regardless of powerset length. 
-const TOTAL_ELEMENTS: usize = 1 << 12; -const fn calc_iters(n: usize) -> usize { - TOTAL_ELEMENTS / (1 << n) -} - -fn powerset_n(c: &mut Criterion, n: usize) { - let id = format!("powerset {}", n); - c.bench_function(id.as_str(), move |b| { - b.iter(|| { - for _ in 0..calc_iters(n) { - for elt in (0..n).powerset() { - black_box(elt); - } - } - }) - }); -} - -fn powerset_n_fold(c: &mut Criterion, n: usize) { - let id = format!("powerset {} fold", n); - c.bench_function(id.as_str(), move |b| { - b.iter(|| { - for _ in 0..calc_iters(n) { - (0..n).powerset().fold(0, |s, elt| s + black_box(elt).len()); - } - }) - }); -} - -fn powerset_0(c: &mut Criterion) { - powerset_n(c, 0); -} - -fn powerset_1(c: &mut Criterion) { - powerset_n(c, 1); -} - -fn powerset_2(c: &mut Criterion) { - powerset_n(c, 2); -} - -fn powerset_4(c: &mut Criterion) { - powerset_n(c, 4); -} - -fn powerset_8(c: &mut Criterion) { - powerset_n(c, 8); -} - -fn powerset_12(c: &mut Criterion) { - powerset_n(c, 12); -} - -fn powerset_0_fold(c: &mut Criterion) { - powerset_n_fold(c, 0); -} - -fn powerset_1_fold(c: &mut Criterion) { - powerset_n_fold(c, 1); -} - -fn powerset_2_fold(c: &mut Criterion) { - powerset_n_fold(c, 2); -} - -fn powerset_4_fold(c: &mut Criterion) { - powerset_n_fold(c, 4); -} - -fn powerset_8_fold(c: &mut Criterion) { - powerset_n_fold(c, 8); -} - -fn powerset_12_fold(c: &mut Criterion) { - powerset_n_fold(c, 12); -} - -criterion_group!( - benches, - powerset_0, - powerset_1, - powerset_2, - powerset_4, - powerset_8, - powerset_12, - powerset_0_fold, - powerset_1_fold, - powerset_2_fold, - powerset_4_fold, - powerset_8_fold, - powerset_12_fold, -); -criterion_main!(benches); diff --git a/vendor/itertools/benches/specializations.rs b/vendor/itertools/benches/specializations.rs deleted file mode 100644 index 18039fc4edef13..00000000000000 --- a/vendor/itertools/benches/specializations.rs +++ /dev/null @@ -1,667 +0,0 @@ -#![allow(unstable_name_collisions, clippy::incompatible_msrv)] - -use criterion::black_box; -use criterion::BenchmarkId; -use itertools::Itertools; - -const NTH_INPUTS: &[usize] = &[0, 1, 2, 4, 8]; - -/// Create multiple functions each defining a benchmark group about iterator methods. -/// -/// Each created group has functions with the following ids: -/// -/// - `next`, `size_hint`, `count`, `last`, `nth`, `collect`, `fold` -/// - and when marked as `DoubleEndedIterator`: `next_back`, `nth_back`, `rfold` -/// - and when marked as `ExactSizeIterator`: `len` -/// -/// Note that this macro can be called only once. -macro_rules! 
bench_specializations { - ( - $( - $name:ident { - $($extra:ident)* - {$( - $init:stmt; - )*} - $iterator:expr - } - )* - ) => { - $( - #[allow(unused_must_use)] - fn $name(c: &mut ::criterion::Criterion) { - let mut bench_group = c.benchmark_group(stringify!($name)); - $( - $init - )* - let bench_first_its = { - let mut bench_idx = 0; - [0; 1000].map(|_| { - let mut it = $iterator; - if bench_idx != 0 { - it.nth(bench_idx - 1); - } - bench_idx += 1; - it - }) - }; - bench_specializations!(@Iterator bench_group bench_first_its: $iterator); - $( - bench_specializations!(@$extra bench_group bench_first_its: $iterator); - )* - bench_group.finish(); - } - )* - - ::criterion::criterion_group!(benches, $($name, )*); - ::criterion::criterion_main!(benches); - }; - - (@Iterator $group:ident $first_its:ident: $iterator:expr) => { - $group.bench_function("next", |bencher| bencher.iter(|| { - let mut it = $iterator; - while let Some(x) = it.next() { - black_box(x); - } - })); - $group.bench_function("size_hint", |bencher| bencher.iter(|| { - $first_its.iter().for_each(|it| { - black_box(it.size_hint()); - }) - })); - $group.bench_function("count", |bencher| bencher.iter(|| { - $iterator.count() - })); - $group.bench_function("last", |bencher| bencher.iter(|| { - $iterator.last() - })); - for n in NTH_INPUTS { - $group.bench_with_input(BenchmarkId::new("nth", n), n, |bencher, n| bencher.iter(|| { - for start in 0_usize..10 { - let mut it = $iterator; - if let Some(s) = start.checked_sub(1) { - black_box(it.nth(s)); - } - while let Some(x) = it.nth(*n) { - black_box(x); - } - } - })); - } - $group.bench_function("collect", |bencher| bencher.iter(|| { - $iterator.collect::>() - })); - $group.bench_function("fold", |bencher| bencher.iter(|| { - $iterator.fold((), |(), x| { - black_box(x); - }) - })); - }; - - (@DoubleEndedIterator $group:ident $_first_its:ident: $iterator:expr) => { - $group.bench_function("next_back", |bencher| bencher.iter(|| { - let mut it = $iterator; - while let Some(x) = it.next_back() { - black_box(x); - } - })); - for n in NTH_INPUTS { - $group.bench_with_input(BenchmarkId::new("nth_back", n), n, |bencher, n| bencher.iter(|| { - for start in 0_usize..10 { - let mut it = $iterator; - if let Some(s) = start.checked_sub(1) { - black_box(it.nth_back(s)); - } - while let Some(x) = it.nth_back(*n) { - black_box(x); - } - } - })); - } - $group.bench_function("rfold", |bencher| bencher.iter(|| { - $iterator.rfold((), |(), x| { - black_box(x); - }) - })); - }; - - (@ExactSizeIterator $group:ident $first_its:ident: $_iterator:expr) => { - $group.bench_function("len", |bencher| bencher.iter(|| { - $first_its.iter().for_each(|it| { - black_box(it.len()); - }) - })); - }; -} - -// Usage examples: -// - For `ZipLongest::fold` only: -// cargo bench --bench specializations zip_longest/fold -// - For `.combinations(k).nth(8)`: -// cargo bench --bench specializations combinations./nth/8 -bench_specializations! 
{ - interleave { - { - let v1 = black_box(vec![0; 1024]); - let v2 = black_box(vec![0; 768]); - } - v1.iter().interleave(&v2) - } - interleave_shortest { - { - let v1 = black_box(vec![0; 1024]); - let v2 = black_box(vec![0; 768]); - } - v1.iter().interleave_shortest(&v2) - } - batching { - { - let v = black_box(vec![0; 1024]); - } - v.iter().batching(Iterator::next) - } - tuple_windows1 { - ExactSizeIterator - { - let v = black_box(vec![0; 1024]); - } - v.iter().tuple_windows::<(_,)>() - } - tuple_windows2 { - ExactSizeIterator - { - let v = black_box(vec![0; 1024]); - } - v.iter().tuple_windows::<(_, _)>() - } - tuple_windows3 { - ExactSizeIterator - { - let v = black_box(vec![0; 1024]); - } - v.iter().tuple_windows::<(_, _, _)>() - } - tuple_windows4 { - ExactSizeIterator - { - let v = black_box(vec![0; 1024]); - } - v.iter().tuple_windows::<(_, _, _, _)>() - } - circular_tuple_windows1 { - ExactSizeIterator - { - let v = black_box(vec![0; 1024]); - } - v.iter().circular_tuple_windows::<(_,)>() - } - circular_tuple_windows2 { - ExactSizeIterator - { - let v = black_box(vec![0; 1024]); - } - v.iter().circular_tuple_windows::<(_, _)>() - } - circular_tuple_windows3 { - ExactSizeIterator - { - let v = black_box(vec![0; 1024]); - } - v.iter().circular_tuple_windows::<(_, _, _)>() - } - circular_tuple_windows4 { - ExactSizeIterator - { - let v = black_box(vec![0; 1024]); - } - v.iter().circular_tuple_windows::<(_, _, _, _)>() - } - tuples1 { - ExactSizeIterator - { - let v = black_box(vec![0; 1024]); - } - v.iter().tuples::<(_,)>() - } - tuples2 { - ExactSizeIterator - { - let v = black_box(vec![0; 1024]); - } - v.iter().tuples::<(_, _)>() - } - tuples3 { - ExactSizeIterator - { - let v = black_box(vec![0; 1024]); - } - v.iter().tuples::<(_, _, _)>() - } - tuples4 { - ExactSizeIterator - { - let v = black_box(vec![0; 1024]); - } - v.iter().tuples::<(_, _, _, _)>() - } - tuple_buffer { - ExactSizeIterator - { - let v = black_box(vec![0; 11]); - // Short but the buffer can't have 12 or more elements. - } - { - let mut it = v.iter().tuples::<(_, _, _, _, _, _, _, _, _, _, _, _)>(); - it.next(); // No element but it fills the buffer. 
- it.into_buffer() - } - } - cartesian_product { - { - let v = black_box(vec![0; 16]); - } - itertools::iproduct!(&v, &v, &v) - } - multi_cartesian_product { - { - let vs = black_box([0; 3].map(|_| vec![0; 16])); - } - vs.iter().multi_cartesian_product() - } - coalesce { - { - let v = black_box(vec![0; 1024]); - } - v.iter().coalesce(|x, y| if x == y { Ok(x) } else { Err((x, y)) }) - } - dedup { - { - let v = black_box((0..32).flat_map(|x| [x; 32]).collect_vec()); - } - v.iter().dedup() - } - dedup_by { - { - let v = black_box((0..32).flat_map(|x| [x; 32]).collect_vec()); - } - v.iter().dedup_by(PartialOrd::ge) - } - dedup_with_count { - { - let v = black_box((0..32).flat_map(|x| [x; 32]).collect_vec()); - } - v.iter().dedup_with_count() - } - dedup_by_with_count { - { - let v = black_box((0..32).flat_map(|x| [x; 32]).collect_vec()); - } - v.iter().dedup_by_with_count(PartialOrd::ge) - } - duplicates { - DoubleEndedIterator - { - let v = black_box((0..32).cycle().take(1024).collect_vec()); - } - v.iter().duplicates() - } - duplicates_by { - DoubleEndedIterator - { - let v = black_box((0..1024).collect_vec()); - } - v.iter().duplicates_by(|x| *x % 10) - } - unique { - DoubleEndedIterator - { - let v = black_box((0..32).cycle().take(1024).collect_vec()); - } - v.iter().unique() - } - unique_by { - DoubleEndedIterator - { - let v = black_box((0..1024).collect_vec()); - } - v.iter().unique_by(|x| *x % 50) - } - take_while_inclusive { - { - let v = black_box((0..1024).collect_vec()); - } - v.iter().take_while_inclusive(|x| **x < 1000) - } - pad_using { - DoubleEndedIterator - ExactSizeIterator - { - let v = black_box((0..1024).collect_vec()); - } - v.iter().copied().pad_using(2048, |i| 5 * i) - } - positions { - DoubleEndedIterator - { - let v = black_box((0..1024).collect_vec()); - } - v.iter().positions(|x| x % 5 == 0) - } - update { - DoubleEndedIterator - ExactSizeIterator - { - let v = black_box((0_i32..1024).collect_vec()); - } - v.iter().copied().update(|x| *x *= 7) - } - tuple_combinations1 { - { - let v = black_box(vec![0; 1024]); - } - v.iter().tuple_combinations::<(_,)>() - } - tuple_combinations2 { - { - let v = black_box(vec![0; 64]); - } - v.iter().tuple_combinations::<(_, _)>() - } - tuple_combinations3 { - { - let v = black_box(vec![0; 64]); - } - v.iter().tuple_combinations::<(_, _, _)>() - } - tuple_combinations4 { - { - let v = black_box(vec![0; 64]); - } - v.iter().tuple_combinations::<(_, _, _, _)>() - } - intersperse { - { - let v = black_box(vec![0; 1024]); - let n = black_box(0); - } - v.iter().intersperse(&n) - } - intersperse_with { - { - let v = black_box(vec![0; 1024]); - let n = black_box(0); - } - v.iter().intersperse_with(|| &n) - } - combinations1 { - { - let v = black_box(vec![0; 1792]); - } - v.iter().combinations(1) - } - combinations2 { - { - let v = black_box(vec![0; 60]); - } - v.iter().combinations(2) - } - combinations3 { - { - let v = black_box(vec![0; 23]); - } - v.iter().combinations(3) - } - combinations4 { - { - let v = black_box(vec![0; 16]); - } - v.iter().combinations(4) - } - combinations_with_replacement1 { - { - let v = black_box(vec![0; 4096]); - } - v.iter().combinations_with_replacement(1) - } - combinations_with_replacement2 { - { - let v = black_box(vec![0; 90]); - } - v.iter().combinations_with_replacement(2) - } - combinations_with_replacement3 { - { - let v = black_box(vec![0; 28]); - } - v.iter().combinations_with_replacement(3) - } - combinations_with_replacement4 { - { - let v = black_box(vec![0; 16]); - } - 
v.iter().combinations_with_replacement(4) - } - permutations1 { - { - let v = black_box(vec![0; 1024]); - } - v.iter().permutations(1) - } - permutations2 { - { - let v = black_box(vec![0; 36]); - } - v.iter().permutations(2) - } - permutations3 { - { - let v = black_box(vec![0; 12]); - } - v.iter().permutations(3) - } - permutations4 { - { - let v = black_box(vec![0; 8]); - } - v.iter().permutations(4) - } - powerset { - { - let v = black_box(vec![0; 10]); - } - v.iter().powerset() - } - while_some { - {} - (0..) - .map(black_box) - .map(|i| char::from_digit(i, 16)) - .while_some() - } - with_position { - ExactSizeIterator - { - let v = black_box((0..10240).collect_vec()); - } - v.iter().with_position() - } - zip_longest { - DoubleEndedIterator - ExactSizeIterator - { - let xs = black_box(vec![0; 1024]); - let ys = black_box(vec![0; 768]); - } - xs.iter().zip_longest(ys.iter()) - } - zip_eq { - ExactSizeIterator - { - let v = black_box(vec![0; 1024]); - } - v.iter().zip_eq(v.iter().rev()) - } - multizip { - DoubleEndedIterator - ExactSizeIterator - { - let v1 = black_box(vec![0; 1024]); - let v2 = black_box(vec![0; 768]); - let v3 = black_box(vec![0; 2048]); - } - itertools::multizip((&v1, &v2, &v3)) - } - izip { - DoubleEndedIterator - ExactSizeIterator - { - let v1 = black_box(vec![0; 1024]); - let v2 = black_box(vec![0; 768]); - let v3 = black_box(vec![0; 2048]); - } - itertools::izip!(&v1, &v2, &v3) - } - put_back { - { - let v = black_box(vec![0; 1024]); - } - itertools::put_back(&v).with_value(black_box(&0)) - } - put_back_n { - { - let v1 = black_box(vec![0; 1024]); - let v2 = black_box(vec![0; 16]); - } - { - let mut it = itertools::put_back_n(&v1); - for n in &v2 { - it.put_back(n); - } - it - } - } - exactly_one_error { - ExactSizeIterator - { - let v = black_box(vec![0; 1024]); - } - // Use `at_most_one` would be similar. 
- v.iter().exactly_one().unwrap_err() - } - multipeek { - ExactSizeIterator - { - let v = black_box(vec![0; 1024]); - let n = black_box(16); - } - { - let mut it = v.iter().multipeek(); - for _ in 0..n { - it.peek(); - } - it - } - } - peek_nth { - ExactSizeIterator - { - let v = black_box(vec![0; 1024]); - let n = black_box(16); - } - { - let mut it = itertools::peek_nth(&v); - it.peek_nth(n); - it - } - } - repeat_n { - DoubleEndedIterator - ExactSizeIterator - {} - itertools::repeat_n(black_box(0), black_box(1024)) - } - merge { - { - let v1 = black_box((0..1024).collect_vec()); - let v2 = black_box((0..768).collect_vec()); - } - v1.iter().merge(&v2) - } - merge_by { - { - let v1 = black_box((0..1024).collect_vec()); - let v2 = black_box((0..768).collect_vec()); - } - v1.iter().merge_by(&v2, PartialOrd::ge) - } - merge_join_by_ordering { - { - let v1 = black_box((0..1024).collect_vec()); - let v2 = black_box((0..768).collect_vec()); - } - v1.iter().merge_join_by(&v2, Ord::cmp) - } - merge_join_by_bool { - { - let v1 = black_box((0..1024).collect_vec()); - let v2 = black_box((0..768).collect_vec()); - } - v1.iter().merge_join_by(&v2, PartialOrd::ge) - } - kmerge { - { - let vs = black_box(vec![vec![0; 1024], vec![0; 256], vec![0; 768]]); - } - vs.iter().kmerge() - } - kmerge_by { - { - let vs = black_box(vec![vec![0; 1024], vec![0; 256], vec![0; 768]]); - } - vs.iter().kmerge_by(PartialOrd::ge) - } - map_into { - DoubleEndedIterator - ExactSizeIterator - { - let v = black_box(vec![0_u8; 1024]); - } - v.iter().copied().map_into::() - } - map_ok { - DoubleEndedIterator - ExactSizeIterator - { - let v = black_box((0_u32..1024) - .map(|x| if x % 2 == 1 { Err(x) } else { Ok(x) }) - .collect_vec()); - } - v.iter().copied().map_ok(|x| x + 1) - } - filter_ok { - { - let v = black_box((0_u32..1024) - .map(|x| if x % 2 == 1 { Err(x) } else { Ok(x) }) - .collect_vec()); - } - v.iter().copied().filter_ok(|x| x % 3 == 0) - } - filter_map_ok { - { - let v = black_box((0_u32..1024) - .map(|x| if x % 2 == 1 { Err(x) } else { Ok(x) }) - .collect_vec()); - } - v.iter().copied().filter_map_ok(|x| if x % 3 == 0 { Some(x + 1) } else { None }) - } - flatten_ok { - DoubleEndedIterator - { - let d = black_box(vec![0; 8]); - let v = black_box((0..512) - .map(|x| if x % 2 == 0 { Ok(&d) } else { Err(x) }) - .collect_vec()); - } - v.iter().copied().flatten_ok() - } -} diff --git a/vendor/itertools/benches/tree_reduce.rs b/vendor/itertools/benches/tree_reduce.rs deleted file mode 100644 index 051b148834815e..00000000000000 --- a/vendor/itertools/benches/tree_reduce.rs +++ /dev/null @@ -1,150 +0,0 @@ -#![allow(deprecated)] - -use criterion::{criterion_group, criterion_main, Criterion}; -use itertools::{cloned, Itertools}; - -trait IterEx: Iterator { - // Another efficient implementation against which to compare, - // but needs `std` so is less desirable. - fn tree_reduce_vec(self, mut f: F) -> Option - where - F: FnMut(Self::Item, Self::Item) -> Self::Item, - Self: Sized, - { - let hint = self.size_hint().0; - let cap = std::mem::size_of::() * 8 - hint.leading_zeros() as usize; - let mut stack = Vec::with_capacity(cap); - self.enumerate().for_each(|(mut i, mut x)| { - while (i & 1) != 0 { - x = f(stack.pop().unwrap(), x); - i >>= 1; - } - stack.push(x); - }); - stack.into_iter().fold1(f) - } -} -impl IterEx for T {} - -macro_rules! 
def_benchs { - ($N:expr, - $FUN:ident, - $BENCH_NAME:ident, - ) => { - mod $BENCH_NAME { - use super::*; - - pub fn sum(c: &mut Criterion) { - let v: Vec = (0..$N).collect(); - - c.bench_function( - &(stringify!($BENCH_NAME).replace('_', " ") + " sum"), - move |b| b.iter(|| cloned(&v).$FUN(|x, y| x + y)), - ); - } - - pub fn complex_iter(c: &mut Criterion) { - let u = (3..).take($N / 2); - let v = (5..).take($N / 2); - let it = u.chain(v); - - c.bench_function( - &(stringify!($BENCH_NAME).replace('_', " ") + " complex iter"), - move |b| b.iter(|| it.clone().map(|x| x as f32).$FUN(f32::atan2)), - ); - } - - pub fn string_format(c: &mut Criterion) { - // This goes quadratic with linear `fold1`, so use a smaller - // size to not waste too much time in travis. The allocations - // in here are so expensive anyway that it'll still take - // way longer per iteration than the other two benchmarks. - let v: Vec = (0..($N / 4)).collect(); - - c.bench_function( - &(stringify!($BENCH_NAME).replace('_', " ") + " string format"), - move |b| { - b.iter(|| { - cloned(&v) - .map(|x| x.to_string()) - .$FUN(|x, y| format!("{} + {}", x, y)) - }) - }, - ); - } - } - - criterion_group!( - $BENCH_NAME, - $BENCH_NAME::sum, - $BENCH_NAME::complex_iter, - $BENCH_NAME::string_format, - ); - }; -} - -def_benchs! { - 10_000, - fold1, - fold1_10k, -} - -def_benchs! { - 10_000, - tree_reduce, - tree_reduce_stack_10k, -} - -def_benchs! { - 10_000, - tree_reduce_vec, - tree_reduce_vec_10k, -} - -def_benchs! { - 100, - fold1, - fold1_100, -} - -def_benchs! { - 100, - tree_reduce, - tree_reduce_stack_100, -} - -def_benchs! { - 100, - tree_reduce_vec, - tree_reduce_vec_100, -} - -def_benchs! { - 8, - fold1, - fold1_08, -} - -def_benchs! { - 8, - tree_reduce, - tree_reduce_stack_08, -} - -def_benchs! 
{ - 8, - tree_reduce_vec, - tree_reduce_vec_08, -} - -criterion_main!( - fold1_10k, - tree_reduce_stack_10k, - tree_reduce_vec_10k, - fold1_100, - tree_reduce_stack_100, - tree_reduce_vec_100, - fold1_08, - tree_reduce_stack_08, - tree_reduce_vec_08, -); diff --git a/vendor/itertools/benches/tuple_combinations.rs b/vendor/itertools/benches/tuple_combinations.rs deleted file mode 100644 index 4e26b282e853f7..00000000000000 --- a/vendor/itertools/benches/tuple_combinations.rs +++ /dev/null @@ -1,113 +0,0 @@ -use criterion::{black_box, criterion_group, criterion_main, Criterion}; -use itertools::Itertools; - -// approximate 100_000 iterations for each combination -const N1: usize = 100_000; -const N2: usize = 448; -const N3: usize = 86; -const N4: usize = 41; - -fn tuple_comb_for1(c: &mut Criterion) { - c.bench_function("tuple comb for1", move |b| { - b.iter(|| { - for i in 0..N1 { - black_box(i); - } - }) - }); -} - -fn tuple_comb_for2(c: &mut Criterion) { - c.bench_function("tuple comb for2", move |b| { - b.iter(|| { - for i in 0..N2 { - for j in (i + 1)..N2 { - black_box(i + j); - } - } - }) - }); -} - -fn tuple_comb_for3(c: &mut Criterion) { - c.bench_function("tuple comb for3", move |b| { - b.iter(|| { - for i in 0..N3 { - for j in (i + 1)..N3 { - for k in (j + 1)..N3 { - black_box(i + j + k); - } - } - } - }) - }); -} - -fn tuple_comb_for4(c: &mut Criterion) { - c.bench_function("tuple comb for4", move |b| { - b.iter(|| { - for i in 0..N4 { - for j in (i + 1)..N4 { - for k in (j + 1)..N4 { - for l in (k + 1)..N4 { - black_box(i + j + k + l); - } - } - } - } - }) - }); -} - -fn tuple_comb_c1(c: &mut Criterion) { - c.bench_function("tuple comb c1", move |b| { - b.iter(|| { - for (i,) in (0..N1).tuple_combinations() { - black_box(i); - } - }) - }); -} - -fn tuple_comb_c2(c: &mut Criterion) { - c.bench_function("tuple comb c2", move |b| { - b.iter(|| { - for (i, j) in (0..N2).tuple_combinations() { - black_box(i + j); - } - }) - }); -} - -fn tuple_comb_c3(c: &mut Criterion) { - c.bench_function("tuple comb c3", move |b| { - b.iter(|| { - for (i, j, k) in (0..N3).tuple_combinations() { - black_box(i + j + k); - } - }) - }); -} - -fn tuple_comb_c4(c: &mut Criterion) { - c.bench_function("tuple comb c4", move |b| { - b.iter(|| { - for (i, j, k, l) in (0..N4).tuple_combinations() { - black_box(i + j + k + l); - } - }) - }); -} - -criterion_group!( - benches, - tuple_comb_for1, - tuple_comb_for2, - tuple_comb_for3, - tuple_comb_for4, - tuple_comb_c1, - tuple_comb_c2, - tuple_comb_c3, - tuple_comb_c4, -); -criterion_main!(benches); diff --git a/vendor/itertools/benches/tuples.rs b/vendor/itertools/benches/tuples.rs deleted file mode 100644 index 2eca34712ad56e..00000000000000 --- a/vendor/itertools/benches/tuples.rs +++ /dev/null @@ -1,208 +0,0 @@ -use criterion::{criterion_group, criterion_main, Criterion}; -use itertools::Itertools; - -fn s1(a: u32) -> u32 { - a -} - -fn s2(a: u32, b: u32) -> u32 { - a + b -} - -fn s3(a: u32, b: u32, c: u32) -> u32 { - a + b + c -} - -fn s4(a: u32, b: u32, c: u32, d: u32) -> u32 { - a + b + c + d -} - -fn sum_s1(s: &[u32]) -> u32 { - s1(s[0]) -} - -fn sum_s2(s: &[u32]) -> u32 { - s2(s[0], s[1]) -} - -fn sum_s3(s: &[u32]) -> u32 { - s3(s[0], s[1], s[2]) -} - -fn sum_s4(s: &[u32]) -> u32 { - s4(s[0], s[1], s[2], s[3]) -} - -fn sum_t1(s: &(&u32,)) -> u32 { - s1(*s.0) -} - -fn sum_t2(s: &(&u32, &u32)) -> u32 { - s2(*s.0, *s.1) -} - -fn sum_t3(s: &(&u32, &u32, &u32)) -> u32 { - s3(*s.0, *s.1, *s.2) -} - -fn sum_t4(s: &(&u32, &u32, &u32, &u32)) -> u32 { - s4(*s.0, 
*s.1, *s.2, *s.3) -} - -macro_rules! def_benchs { - ($N:expr; - $BENCH_GROUP:ident, - $TUPLE_FUN:ident, - $TUPLES:ident, - $TUPLE_WINDOWS:ident; - $SLICE_FUN:ident, - $CHUNKS:ident, - $WINDOWS:ident; - $FOR_CHUNKS:ident, - $FOR_WINDOWS:ident - ) => { - fn $FOR_CHUNKS(c: &mut Criterion) { - let v: Vec = (0..$N * 1_000).collect(); - let mut s = 0; - c.bench_function(&stringify!($FOR_CHUNKS).replace('_', " "), move |b| { - b.iter(|| { - let mut j = 0; - for _ in 0..1_000 { - s += $SLICE_FUN(&v[j..(j + $N)]); - j += $N; - } - s - }) - }); - } - - fn $FOR_WINDOWS(c: &mut Criterion) { - let v: Vec = (0..1_000).collect(); - let mut s = 0; - c.bench_function(&stringify!($FOR_WINDOWS).replace('_', " "), move |b| { - b.iter(|| { - for i in 0..(1_000 - $N) { - s += $SLICE_FUN(&v[i..(i + $N)]); - } - s - }) - }); - } - - fn $TUPLES(c: &mut Criterion) { - let v: Vec = (0..$N * 1_000).collect(); - let mut s = 0; - c.bench_function(&stringify!($TUPLES).replace('_', " "), move |b| { - b.iter(|| { - for x in v.iter().tuples() { - s += $TUPLE_FUN(&x); - } - s - }) - }); - } - - fn $CHUNKS(c: &mut Criterion) { - let v: Vec = (0..$N * 1_000).collect(); - let mut s = 0; - c.bench_function(&stringify!($CHUNKS).replace('_', " "), move |b| { - b.iter(|| { - for x in v.chunks($N) { - s += $SLICE_FUN(x); - } - s - }) - }); - } - - fn $TUPLE_WINDOWS(c: &mut Criterion) { - let v: Vec = (0..1_000).collect(); - let mut s = 0; - c.bench_function(&stringify!($TUPLE_WINDOWS).replace('_', " "), move |b| { - b.iter(|| { - for x in v.iter().tuple_windows() { - s += $TUPLE_FUN(&x); - } - s - }) - }); - } - - fn $WINDOWS(c: &mut Criterion) { - let v: Vec = (0..1_000).collect(); - let mut s = 0; - c.bench_function(&stringify!($WINDOWS).replace('_', " "), move |b| { - b.iter(|| { - for x in v.windows($N) { - s += $SLICE_FUN(x); - } - s - }) - }); - } - - criterion_group!( - $BENCH_GROUP, - $FOR_CHUNKS, - $FOR_WINDOWS, - $TUPLES, - $CHUNKS, - $TUPLE_WINDOWS, - $WINDOWS, - ); - }; -} - -def_benchs! { - 1; - benches_1, - sum_t1, - tuple_chunks_1, - tuple_windows_1; - sum_s1, - slice_chunks_1, - slice_windows_1; - for_chunks_1, - for_windows_1 -} - -def_benchs! { - 2; - benches_2, - sum_t2, - tuple_chunks_2, - tuple_windows_2; - sum_s2, - slice_chunks_2, - slice_windows_2; - for_chunks_2, - for_windows_2 -} - -def_benchs! { - 3; - benches_3, - sum_t3, - tuple_chunks_3, - tuple_windows_3; - sum_s3, - slice_chunks_3, - slice_windows_3; - for_chunks_3, - for_windows_3 -} - -def_benchs! 
{ - 4; - benches_4, - sum_t4, - tuple_chunks_4, - tuple_windows_4; - sum_s4, - slice_chunks_4, - slice_windows_4; - for_chunks_4, - for_windows_4 -} - -criterion_main!(benches_1, benches_2, benches_3, benches_4,); diff --git a/vendor/itertools/examples/iris.data b/vendor/itertools/examples/iris.data deleted file mode 100644 index a3490e0e07dc9d..00000000000000 --- a/vendor/itertools/examples/iris.data +++ /dev/null @@ -1,150 +0,0 @@ -5.1,3.5,1.4,0.2,Iris-setosa -4.9,3.0,1.4,0.2,Iris-setosa -4.7,3.2,1.3,0.2,Iris-setosa -4.6,3.1,1.5,0.2,Iris-setosa -5.0,3.6,1.4,0.2,Iris-setosa -5.4,3.9,1.7,0.4,Iris-setosa -4.6,3.4,1.4,0.3,Iris-setosa -5.0,3.4,1.5,0.2,Iris-setosa -4.4,2.9,1.4,0.2,Iris-setosa -4.9,3.1,1.5,0.1,Iris-setosa -5.4,3.7,1.5,0.2,Iris-setosa -4.8,3.4,1.6,0.2,Iris-setosa -4.8,3.0,1.4,0.1,Iris-setosa -4.3,3.0,1.1,0.1,Iris-setosa -5.8,4.0,1.2,0.2,Iris-setosa -5.7,4.4,1.5,0.4,Iris-setosa -5.4,3.9,1.3,0.4,Iris-setosa -5.1,3.5,1.4,0.3,Iris-setosa -5.7,3.8,1.7,0.3,Iris-setosa -5.1,3.8,1.5,0.3,Iris-setosa -5.4,3.4,1.7,0.2,Iris-setosa -5.1,3.7,1.5,0.4,Iris-setosa -4.6,3.6,1.0,0.2,Iris-setosa -5.1,3.3,1.7,0.5,Iris-setosa -4.8,3.4,1.9,0.2,Iris-setosa -5.0,3.0,1.6,0.2,Iris-setosa -5.0,3.4,1.6,0.4,Iris-setosa -5.2,3.5,1.5,0.2,Iris-setosa -5.2,3.4,1.4,0.2,Iris-setosa -4.7,3.2,1.6,0.2,Iris-setosa -4.8,3.1,1.6,0.2,Iris-setosa -5.4,3.4,1.5,0.4,Iris-setosa -5.2,4.1,1.5,0.1,Iris-setosa -5.5,4.2,1.4,0.2,Iris-setosa -4.9,3.1,1.5,0.1,Iris-setosa -5.0,3.2,1.2,0.2,Iris-setosa -5.5,3.5,1.3,0.2,Iris-setosa -4.9,3.1,1.5,0.1,Iris-setosa -4.4,3.0,1.3,0.2,Iris-setosa -5.1,3.4,1.5,0.2,Iris-setosa -5.0,3.5,1.3,0.3,Iris-setosa -4.5,2.3,1.3,0.3,Iris-setosa -4.4,3.2,1.3,0.2,Iris-setosa -5.0,3.5,1.6,0.6,Iris-setosa -5.1,3.8,1.9,0.4,Iris-setosa -4.8,3.0,1.4,0.3,Iris-setosa -5.1,3.8,1.6,0.2,Iris-setosa -4.6,3.2,1.4,0.2,Iris-setosa -5.3,3.7,1.5,0.2,Iris-setosa -5.0,3.3,1.4,0.2,Iris-setosa -7.0,3.2,4.7,1.4,Iris-versicolor -6.4,3.2,4.5,1.5,Iris-versicolor -6.9,3.1,4.9,1.5,Iris-versicolor -5.5,2.3,4.0,1.3,Iris-versicolor -6.5,2.8,4.6,1.5,Iris-versicolor -5.7,2.8,4.5,1.3,Iris-versicolor -6.3,3.3,4.7,1.6,Iris-versicolor -4.9,2.4,3.3,1.0,Iris-versicolor -6.6,2.9,4.6,1.3,Iris-versicolor -5.2,2.7,3.9,1.4,Iris-versicolor -5.0,2.0,3.5,1.0,Iris-versicolor -5.9,3.0,4.2,1.5,Iris-versicolor -6.0,2.2,4.0,1.0,Iris-versicolor -6.1,2.9,4.7,1.4,Iris-versicolor -5.6,2.9,3.6,1.3,Iris-versicolor -6.7,3.1,4.4,1.4,Iris-versicolor -5.6,3.0,4.5,1.5,Iris-versicolor -5.8,2.7,4.1,1.0,Iris-versicolor -6.2,2.2,4.5,1.5,Iris-versicolor -5.6,2.5,3.9,1.1,Iris-versicolor -5.9,3.2,4.8,1.8,Iris-versicolor -6.1,2.8,4.0,1.3,Iris-versicolor -6.3,2.5,4.9,1.5,Iris-versicolor -6.1,2.8,4.7,1.2,Iris-versicolor -6.4,2.9,4.3,1.3,Iris-versicolor -6.6,3.0,4.4,1.4,Iris-versicolor -6.8,2.8,4.8,1.4,Iris-versicolor -6.7,3.0,5.0,1.7,Iris-versicolor -6.0,2.9,4.5,1.5,Iris-versicolor -5.7,2.6,3.5,1.0,Iris-versicolor -5.5,2.4,3.8,1.1,Iris-versicolor -5.5,2.4,3.7,1.0,Iris-versicolor -5.8,2.7,3.9,1.2,Iris-versicolor -6.0,2.7,5.1,1.6,Iris-versicolor -5.4,3.0,4.5,1.5,Iris-versicolor -6.0,3.4,4.5,1.6,Iris-versicolor -6.7,3.1,4.7,1.5,Iris-versicolor -6.3,2.3,4.4,1.3,Iris-versicolor -5.6,3.0,4.1,1.3,Iris-versicolor -5.5,2.5,4.0,1.3,Iris-versicolor -5.5,2.6,4.4,1.2,Iris-versicolor -6.1,3.0,4.6,1.4,Iris-versicolor -5.8,2.6,4.0,1.2,Iris-versicolor -5.0,2.3,3.3,1.0,Iris-versicolor -5.6,2.7,4.2,1.3,Iris-versicolor -5.7,3.0,4.2,1.2,Iris-versicolor -5.7,2.9,4.2,1.3,Iris-versicolor -6.2,2.9,4.3,1.3,Iris-versicolor -5.1,2.5,3.0,1.1,Iris-versicolor -5.7,2.8,4.1,1.3,Iris-versicolor 
-6.3,3.3,6.0,2.5,Iris-virginica -5.8,2.7,5.1,1.9,Iris-virginica -7.1,3.0,5.9,2.1,Iris-virginica -6.3,2.9,5.6,1.8,Iris-virginica -6.5,3.0,5.8,2.2,Iris-virginica -7.6,3.0,6.6,2.1,Iris-virginica -4.9,2.5,4.5,1.7,Iris-virginica -7.3,2.9,6.3,1.8,Iris-virginica -6.7,2.5,5.8,1.8,Iris-virginica -7.2,3.6,6.1,2.5,Iris-virginica -6.5,3.2,5.1,2.0,Iris-virginica -6.4,2.7,5.3,1.9,Iris-virginica -6.8,3.0,5.5,2.1,Iris-virginica -5.7,2.5,5.0,2.0,Iris-virginica -5.8,2.8,5.1,2.4,Iris-virginica -6.4,3.2,5.3,2.3,Iris-virginica -6.5,3.0,5.5,1.8,Iris-virginica -7.7,3.8,6.7,2.2,Iris-virginica -7.7,2.6,6.9,2.3,Iris-virginica -6.0,2.2,5.0,1.5,Iris-virginica -6.9,3.2,5.7,2.3,Iris-virginica -5.6,2.8,4.9,2.0,Iris-virginica -7.7,2.8,6.7,2.0,Iris-virginica -6.3,2.7,4.9,1.8,Iris-virginica -6.7,3.3,5.7,2.1,Iris-virginica -7.2,3.2,6.0,1.8,Iris-virginica -6.2,2.8,4.8,1.8,Iris-virginica -6.1,3.0,4.9,1.8,Iris-virginica -6.4,2.8,5.6,2.1,Iris-virginica -7.2,3.0,5.8,1.6,Iris-virginica -7.4,2.8,6.1,1.9,Iris-virginica -7.9,3.8,6.4,2.0,Iris-virginica -6.4,2.8,5.6,2.2,Iris-virginica -6.3,2.8,5.1,1.5,Iris-virginica -6.1,2.6,5.6,1.4,Iris-virginica -7.7,3.0,6.1,2.3,Iris-virginica -6.3,3.4,5.6,2.4,Iris-virginica -6.4,3.1,5.5,1.8,Iris-virginica -6.0,3.0,4.8,1.8,Iris-virginica -6.9,3.1,5.4,2.1,Iris-virginica -6.7,3.1,5.6,2.4,Iris-virginica -6.9,3.1,5.1,2.3,Iris-virginica -5.8,2.7,5.1,1.9,Iris-virginica -6.8,3.2,5.9,2.3,Iris-virginica -6.7,3.3,5.7,2.5,Iris-virginica -6.7,3.0,5.2,2.3,Iris-virginica -6.3,2.5,5.0,1.9,Iris-virginica -6.5,3.0,5.2,2.0,Iris-virginica -6.2,3.4,5.4,2.3,Iris-virginica -5.9,3.0,5.1,1.8,Iris-virginica diff --git a/vendor/itertools/examples/iris.rs b/vendor/itertools/examples/iris.rs deleted file mode 100644 index 63f9c48326041d..00000000000000 --- a/vendor/itertools/examples/iris.rs +++ /dev/null @@ -1,140 +0,0 @@ -/// -/// This example parses, sorts and groups the iris dataset -/// and does some simple manipulations. -/// -/// Iterators and itertools functionality are used throughout. 
-use itertools::Itertools; -use std::collections::HashMap; -use std::iter::repeat; -use std::num::ParseFloatError; -use std::str::FromStr; - -static DATA: &str = include_str!("iris.data"); - -#[derive(Clone, Debug)] -struct Iris { - name: String, - data: [f32; 4], -} - -#[allow(dead_code)] // fields are currently ignored -#[derive(Clone, Debug)] -enum ParseError { - Numeric(ParseFloatError), - Other(&'static str), -} - -impl From for ParseError { - fn from(err: ParseFloatError) -> Self { - Self::Numeric(err) - } -} - -/// Parse an Iris from a comma-separated line -impl FromStr for Iris { - type Err = ParseError; - - fn from_str(s: &str) -> Result { - let mut iris = Self { - name: "".into(), - data: [0.; 4], - }; - let mut parts = s.split(',').map(str::trim); - - // using Iterator::by_ref() - for (index, part) in parts.by_ref().take(4).enumerate() { - iris.data[index] = part.parse::()?; - } - if let Some(name) = parts.next() { - iris.name = name.into(); - } else { - return Err(ParseError::Other("Missing name")); - } - Ok(iris) - } -} - -fn main() { - // using Itertools::fold_results to create the result of parsing - let irises = DATA - .lines() - .map(str::parse) - .fold_ok(Vec::new(), |mut v, iris: Iris| { - v.push(iris); - v - }); - let mut irises = match irises { - Err(e) => { - println!("Error parsing: {:?}", e); - std::process::exit(1); - } - Ok(data) => data, - }; - - // Sort them and group them - irises.sort_by(|a, b| Ord::cmp(&a.name, &b.name)); - - // using Iterator::cycle() - let mut plot_symbols = "+ox".chars().cycle(); - let mut symbolmap = HashMap::new(); - - // using Itertools::chunk_by - for (species, species_chunk) in &irises.iter().chunk_by(|iris| &iris.name) { - // assign a plot symbol - symbolmap - .entry(species) - .or_insert_with(|| plot_symbols.next().unwrap()); - println!("{} (symbol={})", species, symbolmap[species]); - - for iris in species_chunk { - // using Itertools::format for lazy formatting - println!("{:>3.1}", iris.data.iter().format(", ")); - } - } - - // Look at all combinations of the four columns - // - // See https://en.wikipedia.org/wiki/Iris_flower_data_set - // - let n = 30; // plot size - let mut plot = vec![' '; n * n]; - - // using Itertools::tuple_combinations - for (a, b) in (0..4).tuple_combinations() { - println!("Column {} vs {}:", a, b); - - // Clear plot - // - // using std::iter::repeat; - // using Itertools::set_from - plot.iter_mut().set_from(repeat(' ')); - - // using Itertools::minmax - let min_max = |data: &[Iris], col| { - data.iter() - .map(|iris| iris.data[col]) - .minmax() - .into_option() - .expect("Can't find min/max of empty iterator") - }; - let (min_x, max_x) = min_max(&irises, a); - let (min_y, max_y) = min_max(&irises, b); - - // Plot the data points - let round_to_grid = |x, min, max| ((x - min) / (max - min) * ((n - 1) as f32)) as usize; - let flip = |ix| n - 1 - ix; // reverse axis direction - - for iris in &irises { - let ix = round_to_grid(iris.data[a], min_x, max_x); - let iy = flip(round_to_grid(iris.data[b], min_y, max_y)); - plot[n * iy + ix] = symbolmap[&iris.name]; - } - - // render plot - // - // using Itertools::join - for line in plot.chunks(n) { - println!("{}", line.iter().join(" ")) - } - } -} diff --git a/vendor/itertools/src/adaptors/coalesce.rs b/vendor/itertools/src/adaptors/coalesce.rs deleted file mode 100644 index ab1ab5255dd62e..00000000000000 --- a/vendor/itertools/src/adaptors/coalesce.rs +++ /dev/null @@ -1,286 +0,0 @@ -use std::fmt; -use std::iter::FusedIterator; - -use crate::size_hint; - 
-#[must_use = "iterator adaptors are lazy and do nothing unless consumed"] -pub struct CoalesceBy -where - I: Iterator, - C: CountItem, -{ - iter: I, - /// `last` is `None` while no item have been taken out of `iter` (at definition). - /// Then `last` will be `Some(Some(item))` until `iter` is exhausted, - /// in which case `last` will be `Some(None)`. - last: Option>, - f: F, -} - -impl Clone for CoalesceBy -where - I: Clone + Iterator, - F: Clone, - C: CountItem, - C::CItem: Clone, -{ - clone_fields!(last, iter, f); -} - -impl fmt::Debug for CoalesceBy -where - I: Iterator + fmt::Debug, - C: CountItem, - C::CItem: fmt::Debug, -{ - debug_fmt_fields!(CoalesceBy, iter, last); -} - -pub trait CoalescePredicate { - fn coalesce_pair(&mut self, t: T, item: Item) -> Result; -} - -impl Iterator for CoalesceBy -where - I: Iterator, - F: CoalescePredicate, - C: CountItem, -{ - type Item = C::CItem; - - fn next(&mut self) -> Option { - let Self { iter, last, f } = self; - // this fuses the iterator - let init = match last { - Some(elt) => elt.take(), - None => { - *last = Some(None); - iter.next().map(C::new) - } - }?; - - Some( - iter.try_fold(init, |accum, next| match f.coalesce_pair(accum, next) { - Ok(joined) => Ok(joined), - Err((last_, next_)) => { - *last = Some(Some(next_)); - Err(last_) - } - }) - .unwrap_or_else(|x| x), - ) - } - - fn size_hint(&self) -> (usize, Option) { - let (low, hi) = size_hint::add_scalar( - self.iter.size_hint(), - matches!(self.last, Some(Some(_))) as usize, - ); - ((low > 0) as usize, hi) - } - - fn fold(self, acc: Acc, mut fn_acc: FnAcc) -> Acc - where - FnAcc: FnMut(Acc, Self::Item) -> Acc, - { - let Self { - mut iter, - last, - mut f, - } = self; - if let Some(last) = last.unwrap_or_else(|| iter.next().map(C::new)) { - let (last, acc) = iter.fold((last, acc), |(last, acc), elt| { - match f.coalesce_pair(last, elt) { - Ok(joined) => (joined, acc), - Err((last_, next_)) => (next_, fn_acc(acc, last_)), - } - }); - fn_acc(acc, last) - } else { - acc - } - } -} - -impl FusedIterator for CoalesceBy -where - I: Iterator, - F: CoalescePredicate, - C: CountItem, -{ -} - -pub struct NoCount; - -pub struct WithCount; - -pub trait CountItem { - type CItem; - fn new(t: T) -> Self::CItem; -} - -impl CountItem for NoCount { - type CItem = T; - #[inline(always)] - fn new(t: T) -> T { - t - } -} - -impl CountItem for WithCount { - type CItem = (usize, T); - #[inline(always)] - fn new(t: T) -> (usize, T) { - (1, t) - } -} - -/// An iterator adaptor that may join together adjacent elements. -/// -/// See [`.coalesce()`](crate::Itertools::coalesce) for more information. -pub type Coalesce = CoalesceBy; - -impl CoalescePredicate for F -where - F: FnMut(T, Item) -> Result, -{ - fn coalesce_pair(&mut self, t: T, item: Item) -> Result { - self(t, item) - } -} - -/// Create a new `Coalesce`. -pub fn coalesce(iter: I, f: F) -> Coalesce -where - I: Iterator, -{ - Coalesce { - last: None, - iter, - f, - } -} - -/// An iterator adaptor that removes repeated duplicates, determining equality using a comparison function. -/// -/// See [`.dedup_by()`](crate::Itertools::dedup_by) or [`.dedup()`](crate::Itertools::dedup) for more information. 
-pub type DedupBy = CoalesceBy, NoCount>; - -#[derive(Clone)] -pub struct DedupPred2CoalescePred(DP); - -impl fmt::Debug for DedupPred2CoalescePred { - debug_fmt_fields!(DedupPred2CoalescePred,); -} - -pub trait DedupPredicate { - // TODO replace by Fn(&T, &T)->bool once Rust supports it - fn dedup_pair(&mut self, a: &T, b: &T) -> bool; -} - -impl CoalescePredicate for DedupPred2CoalescePred -where - DP: DedupPredicate, -{ - fn coalesce_pair(&mut self, t: T, item: T) -> Result { - if self.0.dedup_pair(&t, &item) { - Ok(t) - } else { - Err((t, item)) - } - } -} - -#[derive(Clone, Debug)] -pub struct DedupEq; - -impl DedupPredicate for DedupEq { - fn dedup_pair(&mut self, a: &T, b: &T) -> bool { - a == b - } -} - -impl bool> DedupPredicate for F { - fn dedup_pair(&mut self, a: &T, b: &T) -> bool { - self(a, b) - } -} - -/// Create a new `DedupBy`. -pub fn dedup_by(iter: I, dedup_pred: Pred) -> DedupBy -where - I: Iterator, -{ - DedupBy { - last: None, - iter, - f: DedupPred2CoalescePred(dedup_pred), - } -} - -/// An iterator adaptor that removes repeated duplicates. -/// -/// See [`.dedup()`](crate::Itertools::dedup) for more information. -pub type Dedup = DedupBy; - -/// Create a new `Dedup`. -pub fn dedup(iter: I) -> Dedup -where - I: Iterator, -{ - dedup_by(iter, DedupEq) -} - -/// An iterator adaptor that removes repeated duplicates, while keeping a count of how many -/// repeated elements were present. This will determine equality using a comparison function. -/// -/// See [`.dedup_by_with_count()`](crate::Itertools::dedup_by_with_count) or -/// [`.dedup_with_count()`](crate::Itertools::dedup_with_count) for more information. -pub type DedupByWithCount = - CoalesceBy, WithCount>; - -#[derive(Clone, Debug)] -pub struct DedupPredWithCount2CoalescePred(DP); - -impl CoalescePredicate for DedupPredWithCount2CoalescePred -where - DP: DedupPredicate, -{ - fn coalesce_pair( - &mut self, - (c, t): (usize, T), - item: T, - ) -> Result<(usize, T), ((usize, T), (usize, T))> { - if self.0.dedup_pair(&t, &item) { - Ok((c + 1, t)) - } else { - Err(((c, t), (1, item))) - } - } -} - -/// An iterator adaptor that removes repeated duplicates, while keeping a count of how many -/// repeated elements were present. -/// -/// See [`.dedup_with_count()`](crate::Itertools::dedup_with_count) for more information. -pub type DedupWithCount = DedupByWithCount; - -/// Create a new `DedupByWithCount`. -pub fn dedup_by_with_count(iter: I, dedup_pred: Pred) -> DedupByWithCount -where - I: Iterator, -{ - DedupByWithCount { - last: None, - iter, - f: DedupPredWithCount2CoalescePred(dedup_pred), - } -} - -/// Create a new `DedupWithCount`. 
-pub fn dedup_with_count(iter: I) -> DedupWithCount -where - I: Iterator, -{ - dedup_by_with_count(iter, DedupEq) -} diff --git a/vendor/itertools/src/adaptors/map.rs b/vendor/itertools/src/adaptors/map.rs deleted file mode 100644 index c78b9be698035e..00000000000000 --- a/vendor/itertools/src/adaptors/map.rs +++ /dev/null @@ -1,130 +0,0 @@ -use std::iter::FromIterator; -use std::marker::PhantomData; - -#[derive(Clone, Debug)] -#[must_use = "iterator adaptors are lazy and do nothing unless consumed"] -pub struct MapSpecialCase { - pub(crate) iter: I, - pub(crate) f: F, -} - -pub trait MapSpecialCaseFn { - type Out; - fn call(&mut self, t: T) -> Self::Out; -} - -impl Iterator for MapSpecialCase -where - I: Iterator, - R: MapSpecialCaseFn, -{ - type Item = R::Out; - - fn next(&mut self) -> Option { - self.iter.next().map(|i| self.f.call(i)) - } - - fn size_hint(&self) -> (usize, Option) { - self.iter.size_hint() - } - - fn fold(self, init: Acc, mut fold_f: Fold) -> Acc - where - Fold: FnMut(Acc, Self::Item) -> Acc, - { - let mut f = self.f; - self.iter.fold(init, move |acc, v| fold_f(acc, f.call(v))) - } - - fn collect(self) -> C - where - C: FromIterator, - { - let mut f = self.f; - self.iter.map(move |v| f.call(v)).collect() - } -} - -impl DoubleEndedIterator for MapSpecialCase -where - I: DoubleEndedIterator, - R: MapSpecialCaseFn, -{ - fn next_back(&mut self) -> Option { - self.iter.next_back().map(|i| self.f.call(i)) - } -} - -impl ExactSizeIterator for MapSpecialCase -where - I: ExactSizeIterator, - R: MapSpecialCaseFn, -{ -} - -/// An iterator adapter to apply a transformation within a nested `Result::Ok`. -/// -/// See [`.map_ok()`](crate::Itertools::map_ok) for more information. -pub type MapOk = MapSpecialCase>; - -impl MapSpecialCaseFn> for MapSpecialCaseFnOk -where - F: FnMut(T) -> U, -{ - type Out = Result; - fn call(&mut self, t: Result) -> Self::Out { - t.map(|v| self.0(v)) - } -} - -#[derive(Clone)] -pub struct MapSpecialCaseFnOk(F); - -impl std::fmt::Debug for MapSpecialCaseFnOk { - debug_fmt_fields!(MapSpecialCaseFnOk,); -} - -/// Create a new `MapOk` iterator. -pub fn map_ok(iter: I, f: F) -> MapOk -where - I: Iterator>, - F: FnMut(T) -> U, -{ - MapSpecialCase { - iter, - f: MapSpecialCaseFnOk(f), - } -} - -/// An iterator adapter to apply `Into` conversion to each element. -/// -/// See [`.map_into()`](crate::Itertools::map_into) for more information. -pub type MapInto = MapSpecialCase>; - -impl, U> MapSpecialCaseFn for MapSpecialCaseFnInto { - type Out = U; - fn call(&mut self, t: T) -> Self::Out { - t.into() - } -} - -pub struct MapSpecialCaseFnInto(PhantomData); - -impl std::fmt::Debug for MapSpecialCaseFnInto { - debug_fmt_fields!(MapSpecialCaseFnInto, 0); -} - -impl Clone for MapSpecialCaseFnInto { - #[inline] - fn clone(&self) -> Self { - Self(PhantomData) - } -} - -/// Create a new [`MapInto`] iterator. -pub fn map_into(iter: I) -> MapInto { - MapSpecialCase { - iter, - f: MapSpecialCaseFnInto(PhantomData), - } -} diff --git a/vendor/itertools/src/adaptors/mod.rs b/vendor/itertools/src/adaptors/mod.rs deleted file mode 100644 index 52e36c48be4c32..00000000000000 --- a/vendor/itertools/src/adaptors/mod.rs +++ /dev/null @@ -1,1208 +0,0 @@ -//! Licensed under the Apache License, Version 2.0 -//! or the MIT license -//! , at your -//! option. This file may not be copied, modified, or distributed -//! except according to those terms. 
- -mod coalesce; -pub(crate) mod map; -mod multi_product; -pub use self::coalesce::*; -pub use self::map::{map_into, map_ok, MapInto, MapOk}; -#[cfg(feature = "use_alloc")] -pub use self::multi_product::*; - -use crate::size_hint::{self, SizeHint}; -use std::fmt; -use std::iter::{Enumerate, FromIterator, Fuse, FusedIterator}; -use std::marker::PhantomData; - -/// An iterator adaptor that alternates elements from two iterators until both -/// run out. -/// -/// This iterator is *fused*. -/// -/// See [`.interleave()`](crate::Itertools::interleave) for more information. -#[derive(Clone, Debug)] -#[must_use = "iterator adaptors are lazy and do nothing unless consumed"] -pub struct Interleave { - i: Fuse, - j: Fuse, - next_coming_from_j: bool, -} - -/// Create an iterator that interleaves elements in `i` and `j`. -/// -/// [`IntoIterator`] enabled version of [`Itertools::interleave`](crate::Itertools::interleave). -pub fn interleave( - i: I, - j: J, -) -> Interleave<::IntoIter, ::IntoIter> -where - I: IntoIterator, - J: IntoIterator, -{ - Interleave { - i: i.into_iter().fuse(), - j: j.into_iter().fuse(), - next_coming_from_j: false, - } -} - -impl Iterator for Interleave -where - I: Iterator, - J: Iterator, -{ - type Item = I::Item; - #[inline] - fn next(&mut self) -> Option { - self.next_coming_from_j = !self.next_coming_from_j; - if self.next_coming_from_j { - match self.i.next() { - None => self.j.next(), - r => r, - } - } else { - match self.j.next() { - None => self.i.next(), - r => r, - } - } - } - - fn size_hint(&self) -> (usize, Option) { - size_hint::add(self.i.size_hint(), self.j.size_hint()) - } - - fn fold(self, mut init: B, mut f: F) -> B - where - F: FnMut(B, Self::Item) -> B, - { - let Self { - mut i, - mut j, - next_coming_from_j, - } = self; - if next_coming_from_j { - match j.next() { - Some(y) => init = f(init, y), - None => return i.fold(init, f), - } - } - let res = i.try_fold(init, |mut acc, x| { - acc = f(acc, x); - match j.next() { - Some(y) => Ok(f(acc, y)), - None => Err(acc), - } - }); - match res { - Ok(acc) => j.fold(acc, f), - Err(acc) => i.fold(acc, f), - } - } -} - -impl FusedIterator for Interleave -where - I: Iterator, - J: Iterator, -{ -} - -/// An iterator adaptor that alternates elements from the two iterators until -/// one of them runs out. -/// -/// This iterator is *fused*. -/// -/// See [`.interleave_shortest()`](crate::Itertools::interleave_shortest) -/// for more information. -#[derive(Clone, Debug)] -#[must_use = "iterator adaptors are lazy and do nothing unless consumed"] -pub struct InterleaveShortest -where - I: Iterator, - J: Iterator, -{ - i: I, - j: J, - next_coming_from_j: bool, -} - -/// Create a new `InterleaveShortest` iterator. 
-pub fn interleave_shortest(i: I, j: J) -> InterleaveShortest -where - I: Iterator, - J: Iterator, -{ - InterleaveShortest { - i, - j, - next_coming_from_j: false, - } -} - -impl Iterator for InterleaveShortest -where - I: Iterator, - J: Iterator, -{ - type Item = I::Item; - - #[inline] - fn next(&mut self) -> Option { - let e = if self.next_coming_from_j { - self.j.next() - } else { - self.i.next() - }; - if e.is_some() { - self.next_coming_from_j = !self.next_coming_from_j; - } - e - } - - #[inline] - fn size_hint(&self) -> (usize, Option) { - let (curr_hint, next_hint) = { - let i_hint = self.i.size_hint(); - let j_hint = self.j.size_hint(); - if self.next_coming_from_j { - (j_hint, i_hint) - } else { - (i_hint, j_hint) - } - }; - let (curr_lower, curr_upper) = curr_hint; - let (next_lower, next_upper) = next_hint; - let (combined_lower, combined_upper) = - size_hint::mul_scalar(size_hint::min(curr_hint, next_hint), 2); - let lower = if curr_lower > next_lower { - combined_lower + 1 - } else { - combined_lower - }; - let upper = { - let extra_elem = match (curr_upper, next_upper) { - (_, None) => false, - (None, Some(_)) => true, - (Some(curr_max), Some(next_max)) => curr_max > next_max, - }; - if extra_elem { - combined_upper.and_then(|x| x.checked_add(1)) - } else { - combined_upper - } - }; - (lower, upper) - } - - fn fold(self, mut init: B, mut f: F) -> B - where - F: FnMut(B, Self::Item) -> B, - { - let Self { - mut i, - mut j, - next_coming_from_j, - } = self; - if next_coming_from_j { - match j.next() { - Some(y) => init = f(init, y), - None => return init, - } - } - let res = i.try_fold(init, |mut acc, x| { - acc = f(acc, x); - match j.next() { - Some(y) => Ok(f(acc, y)), - None => Err(acc), - } - }); - match res { - Ok(val) => val, - Err(val) => val, - } - } -} - -impl FusedIterator for InterleaveShortest -where - I: FusedIterator, - J: FusedIterator, -{ -} - -#[derive(Clone, Debug)] -/// An iterator adaptor that allows putting back a single -/// item to the front of the iterator. -/// -/// Iterator element type is `I::Item`. -#[must_use = "iterator adaptors are lazy and do nothing unless consumed"] -pub struct PutBack -where - I: Iterator, -{ - top: Option, - iter: I, -} - -/// Create an iterator where you can put back a single item -pub fn put_back(iterable: I) -> PutBack -where - I: IntoIterator, -{ - PutBack { - top: None, - iter: iterable.into_iter(), - } -} - -impl PutBack -where - I: Iterator, -{ - /// put back value `value` (builder method) - pub fn with_value(mut self, value: I::Item) -> Self { - self.put_back(value); - self - } - - /// Split the `PutBack` into its parts. - #[inline] - pub fn into_parts(self) -> (Option, I) { - let Self { top, iter } = self; - (top, iter) - } - - /// Put back a single value to the front of the iterator. - /// - /// If a value is already in the put back slot, it is returned. 
- #[inline] - pub fn put_back(&mut self, x: I::Item) -> Option { - self.top.replace(x) - } -} - -impl Iterator for PutBack -where - I: Iterator, -{ - type Item = I::Item; - #[inline] - fn next(&mut self) -> Option { - match self.top { - None => self.iter.next(), - ref mut some => some.take(), - } - } - #[inline] - fn size_hint(&self) -> (usize, Option) { - // Not ExactSizeIterator because size may be larger than usize - size_hint::add_scalar(self.iter.size_hint(), self.top.is_some() as usize) - } - - fn count(self) -> usize { - self.iter.count() + (self.top.is_some() as usize) - } - - fn last(self) -> Option { - self.iter.last().or(self.top) - } - - fn nth(&mut self, n: usize) -> Option { - match self.top { - None => self.iter.nth(n), - ref mut some => { - if n == 0 { - some.take() - } else { - *some = None; - self.iter.nth(n - 1) - } - } - } - } - - fn all(&mut self, mut f: G) -> bool - where - G: FnMut(Self::Item) -> bool, - { - if let Some(elt) = self.top.take() { - if !f(elt) { - return false; - } - } - self.iter.all(f) - } - - fn fold(mut self, init: Acc, mut f: G) -> Acc - where - G: FnMut(Acc, Self::Item) -> Acc, - { - let mut accum = init; - if let Some(elt) = self.top.take() { - accum = f(accum, elt); - } - self.iter.fold(accum, f) - } -} - -#[derive(Debug, Clone)] -/// An iterator adaptor that iterates over the cartesian product of -/// the element sets of two iterators `I` and `J`. -/// -/// Iterator element type is `(I::Item, J::Item)`. -/// -/// See [`.cartesian_product()`](crate::Itertools::cartesian_product) for more information. -#[must_use = "iterator adaptors are lazy and do nothing unless consumed"] -pub struct Product -where - I: Iterator, -{ - a: I, - /// `a_cur` is `None` while no item have been taken out of `a` (at definition). - /// Then `a_cur` will be `Some(Some(item))` until `a` is exhausted, - /// in which case `a_cur` will be `Some(None)`. - a_cur: Option>, - b: J, - b_orig: J, -} - -/// Create a new cartesian product iterator -/// -/// Iterator element type is `(I::Item, J::Item)`. -pub fn cartesian_product(i: I, j: J) -> Product -where - I: Iterator, - J: Clone + Iterator, - I::Item: Clone, -{ - Product { - a_cur: None, - a: i, - b: j.clone(), - b_orig: j, - } -} - -impl Iterator for Product -where - I: Iterator, - J: Clone + Iterator, - I::Item: Clone, -{ - type Item = (I::Item, J::Item); - - fn next(&mut self) -> Option { - let Self { - a, - a_cur, - b, - b_orig, - } = self; - let elt_b = match b.next() { - None => { - *b = b_orig.clone(); - match b.next() { - None => return None, - Some(x) => { - *a_cur = Some(a.next()); - x - } - } - } - Some(x) => x, - }; - a_cur - .get_or_insert_with(|| a.next()) - .as_ref() - .map(|a| (a.clone(), elt_b)) - } - - fn size_hint(&self) -> (usize, Option) { - // Not ExactSizeIterator because size may be larger than usize - // Compute a * b_orig + b for both lower and upper bound - let mut sh = size_hint::mul(self.a.size_hint(), self.b_orig.size_hint()); - if matches!(self.a_cur, Some(Some(_))) { - sh = size_hint::add(sh, self.b.size_hint()); - } - sh - } - - fn fold(self, mut accum: Acc, mut f: G) -> Acc - where - G: FnMut(Acc, Self::Item) -> Acc, - { - // use a split loop to handle the loose a_cur as well as avoiding to - // clone b_orig at the end. 
- let Self { - mut a, - a_cur, - mut b, - b_orig, - } = self; - if let Some(mut elt_a) = a_cur.unwrap_or_else(|| a.next()) { - loop { - accum = b.fold(accum, |acc, elt| f(acc, (elt_a.clone(), elt))); - - // we can only continue iterating a if we had a first element; - if let Some(next_elt_a) = a.next() { - b = b_orig.clone(); - elt_a = next_elt_a; - } else { - break; - } - } - } - accum - } -} - -impl FusedIterator for Product -where - I: FusedIterator, - J: Clone + FusedIterator, - I::Item: Clone, -{ -} - -/// A “meta iterator adaptor”. Its closure receives a reference to the iterator -/// and may pick off as many elements as it likes, to produce the next iterator element. -/// -/// Iterator element type is `X` if the return type of `F` is `Option`. -/// -/// See [`.batching()`](crate::Itertools::batching) for more information. -#[derive(Clone)] -#[must_use = "iterator adaptors are lazy and do nothing unless consumed"] -pub struct Batching { - f: F, - iter: I, -} - -impl fmt::Debug for Batching -where - I: fmt::Debug, -{ - debug_fmt_fields!(Batching, iter); -} - -/// Create a new Batching iterator. -pub fn batching(iter: I, f: F) -> Batching { - Batching { f, iter } -} - -impl Iterator for Batching -where - I: Iterator, - F: FnMut(&mut I) -> Option, -{ - type Item = B; - #[inline] - fn next(&mut self) -> Option { - (self.f)(&mut self.iter) - } -} - -/// An iterator adaptor that borrows from a `Clone`-able iterator -/// to only pick off elements while the predicate returns `true`. -/// -/// See [`.take_while_ref()`](crate::Itertools::take_while_ref) for more information. -#[must_use = "iterator adaptors are lazy and do nothing unless consumed"] -pub struct TakeWhileRef<'a, I: 'a, F> { - iter: &'a mut I, - f: F, -} - -impl<'a, I, F> fmt::Debug for TakeWhileRef<'a, I, F> -where - I: Iterator + fmt::Debug, -{ - debug_fmt_fields!(TakeWhileRef, iter); -} - -/// Create a new `TakeWhileRef` from a reference to clonable iterator. -pub fn take_while_ref(iter: &mut I, f: F) -> TakeWhileRef -where - I: Iterator + Clone, -{ - TakeWhileRef { iter, f } -} - -impl<'a, I, F> Iterator for TakeWhileRef<'a, I, F> -where - I: Iterator + Clone, - F: FnMut(&I::Item) -> bool, -{ - type Item = I::Item; - - fn next(&mut self) -> Option { - let old = self.iter.clone(); - match self.iter.next() { - None => None, - Some(elt) => { - if (self.f)(&elt) { - Some(elt) - } else { - *self.iter = old; - None - } - } - } - } - - fn size_hint(&self) -> (usize, Option) { - (0, self.iter.size_hint().1) - } -} - -/// An iterator adaptor that filters `Option` iterator elements -/// and produces `A`. Stops on the first `None` encountered. -/// -/// See [`.while_some()`](crate::Itertools::while_some) for more information. -#[derive(Clone, Debug)] -#[must_use = "iterator adaptors are lazy and do nothing unless consumed"] -pub struct WhileSome { - iter: I, -} - -/// Create a new `WhileSome`. 
-pub fn while_some(iter: I) -> WhileSome { - WhileSome { iter } -} - -impl Iterator for WhileSome -where - I: Iterator>, -{ - type Item = A; - - fn next(&mut self) -> Option { - match self.iter.next() { - None | Some(None) => None, - Some(elt) => elt, - } - } - - fn size_hint(&self) -> (usize, Option) { - (0, self.iter.size_hint().1) - } - - fn fold(mut self, acc: B, mut f: F) -> B - where - Self: Sized, - F: FnMut(B, Self::Item) -> B, - { - let res = self.iter.try_fold(acc, |acc, item| match item { - Some(item) => Ok(f(acc, item)), - None => Err(acc), - }); - - match res { - Ok(val) => val, - Err(val) => val, - } - } -} - -/// An iterator to iterate through all combinations in a `Clone`-able iterator that produces tuples -/// of a specific size. -/// -/// See [`.tuple_combinations()`](crate::Itertools::tuple_combinations) for more -/// information. -#[derive(Clone, Debug)] -#[must_use = "this iterator adaptor is not lazy but does nearly nothing unless consumed"] -pub struct TupleCombinations -where - I: Iterator, - T: HasCombination, -{ - iter: T::Combination, - _mi: PhantomData, -} - -pub trait HasCombination: Sized { - type Combination: From + Iterator; -} - -/// Create a new `TupleCombinations` from a clonable iterator. -pub fn tuple_combinations(iter: I) -> TupleCombinations -where - I: Iterator + Clone, - I::Item: Clone, - T: HasCombination, -{ - TupleCombinations { - iter: T::Combination::from(iter), - _mi: PhantomData, - } -} - -impl Iterator for TupleCombinations -where - I: Iterator, - T: HasCombination, -{ - type Item = T; - - fn next(&mut self) -> Option { - self.iter.next() - } - - fn size_hint(&self) -> SizeHint { - self.iter.size_hint() - } - - fn count(self) -> usize { - self.iter.count() - } - - fn fold(self, init: B, f: F) -> B - where - F: FnMut(B, Self::Item) -> B, - { - self.iter.fold(init, f) - } -} - -impl FusedIterator for TupleCombinations -where - I: FusedIterator, - T: HasCombination, -{ -} - -#[derive(Clone, Debug)] -pub struct Tuple1Combination { - iter: I, -} - -impl From for Tuple1Combination { - fn from(iter: I) -> Self { - Self { iter } - } -} - -impl Iterator for Tuple1Combination { - type Item = (I::Item,); - - fn next(&mut self) -> Option { - self.iter.next().map(|x| (x,)) - } - - fn size_hint(&self) -> SizeHint { - self.iter.size_hint() - } - - fn count(self) -> usize { - self.iter.count() - } - - fn fold(self, init: B, f: F) -> B - where - F: FnMut(B, Self::Item) -> B, - { - self.iter.map(|x| (x,)).fold(init, f) - } -} - -impl HasCombination for (I::Item,) { - type Combination = Tuple1Combination; -} - -macro_rules! 
impl_tuple_combination { - ($C:ident $P:ident ; $($X:ident)*) => ( - #[derive(Clone, Debug)] - pub struct $C { - item: Option, - iter: I, - c: $P, - } - - impl From for $C { - fn from(mut iter: I) -> Self { - Self { - item: iter.next(), - iter: iter.clone(), - c: iter.into(), - } - } - } - - impl From for $C> { - fn from(iter: I) -> Self { - Self::from(iter.fuse()) - } - } - - impl Iterator for $C - where I: Iterator + Clone, - A: Clone, - { - type Item = (A, $(ignore_ident!($X, A)),*); - - fn next(&mut self) -> Option { - if let Some(($($X,)*)) = self.c.next() { - let z = self.item.clone().unwrap(); - Some((z, $($X),*)) - } else { - self.item = self.iter.next(); - self.item.clone().and_then(|z| { - self.c = self.iter.clone().into(); - self.c.next().map(|($($X,)*)| (z, $($X),*)) - }) - } - } - - fn size_hint(&self) -> SizeHint { - const K: usize = 1 + count_ident!($($X)*); - let (mut n_min, mut n_max) = self.iter.size_hint(); - n_min = checked_binomial(n_min, K).unwrap_or(usize::MAX); - n_max = n_max.and_then(|n| checked_binomial(n, K)); - size_hint::add(self.c.size_hint(), (n_min, n_max)) - } - - fn count(self) -> usize { - const K: usize = 1 + count_ident!($($X)*); - let n = self.iter.count(); - checked_binomial(n, K).unwrap() + self.c.count() - } - - fn fold(self, mut init: B, mut f: F) -> B - where - F: FnMut(B, Self::Item) -> B, - { - let Self { c, item, mut iter } = self; - if let Some(z) = item.as_ref() { - init = c - .map(|($($X,)*)| (z.clone(), $($X),*)) - .fold(init, &mut f); - } - while let Some(z) = iter.next() { - let c: $P = iter.clone().into(); - init = c - .map(|($($X,)*)| (z.clone(), $($X),*)) - .fold(init, &mut f); - } - init - } - } - - impl HasCombination for (A, $(ignore_ident!($X, A)),*) - where I: Iterator + Clone, - I::Item: Clone - { - type Combination = $C>; - } - ) -} - -// This snippet generates the twelve `impl_tuple_combination!` invocations: -// use core::iter; -// use itertools::Itertools; -// -// for i in 2..=12 { -// println!("impl_tuple_combination!(Tuple{arity}Combination Tuple{prev}Combination; {idents});", -// arity = i, -// prev = i - 1, -// idents = ('a'..'z').take(i - 1).join(" "), -// ); -// } -// It could probably be replaced by a bit more macro cleverness. -impl_tuple_combination!(Tuple2Combination Tuple1Combination; a); -impl_tuple_combination!(Tuple3Combination Tuple2Combination; a b); -impl_tuple_combination!(Tuple4Combination Tuple3Combination; a b c); -impl_tuple_combination!(Tuple5Combination Tuple4Combination; a b c d); -impl_tuple_combination!(Tuple6Combination Tuple5Combination; a b c d e); -impl_tuple_combination!(Tuple7Combination Tuple6Combination; a b c d e f); -impl_tuple_combination!(Tuple8Combination Tuple7Combination; a b c d e f g); -impl_tuple_combination!(Tuple9Combination Tuple8Combination; a b c d e f g h); -impl_tuple_combination!(Tuple10Combination Tuple9Combination; a b c d e f g h i); -impl_tuple_combination!(Tuple11Combination Tuple10Combination; a b c d e f g h i j); -impl_tuple_combination!(Tuple12Combination Tuple11Combination; a b c d e f g h i j k); - -// https://en.wikipedia.org/wiki/Binomial_coefficient#In_programming_languages -pub(crate) fn checked_binomial(mut n: usize, mut k: usize) -> Option { - if n < k { - return Some(0); - } - // `factorial(n) / factorial(n - k) / factorial(k)` but trying to avoid it overflows: - k = (n - k).min(k); // symmetry - let mut c = 1; - for i in 1..=k { - c = (c / i) - .checked_mul(n)? - .checked_add((c % i).checked_mul(n)? 
/ i)?; - n -= 1; - } - Some(c) -} - -#[test] -fn test_checked_binomial() { - // With the first row: [1, 0, 0, ...] and the first column full of 1s, we check - // row by row the recurrence relation of binomials (which is an equivalent definition). - // For n >= 1 and k >= 1 we have: - // binomial(n, k) == binomial(n - 1, k - 1) + binomial(n - 1, k) - const LIMIT: usize = 500; - let mut row = vec![Some(0); LIMIT + 1]; - row[0] = Some(1); - for n in 0..=LIMIT { - for k in 0..=LIMIT { - assert_eq!(row[k], checked_binomial(n, k)); - } - row = std::iter::once(Some(1)) - .chain((1..=LIMIT).map(|k| row[k - 1]?.checked_add(row[k]?))) - .collect(); - } -} - -/// An iterator adapter to filter values within a nested `Result::Ok`. -/// -/// See [`.filter_ok()`](crate::Itertools::filter_ok) for more information. -#[derive(Clone)] -#[must_use = "iterator adaptors are lazy and do nothing unless consumed"] -pub struct FilterOk { - iter: I, - f: F, -} - -impl fmt::Debug for FilterOk -where - I: fmt::Debug, -{ - debug_fmt_fields!(FilterOk, iter); -} - -/// Create a new `FilterOk` iterator. -pub fn filter_ok(iter: I, f: F) -> FilterOk -where - I: Iterator>, - F: FnMut(&T) -> bool, -{ - FilterOk { iter, f } -} - -impl Iterator for FilterOk -where - I: Iterator>, - F: FnMut(&T) -> bool, -{ - type Item = Result; - - fn next(&mut self) -> Option { - let f = &mut self.f; - self.iter.find(|res| match res { - Ok(t) => f(t), - _ => true, - }) - } - - fn size_hint(&self) -> (usize, Option) { - (0, self.iter.size_hint().1) - } - - fn fold(self, init: Acc, fold_f: Fold) -> Acc - where - Fold: FnMut(Acc, Self::Item) -> Acc, - { - let mut f = self.f; - self.iter - .filter(|v| v.as_ref().map(&mut f).unwrap_or(true)) - .fold(init, fold_f) - } - - fn collect(self) -> C - where - C: FromIterator, - { - let mut f = self.f; - self.iter - .filter(|v| v.as_ref().map(&mut f).unwrap_or(true)) - .collect() - } -} - -impl FusedIterator for FilterOk -where - I: FusedIterator>, - F: FnMut(&T) -> bool, -{ -} - -/// An iterator adapter to filter and apply a transformation on values within a nested `Result::Ok`. -/// -/// See [`.filter_map_ok()`](crate::Itertools::filter_map_ok) for more information. -#[must_use = "iterator adaptors are lazy and do nothing unless consumed"] -#[derive(Clone)] -pub struct FilterMapOk { - iter: I, - f: F, -} - -impl fmt::Debug for FilterMapOk -where - I: fmt::Debug, -{ - debug_fmt_fields!(FilterMapOk, iter); -} - -fn transpose_result(result: Result, E>) -> Option> { - match result { - Ok(Some(v)) => Some(Ok(v)), - Ok(None) => None, - Err(e) => Some(Err(e)), - } -} - -/// Create a new `FilterOk` iterator. 
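The overflow-aware binomial helper above is dense inside the diff; the restatement below spells out the same multiplicative scheme (the update `c = c * n / i` is split into a quotient part and a remainder part so the division stays exact and the intermediate products stay small). It mirrors the deleted `checked_binomial` but is an editor's sketch, not code from the patch.

```rust
/// C(n, k), or None if the result would not fit in usize.
fn checked_binomial(mut n: usize, mut k: usize) -> Option<usize> {
    if n < k {
        return Some(0);
    }
    // Symmetry: C(n, k) == C(n, n - k); use the smaller k.
    k = (n - k).min(k);
    let mut c: usize = 1;
    for i in 1..=k {
        // Exact split of c * n / i: (c / i) * n + ((c % i) * n) / i.
        c = (c / i)
            .checked_mul(n)?
            .checked_add((c % i).checked_mul(n)? / i)?;
        n -= 1;
    }
    Some(c)
}

fn main() {
    assert_eq!(checked_binomial(5, 2), Some(10));
    assert_eq!(checked_binomial(3, 7), Some(0)); // k > n
    assert_eq!(checked_binomial(usize::MAX, 2), None); // would overflow
}
```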
-pub fn filter_map_ok(iter: I, f: F) -> FilterMapOk -where - I: Iterator>, - F: FnMut(T) -> Option, -{ - FilterMapOk { iter, f } -} - -impl Iterator for FilterMapOk -where - I: Iterator>, - F: FnMut(T) -> Option, -{ - type Item = Result; - - fn next(&mut self) -> Option { - let f = &mut self.f; - self.iter.find_map(|res| match res { - Ok(t) => f(t).map(Ok), - Err(e) => Some(Err(e)), - }) - } - - fn size_hint(&self) -> (usize, Option) { - (0, self.iter.size_hint().1) - } - - fn fold(self, init: Acc, fold_f: Fold) -> Acc - where - Fold: FnMut(Acc, Self::Item) -> Acc, - { - let mut f = self.f; - self.iter - .filter_map(|v| transpose_result(v.map(&mut f))) - .fold(init, fold_f) - } - - fn collect(self) -> C - where - C: FromIterator, - { - let mut f = self.f; - self.iter - .filter_map(|v| transpose_result(v.map(&mut f))) - .collect() - } -} - -impl FusedIterator for FilterMapOk -where - I: FusedIterator>, - F: FnMut(T) -> Option, -{ -} - -/// An iterator adapter to get the positions of each element that matches a predicate. -/// -/// See [`.positions()`](crate::Itertools::positions) for more information. -#[derive(Clone)] -#[must_use = "iterator adaptors are lazy and do nothing unless consumed"] -pub struct Positions { - iter: Enumerate, - f: F, -} - -impl fmt::Debug for Positions -where - I: fmt::Debug, -{ - debug_fmt_fields!(Positions, iter); -} - -/// Create a new `Positions` iterator. -pub fn positions(iter: I, f: F) -> Positions -where - I: Iterator, - F: FnMut(I::Item) -> bool, -{ - let iter = iter.enumerate(); - Positions { iter, f } -} - -impl Iterator for Positions -where - I: Iterator, - F: FnMut(I::Item) -> bool, -{ - type Item = usize; - - fn next(&mut self) -> Option { - let f = &mut self.f; - // TODO: once MSRV >= 1.62, use `then_some`. - self.iter - .find_map(|(count, val)| if f(val) { Some(count) } else { None }) - } - - fn size_hint(&self) -> (usize, Option) { - (0, self.iter.size_hint().1) - } - - fn fold(self, init: B, mut func: G) -> B - where - G: FnMut(B, Self::Item) -> B, - { - let mut f = self.f; - self.iter.fold(init, |mut acc, (count, val)| { - if f(val) { - acc = func(acc, count); - } - acc - }) - } -} - -impl DoubleEndedIterator for Positions -where - I: DoubleEndedIterator + ExactSizeIterator, - F: FnMut(I::Item) -> bool, -{ - fn next_back(&mut self) -> Option { - let f = &mut self.f; - // TODO: once MSRV >= 1.62, use `then_some`. - self.iter - .by_ref() - .rev() - .find_map(|(count, val)| if f(val) { Some(count) } else { None }) - } - - fn rfold(self, init: B, mut func: G) -> B - where - G: FnMut(B, Self::Item) -> B, - { - let mut f = self.f; - self.iter.rfold(init, |mut acc, (count, val)| { - if f(val) { - acc = func(acc, count); - } - acc - }) - } -} - -impl FusedIterator for Positions -where - I: FusedIterator, - F: FnMut(I::Item) -> bool, -{ -} - -/// An iterator adapter to apply a mutating function to each element before yielding it. -/// -/// See [`.update()`](crate::Itertools::update) for more information. -#[derive(Clone)] -#[must_use = "iterator adaptors are lazy and do nothing unless consumed"] -pub struct Update { - iter: I, - f: F, -} - -impl fmt::Debug for Update -where - I: fmt::Debug, -{ - debug_fmt_fields!(Update, iter); -} - -/// Create a new `Update` iterator. 
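Call-side behaviour of the two adaptors above: `filter_map_ok` transforms and filters inside `Ok` while passing `Err` through untouched, and `positions` yields the indices of matching elements. Illustrative only, assuming itertools as a dependency.

```rust
use itertools::Itertools;

fn main() {
    // filter_map_ok: map-and-filter inside Ok, Err values pass straight through.
    let results: Vec<Result<i32, &str>> = vec![Ok(1), Err("bad"), Ok(2), Ok(3)];
    let doubled_evens: Vec<_> = results
        .into_iter()
        .filter_map_ok(|x| if x % 2 == 0 { Some(x * 2) } else { None })
        .collect();
    assert_eq!(doubled_evens, vec![Err("bad"), Ok(4)]);

    // positions: indices of the elements matching a predicate.
    let idx: Vec<_> = [1, 4, 2, 9, 4].iter().positions(|&x| x == 4).collect();
    assert_eq!(idx, vec![1, 4]);
}
```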
-pub fn update(iter: I, f: F) -> Update -where - I: Iterator, - F: FnMut(&mut I::Item), -{ - Update { iter, f } -} - -impl Iterator for Update -where - I: Iterator, - F: FnMut(&mut I::Item), -{ - type Item = I::Item; - - fn next(&mut self) -> Option { - if let Some(mut v) = self.iter.next() { - (self.f)(&mut v); - Some(v) - } else { - None - } - } - - fn size_hint(&self) -> (usize, Option) { - self.iter.size_hint() - } - - fn fold(self, init: Acc, mut g: G) -> Acc - where - G: FnMut(Acc, Self::Item) -> Acc, - { - let mut f = self.f; - self.iter.fold(init, move |acc, mut v| { - f(&mut v); - g(acc, v) - }) - } - - // if possible, re-use inner iterator specializations in collect - fn collect(self) -> C - where - C: FromIterator, - { - let mut f = self.f; - self.iter - .map(move |mut v| { - f(&mut v); - v - }) - .collect() - } -} - -impl ExactSizeIterator for Update -where - I: ExactSizeIterator, - F: FnMut(&mut I::Item), -{ -} - -impl DoubleEndedIterator for Update -where - I: DoubleEndedIterator, - F: FnMut(&mut I::Item), -{ - fn next_back(&mut self) -> Option { - if let Some(mut v) = self.iter.next_back() { - (self.f)(&mut v); - Some(v) - } else { - None - } - } -} - -impl FusedIterator for Update -where - I: FusedIterator, - F: FnMut(&mut I::Item), -{ -} diff --git a/vendor/itertools/src/adaptors/multi_product.rs b/vendor/itertools/src/adaptors/multi_product.rs deleted file mode 100644 index 314d4a46ef9272..00000000000000 --- a/vendor/itertools/src/adaptors/multi_product.rs +++ /dev/null @@ -1,231 +0,0 @@ -#![cfg(feature = "use_alloc")] -use Option::{self as State, None as ProductEnded, Some as ProductInProgress}; -use Option::{self as CurrentItems, None as NotYetPopulated, Some as Populated}; - -use alloc::vec::Vec; - -use crate::size_hint; - -#[derive(Clone)] -/// An iterator adaptor that iterates over the cartesian product of -/// multiple iterators of type `I`. -/// -/// An iterator element type is `Vec`. -/// -/// See [`.multi_cartesian_product()`](crate::Itertools::multi_cartesian_product) -/// for more information. -#[must_use = "iterator adaptors are lazy and do nothing unless consumed"] -pub struct MultiProduct(State>) -where - I: Iterator + Clone, - I::Item: Clone; - -#[derive(Clone)] -/// Internals for `MultiProduct`. -struct MultiProductInner -where - I: Iterator + Clone, - I::Item: Clone, -{ - /// Holds the iterators. - iters: Vec>, - /// Not populated at the beginning then it holds the current item of each iterator. - cur: CurrentItems>, -} - -impl std::fmt::Debug for MultiProduct -where - I: Iterator + Clone + std::fmt::Debug, - I::Item: Clone + std::fmt::Debug, -{ - debug_fmt_fields!(MultiProduct, 0); -} - -impl std::fmt::Debug for MultiProductInner -where - I: Iterator + Clone + std::fmt::Debug, - I::Item: Clone + std::fmt::Debug, -{ - debug_fmt_fields!(MultiProductInner, iters, cur); -} - -/// Create a new cartesian product iterator over an arbitrary number -/// of iterators of the same type. -/// -/// Iterator element is of type `Vec`. -pub fn multi_cartesian_product(iters: H) -> MultiProduct<::IntoIter> -where - H: Iterator, - H::Item: IntoIterator, - ::IntoIter: Clone, - ::Item: Clone, -{ - let inner = MultiProductInner { - iters: iters - .map(|i| MultiProductIter::new(i.into_iter())) - .collect(), - cur: NotYetPopulated, - }; - MultiProduct(ProductInProgress(inner)) -} - -#[derive(Clone, Debug)] -/// Holds the state of a single iterator within a `MultiProduct`. 
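A short sketch of the two pieces above: `update` mutates each element in place before yielding it, and `multi_cartesian_product` (backed by `MultiProduct`) builds the cartesian product of a runtime-determined number of iterators. Assumes the itertools crate; not part of the patch.

```rust
use itertools::Itertools;

fn main() {
    // update: mutate each element in place before yielding it.
    let scaled: Vec<_> = vec![1, 2, 3].into_iter().update(|x| *x *= 10).collect();
    assert_eq!(scaled, vec![10, 20, 30]);

    // multi_cartesian_product: product over however many iterators are supplied;
    // each yielded row is a Vec with one item per input iterator.
    let rows: Vec<Vec<i32>> = vec![0..2, 0..2].into_iter().multi_cartesian_product().collect();
    assert_eq!(rows, vec![vec![0, 0], vec![0, 1], vec![1, 0], vec![1, 1]]);
}
```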
-struct MultiProductIter -where - I: Iterator + Clone, - I::Item: Clone, -{ - iter: I, - iter_orig: I, -} - -impl MultiProductIter -where - I: Iterator + Clone, - I::Item: Clone, -{ - fn new(iter: I) -> Self { - Self { - iter: iter.clone(), - iter_orig: iter, - } - } -} - -impl Iterator for MultiProduct -where - I: Iterator + Clone, - I::Item: Clone, -{ - type Item = Vec; - - fn next(&mut self) -> Option { - // This fuses the iterator. - let inner = self.0.as_mut()?; - match &mut inner.cur { - Populated(values) => { - debug_assert!(!inner.iters.is_empty()); - // Find (from the right) a non-finished iterator and - // reset the finished ones encountered. - for (iter, item) in inner.iters.iter_mut().zip(values.iter_mut()).rev() { - if let Some(new) = iter.iter.next() { - *item = new; - return Some(values.clone()); - } else { - iter.iter = iter.iter_orig.clone(); - // `cur` is populated so the untouched `iter_orig` can not be empty. - *item = iter.iter.next().unwrap(); - } - } - self.0 = ProductEnded; - None - } - // Only the first time. - NotYetPopulated => { - let next: Option> = inner.iters.iter_mut().map(|i| i.iter.next()).collect(); - if next.is_none() || inner.iters.is_empty() { - // This cartesian product had at most one item to generate and now ends. - self.0 = ProductEnded; - } else { - inner.cur.clone_from(&next); - } - next - } - } - } - - fn count(self) -> usize { - match self.0 { - ProductEnded => 0, - // The iterator is fresh so the count is the product of the length of each iterator: - // - If one of them is empty, stop counting. - // - Less `count()` calls than the general case. - ProductInProgress(MultiProductInner { - iters, - cur: NotYetPopulated, - }) => iters - .into_iter() - .map(|iter| iter.iter_orig.count()) - .try_fold(1, |product, count| { - if count == 0 { - None - } else { - Some(product * count) - } - }) - .unwrap_or_default(), - // The general case. - ProductInProgress(MultiProductInner { - iters, - cur: Populated(_), - }) => iters.into_iter().fold(0, |mut acc, iter| { - if acc != 0 { - acc *= iter.iter_orig.count(); - } - acc + iter.iter.count() - }), - } - } - - fn size_hint(&self) -> (usize, Option) { - match &self.0 { - ProductEnded => (0, Some(0)), - ProductInProgress(MultiProductInner { - iters, - cur: NotYetPopulated, - }) => iters - .iter() - .map(|iter| iter.iter_orig.size_hint()) - .fold((1, Some(1)), size_hint::mul), - ProductInProgress(MultiProductInner { - iters, - cur: Populated(_), - }) => { - if let [first, tail @ ..] = &iters[..] { - tail.iter().fold(first.iter.size_hint(), |mut sh, iter| { - sh = size_hint::mul(sh, iter.iter_orig.size_hint()); - size_hint::add(sh, iter.iter.size_hint()) - }) - } else { - // Since it is populated, this cartesian product has started so `iters` is not empty. - unreachable!() - } - } - } - } - - fn last(self) -> Option { - let MultiProductInner { iters, cur } = self.0?; - // Collect the last item of each iterator of the product. - if let Populated(values) = cur { - let mut count = iters.len(); - let last = iters - .into_iter() - .zip(values) - .map(|(i, value)| { - i.iter.last().unwrap_or_else(|| { - // The iterator is empty, use its current `value`. - count -= 1; - value - }) - }) - .collect(); - if count == 0 { - // `values` was the last item. 
- None - } else { - Some(last) - } - } else { - iters.into_iter().map(|i| i.iter.last()).collect() - } - } -} - -impl std::iter::FusedIterator for MultiProduct -where - I: Iterator + Clone, - I::Item: Clone, -{ -} diff --git a/vendor/itertools/src/combinations.rs b/vendor/itertools/src/combinations.rs deleted file mode 100644 index 6bb2f3ec66911c..00000000000000 --- a/vendor/itertools/src/combinations.rs +++ /dev/null @@ -1,243 +0,0 @@ -use std::fmt; -use std::iter::FusedIterator; - -use super::lazy_buffer::LazyBuffer; -use alloc::vec::Vec; - -use crate::adaptors::checked_binomial; - -/// An iterator to iterate through all the `k`-length combinations in an iterator. -/// -/// See [`.combinations()`](crate::Itertools::combinations) for more information. -#[must_use = "iterator adaptors are lazy and do nothing unless consumed"] -pub struct Combinations { - indices: Vec, - pool: LazyBuffer, - first: bool, -} - -impl Clone for Combinations -where - I: Clone + Iterator, - I::Item: Clone, -{ - clone_fields!(indices, pool, first); -} - -impl fmt::Debug for Combinations -where - I: Iterator + fmt::Debug, - I::Item: fmt::Debug, -{ - debug_fmt_fields!(Combinations, indices, pool, first); -} - -/// Create a new `Combinations` from a clonable iterator. -pub fn combinations(iter: I, k: usize) -> Combinations -where - I: Iterator, -{ - Combinations { - indices: (0..k).collect(), - pool: LazyBuffer::new(iter), - first: true, - } -} - -impl Combinations { - /// Returns the length of a combination produced by this iterator. - #[inline] - pub fn k(&self) -> usize { - self.indices.len() - } - - /// Returns the (current) length of the pool from which combination elements are - /// selected. This value can change between invocations of [`next`](Combinations::next). - #[inline] - pub fn n(&self) -> usize { - self.pool.len() - } - - /// Returns a reference to the source pool. - #[inline] - pub(crate) fn src(&self) -> &LazyBuffer { - &self.pool - } - - /// Resets this `Combinations` back to an initial state for combinations of length - /// `k` over the same pool data source. If `k` is larger than the current length - /// of the data pool an attempt is made to prefill the pool so that it holds `k` - /// elements. - pub(crate) fn reset(&mut self, k: usize) { - self.first = true; - - if k < self.indices.len() { - self.indices.truncate(k); - for i in 0..k { - self.indices[i] = i; - } - } else { - for i in 0..self.indices.len() { - self.indices[i] = i; - } - self.indices.extend(self.indices.len()..k); - self.pool.prefill(k); - } - } - - pub(crate) fn n_and_count(self) -> (usize, usize) { - let Self { - indices, - pool, - first, - } = self; - let n = pool.count(); - (n, remaining_for(n, first, &indices).unwrap()) - } - - /// Initialises the iterator by filling a buffer with elements from the - /// iterator. Returns true if there are no combinations, false otherwise. - fn init(&mut self) -> bool { - self.pool.prefill(self.k()); - let done = self.k() > self.n(); - if !done { - self.first = false; - } - - done - } - - /// Increments indices representing the combination to advance to the next - /// (in lexicographic order by increasing sequence) combination. For example - /// if we have n=4 & k=2 then `[0, 1] -> [0, 2] -> [0, 3] -> [1, 2] -> ...` - /// - /// Returns true if we've run out of combinations, false otherwise. 
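The index-increment scheme described in the doc comment above is what produces the familiar lexicographic ordering of `combinations`; a quick check of that ordering against the public API, illustrative only.

```rust
use itertools::Itertools;

fn main() {
    // combinations(2) over four items: index sequence [0,1] -> [0,2] -> [0,3]
    // -> [1,2] -> [1,3] -> [2,3], exactly as the doc comment describes.
    let combos: Vec<Vec<i32>> = (1..=4).combinations(2).collect();
    assert_eq!(
        combos,
        vec![
            vec![1, 2], vec![1, 3], vec![1, 4],
            vec![2, 3], vec![2, 4], vec![3, 4],
        ]
    );
}
```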
- fn increment_indices(&mut self) -> bool { - if self.indices.is_empty() { - return true; // Done - } - - // Scan from the end, looking for an index to increment - let mut i: usize = self.indices.len() - 1; - - // Check if we need to consume more from the iterator - if self.indices[i] == self.pool.len() - 1 { - self.pool.get_next(); // may change pool size - } - - while self.indices[i] == i + self.pool.len() - self.indices.len() { - if i > 0 { - i -= 1; - } else { - // Reached the last combination - return true; - } - } - - // Increment index, and reset the ones to its right - self.indices[i] += 1; - for j in i + 1..self.indices.len() { - self.indices[j] = self.indices[j - 1] + 1; - } - - // If we've made it this far, we haven't run out of combos - false - } - - /// Returns the n-th item or the number of successful steps. - pub(crate) fn try_nth(&mut self, n: usize) -> Result<::Item, usize> - where - I::Item: Clone, - { - let done = if self.first { - self.init() - } else { - self.increment_indices() - }; - if done { - return Err(0); - } - for i in 0..n { - if self.increment_indices() { - return Err(i + 1); - } - } - Ok(self.pool.get_at(&self.indices)) - } -} - -impl Iterator for Combinations -where - I: Iterator, - I::Item: Clone, -{ - type Item = Vec; - fn next(&mut self) -> Option { - let done = if self.first { - self.init() - } else { - self.increment_indices() - }; - - if done { - return None; - } - - Some(self.pool.get_at(&self.indices)) - } - - fn nth(&mut self, n: usize) -> Option { - self.try_nth(n).ok() - } - - fn size_hint(&self) -> (usize, Option) { - let (mut low, mut upp) = self.pool.size_hint(); - low = remaining_for(low, self.first, &self.indices).unwrap_or(usize::MAX); - upp = upp.and_then(|upp| remaining_for(upp, self.first, &self.indices)); - (low, upp) - } - - #[inline] - fn count(self) -> usize { - self.n_and_count().1 - } -} - -impl FusedIterator for Combinations -where - I: Iterator, - I::Item: Clone, -{ -} - -/// For a given size `n`, return the count of remaining combinations or None if it would overflow. -fn remaining_for(n: usize, first: bool, indices: &[usize]) -> Option { - let k = indices.len(); - if n < k { - Some(0) - } else if first { - checked_binomial(n, k) - } else { - // https://en.wikipedia.org/wiki/Combinatorial_number_system - // http://www.site.uottawa.ca/~lucia/courses/5165-09/GenCombObj.pdf - - // The combinations generated after the current one can be counted by counting as follows: - // - The subsequent combinations that differ in indices[0]: - // If subsequent combinations differ in indices[0], then their value for indices[0] - // must be at least 1 greater than the current indices[0]. - // As indices is strictly monotonically sorted, this means we can effectively choose k values - // from (n - 1 - indices[0]), leading to binomial(n - 1 - indices[0], k) possibilities. - // - The subsequent combinations with same indices[0], but differing indices[1]: - // Here we can choose k - 1 values from (n - 1 - indices[1]) values, - // leading to binomial(n - 1 - indices[1], k - 1) possibilities. - // - (...) - // - The subsequent combinations with same indices[0..=i], but differing indices[i]: - // Here we can choose k - i values from (n - 1 - indices[i]) values: binomial(n - 1 - indices[i], k - i). - // Since subsequent combinations can in any index, we must sum up the aforementioned binomial coefficients. - - // Below, `n0` resembles indices[i]. 
- indices.iter().enumerate().try_fold(0usize, |sum, (i, n0)| { - sum.checked_add(checked_binomial(n - 1 - *n0, k - i)?) - }) - } -} diff --git a/vendor/itertools/src/combinations_with_replacement.rs b/vendor/itertools/src/combinations_with_replacement.rs deleted file mode 100644 index f363f9ba26b1ee..00000000000000 --- a/vendor/itertools/src/combinations_with_replacement.rs +++ /dev/null @@ -1,192 +0,0 @@ -use alloc::boxed::Box; -use alloc::vec::Vec; -use std::fmt; -use std::iter::FusedIterator; - -use super::lazy_buffer::LazyBuffer; -use crate::adaptors::checked_binomial; - -/// An iterator to iterate through all the `n`-length combinations in an iterator, with replacement. -/// -/// See [`.combinations_with_replacement()`](crate::Itertools::combinations_with_replacement) -/// for more information. -#[derive(Clone)] -#[must_use = "iterator adaptors are lazy and do nothing unless consumed"] -pub struct CombinationsWithReplacement -where - I: Iterator, - I::Item: Clone, -{ - indices: Box<[usize]>, - pool: LazyBuffer, - first: bool, -} - -impl fmt::Debug for CombinationsWithReplacement -where - I: Iterator + fmt::Debug, - I::Item: fmt::Debug + Clone, -{ - debug_fmt_fields!(CombinationsWithReplacement, indices, pool, first); -} - -/// Create a new `CombinationsWithReplacement` from a clonable iterator. -pub fn combinations_with_replacement(iter: I, k: usize) -> CombinationsWithReplacement -where - I: Iterator, - I::Item: Clone, -{ - let indices = alloc::vec![0; k].into_boxed_slice(); - let pool: LazyBuffer = LazyBuffer::new(iter); - - CombinationsWithReplacement { - indices, - pool, - first: true, - } -} - -impl CombinationsWithReplacement -where - I: Iterator, - I::Item: Clone, -{ - /// Increments indices representing the combination to advance to the next - /// (in lexicographic order by increasing sequence) combination. - /// - /// Returns true if we've run out of combinations, false otherwise. - fn increment_indices(&mut self) -> bool { - // Check if we need to consume more from the iterator - // This will run while we increment our first index digit - self.pool.get_next(); - - // Work out where we need to update our indices - let mut increment = None; - for (i, indices_int) in self.indices.iter().enumerate().rev() { - if *indices_int < self.pool.len() - 1 { - increment = Some((i, indices_int + 1)); - break; - } - } - match increment { - // If we can update the indices further - Some((increment_from, increment_value)) => { - // We need to update the rightmost non-max value - // and all those to the right - for i in &mut self.indices[increment_from..] 
{ - *i = increment_value; - } - // TODO: once MSRV >= 1.50, use `fill` instead: - // self.indices[increment_from..].fill(increment_value); - false - } - // Otherwise, we're done - None => true, - } - } -} - -impl Iterator for CombinationsWithReplacement -where - I: Iterator, - I::Item: Clone, -{ - type Item = Vec; - - fn next(&mut self) -> Option { - if self.first { - // In empty edge cases, stop iterating immediately - if !(self.indices.is_empty() || self.pool.get_next()) { - return None; - } - self.first = false; - } else if self.increment_indices() { - return None; - } - Some(self.pool.get_at(&self.indices)) - } - - fn nth(&mut self, n: usize) -> Option { - if self.first { - // In empty edge cases, stop iterating immediately - if !(self.indices.is_empty() || self.pool.get_next()) { - return None; - } - self.first = false; - } else if self.increment_indices() { - return None; - } - for _ in 0..n { - if self.increment_indices() { - return None; - } - } - Some(self.pool.get_at(&self.indices)) - } - - fn size_hint(&self) -> (usize, Option) { - let (mut low, mut upp) = self.pool.size_hint(); - low = remaining_for(low, self.first, &self.indices).unwrap_or(usize::MAX); - upp = upp.and_then(|upp| remaining_for(upp, self.first, &self.indices)); - (low, upp) - } - - fn count(self) -> usize { - let Self { - indices, - pool, - first, - } = self; - let n = pool.count(); - remaining_for(n, first, &indices).unwrap() - } -} - -impl FusedIterator for CombinationsWithReplacement -where - I: Iterator, - I::Item: Clone, -{ -} - -/// For a given size `n`, return the count of remaining combinations with replacement or None if it would overflow. -fn remaining_for(n: usize, first: bool, indices: &[usize]) -> Option { - // With a "stars and bars" representation, choose k values with replacement from n values is - // like choosing k out of k + n − 1 positions (hence binomial(k + n - 1, k) possibilities) - // to place k stars and therefore n - 1 bars. - // Example (n=4, k=6): ***|*||** represents [0,0,0,1,3,3]. - let count = |n: usize, k: usize| { - let positions = if n == 0 { - k.saturating_sub(1) - } else { - (n - 1).checked_add(k)? - }; - checked_binomial(positions, k) - }; - let k = indices.len(); - if first { - count(n, k) - } else { - // The algorithm is similar to the one for combinations *without replacement*, - // except we choose values *with replacement* and indices are *non-strictly* monotonically sorted. - - // The combinations generated after the current one can be counted by counting as follows: - // - The subsequent combinations that differ in indices[0]: - // If subsequent combinations differ in indices[0], then their value for indices[0] - // must be at least 1 greater than the current indices[0]. - // As indices is monotonically sorted, this means we can effectively choose k values with - // replacement from (n - 1 - indices[0]), leading to count(n - 1 - indices[0], k) possibilities. - // - The subsequent combinations with same indices[0], but differing indices[1]: - // Here we can choose k - 1 values with replacement from (n - 1 - indices[1]) values, - // leading to count(n - 1 - indices[1], k - 1) possibilities. - // - (...) - // - The subsequent combinations with same indices[0..=i], but differing indices[i]: - // Here we can choose k - i values with replacement from (n - 1 - indices[i]) values: count(n - 1 - indices[i], k - i). - // Since subsequent combinations can in any index, we must sum up the aforementioned binomial coefficients. - - // Below, `n0` resembles indices[i]. 
- indices.iter().enumerate().try_fold(0usize, |sum, (i, n0)| { - sum.checked_add(count(n - 1 - *n0, k - i)?) - }) - } -} diff --git a/vendor/itertools/src/concat_impl.rs b/vendor/itertools/src/concat_impl.rs deleted file mode 100644 index ec7b91c605e639..00000000000000 --- a/vendor/itertools/src/concat_impl.rs +++ /dev/null @@ -1,30 +0,0 @@ -use crate::Itertools; - -/// Combine all an iterator's elements into one element by using [`Extend`]. -/// -/// [`IntoIterator`]-enabled version of [`Itertools::concat`]. -/// -/// This combinator will extend the first item with each of the rest of the -/// items of the iterator. If the iterator is empty, the default value of -/// `I::Item` is returned. -/// -/// ```rust -/// use itertools::concat; -/// -/// let input = vec![vec![1], vec![2, 3], vec![4, 5, 6]]; -/// assert_eq!(concat(input), vec![1, 2, 3, 4, 5, 6]); -/// ``` -pub fn concat(iterable: I) -> I::Item -where - I: IntoIterator, - I::Item: Extend<<::Item as IntoIterator>::Item> + IntoIterator + Default, -{ - #[allow(deprecated)] //TODO: once msrv hits 1.51. replace `fold1` with `reduce` - iterable - .into_iter() - .fold1(|mut a, b| { - a.extend(b); - a - }) - .unwrap_or_default() -} diff --git a/vendor/itertools/src/cons_tuples_impl.rs b/vendor/itertools/src/cons_tuples_impl.rs deleted file mode 100644 index 9ab309478875d1..00000000000000 --- a/vendor/itertools/src/cons_tuples_impl.rs +++ /dev/null @@ -1,58 +0,0 @@ -macro_rules! impl_cons_iter( - ($_A:ident, $_B:ident, ) => (); // stop - - ($A:ident, $($B:ident,)*) => ( - impl_cons_iter!($($B,)*); - #[allow(non_snake_case)] - impl Iterator for ConsTuples - where Iter: Iterator, - { - type Item = ($($B,)* X, ); - fn next(&mut self) -> Option { - self.iter.next().map(|(($($B,)*), x)| ($($B,)* x, )) - } - - fn size_hint(&self) -> (usize, Option) { - self.iter.size_hint() - } - fn fold(self, accum: Acc, mut f: Fold) -> Acc - where Fold: FnMut(Acc, Self::Item) -> Acc, - { - self.iter.fold(accum, move |acc, (($($B,)*), x)| f(acc, ($($B,)* x, ))) - } - } - ); -); - -impl_cons_iter!(A, B, C, D, E, F, G, H, I, J, K, L,); - -/// An iterator that maps an iterator of tuples like -/// `((A, B), C)` to an iterator of `(A, B, C)`. -/// -/// Used by the `iproduct!()` macro. -#[must_use = "iterator adaptors are lazy and do nothing unless consumed"] -#[derive(Debug)] -pub struct ConsTuples -where - I: Iterator, -{ - iter: I, -} - -impl Clone for ConsTuples -where - I: Clone + Iterator, -{ - clone_fields!(iter); -} - -/// Create an iterator that maps for example iterators of -/// `((A, B), C)` to `(A, B, C)`. -pub fn cons_tuples(iterable: I) -> ConsTuples -where - I: IntoIterator, -{ - ConsTuples { - iter: iterable.into_iter(), - } -} diff --git a/vendor/itertools/src/diff.rs b/vendor/itertools/src/diff.rs deleted file mode 100644 index c6d99657efd347..00000000000000 --- a/vendor/itertools/src/diff.rs +++ /dev/null @@ -1,104 +0,0 @@ -//! "Diff"ing iterators for caching elements to sequential collections without requiring the new -//! elements' iterator to be `Clone`. -//! -//! - [`Diff`] (produced by the [`diff_with`] function) -//! describes the difference between two non-`Clone` iterators `I` and `J` after breaking ASAP from -//! a lock-step comparison. - -use std::fmt; - -use crate::free::put_back; -use crate::structs::PutBack; - -/// A type returned by the [`diff_with`] function. -/// -/// `Diff` represents the way in which the elements yielded by the iterator `I` differ to some -/// iterator `J`. 
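The "stars and bars" count used above (choosing k values with replacement out of n gives binomial(n + k - 1, k) results) can be checked directly against the iterator; the small `binomial` helper below is an illustrative addition, not code from the patch.

```rust
use itertools::Itertools;

// Exact multiplicative binomial, fine for the small values used here.
fn binomial(n: u64, k: u64) -> u64 {
    (1..=k).fold(1, |acc, i| acc * (n - k + i) / i)
}

fn main() {
    let (n, k) = (4u64, 6u64);
    // Stars and bars: k choices with replacement from n values
    // correspond to binomial(n + k - 1, k) multisets.
    let generated = (0..n).combinations_with_replacement(k as usize).count() as u64;
    assert_eq!(generated, binomial(n + k - 1, k)); // both are 84
}
```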
-pub enum Diff -where - I: Iterator, - J: Iterator, -{ - /// The index of the first non-matching element along with both iterator's remaining elements - /// starting with the first mis-match. - FirstMismatch(usize, PutBack, PutBack), - /// The total number of elements that were in `J` along with the remaining elements of `I`. - Shorter(usize, PutBack), - /// The total number of elements that were in `I` along with the remaining elements of `J`. - Longer(usize, PutBack), -} - -impl fmt::Debug for Diff -where - I: Iterator, - J: Iterator, - PutBack: fmt::Debug, - PutBack: fmt::Debug, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match self { - Self::FirstMismatch(idx, i, j) => f - .debug_tuple("FirstMismatch") - .field(idx) - .field(i) - .field(j) - .finish(), - Self::Shorter(idx, i) => f.debug_tuple("Shorter").field(idx).field(i).finish(), - Self::Longer(idx, j) => f.debug_tuple("Longer").field(idx).field(j).finish(), - } - } -} - -impl Clone for Diff -where - I: Iterator, - J: Iterator, - PutBack: Clone, - PutBack: Clone, -{ - fn clone(&self) -> Self { - match self { - Self::FirstMismatch(idx, i, j) => Self::FirstMismatch(*idx, i.clone(), j.clone()), - Self::Shorter(idx, i) => Self::Shorter(*idx, i.clone()), - Self::Longer(idx, j) => Self::Longer(*idx, j.clone()), - } - } -} - -/// Compares every element yielded by both `i` and `j` with the given function in lock-step and -/// returns a [`Diff`] which describes how `j` differs from `i`. -/// -/// If the number of elements yielded by `j` is less than the number of elements yielded by `i`, -/// the number of `j` elements yielded will be returned along with `i`'s remaining elements as -/// `Diff::Shorter`. -/// -/// If the two elements of a step differ, the index of those elements along with the remaining -/// elements of both `i` and `j` are returned as `Diff::FirstMismatch`. -/// -/// If `i` becomes exhausted before `j` becomes exhausted, the number of elements in `i` along with -/// the remaining `j` elements will be returned as `Diff::Longer`. 
-pub fn diff_with(i: I, j: J, mut is_equal: F) -> Option> -where - I: IntoIterator, - J: IntoIterator, - F: FnMut(&I::Item, &J::Item) -> bool, -{ - let mut i = i.into_iter(); - let mut j = j.into_iter(); - let mut idx = 0; - while let Some(i_elem) = i.next() { - match j.next() { - None => return Some(Diff::Shorter(idx, put_back(i).with_value(i_elem))), - Some(j_elem) => { - if !is_equal(&i_elem, &j_elem) { - let remaining_i = put_back(i).with_value(i_elem); - let remaining_j = put_back(j).with_value(j_elem); - return Some(Diff::FirstMismatch(idx, remaining_i, remaining_j)); - } - } - } - idx += 1; - } - j.next() - .map(|j_elem| Diff::Longer(idx, put_back(j).with_value(j_elem))) -} diff --git a/vendor/itertools/src/duplicates_impl.rs b/vendor/itertools/src/duplicates_impl.rs deleted file mode 100644 index a0db15432d854f..00000000000000 --- a/vendor/itertools/src/duplicates_impl.rs +++ /dev/null @@ -1,216 +0,0 @@ -use std::hash::Hash; - -mod private { - use std::collections::HashMap; - use std::fmt; - use std::hash::Hash; - - #[derive(Clone)] - #[must_use = "iterator adaptors are lazy and do nothing unless consumed"] - pub struct DuplicatesBy { - pub(crate) iter: I, - pub(crate) meta: Meta, - } - - impl fmt::Debug for DuplicatesBy - where - I: Iterator + fmt::Debug, - V: fmt::Debug + Hash + Eq, - { - debug_fmt_fields!(DuplicatesBy, iter, meta.used); - } - - impl DuplicatesBy { - pub(crate) fn new(iter: I, key_method: F) -> Self { - Self { - iter, - meta: Meta { - used: HashMap::new(), - pending: 0, - key_method, - }, - } - } - } - - #[derive(Clone)] - pub struct Meta { - used: HashMap, - pending: usize, - key_method: F, - } - - impl Meta - where - Key: Eq + Hash, - { - /// Takes an item and returns it back to the caller if it's the second time we see it. - /// Otherwise the item is consumed and None is returned - #[inline(always)] - fn filter(&mut self, item: I) -> Option - where - F: KeyMethod, - { - let kv = self.key_method.make(item); - match self.used.get_mut(kv.key_ref()) { - None => { - self.used.insert(kv.key(), false); - self.pending += 1; - None - } - Some(true) => None, - Some(produced) => { - *produced = true; - self.pending -= 1; - Some(kv.value()) - } - } - } - } - - impl Iterator for DuplicatesBy - where - I: Iterator, - Key: Eq + Hash, - F: KeyMethod, - { - type Item = I::Item; - - fn next(&mut self) -> Option { - let Self { iter, meta } = self; - iter.find_map(|v| meta.filter(v)) - } - - #[inline] - fn size_hint(&self) -> (usize, Option) { - let (_, hi) = self.iter.size_hint(); - let hi = hi.map(|hi| { - if hi <= self.meta.pending { - // fewer or equally many iter-remaining elements than pending elements - // => at most, each iter-remaining element is matched - hi - } else { - // fewer pending elements than iter-remaining elements - // => at most: - // * each pending element is matched - // * the other iter-remaining elements come in pairs - self.meta.pending + (hi - self.meta.pending) / 2 - } - }); - // The lower bound is always 0 since we might only get unique items from now on - (0, hi) - } - } - - impl DoubleEndedIterator for DuplicatesBy - where - I: DoubleEndedIterator, - Key: Eq + Hash, - F: KeyMethod, - { - fn next_back(&mut self) -> Option { - let Self { iter, meta } = self; - iter.rev().find_map(|v| meta.filter(v)) - } - } - - /// A keying method for use with `DuplicatesBy` - pub trait KeyMethod { - type Container: KeyXorValue; - - fn make(&mut self, value: V) -> Self::Container; - } - - /// Apply the identity function to elements before checking them for equality. 
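A quick sketch of `diff_with` from the call side: the lock-step comparison stops at the first difference and hands back both remainders with the mismatching elements put back in front. Assumes the itertools crate; illustrative only.

```rust
use itertools::{diff_with, Diff};

fn main() {
    let old = vec![1, 2, 3, 4];
    let new = vec![1, 2, 9];
    match diff_with(old, new, |a, b| a == b) {
        Some(Diff::FirstMismatch(idx, mut rest_old, mut rest_new)) => {
            assert_eq!(idx, 2);
            // The mismatching elements themselves are still available.
            assert_eq!(rest_old.next(), Some(3));
            assert_eq!(rest_new.next(), Some(9));
        }
        _ => panic!("expected a mismatch at index 2"),
    }
}
```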
- #[derive(Debug, Clone)] - pub struct ById; - impl KeyMethod for ById { - type Container = JustValue; - - fn make(&mut self, v: V) -> Self::Container { - JustValue(v) - } - } - - /// Apply a user-supplied function to elements before checking them for equality. - #[derive(Clone)] - pub struct ByFn(pub(crate) F); - impl fmt::Debug for ByFn { - debug_fmt_fields!(ByFn,); - } - impl KeyMethod for ByFn - where - F: FnMut(&V) -> K, - { - type Container = KeyValue; - - fn make(&mut self, v: V) -> Self::Container { - KeyValue((self.0)(&v), v) - } - } - - // Implementors of this trait can hold onto a key and a value but only give access to one of them - // at a time. This allows the key and the value to be the same value internally - pub trait KeyXorValue { - fn key_ref(&self) -> &K; - fn key(self) -> K; - fn value(self) -> V; - } - - #[derive(Debug)] - pub struct KeyValue(K, V); - impl KeyXorValue for KeyValue { - fn key_ref(&self) -> &K { - &self.0 - } - fn key(self) -> K { - self.0 - } - fn value(self) -> V { - self.1 - } - } - - #[derive(Debug)] - pub struct JustValue(V); - impl KeyXorValue for JustValue { - fn key_ref(&self) -> &V { - &self.0 - } - fn key(self) -> V { - self.0 - } - fn value(self) -> V { - self.0 - } - } -} - -/// An iterator adapter to filter for duplicate elements. -/// -/// See [`.duplicates_by()`](crate::Itertools::duplicates_by) for more information. -pub type DuplicatesBy = private::DuplicatesBy>; - -/// Create a new `DuplicatesBy` iterator. -pub fn duplicates_by(iter: I, f: F) -> DuplicatesBy -where - Key: Eq + Hash, - F: FnMut(&I::Item) -> Key, - I: Iterator, -{ - DuplicatesBy::new(iter, private::ByFn(f)) -} - -/// An iterator adapter to filter out duplicate elements. -/// -/// See [`.duplicates()`](crate::Itertools::duplicates) for more information. -pub type Duplicates = private::DuplicatesBy::Item, private::ById>; - -/// Create a new `Duplicates` iterator. -pub fn duplicates(iter: I) -> Duplicates -where - I: Iterator, - I::Item: Eq + Hash, -{ - Duplicates::new(iter, private::ById) -} diff --git a/vendor/itertools/src/either_or_both.rs b/vendor/itertools/src/either_or_both.rs deleted file mode 100644 index b7a7fc14115b40..00000000000000 --- a/vendor/itertools/src/either_or_both.rs +++ /dev/null @@ -1,514 +0,0 @@ -use core::ops::{Deref, DerefMut}; - -use crate::EitherOrBoth::*; - -use either::Either; - -/// Value that either holds a single A or B, or both. -#[derive(Clone, PartialEq, Eq, Hash, Debug)] -pub enum EitherOrBoth { - /// Both values are present. - Both(A, B), - /// Only the left value of type `A` is present. - Left(A), - /// Only the right value of type `B` is present. - Right(B), -} - -impl EitherOrBoth { - /// If `Left`, or `Both`, return true. Otherwise, return false. - pub fn has_left(&self) -> bool { - self.as_ref().left().is_some() - } - - /// If `Right`, or `Both`, return true, otherwise, return false. - pub fn has_right(&self) -> bool { - self.as_ref().right().is_some() - } - - /// If `Left`, return true. Otherwise, return false. - /// Exclusive version of [`has_left`](EitherOrBoth::has_left). - pub fn is_left(&self) -> bool { - matches!(self, Left(_)) - } - - /// If `Right`, return true. Otherwise, return false. - /// Exclusive version of [`has_right`](EitherOrBoth::has_right). - pub fn is_right(&self) -> bool { - matches!(self, Right(_)) - } - - /// If `Both`, return true. Otherwise, return false. - pub fn is_both(&self) -> bool { - self.as_ref().both().is_some() - } - - /// If `Left`, or `Both`, return `Some` with the left value. 
Otherwise, return `None`. - pub fn left(self) -> Option { - match self { - Left(left) | Both(left, _) => Some(left), - _ => None, - } - } - - /// If `Right`, or `Both`, return `Some` with the right value. Otherwise, return `None`. - pub fn right(self) -> Option { - match self { - Right(right) | Both(_, right) => Some(right), - _ => None, - } - } - - /// Return tuple of options corresponding to the left and right value respectively - /// - /// If `Left` return `(Some(..), None)`, if `Right` return `(None,Some(..))`, else return - /// `(Some(..),Some(..))` - pub fn left_and_right(self) -> (Option, Option) { - self.map_any(Some, Some).or_default() - } - - /// If `Left`, return `Some` with the left value. If `Right` or `Both`, return `None`. - /// - /// # Examples - /// - /// ``` - /// // On the `Left` variant. - /// # use itertools::{EitherOrBoth, EitherOrBoth::{Left, Right, Both}}; - /// let x: EitherOrBoth<_, ()> = Left("bonjour"); - /// assert_eq!(x.just_left(), Some("bonjour")); - /// - /// // On the `Right` variant. - /// let x: EitherOrBoth<(), _> = Right("hola"); - /// assert_eq!(x.just_left(), None); - /// - /// // On the `Both` variant. - /// let x = Both("bonjour", "hola"); - /// assert_eq!(x.just_left(), None); - /// ``` - pub fn just_left(self) -> Option { - match self { - Left(left) => Some(left), - _ => None, - } - } - - /// If `Right`, return `Some` with the right value. If `Left` or `Both`, return `None`. - /// - /// # Examples - /// - /// ``` - /// // On the `Left` variant. - /// # use itertools::{EitherOrBoth::{Left, Right, Both}, EitherOrBoth}; - /// let x: EitherOrBoth<_, ()> = Left("auf wiedersehen"); - /// assert_eq!(x.just_left(), Some("auf wiedersehen")); - /// - /// // On the `Right` variant. - /// let x: EitherOrBoth<(), _> = Right("adios"); - /// assert_eq!(x.just_left(), None); - /// - /// // On the `Both` variant. - /// let x = Both("auf wiedersehen", "adios"); - /// assert_eq!(x.just_left(), None); - /// ``` - pub fn just_right(self) -> Option { - match self { - Right(right) => Some(right), - _ => None, - } - } - - /// If `Both`, return `Some` containing the left and right values. Otherwise, return `None`. - pub fn both(self) -> Option<(A, B)> { - match self { - Both(a, b) => Some((a, b)), - _ => None, - } - } - - /// If `Left` or `Both`, return the left value. Otherwise, convert the right value and return it. - pub fn into_left(self) -> A - where - B: Into, - { - match self { - Left(a) | Both(a, _) => a, - Right(b) => b.into(), - } - } - - /// If `Right` or `Both`, return the right value. Otherwise, convert the left value and return it. - pub fn into_right(self) -> B - where - A: Into, - { - match self { - Right(b) | Both(_, b) => b, - Left(a) => a.into(), - } - } - - /// Converts from `&EitherOrBoth` to `EitherOrBoth<&A, &B>`. - pub fn as_ref(&self) -> EitherOrBoth<&A, &B> { - match *self { - Left(ref left) => Left(left), - Right(ref right) => Right(right), - Both(ref left, ref right) => Both(left, right), - } - } - - /// Converts from `&mut EitherOrBoth` to `EitherOrBoth<&mut A, &mut B>`. - pub fn as_mut(&mut self) -> EitherOrBoth<&mut A, &mut B> { - match *self { - Left(ref mut left) => Left(left), - Right(ref mut right) => Right(right), - Both(ref mut left, ref mut right) => Both(left, right), - } - } - - /// Converts from `&EitherOrBoth` to `EitherOrBoth<&_, &_>` using the [`Deref`] trait. 
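In practice `EitherOrBoth` mostly appears as the item type of `zip_longest`; a short illustration of that and of the accessors defined above, assuming the itertools crate as a dependency.

```rust
use itertools::{EitherOrBoth, Itertools};

fn main() {
    // zip_longest yields Both while both sides have items,
    // then Left/Right for the longer side's leftovers.
    let zipped: Vec<EitherOrBoth<i32, char>> =
        vec![1, 2, 3].into_iter().zip_longest(vec!['a', 'b']).collect();
    assert_eq!(zipped[0], EitherOrBoth::Both(1, 'a'));
    assert_eq!(zipped[2], EitherOrBoth::Left(3));

    // The accessors above: left()/right() return Options, both() only on Both.
    assert_eq!(zipped[2].clone().left(), Some(3));
    assert_eq!(zipped[2].clone().right(), None);
}
```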
- pub fn as_deref(&self) -> EitherOrBoth<&A::Target, &B::Target> - where - A: Deref, - B: Deref, - { - match *self { - Left(ref left) => Left(left), - Right(ref right) => Right(right), - Both(ref left, ref right) => Both(left, right), - } - } - - /// Converts from `&mut EitherOrBoth` to `EitherOrBoth<&mut _, &mut _>` using the [`DerefMut`] trait. - pub fn as_deref_mut(&mut self) -> EitherOrBoth<&mut A::Target, &mut B::Target> - where - A: DerefMut, - B: DerefMut, - { - match *self { - Left(ref mut left) => Left(left), - Right(ref mut right) => Right(right), - Both(ref mut left, ref mut right) => Both(left, right), - } - } - - /// Convert `EitherOrBoth` to `EitherOrBoth`. - pub fn flip(self) -> EitherOrBoth { - match self { - Left(a) => Right(a), - Right(b) => Left(b), - Both(a, b) => Both(b, a), - } - } - - /// Apply the function `f` on the value `a` in `Left(a)` or `Both(a, b)` variants. If it is - /// present rewrapping the result in `self`'s original variant. - pub fn map_left(self, f: F) -> EitherOrBoth - where - F: FnOnce(A) -> M, - { - match self { - Both(a, b) => Both(f(a), b), - Left(a) => Left(f(a)), - Right(b) => Right(b), - } - } - - /// Apply the function `f` on the value `b` in `Right(b)` or `Both(a, b)` variants. - /// If it is present rewrapping the result in `self`'s original variant. - pub fn map_right(self, f: F) -> EitherOrBoth - where - F: FnOnce(B) -> M, - { - match self { - Left(a) => Left(a), - Right(b) => Right(f(b)), - Both(a, b) => Both(a, f(b)), - } - } - - /// Apply the functions `f` and `g` on the value `a` and `b` respectively; - /// found in `Left(a)`, `Right(b)`, or `Both(a, b)` variants. - /// The Result is rewrapped `self`'s original variant. - pub fn map_any(self, f: F, g: G) -> EitherOrBoth - where - F: FnOnce(A) -> L, - G: FnOnce(B) -> R, - { - match self { - Left(a) => Left(f(a)), - Right(b) => Right(g(b)), - Both(a, b) => Both(f(a), g(b)), - } - } - - /// Apply the function `f` on the value `a` in `Left(a)` or `Both(a, _)` variants if it is - /// present. - pub fn left_and_then(self, f: F) -> EitherOrBoth - where - F: FnOnce(A) -> EitherOrBoth, - { - match self { - Left(a) | Both(a, _) => f(a), - Right(b) => Right(b), - } - } - - /// Apply the function `f` on the value `b` - /// in `Right(b)` or `Both(_, b)` variants if it is present. - pub fn right_and_then(self, f: F) -> EitherOrBoth - where - F: FnOnce(B) -> EitherOrBoth, - { - match self { - Left(a) => Left(a), - Right(b) | Both(_, b) => f(b), - } - } - - /// Returns a tuple consisting of the `l` and `r` in `Both(l, r)`, if present. - /// Otherwise, returns the wrapped value for the present element, and the supplied - /// value for the other. The first (`l`) argument is used for a missing `Left` - /// value. The second (`r`) argument is used for a missing `Right` value. - /// - /// Arguments passed to `or` are eagerly evaluated; if you are passing - /// the result of a function call, it is recommended to use [`or_else`], - /// which is lazily evaluated. 
- /// - /// [`or_else`]: EitherOrBoth::or_else - /// - /// # Examples - /// - /// ``` - /// # use itertools::EitherOrBoth; - /// assert_eq!(EitherOrBoth::Both("tree", 1).or("stone", 5), ("tree", 1)); - /// assert_eq!(EitherOrBoth::Left("tree").or("stone", 5), ("tree", 5)); - /// assert_eq!(EitherOrBoth::Right(1).or("stone", 5), ("stone", 1)); - /// ``` - pub fn or(self, l: A, r: B) -> (A, B) { - match self { - Left(inner_l) => (inner_l, r), - Right(inner_r) => (l, inner_r), - Both(inner_l, inner_r) => (inner_l, inner_r), - } - } - - /// Returns a tuple consisting of the `l` and `r` in `Both(l, r)`, if present. - /// Otherwise, returns the wrapped value for the present element, and the [`default`](Default::default) - /// for the other. - pub fn or_default(self) -> (A, B) - where - A: Default, - B: Default, - { - match self { - Left(l) => (l, B::default()), - Right(r) => (A::default(), r), - Both(l, r) => (l, r), - } - } - - /// Returns a tuple consisting of the `l` and `r` in `Both(l, r)`, if present. - /// Otherwise, returns the wrapped value for the present element, and computes the - /// missing value with the supplied closure. The first argument (`l`) is used for a - /// missing `Left` value. The second argument (`r`) is used for a missing `Right` value. - /// - /// # Examples - /// - /// ``` - /// # use itertools::EitherOrBoth; - /// let k = 10; - /// assert_eq!(EitherOrBoth::Both("tree", 1).or_else(|| "stone", || 2 * k), ("tree", 1)); - /// assert_eq!(EitherOrBoth::Left("tree").or_else(|| "stone", || 2 * k), ("tree", 20)); - /// assert_eq!(EitherOrBoth::Right(1).or_else(|| "stone", || 2 * k), ("stone", 1)); - /// ``` - pub fn or_else A, R: FnOnce() -> B>(self, l: L, r: R) -> (A, B) { - match self { - Left(inner_l) => (inner_l, r()), - Right(inner_r) => (l(), inner_r), - Both(inner_l, inner_r) => (inner_l, inner_r), - } - } - - /// Returns a mutable reference to the left value. If the left value is not present, - /// it is replaced with `val`. - pub fn left_or_insert(&mut self, val: A) -> &mut A { - self.left_or_insert_with(|| val) - } - - /// Returns a mutable reference to the right value. If the right value is not present, - /// it is replaced with `val`. - pub fn right_or_insert(&mut self, val: B) -> &mut B { - self.right_or_insert_with(|| val) - } - - /// If the left value is not present, replace it the value computed by the closure `f`. - /// Returns a mutable reference to the now-present left value. - pub fn left_or_insert_with(&mut self, f: F) -> &mut A - where - F: FnOnce() -> A, - { - match self { - Left(left) | Both(left, _) => left, - Right(_) => self.insert_left(f()), - } - } - - /// If the right value is not present, replace it the value computed by the closure `f`. - /// Returns a mutable reference to the now-present right value. - pub fn right_or_insert_with(&mut self, f: F) -> &mut B - where - F: FnOnce() -> B, - { - match self { - Right(right) | Both(_, right) => right, - Left(_) => self.insert_right(f()), - } - } - - /// Sets the `left` value of this instance, and returns a mutable reference to it. - /// Does not affect the `right` value. - /// - /// # Examples - /// ``` - /// # use itertools::{EitherOrBoth, EitherOrBoth::{Left, Right, Both}}; - /// - /// // Overwriting a pre-existing value. - /// let mut either: EitherOrBoth<_, ()> = Left(0_u32); - /// assert_eq!(*either.insert_left(69), 69); - /// - /// // Inserting a second value. 
- /// let mut either = Right("no"); - /// assert_eq!(*either.insert_left("yes"), "yes"); - /// assert_eq!(either, Both("yes", "no")); - /// ``` - pub fn insert_left(&mut self, val: A) -> &mut A { - match self { - Left(left) | Both(left, _) => { - *left = val; - left - } - Right(right) => { - // This is like a map in place operation. We move out of the reference, - // change the value, and then move back into the reference. - unsafe { - // SAFETY: We know this pointer is valid for reading since we got it from a reference. - let right = std::ptr::read(right as *mut _); - // SAFETY: Again, we know the pointer is valid since we got it from a reference. - std::ptr::write(self as *mut _, Both(val, right)); - } - - if let Both(left, _) = self { - left - } else { - // SAFETY: The above pattern will always match, since we just - // set `self` equal to `Both`. - unsafe { std::hint::unreachable_unchecked() } - } - } - } - } - - /// Sets the `right` value of this instance, and returns a mutable reference to it. - /// Does not affect the `left` value. - /// - /// # Examples - /// ``` - /// # use itertools::{EitherOrBoth, EitherOrBoth::{Left, Both}}; - /// // Overwriting a pre-existing value. - /// let mut either: EitherOrBoth<_, ()> = Left(0_u32); - /// assert_eq!(*either.insert_left(69), 69); - /// - /// // Inserting a second value. - /// let mut either = Left("what's"); - /// assert_eq!(*either.insert_right(9 + 10), 21 - 2); - /// assert_eq!(either, Both("what's", 9+10)); - /// ``` - pub fn insert_right(&mut self, val: B) -> &mut B { - match self { - Right(right) | Both(_, right) => { - *right = val; - right - } - Left(left) => { - // This is like a map in place operation. We move out of the reference, - // change the value, and then move back into the reference. - unsafe { - // SAFETY: We know this pointer is valid for reading since we got it from a reference. - let left = std::ptr::read(left as *mut _); - // SAFETY: Again, we know the pointer is valid since we got it from a reference. - std::ptr::write(self as *mut _, Both(left, val)); - } - if let Both(_, right) = self { - right - } else { - // SAFETY: The above pattern will always match, since we just - // set `self` equal to `Both`. - unsafe { std::hint::unreachable_unchecked() } - } - } - } - } - - /// Set `self` to `Both(..)`, containing the specified left and right values, - /// and returns a mutable reference to those values. - pub fn insert_both(&mut self, left: A, right: B) -> (&mut A, &mut B) { - *self = Both(left, right); - if let Both(left, right) = self { - (left, right) - } else { - // SAFETY: The above pattern will always match, since we just - // set `self` equal to `Both`. - unsafe { std::hint::unreachable_unchecked() } - } - } -} - -impl EitherOrBoth { - /// Return either value of left, right, or apply a function `f` to both values if both are present. - /// The input function has to return the same type as both Right and Left carry. - /// - /// This function can be used to preferrably extract the left resp. right value, - /// but fall back to the other (i.e. right resp. left) if the preferred one is not present. - /// - /// # Examples - /// ``` - /// # use itertools::EitherOrBoth; - /// assert_eq!(EitherOrBoth::Both(3, 7).reduce(u32::max), 7); - /// assert_eq!(EitherOrBoth::Left(3).reduce(u32::max), 3); - /// assert_eq!(EitherOrBoth::Right(7).reduce(u32::max), 7); - /// - /// // Extract the left value if present, fall back to the right otherwise. 
- /// assert_eq!(EitherOrBoth::Left("left").reduce(|l, _r| l), "left"); - /// assert_eq!(EitherOrBoth::Right("right").reduce(|l, _r| l), "right"); - /// assert_eq!(EitherOrBoth::Both("left", "right").reduce(|l, _r| l), "left"); - /// ``` - pub fn reduce(self, f: F) -> T - where - F: FnOnce(T, T) -> T, - { - match self { - Left(a) => a, - Right(b) => b, - Both(a, b) => f(a, b), - } - } -} - -impl From> for Option> { - fn from(value: EitherOrBoth) -> Self { - match value { - Left(l) => Some(Either::Left(l)), - Right(r) => Some(Either::Right(r)), - Both(..) => None, - } - } -} - -impl From> for EitherOrBoth { - fn from(either: Either) -> Self { - match either { - Either::Left(l) => Left(l), - Either::Right(l) => Right(l), - } - } -} diff --git a/vendor/itertools/src/exactly_one_err.rs b/vendor/itertools/src/exactly_one_err.rs deleted file mode 100644 index 19b9e19189a5f1..00000000000000 --- a/vendor/itertools/src/exactly_one_err.rs +++ /dev/null @@ -1,125 +0,0 @@ -#[cfg(feature = "use_std")] -use std::error::Error; -use std::fmt::{Debug, Display, Formatter, Result as FmtResult}; - -use std::iter::ExactSizeIterator; - -use either::Either; - -use crate::size_hint; - -/// Iterator returned for the error case of `Itertools::exactly_one()` -/// This iterator yields exactly the same elements as the input iterator. -/// -/// During the execution of `exactly_one` the iterator must be mutated. This wrapper -/// effectively "restores" the state of the input iterator when it's handed back. -/// -/// This is very similar to `PutBackN` except this iterator only supports 0-2 elements and does not -/// use a `Vec`. -#[derive(Clone)] -pub struct ExactlyOneError -where - I: Iterator, -{ - first_two: Option>, - inner: I, -} - -impl ExactlyOneError -where - I: Iterator, -{ - /// Creates a new `ExactlyOneErr` iterator. 
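The point of `ExactlyOneError` being an iterator is that a failed `exactly_one()` call loses nothing: the error replays every element, including the ones inspected during the check. A minimal illustration, assuming the itertools crate.

```rust
use itertools::Itertools;

fn main() {
    // Exactly one element: Ok with that element.
    assert_eq!([42].into_iter().exactly_one().ok(), Some(42));

    // Zero or several elements: the error is itself an iterator that
    // yields the original elements, so the input can be fully recovered.
    let err = [1, 2, 3].into_iter().exactly_one().unwrap_err();
    assert_eq!(err.collect::<Vec<_>>(), vec![1, 2, 3]);
}
```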
- pub(crate) fn new(first_two: Option>, inner: I) -> Self { - Self { first_two, inner } - } - - fn additional_len(&self) -> usize { - match self.first_two { - Some(Either::Left(_)) => 2, - Some(Either::Right(_)) => 1, - None => 0, - } - } -} - -impl Iterator for ExactlyOneError -where - I: Iterator, -{ - type Item = I::Item; - - fn next(&mut self) -> Option { - match self.first_two.take() { - Some(Either::Left([first, second])) => { - self.first_two = Some(Either::Right(second)); - Some(first) - } - Some(Either::Right(second)) => Some(second), - None => self.inner.next(), - } - } - - fn size_hint(&self) -> (usize, Option) { - size_hint::add_scalar(self.inner.size_hint(), self.additional_len()) - } - - fn fold(self, mut init: B, mut f: F) -> B - where - F: FnMut(B, Self::Item) -> B, - { - match self.first_two { - Some(Either::Left([first, second])) => { - init = f(init, first); - init = f(init, second); - } - Some(Either::Right(second)) => init = f(init, second), - None => {} - } - self.inner.fold(init, f) - } -} - -impl ExactSizeIterator for ExactlyOneError where I: ExactSizeIterator {} - -impl Display for ExactlyOneError -where - I: Iterator, -{ - fn fmt(&self, f: &mut Formatter) -> FmtResult { - let additional = self.additional_len(); - if additional > 0 { - write!(f, "got at least 2 elements when exactly one was expected") - } else { - write!(f, "got zero elements when exactly one was expected") - } - } -} - -impl Debug for ExactlyOneError -where - I: Iterator + Debug, - I::Item: Debug, -{ - fn fmt(&self, f: &mut Formatter) -> FmtResult { - let mut dbg = f.debug_struct("ExactlyOneError"); - match &self.first_two { - Some(Either::Left([first, second])) => { - dbg.field("first", first).field("second", second); - } - Some(Either::Right(second)) => { - dbg.field("second", second); - } - None => {} - } - dbg.field("inner", &self.inner).finish() - } -} - -#[cfg(feature = "use_std")] -impl Error for ExactlyOneError -where - I: Iterator + Debug, - I::Item: Debug, -{ -} diff --git a/vendor/itertools/src/extrema_set.rs b/vendor/itertools/src/extrema_set.rs deleted file mode 100644 index d24114c6d9ab1f..00000000000000 --- a/vendor/itertools/src/extrema_set.rs +++ /dev/null @@ -1,50 +0,0 @@ -#![cfg(feature = "use_alloc")] -use alloc::{vec, vec::Vec}; -use std::cmp::Ordering; - -/// Implementation guts for `min_set`, `min_set_by`, and `min_set_by_key`. -pub fn min_set_impl( - mut it: I, - mut key_for: F, - mut compare: Compare, -) -> Vec -where - I: Iterator, - F: FnMut(&I::Item) -> K, - Compare: FnMut(&I::Item, &I::Item, &K, &K) -> Ordering, -{ - match it.next() { - None => Vec::new(), - Some(element) => { - let mut current_key = key_for(&element); - let mut result = vec![element]; - it.for_each(|element| { - let key = key_for(&element); - match compare(&element, &result[0], &key, ¤t_key) { - Ordering::Less => { - result.clear(); - result.push(element); - current_key = key; - } - Ordering::Equal => { - result.push(element); - } - Ordering::Greater => {} - } - }); - result - } - } -} - -/// Implementation guts for `ax_set`, `max_set_by`, and `max_set_by_key`. 
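These routines are the shared guts behind the `min_set`, `min_set_by`, `min_set_by_key` and corresponding `max_set*` methods; a call-side sketch, assuming the itertools crate and illustrative only.

```rust
use itertools::Itertools;

fn main() {
    // max_set(): every element tied for the maximum, in encounter order.
    let maxima = [1, 3, 2, 3].into_iter().max_set();
    assert_eq!(maxima, vec![3, 3]);

    // min_set_by_key(): ties are decided by a key function.
    let shortest = ["foo", "a", "b", "cd"].into_iter().min_set_by_key(|s| s.len());
    assert_eq!(shortest, vec!["a", "b"]);
}
```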
-pub fn max_set_impl(it: I, key_for: F, mut compare: Compare) -> Vec -where - I: Iterator, - F: FnMut(&I::Item) -> K, - Compare: FnMut(&I::Item, &I::Item, &K, &K) -> Ordering, -{ - min_set_impl(it, key_for, |it1, it2, key1, key2| { - compare(it2, it1, key2, key1) - }) -} diff --git a/vendor/itertools/src/flatten_ok.rs b/vendor/itertools/src/flatten_ok.rs deleted file mode 100644 index 48f1e90a647965..00000000000000 --- a/vendor/itertools/src/flatten_ok.rs +++ /dev/null @@ -1,205 +0,0 @@ -use crate::size_hint; -use std::{ - fmt, - iter::{DoubleEndedIterator, FusedIterator}, -}; - -pub fn flatten_ok(iter: I) -> FlattenOk -where - I: Iterator>, - T: IntoIterator, -{ - FlattenOk { - iter, - inner_front: None, - inner_back: None, - } -} - -/// An iterator adaptor that flattens `Result::Ok` values and -/// allows `Result::Err` values through unchanged. -/// -/// See [`.flatten_ok()`](crate::Itertools::flatten_ok) for more information. -#[must_use = "iterator adaptors are lazy and do nothing unless consumed"] -pub struct FlattenOk -where - I: Iterator>, - T: IntoIterator, -{ - iter: I, - inner_front: Option, - inner_back: Option, -} - -impl Iterator for FlattenOk -where - I: Iterator>, - T: IntoIterator, -{ - type Item = Result; - - fn next(&mut self) -> Option { - loop { - // Handle the front inner iterator. - if let Some(inner) = &mut self.inner_front { - if let Some(item) = inner.next() { - return Some(Ok(item)); - } - - // This is necessary for the iterator to implement `FusedIterator` - // with only the original iterator being fused. - self.inner_front = None; - } - - match self.iter.next() { - Some(Ok(ok)) => self.inner_front = Some(ok.into_iter()), - Some(Err(e)) => return Some(Err(e)), - None => { - // Handle the back inner iterator. - if let Some(inner) = &mut self.inner_back { - if let Some(item) = inner.next() { - return Some(Ok(item)); - } - - // This is necessary for the iterator to implement `FusedIterator` - // with only the original iterator being fused. - self.inner_back = None; - } else { - return None; - } - } - } - } - } - - fn fold(self, init: B, mut f: F) -> B - where - Self: Sized, - F: FnMut(B, Self::Item) -> B, - { - // Front - let mut acc = match self.inner_front { - Some(x) => x.fold(init, |a, o| f(a, Ok(o))), - None => init, - }; - - acc = self.iter.fold(acc, |acc, x| match x { - Ok(it) => it.into_iter().fold(acc, |a, o| f(a, Ok(o))), - Err(e) => f(acc, Err(e)), - }); - - // Back - match self.inner_back { - Some(x) => x.fold(acc, |a, o| f(a, Ok(o))), - None => acc, - } - } - - fn size_hint(&self) -> (usize, Option) { - let inner_hint = |inner: &Option| { - inner - .as_ref() - .map(Iterator::size_hint) - .unwrap_or((0, Some(0))) - }; - let inner_front = inner_hint(&self.inner_front); - let inner_back = inner_hint(&self.inner_back); - // The outer iterator `Ok` case could be (0, None) as we don't know its size_hint yet. - let outer = match self.iter.size_hint() { - (0, Some(0)) => (0, Some(0)), - _ => (0, None), - }; - - size_hint::add(size_hint::add(inner_front, inner_back), outer) - } -} - -impl DoubleEndedIterator for FlattenOk -where - I: DoubleEndedIterator>, - T: IntoIterator, - T::IntoIter: DoubleEndedIterator, -{ - fn next_back(&mut self) -> Option { - loop { - // Handle the back inner iterator. - if let Some(inner) = &mut self.inner_back { - if let Some(item) = inner.next_back() { - return Some(Ok(item)); - } - - // This is necessary for the iterator to implement `FusedIterator` - // with only the original iterator being fused. 
- self.inner_back = None; - } - - match self.iter.next_back() { - Some(Ok(ok)) => self.inner_back = Some(ok.into_iter()), - Some(Err(e)) => return Some(Err(e)), - None => { - // Handle the front inner iterator. - if let Some(inner) = &mut self.inner_front { - if let Some(item) = inner.next_back() { - return Some(Ok(item)); - } - - // This is necessary for the iterator to implement `FusedIterator` - // with only the original iterator being fused. - self.inner_front = None; - } else { - return None; - } - } - } - } - } - - fn rfold(self, init: B, mut f: F) -> B - where - Self: Sized, - F: FnMut(B, Self::Item) -> B, - { - // Back - let mut acc = match self.inner_back { - Some(x) => x.rfold(init, |a, o| f(a, Ok(o))), - None => init, - }; - - acc = self.iter.rfold(acc, |acc, x| match x { - Ok(it) => it.into_iter().rfold(acc, |a, o| f(a, Ok(o))), - Err(e) => f(acc, Err(e)), - }); - - // Front - match self.inner_front { - Some(x) => x.rfold(acc, |a, o| f(a, Ok(o))), - None => acc, - } - } -} - -impl Clone for FlattenOk -where - I: Iterator> + Clone, - T: IntoIterator, - T::IntoIter: Clone, -{ - clone_fields!(iter, inner_front, inner_back); -} - -impl fmt::Debug for FlattenOk -where - I: Iterator> + fmt::Debug, - T: IntoIterator, - T::IntoIter: fmt::Debug, -{ - debug_fmt_fields!(FlattenOk, iter, inner_front, inner_back); -} - -/// Only the iterator being flattened needs to implement [`FusedIterator`]. -impl FusedIterator for FlattenOk -where - I: FusedIterator>, - T: IntoIterator, -{ -} diff --git a/vendor/itertools/src/format.rs b/vendor/itertools/src/format.rs deleted file mode 100644 index 15cee34d6aad9c..00000000000000 --- a/vendor/itertools/src/format.rs +++ /dev/null @@ -1,178 +0,0 @@ -use std::cell::Cell; -use std::fmt; - -/// Format all iterator elements lazily, separated by `sep`. -/// -/// The format value can only be formatted once, after that the iterator is -/// exhausted. -/// -/// See [`.format_with()`](crate::Itertools::format_with) for more information. -pub struct FormatWith<'a, I, F> { - sep: &'a str, - /// `FormatWith` uses interior mutability because `Display::fmt` takes `&self`. - inner: Cell>, -} - -/// Format all iterator elements lazily, separated by `sep`. -/// -/// The format value can only be formatted once, after that the iterator is -/// exhausted. -/// -/// See [`.format()`](crate::Itertools::format) -/// for more information. -pub struct Format<'a, I> { - sep: &'a str, - /// `Format` uses interior mutability because `Display::fmt` takes `&self`. 
- inner: Cell>, -} - -pub fn new_format(iter: I, separator: &str, f: F) -> FormatWith<'_, I, F> -where - I: Iterator, - F: FnMut(I::Item, &mut dyn FnMut(&dyn fmt::Display) -> fmt::Result) -> fmt::Result, -{ - FormatWith { - sep: separator, - inner: Cell::new(Some((iter, f))), - } -} - -pub fn new_format_default(iter: I, separator: &str) -> Format<'_, I> -where - I: Iterator, -{ - Format { - sep: separator, - inner: Cell::new(Some(iter)), - } -} - -impl<'a, I, F> fmt::Display for FormatWith<'a, I, F> -where - I: Iterator, - F: FnMut(I::Item, &mut dyn FnMut(&dyn fmt::Display) -> fmt::Result) -> fmt::Result, -{ - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - let (mut iter, mut format) = match self.inner.take() { - Some(t) => t, - None => panic!("FormatWith: was already formatted once"), - }; - - if let Some(fst) = iter.next() { - format(fst, &mut |disp: &dyn fmt::Display| disp.fmt(f))?; - iter.try_for_each(|elt| { - if !self.sep.is_empty() { - f.write_str(self.sep)?; - } - format(elt, &mut |disp: &dyn fmt::Display| disp.fmt(f)) - })?; - } - Ok(()) - } -} - -impl<'a, I, F> fmt::Debug for FormatWith<'a, I, F> -where - I: Iterator, - F: FnMut(I::Item, &mut dyn FnMut(&dyn fmt::Display) -> fmt::Result) -> fmt::Result, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt::Display::fmt(self, f) - } -} - -impl<'a, I> Format<'a, I> -where - I: Iterator, -{ - fn format( - &self, - f: &mut fmt::Formatter, - cb: fn(&I::Item, &mut fmt::Formatter) -> fmt::Result, - ) -> fmt::Result { - let mut iter = match self.inner.take() { - Some(t) => t, - None => panic!("Format: was already formatted once"), - }; - - if let Some(fst) = iter.next() { - cb(&fst, f)?; - iter.try_for_each(|elt| { - if !self.sep.is_empty() { - f.write_str(self.sep)?; - } - cb(&elt, f) - })?; - } - Ok(()) - } -} - -macro_rules! impl_format { - ($($fmt_trait:ident)*) => { - $( - impl<'a, I> fmt::$fmt_trait for Format<'a, I> - where I: Iterator, - I::Item: fmt::$fmt_trait, - { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - self.format(f, fmt::$fmt_trait::fmt) - } - } - )* - } -} - -impl_format! {Display Debug UpperExp LowerExp UpperHex LowerHex Octal Binary Pointer} - -impl<'a, I, F> Clone for FormatWith<'a, I, F> -where - (I, F): Clone, -{ - fn clone(&self) -> Self { - struct PutBackOnDrop<'r, 'a, I, F> { - into: &'r FormatWith<'a, I, F>, - inner: Option<(I, F)>, - } - // This ensures we preserve the state of the original `FormatWith` if `Clone` panics - impl<'r, 'a, I, F> Drop for PutBackOnDrop<'r, 'a, I, F> { - fn drop(&mut self) { - self.into.inner.set(self.inner.take()) - } - } - let pbod = PutBackOnDrop { - inner: self.inner.take(), - into: self, - }; - Self { - inner: Cell::new(pbod.inner.clone()), - sep: self.sep, - } - } -} - -impl<'a, I> Clone for Format<'a, I> -where - I: Clone, -{ - fn clone(&self) -> Self { - struct PutBackOnDrop<'r, 'a, I> { - into: &'r Format<'a, I>, - inner: Option, - } - // This ensures we preserve the state of the original `FormatWith` if `Clone` panics - impl<'r, 'a, I> Drop for PutBackOnDrop<'r, 'a, I> { - fn drop(&mut self) { - self.into.inner.set(self.inner.take()) - } - } - let pbod = PutBackOnDrop { - inner: self.inner.take(), - into: self, - }; - Self { - inner: Cell::new(pbod.inner.clone()), - sep: self.sep, - } - } -} diff --git a/vendor/itertools/src/free.rs b/vendor/itertools/src/free.rs deleted file mode 100644 index 8d0bcf3ea966e0..00000000000000 --- a/vendor/itertools/src/free.rs +++ /dev/null @@ -1,317 +0,0 @@ -//! 
Free functions that create iterator adaptors or call iterator methods. -//! -//! The benefit of free functions is that they accept any [`IntoIterator`] as -//! argument, so the resulting code may be easier to read. - -#[cfg(feature = "use_alloc")] -use std::fmt::Display; -use std::iter::{self, Zip}; -#[cfg(feature = "use_alloc")] -type VecIntoIter = alloc::vec::IntoIter; - -#[cfg(feature = "use_alloc")] -use alloc::string::String; - -use crate::intersperse::{Intersperse, IntersperseWith}; -use crate::Itertools; - -pub use crate::adaptors::{interleave, put_back}; -#[cfg(feature = "use_alloc")] -pub use crate::kmerge_impl::kmerge; -pub use crate::merge_join::{merge, merge_join_by}; -#[cfg(feature = "use_alloc")] -pub use crate::multipeek_impl::multipeek; -#[cfg(feature = "use_alloc")] -pub use crate::peek_nth::peek_nth; -#[cfg(feature = "use_alloc")] -pub use crate::put_back_n_impl::put_back_n; -#[cfg(feature = "use_alloc")] -pub use crate::rciter_impl::rciter; -pub use crate::zip_eq_impl::zip_eq; - -/// Iterate `iterable` with a particular value inserted between each element. -/// -/// [`IntoIterator`] enabled version of [`Iterator::intersperse`]. -/// -/// ``` -/// use itertools::intersperse; -/// -/// itertools::assert_equal(intersperse((0..3), 8), vec![0, 8, 1, 8, 2]); -/// ``` -pub fn intersperse(iterable: I, element: I::Item) -> Intersperse -where - I: IntoIterator, - ::Item: Clone, -{ - Itertools::intersperse(iterable.into_iter(), element) -} - -/// Iterate `iterable` with a particular value created by a function inserted -/// between each element. -/// -/// [`IntoIterator`] enabled version of [`Iterator::intersperse_with`]. -/// -/// ``` -/// use itertools::intersperse_with; -/// -/// let mut i = 10; -/// itertools::assert_equal(intersperse_with((0..3), || { i -= 1; i }), vec![0, 9, 1, 8, 2]); -/// assert_eq!(i, 8); -/// ``` -pub fn intersperse_with(iterable: I, element: F) -> IntersperseWith -where - I: IntoIterator, - F: FnMut() -> I::Item, -{ - Itertools::intersperse_with(iterable.into_iter(), element) -} - -/// Iterate `iterable` with a running index. -/// -/// [`IntoIterator`] enabled version of [`Iterator::enumerate`]. -/// -/// ``` -/// use itertools::enumerate; -/// -/// for (i, elt) in enumerate(&[1, 2, 3]) { -/// /* loop body */ -/// } -/// ``` -pub fn enumerate(iterable: I) -> iter::Enumerate -where - I: IntoIterator, -{ - iterable.into_iter().enumerate() -} - -/// Iterate `iterable` in reverse. -/// -/// [`IntoIterator`] enabled version of [`Iterator::rev`]. -/// -/// ``` -/// use itertools::rev; -/// -/// for elt in rev(&[1, 2, 3]) { -/// /* loop body */ -/// } -/// ``` -pub fn rev(iterable: I) -> iter::Rev -where - I: IntoIterator, - I::IntoIter: DoubleEndedIterator, -{ - iterable.into_iter().rev() -} - -/// Converts the arguments to iterators and zips them. -/// -/// [`IntoIterator`] enabled version of [`Iterator::zip`]. -/// -/// ## Example -/// -/// ``` -/// use itertools::zip; -/// -/// let mut result: Vec<(i32, char)> = Vec::new(); -/// -/// for (a, b) in zip(&[1, 2, 3, 4, 5], &['a', 'b', 'c']) { -/// result.push((*a, *b)); -/// } -/// assert_eq!(result, vec![(1, 'a'),(2, 'b'),(3, 'c')]); -/// ``` -#[deprecated( - note = "Use [std::iter::zip](https://doc.rust-lang.org/std/iter/fn.zip.html) instead", - since = "0.10.4" -)] -pub fn zip(i: I, j: J) -> Zip -where - I: IntoIterator, - J: IntoIterator, -{ - i.into_iter().zip(j) -} - -/// Takes two iterables and creates a new iterator over both in sequence. 
-/// -/// [`IntoIterator`] enabled version of [`Iterator::chain`]. -/// -/// ## Example -/// ``` -/// use itertools::chain; -/// -/// let mut result:Vec = Vec::new(); -/// -/// for element in chain(&[1, 2, 3], &[4]) { -/// result.push(*element); -/// } -/// assert_eq!(result, vec![1, 2, 3, 4]); -/// ``` -pub fn chain( - i: I, - j: J, -) -> iter::Chain<::IntoIter, ::IntoIter> -where - I: IntoIterator, - J: IntoIterator, -{ - i.into_iter().chain(j) -} - -/// Create an iterator that clones each element from `&T` to `T`. -/// -/// [`IntoIterator`] enabled version of [`Iterator::cloned`]. -/// -/// ``` -/// use itertools::cloned; -/// -/// assert_eq!(cloned(b"abc").next(), Some(b'a')); -/// ``` -pub fn cloned<'a, I, T>(iterable: I) -> iter::Cloned -where - I: IntoIterator, - T: Clone + 'a, -{ - iterable.into_iter().cloned() -} - -/// Perform a fold operation over the iterable. -/// -/// [`IntoIterator`] enabled version of [`Iterator::fold`]. -/// -/// ``` -/// use itertools::fold; -/// -/// assert_eq!(fold(&[1., 2., 3.], 0., |a, &b| f32::max(a, b)), 3.); -/// ``` -pub fn fold(iterable: I, init: B, f: F) -> B -where - I: IntoIterator, - F: FnMut(B, I::Item) -> B, -{ - iterable.into_iter().fold(init, f) -} - -/// Test whether the predicate holds for all elements in the iterable. -/// -/// [`IntoIterator`] enabled version of [`Iterator::all`]. -/// -/// ``` -/// use itertools::all; -/// -/// assert!(all(&[1, 2, 3], |elt| *elt > 0)); -/// ``` -pub fn all(iterable: I, f: F) -> bool -where - I: IntoIterator, - F: FnMut(I::Item) -> bool, -{ - iterable.into_iter().all(f) -} - -/// Test whether the predicate holds for any elements in the iterable. -/// -/// [`IntoIterator`] enabled version of [`Iterator::any`]. -/// -/// ``` -/// use itertools::any; -/// -/// assert!(any(&[0, -1, 2], |elt| *elt > 0)); -/// ``` -pub fn any(iterable: I, f: F) -> bool -where - I: IntoIterator, - F: FnMut(I::Item) -> bool, -{ - iterable.into_iter().any(f) -} - -/// Return the maximum value of the iterable. -/// -/// [`IntoIterator`] enabled version of [`Iterator::max`]. -/// -/// ``` -/// use itertools::max; -/// -/// assert_eq!(max(0..10), Some(9)); -/// ``` -pub fn max(iterable: I) -> Option -where - I: IntoIterator, - I::Item: Ord, -{ - iterable.into_iter().max() -} - -/// Return the minimum value of the iterable. -/// -/// [`IntoIterator`] enabled version of [`Iterator::min`]. -/// -/// ``` -/// use itertools::min; -/// -/// assert_eq!(min(0..10), Some(0)); -/// ``` -pub fn min(iterable: I) -> Option -where - I: IntoIterator, - I::Item: Ord, -{ - iterable.into_iter().min() -} - -/// Combine all iterator elements into one `String`, separated by `sep`. -/// -/// [`IntoIterator`] enabled version of [`Itertools::join`]. -/// -/// ``` -/// use itertools::join; -/// -/// assert_eq!(join(&[1, 2, 3], ", "), "1, 2, 3"); -/// ``` -#[cfg(feature = "use_alloc")] -pub fn join(iterable: I, sep: &str) -> String -where - I: IntoIterator, - I::Item: Display, -{ - iterable.into_iter().join(sep) -} - -/// Sort all iterator elements into a new iterator in ascending order. -/// -/// [`IntoIterator`] enabled version of [`Itertools::sorted`]. -/// -/// ``` -/// use itertools::sorted; -/// use itertools::assert_equal; -/// -/// assert_equal(sorted("rust".chars()), "rstu".chars()); -/// ``` -#[cfg(feature = "use_alloc")] -pub fn sorted(iterable: I) -> VecIntoIter -where - I: IntoIterator, - I::Item: Ord, -{ - iterable.into_iter().sorted() -} - -/// Sort all iterator elements into a new iterator in ascending order. 
-/// This sort is unstable (i.e., may reorder equal elements). -/// -/// [`IntoIterator`] enabled version of [`Itertools::sorted_unstable`]. -/// -/// ``` -/// use itertools::sorted_unstable; -/// use itertools::assert_equal; -/// -/// assert_equal(sorted_unstable("rust".chars()), "rstu".chars()); -/// ``` -#[cfg(feature = "use_alloc")] -pub fn sorted_unstable(iterable: I) -> VecIntoIter -where - I: IntoIterator, - I::Item: Ord, -{ - iterable.into_iter().sorted_unstable() -} diff --git a/vendor/itertools/src/group_map.rs b/vendor/itertools/src/group_map.rs deleted file mode 100644 index 3dcee83afd00b3..00000000000000 --- a/vendor/itertools/src/group_map.rs +++ /dev/null @@ -1,32 +0,0 @@ -#![cfg(feature = "use_std")] - -use std::collections::HashMap; -use std::hash::Hash; -use std::iter::Iterator; - -/// Return a `HashMap` of keys mapped to a list of their corresponding values. -/// -/// See [`.into_group_map()`](crate::Itertools::into_group_map) -/// for more information. -pub fn into_group_map(iter: I) -> HashMap> -where - I: Iterator, - K: Hash + Eq, -{ - let mut lookup = HashMap::new(); - - iter.for_each(|(key, val)| { - lookup.entry(key).or_insert_with(Vec::new).push(val); - }); - - lookup -} - -pub fn into_group_map_by(iter: I, mut f: F) -> HashMap> -where - I: Iterator, - K: Hash + Eq, - F: FnMut(&V) -> K, -{ - into_group_map(iter.map(|v| (f(&v), v))) -} diff --git a/vendor/itertools/src/groupbylazy.rs b/vendor/itertools/src/groupbylazy.rs deleted file mode 100644 index 5847c8f7d1f770..00000000000000 --- a/vendor/itertools/src/groupbylazy.rs +++ /dev/null @@ -1,613 +0,0 @@ -use alloc::vec::{self, Vec}; -use std::cell::{Cell, RefCell}; - -/// A trait to unify `FnMut` for `ChunkBy` with the chunk key in `IntoChunks` -trait KeyFunction { - type Key; - fn call_mut(&mut self, arg: A) -> Self::Key; -} - -impl KeyFunction for F -where - F: FnMut(A) -> K + ?Sized, -{ - type Key = K; - #[inline] - fn call_mut(&mut self, arg: A) -> Self::Key { - (*self)(arg) - } -} - -/// `ChunkIndex` acts like the grouping key function for `IntoChunks` -#[derive(Debug, Clone)] -struct ChunkIndex { - size: usize, - index: usize, - key: usize, -} - -impl ChunkIndex { - #[inline(always)] - fn new(size: usize) -> Self { - Self { - size, - index: 0, - key: 0, - } - } -} - -impl KeyFunction for ChunkIndex { - type Key = usize; - #[inline(always)] - fn call_mut(&mut self, _arg: A) -> Self::Key { - if self.index == self.size { - self.key += 1; - self.index = 0; - } - self.index += 1; - self.key - } -} - -#[derive(Clone)] -struct GroupInner -where - I: Iterator, -{ - key: F, - iter: I, - current_key: Option, - current_elt: Option, - /// flag set if iterator is exhausted - done: bool, - /// Index of group we are currently buffering or visiting - top_group: usize, - /// Least index for which we still have elements buffered - oldest_buffered_group: usize, - /// Group index for `buffer[0]` -- the slots - /// `bottom_group..oldest_buffered_group` are unused and will be erased when - /// that range is large enough. - bottom_group: usize, - /// Buffered groups, from `bottom_group` (index 0) to `top_group`. 
- buffer: Vec>, - /// index of last group iter that was dropped, - /// `usize::MAX` initially when no group was dropped - dropped_group: usize, -} - -impl GroupInner -where - I: Iterator, - F: for<'a> KeyFunction<&'a I::Item, Key = K>, - K: PartialEq, -{ - /// `client`: Index of group that requests next element - #[inline(always)] - fn step(&mut self, client: usize) -> Option { - /* - println!("client={}, bottom_group={}, oldest_buffered_group={}, top_group={}, buffers=[{}]", - client, self.bottom_group, self.oldest_buffered_group, - self.top_group, - self.buffer.iter().map(|elt| elt.len()).format(", ")); - */ - if client < self.oldest_buffered_group { - None - } else if client < self.top_group - || (client == self.top_group && self.buffer.len() > self.top_group - self.bottom_group) - { - self.lookup_buffer(client) - } else if self.done { - None - } else if self.top_group == client { - self.step_current() - } else { - self.step_buffering(client) - } - } - - #[inline(never)] - fn lookup_buffer(&mut self, client: usize) -> Option { - // if `bufidx` doesn't exist in self.buffer, it might be empty - let bufidx = client - self.bottom_group; - if client < self.oldest_buffered_group { - return None; - } - let elt = self.buffer.get_mut(bufidx).and_then(|queue| queue.next()); - if elt.is_none() && client == self.oldest_buffered_group { - // FIXME: VecDeque is unfortunately not zero allocation when empty, - // so we do this job manually. - // `bottom_group..oldest_buffered_group` is unused, and if it's large enough, erase it. - self.oldest_buffered_group += 1; - // skip forward further empty queues too - while self - .buffer - .get(self.oldest_buffered_group - self.bottom_group) - .map_or(false, |buf| buf.len() == 0) - { - self.oldest_buffered_group += 1; - } - - let nclear = self.oldest_buffered_group - self.bottom_group; - if nclear > 0 && nclear >= self.buffer.len() / 2 { - let mut i = 0; - self.buffer.retain(|buf| { - i += 1; - debug_assert!(buf.len() == 0 || i > nclear); - i > nclear - }); - self.bottom_group = self.oldest_buffered_group; - } - } - elt - } - - /// Take the next element from the iterator, and set the done - /// flag if exhausted. Must not be called after done. - #[inline(always)] - fn next_element(&mut self) -> Option { - debug_assert!(!self.done); - match self.iter.next() { - None => { - self.done = true; - None - } - otherwise => otherwise, - } - } - - #[inline(never)] - fn step_buffering(&mut self, client: usize) -> Option { - // requested a later group -- walk through the current group up to - // the requested group index, and buffer the elements (unless - // the group is marked as dropped). - // Because the `Groups` iterator is always the first to request - // each group index, client is the next index efter top_group. 
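For context, the buffering managed here is only exercised when callers keep more than one group iterator alive at a time; consumed in their original order, the groups stream straight from the source with no allocation. A minimal illustrative sketch of the public `Itertools::chunk_by` API that this machinery backs (usage example only, not part of the deleted file itself):

```
use itertools::Itertools;

fn main() {
    let data = vec![1, 1, 2, 2, 2, 3];

    // In-order consumption: each group is streamed directly from the
    // source iterator, so the internal buffer stays empty.
    for (key, group) in &data.iter().chunk_by(|&&x| x) {
        println!("{key}: {} element(s)", group.count());
    }

    // Holding two group iterators at once forces the remainder of the
    // earlier group into the internal buffer described above.
    let grouped = data.iter().chunk_by(|&&x| x);
    let mut groups = (&grouped).into_iter();
    let (_, first) = groups.next().unwrap();
    let (_, second) = groups.next().unwrap(); // first group is now buffered
    assert_eq!(second.count(), 3);
    assert_eq!(first.count(), 2);
}
```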
- debug_assert!(self.top_group + 1 == client); - let mut group = Vec::new(); - - if let Some(elt) = self.current_elt.take() { - if self.top_group != self.dropped_group { - group.push(elt); - } - } - let mut first_elt = None; // first element of the next group - - while let Some(elt) = self.next_element() { - let key = self.key.call_mut(&elt); - match self.current_key.take() { - None => {} - Some(old_key) => { - if old_key != key { - self.current_key = Some(key); - first_elt = Some(elt); - break; - } - } - } - self.current_key = Some(key); - if self.top_group != self.dropped_group { - group.push(elt); - } - } - - if self.top_group != self.dropped_group { - self.push_next_group(group); - } - if first_elt.is_some() { - self.top_group += 1; - debug_assert!(self.top_group == client); - } - first_elt - } - - fn push_next_group(&mut self, group: Vec) { - // When we add a new buffered group, fill up slots between oldest_buffered_group and top_group - while self.top_group - self.bottom_group > self.buffer.len() { - if self.buffer.is_empty() { - self.bottom_group += 1; - self.oldest_buffered_group += 1; - } else { - self.buffer.push(Vec::new().into_iter()); - } - } - self.buffer.push(group.into_iter()); - debug_assert!(self.top_group + 1 - self.bottom_group == self.buffer.len()); - } - - /// This is the immediate case, where we use no buffering - #[inline] - fn step_current(&mut self) -> Option { - debug_assert!(!self.done); - if let elt @ Some(..) = self.current_elt.take() { - return elt; - } - match self.next_element() { - None => None, - Some(elt) => { - let key = self.key.call_mut(&elt); - match self.current_key.take() { - None => {} - Some(old_key) => { - if old_key != key { - self.current_key = Some(key); - self.current_elt = Some(elt); - self.top_group += 1; - return None; - } - } - } - self.current_key = Some(key); - Some(elt) - } - } - } - - /// Request the just started groups' key. - /// - /// `client`: Index of group - /// - /// **Panics** if no group key is available. - fn group_key(&mut self, client: usize) -> K { - // This can only be called after we have just returned the first - // element of a group. - // Perform this by simply buffering one more element, grabbing the - // next key. - debug_assert!(!self.done); - debug_assert!(client == self.top_group); - debug_assert!(self.current_key.is_some()); - debug_assert!(self.current_elt.is_none()); - let old_key = self.current_key.take().unwrap(); - if let Some(elt) = self.next_element() { - let key = self.key.call_mut(&elt); - if old_key != key { - self.top_group += 1; - } - self.current_key = Some(key); - self.current_elt = Some(elt); - } - old_key - } -} - -impl GroupInner -where - I: Iterator, -{ - /// Called when a group is dropped - fn drop_group(&mut self, client: usize) { - // It's only useful to track the maximal index - if self.dropped_group == !0 || client > self.dropped_group { - self.dropped_group = client; - } - } -} - -#[deprecated(note = "Use `ChunkBy` instead", since = "0.13.0")] -/// See [`ChunkBy`](crate::structs::ChunkBy). -pub type GroupBy = ChunkBy; - -/// `ChunkBy` is the storage for the lazy grouping operation. -/// -/// If the groups are consumed in their original order, or if each -/// group is dropped without keeping it around, then `ChunkBy` uses -/// no allocations. It needs allocations only if several group iterators -/// are alive at the same time. -/// -/// This type implements [`IntoIterator`] (it is **not** an iterator -/// itself), because the group iterators need to borrow from this -/// value. 
It should be stored in a local variable or temporary and -/// iterated. -/// -/// See [`.chunk_by()`](crate::Itertools::chunk_by) for more information. -#[must_use = "iterator adaptors are lazy and do nothing unless consumed"] -pub struct ChunkBy -where - I: Iterator, -{ - inner: RefCell>, - // the group iterator's current index. Keep this in the main value - // so that simultaneous iterators all use the same state. - index: Cell, -} - -/// Create a new -pub fn new(iter: J, f: F) -> ChunkBy -where - J: IntoIterator, - F: FnMut(&J::Item) -> K, -{ - ChunkBy { - inner: RefCell::new(GroupInner { - key: f, - iter: iter.into_iter(), - current_key: None, - current_elt: None, - done: false, - top_group: 0, - oldest_buffered_group: 0, - bottom_group: 0, - buffer: Vec::new(), - dropped_group: !0, - }), - index: Cell::new(0), - } -} - -impl ChunkBy -where - I: Iterator, -{ - /// `client`: Index of group that requests next element - fn step(&self, client: usize) -> Option - where - F: FnMut(&I::Item) -> K, - K: PartialEq, - { - self.inner.borrow_mut().step(client) - } - - /// `client`: Index of group - fn drop_group(&self, client: usize) { - self.inner.borrow_mut().drop_group(client); - } -} - -impl<'a, K, I, F> IntoIterator for &'a ChunkBy -where - I: Iterator, - I::Item: 'a, - F: FnMut(&I::Item) -> K, - K: PartialEq, -{ - type Item = (K, Group<'a, K, I, F>); - type IntoIter = Groups<'a, K, I, F>; - - fn into_iter(self) -> Self::IntoIter { - Groups { parent: self } - } -} - -/// An iterator that yields the Group iterators. -/// -/// Iterator element type is `(K, Group)`: -/// the group's key `K` and the group's iterator. -/// -/// See [`.chunk_by()`](crate::Itertools::chunk_by) for more information. -#[must_use = "iterator adaptors are lazy and do nothing unless consumed"] -pub struct Groups<'a, K, I, F> -where - I: Iterator + 'a, - I::Item: 'a, - K: 'a, - F: 'a, -{ - parent: &'a ChunkBy, -} - -impl<'a, K, I, F> Iterator for Groups<'a, K, I, F> -where - I: Iterator, - I::Item: 'a, - F: FnMut(&I::Item) -> K, - K: PartialEq, -{ - type Item = (K, Group<'a, K, I, F>); - - #[inline] - fn next(&mut self) -> Option { - let index = self.parent.index.get(); - self.parent.index.set(index + 1); - let inner = &mut *self.parent.inner.borrow_mut(); - inner.step(index).map(|elt| { - let key = inner.group_key(index); - ( - key, - Group { - parent: self.parent, - index, - first: Some(elt), - }, - ) - }) - } -} - -/// An iterator for the elements in a single group. -/// -/// Iterator element type is `I::Item`. -pub struct Group<'a, K, I, F> -where - I: Iterator + 'a, - I::Item: 'a, - K: 'a, - F: 'a, -{ - parent: &'a ChunkBy, - index: usize, - first: Option, -} - -impl<'a, K, I, F> Drop for Group<'a, K, I, F> -where - I: Iterator, - I::Item: 'a, -{ - fn drop(&mut self) { - self.parent.drop_group(self.index); - } -} - -impl<'a, K, I, F> Iterator for Group<'a, K, I, F> -where - I: Iterator, - I::Item: 'a, - F: FnMut(&I::Item) -> K, - K: PartialEq, -{ - type Item = I::Item; - #[inline] - fn next(&mut self) -> Option { - if let elt @ Some(..) 
= self.first.take() { - return elt; - } - self.parent.step(self.index) - } -} - -///// IntoChunks ///// - -/// Create a new -pub fn new_chunks(iter: J, size: usize) -> IntoChunks -where - J: IntoIterator, -{ - IntoChunks { - inner: RefCell::new(GroupInner { - key: ChunkIndex::new(size), - iter: iter.into_iter(), - current_key: None, - current_elt: None, - done: false, - top_group: 0, - oldest_buffered_group: 0, - bottom_group: 0, - buffer: Vec::new(), - dropped_group: !0, - }), - index: Cell::new(0), - } -} - -/// `ChunkLazy` is the storage for a lazy chunking operation. -/// -/// `IntoChunks` behaves just like `ChunkBy`: it is iterable, and -/// it only buffers if several chunk iterators are alive at the same time. -/// -/// This type implements [`IntoIterator`] (it is **not** an iterator -/// itself), because the chunk iterators need to borrow from this -/// value. It should be stored in a local variable or temporary and -/// iterated. -/// -/// Iterator element type is `Chunk`, each chunk's iterator. -/// -/// See [`.chunks()`](crate::Itertools::chunks) for more information. -#[must_use = "iterator adaptors are lazy and do nothing unless consumed"] -pub struct IntoChunks -where - I: Iterator, -{ - inner: RefCell>, - // the chunk iterator's current index. Keep this in the main value - // so that simultaneous iterators all use the same state. - index: Cell, -} - -impl Clone for IntoChunks -where - I: Clone + Iterator, - I::Item: Clone, -{ - clone_fields!(inner, index); -} - -impl IntoChunks -where - I: Iterator, -{ - /// `client`: Index of chunk that requests next element - fn step(&self, client: usize) -> Option { - self.inner.borrow_mut().step(client) - } - - /// `client`: Index of chunk - fn drop_group(&self, client: usize) { - self.inner.borrow_mut().drop_group(client); - } -} - -impl<'a, I> IntoIterator for &'a IntoChunks -where - I: Iterator, - I::Item: 'a, -{ - type Item = Chunk<'a, I>; - type IntoIter = Chunks<'a, I>; - - fn into_iter(self) -> Self::IntoIter { - Chunks { parent: self } - } -} - -/// An iterator that yields the Chunk iterators. -/// -/// Iterator element type is `Chunk`. -/// -/// See [`.chunks()`](crate::Itertools::chunks) for more information. -#[must_use = "iterator adaptors are lazy and do nothing unless consumed"] -#[derive(Clone)] -pub struct Chunks<'a, I> -where - I: Iterator + 'a, - I::Item: 'a, -{ - parent: &'a IntoChunks, -} - -impl<'a, I> Iterator for Chunks<'a, I> -where - I: Iterator, - I::Item: 'a, -{ - type Item = Chunk<'a, I>; - - #[inline] - fn next(&mut self) -> Option { - let index = self.parent.index.get(); - self.parent.index.set(index + 1); - let inner = &mut *self.parent.inner.borrow_mut(); - inner.step(index).map(|elt| Chunk { - parent: self.parent, - index, - first: Some(elt), - }) - } -} - -/// An iterator for the elements in a single chunk. -/// -/// Iterator element type is `I::Item`. -pub struct Chunk<'a, I> -where - I: Iterator + 'a, - I::Item: 'a, -{ - parent: &'a IntoChunks, - index: usize, - first: Option, -} - -impl<'a, I> Drop for Chunk<'a, I> -where - I: Iterator, - I::Item: 'a, -{ - fn drop(&mut self) { - self.parent.drop_group(self.index); - } -} - -impl<'a, I> Iterator for Chunk<'a, I> -where - I: Iterator, - I::Item: 'a, -{ - type Item = I::Item; - #[inline] - fn next(&mut self) -> Option { - if let elt @ Some(..) 
= self.first.take() { - return elt; - } - self.parent.step(self.index) - } -} diff --git a/vendor/itertools/src/grouping_map.rs b/vendor/itertools/src/grouping_map.rs deleted file mode 100644 index b4aae9ecf1ba32..00000000000000 --- a/vendor/itertools/src/grouping_map.rs +++ /dev/null @@ -1,614 +0,0 @@ -#![cfg(feature = "use_std")] - -use crate::{ - adaptors::map::{MapSpecialCase, MapSpecialCaseFn}, - MinMaxResult, -}; -use std::cmp::Ordering; -use std::collections::HashMap; -use std::hash::Hash; -use std::iter::Iterator; -use std::ops::{Add, Mul}; - -/// A wrapper to allow for an easy [`into_grouping_map_by`](crate::Itertools::into_grouping_map_by) -pub type MapForGrouping = MapSpecialCase>; - -#[derive(Clone)] -pub struct GroupingMapFn(F); - -impl std::fmt::Debug for GroupingMapFn { - debug_fmt_fields!(GroupingMapFn,); -} - -impl K> MapSpecialCaseFn for GroupingMapFn { - type Out = (K, V); - fn call(&mut self, v: V) -> Self::Out { - ((self.0)(&v), v) - } -} - -pub(crate) fn new_map_for_grouping K>( - iter: I, - key_mapper: F, -) -> MapForGrouping { - MapSpecialCase { - iter, - f: GroupingMapFn(key_mapper), - } -} - -/// Creates a new `GroupingMap` from `iter` -pub fn new(iter: I) -> GroupingMap -where - I: Iterator, - K: Hash + Eq, -{ - GroupingMap { iter } -} - -/// `GroupingMapBy` is an intermediate struct for efficient group-and-fold operations. -/// -/// See [`GroupingMap`] for more informations. -pub type GroupingMapBy = GroupingMap>; - -/// `GroupingMap` is an intermediate struct for efficient group-and-fold operations. -/// It groups elements by their key and at the same time fold each group -/// using some aggregating operation. -/// -/// No method on this struct performs temporary allocations. -#[derive(Clone, Debug)] -#[must_use = "GroupingMap is lazy and do nothing unless consumed"] -pub struct GroupingMap { - iter: I, -} - -impl GroupingMap -where - I: Iterator, - K: Hash + Eq, -{ - /// This is the generic way to perform any operation on a `GroupingMap`. - /// It's suggested to use this method only to implement custom operations - /// when the already provided ones are not enough. - /// - /// Groups elements from the `GroupingMap` source by key and applies `operation` to the elements - /// of each group sequentially, passing the previously accumulated value, a reference to the key - /// and the current element as arguments, and stores the results in an `HashMap`. - /// - /// The `operation` function is invoked on each element with the following parameters: - /// - the current value of the accumulator of the group if there is currently one; - /// - a reference to the key of the group this element belongs to; - /// - the element from the source being aggregated; - /// - /// If `operation` returns `Some(element)` then the accumulator is updated with `element`, - /// otherwise the previous accumulation is discarded. - /// - /// Return a `HashMap` associating the key of each group with the result of aggregation of - /// that group's elements. If the aggregation of the last element of a group discards the - /// accumulator then there won't be an entry associated to that group's key. 
- /// - /// ``` - /// use itertools::Itertools; - /// - /// let data = vec![2, 8, 5, 7, 9, 0, 4, 10]; - /// let lookup = data.into_iter() - /// .into_grouping_map_by(|&n| n % 4) - /// .aggregate(|acc, _key, val| { - /// if val == 0 || val == 10 { - /// None - /// } else { - /// Some(acc.unwrap_or(0) + val) - /// } - /// }); - /// - /// assert_eq!(lookup[&0], 4); // 0 resets the accumulator so only 4 is summed - /// assert_eq!(lookup[&1], 5 + 9); - /// assert_eq!(lookup.get(&2), None); // 10 resets the accumulator and nothing is summed afterward - /// assert_eq!(lookup[&3], 7); - /// assert_eq!(lookup.len(), 3); // The final keys are only 0, 1 and 2 - /// ``` - pub fn aggregate(self, mut operation: FO) -> HashMap - where - FO: FnMut(Option, &K, V) -> Option, - { - let mut destination_map = HashMap::new(); - - self.iter.for_each(|(key, val)| { - let acc = destination_map.remove(&key); - if let Some(op_res) = operation(acc, &key, val) { - destination_map.insert(key, op_res); - } - }); - - destination_map - } - - /// Groups elements from the `GroupingMap` source by key and applies `operation` to the elements - /// of each group sequentially, passing the previously accumulated value, a reference to the key - /// and the current element as arguments, and stores the results in a new map. - /// - /// `init` is called to obtain the initial value of each accumulator. - /// - /// `operation` is a function that is invoked on each element with the following parameters: - /// - the current value of the accumulator of the group; - /// - a reference to the key of the group this element belongs to; - /// - the element from the source being accumulated. - /// - /// Return a `HashMap` associating the key of each group with the result of folding that group's elements. - /// - /// ``` - /// use itertools::Itertools; - /// - /// #[derive(Debug, Default)] - /// struct Accumulator { - /// acc: usize, - /// } - /// - /// let lookup = (1..=7) - /// .into_grouping_map_by(|&n| n % 3) - /// .fold_with(|_key, _val| Default::default(), |Accumulator { acc }, _key, val| { - /// let acc = acc + val; - /// Accumulator { acc } - /// }); - /// - /// assert_eq!(lookup[&0].acc, 3 + 6); - /// assert_eq!(lookup[&1].acc, 1 + 4 + 7); - /// assert_eq!(lookup[&2].acc, 2 + 5); - /// assert_eq!(lookup.len(), 3); - /// ``` - pub fn fold_with(self, mut init: FI, mut operation: FO) -> HashMap - where - FI: FnMut(&K, &V) -> R, - FO: FnMut(R, &K, V) -> R, - { - self.aggregate(|acc, key, val| { - let acc = acc.unwrap_or_else(|| init(key, &val)); - Some(operation(acc, key, val)) - }) - } - - /// Groups elements from the `GroupingMap` source by key and applies `operation` to the elements - /// of each group sequentially, passing the previously accumulated value, a reference to the key - /// and the current element as arguments, and stores the results in a new map. - /// - /// `init` is the value from which will be cloned the initial value of each accumulator. - /// - /// `operation` is a function that is invoked on each element with the following parameters: - /// - the current value of the accumulator of the group; - /// - a reference to the key of the group this element belongs to; - /// - the element from the source being accumulated. - /// - /// Return a `HashMap` associating the key of each group with the result of folding that group's elements. 
- /// - /// ``` - /// use itertools::Itertools; - /// - /// let lookup = (1..=7) - /// .into_grouping_map_by(|&n| n % 3) - /// .fold(0, |acc, _key, val| acc + val); - /// - /// assert_eq!(lookup[&0], 3 + 6); - /// assert_eq!(lookup[&1], 1 + 4 + 7); - /// assert_eq!(lookup[&2], 2 + 5); - /// assert_eq!(lookup.len(), 3); - /// ``` - pub fn fold(self, init: R, operation: FO) -> HashMap - where - R: Clone, - FO: FnMut(R, &K, V) -> R, - { - self.fold_with(|_, _| init.clone(), operation) - } - - /// Groups elements from the `GroupingMap` source by key and applies `operation` to the elements - /// of each group sequentially, passing the previously accumulated value, a reference to the key - /// and the current element as arguments, and stores the results in a new map. - /// - /// This is similar to [`fold`] but the initial value of the accumulator is the first element of the group. - /// - /// `operation` is a function that is invoked on each element with the following parameters: - /// - the current value of the accumulator of the group; - /// - a reference to the key of the group this element belongs to; - /// - the element from the source being accumulated. - /// - /// Return a `HashMap` associating the key of each group with the result of folding that group's elements. - /// - /// [`fold`]: GroupingMap::fold - /// - /// ``` - /// use itertools::Itertools; - /// - /// let lookup = (1..=7) - /// .into_grouping_map_by(|&n| n % 3) - /// .reduce(|acc, _key, val| acc + val); - /// - /// assert_eq!(lookup[&0], 3 + 6); - /// assert_eq!(lookup[&1], 1 + 4 + 7); - /// assert_eq!(lookup[&2], 2 + 5); - /// assert_eq!(lookup.len(), 3); - /// ``` - pub fn reduce(self, mut operation: FO) -> HashMap - where - FO: FnMut(V, &K, V) -> V, - { - self.aggregate(|acc, key, val| { - Some(match acc { - Some(acc) => operation(acc, key, val), - None => val, - }) - }) - } - - /// See [`.reduce()`](GroupingMap::reduce). - #[deprecated(note = "Use .reduce() instead", since = "0.13.0")] - pub fn fold_first(self, operation: FO) -> HashMap - where - FO: FnMut(V, &K, V) -> V, - { - self.reduce(operation) - } - - /// Groups elements from the `GroupingMap` source by key and collects the elements of each group in - /// an instance of `C`. The iteration order is preserved when inserting elements. - /// - /// Return a `HashMap` associating the key of each group with the collection containing that group's elements. - /// - /// ``` - /// use itertools::Itertools; - /// use std::collections::HashSet; - /// - /// let lookup = vec![0, 1, 2, 3, 4, 5, 6, 2, 3, 6].into_iter() - /// .into_grouping_map_by(|&n| n % 3) - /// .collect::>(); - /// - /// assert_eq!(lookup[&0], vec![0, 3, 6].into_iter().collect::>()); - /// assert_eq!(lookup[&1], vec![1, 4].into_iter().collect::>()); - /// assert_eq!(lookup[&2], vec![2, 5].into_iter().collect::>()); - /// assert_eq!(lookup.len(), 3); - /// ``` - pub fn collect(self) -> HashMap - where - C: Default + Extend, - { - let mut destination_map = HashMap::new(); - - self.iter.for_each(|(key, val)| { - destination_map - .entry(key) - .or_insert_with(C::default) - .extend(Some(val)); - }); - - destination_map - } - - /// Groups elements from the `GroupingMap` source by key and finds the maximum of each group. - /// - /// If several elements are equally maximum, the last element is picked. - /// - /// Returns a `HashMap` associating the key of each group with the maximum of that group's elements. 
- /// - /// ``` - /// use itertools::Itertools; - /// - /// let lookup = vec![1, 3, 4, 5, 7, 8, 9, 12].into_iter() - /// .into_grouping_map_by(|&n| n % 3) - /// .max(); - /// - /// assert_eq!(lookup[&0], 12); - /// assert_eq!(lookup[&1], 7); - /// assert_eq!(lookup[&2], 8); - /// assert_eq!(lookup.len(), 3); - /// ``` - pub fn max(self) -> HashMap - where - V: Ord, - { - self.max_by(|_, v1, v2| V::cmp(v1, v2)) - } - - /// Groups elements from the `GroupingMap` source by key and finds the maximum of each group - /// with respect to the specified comparison function. - /// - /// If several elements are equally maximum, the last element is picked. - /// - /// Returns a `HashMap` associating the key of each group with the maximum of that group's elements. - /// - /// ``` - /// use itertools::Itertools; - /// - /// let lookup = vec![1, 3, 4, 5, 7, 8, 9, 12].into_iter() - /// .into_grouping_map_by(|&n| n % 3) - /// .max_by(|_key, x, y| y.cmp(x)); - /// - /// assert_eq!(lookup[&0], 3); - /// assert_eq!(lookup[&1], 1); - /// assert_eq!(lookup[&2], 5); - /// assert_eq!(lookup.len(), 3); - /// ``` - pub fn max_by(self, mut compare: F) -> HashMap - where - F: FnMut(&K, &V, &V) -> Ordering, - { - self.reduce(|acc, key, val| match compare(key, &acc, &val) { - Ordering::Less | Ordering::Equal => val, - Ordering::Greater => acc, - }) - } - - /// Groups elements from the `GroupingMap` source by key and finds the element of each group - /// that gives the maximum from the specified function. - /// - /// If several elements are equally maximum, the last element is picked. - /// - /// Returns a `HashMap` associating the key of each group with the maximum of that group's elements. - /// - /// ``` - /// use itertools::Itertools; - /// - /// let lookup = vec![1, 3, 4, 5, 7, 8, 9, 12].into_iter() - /// .into_grouping_map_by(|&n| n % 3) - /// .max_by_key(|_key, &val| val % 4); - /// - /// assert_eq!(lookup[&0], 3); - /// assert_eq!(lookup[&1], 7); - /// assert_eq!(lookup[&2], 5); - /// assert_eq!(lookup.len(), 3); - /// ``` - pub fn max_by_key(self, mut f: F) -> HashMap - where - F: FnMut(&K, &V) -> CK, - CK: Ord, - { - self.max_by(|key, v1, v2| f(key, v1).cmp(&f(key, v2))) - } - - /// Groups elements from the `GroupingMap` source by key and finds the minimum of each group. - /// - /// If several elements are equally minimum, the first element is picked. - /// - /// Returns a `HashMap` associating the key of each group with the minimum of that group's elements. - /// - /// ``` - /// use itertools::Itertools; - /// - /// let lookup = vec![1, 3, 4, 5, 7, 8, 9, 12].into_iter() - /// .into_grouping_map_by(|&n| n % 3) - /// .min(); - /// - /// assert_eq!(lookup[&0], 3); - /// assert_eq!(lookup[&1], 1); - /// assert_eq!(lookup[&2], 5); - /// assert_eq!(lookup.len(), 3); - /// ``` - pub fn min(self) -> HashMap - where - V: Ord, - { - self.min_by(|_, v1, v2| V::cmp(v1, v2)) - } - - /// Groups elements from the `GroupingMap` source by key and finds the minimum of each group - /// with respect to the specified comparison function. - /// - /// If several elements are equally minimum, the first element is picked. - /// - /// Returns a `HashMap` associating the key of each group with the minimum of that group's elements. 
- /// - /// ``` - /// use itertools::Itertools; - /// - /// let lookup = vec![1, 3, 4, 5, 7, 8, 9, 12].into_iter() - /// .into_grouping_map_by(|&n| n % 3) - /// .min_by(|_key, x, y| y.cmp(x)); - /// - /// assert_eq!(lookup[&0], 12); - /// assert_eq!(lookup[&1], 7); - /// assert_eq!(lookup[&2], 8); - /// assert_eq!(lookup.len(), 3); - /// ``` - pub fn min_by(self, mut compare: F) -> HashMap - where - F: FnMut(&K, &V, &V) -> Ordering, - { - self.reduce(|acc, key, val| match compare(key, &acc, &val) { - Ordering::Less | Ordering::Equal => acc, - Ordering::Greater => val, - }) - } - - /// Groups elements from the `GroupingMap` source by key and finds the element of each group - /// that gives the minimum from the specified function. - /// - /// If several elements are equally minimum, the first element is picked. - /// - /// Returns a `HashMap` associating the key of each group with the minimum of that group's elements. - /// - /// ``` - /// use itertools::Itertools; - /// - /// let lookup = vec![1, 3, 4, 5, 7, 8, 9, 12].into_iter() - /// .into_grouping_map_by(|&n| n % 3) - /// .min_by_key(|_key, &val| val % 4); - /// - /// assert_eq!(lookup[&0], 12); - /// assert_eq!(lookup[&1], 4); - /// assert_eq!(lookup[&2], 8); - /// assert_eq!(lookup.len(), 3); - /// ``` - pub fn min_by_key(self, mut f: F) -> HashMap - where - F: FnMut(&K, &V) -> CK, - CK: Ord, - { - self.min_by(|key, v1, v2| f(key, v1).cmp(&f(key, v2))) - } - - /// Groups elements from the `GroupingMap` source by key and find the maximum and minimum of - /// each group. - /// - /// If several elements are equally maximum, the last element is picked. - /// If several elements are equally minimum, the first element is picked. - /// - /// See [`Itertools::minmax`](crate::Itertools::minmax) for the non-grouping version. - /// - /// Differences from the non grouping version: - /// - It never produces a `MinMaxResult::NoElements` - /// - It doesn't have any speedup - /// - /// Returns a `HashMap` associating the key of each group with the minimum and maximum of that group's elements. - /// - /// ``` - /// use itertools::Itertools; - /// use itertools::MinMaxResult::{OneElement, MinMax}; - /// - /// let lookup = vec![1, 3, 4, 5, 7, 9, 12].into_iter() - /// .into_grouping_map_by(|&n| n % 3) - /// .minmax(); - /// - /// assert_eq!(lookup[&0], MinMax(3, 12)); - /// assert_eq!(lookup[&1], MinMax(1, 7)); - /// assert_eq!(lookup[&2], OneElement(5)); - /// assert_eq!(lookup.len(), 3); - /// ``` - pub fn minmax(self) -> HashMap> - where - V: Ord, - { - self.minmax_by(|_, v1, v2| V::cmp(v1, v2)) - } - - /// Groups elements from the `GroupingMap` source by key and find the maximum and minimum of - /// each group with respect to the specified comparison function. - /// - /// If several elements are equally maximum, the last element is picked. - /// If several elements are equally minimum, the first element is picked. - /// - /// It has the same differences from the non-grouping version as `minmax`. - /// - /// Returns a `HashMap` associating the key of each group with the minimum and maximum of that group's elements. 
- /// - /// ``` - /// use itertools::Itertools; - /// use itertools::MinMaxResult::{OneElement, MinMax}; - /// - /// let lookup = vec![1, 3, 4, 5, 7, 9, 12].into_iter() - /// .into_grouping_map_by(|&n| n % 3) - /// .minmax_by(|_key, x, y| y.cmp(x)); - /// - /// assert_eq!(lookup[&0], MinMax(12, 3)); - /// assert_eq!(lookup[&1], MinMax(7, 1)); - /// assert_eq!(lookup[&2], OneElement(5)); - /// assert_eq!(lookup.len(), 3); - /// ``` - pub fn minmax_by(self, mut compare: F) -> HashMap> - where - F: FnMut(&K, &V, &V) -> Ordering, - { - self.aggregate(|acc, key, val| { - Some(match acc { - Some(MinMaxResult::OneElement(e)) => { - if compare(key, &val, &e) == Ordering::Less { - MinMaxResult::MinMax(val, e) - } else { - MinMaxResult::MinMax(e, val) - } - } - Some(MinMaxResult::MinMax(min, max)) => { - if compare(key, &val, &min) == Ordering::Less { - MinMaxResult::MinMax(val, max) - } else if compare(key, &val, &max) != Ordering::Less { - MinMaxResult::MinMax(min, val) - } else { - MinMaxResult::MinMax(min, max) - } - } - None => MinMaxResult::OneElement(val), - Some(MinMaxResult::NoElements) => unreachable!(), - }) - }) - } - - /// Groups elements from the `GroupingMap` source by key and find the elements of each group - /// that gives the minimum and maximum from the specified function. - /// - /// If several elements are equally maximum, the last element is picked. - /// If several elements are equally minimum, the first element is picked. - /// - /// It has the same differences from the non-grouping version as `minmax`. - /// - /// Returns a `HashMap` associating the key of each group with the minimum and maximum of that group's elements. - /// - /// ``` - /// use itertools::Itertools; - /// use itertools::MinMaxResult::{OneElement, MinMax}; - /// - /// let lookup = vec![1, 3, 4, 5, 7, 9, 12].into_iter() - /// .into_grouping_map_by(|&n| n % 3) - /// .minmax_by_key(|_key, &val| val % 4); - /// - /// assert_eq!(lookup[&0], MinMax(12, 3)); - /// assert_eq!(lookup[&1], MinMax(4, 7)); - /// assert_eq!(lookup[&2], OneElement(5)); - /// assert_eq!(lookup.len(), 3); - /// ``` - pub fn minmax_by_key(self, mut f: F) -> HashMap> - where - F: FnMut(&K, &V) -> CK, - CK: Ord, - { - self.minmax_by(|key, v1, v2| f(key, v1).cmp(&f(key, v2))) - } - - /// Groups elements from the `GroupingMap` source by key and sums them. - /// - /// This is just a shorthand for `self.reduce(|acc, _, val| acc + val)`. - /// It is more limited than `Iterator::sum` since it doesn't use the `Sum` trait. - /// - /// Returns a `HashMap` associating the key of each group with the sum of that group's elements. - /// - /// ``` - /// use itertools::Itertools; - /// - /// let lookup = vec![1, 3, 4, 5, 7, 8, 9, 12].into_iter() - /// .into_grouping_map_by(|&n| n % 3) - /// .sum(); - /// - /// assert_eq!(lookup[&0], 3 + 9 + 12); - /// assert_eq!(lookup[&1], 1 + 4 + 7); - /// assert_eq!(lookup[&2], 5 + 8); - /// assert_eq!(lookup.len(), 3); - /// ``` - pub fn sum(self) -> HashMap - where - V: Add, - { - self.reduce(|acc, _, val| acc + val) - } - - /// Groups elements from the `GroupingMap` source by key and multiply them. - /// - /// This is just a shorthand for `self.reduce(|acc, _, val| acc * val)`. - /// It is more limited than `Iterator::product` since it doesn't use the `Product` trait. - /// - /// Returns a `HashMap` associating the key of each group with the product of that group's elements. 
- /// - /// ``` - /// use itertools::Itertools; - /// - /// let lookup = vec![1, 3, 4, 5, 7, 8, 9, 12].into_iter() - /// .into_grouping_map_by(|&n| n % 3) - /// .product(); - /// - /// assert_eq!(lookup[&0], 3 * 9 * 12); - /// assert_eq!(lookup[&1], 1 * 4 * 7); - /// assert_eq!(lookup[&2], 5 * 8); - /// assert_eq!(lookup.len(), 3); - /// ``` - pub fn product(self) -> HashMap - where - V: Mul, - { - self.reduce(|acc, _, val| acc * val) - } -} diff --git a/vendor/itertools/src/impl_macros.rs b/vendor/itertools/src/impl_macros.rs deleted file mode 100644 index 3db5ba021967c8..00000000000000 --- a/vendor/itertools/src/impl_macros.rs +++ /dev/null @@ -1,34 +0,0 @@ -//! -//! Implementation's internal macros - -macro_rules! debug_fmt_fields { - ($tyname:ident, $($($field:tt/*TODO ideally we would accept ident or tuple element here*/).+),*) => { - fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result { - f.debug_struct(stringify!($tyname)) - $( - .field(stringify!($($field).+), &self.$($field).+) - )* - .finish() - } - } -} - -macro_rules! clone_fields { - ($($field:ident),*) => { - #[inline] // TODO is this sensible? - fn clone(&self) -> Self { - Self { - $($field: self.$field.clone(),)* - } - } - } -} - -macro_rules! ignore_ident{ - ($id:ident, $($t:tt)*) => {$($t)*}; -} - -macro_rules! count_ident { - () => {0}; - ($i0:ident $($i:ident)*) => {1 + count_ident!($($i)*)}; -} diff --git a/vendor/itertools/src/intersperse.rs b/vendor/itertools/src/intersperse.rs deleted file mode 100644 index 5f4f7938ad052e..00000000000000 --- a/vendor/itertools/src/intersperse.rs +++ /dev/null @@ -1,142 +0,0 @@ -use super::size_hint; -use std::iter::{Fuse, FusedIterator}; - -pub trait IntersperseElement { - fn generate(&mut self) -> Item; -} - -#[derive(Debug, Clone)] -pub struct IntersperseElementSimple(Item); - -impl IntersperseElement for IntersperseElementSimple { - fn generate(&mut self) -> Item { - self.0.clone() - } -} - -/// An iterator adaptor to insert a particular value -/// between each element of the adapted iterator. -/// -/// Iterator element type is `I::Item` -/// -/// This iterator is *fused*. -/// -/// See [`.intersperse()`](crate::Itertools::intersperse) for more information. -pub type Intersperse = IntersperseWith::Item>>; - -/// Create a new Intersperse iterator -pub fn intersperse(iter: I, elt: I::Item) -> Intersperse -where - I: Iterator, -{ - intersperse_with(iter, IntersperseElementSimple(elt)) -} - -impl Item> IntersperseElement for F { - fn generate(&mut self) -> Item { - self() - } -} - -/// An iterator adaptor to insert a particular value created by a function -/// between each element of the adapted iterator. -/// -/// Iterator element type is `I::Item` -/// -/// This iterator is *fused*. -/// -/// See [`.intersperse_with()`](crate::Itertools::intersperse_with) for more information. -#[must_use = "iterator adaptors are lazy and do nothing unless consumed"] -#[derive(Clone, Debug)] -pub struct IntersperseWith -where - I: Iterator, -{ - element: ElemF, - iter: Fuse, - /// `peek` is None while no item have been taken out of `iter` (at definition). - /// Then `peek` will alternatively be `Some(None)` and `Some(Some(item))`, - /// where `None` indicates it's time to generate from `element` (unless `iter` is empty). 
- peek: Option>, -} - -/// Create a new `IntersperseWith` iterator -pub fn intersperse_with(iter: I, elt: ElemF) -> IntersperseWith -where - I: Iterator, -{ - IntersperseWith { - peek: None, - iter: iter.fuse(), - element: elt, - } -} - -impl Iterator for IntersperseWith -where - I: Iterator, - ElemF: IntersperseElement, -{ - type Item = I::Item; - #[inline] - fn next(&mut self) -> Option { - let Self { - element, - iter, - peek, - } = self; - match peek { - Some(item @ Some(_)) => item.take(), - Some(None) => match iter.next() { - new @ Some(_) => { - *peek = Some(new); - Some(element.generate()) - } - None => None, - }, - None => { - *peek = Some(None); - iter.next() - } - } - } - - fn size_hint(&self) -> (usize, Option) { - let mut sh = self.iter.size_hint(); - sh = size_hint::add(sh, sh); - match self.peek { - Some(Some(_)) => size_hint::add_scalar(sh, 1), - Some(None) => sh, - None => size_hint::sub_scalar(sh, 1), - } - } - - fn fold(self, init: B, mut f: F) -> B - where - Self: Sized, - F: FnMut(B, Self::Item) -> B, - { - let Self { - mut element, - mut iter, - peek, - } = self; - let mut accum = init; - - if let Some(x) = peek.unwrap_or_else(|| iter.next()) { - accum = f(accum, x); - } - - iter.fold(accum, |accum, x| { - let accum = f(accum, element.generate()); - f(accum, x) - }) - } -} - -impl FusedIterator for IntersperseWith -where - I: Iterator, - ElemF: IntersperseElement, -{ -} diff --git a/vendor/itertools/src/iter_index.rs b/vendor/itertools/src/iter_index.rs deleted file mode 100644 index aadaa72a766912..00000000000000 --- a/vendor/itertools/src/iter_index.rs +++ /dev/null @@ -1,116 +0,0 @@ -use core::iter::{Skip, Take}; -use core::ops::{Range, RangeFrom, RangeFull, RangeInclusive, RangeTo, RangeToInclusive}; - -#[cfg(doc)] -use crate::Itertools; - -mod private_iter_index { - use core::ops; - - pub trait Sealed {} - - impl Sealed for ops::Range {} - impl Sealed for ops::RangeInclusive {} - impl Sealed for ops::RangeTo {} - impl Sealed for ops::RangeToInclusive {} - impl Sealed for ops::RangeFrom {} - impl Sealed for ops::RangeFull {} -} - -/// Used by [`Itertools::get`] to know which iterator -/// to turn different ranges into. -pub trait IteratorIndex: private_iter_index::Sealed -where - I: Iterator, -{ - /// The type returned for this type of index. - type Output: Iterator; - - /// Returns an adapted iterator for the current index. - /// - /// Prefer calling [`Itertools::get`] instead - /// of calling this directly. 
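Each range type here maps onto a plain `skip`/`take` composition of the adapted iterator, so `get(2..5)` behaves like `take(5).skip(2)`, as the `Range<usize>` impl just below shows. A small illustrative sketch of the public `Itertools::get` entry point these impls serve (usage example only, not part of the deleted file itself):

```
use itertools::Itertools;

fn main() {
    let letters = ['a', 'b', 'c', 'd', 'e', 'f'];

    // `get(2..5)` composes into `take(5).skip(2)`: indices 2, 3 and 4.
    itertools::assert_equal(letters.iter().get(2..5), &letters[2..5]);

    // A `RangeFrom` only needs `skip`, and `..` returns the iterator unchanged.
    itertools::assert_equal(letters.iter().get(4..), &letters[4..]);
    itertools::assert_equal(letters.iter().get(..), &letters[..]);
}
```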
- fn index(self, from: I) -> Self::Output; -} - -impl IteratorIndex for Range -where - I: Iterator, -{ - type Output = Skip>; - - fn index(self, iter: I) -> Self::Output { - iter.take(self.end).skip(self.start) - } -} - -impl IteratorIndex for RangeInclusive -where - I: Iterator, -{ - type Output = Take>; - - fn index(self, iter: I) -> Self::Output { - // end - start + 1 without overflowing if possible - let length = if *self.end() == usize::MAX { - assert_ne!(*self.start(), 0); - self.end() - self.start() + 1 - } else { - (self.end() + 1).saturating_sub(*self.start()) - }; - iter.skip(*self.start()).take(length) - } -} - -impl IteratorIndex for RangeTo -where - I: Iterator, -{ - type Output = Take; - - fn index(self, iter: I) -> Self::Output { - iter.take(self.end) - } -} - -impl IteratorIndex for RangeToInclusive -where - I: Iterator, -{ - type Output = Take; - - fn index(self, iter: I) -> Self::Output { - assert_ne!(self.end, usize::MAX); - iter.take(self.end + 1) - } -} - -impl IteratorIndex for RangeFrom -where - I: Iterator, -{ - type Output = Skip; - - fn index(self, iter: I) -> Self::Output { - iter.skip(self.start) - } -} - -impl IteratorIndex for RangeFull -where - I: Iterator, -{ - type Output = I; - - fn index(self, iter: I) -> Self::Output { - iter - } -} - -pub fn get(iter: I, index: R) -> R::Output -where - I: IntoIterator, - R: IteratorIndex, -{ - index.index(iter.into_iter()) -} diff --git a/vendor/itertools/src/k_smallest.rs b/vendor/itertools/src/k_smallest.rs deleted file mode 100644 index 7b2f62ea124bf8..00000000000000 --- a/vendor/itertools/src/k_smallest.rs +++ /dev/null @@ -1,98 +0,0 @@ -use alloc::vec::Vec; -use core::cmp::Ordering; - -/// Consumes a given iterator, returning the minimum elements in **ascending** order. -pub(crate) fn k_smallest_general(iter: I, k: usize, mut comparator: F) -> Vec -where - I: Iterator, - F: FnMut(&I::Item, &I::Item) -> Ordering, -{ - /// Sift the element currently at `origin` away from the root until it is properly ordered. - /// - /// This will leave **larger** elements closer to the root of the heap. - fn sift_down(heap: &mut [T], is_less_than: &mut F, mut origin: usize) - where - F: FnMut(&T, &T) -> bool, - { - #[inline] - fn children_of(n: usize) -> (usize, usize) { - (2 * n + 1, 2 * n + 2) - } - - while origin < heap.len() { - let (left_idx, right_idx) = children_of(origin); - if left_idx >= heap.len() { - return; - } - - let replacement_idx = - if right_idx < heap.len() && is_less_than(&heap[left_idx], &heap[right_idx]) { - right_idx - } else { - left_idx - }; - - if is_less_than(&heap[origin], &heap[replacement_idx]) { - heap.swap(origin, replacement_idx); - origin = replacement_idx; - } else { - return; - } - } - } - - if k == 0 { - iter.last(); - return Vec::new(); - } - if k == 1 { - return iter.min_by(comparator).into_iter().collect(); - } - let mut iter = iter.fuse(); - let mut storage: Vec = iter.by_ref().take(k).collect(); - - let mut is_less_than = move |a: &_, b: &_| comparator(a, b) == Ordering::Less; - - // Rearrange the storage into a valid heap by reordering from the second-bottom-most layer up to the root. - // Slightly faster than ordering on each insert, but only by a factor of lg(k). - // The resulting heap has the **largest** item on top. 
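// Editorial sketch, not part of the vendored source or of this patch: the same
// "keep the k smallest elements seen so far in a max-heap" idea as this
// function, written against std's BinaryHeap instead of the in-place sift_down
// above. The name and the i32 element type are illustrative only.
fn k_smallest_sketch(input: impl IntoIterator<Item = i32>, k: usize) -> Vec<i32> {
    use std::collections::BinaryHeap;
    let mut heap: BinaryHeap<i32> = BinaryHeap::with_capacity(k);
    for x in input {
        if heap.len() < k {
            // still filling the buffer of k candidates
            heap.push(x);
        } else if let Some(&largest) = heap.peek() {
            if x < largest {
                // x displaces the current largest of the k smallest
                heap.pop();
                heap.push(x);
            }
        }
    }
    // BinaryHeap is a max-heap, so this yields the k smallest in ascending order
    heap.into_sorted_vec()
}
// e.g. k_smallest_sketch([5, 1, 4, 2, 3], 3) == vec![1, 2, 3]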
- for i in (0..=(storage.len() / 2)).rev() { - sift_down(&mut storage, &mut is_less_than, i); - } - - iter.for_each(|val| { - debug_assert_eq!(storage.len(), k); - if is_less_than(&val, &storage[0]) { - // Treating this as an push-and-pop saves having to write a sift-up implementation. - // https://en.wikipedia.org/wiki/Binary_heap#Insert_then_extract - storage[0] = val; - // We retain the smallest items we've seen so far, but ordered largest first so we can drop the largest efficiently. - sift_down(&mut storage, &mut is_less_than, 0); - } - }); - - // Ultimately the items need to be in least-first, strict order, but the heap is currently largest-first. - // To achieve this, repeatedly, - // 1) "pop" the largest item off the heap into the tail slot of the underlying storage, - // 2) shrink the logical size of the heap by 1, - // 3) restore the heap property over the remaining items. - let mut heap = &mut storage[..]; - while heap.len() > 1 { - let last_idx = heap.len() - 1; - heap.swap(0, last_idx); - // Sifting over a truncated slice means that the sifting will not disturb already popped elements. - heap = &mut heap[..last_idx]; - sift_down(heap, &mut is_less_than, 0); - } - - storage -} - -#[inline] -pub(crate) fn key_to_cmp(mut key: F) -> impl FnMut(&T, &T) -> Ordering -where - F: FnMut(&T) -> K, - K: Ord, -{ - move |a, b| key(a).cmp(&key(b)) -} diff --git a/vendor/itertools/src/kmerge_impl.rs b/vendor/itertools/src/kmerge_impl.rs deleted file mode 100644 index 0be3840a1b6686..00000000000000 --- a/vendor/itertools/src/kmerge_impl.rs +++ /dev/null @@ -1,240 +0,0 @@ -use crate::size_hint; -use crate::Itertools; - -use alloc::vec::Vec; -use std::fmt; -use std::iter::FusedIterator; -use std::mem::replace; - -/// Head element and Tail iterator pair -/// -/// `PartialEq`, `Eq`, `PartialOrd` and `Ord` are implemented by comparing sequences based on -/// first items (which are guaranteed to exist). -/// -/// The meanings of `PartialOrd` and `Ord` are reversed so as to turn the heap used in -/// `KMerge` into a min-heap. -#[derive(Debug)] -struct HeadTail -where - I: Iterator, -{ - head: I::Item, - tail: I, -} - -impl HeadTail -where - I: Iterator, -{ - /// Constructs a `HeadTail` from an `Iterator`. Returns `None` if the `Iterator` is empty. - fn new(mut it: I) -> Option { - let head = it.next(); - head.map(|h| Self { head: h, tail: it }) - } - - /// Get the next element and update `head`, returning the old head in `Some`. - /// - /// Returns `None` when the tail is exhausted (only `head` then remains). - fn next(&mut self) -> Option { - if let Some(next) = self.tail.next() { - Some(replace(&mut self.head, next)) - } else { - None - } - } - - /// Hints at the size of the sequence, same as the `Iterator` method. - fn size_hint(&self) -> (usize, Option) { - size_hint::add_scalar(self.tail.size_hint(), 1) - } -} - -impl Clone for HeadTail -where - I: Iterator + Clone, - I::Item: Clone, -{ - clone_fields!(head, tail); -} - -/// Make `data` a heap (min-heap w.r.t the sorting). 
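// Editorial sketch, not part of the vendored source or of this patch: the k-way
// merge this module implements with HeadTail plus a hand-rolled heap, expressed
// with std's BinaryHeap and Reverse. The name and the i32 element type are
// illustrative only; the real KMergeBy works on arbitrary iterators and predicates.
fn kmerge_sketch(sources: Vec<Vec<i32>>) -> Vec<i32> {
    use std::cmp::Reverse;
    use std::collections::BinaryHeap;
    // each entry is (current head, source index, index of that source's next element)
    let mut heap = BinaryHeap::new();
    for (idx, src) in sources.iter().enumerate() {
        if let Some(&head) = src.first() {
            heap.push(Reverse((head, idx, 1)));
        }
    }
    let mut merged = Vec::new();
    while let Some(Reverse((head, idx, next))) = heap.pop() {
        merged.push(head);
        if let Some(&val) = sources[idx].get(next) {
            // refill from the source we just popped, keeping one head per source in the heap
            heap.push(Reverse((val, idx, next + 1)));
        }
    }
    merged
}
// e.g. kmerge_sketch(vec![vec![0, 3], vec![1, 4], vec![2, 5]]) == vec![0, 1, 2, 3, 4, 5]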
-fn heapify(data: &mut [T], mut less_than: S) -where - S: FnMut(&T, &T) -> bool, -{ - for i in (0..data.len() / 2).rev() { - sift_down(data, i, &mut less_than); - } -} - -/// Sift down element at `index` (`heap` is a min-heap wrt the ordering) -fn sift_down(heap: &mut [T], index: usize, mut less_than: S) -where - S: FnMut(&T, &T) -> bool, -{ - debug_assert!(index <= heap.len()); - let mut pos = index; - let mut child = 2 * pos + 1; - // Require the right child to be present - // This allows to find the index of the smallest child without a branch - // that wouldn't be predicted if present - while child + 1 < heap.len() { - // pick the smaller of the two children - // use arithmetic to avoid an unpredictable branch - child += less_than(&heap[child + 1], &heap[child]) as usize; - - // sift down is done if we are already in order - if !less_than(&heap[child], &heap[pos]) { - return; - } - heap.swap(pos, child); - pos = child; - child = 2 * pos + 1; - } - // Check if the last (left) child was an only child - // if it is then it has to be compared with the parent - if child + 1 == heap.len() && less_than(&heap[child], &heap[pos]) { - heap.swap(pos, child); - } -} - -/// An iterator adaptor that merges an abitrary number of base iterators in ascending order. -/// If all base iterators are sorted (ascending), the result is sorted. -/// -/// Iterator element type is `I::Item`. -/// -/// See [`.kmerge()`](crate::Itertools::kmerge) for more information. -pub type KMerge = KMergeBy; - -pub trait KMergePredicate { - fn kmerge_pred(&mut self, a: &T, b: &T) -> bool; -} - -#[derive(Clone, Debug)] -pub struct KMergeByLt; - -impl KMergePredicate for KMergeByLt { - fn kmerge_pred(&mut self, a: &T, b: &T) -> bool { - a < b - } -} - -impl bool> KMergePredicate for F { - fn kmerge_pred(&mut self, a: &T, b: &T) -> bool { - self(a, b) - } -} - -/// Create an iterator that merges elements of the contained iterators using -/// the ordering function. -/// -/// [`IntoIterator`] enabled version of [`Itertools::kmerge`]. -/// -/// ``` -/// use itertools::kmerge; -/// -/// for elt in kmerge(vec![vec![0, 2, 4], vec![1, 3, 5], vec![6, 7]]) { -/// /* loop body */ -/// } -/// ``` -pub fn kmerge(iterable: I) -> KMerge<::IntoIter> -where - I: IntoIterator, - I::Item: IntoIterator, - <::Item as IntoIterator>::Item: PartialOrd, -{ - kmerge_by(iterable, KMergeByLt) -} - -/// An iterator adaptor that merges an abitrary number of base iterators -/// according to an ordering function. -/// -/// Iterator element type is `I::Item`. -/// -/// See [`.kmerge_by()`](crate::Itertools::kmerge_by) for more -/// information. -#[must_use = "this iterator adaptor is not lazy but does nearly nothing unless consumed"] -pub struct KMergeBy -where - I: Iterator, -{ - heap: Vec>, - less_than: F, -} - -impl fmt::Debug for KMergeBy -where - I: Iterator + fmt::Debug, - I::Item: fmt::Debug, -{ - debug_fmt_fields!(KMergeBy, heap); -} - -/// Create an iterator that merges elements of the contained iterators. -/// -/// [`IntoIterator`] enabled version of [`Itertools::kmerge_by`]. 
-pub fn kmerge_by( - iterable: I, - mut less_than: F, -) -> KMergeBy<::IntoIter, F> -where - I: IntoIterator, - I::Item: IntoIterator, - F: KMergePredicate<<::Item as IntoIterator>::Item>, -{ - let iter = iterable.into_iter(); - let (lower, _) = iter.size_hint(); - let mut heap: Vec<_> = Vec::with_capacity(lower); - heap.extend(iter.filter_map(|it| HeadTail::new(it.into_iter()))); - heapify(&mut heap, |a, b| less_than.kmerge_pred(&a.head, &b.head)); - KMergeBy { heap, less_than } -} - -impl Clone for KMergeBy -where - I: Iterator + Clone, - I::Item: Clone, - F: Clone, -{ - clone_fields!(heap, less_than); -} - -impl Iterator for KMergeBy -where - I: Iterator, - F: KMergePredicate, -{ - type Item = I::Item; - - fn next(&mut self) -> Option { - if self.heap.is_empty() { - return None; - } - let result = if let Some(next) = self.heap[0].next() { - next - } else { - self.heap.swap_remove(0).head - }; - let less_than = &mut self.less_than; - sift_down(&mut self.heap, 0, |a, b| { - less_than.kmerge_pred(&a.head, &b.head) - }); - Some(result) - } - - fn size_hint(&self) -> (usize, Option) { - #[allow(deprecated)] //TODO: once msrv hits 1.51. replace `fold1` with `reduce` - self.heap - .iter() - .map(|i| i.size_hint()) - .fold1(size_hint::add) - .unwrap_or((0, Some(0))) - } -} - -impl FusedIterator for KMergeBy -where - I: Iterator, - F: KMergePredicate, -{ -} diff --git a/vendor/itertools/src/lazy_buffer.rs b/vendor/itertools/src/lazy_buffer.rs deleted file mode 100644 index fefcff8f5c64de..00000000000000 --- a/vendor/itertools/src/lazy_buffer.rs +++ /dev/null @@ -1,75 +0,0 @@ -use alloc::vec::Vec; -use std::iter::Fuse; -use std::ops::Index; - -use crate::size_hint::{self, SizeHint}; - -#[derive(Debug, Clone)] -pub struct LazyBuffer { - it: Fuse, - buffer: Vec, -} - -impl LazyBuffer -where - I: Iterator, -{ - pub fn new(it: I) -> Self { - Self { - it: it.fuse(), - buffer: Vec::new(), - } - } - - pub fn len(&self) -> usize { - self.buffer.len() - } - - pub fn size_hint(&self) -> SizeHint { - size_hint::add_scalar(self.it.size_hint(), self.len()) - } - - pub fn count(self) -> usize { - self.len() + self.it.count() - } - - pub fn get_next(&mut self) -> bool { - if let Some(x) = self.it.next() { - self.buffer.push(x); - true - } else { - false - } - } - - pub fn prefill(&mut self, len: usize) { - let buffer_len = self.buffer.len(); - if len > buffer_len { - let delta = len - buffer_len; - self.buffer.extend(self.it.by_ref().take(delta)); - } - } -} - -impl LazyBuffer -where - I: Iterator, - I::Item: Clone, -{ - pub fn get_at(&self, indices: &[usize]) -> Vec { - indices.iter().map(|i| self.buffer[*i].clone()).collect() - } -} - -impl Index for LazyBuffer -where - I: Iterator, - I::Item: Sized, - Vec: Index, -{ - type Output = as Index>::Output; - - fn index(&self, index: J) -> &Self::Output { - self.buffer.index(index) - } -} diff --git a/vendor/itertools/src/lib.rs b/vendor/itertools/src/lib.rs deleted file mode 100644 index f4de79c5043a8c..00000000000000 --- a/vendor/itertools/src/lib.rs +++ /dev/null @@ -1,4365 +0,0 @@ -#![warn(missing_docs, clippy::default_numeric_fallback)] -#![crate_name = "itertools"] -#![cfg_attr(not(feature = "use_std"), no_std)] - -//! Extra iterator adaptors, functions and macros. -//! -//! To extend [`Iterator`] with methods in this crate, import -//! the [`Itertools`] trait: -//! -//! ``` -//! use itertools::Itertools; -//! ``` -//! -//! Now, new methods like [`interleave`](Itertools::interleave) -//! are available on all iterators: -//! -//! ``` -//! 
use itertools::Itertools; -//! -//! let it = (1..3).interleave(vec![-1, -2]); -//! itertools::assert_equal(it, vec![1, -1, 2, -2]); -//! ``` -//! -//! Most iterator methods are also provided as functions (with the benefit -//! that they convert parameters using [`IntoIterator`]): -//! -//! ``` -//! use itertools::interleave; -//! -//! for elt in interleave(&[1, 2, 3], &[2, 3, 4]) { -//! /* loop body */ -//! } -//! ``` -//! -//! ## Crate Features -//! -//! - `use_std` -//! - Enabled by default. -//! - Disable to compile itertools using `#![no_std]`. This disables -//! any item that depend on allocations (see the `use_alloc` feature) -//! and hash maps (like `unique`, `counts`, `into_grouping_map` and more). -//! - `use_alloc` -//! - Enabled by default. -//! - Enables any item that depend on allocations (like `chunk_by`, -//! `kmerge`, `join` and many more). -//! -//! ## Rust Version -//! -//! This version of itertools requires Rust 1.43.1 or later. - -#[cfg(not(feature = "use_std"))] -extern crate core as std; - -#[cfg(feature = "use_alloc")] -extern crate alloc; - -#[cfg(feature = "use_alloc")] -use alloc::{collections::VecDeque, string::String, vec::Vec}; - -pub use either::Either; - -use core::borrow::Borrow; -use std::cmp::Ordering; -#[cfg(feature = "use_std")] -use std::collections::HashMap; -#[cfg(feature = "use_std")] -use std::collections::HashSet; -use std::fmt; -#[cfg(feature = "use_alloc")] -use std::fmt::Write; -#[cfg(feature = "use_std")] -use std::hash::Hash; -use std::iter::{once, IntoIterator}; -#[cfg(feature = "use_alloc")] -type VecDequeIntoIter = alloc::collections::vec_deque::IntoIter; -#[cfg(feature = "use_alloc")] -type VecIntoIter = alloc::vec::IntoIter; -use std::iter::FromIterator; - -#[macro_use] -mod impl_macros; - -// for compatibility with no std and macros -#[doc(hidden)] -pub use std::iter as __std_iter; - -/// The concrete iterator types. 
-pub mod structs { - #[cfg(feature = "use_alloc")] - pub use crate::adaptors::MultiProduct; - pub use crate::adaptors::{ - Batching, Coalesce, Dedup, DedupBy, DedupByWithCount, DedupWithCount, FilterMapOk, - FilterOk, Interleave, InterleaveShortest, MapInto, MapOk, Positions, Product, PutBack, - TakeWhileRef, TupleCombinations, Update, WhileSome, - }; - #[cfg(feature = "use_alloc")] - pub use crate::combinations::Combinations; - #[cfg(feature = "use_alloc")] - pub use crate::combinations_with_replacement::CombinationsWithReplacement; - pub use crate::cons_tuples_impl::ConsTuples; - #[cfg(feature = "use_std")] - pub use crate::duplicates_impl::{Duplicates, DuplicatesBy}; - pub use crate::exactly_one_err::ExactlyOneError; - pub use crate::flatten_ok::FlattenOk; - pub use crate::format::{Format, FormatWith}; - #[allow(deprecated)] - #[cfg(feature = "use_alloc")] - pub use crate::groupbylazy::GroupBy; - #[cfg(feature = "use_alloc")] - pub use crate::groupbylazy::{Chunk, ChunkBy, Chunks, Group, Groups, IntoChunks}; - #[cfg(feature = "use_std")] - pub use crate::grouping_map::{GroupingMap, GroupingMapBy}; - pub use crate::intersperse::{Intersperse, IntersperseWith}; - #[cfg(feature = "use_alloc")] - pub use crate::kmerge_impl::{KMerge, KMergeBy}; - pub use crate::merge_join::{Merge, MergeBy, MergeJoinBy}; - #[cfg(feature = "use_alloc")] - pub use crate::multipeek_impl::MultiPeek; - pub use crate::pad_tail::PadUsing; - #[cfg(feature = "use_alloc")] - pub use crate::peek_nth::PeekNth; - pub use crate::peeking_take_while::PeekingTakeWhile; - #[cfg(feature = "use_alloc")] - pub use crate::permutations::Permutations; - #[cfg(feature = "use_alloc")] - pub use crate::powerset::Powerset; - pub use crate::process_results_impl::ProcessResults; - #[cfg(feature = "use_alloc")] - pub use crate::put_back_n_impl::PutBackN; - #[cfg(feature = "use_alloc")] - pub use crate::rciter_impl::RcIter; - pub use crate::repeatn::RepeatN; - #[allow(deprecated)] - pub use crate::sources::{Iterate, Unfold}; - pub use crate::take_while_inclusive::TakeWhileInclusive; - #[cfg(feature = "use_alloc")] - pub use crate::tee::Tee; - pub use crate::tuple_impl::{CircularTupleWindows, TupleBuffer, TupleWindows, Tuples}; - #[cfg(feature = "use_std")] - pub use crate::unique_impl::{Unique, UniqueBy}; - pub use crate::with_position::WithPosition; - pub use crate::zip_eq_impl::ZipEq; - pub use crate::zip_longest::ZipLongest; - pub use crate::ziptuple::Zip; -} - -/// Traits helpful for using certain `Itertools` methods in generic contexts. 
-pub mod traits { - pub use crate::iter_index::IteratorIndex; - pub use crate::tuple_impl::HomogeneousTuple; -} - -pub use crate::concat_impl::concat; -pub use crate::cons_tuples_impl::cons_tuples; -pub use crate::diff::diff_with; -pub use crate::diff::Diff; -#[cfg(feature = "use_alloc")] -pub use crate::kmerge_impl::kmerge_by; -pub use crate::minmax::MinMaxResult; -pub use crate::peeking_take_while::PeekingNext; -pub use crate::process_results_impl::process_results; -pub use crate::repeatn::repeat_n; -#[allow(deprecated)] -pub use crate::sources::{iterate, unfold}; -#[allow(deprecated)] -pub use crate::structs::*; -pub use crate::unziptuple::{multiunzip, MultiUnzip}; -pub use crate::with_position::Position; -pub use crate::ziptuple::multizip; -mod adaptors; -mod either_or_both; -pub use crate::either_or_both::EitherOrBoth; -#[doc(hidden)] -pub mod free; -#[doc(inline)] -pub use crate::free::*; -#[cfg(feature = "use_alloc")] -mod combinations; -#[cfg(feature = "use_alloc")] -mod combinations_with_replacement; -mod concat_impl; -mod cons_tuples_impl; -mod diff; -#[cfg(feature = "use_std")] -mod duplicates_impl; -mod exactly_one_err; -#[cfg(feature = "use_alloc")] -mod extrema_set; -mod flatten_ok; -mod format; -#[cfg(feature = "use_alloc")] -mod group_map; -#[cfg(feature = "use_alloc")] -mod groupbylazy; -#[cfg(feature = "use_std")] -mod grouping_map; -mod intersperse; -mod iter_index; -#[cfg(feature = "use_alloc")] -mod k_smallest; -#[cfg(feature = "use_alloc")] -mod kmerge_impl; -#[cfg(feature = "use_alloc")] -mod lazy_buffer; -mod merge_join; -mod minmax; -#[cfg(feature = "use_alloc")] -mod multipeek_impl; -mod pad_tail; -#[cfg(feature = "use_alloc")] -mod peek_nth; -mod peeking_take_while; -#[cfg(feature = "use_alloc")] -mod permutations; -#[cfg(feature = "use_alloc")] -mod powerset; -mod process_results_impl; -#[cfg(feature = "use_alloc")] -mod put_back_n_impl; -#[cfg(feature = "use_alloc")] -mod rciter_impl; -mod repeatn; -mod size_hint; -mod sources; -mod take_while_inclusive; -#[cfg(feature = "use_alloc")] -mod tee; -mod tuple_impl; -#[cfg(feature = "use_std")] -mod unique_impl; -mod unziptuple; -mod with_position; -mod zip_eq_impl; -mod zip_longest; -mod ziptuple; - -#[macro_export] -/// Create an iterator over the “cartesian product” of iterators. -/// -/// Iterator element type is like `(A, B, ..., E)` if formed -/// from iterators `(I, J, ..., M)` with element types `I::Item = A`, `J::Item = B`, etc. -/// -/// ``` -/// # use itertools::iproduct; -/// # -/// # fn main() { -/// // Iterate over the coordinates of a 4 x 4 x 4 grid -/// // from (0, 0, 0), (0, 0, 1), .., (0, 1, 0), (0, 1, 1), .. etc until (3, 3, 3) -/// for (i, j, k) in iproduct!(0..4, 0..4, 0..4) { -/// // .. -/// } -/// # } -/// ``` -macro_rules! iproduct { - (@flatten $I:expr,) => ( - $I - ); - (@flatten $I:expr, $J:expr, $($K:expr,)*) => ( - $crate::iproduct!(@flatten $crate::cons_tuples($crate::iproduct!($I, $J)), $($K,)*) - ); - () => ( - $crate::__std_iter::once(()) - ); - ($I:expr $(,)?) => ( - $crate::__std_iter::IntoIterator::into_iter($I).map(|elt| (elt,)) - ); - ($I:expr, $J:expr $(,)?) => ( - $crate::Itertools::cartesian_product( - $crate::__std_iter::IntoIterator::into_iter($I), - $crate::__std_iter::IntoIterator::into_iter($J), - ) - ); - ($I:expr, $J:expr, $($K:expr),+ $(,)?) => ( - $crate::iproduct!(@flatten $crate::iproduct!($I, $J), $($K,)+) - ); -} - -#[macro_export] -/// Create an iterator running multiple iterators in lockstep. 
-/// -/// The `izip!` iterator yields elements until any subiterator -/// returns `None`. -/// -/// This is a version of the standard ``.zip()`` that's supporting more than -/// two iterators. The iterator element type is a tuple with one element -/// from each of the input iterators. Just like ``.zip()``, the iteration stops -/// when the shortest of the inputs reaches its end. -/// -/// **Note:** The result of this macro is in the general case an iterator -/// composed of repeated `.zip()` and a `.map()`; it has an anonymous type. -/// The special cases of one and two arguments produce the equivalent of -/// `$a.into_iter()` and `$a.into_iter().zip($b)` respectively. -/// -/// Prefer this macro `izip!()` over [`multizip`] for the performance benefits -/// of using the standard library `.zip()`. -/// -/// ``` -/// # use itertools::izip; -/// # -/// # fn main() { -/// -/// // iterate over three sequences side-by-side -/// let mut results = [0, 0, 0, 0]; -/// let inputs = [3, 7, 9, 6]; -/// -/// for (r, index, input) in izip!(&mut results, 0..10, &inputs) { -/// *r = index * 10 + input; -/// } -/// -/// assert_eq!(results, [0 + 3, 10 + 7, 29, 36]); -/// # } -/// ``` -macro_rules! izip { - // @closure creates a tuple-flattening closure for .map() call. usage: - // @closure partial_pattern => partial_tuple , rest , of , iterators - // eg. izip!( @closure ((a, b), c) => (a, b, c) , dd , ee ) - ( @closure $p:pat => $tup:expr ) => { - |$p| $tup - }; - - // The "b" identifier is a different identifier on each recursion level thanks to hygiene. - ( @closure $p:pat => ( $($tup:tt)* ) , $_iter:expr $( , $tail:expr )* ) => { - $crate::izip!(@closure ($p, b) => ( $($tup)*, b ) $( , $tail )*) - }; - - // unary - ($first:expr $(,)*) => { - $crate::__std_iter::IntoIterator::into_iter($first) - }; - - // binary - ($first:expr, $second:expr $(,)*) => { - $crate::izip!($first) - .zip($second) - }; - - // n-ary where n > 2 - ( $first:expr $( , $rest:expr )* $(,)* ) => { - $crate::izip!($first) - $( - .zip($rest) - )* - .map( - $crate::izip!(@closure a => (a) $( , $rest )*) - ) - }; -} - -#[macro_export] -/// [Chain][`chain`] zero or more iterators together into one sequence. -/// -/// The comma-separated arguments must implement [`IntoIterator`]. -/// The final argument may be followed by a trailing comma. -/// -/// [`chain`]: Iterator::chain -/// -/// # Examples -/// -/// Empty invocations of `chain!` expand to an invocation of [`std::iter::empty`]: -/// ``` -/// use std::iter; -/// use itertools::chain; -/// -/// let _: iter::Empty<()> = chain!(); -/// let _: iter::Empty = chain!(); -/// ``` -/// -/// Invocations of `chain!` with one argument expand to [`arg.into_iter()`](IntoIterator): -/// ``` -/// use std::{ops::Range, slice}; -/// use itertools::chain; -/// let _: as IntoIterator>::IntoIter = chain!((2..6),); // trailing comma optional! 
-/// let _: <&[_] as IntoIterator>::IntoIter = chain!(&[2, 3, 4]); -/// ``` -/// -/// Invocations of `chain!` with multiple arguments [`.into_iter()`](IntoIterator) each -/// argument, and then [`chain`] them together: -/// ``` -/// use std::{iter::*, ops::Range, slice}; -/// use itertools::{assert_equal, chain}; -/// -/// // e.g., this: -/// let with_macro: Chain, Take>>, slice::Iter<_>> = -/// chain![once(&0), repeat(&1).take(2), &[2, 3, 5],]; -/// -/// // ...is equivalent to this: -/// let with_method: Chain, Take>>, slice::Iter<_>> = -/// once(&0) -/// .chain(repeat(&1).take(2)) -/// .chain(&[2, 3, 5]); -/// -/// assert_equal(with_macro, with_method); -/// ``` -macro_rules! chain { - () => { - core::iter::empty() - }; - ($first:expr $(, $rest:expr )* $(,)?) => { - { - let iter = core::iter::IntoIterator::into_iter($first); - $( - let iter = - core::iter::Iterator::chain( - iter, - core::iter::IntoIterator::into_iter($rest)); - )* - iter - } - }; -} - -/// An [`Iterator`] blanket implementation that provides extra adaptors and -/// methods. -/// -/// This trait defines a number of methods. They are divided into two groups: -/// -/// * *Adaptors* take an iterator and parameter as input, and return -/// a new iterator value. These are listed first in the trait. An example -/// of an adaptor is [`.interleave()`](Itertools::interleave) -/// -/// * *Regular methods* are those that don't return iterators and instead -/// return a regular value of some other kind. -/// [`.next_tuple()`](Itertools::next_tuple) is an example and the first regular -/// method in the list. -pub trait Itertools: Iterator { - // adaptors - - /// Alternate elements from two iterators until both have run out. - /// - /// Iterator element type is `Self::Item`. - /// - /// This iterator is *fused*. - /// - /// ``` - /// use itertools::Itertools; - /// - /// let it = (1..7).interleave(vec![-1, -2]); - /// itertools::assert_equal(it, vec![1, -1, 2, -2, 3, 4, 5, 6]); - /// ``` - fn interleave(self, other: J) -> Interleave - where - J: IntoIterator, - Self: Sized, - { - interleave(self, other) - } - - /// Alternate elements from two iterators until at least one of them has run - /// out. - /// - /// Iterator element type is `Self::Item`. - /// - /// ``` - /// use itertools::Itertools; - /// - /// let it = (1..7).interleave_shortest(vec![-1, -2]); - /// itertools::assert_equal(it, vec![1, -1, 2, -2, 3]); - /// ``` - fn interleave_shortest(self, other: J) -> InterleaveShortest - where - J: IntoIterator, - Self: Sized, - { - adaptors::interleave_shortest(self, other.into_iter()) - } - - /// An iterator adaptor to insert a particular value - /// between each element of the adapted iterator. - /// - /// Iterator element type is `Self::Item`. - /// - /// This iterator is *fused*. - /// - /// ``` - /// use itertools::Itertools; - /// - /// itertools::assert_equal((0..3).intersperse(8), vec![0, 8, 1, 8, 2]); - /// ``` - fn intersperse(self, element: Self::Item) -> Intersperse - where - Self: Sized, - Self::Item: Clone, - { - intersperse::intersperse(self, element) - } - - /// An iterator adaptor to insert a particular value created by a function - /// between each element of the adapted iterator. - /// - /// Iterator element type is `Self::Item`. - /// - /// This iterator is *fused*. 
- /// - /// ``` - /// use itertools::Itertools; - /// - /// let mut i = 10; - /// itertools::assert_equal((0..3).intersperse_with(|| { i -= 1; i }), vec![0, 9, 1, 8, 2]); - /// assert_eq!(i, 8); - /// ``` - fn intersperse_with(self, element: F) -> IntersperseWith - where - Self: Sized, - F: FnMut() -> Self::Item, - { - intersperse::intersperse_with(self, element) - } - - /// Returns an iterator over a subsection of the iterator. - /// - /// Works similarly to [`slice::get`](https://doc.rust-lang.org/std/primitive.slice.html#method.get). - /// - /// **Panics** for ranges `..=usize::MAX` and `0..=usize::MAX`. - /// - /// It's a generalisation of [`Iterator::take`] and [`Iterator::skip`], - /// and uses these under the hood. - /// Therefore, the resulting iterator is: - /// - [`ExactSizeIterator`] if the adapted iterator is [`ExactSizeIterator`]. - /// - [`DoubleEndedIterator`] if the adapted iterator is [`DoubleEndedIterator`] and [`ExactSizeIterator`]. - /// - /// # Unspecified Behavior - /// The result of indexing with an exhausted [`core::ops::RangeInclusive`] is unspecified. - /// - /// # Examples - /// - /// ``` - /// use itertools::Itertools; - /// - /// let vec = vec![3, 1, 4, 1, 5]; - /// - /// let mut range: Vec<_> = - /// vec.iter().get(1..=3).copied().collect(); - /// assert_eq!(&range, &[1, 4, 1]); - /// - /// // It works with other types of ranges, too - /// range = vec.iter().get(..2).copied().collect(); - /// assert_eq!(&range, &[3, 1]); - /// - /// range = vec.iter().get(0..1).copied().collect(); - /// assert_eq!(&range, &[3]); - /// - /// range = vec.iter().get(2..).copied().collect(); - /// assert_eq!(&range, &[4, 1, 5]); - /// - /// range = vec.iter().get(..=2).copied().collect(); - /// assert_eq!(&range, &[3, 1, 4]); - /// - /// range = vec.iter().get(..).copied().collect(); - /// assert_eq!(range, vec); - /// ``` - fn get(self, index: R) -> R::Output - where - Self: Sized, - R: traits::IteratorIndex, - { - iter_index::get(self, index) - } - - /// Create an iterator which iterates over both this and the specified - /// iterator simultaneously, yielding pairs of two optional elements. - /// - /// This iterator is *fused*. - /// - /// As long as neither input iterator is exhausted yet, it yields two values - /// via `EitherOrBoth::Both`. - /// - /// When the parameter iterator is exhausted, it only yields a value from the - /// `self` iterator via `EitherOrBoth::Left`. - /// - /// When the `self` iterator is exhausted, it only yields a value from the - /// parameter iterator via `EitherOrBoth::Right`. - /// - /// When both iterators return `None`, all further invocations of `.next()` - /// will return `None`. - /// - /// Iterator element type is - /// [`EitherOrBoth`](EitherOrBoth). - /// - /// ```rust - /// use itertools::EitherOrBoth::{Both, Right}; - /// use itertools::Itertools; - /// let it = (0..1).zip_longest(1..3); - /// itertools::assert_equal(it, vec![Both(0, 1), Right(2)]); - /// ``` - #[inline] - fn zip_longest(self, other: J) -> ZipLongest - where - J: IntoIterator, - Self: Sized, - { - zip_longest::zip_longest(self, other.into_iter()) - } - - /// Create an iterator which iterates over both this and the specified - /// iterator simultaneously, yielding pairs of elements. - /// - /// **Panics** if the iterators reach an end and they are not of equal - /// lengths. - #[inline] - fn zip_eq(self, other: J) -> ZipEq - where - J: IntoIterator, - Self: Sized, - { - zip_eq(self, other) - } - - /// A “meta iterator adaptor”. 
Its closure receives a reference to the - /// iterator and may pick off as many elements as it likes, to produce the - /// next iterator element. - /// - /// Iterator element type is `B`. - /// - /// ``` - /// use itertools::Itertools; - /// - /// // An adaptor that gathers elements in pairs - /// let pit = (0..4).batching(|it| { - /// match it.next() { - /// None => None, - /// Some(x) => match it.next() { - /// None => None, - /// Some(y) => Some((x, y)), - /// } - /// } - /// }); - /// - /// itertools::assert_equal(pit, vec![(0, 1), (2, 3)]); - /// ``` - /// - fn batching(self, f: F) -> Batching - where - F: FnMut(&mut Self) -> Option, - Self: Sized, - { - adaptors::batching(self, f) - } - - /// Return an *iterable* that can group iterator elements. - /// Consecutive elements that map to the same key (“runs”), are assigned - /// to the same group. - /// - /// `ChunkBy` is the storage for the lazy grouping operation. - /// - /// If the groups are consumed in order, or if each group's iterator is - /// dropped without keeping it around, then `ChunkBy` uses no - /// allocations. It needs allocations only if several group iterators - /// are alive at the same time. - /// - /// This type implements [`IntoIterator`] (it is **not** an iterator - /// itself), because the group iterators need to borrow from this - /// value. It should be stored in a local variable or temporary and - /// iterated. - /// - /// Iterator element type is `(K, Group)`: the group's key and the - /// group iterator. - /// - /// ``` - /// use itertools::Itertools; - /// - /// // chunk data into runs of larger than zero or not. - /// let data = vec![1, 3, -2, -2, 1, 0, 1, 2]; - /// // chunks: |---->|------>|--------->| - /// - /// // Note: The `&` is significant here, `ChunkBy` is iterable - /// // only by reference. You can also call `.into_iter()` explicitly. - /// let mut data_grouped = Vec::new(); - /// for (key, chunk) in &data.into_iter().chunk_by(|elt| *elt >= 0) { - /// data_grouped.push((key, chunk.collect())); - /// } - /// assert_eq!(data_grouped, vec![(true, vec![1, 3]), (false, vec![-2, -2]), (true, vec![1, 0, 1, 2])]); - /// ``` - #[cfg(feature = "use_alloc")] - fn chunk_by(self, key: F) -> ChunkBy - where - Self: Sized, - F: FnMut(&Self::Item) -> K, - K: PartialEq, - { - groupbylazy::new(self, key) - } - - /// See [`.chunk_by()`](Itertools::chunk_by). - #[deprecated(note = "Use .chunk_by() instead", since = "0.13.0")] - #[cfg(feature = "use_alloc")] - fn group_by(self, key: F) -> ChunkBy - where - Self: Sized, - F: FnMut(&Self::Item) -> K, - K: PartialEq, - { - self.chunk_by(key) - } - - /// Return an *iterable* that can chunk the iterator. - /// - /// Yield subiterators (chunks) that each yield a fixed number elements, - /// determined by `size`. The last chunk will be shorter if there aren't - /// enough elements. - /// - /// `IntoChunks` is based on `ChunkBy`: it is iterable (implements - /// `IntoIterator`, **not** `Iterator`), and it only buffers if several - /// chunk iterators are alive at the same time. - /// - /// Iterator element type is `Chunk`, each chunk's iterator. - /// - /// **Panics** if `size` is 0. - /// - /// ``` - /// use itertools::Itertools; - /// - /// let data = vec![1, 1, 2, -2, 6, 0, 3, 1]; - /// //chunk size=3 |------->|-------->|--->| - /// - /// // Note: The `&` is significant here, `IntoChunks` is iterable - /// // only by reference. You can also call `.into_iter()` explicitly. - /// for chunk in &data.into_iter().chunks(3) { - /// // Check that the sum of each chunk is 4. 
- /// assert_eq!(4, chunk.sum()); - /// } - /// ``` - #[cfg(feature = "use_alloc")] - fn chunks(self, size: usize) -> IntoChunks - where - Self: Sized, - { - assert!(size != 0); - groupbylazy::new_chunks(self, size) - } - - /// Return an iterator over all contiguous windows producing tuples of - /// a specific size (up to 12). - /// - /// `tuple_windows` clones the iterator elements so that they can be - /// part of successive windows, this makes it most suited for iterators - /// of references and other values that are cheap to copy. - /// - /// ``` - /// use itertools::Itertools; - /// let mut v = Vec::new(); - /// - /// // pairwise iteration - /// for (a, b) in (1..5).tuple_windows() { - /// v.push((a, b)); - /// } - /// assert_eq!(v, vec![(1, 2), (2, 3), (3, 4)]); - /// - /// let mut it = (1..5).tuple_windows(); - /// assert_eq!(Some((1, 2, 3)), it.next()); - /// assert_eq!(Some((2, 3, 4)), it.next()); - /// assert_eq!(None, it.next()); - /// - /// // this requires a type hint - /// let it = (1..5).tuple_windows::<(_, _, _)>(); - /// itertools::assert_equal(it, vec![(1, 2, 3), (2, 3, 4)]); - /// - /// // you can also specify the complete type - /// use itertools::TupleWindows; - /// use std::ops::Range; - /// - /// let it: TupleWindows, (u32, u32, u32)> = (1..5).tuple_windows(); - /// itertools::assert_equal(it, vec![(1, 2, 3), (2, 3, 4)]); - /// ``` - fn tuple_windows(self) -> TupleWindows - where - Self: Sized + Iterator, - T: traits::HomogeneousTuple, - T::Item: Clone, - { - tuple_impl::tuple_windows(self) - } - - /// Return an iterator over all windows, wrapping back to the first - /// elements when the window would otherwise exceed the length of the - /// iterator, producing tuples of a specific size (up to 12). - /// - /// `circular_tuple_windows` clones the iterator elements so that they can be - /// part of successive windows, this makes it most suited for iterators - /// of references and other values that are cheap to copy. - /// - /// ``` - /// use itertools::Itertools; - /// let mut v = Vec::new(); - /// for (a, b) in (1..5).circular_tuple_windows() { - /// v.push((a, b)); - /// } - /// assert_eq!(v, vec![(1, 2), (2, 3), (3, 4), (4, 1)]); - /// - /// let mut it = (1..5).circular_tuple_windows(); - /// assert_eq!(Some((1, 2, 3)), it.next()); - /// assert_eq!(Some((2, 3, 4)), it.next()); - /// assert_eq!(Some((3, 4, 1)), it.next()); - /// assert_eq!(Some((4, 1, 2)), it.next()); - /// assert_eq!(None, it.next()); - /// - /// // this requires a type hint - /// let it = (1..5).circular_tuple_windows::<(_, _, _)>(); - /// itertools::assert_equal(it, vec![(1, 2, 3), (2, 3, 4), (3, 4, 1), (4, 1, 2)]); - /// ``` - fn circular_tuple_windows(self) -> CircularTupleWindows - where - Self: Sized + Clone + Iterator + ExactSizeIterator, - T: tuple_impl::TupleCollect + Clone, - T::Item: Clone, - { - tuple_impl::circular_tuple_windows(self) - } - /// Return an iterator that groups the items in tuples of a specific size - /// (up to 12). - /// - /// See also the method [`.next_tuple()`](Itertools::next_tuple). 
- /// - /// ``` - /// use itertools::Itertools; - /// let mut v = Vec::new(); - /// for (a, b) in (1..5).tuples() { - /// v.push((a, b)); - /// } - /// assert_eq!(v, vec![(1, 2), (3, 4)]); - /// - /// let mut it = (1..7).tuples(); - /// assert_eq!(Some((1, 2, 3)), it.next()); - /// assert_eq!(Some((4, 5, 6)), it.next()); - /// assert_eq!(None, it.next()); - /// - /// // this requires a type hint - /// let it = (1..7).tuples::<(_, _, _)>(); - /// itertools::assert_equal(it, vec![(1, 2, 3), (4, 5, 6)]); - /// - /// // you can also specify the complete type - /// use itertools::Tuples; - /// use std::ops::Range; - /// - /// let it: Tuples, (u32, u32, u32)> = (1..7).tuples(); - /// itertools::assert_equal(it, vec![(1, 2, 3), (4, 5, 6)]); - /// ``` - /// - /// See also [`Tuples::into_buffer`]. - fn tuples(self) -> Tuples - where - Self: Sized + Iterator, - T: traits::HomogeneousTuple, - { - tuple_impl::tuples(self) - } - - /// Split into an iterator pair that both yield all elements from - /// the original iterator. - /// - /// **Note:** If the iterator is clonable, prefer using that instead - /// of using this method. Cloning is likely to be more efficient. - /// - /// Iterator element type is `Self::Item`. - /// - /// ``` - /// use itertools::Itertools; - /// let xs = vec![0, 1, 2, 3]; - /// - /// let (mut t1, t2) = xs.into_iter().tee(); - /// itertools::assert_equal(t1.next(), Some(0)); - /// itertools::assert_equal(t2, 0..4); - /// itertools::assert_equal(t1, 1..4); - /// ``` - #[cfg(feature = "use_alloc")] - fn tee(self) -> (Tee, Tee) - where - Self: Sized, - Self::Item: Clone, - { - tee::new(self) - } - - /// Convert each item of the iterator using the [`Into`] trait. - /// - /// ```rust - /// use itertools::Itertools; - /// - /// (1i32..42i32).map_into::().collect_vec(); - /// ``` - fn map_into(self) -> MapInto - where - Self: Sized, - Self::Item: Into, - { - adaptors::map_into(self) - } - - /// Return an iterator adaptor that applies the provided closure - /// to every `Result::Ok` value. `Result::Err` values are - /// unchanged. - /// - /// ``` - /// use itertools::Itertools; - /// - /// let input = vec![Ok(41), Err(false), Ok(11)]; - /// let it = input.into_iter().map_ok(|i| i + 1); - /// itertools::assert_equal(it, vec![Ok(42), Err(false), Ok(12)]); - /// ``` - fn map_ok(self, f: F) -> MapOk - where - Self: Iterator> + Sized, - F: FnMut(T) -> U, - { - adaptors::map_ok(self, f) - } - - /// Return an iterator adaptor that filters every `Result::Ok` - /// value with the provided closure. `Result::Err` values are - /// unchanged. - /// - /// ``` - /// use itertools::Itertools; - /// - /// let input = vec![Ok(22), Err(false), Ok(11)]; - /// let it = input.into_iter().filter_ok(|&i| i > 20); - /// itertools::assert_equal(it, vec![Ok(22), Err(false)]); - /// ``` - fn filter_ok(self, f: F) -> FilterOk - where - Self: Iterator> + Sized, - F: FnMut(&T) -> bool, - { - adaptors::filter_ok(self, f) - } - - /// Return an iterator adaptor that filters and transforms every - /// `Result::Ok` value with the provided closure. `Result::Err` - /// values are unchanged. 
- /// - /// ``` - /// use itertools::Itertools; - /// - /// let input = vec![Ok(22), Err(false), Ok(11)]; - /// let it = input.into_iter().filter_map_ok(|i| if i > 20 { Some(i * 2) } else { None }); - /// itertools::assert_equal(it, vec![Ok(44), Err(false)]); - /// ``` - fn filter_map_ok(self, f: F) -> FilterMapOk - where - Self: Iterator> + Sized, - F: FnMut(T) -> Option, - { - adaptors::filter_map_ok(self, f) - } - - /// Return an iterator adaptor that flattens every `Result::Ok` value into - /// a series of `Result::Ok` values. `Result::Err` values are unchanged. - /// - /// This is useful when you have some common error type for your crate and - /// need to propagate it upwards, but the `Result::Ok` case needs to be flattened. - /// - /// ``` - /// use itertools::Itertools; - /// - /// let input = vec![Ok(0..2), Err(false), Ok(2..4)]; - /// let it = input.iter().cloned().flatten_ok(); - /// itertools::assert_equal(it.clone(), vec![Ok(0), Ok(1), Err(false), Ok(2), Ok(3)]); - /// - /// // This can also be used to propagate errors when collecting. - /// let output_result: Result, bool> = it.collect(); - /// assert_eq!(output_result, Err(false)); - /// ``` - fn flatten_ok(self) -> FlattenOk - where - Self: Iterator> + Sized, - T: IntoIterator, - { - flatten_ok::flatten_ok(self) - } - - /// “Lift” a function of the values of the current iterator so as to process - /// an iterator of `Result` values instead. - /// - /// `processor` is a closure that receives an adapted version of the iterator - /// as the only argument — the adapted iterator produces elements of type `T`, - /// as long as the original iterator produces `Ok` values. - /// - /// If the original iterable produces an error at any point, the adapted - /// iterator ends and it will return the error iself. - /// - /// Otherwise, the return value from the closure is returned wrapped - /// inside `Ok`. - /// - /// # Example - /// - /// ``` - /// use itertools::Itertools; - /// - /// type Item = Result; - /// - /// let first_values: Vec = vec![Ok(1), Ok(0), Ok(3)]; - /// let second_values: Vec = vec![Ok(2), Ok(1), Err("overflow")]; - /// - /// // “Lift” the iterator .max() method to work on the Ok-values. - /// let first_max = first_values.into_iter().process_results(|iter| iter.max().unwrap_or(0)); - /// let second_max = second_values.into_iter().process_results(|iter| iter.max().unwrap_or(0)); - /// - /// assert_eq!(first_max, Ok(3)); - /// assert!(second_max.is_err()); - /// ``` - fn process_results(self, processor: F) -> Result - where - Self: Iterator> + Sized, - F: FnOnce(ProcessResults) -> R, - { - process_results(self, processor) - } - - /// Return an iterator adaptor that merges the two base iterators in - /// ascending order. If both base iterators are sorted (ascending), the - /// result is sorted. - /// - /// Iterator element type is `Self::Item`. - /// - /// ``` - /// use itertools::Itertools; - /// - /// let a = (0..11).step_by(3); - /// let b = (0..11).step_by(5); - /// let it = a.merge(b); - /// itertools::assert_equal(it, vec![0, 0, 3, 5, 6, 9, 10]); - /// ``` - fn merge(self, other: J) -> Merge - where - Self: Sized, - Self::Item: PartialOrd, - J: IntoIterator, - { - merge(self, other) - } - - /// Return an iterator adaptor that merges the two base iterators in order. - /// This is much like [`.merge()`](Itertools::merge) but allows for a custom ordering. - /// - /// This can be especially useful for sequences of tuples. - /// - /// Iterator element type is `Self::Item`. 
- /// - /// ``` - /// use itertools::Itertools; - /// - /// let a = (0..).zip("bc".chars()); - /// let b = (0..).zip("ad".chars()); - /// let it = a.merge_by(b, |x, y| x.1 <= y.1); - /// itertools::assert_equal(it, vec![(0, 'a'), (0, 'b'), (1, 'c'), (1, 'd')]); - /// ``` - - fn merge_by(self, other: J, is_first: F) -> MergeBy - where - Self: Sized, - J: IntoIterator, - F: FnMut(&Self::Item, &Self::Item) -> bool, - { - merge_join::merge_by_new(self, other, is_first) - } - - /// Create an iterator that merges items from both this and the specified - /// iterator in ascending order. - /// - /// The function can either return an `Ordering` variant or a boolean. - /// - /// If `cmp_fn` returns `Ordering`, - /// it chooses whether to pair elements based on the `Ordering` returned by the - /// specified compare function. At any point, inspecting the tip of the - /// iterators `I` and `J` as items `i` of type `I::Item` and `j` of type - /// `J::Item` respectively, the resulting iterator will: - /// - /// - Emit `EitherOrBoth::Left(i)` when `i < j`, - /// and remove `i` from its source iterator - /// - Emit `EitherOrBoth::Right(j)` when `i > j`, - /// and remove `j` from its source iterator - /// - Emit `EitherOrBoth::Both(i, j)` when `i == j`, - /// and remove both `i` and `j` from their respective source iterators - /// - /// ``` - /// use itertools::Itertools; - /// use itertools::EitherOrBoth::{Left, Right, Both}; - /// - /// let a = vec![0, 2, 4, 6, 1].into_iter(); - /// let b = (0..10).step_by(3); - /// - /// itertools::assert_equal( - /// a.merge_join_by(b, |i, j| i.cmp(j)), - /// vec![Both(0, 0), Left(2), Right(3), Left(4), Both(6, 6), Left(1), Right(9)] - /// ); - /// ``` - /// - /// If `cmp_fn` returns `bool`, - /// it chooses whether to pair elements based on the boolean returned by the - /// specified function. At any point, inspecting the tip of the - /// iterators `I` and `J` as items `i` of type `I::Item` and `j` of type - /// `J::Item` respectively, the resulting iterator will: - /// - /// - Emit `Either::Left(i)` when `true`, - /// and remove `i` from its source iterator - /// - Emit `Either::Right(j)` when `false`, - /// and remove `j` from its source iterator - /// - /// It is similar to the `Ordering` case if the first argument is considered - /// "less" than the second argument. - /// - /// ``` - /// use itertools::Itertools; - /// use itertools::Either::{Left, Right}; - /// - /// let a = vec![0, 2, 4, 6, 1].into_iter(); - /// let b = (0..10).step_by(3); - /// - /// itertools::assert_equal( - /// a.merge_join_by(b, |i, j| i <= j), - /// vec![Left(0), Right(0), Left(2), Right(3), Left(4), Left(6), Left(1), Right(6), Right(9)] - /// ); - /// ``` - #[inline] - fn merge_join_by(self, other: J, cmp_fn: F) -> MergeJoinBy - where - J: IntoIterator, - F: FnMut(&Self::Item, &J::Item) -> T, - Self: Sized, - { - merge_join_by(self, other, cmp_fn) - } - - /// Return an iterator adaptor that flattens an iterator of iterators by - /// merging them in ascending order. - /// - /// If all base iterators are sorted (ascending), the result is sorted. - /// - /// Iterator element type is `Self::Item`. 
- /// - /// ``` - /// use itertools::Itertools; - /// - /// let a = (0..6).step_by(3); - /// let b = (1..6).step_by(3); - /// let c = (2..6).step_by(3); - /// let it = vec![a, b, c].into_iter().kmerge(); - /// itertools::assert_equal(it, vec![0, 1, 2, 3, 4, 5]); - /// ``` - #[cfg(feature = "use_alloc")] - fn kmerge(self) -> KMerge<::IntoIter> - where - Self: Sized, - Self::Item: IntoIterator, - ::Item: PartialOrd, - { - kmerge(self) - } - - /// Return an iterator adaptor that flattens an iterator of iterators by - /// merging them according to the given closure. - /// - /// The closure `first` is called with two elements *a*, *b* and should - /// return `true` if *a* is ordered before *b*. - /// - /// If all base iterators are sorted according to `first`, the result is - /// sorted. - /// - /// Iterator element type is `Self::Item`. - /// - /// ``` - /// use itertools::Itertools; - /// - /// let a = vec![-1f64, 2., 3., -5., 6., -7.]; - /// let b = vec![0., 2., -4.]; - /// let mut it = vec![a, b].into_iter().kmerge_by(|a, b| a.abs() < b.abs()); - /// assert_eq!(it.next(), Some(0.)); - /// assert_eq!(it.last(), Some(-7.)); - /// ``` - #[cfg(feature = "use_alloc")] - fn kmerge_by(self, first: F) -> KMergeBy<::IntoIter, F> - where - Self: Sized, - Self::Item: IntoIterator, - F: FnMut(&::Item, &::Item) -> bool, - { - kmerge_by(self, first) - } - - /// Return an iterator adaptor that iterates over the cartesian product of - /// the element sets of two iterators `self` and `J`. - /// - /// Iterator element type is `(Self::Item, J::Item)`. - /// - /// ``` - /// use itertools::Itertools; - /// - /// let it = (0..2).cartesian_product("αβ".chars()); - /// itertools::assert_equal(it, vec![(0, 'α'), (0, 'β'), (1, 'α'), (1, 'β')]); - /// ``` - fn cartesian_product(self, other: J) -> Product - where - Self: Sized, - Self::Item: Clone, - J: IntoIterator, - J::IntoIter: Clone, - { - adaptors::cartesian_product(self, other.into_iter()) - } - - /// Return an iterator adaptor that iterates over the cartesian product of - /// all subiterators returned by meta-iterator `self`. - /// - /// All provided iterators must yield the same `Item` type. To generate - /// the product of iterators yielding multiple types, use the - /// [`iproduct`] macro instead. - /// - /// The iterator element type is `Vec`, where `T` is the iterator element - /// of the subiterators. - /// - /// Note that the iterator is fused. - /// - /// ``` - /// use itertools::Itertools; - /// let mut multi_prod = (0..3).map(|i| (i * 2)..(i * 2 + 2)) - /// .multi_cartesian_product(); - /// assert_eq!(multi_prod.next(), Some(vec![0, 2, 4])); - /// assert_eq!(multi_prod.next(), Some(vec![0, 2, 5])); - /// assert_eq!(multi_prod.next(), Some(vec![0, 3, 4])); - /// assert_eq!(multi_prod.next(), Some(vec![0, 3, 5])); - /// assert_eq!(multi_prod.next(), Some(vec![1, 2, 4])); - /// assert_eq!(multi_prod.next(), Some(vec![1, 2, 5])); - /// assert_eq!(multi_prod.next(), Some(vec![1, 3, 4])); - /// assert_eq!(multi_prod.next(), Some(vec![1, 3, 5])); - /// assert_eq!(multi_prod.next(), None); - /// ``` - /// - /// If the adapted iterator is empty, the result is an iterator yielding a single empty vector. - /// This is known as the [nullary cartesian product](https://en.wikipedia.org/wiki/Empty_product#Nullary_Cartesian_product). 
- /// - /// ``` - /// use itertools::Itertools; - /// let mut nullary_cartesian_product = (0..0).map(|i| (i * 2)..(i * 2 + 2)).multi_cartesian_product(); - /// assert_eq!(nullary_cartesian_product.next(), Some(vec![])); - /// assert_eq!(nullary_cartesian_product.next(), None); - /// ``` - #[cfg(feature = "use_alloc")] - fn multi_cartesian_product(self) -> MultiProduct<::IntoIter> - where - Self: Sized, - Self::Item: IntoIterator, - ::IntoIter: Clone, - ::Item: Clone, - { - adaptors::multi_cartesian_product(self) - } - - /// Return an iterator adaptor that uses the passed-in closure to - /// optionally merge together consecutive elements. - /// - /// The closure `f` is passed two elements, `previous` and `current` and may - /// return either (1) `Ok(combined)` to merge the two values or - /// (2) `Err((previous', current'))` to indicate they can't be merged. - /// In (2), the value `previous'` is emitted by the iterator. - /// Either (1) `combined` or (2) `current'` becomes the previous value - /// when coalesce continues with the next pair of elements to merge. The - /// value that remains at the end is also emitted by the iterator. - /// - /// Iterator element type is `Self::Item`. - /// - /// This iterator is *fused*. - /// - /// ``` - /// use itertools::Itertools; - /// - /// // sum same-sign runs together - /// let data = vec![-1., -2., -3., 3., 1., 0., -1.]; - /// itertools::assert_equal(data.into_iter().coalesce(|x, y| - /// if (x >= 0.) == (y >= 0.) { - /// Ok(x + y) - /// } else { - /// Err((x, y)) - /// }), - /// vec![-6., 4., -1.]); - /// ``` - fn coalesce(self, f: F) -> Coalesce - where - Self: Sized, - F: FnMut(Self::Item, Self::Item) -> Result, - { - adaptors::coalesce(self, f) - } - - /// Remove duplicates from sections of consecutive identical elements. - /// If the iterator is sorted, all elements will be unique. - /// - /// Iterator element type is `Self::Item`. - /// - /// This iterator is *fused*. - /// - /// ``` - /// use itertools::Itertools; - /// - /// let data = vec![1., 1., 2., 3., 3., 2., 2.]; - /// itertools::assert_equal(data.into_iter().dedup(), - /// vec![1., 2., 3., 2.]); - /// ``` - fn dedup(self) -> Dedup - where - Self: Sized, - Self::Item: PartialEq, - { - adaptors::dedup(self) - } - - /// Remove duplicates from sections of consecutive identical elements, - /// determining equality using a comparison function. - /// If the iterator is sorted, all elements will be unique. - /// - /// Iterator element type is `Self::Item`. - /// - /// This iterator is *fused*. - /// - /// ``` - /// use itertools::Itertools; - /// - /// let data = vec![(0, 1.), (1, 1.), (0, 2.), (0, 3.), (1, 3.), (1, 2.), (2, 2.)]; - /// itertools::assert_equal(data.into_iter().dedup_by(|x, y| x.1 == y.1), - /// vec![(0, 1.), (0, 2.), (0, 3.), (1, 2.)]); - /// ``` - fn dedup_by(self, cmp: Cmp) -> DedupBy - where - Self: Sized, - Cmp: FnMut(&Self::Item, &Self::Item) -> bool, - { - adaptors::dedup_by(self, cmp) - } - - /// Remove duplicates from sections of consecutive identical elements, while keeping a count of - /// how many repeated elements were present. - /// If the iterator is sorted, all elements will be unique. - /// - /// Iterator element type is `(usize, Self::Item)`. - /// - /// This iterator is *fused*. 
- /// - /// ``` - /// use itertools::Itertools; - /// - /// let data = vec!['a', 'a', 'b', 'c', 'c', 'b', 'b']; - /// itertools::assert_equal(data.into_iter().dedup_with_count(), - /// vec![(2, 'a'), (1, 'b'), (2, 'c'), (2, 'b')]); - /// ``` - fn dedup_with_count(self) -> DedupWithCount - where - Self: Sized, - { - adaptors::dedup_with_count(self) - } - - /// Remove duplicates from sections of consecutive identical elements, while keeping a count of - /// how many repeated elements were present. - /// This will determine equality using a comparison function. - /// If the iterator is sorted, all elements will be unique. - /// - /// Iterator element type is `(usize, Self::Item)`. - /// - /// This iterator is *fused*. - /// - /// ``` - /// use itertools::Itertools; - /// - /// let data = vec![(0, 'a'), (1, 'a'), (0, 'b'), (0, 'c'), (1, 'c'), (1, 'b'), (2, 'b')]; - /// itertools::assert_equal(data.into_iter().dedup_by_with_count(|x, y| x.1 == y.1), - /// vec![(2, (0, 'a')), (1, (0, 'b')), (2, (0, 'c')), (2, (1, 'b'))]); - /// ``` - fn dedup_by_with_count(self, cmp: Cmp) -> DedupByWithCount - where - Self: Sized, - Cmp: FnMut(&Self::Item, &Self::Item) -> bool, - { - adaptors::dedup_by_with_count(self, cmp) - } - - /// Return an iterator adaptor that produces elements that appear more than once during the - /// iteration. Duplicates are detected using hash and equality. - /// - /// The iterator is stable, returning the duplicate items in the order in which they occur in - /// the adapted iterator. Each duplicate item is returned exactly once. If an item appears more - /// than twice, the second item is the item retained and the rest are discarded. - /// - /// ``` - /// use itertools::Itertools; - /// - /// let data = vec![10, 20, 30, 20, 40, 10, 50]; - /// itertools::assert_equal(data.into_iter().duplicates(), - /// vec![20, 10]); - /// ``` - #[cfg(feature = "use_std")] - fn duplicates(self) -> Duplicates - where - Self: Sized, - Self::Item: Eq + Hash, - { - duplicates_impl::duplicates(self) - } - - /// Return an iterator adaptor that produces elements that appear more than once during the - /// iteration. Duplicates are detected using hash and equality. - /// - /// Duplicates are detected by comparing the key they map to with the keying function `f` by - /// hash and equality. The keys are stored in a hash map in the iterator. - /// - /// The iterator is stable, returning the duplicate items in the order in which they occur in - /// the adapted iterator. Each duplicate item is returned exactly once. If an item appears more - /// than twice, the second item is the item retained and the rest are discarded. - /// - /// ``` - /// use itertools::Itertools; - /// - /// let data = vec!["a", "bb", "aa", "c", "ccc"]; - /// itertools::assert_equal(data.into_iter().duplicates_by(|s| s.len()), - /// vec!["aa", "c"]); - /// ``` - #[cfg(feature = "use_std")] - fn duplicates_by(self, f: F) -> DuplicatesBy - where - Self: Sized, - V: Eq + Hash, - F: FnMut(&Self::Item) -> V, - { - duplicates_impl::duplicates_by(self, f) - } - - /// Return an iterator adaptor that filters out elements that have - /// already been produced once during the iteration. Duplicates - /// are detected using hash and equality. - /// - /// Clones of visited elements are stored in a hash set in the - /// iterator. - /// - /// The iterator is stable, returning the non-duplicate items in the order - /// in which they occur in the adapted iterator. In a set of duplicate - /// items, the first item encountered is the item retained. 
- /// - /// ``` - /// use itertools::Itertools; - /// - /// let data = vec![10, 20, 30, 20, 40, 10, 50]; - /// itertools::assert_equal(data.into_iter().unique(), - /// vec![10, 20, 30, 40, 50]); - /// ``` - #[cfg(feature = "use_std")] - fn unique(self) -> Unique - where - Self: Sized, - Self::Item: Clone + Eq + Hash, - { - unique_impl::unique(self) - } - - /// Return an iterator adaptor that filters out elements that have - /// already been produced once during the iteration. - /// - /// Duplicates are detected by comparing the key they map to - /// with the keying function `f` by hash and equality. - /// The keys are stored in a hash set in the iterator. - /// - /// The iterator is stable, returning the non-duplicate items in the order - /// in which they occur in the adapted iterator. In a set of duplicate - /// items, the first item encountered is the item retained. - /// - /// ``` - /// use itertools::Itertools; - /// - /// let data = vec!["a", "bb", "aa", "c", "ccc"]; - /// itertools::assert_equal(data.into_iter().unique_by(|s| s.len()), - /// vec!["a", "bb", "ccc"]); - /// ``` - #[cfg(feature = "use_std")] - fn unique_by(self, f: F) -> UniqueBy - where - Self: Sized, - V: Eq + Hash, - F: FnMut(&Self::Item) -> V, - { - unique_impl::unique_by(self, f) - } - - /// Return an iterator adaptor that borrows from this iterator and - /// takes items while the closure `accept` returns `true`. - /// - /// This adaptor can only be used on iterators that implement `PeekingNext` - /// like `.peekable()`, `put_back` and a few other collection iterators. - /// - /// The last and rejected element (first `false`) is still available when - /// `peeking_take_while` is done. - /// - /// - /// See also [`.take_while_ref()`](Itertools::take_while_ref) - /// which is a similar adaptor. - fn peeking_take_while(&mut self, accept: F) -> PeekingTakeWhile - where - Self: Sized + PeekingNext, - F: FnMut(&Self::Item) -> bool, - { - peeking_take_while::peeking_take_while(self, accept) - } - - /// Return an iterator adaptor that borrows from a `Clone`-able iterator - /// to only pick off elements while the predicate `accept` returns `true`. - /// - /// It uses the `Clone` trait to restore the original iterator so that the - /// last and rejected element (first `false`) is still available when - /// `take_while_ref` is done. - /// - /// ``` - /// use itertools::Itertools; - /// - /// let mut hexadecimals = "0123456789abcdef".chars(); - /// - /// let decimals = hexadecimals.take_while_ref(|c| c.is_numeric()) - /// .collect::(); - /// assert_eq!(decimals, "0123456789"); - /// assert_eq!(hexadecimals.next(), Some('a')); - /// - /// ``` - fn take_while_ref(&mut self, accept: F) -> TakeWhileRef - where - Self: Clone, - F: FnMut(&Self::Item) -> bool, - { - adaptors::take_while_ref(self, accept) - } - - /// Returns an iterator adaptor that consumes elements while the given - /// predicate is `true`, *including* the element for which the predicate - /// first returned `false`. - /// - /// The [`.take_while()`][std::iter::Iterator::take_while] adaptor is useful - /// when you want items satisfying a predicate, but to know when to stop - /// taking elements, we have to consume that first element that doesn't - /// satisfy the predicate. This adaptor includes that element where - /// [`.take_while()`][std::iter::Iterator::take_while] would drop it. 
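For reference, the `peeking_take_while` adaptor described above has no inline doc-test at this point, so here is a minimal illustrative sketch (not part of the patch; it only assumes the std `Peekable` adaptor, which implements `PeekingNext`):

```rust
use itertools::Itertools;

// `peeking_take_while` needs a `PeekingNext` iterator, e.g. std's `Peekable`.
let mut chars = "0123a456".chars().peekable();

// Take the leading ASCII digits...
let digits: String = chars.peeking_take_while(|c| c.is_ascii_digit()).collect();
assert_eq!(digits, "0123");

// ...and the first rejected element is still available afterwards,
// unlike with a plain `take_while` on the same iterator.
assert_eq!(chars.next(), Some('a'));
```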
- /// - /// The [`.take_while_ref()`][crate::Itertools::take_while_ref] adaptor - /// serves a similar purpose, but this adaptor doesn't require [`Clone`]ing - /// the underlying elements. - /// - /// ```rust - /// # use itertools::Itertools; - /// let items = vec![1, 2, 3, 4, 5]; - /// let filtered: Vec<_> = items - /// .into_iter() - /// .take_while_inclusive(|&n| n % 3 != 0) - /// .collect(); - /// - /// assert_eq!(filtered, vec![1, 2, 3]); - /// ``` - /// - /// ```rust - /// # use itertools::Itertools; - /// let items = vec![1, 2, 3, 4, 5]; - /// - /// let take_while_inclusive_result: Vec<_> = items - /// .iter() - /// .copied() - /// .take_while_inclusive(|&n| n % 3 != 0) - /// .collect(); - /// let take_while_result: Vec<_> = items - /// .into_iter() - /// .take_while(|&n| n % 3 != 0) - /// .collect(); - /// - /// assert_eq!(take_while_inclusive_result, vec![1, 2, 3]); - /// assert_eq!(take_while_result, vec![1, 2]); - /// // both iterators have the same items remaining at this point---the 3 - /// // is lost from the `take_while` vec - /// ``` - /// - /// ```rust - /// # use itertools::Itertools; - /// #[derive(Debug, PartialEq)] - /// struct NoCloneImpl(i32); - /// - /// let non_clonable_items: Vec<_> = vec![1, 2, 3, 4, 5] - /// .into_iter() - /// .map(NoCloneImpl) - /// .collect(); - /// let filtered: Vec<_> = non_clonable_items - /// .into_iter() - /// .take_while_inclusive(|n| n.0 % 3 != 0) - /// .collect(); - /// let expected: Vec<_> = vec![1, 2, 3].into_iter().map(NoCloneImpl).collect(); - /// assert_eq!(filtered, expected); - fn take_while_inclusive(self, accept: F) -> TakeWhileInclusive - where - Self: Sized, - F: FnMut(&Self::Item) -> bool, - { - take_while_inclusive::TakeWhileInclusive::new(self, accept) - } - - /// Return an iterator adaptor that filters `Option` iterator elements - /// and produces `A`. Stops on the first `None` encountered. - /// - /// Iterator element type is `A`, the unwrapped element. - /// - /// ``` - /// use itertools::Itertools; - /// - /// // List all hexadecimal digits - /// itertools::assert_equal( - /// (0..).map(|i| std::char::from_digit(i, 16)).while_some(), - /// "0123456789abcdef".chars()); - /// - /// ``` - fn while_some(self) -> WhileSome - where - Self: Sized + Iterator>, - { - adaptors::while_some(self) - } - - /// Return an iterator adaptor that iterates over the combinations of the - /// elements from an iterator. - /// - /// Iterator element can be any homogeneous tuple of type `Self::Item` with - /// size up to 12. - /// - /// # Guarantees - /// - /// If the adapted iterator is deterministic, - /// this iterator adapter yields items in a reliable order. 
- /// - /// ``` - /// use itertools::Itertools; - /// - /// let mut v = Vec::new(); - /// for (a, b) in (1..5).tuple_combinations() { - /// v.push((a, b)); - /// } - /// assert_eq!(v, vec![(1, 2), (1, 3), (1, 4), (2, 3), (2, 4), (3, 4)]); - /// - /// let mut it = (1..5).tuple_combinations(); - /// assert_eq!(Some((1, 2, 3)), it.next()); - /// assert_eq!(Some((1, 2, 4)), it.next()); - /// assert_eq!(Some((1, 3, 4)), it.next()); - /// assert_eq!(Some((2, 3, 4)), it.next()); - /// assert_eq!(None, it.next()); - /// - /// // this requires a type hint - /// let it = (1..5).tuple_combinations::<(_, _, _)>(); - /// itertools::assert_equal(it, vec![(1, 2, 3), (1, 2, 4), (1, 3, 4), (2, 3, 4)]); - /// - /// // you can also specify the complete type - /// use itertools::TupleCombinations; - /// use std::ops::Range; - /// - /// let it: TupleCombinations, (u32, u32, u32)> = (1..5).tuple_combinations(); - /// itertools::assert_equal(it, vec![(1, 2, 3), (1, 2, 4), (1, 3, 4), (2, 3, 4)]); - /// ``` - fn tuple_combinations(self) -> TupleCombinations - where - Self: Sized + Clone, - Self::Item: Clone, - T: adaptors::HasCombination, - { - adaptors::tuple_combinations(self) - } - - /// Return an iterator adaptor that iterates over the `k`-length combinations of - /// the elements from an iterator. - /// - /// Iterator element type is `Vec`. The iterator produces a new `Vec` per iteration, - /// and clones the iterator elements. - /// - /// # Guarantees - /// - /// If the adapted iterator is deterministic, - /// this iterator adapter yields items in a reliable order. - /// - /// ``` - /// use itertools::Itertools; - /// - /// let it = (1..5).combinations(3); - /// itertools::assert_equal(it, vec![ - /// vec![1, 2, 3], - /// vec![1, 2, 4], - /// vec![1, 3, 4], - /// vec![2, 3, 4], - /// ]); - /// ``` - /// - /// Note: Combinations does not take into account the equality of the iterated values. - /// ``` - /// use itertools::Itertools; - /// - /// let it = vec![1, 2, 2].into_iter().combinations(2); - /// itertools::assert_equal(it, vec![ - /// vec![1, 2], // Note: these are the same - /// vec![1, 2], // Note: these are the same - /// vec![2, 2], - /// ]); - /// ``` - #[cfg(feature = "use_alloc")] - fn combinations(self, k: usize) -> Combinations - where - Self: Sized, - Self::Item: Clone, - { - combinations::combinations(self, k) - } - - /// Return an iterator that iterates over the `k`-length combinations of - /// the elements from an iterator, with replacement. - /// - /// Iterator element type is `Vec`. The iterator produces a new `Vec` per iteration, - /// and clones the iterator elements. - /// - /// ``` - /// use itertools::Itertools; - /// - /// let it = (1..4).combinations_with_replacement(2); - /// itertools::assert_equal(it, vec![ - /// vec![1, 1], - /// vec![1, 2], - /// vec![1, 3], - /// vec![2, 2], - /// vec![2, 3], - /// vec![3, 3], - /// ]); - /// ``` - #[cfg(feature = "use_alloc")] - fn combinations_with_replacement(self, k: usize) -> CombinationsWithReplacement - where - Self: Sized, - Self::Item: Clone, - { - combinations_with_replacement::combinations_with_replacement(self, k) - } - - /// Return an iterator adaptor that iterates over all k-permutations of the - /// elements from an iterator. - /// - /// Iterator element type is `Vec` with length `k`. The iterator - /// produces a new `Vec` per iteration, and clones the iterator elements. - /// - /// If `k` is greater than the length of the input iterator, the resultant - /// iterator adaptor will be empty. 
- /// - /// If you are looking for permutations with replacements, - /// use `repeat_n(iter, k).multi_cartesian_product()` instead. - /// - /// ``` - /// use itertools::Itertools; - /// - /// let perms = (5..8).permutations(2); - /// itertools::assert_equal(perms, vec![ - /// vec![5, 6], - /// vec![5, 7], - /// vec![6, 5], - /// vec![6, 7], - /// vec![7, 5], - /// vec![7, 6], - /// ]); - /// ``` - /// - /// Note: Permutations does not take into account the equality of the iterated values. - /// - /// ``` - /// use itertools::Itertools; - /// - /// let it = vec![2, 2].into_iter().permutations(2); - /// itertools::assert_equal(it, vec![ - /// vec![2, 2], // Note: these are the same - /// vec![2, 2], // Note: these are the same - /// ]); - /// ``` - /// - /// Note: The source iterator is collected lazily, and will not be - /// re-iterated if the permutations adaptor is completed and re-iterated. - #[cfg(feature = "use_alloc")] - fn permutations(self, k: usize) -> Permutations - where - Self: Sized, - Self::Item: Clone, - { - permutations::permutations(self, k) - } - - /// Return an iterator that iterates through the powerset of the elements from an - /// iterator. - /// - /// Iterator element type is `Vec`. The iterator produces a new `Vec` - /// per iteration, and clones the iterator elements. - /// - /// The powerset of a set contains all subsets including the empty set and the full - /// input set. A powerset has length _2^n_ where _n_ is the length of the input - /// set. - /// - /// Each `Vec` produced by this iterator represents a subset of the elements - /// produced by the source iterator. - /// - /// ``` - /// use itertools::Itertools; - /// - /// let sets = (1..4).powerset().collect::>(); - /// itertools::assert_equal(sets, vec![ - /// vec![], - /// vec![1], - /// vec![2], - /// vec![3], - /// vec![1, 2], - /// vec![1, 3], - /// vec![2, 3], - /// vec![1, 2, 3], - /// ]); - /// ``` - #[cfg(feature = "use_alloc")] - fn powerset(self) -> Powerset - where - Self: Sized, - Self::Item: Clone, - { - powerset::powerset(self) - } - - /// Return an iterator adaptor that pads the sequence to a minimum length of - /// `min` by filling missing elements using a closure `f`. - /// - /// Iterator element type is `Self::Item`. - /// - /// ``` - /// use itertools::Itertools; - /// - /// let it = (0..5).pad_using(10, |i| 2*i); - /// itertools::assert_equal(it, vec![0, 1, 2, 3, 4, 10, 12, 14, 16, 18]); - /// - /// let it = (0..10).pad_using(5, |i| 2*i); - /// itertools::assert_equal(it, vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 9]); - /// - /// let it = (0..5).pad_using(10, |i| 2*i).rev(); - /// itertools::assert_equal(it, vec![18, 16, 14, 12, 10, 4, 3, 2, 1, 0]); - /// ``` - fn pad_using(self, min: usize, f: F) -> PadUsing - where - Self: Sized, - F: FnMut(usize) -> Self::Item, - { - pad_tail::pad_using(self, min, f) - } - - /// Return an iterator adaptor that combines each element with a `Position` to - /// ease special-case handling of the first or last elements. 
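As a sketch of the `repeat_n(iter, k).multi_cartesian_product()` alternative suggested in the `permutations` docs above (illustrative only, not part of the patch):

```rust
use itertools::Itertools;

// Length-2 "permutations with replacement" over 0..3: 3^2 = 9 words,
// with the last position cycling fastest.
let words: Vec<Vec<u32>> = itertools::repeat_n(0..3u32, 2)
    .multi_cartesian_product()
    .collect();

assert_eq!(words.len(), 9);
assert_eq!(words.first(), Some(&vec![0, 0]));
assert_eq!(words.last(), Some(&vec![2, 2]));
```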
- /// - /// Iterator element type is - /// [`(Position, Self::Item)`](Position) - /// - /// ``` - /// use itertools::{Itertools, Position}; - /// - /// let it = (0..4).with_position(); - /// itertools::assert_equal(it, - /// vec![(Position::First, 0), - /// (Position::Middle, 1), - /// (Position::Middle, 2), - /// (Position::Last, 3)]); - /// - /// let it = (0..1).with_position(); - /// itertools::assert_equal(it, vec![(Position::Only, 0)]); - /// ``` - fn with_position(self) -> WithPosition - where - Self: Sized, - { - with_position::with_position(self) - } - - /// Return an iterator adaptor that yields the indices of all elements - /// satisfying a predicate, counted from the start of the iterator. - /// - /// Equivalent to `iter.enumerate().filter(|(_, v)| predicate(*v)).map(|(i, _)| i)`. - /// - /// ``` - /// use itertools::Itertools; - /// - /// let data = vec![1, 2, 3, 3, 4, 6, 7, 9]; - /// itertools::assert_equal(data.iter().positions(|v| v % 2 == 0), vec![1, 4, 5]); - /// - /// itertools::assert_equal(data.iter().positions(|v| v % 2 == 1).rev(), vec![7, 6, 3, 2, 0]); - /// ``` - fn positions
<P>
(self, predicate: P) -> Positions - where - Self: Sized, - P: FnMut(Self::Item) -> bool, - { - adaptors::positions(self, predicate) - } - - /// Return an iterator adaptor that applies a mutating function - /// to each element before yielding it. - /// - /// ``` - /// use itertools::Itertools; - /// - /// let input = vec![vec![1], vec![3, 2, 1]]; - /// let it = input.into_iter().update(|mut v| v.push(0)); - /// itertools::assert_equal(it, vec![vec![1, 0], vec![3, 2, 1, 0]]); - /// ``` - fn update(self, updater: F) -> Update - where - Self: Sized, - F: FnMut(&mut Self::Item), - { - adaptors::update(self, updater) - } - - // non-adaptor methods - /// Advances the iterator and returns the next items grouped in a tuple of - /// a specific size (up to 12). - /// - /// If there are enough elements to be grouped in a tuple, then the tuple is - /// returned inside `Some`, otherwise `None` is returned. - /// - /// ``` - /// use itertools::Itertools; - /// - /// let mut iter = 1..5; - /// - /// assert_eq!(Some((1, 2)), iter.next_tuple()); - /// ``` - fn next_tuple(&mut self) -> Option - where - Self: Sized + Iterator, - T: traits::HomogeneousTuple, - { - T::collect_from_iter_no_buf(self) - } - - /// Collects all items from the iterator into a tuple of a specific size - /// (up to 12). - /// - /// If the number of elements inside the iterator is **exactly** equal to - /// the tuple size, then the tuple is returned inside `Some`, otherwise - /// `None` is returned. - /// - /// ``` - /// use itertools::Itertools; - /// - /// let iter = 1..3; - /// - /// if let Some((x, y)) = iter.collect_tuple() { - /// assert_eq!((x, y), (1, 2)) - /// } else { - /// panic!("Expected two elements") - /// } - /// ``` - fn collect_tuple(mut self) -> Option - where - Self: Sized + Iterator, - T: traits::HomogeneousTuple, - { - match self.next_tuple() { - elt @ Some(_) => match self.next() { - Some(_) => None, - None => elt, - }, - _ => None, - } - } - - /// Find the position and value of the first element satisfying a predicate. - /// - /// The iterator is not advanced past the first element found. - /// - /// ``` - /// use itertools::Itertools; - /// - /// let text = "Hα"; - /// assert_eq!(text.chars().find_position(|ch| ch.is_lowercase()), Some((1, 'α'))); - /// ``` - fn find_position
<P>
(&mut self, mut pred: P) -> Option<(usize, Self::Item)> - where - P: FnMut(&Self::Item) -> bool, - { - self.enumerate().find(|(_, elt)| pred(elt)) - } - /// Find the value of the first element satisfying a predicate or return the last element, if any. - /// - /// The iterator is not advanced past the first element found. - /// - /// ``` - /// use itertools::Itertools; - /// - /// let numbers = [1, 2, 3, 4]; - /// assert_eq!(numbers.iter().find_or_last(|&&x| x > 5), Some(&4)); - /// assert_eq!(numbers.iter().find_or_last(|&&x| x > 2), Some(&3)); - /// assert_eq!(std::iter::empty::().find_or_last(|&x| x > 5), None); - /// ``` - fn find_or_last
<P>
(mut self, mut predicate: P) -> Option - where - Self: Sized, - P: FnMut(&Self::Item) -> bool, - { - let mut prev = None; - self.find_map(|x| { - if predicate(&x) { - Some(x) - } else { - prev = Some(x); - None - } - }) - .or(prev) - } - /// Find the value of the first element satisfying a predicate or return the first element, if any. - /// - /// The iterator is not advanced past the first element found. - /// - /// ``` - /// use itertools::Itertools; - /// - /// let numbers = [1, 2, 3, 4]; - /// assert_eq!(numbers.iter().find_or_first(|&&x| x > 5), Some(&1)); - /// assert_eq!(numbers.iter().find_or_first(|&&x| x > 2), Some(&3)); - /// assert_eq!(std::iter::empty::().find_or_first(|&x| x > 5), None); - /// ``` - fn find_or_first
<P>
(mut self, mut predicate: P) -> Option - where - Self: Sized, - P: FnMut(&Self::Item) -> bool, - { - let first = self.next()?; - Some(if predicate(&first) { - first - } else { - self.find(|x| predicate(x)).unwrap_or(first) - }) - } - /// Returns `true` if the given item is present in this iterator. - /// - /// This method is short-circuiting. If the given item is present in this - /// iterator, this method will consume the iterator up-to-and-including - /// the item. If the given item is not present in this iterator, the - /// iterator will be exhausted. - /// - /// ``` - /// use itertools::Itertools; - /// - /// #[derive(PartialEq, Debug)] - /// enum Enum { A, B, C, D, E, } - /// - /// let mut iter = vec![Enum::A, Enum::B, Enum::C, Enum::D].into_iter(); - /// - /// // search `iter` for `B` - /// assert_eq!(iter.contains(&Enum::B), true); - /// // `B` was found, so the iterator now rests at the item after `B` (i.e, `C`). - /// assert_eq!(iter.next(), Some(Enum::C)); - /// - /// // search `iter` for `E` - /// assert_eq!(iter.contains(&Enum::E), false); - /// // `E` wasn't found, so `iter` is now exhausted - /// assert_eq!(iter.next(), None); - /// ``` - fn contains(&mut self, query: &Q) -> bool - where - Self: Sized, - Self::Item: Borrow, - Q: PartialEq, - { - self.any(|x| x.borrow() == query) - } - - /// Check whether all elements compare equal. - /// - /// Empty iterators are considered to have equal elements: - /// - /// ``` - /// use itertools::Itertools; - /// - /// let data = vec![1, 1, 1, 2, 2, 3, 3, 3, 4, 5, 5]; - /// assert!(!data.iter().all_equal()); - /// assert!(data[0..3].iter().all_equal()); - /// assert!(data[3..5].iter().all_equal()); - /// assert!(data[5..8].iter().all_equal()); - /// - /// let data : Option = None; - /// assert!(data.into_iter().all_equal()); - /// ``` - fn all_equal(&mut self) -> bool - where - Self: Sized, - Self::Item: PartialEq, - { - match self.next() { - None => true, - Some(a) => self.all(|x| a == x), - } - } - - /// If there are elements and they are all equal, return a single copy of that element. - /// If there are no elements, return an Error containing None. - /// If there are elements and they are not all equal, return a tuple containing the first - /// two non-equal elements found. - /// - /// ``` - /// use itertools::Itertools; - /// - /// let data = vec![1, 1, 1, 2, 2, 3, 3, 3, 4, 5, 5]; - /// assert_eq!(data.iter().all_equal_value(), Err(Some((&1, &2)))); - /// assert_eq!(data[0..3].iter().all_equal_value(), Ok(&1)); - /// assert_eq!(data[3..5].iter().all_equal_value(), Ok(&2)); - /// assert_eq!(data[5..8].iter().all_equal_value(), Ok(&3)); - /// - /// let data : Option = None; - /// assert_eq!(data.into_iter().all_equal_value(), Err(None)); - /// ``` - #[allow(clippy::type_complexity)] - fn all_equal_value(&mut self) -> Result> - where - Self: Sized, - Self::Item: PartialEq, - { - let first = self.next().ok_or(None)?; - let other = self.find(|x| x != &first); - if let Some(other) = other { - Err(Some((first, other))) - } else { - Ok(first) - } - } - - /// Check whether all elements are unique (non equal). 
- /// - /// Empty iterators are considered to have unique elements: - /// - /// ``` - /// use itertools::Itertools; - /// - /// let data = vec![1, 2, 3, 4, 1, 5]; - /// assert!(!data.iter().all_unique()); - /// assert!(data[0..4].iter().all_unique()); - /// assert!(data[1..6].iter().all_unique()); - /// - /// let data : Option = None; - /// assert!(data.into_iter().all_unique()); - /// ``` - #[cfg(feature = "use_std")] - fn all_unique(&mut self) -> bool - where - Self: Sized, - Self::Item: Eq + Hash, - { - let mut used = HashSet::new(); - self.all(move |elt| used.insert(elt)) - } - - /// Consume the first `n` elements from the iterator eagerly, - /// and return the same iterator again. - /// - /// It works similarly to `.skip(n)` except it is eager and - /// preserves the iterator type. - /// - /// ``` - /// use itertools::Itertools; - /// - /// let mut iter = "αβγ".chars().dropping(2); - /// itertools::assert_equal(iter, "γ".chars()); - /// ``` - /// - /// *Fusing notes: if the iterator is exhausted by dropping, - /// the result of calling `.next()` again depends on the iterator implementation.* - fn dropping(mut self, n: usize) -> Self - where - Self: Sized, - { - if n > 0 { - self.nth(n - 1); - } - self - } - - /// Consume the last `n` elements from the iterator eagerly, - /// and return the same iterator again. - /// - /// This is only possible on double ended iterators. `n` may be - /// larger than the number of elements. - /// - /// Note: This method is eager, dropping the back elements immediately and - /// preserves the iterator type. - /// - /// ``` - /// use itertools::Itertools; - /// - /// let init = vec![0, 3, 6, 9].into_iter().dropping_back(1); - /// itertools::assert_equal(init, vec![0, 3, 6]); - /// ``` - fn dropping_back(mut self, n: usize) -> Self - where - Self: Sized + DoubleEndedIterator, - { - if n > 0 { - (&mut self).rev().nth(n - 1); - } - self - } - - /// Combine all an iterator's elements into one element by using [`Extend`]. - /// - /// This combinator will extend the first item with each of the rest of the - /// items of the iterator. If the iterator is empty, the default value of - /// `I::Item` is returned. - /// - /// ```rust - /// use itertools::Itertools; - /// - /// let input = vec![vec![1], vec![2, 3], vec![4, 5, 6]]; - /// assert_eq!(input.into_iter().concat(), - /// vec![1, 2, 3, 4, 5, 6]); - /// ``` - fn concat(self) -> Self::Item - where - Self: Sized, - Self::Item: - Extend<<::Item as IntoIterator>::Item> + IntoIterator + Default, - { - concat(self) - } - - /// `.collect_vec()` is simply a type specialization of [`Iterator::collect`], - /// for convenience. - #[cfg(feature = "use_alloc")] - fn collect_vec(self) -> Vec - where - Self: Sized, - { - self.collect() - } - - /// `.try_collect()` is more convenient way of writing - /// `.collect::>()` - /// - /// # Example - /// - /// ``` - /// use std::{fs, io}; - /// use itertools::Itertools; - /// - /// fn process_dir_entries(entries: &[fs::DirEntry]) { - /// // ... - /// } - /// - /// fn do_stuff() -> std::io::Result<()> { - /// let entries: Vec<_> = fs::read_dir(".")?.try_collect()?; - /// process_dir_entries(&entries); - /// - /// Ok(()) - /// } - /// ``` - fn try_collect(self) -> Result - where - Self: Sized + Iterator>, - Result: FromIterator>, - { - self.collect() - } - - /// Assign to each reference in `self` from the `from` iterator, - /// stopping at the shortest of the two iterators. 
- /// - /// The `from` iterator is queried for its next element before the `self` - /// iterator, and if either is exhausted the method is done. - /// - /// Return the number of elements written. - /// - /// ``` - /// use itertools::Itertools; - /// - /// let mut xs = [0; 4]; - /// xs.iter_mut().set_from(1..); - /// assert_eq!(xs, [1, 2, 3, 4]); - /// ``` - #[inline] - fn set_from<'a, A: 'a, J>(&mut self, from: J) -> usize - where - Self: Iterator, - J: IntoIterator, - { - from.into_iter() - .zip(self) - .map(|(new, old)| *old = new) - .count() - } - - /// Combine all iterator elements into one `String`, separated by `sep`. - /// - /// Use the `Display` implementation of each element. - /// - /// ``` - /// use itertools::Itertools; - /// - /// assert_eq!(["a", "b", "c"].iter().join(", "), "a, b, c"); - /// assert_eq!([1, 2, 3].iter().join(", "), "1, 2, 3"); - /// ``` - #[cfg(feature = "use_alloc")] - fn join(&mut self, sep: &str) -> String - where - Self::Item: std::fmt::Display, - { - match self.next() { - None => String::new(), - Some(first_elt) => { - // estimate lower bound of capacity needed - let (lower, _) = self.size_hint(); - let mut result = String::with_capacity(sep.len() * lower); - write!(&mut result, "{}", first_elt).unwrap(); - self.for_each(|elt| { - result.push_str(sep); - write!(&mut result, "{}", elt).unwrap(); - }); - result - } - } - } - - /// Format all iterator elements, separated by `sep`. - /// - /// All elements are formatted (any formatting trait) - /// with `sep` inserted between each element. - /// - /// **Panics** if the formatter helper is formatted more than once. - /// - /// ``` - /// use itertools::Itertools; - /// - /// let data = [1.1, 2.71828, -3.]; - /// assert_eq!( - /// format!("{:.2}", data.iter().format(", ")), - /// "1.10, 2.72, -3.00"); - /// ``` - fn format(self, sep: &str) -> Format - where - Self: Sized, - { - format::new_format_default(self, sep) - } - - /// Format all iterator elements, separated by `sep`. - /// - /// This is a customizable version of [`.format()`](Itertools::format). - /// - /// The supplied closure `format` is called once per iterator element, - /// with two arguments: the element and a callback that takes a - /// `&Display` value, i.e. any reference to type that implements `Display`. - /// - /// Using `&format_args!(...)` is the most versatile way to apply custom - /// element formatting. The callback can be called multiple times if needed. - /// - /// **Panics** if the formatter helper is formatted more than once. - /// - /// ``` - /// use itertools::Itertools; - /// - /// let data = [1.1, 2.71828, -3.]; - /// let data_formatter = data.iter().format_with(", ", |elt, f| f(&format_args!("{:.2}", elt))); - /// assert_eq!(format!("{}", data_formatter), - /// "1.10, 2.72, -3.00"); - /// - /// // .format_with() is recursively composable - /// let matrix = [[1., 2., 3.], - /// [4., 5., 6.]]; - /// let matrix_formatter = matrix.iter().format_with("\n", |row, f| { - /// f(&row.iter().format_with(", ", |elt, g| g(&elt))) - /// }); - /// assert_eq!(format!("{}", matrix_formatter), - /// "1, 2, 3\n4, 5, 6"); - /// - /// - /// ``` - fn format_with(self, sep: &str, format: F) -> FormatWith - where - Self: Sized, - F: FnMut(Self::Item, &mut dyn FnMut(&dyn fmt::Display) -> fmt::Result) -> fmt::Result, - { - format::new_format(self, sep, format) - } - - /// Fold `Result` values from an iterator. - /// - /// Only `Ok` values are folded. If no error is encountered, the folded - /// value is returned inside `Ok`. 
Otherwise, the operation terminates - /// and returns the first `Err` value it encounters. No iterator elements are - /// consumed after the first error. - /// - /// The first accumulator value is the `start` parameter. - /// Each iteration passes the accumulator value and the next value inside `Ok` - /// to the fold function `f` and its return value becomes the new accumulator value. - /// - /// For example the sequence *Ok(1), Ok(2), Ok(3)* will result in a - /// computation like this: - /// - /// ```no_run - /// # let start = 0; - /// # let f = |x, y| x + y; - /// let mut accum = start; - /// accum = f(accum, 1); - /// accum = f(accum, 2); - /// accum = f(accum, 3); - /// ``` - /// - /// With a `start` value of 0 and an addition as folding function, - /// this effectively results in *((0 + 1) + 2) + 3* - /// - /// ``` - /// use std::ops::Add; - /// use itertools::Itertools; - /// - /// let values = [1, 2, -2, -1, 2, 1]; - /// assert_eq!( - /// values.iter() - /// .map(Ok::<_, ()>) - /// .fold_ok(0, Add::add), - /// Ok(3) - /// ); - /// assert!( - /// values.iter() - /// .map(|&x| if x >= 0 { Ok(x) } else { Err("Negative number") }) - /// .fold_ok(0, Add::add) - /// .is_err() - /// ); - /// ``` - fn fold_ok(&mut self, mut start: B, mut f: F) -> Result - where - Self: Iterator>, - F: FnMut(B, A) -> B, - { - for elt in self { - match elt { - Ok(v) => start = f(start, v), - Err(u) => return Err(u), - } - } - Ok(start) - } - - /// Fold `Option` values from an iterator. - /// - /// Only `Some` values are folded. If no `None` is encountered, the folded - /// value is returned inside `Some`. Otherwise, the operation terminates - /// and returns `None`. No iterator elements are consumed after the `None`. - /// - /// This is the `Option` equivalent to [`fold_ok`](Itertools::fold_ok). - /// - /// ``` - /// use std::ops::Add; - /// use itertools::Itertools; - /// - /// let mut values = vec![Some(1), Some(2), Some(-2)].into_iter(); - /// assert_eq!(values.fold_options(5, Add::add), Some(5 + 1 + 2 - 2)); - /// - /// let mut more_values = vec![Some(2), None, Some(0)].into_iter(); - /// assert!(more_values.fold_options(0, Add::add).is_none()); - /// assert_eq!(more_values.next().unwrap(), Some(0)); - /// ``` - fn fold_options(&mut self, mut start: B, mut f: F) -> Option - where - Self: Iterator>, - F: FnMut(B, A) -> B, - { - for elt in self { - match elt { - Some(v) => start = f(start, v), - None => return None, - } - } - Some(start) - } - - /// Accumulator of the elements in the iterator. - /// - /// Like `.fold()`, without a base case. If the iterator is - /// empty, return `None`. With just one element, return it. - /// Otherwise elements are accumulated in sequence using the closure `f`. - /// - /// ``` - /// use itertools::Itertools; - /// - /// assert_eq!((0..10).fold1(|x, y| x + y).unwrap_or(0), 45); - /// assert_eq!((0..0).fold1(|x, y| x * y), None); - /// ``` - #[deprecated( - note = "Use [`Iterator::reduce`](https://doc.rust-lang.org/std/iter/trait.Iterator.html#method.reduce) instead", - since = "0.10.2" - )] - fn fold1(mut self, f: F) -> Option - where - F: FnMut(Self::Item, Self::Item) -> Self::Item, - Self: Sized, - { - self.next().map(move |x| self.fold(x, f)) - } - - /// Accumulate the elements in the iterator in a tree-like manner. - /// - /// You can think of it as, while there's more than one item, repeatedly - /// combining adjacent items. It does so in bottom-up-merge-sort order, - /// however, so that it needs only logarithmic stack space. 
- /// - /// This produces a call tree like the following (where the calls under - /// an item are done after reading that item): - /// - /// ```text - /// 1 2 3 4 5 6 7 - /// │ │ │ │ │ │ │ - /// └─f └─f └─f │ - /// │ │ │ │ - /// └───f └─f - /// │ │ - /// └─────f - /// ``` - /// - /// Which, for non-associative functions, will typically produce a different - /// result than the linear call tree used by [`Iterator::reduce`]: - /// - /// ```text - /// 1 2 3 4 5 6 7 - /// │ │ │ │ │ │ │ - /// └─f─f─f─f─f─f - /// ``` - /// - /// If `f` is associative you should also decide carefully: - /// - /// - if `f` is a trivial operation like `u32::wrapping_add`, prefer the normal - /// [`Iterator::reduce`] instead since it will most likely result in the generation of simpler - /// code because the compiler is able to optimize it - /// - otherwise if `f` is non-trivial like `format!`, you should use `tree_reduce` since it - /// reduces the number of operations from `O(n)` to `O(ln(n))` - /// - /// Here "non-trivial" means: - /// - /// - any allocating operation - /// - any function that is a composition of many operations - /// - /// ``` - /// use itertools::Itertools; - /// - /// // The same tree as above - /// let num_strings = (1..8).map(|x| x.to_string()); - /// assert_eq!(num_strings.tree_reduce(|x, y| format!("f({}, {})", x, y)), - /// Some(String::from("f(f(f(1, 2), f(3, 4)), f(f(5, 6), 7))"))); - /// - /// // Like fold1, an empty iterator produces None - /// assert_eq!((0..0).tree_reduce(|x, y| x * y), None); - /// - /// // tree_reduce matches fold1 for associative operations... - /// assert_eq!((0..10).tree_reduce(|x, y| x + y), - /// (0..10).fold1(|x, y| x + y)); - /// // ...but not for non-associative ones - /// assert_ne!((0..10).tree_reduce(|x, y| x - y), - /// (0..10).fold1(|x, y| x - y)); - /// ``` - fn tree_reduce(mut self, mut f: F) -> Option - where - F: FnMut(Self::Item, Self::Item) -> Self::Item, - Self: Sized, - { - type State = Result>; - - fn inner0(it: &mut II, f: &mut FF) -> State - where - II: Iterator, - FF: FnMut(T, T) -> T, - { - // This function could be replaced with `it.next().ok_or(None)`, - // but half the useful tree_reduce work is combining adjacent items, - // so put that in a form that LLVM is more likely to optimize well. - - let a = if let Some(v) = it.next() { - v - } else { - return Err(None); - }; - let b = if let Some(v) = it.next() { - v - } else { - return Err(Some(a)); - }; - Ok(f(a, b)) - } - - fn inner(stop: usize, it: &mut II, f: &mut FF) -> State - where - II: Iterator, - FF: FnMut(T, T) -> T, - { - let mut x = inner0(it, f)?; - for height in 0..stop { - // Try to get another tree the same size with which to combine it, - // creating a new tree that's twice as big for next time around. - let next = if height == 0 { - inner0(it, f) - } else { - inner(height, it, f) - }; - match next { - Ok(y) => x = f(x, y), - - // If we ran out of items, combine whatever we did manage - // to get. It's better combined with the current value - // than something in a parent frame, because the tree in - // the parent is always as least as big as this one. - Err(None) => return Err(Some(x)), - Err(Some(y)) => return Err(Some(f(x, y))), - } - } - Ok(x) - } - - match inner(usize::MAX, &mut self, &mut f) { - Err(x) => x, - _ => unreachable!(), - } - } - - /// See [`.tree_reduce()`](Itertools::tree_reduce). 
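To make the bottom-up combination order of the `tree_reduce` implementation above concrete, here is an illustrative trace (a sketch derived from the code shown here, not part of the patch); the recorded pairs mirror the call tree from the docs:

```rust
use itertools::Itertools;

// Record each invocation of the combining closure.
let mut calls = Vec::new();
let total = (1..8).tree_reduce(|x, y| {
    calls.push((x, y));
    x + y
});

assert_eq!(total, Some(28));
// Adjacent items are combined first, then the partial sums, matching
// f(f(f(1, 2), f(3, 4)), f(f(5, 6), 7)).
assert_eq!(calls, vec![(1, 2), (3, 4), (3, 7), (5, 6), (11, 7), (10, 18)]);
```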
- #[deprecated(note = "Use .tree_reduce() instead", since = "0.13.0")] - fn tree_fold1(self, f: F) -> Option - where - F: FnMut(Self::Item, Self::Item) -> Self::Item, - Self: Sized, - { - self.tree_reduce(f) - } - - /// An iterator method that applies a function, producing a single, final value. - /// - /// `fold_while()` is basically equivalent to [`Iterator::fold`] but with additional support for - /// early exit via short-circuiting. - /// - /// ``` - /// use itertools::Itertools; - /// use itertools::FoldWhile::{Continue, Done}; - /// - /// let numbers = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]; - /// - /// let mut result = 0; - /// - /// // for loop: - /// for i in &numbers { - /// if *i > 5 { - /// break; - /// } - /// result = result + i; - /// } - /// - /// // fold: - /// let result2 = numbers.iter().fold(0, |acc, x| { - /// if *x > 5 { acc } else { acc + x } - /// }); - /// - /// // fold_while: - /// let result3 = numbers.iter().fold_while(0, |acc, x| { - /// if *x > 5 { Done(acc) } else { Continue(acc + x) } - /// }).into_inner(); - /// - /// // they're the same - /// assert_eq!(result, result2); - /// assert_eq!(result2, result3); - /// ``` - /// - /// The big difference between the computations of `result2` and `result3` is that while - /// `fold()` called the provided closure for every item of the callee iterator, - /// `fold_while()` actually stopped iterating as soon as it encountered `Fold::Done(_)`. - fn fold_while(&mut self, init: B, mut f: F) -> FoldWhile - where - Self: Sized, - F: FnMut(B, Self::Item) -> FoldWhile, - { - use Result::{Err as Break, Ok as Continue}; - - let result = self.try_fold( - init, - #[inline(always)] - |acc, v| match f(acc, v) { - FoldWhile::Continue(acc) => Continue(acc), - FoldWhile::Done(acc) => Break(acc), - }, - ); - - match result { - Continue(acc) => FoldWhile::Continue(acc), - Break(acc) => FoldWhile::Done(acc), - } - } - - /// Iterate over the entire iterator and add all the elements. - /// - /// An empty iterator returns `None`, otherwise `Some(sum)`. - /// - /// # Panics - /// - /// When calling `sum1()` and a primitive integer type is being returned, this - /// method will panic if the computation overflows and debug assertions are - /// enabled. - /// - /// # Examples - /// - /// ``` - /// use itertools::Itertools; - /// - /// let empty_sum = (1..1).sum1::(); - /// assert_eq!(empty_sum, None); - /// - /// let nonempty_sum = (1..11).sum1::(); - /// assert_eq!(nonempty_sum, Some(55)); - /// ``` - fn sum1(mut self) -> Option - where - Self: Sized, - S: std::iter::Sum, - { - self.next().map(|first| once(first).chain(self).sum()) - } - - /// Iterate over the entire iterator and multiply all the elements. - /// - /// An empty iterator returns `None`, otherwise `Some(product)`. - /// - /// # Panics - /// - /// When calling `product1()` and a primitive integer type is being returned, - /// method will panic if the computation overflows and debug assertions are - /// enabled. - /// - /// # Examples - /// ``` - /// use itertools::Itertools; - /// - /// let empty_product = (1..1).product1::(); - /// assert_eq!(empty_product, None); - /// - /// let nonempty_product = (1..11).product1::(); - /// assert_eq!(nonempty_product, Some(3628800)); - /// ``` - fn product1

<P>(mut self) -> Option<P>

- where - Self: Sized, - P: std::iter::Product, - { - self.next().map(|first| once(first).chain(self).product()) - } - - /// Sort all iterator elements into a new iterator in ascending order. - /// - /// **Note:** This consumes the entire iterator, uses the - /// [`slice::sort_unstable`] method and returns the result as a new - /// iterator that owns its elements. - /// - /// This sort is unstable (i.e., may reorder equal elements). - /// - /// The sorted iterator, if directly collected to a `Vec`, is converted - /// without any extra copying or allocation cost. - /// - /// ``` - /// use itertools::Itertools; - /// - /// // sort the letters of the text in ascending order - /// let text = "bdacfe"; - /// itertools::assert_equal(text.chars().sorted_unstable(), - /// "abcdef".chars()); - /// ``` - #[cfg(feature = "use_alloc")] - fn sorted_unstable(self) -> VecIntoIter - where - Self: Sized, - Self::Item: Ord, - { - // Use .sort_unstable() directly since it is not quite identical with - // .sort_by(Ord::cmp) - let mut v = Vec::from_iter(self); - v.sort_unstable(); - v.into_iter() - } - - /// Sort all iterator elements into a new iterator in ascending order. - /// - /// **Note:** This consumes the entire iterator, uses the - /// [`slice::sort_unstable_by`] method and returns the result as a new - /// iterator that owns its elements. - /// - /// This sort is unstable (i.e., may reorder equal elements). - /// - /// The sorted iterator, if directly collected to a `Vec`, is converted - /// without any extra copying or allocation cost. - /// - /// ``` - /// use itertools::Itertools; - /// - /// // sort people in descending order by age - /// let people = vec![("Jane", 20), ("John", 18), ("Jill", 30), ("Jack", 27)]; - /// - /// let oldest_people_first = people - /// .into_iter() - /// .sorted_unstable_by(|a, b| Ord::cmp(&b.1, &a.1)) - /// .map(|(person, _age)| person); - /// - /// itertools::assert_equal(oldest_people_first, - /// vec!["Jill", "Jack", "Jane", "John"]); - /// ``` - #[cfg(feature = "use_alloc")] - fn sorted_unstable_by(self, cmp: F) -> VecIntoIter - where - Self: Sized, - F: FnMut(&Self::Item, &Self::Item) -> Ordering, - { - let mut v = Vec::from_iter(self); - v.sort_unstable_by(cmp); - v.into_iter() - } - - /// Sort all iterator elements into a new iterator in ascending order. - /// - /// **Note:** This consumes the entire iterator, uses the - /// [`slice::sort_unstable_by_key`] method and returns the result as a new - /// iterator that owns its elements. - /// - /// This sort is unstable (i.e., may reorder equal elements). - /// - /// The sorted iterator, if directly collected to a `Vec`, is converted - /// without any extra copying or allocation cost. - /// - /// ``` - /// use itertools::Itertools; - /// - /// // sort people in descending order by age - /// let people = vec![("Jane", 20), ("John", 18), ("Jill", 30), ("Jack", 27)]; - /// - /// let oldest_people_first = people - /// .into_iter() - /// .sorted_unstable_by_key(|x| -x.1) - /// .map(|(person, _age)| person); - /// - /// itertools::assert_equal(oldest_people_first, - /// vec!["Jill", "Jack", "Jane", "John"]); - /// ``` - #[cfg(feature = "use_alloc")] - fn sorted_unstable_by_key(self, f: F) -> VecIntoIter - where - Self: Sized, - K: Ord, - F: FnMut(&Self::Item) -> K, - { - let mut v = Vec::from_iter(self); - v.sort_unstable_by_key(f); - v.into_iter() - } - - /// Sort all iterator elements into a new iterator in ascending order. 
- /// - /// **Note:** This consumes the entire iterator, uses the - /// [`slice::sort`] method and returns the result as a new - /// iterator that owns its elements. - /// - /// This sort is stable (i.e., does not reorder equal elements). - /// - /// The sorted iterator, if directly collected to a `Vec`, is converted - /// without any extra copying or allocation cost. - /// - /// ``` - /// use itertools::Itertools; - /// - /// // sort the letters of the text in ascending order - /// let text = "bdacfe"; - /// itertools::assert_equal(text.chars().sorted(), - /// "abcdef".chars()); - /// ``` - #[cfg(feature = "use_alloc")] - fn sorted(self) -> VecIntoIter - where - Self: Sized, - Self::Item: Ord, - { - // Use .sort() directly since it is not quite identical with - // .sort_by(Ord::cmp) - let mut v = Vec::from_iter(self); - v.sort(); - v.into_iter() - } - - /// Sort all iterator elements into a new iterator in ascending order. - /// - /// **Note:** This consumes the entire iterator, uses the - /// [`slice::sort_by`] method and returns the result as a new - /// iterator that owns its elements. - /// - /// This sort is stable (i.e., does not reorder equal elements). - /// - /// The sorted iterator, if directly collected to a `Vec`, is converted - /// without any extra copying or allocation cost. - /// - /// ``` - /// use itertools::Itertools; - /// - /// // sort people in descending order by age - /// let people = vec![("Jane", 20), ("John", 18), ("Jill", 30), ("Jack", 30)]; - /// - /// let oldest_people_first = people - /// .into_iter() - /// .sorted_by(|a, b| Ord::cmp(&b.1, &a.1)) - /// .map(|(person, _age)| person); - /// - /// itertools::assert_equal(oldest_people_first, - /// vec!["Jill", "Jack", "Jane", "John"]); - /// ``` - #[cfg(feature = "use_alloc")] - fn sorted_by(self, cmp: F) -> VecIntoIter - where - Self: Sized, - F: FnMut(&Self::Item, &Self::Item) -> Ordering, - { - let mut v = Vec::from_iter(self); - v.sort_by(cmp); - v.into_iter() - } - - /// Sort all iterator elements into a new iterator in ascending order. - /// - /// **Note:** This consumes the entire iterator, uses the - /// [`slice::sort_by_key`] method and returns the result as a new - /// iterator that owns its elements. - /// - /// This sort is stable (i.e., does not reorder equal elements). - /// - /// The sorted iterator, if directly collected to a `Vec`, is converted - /// without any extra copying or allocation cost. - /// - /// ``` - /// use itertools::Itertools; - /// - /// // sort people in descending order by age - /// let people = vec![("Jane", 20), ("John", 18), ("Jill", 30), ("Jack", 30)]; - /// - /// let oldest_people_first = people - /// .into_iter() - /// .sorted_by_key(|x| -x.1) - /// .map(|(person, _age)| person); - /// - /// itertools::assert_equal(oldest_people_first, - /// vec!["Jill", "Jack", "Jane", "John"]); - /// ``` - #[cfg(feature = "use_alloc")] - fn sorted_by_key(self, f: F) -> VecIntoIter - where - Self: Sized, - K: Ord, - F: FnMut(&Self::Item) -> K, - { - let mut v = Vec::from_iter(self); - v.sort_by_key(f); - v.into_iter() - } - - /// Sort all iterator elements into a new iterator in ascending order. The key function is - /// called exactly once per key. - /// - /// **Note:** This consumes the entire iterator, uses the - /// [`slice::sort_by_cached_key`] method and returns the result as a new - /// iterator that owns its elements. - /// - /// This sort is stable (i.e., does not reorder equal elements). 
- /// - /// The sorted iterator, if directly collected to a `Vec`, is converted - /// without any extra copying or allocation cost. - /// - /// ``` - /// use itertools::Itertools; - /// - /// // sort people in descending order by age - /// let people = vec![("Jane", 20), ("John", 18), ("Jill", 30), ("Jack", 30)]; - /// - /// let oldest_people_first = people - /// .into_iter() - /// .sorted_by_cached_key(|x| -x.1) - /// .map(|(person, _age)| person); - /// - /// itertools::assert_equal(oldest_people_first, - /// vec!["Jill", "Jack", "Jane", "John"]); - /// ``` - #[cfg(feature = "use_alloc")] - fn sorted_by_cached_key(self, f: F) -> VecIntoIter - where - Self: Sized, - K: Ord, - F: FnMut(&Self::Item) -> K, - { - let mut v = Vec::from_iter(self); - v.sort_by_cached_key(f); - v.into_iter() - } - - /// Sort the k smallest elements into a new iterator, in ascending order. - /// - /// **Note:** This consumes the entire iterator, and returns the result - /// as a new iterator that owns its elements. If the input contains - /// less than k elements, the result is equivalent to `self.sorted()`. - /// - /// This is guaranteed to use `k * sizeof(Self::Item) + O(1)` memory - /// and `O(n log k)` time, with `n` the number of elements in the input. - /// - /// The sorted iterator, if directly collected to a `Vec`, is converted - /// without any extra copying or allocation cost. - /// - /// **Note:** This is functionally-equivalent to `self.sorted().take(k)` - /// but much more efficient. - /// - /// ``` - /// use itertools::Itertools; - /// - /// // A random permutation of 0..15 - /// let numbers = vec![6, 9, 1, 14, 0, 4, 8, 7, 11, 2, 10, 3, 13, 12, 5]; - /// - /// let five_smallest = numbers - /// .into_iter() - /// .k_smallest(5); - /// - /// itertools::assert_equal(five_smallest, 0..5); - /// ``` - #[cfg(feature = "use_alloc")] - fn k_smallest(self, k: usize) -> VecIntoIter - where - Self: Sized, - Self::Item: Ord, - { - // The stdlib heap has optimised handling of "holes", which is not included in our heap implementation in k_smallest_general. - // While the difference is unlikely to have practical impact unless `Self::Item` is very large, this method uses the stdlib structure - // to maintain performance compared to previous versions of the crate. - use alloc::collections::BinaryHeap; - - if k == 0 { - self.last(); - return Vec::new().into_iter(); - } - if k == 1 { - return self.min().into_iter().collect_vec().into_iter(); - } - - let mut iter = self.fuse(); - let mut heap: BinaryHeap<_> = iter.by_ref().take(k).collect(); - - iter.for_each(|i| { - debug_assert_eq!(heap.len(), k); - // Equivalent to heap.push(min(i, heap.pop())) but more efficient. - // This should be done with a single `.peek_mut().unwrap()` but - // `PeekMut` sifts-down unconditionally on Rust 1.46.0 and prior. - if *heap.peek().unwrap() > i { - *heap.peek_mut().unwrap() = i; - } - }); - - heap.into_sorted_vec().into_iter() - } - - /// Sort the k smallest elements into a new iterator using the provided comparison. - /// - /// The sorted iterator, if directly collected to a `Vec`, is converted - /// without any extra copying or allocation cost. - /// - /// This corresponds to `self.sorted_by(cmp).take(k)` in the same way that - /// [`k_smallest`](Itertools::k_smallest) corresponds to `self.sorted().take(k)`, - /// in both semantics and complexity. - /// - /// Particularly, a custom heap implementation ensures the comparison is not cloned. 
- /// - /// ``` - /// use itertools::Itertools; - /// - /// // A random permutation of 0..15 - /// let numbers = vec![6, 9, 1, 14, 0, 4, 8, 7, 11, 2, 10, 3, 13, 12, 5]; - /// - /// let five_smallest = numbers - /// .into_iter() - /// .k_smallest_by(5, |a, b| (a % 7).cmp(&(b % 7)).then(a.cmp(b))); - /// - /// itertools::assert_equal(five_smallest, vec![0, 7, 14, 1, 8]); - /// ``` - #[cfg(feature = "use_alloc")] - fn k_smallest_by(self, k: usize, cmp: F) -> VecIntoIter - where - Self: Sized, - F: FnMut(&Self::Item, &Self::Item) -> Ordering, - { - k_smallest::k_smallest_general(self, k, cmp).into_iter() - } - - /// Return the elements producing the k smallest outputs of the provided function. - /// - /// The sorted iterator, if directly collected to a `Vec`, is converted - /// without any extra copying or allocation cost. - /// - /// This corresponds to `self.sorted_by_key(key).take(k)` in the same way that - /// [`k_smallest`](Itertools::k_smallest) corresponds to `self.sorted().take(k)`, - /// in both semantics and complexity. - /// - /// Particularly, a custom heap implementation ensures the comparison is not cloned. - /// - /// ``` - /// use itertools::Itertools; - /// - /// // A random permutation of 0..15 - /// let numbers = vec![6, 9, 1, 14, 0, 4, 8, 7, 11, 2, 10, 3, 13, 12, 5]; - /// - /// let five_smallest = numbers - /// .into_iter() - /// .k_smallest_by_key(5, |n| (n % 7, *n)); - /// - /// itertools::assert_equal(five_smallest, vec![0, 7, 14, 1, 8]); - /// ``` - #[cfg(feature = "use_alloc")] - fn k_smallest_by_key(self, k: usize, key: F) -> VecIntoIter - where - Self: Sized, - F: FnMut(&Self::Item) -> K, - K: Ord, - { - self.k_smallest_by(k, k_smallest::key_to_cmp(key)) - } - - /// Sort the k largest elements into a new iterator, in descending order. - /// - /// The sorted iterator, if directly collected to a `Vec`, is converted - /// without any extra copying or allocation cost. - /// - /// It is semantically equivalent to [`k_smallest`](Itertools::k_smallest) - /// with a reversed `Ord`. - /// However, this is implemented with a custom binary heap which does not - /// have the same performance characteristics for very large `Self::Item`. - /// - /// ``` - /// use itertools::Itertools; - /// - /// // A random permutation of 0..15 - /// let numbers = vec![6, 9, 1, 14, 0, 4, 8, 7, 11, 2, 10, 3, 13, 12, 5]; - /// - /// let five_largest = numbers - /// .into_iter() - /// .k_largest(5); - /// - /// itertools::assert_equal(five_largest, vec![14, 13, 12, 11, 10]); - /// ``` - #[cfg(feature = "use_alloc")] - fn k_largest(self, k: usize) -> VecIntoIter - where - Self: Sized, - Self::Item: Ord, - { - self.k_largest_by(k, Self::Item::cmp) - } - - /// Sort the k largest elements into a new iterator using the provided comparison. - /// - /// The sorted iterator, if directly collected to a `Vec`, is converted - /// without any extra copying or allocation cost. - /// - /// Functionally equivalent to [`k_smallest_by`](Itertools::k_smallest_by) - /// with a reversed `Ord`. 
- /// - /// ``` - /// use itertools::Itertools; - /// - /// // A random permutation of 0..15 - /// let numbers = vec![6, 9, 1, 14, 0, 4, 8, 7, 11, 2, 10, 3, 13, 12, 5]; - /// - /// let five_largest = numbers - /// .into_iter() - /// .k_largest_by(5, |a, b| (a % 7).cmp(&(b % 7)).then(a.cmp(b))); - /// - /// itertools::assert_equal(five_largest, vec![13, 6, 12, 5, 11]); - /// ``` - #[cfg(feature = "use_alloc")] - fn k_largest_by(self, k: usize, mut cmp: F) -> VecIntoIter - where - Self: Sized, - F: FnMut(&Self::Item, &Self::Item) -> Ordering, - { - self.k_smallest_by(k, move |a, b| cmp(b, a)) - } - - /// Return the elements producing the k largest outputs of the provided function. - /// - /// The sorted iterator, if directly collected to a `Vec`, is converted - /// without any extra copying or allocation cost. - /// - /// Functionally equivalent to [`k_smallest_by_key`](Itertools::k_smallest_by_key) - /// with a reversed `Ord`. - /// - /// ``` - /// use itertools::Itertools; - /// - /// // A random permutation of 0..15 - /// let numbers = vec![6, 9, 1, 14, 0, 4, 8, 7, 11, 2, 10, 3, 13, 12, 5]; - /// - /// let five_largest = numbers - /// .into_iter() - /// .k_largest_by_key(5, |n| (n % 7, *n)); - /// - /// itertools::assert_equal(five_largest, vec![13, 6, 12, 5, 11]); - /// ``` - #[cfg(feature = "use_alloc")] - fn k_largest_by_key(self, k: usize, key: F) -> VecIntoIter - where - Self: Sized, - F: FnMut(&Self::Item) -> K, - K: Ord, - { - self.k_largest_by(k, k_smallest::key_to_cmp(key)) - } - - /// Consumes the iterator and return an iterator of the last `n` elements. - /// - /// The iterator, if directly collected to a `VecDeque`, is converted - /// without any extra copying or allocation cost. - /// If directly collected to a `Vec`, it may need some data movement - /// but no re-allocation. - /// - /// ``` - /// use itertools::{assert_equal, Itertools}; - /// - /// let v = vec![5, 9, 8, 4, 2, 12, 0]; - /// assert_equal(v.iter().tail(3), &[2, 12, 0]); - /// assert_equal(v.iter().tail(10), &v); - /// - /// assert_equal(v.iter().tail(1), v.iter().last()); - /// - /// assert_equal((0..100).tail(10), 90..100); - /// - /// assert_equal((0..100).filter(|x| x % 3 == 0).tail(10), (72..100).step_by(3)); - /// ``` - /// - /// For double ended iterators without side-effects, you might prefer - /// `.rev().take(n).rev()` to have a similar result (lazy and non-allocating) - /// without consuming the entire iterator. - #[cfg(feature = "use_alloc")] - fn tail(self, n: usize) -> VecDequeIntoIter - where - Self: Sized, - { - match n { - 0 => { - self.last(); - VecDeque::new() - } - 1 => self.last().into_iter().collect(), - _ => { - // Skip the starting part of the iterator if possible. - let (low, _) = self.size_hint(); - let mut iter = self.fuse().skip(low.saturating_sub(n)); - // TODO: If VecDeque has a more efficient method than - // `.pop_front();.push_back(val)` in the future then maybe revisit this. - let mut data: Vec<_> = iter.by_ref().take(n).collect(); - // Update `data` cyclically. - let idx = iter.fold(0, |i, val| { - debug_assert_eq!(data.len(), n); - data[i] = val; - if i + 1 == n { - 0 - } else { - i + 1 - } - }); - // Respect the insertion order, efficiently. - let mut data = VecDeque::from(data); - data.rotate_left(idx); - data - } - } - .into_iter() - } - - /// Collect all iterator elements into one of two - /// partitions. Unlike [`Iterator::partition`], each partition may - /// have a distinct type. 
- /// - /// ``` - /// use itertools::{Itertools, Either}; - /// - /// let successes_and_failures = vec![Ok(1), Err(false), Err(true), Ok(2)]; - /// - /// let (successes, failures): (Vec<_>, Vec<_>) = successes_and_failures - /// .into_iter() - /// .partition_map(|r| { - /// match r { - /// Ok(v) => Either::Left(v), - /// Err(v) => Either::Right(v), - /// } - /// }); - /// - /// assert_eq!(successes, [1, 2]); - /// assert_eq!(failures, [false, true]); - /// ``` - fn partition_map(self, mut predicate: F) -> (A, B) - where - Self: Sized, - F: FnMut(Self::Item) -> Either, - A: Default + Extend, - B: Default + Extend, - { - let mut left = A::default(); - let mut right = B::default(); - - self.for_each(|val| match predicate(val) { - Either::Left(v) => left.extend(Some(v)), - Either::Right(v) => right.extend(Some(v)), - }); - - (left, right) - } - - /// Partition a sequence of `Result`s into one list of all the `Ok` elements - /// and another list of all the `Err` elements. - /// - /// ``` - /// use itertools::Itertools; - /// - /// let successes_and_failures = vec![Ok(1), Err(false), Err(true), Ok(2)]; - /// - /// let (successes, failures): (Vec<_>, Vec<_>) = successes_and_failures - /// .into_iter() - /// .partition_result(); - /// - /// assert_eq!(successes, [1, 2]); - /// assert_eq!(failures, [false, true]); - /// ``` - fn partition_result(self) -> (A, B) - where - Self: Iterator> + Sized, - A: Default + Extend, - B: Default + Extend, - { - self.partition_map(|r| match r { - Ok(v) => Either::Left(v), - Err(v) => Either::Right(v), - }) - } - - /// Return a `HashMap` of keys mapped to `Vec`s of values. Keys and values - /// are taken from `(Key, Value)` tuple pairs yielded by the input iterator. - /// - /// Essentially a shorthand for `.into_grouping_map().collect::>()`. - /// - /// ``` - /// use itertools::Itertools; - /// - /// let data = vec![(0, 10), (2, 12), (3, 13), (0, 20), (3, 33), (2, 42)]; - /// let lookup = data.into_iter().into_group_map(); - /// - /// assert_eq!(lookup[&0], vec![10, 20]); - /// assert_eq!(lookup.get(&1), None); - /// assert_eq!(lookup[&2], vec![12, 42]); - /// assert_eq!(lookup[&3], vec![13, 33]); - /// ``` - #[cfg(feature = "use_std")] - fn into_group_map(self) -> HashMap> - where - Self: Iterator + Sized, - K: Hash + Eq, - { - group_map::into_group_map(self) - } - - /// Return an `Iterator` on a `HashMap`. Keys mapped to `Vec`s of values. The key is specified - /// in the closure. - /// - /// Essentially a shorthand for `.into_grouping_map_by(f).collect::>()`. - /// - /// ``` - /// use itertools::Itertools; - /// use std::collections::HashMap; - /// - /// let data = vec![(0, 10), (2, 12), (3, 13), (0, 20), (3, 33), (2, 42)]; - /// let lookup: HashMap> = - /// data.clone().into_iter().into_group_map_by(|a| a.0); - /// - /// assert_eq!(lookup[&0], vec![(0,10),(0,20)]); - /// assert_eq!(lookup.get(&1), None); - /// assert_eq!(lookup[&2], vec![(2,12), (2,42)]); - /// assert_eq!(lookup[&3], vec![(3,13), (3,33)]); - /// - /// assert_eq!( - /// data.into_iter() - /// .into_group_map_by(|x| x.0) - /// .into_iter() - /// .map(|(key, values)| (key, values.into_iter().fold(0,|acc, (_,v)| acc + v ))) - /// .collect::>()[&0], - /// 30, - /// ); - /// ``` - #[cfg(feature = "use_std")] - fn into_group_map_by(self, f: F) -> HashMap> - where - Self: Iterator + Sized, - K: Hash + Eq, - F: FnMut(&V) -> K, - { - group_map::into_group_map_by(self, f) - } - - /// Constructs a `GroupingMap` to be used later with one of the efficient - /// group-and-fold operations it allows to perform. 
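`into_grouping_map` has no inline example at this point, so a minimal sketch of the group-and-fold usage it enables follows (illustrative only; assumes the default `use_std` feature):

```rust
use itertools::Itertools;

// Group `(key, value)` pairs by key and sum the values in each group.
let data = vec![(0, 10), (2, 12), (3, 13), (0, 20), (3, 33), (2, 42)];
let sums = data.into_iter().into_grouping_map().sum();

assert_eq!(sums[&0], 10 + 20);
assert_eq!(sums[&2], 12 + 42);
assert_eq!(sums[&3], 13 + 33);
assert_eq!(sums.get(&1), None);
```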
- /// - /// The input iterator must yield item in the form of `(K, V)` where the - /// value of type `K` will be used as key to identify the groups and the - /// value of type `V` as value for the folding operation. - /// - /// See [`GroupingMap`] for more informations - /// on what operations are available. - #[cfg(feature = "use_std")] - fn into_grouping_map(self) -> GroupingMap - where - Self: Iterator + Sized, - K: Hash + Eq, - { - grouping_map::new(self) - } - - /// Constructs a `GroupingMap` to be used later with one of the efficient - /// group-and-fold operations it allows to perform. - /// - /// The values from this iterator will be used as values for the folding operation - /// while the keys will be obtained from the values by calling `key_mapper`. - /// - /// See [`GroupingMap`] for more informations - /// on what operations are available. - #[cfg(feature = "use_std")] - fn into_grouping_map_by(self, key_mapper: F) -> GroupingMapBy - where - Self: Iterator + Sized, - K: Hash + Eq, - F: FnMut(&V) -> K, - { - grouping_map::new(grouping_map::new_map_for_grouping(self, key_mapper)) - } - - /// Return all minimum elements of an iterator. - /// - /// # Examples - /// - /// ``` - /// use itertools::Itertools; - /// - /// let a: [i32; 0] = []; - /// assert_eq!(a.iter().min_set(), Vec::<&i32>::new()); - /// - /// let a = [1]; - /// assert_eq!(a.iter().min_set(), vec![&1]); - /// - /// let a = [1, 2, 3, 4, 5]; - /// assert_eq!(a.iter().min_set(), vec![&1]); - /// - /// let a = [1, 1, 1, 1]; - /// assert_eq!(a.iter().min_set(), vec![&1, &1, &1, &1]); - /// ``` - /// - /// The elements can be floats but no particular result is guaranteed - /// if an element is NaN. - #[cfg(feature = "use_alloc")] - fn min_set(self) -> Vec - where - Self: Sized, - Self::Item: Ord, - { - extrema_set::min_set_impl(self, |_| (), |x, y, _, _| x.cmp(y)) - } - - /// Return all minimum elements of an iterator, as determined by - /// the specified function. - /// - /// # Examples - /// - /// ``` - /// # use std::cmp::Ordering; - /// use itertools::Itertools; - /// - /// let a: [(i32, i32); 0] = []; - /// assert_eq!(a.iter().min_set_by(|_, _| Ordering::Equal), Vec::<&(i32, i32)>::new()); - /// - /// let a = [(1, 2)]; - /// assert_eq!(a.iter().min_set_by(|&&(k1,_), &&(k2, _)| k1.cmp(&k2)), vec![&(1, 2)]); - /// - /// let a = [(1, 2), (2, 2), (3, 9), (4, 8), (5, 9)]; - /// assert_eq!(a.iter().min_set_by(|&&(_,k1), &&(_,k2)| k1.cmp(&k2)), vec![&(1, 2), &(2, 2)]); - /// - /// let a = [(1, 2), (1, 3), (1, 4), (1, 5)]; - /// assert_eq!(a.iter().min_set_by(|&&(k1,_), &&(k2, _)| k1.cmp(&k2)), vec![&(1, 2), &(1, 3), &(1, 4), &(1, 5)]); - /// ``` - /// - /// The elements can be floats but no particular result is guaranteed - /// if an element is NaN. - #[cfg(feature = "use_alloc")] - fn min_set_by(self, mut compare: F) -> Vec - where - Self: Sized, - F: FnMut(&Self::Item, &Self::Item) -> Ordering, - { - extrema_set::min_set_impl(self, |_| (), |x, y, _, _| compare(x, y)) - } - - /// Return all minimum elements of an iterator, as determined by - /// the specified function. 
- /// - /// # Examples - /// - /// ``` - /// use itertools::Itertools; - /// - /// let a: [(i32, i32); 0] = []; - /// assert_eq!(a.iter().min_set_by_key(|_| ()), Vec::<&(i32, i32)>::new()); - /// - /// let a = [(1, 2)]; - /// assert_eq!(a.iter().min_set_by_key(|&&(k,_)| k), vec![&(1, 2)]); - /// - /// let a = [(1, 2), (2, 2), (3, 9), (4, 8), (5, 9)]; - /// assert_eq!(a.iter().min_set_by_key(|&&(_, k)| k), vec![&(1, 2), &(2, 2)]); - /// - /// let a = [(1, 2), (1, 3), (1, 4), (1, 5)]; - /// assert_eq!(a.iter().min_set_by_key(|&&(k, _)| k), vec![&(1, 2), &(1, 3), &(1, 4), &(1, 5)]); - /// ``` - /// - /// The elements can be floats but no particular result is guaranteed - /// if an element is NaN. - #[cfg(feature = "use_alloc")] - fn min_set_by_key(self, key: F) -> Vec - where - Self: Sized, - K: Ord, - F: FnMut(&Self::Item) -> K, - { - extrema_set::min_set_impl(self, key, |_, _, kx, ky| kx.cmp(ky)) - } - - /// Return all maximum elements of an iterator. - /// - /// # Examples - /// - /// ``` - /// use itertools::Itertools; - /// - /// let a: [i32; 0] = []; - /// assert_eq!(a.iter().max_set(), Vec::<&i32>::new()); - /// - /// let a = [1]; - /// assert_eq!(a.iter().max_set(), vec![&1]); - /// - /// let a = [1, 2, 3, 4, 5]; - /// assert_eq!(a.iter().max_set(), vec![&5]); - /// - /// let a = [1, 1, 1, 1]; - /// assert_eq!(a.iter().max_set(), vec![&1, &1, &1, &1]); - /// ``` - /// - /// The elements can be floats but no particular result is guaranteed - /// if an element is NaN. - #[cfg(feature = "use_alloc")] - fn max_set(self) -> Vec - where - Self: Sized, - Self::Item: Ord, - { - extrema_set::max_set_impl(self, |_| (), |x, y, _, _| x.cmp(y)) - } - - /// Return all maximum elements of an iterator, as determined by - /// the specified function. - /// - /// # Examples - /// - /// ``` - /// # use std::cmp::Ordering; - /// use itertools::Itertools; - /// - /// let a: [(i32, i32); 0] = []; - /// assert_eq!(a.iter().max_set_by(|_, _| Ordering::Equal), Vec::<&(i32, i32)>::new()); - /// - /// let a = [(1, 2)]; - /// assert_eq!(a.iter().max_set_by(|&&(k1,_), &&(k2, _)| k1.cmp(&k2)), vec![&(1, 2)]); - /// - /// let a = [(1, 2), (2, 2), (3, 9), (4, 8), (5, 9)]; - /// assert_eq!(a.iter().max_set_by(|&&(_,k1), &&(_,k2)| k1.cmp(&k2)), vec![&(3, 9), &(5, 9)]); - /// - /// let a = [(1, 2), (1, 3), (1, 4), (1, 5)]; - /// assert_eq!(a.iter().max_set_by(|&&(k1,_), &&(k2, _)| k1.cmp(&k2)), vec![&(1, 2), &(1, 3), &(1, 4), &(1, 5)]); - /// ``` - /// - /// The elements can be floats but no particular result is guaranteed - /// if an element is NaN. - #[cfg(feature = "use_alloc")] - fn max_set_by(self, mut compare: F) -> Vec - where - Self: Sized, - F: FnMut(&Self::Item, &Self::Item) -> Ordering, - { - extrema_set::max_set_impl(self, |_| (), |x, y, _, _| compare(x, y)) - } - - /// Return all maximum elements of an iterator, as determined by - /// the specified function. 
- /// - /// # Examples - /// - /// ``` - /// use itertools::Itertools; - /// - /// let a: [(i32, i32); 0] = []; - /// assert_eq!(a.iter().max_set_by_key(|_| ()), Vec::<&(i32, i32)>::new()); - /// - /// let a = [(1, 2)]; - /// assert_eq!(a.iter().max_set_by_key(|&&(k,_)| k), vec![&(1, 2)]); - /// - /// let a = [(1, 2), (2, 2), (3, 9), (4, 8), (5, 9)]; - /// assert_eq!(a.iter().max_set_by_key(|&&(_, k)| k), vec![&(3, 9), &(5, 9)]); - /// - /// let a = [(1, 2), (1, 3), (1, 4), (1, 5)]; - /// assert_eq!(a.iter().max_set_by_key(|&&(k, _)| k), vec![&(1, 2), &(1, 3), &(1, 4), &(1, 5)]); - /// ``` - /// - /// The elements can be floats but no particular result is guaranteed - /// if an element is NaN. - #[cfg(feature = "use_alloc")] - fn max_set_by_key(self, key: F) -> Vec - where - Self: Sized, - K: Ord, - F: FnMut(&Self::Item) -> K, - { - extrema_set::max_set_impl(self, key, |_, _, kx, ky| kx.cmp(ky)) - } - - /// Return the minimum and maximum elements in the iterator. - /// - /// The return type `MinMaxResult` is an enum of three variants: - /// - /// - `NoElements` if the iterator is empty. - /// - `OneElement(x)` if the iterator has exactly one element. - /// - `MinMax(x, y)` is returned otherwise, where `x <= y`. Two - /// values are equal if and only if there is more than one - /// element in the iterator and all elements are equal. - /// - /// On an iterator of length `n`, `minmax` does `1.5 * n` comparisons, - /// and so is faster than calling `min` and `max` separately which does - /// `2 * n` comparisons. - /// - /// # Examples - /// - /// ``` - /// use itertools::Itertools; - /// use itertools::MinMaxResult::{NoElements, OneElement, MinMax}; - /// - /// let a: [i32; 0] = []; - /// assert_eq!(a.iter().minmax(), NoElements); - /// - /// let a = [1]; - /// assert_eq!(a.iter().minmax(), OneElement(&1)); - /// - /// let a = [1, 2, 3, 4, 5]; - /// assert_eq!(a.iter().minmax(), MinMax(&1, &5)); - /// - /// let a = [1, 1, 1, 1]; - /// assert_eq!(a.iter().minmax(), MinMax(&1, &1)); - /// ``` - /// - /// The elements can be floats but no particular result is guaranteed - /// if an element is NaN. - fn minmax(self) -> MinMaxResult - where - Self: Sized, - Self::Item: PartialOrd, - { - minmax::minmax_impl(self, |_| (), |x, y, _, _| x < y) - } - - /// Return the minimum and maximum element of an iterator, as determined by - /// the specified function. - /// - /// The return value is a variant of [`MinMaxResult`] like for [`.minmax()`](Itertools::minmax). - /// - /// For the minimum, the first minimal element is returned. For the maximum, - /// the last maximal element wins. This matches the behavior of the standard - /// [`Iterator::min`] and [`Iterator::max`] methods. - /// - /// The keys can be floats but no particular result is guaranteed - /// if a key is NaN. - fn minmax_by_key(self, key: F) -> MinMaxResult - where - Self: Sized, - K: PartialOrd, - F: FnMut(&Self::Item) -> K, - { - minmax::minmax_impl(self, key, |_, _, xk, yk| xk < yk) - } - - /// Return the minimum and maximum element of an iterator, as determined by - /// the specified comparison function. - /// - /// The return value is a variant of [`MinMaxResult`] like for [`.minmax()`](Itertools::minmax). - /// - /// For the minimum, the first minimal element is returned. For the maximum, - /// the last maximal element wins. This matches the behavior of the standard - /// [`Iterator::min`] and [`Iterator::max`] methods. 
- fn minmax_by(self, mut compare: F) -> MinMaxResult - where - Self: Sized, - F: FnMut(&Self::Item, &Self::Item) -> Ordering, - { - minmax::minmax_impl(self, |_| (), |x, y, _, _| Ordering::Less == compare(x, y)) - } - - /// Return the position of the maximum element in the iterator. - /// - /// If several elements are equally maximum, the position of the - /// last of them is returned. - /// - /// # Examples - /// - /// ``` - /// use itertools::Itertools; - /// - /// let a: [i32; 0] = []; - /// assert_eq!(a.iter().position_max(), None); - /// - /// let a = [-3, 0, 1, 5, -10]; - /// assert_eq!(a.iter().position_max(), Some(3)); - /// - /// let a = [1, 1, -1, -1]; - /// assert_eq!(a.iter().position_max(), Some(1)); - /// ``` - fn position_max(self) -> Option - where - Self: Sized, - Self::Item: Ord, - { - self.enumerate() - .max_by(|x, y| Ord::cmp(&x.1, &y.1)) - .map(|x| x.0) - } - - /// Return the position of the maximum element in the iterator, as - /// determined by the specified function. - /// - /// If several elements are equally maximum, the position of the - /// last of them is returned. - /// - /// # Examples - /// - /// ``` - /// use itertools::Itertools; - /// - /// let a: [i32; 0] = []; - /// assert_eq!(a.iter().position_max_by_key(|x| x.abs()), None); - /// - /// let a = [-3_i32, 0, 1, 5, -10]; - /// assert_eq!(a.iter().position_max_by_key(|x| x.abs()), Some(4)); - /// - /// let a = [1_i32, 1, -1, -1]; - /// assert_eq!(a.iter().position_max_by_key(|x| x.abs()), Some(3)); - /// ``` - fn position_max_by_key(self, mut key: F) -> Option - where - Self: Sized, - K: Ord, - F: FnMut(&Self::Item) -> K, - { - self.enumerate() - .max_by(|x, y| Ord::cmp(&key(&x.1), &key(&y.1))) - .map(|x| x.0) - } - - /// Return the position of the maximum element in the iterator, as - /// determined by the specified comparison function. - /// - /// If several elements are equally maximum, the position of the - /// last of them is returned. - /// - /// # Examples - /// - /// ``` - /// use itertools::Itertools; - /// - /// let a: [i32; 0] = []; - /// assert_eq!(a.iter().position_max_by(|x, y| x.cmp(y)), None); - /// - /// let a = [-3_i32, 0, 1, 5, -10]; - /// assert_eq!(a.iter().position_max_by(|x, y| x.cmp(y)), Some(3)); - /// - /// let a = [1_i32, 1, -1, -1]; - /// assert_eq!(a.iter().position_max_by(|x, y| x.cmp(y)), Some(1)); - /// ``` - fn position_max_by(self, mut compare: F) -> Option - where - Self: Sized, - F: FnMut(&Self::Item, &Self::Item) -> Ordering, - { - self.enumerate() - .max_by(|x, y| compare(&x.1, &y.1)) - .map(|x| x.0) - } - - /// Return the position of the minimum element in the iterator. - /// - /// If several elements are equally minimum, the position of the - /// first of them is returned. - /// - /// # Examples - /// - /// ``` - /// use itertools::Itertools; - /// - /// let a: [i32; 0] = []; - /// assert_eq!(a.iter().position_min(), None); - /// - /// let a = [-3, 0, 1, 5, -10]; - /// assert_eq!(a.iter().position_min(), Some(4)); - /// - /// let a = [1, 1, -1, -1]; - /// assert_eq!(a.iter().position_min(), Some(2)); - /// ``` - fn position_min(self) -> Option - where - Self: Sized, - Self::Item: Ord, - { - self.enumerate() - .min_by(|x, y| Ord::cmp(&x.1, &y.1)) - .map(|x| x.0) - } - - /// Return the position of the minimum element in the iterator, as - /// determined by the specified function. - /// - /// If several elements are equally minimum, the position of the - /// first of them is returned. 
- /// - /// # Examples - /// - /// ``` - /// use itertools::Itertools; - /// - /// let a: [i32; 0] = []; - /// assert_eq!(a.iter().position_min_by_key(|x| x.abs()), None); - /// - /// let a = [-3_i32, 0, 1, 5, -10]; - /// assert_eq!(a.iter().position_min_by_key(|x| x.abs()), Some(1)); - /// - /// let a = [1_i32, 1, -1, -1]; - /// assert_eq!(a.iter().position_min_by_key(|x| x.abs()), Some(0)); - /// ``` - fn position_min_by_key(self, mut key: F) -> Option - where - Self: Sized, - K: Ord, - F: FnMut(&Self::Item) -> K, - { - self.enumerate() - .min_by(|x, y| Ord::cmp(&key(&x.1), &key(&y.1))) - .map(|x| x.0) - } - - /// Return the position of the minimum element in the iterator, as - /// determined by the specified comparison function. - /// - /// If several elements are equally minimum, the position of the - /// first of them is returned. - /// - /// # Examples - /// - /// ``` - /// use itertools::Itertools; - /// - /// let a: [i32; 0] = []; - /// assert_eq!(a.iter().position_min_by(|x, y| x.cmp(y)), None); - /// - /// let a = [-3_i32, 0, 1, 5, -10]; - /// assert_eq!(a.iter().position_min_by(|x, y| x.cmp(y)), Some(4)); - /// - /// let a = [1_i32, 1, -1, -1]; - /// assert_eq!(a.iter().position_min_by(|x, y| x.cmp(y)), Some(2)); - /// ``` - fn position_min_by(self, mut compare: F) -> Option - where - Self: Sized, - F: FnMut(&Self::Item, &Self::Item) -> Ordering, - { - self.enumerate() - .min_by(|x, y| compare(&x.1, &y.1)) - .map(|x| x.0) - } - - /// Return the positions of the minimum and maximum elements in - /// the iterator. - /// - /// The return type [`MinMaxResult`] is an enum of three variants: - /// - /// - `NoElements` if the iterator is empty. - /// - `OneElement(xpos)` if the iterator has exactly one element. - /// - `MinMax(xpos, ypos)` is returned otherwise, where the - /// element at `xpos` ≤ the element at `ypos`. While the - /// referenced elements themselves may be equal, `xpos` cannot - /// be equal to `ypos`. - /// - /// On an iterator of length `n`, `position_minmax` does `1.5 * n` - /// comparisons, and so is faster than calling `position_min` and - /// `position_max` separately which does `2 * n` comparisons. - /// - /// For the minimum, if several elements are equally minimum, the - /// position of the first of them is returned. For the maximum, if - /// several elements are equally maximum, the position of the last - /// of them is returned. - /// - /// The elements can be floats but no particular result is - /// guaranteed if an element is NaN. - /// - /// # Examples - /// - /// ``` - /// use itertools::Itertools; - /// use itertools::MinMaxResult::{NoElements, OneElement, MinMax}; - /// - /// let a: [i32; 0] = []; - /// assert_eq!(a.iter().position_minmax(), NoElements); - /// - /// let a = [10]; - /// assert_eq!(a.iter().position_minmax(), OneElement(0)); - /// - /// let a = [-3, 0, 1, 5, -10]; - /// assert_eq!(a.iter().position_minmax(), MinMax(4, 3)); - /// - /// let a = [1, 1, -1, -1]; - /// assert_eq!(a.iter().position_minmax(), MinMax(2, 1)); - /// ``` - fn position_minmax(self) -> MinMaxResult - where - Self: Sized, - Self::Item: PartialOrd, - { - use crate::MinMaxResult::{MinMax, NoElements, OneElement}; - match minmax::minmax_impl(self.enumerate(), |_| (), |x, y, _, _| x.1 < y.1) { - NoElements => NoElements, - OneElement(x) => OneElement(x.0), - MinMax(x, y) => MinMax(x.0, y.0), - } - } - - /// Return the postions of the minimum and maximum elements of an - /// iterator, as determined by the specified function. 
- /// - /// The return value is a variant of [`MinMaxResult`] like for - /// [`position_minmax`]. - /// - /// For the minimum, if several elements are equally minimum, the - /// position of the first of them is returned. For the maximum, if - /// several elements are equally maximum, the position of the last - /// of them is returned. - /// - /// The keys can be floats but no particular result is guaranteed - /// if a key is NaN. - /// - /// # Examples - /// - /// ``` - /// use itertools::Itertools; - /// use itertools::MinMaxResult::{NoElements, OneElement, MinMax}; - /// - /// let a: [i32; 0] = []; - /// assert_eq!(a.iter().position_minmax_by_key(|x| x.abs()), NoElements); - /// - /// let a = [10_i32]; - /// assert_eq!(a.iter().position_minmax_by_key(|x| x.abs()), OneElement(0)); - /// - /// let a = [-3_i32, 0, 1, 5, -10]; - /// assert_eq!(a.iter().position_minmax_by_key(|x| x.abs()), MinMax(1, 4)); - /// - /// let a = [1_i32, 1, -1, -1]; - /// assert_eq!(a.iter().position_minmax_by_key(|x| x.abs()), MinMax(0, 3)); - /// ``` - /// - /// [`position_minmax`]: Self::position_minmax - fn position_minmax_by_key(self, mut key: F) -> MinMaxResult - where - Self: Sized, - K: PartialOrd, - F: FnMut(&Self::Item) -> K, - { - use crate::MinMaxResult::{MinMax, NoElements, OneElement}; - match self.enumerate().minmax_by_key(|e| key(&e.1)) { - NoElements => NoElements, - OneElement(x) => OneElement(x.0), - MinMax(x, y) => MinMax(x.0, y.0), - } - } - - /// Return the postions of the minimum and maximum elements of an - /// iterator, as determined by the specified comparison function. - /// - /// The return value is a variant of [`MinMaxResult`] like for - /// [`position_minmax`]. - /// - /// For the minimum, if several elements are equally minimum, the - /// position of the first of them is returned. For the maximum, if - /// several elements are equally maximum, the position of the last - /// of them is returned. - /// - /// # Examples - /// - /// ``` - /// use itertools::Itertools; - /// use itertools::MinMaxResult::{NoElements, OneElement, MinMax}; - /// - /// let a: [i32; 0] = []; - /// assert_eq!(a.iter().position_minmax_by(|x, y| x.cmp(y)), NoElements); - /// - /// let a = [10_i32]; - /// assert_eq!(a.iter().position_minmax_by(|x, y| x.cmp(y)), OneElement(0)); - /// - /// let a = [-3_i32, 0, 1, 5, -10]; - /// assert_eq!(a.iter().position_minmax_by(|x, y| x.cmp(y)), MinMax(4, 3)); - /// - /// let a = [1_i32, 1, -1, -1]; - /// assert_eq!(a.iter().position_minmax_by(|x, y| x.cmp(y)), MinMax(2, 1)); - /// ``` - /// - /// [`position_minmax`]: Self::position_minmax - fn position_minmax_by(self, mut compare: F) -> MinMaxResult - where - Self: Sized, - F: FnMut(&Self::Item, &Self::Item) -> Ordering, - { - use crate::MinMaxResult::{MinMax, NoElements, OneElement}; - match self.enumerate().minmax_by(|x, y| compare(&x.1, &y.1)) { - NoElements => NoElements, - OneElement(x) => OneElement(x.0), - MinMax(x, y) => MinMax(x.0, y.0), - } - } - - /// If the iterator yields exactly one element, that element will be returned, otherwise - /// an error will be returned containing an iterator that has the same output as the input - /// iterator. - /// - /// This provides an additional layer of validation over just calling `Iterator::next()`. - /// If your assumption that there should only be one element yielded is false this provides - /// the opportunity to detect and handle that, preventing errors at a distance. 
- /// - /// # Examples - /// ``` - /// use itertools::Itertools; - /// - /// assert_eq!((0..10).filter(|&x| x == 2).exactly_one().unwrap(), 2); - /// assert!((0..10).filter(|&x| x > 1 && x < 4).exactly_one().unwrap_err().eq(2..4)); - /// assert!((0..10).filter(|&x| x > 1 && x < 5).exactly_one().unwrap_err().eq(2..5)); - /// assert!((0..10).filter(|&_| false).exactly_one().unwrap_err().eq(0..0)); - /// ``` - fn exactly_one(mut self) -> Result> - where - Self: Sized, - { - match self.next() { - Some(first) => match self.next() { - Some(second) => Err(ExactlyOneError::new( - Some(Either::Left([first, second])), - self, - )), - None => Ok(first), - }, - None => Err(ExactlyOneError::new(None, self)), - } - } - - /// If the iterator yields no elements, `Ok(None)` will be returned. If the iterator yields - /// exactly one element, that element will be returned, otherwise an error will be returned - /// containing an iterator that has the same output as the input iterator. - /// - /// This provides an additional layer of validation over just calling `Iterator::next()`. - /// If your assumption that there should be at most one element yielded is false this provides - /// the opportunity to detect and handle that, preventing errors at a distance. - /// - /// # Examples - /// ``` - /// use itertools::Itertools; - /// - /// assert_eq!((0..10).filter(|&x| x == 2).at_most_one().unwrap(), Some(2)); - /// assert!((0..10).filter(|&x| x > 1 && x < 4).at_most_one().unwrap_err().eq(2..4)); - /// assert!((0..10).filter(|&x| x > 1 && x < 5).at_most_one().unwrap_err().eq(2..5)); - /// assert_eq!((0..10).filter(|&_| false).at_most_one().unwrap(), None); - /// ``` - fn at_most_one(mut self) -> Result, ExactlyOneError> - where - Self: Sized, - { - match self.next() { - Some(first) => match self.next() { - Some(second) => Err(ExactlyOneError::new( - Some(Either::Left([first, second])), - self, - )), - None => Ok(Some(first)), - }, - None => Ok(None), - } - } - - /// An iterator adaptor that allows the user to peek at multiple `.next()` - /// values without advancing the base iterator. - /// - /// # Examples - /// ``` - /// use itertools::Itertools; - /// - /// let mut iter = (0..10).multipeek(); - /// assert_eq!(iter.peek(), Some(&0)); - /// assert_eq!(iter.peek(), Some(&1)); - /// assert_eq!(iter.peek(), Some(&2)); - /// assert_eq!(iter.next(), Some(0)); - /// assert_eq!(iter.peek(), Some(&1)); - /// ``` - #[cfg(feature = "use_alloc")] - fn multipeek(self) -> MultiPeek - where - Self: Sized, - { - multipeek_impl::multipeek(self) - } - - /// Collect the items in this iterator and return a `HashMap` which - /// contains each item that appears in the iterator and the number - /// of times it appears. - /// - /// # Examples - /// ``` - /// # use itertools::Itertools; - /// let counts = [1, 1, 1, 3, 3, 5].into_iter().counts(); - /// assert_eq!(counts[&1], 3); - /// assert_eq!(counts[&3], 2); - /// assert_eq!(counts[&5], 1); - /// assert_eq!(counts.get(&0), None); - /// ``` - #[cfg(feature = "use_std")] - fn counts(self) -> HashMap - where - Self: Sized, - Self::Item: Eq + Hash, - { - let mut counts = HashMap::new(); - self.for_each(|item| *counts.entry(item).or_default() += 1); - counts - } - - /// Collect the items in this iterator and return a `HashMap` which - /// contains each item that appears in the iterator and the number - /// of times it appears, - /// determining identity using a keying function. 
- /// - /// ``` - /// # use itertools::Itertools; - /// struct Character { - /// first_name: &'static str, - /// last_name: &'static str, - /// } - /// - /// let characters = - /// vec![ - /// Character { first_name: "Amy", last_name: "Pond" }, - /// Character { first_name: "Amy", last_name: "Wong" }, - /// Character { first_name: "Amy", last_name: "Santiago" }, - /// Character { first_name: "James", last_name: "Bond" }, - /// Character { first_name: "James", last_name: "Sullivan" }, - /// Character { first_name: "James", last_name: "Norington" }, - /// Character { first_name: "James", last_name: "Kirk" }, - /// ]; - /// - /// let first_name_frequency = - /// characters - /// .into_iter() - /// .counts_by(|c| c.first_name); - /// - /// assert_eq!(first_name_frequency["Amy"], 3); - /// assert_eq!(first_name_frequency["James"], 4); - /// assert_eq!(first_name_frequency.contains_key("Asha"), false); - /// ``` - #[cfg(feature = "use_std")] - fn counts_by(self, f: F) -> HashMap - where - Self: Sized, - K: Eq + Hash, - F: FnMut(Self::Item) -> K, - { - self.map(f).counts() - } - - /// Converts an iterator of tuples into a tuple of containers. - /// - /// It consumes an entire iterator of n-ary tuples, producing `n` collections, one for each - /// column. - /// - /// This function is, in some sense, the opposite of [`multizip`]. - /// - /// ``` - /// use itertools::Itertools; - /// - /// let inputs = vec![(1, 2, 3), (4, 5, 6), (7, 8, 9)]; - /// - /// let (a, b, c): (Vec<_>, Vec<_>, Vec<_>) = inputs - /// .into_iter() - /// .multiunzip(); - /// - /// assert_eq!(a, vec![1, 4, 7]); - /// assert_eq!(b, vec![2, 5, 8]); - /// assert_eq!(c, vec![3, 6, 9]); - /// ``` - fn multiunzip(self) -> FromI - where - Self: Sized + MultiUnzip, - { - MultiUnzip::multiunzip(self) - } - - /// Returns the length of the iterator if one exists. - /// Otherwise return `self.size_hint()`. - /// - /// Fallible [`ExactSizeIterator::len`]. - /// - /// Inherits guarantees and restrictions from [`Iterator::size_hint`]. - /// - /// ``` - /// use itertools::Itertools; - /// - /// assert_eq!([0; 10].iter().try_len(), Ok(10)); - /// assert_eq!((10..15).try_len(), Ok(5)); - /// assert_eq!((15..10).try_len(), Ok(0)); - /// assert_eq!((10..).try_len(), Err((usize::MAX, None))); - /// assert_eq!((10..15).filter(|x| x % 2 == 0).try_len(), Err((0, Some(5)))); - /// ``` - fn try_len(&self) -> Result { - let sh = self.size_hint(); - match sh { - (lo, Some(hi)) if lo == hi => Ok(lo), - _ => Err(sh), - } - } -} - -impl Itertools for T where T: Iterator + ?Sized {} - -/// Return `true` if both iterables produce equal sequences -/// (elements pairwise equal and sequences of the same length), -/// `false` otherwise. -/// -/// [`IntoIterator`] enabled version of [`Iterator::eq`]. -/// -/// ``` -/// assert!(itertools::equal(vec![1, 2, 3], 1..4)); -/// assert!(!itertools::equal(&[0, 0], &[0, 0, 0])); -/// ``` -pub fn equal(a: I, b: J) -> bool -where - I: IntoIterator, - J: IntoIterator, - I::Item: PartialEq, -{ - a.into_iter().eq(b) -} - -/// Assert that two iterables produce equal sequences, with the same -/// semantics as [`equal(a, b)`](equal). -/// -/// **Panics** on assertion failure with a message that shows the -/// two different elements and the iteration index. -/// -/// ```should_panic -/// # use itertools::assert_equal; -/// assert_equal("exceed".split('c'), "excess".split('c')); -/// // ^PANIC: panicked at 'Failed assertion Some("eed") == Some("ess") for iteration 1'. 
-/// ``` -pub fn assert_equal(a: I, b: J) -where - I: IntoIterator, - J: IntoIterator, - I::Item: fmt::Debug + PartialEq, - J::Item: fmt::Debug, -{ - let mut ia = a.into_iter(); - let mut ib = b.into_iter(); - let mut i: usize = 0; - loop { - match (ia.next(), ib.next()) { - (None, None) => return, - (a, b) => { - let equal = match (&a, &b) { - (Some(a), Some(b)) => a == b, - _ => false, - }; - assert!( - equal, - "Failed assertion {a:?} == {b:?} for iteration {i}", - i = i, - a = a, - b = b - ); - i += 1; - } - } - } -} - -/// Partition a sequence using predicate `pred` so that elements -/// that map to `true` are placed before elements which map to `false`. -/// -/// The order within the partitions is arbitrary. -/// -/// Return the index of the split point. -/// -/// ``` -/// use itertools::partition; -/// -/// # // use repeated numbers to not promise any ordering -/// let mut data = [7, 1, 1, 7, 1, 1, 7]; -/// let split_index = partition(&mut data, |elt| *elt >= 3); -/// -/// assert_eq!(data, [7, 7, 7, 1, 1, 1, 1]); -/// assert_eq!(split_index, 3); -/// ``` -pub fn partition<'a, A: 'a, I, F>(iter: I, mut pred: F) -> usize -where - I: IntoIterator, - I::IntoIter: DoubleEndedIterator, - F: FnMut(&A) -> bool, -{ - let mut split_index = 0; - let mut iter = iter.into_iter(); - while let Some(front) = iter.next() { - if !pred(front) { - match iter.rfind(|back| pred(back)) { - Some(back) => std::mem::swap(front, back), - None => break, - } - } - split_index += 1; - } - split_index -} - -/// An enum used for controlling the execution of `fold_while`. -/// -/// See [`.fold_while()`](Itertools::fold_while) for more information. -#[derive(Copy, Clone, Debug, Eq, PartialEq)] -pub enum FoldWhile { - /// Continue folding with this value - Continue(T), - /// Fold is complete and will return this value - Done(T), -} - -impl FoldWhile { - /// Return the value in the continue or done. - pub fn into_inner(self) -> T { - match self { - Self::Continue(x) | Self::Done(x) => x, - } - } - - /// Return true if `self` is `Done`, false if it is `Continue`. - pub fn is_done(&self) -> bool { - match *self { - Self::Continue(_) => false, - Self::Done(_) => true, - } - } -} diff --git a/vendor/itertools/src/merge_join.rs b/vendor/itertools/src/merge_join.rs deleted file mode 100644 index c0de35f90e2481..00000000000000 --- a/vendor/itertools/src/merge_join.rs +++ /dev/null @@ -1,347 +0,0 @@ -use std::cmp::Ordering; -use std::fmt; -use std::iter::{Fuse, FusedIterator}; -use std::marker::PhantomData; - -use either::Either; - -use super::adaptors::{put_back, PutBack}; -use crate::either_or_both::EitherOrBoth; -use crate::size_hint::{self, SizeHint}; -#[cfg(doc)] -use crate::Itertools; - -#[derive(Clone, Debug)] -pub struct MergeLte; - -/// An iterator adaptor that merges the two base iterators in ascending order. -/// If both base iterators are sorted (ascending), the result is sorted. -/// -/// Iterator element type is `I::Item`. -/// -/// See [`.merge()`](crate::Itertools::merge_by) for more information. -pub type Merge = MergeBy; - -/// Create an iterator that merges elements in `i` and `j`. -/// -/// [`IntoIterator`] enabled version of [`Itertools::merge`](crate::Itertools::merge). 
-/// -/// ``` -/// use itertools::merge; -/// -/// for elt in merge(&[1, 2, 3], &[2, 3, 4]) { -/// /* loop body */ -/// } -/// ``` -pub fn merge( - i: I, - j: J, -) -> Merge<::IntoIter, ::IntoIter> -where - I: IntoIterator, - J: IntoIterator, - I::Item: PartialOrd, -{ - merge_by_new(i, j, MergeLte) -} - -/// An iterator adaptor that merges the two base iterators in ascending order. -/// If both base iterators are sorted (ascending), the result is sorted. -/// -/// Iterator element type is `I::Item`. -/// -/// See [`.merge_by()`](crate::Itertools::merge_by) for more information. -#[must_use = "iterator adaptors are lazy and do nothing unless consumed"] -pub struct MergeBy { - left: PutBack>, - right: PutBack>, - cmp_fn: F, -} - -/// Create a `MergeBy` iterator. -pub fn merge_by_new(a: I, b: J, cmp: F) -> MergeBy -where - I: IntoIterator, - J: IntoIterator, -{ - MergeBy { - left: put_back(a.into_iter().fuse()), - right: put_back(b.into_iter().fuse()), - cmp_fn: cmp, - } -} - -/// Return an iterator adaptor that merge-joins items from the two base iterators in ascending order. -/// -/// [`IntoIterator`] enabled version of [`Itertools::merge_join_by`]. -pub fn merge_join_by( - left: I, - right: J, - cmp_fn: F, -) -> MergeJoinBy -where - I: IntoIterator, - J: IntoIterator, - F: FnMut(&I::Item, &J::Item) -> T, -{ - MergeBy { - left: put_back(left.into_iter().fuse()), - right: put_back(right.into_iter().fuse()), - cmp_fn: MergeFuncLR(cmp_fn, PhantomData), - } -} - -/// An iterator adaptor that merge-joins items from the two base iterators in ascending order. -/// -/// See [`.merge_join_by()`](crate::Itertools::merge_join_by) for more information. -pub type MergeJoinBy = - MergeBy::Item, ::Item>>::T>>; - -#[derive(Clone, Debug)] -pub struct MergeFuncLR(F, PhantomData); - -pub trait FuncLR { - type T; -} - -impl T> FuncLR for F { - type T = T; -} - -pub trait OrderingOrBool { - type MergeResult; - fn left(left: L) -> Self::MergeResult; - fn right(right: R) -> Self::MergeResult; - // "merge" never returns (Some(...), Some(...), ...) so Option> - // is appealing but it is always followed by two put_backs, so we think the compiler is - // smart enough to optimize it. Or we could move put_backs into "merge". 
- fn merge(&mut self, left: L, right: R) -> (Option>, Self::MergeResult); - fn size_hint(left: SizeHint, right: SizeHint) -> SizeHint; -} - -impl Ordering> OrderingOrBool for MergeFuncLR { - type MergeResult = EitherOrBoth; - fn left(left: L) -> Self::MergeResult { - EitherOrBoth::Left(left) - } - fn right(right: R) -> Self::MergeResult { - EitherOrBoth::Right(right) - } - fn merge(&mut self, left: L, right: R) -> (Option>, Self::MergeResult) { - match self.0(&left, &right) { - Ordering::Equal => (None, EitherOrBoth::Both(left, right)), - Ordering::Less => (Some(Either::Right(right)), EitherOrBoth::Left(left)), - Ordering::Greater => (Some(Either::Left(left)), EitherOrBoth::Right(right)), - } - } - fn size_hint(left: SizeHint, right: SizeHint) -> SizeHint { - let (a_lower, a_upper) = left; - let (b_lower, b_upper) = right; - let lower = ::std::cmp::max(a_lower, b_lower); - let upper = match (a_upper, b_upper) { - (Some(x), Some(y)) => x.checked_add(y), - _ => None, - }; - (lower, upper) - } -} - -impl bool> OrderingOrBool for MergeFuncLR { - type MergeResult = Either; - fn left(left: L) -> Self::MergeResult { - Either::Left(left) - } - fn right(right: R) -> Self::MergeResult { - Either::Right(right) - } - fn merge(&mut self, left: L, right: R) -> (Option>, Self::MergeResult) { - if self.0(&left, &right) { - (Some(Either::Right(right)), Either::Left(left)) - } else { - (Some(Either::Left(left)), Either::Right(right)) - } - } - fn size_hint(left: SizeHint, right: SizeHint) -> SizeHint { - // Not ExactSizeIterator because size may be larger than usize - size_hint::add(left, right) - } -} - -impl bool> OrderingOrBool for F { - type MergeResult = T; - fn left(left: T) -> Self::MergeResult { - left - } - fn right(right: T) -> Self::MergeResult { - right - } - fn merge(&mut self, left: T, right: T) -> (Option>, Self::MergeResult) { - if self(&left, &right) { - (Some(Either::Right(right)), left) - } else { - (Some(Either::Left(left)), right) - } - } - fn size_hint(left: SizeHint, right: SizeHint) -> SizeHint { - // Not ExactSizeIterator because size may be larger than usize - size_hint::add(left, right) - } -} - -impl OrderingOrBool for MergeLte { - type MergeResult = T; - fn left(left: T) -> Self::MergeResult { - left - } - fn right(right: T) -> Self::MergeResult { - right - } - fn merge(&mut self, left: T, right: T) -> (Option>, Self::MergeResult) { - if left <= right { - (Some(Either::Right(right)), left) - } else { - (Some(Either::Left(left)), right) - } - } - fn size_hint(left: SizeHint, right: SizeHint) -> SizeHint { - // Not ExactSizeIterator because size may be larger than usize - size_hint::add(left, right) - } -} - -impl Clone for MergeBy -where - I: Iterator, - J: Iterator, - PutBack>: Clone, - PutBack>: Clone, - F: Clone, -{ - clone_fields!(left, right, cmp_fn); -} - -impl fmt::Debug for MergeBy -where - I: Iterator + fmt::Debug, - I::Item: fmt::Debug, - J: Iterator + fmt::Debug, - J::Item: fmt::Debug, -{ - debug_fmt_fields!(MergeBy, left, right); -} - -impl Iterator for MergeBy -where - I: Iterator, - J: Iterator, - F: OrderingOrBool, -{ - type Item = F::MergeResult; - - fn next(&mut self) -> Option { - match (self.left.next(), self.right.next()) { - (None, None) => None, - (Some(left), None) => Some(F::left(left)), - (None, Some(right)) => Some(F::right(right)), - (Some(left), Some(right)) => { - let (not_next, next) = self.cmp_fn.merge(left, right); - match not_next { - Some(Either::Left(l)) => { - self.left.put_back(l); - } - Some(Either::Right(r)) => { - self.right.put_back(r); - 
} - None => (), - } - - Some(next) - } - } - } - - fn fold(mut self, init: B, mut f: G) -> B - where - Self: Sized, - G: FnMut(B, Self::Item) -> B, - { - let mut acc = init; - let mut left = self.left.next(); - let mut right = self.right.next(); - - loop { - match (left, right) { - (Some(l), Some(r)) => match self.cmp_fn.merge(l, r) { - (Some(Either::Right(r)), x) => { - acc = f(acc, x); - left = self.left.next(); - right = Some(r); - } - (Some(Either::Left(l)), x) => { - acc = f(acc, x); - left = Some(l); - right = self.right.next(); - } - (None, x) => { - acc = f(acc, x); - left = self.left.next(); - right = self.right.next(); - } - }, - (Some(l), None) => { - self.left.put_back(l); - acc = self.left.fold(acc, |acc, x| f(acc, F::left(x))); - break; - } - (None, Some(r)) => { - self.right.put_back(r); - acc = self.right.fold(acc, |acc, x| f(acc, F::right(x))); - break; - } - (None, None) => { - break; - } - } - } - - acc - } - - fn size_hint(&self) -> SizeHint { - F::size_hint(self.left.size_hint(), self.right.size_hint()) - } - - fn nth(&mut self, mut n: usize) -> Option { - loop { - if n == 0 { - break self.next(); - } - n -= 1; - match (self.left.next(), self.right.next()) { - (None, None) => break None, - (Some(_left), None) => break self.left.nth(n).map(F::left), - (None, Some(_right)) => break self.right.nth(n).map(F::right), - (Some(left), Some(right)) => { - let (not_next, _) = self.cmp_fn.merge(left, right); - match not_next { - Some(Either::Left(l)) => { - self.left.put_back(l); - } - Some(Either::Right(r)) => { - self.right.put_back(r); - } - None => (), - } - } - } - } - } -} - -impl FusedIterator for MergeBy -where - I: Iterator, - J: Iterator, - F: OrderingOrBool, -{ -} diff --git a/vendor/itertools/src/minmax.rs b/vendor/itertools/src/minmax.rs deleted file mode 100644 index 5c9674e01124ef..00000000000000 --- a/vendor/itertools/src/minmax.rs +++ /dev/null @@ -1,116 +0,0 @@ -/// `MinMaxResult` is an enum returned by `minmax`. -/// -/// See [`.minmax()`](crate::Itertools::minmax) for more detail. -#[derive(Copy, Clone, PartialEq, Eq, Debug)] -pub enum MinMaxResult { - /// Empty iterator - NoElements, - - /// Iterator with one element, so the minimum and maximum are the same - OneElement(T), - - /// More than one element in the iterator, the first element is not larger - /// than the second - MinMax(T, T), -} - -impl MinMaxResult { - /// `into_option` creates an `Option` of type `(T, T)`. The returned `Option` - /// has variant `None` if and only if the `MinMaxResult` has variant - /// `NoElements`. Otherwise `Some((x, y))` is returned where `x <= y`. - /// If the `MinMaxResult` has variant `OneElement(x)`, performing this - /// operation will make one clone of `x`. - /// - /// # Examples - /// - /// ``` - /// use itertools::MinMaxResult::{self, NoElements, OneElement, MinMax}; - /// - /// let r: MinMaxResult = NoElements; - /// assert_eq!(r.into_option(), None); - /// - /// let r = OneElement(1); - /// assert_eq!(r.into_option(), Some((1, 1))); - /// - /// let r = MinMax(1, 2); - /// assert_eq!(r.into_option(), Some((1, 2))); - /// ``` - pub fn into_option(self) -> Option<(T, T)> { - match self { - Self::NoElements => None, - Self::OneElement(x) => Some((x.clone(), x)), - Self::MinMax(x, y) => Some((x, y)), - } - } -} - -/// Implementation guts for `minmax` and `minmax_by_key`. 
-pub fn minmax_impl(mut it: I, mut key_for: F, mut lt: L) -> MinMaxResult -where - I: Iterator, - F: FnMut(&I::Item) -> K, - L: FnMut(&I::Item, &I::Item, &K, &K) -> bool, -{ - let (mut min, mut max, mut min_key, mut max_key) = match it.next() { - None => return MinMaxResult::NoElements, - Some(x) => match it.next() { - None => return MinMaxResult::OneElement(x), - Some(y) => { - let xk = key_for(&x); - let yk = key_for(&y); - if !lt(&y, &x, &yk, &xk) { - (x, y, xk, yk) - } else { - (y, x, yk, xk) - } - } - }, - }; - - loop { - // `first` and `second` are the two next elements we want to look - // at. We first compare `first` and `second` (#1). The smaller one - // is then compared to current minimum (#2). The larger one is - // compared to current maximum (#3). This way we do 3 comparisons - // for 2 elements. - let first = match it.next() { - None => break, - Some(x) => x, - }; - let second = match it.next() { - None => { - let first_key = key_for(&first); - if lt(&first, &min, &first_key, &min_key) { - min = first; - } else if !lt(&first, &max, &first_key, &max_key) { - max = first; - } - break; - } - Some(x) => x, - }; - let first_key = key_for(&first); - let second_key = key_for(&second); - if !lt(&second, &first, &second_key, &first_key) { - if lt(&first, &min, &first_key, &min_key) { - min = first; - min_key = first_key; - } - if !lt(&second, &max, &second_key, &max_key) { - max = second; - max_key = second_key; - } - } else { - if lt(&second, &min, &second_key, &min_key) { - min = second; - min_key = second_key; - } - if !lt(&first, &max, &first_key, &max_key) { - max = first; - max_key = first_key; - } - } - } - - MinMaxResult::MinMax(min, max) -} diff --git a/vendor/itertools/src/multipeek_impl.rs b/vendor/itertools/src/multipeek_impl.rs deleted file mode 100644 index 6f800b6fb6c907..00000000000000 --- a/vendor/itertools/src/multipeek_impl.rs +++ /dev/null @@ -1,116 +0,0 @@ -use crate::size_hint; -#[cfg(doc)] -use crate::Itertools; -use crate::PeekingNext; -use alloc::collections::VecDeque; -use std::iter::Fuse; - -/// See [`multipeek()`] for more information. -#[derive(Clone, Debug)] -#[must_use = "iterator adaptors are lazy and do nothing unless consumed"] -pub struct MultiPeek -where - I: Iterator, -{ - iter: Fuse, - buf: VecDeque, - index: usize, -} - -/// An iterator adaptor that allows the user to peek at multiple `.next()` -/// values without advancing the base iterator. -/// -/// [`IntoIterator`] enabled version of [`Itertools::multipeek`]. -pub fn multipeek(iterable: I) -> MultiPeek -where - I: IntoIterator, -{ - MultiPeek { - iter: iterable.into_iter().fuse(), - buf: VecDeque::new(), - index: 0, - } -} - -impl MultiPeek -where - I: Iterator, -{ - /// Reset the peeking “cursor” - pub fn reset_peek(&mut self) { - self.index = 0; - } -} - -impl MultiPeek { - /// Works exactly like `.next()` with the only difference that it doesn't - /// advance itself. `.peek()` can be called multiple times, to peek - /// further ahead. - /// When `.next()` is called, reset the peeking “cursor”. 
- pub fn peek(&mut self) -> Option<&I::Item> { - let ret = if self.index < self.buf.len() { - Some(&self.buf[self.index]) - } else { - match self.iter.next() { - Some(x) => { - self.buf.push_back(x); - Some(&self.buf[self.index]) - } - None => return None, - } - }; - - self.index += 1; - ret - } -} - -impl PeekingNext for MultiPeek -where - I: Iterator, -{ - fn peeking_next(&mut self, accept: F) -> Option - where - F: FnOnce(&Self::Item) -> bool, - { - if self.buf.is_empty() { - if let Some(r) = self.peek() { - if !accept(r) { - return None; - } - } - } else if let Some(r) = self.buf.front() { - if !accept(r) { - return None; - } - } - self.next() - } -} - -impl Iterator for MultiPeek -where - I: Iterator, -{ - type Item = I::Item; - - fn next(&mut self) -> Option { - self.index = 0; - self.buf.pop_front().or_else(|| self.iter.next()) - } - - fn size_hint(&self) -> (usize, Option) { - size_hint::add_scalar(self.iter.size_hint(), self.buf.len()) - } - - fn fold(self, mut init: B, mut f: F) -> B - where - F: FnMut(B, Self::Item) -> B, - { - init = self.buf.into_iter().fold(init, &mut f); - self.iter.fold(init, f) - } -} - -// Same size -impl ExactSizeIterator for MultiPeek where I: ExactSizeIterator {} diff --git a/vendor/itertools/src/pad_tail.rs b/vendor/itertools/src/pad_tail.rs deleted file mode 100644 index 5595b42bacf21d..00000000000000 --- a/vendor/itertools/src/pad_tail.rs +++ /dev/null @@ -1,124 +0,0 @@ -use crate::size_hint; -use std::iter::{Fuse, FusedIterator}; - -/// An iterator adaptor that pads a sequence to a minimum length by filling -/// missing elements using a closure. -/// -/// Iterator element type is `I::Item`. -/// -/// See [`.pad_using()`](crate::Itertools::pad_using) for more information. -#[derive(Clone)] -#[must_use = "iterator adaptors are lazy and do nothing unless consumed"] -pub struct PadUsing { - iter: Fuse, - min: usize, - pos: usize, - filler: F, -} - -impl std::fmt::Debug for PadUsing -where - I: std::fmt::Debug, -{ - debug_fmt_fields!(PadUsing, iter, min, pos); -} - -/// Create a new `PadUsing` iterator. 
-pub fn pad_using(iter: I, min: usize, filler: F) -> PadUsing -where - I: Iterator, - F: FnMut(usize) -> I::Item, -{ - PadUsing { - iter: iter.fuse(), - min, - pos: 0, - filler, - } -} - -impl Iterator for PadUsing -where - I: Iterator, - F: FnMut(usize) -> I::Item, -{ - type Item = I::Item; - - #[inline] - fn next(&mut self) -> Option { - match self.iter.next() { - None => { - if self.pos < self.min { - let e = Some((self.filler)(self.pos)); - self.pos += 1; - e - } else { - None - } - } - e => { - self.pos += 1; - e - } - } - } - - fn size_hint(&self) -> (usize, Option) { - let tail = self.min.saturating_sub(self.pos); - size_hint::max(self.iter.size_hint(), (tail, Some(tail))) - } - - fn fold(self, mut init: B, mut f: G) -> B - where - G: FnMut(B, Self::Item) -> B, - { - let mut pos = self.pos; - init = self.iter.fold(init, |acc, item| { - pos += 1; - f(acc, item) - }); - (pos..self.min).map(self.filler).fold(init, f) - } -} - -impl DoubleEndedIterator for PadUsing -where - I: DoubleEndedIterator + ExactSizeIterator, - F: FnMut(usize) -> I::Item, -{ - fn next_back(&mut self) -> Option { - if self.min == 0 { - self.iter.next_back() - } else if self.iter.len() >= self.min { - self.min -= 1; - self.iter.next_back() - } else { - self.min -= 1; - Some((self.filler)(self.min)) - } - } - - fn rfold(self, mut init: B, mut f: G) -> B - where - G: FnMut(B, Self::Item) -> B, - { - init = (self.iter.len()..self.min) - .map(self.filler) - .rfold(init, &mut f); - self.iter.rfold(init, f) - } -} - -impl ExactSizeIterator for PadUsing -where - I: ExactSizeIterator, - F: FnMut(usize) -> I::Item, -{ -} - -impl FusedIterator for PadUsing -where - I: FusedIterator, - F: FnMut(usize) -> I::Item, -{ -} diff --git a/vendor/itertools/src/peek_nth.rs b/vendor/itertools/src/peek_nth.rs deleted file mode 100644 index b03a3ef5f2a776..00000000000000 --- a/vendor/itertools/src/peek_nth.rs +++ /dev/null @@ -1,178 +0,0 @@ -use crate::size_hint; -use crate::PeekingNext; -use alloc::collections::VecDeque; -use std::iter::Fuse; - -/// See [`peek_nth()`] for more information. -#[derive(Clone, Debug)] -#[must_use = "iterator adaptors are lazy and do nothing unless consumed"] -pub struct PeekNth -where - I: Iterator, -{ - iter: Fuse, - buf: VecDeque, -} - -/// A drop-in replacement for [`std::iter::Peekable`] which adds a `peek_nth` -/// method allowing the user to `peek` at a value several iterations forward -/// without advancing the base iterator. -/// -/// This differs from `multipeek` in that subsequent calls to `peek` or -/// `peek_nth` will always return the same value until `next` is called -/// (making `reset_peek` unnecessary). -pub fn peek_nth(iterable: I) -> PeekNth -where - I: IntoIterator, -{ - PeekNth { - iter: iterable.into_iter().fuse(), - buf: VecDeque::new(), - } -} - -impl PeekNth -where - I: Iterator, -{ - /// Works exactly like the `peek` method in [`std::iter::Peekable`]. - pub fn peek(&mut self) -> Option<&I::Item> { - self.peek_nth(0) - } - - /// Works exactly like the `peek_mut` method in [`std::iter::Peekable`]. - pub fn peek_mut(&mut self) -> Option<&mut I::Item> { - self.peek_nth_mut(0) - } - - /// Returns a reference to the `nth` value without advancing the iterator. 
- /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// use itertools::peek_nth; - /// - /// let xs = vec![1, 2, 3]; - /// let mut iter = peek_nth(xs.into_iter()); - /// - /// assert_eq!(iter.peek_nth(0), Some(&1)); - /// assert_eq!(iter.next(), Some(1)); - /// - /// // The iterator does not advance even if we call `peek_nth` multiple times - /// assert_eq!(iter.peek_nth(0), Some(&2)); - /// assert_eq!(iter.peek_nth(1), Some(&3)); - /// assert_eq!(iter.next(), Some(2)); - /// - /// // Calling `peek_nth` past the end of the iterator will return `None` - /// assert_eq!(iter.peek_nth(1), None); - /// ``` - pub fn peek_nth(&mut self, n: usize) -> Option<&I::Item> { - let unbuffered_items = (n + 1).saturating_sub(self.buf.len()); - - self.buf.extend(self.iter.by_ref().take(unbuffered_items)); - - self.buf.get(n) - } - - /// Returns a mutable reference to the `nth` value without advancing the iterator. - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// use itertools::peek_nth; - /// - /// let xs = vec![1, 2, 3, 4, 5]; - /// let mut iter = peek_nth(xs.into_iter()); - /// - /// assert_eq!(iter.peek_nth_mut(0), Some(&mut 1)); - /// assert_eq!(iter.next(), Some(1)); - /// - /// // The iterator does not advance even if we call `peek_nth_mut` multiple times - /// assert_eq!(iter.peek_nth_mut(0), Some(&mut 2)); - /// assert_eq!(iter.peek_nth_mut(1), Some(&mut 3)); - /// assert_eq!(iter.next(), Some(2)); - /// - /// // Peek into the iterator and set the value behind the mutable reference. - /// if let Some(p) = iter.peek_nth_mut(1) { - /// assert_eq!(*p, 4); - /// *p = 9; - /// } - /// - /// // The value we put in reappears as the iterator continues. - /// assert_eq!(iter.next(), Some(3)); - /// assert_eq!(iter.next(), Some(9)); - /// - /// // Calling `peek_nth_mut` past the end of the iterator will return `None` - /// assert_eq!(iter.peek_nth_mut(1), None); - /// ``` - pub fn peek_nth_mut(&mut self, n: usize) -> Option<&mut I::Item> { - let unbuffered_items = (n + 1).saturating_sub(self.buf.len()); - - self.buf.extend(self.iter.by_ref().take(unbuffered_items)); - - self.buf.get_mut(n) - } - - /// Works exactly like the `next_if` method in [`std::iter::Peekable`]. - pub fn next_if(&mut self, func: impl FnOnce(&I::Item) -> bool) -> Option { - match self.next() { - Some(item) if func(&item) => Some(item), - Some(item) => { - self.buf.push_front(item); - None - } - _ => None, - } - } - - /// Works exactly like the `next_if_eq` method in [`std::iter::Peekable`]. 
- pub fn next_if_eq(&mut self, expected: &T) -> Option - where - T: ?Sized, - I::Item: PartialEq, - { - self.next_if(|next| next == expected) - } -} - -impl Iterator for PeekNth -where - I: Iterator, -{ - type Item = I::Item; - - fn next(&mut self) -> Option { - self.buf.pop_front().or_else(|| self.iter.next()) - } - - fn size_hint(&self) -> (usize, Option) { - size_hint::add_scalar(self.iter.size_hint(), self.buf.len()) - } - - fn fold(self, mut init: B, mut f: F) -> B - where - F: FnMut(B, Self::Item) -> B, - { - init = self.buf.into_iter().fold(init, &mut f); - self.iter.fold(init, f) - } -} - -impl ExactSizeIterator for PeekNth where I: ExactSizeIterator {} - -impl PeekingNext for PeekNth -where - I: Iterator, -{ - fn peeking_next(&mut self, accept: F) -> Option - where - F: FnOnce(&Self::Item) -> bool, - { - self.peek().filter(|item| accept(item))?; - self.next() - } -} diff --git a/vendor/itertools/src/peeking_take_while.rs b/vendor/itertools/src/peeking_take_while.rs deleted file mode 100644 index 19872a964fddcd..00000000000000 --- a/vendor/itertools/src/peeking_take_while.rs +++ /dev/null @@ -1,201 +0,0 @@ -use crate::PutBack; -#[cfg(feature = "use_alloc")] -use crate::PutBackN; -use crate::RepeatN; -use std::iter::Peekable; - -/// An iterator that allows peeking at an element before deciding to accept it. -/// -/// See [`.peeking_take_while()`](crate::Itertools::peeking_take_while) -/// for more information. -/// -/// This is implemented by peeking adaptors like peekable and put back, -/// but also by a few iterators that can be peeked natively, like the slice’s -/// by reference iterator ([`std::slice::Iter`]). -pub trait PeekingNext: Iterator { - /// Pass a reference to the next iterator element to the closure `accept`; - /// if `accept` returns `true`, return it as the next element, - /// else `None`. - fn peeking_next(&mut self, accept: F) -> Option - where - Self: Sized, - F: FnOnce(&Self::Item) -> bool; -} - -impl<'a, I> PeekingNext for &'a mut I -where - I: PeekingNext, -{ - fn peeking_next(&mut self, accept: F) -> Option - where - F: FnOnce(&Self::Item) -> bool, - { - (*self).peeking_next(accept) - } -} - -impl PeekingNext for Peekable -where - I: Iterator, -{ - fn peeking_next(&mut self, accept: F) -> Option - where - F: FnOnce(&Self::Item) -> bool, - { - if let Some(r) = self.peek() { - if !accept(r) { - return None; - } - } - self.next() - } -} - -impl PeekingNext for PutBack -where - I: Iterator, -{ - fn peeking_next(&mut self, accept: F) -> Option - where - F: FnOnce(&Self::Item) -> bool, - { - if let Some(r) = self.next() { - if !accept(&r) { - self.put_back(r); - return None; - } - Some(r) - } else { - None - } - } -} - -#[cfg(feature = "use_alloc")] -impl PeekingNext for PutBackN -where - I: Iterator, -{ - fn peeking_next(&mut self, accept: F) -> Option - where - F: FnOnce(&Self::Item) -> bool, - { - if let Some(r) = self.next() { - if !accept(&r) { - self.put_back(r); - return None; - } - Some(r) - } else { - None - } - } -} - -impl PeekingNext for RepeatN { - fn peeking_next(&mut self, accept: F) -> Option - where - F: FnOnce(&Self::Item) -> bool, - { - let r = self.elt.as_ref()?; - if !accept(r) { - return None; - } - self.next() - } -} - -/// An iterator adaptor that takes items while a closure returns `true`. -/// -/// See [`.peeking_take_while()`](crate::Itertools::peeking_take_while) -/// for more information. 
-#[must_use = "iterator adaptors are lazy and do nothing unless consumed"] -pub struct PeekingTakeWhile<'a, I, F> -where - I: Iterator + 'a, -{ - iter: &'a mut I, - f: F, -} - -impl<'a, I, F> std::fmt::Debug for PeekingTakeWhile<'a, I, F> -where - I: Iterator + std::fmt::Debug + 'a, -{ - debug_fmt_fields!(PeekingTakeWhile, iter); -} - -/// Create a `PeekingTakeWhile` -pub fn peeking_take_while(iter: &mut I, f: F) -> PeekingTakeWhile -where - I: Iterator, -{ - PeekingTakeWhile { iter, f } -} - -impl<'a, I, F> Iterator for PeekingTakeWhile<'a, I, F> -where - I: PeekingNext, - F: FnMut(&I::Item) -> bool, -{ - type Item = I::Item; - fn next(&mut self) -> Option { - self.iter.peeking_next(&mut self.f) - } - - fn size_hint(&self) -> (usize, Option) { - (0, self.iter.size_hint().1) - } -} - -impl<'a, I, F> PeekingNext for PeekingTakeWhile<'a, I, F> -where - I: PeekingNext, - F: FnMut(&I::Item) -> bool, -{ - fn peeking_next(&mut self, g: G) -> Option - where - G: FnOnce(&Self::Item) -> bool, - { - let f = &mut self.f; - self.iter.peeking_next(|r| f(r) && g(r)) - } -} - -// Some iterators are so lightweight we can simply clone them to save their -// state and use that for peeking. -macro_rules! peeking_next_by_clone { - ([$($typarm:tt)*] $type_:ty) => { - impl<$($typarm)*> PeekingNext for $type_ { - fn peeking_next(&mut self, accept: F) -> Option - where F: FnOnce(&Self::Item) -> bool - { - let saved_state = self.clone(); - if let Some(r) = self.next() { - if !accept(&r) { - *self = saved_state; - } else { - return Some(r) - } - } - None - } - } - } -} - -peeking_next_by_clone! { ['a, T] ::std::slice::Iter<'a, T> } -peeking_next_by_clone! { ['a] ::std::str::Chars<'a> } -peeking_next_by_clone! { ['a] ::std::str::CharIndices<'a> } -peeking_next_by_clone! { ['a] ::std::str::Bytes<'a> } -peeking_next_by_clone! { ['a, T] ::std::option::Iter<'a, T> } -peeking_next_by_clone! { ['a, T] ::std::result::Iter<'a, T> } -peeking_next_by_clone! { [T] ::std::iter::Empty } -#[cfg(feature = "use_alloc")] -peeking_next_by_clone! { ['a, T] alloc::collections::linked_list::Iter<'a, T> } -#[cfg(feature = "use_alloc")] -peeking_next_by_clone! { ['a, T] alloc::collections::vec_deque::Iter<'a, T> } - -// cloning a Rev has no extra overhead; peekable and put backs are never DEI. -peeking_next_by_clone! { [I: Clone + PeekingNext + DoubleEndedIterator] -::std::iter::Rev } diff --git a/vendor/itertools/src/permutations.rs b/vendor/itertools/src/permutations.rs deleted file mode 100644 index 91389a73a7528f..00000000000000 --- a/vendor/itertools/src/permutations.rs +++ /dev/null @@ -1,186 +0,0 @@ -use alloc::boxed::Box; -use alloc::vec::Vec; -use std::fmt; -use std::iter::once; -use std::iter::FusedIterator; - -use super::lazy_buffer::LazyBuffer; -use crate::size_hint::{self, SizeHint}; - -/// An iterator adaptor that iterates through all the `k`-permutations of the -/// elements from an iterator. -/// -/// See [`.permutations()`](crate::Itertools::permutations) for -/// more information. -#[must_use = "iterator adaptors are lazy and do nothing unless consumed"] -pub struct Permutations { - vals: LazyBuffer, - state: PermutationState, -} - -impl Clone for Permutations -where - I: Clone + Iterator, - I::Item: Clone, -{ - clone_fields!(vals, state); -} - -#[derive(Clone, Debug)] -enum PermutationState { - /// No permutation generated yet. - Start { k: usize }, - /// Values from the iterator are not fully loaded yet so `n` is still unknown. 
- Buffered { k: usize, min_n: usize }, - /// All values from the iterator are known so `n` is known. - Loaded { - indices: Box<[usize]>, - cycles: Box<[usize]>, - }, - /// No permutation left to generate. - End, -} - -impl fmt::Debug for Permutations -where - I: Iterator + fmt::Debug, - I::Item: fmt::Debug, -{ - debug_fmt_fields!(Permutations, vals, state); -} - -pub fn permutations(iter: I, k: usize) -> Permutations { - Permutations { - vals: LazyBuffer::new(iter), - state: PermutationState::Start { k }, - } -} - -impl Iterator for Permutations -where - I: Iterator, - I::Item: Clone, -{ - type Item = Vec; - - fn next(&mut self) -> Option { - let Self { vals, state } = self; - match state { - PermutationState::Start { k: 0 } => { - *state = PermutationState::End; - Some(Vec::new()) - } - &mut PermutationState::Start { k } => { - vals.prefill(k); - if vals.len() != k { - *state = PermutationState::End; - return None; - } - *state = PermutationState::Buffered { k, min_n: k }; - Some(vals[0..k].to_vec()) - } - PermutationState::Buffered { ref k, min_n } => { - if vals.get_next() { - let item = (0..*k - 1) - .chain(once(*min_n)) - .map(|i| vals[i].clone()) - .collect(); - *min_n += 1; - Some(item) - } else { - let n = *min_n; - let prev_iteration_count = n - *k + 1; - let mut indices: Box<[_]> = (0..n).collect(); - let mut cycles: Box<[_]> = (n - k..n).rev().collect(); - // Advance the state to the correct point. - for _ in 0..prev_iteration_count { - if advance(&mut indices, &mut cycles) { - *state = PermutationState::End; - return None; - } - } - let item = vals.get_at(&indices[0..*k]); - *state = PermutationState::Loaded { indices, cycles }; - Some(item) - } - } - PermutationState::Loaded { indices, cycles } => { - if advance(indices, cycles) { - *state = PermutationState::End; - return None; - } - let k = cycles.len(); - Some(vals.get_at(&indices[0..k])) - } - PermutationState::End => None, - } - } - - fn count(self) -> usize { - let Self { vals, state } = self; - let n = vals.count(); - state.size_hint_for(n).1.unwrap() - } - - fn size_hint(&self) -> SizeHint { - let (mut low, mut upp) = self.vals.size_hint(); - low = self.state.size_hint_for(low).0; - upp = upp.and_then(|n| self.state.size_hint_for(n).1); - (low, upp) - } -} - -impl FusedIterator for Permutations -where - I: Iterator, - I::Item: Clone, -{ -} - -fn advance(indices: &mut [usize], cycles: &mut [usize]) -> bool { - let n = indices.len(); - let k = cycles.len(); - // NOTE: if `cycles` are only zeros, then we reached the last permutation. - for i in (0..k).rev() { - if cycles[i] == 0 { - cycles[i] = n - i - 1; - indices[i..].rotate_left(1); - } else { - let swap_index = n - cycles[i]; - indices.swap(i, swap_index); - cycles[i] -= 1; - return false; - } - } - true -} - -impl PermutationState { - fn size_hint_for(&self, n: usize) -> SizeHint { - // At the beginning, there are `n!/(n-k)!` items to come. - let at_start = |n, k| { - debug_assert!(n >= k); - let total = (n - k + 1..=n).try_fold(1usize, |acc, i| acc.checked_mul(i)); - (total.unwrap_or(usize::MAX), total) - }; - match *self { - Self::Start { k } if n < k => (0, Some(0)), - Self::Start { k } => at_start(n, k), - Self::Buffered { k, min_n } => { - // Same as `Start` minus the previously generated items. 
- size_hint::sub_scalar(at_start(n, k), min_n - k + 1) - } - Self::Loaded { - ref indices, - ref cycles, - } => { - let count = cycles.iter().enumerate().try_fold(0usize, |acc, (i, &c)| { - acc.checked_mul(indices.len() - i) - .and_then(|count| count.checked_add(c)) - }); - (count.unwrap_or(usize::MAX), count) - } - Self::End => (0, Some(0)), - } - } -} diff --git a/vendor/itertools/src/powerset.rs b/vendor/itertools/src/powerset.rs deleted file mode 100644 index 734eaf6149ac66..00000000000000 --- a/vendor/itertools/src/powerset.rs +++ /dev/null @@ -1,131 +0,0 @@ -use alloc::vec::Vec; -use std::fmt; -use std::iter::FusedIterator; - -use super::combinations::{combinations, Combinations}; -use crate::adaptors::checked_binomial; -use crate::size_hint::{self, SizeHint}; - -/// An iterator to iterate through the powerset of the elements from an iterator. -/// -/// See [`.powerset()`](crate::Itertools::powerset) for more -/// information. -#[must_use = "iterator adaptors are lazy and do nothing unless consumed"] -pub struct Powerset { - combs: Combinations, -} - -impl Clone for Powerset -where - I: Clone + Iterator, - I::Item: Clone, -{ - clone_fields!(combs); -} - -impl fmt::Debug for Powerset -where - I: Iterator + fmt::Debug, - I::Item: fmt::Debug, -{ - debug_fmt_fields!(Powerset, combs); -} - -/// Create a new `Powerset` from a clonable iterator. -pub fn powerset(src: I) -> Powerset -where - I: Iterator, - I::Item: Clone, -{ - Powerset { - combs: combinations(src, 0), - } -} - -impl Powerset { - /// Returns true if `k` has been incremented, false otherwise. - fn increment_k(&mut self) -> bool { - if self.combs.k() < self.combs.n() || self.combs.k() == 0 { - self.combs.reset(self.combs.k() + 1); - true - } else { - false - } - } -} - -impl Iterator for Powerset -where - I: Iterator, - I::Item: Clone, -{ - type Item = Vec; - - fn next(&mut self) -> Option { - if let Some(elt) = self.combs.next() { - Some(elt) - } else if self.increment_k() { - self.combs.next() - } else { - None - } - } - - fn nth(&mut self, mut n: usize) -> Option { - loop { - match self.combs.try_nth(n) { - Ok(item) => return Some(item), - Err(steps) => { - if !self.increment_k() { - return None; - } - n -= steps; - } - } - } - } - - fn size_hint(&self) -> SizeHint { - let k = self.combs.k(); - // Total bounds for source iterator. - let (n_min, n_max) = self.combs.src().size_hint(); - let low = remaining_for(n_min, k).unwrap_or(usize::MAX); - let upp = n_max.and_then(|n| remaining_for(n, k)); - size_hint::add(self.combs.size_hint(), (low, upp)) - } - - fn count(self) -> usize { - let k = self.combs.k(); - let (n, combs_count) = self.combs.n_and_count(); - combs_count + remaining_for(n, k).unwrap() - } - - fn fold(self, mut init: B, mut f: F) -> B - where - F: FnMut(B, Self::Item) -> B, - { - let mut it = self.combs; - if it.k() == 0 { - init = it.by_ref().fold(init, &mut f); - it.reset(1); - } - init = it.by_ref().fold(init, &mut f); - // n is now known for sure because k >= 1 and all k-combinations have been generated. 
- for k in it.k() + 1..=it.n() { - it.reset(k); - init = it.by_ref().fold(init, &mut f); - } - init - } -} - -impl FusedIterator for Powerset -where - I: Iterator, - I::Item: Clone, -{ -} - -fn remaining_for(n: usize, k: usize) -> Option { - (k + 1..=n).try_fold(0usize, |sum, i| sum.checked_add(checked_binomial(n, i)?)) -} diff --git a/vendor/itertools/src/process_results_impl.rs b/vendor/itertools/src/process_results_impl.rs deleted file mode 100644 index ad6c60d3cfb33a..00000000000000 --- a/vendor/itertools/src/process_results_impl.rs +++ /dev/null @@ -1,108 +0,0 @@ -#[cfg(doc)] -use crate::Itertools; - -/// An iterator that produces only the `T` values as long as the -/// inner iterator produces `Ok(T)`. -/// -/// Used by [`process_results`](crate::process_results), see its docs -/// for more information. -#[must_use = "iterator adaptors are lazy and do nothing unless consumed"] -#[derive(Debug)] -pub struct ProcessResults<'a, I, E: 'a> { - error: &'a mut Result<(), E>, - iter: I, -} - -impl<'a, I, E> ProcessResults<'a, I, E> { - #[inline(always)] - fn next_body(&mut self, item: Option>) -> Option { - match item { - Some(Ok(x)) => Some(x), - Some(Err(e)) => { - *self.error = Err(e); - None - } - None => None, - } - } -} - -impl<'a, I, T, E> Iterator for ProcessResults<'a, I, E> -where - I: Iterator>, -{ - type Item = T; - - fn next(&mut self) -> Option { - let item = self.iter.next(); - self.next_body(item) - } - - fn size_hint(&self) -> (usize, Option) { - (0, self.iter.size_hint().1) - } - - fn fold(mut self, init: B, mut f: F) -> B - where - Self: Sized, - F: FnMut(B, Self::Item) -> B, - { - let error = self.error; - self.iter - .try_fold(init, |acc, opt| match opt { - Ok(x) => Ok(f(acc, x)), - Err(e) => { - *error = Err(e); - Err(acc) - } - }) - .unwrap_or_else(|e| e) - } -} - -impl<'a, I, T, E> DoubleEndedIterator for ProcessResults<'a, I, E> -where - I: Iterator>, - I: DoubleEndedIterator, -{ - fn next_back(&mut self) -> Option { - let item = self.iter.next_back(); - self.next_body(item) - } - - fn rfold(mut self, init: B, mut f: F) -> B - where - F: FnMut(B, Self::Item) -> B, - { - let error = self.error; - self.iter - .try_rfold(init, |acc, opt| match opt { - Ok(x) => Ok(f(acc, x)), - Err(e) => { - *error = Err(e); - Err(acc) - } - }) - .unwrap_or_else(|e| e) - } -} - -/// “Lift” a function of the values of an iterator so that it can process -/// an iterator of `Result` values instead. -/// -/// [`IntoIterator`] enabled version of [`Itertools::process_results`]. -pub fn process_results(iterable: I, processor: F) -> Result -where - I: IntoIterator>, - F: FnOnce(ProcessResults) -> R, -{ - let iter = iterable.into_iter(); - let mut error = Ok(()); - - let result = processor(ProcessResults { - error: &mut error, - iter, - }); - - error.map(|_| result) -} diff --git a/vendor/itertools/src/put_back_n_impl.rs b/vendor/itertools/src/put_back_n_impl.rs deleted file mode 100644 index a9eb4179c49a05..00000000000000 --- a/vendor/itertools/src/put_back_n_impl.rs +++ /dev/null @@ -1,71 +0,0 @@ -use alloc::vec::Vec; - -use crate::size_hint; - -/// An iterator adaptor that allows putting multiple -/// items in front of the iterator. -/// -/// Iterator element type is `I::Item`. -#[derive(Debug, Clone)] -#[must_use = "iterator adaptors are lazy and do nothing unless consumed"] -pub struct PutBackN { - top: Vec, - iter: I, -} - -/// Create an iterator where you can put back multiple values to the front -/// of the iteration. -/// -/// Iterator element type is `I::Item`. 
-pub fn put_back_n(iterable: I) -> PutBackN -where - I: IntoIterator, -{ - PutBackN { - top: Vec::new(), - iter: iterable.into_iter(), - } -} - -impl PutBackN { - /// Puts `x` in front of the iterator. - /// - /// The values are yielded in order of the most recently put back - /// values first. - /// - /// ```rust - /// use itertools::put_back_n; - /// - /// let mut it = put_back_n(1..5); - /// it.next(); - /// it.put_back(1); - /// it.put_back(0); - /// - /// assert!(itertools::equal(it, 0..5)); - /// ``` - #[inline] - pub fn put_back(&mut self, x: I::Item) { - self.top.push(x); - } -} - -impl Iterator for PutBackN { - type Item = I::Item; - #[inline] - fn next(&mut self) -> Option { - self.top.pop().or_else(|| self.iter.next()) - } - - #[inline] - fn size_hint(&self) -> (usize, Option) { - size_hint::add_scalar(self.iter.size_hint(), self.top.len()) - } - - fn fold(self, mut init: B, mut f: F) -> B - where - F: FnMut(B, Self::Item) -> B, - { - init = self.top.into_iter().rfold(init, &mut f); - self.iter.fold(init, f) - } -} diff --git a/vendor/itertools/src/rciter_impl.rs b/vendor/itertools/src/rciter_impl.rs deleted file mode 100644 index e3b7532069730d..00000000000000 --- a/vendor/itertools/src/rciter_impl.rs +++ /dev/null @@ -1,102 +0,0 @@ -use alloc::rc::Rc; -use std::cell::RefCell; -use std::iter::{FusedIterator, IntoIterator}; - -/// A wrapper for `Rc>`, that implements the `Iterator` trait. -#[derive(Debug)] -#[must_use = "iterator adaptors are lazy and do nothing unless consumed"] -pub struct RcIter { - /// The boxed iterator. - pub rciter: Rc>, -} - -/// Return an iterator inside a `Rc>` wrapper. -/// -/// The returned `RcIter` can be cloned, and each clone will refer back to the -/// same original iterator. -/// -/// `RcIter` allows doing interesting things like using `.zip()` on an iterator with -/// itself, at the cost of runtime borrow checking which may have a performance -/// penalty. -/// -/// Iterator element type is `Self::Item`. -/// -/// ``` -/// use itertools::rciter; -/// use itertools::zip; -/// -/// // In this example a range iterator is created and we iterate it using -/// // three separate handles (two of them given to zip). -/// // We also use the IntoIterator implementation for `&RcIter`. -/// -/// let mut iter = rciter(0..9); -/// let mut z = zip(&iter, &iter); -/// -/// assert_eq!(z.next(), Some((0, 1))); -/// assert_eq!(z.next(), Some((2, 3))); -/// assert_eq!(z.next(), Some((4, 5))); -/// assert_eq!(iter.next(), Some(6)); -/// assert_eq!(z.next(), Some((7, 8))); -/// assert_eq!(z.next(), None); -/// ``` -/// -/// **Panics** in iterator methods if a borrow error is encountered in the -/// iterator methods. It can only happen if the `RcIter` is reentered in -/// `.next()`, i.e. if it somehow participates in an “iterator knot” -/// where it is an adaptor of itself. -pub fn rciter(iterable: I) -> RcIter -where - I: IntoIterator, -{ - RcIter { - rciter: Rc::new(RefCell::new(iterable.into_iter())), - } -} - -impl Clone for RcIter { - clone_fields!(rciter); -} - -impl Iterator for RcIter -where - I: Iterator, -{ - type Item = A; - #[inline] - fn next(&mut self) -> Option { - self.rciter.borrow_mut().next() - } - - #[inline] - fn size_hint(&self) -> (usize, Option) { - // To work sanely with other API that assume they own an iterator, - // so it can't change in other places, we can't guarantee as much - // in our size_hint. Other clones may drain values under our feet. 
- (0, self.rciter.borrow().size_hint().1) - } -} - -impl DoubleEndedIterator for RcIter -where - I: DoubleEndedIterator, -{ - #[inline] - fn next_back(&mut self) -> Option { - self.rciter.borrow_mut().next_back() - } -} - -/// Return an iterator from `&RcIter` (by simply cloning it). -impl<'a, I> IntoIterator for &'a RcIter -where - I: Iterator, -{ - type Item = I::Item; - type IntoIter = RcIter; - - fn into_iter(self) -> RcIter { - self.clone() - } -} - -impl FusedIterator for RcIter where I: FusedIterator {} diff --git a/vendor/itertools/src/repeatn.rs b/vendor/itertools/src/repeatn.rs deleted file mode 100644 index d86ad9facd3324..00000000000000 --- a/vendor/itertools/src/repeatn.rs +++ /dev/null @@ -1,83 +0,0 @@ -use std::iter::FusedIterator; - -/// An iterator that produces *n* repetitions of an element. -/// -/// See [`repeat_n()`](crate::repeat_n) for more information. -#[must_use = "iterators are lazy and do nothing unless consumed"] -#[derive(Clone, Debug)] -pub struct RepeatN { - pub(crate) elt: Option, - n: usize, -} - -/// Create an iterator that produces `n` repetitions of `element`. -pub fn repeat_n(element: A, n: usize) -> RepeatN -where - A: Clone, -{ - if n == 0 { - RepeatN { elt: None, n } - } else { - RepeatN { - elt: Some(element), - n, - } - } -} - -impl Iterator for RepeatN -where - A: Clone, -{ - type Item = A; - - fn next(&mut self) -> Option { - if self.n > 1 { - self.n -= 1; - self.elt.as_ref().cloned() - } else { - self.n = 0; - self.elt.take() - } - } - - fn size_hint(&self) -> (usize, Option) { - (self.n, Some(self.n)) - } - - fn fold(self, mut init: B, mut f: F) -> B - where - F: FnMut(B, Self::Item) -> B, - { - match self { - Self { elt: Some(elt), n } => { - debug_assert!(n > 0); - init = (1..n).map(|_| elt.clone()).fold(init, &mut f); - f(init, elt) - } - _ => init, - } - } -} - -impl DoubleEndedIterator for RepeatN -where - A: Clone, -{ - #[inline] - fn next_back(&mut self) -> Option { - self.next() - } - - #[inline] - fn rfold(self, init: B, f: F) -> B - where - F: FnMut(B, Self::Item) -> B, - { - self.fold(init, f) - } -} - -impl ExactSizeIterator for RepeatN where A: Clone {} - -impl FusedIterator for RepeatN where A: Clone {} diff --git a/vendor/itertools/src/size_hint.rs b/vendor/itertools/src/size_hint.rs deleted file mode 100644 index 6cfead7f2b42d4..00000000000000 --- a/vendor/itertools/src/size_hint.rs +++ /dev/null @@ -1,94 +0,0 @@ -//! Arithmetic on `Iterator.size_hint()` values. -//! - -use std::cmp; - -/// `SizeHint` is the return type of `Iterator::size_hint()`. -pub type SizeHint = (usize, Option); - -/// Add `SizeHint` correctly. -#[inline] -pub fn add(a: SizeHint, b: SizeHint) -> SizeHint { - let min = a.0.saturating_add(b.0); - let max = match (a.1, b.1) { - (Some(x), Some(y)) => x.checked_add(y), - _ => None, - }; - - (min, max) -} - -/// Add `x` correctly to a `SizeHint`. -#[inline] -pub fn add_scalar(sh: SizeHint, x: usize) -> SizeHint { - let (mut low, mut hi) = sh; - low = low.saturating_add(x); - hi = hi.and_then(|elt| elt.checked_add(x)); - (low, hi) -} - -/// Subtract `x` correctly from a `SizeHint`. 
-#[inline]
-pub fn sub_scalar(sh: SizeHint, x: usize) -> SizeHint {
-    let (mut low, mut hi) = sh;
-    low = low.saturating_sub(x);
-    hi = hi.map(|elt| elt.saturating_sub(x));
-    (low, hi)
-}
-
-/// Multiply `SizeHint` correctly
-#[inline]
-pub fn mul(a: SizeHint, b: SizeHint) -> SizeHint {
-    let low = a.0.saturating_mul(b.0);
-    let hi = match (a.1, b.1) {
-        (Some(x), Some(y)) => x.checked_mul(y),
-        (Some(0), None) | (None, Some(0)) => Some(0),
-        _ => None,
-    };
-    (low, hi)
-}
-
-/// Multiply `x` correctly with a `SizeHint`.
-#[inline]
-pub fn mul_scalar(sh: SizeHint, x: usize) -> SizeHint {
-    let (mut low, mut hi) = sh;
-    low = low.saturating_mul(x);
-    hi = hi.and_then(|elt| elt.checked_mul(x));
-    (low, hi)
-}
-
-/// Return the maximum
-#[inline]
-pub fn max(a: SizeHint, b: SizeHint) -> SizeHint {
-    let (a_lower, a_upper) = a;
-    let (b_lower, b_upper) = b;
-
-    let lower = cmp::max(a_lower, b_lower);
-
-    let upper = match (a_upper, b_upper) {
-        (Some(x), Some(y)) => Some(cmp::max(x, y)),
-        _ => None,
-    };
-
-    (lower, upper)
-}
-
-/// Return the minimum
-#[inline]
-pub fn min(a: SizeHint, b: SizeHint) -> SizeHint {
-    let (a_lower, a_upper) = a;
-    let (b_lower, b_upper) = b;
-    let lower = cmp::min(a_lower, b_lower);
-    let upper = match (a_upper, b_upper) {
-        (Some(u1), Some(u2)) => Some(cmp::min(u1, u2)),
-        _ => a_upper.or(b_upper),
-    };
-    (lower, upper)
-}
-
-#[test]
-fn mul_size_hints() {
-    assert_eq!(mul((3, Some(4)), (3, Some(4))), (9, Some(16)));
-    assert_eq!(mul((3, Some(4)), (usize::MAX, None)), (usize::MAX, None));
-    assert_eq!(mul((3, None), (0, Some(0))), (0, Some(0)));
-}
diff --git a/vendor/itertools/src/sources.rs b/vendor/itertools/src/sources.rs
deleted file mode 100644
index c405ffdc7196e9..00000000000000
--- a/vendor/itertools/src/sources.rs
+++ /dev/null
@@ -1,153 +0,0 @@
-//! Iterators that are sources (produce elements from parameters,
-//! not from another iterator).
-#![allow(deprecated)]
-
-use std::fmt;
-use std::mem;
-
-/// Creates a new unfold source with the specified closure as the "iterator
-/// function" and an initial state to eventually pass to the closure
-///
-/// `unfold` is a general iterator builder: it has a mutable state value,
-/// and a closure with access to the state that produces the next value.
-///
-/// This more or less equivalent to a regular struct with an [`Iterator`]
-/// implementation, and is useful for one-off iterators.
-///
-/// ```
-/// // an iterator that yields sequential Fibonacci numbers,
-/// // and stops at the maximum representable value.
-/// -/// use itertools::unfold; -/// -/// let mut fibonacci = unfold((1u32, 1u32), |(x1, x2)| { -/// // Attempt to get the next Fibonacci number -/// let next = x1.saturating_add(*x2); -/// -/// // Shift left: ret <- x1 <- x2 <- next -/// let ret = *x1; -/// *x1 = *x2; -/// *x2 = next; -/// -/// // If addition has saturated at the maximum, we are finished -/// if ret == *x1 && ret > 1 { -/// None -/// } else { -/// Some(ret) -/// } -/// }); -/// -/// itertools::assert_equal(fibonacci.by_ref().take(8), -/// vec![1, 1, 2, 3, 5, 8, 13, 21]); -/// assert_eq!(fibonacci.last(), Some(2_971_215_073)) -/// ``` -#[deprecated( - note = "Use [std::iter::from_fn](https://doc.rust-lang.org/std/iter/fn.from_fn.html) instead", - since = "0.13.0" -)] -pub fn unfold(initial_state: St, f: F) -> Unfold -where - F: FnMut(&mut St) -> Option, -{ - Unfold { - f, - state: initial_state, - } -} - -impl fmt::Debug for Unfold -where - St: fmt::Debug, -{ - debug_fmt_fields!(Unfold, state); -} - -/// See [`unfold`](crate::unfold) for more information. -#[derive(Clone)] -#[must_use = "iterators are lazy and do nothing unless consumed"] -#[deprecated( - note = "Use [std::iter::FromFn](https://doc.rust-lang.org/std/iter/struct.FromFn.html) instead", - since = "0.13.0" -)] -pub struct Unfold { - f: F, - /// Internal state that will be passed to the closure on the next iteration - pub state: St, -} - -impl Iterator for Unfold -where - F: FnMut(&mut St) -> Option, -{ - type Item = A; - - #[inline] - fn next(&mut self) -> Option { - (self.f)(&mut self.state) - } -} - -/// An iterator that infinitely applies function to value and yields results. -/// -/// This `struct` is created by the [`iterate()`](crate::iterate) function. -/// See its documentation for more. -#[derive(Clone)] -#[must_use = "iterators are lazy and do nothing unless consumed"] -pub struct Iterate { - state: St, - f: F, -} - -impl fmt::Debug for Iterate -where - St: fmt::Debug, -{ - debug_fmt_fields!(Iterate, state); -} - -impl Iterator for Iterate -where - F: FnMut(&St) -> St, -{ - type Item = St; - - #[inline] - fn next(&mut self) -> Option { - let next_state = (self.f)(&self.state); - Some(mem::replace(&mut self.state, next_state)) - } - - #[inline] - fn size_hint(&self) -> (usize, Option) { - (usize::MAX, None) - } -} - -/// Creates a new iterator that infinitely applies function to value and yields results. -/// -/// ``` -/// use itertools::iterate; -/// -/// itertools::assert_equal(iterate(1, |i| i % 3 + 1).take(5), vec![1, 2, 3, 1, 2]); -/// ``` -/// -/// **Panics** if compute the next value does. -/// -/// ```should_panic -/// # use itertools::iterate; -/// let mut it = iterate(25u32, |x| x - 10).take_while(|&x| x > 10); -/// assert_eq!(it.next(), Some(25)); // `Iterate` holds 15. -/// assert_eq!(it.next(), Some(15)); // `Iterate` holds 5. -/// it.next(); // `5 - 10` overflows. -/// ``` -/// -/// You can alternatively use [`core::iter::successors`] as it better describes a finite iterator. 
-pub fn iterate(initial_value: St, f: F) -> Iterate -where - F: FnMut(&St) -> St, -{ - Iterate { - state: initial_value, - f, - } -} diff --git a/vendor/itertools/src/take_while_inclusive.rs b/vendor/itertools/src/take_while_inclusive.rs deleted file mode 100644 index 420da9847af977..00000000000000 --- a/vendor/itertools/src/take_while_inclusive.rs +++ /dev/null @@ -1,96 +0,0 @@ -use core::iter::FusedIterator; -use std::fmt; - -/// An iterator adaptor that consumes elements while the given predicate is -/// `true`, including the element for which the predicate first returned -/// `false`. -/// -/// See [`.take_while_inclusive()`](crate::Itertools::take_while_inclusive) -/// for more information. -#[must_use = "iterator adaptors are lazy and do nothing unless consumed"] -#[derive(Clone)] -pub struct TakeWhileInclusive { - iter: I, - predicate: F, - done: bool, -} - -impl TakeWhileInclusive -where - I: Iterator, - F: FnMut(&I::Item) -> bool, -{ - /// Create a new [`TakeWhileInclusive`] from an iterator and a predicate. - pub(crate) fn new(iter: I, predicate: F) -> Self { - Self { - iter, - predicate, - done: false, - } - } -} - -impl fmt::Debug for TakeWhileInclusive -where - I: Iterator + fmt::Debug, -{ - debug_fmt_fields!(TakeWhileInclusive, iter, done); -} - -impl Iterator for TakeWhileInclusive -where - I: Iterator, - F: FnMut(&I::Item) -> bool, -{ - type Item = I::Item; - - fn next(&mut self) -> Option { - if self.done { - None - } else { - self.iter.next().map(|item| { - if !(self.predicate)(&item) { - self.done = true; - } - item - }) - } - } - - fn size_hint(&self) -> (usize, Option) { - if self.done { - (0, Some(0)) - } else { - (0, self.iter.size_hint().1) - } - } - - fn fold(mut self, init: B, mut f: Fold) -> B - where - Fold: FnMut(B, Self::Item) -> B, - { - if self.done { - init - } else { - let predicate = &mut self.predicate; - self.iter - .try_fold(init, |mut acc, item| { - let is_ok = predicate(&item); - acc = f(acc, item); - if is_ok { - Ok(acc) - } else { - Err(acc) - } - }) - .unwrap_or_else(|err| err) - } - } -} - -impl FusedIterator for TakeWhileInclusive -where - I: Iterator, - F: FnMut(&I::Item) -> bool, -{ -} diff --git a/vendor/itertools/src/tee.rs b/vendor/itertools/src/tee.rs deleted file mode 100644 index 0984c5de963971..00000000000000 --- a/vendor/itertools/src/tee.rs +++ /dev/null @@ -1,93 +0,0 @@ -use super::size_hint; - -use alloc::collections::VecDeque; -use alloc::rc::Rc; -use std::cell::RefCell; - -/// Common buffer object for the two tee halves -#[derive(Debug)] -struct TeeBuffer { - backlog: VecDeque, - iter: I, - /// The owner field indicates which id should read from the backlog - owner: bool, -} - -/// One half of an iterator pair where both return the same elements. -/// -/// See [`.tee()`](crate::Itertools::tee) for more information. 
-#[must_use = "iterator adaptors are lazy and do nothing unless consumed"] -#[derive(Debug)] -pub struct Tee -where - I: Iterator, -{ - rcbuffer: Rc>>, - id: bool, -} - -pub fn new(iter: I) -> (Tee, Tee) -where - I: Iterator, -{ - let buffer = TeeBuffer { - backlog: VecDeque::new(), - iter, - owner: false, - }; - let t1 = Tee { - rcbuffer: Rc::new(RefCell::new(buffer)), - id: true, - }; - let t2 = Tee { - rcbuffer: t1.rcbuffer.clone(), - id: false, - }; - (t1, t2) -} - -impl Iterator for Tee -where - I: Iterator, - I::Item: Clone, -{ - type Item = I::Item; - fn next(&mut self) -> Option { - // .borrow_mut may fail here -- but only if the user has tied some kind of weird - // knot where the iterator refers back to itself. - let mut buffer = self.rcbuffer.borrow_mut(); - if buffer.owner == self.id { - match buffer.backlog.pop_front() { - None => {} - some_elt => return some_elt, - } - } - match buffer.iter.next() { - None => None, - Some(elt) => { - buffer.backlog.push_back(elt.clone()); - buffer.owner = !self.id; - Some(elt) - } - } - } - - fn size_hint(&self) -> (usize, Option) { - let buffer = self.rcbuffer.borrow(); - let sh = buffer.iter.size_hint(); - - if buffer.owner == self.id { - let log_len = buffer.backlog.len(); - size_hint::add_scalar(sh, log_len) - } else { - sh - } - } -} - -impl ExactSizeIterator for Tee -where - I: ExactSizeIterator, - I::Item: Clone, -{ -} diff --git a/vendor/itertools/src/tuple_impl.rs b/vendor/itertools/src/tuple_impl.rs deleted file mode 100644 index c0d556fc95b1f6..00000000000000 --- a/vendor/itertools/src/tuple_impl.rs +++ /dev/null @@ -1,401 +0,0 @@ -//! Some iterator that produces tuples - -use std::iter::Cycle; -use std::iter::Fuse; -use std::iter::FusedIterator; - -use crate::size_hint; - -// `HomogeneousTuple` is a public facade for `TupleCollect`, allowing -// tuple-related methods to be used by clients in generic contexts, while -// hiding the implementation details of `TupleCollect`. -// See https://github.com/rust-itertools/itertools/issues/387 - -/// Implemented for homogeneous tuples of size up to 12. -pub trait HomogeneousTuple: TupleCollect {} - -impl HomogeneousTuple for T {} - -/// An iterator over a incomplete tuple. -/// -/// See [`.tuples()`](crate::Itertools::tuples) and -/// [`Tuples::into_buffer()`]. -#[derive(Clone, Debug)] -pub struct TupleBuffer -where - T: HomogeneousTuple, -{ - cur: usize, - buf: T::Buffer, -} - -impl TupleBuffer -where - T: HomogeneousTuple, -{ - fn new(buf: T::Buffer) -> Self { - Self { cur: 0, buf } - } -} - -impl Iterator for TupleBuffer -where - T: HomogeneousTuple, -{ - type Item = T::Item; - - fn next(&mut self) -> Option { - let s = self.buf.as_mut(); - if let Some(ref mut item) = s.get_mut(self.cur) { - self.cur += 1; - item.take() - } else { - None - } - } - - fn size_hint(&self) -> (usize, Option) { - let buffer = &self.buf.as_ref()[self.cur..]; - let len = if buffer.is_empty() { - 0 - } else { - buffer - .iter() - .position(|x| x.is_none()) - .unwrap_or(buffer.len()) - }; - (len, Some(len)) - } -} - -impl ExactSizeIterator for TupleBuffer where T: HomogeneousTuple {} - -/// An iterator that groups the items in tuples of a specific size. -/// -/// See [`.tuples()`](crate::Itertools::tuples) for more information. -#[derive(Clone, Debug)] -#[must_use = "iterator adaptors are lazy and do nothing unless consumed"] -pub struct Tuples -where - I: Iterator, - T: HomogeneousTuple, -{ - iter: Fuse, - buf: T::Buffer, -} - -/// Create a new tuples iterator. 
-pub fn tuples(iter: I) -> Tuples -where - I: Iterator, - T: HomogeneousTuple, -{ - Tuples { - iter: iter.fuse(), - buf: Default::default(), - } -} - -impl Iterator for Tuples -where - I: Iterator, - T: HomogeneousTuple, -{ - type Item = T; - - fn next(&mut self) -> Option { - T::collect_from_iter(&mut self.iter, &mut self.buf) - } - - fn size_hint(&self) -> (usize, Option) { - // The number of elts we've drawn from the underlying iterator, but have - // not yet produced as a tuple. - let buffered = T::buffer_len(&self.buf); - // To that, we must add the size estimates of the underlying iterator. - let (unbuffered_lo, unbuffered_hi) = self.iter.size_hint(); - // The total low estimate is the sum of the already-buffered elements, - // plus the low estimate of remaining unbuffered elements, divided by - // the tuple size. - let total_lo = add_then_div(unbuffered_lo, buffered, T::num_items()).unwrap_or(usize::MAX); - // And likewise for the total high estimate, but using the high estimate - // of the remaining unbuffered elements. - let total_hi = unbuffered_hi.and_then(|hi| add_then_div(hi, buffered, T::num_items())); - (total_lo, total_hi) - } -} - -/// `(n + a) / d` avoiding overflow when possible, returns `None` if it overflows. -fn add_then_div(n: usize, a: usize, d: usize) -> Option { - debug_assert_ne!(d, 0); - (n / d).checked_add(a / d)?.checked_add((n % d + a % d) / d) -} - -impl ExactSizeIterator for Tuples -where - I: ExactSizeIterator, - T: HomogeneousTuple, -{ -} - -impl Tuples -where - I: Iterator, - T: HomogeneousTuple, -{ - /// Return a buffer with the produced items that was not enough to be grouped in a tuple. - /// - /// ``` - /// use itertools::Itertools; - /// - /// let mut iter = (0..5).tuples(); - /// assert_eq!(Some((0, 1, 2)), iter.next()); - /// assert_eq!(None, iter.next()); - /// itertools::assert_equal(vec![3, 4], iter.into_buffer()); - /// ``` - pub fn into_buffer(self) -> TupleBuffer { - TupleBuffer::new(self.buf) - } -} - -/// An iterator over all contiguous windows that produces tuples of a specific size. -/// -/// See [`.tuple_windows()`](crate::Itertools::tuple_windows) for more -/// information. -#[must_use = "iterator adaptors are lazy and do nothing unless consumed"] -#[derive(Clone, Debug)] -pub struct TupleWindows -where - I: Iterator, - T: HomogeneousTuple, -{ - iter: I, - last: Option, -} - -/// Create a new tuple windows iterator. -pub fn tuple_windows(iter: I) -> TupleWindows -where - I: Iterator, - T: HomogeneousTuple, - T::Item: Clone, -{ - TupleWindows { last: None, iter } -} - -impl Iterator for TupleWindows -where - I: Iterator, - T: HomogeneousTuple + Clone, - T::Item: Clone, -{ - type Item = T; - - fn next(&mut self) -> Option { - if T::num_items() == 1 { - return T::collect_from_iter_no_buf(&mut self.iter); - } - if let Some(new) = self.iter.next() { - if let Some(ref mut last) = self.last { - last.left_shift_push(new); - Some(last.clone()) - } else { - use std::iter::once; - let iter = once(new).chain(&mut self.iter); - self.last = T::collect_from_iter_no_buf(iter); - self.last.clone() - } - } else { - None - } - } - - fn size_hint(&self) -> (usize, Option) { - let mut sh = self.iter.size_hint(); - // Adjust the size hint at the beginning - // OR when `num_items == 1` (but it does not change the size hint). 
- if self.last.is_none() { - sh = size_hint::sub_scalar(sh, T::num_items() - 1); - } - sh - } -} - -impl ExactSizeIterator for TupleWindows -where - I: ExactSizeIterator, - T: HomogeneousTuple + Clone, - T::Item: Clone, -{ -} - -impl FusedIterator for TupleWindows -where - I: FusedIterator, - T: HomogeneousTuple + Clone, - T::Item: Clone, -{ -} - -/// An iterator over all windows, wrapping back to the first elements when the -/// window would otherwise exceed the length of the iterator, producing tuples -/// of a specific size. -/// -/// See [`.circular_tuple_windows()`](crate::Itertools::circular_tuple_windows) for more -/// information. -#[must_use = "iterator adaptors are lazy and do nothing unless consumed"] -#[derive(Debug, Clone)] -pub struct CircularTupleWindows -where - I: Iterator + Clone, - T: TupleCollect + Clone, -{ - iter: TupleWindows, T>, - len: usize, -} - -pub fn circular_tuple_windows(iter: I) -> CircularTupleWindows -where - I: Iterator + Clone + ExactSizeIterator, - T: TupleCollect + Clone, - T::Item: Clone, -{ - let len = iter.len(); - let iter = tuple_windows(iter.cycle()); - - CircularTupleWindows { iter, len } -} - -impl Iterator for CircularTupleWindows -where - I: Iterator + Clone, - T: TupleCollect + Clone, - T::Item: Clone, -{ - type Item = T; - - fn next(&mut self) -> Option { - if self.len != 0 { - self.len -= 1; - self.iter.next() - } else { - None - } - } - - fn size_hint(&self) -> (usize, Option) { - (self.len, Some(self.len)) - } -} - -impl ExactSizeIterator for CircularTupleWindows -where - I: Iterator + Clone, - T: TupleCollect + Clone, - T::Item: Clone, -{ -} - -impl FusedIterator for CircularTupleWindows -where - I: Iterator + Clone, - T: TupleCollect + Clone, - T::Item: Clone, -{ -} - -pub trait TupleCollect: Sized { - type Item; - type Buffer: Default + AsRef<[Option]> + AsMut<[Option]>; - - fn buffer_len(buf: &Self::Buffer) -> usize { - let s = buf.as_ref(); - s.iter().position(Option::is_none).unwrap_or(s.len()) - } - - fn collect_from_iter(iter: I, buf: &mut Self::Buffer) -> Option - where - I: IntoIterator; - - fn collect_from_iter_no_buf(iter: I) -> Option - where - I: IntoIterator; - - fn num_items() -> usize; - - fn left_shift_push(&mut self, item: Self::Item); -} - -macro_rules! rev_for_each_ident{ - ($m:ident, ) => {}; - ($m:ident, $i0:ident, $($i:ident,)*) => { - rev_for_each_ident!($m, $($i,)*); - $m!($i0); - }; -} - -macro_rules! impl_tuple_collect { - ($dummy:ident,) => {}; // stop - ($dummy:ident, $($Y:ident,)*) => ( - impl_tuple_collect!($($Y,)*); - impl TupleCollect for ($(ignore_ident!($Y, A),)*) { - type Item = A; - type Buffer = [Option; count_ident!($($Y)*) - 1]; - - #[allow(unused_assignments, unused_mut)] - fn collect_from_iter(iter: I, buf: &mut Self::Buffer) -> Option - where I: IntoIterator - { - let mut iter = iter.into_iter(); - $( - let mut $Y = None; - )* - - loop { - $( - $Y = iter.next(); - if $Y.is_none() { - break - } - )* - return Some(($($Y.unwrap()),*,)) - } - - let mut i = 0; - let mut s = buf.as_mut(); - $( - if i < s.len() { - s[i] = $Y; - i += 1; - } - )* - return None; - } - - fn collect_from_iter_no_buf(iter: I) -> Option - where I: IntoIterator - { - let mut iter = iter.into_iter(); - - Some(($( - { let $Y = iter.next()?; $Y }, - )*)) - } - - fn num_items() -> usize { - count_ident!($($Y)*) - } - - fn left_shift_push(&mut self, mut item: A) { - use std::mem::replace; - - let &mut ($(ref mut $Y),*,) = self; - macro_rules! 
replace_item{($i:ident) => { - item = replace($i, item); - }} - rev_for_each_ident!(replace_item, $($Y,)*); - drop(item); - } - } - ) -} -impl_tuple_collect!(dummy, a, b, c, d, e, f, g, h, i, j, k, l,); diff --git a/vendor/itertools/src/unique_impl.rs b/vendor/itertools/src/unique_impl.rs deleted file mode 100644 index 0f6397e48fb9de..00000000000000 --- a/vendor/itertools/src/unique_impl.rs +++ /dev/null @@ -1,188 +0,0 @@ -use std::collections::hash_map::Entry; -use std::collections::HashMap; -use std::fmt; -use std::hash::Hash; -use std::iter::FusedIterator; - -/// An iterator adapter to filter out duplicate elements. -/// -/// See [`.unique_by()`](crate::Itertools::unique) for more information. -#[derive(Clone)] -#[must_use = "iterator adaptors are lazy and do nothing unless consumed"] -pub struct UniqueBy { - iter: I, - // Use a Hashmap for the Entry API in order to prevent hashing twice. - // This can maybe be replaced with a HashSet once `get_or_insert_with` - // or a proper Entry API for Hashset is stable and meets this msrv - used: HashMap, - f: F, -} - -impl fmt::Debug for UniqueBy -where - I: Iterator + fmt::Debug, - V: fmt::Debug + Hash + Eq, -{ - debug_fmt_fields!(UniqueBy, iter, used); -} - -/// Create a new `UniqueBy` iterator. -pub fn unique_by(iter: I, f: F) -> UniqueBy -where - V: Eq + Hash, - F: FnMut(&I::Item) -> V, - I: Iterator, -{ - UniqueBy { - iter, - used: HashMap::new(), - f, - } -} - -// count the number of new unique keys in iterable (`used` is the set already seen) -fn count_new_keys(mut used: HashMap, iterable: I) -> usize -where - I: IntoIterator, - K: Hash + Eq, -{ - let iter = iterable.into_iter(); - let current_used = used.len(); - used.extend(iter.map(|key| (key, ()))); - used.len() - current_used -} - -impl Iterator for UniqueBy -where - I: Iterator, - V: Eq + Hash, - F: FnMut(&I::Item) -> V, -{ - type Item = I::Item; - - fn next(&mut self) -> Option { - let Self { iter, used, f } = self; - iter.find(|v| used.insert(f(v), ()).is_none()) - } - - #[inline] - fn size_hint(&self) -> (usize, Option) { - let (low, hi) = self.iter.size_hint(); - ((low > 0 && self.used.is_empty()) as usize, hi) - } - - fn count(self) -> usize { - let mut key_f = self.f; - count_new_keys(self.used, self.iter.map(move |elt| key_f(&elt))) - } -} - -impl DoubleEndedIterator for UniqueBy -where - I: DoubleEndedIterator, - V: Eq + Hash, - F: FnMut(&I::Item) -> V, -{ - fn next_back(&mut self) -> Option { - let Self { iter, used, f } = self; - iter.rfind(|v| used.insert(f(v), ()).is_none()) - } -} - -impl FusedIterator for UniqueBy -where - I: FusedIterator, - V: Eq + Hash, - F: FnMut(&I::Item) -> V, -{ -} - -impl Iterator for Unique -where - I: Iterator, - I::Item: Eq + Hash + Clone, -{ - type Item = I::Item; - - fn next(&mut self) -> Option { - let UniqueBy { iter, used, .. } = &mut self.iter; - iter.find_map(|v| { - if let Entry::Vacant(entry) = used.entry(v) { - let elt = entry.key().clone(); - entry.insert(()); - return Some(elt); - } - None - }) - } - - #[inline] - fn size_hint(&self) -> (usize, Option) { - let (low, hi) = self.iter.iter.size_hint(); - ((low > 0 && self.iter.used.is_empty()) as usize, hi) - } - - fn count(self) -> usize { - count_new_keys(self.iter.used, self.iter.iter) - } -} - -impl DoubleEndedIterator for Unique -where - I: DoubleEndedIterator, - I::Item: Eq + Hash + Clone, -{ - fn next_back(&mut self) -> Option { - let UniqueBy { iter, used, .. 
} = &mut self.iter; - iter.rev().find_map(|v| { - if let Entry::Vacant(entry) = used.entry(v) { - let elt = entry.key().clone(); - entry.insert(()); - return Some(elt); - } - None - }) - } -} - -impl FusedIterator for Unique -where - I: FusedIterator, - I::Item: Eq + Hash + Clone, -{ -} - -/// An iterator adapter to filter out duplicate elements. -/// -/// See [`.unique()`](crate::Itertools::unique) for more information. -#[derive(Clone)] -#[must_use = "iterator adaptors are lazy and do nothing unless consumed"] -pub struct Unique -where - I: Iterator, - I::Item: Eq + Hash + Clone, -{ - iter: UniqueBy, -} - -impl fmt::Debug for Unique -where - I: Iterator + fmt::Debug, - I::Item: Hash + Eq + fmt::Debug + Clone, -{ - debug_fmt_fields!(Unique, iter); -} - -pub fn unique(iter: I) -> Unique -where - I: Iterator, - I::Item: Eq + Hash + Clone, -{ - Unique { - iter: UniqueBy { - iter, - used: HashMap::new(), - f: (), - }, - } -} diff --git a/vendor/itertools/src/unziptuple.rs b/vendor/itertools/src/unziptuple.rs deleted file mode 100644 index 2c79c2d842cbc1..00000000000000 --- a/vendor/itertools/src/unziptuple.rs +++ /dev/null @@ -1,80 +0,0 @@ -/// Converts an iterator of tuples into a tuple of containers. -/// -/// `multiunzip()` consumes an entire iterator of n-ary tuples, producing `n` collections, one for each -/// column. -/// -/// This function is, in some sense, the opposite of [`multizip`]. -/// -/// ``` -/// use itertools::multiunzip; -/// -/// let inputs = vec![(1, 2, 3), (4, 5, 6), (7, 8, 9)]; -/// -/// let (a, b, c): (Vec<_>, Vec<_>, Vec<_>) = multiunzip(inputs); -/// -/// assert_eq!(a, vec![1, 4, 7]); -/// assert_eq!(b, vec![2, 5, 8]); -/// assert_eq!(c, vec![3, 6, 9]); -/// ``` -/// -/// [`multizip`]: crate::multizip -pub fn multiunzip(i: I) -> FromI -where - I: IntoIterator, - I::IntoIter: MultiUnzip, -{ - i.into_iter().multiunzip() -} - -/// An iterator that can be unzipped into multiple collections. -/// -/// See [`.multiunzip()`](crate::Itertools::multiunzip) for more information. -pub trait MultiUnzip: Iterator { - /// Unzip this iterator into multiple collections. - fn multiunzip(self) -> FromI; -} - -macro_rules! impl_unzip_iter { - ($($T:ident => $FromT:ident),*) => ( - #[allow(non_snake_case)] - impl, $($T, $FromT: Default + Extend<$T>),* > MultiUnzip<($($FromT,)*)> for IT { - fn multiunzip(self) -> ($($FromT,)*) { - // This implementation mirrors the logic of Iterator::unzip resp. Extend for (A, B) as close as possible. - // Unfortunately a lot of the used api there is still unstable (https://github.com/rust-lang/rust/issues/72631). 
- // - // Iterator::unzip: https://doc.rust-lang.org/src/core/iter/traits/iterator.rs.html#2825-2865 - // Extend for (A, B): https://doc.rust-lang.org/src/core/iter/traits/collect.rs.html#370-411 - - let mut res = ($($FromT::default(),)*); - let ($($FromT,)*) = &mut res; - - // Still unstable #72631 - // let (lower_bound, _) = self.size_hint(); - // if lower_bound > 0 { - // $($FromT.extend_reserve(lower_bound);)* - // } - - self.fold((), |(), ($($T,)*)| { - // Still unstable #72631 - // $( $FromT.extend_one($T); )* - $( $FromT.extend(std::iter::once($T)); )* - }); - res - } - } - ); -} - -impl_unzip_iter!(); -impl_unzip_iter!(A => FromA); -impl_unzip_iter!(A => FromA, B => FromB); -impl_unzip_iter!(A => FromA, B => FromB, C => FromC); -impl_unzip_iter!(A => FromA, B => FromB, C => FromC, D => FromD); -impl_unzip_iter!(A => FromA, B => FromB, C => FromC, D => FromD, E => FromE); -impl_unzip_iter!(A => FromA, B => FromB, C => FromC, D => FromD, E => FromE, F => FromF); -impl_unzip_iter!(A => FromA, B => FromB, C => FromC, D => FromD, E => FromE, F => FromF, G => FromG); -impl_unzip_iter!(A => FromA, B => FromB, C => FromC, D => FromD, E => FromE, F => FromF, G => FromG, H => FromH); -impl_unzip_iter!(A => FromA, B => FromB, C => FromC, D => FromD, E => FromE, F => FromF, G => FromG, H => FromH, I => FromI); -impl_unzip_iter!(A => FromA, B => FromB, C => FromC, D => FromD, E => FromE, F => FromF, G => FromG, H => FromH, I => FromI, J => FromJ); -impl_unzip_iter!(A => FromA, B => FromB, C => FromC, D => FromD, E => FromE, F => FromF, G => FromG, H => FromH, I => FromI, J => FromJ, K => FromK); -impl_unzip_iter!(A => FromA, B => FromB, C => FromC, D => FromD, E => FromE, F => FromF, G => FromG, H => FromH, I => FromI, J => FromJ, K => FromK, L => FromL); diff --git a/vendor/itertools/src/with_position.rs b/vendor/itertools/src/with_position.rs deleted file mode 100644 index 2d56bb9b224710..00000000000000 --- a/vendor/itertools/src/with_position.rs +++ /dev/null @@ -1,124 +0,0 @@ -use std::fmt; -use std::iter::{Fuse, FusedIterator, Peekable}; - -/// An iterator adaptor that wraps each element in an [`Position`]. -/// -/// Iterator element type is `(Position, I::Item)`. -/// -/// See [`.with_position()`](crate::Itertools::with_position) for more information. -#[must_use = "iterator adaptors are lazy and do nothing unless consumed"] -pub struct WithPosition -where - I: Iterator, -{ - handled_first: bool, - peekable: Peekable>, -} - -impl fmt::Debug for WithPosition -where - I: Iterator, - Peekable>: fmt::Debug, -{ - debug_fmt_fields!(WithPosition, handled_first, peekable); -} - -impl Clone for WithPosition -where - I: Clone + Iterator, - I::Item: Clone, -{ - clone_fields!(handled_first, peekable); -} - -/// Create a new `WithPosition` iterator. -pub fn with_position(iter: I) -> WithPosition -where - I: Iterator, -{ - WithPosition { - handled_first: false, - peekable: iter.fuse().peekable(), - } -} - -/// The first component of the value yielded by `WithPosition`. -/// Indicates the position of this element in the iterator results. -/// -/// See [`.with_position()`](crate::Itertools::with_position) for more information. -#[derive(Copy, Clone, Debug, PartialEq, Eq)] -pub enum Position { - /// This is the first element. - First, - /// This is neither the first nor the last element. - Middle, - /// This is the last element. - Last, - /// This is the only element. 
- Only, -} - -impl Iterator for WithPosition { - type Item = (Position, I::Item); - - fn next(&mut self) -> Option { - match self.peekable.next() { - Some(item) => { - if !self.handled_first { - // Haven't seen the first item yet, and there is one to give. - self.handled_first = true; - // Peek to see if this is also the last item, - // in which case tag it as `Only`. - match self.peekable.peek() { - Some(_) => Some((Position::First, item)), - None => Some((Position::Only, item)), - } - } else { - // Have seen the first item, and there's something left. - // Peek to see if this is the last item. - match self.peekable.peek() { - Some(_) => Some((Position::Middle, item)), - None => Some((Position::Last, item)), - } - } - } - // Iterator is finished. - None => None, - } - } - - fn size_hint(&self) -> (usize, Option) { - self.peekable.size_hint() - } - - fn fold(mut self, mut init: B, mut f: F) -> B - where - F: FnMut(B, Self::Item) -> B, - { - if let Some(mut head) = self.peekable.next() { - if !self.handled_first { - // The current head is `First` or `Only`, - // it depends if there is another item or not. - match self.peekable.next() { - Some(second) => { - let first = std::mem::replace(&mut head, second); - init = f(init, (Position::First, first)); - } - None => return f(init, (Position::Only, head)), - } - } - // Have seen the first item, and there's something left. - init = self.peekable.fold(init, |acc, mut item| { - std::mem::swap(&mut head, &mut item); - f(acc, (Position::Middle, item)) - }); - // The "head" is now the last item. - init = f(init, (Position::Last, head)); - } - init - } -} - -impl ExactSizeIterator for WithPosition where I: ExactSizeIterator {} - -impl FusedIterator for WithPosition {} diff --git a/vendor/itertools/src/zip_eq_impl.rs b/vendor/itertools/src/zip_eq_impl.rs deleted file mode 100644 index 6d3b68296656ee..00000000000000 --- a/vendor/itertools/src/zip_eq_impl.rs +++ /dev/null @@ -1,64 +0,0 @@ -use super::size_hint; - -/// An iterator which iterates two other iterators simultaneously -/// and panic if they have different lengths. -/// -/// See [`.zip_eq()`](crate::Itertools::zip_eq) for more information. -#[derive(Clone, Debug)] -#[must_use = "iterator adaptors are lazy and do nothing unless consumed"] -pub struct ZipEq { - a: I, - b: J, -} - -/// Zips two iterators but **panics** if they are not of the same length. -/// -/// [`IntoIterator`] enabled version of [`Itertools::zip_eq`](crate::Itertools::zip_eq). 
-///
-/// ```
-/// use itertools::zip_eq;
-///
-/// let data = [1, 2, 3, 4, 5];
-/// for (a, b) in zip_eq(&data[..data.len() - 1], &data[1..]) {
-///     /* loop body */
-/// }
-/// ```
-pub fn zip_eq<I, J>(i: I, j: J) -> ZipEq<I::IntoIter, J::IntoIter>
-where
-    I: IntoIterator,
-    J: IntoIterator,
-{
-    ZipEq {
-        a: i.into_iter(),
-        b: j.into_iter(),
-    }
-}
-
-impl<I, J> Iterator for ZipEq<I, J>
-where
-    I: Iterator,
-    J: Iterator,
-{
-    type Item = (I::Item, J::Item);
-
-    fn next(&mut self) -> Option<Self::Item> {
-        match (self.a.next(), self.b.next()) {
-            (None, None) => None,
-            (Some(a), Some(b)) => Some((a, b)),
-            (None, Some(_)) | (Some(_), None) => {
-                panic!("itertools: .zip_eq() reached end of one iterator before the other")
-            }
-        }
-    }
-
-    fn size_hint(&self) -> (usize, Option<usize>) {
-        size_hint::min(self.a.size_hint(), self.b.size_hint())
-    }
-}
-
-impl<I, J> ExactSizeIterator for ZipEq<I, J>
-where
-    I: ExactSizeIterator,
-    J: ExactSizeIterator,
-{
-}
diff --git a/vendor/itertools/src/zip_longest.rs b/vendor/itertools/src/zip_longest.rs
deleted file mode 100644
index d4eb9a882e3a31..00000000000000
--- a/vendor/itertools/src/zip_longest.rs
+++ /dev/null
@@ -1,139 +0,0 @@
-use super::size_hint;
-use std::cmp::Ordering::{Equal, Greater, Less};
-use std::iter::{Fuse, FusedIterator};
-
-use crate::either_or_both::EitherOrBoth;
-
-// ZipLongest originally written by SimonSapin,
-// and dedicated to itertools https://github.com/rust-lang/rust/pull/19283
-
-/// An iterator which iterates two other iterators simultaneously
-/// and wraps the elements in [`EitherOrBoth`].
-///
-/// This iterator is *fused*.
-///
-/// See [`.zip_longest()`](crate::Itertools::zip_longest) for more information.
-#[derive(Clone, Debug)]
-#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
-pub struct ZipLongest<T, U> {
-    a: Fuse<T>,
-    b: Fuse<U>,
-}
-
-/// Create a new `ZipLongest` iterator.
-pub fn zip_longest<T, U>(a: T, b: U) -> ZipLongest<T, U>
-where
-    T: Iterator,
-    U: Iterator,
-{
-    ZipLongest {
-        a: a.fuse(),
-        b: b.fuse(),
-    }
-}
-
-impl<T, U> Iterator for ZipLongest<T, U>
-where
-    T: Iterator,
-    U: Iterator,
-{
-    type Item = EitherOrBoth<T::Item, U::Item>;
-
-    #[inline]
-    fn next(&mut self) -> Option<Self::Item> {
-        match (self.a.next(), self.b.next()) {
-            (None, None) => None,
-            (Some(a), None) => Some(EitherOrBoth::Left(a)),
-            (None, Some(b)) => Some(EitherOrBoth::Right(b)),
-            (Some(a), Some(b)) => Some(EitherOrBoth::Both(a, b)),
-        }
-    }
-
-    #[inline]
-    fn size_hint(&self) -> (usize, Option<usize>) {
-        size_hint::max(self.a.size_hint(), self.b.size_hint())
-    }
-
-    #[inline]
-    fn fold<B, F>(self, init: B, mut f: F) -> B
-    where
-        Self: Sized,
-        F: FnMut(B, Self::Item) -> B,
-    {
-        let Self { mut a, mut b } = self;
-        let res = a.try_fold(init, |init, a| match b.next() {
-            Some(b) => Ok(f(init, EitherOrBoth::Both(a, b))),
-            None => Err(f(init, EitherOrBoth::Left(a))),
-        });
-        match res {
-            Ok(acc) => b.map(EitherOrBoth::Right).fold(acc, f),
-            Err(acc) => a.map(EitherOrBoth::Left).fold(acc, f),
-        }
-    }
-}
-
-impl<T, U> DoubleEndedIterator for ZipLongest<T, U>
-where
-    T: DoubleEndedIterator + ExactSizeIterator,
-    U: DoubleEndedIterator + ExactSizeIterator,
-{
-    #[inline]
-    fn next_back(&mut self) -> Option<Self::Item> {
-        match self.a.len().cmp(&self.b.len()) {
-            Equal => match (self.a.next_back(), self.b.next_back()) {
-                (None, None) => None,
-                (Some(a), Some(b)) => Some(EitherOrBoth::Both(a, b)),
-                // These can only happen if .len() is inconsistent with .next_back()
-                (Some(a), None) => Some(EitherOrBoth::Left(a)),
-                (None, Some(b)) => Some(EitherOrBoth::Right(b)),
-            },
-            Greater => self.a.next_back().map(EitherOrBoth::Left),
-            Less => self.b.next_back().map(EitherOrBoth::Right),
-        }
-    }
-
-    fn rfold<B, F>(self, mut init: B, mut f: F) -> B
-    where
-        F: FnMut(B, Self::Item) -> B,
-    {
-        let Self { mut a, mut b } = self;
-        let a_len = a.len();
-        let b_len = b.len();
-        match a_len.cmp(&b_len) {
-            Equal => {}
-            Greater => {
-                init = a
-                    .by_ref()
-                    .rev()
-                    .take(a_len - b_len)
-                    .map(EitherOrBoth::Left)
-                    .fold(init, &mut f)
-            }
-            Less => {
-                init = b
-                    .by_ref()
-                    .rev()
-                    .take(b_len - a_len)
-                    .map(EitherOrBoth::Right)
-                    .fold(init, &mut f)
-            }
-        }
-        a.rfold(init, |acc, item_a| {
-            f(acc, EitherOrBoth::Both(item_a, b.next_back().unwrap()))
-        })
-    }
-}
-
-impl<T, U> ExactSizeIterator for ZipLongest<T, U>
-where
-    T: ExactSizeIterator,
-    U: ExactSizeIterator,
-{
-}
-
-impl<T, U> FusedIterator for ZipLongest<T, U>
-where
-    T: Iterator,
-    U: Iterator,
-{
-}
diff --git a/vendor/itertools/src/ziptuple.rs b/vendor/itertools/src/ziptuple.rs
deleted file mode 100644
index 3ada0296caac16..00000000000000
--- a/vendor/itertools/src/ziptuple.rs
+++ /dev/null
@@ -1,137 +0,0 @@
-use super::size_hint;
-
-/// See [`multizip`] for more information.
-#[derive(Clone, Debug)]
-#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
-pub struct Zip<T> {
-    t: T,
-}
-
-/// An iterator that generalizes `.zip()` and allows running multiple iterators in lockstep.
-///
-/// The iterator `Zip<(I, J, ..., M)>` is formed from a tuple of iterators (or values that
-/// implement [`IntoIterator`]) and yields elements
-/// until any of the subiterators yields `None`.
-///
-/// The iterator element type is a tuple like like `(A, B, ..., E)` where `A` to `E` are the
-/// element types of the subiterator.
-///
-/// **Note:** The result of this function is a value of a named type (`Zip<(I, J,
-/// ..)>` of each component iterator `I, J, ...`) if each component iterator is
-/// nameable.
-/// -/// Prefer [`izip!()`](crate::izip) over `multizip` for the performance benefits of using the -/// standard library `.zip()`. Prefer `multizip` if a nameable type is needed. -/// -/// ``` -/// use itertools::multizip; -/// -/// // iterate over three sequences side-by-side -/// let mut results = [0, 0, 0, 0]; -/// let inputs = [3, 7, 9, 6]; -/// -/// for (r, index, input) in multizip((&mut results, 0..10, &inputs)) { -/// *r = index * 10 + input; -/// } -/// -/// assert_eq!(results, [0 + 3, 10 + 7, 29, 36]); -/// ``` -pub fn multizip(t: U) -> Zip -where - Zip: From + Iterator, -{ - Zip::from(t) -} - -macro_rules! impl_zip_iter { - ($($B:ident),*) => ( - #[allow(non_snake_case)] - impl<$($B: IntoIterator),*> From<($($B,)*)> for Zip<($($B::IntoIter,)*)> { - fn from(t: ($($B,)*)) -> Self { - let ($($B,)*) = t; - Zip { t: ($($B.into_iter(),)*) } - } - } - - #[allow(non_snake_case)] - #[allow(unused_assignments)] - impl<$($B),*> Iterator for Zip<($($B,)*)> - where - $( - $B: Iterator, - )* - { - type Item = ($($B::Item,)*); - - fn next(&mut self) -> Option - { - let ($(ref mut $B,)*) = self.t; - - // NOTE: Just like iter::Zip, we check the iterators - // for None in order. We may finish unevenly (some - // iterators gave n + 1 elements, some only n). - $( - let $B = match $B.next() { - None => return None, - Some(elt) => elt - }; - )* - Some(($($B,)*)) - } - - fn size_hint(&self) -> (usize, Option) - { - let sh = (usize::MAX, None); - let ($(ref $B,)*) = self.t; - $( - let sh = size_hint::min($B.size_hint(), sh); - )* - sh - } - } - - #[allow(non_snake_case)] - impl<$($B),*> ExactSizeIterator for Zip<($($B,)*)> where - $( - $B: ExactSizeIterator, - )* - { } - - #[allow(non_snake_case)] - impl<$($B),*> DoubleEndedIterator for Zip<($($B,)*)> where - $( - $B: DoubleEndedIterator + ExactSizeIterator, - )* - { - #[inline] - fn next_back(&mut self) -> Option { - let ($(ref mut $B,)*) = self.t; - let size = *[$( $B.len(), )*].iter().min().unwrap(); - - $( - if $B.len() != size { - for _ in 0..$B.len() - size { $B.next_back(); } - } - )* - - match ($($B.next_back(),)*) { - ($(Some($B),)*) => Some(($($B,)*)), - _ => None, - } - } - } - ); -} - -impl_zip_iter!(A); -impl_zip_iter!(A, B); -impl_zip_iter!(A, B, C); -impl_zip_iter!(A, B, C, D); -impl_zip_iter!(A, B, C, D, E); -impl_zip_iter!(A, B, C, D, E, F); -impl_zip_iter!(A, B, C, D, E, F, G); -impl_zip_iter!(A, B, C, D, E, F, G, H); -impl_zip_iter!(A, B, C, D, E, F, G, H, I); -impl_zip_iter!(A, B, C, D, E, F, G, H, I, J); -impl_zip_iter!(A, B, C, D, E, F, G, H, I, J, K); -impl_zip_iter!(A, B, C, D, E, F, G, H, I, J, K, L); diff --git a/vendor/itertools/tests/adaptors_no_collect.rs b/vendor/itertools/tests/adaptors_no_collect.rs deleted file mode 100644 index 977224af29f70a..00000000000000 --- a/vendor/itertools/tests/adaptors_no_collect.rs +++ /dev/null @@ -1,51 +0,0 @@ -use itertools::Itertools; - -struct PanickingCounter { - curr: usize, - max: usize, -} - -impl Iterator for PanickingCounter { - type Item = (); - - fn next(&mut self) -> Option { - self.curr += 1; - - assert_ne!( - self.curr, self.max, - "Input iterator reached maximum of {} suggesting collection by adaptor", - self.max - ); - - Some(()) - } -} - -fn no_collect_test(to_adaptor: T) -where - A: Iterator, - T: Fn(PanickingCounter) -> A, -{ - let counter = PanickingCounter { - curr: 0, - max: 10_000, - }; - let adaptor = to_adaptor(counter); - - for _ in adaptor.take(5) {} -} - -#[test] -fn permutations_no_collect() { - no_collect_test(|iter| iter.permutations(5)) -} - -#[test] -fn 
combinations_no_collect() { - no_collect_test(|iter| iter.combinations(5)) -} - -#[test] -fn combinations_with_replacement_no_collect() { - no_collect_test(|iter| iter.combinations_with_replacement(5)) -} diff --git a/vendor/itertools/tests/flatten_ok.rs b/vendor/itertools/tests/flatten_ok.rs deleted file mode 100644 index bf835b5d70a173..00000000000000 --- a/vendor/itertools/tests/flatten_ok.rs +++ /dev/null @@ -1,76 +0,0 @@ -use itertools::{assert_equal, Itertools}; -use std::{ops::Range, vec::IntoIter}; - -fn mix_data() -> IntoIter, bool>> { - vec![Ok(0..2), Err(false), Ok(2..4), Err(true), Ok(4..6)].into_iter() -} - -fn ok_data() -> IntoIter, bool>> { - vec![Ok(0..2), Ok(2..4), Ok(4..6)].into_iter() -} - -#[test] -fn flatten_ok_mixed_expected_forward() { - assert_equal( - mix_data().flatten_ok(), - vec![ - Ok(0), - Ok(1), - Err(false), - Ok(2), - Ok(3), - Err(true), - Ok(4), - Ok(5), - ], - ); -} - -#[test] -fn flatten_ok_mixed_expected_reverse() { - assert_equal( - mix_data().flatten_ok().rev(), - vec![ - Ok(5), - Ok(4), - Err(true), - Ok(3), - Ok(2), - Err(false), - Ok(1), - Ok(0), - ], - ); -} - -#[test] -fn flatten_ok_collect_mixed_forward() { - assert_eq!( - mix_data().flatten_ok().collect::, _>>(), - Err(false) - ); -} - -#[test] -fn flatten_ok_collect_mixed_reverse() { - assert_eq!( - mix_data().flatten_ok().rev().collect::, _>>(), - Err(true) - ); -} - -#[test] -fn flatten_ok_collect_ok_forward() { - assert_eq!( - ok_data().flatten_ok().collect::, _>>(), - Ok((0..6).collect()) - ); -} - -#[test] -fn flatten_ok_collect_ok_reverse() { - assert_eq!( - ok_data().flatten_ok().rev().collect::, _>>(), - Ok((0..6).rev().collect()) - ); -} diff --git a/vendor/itertools/tests/laziness.rs b/vendor/itertools/tests/laziness.rs deleted file mode 100644 index c559d33adc5dfe..00000000000000 --- a/vendor/itertools/tests/laziness.rs +++ /dev/null @@ -1,283 +0,0 @@ -#![allow(unstable_name_collisions)] - -use itertools::Itertools; - -#[derive(Debug, Clone)] -#[must_use = "iterators are lazy and do nothing unless consumed"] -struct Panicking; - -impl Iterator for Panicking { - type Item = u8; - - fn next(&mut self) -> Option { - panic!("iterator adaptor is not lazy") - } - - fn size_hint(&self) -> (usize, Option) { - (0, Some(0)) - } -} - -impl ExactSizeIterator for Panicking {} - -/// ## Usage example -/// ```compile_fail -/// must_use_tests! { -/// name { -/// Panicking.name(); // Add `let _ =` only if required (encountered error). -/// } -/// // ... -/// } -/// ``` -/// -/// **TODO:** test missing `must_use` attributes better, maybe with a new lint. -macro_rules! must_use_tests { - ($($(#[$attr:meta])* $name:ident $body:block)*) => { - $( - /// `#[deny(unused_must_use)]` should force us to ignore the resulting iterators - /// by adding `let _ = ...;` on every iterator. - /// If it does not, then a `must_use` attribute is missing on the associated struct. - /// - /// However, it's only helpful if we don't add `let _ =` before seeing if there is an error or not. - /// And it does not protect us against removed `must_use` attributes. - /// There is no simple way to test this yet. - #[deny(unused_must_use)] - #[test] - $(#[$attr])* - fn $name() $body - )* - }; -} - -must_use_tests! 
{ - // Itertools trait: - interleave { - let _ = Panicking.interleave(Panicking); - } - interleave_shortest { - let _ = Panicking.interleave_shortest(Panicking); - } - intersperse { - let _ = Panicking.intersperse(0); - } - intersperse_with { - let _ = Panicking.intersperse_with(|| 0); - } - get { - let _ = Panicking.get(1..4); - let _ = Panicking.get(1..=4); - let _ = Panicking.get(1..); - let _ = Panicking.get(..4); - let _ = Panicking.get(..=4); - let _ = Panicking.get(..); - } - zip_longest { - let _ = Panicking.zip_longest(Panicking); - } - zip_eq { - let _ = Panicking.zip_eq(Panicking); - } - batching { - let _ = Panicking.batching(Iterator::next); - } - chunk_by { - // ChunkBy - let _ = Panicking.chunk_by(|x| *x); - // Groups - let _ = Panicking.chunk_by(|x| *x).into_iter(); - } - chunks { - // IntoChunks - let _ = Panicking.chunks(1); - let _ = Panicking.chunks(2); - // Chunks - let _ = Panicking.chunks(1).into_iter(); - let _ = Panicking.chunks(2).into_iter(); - } - tuple_windows { - let _ = Panicking.tuple_windows::<(_,)>(); - let _ = Panicking.tuple_windows::<(_, _)>(); - let _ = Panicking.tuple_windows::<(_, _, _)>(); - } - circular_tuple_windows { - let _ = Panicking.circular_tuple_windows::<(_,)>(); - let _ = Panicking.circular_tuple_windows::<(_, _)>(); - let _ = Panicking.circular_tuple_windows::<(_, _, _)>(); - } - tuples { - let _ = Panicking.tuples::<(_,)>(); - let _ = Panicking.tuples::<(_, _)>(); - let _ = Panicking.tuples::<(_, _, _)>(); - } - tee { - let _ = Panicking.tee(); - } - map_into { - let _ = Panicking.map_into::(); - } - map_ok { - let _ = Panicking.map(Ok::).map_ok(|x| x + 1); - } - filter_ok { - let _ = Panicking.map(Ok::).filter_ok(|x| x % 2 == 0); - } - filter_map_ok { - let _ = Panicking.map(Ok::).filter_map_ok(|x| { - if x % 2 == 0 { - Some(x + 1) - } else { - None - } - }); - } - flatten_ok { - let _ = Panicking.map(|x| Ok::<_, ()>([x])).flatten_ok(); - } - merge { - let _ = Panicking.merge(Panicking); - } - merge_by { - let _ = Panicking.merge_by(Panicking, |_, _| true); - } - merge_join_by { - let _ = Panicking.merge_join_by(Panicking, |_, _| true); - let _ = Panicking.merge_join_by(Panicking, Ord::cmp); - } - #[should_panic] - kmerge { - let _ = Panicking.map(|_| Panicking).kmerge(); - } - #[should_panic] - kmerge_by { - let _ = Panicking.map(|_| Panicking).kmerge_by(|_, _| true); - } - cartesian_product { - let _ = Panicking.cartesian_product(Panicking); - } - multi_cartesian_product { - let _ = vec![Panicking, Panicking, Panicking].into_iter().multi_cartesian_product(); - } - coalesce { - let _ = Panicking.coalesce(|x, y| if x == y { Ok(x) } else { Err((x, y)) }); - } - dedup { - let _ = Panicking.dedup(); - } - dedup_by { - let _ = Panicking.dedup_by(|_, _| true); - } - dedup_with_count { - let _ = Panicking.dedup_with_count(); - } - dedup_by_with_count { - let _ = Panicking.dedup_by_with_count(|_, _| true); - } - duplicates { - let _ = Panicking.duplicates(); - } - duplicates_by { - let _ = Panicking.duplicates_by(|x| *x); - } - unique { - let _ = Panicking.unique(); - } - unique_by { - let _ = Panicking.unique_by(|x| *x); - } - peeking_take_while { - let _ = Panicking.peekable().peeking_take_while(|x| x % 2 == 0); - } - take_while_ref { - let _ = Panicking.take_while_ref(|x| x % 2 == 0); - } - take_while_inclusive { - let _ = Panicking.take_while_inclusive(|x| x % 2 == 0); - } - while_some { - let _ = Panicking.map(Some).while_some(); - } - tuple_combinations1 { - let _ = Panicking.tuple_combinations::<(_,)>(); - } - #[should_panic] - 
tuple_combinations2 {
-        let _ = Panicking.tuple_combinations::<(_, _)>();
-    }
-    #[should_panic]
-    tuple_combinations3 {
-        let _ = Panicking.tuple_combinations::<(_, _, _)>();
-    }
-    combinations {
-        let _ = Panicking.combinations(0);
-        let _ = Panicking.combinations(1);
-        let _ = Panicking.combinations(2);
-    }
-    combinations_with_replacement {
-        let _ = Panicking.combinations_with_replacement(0);
-        let _ = Panicking.combinations_with_replacement(1);
-        let _ = Panicking.combinations_with_replacement(2);
-    }
-    permutations {
-        let _ = Panicking.permutations(0);
-        let _ = Panicking.permutations(1);
-        let _ = Panicking.permutations(2);
-    }
-    powerset {
-        let _ = Panicking.powerset();
-    }
-    pad_using {
-        let _ = Panicking.pad_using(25, |_| 10);
-    }
-    with_position {
-        let _ = Panicking.with_position();
-    }
-    positions {
-        let _ = Panicking.positions(|v| v % 2 == 0);
-    }
-    update {
-        let _ = Panicking.update(|n| *n += 1);
-    }
-    multipeek {
-        let _ = Panicking.multipeek();
-    }
-    // Not iterator themselves but still lazy.
-    into_grouping_map {
-        let _ = Panicking.map(|x| (x, x + 1)).into_grouping_map();
-    }
-    into_grouping_map_by {
-        let _ = Panicking.into_grouping_map_by(|x| *x);
-    }
-    // Macros:
-    iproduct {
-        let _ = itertools::iproduct!(Panicking);
-        let _ = itertools::iproduct!(Panicking, Panicking);
-        let _ = itertools::iproduct!(Panicking, Panicking, Panicking);
-    }
-    izip {
-        let _ = itertools::izip!(Panicking);
-        let _ = itertools::izip!(Panicking, Panicking);
-        let _ = itertools::izip!(Panicking, Panicking, Panicking);
-    }
-    chain {
-        let _ = itertools::chain!(Panicking);
-        let _ = itertools::chain!(Panicking, Panicking);
-        let _ = itertools::chain!(Panicking, Panicking, Panicking);
-    }
-    // Free functions:
-    multizip {
-        let _ = itertools::multizip((Panicking, Panicking));
-    }
-    put_back {
-        let _ = itertools::put_back(Panicking);
-        let _ = itertools::put_back(Panicking).with_value(15);
-    }
-    peek_nth {
-        let _ = itertools::peek_nth(Panicking);
-    }
-    put_back_n {
-        let _ = itertools::put_back_n(Panicking);
-    }
-    rciter {
-        let _ = itertools::rciter(Panicking);
-    }
-}
diff --git a/vendor/itertools/tests/macros_hygiene.rs b/vendor/itertools/tests/macros_hygiene.rs
deleted file mode 100644
index 20b59fba87371d..00000000000000
--- a/vendor/itertools/tests/macros_hygiene.rs
+++ /dev/null
@@ -1,14 +0,0 @@
-#[test]
-fn iproduct_hygiene() {
-    let _ = itertools::iproduct!();
-    let _ = itertools::iproduct!(0..6);
-    let _ = itertools::iproduct!(0..6, 0..9);
-    let _ = itertools::iproduct!(0..6, 0..9, 0..12);
-}
-
-#[test]
-fn izip_hygiene() {
-    let _ = itertools::izip!(0..6);
-    let _ = itertools::izip!(0..6, 0..9);
-    let _ = itertools::izip!(0..6, 0..9, 0..12);
-}
diff --git a/vendor/itertools/tests/merge_join.rs b/vendor/itertools/tests/merge_join.rs
deleted file mode 100644
index 776252fc58d179..00000000000000
--- a/vendor/itertools/tests/merge_join.rs
+++ /dev/null
@@ -1,101 +0,0 @@
-use itertools::free::merge_join_by;
-use itertools::EitherOrBoth;
-
-#[test]
-fn empty() {
-    let left: Vec<u32> = vec![];
-    let right: Vec<u32> = vec![];
-    let expected_result: Vec<EitherOrBoth<u32, u32>> = vec![];
-    let actual_result = merge_join_by(left, right, |l, r| l.cmp(r)).collect::<Vec<_>>();
-    assert_eq!(expected_result, actual_result);
-}
-
-#[test]
-fn left_only() {
-    let left: Vec<u32> = vec![1, 2, 3];
-    let right: Vec<u32> = vec![];
-    let expected_result: Vec<EitherOrBoth<u32, u32>> = vec![
-        EitherOrBoth::Left(1),
-        EitherOrBoth::Left(2),
-        EitherOrBoth::Left(3),
-    ];
-    let actual_result = merge_join_by(left, right, |l, r| l.cmp(r)).collect::<Vec<_>>();
-
assert_eq!(expected_result, actual_result); -} - -#[test] -fn right_only() { - let left: Vec = vec![]; - let right: Vec = vec![1, 2, 3]; - let expected_result: Vec> = vec![ - EitherOrBoth::Right(1), - EitherOrBoth::Right(2), - EitherOrBoth::Right(3), - ]; - let actual_result = merge_join_by(left, right, |l, r| l.cmp(r)).collect::>(); - assert_eq!(expected_result, actual_result); -} - -#[test] -fn first_left_then_right() { - let left: Vec = vec![1, 2, 3]; - let right: Vec = vec![4, 5, 6]; - let expected_result: Vec> = vec![ - EitherOrBoth::Left(1), - EitherOrBoth::Left(2), - EitherOrBoth::Left(3), - EitherOrBoth::Right(4), - EitherOrBoth::Right(5), - EitherOrBoth::Right(6), - ]; - let actual_result = merge_join_by(left, right, |l, r| l.cmp(r)).collect::>(); - assert_eq!(expected_result, actual_result); -} - -#[test] -fn first_right_then_left() { - let left: Vec = vec![4, 5, 6]; - let right: Vec = vec![1, 2, 3]; - let expected_result: Vec> = vec![ - EitherOrBoth::Right(1), - EitherOrBoth::Right(2), - EitherOrBoth::Right(3), - EitherOrBoth::Left(4), - EitherOrBoth::Left(5), - EitherOrBoth::Left(6), - ]; - let actual_result = merge_join_by(left, right, |l, r| l.cmp(r)).collect::>(); - assert_eq!(expected_result, actual_result); -} - -#[test] -fn interspersed_left_and_right() { - let left: Vec = vec![1, 3, 5]; - let right: Vec = vec![2, 4, 6]; - let expected_result: Vec> = vec![ - EitherOrBoth::Left(1), - EitherOrBoth::Right(2), - EitherOrBoth::Left(3), - EitherOrBoth::Right(4), - EitherOrBoth::Left(5), - EitherOrBoth::Right(6), - ]; - let actual_result = merge_join_by(left, right, |l, r| l.cmp(r)).collect::>(); - assert_eq!(expected_result, actual_result); -} - -#[test] -fn overlapping_left_and_right() { - let left: Vec = vec![1, 3, 4, 6]; - let right: Vec = vec![2, 3, 4, 5]; - let expected_result: Vec> = vec![ - EitherOrBoth::Left(1), - EitherOrBoth::Right(2), - EitherOrBoth::Both(3, 3), - EitherOrBoth::Both(4, 4), - EitherOrBoth::Right(5), - EitherOrBoth::Left(6), - ]; - let actual_result = merge_join_by(left, right, |l, r| l.cmp(r)).collect::>(); - assert_eq!(expected_result, actual_result); -} diff --git a/vendor/itertools/tests/peeking_take_while.rs b/vendor/itertools/tests/peeking_take_while.rs deleted file mode 100644 index 5be97271dd80ef..00000000000000 --- a/vendor/itertools/tests/peeking_take_while.rs +++ /dev/null @@ -1,69 +0,0 @@ -use itertools::Itertools; -use itertools::{put_back, put_back_n}; - -#[test] -fn peeking_take_while_peekable() { - let mut r = (0..10).peekable(); - r.peeking_take_while(|x| *x <= 3).count(); - assert_eq!(r.next(), Some(4)); -} - -#[test] -fn peeking_take_while_put_back() { - let mut r = put_back(0..10); - r.peeking_take_while(|x| *x <= 3).count(); - assert_eq!(r.next(), Some(4)); - r.peeking_take_while(|_| true).count(); - assert_eq!(r.next(), None); -} - -#[test] -fn peeking_take_while_put_back_n() { - let mut r = put_back_n(6..10); - for elt in (0..6).rev() { - r.put_back(elt); - } - r.peeking_take_while(|x| *x <= 3).count(); - assert_eq!(r.next(), Some(4)); - r.peeking_take_while(|_| true).count(); - assert_eq!(r.next(), None); -} - -#[test] -fn peeking_take_while_slice_iter() { - let v = [1, 2, 3, 4, 5, 6]; - let mut r = v.iter(); - r.peeking_take_while(|x| **x <= 3).count(); - assert_eq!(r.next(), Some(&4)); - r.peeking_take_while(|_| true).count(); - assert_eq!(r.next(), None); -} - -#[test] -fn peeking_take_while_slice_iter_rev() { - let v = [1, 2, 3, 4, 5, 6]; - let mut r = v.iter().rev(); - r.peeking_take_while(|x| **x >= 3).count(); - 
assert_eq!(r.next(), Some(&2)); - r.peeking_take_while(|_| true).count(); - assert_eq!(r.next(), None); -} - -#[test] -fn peeking_take_while_nested() { - let mut xs = (0..10).peekable(); - let ys: Vec<_> = xs - .peeking_take_while(|x| *x < 6) - .peeking_take_while(|x| *x != 3) - .collect(); - assert_eq!(ys, vec![0, 1, 2]); - assert_eq!(xs.next(), Some(3)); - - let mut xs = (4..10).peekable(); - let ys: Vec<_> = xs - .peeking_take_while(|x| *x != 3) - .peeking_take_while(|x| *x < 6) - .collect(); - assert_eq!(ys, vec![4, 5]); - assert_eq!(xs.next(), Some(6)); -} diff --git a/vendor/itertools/tests/quick.rs b/vendor/itertools/tests/quick.rs deleted file mode 100644 index 5b8fd6a2105229..00000000000000 --- a/vendor/itertools/tests/quick.rs +++ /dev/null @@ -1,1967 +0,0 @@ -//! The purpose of these tests is to cover corner cases of iterators -//! and adaptors. -//! -//! In particular we test the tedious size_hint and exact size correctness. - -#![allow(deprecated, unstable_name_collisions)] - -use itertools::free::{ - cloned, enumerate, multipeek, peek_nth, put_back, put_back_n, rciter, zip, zip_eq, -}; -use itertools::Itertools; -use itertools::{iproduct, izip, multizip, EitherOrBoth}; -use quickcheck as qc; -use std::cmp::{max, min, Ordering}; -use std::collections::{HashMap, HashSet}; -use std::default::Default; -use std::num::Wrapping; -use std::ops::Range; - -use quickcheck::TestResult; -use rand::seq::SliceRandom; -use rand::Rng; - -/// Trait for size hint modifier types -trait HintKind: Copy + Send + qc::Arbitrary { - fn loosen_bounds(&self, org_hint: (usize, Option)) -> (usize, Option); -} - -/// Exact size hint variant that leaves hints unchanged -#[derive(Clone, Copy, Debug)] -struct Exact {} - -impl HintKind for Exact { - fn loosen_bounds(&self, org_hint: (usize, Option)) -> (usize, Option) { - org_hint - } -} - -impl qc::Arbitrary for Exact { - fn arbitrary(_: &mut G) -> Self { - Self {} - } -} - -/// Inexact size hint variant to simulate imprecise (but valid) size hints -/// -/// Will always decrease the lower bound and increase the upper bound -/// of the size hint by set amounts. -#[derive(Clone, Copy, Debug)] -struct Inexact { - underestimate: usize, - overestimate: usize, -} - -impl HintKind for Inexact { - fn loosen_bounds(&self, org_hint: (usize, Option)) -> (usize, Option) { - let (org_lower, org_upper) = org_hint; - ( - org_lower.saturating_sub(self.underestimate), - org_upper.and_then(move |x| x.checked_add(self.overestimate)), - ) - } -} - -impl qc::Arbitrary for Inexact { - fn arbitrary(g: &mut G) -> Self { - let ue_value = usize::arbitrary(g); - let oe_value = usize::arbitrary(g); - // Compensate for quickcheck using extreme values too rarely - let ue_choices = &[0, ue_value, usize::MAX]; - let oe_choices = &[0, oe_value, usize::MAX]; - Self { - underestimate: *ue_choices.choose(g).unwrap(), - overestimate: *oe_choices.choose(g).unwrap(), - } - } - - fn shrink(&self) -> Box> { - let underestimate_value = self.underestimate; - let overestimate_value = self.overestimate; - Box::new(underestimate_value.shrink().flat_map(move |ue_value| { - overestimate_value.shrink().map(move |oe_value| Self { - underestimate: ue_value, - overestimate: oe_value, - }) - })) - } -} - -/// Our base iterator that we can impl Arbitrary for -/// -/// By default we'll return inexact bounds estimates for size_hint -/// to make tests harder to pass. -/// -/// NOTE: Iter is tricky and is not fused, to help catch bugs. 
-/// At the end it will return None once, then return Some(0), -/// then return None again. -#[derive(Clone, Debug)] -struct Iter { - iterator: Range, - // fuse/done flag - fuse_flag: i32, - hint_kind: SK, -} - -impl Iter -where - HK: HintKind, -{ - fn new(it: Range, hint_kind: HK) -> Self { - Self { - iterator: it, - fuse_flag: 0, - hint_kind, - } - } -} - -impl Iterator for Iter -where - Range: Iterator, - as Iterator>::Item: Default, - HK: HintKind, -{ - type Item = as Iterator>::Item; - - fn next(&mut self) -> Option { - let elt = self.iterator.next(); - if elt.is_none() { - self.fuse_flag += 1; - // check fuse flag - if self.fuse_flag == 2 { - return Some(Default::default()); - } - } - elt - } - - fn size_hint(&self) -> (usize, Option) { - let org_hint = self.iterator.size_hint(); - self.hint_kind.loosen_bounds(org_hint) - } -} - -impl DoubleEndedIterator for Iter -where - Range: DoubleEndedIterator, - as Iterator>::Item: Default, - HK: HintKind, -{ - fn next_back(&mut self) -> Option { - self.iterator.next_back() - } -} - -impl ExactSizeIterator for Iter -where - Range: ExactSizeIterator, - as Iterator>::Item: Default, -{ -} - -impl qc::Arbitrary for Iter -where - T: qc::Arbitrary, - HK: HintKind, -{ - fn arbitrary(g: &mut G) -> Self { - Self::new(T::arbitrary(g)..T::arbitrary(g), HK::arbitrary(g)) - } - - fn shrink(&self) -> Box> { - let r = self.iterator.clone(); - let hint_kind = self.hint_kind; - Box::new(r.start.shrink().flat_map(move |a| { - r.end - .shrink() - .map(move |b| Self::new(a.clone()..b, hint_kind)) - })) - } -} - -/// A meta-iterator which yields `Iter`s whose start/endpoints are -/// increased or decreased linearly on each iteration. -#[derive(Clone, Debug)] -struct ShiftRange { - range_start: i32, - range_end: i32, - start_step: i32, - end_step: i32, - iter_count: u32, - hint_kind: HK, -} - -impl Iterator for ShiftRange -where - HK: HintKind, -{ - type Item = Iter; - - fn next(&mut self) -> Option { - if self.iter_count == 0 { - return None; - } - - let iter = Iter::new(self.range_start..self.range_end, self.hint_kind); - - self.range_start += self.start_step; - self.range_end += self.end_step; - self.iter_count -= 1; - - Some(iter) - } -} - -impl ExactSizeIterator for ShiftRange {} - -impl qc::Arbitrary for ShiftRange -where - HK: HintKind, -{ - fn arbitrary(g: &mut G) -> Self { - const MAX_STARTING_RANGE_DIFF: i32 = 32; - const MAX_STEP_MODULO: i32 = 8; - const MAX_ITER_COUNT: u32 = 3; - - let range_start = qc::Arbitrary::arbitrary(g); - let range_end = range_start + g.gen_range(0, MAX_STARTING_RANGE_DIFF + 1); - let start_step = g.gen_range(-MAX_STEP_MODULO, MAX_STEP_MODULO + 1); - let end_step = g.gen_range(-MAX_STEP_MODULO, MAX_STEP_MODULO + 1); - let iter_count = g.gen_range(0, MAX_ITER_COUNT + 1); - let hint_kind = qc::Arbitrary::arbitrary(g); - - Self { - range_start, - range_end, - start_step, - end_step, - iter_count, - hint_kind, - } - } -} - -fn correct_count(get_it: F) -> bool -where - I: Iterator, - F: Fn() -> I, -{ - let mut counts = vec![get_it().count()]; - - 'outer: loop { - let mut it = get_it(); - - for _ in 0..(counts.len() - 1) { - #[allow(clippy::manual_assert)] - if it.next().is_none() { - panic!("Iterator shouldn't be finished, may not be deterministic"); - } - } - - if it.next().is_none() { - break 'outer; - } - - counts.push(it.count()); - } - - let total_actual_count = counts.len() - 1; - - for (i, returned_count) in counts.into_iter().enumerate() { - let actual_count = total_actual_count - i; - if actual_count != returned_count { - 
println!( - "Total iterations: {} True count: {} returned count: {}", - i, actual_count, returned_count - ); - - return false; - } - } - - true -} - -fn correct_size_hint(mut it: I) -> bool { - // record size hint at each iteration - let initial_hint = it.size_hint(); - let mut hints = Vec::with_capacity(initial_hint.0 + 1); - hints.push(initial_hint); - while let Some(_) = it.next() { - hints.push(it.size_hint()) - } - - let mut true_count = hints.len(); // start off +1 too much - - // check all the size hints - for &(low, hi) in &hints { - true_count -= 1; - if low > true_count || (hi.is_some() && hi.unwrap() < true_count) { - println!("True size: {:?}, size hint: {:?}", true_count, (low, hi)); - //println!("All hints: {:?}", hints); - return false; - } - } - true -} - -fn exact_size(mut it: I) -> bool { - // check every iteration - let (mut low, mut hi) = it.size_hint(); - if Some(low) != hi { - return false; - } - while let Some(_) = it.next() { - let (xlow, xhi) = it.size_hint(); - if low != xlow + 1 { - return false; - } - low = xlow; - hi = xhi; - if Some(low) != hi { - return false; - } - } - let (low, hi) = it.size_hint(); - low == 0 && hi == Some(0) -} - -// Exact size for this case, without ExactSizeIterator -fn exact_size_for_this(mut it: I) -> bool { - // check every iteration - let (mut low, mut hi) = it.size_hint(); - if Some(low) != hi { - return false; - } - while let Some(_) = it.next() { - let (xlow, xhi) = it.size_hint(); - if low != xlow + 1 { - return false; - } - low = xlow; - hi = xhi; - if Some(low) != hi { - return false; - } - } - let (low, hi) = it.size_hint(); - low == 0 && hi == Some(0) -} - -/* - * NOTE: Range is broken! - * (all signed ranges are) -#[quickcheck] -fn size_range_i8(a: Iter) -> bool { - exact_size(a) -} - -#[quickcheck] -fn size_range_i16(a: Iter) -> bool { - exact_size(a) -} - -#[quickcheck] -fn size_range_u8(a: Iter) -> bool { - exact_size(a) -} - */ - -macro_rules! quickcheck { - // accept several property function definitions - // The property functions can use pattern matching and `mut` as usual - // in the function arguments, but the functions can not be generic. - {$($(#$attr:tt)* fn $fn_name:ident($($arg:tt)*) -> $ret:ty { $($code:tt)* })*} => ( - $( - #[test] - $(#$attr)* - fn $fn_name() { - fn prop($($arg)*) -> $ret { - $($code)* - } - ::quickcheck::quickcheck(quickcheck!(@fn prop [] $($arg)*)); - } - )* - ); - // parse argument list (with patterns allowed) into prop as fn(_, _) -> _ - (@fn $f:ident [$($t:tt)*]) => { - $f as fn($($t),*) -> _ - }; - (@fn $f:ident [$($p:tt)*] : $($tail:tt)*) => { - quickcheck!(@fn $f [$($p)* _] $($tail)*) - }; - (@fn $f:ident [$($p:tt)*] $t:tt $($tail:tt)*) => { - quickcheck!(@fn $f [$($p)*] $($tail)*) - }; -} - -quickcheck! { - - fn size_product(a: Iter, b: Iter) -> bool { - correct_size_hint(a.cartesian_product(b)) - } - fn size_product3(a: Iter, b: Iter, c: Iter) -> bool { - correct_size_hint(iproduct!(a, b, c)) - } - - fn correct_cartesian_product3(a: Iter, b: Iter, c: Iter, - take_manual: usize) -> () - { - // test correctness of iproduct through regular iteration (take) - // and through fold. 
- let ac = a.clone(); - let br = &b.clone(); - let cr = &c.clone(); - let answer: Vec<_> = ac.flat_map(move |ea| br.clone().flat_map(move |eb| cr.clone().map(move |ec| (ea, eb, ec)))).collect(); - let mut product_iter = iproduct!(a, b, c); - let mut actual = Vec::new(); - - actual.extend((&mut product_iter).take(take_manual)); - if actual.len() == take_manual { - product_iter.fold((), |(), elt| actual.push(elt)); - } - assert_eq!(answer, actual); - } - - fn size_multi_product(a: ShiftRange) -> bool { - correct_size_hint(a.multi_cartesian_product()) - } - fn correct_multi_product3(a: ShiftRange, take_manual: usize) -> () { - // Fix no. of iterators at 3 - let a = ShiftRange { iter_count: 3, ..a }; - - // test correctness of MultiProduct through regular iteration (take) - // and through fold. - let mut iters = a.clone(); - let i0 = iters.next().unwrap(); - let i1r = &iters.next().unwrap(); - let i2r = &iters.next().unwrap(); - let answer: Vec<_> = i0.flat_map(move |ei0| i1r.clone().flat_map(move |ei1| i2r.clone().map(move |ei2| vec![ei0, ei1, ei2]))).collect(); - let mut multi_product = a.clone().multi_cartesian_product(); - let mut actual = Vec::new(); - - actual.extend((&mut multi_product).take(take_manual)); - if actual.len() == take_manual { - multi_product.fold((), |(), elt| actual.push(elt)); - } - assert_eq!(answer, actual); - - assert_eq!(answer.into_iter().last(), a.multi_cartesian_product().last()); - } - - fn correct_empty_multi_product() -> () { - let empty = Vec::>::new().into_iter().multi_cartesian_product(); - assert!(correct_size_hint(empty.clone())); - itertools::assert_equal(empty, std::iter::once(Vec::new())) - } - - fn size_multipeek(a: Iter, s: u8) -> bool { - let mut it = multipeek(a); - // peek a few times - for _ in 0..s { - it.peek(); - } - exact_size(it) - } - - fn size_peek_nth(a: Iter, s: u8) -> bool { - let mut it = peek_nth(a); - // peek a few times - for n in 0..s { - it.peek_nth(n as usize); - } - exact_size(it) - } - - fn equal_merge(mut a: Vec, mut b: Vec) -> bool { - a.sort(); - b.sort(); - let mut merged = a.clone(); - merged.extend(b.iter().cloned()); - merged.sort(); - itertools::equal(&merged, a.iter().merge(&b)) - } - fn size_merge(a: Iter, b: Iter) -> bool { - correct_size_hint(a.merge(b)) - } - fn size_zip(a: Iter, b: Iter, c: Iter) -> bool { - let filt = a.clone().dedup(); - correct_size_hint(multizip((filt, b.clone(), c.clone()))) && - exact_size(multizip((a, b, c))) - } - fn size_zip_rc(a: Iter, b: Iter) -> bool { - let rc = rciter(a); - correct_size_hint(multizip((&rc, &rc, b))) - } - - fn size_zip_macro(a: Iter, b: Iter, c: Iter) -> bool { - let filt = a.clone().dedup(); - correct_size_hint(izip!(filt, b.clone(), c.clone())) && - exact_size(izip!(a, b, c)) - } - fn equal_kmerge(mut a: Vec, mut b: Vec, mut c: Vec) -> bool { - use itertools::free::kmerge; - a.sort(); - b.sort(); - c.sort(); - let mut merged = a.clone(); - merged.extend(b.iter().cloned()); - merged.extend(c.iter().cloned()); - merged.sort(); - itertools::equal(merged.into_iter(), kmerge(vec![a, b, c])) - } - - // Any number of input iterators - fn equal_kmerge_2(mut inputs: Vec>) -> bool { - use itertools::free::kmerge; - // sort the inputs - for input in &mut inputs { - input.sort(); - } - let mut merged = inputs.concat(); - merged.sort(); - itertools::equal(merged.into_iter(), kmerge(inputs)) - } - - // Any number of input iterators - fn equal_kmerge_by_ge(mut inputs: Vec>) -> bool { - // sort the inputs - for input in &mut inputs { - input.sort(); - input.reverse(); - } - let 
mut merged = inputs.concat(); - merged.sort(); - merged.reverse(); - itertools::equal(merged.into_iter(), - inputs.into_iter().kmerge_by(|x, y| x >= y)) - } - - // Any number of input iterators - fn equal_kmerge_by_lt(mut inputs: Vec>) -> bool { - // sort the inputs - for input in &mut inputs { - input.sort(); - } - let mut merged = inputs.concat(); - merged.sort(); - itertools::equal(merged.into_iter(), - inputs.into_iter().kmerge_by(|x, y| x < y)) - } - - // Any number of input iterators - fn equal_kmerge_by_le(mut inputs: Vec>) -> bool { - // sort the inputs - for input in &mut inputs { - input.sort(); - } - let mut merged = inputs.concat(); - merged.sort(); - itertools::equal(merged.into_iter(), - inputs.into_iter().kmerge_by(|x, y| x <= y)) - } - fn size_kmerge(a: Iter, b: Iter, c: Iter) -> bool { - use itertools::free::kmerge; - correct_size_hint(kmerge(vec![a, b, c])) - } - fn equal_zip_eq(a: Vec, b: Vec) -> bool { - let len = std::cmp::min(a.len(), b.len()); - let a = &a[..len]; - let b = &b[..len]; - itertools::equal(zip_eq(a, b), zip(a, b)) - } - - #[should_panic] - fn zip_eq_panics(a: Vec, b: Vec) -> TestResult { - if a.len() == b.len() { return TestResult::discard(); } - zip_eq(a.iter(), b.iter()).for_each(|_| {}); - TestResult::passed() // won't come here - } - - fn equal_positions(a: Vec) -> bool { - let with_pos = a.iter().positions(|v| v % 2 == 0); - let without = a.iter().enumerate().filter(|(_, v)| *v % 2 == 0).map(|(i, _)| i); - itertools::equal(with_pos.clone(), without.clone()) - && itertools::equal(with_pos.rev(), without.rev()) - } - fn size_zip_longest(a: Iter, b: Iter) -> bool { - let filt = a.clone().dedup(); - let filt2 = b.clone().dedup(); - correct_size_hint(filt.zip_longest(b.clone())) && - correct_size_hint(a.clone().zip_longest(filt2)) && - exact_size(a.zip_longest(b)) - } - fn size_2_zip_longest(a: Iter, b: Iter) -> bool { - let it = a.clone().zip_longest(b.clone()); - let jt = a.clone().zip_longest(b.clone()); - itertools::equal(a, - it.filter_map(|elt| match elt { - EitherOrBoth::Both(x, _) => Some(x), - EitherOrBoth::Left(x) => Some(x), - _ => None, - } - )) - && - itertools::equal(b, - jt.filter_map(|elt| match elt { - EitherOrBoth::Both(_, y) => Some(y), - EitherOrBoth::Right(y) => Some(y), - _ => None, - } - )) - } - fn size_interleave(a: Iter, b: Iter) -> bool { - correct_size_hint(a.interleave(b)) - } - fn exact_interleave(a: Iter, b: Iter) -> bool { - exact_size_for_this(a.interleave(b)) - } - fn size_interleave_shortest(a: Iter, b: Iter) -> bool { - correct_size_hint(a.interleave_shortest(b)) - } - fn exact_interleave_shortest(a: Vec<()>, b: Vec<()>) -> bool { - exact_size_for_this(a.iter().interleave_shortest(&b)) - } - fn size_intersperse(a: Iter, x: i16) -> bool { - correct_size_hint(a.intersperse(x)) - } - fn equal_intersperse(a: Vec, x: i32) -> bool { - let mut inter = false; - let mut i = 0; - for elt in a.iter().cloned().intersperse(x) { - if inter { - if elt != x { return false } - } else { - if elt != a[i] { return false } - i += 1; - } - inter = !inter; - } - true - } - - fn equal_combinations_2(a: Vec) -> bool { - let mut v = Vec::new(); - for (i, x) in enumerate(&a) { - for y in &a[i + 1..] 
{ - v.push((x, y)); - } - } - itertools::equal(a.iter().tuple_combinations::<(_, _)>(), v) - } - - fn collect_tuple_matches_size(a: Iter) -> bool { - let size = a.clone().count(); - a.collect_tuple::<(_, _, _)>().is_some() == (size == 3) - } - - fn correct_permutations(vals: HashSet, k: usize) -> () { - // Test permutations only on iterators of distinct integers, to prevent - // false positives. - - const MAX_N: usize = 5; - - let n = min(vals.len(), MAX_N); - let vals: HashSet = vals.into_iter().take(n).collect(); - - let perms = vals.iter().permutations(k); - - let mut actual = HashSet::new(); - - for perm in perms { - assert_eq!(perm.len(), k); - - let all_items_valid = perm.iter().all(|p| vals.contains(p)); - assert!(all_items_valid, "perm contains value not from input: {:?}", perm); - - // Check that all perm items are distinct - let distinct_len = { - let perm_set: HashSet<_> = perm.iter().collect(); - perm_set.len() - }; - assert_eq!(perm.len(), distinct_len); - - // Check that the perm is new - assert!(actual.insert(perm.clone()), "perm already encountered: {:?}", perm); - } - } - - fn permutations_lexic_order(a: usize, b: usize) -> () { - let a = a % 6; - let b = b % 6; - - let n = max(a, b); - let k = min (a, b); - - let expected_first: Vec = (0..k).collect(); - let expected_last: Vec = ((n - k)..n).rev().collect(); - - let mut perms = (0..n).permutations(k); - - let mut curr_perm = match perms.next() { - Some(p) => p, - None => { return; } - }; - - assert_eq!(expected_first, curr_perm); - - for next_perm in perms { - assert!( - next_perm > curr_perm, - "next perm isn't greater-than current; next_perm={:?} curr_perm={:?} n={}", - next_perm, curr_perm, n - ); - - curr_perm = next_perm; - } - - assert_eq!(expected_last, curr_perm); - - } - - fn permutations_count(n: usize, k: usize) -> bool { - let n = n % 6; - - correct_count(|| (0..n).permutations(k)) - } - - fn permutations_size(a: Iter, k: usize) -> bool { - correct_size_hint(a.take(5).permutations(k)) - } - - fn permutations_k0_yields_once(n: usize) -> () { - let k = 0; - let expected: Vec> = vec![vec![]]; - let actual = (0..n).permutations(k).collect_vec(); - - assert_eq!(expected, actual); - } -} - -quickcheck! { - fn correct_peek_nth(mut a: Vec) -> () { - let mut it = peek_nth(a.clone()); - for start_pos in 0..a.len() + 2 { - for real_idx in start_pos..a.len() + 2 { - let peek_idx = real_idx - start_pos; - assert_eq!(it.peek_nth(peek_idx), a.get(real_idx)); - assert_eq!(it.peek_nth_mut(peek_idx), a.get_mut(real_idx)); - } - assert_eq!(it.next(), a.get(start_pos).copied()); - } - } - - fn peek_nth_mut_replace(a: Vec, b: Vec) -> () { - let mut it = peek_nth(a.iter()); - for (i, m) in b.iter().enumerate().take(a.len().min(b.len())) { - *it.peek_nth_mut(i).unwrap() = m; - } - for (i, m) in a.iter().enumerate() { - assert_eq!(it.next().unwrap(), b.get(i).unwrap_or(m)); - } - assert_eq!(it.next(), None); - assert_eq!(it.next(), None); - } - - fn peek_nth_next_if(a: Vec) -> () { - let mut it = peek_nth(a.clone()); - for (idx, mut value) in a.iter().copied().enumerate() { - let should_be_none = it.next_if(|x| x != &value); - assert_eq!(should_be_none, None); - if value % 5 == 0 { - // Sometimes, peek up to 3 further. - let n = value as usize % 3; - let nth = it.peek_nth(n); - assert_eq!(nth, a.get(idx + n)); - } else if value % 5 == 1 { - // Sometimes, peek next element mutably. 
- if let Some(v) = it.peek_mut() { - *v = v.wrapping_sub(1); - let should_be_none = it.next_if_eq(&value); - assert_eq!(should_be_none, None); - value = value.wrapping_sub(1); - } - } - let eq = it.next_if_eq(&value); - assert_eq!(eq, Some(value)); - } - } -} - -quickcheck! { - fn dedup_via_coalesce(a: Vec) -> bool { - let mut b = a.clone(); - b.dedup(); - itertools::equal( - &b, - a - .iter() - .coalesce(|x, y| { - if x==y { - Ok(x) - } else { - Err((x, y)) - } - }) - .fold(vec![], |mut v, n| { - v.push(n); - v - }) - ) - } -} - -quickcheck! { - fn equal_dedup(a: Vec) -> bool { - let mut b = a.clone(); - b.dedup(); - itertools::equal(&b, a.iter().dedup()) - } -} - -quickcheck! { - fn equal_dedup_by(a: Vec<(i32, i32)>) -> bool { - let mut b = a.clone(); - b.dedup_by(|x, y| x.0==y.0); - itertools::equal(&b, a.iter().dedup_by(|x, y| x.0==y.0)) - } -} - -quickcheck! { - fn size_dedup(a: Vec) -> bool { - correct_size_hint(a.iter().dedup()) - } -} - -quickcheck! { - fn size_dedup_by(a: Vec<(i32, i32)>) -> bool { - correct_size_hint(a.iter().dedup_by(|x, y| x.0==y.0)) - } -} - -quickcheck! { - fn exact_repeatn((n, x): (usize, i32)) -> bool { - let it = itertools::repeat_n(x, n); - exact_size(it) - } -} - -quickcheck! { - fn size_put_back(a: Vec, x: Option) -> bool { - let mut it = put_back(a.into_iter()); - if let Some(t) = x { - it.put_back(t); - } - correct_size_hint(it) - } -} - -quickcheck! { - fn size_put_backn(a: Vec, b: Vec) -> bool { - let mut it = put_back_n(a.into_iter()); - for elt in b { - it.put_back(elt) - } - correct_size_hint(it) - } -} - -quickcheck! { - fn merge_join_by_ordering_vs_bool(a: Vec, b: Vec) -> bool { - use either::Either; - use itertools::free::merge_join_by; - let mut has_equal = false; - let it_ord = merge_join_by(a.clone(), b.clone(), Ord::cmp).flat_map(|v| match v { - EitherOrBoth::Both(l, r) => { - has_equal = true; - vec![Either::Left(l), Either::Right(r)] - } - EitherOrBoth::Left(l) => vec![Either::Left(l)], - EitherOrBoth::Right(r) => vec![Either::Right(r)], - }); - let it_bool = merge_join_by(a, b, PartialOrd::le); - itertools::equal(it_ord, it_bool) || has_equal - } - fn merge_join_by_bool_unwrapped_is_merge_by(a: Vec, b: Vec) -> bool { - use either::Either; - use itertools::free::merge_join_by; - let it = a.clone().into_iter().merge_by(b.clone(), PartialOrd::ge); - let it_join = merge_join_by(a, b, PartialOrd::ge).map(Either::into_inner); - itertools::equal(it, it_join) - } -} - -quickcheck! { - fn size_tee(a: Vec) -> bool { - let (mut t1, mut t2) = a.iter().tee(); - t1.next(); - t1.next(); - t2.next(); - exact_size(t1) && exact_size(t2) - } -} - -quickcheck! { - fn size_tee_2(a: Vec) -> bool { - let (mut t1, mut t2) = a.iter().dedup().tee(); - t1.next(); - t1.next(); - t2.next(); - correct_size_hint(t1) && correct_size_hint(t2) - } -} - -quickcheck! { - fn size_take_while_ref(a: Vec, stop: u8) -> bool { - correct_size_hint(a.iter().take_while_ref(|x| **x != stop)) - } -} - -quickcheck! { - fn equal_partition(a: Vec) -> bool { - let mut a = a; - let mut ap = a.clone(); - let split_index = itertools::partition(&mut ap, |x| *x >= 0); - let parted = (0..split_index).all(|i| ap[i] >= 0) && - (split_index..a.len()).all(|i| ap[i] < 0); - - a.sort(); - ap.sort(); - parted && (a == ap) - } -} - -quickcheck! 
{ - fn size_combinations(a: Iter) -> bool { - let it = a.clone().tuple_combinations::<(_, _)>(); - correct_size_hint(it.clone()) && it.count() == binomial(a.count(), 2) - } - - fn exact_size_combinations_1(a: Vec) -> bool { - let it = a.iter().tuple_combinations::<(_,)>(); - exact_size_for_this(it.clone()) && it.count() == binomial(a.len(), 1) - } - fn exact_size_combinations_2(a: Vec) -> bool { - let it = a.iter().tuple_combinations::<(_, _)>(); - exact_size_for_this(it.clone()) && it.count() == binomial(a.len(), 2) - } - fn exact_size_combinations_3(mut a: Vec) -> bool { - a.truncate(15); - let it = a.iter().tuple_combinations::<(_, _, _)>(); - exact_size_for_this(it.clone()) && it.count() == binomial(a.len(), 3) - } -} - -fn binomial(n: usize, k: usize) -> usize { - if k > n { - 0 - } else { - (n - k + 1..=n).product::() / (1..=k).product::() - } -} - -quickcheck! { - fn equal_combinations(it: Iter) -> bool { - let values = it.clone().collect_vec(); - let mut cmb = it.tuple_combinations(); - for i in 0..values.len() { - for j in i+1..values.len() { - let pair = (values[i], values[j]); - if pair != cmb.next().unwrap() { - return false; - } - } - } - cmb.next().is_none() - } -} - -quickcheck! { - fn size_pad_tail(it: Iter, pad: u8) -> bool { - correct_size_hint(it.clone().pad_using(pad as usize, |_| 0)) && - correct_size_hint(it.dropping(1).rev().pad_using(pad as usize, |_| 0)) - } -} - -quickcheck! { - fn size_pad_tail2(it: Iter, pad: u8) -> bool { - exact_size(it.pad_using(pad as usize, |_| 0)) - } -} - -quickcheck! { - fn size_powerset(it: Iter) -> bool { - // Powerset cardinality gets large very quickly, limit input to keep test fast. - correct_size_hint(it.take(12).powerset()) - } -} - -quickcheck! { - fn size_duplicates(it: Iter) -> bool { - correct_size_hint(it.duplicates()) - } -} - -quickcheck! { - fn size_unique(it: Iter) -> bool { - correct_size_hint(it.unique()) - } - - fn count_unique(it: Vec, take_first: u8) -> () { - let answer = { - let mut v = it.clone(); - v.sort(); v.dedup(); - v.len() - }; - let mut iter = cloned(&it).unique(); - let first_count = (&mut iter).take(take_first as usize).count(); - let rest_count = iter.count(); - assert_eq!(answer, first_count + rest_count); - } -} - -quickcheck! { - fn fuzz_chunk_by_lazy_1(it: Iter) -> bool { - let jt = it.clone(); - let chunks = it.chunk_by(|k| *k); - itertools::equal(jt, chunks.into_iter().flat_map(|(_, x)| x)) - } -} - -quickcheck! { - fn fuzz_chunk_by_lazy_2(data: Vec) -> bool { - let chunks = data.iter().chunk_by(|k| *k / 10); - let res = itertools::equal(data.iter(), chunks.into_iter().flat_map(|(_, x)| x)); - res - } -} - -quickcheck! { - fn fuzz_chunk_by_lazy_3(data: Vec) -> bool { - let grouper = data.iter().chunk_by(|k| *k / 10); - let chunks = grouper.into_iter().collect_vec(); - let res = itertools::equal(data.iter(), chunks.into_iter().flat_map(|(_, x)| x)); - res - } -} - -quickcheck! { - fn fuzz_chunk_by_lazy_duo(data: Vec, order: Vec<(bool, bool)>) -> bool { - let grouper = data.iter().chunk_by(|k| *k / 3); - let mut chunks1 = grouper.into_iter(); - let mut chunks2 = grouper.into_iter(); - let mut elts = Vec::<&u8>::new(); - let mut old_chunks = Vec::new(); - - let tup1 = |(_, b)| b; - for &(ord, consume_now) in &order { - let iter = &mut [&mut chunks1, &mut chunks2][ord as usize]; - match iter.next() { - Some((_, gr)) => if consume_now { - for og in old_chunks.drain(..) 
{ - elts.extend(og); - } - elts.extend(gr); - } else { - old_chunks.push(gr); - }, - None => break, - } - } - for og in old_chunks.drain(..) { - elts.extend(og); - } - for gr in chunks1.map(&tup1) { elts.extend(gr); } - for gr in chunks2.map(&tup1) { elts.extend(gr); } - itertools::assert_equal(&data, elts); - true - } -} - -quickcheck! { - fn chunk_clone_equal(a: Vec, size: u8) -> () { - let mut size = size; - if size == 0 { - size += 1; - } - let it = a.chunks(size as usize); - itertools::assert_equal(it.clone(), it); - } -} - -quickcheck! { - fn equal_chunks_lazy(a: Vec, size: u8) -> bool { - let mut size = size; - if size == 0 { - size += 1; - } - let chunks = a.iter().chunks(size as usize); - let it = a.chunks(size as usize); - for (a, b) in chunks.into_iter().zip(it) { - if !itertools::equal(a, b) { - return false; - } - } - true - } -} - -// tuple iterators -quickcheck! { - fn equal_circular_tuple_windows_1(a: Vec) -> bool { - let x = a.iter().map(|e| (e,) ); - let y = a.iter().circular_tuple_windows::<(_,)>(); - itertools::assert_equal(x,y); - true - } - - fn equal_circular_tuple_windows_2(a: Vec) -> bool { - let x = (0..a.len()).map(|start_idx| ( - &a[start_idx], - &a[(start_idx + 1) % a.len()], - )); - let y = a.iter().circular_tuple_windows::<(_, _)>(); - itertools::assert_equal(x,y); - true - } - - fn equal_circular_tuple_windows_3(a: Vec) -> bool { - let x = (0..a.len()).map(|start_idx| ( - &a[start_idx], - &a[(start_idx + 1) % a.len()], - &a[(start_idx + 2) % a.len()], - )); - let y = a.iter().circular_tuple_windows::<(_, _, _)>(); - itertools::assert_equal(x,y); - true - } - - fn equal_circular_tuple_windows_4(a: Vec) -> bool { - let x = (0..a.len()).map(|start_idx| ( - &a[start_idx], - &a[(start_idx + 1) % a.len()], - &a[(start_idx + 2) % a.len()], - &a[(start_idx + 3) % a.len()], - )); - let y = a.iter().circular_tuple_windows::<(_, _, _, _)>(); - itertools::assert_equal(x,y); - true - } - - fn equal_cloned_circular_tuple_windows(a: Vec) -> bool { - let x = a.iter().circular_tuple_windows::<(_, _, _, _)>(); - let y = x.clone(); - itertools::assert_equal(x,y); - true - } - - fn equal_cloned_circular_tuple_windows_noninitial(a: Vec) -> bool { - let mut x = a.iter().circular_tuple_windows::<(_, _, _, _)>(); - let _ = x.next(); - let y = x.clone(); - itertools::assert_equal(x,y); - true - } - - fn equal_cloned_circular_tuple_windows_complete(a: Vec) -> bool { - let mut x = a.iter().circular_tuple_windows::<(_, _, _, _)>(); - for _ in x.by_ref() {} - let y = x.clone(); - itertools::assert_equal(x,y); - true - } - - fn circular_tuple_windows_exact_size(a: Vec) -> bool { - exact_size(a.iter().circular_tuple_windows::<(_, _, _, _)>()) - } - - fn equal_tuple_windows_1(a: Vec) -> bool { - let x = a.windows(1).map(|s| (&s[0], )); - let y = a.iter().tuple_windows::<(_,)>(); - itertools::equal(x, y) - } - - fn equal_tuple_windows_2(a: Vec) -> bool { - let x = a.windows(2).map(|s| (&s[0], &s[1])); - let y = a.iter().tuple_windows::<(_, _)>(); - itertools::equal(x, y) - } - - fn equal_tuple_windows_3(a: Vec) -> bool { - let x = a.windows(3).map(|s| (&s[0], &s[1], &s[2])); - let y = a.iter().tuple_windows::<(_, _, _)>(); - itertools::equal(x, y) - } - - fn equal_tuple_windows_4(a: Vec) -> bool { - let x = a.windows(4).map(|s| (&s[0], &s[1], &s[2], &s[3])); - let y = a.iter().tuple_windows::<(_, _, _, _)>(); - itertools::equal(x, y) - } - - fn tuple_windows_exact_size_1(a: Vec) -> bool { - exact_size(a.iter().tuple_windows::<(_,)>()) - } - - fn tuple_windows_exact_size_4(a: Vec) -> bool { 
- exact_size(a.iter().tuple_windows::<(_, _, _, _)>()) - } - - fn equal_tuples_1(a: Vec) -> bool { - let x = a.chunks(1).map(|s| (&s[0], )); - let y = a.iter().tuples::<(_,)>(); - itertools::equal(x, y) - } - - fn equal_tuples_2(a: Vec) -> bool { - let x = a.chunks(2).filter(|s| s.len() == 2).map(|s| (&s[0], &s[1])); - let y = a.iter().tuples::<(_, _)>(); - itertools::equal(x, y) - } - - fn equal_tuples_3(a: Vec) -> bool { - let x = a.chunks(3).filter(|s| s.len() == 3).map(|s| (&s[0], &s[1], &s[2])); - let y = a.iter().tuples::<(_, _, _)>(); - itertools::equal(x, y) - } - - fn equal_tuples_4(a: Vec) -> bool { - let x = a.chunks(4).filter(|s| s.len() == 4).map(|s| (&s[0], &s[1], &s[2], &s[3])); - let y = a.iter().tuples::<(_, _, _, _)>(); - itertools::equal(x, y) - } - - fn exact_tuple_buffer(a: Vec) -> bool { - let mut iter = a.iter().tuples::<(_, _, _, _)>(); - (&mut iter).last(); - let buffer = iter.into_buffer(); - assert_eq!(buffer.len(), a.len() % 4); - exact_size(buffer) - } - - fn tuples_size_hint_inexact(a: Iter) -> bool { - correct_size_hint(a.clone().tuples::<(_,)>()) - && correct_size_hint(a.clone().tuples::<(_, _)>()) - && correct_size_hint(a.tuples::<(_, _, _, _)>()) - } - - fn tuples_size_hint_exact(a: Iter) -> bool { - exact_size(a.clone().tuples::<(_,)>()) - && exact_size(a.clone().tuples::<(_, _)>()) - && exact_size(a.tuples::<(_, _, _, _)>()) - } -} - -// with_position -quickcheck! { - fn with_position_exact_size_1(a: Vec) -> bool { - exact_size_for_this(a.iter().with_position()) - } - fn with_position_exact_size_2(a: Iter) -> bool { - exact_size_for_this(a.with_position()) - } -} - -quickcheck! { - fn correct_group_map_modulo_key(a: Vec, modulo: u8) -> () { - let modulo = if modulo == 0 { 1 } else { modulo }; // Avoid `% 0` - let count = a.len(); - let lookup = a.into_iter().map(|i| (i % modulo, i)).into_group_map(); - - assert_eq!(lookup.values().flat_map(|vals| vals.iter()).count(), count); - - for (&key, vals) in lookup.iter() { - assert!(vals.iter().all(|&val| val % modulo == key)); - } - } -} - -/// A peculiar type: Equality compares both tuple items, but ordering only the -/// first item. This is so we can check the stability property easily. -#[derive(Clone, Debug, PartialEq, Eq)] -struct Val(u32, u32); - -impl PartialOrd for Val { - fn partial_cmp(&self, other: &Self) -> Option { - Some(self.cmp(other)) - } -} - -impl Ord for Val { - fn cmp(&self, other: &Self) -> Ordering { - self.0.cmp(&other.0) - } -} - -impl qc::Arbitrary for Val { - fn arbitrary(g: &mut G) -> Self { - let (x, y) = <(u32, u32)>::arbitrary(g); - Self(x, y) - } - fn shrink(&self) -> Box> { - Box::new((self.0, self.1).shrink().map(|(x, y)| Self(x, y))) - } -} - -quickcheck! { - fn minmax(a: Vec) -> bool { - use itertools::MinMaxResult; - - - let minmax = a.iter().minmax(); - let expected = match a.len() { - 0 => MinMaxResult::NoElements, - 1 => MinMaxResult::OneElement(&a[0]), - _ => MinMaxResult::MinMax(a.iter().min().unwrap(), - a.iter().max().unwrap()), - }; - minmax == expected - } -} - -quickcheck! 
{ - fn minmax_f64(a: Vec) -> TestResult { - use itertools::MinMaxResult; - - if a.iter().any(|x| x.is_nan()) { - return TestResult::discard(); - } - - let min = cloned(&a).fold1(f64::min); - let max = cloned(&a).fold1(f64::max); - - let minmax = cloned(&a).minmax(); - let expected = match a.len() { - 0 => MinMaxResult::NoElements, - 1 => MinMaxResult::OneElement(min.unwrap()), - _ => MinMaxResult::MinMax(min.unwrap(), max.unwrap()), - }; - TestResult::from_bool(minmax == expected) - } -} - -quickcheck! { - fn tree_reduce_f64(mut a: Vec) -> TestResult { - fn collapse_adjacent(x: Vec, mut f: F) -> Vec - where F: FnMut(f64, f64) -> f64 - { - let mut out = Vec::new(); - for i in (0..x.len()).step_by(2) { - if i == x.len()-1 { - out.push(x[i]) - } else { - out.push(f(x[i], x[i+1])); - } - } - out - } - - if a.iter().any(|x| x.is_nan()) { - return TestResult::discard(); - } - - let actual = a.iter().cloned().tree_reduce(f64::atan2); - - while a.len() > 1 { - a = collapse_adjacent(a, f64::atan2); - } - let expected = a.pop(); - - TestResult::from_bool(actual == expected) - } -} - -quickcheck! { - fn exactly_one_i32(a: Vec) -> TestResult { - let ret = a.iter().cloned().exactly_one(); - match a.len() { - 1 => TestResult::from_bool(ret.unwrap() == a[0]), - _ => TestResult::from_bool(ret.unwrap_err().eq(a.iter().cloned())), - } - } -} - -quickcheck! { - fn at_most_one_i32(a: Vec) -> TestResult { - let ret = a.iter().cloned().at_most_one(); - match a.len() { - 0 => TestResult::from_bool(ret.unwrap().is_none()), - 1 => TestResult::from_bool(ret.unwrap() == Some(a[0])), - _ => TestResult::from_bool(ret.unwrap_err().eq(a.iter().cloned())), - } - } -} - -quickcheck! { - fn consistent_grouping_map_with_by(a: Vec, modulo: u8) -> () { - let modulo = if modulo == 0 { 1 } else { modulo }; // Avoid `% 0` - - let lookup_grouping_map = a.iter().copied().map(|i| (i % modulo, i)).into_grouping_map().collect::>(); - let lookup_grouping_map_by = a.iter().copied().into_grouping_map_by(|i| i % modulo).collect::>(); - - assert_eq!(lookup_grouping_map, lookup_grouping_map_by); - } - - fn correct_grouping_map_by_aggregate_modulo_key(a: Vec, modulo: u8) -> () { - let modulo = if modulo < 2 { 2 } else { modulo } as u64; // Avoid `% 0` - let lookup = a.iter() - .map(|&b| b as u64) // Avoid overflows - .into_grouping_map_by(|i| i % modulo) - .aggregate(|acc, &key, val| { - assert!(val % modulo == key); - if val % (modulo - 1) == 0 { - None - } else { - Some(acc.unwrap_or(0) + val) - } - }); - - let group_map_lookup = a.iter() - .map(|&b| b as u64) - .map(|i| (i % modulo, i)) - .into_group_map() - .into_iter() - .filter_map(|(key, vals)| { - vals.into_iter().fold(None, |acc, val| { - if val % (modulo - 1) == 0 { - None - } else { - Some(acc.unwrap_or(0) + val) - } - }).map(|new_val| (key, new_val)) - }) - .collect::>(); - assert_eq!(lookup, group_map_lookup); - - for m in 0..modulo { - assert_eq!( - lookup.get(&m).copied(), - a.iter() - .map(|&b| b as u64) - .filter(|&val| val % modulo == m) - .fold(None, |acc, val| { - if val % (modulo - 1) == 0 { - None - } else { - Some(acc.unwrap_or(0) + val) - } - }) - ); - } - } - - fn correct_grouping_map_by_fold_with_modulo_key(a: Vec, modulo: u8) -> () { - #[derive(Debug, Default, PartialEq)] - struct Accumulator { - acc: u64, - } - - let modulo = if modulo == 0 { 1 } else { modulo } as u64; // Avoid `% 0` - let lookup = a.iter().map(|&b| b as u64) // Avoid overflows - .into_grouping_map_by(|i| i % modulo) - .fold_with(|_key, _val| Default::default(), |Accumulator { acc }, &key, val| 
{ - assert!(val % modulo == key); - let acc = acc + val; - Accumulator { acc } - }); - - let group_map_lookup = a.iter() - .map(|&b| b as u64) - .map(|i| (i % modulo, i)) - .into_group_map() - .into_iter() - .map(|(key, vals)| (key, vals.into_iter().sum())).map(|(key, acc)| (key,Accumulator { acc })) - .collect::>(); - assert_eq!(lookup, group_map_lookup); - - for (&key, &Accumulator { acc: sum }) in lookup.iter() { - assert_eq!(sum, a.iter().map(|&b| b as u64).filter(|&val| val % modulo == key).sum::()); - } - } - - fn correct_grouping_map_by_fold_modulo_key(a: Vec, modulo: u8) -> () { - let modulo = if modulo == 0 { 1 } else { modulo } as u64; // Avoid `% 0` - let lookup = a.iter().map(|&b| b as u64) // Avoid overflows - .into_grouping_map_by(|i| i % modulo) - .fold(0u64, |acc, &key, val| { - assert!(val % modulo == key); - acc + val - }); - - let group_map_lookup = a.iter() - .map(|&b| b as u64) - .map(|i| (i % modulo, i)) - .into_group_map() - .into_iter() - .map(|(key, vals)| (key, vals.into_iter().sum())) - .collect::>(); - assert_eq!(lookup, group_map_lookup); - - for (&key, &sum) in lookup.iter() { - assert_eq!(sum, a.iter().map(|&b| b as u64).filter(|&val| val % modulo == key).sum::()); - } - } - - fn correct_grouping_map_by_reduce_modulo_key(a: Vec, modulo: u8) -> () { - let modulo = if modulo == 0 { 1 } else { modulo } as u64; // Avoid `% 0` - let lookup = a.iter().map(|&b| b as u64) // Avoid overflows - .into_grouping_map_by(|i| i % modulo) - .reduce(|acc, &key, val| { - assert!(val % modulo == key); - acc + val - }); - - // TODO: Swap `fold1` with stdlib's `reduce` when it's stabilized - let group_map_lookup = a.iter() - .map(|&b| b as u64) - .map(|i| (i % modulo, i)) - .into_group_map() - .into_iter() - .map(|(key, vals)| (key, vals.into_iter().fold1(|acc, val| acc + val).unwrap())) - .collect::>(); - assert_eq!(lookup, group_map_lookup); - - for (&key, &sum) in lookup.iter() { - assert_eq!(sum, a.iter().map(|&b| b as u64).filter(|&val| val % modulo == key).sum::()); - } - } - - fn correct_grouping_map_by_collect_modulo_key(a: Vec, modulo: u8) -> () { - let modulo = if modulo == 0 { 1 } else { modulo }; // Avoid `% 0` - let lookup_grouping_map = a.iter().copied().into_grouping_map_by(|i| i % modulo).collect::>(); - let lookup_group_map = a.iter().copied().map(|i| (i % modulo, i)).into_group_map(); - - assert_eq!(lookup_grouping_map, lookup_group_map); - } - - fn correct_grouping_map_by_max_modulo_key(a: Vec, modulo: u8) -> () { - let modulo = if modulo == 0 { 1 } else { modulo }; // Avoid `% 0` - let lookup = a.iter().copied().into_grouping_map_by(|i| i % modulo).max(); - - let group_map_lookup = a.iter().copied() - .map(|i| (i % modulo, i)) - .into_group_map() - .into_iter() - .map(|(key, vals)| (key, vals.into_iter().max().unwrap())) - .collect::>(); - assert_eq!(lookup, group_map_lookup); - - for (&key, &max) in lookup.iter() { - assert_eq!(Some(max), a.iter().copied().filter(|&val| val % modulo == key).max()); - } - } - - fn correct_grouping_map_by_max_by_modulo_key(a: Vec, modulo: u8) -> () { - let modulo = if modulo == 0 { 1 } else { modulo }; // Avoid `% 0` - let lookup = a.iter().copied().into_grouping_map_by(|i| i % modulo).max_by(|_, v1, v2| v1.cmp(v2)); - - let group_map_lookup = a.iter().copied() - .map(|i| (i % modulo, i)) - .into_group_map() - .into_iter() - .map(|(key, vals)| (key, vals.into_iter().max_by(|v1, v2| v1.cmp(v2)).unwrap())) - .collect::>(); - assert_eq!(lookup, group_map_lookup); - - for (&key, &max) in lookup.iter() { - assert_eq!(Some(max), 
a.iter().copied().filter(|&val| val % modulo == key).max_by(|v1, v2| v1.cmp(v2))); - } - } - - fn correct_grouping_map_by_max_by_key_modulo_key(a: Vec, modulo: u8) -> () { - let modulo = if modulo == 0 { 1 } else { modulo }; // Avoid `% 0` - let lookup = a.iter().copied().into_grouping_map_by(|i| i % modulo).max_by_key(|_, &val| val); - - let group_map_lookup = a.iter().copied() - .map(|i| (i % modulo, i)) - .into_group_map() - .into_iter() - .map(|(key, vals)| (key, vals.into_iter().max_by_key(|&val| val).unwrap())) - .collect::>(); - assert_eq!(lookup, group_map_lookup); - - for (&key, &max) in lookup.iter() { - assert_eq!(Some(max), a.iter().copied().filter(|&val| val % modulo == key).max_by_key(|&val| val)); - } - } - - fn correct_grouping_map_by_min_modulo_key(a: Vec, modulo: u8) -> () { - let modulo = if modulo == 0 { 1 } else { modulo }; // Avoid `% 0` - let lookup = a.iter().copied().into_grouping_map_by(|i| i % modulo).min(); - - let group_map_lookup = a.iter().copied() - .map(|i| (i % modulo, i)) - .into_group_map() - .into_iter() - .map(|(key, vals)| (key, vals.into_iter().min().unwrap())) - .collect::>(); - assert_eq!(lookup, group_map_lookup); - - for (&key, &min) in lookup.iter() { - assert_eq!(Some(min), a.iter().copied().filter(|&val| val % modulo == key).min()); - } - } - - fn correct_grouping_map_by_min_by_modulo_key(a: Vec, modulo: u8) -> () { - let modulo = if modulo == 0 { 1 } else { modulo }; // Avoid `% 0` - let lookup = a.iter().copied().into_grouping_map_by(|i| i % modulo).min_by(|_, v1, v2| v1.cmp(v2)); - - let group_map_lookup = a.iter().copied() - .map(|i| (i % modulo, i)) - .into_group_map() - .into_iter() - .map(|(key, vals)| (key, vals.into_iter().min_by(|v1, v2| v1.cmp(v2)).unwrap())) - .collect::>(); - assert_eq!(lookup, group_map_lookup); - - for (&key, &min) in lookup.iter() { - assert_eq!(Some(min), a.iter().copied().filter(|&val| val % modulo == key).min_by(|v1, v2| v1.cmp(v2))); - } - } - - fn correct_grouping_map_by_min_by_key_modulo_key(a: Vec, modulo: u8) -> () { - let modulo = if modulo == 0 { 1 } else { modulo }; // Avoid `% 0` - let lookup = a.iter().copied().into_grouping_map_by(|i| i % modulo).min_by_key(|_, &val| val); - - let group_map_lookup = a.iter().copied() - .map(|i| (i % modulo, i)) - .into_group_map() - .into_iter() - .map(|(key, vals)| (key, vals.into_iter().min_by_key(|&val| val).unwrap())) - .collect::>(); - assert_eq!(lookup, group_map_lookup); - - for (&key, &min) in lookup.iter() { - assert_eq!(Some(min), a.iter().copied().filter(|&val| val % modulo == key).min_by_key(|&val| val)); - } - } - - fn correct_grouping_map_by_minmax_modulo_key(a: Vec, modulo: u8) -> () { - let modulo = if modulo == 0 { 1 } else { modulo }; // Avoid `% 0` - let lookup = a.iter().copied().into_grouping_map_by(|i| i % modulo).minmax(); - - let group_map_lookup = a.iter().copied() - .map(|i| (i % modulo, i)) - .into_group_map() - .into_iter() - .map(|(key, vals)| (key, vals.into_iter().minmax())) - .collect::>(); - assert_eq!(lookup, group_map_lookup); - - for (&key, &minmax) in lookup.iter() { - assert_eq!(minmax, a.iter().copied().filter(|&val| val % modulo == key).minmax()); - } - } - - fn correct_grouping_map_by_minmax_by_modulo_key(a: Vec, modulo: u8) -> () { - let modulo = if modulo == 0 { 1 } else { modulo }; // Avoid `% 0` - let lookup = a.iter().copied().into_grouping_map_by(|i| i % modulo).minmax_by(|_, v1, v2| v1.cmp(v2)); - - let group_map_lookup = a.iter().copied() - .map(|i| (i % modulo, i)) - .into_group_map() - .into_iter() - .map(|(key, 
vals)| (key, vals.into_iter().minmax_by(|v1, v2| v1.cmp(v2)))) - .collect::>(); - assert_eq!(lookup, group_map_lookup); - - for (&key, &minmax) in lookup.iter() { - assert_eq!(minmax, a.iter().copied().filter(|&val| val % modulo == key).minmax_by(|v1, v2| v1.cmp(v2))); - } - } - - fn correct_grouping_map_by_minmax_by_key_modulo_key(a: Vec, modulo: u8) -> () { - let modulo = if modulo == 0 { 1 } else { modulo }; // Avoid `% 0` - let lookup = a.iter().copied().into_grouping_map_by(|i| i % modulo).minmax_by_key(|_, &val| val); - - let group_map_lookup = a.iter().copied() - .map(|i| (i % modulo, i)) - .into_group_map() - .into_iter() - .map(|(key, vals)| (key, vals.into_iter().minmax_by_key(|&val| val))) - .collect::>(); - assert_eq!(lookup, group_map_lookup); - - for (&key, &minmax) in lookup.iter() { - assert_eq!(minmax, a.iter().copied().filter(|&val| val % modulo == key).minmax_by_key(|&val| val)); - } - } - - fn correct_grouping_map_by_sum_modulo_key(a: Vec, modulo: u8) -> () { - let modulo = if modulo == 0 { 1 } else { modulo } as u64; // Avoid `% 0` - let lookup = a.iter().map(|&b| b as u64) // Avoid overflows - .into_grouping_map_by(|i| i % modulo) - .sum(); - - let group_map_lookup = a.iter().map(|&b| b as u64) - .map(|i| (i % modulo, i)) - .into_group_map() - .into_iter() - .map(|(key, vals)| (key, vals.into_iter().sum())) - .collect::>(); - assert_eq!(lookup, group_map_lookup); - - for (&key, &sum) in lookup.iter() { - assert_eq!(sum, a.iter().map(|&b| b as u64).filter(|&val| val % modulo == key).sum::()); - } - } - - fn correct_grouping_map_by_product_modulo_key(a: Vec, modulo: u8) -> () { - let modulo = Wrapping(if modulo == 0 { 1 } else { modulo } as u64); // Avoid `% 0` - let lookup = a.iter().map(|&b| Wrapping(b as u64)) // Avoid overflows - .into_grouping_map_by(|i| i % modulo) - .product(); - - let group_map_lookup = a.iter().map(|&b| Wrapping(b as u64)) - .map(|i| (i % modulo, i)) - .into_group_map() - .into_iter() - .map(|(key, vals)| (key, vals.into_iter().product::>())) - .collect::>(); - assert_eq!(lookup, group_map_lookup); - - for (&key, &prod) in lookup.iter() { - assert_eq!( - prod, - a.iter() - .map(|&b| Wrapping(b as u64)) - .filter(|&val| val % modulo == key) - .product::>() - ); - } - } - - // This should check that if multiple elements are equally minimum or maximum - // then `max`, `min` and `minmax` pick the first minimum and the last maximum. - // This is to be consistent with `std::iter::max` and `std::iter::min`. - fn correct_grouping_map_by_min_max_minmax_order_modulo_key() -> () { - use itertools::MinMaxResult; - - let lookup = (0..=10) - .into_grouping_map_by(|_| 0) - .max_by(|_, _, _| Ordering::Equal); - - assert_eq!(lookup[&0], 10); - - let lookup = (0..=10) - .into_grouping_map_by(|_| 0) - .min_by(|_, _, _| Ordering::Equal); - - assert_eq!(lookup[&0], 0); - - let lookup = (0..=10) - .into_grouping_map_by(|_| 0) - .minmax_by(|_, _, _| Ordering::Equal); - - assert_eq!(lookup[&0], MinMaxResult::MinMax(0, 10)); - } -} - -quickcheck! { - fn counts(nums: Vec) -> TestResult { - let counts = nums.iter().counts(); - for (&item, &count) in counts.iter() { - #[allow(clippy::absurd_extreme_comparisons)] - if count <= 0 { - return TestResult::failed(); - } - if count != nums.iter().filter(|&x| x == item).count() { - return TestResult::failed(); - } - } - for item in nums.iter() { - if !counts.contains_key(item) { - return TestResult::failed(); - } - } - TestResult::passed() - } -} - -quickcheck! 
{ - fn test_double_ended_zip_2(a: Vec, b: Vec) -> TestResult { - let mut x = - multizip((a.clone().into_iter(), b.clone().into_iter())) - .collect_vec(); - x.reverse(); - - let y = - multizip((a.into_iter(), b.into_iter())) - .rfold(Vec::new(), |mut vec, e| { vec.push(e); vec }); - - TestResult::from_bool(itertools::equal(x, y)) - } - - fn test_double_ended_zip_3(a: Vec, b: Vec, c: Vec) -> TestResult { - let mut x = - multizip((a.clone().into_iter(), b.clone().into_iter(), c.clone().into_iter())) - .collect_vec(); - x.reverse(); - - let y = - multizip((a.into_iter(), b.into_iter(), c.into_iter())) - .rfold(Vec::new(), |mut vec, e| { vec.push(e); vec }); - - TestResult::from_bool(itertools::equal(x, y)) - } -} - -fn is_fused(mut it: I) -> bool { - for _ in it.by_ref() {} - for _ in 0..10 { - if it.next().is_some() { - return false; - } - } - true -} - -quickcheck! { - fn fused_combination(a: Iter) -> bool - { - is_fused(a.clone().combinations(1)) && - is_fused(a.combinations(3)) - } - - fn fused_combination_with_replacement(a: Iter) -> bool - { - is_fused(a.clone().combinations_with_replacement(1)) && - is_fused(a.combinations_with_replacement(3)) - } - - fn fused_tuple_combination(a: Iter) -> bool - { - is_fused(a.clone().fuse().tuple_combinations::<(_,)>()) && - is_fused(a.fuse().tuple_combinations::<(_,_,_)>()) - } - - fn fused_unique(a: Iter) -> bool - { - is_fused(a.fuse().unique()) - } - - fn fused_unique_by(a: Iter) -> bool - { - is_fused(a.fuse().unique_by(|x| x % 100)) - } - - fn fused_interleave_shortest(a: Iter, b: Iter) -> bool - { - !is_fused(a.clone().interleave_shortest(b.clone())) && - is_fused(a.fuse().interleave_shortest(b.fuse())) - } - - fn fused_product(a: Iter, b: Iter) -> bool - { - is_fused(a.fuse().cartesian_product(b.fuse())) - } - - fn fused_merge(a: Iter, b: Iter) -> bool - { - is_fused(a.fuse().merge(b.fuse())) - } - - fn fused_filter_ok(a: Iter) -> bool - { - is_fused(a.map(|x| if x % 2 == 0 {Ok(x)} else {Err(x)} ) - .filter_ok(|x| x % 3 == 0) - .fuse()) - } - - fn fused_filter_map_ok(a: Iter) -> bool - { - is_fused(a.map(|x| if x % 2 == 0 {Ok(x)} else {Err(x)} ) - .filter_map_ok(|x| if x % 3 == 0 {Some(x / 3)} else {None}) - .fuse()) - } - - fn fused_positions(a: Iter) -> bool - { - !is_fused(a.clone().positions(|x|x%2==0)) && - is_fused(a.fuse().positions(|x|x%2==0)) - } - - fn fused_update(a: Iter) -> bool - { - !is_fused(a.clone().update(|x|*x+=1)) && - is_fused(a.fuse().update(|x|*x+=1)) - } - - fn fused_tuple_windows(a: Iter) -> bool - { - is_fused(a.fuse().tuple_windows::<(_,_)>()) - } - - fn fused_pad_using(a: Iter) -> bool - { - is_fused(a.fuse().pad_using(100,|_|0)) - } -} - -quickcheck! 
{ - fn min_set_contains_min(a: Vec<(usize, char)>) -> bool { - let result_set = a.iter().min_set(); - if let Some(result_element) = a.iter().min() { - result_set.contains(&result_element) - } else { - result_set.is_empty() - } - } - - fn min_set_by_contains_min(a: Vec<(usize, char)>) -> bool { - let compare = |x: &&(usize, char), y: &&(usize, char)| x.1.cmp(&y.1); - let result_set = a.iter().min_set_by(compare); - if let Some(result_element) = a.iter().min_by(compare) { - result_set.contains(&result_element) - } else { - result_set.is_empty() - } - } - - fn min_set_by_key_contains_min(a: Vec<(usize, char)>) -> bool { - let key = |x: &&(usize, char)| x.1; - let result_set = a.iter().min_set_by_key(&key); - if let Some(result_element) = a.iter().min_by_key(&key) { - result_set.contains(&result_element) - } else { - result_set.is_empty() - } - } - - fn max_set_contains_max(a: Vec<(usize, char)>) -> bool { - let result_set = a.iter().max_set(); - if let Some(result_element) = a.iter().max() { - result_set.contains(&result_element) - } else { - result_set.is_empty() - } - } - - fn max_set_by_contains_max(a: Vec<(usize, char)>) -> bool { - let compare = |x: &&(usize, char), y: &&(usize, char)| x.1.cmp(&y.1); - let result_set = a.iter().max_set_by(compare); - if let Some(result_element) = a.iter().max_by(compare) { - result_set.contains(&result_element) - } else { - result_set.is_empty() - } - } - - fn max_set_by_key_contains_max(a: Vec<(usize, char)>) -> bool { - let key = |x: &&(usize, char)| x.1; - let result_set = a.iter().max_set_by_key(&key); - if let Some(result_element) = a.iter().max_by_key(&key) { - result_set.contains(&result_element) - } else { - result_set.is_empty() - } - } - - fn tail(v: Vec, n: u8) -> bool { - let n = n as usize; - let result = &v[v.len().saturating_sub(n)..]; - itertools::equal(v.iter().tail(n), result) - && itertools::equal(v.iter().filter(|_| true).tail(n), result) - } -} diff --git a/vendor/itertools/tests/specializations.rs b/vendor/itertools/tests/specializations.rs deleted file mode 100644 index 71231147226beb..00000000000000 --- a/vendor/itertools/tests/specializations.rs +++ /dev/null @@ -1,582 +0,0 @@ -#![allow(unstable_name_collisions)] - -use itertools::Itertools; -use quickcheck::Arbitrary; -use quickcheck::{quickcheck, TestResult}; -use rand::Rng; -use std::fmt::Debug; - -struct Unspecialized(I); - -impl Iterator for Unspecialized -where - I: Iterator, -{ - type Item = I::Item; - - #[inline(always)] - fn next(&mut self) -> Option { - self.0.next() - } -} - -impl DoubleEndedIterator for Unspecialized -where - I: DoubleEndedIterator, -{ - #[inline(always)] - fn next_back(&mut self) -> Option { - self.0.next_back() - } -} - -fn test_specializations(it: &I) -where - I::Item: Eq + Debug + Clone, - I: Iterator + Clone, -{ - macro_rules! check_specialized { - ($src:expr, |$it:pat| $closure:expr) => { - // Many iterators special-case the first elements, so we test specializations for iterators that have already been advanced. 
- let mut src = $src.clone(); - for _ in 0..5 { - let $it = src.clone(); - let v1 = $closure; - let $it = Unspecialized(src.clone()); - let v2 = $closure; - assert_eq!(v1, v2); - src.next(); - } - } - } - check_specialized!(it, |i| i.count()); - check_specialized!(it, |i| i.last()); - check_specialized!(it, |i| i.collect::>()); - check_specialized!(it, |i| { - let mut parameters_from_fold = vec![]; - let fold_result = i.fold(vec![], |mut acc, v: I::Item| { - parameters_from_fold.push((acc.clone(), v.clone())); - acc.push(v); - acc - }); - (parameters_from_fold, fold_result) - }); - check_specialized!(it, |mut i| { - let mut parameters_from_all = vec![]; - let first = i.next(); - let all_result = i.all(|x| { - parameters_from_all.push(x.clone()); - Some(x) == first - }); - (parameters_from_all, all_result) - }); - let size = it.clone().count(); - for n in 0..size + 2 { - check_specialized!(it, |mut i| i.nth(n)); - } - // size_hint is a bit harder to check - let mut it_sh = it.clone(); - for n in 0..size + 2 { - let len = it_sh.clone().count(); - let (min, max) = it_sh.size_hint(); - assert_eq!(size - n.min(size), len); - assert!(min <= len); - if let Some(max) = max { - assert!(len <= max); - } - it_sh.next(); - } -} - -fn test_double_ended_specializations(it: &I) -where - I::Item: Eq + Debug + Clone, - I: DoubleEndedIterator + Clone, -{ - macro_rules! check_specialized { - ($src:expr, |$it:pat| $closure:expr) => { - // Many iterators special-case the first elements, so we test specializations for iterators that have already been advanced. - let mut src = $src.clone(); - for step in 0..8 { - let $it = src.clone(); - let v1 = $closure; - let $it = Unspecialized(src.clone()); - let v2 = $closure; - assert_eq!(v1, v2); - if step % 2 == 0 { - src.next(); - } else { - src.next_back(); - } - } - } - } - check_specialized!(it, |i| { - let mut parameters_from_rfold = vec![]; - let rfold_result = i.rfold(vec![], |mut acc, v: I::Item| { - parameters_from_rfold.push((acc.clone(), v.clone())); - acc.push(v); - acc - }); - (parameters_from_rfold, rfold_result) - }); - let size = it.clone().count(); - for n in 0..size + 2 { - check_specialized!(it, |mut i| i.nth_back(n)); - } -} - -quickcheck! 
{ - fn interleave(v: Vec, w: Vec) -> () { - test_specializations(&v.iter().interleave(w.iter())); - } - - fn interleave_shortest(v: Vec, w: Vec) -> () { - test_specializations(&v.iter().interleave_shortest(w.iter())); - } - - fn batching(v: Vec) -> () { - test_specializations(&v.iter().batching(Iterator::next)); - } - - fn tuple_windows(v: Vec) -> () { - test_specializations(&v.iter().tuple_windows::<(_,)>()); - test_specializations(&v.iter().tuple_windows::<(_, _)>()); - test_specializations(&v.iter().tuple_windows::<(_, _, _)>()); - } - - fn circular_tuple_windows(v: Vec) -> () { - test_specializations(&v.iter().circular_tuple_windows::<(_,)>()); - test_specializations(&v.iter().circular_tuple_windows::<(_, _)>()); - test_specializations(&v.iter().circular_tuple_windows::<(_, _, _)>()); - } - - fn tuples(v: Vec) -> () { - test_specializations(&v.iter().tuples::<(_,)>()); - test_specializations(&v.iter().tuples::<(_, _)>()); - test_specializations(&v.iter().tuples::<(_, _, _)>()); - } - - fn cartesian_product(a: Vec, b: Vec) -> TestResult { - if a.len() * b.len() > 100 { - return TestResult::discard(); - } - test_specializations(&a.iter().cartesian_product(&b)); - TestResult::passed() - } - - fn multi_cartesian_product(a: Vec, b: Vec, c: Vec) -> TestResult { - if a.len() * b.len() * c.len() > 100 { - return TestResult::discard(); - } - test_specializations(&vec![a, b, c].into_iter().multi_cartesian_product()); - TestResult::passed() - } - - fn coalesce(v: Vec) -> () { - test_specializations(&v.iter().coalesce(|x, y| if x == y { Ok(x) } else { Err((x, y)) })) - } - - fn dedup(v: Vec) -> () { - test_specializations(&v.iter().dedup()) - } - - fn dedup_by(v: Vec) -> () { - test_specializations(&v.iter().dedup_by(PartialOrd::ge)) - } - - fn dedup_with_count(v: Vec) -> () { - test_specializations(&v.iter().dedup_with_count()) - } - - fn dedup_by_with_count(v: Vec) -> () { - test_specializations(&v.iter().dedup_by_with_count(PartialOrd::ge)) - } - - fn duplicates(v: Vec) -> () { - let it = v.iter().duplicates(); - test_specializations(&it); - test_double_ended_specializations(&it); - } - - fn duplicates_by(v: Vec) -> () { - let it = v.iter().duplicates_by(|x| *x % 10); - test_specializations(&it); - test_double_ended_specializations(&it); - } - - fn unique(v: Vec) -> () { - let it = v.iter().unique(); - test_specializations(&it); - test_double_ended_specializations(&it); - } - - fn unique_by(v: Vec) -> () { - let it = v.iter().unique_by(|x| *x % 50); - test_specializations(&it); - test_double_ended_specializations(&it); - } - - fn take_while_inclusive(v: Vec) -> () { - test_specializations(&v.iter().copied().take_while_inclusive(|&x| x < 100)); - } - - fn while_some(v: Vec) -> () { - test_specializations(&v.iter().map(|&x| if x < 100 { Some(2 * x) } else { None }).while_some()); - } - - fn pad_using(v: Vec) -> () { - use std::convert::TryFrom; - let it = v.iter().copied().pad_using(10, |i| u8::try_from(5 * i).unwrap_or(u8::MAX)); - test_specializations(&it); - test_double_ended_specializations(&it); - } - - fn with_position(v: Vec) -> () { - test_specializations(&v.iter().with_position()); - } - - fn positions(v: Vec) -> () { - let it = v.iter().positions(|x| x % 5 == 0); - test_specializations(&it); - test_double_ended_specializations(&it); - } - - fn update(v: Vec) -> () { - let it = v.iter().copied().update(|x| *x = x.wrapping_mul(7)); - test_specializations(&it); - test_double_ended_specializations(&it); - } - - fn tuple_combinations(v: Vec) -> TestResult { - if v.len() > 10 { - return 
TestResult::discard(); - } - test_specializations(&v.iter().tuple_combinations::<(_,)>()); - test_specializations(&v.iter().tuple_combinations::<(_, _)>()); - test_specializations(&v.iter().tuple_combinations::<(_, _, _)>()); - TestResult::passed() - } - - fn intersperse(v: Vec) -> () { - test_specializations(&v.into_iter().intersperse(0)); - } - - fn intersperse_with(v: Vec) -> () { - test_specializations(&v.into_iter().intersperse_with(|| 0)); - } - - fn combinations(a: Vec, n: u8) -> TestResult { - if n > 3 || a.len() > 8 { - return TestResult::discard(); - } - test_specializations(&a.iter().combinations(n as usize)); - TestResult::passed() - } - - fn combinations_with_replacement(a: Vec, n: u8) -> TestResult { - if n > 3 || a.len() > 7 { - return TestResult::discard(); - } - test_specializations(&a.iter().combinations_with_replacement(n as usize)); - TestResult::passed() - } - - fn permutations(a: Vec, n: u8) -> TestResult { - if n > 3 || a.len() > 8 { - return TestResult::discard(); - } - test_specializations(&a.iter().permutations(n as usize)); - TestResult::passed() - } - - fn powerset(a: Vec) -> TestResult { - if a.len() > 6 { - return TestResult::discard(); - } - test_specializations(&a.iter().powerset()); - TestResult::passed() - } - - fn zip_longest(a: Vec, b: Vec) -> () { - let it = a.into_iter().zip_longest(b); - test_specializations(&it); - test_double_ended_specializations(&it); - } - - fn zip_eq(a: Vec) -> () { - test_specializations(&a.iter().zip_eq(a.iter().rev())) - } - - fn multizip(a: Vec) -> () { - let it = itertools::multizip((a.iter(), a.iter().rev(), a.iter().take(50))); - test_specializations(&it); - test_double_ended_specializations(&it); - } - - fn izip(a: Vec, b: Vec) -> () { - test_specializations(&itertools::izip!(b.iter(), a, b.iter().rev())); - } - - fn iproduct(a: Vec, b: Vec, c: Vec) -> TestResult { - if a.len() * b.len() * c.len() > 200 { - return TestResult::discard(); - } - test_specializations(&itertools::iproduct!(a, b.iter(), c)); - TestResult::passed() - } - - fn repeat_n(element: i8, n: u8) -> () { - let it = itertools::repeat_n(element, n as usize); - test_specializations(&it); - test_double_ended_specializations(&it); - } - - fn exactly_one_error(v: Vec) -> TestResult { - // Use `at_most_one` would be similar. - match v.iter().exactly_one() { - Ok(_) => TestResult::discard(), - Err(it) => { - test_specializations(&it); - TestResult::passed() - } - } - } -} - -quickcheck! 
{ - fn put_back_qc(test_vec: Vec) -> () { - test_specializations(&itertools::put_back(test_vec.iter())); - let mut pb = itertools::put_back(test_vec.into_iter()); - pb.put_back(1); - test_specializations(&pb); - } - - fn put_back_n(v: Vec, n: u8) -> () { - let mut it = itertools::put_back_n(v); - for k in 0..n { - it.put_back(k); - } - test_specializations(&it); - } - - fn multipeek(v: Vec, n: u8) -> () { - let mut it = v.into_iter().multipeek(); - for _ in 0..n { - it.peek(); - } - test_specializations(&it); - } - - fn peek_nth_with_peek(v: Vec, n: u8) -> () { - let mut it = itertools::peek_nth(v); - for _ in 0..n { - it.peek(); - } - test_specializations(&it); - } - - fn peek_nth_with_peek_nth(v: Vec, n: u8) -> () { - let mut it = itertools::peek_nth(v); - it.peek_nth(n as usize); - test_specializations(&it); - } - - fn peek_nth_with_peek_mut(v: Vec, n: u8) -> () { - let mut it = itertools::peek_nth(v); - for _ in 0..n { - if let Some(x) = it.peek_mut() { - *x = x.wrapping_add(50); - } - } - test_specializations(&it); - } - - fn peek_nth_with_peek_nth_mut(v: Vec, n: u8) -> () { - let mut it = itertools::peek_nth(v); - if let Some(x) = it.peek_nth_mut(n as usize) { - *x = x.wrapping_add(50); - } - test_specializations(&it); - } -} - -quickcheck! { - fn merge(a: Vec, b: Vec) -> () { - test_specializations(&a.into_iter().merge(b)) - } - - fn merge_by(a: Vec, b: Vec) -> () { - test_specializations(&a.into_iter().merge_by(b, PartialOrd::ge)) - } - - fn merge_join_by_ordering(i1: Vec, i2: Vec) -> () { - test_specializations(&i1.into_iter().merge_join_by(i2, Ord::cmp)); - } - - fn merge_join_by_bool(i1: Vec, i2: Vec) -> () { - test_specializations(&i1.into_iter().merge_join_by(i2, PartialOrd::ge)); - } - - fn kmerge(a: Vec, b: Vec, c: Vec) -> () { - test_specializations(&vec![a, b, c] - .into_iter() - .map(|v| v.into_iter().sorted()) - .kmerge()); - } - - fn kmerge_by(a: Vec, b: Vec, c: Vec) -> () { - test_specializations(&vec![a, b, c] - .into_iter() - .map(|v| v.into_iter().sorted_by_key(|a| a.abs())) - .kmerge_by(|a, b| a.abs() < b.abs())); - } -} - -quickcheck! { - fn map_into(v: Vec) -> () { - let it = v.into_iter().map_into::(); - test_specializations(&it); - test_double_ended_specializations(&it); - } - - fn map_ok(v: Vec>) -> () { - let it = v.into_iter().map_ok(|u| u.checked_add(1)); - test_specializations(&it); - test_double_ended_specializations(&it); - } - - fn filter_ok(v: Vec>) -> () { - test_specializations(&v.into_iter().filter_ok(|&i| i < 20)); - } - - fn filter_map_ok(v: Vec>) -> () { - test_specializations(&v.into_iter().filter_map_ok(|i| if i < 20 { Some(i * 2) } else { None })); - } - - // `SmallIter2` because `Vec` is too slow and we get bad coverage from a singleton like Option - fn flatten_ok(v: Vec, char>>) -> () { - let it = v.into_iter().flatten_ok(); - test_specializations(&it); - test_double_ended_specializations(&it); - } -} - -quickcheck! { - // TODO Replace this function by a normal call to test_specializations - fn process_results(v: Vec>) -> () { - helper(v.iter().copied()); - helper(v.iter().copied().filter(Result::is_ok)); - - fn helper(it: impl DoubleEndedIterator> + Clone) { - macro_rules! 
check_results_specialized { - ($src:expr, |$it:pat| $closure:expr) => { - assert_eq!( - itertools::process_results($src.clone(), |$it| $closure), - itertools::process_results($src.clone(), |i| { - let $it = Unspecialized(i); - $closure - }), - ) - } - } - - check_results_specialized!(it, |i| i.count()); - check_results_specialized!(it, |i| i.last()); - check_results_specialized!(it, |i| i.collect::>()); - check_results_specialized!(it, |i| i.rev().collect::>()); - check_results_specialized!(it, |i| { - let mut parameters_from_fold = vec![]; - let fold_result = i.fold(vec![], |mut acc, v| { - parameters_from_fold.push((acc.clone(), v)); - acc.push(v); - acc - }); - (parameters_from_fold, fold_result) - }); - check_results_specialized!(it, |i| { - let mut parameters_from_rfold = vec![]; - let rfold_result = i.rfold(vec![], |mut acc, v| { - parameters_from_rfold.push((acc.clone(), v)); - acc.push(v); - acc - }); - (parameters_from_rfold, rfold_result) - }); - check_results_specialized!(it, |mut i| { - let mut parameters_from_all = vec![]; - let first = i.next(); - let all_result = i.all(|x| { - parameters_from_all.push(x); - Some(x)==first - }); - (parameters_from_all, all_result) - }); - let size = it.clone().count(); - for n in 0..size + 2 { - check_results_specialized!(it, |mut i| i.nth(n)); - } - for n in 0..size + 2 { - check_results_specialized!(it, |mut i| i.nth_back(n)); - } - } - } -} - -/// Like `VecIntoIter` with maximum 2 elements. -#[derive(Debug, Clone, Default)] -enum SmallIter2 { - #[default] - Zero, - One(T), - Two(T, T), -} - -impl Arbitrary for SmallIter2 { - fn arbitrary(g: &mut G) -> Self { - match g.gen_range(0u8, 3) { - 0 => Self::Zero, - 1 => Self::One(T::arbitrary(g)), - 2 => Self::Two(T::arbitrary(g), T::arbitrary(g)), - _ => unreachable!(), - } - } - // maybe implement shrink too, maybe not -} - -impl Iterator for SmallIter2 { - type Item = T; - - fn next(&mut self) -> Option { - match std::mem::take(self) { - Self::Zero => None, - Self::One(val) => Some(val), - Self::Two(val, second) => { - *self = Self::One(second); - Some(val) - } - } - } - - fn size_hint(&self) -> (usize, Option) { - let len = match self { - Self::Zero => 0, - Self::One(_) => 1, - Self::Two(_, _) => 2, - }; - (len, Some(len)) - } -} - -impl DoubleEndedIterator for SmallIter2 { - fn next_back(&mut self) -> Option { - match std::mem::take(self) { - Self::Zero => None, - Self::One(val) => Some(val), - Self::Two(first, val) => { - *self = Self::One(first); - Some(val) - } - } - } -} diff --git a/vendor/itertools/tests/test_core.rs b/vendor/itertools/tests/test_core.rs deleted file mode 100644 index 32af246c017b6a..00000000000000 --- a/vendor/itertools/tests/test_core.rs +++ /dev/null @@ -1,374 +0,0 @@ -//! Licensed under the Apache License, Version 2.0 -//! https://www.apache.org/licenses/LICENSE-2.0 or the MIT license -//! https://opensource.org/licenses/MIT, at your -//! option. This file may not be copied, modified, or distributed -//! except according to those terms. 
-#![no_std] -#![allow(deprecated)] - -use crate::it::chain; -use crate::it::free::put_back; -use crate::it::interleave; -use crate::it::intersperse; -use crate::it::intersperse_with; -use crate::it::iproduct; -use crate::it::izip; -use crate::it::multizip; -use crate::it::Itertools; -use core::iter; -use itertools as it; - -#[allow(dead_code)] -fn get_esi_then_esi(it: I) { - fn is_esi(_: impl ExactSizeIterator) {} - is_esi(it.clone().get(1..4)); - is_esi(it.clone().get(1..=4)); - is_esi(it.clone().get(1..)); - is_esi(it.clone().get(..4)); - is_esi(it.clone().get(..=4)); - is_esi(it.get(..)); -} - -#[allow(dead_code)] -fn get_dei_esi_then_dei_esi(it: I) { - fn is_dei_esi(_: impl DoubleEndedIterator + ExactSizeIterator) {} - is_dei_esi(it.clone().get(1..4)); - is_dei_esi(it.clone().get(1..=4)); - is_dei_esi(it.clone().get(1..)); - is_dei_esi(it.clone().get(..4)); - is_dei_esi(it.clone().get(..=4)); - is_dei_esi(it.get(..)); -} - -#[test] -fn get_1_max() { - let mut it = (0..5).get(1..=usize::MAX); - assert_eq!(it.next(), Some(1)); - assert_eq!(it.next_back(), Some(4)); -} - -#[test] -#[should_panic] -fn get_full_range_inclusive() { - let _it = (0..5).get(0..=usize::MAX); -} - -#[test] -fn product0() { - let mut prod = iproduct!(); - assert_eq!(prod.next(), Some(())); - assert!(prod.next().is_none()); -} - -#[test] -fn iproduct1() { - let s = "αβ"; - - let mut prod = iproduct!(s.chars()); - assert_eq!(prod.next(), Some(('α',))); - assert_eq!(prod.next(), Some(('β',))); - assert!(prod.next().is_none()); -} - -#[test] -fn product2() { - let s = "αβ"; - - let mut prod = iproduct!(s.chars(), 0..2); - assert!(prod.next() == Some(('α', 0))); - assert!(prod.next() == Some(('α', 1))); - assert!(prod.next() == Some(('β', 0))); - assert!(prod.next() == Some(('β', 1))); - assert!(prod.next().is_none()); -} - -#[test] -fn product_temporary() { - for (_x, _y, _z) in iproduct!( - [0, 1, 2].iter().cloned(), - [0, 1, 2].iter().cloned(), - [0, 1, 2].iter().cloned() - ) { - // ok - } -} - -#[test] -fn izip_macro() { - let mut zip = izip!(2..3); - assert!(zip.next() == Some(2)); - assert!(zip.next().is_none()); - - let mut zip = izip!(0..3, 0..2, 0..2i8); - for i in 0..2 { - assert!((i as usize, i, i as i8) == zip.next().unwrap()); - } - assert!(zip.next().is_none()); - - let xs: [isize; 0] = []; - let mut zip = izip!(0..3, 0..2, 0..2i8, &xs); - assert!(zip.next().is_none()); -} - -#[test] -fn izip2() { - let _zip1: iter::Zip<_, _> = izip!(1.., 2..); - let _zip2: iter::Zip<_, _> = izip!(1.., 2..,); -} - -#[test] -fn izip3() { - let mut zip: iter::Map, _> = izip!(0..3, 0..2, 0..2i8); - for i in 0..2 { - assert!((i as usize, i, i as i8) == zip.next().unwrap()); - } - assert!(zip.next().is_none()); -} - -#[test] -fn multizip3() { - let mut zip = multizip((0..3, 0..2, 0..2i8)); - for i in 0..2 { - assert!((i as usize, i, i as i8) == zip.next().unwrap()); - } - assert!(zip.next().is_none()); - - let xs: [isize; 0] = []; - let mut zip = multizip((0..3, 0..2, 0..2i8, xs.iter())); - assert!(zip.next().is_none()); - - for (_, _, _, _, _) in multizip((0..3, 0..2, xs.iter(), &xs, xs.to_vec())) { - /* test compiles */ - } -} - -#[test] -fn chain_macro() { - let mut chain = chain!(2..3); - assert!(chain.next() == Some(2)); - assert!(chain.next().is_none()); - - let mut chain = chain!(0..2, 2..3, 3..5i8); - for i in 0..5i8 { - assert_eq!(Some(i), chain.next()); - } - assert!(chain.next().is_none()); - - let mut chain = chain!(); - assert_eq!(chain.next(), Option::<()>::None); -} - -#[test] -fn chain2() { - let _ = 
chain!(1.., 2..); - let _ = chain!(1.., 2..,); -} - -#[test] -fn write_to() { - let xs = [7, 9, 8]; - let mut ys = [0; 5]; - let cnt = ys.iter_mut().set_from(xs.iter().copied()); - assert!(cnt == xs.len()); - assert!(ys == [7, 9, 8, 0, 0]); - - let cnt = ys.iter_mut().set_from(0..10); - assert!(cnt == ys.len()); - assert!(ys == [0, 1, 2, 3, 4]); -} - -#[test] -fn test_interleave() { - let xs: [u8; 0] = []; - let ys = [7u8, 9, 8, 10]; - let zs = [2u8, 77]; - let it = interleave(xs.iter(), ys.iter()); - it::assert_equal(it, ys.iter()); - - let rs = [7u8, 2, 9, 77, 8, 10]; - let it = interleave(ys.iter(), zs.iter()); - it::assert_equal(it, rs.iter()); -} - -#[test] -fn test_intersperse() { - let xs = [1u8, 2, 3]; - let ys = [1u8, 0, 2, 0, 3]; - let it = intersperse(&xs, &0); - it::assert_equal(it, ys.iter()); -} - -#[test] -fn test_intersperse_with() { - let xs = [1u8, 2, 3]; - let ys = [1u8, 10, 2, 10, 3]; - let i = 10; - let it = intersperse_with(&xs, || &i); - it::assert_equal(it, ys.iter()); -} - -#[test] -fn dropping() { - let xs = [1, 2, 3]; - let mut it = xs.iter().dropping(2); - assert_eq!(it.next(), Some(&3)); - assert!(it.next().is_none()); - let mut it = xs.iter().dropping(5); - assert!(it.next().is_none()); -} - -#[test] -fn batching() { - let xs = [0, 1, 2, 1, 3]; - let ys = [(0, 1), (2, 1)]; - - // An iterator that gathers elements up in pairs - let pit = xs - .iter() - .cloned() - .batching(|it| it.next().and_then(|x| it.next().map(|y| (x, y)))); - it::assert_equal(pit, ys.iter().cloned()); -} - -#[test] -fn test_put_back() { - let xs = [0, 1, 1, 1, 2, 1, 3, 3]; - let mut pb = put_back(xs.iter().cloned()); - pb.next(); - pb.put_back(1); - pb.put_back(0); - it::assert_equal(pb, xs.iter().cloned()); -} - -#[test] -fn merge() { - it::assert_equal((0..10).step_by(2).merge((1..10).step_by(2)), 0..10); -} - -#[test] -fn repeatn() { - let s = "α"; - let mut it = it::repeat_n(s, 3); - assert_eq!(it.len(), 3); - assert_eq!(it.next(), Some(s)); - assert_eq!(it.next(), Some(s)); - assert_eq!(it.next(), Some(s)); - assert_eq!(it.next(), None); - assert_eq!(it.next(), None); -} - -#[test] -fn count_clones() { - // Check that RepeatN only clones N - 1 times. 
- - use core::cell::Cell; - #[derive(PartialEq, Debug)] - struct Foo { - n: Cell, - } - - impl Clone for Foo { - fn clone(&self) -> Self { - let n = self.n.get(); - self.n.set(n + 1); - Self { - n: Cell::new(n + 1), - } - } - } - - for n in 0..10 { - let f = Foo { n: Cell::new(0) }; - let it = it::repeat_n(f, n); - // drain it - let last = it.last(); - if n == 0 { - assert_eq!(last, None); - } else { - assert_eq!( - last, - Some(Foo { - n: Cell::new(n - 1) - }) - ); - } - } -} - -#[test] -fn part() { - let mut data = [7, 1, 1, 9, 1, 1, 3]; - let i = it::partition(&mut data, |elt| *elt >= 3); - assert_eq!(i, 3); - assert_eq!(data, [7, 3, 9, 1, 1, 1, 1]); - - let i = it::partition(&mut data, |elt| *elt == 1); - assert_eq!(i, 4); - assert_eq!(data, [1, 1, 1, 1, 9, 3, 7]); - - let mut data = [1, 2, 3, 4, 5, 6, 7, 8, 9]; - let i = it::partition(&mut data, |elt| *elt % 3 == 0); - assert_eq!(i, 3); - assert_eq!(data, [9, 6, 3, 4, 5, 2, 7, 8, 1]); -} - -#[test] -fn tree_reduce() { - for i in 0..100 { - assert_eq!((0..i).tree_reduce(|x, y| x + y), (0..i).fold1(|x, y| x + y)); - } -} - -#[test] -fn exactly_one() { - assert_eq!((0..10).filter(|&x| x == 2).exactly_one().unwrap(), 2); - assert!((0..10) - .filter(|&x| x > 1 && x < 4) - .exactly_one() - .unwrap_err() - .eq(2..4)); - assert!((0..10) - .filter(|&x| x > 1 && x < 5) - .exactly_one() - .unwrap_err() - .eq(2..5)); - assert!((0..10) - .filter(|&_| false) - .exactly_one() - .unwrap_err() - .eq(0..0)); -} - -#[test] -fn at_most_one() { - assert_eq!((0..10).filter(|&x| x == 2).at_most_one().unwrap(), Some(2)); - assert!((0..10) - .filter(|&x| x > 1 && x < 4) - .at_most_one() - .unwrap_err() - .eq(2..4)); - assert!((0..10) - .filter(|&x| x > 1 && x < 5) - .at_most_one() - .unwrap_err() - .eq(2..5)); - assert_eq!((0..10).filter(|&_| false).at_most_one().unwrap(), None); -} - -#[test] -fn sum1() { - let v: &[i32] = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]; - assert_eq!(v[..0].iter().cloned().sum1::(), None); - assert_eq!(v[1..2].iter().cloned().sum1::(), Some(1)); - assert_eq!(v[1..3].iter().cloned().sum1::(), Some(3)); - assert_eq!(v.iter().cloned().sum1::(), Some(55)); -} - -#[test] -fn product1() { - let v: &[i32] = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]; - assert_eq!(v[..0].iter().cloned().product1::(), None); - assert_eq!(v[..1].iter().cloned().product1::(), Some(0)); - assert_eq!(v[1..3].iter().cloned().product1::(), Some(2)); - assert_eq!(v[1..5].iter().cloned().product1::(), Some(24)); -} diff --git a/vendor/itertools/tests/test_std.rs b/vendor/itertools/tests/test_std.rs deleted file mode 100644 index 00246d506dcfbb..00000000000000 --- a/vendor/itertools/tests/test_std.rs +++ /dev/null @@ -1,1523 +0,0 @@ -#![allow(unstable_name_collisions)] - -use crate::it::cloned; -use crate::it::free::put_back_n; -use crate::it::free::rciter; -use crate::it::iproduct; -use crate::it::izip; -use crate::it::multipeek; -use crate::it::multizip; -use crate::it::peek_nth; -use crate::it::repeat_n; -use crate::it::ExactlyOneError; -use crate::it::FoldWhile; -use crate::it::Itertools; -use itertools as it; -use quickcheck as qc; -use rand::{ - distributions::{Distribution, Standard}, - rngs::StdRng, - Rng, SeedableRng, -}; -use rand::{seq::SliceRandom, thread_rng}; -use std::{cmp::min, fmt::Debug, marker::PhantomData}; - -#[test] -fn product3() { - let prod = iproduct!(0..3, 0..2, 0..2); - assert_eq!(prod.size_hint(), (12, Some(12))); - let v = prod.collect_vec(); - for i in 0..3 { - for j in 0..2 { - for k in 0..2 { - assert!((i, j, k) == v[(i * 2 * 2 + j * 2 + k) as 
usize]); - } - } - } - for (_, _, _, _) in iproduct!(0..3, 0..2, 0..2, 0..3) { /* test compiles */ } -} - -#[test] -fn interleave_shortest() { - let v0: Vec = vec![0, 2, 4]; - let v1: Vec = vec![1, 3, 5, 7]; - let it = v0.into_iter().interleave_shortest(v1); - assert_eq!(it.size_hint(), (6, Some(6))); - assert_eq!(it.collect_vec(), vec![0, 1, 2, 3, 4, 5]); - - let v0: Vec = vec![0, 2, 4, 6, 8]; - let v1: Vec = vec![1, 3, 5]; - let it = v0.into_iter().interleave_shortest(v1); - assert_eq!(it.size_hint(), (7, Some(7))); - assert_eq!(it.collect_vec(), vec![0, 1, 2, 3, 4, 5, 6]); - - let i0 = ::std::iter::repeat(0); - let v1: Vec<_> = vec![1, 3, 5]; - let it = i0.interleave_shortest(v1); - assert_eq!(it.size_hint(), (7, Some(7))); - - let v0: Vec<_> = vec![0, 2, 4]; - let i1 = ::std::iter::repeat(1); - let it = v0.into_iter().interleave_shortest(i1); - assert_eq!(it.size_hint(), (6, Some(6))); -} - -#[test] -fn duplicates_by() { - let xs = ["aaa", "bbbbb", "aa", "ccc", "bbbb", "aaaaa", "cccc"]; - let ys = ["aa", "bbbb", "cccc"]; - it::assert_equal(ys.iter(), xs.iter().duplicates_by(|x| x[..2].to_string())); - it::assert_equal( - ys.iter(), - xs.iter().rev().duplicates_by(|x| x[..2].to_string()).rev(), - ); - let ys_rev = ["ccc", "aa", "bbbbb"]; - it::assert_equal( - ys_rev.iter(), - xs.iter().duplicates_by(|x| x[..2].to_string()).rev(), - ); -} - -#[test] -fn duplicates() { - let xs = [0, 1, 2, 3, 2, 1, 3]; - let ys = [2, 1, 3]; - it::assert_equal(ys.iter(), xs.iter().duplicates()); - it::assert_equal(ys.iter(), xs.iter().rev().duplicates().rev()); - let ys_rev = [3, 2, 1]; - it::assert_equal(ys_rev.iter(), xs.iter().duplicates().rev()); - - let xs = [0, 1, 0, 1]; - let ys = [0, 1]; - it::assert_equal(ys.iter(), xs.iter().duplicates()); - it::assert_equal(ys.iter(), xs.iter().rev().duplicates().rev()); - let ys_rev = [1, 0]; - it::assert_equal(ys_rev.iter(), xs.iter().duplicates().rev()); - - let xs = [0, 1, 2, 1, 2]; - let ys = vec![1, 2]; - assert_eq!(ys, xs.iter().duplicates().cloned().collect_vec()); - assert_eq!( - ys, - xs.iter().rev().duplicates().rev().cloned().collect_vec() - ); - let ys_rev = vec![2, 1]; - assert_eq!(ys_rev, xs.iter().duplicates().rev().cloned().collect_vec()); -} - -#[test] -fn unique_by() { - let xs = ["aaa", "bbbbb", "aa", "ccc", "bbbb", "aaaaa", "cccc"]; - let ys = ["aaa", "bbbbb", "ccc"]; - it::assert_equal(ys.iter(), xs.iter().unique_by(|x| x[..2].to_string())); - it::assert_equal( - ys.iter(), - xs.iter().rev().unique_by(|x| x[..2].to_string()).rev(), - ); - let ys_rev = ["cccc", "aaaaa", "bbbb"]; - it::assert_equal( - ys_rev.iter(), - xs.iter().unique_by(|x| x[..2].to_string()).rev(), - ); -} - -#[test] -fn unique() { - let xs = [0, 1, 2, 3, 2, 1, 3]; - let ys = [0, 1, 2, 3]; - it::assert_equal(ys.iter(), xs.iter().unique()); - it::assert_equal(ys.iter(), xs.iter().rev().unique().rev()); - let ys_rev = [3, 1, 2, 0]; - it::assert_equal(ys_rev.iter(), xs.iter().unique().rev()); - - let xs = [0, 1]; - let ys = [0, 1]; - it::assert_equal(ys.iter(), xs.iter().unique()); - it::assert_equal(ys.iter(), xs.iter().rev().unique().rev()); - let ys_rev = [1, 0]; - it::assert_equal(ys_rev.iter(), xs.iter().unique().rev()); -} - -#[test] -fn intersperse() { - let xs = ["a", "", "b", "c"]; - let v: Vec<&str> = xs.iter().cloned().intersperse(", ").collect(); - let text: String = v.concat(); - assert_eq!(text, "a, , b, c".to_string()); - - let ys = [0, 1, 2, 3]; - let mut it = ys[..0].iter().copied().intersperse(1); - assert!(it.next().is_none()); -} - -#[test] -fn dedup() { - 
let xs = [0, 1, 1, 1, 2, 1, 3, 3]; - let ys = [0, 1, 2, 1, 3]; - it::assert_equal(ys.iter(), xs.iter().dedup()); - let xs = [0, 0, 0, 0, 0]; - let ys = [0]; - it::assert_equal(ys.iter(), xs.iter().dedup()); - - let xs = [0, 1, 1, 1, 2, 1, 3, 3]; - let ys = [0, 1, 2, 1, 3]; - let mut xs_d = Vec::new(); - xs.iter().dedup().fold((), |(), &elt| xs_d.push(elt)); - assert_eq!(&xs_d, &ys); -} - -#[test] -fn coalesce() { - let data = [-1., -2., -3., 3., 1., 0., -1.]; - let it = data.iter().cloned().coalesce(|x, y| { - if (x >= 0.) == (y >= 0.) { - Ok(x + y) - } else { - Err((x, y)) - } - }); - itertools::assert_equal(it.clone(), vec![-6., 4., -1.]); - assert_eq!( - it.fold(vec![], |mut v, n| { - v.push(n); - v - }), - vec![-6., 4., -1.] - ); -} - -#[test] -fn dedup_by() { - let xs = [ - (0, 0), - (0, 1), - (1, 1), - (2, 1), - (0, 2), - (3, 1), - (0, 3), - (1, 3), - ]; - let ys = [(0, 0), (0, 1), (0, 2), (3, 1), (0, 3)]; - it::assert_equal(ys.iter(), xs.iter().dedup_by(|x, y| x.1 == y.1)); - let xs = [(0, 1), (0, 2), (0, 3), (0, 4), (0, 5)]; - let ys = [(0, 1)]; - it::assert_equal(ys.iter(), xs.iter().dedup_by(|x, y| x.0 == y.0)); - - let xs = [ - (0, 0), - (0, 1), - (1, 1), - (2, 1), - (0, 2), - (3, 1), - (0, 3), - (1, 3), - ]; - let ys = [(0, 0), (0, 1), (0, 2), (3, 1), (0, 3)]; - let mut xs_d = Vec::new(); - xs.iter() - .dedup_by(|x, y| x.1 == y.1) - .fold((), |(), &elt| xs_d.push(elt)); - assert_eq!(&xs_d, &ys); -} - -#[test] -fn dedup_with_count() { - let xs: [i32; 8] = [0, 1, 1, 1, 2, 1, 3, 3]; - let ys: [(usize, &i32); 5] = [(1, &0), (3, &1), (1, &2), (1, &1), (2, &3)]; - - it::assert_equal(ys.iter().cloned(), xs.iter().dedup_with_count()); - - let xs: [i32; 5] = [0, 0, 0, 0, 0]; - let ys: [(usize, &i32); 1] = [(5, &0)]; - - it::assert_equal(ys.iter().cloned(), xs.iter().dedup_with_count()); -} - -#[test] -fn dedup_by_with_count() { - let xs = [ - (0, 0), - (0, 1), - (1, 1), - (2, 1), - (0, 2), - (3, 1), - (0, 3), - (1, 3), - ]; - let ys = [ - (1, &(0, 0)), - (3, &(0, 1)), - (1, &(0, 2)), - (1, &(3, 1)), - (2, &(0, 3)), - ]; - - it::assert_equal( - ys.iter().cloned(), - xs.iter().dedup_by_with_count(|x, y| x.1 == y.1), - ); - - let xs = [(0, 1), (0, 2), (0, 3), (0, 4), (0, 5)]; - let ys = [(5, &(0, 1))]; - - it::assert_equal( - ys.iter().cloned(), - xs.iter().dedup_by_with_count(|x, y| x.0 == y.0), - ); -} - -#[test] -fn all_equal() { - assert!("".chars().all_equal()); - assert!("A".chars().all_equal()); - assert!(!"AABBCCC".chars().all_equal()); - assert!("AAAAAAA".chars().all_equal()); - for (_key, mut sub) in &"AABBCCC".chars().chunk_by(|&x| x) { - assert!(sub.all_equal()); - } -} - -#[test] -fn all_equal_value() { - assert_eq!("".chars().all_equal_value(), Err(None)); - assert_eq!("A".chars().all_equal_value(), Ok('A')); - assert_eq!("AABBCCC".chars().all_equal_value(), Err(Some(('A', 'B')))); - assert_eq!("AAAAAAA".chars().all_equal_value(), Ok('A')); - { - let mut it = [1, 2, 3].iter().copied(); - let result = it.all_equal_value(); - assert_eq!(result, Err(Some((1, 2)))); - let remaining = it.next(); - assert_eq!(remaining, Some(3)); - assert!(it.next().is_none()); - } -} - -#[test] -fn all_unique() { - assert!("ABCDEFGH".chars().all_unique()); - assert!(!"ABCDEFGA".chars().all_unique()); - assert!(::std::iter::empty::().all_unique()); -} - -#[test] -fn test_put_back_n() { - let xs = [0, 1, 1, 1, 2, 1, 3, 3]; - let mut pb = put_back_n(xs.iter().cloned()); - pb.next(); - pb.next(); - pb.put_back(1); - pb.put_back(0); - it::assert_equal(pb, xs.iter().cloned()); -} - -#[test] -fn tee() { 
- let xs = [0, 1, 2, 3]; - let (mut t1, mut t2) = xs.iter().cloned().tee(); - assert_eq!(t1.next(), Some(0)); - assert_eq!(t2.next(), Some(0)); - assert_eq!(t1.next(), Some(1)); - assert_eq!(t1.next(), Some(2)); - assert_eq!(t1.next(), Some(3)); - assert_eq!(t1.next(), None); - assert_eq!(t2.next(), Some(1)); - assert_eq!(t2.next(), Some(2)); - assert_eq!(t1.next(), None); - assert_eq!(t2.next(), Some(3)); - assert_eq!(t2.next(), None); - assert_eq!(t1.next(), None); - assert_eq!(t2.next(), None); - - let (t1, t2) = xs.iter().cloned().tee(); - it::assert_equal(t1, xs.iter().cloned()); - it::assert_equal(t2, xs.iter().cloned()); - - let (t1, t2) = xs.iter().cloned().tee(); - it::assert_equal(t1.zip(t2), xs.iter().cloned().zip(xs.iter().cloned())); -} - -#[test] -fn test_rciter() { - let xs = [0, 1, 1, 1, 2, 1, 3, 5, 6]; - - let mut r1 = rciter(xs.iter().cloned()); - let mut r2 = r1.clone(); - assert_eq!(r1.next(), Some(0)); - assert_eq!(r2.next(), Some(1)); - let mut z = r1.zip(r2); - assert_eq!(z.next(), Some((1, 1))); - assert_eq!(z.next(), Some((2, 1))); - assert_eq!(z.next(), Some((3, 5))); - assert_eq!(z.next(), None); - - // test intoiterator - let r1 = rciter(0..5); - let mut z = izip!(&r1, r1); - assert_eq!(z.next(), Some((0, 1))); -} - -#[test] -fn trait_pointers() { - struct ByRef<'r, I: ?Sized>(&'r mut I); - - impl<'r, X, I> Iterator for ByRef<'r, I> - where - I: ?Sized + 'r + Iterator, - { - type Item = X; - fn next(&mut self) -> Option { - self.0.next() - } - } - - let mut it = Box::new(0..10) as Box>; - assert_eq!(it.next(), Some(0)); - - { - let jt: &mut dyn Iterator = &mut *it; - assert_eq!(jt.next(), Some(1)); - - { - let mut r = ByRef(jt); - assert_eq!(r.next(), Some(2)); - } - - assert_eq!(jt.find_position(|x| *x == 4), Some((1, 4))); - jt.for_each(|_| ()); - } -} - -#[test] -fn merge_by() { - let odd: Vec<(u32, &str)> = vec![(1, "hello"), (3, "world"), (5, "!")]; - let even = [(2, "foo"), (4, "bar"), (6, "baz")]; - let expected = [ - (1, "hello"), - (2, "foo"), - (3, "world"), - (4, "bar"), - (5, "!"), - (6, "baz"), - ]; - let results = odd.iter().merge_by(even.iter(), |a, b| a.0 <= b.0); - it::assert_equal(results, expected.iter()); -} - -#[test] -fn merge_by_btree() { - use std::collections::BTreeMap; - let mut bt1 = BTreeMap::new(); - bt1.insert("hello", 1); - bt1.insert("world", 3); - let mut bt2 = BTreeMap::new(); - bt2.insert("foo", 2); - bt2.insert("bar", 4); - let results = bt1.into_iter().merge_by(bt2, |a, b| a.0 <= b.0); - let expected = vec![("bar", 4), ("foo", 2), ("hello", 1), ("world", 3)]; - it::assert_equal(results, expected); -} - -#[test] -fn kmerge() { - let its = (0..4).map(|s| (s..10).step_by(4)); - - it::assert_equal(its.kmerge(), 0..10); -} - -#[test] -fn kmerge_2() { - let its = vec![3, 2, 1, 0].into_iter().map(|s| (s..10).step_by(4)); - - it::assert_equal(its.kmerge(), 0..10); -} - -#[test] -fn kmerge_empty() { - let its = (0..4).map(|_| 0..0); - assert_eq!(its.kmerge().next(), None); -} - -#[test] -fn kmerge_size_hint() { - let its = (0..5).map(|_| (0..10)); - assert_eq!(its.kmerge().size_hint(), (50, Some(50))); -} - -#[test] -fn kmerge_empty_size_hint() { - let its = (0..5).map(|_| (0..0)); - assert_eq!(its.kmerge().size_hint(), (0, Some(0))); -} - -#[test] -fn join() { - let many = [1, 2, 3]; - let one = [1]; - let none: Vec = vec![]; - - assert_eq!(many.iter().join(", "), "1, 2, 3"); - assert_eq!(one.iter().join(", "), "1"); - assert_eq!(none.iter().join(", "), ""); -} - -#[test] -fn sorted_unstable_by() { - let sc = [3, 4, 1, 
2].iter().cloned().sorted_by(|&a, &b| a.cmp(&b)); - it::assert_equal(sc, vec![1, 2, 3, 4]); - - let v = (0..5).sorted_unstable_by(|&a, &b| a.cmp(&b).reverse()); - it::assert_equal(v, vec![4, 3, 2, 1, 0]); -} - -#[test] -fn sorted_unstable_by_key() { - let sc = [3, 4, 1, 2].iter().cloned().sorted_unstable_by_key(|&x| x); - it::assert_equal(sc, vec![1, 2, 3, 4]); - - let v = (0..5).sorted_unstable_by_key(|&x| -x); - it::assert_equal(v, vec![4, 3, 2, 1, 0]); -} - -#[test] -fn sorted_by() { - let sc = [3, 4, 1, 2].iter().cloned().sorted_by(|&a, &b| a.cmp(&b)); - it::assert_equal(sc, vec![1, 2, 3, 4]); - - let v = (0..5).sorted_by(|&a, &b| a.cmp(&b).reverse()); - it::assert_equal(v, vec![4, 3, 2, 1, 0]); -} - -qc::quickcheck! { - fn k_smallest_range(n: i64, m: u16, k: u16) -> () { - // u16 is used to constrain k and m to 0..2¹⁶, - // otherwise the test could use too much memory. - let (k, m) = (k as usize, m as u64); - - let mut v: Vec<_> = (n..n.saturating_add(m as _)).collect(); - // Generate a random permutation of n..n+m - v.shuffle(&mut thread_rng()); - - // Construct the right answers for the top and bottom elements - let mut sorted = v.clone(); - sorted.sort(); - // how many elements are we checking - let num_elements = min(k, m as _); - - // Compute the top and bottom k in various combinations - let sorted_smallest = sorted[..num_elements].iter().cloned(); - let smallest = v.iter().cloned().k_smallest(k); - let smallest_by = v.iter().cloned().k_smallest_by(k, Ord::cmp); - let smallest_by_key = v.iter().cloned().k_smallest_by_key(k, |&x| x); - - let sorted_largest = sorted[sorted.len() - num_elements..].iter().rev().cloned(); - let largest = v.iter().cloned().k_largest(k); - let largest_by = v.iter().cloned().k_largest_by(k, Ord::cmp); - let largest_by_key = v.iter().cloned().k_largest_by_key(k, |&x| x); - - // Check the variations produce the same answers and that they're right - it::assert_equal(smallest, sorted_smallest.clone()); - it::assert_equal(smallest_by, sorted_smallest.clone()); - it::assert_equal(smallest_by_key, sorted_smallest); - - it::assert_equal(largest, sorted_largest.clone()); - it::assert_equal(largest_by, sorted_largest.clone()); - it::assert_equal(largest_by_key, sorted_largest); - } -} - -#[derive(Clone, Debug)] -struct RandIter { - idx: usize, - len: usize, - rng: R, - _t: PhantomData, -} - -impl Iterator for RandIter -where - Standard: Distribution, -{ - type Item = T; - fn next(&mut self) -> Option { - if self.idx == self.len { - None - } else { - self.idx += 1; - Some(self.rng.gen()) - } - } -} - -impl qc::Arbitrary for RandIter { - fn arbitrary(g: &mut G) -> Self { - Self { - idx: 0, - len: g.size(), - rng: R::seed_from_u64(g.next_u64()), - _t: PhantomData {}, - } - } -} - -// Check that taking the k smallest is the same as -// sorting then taking the k first elements -fn k_smallest_sort(i: I, k: u16) -where - I: Iterator + Clone, - I::Item: Ord + Debug, -{ - let j = i.clone(); - let k = k as usize; - it::assert_equal(i.k_smallest(k), j.sorted().take(k)) -} - -// Similar to `k_smallest_sort` but for our custom heap implementation. -fn k_smallest_by_sort(i: I, k: u16) -where - I: Iterator + Clone, - I::Item: Ord + Debug, -{ - let j = i.clone(); - let k = k as usize; - it::assert_equal(i.k_smallest_by(k, Ord::cmp), j.sorted().take(k)) -} - -macro_rules! generic_test { - ($f:ident, $($t:ty),+) => { - $(paste::item! { - qc::quickcheck! 
{ - fn [< $f _ $t >](i: RandIter<$t>, k: u16) -> () { - $f(i, k) - } - } - })+ - }; -} - -generic_test!(k_smallest_sort, u8, u16, u32, u64, i8, i16, i32, i64); -generic_test!(k_smallest_by_sort, u8, u16, u32, u64, i8, i16, i32, i64); - -#[test] -fn sorted_by_key() { - let sc = [3, 4, 1, 2].iter().cloned().sorted_by_key(|&x| x); - it::assert_equal(sc, vec![1, 2, 3, 4]); - - let v = (0..5).sorted_by_key(|&x| -x); - it::assert_equal(v, vec![4, 3, 2, 1, 0]); -} - -#[test] -fn sorted_by_cached_key() { - // Track calls to key function - let mut ncalls = 0; - - let sorted = [3, 4, 1, 2].iter().cloned().sorted_by_cached_key(|&x| { - ncalls += 1; - x.to_string() - }); - it::assert_equal(sorted, vec![1, 2, 3, 4]); - // Check key function called once per element - assert_eq!(ncalls, 4); - - let mut ncalls = 0; - - let sorted = (0..5).sorted_by_cached_key(|&x| { - ncalls += 1; - -x - }); - it::assert_equal(sorted, vec![4, 3, 2, 1, 0]); - // Check key function called once per element - assert_eq!(ncalls, 5); -} - -#[test] -fn test_multipeek() { - let nums = vec![1u8, 2, 3, 4, 5]; - - let mp = multipeek(nums.iter().copied()); - assert_eq!(nums, mp.collect::>()); - - let mut mp = multipeek(nums.iter().copied()); - assert_eq!(mp.peek(), Some(&1)); - assert_eq!(mp.next(), Some(1)); - assert_eq!(mp.peek(), Some(&2)); - assert_eq!(mp.peek(), Some(&3)); - assert_eq!(mp.next(), Some(2)); - assert_eq!(mp.peek(), Some(&3)); - assert_eq!(mp.peek(), Some(&4)); - assert_eq!(mp.peek(), Some(&5)); - assert_eq!(mp.peek(), None); - assert_eq!(mp.next(), Some(3)); - assert_eq!(mp.next(), Some(4)); - assert_eq!(mp.peek(), Some(&5)); - assert_eq!(mp.peek(), None); - assert_eq!(mp.next(), Some(5)); - assert_eq!(mp.next(), None); - assert_eq!(mp.peek(), None); -} - -#[test] -fn test_multipeek_reset() { - let data = [1, 2, 3, 4]; - - let mut mp = multipeek(cloned(&data)); - assert_eq!(mp.peek(), Some(&1)); - assert_eq!(mp.next(), Some(1)); - assert_eq!(mp.peek(), Some(&2)); - assert_eq!(mp.peek(), Some(&3)); - mp.reset_peek(); - assert_eq!(mp.peek(), Some(&2)); - assert_eq!(mp.next(), Some(2)); -} - -#[test] -fn test_multipeek_peeking_next() { - use crate::it::PeekingNext; - let nums = [1u8, 2, 3, 4, 5, 6, 7]; - - let mut mp = multipeek(nums.iter().copied()); - assert_eq!(mp.peeking_next(|&x| x != 0), Some(1)); - assert_eq!(mp.next(), Some(2)); - assert_eq!(mp.peek(), Some(&3)); - assert_eq!(mp.peek(), Some(&4)); - assert_eq!(mp.peeking_next(|&x| x == 3), Some(3)); - assert_eq!(mp.peek(), Some(&4)); - assert_eq!(mp.peeking_next(|&x| x != 4), None); - assert_eq!(mp.peeking_next(|&x| x == 4), Some(4)); - assert_eq!(mp.peek(), Some(&5)); - assert_eq!(mp.peek(), Some(&6)); - assert_eq!(mp.peeking_next(|&x| x != 5), None); - assert_eq!(mp.peek(), Some(&7)); - assert_eq!(mp.peeking_next(|&x| x == 5), Some(5)); - assert_eq!(mp.peeking_next(|&x| x == 6), Some(6)); - assert_eq!(mp.peek(), Some(&7)); - assert_eq!(mp.peek(), None); - assert_eq!(mp.next(), Some(7)); - assert_eq!(mp.peek(), None); -} - -#[test] -fn test_repeat_n_peeking_next() { - use crate::it::PeekingNext; - let mut rn = repeat_n(0, 5); - assert_eq!(rn.peeking_next(|&x| x != 0), None); - assert_eq!(rn.peeking_next(|&x| x <= 0), Some(0)); - assert_eq!(rn.next(), Some(0)); - assert_eq!(rn.peeking_next(|&x| x <= 0), Some(0)); - assert_eq!(rn.peeking_next(|&x| x != 0), None); - assert_eq!(rn.peeking_next(|&x| x >= 0), Some(0)); - assert_eq!(rn.next(), Some(0)); - assert_eq!(rn.peeking_next(|&x| x <= 0), None); - assert_eq!(rn.next(), None); -} - -#[test] -fn 
test_peek_nth() { - let nums = vec![1u8, 2, 3, 4, 5]; - - let iter = peek_nth(nums.iter().copied()); - assert_eq!(nums, iter.collect::>()); - - let mut iter = peek_nth(nums.iter().copied()); - - assert_eq!(iter.peek_nth(0), Some(&1)); - assert_eq!(iter.peek_nth(0), Some(&1)); - assert_eq!(iter.next(), Some(1)); - - assert_eq!(iter.peek_nth(0), Some(&2)); - assert_eq!(iter.peek_nth(1), Some(&3)); - assert_eq!(iter.next(), Some(2)); - - assert_eq!(iter.peek_nth(0), Some(&3)); - assert_eq!(iter.peek_nth(1), Some(&4)); - assert_eq!(iter.peek_nth(2), Some(&5)); - assert_eq!(iter.peek_nth(3), None); - - assert_eq!(iter.next(), Some(3)); - assert_eq!(iter.next(), Some(4)); - - assert_eq!(iter.peek_nth(0), Some(&5)); - assert_eq!(iter.peek_nth(1), None); - assert_eq!(iter.next(), Some(5)); - assert_eq!(iter.next(), None); - - assert_eq!(iter.peek_nth(0), None); - assert_eq!(iter.peek_nth(1), None); -} - -#[test] -fn test_peek_nth_peeking_next() { - use it::PeekingNext; - let nums = [1u8, 2, 3, 4, 5, 6, 7]; - let mut iter = peek_nth(nums.iter().copied()); - - assert_eq!(iter.peeking_next(|&x| x != 0), Some(1)); - assert_eq!(iter.next(), Some(2)); - - assert_eq!(iter.peek_nth(0), Some(&3)); - assert_eq!(iter.peek_nth(1), Some(&4)); - assert_eq!(iter.peeking_next(|&x| x == 3), Some(3)); - assert_eq!(iter.peek(), Some(&4)); - - assert_eq!(iter.peeking_next(|&x| x != 4), None); - assert_eq!(iter.peeking_next(|&x| x == 4), Some(4)); - assert_eq!(iter.peek_nth(0), Some(&5)); - assert_eq!(iter.peek_nth(1), Some(&6)); - - assert_eq!(iter.peeking_next(|&x| x != 5), None); - assert_eq!(iter.peek(), Some(&5)); - - assert_eq!(iter.peeking_next(|&x| x == 5), Some(5)); - assert_eq!(iter.peeking_next(|&x| x == 6), Some(6)); - assert_eq!(iter.peek_nth(0), Some(&7)); - assert_eq!(iter.peek_nth(1), None); - assert_eq!(iter.next(), Some(7)); - assert_eq!(iter.peek(), None); -} - -#[test] -fn test_peek_nth_next_if() { - let nums = [1u8, 2, 3, 4, 5, 6, 7]; - let mut iter = peek_nth(nums.iter().copied()); - - assert_eq!(iter.next_if(|&x| x != 0), Some(1)); - assert_eq!(iter.next(), Some(2)); - - assert_eq!(iter.peek_nth(0), Some(&3)); - assert_eq!(iter.peek_nth(1), Some(&4)); - assert_eq!(iter.next_if_eq(&3), Some(3)); - assert_eq!(iter.peek(), Some(&4)); - - assert_eq!(iter.next_if(|&x| x != 4), None); - assert_eq!(iter.next_if_eq(&4), Some(4)); - assert_eq!(iter.peek_nth(0), Some(&5)); - assert_eq!(iter.peek_nth(1), Some(&6)); - - assert_eq!(iter.next_if(|&x| x != 5), None); - assert_eq!(iter.peek(), Some(&5)); - - assert_eq!(iter.next_if(|&x| x % 2 == 1), Some(5)); - assert_eq!(iter.next_if_eq(&6), Some(6)); - assert_eq!(iter.peek_nth(0), Some(&7)); - assert_eq!(iter.peek_nth(1), None); - assert_eq!(iter.next(), Some(7)); - assert_eq!(iter.peek(), None); -} - -#[test] -fn pad_using() { - it::assert_equal((0..0).pad_using(1, |_| 1), 1..2); - - let v: Vec = vec![0, 1, 2]; - let r = v.into_iter().pad_using(5, |n| n); - it::assert_equal(r, vec![0, 1, 2, 3, 4]); - - let v: Vec = vec![0, 1, 2]; - let r = v.into_iter().pad_using(1, |_| panic!()); - it::assert_equal(r, vec![0, 1, 2]); -} - -#[test] -fn chunk_by() { - for (ch1, sub) in &"AABBCCC".chars().chunk_by(|&x| x) { - for ch2 in sub { - assert_eq!(ch1, ch2); - } - } - - for (ch1, sub) in &"AAABBBCCCCDDDD".chars().chunk_by(|&x| x) { - for ch2 in sub { - assert_eq!(ch1, ch2); - if ch1 == 'C' { - break; - } - } - } - - let toupper = |ch: &char| ch.to_uppercase().next().unwrap(); - - // try all possible orderings - for indices in permutohedron::Heap::new(&mut [0, 1, 2, 3]) 
{ - let chunks = "AaaBbbccCcDDDD".chars().chunk_by(&toupper); - let mut subs = chunks.into_iter().collect_vec(); - - for &idx in &indices[..] { - let (key, text) = match idx { - 0 => ('A', "Aaa".chars()), - 1 => ('B', "Bbb".chars()), - 2 => ('C', "ccCc".chars()), - 3 => ('D', "DDDD".chars()), - _ => unreachable!(), - }; - assert_eq!(key, subs[idx].0); - it::assert_equal(&mut subs[idx].1, text); - } - } - - let chunks = "AAABBBCCCCDDDD".chars().chunk_by(|&x| x); - let mut subs = chunks.into_iter().map(|(_, g)| g).collect_vec(); - - let sd = subs.pop().unwrap(); - let sc = subs.pop().unwrap(); - let sb = subs.pop().unwrap(); - let sa = subs.pop().unwrap(); - for (a, b, c, d) in multizip((sa, sb, sc, sd)) { - assert_eq!(a, 'A'); - assert_eq!(b, 'B'); - assert_eq!(c, 'C'); - assert_eq!(d, 'D'); - } - - // check that the key closure is called exactly n times - { - let mut ntimes = 0; - let text = "AABCCC"; - for (_, sub) in &text.chars().chunk_by(|&x| { - ntimes += 1; - x - }) { - for _ in sub {} - } - assert_eq!(ntimes, text.len()); - } - - { - let mut ntimes = 0; - let text = "AABCCC"; - for _ in &text.chars().chunk_by(|&x| { - ntimes += 1; - x - }) {} - assert_eq!(ntimes, text.len()); - } - - { - let text = "ABCCCDEEFGHIJJKK"; - let gr = text.chars().chunk_by(|&x| x); - it::assert_equal(gr.into_iter().flat_map(|(_, sub)| sub), text.chars()); - } -} - -#[test] -fn chunk_by_lazy_2() { - let data = [0, 1]; - let chunks = data.iter().chunk_by(|k| *k); - let gs = chunks.into_iter().collect_vec(); - it::assert_equal(data.iter(), gs.into_iter().flat_map(|(_k, g)| g)); - - let data = [0, 1, 1, 0, 0]; - let chunks = data.iter().chunk_by(|k| *k); - let mut gs = chunks.into_iter().collect_vec(); - gs[1..].reverse(); - it::assert_equal(&[0, 0, 0, 1, 1], gs.into_iter().flat_map(|(_, g)| g)); - - let grouper = data.iter().chunk_by(|k| *k); - let mut chunks = Vec::new(); - for (k, chunk) in &grouper { - if *k == 1 { - chunks.push(chunk); - } - } - it::assert_equal(&mut chunks[0], &[1, 1]); - - let data = [0, 0, 0, 1, 1, 0, 0, 2, 2, 3, 3]; - let grouper = data.iter().chunk_by(|k| *k); - let mut chunks = Vec::new(); - for (i, (_, chunk)) in grouper.into_iter().enumerate() { - if i < 2 { - chunks.push(chunk); - } else if i < 4 { - for _ in chunk {} - } else { - chunks.push(chunk); - } - } - it::assert_equal(&mut chunks[0], &[0, 0, 0]); - it::assert_equal(&mut chunks[1], &[1, 1]); - it::assert_equal(&mut chunks[2], &[3, 3]); - - let data = [0, 0, 0, 1, 1, 0, 0, 2, 2, 3, 3]; - let mut i = 0; - let grouper = data.iter().chunk_by(move |_| { - let k = i / 3; - i += 1; - k - }); - for (i, chunk) in &grouper { - match i { - 0 => it::assert_equal(chunk, &[0, 0, 0]), - 1 => it::assert_equal(chunk, &[1, 1, 0]), - 2 => it::assert_equal(chunk, &[0, 2, 2]), - 3 => it::assert_equal(chunk, &[3, 3]), - _ => unreachable!(), - } - } -} - -#[test] -fn chunk_by_lazy_3() { - // test consuming each chunk on the lap after it was produced - let data = [0, 0, 0, 1, 1, 0, 0, 1, 1, 2, 2]; - let grouper = data.iter().chunk_by(|elt| *elt); - let mut last = None; - for (key, chunk) in &grouper { - if let Some(gr) = last.take() { - for elt in gr { - assert!(elt != key && i32::abs(elt - key) == 1); - } - } - last = Some(chunk); - } -} - -#[test] -fn chunks() { - let data = [0, 0, 0, 1, 1, 0, 0, 2, 2, 3, 3]; - let grouper = data.iter().chunks(3); - for (i, chunk) in grouper.into_iter().enumerate() { - match i { - 0 => it::assert_equal(chunk, &[0, 0, 0]), - 1 => it::assert_equal(chunk, &[1, 1, 0]), - 2 => it::assert_equal(chunk, &[0, 2, 2]), 
- 3 => it::assert_equal(chunk, &[3, 3]), - _ => unreachable!(), - } - } -} - -#[test] -fn concat_empty() { - let data: Vec> = Vec::new(); - assert_eq!(data.into_iter().concat(), Vec::new()) -} - -#[test] -fn concat_non_empty() { - let data = vec![vec![1, 2, 3], vec![4, 5, 6], vec![7, 8, 9]]; - assert_eq!(data.into_iter().concat(), vec![1, 2, 3, 4, 5, 6, 7, 8, 9]) -} - -#[test] -fn combinations() { - assert!((1..3).combinations(5).next().is_none()); - - let it = (1..3).combinations(2); - it::assert_equal(it, vec![vec![1, 2]]); - - let it = (1..5).combinations(2); - it::assert_equal( - it, - vec![ - vec![1, 2], - vec![1, 3], - vec![1, 4], - vec![2, 3], - vec![2, 4], - vec![3, 4], - ], - ); - - it::assert_equal((0..0).tuple_combinations::<(_, _)>(), >::new()); - it::assert_equal((0..1).tuple_combinations::<(_, _)>(), >::new()); - it::assert_equal((0..2).tuple_combinations::<(_, _)>(), vec![(0, 1)]); - - it::assert_equal((0..0).combinations(2), >>::new()); - it::assert_equal((0..1).combinations(1), vec![vec![0]]); - it::assert_equal((0..2).combinations(1), vec![vec![0], vec![1]]); - it::assert_equal((0..2).combinations(2), vec![vec![0, 1]]); -} - -#[test] -fn combinations_of_too_short() { - for i in 1..10 { - assert!((0..0).combinations(i).next().is_none()); - assert!((0..i - 1).combinations(i).next().is_none()); - } -} - -#[test] -fn combinations_zero() { - it::assert_equal((1..3).combinations(0), vec![vec![]]); - it::assert_equal((0..0).combinations(0), vec![vec![]]); -} - -fn binomial(n: usize, k: usize) -> usize { - if k > n { - 0 - } else { - (n - k + 1..=n).product::() / (1..=k).product::() - } -} - -#[test] -fn combinations_range_count() { - for n in 0..=10 { - for k in 0..=10 { - let len = binomial(n, k); - let mut it = (0..n).combinations(k); - assert_eq!(len, it.clone().count()); - assert_eq!(len, it.size_hint().0); - assert_eq!(Some(len), it.size_hint().1); - for count in (0..len).rev() { - let elem = it.next(); - assert!(elem.is_some()); - assert_eq!(count, it.clone().count()); - assert_eq!(count, it.size_hint().0); - assert_eq!(Some(count), it.size_hint().1); - } - let should_be_none = it.next(); - assert!(should_be_none.is_none()); - } - } -} - -#[test] -fn combinations_inexact_size_hints() { - for k in 0..=10 { - let mut numbers = (0..18).filter(|i| i % 2 == 0); // 9 elements - let mut it = numbers.clone().combinations(k); - let real_n = numbers.clone().count(); - let len = binomial(real_n, k); - assert_eq!(len, it.clone().count()); - - let mut nb_loaded = 0; - let sh = numbers.size_hint(); - assert_eq!(binomial(sh.0 + nb_loaded, k), it.size_hint().0); - assert_eq!(sh.1.map(|n| binomial(n + nb_loaded, k)), it.size_hint().1); - - for next_count in 1..=len { - let elem = it.next(); - assert!(elem.is_some()); - assert_eq!(len - next_count, it.clone().count()); - if next_count == 1 { - // The very first time, the lazy buffer is prefilled. - nb_loaded = numbers.by_ref().take(k).count(); - } else { - // Then it loads one item each time until exhausted. - let nb = numbers.next(); - if nb.is_some() { - nb_loaded += 1; - } - } - let sh = numbers.size_hint(); - if next_count > real_n - k + 1 { - assert_eq!(0, sh.0); - assert_eq!(Some(0), sh.1); - assert_eq!(real_n, nb_loaded); - // Once it's fully loaded, size hints of `it` are exacts. 
- } - assert_eq!(binomial(sh.0 + nb_loaded, k) - next_count, it.size_hint().0); - assert_eq!( - sh.1.map(|n| binomial(n + nb_loaded, k) - next_count), - it.size_hint().1 - ); - } - let should_be_none = it.next(); - assert!(should_be_none.is_none()); - } -} - -#[test] -fn permutations_zero() { - it::assert_equal((1..3).permutations(0), vec![vec![]]); - it::assert_equal((0..0).permutations(0), vec![vec![]]); -} - -#[test] -fn permutations_range_count() { - for n in 0..=7 { - for k in 0..=7 { - let len = if k <= n { (n - k + 1..=n).product() } else { 0 }; - let mut it = (0..n).permutations(k); - assert_eq!(len, it.clone().count()); - assert_eq!(len, it.size_hint().0); - assert_eq!(Some(len), it.size_hint().1); - for count in (0..len).rev() { - let elem = it.next(); - assert!(elem.is_some()); - assert_eq!(count, it.clone().count()); - assert_eq!(count, it.size_hint().0); - assert_eq!(Some(count), it.size_hint().1); - } - let should_be_none = it.next(); - assert!(should_be_none.is_none()); - } - } -} - -#[test] -fn permutations_overflowed_size_hints() { - let mut it = std::iter::repeat(()).permutations(2); - assert_eq!(it.size_hint().0, usize::MAX); - assert_eq!(it.size_hint().1, None); - for nb_generated in 1..=1000 { - it.next(); - assert!(it.size_hint().0 >= usize::MAX - nb_generated); - assert_eq!(it.size_hint().1, None); - } -} - -#[test] -fn combinations_with_replacement() { - // Pool smaller than n - it::assert_equal((0..1).combinations_with_replacement(2), vec![vec![0, 0]]); - // Pool larger than n - it::assert_equal( - (0..3).combinations_with_replacement(2), - vec![ - vec![0, 0], - vec![0, 1], - vec![0, 2], - vec![1, 1], - vec![1, 2], - vec![2, 2], - ], - ); - // Zero size - it::assert_equal((0..3).combinations_with_replacement(0), vec![vec![]]); - // Zero size on empty pool - it::assert_equal((0..0).combinations_with_replacement(0), vec![vec![]]); - // Empty pool - it::assert_equal( - (0..0).combinations_with_replacement(2), - >>::new(), - ); -} - -#[test] -fn combinations_with_replacement_range_count() { - for n in 0..=7 { - for k in 0..=7 { - let len = binomial(usize::saturating_sub(n + k, 1), k); - let mut it = (0..n).combinations_with_replacement(k); - assert_eq!(len, it.clone().count()); - assert_eq!(len, it.size_hint().0); - assert_eq!(Some(len), it.size_hint().1); - for count in (0..len).rev() { - let elem = it.next(); - assert!(elem.is_some()); - assert_eq!(count, it.clone().count()); - assert_eq!(count, it.size_hint().0); - assert_eq!(Some(count), it.size_hint().1); - } - let should_be_none = it.next(); - assert!(should_be_none.is_none()); - } - } -} - -#[test] -fn powerset() { - it::assert_equal((0..0).powerset(), vec![vec![]]); - it::assert_equal((0..1).powerset(), vec![vec![], vec![0]]); - it::assert_equal( - (0..2).powerset(), - vec![vec![], vec![0], vec![1], vec![0, 1]], - ); - it::assert_equal( - (0..3).powerset(), - vec![ - vec![], - vec![0], - vec![1], - vec![2], - vec![0, 1], - vec![0, 2], - vec![1, 2], - vec![0, 1, 2], - ], - ); - - assert_eq!((0..4).powerset().count(), 1 << 4); - assert_eq!((0..8).powerset().count(), 1 << 8); - assert_eq!((0..16).powerset().count(), 1 << 16); - - for n in 0..=10 { - let mut it = (0..n).powerset(); - let len = 2_usize.pow(n); - assert_eq!(len, it.clone().count()); - assert_eq!(len, it.size_hint().0); - assert_eq!(Some(len), it.size_hint().1); - for count in (0..len).rev() { - let elem = it.next(); - assert!(elem.is_some()); - assert_eq!(count, it.clone().count()); - assert_eq!(count, it.size_hint().0); - assert_eq!(Some(count), 
it.size_hint().1); - } - let should_be_none = it.next(); - assert!(should_be_none.is_none()); - } -} - -#[test] -fn diff_mismatch() { - let a = [1, 2, 3, 4]; - let b = vec![1.0, 5.0, 3.0, 4.0]; - let b_map = b.into_iter().map(|f| f as i32); - let diff = it::diff_with(a.iter(), b_map, |a, b| *a == b); - - assert!(match diff { - Some(it::Diff::FirstMismatch(1, _, from_diff)) => - from_diff.collect::>() == vec![5, 3, 4], - _ => false, - }); -} - -#[test] -fn diff_longer() { - let a = [1, 2, 3, 4]; - let b = vec![1.0, 2.0, 3.0, 4.0, 5.0, 6.0]; - let b_map = b.into_iter().map(|f| f as i32); - let diff = it::diff_with(a.iter(), b_map, |a, b| *a == b); - - assert!(match diff { - Some(it::Diff::Longer(_, remaining)) => remaining.collect::>() == vec![5, 6], - _ => false, - }); -} - -#[test] -fn diff_shorter() { - let a = [1, 2, 3, 4]; - let b = vec![1.0, 2.0]; - let b_map = b.into_iter().map(|f| f as i32); - let diff = it::diff_with(a.iter(), b_map, |a, b| *a == b); - - assert!(match diff { - Some(it::Diff::Shorter(len, _)) => len == 2, - _ => false, - }); -} - -#[test] -fn extrema_set() { - use std::cmp::Ordering; - - // A peculiar type: Equality compares both tuple items, but ordering only the - // first item. Used to distinguish equal elements. - #[derive(Clone, Debug, PartialEq, Eq)] - struct Val(u32, u32); - - impl PartialOrd for Val { - fn partial_cmp(&self, other: &Self) -> Option { - Some(self.cmp(other)) - } - } - - impl Ord for Val { - fn cmp(&self, other: &Self) -> Ordering { - self.0.cmp(&other.0) - } - } - - assert_eq!(None::.iter().min_set(), Vec::<&u32>::new()); - assert_eq!(None::.iter().max_set(), Vec::<&u32>::new()); - - assert_eq!(Some(1u32).iter().min_set(), vec![&1]); - assert_eq!(Some(1u32).iter().max_set(), vec![&1]); - - let data = [Val(0, 1), Val(2, 0), Val(0, 2), Val(1, 0), Val(2, 1)]; - - let min_set = data.iter().min_set(); - assert_eq!(min_set, vec![&Val(0, 1), &Val(0, 2)]); - - let min_set_by_key = data.iter().min_set_by_key(|v| v.1); - assert_eq!(min_set_by_key, vec![&Val(2, 0), &Val(1, 0)]); - - let min_set_by = data.iter().min_set_by(|x, y| x.1.cmp(&y.1)); - assert_eq!(min_set_by, vec![&Val(2, 0), &Val(1, 0)]); - - let max_set = data.iter().max_set(); - assert_eq!(max_set, vec![&Val(2, 0), &Val(2, 1)]); - - let max_set_by_key = data.iter().max_set_by_key(|v| v.1); - assert_eq!(max_set_by_key, vec![&Val(0, 2)]); - - let max_set_by = data.iter().max_set_by(|x, y| x.1.cmp(&y.1)); - assert_eq!(max_set_by, vec![&Val(0, 2)]); -} - -#[test] -fn minmax() { - use crate::it::MinMaxResult; - use std::cmp::Ordering; - - // A peculiar type: Equality compares both tuple items, but ordering only the - // first item. This is so we can check the stability property easily. 
- #[derive(Clone, Debug, PartialEq, Eq)] - struct Val(u32, u32); - - impl PartialOrd for Val { - fn partial_cmp(&self, other: &Self) -> Option { - Some(self.cmp(other)) - } - } - - impl Ord for Val { - fn cmp(&self, other: &Self) -> Ordering { - self.0.cmp(&other.0) - } - } - - assert_eq!( - None::>.iter().minmax(), - MinMaxResult::NoElements - ); - - assert_eq!(Some(1u32).iter().minmax(), MinMaxResult::OneElement(&1)); - - let data = [Val(0, 1), Val(2, 0), Val(0, 2), Val(1, 0), Val(2, 1)]; - - let minmax = data.iter().minmax(); - assert_eq!(minmax, MinMaxResult::MinMax(&Val(0, 1), &Val(2, 1))); - - let (min, max) = data.iter().minmax_by_key(|v| v.1).into_option().unwrap(); - assert_eq!(min, &Val(2, 0)); - assert_eq!(max, &Val(0, 2)); - - let (min, max) = data - .iter() - .minmax_by(|x, y| x.1.cmp(&y.1)) - .into_option() - .unwrap(); - assert_eq!(min, &Val(2, 0)); - assert_eq!(max, &Val(0, 2)); -} - -#[test] -fn format() { - let data = [0, 1, 2, 3]; - let ans1 = "0, 1, 2, 3"; - let ans2 = "0--1--2--3"; - - let t1 = format!("{}", data.iter().format(", ")); - assert_eq!(t1, ans1); - let t2 = format!("{:?}", data.iter().format("--")); - assert_eq!(t2, ans2); - - let dataf = [1.1, 5.71828, -22.]; - let t3 = format!("{:.2e}", dataf.iter().format(", ")); - assert_eq!(t3, "1.10e0, 5.72e0, -2.20e1"); -} - -#[test] -fn while_some() { - let ns = (1..10) - .map(|x| if x % 5 != 0 { Some(x) } else { None }) - .while_some(); - it::assert_equal(ns, vec![1, 2, 3, 4]); -} - -#[test] -fn fold_while() { - let mut iterations = 0; - let vec = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10]; - let sum = vec - .into_iter() - .fold_while(0, |acc, item| { - iterations += 1; - let new_sum = acc + item; - if new_sum <= 20 { - FoldWhile::Continue(new_sum) - } else { - FoldWhile::Done(acc) - } - }) - .into_inner(); - assert_eq!(iterations, 6); - assert_eq!(sum, 15); -} - -#[test] -fn tree_reduce() { - let x = [ - "", - "0", - "0 1 x", - "0 1 x 2 x", - "0 1 x 2 3 x x", - "0 1 x 2 3 x x 4 x", - "0 1 x 2 3 x x 4 5 x x", - "0 1 x 2 3 x x 4 5 x 6 x x", - "0 1 x 2 3 x x 4 5 x 6 7 x x x", - "0 1 x 2 3 x x 4 5 x 6 7 x x x 8 x", - "0 1 x 2 3 x x 4 5 x 6 7 x x x 8 9 x x", - "0 1 x 2 3 x x 4 5 x 6 7 x x x 8 9 x 10 x x", - "0 1 x 2 3 x x 4 5 x 6 7 x x x 8 9 x 10 11 x x x", - "0 1 x 2 3 x x 4 5 x 6 7 x x x 8 9 x 10 11 x x 12 x x", - "0 1 x 2 3 x x 4 5 x 6 7 x x x 8 9 x 10 11 x x 12 13 x x x", - "0 1 x 2 3 x x 4 5 x 6 7 x x x 8 9 x 10 11 x x 12 13 x 14 x x x", - "0 1 x 2 3 x x 4 5 x 6 7 x x x 8 9 x 10 11 x x 12 13 x 14 15 x x x x", - ]; - for (i, &s) in x.iter().enumerate() { - let expected = if s.is_empty() { - None - } else { - Some(s.to_string()) - }; - let num_strings = (0..i).map(|x| x.to_string()); - let actual = num_strings.tree_reduce(|a, b| format!("{} {} x", a, b)); - assert_eq!(actual, expected); - } -} - -#[test] -fn exactly_one_question_mark_syntax_works() { - exactly_one_question_mark_return().unwrap_err(); -} - -fn exactly_one_question_mark_return() -> Result<(), ExactlyOneError>> -{ - [].iter().exactly_one()?; - Ok(()) -} - -#[test] -fn multiunzip() { - let (a, b, c): (Vec<_>, Vec<_>, Vec<_>) = [(0, 1, 2), (3, 4, 5), (6, 7, 8)] - .iter() - .cloned() - .multiunzip(); - assert_eq!((a, b, c), (vec![0, 3, 6], vec![1, 4, 7], vec![2, 5, 8])); - let (): () = [(), (), ()].iter().cloned().multiunzip(); - #[allow(clippy::type_complexity)] - let t: ( - Vec<_>, - Vec<_>, - Vec<_>, - Vec<_>, - Vec<_>, - Vec<_>, - Vec<_>, - Vec<_>, - Vec<_>, - Vec<_>, - Vec<_>, - Vec<_>, - ) = [(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11)] - .iter() - .cloned() - 
.multiunzip();
-    assert_eq!(
-        t,
-        (
-            vec![0],
-            vec![1],
-            vec![2],
-            vec![3],
-            vec![4],
-            vec![5],
-            vec![6],
-            vec![7],
-            vec![8],
-            vec![9],
-            vec![10],
-            vec![11]
-        )
-    );
-}
diff --git a/vendor/itertools/tests/tuples.rs b/vendor/itertools/tests/tuples.rs
deleted file mode 100644
index 9fc8b3cc78a566..00000000000000
--- a/vendor/itertools/tests/tuples.rs
+++ /dev/null
@@ -1,86 +0,0 @@
-use itertools::Itertools;
-
-#[test]
-fn tuples() {
-    let v = [1, 2, 3, 4, 5];
-    let mut iter = v.iter().cloned().tuples();
-    assert_eq!(Some((1,)), iter.next());
-    assert_eq!(Some((2,)), iter.next());
-    assert_eq!(Some((3,)), iter.next());
-    assert_eq!(Some((4,)), iter.next());
-    assert_eq!(Some((5,)), iter.next());
-    assert_eq!(None, iter.next());
-    assert_eq!(None, iter.into_buffer().next());
-
-    let mut iter = v.iter().cloned().tuples();
-    assert_eq!(Some((1, 2)), iter.next());
-    assert_eq!(Some((3, 4)), iter.next());
-    assert_eq!(None, iter.next());
-    itertools::assert_equal(vec![5], iter.into_buffer());
-
-    let mut iter = v.iter().cloned().tuples();
-    assert_eq!(Some((1, 2, 3)), iter.next());
-    assert_eq!(None, iter.next());
-    itertools::assert_equal(vec![4, 5], iter.into_buffer());
-
-    let mut iter = v.iter().cloned().tuples();
-    assert_eq!(Some((1, 2, 3, 4)), iter.next());
-    assert_eq!(None, iter.next());
-    itertools::assert_equal(vec![5], iter.into_buffer());
-}
-
-#[test]
-fn tuple_windows() {
-    let v = [1, 2, 3, 4, 5];
-
-    let mut iter = v.iter().cloned().tuple_windows();
-    assert_eq!(Some((1,)), iter.next());
-    assert_eq!(Some((2,)), iter.next());
-    assert_eq!(Some((3,)), iter.next());
-
-    let mut iter = v.iter().cloned().tuple_windows();
-    assert_eq!(Some((1, 2)), iter.next());
-    assert_eq!(Some((2, 3)), iter.next());
-    assert_eq!(Some((3, 4)), iter.next());
-    assert_eq!(Some((4, 5)), iter.next());
-    assert_eq!(None, iter.next());
-
-    let mut iter = v.iter().cloned().tuple_windows();
-    assert_eq!(Some((1, 2, 3)), iter.next());
-    assert_eq!(Some((2, 3, 4)), iter.next());
-    assert_eq!(Some((3, 4, 5)), iter.next());
-    assert_eq!(None, iter.next());
-
-    let mut iter = v.iter().cloned().tuple_windows();
-    assert_eq!(Some((1, 2, 3, 4)), iter.next());
-    assert_eq!(Some((2, 3, 4, 5)), iter.next());
-    assert_eq!(None, iter.next());
-
-    let v = [1, 2, 3];
-    let mut iter = v.iter().cloned().tuple_windows::<(_, _, _, _)>();
-    assert_eq!(None, iter.next());
-}
-
-#[test]
-fn next_tuple() {
-    let v = [1, 2, 3, 4, 5];
-    let mut iter = v.iter();
-    assert_eq!(iter.next_tuple().map(|(&x, &y)| (x, y)), Some((1, 2)));
-    assert_eq!(iter.next_tuple().map(|(&x, &y)| (x, y)), Some((3, 4)));
-    assert_eq!(iter.next_tuple::<(_, _)>(), None);
-}
-
-#[test]
-fn collect_tuple() {
-    let v = [1, 2];
-    let iter = v.iter().cloned();
-    assert_eq!(iter.collect_tuple(), Some((1, 2)));
-
-    let v = [1];
-    let iter = v.iter().cloned();
-    assert_eq!(iter.collect_tuple::<(_, _)>(), None);
-
-    let v = [1, 2, 3];
-    let iter = v.iter().cloned();
-    assert_eq!(iter.collect_tuple::<(_, _)>(), None);
-}
diff --git a/vendor/itertools/tests/zip.rs b/vendor/itertools/tests/zip.rs
deleted file mode 100644
index 716ac20b31dda4..00000000000000
--- a/vendor/itertools/tests/zip.rs
+++ /dev/null
@@ -1,56 +0,0 @@
-use itertools::multizip;
-use itertools::EitherOrBoth::{Both, Left, Right};
-use itertools::Itertools;
-
-#[test]
-fn zip_longest_fused() {
-    let a = [Some(1), None, Some(3), Some(4)];
-    let b = [1, 2, 3];
-
-    let unfused = a
-        .iter()
-        .batching(|it| *it.next().unwrap())
-        .zip_longest(b.iter().cloned());
-
itertools::assert_equal(unfused, vec![Both(1, 1), Right(2), Right(3)]); -} - -#[test] -fn test_zip_longest_size_hint() { - let c = (1..10).cycle(); - let v: &[_] = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; - let v2 = &[10, 11, 12]; - - assert_eq!(c.zip_longest(v.iter()).size_hint(), (std::usize::MAX, None)); - - assert_eq!(v.iter().zip_longest(v2.iter()).size_hint(), (10, Some(10))); -} - -#[test] -fn test_double_ended_zip_longest() { - let xs = [1, 2, 3, 4, 5, 6]; - let ys = [1, 2, 3, 7]; - let a = xs.iter().copied(); - let b = ys.iter().copied(); - let mut it = a.zip_longest(b); - assert_eq!(it.next(), Some(Both(1, 1))); - assert_eq!(it.next(), Some(Both(2, 2))); - assert_eq!(it.next_back(), Some(Left(6))); - assert_eq!(it.next_back(), Some(Left(5))); - assert_eq!(it.next_back(), Some(Both(4, 7))); - assert_eq!(it.next(), Some(Both(3, 3))); - assert_eq!(it.next(), None); -} - -#[test] -fn test_double_ended_zip() { - let xs = [1, 2, 3, 4, 5, 6]; - let ys = [1, 2, 3, 7]; - let a = xs.iter().copied(); - let b = ys.iter().copied(); - let mut it = multizip((a, b)); - assert_eq!(it.next_back(), Some((4, 7))); - assert_eq!(it.next_back(), Some((3, 3))); - assert_eq!(it.next_back(), Some((2, 2))); - assert_eq!(it.next_back(), Some((1, 1))); - assert_eq!(it.next_back(), None); -} diff --git a/vendor/libc/.cargo-checksum.json b/vendor/libc/.cargo-checksum.json deleted file mode 100644 index 0edc0b8ceb9f4d..00000000000000 --- a/vendor/libc/.cargo-checksum.json +++ /dev/null @@ -1 +0,0 @@ -{"files":{".cargo_vcs_info.json":"98fdce84ef32aa54b41de15fc9dbebbfe6fcdc8d3c03c17fed6f36f6bfc0843e",".editorconfig":"e57fecd6b82cd69640ca1bc4e44f0c7acfe5fc12f641f14af9536e323b4159db",".git-blame-ignore-revs":"761aa385c661241fa77c15b502c361398cf500bbb9f8c3a4579b412c4c6249d7",".release-plz.toml":"fcf2d382c4a2abd96caf9cc391b63e0c94d5832f5c48e9ab9eb4b2c847c0887c","CHANGELOG.md":"5dc77b4161d173b54837a0df9a25cc5f6dfbd7319918d2a3767527fe9920b210","CONTRIBUTING.md":"1cac4c47d46f83d06eeabfb7bf3a70b1a5405a913db1afa31c0e6387eb5bc189","Cargo.lock":"65aaca88ee856ff95e3cc6f25d79a6e8533e973665b4a67cc5d90bde9123cac7","Cargo.toml":"cbae2079ed7be2e12c340f4284e8987e7f532beb59aa6fcee2b7755b99c23fc7","Cargo.toml.orig":"1b4281b7d5468656703919278b880ed3a9f025a27fbadb7d9bd7d14940702b46","LICENSE-APACHE":"62c7a1e35f56406896d7aa7ca52d0cc0d272ac022b5d2796e7d6905db8a3636a","LICENSE-MIT":"a8d47ff51ca256f56a8932dba07660672dbfe3004257ca8de708aac1415937a1","README.md":"a198be444453fe0b24d4fb6a8b732eb9e6dc77ebcfd119fca95b97b45c77c77a","build.rs":"f01c77e53ffb343d5c8885e097183589bb91d1fbab78b14bc0a658694616f95c","cherry-pick-stable.sh":"c7d95e3cf2624115219edc224e3ee56484d3f05f8e157f92d5a0e3a735e51640","rustfmt.toml":"e9321ff995242e8cb0a4984254f0748ef11a89ca4618cab8f047ee59a88768e7","src/fuchsia/aarch64.rs":"9cd032304a54321a8926cf3157194c5d79a2491b1b974a05fe71729fa43b5738","src/fuchsia/mod.rs":"e0ed316a30662f2bd1cdf1c8013440c6f2688c48083326f58533565a588e00dd","src/fuchsia/riscv64.rs":"f2aba92cb88480bd72a9eb7a41aafc63c5db293b93113fa973fe5ab1fd819e29","src/fuchsia/x86_64.rs":"a6de0a3c9a45e7af2f0bac96d73da6a7cfb8c003335183016b08a25e2acab65b","src/hermit.rs":"f150c2882a4d8e510259697ff7685899e74bfabf84e5d37103c54b4387093775","src/lib.rs":"86c46729a494060c40a63f3732c5a4c1f3d757ba08d398c1ec3de2030c91a27b","src/macros.rs":"ba63c9018fe21b20dba1ceea97fb293860148d8bd8fbc7c0ab038ce2afcff1b2","src/new/bionic/mod.rs":"752e47b8a3c8cd3090e1af970f4c3e33e5fe5bb3f388bab8cfa66d99562dbfab","src/new/bionic/sys/mod.rs":"0fc3d4ff1c37a21f47c127e5b3818d6c115690e39082be9c3796b8ac2cc99
478","src/new/bionic/sys/socket.rs":"c11b51f13897a7fff420f28e93bef74ee084fc08cc0a3ab15d5b286656a0043c","src/new/linux_uapi/linux/can.rs":"5684eccda3fe635c94927bfcc3cc33e357b18a16b19b72ee6b27e0f6df72079f","src/new/linux_uapi/linux/can/j1939.rs":"ab2a330c3d3cca3ac6a693b381661772036e10a6dc0004db3809a4f42cf724ba","src/new/linux_uapi/linux/can/raw.rs":"cc39efa823b9f4d13bc0978ce51186e91d484c2bcfce4c6b0cbfa28a3a256488","src/new/linux_uapi/linux/mod.rs":"e9135b549d9427b99f5978ee4096e5b7d613104cf1a91433488536ce0af3f74f","src/new/linux_uapi/mod.rs":"7497197e880a36e9276110947611d1911bc2375316cd5969a18c83ac6f9be78d","src/new/mod.rs":"143ce9cb8b1f50d08594ec8d305ad1a9d663b3dac139a1a4b758ac5c25e8a14c","src/primitives.rs":"ea7e28520f5f3991ab444f9262cb8497a8794ced2cf5e1754795fe82ebed5fb7","src/psp.rs":"081cf4e5127ba10ebb62a9a5f8f849dd92742693db443db429d446ee472b5d41","src/sgx.rs":"964d6af358f5c85f948275090e5a7854e0169c43b5c338070b6a4cd156ebc9e6","src/solid/aarch64.rs":"4d4236500f98858fc249f3b6858af5009851c8c582031926b8195b2646f7da5e","src/solid/arm.rs":"4d4236500f98858fc249f3b6858af5009851c8c582031926b8195b2646f7da5e","src/solid/mod.rs":"0a89235e63d28a0e1a938243de862fe60bd3a3e9373c09c8c5cd42399b1c712e","src/switch.rs":"bfdcaf0268d79652ee52c1a2837959b8253e6a4124fd94dda82727ecc442a758","src/teeos/mod.rs":"d03cf399183ac40c6f74ce09787089007648d00b112c9fa8254723f0c2135c94","src/trusty.rs":"c5012aeefc4307c11374f062ad1d530e2ec556e7069e375de326a49c77f65e22","src/types.rs":"0d11841d8045deabf7bcded237a347978bd41e8e2fd227acc98400c383e221c6","src/unix/aix/mod.rs":"fec4d43917078c55debffe104e42d14dc66e039c7685566043a13ca42ebee072","src/unix/aix/powerpc64.rs":"45614bea9cf2732fca2a6d7f1bdc7d62eb2dcf2146993e1d726f677f6f4d3a47","src/unix/bsd/apple/b32/mod.rs":"56e90d43e36bcf0a4012072f92dc905dd40af386014b94c978f30b3bcbed8abf","src/unix/bsd/apple/b64/aarch64/mod.rs":"897be1845603876b2849a1fddf53ccd8a97b1156907f4833f7dfb0778e840d0f","src/unix/bsd/apple/b64/mod.rs":"75a313514fd3b9f21391ddb77f965386c36e99bbf4a4c952445e4e8d50bb16b5","src/unix/bsd/apple/b64/x86_64/mod.rs":"889efaf7baeca8ba2857fba1cba19c09dd1d27f3661b6687295f82caba716d9a","src/unix/bsd/apple/mod.rs":"be8da0a8b9c788f7f3302ad5578eae7cdeab88ea198918c3cd0545aae157b919","src/unix/bsd/freebsdlike/dragonfly/errno.rs":"07b19390b9ae8f541ac35fd4e14685d639b95152d6d7a33814bb749b8b927298","src/unix/bsd/freebsdlike/dragonfly/mod.rs":"dc6daf4a8e04ad504e2aea0d489457299d87c05a6e966283b7a40e343887a2b8","src/unix/bsd/freebsdlike/freebsd/aarch64.rs":"246e20e9a143d4ac81d37d940e020ede283f8df5aecd5d149b297664c4293a84","src/unix/bsd/freebsdlike/freebsd/arm.rs":"6e938534090f85040f7228b781ba57020c412d4f8f99d65ca5ce2a0a0baf93a1","src/unix/bsd/freebsdlike/freebsd/freebsd11/b32.rs":"951c9297ed31a13509716068bc04c202ace3cbca3cc485e3a7f6b2fefa06e396","src/unix/bsd/freebsdlike/freebsd/freebsd11/b64.rs":"62ae6e372b644a4270c0bb325edaac4a9e553ad83ffdcf2f4d8608a2ec2bae9c","src/unix/bsd/freebsdlike/freebsd/freebsd11/mod.rs":"16595db5aaf422e425ac9ba1e693aa0f176f743ca194a767f265e1b3cb1b3f22","src/unix/bsd/freebsdlike/freebsd/freebsd12/mod.rs":"8467832d4b8a73e473371f49a2d7b56f632b8f44262c43b5f409c4094cbad26a","src/unix/bsd/freebsdlike/freebsd/freebsd12/x86_64.rs":"64c4bd82eaf30a2681417f982fce28de7d1b0743bfaa14004692a56cee44be21","src/unix/bsd/freebsdlike/freebsd/freebsd13/mod.rs":"2027bae85dac0ca1cfc97f4304023c06174231af3547f33c3f4fed0dcf8c5732","src/unix/bsd/freebsdlike/freebsd/freebsd13/x86_64.rs":"64c4bd82eaf30a2681417f982fce28de7d1b0743bfaa14004692a56cee44be21","src/unix/bsd/freebsdlike/freebsd/freebsd
14/mod.rs":"d536725067b3a85fc57929d95a85dfdbe593c9fbe72c246f4ab855f602074fe5","src/unix/bsd/freebsdlike/freebsd/freebsd14/x86_64.rs":"d6e66809e109dc779efe5584b79d34fcd6fdba91973a415d64ae66f481f45940","src/unix/bsd/freebsdlike/freebsd/freebsd15/mod.rs":"9835c3374c2daea20de73ab7896044bc1de14f6cd0711df9d47d4e3a001c4ded","src/unix/bsd/freebsdlike/freebsd/freebsd15/x86_64.rs":"d6e66809e109dc779efe5584b79d34fcd6fdba91973a415d64ae66f481f45940","src/unix/bsd/freebsdlike/freebsd/mod.rs":"69818c7db4f93d1a2e61f414d433fb9c1007dcde11260a69f808c8b7b92834be","src/unix/bsd/freebsdlike/freebsd/powerpc.rs":"809148c48a16cef7de40378c9322a5795b40fec8c7eeaccb20df44a3c1c77c1d","src/unix/bsd/freebsdlike/freebsd/powerpc64.rs":"8ec51f2eb1eae1504b743fc22b0c498c1a1c11bbf17f8199d9d3a6af2ab108ca","src/unix/bsd/freebsdlike/freebsd/riscv64.rs":"4e8e313c3a3736fbb663c26bc445684e89c91092870dc896848df13f8ef89cfe","src/unix/bsd/freebsdlike/freebsd/x86.rs":"a3a17037050ba9f2314574a2cec8ff1962d08561e60616a3f170ba256371ff0b","src/unix/bsd/freebsdlike/freebsd/x86_64/mod.rs":"b79601d4f5f297f2aafb17aa4ed69f128d1668eda5c59866c3eae498034403aa","src/unix/bsd/freebsdlike/mod.rs":"4acf3a50e2944b0e7a35b5a6a4cae40e61283f62461b598d9efb252045aeaa8d","src/unix/bsd/mod.rs":"e05f88aabb6ca6c3f388d314a4235894734f8a0a3c0ea87f748ea7de4132a70b","src/unix/bsd/netbsdlike/mod.rs":"fa5c797fb3f57b637284ed725c10f63827f0de008edd326341dc56c1638610c7","src/unix/bsd/netbsdlike/netbsd/aarch64.rs":"ba2425edbf025f13ab3c3534d1671d310392df3a7d237539817d9cfa675971c8","src/unix/bsd/netbsdlike/netbsd/arm.rs":"f498ac1c11d0ebf6ee2c23cddb573c2358dcb5191924bd96e1bbc86870a86407","src/unix/bsd/netbsdlike/netbsd/mips.rs":"20cdd8d1427c986ecc3fcf7960d337917a13cfd8386dd2d54f8693a23d60892f","src/unix/bsd/netbsdlike/netbsd/mod.rs":"461feb5dc8dddea353ab6d32519045bc9f16525a48449c93dc7148469d94c9c3","src/unix/bsd/netbsdlike/netbsd/powerpc.rs":"c19c4edbc73b5a97b51e3e2ad39b9fee02ad15e80c70ceb3a1abfe977e5c0ead","src/unix/bsd/netbsdlike/netbsd/riscv64.rs":"efa1a156cff1ab2450439adbb3ab2113bed6b7de2205c99e9cba875aa2b1c153","src/unix/bsd/netbsdlike/netbsd/sparc64.rs":"d50816e830225779ac9e9a55a7e3e097882153d72987061d76a96ee736c8af9c","src/unix/bsd/netbsdlike/netbsd/x86.rs":"3006b6a086c0241f5383ca101e7b9357368d713f9c38400633491656d110798e","src/unix/bsd/netbsdlike/netbsd/x86_64.rs":"cb864e23a32eff1bf37563218cf6ce7dac8d028584c385107c84562cf1d87866","src/unix/bsd/netbsdlike/openbsd/aarch64.rs":"3960096fb915d2f75015e1706720d4cd0044938bcfe6727b097751b4c47df6a5","src/unix/bsd/netbsdlike/openbsd/arm.rs":"f064d935f416ca9f7e5e767b9b46da2250c997d667c0c7f4b4c7dfe02d0258c3","src/unix/bsd/netbsdlike/openbsd/mips64.rs":"bee7664d88f8451ae22552fc0721b6b6a6dee2493cc42bcb9829c1e47e4b05f5","src/unix/bsd/netbsdlike/openbsd/mod.rs":"154badb82f62c726fa7a0c320c5934bf459752262dcd101d43c2b3d3afb58cc4","src/unix/bsd/netbsdlike/openbsd/powerpc.rs":"f064d935f416ca9f7e5e767b9b46da2250c997d667c0c7f4b4c7dfe02d0258c3","src/unix/bsd/netbsdlike/openbsd/powerpc64.rs":"1f62a42e2970c42de9e3492fbf3cd5b45410889f033743579266342d1a9e2a00","src/unix/bsd/netbsdlike/openbsd/riscv64.rs":"c93baaf8e3afa8c79a1acb03234b0bb85b148a706481de909528513f45afa136","src/unix/bsd/netbsdlike/openbsd/sparc64.rs":"8d4c5a4cae63e09e1c156164ddc82e0fc77926841d4d4e419dd2e7a7b7145f58","src/unix/bsd/netbsdlike/openbsd/x86.rs":"e6da2fdff7706fd3eac147d3aaf16afdd8542f231f502660d1d89c79b5eca21b","src/unix/bsd/netbsdlike/openbsd/x86_64.rs":"89be4988c6acca7ce411aa2907401b9fed1ffce6ad606cc150683f1e136cba94","src/unix/cygwin/mod.rs":"c98bb7c1118c249f2e7533c68b0d77bd
778a1050ab52aab603a03151061e084f","src/unix/haiku/b32.rs":"c3f8678ceee65a3094d8133b0d1a94470860e0b1867977f0569c52c5a20e039f","src/unix/haiku/b64.rs":"f97ce9225f4710893dab03ab3e13bc62152cc84f90c597ec88f6dc1f4c27d242","src/unix/haiku/bsd.rs":"4d9af31fdac2561ee5f942dca97dd2f48139ca74660d40b854b307fa5679d1c8","src/unix/haiku/mod.rs":"a1e1ab46a354da23a8348331d014b184a3f3b9d7fec8ced4c6efede9f5c38a45","src/unix/haiku/native.rs":"8248c0491d62ed96b5c2707a998f8d13cf2a49f2d06fa724848863860cb40e69","src/unix/haiku/x86_64.rs":"09f2384474b2fcb7d0febb0e9e802610a627cadca29dc0e60eb4cfe15552f978","src/unix/hurd/b32.rs":"501f426f7aeb60acf4119064100dd94bbdfebff2cec785428708059d853dc123","src/unix/hurd/b64.rs":"b9b2082e721a5ec89ba55fd5a16bbffcc8a05ca7cef6dbfbd78aff0806cb931f","src/unix/hurd/mod.rs":"b09754b468b78b64463cbf6d5d50ffba76e27efe66bb1e5020054627d751d98a","src/unix/linux_like/android/b32/arm.rs":"e68f6a15870a22e0383770ed1a5bd443d4c2ed237d16fea338c5da1ab9bf1fe3","src/unix/linux_like/android/b32/mod.rs":"5b10ebe56435d868846ae720bb9081cf814486722b5c13520fd4ef50a7ecfb58","src/unix/linux_like/android/b32/x86/mod.rs":"52f402bc27e3ddc519cd2699205bc0f31ba9737f89772d26c07e9c28a7f35762","src/unix/linux_like/android/b64/aarch64/mod.rs":"6d4fcf287ee09d65cfd8d8d9e2b551185f1cf9d90072922b9f703d2871deb036","src/unix/linux_like/android/b64/mod.rs":"04346a4a75b7cf20992eed6a2cfb986d38daf539208c462c10a7ccf3cd516068","src/unix/linux_like/android/b64/riscv64/mod.rs":"10705a5608bc761718ed54ce6dcc2a83c8aa9300337c4f9a67152637dc8d3b11","src/unix/linux_like/android/b64/x86_64/mod.rs":"7243327f35f4f4e59642c9015ee65a13fbc61618fb8ca615580f7f84a3b72e45","src/unix/linux_like/android/mod.rs":"a75260c2c9951ab305559d249a9586167d609a473ecf1d568c876a428d866da1","src/unix/linux_like/emscripten/lfs64.rs":"3a1d1779bcf16525a578a68237f9854090eae4c9819e18ffb5a288f39be4afbe","src/unix/linux_like/emscripten/mod.rs":"fab8e539c9681b444e96cc46eed01fd2640347ff0499b82f238ccec796f15175","src/unix/linux_like/linux/arch/generic/mod.rs":"c8f4d88ba7ffe044c47fc84ca1e21751bfd2446806ccabbe8729958cbb5d1ccc","src/unix/linux_like/linux/arch/mips/mod.rs":"058ebf07f8b10358af9a7f66bd96ba14df1cc6a942203da9abe8d1abab00fcbb","src/unix/linux_like/linux/arch/mod.rs":"8bc5898b03760a95dd4f124ac76ad92e5ae36b2d0644203d752ef2b37e487c3a","src/unix/linux_like/linux/arch/powerpc/mod.rs":"0e20b7e63fe39a200cb4813eeb934bc25d91a2427cd1b1d81bc2cfa4e2368ed5","src/unix/linux_like/linux/arch/sparc/mod.rs":"96ed29a86c629657c521a5e12dece22e570ef7ceee9e8f4a58c2e0782d74e01d","src/unix/linux_like/linux/gnu/b32/arm/mod.rs":"0ab9b524f4e4182eb92ac40c7b5101ce73509aa2d483eab58eef212005c21497","src/unix/linux_like/linux/gnu/b32/csky/mod.rs":"8fdab3a121a111b9856f0de29fe75262d8aa5f1a3d75b273cc72c8c809e87c48","src/unix/linux_like/linux/gnu/b32/m68k/mod.rs":"4c79cca606495e3d98f386a0f8d447f3f281df5ade019380b9018f05999bf849","src/unix/linux_like/linux/gnu/b32/mips/mod.rs":"981838f4092d4e3e343087903246ad7cfd7dc1a9fb1fc854419b744ed395d742","src/unix/linux_like/linux/gnu/b32/mod.rs":"050fa9856b151b3e33214b570cf7527eca11eaa9145d40a1524824b5232b2500","src/unix/linux_like/linux/gnu/b32/powerpc.rs":"7c3b9aad8856408517e056bdcfde877ca9d4529b9c39ffada70b56cdf244c403","src/unix/linux_like/linux/gnu/b32/riscv32/mod.rs":"4bdfd096759a489c0fcbfb1f38ff5c364bbe3fa3f6f3844a43486f5fb2e1eb24","src/unix/linux_like/linux/gnu/b32/sparc/mod.rs":"d8fc8800d01891bb9fd652008817eba58bf9fa823a0cac658bc252ac53885222","src/unix/linux_like/linux/gnu/b32/x86/mod.rs":"ce42dc6c6b620f898d924fdb26895b7b832a4f38e838059f99f663bda2cb3e69","sr
c/unix/linux_like/linux/gnu/b64/aarch64/ilp32.rs":"638b2d717dcf5b59a0058c3dabab43edd84de9d1d7da6e16ad0c58e863000417","src/unix/linux_like/linux/gnu/b64/aarch64/lp64.rs":"28c11e70467b2f00735d3a04c369e96e04fd44d0587ee33359f38328b0677ee6","src/unix/linux_like/linux/gnu/b64/aarch64/mod.rs":"9bfab4e70363d559f76600cbb38659f8e204bea14b165e0054118d34ad3f94ff","src/unix/linux_like/linux/gnu/b64/loongarch64/mod.rs":"f2d8b176c64d791a5a34b7753cab2f50e34cd450e6819bf0b180bd3c6a9d9771","src/unix/linux_like/linux/gnu/b64/mips64/mod.rs":"3fac2105995a594d66e39912f4613ed67b1046a0fc11de97487306337729dccb","src/unix/linux_like/linux/gnu/b64/mod.rs":"30d1286c6b53a8c1cc090921a4192d5c05c7dbe0a7ff1aa4577774f9db934515","src/unix/linux_like/linux/gnu/b64/powerpc64/mod.rs":"285c465bd0cb1e66c2907306d560316b561732bca8ed2008eba370f48a0f957f","src/unix/linux_like/linux/gnu/b64/riscv64/mod.rs":"cd5b8088ddb38bbee6cd8f293f4dd8e0970a5c04d25b4bb4f4c8a5da61a09029","src/unix/linux_like/linux/gnu/b64/s390x.rs":"64fba1b75736ef6e22c11751c9daa3abd61af47b27da641d655e9ebb04b0f507","src/unix/linux_like/linux/gnu/b64/sparc64/mod.rs":"ef87054c07622d4f53401e72a9a937331d573802c7bfa707761097af8c47a968","src/unix/linux_like/linux/gnu/b64/x86_64/mod.rs":"deb7d1bb4639e0adaba2ad63636bc30df7d5318d934e514c0273fa08051eb5b2","src/unix/linux_like/linux/gnu/b64/x86_64/not_x32.rs":"07240332eaf44996d86e37b12d71b519b499c9c9b57898441c58ac2e8c0cb6f7","src/unix/linux_like/linux/gnu/b64/x86_64/x32.rs":"914898b781dfe6b2f755730d6000223d1beea177731e180ccbfdd84a0b8b3bd9","src/unix/linux_like/linux/gnu/mod.rs":"606d323e8aa2c14b22a54f9fc8720ae237df583ae1ba9e5bfe2249663ad02c5c","src/unix/linux_like/linux/mod.rs":"4ab0a27762842a59a5e59f9029d8a49527de4226c5d6e24156f316d7f52a4284","src/unix/linux_like/linux/musl/b32/arm/mod.rs":"8df7c7015240f62151363a0a545fb3be96e5d816b62ef673d84294e87e9bb9ea","src/unix/linux_like/linux/musl/b32/hexagon.rs":"1b0c68839dc46d00010d99e946f356d50dc4ad1c7468f99a8afe839f9542ebd0","src/unix/linux_like/linux/musl/b32/mips/mod.rs":"12c57cfa8eae992b3764114ba6868cd729995d71786029aa0f775086e935065b","src/unix/linux_like/linux/musl/b32/mod.rs":"e0f53df7ca1dbe9b0b25ccecf1adf664227995e58d67a222a8d046d2a879dfc8","src/unix/linux_like/linux/musl/b32/powerpc.rs":"92089167ddbe1fde8663373699ee16c7b01c451c442c27ab1a392582c992dc32","src/unix/linux_like/linux/musl/b32/riscv32/mod.rs":"f13543de5c3b4f8c23d9c9f4f3ad90f514be122d2707f7f781c696261cb11f91","src/unix/linux_like/linux/musl/b32/x86/mod.rs":"264aafdd2d3dbbf57764a671797ca0ef53baee4737f7acf09db4f14a5d13831d","src/unix/linux_like/linux/musl/b64/aarch64/mod.rs":"5ba43a3198d9dff45bc411925353674f1e4cef29eace6b2b1cb6cad070680308","src/unix/linux_like/linux/musl/b64/loongarch64/mod.rs":"294414bcb24a5b59335e49d00f6285b926bd352df20dcff4f935ae094d21495e","src/unix/linux_like/linux/musl/b64/mips64.rs":"d448cf011098728d32eab0b212696063c3365ef5e94fd5e26f8861a3efdbb097","src/unix/linux_like/linux/musl/b64/mod.rs":"e3055a6690ed1dc63b865957be649bfa165852c693e2a387c2c627939157a773","src/unix/linux_like/linux/musl/b64/powerpc64.rs":"03552edded40fccc52c8259af289cbeb074482c1337ef0c32c3cfff81bd3d537","src/unix/linux_like/linux/musl/b64/riscv64/mod.rs":"e13c6430f950035f94989771122c733866651194e7218c7d0b243ae04ef7c864","src/unix/linux_like/linux/musl/b64/s390x.rs":"30f9ac1527e49a57f6d829e54a82ca48e7a1b74507904e6c89a13f933da30ff5","src/unix/linux_like/linux/musl/b64/wasm32/mod.rs":"2d2a01fd01b372ebf1ff63d23072ae548f8a392f443f41a455e0bfb6a8705b70","src/unix/linux_like/linux/musl/b64/wasm32/wali.rs":"69e0d06289f1c86898ef3ab505e3
97af2acce146accb62efff654fe458b6af02","src/unix/linux_like/linux/musl/b64/x86_64/mod.rs":"95b8adc3443988aa4908e8c1d8c8c0a1619a7636e8ea286edd95ec815d6cd5d2","src/unix/linux_like/linux/musl/lfs64.rs":"308c5b5c9b2f4b1ad17e5c2d02946f84ae83e8f5cb0e789d8d3e76c1923a5d31","src/unix/linux_like/linux/musl/mod.rs":"d8afb6167cab328d65ce2a755a5a4559de30173dd4d09bb90001734a15437730","src/unix/linux_like/linux/uclibc/arm/mod.rs":"a90c7811623714e168b676aa50b162931e66ce86f8c59b0acac131afde474b2c","src/unix/linux_like/linux/uclibc/mips/mips32/mod.rs":"59493f1ab84ddbcf9dc5881c9cfc26e28d4fb5322d63f60eb7de5f9e8e329580","src/unix/linux_like/linux/uclibc/mips/mips64/mod.rs":"a35532d5ae376f403873aa566f37bff99c6c323d334f3201667e5f7200b04643","src/unix/linux_like/linux/uclibc/mips/mod.rs":"e552f579a8dc9e690891113aa6360607ad962bd84cbb60c370b5c5f7c7d1d8c0","src/unix/linux_like/linux/uclibc/mod.rs":"128d586702c6aa6f1d1c56342f855d62550063ea7df97c6c16abdb01fd6bf94e","src/unix/linux_like/linux/uclibc/x86_64/l4re.rs":"f29e4a969f0bf7359b984e17335cc957f980a70b49912c032fd762dd05743081","src/unix/linux_like/linux/uclibc/x86_64/mod.rs":"be62714a2ff04387093145a3d765585eaaac71e4fb53d1983546a57940fa2ce4","src/unix/linux_like/linux/uclibc/x86_64/other.rs":"12f8d4049862fc0c4d94b770f2d0341c1c7bf3da0619436169c12cadc4093def","src/unix/linux_like/mod.rs":"537ab6b4af3685a71487e31491d82cc2a08e8d4ce4a9dc88c6eeb29ae1daf5b7","src/unix/mod.rs":"4f6c804705ede5fa221cce2a9b23a69daaeccdbf390647a4d9f4e94aa462082d","src/unix/newlib/aarch64/mod.rs":"ec594c54dc0e7784668d02ef737fd194dcc3f1e6ee23328d810fd2453bcb6f20","src/unix/newlib/arm/mod.rs":"a1fb6caa077c2ed69adf12da07c814ffab4c1311579f23bae2b26a40cf180665","src/unix/newlib/espidf/mod.rs":"77e8ad5b7db027b8b0b5aa5126f15bc0e35b6f3deb2339acf403c961f13df26f","src/unix/newlib/generic.rs":"182e584f14e0984934130425dd2be0781ca2c131449b3ae1734a07c72c3d43cd","src/unix/newlib/horizon/mod.rs":"9ea04f90566fc14fcfd4ec5bd7c1ef467d6b26ce80bda73f4eec2fe7b652e816","src/unix/newlib/mod.rs":"18def44ab6d32cc50cb89242a1ef9edfa0ffe8010cafb27aaef8ebb970696dea","src/unix/newlib/powerpc/mod.rs":"4e5f804a13e907e17ebb66dcbf3b0fe6e1a611f91876aad8d8a0a69c7df0a7e8","src/unix/newlib/rtems/mod.rs":"6e26c8d4ce78557b3d0eef33f0057e46545c655612c7d86c38bb759f5e032788","src/unix/newlib/vita/mod.rs":"20fd016df6c8aa9097ab3410c5efd187a2f2a202b5e7c0e0ee67714036108329","src/unix/nto/aarch64.rs":"73ad54ebca13454a75c7b0815e853175070a8ac2eefa338012a03e8b59f01e0c","src/unix/nto/mod.rs":"a5219667280d9664a382b91dde8374f9959252a402d1b85dd3577957f4bf88b9","src/unix/nto/neutrino.rs":"2cef6af9943eec590b2b0af96a63bc3169e9d2af5c7713e3360eb09a807f248a","src/unix/nto/x86_64.rs":"8da99138e210516a95d49c8c0265eada4c5f7b93d59be86224844410f5e7929b","src/unix/nuttx/mod.rs":"137c69eca97ba9e0ca61baf6b9dafc11d68f07a1f5de527f9ff3fdc30e3f1ca9","src/unix/redox/mod.rs":"7a5b62cdb08d8eae9c871d9d3158cedf8a603c50716263228bf0a1568daf32c3","src/unix/solarish/compat.rs":"4346fbe9f8640868ac20b63bf3b52f883a37587e1df15ffe54fa0393a48a5588","src/unix/solarish/illumos.rs":"c6305f2555bc542dd63ac0edbc8e517f65a7a870ef9c406d0809d25c6c32276c","src/unix/solarish/mod.rs":"fd370036f3b0a198369104d426692ea4d1d4b9905ad3c15d61caec38e908dd02","src/unix/solarish/solaris.rs":"4045113ee68a9e29f6e2211dbfabe7fd423b21e7b882a982a589719d2c437657","src/unix/solarish/x86.rs":"44261c1f1b300dac9fa0dab93ec85d0c3b3c48b15bc4515b9820c9421cff7427","src/unix/solarish/x86_64.rs":"d888cd12da647f543df8cce7ae04e4a67f8647f71fd14cf7b4f968dbafcd4f5e","src/unix/solarish/x86_common.rs":"4ae02d88622f7f080f5e8cd328f13187
edbc5e124fb3e05e4cf212597f6cce48","src/vxworks/aarch64.rs":"4d4236500f98858fc249f3b6858af5009851c8c582031926b8195b2646f7da5e","src/vxworks/arm.rs":"4d4236500f98858fc249f3b6858af5009851c8c582031926b8195b2646f7da5e","src/vxworks/mod.rs":"29474e4025c3bcccc1aa63407928d58623ea7337aa74c325ca8fb01248d52256","src/vxworks/powerpc.rs":"4d4236500f98858fc249f3b6858af5009851c8c582031926b8195b2646f7da5e","src/vxworks/powerpc64.rs":"4d4236500f98858fc249f3b6858af5009851c8c582031926b8195b2646f7da5e","src/vxworks/riscv32.rs":"b1f933205800f0da00f975d53b18fe0035e075cc4613acf110a09a277dc3302a","src/vxworks/riscv64.rs":"b1f933205800f0da00f975d53b18fe0035e075cc4613acf110a09a277dc3302a","src/vxworks/x86.rs":"b1f933205800f0da00f975d53b18fe0035e075cc4613acf110a09a277dc3302a","src/vxworks/x86_64.rs":"b1f933205800f0da00f975d53b18fe0035e075cc4613acf110a09a277dc3302a","src/wasi/mod.rs":"2d15648f99fe90cff9076f1ad93c9dffe04051a4f350e89ca4e513a20e97933c","src/wasi/p2.rs":"feecc0485eabd2c32bc5d800df6ad1b9b4d282741342fb08792f2635204e1e08","src/windows/gnu/mod.rs":"f8c154637cd4b9b5b35b197373d67742d0678abb5f674905897a00029785c455","src/windows/mod.rs":"455795a86354420b1151f35d0ec6ab75e165adf4abbd9111352e9d44edb20634","src/windows/msvc/mod.rs":"7bc0f1e7e73815296cd6b63b2700e12624e9f47b5c4113a1a87fae8e64549c00","src/xous.rs":"1a83621c40248ad4d0c08e1fd4c1107d5efcbc2f4f0169538b7b4a885abedbfa","tests/const_fn.rs":"8ac3171d7bced3576a4e93f48570b3e00c553d7510ab85a7473ae3b716a812dc"},"package":"2874a2af47a2325c2001a6e6fad9b16a53b802102b528163885171cf92b15976"} \ No newline at end of file diff --git a/vendor/libc/.cargo_vcs_info.json b/vendor/libc/.cargo_vcs_info.json deleted file mode 100644 index 322f793c32fdc9..00000000000000 --- a/vendor/libc/.cargo_vcs_info.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "git": { - "sha1": "9f598d245e18ecb243118cfde095f24598ec9d5b" - }, - "path_in_vcs": "" -} \ No newline at end of file diff --git a/vendor/libc/.editorconfig b/vendor/libc/.editorconfig deleted file mode 100644 index 155c9905f91e13..00000000000000 --- a/vendor/libc/.editorconfig +++ /dev/null @@ -1,7 +0,0 @@ -[*.sh] -# See https://github.com/mvdan/sh/blob/master/cmd/shfmt/shfmt.1.scd#examples -indent_style = space -indent_size = 4 - -switch_case_indent = true -space_redirects = true diff --git a/vendor/libc/.git-blame-ignore-revs b/vendor/libc/.git-blame-ignore-revs deleted file mode 100644 index d358a2cd3d3498..00000000000000 --- a/vendor/libc/.git-blame-ignore-revs +++ /dev/null @@ -1,6 +0,0 @@ -# Format macro bodies -50f26e08e146b7e9c7d1af9614486eba327d1e31 - -# Automated changes related to the 2021 edition upgrade -643182f7da26cedb09349b8bb3735c2e58ba24e6 -108310db03e7db35ef48a902d9ce9a88ab8f9b77 diff --git a/vendor/libc/.release-plz.toml b/vendor/libc/.release-plz.toml deleted file mode 100644 index 6442af58ad98e9..00000000000000 --- a/vendor/libc/.release-plz.toml +++ /dev/null @@ -1,49 +0,0 @@ -[workspace] -git_release_name = "{{ version }}" -git_tag_name = "{{ version }}" - -[changelog] -body = """ -## [{{ version | trim_start_matches(pat="v") }}]\ - {%- if release_link -%}\ - ({{ release_link }})\ - {% endif %} \ - - {{ timestamp | date(format="%Y-%m-%d") }} -{% for group, commits in commits | group_by(attribute="group") %} -### {{ group | upper_first }} - {% for commit in commits %} - - {% if commit.scope -%}{{ commit.scope | upper_first }}: {% endif %} - {%- if commit.breaking %}[**breaking**] {% endif %} - {{- commit.message }} - {%- if commit.links %} ([{{ commit.links.1.text }}]({{ commit.links.1.href }})){% endif -%} - {% 
endfor %} -{% endfor %} -{%- if github -%} -{% if github.contributors | filter(attribute="is_first_time", value=true) | length != 0 %} - ## New Contributors ❤️ -{% endif %}\ -{% for contributor in github.contributors | filter(attribute="is_first_time", value=true) %} - * @{{ contributor.username }} made their first contribution - {%- if contributor.pr_number %} in \ - [#{{ contributor.pr_number }}]({{ self::remote_url() }}/pull/{{ contributor.pr_number }}) \ - {%- endif %} -{%- endfor -%} -{%- endif %} -""" - -commit_parsers = [ - { message = '(?i)^(\w+: )?feat', group = "added" }, - { message = '(?i)^(\w+: )?add', group = "added" }, - { message = '(?i)^(\w+: )?change', group = "changed" }, - { message = '(?i)^(\w+: )?cleanup', group = "cleanup" }, - { message = '(?i)^(\w+: )?deprecate', group = "deprecated" }, - { message = '(?i)^(\w+: )?remove', group = "removed" }, - { message = '(?i)^(\w+: )?fix', group = "fixed" }, - { message = '(?i)^(\w+: )?fix', group = "fixed" }, - { message = '^.*', group = "other" }, -] - -link_parsers = [ - # Extract backport patterns - { pattern = '\(backport <.*/(\d+)>\)', text = "#$1", href = "https://github.com/rust-lang/libc/pull/$1"} -] diff --git a/vendor/libc/CHANGELOG.md b/vendor/libc/CHANGELOG.md deleted file mode 100644 index e9b726cf197904..00000000000000 --- a/vendor/libc/CHANGELOG.md +++ /dev/null @@ -1,747 +0,0 @@ -# Changelog - -## [0.2.177](https://github.com/rust-lang/libc/compare/0.2.176...0.2.177) - 2025-10-09 - -### Added - -- Apple: Add `TIOCGETA`, `TIOCSETA`, `TIOCSETAW`, `TIOCSETAF` constants ([#4736](https://github.com/rust-lang/libc/pull/4736)) -- Apple: Add `pthread_cond_timedwait_relative_np` ([#4719](https://github.com/rust-lang/libc/pull/4719)) -- BSDs: Add `_CS_PATH` constant ([#4738](https://github.com/rust-lang/libc/pull/4738)) -- Linux-like: Add `SIGEMT` for mips* and sparc* architectures ([#4730](https://github.com/rust-lang/libc/pull/4730)) -- OpenBSD: Add `elf_aux_info` ([#4729](https://github.com/rust-lang/libc/pull/4729)) -- Redox: Add more sysconf constants ([#4728](https://github.com/rust-lang/libc/pull/4728)) -- Windows: Add `wcsnlen` ([#4721](https://github.com/rust-lang/libc/pull/4721)) - -### Changed - -- WASIP2: Invert conditional to include p2 APIs ([#4733](https://github.com/rust-lang/libc/pull/4733)) - -## [0.2.176](https://github.com/rust-lang/libc/compare/0.2.175...0.2.176) - 2025-09-23 - -### Support - -- The default FreeBSD version has been raised from 11 to 12. This matches `rustc` since 1.78. ([#2406](https://github.com/rust-lang/libc/pull/2406)) -- `Debug` is now always implemented, rather than being gated behind the `extra_traits` feature. ([#4624](https://github.com/rust-lang/libc/pull/4624)) - -### Added - -- AIX: Restore some non-POSIX functions guarded by the `_KERNEL` macro. 
([#4607](https://github.com/rust-lang/libc/pull/4607)) -- FreeBSD 14: Add `st_fileref` to `struct stat` ([#4642](https://github.com/rust-lang/libc/pull/4642)) -- Haiku: Add the `accept4` POSIX call ([#4586](https://github.com/rust-lang/libc/pull/4586)) -- Introduce a wrapper for representing padding ([#4632](https://github.com/rust-lang/libc/pull/4632)) -- Linux: Add `EM_RISCV` ([#4659](https://github.com/rust-lang/libc/pull/4659)) -- Linux: Add `MS_NOSYMFOLLOW` ([#4389](https://github.com/rust-lang/libc/pull/4389)) -- Linux: Add `backtrace_symbols(_fd)` ([#4668](https://github.com/rust-lang/libc/pull/4668)) -- Linux: Add missing `SOL_PACKET` optnames ([#4669](https://github.com/rust-lang/libc/pull/4669)) -- Musl s390x: Add `SYS_mseal` ([#4549](https://github.com/rust-lang/libc/pull/4549)) -- NuttX: Add `__errno` ([#4687](https://github.com/rust-lang/libc/pull/4687)) -- Redox: Add `dirfd`, `VDISABLE`, and resource consts ([#4660](https://github.com/rust-lang/libc/pull/4660)) -- Redox: Add more `resource.h`, `fcntl.h` constants ([#4666](https://github.com/rust-lang/libc/pull/4666)) -- Redox: Enable `strftime` and `mkostemp[s]` ([#4629](https://github.com/rust-lang/libc/pull/4629)) -- Unix, Windows: Add `qsort_r` (Unix), and `qsort(_s)` (Windows) ([#4677](https://github.com/rust-lang/libc/pull/4677)) -- Unix: Add `dlvsym` for Linux-gnu, FreeBSD, and NetBSD ([#4671](https://github.com/rust-lang/libc/pull/4671)) -- Unix: Add `sigqueue` ([#4620](https://github.com/rust-lang/libc/pull/4620)) - -### Changed - -- FreeBSD 15: Mark `kinfo_proc` as non-exhaustive ([#4553](https://github.com/rust-lang/libc/pull/4553)) -- FreeBSD: Set the ELF symbol version for `readdir_r` ([#4694](https://github.com/rust-lang/libc/pull/4694)) -- Linux: Correct the config for whether or not `epoll_event` is packed ([#4639](https://github.com/rust-lang/libc/pull/4639)) -- Tests: Replace the old `ctest` with the much more reliable new implementation ([#4655](https://github.com/rust-lang/libc/pull/4655) and many related PRs) - -### Fixed - -- AIX: Fix the type of the 4th arguement of `getgrnam_r` ([#4656](https://github.com/rust-lang/libc/pull/4656 -- FreeBSD: Limit `P_IDLEPROC` to FreeBSD 15 ([#4640](https://github.com/rust-lang/libc/pull/4640)) -- FreeBSD: Limit `mcontext_t::mc_tlsbase` to FreeBSD 15 ([#4640](https://github.com/rust-lang/libc/pull/464)) -- FreeBSD: Update gating of `mcontext_t.mc_tlsbase` ([#4703](https://github.com/rust-lang/libc/pull/4703)) -- Musl s390x: Correct the definition of `statfs[64]` ([#4549](https://github.com/rust-lang/libc/pull/4549)) -- Musl s390x: Make `fpreg_t` a union ([#4549](https://github.com/rust-lang/libc/pull/4549)) -- Redox: Fix the types of `gid_t` and `uid_t` ([#4689](https://github.com/rust-lang/libc/pull/4689)) -- Redox: Fix the value of `MAP_FIXED` ([#4684](https://github.com/rust-lang/libc/pull/4684)) - -### Deprecated - -- Apple: Correct the `deprecated` attribute for `iconv` ([`a97a0b53`](https://github.com/rust-lang/libc/commit/a97a0b53fb7faf5f99cd720ab12b1b8a5bf9f950)) -- FreeBSD: Deprecate `TIOCMGDTRWAIT` and `TIOCMSDTRWAIT` ([#4685](https://github.com/rust-lang/libc/pull/4685)) - -### Removed - -- FreeBSD: Remove `JAIL_{GET,SET}_MASK`, `_MC_FLAG_MASK` ([#4691](https://github.com/rust-lang/libc/pull/4691)) - -## [0.2.175](https://github.com/rust-lang/libc/compare/0.2.174...0.2.175) - 2025-08-10 - -### Added - -- AIX: Add `getpeereid` ([#4524](https://github.com/rust-lang/libc/pull/4524)) -- AIX: Add `struct ld_info` and friends 
([#4578](https://github.com/rust-lang/libc/pull/4578)) -- AIX: Retore `struct winsize` ([#4577](https://github.com/rust-lang/libc/pull/4577)) -- Android: Add UDP socket option constants ([#4619](https://github.com/rust-lang/libc/pull/4619)) -- Android: Add `CLONE_CLEAR_SIGHAND` and `CLONE_INTO_CGROUP` ([#4502](https://github.com/rust-lang/libc/pull/4502)) -- Android: Add more `prctl` constants ([#4531](https://github.com/rust-lang/libc/pull/4531)) -- FreeBSD Add further TCP stack-related constants ([#4196](https://github.com/rust-lang/libc/pull/4196)) -- FreeBSD x86-64: Add `mcontext_t.mc_tlsbase ` ([#4503](https://github.com/rust-lang/libc/pull/4503)) -- FreeBSD15: Add `kinfo_proc.ki_uerrmsg` ([#4552](https://github.com/rust-lang/libc/pull/4552)) -- FreeBSD: Add `in_conninfo` ([#4482](https://github.com/rust-lang/libc/pull/4482)) -- FreeBSD: Add `xinpgen` and related types ([#4482](https://github.com/rust-lang/libc/pull/4482)) -- FreeBSD: Add `xktls_session` ([#4482](https://github.com/rust-lang/libc/pull/4482)) -- Haiku: Add functionality from `libbsd` ([#4221](https://github.com/rust-lang/libc/pull/4221)) -- Linux: Add `SECBIT_*` ([#4480](https://github.com/rust-lang/libc/pull/4480)) -- NetBSD, OpenBSD: Export `ioctl` request generator macros ([#4460](https://github.com/rust-lang/libc/pull/4460)) -- NetBSD: Add `ptsname_r` ([#4608](https://github.com/rust-lang/libc/pull/4608)) -- RISCV32: Add time-related syscalls ([#4612](https://github.com/rust-lang/libc/pull/4612)) -- Solarish: Add `strftime*` ([#4453](https://github.com/rust-lang/libc/pull/4453)) -- linux: Add `EXEC_RESTRICT_*` and `EXEC_DENY_*` ([#4545](https://github.com/rust-lang/libc/pull/4545)) - -### Changed - -- AIX: Add `const` to signatures to be consistent with other platforms ([#4563](https://github.com/rust-lang/libc/pull/4563)) - -### Fixed - -- AIX: Fix the type of `struct statvfs.f_fsid` ([#4576](https://github.com/rust-lang/libc/pull/4576)) -- AIX: Fix the type of constants for the `ioctl` `request` argument ([#4582](https://github.com/rust-lang/libc/pull/4582)) -- AIX: Fix the types of `stat{,64}.st_*tim` ([#4597](https://github.com/rust-lang/libc/pull/4597)) -- AIX: Use unique `errno` values ([#4507](https://github.com/rust-lang/libc/pull/4507)) -- Build: Fix an incorrect `target_os` -> `target_arch` check ([#4550](https://github.com/rust-lang/libc/pull/4550)) -- FreeBSD: Fix the type of `xktls_session_onedir.ifnet` ([#4552](https://github.com/rust-lang/libc/pull/4552)) -- Mips64 musl: Fix the type of `nlink_t` ([#4509](https://github.com/rust-lang/libc/pull/4509)) -- Mips64 musl: Use a special MIPS definition of `stack_t` ([#4528](https://github.com/rust-lang/libc/pull/4528)) -- Mips64: Fix `SI_TIMER`, `SI_MESGQ` and `SI_ASYNCIO` definitions ([#4529](https://github.com/rust-lang/libc/pull/4529)) -- Musl Mips64: Swap the order of `si_errno` and `si_code` in `siginfo_t` ([#4530](https://github.com/rust-lang/libc/pull/4530)) -- Musl Mips64: Use a special MIPS definition of `statfs` ([#4527](https://github.com/rust-lang/libc/pull/4527)) -- Musl: Fix the definition of `fanotify_event_metadata` ([#4510](https://github.com/rust-lang/libc/pull/4510)) -- NetBSD: Correct `enum fae_action` to be `#[repr(C)]` ([#60a8cfd5](https://github.com/rust-lang/libc/commit/60a8cfd564f83164d45b9533ff7a0d7371878f2a)) -- PSP: Correct `char` -> `c_char` ([eaab4fc3](https://github.com/rust-lang/libc/commit/eaab4fc3f05dc646a953d4fd5ba46dfa1f8bd6f6)) -- PowerPC musl: Fix `termios` definitions 
([#4518](https://github.com/rust-lang/libc/pull/4518)) -- PowerPC musl: Fix the definition of `EDEADLK` ([#4517](https://github.com/rust-lang/libc/pull/4517)) -- PowerPC musl: Fix the definition of `NCCS` ([#4513](https://github.com/rust-lang/libc/pull/4513)) -- PowerPC musl: Fix the definitions of `MAP_LOCKED` and `MAP_NORESERVE` ([#4516](https://github.com/rust-lang/libc/pull/4516)) -- PowerPC64 musl: Fix the definition of `shmid_ds` ([#4519](https://github.com/rust-lang/libc/pull/4519)) - -### Deprecated - -- Linux: `MAP_32BIT` is only defined on x86 on non-x86 architectures ([#4511](https://github.com/rust-lang/libc/pull/4511)) - -### Removed - -- AIX: Remove duplicate constant definitions `FIND` and `ENTER` ([#4588](https://github.com/rust-lang/libc/pull/4588)) -- s390x musl: Remove `O_FSYNC` ([#4515](https://github.com/rust-lang/libc/pull/4515)) -- s390x musl: Remove `RTLD_DEEPBIND` ([#4515](https://github.com/rust-lang/libc/pull/4515)) - - -## [0.2.174](https://github.com/rust-lang/libc/compare/0.2.173...0.2.174) - 2025-06-17 - -### Added - -- Linux: Make `pidfd_info` fields pub ([#4487](https://github.com/rust-lang/libc/pull/4487)) - -### Fixed - -- Gnu x32: Add missing `timespec.tv_nsec` ([#4497](https://github.com/rust-lang/libc/pull/4497)) -- NuttX: Use `nlink_t` type for `st_nlink` in `struct stat` definition ([#4483](https://github.com/rust-lang/libc/pull/4483)) - -### Other - -- Allow new `unpredictable_function_pointer_comparisons` lints ([#4489](https://github.com/rust-lang/libc/pull/4489)) -- OpenBSD: Fix some clippy warnings to use `pointer::cast`. ([#4490](https://github.com/rust-lang/libc/pull/4490)) -- Remove unessecary semicolons from definitions of `CMSG_NXTHDR`. ([#4492](https://github.com/rust-lang/libc/pull/4492)) - - -## [0.2.173](https://github.com/rust-lang/libc/compare/0.2.172...0.2.173) - 2025-06-09 - -### Added - -- AIX: Add an AIX triple to Cargo.toml for doc ([#4475](https://github.com/rust-lang/libc/pull/4475)) -- FreeBSD: Add the `SO_SPLICE` socket option support for FreeBSD >= 14.2 ([#4451](https://github.com/rust-lang/libc/pull/4451)) -- Linux GNU: Prepare for supporting `_TIME_BITS=64` ([#4433](https://github.com/rust-lang/libc/pull/4433)) -- Linux: Add constant PACKET_IGNORE_OUTGOING ([#4319](https://github.com/rust-lang/libc/pull/4319)) -- Linux: Add constants and types for `nsfs` ioctls ([#4436](https://github.com/rust-lang/libc/pull/4436)) -- Linux: Add constants for Memory-Deny-Write-Execute `prctls` ([#4400](https://github.com/rust-lang/libc/pull/4400)) -- Linux: Add constants from `linux/cn_proc.h` and `linux/connector.h` ([#4434](https://github.com/rust-lang/libc/pull/4434)) -- Linux: Add new flags for `pwritev2` and `preadv2` ([#4452](https://github.com/rust-lang/libc/pull/4452)) -- Linux: Add pid_type enum values ([#4403](https://github.com/rust-lang/libc/pull/4403)) -- Linux: Update pidfd constants and types (Linux 6.9-6.15) ([#4402](https://github.com/rust-lang/libc/pull/4402)) -- Loongarch64 musl: Define the `MADV_SOFT_OFFLINE` constant ([#4448](https://github.com/rust-lang/libc/pull/4448)) -- Musl: Add new fields since 1.2.0/1.2.2 to `struct tcp_info` ([#4443](https://github.com/rust-lang/libc/pull/4443)) -- Musl: Prepare for supporting v1.2.3 ([#4443](https://github.com/rust-lang/libc/pull/4443)) -- NuttX: Add `arc4random` and `arc4random_buf` ([#4464](https://github.com/rust-lang/libc/pull/4464)) -- RISC-V Musl: Add `MADV_SOFT_OFFLINE` definition ([#4447](https://github.com/rust-lang/libc/pull/4447)) -- Redox: Define SCM_RIGHTS 
([#4440](https://github.com/rust-lang/libc/pull/4440)) -- VxWorks: Add missing UTIME defines and TASK_RENAME_LENGTH ([#4407](https://github.com/rust-lang/libc/pull/4407)) -- Windows: Add more `time.h` functions ([#4427](https://github.com/rust-lang/libc/pull/4427)) - -### Changed - -- Redox: Update `SA_` constants. ([#4426](https://github.com/rust-lang/libc/pull/4426)) -- Redox: make `CMSG_ALIGN`, `CMSG_LEN`, and `CMSG_SPACE` const functions ([#4441](https://github.com/rust-lang/libc/pull/4441)) - -### Fixed - -- AIX: Enable libc-test and fix definitions/declarations. ([#4450](https://github.com/rust-lang/libc/pull/4450)) -- Emscripten: Fix querying emcc on windows (use emcc.bat) ([#4248](https://github.com/rust-lang/libc/pull/4248)) -- Hurd: Fix build from missing `fpos_t` ([#4472](https://github.com/rust-lang/libc/pull/4472)) -- Loongarch64 Musl: Fix the `struct ipc_perm` bindings ([#4384](https://github.com/rust-lang/libc/pull/4384)) -- Musl: Fix the `O_LARGEFILE` constant value. ([#4443](https://github.com/rust-lang/libc/pull/4443)) - -## [0.2.172](https://github.com/rust-lang/libc/compare/0.2.171...0.2.172) - 2025-04-14 - -### Added - -- Android: Add `getauxval` for 32-bit targets ([#4338](https://github.com/rust-lang/libc/pull/4338)) -- Android: Add `if_tun.h` ioctls ([#4379](https://github.com/rust-lang/libc/pull/4379)) -- Android: Define `SO_BINDTOIFINDEX` ([#4391](https://github.com/rust-lang/libc/pull/4391)) -- Cygwin: Add `posix_spawn_file_actions_add[f]chdir[_np]` ([#4387](https://github.com/rust-lang/libc/pull/4387)) -- Cygwin: Add new socket options ([#4350](https://github.com/rust-lang/libc/pull/4350)) -- Cygwin: Add statfs & fcntl ([#4321](https://github.com/rust-lang/libc/pull/4321)) -- FreeBSD: Add `filedesc` and `fdescenttbl` ([#4327](https://github.com/rust-lang/libc/pull/4327)) -- Glibc: Add unstable support for _FILE_OFFSET_BITS=64 ([#4345](https://github.com/rust-lang/libc/pull/4345)) -- Hermit: Add `AF_UNSPEC` ([#4344](https://github.com/rust-lang/libc/pull/4344)) -- Hermit: Add `AF_VSOCK` ([#4344](https://github.com/rust-lang/libc/pull/4344)) -- Illumos, NetBSD: Add `timerfd` APIs ([#4333](https://github.com/rust-lang/libc/pull/4333)) -- Linux: Add `_IO`, `_IOW`, `_IOR`, `_IOWR` to the exported API ([#4325](https://github.com/rust-lang/libc/pull/4325)) -- Linux: Add `tcp_info` to uClibc bindings ([#4347](https://github.com/rust-lang/libc/pull/4347)) -- Linux: Add further BPF program flags ([#4356](https://github.com/rust-lang/libc/pull/4356)) -- Linux: Add missing INPUT_PROP_XXX flags from `input-event-codes.h` ([#4326](https://github.com/rust-lang/libc/pull/4326)) -- Linux: Add missing TLS bindings ([#4296](https://github.com/rust-lang/libc/pull/4296)) -- Linux: Add more constants from `seccomp.h` ([#4330](https://github.com/rust-lang/libc/pull/4330)) -- Linux: Add more glibc `ptrace_sud_config` and related `PTRACE_*ET_SYSCALL_USER_DISPATCH_CONFIG`. 
([#4386](https://github.com/rust-lang/libc/pull/4386)) -- Linux: Add new netlink flags ([#4288](https://github.com/rust-lang/libc/pull/4288)) -- Linux: Define ioctl codes on more architectures ([#4382](https://github.com/rust-lang/libc/pull/4382)) -- Linux: Add missing `pthread_attr_setstack` ([#4349](https://github.com/rust-lang/libc/pull/4349)) -- Musl: Add missing `utmpx` API ([#4332](https://github.com/rust-lang/libc/pull/4332)) -- Musl: Enable `getrandom` on all platforms ([#4346](https://github.com/rust-lang/libc/pull/4346)) -- NuttX: Add more signal constants ([#4353](https://github.com/rust-lang/libc/pull/4353)) -- QNX: Add QNX 7.1-iosock and 8.0 to list of additional cfgs ([#4169](https://github.com/rust-lang/libc/pull/4169)) -- QNX: Add support for alternative Neutrino network stack `io-sock` ([#4169](https://github.com/rust-lang/libc/pull/4169)) -- Redox: Add more `sys/socket.h` and `sys/uio.h` definitions ([#4388](https://github.com/rust-lang/libc/pull/4388)) -- Solaris: Temporarily define `O_DIRECT` and `SIGINFO` ([#4348](https://github.com/rust-lang/libc/pull/4348)) -- Solarish: Add `secure_getenv` ([#4342](https://github.com/rust-lang/libc/pull/4342)) -- VxWorks: Add missing `d_type` member to `dirent` ([#4352](https://github.com/rust-lang/libc/pull/4352)) -- VxWorks: Add missing signal-related constsants ([#4352](https://github.com/rust-lang/libc/pull/4352)) -- VxWorks: Add more error codes ([#4337](https://github.com/rust-lang/libc/pull/4337)) - -### Deprecated - -- FreeBSD: Deprecate `TCP_PCAP_OUT` and `TCP_PCAP_IN` ([#4381](https://github.com/rust-lang/libc/pull/4381)) - -### Fixed - -- Cygwin: Fix member types of `statfs` ([#4324](https://github.com/rust-lang/libc/pull/4324)) -- Cygwin: Fix tests ([#4357](https://github.com/rust-lang/libc/pull/4357)) -- Hermit: Make `AF_INET = 3` ([#4344](https://github.com/rust-lang/libc/pull/4344)) -- Musl: Fix the syscall table on RISC-V-32 ([#4335](https://github.com/rust-lang/libc/pull/4335)) -- Musl: Fix the value of `SA_ONSTACK` on RISC-V-32 ([#4335](https://github.com/rust-lang/libc/pull/4335)) -- VxWorks: Fix a typo in the `waitpid` parameter name ([#4334](https://github.com/rust-lang/libc/pull/4334)) - -### Removed - -- Musl: Remove `O_FSYNC` on RISC-V-32 (use `O_SYNC` instead) ([#4335](https://github.com/rust-lang/libc/pull/4335)) -- Musl: Remove `RTLD_DEEPBIND` on RISC-V-32 ([#4335](https://github.com/rust-lang/libc/pull/4335)) - -### Other - -- CI: Add matrix env variables to the environment ([#4345](https://github.com/rust-lang/libc/pull/4345)) -- CI: Always deny warnings ([#4363](https://github.com/rust-lang/libc/pull/4363)) -- CI: Always upload successfully created artifacts ([#4345](https://github.com/rust-lang/libc/pull/4345)) -- CI: Install musl from source for loongarch64 ([#4320](https://github.com/rust-lang/libc/pull/4320)) -- CI: Revert "Also skip `MFD_EXEC` and `MFD_NOEXEC_SEAL` on sparc64" ([#]()) -- CI: Use `$PWD` instead of `$(pwd)` in run-docker ([#4345](https://github.com/rust-lang/libc/pull/4345)) -- Solarish: Restrict `openpty` and `forkpty` polyfills to Illumos, replace Solaris implementation with bindings ([#4329](https://github.com/rust-lang/libc/pull/4329)) -- Testing: Ensure the makedev test does not emit unused errors ([#4363](https://github.com/rust-lang/libc/pull/4363)) - -## [0.2.171](https://github.com/rust-lang/libc/compare/0.2.170...0.2.171) - 2025-03-11 - -### Added - -- Android: Add `if_nameindex`/`if_freenameindex` support ([#4247](https://github.com/rust-lang/libc/pull/4247)) -- Apple: Add 
missing proc types and constants ([#4310](https://github.com/rust-lang/libc/pull/4310)) -- BSD: Add `devname` ([#4285](https://github.com/rust-lang/libc/pull/4285)) -- Cygwin: Add PTY and group API ([#4309](https://github.com/rust-lang/libc/pull/4309)) -- Cygwin: Add support ([#4279](https://github.com/rust-lang/libc/pull/4279)) -- FreeBSD: Make `spawn.h` interfaces available on all FreeBSD-like systems ([#4294](https://github.com/rust-lang/libc/pull/4294)) -- Linux: Add `AF_XDP` structs for all Linux environments ([#4163](https://github.com/rust-lang/libc/pull/4163)) -- Linux: Add SysV semaphore constants ([#4286](https://github.com/rust-lang/libc/pull/4286)) -- Linux: Add `F_SEAL_EXEC` ([#4316](https://github.com/rust-lang/libc/pull/4316)) -- Linux: Add `SO_PREFER_BUSY_POLL` and `SO_BUSY_POLL_BUDGET` ([#3917](https://github.com/rust-lang/libc/pull/3917)) -- Linux: Add `devmem` structs ([#4299](https://github.com/rust-lang/libc/pull/4299)) -- Linux: Add socket constants up to `SO_DEVMEM_DONTNEED` ([#4299](https://github.com/rust-lang/libc/pull/4299)) -- NetBSD, OpenBSD, DragonflyBSD: Add `closefrom` ([#4290](https://github.com/rust-lang/libc/pull/4290)) -- NuttX: Add `pw_passwd` field to `passwd` ([#4222](https://github.com/rust-lang/libc/pull/4222)) -- Solarish: define `IP_BOUND_IF` and `IPV6_BOUND_IF` ([#4287](https://github.com/rust-lang/libc/pull/4287)) -- Wali: Add bindings for `wasm32-wali-linux-musl` target ([#4244](https://github.com/rust-lang/libc/pull/4244)) - -### Changed - -- AIX: Use `sa_sigaction` instead of a union ([#4250](https://github.com/rust-lang/libc/pull/4250)) -- Make `msqid_ds.__msg_cbytes` public ([#4301](https://github.com/rust-lang/libc/pull/4301)) -- Unix: Make all `major`, `minor`, `makedev` into `const fn` ([#4208](https://github.com/rust-lang/libc/pull/4208)) - -### Deprecated - -- Linux: Deprecate obsolete packet filter interfaces ([#4267](https://github.com/rust-lang/libc/pull/4267)) - -### Fixed - -- Cygwin: Fix strerror_r ([#4308](https://github.com/rust-lang/libc/pull/4308)) -- Cygwin: Fix usage of f! 
([#4308](https://github.com/rust-lang/libc/pull/4308)) -- Hermit: Make `stat::st_size` signed ([#4298](https://github.com/rust-lang/libc/pull/4298)) -- Linux: Correct values for `SI_TIMER`, `SI_MESGQ`, `SI_ASYNCIO` ([#4292](https://github.com/rust-lang/libc/pull/4292)) -- NuttX: Update `tm_zone` and `d_name` fields to use `c_char` type ([#4222](https://github.com/rust-lang/libc/pull/4222)) -- Xous: Include the prelude to define `c_int` ([#4304](https://github.com/rust-lang/libc/pull/4304)) - -### Other - -- Add labels to FIXMEs ([#4231](https://github.com/rust-lang/libc/pull/4231), [#4232](https://github.com/rust-lang/libc/pull/4232), [#4234](https://github.com/rust-lang/libc/pull/4234), [#4235](https://github.com/rust-lang/libc/pull/4235), [#4236](https://github.com/rust-lang/libc/pull/4236)) -- CI: Fix "cannot find libc" error on Sparc64 ([#4317](https://github.com/rust-lang/libc/pull/4317)) -- CI: Fix "cannot find libc" error on s390x ([#4317](https://github.com/rust-lang/libc/pull/4317)) -- CI: Pass `--no-self-update` to `rustup update` ([#4306](https://github.com/rust-lang/libc/pull/4306)) -- CI: Remove tests for the `i586-pc-windows-msvc` target ([#4311](https://github.com/rust-lang/libc/pull/4311)) -- CI: Remove the `check_cfg` job ([#4322](https://github.com/rust-lang/libc/pull/4312)) -- Change the range syntax that is giving `ctest` problems ([#4311](https://github.com/rust-lang/libc/pull/4311)) -- Linux: Split out the stat struct for gnu/b32/mips ([#4276](https://github.com/rust-lang/libc/pull/4276)) - -### Removed - -- NuttX: Remove `pthread_set_name_np` ([#4251](https://github.com/rust-lang/libc/pull/4251)) - -## [0.2.170](https://github.com/rust-lang/libc/compare/0.2.169...0.2.170) - 2025-02-23 - -### Added - -- Android: Declare `setdomainname` and `getdomainname` -- FreeBSD: Add `evdev` structures -- FreeBSD: Add the new `st_filerev` field to `stat32` ([#4254](https://github.com/rust-lang/libc/pull/4254)) -- Linux: Add `SI_*`` and `TRAP_*`` signal codes -- Linux: Add experimental configuration to enable 64-bit time in kernel APIs, set by `RUST_LIBC_UNSTABLE_LINUX_TIME_BITS64`. -- Linux: Add recent socket timestamping flags -- Linux: Added new CANFD_FDF flag for the flags field of canfd_frame -- Musl: add CLONE_NEWTIME -- Solarish: add the posix_spawn family of functions - -### Deprecated - -- Linux: deprecate kernel modules syscalls - -### Changed - -- Emscripten: Assume version is at least 3.1.42 - -### Fixed - -- BSD: Correct the definition of `WEXITSTATUS` -- Hurd: Fix CMSG_DATA on 64bit systems ([#4240](https://github.com/rust-lang/libc/pull/424)) -- NetBSD: fix `getmntinfo` ([#4265](https://github.com/rust-lang/libc/pull/4265) -- VxWorks: Fix the size of `time_t` - -### Other - -- Add labels to FIXMEs , , -- CI: Bump FreeBSD CI to 13.4 and 14.2 -- Copy definitions from core::ffi and centralize them -- Define c_char at top-level and remove per-target c_char definitions -- Port style.rs to syn and add tests for the style checker - -## [0.2.169](https://github.com/rust-lang/libc/compare/0.2.168...0.2.169) - 2024-12-18 - -### Added - -- FreeBSD: add more socket TCP stack constants -- Fuchsia: add a `sockaddr_vm` definition - -### Fixed - -**Breaking**: [rust-lang/rust#132975](https://github.com/rust-lang/rust/pull/132975) corrected the signedness of `core::ffi::c_char` on various Tier 2 and Tier 3 platforms (mostly Arm and RISC-V) to match Clang. 
This release contains the corresponding changes to `libc`, including the following specific pull requests: - -- ESP-IDF: Replace arch-conditional `c_char` with a reexport -- Fix `c_char` on various targets -- Mirror `c_char` configuration from `rust-lang/rust` - -### Cleanup - -- Do not re-export `c_void` in target-specific code - -## [0.2.168](https://github.com/rust-lang/libc/compare/0.2.167...0.2.168) - 2024-12-09 - -### Added - -- Linux: Add new process flags ([#4174](https://github.com/rust-lang/libc/pull/4174)) -- Linux: Make `IFA_*` constants available on all Linux targets -- Linux: add `MAP_DROPPABLE` -- Solaris, Illumos: add `SIGRTMIN` and `SIGRTMAX` -- Unix, Linux: adding POSIX `memccpy` and `mempcpy` GNU extension -- CI: Upload artifacts created by libc-test -- CI: Use workflow commands to group output by target -- CI: add caching - -## [0.2.167](https://github.com/rust-lang/libc/compare/0.2.166...0.2.167) - 2024-11-28 - -### Added - -- Solarish: add `st_fstype` to `stat` -- Trusty: Add `intptr_t` and `uintptr_t` ([#4161](https://github.com/rust-lang/libc/pull/4161)) - -### Fixed - -- Fix the build with `rustc-dep-of-std` -- Wasi: Add back unsafe block for `clockid_t` static variables ([#4157](https://github.com/rust-lang/libc/pull/4157)) - -### Cleanup - -- Create an internal prelude -- Fix `unused_qualifications` - -### Other - -- CI: Check various FreeBSD versions ([#4159](https://github.com/rust-lang/libc/pull/4159)) -- CI: add a timeout for all jobs -- CI: verify MSRV for `wasm32-wasi` -- Migrate to the 2021 edition - -### Removed - -- Remove one unused import after the edition 2021 bump - -## [0.2.166](https://github.com/rust-lang/libc/compare/0.2.165...0.2.166) - 2024-11-26 - -### Fixed - -This release resolves two cases of unintentional breakage from the previous release: - -- Revert removal of array size hacks [#4150](https://github.com/rust-lang/libc/pull/4150) -- Ensure `const extern` functions are always enabled [#4151](https://github.com/rust-lang/libc/pull/4151) - -## [0.2.165](https://github.com/rust-lang/libc/compare/0.2.164...0.2.165) - 2024-11-25 - -### Added - -- Android: add `mkostemp`, `mkostemps` -- Android: add a few API 30 calls -- Android: add missing syscall constants -- Apple: add `in6_ifreq` -- Apple: add missing `sysctl` net types (before release: remove `if_family_id` ([#4137](https://github.com/rust-lang/libc/pulls/4137))) -- Freebsd: add `kcmp` call support -- Hurd: add `MAP_32BIT` and `MAP_EXCL` -- Hurd: add `domainname` field to `utsname` ([#4089](https://github.com/rust-lang/libc/pulls/4089)) -- Linux GNU: add `f_flags` to struct `statfs` for arm, mips, powerpc and x86 -- Linux GNU: add `malloc_stats` -- Linux: add ELF relocation-related structs -- Linux: add `ptp_*` structs -- Linux: add `ptp_clock_caps` -- Linux: add `ptp_pin_function` and most `PTP_` constants -- Linux: add missing AF_XDP structs & constants -- Linux: add missing netfilter consts ([#3734](https://github.com/rust-lang/libc/pulls/3734)) -- Linux: add struct and constants for the `mount_setattr` syscall -- Linux: add wireless API -- Linux: expose the `len8_dlc` field of `can_frame` -- Musl: add `utmpx` API -- Musl: add missing syscall constants -- NetBSD: add `mcontext`-related data for RISCV64 -- Redox: add new `netinet` constants ) -- Solarish: add `_POSIX_VDISABLE` ([#4103](https://github.com/rust-lang/libc/pulls/4103)) -- Tests: Add a test that the `const extern fn` macro works -- Tests: Add test of primitive types against `std` -- Unix: Add `htonl`, `htons`, `ntohl`, 
`ntohs` -- Unix: add `aligned_alloc` -- Windows: add `aligned_realloc` - -### Fixed - -- **breaking** Hurd: fix `MAP_HASSEMAPHORE` name ([#4127](https://github.com/rust-lang/libc/pulls/4127)) -- **breaking** ulibc Mips: fix `SA_*` mismatched types ([#3211](https://github.com/rust-lang/libc/pulls/3211)) -- Aix: fix an enum FFI safety warning -- Haiku: fix some typos ([#3664](https://github.com/rust-lang/libc/pulls/3664)) -- Tests: fix `Elf{32,64}_Relr`-related tests -- Tests: fix libc-tests for `loongarch64-linux-musl` -- Tests: fix some clippy warnings -- Tests: fix tests on `riscv64gc-unknown-freebsd` - -### Deprecated - -- Apple: deprecate `iconv_open` -- Apple: deprecate `mach_task_self` -- Apple: update `mach` deprecation notices for things that were removed in `main` - -### Cleanup - -- Adjust the `f!` macro to be more flexible -- Aix: remove duplicate constants -- CI: make scripts more uniform -- Drop the `libc_align` conditional -- Drop the `libc_cfg_target_vendor` conditional -- Drop the `libc_const_size_of` conditional -- Drop the `libc_core_cvoid` conditional -- Drop the `libc_int128` conditional -- Drop the `libc_non_exhaustive` conditional -- Drop the `libc_packedN` conditional -- Drop the `libc_priv_mod_use` conditional -- Drop the `libc_union` conditional -- Drop the `long_array` conditional -- Drop the `ptr_addr_of` conditional -- Drop warnings about deprecated cargo features -- Eliminate uses of `struct_formatter` -- Fix a few other array size hacks -- Glibc: remove redundant definitions ([#3261](https://github.com/rust-lang/libc/pulls/3261)) -- Musl: remove redundant definitions ([#3261](https://github.com/rust-lang/libc/pulls/3261)) -- Musl: unify definitions of `siginfo_t` ([#3261](https://github.com/rust-lang/libc/pulls/3261)) -- Musl: unify definitions of statfs and statfs64 ([#3261](https://github.com/rust-lang/libc/pulls/3261)) -- Musl: unify definitions of statvfs and statvfs64 ([#3261](https://github.com/rust-lang/libc/pulls/3261)) -- Musl: unify statx definitions ([#3978](https://github.com/rust-lang/libc/pulls/3978)) -- Remove array size hacks for Rust < 1.47 -- Remove repetitive words -- Use #[derive] for Copy/Clone in s! and friends -- Use some tricks to format macro bodies - -### Other - -- Apply formatting to macro bodies -- Bump libc-test to Rust 2021 Edition -- CI: Add a check that semver files don't contain duplicate entries -- CI: Add `fanotify_event_info_fid` to FAM-exempt types -- CI: Allow rustfmt to organize imports ([#4136](https://github.com/rust-lang/libc/pulls/4136)) -- CI: Always run rustfmt -- CI: Change 32-bit Docker images to use EOL repos -- CI: Change 64-bit Docker images to ubuntu:24.10 -- CI: Disable the check for >1 s! 
invocation -- CI: Ensure build channels get run even if FILTER is unset -- CI: Ensure there is a fallback for no_std -- CI: Fix cases where unset variables cause errors -- CI: Naming adjustments and cleanup -- CI: Only invoke rustup if running in CI -- CI: Remove the logic to handle old rust versions -- CI: Set -u (error on unset) in all script files -- CI: add support for `loongarch64-unknown-linux-musl` -- CI: make `aarch64-apple-darwin` not a nightly-only target -- CI: run shellcheck on all scripts -- CI: update musl headers to Linux 6.6 -- CI: use qemu-sparc64 to run sparc64 tests -- Drop the `libc_const_extern_fn` conditional -- Drop the `libc_underscore_const_names` conditional -- Explicitly set the edition to 2015 -- Introduce a `git-blame-ignore-revs` file -- Tests: Ignore fields as required on Ubuntu 24.10 -- Tests: skip `ATF_*` constants for OpenBSD -- Triagebot: Add an autolabel for CI - -## [0.2.164](https://github.com/rust-lang/libc/compare/0.2.163...0.2.164) - 2024-11-16 - -### MSRV - -This release increases the MSRV of `libc` to 1.63. - -### Other - -- CI: remove tests with rust < 1.63 -- MSRV: document the MSRV of the stable channel to be 1.63 -- MacOS: move ifconf to s_no_extra_traits - -## [0.2.163](https://github.com/rust-lang/libc/compare/0.2.162...0.2.163) - 2024-11-16 - -### Added - -- Aix: add more `dlopen` flags -- Android: add group calls -- FreeBSD: add `TCP_FUNCTION_BLK` and `TCP_FUNCTION_ALIAS` -- Linux: add `confstr` -- Solarish: add `aio` -- Solarish: add `arc4random*` - -### Changed - -- Emscripten: upgrade emsdk to 3.1.68 -- Hurd: use more standard types -- Hurd: use the standard `ssize_t = isize` -- Solaris: fix `confstr` and `ucontext_t` - -### Other - -- CI: add Solaris -- CI: add `i686-unknown-freebsd` -- CI: ensure that calls to `sort` do not depend on locale -- Specify `rust-version` in `Cargo.toml` - -## [0.2.162](https://github.com/rust-lang/libc/compare/0.2.161...0.2.162) - 2024-11-07 - -### Added - -- Android: fix the alignment of `uc_mcontext` on arm64 -- Apple: add `host_cpu_load_info` -- ESP-IDF: add a time flag -- FreeBSD: add the `CLOSE_RANGE_CLOEXEC` flag -- FreeBSD: fix test errors regarding `__gregset_t` -- FreeBSD: fix tests on x86 FreeBSD 15 -- FreeBSD: make `ucontext_t` and `mcontext_t` available on all architectures -- Haiku: add `getentropy` -- Illumos: add `syncfs` -- Illumos: add some recently-added constants -- Linux: add `ioctl` flags -- Linux: add epoll busy polling parameters -- NuttX: add `pthread_[get/set]name_np` -- RTEMS: add `arc4random_buf` -- Trusty OS: add initial support -- WASIp2: expand socket support - -### Fixed - -- Emscripten: don't pass `-lc` -- Hurd: change `st_fsid` field to `st_dev` -- Hurd: fix the definition of `utsname` -- Illumos/Solaris: fix `FNM_CASEFOLD` definition -- Solaris: fix all tests - -### Other - -- CI: Add loongarch64 -- CI: Check that semver files are sorted -- CI: Re-enable the FreeBSD 15 job -- Clean up imports and `extern crate` usage -- Convert `mode_t` constants to octal -- Remove the `wasm32-wasi` target that has been deleted upstream - -## [0.2.161](https://github.com/rust-lang/libc/compare/0.2.160...0.2.161) - 2024-10-17 - -### Fixed - -- OpenBSD: fix `FNM_PATHNAME` and `FNM_NOESCAPE` values - -## [0.2.160](https://github.com/rust-lang/libc/compare/0.2.159...0.2.160) - 2024-10-17 - -### Added - -- Android: add `PR_GET_NAME` and `PR_SET_NAME` -- Apple: add `F_TRANSFEREXTENTS` -- Apple: add `mach_error_string` -- Apple: add additional `pthread` APIs -- Apple: add the `LOCAL_PEERTOKEN` 
socket option -- BSD: add `RTF_*`, `RTA_*`, `RTAX_*`, and `RTM_*` definitions -- Emscripten: add `AT_EACCESS` -- Emscripten: add `getgrgid`, `getgrnam`, `getgrnam_r` and `getgrgid_r` -- Emscripten: add `getpwnam_r` and `getpwuid_r` -- FreeBSD: add `POLLRDHUP` -- Haiku: add `arc4random` -- Illumos: add `ptsname_r` -- Linux: add `fanotify` interfaces -- Linux: add `tcp_info` -- Linux: add additional AF_PACKET options -- Linux: make Elf constants always available -- Musl x86: add `iopl` and `ioperm` -- Musl: add `posix_spawn` chdir functions -- Musl: add `utmpx.h` constants -- NetBSD: add `sysctlnametomib`, `CLOCK_THREAD_CPUTIME_ID` and `CLOCK_PROCESS_CPUTIME_ID` -- Nuttx: initial support -- RTEMS: add `getentropy` -- RTEMS: initial support -- Solarish: add `POLLRDHUP`, `POSIX_FADV_*`, `O_RSYNC`, and `posix_fallocate` -- Unix: add `fnmatch.h` -- VxWorks: add riscv64 support -- VxWorks: update constants related to the scheduler - -### Changed - -- Redox: change `ino_t` to be `c_ulonglong` - -### Fixed - -- ESP-IDF: fix mismatched constants and structs -- FreeBSD: fix `struct stat` on FreeBSD 12+ - -### Other - -- CI: Fix CI for FreeBSD 15 -- Docs: link to `windows-sys` - -## [0.2.159](https://github.com/rust-lang/libc/compare/0.2.158...0.2.159) - 2024-09-24 - -### Added - -- Android: add more `AT_*` constants in -- Apple: add missing `NOTE_*` constants in -- Hermit: add missing error numbers in -- Hurd: add `__timeval` for 64-bit support in -- Linux: add `epoll_pwait2` in -- Linux: add `mq_notify` in -- Linux: add missing `NFT_CT_*` constants in -- Linux: add the `fchmodat2` syscall in -- Linux: add the `mseal` syscall in -- OpenBSD: add `sendmmsg` and `recvmmsg` in -- Unix: add `IN6ADDR_ANY_INIT` and `IN6ADDR_LOOPBACK_INIT` in -- VxWorks: add `S_ISVTX` in -- VxWorks: add `vxCpuLib` and `taskLib` functions -- WASIp2: add definitions for `std::net` support in - -### Fixed - -- Correctly handle version checks when `clippy-driver` is used - -### Changed - -- EspIdf: change signal constants to c_int in -- HorizonOS: update network definitions in -- Linux: combine `ioctl` APIs in -- WASI: enable CI testing in -- WASIp2: enable CI testing in - -## [0.2.158](https://github.com/rust-lang/libc/compare/0.2.157...0.2.158) - 2024-08-19 - -### Other -- WASI: fix missing `Iterator` with `rustc-dep-of-std` in - -## [0.2.157](https://github.com/rust-lang/libc/compare/0.2.156...0.2.157) - 2024-08-17 - -### Added - -- Apple: add `_NSGetArgv`, `_NSGetArgc` and `_NSGetProgname` in -- Build: add `RUSTC_WRAPPER` support in -- FreeBSD: add `execvpe` support from 14.1 release in -- Fuchsia: add `SO_BINDTOIFINDEX` -- Linux: add `klogctl` in -- MacOS: add `fcntl` OFD commands in -- NetBSD: add `_lwp_park` in -- Solaris: add missing networking support in -- Unix: add `pthread_equal` in -- WASI: add `select`, `FD_SET`, `FD_ZERO`, `FD_ISSET ` in - -### Fixed -- TEEOS: fix octal notation for `O_*` constants in - -### Changed -- FreeBSD: always use freebsd12 when `rustc_dep_of_std` is set in - -## [0.2.156](https://github.com/rust-lang/libc/compare/v0.2.155...v0.2.156) - 2024-08-15 - -### Added -- Apple: add `F_ALLOCATEPERSIST` in -- Apple: add `os_sync_wait_on_address` and related definitions in -- BSD: generalise `IPV6_DONTFRAG` to all BSD targets in -- FreeBSD/DragonFly: add `IP_RECVTTL`/`IPV6_RECVHOPLIMIT` in -- Hurd: add `XATTR_CREATE`, `XATTR_REPLACE` in -- Linux GNU: `confstr` API and `_CS_*` in -- Linux musl: add `preadv2` and `pwritev2` (1.2.5 min.) 
in -- VxWorks: add the constant `SOMAXCONN` in -- VxWorks: add a few errnoLib related constants in - -### Fixed -- Solaris/illumos: Change `ifa_flags` type to u64 in -- QNX 7.0: Disable `libregex` in - -### Changed -- QNX NTO: update platform support in -- `addr_of!(EXTERN_STATIC)` is now considered safe in - -### Removed -- Apple: remove `rmx_state` in - -### Other -- Update or remove CI tests that have been failing diff --git a/vendor/libc/CONTRIBUTING.md b/vendor/libc/CONTRIBUTING.md deleted file mode 100644 index 0cdfaeadf90593..00000000000000 --- a/vendor/libc/CONTRIBUTING.md +++ /dev/null @@ -1,126 +0,0 @@ -# Contributing to `libc` - -Welcome! If you are reading this document, it means you are interested in -contributing to the `libc` crate. - -## v1.0 Roadmap - -`libc` has two active branches: `main` and `libc-0.2`. `main` is for active -development of the upcoming v1.0 release, and should be the target of all pull -requests. `libc-0.2` is for updates to the currently released version. - -If a pull request to `main` is a good candidate for inclusion in an `0.2.x` -release, include `@rustbot label stable-nominated` in a comment to propose this. -Good candidates will usually meet the following: - -1. The included changes are non-breaking. -2. The change applies cleanly to both branches. -3. There is a usecase that justifies inclusion in a stable release (all - additions should always have a usecase, hopefully). - -Once a `stable-nominated` PR targeting `main` has merged, it can be cherry -picked to the `libc-0.2` branch. A maintainer will likely do these cherry picks -in a batch. - -Alternatively, you can start this process yourself by creating a new branch -based on `libc-0.2` and running `git cherry-pick -xe commit-sha-on-main` -(`git -cherry-pick -xe start-sha^..end-sha` if a range of commits is needed). -`git` will automatically add the "cherry picked from commit" note, but try to -add a backport note so the original PR gets crosslinked: - -``` -# ... original commit message ... - -(backport ) # add manually -(cherry picked from commit 104b6a4ae31c726814c36318dc718470cc96e167) # added by git -``` - -Once the cherry-pick is complete, open a PR targeting `libc-0.2`. - -See the [tracking issue](https://github.com/rust-lang/libc/issues/3248) for -details. - -## Adding an API - -Want to use an API which currently isn't bound in `libc`? It's quite easy to add -one! - -The internal structure of this crate is designed to minimize the number of -`#[cfg]` attributes in order to easily be able to add new items which apply to -all platforms in the future. As a result, the crate is organized hierarchically -based on platform. Each module has a number of `#[cfg]`'d children, but only one -is ever actually compiled. Each module then reexports all the contents of its -children. - -This means that for each platform that libc supports, the path from a leaf -module to the root will contain all bindings for the platform in question. -Consequently, this indicates where an API should be added! Adding an API at a -particular level in the hierarchy means that it is supported on all the child -platforms of that level. For example, when adding a Unix API it should be added -to `src/unix/mod.rs`, but when adding a Linux-only API it should be added to -`src/unix/linux_like/linux/mod.rs`. - -If you're not 100% sure at what level of the hierarchy an API should be added -at, fear not! 
This crate has CI support which tests any binding against all -platforms supported, so you'll see failures if an API is added at the wrong -level or has different signatures across platforms. - -New symbol(s) (i.e. functions, constants etc.) should also be added to the -symbols list(s) found in the `libc-test/semver` directory. These lists keep -track of what symbols are public in the libc crate and ensures they remain -available between changes to the crate. If the new symbol(s) are available on -all supported Unixes it should be added to `unix.txt` list1, -otherwise they should be added to the OS specific list(s). - -With that in mind, the steps for adding a new API are: - -1. Determine where in the module hierarchy your API should be added. -2. Add the API, including adding new symbol(s) to the semver lists. -3. Send a PR to this repo. -4. Wait for CI to pass, fixing errors. -5. Wait for a merge! - -1: Note that this list has nothing to do with any Unix or Posix -standard, it's just a list shared among all OSs that declare `#[cfg(unix)]`. - -## Test before you commit - -We have two automated tests running on -[GitHub Actions](https://github.com/rust-lang/libc/actions): - -1. `libc-test` - - `cd libc-test && cargo test` - - Use the `skip_*()` functions in `build.rs` if you really need a workaround. -2. Style checker - - [`./ci/style.sh`](https://github.com/rust-lang/libc/blob/main/ci/style.sh) - -## Breaking change policy - -Sometimes an upstream adds a breaking change to their API e.g. removing outdated -items, changing the type signature, etc. And we probably should follow that -change to build the `libc` crate successfully. It's annoying to do the -equivalent of semver-major versioning for each such change. Instead, we mark the -item as deprecated and do the actual change after a certain period. The steps -are: - -1. Add `#[deprecated(since = "", note="")]` attribute to the item. - - The `since` field should have a next version of `libc` (e.g., if the current - version is `0.2.1`, it should be `0.2.2`). - - The `note` field should have a reason to deprecate and a tracking issue to - call for comments (e.g., "We consider removing this as the upstream removed - it. If you're using it, please comment on #XXX"). -2. If we don't see any concerns for a while, do the change actually. - -## Supported target policy - -When Rust removes a support for a target, the libc crate also may remove the -support at any time. - -## Releasing your change to crates.io - -This repository uses [release-plz] to handle releases. Once your pull request -has been merged, a maintainer just needs to verify the generated changelog, then -merge the bot's release PR. This will automatically publish to crates.io! - -[release-plz]: https://github.com/MarcoIeni/release-plz diff --git a/vendor/libc/Cargo.lock b/vendor/libc/Cargo.lock deleted file mode 100644 index 5b7b58c2cd6076..00000000000000 --- a/vendor/libc/Cargo.lock +++ /dev/null @@ -1,16 +0,0 @@ -# This file is automatically @generated by Cargo. -# It is not intended for manual editing. 
-version = 3 - -[[package]] -name = "libc" -version = "0.2.177" -dependencies = [ - "rustc-std-workspace-core", -] - -[[package]] -name = "rustc-std-workspace-core" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa9c45b374136f52f2d6311062c7146bff20fec063c3f5d46a410bd937746955" diff --git a/vendor/libc/Cargo.toml b/vendor/libc/Cargo.toml deleted file mode 100644 index d6c80a49e03bc8..00000000000000 --- a/vendor/libc/Cargo.toml +++ /dev/null @@ -1,201 +0,0 @@ -# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO -# -# When uploading crates to the registry Cargo will automatically -# "normalize" Cargo.toml files for maximal compatibility -# with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g., crates.io) dependencies. -# -# If you are reading this file be aware that the original Cargo.toml -# will likely look very different (and much more reasonable). -# See Cargo.toml.orig for the original contents. - -[package] -edition = "2021" -rust-version = "1.63" -name = "libc" -version = "0.2.177" -authors = ["The Rust Project Developers"] -build = "build.rs" -exclude = [ - "/ci/*", - "/.github/*", - "/.cirrus.yml", - "/triagebot.toml", -] -autolib = false -autobins = false -autoexamples = false -autotests = false -autobenches = false -description = "Raw FFI bindings to platform libraries like libc." -readme = "README.md" -keywords = [ - "libc", - "ffi", - "bindings", - "operating", - "system", -] -categories = [ - "external-ffi-bindings", - "no-std", - "os", -] -license = "MIT OR Apache-2.0" -repository = "https://github.com/rust-lang/libc" - -[package.metadata.docs.rs] -features = ["extra_traits"] -default-target = "x86_64-unknown-linux-gnu" -targets = [ - "aarch64-apple-darwin", - "aarch64-apple-ios", - "aarch64-linux-android", - "aarch64-pc-windows-msvc", - "aarch64-unknown-freebsd", - "aarch64-unknown-fuchsia", - "aarch64-unknown-hermit", - "aarch64-unknown-linux-gnu", - "aarch64-unknown-linux-musl", - "aarch64-unknown-netbsd", - "aarch64-unknown-openbsd", - "aarch64-wrs-vxworks", - "arm-linux-androideabi", - "arm-unknown-linux-gnueabi", - "arm-unknown-linux-gnueabihf", - "arm-unknown-linux-musleabi", - "arm-unknown-linux-musleabihf", - "armebv7r-none-eabi", - "armebv7r-none-eabihf", - "armv5te-unknown-linux-gnueabi", - "armv5te-unknown-linux-musleabi", - "armv7-linux-androideabi", - "armv7-unknown-linux-gnueabihf", - "armv7-unknown-linux-musleabihf", - "armv7-wrs-vxworks-eabihf", - "armv7r-none-eabi", - "armv7r-none-eabihf", - "i586-unknown-linux-gnu", - "i586-unknown-linux-musl", - "i686-linux-android", - "i686-pc-windows-gnu", - "i686-pc-windows-msvc", - "i686-pc-windows-msvc", - "i686-unknown-freebsd", - "i686-unknown-haiku", - "i686-unknown-linux-gnu", - "i686-unknown-linux-musl", - "i686-unknown-netbsd", - "i686-unknown-openbsd", - "i686-wrs-vxworks", - "mips-unknown-linux-gnu", - "mips-unknown-linux-musl", - "mips64-unknown-linux-gnuabi64", - "mips64-unknown-linux-muslabi64", - "mips64el-unknown-linux-gnuabi64", - "mips64el-unknown-linux-muslabi64", - "mipsel-sony-psp", - "mipsel-unknown-linux-gnu", - "mipsel-unknown-linux-musl", - "nvptx64-nvidia-cuda", - "powerpc-unknown-linux-gnu", - "powerpc-unknown-linux-gnuspe", - "powerpc-unknown-netbsd", - "powerpc-wrs-vxworks", - "powerpc-wrs-vxworks-spe", - "powerpc64-ibm-aix", - "powerpc64-unknown-freebsd", - "powerpc64-unknown-linux-gnu", - "powerpc64-wrs-vxworks", - "powerpc64le-unknown-linux-gnu", - "powerpc64le-unknown-linux-musl", - "riscv32gc-unknown-linux-gnu", 
- "riscv32i-unknown-none-elf", - "riscv32imac-unknown-none-elf", - "riscv32imc-unknown-none-elf", - "riscv32-wrs-vxworks", - "riscv64gc-unknown-freebsd", - "riscv64gc-unknown-hermit", - "riscv64gc-unknown-linux-gnu", - "riscv64gc-unknown-linux-musl", - "riscv64gc-unknown-none-elf", - "riscv64imac-unknown-none-elf", - "riscv64-wrs-vxworks", - "s390x-unknown-linux-gnu", - "s390x-unknown-linux-musl", - "sparc-unknown-linux-gnu", - "sparc64-unknown-linux-gnu", - "sparc64-unknown-netbsd", - "sparcv9-sun-solaris", - "thumbv6m-none-eabi", - "thumbv7em-none-eabi", - "thumbv7em-none-eabihf", - "thumbv7m-none-eabi", - "thumbv7neon-linux-androideabi", - "thumbv7neon-unknown-linux-gnueabihf", - "wasm32-unknown-emscripten", - "wasm32-unknown-unknown", - "x86_64-apple-darwin", - "x86_64-apple-ios", - "x86_64-fortanix-unknown-sgx", - "x86_64-linux-android", - "x86_64-pc-solaris", - "x86_64-pc-windows-gnu", - "x86_64-pc-windows-msvc", - "x86_64-unknown-dragonfly", - "x86_64-unknown-freebsd", - "x86_64-unknown-fuchsia", - "x86_64-unknown-haiku", - "x86_64-unknown-hermit", - "x86_64-unknown-illumos", - "x86_64-unknown-l4re-uclibc", - "x86_64-unknown-linux-gnu", - "x86_64-unknown-linux-gnux32", - "x86_64-unknown-linux-musl", - "x86_64-unknown-netbsd", - "x86_64-unknown-openbsd", - "x86_64-unknown-redox", - "x86_64-wrs-vxworks", -] -cargo-args = ["-Zbuild-std=core"] - -[features] -align = [] -const-extern-fn = [] -default = ["std"] -extra_traits = [] -rustc-dep-of-std = [ - "align", - "rustc-std-workspace-core", -] -std = [] -use_std = ["std"] - -[lib] -name = "libc" -path = "src/lib.rs" - -[[test]] -name = "const_fn" -path = "tests/const_fn.rs" - -[dependencies.rustc-std-workspace-core] -version = "1.0.1" -optional = true - -[lints.clippy] -expl_impl_clone_on_copy = "allow" -explicit_iter_loop = "warn" -identity_op = "allow" -manual_assert = "warn" -map_unwrap_or = "warn" -missing_safety_doc = "allow" -non_minimal_cfg = "allow" -ptr_as_ptr = "warn" -uninlined_format_args = "allow" -unnecessary_cast = "allow" -unnecessary_semicolon = "warn" -used_underscore_binding = "allow" - -[lints.rust] -unused_qualifications = "allow" diff --git a/vendor/libc/LICENSE-APACHE b/vendor/libc/LICENSE-APACHE deleted file mode 100644 index 1b5ec8b78e237b..00000000000000 --- a/vendor/libc/LICENSE-APACHE +++ /dev/null @@ -1,176 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. 
- - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - -2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - -3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - -4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - -5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - -6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - -8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - -END OF TERMS AND CONDITIONS diff --git a/vendor/libc/LICENSE-MIT b/vendor/libc/LICENSE-MIT deleted file mode 100644 index 78061811c33c81..00000000000000 --- a/vendor/libc/LICENSE-MIT +++ /dev/null @@ -1,25 +0,0 @@ -Copyright (c) 2014-2020 The Rust Project Developers - -Permission is hereby granted, free of charge, to any -person obtaining a copy of this software and associated -documentation files (the "Software"), to deal in the -Software without restriction, including without -limitation the rights to use, copy, modify, merge, -publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software -is furnished to do so, subject to the following -conditions: - -The above copyright notice and this permission notice -shall be included in all copies or substantial portions -of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -DEALINGS IN THE SOFTWARE. diff --git a/vendor/libc/README.md b/vendor/libc/README.md deleted file mode 100644 index c616d8b29f52b2..00000000000000 --- a/vendor/libc/README.md +++ /dev/null @@ -1,117 +0,0 @@ -# libc - Raw FFI bindings to platforms' system libraries - -[![GHA Status]][GitHub Actions] [![Cirrus CI Status]][Cirrus CI] [![Latest Version]][crates.io] [![Documentation]][docs.rs] ![License] - -`libc` provides all of the definitions necessary to easily interoperate with C -code (or "C-like" code) on each of the platforms that Rust supports. This -includes type definitions (e.g. `c_int`), constants (e.g. `EINVAL`) as well as -function headers (e.g. `malloc`). - -This crate exports all underlying platform types, functions, and constants under -the crate root, so all items are accessible as `libc::foo`. 
The types and values -of all the exported APIs match the platform that libc is compiled for. - -Windows API bindings are not included in this crate. If you are looking for -WinAPI bindings, consider using crates like [windows-sys]. - -More detailed information about the design of this library can be found in its -[associated RFC][rfc]. - -[rfc]: https://github.com/rust-lang/rfcs/blob/HEAD/text/1291-promote-libc.md -[windows-sys]: https://docs.rs/windows-sys - -## v1.0 Roadmap - -Currently, `libc` has two active branches: `main` for the upcoming v1.0 release, -and `libc-0.2` for the currently published version. By default all pull requests -should target `main`; once reviewed, they can be cherry picked to the `libc-0.2` -branch if needed. - -We will stop making new v0.2 releases once v1.0 is released. - -See the section in [CONTRIBUTING.md](CONTRIBUTING.md#v10-roadmap) for more -details. - -## Usage - -Add the following to your `Cargo.toml`: - -```toml -[dependencies] -libc = "0.2" -``` - -## Features - -* `std`: by default `libc` links to the standard library. Disable this feature - to remove this dependency and be able to use `libc` in `#![no_std]` crates. - -* `extra_traits`: all `struct`s implemented in `libc` are `Copy` and `Clone`. - This feature derives `Debug`, `Eq`, `Hash`, and `PartialEq`. - -The following features are deprecated: - -* `use_std`: this is equivalent to `std` -* `const-extern-fn`: this is now enabled by default -* `align`: this is now enabled by default - -## Rust version support - -The minimum supported Rust toolchain version is currently **Rust 1.63**. - -Increases to the MSRV are allowed to change without a major (i.e. semver- -breaking) release in order to avoid a ripple effect in the ecosystem. A policy -for when this may change is a work in progress. - -`libc` may continue to compile with Rust versions older than the current MSRV -but this is not guaranteed. - -## Platform support - -You can see the platform(target)-specific docs on [docs.rs], select a platform -you want to see. - -See [`ci/verify-build.sh`](https://github.com/rust-lang/libc/blob/HEAD/ci/verify-build.sh) for -the platforms on which `libc` is guaranteed to build for each Rust toolchain. -The test-matrix at [GitHub Actions] and [Cirrus CI] show the platforms in which -`libc` tests are run. - -
- -## License - -This project is licensed under either of - -* [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0) - ([LICENSE-APACHE](https://github.com/rust-lang/libc/blob/HEAD/LICENSE-APACHE)) - -* [MIT License](https://opensource.org/licenses/MIT) - ([LICENSE-MIT](https://github.com/rust-lang/libc/blob/HEAD/LICENSE-MIT)) - -at your option. - -## Contributing - -We welcome all people who want to contribute. Please see the -[contributing instructions] for more information. - -[contributing instructions]: https://github.com/rust-lang/libc/blob/HEAD/CONTRIBUTING.md - -Contributions in any form (issues, pull requests, etc.) to this project must -adhere to Rust's [Code of Conduct]. - -[Code of Conduct]: https://www.rust-lang.org/policies/code-of-conduct - -Unless you explicitly state otherwise, any contribution intentionally submitted -for inclusion in `libc` by you, as defined in the Apache-2.0 license, shall be -dual licensed as above, without any additional terms or conditions. - -[GitHub Actions]: https://github.com/rust-lang/libc/actions -[GHA Status]: https://github.com/rust-lang/libc/workflows/CI/badge.svg -[Cirrus CI]: https://cirrus-ci.com/github/rust-lang/libc -[Cirrus CI Status]: https://api.cirrus-ci.com/github/rust-lang/libc.svg -[crates.io]: https://crates.io/crates/libc -[Latest Version]: https://img.shields.io/crates/v/libc.svg -[Documentation]: https://docs.rs/libc/badge.svg -[docs.rs]: https://docs.rs/libc -[License]: https://img.shields.io/crates/l/libc.svg diff --git a/vendor/libc/build.rs b/vendor/libc/build.rs deleted file mode 100644 index 802ea7a37def04..00000000000000 --- a/vendor/libc/build.rs +++ /dev/null @@ -1,298 +0,0 @@ -use std::process::{Command, Output}; -use std::{env, str}; - -// List of cfgs this build script is allowed to set. The list is needed to support check-cfg, as we -// need to know all the possible cfgs that this script will set. If you need to set another cfg -// make sure to add it to this list as well. -const ALLOWED_CFGS: &[&str] = &[ - "emscripten_old_stat_abi", - "espidf_time32", - "freebsd10", - "freebsd11", - "freebsd12", - "freebsd13", - "freebsd14", - "freebsd15", - // Corresponds to `_FILE_OFFSET_BITS=64` in glibc - "gnu_file_offset_bits64", - // Corresponds to `_TIME_BITS=64` in glibc - "gnu_time_bits64", - "libc_deny_warnings", - "libc_thread_local", - // Corresponds to `__USE_TIME_BITS64` in UAPI - "linux_time_bits64", - "musl_v1_2_3", -]; - -// Extra values to allow for check-cfg. -const CHECK_CFG_EXTRA: &[(&str, &[&str])] = &[ - ( - "target_os", - &[ - "switch", "aix", "ohos", "hurd", "rtems", "visionos", "nuttx", "cygwin", - ], - ), - ( - "target_env", - &["illumos", "wasi", "aix", "ohos", "nto71_iosock", "nto80"], - ), - ( - "target_arch", - &["loongarch64", "mips32r6", "mips64r6", "csky"], - ), -]; - -fn main() { - // Avoid unnecessary re-building. - println!("cargo:rerun-if-changed=build.rs"); - - let (rustc_minor_ver, _is_nightly) = rustc_minor_nightly(); - let rustc_dep_of_std = env::var("CARGO_FEATURE_RUSTC_DEP_OF_STD").is_ok(); - let libc_ci = env::var("LIBC_CI").is_ok(); - let target_env = env::var("CARGO_CFG_TARGET_ENV").unwrap_or_default(); - let target_os = env::var("CARGO_CFG_TARGET_OS").unwrap_or_default(); - let target_ptr_width = env::var("CARGO_CFG_TARGET_POINTER_WIDTH").unwrap_or_default(); - let target_arch = env::var("CARGO_CFG_TARGET_ARCH").unwrap_or_default(); - - // The ABI of libc used by std is backward compatible with FreeBSD 12. 
- // The ABI of libc from crates.io is backward compatible with FreeBSD 12. - // - // On CI, we detect the actual FreeBSD version and match its ABI exactly, - // running tests to ensure that the ABI is correct. - println!("cargo:rerun-if-env-changed=RUST_LIBC_UNSTABLE_FREEBSD_VERSION"); - // Allow overriding the default version for testing - let which_freebsd = if let Ok(version) = env::var("RUST_LIBC_UNSTABLE_FREEBSD_VERSION") { - let vers = version.parse().unwrap(); - println!("cargo:warning=setting FreeBSD version to {vers}"); - vers - } else if libc_ci { - which_freebsd().unwrap_or(12) - } else { - 12 - }; - - match which_freebsd { - x if x < 10 => panic!("FreeBSD older than 10 is not supported"), - 10 => set_cfg("freebsd10"), - 11 => set_cfg("freebsd11"), - 12 => set_cfg("freebsd12"), - 13 => set_cfg("freebsd13"), - 14 => set_cfg("freebsd14"), - _ => set_cfg("freebsd15"), - } - - match emcc_version_code() { - Some(v) if (v < 30142) => set_cfg("emscripten_old_stat_abi"), - // Non-Emscripten or version >= 3.1.42. - _ => (), - } - - let musl_v1_2_3 = env::var("RUST_LIBC_UNSTABLE_MUSL_V1_2_3").is_ok(); - println!("cargo:rerun-if-env-changed=RUST_LIBC_UNSTABLE_MUSL_V1_2_3"); - // loongarch64 and ohos have already updated - if musl_v1_2_3 || target_arch == "loongarch64" || target_env == "ohos" { - // FIXME(musl): enable time64 api as well - set_cfg("musl_v1_2_3"); - } - let linux_time_bits64 = env::var("RUST_LIBC_UNSTABLE_LINUX_TIME_BITS64").is_ok(); - println!("cargo:rerun-if-env-changed=RUST_LIBC_UNSTABLE_LINUX_TIME_BITS64"); - if linux_time_bits64 { - set_cfg("linux_time_bits64"); - } - println!("cargo:rerun-if-env-changed=RUST_LIBC_UNSTABLE_GNU_FILE_OFFSET_BITS"); - println!("cargo:rerun-if-env-changed=RUST_LIBC_UNSTABLE_GNU_TIME_BITS"); - if target_env == "gnu" - && target_os == "linux" - && target_ptr_width == "32" - && target_arch != "riscv32" - && target_arch != "x86_64" - { - let defaultbits = "32".to_string(); - let (timebits, filebits) = match ( - env::var("RUST_LIBC_UNSTABLE_GNU_TIME_BITS"), - env::var("RUST_LIBC_UNSTABLE_GNU_FILE_OFFSET_BITS"), - ) { - (Ok(_), Ok(_)) => panic!("Do not set both RUST_LIBC_UNSTABLE_GNU_TIME_BITS and RUST_LIBC_UNSTABLE_GNU_FILE_OFFSET_BITS"), - (Err(_), Err(_)) => (defaultbits.clone(), defaultbits.clone()), - (Ok(tb), Err(_)) if tb == "64" => (tb.clone(), tb.clone()), - (Ok(tb), Err(_)) if tb == "32" => (tb, defaultbits.clone()), - (Ok(_), Err(_)) => panic!("Invalid value for RUST_LIBC_UNSTABLE_GNU_TIME_BITS, must be 32 or 64"), - (Err(_), Ok(fb)) if fb == "32" || fb == "64" => (defaultbits.clone(), fb), - (Err(_), Ok(_)) => panic!("Invalid value for RUST_LIBC_UNSTABLE_GNU_FILE_OFFSET_BITS, must be 32 or 64"), - }; - let valid_bits = ["32", "64"]; - assert!( - valid_bits.contains(&filebits.as_str()) && valid_bits.contains(&timebits.as_str()), - "Invalid value for RUST_LIBC_UNSTABLE_GNU_TIME_BITS or RUST_LIBC_UNSTABLE_GNU_FILE_OFFSET_BITS, must be 32, 64 or unset" - ); - assert!( - !(filebits == "32" && timebits == "64"), - "RUST_LIBC_UNSTABLE_GNU_FILE_OFFSET_BITS must be 64 or unset if RUST_LIBC_UNSTABLE_GNU_TIME_BITS is 64" - ); - if timebits == "64" { - set_cfg("linux_time_bits64"); - set_cfg("gnu_time_bits64"); - } - if filebits == "64" { - set_cfg("gnu_file_offset_bits64"); - } - } - - // On CI: deny all warnings - if libc_ci { - set_cfg("libc_deny_warnings"); - } - - // #[thread_local] is currently unstable - if rustc_dep_of_std { - set_cfg("libc_thread_local"); - } - - // Since Rust 1.80, configuration that isn't recognized by default needs 
to be provided to - // avoid warnings. - if rustc_minor_ver >= 80 { - for cfg in ALLOWED_CFGS { - if rustc_minor_ver >= 75 { - println!("cargo:rustc-check-cfg=cfg({cfg})"); - } else { - println!("cargo:rustc-check-cfg=values({cfg})"); - } - } - for &(name, values) in CHECK_CFG_EXTRA { - let values = values.join("\",\""); - if rustc_minor_ver >= 75 { - println!("cargo:rustc-check-cfg=cfg({name},values(\"{values}\"))"); - } else { - println!("cargo:rustc-check-cfg=values({name},\"{values}\")"); - } - } - } -} - -/// Run `rustc --version` and capture the output, adjusting arguments as needed if `clippy-driver` -/// is used instead. -fn rustc_version_cmd(is_clippy_driver: bool) -> Output { - let rustc = env::var_os("RUSTC").expect("Failed to get rustc version: missing RUSTC env"); - - let mut cmd = match env::var_os("RUSTC_WRAPPER") { - Some(ref wrapper) if wrapper.is_empty() => Command::new(rustc), - Some(wrapper) => { - let mut cmd = Command::new(wrapper); - cmd.arg(rustc); - if is_clippy_driver { - cmd.arg("--rustc"); - } - - cmd - } - None => Command::new(rustc), - }; - - cmd.arg("--version"); - - let output = cmd.output().expect("Failed to get rustc version"); - - assert!( - output.status.success(), - "failed to run rustc: {}", - String::from_utf8_lossy(output.stderr.as_slice()) - ); - - output -} - -/// Return the minor version of `rustc`, as well as a bool indicating whether or not the version -/// is a nightly. -fn rustc_minor_nightly() -> (u32, bool) { - macro_rules! otry { - ($e:expr) => { - match $e { - Some(e) => e, - None => panic!("Failed to get rustc version"), - } - }; - } - - let mut output = rustc_version_cmd(false); - - if otry!(str::from_utf8(&output.stdout).ok()).starts_with("clippy") { - output = rustc_version_cmd(true); - } - - let version = otry!(str::from_utf8(&output.stdout).ok()); - - let mut pieces = version.split('.'); - - assert_eq!( - pieces.next(), - Some("rustc 1"), - "Failed to get rustc version" - ); - - let minor = pieces.next(); - - // If `rustc` was built from a tarball, its version string - // will have neither a git hash nor a commit date - // (e.g. "rustc 1.39.0"). Treat this case as non-nightly, - // since a nightly build should either come from CI - // or a git checkout - let nightly_raw = otry!(pieces.next()).split('-').nth(1); - let nightly = nightly_raw.map_or(false, |raw| { - raw.starts_with("dev") || raw.starts_with("nightly") - }); - let minor = otry!(otry!(minor).parse().ok()); - - (minor, nightly) -} - -fn which_freebsd() -> Option { - let output = Command::new("freebsd-version").output().ok()?; - if !output.status.success() { - return None; - } - - let stdout = String::from_utf8(output.stdout).ok()?; - - match &stdout { - s if s.starts_with("10") => Some(10), - s if s.starts_with("11") => Some(11), - s if s.starts_with("12") => Some(12), - s if s.starts_with("13") => Some(13), - s if s.starts_with("14") => Some(14), - s if s.starts_with("15") => Some(15), - _ => None, - } -} - -fn emcc_version_code() -> Option { - let emcc = if cfg!(target_os = "windows") { - "emcc.bat" - } else { - "emcc" - }; - - let output = Command::new(emcc).arg("-dumpversion").output().ok()?; - if !output.status.success() { - return None; - } - - let version = String::from_utf8(output.stdout).ok()?; - - // Some Emscripten versions come with `-git` attached, so split the - // version string also on the `-` char. 
- let mut pieces = version.trim().split(['.', '-']); - - let major = pieces.next().and_then(|x| x.parse().ok()).unwrap_or(0); - let minor = pieces.next().and_then(|x| x.parse().ok()).unwrap_or(0); - let patch = pieces.next().and_then(|x| x.parse().ok()).unwrap_or(0); - - Some(major * 10000 + minor * 100 + patch) -} - -fn set_cfg(cfg: &str) { - assert!( - ALLOWED_CFGS.contains(&cfg), - "trying to set cfg {cfg}, but it is not in ALLOWED_CFGS", - ); - println!("cargo:rustc-cfg={cfg}"); -} diff --git a/vendor/libc/cherry-pick-stable.sh b/vendor/libc/cherry-pick-stable.sh deleted file mode 100755 index c338be4f2ab222..00000000000000 --- a/vendor/libc/cherry-pick-stable.sh +++ /dev/null @@ -1,150 +0,0 @@ -#!/bin/bash - -set -e - -# Parse arguments -DRY_RUN=false -while [[ $# -gt 0 ]]; do - case $1 in - --dry-run|-d) - DRY_RUN=true - shift - ;; - --help|-h) - echo "Usage: $0 [OPTIONS]" - echo "" - echo "Cherry-pick commits from PRs labeled 'stable-nominated' to current branch" - echo "" - echo "Options:" - echo " -d, --dry-run Show what would be done without making changes" - echo " -h, --help Show this help message" - exit 0 - ;; - *) - echo "Unknown option: $1" - echo "Use --help for usage information" - exit 1 - ;; - esac -done - -if [ "$DRY_RUN" = true ]; then - echo "[DRY RUN MODE - No changes will be made]" - echo "" -fi - -current_branch=$(git branch --show-current) -echo "Current branch: $current_branch" -echo "Fetching PRs with 'stable-nominated' label..." -echo "" - -# Get PRs with stable-nominated label that are merged -# Sort by merge date (oldest first) to preserve merge order and avoid conflicts -# Format: PR number, title, merge commit SHA -prs=$(gh pr list --state merged --label stable-nominated --json number,title,mergeCommit,mergedAt --jq 'sort_by(.mergedAt) | .[] | "\(.number)|\(.title)|\(.mergeCommit.oid)"') - -if [ -z "$prs" ]; then - echo "No PRs found with 'stable-nominated' label." 
- exit 0 -fi - -# Arrays to track results -declare -a successful -declare -a failed -declare -a skipped - -echo "Found PRs to cherry-pick:" -echo "" - -# Process each PR -while IFS='|' read -r pr_number title commit_sha; do - echo "----------------------------------------" - echo "PR #${pr_number}: ${title}" - echo "Commit: ${commit_sha}" - - # Check if commit already exists in current branch - if git branch --contains "$commit_sha" 2>/dev/null | grep -q "^\*"; then - echo "⏭ Already cherry-picked, skipping" - skipped+=("PR #${pr_number}: ${title}") - echo "" - continue - fi - - # Cherry-pick with -xe flags as specified - if [ "$DRY_RUN" = true ]; then - echo "Would cherry-pick with: git cherry-pick -xe $commit_sha" - echo "Would add backport note: (backport https://github.com/rust-lang/libc/pull/$pr_number)" - successful+=("PR #${pr_number}: ${title} (${commit_sha:0:8})") - else - if git cherry-pick -xe "$commit_sha" 2>&1; then - # Add backport note before the cherry-pick note as per CONTRIBUTING.md - current_msg=$(git log -1 --format=%B) - backport_line="(backport https://github.com/rust-lang/libc/pull/$pr_number)" - - # Insert backport line before "(cherry picked from commit" line - new_msg=$(echo "$current_msg" | sed "/^(cherry picked from commit/i\\ -$backport_line\\ -") - - # Amend the commit with the new message - git commit --amend -m "$new_msg" - - echo "✓ Successfully cherry-picked with backport note" - successful+=("PR #${pr_number}: ${title} (${commit_sha:0:8})") - else - echo "✗ Failed to cherry-pick" - failed+=("PR #${pr_number}: ${title} (${commit_sha:0:8})") - # Abort the failed cherry-pick - git cherry-pick --abort 2>/dev/null || true - fi - fi - echo "" -done <<< "$prs" - -# Print summary -echo "========================================" -if [ "$DRY_RUN" = true ]; then - echo "SUMMARY (DRY RUN)" -else - echo "SUMMARY" -fi -echo "========================================" -echo "" - -if [ ${#successful[@]} -gt 0 ]; then - if [ "$DRY_RUN" = true ]; then - echo "Would cherry-pick (${#successful[@]}):" - else - echo "Successfully cherry-picked (${#successful[@]}):" - fi - for item in "${successful[@]}"; do - echo " ✓ $item" - done - echo "" -fi - -if [ ${#skipped[@]} -gt 0 ]; then - echo "Skipped (${#skipped[@]}):" - for item in "${skipped[@]}"; do - echo " ⏭ $item" - done - echo "" -fi - -if [ ${#failed[@]} -gt 0 ]; then - echo "Failed (${#failed[@]}):" - for item in "${failed[@]}"; do - echo " ✗ $item" - done - echo "" - if [ "$DRY_RUN" = false ]; then - echo "Please resolve conflicts manually and re-run if needed." - fi - exit 1 -fi - -if [ "$DRY_RUN" = true ]; then - echo "Dry run complete! Run without --dry-run to apply changes." -else - echo "All done!" -fi diff --git a/vendor/libc/rustfmt.toml b/vendor/libc/rustfmt.toml deleted file mode 100644 index de0fc5ecc0166e..00000000000000 --- a/vendor/libc/rustfmt.toml +++ /dev/null @@ -1,4 +0,0 @@ -edition = "2021" -error_on_line_overflow = true -group_imports = "StdExternalCrate" -imports_granularity = "Module" diff --git a/vendor/libc/src/fuchsia/aarch64.rs b/vendor/libc/src/fuchsia/aarch64.rs deleted file mode 100644 index 577f0d99cf24d6..00000000000000 --- a/vendor/libc/src/fuchsia/aarch64.rs +++ /dev/null @@ -1,69 +0,0 @@ -use crate::off_t; -use crate::prelude::*; - -pub type __u64 = c_ulonglong; -pub type wchar_t = u32; -pub type nlink_t = c_ulong; -pub type blksize_t = c_long; - -s! 
{ - pub struct stat { - pub st_dev: crate::dev_t, - pub st_ino: crate::ino_t, - pub st_mode: crate::mode_t, - pub st_nlink: crate::nlink_t, - pub st_uid: crate::uid_t, - pub st_gid: crate::gid_t, - pub st_rdev: crate::dev_t, - __pad0: c_ulong, - pub st_size: off_t, - pub st_blksize: crate::blksize_t, - __pad1: c_int, - pub st_blocks: crate::blkcnt_t, - pub st_atime: crate::time_t, - pub st_atime_nsec: c_long, - pub st_mtime: crate::time_t, - pub st_mtime_nsec: c_long, - pub st_ctime: crate::time_t, - pub st_ctime_nsec: c_long, - __unused: [c_uint; 2], - } - - pub struct stat64 { - pub st_dev: crate::dev_t, - pub st_ino: crate::ino_t, - pub st_mode: crate::mode_t, - pub st_nlink: crate::nlink_t, - pub st_uid: crate::uid_t, - pub st_gid: crate::gid_t, - pub st_rdev: crate::dev_t, - __pad0: c_ulong, - pub st_size: off_t, - pub st_blksize: crate::blksize_t, - __pad1: c_int, - pub st_blocks: crate::blkcnt_t, - pub st_atime: crate::time_t, - pub st_atime_nsec: c_long, - pub st_mtime: crate::time_t, - pub st_mtime_nsec: c_long, - pub st_ctime: crate::time_t, - pub st_ctime_nsec: c_long, - __unused: [c_uint; 2], - } - - pub struct ipc_perm { - pub __ipc_perm_key: crate::key_t, - pub uid: crate::uid_t, - pub gid: crate::gid_t, - pub cuid: crate::uid_t, - pub cgid: crate::gid_t, - pub mode: crate::mode_t, - pub __seq: c_ushort, - __unused1: c_ulong, - __unused2: c_ulong, - } -} - -// From https://cs.opensource.google/fuchsia/fuchsia/+/main:zircon/third_party/ulib/musl/include/bits/signal.h;l=20-21;drc=0827b18ab9540c46f8037f407d17ea15a79e9ba7 -pub const MINSIGSTKSZ: size_t = 6144; -pub const SIGSTKSZ: size_t = 12288; diff --git a/vendor/libc/src/fuchsia/mod.rs b/vendor/libc/src/fuchsia/mod.rs deleted file mode 100644 index 31f13b16832d20..00000000000000 --- a/vendor/libc/src/fuchsia/mod.rs +++ /dev/null @@ -1,4322 +0,0 @@ -//! Definitions found commonly among almost all Unix derivatives -//! -//! More functions and definitions can be found in the more specific modules -//! according to the platform in question. 
- -use crate::prelude::*; - -// PUB_TYPE - -pub type intmax_t = i64; -pub type uintmax_t = u64; - -pub type locale_t = *mut c_void; - -pub type size_t = usize; -pub type ptrdiff_t = isize; -pub type intptr_t = isize; -pub type uintptr_t = usize; -pub type ssize_t = isize; - -pub type pid_t = i32; -pub type uid_t = u32; -pub type gid_t = u32; -pub type in_addr_t = u32; -pub type in_port_t = u16; -pub type sighandler_t = size_t; -pub type cc_t = c_uchar; -pub type sa_family_t = u16; -pub type pthread_key_t = c_uint; -pub type speed_t = c_uint; -pub type tcflag_t = c_uint; -pub type clockid_t = c_int; -pub type key_t = c_int; -pub type id_t = c_uint; -pub type useconds_t = u32; -pub type dev_t = u64; -pub type socklen_t = u32; -pub type pthread_t = c_ulong; -pub type mode_t = u32; -pub type ino64_t = u64; -pub type off64_t = i64; -pub type blkcnt64_t = i64; -pub type rlim64_t = u64; -pub type mqd_t = c_int; -pub type nfds_t = c_ulong; -pub type nl_item = c_int; -pub type idtype_t = c_uint; -pub type loff_t = c_longlong; - -pub type __u8 = c_uchar; -pub type __u16 = c_ushort; -pub type __s16 = c_short; -pub type __u32 = c_uint; -pub type __s32 = c_int; - -pub type Elf32_Half = u16; -pub type Elf32_Word = u32; -pub type Elf32_Off = u32; -pub type Elf32_Addr = u32; - -pub type Elf64_Half = u16; -pub type Elf64_Word = u32; -pub type Elf64_Off = u64; -pub type Elf64_Addr = u64; -pub type Elf64_Xword = u64; - -pub type clock_t = c_long; -pub type time_t = c_long; -pub type suseconds_t = c_long; -pub type ino_t = u64; -pub type off_t = i64; -pub type blkcnt_t = i64; - -pub type shmatt_t = c_ulong; -pub type msgqnum_t = c_ulong; -pub type msglen_t = c_ulong; -pub type fsblkcnt_t = c_ulonglong; -pub type fsfilcnt_t = c_ulonglong; -pub type rlim_t = c_ulonglong; - -// FIXME(fuchsia): why are these uninhabited types? that seems... wrong? -// Presumably these should be `()` or an `extern type` (when that stabilizes). -#[derive(Debug)] -pub enum timezone {} -impl Copy for timezone {} -impl Clone for timezone { - fn clone(&self) -> timezone { - *self - } -} -#[derive(Debug)] -pub enum DIR {} -impl Copy for DIR {} -impl Clone for DIR { - fn clone(&self) -> DIR { - *self - } -} - -#[derive(Debug)] -pub enum fpos64_t {} // FIXME(fuchsia): fill this out with a struct -impl Copy for fpos64_t {} -impl Clone for fpos64_t { - fn clone(&self) -> fpos64_t { - *self - } -} - -// PUB_STRUCT - -s! { - pub struct group { - pub gr_name: *mut c_char, - pub gr_passwd: *mut c_char, - pub gr_gid: crate::gid_t, - pub gr_mem: *mut *mut c_char, - } - - pub struct utimbuf { - pub actime: time_t, - pub modtime: time_t, - } - - pub struct timeval { - pub tv_sec: time_t, - pub tv_usec: suseconds_t, - } - - pub struct timespec { - pub tv_sec: time_t, - pub tv_nsec: c_long, - } - - // FIXME(fuchsia): the rlimit and rusage related functions and types don't exist - // within zircon. Are there reasons for keeping them around? 
- pub struct rlimit { - pub rlim_cur: rlim_t, - pub rlim_max: rlim_t, - } - - pub struct rusage { - pub ru_utime: timeval, - pub ru_stime: timeval, - pub ru_maxrss: c_long, - #[cfg(all(target_arch = "x86_64", target_pointer_width = "32"))] - __pad1: u32, - pub ru_ixrss: c_long, - #[cfg(all(target_arch = "x86_64", target_pointer_width = "32"))] - __pad2: u32, - pub ru_idrss: c_long, - #[cfg(all(target_arch = "x86_64", target_pointer_width = "32"))] - __pad3: u32, - pub ru_isrss: c_long, - #[cfg(all(target_arch = "x86_64", target_pointer_width = "32"))] - __pad4: u32, - pub ru_minflt: c_long, - #[cfg(all(target_arch = "x86_64", target_pointer_width = "32"))] - __pad5: u32, - pub ru_majflt: c_long, - #[cfg(all(target_arch = "x86_64", target_pointer_width = "32"))] - __pad6: u32, - pub ru_nswap: c_long, - #[cfg(all(target_arch = "x86_64", target_pointer_width = "32"))] - __pad7: u32, - pub ru_inblock: c_long, - #[cfg(all(target_arch = "x86_64", target_pointer_width = "32"))] - __pad8: u32, - pub ru_oublock: c_long, - #[cfg(all(target_arch = "x86_64", target_pointer_width = "32"))] - __pad9: u32, - pub ru_msgsnd: c_long, - #[cfg(all(target_arch = "x86_64", target_pointer_width = "32"))] - __pad10: u32, - pub ru_msgrcv: c_long, - #[cfg(all(target_arch = "x86_64", target_pointer_width = "32"))] - __pad11: u32, - pub ru_nsignals: c_long, - #[cfg(all(target_arch = "x86_64", target_pointer_width = "32"))] - __pad12: u32, - pub ru_nvcsw: c_long, - #[cfg(all(target_arch = "x86_64", target_pointer_width = "32"))] - __pad13: u32, - pub ru_nivcsw: c_long, - #[cfg(all(target_arch = "x86_64", target_pointer_width = "32"))] - __pad14: u32, - } - - pub struct in_addr { - pub s_addr: in_addr_t, - } - - pub struct in6_addr { - pub s6_addr: [u8; 16], - } - - pub struct ip_mreq { - pub imr_multiaddr: in_addr, - pub imr_interface: in_addr, - } - - pub struct ip_mreqn { - pub imr_multiaddr: in_addr, - pub imr_address: in_addr, - pub imr_ifindex: c_int, - } - - pub struct ipv6_mreq { - pub ipv6mr_multiaddr: in6_addr, - pub ipv6mr_interface: c_uint, - } - - pub struct hostent { - pub h_name: *mut c_char, - pub h_aliases: *mut *mut c_char, - pub h_addrtype: c_int, - pub h_length: c_int, - pub h_addr_list: *mut *mut c_char, - } - - pub struct iovec { - pub iov_base: *mut c_void, - pub iov_len: size_t, - } - - pub struct pollfd { - pub fd: c_int, - pub events: c_short, - pub revents: c_short, - } - - pub struct winsize { - pub ws_row: c_ushort, - pub ws_col: c_ushort, - pub ws_xpixel: c_ushort, - pub ws_ypixel: c_ushort, - } - - pub struct linger { - pub l_onoff: c_int, - pub l_linger: c_int, - } - - pub struct sigval { - // Actually a union of an int and a void* - pub sival_ptr: *mut c_void, - } - - // - pub struct itimerval { - pub it_interval: crate::timeval, - pub it_value: crate::timeval, - } - - // - pub struct tms { - pub tms_utime: crate::clock_t, - pub tms_stime: crate::clock_t, - pub tms_cutime: crate::clock_t, - pub tms_cstime: crate::clock_t, - } - - pub struct servent { - pub s_name: *mut c_char, - pub s_aliases: *mut *mut c_char, - pub s_port: c_int, - pub s_proto: *mut c_char, - } - - pub struct protoent { - pub p_name: *mut c_char, - pub p_aliases: *mut *mut c_char, - pub p_proto: c_int, - } - - pub struct aiocb { - pub aio_fildes: c_int, - pub aio_lio_opcode: c_int, - pub aio_reqprio: c_int, - pub aio_buf: *mut c_void, - pub aio_nbytes: size_t, - pub aio_sigevent: crate::sigevent, - __td: *mut c_void, - __lock: [c_int; 2], - __err: c_int, - __ret: ssize_t, - pub aio_offset: off_t, - __next: *mut 
c_void, - __prev: *mut c_void, - #[cfg(target_pointer_width = "32")] - __dummy4: [c_char; 24], - #[cfg(target_pointer_width = "64")] - __dummy4: [c_char; 16], - } - - // FIXME(1.0): This should not implement `PartialEq` - #[allow(unpredictable_function_pointer_comparisons)] - pub struct sigaction { - pub sa_sigaction: crate::sighandler_t, - pub sa_mask: crate::sigset_t, - pub sa_flags: c_int, - pub sa_restorer: Option, - } - - pub struct termios { - pub c_iflag: crate::tcflag_t, - pub c_oflag: crate::tcflag_t, - pub c_cflag: crate::tcflag_t, - pub c_lflag: crate::tcflag_t, - pub c_line: crate::cc_t, - pub c_cc: [crate::cc_t; crate::NCCS], - pub __c_ispeed: crate::speed_t, - pub __c_ospeed: crate::speed_t, - } - - pub struct flock { - pub l_type: c_short, - pub l_whence: c_short, - pub l_start: off_t, - pub l_len: off_t, - pub l_pid: crate::pid_t, - } - - pub struct ucred { - pub pid: crate::pid_t, - pub uid: crate::uid_t, - pub gid: crate::gid_t, - } - - pub struct sockaddr { - pub sa_family: sa_family_t, - pub sa_data: [c_char; 14], - } - - pub struct sockaddr_in { - pub sin_family: sa_family_t, - pub sin_port: crate::in_port_t, - pub sin_addr: crate::in_addr, - pub sin_zero: [u8; 8], - } - - pub struct sockaddr_in6 { - pub sin6_family: sa_family_t, - pub sin6_port: crate::in_port_t, - pub sin6_flowinfo: u32, - pub sin6_addr: crate::in6_addr, - pub sin6_scope_id: u32, - } - - pub struct sockaddr_vm { - pub svm_family: sa_family_t, - pub svm_reserved1: c_ushort, - pub svm_port: crate::in_port_t, - pub svm_cid: c_uint, - pub svm_zero: [u8; 4], - } - - pub struct addrinfo { - pub ai_flags: c_int, - pub ai_family: c_int, - pub ai_socktype: c_int, - pub ai_protocol: c_int, - pub ai_addrlen: socklen_t, - - pub ai_addr: *mut crate::sockaddr, - - pub ai_canonname: *mut c_char, - - pub ai_next: *mut addrinfo, - } - - pub struct sockaddr_ll { - pub sll_family: c_ushort, - pub sll_protocol: c_ushort, - pub sll_ifindex: c_int, - pub sll_hatype: c_ushort, - pub sll_pkttype: c_uchar, - pub sll_halen: c_uchar, - pub sll_addr: [c_uchar; 8], - } - - pub struct fd_set { - fds_bits: [c_ulong; FD_SETSIZE as usize / ULONG_SIZE], - } - - pub struct tm { - pub tm_sec: c_int, - pub tm_min: c_int, - pub tm_hour: c_int, - pub tm_mday: c_int, - pub tm_mon: c_int, - pub tm_year: c_int, - pub tm_wday: c_int, - pub tm_yday: c_int, - pub tm_isdst: c_int, - pub tm_gmtoff: c_long, - pub tm_zone: *const c_char, - } - - pub struct sched_param { - pub sched_priority: c_int, - pub sched_ss_low_priority: c_int, - pub sched_ss_repl_period: crate::timespec, - pub sched_ss_init_budget: crate::timespec, - pub sched_ss_max_repl: c_int, - } - - pub struct Dl_info { - pub dli_fname: *const c_char, - pub dli_fbase: *mut c_void, - pub dli_sname: *const c_char, - pub dli_saddr: *mut c_void, - } - - pub struct epoll_event { - pub events: u32, - pub u64: u64, - } - - pub struct lconv { - pub decimal_point: *mut c_char, - pub thousands_sep: *mut c_char, - pub grouping: *mut c_char, - pub int_curr_symbol: *mut c_char, - pub currency_symbol: *mut c_char, - pub mon_decimal_point: *mut c_char, - pub mon_thousands_sep: *mut c_char, - pub mon_grouping: *mut c_char, - pub positive_sign: *mut c_char, - pub negative_sign: *mut c_char, - pub int_frac_digits: c_char, - pub frac_digits: c_char, - pub p_cs_precedes: c_char, - pub p_sep_by_space: c_char, - pub n_cs_precedes: c_char, - pub n_sep_by_space: c_char, - pub p_sign_posn: c_char, - pub n_sign_posn: c_char, - pub int_p_cs_precedes: c_char, - pub int_p_sep_by_space: c_char, - pub 
int_n_cs_precedes: c_char, - pub int_n_sep_by_space: c_char, - pub int_p_sign_posn: c_char, - pub int_n_sign_posn: c_char, - } - - pub struct rlimit64 { - pub rlim_cur: rlim64_t, - pub rlim_max: rlim64_t, - } - - pub struct glob_t { - pub gl_pathc: size_t, - pub gl_pathv: *mut *mut c_char, - pub gl_offs: size_t, - pub gl_flags: c_int, - - __unused1: *mut c_void, - __unused2: *mut c_void, - __unused3: *mut c_void, - __unused4: *mut c_void, - __unused5: *mut c_void, - } - - pub struct ifaddrs { - pub ifa_next: *mut ifaddrs, - pub ifa_name: *mut c_char, - pub ifa_flags: c_uint, - pub ifa_addr: *mut crate::sockaddr, - pub ifa_netmask: *mut crate::sockaddr, - pub ifa_ifu: *mut crate::sockaddr, // FIXME(union) This should be a union - pub ifa_data: *mut c_void, - } - - pub struct passwd { - pub pw_name: *mut c_char, - pub pw_passwd: *mut c_char, - pub pw_uid: crate::uid_t, - pub pw_gid: crate::gid_t, - pub pw_gecos: *mut c_char, - pub pw_dir: *mut c_char, - pub pw_shell: *mut c_char, - } - - pub struct spwd { - pub sp_namp: *mut c_char, - pub sp_pwdp: *mut c_char, - pub sp_lstchg: c_long, - pub sp_min: c_long, - pub sp_max: c_long, - pub sp_warn: c_long, - pub sp_inact: c_long, - pub sp_expire: c_long, - pub sp_flag: c_ulong, - } - - pub struct statvfs { - pub f_bsize: c_ulong, - pub f_frsize: c_ulong, - pub f_blocks: crate::fsblkcnt_t, - pub f_bfree: crate::fsblkcnt_t, - pub f_bavail: crate::fsblkcnt_t, - pub f_files: crate::fsfilcnt_t, - pub f_ffree: crate::fsfilcnt_t, - pub f_favail: crate::fsfilcnt_t, - #[cfg(target_endian = "little")] - pub f_fsid: c_ulong, - #[cfg(all(target_pointer_width = "32", not(target_arch = "x86_64")))] - __f_unused: c_int, - #[cfg(target_endian = "big")] - pub f_fsid: c_ulong, - pub f_flag: c_ulong, - pub f_namemax: c_ulong, - __f_spare: [c_int; 6], - } - - pub struct dqblk { - pub dqb_bhardlimit: u64, - pub dqb_bsoftlimit: u64, - pub dqb_curspace: u64, - pub dqb_ihardlimit: u64, - pub dqb_isoftlimit: u64, - pub dqb_curinodes: u64, - pub dqb_btime: u64, - pub dqb_itime: u64, - pub dqb_valid: u32, - } - - pub struct signalfd_siginfo { - pub ssi_signo: u32, - pub ssi_errno: i32, - pub ssi_code: i32, - pub ssi_pid: u32, - pub ssi_uid: u32, - pub ssi_fd: i32, - pub ssi_tid: u32, - pub ssi_band: u32, - pub ssi_overrun: u32, - pub ssi_trapno: u32, - pub ssi_status: i32, - pub ssi_int: i32, - pub ssi_ptr: u64, - pub ssi_utime: u64, - pub ssi_stime: u64, - pub ssi_addr: u64, - pub ssi_addr_lsb: u16, - _pad2: u16, - pub ssi_syscall: i32, - pub ssi_call_addr: u64, - pub ssi_arch: u32, - _pad: [u8; 28], - } - - pub struct itimerspec { - pub it_interval: crate::timespec, - pub it_value: crate::timespec, - } - - pub struct fsid_t { - __val: [c_int; 2], - } - - pub struct cpu_set_t { - #[cfg(all(target_pointer_width = "32", not(target_arch = "x86_64")))] - bits: [u32; 32], - #[cfg(not(all(target_pointer_width = "32", not(target_arch = "x86_64"))))] - bits: [u64; 16], - } - - pub struct if_nameindex { - pub if_index: c_uint, - pub if_name: *mut c_char, - } - - // System V IPC - pub struct msginfo { - pub msgpool: c_int, - pub msgmap: c_int, - pub msgmax: c_int, - pub msgmnb: c_int, - pub msgmni: c_int, - pub msgssz: c_int, - pub msgtql: c_int, - pub msgseg: c_ushort, - } - - pub struct mmsghdr { - pub msg_hdr: crate::msghdr, - pub msg_len: c_uint, - } - - pub struct sembuf { - pub sem_num: c_ushort, - pub sem_op: c_short, - pub sem_flg: c_short, - } - - pub struct input_event { - pub time: crate::timeval, - pub type_: crate::__u16, - pub code: crate::__u16, - pub value: 
crate::__s32, - } - - pub struct input_id { - pub bustype: crate::__u16, - pub vendor: crate::__u16, - pub product: crate::__u16, - pub version: crate::__u16, - } - - pub struct input_absinfo { - pub value: crate::__s32, - pub minimum: crate::__s32, - pub maximum: crate::__s32, - pub fuzz: crate::__s32, - pub flat: crate::__s32, - pub resolution: crate::__s32, - } - - pub struct input_keymap_entry { - pub flags: crate::__u8, - pub len: crate::__u8, - pub index: crate::__u16, - pub keycode: crate::__u32, - pub scancode: [crate::__u8; 32], - } - - pub struct input_mask { - pub type_: crate::__u32, - pub codes_size: crate::__u32, - pub codes_ptr: crate::__u64, - } - - pub struct ff_replay { - pub length: crate::__u16, - pub delay: crate::__u16, - } - - pub struct ff_trigger { - pub button: crate::__u16, - pub interval: crate::__u16, - } - - pub struct ff_envelope { - pub attack_length: crate::__u16, - pub attack_level: crate::__u16, - pub fade_length: crate::__u16, - pub fade_level: crate::__u16, - } - - pub struct ff_constant_effect { - pub level: crate::__s16, - pub envelope: ff_envelope, - } - - pub struct ff_ramp_effect { - pub start_level: crate::__s16, - pub end_level: crate::__s16, - pub envelope: ff_envelope, - } - - pub struct ff_condition_effect { - pub right_saturation: crate::__u16, - pub left_saturation: crate::__u16, - - pub right_coeff: crate::__s16, - pub left_coeff: crate::__s16, - - pub deadband: crate::__u16, - pub center: crate::__s16, - } - - pub struct ff_periodic_effect { - pub waveform: crate::__u16, - pub period: crate::__u16, - pub magnitude: crate::__s16, - pub offset: crate::__s16, - pub phase: crate::__u16, - - pub envelope: ff_envelope, - - pub custom_len: crate::__u32, - pub custom_data: *mut crate::__s16, - } - - pub struct ff_rumble_effect { - pub strong_magnitude: crate::__u16, - pub weak_magnitude: crate::__u16, - } - - pub struct ff_effect { - pub type_: crate::__u16, - pub id: crate::__s16, - pub direction: crate::__u16, - pub trigger: ff_trigger, - pub replay: ff_replay, - // FIXME(1.0): this is actually a union - #[cfg(target_pointer_width = "64")] - pub u: [u64; 4], - #[cfg(target_pointer_width = "32")] - pub u: [u32; 7], - } - - pub struct dl_phdr_info { - #[cfg(target_pointer_width = "64")] - pub dlpi_addr: Elf64_Addr, - #[cfg(target_pointer_width = "32")] - pub dlpi_addr: Elf32_Addr, - - pub dlpi_name: *const c_char, - - #[cfg(target_pointer_width = "64")] - pub dlpi_phdr: *const Elf64_Phdr, - #[cfg(target_pointer_width = "32")] - pub dlpi_phdr: *const Elf32_Phdr, - - #[cfg(target_pointer_width = "64")] - pub dlpi_phnum: Elf64_Half, - #[cfg(target_pointer_width = "32")] - pub dlpi_phnum: Elf32_Half, - - pub dlpi_adds: c_ulonglong, - pub dlpi_subs: c_ulonglong, - pub dlpi_tls_modid: size_t, - pub dlpi_tls_data: *mut c_void, - } - - pub struct Elf32_Phdr { - pub p_type: Elf32_Word, - pub p_offset: Elf32_Off, - pub p_vaddr: Elf32_Addr, - pub p_paddr: Elf32_Addr, - pub p_filesz: Elf32_Word, - pub p_memsz: Elf32_Word, - pub p_flags: Elf32_Word, - pub p_align: Elf32_Word, - } - - pub struct Elf64_Phdr { - pub p_type: Elf64_Word, - pub p_flags: Elf64_Word, - pub p_offset: Elf64_Off, - pub p_vaddr: Elf64_Addr, - pub p_paddr: Elf64_Addr, - pub p_filesz: Elf64_Xword, - pub p_memsz: Elf64_Xword, - pub p_align: Elf64_Xword, - } - - pub struct statfs64 { - pub f_type: c_ulong, - pub f_bsize: c_ulong, - pub f_blocks: crate::fsblkcnt_t, - pub f_bfree: crate::fsblkcnt_t, - pub f_bavail: crate::fsblkcnt_t, - pub f_files: crate::fsfilcnt_t, - pub f_ffree: 
crate::fsfilcnt_t, - pub f_fsid: crate::fsid_t, - pub f_namelen: c_ulong, - pub f_frsize: c_ulong, - pub f_flags: c_ulong, - pub f_spare: [c_ulong; 4], - } - - pub struct statvfs64 { - pub f_bsize: c_ulong, - pub f_frsize: c_ulong, - pub f_blocks: u64, - pub f_bfree: u64, - pub f_bavail: u64, - pub f_files: u64, - pub f_ffree: u64, - pub f_favail: u64, - pub f_fsid: c_ulong, - pub f_flag: c_ulong, - pub f_namemax: c_ulong, - __f_spare: [c_int; 6], - } - - pub struct stack_t { - pub ss_sp: *mut c_void, - pub ss_flags: c_int, - pub ss_size: size_t, - } - - pub struct pthread_attr_t { - __size: [u64; 7], - } - - pub struct sigset_t { - __val: [c_ulong; 16], - } - - pub struct shmid_ds { - pub shm_perm: crate::ipc_perm, - pub shm_segsz: size_t, - pub shm_atime: crate::time_t, - pub shm_dtime: crate::time_t, - pub shm_ctime: crate::time_t, - pub shm_cpid: crate::pid_t, - pub shm_lpid: crate::pid_t, - pub shm_nattch: c_ulong, - __pad1: c_ulong, - __pad2: c_ulong, - } - - pub struct msqid_ds { - pub msg_perm: crate::ipc_perm, - pub msg_stime: crate::time_t, - pub msg_rtime: crate::time_t, - pub msg_ctime: crate::time_t, - pub __msg_cbytes: c_ulong, - pub msg_qnum: crate::msgqnum_t, - pub msg_qbytes: crate::msglen_t, - pub msg_lspid: crate::pid_t, - pub msg_lrpid: crate::pid_t, - __pad1: c_ulong, - __pad2: c_ulong, - } - - pub struct statfs { - pub f_type: c_ulong, - pub f_bsize: c_ulong, - pub f_blocks: crate::fsblkcnt_t, - pub f_bfree: crate::fsblkcnt_t, - pub f_bavail: crate::fsblkcnt_t, - pub f_files: crate::fsfilcnt_t, - pub f_ffree: crate::fsfilcnt_t, - pub f_fsid: crate::fsid_t, - pub f_namelen: c_ulong, - pub f_frsize: c_ulong, - pub f_flags: c_ulong, - pub f_spare: [c_ulong; 4], - } - - pub struct msghdr { - pub msg_name: *mut c_void, - pub msg_namelen: crate::socklen_t, - pub msg_iov: *mut crate::iovec, - pub msg_iovlen: c_int, - __pad1: c_int, - pub msg_control: *mut c_void, - pub msg_controllen: crate::socklen_t, - __pad2: crate::socklen_t, - pub msg_flags: c_int, - } - - pub struct cmsghdr { - pub cmsg_len: crate::socklen_t, - pub __pad1: c_int, - pub cmsg_level: c_int, - pub cmsg_type: c_int, - } - - pub struct sem_t { - __val: [c_int; 8], - } - - pub struct siginfo_t { - pub si_signo: c_int, - pub si_errno: c_int, - pub si_code: c_int, - pub _pad: [c_int; 29], - _align: [usize; 0], - } - - pub struct termios2 { - pub c_iflag: crate::tcflag_t, - pub c_oflag: crate::tcflag_t, - pub c_cflag: crate::tcflag_t, - pub c_lflag: crate::tcflag_t, - pub c_line: crate::cc_t, - pub c_cc: [crate::cc_t; 19], - pub c_ispeed: crate::speed_t, - pub c_ospeed: crate::speed_t, - } - - pub struct in6_pktinfo { - pub ipi6_addr: crate::in6_addr, - pub ipi6_ifindex: c_uint, - } - - #[cfg_attr( - any(target_pointer_width = "32", target_arch = "x86_64"), - repr(align(4)) - )] - #[cfg_attr( - not(any(target_pointer_width = "32", target_arch = "x86_64")), - repr(align(8)) - )] - pub struct pthread_mutexattr_t { - size: [u8; crate::__SIZEOF_PTHREAD_MUTEXATTR_T], - } - - #[cfg_attr(target_pointer_width = "32", repr(align(4)))] - #[cfg_attr(target_pointer_width = "64", repr(align(8)))] - pub struct pthread_rwlockattr_t { - size: [u8; crate::__SIZEOF_PTHREAD_RWLOCKATTR_T], - } - - #[repr(align(4))] - pub struct pthread_condattr_t { - size: [u8; crate::__SIZEOF_PTHREAD_CONDATTR_T], - } -} - -s_no_extra_traits! 
{ - pub struct sysinfo { - pub uptime: c_ulong, - pub loads: [c_ulong; 3], - pub totalram: c_ulong, - pub freeram: c_ulong, - pub sharedram: c_ulong, - pub bufferram: c_ulong, - pub totalswap: c_ulong, - pub freeswap: c_ulong, - pub procs: c_ushort, - pub pad: c_ushort, - pub totalhigh: c_ulong, - pub freehigh: c_ulong, - pub mem_unit: c_uint, - pub __reserved: [c_char; 256], - } - - pub struct sockaddr_un { - pub sun_family: sa_family_t, - pub sun_path: [c_char; 108], - } - - pub struct sockaddr_storage { - pub ss_family: sa_family_t, - __ss_pad2: [u8; 128 - 2 - 8], - __ss_align: size_t, - } - - pub struct utsname { - pub sysname: [c_char; 65], - pub nodename: [c_char; 65], - pub release: [c_char; 65], - pub version: [c_char; 65], - pub machine: [c_char; 65], - pub domainname: [c_char; 65], - } - - pub struct dirent { - pub d_ino: crate::ino_t, - pub d_off: off_t, - pub d_reclen: c_ushort, - pub d_type: c_uchar, - pub d_name: [c_char; 256], - } - - pub struct dirent64 { - pub d_ino: crate::ino64_t, - pub d_off: off64_t, - pub d_reclen: c_ushort, - pub d_type: c_uchar, - pub d_name: [c_char; 256], - } - - // x32 compatibility - // See https://sourceware.org/bugzilla/show_bug.cgi?id=21279 - pub struct mq_attr { - #[cfg(all(target_arch = "x86_64", target_pointer_width = "32"))] - pub mq_flags: i64, - #[cfg(all(target_arch = "x86_64", target_pointer_width = "32"))] - pub mq_maxmsg: i64, - #[cfg(all(target_arch = "x86_64", target_pointer_width = "32"))] - pub mq_msgsize: i64, - #[cfg(all(target_arch = "x86_64", target_pointer_width = "32"))] - pub mq_curmsgs: i64, - #[cfg(all(target_arch = "x86_64", target_pointer_width = "32"))] - pad: [i64; 4], - - #[cfg(not(all(target_arch = "x86_64", target_pointer_width = "32")))] - pub mq_flags: c_long, - #[cfg(not(all(target_arch = "x86_64", target_pointer_width = "32")))] - pub mq_maxmsg: c_long, - #[cfg(not(all(target_arch = "x86_64", target_pointer_width = "32")))] - pub mq_msgsize: c_long, - #[cfg(not(all(target_arch = "x86_64", target_pointer_width = "32")))] - pub mq_curmsgs: c_long, - #[cfg(not(all(target_arch = "x86_64", target_pointer_width = "32")))] - pad: [c_long; 4], - } - - pub struct sockaddr_nl { - pub nl_family: crate::sa_family_t, - nl_pad: c_ushort, - pub nl_pid: u32, - pub nl_groups: u32, - } - - pub struct sigevent { - pub sigev_value: crate::sigval, - pub sigev_signo: c_int, - pub sigev_notify: c_int, - pub sigev_notify_function: fn(crate::sigval), - pub sigev_notify_attributes: *mut pthread_attr_t, - pub __pad: [c_char; 56 - 3 * 8], - } - - #[cfg_attr( - all( - target_pointer_width = "32", - any(target_arch = "arm", target_arch = "x86_64") - ), - repr(align(4)) - )] - #[cfg_attr( - any( - target_pointer_width = "64", - not(any(target_arch = "arm", target_arch = "x86_64")) - ), - repr(align(8)) - )] - pub struct pthread_mutex_t { - size: [u8; crate::__SIZEOF_PTHREAD_MUTEX_T], - } - - #[cfg_attr( - all( - target_pointer_width = "32", - any(target_arch = "arm", target_arch = "x86_64") - ), - repr(align(4)) - )] - #[cfg_attr( - any( - target_pointer_width = "64", - not(any(target_arch = "arm", target_arch = "x86_64")) - ), - repr(align(8)) - )] - pub struct pthread_rwlock_t { - size: [u8; crate::__SIZEOF_PTHREAD_RWLOCK_T], - } - - #[cfg_attr(target_pointer_width = "32", repr(align(4)))] - #[cfg_attr(target_pointer_width = "64", repr(align(8)))] - #[cfg_attr(target_arch = "x86", repr(align(4)))] - #[cfg_attr(not(target_arch = "x86"), repr(align(8)))] - pub struct pthread_cond_t { - size: [u8; crate::__SIZEOF_PTHREAD_COND_T], - } -} - 
-cfg_if! { - if #[cfg(feature = "extra_traits")] { - impl PartialEq for sysinfo { - fn eq(&self, other: &sysinfo) -> bool { - self.uptime == other.uptime - && self.loads == other.loads - && self.totalram == other.totalram - && self.freeram == other.freeram - && self.sharedram == other.sharedram - && self.bufferram == other.bufferram - && self.totalswap == other.totalswap - && self.freeswap == other.freeswap - && self.procs == other.procs - && self.pad == other.pad - && self.totalhigh == other.totalhigh - && self.freehigh == other.freehigh - && self.mem_unit == other.mem_unit - && self - .__reserved - .iter() - .zip(other.__reserved.iter()) - .all(|(a, b)| a == b) - } - } - impl Eq for sysinfo {} - impl hash::Hash for sysinfo { - fn hash(&self, state: &mut H) { - self.uptime.hash(state); - self.loads.hash(state); - self.totalram.hash(state); - self.freeram.hash(state); - self.sharedram.hash(state); - self.bufferram.hash(state); - self.totalswap.hash(state); - self.freeswap.hash(state); - self.procs.hash(state); - self.pad.hash(state); - self.totalhigh.hash(state); - self.freehigh.hash(state); - self.mem_unit.hash(state); - self.__reserved.hash(state); - } - } - - impl PartialEq for sockaddr_un { - fn eq(&self, other: &sockaddr_un) -> bool { - self.sun_family == other.sun_family - && self - .sun_path - .iter() - .zip(other.sun_path.iter()) - .all(|(a, b)| a == b) - } - } - impl Eq for sockaddr_un {} - impl hash::Hash for sockaddr_un { - fn hash(&self, state: &mut H) { - self.sun_family.hash(state); - self.sun_path.hash(state); - } - } - - impl PartialEq for sockaddr_storage { - fn eq(&self, other: &sockaddr_storage) -> bool { - self.ss_family == other.ss_family - && self.__ss_align == other.__ss_align - && self - .__ss_pad2 - .iter() - .zip(other.__ss_pad2.iter()) - .all(|(a, b)| a == b) - } - } - impl Eq for sockaddr_storage {} - impl hash::Hash for sockaddr_storage { - fn hash(&self, state: &mut H) { - self.ss_family.hash(state); - self.__ss_align.hash(state); - self.__ss_pad2.hash(state); - } - } - - impl PartialEq for utsname { - fn eq(&self, other: &utsname) -> bool { - self.sysname - .iter() - .zip(other.sysname.iter()) - .all(|(a, b)| a == b) - && self - .nodename - .iter() - .zip(other.nodename.iter()) - .all(|(a, b)| a == b) - && self - .release - .iter() - .zip(other.release.iter()) - .all(|(a, b)| a == b) - && self - .version - .iter() - .zip(other.version.iter()) - .all(|(a, b)| a == b) - && self - .machine - .iter() - .zip(other.machine.iter()) - .all(|(a, b)| a == b) - } - } - impl Eq for utsname {} - impl hash::Hash for utsname { - fn hash(&self, state: &mut H) { - self.sysname.hash(state); - self.nodename.hash(state); - self.release.hash(state); - self.version.hash(state); - self.machine.hash(state); - } - } - - impl PartialEq for dirent { - fn eq(&self, other: &dirent) -> bool { - self.d_ino == other.d_ino - && self.d_off == other.d_off - && self.d_reclen == other.d_reclen - && self.d_type == other.d_type - && self - .d_name - .iter() - .zip(other.d_name.iter()) - .all(|(a, b)| a == b) - } - } - impl Eq for dirent {} - impl hash::Hash for dirent { - fn hash(&self, state: &mut H) { - self.d_ino.hash(state); - self.d_off.hash(state); - self.d_reclen.hash(state); - self.d_type.hash(state); - self.d_name.hash(state); - } - } - - impl PartialEq for dirent64 { - fn eq(&self, other: &dirent64) -> bool { - self.d_ino == other.d_ino - && self.d_off == other.d_off - && self.d_reclen == other.d_reclen - && self.d_type == other.d_type - && self - .d_name - .iter() - 
.zip(other.d_name.iter()) - .all(|(a, b)| a == b) - } - } - impl Eq for dirent64 {} - impl hash::Hash for dirent64 { - fn hash(&self, state: &mut H) { - self.d_ino.hash(state); - self.d_off.hash(state); - self.d_reclen.hash(state); - self.d_type.hash(state); - self.d_name.hash(state); - } - } - - impl PartialEq for mq_attr { - fn eq(&self, other: &mq_attr) -> bool { - self.mq_flags == other.mq_flags - && self.mq_maxmsg == other.mq_maxmsg - && self.mq_msgsize == other.mq_msgsize - && self.mq_curmsgs == other.mq_curmsgs - } - } - impl Eq for mq_attr {} - impl hash::Hash for mq_attr { - fn hash(&self, state: &mut H) { - self.mq_flags.hash(state); - self.mq_maxmsg.hash(state); - self.mq_msgsize.hash(state); - self.mq_curmsgs.hash(state); - } - } - - impl PartialEq for sockaddr_nl { - fn eq(&self, other: &sockaddr_nl) -> bool { - self.nl_family == other.nl_family - && self.nl_pid == other.nl_pid - && self.nl_groups == other.nl_groups - } - } - impl Eq for sockaddr_nl {} - impl hash::Hash for sockaddr_nl { - fn hash(&self, state: &mut H) { - self.nl_family.hash(state); - self.nl_pid.hash(state); - self.nl_groups.hash(state); - } - } - - // FIXME(msrv): suggested method was added in 1.85 - #[allow(unpredictable_function_pointer_comparisons)] - impl PartialEq for sigevent { - fn eq(&self, other: &sigevent) -> bool { - self.sigev_value == other.sigev_value - && self.sigev_signo == other.sigev_signo - && self.sigev_notify == other.sigev_notify - && self.sigev_notify_function == other.sigev_notify_function - && self.sigev_notify_attributes == other.sigev_notify_attributes - } - } - impl Eq for sigevent {} - impl hash::Hash for sigevent { - fn hash(&self, state: &mut H) { - self.sigev_value.hash(state); - self.sigev_signo.hash(state); - self.sigev_notify.hash(state); - self.sigev_notify_function.hash(state); - self.sigev_notify_attributes.hash(state); - } - } - - impl PartialEq for pthread_cond_t { - fn eq(&self, other: &pthread_cond_t) -> bool { - self.size.iter().zip(other.size.iter()).all(|(a, b)| a == b) - } - } - impl Eq for pthread_cond_t {} - impl hash::Hash for pthread_cond_t { - fn hash(&self, state: &mut H) { - self.size.hash(state); - } - } - - impl PartialEq for pthread_mutex_t { - fn eq(&self, other: &pthread_mutex_t) -> bool { - self.size.iter().zip(other.size.iter()).all(|(a, b)| a == b) - } - } - impl Eq for pthread_mutex_t {} - impl hash::Hash for pthread_mutex_t { - fn hash(&self, state: &mut H) { - self.size.hash(state); - } - } - - impl PartialEq for pthread_rwlock_t { - fn eq(&self, other: &pthread_rwlock_t) -> bool { - self.size.iter().zip(other.size.iter()).all(|(a, b)| a == b) - } - } - impl Eq for pthread_rwlock_t {} - impl hash::Hash for pthread_rwlock_t { - fn hash(&self, state: &mut H) { - self.size.hash(state); - } - } - } -} - -// PUB_CONST - -pub const INT_MIN: c_int = -2147483648; -pub const INT_MAX: c_int = 2147483647; - -pub const SIG_DFL: sighandler_t = 0 as sighandler_t; -pub const SIG_IGN: sighandler_t = 1 as sighandler_t; -pub const SIG_ERR: sighandler_t = !0 as sighandler_t; - -pub const DT_UNKNOWN: u8 = 0; -pub const DT_FIFO: u8 = 1; -pub const DT_CHR: u8 = 2; -pub const DT_DIR: u8 = 4; -pub const DT_BLK: u8 = 6; -pub const DT_REG: u8 = 8; -pub const DT_LNK: u8 = 10; -pub const DT_SOCK: u8 = 12; - -pub const FD_CLOEXEC: c_int = 0x1; - -pub const USRQUOTA: c_int = 0; -pub const GRPQUOTA: c_int = 1; - -pub const SIGIOT: c_int = 6; - -pub const S_ISUID: mode_t = 0o4000; -pub const S_ISGID: mode_t = 0o2000; -pub const S_ISVTX: mode_t = 0o1000; - -pub const 
IF_NAMESIZE: size_t = 16; -pub const IFNAMSIZ: size_t = IF_NAMESIZE; - -pub const LOG_EMERG: c_int = 0; -pub const LOG_ALERT: c_int = 1; -pub const LOG_CRIT: c_int = 2; -pub const LOG_ERR: c_int = 3; -pub const LOG_WARNING: c_int = 4; -pub const LOG_NOTICE: c_int = 5; -pub const LOG_INFO: c_int = 6; -pub const LOG_DEBUG: c_int = 7; - -pub const LOG_KERN: c_int = 0; -pub const LOG_USER: c_int = 1 << 3; -pub const LOG_MAIL: c_int = 2 << 3; -pub const LOG_DAEMON: c_int = 3 << 3; -pub const LOG_AUTH: c_int = 4 << 3; -pub const LOG_SYSLOG: c_int = 5 << 3; -pub const LOG_LPR: c_int = 6 << 3; -pub const LOG_NEWS: c_int = 7 << 3; -pub const LOG_UUCP: c_int = 8 << 3; -pub const LOG_LOCAL0: c_int = 16 << 3; -pub const LOG_LOCAL1: c_int = 17 << 3; -pub const LOG_LOCAL2: c_int = 18 << 3; -pub const LOG_LOCAL3: c_int = 19 << 3; -pub const LOG_LOCAL4: c_int = 20 << 3; -pub const LOG_LOCAL5: c_int = 21 << 3; -pub const LOG_LOCAL6: c_int = 22 << 3; -pub const LOG_LOCAL7: c_int = 23 << 3; - -pub const LOG_PID: c_int = 0x01; -pub const LOG_CONS: c_int = 0x02; -pub const LOG_ODELAY: c_int = 0x04; -pub const LOG_NDELAY: c_int = 0x08; -pub const LOG_NOWAIT: c_int = 0x10; - -pub const LOG_PRIMASK: c_int = 7; -pub const LOG_FACMASK: c_int = 0x3f8; - -pub const PRIO_PROCESS: c_int = 0; -pub const PRIO_PGRP: c_int = 1; -pub const PRIO_USER: c_int = 2; - -pub const PRIO_MIN: c_int = -20; -pub const PRIO_MAX: c_int = 20; - -pub const IPPROTO_ICMP: c_int = 1; -pub const IPPROTO_ICMPV6: c_int = 58; -pub const IPPROTO_TCP: c_int = 6; -pub const IPPROTO_UDP: c_int = 17; -pub const IPPROTO_IP: c_int = 0; -pub const IPPROTO_IPV6: c_int = 41; - -pub const INADDR_LOOPBACK: in_addr_t = 2130706433; -pub const INADDR_ANY: in_addr_t = 0; -pub const INADDR_BROADCAST: in_addr_t = 4294967295; -pub const INADDR_NONE: in_addr_t = 4294967295; - -pub const EXIT_FAILURE: c_int = 1; -pub const EXIT_SUCCESS: c_int = 0; -pub const RAND_MAX: c_int = 2147483647; -pub const EOF: c_int = -1; -pub const SEEK_SET: c_int = 0; -pub const SEEK_CUR: c_int = 1; -pub const SEEK_END: c_int = 2; -pub const _IOFBF: c_int = 0; -pub const _IONBF: c_int = 2; -pub const _IOLBF: c_int = 1; - -pub const F_DUPFD: c_int = 0; -pub const F_GETFD: c_int = 1; -pub const F_SETFD: c_int = 2; -pub const F_GETFL: c_int = 3; -pub const F_SETFL: c_int = 4; - -// Linux-specific fcntls -pub const F_SETLEASE: c_int = 1024; -pub const F_GETLEASE: c_int = 1025; -pub const F_NOTIFY: c_int = 1026; -pub const F_CANCELLK: c_int = 1029; -pub const F_DUPFD_CLOEXEC: c_int = 1030; -pub const F_SETPIPE_SZ: c_int = 1031; -pub const F_GETPIPE_SZ: c_int = 1032; -pub const F_ADD_SEALS: c_int = 1033; -pub const F_GET_SEALS: c_int = 1034; - -pub const F_SEAL_SEAL: c_int = 0x0001; -pub const F_SEAL_SHRINK: c_int = 0x0002; -pub const F_SEAL_GROW: c_int = 0x0004; -pub const F_SEAL_WRITE: c_int = 0x0008; - -// FIXME(#235): Include file sealing fcntls once we have a way to verify them. 
- -pub const SIGTRAP: c_int = 5; - -pub const PTHREAD_CREATE_JOINABLE: c_int = 0; -pub const PTHREAD_CREATE_DETACHED: c_int = 1; - -pub const CLOCK_REALTIME: crate::clockid_t = 0; -pub const CLOCK_MONOTONIC: crate::clockid_t = 1; -pub const CLOCK_PROCESS_CPUTIME_ID: crate::clockid_t = 2; -pub const CLOCK_THREAD_CPUTIME_ID: crate::clockid_t = 3; -pub const CLOCK_MONOTONIC_RAW: crate::clockid_t = 4; -pub const CLOCK_REALTIME_COARSE: crate::clockid_t = 5; -pub const CLOCK_MONOTONIC_COARSE: crate::clockid_t = 6; -pub const CLOCK_BOOTTIME: crate::clockid_t = 7; -pub const CLOCK_REALTIME_ALARM: crate::clockid_t = 8; -pub const CLOCK_BOOTTIME_ALARM: crate::clockid_t = 9; -pub const CLOCK_SGI_CYCLE: crate::clockid_t = 10; -pub const CLOCK_TAI: crate::clockid_t = 11; -pub const TIMER_ABSTIME: c_int = 1; - -pub const RLIMIT_CPU: c_int = 0; -pub const RLIMIT_FSIZE: c_int = 1; -pub const RLIMIT_DATA: c_int = 2; -pub const RLIMIT_STACK: c_int = 3; -pub const RLIMIT_CORE: c_int = 4; -pub const RLIMIT_LOCKS: c_int = 10; -pub const RLIMIT_SIGPENDING: c_int = 11; -pub const RLIMIT_MSGQUEUE: c_int = 12; -pub const RLIMIT_NICE: c_int = 13; -pub const RLIMIT_RTPRIO: c_int = 14; - -pub const RUSAGE_SELF: c_int = 0; - -pub const O_RDONLY: c_int = 0; -pub const O_WRONLY: c_int = 1; -pub const O_RDWR: c_int = 2; - -pub const S_IFIFO: mode_t = 0o1_0000; -pub const S_IFCHR: mode_t = 0o2_0000; -pub const S_IFBLK: mode_t = 0o6_0000; -pub const S_IFDIR: mode_t = 0o4_0000; -pub const S_IFREG: mode_t = 0o10_0000; -pub const S_IFLNK: mode_t = 0o12_0000; -pub const S_IFSOCK: mode_t = 0o14_0000; -pub const S_IFMT: mode_t = 0o17_0000; -pub const S_IRWXU: mode_t = 0o0700; -pub const S_IXUSR: mode_t = 0o0100; -pub const S_IWUSR: mode_t = 0o0200; -pub const S_IRUSR: mode_t = 0o0400; -pub const S_IRWXG: mode_t = 0o0070; -pub const S_IXGRP: mode_t = 0o0010; -pub const S_IWGRP: mode_t = 0o0020; -pub const S_IRGRP: mode_t = 0o0040; -pub const S_IRWXO: mode_t = 0o0007; -pub const S_IXOTH: mode_t = 0o0001; -pub const S_IWOTH: mode_t = 0o0002; -pub const S_IROTH: mode_t = 0o0004; -pub const F_OK: c_int = 0; -pub const R_OK: c_int = 4; -pub const W_OK: c_int = 2; -pub const X_OK: c_int = 1; -pub const STDIN_FILENO: c_int = 0; -pub const STDOUT_FILENO: c_int = 1; -pub const STDERR_FILENO: c_int = 2; -pub const SIGHUP: c_int = 1; -pub const SIGINT: c_int = 2; -pub const SIGQUIT: c_int = 3; -pub const SIGILL: c_int = 4; -pub const SIGABRT: c_int = 6; -pub const SIGFPE: c_int = 8; -pub const SIGKILL: c_int = 9; -pub const SIGSEGV: c_int = 11; -pub const SIGPIPE: c_int = 13; -pub const SIGALRM: c_int = 14; -pub const SIGTERM: c_int = 15; - -pub const PROT_NONE: c_int = 0; -pub const PROT_READ: c_int = 1; -pub const PROT_WRITE: c_int = 2; -pub const PROT_EXEC: c_int = 4; - -pub const LC_CTYPE: c_int = 0; -pub const LC_NUMERIC: c_int = 1; -pub const LC_TIME: c_int = 2; -pub const LC_COLLATE: c_int = 3; -pub const LC_MONETARY: c_int = 4; -pub const LC_MESSAGES: c_int = 5; -pub const LC_ALL: c_int = 6; -pub const LC_CTYPE_MASK: c_int = 1 << LC_CTYPE; -pub const LC_NUMERIC_MASK: c_int = 1 << LC_NUMERIC; -pub const LC_TIME_MASK: c_int = 1 << LC_TIME; -pub const LC_COLLATE_MASK: c_int = 1 << LC_COLLATE; -pub const LC_MONETARY_MASK: c_int = 1 << LC_MONETARY; -pub const LC_MESSAGES_MASK: c_int = 1 << LC_MESSAGES; -// LC_ALL_MASK defined per platform - -pub const MAP_FILE: c_int = 0x0000; -pub const MAP_SHARED: c_int = 0x0001; -pub const MAP_PRIVATE: c_int = 0x0002; -pub const MAP_FIXED: c_int = 0x0010; - -pub const MAP_FAILED: *mut c_void = !0 as 
*mut c_void; - -// MS_ flags for msync(2) -pub const MS_ASYNC: c_int = 0x0001; -pub const MS_INVALIDATE: c_int = 0x0002; -pub const MS_SYNC: c_int = 0x0004; - -// MS_ flags for mount(2) -pub const MS_RDONLY: c_ulong = 0x01; -pub const MS_NOSUID: c_ulong = 0x02; -pub const MS_NODEV: c_ulong = 0x04; -pub const MS_NOEXEC: c_ulong = 0x08; -pub const MS_SYNCHRONOUS: c_ulong = 0x10; -pub const MS_REMOUNT: c_ulong = 0x20; -pub const MS_MANDLOCK: c_ulong = 0x40; -pub const MS_DIRSYNC: c_ulong = 0x80; -pub const MS_NOATIME: c_ulong = 0x0400; -pub const MS_NODIRATIME: c_ulong = 0x0800; -pub const MS_BIND: c_ulong = 0x1000; -pub const MS_MOVE: c_ulong = 0x2000; -pub const MS_REC: c_ulong = 0x4000; -pub const MS_SILENT: c_ulong = 0x8000; -pub const MS_POSIXACL: c_ulong = 0x010000; -pub const MS_UNBINDABLE: c_ulong = 0x020000; -pub const MS_PRIVATE: c_ulong = 0x040000; -pub const MS_SLAVE: c_ulong = 0x080000; -pub const MS_SHARED: c_ulong = 0x100000; -pub const MS_RELATIME: c_ulong = 0x200000; -pub const MS_KERNMOUNT: c_ulong = 0x400000; -pub const MS_I_VERSION: c_ulong = 0x800000; -pub const MS_STRICTATIME: c_ulong = 0x1000000; -pub const MS_ACTIVE: c_ulong = 0x40000000; -pub const MS_NOUSER: c_ulong = 0x80000000; -pub const MS_MGC_VAL: c_ulong = 0xc0ed0000; -pub const MS_MGC_MSK: c_ulong = 0xffff0000; -pub const MS_RMT_MASK: c_ulong = 0x800051; - -pub const EPERM: c_int = 1; -pub const ENOENT: c_int = 2; -pub const ESRCH: c_int = 3; -pub const EINTR: c_int = 4; -pub const EIO: c_int = 5; -pub const ENXIO: c_int = 6; -pub const E2BIG: c_int = 7; -pub const ENOEXEC: c_int = 8; -pub const EBADF: c_int = 9; -pub const ECHILD: c_int = 10; -pub const EAGAIN: c_int = 11; -pub const ENOMEM: c_int = 12; -pub const EACCES: c_int = 13; -pub const EFAULT: c_int = 14; -pub const ENOTBLK: c_int = 15; -pub const EBUSY: c_int = 16; -pub const EEXIST: c_int = 17; -pub const EXDEV: c_int = 18; -pub const ENODEV: c_int = 19; -pub const ENOTDIR: c_int = 20; -pub const EISDIR: c_int = 21; -pub const EINVAL: c_int = 22; -pub const ENFILE: c_int = 23; -pub const EMFILE: c_int = 24; -pub const ENOTTY: c_int = 25; -pub const ETXTBSY: c_int = 26; -pub const EFBIG: c_int = 27; -pub const ENOSPC: c_int = 28; -pub const ESPIPE: c_int = 29; -pub const EROFS: c_int = 30; -pub const EMLINK: c_int = 31; -pub const EPIPE: c_int = 32; -pub const EDOM: c_int = 33; -pub const ERANGE: c_int = 34; -pub const EWOULDBLOCK: c_int = EAGAIN; - -pub const SCM_RIGHTS: c_int = 0x01; -pub const SCM_CREDENTIALS: c_int = 0x02; - -pub const PROT_GROWSDOWN: c_int = 0x1000000; -pub const PROT_GROWSUP: c_int = 0x2000000; - -pub const MAP_TYPE: c_int = 0x000f; - -pub const MADV_NORMAL: c_int = 0; -pub const MADV_RANDOM: c_int = 1; -pub const MADV_SEQUENTIAL: c_int = 2; -pub const MADV_WILLNEED: c_int = 3; -pub const MADV_DONTNEED: c_int = 4; -pub const MADV_FREE: c_int = 8; -pub const MADV_REMOVE: c_int = 9; -pub const MADV_DONTFORK: c_int = 10; -pub const MADV_DOFORK: c_int = 11; -pub const MADV_MERGEABLE: c_int = 12; -pub const MADV_UNMERGEABLE: c_int = 13; -pub const MADV_HUGEPAGE: c_int = 14; -pub const MADV_NOHUGEPAGE: c_int = 15; -pub const MADV_DONTDUMP: c_int = 16; -pub const MADV_DODUMP: c_int = 17; -pub const MADV_HWPOISON: c_int = 100; -pub const MADV_SOFT_OFFLINE: c_int = 101; - -pub const IFF_UP: c_int = 0x1; -pub const IFF_BROADCAST: c_int = 0x2; -pub const IFF_DEBUG: c_int = 0x4; -pub const IFF_LOOPBACK: c_int = 0x8; -pub const IFF_POINTOPOINT: c_int = 0x10; -pub const IFF_NOTRAILERS: c_int = 0x20; -pub const IFF_RUNNING: c_int = 0x40; 
-pub const IFF_NOARP: c_int = 0x80; -pub const IFF_PROMISC: c_int = 0x100; -pub const IFF_ALLMULTI: c_int = 0x200; -pub const IFF_MASTER: c_int = 0x400; -pub const IFF_SLAVE: c_int = 0x800; -pub const IFF_MULTICAST: c_int = 0x1000; -pub const IFF_PORTSEL: c_int = 0x2000; -pub const IFF_AUTOMEDIA: c_int = 0x4000; -pub const IFF_DYNAMIC: c_int = 0x8000; -pub const IFF_TUN: c_int = 0x0001; -pub const IFF_TAP: c_int = 0x0002; -pub const IFF_NO_PI: c_int = 0x1000; - -pub const SOL_IP: c_int = 0; -pub const SOL_TCP: c_int = 6; -pub const SOL_UDP: c_int = 17; -pub const SOL_IPV6: c_int = 41; -pub const SOL_ICMPV6: c_int = 58; -pub const SOL_RAW: c_int = 255; -pub const SOL_DECNET: c_int = 261; -pub const SOL_X25: c_int = 262; -pub const SOL_PACKET: c_int = 263; -pub const SOL_ATM: c_int = 264; -pub const SOL_AAL: c_int = 265; -pub const SOL_IRDA: c_int = 266; -pub const SOL_NETBEUI: c_int = 267; -pub const SOL_LLC: c_int = 268; -pub const SOL_DCCP: c_int = 269; -pub const SOL_NETLINK: c_int = 270; -pub const SOL_TIPC: c_int = 271; - -pub const AF_UNSPEC: c_int = 0; -pub const AF_UNIX: c_int = 1; -pub const AF_LOCAL: c_int = 1; -pub const AF_INET: c_int = 2; -pub const AF_AX25: c_int = 3; -pub const AF_IPX: c_int = 4; -pub const AF_APPLETALK: c_int = 5; -pub const AF_NETROM: c_int = 6; -pub const AF_BRIDGE: c_int = 7; -pub const AF_ATMPVC: c_int = 8; -pub const AF_X25: c_int = 9; -pub const AF_INET6: c_int = 10; -pub const AF_ROSE: c_int = 11; -pub const AF_DECnet: c_int = 12; -pub const AF_NETBEUI: c_int = 13; -pub const AF_SECURITY: c_int = 14; -pub const AF_KEY: c_int = 15; -pub const AF_NETLINK: c_int = 16; -pub const AF_ROUTE: c_int = AF_NETLINK; -pub const AF_PACKET: c_int = 17; -pub const AF_ASH: c_int = 18; -pub const AF_ECONET: c_int = 19; -pub const AF_ATMSVC: c_int = 20; -pub const AF_RDS: c_int = 21; -pub const AF_SNA: c_int = 22; -pub const AF_IRDA: c_int = 23; -pub const AF_PPPOX: c_int = 24; -pub const AF_WANPIPE: c_int = 25; -pub const AF_LLC: c_int = 26; -pub const AF_CAN: c_int = 29; -pub const AF_TIPC: c_int = 30; -pub const AF_BLUETOOTH: c_int = 31; -pub const AF_IUCV: c_int = 32; -pub const AF_RXRPC: c_int = 33; -pub const AF_ISDN: c_int = 34; -pub const AF_PHONET: c_int = 35; -pub const AF_IEEE802154: c_int = 36; -pub const AF_CAIF: c_int = 37; -pub const AF_ALG: c_int = 38; - -pub const PF_UNSPEC: c_int = AF_UNSPEC; -pub const PF_UNIX: c_int = AF_UNIX; -pub const PF_LOCAL: c_int = AF_LOCAL; -pub const PF_INET: c_int = AF_INET; -pub const PF_AX25: c_int = AF_AX25; -pub const PF_IPX: c_int = AF_IPX; -pub const PF_APPLETALK: c_int = AF_APPLETALK; -pub const PF_NETROM: c_int = AF_NETROM; -pub const PF_BRIDGE: c_int = AF_BRIDGE; -pub const PF_ATMPVC: c_int = AF_ATMPVC; -pub const PF_X25: c_int = AF_X25; -pub const PF_INET6: c_int = AF_INET6; -pub const PF_ROSE: c_int = AF_ROSE; -pub const PF_DECnet: c_int = AF_DECnet; -pub const PF_NETBEUI: c_int = AF_NETBEUI; -pub const PF_SECURITY: c_int = AF_SECURITY; -pub const PF_KEY: c_int = AF_KEY; -pub const PF_NETLINK: c_int = AF_NETLINK; -pub const PF_ROUTE: c_int = AF_ROUTE; -pub const PF_PACKET: c_int = AF_PACKET; -pub const PF_ASH: c_int = AF_ASH; -pub const PF_ECONET: c_int = AF_ECONET; -pub const PF_ATMSVC: c_int = AF_ATMSVC; -pub const PF_RDS: c_int = AF_RDS; -pub const PF_SNA: c_int = AF_SNA; -pub const PF_IRDA: c_int = AF_IRDA; -pub const PF_PPPOX: c_int = AF_PPPOX; -pub const PF_WANPIPE: c_int = AF_WANPIPE; -pub const PF_LLC: c_int = AF_LLC; -pub const PF_CAN: c_int = AF_CAN; -pub const PF_TIPC: c_int = AF_TIPC; -pub const 
PF_BLUETOOTH: c_int = AF_BLUETOOTH; -pub const PF_IUCV: c_int = AF_IUCV; -pub const PF_RXRPC: c_int = AF_RXRPC; -pub const PF_ISDN: c_int = AF_ISDN; -pub const PF_PHONET: c_int = AF_PHONET; -pub const PF_IEEE802154: c_int = AF_IEEE802154; -pub const PF_CAIF: c_int = AF_CAIF; -pub const PF_ALG: c_int = AF_ALG; - -pub const SOMAXCONN: c_int = 128; - -pub const MSG_OOB: c_int = 1; -pub const MSG_PEEK: c_int = 2; -pub const MSG_DONTROUTE: c_int = 4; -pub const MSG_CTRUNC: c_int = 8; -pub const MSG_TRUNC: c_int = 0x20; -pub const MSG_DONTWAIT: c_int = 0x40; -pub const MSG_EOR: c_int = 0x80; -pub const MSG_WAITALL: c_int = 0x100; -pub const MSG_FIN: c_int = 0x200; -pub const MSG_SYN: c_int = 0x400; -pub const MSG_CONFIRM: c_int = 0x800; -pub const MSG_RST: c_int = 0x1000; -pub const MSG_ERRQUEUE: c_int = 0x2000; -pub const MSG_NOSIGNAL: c_int = 0x4000; -pub const MSG_MORE: c_int = 0x8000; -pub const MSG_WAITFORONE: c_int = 0x10000; -pub const MSG_FASTOPEN: c_int = 0x20000000; -pub const MSG_CMSG_CLOEXEC: c_int = 0x40000000; - -pub const SCM_TIMESTAMP: c_int = SO_TIMESTAMP; - -pub const SOCK_RAW: c_int = 3; -pub const SOCK_RDM: c_int = 4; - -pub const IP_TOS: c_int = 1; -pub const IP_TTL: c_int = 2; -pub const IP_HDRINCL: c_int = 3; -pub const IP_RECVTOS: c_int = 13; -pub const IP_FREEBIND: c_int = 15; -pub const IP_TRANSPARENT: c_int = 19; -pub const IP_MULTICAST_IF: c_int = 32; -pub const IP_MULTICAST_TTL: c_int = 33; -pub const IP_MULTICAST_LOOP: c_int = 34; -pub const IP_ADD_MEMBERSHIP: c_int = 35; -pub const IP_DROP_MEMBERSHIP: c_int = 36; - -pub const IPV6_UNICAST_HOPS: c_int = 16; -pub const IPV6_MULTICAST_IF: c_int = 17; -pub const IPV6_MULTICAST_HOPS: c_int = 18; -pub const IPV6_MULTICAST_LOOP: c_int = 19; -pub const IPV6_ADD_MEMBERSHIP: c_int = 20; -pub const IPV6_DROP_MEMBERSHIP: c_int = 21; -pub const IPV6_V6ONLY: c_int = 26; -pub const IPV6_RECVPKTINFO: c_int = 49; -pub const IPV6_RECVTCLASS: c_int = 66; -pub const IPV6_TCLASS: c_int = 67; - -pub const TCP_NODELAY: c_int = 1; -pub const TCP_MAXSEG: c_int = 2; -pub const TCP_CORK: c_int = 3; -pub const TCP_KEEPIDLE: c_int = 4; -pub const TCP_KEEPINTVL: c_int = 5; -pub const TCP_KEEPCNT: c_int = 6; -pub const TCP_SYNCNT: c_int = 7; -pub const TCP_LINGER2: c_int = 8; -pub const TCP_DEFER_ACCEPT: c_int = 9; -pub const TCP_WINDOW_CLAMP: c_int = 10; -pub const TCP_INFO: c_int = 11; -pub const TCP_QUICKACK: c_int = 12; -pub const TCP_CONGESTION: c_int = 13; - -pub const SO_DEBUG: c_int = 1; - -pub const SHUT_RD: c_int = 0; -pub const SHUT_WR: c_int = 1; -pub const SHUT_RDWR: c_int = 2; - -pub const LOCK_SH: c_int = 1; -pub const LOCK_EX: c_int = 2; -pub const LOCK_NB: c_int = 4; -pub const LOCK_UN: c_int = 8; - -pub const SS_ONSTACK: c_int = 1; -pub const SS_DISABLE: c_int = 2; - -pub const PATH_MAX: c_int = 4096; - -pub const FD_SETSIZE: usize = 1024; - -pub const EPOLLIN: c_int = 0x1; -pub const EPOLLPRI: c_int = 0x2; -pub const EPOLLOUT: c_int = 0x4; -pub const EPOLLRDNORM: c_int = 0x40; -pub const EPOLLRDBAND: c_int = 0x80; -pub const EPOLLWRNORM: c_int = 0x100; -pub const EPOLLWRBAND: c_int = 0x200; -pub const EPOLLMSG: c_int = 0x400; -pub const EPOLLERR: c_int = 0x8; -pub const EPOLLHUP: c_int = 0x10; -pub const EPOLLET: c_int = 0x80000000; - -pub const EPOLL_CTL_ADD: c_int = 1; -pub const EPOLL_CTL_MOD: c_int = 3; -pub const EPOLL_CTL_DEL: c_int = 2; - -pub const MNT_DETACH: c_int = 0x2; -pub const MNT_EXPIRE: c_int = 0x4; - -pub const Q_GETFMT: c_int = 0x800004; -pub const Q_GETINFO: c_int = 0x800005; -pub const Q_SETINFO: c_int = 
0x800006; -pub const QIF_BLIMITS: u32 = 1; -pub const QIF_SPACE: u32 = 2; -pub const QIF_ILIMITS: u32 = 4; -pub const QIF_INODES: u32 = 8; -pub const QIF_BTIME: u32 = 16; -pub const QIF_ITIME: u32 = 32; -pub const QIF_LIMITS: u32 = 5; -pub const QIF_USAGE: u32 = 10; -pub const QIF_TIMES: u32 = 48; -pub const QIF_ALL: u32 = 63; - -pub const MNT_FORCE: c_int = 0x1; - -pub const Q_SYNC: c_int = 0x800001; -pub const Q_QUOTAON: c_int = 0x800002; -pub const Q_QUOTAOFF: c_int = 0x800003; -pub const Q_GETQUOTA: c_int = 0x800007; -pub const Q_SETQUOTA: c_int = 0x800008; - -pub const TCIOFF: c_int = 2; -pub const TCION: c_int = 3; -pub const TCOOFF: c_int = 0; -pub const TCOON: c_int = 1; -pub const TCIFLUSH: c_int = 0; -pub const TCOFLUSH: c_int = 1; -pub const TCIOFLUSH: c_int = 2; -pub const NL0: c_int = 0x00000000; -pub const NL1: c_int = 0x00000100; -pub const TAB0: c_int = 0x00000000; -pub const CR0: c_int = 0x00000000; -pub const FF0: c_int = 0x00000000; -pub const BS0: c_int = 0x00000000; -pub const VT0: c_int = 0x00000000; -pub const VERASE: usize = 2; -pub const VKILL: usize = 3; -pub const VINTR: usize = 0; -pub const VQUIT: usize = 1; -pub const VLNEXT: usize = 15; -pub const IGNBRK: crate::tcflag_t = 0x00000001; -pub const BRKINT: crate::tcflag_t = 0x00000002; -pub const IGNPAR: crate::tcflag_t = 0x00000004; -pub const PARMRK: crate::tcflag_t = 0x00000008; -pub const INPCK: crate::tcflag_t = 0x00000010; -pub const ISTRIP: crate::tcflag_t = 0x00000020; -pub const INLCR: crate::tcflag_t = 0x00000040; -pub const IGNCR: crate::tcflag_t = 0x00000080; -pub const ICRNL: crate::tcflag_t = 0x00000100; -pub const IXANY: crate::tcflag_t = 0x00000800; -pub const IMAXBEL: crate::tcflag_t = 0x00002000; -pub const OPOST: crate::tcflag_t = 0x1; -pub const CS5: crate::tcflag_t = 0x00000000; -pub const CRTSCTS: crate::tcflag_t = 0x80000000; -pub const ECHO: crate::tcflag_t = 0x00000008; -pub const OCRNL: crate::tcflag_t = 0o000010; -pub const ONOCR: crate::tcflag_t = 0o000020; -pub const ONLRET: crate::tcflag_t = 0o000040; -pub const OFILL: crate::tcflag_t = 0o000100; -pub const OFDEL: crate::tcflag_t = 0o000200; - -pub const CLONE_VM: c_int = 0x100; -pub const CLONE_FS: c_int = 0x200; -pub const CLONE_FILES: c_int = 0x400; -pub const CLONE_SIGHAND: c_int = 0x800; -pub const CLONE_PTRACE: c_int = 0x2000; -pub const CLONE_VFORK: c_int = 0x4000; -pub const CLONE_PARENT: c_int = 0x8000; -pub const CLONE_THREAD: c_int = 0x10000; -pub const CLONE_NEWNS: c_int = 0x20000; -pub const CLONE_SYSVSEM: c_int = 0x40000; -pub const CLONE_SETTLS: c_int = 0x80000; -pub const CLONE_PARENT_SETTID: c_int = 0x100000; -pub const CLONE_CHILD_CLEARTID: c_int = 0x200000; -pub const CLONE_DETACHED: c_int = 0x400000; -pub const CLONE_UNTRACED: c_int = 0x800000; -pub const CLONE_CHILD_SETTID: c_int = 0x01000000; -pub const CLONE_NEWUTS: c_int = 0x04000000; -pub const CLONE_NEWIPC: c_int = 0x08000000; -pub const CLONE_NEWUSER: c_int = 0x10000000; -pub const CLONE_NEWPID: c_int = 0x20000000; -pub const CLONE_NEWNET: c_int = 0x40000000; -pub const CLONE_IO: c_int = 0x80000000; -pub const CLONE_NEWCGROUP: c_int = 0x02000000; - -pub const WNOHANG: c_int = 0x00000001; -pub const WUNTRACED: c_int = 0x00000002; -pub const WSTOPPED: c_int = WUNTRACED; -pub const WEXITED: c_int = 0x00000004; -pub const WCONTINUED: c_int = 0x00000008; -pub const WNOWAIT: c_int = 0x01000000; - -// Options set using PTRACE_SETOPTIONS. 
-pub const PTRACE_O_TRACESYSGOOD: c_int = 0x00000001; -pub const PTRACE_O_TRACEFORK: c_int = 0x00000002; -pub const PTRACE_O_TRACEVFORK: c_int = 0x00000004; -pub const PTRACE_O_TRACECLONE: c_int = 0x00000008; -pub const PTRACE_O_TRACEEXEC: c_int = 0x00000010; -pub const PTRACE_O_TRACEVFORKDONE: c_int = 0x00000020; -pub const PTRACE_O_TRACEEXIT: c_int = 0x00000040; -pub const PTRACE_O_TRACESECCOMP: c_int = 0x00000080; -pub const PTRACE_O_EXITKILL: c_int = 0x00100000; -pub const PTRACE_O_SUSPEND_SECCOMP: c_int = 0x00200000; -pub const PTRACE_O_MASK: c_int = 0x003000ff; - -// Wait extended result codes for the above trace options. -pub const PTRACE_EVENT_FORK: c_int = 1; -pub const PTRACE_EVENT_VFORK: c_int = 2; -pub const PTRACE_EVENT_CLONE: c_int = 3; -pub const PTRACE_EVENT_EXEC: c_int = 4; -pub const PTRACE_EVENT_VFORK_DONE: c_int = 5; -pub const PTRACE_EVENT_EXIT: c_int = 6; -pub const PTRACE_EVENT_SECCOMP: c_int = 7; -// PTRACE_EVENT_STOP was added to glibc in 2.26 -// pub const PTRACE_EVENT_STOP: c_int = 128; - -pub const __WNOTHREAD: c_int = 0x20000000; -pub const __WALL: c_int = 0x40000000; -pub const __WCLONE: c_int = 0x80000000; - -pub const SPLICE_F_MOVE: c_uint = 0x01; -pub const SPLICE_F_NONBLOCK: c_uint = 0x02; -pub const SPLICE_F_MORE: c_uint = 0x04; -pub const SPLICE_F_GIFT: c_uint = 0x08; - -pub const RTLD_LOCAL: c_int = 0; -pub const RTLD_LAZY: c_int = 1; - -pub const POSIX_FADV_NORMAL: c_int = 0; -pub const POSIX_FADV_RANDOM: c_int = 1; -pub const POSIX_FADV_SEQUENTIAL: c_int = 2; -pub const POSIX_FADV_WILLNEED: c_int = 3; - -pub const AT_FDCWD: c_int = -100; -pub const AT_SYMLINK_NOFOLLOW: c_int = 0x100; -pub const AT_REMOVEDIR: c_int = 0x200; -pub const AT_EACCESS: c_int = 0x200; -pub const AT_SYMLINK_FOLLOW: c_int = 0x400; -pub const AT_NO_AUTOMOUNT: c_int = 0x800; -pub const AT_EMPTY_PATH: c_int = 0x1000; - -pub const LOG_CRON: c_int = 9 << 3; -pub const LOG_AUTHPRIV: c_int = 10 << 3; -pub const LOG_FTP: c_int = 11 << 3; -pub const LOG_PERROR: c_int = 0x20; - -pub const PIPE_BUF: usize = 4096; - -pub const SI_LOAD_SHIFT: c_uint = 16; - -pub const CLD_EXITED: c_int = 1; -pub const CLD_KILLED: c_int = 2; -pub const CLD_DUMPED: c_int = 3; -pub const CLD_TRAPPED: c_int = 4; -pub const CLD_STOPPED: c_int = 5; -pub const CLD_CONTINUED: c_int = 6; - -pub const SIGEV_SIGNAL: c_int = 0; -pub const SIGEV_NONE: c_int = 1; -pub const SIGEV_THREAD: c_int = 2; - -pub const P_ALL: idtype_t = 0; -pub const P_PID: idtype_t = 1; -pub const P_PGID: idtype_t = 2; - -pub const UTIME_OMIT: c_long = 1073741822; -pub const UTIME_NOW: c_long = 1073741823; - -pub const POLLIN: c_short = 0x1; -pub const POLLPRI: c_short = 0x2; -pub const POLLOUT: c_short = 0x4; -pub const POLLERR: c_short = 0x8; -pub const POLLHUP: c_short = 0x10; -pub const POLLNVAL: c_short = 0x20; -pub const POLLRDNORM: c_short = 0x040; -pub const POLLRDBAND: c_short = 0x080; - -pub const ABDAY_1: crate::nl_item = 0x20000; -pub const ABDAY_2: crate::nl_item = 0x20001; -pub const ABDAY_3: crate::nl_item = 0x20002; -pub const ABDAY_4: crate::nl_item = 0x20003; -pub const ABDAY_5: crate::nl_item = 0x20004; -pub const ABDAY_6: crate::nl_item = 0x20005; -pub const ABDAY_7: crate::nl_item = 0x20006; - -pub const DAY_1: crate::nl_item = 0x20007; -pub const DAY_2: crate::nl_item = 0x20008; -pub const DAY_3: crate::nl_item = 0x20009; -pub const DAY_4: crate::nl_item = 0x2000A; -pub const DAY_5: crate::nl_item = 0x2000B; -pub const DAY_6: crate::nl_item = 0x2000C; -pub const DAY_7: crate::nl_item = 0x2000D; - -pub const ABMON_1: 
crate::nl_item = 0x2000E; -pub const ABMON_2: crate::nl_item = 0x2000F; -pub const ABMON_3: crate::nl_item = 0x20010; -pub const ABMON_4: crate::nl_item = 0x20011; -pub const ABMON_5: crate::nl_item = 0x20012; -pub const ABMON_6: crate::nl_item = 0x20013; -pub const ABMON_7: crate::nl_item = 0x20014; -pub const ABMON_8: crate::nl_item = 0x20015; -pub const ABMON_9: crate::nl_item = 0x20016; -pub const ABMON_10: crate::nl_item = 0x20017; -pub const ABMON_11: crate::nl_item = 0x20018; -pub const ABMON_12: crate::nl_item = 0x20019; - -pub const MON_1: crate::nl_item = 0x2001A; -pub const MON_2: crate::nl_item = 0x2001B; -pub const MON_3: crate::nl_item = 0x2001C; -pub const MON_4: crate::nl_item = 0x2001D; -pub const MON_5: crate::nl_item = 0x2001E; -pub const MON_6: crate::nl_item = 0x2001F; -pub const MON_7: crate::nl_item = 0x20020; -pub const MON_8: crate::nl_item = 0x20021; -pub const MON_9: crate::nl_item = 0x20022; -pub const MON_10: crate::nl_item = 0x20023; -pub const MON_11: crate::nl_item = 0x20024; -pub const MON_12: crate::nl_item = 0x20025; - -pub const AM_STR: crate::nl_item = 0x20026; -pub const PM_STR: crate::nl_item = 0x20027; - -pub const D_T_FMT: crate::nl_item = 0x20028; -pub const D_FMT: crate::nl_item = 0x20029; -pub const T_FMT: crate::nl_item = 0x2002A; -pub const T_FMT_AMPM: crate::nl_item = 0x2002B; - -pub const ERA: crate::nl_item = 0x2002C; -pub const ERA_D_FMT: crate::nl_item = 0x2002E; -pub const ALT_DIGITS: crate::nl_item = 0x2002F; -pub const ERA_D_T_FMT: crate::nl_item = 0x20030; -pub const ERA_T_FMT: crate::nl_item = 0x20031; - -pub const CODESET: crate::nl_item = 14; - -pub const CRNCYSTR: crate::nl_item = 0x4000F; - -pub const RUSAGE_THREAD: c_int = 1; -pub const RUSAGE_CHILDREN: c_int = -1; - -pub const RADIXCHAR: crate::nl_item = 0x10000; -pub const THOUSEP: crate::nl_item = 0x10001; - -pub const YESEXPR: crate::nl_item = 0x50000; -pub const NOEXPR: crate::nl_item = 0x50001; -pub const YESSTR: crate::nl_item = 0x50002; -pub const NOSTR: crate::nl_item = 0x50003; - -pub const FILENAME_MAX: c_uint = 4096; -pub const L_tmpnam: c_uint = 20; -pub const _PC_LINK_MAX: c_int = 0; -pub const _PC_MAX_CANON: c_int = 1; -pub const _PC_MAX_INPUT: c_int = 2; -pub const _PC_NAME_MAX: c_int = 3; -pub const _PC_PATH_MAX: c_int = 4; -pub const _PC_PIPE_BUF: c_int = 5; -pub const _PC_CHOWN_RESTRICTED: c_int = 6; -pub const _PC_NO_TRUNC: c_int = 7; -pub const _PC_VDISABLE: c_int = 8; -pub const _PC_SYNC_IO: c_int = 9; -pub const _PC_ASYNC_IO: c_int = 10; -pub const _PC_PRIO_IO: c_int = 11; -pub const _PC_SOCK_MAXBUF: c_int = 12; -pub const _PC_FILESIZEBITS: c_int = 13; -pub const _PC_REC_INCR_XFER_SIZE: c_int = 14; -pub const _PC_REC_MAX_XFER_SIZE: c_int = 15; -pub const _PC_REC_MIN_XFER_SIZE: c_int = 16; -pub const _PC_REC_XFER_ALIGN: c_int = 17; -pub const _PC_ALLOC_SIZE_MIN: c_int = 18; -pub const _PC_SYMLINK_MAX: c_int = 19; -pub const _PC_2_SYMLINKS: c_int = 20; - -pub const _SC_ARG_MAX: c_int = 0; -pub const _SC_CHILD_MAX: c_int = 1; -pub const _SC_CLK_TCK: c_int = 2; -pub const _SC_NGROUPS_MAX: c_int = 3; -pub const _SC_OPEN_MAX: c_int = 4; -pub const _SC_STREAM_MAX: c_int = 5; -pub const _SC_TZNAME_MAX: c_int = 6; -pub const _SC_JOB_CONTROL: c_int = 7; -pub const _SC_SAVED_IDS: c_int = 8; -pub const _SC_REALTIME_SIGNALS: c_int = 9; -pub const _SC_PRIORITY_SCHEDULING: c_int = 10; -pub const _SC_TIMERS: c_int = 11; -pub const _SC_ASYNCHRONOUS_IO: c_int = 12; -pub const _SC_PRIORITIZED_IO: c_int = 13; -pub const _SC_SYNCHRONIZED_IO: c_int = 14; -pub const _SC_FSYNC: 
c_int = 15; -pub const _SC_MAPPED_FILES: c_int = 16; -pub const _SC_MEMLOCK: c_int = 17; -pub const _SC_MEMLOCK_RANGE: c_int = 18; -pub const _SC_MEMORY_PROTECTION: c_int = 19; -pub const _SC_MESSAGE_PASSING: c_int = 20; -pub const _SC_SEMAPHORES: c_int = 21; -pub const _SC_SHARED_MEMORY_OBJECTS: c_int = 22; -pub const _SC_AIO_LISTIO_MAX: c_int = 23; -pub const _SC_AIO_MAX: c_int = 24; -pub const _SC_AIO_PRIO_DELTA_MAX: c_int = 25; -pub const _SC_DELAYTIMER_MAX: c_int = 26; -pub const _SC_MQ_OPEN_MAX: c_int = 27; -pub const _SC_MQ_PRIO_MAX: c_int = 28; -pub const _SC_VERSION: c_int = 29; -pub const _SC_PAGESIZE: c_int = 30; -pub const _SC_PAGE_SIZE: c_int = _SC_PAGESIZE; -pub const _SC_RTSIG_MAX: c_int = 31; -pub const _SC_SEM_NSEMS_MAX: c_int = 32; -pub const _SC_SEM_VALUE_MAX: c_int = 33; -pub const _SC_SIGQUEUE_MAX: c_int = 34; -pub const _SC_TIMER_MAX: c_int = 35; -pub const _SC_BC_BASE_MAX: c_int = 36; -pub const _SC_BC_DIM_MAX: c_int = 37; -pub const _SC_BC_SCALE_MAX: c_int = 38; -pub const _SC_BC_STRING_MAX: c_int = 39; -pub const _SC_COLL_WEIGHTS_MAX: c_int = 40; -pub const _SC_EXPR_NEST_MAX: c_int = 42; -pub const _SC_LINE_MAX: c_int = 43; -pub const _SC_RE_DUP_MAX: c_int = 44; -pub const _SC_2_VERSION: c_int = 46; -pub const _SC_2_C_BIND: c_int = 47; -pub const _SC_2_C_DEV: c_int = 48; -pub const _SC_2_FORT_DEV: c_int = 49; -pub const _SC_2_FORT_RUN: c_int = 50; -pub const _SC_2_SW_DEV: c_int = 51; -pub const _SC_2_LOCALEDEF: c_int = 52; -pub const _SC_UIO_MAXIOV: c_int = 60; -pub const _SC_IOV_MAX: c_int = 60; -pub const _SC_THREADS: c_int = 67; -pub const _SC_THREAD_SAFE_FUNCTIONS: c_int = 68; -pub const _SC_GETGR_R_SIZE_MAX: c_int = 69; -pub const _SC_GETPW_R_SIZE_MAX: c_int = 70; -pub const _SC_LOGIN_NAME_MAX: c_int = 71; -pub const _SC_TTY_NAME_MAX: c_int = 72; -pub const _SC_THREAD_DESTRUCTOR_ITERATIONS: c_int = 73; -pub const _SC_THREAD_KEYS_MAX: c_int = 74; -pub const _SC_THREAD_STACK_MIN: c_int = 75; -pub const _SC_THREAD_THREADS_MAX: c_int = 76; -pub const _SC_THREAD_ATTR_STACKADDR: c_int = 77; -pub const _SC_THREAD_ATTR_STACKSIZE: c_int = 78; -pub const _SC_THREAD_PRIORITY_SCHEDULING: c_int = 79; -pub const _SC_THREAD_PRIO_INHERIT: c_int = 80; -pub const _SC_THREAD_PRIO_PROTECT: c_int = 81; -pub const _SC_THREAD_PROCESS_SHARED: c_int = 82; -pub const _SC_NPROCESSORS_CONF: c_int = 83; -pub const _SC_NPROCESSORS_ONLN: c_int = 84; -pub const _SC_PHYS_PAGES: c_int = 85; -pub const _SC_AVPHYS_PAGES: c_int = 86; -pub const _SC_ATEXIT_MAX: c_int = 87; -pub const _SC_PASS_MAX: c_int = 88; -pub const _SC_XOPEN_VERSION: c_int = 89; -pub const _SC_XOPEN_XCU_VERSION: c_int = 90; -pub const _SC_XOPEN_UNIX: c_int = 91; -pub const _SC_XOPEN_CRYPT: c_int = 92; -pub const _SC_XOPEN_ENH_I18N: c_int = 93; -pub const _SC_XOPEN_SHM: c_int = 94; -pub const _SC_2_CHAR_TERM: c_int = 95; -pub const _SC_2_UPE: c_int = 97; -pub const _SC_XOPEN_XPG2: c_int = 98; -pub const _SC_XOPEN_XPG3: c_int = 99; -pub const _SC_XOPEN_XPG4: c_int = 100; -pub const _SC_NZERO: c_int = 109; -pub const _SC_XBS5_ILP32_OFF32: c_int = 125; -pub const _SC_XBS5_ILP32_OFFBIG: c_int = 126; -pub const _SC_XBS5_LP64_OFF64: c_int = 127; -pub const _SC_XBS5_LPBIG_OFFBIG: c_int = 128; -pub const _SC_XOPEN_LEGACY: c_int = 129; -pub const _SC_XOPEN_REALTIME: c_int = 130; -pub const _SC_XOPEN_REALTIME_THREADS: c_int = 131; -pub const _SC_ADVISORY_INFO: c_int = 132; -pub const _SC_BARRIERS: c_int = 133; -pub const _SC_CLOCK_SELECTION: c_int = 137; -pub const _SC_CPUTIME: c_int = 138; -pub const _SC_THREAD_CPUTIME: c_int = 139; 
-pub const _SC_MONOTONIC_CLOCK: c_int = 149; -pub const _SC_READER_WRITER_LOCKS: c_int = 153; -pub const _SC_SPIN_LOCKS: c_int = 154; -pub const _SC_REGEXP: c_int = 155; -pub const _SC_SHELL: c_int = 157; -pub const _SC_SPAWN: c_int = 159; -pub const _SC_SPORADIC_SERVER: c_int = 160; -pub const _SC_THREAD_SPORADIC_SERVER: c_int = 161; -pub const _SC_TIMEOUTS: c_int = 164; -pub const _SC_TYPED_MEMORY_OBJECTS: c_int = 165; -pub const _SC_2_PBS: c_int = 168; -pub const _SC_2_PBS_ACCOUNTING: c_int = 169; -pub const _SC_2_PBS_LOCATE: c_int = 170; -pub const _SC_2_PBS_MESSAGE: c_int = 171; -pub const _SC_2_PBS_TRACK: c_int = 172; -pub const _SC_SYMLOOP_MAX: c_int = 173; -pub const _SC_STREAMS: c_int = 174; -pub const _SC_2_PBS_CHECKPOINT: c_int = 175; -pub const _SC_V6_ILP32_OFF32: c_int = 176; -pub const _SC_V6_ILP32_OFFBIG: c_int = 177; -pub const _SC_V6_LP64_OFF64: c_int = 178; -pub const _SC_V6_LPBIG_OFFBIG: c_int = 179; -pub const _SC_HOST_NAME_MAX: c_int = 180; -pub const _SC_TRACE: c_int = 181; -pub const _SC_TRACE_EVENT_FILTER: c_int = 182; -pub const _SC_TRACE_INHERIT: c_int = 183; -pub const _SC_TRACE_LOG: c_int = 184; -pub const _SC_IPV6: c_int = 235; -pub const _SC_RAW_SOCKETS: c_int = 236; -pub const _SC_V7_ILP32_OFF32: c_int = 237; -pub const _SC_V7_ILP32_OFFBIG: c_int = 238; -pub const _SC_V7_LP64_OFF64: c_int = 239; -pub const _SC_V7_LPBIG_OFFBIG: c_int = 240; -pub const _SC_SS_REPL_MAX: c_int = 241; -pub const _SC_TRACE_EVENT_NAME_MAX: c_int = 242; -pub const _SC_TRACE_NAME_MAX: c_int = 243; -pub const _SC_TRACE_SYS_MAX: c_int = 244; -pub const _SC_TRACE_USER_EVENT_MAX: c_int = 245; -pub const _SC_XOPEN_STREAMS: c_int = 246; -pub const _SC_THREAD_ROBUST_PRIO_INHERIT: c_int = 247; -pub const _SC_THREAD_ROBUST_PRIO_PROTECT: c_int = 248; - -pub const RLIM_SAVED_MAX: crate::rlim_t = RLIM_INFINITY; -pub const RLIM_SAVED_CUR: crate::rlim_t = RLIM_INFINITY; - -pub const GLOB_ERR: c_int = 1 << 0; -pub const GLOB_MARK: c_int = 1 << 1; -pub const GLOB_NOSORT: c_int = 1 << 2; -pub const GLOB_DOOFFS: c_int = 1 << 3; -pub const GLOB_NOCHECK: c_int = 1 << 4; -pub const GLOB_APPEND: c_int = 1 << 5; -pub const GLOB_NOESCAPE: c_int = 1 << 6; - -pub const GLOB_NOSPACE: c_int = 1; -pub const GLOB_ABORTED: c_int = 2; -pub const GLOB_NOMATCH: c_int = 3; - -pub const POSIX_MADV_NORMAL: c_int = 0; -pub const POSIX_MADV_RANDOM: c_int = 1; -pub const POSIX_MADV_SEQUENTIAL: c_int = 2; -pub const POSIX_MADV_WILLNEED: c_int = 3; - -pub const S_IEXEC: mode_t = 0o0100; -pub const S_IWRITE: mode_t = 0o0200; -pub const S_IREAD: mode_t = 0o0400; - -pub const F_LOCK: c_int = 1; -pub const F_TEST: c_int = 3; -pub const F_TLOCK: c_int = 2; -pub const F_ULOCK: c_int = 0; - -pub const IFF_LOWER_UP: c_int = 0x10000; -pub const IFF_DORMANT: c_int = 0x20000; -pub const IFF_ECHO: c_int = 0x40000; - -pub const ST_RDONLY: c_ulong = 1; -pub const ST_NOSUID: c_ulong = 2; -pub const ST_NODEV: c_ulong = 4; -pub const ST_NOEXEC: c_ulong = 8; -pub const ST_SYNCHRONOUS: c_ulong = 16; -pub const ST_MANDLOCK: c_ulong = 64; -pub const ST_WRITE: c_ulong = 128; -pub const ST_APPEND: c_ulong = 256; -pub const ST_IMMUTABLE: c_ulong = 512; -pub const ST_NOATIME: c_ulong = 1024; -pub const ST_NODIRATIME: c_ulong = 2048; - -pub const RTLD_NEXT: *mut c_void = -1i64 as *mut c_void; -pub const RTLD_DEFAULT: *mut c_void = ptr::null_mut(); -pub const RTLD_NODELETE: c_int = 0x1000; -pub const RTLD_NOW: c_int = 0x2; - -pub const TCP_MD5SIG: c_int = 14; - -pub const PTHREAD_MUTEX_INITIALIZER: pthread_mutex_t = pthread_mutex_t { - size: [0; 
__SIZEOF_PTHREAD_MUTEX_T], -}; -pub const PTHREAD_COND_INITIALIZER: pthread_cond_t = pthread_cond_t { - size: [0; __SIZEOF_PTHREAD_COND_T], -}; -pub const PTHREAD_RWLOCK_INITIALIZER: pthread_rwlock_t = pthread_rwlock_t { - size: [0; __SIZEOF_PTHREAD_RWLOCK_T], -}; -pub const PTHREAD_MUTEX_NORMAL: c_int = 0; -pub const PTHREAD_MUTEX_RECURSIVE: c_int = 1; -pub const PTHREAD_MUTEX_ERRORCHECK: c_int = 2; -pub const PTHREAD_MUTEX_DEFAULT: c_int = PTHREAD_MUTEX_NORMAL; -pub const PTHREAD_PROCESS_PRIVATE: c_int = 0; -pub const PTHREAD_PROCESS_SHARED: c_int = 1; -pub const __SIZEOF_PTHREAD_COND_T: usize = 48; - -pub const RENAME_NOREPLACE: c_int = 1; -pub const RENAME_EXCHANGE: c_int = 2; -pub const RENAME_WHITEOUT: c_int = 4; - -pub const SCHED_OTHER: c_int = 0; -pub const SCHED_FIFO: c_int = 1; -pub const SCHED_RR: c_int = 2; -pub const SCHED_BATCH: c_int = 3; -pub const SCHED_IDLE: c_int = 5; - -// netinet/in.h -// NOTE: These are in addition to the constants defined in src/unix/mod.rs - -// IPPROTO_IP defined in src/unix/mod.rs -/// Hop-by-hop option header -pub const IPPROTO_HOPOPTS: c_int = 0; -// IPPROTO_ICMP defined in src/unix/mod.rs -/// group mgmt protocol -pub const IPPROTO_IGMP: c_int = 2; -/// for compatibility -pub const IPPROTO_IPIP: c_int = 4; -// IPPROTO_TCP defined in src/unix/mod.rs -/// exterior gateway protocol -pub const IPPROTO_EGP: c_int = 8; -/// pup -pub const IPPROTO_PUP: c_int = 12; -// IPPROTO_UDP defined in src/unix/mod.rs -/// xns idp -pub const IPPROTO_IDP: c_int = 22; -/// tp-4 w/ class negotiation -pub const IPPROTO_TP: c_int = 29; -/// DCCP -pub const IPPROTO_DCCP: c_int = 33; -// IPPROTO_IPV6 defined in src/unix/mod.rs -/// IP6 routing header -pub const IPPROTO_ROUTING: c_int = 43; -/// IP6 fragmentation header -pub const IPPROTO_FRAGMENT: c_int = 44; -/// resource reservation -pub const IPPROTO_RSVP: c_int = 46; -/// General Routing Encap. -pub const IPPROTO_GRE: c_int = 47; -/// IP6 Encap Sec. Payload -pub const IPPROTO_ESP: c_int = 50; -/// IP6 Auth Header -pub const IPPROTO_AH: c_int = 51; -// IPPROTO_ICMPV6 defined in src/unix/mod.rs -/// IP6 no next header -pub const IPPROTO_NONE: c_int = 59; -/// IP6 destination option -pub const IPPROTO_DSTOPTS: c_int = 60; -pub const IPPROTO_MTP: c_int = 92; -pub const IPPROTO_BEETPH: c_int = 94; -/// encapsulation header -pub const IPPROTO_ENCAP: c_int = 98; -/// Protocol indep. multicast -pub const IPPROTO_PIM: c_int = 103; -/// IP Payload Comp. 
Protocol -pub const IPPROTO_COMP: c_int = 108; -/// SCTP -pub const IPPROTO_SCTP: c_int = 132; -pub const IPPROTO_MH: c_int = 135; -pub const IPPROTO_UDPLITE: c_int = 136; -pub const IPPROTO_MPLS: c_int = 137; -/// raw IP packet -pub const IPPROTO_RAW: c_int = 255; -pub const IPPROTO_MAX: c_int = 256; - -pub const AF_IB: c_int = 27; -pub const AF_MPLS: c_int = 28; -pub const AF_NFC: c_int = 39; -pub const AF_VSOCK: c_int = 40; -pub const PF_IB: c_int = AF_IB; -pub const PF_MPLS: c_int = AF_MPLS; -pub const PF_NFC: c_int = AF_NFC; -pub const PF_VSOCK: c_int = AF_VSOCK; - -// System V IPC -pub const IPC_PRIVATE: crate::key_t = 0; - -pub const IPC_CREAT: c_int = 0o1000; -pub const IPC_EXCL: c_int = 0o2000; -pub const IPC_NOWAIT: c_int = 0o4000; - -pub const IPC_RMID: c_int = 0; -pub const IPC_SET: c_int = 1; -pub const IPC_STAT: c_int = 2; -pub const IPC_INFO: c_int = 3; -pub const MSG_STAT: c_int = 11; -pub const MSG_INFO: c_int = 12; - -pub const MSG_NOERROR: c_int = 0o10000; -pub const MSG_EXCEPT: c_int = 0o20000; -pub const MSG_COPY: c_int = 0o40000; - -pub const SHM_R: c_int = 0o400; -pub const SHM_W: c_int = 0o200; - -pub const SHM_RDONLY: c_int = 0o10000; -pub const SHM_RND: c_int = 0o20000; -pub const SHM_REMAP: c_int = 0o40000; -pub const SHM_EXEC: c_int = 0o100000; - -pub const SHM_LOCK: c_int = 11; -pub const SHM_UNLOCK: c_int = 12; - -pub const SHM_HUGETLB: c_int = 0o4000; -pub const SHM_NORESERVE: c_int = 0o10000; - -pub const EPOLLRDHUP: c_int = 0x2000; -pub const EPOLLEXCLUSIVE: c_int = 0x10000000; -pub const EPOLLONESHOT: c_int = 0x40000000; - -pub const QFMT_VFS_OLD: c_int = 1; -pub const QFMT_VFS_V0: c_int = 2; -pub const QFMT_VFS_V1: c_int = 4; - -pub const EFD_SEMAPHORE: c_int = 0x1; - -pub const LOG_NFACILITIES: c_int = 24; - -pub const SEM_FAILED: *mut crate::sem_t = ptr::null_mut(); - -pub const RB_AUTOBOOT: c_int = 0x01234567u32 as i32; -pub const RB_HALT_SYSTEM: c_int = 0xcdef0123u32 as i32; -pub const RB_ENABLE_CAD: c_int = 0x89abcdefu32 as i32; -pub const RB_DISABLE_CAD: c_int = 0x00000000u32 as i32; -pub const RB_POWER_OFF: c_int = 0x4321fedcu32 as i32; -pub const RB_SW_SUSPEND: c_int = 0xd000fce2u32 as i32; -pub const RB_KEXEC: c_int = 0x45584543u32 as i32; - -pub const AI_PASSIVE: c_int = 0x0001; -pub const AI_CANONNAME: c_int = 0x0002; -pub const AI_NUMERICHOST: c_int = 0x0004; -pub const AI_V4MAPPED: c_int = 0x0008; -pub const AI_ALL: c_int = 0x0010; -pub const AI_ADDRCONFIG: c_int = 0x0020; - -pub const AI_NUMERICSERV: c_int = 0x0400; - -pub const EAI_BADFLAGS: c_int = -1; -pub const EAI_NONAME: c_int = -2; -pub const EAI_AGAIN: c_int = -3; -pub const EAI_FAIL: c_int = -4; -pub const EAI_FAMILY: c_int = -6; -pub const EAI_SOCKTYPE: c_int = -7; -pub const EAI_SERVICE: c_int = -8; -pub const EAI_MEMORY: c_int = -10; -pub const EAI_OVERFLOW: c_int = -12; - -pub const NI_NUMERICHOST: c_int = 1; -pub const NI_NUMERICSERV: c_int = 2; -pub const NI_NOFQDN: c_int = 4; -pub const NI_NAMEREQD: c_int = 8; -pub const NI_DGRAM: c_int = 16; - -pub const SYNC_FILE_RANGE_WAIT_BEFORE: c_uint = 1; -pub const SYNC_FILE_RANGE_WRITE: c_uint = 2; -pub const SYNC_FILE_RANGE_WAIT_AFTER: c_uint = 4; - -pub const EAI_SYSTEM: c_int = -11; - -pub const AIO_CANCELED: c_int = 0; -pub const AIO_NOTCANCELED: c_int = 1; -pub const AIO_ALLDONE: c_int = 2; -pub const LIO_READ: c_int = 0; -pub const LIO_WRITE: c_int = 1; -pub const LIO_NOP: c_int = 2; -pub const LIO_WAIT: c_int = 0; -pub const LIO_NOWAIT: c_int = 1; - -pub const MREMAP_MAYMOVE: c_int = 1; -pub const MREMAP_FIXED: c_int = 2; - 
-pub const PR_SET_PDEATHSIG: c_int = 1; -pub const PR_GET_PDEATHSIG: c_int = 2; - -pub const PR_GET_DUMPABLE: c_int = 3; -pub const PR_SET_DUMPABLE: c_int = 4; - -pub const PR_GET_UNALIGN: c_int = 5; -pub const PR_SET_UNALIGN: c_int = 6; -pub const PR_UNALIGN_NOPRINT: c_int = 1; -pub const PR_UNALIGN_SIGBUS: c_int = 2; - -pub const PR_GET_KEEPCAPS: c_int = 7; -pub const PR_SET_KEEPCAPS: c_int = 8; - -pub const PR_GET_FPEMU: c_int = 9; -pub const PR_SET_FPEMU: c_int = 10; -pub const PR_FPEMU_NOPRINT: c_int = 1; -pub const PR_FPEMU_SIGFPE: c_int = 2; - -pub const PR_GET_FPEXC: c_int = 11; -pub const PR_SET_FPEXC: c_int = 12; -pub const PR_FP_EXC_SW_ENABLE: c_int = 0x80; -pub const PR_FP_EXC_DIV: c_int = 0x010000; -pub const PR_FP_EXC_OVF: c_int = 0x020000; -pub const PR_FP_EXC_UND: c_int = 0x040000; -pub const PR_FP_EXC_RES: c_int = 0x080000; -pub const PR_FP_EXC_INV: c_int = 0x100000; -pub const PR_FP_EXC_DISABLED: c_int = 0; -pub const PR_FP_EXC_NONRECOV: c_int = 1; -pub const PR_FP_EXC_ASYNC: c_int = 2; -pub const PR_FP_EXC_PRECISE: c_int = 3; - -pub const PR_GET_TIMING: c_int = 13; -pub const PR_SET_TIMING: c_int = 14; -pub const PR_TIMING_STATISTICAL: c_int = 0; -pub const PR_TIMING_TIMESTAMP: c_int = 1; - -pub const PR_SET_NAME: c_int = 15; -pub const PR_GET_NAME: c_int = 16; - -pub const PR_GET_ENDIAN: c_int = 19; -pub const PR_SET_ENDIAN: c_int = 20; -pub const PR_ENDIAN_BIG: c_int = 0; -pub const PR_ENDIAN_LITTLE: c_int = 1; -pub const PR_ENDIAN_PPC_LITTLE: c_int = 2; - -pub const PR_GET_SECCOMP: c_int = 21; -pub const PR_SET_SECCOMP: c_int = 22; - -pub const PR_CAPBSET_READ: c_int = 23; -pub const PR_CAPBSET_DROP: c_int = 24; - -pub const PR_GET_TSC: c_int = 25; -pub const PR_SET_TSC: c_int = 26; -pub const PR_TSC_ENABLE: c_int = 1; -pub const PR_TSC_SIGSEGV: c_int = 2; - -pub const PR_GET_SECUREBITS: c_int = 27; -pub const PR_SET_SECUREBITS: c_int = 28; - -pub const PR_SET_TIMERSLACK: c_int = 29; -pub const PR_GET_TIMERSLACK: c_int = 30; - -pub const PR_TASK_PERF_EVENTS_DISABLE: c_int = 31; -pub const PR_TASK_PERF_EVENTS_ENABLE: c_int = 32; - -pub const PR_MCE_KILL: c_int = 33; -pub const PR_MCE_KILL_CLEAR: c_int = 0; -pub const PR_MCE_KILL_SET: c_int = 1; - -pub const PR_MCE_KILL_LATE: c_int = 0; -pub const PR_MCE_KILL_EARLY: c_int = 1; -pub const PR_MCE_KILL_DEFAULT: c_int = 2; - -pub const PR_MCE_KILL_GET: c_int = 34; - -pub const PR_SET_MM: c_int = 35; -pub const PR_SET_MM_START_CODE: c_int = 1; -pub const PR_SET_MM_END_CODE: c_int = 2; -pub const PR_SET_MM_START_DATA: c_int = 3; -pub const PR_SET_MM_END_DATA: c_int = 4; -pub const PR_SET_MM_START_STACK: c_int = 5; -pub const PR_SET_MM_START_BRK: c_int = 6; -pub const PR_SET_MM_BRK: c_int = 7; -pub const PR_SET_MM_ARG_START: c_int = 8; -pub const PR_SET_MM_ARG_END: c_int = 9; -pub const PR_SET_MM_ENV_START: c_int = 10; -pub const PR_SET_MM_ENV_END: c_int = 11; -pub const PR_SET_MM_AUXV: c_int = 12; -pub const PR_SET_MM_EXE_FILE: c_int = 13; -pub const PR_SET_MM_MAP: c_int = 14; -pub const PR_SET_MM_MAP_SIZE: c_int = 15; - -pub const PR_SET_PTRACER: c_int = 0x59616d61; -pub const PR_SET_PTRACER_ANY: c_ulong = 0xffffffffffffffff; - -pub const PR_SET_CHILD_SUBREAPER: c_int = 36; -pub const PR_GET_CHILD_SUBREAPER: c_int = 37; - -pub const PR_SET_NO_NEW_PRIVS: c_int = 38; -pub const PR_GET_NO_NEW_PRIVS: c_int = 39; - -pub const PR_GET_TID_ADDRESS: c_int = 40; - -pub const PR_SET_THP_DISABLE: c_int = 41; -pub const PR_GET_THP_DISABLE: c_int = 42; - -pub const PR_MPX_ENABLE_MANAGEMENT: c_int = 43; -pub const 
PR_MPX_DISABLE_MANAGEMENT: c_int = 44; - -pub const PR_SET_FP_MODE: c_int = 45; -pub const PR_GET_FP_MODE: c_int = 46; -pub const PR_FP_MODE_FR: c_int = 1 << 0; -pub const PR_FP_MODE_FRE: c_int = 1 << 1; - -pub const PR_CAP_AMBIENT: c_int = 47; -pub const PR_CAP_AMBIENT_IS_SET: c_int = 1; -pub const PR_CAP_AMBIENT_RAISE: c_int = 2; -pub const PR_CAP_AMBIENT_LOWER: c_int = 3; -pub const PR_CAP_AMBIENT_CLEAR_ALL: c_int = 4; - -pub const ITIMER_REAL: c_int = 0; -pub const ITIMER_VIRTUAL: c_int = 1; -pub const ITIMER_PROF: c_int = 2; - -pub const TFD_CLOEXEC: c_int = O_CLOEXEC; -pub const TFD_NONBLOCK: c_int = O_NONBLOCK; -pub const TFD_TIMER_ABSTIME: c_int = 1; - -pub const XATTR_CREATE: c_int = 0x1; -pub const XATTR_REPLACE: c_int = 0x2; - -pub const _POSIX_VDISABLE: crate::cc_t = 0; - -pub const FALLOC_FL_KEEP_SIZE: c_int = 0x01; -pub const FALLOC_FL_PUNCH_HOLE: c_int = 0x02; -pub const FALLOC_FL_COLLAPSE_RANGE: c_int = 0x08; -pub const FALLOC_FL_ZERO_RANGE: c_int = 0x10; -pub const FALLOC_FL_INSERT_RANGE: c_int = 0x20; -pub const FALLOC_FL_UNSHARE_RANGE: c_int = 0x40; - -// On Linux, libc doesn't define this constant, libattr does instead. -// We still define it for Linux as it's defined by libc on other platforms, -// and it's mentioned in the man pages for getxattr and setxattr. -pub const ENOATTR: c_int = crate::ENODATA; - -pub const SO_ORIGINAL_DST: c_int = 80; -pub const IUTF8: crate::tcflag_t = 0x00004000; -pub const CMSPAR: crate::tcflag_t = 0o10000000000; - -pub const MFD_CLOEXEC: c_uint = 0x0001; -pub const MFD_ALLOW_SEALING: c_uint = 0x0002; - -// these are used in the p_type field of Elf32_Phdr and Elf64_Phdr, which has -// the type Elf32Word and Elf64Word respectively. Luckily, both of those are u32 -// so we can use that type here to avoid having to cast. -pub const PT_NULL: u32 = 0; -pub const PT_LOAD: u32 = 1; -pub const PT_DYNAMIC: u32 = 2; -pub const PT_INTERP: u32 = 3; -pub const PT_NOTE: u32 = 4; -pub const PT_SHLIB: u32 = 5; -pub const PT_PHDR: u32 = 6; -pub const PT_TLS: u32 = 7; -pub const PT_NUM: u32 = 8; -pub const PT_LOOS: u32 = 0x60000000; -pub const PT_GNU_EH_FRAME: u32 = 0x6474e550; -pub const PT_GNU_STACK: u32 = 0x6474e551; -pub const PT_GNU_RELRO: u32 = 0x6474e552; - -// Ethernet protocol IDs. 
-pub const ETH_P_LOOP: c_int = 0x0060; -pub const ETH_P_PUP: c_int = 0x0200; -pub const ETH_P_PUPAT: c_int = 0x0201; -pub const ETH_P_IP: c_int = 0x0800; -pub const ETH_P_X25: c_int = 0x0805; -pub const ETH_P_ARP: c_int = 0x0806; -pub const ETH_P_BPQ: c_int = 0x08FF; -pub const ETH_P_IEEEPUP: c_int = 0x0a00; -pub const ETH_P_IEEEPUPAT: c_int = 0x0a01; -pub const ETH_P_BATMAN: c_int = 0x4305; -pub const ETH_P_DEC: c_int = 0x6000; -pub const ETH_P_DNA_DL: c_int = 0x6001; -pub const ETH_P_DNA_RC: c_int = 0x6002; -pub const ETH_P_DNA_RT: c_int = 0x6003; -pub const ETH_P_LAT: c_int = 0x6004; -pub const ETH_P_DIAG: c_int = 0x6005; -pub const ETH_P_CUST: c_int = 0x6006; -pub const ETH_P_SCA: c_int = 0x6007; -pub const ETH_P_TEB: c_int = 0x6558; -pub const ETH_P_RARP: c_int = 0x8035; -pub const ETH_P_ATALK: c_int = 0x809B; -pub const ETH_P_AARP: c_int = 0x80F3; -pub const ETH_P_8021Q: c_int = 0x8100; -pub const ETH_P_IPX: c_int = 0x8137; -pub const ETH_P_IPV6: c_int = 0x86DD; -pub const ETH_P_PAUSE: c_int = 0x8808; -pub const ETH_P_SLOW: c_int = 0x8809; -pub const ETH_P_WCCP: c_int = 0x883E; -pub const ETH_P_MPLS_UC: c_int = 0x8847; -pub const ETH_P_MPLS_MC: c_int = 0x8848; -pub const ETH_P_ATMMPOA: c_int = 0x884c; -pub const ETH_P_PPP_DISC: c_int = 0x8863; -pub const ETH_P_PPP_SES: c_int = 0x8864; -pub const ETH_P_LINK_CTL: c_int = 0x886c; -pub const ETH_P_ATMFATE: c_int = 0x8884; -pub const ETH_P_PAE: c_int = 0x888E; -pub const ETH_P_AOE: c_int = 0x88A2; -pub const ETH_P_8021AD: c_int = 0x88A8; -pub const ETH_P_802_EX1: c_int = 0x88B5; -pub const ETH_P_TIPC: c_int = 0x88CA; -pub const ETH_P_8021AH: c_int = 0x88E7; -pub const ETH_P_MVRP: c_int = 0x88F5; -pub const ETH_P_1588: c_int = 0x88F7; -pub const ETH_P_PRP: c_int = 0x88FB; -pub const ETH_P_FCOE: c_int = 0x8906; -pub const ETH_P_TDLS: c_int = 0x890D; -pub const ETH_P_FIP: c_int = 0x8914; -pub const ETH_P_80221: c_int = 0x8917; -pub const ETH_P_LOOPBACK: c_int = 0x9000; -pub const ETH_P_QINQ1: c_int = 0x9100; -pub const ETH_P_QINQ2: c_int = 0x9200; -pub const ETH_P_QINQ3: c_int = 0x9300; -pub const ETH_P_EDSA: c_int = 0xDADA; -pub const ETH_P_AF_IUCV: c_int = 0xFBFB; - -pub const ETH_P_802_3_MIN: c_int = 0x0600; - -pub const ETH_P_802_3: c_int = 0x0001; -pub const ETH_P_AX25: c_int = 0x0002; -pub const ETH_P_ALL: c_int = 0x0003; -pub const ETH_P_802_2: c_int = 0x0004; -pub const ETH_P_SNAP: c_int = 0x0005; -pub const ETH_P_DDCMP: c_int = 0x0006; -pub const ETH_P_WAN_PPP: c_int = 0x0007; -pub const ETH_P_PPP_MP: c_int = 0x0008; -pub const ETH_P_LOCALTALK: c_int = 0x0009; -pub const ETH_P_CAN: c_int = 0x000C; -pub const ETH_P_CANFD: c_int = 0x000D; -pub const ETH_P_PPPTALK: c_int = 0x0010; -pub const ETH_P_TR_802_2: c_int = 0x0011; -pub const ETH_P_MOBITEX: c_int = 0x0015; -pub const ETH_P_CONTROL: c_int = 0x0016; -pub const ETH_P_IRDA: c_int = 0x0017; -pub const ETH_P_ECONET: c_int = 0x0018; -pub const ETH_P_HDLC: c_int = 0x0019; -pub const ETH_P_ARCNET: c_int = 0x001A; -pub const ETH_P_DSA: c_int = 0x001B; -pub const ETH_P_TRAILER: c_int = 0x001C; -pub const ETH_P_PHONET: c_int = 0x00F5; -pub const ETH_P_IEEE802154: c_int = 0x00F6; -pub const ETH_P_CAIF: c_int = 0x00F7; - -pub const SFD_CLOEXEC: c_int = 0x080000; - -pub const NCCS: usize = 32; - -pub const O_TRUNC: c_int = 0x00040000; -pub const O_NOATIME: c_int = 0x00002000; -pub const O_CLOEXEC: c_int = 0x00000100; -pub const O_TMPFILE: c_int = 0x00004000; - -pub const EBFONT: c_int = 59; -pub const ENOSTR: c_int = 60; -pub const ENODATA: c_int = 61; -pub const ETIME: c_int = 62; -pub const 
ENOSR: c_int = 63; -pub const ENONET: c_int = 64; -pub const ENOPKG: c_int = 65; -pub const EREMOTE: c_int = 66; -pub const ENOLINK: c_int = 67; -pub const EADV: c_int = 68; -pub const ESRMNT: c_int = 69; -pub const ECOMM: c_int = 70; -pub const EPROTO: c_int = 71; -pub const EDOTDOT: c_int = 73; - -pub const SA_NODEFER: c_int = 0x40000000; -pub const SA_RESETHAND: c_int = 0x80000000; -pub const SA_RESTART: c_int = 0x10000000; -pub const SA_NOCLDSTOP: c_int = 0x00000001; - -pub const EPOLL_CLOEXEC: c_int = 0x80000; - -pub const EFD_CLOEXEC: c_int = 0x80000; - -pub const BUFSIZ: c_uint = 1024; -pub const TMP_MAX: c_uint = 10000; -pub const FOPEN_MAX: c_uint = 1000; -pub const O_PATH: c_int = 0x00400000; -pub const O_EXEC: c_int = O_PATH; -pub const O_SEARCH: c_int = O_PATH; -pub const O_ACCMODE: c_int = 03 | O_SEARCH; -pub const O_NDELAY: c_int = O_NONBLOCK; -pub const NI_MAXHOST: crate::socklen_t = 255; -pub const PTHREAD_STACK_MIN: size_t = 2048; -pub const POSIX_FADV_DONTNEED: c_int = 4; -pub const POSIX_FADV_NOREUSE: c_int = 5; - -pub const POSIX_MADV_DONTNEED: c_int = 4; - -pub const RLIM_INFINITY: crate::rlim_t = !0; -pub const RLIMIT_RTTIME: c_int = 15; -#[deprecated(since = "0.2.64", note = "Not stable across OS versions")] -pub const RLIMIT_NLIMITS: c_int = 16; -#[allow(deprecated)] -#[deprecated(since = "0.2.64", note = "Not stable across OS versions")] -pub const RLIM_NLIMITS: c_int = RLIMIT_NLIMITS; - -pub const MAP_ANONYMOUS: c_int = MAP_ANON; - -pub const SOCK_DCCP: c_int = 6; -pub const SOCK_PACKET: c_int = 10; - -pub const TCP_COOKIE_TRANSACTIONS: c_int = 15; -pub const TCP_THIN_LINEAR_TIMEOUTS: c_int = 16; -pub const TCP_THIN_DUPACK: c_int = 17; -pub const TCP_USER_TIMEOUT: c_int = 18; -pub const TCP_REPAIR: c_int = 19; -pub const TCP_REPAIR_QUEUE: c_int = 20; -pub const TCP_QUEUE_SEQ: c_int = 21; -pub const TCP_REPAIR_OPTIONS: c_int = 22; -pub const TCP_FASTOPEN: c_int = 23; -pub const TCP_TIMESTAMP: c_int = 24; - -pub const SIGUNUSED: c_int = crate::SIGSYS; - -pub const __SIZEOF_PTHREAD_CONDATTR_T: usize = 4; -pub const __SIZEOF_PTHREAD_MUTEXATTR_T: usize = 4; -pub const __SIZEOF_PTHREAD_RWLOCKATTR_T: usize = 8; - -pub const CPU_SETSIZE: c_int = 128; - -pub const PTRACE_TRACEME: c_int = 0; -pub const PTRACE_PEEKTEXT: c_int = 1; -pub const PTRACE_PEEKDATA: c_int = 2; -pub const PTRACE_PEEKUSER: c_int = 3; -pub const PTRACE_POKETEXT: c_int = 4; -pub const PTRACE_POKEDATA: c_int = 5; -pub const PTRACE_POKEUSER: c_int = 6; -pub const PTRACE_CONT: c_int = 7; -pub const PTRACE_KILL: c_int = 8; -pub const PTRACE_SINGLESTEP: c_int = 9; -pub const PTRACE_GETREGS: c_int = 12; -pub const PTRACE_SETREGS: c_int = 13; -pub const PTRACE_GETFPREGS: c_int = 14; -pub const PTRACE_SETFPREGS: c_int = 15; -pub const PTRACE_ATTACH: c_int = 16; -pub const PTRACE_DETACH: c_int = 17; -pub const PTRACE_GETFPXREGS: c_int = 18; -pub const PTRACE_SETFPXREGS: c_int = 19; -pub const PTRACE_SYSCALL: c_int = 24; -pub const PTRACE_SETOPTIONS: c_int = 0x4200; -pub const PTRACE_GETEVENTMSG: c_int = 0x4201; -pub const PTRACE_GETSIGINFO: c_int = 0x4202; -pub const PTRACE_SETSIGINFO: c_int = 0x4203; -pub const PTRACE_GETREGSET: c_int = 0x4204; -pub const PTRACE_SETREGSET: c_int = 0x4205; -pub const PTRACE_SEIZE: c_int = 0x4206; -pub const PTRACE_INTERRUPT: c_int = 0x4207; -pub const PTRACE_LISTEN: c_int = 0x4208; -pub const PTRACE_PEEKSIGINFO: c_int = 0x4209; - -pub const EPOLLWAKEUP: c_int = 0x20000000; - -pub const EFD_NONBLOCK: c_int = crate::O_NONBLOCK; - -pub const SFD_NONBLOCK: c_int = crate::O_NONBLOCK; 
- -pub const TCSANOW: c_int = 0; -pub const TCSADRAIN: c_int = 1; -pub const TCSAFLUSH: c_int = 2; - -pub const TIOCINQ: c_int = crate::FIONREAD; - -pub const RTLD_GLOBAL: c_int = 0x100; -pub const RTLD_NOLOAD: c_int = 0x4; - -pub const MCL_CURRENT: c_int = 0x0001; -pub const MCL_FUTURE: c_int = 0x0002; - -pub const CBAUD: crate::tcflag_t = 0o0010017; -pub const TAB1: c_int = 0x00000800; -pub const TAB2: c_int = 0x00001000; -pub const TAB3: c_int = 0x00001800; -pub const CR1: c_int = 0x00000200; -pub const CR2: c_int = 0x00000400; -pub const CR3: c_int = 0x00000600; -pub const FF1: c_int = 0x00008000; -pub const BS1: c_int = 0x00002000; -pub const VT1: c_int = 0x00004000; -pub const VWERASE: usize = 14; -pub const VREPRINT: usize = 12; -pub const VSUSP: usize = 10; -pub const VSTART: usize = 8; -pub const VSTOP: usize = 9; -pub const VDISCARD: usize = 13; -pub const VTIME: usize = 5; -pub const IXON: crate::tcflag_t = 0x00000400; -pub const IXOFF: crate::tcflag_t = 0x00001000; -pub const ONLCR: crate::tcflag_t = 0x4; -pub const CSIZE: crate::tcflag_t = 0x00000030; -pub const CS6: crate::tcflag_t = 0x00000010; -pub const CS7: crate::tcflag_t = 0x00000020; -pub const CS8: crate::tcflag_t = 0x00000030; -pub const CSTOPB: crate::tcflag_t = 0x00000040; -pub const CREAD: crate::tcflag_t = 0x00000080; -pub const PARENB: crate::tcflag_t = 0x00000100; -pub const PARODD: crate::tcflag_t = 0x00000200; -pub const HUPCL: crate::tcflag_t = 0x00000400; -pub const CLOCAL: crate::tcflag_t = 0x00000800; -pub const ECHOKE: crate::tcflag_t = 0x00000800; -pub const ECHOE: crate::tcflag_t = 0x00000010; -pub const ECHOK: crate::tcflag_t = 0x00000020; -pub const ECHONL: crate::tcflag_t = 0x00000040; -pub const ECHOPRT: crate::tcflag_t = 0x00000400; -pub const ECHOCTL: crate::tcflag_t = 0x00000200; -pub const ISIG: crate::tcflag_t = 0x00000001; -pub const ICANON: crate::tcflag_t = 0x00000002; -pub const PENDIN: crate::tcflag_t = 0x00004000; -pub const NOFLSH: crate::tcflag_t = 0x00000080; -pub const CIBAUD: crate::tcflag_t = 0o02003600000; -pub const CBAUDEX: crate::tcflag_t = 0o010000; -pub const VSWTC: usize = 7; -pub const OLCUC: crate::tcflag_t = 0o000002; -pub const NLDLY: crate::tcflag_t = 0o000400; -pub const CRDLY: crate::tcflag_t = 0o003000; -pub const TABDLY: crate::tcflag_t = 0o014000; -pub const BSDLY: crate::tcflag_t = 0o020000; -pub const FFDLY: crate::tcflag_t = 0o100000; -pub const VTDLY: crate::tcflag_t = 0o040000; -pub const XTABS: crate::tcflag_t = 0o014000; - -pub const B0: crate::speed_t = 0o000000; -pub const B50: crate::speed_t = 0o000001; -pub const B75: crate::speed_t = 0o000002; -pub const B110: crate::speed_t = 0o000003; -pub const B134: crate::speed_t = 0o000004; -pub const B150: crate::speed_t = 0o000005; -pub const B200: crate::speed_t = 0o000006; -pub const B300: crate::speed_t = 0o000007; -pub const B600: crate::speed_t = 0o000010; -pub const B1200: crate::speed_t = 0o000011; -pub const B1800: crate::speed_t = 0o000012; -pub const B2400: crate::speed_t = 0o000013; -pub const B4800: crate::speed_t = 0o000014; -pub const B9600: crate::speed_t = 0o000015; -pub const B19200: crate::speed_t = 0o000016; -pub const B38400: crate::speed_t = 0o000017; -pub const EXTA: crate::speed_t = B19200; -pub const EXTB: crate::speed_t = B38400; -pub const B57600: crate::speed_t = 0o010001; -pub const B115200: crate::speed_t = 0o010002; -pub const B230400: crate::speed_t = 0o010003; -pub const B460800: crate::speed_t = 0o010004; -pub const B500000: crate::speed_t = 0o010005; -pub const B576000: 
crate::speed_t = 0o010006; -pub const B921600: crate::speed_t = 0o010007; -pub const B1000000: crate::speed_t = 0o010010; -pub const B1152000: crate::speed_t = 0o010011; -pub const B1500000: crate::speed_t = 0o010012; -pub const B2000000: crate::speed_t = 0o010013; -pub const B2500000: crate::speed_t = 0o010014; -pub const B3000000: crate::speed_t = 0o010015; -pub const B3500000: crate::speed_t = 0o010016; -pub const B4000000: crate::speed_t = 0o010017; - -pub const SO_BINDTODEVICE: c_int = 25; -pub const SO_TIMESTAMP: c_int = 29; -pub const SO_MARK: c_int = 36; -pub const SO_RXQ_OVFL: c_int = 40; -pub const SO_PEEK_OFF: c_int = 42; -pub const SO_BUSY_POLL: c_int = 46; -pub const SO_BINDTOIFINDEX: c_int = 62; - -pub const __SIZEOF_PTHREAD_RWLOCK_T: usize = 56; -pub const __SIZEOF_PTHREAD_MUTEX_T: usize = 40; - -pub const O_ASYNC: c_int = 0x00000400; - -pub const FIOCLEX: c_int = 0x5451; -pub const FIONBIO: c_int = 0x5421; - -pub const RLIMIT_RSS: c_int = 5; -pub const RLIMIT_NOFILE: c_int = 7; -pub const RLIMIT_AS: c_int = 9; -pub const RLIMIT_NPROC: c_int = 6; -pub const RLIMIT_MEMLOCK: c_int = 8; - -pub const O_APPEND: c_int = 0x00100000; -pub const O_CREAT: c_int = 0x00010000; -pub const O_EXCL: c_int = 0x00020000; -pub const O_NOCTTY: c_int = 0x00000200; -pub const O_NONBLOCK: c_int = 0x00000010; -pub const O_SYNC: c_int = 0x00000040 | O_DSYNC; -pub const O_RSYNC: c_int = O_SYNC; -pub const O_DSYNC: c_int = 0x00000020; - -pub const SOCK_CLOEXEC: c_int = 0o2000000; -pub const SOCK_NONBLOCK: c_int = 0o4000; - -pub const MAP_ANON: c_int = 0x0020; -pub const MAP_GROWSDOWN: c_int = 0x0100; -pub const MAP_DENYWRITE: c_int = 0x0800; -pub const MAP_EXECUTABLE: c_int = 0x01000; -pub const MAP_LOCKED: c_int = 0x02000; -pub const MAP_NORESERVE: c_int = 0x04000; -pub const MAP_POPULATE: c_int = 0x08000; -pub const MAP_NONBLOCK: c_int = 0x010000; -pub const MAP_STACK: c_int = 0x020000; - -pub const SOCK_STREAM: c_int = 1; -pub const SOCK_DGRAM: c_int = 2; -pub const SOCK_SEQPACKET: c_int = 5; - -pub const SOL_SOCKET: c_int = 1; - -pub const EDEADLK: c_int = 35; -pub const ENAMETOOLONG: c_int = 36; -pub const ENOLCK: c_int = 37; -pub const ENOSYS: c_int = 38; -pub const ENOTEMPTY: c_int = 39; -pub const ELOOP: c_int = 40; -pub const ENOMSG: c_int = 42; -pub const EIDRM: c_int = 43; -pub const ECHRNG: c_int = 44; -pub const EL2NSYNC: c_int = 45; -pub const EL3HLT: c_int = 46; -pub const EL3RST: c_int = 47; -pub const ELNRNG: c_int = 48; -pub const EUNATCH: c_int = 49; -pub const ENOCSI: c_int = 50; -pub const EL2HLT: c_int = 51; -pub const EBADE: c_int = 52; -pub const EBADR: c_int = 53; -pub const EXFULL: c_int = 54; -pub const ENOANO: c_int = 55; -pub const EBADRQC: c_int = 56; -pub const EBADSLT: c_int = 57; -pub const EDEADLOCK: c_int = EDEADLK; -pub const EMULTIHOP: c_int = 72; -pub const EBADMSG: c_int = 74; -pub const EOVERFLOW: c_int = 75; -pub const ENOTUNIQ: c_int = 76; -pub const EBADFD: c_int = 77; -pub const EREMCHG: c_int = 78; -pub const ELIBACC: c_int = 79; -pub const ELIBBAD: c_int = 80; -pub const ELIBSCN: c_int = 81; -pub const ELIBMAX: c_int = 82; -pub const ELIBEXEC: c_int = 83; -pub const EILSEQ: c_int = 84; -pub const ERESTART: c_int = 85; -pub const ESTRPIPE: c_int = 86; -pub const EUSERS: c_int = 87; -pub const ENOTSOCK: c_int = 88; -pub const EDESTADDRREQ: c_int = 89; -pub const EMSGSIZE: c_int = 90; -pub const EPROTOTYPE: c_int = 91; -pub const ENOPROTOOPT: c_int = 92; -pub const EPROTONOSUPPORT: c_int = 93; -pub const ESOCKTNOSUPPORT: c_int = 94; -pub const EOPNOTSUPP: 
c_int = 95; -pub const ENOTSUP: c_int = EOPNOTSUPP; -pub const EPFNOSUPPORT: c_int = 96; -pub const EAFNOSUPPORT: c_int = 97; -pub const EADDRINUSE: c_int = 98; -pub const EADDRNOTAVAIL: c_int = 99; -pub const ENETDOWN: c_int = 100; -pub const ENETUNREACH: c_int = 101; -pub const ENETRESET: c_int = 102; -pub const ECONNABORTED: c_int = 103; -pub const ECONNRESET: c_int = 104; -pub const ENOBUFS: c_int = 105; -pub const EISCONN: c_int = 106; -pub const ENOTCONN: c_int = 107; -pub const ESHUTDOWN: c_int = 108; -pub const ETOOMANYREFS: c_int = 109; -pub const ETIMEDOUT: c_int = 110; -pub const ECONNREFUSED: c_int = 111; -pub const EHOSTDOWN: c_int = 112; -pub const EHOSTUNREACH: c_int = 113; -pub const EALREADY: c_int = 114; -pub const EINPROGRESS: c_int = 115; -pub const ESTALE: c_int = 116; -pub const EUCLEAN: c_int = 117; -pub const ENOTNAM: c_int = 118; -pub const ENAVAIL: c_int = 119; -pub const EISNAM: c_int = 120; -pub const EREMOTEIO: c_int = 121; -pub const EDQUOT: c_int = 122; -pub const ENOMEDIUM: c_int = 123; -pub const EMEDIUMTYPE: c_int = 124; -pub const ECANCELED: c_int = 125; -pub const ENOKEY: c_int = 126; -pub const EKEYEXPIRED: c_int = 127; -pub const EKEYREVOKED: c_int = 128; -pub const EKEYREJECTED: c_int = 129; -pub const EOWNERDEAD: c_int = 130; -pub const ENOTRECOVERABLE: c_int = 131; -pub const ERFKILL: c_int = 132; -pub const EHWPOISON: c_int = 133; - -pub const SO_REUSEADDR: c_int = 2; -pub const SO_TYPE: c_int = 3; -pub const SO_ERROR: c_int = 4; -pub const SO_DONTROUTE: c_int = 5; -pub const SO_BROADCAST: c_int = 6; -pub const SO_SNDBUF: c_int = 7; -pub const SO_RCVBUF: c_int = 8; -pub const SO_KEEPALIVE: c_int = 9; -pub const SO_OOBINLINE: c_int = 10; -pub const SO_NO_CHECK: c_int = 11; -pub const SO_PRIORITY: c_int = 12; -pub const SO_LINGER: c_int = 13; -pub const SO_BSDCOMPAT: c_int = 14; -pub const SO_REUSEPORT: c_int = 15; -pub const SO_PASSCRED: c_int = 16; -pub const SO_PEERCRED: c_int = 17; -pub const SO_RCVLOWAT: c_int = 18; -pub const SO_SNDLOWAT: c_int = 19; -pub const SO_RCVTIMEO: c_int = 20; -pub const SO_SNDTIMEO: c_int = 21; -pub const SO_ACCEPTCONN: c_int = 30; -pub const SO_SNDBUFFORCE: c_int = 32; -pub const SO_RCVBUFFORCE: c_int = 33; -pub const SO_PROTOCOL: c_int = 38; -pub const SO_DOMAIN: c_int = 39; - -pub const SA_ONSTACK: c_int = 0x08000000; -pub const SA_SIGINFO: c_int = 0x00000004; -pub const SA_NOCLDWAIT: c_int = 0x00000002; - -pub const SIGCHLD: c_int = 17; -pub const SIGBUS: c_int = 7; -pub const SIGTTIN: c_int = 21; -pub const SIGTTOU: c_int = 22; -pub const SIGXCPU: c_int = 24; -pub const SIGXFSZ: c_int = 25; -pub const SIGVTALRM: c_int = 26; -pub const SIGPROF: c_int = 27; -pub const SIGWINCH: c_int = 28; -pub const SIGUSR1: c_int = 10; -pub const SIGUSR2: c_int = 12; -pub const SIGCONT: c_int = 18; -pub const SIGSTOP: c_int = 19; -pub const SIGTSTP: c_int = 20; -pub const SIGURG: c_int = 23; -pub const SIGIO: c_int = 29; -pub const SIGSYS: c_int = 31; -pub const SIGSTKFLT: c_int = 16; -pub const SIGPOLL: c_int = 29; -pub const SIGPWR: c_int = 30; -pub const SIG_SETMASK: c_int = 2; -pub const SIG_BLOCK: c_int = 0x000000; -pub const SIG_UNBLOCK: c_int = 0x01; - -pub const EXTPROC: crate::tcflag_t = 0x00010000; - -pub const MAP_HUGETLB: c_int = 0x040000; - -pub const F_GETLK: c_int = 5; -pub const F_GETOWN: c_int = 9; -pub const F_SETLK: c_int = 6; -pub const F_SETLKW: c_int = 7; -pub const F_SETOWN: c_int = 8; - -pub const VEOF: usize = 4; -pub const VEOL: usize = 11; -pub const VEOL2: usize = 16; -pub const VMIN: usize = 6; -pub 
const IEXTEN: crate::tcflag_t = 0x00008000;
-pub const TOSTOP: crate::tcflag_t = 0x00000100;
-pub const FLUSHO: crate::tcflag_t = 0x00001000;
-
-pub const TCGETS: c_int = 0x5401;
-pub const TCSETS: c_int = 0x5402;
-pub const TCSETSW: c_int = 0x5403;
-pub const TCSETSF: c_int = 0x5404;
-pub const TCGETA: c_int = 0x5405;
-pub const TCSETA: c_int = 0x5406;
-pub const TCSETAW: c_int = 0x5407;
-pub const TCSETAF: c_int = 0x5408;
-pub const TCSBRK: c_int = 0x5409;
-pub const TCXONC: c_int = 0x540A;
-pub const TCFLSH: c_int = 0x540B;
-pub const TIOCGSOFTCAR: c_int = 0x5419;
-pub const TIOCSSOFTCAR: c_int = 0x541A;
-pub const TIOCLINUX: c_int = 0x541C;
-pub const TIOCGSERIAL: c_int = 0x541E;
-pub const TIOCEXCL: c_int = 0x540C;
-pub const TIOCNXCL: c_int = 0x540D;
-pub const TIOCSCTTY: c_int = 0x540E;
-pub const TIOCGPGRP: c_int = 0x540F;
-pub const TIOCSPGRP: c_int = 0x5410;
-pub const TIOCOUTQ: c_int = 0x5411;
-pub const TIOCSTI: c_int = 0x5412;
-pub const TIOCGWINSZ: c_int = 0x5413;
-pub const TIOCSWINSZ: c_int = 0x5414;
-pub const TIOCMGET: c_int = 0x5415;
-pub const TIOCMBIS: c_int = 0x5416;
-pub const TIOCMBIC: c_int = 0x5417;
-pub const TIOCMSET: c_int = 0x5418;
-pub const FIONREAD: c_int = 0x541B;
-pub const TIOCCONS: c_int = 0x541D;
-
-pub const POLLWRNORM: c_short = 0x100;
-pub const POLLWRBAND: c_short = 0x200;
-
-pub const TIOCM_LE: c_int = 0x001;
-pub const TIOCM_DTR: c_int = 0x002;
-pub const TIOCM_RTS: c_int = 0x004;
-pub const TIOCM_ST: c_int = 0x008;
-pub const TIOCM_SR: c_int = 0x010;
-pub const TIOCM_CTS: c_int = 0x020;
-pub const TIOCM_CAR: c_int = 0x040;
-pub const TIOCM_RNG: c_int = 0x080;
-pub const TIOCM_DSR: c_int = 0x100;
-pub const TIOCM_CD: c_int = TIOCM_CAR;
-pub const TIOCM_RI: c_int = TIOCM_RNG;
-
-pub const O_DIRECTORY: c_int = 0x00080000;
-pub const O_DIRECT: c_int = 0x00000800;
-pub const O_LARGEFILE: c_int = 0x00001000;
-pub const O_NOFOLLOW: c_int = 0x00000080;
-
-pub const HUGETLB_FLAG_ENCODE_SHIFT: u32 = 26;
-pub const MAP_HUGE_SHIFT: u32 = 26;
-
-// intentionally not public, only used for fd_set
-cfg_if! {
-    if #[cfg(target_pointer_width = "32")] {
-        const ULONG_SIZE: usize = 32;
-    } else if #[cfg(target_pointer_width = "64")] {
-        const ULONG_SIZE: usize = 64;
-    } else {
-        // Unknown target_pointer_width
-    }
-}
-
-// END_PUB_CONST
-
-f! {
-    pub fn FD_CLR(fd: c_int, set: *mut fd_set) -> () {
-        let fd = fd as usize;
-        let size = size_of_val(&(*set).fds_bits[0]) * 8;
-        (*set).fds_bits[fd / size] &= !(1 << (fd % size));
-        return;
-    }
-
-    pub fn FD_ISSET(fd: c_int, set: *const fd_set) -> bool {
-        let fd = fd as usize;
-        let size = size_of_val(&(*set).fds_bits[0]) * 8;
-        return ((*set).fds_bits[fd / size] & (1 << (fd % size))) != 0;
-    }
-
-    pub fn FD_SET(fd: c_int, set: *mut fd_set) -> () {
-        let fd = fd as usize;
-        let size = size_of_val(&(*set).fds_bits[0]) * 8;
-        (*set).fds_bits[fd / size] |= 1 << (fd % size);
-        return;
-    }
-
-    pub fn FD_ZERO(set: *mut fd_set) -> () {
-        for slot in (*set).fds_bits.iter_mut() {
-            *slot = 0;
-        }
-    }
-
-    pub fn CPU_ZERO(cpuset: &mut cpu_set_t) -> () {
-        for slot in cpuset.bits.iter_mut() {
-            *slot = 0;
-        }
-    }
-
-    pub fn CPU_SET(cpu: usize, cpuset: &mut cpu_set_t) -> () {
-        let size_in_bits = 8 * size_of_val(&cpuset.bits[0]); // 32, 64 etc
-        let (idx, offset) = (cpu / size_in_bits, cpu % size_in_bits);
-        cpuset.bits[idx] |= 1 << offset;
-        ()
-    }
-
-    pub fn CPU_CLR(cpu: usize, cpuset: &mut cpu_set_t) -> () {
-        let size_in_bits = 8 * size_of_val(&cpuset.bits[0]); // 32, 64 etc
-        let (idx, offset) = (cpu / size_in_bits, cpu % size_in_bits);
-        cpuset.bits[idx] &= !(1 << offset);
-        ()
-    }
-
-    pub fn CPU_ISSET(cpu: usize, cpuset: &cpu_set_t) -> bool {
-        let size_in_bits = 8 * size_of_val(&cpuset.bits[0]);
-        let (idx, offset) = (cpu / size_in_bits, cpu % size_in_bits);
-        0 != (cpuset.bits[idx] & (1 << offset))
-    }
-
-    pub fn CPU_EQUAL(set1: &cpu_set_t, set2: &cpu_set_t) -> bool {
-        set1.bits == set2.bits
-    }
-
-    pub fn CMSG_DATA(cmsg: *const cmsghdr) -> *mut c_uchar {
-        cmsg.offset(1) as *mut c_uchar
-    }
-
-    pub fn CMSG_NXTHDR(mhdr: *const msghdr, cmsg: *const cmsghdr) -> *mut cmsghdr {
-        if ((*cmsg).cmsg_len as size_t) < size_of::<cmsghdr>() {
-            core::ptr::null_mut::<cmsghdr>()
-        } else if __CMSG_NEXT(cmsg).add(size_of::<cmsghdr>()) >= __MHDR_END(mhdr) {
-            core::ptr::null_mut::<cmsghdr>()
-        } else {
-            __CMSG_NEXT(cmsg).cast()
-        }
-    }
-
-    pub fn CMSG_FIRSTHDR(mhdr: *const msghdr) -> *mut cmsghdr {
-        if (*mhdr).msg_controllen as size_t >= size_of::<cmsghdr>() {
-            (*mhdr).msg_control.cast()
-        } else {
-            core::ptr::null_mut::<cmsghdr>()
-        }
-    }
-
-    pub const fn CMSG_ALIGN(len: size_t) -> size_t {
-        (len + size_of::<size_t>() - 1) & !(size_of::<size_t>() - 1)
-    }
-
-    pub const fn CMSG_SPACE(len: c_uint) -> c_uint {
-        (CMSG_ALIGN(len as size_t) + CMSG_ALIGN(size_of::<cmsghdr>())) as c_uint
-    }
-
-    pub const fn CMSG_LEN(len: c_uint) -> c_uint {
-        (CMSG_ALIGN(size_of::<cmsghdr>()) + len as size_t) as c_uint
-    }
-}
-
-safe_f! {
-    pub const fn WIFSTOPPED(status: c_int) -> bool {
-        (status & 0xff) == 0x7f
-    }
-
-    pub const fn WSTOPSIG(status: c_int) -> c_int {
-        (status >> 8) & 0xff
-    }
-
-    pub const fn WIFCONTINUED(status: c_int) -> bool {
-        status == 0xffff
-    }
-
-    pub const fn WIFSIGNALED(status: c_int) -> bool {
-        ((status & 0x7f) + 1) as i8 >= 2
-    }
-
-    pub const fn WTERMSIG(status: c_int) -> c_int {
-        status & 0x7f
-    }
-
-    pub const fn WIFEXITED(status: c_int) -> bool {
-        (status & 0x7f) == 0
-    }
-
-    pub const fn WEXITSTATUS(status: c_int) -> c_int {
-        (status >> 8) & 0xff
-    }
-
-    pub const fn WCOREDUMP(status: c_int) -> bool {
-        (status & 0x80) != 0
-    }
-
-    pub const fn QCMD(cmd: c_int, type_: c_int) -> c_int {
-        (cmd << 8) | (type_ & 0x00ff)
-    }
-
-    pub const fn makedev(major: c_uint, minor: c_uint) -> crate::dev_t {
-        let major = major as crate::dev_t;
-        let minor = minor as crate::dev_t;
-        let mut dev = 0;
-        dev |= (major & 0x00000fff) << 8;
-        dev |= (major & 0xfffff000) << 32;
-        dev |= (minor & 0x000000ff) << 0;
-        dev |= (minor & 0xffffff00) << 12;
-        dev
-    }
-
-    pub const fn major(dev: crate::dev_t) -> c_uint {
-        let mut major = 0;
-        major |= (dev & 0x00000000000fff00) >> 8;
-        major |= (dev & 0xfffff00000000000) >> 32;
-        major as c_uint
-    }
-
-    pub const fn minor(dev: crate::dev_t) -> c_uint {
-        let mut minor = 0;
-        minor |= (dev & 0x00000000000000ff) >> 0;
-        minor |= (dev & 0x00000ffffff00000) >> 12;
-        minor as c_uint
-    }
-}
-
-fn __CMSG_LEN(cmsg: *const cmsghdr) -> ssize_t {
-    ((unsafe { (*cmsg).cmsg_len as size_t } + size_of::<size_t>() - 1) & !(size_of::<size_t>() - 1))
-        as ssize_t
-}
-
-fn __CMSG_NEXT(cmsg: *const cmsghdr) -> *mut c_uchar {
-    (unsafe { cmsg.offset(__CMSG_LEN(cmsg)) }) as *mut c_uchar
-}
-
-fn __MHDR_END(mhdr: *const msghdr) -> *mut c_uchar {
-    unsafe { (*mhdr).msg_control.offset((*mhdr).msg_controllen as isize) }.cast()
-}
-
-// EXTERN_FN
-
-#[link(name = "c")]
-#[link(name = "fdio")]
-extern "C" {}
-
-#[derive(Debug)]
-pub enum FILE {}
-impl Copy for FILE {}
-impl Clone for FILE {
-    fn clone(&self) -> FILE {
-        *self
-    }
-}
-#[derive(Debug)]
-pub enum fpos_t {} // FIXME(fuchsia): fill this out with a struct
-impl Copy for fpos_t {}
-impl Clone for fpos_t {
-    fn clone(&self) -> fpos_t {
-        *self
-    }
-}
-
-extern "C" {
-    pub fn isalnum(c: c_int) -> c_int;
-    pub fn isalpha(c: c_int) -> c_int;
-    pub fn iscntrl(c: c_int) -> c_int;
-    pub fn isdigit(c: c_int) -> c_int;
-    pub fn isgraph(c: c_int) -> c_int;
-    pub fn islower(c: c_int) -> c_int;
-    pub fn isprint(c: c_int) -> c_int;
-    pub fn ispunct(c: c_int) -> c_int;
-    pub fn isspace(c: c_int) -> c_int;
-    pub fn isupper(c: c_int) -> c_int;
-    pub fn isxdigit(c: c_int) -> c_int;
-    pub fn isblank(c: c_int) -> c_int;
-    pub fn tolower(c: c_int) -> c_int;
-    pub fn toupper(c: c_int) -> c_int;
-    pub fn fopen(filename: *const c_char, mode: *const c_char) -> *mut FILE;
-    pub fn freopen(filename: *const c_char, mode: *const c_char, file: *mut FILE) -> *mut FILE;
-    pub fn fflush(file: *mut FILE) -> c_int;
-    pub fn fclose(file: *mut FILE) -> c_int;
-    pub fn remove(filename: *const c_char) -> c_int;
-    pub fn rename(oldname: *const c_char, newname: *const c_char) -> c_int;
-    pub fn tmpfile() -> *mut FILE;
-    pub fn setvbuf(stream: *mut FILE, buffer: *mut c_char, mode: c_int, size: size_t) -> c_int;
-    pub fn setbuf(stream: *mut FILE, buf: *mut c_char);
-    pub fn getchar() -> c_int;
-    pub fn putchar(c: c_int) -> c_int;
-    pub fn fgetc(stream: *mut FILE) -> c_int;
-    pub fn fgets(buf: *mut c_char, n: c_int, stream: *mut FILE) -> *mut c_char;
-    pub fn fputc(c: c_int, stream:
*mut FILE) -> c_int; - pub fn fputs(s: *const c_char, stream: *mut FILE) -> c_int; - pub fn puts(s: *const c_char) -> c_int; - pub fn ungetc(c: c_int, stream: *mut FILE) -> c_int; - pub fn fread(ptr: *mut c_void, size: size_t, nobj: size_t, stream: *mut FILE) -> size_t; - pub fn fwrite(ptr: *const c_void, size: size_t, nobj: size_t, stream: *mut FILE) -> size_t; - pub fn fseek(stream: *mut FILE, offset: c_long, whence: c_int) -> c_int; - pub fn ftell(stream: *mut FILE) -> c_long; - pub fn rewind(stream: *mut FILE); - pub fn fgetpos(stream: *mut FILE, ptr: *mut fpos_t) -> c_int; - pub fn fsetpos(stream: *mut FILE, ptr: *const fpos_t) -> c_int; - pub fn feof(stream: *mut FILE) -> c_int; - pub fn ferror(stream: *mut FILE) -> c_int; - pub fn perror(s: *const c_char); - pub fn atof(s: *const c_char) -> c_double; - pub fn atoi(s: *const c_char) -> c_int; - pub fn atol(s: *const c_char) -> c_long; - pub fn atoll(s: *const c_char) -> c_longlong; - pub fn strtod(s: *const c_char, endp: *mut *mut c_char) -> c_double; - pub fn strtof(s: *const c_char, endp: *mut *mut c_char) -> c_float; - pub fn strtol(s: *const c_char, endp: *mut *mut c_char, base: c_int) -> c_long; - pub fn strtoll(s: *const c_char, endp: *mut *mut c_char, base: c_int) -> c_longlong; - pub fn strtoul(s: *const c_char, endp: *mut *mut c_char, base: c_int) -> c_ulong; - pub fn strtoull(s: *const c_char, endp: *mut *mut c_char, base: c_int) -> c_ulonglong; - pub fn calloc(nobj: size_t, size: size_t) -> *mut c_void; - pub fn malloc(size: size_t) -> *mut c_void; - pub fn realloc(p: *mut c_void, size: size_t) -> *mut c_void; - pub fn free(p: *mut c_void); - pub fn abort() -> !; - pub fn exit(status: c_int) -> !; - pub fn _exit(status: c_int) -> !; - pub fn atexit(cb: extern "C" fn()) -> c_int; - pub fn system(s: *const c_char) -> c_int; - pub fn getenv(s: *const c_char) -> *mut c_char; - - pub fn strcpy(dst: *mut c_char, src: *const c_char) -> *mut c_char; - pub fn strncpy(dst: *mut c_char, src: *const c_char, n: size_t) -> *mut c_char; - pub fn strcat(s: *mut c_char, ct: *const c_char) -> *mut c_char; - pub fn strncat(s: *mut c_char, ct: *const c_char, n: size_t) -> *mut c_char; - pub fn strcmp(cs: *const c_char, ct: *const c_char) -> c_int; - pub fn strncmp(cs: *const c_char, ct: *const c_char, n: size_t) -> c_int; - pub fn strcoll(cs: *const c_char, ct: *const c_char) -> c_int; - pub fn strchr(cs: *const c_char, c: c_int) -> *mut c_char; - pub fn strrchr(cs: *const c_char, c: c_int) -> *mut c_char; - pub fn strspn(cs: *const c_char, ct: *const c_char) -> size_t; - pub fn strcspn(cs: *const c_char, ct: *const c_char) -> size_t; - pub fn strdup(cs: *const c_char) -> *mut c_char; - pub fn strpbrk(cs: *const c_char, ct: *const c_char) -> *mut c_char; - pub fn strstr(cs: *const c_char, ct: *const c_char) -> *mut c_char; - pub fn strlen(cs: *const c_char) -> size_t; - pub fn strnlen(cs: *const c_char, maxlen: size_t) -> size_t; - pub fn strerror(n: c_int) -> *mut c_char; - pub fn strtok(s: *mut c_char, t: *const c_char) -> *mut c_char; - pub fn strxfrm(s: *mut c_char, ct: *const c_char, n: size_t) -> size_t; - pub fn wcslen(buf: *const wchar_t) -> size_t; - pub fn wcstombs(dest: *mut c_char, src: *const wchar_t, n: size_t) -> size_t; - - pub fn memchr(cx: *const c_void, c: c_int, n: size_t) -> *mut c_void; - pub fn wmemchr(cx: *const wchar_t, c: wchar_t, n: size_t) -> *mut wchar_t; - pub fn memcmp(cx: *const c_void, ct: *const c_void, n: size_t) -> c_int; - pub fn memcpy(dest: *mut c_void, src: *const c_void, n: size_t) -> *mut c_void; - 
pub fn memmove(dest: *mut c_void, src: *const c_void, n: size_t) -> *mut c_void; - pub fn memset(dest: *mut c_void, c: c_int, n: size_t) -> *mut c_void; - - pub fn abs(i: c_int) -> c_int; - pub fn labs(i: c_long) -> c_long; - pub fn rand() -> c_int; - pub fn srand(seed: c_uint); - - pub fn getpwnam(name: *const c_char) -> *mut passwd; - pub fn getpwuid(uid: crate::uid_t) -> *mut passwd; - - pub fn fprintf(stream: *mut crate::FILE, format: *const c_char, ...) -> c_int; - pub fn printf(format: *const c_char, ...) -> c_int; - pub fn snprintf(s: *mut c_char, n: size_t, format: *const c_char, ...) -> c_int; - pub fn sprintf(s: *mut c_char, format: *const c_char, ...) -> c_int; - pub fn fscanf(stream: *mut crate::FILE, format: *const c_char, ...) -> c_int; - pub fn scanf(format: *const c_char, ...) -> c_int; - pub fn sscanf(s: *const c_char, format: *const c_char, ...) -> c_int; - pub fn getchar_unlocked() -> c_int; - pub fn putchar_unlocked(c: c_int) -> c_int; - - pub fn socket(domain: c_int, ty: c_int, protocol: c_int) -> c_int; - pub fn connect(socket: c_int, address: *const sockaddr, len: socklen_t) -> c_int; - pub fn listen(socket: c_int, backlog: c_int) -> c_int; - pub fn accept(socket: c_int, address: *mut sockaddr, address_len: *mut socklen_t) -> c_int; - pub fn getpeername(socket: c_int, address: *mut sockaddr, address_len: *mut socklen_t) - -> c_int; - pub fn getsockname(socket: c_int, address: *mut sockaddr, address_len: *mut socklen_t) - -> c_int; - pub fn setsockopt( - socket: c_int, - level: c_int, - name: c_int, - value: *const c_void, - option_len: socklen_t, - ) -> c_int; - pub fn socketpair( - domain: c_int, - type_: c_int, - protocol: c_int, - socket_vector: *mut c_int, - ) -> c_int; - pub fn sendto( - socket: c_int, - buf: *const c_void, - len: size_t, - flags: c_int, - addr: *const sockaddr, - addrlen: socklen_t, - ) -> ssize_t; - pub fn shutdown(socket: c_int, how: c_int) -> c_int; - - pub fn chmod(path: *const c_char, mode: mode_t) -> c_int; - pub fn fchmod(fd: c_int, mode: mode_t) -> c_int; - - pub fn fstat(fildes: c_int, buf: *mut stat) -> c_int; - - pub fn mkdir(path: *const c_char, mode: mode_t) -> c_int; - - pub fn stat(path: *const c_char, buf: *mut stat) -> c_int; - - pub fn pclose(stream: *mut crate::FILE) -> c_int; - pub fn fdopen(fd: c_int, mode: *const c_char) -> *mut crate::FILE; - pub fn fileno(stream: *mut crate::FILE) -> c_int; - - pub fn open(path: *const c_char, oflag: c_int, ...) -> c_int; - pub fn creat(path: *const c_char, mode: mode_t) -> c_int; - pub fn fcntl(fd: c_int, cmd: c_int, ...) -> c_int; - - pub fn opendir(dirname: *const c_char) -> *mut crate::DIR; - pub fn readdir(dirp: *mut crate::DIR) -> *mut crate::dirent; - pub fn readdir_r( - dirp: *mut crate::DIR, - entry: *mut crate::dirent, - result: *mut *mut crate::dirent, - ) -> c_int; - pub fn closedir(dirp: *mut crate::DIR) -> c_int; - pub fn rewinddir(dirp: *mut crate::DIR); - - pub fn openat(dirfd: c_int, pathname: *const c_char, flags: c_int, ...) 
-> c_int; - pub fn fchmodat(dirfd: c_int, pathname: *const c_char, mode: mode_t, flags: c_int) -> c_int; - pub fn fchown(fd: c_int, owner: crate::uid_t, group: crate::gid_t) -> c_int; - pub fn fchownat( - dirfd: c_int, - pathname: *const c_char, - owner: crate::uid_t, - group: crate::gid_t, - flags: c_int, - ) -> c_int; - pub fn fstatat(dirfd: c_int, pathname: *const c_char, buf: *mut stat, flags: c_int) -> c_int; - pub fn linkat( - olddirfd: c_int, - oldpath: *const c_char, - newdirfd: c_int, - newpath: *const c_char, - flags: c_int, - ) -> c_int; - pub fn mkdirat(dirfd: c_int, pathname: *const c_char, mode: mode_t) -> c_int; - pub fn readlinkat( - dirfd: c_int, - pathname: *const c_char, - buf: *mut c_char, - bufsiz: size_t, - ) -> ssize_t; - pub fn renameat( - olddirfd: c_int, - oldpath: *const c_char, - newdirfd: c_int, - newpath: *const c_char, - ) -> c_int; - pub fn symlinkat(target: *const c_char, newdirfd: c_int, linkpath: *const c_char) -> c_int; - pub fn unlinkat(dirfd: c_int, pathname: *const c_char, flags: c_int) -> c_int; - - pub fn access(path: *const c_char, amode: c_int) -> c_int; - pub fn alarm(seconds: c_uint) -> c_uint; - pub fn chdir(dir: *const c_char) -> c_int; - pub fn chown(path: *const c_char, uid: uid_t, gid: gid_t) -> c_int; - pub fn lchown(path: *const c_char, uid: uid_t, gid: gid_t) -> c_int; - pub fn close(fd: c_int) -> c_int; - pub fn dup(fd: c_int) -> c_int; - pub fn dup2(src: c_int, dst: c_int) -> c_int; - - pub fn execl(path: *const c_char, arg0: *const c_char, ...) -> c_int; - pub fn execle(path: *const c_char, arg0: *const c_char, ...) -> c_int; - pub fn execlp(file: *const c_char, arg0: *const c_char, ...) -> c_int; - - // DIFF(main): changed to `*const *mut` in e77f551de9 - pub fn execv(prog: *const c_char, argv: *const *const c_char) -> c_int; - pub fn execve( - prog: *const c_char, - argv: *const *const c_char, - envp: *const *const c_char, - ) -> c_int; - pub fn execvp(c: *const c_char, argv: *const *const c_char) -> c_int; - - pub fn fork() -> pid_t; - pub fn fpathconf(filedes: c_int, name: c_int) -> c_long; - pub fn getcwd(buf: *mut c_char, size: size_t) -> *mut c_char; - pub fn getegid() -> gid_t; - pub fn geteuid() -> uid_t; - pub fn getgid() -> gid_t; - pub fn getgroups(ngroups_max: c_int, groups: *mut gid_t) -> c_int; - pub fn getlogin() -> *mut c_char; - pub fn getopt(argc: c_int, argv: *const *mut c_char, optstr: *const c_char) -> c_int; - pub fn getpgid(pid: pid_t) -> pid_t; - pub fn getpgrp() -> pid_t; - pub fn getpid() -> pid_t; - pub fn getppid() -> pid_t; - pub fn getuid() -> uid_t; - pub fn isatty(fd: c_int) -> c_int; - pub fn link(src: *const c_char, dst: *const c_char) -> c_int; - pub fn lseek(fd: c_int, offset: off_t, whence: c_int) -> off_t; - pub fn pathconf(path: *const c_char, name: c_int) -> c_long; - pub fn pause() -> c_int; - pub fn pipe(fds: *mut c_int) -> c_int; - pub fn posix_memalign(memptr: *mut *mut c_void, align: size_t, size: size_t) -> c_int; - pub fn read(fd: c_int, buf: *mut c_void, count: size_t) -> ssize_t; - pub fn rmdir(path: *const c_char) -> c_int; - pub fn seteuid(uid: uid_t) -> c_int; - pub fn setegid(gid: gid_t) -> c_int; - pub fn setgid(gid: gid_t) -> c_int; - pub fn setpgid(pid: pid_t, pgid: pid_t) -> c_int; - pub fn setsid() -> pid_t; - pub fn setuid(uid: uid_t) -> c_int; - pub fn sleep(secs: c_uint) -> c_uint; - pub fn nanosleep(rqtp: *const timespec, rmtp: *mut timespec) -> c_int; - pub fn tcgetpgrp(fd: c_int) -> pid_t; - pub fn tcsetpgrp(fd: c_int, pgrp: crate::pid_t) -> c_int; - pub fn ttyname(fd: 
c_int) -> *mut c_char; - pub fn unlink(c: *const c_char) -> c_int; - pub fn wait(status: *mut c_int) -> pid_t; - pub fn waitpid(pid: pid_t, status: *mut c_int, options: c_int) -> pid_t; - pub fn write(fd: c_int, buf: *const c_void, count: size_t) -> ssize_t; - pub fn pread(fd: c_int, buf: *mut c_void, count: size_t, offset: off_t) -> ssize_t; - pub fn pwrite(fd: c_int, buf: *const c_void, count: size_t, offset: off_t) -> ssize_t; - pub fn umask(mask: mode_t) -> mode_t; - - pub fn utime(file: *const c_char, buf: *const utimbuf) -> c_int; - - pub fn kill(pid: pid_t, sig: c_int) -> c_int; - - pub fn mlock(addr: *const c_void, len: size_t) -> c_int; - pub fn munlock(addr: *const c_void, len: size_t) -> c_int; - pub fn mlockall(flags: c_int) -> c_int; - pub fn munlockall() -> c_int; - - pub fn mmap( - addr: *mut c_void, - len: size_t, - prot: c_int, - flags: c_int, - fd: c_int, - offset: off_t, - ) -> *mut c_void; - pub fn munmap(addr: *mut c_void, len: size_t) -> c_int; - - pub fn if_nametoindex(ifname: *const c_char) -> c_uint; - pub fn if_indextoname(ifindex: c_uint, ifname: *mut c_char) -> *mut c_char; - - pub fn lstat(path: *const c_char, buf: *mut stat) -> c_int; - - pub fn fsync(fd: c_int) -> c_int; - - pub fn setenv(name: *const c_char, val: *const c_char, overwrite: c_int) -> c_int; - pub fn unsetenv(name: *const c_char) -> c_int; - - pub fn symlink(path1: *const c_char, path2: *const c_char) -> c_int; - - pub fn ftruncate(fd: c_int, length: off_t) -> c_int; - - pub fn signal(signum: c_int, handler: sighandler_t) -> sighandler_t; - - pub fn realpath(pathname: *const c_char, resolved: *mut c_char) -> *mut c_char; - - pub fn flock(fd: c_int, operation: c_int) -> c_int; - - pub fn gettimeofday(tp: *mut crate::timeval, tz: *mut c_void) -> c_int; - pub fn times(buf: *mut crate::tms) -> crate::clock_t; - - pub fn pthread_self() -> crate::pthread_t; - pub fn pthread_join(native: crate::pthread_t, value: *mut *mut c_void) -> c_int; - pub fn pthread_exit(value: *mut c_void) -> !; - pub fn pthread_attr_init(attr: *mut crate::pthread_attr_t) -> c_int; - pub fn pthread_attr_destroy(attr: *mut crate::pthread_attr_t) -> c_int; - pub fn pthread_attr_getstacksize( - attr: *const crate::pthread_attr_t, - stacksize: *mut size_t, - ) -> c_int; - pub fn pthread_attr_setstacksize(attr: *mut crate::pthread_attr_t, stack_size: size_t) - -> c_int; - pub fn pthread_attr_setdetachstate(attr: *mut crate::pthread_attr_t, state: c_int) -> c_int; - pub fn pthread_detach(thread: crate::pthread_t) -> c_int; - pub fn sched_yield() -> c_int; - pub fn pthread_key_create( - key: *mut pthread_key_t, - dtor: Option, - ) -> c_int; - pub fn pthread_key_delete(key: pthread_key_t) -> c_int; - pub fn pthread_getspecific(key: pthread_key_t) -> *mut c_void; - pub fn pthread_setspecific(key: pthread_key_t, value: *const c_void) -> c_int; - pub fn pthread_mutex_init( - lock: *mut pthread_mutex_t, - attr: *const pthread_mutexattr_t, - ) -> c_int; - pub fn pthread_mutex_destroy(lock: *mut pthread_mutex_t) -> c_int; - pub fn pthread_mutex_lock(lock: *mut pthread_mutex_t) -> c_int; - pub fn pthread_mutex_trylock(lock: *mut pthread_mutex_t) -> c_int; - pub fn pthread_mutex_unlock(lock: *mut pthread_mutex_t) -> c_int; - - pub fn pthread_mutexattr_init(attr: *mut pthread_mutexattr_t) -> c_int; - pub fn pthread_mutexattr_destroy(attr: *mut pthread_mutexattr_t) -> c_int; - pub fn pthread_mutexattr_settype(attr: *mut pthread_mutexattr_t, _type: c_int) -> c_int; - - pub fn pthread_cond_init(cond: *mut pthread_cond_t, attr: *const 
pthread_condattr_t) -> c_int; - pub fn pthread_cond_wait(cond: *mut pthread_cond_t, lock: *mut pthread_mutex_t) -> c_int; - pub fn pthread_cond_timedwait( - cond: *mut pthread_cond_t, - lock: *mut pthread_mutex_t, - abstime: *const crate::timespec, - ) -> c_int; - pub fn pthread_cond_signal(cond: *mut pthread_cond_t) -> c_int; - pub fn pthread_cond_broadcast(cond: *mut pthread_cond_t) -> c_int; - pub fn pthread_cond_destroy(cond: *mut pthread_cond_t) -> c_int; - pub fn pthread_condattr_init(attr: *mut pthread_condattr_t) -> c_int; - pub fn pthread_condattr_destroy(attr: *mut pthread_condattr_t) -> c_int; - pub fn pthread_rwlock_init( - lock: *mut pthread_rwlock_t, - attr: *const pthread_rwlockattr_t, - ) -> c_int; - pub fn pthread_rwlock_destroy(lock: *mut pthread_rwlock_t) -> c_int; - pub fn pthread_rwlock_rdlock(lock: *mut pthread_rwlock_t) -> c_int; - pub fn pthread_rwlock_tryrdlock(lock: *mut pthread_rwlock_t) -> c_int; - pub fn pthread_rwlock_wrlock(lock: *mut pthread_rwlock_t) -> c_int; - pub fn pthread_rwlock_trywrlock(lock: *mut pthread_rwlock_t) -> c_int; - pub fn pthread_rwlock_unlock(lock: *mut pthread_rwlock_t) -> c_int; - pub fn pthread_rwlockattr_init(attr: *mut pthread_rwlockattr_t) -> c_int; - pub fn pthread_rwlockattr_destroy(attr: *mut pthread_rwlockattr_t) -> c_int; - pub fn pthread_getname_np(thread: crate::pthread_t, name: *mut c_char, len: size_t) -> c_int; - pub fn pthread_setname_np(thread: crate::pthread_t, name: *const c_char) -> c_int; - pub fn strerror_r(errnum: c_int, buf: *mut c_char, buflen: size_t) -> c_int; - - pub fn getsockopt( - sockfd: c_int, - level: c_int, - optname: c_int, - optval: *mut c_void, - optlen: *mut crate::socklen_t, - ) -> c_int; - pub fn raise(signum: c_int) -> c_int; - pub fn sigaction(signum: c_int, act: *const sigaction, oldact: *mut sigaction) -> c_int; - - pub fn utimes(filename: *const c_char, times: *const crate::timeval) -> c_int; - pub fn dlopen(filename: *const c_char, flag: c_int) -> *mut c_void; - pub fn dlerror() -> *mut c_char; - pub fn dlsym(handle: *mut c_void, symbol: *const c_char) -> *mut c_void; - pub fn dlclose(handle: *mut c_void) -> c_int; - pub fn dladdr(addr: *const c_void, info: *mut Dl_info) -> c_int; - - pub fn getaddrinfo( - node: *const c_char, - service: *const c_char, - hints: *const addrinfo, - res: *mut *mut addrinfo, - ) -> c_int; - pub fn freeaddrinfo(res: *mut addrinfo); - pub fn gai_strerror(errcode: c_int) -> *const c_char; - pub fn res_init() -> c_int; - - pub fn gmtime_r(time_p: *const time_t, result: *mut tm) -> *mut tm; - pub fn localtime_r(time_p: *const time_t, result: *mut tm) -> *mut tm; - pub fn mktime(tm: *mut tm) -> time_t; - pub fn time(time: *mut time_t) -> time_t; - pub fn gmtime(time_p: *const time_t) -> *mut tm; - pub fn localtime(time_p: *const time_t) -> *mut tm; - - pub fn mknod(pathname: *const c_char, mode: mode_t, dev: crate::dev_t) -> c_int; - pub fn uname(buf: *mut crate::utsname) -> c_int; - pub fn gethostname(name: *mut c_char, len: size_t) -> c_int; - pub fn getservbyname(name: *const c_char, proto: *const c_char) -> *mut servent; - pub fn getprotobyname(name: *const c_char) -> *mut protoent; - pub fn getprotobynumber(proto: c_int) -> *mut protoent; - pub fn usleep(secs: c_uint) -> c_int; - pub fn send(socket: c_int, buf: *const c_void, len: size_t, flags: c_int) -> ssize_t; - pub fn recv(socket: c_int, buf: *mut c_void, len: size_t, flags: c_int) -> ssize_t; - pub fn putenv(string: *mut c_char) -> c_int; - pub fn poll(fds: *mut pollfd, nfds: nfds_t, timeout: c_int) -> 
c_int; - pub fn select( - nfds: c_int, - readfds: *mut fd_set, - writefds: *mut fd_set, - errorfds: *mut fd_set, - timeout: *mut timeval, - ) -> c_int; - pub fn setlocale(category: c_int, locale: *const c_char) -> *mut c_char; - pub fn localeconv() -> *mut lconv; - - pub fn sem_destroy(sem: *mut sem_t) -> c_int; - pub fn sem_wait(sem: *mut sem_t) -> c_int; - pub fn sem_trywait(sem: *mut sem_t) -> c_int; - pub fn sem_post(sem: *mut sem_t) -> c_int; - pub fn sem_init(sem: *mut sem_t, pshared: c_int, value: c_uint) -> c_int; - pub fn statvfs(path: *const c_char, buf: *mut statvfs) -> c_int; - pub fn fstatvfs(fd: c_int, buf: *mut statvfs) -> c_int; - - pub fn readlink(path: *const c_char, buf: *mut c_char, bufsz: size_t) -> ssize_t; - - pub fn sigemptyset(set: *mut sigset_t) -> c_int; - pub fn sigaddset(set: *mut sigset_t, signum: c_int) -> c_int; - pub fn sigfillset(set: *mut sigset_t) -> c_int; - pub fn sigdelset(set: *mut sigset_t, signum: c_int) -> c_int; - pub fn sigismember(set: *const sigset_t, signum: c_int) -> c_int; - - pub fn sigprocmask(how: c_int, set: *const sigset_t, oldset: *mut sigset_t) -> c_int; - pub fn sigpending(set: *mut sigset_t) -> c_int; - - pub fn timegm(tm: *mut crate::tm) -> time_t; - - pub fn getsid(pid: pid_t) -> pid_t; - - pub fn sysconf(name: c_int) -> c_long; - - pub fn mkfifo(path: *const c_char, mode: mode_t) -> c_int; - - pub fn pselect( - nfds: c_int, - readfds: *mut fd_set, - writefds: *mut fd_set, - errorfds: *mut fd_set, - timeout: *const timespec, - sigmask: *const sigset_t, - ) -> c_int; - pub fn fseeko(stream: *mut crate::FILE, offset: off_t, whence: c_int) -> c_int; - pub fn ftello(stream: *mut crate::FILE) -> off_t; - pub fn tcdrain(fd: c_int) -> c_int; - pub fn cfgetispeed(termios: *const crate::termios) -> crate::speed_t; - pub fn cfgetospeed(termios: *const crate::termios) -> crate::speed_t; - pub fn cfmakeraw(termios: *mut crate::termios); - pub fn cfsetispeed(termios: *mut crate::termios, speed: crate::speed_t) -> c_int; - pub fn cfsetospeed(termios: *mut crate::termios, speed: crate::speed_t) -> c_int; - pub fn cfsetspeed(termios: *mut crate::termios, speed: crate::speed_t) -> c_int; - pub fn tcgetattr(fd: c_int, termios: *mut crate::termios) -> c_int; - pub fn tcsetattr(fd: c_int, optional_actions: c_int, termios: *const crate::termios) -> c_int; - pub fn tcflow(fd: c_int, action: c_int) -> c_int; - pub fn tcflush(fd: c_int, action: c_int) -> c_int; - pub fn tcgetsid(fd: c_int) -> crate::pid_t; - pub fn tcsendbreak(fd: c_int, duration: c_int) -> c_int; - pub fn mkstemp(template: *mut c_char) -> c_int; - pub fn mkdtemp(template: *mut c_char) -> *mut c_char; - - pub fn tmpnam(ptr: *mut c_char) -> *mut c_char; - - pub fn openlog(ident: *const c_char, logopt: c_int, facility: c_int); - pub fn closelog(); - pub fn setlogmask(maskpri: c_int) -> c_int; - pub fn syslog(priority: c_int, message: *const c_char, ...); - - pub fn grantpt(fd: c_int) -> c_int; - pub fn posix_openpt(flags: c_int) -> c_int; - pub fn ptsname(fd: c_int) -> *mut c_char; - pub fn unlockpt(fd: c_int) -> c_int; - - pub fn fdatasync(fd: c_int) -> c_int; - pub fn clock_getres(clk_id: crate::clockid_t, tp: *mut crate::timespec) -> c_int; - pub fn clock_gettime(clk_id: crate::clockid_t, tp: *mut crate::timespec) -> c_int; - pub fn clock_settime(clk_id: crate::clockid_t, tp: *const crate::timespec) -> c_int; - pub fn dirfd(dirp: *mut crate::DIR) -> c_int; - - pub fn pthread_getattr_np(native: crate::pthread_t, attr: *mut crate::pthread_attr_t) -> c_int; - pub fn pthread_attr_getstack( 
- attr: *const crate::pthread_attr_t, - stackaddr: *mut *mut c_void, - stacksize: *mut size_t, - ) -> c_int; - pub fn memalign(align: size_t, size: size_t) -> *mut c_void; - pub fn setgroups(ngroups: size_t, ptr: *const crate::gid_t) -> c_int; - pub fn pipe2(fds: *mut c_int, flags: c_int) -> c_int; - pub fn statfs(path: *const c_char, buf: *mut statfs) -> c_int; - pub fn fstatfs(fd: c_int, buf: *mut statfs) -> c_int; - pub fn memrchr(cx: *const c_void, c: c_int, n: size_t) -> *mut c_void; - - pub fn posix_fadvise(fd: c_int, offset: off_t, len: off_t, advise: c_int) -> c_int; - pub fn futimens(fd: c_int, times: *const crate::timespec) -> c_int; - pub fn utimensat( - dirfd: c_int, - path: *const c_char, - times: *const crate::timespec, - flag: c_int, - ) -> c_int; - pub fn duplocale(base: crate::locale_t) -> crate::locale_t; - pub fn freelocale(loc: crate::locale_t); - pub fn newlocale(mask: c_int, locale: *const c_char, base: crate::locale_t) -> crate::locale_t; - pub fn uselocale(loc: crate::locale_t) -> crate::locale_t; - - pub fn fdopendir(fd: c_int) -> *mut crate::DIR; - - pub fn mknodat(dirfd: c_int, pathname: *const c_char, mode: mode_t, dev: dev_t) -> c_int; - pub fn pthread_condattr_getclock( - attr: *const pthread_condattr_t, - clock_id: *mut clockid_t, - ) -> c_int; - pub fn pthread_condattr_setclock( - attr: *mut pthread_condattr_t, - clock_id: crate::clockid_t, - ) -> c_int; - pub fn accept4( - fd: c_int, - addr: *mut crate::sockaddr, - len: *mut crate::socklen_t, - flg: c_int, - ) -> c_int; - pub fn ptsname_r(fd: c_int, buf: *mut c_char, buflen: size_t) -> c_int; - pub fn clearenv() -> c_int; - pub fn waitid( - idtype: idtype_t, - id: id_t, - infop: *mut crate::siginfo_t, - options: c_int, - ) -> c_int; - pub fn setreuid(ruid: crate::uid_t, euid: crate::uid_t) -> c_int; - pub fn setregid(rgid: crate::gid_t, egid: crate::gid_t) -> c_int; - pub fn getresuid( - ruid: *mut crate::uid_t, - euid: *mut crate::uid_t, - suid: *mut crate::uid_t, - ) -> c_int; - pub fn getresgid( - rgid: *mut crate::gid_t, - egid: *mut crate::gid_t, - sgid: *mut crate::gid_t, - ) -> c_int; - pub fn acct(filename: *const c_char) -> c_int; - pub fn brk(addr: *mut c_void) -> c_int; - pub fn setresgid(rgid: crate::gid_t, egid: crate::gid_t, sgid: crate::gid_t) -> c_int; - pub fn setresuid(ruid: crate::uid_t, euid: crate::uid_t, suid: crate::uid_t) -> c_int; - pub fn openpty( - amaster: *mut c_int, - aslave: *mut c_int, - name: *mut c_char, - termp: *const termios, - winp: *const crate::winsize, - ) -> c_int; - - // DIFF(main): changed to `*const *mut` in e77f551de9 - pub fn execvpe( - file: *const c_char, - argv: *const *const c_char, - envp: *const *const c_char, - ) -> c_int; - pub fn fexecve(fd: c_int, argv: *const *const c_char, envp: *const *const c_char) -> c_int; - - pub fn ioctl(fd: c_int, request: c_int, ...) 
-> c_int; - - pub fn lutimes(file: *const c_char, times: *const crate::timeval) -> c_int; - - pub fn setpwent(); - pub fn endpwent(); - pub fn getpwent() -> *mut passwd; - - pub fn shm_open(name: *const c_char, oflag: c_int, mode: mode_t) -> c_int; - - // System V IPC - pub fn shmget(key: crate::key_t, size: size_t, shmflg: c_int) -> c_int; - pub fn shmat(shmid: c_int, shmaddr: *const c_void, shmflg: c_int) -> *mut c_void; - pub fn shmdt(shmaddr: *const c_void) -> c_int; - pub fn shmctl(shmid: c_int, cmd: c_int, buf: *mut crate::shmid_ds) -> c_int; - pub fn ftok(pathname: *const c_char, proj_id: c_int) -> crate::key_t; - pub fn semget(key: crate::key_t, nsems: c_int, semflag: c_int) -> c_int; - pub fn semop(semid: c_int, sops: *mut crate::sembuf, nsops: size_t) -> c_int; - pub fn semctl(semid: c_int, semnum: c_int, cmd: c_int, ...) -> c_int; - pub fn msgctl(msqid: c_int, cmd: c_int, buf: *mut msqid_ds) -> c_int; - pub fn msgget(key: crate::key_t, msgflg: c_int) -> c_int; - pub fn msgrcv( - msqid: c_int, - msgp: *mut c_void, - msgsz: size_t, - msgtyp: c_long, - msgflg: c_int, - ) -> ssize_t; - pub fn msgsnd(msqid: c_int, msgp: *const c_void, msgsz: size_t, msgflg: c_int) -> c_int; - - pub fn mprotect(addr: *mut c_void, len: size_t, prot: c_int) -> c_int; - pub fn __errno_location() -> *mut c_int; - - pub fn fallocate(fd: c_int, mode: c_int, offset: off_t, len: off_t) -> c_int; - pub fn posix_fallocate(fd: c_int, offset: off_t, len: off_t) -> c_int; - pub fn readahead(fd: c_int, offset: off64_t, count: size_t) -> ssize_t; - pub fn signalfd(fd: c_int, mask: *const crate::sigset_t, flags: c_int) -> c_int; - pub fn timerfd_create(clockid: c_int, flags: c_int) -> c_int; - pub fn timerfd_gettime(fd: c_int, curr_value: *mut itimerspec) -> c_int; - pub fn timerfd_settime( - fd: c_int, - flags: c_int, - new_value: *const itimerspec, - old_value: *mut itimerspec, - ) -> c_int; - pub fn pwritev(fd: c_int, iov: *const crate::iovec, iovcnt: c_int, offset: off_t) -> ssize_t; - pub fn preadv(fd: c_int, iov: *const crate::iovec, iovcnt: c_int, offset: off_t) -> ssize_t; - pub fn quotactl(cmd: c_int, special: *const c_char, id: c_int, data: *mut c_char) -> c_int; - pub fn dup3(oldfd: c_int, newfd: c_int, flags: c_int) -> c_int; - pub fn mkostemp(template: *mut c_char, flags: c_int) -> c_int; - pub fn mkostemps(template: *mut c_char, suffixlen: c_int, flags: c_int) -> c_int; - pub fn sigtimedwait( - set: *const sigset_t, - info: *mut siginfo_t, - timeout: *const crate::timespec, - ) -> c_int; - pub fn sigwaitinfo(set: *const sigset_t, info: *mut siginfo_t) -> c_int; - pub fn nl_langinfo_l(item: crate::nl_item, locale: crate::locale_t) -> *mut c_char; - pub fn getnameinfo( - sa: *const crate::sockaddr, - salen: crate::socklen_t, - host: *mut c_char, - hostlen: crate::socklen_t, - serv: *mut c_char, - servlen: crate::socklen_t, - flags: c_int, - ) -> c_int; - pub fn reboot(how_to: c_int) -> c_int; - pub fn setfsgid(gid: crate::gid_t) -> c_int; - pub fn setfsuid(uid: crate::uid_t) -> c_int; - - // Not available now on Android - pub fn mkfifoat(dirfd: c_int, pathname: *const c_char, mode: mode_t) -> c_int; - pub fn if_nameindex() -> *mut if_nameindex; - pub fn if_freenameindex(ptr: *mut if_nameindex); - pub fn sync_file_range(fd: c_int, offset: off64_t, nbytes: off64_t, flags: c_uint) -> c_int; - pub fn getifaddrs(ifap: *mut *mut crate::ifaddrs) -> c_int; - pub fn freeifaddrs(ifa: *mut crate::ifaddrs); - - pub fn glob( - pattern: *const c_char, - flags: c_int, - errfunc: Option<extern "C" fn(epath: *const c_char, errno: c_int) -> c_int>, - pglob: *mut 
crate::glob_t, - ) -> c_int; - pub fn globfree(pglob: *mut crate::glob_t); - - pub fn posix_madvise(addr: *mut c_void, len: size_t, advice: c_int) -> c_int; - - pub fn shm_unlink(name: *const c_char) -> c_int; - - pub fn seekdir(dirp: *mut crate::DIR, loc: c_long); - - pub fn telldir(dirp: *mut crate::DIR) -> c_long; - pub fn madvise(addr: *mut c_void, len: size_t, advice: c_int) -> c_int; - - pub fn msync(addr: *mut c_void, len: size_t, flags: c_int) -> c_int; - - pub fn recvfrom( - socket: c_int, - buf: *mut c_void, - len: size_t, - flags: c_int, - addr: *mut crate::sockaddr, - addrlen: *mut crate::socklen_t, - ) -> ssize_t; - pub fn mkstemps(template: *mut c_char, suffixlen: c_int) -> c_int; - pub fn futimes(fd: c_int, times: *const crate::timeval) -> c_int; - pub fn nl_langinfo(item: crate::nl_item) -> *mut c_char; - - pub fn bind( - socket: c_int, - address: *const crate::sockaddr, - address_len: crate::socklen_t, - ) -> c_int; - - pub fn writev(fd: c_int, iov: *const crate::iovec, iovcnt: c_int) -> ssize_t; - pub fn readv(fd: c_int, iov: *const crate::iovec, iovcnt: c_int) -> ssize_t; - - pub fn sendmsg(fd: c_int, msg: *const crate::msghdr, flags: c_int) -> ssize_t; - pub fn recvmsg(fd: c_int, msg: *mut crate::msghdr, flags: c_int) -> ssize_t; - pub fn getdomainname(name: *mut c_char, len: size_t) -> c_int; - pub fn setdomainname(name: *const c_char, len: size_t) -> c_int; - pub fn vhangup() -> c_int; - pub fn sendmmsg(sockfd: c_int, msgvec: *mut mmsghdr, vlen: c_uint, flags: c_int) -> c_int; - pub fn recvmmsg( - sockfd: c_int, - msgvec: *mut mmsghdr, - vlen: c_uint, - flags: c_int, - timeout: *mut crate::timespec, - ) -> c_int; - pub fn sync(); - pub fn syscall(num: c_long, ...) -> c_long; - pub fn sched_getaffinity( - pid: crate::pid_t, - cpusetsize: size_t, - cpuset: *mut cpu_set_t, - ) -> c_int; - pub fn sched_setaffinity( - pid: crate::pid_t, - cpusetsize: size_t, - cpuset: *const cpu_set_t, - ) -> c_int; - pub fn umount(target: *const c_char) -> c_int; - pub fn sched_get_priority_max(policy: c_int) -> c_int; - pub fn tee(fd_in: c_int, fd_out: c_int, len: size_t, flags: c_uint) -> ssize_t; - pub fn settimeofday(tv: *const crate::timeval, tz: *const crate::timezone) -> c_int; - pub fn splice( - fd_in: c_int, - off_in: *mut crate::loff_t, - fd_out: c_int, - off_out: *mut crate::loff_t, - len: size_t, - flags: c_uint, - ) -> ssize_t; - pub fn eventfd(init: c_uint, flags: c_int) -> c_int; - pub fn sched_rr_get_interval(pid: crate::pid_t, tp: *mut crate::timespec) -> c_int; - pub fn sem_timedwait(sem: *mut sem_t, abstime: *const crate::timespec) -> c_int; - pub fn sem_getvalue(sem: *mut sem_t, sval: *mut c_int) -> c_int; - pub fn sched_setparam(pid: crate::pid_t, param: *const crate::sched_param) -> c_int; - pub fn swapoff(puath: *const c_char) -> c_int; - pub fn vmsplice(fd: c_int, iov: *const crate::iovec, nr_segs: size_t, flags: c_uint) - -> ssize_t; - pub fn mount( - src: *const c_char, - target: *const c_char, - fstype: *const c_char, - flags: c_ulong, - data: *const c_void, - ) -> c_int; - pub fn personality(persona: c_ulong) -> c_int; - pub fn sched_getparam(pid: crate::pid_t, param: *mut crate::sched_param) -> c_int; - pub fn ppoll( - fds: *mut crate::pollfd, - nfds: nfds_t, - timeout: *const crate::timespec, - sigmask: *const sigset_t, - ) -> c_int; - pub fn pthread_mutex_timedlock( - lock: *mut pthread_mutex_t, - abstime: *const crate::timespec, - ) -> c_int; - pub fn clone( - cb: extern "C" fn(*mut c_void) -> c_int, - child_stack: *mut c_void, - flags: c_int, - arg: *mut 
c_void, - ... - ) -> c_int; - pub fn sched_getscheduler(pid: crate::pid_t) -> c_int; - pub fn clock_nanosleep( - clk_id: crate::clockid_t, - flags: c_int, - rqtp: *const crate::timespec, - rmtp: *mut crate::timespec, - ) -> c_int; - pub fn pthread_attr_getguardsize( - attr: *const crate::pthread_attr_t, - guardsize: *mut size_t, - ) -> c_int; - pub fn pthread_attr_setguardsize(attr: *mut crate::pthread_attr_t, guardsize: size_t) -> c_int; - pub fn sethostname(name: *const c_char, len: size_t) -> c_int; - pub fn sched_get_priority_min(policy: c_int) -> c_int; - pub fn umount2(target: *const c_char, flags: c_int) -> c_int; - pub fn swapon(path: *const c_char, swapflags: c_int) -> c_int; - pub fn sched_setscheduler( - pid: crate::pid_t, - policy: c_int, - param: *const crate::sched_param, - ) -> c_int; - pub fn sigsuspend(mask: *const crate::sigset_t) -> c_int; - pub fn getgrgid_r( - gid: crate::gid_t, - grp: *mut crate::group, - buf: *mut c_char, - buflen: size_t, - result: *mut *mut crate::group, - ) -> c_int; - pub fn sigaltstack(ss: *const stack_t, oss: *mut stack_t) -> c_int; - pub fn sem_close(sem: *mut sem_t) -> c_int; - pub fn getdtablesize() -> c_int; - pub fn getgrnam_r( - name: *const c_char, - grp: *mut crate::group, - buf: *mut c_char, - buflen: size_t, - result: *mut *mut crate::group, - ) -> c_int; - pub fn initgroups(user: *const c_char, group: crate::gid_t) -> c_int; - pub fn pthread_sigmask(how: c_int, set: *const sigset_t, oldset: *mut sigset_t) -> c_int; - pub fn sem_open(name: *const c_char, oflag: c_int, ...) -> *mut sem_t; - pub fn getgrnam(name: *const c_char) -> *mut crate::group; - pub fn pthread_cancel(thread: crate::pthread_t) -> c_int; - pub fn pthread_kill(thread: crate::pthread_t, sig: c_int) -> c_int; - pub fn sem_unlink(name: *const c_char) -> c_int; - pub fn daemon(nochdir: c_int, noclose: c_int) -> c_int; - pub fn getpwnam_r( - name: *const c_char, - pwd: *mut passwd, - buf: *mut c_char, - buflen: size_t, - result: *mut *mut passwd, - ) -> c_int; - pub fn getpwuid_r( - uid: crate::uid_t, - pwd: *mut passwd, - buf: *mut c_char, - buflen: size_t, - result: *mut *mut passwd, - ) -> c_int; - pub fn sigwait(set: *const sigset_t, sig: *mut c_int) -> c_int; - pub fn pthread_atfork( - prepare: Option<unsafe extern "C" fn()>, - parent: Option<unsafe extern "C" fn()>, - child: Option<unsafe extern "C" fn()>, - ) -> c_int; - pub fn getgrgid(gid: crate::gid_t) -> *mut crate::group; - - pub fn setgrent(); - pub fn endgrent(); - pub fn getgrent() -> *mut crate::group; - - pub fn getgrouplist( - user: *const c_char, - group: crate::gid_t, - groups: *mut crate::gid_t, - ngroups: *mut c_int, - ) -> c_int; - pub fn popen(command: *const c_char, mode: *const c_char) -> *mut crate::FILE; - pub fn faccessat(dirfd: c_int, pathname: *const c_char, mode: c_int, flags: c_int) -> c_int; - pub fn pthread_create( - native: *mut crate::pthread_t, - attr: *const crate::pthread_attr_t, - f: extern "C" fn(*mut c_void) -> *mut c_void, - value: *mut c_void, - ) -> c_int; - pub fn dl_iterate_phdr( - callback: Option< - unsafe extern "C" fn( - info: *mut crate::dl_phdr_info, - size: size_t, - data: *mut c_void, - ) -> c_int, - >, - data: *mut c_void, - ) -> c_int; -} - -cfg_if! 
{ - if #[cfg(target_arch = "aarch64")] { - mod aarch64; - pub use self::aarch64::*; - } else if #[cfg(any(target_arch = "x86_64"))] { - mod x86_64; - pub use self::x86_64::*; - } else if #[cfg(any(target_arch = "riscv64"))] { - mod riscv64; - pub use self::riscv64::*; - } else { - // Unknown target_arch - } -} diff --git a/vendor/libc/src/fuchsia/riscv64.rs b/vendor/libc/src/fuchsia/riscv64.rs deleted file mode 100644 index c57d52aad13867..00000000000000 --- a/vendor/libc/src/fuchsia/riscv64.rs +++ /dev/null @@ -1,46 +0,0 @@ -use crate::off_t; -use crate::prelude::*; - -// From psABI Calling Convention for RV64 -pub type __u64 = c_ulonglong; -pub type wchar_t = i32; - -pub type nlink_t = c_ulong; -pub type blksize_t = c_long; - -pub type stat64 = stat; -s! { - pub struct stat { - pub st_dev: crate::dev_t, - pub st_ino: crate::ino_t, - pub st_nlink: crate::nlink_t, - pub st_mode: crate::mode_t, - pub st_uid: crate::uid_t, - pub st_gid: crate::gid_t, - __pad0: c_int, - pub st_rdev: crate::dev_t, - pub st_size: off_t, - pub st_blksize: crate::blksize_t, - pub st_blocks: crate::blkcnt_t, - pub st_atime: crate::time_t, - pub st_atime_nsec: c_long, - pub st_mtime: crate::time_t, - pub st_mtime_nsec: c_long, - pub st_ctime: crate::time_t, - pub st_ctime_nsec: c_long, - __unused: [c_long; 3], - } - - // Not actually used, IPC calls just return ENOSYS - pub struct ipc_perm { - pub __ipc_perm_key: crate::key_t, - pub uid: crate::uid_t, - pub gid: crate::gid_t, - pub cuid: crate::uid_t, - pub cgid: crate::gid_t, - pub mode: crate::mode_t, - pub __seq: c_ushort, - __unused1: c_ulong, - __unused2: c_ulong, - } -} diff --git a/vendor/libc/src/fuchsia/x86_64.rs b/vendor/libc/src/fuchsia/x86_64.rs deleted file mode 100644 index add60a45640204..00000000000000 --- a/vendor/libc/src/fuchsia/x86_64.rs +++ /dev/null @@ -1,142 +0,0 @@ -use crate::off_t; -use crate::prelude::*; - -pub type wchar_t = i32; -pub type nlink_t = u64; -pub type blksize_t = c_long; -pub type __u64 = c_ulonglong; - -s! { - pub struct stat { - pub st_dev: crate::dev_t, - pub st_ino: crate::ino_t, - pub st_nlink: crate::nlink_t, - pub st_mode: crate::mode_t, - pub st_uid: crate::uid_t, - pub st_gid: crate::gid_t, - __pad0: c_int, - pub st_rdev: crate::dev_t, - pub st_size: off_t, - pub st_blksize: crate::blksize_t, - pub st_blocks: crate::blkcnt_t, - pub st_atime: crate::time_t, - pub st_atime_nsec: c_long, - pub st_mtime: crate::time_t, - pub st_mtime_nsec: c_long, - pub st_ctime: crate::time_t, - pub st_ctime_nsec: c_long, - __unused: [c_long; 3], - } - - pub struct stat64 { - pub st_dev: crate::dev_t, - pub st_ino: crate::ino64_t, - pub st_nlink: crate::nlink_t, - pub st_mode: crate::mode_t, - pub st_uid: crate::uid_t, - pub st_gid: crate::gid_t, - __pad0: c_int, - pub st_rdev: crate::dev_t, - pub st_size: off_t, - pub st_blksize: crate::blksize_t, - pub st_blocks: crate::blkcnt64_t, - pub st_atime: crate::time_t, - pub st_atime_nsec: c_long, - pub st_mtime: crate::time_t, - pub st_mtime_nsec: c_long, - pub st_ctime: crate::time_t, - pub st_ctime_nsec: c_long, - __reserved: [c_long; 3], - } - - pub struct mcontext_t { - __private: [u64; 32], - } - - pub struct ipc_perm { - pub __ipc_perm_key: crate::key_t, - pub uid: crate::uid_t, - pub gid: crate::gid_t, - pub cuid: crate::uid_t, - pub cgid: crate::gid_t, - pub mode: crate::mode_t, - pub __seq: c_int, - __unused1: c_long, - __unused2: c_long, - } -} - -s_no_extra_traits! 
{ - pub struct ucontext_t { - pub uc_flags: c_ulong, - pub uc_link: *mut ucontext_t, - pub uc_stack: crate::stack_t, - pub uc_mcontext: mcontext_t, - pub uc_sigmask: crate::sigset_t, - __private: [u8; 512], - } -} - -cfg_if! { - if #[cfg(feature = "extra_traits")] { - impl PartialEq for ucontext_t { - fn eq(&self, other: &ucontext_t) -> bool { - self.uc_flags == other.uc_flags - && self.uc_link == other.uc_link - && self.uc_stack == other.uc_stack - && self.uc_mcontext == other.uc_mcontext - && self.uc_sigmask == other.uc_sigmask - && self - .__private - .iter() - .zip(other.__private.iter()) - .all(|(a, b)| a == b) - } - } - impl Eq for ucontext_t {} - impl hash::Hash for ucontext_t { - fn hash<H: hash::Hasher>(&self, state: &mut H) { - self.uc_flags.hash(state); - self.uc_link.hash(state); - self.uc_stack.hash(state); - self.uc_mcontext.hash(state); - self.uc_sigmask.hash(state); - self.__private.hash(state); - } - } - } -} - -// offsets in user_regs_structs, from sys/reg.h -pub const R15: c_int = 0; -pub const R14: c_int = 1; -pub const R13: c_int = 2; -pub const R12: c_int = 3; -pub const RBP: c_int = 4; -pub const RBX: c_int = 5; -pub const R11: c_int = 6; -pub const R10: c_int = 7; -pub const R9: c_int = 8; -pub const R8: c_int = 9; -pub const RAX: c_int = 10; -pub const RCX: c_int = 11; -pub const RDX: c_int = 12; -pub const RSI: c_int = 13; -pub const RDI: c_int = 14; -pub const ORIG_RAX: c_int = 15; -pub const RIP: c_int = 16; -pub const CS: c_int = 17; -pub const EFLAGS: c_int = 18; -pub const RSP: c_int = 19; -pub const SS: c_int = 20; -pub const FS_BASE: c_int = 21; -pub const GS_BASE: c_int = 22; -pub const DS: c_int = 23; -pub const ES: c_int = 24; -pub const FS: c_int = 25; -pub const GS: c_int = 26; - -pub const MAP_32BIT: c_int = 0x0040; - -pub const SIGSTKSZ: size_t = 8192; -pub const MINSIGSTKSZ: size_t = 2048; diff --git a/vendor/libc/src/hermit.rs b/vendor/libc/src/hermit.rs deleted file mode 100644 index b96be6b0e2a2fb..00000000000000 --- a/vendor/libc/src/hermit.rs +++ /dev/null @@ -1,561 +0,0 @@ -//! Hermit C type definitions - -use crate::prelude::*; - -pub type intmax_t = i64; -pub type uintmax_t = u64; -pub type intptr_t = isize; -pub type uintptr_t = usize; - -pub type size_t = usize; -pub type ssize_t = isize; -pub type ptrdiff_t = isize; - -pub type clockid_t = i32; -pub type in_addr_t = u32; -pub type in_port_t = u16; -pub type mode_t = u32; -pub type nfds_t = usize; -pub type pid_t = i32; -pub type sa_family_t = u8; -pub type socklen_t = u32; -pub type time_t = i64; - -s! 
{ - pub struct addrinfo { - pub ai_flags: i32, - pub ai_family: i32, - pub ai_socktype: i32, - pub ai_protocol: i32, - pub ai_addrlen: socklen_t, - pub ai_canonname: *mut c_char, - pub ai_addr: *mut sockaddr, - pub ai_next: *mut addrinfo, - } - - pub struct dirent64 { - pub d_ino: u64, - pub d_off: i64, - pub d_reclen: u16, - pub d_type: u8, - pub d_name: [c_char; 256], - } - - #[repr(align(4))] - pub struct in6_addr { - pub s6_addr: [u8; 16], - } - - pub struct in_addr { - pub s_addr: in_addr_t, - } - - pub struct iovec { - iov_base: *mut c_void, - iov_len: usize, - } - - pub struct pollfd { - pub fd: i32, - pub events: i16, - pub revents: i16, - } - - pub struct sockaddr { - pub sa_len: u8, - pub sa_family: sa_family_t, - pub sa_data: [c_char; 14], - } - - pub struct sockaddr_in { - pub sin_len: u8, - pub sin_family: sa_family_t, - pub sin_port: in_port_t, - pub sin_addr: in_addr, - pub sin_zero: [c_char; 8], - } - - pub struct sockaddr_in6 { - pub sin6_len: u8, - pub sin6_family: sa_family_t, - pub sin6_port: in_port_t, - pub sin6_flowinfo: u32, - pub sin6_addr: in6_addr, - pub sin6_scope_id: u32, - } - - pub struct sockaddr_storage { - pub ss_len: u8, - pub ss_family: sa_family_t, - __ss_pad1: [u8; 6], - __ss_align: i64, - __ss_pad2: [u8; 112], - } - - pub struct stat { - pub st_dev: u64, - pub st_ino: u64, - pub st_nlink: u64, - pub st_mode: mode_t, - pub st_uid: u32, - pub st_gid: u32, - pub st_rdev: u64, - pub st_size: i64, - pub st_blksize: i64, - pub st_blocks: i64, - pub st_atim: timespec, - pub st_mtim: timespec, - pub st_ctim: timespec, - } - - pub struct timespec { - pub tv_sec: time_t, - pub tv_nsec: i32, - } -} - -pub const AF_UNSPEC: i32 = 0; -pub const AF_INET: i32 = 3; -pub const AF_INET6: i32 = 1; -pub const AF_VSOCK: i32 = 2; - -pub const CLOCK_REALTIME: clockid_t = 1; -pub const CLOCK_MONOTONIC: clockid_t = 4; - -pub const DT_UNKNOWN: u8 = 0; -pub const DT_FIFO: u8 = 1; -pub const DT_CHR: u8 = 2; -pub const DT_DIR: u8 = 4; -pub const DT_BLK: u8 = 6; -pub const DT_REG: u8 = 8; -pub const DT_LNK: u8 = 10; -pub const DT_SOCK: u8 = 12; -pub const DT_WHT: u8 = 14; - -pub const EAI_AGAIN: i32 = 2; -pub const EAI_BADFLAGS: i32 = 3; -pub const EAI_FAIL: i32 = 4; -pub const EAI_FAMILY: i32 = 5; -pub const EAI_MEMORY: i32 = 6; -pub const EAI_NODATA: i32 = 7; -pub const EAI_NONAME: i32 = 8; -pub const EAI_SERVICE: i32 = 9; -pub const EAI_SOCKTYPE: i32 = 10; -pub const EAI_SYSTEM: i32 = 11; -pub const EAI_OVERFLOW: i32 = 14; - -pub const EFD_SEMAPHORE: i16 = 0o1; -pub const EFD_NONBLOCK: i16 = 0o4000; -pub const EFD_CLOEXEC: i16 = 0o40000; - -pub const F_DUPFD: i32 = 0; -pub const F_GETFD: i32 = 1; -pub const F_SETFD: i32 = 2; -pub const F_GETFL: i32 = 3; -pub const F_SETFL: i32 = 4; - -pub const FD_CLOEXEC: i32 = 1; - -pub const FIONBIO: i32 = 0x8008667e; - -pub const FUTEX_RELATIVE_TIMEOUT: u32 = 1; - -pub const IP_TOS: i32 = 1; -pub const IP_TTL: i32 = 2; -pub const IP_ADD_MEMBERSHIP: i32 = 3; -pub const IP_DROP_MEMBERSHIP: i32 = 4; -pub const IP_MULTICAST_TTL: i32 = 5; -pub const IP_MULTICAST_LOOP: i32 = 7; - -pub const IPPROTO_IP: i32 = 0; -pub const IPPROTO_TCP: i32 = 6; -pub const IPPROTO_UDP: i32 = 17; -pub const IPPROTO_IPV6: i32 = 41; - -pub const IPV6_ADD_MEMBERSHIP: i32 = 12; -pub const IPV6_DROP_MEMBERSHIP: i32 = 13; -pub const IPV6_MULTICAST_LOOP: i32 = 19; -pub const IPV6_V6ONLY: i32 = 27; - -pub const MSG_PEEK: i32 = 1; - -pub const O_RDONLY: i32 = 0o0; -pub const O_WRONLY: i32 = 0o1; -pub const O_RDWR: i32 = 0o2; -pub const O_CREAT: i32 = 0o100; -pub const O_EXCL: 
i32 = 0o200; -pub const O_TRUNC: i32 = 0o1000; -pub const O_APPEND: i32 = 0o2000; -pub const O_NONBLOCK: i32 = 0o4000; -pub const O_DIRECTORY: i32 = 0o200000; - -pub const POLLIN: i16 = 0x1; -pub const POLLPRI: i16 = 0x2; -pub const POLLOUT: i16 = 0x4; -pub const POLLERR: i16 = 0x8; -pub const POLLHUP: i16 = 0x10; -pub const POLLNVAL: i16 = 0x20; -pub const POLLRDNORM: i16 = 0x040; -pub const POLLRDBAND: i16 = 0x080; -pub const POLLWRNORM: i16 = 0x0100; -pub const POLLWRBAND: i16 = 0x0200; -pub const POLLRDHUP: i16 = 0x2000; - -pub const S_IRWXU: mode_t = 0o0700; -pub const S_IRUSR: mode_t = 0o0400; -pub const S_IWUSR: mode_t = 0o0200; -pub const S_IXUSR: mode_t = 0o0100; -pub const S_IRWXG: mode_t = 0o0070; -pub const S_IRGRP: mode_t = 0o0040; -pub const S_IWGRP: mode_t = 0o0020; -pub const S_IXGRP: mode_t = 0o0010; -pub const S_IRWXO: mode_t = 0o0007; -pub const S_IROTH: mode_t = 0o0004; -pub const S_IWOTH: mode_t = 0o0002; -pub const S_IXOTH: mode_t = 0o0001; - -pub const S_IFMT: mode_t = 0o17_0000; -pub const S_IFSOCK: mode_t = 0o14_0000; -pub const S_IFLNK: mode_t = 0o12_0000; -pub const S_IFREG: mode_t = 0o10_0000; -pub const S_IFBLK: mode_t = 0o6_0000; -pub const S_IFDIR: mode_t = 0o4_0000; -pub const S_IFCHR: mode_t = 0o2_0000; -pub const S_IFIFO: mode_t = 0o1_0000; - -pub const SHUT_RD: i32 = 0; -pub const SHUT_WR: i32 = 1; -pub const SHUT_RDWR: i32 = 2; - -pub const SO_REUSEADDR: i32 = 0x0004; -pub const SO_KEEPALIVE: i32 = 0x0008; -pub const SO_BROADCAST: i32 = 0x0020; -pub const SO_LINGER: i32 = 0x0080; -pub const SO_SNDBUF: i32 = 0x1001; -pub const SO_RCVBUF: i32 = 0x1002; -pub const SO_SNDTIMEO: i32 = 0x1005; -pub const SO_RCVTIMEO: i32 = 0x1006; -pub const SO_ERROR: i32 = 0x1007; - -pub const SOCK_STREAM: i32 = 1; -pub const SOCK_DGRAM: i32 = 2; -pub const SOCK_NONBLOCK: i32 = 0o4000; -pub const SOCK_CLOEXEC: i32 = 0o40000; - -pub const SOL_SOCKET: i32 = 4095; - -pub const STDIN_FILENO: c_int = 0; -pub const STDOUT_FILENO: c_int = 1; -pub const STDERR_FILENO: c_int = 2; - -pub const TCP_NODELAY: i32 = 1; - -pub const EPERM: i32 = 1; -pub const ENOENT: i32 = 2; -pub const ESRCH: i32 = 3; -pub const EINTR: i32 = 4; -pub const EIO: i32 = 5; -pub const ENXIO: i32 = 6; -pub const E2BIG: i32 = 7; -pub const ENOEXEC: i32 = 8; -pub const EBADF: i32 = 9; -pub const ECHILD: i32 = 10; -pub const EAGAIN: i32 = 11; -pub const ENOMEM: i32 = 12; -pub const EACCES: i32 = 13; -pub const EFAULT: i32 = 14; -pub const ENOTBLK: i32 = 15; -pub const EBUSY: i32 = 16; -pub const EEXIST: i32 = 17; -pub const EXDEV: i32 = 18; -pub const ENODEV: i32 = 19; -pub const ENOTDIR: i32 = 20; -pub const EISDIR: i32 = 21; -pub const EINVAL: i32 = 22; -pub const ENFILE: i32 = 23; -pub const EMFILE: i32 = 24; -pub const ENOTTY: i32 = 25; -pub const ETXTBSY: i32 = 26; -pub const EFBIG: i32 = 27; -pub const ENOSPC: i32 = 28; -pub const ESPIPE: i32 = 29; -pub const EROFS: i32 = 30; -pub const EMLINK: i32 = 31; -pub const EPIPE: i32 = 32; -pub const EDOM: i32 = 33; -pub const ERANGE: i32 = 34; -pub const EDEADLK: i32 = 35; -pub const ENAMETOOLONG: i32 = 36; -pub const ENOLCK: i32 = 37; -pub const ENOSYS: i32 = 38; -pub const ENOTEMPTY: i32 = 39; -pub const ELOOP: i32 = 40; -pub const EWOULDBLOCK: i32 = EAGAIN; -pub const ENOMSG: i32 = 42; -pub const EIDRM: i32 = 43; -pub const ECHRNG: i32 = 44; -pub const EL2NSYNC: i32 = 45; -pub const EL3HLT: i32 = 46; -pub const EL3RST: i32 = 47; -pub const ELNRNG: i32 = 48; -pub const EUNATCH: i32 = 49; -pub const ENOCSI: i32 = 50; -pub const EL2HLT: i32 = 51; -pub const EBADE: 
i32 = 52; -pub const EBADR: i32 = 53; -pub const EXFULL: i32 = 54; -pub const ENOANO: i32 = 55; -pub const EBADRQC: i32 = 56; -pub const EBADSLT: i32 = 57; -pub const EDEADLOCK: i32 = EDEADLK; -pub const EBFONT: i32 = 59; -pub const ENOSTR: i32 = 60; -pub const ENODATA: i32 = 61; -pub const ETIME: i32 = 62; -pub const ENOSR: i32 = 63; -pub const ENONET: i32 = 64; -pub const ENOPKG: i32 = 65; -pub const EREMOTE: i32 = 66; -pub const ENOLINK: i32 = 67; -pub const EADV: i32 = 68; -pub const ESRMNT: i32 = 69; -pub const ECOMM: i32 = 70; -pub const EPROTO: i32 = 71; -pub const EMULTIHOP: i32 = 72; -pub const EDOTDOT: i32 = 73; -pub const EBADMSG: i32 = 74; -pub const EOVERFLOW: i32 = 75; -pub const ENOTUNIQ: i32 = 76; -pub const EBADFD: i32 = 77; -pub const EREMCHG: i32 = 78; -pub const ELIBACC: i32 = 79; -pub const ELIBBAD: i32 = 80; -pub const ELIBSCN: i32 = 81; -pub const ELIBMAX: i32 = 82; -pub const ELIBEXEC: i32 = 83; -pub const EILSEQ: i32 = 84; -pub const ERESTART: i32 = 85; -pub const ESTRPIPE: i32 = 86; -pub const EUSERS: i32 = 87; -pub const ENOTSOCK: i32 = 88; -pub const EDESTADDRREQ: i32 = 89; -pub const EMSGSIZE: i32 = 90; -pub const EPROTOTYPE: i32 = 91; -pub const ENOPROTOOPT: i32 = 92; -pub const EPROTONOSUPPORT: i32 = 93; -pub const ESOCKTNOSUPPORT: i32 = 94; -pub const EOPNOTSUPP: i32 = 95; -pub const EPFNOSUPPORT: i32 = 96; -pub const EAFNOSUPPORT: i32 = 97; -pub const EADDRINUSE: i32 = 98; -pub const EADDRNOTAVAIL: i32 = 99; -pub const ENETDOWN: i32 = 100; -pub const ENETUNREACH: i32 = 101; -pub const ENETRESET: i32 = 102; -pub const ECONNABORTED: i32 = 103; -pub const ECONNRESET: i32 = 104; -pub const ENOBUFS: i32 = 105; -pub const EISCONN: i32 = 106; -pub const ENOTCONN: i32 = 107; -pub const ESHUTDOWN: i32 = 108; -pub const ETOOMANYREFS: i32 = 109; -pub const ETIMEDOUT: i32 = 110; -pub const ECONNREFUSED: i32 = 111; -pub const EHOSTDOWN: i32 = 112; -pub const EHOSTUNREACH: i32 = 113; -pub const EALREADY: i32 = 114; -pub const EINPROGRESS: i32 = 115; -pub const ESTALE: i32 = 116; -pub const EUCLEAN: i32 = 117; -pub const ENOTNAM: i32 = 118; -pub const ENAVAIL: i32 = 119; -pub const EISNAM: i32 = 120; -pub const EREMOTEIO: i32 = 121; -pub const EDQUOT: i32 = 122; -pub const ENOMEDIUM: i32 = 123; -pub const EMEDIUMTYPE: i32 = 124; -pub const ECANCELED: i32 = 125; -pub const ENOKEY: i32 = 126; -pub const EKEYEXPIRED: i32 = 127; -pub const EKEYREVOKED: i32 = 128; -pub const EKEYREJECTED: i32 = 129; -pub const EOWNERDEAD: i32 = 130; -pub const ENOTRECOVERABLE: i32 = 131; -pub const ERFKILL: i32 = 132; -pub const EHWPOISON: i32 = 133; - -extern "C" { - #[link_name = "sys_alloc"] - pub fn alloc(size: usize, align: usize) -> *mut u8; - - #[link_name = "sys_alloc_zeroed"] - pub fn alloc_zeroed(size: usize, align: usize) -> *mut u8; - - #[link_name = "sys_realloc"] - pub fn realloc(ptr: *mut u8, size: usize, align: usize, new_size: usize) -> *mut u8; - - #[link_name = "sys_dealloc"] - pub fn dealloc(ptr: *mut u8, size: usize, align: usize); - - #[link_name = "sys_exit"] - pub fn exit(status: i32) -> !; - - #[link_name = "sys_abort"] - pub fn abort() -> !; - - #[link_name = "sys_errno"] - pub fn errno() -> i32; - - #[link_name = "sys_clock_gettime"] - pub fn clock_gettime(clockid: clockid_t, tp: *mut timespec) -> i32; - - #[link_name = "sys_nanosleep"] - pub fn nanosleep(req: *const timespec) -> i32; - - #[link_name = "sys_available_parallelism"] - pub fn available_parallelism() -> usize; - - #[link_name = "sys_futex_wait"] - pub fn futex_wait( - address: *mut u32, - expected: u32, 
- timeout: *const timespec, - flags: u32, - ) -> i32; - - #[link_name = "sys_futex_wake"] - pub fn futex_wake(address: *mut u32, count: i32) -> i32; - - #[link_name = "sys_stat"] - pub fn stat(path: *const c_char, stat: *mut stat) -> i32; - - #[link_name = "sys_fstat"] - pub fn fstat(fd: i32, stat: *mut stat) -> i32; - - #[link_name = "sys_lstat"] - pub fn lstat(path: *const c_char, stat: *mut stat) -> i32; - - #[link_name = "sys_open"] - pub fn open(path: *const c_char, flags: i32, mode: mode_t) -> i32; - - #[link_name = "sys_unlink"] - pub fn unlink(path: *const c_char) -> i32; - - #[link_name = "sys_mkdir"] - pub fn mkdir(path: *const c_char, mode: mode_t) -> i32; - - #[link_name = "sys_rmdir"] - pub fn rmdir(path: *const c_char) -> i32; - - #[link_name = "sys_read"] - pub fn read(fd: i32, buf: *mut u8, len: usize) -> isize; - - #[link_name = "sys_write"] - pub fn write(fd: i32, buf: *const u8, len: usize) -> isize; - - #[link_name = "sys_readv"] - pub fn readv(fd: i32, iov: *const iovec, iovcnt: usize) -> isize; - - #[link_name = "sys_writev"] - pub fn writev(fd: i32, iov: *const iovec, iovcnt: usize) -> isize; - - #[link_name = "sys_close"] - pub fn close(fd: i32) -> i32; - - #[link_name = "sys_dup"] - pub fn dup(fd: i32) -> i32; - - #[link_name = "sys_fcntl"] - pub fn fcntl(fd: i32, cmd: i32, arg: i32) -> i32; - - #[link_name = "sys_getdents64"] - pub fn getdents64(fd: i32, dirp: *mut dirent64, count: usize) -> isize; - - #[link_name = "sys_getaddrinfo"] - pub fn getaddrinfo( - nodename: *const c_char, - servname: *const c_char, - hints: *const addrinfo, - res: *mut *mut addrinfo, - ) -> i32; - - #[link_name = "sys_freeaddrinfo"] - pub fn freeaddrinfo(ai: *mut addrinfo); - - #[link_name = "sys_socket"] - pub fn socket(domain: i32, ty: i32, protocol: i32) -> i32; - - #[link_name = "sys_bind"] - pub fn bind(sockfd: i32, addr: *const sockaddr, addrlen: socklen_t) -> i32; - - #[link_name = "sys_listen"] - pub fn listen(sockfd: i32, backlog: i32) -> i32; - - #[link_name = "sys_accept"] - pub fn accept(sockfd: i32, addr: *mut sockaddr, addrlen: *mut socklen_t) -> i32; - - #[link_name = "sys_connect"] - pub fn connect(sockfd: i32, addr: *const sockaddr, addrlen: socklen_t) -> i32; - - #[link_name = "sys_recv"] - pub fn recv(sockfd: i32, buf: *mut u8, len: usize, flags: i32) -> isize; - - #[link_name = "sys_recvfrom"] - pub fn recvfrom( - sockfd: i32, - buf: *mut c_void, - len: usize, - flags: i32, - addr: *mut sockaddr, - addrlen: *mut socklen_t, - ) -> isize; - - #[link_name = "sys_send"] - pub fn send(sockfd: i32, buf: *const c_void, len: usize, flags: i32) -> isize; - - #[link_name = "sys_sendto"] - pub fn sendto( - sockfd: i32, - buf: *const c_void, - len: usize, - flags: i32, - to: *const sockaddr, - tolen: socklen_t, - ) -> isize; - - #[link_name = "sys_getpeername"] - pub fn getpeername(sockfd: i32, addr: *mut sockaddr, addrlen: *mut socklen_t) -> i32; - - #[link_name = "sys_getsockname"] - pub fn getsockname(sockfd: i32, addr: *mut sockaddr, addrlen: *mut socklen_t) -> i32; - - #[link_name = "sys_getsockopt"] - pub fn getsockopt( - sockfd: i32, - level: i32, - optname: i32, - optval: *mut c_void, - optlen: *mut socklen_t, - ) -> i32; - - #[link_name = "sys_setsockopt"] - pub fn setsockopt( - sockfd: i32, - level: i32, - optname: i32, - optval: *const c_void, - optlen: socklen_t, - ) -> i32; - - #[link_name = "sys_ioctl"] - pub fn ioctl(sockfd: i32, cmd: i32, argp: *mut c_void) -> i32; - - #[link_name = "sys_shutdown"] - pub fn shutdown(sockfd: i32, how: i32) -> i32; - - #[link_name 
= "sys_eventfd"] - pub fn eventfd(initval: u64, flags: i16) -> i32; - - #[link_name = "sys_poll"] - pub fn poll(fds: *mut pollfd, nfds: nfds_t, timeout: i32) -> i32; -} diff --git a/vendor/libc/src/lib.rs b/vendor/libc/src/lib.rs deleted file mode 100644 index aa919b5ca038e3..00000000000000 --- a/vendor/libc/src/lib.rs +++ /dev/null @@ -1,159 +0,0 @@ -//! libc - Raw FFI bindings to platforms' system libraries -#![crate_name = "libc"] -#![crate_type = "rlib"] -#![allow( - renamed_and_removed_lints, // Keep this order. - unknown_lints, // Keep this order. - nonstandard_style, - overflowing_literals, - unused_macros, - unused_macro_rules, -)] -#![warn( - missing_copy_implementations, - missing_debug_implementations, - safe_packed_borrows -)] -// Prepare for a future upgrade -#![warn(rust_2024_compatibility)] -// Things missing for 2024 that are blocked on MSRV or breakage -#![allow( - missing_unsafe_on_extern, - edition_2024_expr_fragment_specifier, - // Allowed globally, the warning is enabled in individual modules as we work through them - unsafe_op_in_unsafe_fn -)] -#![cfg_attr(libc_deny_warnings, deny(warnings))] -// Attributes needed when building as part of the standard library -#![cfg_attr(feature = "rustc-dep-of-std", feature(link_cfg, no_core))] -#![cfg_attr(libc_thread_local, feature(thread_local))] -#![cfg_attr(feature = "rustc-dep-of-std", allow(internal_features))] -// DIFF(1.0): The thread local references that raise this lint were removed in 1.0 -#![cfg_attr(feature = "rustc-dep-of-std", allow(static_mut_refs))] -#![cfg_attr(not(feature = "rustc-dep-of-std"), no_std)] -#![cfg_attr(feature = "rustc-dep-of-std", no_core)] - -#[macro_use] -mod macros; -mod new; - -cfg_if! { - if #[cfg(feature = "rustc-dep-of-std")] { - extern crate rustc_std_workspace_core as core; - } -} - -pub use core::ffi::c_void; - -#[allow(unused_imports)] // needed while the module is empty on some platforms -pub use new::*; - -cfg_if! 
{ - if #[cfg(windows)] { - mod primitives; - pub use crate::primitives::*; - - mod windows; - pub use crate::windows::*; - - prelude!(); - } else if #[cfg(target_os = "fuchsia")] { - mod primitives; - pub use crate::primitives::*; - - mod fuchsia; - pub use crate::fuchsia::*; - - prelude!(); - } else if #[cfg(target_os = "switch")] { - mod primitives; - pub use primitives::*; - - mod switch; - pub use switch::*; - - prelude!(); - } else if #[cfg(target_os = "psp")] { - mod primitives; - pub use primitives::*; - - mod psp; - pub use crate::psp::*; - - prelude!(); - } else if #[cfg(target_os = "vxworks")] { - mod primitives; - pub use crate::primitives::*; - - mod vxworks; - pub use crate::vxworks::*; - - prelude!(); - } else if #[cfg(target_os = "solid_asp3")] { - mod primitives; - pub use crate::primitives::*; - - mod solid; - pub use crate::solid::*; - - prelude!(); - } else if #[cfg(unix)] { - mod primitives; - pub use crate::primitives::*; - - mod unix; - pub use crate::unix::*; - - prelude!(); - } else if #[cfg(target_os = "hermit")] { - mod primitives; - pub use crate::primitives::*; - - mod hermit; - pub use crate::hermit::*; - - prelude!(); - } else if #[cfg(target_os = "teeos")] { - mod primitives; - pub use primitives::*; - - mod teeos; - pub use teeos::*; - - prelude!(); - } else if #[cfg(target_os = "trusty")] { - mod primitives; - pub use crate::primitives::*; - - mod trusty; - pub use crate::trusty::*; - - prelude!(); - } else if #[cfg(all(target_env = "sgx", target_vendor = "fortanix"))] { - mod primitives; - pub use crate::primitives::*; - - mod sgx; - pub use crate::sgx::*; - - prelude!(); - } else if #[cfg(any(target_env = "wasi", target_os = "wasi"))] { - mod primitives; - pub use crate::primitives::*; - - mod wasi; - pub use crate::wasi::*; - - prelude!(); - } else if #[cfg(target_os = "xous")] { - mod primitives; - pub use crate::primitives::*; - - mod xous; - pub use crate::xous::*; - - prelude!(); - } else { - // non-supported targets: empty... - } -} diff --git a/vendor/libc/src/macros.rs b/vendor/libc/src/macros.rs deleted file mode 100644 index 6906da6bd70da6..00000000000000 --- a/vendor/libc/src/macros.rs +++ /dev/null @@ -1,446 +0,0 @@ -/// A macro for defining #[cfg] if-else statements. -/// -/// This is similar to the `if/elif` C preprocessor macro by allowing definition -/// of a cascade of `#[cfg]` cases, emitting the implementation which matches -/// first. -/// -/// This allows you to conveniently provide a long list #[cfg]'d blocks of code -/// without having to rewrite each clause multiple times. -macro_rules! cfg_if { - // match if/else chains with a final `else` - ($( - if #[cfg($($meta:meta),*)] { $($it:item)* } - ) else * else { - $($it2:item)* - }) => { - cfg_if! { - @__items - () ; - $( ( ($($meta),*) ($($it)*) ), )* - ( () ($($it2)*) ), - } - }; - - // match if/else chains lacking a final `else` - ( - if #[cfg($($i_met:meta),*)] { $($i_it:item)* } - $( - else if #[cfg($($e_met:meta),*)] { $($e_it:item)* } - )* - ) => { - cfg_if! { - @__items - () ; - ( ($($i_met),*) ($($i_it)*) ), - $( ( ($($e_met),*) ($($e_it)*) ), )* - ( () () ), - } - }; - - // Internal and recursive macro to emit all the items - // - // Collects all the negated `cfg`s in a list at the beginning and after the - // semicolon is all the remaining items - (@__items ($($not:meta,)*) ; ) => {}; - (@__items ($($not:meta,)*) ; ( ($($m:meta),*) ($($it:item)*) ), - $($rest:tt)*) => { - // Emit all items within one block, applying an appropriate #[cfg]. 
The - // #[cfg] will require all `$m` matchers specified and must also negate - // all previous matchers. - cfg_if! { @__apply cfg(all($($m,)* not(any($($not),*)))), $($it)* } - - // Recurse to emit all other items in `$rest`, and when we do so add all - // our `$m` matchers to the list of `$not` matchers as future emissions - // will have to negate everything we just matched as well. - cfg_if! { @__items ($($not,)* $($m,)*) ; $($rest)* } - }; - - // Internal macro to Apply a cfg attribute to a list of items - (@__apply $m:meta, $($it:item)*) => { - $(#[$m] $it)* - }; -} - -/// Create an internal crate prelude with `core` reexports and common types. -macro_rules! prelude { - () => { - mod types; - - /// Frequently-used types that are available on all platforms - /// - /// We need to reexport the core types so this works with `rust-dep-of-std`. - mod prelude { - // Exports from `core` - #[allow(unused_imports)] - pub(crate) use core::clone::Clone; - #[allow(unused_imports)] - pub(crate) use core::default::Default; - #[allow(unused_imports)] - pub(crate) use core::marker::{Copy, Send, Sync}; - #[allow(unused_imports)] - pub(crate) use core::option::Option; - #[allow(unused_imports)] - pub(crate) use core::prelude::v1::derive; - #[allow(unused_imports)] - pub(crate) use core::{fmt, hash, iter, mem, ptr}; - - #[allow(unused_imports)] - pub(crate) use fmt::Debug; - #[allow(unused_imports)] - pub(crate) use mem::{align_of, align_of_val, size_of, size_of_val}; - - #[allow(unused_imports)] - pub(crate) use crate::types::{CEnumRepr, Padding}; - // Commonly used types defined in this crate - #[allow(unused_imports)] - pub(crate) use crate::{ - c_char, c_double, c_float, c_int, c_long, c_longlong, c_short, c_uchar, c_uint, - c_ulong, c_ulonglong, c_ushort, c_void, intptr_t, size_t, ssize_t, uintptr_t, - }; - } - }; -} - -/// Implement `Clone` and `Copy` for a struct, as well as `Debug`, `Eq`, `Hash`, and -/// `PartialEq` if the `extra_traits` feature is enabled. -/// -/// Use [`s_no_extra_traits`] for structs where the `extra_traits` feature does not -/// make sense, and for unions. -macro_rules! s { - ($( - $(#[$attr:meta])* - pub $t:ident $i:ident { $($field:tt)* } - )*) => ($( - s!(it: $(#[$attr])* pub $t $i { $($field)* }); - )*); - - (it: $(#[$attr:meta])* pub union $i:ident { $($field:tt)* }) => ( - compile_error!("unions cannot derive extra traits, use s_no_extra_traits instead"); - ); - - (it: $(#[$attr:meta])* pub struct $i:ident { $($field:tt)* }) => ( - __item! { - #[repr(C)] - #[cfg_attr( - feature = "extra_traits", - ::core::prelude::v1::derive(Eq, Hash, PartialEq) - )] - #[::core::prelude::v1::derive( - ::core::clone::Clone, - ::core::marker::Copy, - ::core::fmt::Debug, - )] - #[allow(deprecated)] - $(#[$attr])* - pub struct $i { $($field)* } - } - ); -} - -/// Implement `Clone` and `Copy` for a tuple struct, as well as `Debug`, `Eq`, `Hash`, -/// and `PartialEq` if the `extra_traits` feature is enabled. -/// -/// This is the same as [`s`] but works for tuple structs. -macro_rules! s_paren { - ($( - $(#[$attr:meta])* - pub struct $i:ident ( $($field:tt)* ); - )*) => ($( - __item! { - #[cfg_attr( - feature = "extra_traits", - ::core::prelude::v1::derive(Eq, Hash, PartialEq) - )] - #[::core::prelude::v1::derive( - ::core::clone::Clone, - ::core::marker::Copy, - ::core::fmt::Debug, - )] - $(#[$attr])* - pub struct $i ( $($field)* ); - } - )*); -} - -/// Implement `Clone`, `Copy`, and `Debug` since those can be derived, but exclude `PartialEq`, -/// `Eq`, and `Hash`. 
-/// -/// Most items will prefer to use [`s`]. -macro_rules! s_no_extra_traits { - ($( - $(#[$attr:meta])* - pub $t:ident $i:ident { $($field:tt)* } - )*) => ($( - s_no_extra_traits!(it: $(#[$attr])* pub $t $i { $($field)* }); - )*); - - (it: $(#[$attr:meta])* pub union $i:ident { $($field:tt)* }) => ( - __item! { - #[repr(C)] - #[::core::prelude::v1::derive(::core::clone::Clone, ::core::marker::Copy)] - $(#[$attr])* - pub union $i { $($field)* } - } - - impl ::core::fmt::Debug for $i { - fn fmt(&self, f: &mut ::core::fmt::Formatter<'_>) -> ::core::fmt::Result { - f.debug_struct(::core::stringify!($i)).finish_non_exhaustive() - } - } - ); - - (it: $(#[$attr:meta])* pub struct $i:ident { $($field:tt)* }) => ( - __item! { - #[repr(C)] - #[::core::prelude::v1::derive( - ::core::clone::Clone, - ::core::marker::Copy, - ::core::fmt::Debug, - )] - $(#[$attr])* - pub struct $i { $($field)* } - } - ); -} - -/// Specify that an enum should have no traits that aren't specified in the macro -/// invocation, i.e. no `Clone` or `Copy`. -macro_rules! missing { - ($( - $(#[$attr:meta])* - pub enum $i:ident {} - )*) => ($( - $(#[$attr])* - #[allow(missing_copy_implementations)] - pub enum $i { } - )*); -} - -/// Implement `Clone` and `Copy` for an enum, as well as `Debug`, `Eq`, `Hash`, and -/// `PartialEq` if the `extra_traits` feature is enabled. -// FIXME(#4419): Replace all uses of `e!` with `c_enum!` -macro_rules! e { - ($( - $(#[$attr:meta])* - pub enum $i:ident { $($field:tt)* } - )*) => ($( - __item! { - #[cfg_attr( - feature = "extra_traits", - ::core::prelude::v1::derive(Eq, Hash, PartialEq) - )] - #[::core::prelude::v1::derive( - ::core::clone::Clone, - ::core::marker::Copy, - ::core::fmt::Debug, - )] - $(#[$attr])* - pub enum $i { $($field)* } - } - )*); -} - -/// Represent a C enum as Rust constants and a type. -/// -/// C enums can't soundly be mapped to Rust enums since C enums are allowed to have duplicates or -/// unlisted values, but this is UB in Rust. This enum doesn't implement any traits, its main -/// purpose is to calculate the correct enum values. -/// -/// See for more. -macro_rules! c_enum { - ($( - $(#[repr($repr:ty)])? - pub enum $ty_name:ident { - $($variant:ident $(= $value:expr)?,)+ - } - )+) => { - $(c_enum!(@expand; - $(#[repr($repr)])? - pub enum $ty_name { - $($variant $(= $value)?,)+ - } - );)+ - }; - - (@expand; - $(#[repr($repr:ty)])? - pub enum $ty_name:ident { - $($variant:ident $(= $value:expr)?,)+ - } - ) => { - pub type $ty_name = c_enum!(@ty $($repr)?); - c_enum!(@one; $ty_name; 0; $($variant $(= $value)?,)+); - }; - - // Matcher for a single variant - (@one; $_ty_name:ident; $_idx:expr;) => {}; - ( - @one; $ty_name:ident; $default_val:expr; - $variant:ident $(= $value:expr)?, - $($tail:tt)* - ) => { - pub const $variant: $ty_name = { - #[allow(unused_variables)] - let r = $default_val; - $(let r = $value;)? - r - }; - - // The next value is always one more than the previous value, unless - // set explicitly. - c_enum!(@one; $ty_name; $variant + 1; $($tail)*); - }; - - // Use a specific type if provided, otherwise default to `CEnumRepr` - (@ty $repr:ty) => { $repr }; - (@ty) => { $crate::prelude::CEnumRepr }; -} - -/// Define a `unsafe` function. -macro_rules! f { - ($( - $(#[$attr:meta])* - // Less than ideal hack to match either `fn` or `const fn`. - pub $(fn $i:ident)? $(const fn $const_i:ident)? - ($($arg:ident: $argty:ty),* $(,)*) -> $ret:ty - $body:block - )+) => {$( - #[inline] - $(#[$attr])* - pub $(unsafe extern "C" fn $i)? 
$(const unsafe extern "C" fn $const_i)? - ($($arg: $argty),*) -> $ret - $body - )+}; -} - -/// Define a safe function. -macro_rules! safe_f { - ($( - $(#[$attr:meta])* - // Less than ideal hack to match either `fn` or `const fn`. - pub $(fn $i:ident)? $(const fn $const_i:ident)? - ($($arg:ident: $argty:ty),* $(,)*) -> $ret:ty - $body:block - )+) => {$( - #[inline] - $(#[$attr])* - pub $(extern "C" fn $i)? $(const extern "C" fn $const_i)? - ($($arg: $argty),*) -> $ret - $body - )+}; -} - -macro_rules! __item { - ($i:item) => { - $i - }; -} - -// This macro is used to deprecate items that should be accessed via the mach2 crate -macro_rules! deprecated_mach { - (pub const $id:ident: $ty:ty = $expr:expr;) => { - #[deprecated( - since = "0.2.55", - note = "Use the `mach2` crate instead", - )] - #[allow(deprecated)] - pub const $id: $ty = $expr; - }; - ($(pub const $id:ident: $ty:ty = $expr:expr;)*) => { - $( - deprecated_mach!( - pub const $id: $ty = $expr; - ); - )* - }; - (pub type $id:ident = $ty:ty;) => { - #[deprecated( - since = "0.2.55", - note = "Use the `mach2` crate instead", - )] - #[allow(deprecated)] - pub type $id = $ty; - }; - ($(pub type $id:ident = $ty:ty;)*) => { - $( - deprecated_mach!( - pub type $id = $ty; - ); - )* - } -} - -#[cfg(test)] -mod tests { - use crate::types::CEnumRepr; - - #[test] - fn c_enumbasic() { - // By default, variants get sequential values. - c_enum! { - pub enum e { - VAR0, - VAR1, - VAR2, - } - } - - assert_eq!(VAR0, 0 as CEnumRepr); - assert_eq!(VAR1, 1 as CEnumRepr); - assert_eq!(VAR2, 2 as CEnumRepr); - } - - #[test] - fn c_enumrepr() { - // By default, variants get sequential values. - c_enum! { - #[repr(u16)] - pub enum e { - VAR0, - } - } - - assert_eq!(VAR0, 0_u16); - } - - #[test] - fn c_enumset_value() { - // Setting an explicit value resets the count. - c_enum! { - pub enum e { - VAR2 = 2, - VAR3, - VAR4, - } - } - - assert_eq!(VAR2, 2 as CEnumRepr); - assert_eq!(VAR3, 3 as CEnumRepr); - assert_eq!(VAR4, 4 as CEnumRepr); - } - - #[test] - fn c_enummultiple_set_value() { - // C enums always take one more than the previous value, unless set to a specific - // value. Duplicates are allowed. - c_enum! { - pub enum e { - VAR0, - VAR2_0 = 2, - VAR3_0, - VAR4_0, - VAR2_1 = 2, - VAR3_1, - VAR4_1, - } - } - - assert_eq!(VAR0, 0 as CEnumRepr); - assert_eq!(VAR2_0, 2 as CEnumRepr); - assert_eq!(VAR3_0, 3 as CEnumRepr); - assert_eq!(VAR4_0, 4 as CEnumRepr); - assert_eq!(VAR2_1, 2 as CEnumRepr); - assert_eq!(VAR3_1, 3 as CEnumRepr); - assert_eq!(VAR4_1, 4 as CEnumRepr); - } -} diff --git a/vendor/libc/src/new/bionic/mod.rs b/vendor/libc/src/new/bionic/mod.rs deleted file mode 100644 index 644a4ab96d90fc..00000000000000 --- a/vendor/libc/src/new/bionic/mod.rs +++ /dev/null @@ -1,2 +0,0 @@ -mod sys; -pub use sys::*; diff --git a/vendor/libc/src/new/bionic/sys/mod.rs b/vendor/libc/src/new/bionic/sys/mod.rs deleted file mode 100644 index fd96d0821ac88c..00000000000000 --- a/vendor/libc/src/new/bionic/sys/mod.rs +++ /dev/null @@ -1,2 +0,0 @@ -mod socket; -pub use socket::*; diff --git a/vendor/libc/src/new/bionic/sys/socket.rs b/vendor/libc/src/new/bionic/sys/socket.rs deleted file mode 100644 index 49af36fe93356c..00000000000000 --- a/vendor/libc/src/new/bionic/sys/socket.rs +++ /dev/null @@ -1,51 +0,0 @@ -//! Header: `bionic/libc/include/sys/socket.h` - -use crate::prelude::*; - -s! 
{ - pub struct msghdr { - pub msg_name: *mut c_void, - pub msg_namelen: crate::socklen_t, - pub msg_iov: *mut crate::iovec, - pub msg_iovlen: size_t, - pub msg_control: *mut c_void, - pub msg_controllen: size_t, - pub msg_flags: c_int, - } - - pub struct cmsghdr { - pub cmsg_len: size_t, - pub cmsg_level: c_int, - pub cmsg_type: c_int, - } - - pub struct ucred { - pub pid: crate::pid_t, - pub uid: crate::uid_t, - pub gid: crate::gid_t, - } -} - -extern "C" { - pub fn recvmmsg( - sockfd: c_int, - msgvec: *mut crate::mmsghdr, - vlen: c_uint, - flags: c_int, - timeout: *const crate::timespec, - ) -> c_int; - pub fn sendmmsg( - sockfd: c_int, - msgvec: *const crate::mmsghdr, - vlen: c_uint, - flags: c_int, - ) -> c_int; - pub fn recvfrom( - socket: c_int, - buf: *mut c_void, - len: size_t, - flags: c_int, - addr: *mut crate::sockaddr, - addrlen: *mut crate::socklen_t, - ) -> ssize_t; -} diff --git a/vendor/libc/src/new/linux_uapi/linux/can.rs b/vendor/libc/src/new/linux_uapi/linux/can.rs deleted file mode 100644 index b9479a63bacea9..00000000000000 --- a/vendor/libc/src/new/linux_uapi/linux/can.rs +++ /dev/null @@ -1,136 +0,0 @@ -//! Header: `uapi/linux/can.h` - -pub(crate) mod j1939; -pub(crate) mod raw; - -pub use j1939::*; -pub use raw::*; - -use crate::prelude::*; - -pub const CAN_EFF_FLAG: canid_t = 0x80000000; -pub const CAN_RTR_FLAG: canid_t = 0x40000000; -pub const CAN_ERR_FLAG: canid_t = 0x20000000; - -pub const CAN_SFF_MASK: canid_t = 0x000007FF; -pub const CAN_EFF_MASK: canid_t = 0x1FFFFFFF; -pub const CAN_ERR_MASK: canid_t = 0x1FFFFFFF; -pub const CANXL_PRIO_MASK: crate::canid_t = CAN_SFF_MASK; - -pub type canid_t = u32; - -pub const CAN_SFF_ID_BITS: c_int = 11; -pub const CAN_EFF_ID_BITS: c_int = 29; -pub const CANXL_PRIO_BITS: c_int = CAN_SFF_ID_BITS; - -pub type can_err_mask_t = u32; - -pub const CAN_MAX_DLC: c_int = 8; -pub const CAN_MAX_DLEN: usize = 8; - -pub const CANFD_MAX_DLC: c_int = 15; -pub const CANFD_MAX_DLEN: usize = 64; - -pub const CANXL_MIN_DLC: c_int = 0; -pub const CANXL_MAX_DLC: c_int = 2047; -pub const CANXL_MAX_DLC_MASK: c_int = 0x07FF; -pub const CANXL_MIN_DLEN: usize = 1; -pub const CANXL_MAX_DLEN: usize = 2048; - -s! { - #[repr(align(8))] - pub struct can_frame { - pub can_id: canid_t, - // FIXME(1.0): this field was renamed to `len` in Linux 5.11 - pub can_dlc: u8, - __pad: u8, - __res0: u8, - pub len8_dlc: u8, - pub data: [u8; CAN_MAX_DLEN], - } -} - -pub const CANFD_BRS: c_int = 0x01; -pub const CANFD_ESI: c_int = 0x02; -pub const CANFD_FDF: c_int = 0x04; - -s! { - #[repr(align(8))] - pub struct canfd_frame { - pub can_id: canid_t, - pub len: u8, - pub flags: u8, - __res0: u8, - __res1: u8, - pub data: [u8; CANFD_MAX_DLEN], - } -} - -pub const CANXL_XLF: c_int = 0x80; -pub const CANXL_SEC: c_int = 0x01; - -s! 
{ - #[repr(align(8))] - pub struct canxl_frame { - pub prio: canid_t, - pub flags: u8, - pub sdt: u8, - pub len: u16, - pub af: u32, - pub data: [u8; CANXL_MAX_DLEN], - } -} - -pub const CAN_MTU: usize = size_of::<can_frame>(); -pub const CANFD_MTU: usize = size_of::<canfd_frame>(); -pub const CANXL_MTU: usize = size_of::<canxl_frame>(); -// FIXME(offset_of): use `core::mem::offset_of!` once that is available -// https://github.com/rust-lang/rfcs/pull/3308 -// pub const CANXL_HDR_SIZE: usize = core::mem::offset_of!(canxl_frame, data); -pub const CANXL_HDR_SIZE: usize = 12; -pub const CANXL_MIN_MTU: usize = CANXL_HDR_SIZE + 64; -pub const CANXL_MAX_MTU: usize = CANXL_MTU; - -pub const CAN_RAW: c_int = 1; -pub const CAN_BCM: c_int = 2; -pub const CAN_TP16: c_int = 3; -pub const CAN_TP20: c_int = 4; -pub const CAN_MCNET: c_int = 5; -pub const CAN_ISOTP: c_int = 6; -pub const CAN_J1939: c_int = 7; -pub const CAN_NPROTO: c_int = 8; - -pub const SOL_CAN_BASE: c_int = 100; - -s_no_extra_traits! { - pub struct sockaddr_can { - pub can_family: crate::sa_family_t, - pub can_ifindex: c_int, - pub can_addr: __c_anonymous_sockaddr_can_can_addr, - } - - pub union __c_anonymous_sockaddr_can_can_addr { - pub tp: __c_anonymous_sockaddr_can_tp, - pub j1939: __c_anonymous_sockaddr_can_j1939, - } -} - -s! { - pub struct __c_anonymous_sockaddr_can_tp { - pub rx_id: canid_t, - pub tx_id: canid_t, - } - - pub struct __c_anonymous_sockaddr_can_j1939 { - pub name: u64, - pub pgn: u32, - pub addr: u8, - } - - pub struct can_filter { - pub can_id: canid_t, - pub can_mask: canid_t, - } -} - -pub const CAN_INV_FILTER: canid_t = 0x20000000; diff --git a/vendor/libc/src/new/linux_uapi/linux/can/j1939.rs b/vendor/libc/src/new/linux_uapi/linux/can/j1939.rs deleted file mode 100644 index fdf425ce6c0c1b..00000000000000 --- a/vendor/libc/src/new/linux_uapi/linux/can/j1939.rs +++ /dev/null @@ -1,60 +0,0 @@ -//! `linux/can/j1939.h` - -pub use crate::linux::can::*; - -pub const J1939_MAX_UNICAST_ADDR: c_uchar = 0xfd; -pub const J1939_IDLE_ADDR: c_uchar = 0xfe; -pub const J1939_NO_ADDR: c_uchar = 0xff; -pub const J1939_NO_NAME: c_ulong = 0; -pub const J1939_PGN_REQUEST: c_uint = 0x0ea00; -pub const J1939_PGN_ADDRESS_CLAIMED: c_uint = 0x0ee00; -pub const J1939_PGN_ADDRESS_COMMANDED: c_uint = 0x0fed8; -pub const J1939_PGN_PDU1_MAX: c_uint = 0x3ff00; -pub const J1939_PGN_MAX: c_uint = 0x3ffff; -pub const J1939_NO_PGN: c_uint = 0x40000; - -pub type pgn_t = u32; -pub type priority_t = u8; -pub type name_t = u64; - -pub const SOL_CAN_J1939: c_int = SOL_CAN_BASE + CAN_J1939; - -// FIXME(cleanup): these could use c_enum if it can accept anonymous enums. - -pub const SO_J1939_FILTER: c_int = 1; -pub const SO_J1939_PROMISC: c_int = 2; -pub const SO_J1939_SEND_PRIO: c_int = 3; -pub const SO_J1939_ERRQUEUE: c_int = 4; - -pub const SCM_J1939_DEST_ADDR: c_int = 1; -pub const SCM_J1939_DEST_NAME: c_int = 2; -pub const SCM_J1939_PRIO: c_int = 3; -pub const SCM_J1939_ERRQUEUE: c_int = 4; - -pub const J1939_NLA_PAD: c_int = 0; -pub const J1939_NLA_BYTES_ACKED: c_int = 1; -pub const J1939_NLA_TOTAL_SIZE: c_int = 2; -pub const J1939_NLA_PGN: c_int = 3; -pub const J1939_NLA_SRC_NAME: c_int = 4; -pub const J1939_NLA_DEST_NAME: c_int = 5; -pub const J1939_NLA_SRC_ADDR: c_int = 6; -pub const J1939_NLA_DEST_ADDR: c_int = 7; - -pub const J1939_EE_INFO_NONE: c_int = 0; -pub const J1939_EE_INFO_TX_ABORT: c_int = 1; -pub const J1939_EE_INFO_RX_RTS: c_int = 2; -pub const J1939_EE_INFO_RX_DPO: c_int = 3; -pub const J1939_EE_INFO_RX_ABORT: c_int = 4; - -s!
{ - pub struct j1939_filter { - pub name: name_t, - pub name_mask: name_t, - pub pgn: pgn_t, - pub pgn_mask: pgn_t, - pub addr: u8, - pub addr_mask: u8, - } -} - -pub const J1939_FILTER_MAX: c_int = 512; diff --git a/vendor/libc/src/new/linux_uapi/linux/can/raw.rs b/vendor/libc/src/new/linux_uapi/linux/can/raw.rs deleted file mode 100644 index 1f92a13edbba69..00000000000000 --- a/vendor/libc/src/new/linux_uapi/linux/can/raw.rs +++ /dev/null @@ -1,15 +0,0 @@ -//! `linux/can/raw.h` - -pub use crate::linux::can::*; - -pub const SOL_CAN_RAW: c_int = SOL_CAN_BASE + CAN_RAW; -pub const CAN_RAW_FILTER_MAX: c_int = 512; - -// FIXME(cleanup): use `c_enum!`, which needs to be adapted to allow omitting a type. -pub const CAN_RAW_FILTER: c_int = 1; -pub const CAN_RAW_ERR_FILTER: c_int = 2; -pub const CAN_RAW_LOOPBACK: c_int = 3; -pub const CAN_RAW_RECV_OWN_MSGS: c_int = 4; -pub const CAN_RAW_FD_FRAMES: c_int = 5; -pub const CAN_RAW_JOIN_FILTERS: c_int = 6; -pub const CAN_RAW_XL_FRAMES: c_int = 7; diff --git a/vendor/libc/src/new/linux_uapi/linux/mod.rs b/vendor/libc/src/new/linux_uapi/linux/mod.rs deleted file mode 100644 index 4a9c04d6396b1a..00000000000000 --- a/vendor/libc/src/new/linux_uapi/linux/mod.rs +++ /dev/null @@ -1,4 +0,0 @@ -//! The `linux` directory within `include/uapi` in the Linux source tree. - -pub(crate) mod can; -pub use can::*; diff --git a/vendor/libc/src/new/linux_uapi/mod.rs b/vendor/libc/src/new/linux_uapi/mod.rs deleted file mode 100644 index e0d4e094c435f1..00000000000000 --- a/vendor/libc/src/new/linux_uapi/mod.rs +++ /dev/null @@ -1,4 +0,0 @@ -//! This directory maps to `include/uapi` in the Linux source tree. - -pub(crate) mod linux; -pub use linux::*; diff --git a/vendor/libc/src/new/mod.rs b/vendor/libc/src/new/mod.rs deleted file mode 100644 index 0a2a55b0f469bb..00000000000000 --- a/vendor/libc/src/new/mod.rs +++ /dev/null @@ -1,15 +0,0 @@ -//! This module contains the future directory structure. If possible, new definitions should -//! get added here. -//! -//! Eventually everything should be moved over, and we will move this directory to the top -//! level in `src`. - -cfg_if! { - if #[cfg(target_os = "linux")] { - mod linux_uapi; - pub use linux_uapi::*; - } else if #[cfg(target_os = "android")] { - mod bionic; - pub use bionic::*; - } -} diff --git a/vendor/libc/src/primitives.rs b/vendor/libc/src/primitives.rs deleted file mode 100644 index 80a10af4c85462..00000000000000 --- a/vendor/libc/src/primitives.rs +++ /dev/null @@ -1,95 +0,0 @@ -//! This module contains type aliases for C's platform-specific types -//! and fixed-width integer types. -//! -//! The platform-specific types definitions were taken from rust-lang/rust in -//! library/core/src/ffi/primitives.rs -//! -//! The fixed-width integer aliases are deprecated: use the Rust types instead. - -pub type c_schar = i8; -pub type c_uchar = u8; -pub type c_short = i16; -pub type c_ushort = u16; - -pub type c_longlong = i64; -pub type c_ulonglong = u64; - -pub type c_float = f32; -pub type c_double = f64; - -cfg_if! { - if #[cfg(all( - not(windows), - not(target_vendor = "apple"), - not(target_os = "vita"), - any( - target_arch = "aarch64", - target_arch = "arm", - target_arch = "csky", - target_arch = "hexagon", - target_arch = "msp430", - target_arch = "powerpc", - target_arch = "powerpc64", - target_arch = "riscv32", - target_arch = "riscv64", - target_arch = "s390x", - target_arch = "xtensa", - ) - ))] { - pub type c_char = u8; - } else { - // On every other target, c_char is signed. 
- pub type c_char = i8; - } -} - -cfg_if! { - if #[cfg(any(target_arch = "avr", target_arch = "msp430"))] { - pub type c_int = i16; - pub type c_uint = u16; - } else { - pub type c_int = i32; - pub type c_uint = u32; - } -} - -cfg_if! { - if #[cfg(all(target_pointer_width = "64", not(windows)))] { - pub type c_long = i64; - pub type c_ulong = u64; - } else { - // The minimal size of `long` in the C standard is 32 bits - pub type c_long = i32; - pub type c_ulong = u32; - } -} - -#[deprecated(since = "0.2.55", note = "Use i8 instead.")] -pub type int8_t = i8; -#[deprecated(since = "0.2.55", note = "Use i16 instead.")] -pub type int16_t = i16; -#[deprecated(since = "0.2.55", note = "Use i32 instead.")] -pub type int32_t = i32; -#[deprecated(since = "0.2.55", note = "Use i64 instead.")] -pub type int64_t = i64; -#[deprecated(since = "0.2.55", note = "Use u8 instead.")] -pub type uint8_t = u8; -#[deprecated(since = "0.2.55", note = "Use u16 instead.")] -pub type uint16_t = u16; -#[deprecated(since = "0.2.55", note = "Use u32 instead.")] -pub type uint32_t = u32; -#[deprecated(since = "0.2.55", note = "Use u64 instead.")] -pub type uint64_t = u64; - -cfg_if! { - if #[cfg(all(target_arch = "aarch64", not(target_os = "windows")))] { - /// C `__int128` (a GCC extension that's part of many ABIs) - pub type __int128 = i128; - /// C `unsigned __int128` (a GCC extension that's part of many ABIs) - pub type __uint128 = u128; - /// C __int128_t (alternate name for [__int128][]) - pub type __int128_t = i128; - /// C __uint128_t (alternate name for [__uint128][]) - pub type __uint128_t = u128; - } -} diff --git a/vendor/libc/src/psp.rs b/vendor/libc/src/psp.rs deleted file mode 100644 index 823567127c4019..00000000000000 --- a/vendor/libc/src/psp.rs +++ /dev/null @@ -1,4131 +0,0 @@ -//! PSP C type definitions -//! -//! These type declarations are not enough, as they must be ultimately resolved -//! by the linker. Crates that use these definitions must, somewhere in the -//! crate graph, include a stub provider crate such as the `psp` crate. 
- -use crate::prelude::*; - -pub type intmax_t = i64; -pub type uintmax_t = u64; - -pub type size_t = usize; -pub type ptrdiff_t = isize; -pub type intptr_t = isize; -pub type uintptr_t = usize; -pub type ssize_t = isize; - -pub type SceKernelVTimerHandler = unsafe extern "C" fn( - uid: SceUid, - arg1: *mut SceKernelSysClock, - arg2: *mut SceKernelSysClock, - arg3: *mut c_void, -) -> u32; - -pub type SceKernelVTimerHandlerWide = - unsafe extern "C" fn(uid: SceUid, arg1: i64, arg2: i64, arg3: *mut c_void) -> u32; - -pub type SceKernelThreadEventHandler = - unsafe extern "C" fn(mask: i32, thid: SceUid, common: *mut c_void) -> i32; - -pub type SceKernelAlarmHandler = unsafe extern "C" fn(common: *mut c_void) -> u32; - -pub type SceKernelCallbackFunction = - unsafe extern "C" fn(arg1: i32, arg2: i32, arg: *mut c_void) -> i32; - -pub type SceKernelThreadEntry = unsafe extern "C" fn(args: usize, argp: *mut c_void) -> i32; - -pub type PowerCallback = extern "C" fn(unknown: i32, power_info: i32); - -pub type IoPermissions = i32; - -pub type UmdCallback = fn(unknown: i32, event: i32) -> i32; - -pub type SceMpegRingbufferCb = - Option i32>; - -pub type GuCallback = Option; -pub type GuSwapBuffersCallback = - Option; - -pub type SceNetAdhocctlHandler = - Option; - -pub type AdhocMatchingCallback = Option< - unsafe extern "C" fn( - matching_id: i32, - event: i32, - mac: *mut u8, - opt_len: i32, - opt_data: *mut c_void, - ), ->; - -pub type SceNetApctlHandler = Option< - unsafe extern "C" fn(oldState: i32, newState: i32, event: i32, error: i32, pArg: *mut c_void), ->; - -pub type HttpMallocFunction = Option *mut c_void>; -pub type HttpReallocFunction = - Option *mut c_void>; -pub type HttpFreeFunction = Option; -pub type HttpPasswordCB = Option< - unsafe extern "C" fn( - request: i32, - auth_type: HttpAuthType, - realm: *const u8, - username: *mut u8, - password: *mut u8, - need_entity: i32, - entity_body: *mut *mut u8, - entity_size: *mut usize, - save: *mut i32, - ) -> i32, ->; - -pub type socklen_t = u32; - -e! 
{ - #[repr(u32)] - pub enum AudioFormat { - Stereo = 0, - Mono = 0x10, - } - - #[repr(u32)] - pub enum DisplayMode { - Lcd = 0, - } - - #[repr(u32)] - pub enum DisplayPixelFormat { - Psm5650 = 0, - Psm5551 = 1, - Psm4444 = 2, - Psm8888 = 3, - } - - #[repr(u32)] - pub enum DisplaySetBufSync { - Immediate = 0, - NextFrame = 1, - } - - #[repr(i32)] - pub enum AudioOutputFrequency { - Khz48 = 48000, - Khz44_1 = 44100, - Khz32 = 32000, - Khz24 = 24000, - Khz22_05 = 22050, - Khz16 = 16000, - Khz12 = 12000, - Khz11_025 = 11025, - Khz8 = 8000, - } - - #[repr(i32)] - pub enum AudioInputFrequency { - Khz44_1 = 44100, - Khz22_05 = 22050, - Khz11_025 = 11025, - } - - #[repr(u32)] - pub enum CtrlMode { - Digital = 0, - Analog, - } - - #[repr(i32)] - pub enum GeMatrixType { - Bone0 = 0, - Bone1, - Bone2, - Bone3, - Bone4, - Bone5, - Bone6, - Bone7, - World, - View, - Projection, - TexGen, - } - - #[repr(i32)] - pub enum GeListState { - Done = 0, - Queued, - DrawingDone, - StallReached, - CancelDone, - } - - #[repr(u8)] - pub enum GeCommand { - Nop = 0, - Vaddr = 0x1, - Iaddr = 0x2, - Prim = 0x4, - Bezier = 0x5, - Spline = 0x6, - BoundingBox = 0x7, - Jump = 0x8, - BJump = 0x9, - Call = 0xa, - Ret = 0xb, - End = 0xc, - Signal = 0xe, - Finish = 0xf, - Base = 0x10, - VertexType = 0x12, - OffsetAddr = 0x13, - Origin = 0x14, - Region1 = 0x15, - Region2 = 0x16, - LightingEnable = 0x17, - LightEnable0 = 0x18, - LightEnable1 = 0x19, - LightEnable2 = 0x1a, - LightEnable3 = 0x1b, - DepthClampEnable = 0x1c, - CullFaceEnable = 0x1d, - TextureMapEnable = 0x1e, - FogEnable = 0x1f, - DitherEnable = 0x20, - AlphaBlendEnable = 0x21, - AlphaTestEnable = 0x22, - ZTestEnable = 0x23, - StencilTestEnable = 0x24, - AntiAliasEnable = 0x25, - PatchCullEnable = 0x26, - ColorTestEnable = 0x27, - LogicOpEnable = 0x28, - BoneMatrixNumber = 0x2a, - BoneMatrixData = 0x2b, - MorphWeight0 = 0x2c, - MorphWeight1 = 0x2d, - MorphWeight2 = 0x2e, - MorphWeight3 = 0x2f, - MorphWeight4 = 0x30, - MorphWeight5 = 0x31, - MorphWeight6 = 0x32, - MorphWeight7 = 0x33, - PatchDivision = 0x36, - PatchPrimitive = 0x37, - PatchFacing = 0x38, - WorldMatrixNumber = 0x3a, - WorldMatrixData = 0x3b, - ViewMatrixNumber = 0x3c, - ViewMatrixData = 0x3d, - ProjMatrixNumber = 0x3e, - ProjMatrixData = 0x3f, - TGenMatrixNumber = 0x40, - TGenMatrixData = 0x41, - ViewportXScale = 0x42, - ViewportYScale = 0x43, - ViewportZScale = 0x44, - ViewportXCenter = 0x45, - ViewportYCenter = 0x46, - ViewportZCenter = 0x47, - TexScaleU = 0x48, - TexScaleV = 0x49, - TexOffsetU = 0x4a, - TexOffsetV = 0x4b, - OffsetX = 0x4c, - OffsetY = 0x4d, - ShadeMode = 0x50, - ReverseNormal = 0x51, - MaterialUpdate = 0x53, - MaterialEmissive = 0x54, - MaterialAmbient = 0x55, - MaterialDiffuse = 0x56, - MaterialSpecular = 0x57, - MaterialAlpha = 0x58, - MaterialSpecularCoef = 0x5b, - AmbientColor = 0x5c, - AmbientAlpha = 0x5d, - LightMode = 0x5e, - LightType0 = 0x5f, - LightType1 = 0x60, - LightType2 = 0x61, - LightType3 = 0x62, - Light0X = 0x63, - Light0Y, - Light0Z, - Light1X, - Light1Y, - Light1Z, - Light2X, - Light2Y, - Light2Z, - Light3X, - Light3Y, - Light3Z, - Light0DirectionX = 0x6f, - Light0DirectionY, - Light0DirectionZ, - Light1DirectionX, - Light1DirectionY, - Light1DirectionZ, - Light2DirectionX, - Light2DirectionY, - Light2DirectionZ, - Light3DirectionX, - Light3DirectionY, - Light3DirectionZ, - Light0ConstantAtten = 0x7b, - Light0LinearAtten, - Light0QuadtraticAtten, - Light1ConstantAtten, - Light1LinearAtten, - Light1QuadtraticAtten, - Light2ConstantAtten, - Light2LinearAtten, - 
Light2QuadtraticAtten, - Light3ConstantAtten, - Light3LinearAtten, - Light3QuadtraticAtten, - Light0ExponentAtten = 0x87, - Light1ExponentAtten, - Light2ExponentAtten, - Light3ExponentAtten, - Light0CutoffAtten = 0x8b, - Light1CutoffAtten, - Light2CutoffAtten, - Light3CutoffAtten, - Light0Ambient = 0x8f, - Light0Diffuse, - Light0Specular, - Light1Ambient, - Light1Diffuse, - Light1Specular, - Light2Ambient, - Light2Diffuse, - Light2Specular, - Light3Ambient, - Light3Diffuse, - Light3Specular, - Cull = 0x9b, - FrameBufPtr = 0x9c, - FrameBufWidth = 0x9d, - ZBufPtr = 0x9e, - ZBufWidth = 0x9f, - TexAddr0 = 0xa0, - TexAddr1, - TexAddr2, - TexAddr3, - TexAddr4, - TexAddr5, - TexAddr6, - TexAddr7, - TexBufWidth0 = 0xa8, - TexBufWidth1, - TexBufWidth2, - TexBufWidth3, - TexBufWidth4, - TexBufWidth5, - TexBufWidth6, - TexBufWidth7, - ClutAddr = 0xb0, - ClutAddrUpper = 0xb1, - TransferSrc, - TransferSrcW, - TransferDst, - TransferDstW, - TexSize0 = 0xb8, - TexSize1, - TexSize2, - TexSize3, - TexSize4, - TexSize5, - TexSize6, - TexSize7, - TexMapMode = 0xc0, - TexShadeLs = 0xc1, - TexMode = 0xc2, - TexFormat = 0xc3, - LoadClut = 0xc4, - ClutFormat = 0xc5, - TexFilter = 0xc6, - TexWrap = 0xc7, - TexLevel = 0xc8, - TexFunc = 0xc9, - TexEnvColor = 0xca, - TexFlush = 0xcb, - TexSync = 0xcc, - Fog1 = 0xcd, - Fog2 = 0xce, - FogColor = 0xcf, - TexLodSlope = 0xd0, - FramebufPixFormat = 0xd2, - ClearMode = 0xd3, - Scissor1 = 0xd4, - Scissor2 = 0xd5, - MinZ = 0xd6, - MaxZ = 0xd7, - ColorTest = 0xd8, - ColorRef = 0xd9, - ColorTestmask = 0xda, - AlphaTest = 0xdb, - StencilTest = 0xdc, - StencilOp = 0xdd, - ZTest = 0xde, - BlendMode = 0xdf, - BlendFixedA = 0xe0, - BlendFixedB = 0xe1, - Dith0 = 0xe2, - Dith1, - Dith2, - Dith3, - LogicOp = 0xe6, - ZWriteDisable = 0xe7, - MaskRgb = 0xe8, - MaskAlpha = 0xe9, - TransferStart = 0xea, - TransferSrcPos = 0xeb, - TransferDstPos = 0xec, - TransferSize = 0xee, - Vscx = 0xf0, - Vscy = 0xf1, - Vscz = 0xf2, - Vtcs = 0xf3, - Vtct = 0xf4, - Vtcq = 0xf5, - Vcv = 0xf6, - Vap = 0xf7, - Vfc = 0xf8, - Vscv = 0xf9, - - Unknown03 = 0x03, - Unknown0D = 0x0d, - Unknown11 = 0x11, - Unknown29 = 0x29, - Unknown34 = 0x34, - Unknown35 = 0x35, - Unknown39 = 0x39, - Unknown4E = 0x4e, - Unknown4F = 0x4f, - Unknown52 = 0x52, - Unknown59 = 0x59, - Unknown5A = 0x5a, - UnknownB6 = 0xb6, - UnknownB7 = 0xb7, - UnknownD1 = 0xd1, - UnknownED = 0xed, - UnknownEF = 0xef, - UnknownFA = 0xfa, - UnknownFB = 0xfb, - UnknownFC = 0xfc, - UnknownFD = 0xfd, - UnknownFE = 0xfe, - NopFF = 0xff, - } - - #[repr(i32)] - pub enum SceSysMemPartitionId { - SceKernelUnknownPartition = 0, - SceKernelPrimaryKernelPartition = 1, - SceKernelPrimaryUserPartition = 2, - SceKernelOtherKernelPartition1 = 3, - SceKernelOtherKernelPartition2 = 4, - SceKernelVshellPARTITION = 5, - SceKernelScUserPartition = 6, - SceKernelMeUserPartition = 7, - SceKernelExtendedScKernelPartition = 8, - SceKernelExtendedSc2KernelPartition = 9, - SceKernelExtendedMeKernelPartition = 10, - SceKernelVshellKernelPartition = 11, - SceKernelExtendedKernelPartition = 12, - } - - #[repr(i32)] - pub enum SceSysMemBlockTypes { - Low = 0, - High, - Addr, - } - - #[repr(u32)] - pub enum Interrupt { - Gpio = 4, - Ata = 5, - Umd = 6, - Mscm0 = 7, - Wlan = 8, - Audio = 10, - I2c = 12, - Sircs = 14, - Systimer0 = 15, - Systimer1 = 16, - Systimer2 = 17, - Systimer3 = 18, - Thread0 = 19, - Nand = 20, - Dmacplus = 21, - Dma0 = 22, - Dma1 = 23, - Memlmd = 24, - Ge = 25, - Vblank = 30, - Mecodec = 31, - Hpremote = 36, - Mscm1 = 60, - Mscm2 = 61, - Thread1 = 65, - Interrupt 
= 66, - } - - #[repr(u32)] - pub enum SubInterrupt { - Gpio = Interrupt::Gpio as u32, - Ata = Interrupt::Ata as u32, - Umd = Interrupt::Umd as u32, - Dmacplus = Interrupt::Dmacplus as u32, - Ge = Interrupt::Ge as u32, - Display = Interrupt::Vblank as u32, - } - - #[repr(u32)] - pub enum SceKernelIdListType { - Thread = 1, - Semaphore = 2, - EventFlag = 3, - Mbox = 4, - Vpl = 5, - Fpl = 6, - Mpipe = 7, - Callback = 8, - ThreadEventHandler = 9, - Alarm = 10, - VTimer = 11, - SleepThread = 64, - DelayThread = 65, - SuspendThread = 66, - DormantThread = 67, - } - - #[repr(i32)] - pub enum UsbCamResolution { - Px160_120 = 0, - Px176_144 = 1, - Px320_240 = 2, - Px352_288 = 3, - Px640_480 = 4, - Px1024_768 = 5, - Px1280_960 = 6, - Px480_272 = 7, - Px360_272 = 8, - } - - #[repr(i32)] - pub enum UsbCamResolutionEx { - Px160_120 = 0, - Px176_144 = 1, - Px320_240 = 2, - Px352_288 = 3, - Px360_272 = 4, - Px480_272 = 5, - Px640_480 = 6, - Px1024_768 = 7, - Px1280_960 = 8, - } - - #[repr(i32)] - pub enum UsbCamDelay { - NoDelay = 0, - Delay10Sec = 1, - Delay20Sec = 2, - Delay30Sec = 3, - } - - #[repr(i32)] - pub enum UsbCamFrameRate { - Fps3_75 = 0, - Fps5 = 1, - Fps7_5 = 2, - Fps10 = 3, - Fps15 = 4, - Fps20 = 5, - Fps30 = 6, - Fps60 = 7, - } - - #[repr(i32)] - pub enum UsbCamWb { - Auto = 0, - Daylight = 1, - Fluorescent = 2, - Incadescent = 3, - } - - #[repr(i32)] - pub enum UsbCamEffectMode { - Normal = 0, - Negative = 1, - Blackwhite = 2, - Sepia = 3, - Blue = 4, - Red = 5, - Green = 6, - } - - #[repr(i32)] - pub enum UsbCamEvLevel { - Pos2_0 = 0, - Pos1_7 = 1, - Pos1_5 = 2, - Pos1_3 = 3, - Pos1_0 = 4, - Pos0_7 = 5, - Pos0_5 = 6, - Pos0_3 = 7, - Zero = 8, - Neg0_3, - Neg0_5, - Neg0_7, - Neg1_0, - Neg1_3, - Neg1_5, - Neg1_7, - Neg2_0, - } - - #[repr(i32)] - pub enum RtcCheckValidError { - InvalidYear = -1, - InvalidMonth = -2, - InvalidDay = -3, - InvalidHour = -4, - InvalidMinutes = -5, - InvalidSeconds = -6, - InvalidMicroseconds = -7, - } - - #[repr(u32)] - pub enum PowerTick { - All = 0, - Suspend = 1, - Display = 6, - } - - #[repr(u32)] - pub enum IoAssignPerms { - RdWr = 0, - RdOnly = 1, - } - - #[repr(u32)] - pub enum IoWhence { - Set = 0, - Cur = 1, - End = 2, - } - - #[repr(u32)] - pub enum UmdType { - Game = 0x10, - Video = 0x20, - Audio = 0x40, - } - - #[repr(u32)] - pub enum GuPrimitive { - Points = 0, - Lines = 1, - LineStrip = 2, - Triangles = 3, - TriangleStrip = 4, - TriangleFan = 5, - Sprites = 6, - } - - #[repr(u32)] - pub enum PatchPrimitive { - Points = 0, - LineStrip = 2, - TriangleStrip = 4, - } - - #[repr(u32)] - pub enum GuState { - AlphaTest = 0, - DepthTest = 1, - ScissorTest = 2, - StencilTest = 3, - Blend = 4, - CullFace = 5, - Dither = 6, - Fog = 7, - ClipPlanes = 8, - Texture2D = 9, - Lighting = 10, - Light0 = 11, - Light1 = 12, - Light2 = 13, - Light3 = 14, - LineSmooth = 15, - PatchCullFace = 16, - ColorTest = 17, - ColorLogicOp = 18, - FaceNormalReverse = 19, - PatchFace = 20, - Fragment2X = 21, - } - - #[repr(u32)] - pub enum MatrixMode { - Projection = 0, - View = 1, - Model = 2, - Texture = 3, - } - - #[repr(u32)] - pub enum TexturePixelFormat { - Psm5650 = 0, - Psm5551 = 1, - Psm4444 = 2, - Psm8888 = 3, - PsmT4 = 4, - PsmT8 = 5, - PsmT16 = 6, - PsmT32 = 7, - PsmDxt1 = 8, - PsmDxt3 = 9, - PsmDxt5 = 10, - } - - #[repr(u32)] - pub enum SplineMode { - FillFill = 0, - OpenFill = 1, - FillOpen = 2, - OpenOpen = 3, - } - - #[repr(u32)] - pub enum ShadingModel { - Flat = 0, - Smooth = 1, - } - - #[repr(u32)] - pub enum LogicalOperation { - Clear = 0, - And = 1, - 
AndReverse = 2, - Copy = 3, - AndInverted = 4, - Noop = 5, - Xor = 6, - Or = 7, - Nor = 8, - Equiv = 9, - Inverted = 10, - OrReverse = 11, - CopyInverted = 12, - OrInverted = 13, - Nand = 14, - Set = 15, - } - - #[repr(u32)] - pub enum TextureFilter { - Nearest = 0, - Linear = 1, - NearestMipmapNearest = 4, - LinearMipmapNearest = 5, - NearestMipmapLinear = 6, - LinearMipmapLinear = 7, - } - - #[repr(u32)] - pub enum TextureMapMode { - TextureCoords = 0, - TextureMatrix = 1, - EnvironmentMap = 2, - } - - #[repr(u32)] - pub enum TextureLevelMode { - Auto = 0, - Const = 1, - Slope = 2, - } - - #[repr(u32)] - pub enum TextureProjectionMapMode { - Position = 0, - Uv = 1, - NormalizedNormal = 2, - Normal = 3, - } - - #[repr(u32)] - pub enum GuTexWrapMode { - Repeat = 0, - Clamp = 1, - } - - #[repr(u32)] - pub enum FrontFaceDirection { - Clockwise = 0, - CounterClockwise = 1, - } - - #[repr(u32)] - pub enum AlphaFunc { - Never = 0, - Always, - Equal, - NotEqual, - Less, - LessOrEqual, - Greater, - GreaterOrEqual, - } - - #[repr(u32)] - pub enum StencilFunc { - Never = 0, - Always, - Equal, - NotEqual, - Less, - LessOrEqual, - Greater, - GreaterOrEqual, - } - - #[repr(u32)] - pub enum ColorFunc { - Never = 0, - Always, - Equal, - NotEqual, - } - - #[repr(u32)] - pub enum DepthFunc { - Never = 0, - Always, - Equal, - NotEqual, - Less, - LessOrEqual, - Greater, - GreaterOrEqual, - } - - #[repr(u32)] - pub enum TextureEffect { - Modulate = 0, - Decal = 1, - Blend = 2, - Replace = 3, - Add = 4, - } - - #[repr(u32)] - pub enum TextureColorComponent { - Rgb = 0, - Rgba = 1, - } - - #[repr(u32)] - pub enum MipmapLevel { - None = 0, - Level1, - Level2, - Level3, - Level4, - Level5, - Level6, - Level7, - } - - #[repr(u32)] - pub enum BlendOp { - Add = 0, - Subtract = 1, - ReverseSubtract = 2, - Min = 3, - Max = 4, - Abs = 5, - } - - #[repr(u32)] - pub enum BlendSrc { - SrcColor = 0, - OneMinusSrcColor = 1, - SrcAlpha = 2, - OneMinusSrcAlpha = 3, - Fix = 10, - } - - #[repr(u32)] - pub enum BlendDst { - DstColor = 0, - OneMinusDstColor = 1, - DstAlpha = 4, - OneMinusDstAlpha = 5, - Fix = 10, - } - - #[repr(u32)] - pub enum StencilOperation { - Keep = 0, - Zero = 1, - Replace = 2, - Invert = 3, - Incr = 4, - Decr = 5, - } - - #[repr(u32)] - pub enum LightMode { - SingleColor = 0, - SeparateSpecularColor = 1, - } - - #[repr(u32)] - pub enum LightType { - Directional = 0, - Pointlight = 1, - Spotlight = 2, - } - - #[repr(u32)] - pub enum GuContextType { - Direct = 0, - Call = 1, - Send = 2, - } - - #[repr(u32)] - pub enum GuQueueMode { - Tail = 0, - Head = 1, - } - - #[repr(u32)] - pub enum GuSyncMode { - Finish = 0, - Signal = 1, - Done = 2, - List = 3, - Send = 4, - } - - #[repr(u32)] - pub enum GuSyncBehavior { - Wait = 0, - NoWait = 1, - } - - #[repr(u32)] - pub enum GuCallbackId { - Signal = 1, - Finish = 4, - } - - #[repr(u32)] - pub enum SignalBehavior { - Suspend = 1, - Continue = 2, - } - - #[repr(u32)] - pub enum ClutPixelFormat { - Psm5650 = 0, - Psm5551 = 1, - Psm4444 = 2, - Psm8888 = 3, - } - - #[repr(C)] - pub enum KeyType { - Directory = 1, - Integer = 2, - String = 3, - Bytes = 4, - } - - #[repr(u32)] - pub enum UtilityMsgDialogMode { - Error, - Text, - } - - #[repr(u32)] - pub enum UtilityMsgDialogPressed { - Unknown1, - Yes, - No, - Back, - } - - #[repr(u32)] - pub enum UtilityDialogButtonAccept { - Circle, - Cross, - } - - #[repr(u32)] - pub enum SceUtilityOskInputLanguage { - Default, - Japanese, - English, - French, - Spanish, - German, - Italian, - Dutch, - Portugese, - Russian, - Korean, 
- } - - #[repr(u32)] - pub enum SceUtilityOskInputType { - All, - LatinDigit, - LatinSymbol, - LatinLowercase = 4, - LatinUppercase = 8, - JapaneseDigit = 0x100, - JapaneseSymbol = 0x200, - JapaneseLowercase = 0x400, - JapaneseUppercase = 0x800, - JapaneseHiragana = 0x1000, - JapaneseHalfWidthKatakana = 0x2000, - JapaneseKatakana = 0x4000, - JapaneseKanji = 0x8000, - RussianLowercase = 0x10000, - RussianUppercase = 0x20000, - Korean = 0x40000, - Url = 0x80000, - } - - #[repr(u32)] - pub enum SceUtilityOskState { - None, - Initializing, - Initialized, - Visible, - Quit, - Finished, - } - - #[repr(u32)] - pub enum SceUtilityOskResult { - Unchanged, - Cancelled, - Changed, - } - - #[repr(u32)] - pub enum SystemParamLanguage { - Japanese, - English, - French, - Spanish, - German, - Italian, - Dutch, - Portugese, - Russian, - Korean, - ChineseTraditional, - ChineseSimplified, - } - - #[repr(u32)] - pub enum SystemParamId { - StringNickname = 1, - AdhocChannel, - WlanPowerSave, - DateFormat, - TimeFormat, - Timezone, - DaylightSavings, - Language, - Unknown, - } - - #[repr(u32)] - pub enum SystemParamAdhocChannel { - ChannelAutomatic = 0, - Channel1 = 1, - Channel6 = 6, - Channel11 = 11, - } - - #[repr(u32)] - pub enum SystemParamWlanPowerSaveState { - Off, - On, - } - - #[repr(u32)] - pub enum SystemParamDateFormat { - YYYYMMDD, - MMDDYYYY, - DDMMYYYY, - } - - #[repr(u32)] - pub enum SystemParamTimeFormat { - Hour24, - Hour12, - } - - #[repr(u32)] - pub enum SystemParamDaylightSavings { - Std, - Dst, - } - - #[repr(u32)] - pub enum AvModule { - AvCodec, - SasCore, - Atrac3Plus, - MpegBase, - Mp3, - Vaudio, - Aac, - G729, - } - - #[repr(u32)] - pub enum Module { - NetCommon = 0x100, - NetAdhoc, - NetInet, - NetParseUri, - NetHttp, - NetSsl, - - UsbPspCm = 0x200, - UsbMic, - UsbCam, - UsbGps, - - AvCodec = 0x300, - AvSascore, - AvAtrac3Plus, - AvMpegBase, - AvMp3, - AvVaudio, - AvAac, - AvG729, - - NpCommon = 0x400, - NpService, - NpMatching2, - NpDrm = 0x500, - - Irda = 0x600, - } - - #[repr(u32)] - pub enum NetModule { - NetCommon = 1, - NetAdhoc, - NetInet, - NetParseUri, - NetHttp, - NetSsl, - } - - #[repr(u32)] - pub enum UsbModule { - UsbPspCm = 1, - UsbAcc, - UsbMic, - UsbCam, - UsbGps, - } - - #[repr(u32)] - pub enum NetParam { - Name, - Ssid, - Secure, - WepKey, - IsStaticIp, - Ip, - NetMask, - Route, - ManualDns, - PrimaryDns, - SecondaryDns, - ProxyUser, - ProxyPass, - UseProxy, - ProxyServer, - ProxyPort, - Unknown1, - Unknown2, - } - - #[repr(u32)] - pub enum UtilityNetconfAction { - ConnectAP, - DisplayStatus, - ConnectAdhoc, - } - - #[repr(u32)] - pub enum UtilitySavedataMode { - AutoLoad, - AutoSave, - Load, - Save, - ListLoad, - ListSave, - ListDelete, - Delete, - } - - #[repr(u32)] - pub enum UtilitySavedataFocus { - Unknown1, - FirstList, - LastList, - Latest, - Oldest, - Unknown2, - Unknown3, - FirstEmpty, - LastEmpty, - } - - #[repr(u32)] - pub enum UtilityGameSharingMode { - Single = 1, - Multiple, - } - - #[repr(u32)] - pub enum UtilityGameSharingDataType { - File = 1, - Memory, - } - - #[repr(u32)] - pub enum UtilityHtmlViewerInterfaceMode { - Full, - Limited, - None, - } - - #[repr(u32)] - pub enum UtilityHtmlViewerCookieMode { - Disabled = 0, - Enabled, - Confirm, - Default, - } - - #[repr(u32)] - pub enum UtilityHtmlViewerTextSize { - Large, - Normal, - Small, - } - - #[repr(u32)] - pub enum UtilityHtmlViewerDisplayMode { - Normal, - Fit, - SmartFit, - } - - #[repr(u32)] - pub enum UtilityHtmlViewerConnectMode { - Last, - ManualOnce, - ManualAll, - } - - #[repr(u32)] - 
pub enum UtilityHtmlViewerDisconnectMode { - Enable, - Disable, - Confirm, - } - - #[repr(u32)] - pub enum ScePspnetAdhocPtpState { - Closed, - Listen, - SynSent, - SynReceived, - Established, - } - - #[repr(u32)] - pub enum AdhocMatchingMode { - Host = 1, - Client, - Ptp, - } - - #[repr(u32)] - pub enum ApctlState { - Disconnected, - Scanning, - Joining, - GettingIp, - GotIp, - EapAuth, - KeyExchange, - } - - #[repr(u32)] - pub enum ApctlEvent { - ConnectRequest, - ScanRequest, - ScanComplete, - Established, - GetIp, - DisconnectRequest, - Error, - Info, - EapAuth, - KeyExchange, - Reconnect, - } - - #[repr(u32)] - pub enum ApctlInfo { - ProfileName, - Bssid, - Ssid, - SsidLength, - SecurityType, - Strength, - Channel, - PowerSave, - Ip, - SubnetMask, - Gateway, - PrimaryDns, - SecondaryDns, - UseProxy, - ProxyUrl, - ProxyPort, - EapType, - StartBrowser, - Wifisp, - } - - #[repr(u32)] - pub enum ApctlInfoSecurityType { - None, - Wep, - Wpa, - } - - #[repr(u32)] - pub enum HttpMethod { - Get, - Post, - Head, - } - - #[repr(u32)] - pub enum HttpAuthType { - Basic, - Digest, - } -} - -s_paren! { - #[repr(transparent)] - pub struct SceUid(pub i32); - - #[repr(transparent)] - #[allow(dead_code)] - pub struct SceMpeg(*mut *mut c_void); - - #[repr(transparent)] - #[allow(dead_code)] - pub struct SceMpegStream(*mut c_void); - - #[repr(transparent)] - pub struct Mp3Handle(pub i32); - - #[repr(transparent)] - #[allow(dead_code)] - pub struct RegHandle(u32); -} - -s! { - pub struct sockaddr { - pub sa_len: u8, - pub sa_family: u8, - pub sa_data: [u8; 14], - } - - pub struct in_addr { - pub s_addr: u32, - } - - pub struct AudioInputParams { - pub unknown1: i32, - pub gain: i32, - pub unknown2: i32, - pub unknown3: i32, - pub unknown4: i32, - pub unknown5: i32, - } - - pub struct Atrac3BufferInfo { - pub puc_write_position_first_buf: *mut u8, - pub ui_writable_byte_first_buf: u32, - pub ui_min_write_byte_first_buf: u32, - pub ui_read_position_first_buf: u32, - pub puc_write_position_second_buf: *mut u8, - pub ui_writable_byte_second_buf: u32, - pub ui_min_write_byte_second_buf: u32, - pub ui_read_position_second_buf: u32, - } - - pub struct SceCtrlData { - pub timestamp: u32, - pub buttons: i32, - pub lx: u8, - pub ly: u8, - pub rsrv: [u8; 6], - } - - pub struct SceCtrlLatch { - pub ui_make: u32, - pub ui_break: u32, - pub ui_press: u32, - pub ui_release: u32, - } - - pub struct GeStack { - pub stack: [u32; 8], - } - - // FIXME(1.0): This should not implement `PartialEq` - #[allow(unpredictable_function_pointer_comparisons)] - pub struct GeCallbackData { - pub signal_func: Option, - pub signal_arg: *mut c_void, - pub finish_func: Option, - pub finish_arg: *mut c_void, - } - - pub struct GeListArgs { - pub size: u32, - pub context: *mut GeContext, - pub num_stacks: u32, - pub stacks: *mut GeStack, - } - - pub struct GeBreakParam { - pub buf: [u32; 4], - } - - pub struct SceKernelLoadExecParam { - pub size: usize, - pub args: usize, - pub argp: *mut c_void, - pub key: *const u8, - } - - pub struct timeval { - pub tv_sec: i32, - pub tv_usec: i32, - } - - pub struct timezone { - pub tz_minutes_west: i32, - pub tz_dst_time: i32, - } - - pub struct IntrHandlerOptionParam { - size: i32, - entry: u32, - common: u32, - gp: u32, - intr_code: u16, - sub_count: u16, - intr_level: u16, - enabled: u16, - calls: u32, - field_1c: u32, - total_clock_lo: u32, - total_clock_hi: u32, - min_clock_lo: u32, - min_clock_hi: u32, - max_clock_lo: u32, - max_clock_hi: u32, - } - - pub struct SceKernelLMOption { - pub size: 
usize, - pub m_pid_text: SceUid, - pub m_pid_data: SceUid, - pub flags: u32, - pub position: u8, - pub access: u8, - pub c_reserved: [u8; 2usize], - } - - pub struct SceKernelSMOption { - pub size: usize, - pub m_pid_stack: SceUid, - pub stack_size: usize, - pub priority: i32, - pub attribute: u32, - } - - pub struct SceKernelModuleInfo { - pub size: usize, - pub n_segment: u8, - pub reserved: [u8; 3usize], - pub segment_addr: [i32; 4usize], - pub segment_size: [i32; 4usize], - pub entry_addr: u32, - pub gp_value: u32, - pub text_addr: u32, - pub text_size: u32, - pub data_size: u32, - pub bss_size: u32, - pub attribute: u16, - pub version: [u8; 2usize], - pub name: [u8; 28usize], - } - - pub struct DebugProfilerRegs { - pub enable: u32, - pub systemck: u32, - pub cpuck: u32, - pub internal: u32, - pub memory: u32, - pub copz: u32, - pub vfpu: u32, - pub sleep: u32, - pub bus_access: u32, - pub uncached_load: u32, - pub uncached_store: u32, - pub cached_load: u32, - pub cached_store: u32, - pub i_miss: u32, - pub d_miss: u32, - pub d_writeback: u32, - pub cop0_inst: u32, - pub fpu_inst: u32, - pub vfpu_inst: u32, - pub local_bus: u32, - } - - pub struct SceKernelSysClock { - pub low: u32, - pub hi: u32, - } - - pub struct SceKernelThreadOptParam { - pub size: usize, - pub stack_mpid: SceUid, - } - - // FIXME(1.0): This should not implement `PartialEq` - #[allow(unpredictable_function_pointer_comparisons)] - pub struct SceKernelThreadInfo { - pub size: usize, - pub name: [u8; 32], - pub attr: u32, - pub status: i32, - pub entry: SceKernelThreadEntry, - pub stack: *mut c_void, - pub stack_size: i32, - pub gp_reg: *mut c_void, - pub init_priority: i32, - pub current_priority: i32, - pub wait_type: i32, - pub wait_id: SceUid, - pub wakeup_count: i32, - pub exit_status: i32, - pub run_clocks: SceKernelSysClock, - pub intr_preempt_count: u32, - pub thread_preempt_count: u32, - pub release_count: u32, - } - - pub struct SceKernelThreadRunStatus { - pub size: usize, - pub status: i32, - pub current_priority: i32, - pub wait_type: i32, - pub wait_id: i32, - pub wakeup_count: i32, - pub run_clocks: SceKernelSysClock, - pub intr_preempt_count: u32, - pub thread_preempt_count: u32, - pub release_count: u32, - } - - pub struct SceKernelSemaOptParam { - pub size: usize, - } - - pub struct SceKernelSemaInfo { - pub size: usize, - pub name: [u8; 32], - pub attr: u32, - pub init_count: i32, - pub current_count: i32, - pub max_count: i32, - pub num_wait_threads: i32, - } - - pub struct SceKernelEventFlagInfo { - pub size: usize, - pub name: [u8; 32], - pub attr: u32, - pub init_pattern: u32, - pub current_pattern: u32, - pub num_wait_threads: i32, - } - - pub struct SceKernelEventFlagOptParam { - pub size: usize, - } - - pub struct SceKernelMbxOptParam { - pub size: usize, - } - - pub struct SceKernelMbxInfo { - pub size: usize, - pub name: [u8; 32usize], - pub attr: u32, - pub num_wait_threads: i32, - pub num_messages: i32, - pub first_message: *mut c_void, - } - - // FIXME(1.0): This should not implement `PartialEq` - #[allow(unpredictable_function_pointer_comparisons)] - pub struct SceKernelVTimerInfo { - pub size: usize, - pub name: [u8; 32], - pub active: i32, - pub base: SceKernelSysClock, - pub current: SceKernelSysClock, - pub schedule: SceKernelSysClock, - pub handler: SceKernelVTimerHandler, - pub common: *mut c_void, - } - - // FIXME(1.0): This should not implement `PartialEq` - #[allow(unpredictable_function_pointer_comparisons)] - pub struct SceKernelThreadEventHandlerInfo { - pub size: usize, 
- pub name: [u8; 32], - pub thread_id: SceUid, - pub mask: i32, - pub handler: SceKernelThreadEventHandler, - pub common: *mut c_void, - } - - // FIXME(1.0): This should not implement `PartialEq` - #[allow(unpredictable_function_pointer_comparisons)] - pub struct SceKernelAlarmInfo { - pub size: usize, - pub schedule: SceKernelSysClock, - pub handler: SceKernelAlarmHandler, - pub common: *mut c_void, - } - - pub struct SceKernelSystemStatus { - pub size: usize, - pub status: u32, - pub idle_clocks: SceKernelSysClock, - pub comes_out_of_idle_count: u32, - pub thread_switch_count: u32, - pub vfpu_switch_count: u32, - } - - pub struct SceKernelMppInfo { - pub size: usize, - pub name: [u8; 32], - pub attr: u32, - pub buf_size: i32, - pub free_size: i32, - pub num_send_wait_threads: i32, - pub num_receive_wait_threads: i32, - } - - pub struct SceKernelVplOptParam { - pub size: usize, - } - - pub struct SceKernelVplInfo { - pub size: usize, - pub name: [u8; 32], - pub attr: u32, - pub pool_size: i32, - pub free_size: i32, - pub num_wait_threads: i32, - } - - pub struct SceKernelFplOptParam { - pub size: usize, - } - - pub struct SceKernelFplInfo { - pub size: usize, - pub name: [u8; 32usize], - pub attr: u32, - pub block_size: i32, - pub num_blocks: i32, - pub free_blocks: i32, - pub num_wait_threads: i32, - } - - pub struct SceKernelVTimerOptParam { - pub size: usize, - } - - // FIXME(1.0): This should not implement `PartialEq` - #[allow(unpredictable_function_pointer_comparisons)] - pub struct SceKernelCallbackInfo { - pub size: usize, - pub name: [u8; 32usize], - pub thread_id: SceUid, - pub callback: SceKernelCallbackFunction, - pub common: *mut c_void, - pub notify_count: i32, - pub notify_arg: i32, - } - - pub struct UsbCamSetupStillParam { - pub size: i32, - pub resolution: UsbCamResolution, - pub jpeg_size: i32, - pub reverse_flags: i32, - pub delay: UsbCamDelay, - pub comp_level: i32, - } - - pub struct UsbCamSetupStillExParam { - pub size: i32, - pub unk: u32, - pub resolution: UsbCamResolutionEx, - pub jpeg_size: i32, - pub comp_level: i32, - pub unk2: u32, - pub unk3: u32, - pub flip: i32, - pub mirror: i32, - pub delay: UsbCamDelay, - pub unk4: [u32; 5usize], - } - - pub struct UsbCamSetupVideoParam { - pub size: i32, - pub resolution: UsbCamResolution, - pub framerate: UsbCamFrameRate, - pub white_balance: UsbCamWb, - pub saturation: i32, - pub brightness: i32, - pub contrast: i32, - pub sharpness: i32, - pub effect_mode: UsbCamEffectMode, - pub frame_size: i32, - pub unk: u32, - pub evl_evel: UsbCamEvLevel, - } - - pub struct UsbCamSetupVideoExParam { - pub size: i32, - pub unk: u32, - pub resolution: UsbCamResolutionEx, - pub framerate: UsbCamFrameRate, - pub unk2: u32, - pub unk3: u32, - pub white_balance: UsbCamWb, - pub saturation: i32, - pub brightness: i32, - pub contrast: i32, - pub sharpness: i32, - pub unk4: u32, - pub unk5: u32, - pub unk6: [u32; 3usize], - pub effect_mode: UsbCamEffectMode, - pub unk7: u32, - pub unk8: u32, - pub unk9: u32, - pub unk10: u32, - pub unk11: u32, - pub frame_size: i32, - pub unk12: u32, - pub ev_level: UsbCamEvLevel, - } - - pub struct ScePspDateTime { - pub year: u16, - pub month: u16, - pub day: u16, - pub hour: u16, - pub minutes: u16, - pub seconds: u16, - pub microseconds: u32, - } - - pub struct SceIoStat { - pub st_mode: i32, - pub st_attr: i32, - pub st_size: i64, - pub st_ctime: ScePspDateTime, - pub st_atime: ScePspDateTime, - pub st_mtime: ScePspDateTime, - pub st_private: [u32; 6usize], - } - - pub struct UmdInfo { - pub size: 
u32, - pub type_: UmdType, - } - - // FIXME(1.0): This should not implement `PartialEq` - #[allow(unpredictable_function_pointer_comparisons)] - pub struct SceMpegRingbuffer { - pub packets: i32, - pub unk0: u32, - pub unk1: u32, - pub unk2: u32, - pub unk3: u32, - pub data: *mut c_void, - pub callback: SceMpegRingbufferCb, - pub cb_param: *mut c_void, - pub unk4: u32, - pub unk5: u32, - pub sce_mpeg: *mut c_void, - } - - pub struct SceMpegAu { - pub pts_msb: u32, - pub pts: u32, - pub dts_msb: u32, - pub dts: u32, - pub es_buffer: u32, - pub au_size: u32, - } - - pub struct SceMpegAvcMode { - pub unk0: i32, - pub pixel_format: super::DisplayPixelFormat, - } - - #[repr(align(64))] - pub struct SceMpegLLI { - pub src: *mut c_void, - pub dst: *mut c_void, - pub next: *mut c_void, - pub size: i32, - } - - #[repr(align(64))] - pub struct SceMpegYCrCbBuffer { - pub frame_buffer_height16: i32, - pub frame_buffer_width16: i32, - pub unknown: i32, - pub unknown2: i32, - pub y_buffer: *mut c_void, - pub y_buffer2: *mut c_void, - pub cr_buffer: *mut c_void, - pub cb_buffer: *mut c_void, - pub cr_buffer2: *mut c_void, - pub cb_buffer2: *mut c_void, - - pub frame_height: i32, - pub frame_width: i32, - pub frame_buffer_width: i32, - pub unknown3: [i32; 11usize], - } - - pub struct ScePspSRect { - pub x: i16, - pub y: i16, - pub w: i16, - pub h: i16, - } - - pub struct ScePspIRect { - pub x: i32, - pub y: i32, - pub w: i32, - pub h: i32, - } - - pub struct ScePspL64Rect { - pub x: u64, - pub y: u64, - pub w: u64, - pub h: u64, - } - - pub struct ScePspSVector2 { - pub x: i16, - pub y: i16, - } - - pub struct ScePspIVector2 { - pub x: i32, - pub y: i32, - } - - pub struct ScePspL64Vector2 { - pub x: u64, - pub y: u64, - } - - pub struct ScePspSVector3 { - pub x: i16, - pub y: i16, - pub z: i16, - } - - pub struct ScePspIVector3 { - pub x: i32, - pub y: i32, - pub z: i32, - } - - pub struct ScePspL64Vector3 { - pub x: u64, - pub y: u64, - pub z: u64, - } - - pub struct ScePspSVector4 { - pub x: i16, - pub y: i16, - pub z: i16, - pub w: i16, - } - - pub struct ScePspIVector4 { - pub x: i32, - pub y: i32, - pub z: i32, - pub w: i32, - } - - pub struct ScePspL64Vector4 { - pub x: u64, - pub y: u64, - pub z: u64, - pub w: u64, - } - - pub struct ScePspIMatrix2 { - pub x: ScePspIVector2, - pub y: ScePspIVector2, - } - - pub struct ScePspIMatrix3 { - pub x: ScePspIVector3, - pub y: ScePspIVector3, - pub z: ScePspIVector3, - } - - #[repr(align(16))] - pub struct ScePspIMatrix4 { - pub x: ScePspIVector4, - pub y: ScePspIVector4, - pub z: ScePspIVector4, - pub w: ScePspIVector4, - } - - pub struct ScePspIMatrix4Unaligned { - pub x: ScePspIVector4, - pub y: ScePspIVector4, - pub z: ScePspIVector4, - pub w: ScePspIVector4, - } - - pub struct SceMp3InitArg { - pub mp3_stream_start: u32, - pub unk1: u32, - pub mp3_stream_end: u32, - pub unk2: u32, - pub mp3_buf: *mut c_void, - pub mp3_buf_size: i32, - pub pcm_buf: *mut c_void, - pub pcm_buf_size: i32, - } - - pub struct OpenPSID { - pub data: [u8; 16usize], - } - - pub struct UtilityDialogCommon { - pub size: u32, - pub language: SystemParamLanguage, - pub button_accept: UtilityDialogButtonAccept, - pub graphics_thread: i32, - pub access_thread: i32, - pub font_thread: i32, - pub sound_thread: i32, - pub result: i32, - pub reserved: [i32; 4usize], - } - - pub struct UtilityNetconfAdhoc { - pub name: [u8; 8usize], - pub timeout: u32, - } - - pub struct UtilityNetconfData { - pub base: UtilityDialogCommon, - pub action: UtilityNetconfAction, - pub adhocparam: *mut 
UtilityNetconfAdhoc, - pub hotspot: i32, - pub hotspot_connected: i32, - pub wifisp: i32, - } - - pub struct UtilitySavedataFileData { - pub buf: *mut c_void, - pub buf_size: usize, - pub size: usize, - pub unknown: i32, - } - - pub struct UtilitySavedataListSaveNewData { - pub icon0: UtilitySavedataFileData, - pub title: *mut u8, - } - - pub struct UtilityGameSharingParams { - pub base: UtilityDialogCommon, - pub unknown1: i32, - pub unknown2: i32, - pub name: [u8; 8usize], - pub unknown3: i32, - pub unknown4: i32, - pub unknown5: i32, - pub result: i32, - pub filepath: *mut u8, - pub mode: UtilityGameSharingMode, - pub datatype: UtilityGameSharingDataType, - pub data: *mut c_void, - pub datasize: u32, - } - - pub struct UtilityHtmlViewerParam { - pub base: UtilityDialogCommon, - pub memaddr: *mut c_void, - pub memsize: u32, - pub unknown1: i32, - pub unknown2: i32, - pub initialurl: *mut u8, - pub numtabs: u32, - pub interfacemode: UtilityHtmlViewerInterfaceMode, - pub options: i32, - pub dldirname: *mut u8, - pub dlfilename: *mut u8, - pub uldirname: *mut u8, - pub ulfilename: *mut u8, - pub cookiemode: UtilityHtmlViewerCookieMode, - pub unknown3: u32, - pub homeurl: *mut u8, - pub textsize: UtilityHtmlViewerTextSize, - pub displaymode: UtilityHtmlViewerDisplayMode, - pub connectmode: UtilityHtmlViewerConnectMode, - pub disconnectmode: UtilityHtmlViewerDisconnectMode, - pub memused: u32, - pub unknown4: [i32; 10usize], - } - - pub struct SceUtilityOskData { - pub unk_00: i32, - pub unk_04: i32, - pub language: SceUtilityOskInputLanguage, - pub unk_12: i32, - pub inputtype: SceUtilityOskInputType, - pub lines: i32, - pub unk_24: i32, - pub desc: *mut u16, - pub intext: *mut u16, - pub outtextlength: i32, - pub outtext: *mut u16, - pub result: SceUtilityOskResult, - pub outtextlimit: i32, - } - - pub struct SceUtilityOskParams { - pub base: UtilityDialogCommon, - pub datacount: i32, - pub data: *mut SceUtilityOskData, - pub state: SceUtilityOskState, - pub unk_60: i32, - } - - pub struct SceNetMallocStat { - pub pool: i32, - pub maximum: i32, - pub free: i32, - } - - pub struct SceNetAdhocctlAdhocId { - pub unknown: i32, - pub adhoc_id: [u8; 9usize], - pub unk: [u8; 3usize], - } - - pub struct SceNetAdhocctlScanInfo { - pub next: *mut SceNetAdhocctlScanInfo, - pub channel: i32, - pub name: [u8; 8usize], - pub bssid: [u8; 6usize], - pub unknown: [u8; 2usize], - pub unknown2: i32, - } - - pub struct SceNetAdhocctlGameModeInfo { - pub count: i32, - pub macs: [[u8; 6usize]; 16usize], - } - - pub struct SceNetAdhocPtpStat { - pub next: *mut SceNetAdhocPtpStat, - pub ptp_id: i32, - pub mac: [u8; 6usize], - pub peermac: [u8; 6usize], - pub port: u16, - pub peerport: u16, - pub sent_data: u32, - pub rcvd_data: u32, - pub state: ScePspnetAdhocPtpState, - } - - pub struct SceNetAdhocPdpStat { - pub next: *mut SceNetAdhocPdpStat, - pub pdp_id: i32, - pub mac: [u8; 6usize], - pub port: u16, - pub rcvd_data: u32, - } - - pub struct AdhocPoolStat { - pub size: i32, - pub maxsize: i32, - pub freesize: i32, - } -} - -s_no_extra_traits! 
{ - pub struct GeContext { - pub context: [u32; 512], - } - - pub struct SceKernelUtilsSha1Context { - pub h: [u32; 5usize], - pub us_remains: u16, - pub us_computed: u16, - pub ull_total_len: u64, - pub buf: [u8; 64usize], - } - - pub struct SceKernelUtilsMt19937Context { - pub count: u32, - pub state: [u32; 624usize], - } - - pub struct SceKernelUtilsMd5Context { - pub h: [u32; 4usize], - pub pad: u32, - pub us_remains: u16, - pub us_computed: u16, - pub ull_total_len: u64, - pub buf: [u8; 64usize], - } - - pub struct SceIoDirent { - pub d_stat: SceIoStat, - pub d_name: [u8; 256usize], - pub d_private: *mut c_void, - pub dummy: i32, - } - - pub struct ScePspFRect { - pub x: f32, - pub y: f32, - pub w: f32, - pub h: f32, - } - - #[repr(align(16))] - pub struct ScePspFVector3 { - pub x: f32, - pub y: f32, - pub z: f32, - } - - #[repr(align(16))] - pub struct ScePspFVector4 { - pub x: f32, - pub y: f32, - pub z: f32, - pub w: f32, - } - - pub struct ScePspFVector4Unaligned { - pub x: f32, - pub y: f32, - pub z: f32, - pub w: f32, - } - - pub struct ScePspFVector2 { - pub x: f32, - pub y: f32, - } - - pub struct ScePspFMatrix2 { - pub x: ScePspFVector2, - pub y: ScePspFVector2, - } - - pub struct ScePspFMatrix3 { - pub x: ScePspFVector3, - pub y: ScePspFVector3, - pub z: ScePspFVector3, - } - - #[repr(align(16))] - pub struct ScePspFMatrix4 { - pub x: ScePspFVector4, - pub y: ScePspFVector4, - pub z: ScePspFVector4, - pub w: ScePspFVector4, - } - - pub struct ScePspFMatrix4Unaligned { - pub x: ScePspFVector4, - pub y: ScePspFVector4, - pub z: ScePspFVector4, - pub w: ScePspFVector4, - } - - pub union ScePspVector3 { - pub fv: ScePspFVector3, - pub iv: ScePspIVector3, - pub f: [f32; 3usize], - pub i: [i32; 3usize], - } - - pub union ScePspVector4 { - pub fv: ScePspFVector4, - pub iv: ScePspIVector4, - pub qw: u128, - pub f: [f32; 4usize], - pub i: [i32; 4usize], - } - - pub union ScePspMatrix2 { - pub fm: ScePspFMatrix2, - pub im: ScePspIMatrix2, - pub fv: [ScePspFVector2; 2usize], - pub iv: [ScePspIVector2; 2usize], - pub v: [ScePspVector2; 2usize], - pub f: [[f32; 2usize]; 2usize], - pub i: [[i32; 2usize]; 2usize], - } - - pub union ScePspMatrix3 { - pub fm: ScePspFMatrix3, - pub im: ScePspIMatrix3, - pub fv: [ScePspFVector3; 3usize], - pub iv: [ScePspIVector3; 3usize], - pub v: [ScePspVector3; 3usize], - pub f: [[f32; 3usize]; 3usize], - pub i: [[i32; 3usize]; 3usize], - } - - pub union ScePspVector2 { - pub fv: ScePspFVector2, - pub iv: ScePspIVector2, - pub f: [f32; 2usize], - pub i: [i32; 2usize], - } - - pub union ScePspMatrix4 { - pub fm: ScePspFMatrix4, - pub im: ScePspIMatrix4, - pub fv: [ScePspFVector4; 4usize], - pub iv: [ScePspIVector4; 4usize], - pub v: [ScePspVector4; 4usize], - pub f: [[f32; 4usize]; 4usize], - pub i: [[i32; 4usize]; 4usize], - } - - pub struct Key { - pub key_type: KeyType, - pub name: [u8; 256usize], - pub name_len: u32, - pub unk2: u32, - pub unk3: u32, - } - - pub struct UtilityMsgDialogParams { - pub base: UtilityDialogCommon, - pub unknown: i32, - pub mode: UtilityMsgDialogMode, - pub error_value: u32, - pub message: [u8; 512usize], - pub options: i32, - pub button_pressed: UtilityMsgDialogPressed, - } - - pub union UtilityNetData { - pub as_uint: u32, - pub as_string: [u8; 128usize], - } - - pub struct UtilitySavedataSFOParam { - pub title: [u8; 128usize], - pub savedata_title: [u8; 128usize], - pub detail: [u8; 1024usize], - pub parental_level: u8, - pub unknown: [u8; 3usize], - } - - pub struct SceUtilitySavedataParam { - pub base: 
UtilityDialogCommon, - pub mode: UtilitySavedataMode, - pub unknown1: i32, - pub overwrite: i32, - pub game_name: [u8; 13usize], - pub reserved: [u8; 3usize], - pub save_name: [u8; 20usize], - pub save_name_list: *mut [u8; 20usize], - pub file_name: [u8; 13usize], - pub reserved1: [u8; 3usize], - pub data_buf: *mut c_void, - pub data_buf_size: usize, - pub data_size: usize, - pub sfo_param: UtilitySavedataSFOParam, - pub icon0_file_data: UtilitySavedataFileData, - pub icon1_file_data: UtilitySavedataFileData, - pub pic1_file_data: UtilitySavedataFileData, - pub snd0_file_data: UtilitySavedataFileData, - pub new_data: *mut UtilitySavedataListSaveNewData, - pub focus: UtilitySavedataFocus, - pub unknown2: [i32; 4usize], - pub key: [u8; 16], - pub unknown3: [u8; 20], - } - - pub struct SceNetAdhocctlPeerInfo { - pub next: *mut SceNetAdhocctlPeerInfo, - pub nickname: [u8; 128usize], - pub mac: [u8; 6usize], - pub unknown: [u8; 6usize], - pub timestamp: u32, - } - - pub struct SceNetAdhocctlParams { - pub channel: i32, - pub name: [u8; 8usize], - pub bssid: [u8; 6usize], - pub nickname: [u8; 128usize], - } - - pub union SceNetApctlInfo { - pub name: [u8; 64usize], - pub bssid: [u8; 6usize], - pub ssid: [u8; 32usize], - pub ssid_length: u32, - pub security_type: u32, - pub strength: u8, - pub channel: u8, - pub power_save: u8, - pub ip: [u8; 16usize], - pub sub_net_mask: [u8; 16usize], - pub gateway: [u8; 16usize], - pub primary_dns: [u8; 16usize], - pub secondary_dns: [u8; 16usize], - pub use_proxy: u32, - pub proxy_url: [u8; 128usize], - pub proxy_port: u16, - pub eap_type: u32, - pub start_browser: u32, - pub wifisp: u32, - } -} - -pub const INT_MIN: c_int = -2147483648; -pub const INT_MAX: c_int = 2147483647; - -pub const AUDIO_VOLUME_MAX: u32 = 0x8000; -pub const AUDIO_CHANNEL_MAX: u32 = 8; -pub const AUDIO_NEXT_CHANNEL: i32 = -1; -pub const AUDIO_SAMPLE_MIN: u32 = 64; -pub const AUDIO_SAMPLE_MAX: u32 = 65472; - -pub const PSP_CTRL_SELECT: i32 = 0x000001; -pub const PSP_CTRL_START: i32 = 0x000008; -pub const PSP_CTRL_UP: i32 = 0x000010; -pub const PSP_CTRL_RIGHT: i32 = 0x000020; -pub const PSP_CTRL_DOWN: i32 = 0x000040; -pub const PSP_CTRL_LEFT: i32 = 0x000080; -pub const PSP_CTRL_LTRIGGER: i32 = 0x000100; -pub const PSP_CTRL_RTRIGGER: i32 = 0x000200; -pub const PSP_CTRL_TRIANGLE: i32 = 0x001000; -pub const PSP_CTRL_CIRCLE: i32 = 0x002000; -pub const PSP_CTRL_CROSS: i32 = 0x004000; -pub const PSP_CTRL_SQUARE: i32 = 0x008000; -pub const PSP_CTRL_HOME: i32 = 0x010000; -pub const PSP_CTRL_HOLD: i32 = 0x020000; -pub const PSP_CTRL_NOTE: i32 = 0x800000; -pub const PSP_CTRL_SCREEN: i32 = 0x400000; -pub const PSP_CTRL_VOLUP: i32 = 0x100000; -pub const PSP_CTRL_VOLDOWN: i32 = 0x200000; -pub const PSP_CTRL_WLAN_UP: i32 = 0x040000; -pub const PSP_CTRL_REMOTE: i32 = 0x080000; -pub const PSP_CTRL_DISC: i32 = 0x1000000; -pub const PSP_CTRL_MS: i32 = 0x2000000; - -pub const USB_CAM_PID: i32 = 0x282; -pub const USB_BUS_DRIVER_NAME: &str = "USBBusDriver"; -pub const USB_CAM_DRIVER_NAME: &str = "USBCamDriver"; -pub const USB_CAM_MIC_DRIVER_NAME: &str = "USBCamMicDriver"; -pub const USB_STOR_DRIVER_NAME: &str = "USBStor_Driver"; - -pub const ACTIVATED: i32 = 0x200; -pub const CONNECTED: i32 = 0x020; -pub const ESTABLISHED: i32 = 0x002; - -pub const USB_CAM_FLIP: i32 = 1; -pub const USB_CAM_MIRROR: i32 = 0x100; - -pub const THREAD_ATTR_VFPU: i32 = 0x00004000; -pub const THREAD_ATTR_USER: i32 = 0x80000000; -pub const THREAD_ATTR_USBWLAN: i32 = 0xa0000000; -pub const THREAD_ATTR_VSH: i32 = 0xc0000000; -pub 
const THREAD_ATTR_SCRATCH_SRAM: i32 = 0x00008000; -pub const THREAD_ATTR_NO_FILLSTACK: i32 = 0x00100000; -pub const THREAD_ATTR_CLEAR_STACK: i32 = 0x00200000; - -pub const EVENT_WAIT_MULTIPLE: i32 = 0x200; - -pub const EVENT_WAIT_AND: i32 = 0; -pub const EVENT_WAIT_OR: i32 = 1; -pub const EVENT_WAIT_CLEAR: i32 = 0x20; - -pub const POWER_INFO_POWER_SWITCH: i32 = 0x80000000; -pub const POWER_INFO_HOLD_SWITCH: i32 = 0x40000000; -pub const POWER_INFO_STANDBY: i32 = 0x00080000; -pub const POWER_INFO_RESUME_COMPLETE: i32 = 0x00040000; -pub const POWER_INFO_RESUMING: i32 = 0x00020000; -pub const POWER_INFO_SUSPENDING: i32 = 0x00010000; -pub const POWER_INFO_AC_POWER: i32 = 0x00001000; -pub const POWER_INFO_BATTERY_LOW: i32 = 0x00000100; -pub const POWER_INFO_BATTERY_EXIST: i32 = 0x00000080; -pub const POWER_INFO_BATTERY_POWER: i32 = 0x0000007; - -pub const FIO_S_IFLNK: i32 = 0x4000; -pub const FIO_S_IFDIR: i32 = 0x1000; -pub const FIO_S_IFREG: i32 = 0x2000; -pub const FIO_S_ISUID: i32 = 0x0800; -pub const FIO_S_ISGID: i32 = 0x0400; -pub const FIO_S_ISVTX: i32 = 0x0200; -pub const FIO_S_IRUSR: i32 = 0x0100; -pub const FIO_S_IWUSR: i32 = 0x0080; -pub const FIO_S_IXUSR: i32 = 0x0040; -pub const FIO_S_IRGRP: i32 = 0x0020; -pub const FIO_S_IWGRP: i32 = 0x0010; -pub const FIO_S_IXGRP: i32 = 0x0008; -pub const FIO_S_IROTH: i32 = 0x0004; -pub const FIO_S_IWOTH: i32 = 0x0002; -pub const FIO_S_IXOTH: i32 = 0x0001; - -pub const FIO_SO_IFLNK: i32 = 0x0008; -pub const FIO_SO_IFDIR: i32 = 0x0010; -pub const FIO_SO_IFREG: i32 = 0x0020; -pub const FIO_SO_IROTH: i32 = 0x0004; -pub const FIO_SO_IWOTH: i32 = 0x0002; -pub const FIO_SO_IXOTH: i32 = 0x0001; - -pub const PSP_O_RD_ONLY: i32 = 0x0001; -pub const PSP_O_WR_ONLY: i32 = 0x0002; -pub const PSP_O_RD_WR: i32 = 0x0003; -pub const PSP_O_NBLOCK: i32 = 0x0004; -pub const PSP_O_DIR: i32 = 0x0008; -pub const PSP_O_APPEND: i32 = 0x0100; -pub const PSP_O_CREAT: i32 = 0x0200; -pub const PSP_O_TRUNC: i32 = 0x0400; -pub const PSP_O_EXCL: i32 = 0x0800; -pub const PSP_O_NO_WAIT: i32 = 0x8000; - -pub const UMD_NOT_PRESENT: i32 = 0x01; -pub const UMD_PRESENT: i32 = 0x02; -pub const UMD_CHANGED: i32 = 0x04; -pub const UMD_INITING: i32 = 0x08; -pub const UMD_INITED: i32 = 0x10; -pub const UMD_READY: i32 = 0x20; - -pub const PLAY_PAUSE: i32 = 0x1; -pub const FORWARD: i32 = 0x4; -pub const BACK: i32 = 0x8; -pub const VOL_UP: i32 = 0x10; -pub const VOL_DOWN: i32 = 0x20; -pub const HOLD: i32 = 0x80; - -pub const GU_PI: f32 = 3.141593; - -pub const GU_TEXTURE_8BIT: i32 = 1; -pub const GU_TEXTURE_16BIT: i32 = 2; -pub const GU_TEXTURE_32BITF: i32 = 3; -pub const GU_COLOR_5650: i32 = 4 << 2; -pub const GU_COLOR_5551: i32 = 5 << 2; -pub const GU_COLOR_4444: i32 = 6 << 2; -pub const GU_COLOR_8888: i32 = 7 << 2; -pub const GU_NORMAL_8BIT: i32 = 1 << 5; -pub const GU_NORMAL_16BIT: i32 = 2 << 5; -pub const GU_NORMAL_32BITF: i32 = 3 << 5; -pub const GU_VERTEX_8BIT: i32 = 1 << 7; -pub const GU_VERTEX_16BIT: i32 = 2 << 7; -pub const GU_VERTEX_32BITF: i32 = 3 << 7; -pub const GU_WEIGHT_8BIT: i32 = 1 << 9; -pub const GU_WEIGHT_16BIT: i32 = 2 << 9; -pub const GU_WEIGHT_32BITF: i32 = 3 << 9; -pub const GU_INDEX_8BIT: i32 = 1 << 11; -pub const GU_INDEX_16BIT: i32 = 2 << 11; -pub const GU_WEIGHTS1: i32 = (((1 - 1) & 7) << 14) as i32; -pub const GU_WEIGHTS2: i32 = (((2 - 1) & 7) << 14) as i32; -pub const GU_WEIGHTS3: i32 = (((3 - 1) & 7) << 14) as i32; -pub const GU_WEIGHTS4: i32 = (((4 - 1) & 7) << 14) as i32; -pub const GU_WEIGHTS5: i32 = (((5 - 1) & 7) << 14) as i32; -pub const GU_WEIGHTS6: i32 = 
(((6 - 1) & 7) << 14) as i32; -pub const GU_WEIGHTS7: i32 = (((7 - 1) & 7) << 14) as i32; -pub const GU_WEIGHTS8: i32 = (((8 - 1) & 7) << 14) as i32; -pub const GU_VERTICES1: i32 = (((1 - 1) & 7) << 18) as i32; -pub const GU_VERTICES2: i32 = (((2 - 1) & 7) << 18) as i32; -pub const GU_VERTICES3: i32 = (((3 - 1) & 7) << 18) as i32; -pub const GU_VERTICES4: i32 = (((4 - 1) & 7) << 18) as i32; -pub const GU_VERTICES5: i32 = (((5 - 1) & 7) << 18) as i32; -pub const GU_VERTICES6: i32 = (((6 - 1) & 7) << 18) as i32; -pub const GU_VERTICES7: i32 = (((7 - 1) & 7) << 18) as i32; -pub const GU_VERTICES8: i32 = (((8 - 1) & 7) << 18) as i32; -pub const GU_TRANSFORM_2D: i32 = 1 << 23; -pub const GU_TRANSFORM_3D: i32 = 0; - -pub const GU_COLOR_BUFFER_BIT: i32 = 1; -pub const GU_STENCIL_BUFFER_BIT: i32 = 2; -pub const GU_DEPTH_BUFFER_BIT: i32 = 4; -pub const GU_FAST_CLEAR_BIT: i32 = 16; - -pub const GU_AMBIENT: i32 = 1; -pub const GU_DIFFUSE: i32 = 2; -pub const GU_SPECULAR: i32 = 4; -pub const GU_UNKNOWN_LIGHT_COMPONENT: i32 = 8; - -pub const SYSTEM_REGISTRY: [u8; 7] = *b"/system"; -pub const REG_KEYNAME_SIZE: u32 = 27; - -pub const UTILITY_MSGDIALOG_ERROR: i32 = 0; -pub const UTILITY_MSGDIALOG_TEXT: i32 = 1; -pub const UTILITY_MSGDIALOG_YES_NO_BUTTONS: i32 = 0x10; -pub const UTILITY_MSGDIALOG_DEFAULT_NO: i32 = 0x100; - -pub const UTILITY_HTMLVIEWER_OPEN_SCE_START_PAGE: i32 = 0x000001; -pub const UTILITY_HTMLVIEWER_DISABLE_STARTUP_LIMITS: i32 = 0x000002; -pub const UTILITY_HTMLVIEWER_DISABLE_EXIT_DIALOG: i32 = 0x000004; -pub const UTILITY_HTMLVIEWER_DISABLE_CURSOR: i32 = 0x000008; -pub const UTILITY_HTMLVIEWER_DISABLE_DOWNLOAD_COMPLETE_DIALOG: i32 = 0x000010; -pub const UTILITY_HTMLVIEWER_DISABLE_DOWNLOAD_START_DIALOG: i32 = 0x000020; -pub const UTILITY_HTMLVIEWER_DISABLE_DOWNLOAD_DESTINATION_DIALOG: i32 = 0x000040; -pub const UTILITY_HTMLVIEWER_LOCK_DOWNLOAD_DESTINATION_DIALOG: i32 = 0x000080; -pub const UTILITY_HTMLVIEWER_DISABLE_TAB_DISPLAY: i32 = 0x000100; -pub const UTILITY_HTMLVIEWER_ENABLE_ANALOG_HOLD: i32 = 0x000200; -pub const UTILITY_HTMLVIEWER_ENABLE_FLASH: i32 = 0x000400; -pub const UTILITY_HTMLVIEWER_DISABLE_LRTRIGGER: i32 = 0x000800; - -extern "C" { - pub fn sceAudioChReserve(channel: i32, sample_count: i32, format: AudioFormat) -> i32; - pub fn sceAudioChRelease(channel: i32) -> i32; - pub fn sceAudioOutput(channel: i32, vol: i32, buf: *mut c_void) -> i32; - pub fn sceAudioOutputBlocking(channel: i32, vol: i32, buf: *mut c_void) -> i32; - pub fn sceAudioOutputPanned( - channel: i32, - left_vol: i32, - right_vol: i32, - buf: *mut c_void, - ) -> i32; - pub fn sceAudioOutputPannedBlocking( - channel: i32, - left_vol: i32, - right_vol: i32, - buf: *mut c_void, - ) -> i32; - pub fn sceAudioGetChannelRestLen(channel: i32) -> i32; - pub fn sceAudioGetChannelRestLength(channel: i32) -> i32; - pub fn sceAudioSetChannelDataLen(channel: i32, sample_count: i32) -> i32; - pub fn sceAudioChangeChannelConfig(channel: i32, format: AudioFormat) -> i32; - pub fn sceAudioChangeChannelVolume(channel: i32, left_vol: i32, right_vol: i32) -> i32; - pub fn sceAudioOutput2Reserve(sample_count: i32) -> i32; - pub fn sceAudioOutput2Release() -> i32; - pub fn sceAudioOutput2ChangeLength(sample_count: i32) -> i32; - pub fn sceAudioOutput2OutputBlocking(vol: i32, buf: *mut c_void) -> i32; - pub fn sceAudioOutput2GetRestSample() -> i32; - pub fn sceAudioSRCChReserve( - sample_count: i32, - freq: AudioOutputFrequency, - channels: i32, - ) -> i32; - pub fn sceAudioSRCChRelease() -> i32; - pub fn 
sceAudioSRCOutputBlocking(vol: i32, buf: *mut c_void) -> i32; - pub fn sceAudioInputInit(unknown1: i32, gain: i32, unknown2: i32) -> i32; - pub fn sceAudioInputInitEx(params: *mut AudioInputParams) -> i32; - pub fn sceAudioInputBlocking(sample_count: i32, freq: AudioInputFrequency, buf: *mut c_void); - pub fn sceAudioInput(sample_count: i32, freq: AudioInputFrequency, buf: *mut c_void); - pub fn sceAudioGetInputLength() -> i32; - pub fn sceAudioWaitInputEnd() -> i32; - pub fn sceAudioPollInputEnd() -> i32; - - pub fn sceAtracGetAtracID(ui_codec_type: u32) -> i32; - pub fn sceAtracSetDataAndGetID(buf: *mut c_void, bufsize: usize) -> i32; - pub fn sceAtracDecodeData( - atrac_id: i32, - out_samples: *mut u16, - out_n: *mut i32, - out_end: *mut i32, - out_remain_frame: *mut i32, - ) -> i32; - pub fn sceAtracGetRemainFrame(atrac_id: i32, out_remain_frame: *mut i32) -> i32; - pub fn sceAtracGetStreamDataInfo( - atrac_id: i32, - write_pointer: *mut *mut u8, - available_bytes: *mut u32, - read_offset: *mut u32, - ) -> i32; - pub fn sceAtracAddStreamData(atrac_id: i32, bytes_to_add: u32) -> i32; - pub fn sceAtracGetBitrate(atrac_id: i32, out_bitrate: *mut i32) -> i32; - pub fn sceAtracSetLoopNum(atrac_id: i32, nloops: i32) -> i32; - pub fn sceAtracReleaseAtracID(atrac_id: i32) -> i32; - pub fn sceAtracGetNextSample(atrac_id: i32, out_n: *mut i32) -> i32; - pub fn sceAtracGetMaxSample(atrac_id: i32, out_max: *mut i32) -> i32; - pub fn sceAtracGetBufferInfoForReseting( - atrac_id: i32, - ui_sample: u32, - pbuffer_info: *mut Atrac3BufferInfo, - ) -> i32; - pub fn sceAtracGetChannel(atrac_id: i32, pui_channel: *mut u32) -> i32; - pub fn sceAtracGetInternalErrorInfo(atrac_id: i32, pi_result: *mut i32) -> i32; - pub fn sceAtracGetLoopStatus( - atrac_id: i32, - pi_loop_num: *mut i32, - pui_loop_status: *mut u32, - ) -> i32; - pub fn sceAtracGetNextDecodePosition(atrac_id: i32, pui_sample_position: *mut u32) -> i32; - pub fn sceAtracGetSecondBufferInfo( - atrac_id: i32, - pui_position: *mut u32, - pui_data_byte: *mut u32, - ) -> i32; - pub fn sceAtracGetSoundSample( - atrac_id: i32, - pi_end_sample: *mut i32, - pi_loop_start_sample: *mut i32, - pi_loop_end_sample: *mut i32, - ) -> i32; - pub fn sceAtracResetPlayPosition( - atrac_id: i32, - ui_sample: u32, - ui_write_byte_first_buf: u32, - ui_write_byte_second_buf: u32, - ) -> i32; - pub fn sceAtracSetData(atrac_id: i32, puc_buffer_addr: *mut u8, ui_buffer_byte: u32) -> i32; - pub fn sceAtracSetHalfwayBuffer( - atrac_id: i32, - puc_buffer_addr: *mut u8, - ui_read_byte: u32, - ui_buffer_byte: u32, - ) -> i32; - pub fn sceAtracSetHalfwayBufferAndGetID( - puc_buffer_addr: *mut u8, - ui_read_byte: u32, - ui_buffer_byte: u32, - ) -> i32; - pub fn sceAtracSetSecondBuffer( - atrac_id: i32, - puc_second_buffer_addr: *mut u8, - ui_second_buffer_byte: u32, - ) -> i32; - - pub fn sceCtrlSetSamplingCycle(cycle: i32) -> i32; - pub fn sceCtrlGetSamplingCycle(pcycle: *mut i32) -> i32; - pub fn sceCtrlSetSamplingMode(mode: CtrlMode) -> i32; - pub fn sceCtrlGetSamplingMode(pmode: *mut i32) -> i32; - pub fn sceCtrlPeekBufferPositive(pad_data: *mut SceCtrlData, count: i32) -> i32; - pub fn sceCtrlPeekBufferNegative(pad_data: *mut SceCtrlData, count: i32) -> i32; - pub fn sceCtrlReadBufferPositive(pad_data: *mut SceCtrlData, count: i32) -> i32; - pub fn sceCtrlReadBufferNegative(pad_data: *mut SceCtrlData, count: i32) -> i32; - pub fn sceCtrlPeekLatch(latch_data: *mut SceCtrlLatch) -> i32; - pub fn sceCtrlReadLatch(latch_data: *mut SceCtrlLatch) -> i32; - pub fn 
sceCtrlSetIdleCancelThreshold(idlereset: i32, idleback: i32) -> i32; - pub fn sceCtrlGetIdleCancelThreshold(idlereset: *mut i32, idleback: *mut i32) -> i32; - - pub fn sceDisplaySetMode(mode: DisplayMode, width: usize, height: usize) -> u32; - pub fn sceDisplayGetMode(pmode: *mut i32, pwidth: *mut i32, pheight: *mut i32) -> i32; - pub fn sceDisplaySetFrameBuf( - top_addr: *const u8, - buffer_width: usize, - pixel_format: DisplayPixelFormat, - sync: DisplaySetBufSync, - ) -> u32; - pub fn sceDisplayGetFrameBuf( - top_addr: *mut *mut c_void, - buffer_width: *mut usize, - pixel_format: *mut DisplayPixelFormat, - sync: DisplaySetBufSync, - ) -> i32; - pub fn sceDisplayGetVcount() -> u32; - pub fn sceDisplayWaitVblank() -> i32; - pub fn sceDisplayWaitVblankCB() -> i32; - pub fn sceDisplayWaitVblankStart() -> i32; - pub fn sceDisplayWaitVblankStartCB() -> i32; - pub fn sceDisplayGetAccumulatedHcount() -> i32; - pub fn sceDisplayGetCurrentHcount() -> i32; - pub fn sceDisplayGetFramePerSec() -> f32; - pub fn sceDisplayIsForeground() -> i32; - pub fn sceDisplayIsVblank() -> i32; - - pub fn sceGeEdramGetSize() -> u32; - pub fn sceGeEdramGetAddr() -> *mut u8; - pub fn sceGeEdramSetAddrTranslation(width: i32) -> i32; - pub fn sceGeGetCmd(cmd: i32) -> u32; - pub fn sceGeGetMtx(type_: GeMatrixType, matrix: *mut c_void) -> i32; - pub fn sceGeGetStack(stack_id: i32, stack: *mut GeStack) -> i32; - pub fn sceGeSaveContext(context: *mut GeContext) -> i32; - pub fn sceGeRestoreContext(context: *const GeContext) -> i32; - pub fn sceGeListEnQueue( - list: *const c_void, - stall: *mut c_void, - cbid: i32, - arg: *mut GeListArgs, - ) -> i32; - pub fn sceGeListEnQueueHead( - list: *const c_void, - stall: *mut c_void, - cbid: i32, - arg: *mut GeListArgs, - ) -> i32; - pub fn sceGeListDeQueue(qid: i32) -> i32; - pub fn sceGeListUpdateStallAddr(qid: i32, stall: *mut c_void) -> i32; - pub fn sceGeListSync(qid: i32, sync_type: i32) -> GeListState; - pub fn sceGeDrawSync(sync_type: i32) -> GeListState; - pub fn sceGeBreak(mode: i32, p_param: *mut GeBreakParam) -> i32; - pub fn sceGeContinue() -> i32; - pub fn sceGeSetCallback(cb: *mut GeCallbackData) -> i32; - pub fn sceGeUnsetCallback(cbid: i32) -> i32; - - pub fn sceKernelExitGame(); - pub fn sceKernelRegisterExitCallback(id: SceUid) -> i32; - pub fn sceKernelLoadExec(file: *const u8, param: *mut SceKernelLoadExecParam) -> i32; - - pub fn sceKernelAllocPartitionMemory( - partition: SceSysMemPartitionId, - name: *const u8, - type_: SceSysMemBlockTypes, - size: u32, - addr: *mut c_void, - ) -> SceUid; - pub fn sceKernelGetBlockHeadAddr(blockid: SceUid) -> *mut c_void; - pub fn sceKernelFreePartitionMemory(blockid: SceUid) -> i32; - pub fn sceKernelTotalFreeMemSize() -> usize; - pub fn sceKernelMaxFreeMemSize() -> usize; - pub fn sceKernelDevkitVersion() -> u32; - pub fn sceKernelSetCompiledSdkVersion(version: u32) -> i32; - pub fn sceKernelGetCompiledSdkVersion() -> u32; - - pub fn sceKernelLibcTime(t: *mut i32) -> i32; - pub fn sceKernelLibcClock() -> u32; - pub fn sceKernelLibcGettimeofday(tp: *mut timeval, tzp: *mut timezone) -> i32; - pub fn sceKernelDcacheWritebackAll(); - pub fn sceKernelDcacheWritebackInvalidateAll(); - pub fn sceKernelDcacheWritebackRange(p: *const c_void, size: u32); - pub fn sceKernelDcacheWritebackInvalidateRange(p: *const c_void, size: u32); - pub fn sceKernelDcacheInvalidateRange(p: *const c_void, size: u32); - pub fn sceKernelIcacheInvalidateAll(); - pub fn sceKernelIcacheInvalidateRange(p: *const c_void, size: u32); - pub fn 
sceKernelUtilsMt19937Init(ctx: *mut SceKernelUtilsMt19937Context, seed: u32) -> i32; - pub fn sceKernelUtilsMt19937UInt(ctx: *mut SceKernelUtilsMt19937Context) -> u32; - pub fn sceKernelUtilsMd5Digest(data: *mut u8, size: u32, digest: *mut u8) -> i32; - pub fn sceKernelUtilsMd5BlockInit(ctx: *mut SceKernelUtilsMd5Context) -> i32; - pub fn sceKernelUtilsMd5BlockUpdate( - ctx: *mut SceKernelUtilsMd5Context, - data: *mut u8, - size: u32, - ) -> i32; - pub fn sceKernelUtilsMd5BlockResult(ctx: *mut SceKernelUtilsMd5Context, digest: *mut u8) - -> i32; - pub fn sceKernelUtilsSha1Digest(data: *mut u8, size: u32, digest: *mut u8) -> i32; - pub fn sceKernelUtilsSha1BlockInit(ctx: *mut SceKernelUtilsSha1Context) -> i32; - pub fn sceKernelUtilsSha1BlockUpdate( - ctx: *mut SceKernelUtilsSha1Context, - data: *mut u8, - size: u32, - ) -> i32; - pub fn sceKernelUtilsSha1BlockResult( - ctx: *mut SceKernelUtilsSha1Context, - digest: *mut u8, - ) -> i32; - - pub fn sceKernelRegisterSubIntrHandler( - int_no: i32, - no: i32, - handler: *mut c_void, - arg: *mut c_void, - ) -> i32; - pub fn sceKernelReleaseSubIntrHandler(int_no: i32, no: i32) -> i32; - pub fn sceKernelEnableSubIntr(int_no: i32, no: i32) -> i32; - pub fn sceKernelDisableSubIntr(int_no: i32, no: i32) -> i32; - pub fn QueryIntrHandlerInfo( - intr_code: SceUid, - sub_intr_code: SceUid, - data: *mut IntrHandlerOptionParam, - ) -> i32; - - pub fn sceKernelCpuSuspendIntr() -> u32; - pub fn sceKernelCpuResumeIntr(flags: u32); - pub fn sceKernelCpuResumeIntrWithSync(flags: u32); - pub fn sceKernelIsCpuIntrSuspended(flags: u32) -> i32; - pub fn sceKernelIsCpuIntrEnable() -> i32; - - pub fn sceKernelLoadModule( - path: *const u8, - flags: i32, - option: *mut SceKernelLMOption, - ) -> SceUid; - pub fn sceKernelLoadModuleMs( - path: *const u8, - flags: i32, - option: *mut SceKernelLMOption, - ) -> SceUid; - pub fn sceKernelLoadModuleByID( - fid: SceUid, - flags: i32, - option: *mut SceKernelLMOption, - ) -> SceUid; - pub fn sceKernelLoadModuleBufferUsbWlan( - buf_size: usize, - buf: *mut c_void, - flags: i32, - option: *mut SceKernelLMOption, - ) -> SceUid; - pub fn sceKernelStartModule( - mod_id: SceUid, - arg_size: usize, - argp: *mut c_void, - status: *mut i32, - option: *mut SceKernelSMOption, - ) -> i32; - pub fn sceKernelStopModule( - mod_id: SceUid, - arg_size: usize, - argp: *mut c_void, - status: *mut i32, - option: *mut SceKernelSMOption, - ) -> i32; - pub fn sceKernelUnloadModule(mod_id: SceUid) -> i32; - pub fn sceKernelSelfStopUnloadModule(unknown: i32, arg_size: usize, argp: *mut c_void) -> i32; - pub fn sceKernelStopUnloadSelfModule( - arg_size: usize, - argp: *mut c_void, - status: *mut i32, - option: *mut SceKernelSMOption, - ) -> i32; - pub fn sceKernelQueryModuleInfo(mod_id: SceUid, info: *mut SceKernelModuleInfo) -> i32; - pub fn sceKernelGetModuleIdList( - read_buf: *mut SceUid, - read_buf_size: i32, - id_count: *mut i32, - ) -> i32; - - pub fn sceKernelVolatileMemLock(unk: i32, ptr: *mut *mut c_void, size: *mut i32) -> i32; - pub fn sceKernelVolatileMemTryLock(unk: i32, ptr: *mut *mut c_void, size: *mut i32) -> i32; - pub fn sceKernelVolatileMemUnlock(unk: i32) -> i32; - - pub fn sceKernelStdin() -> SceUid; - pub fn sceKernelStdout() -> SceUid; - pub fn sceKernelStderr() -> SceUid; - - pub fn sceKernelGetThreadmanIdType(uid: SceUid) -> SceKernelIdListType; - pub fn sceKernelCreateThread( - name: *const u8, - entry: SceKernelThreadEntry, - init_priority: i32, - stack_size: i32, - attr: i32, - option: *mut SceKernelThreadOptParam, - ) -> 
SceUid; - pub fn sceKernelDeleteThread(thid: SceUid) -> i32; - pub fn sceKernelStartThread(id: SceUid, arg_len: usize, arg_p: *mut c_void) -> i32; - pub fn sceKernelExitThread(status: i32) -> i32; - pub fn sceKernelExitDeleteThread(status: i32) -> i32; - pub fn sceKernelTerminateThread(thid: SceUid) -> i32; - pub fn sceKernelTerminateDeleteThread(thid: SceUid) -> i32; - pub fn sceKernelSuspendDispatchThread() -> i32; - pub fn sceKernelResumeDispatchThread(state: i32) -> i32; - pub fn sceKernelSleepThread() -> i32; - pub fn sceKernelSleepThreadCB() -> i32; - pub fn sceKernelWakeupThread(thid: SceUid) -> i32; - pub fn sceKernelCancelWakeupThread(thid: SceUid) -> i32; - pub fn sceKernelSuspendThread(thid: SceUid) -> i32; - pub fn sceKernelResumeThread(thid: SceUid) -> i32; - pub fn sceKernelWaitThreadEnd(thid: SceUid, timeout: *mut u32) -> i32; - pub fn sceKernelWaitThreadEndCB(thid: SceUid, timeout: *mut u32) -> i32; - pub fn sceKernelDelayThread(delay: u32) -> i32; - pub fn sceKernelDelayThreadCB(delay: u32) -> i32; - pub fn sceKernelDelaySysClockThread(delay: *mut SceKernelSysClock) -> i32; - pub fn sceKernelDelaySysClockThreadCB(delay: *mut SceKernelSysClock) -> i32; - pub fn sceKernelChangeCurrentThreadAttr(unknown: i32, attr: i32) -> i32; - pub fn sceKernelChangeThreadPriority(thid: SceUid, priority: i32) -> i32; - pub fn sceKernelRotateThreadReadyQueue(priority: i32) -> i32; - pub fn sceKernelReleaseWaitThread(thid: SceUid) -> i32; - pub fn sceKernelGetThreadId() -> i32; - pub fn sceKernelGetThreadCurrentPriority() -> i32; - pub fn sceKernelGetThreadExitStatus(thid: SceUid) -> i32; - pub fn sceKernelCheckThreadStack() -> i32; - pub fn sceKernelGetThreadStackFreeSize(thid: SceUid) -> i32; - pub fn sceKernelReferThreadStatus(thid: SceUid, info: *mut SceKernelThreadInfo) -> i32; - pub fn sceKernelReferThreadRunStatus( - thid: SceUid, - status: *mut SceKernelThreadRunStatus, - ) -> i32; - pub fn sceKernelCreateSema( - name: *const u8, - attr: u32, - init_val: i32, - max_val: i32, - option: *mut SceKernelSemaOptParam, - ) -> SceUid; - pub fn sceKernelDeleteSema(sema_id: SceUid) -> i32; - pub fn sceKernelSignalSema(sema_id: SceUid, signal: i32) -> i32; - pub fn sceKernelWaitSema(sema_id: SceUid, signal: i32, timeout: *mut u32) -> i32; - pub fn sceKernelWaitSemaCB(sema_id: SceUid, signal: i32, timeout: *mut u32) -> i32; - pub fn sceKernelPollSema(sema_id: SceUid, signal: i32) -> i32; - pub fn sceKernelReferSemaStatus(sema_id: SceUid, info: *mut SceKernelSemaInfo) -> i32; - pub fn sceKernelCreateEventFlag( - name: *const u8, - attr: i32, - bits: i32, - opt: *mut SceKernelEventFlagOptParam, - ) -> SceUid; - pub fn sceKernelSetEventFlag(ev_id: SceUid, bits: u32) -> i32; - pub fn sceKernelClearEventFlag(ev_id: SceUid, bits: u32) -> i32; - pub fn sceKernelPollEventFlag(ev_id: SceUid, bits: u32, wait: i32, out_bits: *mut u32) -> i32; - pub fn sceKernelWaitEventFlag( - ev_id: SceUid, - bits: u32, - wait: i32, - out_bits: *mut u32, - timeout: *mut u32, - ) -> i32; - pub fn sceKernelWaitEventFlagCB( - ev_id: SceUid, - bits: u32, - wait: i32, - out_bits: *mut u32, - timeout: *mut u32, - ) -> i32; - pub fn sceKernelDeleteEventFlag(ev_id: SceUid) -> i32; - pub fn sceKernelReferEventFlagStatus(event: SceUid, status: *mut SceKernelEventFlagInfo) - -> i32; - pub fn sceKernelCreateMbx( - name: *const u8, - attr: u32, - option: *mut SceKernelMbxOptParam, - ) -> SceUid; - pub fn sceKernelDeleteMbx(mbx_id: SceUid) -> i32; - pub fn sceKernelSendMbx(mbx_id: SceUid, message: *mut c_void) -> i32; - pub fn 
sceKernelReceiveMbx(mbx_id: SceUid, message: *mut *mut c_void, timeout: *mut u32) - -> i32; - pub fn sceKernelReceiveMbxCB( - mbx_id: SceUid, - message: *mut *mut c_void, - timeout: *mut u32, - ) -> i32; - pub fn sceKernelPollMbx(mbx_id: SceUid, pmessage: *mut *mut c_void) -> i32; - pub fn sceKernelCancelReceiveMbx(mbx_id: SceUid, num: *mut i32) -> i32; - pub fn sceKernelReferMbxStatus(mbx_id: SceUid, info: *mut SceKernelMbxInfo) -> i32; - pub fn sceKernelSetAlarm( - clock: u32, - handler: SceKernelAlarmHandler, - common: *mut c_void, - ) -> SceUid; - pub fn sceKernelSetSysClockAlarm( - clock: *mut SceKernelSysClock, - handler: *mut SceKernelAlarmHandler, - common: *mut c_void, - ) -> SceUid; - pub fn sceKernelCancelAlarm(alarm_id: SceUid) -> i32; - pub fn sceKernelReferAlarmStatus(alarm_id: SceUid, info: *mut SceKernelAlarmInfo) -> i32; - pub fn sceKernelCreateCallback( - name: *const u8, - func: SceKernelCallbackFunction, - arg: *mut c_void, - ) -> SceUid; - pub fn sceKernelReferCallbackStatus(cb: SceUid, status: *mut SceKernelCallbackInfo) -> i32; - pub fn sceKernelDeleteCallback(cb: SceUid) -> i32; - pub fn sceKernelNotifyCallback(cb: SceUid, arg2: i32) -> i32; - pub fn sceKernelCancelCallback(cb: SceUid) -> i32; - pub fn sceKernelGetCallbackCount(cb: SceUid) -> i32; - pub fn sceKernelCheckCallback() -> i32; - pub fn sceKernelGetThreadmanIdList( - type_: SceKernelIdListType, - read_buf: *mut SceUid, - read_buf_size: i32, - id_count: *mut i32, - ) -> i32; - pub fn sceKernelReferSystemStatus(status: *mut SceKernelSystemStatus) -> i32; - pub fn sceKernelCreateMsgPipe( - name: *const u8, - part: i32, - attr: i32, - unk1: *mut c_void, - opt: *mut c_void, - ) -> SceUid; - pub fn sceKernelDeleteMsgPipe(uid: SceUid) -> i32; - pub fn sceKernelSendMsgPipe( - uid: SceUid, - message: *mut c_void, - size: u32, - unk1: i32, - unk2: *mut c_void, - timeout: *mut u32, - ) -> i32; - pub fn sceKernelSendMsgPipeCB( - uid: SceUid, - message: *mut c_void, - size: u32, - unk1: i32, - unk2: *mut c_void, - timeout: *mut u32, - ) -> i32; - pub fn sceKernelTrySendMsgPipe( - uid: SceUid, - message: *mut c_void, - size: u32, - unk1: i32, - unk2: *mut c_void, - ) -> i32; - pub fn sceKernelReceiveMsgPipe( - uid: SceUid, - message: *mut c_void, - size: u32, - unk1: i32, - unk2: *mut c_void, - timeout: *mut u32, - ) -> i32; - pub fn sceKernelReceiveMsgPipeCB( - uid: SceUid, - message: *mut c_void, - size: u32, - unk1: i32, - unk2: *mut c_void, - timeout: *mut u32, - ) -> i32; - pub fn sceKernelTryReceiveMsgPipe( - uid: SceUid, - message: *mut c_void, - size: u32, - unk1: i32, - unk2: *mut c_void, - ) -> i32; - pub fn sceKernelCancelMsgPipe(uid: SceUid, send: *mut i32, recv: *mut i32) -> i32; - pub fn sceKernelReferMsgPipeStatus(uid: SceUid, info: *mut SceKernelMppInfo) -> i32; - pub fn sceKernelCreateVpl( - name: *const u8, - part: i32, - attr: i32, - size: u32, - opt: *mut SceKernelVplOptParam, - ) -> SceUid; - pub fn sceKernelDeleteVpl(uid: SceUid) -> i32; - pub fn sceKernelAllocateVpl( - uid: SceUid, - size: u32, - data: *mut *mut c_void, - timeout: *mut u32, - ) -> i32; - pub fn sceKernelAllocateVplCB( - uid: SceUid, - size: u32, - data: *mut *mut c_void, - timeout: *mut u32, - ) -> i32; - pub fn sceKernelTryAllocateVpl(uid: SceUid, size: u32, data: *mut *mut c_void) -> i32; - pub fn sceKernelFreeVpl(uid: SceUid, data: *mut c_void) -> i32; - pub fn sceKernelCancelVpl(uid: SceUid, num: *mut i32) -> i32; - pub fn sceKernelReferVplStatus(uid: SceUid, info: *mut SceKernelVplInfo) -> i32; - pub fn sceKernelCreateFpl( - 
name: *const u8, - part: i32, - attr: i32, - size: u32, - blocks: u32, - opt: *mut SceKernelFplOptParam, - ) -> i32; - pub fn sceKernelDeleteFpl(uid: SceUid) -> i32; - pub fn sceKernelAllocateFpl(uid: SceUid, data: *mut *mut c_void, timeout: *mut u32) -> i32; - pub fn sceKernelAllocateFplCB(uid: SceUid, data: *mut *mut c_void, timeout: *mut u32) -> i32; - pub fn sceKernelTryAllocateFpl(uid: SceUid, data: *mut *mut c_void) -> i32; - pub fn sceKernelFreeFpl(uid: SceUid, data: *mut c_void) -> i32; - pub fn sceKernelCancelFpl(uid: SceUid, pnum: *mut i32) -> i32; - pub fn sceKernelReferFplStatus(uid: SceUid, info: *mut SceKernelFplInfo) -> i32; - pub fn sceKernelUSec2SysClock(usec: u32, clock: *mut SceKernelSysClock) -> i32; - pub fn sceKernelUSec2SysClockWide(usec: u32) -> i64; - pub fn sceKernelSysClock2USec( - clock: *mut SceKernelSysClock, - low: *mut u32, - high: *mut u32, - ) -> i32; - pub fn sceKernelSysClock2USecWide(clock: i64, low: *mut u32, high: *mut u32) -> i32; - pub fn sceKernelGetSystemTime(time: *mut SceKernelSysClock) -> i32; - pub fn sceKernelGetSystemTimeWide() -> i64; - pub fn sceKernelGetSystemTimeLow() -> u32; - pub fn sceKernelCreateVTimer(name: *const u8, opt: *mut SceKernelVTimerOptParam) -> SceUid; - pub fn sceKernelDeleteVTimer(uid: SceUid) -> i32; - pub fn sceKernelGetVTimerBase(uid: SceUid, base: *mut SceKernelSysClock) -> i32; - pub fn sceKernelGetVTimerBaseWide(uid: SceUid) -> i64; - pub fn sceKernelGetVTimerTime(uid: SceUid, time: *mut SceKernelSysClock) -> i32; - pub fn sceKernelGetVTimerTimeWide(uid: SceUid) -> i64; - pub fn sceKernelSetVTimerTime(uid: SceUid, time: *mut SceKernelSysClock) -> i32; - pub fn sceKernelSetVTimerTimeWide(uid: SceUid, time: i64) -> i64; - pub fn sceKernelStartVTimer(uid: SceUid) -> i32; - pub fn sceKernelStopVTimer(uid: SceUid) -> i32; - pub fn sceKernelSetVTimerHandler( - uid: SceUid, - time: *mut SceKernelSysClock, - handler: SceKernelVTimerHandler, - common: *mut c_void, - ) -> i32; - pub fn sceKernelSetVTimerHandlerWide( - uid: SceUid, - time: i64, - handler: SceKernelVTimerHandlerWide, - common: *mut c_void, - ) -> i32; - pub fn sceKernelCancelVTimerHandler(uid: SceUid) -> i32; - pub fn sceKernelReferVTimerStatus(uid: SceUid, info: *mut SceKernelVTimerInfo) -> i32; - pub fn sceKernelRegisterThreadEventHandler( - name: *const u8, - thread_id: SceUid, - mask: i32, - handler: SceKernelThreadEventHandler, - common: *mut c_void, - ) -> SceUid; - pub fn sceKernelReleaseThreadEventHandler(uid: SceUid) -> i32; - pub fn sceKernelReferThreadEventHandlerStatus( - uid: SceUid, - info: *mut SceKernelThreadEventHandlerInfo, - ) -> i32; - pub fn sceKernelReferThreadProfiler() -> *mut DebugProfilerRegs; - pub fn sceKernelReferGlobalProfiler() -> *mut DebugProfilerRegs; - - pub fn sceUsbStart(driver_name: *const u8, size: i32, args: *mut c_void) -> i32; - pub fn sceUsbStop(driver_name: *const u8, size: i32, args: *mut c_void) -> i32; - pub fn sceUsbActivate(pid: u32) -> i32; - pub fn sceUsbDeactivate(pid: u32) -> i32; - pub fn sceUsbGetState() -> i32; - pub fn sceUsbGetDrvState(driver_name: *const u8) -> i32; -} - -extern "C" { - pub fn sceUsbCamSetupStill(param: *mut UsbCamSetupStillParam) -> i32; - pub fn sceUsbCamSetupStillEx(param: *mut UsbCamSetupStillExParam) -> i32; - pub fn sceUsbCamStillInputBlocking(buf: *mut u8, size: usize) -> i32; - pub fn sceUsbCamStillInput(buf: *mut u8, size: usize) -> i32; - pub fn sceUsbCamStillWaitInputEnd() -> i32; - pub fn sceUsbCamStillPollInputEnd() -> i32; - pub fn sceUsbCamStillCancelInput() -> i32; - 
pub fn sceUsbCamStillGetInputLength() -> i32; - pub fn sceUsbCamSetupVideo( - param: *mut UsbCamSetupVideoParam, - work_area: *mut c_void, - work_area_size: i32, - ) -> i32; - pub fn sceUsbCamSetupVideoEx( - param: *mut UsbCamSetupVideoExParam, - work_area: *mut c_void, - work_area_size: i32, - ) -> i32; - pub fn sceUsbCamStartVideo() -> i32; - pub fn sceUsbCamStopVideo() -> i32; - pub fn sceUsbCamReadVideoFrameBlocking(buf: *mut u8, size: usize) -> i32; - pub fn sceUsbCamReadVideoFrame(buf: *mut u8, size: usize) -> i32; - pub fn sceUsbCamWaitReadVideoFrameEnd() -> i32; - pub fn sceUsbCamPollReadVideoFrameEnd() -> i32; - pub fn sceUsbCamGetReadVideoFrameSize() -> i32; - pub fn sceUsbCamSetSaturation(saturation: i32) -> i32; - pub fn sceUsbCamSetBrightness(brightness: i32) -> i32; - pub fn sceUsbCamSetContrast(contrast: i32) -> i32; - pub fn sceUsbCamSetSharpness(sharpness: i32) -> i32; - pub fn sceUsbCamSetImageEffectMode(effect_mode: UsbCamEffectMode) -> i32; - pub fn sceUsbCamSetEvLevel(exposure_level: UsbCamEvLevel) -> i32; - pub fn sceUsbCamSetReverseMode(reverse_flags: i32) -> i32; - pub fn sceUsbCamSetZoom(zoom: i32) -> i32; - pub fn sceUsbCamGetSaturation(saturation: *mut i32) -> i32; - pub fn sceUsbCamGetBrightness(brightness: *mut i32) -> i32; - pub fn sceUsbCamGetContrast(contrast: *mut i32) -> i32; - pub fn sceUsbCamGetSharpness(sharpness: *mut i32) -> i32; - pub fn sceUsbCamGetImageEffectMode(effect_mode: *mut UsbCamEffectMode) -> i32; - pub fn sceUsbCamGetEvLevel(exposure_level: *mut UsbCamEvLevel) -> i32; - pub fn sceUsbCamGetReverseMode(reverse_flags: *mut i32) -> i32; - pub fn sceUsbCamGetZoom(zoom: *mut i32) -> i32; - pub fn sceUsbCamAutoImageReverseSW(on: i32) -> i32; - pub fn sceUsbCamGetAutoImageReverseState() -> i32; - pub fn sceUsbCamGetLensDirection() -> i32; - - pub fn sceUsbstorBootRegisterNotify(event_flag: SceUid) -> i32; - pub fn sceUsbstorBootUnregisterNotify(event_flag: u32) -> i32; - pub fn sceUsbstorBootSetCapacity(size: u32) -> i32; - - pub fn scePowerRegisterCallback(slot: i32, cbid: SceUid) -> i32; - pub fn scePowerUnregisterCallback(slot: i32) -> i32; - pub fn scePowerIsPowerOnline() -> i32; - pub fn scePowerIsBatteryExist() -> i32; - pub fn scePowerIsBatteryCharging() -> i32; - pub fn scePowerGetBatteryChargingStatus() -> i32; - pub fn scePowerIsLowBattery() -> i32; - pub fn scePowerGetBatteryLifePercent() -> i32; - pub fn scePowerGetBatteryLifeTime() -> i32; - pub fn scePowerGetBatteryTemp() -> i32; - pub fn scePowerGetBatteryElec() -> i32; - pub fn scePowerGetBatteryVolt() -> i32; - pub fn scePowerSetCpuClockFrequency(cpufreq: i32) -> i32; - pub fn scePowerSetBusClockFrequency(busfreq: i32) -> i32; - pub fn scePowerGetCpuClockFrequency() -> i32; - pub fn scePowerGetCpuClockFrequencyInt() -> i32; - pub fn scePowerGetCpuClockFrequencyFloat() -> f32; - pub fn scePowerGetBusClockFrequency() -> i32; - pub fn scePowerGetBusClockFrequencyInt() -> i32; - pub fn scePowerGetBusClockFrequencyFloat() -> f32; - pub fn scePowerSetClockFrequency(pllfreq: i32, cpufreq: i32, busfreq: i32) -> i32; - pub fn scePowerLock(unknown: i32) -> i32; - pub fn scePowerUnlock(unknown: i32) -> i32; - pub fn scePowerTick(t: PowerTick) -> i32; - pub fn scePowerGetIdleTimer() -> i32; - pub fn scePowerIdleTimerEnable(unknown: i32) -> i32; - pub fn scePowerIdleTimerDisable(unknown: i32) -> i32; - pub fn scePowerRequestStandby() -> i32; - pub fn scePowerRequestSuspend() -> i32; - - pub fn sceWlanDevIsPowerOn() -> i32; - pub fn sceWlanGetSwitchState() -> i32; - pub fn 
sceWlanGetEtherAddr(ether_addr: *mut u8) -> i32; - - pub fn sceWlanDevAttach() -> i32; - pub fn sceWlanDevDetach() -> i32; - - pub fn sceRtcGetTickResolution() -> u32; - pub fn sceRtcGetCurrentTick(tick: *mut u64) -> i32; - pub fn sceRtcGetCurrentClock(tm: *mut ScePspDateTime, tz: i32) -> i32; - pub fn sceRtcGetCurrentClockLocalTime(tm: *mut ScePspDateTime) -> i32; - pub fn sceRtcConvertUtcToLocalTime(tick_utc: *const u64, tick_local: *mut u64) -> i32; - pub fn sceRtcConvertLocalTimeToUTC(tick_local: *const u64, tick_utc: *mut u64) -> i32; - pub fn sceRtcIsLeapYear(year: i32) -> i32; - pub fn sceRtcGetDaysInMonth(year: i32, month: i32) -> i32; - pub fn sceRtcGetDayOfWeek(year: i32, month: i32, day: i32) -> i32; - pub fn sceRtcCheckValid(date: *const ScePspDateTime) -> i32; - pub fn sceRtcSetTick(date: *mut ScePspDateTime, tick: *const u64) -> i32; - pub fn sceRtcGetTick(date: *const ScePspDateTime, tick: *mut u64) -> i32; - pub fn sceRtcCompareTick(tick1: *const u64, tick2: *const u64) -> i32; - pub fn sceRtcTickAddTicks(dest_tick: *mut u64, src_tick: *const u64, num_ticks: u64) -> i32; - pub fn sceRtcTickAddMicroseconds(dest_tick: *mut u64, src_tick: *const u64, num_ms: u64) - -> i32; - pub fn sceRtcTickAddSeconds(dest_tick: *mut u64, src_tick: *const u64, num_seconds: u64) - -> i32; - pub fn sceRtcTickAddMinutes(dest_tick: *mut u64, src_tick: *const u64, num_minutes: u64) - -> i32; - pub fn sceRtcTickAddHours(dest_tick: *mut u64, src_tick: *const u64, num_hours: u64) -> i32; - pub fn sceRtcTickAddDays(dest_tick: *mut u64, src_tick: *const u64, num_days: u64) -> i32; - pub fn sceRtcTickAddWeeks(dest_tick: *mut u64, src_tick: *const u64, num_weeks: u64) -> i32; - pub fn sceRtcTickAddMonths(dest_tick: *mut u64, src_tick: *const u64, num_months: u64) -> i32; - pub fn sceRtcTickAddYears(dest_tick: *mut u64, src_tick: *const u64, num_years: u64) -> i32; - pub fn sceRtcSetTime_t(date: *mut ScePspDateTime, time: u32) -> i32; - pub fn sceRtcGetTime_t(date: *const ScePspDateTime, time: *mut u32) -> i32; - pub fn sceRtcSetTime64_t(date: *mut ScePspDateTime, time: u64) -> i32; - pub fn sceRtcGetTime64_t(date: *const ScePspDateTime, time: *mut u64) -> i32; - pub fn sceRtcSetDosTime(date: *mut ScePspDateTime, dos_time: u32) -> i32; - pub fn sceRtcGetDosTime(date: *mut ScePspDateTime, dos_time: u32) -> i32; - pub fn sceRtcSetWin32FileTime(date: *mut ScePspDateTime, time: *mut u64) -> i32; - pub fn sceRtcGetWin32FileTime(date: *mut ScePspDateTime, time: *mut u64) -> i32; - pub fn sceRtcParseDateTime(dest_tick: *mut u64, date_string: *const u8) -> i32; - pub fn sceRtcFormatRFC3339( - psz_date_time: *mut c_char, - p_utc: *const u64, - time_zone_minutes: i32, - ) -> i32; - pub fn sceRtcFormatRFC3339LocalTime(psz_date_time: *mut c_char, p_utc: *const u64) -> i32; - pub fn sceRtcParseRFC3339(p_utc: *mut u64, psz_date_time: *const u8) -> i32; - pub fn sceRtcFormatRFC2822( - psz_date_time: *mut c_char, - p_utc: *const u64, - time_zone_minutes: i32, - ) -> i32; - pub fn sceRtcFormatRFC2822LocalTime(psz_date_time: *mut c_char, p_utc: *const u64) -> i32; - - pub fn sceIoOpen(file: *const u8, flags: i32, permissions: IoPermissions) -> SceUid; - pub fn sceIoOpenAsync(file: *const u8, flags: i32, permissions: IoPermissions) -> SceUid; - pub fn sceIoClose(fd: SceUid) -> i32; - pub fn sceIoCloseAsync(fd: SceUid) -> i32; - pub fn sceIoRead(fd: SceUid, data: *mut c_void, size: u32) -> i32; - pub fn sceIoReadAsync(fd: SceUid, data: *mut c_void, size: u32) -> i32; - pub fn sceIoWrite(fd: SceUid, data: *const c_void, size: 
usize) -> i32; - pub fn sceIoWriteAsync(fd: SceUid, data: *const c_void, size: u32) -> i32; - pub fn sceIoLseek(fd: SceUid, offset: i64, whence: IoWhence) -> i64; - pub fn sceIoLseekAsync(fd: SceUid, offset: i64, whence: IoWhence) -> i32; - pub fn sceIoLseek32(fd: SceUid, offset: i32, whence: IoWhence) -> i32; - pub fn sceIoLseek32Async(fd: SceUid, offset: i32, whence: IoWhence) -> i32; - pub fn sceIoRemove(file: *const u8) -> i32; - pub fn sceIoMkdir(dir: *const u8, mode: IoPermissions) -> i32; - pub fn sceIoRmdir(path: *const u8) -> i32; - pub fn sceIoChdir(path: *const u8) -> i32; - pub fn sceIoRename(oldname: *const u8, newname: *const u8) -> i32; - pub fn sceIoDopen(dirname: *const u8) -> SceUid; - pub fn sceIoDread(fd: SceUid, dir: *mut SceIoDirent) -> i32; - pub fn sceIoDclose(fd: SceUid) -> i32; - pub fn sceIoDevctl( - dev: *const u8, - cmd: u32, - indata: *mut c_void, - inlen: i32, - outdata: *mut c_void, - outlen: i32, - ) -> i32; - pub fn sceIoAssign( - dev1: *const u8, - dev2: *const u8, - dev3: *const u8, - mode: IoAssignPerms, - unk1: *mut c_void, - unk2: i32, - ) -> i32; - pub fn sceIoUnassign(dev: *const u8) -> i32; - pub fn sceIoGetstat(file: *const u8, stat: *mut SceIoStat) -> i32; - pub fn sceIoChstat(file: *const u8, stat: *mut SceIoStat, bits: i32) -> i32; - pub fn sceIoIoctl( - fd: SceUid, - cmd: u32, - indata: *mut c_void, - inlen: i32, - outdata: *mut c_void, - outlen: i32, - ) -> i32; - pub fn sceIoIoctlAsync( - fd: SceUid, - cmd: u32, - indata: *mut c_void, - inlen: i32, - outdata: *mut c_void, - outlen: i32, - ) -> i32; - pub fn sceIoSync(device: *const u8, unk: u32) -> i32; - pub fn sceIoWaitAsync(fd: SceUid, res: *mut i64) -> i32; - pub fn sceIoWaitAsyncCB(fd: SceUid, res: *mut i64) -> i32; - pub fn sceIoPollAsync(fd: SceUid, res: *mut i64) -> i32; - pub fn sceIoGetAsyncStat(fd: SceUid, poll: i32, res: *mut i64) -> i32; - pub fn sceIoCancel(fd: SceUid) -> i32; - pub fn sceIoGetDevType(fd: SceUid) -> i32; - pub fn sceIoChangeAsyncPriority(fd: SceUid, pri: i32) -> i32; - pub fn sceIoSetAsyncCallback(fd: SceUid, cb: SceUid, argp: *mut c_void) -> i32; - - pub fn sceJpegInitMJpeg() -> i32; - pub fn sceJpegFinishMJpeg() -> i32; - pub fn sceJpegCreateMJpeg(width: i32, height: i32) -> i32; - pub fn sceJpegDeleteMJpeg() -> i32; - pub fn sceJpegDecodeMJpeg(jpeg_buf: *mut u8, size: usize, rgba: *mut c_void, unk: u32) -> i32; - - pub fn sceUmdCheckMedium() -> i32; - pub fn sceUmdGetDiscInfo(info: *mut UmdInfo) -> i32; - pub fn sceUmdActivate(unit: i32, drive: *const u8) -> i32; - pub fn sceUmdDeactivate(unit: i32, drive: *const u8) -> i32; - pub fn sceUmdWaitDriveStat(state: i32) -> i32; - pub fn sceUmdWaitDriveStatWithTimer(state: i32, timeout: u32) -> i32; - pub fn sceUmdWaitDriveStatCB(state: i32, timeout: u32) -> i32; - pub fn sceUmdCancelWaitDriveStat() -> i32; - pub fn sceUmdGetDriveStat() -> i32; - pub fn sceUmdGetErrorStat() -> i32; - pub fn sceUmdRegisterUMDCallBack(cbid: i32) -> i32; - pub fn sceUmdUnRegisterUMDCallBack(cbid: i32) -> i32; - pub fn sceUmdReplacePermit() -> i32; - pub fn sceUmdReplaceProhibit() -> i32; - - pub fn sceMpegInit() -> i32; - pub fn sceMpegFinish(); - pub fn sceMpegRingbufferQueryMemSize(packets: i32) -> i32; - pub fn sceMpegRingbufferConstruct( - ringbuffer: *mut SceMpegRingbuffer, - packets: i32, - data: *mut c_void, - size: i32, - callback: SceMpegRingbufferCb, - cb_param: *mut c_void, - ) -> i32; - pub fn sceMpegRingbufferDestruct(ringbuffer: *mut SceMpegRingbuffer); - pub fn sceMpegRingbufferAvailableSize(ringbuffer: *mut 
SceMpegRingbuffer) -> i32; - pub fn sceMpegRingbufferPut( - ringbuffer: *mut SceMpegRingbuffer, - num_packets: i32, - available: i32, - ) -> i32; - pub fn sceMpegQueryMemSize(unk: i32) -> i32; - pub fn sceMpegCreate( - handle: SceMpeg, - data: *mut c_void, - size: i32, - ringbuffer: *mut SceMpegRingbuffer, - frame_width: i32, - unk1: i32, - unk2: i32, - ) -> i32; - pub fn sceMpegDelete(handle: SceMpeg); - pub fn sceMpegQueryStreamOffset(handle: SceMpeg, buffer: *mut c_void, offset: *mut i32) -> i32; - pub fn sceMpegQueryStreamSize(buffer: *mut c_void, size: *mut i32) -> i32; - pub fn sceMpegRegistStream(handle: SceMpeg, stream_id: i32, unk: i32) -> SceMpegStream; - pub fn sceMpegUnRegistStream(handle: SceMpeg, stream: SceMpegStream); - pub fn sceMpegFlushAllStream(handle: SceMpeg) -> i32; - pub fn sceMpegMallocAvcEsBuf(handle: SceMpeg) -> *mut c_void; - pub fn sceMpegFreeAvcEsBuf(handle: SceMpeg, buf: *mut c_void); - pub fn sceMpegQueryAtracEsSize(handle: SceMpeg, es_size: *mut i32, out_size: *mut i32) -> i32; - pub fn sceMpegInitAu(handle: SceMpeg, es_buffer: *mut c_void, au: *mut SceMpegAu) -> i32; - pub fn sceMpegGetAvcAu( - handle: SceMpeg, - stream: SceMpegStream, - au: *mut SceMpegAu, - unk: *mut i32, - ) -> i32; - pub fn sceMpegAvcDecodeMode(handle: SceMpeg, mode: *mut SceMpegAvcMode) -> i32; - pub fn sceMpegAvcDecode( - handle: SceMpeg, - au: *mut SceMpegAu, - iframe_width: i32, - buffer: *mut c_void, - init: *mut i32, - ) -> i32; - pub fn sceMpegAvcDecodeStop( - handle: SceMpeg, - frame_width: i32, - buffer: *mut c_void, - status: *mut i32, - ) -> i32; - pub fn sceMpegGetAtracAu( - handle: SceMpeg, - stream: SceMpegStream, - au: *mut SceMpegAu, - unk: *mut c_void, - ) -> i32; - pub fn sceMpegAtracDecode( - handle: SceMpeg, - au: *mut SceMpegAu, - buffer: *mut c_void, - init: i32, - ) -> i32; - - pub fn sceMpegBaseYCrCbCopyVme(yuv_buffer: *mut c_void, buffer: *mut i32, type_: i32) -> i32; - pub fn sceMpegBaseCscInit(width: i32) -> i32; - pub fn sceMpegBaseCscVme( - rgb_buffer: *mut c_void, - rgb_buffer2: *mut c_void, - width: i32, - y_cr_cb_buffer: *mut SceMpegYCrCbBuffer, - ) -> i32; - pub fn sceMpegbase_BEA18F91(lli: *mut SceMpegLLI) -> i32; - - pub fn sceHprmPeekCurrentKey(key: *mut i32) -> i32; - pub fn sceHprmPeekLatch(latch: *mut [u32; 4]) -> i32; - pub fn sceHprmReadLatch(latch: *mut [u32; 4]) -> i32; - pub fn sceHprmIsHeadphoneExist() -> i32; - pub fn sceHprmIsRemoteExist() -> i32; - pub fn sceHprmIsMicrophoneExist() -> i32; - - pub fn sceGuDepthBuffer(zbp: *mut c_void, zbw: i32); - pub fn sceGuDispBuffer(width: i32, height: i32, dispbp: *mut c_void, dispbw: i32); - pub fn sceGuDrawBuffer(psm: DisplayPixelFormat, fbp: *mut c_void, fbw: i32); - pub fn sceGuDrawBufferList(psm: DisplayPixelFormat, fbp: *mut c_void, fbw: i32); - pub fn sceGuDisplay(state: bool) -> bool; - pub fn sceGuDepthFunc(function: DepthFunc); - pub fn sceGuDepthMask(mask: i32); - pub fn sceGuDepthOffset(offset: i32); - pub fn sceGuDepthRange(near: i32, far: i32); - pub fn sceGuFog(near: f32, far: f32, color: u32); - pub fn sceGuInit(); - pub fn sceGuTerm(); - pub fn sceGuBreak(mode: i32); - pub fn sceGuContinue(); - pub fn sceGuSetCallback(signal: GuCallbackId, callback: GuCallback) -> GuCallback; - pub fn sceGuSignal(behavior: SignalBehavior, signal: i32); - pub fn sceGuSendCommandf(cmd: GeCommand, argument: f32); - pub fn sceGuSendCommandi(cmd: GeCommand, argument: i32); - pub fn sceGuGetMemory(size: i32) -> *mut c_void; - pub fn sceGuStart(context_type: GuContextType, list: *mut c_void); - pub fn 
sceGuFinish() -> i32; - pub fn sceGuFinishId(id: u32) -> i32; - pub fn sceGuCallList(list: *const c_void); - pub fn sceGuCallMode(mode: i32); - pub fn sceGuCheckList() -> i32; - pub fn sceGuSendList(mode: GuQueueMode, list: *const c_void, context: *mut GeContext); - pub fn sceGuSwapBuffers() -> *mut c_void; - pub fn sceGuSync(mode: GuSyncMode, behavior: GuSyncBehavior) -> GeListState; - pub fn sceGuDrawArray( - prim: GuPrimitive, - vtype: i32, - count: i32, - indices: *const c_void, - vertices: *const c_void, - ); - pub fn sceGuBeginObject( - vtype: i32, - count: i32, - indices: *const c_void, - vertices: *const c_void, - ); - pub fn sceGuEndObject(); - pub fn sceGuSetStatus(state: GuState, status: i32); - pub fn sceGuGetStatus(state: GuState) -> bool; - pub fn sceGuSetAllStatus(status: i32); - pub fn sceGuGetAllStatus() -> i32; - pub fn sceGuEnable(state: GuState); - pub fn sceGuDisable(state: GuState); - pub fn sceGuLight(light: i32, type_: LightType, components: i32, position: &ScePspFVector3); - pub fn sceGuLightAtt(light: i32, atten0: f32, atten1: f32, atten2: f32); - pub fn sceGuLightColor(light: i32, component: i32, color: u32); - pub fn sceGuLightMode(mode: LightMode); - pub fn sceGuLightSpot(light: i32, direction: &ScePspFVector3, exponent: f32, cutoff: f32); - pub fn sceGuClear(flags: i32); - pub fn sceGuClearColor(color: u32); - pub fn sceGuClearDepth(depth: u32); - pub fn sceGuClearStencil(stencil: u32); - pub fn sceGuPixelMask(mask: u32); - pub fn sceGuColor(color: u32); - pub fn sceGuColorFunc(func: ColorFunc, color: u32, mask: u32); - pub fn sceGuColorMaterial(components: i32); - pub fn sceGuAlphaFunc(func: AlphaFunc, value: i32, mask: i32); - pub fn sceGuAmbient(color: u32); - pub fn sceGuAmbientColor(color: u32); - pub fn sceGuBlendFunc(op: BlendOp, src: BlendSrc, dest: BlendDst, src_fix: u32, dest_fix: u32); - pub fn sceGuMaterial(components: i32, color: u32); - pub fn sceGuModelColor(emissive: u32, ambient: u32, diffuse: u32, specular: u32); - pub fn sceGuStencilFunc(func: StencilFunc, ref_: i32, mask: i32); - pub fn sceGuStencilOp(fail: StencilOperation, zfail: StencilOperation, zpass: StencilOperation); - pub fn sceGuSpecular(power: f32); - pub fn sceGuFrontFace(order: FrontFaceDirection); - pub fn sceGuLogicalOp(op: LogicalOperation); - pub fn sceGuSetDither(matrix: &ScePspIMatrix4); - pub fn sceGuShadeModel(mode: ShadingModel); - pub fn sceGuCopyImage( - psm: DisplayPixelFormat, - sx: i32, - sy: i32, - width: i32, - height: i32, - srcw: i32, - src: *mut c_void, - dx: i32, - dy: i32, - destw: i32, - dest: *mut c_void, - ); - pub fn sceGuTexEnvColor(color: u32); - pub fn sceGuTexFilter(min: TextureFilter, mag: TextureFilter); - pub fn sceGuTexFlush(); - pub fn sceGuTexFunc(tfx: TextureEffect, tcc: TextureColorComponent); - pub fn sceGuTexImage( - mipmap: MipmapLevel, - width: i32, - height: i32, - tbw: i32, - tbp: *const c_void, - ); - pub fn sceGuTexLevelMode(mode: TextureLevelMode, bias: f32); - pub fn sceGuTexMapMode(mode: TextureMapMode, a1: u32, a2: u32); - pub fn sceGuTexMode(tpsm: TexturePixelFormat, maxmips: i32, a2: i32, swizzle: i32); - pub fn sceGuTexOffset(u: f32, v: f32); - pub fn sceGuTexProjMapMode(mode: TextureProjectionMapMode); - pub fn sceGuTexScale(u: f32, v: f32); - pub fn sceGuTexSlope(slope: f32); - pub fn sceGuTexSync(); - pub fn sceGuTexWrap(u: GuTexWrapMode, v: GuTexWrapMode); - pub fn sceGuClutLoad(num_blocks: i32, cbp: *const c_void); - pub fn sceGuClutMode(cpsm: ClutPixelFormat, shift: u32, mask: u32, a3: u32); - pub fn sceGuOffset(x: u32, 
y: u32); - pub fn sceGuScissor(x: i32, y: i32, w: i32, h: i32); - pub fn sceGuViewport(cx: i32, cy: i32, width: i32, height: i32); - pub fn sceGuDrawBezier( - v_type: i32, - u_count: i32, - v_count: i32, - indices: *const c_void, - vertices: *const c_void, - ); - pub fn sceGuPatchDivide(ulevel: u32, vlevel: u32); - pub fn sceGuPatchFrontFace(a0: u32); - pub fn sceGuPatchPrim(prim: PatchPrimitive); - pub fn sceGuDrawSpline( - v_type: i32, - u_count: i32, - v_count: i32, - u_edge: i32, - v_edge: i32, - indices: *const c_void, - vertices: *const c_void, - ); - pub fn sceGuSetMatrix(type_: MatrixMode, matrix: &ScePspFMatrix4); - pub fn sceGuBoneMatrix(index: u32, matrix: &ScePspFMatrix4); - pub fn sceGuMorphWeight(index: i32, weight: f32); - pub fn sceGuDrawArrayN( - primitive_type: GuPrimitive, - v_type: i32, - count: i32, - a3: i32, - indices: *const c_void, - vertices: *const c_void, - ); - - pub fn sceGumDrawArray( - prim: GuPrimitive, - v_type: i32, - count: i32, - indices: *const c_void, - vertices: *const c_void, - ); - pub fn sceGumDrawArrayN( - prim: GuPrimitive, - v_type: i32, - count: i32, - a3: i32, - indices: *const c_void, - vertices: *const c_void, - ); - pub fn sceGumDrawBezier( - v_type: i32, - u_count: i32, - v_count: i32, - indices: *const c_void, - vertices: *const c_void, - ); - pub fn sceGumDrawSpline( - v_type: i32, - u_count: i32, - v_count: i32, - u_edge: i32, - v_edge: i32, - indices: *const c_void, - vertices: *const c_void, - ); - pub fn sceGumFastInverse(); - pub fn sceGumFullInverse(); - pub fn sceGumLoadIdentity(); - pub fn sceGumLoadMatrix(m: &ScePspFMatrix4); - pub fn sceGumLookAt(eye: &ScePspFVector3, center: &ScePspFVector3, up: &ScePspFVector3); - pub fn sceGumMatrixMode(mode: MatrixMode); - pub fn sceGumMultMatrix(m: &ScePspFMatrix4); - pub fn sceGumOrtho(left: f32, right: f32, bottom: f32, top: f32, near: f32, far: f32); - pub fn sceGumPerspective(fovy: f32, aspect: f32, near: f32, far: f32); - pub fn sceGumPopMatrix(); - pub fn sceGumPushMatrix(); - pub fn sceGumRotateX(angle: f32); - pub fn sceGumRotateY(angle: f32); - pub fn sceGumRotateZ(angle: f32); - pub fn sceGumRotateXYZ(v: &ScePspFVector3); - pub fn sceGumRotateZYX(v: &ScePspFVector3); - pub fn sceGumScale(v: &ScePspFVector3); - pub fn sceGumStoreMatrix(m: &mut ScePspFMatrix4); - pub fn sceGumTranslate(v: &ScePspFVector3); - pub fn sceGumUpdateMatrix(); - - pub fn sceMp3ReserveMp3Handle(args: *mut SceMp3InitArg) -> i32; - pub fn sceMp3ReleaseMp3Handle(handle: Mp3Handle) -> i32; - pub fn sceMp3InitResource() -> i32; - pub fn sceMp3TermResource() -> i32; - pub fn sceMp3Init(handle: Mp3Handle) -> i32; - pub fn sceMp3Decode(handle: Mp3Handle, dst: *mut *mut i16) -> i32; - pub fn sceMp3GetInfoToAddStreamData( - handle: Mp3Handle, - dst: *mut *mut u8, - to_write: *mut i32, - src_pos: *mut i32, - ) -> i32; - pub fn sceMp3NotifyAddStreamData(handle: Mp3Handle, size: i32) -> i32; - pub fn sceMp3CheckStreamDataNeeded(handle: Mp3Handle) -> i32; - pub fn sceMp3SetLoopNum(handle: Mp3Handle, loop_: i32) -> i32; - pub fn sceMp3GetLoopNum(handle: Mp3Handle) -> i32; - pub fn sceMp3GetSumDecodedSample(handle: Mp3Handle) -> i32; - pub fn sceMp3GetMaxOutputSample(handle: Mp3Handle) -> i32; - pub fn sceMp3GetSamplingRate(handle: Mp3Handle) -> i32; - pub fn sceMp3GetBitRate(handle: Mp3Handle) -> i32; - pub fn sceMp3GetMp3ChannelNum(handle: Mp3Handle) -> i32; - pub fn sceMp3ResetPlayPosition(handle: Mp3Handle) -> i32; - - pub fn sceRegOpenRegistry(reg: *mut Key, mode: i32, handle: *mut RegHandle) -> i32; - pub fn 
sceRegFlushRegistry(handle: RegHandle) -> i32; - pub fn sceRegCloseRegistry(handle: RegHandle) -> i32; - pub fn sceRegOpenCategory( - handle: RegHandle, - name: *const u8, - mode: i32, - dir_handle: *mut RegHandle, - ) -> i32; - pub fn sceRegRemoveCategory(handle: RegHandle, name: *const u8) -> i32; - pub fn sceRegCloseCategory(dir_handle: RegHandle) -> i32; - pub fn sceRegFlushCategory(dir_handle: RegHandle) -> i32; - pub fn sceRegGetKeyInfo( - dir_handle: RegHandle, - name: *const u8, - key_handle: *mut RegHandle, - type_: *mut KeyType, - size: *mut usize, - ) -> i32; - pub fn sceRegGetKeyInfoByName( - dir_handle: RegHandle, - name: *const u8, - type_: *mut KeyType, - size: *mut usize, - ) -> i32; - pub fn sceRegGetKeyValue( - dir_handle: RegHandle, - key_handle: RegHandle, - buf: *mut c_void, - size: usize, - ) -> i32; - pub fn sceRegGetKeyValueByName( - dir_handle: RegHandle, - name: *const u8, - buf: *mut c_void, - size: usize, - ) -> i32; - pub fn sceRegSetKeyValue( - dir_handle: RegHandle, - name: *const u8, - buf: *const c_void, - size: usize, - ) -> i32; - pub fn sceRegGetKeysNum(dir_handle: RegHandle, num: *mut i32) -> i32; - pub fn sceRegGetKeys(dir_handle: RegHandle, buf: *mut u8, num: i32) -> i32; - pub fn sceRegCreateKey(dir_handle: RegHandle, name: *const u8, type_: i32, size: usize) -> i32; - pub fn sceRegRemoveRegistry(key: *mut Key) -> i32; - - pub fn sceOpenPSIDGetOpenPSID(openpsid: *mut OpenPSID) -> i32; - - pub fn sceUtilityMsgDialogInitStart(params: *mut UtilityMsgDialogParams) -> i32; - pub fn sceUtilityMsgDialogShutdownStart(); - pub fn sceUtilityMsgDialogGetStatus() -> i32; - pub fn sceUtilityMsgDialogUpdate(n: i32); - pub fn sceUtilityMsgDialogAbort() -> i32; - pub fn sceUtilityNetconfInitStart(data: *mut UtilityNetconfData) -> i32; - pub fn sceUtilityNetconfShutdownStart() -> i32; - pub fn sceUtilityNetconfUpdate(unknown: i32) -> i32; - pub fn sceUtilityNetconfGetStatus() -> i32; - pub fn sceUtilityCheckNetParam(id: i32) -> i32; - pub fn sceUtilityGetNetParam(conf: i32, param: NetParam, data: *mut UtilityNetData) -> i32; - pub fn sceUtilitySavedataInitStart(params: *mut SceUtilitySavedataParam) -> i32; - pub fn sceUtilitySavedataGetStatus() -> i32; - pub fn sceUtilitySavedataShutdownStart() -> i32; - pub fn sceUtilitySavedataUpdate(unknown: i32); - pub fn sceUtilityGameSharingInitStart(params: *mut UtilityGameSharingParams) -> i32; - pub fn sceUtilityGameSharingShutdownStart(); - pub fn sceUtilityGameSharingGetStatus() -> i32; - pub fn sceUtilityGameSharingUpdate(n: i32); - pub fn sceUtilityHtmlViewerInitStart(params: *mut UtilityHtmlViewerParam) -> i32; - pub fn sceUtilityHtmlViewerShutdownStart() -> i32; - pub fn sceUtilityHtmlViewerUpdate(n: i32) -> i32; - pub fn sceUtilityHtmlViewerGetStatus() -> i32; - pub fn sceUtilitySetSystemParamInt(id: SystemParamId, value: i32) -> i32; - pub fn sceUtilitySetSystemParamString(id: SystemParamId, str: *const u8) -> i32; - pub fn sceUtilityGetSystemParamInt(id: SystemParamId, value: *mut i32) -> i32; - pub fn sceUtilityGetSystemParamString(id: SystemParamId, str: *mut u8, len: i32) -> i32; - pub fn sceUtilityOskInitStart(params: *mut SceUtilityOskParams) -> i32; - pub fn sceUtilityOskShutdownStart() -> i32; - pub fn sceUtilityOskUpdate(n: i32) -> i32; - pub fn sceUtilityOskGetStatus() -> i32; - pub fn sceUtilityLoadNetModule(module: NetModule) -> i32; - pub fn sceUtilityUnloadNetModule(module: NetModule) -> i32; - pub fn sceUtilityLoadAvModule(module: AvModule) -> i32; - pub fn sceUtilityUnloadAvModule(module: AvModule) -> 
i32; - pub fn sceUtilityLoadUsbModule(module: UsbModule) -> i32; - pub fn sceUtilityUnloadUsbModule(module: UsbModule) -> i32; - pub fn sceUtilityLoadModule(module: Module) -> i32; - pub fn sceUtilityUnloadModule(module: Module) -> i32; - pub fn sceUtilityCreateNetParam(conf: i32) -> i32; - pub fn sceUtilitySetNetParam(param: NetParam, val: *const c_void) -> i32; - pub fn sceUtilityCopyNetParam(src: i32, dest: i32) -> i32; - pub fn sceUtilityDeleteNetParam(conf: i32) -> i32; - - pub fn sceNetInit( - poolsize: i32, - calloutprio: i32, - calloutstack: i32, - netintrprio: i32, - netintrstack: i32, - ) -> i32; - pub fn sceNetTerm() -> i32; - pub fn sceNetFreeThreadinfo(thid: i32) -> i32; - pub fn sceNetThreadAbort(thid: i32) -> i32; - pub fn sceNetEtherStrton(name: *mut u8, mac: *mut u8); - pub fn sceNetEtherNtostr(mac: *mut u8, name: *mut u8); - pub fn sceNetGetLocalEtherAddr(mac: *mut u8) -> i32; - pub fn sceNetGetMallocStat(stat: *mut SceNetMallocStat) -> i32; - - pub fn sceNetAdhocctlInit( - stacksize: i32, - priority: i32, - adhoc_id: *mut SceNetAdhocctlAdhocId, - ) -> i32; - pub fn sceNetAdhocctlTerm() -> i32; - pub fn sceNetAdhocctlConnect(name: *const u8) -> i32; - pub fn sceNetAdhocctlDisconnect() -> i32; - pub fn sceNetAdhocctlGetState(event: *mut i32) -> i32; - pub fn sceNetAdhocctlCreate(name: *const u8) -> i32; - pub fn sceNetAdhocctlJoin(scaninfo: *mut SceNetAdhocctlScanInfo) -> i32; - pub fn sceNetAdhocctlGetAdhocId(id: *mut SceNetAdhocctlAdhocId) -> i32; - pub fn sceNetAdhocctlCreateEnterGameMode( - name: *const u8, - unknown: i32, - num: i32, - macs: *mut u8, - timeout: u32, - unknown2: i32, - ) -> i32; - pub fn sceNetAdhocctlJoinEnterGameMode( - name: *const u8, - hostmac: *mut u8, - timeout: u32, - unknown: i32, - ) -> i32; - pub fn sceNetAdhocctlGetGameModeInfo(gamemodeinfo: *mut SceNetAdhocctlGameModeInfo) -> i32; - pub fn sceNetAdhocctlExitGameMode() -> i32; - pub fn sceNetAdhocctlGetPeerList(length: *mut i32, buf: *mut c_void) -> i32; - pub fn sceNetAdhocctlGetPeerInfo( - mac: *mut u8, - size: i32, - peerinfo: *mut SceNetAdhocctlPeerInfo, - ) -> i32; - pub fn sceNetAdhocctlScan() -> i32; - pub fn sceNetAdhocctlGetScanInfo(length: *mut i32, buf: *mut c_void) -> i32; - pub fn sceNetAdhocctlAddHandler(handler: SceNetAdhocctlHandler, unknown: *mut c_void) -> i32; - pub fn sceNetAdhocctlDelHandler(id: i32) -> i32; - pub fn sceNetAdhocctlGetNameByAddr(mac: *mut u8, nickname: *mut u8) -> i32; - pub fn sceNetAdhocctlGetAddrByName( - nickname: *mut u8, - length: *mut i32, - buf: *mut c_void, - ) -> i32; - pub fn sceNetAdhocctlGetParameter(params: *mut SceNetAdhocctlParams) -> i32; - - pub fn sceNetAdhocInit() -> i32; - pub fn sceNetAdhocTerm() -> i32; - pub fn sceNetAdhocPdpCreate(mac: *mut u8, port: u16, buf_size: u32, unk1: i32) -> i32; - pub fn sceNetAdhocPdpDelete(id: i32, unk1: i32) -> i32; - pub fn sceNetAdhocPdpSend( - id: i32, - dest_mac_addr: *mut u8, - port: u16, - data: *mut c_void, - len: u32, - timeout: u32, - nonblock: i32, - ) -> i32; - pub fn sceNetAdhocPdpRecv( - id: i32, - src_mac_addr: *mut u8, - port: *mut u16, - data: *mut c_void, - data_length: *mut c_void, - timeout: u32, - nonblock: i32, - ) -> i32; - pub fn sceNetAdhocGetPdpStat(size: *mut i32, stat: *mut SceNetAdhocPdpStat) -> i32; - pub fn sceNetAdhocGameModeCreateMaster(data: *mut c_void, size: i32) -> i32; - pub fn sceNetAdhocGameModeCreateReplica(mac: *mut u8, data: *mut c_void, size: i32) -> i32; - pub fn sceNetAdhocGameModeUpdateMaster() -> i32; - pub fn sceNetAdhocGameModeUpdateReplica(id: i32, 
unk1: i32) -> i32; - pub fn sceNetAdhocGameModeDeleteMaster() -> i32; - pub fn sceNetAdhocGameModeDeleteReplica(id: i32) -> i32; - pub fn sceNetAdhocPtpOpen( - srcmac: *mut u8, - srcport: u16, - destmac: *mut u8, - destport: u16, - buf_size: u32, - delay: u32, - count: i32, - unk1: i32, - ) -> i32; - pub fn sceNetAdhocPtpConnect(id: i32, timeout: u32, nonblock: i32) -> i32; - pub fn sceNetAdhocPtpListen( - srcmac: *mut u8, - srcport: u16, - buf_size: u32, - delay: u32, - count: i32, - queue: i32, - unk1: i32, - ) -> i32; - pub fn sceNetAdhocPtpAccept( - id: i32, - mac: *mut u8, - port: *mut u16, - timeout: u32, - nonblock: i32, - ) -> i32; - pub fn sceNetAdhocPtpSend( - id: i32, - data: *mut c_void, - data_size: *mut i32, - timeout: u32, - nonblock: i32, - ) -> i32; - pub fn sceNetAdhocPtpRecv( - id: i32, - data: *mut c_void, - data_size: *mut i32, - timeout: u32, - nonblock: i32, - ) -> i32; - pub fn sceNetAdhocPtpFlush(id: i32, timeout: u32, nonblock: i32) -> i32; - pub fn sceNetAdhocPtpClose(id: i32, unk1: i32) -> i32; - pub fn sceNetAdhocGetPtpStat(size: *mut i32, stat: *mut SceNetAdhocPtpStat) -> i32; -} - -extern "C" { - pub fn sceNetAdhocMatchingInit(memsize: i32) -> i32; - pub fn sceNetAdhocMatchingTerm() -> i32; - pub fn sceNetAdhocMatchingCreate( - mode: AdhocMatchingMode, - max_peers: i32, - port: u16, - buf_size: i32, - hello_delay: u32, - ping_delay: u32, - init_count: i32, - msg_delay: u32, - callback: AdhocMatchingCallback, - ) -> i32; - pub fn sceNetAdhocMatchingDelete(matching_id: i32) -> i32; - pub fn sceNetAdhocMatchingStart( - matching_id: i32, - evth_pri: i32, - evth_stack: i32, - inth_pri: i32, - inth_stack: i32, - opt_len: i32, - opt_data: *mut c_void, - ) -> i32; - pub fn sceNetAdhocMatchingStop(matching_id: i32) -> i32; - pub fn sceNetAdhocMatchingSelectTarget( - matching_id: i32, - mac: *mut u8, - opt_len: i32, - opt_data: *mut c_void, - ) -> i32; - pub fn sceNetAdhocMatchingCancelTarget(matching_id: i32, mac: *mut u8) -> i32; - pub fn sceNetAdhocMatchingCancelTargetWithOpt( - matching_id: i32, - mac: *mut u8, - opt_len: i32, - opt_data: *mut c_void, - ) -> i32; - pub fn sceNetAdhocMatchingSendData( - matching_id: i32, - mac: *mut u8, - data_len: i32, - data: *mut c_void, - ) -> i32; - pub fn sceNetAdhocMatchingAbortSendData(matching_id: i32, mac: *mut u8) -> i32; - pub fn sceNetAdhocMatchingSetHelloOpt( - matching_id: i32, - opt_len: i32, - opt_data: *mut c_void, - ) -> i32; - pub fn sceNetAdhocMatchingGetHelloOpt( - matching_id: i32, - opt_len: *mut i32, - opt_data: *mut c_void, - ) -> i32; - pub fn sceNetAdhocMatchingGetMembers( - matching_id: i32, - length: *mut i32, - buf: *mut c_void, - ) -> i32; - pub fn sceNetAdhocMatchingGetPoolMaxAlloc() -> i32; - pub fn sceNetAdhocMatchingGetPoolStat(poolstat: *mut AdhocPoolStat) -> i32; -} - -extern "C" { - pub fn sceNetApctlInit(stack_size: i32, init_priority: i32) -> i32; - pub fn sceNetApctlTerm() -> i32; - pub fn sceNetApctlGetInfo(code: ApctlInfo, pinfo: *mut SceNetApctlInfo) -> i32; - pub fn sceNetApctlAddHandler(handler: SceNetApctlHandler, parg: *mut c_void) -> i32; - pub fn sceNetApctlDelHandler(handler_id: i32) -> i32; - pub fn sceNetApctlConnect(conn_index: i32) -> i32; - pub fn sceNetApctlDisconnect() -> i32; - pub fn sceNetApctlGetState(pstate: *mut ApctlState) -> i32; - - pub fn sceNetInetInit() -> i32; - pub fn sceNetInetTerm() -> i32; - pub fn sceNetInetAccept(s: i32, addr: *mut sockaddr, addr_len: *mut socklen_t) -> i32; - pub fn sceNetInetBind(s: i32, my_addr: *const sockaddr, addr_len: socklen_t) -> 
i32; - pub fn sceNetInetConnect(s: i32, serv_addr: *const sockaddr, addr_len: socklen_t) -> i32; - pub fn sceNetInetGetsockopt( - s: i32, - level: i32, - opt_name: i32, - opt_val: *mut c_void, - optl_en: *mut socklen_t, - ) -> i32; - pub fn sceNetInetListen(s: i32, backlog: i32) -> i32; - pub fn sceNetInetRecv(s: i32, buf: *mut c_void, len: usize, flags: i32) -> usize; - pub fn sceNetInetRecvfrom( - s: i32, - buf: *mut c_void, - flags: usize, - arg1: i32, - from: *mut sockaddr, - from_len: *mut socklen_t, - ) -> usize; - pub fn sceNetInetSend(s: i32, buf: *const c_void, len: usize, flags: i32) -> usize; - pub fn sceNetInetSendto( - s: i32, - buf: *const c_void, - len: usize, - flags: i32, - to: *const sockaddr, - to_len: socklen_t, - ) -> usize; - pub fn sceNetInetSetsockopt( - s: i32, - level: i32, - opt_name: i32, - opt_val: *const c_void, - opt_len: socklen_t, - ) -> i32; - pub fn sceNetInetShutdown(s: i32, how: i32) -> i32; - pub fn sceNetInetSocket(domain: i32, type_: i32, protocol: i32) -> i32; - pub fn sceNetInetClose(s: i32) -> i32; - pub fn sceNetInetGetErrno() -> i32; - - pub fn sceSslInit(unknown1: i32) -> i32; - pub fn sceSslEnd() -> i32; - pub fn sceSslGetUsedMemoryMax(memory: *mut u32) -> i32; - pub fn sceSslGetUsedMemoryCurrent(memory: *mut u32) -> i32; - - pub fn sceHttpInit(unknown1: u32) -> i32; - pub fn sceHttpEnd() -> i32; - pub fn sceHttpCreateTemplate(agent: *mut u8, unknown1: i32, unknown2: i32) -> i32; - pub fn sceHttpDeleteTemplate(templateid: i32) -> i32; - pub fn sceHttpCreateConnection( - templateid: i32, - host: *mut u8, - unknown1: *mut u8, - port: u16, - unknown2: i32, - ) -> i32; - pub fn sceHttpCreateConnectionWithURL(templateid: i32, url: *const u8, unknown1: i32) -> i32; - pub fn sceHttpDeleteConnection(connection_id: i32) -> i32; - pub fn sceHttpCreateRequest( - connection_id: i32, - method: HttpMethod, - path: *mut u8, - content_length: u64, - ) -> i32; - pub fn sceHttpCreateRequestWithURL( - connection_id: i32, - method: HttpMethod, - url: *mut u8, - content_length: u64, - ) -> i32; - pub fn sceHttpDeleteRequest(request_id: i32) -> i32; - pub fn sceHttpSendRequest(request_id: i32, data: *mut c_void, data_size: u32) -> i32; - pub fn sceHttpAbortRequest(request_id: i32) -> i32; - pub fn sceHttpReadData(request_id: i32, data: *mut c_void, data_size: u32) -> i32; - pub fn sceHttpGetContentLength(request_id: i32, content_length: *mut u64) -> i32; - pub fn sceHttpGetStatusCode(request_id: i32, status_code: *mut i32) -> i32; - pub fn sceHttpSetResolveTimeOut(id: i32, timeout: u32) -> i32; - pub fn sceHttpSetResolveRetry(id: i32, count: i32) -> i32; - pub fn sceHttpSetConnectTimeOut(id: i32, timeout: u32) -> i32; - pub fn sceHttpSetSendTimeOut(id: i32, timeout: u32) -> i32; - pub fn sceHttpSetRecvTimeOut(id: i32, timeout: u32) -> i32; - pub fn sceHttpEnableKeepAlive(id: i32) -> i32; - pub fn sceHttpDisableKeepAlive(id: i32) -> i32; - pub fn sceHttpEnableRedirect(id: i32) -> i32; - pub fn sceHttpDisableRedirect(id: i32) -> i32; - pub fn sceHttpEnableCookie(id: i32) -> i32; - pub fn sceHttpDisableCookie(id: i32) -> i32; - pub fn sceHttpSaveSystemCookie() -> i32; - pub fn sceHttpLoadSystemCookie() -> i32; - pub fn sceHttpAddExtraHeader(id: i32, name: *mut u8, value: *mut u8, unknown1: i32) -> i32; - pub fn sceHttpDeleteHeader(id: i32, name: *const u8) -> i32; - pub fn sceHttpsInit(unknown1: i32, unknown2: i32, unknown3: i32, unknown4: i32) -> i32; - pub fn sceHttpsEnd() -> i32; - pub fn sceHttpsLoadDefaultCert(unknown1: i32, unknown2: i32) -> i32; - pub fn 
sceHttpDisableAuth(id: i32) -> i32; - pub fn sceHttpDisableCache(id: i32) -> i32; - pub fn sceHttpEnableAuth(id: i32) -> i32; - pub fn sceHttpEnableCache(id: i32) -> i32; - pub fn sceHttpEndCache() -> i32; - pub fn sceHttpGetAllHeader(request: i32, header: *mut *mut u8, header_size: *mut u32) -> i32; - pub fn sceHttpGetNetworkErrno(request: i32, err_num: *mut i32) -> i32; - pub fn sceHttpGetProxy( - id: i32, - activate_flag: *mut i32, - mode: *mut i32, - proxy_host: *mut u8, - len: usize, - proxy_port: *mut u16, - ) -> i32; - pub fn sceHttpInitCache(max_size: usize) -> i32; - pub fn sceHttpSetAuthInfoCB(id: i32, cbfunc: HttpPasswordCB) -> i32; - pub fn sceHttpSetProxy( - id: i32, - activate_flag: i32, - mode: i32, - new_proxy_host: *const u8, - new_proxy_port: u16, - ) -> i32; - pub fn sceHttpSetResHeaderMaxSize(id: i32, header_size: u32) -> i32; - pub fn sceHttpSetMallocFunction( - malloc_func: HttpMallocFunction, - free_func: HttpFreeFunction, - realloc_func: HttpReallocFunction, - ) -> i32; - - pub fn sceNetResolverInit() -> i32; - pub fn sceNetResolverCreate(rid: *mut i32, buf: *mut c_void, buf_length: u32) -> i32; - pub fn sceNetResolverDelete(rid: i32) -> i32; - pub fn sceNetResolverStartNtoA( - rid: i32, - hostname: *const u8, - addr: *mut in_addr, - timeout: u32, - retry: i32, - ) -> i32; - pub fn sceNetResolverStartAtoN( - rid: i32, - addr: *const in_addr, - hostname: *mut u8, - hostname_len: u32, - timeout: u32, - retry: i32, - ) -> i32; - pub fn sceNetResolverStop(rid: i32) -> i32; - pub fn sceNetResolverTerm() -> i32; -} diff --git a/vendor/libc/src/sgx.rs b/vendor/libc/src/sgx.rs deleted file mode 100644 index 9cf9c6d3b41b8d..00000000000000 --- a/vendor/libc/src/sgx.rs +++ /dev/null @@ -1,15 +0,0 @@ -//! SGX C types definition - -use crate::prelude::*; - -pub type intmax_t = i64; -pub type uintmax_t = u64; - -pub type size_t = usize; -pub type ptrdiff_t = isize; -pub type intptr_t = isize; -pub type uintptr_t = usize; -pub type ssize_t = isize; - -pub const INT_MIN: c_int = -2147483648; -pub const INT_MAX: c_int = 2147483647; diff --git a/vendor/libc/src/solid/aarch64.rs b/vendor/libc/src/solid/aarch64.rs deleted file mode 100644 index 376783c8234baf..00000000000000 --- a/vendor/libc/src/solid/aarch64.rs +++ /dev/null @@ -1 +0,0 @@ -pub type wchar_t = u32; diff --git a/vendor/libc/src/solid/arm.rs b/vendor/libc/src/solid/arm.rs deleted file mode 100644 index 376783c8234baf..00000000000000 --- a/vendor/libc/src/solid/arm.rs +++ /dev/null @@ -1 +0,0 @@ -pub type wchar_t = u32; diff --git a/vendor/libc/src/solid/mod.rs b/vendor/libc/src/solid/mod.rs deleted file mode 100644 index 40d6a9d3485868..00000000000000 --- a/vendor/libc/src/solid/mod.rs +++ /dev/null @@ -1,876 +0,0 @@ -//! Interface to the [SOLID] C library -//! -//! 
[SOLID]: https://solid.kmckk.com/ - -use crate::prelude::*; - -pub type intmax_t = i64; -pub type uintmax_t = u64; - -pub type uintptr_t = usize; -pub type intptr_t = isize; -pub type ptrdiff_t = isize; -pub type size_t = crate::uintptr_t; -pub type ssize_t = intptr_t; - -pub type clock_t = c_uint; -pub type time_t = i64; -pub type clockid_t = c_int; -pub type timer_t = c_int; -pub type suseconds_t = c_int; -pub type useconds_t = c_uint; - -pub type sighandler_t = size_t; - -// sys/ansi.h -pub type __caddr_t = *mut c_char; -pub type __gid_t = u32; -pub type __in_addr_t = u32; -pub type __in_port_t = u16; -pub type __mode_t = u32; -pub type __off_t = i64; -pub type __pid_t = i32; -pub type __sa_family_t = u8; -pub type __socklen_t = c_uint; -pub type __uid_t = u32; -pub type __fsblkcnt_t = u64; -pub type __fsfilcnt_t = u64; - -// locale.h -pub type locale_t = usize; - -// nl_types.h -pub type nl_item = c_long; - -// sys/types.h -pub type __va_list = *mut c_char; -pub type u_int8_t = u8; -pub type u_int16_t = u16; -pub type u_int32_t = u32; -pub type u_int64_t = u64; -pub type u_char = c_uchar; -pub type u_short = c_ushort; -pub type u_int = c_uint; -pub type u_long = c_ulong; -pub type unchar = c_uchar; -pub type ushort = c_ushort; -pub type uint = c_uint; -pub type ulong = c_ulong; -pub type u_quad_t = u64; -pub type quad_t = i64; -pub type qaddr_t = *mut quad_t; -pub type longlong_t = i64; -pub type u_longlong_t = u64; -pub type blkcnt_t = i64; -pub type blksize_t = i32; -pub type fsblkcnt_t = __fsblkcnt_t; -pub type fsfilcnt_t = __fsfilcnt_t; -pub type caddr_t = __caddr_t; -pub type daddr_t = i64; -pub type dev_t = u64; -pub type fixpt_t = u32; -pub type gid_t = __gid_t; -pub type idtype_t = c_int; -pub type id_t = u32; -pub type ino_t = u64; -pub type key_t = c_long; -pub type mode_t = __mode_t; -pub type nlink_t = u32; -pub type off_t = __off_t; -pub type pid_t = __pid_t; -pub type lwpid_t = i32; -pub type rlim_t = u64; -pub type segsz_t = i32; -pub type swblk_t = i32; -pub type mqd_t = c_int; -pub type cpuid_t = c_ulong; -pub type psetid_t = c_int; - -s! 
{ - // stat.h - pub struct stat { - pub st_dev: dev_t, - pub st_ino: ino_t, - pub st_mode: c_short, - pub st_nlink: c_short, - pub st_uid: c_short, - pub st_gid: c_short, - pub st_rdev: dev_t, - pub st_size: off_t, - pub st_atime: time_t, - pub st_mtime: time_t, - pub st_ctime: time_t, - pub st_blksize: blksize_t, - } - - // time.h - pub struct tm { - pub tm_sec: c_int, - pub tm_min: c_int, - pub tm_hour: c_int, - pub tm_mday: c_int, - pub tm_mon: c_int, - pub tm_year: c_int, - pub tm_wday: c_int, - pub tm_yday: c_int, - pub tm_isdst: c_int, - pub tm_gmtoff: c_long, - pub tm_zone: *mut c_char, - } - - // stdlib.h - pub struct qdiv_t { - pub quot: quad_t, - pub rem: quad_t, - } - pub struct lldiv_t { - pub quot: c_longlong, - pub rem: c_longlong, - } - pub struct div_t { - pub quot: c_int, - pub rem: c_int, - } - pub struct ldiv_t { - pub quot: c_long, - pub rem: c_long, - } - - // locale.h - pub struct lconv { - pub decimal_point: *mut c_char, - pub thousands_sep: *mut c_char, - pub grouping: *mut c_char, - pub int_curr_symbol: *mut c_char, - pub currency_symbol: *mut c_char, - pub mon_decimal_point: *mut c_char, - pub mon_thousands_sep: *mut c_char, - pub mon_grouping: *mut c_char, - pub positive_sign: *mut c_char, - pub negative_sign: *mut c_char, - pub int_frac_digits: c_char, - pub frac_digits: c_char, - pub p_cs_precedes: c_char, - pub p_sep_by_space: c_char, - pub n_cs_precedes: c_char, - pub n_sep_by_space: c_char, - pub p_sign_posn: c_char, - pub n_sign_posn: c_char, - pub int_p_cs_precedes: c_char, - pub int_n_cs_precedes: c_char, - pub int_p_sep_by_space: c_char, - pub int_n_sep_by_space: c_char, - pub int_p_sign_posn: c_char, - pub int_n_sign_posn: c_char, - } - - pub struct iovec { - pub iov_base: *mut c_void, - pub iov_len: size_t, - } - - pub struct timeval { - pub tv_sec: c_long, - pub tv_usec: c_long, - } -} - -pub const INT_MIN: c_int = -2147483648; -pub const INT_MAX: c_int = 2147483647; - -pub const EXIT_FAILURE: c_int = 1; -pub const EXIT_SUCCESS: c_int = 0; -pub const RAND_MAX: c_int = 0x7fffffff; -pub const EOF: c_int = -1; -pub const SEEK_SET: c_int = 0; -pub const SEEK_CUR: c_int = 1; -pub const SEEK_END: c_int = 2; -pub const _IOFBF: c_int = 0; -pub const _IONBF: c_int = 2; -pub const _IOLBF: c_int = 1; -pub const BUFSIZ: c_uint = 1024; -pub const FOPEN_MAX: c_uint = 20; -pub const FILENAME_MAX: c_uint = 1024; - -pub const O_RDONLY: c_int = 1; -pub const O_WRONLY: c_int = 2; -pub const O_RDWR: c_int = 4; -pub const O_APPEND: c_int = 8; -pub const O_CREAT: c_int = 0x10; -pub const O_EXCL: c_int = 0x400; -pub const O_TEXT: c_int = 0x100; -pub const O_BINARY: c_int = 0x200; -pub const O_TRUNC: c_int = 0x20; -pub const S_IEXEC: c_short = 0o0100; -pub const S_IWRITE: c_short = 0o0200; -pub const S_IREAD: c_short = 0o0400; -pub const S_IFCHR: c_short = 0o2_0000; -pub const S_IFDIR: c_short = 0o4_0000; -pub const S_IFMT: c_short = 0o16_0000; -pub const S_IFIFO: c_short = 0o1_0000; -pub const S_IFBLK: c_short = 0o6_0000; -pub const S_IFREG: c_short = 0o10_0000; - -pub const LC_ALL: c_int = 0; -pub const LC_COLLATE: c_int = 1; -pub const LC_CTYPE: c_int = 2; -pub const LC_MONETARY: c_int = 3; -pub const LC_NUMERIC: c_int = 4; -pub const LC_TIME: c_int = 5; -pub const LC_MESSAGES: c_int = 6; -pub const _LC_LAST: c_int = 7; - -pub const EPERM: c_int = 1; -pub const ENOENT: c_int = 2; -pub const ESRCH: c_int = 3; -pub const EINTR: c_int = 4; -pub const EIO: c_int = 5; -pub const ENXIO: c_int = 6; -pub const E2BIG: c_int = 7; -pub const ENOEXEC: c_int = 8; -pub const EBADF: 
c_int = 9; -pub const ECHILD: c_int = 10; -pub const EAGAIN: c_int = 11; -pub const ENOMEM: c_int = 12; -pub const EACCES: c_int = 13; -pub const EFAULT: c_int = 14; -pub const ENOTBLK: c_int = 15; -pub const EBUSY: c_int = 16; -pub const EEXIST: c_int = 17; -pub const EXDEV: c_int = 18; -pub const ENODEV: c_int = 19; -pub const ENOTDIR: c_int = 20; -pub const EISDIR: c_int = 21; -pub const EINVAL: c_int = 22; -pub const ENFILE: c_int = 23; -pub const EMFILE: c_int = 24; -pub const ENOTTY: c_int = 25; -pub const ETXTBSY: c_int = 26; -pub const EFBIG: c_int = 27; -pub const ENOSPC: c_int = 28; -pub const ESPIPE: c_int = 29; -pub const EROFS: c_int = 30; -pub const EMLINK: c_int = 31; -pub const EPIPE: c_int = 32; -pub const EDOM: c_int = 33; -pub const ERANGE: c_int = 34; - -pub const EDEADLK: c_int = 35; -pub const ENAMETOOLONG: c_int = 36; -pub const ENOLCK: c_int = 37; -pub const ENOSYS: c_int = 38; -pub const ENOTEMPTY: c_int = 39; -pub const ELOOP: c_int = 40; -pub const EWOULDBLOCK: c_int = EAGAIN; -pub const ENOMSG: c_int = 42; -pub const EIDRM: c_int = 43; -pub const ECHRNG: c_int = 44; -pub const EL2NSYNC: c_int = 45; -pub const EL3HLT: c_int = 46; -pub const EL3RST: c_int = 47; -pub const ELNRNG: c_int = 48; -pub const EUNATCH: c_int = 49; -pub const ENOCSI: c_int = 50; -pub const EL2HLT: c_int = 51; -pub const EBADE: c_int = 52; -pub const EBADR: c_int = 53; -pub const EXFULL: c_int = 54; -pub const ENOANO: c_int = 55; -pub const EBADRQC: c_int = 56; -pub const EBADSLT: c_int = 57; - -pub const EDEADLOCK: c_int = EDEADLK; - -pub const EBFONT: c_int = 59; -pub const ENOSTR: c_int = 60; -pub const ENODATA: c_int = 61; -pub const ETIME: c_int = 62; -pub const ENOSR: c_int = 63; -pub const ENONET: c_int = 64; -pub const ENOPKG: c_int = 65; -pub const EREMOTE: c_int = 66; -pub const ENOLINK: c_int = 67; -pub const EADV: c_int = 68; -pub const ESRMNT: c_int = 69; -pub const ECOMM: c_int = 70; -pub const EPROTO: c_int = 71; -pub const EMULTIHOP: c_int = 72; -pub const EDOTDOT: c_int = 73; -pub const EBADMSG: c_int = 74; -pub const EOVERFLOW: c_int = 75; -pub const ENOTUNIQ: c_int = 76; -pub const EBADFD: c_int = 77; -pub const EREMCHG: c_int = 78; -pub const ELIBACC: c_int = 79; -pub const ELIBBAD: c_int = 80; -pub const ELIBSCN: c_int = 81; -pub const ELIBMAX: c_int = 82; -pub const ELIBEXEC: c_int = 83; -pub const EILSEQ: c_int = 84; -pub const ERESTART: c_int = 85; -pub const ESTRPIPE: c_int = 86; -pub const EUSERS: c_int = 87; -pub const ENOTSOCK: c_int = 88; -pub const EDESTADDRREQ: c_int = 89; -pub const EMSGSIZE: c_int = 90; -pub const EPROTOTYPE: c_int = 91; -pub const ENOPROTOOPT: c_int = 92; -pub const EPROTONOSUPPORT: c_int = 93; -pub const ESOCKTNOSUPPORT: c_int = 94; -pub const EOPNOTSUPP: c_int = 95; -pub const EPFNOSUPPORT: c_int = 96; -pub const EAFNOSUPPORT: c_int = 97; -pub const EADDRINUSE: c_int = 98; -pub const EADDRNOTAVAIL: c_int = 99; -pub const ENETDOWN: c_int = 100; -pub const ENETUNREACH: c_int = 101; -pub const ENETRESET: c_int = 102; -pub const ECONNABORTED: c_int = 103; -pub const ECONNRESET: c_int = 104; -pub const ENOBUFS: c_int = 105; -pub const EISCONN: c_int = 106; -pub const ENOTCONN: c_int = 107; -pub const ESHUTDOWN: c_int = 108; -pub const ETOOMANYREFS: c_int = 109; -pub const ETIMEDOUT: c_int = 110; -pub const ECONNREFUSED: c_int = 111; -pub const EHOSTDOWN: c_int = 112; -pub const EHOSTUNREACH: c_int = 113; -pub const EALREADY: c_int = 114; -pub const EINPROGRESS: c_int = 115; -pub const ESTALE: c_int = 116; -pub const EUCLEAN: c_int = 117; -pub 
const ENOTNAM: c_int = 118; -pub const ENAVAIL: c_int = 119; -pub const EISNAM: c_int = 120; -pub const EREMOTEIO: c_int = 121; -pub const EDQUOT: c_int = 122; - -pub const ENOMEDIUM: c_int = 123; -pub const EMEDIUMTYPE: c_int = 124; -pub const ECANCELED: c_int = 125; -pub const ENOKEY: c_int = 126; -pub const EKEYEXPIRED: c_int = 127; -pub const EKEYREVOKED: c_int = 128; -pub const EKEYREJECTED: c_int = 129; - -pub const EOWNERDEAD: c_int = 130; -pub const ENOTRECOVERABLE: c_int = 131; - -pub const ENOTSUP: c_int = 132; -pub const EFTYPE: c_int = 133; - -// signal codes -pub const SIGHUP: c_int = 1; -pub const SIGINT: c_int = 2; -pub const SIGQUIT: c_int = 3; -pub const SIGILL: c_int = 4; -pub const SIGTRAP: c_int = 5; -pub const SIGABRT: c_int = 6; -pub const SIGIOT: c_int = SIGABRT; -pub const SIGEMT: c_int = 7; -pub const SIGFPE: c_int = 8; -pub const SIGKILL: c_int = 9; -pub const SIGBUS: c_int = 10; -pub const SIGSEGV: c_int = 11; -pub const SIGSYS: c_int = 12; -pub const SIGPIPE: c_int = 13; -pub const SIGALRM: c_int = 14; -pub const SIGTERM: c_int = 15; -pub const SIGURG: c_int = 16; -pub const SIGSTOP: c_int = 17; -pub const SIGTSTP: c_int = 18; -pub const SIGCONT: c_int = 19; -pub const SIGCHLD: c_int = 20; -pub const SIGTTIN: c_int = 21; -pub const SIGTTOU: c_int = 22; -pub const SIGIO: c_int = 23; -pub const SIGXCPU: c_int = 24; -pub const SIGXFSZ: c_int = 25; -pub const SIGVTALRM: c_int = 26; -pub const SIGPROF: c_int = 27; -pub const SIGWINCH: c_int = 28; -pub const SIGINFO: c_int = 29; -pub const SIGUSR1: c_int = 30; -pub const SIGUSR2: c_int = 31; -pub const SIGPWR: c_int = 32; - -#[derive(Debug)] -pub enum FILE {} -impl Copy for FILE {} -impl Clone for FILE { - fn clone(&self) -> FILE { - *self - } -} -#[derive(Debug)] -pub enum fpos_t {} -impl Copy for fpos_t {} -impl Clone for fpos_t { - fn clone(&self) -> fpos_t { - *self - } -} - -extern "C" { - // ctype.h - pub fn isalnum(c: c_int) -> c_int; - pub fn isalpha(c: c_int) -> c_int; - pub fn iscntrl(c: c_int) -> c_int; - pub fn isdigit(c: c_int) -> c_int; - pub fn isgraph(c: c_int) -> c_int; - pub fn islower(c: c_int) -> c_int; - pub fn isprint(c: c_int) -> c_int; - pub fn ispunct(c: c_int) -> c_int; - pub fn isspace(c: c_int) -> c_int; - pub fn isupper(c: c_int) -> c_int; - pub fn isxdigit(c: c_int) -> c_int; - pub fn isblank(c: c_int) -> c_int; - pub fn tolower(c: c_int) -> c_int; - pub fn toupper(c: c_int) -> c_int; - - // stdio.h - pub fn __get_stdio_file(fileno: c_int) -> *mut FILE; - pub fn clearerr(arg1: *mut FILE); - pub fn fclose(arg1: *mut FILE) -> c_int; - pub fn feof(arg1: *mut FILE) -> c_int; - pub fn ferror(arg1: *mut FILE) -> c_int; - pub fn fflush(arg1: *mut FILE) -> c_int; - pub fn fgetc(arg1: *mut FILE) -> c_int; - pub fn fgets(arg1: *mut c_char, arg2: c_int, arg3: *mut FILE) -> *mut c_char; - pub fn fopen(arg1: *const c_char, arg2: *const c_char) -> *mut FILE; - pub fn fprintf(arg1: *mut FILE, arg2: *const c_char, ...) -> c_int; - pub fn fputc(arg1: c_int, arg2: *mut FILE) -> c_int; - pub fn fputs(arg1: *const c_char, arg2: *mut FILE) -> c_int; - pub fn fread(arg1: *mut c_void, arg2: size_t, arg3: size_t, arg4: *mut FILE) -> size_t; - pub fn freopen(arg1: *const c_char, arg2: *const c_char, arg3: *mut FILE) -> *mut FILE; - pub fn fscanf(arg1: *mut FILE, arg2: *const c_char, ...) 
-> c_int; - pub fn fseek(arg1: *mut FILE, arg2: c_long, arg3: c_int) -> c_int; - pub fn ftell(arg1: *mut FILE) -> c_long; - pub fn fwrite(arg1: *const c_void, arg2: size_t, arg3: size_t, arg4: *mut FILE) -> size_t; - pub fn getc(arg1: *mut FILE) -> c_int; - pub fn getchar() -> c_int; - pub fn perror(arg1: *const c_char); - pub fn printf(arg1: *const c_char, ...) -> c_int; - pub fn putc(arg1: c_int, arg2: *mut FILE) -> c_int; - pub fn putchar(arg1: c_int) -> c_int; - pub fn puts(arg1: *const c_char) -> c_int; - pub fn remove(arg1: *const c_char) -> c_int; - pub fn rewind(arg1: *mut FILE); - pub fn scanf(arg1: *const c_char, ...) -> c_int; - pub fn setbuf(arg1: *mut FILE, arg2: *mut c_char); - pub fn setvbuf(arg1: *mut FILE, arg2: *mut c_char, arg3: c_int, arg4: size_t) -> c_int; - pub fn sscanf(arg1: *const c_char, arg2: *const c_char, ...) -> c_int; - pub fn tmpfile() -> *mut FILE; - pub fn ungetc(arg1: c_int, arg2: *mut FILE) -> c_int; - pub fn vfprintf(arg1: *mut FILE, arg2: *const c_char, arg3: __va_list) -> c_int; - pub fn vprintf(arg1: *const c_char, arg2: __va_list) -> c_int; - pub fn gets(arg1: *mut c_char) -> *mut c_char; - pub fn sprintf(arg1: *mut c_char, arg2: *const c_char, ...) -> c_int; - pub fn tmpnam(arg1: *const c_char) -> *mut c_char; - pub fn vsprintf(arg1: *mut c_char, arg2: *const c_char, arg3: __va_list) -> c_int; - pub fn rename(arg1: *const c_char, arg2: *const c_char) -> c_int; - pub fn asiprintf(arg1: *mut *mut c_char, arg2: *const c_char, ...) -> c_int; - pub fn fiprintf(arg1: *mut FILE, arg2: *const c_char, ...) -> c_int; - pub fn fiscanf(arg1: *mut FILE, arg2: *const c_char, ...) -> c_int; - pub fn iprintf(arg1: *const c_char, ...) -> c_int; - pub fn iscanf(arg1: *const c_char, ...) -> c_int; - pub fn siprintf(arg1: *mut c_char, arg2: *const c_char, ...) -> c_int; - pub fn siscanf(arg1: *mut c_char, arg2: *const c_char, ...) -> c_int; - pub fn sniprintf(arg1: *mut c_char, arg2: size_t, arg3: *const c_char, ...) -> c_int; - pub fn vasiprintf(arg1: *mut *mut c_char, arg2: *const c_char, arg3: __va_list) -> c_int; - pub fn vfiprintf(arg1: *mut FILE, arg2: *const c_char, arg3: __va_list) -> c_int; - pub fn vfiscanf(arg1: *mut FILE, arg2: *const c_char, arg3: __va_list) -> c_int; - pub fn viprintf(arg1: *const c_char, arg2: __va_list) -> c_int; - pub fn viscanf(arg1: *const c_char, arg2: __va_list) -> c_int; - pub fn vsiprintf(arg1: *mut c_char, arg2: *const c_char, arg3: __va_list) -> c_int; - pub fn vsiscanf(arg1: *const c_char, arg2: *const c_char, arg3: __va_list) -> c_int; - pub fn vsniprintf( - arg1: *mut c_char, - arg2: size_t, - arg3: *const c_char, - arg4: __va_list, - ) -> c_int; - pub fn vdiprintf(arg1: c_int, arg2: *const c_char, arg3: __va_list) -> c_int; - pub fn diprintf(arg1: c_int, arg2: *const c_char, ...) -> c_int; - pub fn fgetpos(arg1: *mut FILE, arg2: *mut fpos_t) -> c_int; - pub fn fsetpos(arg1: *mut FILE, arg2: *const fpos_t) -> c_int; - pub fn fdopen(arg1: c_int, arg2: *const c_char) -> *mut FILE; - pub fn fileno(arg1: *mut FILE) -> c_int; - pub fn flockfile(arg1: *mut FILE); - pub fn ftrylockfile(arg1: *mut FILE) -> c_int; - pub fn funlockfile(arg1: *mut FILE); - pub fn getc_unlocked(arg1: *mut FILE) -> c_int; - pub fn getchar_unlocked() -> c_int; - pub fn putc_unlocked(arg1: c_int, arg2: *mut FILE) -> c_int; - pub fn putchar_unlocked(arg1: c_int) -> c_int; - pub fn snprintf(arg1: *mut c_char, arg2: size_t, arg3: *const c_char, ...) 
-> c_int; - pub fn vsnprintf( - arg1: *mut c_char, - arg2: size_t, - arg3: *const c_char, - arg4: __va_list, - ) -> c_int; - pub fn getw(arg1: *mut FILE) -> c_int; - pub fn putw(arg1: c_int, arg2: *mut FILE) -> c_int; - pub fn tempnam(arg1: *const c_char, arg2: *const c_char) -> *mut c_char; - pub fn fseeko(stream: *mut FILE, offset: off_t, whence: c_int) -> c_int; - pub fn ftello(stream: *mut FILE) -> off_t; - - // stdlib.h - pub fn atof(arg1: *const c_char) -> f64; - pub fn strtod(arg1: *const c_char, arg2: *mut *mut c_char) -> f64; - pub fn drand48() -> f64; - pub fn erand48(arg1: *mut c_ushort) -> f64; - pub fn strtof(arg1: *const c_char, arg2: *mut *mut c_char) -> f32; - pub fn strtold(arg1: *const c_char, arg2: *mut *mut c_char) -> f64; - pub fn strtod_l(arg1: *const c_char, arg2: *mut *mut c_char, arg3: locale_t) -> f64; - pub fn strtof_l(arg1: *const c_char, arg2: *mut *mut c_char, arg3: locale_t) -> f32; - pub fn strtold_l(arg1: *const c_char, arg2: *mut *mut c_char, arg3: locale_t) -> f64; - pub fn _Exit(arg1: c_int) -> !; - pub fn abort() -> !; - pub fn abs(arg1: c_int) -> c_int; - pub fn atexit(arg1: Option) -> c_int; - pub fn atoi(arg1: *const c_char) -> c_int; - pub fn atol(arg1: *const c_char) -> c_long; - pub fn itoa(arg1: c_int, arg2: *mut c_char, arg3: c_int) -> *mut c_char; - pub fn ltoa(arg1: c_long, arg2: *mut c_char, arg3: c_int) -> *mut c_char; - pub fn ultoa(arg1: c_ulong, arg2: *mut c_char, arg3: c_int) -> *mut c_char; - pub fn bsearch( - arg1: *const c_void, - arg2: *const c_void, - arg3: size_t, - arg4: size_t, - arg5: Option c_int>, - ) -> *mut c_void; - pub fn calloc(arg1: size_t, arg2: size_t) -> *mut c_void; - pub fn div(arg1: c_int, arg2: c_int) -> div_t; - pub fn exit(arg1: c_int) -> !; - pub fn free(arg1: *mut c_void); - pub fn getenv(arg1: *const c_char) -> *mut c_char; - pub fn labs(arg1: c_long) -> c_long; - pub fn ldiv(arg1: c_long, arg2: c_long) -> ldiv_t; - pub fn malloc(arg1: size_t) -> *mut c_void; - pub fn qsort( - arg1: *mut c_void, - arg2: size_t, - arg3: size_t, - arg4: Option c_int>, - ); - pub fn rand() -> c_int; - pub fn realloc(arg1: *mut c_void, arg2: size_t) -> *mut c_void; - pub fn srand(arg1: c_uint); - pub fn strtol(arg1: *const c_char, arg2: *mut *mut c_char, arg3: c_int) -> c_long; - pub fn strtoul(arg1: *const c_char, arg2: *mut *mut c_char, arg3: c_int) -> c_ulong; - pub fn mblen(arg1: *const c_char, arg2: size_t) -> c_int; - pub fn mbstowcs(arg1: *mut wchar_t, arg2: *const c_char, arg3: size_t) -> size_t; - pub fn wctomb(arg1: *mut c_char, arg2: wchar_t) -> c_int; - pub fn mbtowc(arg1: *mut wchar_t, arg2: *const c_char, arg3: size_t) -> c_int; - pub fn wcstombs(arg1: *mut c_char, arg2: *const wchar_t, arg3: size_t) -> size_t; - pub fn rand_r(arg1: *mut c_uint) -> c_int; - pub fn jrand48(arg1: *mut c_ushort) -> c_long; - pub fn lcong48(arg1: *mut c_ushort); - pub fn lrand48() -> c_long; - pub fn mrand48() -> c_long; - pub fn nrand48(arg1: *mut c_ushort) -> c_long; - pub fn seed48(arg1: *mut c_ushort) -> *mut c_ushort; - pub fn srand48(arg1: c_long); - pub fn putenv(arg1: *mut c_char) -> c_int; - pub fn a64l(arg1: *const c_char) -> c_long; - pub fn l64a(arg1: c_long) -> *mut c_char; - pub fn random() -> c_long; - pub fn setstate(arg1: *mut c_char) -> *mut c_char; - pub fn initstate(arg1: c_uint, arg2: *mut c_char, arg3: size_t) -> *mut c_char; - pub fn srandom(arg1: c_uint); - pub fn mkostemp(arg1: *mut c_char, arg2: c_int) -> c_int; - pub fn mkostemps(arg1: *mut c_char, arg2: c_int, arg3: c_int) -> c_int; - pub fn mkdtemp(arg1: 
*mut c_char) -> *mut c_char; - pub fn mkstemp(arg1: *mut c_char) -> c_int; - pub fn mktemp(arg1: *mut c_char) -> *mut c_char; - pub fn atoll(arg1: *const c_char) -> c_longlong; - pub fn llabs(arg1: c_longlong) -> c_longlong; - pub fn lldiv(arg1: c_longlong, arg2: c_longlong) -> lldiv_t; - pub fn strtoll(arg1: *const c_char, arg2: *mut *mut c_char, arg3: c_int) -> c_longlong; - pub fn strtoull(arg1: *const c_char, arg2: *mut *mut c_char, arg3: c_int) -> c_ulonglong; - pub fn aligned_alloc(arg1: size_t, arg2: size_t) -> *mut c_void; - pub fn at_quick_exit(arg1: Option) -> c_int; - pub fn quick_exit(arg1: c_int); - pub fn setenv(arg1: *const c_char, arg2: *const c_char, arg3: c_int) -> c_int; - pub fn unsetenv(arg1: *const c_char) -> c_int; - pub fn humanize_number( - arg1: *mut c_char, - arg2: size_t, - arg3: i64, - arg4: *const c_char, - arg5: c_int, - arg6: c_int, - ) -> c_int; - pub fn dehumanize_number(arg1: *const c_char, arg2: *mut i64) -> c_int; - pub fn getenv_r(arg1: *const c_char, arg2: *mut c_char, arg3: size_t) -> c_int; - pub fn heapsort( - arg1: *mut c_void, - arg2: size_t, - arg3: size_t, - arg4: Option c_int>, - ) -> c_int; - pub fn mergesort( - arg1: *mut c_void, - arg2: size_t, - arg3: size_t, - arg4: Option c_int>, - ) -> c_int; - pub fn radixsort( - arg1: *mut *const c_uchar, - arg2: c_int, - arg3: *const c_uchar, - arg4: c_uint, - ) -> c_int; - pub fn sradixsort( - arg1: *mut *const c_uchar, - arg2: c_int, - arg3: *const c_uchar, - arg4: c_uint, - ) -> c_int; - pub fn getprogname() -> *const c_char; - pub fn setprogname(arg1: *const c_char); - pub fn qabs(arg1: quad_t) -> quad_t; - pub fn strtoq(arg1: *const c_char, arg2: *mut *mut c_char, arg3: c_int) -> quad_t; - pub fn strtouq(arg1: *const c_char, arg2: *mut *mut c_char, arg3: c_int) -> u_quad_t; - pub fn strsuftoll( - arg1: *const c_char, - arg2: *const c_char, - arg3: c_longlong, - arg4: c_longlong, - ) -> c_longlong; - pub fn strsuftollx( - arg1: *const c_char, - arg2: *const c_char, - arg3: c_longlong, - arg4: c_longlong, - arg5: *mut c_char, - arg6: size_t, - ) -> c_longlong; - pub fn l64a_r(arg1: c_long, arg2: *mut c_char, arg3: c_int) -> c_int; - pub fn qdiv(arg1: quad_t, arg2: quad_t) -> qdiv_t; - pub fn strtol_l( - arg1: *const c_char, - arg2: *mut *mut c_char, - arg3: c_int, - arg4: locale_t, - ) -> c_long; - pub fn strtoul_l( - arg1: *const c_char, - arg2: *mut *mut c_char, - arg3: c_int, - arg4: locale_t, - ) -> c_ulong; - pub fn strtoll_l( - arg1: *const c_char, - arg2: *mut *mut c_char, - arg3: c_int, - arg4: locale_t, - ) -> c_longlong; - pub fn strtoull_l( - arg1: *const c_char, - arg2: *mut *mut c_char, - arg3: c_int, - arg4: locale_t, - ) -> c_ulonglong; - pub fn strtoq_l( - arg1: *const c_char, - arg2: *mut *mut c_char, - arg3: c_int, - arg4: locale_t, - ) -> quad_t; - pub fn strtouq_l( - arg1: *const c_char, - arg2: *mut *mut c_char, - arg3: c_int, - arg4: locale_t, - ) -> u_quad_t; - pub fn _mb_cur_max_l(arg1: locale_t) -> size_t; - pub fn mblen_l(arg1: *const c_char, arg2: size_t, arg3: locale_t) -> c_int; - pub fn mbstowcs_l( - arg1: *mut wchar_t, - arg2: *const c_char, - arg3: size_t, - arg4: locale_t, - ) -> size_t; - pub fn wctomb_l(arg1: *mut c_char, arg2: wchar_t, arg3: locale_t) -> c_int; - pub fn mbtowc_l(arg1: *mut wchar_t, arg2: *const c_char, arg3: size_t, arg4: locale_t) - -> c_int; - pub fn wcstombs_l( - arg1: *mut c_char, - arg2: *const wchar_t, - arg3: size_t, - arg4: locale_t, - ) -> size_t; - - // string.h - pub fn memchr(arg1: *const c_void, arg2: c_int, arg3: size_t) -> *mut 
c_void; - pub fn memcmp(arg1: *const c_void, arg2: *const c_void, arg3: size_t) -> c_int; - pub fn memcpy(arg1: *mut c_void, arg2: *const c_void, arg3: size_t) -> *mut c_void; - pub fn memmove(arg1: *mut c_void, arg2: *const c_void, arg3: size_t) -> *mut c_void; - pub fn memset(arg1: *mut c_void, arg2: c_int, arg3: size_t) -> *mut c_void; - pub fn strcat(arg1: *mut c_char, arg2: *const c_char) -> *mut c_char; - pub fn strchr(arg1: *const c_char, arg2: c_int) -> *mut c_char; - pub fn strcmp(arg1: *const c_char, arg2: *const c_char) -> c_int; - pub fn strcoll(arg1: *const c_char, arg2: *const c_char) -> c_int; - pub fn strcpy(arg1: *mut c_char, arg2: *const c_char) -> *mut c_char; - pub fn strcspn(arg1: *const c_char, arg2: *const c_char) -> size_t; - pub fn strerror(arg1: c_int) -> *mut c_char; - pub fn strlen(arg1: *const c_char) -> size_t; - pub fn strncat(arg1: *mut c_char, arg2: *const c_char, arg3: size_t) -> *mut c_char; - pub fn strncmp(arg1: *const c_char, arg2: *const c_char, arg3: size_t) -> c_int; - pub fn strncpy(arg1: *mut c_char, arg2: *const c_char, arg3: size_t) -> *mut c_char; - pub fn strpbrk(arg1: *const c_char, arg2: *const c_char) -> *mut c_char; - pub fn strrchr(arg1: *const c_char, arg2: c_int) -> *mut c_char; - pub fn strspn(arg1: *const c_char, arg2: *const c_char) -> size_t; - pub fn strstr(arg1: *const c_char, arg2: *const c_char) -> *mut c_char; - pub fn strtok(arg1: *mut c_char, arg2: *const c_char) -> *mut c_char; - pub fn strtok_r(arg1: *mut c_char, arg2: *const c_char, arg3: *mut *mut c_char) -> *mut c_char; - pub fn strerror_r(arg1: c_int, arg2: *mut c_char, arg3: size_t) -> c_int; - pub fn strxfrm(arg1: *mut c_char, arg2: *const c_char, arg3: size_t) -> size_t; - pub fn memccpy( - arg1: *mut c_void, - arg2: *const c_void, - arg3: c_int, - arg4: size_t, - ) -> *mut c_void; - pub fn strdup(arg1: *const c_char) -> *mut c_char; - pub fn stpcpy(arg1: *mut c_char, arg2: *const c_char) -> *mut c_char; - pub fn stpncpy(arg1: *mut c_char, arg2: *const c_char, arg3: size_t) -> *mut c_char; - pub fn strnlen(arg1: *const c_char, arg2: size_t) -> size_t; - pub fn memmem( - arg1: *const c_void, - arg2: size_t, - arg3: *const c_void, - arg4: size_t, - ) -> *mut c_void; - pub fn strcasestr(arg1: *const c_char, arg2: *const c_char) -> *mut c_char; - pub fn strlcat(arg1: *mut c_char, arg2: *const c_char, arg3: size_t) -> size_t; - pub fn strlcpy(arg1: *mut c_char, arg2: *const c_char, arg3: size_t) -> size_t; - pub fn strsep(arg1: *mut *mut c_char, arg2: *const c_char) -> *mut c_char; - pub fn stresep(arg1: *mut *mut c_char, arg2: *const c_char, arg3: c_int) -> *mut c_char; - pub fn strndup(arg1: *const c_char, arg2: size_t) -> *mut c_char; - pub fn memrchr(arg1: *const c_void, arg2: c_int, arg3: size_t) -> *mut c_void; - pub fn explicit_memset(arg1: *mut c_void, arg2: c_int, arg3: size_t) -> *mut c_void; - pub fn consttime_memequal(arg1: *const c_void, arg2: *const c_void, arg3: size_t) -> c_int; - pub fn strcoll_l(arg1: *const c_char, arg2: *const c_char, arg3: locale_t) -> c_int; - pub fn strxfrm_l( - arg1: *mut c_char, - arg2: *const c_char, - arg3: size_t, - arg4: locale_t, - ) -> size_t; - pub fn strerror_l(arg1: c_int, arg2: locale_t) -> *mut c_char; - - // strings.h - pub fn bcmp(arg1: *const c_void, arg2: *const c_void, arg3: size_t) -> c_int; - pub fn bcopy(arg1: *const c_void, arg2: *mut c_void, arg3: size_t); - pub fn bzero(arg1: *mut c_void, arg2: size_t); - pub fn ffs(arg1: c_int) -> c_int; - pub fn popcount(arg1: c_uint) -> c_uint; - pub fn popcountl(arg1: 
c_ulong) -> c_uint; - pub fn popcountll(arg1: c_ulonglong) -> c_uint; - pub fn popcount32(arg1: u32) -> c_uint; - pub fn popcount64(arg1: u64) -> c_uint; - pub fn rindex(arg1: *const c_char, arg2: c_int) -> *mut c_char; - pub fn strcasecmp(arg1: *const c_char, arg2: *const c_char) -> c_int; - pub fn strncasecmp(arg1: *const c_char, arg2: *const c_char, arg3: size_t) -> c_int; - - // signal.h - pub fn signal(arg1: c_int, arg2: sighandler_t) -> sighandler_t; - pub fn raise(arg1: c_int) -> c_int; - - // time.h - pub fn asctime(arg1: *const tm) -> *mut c_char; - pub fn clock() -> clock_t; - pub fn ctime(arg1: *const time_t) -> *mut c_char; - pub fn difftime(arg1: time_t, arg2: time_t) -> f64; - pub fn gmtime(arg1: *const time_t) -> *mut tm; - pub fn localtime(arg1: *const time_t) -> *mut tm; - pub fn time(arg1: *mut time_t) -> time_t; - pub fn mktime(arg1: *mut tm) -> time_t; - pub fn strftime( - arg1: *mut c_char, - arg2: size_t, - arg3: *const c_char, - arg4: *const tm, - ) -> size_t; - pub fn utime(arg1: *const c_char, arg2: *mut time_t) -> c_int; - pub fn asctime_r(arg1: *const tm, arg2: *mut c_char) -> *mut c_char; - pub fn ctime_r(arg1: *const time_t, arg2: *mut c_char) -> *mut c_char; - pub fn gmtime_r(arg1: *const time_t, arg2: *mut tm) -> *mut tm; - pub fn localtime_r(arg1: *const time_t, arg2: *mut tm) -> *mut tm; - - // sys/stat.h - pub fn stat(arg1: *const c_char, arg2: *mut stat) -> c_int; - pub fn lstat(arg1: *const c_char, arg2: *mut stat) -> c_int; - pub fn fstat(arg1: c_int, arg2: *mut stat) -> c_int; - pub fn chmod(arg1: *const c_char, arg2: __mode_t) -> c_int; - pub fn mkdir(arg1: *const c_char, arg2: __mode_t) -> c_int; - - // fcntl.h - pub fn open(arg1: *const c_char, arg2: c_int, ...) -> c_int; - pub fn creat(arg1: *const c_char, arg2: c_int) -> c_int; - pub fn close(arg1: c_int) -> c_int; - pub fn read(arg1: c_int, arg2: *mut c_void, arg3: c_int) -> c_int; - pub fn write(arg1: c_int, arg2: *const c_void, arg3: c_int) -> c_int; - pub fn unlink(arg1: *const c_char) -> c_int; - pub fn tell(arg1: c_int) -> c_long; - pub fn dup(arg1: c_int) -> c_int; - pub fn dup2(arg1: c_int, arg2: c_int) -> c_int; - pub fn access(arg1: *const c_char, arg2: c_int) -> c_int; - pub fn rmdir(arg1: *const c_char) -> c_int; - pub fn chdir(arg1: *const c_char) -> c_int; - pub fn _exit(arg1: c_int); - pub fn getwd(arg1: *mut c_char) -> *mut c_char; - pub fn getcwd(arg1: *mut c_char, arg2: size_t) -> *mut c_char; - pub static mut optarg: *mut c_char; - pub static mut opterr: c_int; - pub static mut optind: c_int; - pub static mut optopt: c_int; - pub static mut optreset: c_int; - pub fn getopt(arg1: c_int, arg2: *mut *mut c_char, arg3: *const c_char) -> c_int; - pub static mut suboptarg: *mut c_char; - pub fn getsubopt( - arg1: *mut *mut c_char, - arg2: *const *mut c_char, - arg3: *mut *mut c_char, - ) -> c_int; - pub fn fcntl(arg1: c_int, arg2: c_int, ...) 
-> c_int; - pub fn getpid() -> pid_t; - pub fn sleep(arg1: c_uint) -> c_uint; - pub fn usleep(arg1: useconds_t) -> c_int; - - // locale.h - pub fn localeconv() -> *mut lconv; - pub fn setlocale(arg1: c_int, arg2: *const c_char) -> *mut c_char; - pub fn duplocale(arg1: locale_t) -> locale_t; - pub fn freelocale(arg1: locale_t); - pub fn localeconv_l(arg1: locale_t) -> *mut lconv; - pub fn newlocale(arg1: c_int, arg2: *const c_char, arg3: locale_t) -> locale_t; - - // langinfo.h - pub fn nl_langinfo(item: crate::nl_item) -> *mut c_char; - pub fn nl_langinfo_l(item: crate::nl_item, locale: locale_t) -> *mut c_char; - - // malloc.h - pub fn memalign(align: size_t, size: size_t) -> *mut c_void; - - // sys/types.h - pub fn lseek(arg1: c_int, arg2: __off_t, arg3: c_int) -> __off_t; -} - -cfg_if! { - if #[cfg(target_arch = "aarch64")] { - mod aarch64; - pub use self::aarch64::*; - } else if #[cfg(any(target_arch = "arm"))] { - mod arm; - pub use self::arm::*; - } else { - // Unknown target_arch - } -} diff --git a/vendor/libc/src/switch.rs b/vendor/libc/src/switch.rs deleted file mode 100644 index d965ff7005fb24..00000000000000 --- a/vendor/libc/src/switch.rs +++ /dev/null @@ -1,16 +0,0 @@ -//! Switch C type definitions - -pub type intmax_t = i64; -pub type uintmax_t = u64; - -pub type size_t = usize; -pub type ptrdiff_t = isize; -pub type intptr_t = isize; -pub type uintptr_t = usize; -pub type ssize_t = isize; - -pub type off_t = i64; -pub type wchar_t = u32; - -pub const INT_MIN: c_int = -2147483648; -pub const INT_MAX: c_int = 2147483647; diff --git a/vendor/libc/src/teeos/mod.rs b/vendor/libc/src/teeos/mod.rs deleted file mode 100644 index fd9c0b168aba49..00000000000000 --- a/vendor/libc/src/teeos/mod.rs +++ /dev/null @@ -1,1355 +0,0 @@ -//! Libc bindings for teeos -//! -//! Apparently the loader just dynamically links it anyway, but fails -//! when linking is explicitly requested. -#![allow(non_camel_case_types)] -#![allow(non_snake_case)] - -use crate::prelude::*; - -pub type c_bool = i32; - -pub type intmax_t = i64; - -pub type uintmax_t = u64; - -pub type size_t = usize; - -pub type ptrdiff_t = isize; - -pub type intptr_t = isize; - -pub type uintptr_t = usize; - -pub type ssize_t = isize; - -pub type pid_t = c_int; - -pub type wchar_t = u32; - -// long double in C means A float point value, which has 128bit length. -// but some bit maybe not used, so the real length of long double could be 80(x86) or 128(power pc/IEEE) -// this is different from f128(not stable and not included default) in Rust, so we use u128 for FFI(Rust to C). -// this is unstable and will cause to memfault/data abort. 
-pub type c_longdouble = _CLongDouble; - -pub type pthread_t = c_ulong; - -pub type pthread_key_t = c_uint; - -pub type pthread_spinlock_t = c_int; - -pub type off_t = i64; - -pub type time_t = c_long; - -pub type clock_t = c_long; - -pub type clockid_t = c_int; - -pub type suseconds_t = c_long; - -pub type once_fn = extern "C" fn() -> c_void; - -pub type pthread_once_t = c_int; - -pub type va_list = *mut c_char; - -pub type wint_t = c_uint; - -pub type wctype_t = c_ulong; - -pub type cmpfunc = extern "C" fn(x: *const c_void, y: *const c_void) -> c_int; - -#[repr(align(16))] -pub struct _CLongDouble(pub u128); - -#[repr(align(8))] -#[repr(C)] -pub struct pthread_cond_t { - #[doc(hidden)] - size: [u8; __SIZEOF_PTHREAD_COND_T], -} - -#[repr(align(8))] -#[repr(C)] -pub struct pthread_mutex_t { - #[doc(hidden)] - size: [u8; __SIZEOF_PTHREAD_MUTEX_T], -} - -#[repr(align(4))] -#[repr(C)] -pub struct pthread_mutexattr_t { - #[doc(hidden)] - size: [u8; __SIZEOF_PTHREAD_MUTEXATTR_T], -} - -#[repr(align(4))] -#[repr(C)] -pub struct pthread_condattr_t { - #[doc(hidden)] - size: [u8; __SIZEOF_PTHREAD_CONDATTR_T], -} - -#[repr(C)] -pub struct pthread_attr_t { - __size: [u64; 7], -} - -#[repr(C)] -pub struct cpu_set_t { - bits: [c_ulong; 128 / size_of::()], -} - -#[repr(C)] -pub struct timespec { - pub tv_sec: time_t, - pub tv_nsec: c_long, -} - -#[repr(C)] -pub struct timeval { - pub tv_sec: time_t, - pub tv_usec: suseconds_t, -} - -#[repr(C)] -pub struct tm { - pub tm_sec: c_int, - pub tm_min: c_int, - pub tm_hour: c_int, - pub tm_mday: c_int, - pub tm_mon: c_int, - pub tm_year: c_int, - pub tm_wday: c_int, - pub tm_yday: c_int, - pub tm_isdst: c_int, - pub __tm_gmtoff: c_long, - pub __tm_zone: *const c_char, -} - -#[repr(C)] -pub struct mbstate_t { - pub __opaque1: c_uint, - pub __opaque2: c_uint, -} - -#[repr(C)] -pub struct sem_t { - pub __val: [c_int; 4 * size_of::() / size_of::()], -} - -#[repr(C)] -pub struct div_t { - pub quot: c_int, - pub rem: c_int, -} - -// fcntl -pub const O_CREAT: u32 = 0o100; - -pub const O_EXCL: u32 = 0o200; - -pub const O_NOCTTY: u32 = 0o400; - -pub const O_TRUNC: u32 = 0o1000; - -pub const O_APPEND: u32 = 0o2000; - -pub const O_NONBLOCK: u32 = 0o4000; - -pub const O_DSYNC: u32 = 0o10000; - -pub const O_SYNC: u32 = 0o4010000; - -pub const O_RSYNC: u32 = 0o4010000; - -pub const O_DIRECTORY: u32 = 0o200000; - -pub const O_NOFOLLOW: u32 = 0o400000; - -pub const O_CLOEXEC: u32 = 0o2000000; - -pub const O_ASYNC: u32 = 0o20000; - -pub const O_DIRECT: u32 = 0o40000; - -pub const O_LARGEFILE: u32 = 0o100000; - -pub const O_NOATIME: u32 = 0o1000000; - -pub const O_PATH: u32 = 0o10000000; - -pub const O_TMPFILE: u32 = 0o20200000; - -pub const O_NDELAY: u32 = O_NONBLOCK; - -pub const F_DUPFD: u32 = 0; - -pub const F_GETFD: u32 = 1; - -pub const F_SETFD: u32 = 2; - -pub const F_GETFL: u32 = 3; - -pub const F_SETFL: u32 = 4; - -pub const F_SETOWN: u32 = 8; - -pub const F_GETOWN: u32 = 9; - -pub const F_SETSIG: u32 = 10; - -pub const F_GETSIG: u32 = 11; - -pub const F_GETLK: u32 = 12; - -pub const F_SETLK: u32 = 13; - -pub const F_SETLKW: u32 = 14; - -pub const F_SETOWN_EX: u32 = 15; - -pub const F_GETOWN_EX: u32 = 16; - -pub const F_GETOWNER_UIDS: u32 = 17; - -// mman -pub const MAP_FAILED: u64 = 0xffffffffffffffff; - -pub const MAP_FIXED_NOREPLACE: u32 = 0x100000; - -pub const MAP_SHARED_VALIDATE: u32 = 0x03; - -pub const MAP_SHARED: u32 = 0x01; - -pub const MAP_PRIVATE: u32 = 0x02; - -pub const MAP_TYPE: u32 = 0x0f; - -pub const MAP_FIXED: u32 = 0x10; - -pub const MAP_ANON: u32 = 
0x20; - -pub const MAP_ANONYMOUS: u32 = MAP_ANON; - -pub const MAP_NORESERVE: u32 = 0x4000; - -pub const MAP_GROWSDOWN: u32 = 0x0100; - -pub const MAP_DENYWRITE: u32 = 0x0800; - -pub const MAP_EXECUTABLE: u32 = 0x1000; - -pub const MAP_LOCKED: u32 = 0x2000; - -pub const MAP_POPULATE: u32 = 0x8000; - -pub const MAP_NONBLOCK: u32 = 0x10000; - -pub const MAP_STACK: u32 = 0x20000; - -pub const MAP_HUGETLB: u32 = 0x40000; - -pub const MAP_SYNC: u32 = 0x80000; - -pub const MAP_FILE: u32 = 0; - -pub const MAP_HUGE_SHIFT: u32 = 26; - -pub const MAP_HUGE_MASK: u32 = 0x3f; - -pub const MAP_HUGE_16KB: u32 = 14 << 26; - -pub const MAP_HUGE_64KB: u32 = 16 << 26; - -pub const MAP_HUGE_512KB: u32 = 19 << 26; - -pub const MAP_HUGE_1MB: u32 = 20 << 26; - -pub const MAP_HUGE_2MB: u32 = 21 << 26; - -pub const MAP_HUGE_8MB: u32 = 23 << 26; - -pub const MAP_HUGE_16MB: u32 = 24 << 26; - -pub const MAP_HUGE_32MB: u32 = 25 << 26; - -pub const MAP_HUGE_256MB: u32 = 28 << 26; - -pub const MAP_HUGE_512MB: u32 = 29 << 26; - -pub const MAP_HUGE_1GB: u32 = 30 << 26; - -pub const MAP_HUGE_2GB: u32 = 31 << 26; - -pub const MAP_HUGE_16GB: u32 = 34u32 << 26; - -pub const PROT_NONE: u32 = 0; - -pub const PROT_READ: u32 = 1; - -pub const PROT_WRITE: u32 = 2; - -pub const PROT_EXEC: u32 = 4; - -pub const PROT_GROWSDOWN: u32 = 0x01000000; - -pub const PROT_GROWSUP: u32 = 0x02000000; - -pub const MS_ASYNC: u32 = 1; - -pub const MS_INVALIDATE: u32 = 2; - -pub const MS_SYNC: u32 = 4; - -pub const MCL_CURRENT: u32 = 1; - -pub const MCL_FUTURE: u32 = 2; - -pub const MCL_ONFAULT: u32 = 4; - -pub const POSIX_MADV_NORMAL: u32 = 0; - -pub const POSIX_MADV_RANDOM: u32 = 1; - -pub const POSIX_MADV_SEQUENTIAL: u32 = 2; - -pub const POSIX_MADV_WILLNEED: u32 = 3; - -pub const POSIX_MADV_DONTNEED: u32 = 4; - -// wctype -pub const WCTYPE_ALNUM: u64 = 1; - -pub const WCTYPE_ALPHA: u64 = 2; - -pub const WCTYPE_BLANK: u64 = 3; - -pub const WCTYPE_CNTRL: u64 = 4; - -pub const WCTYPE_DIGIT: u64 = 5; - -pub const WCTYPE_GRAPH: u64 = 6; - -pub const WCTYPE_LOWER: u64 = 7; - -pub const WCTYPE_PRINT: u64 = 8; - -pub const WCTYPE_PUNCT: u64 = 9; - -pub const WCTYPE_SPACE: u64 = 10; - -pub const WCTYPE_UPPER: u64 = 11; - -pub const WCTYPE_XDIGIT: u64 = 12; - -// locale -pub const LC_CTYPE: i32 = 0; - -pub const LC_NUMERIC: i32 = 1; - -pub const LC_TIME: i32 = 2; - -pub const LC_COLLATE: i32 = 3; - -pub const LC_MONETARY: i32 = 4; - -pub const LC_MESSAGES: i32 = 5; - -pub const LC_ALL: i32 = 6; - -// pthread -pub const __SIZEOF_PTHREAD_COND_T: usize = 48; - -pub const __SIZEOF_PTHREAD_MUTEX_T: usize = 40; - -pub const __SIZEOF_PTHREAD_MUTEXATTR_T: usize = 4; - -pub const __SIZEOF_PTHREAD_CONDATTR_T: usize = 4; - -// errno.h -pub const EPERM: c_int = 1; - -pub const ENOENT: c_int = 2; - -pub const ESRCH: c_int = 3; - -pub const EINTR: c_int = 4; - -pub const EIO: c_int = 5; - -pub const ENXIO: c_int = 6; - -pub const E2BIG: c_int = 7; - -pub const ENOEXEC: c_int = 8; - -pub const EBADF: c_int = 9; - -pub const ECHILD: c_int = 10; - -pub const EAGAIN: c_int = 11; - -pub const ENOMEM: c_int = 12; - -pub const EACCES: c_int = 13; - -pub const EFAULT: c_int = 14; - -pub const ENOTBLK: c_int = 15; - -pub const EBUSY: c_int = 16; - -pub const EEXIST: c_int = 17; - -pub const EXDEV: c_int = 18; - -pub const ENODEV: c_int = 19; - -pub const ENOTDIR: c_int = 20; - -pub const EISDIR: c_int = 21; - -pub const EINVAL: c_int = 22; - -pub const ENFILE: c_int = 23; - -pub const EMFILE: c_int = 24; - -pub const ENOTTY: c_int = 25; - -pub const ETXTBSY: c_int = 26; - -pub 
const EFBIG: c_int = 27; - -pub const ENOSPC: c_int = 28; - -pub const ESPIPE: c_int = 29; - -pub const EROFS: c_int = 30; - -pub const EMLINK: c_int = 31; - -pub const EPIPE: c_int = 32; - -pub const EDOM: c_int = 33; - -pub const ERANGE: c_int = 34; - -pub const EDEADLK: c_int = 35; - -pub const ENAMETOOLONG: c_int = 36; - -pub const ENOLCK: c_int = 37; - -pub const ENOSYS: c_int = 38; - -pub const ENOTEMPTY: c_int = 39; - -pub const ELOOP: c_int = 40; - -pub const EWOULDBLOCK: c_int = EAGAIN; - -pub const ENOMSG: c_int = 42; - -pub const EIDRM: c_int = 43; - -pub const ECHRNG: c_int = 44; - -pub const EL2NSYNC: c_int = 45; - -pub const EL3HLT: c_int = 46; - -pub const EL3RST: c_int = 47; - -pub const ELNRNG: c_int = 48; - -pub const EUNATCH: c_int = 49; - -pub const ENOCSI: c_int = 50; - -pub const EL2HLT: c_int = 51; - -pub const EBADE: c_int = 52; - -pub const EBADR: c_int = 53; - -pub const EXFULL: c_int = 54; - -pub const ENOANO: c_int = 55; - -pub const EBADRQC: c_int = 56; - -pub const EBADSLT: c_int = 57; - -pub const EDEADLOCK: c_int = EDEADLK; - -pub const EBFONT: c_int = 59; - -pub const ENOSTR: c_int = 60; - -pub const ENODATA: c_int = 61; - -pub const ETIME: c_int = 62; - -pub const ENOSR: c_int = 63; - -pub const ENONET: c_int = 64; - -pub const ENOPKG: c_int = 65; - -pub const EREMOTE: c_int = 66; - -pub const ENOLINK: c_int = 67; - -pub const EADV: c_int = 68; - -pub const ESRMNT: c_int = 69; - -pub const ECOMM: c_int = 70; - -pub const EPROTO: c_int = 71; - -pub const EMULTIHOP: c_int = 72; - -pub const EDOTDOT: c_int = 73; - -pub const EBADMSG: c_int = 74; - -pub const EOVERFLOW: c_int = 75; - -pub const ENOTUNIQ: c_int = 76; - -pub const EBADFD: c_int = 77; - -pub const EREMCHG: c_int = 78; - -pub const ELIBACC: c_int = 79; - -pub const ELIBBAD: c_int = 80; - -pub const ELIBSCN: c_int = 81; - -pub const ELIBMAX: c_int = 82; - -pub const ELIBEXEC: c_int = 83; - -pub const EILSEQ: c_int = 84; - -pub const ERESTART: c_int = 85; - -pub const ESTRPIPE: c_int = 86; - -pub const EUSERS: c_int = 87; - -pub const ENOTSOCK: c_int = 88; - -pub const EDESTADDRREQ: c_int = 89; - -pub const EMSGSIZE: c_int = 90; - -pub const EPROTOTYPE: c_int = 91; - -pub const ENOPROTOOPT: c_int = 92; - -pub const EPROTONOSUPPOR: c_int = 93; - -pub const ESOCKTNOSUPPOR: c_int = 94; - -pub const EOPNOTSUPP: c_int = 95; - -pub const ENOTSUP: c_int = EOPNOTSUPP; - -pub const EPFNOSUPPORT: c_int = 96; - -pub const EAFNOSUPPORT: c_int = 97; - -pub const EADDRINUSE: c_int = 98; - -pub const EADDRNOTAVAIL: c_int = 99; - -pub const ENETDOWN: c_int = 100; - -pub const ENETUNREACH: c_int = 101; - -pub const ENETRESET: c_int = 102; - -pub const ECONNABORTED: c_int = 103; - -pub const ECONNRESET: c_int = 104; - -pub const ENOBUFS: c_int = 105; - -pub const EISCONN: c_int = 106; - -pub const ENOTCONN: c_int = 107; - -pub const ESHUTDOWN: c_int = 108; - -pub const ETOOMANYREFS: c_int = 109; - -pub const ETIMEDOUT: c_int = 110; - -pub const ECONNREFUSED: c_int = 111; - -pub const EHOSTDOWN: c_int = 112; - -pub const EHOSTUNREACH: c_int = 113; - -pub const EALREADY: c_int = 114; - -pub const EINPROGRESS: c_int = 115; - -pub const ESTALE: c_int = 116; - -pub const EUCLEAN: c_int = 117; - -pub const ENOTNAM: c_int = 118; - -pub const ENAVAIL: c_int = 119; - -pub const EISNAM: c_int = 120; - -pub const EREMOTEIO: c_int = 121; - -pub const EDQUOT: c_int = 122; - -pub const ENOMEDIUM: c_int = 123; - -pub const EMEDIUMTYPE: c_int = 124; - -pub const ECANCELED: c_int = 125; - -pub const ENOKEY: c_int = 126; - -pub const 
EKEYEXPIRED: c_int = 127; - -pub const EKEYREVOKED: c_int = 128; - -pub const EKEYREJECTED: c_int = 129; - -pub const EOWNERDEAD: c_int = 130; - -pub const ENOTRECOVERABLE: c_int = 131; - -pub const ERFKILL: c_int = 132; - -pub const EHWPOISON: c_int = 133; - -// pthread_attr.h -pub const TEESMP_THREAD_ATTR_CA_WILDCARD: c_int = 0; - -pub const TEESMP_THREAD_ATTR_CA_INHERIT: c_int = -1; - -pub const TEESMP_THREAD_ATTR_TASK_ID_INHERIT: c_int = -1; - -pub const TEESMP_THREAD_ATTR_HAS_SHADOW: c_int = 0x1; - -pub const TEESMP_THREAD_ATTR_NO_SHADOW: c_int = 0x0; - -// unistd.h -pub const _SC_ARG_MAX: c_int = 0; - -pub const _SC_CHILD_MAX: c_int = 1; - -pub const _SC_CLK_TCK: c_int = 2; - -pub const _SC_NGROUPS_MAX: c_int = 3; - -pub const _SC_OPEN_MAX: c_int = 4; - -pub const _SC_STREAM_MAX: c_int = 5; - -pub const _SC_TZNAME_MAX: c_int = 6; - -pub const _SC_JOB_CONTROL: c_int = 7; - -pub const _SC_SAVED_IDS: c_int = 8; - -pub const _SC_REALTIME_SIGNALS: c_int = 9; - -pub const _SC_PRIORITY_SCHEDULING: c_int = 10; - -pub const _SC_TIMERS: c_int = 11; - -pub const _SC_ASYNCHRONOUS_IO: c_int = 12; - -pub const _SC_PRIORITIZED_IO: c_int = 13; - -pub const _SC_SYNCHRONIZED_IO: c_int = 14; - -pub const _SC_FSYNC: c_int = 15; - -pub const _SC_MAPPED_FILES: c_int = 16; - -pub const _SC_MEMLOCK: c_int = 17; - -pub const _SC_MEMLOCK_RANGE: c_int = 18; - -pub const _SC_MEMORY_PROTECTION: c_int = 19; - -pub const _SC_MESSAGE_PASSING: c_int = 20; - -pub const _SC_SEMAPHORES: c_int = 21; - -pub const _SC_SHARED_MEMORY_OBJECTS: c_int = 22; - -pub const _SC_AIO_LISTIO_MAX: c_int = 23; - -pub const _SC_AIO_MAX: c_int = 24; - -pub const _SC_AIO_PRIO_DELTA_MAX: c_int = 25; - -pub const _SC_DELAYTIMER_MAX: c_int = 26; - -pub const _SC_MQ_OPEN_MAX: c_int = 27; - -pub const _SC_MQ_PRIO_MAX: c_int = 28; - -pub const _SC_VERSION: c_int = 29; - -pub const _SC_PAGE_SIZE: c_int = 30; - -pub const _SC_PAGESIZE: c_int = 30; /* !! */ - -pub const _SC_RTSIG_MAX: c_int = 31; - -pub const _SC_SEM_NSEMS_MAX: c_int = 32; - -pub const _SC_SEM_VALUE_MAX: c_int = 33; - -pub const _SC_SIGQUEUE_MAX: c_int = 34; - -pub const _SC_TIMER_MAX: c_int = 35; - -pub const _SC_BC_BASE_MAX: c_int = 36; - -pub const _SC_BC_DIM_MAX: c_int = 37; - -pub const _SC_BC_SCALE_MAX: c_int = 38; - -pub const _SC_BC_STRING_MAX: c_int = 39; - -pub const _SC_COLL_WEIGHTS_MAX: c_int = 40; - -pub const _SC_EXPR_NEST_MAX: c_int = 42; - -pub const _SC_LINE_MAX: c_int = 43; - -pub const _SC_RE_DUP_MAX: c_int = 44; - -pub const _SC_2_VERSION: c_int = 46; - -pub const _SC_2_C_BIND: c_int = 47; - -pub const _SC_2_C_DEV: c_int = 48; - -pub const _SC_2_FORT_DEV: c_int = 49; - -pub const _SC_2_FORT_RUN: c_int = 50; - -pub const _SC_2_SW_DEV: c_int = 51; - -pub const _SC_2_LOCALEDEF: c_int = 52; - -pub const _SC_UIO_MAXIOV: c_int = 60; /* !! 
*/ - -pub const _SC_IOV_MAX: c_int = 60; - -pub const _SC_THREADS: c_int = 67; - -pub const _SC_THREAD_SAFE_FUNCTIONS: c_int = 68; - -pub const _SC_GETGR_R_SIZE_MAX: c_int = 69; - -pub const _SC_GETPW_R_SIZE_MAX: c_int = 70; - -pub const _SC_LOGIN_NAME_MAX: c_int = 71; - -pub const _SC_TTY_NAME_MAX: c_int = 72; - -pub const _SC_THREAD_DESTRUCTOR_ITERATIONS: c_int = 73; - -pub const _SC_THREAD_KEYS_MAX: c_int = 74; - -pub const _SC_THREAD_STACK_MIN: c_int = 75; - -pub const _SC_THREAD_THREADS_MAX: c_int = 76; - -pub const _SC_THREAD_ATTR_STACKADDR: c_int = 77; - -pub const _SC_THREAD_ATTR_STACKSIZE: c_int = 78; - -pub const _SC_THREAD_PRIORITY_SCHEDULING: c_int = 79; - -pub const _SC_THREAD_PRIO_INHERIT: c_int = 80; - -pub const _SC_THREAD_PRIO_PROTECT: c_int = 81; - -pub const _SC_THREAD_PROCESS_SHARED: c_int = 82; - -pub const _SC_NPROCESSORS_CONF: c_int = 83; - -pub const _SC_NPROCESSORS_ONLN: c_int = 84; - -pub const _SC_PHYS_PAGES: c_int = 85; - -pub const _SC_AVPHYS_PAGES: c_int = 86; - -pub const _SC_ATEXIT_MAX: c_int = 87; - -pub const _SC_PASS_MAX: c_int = 88; - -pub const _SC_XOPEN_VERSION: c_int = 89; - -pub const _SC_XOPEN_XCU_VERSION: c_int = 90; - -pub const _SC_XOPEN_UNIX: c_int = 91; - -pub const _SC_XOPEN_CRYPT: c_int = 92; - -pub const _SC_XOPEN_ENH_I18N: c_int = 93; - -pub const _SC_XOPEN_SHM: c_int = 94; - -pub const _SC_2_CHAR_TERM: c_int = 95; - -pub const _SC_2_UPE: c_int = 97; - -pub const _SC_XOPEN_XPG2: c_int = 98; - -pub const _SC_XOPEN_XPG3: c_int = 99; - -pub const _SC_XOPEN_XPG4: c_int = 100; - -pub const _SC_NZERO: c_int = 109; - -pub const _SC_XBS5_ILP32_OFF32: c_int = 125; - -pub const _SC_XBS5_ILP32_OFFBIG: c_int = 126; - -pub const _SC_XBS5_LP64_OFF64: c_int = 127; - -pub const _SC_XBS5_LPBIG_OFFBIG: c_int = 128; - -pub const _SC_XOPEN_LEGACY: c_int = 129; - -pub const _SC_XOPEN_REALTIME: c_int = 130; - -pub const _SC_XOPEN_REALTIME_THREADS: c_int = 131; - -pub const _SC_ADVISORY_INFO: c_int = 132; - -pub const _SC_BARRIERS: c_int = 133; - -pub const _SC_CLOCK_SELECTION: c_int = 137; - -pub const _SC_CPUTIME: c_int = 138; - -pub const _SC_THREAD_CPUTIME: c_int = 139; - -pub const _SC_MONOTONIC_CLOCK: c_int = 149; - -pub const _SC_READER_WRITER_LOCKS: c_int = 153; - -pub const _SC_SPIN_LOCKS: c_int = 154; - -pub const _SC_REGEXP: c_int = 155; - -pub const _SC_SHELL: c_int = 157; - -pub const _SC_SPAWN: c_int = 159; - -pub const _SC_SPORADIC_SERVER: c_int = 160; - -pub const _SC_THREAD_SPORADIC_SERVER: c_int = 161; - -pub const _SC_TIMEOUTS: c_int = 164; - -pub const _SC_TYPED_MEMORY_OBJECTS: c_int = 165; - -pub const _SC_2_PBS: c_int = 168; - -pub const _SC_2_PBS_ACCOUNTING: c_int = 169; - -pub const _SC_2_PBS_LOCATE: c_int = 170; - -pub const _SC_2_PBS_MESSAGE: c_int = 171; - -pub const _SC_2_PBS_TRACK: c_int = 172; - -pub const _SC_SYMLOOP_MAX: c_int = 173; - -pub const _SC_STREAMS: c_int = 174; - -pub const _SC_2_PBS_CHECKPOINT: c_int = 175; - -pub const _SC_V6_ILP32_OFF32: c_int = 176; - -pub const _SC_V6_ILP32_OFFBIG: c_int = 177; - -pub const _SC_V6_LP64_OFF64: c_int = 178; - -pub const _SC_V6_LPBIG_OFFBIG: c_int = 179; - -pub const _SC_HOST_NAME_MAX: c_int = 180; - -pub const _SC_TRACE: c_int = 181; - -pub const _SC_TRACE_EVENT_FILTER: c_int = 182; - -pub const _SC_TRACE_INHERIT: c_int = 183; - -pub const _SC_TRACE_LOG: c_int = 184; - -pub const _SC_IPV6: c_int = 235; - -pub const _SC_RAW_SOCKETS: c_int = 236; - -pub const _SC_V7_ILP32_OFF32: c_int = 237; - -pub const _SC_V7_ILP32_OFFBIG: c_int = 238; - -pub const _SC_V7_LP64_OFF64: c_int = 239; - 
-pub const _SC_V7_LPBIG_OFFBIG: c_int = 240; - -pub const _SC_SS_REPL_MAX: c_int = 241; - -pub const _SC_TRACE_EVENT_NAME_MAX: c_int = 242; - -pub const _SC_TRACE_NAME_MAX: c_int = 243; - -pub const _SC_TRACE_SYS_MAX: c_int = 244; - -pub const _SC_TRACE_USER_EVENT_MAX: c_int = 245; - -pub const _SC_XOPEN_STREAMS: c_int = 246; - -pub const _SC_THREAD_ROBUST_PRIO_INHERIT: c_int = 247; - -pub const _SC_THREAD_ROBUST_PRIO_PROTECT: c_int = 248; - -// limits.h -pub const PTHREAD_KEYS_MAX: c_int = 128; - -pub const PTHREAD_STACK_MIN: c_int = 2048; - -pub const PTHREAD_DESTRUCTOR_ITERATIONS: c_int = 4; - -pub const SEM_VALUE_MAX: c_int = 0x7fffffff; - -pub const SEM_NSEMS_MAX: c_int = 256; - -pub const DELAYTIMER_MAX: c_int = 0x7fffffff; - -pub const MQ_PRIO_MAX: c_int = 32768; - -pub const LOGIN_NAME_MAX: c_int = 256; - -// time.h -pub const CLOCK_REALTIME: clockid_t = 0; - -pub const CLOCK_MONOTONIC: clockid_t = 1; - -pub const PTHREAD_MUTEX_INITIALIZER: pthread_mutex_t = pthread_mutex_t { - size: [0; __SIZEOF_PTHREAD_MUTEX_T], -}; - -pub const PTHREAD_COND_INITIALIZER: pthread_cond_t = pthread_cond_t { - size: [0; __SIZEOF_PTHREAD_COND_T], -}; - -pub const PTHREAD_MUTEX_NORMAL: c_int = 0; - -pub const PTHREAD_MUTEX_RECURSIVE: c_int = 1; - -pub const PTHREAD_MUTEX_ERRORCHECK: c_int = 2; - -pub const PTHREAD_MUTEX_DEFAULT: c_int = PTHREAD_MUTEX_NORMAL; - -pub const PTHREAD_MUTEX_STALLED: c_int = 0; - -pub const PTHREAD_MUTEX_ROBUST: c_int = 1; - -extern "C" { - // ---- ALLOC ----------------------------------------------------------------------------- - pub fn calloc(nobj: size_t, size: size_t) -> *mut c_void; - - pub fn malloc(size: size_t) -> *mut c_void; - - pub fn realloc(p: *mut c_void, size: size_t) -> *mut c_void; - - pub fn aligned_alloc(align: size_t, len: size_t) -> *mut c_void; - - pub fn free(p: *mut c_void); - - pub fn posix_memalign(memptr: *mut *mut c_void, align: size_t, size: size_t) -> c_int; - - pub fn memchr(cx: *const c_void, c: c_int, n: size_t) -> *mut c_void; - - pub fn wmemchr(cx: *const wchar_t, c: wchar_t, n: size_t) -> *mut wchar_t; - - pub fn memcmp(cx: *const c_void, ct: *const c_void, n: size_t) -> c_int; - - pub fn memcpy(dest: *mut c_void, src: *const c_void, n: size_t) -> *mut c_void; - - pub fn memmove(dest: *mut c_void, src: *const c_void, n: size_t) -> *mut c_void; - - pub fn memset(dest: *mut c_void, c: c_int, n: size_t) -> *mut c_void; - - // ----- PTHREAD --------------------------------------------------------------------------- - pub fn pthread_self() -> pthread_t; - - pub fn pthread_join(native: pthread_t, value: *mut *mut c_void) -> c_int; - - // detach or pthread_attr_setdetachstate must not be called! - //pub fn pthread_detach(thread: pthread_t) -> c_int; - - pub fn pthread_exit(value: *mut c_void) -> !; - - pub fn pthread_attr_init(attr: *mut pthread_attr_t) -> c_int; - - pub fn pthread_attr_destroy(attr: *mut pthread_attr_t) -> c_int; - - pub fn pthread_attr_getstack( - attr: *const pthread_attr_t, - stackaddr: *mut *mut c_void, - stacksize: *mut size_t, - ) -> c_int; - - pub fn pthread_attr_setstacksize(attr: *mut pthread_attr_t, stack_size: size_t) -> c_int; - - pub fn pthread_attr_getstacksize(attr: *const pthread_attr_t, size: *mut size_t) -> c_int; - - pub fn pthread_attr_settee( - attr: *mut pthread_attr_t, - ca: c_int, - task_id: c_int, - shadow: c_int, - ) -> c_int; - - // C-TA API do not include this interface, but TA can use. 
- pub fn sched_yield() -> c_int; - - pub fn pthread_key_create( - key: *mut pthread_key_t, - dtor: Option, - ) -> c_int; - - pub fn pthread_key_delete(key: pthread_key_t) -> c_int; - - pub fn pthread_getspecific(key: pthread_key_t) -> *mut c_void; - - pub fn pthread_setspecific(key: pthread_key_t, value: *const c_void) -> c_int; - - pub fn pthread_mutex_destroy(lock: *mut pthread_mutex_t) -> c_int; - - pub fn pthread_mutex_init( - lock: *mut pthread_mutex_t, - attr: *const pthread_mutexattr_t, - ) -> c_int; - - pub fn pthread_mutex_lock(lock: *mut pthread_mutex_t) -> c_int; - - pub fn pthread_mutex_trylock(lock: *mut pthread_mutex_t) -> c_int; - - pub fn pthread_mutex_unlock(lock: *mut pthread_mutex_t) -> c_int; - - pub fn pthread_mutexattr_destroy(attr: *mut pthread_mutexattr_t) -> c_int; - - pub fn pthread_mutexattr_init(attr: *mut pthread_mutexattr_t) -> c_int; - - pub fn pthread_mutexattr_settype(attr: *mut pthread_mutexattr_t, _type: c_int) -> c_int; - - pub fn pthread_mutexattr_setpshared(attr: *mut pthread_mutexattr_t, pshared: c_int) -> c_int; - - pub fn pthread_cond_broadcast(cond: *mut pthread_cond_t) -> c_int; - - pub fn pthread_cond_destroy(cond: *mut pthread_cond_t) -> c_int; - - pub fn pthread_cond_init(cond: *mut pthread_cond_t, attr: *const pthread_condattr_t) -> c_int; - - pub fn pthread_cond_signal(cond: *mut pthread_cond_t) -> c_int; - - pub fn pthread_cond_wait(cond: *mut pthread_cond_t, lock: *mut pthread_mutex_t) -> c_int; - - pub fn pthread_cond_timedwait( - cond: *mut pthread_cond_t, - lock: *mut pthread_mutex_t, - abstime: *const timespec, - ) -> c_int; - - pub fn pthread_mutexattr_setrobust(attr: *mut pthread_mutexattr_t, robustness: c_int) -> c_int; - - pub fn pthread_create( - native: *mut pthread_t, - attr: *const pthread_attr_t, - f: extern "C" fn(*mut c_void) -> *mut c_void, - value: *mut c_void, - ) -> c_int; - - pub fn pthread_spin_init(lock: *mut pthread_spinlock_t, pshared: c_int) -> c_int; - - pub fn pthread_spin_destroy(lock: *mut pthread_spinlock_t) -> c_int; - - pub fn pthread_spin_lock(lock: *mut pthread_spinlock_t) -> c_int; - - pub fn pthread_spin_trylock(lock: *mut pthread_spinlock_t) -> c_int; - - pub fn pthread_spin_unlock(lock: *mut pthread_spinlock_t) -> c_int; - - pub fn pthread_setschedprio(native: pthread_t, priority: c_int) -> c_int; - - pub fn pthread_once(pot: *mut pthread_once_t, f: Option) -> c_int; - - pub fn pthread_equal(p1: pthread_t, p2: pthread_t) -> c_int; - - pub fn pthread_mutexattr_setprotocol(a: *mut pthread_mutexattr_t, protocol: c_int) -> c_int; - - pub fn pthread_attr_setstack( - attr: *mut pthread_attr_t, - stack: *mut c_void, - size: size_t, - ) -> c_int; - - pub fn pthread_setaffinity_np(td: pthread_t, size: size_t, set: *const cpu_set_t) -> c_int; - - pub fn pthread_getaffinity_np(td: pthread_t, size: size_t, set: *mut cpu_set_t) -> c_int; - - // stdio.h - pub fn printf(fmt: *const c_char, ...) -> c_int; - - pub fn scanf(fmt: *const c_char, ...) -> c_int; - - pub fn snprintf(s: *mut c_char, n: size_t, fmt: *const c_char, ...) -> c_int; - - pub fn sprintf(s: *mut c_char, fmt: *const c_char, ...) -> c_int; - - pub fn vsnprintf(s: *mut c_char, n: size_t, fmt: *const c_char, ap: va_list) -> c_int; - - pub fn vsprintf(s: *mut c_char, fmt: *const c_char, ap: va_list) -> c_int; - - // Not available. - //pub fn pthread_setname_np(thread: pthread_t, name: *const c_char) -> c_int; - - pub fn abort() -> !; - - // Not available. - //pub fn prctl(op: c_int, ...) 
-> c_int; - - pub fn sched_getaffinity(pid: pid_t, cpusetsize: size_t, cpuset: *mut cpu_set_t) -> c_int; - - pub fn sched_setaffinity(pid: pid_t, cpusetsize: size_t, cpuset: *const cpu_set_t) -> c_int; - - // sysconf is currently only implemented as a stub. - pub fn sysconf(name: c_int) -> c_long; - - // mman.h - pub fn mmap( - addr: *mut c_void, - len: size_t, - prot: c_int, - flags: c_int, - fd: c_int, - offset: off_t, - ) -> *mut c_void; - pub fn munmap(addr: *mut c_void, len: size_t) -> c_int; - - // errno.h - pub fn __errno_location() -> *mut c_int; - - pub fn strerror(e: c_int) -> *mut c_char; - - // time.h - pub fn clock_gettime(clock_id: clockid_t, tp: *mut timespec) -> c_int; - - // unistd - pub fn getpid() -> pid_t; - - // time - pub fn gettimeofday(tv: *mut timeval, tz: *mut c_void) -> c_int; - - pub fn strftime( - restrict: *mut c_char, - sz: size_t, - _restrict: *const c_char, - __restrict: *const tm, - ) -> size_t; - - pub fn time(t: *mut time_t) -> time_t; - - // sem - pub fn sem_close(sem: *mut sem_t) -> c_int; - - pub fn sem_destroy(sem: *mut sem_t) -> c_int; - - pub fn sem_getvalue(sem: *mut sem_t, valp: *mut c_int) -> c_int; - - pub fn sem_init(sem: *mut sem_t, pshared: c_int, value: c_uint) -> c_int; - - pub fn sem_open(name: *const c_char, flags: c_int, ...) -> *mut sem_t; - - pub fn sem_post(sem: *mut sem_t) -> c_int; - - pub fn sem_unlink(name: *const c_char) -> c_int; - - pub fn sem_wait(sem: *mut sem_t) -> c_int; - - // locale - pub fn setlocale(cat: c_int, name: *const c_char) -> *mut c_char; - - pub fn strcoll(l: *const c_char, r: *const c_char) -> c_int; - - pub fn strxfrm(dest: *mut c_char, src: *const c_char, n: size_t) -> size_t; - - pub fn strtod(s: *const c_char, p: *mut *mut c_char) -> c_double; - - // multibyte - pub fn mbrtowc(wc: *mut wchar_t, src: *const c_char, n: size_t, st: *mut mbstate_t) -> size_t; - - pub fn wcrtomb(s: *mut c_char, wc: wchar_t, st: *mut mbstate_t) -> size_t; - - pub fn wctob(c: wint_t) -> c_int; - - // prng - pub fn srandom(seed: c_uint); - - pub fn initstate(seed: c_uint, state: *mut c_char, size: size_t) -> *mut c_char; - - pub fn setstate(state: *mut c_char) -> *mut c_char; - - pub fn random() -> c_long; - - // string - pub fn strchr(s: *const c_char, c: c_int) -> *mut c_char; - - pub fn strlen(cs: *const c_char) -> size_t; - - pub fn strcmp(l: *const c_char, r: *const c_char) -> c_int; - - pub fn strcpy(dest: *mut c_char, src: *const c_char) -> *mut c_char; - - pub fn strncmp(_l: *const c_char, r: *const c_char, n: size_t) -> c_int; - - pub fn strncpy(dest: *mut c_char, src: *const c_char, n: size_t) -> *mut c_char; - - pub fn strnlen(cs: *const c_char, n: size_t) -> size_t; - - pub fn strrchr(s: *const c_char, c: c_int) -> *mut c_char; - - pub fn strstr(h: *const c_char, n: *const c_char) -> *mut c_char; - - pub fn wcschr(s: *const wchar_t, c: wchar_t) -> *mut wchar_t; - - pub fn wcslen(s: *const wchar_t) -> size_t; - - // ctype - pub fn isalpha(c: c_int) -> c_int; - - pub fn isascii(c: c_int) -> c_int; - - pub fn isdigit(c: c_int) -> c_int; - - pub fn islower(c: c_int) -> c_int; - - pub fn isprint(c: c_int) -> c_int; - - pub fn isspace(c: c_int) -> c_int; - - pub fn iswctype(wc: wint_t, ttype: wctype_t) -> c_int; - - pub fn iswdigit(wc: wint_t) -> c_int; - - pub fn iswlower(wc: wint_t) -> c_int; - - pub fn iswspace(wc: wint_t) -> c_int; - - pub fn iswupper(wc: wint_t) -> c_int; - - pub fn towupper(wc: wint_t) -> wint_t; - - pub fn towlower(wc: wint_t) -> wint_t; - - // cmath - pub fn atan(x: c_double) -> c_double; - - pub 
fn ceil(x: c_double) -> c_double; - - pub fn ceilf(x: c_float) -> c_float; - - pub fn exp(x: c_double) -> c_double; - - pub fn fabs(x: c_double) -> c_double; - - pub fn floor(x: c_double) -> c_double; - - pub fn frexp(x: c_double, e: *mut c_int) -> c_double; - - pub fn log(x: c_double) -> c_double; - - pub fn log2(x: c_double) -> c_double; - - pub fn pow(x: c_double, y: c_double) -> c_double; - - pub fn roundf(x: c_float) -> c_float; - - pub fn scalbn(x: c_double, n: c_int) -> c_double; - - pub fn sqrt(x: c_double) -> c_double; - - // stdlib - pub fn abs(x: c_int) -> c_int; - - pub fn atof(s: *const c_char) -> c_double; - - pub fn atoi(s: *const c_char) -> c_int; - - pub fn atol(s: *const c_char) -> c_long; - - pub fn atoll(s: *const c_char) -> c_longlong; - - pub fn bsearch( - key: *const c_void, - base: *const c_void, - nel: size_t, - width: size_t, - cmp: cmpfunc, - ) -> *mut c_void; - - pub fn div(num: c_int, den: c_int) -> div_t; - - pub fn ecvt(x: c_double, n: c_int, dp: *mut c_int, sign: *mut c_int) -> *mut c_char; - - pub fn imaxabs(a: intmax_t) -> intmax_t; - - pub fn llabs(a: c_longlong) -> c_longlong; - - pub fn qsort(base: *mut c_void, nel: size_t, width: size_t, cmp: cmpfunc); - - pub fn strtoul(s: *const c_char, p: *mut *mut c_char, base: c_int) -> c_ulong; - - pub fn strtol(s: *const c_char, p: *mut *mut c_char, base: c_int) -> c_long; - - pub fn wcstod(s: *const wchar_t, p: *mut *mut wchar_t) -> c_double; -} - -pub fn errno() -> c_int { - unsafe { *__errno_location() } -} - -pub fn CPU_COUNT_S(size: usize, cpuset: &cpu_set_t) -> c_int { - let mut s: u32 = 0; - let size_of_mask = size_of_val(&cpuset.bits[0]); - - for i in cpuset.bits[..(size / size_of_mask)].iter() { - s += i.count_ones(); - } - s as c_int -} - -pub fn CPU_COUNT(cpuset: &cpu_set_t) -> c_int { - CPU_COUNT_S(size_of::<cpu_set_t>(), cpuset) -} diff --git a/vendor/libc/src/trusty.rs b/vendor/libc/src/trusty.rs deleted file mode 100644 index 7441aade0631eb..00000000000000 --- a/vendor/libc/src/trusty.rs +++ /dev/null @@ -1,72 +0,0 @@ -use crate::prelude::*; -pub type size_t = usize; -pub type ssize_t = isize; - -pub type off_t = i64; - -pub type c_uint8_t = u8; -pub type c_uint16_t = u16; -pub type c_uint32_t = u32; -pub type c_uint64_t = u64; - -pub type c_int8_t = i8; -pub type c_int16_t = i16; -pub type c_int32_t = i32; -pub type c_int64_t = i64; - -pub type intptr_t = isize; -pub type uintptr_t = usize; - -pub type time_t = c_long; - -pub type clockid_t = c_int; - -s! { - pub struct iovec { - pub iov_base: *mut c_void, - pub iov_len: size_t, - } - - pub struct timespec { - pub tv_sec: time_t, - pub tv_nsec: c_long, - } -} - -pub const PROT_READ: i32 = 1; -pub const PROT_WRITE: i32 = 2; - -// Trusty only supports `CLOCK_BOOTTIME`.
-pub const CLOCK_BOOTTIME: clockid_t = 7; - -pub const STDOUT_FILENO: c_int = 1; -pub const STDERR_FILENO: c_int = 2; - -pub const AT_PAGESZ: c_ulong = 6; - -pub const MAP_FAILED: *mut c_void = !0 as *mut c_void; - -extern "C" { - pub fn calloc(nobj: size_t, size: size_t) -> *mut c_void; - pub fn malloc(size: size_t) -> *mut c_void; - pub fn realloc(p: *mut c_void, size: size_t) -> *mut c_void; - pub fn free(p: *mut c_void); - pub fn memalign(align: size_t, size: size_t) -> *mut c_void; - pub fn posix_memalign(memptr: *mut *mut c_void, align: size_t, size: size_t) -> c_int; - pub fn write(fd: c_int, buf: *const c_void, count: size_t) -> ssize_t; - pub fn writev(fd: c_int, iov: *const crate::iovec, iovcnt: c_int) -> ssize_t; - pub fn close(fd: c_int) -> c_int; - pub fn strlen(cs: *const c_char) -> size_t; - pub fn getauxval(type_: c_ulong) -> c_ulong; - pub fn mmap( - addr: *mut c_void, - len: size_t, - prot: c_int, - flags: c_int, - fd: c_int, - offset: off_t, - ) -> *mut c_void; - pub fn munmap(addr: *mut c_void, len: size_t) -> c_int; - pub fn clock_gettime(clk_id: crate::clockid_t, tp: *mut crate::timespec) -> c_int; - pub fn nanosleep(rqtp: *const crate::timespec, rmtp: *mut crate::timespec) -> c_int; -} diff --git a/vendor/libc/src/types.rs b/vendor/libc/src/types.rs deleted file mode 100644 index 7d49a425d59ead..00000000000000 --- a/vendor/libc/src/types.rs +++ /dev/null @@ -1,39 +0,0 @@ -//! Platform-agnostic support types. - -use core::mem::MaybeUninit; - -use crate::prelude::*; - -/// A transparent wrapper over `MaybeUninit<T>` to represent uninitialized padding -/// while providing `Default`. -// This is restricted to `Copy` types since that's a loose indicator that zeros is actually -// a valid bitpattern. There is no technical reason this is required, though, so it could be -// lifted in the future if it becomes a problem. -#[allow(unused)] -#[repr(transparent)] -#[derive(Clone, Copy)] -pub(crate) struct Padding<T: Copy>(MaybeUninit<T>); - -impl<T: Copy> Default for Padding<T> { - fn default() -> Self { - Self(MaybeUninit::zeroed()) - } -} - -impl<T: Copy> fmt::Debug for Padding<T> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - // Taken from `MaybeUninit`'s debug implementation - // NB: there is no `.pad_fmt` so we can't use a simpler `format_args!("Padding<{..}>"). - let full_name = core::any::type_name::<Self>(); - let prefix_len = full_name.find("Padding").unwrap(); - f.pad(&full_name[prefix_len..]) - } -} - -/// The default repr type used for C style enums in Rust.
-#[cfg(target_env = "msvc")] -#[allow(unused)] -pub(crate) type CEnumRepr = c_int; -#[cfg(not(target_env = "msvc"))] -#[allow(unused)] -pub(crate) type CEnumRepr = c_uint; diff --git a/vendor/libc/src/unix/aix/mod.rs b/vendor/libc/src/unix/aix/mod.rs deleted file mode 100644 index b6d1af52d133cb..00000000000000 --- a/vendor/libc/src/unix/aix/mod.rs +++ /dev/null @@ -1,3382 +0,0 @@ -use crate::prelude::*; -use crate::{in_addr_t, in_port_t}; - -pub type caddr_t = *mut c_char; -pub type clockid_t = c_longlong; -pub type blkcnt_t = c_long; -pub type clock_t = c_int; -pub type daddr_t = c_long; -pub type dev_t = c_ulong; -pub type fpos64_t = c_longlong; -pub type fsblkcnt_t = c_ulong; -pub type fsfilcnt_t = c_ulong; -pub type ino_t = c_ulong; -pub type key_t = c_int; -pub type mode_t = c_uint; -pub type nlink_t = c_short; -pub type rlim_t = c_ulong; -pub type speed_t = c_uint; -pub type tcflag_t = c_uint; -pub type time_t = c_long; -pub type time64_t = i64; -pub type timer_t = c_long; -pub type wchar_t = c_uint; -pub type nfds_t = c_uint; -pub type projid_t = c_int; -pub type id_t = c_uint; -pub type blksize64_t = c_ulonglong; -pub type blkcnt64_t = c_ulonglong; -pub type suseconds_t = c_int; -pub type useconds_t = c_uint; -pub type off_t = c_long; -pub type offset_t = c_longlong; -pub type off64_t = c_longlong; -pub type idtype_t = c_uint; - -pub type socklen_t = c_uint; -pub type sa_family_t = c_uchar; - -pub type signal_t = c_int; -pub type pthread_t = c_uint; -pub type pthread_key_t = c_uint; -pub type thread_t = pthread_t; -pub type blksize_t = c_long; -pub type nl_item = c_int; -pub type mqd_t = c_int; -pub type shmatt_t = c_ulong; -pub type regoff_t = c_long; -pub type rlim64_t = c_ulonglong; - -pub type sem_t = c_int; -pub type pollset_t = c_int; -pub type sctp_assoc_t = c_uint; - -pub type pthread_rwlockattr_t = *mut c_void; -pub type pthread_condattr_t = *mut c_void; -pub type pthread_mutexattr_t = *mut c_void; -pub type pthread_attr_t = *mut c_void; -pub type pthread_barrierattr_t = *mut c_void; -pub type posix_spawn_file_actions_t = *mut c_char; -pub type iconv_t = *mut c_void; - -e! { - #[repr(u32)] - pub enum uio_rw { - UIO_READ = 0, - UIO_WRITE, - UIO_READ_NO_MOVE, - UIO_WRITE_NO_MOVE, - UIO_PWRITE, - } - #[repr(u32)] - pub enum ACTION { - FIND = 0, - ENTER, - } -} - -s! 
{ - pub struct fsid_t { - pub val: [c_uint; 2], - } - - pub struct fsid64_t { - pub val: [crate::uint64_t; 2], - } - - pub struct timezone { - pub tz_minuteswest: c_int, - pub tz_dsttime: c_int, - } - - pub struct ip_mreq { - pub imr_multiaddr: in_addr, - pub imr_interface: in_addr, - } - - pub struct dirent { - pub d_offset: c_ulong, - pub d_ino: crate::ino_t, - pub d_reclen: c_ushort, - pub d_namlen: c_ushort, - pub d_name: [c_char; 256], - } - - pub struct termios { - pub c_iflag: crate::tcflag_t, - pub c_oflag: crate::tcflag_t, - pub c_cflag: crate::tcflag_t, - pub c_lflag: crate::tcflag_t, - pub c_cc: [crate::cc_t; crate::NCCS], - } - - pub struct flock64 { - pub l_type: c_short, - pub l_whence: c_short, - pub l_sysid: c_uint, - pub l_pid: crate::pid_t, - pub l_vfs: c_int, - pub l_start: off64_t, - pub l_len: off64_t, - } - - pub struct msghdr { - pub msg_name: *mut c_void, - pub msg_namelen: crate::socklen_t, - pub msg_iov: *mut crate::iovec, - pub msg_iovlen: c_int, - pub msg_control: *mut c_void, - pub msg_controllen: socklen_t, - pub msg_flags: c_int, - } - - pub struct statvfs64 { - pub f_bsize: crate::blksize64_t, - pub f_frsize: crate::blksize64_t, - pub f_blocks: crate::blkcnt64_t, - pub f_bfree: crate::blkcnt64_t, - pub f_bavail: crate::blkcnt64_t, - pub f_files: crate::blkcnt64_t, - pub f_ffree: crate::blkcnt64_t, - pub f_favail: crate::blkcnt64_t, - pub f_fsid: fsid64_t, - pub f_basetype: [c_char; 16], - pub f_flag: c_ulong, - pub f_namemax: c_ulong, - pub f_fstr: [c_char; 32], - pub f_filler: [c_ulong; 16], - } - - pub struct lconv { - pub decimal_point: *mut c_char, - pub thousands_sep: *mut c_char, - pub grouping: *mut c_char, - pub int_curr_symbol: *mut c_char, - pub currency_symbol: *mut c_char, - pub mon_decimal_point: *mut c_char, - pub mon_thousands_sep: *mut c_char, - pub mon_grouping: *mut c_char, - pub positive_sign: *mut c_char, - pub negative_sign: *mut c_char, - pub int_frac_digits: c_char, - pub frac_digits: c_char, - pub p_cs_precedes: c_char, - pub p_sep_by_space: c_char, - pub n_cs_precedes: c_char, - pub n_sep_by_space: c_char, - pub p_sign_posn: c_char, - pub n_sign_posn: c_char, - pub left_parenthesis: *mut c_char, - pub right_parenthesis: *mut c_char, - pub int_p_cs_precedes: c_char, - pub int_p_sep_by_space: c_char, - pub int_n_cs_precedes: c_char, - pub int_n_sep_by_space: c_char, - pub int_p_sign_posn: c_char, - pub int_n_sign_posn: c_char, - } - - pub struct tm { - pub tm_sec: c_int, - pub tm_min: c_int, - pub tm_hour: c_int, - pub tm_mday: c_int, - pub tm_mon: c_int, - pub tm_year: c_int, - pub tm_wday: c_int, - pub tm_yday: c_int, - pub tm_isdst: c_int, - } - - pub struct addrinfo { - pub ai_flags: c_int, - pub ai_family: c_int, - pub ai_socktype: c_int, - pub ai_protocol: c_int, - pub ai_addrlen: c_ulong, - pub ai_canonname: *mut c_char, - pub ai_addr: *mut crate::sockaddr, - pub ai_next: *mut addrinfo, - pub ai_eflags: c_int, - } - - pub struct in_addr { - pub s_addr: in_addr_t, - } - - pub struct ip_mreq_source { - pub imr_multiaddr: in_addr, - pub imr_sourceaddr: in_addr, - pub imr_interface: in_addr, - } - - pub struct sockaddr { - pub sa_len: c_uchar, - pub sa_family: sa_family_t, - pub sa_data: [c_char; 14], - } - - pub struct sockaddr_dl { - pub sdl_len: c_uchar, - pub sdl_family: c_uchar, - pub sdl_index: c_ushort, - pub sdl_type: c_uchar, - pub sdl_nlen: c_uchar, - pub sdl_alen: c_uchar, - pub sdl_slen: c_uchar, - pub sdl_data: [c_char; 120], - } - - pub struct sockaddr_in { - pub sin_len: c_uchar, - pub sin_family: sa_family_t, - pub 
sin_port: in_port_t, - pub sin_addr: in_addr, - pub sin_zero: [c_uchar; 8], - } - - pub struct sockaddr_in6 { - pub sin6_len: c_uchar, - pub sin6_family: c_uchar, - pub sin6_port: crate::uint16_t, - pub sin6_flowinfo: crate::uint32_t, - pub sin6_addr: crate::in6_addr, - pub sin6_scope_id: crate::uint32_t, - } - - pub struct sockaddr_storage { - pub __ss_len: c_uchar, - pub ss_family: sa_family_t, - __ss_pad1: [c_char; 6], - __ss_align: crate::int64_t, - __ss_pad2: [c_char; 1265], - } - - pub struct sockaddr_un { - pub sun_len: c_uchar, - pub sun_family: sa_family_t, - pub sun_path: [c_char; 1023], - } - - pub struct st_timespec { - pub tv_sec: crate::time_t, - pub tv_nsec: c_int, - } - - pub struct statfs64 { - pub f_version: c_int, - pub f_type: c_int, - pub f_bsize: blksize64_t, - pub f_blocks: blkcnt64_t, - pub f_bfree: blkcnt64_t, - pub f_bavail: blkcnt64_t, - pub f_files: crate::uint64_t, - pub f_ffree: crate::uint64_t, - pub f_fsid: fsid64_t, - pub f_vfstype: c_int, - pub f_fsize: blksize64_t, - pub f_vfsnumber: c_int, - pub f_vfsoff: c_int, - pub f_vfslen: c_int, - pub f_vfsvers: c_int, - pub f_fname: [c_char; 32], - pub f_fpack: [c_char; 32], - pub f_name_max: c_int, - } - - pub struct passwd { - pub pw_name: *mut c_char, - pub pw_passwd: *mut c_char, - pub pw_uid: crate::uid_t, - pub pw_gid: crate::gid_t, - pub pw_gecos: *mut c_char, - pub pw_dir: *mut c_char, - pub pw_shell: *mut c_char, - } - - pub struct utsname { - pub sysname: [c_char; 32], - pub nodename: [c_char; 32], - pub release: [c_char; 32], - pub version: [c_char; 32], - pub machine: [c_char; 32], - } - - pub struct xutsname { - pub nid: c_uint, - pub reserved: c_int, - pub longnid: c_ulonglong, - } - - pub struct cmsghdr { - pub cmsg_len: crate::socklen_t, - pub cmsg_level: c_int, - pub cmsg_type: c_int, - } - - pub struct sigevent { - pub sigev_value: crate::sigval, - pub sigev_signo: c_int, - pub sigev_notify: c_int, - pub sigev_notify_function: extern "C" fn(val: crate::sigval), - pub sigev_notify_attributes: *mut pthread_attr_t, - } - - pub struct osigevent { - pub sevt_value: *mut c_void, - pub sevt_signo: signal_t, - } - - pub struct poll_ctl { - pub cmd: c_short, - pub events: c_short, - pub fd: c_int, - } - - pub struct sf_parms { - pub header_data: *mut c_void, - pub header_length: c_uint, - pub file_descriptor: c_int, - pub file_size: crate::uint64_t, - pub file_offset: crate::uint64_t, - pub file_bytes: crate::int64_t, - pub trailer_data: *mut c_void, - pub trailer_length: c_uint, - pub bytes_sent: crate::uint64_t, - } - - pub struct mmsghdr { - pub msg_hdr: crate::msghdr, - pub msg_len: c_uint, - } - - pub struct sched_param { - pub sched_priority: c_int, - pub sched_policy: c_int, - pub sched_reserved: [c_int; 6], - } - - pub struct stack_t { - pub ss_sp: *mut c_void, - pub ss_size: size_t, - pub ss_flags: c_int, - pub __pad: [c_int; 4], - } - - pub struct posix_spawnattr_t { - pub posix_attr_flags: c_short, - pub posix_attr_pgroup: crate::pid_t, - pub posix_attr_sigmask: crate::sigset_t, - pub posix_attr_sigdefault: crate::sigset_t, - pub posix_attr_schedpolicy: c_int, - pub posix_attr_schedparam: sched_param, - } - - pub struct glob_t { - pub gl_pathc: size_t, - pub gl_pathv: *mut *mut c_char, - pub gl_offs: size_t, - pub gl_padr: *mut c_void, - pub gl_ptx: *mut c_void, - } - - pub struct mallinfo { - pub arena: c_ulong, - pub ordblks: c_int, - pub smblks: c_int, - pub hblks: c_int, - pub hblkhd: c_int, - pub usmblks: c_ulong, - pub fsmblks: c_ulong, - pub uordblks: c_ulong, - pub fordblks: c_ulong, - 
pub keepcost: c_int, - } - - pub struct exit_status { - pub e_termination: c_short, - pub e_exit: c_short, - } - - pub struct utmp { - pub ut_user: [c_char; 256], - pub ut_id: [c_char; 14], - pub ut_line: [c_char; 64], - pub ut_pid: crate::pid_t, - pub ut_type: c_short, - pub ut_time: time64_t, - pub ut_exit: exit_status, - pub ut_host: [c_char; 256], - pub __dbl_word_pad: c_int, - pub __reservedA: [c_int; 2], - pub __reservedV: [c_int; 6], - } - - pub struct regmatch_t { - pub rm_so: regoff_t, - pub rm_eo: regoff_t, - } - - pub struct regex_t { - pub re_nsub: size_t, - pub re_comp: *mut c_void, - pub re_cflags: c_int, - pub re_erroff: size_t, - pub re_len: size_t, - pub re_ucoll: [crate::wchar_t; 2], - pub re_lsub: [*mut c_void; 24], - pub re_esub: [*mut c_void; 24], - pub re_map: *mut c_uchar, - pub __maxsub: c_int, - pub __unused: [*mut c_void; 34], - } - - pub struct rlimit64 { - pub rlim_cur: rlim64_t, - pub rlim_max: rlim64_t, - } - - pub struct shmid_ds { - pub shm_perm: ipc_perm, - pub shm_segsz: size_t, - pub shm_lpid: crate::pid_t, - pub shm_cpid: crate::pid_t, - pub shm_nattch: shmatt_t, - pub shm_cnattch: shmatt_t, - pub shm_atime: time_t, - pub shm_dtime: time_t, - pub shm_ctime: time_t, - pub shm_handle: crate::uint32_t, - pub shm_extshm: c_int, - pub shm_pagesize: crate::int64_t, - pub shm_lba: crate::uint64_t, - pub shm_reserved0: crate::int64_t, - pub shm_reserved1: crate::int64_t, - } - - pub struct stat64 { - pub st_dev: dev_t, - pub st_ino: ino_t, - pub st_mode: mode_t, - pub st_nlink: nlink_t, - pub st_flag: c_ushort, - pub st_uid: crate::uid_t, - pub st_gid: crate::gid_t, - pub st_rdev: dev_t, - pub st_ssize: c_int, - pub st_atim: crate::timespec, - pub st_mtim: crate::timespec, - pub st_ctim: crate::timespec, - pub st_blksize: blksize_t, - pub st_blocks: blkcnt_t, - pub st_vfstype: c_int, - pub st_vfs: c_uint, - pub st_type: c_uint, - pub st_gen: c_uint, - pub st_reserved: [c_uint; 10], - pub st_size: off64_t, - } - - pub struct mntent { - pub mnt_fsname: *mut c_char, - pub mnt_dir: *mut c_char, - pub mnt_type: *mut c_char, - pub mnt_opts: *mut c_char, - pub mnt_freq: c_int, - pub mnt_passno: c_int, - } - - pub struct ipc_perm { - pub uid: crate::uid_t, - pub gid: crate::gid_t, - pub cuid: crate::uid_t, - pub cgid: crate::gid_t, - pub mode: mode_t, - pub seq: c_ushort, - pub __reserved: c_ushort, - pub key: key_t, - } - - pub struct entry { - pub key: *mut c_char, - pub data: *mut c_void, - } - - pub struct mq_attr { - pub mq_flags: c_long, - pub mq_maxmsg: c_long, - pub mq_msgsize: c_long, - pub mq_curmsgs: c_long, - } - - pub struct sembuf { - pub sem_num: c_ushort, - pub sem_op: c_short, - pub sem_flg: c_short, - } - - pub struct if_nameindex { - pub if_index: c_uint, - pub if_name: *mut c_char, - } - - pub struct itimerspec { - pub it_interval: crate::timespec, - pub it_value: crate::timespec, - } - - pub struct sigaction { - pub sa_sigaction: crate::sighandler_t, // FIXME(union): this field is actually a union - pub sa_mask: sigset_t, - pub sa_flags: c_int, - } -} - -s_no_extra_traits! { - pub union __poll_ctl_ext_u { - pub addr: *mut c_void, - pub data32: u32, - pub data: u64, - } - - pub struct poll_ctl_ext { - pub version: u8, - pub command: u8, - pub events: c_short, - pub fd: c_int, - pub u: __poll_ctl_ext_u, - pub reserved64: [u64; 6], - } -} - -cfg_if! 
{ - if #[cfg(feature = "extra_traits")] { - impl PartialEq for __poll_ctl_ext_u { - fn eq(&self, other: &__poll_ctl_ext_u) -> bool { - unsafe { - self.addr == other.addr - && self.data32 == other.data32 - && self.data == other.data - } - } - } - impl Eq for __poll_ctl_ext_u {} - impl hash::Hash for __poll_ctl_ext_u { - fn hash(&self, state: &mut H) { - unsafe { - self.addr.hash(state); - self.data32.hash(state); - self.data.hash(state); - } - } - } - - impl PartialEq for poll_ctl_ext { - fn eq(&self, other: &poll_ctl_ext) -> bool { - self.version == other.version - && self.command == other.command - && self.events == other.events - && self.fd == other.fd - && self.reserved64 == other.reserved64 - && self.u == other.u - } - } - impl Eq for poll_ctl_ext {} - impl hash::Hash for poll_ctl_ext { - fn hash(&self, state: &mut H) { - self.version.hash(state); - self.command.hash(state); - self.events.hash(state); - self.fd.hash(state); - self.u.hash(state); - self.reserved64.hash(state); - } - } - } -} - -// dlfcn.h -pub const RTLD_LAZY: c_int = 0x4; -pub const RTLD_NOW: c_int = 0x2; -pub const RTLD_GLOBAL: c_int = 0x10000; -pub const RTLD_LOCAL: c_int = 0x80000; -pub const RTLD_MEMBER: c_int = 0x40000; -pub const RTLD_NOAUTODEFER: c_int = 0x20000; -pub const RTLD_DEFAULT: *mut c_void = -1isize as *mut c_void; -pub const RTLD_MYSELF: *mut c_void = -2isize as *mut c_void; -pub const RTLD_NEXT: *mut c_void = -3isize as *mut c_void; - -// fcntl.h -pub const O_RDONLY: c_int = 0x0; -pub const O_WRONLY: c_int = 0x1; -pub const O_RDWR: c_int = 0x2; -pub const O_NDELAY: c_int = 0x8000; -pub const O_APPEND: c_int = 0x8; -pub const O_DSYNC: c_int = 0x400000; -pub const O_CREAT: c_int = 0x100; -pub const O_EXCL: c_int = 0x400; -pub const O_NOCTTY: c_int = 0x800; -pub const O_TRUNC: c_int = 0x200; -pub const O_NOFOLLOW: c_int = 0x1000000; -pub const O_DIRECTORY: c_int = 0x80000; -pub const O_SEARCH: c_int = 0x20; -pub const O_EXEC: c_int = 0x20; -pub const O_CLOEXEC: c_int = 0x800000; -pub const O_ACCMODE: c_int = O_RDONLY | O_WRONLY | O_RDWR | O_EXEC | O_SEARCH; -pub const O_DIRECT: c_int = 0x8000000; -pub const O_TTY_INIT: c_int = 0; -pub const O_RSYNC: c_int = 0x200000; -pub const O_LARGEFILE: c_int = 0x4000000; -pub const F_DUPFD: c_int = 0; -pub const F_DUPFD_CLOEXEC: c_int = 16; -pub const F_GETFD: c_int = 1; -pub const F_SETFD: c_int = 2; -pub const F_GETFL: c_int = 3; -pub const F_SETFL: c_int = 4; -pub const F_GETLK: c_int = F_GETLK64; -pub const F_SETLK: c_int = F_SETLK64; -pub const F_SETLKW: c_int = F_SETLKW64; -pub const F_GETOWN: c_int = 8; -pub const F_SETOWN: c_int = 9; -pub const F_CLOSEM: c_int = 10; -pub const F_GETLK64: c_int = 11; -pub const F_SETLK64: c_int = 12; -pub const F_SETLKW64: c_int = 13; -pub const F_DUP2FD: c_int = 14; -pub const F_TSTLK: c_int = 15; -pub const AT_FDCWD: c_int = -2; -pub const AT_SYMLINK_NOFOLLOW: c_int = 1; -pub const AT_SYMLINK_FOLLOW: c_int = 2; -pub const AT_REMOVEDIR: c_int = 1; -pub const AT_EACCESS: c_int = 1; -pub const O_SYNC: c_int = 16; -pub const O_NONBLOCK: c_int = 4; -pub const FASYNC: c_int = 0x20000; -pub const POSIX_FADV_NORMAL: c_int = 1; -pub const POSIX_FADV_SEQUENTIAL: c_int = 2; -pub const POSIX_FADV_RANDOM: c_int = 3; -pub const POSIX_FADV_WILLNEED: c_int = 4; -pub const POSIX_FADV_DONTNEED: c_int = 5; -pub const POSIX_FADV_NOREUSE: c_int = 6; - -// glob.h -pub const GLOB_APPEND: c_int = 0x1; -pub const GLOB_DOOFFS: c_int = 0x2; -pub const GLOB_ERR: c_int = 0x4; -pub const GLOB_MARK: c_int = 0x8; -pub const GLOB_NOCHECK: c_int = 0x10; 
-pub const GLOB_NOSORT: c_int = 0x20; -pub const GLOB_NOESCAPE: c_int = 0x80; -pub const GLOB_NOSPACE: c_int = 0x2000; -pub const GLOB_ABORTED: c_int = 0x1000; -pub const GLOB_NOMATCH: c_int = 0x4000; -pub const GLOB_NOSYS: c_int = 0x8000; - -// langinfo.h -pub const DAY_1: crate::nl_item = 13; -pub const DAY_2: crate::nl_item = 14; -pub const DAY_3: crate::nl_item = 15; -pub const DAY_4: crate::nl_item = 16; -pub const DAY_5: crate::nl_item = 17; -pub const DAY_6: crate::nl_item = 18; -pub const DAY_7: crate::nl_item = 19; -pub const ABDAY_1: crate::nl_item = 6; -pub const ABDAY_2: crate::nl_item = 7; -pub const ABDAY_3: crate::nl_item = 8; -pub const ABDAY_4: crate::nl_item = 9; -pub const ABDAY_5: crate::nl_item = 10; -pub const ABDAY_6: crate::nl_item = 11; -pub const ABDAY_7: crate::nl_item = 12; -pub const MON_1: crate::nl_item = 32; -pub const MON_2: crate::nl_item = 33; -pub const MON_3: crate::nl_item = 34; -pub const MON_4: crate::nl_item = 35; -pub const MON_5: crate::nl_item = 36; -pub const MON_6: crate::nl_item = 37; -pub const MON_7: crate::nl_item = 38; -pub const MON_8: crate::nl_item = 39; -pub const MON_9: crate::nl_item = 40; -pub const MON_10: crate::nl_item = 41; -pub const MON_11: crate::nl_item = 42; -pub const MON_12: crate::nl_item = 43; -pub const ABMON_1: crate::nl_item = 20; -pub const ABMON_2: crate::nl_item = 21; -pub const ABMON_3: crate::nl_item = 22; -pub const ABMON_4: crate::nl_item = 23; -pub const ABMON_5: crate::nl_item = 24; -pub const ABMON_6: crate::nl_item = 25; -pub const ABMON_7: crate::nl_item = 26; -pub const ABMON_8: crate::nl_item = 27; -pub const ABMON_9: crate::nl_item = 28; -pub const ABMON_10: crate::nl_item = 29; -pub const ABMON_11: crate::nl_item = 30; -pub const ABMON_12: crate::nl_item = 31; -pub const RADIXCHAR: crate::nl_item = 44; -pub const THOUSEP: crate::nl_item = 45; -pub const YESSTR: crate::nl_item = 46; -pub const NOSTR: crate::nl_item = 47; -pub const CRNCYSTR: crate::nl_item = 48; -pub const D_T_FMT: crate::nl_item = 1; -pub const D_FMT: crate::nl_item = 2; -pub const T_FMT: crate::nl_item = 3; -pub const AM_STR: crate::nl_item = 4; -pub const PM_STR: crate::nl_item = 5; -pub const CODESET: crate::nl_item = 49; -pub const T_FMT_AMPM: crate::nl_item = 55; -pub const ERA: crate::nl_item = 56; -pub const ERA_D_FMT: crate::nl_item = 57; -pub const ERA_D_T_FMT: crate::nl_item = 58; -pub const ERA_T_FMT: crate::nl_item = 59; -pub const ALT_DIGITS: crate::nl_item = 60; -pub const YESEXPR: crate::nl_item = 61; -pub const NOEXPR: crate::nl_item = 62; - -// locale.h -pub const LC_GLOBAL_LOCALE: crate::locale_t = -1isize as crate::locale_t; -pub const LC_COLLATE: c_int = 0; -pub const LC_CTYPE: c_int = 1; -pub const LC_MONETARY: c_int = 2; -pub const LC_NUMERIC: c_int = 3; -pub const LC_TIME: c_int = 4; -pub const LC_MESSAGES: c_int = 5; -pub const LC_ALL: c_int = -1; -pub const LC_COLLATE_MASK: c_int = 1; -pub const LC_CTYPE_MASK: c_int = 2; -pub const LC_MESSAGES_MASK: c_int = 4; -pub const LC_MONETARY_MASK: c_int = 8; -pub const LC_NUMERIC_MASK: c_int = 16; -pub const LC_TIME_MASK: c_int = 32; -pub const LC_ALL_MASK: c_int = LC_COLLATE_MASK - | LC_CTYPE_MASK - | LC_MESSAGES_MASK - | LC_MONETARY_MASK - | LC_NUMERIC_MASK - | LC_TIME_MASK; - -// netdb.h -pub const NI_MAXHOST: crate::socklen_t = 1025; -pub const NI_MAXSERV: crate::socklen_t = 32; -pub const NI_NOFQDN: crate::socklen_t = 0x1; -pub const NI_NUMERICHOST: crate::socklen_t = 0x2; -pub const NI_NAMEREQD: crate::socklen_t = 0x4; -pub const NI_NUMERICSERV: crate::socklen_t 
= 0x8; -pub const NI_DGRAM: crate::socklen_t = 0x10; -pub const NI_NUMERICSCOPE: crate::socklen_t = 0x40; -pub const EAI_AGAIN: c_int = 2; -pub const EAI_BADFLAGS: c_int = 3; -pub const EAI_FAIL: c_int = 4; -pub const EAI_FAMILY: c_int = 5; -pub const EAI_MEMORY: c_int = 6; -pub const EAI_NODATA: c_int = 7; -pub const EAI_NONAME: c_int = 8; -pub const EAI_SERVICE: c_int = 9; -pub const EAI_SOCKTYPE: c_int = 10; -pub const EAI_SYSTEM: c_int = 11; -pub const EAI_OVERFLOW: c_int = 13; -pub const AI_CANONNAME: c_int = 0x01; -pub const AI_PASSIVE: c_int = 0x02; -pub const AI_NUMERICHOST: c_int = 0x04; -pub const AI_ADDRCONFIG: c_int = 0x08; -pub const AI_V4MAPPED: c_int = 0x10; -pub const AI_ALL: c_int = 0x20; -pub const AI_NUMERICSERV: c_int = 0x40; -pub const AI_EXTFLAGS: c_int = 0x80; -pub const AI_DEFAULT: c_int = AI_V4MAPPED | AI_ADDRCONFIG; -pub const IPV6_ADDRFORM: c_int = 22; -pub const IPV6_ADDR_PREFERENCES: c_int = 74; -pub const IPV6_CHECKSUM: c_int = 39; -pub const IPV6_DONTFRAG: c_int = 45; -pub const IPV6_DSTOPTS: c_int = 54; -pub const IPV6_FLOWINFO_FLOWLABEL: c_int = 0x00ffffff; -pub const IPV6_FLOWINFO_PRIORITY: c_int = 0x0f000000; -pub const IPV6_FLOWINFO_PRIFLOW: c_int = 0x0fffffff; -pub const IPV6_FLOWINFO_SRFLAG: c_int = 0x10000000; -pub const IPV6_FLOWINFO_VERSION: c_int = 0xf0000000; -pub const IPV6_HOPLIMIT: c_int = 40; -pub const IPV6_HOPOPTS: c_int = 52; -pub const IPV6_NEXTHOP: c_int = 48; -pub const IPV6_PATHMTU: c_int = 46; -pub const IPV6_PKTINFO: c_int = 33; -pub const IPV6_PREFER_SRC_CGA: c_int = 16; -pub const IPV6_PREFER_SRC_COA: c_int = 2; -pub const IPV6_PREFER_SRC_HOME: c_int = 1; -pub const IPV6_PREFER_SRC_NONCGA: c_int = 32; -pub const IPV6_PREFER_SRC_PUBLIC: c_int = 4; -pub const IPV6_PREFER_SRC_TMP: c_int = 8; -pub const IPV6_RECVDSTOPTS: c_int = 56; -pub const IPV6_RECVHOPLIMIT: c_int = 41; -pub const IPV6_RECVHOPOPTS: c_int = 53; -pub const IPV6_RECVPATHMTU: c_int = 47; -pub const IPV6_RECVRTHDR: c_int = 51; -pub const IPV6_RECVTCLASS: c_int = 42; -pub const IPV6_RTHDR: c_int = 50; -pub const IPV6_RTHDRDSTOPTS: c_int = 55; -pub const IPV6_TCLASS: c_int = 43; - -// net/bpf.h -pub const DLT_NULL: c_int = 0x18; -pub const DLT_EN10MB: c_int = 0x6; -pub const DLT_EN3MB: c_int = 0x1a; -pub const DLT_AX25: c_int = 0x5; -pub const DLT_PRONET: c_int = 0xd; -pub const DLT_IEEE802: c_int = 0x7; -pub const DLT_ARCNET: c_int = 0x23; -pub const DLT_SLIP: c_int = 0x1c; -pub const DLT_PPP: c_int = 0x17; -pub const DLT_FDDI: c_int = 0xf; -pub const DLT_ATM: c_int = 0x25; -pub const DLT_IPOIB: c_int = 0xc7; -pub const BIOCSETF: c_int = 0x80104267; -pub const BIOCGRTIMEOUT: c_int = 0x4010426e; -pub const BIOCGBLEN: c_int = 0x40044266; -pub const BIOCSBLEN: c_int = 0xc0044266; -pub const BIOCFLUSH: c_int = 0x20004268; -pub const BIOCPROMISC: c_int = 0x20004269; -pub const BIOCGDLT: c_int = 0x4004426a; -pub const BIOCSRTIMEOUT: c_int = 0x8010426d; -pub const BIOCGSTATS: c_int = 0x4008426f; -pub const BIOCIMMEDIATE: c_int = 0x80044270; -pub const BIOCVERSION: c_int = 0x40044271; -pub const BIOCSDEVNO: c_int = 0x20004272; -pub const BIOCGETIF: c_int = 0x4020426b; -pub const BIOCSETIF: c_int = 0x8020426c; -pub const BPF_ABS: c_int = 32; -pub const BPF_ADD: c_int = 0; -pub const BPF_ALIGNMENT: c_ulong = 4; -pub const BPF_ALU: c_int = 4; -pub const BPF_AND: c_int = 80; -pub const BPF_B: c_int = 16; -pub const BPF_DIV: c_int = 48; -pub const BPF_H: c_int = 8; -pub const BPF_IMM: c_int = 0; -pub const BPF_IND: c_int = 64; -pub const BPF_JA: c_int = 0; -pub const BPF_JEQ: c_int = 
16; -pub const BPF_JGE: c_int = 48; -pub const BPF_JGT: c_int = 32; -pub const BPF_JMP: c_int = 5; -pub const BPF_JSET: c_int = 64; -pub const BPF_K: c_int = 0; -pub const BPF_LD: c_int = 0; -pub const BPF_LDX: c_int = 1; -pub const BPF_LEN: c_int = 128; -pub const BPF_LSH: c_int = 96; -pub const BPF_MAXINSNS: c_int = 512; -pub const BPF_MEM: c_int = 96; -pub const BPF_MEMWORDS: c_int = 16; -pub const BPF_MISC: c_int = 7; -pub const BPF_MSH: c_int = 160; -pub const BPF_MUL: c_int = 32; -pub const BPF_NEG: c_int = 128; -pub const BPF_OR: c_int = 64; -pub const BPF_RET: c_int = 6; -pub const BPF_RSH: c_int = 112; -pub const BPF_ST: c_int = 2; -pub const BPF_STX: c_int = 3; -pub const BPF_SUB: c_int = 16; -pub const BPF_W: c_int = 0; -pub const BPF_X: c_int = 8; - -// net/if.h -pub const IFNET_SLOWHZ: c_int = 1; -pub const IFQ_MAXLEN: c_int = 50; -pub const IFF_UP: c_int = 0x1; -pub const IFF_BROADCAST: c_int = 0x2; -pub const IFF_DEBUG: c_int = 0x4; -pub const IFF_LOOPBACK: c_int = 0x8; -pub const IFF_POINTOPOINT: c_int = 0x10; -pub const IFF_NOTRAILERS: c_int = 0x20; -pub const IFF_RUNNING: c_int = 0x40; -pub const IFF_NOARP: c_int = 0x80; -pub const IFF_PROMISC: c_int = 0x100; -pub const IFF_ALLMULTI: c_int = 0x200; -pub const IFF_MULTICAST: c_int = 0x80000; -pub const IFF_LINK0: c_int = 0x100000; -pub const IFF_LINK1: c_int = 0x200000; -pub const IFF_LINK2: c_int = 0x400000; -pub const IFF_OACTIVE: c_int = 0x400; -pub const IFF_SIMPLEX: c_int = 0x800; - -// net/if_arp.h -pub const ARPHRD_ETHER: c_int = 1; -pub const ARPHRD_802_5: c_int = 6; -pub const ARPHRD_802_3: c_int = 6; -pub const ARPHRD_FDDI: c_int = 1; - -// net/route.h -pub const RTM_ADD: c_int = 0x1; -pub const RTM_DELETE: c_int = 0x2; -pub const RTM_CHANGE: c_int = 0x3; -pub const RTM_GET: c_int = 0x4; -pub const RTM_LOSING: c_int = 0x5; -pub const RTM_REDIRECT: c_int = 0x6; -pub const RTM_MISS: c_int = 0x7; -pub const RTM_LOCK: c_int = 0x8; -pub const RTM_OLDADD: c_int = 0x9; -pub const RTM_OLDDEL: c_int = 0xa; -pub const RTM_RESOLVE: c_int = 0xb; -pub const RTM_NEWADDR: c_int = 0xc; -pub const RTM_DELADDR: c_int = 0xd; -pub const RTM_IFINFO: c_int = 0xe; -pub const RTM_EXPIRE: c_int = 0xf; -pub const RTM_RTLOST: c_int = 0x10; -pub const RTM_GETNEXT: c_int = 0x11; -pub const RTM_SAMEADDR: c_int = 0x12; -pub const RTM_SET: c_int = 0x13; -pub const RTV_MTU: c_int = 0x1; -pub const RTV_HOPCOUNT: c_int = 0x2; -pub const RTV_EXPIRE: c_int = 0x4; -pub const RTV_RPIPE: c_int = 0x8; -pub const RTV_SPIPE: c_int = 0x10; -pub const RTV_SSTHRESH: c_int = 0x20; -pub const RTV_RTT: c_int = 0x40; -pub const RTV_RTTVAR: c_int = 0x80; -pub const RTA_DST: c_int = 0x1; -pub const RTA_GATEWAY: c_int = 0x2; -pub const RTA_NETMASK: c_int = 0x4; -pub const RTA_GENMASK: c_int = 0x8; -pub const RTA_IFP: c_int = 0x10; -pub const RTA_IFA: c_int = 0x20; -pub const RTA_AUTHOR: c_int = 0x40; -pub const RTA_BRD: c_int = 0x80; -pub const RTA_DOWNSTREAM: c_int = 0x100; -pub const RTAX_DST: c_int = 0; -pub const RTAX_GATEWAY: c_int = 1; -pub const RTAX_NETMASK: c_int = 2; -pub const RTAX_GENMASK: c_int = 3; -pub const RTAX_IFP: c_int = 4; -pub const RTAX_IFA: c_int = 5; -pub const RTAX_AUTHOR: c_int = 6; -pub const RTAX_BRD: c_int = 7; -pub const RTAX_MAX: c_int = 8; -pub const RTF_UP: c_int = 0x1; -pub const RTF_GATEWAY: c_int = 0x2; -pub const RTF_HOST: c_int = 0x4; -pub const RTF_REJECT: c_int = 0x8; -pub const RTF_DYNAMIC: c_int = 0x10; -pub const RTF_MODIFIED: c_int = 0x20; -pub const RTF_DONE: c_int = 0x40; -pub const RTF_MASK: c_int = 0x80; -pub const 
RTF_CLONING: c_int = 0x100; -pub const RTF_XRESOLVE: c_int = 0x200; -pub const RTF_LLINFO: c_int = 0x400; -pub const RTF_STATIC: c_int = 0x800; -pub const RTF_BLACKHOLE: c_int = 0x1000; -pub const RTF_BUL: c_int = 0x2000; -pub const RTF_PROTO2: c_int = 0x4000; -pub const RTF_PROTO1: c_int = 0x8000; -pub const RTF_CLONE: c_int = 0x10000; -pub const RTF_CLONED: c_int = 0x20000; -pub const RTF_PROTO3: c_int = 0x40000; -pub const RTF_BCE: c_int = 0x80000; -pub const RTF_PINNED: c_int = 0x100000; -pub const RTF_LOCAL: c_int = 0x200000; -pub const RTF_BROADCAST: c_int = 0x400000; -pub const RTF_MULTICAST: c_int = 0x800000; -pub const RTF_ACTIVE_DGD: c_int = 0x1000000; -pub const RTF_STOPSRCH: c_int = 0x2000000; -pub const RTF_FREE_IN_PROG: c_int = 0x4000000; -pub const RTF_PERMANENT6: c_int = 0x8000000; -pub const RTF_UNREACHABLE: c_int = 0x10000000; -pub const RTF_CACHED: c_int = 0x20000000; -pub const RTF_SMALLMTU: c_int = 0x40000; - -// netinet/in.h -pub const IPPROTO_HOPOPTS: c_int = 0; -pub const IPPROTO_IGMP: c_int = 2; -pub const IPPROTO_GGP: c_int = 3; -pub const IPPROTO_IPIP: c_int = 4; -pub const IPPROTO_EGP: c_int = 8; -pub const IPPROTO_PUP: c_int = 12; -pub const IPPROTO_IDP: c_int = 22; -pub const IPPROTO_TP: c_int = 29; -pub const IPPROTO_ROUTING: c_int = 43; -pub const IPPROTO_FRAGMENT: c_int = 44; -pub const IPPROTO_QOS: c_int = 45; -pub const IPPROTO_RSVP: c_int = 46; -pub const IPPROTO_GRE: c_int = 47; -pub const IPPROTO_ESP: c_int = 50; -pub const IPPROTO_AH: c_int = 51; -pub const IPPROTO_NONE: c_int = 59; -pub const IPPROTO_DSTOPTS: c_int = 60; -pub const IPPROTO_LOCAL: c_int = 63; -pub const IPPROTO_EON: c_int = 80; -pub const IPPROTO_BIP: c_int = 0x53; -pub const IPPROTO_SCTP: c_int = 132; -pub const IPPROTO_MH: c_int = 135; -pub const IPPROTO_GIF: c_int = 140; -pub const IPPROTO_RAW: c_int = 255; -pub const IP_OPTIONS: c_int = 1; -pub const IP_HDRINCL: c_int = 2; -pub const IP_TOS: c_int = 3; -pub const IP_TTL: c_int = 4; -pub const IP_UNICAST_HOPS: c_int = 4; -pub const IP_RECVOPTS: c_int = 5; -pub const IP_RECVRETOPTS: c_int = 6; -pub const IP_RECVDSTADDR: c_int = 7; -pub const IP_RETOPTS: c_int = 8; -pub const IP_MULTICAST_IF: c_int = 9; -pub const IP_MULTICAST_TTL: c_int = 10; -pub const IP_MULTICAST_HOPS: c_int = 10; -pub const IP_MULTICAST_LOOP: c_int = 11; -pub const IP_ADD_MEMBERSHIP: c_int = 12; -pub const IP_DROP_MEMBERSHIP: c_int = 13; -pub const IP_RECVMACHDR: c_int = 14; -pub const IP_RECVIFINFO: c_int = 15; -pub const IP_BROADCAST_IF: c_int = 16; -pub const IP_DHCPMODE: c_int = 17; -pub const IP_RECVIF: c_int = 20; -pub const IP_ADDRFORM: c_int = 22; -pub const IP_DONTFRAG: c_int = 25; -pub const IP_FINDPMTU: c_int = 26; -pub const IP_PMTUAGE: c_int = 27; -pub const IP_RECVINTERFACE: c_int = 32; -pub const IP_RECVTTL: c_int = 34; -pub const IP_BLOCK_SOURCE: c_int = 58; -pub const IP_UNBLOCK_SOURCE: c_int = 59; -pub const IP_ADD_SOURCE_MEMBERSHIP: c_int = 60; -pub const IP_DROP_SOURCE_MEMBERSHIP: c_int = 61; -pub const IP_DEFAULT_MULTICAST_TTL: c_int = 1; -pub const IP_DEFAULT_MULTICAST_LOOP: c_int = 1; -pub const IP_INC_MEMBERSHIPS: c_int = 20; -pub const IP_INIT_MEMBERSHIP: c_int = 20; -pub const IPV6_UNICAST_HOPS: c_int = IP_TTL; -pub const IPV6_MULTICAST_IF: c_int = IP_MULTICAST_IF; -pub const IPV6_MULTICAST_HOPS: c_int = IP_MULTICAST_TTL; -pub const IPV6_MULTICAST_LOOP: c_int = IP_MULTICAST_LOOP; -pub const IPV6_RECVPKTINFO: c_int = 35; -pub const IPV6_V6ONLY: c_int = 37; -pub const IPV6_ADD_MEMBERSHIP: c_int = IP_ADD_MEMBERSHIP; -pub const 
IPV6_DROP_MEMBERSHIP: c_int = IP_DROP_MEMBERSHIP; -pub const IPV6_JOIN_GROUP: c_int = IP_ADD_MEMBERSHIP; -pub const IPV6_LEAVE_GROUP: c_int = IP_DROP_MEMBERSHIP; -pub const MCAST_BLOCK_SOURCE: c_int = 64; -pub const MCAST_EXCLUDE: c_int = 2; -pub const MCAST_INCLUDE: c_int = 1; -pub const MCAST_JOIN_GROUP: c_int = 62; -pub const MCAST_JOIN_SOURCE_GROUP: c_int = 66; -pub const MCAST_LEAVE_GROUP: c_int = 63; -pub const MCAST_LEAVE_SOURCE_GROUP: c_int = 67; -pub const MCAST_UNBLOCK_SOURCE: c_int = 65; - -// netinet/ip.h -pub const MAXTTL: c_int = 255; -pub const IPDEFTTL: c_int = 64; -pub const IPOPT_CONTROL: c_int = 0; -pub const IPOPT_EOL: c_int = 0; -pub const IPOPT_LSRR: c_int = 131; -pub const IPOPT_MINOFF: c_int = 4; -pub const IPOPT_NOP: c_int = 1; -pub const IPOPT_OFFSET: c_int = 2; -pub const IPOPT_OLEN: c_int = 1; -pub const IPOPT_OPTVAL: c_int = 0; -pub const IPOPT_RESERVED1: c_int = 0x20; -pub const IPOPT_RESERVED2: c_int = 0x60; -pub const IPOPT_RR: c_int = 7; -pub const IPOPT_SSRR: c_int = 137; -pub const IPOPT_TS: c_int = 68; -pub const IPOPT_TS_PRESPEC: c_int = 3; -pub const IPOPT_TS_TSANDADDR: c_int = 1; -pub const IPOPT_TS_TSONLY: c_int = 0; -pub const IPTOS_LOWDELAY: c_int = 16; -pub const IPTOS_PREC_CRITIC_ECP: c_int = 160; -pub const IPTOS_PREC_FLASH: c_int = 96; -pub const IPTOS_PREC_FLASHOVERRIDE: c_int = 128; -pub const IPTOS_PREC_IMMEDIATE: c_int = 64; -pub const IPTOS_PREC_INTERNETCONTROL: c_int = 192; -pub const IPTOS_PREC_NETCONTROL: c_int = 224; -pub const IPTOS_PREC_PRIORITY: c_int = 32; -pub const IPTOS_PREC_ROUTINE: c_int = 16; -pub const IPTOS_RELIABILITY: c_int = 4; -pub const IPTOS_THROUGHPUT: c_int = 8; -pub const IPVERSION: c_int = 4; - -// netinet/tcp.h -pub const TCP_NODELAY: c_int = 0x1; -pub const TCP_MAXSEG: c_int = 0x2; -pub const TCP_RFC1323: c_int = 0x4; -pub const TCP_KEEPALIVE: c_int = 0x8; -pub const TCP_KEEPIDLE: c_int = 0x11; -pub const TCP_KEEPINTVL: c_int = 0x12; -pub const TCP_KEEPCNT: c_int = 0x13; -pub const TCP_NODELAYACK: c_int = 0x14; - -// pthread.h -pub const PTHREAD_BARRIER_SERIAL_THREAD: c_int = 2; -pub const PTHREAD_CREATE_JOINABLE: c_int = 0; -pub const PTHREAD_CREATE_DETACHED: c_int = 1; -pub const PTHREAD_PROCESS_SHARED: c_int = 0; -pub const PTHREAD_PROCESS_PRIVATE: c_ushort = 1; -pub const PTHREAD_STACK_MIN: size_t = PAGESIZE as size_t * 4; -pub const PTHREAD_MUTEX_NORMAL: c_int = 5; -pub const PTHREAD_MUTEX_ERRORCHECK: c_int = 3; -pub const PTHREAD_MUTEX_RECURSIVE: c_int = 4; -pub const PTHREAD_MUTEX_DEFAULT: c_int = PTHREAD_MUTEX_NORMAL; -pub const PTHREAD_MUTEX_ROBUST: c_int = 1; -pub const PTHREAD_MUTEX_STALLED: c_int = 0; -pub const PTHREAD_PRIO_INHERIT: c_int = 3; -pub const PTHREAD_PRIO_NONE: c_int = 1; -pub const PTHREAD_PRIO_PROTECT: c_int = 2; - -// regex.h -pub const REG_EXTENDED: c_int = 1; -pub const REG_ICASE: c_int = 2; -pub const REG_NEWLINE: c_int = 4; -pub const REG_NOSUB: c_int = 8; -pub const REG_NOTBOL: c_int = 0x100; -pub const REG_NOTEOL: c_int = 0x200; -pub const REG_NOMATCH: c_int = 1; -pub const REG_BADPAT: c_int = 2; -pub const REG_ECOLLATE: c_int = 3; -pub const REG_ECTYPE: c_int = 4; -pub const REG_EESCAPE: c_int = 5; -pub const REG_ESUBREG: c_int = 6; -pub const REG_EBRACK: c_int = 7; -pub const REG_EPAREN: c_int = 8; -pub const REG_EBRACE: c_int = 9; -pub const REG_BADBR: c_int = 10; -pub const REG_ERANGE: c_int = 11; -pub const REG_ESPACE: c_int = 12; -pub const REG_BADRPT: c_int = 13; -pub const REG_ECHAR: c_int = 14; -pub const REG_EBOL: c_int = 15; -pub const REG_EEOL: c_int = 16; -pub const 
REG_ENOSYS: c_int = 17; - -// rpcsvc/mount.h -pub const NFSMNT_SOFT: c_int = 0x001; -pub const NFSMNT_WSIZE: c_int = 0x002; -pub const NFSMNT_RSIZE: c_int = 0x004; -pub const NFSMNT_TIMEO: c_int = 0x008; -pub const NFSMNT_RETRANS: c_int = 0x010; -pub const NFSMNT_HOSTNAME: c_int = 0x020; -pub const NFSMNT_INT: c_int = 0x040; -pub const NFSMNT_NOAC: c_int = 0x080; -pub const NFSMNT_ACREGMIN: c_int = 0x0100; -pub const NFSMNT_ACREGMAX: c_int = 0x0200; -pub const NFSMNT_ACDIRMIN: c_int = 0x0400; -pub const NFSMNT_ACDIRMAX: c_int = 0x0800; - -// rpcsvc/rstat.h -pub const CPUSTATES: c_int = 4; - -// semaphore.h -pub const SEM_FAILED: *mut sem_t = -1isize as *mut crate::sem_t; - -// spawn.h -// DIFF(main): changed to `c_short` in f62eb023ab -pub const POSIX_SPAWN_SETPGROUP: c_int = 0x1; -pub const POSIX_SPAWN_SETSIGMASK: c_int = 0x2; -pub const POSIX_SPAWN_SETSIGDEF: c_int = 0x4; -pub const POSIX_SPAWN_SETSCHEDULER: c_int = 0x8; -pub const POSIX_SPAWN_SETSCHEDPARAM: c_int = 0x10; -pub const POSIX_SPAWN_RESETIDS: c_int = 0x20; -pub const POSIX_SPAWN_FORK_HANDLERS: c_int = 0x1000; - -// stdio.h -pub const EOF: c_int = -1; -pub const SEEK_SET: c_int = 0; -pub const SEEK_CUR: c_int = 1; -pub const SEEK_END: c_int = 2; -pub const _IOFBF: c_int = 0o000; -pub const _IONBF: c_int = 0o004; -pub const _IOLBF: c_int = 0o100; -pub const BUFSIZ: c_uint = 4096; -pub const FOPEN_MAX: c_uint = 32767; -pub const FILENAME_MAX: c_uint = 255; -pub const L_tmpnam: c_uint = 21; -pub const TMP_MAX: c_uint = 16384; - -// stdlib.h -pub const EXIT_FAILURE: c_int = 1; -pub const EXIT_SUCCESS: c_int = 0; -pub const RAND_MAX: c_int = 32767; - -// sys/access.h -pub const F_OK: c_int = 0; -pub const R_OK: c_int = 4; -pub const W_OK: c_int = 2; -pub const X_OK: c_int = 1; - -// sys/aio.h -pub const LIO_NOP: c_int = 0; -pub const LIO_READ: c_int = 1; -pub const LIO_WRITE: c_int = 2; -pub const LIO_NOWAIT: c_int = 0; -pub const LIO_WAIT: c_int = 1; -pub const AIO_ALLDONE: c_int = 2; -pub const AIO_CANCELED: c_int = 0; -pub const AIO_NOTCANCELED: c_int = 1; - -// sys/errno.h -pub const EPERM: c_int = 1; -pub const ENOENT: c_int = 2; -pub const ESRCH: c_int = 3; -pub const EINTR: c_int = 4; -pub const EIO: c_int = 5; -pub const ENXIO: c_int = 6; -pub const E2BIG: c_int = 7; -pub const ENOEXEC: c_int = 8; -pub const EBADF: c_int = 9; -pub const ECHILD: c_int = 10; -pub const EAGAIN: c_int = 11; -pub const ENOMEM: c_int = 12; -pub const EACCES: c_int = 13; -pub const EFAULT: c_int = 14; -pub const ENOTBLK: c_int = 15; -pub const EBUSY: c_int = 16; -pub const EEXIST: c_int = 17; -pub const EXDEV: c_int = 18; -pub const ENODEV: c_int = 19; -pub const ENOTDIR: c_int = 20; -pub const EISDIR: c_int = 21; -pub const EINVAL: c_int = 22; -pub const ENFILE: c_int = 23; -pub const EMFILE: c_int = 24; -pub const ENOTTY: c_int = 25; -pub const ETXTBSY: c_int = 26; -pub const EFBIG: c_int = 27; -pub const ENOSPC: c_int = 28; -pub const ESPIPE: c_int = 29; -pub const EROFS: c_int = 30; -pub const EMLINK: c_int = 31; -pub const EPIPE: c_int = 32; -pub const EDOM: c_int = 33; -pub const ERANGE: c_int = 34; -pub const ENOMSG: c_int = 35; -pub const EIDRM: c_int = 36; -pub const ECHRNG: c_int = 37; -pub const EL2NSYNC: c_int = 38; -pub const EL3HLT: c_int = 39; -pub const EL3RST: c_int = 40; -pub const ELNRNG: c_int = 41; -pub const EUNATCH: c_int = 42; -pub const ENOCSI: c_int = 43; -pub const EL2HLT: c_int = 44; -pub const EDEADLK: c_int = 45; -pub const ENOTREADY: c_int = 46; -pub const EWRPROTECT: c_int = 47; -pub const EFORMAT: c_int = 48; -pub 
const ENOLCK: c_int = 49; -pub const ENOCONNECT: c_int = 50; -pub const ESTALE: c_int = 52; -pub const EDIST: c_int = 53; -pub const EWOULDBLOCK: c_int = 54; -pub const EINPROGRESS: c_int = 55; -pub const EALREADY: c_int = 56; -pub const ENOTSOCK: c_int = 57; -pub const EDESTADDRREQ: c_int = 58; -pub const EMSGSIZE: c_int = 59; -pub const EPROTOTYPE: c_int = 60; -pub const ENOPROTOOPT: c_int = 61; -pub const EPROTONOSUPPORT: c_int = 62; -pub const ESOCKTNOSUPPORT: c_int = 63; -pub const EOPNOTSUPP: c_int = 64; -pub const EPFNOSUPPORT: c_int = 65; -pub const EAFNOSUPPORT: c_int = 66; -pub const EADDRINUSE: c_int = 67; -pub const EADDRNOTAVAIL: c_int = 68; -pub const ENETDOWN: c_int = 69; -pub const ENETUNREACH: c_int = 70; -pub const ENETRESET: c_int = 71; -pub const ECONNABORTED: c_int = 72; -pub const ECONNRESET: c_int = 73; -pub const ENOBUFS: c_int = 74; -pub const EISCONN: c_int = 75; -pub const ENOTCONN: c_int = 76; -pub const ESHUTDOWN: c_int = 77; -pub const ETIMEDOUT: c_int = 78; -pub const ECONNREFUSED: c_int = 79; -pub const EHOSTDOWN: c_int = 80; -pub const EHOSTUNREACH: c_int = 81; -pub const ERESTART: c_int = 82; -pub const EPROCLIM: c_int = 83; -pub const EUSERS: c_int = 84; -pub const ELOOP: c_int = 85; -pub const ENAMETOOLONG: c_int = 86; -pub const ENOTEMPTY: c_int = 87; -pub const EDQUOT: c_int = 88; -pub const ECORRUPT: c_int = 89; -pub const ESYSERROR: c_int = 90; -pub const EREMOTE: c_int = 93; -pub const ENOTRECOVERABLE: c_int = 94; -pub const EOWNERDEAD: c_int = 95; -// errnos 96-108 reserved for future use compatible with AIX PS/2 -pub const ENOSYS: c_int = 109; -pub const EMEDIA: c_int = 110; -pub const ESOFT: c_int = 111; -pub const ENOATTR: c_int = 112; -pub const ESAD: c_int = 113; -pub const ENOTRUST: c_int = 114; -pub const ETOOMANYREFS: c_int = 115; -pub const EILSEQ: c_int = 116; -pub const ECANCELED: c_int = 117; -pub const ENOSR: c_int = 118; -pub const ETIME: c_int = 119; -pub const EBADMSG: c_int = 120; -pub const EPROTO: c_int = 121; -pub const ENODATA: c_int = 122; -pub const ENOSTR: c_int = 123; -pub const ENOTSUP: c_int = 124; -pub const EMULTIHOP: c_int = 125; -pub const ENOLINK: c_int = 126; -pub const EOVERFLOW: c_int = 127; - -// sys/dr.h -pub const LPAR_INFO_FORMAT1: c_int = 1; -pub const LPAR_INFO_FORMAT2: c_int = 2; -pub const WPAR_INFO_FORMAT: c_int = 3; -pub const PROC_MODULE_INFO: c_int = 4; -pub const NUM_PROC_MODULE_TYPES: c_int = 5; -pub const LPAR_INFO_VRME_NUM_POOLS: c_int = 6; -pub const LPAR_INFO_VRME_POOLS: c_int = 7; -pub const LPAR_INFO_VRME_LPAR: c_int = 8; -pub const LPAR_INFO_VRME_RESET_HWMARKS: c_int = 9; -pub const LPAR_INFO_VRME_ALLOW_DESIRED: c_int = 10; -pub const EMTP_INFO_FORMAT: c_int = 11; -pub const LPAR_INFO_LPM_CAPABILITY: c_int = 12; -pub const ENERGYSCALE_INFO: c_int = 13; - -// sys/file.h -pub const LOCK_SH: c_int = 1; -pub const LOCK_EX: c_int = 2; -pub const LOCK_NB: c_int = 4; -pub const LOCK_UN: c_int = 8; - -// sys/flock.h -pub const F_RDLCK: c_short = 0o01; -pub const F_WRLCK: c_short = 0o02; -pub const F_UNLCK: c_short = 0o03; - -// sys/fs/quota_common.h -pub const Q_QUOTAON: c_int = 0x100; -pub const Q_QUOTAOFF: c_int = 0x200; -pub const Q_SETUSE: c_int = 0x500; -pub const Q_SYNC: c_int = 0x600; -pub const Q_GETQUOTA: c_int = 0x300; -pub const Q_SETQLIM: c_int = 0x400; -pub const Q_SETQUOTA: c_int = 0x400; - -// sys/ioctl.h -pub const IOCPARM_MASK: c_int = 0x7f; -pub const IOC_VOID: c_int = 0x20000000; -pub const IOC_OUT: c_int = 0x40000000; -pub const IOC_IN: c_int = 0x40000000 << 1; -pub const 
IOC_INOUT: c_int = IOC_IN | IOC_OUT; -pub const FIOCLEX: c_int = 0x20006601; -pub const FIONCLEX: c_int = 0x20006602; -pub const FIONREAD: c_int = 0x4004667f; -pub const FIONBIO: c_int = 0x8004667e; -pub const FIOASYNC: c_int = 0x8004667d; -pub const FIOSETOWN: c_int = 0x8004667c; -pub const FIOGETOWN: c_int = 0x4004667b; -pub const TIOCGETD: c_int = 0x40047400; -pub const TIOCSETD: c_int = 0x80047401; -pub const TIOCHPCL: c_int = 0x20007402; -pub const TIOCMODG: c_int = 0x40047403; -pub const TIOCMODS: c_int = 0x80047404; -pub const TIOCM_LE: c_int = 0x1; -pub const TIOCM_DTR: c_int = 0x2; -pub const TIOCM_RTS: c_int = 0x4; -pub const TIOCM_ST: c_int = 0x8; -pub const TIOCM_SR: c_int = 0x10; -pub const TIOCM_CTS: c_int = 0x20; -pub const TIOCM_CAR: c_int = 0x40; -pub const TIOCM_CD: c_int = 0x40; -pub const TIOCM_RNG: c_int = 0x80; -pub const TIOCM_RI: c_int = 0x80; -pub const TIOCM_DSR: c_int = 0x100; -pub const TIOCGETP: c_int = 0x40067408; -pub const TIOCSETP: c_int = 0x80067409; -pub const TIOCSETN: c_int = 0x8006740a; -pub const TIOCEXCL: c_int = 0x2000740d; -pub const TIOCNXCL: c_int = 0x2000740e; -pub const TIOCFLUSH: c_int = 0x80047410; -pub const TIOCSETC: c_int = 0x80067411; -pub const TIOCGETC: c_int = 0x40067412; -pub const TANDEM: c_int = 0x1; -pub const CBREAK: c_int = 0x2; -pub const LCASE: c_int = 0x4; -pub const MDMBUF: c_int = 0x800000; -pub const XTABS: c_int = 0xc00; -pub const SIOCADDMULTI: c_int = 0x80206931; -pub const SIOCADDRT: c_int = 0x8038720a; -pub const SIOCDARP: c_int = 0x804c6920; -pub const SIOCDELMULTI: c_int = 0x80206932; -pub const SIOCDELRT: c_int = 0x8038720b; -pub const SIOCDIFADDR: c_int = 0x80286919; -pub const SIOCGARP: c_int = 0xc04c6926; -pub const SIOCGIFADDR: c_int = 0xc0286921; -pub const SIOCGIFBRDADDR: c_int = 0xc0286923; -pub const SIOCGIFCONF: c_int = 0xc0106945; -pub const SIOCGIFDSTADDR: c_int = 0xc0286922; -pub const SIOCGIFFLAGS: c_int = 0xc0286911; -pub const SIOCGIFHWADDR: c_int = 0xc0546995; -pub const SIOCGIFMETRIC: c_int = 0xc0286917; -pub const SIOCGIFMTU: c_int = 0xc0286956; -pub const SIOCGIFNETMASK: c_int = 0xc0286925; -pub const SIOCSARP: c_int = 0x804c691e; -pub const SIOCSIFADDR: c_int = 0x8028690c; -pub const SIOCSIFBRDADDR: c_int = 0x80286913; -pub const SIOCSIFDSTADDR: c_int = 0x8028690e; -pub const SIOCSIFFLAGS: c_int = 0x80286910; -pub const SIOCSIFMETRIC: c_int = 0x80286918; -pub const SIOCSIFMTU: c_int = 0x80286958; -pub const SIOCSIFNETMASK: c_int = 0x80286916; -pub const TIOCUCNTL: c_int = 0x80047466; -pub const TIOCCONS: c_int = 0x80047462; -pub const TIOCPKT: c_int = 0x80047470; -pub const TIOCPKT_DATA: c_int = 0; -pub const TIOCPKT_FLUSHREAD: c_int = 1; -pub const TIOCPKT_FLUSHWRITE: c_int = 2; -pub const TIOCPKT_NOSTOP: c_int = 0x10; -pub const TIOCPKT_DOSTOP: c_int = 0x20; -pub const TIOCPKT_START: c_int = 8; -pub const TIOCPKT_STOP: c_int = 4; - -// sys/ipc.h -pub const IPC_ALLOC: c_int = 0o100000; -pub const IPC_CREAT: c_int = 0o020000; -pub const IPC_EXCL: c_int = 0o002000; -pub const IPC_NOWAIT: c_int = 0o004000; -pub const IPC_RMID: c_int = 0; -pub const IPC_SET: c_int = 101; -pub const IPC_R: c_int = 0o0400; -pub const IPC_W: c_int = 0o0200; -pub const IPC_O: c_int = 0o1000; -pub const IPC_NOERROR: c_int = 0o10000; -pub const IPC_STAT: c_int = 102; -pub const IPC_PRIVATE: crate::key_t = -1; -pub const SHM_LOCK: c_int = 201; -pub const SHM_UNLOCK: c_int = 202; - -// sys/ldr.h -pub const L_GETMESSAGES: c_int = 1; -pub const L_GETINFO: c_int = 2; -pub const L_GETLIBPATH: c_int = 3; -pub const 
L_GETKERNINFO: c_int = 4; -pub const L_GETLIB32INFO: c_int = 5; -pub const L_GETLIB64INFO: c_int = 6; -pub const L_GETPROCINFO: c_int = 7; -pub const L_GETXINFO: c_int = 8; - -// sys/limits.h -pub const PATH_MAX: c_int = 1023; -pub const PAGESIZE: c_int = 4096; -pub const IOV_MAX: c_int = 16; -pub const AIO_LISTIO_MAX: c_int = 4096; -pub const PIPE_BUF: usize = 32768; -pub const OPEN_MAX: c_int = 65534; -pub const MAX_INPUT: c_int = 512; -pub const MAX_CANON: c_int = 256; -pub const ARG_MAX: c_int = 1048576; -pub const BC_BASE_MAX: c_int = 99; -pub const BC_DIM_MAX: c_int = 0x800; -pub const BC_SCALE_MAX: c_int = 99; -pub const BC_STRING_MAX: c_int = 0x800; -pub const CHARCLASS_NAME_MAX: c_int = 14; -pub const CHILD_MAX: c_int = 128; -pub const COLL_WEIGHTS_MAX: c_int = 4; -pub const EXPR_NEST_MAX: c_int = 32; -pub const NZERO: c_int = 20; - -// sys/lockf.h -pub const F_LOCK: c_int = 1; -pub const F_TEST: c_int = 3; -pub const F_TLOCK: c_int = 2; -pub const F_ULOCK: c_int = 0; - -// sys/machine.h -pub const BIG_ENDIAN: c_int = 4321; -pub const LITTLE_ENDIAN: c_int = 1234; -pub const PDP_ENDIAN: c_int = 3412; - -// sys/mman.h -pub const PROT_NONE: c_int = 0; -pub const PROT_READ: c_int = 1; -pub const PROT_WRITE: c_int = 2; -pub const PROT_EXEC: c_int = 4; -pub const MAP_FILE: c_int = 0; -pub const MAP_SHARED: c_int = 1; -pub const MAP_PRIVATE: c_int = 2; -pub const MAP_FIXED: c_int = 0x100; -pub const MAP_ANON: c_int = 0x10; -pub const MAP_ANONYMOUS: c_int = 0x10; -pub const MAP_FAILED: *mut c_void = !0 as *mut c_void; -pub const MAP_TYPE: c_int = 0xf0; -pub const MCL_CURRENT: c_int = 0x100; -pub const MCL_FUTURE: c_int = 0x200; -pub const MS_SYNC: c_int = 0x20; -pub const MS_ASYNC: c_int = 0x10; -pub const MS_INVALIDATE: c_int = 0x40; -pub const POSIX_MADV_NORMAL: c_int = 1; -pub const POSIX_MADV_RANDOM: c_int = 3; -pub const POSIX_MADV_SEQUENTIAL: c_int = 2; -pub const POSIX_MADV_WILLNEED: c_int = 4; -pub const POSIX_MADV_DONTNEED: c_int = 5; -pub const MADV_NORMAL: c_int = 0; -pub const MADV_RANDOM: c_int = 1; -pub const MADV_SEQUENTIAL: c_int = 2; -pub const MADV_WILLNEED: c_int = 3; -pub const MADV_DONTNEED: c_int = 4; - -// sys/mode.h -pub const S_IFMT: mode_t = 0o17_0000; -pub const S_IFREG: mode_t = 0o10_0000; -pub const S_IFDIR: mode_t = 0o4_0000; -pub const S_IFBLK: mode_t = 0o6_0000; -pub const S_IFCHR: mode_t = 0o2_0000; -pub const S_IFIFO: mode_t = 0o1_0000; -pub const S_IRWXU: mode_t = 0o0700; -pub const S_IRUSR: mode_t = 0o0400; -pub const S_IWUSR: mode_t = 0o0200; -pub const S_IXUSR: mode_t = 0o0100; -pub const S_IRWXG: mode_t = 0o0070; -pub const S_IRGRP: mode_t = 0o0040; -pub const S_IWGRP: mode_t = 0o0020; -pub const S_IXGRP: mode_t = 0o0010; -pub const S_IRWXO: mode_t = 0o0007; -pub const S_IROTH: mode_t = 0o0004; -pub const S_IWOTH: mode_t = 0o0002; -pub const S_IXOTH: mode_t = 0o0001; -pub const S_IFLNK: mode_t = 0o12_0000; -pub const S_IFSOCK: mode_t = 0o14_0000; -pub const S_IEXEC: mode_t = 0o0100; -pub const S_IWRITE: mode_t = 0o0200; -pub const S_IREAD: mode_t = 0o0400; - -// sys/msg.h -pub const MSG_NOERROR: c_int = 0o10000; - -// sys/m_signal.h -pub const SIGSTKSZ: size_t = 4096; -pub const MINSIGSTKSZ: size_t = 1200; - -// sys/params.h -pub const MAXPATHLEN: c_int = PATH_MAX + 1; -pub const MAXSYMLINKS: c_int = 20; -pub const MAXHOSTNAMELEN: c_int = 256; -pub const MAXUPRC: c_int = 128; -pub const NGROUPS_MAX: c_ulong = 2048; -pub const NGROUPS: c_ulong = NGROUPS_MAX; -pub const NOFILE: c_int = OPEN_MAX; - -// sys/poll.h -pub const POLLIN: c_short = 0x0001; 
-pub const POLLPRI: c_short = 0x0004; -pub const POLLOUT: c_short = 0x0002; -pub const POLLERR: c_short = 0x4000; -pub const POLLHUP: c_short = 0x2000; -pub const POLLMSG: c_short = 0x0080; -pub const POLLSYNC: c_short = 0x8000; -pub const POLLNVAL: c_short = POLLSYNC; -pub const POLLNORM: c_short = POLLIN; -pub const POLLRDNORM: c_short = 0x0010; -pub const POLLWRNORM: c_short = POLLOUT; -pub const POLLRDBAND: c_short = 0x0020; -pub const POLLWRBAND: c_short = 0x0040; - -// sys/pollset.h -pub const PS_ADD: c_uchar = 0; -pub const PS_MOD: c_uchar = 1; -pub const PS_DELETE: c_uchar = 2; -pub const PS_REPLACE: c_uchar = 3; - -// sys/ptrace.h -pub const PT_TRACE_ME: c_int = 0; -pub const PT_READ_I: c_int = 1; -pub const PT_READ_D: c_int = 2; -pub const PT_WRITE_I: c_int = 4; -pub const PT_WRITE_D: c_int = 5; -pub const PT_CONTINUE: c_int = 7; -pub const PT_KILL: c_int = 8; -pub const PT_STEP: c_int = 9; -pub const PT_READ_GPR: c_int = 11; -pub const PT_READ_FPR: c_int = 12; -pub const PT_WRITE_GPR: c_int = 14; -pub const PT_WRITE_FPR: c_int = 15; -pub const PT_READ_BLOCK: c_int = 17; -pub const PT_WRITE_BLOCK: c_int = 19; -pub const PT_ATTACH: c_int = 30; -pub const PT_DETACH: c_int = 31; -pub const PT_REGSET: c_int = 32; -pub const PT_REATT: c_int = 33; -pub const PT_LDINFO: c_int = 34; -pub const PT_MULTI: c_int = 35; -pub const PT_NEXT: c_int = 36; -pub const PT_SET: c_int = 37; -pub const PT_CLEAR: c_int = 38; -pub const PT_LDXINFO: c_int = 39; -pub const PT_QUERY: c_int = 40; -pub const PT_WATCH: c_int = 41; -pub const PTT_CONTINUE: c_int = 50; -pub const PTT_STEP: c_int = 51; -pub const PTT_READ_SPRS: c_int = 52; -pub const PTT_WRITE_SPRS: c_int = 53; -pub const PTT_READ_GPRS: c_int = 54; -pub const PTT_WRITE_GPRS: c_int = 55; -pub const PTT_READ_FPRS: c_int = 56; -pub const PTT_WRITE_FPRS: c_int = 57; -pub const PTT_READ_VEC: c_int = 58; -pub const PTT_WRITE_VEC: c_int = 59; -pub const PTT_WATCH: c_int = 60; -pub const PTT_SET_TRAP: c_int = 61; -pub const PTT_CLEAR_TRAP: c_int = 62; -pub const PTT_READ_UKEYSET: c_int = 63; -pub const PT_GET_UKEY: c_int = 64; -pub const PTT_READ_FPSCR_HI: c_int = 65; -pub const PTT_WRITE_FPSCR_HI: c_int = 66; -pub const PTT_READ_VSX: c_int = 67; -pub const PTT_WRITE_VSX: c_int = 68; -pub const PTT_READ_TM: c_int = 69; -pub const PTRACE_ATTACH: c_int = 14; -pub const PTRACE_CONT: c_int = 7; -pub const PTRACE_DETACH: c_int = 15; -pub const PTRACE_GETFPREGS: c_int = 12; -pub const PTRACE_GETREGS: c_int = 10; -pub const PTRACE_KILL: c_int = 8; -pub const PTRACE_PEEKDATA: c_int = 2; -pub const PTRACE_PEEKTEXT: c_int = 1; -pub const PTRACE_PEEKUSER: c_int = 3; -pub const PTRACE_POKEDATA: c_int = 5; -pub const PTRACE_POKETEXT: c_int = 4; -pub const PTRACE_POKEUSER: c_int = 6; -pub const PTRACE_SETFPREGS: c_int = 13; -pub const PTRACE_SETREGS: c_int = 11; -pub const PTRACE_SINGLESTEP: c_int = 9; -pub const PTRACE_SYSCALL: c_int = 16; -pub const PTRACE_TRACEME: c_int = 0; - -// sys/resource.h -pub const RLIMIT_CPU: c_int = 0; -pub const RLIMIT_FSIZE: c_int = 1; -pub const RLIMIT_DATA: c_int = 2; -pub const RLIMIT_STACK: c_int = 3; -pub const RLIMIT_CORE: c_int = 4; -pub const RLIMIT_RSS: c_int = 5; -pub const RLIMIT_AS: c_int = 6; -pub const RLIMIT_NOFILE: c_int = 7; -pub const RLIMIT_THREADS: c_int = 8; -pub const RLIMIT_NPROC: c_int = 9; -pub const RUSAGE_SELF: c_int = 0; -pub const RUSAGE_CHILDREN: c_int = -1; -pub const PRIO_PROCESS: c_int = 0; -pub const PRIO_PGRP: c_int = 1; -pub const PRIO_USER: c_int = 2; -pub const RUSAGE_THREAD: c_int = 1; -pub const 
RLIM_SAVED_MAX: c_ulong = RLIM_INFINITY - 1; -pub const RLIM_SAVED_CUR: c_ulong = RLIM_INFINITY - 2; -#[deprecated(since = "0.2.64", note = "Not stable across OS versions")] -pub const RLIM_NLIMITS: c_int = 10; - -// sys/sched.h -pub const SCHED_OTHER: c_int = 0; -pub const SCHED_FIFO: c_int = 1; -pub const SCHED_RR: c_int = 2; -pub const SCHED_LOCAL: c_int = 3; -pub const SCHED_GLOBAL: c_int = 4; -pub const SCHED_FIFO2: c_int = 5; -pub const SCHED_FIFO3: c_int = 6; -pub const SCHED_FIFO4: c_int = 7; - -// sys/sem.h -pub const SEM_UNDO: c_int = 0o10000; -pub const GETNCNT: c_int = 3; -pub const GETPID: c_int = 4; -pub const GETVAL: c_int = 5; -pub const GETALL: c_int = 6; -pub const GETZCNT: c_int = 7; -pub const SETVAL: c_int = 8; -pub const SETALL: c_int = 9; - -// sys/shm.h -pub const SHMLBA: c_int = 0x10000000; -pub const SHMLBA_EXTSHM: c_int = 0x1000; -pub const SHM_SHMAT: c_int = 0x80000000; -pub const SHM_RDONLY: c_int = 0o10000; -pub const SHM_RND: c_int = 0o20000; -pub const SHM_PIN: c_int = 0o4000; -pub const SHM_LGPAGE: c_int = 0o20000000000; -pub const SHM_MAP: c_int = 0o4000; -pub const SHM_FMAP: c_int = 0o2000; -pub const SHM_COPY: c_int = 0o40000; -pub const SHM_CLEAR: c_int = 0; -pub const SHM_HGSEG: c_int = 0o10000000000; -pub const SHM_R: c_int = IPC_R; -pub const SHM_W: c_int = IPC_W; -pub const SHM_DEST: c_int = 0o2000; - -// sys/signal.h -pub const SA_ONSTACK: c_int = 0x00000001; -pub const SA_RESETHAND: c_int = 0x00000002; -pub const SA_RESTART: c_int = 0x00000008; -pub const SA_SIGINFO: c_int = 0x00000100; -pub const SA_NODEFER: c_int = 0x00000200; -pub const SA_NOCLDWAIT: c_int = 0x00000400; -pub const SA_NOCLDSTOP: c_int = 0x00000004; -pub const SS_ONSTACK: c_int = 0x00000001; -pub const SS_DISABLE: c_int = 0x00000002; -pub const SIGCHLD: c_int = 20; -pub const SIGBUS: c_int = 10; -pub const SIG_BLOCK: c_int = 0; -pub const SIG_UNBLOCK: c_int = 1; -pub const SIG_SETMASK: c_int = 2; -pub const SIGEV_NONE: c_int = 1; -pub const SIGEV_SIGNAL: c_int = 2; -pub const SIGEV_THREAD: c_int = 3; -pub const SIGHUP: c_int = 1; -pub const SIGINT: c_int = 2; -pub const SIGQUIT: c_int = 3; -pub const SIGILL: c_int = 4; -pub const SIGABRT: c_int = 6; -pub const SIGEMT: c_int = 7; -pub const SIGFPE: c_int = 8; -pub const SIGKILL: c_int = 9; -pub const SIGSEGV: c_int = 11; -pub const SIGSYS: c_int = 12; -pub const SIGPIPE: c_int = 13; -pub const SIGALRM: c_int = 14; -pub const SIGTERM: c_int = 15; -pub const SIGUSR1: c_int = 30; -pub const SIGUSR2: c_int = 31; -pub const SIGPWR: c_int = 29; -pub const SIGWINCH: c_int = 28; -pub const SIGURG: c_int = 16; -pub const SIGPOLL: c_int = SIGIO; -pub const SIGIO: c_int = 23; -pub const SIGSTOP: c_int = 17; -pub const SIGTSTP: c_int = 18; -pub const SIGCONT: c_int = 19; -pub const SIGTTIN: c_int = 21; -pub const SIGTTOU: c_int = 22; -pub const SIGVTALRM: c_int = 34; -pub const SIGPROF: c_int = 32; -pub const SIGXCPU: c_int = 24; -pub const SIGXFSZ: c_int = 25; -pub const SIGTRAP: c_int = 5; -pub const SIGCLD: c_int = 20; -pub const SIGRTMAX: c_int = 57; -pub const SIGRTMIN: c_int = 50; -pub const SI_USER: c_int = 0; -pub const SI_UNDEFINED: c_int = 8; -pub const SI_EMPTY: c_int = 9; -pub const BUS_ADRALN: c_int = 1; -pub const BUS_ADRERR: c_int = 2; -pub const BUS_OBJERR: c_int = 3; -pub const BUS_UEGARD: c_int = 4; -pub const CLD_EXITED: c_int = 10; -pub const CLD_KILLED: c_int = 11; -pub const CLD_DUMPED: c_int = 12; -pub const CLD_TRAPPED: c_int = 13; -pub const CLD_STOPPED: c_int = 14; -pub const CLD_CONTINUED: c_int = 15; -pub const 
FPE_INTDIV: c_int = 20; -pub const FPE_INTOVF: c_int = 21; -pub const FPE_FLTDIV: c_int = 22; -pub const FPE_FLTOVF: c_int = 23; -pub const FPE_FLTUND: c_int = 24; -pub const FPE_FLTRES: c_int = 25; -pub const FPE_FLTINV: c_int = 26; -pub const FPE_FLTSUB: c_int = 27; -pub const ILL_ILLOPC: c_int = 30; -pub const ILL_ILLOPN: c_int = 31; -pub const ILL_ILLADR: c_int = 32; -pub const ILL_ILLTRP: c_int = 33; -pub const ILL_PRVOPC: c_int = 34; -pub const ILL_PRVREG: c_int = 35; -pub const ILL_COPROC: c_int = 36; -pub const ILL_BADSTK: c_int = 37; -pub const ILL_TMBADTHING: c_int = 38; -pub const POLL_IN: c_int = 40; -pub const POLL_OUT: c_int = 41; -pub const POLL_MSG: c_int = -3; -pub const POLL_ERR: c_int = 43; -pub const POLL_PRI: c_int = 44; -pub const POLL_HUP: c_int = 45; -pub const SEGV_MAPERR: c_int = 50; -pub const SEGV_ACCERR: c_int = 51; -pub const SEGV_KEYERR: c_int = 52; -pub const TRAP_BRKPT: c_int = 60; -pub const TRAP_TRACE: c_int = 61; -pub const SI_QUEUE: c_int = 71; -pub const SI_TIMER: c_int = 72; -pub const SI_ASYNCIO: c_int = 73; -pub const SI_MESGQ: c_int = 74; - -// sys/socket.h -pub const AF_UNSPEC: c_int = 0; -pub const AF_UNIX: c_int = 1; -pub const AF_INET: c_int = 2; -pub const AF_IMPLINK: c_int = 3; -pub const AF_PUP: c_int = 4; -pub const AF_CHAOS: c_int = 5; -pub const AF_NS: c_int = 6; -pub const AF_ECMA: c_int = 8; -pub const AF_DATAKIT: c_int = 9; -pub const AF_CCITT: c_int = 10; -pub const AF_SNA: c_int = 11; -pub const AF_DECnet: c_int = 12; -pub const AF_DLI: c_int = 13; -pub const AF_LAT: c_int = 14; -pub const SO_TIMESTAMPNS: c_int = 0x100a; -pub const SOMAXCONN: c_int = 1024; -pub const AF_LOCAL: c_int = AF_UNIX; -pub const UIO_MAXIOV: c_int = 1024; -pub const pseudo_AF_XTP: c_int = 19; -pub const AF_HYLINK: c_int = 15; -pub const AF_APPLETALK: c_int = 16; -pub const AF_ISO: c_int = 7; -pub const AF_OSI: c_int = AF_ISO; -pub const AF_ROUTE: c_int = 17; -pub const AF_LINK: c_int = 18; -pub const AF_INET6: c_int = 24; -pub const AF_INTF: c_int = 20; -pub const AF_RIF: c_int = 21; -pub const AF_NDD: c_int = 23; -pub const AF_MAX: c_int = 30; -pub const PF_UNSPEC: c_int = AF_UNSPEC; -pub const PF_UNIX: c_int = AF_UNIX; -pub const PF_INET: c_int = AF_INET; -pub const PF_IMPLINK: c_int = AF_IMPLINK; -pub const PF_PUP: c_int = AF_PUP; -pub const PF_CHAOS: c_int = AF_CHAOS; -pub const PF_NS: c_int = AF_NS; -pub const PF_ISO: c_int = AF_ISO; -pub const PF_OSI: c_int = AF_ISO; -pub const PF_ECMA: c_int = AF_ECMA; -pub const PF_DATAKIT: c_int = AF_DATAKIT; -pub const PF_CCITT: c_int = AF_CCITT; -pub const PF_SNA: c_int = AF_SNA; -pub const PF_DECnet: c_int = AF_DECnet; -pub const PF_DLI: c_int = AF_DLI; -pub const PF_LAT: c_int = AF_LAT; -pub const PF_HYLINK: c_int = AF_HYLINK; -pub const PF_APPLETALK: c_int = AF_APPLETALK; -pub const PF_ROUTE: c_int = AF_ROUTE; -pub const PF_LINK: c_int = AF_LINK; -pub const PF_XTP: c_int = 19; -pub const PF_RIF: c_int = AF_RIF; -pub const PF_INTF: c_int = AF_INTF; -pub const PF_NDD: c_int = AF_NDD; -pub const PF_INET6: c_int = AF_INET6; -pub const PF_MAX: c_int = AF_MAX; -pub const SF_CLOSE: c_int = 1; -pub const SF_REUSE: c_int = 2; -pub const SF_DONT_CACHE: c_int = 4; -pub const SF_SYNC_CACHE: c_int = 8; -pub const SOCK_DGRAM: c_int = 2; -pub const SOCK_STREAM: c_int = 1; -pub const SOCK_RAW: c_int = 3; -pub const SOCK_RDM: c_int = 4; -pub const SOCK_SEQPACKET: c_int = 5; -pub const SOL_SOCKET: c_int = 0xffff; -pub const SO_DEBUG: c_int = 0x0001; -pub const SO_ACCEPTCONN: c_int = 0x0002; -pub const SO_REUSEADDR: c_int = 
0x0004; -pub const SO_KEEPALIVE: c_int = 0x0008; -pub const SO_DONTROUTE: c_int = 0x0010; -pub const SO_BROADCAST: c_int = 0x0020; -pub const SO_USELOOPBACK: c_int = 0x0040; -pub const SO_LINGER: c_int = 0x0080; -pub const SO_OOBINLINE: c_int = 0x0100; -pub const SO_REUSEPORT: c_int = 0x0200; -pub const SO_USE_IFBUFS: c_int = 0x0400; -pub const SO_CKSUMRECV: c_int = 0x0800; -pub const SO_NOREUSEADDR: c_int = 0x1000; -pub const SO_KERNACCEPT: c_int = 0x2000; -pub const SO_NOMULTIPATH: c_int = 0x4000; -pub const SO_AUDIT: c_int = 0x8000; -pub const SO_SNDBUF: c_int = 0x1001; -pub const SO_RCVBUF: c_int = 0x1002; -pub const SO_SNDLOWAT: c_int = 0x1003; -pub const SO_RCVLOWAT: c_int = 0x1004; -pub const SO_SNDTIMEO: c_int = 0x1005; -pub const SO_RCVTIMEO: c_int = 0x1006; -pub const SO_ERROR: c_int = 0x1007; -pub const SO_TYPE: c_int = 0x1008; -pub const SCM_RIGHTS: c_int = 0x01; -pub const MSG_OOB: c_int = 0x1; -pub const MSG_PEEK: c_int = 0x2; -pub const MSG_DONTROUTE: c_int = 0x4; -pub const MSG_EOR: c_int = 0x8; -pub const MSG_TRUNC: c_int = 0x10; -pub const MSG_CTRUNC: c_int = 0x20; -pub const MSG_WAITALL: c_int = 0x40; -pub const MSG_MPEG2: c_int = 0x80; -pub const MSG_NOSIGNAL: c_int = 0x100; -pub const MSG_WAITFORONE: c_int = 0x200; -pub const MSG_ARGEXT: c_int = 0x400; -pub const MSG_NONBLOCK: c_int = 0x4000; -pub const MSG_COMPAT: c_int = 0x8000; -pub const MSG_MAXIOVLEN: c_int = 16; -pub const SHUT_RD: c_int = 0; -pub const SHUT_WR: c_int = 1; -pub const SHUT_RDWR: c_int = 2; - -// sys/stat.h -pub const UTIME_NOW: c_int = -2; -pub const UTIME_OMIT: c_int = -3; - -// sys/statvfs.h -pub const ST_RDONLY: c_ulong = 0x0001; -pub const ST_NOSUID: c_ulong = 0x0040; -pub const ST_NODEV: c_ulong = 0x0080; - -// sys/stropts.h -pub const I_NREAD: c_int = 0x20005301; -pub const I_PUSH: c_int = 0x20005302; -pub const I_POP: c_int = 0x20005303; -pub const I_LOOK: c_int = 0x20005304; -pub const I_FLUSH: c_int = 0x20005305; -pub const I_SRDOPT: c_int = 0x20005306; -pub const I_GRDOPT: c_int = 0x20005307; -pub const I_STR: c_int = 0x20005308; -pub const I_SETSIG: c_int = 0x20005309; -pub const I_GETSIG: c_int = 0x2000530a; -pub const I_FIND: c_int = 0x2000530b; -pub const I_LINK: c_int = 0x2000530c; -pub const I_UNLINK: c_int = 0x2000530d; -pub const I_PEEK: c_int = 0x2000530f; -pub const I_FDINSERT: c_int = 0x20005310; -pub const I_SENDFD: c_int = 0x20005311; -pub const I_RECVFD: c_int = 0x20005312; -pub const I_SWROPT: c_int = 0x20005314; -pub const I_GWROPT: c_int = 0x20005315; -pub const I_LIST: c_int = 0x20005316; -pub const I_PLINK: c_int = 0x2000531d; -pub const I_PUNLINK: c_int = 0x2000531e; -pub const I_FLUSHBAND: c_int = 0x20005313; -pub const I_CKBAND: c_int = 0x20005318; -pub const I_GETBAND: c_int = 0x20005319; -pub const I_ATMARK: c_int = 0x20005317; -pub const I_SETCLTIME: c_int = 0x2000531b; -pub const I_GETCLTIME: c_int = 0x2000531c; -pub const I_CANPUT: c_int = 0x2000531a; - -// sys/syslog.h -pub const LOG_CRON: c_int = 9 << 3; -pub const LOG_AUTHPRIV: c_int = 10 << 3; -pub const LOG_NFACILITIES: c_int = 24; -pub const LOG_PERROR: c_int = 0x20; - -// sys/systemcfg.h -pub const SC_ARCH: c_int = 1; -pub const SC_IMPL: c_int = 2; -pub const SC_VERS: c_int = 3; -pub const SC_WIDTH: c_int = 4; -pub const SC_NCPUS: c_int = 5; -pub const SC_L1C_ATTR: c_int = 6; -pub const SC_L1C_ISZ: c_int = 7; -pub const SC_L1C_DSZ: c_int = 8; -pub const SC_L1C_ICA: c_int = 9; -pub const SC_L1C_DCA: c_int = 10; -pub const SC_L1C_IBS: c_int = 11; -pub const SC_L1C_DBS: c_int = 12; -pub const SC_L1C_ILS: 
c_int = 13; -pub const SC_L1C_DLS: c_int = 14; -pub const SC_L2C_SZ: c_int = 15; -pub const SC_L2C_AS: c_int = 16; -pub const SC_TLB_ATTR: c_int = 17; -pub const SC_ITLB_SZ: c_int = 18; -pub const SC_DTLB_SZ: c_int = 19; -pub const SC_ITLB_ATT: c_int = 20; -pub const SC_DTLB_ATT: c_int = 21; -pub const SC_RESRV_SZ: c_int = 22; -pub const SC_PRI_LC: c_int = 23; -pub const SC_PRO_LC: c_int = 24; -pub const SC_RTC_TYPE: c_int = 25; -pub const SC_VIRT_AL: c_int = 26; -pub const SC_CAC_CONG: c_int = 27; -pub const SC_MOD_ARCH: c_int = 28; -pub const SC_MOD_IMPL: c_int = 29; -pub const SC_XINT: c_int = 30; -pub const SC_XFRAC: c_int = 31; -pub const SC_KRN_ATTR: c_int = 32; -pub const SC_PHYSMEM: c_int = 33; -pub const SC_SLB_ATTR: c_int = 34; -pub const SC_SLB_SZ: c_int = 35; -pub const SC_MAX_NCPUS: c_int = 37; -pub const SC_MAX_REALADDR: c_int = 38; -pub const SC_ORIG_ENT_CAP: c_int = 39; -pub const SC_ENT_CAP: c_int = 40; -pub const SC_DISP_WHE: c_int = 41; -pub const SC_CAPINC: c_int = 42; -pub const SC_VCAPW: c_int = 43; -pub const SC_SPLP_STAT: c_int = 44; -pub const SC_SMT_STAT: c_int = 45; -pub const SC_SMT_TC: c_int = 46; -pub const SC_VMX_VER: c_int = 47; -pub const SC_LMB_SZ: c_int = 48; -pub const SC_MAX_XCPU: c_int = 49; -pub const SC_EC_LVL: c_int = 50; -pub const SC_AME_STAT: c_int = 51; -pub const SC_ECO_STAT: c_int = 52; -pub const SC_DFP_VER: c_int = 53; -pub const SC_VRM_STAT: c_int = 54; -pub const SC_PHYS_IMP: c_int = 55; -pub const SC_PHYS_VER: c_int = 56; -pub const SC_SPCM_STATUS: c_int = 57; -pub const SC_SPCM_MAX: c_int = 58; -pub const SC_TM_VER: c_int = 59; -pub const SC_NX_CAP: c_int = 60; -pub const SC_PKS_STATE: c_int = 61; -pub const SC_MMA_VER: c_int = 62; -pub const POWER_RS: c_int = 1; -pub const POWER_PC: c_int = 2; -pub const IA64: c_int = 3; -pub const POWER_RS1: c_int = 0x1; -pub const POWER_RSC: c_int = 0x2; -pub const POWER_RS2: c_int = 0x4; -pub const POWER_601: c_int = 0x8; -pub const POWER_604: c_int = 0x10; -pub const POWER_603: c_int = 0x20; -pub const POWER_620: c_int = 0x40; -pub const POWER_630: c_int = 0x80; -pub const POWER_A35: c_int = 0x100; -pub const POWER_RS64II: c_int = 0x200; -pub const POWER_RS64III: c_int = 0x400; -pub const POWER_4: c_int = 0x800; -pub const POWER_RS64IV: c_int = POWER_4; -pub const POWER_MPC7450: c_int = 0x1000; -pub const POWER_5: c_int = 0x2000; -pub const POWER_6: c_int = 0x4000; -pub const POWER_7: c_int = 0x8000; -pub const POWER_8: c_int = 0x10000; -pub const POWER_9: c_int = 0x20000; - -// sys/time.h -pub const FD_SETSIZE: usize = 65534; -pub const TIMEOFDAY: c_int = 9; -pub const CLOCK_REALTIME: crate::clockid_t = TIMEOFDAY as clockid_t; -pub const CLOCK_MONOTONIC: crate::clockid_t = 10; -pub const TIMER_ABSTIME: c_int = 999; -pub const ITIMER_REAL: c_int = 0; -pub const ITIMER_VIRTUAL: c_int = 1; -pub const ITIMER_PROF: c_int = 2; -pub const ITIMER_VIRT: c_int = 3; -pub const ITIMER_REAL1: c_int = 20; -pub const ITIMER_REAL_TH: c_int = ITIMER_REAL1; -pub const DST_AUST: c_int = 2; -pub const DST_CAN: c_int = 6; -pub const DST_EET: c_int = 5; -pub const DST_MET: c_int = 4; -pub const DST_NONE: c_int = 0; -pub const DST_USA: c_int = 1; -pub const DST_WET: c_int = 3; - -// sys/termio.h -pub const CSTART: crate::tcflag_t = 0o21; -pub const CSTOP: crate::tcflag_t = 0o23; -pub const TCGETA: c_int = TIOC | 5; -pub const TCSETA: c_int = TIOC | 6; -pub const TCSETAW: c_int = TIOC | 7; -pub const TCSETAF: c_int = TIOC | 8; -pub const TCSBRK: c_int = TIOC | 9; -pub const TCXONC: c_int = TIOC | 11; -pub const TCFLSH: 
c_int = TIOC | 12; -pub const TCGETS: c_int = TIOC | 1; -pub const TCSETS: c_int = TIOC | 2; -pub const TCSANOW: c_int = 0; -pub const TCSETSW: c_int = TIOC | 3; -pub const TCSADRAIN: c_int = 1; -pub const TCSETSF: c_int = TIOC | 4; -pub const TCSAFLUSH: c_int = 2; -pub const TCIFLUSH: c_int = 0; -pub const TCOFLUSH: c_int = 1; -pub const TCIOFLUSH: c_int = 2; -pub const TCOOFF: c_int = 0; -pub const TCOON: c_int = 1; -pub const TCIOFF: c_int = 2; -pub const TCION: c_int = 3; -pub const TIOC: c_int = 0x5400; -pub const TIOCGWINSZ: c_int = 0x40087468; -pub const TIOCSWINSZ: c_int = 0x80087467; -pub const TIOCLBIS: c_int = 0x8004747f; -pub const TIOCLBIC: c_int = 0x8004747e; -pub const TIOCLSET: c_int = 0x8004747d; -pub const TIOCLGET: c_int = 0x4004747c; -pub const TIOCSBRK: c_int = 0x2000747b; -pub const TIOCCBRK: c_int = 0x2000747a; -pub const TIOCSDTR: c_int = 0x20007479; -pub const TIOCCDTR: c_int = 0x20007478; -pub const TIOCSLTC: c_int = 0x80067475; -pub const TIOCGLTC: c_int = 0x40067474; -pub const TIOCOUTQ: c_int = 0x40047473; -pub const TIOCNOTTY: c_int = 0x20007471; -pub const TIOCSTOP: c_int = 0x2000746f; -pub const TIOCSTART: c_int = 0x2000746e; -pub const TIOCGPGRP: c_int = 0x40047477; -pub const TIOCSPGRP: c_int = 0x80047476; -pub const TIOCGSID: c_int = 0x40047448; -pub const TIOCSTI: c_int = 0x80017472; -pub const TIOCMSET: c_int = 0x8004746d; -pub const TIOCMBIS: c_int = 0x8004746c; -pub const TIOCMBIC: c_int = 0x8004746b; -pub const TIOCMGET: c_int = 0x4004746a; -pub const TIOCREMOTE: c_int = 0x80047469; - -// sys/user.h -pub const MAXCOMLEN: c_int = 32; -pub const UF_SYSTEM: c_int = 0x1000; - -// sys/vattr.h -pub const AT_FLAGS: c_int = 0x80; -pub const AT_GID: c_int = 8; -pub const AT_UID: c_int = 4; - -// sys/wait.h -pub const P_ALL: idtype_t = 0; -pub const P_PID: idtype_t = 1; -pub const P_PGID: idtype_t = 2; -pub const WNOHANG: c_int = 0x1; -pub const WUNTRACED: c_int = 0x2; -pub const WEXITED: c_int = 0x04; -pub const WCONTINUED: c_int = 0x01000000; -pub const WNOWAIT: c_int = 0x10; -pub const WSTOPPED: c_int = _W_STOPPED; -pub const _W_STOPPED: c_int = 0x00000040; -pub const _W_SLWTED: c_int = 0x0000007c; -pub const _W_SEWTED: c_int = 0x0000007d; -pub const _W_SFWTED: c_int = 0x0000007e; -pub const _W_STRC: c_int = 0x0000007f; - -// termios.h -pub const NCCS: usize = 16; -pub const OLCUC: crate::tcflag_t = 2; -pub const CSIZE: crate::tcflag_t = 0x00000030; -pub const CS5: crate::tcflag_t = 0x00000000; -pub const CS6: crate::tcflag_t = 0x00000010; -pub const CS7: crate::tcflag_t = 0x00000020; -pub const CS8: crate::tcflag_t = 0x00000030; -pub const CSTOPB: crate::tcflag_t = 0x00000040; -pub const ECHO: crate::tcflag_t = 0x00000008; -pub const ECHOE: crate::tcflag_t = 0x00000010; -pub const ECHOK: crate::tcflag_t = 0x00000020; -pub const ECHONL: crate::tcflag_t = 0x00000040; -pub const ECHOCTL: crate::tcflag_t = 0x00020000; -pub const ECHOPRT: crate::tcflag_t = 0x00040000; -pub const ECHOKE: crate::tcflag_t = 0x00080000; -pub const IGNBRK: crate::tcflag_t = 0x00000001; -pub const BRKINT: crate::tcflag_t = 0x00000002; -pub const IGNPAR: crate::tcflag_t = 0x00000004; -pub const PARMRK: crate::tcflag_t = 0x00000008; -pub const INPCK: crate::tcflag_t = 0x00000010; -pub const ISTRIP: crate::tcflag_t = 0x00000020; -pub const INLCR: crate::tcflag_t = 0x00000040; -pub const IGNCR: crate::tcflag_t = 0x00000080; -pub const ICRNL: crate::tcflag_t = 0x00000100; -pub const IXON: crate::tcflag_t = 0x00000200; -pub const IXOFF: crate::tcflag_t = 0x00000400; -pub const IXANY: 
crate::tcflag_t = 0x00001000; -pub const IMAXBEL: crate::tcflag_t = 0x00010000; -pub const OPOST: crate::tcflag_t = 0x00000001; -pub const ONLCR: crate::tcflag_t = 0x00000004; -pub const OCRNL: crate::tcflag_t = 0x00000008; -pub const ONOCR: crate::tcflag_t = 0x00000010; -pub const ONLRET: crate::tcflag_t = 0x00000020; -pub const CREAD: crate::tcflag_t = 0x00000080; -pub const IEXTEN: crate::tcflag_t = 0x00200000; -pub const TOSTOP: crate::tcflag_t = 0x00010000; -pub const FLUSHO: crate::tcflag_t = 0x00100000; -pub const PENDIN: crate::tcflag_t = 0x20000000; -pub const NOFLSH: crate::tcflag_t = 0x00000080; -pub const VINTR: usize = 0; -pub const VQUIT: usize = 1; -pub const VERASE: usize = 2; -pub const VKILL: usize = 3; -pub const VEOF: usize = 4; -pub const VEOL: usize = 5; -pub const VSTART: usize = 7; -pub const VSTOP: usize = 8; -pub const VSUSP: usize = 9; -pub const VMIN: usize = 4; -pub const VTIME: usize = 5; -pub const VEOL2: usize = 6; -pub const VDSUSP: usize = 10; -pub const VREPRINT: usize = 11; -pub const VDISCRD: usize = 12; -pub const VWERSE: usize = 13; -pub const VLNEXT: usize = 14; -pub const B0: crate::speed_t = 0x0; -pub const B50: crate::speed_t = 0x1; -pub const B75: crate::speed_t = 0x2; -pub const B110: crate::speed_t = 0x3; -pub const B134: crate::speed_t = 0x4; -pub const B150: crate::speed_t = 0x5; -pub const B200: crate::speed_t = 0x6; -pub const B300: crate::speed_t = 0x7; -pub const B600: crate::speed_t = 0x8; -pub const B1200: crate::speed_t = 0x9; -pub const B1800: crate::speed_t = 0xa; -pub const B2400: crate::speed_t = 0xb; -pub const B4800: crate::speed_t = 0xc; -pub const B9600: crate::speed_t = 0xd; -pub const B19200: crate::speed_t = 0xe; -pub const B38400: crate::speed_t = 0xf; -pub const EXTA: crate::speed_t = B19200; -pub const EXTB: crate::speed_t = B38400; -pub const IUCLC: crate::tcflag_t = 0x00000800; -pub const OFILL: crate::tcflag_t = 0x00000040; -pub const OFDEL: crate::tcflag_t = 0x00000080; -pub const CRDLY: crate::tcflag_t = 0x00000300; -pub const CR0: crate::tcflag_t = 0x00000000; -pub const CR1: crate::tcflag_t = 0x00000100; -pub const CR2: crate::tcflag_t = 0x00000200; -pub const CR3: crate::tcflag_t = 0x00000300; -pub const TABDLY: crate::tcflag_t = 0x00000c00; -pub const TAB0: crate::tcflag_t = 0x00000000; -pub const TAB1: crate::tcflag_t = 0x00000400; -pub const TAB2: crate::tcflag_t = 0x00000800; -pub const TAB3: crate::tcflag_t = 0x00000c00; -pub const BSDLY: crate::tcflag_t = 0x00001000; -pub const BS0: crate::tcflag_t = 0x00000000; -pub const BS1: crate::tcflag_t = 0x00001000; -pub const FFDLY: crate::tcflag_t = 0x00002000; -pub const FF0: crate::tcflag_t = 0x00000000; -pub const FF1: crate::tcflag_t = 0x00002000; -pub const NLDLY: crate::tcflag_t = 0x00004000; -pub const NL0: crate::tcflag_t = 0x00000000; -pub const NL1: crate::tcflag_t = 0x00004000; -pub const VTDLY: crate::tcflag_t = 0x00008000; -pub const VT0: crate::tcflag_t = 0x00000000; -pub const VT1: crate::tcflag_t = 0x00008000; -pub const OXTABS: crate::tcflag_t = 0x00040000; -pub const ONOEOT: crate::tcflag_t = 0x00080000; -pub const CBAUD: crate::tcflag_t = 0x0000000f; -pub const PARENB: crate::tcflag_t = 0x00000100; -pub const PARODD: crate::tcflag_t = 0x00000200; -pub const HUPCL: crate::tcflag_t = 0x00000400; -pub const CLOCAL: crate::tcflag_t = 0x00000800; -pub const CIBAUD: crate::tcflag_t = 0x000f0000; -pub const IBSHIFT: crate::tcflag_t = 16; -pub const PAREXT: crate::tcflag_t = 0x00100000; -pub const ISIG: crate::tcflag_t = 0x00000001; -pub const ICANON: 
crate::tcflag_t = 0x00000002; -pub const XCASE: crate::tcflag_t = 0x00000004; -pub const ALTWERASE: crate::tcflag_t = 0x00400000; - -// time.h -pub const CLOCK_PROCESS_CPUTIME_ID: crate::clockid_t = 11; -pub const CLOCK_THREAD_CPUTIME_ID: crate::clockid_t = 12; - -// unistd.h -pub const STDIN_FILENO: c_int = 0; -pub const STDOUT_FILENO: c_int = 1; -pub const STDERR_FILENO: c_int = 2; -pub const _POSIX_VDISABLE: c_int = 0xff; -pub const _PC_LINK_MAX: c_int = 11; -pub const _PC_MAX_CANON: c_int = 12; -pub const _PC_MAX_INPUT: c_int = 13; -pub const _PC_NAME_MAX: c_int = 14; -pub const _PC_PATH_MAX: c_int = 16; -pub const _PC_PIPE_BUF: c_int = 17; -pub const _PC_NO_TRUNC: c_int = 15; -pub const _PC_VDISABLE: c_int = 18; -pub const _PC_CHOWN_RESTRICTED: c_int = 10; -pub const _PC_ASYNC_IO: c_int = 19; -pub const _PC_PRIO_IO: c_int = 21; -pub const _PC_SYNC_IO: c_int = 20; -pub const _PC_ALLOC_SIZE_MIN: c_int = 26; -pub const _PC_REC_INCR_XFER_SIZE: c_int = 27; -pub const _PC_REC_MAX_XFER_SIZE: c_int = 28; -pub const _PC_REC_MIN_XFER_SIZE: c_int = 29; -pub const _PC_REC_XFER_ALIGN: c_int = 30; -pub const _PC_SYMLINK_MAX: c_int = 25; -pub const _PC_2_SYMLINKS: c_int = 31; -pub const _PC_TIMESTAMP_RESOLUTION: c_int = 32; -pub const _PC_FILESIZEBITS: c_int = 22; -pub const _SC_ARG_MAX: c_int = 0; -pub const _SC_CHILD_MAX: c_int = 1; -pub const _SC_CLK_TCK: c_int = 2; -pub const _SC_NGROUPS_MAX: c_int = 3; -pub const _SC_OPEN_MAX: c_int = 4; -pub const _SC_JOB_CONTROL: c_int = 7; -pub const _SC_SAVED_IDS: c_int = 8; -pub const _SC_VERSION: c_int = 9; -pub const _SC_PASS_MAX: c_int = 45; -pub const _SC_PAGESIZE: c_int = _SC_PAGE_SIZE; -pub const _SC_PAGE_SIZE: c_int = 48; -pub const _SC_XOPEN_VERSION: c_int = 46; -pub const _SC_NPROCESSORS_CONF: c_int = 71; -pub const _SC_NPROCESSORS_ONLN: c_int = 72; -pub const _SC_STREAM_MAX: c_int = 5; -pub const _SC_TZNAME_MAX: c_int = 6; -pub const _SC_AIO_LISTIO_MAX: c_int = 75; -pub const _SC_AIO_MAX: c_int = 76; -pub const _SC_AIO_PRIO_DELTA_MAX: c_int = 77; -pub const _SC_ASYNCHRONOUS_IO: c_int = 78; -pub const _SC_DELAYTIMER_MAX: c_int = 79; -pub const _SC_FSYNC: c_int = 80; -pub const _SC_MAPPED_FILES: c_int = 84; -pub const _SC_MEMLOCK: c_int = 85; -pub const _SC_MEMLOCK_RANGE: c_int = 86; -pub const _SC_MEMORY_PROTECTION: c_int = 87; -pub const _SC_MESSAGE_PASSING: c_int = 88; -pub const _SC_MQ_OPEN_MAX: c_int = 89; -pub const _SC_MQ_PRIO_MAX: c_int = 90; -pub const _SC_PRIORITIZED_IO: c_int = 91; -pub const _SC_PRIORITY_SCHEDULING: c_int = 92; -pub const _SC_REALTIME_SIGNALS: c_int = 93; -pub const _SC_RTSIG_MAX: c_int = 94; -pub const _SC_SEMAPHORES: c_int = 95; -pub const _SC_SEM_NSEMS_MAX: c_int = 96; -pub const _SC_SEM_VALUE_MAX: c_int = 97; -pub const _SC_SHARED_MEMORY_OBJECTS: c_int = 98; -pub const _SC_SIGQUEUE_MAX: c_int = 99; -pub const _SC_SYNCHRONIZED_IO: c_int = 100; -pub const _SC_TIMERS: c_int = 102; -pub const _SC_TIMER_MAX: c_int = 103; -pub const _SC_2_C_BIND: c_int = 51; -pub const _SC_2_C_DEV: c_int = 32; -pub const _SC_2_C_VERSION: c_int = 52; -pub const _SC_2_FORT_DEV: c_int = 33; -pub const _SC_2_FORT_RUN: c_int = 34; -pub const _SC_2_LOCALEDEF: c_int = 35; -pub const _SC_2_SW_DEV: c_int = 36; -pub const _SC_2_UPE: c_int = 53; -pub const _SC_2_VERSION: c_int = 31; -pub const _SC_BC_BASE_MAX: c_int = 23; -pub const _SC_BC_DIM_MAX: c_int = 24; -pub const _SC_BC_SCALE_MAX: c_int = 25; -pub const _SC_BC_STRING_MAX: c_int = 26; -pub const _SC_COLL_WEIGHTS_MAX: c_int = 50; -pub const _SC_EXPR_NEST_MAX: c_int = 28; -pub const 
_SC_LINE_MAX: c_int = 29; -pub const _SC_RE_DUP_MAX: c_int = 30; -pub const _SC_XOPEN_CRYPT: c_int = 56; -pub const _SC_XOPEN_ENH_I18N: c_int = 57; -pub const _SC_XOPEN_SHM: c_int = 55; -pub const _SC_2_CHAR_TERM: c_int = 54; -pub const _SC_XOPEN_XCU_VERSION: c_int = 109; -pub const _SC_ATEXIT_MAX: c_int = 47; -pub const _SC_IOV_MAX: c_int = 58; -pub const _SC_XOPEN_UNIX: c_int = 73; -pub const _SC_T_IOV_MAX: c_int = 0; -pub const _SC_PHYS_PAGES: c_int = 113; -pub const _SC_AVPHYS_PAGES: c_int = 114; -pub const _SC_THREAD_DESTRUCTOR_ITERATIONS: c_int = 101; -pub const _SC_GETGR_R_SIZE_MAX: c_int = 81; -pub const _SC_GETPW_R_SIZE_MAX: c_int = 82; -pub const _SC_LOGIN_NAME_MAX: c_int = 83; -pub const _SC_THREAD_KEYS_MAX: c_int = 68; -pub const _SC_THREAD_STACK_MIN: c_int = 69; -pub const _SC_THREAD_THREADS_MAX: c_int = 70; -pub const _SC_TTY_NAME_MAX: c_int = 104; -pub const _SC_THREADS: c_int = 60; -pub const _SC_THREAD_ATTR_STACKADDR: c_int = 61; -pub const _SC_THREAD_ATTR_STACKSIZE: c_int = 62; -pub const _SC_THREAD_PRIORITY_SCHEDULING: c_int = 64; -pub const _SC_THREAD_PRIO_INHERIT: c_int = 65; -pub const _SC_THREAD_PRIO_PROTECT: c_int = 66; -pub const _SC_THREAD_PROCESS_SHARED: c_int = 67; -pub const _SC_THREAD_SAFE_FUNCTIONS: c_int = 59; -pub const _SC_XOPEN_LEGACY: c_int = 112; -pub const _SC_XOPEN_REALTIME: c_int = 110; -pub const _SC_XOPEN_REALTIME_THREADS: c_int = 111; -pub const _SC_XBS5_ILP32_OFF32: c_int = 105; -pub const _SC_XBS5_ILP32_OFFBIG: c_int = 106; -pub const _SC_XBS5_LP64_OFF64: c_int = 107; -pub const _SC_XBS5_LPBIG_OFFBIG: c_int = 108; -pub const _SC_2_PBS: c_int = 132; -pub const _SC_2_PBS_ACCOUNTING: c_int = 133; -pub const _SC_2_PBS_CHECKPOINT: c_int = 134; -pub const _SC_2_PBS_LOCATE: c_int = 135; -pub const _SC_2_PBS_MESSAGE: c_int = 136; -pub const _SC_2_PBS_TRACK: c_int = 137; -pub const _SC_ADVISORY_INFO: c_int = 130; -pub const _SC_BARRIERS: c_int = 138; -pub const _SC_CLOCK_SELECTION: c_int = 139; -pub const _SC_CPUTIME: c_int = 140; -pub const _SC_HOST_NAME_MAX: c_int = 126; -pub const _SC_MONOTONIC_CLOCK: c_int = 141; -pub const _SC_READER_WRITER_LOCKS: c_int = 142; -pub const _SC_REGEXP: c_int = 127; -pub const _SC_SHELL: c_int = 128; -pub const _SC_SPAWN: c_int = 143; -pub const _SC_SPIN_LOCKS: c_int = 144; -pub const _SC_SPORADIC_SERVER: c_int = 145; -pub const _SC_SS_REPL_MAX: c_int = 156; -pub const _SC_SYMLOOP_MAX: c_int = 129; -pub const _SC_THREAD_CPUTIME: c_int = 146; -pub const _SC_THREAD_SPORADIC_SERVER: c_int = 147; -pub const _SC_TIMEOUTS: c_int = 148; -pub const _SC_TRACE: c_int = 149; -pub const _SC_TRACE_EVENT_FILTER: c_int = 150; -pub const _SC_TRACE_EVENT_NAME_MAX: c_int = 157; -pub const _SC_TRACE_INHERIT: c_int = 151; -pub const _SC_TRACE_LOG: c_int = 152; -pub const _SC_TRACE_NAME_MAX: c_int = 158; -pub const _SC_TRACE_SYS_MAX: c_int = 159; -pub const _SC_TRACE_USER_EVENT_MAX: c_int = 160; -pub const _SC_TYPED_MEMORY_OBJECTS: c_int = 153; -pub const _SC_V6_ILP32_OFF32: c_int = 121; -pub const _SC_V6_ILP32_OFFBIG: c_int = 122; -pub const _SC_V6_LP64_OFF64: c_int = 123; -pub const _SC_V6_LPBIG_OFFBIG: c_int = 124; -pub const _SC_XOPEN_STREAMS: c_int = 125; -pub const _SC_IPV6: c_int = 154; -pub const _SC_RAW_SOCKETS: c_int = 155; - -// utmp.h -pub const EMPTY: c_short = 0; -pub const RUN_LVL: c_short = 1; -pub const BOOT_TIME: c_short = 2; -pub const OLD_TIME: c_short = 3; -pub const NEW_TIME: c_short = 4; -pub const INIT_PROCESS: c_short = 5; -pub const LOGIN_PROCESS: c_short = 6; -pub const USER_PROCESS: c_short = 7; -pub const 
DEAD_PROCESS: c_short = 8;
-pub const ACCOUNTING: c_short = 9;
-
-f! {
-    pub fn CMSG_FIRSTHDR(mhdr: *const msghdr) -> *mut cmsghdr {
-        if (*mhdr).msg_controllen as usize >= size_of::<cmsghdr>() {
-            (*mhdr).msg_control as *mut cmsghdr
-        } else {
-            core::ptr::null_mut::<cmsghdr>()
-        }
-    }
-
-    pub fn CMSG_NXTHDR(mhdr: *const msghdr, cmsg: *const cmsghdr) -> *mut cmsghdr {
-        if cmsg.is_null() {
-            CMSG_FIRSTHDR(mhdr)
-        } else {
-            if (cmsg as usize + (*cmsg).cmsg_len as usize + size_of::<cmsghdr>())
-                > ((*mhdr).msg_control as usize + (*mhdr).msg_controllen as usize)
-            {
-                core::ptr::null_mut::<cmsghdr>()
-            } else {
-                // AIX does not have any alignment/padding for ancillary data, so we don't need _CMSG_ALIGN here.
-                (cmsg as usize + (*cmsg).cmsg_len as usize) as *mut cmsghdr
-            }
-        }
-    }
-
-    pub fn CMSG_DATA(cmsg: *const cmsghdr) -> *mut c_uchar {
-        (cmsg as *mut c_uchar).offset(size_of::<cmsghdr>() as isize)
-    }
-
-    pub const fn CMSG_LEN(length: c_uint) -> c_uint {
-        size_of::<cmsghdr>() as c_uint + length
-    }
-
-    pub const fn CMSG_SPACE(length: c_uint) -> c_uint {
-        size_of::<cmsghdr>() as c_uint + length
-    }
-
-    pub fn FD_ZERO(set: *mut fd_set) -> () {
-        for slot in (*set).fds_bits.iter_mut() {
-            *slot = 0;
-        }
-    }
-
-    pub fn FD_SET(fd: c_int, set: *mut fd_set) -> () {
-        let bits = size_of::<c_long>() * 8;
-        let fd = fd as usize;
-        (*set).fds_bits[fd / bits] |= 1 << (fd % bits);
-        return;
-    }
-
-    pub fn FD_CLR(fd: c_int, set: *mut fd_set) -> () {
-        let bits = size_of::<c_long>() * 8;
-        let fd = fd as usize;
-        (*set).fds_bits[fd / bits] &= !(1 << (fd % bits));
-        return;
-    }
-
-    pub fn FD_ISSET(fd: c_int, set: *const fd_set) -> bool {
-        let bits = size_of::<c_long>() * 8;
-        let fd = fd as usize;
-        return ((*set).fds_bits[fd / bits] & (1 << (fd % bits))) != 0;
-    }
-}
-
-safe_f! {
-    pub const fn WIFSTOPPED(status: c_int) -> bool {
-        (status & _W_STOPPED) != 0
-    }
-
-    pub const fn WSTOPSIG(status: c_int) -> c_int {
-        if WIFSTOPPED(status) {
-            (((status as c_uint) >> 8) & 0xff) as c_int
-        } else {
-            -1
-        }
-    }
-
-    pub const fn WIFEXITED(status: c_int) -> bool {
-        (status & 0xFF) == 0
-    }
-
-    pub const fn WEXITSTATUS(status: c_int) -> c_int {
-        if WIFEXITED(status) {
-            (((status as c_uint) >> 8) & 0xff) as c_int
-        } else {
-            -1
-        }
-    }
-
-    pub const fn WIFSIGNALED(status: c_int) -> bool {
-        !WIFEXITED(status) && !WIFSTOPPED(status)
-    }
-
-    pub const fn WTERMSIG(status: c_int) -> c_int {
-        if WIFSIGNALED(status) {
-            (((status as c_uint) >> 16) & 0xff) as c_int
-        } else {
-            -1
-        }
-    }
-
-    pub const fn WIFCONTINUED(status: c_int) -> bool {
-        (status & WCONTINUED) != 0
-    }
-
-    // AIX doesn't have native WCOREDUMP.
- pub const fn WCOREDUMP(_status: c_int) -> bool { - false - } - - pub const fn major(dev: crate::dev_t) -> c_uint { - let x = dev >> 16; - x as c_uint - } - - pub const fn minor(dev: crate::dev_t) -> c_uint { - let y = dev & 0xFFFF; - y as c_uint - } - - pub const fn makedev(major: c_uint, minor: c_uint) -> crate::dev_t { - let major = major as crate::dev_t; - let minor = minor as crate::dev_t; - let mut dev = 0; - dev |= major << 16; - dev |= minor; - dev - } -} - -#[link(name = "thread")] -extern "C" { - pub fn thr_kill(id: thread_t, sig: c_int) -> c_int; - pub fn thr_self() -> thread_t; -} - -#[link(name = "pthread")] -extern "C" { - pub fn pthread_atfork( - prepare: Option, - parent: Option, - child: Option, - ) -> c_int; - - pub fn pthread_attr_getdetachstate( - attr: *const crate::pthread_attr_t, - detachstate: *mut c_int, - ) -> c_int; - - pub fn pthread_attr_getguardsize( - attr: *const crate::pthread_attr_t, - guardsize: *mut size_t, - ) -> c_int; - - pub fn pthread_attr_getinheritsched( - attr: *const crate::pthread_attr_t, - inheritsched: *mut c_int, - ) -> c_int; - - pub fn pthread_attr_getschedparam( - attr: *const crate::pthread_attr_t, - param: *mut sched_param, - ) -> c_int; - - pub fn pthread_attr_getstackaddr( - attr: *const crate::pthread_attr_t, - stackaddr: *mut *mut c_void, - ) -> c_int; - - pub fn pthread_attr_getschedpolicy( - attr: *const crate::pthread_attr_t, - policy: *mut c_int, - ) -> c_int; - - pub fn pthread_attr_getscope( - attr: *const crate::pthread_attr_t, - contentionscope: *mut c_int, - ) -> c_int; - - pub fn pthread_attr_getstack( - attr: *const crate::pthread_attr_t, - stackaddr: *mut *mut c_void, - stacksize: *mut size_t, - ) -> c_int; - - pub fn pthread_attr_setguardsize(attr: *mut crate::pthread_attr_t, guardsize: size_t) -> c_int; - - pub fn pthread_attr_setinheritsched( - attr: *mut crate::pthread_attr_t, - inheritsched: c_int, - ) -> c_int; - - pub fn pthread_attr_setschedparam( - attr: *mut crate::pthread_attr_t, - param: *const sched_param, - ) -> c_int; - - pub fn pthread_attr_setschedpolicy(attr: *mut crate::pthread_attr_t, policy: c_int) -> c_int; - - pub fn pthread_attr_setscope(attr: *mut crate::pthread_attr_t, contentionscope: c_int) - -> c_int; - - pub fn pthread_attr_setstack( - attr: *mut crate::pthread_attr_t, - stackaddr: *mut c_void, - stacksize: size_t, - ) -> c_int; - - pub fn pthread_attr_setstackaddr( - attr: *mut crate::pthread_attr_t, - stackaddr: *mut c_void, - ) -> c_int; - - pub fn pthread_barrierattr_destroy(attr: *mut crate::pthread_barrierattr_t) -> c_int; - - pub fn pthread_barrierattr_getpshared( - attr: *const crate::pthread_barrierattr_t, - pshared: *mut c_int, - ) -> c_int; - - pub fn pthread_barrierattr_init(attr: *mut crate::pthread_barrierattr_t) -> c_int; - - pub fn pthread_barrierattr_setpshared( - attr: *mut crate::pthread_barrierattr_t, - pshared: c_int, - ) -> c_int; - - pub fn pthread_barrier_destroy(barrier: *mut pthread_barrier_t) -> c_int; - - pub fn pthread_barrier_init( - barrier: *mut pthread_barrier_t, - attr: *const crate::pthread_barrierattr_t, - count: c_uint, - ) -> c_int; - - pub fn pthread_barrier_wait(barrier: *mut pthread_barrier_t) -> c_int; - - pub fn pthread_cancel(thread: crate::pthread_t) -> c_int; - - pub fn pthread_cleanup_pop(execute: c_int) -> c_void; - - pub fn pthread_cleanup_push( - routine: Option, - arg: *mut c_void, - ) -> c_void; - - pub fn pthread_condattr_getclock( - attr: *const pthread_condattr_t, - clock_id: *mut clockid_t, - ) -> c_int; - - pub fn 
pthread_condattr_getpshared( - attr: *const pthread_condattr_t, - pshared: *mut c_int, - ) -> c_int; - - pub fn pthread_condattr_setclock( - attr: *mut pthread_condattr_t, - clock_id: crate::clockid_t, - ) -> c_int; - - pub fn pthread_condattr_setpshared(attr: *mut pthread_condattr_t, pshared: c_int) -> c_int; - - pub fn pthread_create( - thread: *mut crate::pthread_t, - attr: *const crate::pthread_attr_t, - start_routine: extern "C" fn(*mut c_void) -> *mut c_void, - arg: *mut c_void, - ) -> c_int; - - pub fn pthread_getconcurrency() -> c_int; - - pub fn pthread_getcpuclockid( - thread_id: crate::pthread_t, - clock_id: *mut crate::clockid_t, - ) -> c_int; - - pub fn pthread_getschedparam( - thread: crate::pthread_t, - policy: *mut c_int, - param: *mut sched_param, - ) -> c_int; - - pub fn pthread_kill(thread: crate::pthread_t, sig: c_int) -> c_int; - - pub fn pthread_mutexattr_getprioceiling( - attr: *const crate::pthread_mutexattr_t, - prioceiling: *mut c_int, - ) -> c_int; - - pub fn pthread_mutexattr_getprotocol( - attr: *const pthread_mutexattr_t, - protocol: *mut c_int, - ) -> c_int; - - pub fn pthread_mutexattr_getpshared( - attr: *const pthread_mutexattr_t, - pshared: *mut c_int, - ) -> c_int; - - pub fn pthread_mutexattr_getrobust( - attr: *const crate::pthread_mutexattr_t, - robust: *mut c_int, - ) -> c_int; - - pub fn pthread_mutexattr_gettype( - attr: *const crate::pthread_mutexattr_t, - _type: *mut c_int, - ) -> c_int; - - pub fn pthread_mutexattr_setprioceiling( - attr: *mut crate::pthread_mutexattr_t, - prioceiling: c_int, - ) -> c_int; - - pub fn pthread_mutexattr_setprotocol(attr: *mut pthread_mutexattr_t, protocol: c_int) -> c_int; - - pub fn pthread_mutexattr_setpshared(attr: *mut pthread_mutexattr_t, pshared: c_int) -> c_int; - - pub fn pthread_mutexattr_setrobust( - attr: *mut crate::pthread_mutexattr_t, - robust: c_int, - ) -> c_int; - - pub fn pthread_mutex_consistent(mutex: *mut crate::pthread_mutex_t) -> c_int; - - pub fn pthread_mutex_getprioceiling( - mutex: *const crate::pthread_mutex_t, - prioceiling: *mut c_int, - ) -> c_int; - - pub fn pthread_mutex_setprioceiling( - mutex: *mut crate::pthread_mutex_t, - prioceiling: c_int, - old_ceiling: *mut c_int, - ) -> c_int; - - pub fn pthread_mutex_timedlock( - mutex: *mut pthread_mutex_t, - abstime: *const crate::timespec, - ) -> c_int; - - pub fn pthread_once( - once_control: *mut crate::pthread_once_t, - init_routine: Option, - ) -> c_int; - - pub fn pthread_rwlockattr_getpshared( - attr: *const pthread_rwlockattr_t, - pshared: *mut c_int, - ) -> c_int; - - pub fn pthread_rwlockattr_setpshared(attr: *mut pthread_rwlockattr_t, pshared: c_int) -> c_int; - - pub fn pthread_rwlock_timedrdlock( - rwlock: *mut crate::pthread_rwlock_t, - abstime: *const crate::timespec, - ) -> c_int; - - pub fn pthread_rwlock_timedwrlock( - rwlock: *mut crate::pthread_rwlock_t, - abstime: *const crate::timespec, - ) -> c_int; - - pub fn pthread_setcancelstate(state: c_int, oldstate: *mut c_int) -> c_int; - pub fn pthread_setcanceltype(_type: c_int, oldtype: *mut c_int) -> c_int; - - pub fn pthread_setconcurrency(new_level: c_int) -> c_int; - - pub fn pthread_setschedparam( - thread: crate::pthread_t, - policy: c_int, - param: *const sched_param, - ) -> c_int; - - pub fn pthread_setschedprio(thread: crate::pthread_t, prio: c_int) -> c_int; - - pub fn pthread_sigmask(how: c_int, set: *const sigset_t, oset: *mut sigset_t) -> c_int; - - pub fn pthread_spin_destroy(lock: *mut pthread_spinlock_t) -> c_int; - pub fn pthread_spin_init(lock: *mut 
pthread_spinlock_t, pshared: c_int) -> c_int; - pub fn pthread_spin_lock(lock: *mut pthread_spinlock_t) -> c_int; - pub fn pthread_spin_trylock(lock: *mut pthread_spinlock_t) -> c_int; - pub fn pthread_spin_unlock(lock: *mut pthread_spinlock_t) -> c_int; - - pub fn pthread_testcancel() -> c_void; -} - -#[link(name = "iconv")] -extern "C" { - pub fn iconv( - cd: iconv_t, - inbuf: *mut *mut c_char, - inbytesleft: *mut size_t, - outbuf: *mut *mut c_char, - outbytesleft: *mut size_t, - ) -> size_t; - pub fn iconv_close(cd: iconv_t) -> c_int; - pub fn iconv_open(tocode: *const c_char, fromcode: *const c_char) -> iconv_t; -} - -extern "C" { - pub fn acct(filename: *mut c_char) -> c_int; - #[link_name = "_posix_aio_cancel"] - pub fn aio_cancel(fildes: c_int, aiocbp: *mut crate::aiocb) -> c_int; - #[link_name = "_posix_aio_error"] - pub fn aio_error(aiocbp: *const crate::aiocb) -> c_int; - #[link_name = "_posix_aio_fsync"] - pub fn aio_fsync(op: c_int, aiocbp: *mut crate::aiocb) -> c_int; - #[link_name = "_posix_aio_read"] - pub fn aio_read(aiocbp: *mut crate::aiocb) -> c_int; - #[link_name = "_posix_aio_return"] - pub fn aio_return(aiocbp: *mut crate::aiocb) -> ssize_t; - #[link_name = "_posix_aio_suspend"] - pub fn aio_suspend( - list: *const *const crate::aiocb, - nent: c_int, - timeout: *const crate::timespec, - ) -> c_int; - #[link_name = "_posix_aio_write"] - pub fn aio_write(aiocbp: *mut crate::aiocb) -> c_int; - pub fn basename(path: *mut c_char) -> *mut c_char; - pub fn bind( - socket: c_int, - address: *const crate::sockaddr, - address_len: crate::socklen_t, - ) -> c_int; - pub fn brk(addr: *mut c_void) -> c_int; - pub fn clearenv() -> c_int; - pub fn clock_getcpuclockid(pid: crate::pid_t, clk_id: *mut crate::clockid_t) -> c_int; - pub fn clock_getres(clk_id: crate::clockid_t, tp: *mut crate::timespec) -> c_int; - pub fn clock_gettime(clk_id: crate::clockid_t, tp: *mut crate::timespec) -> c_int; - pub fn clock_nanosleep( - clk_id: crate::clockid_t, - flags: c_int, - rqtp: *const crate::timespec, - rmtp: *mut crate::timespec, - ) -> c_int; - pub fn clock_settime(clock_id: crate::clockid_t, tp: *const crate::timespec) -> c_int; - pub fn creat64(path: *const c_char, mode: mode_t) -> c_int; - pub fn ctermid(s: *mut c_char) -> *mut c_char; - pub fn dirfd(dirp: *mut crate::DIR) -> c_int; - pub fn dirname(path: *mut c_char) -> *mut c_char; - pub fn drand48() -> c_double; - pub fn duplocale(arg1: crate::locale_t) -> crate::locale_t; - pub fn endgrent(); - pub fn endmntent(streamp: *mut crate::FILE) -> c_int; - pub fn endpwent(); - pub fn endutent(); - pub fn endutxent(); - pub fn erand48(xseed: *mut c_ushort) -> c_double; - pub fn faccessat(dirfd: c_int, pathname: *const c_char, mode: c_int, flags: c_int) -> c_int; - pub fn fattach(fildes: c_int, path: *const c_char) -> c_int; - pub fn fdatasync(fd: c_int) -> c_int; - - // DIFF(main): changed to `*const *mut` in e77f551de9 - pub fn fexecve(fd: c_int, argv: *const *const c_char, envp: *const *const c_char) -> c_int; - - pub fn ffs(value: c_int) -> c_int; - pub fn ffsl(value: c_long) -> c_int; - pub fn ffsll(value: c_longlong) -> c_int; - pub fn fgetgrent(file: *mut crate::FILE) -> *mut crate::group; - pub fn fgetpos64(stream: *mut crate::FILE, ptr: *mut fpos64_t) -> c_int; - pub fn fgetpwent(file: *mut crate::FILE) -> *mut crate::passwd; - pub fn fopen64(filename: *const c_char, mode: *const c_char) -> *mut crate::FILE; - pub fn freelocale(loc: crate::locale_t); - pub fn freopen64( - filename: *const c_char, - mode: *const c_char, - file: *mut 
crate::FILE, - ) -> *mut crate::FILE; - pub fn fseeko64(stream: *mut crate::FILE, offset: off64_t, whence: c_int) -> c_int; - pub fn fsetpos64(stream: *mut crate::FILE, ptr: *const fpos64_t) -> c_int; - pub fn fstat64(fildes: c_int, buf: *mut stat64) -> c_int; - pub fn fstatfs(fd: c_int, buf: *mut statfs) -> c_int; - pub fn fstatfs64(fd: c_int, buf: *mut statfs64) -> c_int; - pub fn fstatvfs64(fd: c_int, buf: *mut statvfs64) -> c_int; - pub fn ftello64(stream: *mut crate::FILE) -> off64_t; - pub fn ftok(path: *const c_char, id: c_int) -> crate::key_t; - pub fn ftruncate64(fd: c_int, length: off64_t) -> c_int; - pub fn futimens(fd: c_int, times: *const crate::timespec) -> c_int; - pub fn getcontext(ucp: *mut ucontext_t) -> c_int; - pub fn getdomainname(name: *mut c_char, len: c_int) -> c_int; - pub fn getdtablesize() -> c_int; - pub fn getgrent() -> *mut crate::group; - pub fn getgrgid(gid: crate::gid_t) -> *mut crate::group; - #[link_name = "_posix_getgrgid_r"] - pub fn getgrgid_r( - gid: crate::gid_t, - grp: *mut crate::group, - buf: *mut c_char, - buflen: size_t, - result: *mut *mut crate::group, - ) -> c_int; - pub fn getgrnam(name: *const c_char) -> *mut crate::group; - #[link_name = "_posix_getgrnam_r"] - pub fn getgrnam_r( - name: *const c_char, - grp: *mut crate::group, - buf: *mut c_char, - buflen: size_t, - result: *mut *mut crate::group, - ) -> c_int; - pub fn getgrset(user: *const c_char) -> *mut c_char; - pub fn gethostid() -> c_long; - pub fn getmntent(stream: *mut crate::FILE) -> *mut crate::mntent; - pub fn getnameinfo( - sa: *const crate::sockaddr, - salen: size_t, - host: *mut c_char, - hostlen: size_t, - serv: *mut c_char, - servlen: size_t, - flags: c_int, - ) -> c_int; - pub fn getpagesize() -> c_int; - pub fn getpeereid(socket: c_int, euid: *mut crate::uid_t, egid: *mut crate::gid_t) -> c_int; - pub fn getpriority(which: c_int, who: crate::id_t) -> c_int; - pub fn getpwent() -> *mut crate::passwd; - #[link_name = "_posix_getpwnam_r"] - pub fn getpwnam_r( - name: *const c_char, - pwd: *mut passwd, - buf: *mut c_char, - buflen: size_t, - result: *mut *mut passwd, - ) -> c_int; - #[link_name = "_posix_getpwuid_r"] - pub fn getpwuid_r( - uid: crate::uid_t, - pwd: *mut passwd, - buf: *mut c_char, - buflen: size_t, - result: *mut *mut passwd, - ) -> c_int; - pub fn getrlimit(resource: c_int, rlim: *mut crate::rlimit) -> c_int; - pub fn getrlimit64(resource: c_int, rlim: *mut rlimit64) -> c_int; - pub fn gettimeofday(tp: *mut crate::timeval, tz: *mut c_void) -> c_int; - pub fn getitimer(which: c_int, curr_value: *mut crate::itimerval) -> c_int; - pub fn getutent() -> *mut utmp; - pub fn getutid(u: *const utmp) -> *mut utmp; - pub fn getutline(u: *const utmp) -> *mut utmp; - pub fn getutxent() -> *mut utmpx; - pub fn getutxid(ut: *const utmpx) -> *mut utmpx; - pub fn getutxline(ut: *const utmpx) -> *mut utmpx; - pub fn glob( - pattern: *const c_char, - flags: c_int, - errfunc: Option c_int>, - pglob: *mut crate::glob_t, - ) -> c_int; - pub fn globfree(pglob: *mut crate::glob_t); - pub fn hasmntopt(mnt: *const crate::mntent, opt: *const c_char) -> *mut c_char; - pub fn hcreate(nelt: size_t) -> c_int; - pub fn hdestroy(); - pub fn hsearch(entry: entry, action: ACTION) -> *mut entry; - pub fn if_freenameindex(ptr: *mut if_nameindex); - pub fn if_nameindex() -> *mut if_nameindex; - pub fn initgroups(name: *const c_char, basegid: crate::gid_t) -> c_int; - pub fn ioctl(fildes: c_int, request: c_int, ...) 
-> c_int; - pub fn jrand48(xseed: *mut c_ushort) -> c_long; - pub fn lcong48(p: *mut c_ushort); - pub fn lfind( - key: *const c_void, - base: *const c_void, - nelp: *mut size_t, - width: size_t, - compar: Option c_int>, - ) -> *mut c_void; - #[link_name = "_posix_lio_listio"] - pub fn lio_listio( - mode: c_int, - aiocb_list: *const *mut aiocb, - nent: c_int, - sevp: *mut sigevent, - ) -> c_int; - pub fn loadquery(flags: c_int, buf: *mut c_void, buflen: c_uint, ...) -> c_int; - pub fn lpar_get_info(command: c_int, buf: *mut c_void, bufsize: size_t) -> c_int; - pub fn lpar_set_resources(id: c_int, resource: *mut c_void) -> c_int; - pub fn lrand48() -> c_long; - pub fn lsearch( - key: *const c_void, - base: *mut c_void, - nelp: *mut size_t, - width: size_t, - compar: Option c_int>, - ) -> *mut c_void; - pub fn lseek64(fd: c_int, offset: off64_t, whence: c_int) -> off64_t; - pub fn lstat64(path: *const c_char, buf: *mut stat64) -> c_int; - pub fn madvise(addr: caddr_t, len: size_t, advice: c_int) -> c_int; - pub fn makecontext(ucp: *mut crate::ucontext_t, func: extern "C" fn(), argc: c_int, ...); - pub fn mallinfo() -> crate::mallinfo; - pub fn mallopt(param: c_int, value: c_int) -> c_int; - pub fn memmem( - haystack: *const c_void, - haystacklen: size_t, - needle: *const c_void, - needlelen: size_t, - ) -> *mut c_void; - pub fn memset_s(s: *mut c_void, smax: size_t, c: c_int, n: size_t) -> c_int; - pub fn mincore(addr: caddr_t, len: size_t, vec: *mut c_char) -> c_int; - pub fn mkfifoat(dirfd: c_int, pathname: *const c_char, mode: mode_t) -> c_int; - pub fn mknodat(dirfd: c_int, pathname: *const c_char, mode: mode_t, dev: dev_t) -> c_int; - pub fn mount(device: *const c_char, path: *const c_char, flags: c_int) -> c_int; - pub fn mprotect(addr: *mut c_void, len: size_t, prot: c_int) -> c_int; - pub fn mq_close(mqd: crate::mqd_t) -> c_int; - pub fn mq_getattr(mqd: crate::mqd_t, attr: *mut crate::mq_attr) -> c_int; - pub fn mq_notify(mqd: crate::mqd_t, notification: *const crate::sigevent) -> c_int; - pub fn mq_open(name: *const c_char, oflag: c_int, ...) 
-> crate::mqd_t; - pub fn mq_receive( - mqd: crate::mqd_t, - msg_ptr: *mut c_char, - msg_len: size_t, - msg_prio: *mut c_uint, - ) -> ssize_t; - pub fn mq_send( - mqd: crate::mqd_t, - msg_ptr: *const c_char, - msg_len: size_t, - msg_prio: c_uint, - ) -> c_int; - pub fn mq_setattr( - mqd: crate::mqd_t, - newattr: *const crate::mq_attr, - oldattr: *mut crate::mq_attr, - ) -> c_int; - pub fn mq_timedreceive( - mqd: crate::mqd_t, - msg_ptr: *mut c_char, - msg_len: size_t, - msg_prio: *mut c_uint, - abs_timeout: *const crate::timespec, - ) -> ssize_t; - pub fn mq_timedsend( - mqd: crate::mqd_t, - msg_ptr: *const c_char, - msg_len: size_t, - msg_prio: c_uint, - abs_timeout: *const crate::timespec, - ) -> c_int; - pub fn mq_unlink(name: *const c_char) -> c_int; - pub fn mrand48() -> c_long; - pub fn msgctl(msqid: c_int, cmd: c_int, buf: *mut msqid_ds) -> c_int; - pub fn msgget(key: crate::key_t, msgflg: c_int) -> c_int; - pub fn msgrcv( - msqid: c_int, - msgp: *mut c_void, - msgsz: size_t, - msgtyp: c_long, - msgflg: c_int, - ) -> ssize_t; - pub fn msgsnd(msqid: c_int, msgp: *const c_void, msgsz: size_t, msgflg: c_int) -> c_int; - pub fn msync(addr: *mut c_void, len: size_t, flags: c_int) -> c_int; - pub fn newlocale(mask: c_int, locale: *const c_char, base: crate::locale_t) -> crate::locale_t; - pub fn nl_langinfo(item: crate::nl_item) -> *mut c_char; - pub fn nl_langinfo_l(item: crate::nl_item, loc: crate::locale_t) -> *mut c_char; - pub fn nrand48(xseed: *mut c_ushort) -> c_long; - pub fn open64(path: *const c_char, oflag: c_int, ...) -> c_int; - pub fn pollset_create(maxfd: c_int) -> pollset_t; - pub fn pollset_ctl(ps: pollset_t, pollctl_array: *mut poll_ctl, array_length: c_int) -> c_int; - pub fn pollset_destroy(ps: pollset_t) -> c_int; - pub fn pollset_poll( - ps: pollset_t, - polldata_array: *mut crate::pollfd, - array_length: c_int, - timeout: c_int, - ) -> c_int; - pub fn pollset_query(ps: pollset_t, pollfd_query: *mut crate::pollfd) -> c_int; - pub fn popen(command: *const c_char, mode: *const c_char) -> *mut crate::FILE; - pub fn posix_fadvise(fd: c_int, offset: off_t, len: off_t, advise: c_int) -> c_int; - pub fn posix_fadvise64(fd: c_int, offset: off64_t, len: off64_t, advise: c_int) -> c_int; - pub fn posix_fallocate(fd: c_int, offset: off_t, len: off_t) -> c_int; - pub fn posix_fallocate64(fd: c_int, offset: off64_t, len: off64_t) -> c_int; - pub fn posix_madvise(addr: *mut c_void, len: size_t, advice: c_int) -> c_int; - pub fn posix_spawn( - pid: *mut crate::pid_t, - path: *const c_char, - file_actions: *const crate::posix_spawn_file_actions_t, - attrp: *const crate::posix_spawnattr_t, - argv: *const *mut c_char, - envp: *const *mut c_char, - ) -> c_int; - pub fn posix_spawn_file_actions_addclose( - actions: *mut posix_spawn_file_actions_t, - fd: c_int, - ) -> c_int; - pub fn posix_spawn_file_actions_adddup2( - actions: *mut posix_spawn_file_actions_t, - fd: c_int, - newfd: c_int, - ) -> c_int; - pub fn posix_spawn_file_actions_addopen( - actions: *mut posix_spawn_file_actions_t, - fd: c_int, - path: *const c_char, - oflag: c_int, - mode: mode_t, - ) -> c_int; - pub fn posix_spawn_file_actions_destroy(actions: *mut posix_spawn_file_actions_t) -> c_int; - pub fn posix_spawn_file_actions_init(actions: *mut posix_spawn_file_actions_t) -> c_int; - pub fn posix_spawnattr_destroy(attr: *mut posix_spawnattr_t) -> c_int; - pub fn posix_spawnattr_getflags(attr: *const posix_spawnattr_t, flags: *mut c_short) -> c_int; - pub fn posix_spawnattr_getpgroup( - attr: *const posix_spawnattr_t, - 
flags: *mut crate::pid_t, - ) -> c_int; - pub fn posix_spawnattr_getschedparam( - attr: *const posix_spawnattr_t, - param: *mut crate::sched_param, - ) -> c_int; - pub fn posix_spawnattr_getschedpolicy( - attr: *const posix_spawnattr_t, - flags: *mut c_int, - ) -> c_int; - pub fn posix_spawnattr_getsigdefault( - attr: *const posix_spawnattr_t, - default: *mut sigset_t, - ) -> c_int; - pub fn posix_spawnattr_getsigmask( - attr: *const posix_spawnattr_t, - default: *mut sigset_t, - ) -> c_int; - pub fn posix_spawnattr_init(attr: *mut posix_spawnattr_t) -> c_int; - pub fn posix_spawnattr_setflags(attr: *mut posix_spawnattr_t, flags: c_short) -> c_int; - pub fn posix_spawnattr_setpgroup(attr: *mut posix_spawnattr_t, flags: crate::pid_t) -> c_int; - pub fn posix_spawnattr_setschedparam( - attr: *mut posix_spawnattr_t, - param: *const crate::sched_param, - ) -> c_int; - pub fn posix_spawnattr_setschedpolicy(attr: *mut posix_spawnattr_t, flags: c_int) -> c_int; - pub fn posix_spawnattr_setsigdefault( - attr: *mut posix_spawnattr_t, - default: *const crate::sigset_t, - ) -> c_int; - pub fn posix_spawnattr_setsigmask( - attr: *mut posix_spawnattr_t, - default: *const crate::sigset_t, - ) -> c_int; - pub fn posix_spawnp( - pid: *mut crate::pid_t, - file: *const c_char, - file_actions: *const crate::posix_spawn_file_actions_t, - attrp: *const crate::posix_spawnattr_t, - argv: *const *mut c_char, - envp: *const *mut c_char, - ) -> c_int; - pub fn pread64(fd: c_int, buf: *mut c_void, count: size_t, offset: off64_t) -> ssize_t; - pub fn preadv(fd: c_int, iov: *const crate::iovec, iovcnt: c_int, offset: offset_t) -> ssize_t; - pub fn ptrace64( - request: c_int, - id: c_longlong, - addr: c_longlong, - data: c_int, - buff: *mut c_int, - ) -> c_int; - pub fn pututline(u: *const utmp) -> *mut utmp; - pub fn pututxline(ut: *const utmpx) -> *mut utmpx; - pub fn pwrite64(fd: c_int, buf: *const c_void, count: size_t, offset: off64_t) -> ssize_t; - pub fn pwritev(fd: c_int, iov: *const crate::iovec, iovcnt: c_int, offset: offset_t) - -> ssize_t; - pub fn quotactl(cmd: *mut c_char, special: c_int, id: c_int, data: caddr_t) -> c_int; - pub fn rand() -> c_int; - pub fn readv(fd: c_int, iov: *const crate::iovec, iovcnt: c_int) -> ssize_t; - // AIX header socket.h maps recvfrom() to nrecvfrom() - #[link_name = "nrecvfrom"] - pub fn recvfrom( - socket: c_int, - buf: *mut c_void, - len: size_t, - flags: c_int, - addr: *mut crate::sockaddr, - addrlen: *mut crate::socklen_t, - ) -> ssize_t; - pub fn recvmmsg( - sockfd: c_int, - msgvec: *mut crate::mmsghdr, - vlen: c_uint, - flags: c_int, - timeout: *mut crate::timespec, - ) -> c_int; - // AIX header socket.h maps recvmsg() to nrecvmsg(). 
- #[link_name = "nrecvmsg"] - pub fn recvmsg(sockfd: c_int, msg: *mut msghdr, flags: c_int) -> ssize_t; - pub fn regcomp(preg: *mut regex_t, pattern: *const c_char, cflags: c_int) -> c_int; - pub fn regerror( - errcode: c_int, - preg: *const crate::regex_t, - errbuf: *mut c_char, - errbuf_size: size_t, - ) -> size_t; - pub fn regexec( - preg: *const regex_t, - input: *const c_char, - nmatch: size_t, - pmatch: *mut regmatch_t, - eflags: c_int, - ) -> c_int; - pub fn regfree(preg: *mut regex_t); - pub fn sbrk(increment: intptr_t) -> *mut c_void; - pub fn sched_getparam(pid: crate::pid_t, param: *mut sched_param) -> c_int; - pub fn sched_getscheduler(pid: crate::pid_t) -> c_int; - pub fn sched_get_priority_max(policy: c_int) -> c_int; - pub fn sched_get_priority_min(policy: c_int) -> c_int; - pub fn sched_rr_get_interval(pid: crate::pid_t, tp: *mut crate::timespec) -> c_int; - pub fn sched_setparam(pid: crate::pid_t, param: *const crate::sched_param) -> c_int; - pub fn sched_setscheduler( - pid: crate::pid_t, - policy: c_int, - param: *const crate::sched_param, - ) -> c_int; - pub fn sctp_opt_info( - sd: c_int, - id: crate::sctp_assoc_t, - opt: c_int, - arg_size: *mut c_void, - size: *mut size_t, - ) -> c_int; - pub fn sctp_peeloff(s: c_int, id: *mut c_uint) -> c_int; - pub fn seed48(xseed: *mut c_ushort) -> *mut c_ushort; - pub fn seekdir(dirp: *mut crate::DIR, loc: c_long); - pub fn sem_close(sem: *mut sem_t) -> c_int; - pub fn sem_destroy(sem: *mut sem_t) -> c_int; - pub fn sem_getvalue(sem: *mut sem_t, sval: *mut c_int) -> c_int; - pub fn sem_init(sem: *mut sem_t, pshared: c_int, value: c_uint) -> c_int; - pub fn sem_open(name: *const c_char, oflag: c_int, ...) -> *mut sem_t; - pub fn sem_timedwait(sem: *mut sem_t, abstime: *const crate::timespec) -> c_int; - pub fn sem_unlink(name: *const c_char) -> c_int; - pub fn semctl(semid: c_int, semnum: c_int, cmd: c_int, ...) -> c_int; - pub fn semget(key: crate::key_t, nsems: c_int, semflag: c_int) -> c_int; - pub fn semop(semid: c_int, sops: *mut sembuf, nsops: size_t) -> c_int; - pub fn send_file(socket: *mut c_int, iobuf: *mut sf_parms, flags: c_uint) -> ssize_t; - pub fn sendmmsg(sockfd: c_int, msgvec: *mut mmsghdr, vlen: c_uint, flags: c_int) -> c_int; - // AIX header socket.h maps sendmsg() to nsendmsg(). 
- #[link_name = "nsendmsg"] - pub fn sendmsg(sockfd: c_int, msg: *const msghdr, flags: c_int) -> ssize_t; - pub fn setcontext(ucp: *const ucontext_t) -> c_int; - pub fn setdomainname(name: *const c_char, len: c_int) -> c_int; - pub fn setgroups(ngroups: c_int, ptr: *const crate::gid_t) -> c_int; - pub fn setgrent(); - pub fn sethostid(hostid: c_int) -> c_int; - pub fn sethostname(name: *const c_char, len: c_int) -> c_int; - pub fn setmntent(filename: *const c_char, ty: *const c_char) -> *mut crate::FILE; - pub fn setpriority(which: c_int, who: id_t, priority: c_int) -> c_int; - pub fn setpwent(); - pub fn setrlimit(resource: c_int, rlim: *const crate::rlimit) -> c_int; - pub fn setrlimit64(resource: c_int, rlim: *const rlimit64) -> c_int; - pub fn settimeofday(tv: *const crate::timeval, tz: *const crate::timezone) -> c_int; - pub fn setitimer( - which: c_int, - new_value: *const crate::itimerval, - old_value: *mut crate::itimerval, - ) -> c_int; - pub fn setutent(); - pub fn setutxent(); - pub fn sigaltstack(ss: *const stack_t, oss: *mut stack_t) -> c_int; - pub fn sigsuspend(mask: *const crate::sigset_t) -> c_int; - pub fn sigtimedwait( - set: *const sigset_t, - info: *mut siginfo_t, - timeout: *const crate::timespec, - ) -> c_int; - pub fn sigwait(set: *const sigset_t, sig: *mut c_int) -> c_int; - pub fn sigwaitinfo(set: *const sigset_t, info: *mut siginfo_t) -> c_int; - pub fn shmat(shmid: c_int, shmaddr: *const c_void, shmflg: c_int) -> *mut c_void; - pub fn shmdt(shmaddr: *const c_void) -> c_int; - pub fn shmctl(shmid: c_int, cmd: c_int, buf: *mut crate::shmid_ds) -> c_int; - pub fn shmget(key: key_t, size: size_t, shmflg: c_int) -> c_int; - pub fn shm_open(name: *const c_char, oflag: c_int, mode: mode_t) -> c_int; - pub fn shm_unlink(name: *const c_char) -> c_int; - pub fn splice(socket1: c_int, socket2: c_int, flags: c_int) -> c_int; - pub fn srand(seed: c_uint); - pub fn srand48(seed: c_long); - pub fn stat64(path: *const c_char, buf: *mut stat64) -> c_int; - pub fn stat64at(dirfd: c_int, path: *const c_char, buf: *mut stat64, flags: c_int) -> c_int; - pub fn statfs(path: *const c_char, buf: *mut statfs) -> c_int; - pub fn statfs64(path: *const c_char, buf: *mut statfs64) -> c_int; - pub fn statvfs64(path: *const c_char, buf: *mut statvfs64) -> c_int; - pub fn statx(path: *const c_char, buf: *mut stat, length: c_int, command: c_int) -> c_int; - pub fn strcasecmp_l( - string1: *const c_char, - string2: *const c_char, - locale: crate::locale_t, - ) -> c_int; - pub fn strerror_r(errnum: c_int, buf: *mut c_char, buflen: size_t) -> c_int; - pub fn strftime( - arg1: *mut c_char, - arg2: size_t, - arg3: *const c_char, - arg4: *const tm, - ) -> size_t; - pub fn strncasecmp_l( - string1: *const c_char, - string2: *const c_char, - length: size_t, - locale: crate::locale_t, - ) -> c_int; - pub fn strptime(s: *const c_char, format: *const c_char, tm: *mut crate::tm) -> *mut c_char; - pub fn strsep(string: *mut *mut c_char, delim: *const c_char) -> *mut c_char; - pub fn swapcontext(uocp: *mut ucontext_t, ucp: *const ucontext_t) -> c_int; - pub fn swapoff(path: *const c_char) -> c_int; - pub fn swapon(path: *const c_char) -> c_int; - pub fn sync(); - pub fn telldir(dirp: *mut crate::DIR) -> c_long; - pub fn timer_create( - clockid: crate::clockid_t, - sevp: *mut crate::sigevent, - timerid: *mut crate::timer_t, - ) -> c_int; - pub fn timer_delete(timerid: timer_t) -> c_int; - pub fn timer_getoverrun(timerid: timer_t) -> c_int; - pub fn timer_gettime(timerid: timer_t, value: *mut itimerspec) -> 
c_int; - pub fn timer_settime( - timerid: crate::timer_t, - flags: c_int, - new_value: *const crate::itimerspec, - old_value: *mut crate::itimerspec, - ) -> c_int; - pub fn truncate64(path: *const c_char, length: off64_t) -> c_int; - pub fn uname(buf: *mut crate::utsname) -> c_int; - pub fn updwtmp(file: *const c_char, u: *const utmp); - pub fn uselocale(loc: crate::locale_t) -> crate::locale_t; - pub fn utmpname(file: *const c_char) -> c_int; - pub fn utimensat( - dirfd: c_int, - path: *const c_char, - times: *const crate::timespec, - flag: c_int, - ) -> c_int; - pub fn wait4( - pid: crate::pid_t, - status: *mut c_int, - options: c_int, - rusage: *mut crate::rusage, - ) -> crate::pid_t; - pub fn waitid( - idtype: idtype_t, - id: id_t, - infop: *mut crate::siginfo_t, - options: c_int, - ) -> c_int; - pub fn writev(fd: c_int, iov: *const crate::iovec, iovcnt: c_int) -> ssize_t; - - // Use AIX thread-safe version errno. - pub fn _Errno() -> *mut c_int; -} - -cfg_if! { - if #[cfg(target_arch = "powerpc64")] { - mod powerpc64; - pub use self::powerpc64::*; - } -} diff --git a/vendor/libc/src/unix/aix/powerpc64.rs b/vendor/libc/src/unix/aix/powerpc64.rs deleted file mode 100644 index ba4ddc057c40be..00000000000000 --- a/vendor/libc/src/unix/aix/powerpc64.rs +++ /dev/null @@ -1,477 +0,0 @@ -use crate::off_t; -use crate::prelude::*; - -// Define lock_data_instrumented as an empty enum -missing! { - #[derive(Debug)] - pub enum lock_data_instrumented {} -} - -s! { - pub struct sigset_t { - pub ss_set: [c_ulong; 4], - } - - pub struct fd_set { - pub fds_bits: [c_long; 1024], - } - - pub struct flock { - pub l_type: c_short, - pub l_whence: c_short, - pub l_sysid: c_uint, - pub l_pid: crate::pid_t, - pub l_vfs: c_int, - pub l_start: off_t, - pub l_len: off_t, - } - - pub struct statvfs { - pub f_bsize: c_ulong, - pub f_frsize: c_ulong, - pub f_blocks: crate::fsblkcnt_t, - pub f_bfree: crate::fsblkcnt_t, - pub f_bavail: crate::fsblkcnt_t, - pub f_files: crate::fsfilcnt_t, - pub f_ffree: crate::fsfilcnt_t, - pub f_favail: crate::fsfilcnt_t, - pub f_fsid: c_ulong, - pub f_basetype: [c_char; 16], - pub f_flag: c_ulong, - pub f_namemax: c_ulong, - pub f_fstr: [c_char; 32], - pub f_filler: [c_ulong; 16], - } - - pub struct pthread_rwlock_t { - __rw_word: [c_long; 10], - } - - pub struct pthread_cond_t { - __cv_word: [c_long; 6], - } - - pub struct pthread_mutex_t { - __mt_word: [c_long; 8], - } - - pub struct pthread_once_t { - __on_word: [c_long; 9], - } - - pub struct stat { - pub st_dev: crate::dev_t, - pub st_ino: crate::ino_t, - pub st_mode: crate::mode_t, - pub st_nlink: crate::nlink_t, - pub st_flag: c_ushort, - pub st_uid: crate::uid_t, - pub st_gid: crate::gid_t, - pub st_rdev: crate::dev_t, - pub st_ssize: c_int, - pub st_atim: crate::timespec, - pub st_mtim: crate::timespec, - pub st_ctim: crate::timespec, - pub st_blksize: crate::blksize_t, - pub st_blocks: crate::blkcnt_t, - pub st_vfstype: c_int, - pub st_vfs: c_uint, - pub st_type: c_uint, - pub st_gen: c_uint, - pub st_reserved: [c_uint; 9], - pub st_padto_ll: c_uint, - pub st_size: off_t, - } - - pub struct statfs { - pub f_version: c_int, - pub f_type: c_int, - pub f_bsize: c_ulong, - pub f_blocks: crate::fsblkcnt_t, - pub f_bfree: crate::fsblkcnt_t, - pub f_bavail: crate::fsblkcnt_t, - pub f_files: crate::fsblkcnt_t, - pub f_ffree: crate::fsblkcnt_t, - pub f_fsid: crate::fsid64_t, - pub f_vfstype: c_int, - pub f_fsize: c_ulong, - pub f_vfsnumber: c_int, - pub f_vfsoff: c_int, - pub f_vfslen: c_int, - pub f_vfsvers: c_int, - pub f_fname: 
[c_char; 32], - pub f_fpack: [c_char; 32], - pub f_name_max: c_int, - } - - pub struct aiocb { - pub aio_lio_opcode: c_int, - pub aio_fildes: c_int, - pub aio_word1: c_int, - pub aio_offset: off_t, - pub aio_buf: *mut c_void, - pub aio_return: ssize_t, - pub aio_errno: c_int, - pub aio_nbytes: size_t, - pub aio_reqprio: c_int, - pub aio_sigevent: crate::sigevent, - pub aio_word2: c_int, - pub aio_fp: c_int, - pub aio_handle: *mut aiocb, - pub aio_reserved: [c_uint; 2], - pub aio_sigev_tid: c_long, - } - - pub struct __vmxreg_t { - __v: [c_uint; 4], - } - - pub struct __vmx_context_t { - pub __vr: [crate::__vmxreg_t; 32], - pub __pad1: [c_uint; 3], - pub __vscr: c_uint, - pub __vrsave: c_uint, - pub __pad2: [c_uint; 3], - } - - pub struct __vsx_context_t { - pub __vsr_dw1: [c_ulonglong; 32], - } - - pub struct __tm_context_t { - pub vmx: crate::__vmx_context_t, - pub vsx: crate::__vsx_context_t, - pub gpr: [c_ulonglong; 32], - pub lr: c_ulonglong, - pub ctr: c_ulonglong, - pub cr: c_uint, - pub xer: c_uint, - pub amr: c_ulonglong, - pub texasr: c_ulonglong, - pub tfiar: c_ulonglong, - pub tfhar: c_ulonglong, - pub ppr: c_ulonglong, - pub dscr: c_ulonglong, - pub tar: c_ulonglong, - pub fpscr: c_uint, - pub fpscrx: c_uint, - pub fpr: [fpreg_t; 32], - pub tmcontext: c_char, - pub tmstate: c_char, - pub prevowner: c_char, - pub pad: [c_char; 5], - } - - pub struct __context64 { - pub gpr: [c_ulonglong; 32], - pub msr: c_ulonglong, - pub iar: c_ulonglong, - pub lr: c_ulonglong, - pub ctr: c_ulonglong, - pub cr: c_uint, - pub xer: c_uint, - pub fpscr: c_uint, - pub fpscrx: c_uint, - pub except: [c_ulonglong; 1], - pub fpr: [fpreg_t; 32], - pub fpeu: c_char, - pub fpinfo: c_char, - pub fpscr24_31: c_char, - pub pad: [c_char; 1], - pub excp_type: c_int, - } - - pub struct mcontext_t { - pub jmp_context: __context64, - } - - pub struct __extctx_t { - pub __flags: c_uint, - pub __rsvd1: [c_uint; 3], - pub __vmx: crate::__vmx_context_t, - pub __ukeys: [c_uint; 2], - pub __vsx: crate::__vsx_context_t, - pub __tm: crate::__tm_context_t, - pub __reserved: [c_char; 1860], - pub __extctx_magic: c_int, - } - - pub struct ucontext_t { - pub __sc_onstack: c_int, - pub uc_sigmask: crate::sigset_t, - pub __sc_uerror: c_int, - pub uc_mcontext: crate::mcontext_t, - pub uc_link: *mut ucontext_t, - pub uc_stack: crate::stack_t, - pub __extctx: *mut crate::__extctx_t, - pub __extctx_magic: c_int, - pub __pad: [c_int; 1], - } - - pub struct utmpx { - pub ut_user: [c_char; 256], - pub ut_id: [c_char; 14], - pub ut_line: [c_char; 64], - pub ut_pid: crate::pid_t, - pub ut_type: c_short, - pub ut_tv: crate::timeval, - pub ut_host: [c_char; 256], - pub __dbl_word_pad: c_int, - pub __reservedA: [c_int; 2], - pub __reservedV: [c_int; 6], - } - - pub struct pthread_spinlock_t { - pub __sp_word: [c_long; 3], - } - - pub struct pthread_barrier_t { - pub __br_word: [c_long; 5], - } - - pub struct msqid_ds { - pub msg_perm: crate::ipc_perm, - pub msg_first: c_uint, - pub msg_last: c_uint, - pub msg_cbytes: c_uint, - pub msg_qnum: c_uint, - pub msg_qbytes: c_ulong, - pub msg_lspid: crate::pid_t, - pub msg_lrpid: crate::pid_t, - pub msg_stime: crate::time_t, - pub msg_rtime: crate::time_t, - pub msg_ctime: crate::time_t, - pub msg_rwait: c_int, - pub msg_wwait: c_int, - pub msg_reqevents: c_ushort, - } -} - -s_no_extra_traits! 
{ - pub struct siginfo_t { - pub si_signo: c_int, - pub si_errno: c_int, - pub si_code: c_int, - pub si_pid: crate::pid_t, - pub si_uid: crate::uid_t, - pub si_status: c_int, - pub si_addr: *mut c_void, - pub si_band: c_long, - pub si_value: crate::sigval, - pub __si_flags: c_int, - pub __pad: [c_int; 3], - } - - pub union _kernel_simple_lock { - pub _slock: c_long, - pub _slockp: *mut lock_data_instrumented, - } - - pub struct fileops_t { - pub fo_rw: Option< - extern "C" fn( - file: *mut file, - rw: crate::uio_rw, - io: *mut c_void, - ext: c_long, - secattr: *mut c_void, - ) -> c_int, - >, - pub fo_ioctl: Option< - extern "C" fn( - file: *mut file, - a: c_long, - b: crate::caddr_t, - c: c_long, - d: c_long, - ) -> c_int, - >, - pub fo_select: Option< - extern "C" fn(file: *mut file, a: c_int, b: *mut c_ushort, c: extern "C" fn()) -> c_int, - >, - pub fo_close: Option c_int>, - pub fo_fstat: Option c_int>, - } - - pub struct file { - pub f_flag: c_long, - pub f_count: c_int, - pub f_options: c_short, - pub f_type: c_short, - // Should be pointer to 'vnode' - pub f_data: *mut c_void, - pub f_offset: c_longlong, - pub f_dir_off: c_long, - // Should be pointer to 'cred' - pub f_cred: *mut c_void, - pub f_lock: _kernel_simple_lock, - pub f_offset_lock: _kernel_simple_lock, - pub f_vinfo: crate::caddr_t, - pub f_ops: *mut fileops_t, - pub f_parentp: crate::caddr_t, - pub f_fnamep: crate::caddr_t, - pub f_fdata: [c_char; 160], - } - - pub union __ld_info_file { - pub _ldinfo_fd: c_int, - pub _ldinfo_fp: *mut file, - pub _core_offset: c_long, - } - - pub struct ld_info { - pub ldinfo_next: c_uint, - pub ldinfo_flags: c_uint, - pub _file: __ld_info_file, - pub ldinfo_textorg: *mut c_void, - pub ldinfo_textsize: c_ulong, - pub ldinfo_dataorg: *mut c_void, - pub ldinfo_datasize: c_ulong, - pub ldinfo_filename: [c_char; 2], - } - - pub union __pollfd_ext_u { - pub addr: *mut c_void, - pub data32: u32, - pub data: u64, - } - - pub struct pollfd_ext { - pub fd: c_int, - pub events: c_short, - pub revents: c_short, - pub data: __pollfd_ext_u, - } - - pub struct fpreg_t { - pub d: c_double, - } -} - -impl siginfo_t { - pub unsafe fn si_addr(&self) -> *mut c_void { - self.si_addr - } - - pub unsafe fn si_value(&self) -> crate::sigval { - self.si_value - } - - pub unsafe fn si_pid(&self) -> crate::pid_t { - self.si_pid - } - - pub unsafe fn si_uid(&self) -> crate::uid_t { - self.si_uid - } - - pub unsafe fn si_status(&self) -> c_int { - self.si_status - } -} - -cfg_if! 
{ - if #[cfg(feature = "extra_traits")] { - impl PartialEq for siginfo_t { - fn eq(&self, other: &siginfo_t) -> bool { - self.si_signo == other.si_signo - && self.si_errno == other.si_errno - && self.si_code == other.si_code - && self.si_pid == other.si_pid - && self.si_uid == other.si_uid - && self.si_status == other.si_status - && self.si_addr == other.si_addr - && self.si_band == other.si_band - && self.__si_flags == other.__si_flags - && self.si_value == other.si_value - } - } - impl Eq for siginfo_t {} - impl hash::Hash for siginfo_t { - fn hash(&self, state: &mut H) { - self.si_signo.hash(state); - self.si_errno.hash(state); - self.si_code.hash(state); - self.si_pid.hash(state); - self.si_uid.hash(state); - self.si_status.hash(state); - self.si_addr.hash(state); - self.si_band.hash(state); - self.si_value.hash(state); - self.__si_flags.hash(state); - } - } - - impl PartialEq for __pollfd_ext_u { - fn eq(&self, other: &__pollfd_ext_u) -> bool { - unsafe { - self.addr == other.addr - && self.data32 == other.data32 - && self.data == other.data - } - } - } - impl Eq for __pollfd_ext_u {} - impl hash::Hash for __pollfd_ext_u { - fn hash(&self, state: &mut H) { - unsafe { - self.addr.hash(state); - self.data.hash(state); - self.data32.hash(state); - } - } - } - - impl PartialEq for pollfd_ext { - fn eq(&self, other: &pollfd_ext) -> bool { - self.fd == other.fd - && self.events == other.events - && self.revents == other.revents - && self.data == other.data - } - } - impl Eq for pollfd_ext {} - impl hash::Hash for pollfd_ext { - fn hash(&self, state: &mut H) { - self.fd.hash(state); - self.events.hash(state); - self.revents.hash(state); - self.data.hash(state); - } - } - impl PartialEq for fpreg_t { - fn eq(&self, other: &fpreg_t) -> bool { - self.d == other.d - } - } - - impl Eq for fpreg_t {} - - impl hash::Hash for fpreg_t { - fn hash(&self, state: &mut H) { - let d: u64 = unsafe { mem::transmute(self.d) }; - d.hash(state); - } - } - } -} - -pub const PTHREAD_MUTEX_INITIALIZER: pthread_mutex_t = pthread_mutex_t { - __mt_word: [0, 2, 0, 0, 0, 0, 0, 0], -}; -pub const PTHREAD_COND_INITIALIZER: pthread_cond_t = pthread_cond_t { - __cv_word: [0, 0, 0, 0, 2, 0], -}; -pub const PTHREAD_RWLOCK_INITIALIZER: pthread_rwlock_t = pthread_rwlock_t { - __rw_word: [2, 0, 0, 0, 0, 0, 0, 0, 0, 0], -}; - -pub const PTHREAD_ONCE_INIT: pthread_once_t = pthread_once_t { - __on_word: [0, 0, 0, 0, 0, 2, 0, 0, 0], -}; - -pub const RLIM_INFINITY: c_ulong = 0x7fffffffffffffff; - -extern "C" { - pub fn getsystemcfg(label: c_int) -> c_ulong; -} diff --git a/vendor/libc/src/unix/bsd/apple/b32/mod.rs b/vendor/libc/src/unix/bsd/apple/b32/mod.rs deleted file mode 100644 index bd6762558f508e..00000000000000 --- a/vendor/libc/src/unix/bsd/apple/b32/mod.rs +++ /dev/null @@ -1,135 +0,0 @@ -//! 32-bit specific Apple (ios/darwin) definitions - -use crate::prelude::*; - -pub type boolean_t = c_int; - -s! 
{ - pub struct if_data { - pub ifi_type: c_uchar, - pub ifi_typelen: c_uchar, - pub ifi_physical: c_uchar, - pub ifi_addrlen: c_uchar, - pub ifi_hdrlen: c_uchar, - pub ifi_recvquota: c_uchar, - pub ifi_xmitquota: c_uchar, - pub ifi_unused1: c_uchar, - pub ifi_mtu: u32, - pub ifi_metric: u32, - pub ifi_baudrate: u32, - pub ifi_ipackets: u32, - pub ifi_ierrors: u32, - pub ifi_opackets: u32, - pub ifi_oerrors: u32, - pub ifi_collisions: u32, - pub ifi_ibytes: u32, - pub ifi_obytes: u32, - pub ifi_imcasts: u32, - pub ifi_omcasts: u32, - pub ifi_iqdrops: u32, - pub ifi_noproto: u32, - pub ifi_recvtiming: u32, - pub ifi_xmittiming: u32, - pub ifi_lastchange: crate::timeval, - pub ifi_unused2: u32, - pub ifi_hwassist: u32, - pub ifi_reserved1: u32, - pub ifi_reserved2: u32, - } - - pub struct bpf_hdr { - pub bh_tstamp: crate::timeval, - pub bh_caplen: u32, - pub bh_datalen: u32, - pub bh_hdrlen: c_ushort, - } - - pub struct malloc_zone_t { - __private: [crate::uintptr_t; 18], // FIXME(macos): keeping private for now - } -} - -s_no_extra_traits! { - pub struct pthread_attr_t { - __sig: c_long, - __opaque: [c_char; 36], - } - - pub struct pthread_once_t { - __sig: c_long, - __opaque: [c_char; crate::__PTHREAD_ONCE_SIZE__], - } - - #[repr(align(16))] - pub struct max_align_t { - priv_: [f64; 2], - } -} - -cfg_if! { - if #[cfg(feature = "extra_traits")] { - impl PartialEq for pthread_attr_t { - fn eq(&self, other: &pthread_attr_t) -> bool { - self.__sig == other.__sig - && self - .__opaque - .iter() - .zip(other.__opaque.iter()) - .all(|(a, b)| a == b) - } - } - impl Eq for pthread_attr_t {} - impl hash::Hash for pthread_attr_t { - fn hash(&self, state: &mut H) { - self.__sig.hash(state); - self.__opaque.hash(state); - } - } - impl PartialEq for pthread_once_t { - fn eq(&self, other: &pthread_once_t) -> bool { - self.__sig == other.__sig - && self - .__opaque - .iter() - .zip(other.__opaque.iter()) - .all(|(a, b)| a == b) - } - } - impl Eq for pthread_once_t {} - impl hash::Hash for pthread_once_t { - fn hash(&self, state: &mut H) { - self.__sig.hash(state); - self.__opaque.hash(state); - } - } - } -} - -#[doc(hidden)] -#[deprecated(since = "0.2.55")] -pub const NET_RT_MAXID: c_int = 10; - -pub const __PTHREAD_MUTEX_SIZE__: usize = 40; -pub const __PTHREAD_COND_SIZE__: usize = 24; -pub const __PTHREAD_CONDATTR_SIZE__: usize = 4; -pub const __PTHREAD_ONCE_SIZE__: usize = 4; -pub const __PTHREAD_RWLOCK_SIZE__: usize = 124; -pub const __PTHREAD_RWLOCKATTR_SIZE__: usize = 12; - -pub const TIOCTIMESTAMP: c_ulong = 0x40087459; -pub const TIOCDCDTIMESTAMP: c_ulong = 0x40087458; - -pub const BIOCSETF: c_ulong = 0x80084267; -pub const BIOCSRTIMEOUT: c_ulong = 0x8008426d; -pub const BIOCGRTIMEOUT: c_ulong = 0x4008426e; -pub const BIOCSETFNR: c_ulong = 0x8008427e; - -const _PTHREAD_ONCE_SIG_INIT: c_long = 0x30B1BCBA; -pub const PTHREAD_ONCE_INIT: crate::pthread_once_t = crate::pthread_once_t { - __sig: _PTHREAD_ONCE_SIG_INIT, - __opaque: [0; 4], -}; - -extern "C" { - pub fn exchangedata(path1: *const c_char, path2: *const c_char, options: c_ulong) -> c_int; -} diff --git a/vendor/libc/src/unix/bsd/apple/b64/aarch64/mod.rs b/vendor/libc/src/unix/bsd/apple/b64/aarch64/mod.rs deleted file mode 100644 index a13013c09b03b2..00000000000000 --- a/vendor/libc/src/unix/bsd/apple/b64/aarch64/mod.rs +++ /dev/null @@ -1,53 +0,0 @@ -use crate::prelude::*; - -pub type boolean_t = c_int; -pub type mcontext_t = *mut __darwin_mcontext64; - -s! 
{ - pub struct malloc_zone_t { - __private: [crate::uintptr_t; 18], // FIXME(macos): needs arm64 auth pointers support - } - - pub struct ucontext_t { - pub uc_onstack: c_int, - pub uc_sigmask: crate::sigset_t, - pub uc_stack: crate::stack_t, - pub uc_link: *mut crate::ucontext_t, - pub uc_mcsize: usize, - pub uc_mcontext: mcontext_t, - } - - pub struct __darwin_mcontext64 { - pub __es: __darwin_arm_exception_state64, - pub __ss: __darwin_arm_thread_state64, - pub __ns: __darwin_arm_neon_state64, - } - - pub struct __darwin_arm_exception_state64 { - pub __far: u64, - pub __esr: u32, - pub __exception: u32, - } - - pub struct __darwin_arm_thread_state64 { - pub __x: [u64; 29], - pub __fp: u64, - pub __lr: u64, - pub __sp: u64, - pub __pc: u64, - pub __cpsr: u32, - pub __pad: u32, - } - - pub struct __darwin_arm_neon_state64 { - pub __v: [crate::__uint128_t; 32], - pub __fpsr: u32, - pub __fpcr: u32, - } -} - -s_no_extra_traits! { - pub struct max_align_t { - priv_: f64, - } -} diff --git a/vendor/libc/src/unix/bsd/apple/b64/mod.rs b/vendor/libc/src/unix/bsd/apple/b64/mod.rs deleted file mode 100644 index 34743464a44e76..00000000000000 --- a/vendor/libc/src/unix/bsd/apple/b64/mod.rs +++ /dev/null @@ -1,141 +0,0 @@ -//! 64-bit specific Apple (ios/darwin) definitions - -use crate::prelude::*; - -s! { - pub struct timeval32 { - pub tv_sec: i32, - pub tv_usec: i32, - } - - pub struct if_data { - pub ifi_type: c_uchar, - pub ifi_typelen: c_uchar, - pub ifi_physical: c_uchar, - pub ifi_addrlen: c_uchar, - pub ifi_hdrlen: c_uchar, - pub ifi_recvquota: c_uchar, - pub ifi_xmitquota: c_uchar, - pub ifi_unused1: c_uchar, - pub ifi_mtu: u32, - pub ifi_metric: u32, - pub ifi_baudrate: u32, - pub ifi_ipackets: u32, - pub ifi_ierrors: u32, - pub ifi_opackets: u32, - pub ifi_oerrors: u32, - pub ifi_collisions: u32, - pub ifi_ibytes: u32, - pub ifi_obytes: u32, - pub ifi_imcasts: u32, - pub ifi_omcasts: u32, - pub ifi_iqdrops: u32, - pub ifi_noproto: u32, - pub ifi_recvtiming: u32, - pub ifi_xmittiming: u32, - pub ifi_lastchange: timeval32, - pub ifi_unused2: u32, - pub ifi_hwassist: u32, - pub ifi_reserved1: u32, - pub ifi_reserved2: u32, - } - - pub struct bpf_hdr { - pub bh_tstamp: crate::timeval32, - pub bh_caplen: u32, - pub bh_datalen: u32, - pub bh_hdrlen: c_ushort, - } -} - -s_no_extra_traits! { - pub struct pthread_attr_t { - __sig: c_long, - __opaque: [c_char; 56], - } - - pub struct pthread_once_t { - __sig: c_long, - __opaque: [c_char; __PTHREAD_ONCE_SIZE__], - } -} - -cfg_if! 
{ - if #[cfg(feature = "extra_traits")] { - impl PartialEq for pthread_attr_t { - fn eq(&self, other: &pthread_attr_t) -> bool { - self.__sig == other.__sig - && self - .__opaque - .iter() - .zip(other.__opaque.iter()) - .all(|(a, b)| a == b) - } - } - impl Eq for pthread_attr_t {} - impl hash::Hash for pthread_attr_t { - fn hash(&self, state: &mut H) { - self.__sig.hash(state); - self.__opaque.hash(state); - } - } - impl PartialEq for pthread_once_t { - fn eq(&self, other: &pthread_once_t) -> bool { - self.__sig == other.__sig - && self - .__opaque - .iter() - .zip(other.__opaque.iter()) - .all(|(a, b)| a == b) - } - } - impl Eq for pthread_once_t {} - impl hash::Hash for pthread_once_t { - fn hash(&self, state: &mut H) { - self.__sig.hash(state); - self.__opaque.hash(state); - } - } - } -} - -#[doc(hidden)] -#[deprecated(since = "0.2.55")] -pub const NET_RT_MAXID: c_int = 11; - -pub const __PTHREAD_MUTEX_SIZE__: usize = 56; -pub const __PTHREAD_COND_SIZE__: usize = 40; -pub const __PTHREAD_CONDATTR_SIZE__: usize = 8; -pub const __PTHREAD_ONCE_SIZE__: usize = 8; -pub const __PTHREAD_RWLOCK_SIZE__: usize = 192; -pub const __PTHREAD_RWLOCKATTR_SIZE__: usize = 16; - -pub const TIOCTIMESTAMP: c_ulong = 0x40107459; -pub const TIOCDCDTIMESTAMP: c_ulong = 0x40107458; - -pub const BIOCSETF: c_ulong = 0x80104267; -pub const BIOCSRTIMEOUT: c_ulong = 0x8010426d; -pub const BIOCGRTIMEOUT: c_ulong = 0x4010426e; -pub const BIOCSETFNR: c_ulong = 0x8010427e; - -const _PTHREAD_ONCE_SIG_INIT: c_long = 0x30B1BCBA; -pub const PTHREAD_ONCE_INIT: crate::pthread_once_t = crate::pthread_once_t { - __sig: _PTHREAD_ONCE_SIG_INIT, - __opaque: [0; 8], -}; - -extern "C" { - pub fn exchangedata(path1: *const c_char, path2: *const c_char, options: c_uint) -> c_int; -} - -cfg_if! { - if #[cfg(target_arch = "x86_64")] { - mod x86_64; - pub use self::x86_64::*; - } else if #[cfg(target_arch = "aarch64")] { - mod aarch64; - pub use self::aarch64::*; - } else { - // Unknown target_arch - } -} diff --git a/vendor/libc/src/unix/bsd/apple/b64/x86_64/mod.rs b/vendor/libc/src/unix/bsd/apple/b64/x86_64/mod.rs deleted file mode 100644 index 5365becf66c3e7..00000000000000 --- a/vendor/libc/src/unix/bsd/apple/b64/x86_64/mod.rs +++ /dev/null @@ -1,179 +0,0 @@ -use crate::prelude::*; - -pub type boolean_t = c_uint; -pub type mcontext_t = *mut __darwin_mcontext64; - -s! 
{ - pub struct ucontext_t { - pub uc_onstack: c_int, - pub uc_sigmask: crate::sigset_t, - pub uc_stack: crate::stack_t, - pub uc_link: *mut crate::ucontext_t, - pub uc_mcsize: usize, - pub uc_mcontext: mcontext_t, - } - - pub struct __darwin_mcontext64 { - pub __es: __darwin_x86_exception_state64, - pub __ss: __darwin_x86_thread_state64, - pub __fs: __darwin_x86_float_state64, - } - - pub struct __darwin_x86_exception_state64 { - pub __trapno: u16, - pub __cpu: u16, - pub __err: u32, - pub __faultvaddr: u64, - } - - pub struct __darwin_x86_thread_state64 { - pub __rax: u64, - pub __rbx: u64, - pub __rcx: u64, - pub __rdx: u64, - pub __rdi: u64, - pub __rsi: u64, - pub __rbp: u64, - pub __rsp: u64, - pub __r8: u64, - pub __r9: u64, - pub __r10: u64, - pub __r11: u64, - pub __r12: u64, - pub __r13: u64, - pub __r14: u64, - pub __r15: u64, - pub __rip: u64, - pub __rflags: u64, - pub __cs: u64, - pub __fs: u64, - pub __gs: u64, - } - - pub struct __darwin_x86_float_state64 { - pub __fpu_reserved: [c_int; 2], - __fpu_fcw: c_short, - __fpu_fsw: c_short, - pub __fpu_ftw: u8, - pub __fpu_rsrv1: u8, - pub __fpu_fop: u16, - pub __fpu_ip: u32, - pub __fpu_cs: u16, - pub __fpu_rsrv2: u16, - pub __fpu_dp: u32, - pub __fpu_ds: u16, - pub __fpu_rsrv3: u16, - pub __fpu_mxcsr: u32, - pub __fpu_mxcsrmask: u32, - pub __fpu_stmm0: __darwin_mmst_reg, - pub __fpu_stmm1: __darwin_mmst_reg, - pub __fpu_stmm2: __darwin_mmst_reg, - pub __fpu_stmm3: __darwin_mmst_reg, - pub __fpu_stmm4: __darwin_mmst_reg, - pub __fpu_stmm5: __darwin_mmst_reg, - pub __fpu_stmm6: __darwin_mmst_reg, - pub __fpu_stmm7: __darwin_mmst_reg, - pub __fpu_xmm0: __darwin_xmm_reg, - pub __fpu_xmm1: __darwin_xmm_reg, - pub __fpu_xmm2: __darwin_xmm_reg, - pub __fpu_xmm3: __darwin_xmm_reg, - pub __fpu_xmm4: __darwin_xmm_reg, - pub __fpu_xmm5: __darwin_xmm_reg, - pub __fpu_xmm6: __darwin_xmm_reg, - pub __fpu_xmm7: __darwin_xmm_reg, - pub __fpu_xmm8: __darwin_xmm_reg, - pub __fpu_xmm9: __darwin_xmm_reg, - pub __fpu_xmm10: __darwin_xmm_reg, - pub __fpu_xmm11: __darwin_xmm_reg, - pub __fpu_xmm12: __darwin_xmm_reg, - pub __fpu_xmm13: __darwin_xmm_reg, - pub __fpu_xmm14: __darwin_xmm_reg, - pub __fpu_xmm15: __darwin_xmm_reg, - // this field is actually [u8; 96], but defining it with a bigger type - // allows us to auto-implement traits for it since the length of the - // array is less than 32 - __fpu_rsrv4: [u32; 24], - pub __fpu_reserved1: c_int, - } - - pub struct __darwin_mmst_reg { - pub __mmst_reg: [c_char; 10], - pub __mmst_rsrv: [c_char; 6], - } - - pub struct __darwin_xmm_reg { - pub __xmm_reg: [c_char; 16], - } - - pub struct malloc_introspection_t { - _private: [crate::uintptr_t; 16], // FIXME(macos): keeping private for now - } - - // FIXME(1.0): This should not implement `PartialEq` - #[allow(unpredictable_function_pointer_comparisons)] - pub struct malloc_zone_t { - _reserved1: *mut c_void, - _reserved2: *mut c_void, - pub size: - Option size_t>, - pub malloc: - Option *mut c_void>, - pub calloc: Option< - unsafe extern "C" fn( - zone: *mut malloc_zone_t, - num_items: size_t, - size: size_t, - ) -> *mut c_void, - >, - pub valloc: - Option *mut c_void>, - pub free: Option, - pub realloc: Option< - unsafe extern "C" fn( - zone: *mut malloc_zone_t, - ptr: *mut c_void, - size: size_t, - ) -> *mut c_void, - >, - pub destroy: Option, - pub zone_name: *const c_char, - pub batch_malloc: Option< - unsafe extern "C" fn( - zone: *mut malloc_zone_t, - size: size_t, - results: *mut *mut c_void, - num_requested: c_uint, - ) -> c_uint, - >, - pub 
batch_free: Option< - unsafe extern "C" fn( - zone: *mut malloc_zone_t, - to_be_freed: *mut *mut c_void, - num_to_be_freed: c_uint, - ), - >, - pub introspect: *mut malloc_introspection_t, - pub version: c_uint, - pub memalign: Option< - unsafe extern "C" fn( - zone: *mut malloc_zone_t, - alignment: size_t, - size: size_t, - ) -> *mut c_void, - >, - pub free_definite_size: - Option, - pub pressure_relief: - Option size_t>, - pub claimed_address: Option< - unsafe extern "C" fn(zone: *mut malloc_zone_t, ptr: *mut c_void) -> crate::boolean_t, - >, - } -} - -s_no_extra_traits! { - #[repr(align(16))] - pub struct max_align_t { - priv_: [f64; 2], - } -} diff --git a/vendor/libc/src/unix/bsd/apple/mod.rs b/vendor/libc/src/unix/bsd/apple/mod.rs deleted file mode 100644 index 857508f794ad1c..00000000000000 --- a/vendor/libc/src/unix/bsd/apple/mod.rs +++ /dev/null @@ -1,6245 +0,0 @@ -//! Apple (ios/darwin)-specific definitions -//! -//! This covers *-apple-* triples currently - -use crate::prelude::*; -use crate::{cmsghdr, off_t}; - -pub type wchar_t = i32; -pub type clock_t = c_ulong; -pub type time_t = c_long; -pub type suseconds_t = i32; -pub type dev_t = i32; -pub type ino_t = u64; -pub type mode_t = u16; -pub type nlink_t = u16; -pub type blksize_t = i32; -pub type rlim_t = u64; -pub type pthread_key_t = c_ulong; -pub type sigset_t = u32; -pub type clockid_t = c_uint; -pub type fsblkcnt_t = c_uint; -pub type fsfilcnt_t = c_uint; -pub type speed_t = c_ulong; -pub type tcflag_t = c_ulong; -pub type nl_item = c_int; -pub type id_t = c_uint; -pub type sem_t = c_int; -pub type idtype_t = c_uint; -pub type integer_t = c_int; -pub type cpu_type_t = integer_t; -pub type cpu_subtype_t = integer_t; -pub type natural_t = u32; -pub type mach_msg_type_number_t = natural_t; -pub type kern_return_t = c_int; -pub type uuid_t = [u8; 16]; -pub type task_info_t = *mut integer_t; -pub type host_info_t = *mut integer_t; -pub type task_flavor_t = natural_t; -pub type rusage_info_t = *mut c_void; -pub type vm_offset_t = crate::uintptr_t; -pub type vm_size_t = crate::uintptr_t; -pub type vm_address_t = vm_offset_t; -pub type quad_t = i64; -pub type u_quad_t = u64; - -pub type posix_spawnattr_t = *mut c_void; -pub type posix_spawn_file_actions_t = *mut c_void; -pub type key_t = c_int; -pub type shmatt_t = c_ushort; - -pub type sae_associd_t = u32; -pub type sae_connid_t = u32; - -pub type mach_port_t = c_uint; -pub type host_t = c_uint; -pub type host_flavor_t = integer_t; -pub type host_info64_t = *mut integer_t; -pub type processor_flavor_t = c_int; -pub type thread_flavor_t = natural_t; -pub type thread_inspect_t = crate::mach_port_t; -pub type thread_act_t = crate::mach_port_t; -pub type thread_act_array_t = *mut crate::thread_act_t; -pub type policy_t = c_int; -pub type mach_error_t = crate::kern_return_t; -pub type mach_vm_address_t = u64; -pub type mach_vm_offset_t = u64; -pub type mach_vm_size_t = u64; -pub type vm_map_t = crate::mach_port_t; -pub type mem_entry_name_port_t = crate::mach_port_t; -pub type memory_object_t = crate::mach_port_t; -pub type memory_object_offset_t = c_ulonglong; -pub type vm_inherit_t = c_uint; -pub type vm_prot_t = c_int; - -pub type ledger_t = crate::mach_port_t; -pub type ledger_array_t = *mut crate::ledger_t; - -pub type iconv_t = *mut c_void; - -// mach/host_info.h -pub type host_cpu_load_info_t = *mut host_cpu_load_info; -pub type host_cpu_load_info_data_t = host_cpu_load_info; - -// mach/processor_info.h -pub type processor_cpu_load_info_t = *mut processor_cpu_load_info; -pub 
type processor_cpu_load_info_data_t = processor_cpu_load_info; -pub type processor_basic_info_t = *mut processor_basic_info; -pub type processor_basic_info_data_t = processor_basic_info; -pub type processor_set_basic_info_data_t = processor_set_basic_info; -pub type processor_set_basic_info_t = *mut processor_set_basic_info; -pub type processor_set_load_info_data_t = processor_set_load_info; -pub type processor_set_load_info_t = *mut processor_set_load_info; -pub type processor_info_t = *mut integer_t; -pub type processor_info_array_t = *mut integer_t; - -pub type mach_task_basic_info_data_t = mach_task_basic_info; -pub type mach_task_basic_info_t = *mut mach_task_basic_info; -pub type task_thread_times_info_data_t = task_thread_times_info; -pub type task_thread_times_info_t = *mut task_thread_times_info; - -pub type thread_info_t = *mut integer_t; -pub type thread_basic_info_t = *mut thread_basic_info; -pub type thread_basic_info_data_t = thread_basic_info; -pub type thread_identifier_info_t = *mut thread_identifier_info; -pub type thread_identifier_info_data_t = thread_identifier_info; -pub type thread_extended_info_t = *mut thread_extended_info; -pub type thread_extended_info_data_t = thread_extended_info; - -pub type thread_t = crate::mach_port_t; -pub type thread_policy_flavor_t = natural_t; -pub type thread_policy_t = *mut integer_t; -pub type thread_latency_qos_t = integer_t; -pub type thread_throughput_qos_t = integer_t; -pub type thread_standard_policy_data_t = thread_standard_policy; -pub type thread_standard_policy_t = *mut thread_standard_policy; -pub type thread_extended_policy_data_t = thread_extended_policy; -pub type thread_extended_policy_t = *mut thread_extended_policy; -pub type thread_time_constraint_policy_data_t = thread_time_constraint_policy; -pub type thread_time_constraint_policy_t = *mut thread_time_constraint_policy; -pub type thread_precedence_policy_data_t = thread_precedence_policy; -pub type thread_precedence_policy_t = *mut thread_precedence_policy; -pub type thread_affinity_policy_data_t = thread_affinity_policy; -pub type thread_affinity_policy_t = *mut thread_affinity_policy; -pub type thread_background_policy_data_t = thread_background_policy; -pub type thread_background_policy_t = *mut thread_background_policy; -pub type thread_latency_qos_policy_data_t = thread_latency_qos_policy; -pub type thread_latency_qos_policy_t = *mut thread_latency_qos_policy; -pub type thread_throughput_qos_policy_data_t = thread_throughput_qos_policy; -pub type thread_throughput_qos_policy_t = *mut thread_throughput_qos_policy; - -pub type pthread_introspection_hook_t = - extern "C" fn(event: c_uint, thread: crate::pthread_t, addr: *mut c_void, size: size_t); -pub type pthread_jit_write_callback_t = Option c_int>; - -pub type os_clockid_t = u32; - -pub type os_sync_wait_on_address_flags_t = u32; -pub type os_sync_wake_by_address_flags_t = u32; - -pub type os_unfair_lock = os_unfair_lock_s; -pub type os_unfair_lock_t = *mut os_unfair_lock; - -pub type os_log_t = *mut c_void; -pub type os_log_type_t = u8; -pub type os_signpost_id_t = u64; -pub type os_signpost_type_t = u8; - -pub type vm_statistics_t = *mut vm_statistics; -pub type vm_statistics_data_t = vm_statistics; -pub type vm_statistics64_t = *mut vm_statistics64; -pub type vm_statistics64_data_t = vm_statistics64; - -pub type task_t = crate::mach_port_t; -pub type task_inspect_t = crate::mach_port_t; - -pub type sysdir_search_path_enumeration_state = c_uint; - -pub type CCStatus = i32; -pub type CCCryptorStatus = i32; 
-pub type CCRNGStatus = crate::CCCryptorStatus;
-
-pub type copyfile_state_t = *mut c_void;
-pub type copyfile_flags_t = u32;
-pub type copyfile_callback_t = Option<
-    extern "C" fn(
-        c_int,
-        c_int,
-        copyfile_state_t,
-        *const c_char,
-        *const c_char,
-        *mut c_void,
-    ) -> c_int,
->;
-
-pub type attrgroup_t = u32;
-pub type vol_capabilities_set_t = [u32; 4];
-
-deprecated_mach! {
-    pub type mach_timebase_info_data_t = mach_timebase_info;
-}
-
-#[derive(Debug)]
-pub enum timezone {}
-impl Copy for timezone {}
-impl Clone for timezone {
-    fn clone(&self) -> timezone {
-        *self
-    }
-}
-
-#[derive(Debug)]
-#[repr(u32)]
-pub enum qos_class_t {
-    QOS_CLASS_USER_INTERACTIVE = 0x21,
-    QOS_CLASS_USER_INITIATED = 0x19,
-    QOS_CLASS_DEFAULT = 0x15,
-    QOS_CLASS_UTILITY = 0x11,
-    QOS_CLASS_BACKGROUND = 0x09,
-    QOS_CLASS_UNSPECIFIED = 0x00,
-}
-impl Copy for qos_class_t {}
-impl Clone for qos_class_t {
-    fn clone(&self) -> qos_class_t {
-        *self
-    }
-}
-
-#[derive(Debug)]
-#[repr(u32)]
-pub enum sysdir_search_path_directory_t {
-    SYSDIR_DIRECTORY_APPLICATION = 1,
-    SYSDIR_DIRECTORY_DEMO_APPLICATION = 2,
-    SYSDIR_DIRECTORY_DEVELOPER_APPLICATION = 3,
-    SYSDIR_DIRECTORY_ADMIN_APPLICATION = 4,
-    SYSDIR_DIRECTORY_LIBRARY = 5,
-    SYSDIR_DIRECTORY_DEVELOPER = 6,
-    SYSDIR_DIRECTORY_USER = 7,
-    SYSDIR_DIRECTORY_DOCUMENTATION = 8,
-    SYSDIR_DIRECTORY_DOCUMENT = 9,
-    SYSDIR_DIRECTORY_CORESERVICE = 10,
-    SYSDIR_DIRECTORY_AUTOSAVED_INFORMATION = 11,
-    SYSDIR_DIRECTORY_DESKTOP = 12,
-    SYSDIR_DIRECTORY_CACHES = 13,
-    SYSDIR_DIRECTORY_APPLICATION_SUPPORT = 14,
-    SYSDIR_DIRECTORY_DOWNLOADS = 15,
-    SYSDIR_DIRECTORY_INPUT_METHODS = 16,
-    SYSDIR_DIRECTORY_MOVIES = 17,
-    SYSDIR_DIRECTORY_MUSIC = 18,
-    SYSDIR_DIRECTORY_PICTURES = 19,
-    SYSDIR_DIRECTORY_PRINTER_DESCRIPTION = 20,
-    SYSDIR_DIRECTORY_SHARED_PUBLIC = 21,
-    SYSDIR_DIRECTORY_PREFERENCE_PANES = 22,
-    SYSDIR_DIRECTORY_ALL_APPLICATIONS = 100,
-    SYSDIR_DIRECTORY_ALL_LIBRARIES = 101,
-}
-impl Copy for sysdir_search_path_directory_t {}
-impl Clone for sysdir_search_path_directory_t {
-    fn clone(&self) -> sysdir_search_path_directory_t {
-        *self
-    }
-}
-
-#[derive(Debug)]
-#[repr(u32)]
-pub enum sysdir_search_path_domain_mask_t {
-    SYSDIR_DOMAIN_MASK_USER = (1 << 0),
-    SYSDIR_DOMAIN_MASK_LOCAL = (1 << 1),
-    SYSDIR_DOMAIN_MASK_NETWORK = (1 << 2),
-    SYSDIR_DOMAIN_MASK_SYSTEM = (1 << 3),
-    SYSDIR_DOMAIN_MASK_ALL = 0x0ffff,
-}
-impl Copy for sysdir_search_path_domain_mask_t {}
-impl Clone for sysdir_search_path_domain_mask_t {
-    fn clone(&self) -> sysdir_search_path_domain_mask_t {
-        *self
-    }
-}
-
-s!
{ - pub struct ip_mreq { - pub imr_multiaddr: in_addr, - pub imr_interface: in_addr, - } - - pub struct ip_mreqn { - pub imr_multiaddr: in_addr, - pub imr_address: in_addr, - pub imr_ifindex: c_int, - } - - pub struct ip_mreq_source { - pub imr_multiaddr: in_addr, - pub imr_sourceaddr: in_addr, - pub imr_interface: in_addr, - } - - pub struct aiocb { - pub aio_fildes: c_int, - pub aio_offset: off_t, - pub aio_buf: *mut c_void, - pub aio_nbytes: size_t, - pub aio_reqprio: c_int, - pub aio_sigevent: sigevent, - pub aio_lio_opcode: c_int, - } - - pub struct glob_t { - pub gl_pathc: size_t, - __unused1: c_int, - pub gl_offs: size_t, - __unused2: c_int, - pub gl_pathv: *mut *mut c_char, - - __unused3: *mut c_void, - - __unused4: *mut c_void, - __unused5: *mut c_void, - __unused6: *mut c_void, - __unused7: *mut c_void, - __unused8: *mut c_void, - } - - pub struct addrinfo { - pub ai_flags: c_int, - pub ai_family: c_int, - pub ai_socktype: c_int, - pub ai_protocol: c_int, - pub ai_addrlen: crate::socklen_t, - pub ai_canonname: *mut c_char, - pub ai_addr: *mut crate::sockaddr, - pub ai_next: *mut addrinfo, - } - - #[deprecated(since = "0.2.55", note = "Use the `mach2` crate instead")] - pub struct mach_timebase_info { - pub numer: u32, - pub denom: u32, - } - - pub struct stat { - pub st_dev: dev_t, - pub st_mode: mode_t, - pub st_nlink: nlink_t, - pub st_ino: ino_t, - pub st_uid: crate::uid_t, - pub st_gid: crate::gid_t, - pub st_rdev: dev_t, - pub st_atime: time_t, - pub st_atime_nsec: c_long, - pub st_mtime: time_t, - pub st_mtime_nsec: c_long, - pub st_ctime: time_t, - pub st_ctime_nsec: c_long, - pub st_birthtime: time_t, - pub st_birthtime_nsec: c_long, - pub st_size: off_t, - pub st_blocks: crate::blkcnt_t, - pub st_blksize: blksize_t, - pub st_flags: u32, - pub st_gen: u32, - pub st_lspare: i32, - pub st_qspare: [i64; 2], - } - - pub struct pthread_mutexattr_t { - __sig: c_long, - __opaque: [u8; 8], - } - - pub struct pthread_condattr_t { - __sig: c_long, - __opaque: [u8; __PTHREAD_CONDATTR_SIZE__], - } - - pub struct pthread_rwlockattr_t { - __sig: c_long, - __opaque: [u8; __PTHREAD_RWLOCKATTR_SIZE__], - } - - pub struct siginfo_t { - pub si_signo: c_int, - pub si_errno: c_int, - pub si_code: c_int, - pub si_pid: crate::pid_t, - pub si_uid: crate::uid_t, - pub si_status: c_int, - pub si_addr: *mut c_void, - //Requires it to be union for tests - //pub si_value: crate::sigval, - _pad: [usize; 9], - } - - pub struct sigaction { - // FIXME(union): this field is actually a union - pub sa_sigaction: crate::sighandler_t, - pub sa_mask: sigset_t, - pub sa_flags: c_int, - } - - pub struct stack_t { - pub ss_sp: *mut c_void, - pub ss_size: size_t, - pub ss_flags: c_int, - } - - pub struct fstore_t { - pub fst_flags: c_uint, - pub fst_posmode: c_int, - pub fst_offset: off_t, - pub fst_length: off_t, - pub fst_bytesalloc: off_t, - } - - pub struct fpunchhole_t { - pub fp_flags: c_uint, /* unused */ - pub reserved: c_uint, /* (to maintain 8-byte alignment) */ - pub fp_offset: off_t, /* IN: start of the region */ - pub fp_length: off_t, /* IN: size of the region */ - } - - pub struct ftrimactivefile_t { - pub fta_offset: off_t, - pub fta_length: off_t, - } - - pub struct fspecread_t { - pub fsr_flags: c_uint, - pub reserved: c_uint, - pub fsr_offset: off_t, - pub fsr_length: off_t, - } - - pub struct radvisory { - pub ra_offset: off_t, - pub ra_count: c_int, - } - - pub struct statvfs { - pub f_bsize: c_ulong, - pub f_frsize: c_ulong, - pub f_blocks: crate::fsblkcnt_t, - pub f_bfree: crate::fsblkcnt_t, 
- pub f_bavail: crate::fsblkcnt_t, - pub f_files: crate::fsfilcnt_t, - pub f_ffree: crate::fsfilcnt_t, - pub f_favail: crate::fsfilcnt_t, - pub f_fsid: c_ulong, - pub f_flag: c_ulong, - pub f_namemax: c_ulong, - } - - pub struct Dl_info { - pub dli_fname: *const c_char, - pub dli_fbase: *mut c_void, - pub dli_sname: *const c_char, - pub dli_saddr: *mut c_void, - } - - pub struct sockaddr_in { - pub sin_len: u8, - pub sin_family: crate::sa_family_t, - pub sin_port: crate::in_port_t, - pub sin_addr: crate::in_addr, - pub sin_zero: [c_char; 8], - } - - pub struct kevent64_s { - pub ident: u64, - pub filter: i16, - pub flags: u16, - pub fflags: u32, - pub data: i64, - pub udata: u64, - pub ext: [u64; 2], - } - - pub struct dqblk { - pub dqb_bhardlimit: u64, - pub dqb_bsoftlimit: u64, - pub dqb_curbytes: u64, - pub dqb_ihardlimit: u32, - pub dqb_isoftlimit: u32, - pub dqb_curinodes: u32, - pub dqb_btime: u32, - pub dqb_itime: u32, - pub dqb_id: u32, - pub dqb_spare: [u32; 4], - } - - pub struct if_msghdr { - pub ifm_msglen: c_ushort, - pub ifm_version: c_uchar, - pub ifm_type: c_uchar, - pub ifm_addrs: c_int, - pub ifm_flags: c_int, - pub ifm_index: c_ushort, - pub ifm_data: if_data, - } - - pub struct ifa_msghdr { - pub ifam_msglen: c_ushort, - pub ifam_version: c_uchar, - pub ifam_type: c_uchar, - pub ifam_addrs: c_int, - pub ifam_flags: c_int, - pub ifam_index: c_ushort, - pub ifam_metric: c_int, - } - - pub struct ifma_msghdr { - pub ifmam_msglen: c_ushort, - pub ifmam_version: c_uchar, - pub ifmam_type: c_uchar, - pub ifmam_addrs: c_int, - pub ifmam_flags: c_int, - pub ifmam_index: c_ushort, - } - - pub struct ifma_msghdr2 { - pub ifmam_msglen: c_ushort, - pub ifmam_version: c_uchar, - pub ifmam_type: c_uchar, - pub ifmam_addrs: c_int, - pub ifmam_flags: c_int, - pub ifmam_index: c_ushort, - pub ifmam_refcount: i32, - } - - pub struct rt_metrics { - pub rmx_locks: u32, - pub rmx_mtu: u32, - pub rmx_hopcount: u32, - pub rmx_expire: i32, - pub rmx_recvpipe: u32, - pub rmx_sendpipe: u32, - pub rmx_ssthresh: u32, - pub rmx_rtt: u32, - pub rmx_rttvar: u32, - pub rmx_pksent: u32, - /// This field does not exist anymore, the u32 is now part of a resized - /// `rmx_filler` array. 
- pub rmx_state: u32, - pub rmx_filler: [u32; 3], - } - - pub struct rt_msghdr { - pub rtm_msglen: c_ushort, - pub rtm_version: c_uchar, - pub rtm_type: c_uchar, - pub rtm_index: c_ushort, - pub rtm_flags: c_int, - pub rtm_addrs: c_int, - pub rtm_pid: crate::pid_t, - pub rtm_seq: c_int, - pub rtm_errno: c_int, - pub rtm_use: c_int, - pub rtm_inits: u32, - pub rtm_rmx: rt_metrics, - } - - pub struct rt_msghdr2 { - pub rtm_msglen: c_ushort, - pub rtm_version: c_uchar, - pub rtm_type: c_uchar, - pub rtm_index: c_ushort, - pub rtm_flags: c_int, - pub rtm_addrs: c_int, - pub rtm_refcnt: i32, - pub rtm_parentflags: c_int, - pub rtm_reserved: c_int, - pub rtm_use: c_int, - pub rtm_inits: u32, - pub rtm_rmx: rt_metrics, - } - - pub struct termios { - pub c_iflag: crate::tcflag_t, - pub c_oflag: crate::tcflag_t, - pub c_cflag: crate::tcflag_t, - pub c_lflag: crate::tcflag_t, - pub c_cc: [crate::cc_t; crate::NCCS], - pub c_ispeed: crate::speed_t, - pub c_ospeed: crate::speed_t, - } - - pub struct flock { - pub l_start: off_t, - pub l_len: off_t, - pub l_pid: crate::pid_t, - pub l_type: c_short, - pub l_whence: c_short, - } - - pub struct sf_hdtr { - pub headers: *mut crate::iovec, - pub hdr_cnt: c_int, - pub trailers: *mut crate::iovec, - pub trl_cnt: c_int, - } - - pub struct lconv { - pub decimal_point: *mut c_char, - pub thousands_sep: *mut c_char, - pub grouping: *mut c_char, - pub int_curr_symbol: *mut c_char, - pub currency_symbol: *mut c_char, - pub mon_decimal_point: *mut c_char, - pub mon_thousands_sep: *mut c_char, - pub mon_grouping: *mut c_char, - pub positive_sign: *mut c_char, - pub negative_sign: *mut c_char, - pub int_frac_digits: c_char, - pub frac_digits: c_char, - pub p_cs_precedes: c_char, - pub p_sep_by_space: c_char, - pub n_cs_precedes: c_char, - pub n_sep_by_space: c_char, - pub p_sign_posn: c_char, - pub n_sign_posn: c_char, - pub int_p_cs_precedes: c_char, - pub int_n_cs_precedes: c_char, - pub int_p_sep_by_space: c_char, - pub int_n_sep_by_space: c_char, - pub int_p_sign_posn: c_char, - pub int_n_sign_posn: c_char, - } - - pub struct proc_taskinfo { - pub pti_virtual_size: u64, - pub pti_resident_size: u64, - pub pti_total_user: u64, - pub pti_total_system: u64, - pub pti_threads_user: u64, - pub pti_threads_system: u64, - pub pti_policy: i32, - pub pti_faults: i32, - pub pti_pageins: i32, - pub pti_cow_faults: i32, - pub pti_messages_sent: i32, - pub pti_messages_received: i32, - pub pti_syscalls_mach: i32, - pub pti_syscalls_unix: i32, - pub pti_csw: i32, - pub pti_threadnum: i32, - pub pti_numrunning: i32, - pub pti_priority: i32, - } - - pub struct proc_bsdinfo { - pub pbi_flags: u32, - pub pbi_status: u32, - pub pbi_xstatus: u32, - pub pbi_pid: u32, - pub pbi_ppid: u32, - pub pbi_uid: crate::uid_t, - pub pbi_gid: crate::gid_t, - pub pbi_ruid: crate::uid_t, - pub pbi_rgid: crate::gid_t, - pub pbi_svuid: crate::uid_t, - pub pbi_svgid: crate::gid_t, - pub rfu_1: u32, - pub pbi_comm: [c_char; MAXCOMLEN], - pub pbi_name: [c_char; 32], // MAXCOMLEN * 2, but macro isn't happy... 
- pub pbi_nfiles: u32, - pub pbi_pgid: u32, - pub pbi_pjobc: u32, - pub e_tdev: u32, - pub e_tpgid: u32, - pub pbi_nice: i32, - pub pbi_start_tvsec: u64, - pub pbi_start_tvusec: u64, - } - - pub struct proc_taskallinfo { - pub pbsd: proc_bsdinfo, - pub ptinfo: proc_taskinfo, - } - - pub struct xsw_usage { - pub xsu_total: u64, - pub xsu_avail: u64, - pub xsu_used: u64, - pub xsu_pagesize: u32, - pub xsu_encrypted: crate::boolean_t, - } - - pub struct xucred { - pub cr_version: c_uint, - pub cr_uid: crate::uid_t, - pub cr_ngroups: c_short, - pub cr_groups: [crate::gid_t; 16], - } - - #[deprecated(since = "0.2.55", note = "Use the `mach2` crate instead")] - pub struct mach_header { - pub magic: u32, - pub cputype: cpu_type_t, - pub cpusubtype: cpu_subtype_t, - pub filetype: u32, - pub ncmds: u32, - pub sizeofcmds: u32, - pub flags: u32, - } - - #[deprecated(since = "0.2.55", note = "Use the `mach2` crate instead")] - pub struct mach_header_64 { - pub magic: u32, - pub cputype: cpu_type_t, - pub cpusubtype: cpu_subtype_t, - pub filetype: u32, - pub ncmds: u32, - pub sizeofcmds: u32, - pub flags: u32, - pub reserved: u32, - } - - pub struct segment_command { - pub cmd: u32, - pub cmdsize: u32, - pub segname: [c_char; 16], - pub vmaddr: u32, - pub vmsize: u32, - pub fileoff: u32, - pub filesize: u32, - pub maxprot: vm_prot_t, - pub initprot: vm_prot_t, - pub nsects: u32, - pub flags: u32, - } - - pub struct segment_command_64 { - pub cmd: u32, - pub cmdsize: u32, - pub segname: [c_char; 16], - pub vmaddr: u64, - pub vmsize: u64, - pub fileoff: u64, - pub filesize: u64, - pub maxprot: vm_prot_t, - pub initprot: vm_prot_t, - pub nsects: u32, - pub flags: u32, - } - - pub struct load_command { - pub cmd: u32, - pub cmdsize: u32, - } - - pub struct sockaddr_dl { - pub sdl_len: c_uchar, - pub sdl_family: c_uchar, - pub sdl_index: c_ushort, - pub sdl_type: c_uchar, - pub sdl_nlen: c_uchar, - pub sdl_alen: c_uchar, - pub sdl_slen: c_uchar, - pub sdl_data: [c_char; 12], - } - - pub struct sockaddr_inarp { - pub sin_len: c_uchar, - pub sin_family: c_uchar, - pub sin_port: c_ushort, - pub sin_addr: crate::in_addr, - pub sin_srcaddr: crate::in_addr, - pub sin_tos: c_ushort, - pub sin_other: c_ushort, - } - - pub struct sockaddr_ctl { - pub sc_len: c_uchar, - pub sc_family: c_uchar, - pub ss_sysaddr: u16, - pub sc_id: u32, - pub sc_unit: u32, - pub sc_reserved: [u32; 5], - } - - pub struct in_pktinfo { - pub ipi_ifindex: c_uint, - pub ipi_spec_dst: crate::in_addr, - pub ipi_addr: crate::in_addr, - } - - pub struct in6_pktinfo { - pub ipi6_addr: crate::in6_addr, - pub ipi6_ifindex: c_uint, - } - - // sys/ipc.h: - - pub struct ipc_perm { - pub uid: crate::uid_t, - pub gid: crate::gid_t, - pub cuid: crate::uid_t, - pub cgid: crate::gid_t, - pub mode: mode_t, - pub _seq: c_ushort, - pub _key: crate::key_t, - } - - // sys/sem.h - - pub struct sembuf { - pub sem_num: c_ushort, - pub sem_op: c_short, - pub sem_flg: c_short, - } - - // sys/shm.h - - pub struct arphdr { - pub ar_hrd: u16, - pub ar_pro: u16, - pub ar_hln: u8, - pub ar_pln: u8, - pub ar_op: u16, - } - - pub struct in_addr { - pub s_addr: crate::in_addr_t, - } - - // net/ndrv.h - pub struct sockaddr_ndrv { - pub snd_len: c_uchar, - pub snd_family: c_uchar, - pub snd_name: [c_uchar; crate::IFNAMSIZ], - } - - // sys/socket.h - - pub struct sa_endpoints_t { - pub sae_srcif: c_uint, // optional source interface - pub sae_srcaddr: *const crate::sockaddr, // optional source address - pub sae_srcaddrlen: crate::socklen_t, // size of source address - pub 
sae_dstaddr: *const crate::sockaddr, // destination address - pub sae_dstaddrlen: crate::socklen_t, // size of destination address - } - - pub struct timex { - pub modes: c_uint, - pub offset: c_long, - pub freq: c_long, - pub maxerror: c_long, - pub esterror: c_long, - pub status: c_int, - pub constant: c_long, - pub precision: c_long, - pub tolerance: c_long, - pub ppsfreq: c_long, - pub jitter: c_long, - pub shift: c_int, - pub stabil: c_long, - pub jitcnt: c_long, - pub calcnt: c_long, - pub errcnt: c_long, - pub stbcnt: c_long, - } - - pub struct ntptimeval { - pub time: crate::timespec, - pub maxerror: c_long, - pub esterror: c_long, - pub tai: c_long, - pub time_state: c_int, - } - - pub struct thread_standard_policy { - pub no_data: natural_t, - } - - pub struct thread_extended_policy { - pub timeshare: boolean_t, - } - - pub struct thread_time_constraint_policy { - pub period: u32, - pub computation: u32, - pub constraint: u32, - pub preemptible: boolean_t, - } - - pub struct thread_precedence_policy { - pub importance: integer_t, - } - - pub struct thread_affinity_policy { - pub affinity_tag: integer_t, - } - - pub struct thread_background_policy { - pub priority: integer_t, - } - - pub struct thread_latency_qos_policy { - pub thread_latency_qos_tier: thread_latency_qos_t, - } - - pub struct thread_throughput_qos_policy { - pub thread_throughput_qos_tier: thread_throughput_qos_t, - } - - // malloc/malloc.h - pub struct malloc_statistics_t { - pub blocks_in_use: c_uint, - pub size_in_use: size_t, - pub max_size_in_use: size_t, - pub size_allocated: size_t, - } - - pub struct mstats { - pub bytes_total: size_t, - pub chunks_used: size_t, - pub bytes_used: size_t, - pub chunks_free: size_t, - pub bytes_free: size_t, - } - - pub struct vm_range_t { - pub address: crate::vm_address_t, - pub size: crate::vm_size_t, - } - - // sched.h - pub struct sched_param { - pub sched_priority: c_int, - __opaque: [c_char; 4], - } - - pub struct vinfo_stat { - pub vst_dev: u32, - pub vst_mode: u16, - pub vst_nlink: u16, - pub vst_ino: u64, - pub vst_uid: crate::uid_t, - pub vst_gid: crate::gid_t, - pub vst_atime: i64, - pub vst_atimensec: i64, - pub vst_mtime: i64, - pub vst_mtimensec: i64, - pub vst_ctime: i64, - pub vst_ctimensec: i64, - pub vst_birthtime: i64, - pub vst_birthtimensec: i64, - pub vst_size: off_t, - pub vst_blocks: i64, - pub vst_blksize: i32, - pub vst_flags: u32, - pub vst_gen: u32, - pub vst_rdev: u32, - pub vst_qspare: [i64; 2], - } - - pub struct vnode_info { - pub vi_stat: vinfo_stat, - pub vi_type: c_int, - pub vi_pad: c_int, - pub vi_fsid: crate::fsid_t, - } - - pub struct vnode_info_path { - pub vip_vi: vnode_info, - // Normally it's `vip_path: [c_char; MAXPATHLEN]` but because libc supports an old rustc - // version, we go around this limitation like this. 
- pub vip_path: [[c_char; 32]; 32], - } - - pub struct proc_vnodepathinfo { - pub pvi_cdir: vnode_info_path, - pub pvi_rdir: vnode_info_path, - } - - pub struct vm_statistics { - pub free_count: natural_t, - pub active_count: natural_t, - pub inactive_count: natural_t, - pub wire_count: natural_t, - pub zero_fill_count: natural_t, - pub reactivations: natural_t, - pub pageins: natural_t, - pub pageouts: natural_t, - pub faults: natural_t, - pub cow_faults: natural_t, - pub lookups: natural_t, - pub hits: natural_t, - pub purgeable_count: natural_t, - pub purges: natural_t, - pub speculative_count: natural_t, - } - - pub struct task_thread_times_info { - pub user_time: time_value_t, - pub system_time: time_value_t, - } - - pub struct rusage_info_v0 { - pub ri_uuid: [u8; 16], - pub ri_user_time: u64, - pub ri_system_time: u64, - pub ri_pkg_idle_wkups: u64, - pub ri_interrupt_wkups: u64, - pub ri_pageins: u64, - pub ri_wired_size: u64, - pub ri_resident_size: u64, - pub ri_phys_footprint: u64, - pub ri_proc_start_abstime: u64, - pub ri_proc_exit_abstime: u64, - } - - pub struct rusage_info_v1 { - pub ri_uuid: [u8; 16], - pub ri_user_time: u64, - pub ri_system_time: u64, - pub ri_pkg_idle_wkups: u64, - pub ri_interrupt_wkups: u64, - pub ri_pageins: u64, - pub ri_wired_size: u64, - pub ri_resident_size: u64, - pub ri_phys_footprint: u64, - pub ri_proc_start_abstime: u64, - pub ri_proc_exit_abstime: u64, - pub ri_child_user_time: u64, - pub ri_child_system_time: u64, - pub ri_child_pkg_idle_wkups: u64, - pub ri_child_interrupt_wkups: u64, - pub ri_child_pageins: u64, - pub ri_child_elapsed_abstime: u64, - } - - pub struct rusage_info_v2 { - pub ri_uuid: [u8; 16], - pub ri_user_time: u64, - pub ri_system_time: u64, - pub ri_pkg_idle_wkups: u64, - pub ri_interrupt_wkups: u64, - pub ri_pageins: u64, - pub ri_wired_size: u64, - pub ri_resident_size: u64, - pub ri_phys_footprint: u64, - pub ri_proc_start_abstime: u64, - pub ri_proc_exit_abstime: u64, - pub ri_child_user_time: u64, - pub ri_child_system_time: u64, - pub ri_child_pkg_idle_wkups: u64, - pub ri_child_interrupt_wkups: u64, - pub ri_child_pageins: u64, - pub ri_child_elapsed_abstime: u64, - pub ri_diskio_bytesread: u64, - pub ri_diskio_byteswritten: u64, - } - - pub struct rusage_info_v3 { - pub ri_uuid: [u8; 16], - pub ri_user_time: u64, - pub ri_system_time: u64, - pub ri_pkg_idle_wkups: u64, - pub ri_interrupt_wkups: u64, - pub ri_pageins: u64, - pub ri_wired_size: u64, - pub ri_resident_size: u64, - pub ri_phys_footprint: u64, - pub ri_proc_start_abstime: u64, - pub ri_proc_exit_abstime: u64, - pub ri_child_user_time: u64, - pub ri_child_system_time: u64, - pub ri_child_pkg_idle_wkups: u64, - pub ri_child_interrupt_wkups: u64, - pub ri_child_pageins: u64, - pub ri_child_elapsed_abstime: u64, - pub ri_diskio_bytesread: u64, - pub ri_diskio_byteswritten: u64, - pub ri_cpu_time_qos_default: u64, - pub ri_cpu_time_qos_maintenance: u64, - pub ri_cpu_time_qos_background: u64, - pub ri_cpu_time_qos_utility: u64, - pub ri_cpu_time_qos_legacy: u64, - pub ri_cpu_time_qos_user_initiated: u64, - pub ri_cpu_time_qos_user_interactive: u64, - pub ri_billed_system_time: u64, - pub ri_serviced_system_time: u64, - } - - pub struct rusage_info_v4 { - pub ri_uuid: [u8; 16], - pub ri_user_time: u64, - pub ri_system_time: u64, - pub ri_pkg_idle_wkups: u64, - pub ri_interrupt_wkups: u64, - pub ri_pageins: u64, - pub ri_wired_size: u64, - pub ri_resident_size: u64, - pub ri_phys_footprint: u64, - pub ri_proc_start_abstime: u64, - pub ri_proc_exit_abstime: 
u64, - pub ri_child_user_time: u64, - pub ri_child_system_time: u64, - pub ri_child_pkg_idle_wkups: u64, - pub ri_child_interrupt_wkups: u64, - pub ri_child_pageins: u64, - pub ri_child_elapsed_abstime: u64, - pub ri_diskio_bytesread: u64, - pub ri_diskio_byteswritten: u64, - pub ri_cpu_time_qos_default: u64, - pub ri_cpu_time_qos_maintenance: u64, - pub ri_cpu_time_qos_background: u64, - pub ri_cpu_time_qos_utility: u64, - pub ri_cpu_time_qos_legacy: u64, - pub ri_cpu_time_qos_user_initiated: u64, - pub ri_cpu_time_qos_user_interactive: u64, - pub ri_billed_system_time: u64, - pub ri_serviced_system_time: u64, - pub ri_logical_writes: u64, - pub ri_lifetime_max_phys_footprint: u64, - pub ri_instructions: u64, - pub ri_cycles: u64, - pub ri_billed_energy: u64, - pub ri_serviced_energy: u64, - pub ri_interval_max_phys_footprint: u64, - pub ri_runnable_time: u64, - } - - pub struct image_offset { - pub uuid: crate::uuid_t, - pub offset: u32, - } - - pub struct attrlist { - pub bitmapcount: c_ushort, - pub reserved: u16, - pub commonattr: attrgroup_t, - pub volattr: attrgroup_t, - pub dirattr: attrgroup_t, - pub fileattr: attrgroup_t, - pub forkattr: attrgroup_t, - } - - pub struct attrreference_t { - pub attr_dataoffset: i32, - pub attr_length: u32, - } - - pub struct vol_capabilities_attr_t { - pub capabilities: vol_capabilities_set_t, - pub valid: vol_capabilities_set_t, - } - - pub struct attribute_set_t { - pub commonattr: attrgroup_t, - pub volattr: attrgroup_t, - pub dirattr: attrgroup_t, - pub fileattr: attrgroup_t, - pub forkattr: attrgroup_t, - } - - pub struct vol_attributes_attr_t { - pub validattr: attribute_set_t, - pub nativeattr: attribute_set_t, - } - - #[repr(align(8))] - pub struct tcp_connection_info { - pub tcpi_state: u8, - pub tcpi_snd_wscale: u8, - pub tcpi_rcv_wscale: u8, - __pad1: u8, - pub tcpi_options: u32, - pub tcpi_flags: u32, - pub tcpi_rto: u32, - pub tcpi_maxseg: u32, - pub tcpi_snd_ssthresh: u32, - pub tcpi_snd_cwnd: u32, - pub tcpi_snd_wnd: u32, - pub tcpi_snd_sbbytes: u32, - pub tcpi_rcv_wnd: u32, - pub tcpi_rttcur: u32, - pub tcpi_srtt: u32, - pub tcpi_rttvar: u32, - pub tcpi_tfo_cookie_req: u32, - pub tcpi_tfo_cookie_rcv: u32, - pub tcpi_tfo_syn_loss: u32, - pub tcpi_tfo_syn_data_sent: u32, - pub tcpi_tfo_syn_data_acked: u32, - pub tcpi_tfo_syn_data_rcv: u32, - pub tcpi_tfo_cookie_req_rcv: u32, - pub tcpi_tfo_cookie_sent: u32, - pub tcpi_tfo_cookie_invalid: u32, - pub tcpi_tfo_cookie_wrong: u32, - pub tcpi_tfo_no_cookie_rcv: u32, - pub tcpi_tfo_heuristics_disable: u32, - pub tcpi_tfo_send_blackhole: u32, - pub tcpi_tfo_recv_blackhole: u32, - pub tcpi_tfo_onebyte_proxy: u32, - __pad2: u32, - pub tcpi_txpackets: u64, - pub tcpi_txbytes: u64, - pub tcpi_txretransmitbytes: u64, - pub tcpi_rxpackets: u64, - pub tcpi_rxbytes: u64, - pub tcpi_rxoutoforderbytes: u64, - pub tcpi_rxretransmitpackets: u64, - } - - pub struct in6_addrlifetime { - pub ia6t_expire: time_t, - pub ia6t_preferred: time_t, - pub ia6t_vltime: u32, - pub ia6t_pltime: u32, - } - - pub struct in6_ifstat { - pub ifs6_in_receive: crate::u_quad_t, - pub ifs6_in_hdrerr: crate::u_quad_t, - pub ifs6_in_toobig: crate::u_quad_t, - pub ifs6_in_noroute: crate::u_quad_t, - pub ifs6_in_addrerr: crate::u_quad_t, - pub ifs6_in_protounknown: crate::u_quad_t, - pub ifs6_in_truncated: crate::u_quad_t, - pub ifs6_in_discard: crate::u_quad_t, - pub ifs6_in_deliver: crate::u_quad_t, - pub ifs6_out_forward: crate::u_quad_t, - pub ifs6_out_request: crate::u_quad_t, - pub ifs6_out_discard: crate::u_quad_t, - pub 
ifs6_out_fragok: crate::u_quad_t, - pub ifs6_out_fragfail: crate::u_quad_t, - pub ifs6_out_fragcreat: crate::u_quad_t, - pub ifs6_reass_reqd: crate::u_quad_t, - pub ifs6_reass_ok: crate::u_quad_t, - pub ifs6_atmfrag_rcvd: crate::u_quad_t, - pub ifs6_reass_fail: crate::u_quad_t, - pub ifs6_in_mcast: crate::u_quad_t, - pub ifs6_out_mcast: crate::u_quad_t, - pub ifs6_cantfoward_icmp6: crate::u_quad_t, - pub ifs6_addr_expiry_cnt: crate::u_quad_t, - pub ifs6_pfx_expiry_cnt: crate::u_quad_t, - pub ifs6_defrtr_expiry_cnt: crate::u_quad_t, - } - - pub struct icmp6_ifstat { - pub ifs6_in_msg: crate::u_quad_t, - pub ifs6_in_error: crate::u_quad_t, - pub ifs6_in_dstunreach: crate::u_quad_t, - pub ifs6_in_adminprohib: crate::u_quad_t, - pub ifs6_in_timeexceed: crate::u_quad_t, - pub ifs6_in_paramprob: crate::u_quad_t, - pub ifs6_in_pkttoobig: crate::u_quad_t, - pub ifs6_in_echo: crate::u_quad_t, - pub ifs6_in_echoreply: crate::u_quad_t, - pub ifs6_in_routersolicit: crate::u_quad_t, - pub ifs6_in_routeradvert: crate::u_quad_t, - pub ifs6_in_neighborsolicit: crate::u_quad_t, - pub ifs6_in_neighboradvert: crate::u_quad_t, - pub ifs6_in_redirect: crate::u_quad_t, - pub ifs6_in_mldquery: crate::u_quad_t, - pub ifs6_in_mldreport: crate::u_quad_t, - pub ifs6_in_mlddone: crate::u_quad_t, - pub ifs6_out_msg: crate::u_quad_t, - pub ifs6_out_error: crate::u_quad_t, - pub ifs6_out_dstunreach: crate::u_quad_t, - pub ifs6_out_adminprohib: crate::u_quad_t, - pub ifs6_out_timeexceed: crate::u_quad_t, - pub ifs6_out_paramprob: crate::u_quad_t, - pub ifs6_out_pkttoobig: crate::u_quad_t, - pub ifs6_out_echo: crate::u_quad_t, - pub ifs6_out_echoreply: crate::u_quad_t, - pub ifs6_out_routersolicit: crate::u_quad_t, - pub ifs6_out_routeradvert: crate::u_quad_t, - pub ifs6_out_neighborsolicit: crate::u_quad_t, - pub ifs6_out_neighboradvert: crate::u_quad_t, - pub ifs6_out_redirect: crate::u_quad_t, - pub ifs6_out_mldquery: crate::u_quad_t, - pub ifs6_out_mldreport: crate::u_quad_t, - pub ifs6_out_mlddone: crate::u_quad_t, - } - - // mach/host_info.h - pub struct host_cpu_load_info { - pub cpu_ticks: [crate::natural_t; CPU_STATE_MAX as usize], - } - - // net/if_mib.h - pub struct ifmibdata { - /// Name of interface - pub ifmd_name: [c_char; crate::IFNAMSIZ], - /// Number of promiscuous listeners - pub ifmd_pcount: c_uint, - /// Interface flags - pub ifmd_flags: c_uint, - /// Instantaneous length of send queue - pub ifmd_snd_len: c_uint, - /// Maximum length of send queue - pub ifmd_snd_maxlen: c_uint, - /// Number of drops in send queue - pub ifmd_snd_drops: c_uint, - /// For future expansion - pub ifmd_filler: [c_uint; 4], - /// Generic information and statistics - pub ifmd_data: if_data64, - } - - pub struct ifs_iso_8802_3 { - pub dot3StatsAlignmentErrors: u32, - pub dot3StatsFCSErrors: u32, - pub dot3StatsSingleCollisionFrames: u32, - pub dot3StatsMultipleCollisionFrames: u32, - pub dot3StatsSQETestErrors: u32, - pub dot3StatsDeferredTransmissions: u32, - pub dot3StatsLateCollisions: u32, - pub dot3StatsExcessiveCollisions: u32, - pub dot3StatsInternalMacTransmitErrors: u32, - pub dot3StatsCarrierSenseErrors: u32, - pub dot3StatsFrameTooLongs: u32, - pub dot3StatsInternalMacReceiveErrors: u32, - pub dot3StatsEtherChipSet: u32, - pub dot3StatsMissedFrames: u32, - pub dot3StatsCollFrequencies: [u32; 16], - pub dot3Compliance: u32, - } - - // kern_control.h - pub struct ctl_info { - pub ctl_id: u32, - pub ctl_name: [c_char; MAX_KCTL_NAME], - } - - // sys/proc_info.h - pub struct proc_fdinfo { - pub proc_fd: i32, - pub 
proc_fdtype: u32, - } -} - -s_no_extra_traits! { - #[repr(packed(4))] - pub struct ifconf { - pub ifc_len: c_int, - pub ifc_ifcu: __c_anonymous_ifc_ifcu, - } - - #[repr(packed(4))] - pub struct kevent { - pub ident: crate::uintptr_t, - pub filter: i16, - pub flags: u16, - pub fflags: u32, - pub data: intptr_t, - pub udata: *mut c_void, - } - - #[repr(packed(4))] - pub struct semid_ds { - // Note the manpage shows different types than the system header. - pub sem_perm: ipc_perm, - pub sem_base: i32, - pub sem_nsems: c_ushort, - pub sem_otime: crate::time_t, - pub sem_pad1: i32, - pub sem_ctime: crate::time_t, - pub sem_pad2: i32, - pub sem_pad3: [i32; 4], - } - - #[repr(packed(4))] - pub struct shmid_ds { - pub shm_perm: ipc_perm, - pub shm_segsz: size_t, - pub shm_lpid: crate::pid_t, - pub shm_cpid: crate::pid_t, - pub shm_nattch: crate::shmatt_t, - pub shm_atime: crate::time_t, // FIXME(macos): 64-bit wrong align => wrong offset - pub shm_dtime: crate::time_t, // FIXME(macos): 64-bit wrong align => wrong offset - pub shm_ctime: crate::time_t, // FIXME(macos): 64-bit wrong align => wrong offset - // FIXME: 64-bit wrong align => wrong offset: - pub shm_internal: *mut c_void, - } - - pub struct proc_threadinfo { - pub pth_user_time: u64, - pub pth_system_time: u64, - pub pth_cpu_usage: i32, - pub pth_policy: i32, - pub pth_run_state: i32, - pub pth_flags: i32, - pub pth_sleep_time: i32, - pub pth_curpri: i32, - pub pth_priority: i32, - pub pth_maxpriority: i32, - pub pth_name: [c_char; MAXTHREADNAMESIZE], - } - - pub struct statfs { - pub f_bsize: u32, - pub f_iosize: i32, - pub f_blocks: u64, - pub f_bfree: u64, - pub f_bavail: u64, - pub f_files: u64, - pub f_ffree: u64, - pub f_fsid: crate::fsid_t, - pub f_owner: crate::uid_t, - pub f_type: u32, - pub f_flags: u32, - pub f_fssubtype: u32, - pub f_fstypename: [c_char; 16], - pub f_mntonname: [c_char; 1024], - pub f_mntfromname: [c_char; 1024], - pub f_flags_ext: u32, - pub f_reserved: [u32; 7], - } - - pub struct dirent { - pub d_ino: u64, - pub d_seekoff: u64, - pub d_reclen: u16, - pub d_namlen: u16, - pub d_type: u8, - pub d_name: [c_char; 1024], - } - - pub struct pthread_rwlock_t { - __sig: c_long, - __opaque: [u8; __PTHREAD_RWLOCK_SIZE__], - } - - pub struct pthread_mutex_t { - __sig: c_long, - __opaque: [u8; __PTHREAD_MUTEX_SIZE__], - } - - pub struct pthread_cond_t { - __sig: c_long, - __opaque: [u8; __PTHREAD_COND_SIZE__], - } - - pub struct sockaddr_storage { - pub ss_len: u8, - pub ss_family: crate::sa_family_t, - __ss_pad1: [u8; 6], - __ss_align: i64, - __ss_pad2: [u8; 112], - } - - pub struct utmpx { - pub ut_user: [c_char; _UTX_USERSIZE], - pub ut_id: [c_char; _UTX_IDSIZE], - pub ut_line: [c_char; _UTX_LINESIZE], - pub ut_pid: crate::pid_t, - pub ut_type: c_short, - pub ut_tv: crate::timeval, - pub ut_host: [c_char; _UTX_HOSTSIZE], - ut_pad: [u32; 16], - } - - pub struct sigevent { - pub sigev_notify: c_int, - pub sigev_signo: c_int, - pub sigev_value: crate::sigval, - __unused1: *mut c_void, //actually a function pointer - pub sigev_notify_attributes: *mut crate::pthread_attr_t, - } - - pub struct processor_cpu_load_info { - pub cpu_ticks: [c_uint; CPU_STATE_MAX as usize], - } - - pub struct processor_basic_info { - pub cpu_type: cpu_type_t, - pub cpu_subtype: cpu_subtype_t, - pub running: crate::boolean_t, - pub slot_num: c_int, - pub is_master: crate::boolean_t, - } - - pub struct processor_set_basic_info { - pub processor_count: c_int, - pub default_policy: c_int, - } - - pub struct processor_set_load_info { - pub 
task_count: c_int, - pub thread_count: c_int, - pub load_average: integer_t, - pub mach_factor: integer_t, - } - - pub struct time_value_t { - pub seconds: integer_t, - pub microseconds: integer_t, - } - - pub struct thread_basic_info { - pub user_time: time_value_t, - pub system_time: time_value_t, - pub cpu_usage: crate::integer_t, - pub policy: crate::policy_t, - pub run_state: crate::integer_t, - pub flags: crate::integer_t, - pub suspend_count: crate::integer_t, - pub sleep_time: crate::integer_t, - } - - pub struct thread_identifier_info { - pub thread_id: u64, - pub thread_handle: u64, - pub dispatch_qaddr: u64, - } - - pub struct thread_extended_info { - pub pth_user_time: u64, - pub pth_system_time: u64, - pub pth_cpu_usage: i32, - pub pth_policy: i32, - pub pth_run_state: i32, - pub pth_flags: i32, - pub pth_sleep_time: i32, - pub pth_curpri: i32, - pub pth_priority: i32, - pub pth_maxpriority: i32, - pub pth_name: [c_char; MAXTHREADNAMESIZE], - } - - #[repr(packed(4))] - pub struct if_data64 { - pub ifi_type: c_uchar, - pub ifi_typelen: c_uchar, - pub ifi_physical: c_uchar, - pub ifi_addrlen: c_uchar, - pub ifi_hdrlen: c_uchar, - pub ifi_recvquota: c_uchar, - pub ifi_xmitquota: c_uchar, - pub ifi_unused1: c_uchar, - pub ifi_mtu: u32, - pub ifi_metric: u32, - pub ifi_baudrate: u64, - pub ifi_ipackets: u64, - pub ifi_ierrors: u64, - pub ifi_opackets: u64, - pub ifi_oerrors: u64, - pub ifi_collisions: u64, - pub ifi_ibytes: u64, - pub ifi_obytes: u64, - pub ifi_imcasts: u64, - pub ifi_omcasts: u64, - pub ifi_iqdrops: u64, - pub ifi_noproto: u64, - pub ifi_recvtiming: u32, - pub ifi_xmittiming: u32, - #[cfg(target_pointer_width = "32")] - pub ifi_lastchange: crate::timeval, - #[cfg(not(target_pointer_width = "32"))] - pub ifi_lastchange: timeval32, - } - - #[repr(packed(4))] - pub struct if_msghdr2 { - pub ifm_msglen: c_ushort, - pub ifm_version: c_uchar, - pub ifm_type: c_uchar, - pub ifm_addrs: c_int, - pub ifm_flags: c_int, - pub ifm_index: c_ushort, - pub ifm_snd_len: c_int, - pub ifm_snd_maxlen: c_int, - pub ifm_snd_drops: c_int, - pub ifm_timer: c_int, - pub ifm_data: if_data64, - } - - #[repr(packed(8))] - pub struct vm_statistics64 { - pub free_count: natural_t, - pub active_count: natural_t, - pub inactive_count: natural_t, - pub wire_count: natural_t, - pub zero_fill_count: u64, - pub reactivations: u64, - pub pageins: u64, - pub pageouts: u64, - pub faults: u64, - pub cow_faults: u64, - pub lookups: u64, - pub hits: u64, - pub purges: u64, - pub purgeable_count: natural_t, - pub speculative_count: natural_t, - pub decompressions: u64, - pub compressions: u64, - pub swapins: u64, - pub swapouts: u64, - pub compressor_page_count: natural_t, - pub throttled_count: natural_t, - pub external_page_count: natural_t, - pub internal_page_count: natural_t, - pub total_uncompressed_pages_in_compressor: u64, - } - - #[repr(packed(4))] - pub struct mach_task_basic_info { - pub virtual_size: mach_vm_size_t, - pub resident_size: mach_vm_size_t, - pub resident_size_max: mach_vm_size_t, - pub user_time: time_value_t, - pub system_time: time_value_t, - pub policy: crate::policy_t, - pub suspend_count: integer_t, - } - - #[repr(packed(4))] - pub struct log2phys { - pub l2p_flags: c_uint, - pub l2p_contigbytes: off_t, - pub l2p_devoffset: off_t, - } - - pub struct os_unfair_lock_s { - _os_unfair_lock_opaque: u32, - } - - #[repr(packed(1))] - pub struct sockaddr_vm { - pub svm_len: c_uchar, - pub svm_family: crate::sa_family_t, - pub svm_reserved1: c_ushort, - pub svm_port: c_uint, - pub 
svm_cid: c_uint, - } - - pub struct ifdevmtu { - pub ifdm_current: c_int, - pub ifdm_min: c_int, - pub ifdm_max: c_int, - } - - pub union __c_anonymous_ifk_data { - pub ifk_ptr: *mut c_void, - pub ifk_value: c_int, - } - - #[repr(packed(4))] - pub struct ifkpi { - pub ifk_module_id: c_uint, - pub ifk_type: c_uint, - pub ifk_data: __c_anonymous_ifk_data, - } - - pub union __c_anonymous_ifr_ifru { - pub ifru_addr: crate::sockaddr, - pub ifru_dstaddr: crate::sockaddr, - pub ifru_broadaddr: crate::sockaddr, - pub ifru_flags: c_short, - pub ifru_metrics: c_int, - pub ifru_mtu: c_int, - pub ifru_phys: c_int, - pub ifru_media: c_int, - pub ifru_intval: c_int, - pub ifru_data: *mut c_char, - pub ifru_devmtu: ifdevmtu, - pub ifru_kpi: ifkpi, - pub ifru_wake_flags: u32, - pub ifru_route_refcnt: u32, - pub ifru_cap: [c_int; 2], - pub ifru_functional_type: u32, - } - - pub struct ifreq { - pub ifr_name: [c_char; crate::IFNAMSIZ], - pub ifr_ifru: __c_anonymous_ifr_ifru, - } - - pub union __c_anonymous_ifc_ifcu { - pub ifcu_buf: *mut c_char, - pub ifcu_req: *mut ifreq, - } - - pub union __c_anonymous_ifr_ifru6 { - pub ifru_addr: crate::sockaddr_in6, - pub ifru_dstaddr: crate::sockaddr_in6, - pub ifru_flags: c_int, - pub ifru_flags6: c_int, - pub ifru_metrics: c_int, - pub ifru_intval: c_int, - pub ifru_data: *mut c_char, - pub ifru_lifetime: in6_addrlifetime, - pub ifru_stat: in6_ifstat, - pub ifru_icmp6stat: icmp6_ifstat, - pub ifru_scope_id: [u32; SCOPE6_ID_MAX], - } - - pub struct in6_ifreq { - pub ifr_name: [c_char; crate::IFNAMSIZ], - pub ifr_ifru: __c_anonymous_ifr_ifru6, - } -} - -impl siginfo_t { - pub unsafe fn si_addr(&self) -> *mut c_void { - self.si_addr - } - - pub unsafe fn si_value(&self) -> crate::sigval { - #[repr(C)] - struct siginfo_timer { - _si_signo: c_int, - _si_errno: c_int, - _si_code: c_int, - _si_pid: crate::pid_t, - _si_uid: crate::uid_t, - _si_status: c_int, - _si_addr: *mut c_void, - si_value: crate::sigval, - } - - (*(self as *const siginfo_t).cast::()).si_value - } - - pub unsafe fn si_pid(&self) -> crate::pid_t { - self.si_pid - } - - pub unsafe fn si_uid(&self) -> crate::uid_t { - self.si_uid - } - - pub unsafe fn si_status(&self) -> c_int { - self.si_status - } -} - -s_no_extra_traits! { - pub union semun { - pub val: c_int, - pub buf: *mut semid_ds, - pub array: *mut c_ushort, - } -} - -cfg_if! { - if #[cfg(feature = "extra_traits")] { - impl PartialEq for semun { - fn eq(&self, other: &semun) -> bool { - unsafe { self.val == other.val } - } - } - impl Eq for semun {} - impl hash::Hash for semun { - fn hash(&self, state: &mut H) { - unsafe { self.val.hash(state) }; - } - } - } -} - -cfg_if! 
{ - if #[cfg(feature = "extra_traits")] { - impl PartialEq for ifconf - where - Self: Copy, - { - fn eq(&self, other: &Self) -> bool { - let len_ptr1 = core::ptr::addr_of!(self.ifc_len); - let len_ptr2 = core::ptr::addr_of!(other.ifc_len); - let ifcu_ptr1 = core::ptr::addr_of!(self.ifc_ifcu); - let ifcu_ptr2 = core::ptr::addr_of!(other.ifc_ifcu); - - // SAFETY: `ifconf` implements `Copy` so the reads are valid - let len1 = unsafe { len_ptr1.read_unaligned() }; - let len2 = unsafe { len_ptr2.read_unaligned() }; - let ifcu1 = unsafe { ifcu_ptr1.read_unaligned() }; - let ifcu2 = unsafe { ifcu_ptr2.read_unaligned() }; - - len1 == len2 && ifcu1 == ifcu2 - } - } - impl Eq for ifconf {} - - impl PartialEq for kevent { - fn eq(&self, other: &kevent) -> bool { - self.ident == other.ident - && self.filter == other.filter - && self.flags == other.flags - && self.fflags == other.fflags - && self.data == other.data - && self.udata == other.udata - } - } - impl Eq for kevent {} - impl hash::Hash for kevent { - fn hash(&self, state: &mut H) { - let ident = self.ident; - let filter = self.filter; - let flags = self.flags; - let fflags = self.fflags; - let data = self.data; - let udata = self.udata; - ident.hash(state); - filter.hash(state); - flags.hash(state); - fflags.hash(state); - data.hash(state); - udata.hash(state); - } - } - - impl PartialEq for semid_ds { - fn eq(&self, other: &semid_ds) -> bool { - let sem_perm = self.sem_perm; - let sem_pad3 = self.sem_pad3; - let other_sem_perm = other.sem_perm; - let other_sem_pad3 = other.sem_pad3; - sem_perm == other_sem_perm - && self.sem_base == other.sem_base - && self.sem_nsems == other.sem_nsems - && self.sem_otime == other.sem_otime - && self.sem_pad1 == other.sem_pad1 - && self.sem_ctime == other.sem_ctime - && self.sem_pad2 == other.sem_pad2 - && sem_pad3 == other_sem_pad3 - } - } - impl Eq for semid_ds {} - impl hash::Hash for semid_ds { - fn hash(&self, state: &mut H) { - let sem_perm = self.sem_perm; - let sem_base = self.sem_base; - let sem_nsems = self.sem_nsems; - let sem_otime = self.sem_otime; - let sem_pad1 = self.sem_pad1; - let sem_ctime = self.sem_ctime; - let sem_pad2 = self.sem_pad2; - let sem_pad3 = self.sem_pad3; - sem_perm.hash(state); - sem_base.hash(state); - sem_nsems.hash(state); - sem_otime.hash(state); - sem_pad1.hash(state); - sem_ctime.hash(state); - sem_pad2.hash(state); - sem_pad3.hash(state); - } - } - - impl PartialEq for shmid_ds { - fn eq(&self, other: &shmid_ds) -> bool { - let shm_perm = self.shm_perm; - let other_shm_perm = other.shm_perm; - shm_perm == other_shm_perm - && self.shm_segsz == other.shm_segsz - && self.shm_lpid == other.shm_lpid - && self.shm_cpid == other.shm_cpid - && self.shm_nattch == other.shm_nattch - && self.shm_atime == other.shm_atime - && self.shm_dtime == other.shm_dtime - && self.shm_ctime == other.shm_ctime - && self.shm_internal == other.shm_internal - } - } - impl Eq for shmid_ds {} - impl hash::Hash for shmid_ds { - fn hash(&self, state: &mut H) { - let shm_perm = self.shm_perm; - let shm_segsz = self.shm_segsz; - let shm_lpid = self.shm_lpid; - let shm_cpid = self.shm_cpid; - let shm_nattch = self.shm_nattch; - let shm_atime = self.shm_atime; - let shm_dtime = self.shm_dtime; - let shm_ctime = self.shm_ctime; - let shm_internal = self.shm_internal; - shm_perm.hash(state); - shm_segsz.hash(state); - shm_lpid.hash(state); - shm_cpid.hash(state); - shm_nattch.hash(state); - shm_atime.hash(state); - shm_dtime.hash(state); - shm_ctime.hash(state); - shm_internal.hash(state); - } - } - - 
impl PartialEq for proc_threadinfo { - fn eq(&self, other: &proc_threadinfo) -> bool { - self.pth_user_time == other.pth_user_time - && self.pth_system_time == other.pth_system_time - && self.pth_cpu_usage == other.pth_cpu_usage - && self.pth_policy == other.pth_policy - && self.pth_run_state == other.pth_run_state - && self.pth_flags == other.pth_flags - && self.pth_sleep_time == other.pth_sleep_time - && self.pth_curpri == other.pth_curpri - && self.pth_priority == other.pth_priority - && self.pth_maxpriority == other.pth_maxpriority - && self - .pth_name - .iter() - .zip(other.pth_name.iter()) - .all(|(a, b)| a == b) - } - } - impl Eq for proc_threadinfo {} - impl hash::Hash for proc_threadinfo { - fn hash(&self, state: &mut H) { - self.pth_user_time.hash(state); - self.pth_system_time.hash(state); - self.pth_cpu_usage.hash(state); - self.pth_policy.hash(state); - self.pth_run_state.hash(state); - self.pth_flags.hash(state); - self.pth_sleep_time.hash(state); - self.pth_curpri.hash(state); - self.pth_priority.hash(state); - self.pth_maxpriority.hash(state); - self.pth_name.hash(state); - } - } - - impl PartialEq for statfs { - fn eq(&self, other: &statfs) -> bool { - self.f_bsize == other.f_bsize - && self.f_iosize == other.f_iosize - && self.f_blocks == other.f_blocks - && self.f_bfree == other.f_bfree - && self.f_bavail == other.f_bavail - && self.f_files == other.f_files - && self.f_ffree == other.f_ffree - && self.f_fsid == other.f_fsid - && self.f_owner == other.f_owner - && self.f_flags == other.f_flags - && self.f_fssubtype == other.f_fssubtype - && self.f_fstypename == other.f_fstypename - && self.f_type == other.f_type - && self - .f_mntonname - .iter() - .zip(other.f_mntonname.iter()) - .all(|(a, b)| a == b) - && self - .f_mntfromname - .iter() - .zip(other.f_mntfromname.iter()) - .all(|(a, b)| a == b) - && self.f_reserved == other.f_reserved - } - } - - impl Eq for statfs {} - - impl hash::Hash for statfs { - fn hash(&self, state: &mut H) { - self.f_bsize.hash(state); - self.f_iosize.hash(state); - self.f_blocks.hash(state); - self.f_bfree.hash(state); - self.f_bavail.hash(state); - self.f_files.hash(state); - self.f_ffree.hash(state); - self.f_fsid.hash(state); - self.f_owner.hash(state); - self.f_flags.hash(state); - self.f_fssubtype.hash(state); - self.f_fstypename.hash(state); - self.f_type.hash(state); - self.f_mntonname.hash(state); - self.f_mntfromname.hash(state); - self.f_reserved.hash(state); - } - } - - impl PartialEq for dirent { - fn eq(&self, other: &dirent) -> bool { - self.d_ino == other.d_ino - && self.d_seekoff == other.d_seekoff - && self.d_reclen == other.d_reclen - && self.d_namlen == other.d_namlen - && self.d_type == other.d_type - && self - .d_name - .iter() - .zip(other.d_name.iter()) - .all(|(a, b)| a == b) - } - } - impl Eq for dirent {} - impl hash::Hash for dirent { - fn hash(&self, state: &mut H) { - self.d_ino.hash(state); - self.d_seekoff.hash(state); - self.d_reclen.hash(state); - self.d_namlen.hash(state); - self.d_type.hash(state); - self.d_name.hash(state); - } - } - impl PartialEq for pthread_rwlock_t { - fn eq(&self, other: &pthread_rwlock_t) -> bool { - self.__sig == other.__sig - && self - .__opaque - .iter() - .zip(other.__opaque.iter()) - .all(|(a, b)| a == b) - } - } - impl Eq for pthread_rwlock_t {} - impl hash::Hash for pthread_rwlock_t { - fn hash(&self, state: &mut H) { - self.__sig.hash(state); - self.__opaque.hash(state); - } - } - - impl PartialEq for pthread_mutex_t { - fn eq(&self, other: &pthread_mutex_t) -> bool { - 
self.__sig == other.__sig - && self - .__opaque - .iter() - .zip(other.__opaque.iter()) - .all(|(a, b)| a == b) - } - } - - impl Eq for pthread_mutex_t {} - - impl hash::Hash for pthread_mutex_t { - fn hash(&self, state: &mut H) { - self.__sig.hash(state); - self.__opaque.hash(state); - } - } - - impl PartialEq for pthread_cond_t { - fn eq(&self, other: &pthread_cond_t) -> bool { - self.__sig == other.__sig - && self - .__opaque - .iter() - .zip(other.__opaque.iter()) - .all(|(a, b)| a == b) - } - } - - impl Eq for pthread_cond_t {} - - impl hash::Hash for pthread_cond_t { - fn hash(&self, state: &mut H) { - self.__sig.hash(state); - self.__opaque.hash(state); - } - } - - impl PartialEq for sockaddr_storage { - fn eq(&self, other: &sockaddr_storage) -> bool { - self.ss_len == other.ss_len - && self.ss_family == other.ss_family - && self - .__ss_pad1 - .iter() - .zip(other.__ss_pad1.iter()) - .all(|(a, b)| a == b) - && self.__ss_align == other.__ss_align - && self - .__ss_pad2 - .iter() - .zip(other.__ss_pad2.iter()) - .all(|(a, b)| a == b) - } - } - - impl Eq for sockaddr_storage {} - - impl hash::Hash for sockaddr_storage { - fn hash(&self, state: &mut H) { - self.ss_len.hash(state); - self.ss_family.hash(state); - self.__ss_pad1.hash(state); - self.__ss_align.hash(state); - self.__ss_pad2.hash(state); - } - } - - impl PartialEq for utmpx { - fn eq(&self, other: &utmpx) -> bool { - self.ut_user - .iter() - .zip(other.ut_user.iter()) - .all(|(a, b)| a == b) - && self.ut_id == other.ut_id - && self.ut_line == other.ut_line - && self.ut_pid == other.ut_pid - && self.ut_type == other.ut_type - && self.ut_tv == other.ut_tv - && self - .ut_host - .iter() - .zip(other.ut_host.iter()) - .all(|(a, b)| a == b) - && self.ut_pad == other.ut_pad - } - } - - impl Eq for utmpx {} - - impl hash::Hash for utmpx { - fn hash(&self, state: &mut H) { - self.ut_user.hash(state); - self.ut_id.hash(state); - self.ut_line.hash(state); - self.ut_pid.hash(state); - self.ut_type.hash(state); - self.ut_tv.hash(state); - self.ut_host.hash(state); - self.ut_pad.hash(state); - } - } - - impl PartialEq for sigevent { - fn eq(&self, other: &sigevent) -> bool { - self.sigev_notify == other.sigev_notify - && self.sigev_signo == other.sigev_signo - && self.sigev_value == other.sigev_value - && self.sigev_notify_attributes == other.sigev_notify_attributes - } - } - - impl Eq for sigevent {} - - impl hash::Hash for sigevent { - fn hash(&self, state: &mut H) { - self.sigev_notify.hash(state); - self.sigev_signo.hash(state); - self.sigev_value.hash(state); - self.sigev_notify_attributes.hash(state); - } - } - - impl PartialEq for processor_cpu_load_info { - fn eq(&self, other: &processor_cpu_load_info) -> bool { - self.cpu_ticks == other.cpu_ticks - } - } - impl Eq for processor_cpu_load_info {} - impl hash::Hash for processor_cpu_load_info { - fn hash(&self, state: &mut H) { - self.cpu_ticks.hash(state); - } - } - - impl PartialEq for processor_basic_info { - fn eq(&self, other: &processor_basic_info) -> bool { - self.cpu_type == other.cpu_type - && self.cpu_subtype == other.cpu_subtype - && self.running == other.running - && self.slot_num == other.slot_num - && self.is_master == other.is_master - } - } - impl Eq for processor_basic_info {} - impl hash::Hash for processor_basic_info { - fn hash(&self, state: &mut H) { - self.cpu_type.hash(state); - self.cpu_subtype.hash(state); - self.running.hash(state); - self.slot_num.hash(state); - self.is_master.hash(state); - } - } - - impl PartialEq for processor_set_basic_info { - fn 
eq(&self, other: &processor_set_basic_info) -> bool { - self.processor_count == other.processor_count - && self.default_policy == other.default_policy - } - } - impl Eq for processor_set_basic_info {} - impl hash::Hash for processor_set_basic_info { - fn hash(&self, state: &mut H) { - self.processor_count.hash(state); - self.default_policy.hash(state); - } - } - - impl PartialEq for processor_set_load_info { - fn eq(&self, other: &processor_set_load_info) -> bool { - self.task_count == other.task_count - && self.thread_count == other.thread_count - && self.load_average == other.load_average - && self.mach_factor == other.mach_factor - } - } - impl Eq for processor_set_load_info {} - impl hash::Hash for processor_set_load_info { - fn hash(&self, state: &mut H) { - self.task_count.hash(state); - self.thread_count.hash(state); - self.load_average.hash(state); - self.mach_factor.hash(state); - } - } - - impl PartialEq for time_value_t { - fn eq(&self, other: &time_value_t) -> bool { - self.seconds == other.seconds && self.microseconds == other.microseconds - } - } - impl Eq for time_value_t {} - impl hash::Hash for time_value_t { - fn hash(&self, state: &mut H) { - self.seconds.hash(state); - self.microseconds.hash(state); - } - } - impl PartialEq for thread_basic_info { - fn eq(&self, other: &thread_basic_info) -> bool { - self.user_time == other.user_time - && self.system_time == other.system_time - && self.cpu_usage == other.cpu_usage - && self.policy == other.policy - && self.run_state == other.run_state - && self.flags == other.flags - && self.suspend_count == other.suspend_count - && self.sleep_time == other.sleep_time - } - } - impl Eq for thread_basic_info {} - impl hash::Hash for thread_basic_info { - fn hash(&self, state: &mut H) { - self.user_time.hash(state); - self.system_time.hash(state); - self.cpu_usage.hash(state); - self.policy.hash(state); - self.run_state.hash(state); - self.flags.hash(state); - self.suspend_count.hash(state); - self.sleep_time.hash(state); - } - } - impl PartialEq for thread_extended_info { - fn eq(&self, other: &thread_extended_info) -> bool { - self.pth_user_time == other.pth_user_time - && self.pth_system_time == other.pth_system_time - && self.pth_cpu_usage == other.pth_cpu_usage - && self.pth_policy == other.pth_policy - && self.pth_run_state == other.pth_run_state - && self.pth_flags == other.pth_flags - && self.pth_sleep_time == other.pth_sleep_time - && self.pth_curpri == other.pth_curpri - && self.pth_priority == other.pth_priority - && self.pth_maxpriority == other.pth_maxpriority - && self - .pth_name - .iter() - .zip(other.pth_name.iter()) - .all(|(a, b)| a == b) - } - } - impl Eq for thread_extended_info {} - impl hash::Hash for thread_extended_info { - fn hash(&self, state: &mut H) { - self.pth_user_time.hash(state); - self.pth_system_time.hash(state); - self.pth_cpu_usage.hash(state); - self.pth_policy.hash(state); - self.pth_run_state.hash(state); - self.pth_flags.hash(state); - self.pth_sleep_time.hash(state); - self.pth_curpri.hash(state); - self.pth_priority.hash(state); - self.pth_maxpriority.hash(state); - self.pth_name.hash(state); - } - } - impl PartialEq for thread_identifier_info { - fn eq(&self, other: &thread_identifier_info) -> bool { - self.thread_id == other.thread_id - && self.thread_handle == other.thread_handle - && self.dispatch_qaddr == other.dispatch_qaddr - } - } - impl Eq for thread_identifier_info {} - impl hash::Hash for thread_identifier_info { - fn hash(&self, state: &mut H) { - self.thread_id.hash(state); - 
self.thread_handle.hash(state); - self.dispatch_qaddr.hash(state); - } - } - impl PartialEq for if_data64 { - fn eq(&self, other: &if_data64) -> bool { - self.ifi_type == other.ifi_type - && self.ifi_typelen == other.ifi_typelen - && self.ifi_physical == other.ifi_physical - && self.ifi_addrlen == other.ifi_addrlen - && self.ifi_hdrlen == other.ifi_hdrlen - && self.ifi_recvquota == other.ifi_recvquota - && self.ifi_xmitquota == other.ifi_xmitquota - && self.ifi_unused1 == other.ifi_unused1 - && self.ifi_mtu == other.ifi_mtu - && self.ifi_metric == other.ifi_metric - && self.ifi_baudrate == other.ifi_baudrate - && self.ifi_ipackets == other.ifi_ipackets - && self.ifi_ierrors == other.ifi_ierrors - && self.ifi_opackets == other.ifi_opackets - && self.ifi_oerrors == other.ifi_oerrors - && self.ifi_collisions == other.ifi_collisions - && self.ifi_ibytes == other.ifi_ibytes - && self.ifi_obytes == other.ifi_obytes - && self.ifi_imcasts == other.ifi_imcasts - && self.ifi_omcasts == other.ifi_omcasts - && self.ifi_iqdrops == other.ifi_iqdrops - && self.ifi_noproto == other.ifi_noproto - && self.ifi_recvtiming == other.ifi_recvtiming - && self.ifi_xmittiming == other.ifi_xmittiming - && self.ifi_lastchange == other.ifi_lastchange - } - } - impl Eq for if_data64 {} - impl hash::Hash for if_data64 { - fn hash(&self, state: &mut H) { - let ifi_type = self.ifi_type; - let ifi_typelen = self.ifi_typelen; - let ifi_physical = self.ifi_physical; - let ifi_addrlen = self.ifi_addrlen; - let ifi_hdrlen = self.ifi_hdrlen; - let ifi_recvquota = self.ifi_recvquota; - let ifi_xmitquota = self.ifi_xmitquota; - let ifi_unused1 = self.ifi_unused1; - let ifi_mtu = self.ifi_mtu; - let ifi_metric = self.ifi_metric; - let ifi_baudrate = self.ifi_baudrate; - let ifi_ipackets = self.ifi_ipackets; - let ifi_ierrors = self.ifi_ierrors; - let ifi_opackets = self.ifi_opackets; - let ifi_oerrors = self.ifi_oerrors; - let ifi_collisions = self.ifi_collisions; - let ifi_ibytes = self.ifi_ibytes; - let ifi_obytes = self.ifi_obytes; - let ifi_imcasts = self.ifi_imcasts; - let ifi_omcasts = self.ifi_omcasts; - let ifi_iqdrops = self.ifi_iqdrops; - let ifi_noproto = self.ifi_noproto; - let ifi_recvtiming = self.ifi_recvtiming; - let ifi_xmittiming = self.ifi_xmittiming; - let ifi_lastchange = self.ifi_lastchange; - ifi_type.hash(state); - ifi_typelen.hash(state); - ifi_physical.hash(state); - ifi_addrlen.hash(state); - ifi_hdrlen.hash(state); - ifi_recvquota.hash(state); - ifi_xmitquota.hash(state); - ifi_unused1.hash(state); - ifi_mtu.hash(state); - ifi_metric.hash(state); - ifi_baudrate.hash(state); - ifi_ipackets.hash(state); - ifi_ierrors.hash(state); - ifi_opackets.hash(state); - ifi_oerrors.hash(state); - ifi_collisions.hash(state); - ifi_ibytes.hash(state); - ifi_obytes.hash(state); - ifi_imcasts.hash(state); - ifi_omcasts.hash(state); - ifi_iqdrops.hash(state); - ifi_noproto.hash(state); - ifi_recvtiming.hash(state); - ifi_xmittiming.hash(state); - ifi_lastchange.hash(state); - } - } - impl PartialEq for if_msghdr2 { - fn eq(&self, other: &if_msghdr2) -> bool { - self.ifm_msglen == other.ifm_msglen - && self.ifm_version == other.ifm_version - && self.ifm_type == other.ifm_type - && self.ifm_addrs == other.ifm_addrs - && self.ifm_flags == other.ifm_flags - && self.ifm_index == other.ifm_index - && self.ifm_snd_len == other.ifm_snd_len - && self.ifm_snd_maxlen == other.ifm_snd_maxlen - && self.ifm_snd_drops == other.ifm_snd_drops - && self.ifm_timer == other.ifm_timer - && self.ifm_data == other.ifm_data - } - } - impl Eq 
for if_msghdr2 {} - impl hash::Hash for if_msghdr2 { - fn hash(&self, state: &mut H) { - let ifm_msglen = self.ifm_msglen; - let ifm_version = self.ifm_version; - let ifm_type = self.ifm_type; - let ifm_addrs = self.ifm_addrs; - let ifm_flags = self.ifm_flags; - let ifm_index = self.ifm_index; - let ifm_snd_len = self.ifm_snd_len; - let ifm_snd_maxlen = self.ifm_snd_maxlen; - let ifm_snd_drops = self.ifm_snd_drops; - let ifm_timer = self.ifm_timer; - let ifm_data = self.ifm_data; - ifm_msglen.hash(state); - ifm_version.hash(state); - ifm_type.hash(state); - ifm_addrs.hash(state); - ifm_flags.hash(state); - ifm_index.hash(state); - ifm_snd_len.hash(state); - ifm_snd_maxlen.hash(state); - ifm_snd_drops.hash(state); - ifm_timer.hash(state); - ifm_data.hash(state); - } - } - impl PartialEq for vm_statistics64 { - fn eq(&self, other: &vm_statistics64) -> bool { - // Otherwise rustfmt crashes... - let total_uncompressed = self.total_uncompressed_pages_in_compressor; - self.free_count == other.free_count - && self.active_count == other.active_count - && self.inactive_count == other.inactive_count - && self.wire_count == other.wire_count - && self.zero_fill_count == other.zero_fill_count - && self.reactivations == other.reactivations - && self.pageins == other.pageins - && self.pageouts == other.pageouts - && self.faults == other.faults - && self.cow_faults == other.cow_faults - && self.lookups == other.lookups - && self.hits == other.hits - && self.purges == other.purges - && self.purgeable_count == other.purgeable_count - && self.speculative_count == other.speculative_count - && self.decompressions == other.decompressions - && self.compressions == other.compressions - && self.swapins == other.swapins - && self.swapouts == other.swapouts - && self.compressor_page_count == other.compressor_page_count - && self.throttled_count == other.throttled_count - && self.external_page_count == other.external_page_count - && self.internal_page_count == other.internal_page_count - && total_uncompressed == other.total_uncompressed_pages_in_compressor - } - } - impl Eq for vm_statistics64 {} - impl hash::Hash for vm_statistics64 { - fn hash(&self, state: &mut H) { - let free_count = self.free_count; - let active_count = self.active_count; - let inactive_count = self.inactive_count; - let wire_count = self.wire_count; - let zero_fill_count = self.zero_fill_count; - let reactivations = self.reactivations; - let pageins = self.pageins; - let pageouts = self.pageouts; - let faults = self.faults; - let cow_faults = self.cow_faults; - let lookups = self.lookups; - let hits = self.hits; - let purges = self.purges; - let purgeable_count = self.purgeable_count; - let speculative_count = self.speculative_count; - let decompressions = self.decompressions; - let compressions = self.compressions; - let swapins = self.swapins; - let swapouts = self.swapouts; - let compressor_page_count = self.compressor_page_count; - let throttled_count = self.throttled_count; - let external_page_count = self.external_page_count; - let internal_page_count = self.internal_page_count; - // Otherwise rustfmt crashes... 
- let total_uncompressed = self.total_uncompressed_pages_in_compressor; - free_count.hash(state); - active_count.hash(state); - inactive_count.hash(state); - wire_count.hash(state); - zero_fill_count.hash(state); - reactivations.hash(state); - pageins.hash(state); - pageouts.hash(state); - faults.hash(state); - cow_faults.hash(state); - lookups.hash(state); - hits.hash(state); - purges.hash(state); - purgeable_count.hash(state); - speculative_count.hash(state); - decompressions.hash(state); - compressions.hash(state); - swapins.hash(state); - swapouts.hash(state); - compressor_page_count.hash(state); - throttled_count.hash(state); - external_page_count.hash(state); - internal_page_count.hash(state); - total_uncompressed.hash(state); - } - } - - impl PartialEq for mach_task_basic_info { - fn eq(&self, other: &mach_task_basic_info) -> bool { - self.virtual_size == other.virtual_size - && self.resident_size == other.resident_size - && self.resident_size_max == other.resident_size_max - && self.user_time == other.user_time - && self.system_time == other.system_time - && self.policy == other.policy - && self.suspend_count == other.suspend_count - } - } - impl Eq for mach_task_basic_info {} - impl hash::Hash for mach_task_basic_info { - fn hash(&self, state: &mut H) { - let virtual_size = self.virtual_size; - let resident_size = self.resident_size; - let resident_size_max = self.resident_size_max; - let user_time = self.user_time; - let system_time = self.system_time; - let policy = self.policy; - let suspend_count = self.suspend_count; - virtual_size.hash(state); - resident_size.hash(state); - resident_size_max.hash(state); - user_time.hash(state); - system_time.hash(state); - policy.hash(state); - suspend_count.hash(state); - } - } - - impl PartialEq for log2phys { - fn eq(&self, other: &log2phys) -> bool { - self.l2p_flags == other.l2p_flags - && self.l2p_contigbytes == other.l2p_contigbytes - && self.l2p_devoffset == other.l2p_devoffset - } - } - impl Eq for log2phys {} - impl hash::Hash for log2phys { - fn hash(&self, state: &mut H) { - let l2p_flags = self.l2p_flags; - let l2p_contigbytes = self.l2p_contigbytes; - let l2p_devoffset = self.l2p_devoffset; - l2p_flags.hash(state); - l2p_contigbytes.hash(state); - l2p_devoffset.hash(state); - } - } - impl PartialEq for os_unfair_lock { - fn eq(&self, other: &os_unfair_lock) -> bool { - self._os_unfair_lock_opaque == other._os_unfair_lock_opaque - } - } - - impl Eq for os_unfair_lock {} - - impl hash::Hash for os_unfair_lock { - fn hash(&self, state: &mut H) { - self._os_unfair_lock_opaque.hash(state); - } - } - - impl PartialEq for sockaddr_vm { - fn eq(&self, other: &sockaddr_vm) -> bool { - self.svm_len == other.svm_len - && self.svm_family == other.svm_family - && self.svm_reserved1 == other.svm_reserved1 - && self.svm_port == other.svm_port - && self.svm_cid == other.svm_cid - } - } - - impl Eq for sockaddr_vm {} - - impl hash::Hash for sockaddr_vm { - fn hash(&self, state: &mut H) { - let svm_len = self.svm_len; - let svm_family = self.svm_family; - let svm_reserved1 = self.svm_reserved1; - let svm_port = self.svm_port; - let svm_cid = self.svm_cid; - - svm_len.hash(state); - svm_family.hash(state); - svm_reserved1.hash(state); - svm_port.hash(state); - svm_cid.hash(state); - } - } - - impl PartialEq for ifdevmtu { - fn eq(&self, other: &ifdevmtu) -> bool { - self.ifdm_current == other.ifdm_current - && self.ifdm_min == other.ifdm_min - && self.ifdm_max == other.ifdm_max - } - } - - impl Eq for ifdevmtu {} - - impl hash::Hash for ifdevmtu 
{ - fn hash(&self, state: &mut H) { - self.ifdm_current.hash(state); - self.ifdm_min.hash(state); - self.ifdm_max.hash(state); - } - } - - impl PartialEq for __c_anonymous_ifk_data { - fn eq(&self, other: &__c_anonymous_ifk_data) -> bool { - unsafe { self.ifk_ptr == other.ifk_ptr && self.ifk_value == other.ifk_value } - } - } - - impl Eq for __c_anonymous_ifk_data {} - impl hash::Hash for __c_anonymous_ifk_data { - fn hash(&self, state: &mut H) { - unsafe { - self.ifk_ptr.hash(state); - self.ifk_value.hash(state); - } - } - } - - impl PartialEq for ifkpi { - fn eq(&self, other: &ifkpi) -> bool { - self.ifk_module_id == other.ifk_module_id && self.ifk_type == other.ifk_type - } - } - - impl Eq for ifkpi {} - - impl hash::Hash for ifkpi { - fn hash(&self, state: &mut H) { - self.ifk_module_id.hash(state); - self.ifk_type.hash(state); - } - } - - impl PartialEq for __c_anonymous_ifr_ifru { - fn eq(&self, other: &__c_anonymous_ifr_ifru) -> bool { - unsafe { - self.ifru_addr == other.ifru_addr - && self.ifru_dstaddr == other.ifru_dstaddr - && self.ifru_broadaddr == other.ifru_broadaddr - && self.ifru_flags == other.ifru_flags - && self.ifru_metrics == other.ifru_metrics - && self.ifru_mtu == other.ifru_mtu - && self.ifru_phys == other.ifru_phys - && self.ifru_media == other.ifru_media - && self.ifru_intval == other.ifru_intval - && self.ifru_data == other.ifru_data - && self.ifru_devmtu == other.ifru_devmtu - && self.ifru_kpi == other.ifru_kpi - && self.ifru_wake_flags == other.ifru_wake_flags - && self.ifru_route_refcnt == other.ifru_route_refcnt - && self - .ifru_cap - .iter() - .zip(other.ifru_cap.iter()) - .all(|(a, b)| a == b) - && self.ifru_functional_type == other.ifru_functional_type - } - } - } - - impl Eq for __c_anonymous_ifr_ifru {} - - impl hash::Hash for __c_anonymous_ifr_ifru { - fn hash(&self, state: &mut H) { - unsafe { - self.ifru_addr.hash(state); - self.ifru_dstaddr.hash(state); - self.ifru_broadaddr.hash(state); - self.ifru_flags.hash(state); - self.ifru_metrics.hash(state); - self.ifru_mtu.hash(state); - self.ifru_phys.hash(state); - self.ifru_media.hash(state); - self.ifru_intval.hash(state); - self.ifru_data.hash(state); - self.ifru_devmtu.hash(state); - self.ifru_kpi.hash(state); - self.ifru_wake_flags.hash(state); - self.ifru_route_refcnt.hash(state); - self.ifru_cap.hash(state); - self.ifru_functional_type.hash(state); - } - } - } - - impl PartialEq for ifreq { - fn eq(&self, other: &ifreq) -> bool { - self.ifr_name == other.ifr_name && self.ifr_ifru == other.ifr_ifru - } - } - - impl Eq for ifreq {} - - impl hash::Hash for ifreq { - fn hash(&self, state: &mut H) { - self.ifr_name.hash(state); - self.ifr_ifru.hash(state); - } - } - - impl Eq for __c_anonymous_ifc_ifcu {} - - impl PartialEq for __c_anonymous_ifc_ifcu { - fn eq(&self, other: &__c_anonymous_ifc_ifcu) -> bool { - unsafe { self.ifcu_buf == other.ifcu_buf && self.ifcu_req == other.ifcu_req } - } - } - - impl hash::Hash for __c_anonymous_ifc_ifcu { - fn hash(&self, state: &mut H) { - unsafe { self.ifcu_buf.hash(state) }; - unsafe { self.ifcu_req.hash(state) }; - } - } - - impl PartialEq for __c_anonymous_ifr_ifru6 { - fn eq(&self, other: &__c_anonymous_ifr_ifru6) -> bool { - unsafe { - self.ifru_addr == other.ifru_addr - && self.ifru_dstaddr == other.ifru_dstaddr - && self.ifru_flags == other.ifru_flags - && self.ifru_flags6 == other.ifru_flags6 - && self.ifru_metrics == other.ifru_metrics - && self.ifru_intval == other.ifru_intval - && self.ifru_data == other.ifru_data - && self - .ifru_scope_id - .iter() - 
.zip(other.ifru_scope_id.iter()) - .all(|(a, b)| a == b) - } - } - } - - impl Eq for __c_anonymous_ifr_ifru6 {} - - impl hash::Hash for __c_anonymous_ifr_ifru6 { - fn hash(&self, state: &mut H) { - unsafe { - self.ifru_addr.hash(state); - self.ifru_dstaddr.hash(state); - self.ifru_flags.hash(state); - self.ifru_flags6.hash(state); - self.ifru_metrics.hash(state); - self.ifru_intval.hash(state); - self.ifru_data.hash(state); - self.ifru_scope_id.hash(state); - } - } - } - - impl PartialEq for in6_ifreq { - fn eq(&self, other: &in6_ifreq) -> bool { - self.ifr_name == other.ifr_name && self.ifr_ifru == other.ifr_ifru - } - } - - impl Eq for in6_ifreq {} - } -} - -pub const _UTX_USERSIZE: usize = 256; -pub const _UTX_LINESIZE: usize = 32; -pub const _UTX_IDSIZE: usize = 4; -pub const _UTX_HOSTSIZE: usize = 256; - -pub const EMPTY: c_short = 0; -pub const RUN_LVL: c_short = 1; -pub const BOOT_TIME: c_short = 2; -pub const OLD_TIME: c_short = 3; -pub const NEW_TIME: c_short = 4; -pub const INIT_PROCESS: c_short = 5; -pub const LOGIN_PROCESS: c_short = 6; -pub const USER_PROCESS: c_short = 7; -pub const DEAD_PROCESS: c_short = 8; -pub const ACCOUNTING: c_short = 9; -pub const SIGNATURE: c_short = 10; -pub const SHUTDOWN_TIME: c_short = 11; - -pub const LC_COLLATE_MASK: c_int = 1 << 0; -pub const LC_CTYPE_MASK: c_int = 1 << 1; -pub const LC_MESSAGES_MASK: c_int = 1 << 2; -pub const LC_MONETARY_MASK: c_int = 1 << 3; -pub const LC_NUMERIC_MASK: c_int = 1 << 4; -pub const LC_TIME_MASK: c_int = 1 << 5; -pub const LC_ALL_MASK: c_int = LC_COLLATE_MASK - | LC_CTYPE_MASK - | LC_MESSAGES_MASK - | LC_MONETARY_MASK - | LC_NUMERIC_MASK - | LC_TIME_MASK; - -pub const CODESET: crate::nl_item = 0; -pub const D_T_FMT: crate::nl_item = 1; -pub const D_FMT: crate::nl_item = 2; -pub const T_FMT: crate::nl_item = 3; -pub const T_FMT_AMPM: crate::nl_item = 4; -pub const AM_STR: crate::nl_item = 5; -pub const PM_STR: crate::nl_item = 6; - -pub const DAY_1: crate::nl_item = 7; -pub const DAY_2: crate::nl_item = 8; -pub const DAY_3: crate::nl_item = 9; -pub const DAY_4: crate::nl_item = 10; -pub const DAY_5: crate::nl_item = 11; -pub const DAY_6: crate::nl_item = 12; -pub const DAY_7: crate::nl_item = 13; - -pub const ABDAY_1: crate::nl_item = 14; -pub const ABDAY_2: crate::nl_item = 15; -pub const ABDAY_3: crate::nl_item = 16; -pub const ABDAY_4: crate::nl_item = 17; -pub const ABDAY_5: crate::nl_item = 18; -pub const ABDAY_6: crate::nl_item = 19; -pub const ABDAY_7: crate::nl_item = 20; - -pub const MON_1: crate::nl_item = 21; -pub const MON_2: crate::nl_item = 22; -pub const MON_3: crate::nl_item = 23; -pub const MON_4: crate::nl_item = 24; -pub const MON_5: crate::nl_item = 25; -pub const MON_6: crate::nl_item = 26; -pub const MON_7: crate::nl_item = 27; -pub const MON_8: crate::nl_item = 28; -pub const MON_9: crate::nl_item = 29; -pub const MON_10: crate::nl_item = 30; -pub const MON_11: crate::nl_item = 31; -pub const MON_12: crate::nl_item = 32; - -pub const ABMON_1: crate::nl_item = 33; -pub const ABMON_2: crate::nl_item = 34; -pub const ABMON_3: crate::nl_item = 35; -pub const ABMON_4: crate::nl_item = 36; -pub const ABMON_5: crate::nl_item = 37; -pub const ABMON_6: crate::nl_item = 38; -pub const ABMON_7: crate::nl_item = 39; -pub const ABMON_8: crate::nl_item = 40; -pub const ABMON_9: crate::nl_item = 41; -pub const ABMON_10: crate::nl_item = 42; -pub const ABMON_11: crate::nl_item = 43; -pub const ABMON_12: crate::nl_item = 44; - -pub const CLOCK_REALTIME: crate::clockid_t = 0; -pub const CLOCK_MONOTONIC_RAW: 
crate::clockid_t = 4;
-pub const CLOCK_MONOTONIC_RAW_APPROX: crate::clockid_t = 5;
-pub const CLOCK_MONOTONIC: crate::clockid_t = 6;
-pub const CLOCK_UPTIME_RAW: crate::clockid_t = 8;
-pub const CLOCK_UPTIME_RAW_APPROX: crate::clockid_t = 9;
-pub const CLOCK_PROCESS_CPUTIME_ID: crate::clockid_t = 12;
-pub const CLOCK_THREAD_CPUTIME_ID: crate::clockid_t = 16;
-
-pub const ERA: crate::nl_item = 45;
-pub const ERA_D_FMT: crate::nl_item = 46;
-pub const ERA_D_T_FMT: crate::nl_item = 47;
-pub const ERA_T_FMT: crate::nl_item = 48;
-pub const ALT_DIGITS: crate::nl_item = 49;
-
-pub const RADIXCHAR: crate::nl_item = 50;
-pub const THOUSEP: crate::nl_item = 51;
-
-pub const YESEXPR: crate::nl_item = 52;
-pub const NOEXPR: crate::nl_item = 53;
-
-pub const YESSTR: crate::nl_item = 54;
-pub const NOSTR: crate::nl_item = 55;
-
-pub const CRNCYSTR: crate::nl_item = 56;
-
-pub const D_MD_ORDER: crate::nl_item = 57;
-
-pub const EXIT_FAILURE: c_int = 1;
-pub const EXIT_SUCCESS: c_int = 0;
-pub const RAND_MAX: c_int = 2147483647;
-pub const EOF: c_int = -1;
-pub const SEEK_SET: c_int = 0;
-pub const SEEK_CUR: c_int = 1;
-pub const SEEK_END: c_int = 2;
-pub const SEEK_HOLE: c_int = 3;
-pub const SEEK_DATA: c_int = 4;
-pub const _IOFBF: c_int = 0;
-pub const _IONBF: c_int = 2;
-pub const _IOLBF: c_int = 1;
-pub const BUFSIZ: c_uint = 1024;
-pub const FOPEN_MAX: c_uint = 20;
-pub const FILENAME_MAX: c_uint = 1024;
-pub const L_tmpnam: c_uint = 1024;
-pub const TMP_MAX: c_uint = 308915776;
-pub const _PC_LINK_MAX: c_int = 1;
-pub const _PC_MAX_CANON: c_int = 2;
-pub const _PC_MAX_INPUT: c_int = 3;
-pub const _PC_NAME_MAX: c_int = 4;
-pub const _PC_PATH_MAX: c_int = 5;
-pub const _PC_PIPE_BUF: c_int = 6;
-pub const _PC_CHOWN_RESTRICTED: c_int = 7;
-pub const _PC_NO_TRUNC: c_int = 8;
-pub const _PC_VDISABLE: c_int = 9;
-pub const _PC_NAME_CHARS_MAX: c_int = 10;
-pub const _PC_CASE_SENSITIVE: c_int = 11;
-pub const _PC_CASE_PRESERVING: c_int = 12;
-pub const _PC_EXTENDED_SECURITY_NP: c_int = 13;
-pub const _PC_AUTH_OPAQUE_NP: c_int = 14;
-pub const _PC_2_SYMLINKS: c_int = 15;
-pub const _PC_ALLOC_SIZE_MIN: c_int = 16;
-pub const _PC_ASYNC_IO: c_int = 17;
-pub const _PC_FILESIZEBITS: c_int = 18;
-pub const _PC_PRIO_IO: c_int = 19;
-pub const _PC_REC_INCR_XFER_SIZE: c_int = 20;
-pub const _PC_REC_MAX_XFER_SIZE: c_int = 21;
-pub const _PC_REC_MIN_XFER_SIZE: c_int = 22;
-pub const _PC_REC_XFER_ALIGN: c_int = 23;
-pub const _PC_SYMLINK_MAX: c_int = 24;
-pub const _PC_SYNC_IO: c_int = 25;
-pub const _PC_XATTR_SIZE_BITS: c_int = 26;
-pub const _PC_MIN_HOLE_SIZE: c_int = 27;
-pub const O_EVTONLY: c_int = 0x00008000;
-pub const O_NOCTTY: c_int = 0x00020000;
-pub const O_DIRECTORY: c_int = 0x00100000;
-pub const O_SYMLINK: c_int = 0x00200000;
-pub const O_DSYNC: c_int = 0x00400000;
-pub const O_CLOEXEC: c_int = 0x01000000;
-pub const O_NOFOLLOW_ANY: c_int = 0x20000000;
-pub const O_EXEC: c_int = 0x40000000;
-pub const O_SEARCH: c_int = O_EXEC | O_DIRECTORY;
-pub const S_IFIFO: mode_t = 0o1_0000;
-pub const S_IFCHR: mode_t = 0o2_0000;
-pub const S_IFBLK: mode_t = 0o6_0000;
-pub const S_IFDIR: mode_t = 0o4_0000;
-pub const S_IFREG: mode_t = 0o10_0000;
-pub const S_IFLNK: mode_t = 0o12_0000;
-pub const S_IFSOCK: mode_t = 0o14_0000;
-pub const S_IFMT: mode_t = 0o17_0000;
-pub const S_IEXEC: mode_t = 0o0100;
-pub const S_IWRITE: mode_t = 0o0200;
-pub const S_IREAD: mode_t = 0o0400;
-pub const S_IRWXU: mode_t = 0o0700;
-pub const S_IXUSR: mode_t = 0o0100;
-pub const S_IWUSR: mode_t = 0o0200;
-pub const S_IRUSR: mode_t = 0o0400;
-pub const S_IRWXG: mode_t = 0o0070;
-pub const S_IXGRP: mode_t = 0o0010;
-pub const S_IWGRP: mode_t = 0o0020;
-pub const S_IRGRP: mode_t = 0o0040;
-pub const S_IRWXO: mode_t = 0o0007;
-pub const S_IXOTH: mode_t = 0o0001;
-pub const S_IWOTH: mode_t = 0o0002;
-pub const S_IROTH: mode_t = 0o0004;
-pub const F_OK: c_int = 0;
-pub const R_OK: c_int = 4;
-pub const W_OK: c_int = 2;
-pub const X_OK: c_int = 1;
-pub const STDIN_FILENO: c_int = 0;
-pub const STDOUT_FILENO: c_int = 1;
-pub const STDERR_FILENO: c_int = 2;
-pub const F_LOCK: c_int = 1;
-pub const F_TEST: c_int = 3;
-pub const F_TLOCK: c_int = 2;
-pub const F_ULOCK: c_int = 0;
-pub const F_GETLK: c_int = 7;
-pub const F_SETLK: c_int = 8;
-pub const F_SETLKW: c_int = 9;
-pub const SIGHUP: c_int = 1;
-pub const SIGINT: c_int = 2;
-pub const SIGQUIT: c_int = 3;
-pub const SIGILL: c_int = 4;
-pub const SIGABRT: c_int = 6;
-pub const SIGEMT: c_int = 7;
-pub const SIGFPE: c_int = 8;
-pub const SIGKILL: c_int = 9;
-pub const SIGSEGV: c_int = 11;
-pub const SIGPIPE: c_int = 13;
-pub const SIGALRM: c_int = 14;
-pub const SIGTERM: c_int = 15;
-
-pub const PROT_NONE: c_int = 0;
-pub const PROT_READ: c_int = 1;
-pub const PROT_WRITE: c_int = 2;
-pub const PROT_EXEC: c_int = 4;
-
-pub const PT_TRACE_ME: c_int = 0;
-pub const PT_READ_I: c_int = 1;
-pub const PT_READ_D: c_int = 2;
-pub const PT_READ_U: c_int = 3;
-pub const PT_WRITE_I: c_int = 4;
-pub const PT_WRITE_D: c_int = 5;
-pub const PT_WRITE_U: c_int = 6;
-pub const PT_CONTINUE: c_int = 7;
-pub const PT_KILL: c_int = 8;
-pub const PT_STEP: c_int = 9;
-pub const PT_ATTACH: c_int = 10;
-pub const PT_DETACH: c_int = 11;
-pub const PT_SIGEXC: c_int = 12;
-pub const PT_THUPDATE: c_int = 13;
-pub const PT_ATTACHEXC: c_int = 14;
-
-pub const PT_FORCEQUOTA: c_int = 30;
-pub const PT_DENY_ATTACH: c_int = 31;
-pub const PT_FIRSTMACH: c_int = 32;
-
-pub const MAP_FILE: c_int = 0x0000;
-pub const MAP_SHARED: c_int = 0x0001;
-pub const MAP_PRIVATE: c_int = 0x0002;
-pub const MAP_FIXED: c_int = 0x0010;
-pub const MAP_ANON: c_int = 0x1000;
-pub const MAP_ANONYMOUS: c_int = MAP_ANON;
-
-pub const CPU_STATE_USER: c_int = 0;
-pub const CPU_STATE_SYSTEM: c_int = 1;
-pub const CPU_STATE_IDLE: c_int = 2;
-pub const CPU_STATE_NICE: c_int = 3;
-pub const CPU_STATE_MAX: c_int = 4;
-
-pub const PROCESSOR_BASIC_INFO: c_int = 1;
-pub const PROCESSOR_CPU_LOAD_INFO: c_int = 2;
-pub const PROCESSOR_PM_REGS_INFO: c_int = 0x10000001;
-pub const PROCESSOR_TEMPERATURE: c_int = 0x10000002;
-pub const PROCESSOR_SET_LOAD_INFO: c_int = 4;
-pub const PROCESSOR_SET_BASIC_INFO: c_int = 5;
-
-deprecated_mach!
{ - pub const VM_FLAGS_FIXED: c_int = 0x0000; - pub const VM_FLAGS_ANYWHERE: c_int = 0x0001; - pub const VM_FLAGS_PURGABLE: c_int = 0x0002; - pub const VM_FLAGS_RANDOM_ADDR: c_int = 0x0008; - pub const VM_FLAGS_NO_CACHE: c_int = 0x0010; - pub const VM_FLAGS_RESILIENT_CODESIGN: c_int = 0x0020; - pub const VM_FLAGS_RESILIENT_MEDIA: c_int = 0x0040; - pub const VM_FLAGS_OVERWRITE: c_int = 0x4000; - pub const VM_FLAGS_SUPERPAGE_MASK: c_int = 0x70000; - pub const VM_FLAGS_RETURN_DATA_ADDR: c_int = 0x100000; - pub const VM_FLAGS_RETURN_4K_DATA_ADDR: c_int = 0x800000; - pub const VM_FLAGS_ALIAS_MASK: c_int = 0xFF000000; - pub const VM_FLAGS_USER_ALLOCATE: c_int = 0xff07401f; - pub const VM_FLAGS_USER_MAP: c_int = 0xff97401f; - pub const VM_FLAGS_USER_REMAP: c_int = VM_FLAGS_FIXED - | VM_FLAGS_ANYWHERE - | VM_FLAGS_RANDOM_ADDR - | VM_FLAGS_OVERWRITE - | VM_FLAGS_RETURN_DATA_ADDR - | VM_FLAGS_RESILIENT_CODESIGN; - - pub const VM_FLAGS_SUPERPAGE_SHIFT: c_int = 16; - pub const SUPERPAGE_NONE: c_int = 0; - pub const SUPERPAGE_SIZE_ANY: c_int = 1; - pub const VM_FLAGS_SUPERPAGE_NONE: c_int = SUPERPAGE_NONE << VM_FLAGS_SUPERPAGE_SHIFT; - pub const VM_FLAGS_SUPERPAGE_SIZE_ANY: c_int = SUPERPAGE_SIZE_ANY << VM_FLAGS_SUPERPAGE_SHIFT; - pub const SUPERPAGE_SIZE_2MB: c_int = 2; - pub const VM_FLAGS_SUPERPAGE_SIZE_2MB: c_int = SUPERPAGE_SIZE_2MB << VM_FLAGS_SUPERPAGE_SHIFT; - - pub const VM_MEMORY_MALLOC: c_int = 1; - pub const VM_MEMORY_MALLOC_SMALL: c_int = 2; - pub const VM_MEMORY_MALLOC_LARGE: c_int = 3; - pub const VM_MEMORY_MALLOC_HUGE: c_int = 4; - pub const VM_MEMORY_SBRK: c_int = 5; - pub const VM_MEMORY_REALLOC: c_int = 6; - pub const VM_MEMORY_MALLOC_TINY: c_int = 7; - pub const VM_MEMORY_MALLOC_LARGE_REUSABLE: c_int = 8; - pub const VM_MEMORY_MALLOC_LARGE_REUSED: c_int = 9; - pub const VM_MEMORY_ANALYSIS_TOOL: c_int = 10; - pub const VM_MEMORY_MALLOC_NANO: c_int = 11; - pub const VM_MEMORY_MACH_MSG: c_int = 20; - pub const VM_MEMORY_IOKIT: c_int = 21; - pub const VM_MEMORY_STACK: c_int = 30; - pub const VM_MEMORY_GUARD: c_int = 31; - pub const VM_MEMORY_SHARED_PMAP: c_int = 32; - pub const VM_MEMORY_DYLIB: c_int = 33; - pub const VM_MEMORY_OBJC_DISPATCHERS: c_int = 34; - pub const VM_MEMORY_UNSHARED_PMAP: c_int = 35; - pub const VM_MEMORY_APPKIT: c_int = 40; - pub const VM_MEMORY_FOUNDATION: c_int = 41; - pub const VM_MEMORY_COREGRAPHICS: c_int = 42; - pub const VM_MEMORY_CORESERVICES: c_int = 43; - pub const VM_MEMORY_CARBON: c_int = VM_MEMORY_CORESERVICES; - pub const VM_MEMORY_JAVA: c_int = 44; - pub const VM_MEMORY_COREDATA: c_int = 45; - pub const VM_MEMORY_COREDATA_OBJECTIDS: c_int = 46; - pub const VM_MEMORY_ATS: c_int = 50; - pub const VM_MEMORY_LAYERKIT: c_int = 51; - pub const VM_MEMORY_CGIMAGE: c_int = 52; - pub const VM_MEMORY_TCMALLOC: c_int = 53; - pub const VM_MEMORY_COREGRAPHICS_DATA: c_int = 54; - pub const VM_MEMORY_COREGRAPHICS_SHARED: c_int = 55; - pub const VM_MEMORY_COREGRAPHICS_FRAMEBUFFERS: c_int = 56; - pub const VM_MEMORY_COREGRAPHICS_BACKINGSTORES: c_int = 57; - pub const VM_MEMORY_COREGRAPHICS_XALLOC: c_int = 58; - pub const VM_MEMORY_COREGRAPHICS_MISC: c_int = VM_MEMORY_COREGRAPHICS; - pub const VM_MEMORY_DYLD: c_int = 60; - pub const VM_MEMORY_DYLD_MALLOC: c_int = 61; - pub const VM_MEMORY_SQLITE: c_int = 62; - pub const VM_MEMORY_JAVASCRIPT_CORE: c_int = 63; - pub const VM_MEMORY_JAVASCRIPT_JIT_EXECUTABLE_ALLOCATOR: c_int = 64; - pub const VM_MEMORY_JAVASCRIPT_JIT_REGISTER_FILE: c_int = 65; - pub const VM_MEMORY_GLSL: c_int = 66; - pub const VM_MEMORY_OPENCL: c_int = 
67; - pub const VM_MEMORY_COREIMAGE: c_int = 68; - pub const VM_MEMORY_WEBCORE_PURGEABLE_BUFFERS: c_int = 69; - pub const VM_MEMORY_IMAGEIO: c_int = 70; - pub const VM_MEMORY_COREPROFILE: c_int = 71; - pub const VM_MEMORY_ASSETSD: c_int = 72; - pub const VM_MEMORY_OS_ALLOC_ONCE: c_int = 73; - pub const VM_MEMORY_LIBDISPATCH: c_int = 74; - pub const VM_MEMORY_ACCELERATE: c_int = 75; - pub const VM_MEMORY_COREUI: c_int = 76; - pub const VM_MEMORY_COREUIFILE: c_int = 77; - pub const VM_MEMORY_GENEALOGY: c_int = 78; - pub const VM_MEMORY_RAWCAMERA: c_int = 79; - pub const VM_MEMORY_CORPSEINFO: c_int = 80; - pub const VM_MEMORY_ASL: c_int = 81; - pub const VM_MEMORY_SWIFT_RUNTIME: c_int = 82; - pub const VM_MEMORY_SWIFT_METADATA: c_int = 83; - pub const VM_MEMORY_DHMM: c_int = 84; - pub const VM_MEMORY_SCENEKIT: c_int = 86; - pub const VM_MEMORY_SKYWALK: c_int = 87; - pub const VM_MEMORY_APPLICATION_SPECIFIC_1: c_int = 240; - pub const VM_MEMORY_APPLICATION_SPECIFIC_16: c_int = 255; -} - -pub const MAP_FAILED: *mut c_void = !0 as *mut c_void; - -pub const MCL_CURRENT: c_int = 0x0001; -pub const MCL_FUTURE: c_int = 0x0002; - -pub const MS_ASYNC: c_int = 0x0001; -pub const MS_INVALIDATE: c_int = 0x0002; -pub const MS_SYNC: c_int = 0x0010; - -pub const MS_KILLPAGES: c_int = 0x0004; -pub const MS_DEACTIVATE: c_int = 0x0008; - -pub const EPERM: c_int = 1; -pub const ENOENT: c_int = 2; -pub const ESRCH: c_int = 3; -pub const EINTR: c_int = 4; -pub const EIO: c_int = 5; -pub const ENXIO: c_int = 6; -pub const E2BIG: c_int = 7; -pub const ENOEXEC: c_int = 8; -pub const EBADF: c_int = 9; -pub const ECHILD: c_int = 10; -pub const EDEADLK: c_int = 11; -pub const ENOMEM: c_int = 12; -pub const EACCES: c_int = 13; -pub const EFAULT: c_int = 14; -pub const ENOTBLK: c_int = 15; -pub const EBUSY: c_int = 16; -pub const EEXIST: c_int = 17; -pub const EXDEV: c_int = 18; -pub const ENODEV: c_int = 19; -pub const ENOTDIR: c_int = 20; -pub const EISDIR: c_int = 21; -pub const EINVAL: c_int = 22; -pub const ENFILE: c_int = 23; -pub const EMFILE: c_int = 24; -pub const ENOTTY: c_int = 25; -pub const ETXTBSY: c_int = 26; -pub const EFBIG: c_int = 27; -pub const ENOSPC: c_int = 28; -pub const ESPIPE: c_int = 29; -pub const EROFS: c_int = 30; -pub const EMLINK: c_int = 31; -pub const EPIPE: c_int = 32; -pub const EDOM: c_int = 33; -pub const ERANGE: c_int = 34; -pub const EAGAIN: c_int = 35; -pub const EWOULDBLOCK: c_int = EAGAIN; -pub const EINPROGRESS: c_int = 36; -pub const EALREADY: c_int = 37; -pub const ENOTSOCK: c_int = 38; -pub const EDESTADDRREQ: c_int = 39; -pub const EMSGSIZE: c_int = 40; -pub const EPROTOTYPE: c_int = 41; -pub const ENOPROTOOPT: c_int = 42; -pub const EPROTONOSUPPORT: c_int = 43; -pub const ESOCKTNOSUPPORT: c_int = 44; -pub const ENOTSUP: c_int = 45; -pub const EPFNOSUPPORT: c_int = 46; -pub const EAFNOSUPPORT: c_int = 47; -pub const EADDRINUSE: c_int = 48; -pub const EADDRNOTAVAIL: c_int = 49; -pub const ENETDOWN: c_int = 50; -pub const ENETUNREACH: c_int = 51; -pub const ENETRESET: c_int = 52; -pub const ECONNABORTED: c_int = 53; -pub const ECONNRESET: c_int = 54; -pub const ENOBUFS: c_int = 55; -pub const EISCONN: c_int = 56; -pub const ENOTCONN: c_int = 57; -pub const ESHUTDOWN: c_int = 58; -pub const ETOOMANYREFS: c_int = 59; -pub const ETIMEDOUT: c_int = 60; -pub const ECONNREFUSED: c_int = 61; -pub const ELOOP: c_int = 62; -pub const ENAMETOOLONG: c_int = 63; -pub const EHOSTDOWN: c_int = 64; -pub const EHOSTUNREACH: c_int = 65; -pub const ENOTEMPTY: c_int = 66; -pub const EPROCLIM: 
c_int = 67; -pub const EUSERS: c_int = 68; -pub const EDQUOT: c_int = 69; -pub const ESTALE: c_int = 70; -pub const EREMOTE: c_int = 71; -pub const EBADRPC: c_int = 72; -pub const ERPCMISMATCH: c_int = 73; -pub const EPROGUNAVAIL: c_int = 74; -pub const EPROGMISMATCH: c_int = 75; -pub const EPROCUNAVAIL: c_int = 76; -pub const ENOLCK: c_int = 77; -pub const ENOSYS: c_int = 78; -pub const EFTYPE: c_int = 79; -pub const EAUTH: c_int = 80; -pub const ENEEDAUTH: c_int = 81; -pub const EPWROFF: c_int = 82; -pub const EDEVERR: c_int = 83; -pub const EOVERFLOW: c_int = 84; -pub const EBADEXEC: c_int = 85; -pub const EBADARCH: c_int = 86; -pub const ESHLIBVERS: c_int = 87; -pub const EBADMACHO: c_int = 88; -pub const ECANCELED: c_int = 89; -pub const EIDRM: c_int = 90; -pub const ENOMSG: c_int = 91; -pub const EILSEQ: c_int = 92; -pub const ENOATTR: c_int = 93; -pub const EBADMSG: c_int = 94; -pub const EMULTIHOP: c_int = 95; -pub const ENODATA: c_int = 96; -pub const ENOLINK: c_int = 97; -pub const ENOSR: c_int = 98; -pub const ENOSTR: c_int = 99; -pub const EPROTO: c_int = 100; -pub const ETIME: c_int = 101; -pub const EOPNOTSUPP: c_int = 102; -pub const ENOPOLICY: c_int = 103; -pub const ENOTRECOVERABLE: c_int = 104; -pub const EOWNERDEAD: c_int = 105; -pub const EQFULL: c_int = 106; -pub const ELAST: c_int = 106; - -pub const EAI_AGAIN: c_int = 2; -pub const EAI_BADFLAGS: c_int = 3; -pub const EAI_FAIL: c_int = 4; -pub const EAI_FAMILY: c_int = 5; -pub const EAI_MEMORY: c_int = 6; -pub const EAI_NODATA: c_int = 7; -pub const EAI_NONAME: c_int = 8; -pub const EAI_SERVICE: c_int = 9; -pub const EAI_SOCKTYPE: c_int = 10; -pub const EAI_SYSTEM: c_int = 11; -pub const EAI_OVERFLOW: c_int = 14; - -pub const F_DUPFD: c_int = 0; -pub const F_DUPFD_CLOEXEC: c_int = 67; -pub const F_GETFD: c_int = 1; -pub const F_SETFD: c_int = 2; -pub const F_GETFL: c_int = 3; -pub const F_SETFL: c_int = 4; -pub const F_PREALLOCATE: c_int = 42; -pub const F_RDADVISE: c_int = 44; -pub const F_RDAHEAD: c_int = 45; -pub const F_NOCACHE: c_int = 48; -pub const F_LOG2PHYS: c_int = 49; -pub const F_GETPATH: c_int = 50; -pub const F_FULLFSYNC: c_int = 51; -pub const F_FREEZE_FS: c_int = 53; -pub const F_THAW_FS: c_int = 54; -pub const F_GLOBAL_NOCACHE: c_int = 55; -pub const F_NODIRECT: c_int = 62; -pub const F_LOG2PHYS_EXT: c_int = 65; -pub const F_BARRIERFSYNC: c_int = 85; -// See https://github.com/apple/darwin-xnu/blob/main/bsd/sys/fcntl.h -pub const F_OFD_SETLK: c_int = 90; /* Acquire or release open file description lock */ -pub const F_OFD_SETLKW: c_int = 91; /* (as F_OFD_SETLK but blocking if conflicting lock) */ -pub const F_OFD_GETLK: c_int = 92; /* Examine OFD lock */ -pub const F_PUNCHHOLE: c_int = 99; -pub const F_TRIM_ACTIVE_FILE: c_int = 100; -pub const F_SPECULATIVE_READ: c_int = 101; -pub const F_GETPATH_NOFIRMLINK: c_int = 102; -pub const F_TRANSFEREXTENTS: c_int = 110; - -pub const F_ALLOCATECONTIG: c_uint = 0x02; -pub const F_ALLOCATEALL: c_uint = 0x04; -pub const F_ALLOCATEPERSIST: c_uint = 0x08; - -pub const F_PEOFPOSMODE: c_int = 3; -pub const F_VOLPOSMODE: c_int = 4; - -pub const AT_FDCWD: c_int = -2; -pub const AT_EACCESS: c_int = 0x0010; -pub const AT_SYMLINK_NOFOLLOW: c_int = 0x0020; -pub const AT_SYMLINK_FOLLOW: c_int = 0x0040; -pub const AT_REMOVEDIR: c_int = 0x0080; - -pub const PTHREAD_INTROSPECTION_THREAD_CREATE: c_uint = 1; -pub const PTHREAD_INTROSPECTION_THREAD_START: c_uint = 2; -pub const PTHREAD_INTROSPECTION_THREAD_TERMINATE: c_uint = 3; -pub const PTHREAD_INTROSPECTION_THREAD_DESTROY: 
c_uint = 4; - -pub const TIOCMODG: c_ulong = 0x40047403; -pub const TIOCMODS: c_ulong = 0x80047404; -pub const TIOCM_LE: c_int = 0x1; -pub const TIOCM_DTR: c_int = 0x2; -pub const TIOCM_RTS: c_int = 0x4; -pub const TIOCM_ST: c_int = 0x8; -pub const TIOCM_SR: c_int = 0x10; -pub const TIOCM_CTS: c_int = 0x20; -pub const TIOCM_CAR: c_int = 0x40; -pub const TIOCM_CD: c_int = 0x40; -pub const TIOCM_RNG: c_int = 0x80; -pub const TIOCM_RI: c_int = 0x80; -pub const TIOCM_DSR: c_int = 0x100; -pub const TIOCEXCL: c_int = 0x2000740d; -pub const TIOCNXCL: c_int = 0x2000740e; -pub const TIOCFLUSH: c_ulong = 0x80047410; -pub const TIOCGETD: c_ulong = 0x4004741a; -pub const TIOCSETD: c_ulong = 0x8004741b; -pub const TIOCIXON: c_uint = 0x20007481; -pub const TIOCIXOFF: c_uint = 0x20007480; -pub const TIOCSDTR: c_uint = 0x20007479; -pub const TIOCCDTR: c_uint = 0x20007478; -pub const TIOCGPGRP: c_ulong = 0x40047477; -pub const TIOCSPGRP: c_ulong = 0x80047476; -pub const TIOCOUTQ: c_ulong = 0x40047473; -pub const TIOCSTI: c_ulong = 0x80017472; -pub const TIOCNOTTY: c_uint = 0x20007471; -pub const TIOCPKT: c_ulong = 0x80047470; -pub const TIOCPKT_DATA: c_int = 0x0; -pub const TIOCPKT_FLUSHREAD: c_int = 0x1; -pub const TIOCPKT_FLUSHWRITE: c_int = 0x2; -pub const TIOCPKT_STOP: c_int = 0x4; -pub const TIOCPKT_START: c_int = 0x8; -pub const TIOCPKT_NOSTOP: c_int = 0x10; -pub const TIOCPKT_DOSTOP: c_int = 0x20; -pub const TIOCPKT_IOCTL: c_int = 0x40; -pub const TIOCSTOP: c_uint = 0x2000746f; -pub const TIOCSTART: c_uint = 0x2000746e; -pub const TIOCMSET: c_ulong = 0x8004746d; -pub const TIOCMBIS: c_ulong = 0x8004746c; -pub const TIOCMBIC: c_ulong = 0x8004746b; -pub const TIOCMGET: c_ulong = 0x4004746a; -pub const TIOCREMOTE: c_ulong = 0x80047469; -pub const TIOCGWINSZ: c_ulong = 0x40087468; -pub const TIOCSWINSZ: c_ulong = 0x80087467; -pub const TIOCUCNTL: c_ulong = 0x80047466; -pub const TIOCSTAT: c_uint = 0x20007465; -pub const TIOCSCONS: c_uint = 0x20007463; -pub const TIOCCONS: c_ulong = 0x80047462; -pub const TIOCSCTTY: c_uint = 0x20007461; -pub const TIOCEXT: c_ulong = 0x80047460; -pub const TIOCSIG: c_uint = 0x2000745f; -pub const TIOCDRAIN: c_uint = 0x2000745e; -pub const TIOCMSDTRWAIT: c_ulong = 0x8004745b; -pub const TIOCMGDTRWAIT: c_ulong = 0x4004745a; -pub const TIOCSDRAINWAIT: c_ulong = 0x80047457; -pub const TIOCGDRAINWAIT: c_ulong = 0x40047456; -pub const TIOCDSIMICROCODE: c_uint = 0x20007455; -pub const TIOCPTYGRANT: c_uint = 0x20007454; -pub const TIOCPTYGNAME: c_uint = 0x40807453; -pub const TIOCPTYUNLK: c_uint = 0x20007452; -pub const TIOCGETA: c_ulong = 0x40487413; -pub const TIOCSETA: c_ulong = 0x80487414; -pub const TIOCSETAW: c_ulong = 0x80487415; -pub const TIOCSETAF: c_ulong = 0x80487416; - -pub const BIOCGRSIG: c_ulong = 0x40044272; -pub const BIOCSRSIG: c_ulong = 0x80044273; -pub const BIOCSDLT: c_ulong = 0x80044278; -pub const BIOCGSEESENT: c_ulong = 0x40044276; -pub const BIOCSSEESENT: c_ulong = 0x80044277; -pub const BIOCGDLTLIST: c_ulong = 0xc00c4279; - -pub const FIODTYPE: c_ulong = 0x4004667a; - -pub const B0: speed_t = 0; -pub const B50: speed_t = 50; -pub const B75: speed_t = 75; -pub const B110: speed_t = 110; -pub const B134: speed_t = 134; -pub const B150: speed_t = 150; -pub const B200: speed_t = 200; -pub const B300: speed_t = 300; -pub const B600: speed_t = 600; -pub const B1200: speed_t = 1200; -pub const B1800: speed_t = 1800; -pub const B2400: speed_t = 2400; -pub const B4800: speed_t = 4800; -pub const B9600: speed_t = 9600; -pub const B19200: speed_t = 19200; -pub 
const B38400: speed_t = 38400; -pub const B7200: speed_t = 7200; -pub const B14400: speed_t = 14400; -pub const B28800: speed_t = 28800; -pub const B57600: speed_t = 57600; -pub const B76800: speed_t = 76800; -pub const B115200: speed_t = 115200; -pub const B230400: speed_t = 230400; -pub const EXTA: speed_t = 19200; -pub const EXTB: speed_t = 38400; - -pub const SIGTRAP: c_int = 5; - -pub const GLOB_APPEND: c_int = 0x0001; -pub const GLOB_DOOFFS: c_int = 0x0002; -pub const GLOB_ERR: c_int = 0x0004; -pub const GLOB_MARK: c_int = 0x0008; -pub const GLOB_NOCHECK: c_int = 0x0010; -pub const GLOB_NOSORT: c_int = 0x0020; -pub const GLOB_NOESCAPE: c_int = 0x2000; - -pub const GLOB_NOSPACE: c_int = -1; -pub const GLOB_ABORTED: c_int = -2; -pub const GLOB_NOMATCH: c_int = -3; - -pub const POSIX_MADV_NORMAL: c_int = 0; -pub const POSIX_MADV_RANDOM: c_int = 1; -pub const POSIX_MADV_SEQUENTIAL: c_int = 2; -pub const POSIX_MADV_WILLNEED: c_int = 3; -pub const POSIX_MADV_DONTNEED: c_int = 4; - -pub const _SC_IOV_MAX: c_int = 56; -pub const _SC_GETGR_R_SIZE_MAX: c_int = 70; -pub const _SC_GETPW_R_SIZE_MAX: c_int = 71; -pub const _SC_LOGIN_NAME_MAX: c_int = 73; -pub const _SC_MQ_PRIO_MAX: c_int = 75; -pub const _SC_THREAD_ATTR_STACKADDR: c_int = 82; -pub const _SC_THREAD_ATTR_STACKSIZE: c_int = 83; -pub const _SC_THREAD_DESTRUCTOR_ITERATIONS: c_int = 85; -pub const _SC_THREAD_KEYS_MAX: c_int = 86; -pub const _SC_THREAD_PRIO_INHERIT: c_int = 87; -pub const _SC_THREAD_PRIO_PROTECT: c_int = 88; -pub const _SC_THREAD_PRIORITY_SCHEDULING: c_int = 89; -pub const _SC_THREAD_PROCESS_SHARED: c_int = 90; -pub const _SC_THREAD_SAFE_FUNCTIONS: c_int = 91; -pub const _SC_THREAD_STACK_MIN: c_int = 93; -pub const _SC_THREAD_THREADS_MAX: c_int = 94; -pub const _SC_THREADS: c_int = 96; -pub const _SC_TTY_NAME_MAX: c_int = 101; -pub const _SC_ATEXIT_MAX: c_int = 107; -pub const _SC_XOPEN_CRYPT: c_int = 108; -pub const _SC_XOPEN_ENH_I18N: c_int = 109; -pub const _SC_XOPEN_LEGACY: c_int = 110; -pub const _SC_XOPEN_REALTIME: c_int = 111; -pub const _SC_XOPEN_REALTIME_THREADS: c_int = 112; -pub const _SC_XOPEN_SHM: c_int = 113; -pub const _SC_XOPEN_UNIX: c_int = 115; -pub const _SC_XOPEN_VERSION: c_int = 116; -pub const _SC_XOPEN_XCU_VERSION: c_int = 121; -pub const _SC_PHYS_PAGES: c_int = 200; - -pub const PTHREAD_PROCESS_PRIVATE: c_int = 2; -pub const PTHREAD_PROCESS_SHARED: c_int = 1; -pub const PTHREAD_CREATE_JOINABLE: c_int = 1; -pub const PTHREAD_CREATE_DETACHED: c_int = 2; -pub const PTHREAD_INHERIT_SCHED: c_int = 1; -pub const PTHREAD_EXPLICIT_SCHED: c_int = 2; -pub const PTHREAD_CANCEL_ENABLE: c_int = 0x01; -pub const PTHREAD_CANCEL_DISABLE: c_int = 0x00; -pub const PTHREAD_CANCEL_DEFERRED: c_int = 0x02; -pub const PTHREAD_CANCEL_ASYNCHRONOUS: c_int = 0x00; -pub const PTHREAD_CANCELED: *mut c_void = 1 as *mut c_void; -pub const PTHREAD_SCOPE_SYSTEM: c_int = 1; -pub const PTHREAD_SCOPE_PROCESS: c_int = 2; -pub const PTHREAD_PRIO_NONE: c_int = 0; -pub const PTHREAD_PRIO_INHERIT: c_int = 1; -pub const PTHREAD_PRIO_PROTECT: c_int = 2; - -#[cfg(target_arch = "aarch64")] -pub const PTHREAD_STACK_MIN: size_t = 16384; -#[cfg(not(target_arch = "aarch64"))] -pub const PTHREAD_STACK_MIN: size_t = 8192; - -pub const RLIMIT_CPU: c_int = 0; -pub const RLIMIT_FSIZE: c_int = 1; -pub const RLIMIT_DATA: c_int = 2; -pub const RLIMIT_STACK: c_int = 3; -pub const RLIMIT_CORE: c_int = 4; -pub const RLIMIT_AS: c_int = 5; -pub const RLIMIT_RSS: c_int = RLIMIT_AS; -pub const RLIMIT_MEMLOCK: c_int = 6; -pub const RLIMIT_NPROC: c_int = 7; -pub 
const RLIMIT_NOFILE: c_int = 8; -#[deprecated(since = "0.2.64", note = "Not stable across OS versions")] -pub const RLIM_NLIMITS: c_int = 9; -pub const _RLIMIT_POSIX_FLAG: c_int = 0x1000; - -pub const RLIM_INFINITY: rlim_t = 0x7fff_ffff_ffff_ffff; - -pub const RUSAGE_SELF: c_int = 0; -pub const RUSAGE_CHILDREN: c_int = -1; - -pub const MADV_NORMAL: c_int = 0; -pub const MADV_RANDOM: c_int = 1; -pub const MADV_SEQUENTIAL: c_int = 2; -pub const MADV_WILLNEED: c_int = 3; -pub const MADV_DONTNEED: c_int = 4; -pub const MADV_FREE: c_int = 5; -pub const MADV_ZERO_WIRED_PAGES: c_int = 6; -pub const MADV_FREE_REUSABLE: c_int = 7; -pub const MADV_FREE_REUSE: c_int = 8; -pub const MADV_CAN_REUSE: c_int = 9; - -pub const MINCORE_INCORE: c_int = 0x1; -pub const MINCORE_REFERENCED: c_int = 0x2; -pub const MINCORE_MODIFIED: c_int = 0x4; -pub const MINCORE_REFERENCED_OTHER: c_int = 0x8; -pub const MINCORE_MODIFIED_OTHER: c_int = 0x10; - -pub const CTLIOCGINFO: c_ulong = 0xc0644e03; - -// -// sys/netinet/in.h -// Protocols (RFC 1700) -// NOTE: These are in addition to the constants defined in src/unix/mod.rs - -// IPPROTO_IP defined in src/unix/mod.rs -/// IP6 hop-by-hop options -pub const IPPROTO_HOPOPTS: c_int = 0; -// IPPROTO_ICMP defined in src/unix/mod.rs -/// group mgmt protocol -pub const IPPROTO_IGMP: c_int = 2; -/// gateway2 (deprecated) -pub const IPPROTO_GGP: c_int = 3; -/// for compatibility -pub const IPPROTO_IPIP: c_int = 4; -// IPPROTO_TCP defined in src/unix/mod.rs -/// Stream protocol II. -pub const IPPROTO_ST: c_int = 7; -/// exterior gateway protocol -pub const IPPROTO_EGP: c_int = 8; -/// private interior gateway -pub const IPPROTO_PIGP: c_int = 9; -/// BBN RCC Monitoring -pub const IPPROTO_RCCMON: c_int = 10; -/// network voice protocol -pub const IPPROTO_NVPII: c_int = 11; -/// pup -pub const IPPROTO_PUP: c_int = 12; -/// Argus -pub const IPPROTO_ARGUS: c_int = 13; -/// EMCON -pub const IPPROTO_EMCON: c_int = 14; -/// Cross Net Debugger -pub const IPPROTO_XNET: c_int = 15; -/// Chaos -pub const IPPROTO_CHAOS: c_int = 16; -// IPPROTO_UDP defined in src/unix/mod.rs -/// Multiplexing -pub const IPPROTO_MUX: c_int = 18; -/// DCN Measurement Subsystems -pub const IPPROTO_MEAS: c_int = 19; -/// Host Monitoring -pub const IPPROTO_HMP: c_int = 20; -/// Packet Radio Measurement -pub const IPPROTO_PRM: c_int = 21; -/// xns idp -pub const IPPROTO_IDP: c_int = 22; -/// Trunk-1 -pub const IPPROTO_TRUNK1: c_int = 23; -/// Trunk-2 -pub const IPPROTO_TRUNK2: c_int = 24; -/// Leaf-1 -pub const IPPROTO_LEAF1: c_int = 25; -/// Leaf-2 -pub const IPPROTO_LEAF2: c_int = 26; -/// Reliable Data -pub const IPPROTO_RDP: c_int = 27; -/// Reliable Transaction -pub const IPPROTO_IRTP: c_int = 28; -/// tp-4 w/ class negotiation -pub const IPPROTO_TP: c_int = 29; -/// Bulk Data Transfer -pub const IPPROTO_BLT: c_int = 30; -/// Network Services -pub const IPPROTO_NSP: c_int = 31; -/// Merit Internodal -pub const IPPROTO_INP: c_int = 32; -/// Sequential Exchange -pub const IPPROTO_SEP: c_int = 33; -/// Third Party Connect -pub const IPPROTO_3PC: c_int = 34; -/// InterDomain Policy Routing -pub const IPPROTO_IDPR: c_int = 35; -/// XTP -pub const IPPROTO_XTP: c_int = 36; -/// Datagram Delivery -pub const IPPROTO_DDP: c_int = 37; -/// Control Message Transport -pub const IPPROTO_CMTP: c_int = 38; -/// TP++ Transport -pub const IPPROTO_TPXX: c_int = 39; -/// IL transport protocol -pub const IPPROTO_IL: c_int = 40; -// IPPROTO_IPV6 defined in src/unix/mod.rs -/// Source Demand Routing -pub const IPPROTO_SDRP: c_int = 42; 
-/// IP6 routing header -pub const IPPROTO_ROUTING: c_int = 43; -/// IP6 fragmentation header -pub const IPPROTO_FRAGMENT: c_int = 44; -/// InterDomain Routing -pub const IPPROTO_IDRP: c_int = 45; -/// resource reservation -pub const IPPROTO_RSVP: c_int = 46; -/// General Routing Encap. -pub const IPPROTO_GRE: c_int = 47; -/// Mobile Host Routing -pub const IPPROTO_MHRP: c_int = 48; -/// BHA -pub const IPPROTO_BHA: c_int = 49; -/// IP6 Encap Sec. Payload -pub const IPPROTO_ESP: c_int = 50; -/// IP6 Auth Header -pub const IPPROTO_AH: c_int = 51; -/// Integ. Net Layer Security -pub const IPPROTO_INLSP: c_int = 52; -/// IP with encryption -pub const IPPROTO_SWIPE: c_int = 53; -/// Next Hop Resolution -pub const IPPROTO_NHRP: c_int = 54; -/* 55-57: Unassigned */ -// IPPROTO_ICMPV6 defined in src/unix/mod.rs -/// IP6 no next header -pub const IPPROTO_NONE: c_int = 59; -/// IP6 destination option -pub const IPPROTO_DSTOPTS: c_int = 60; -/// any host internal protocol -pub const IPPROTO_AHIP: c_int = 61; -/// CFTP -pub const IPPROTO_CFTP: c_int = 62; -/// "hello" routing protocol -pub const IPPROTO_HELLO: c_int = 63; -/// SATNET/Backroom EXPAK -pub const IPPROTO_SATEXPAK: c_int = 64; -/// Kryptolan -pub const IPPROTO_KRYPTOLAN: c_int = 65; -/// Remote Virtual Disk -pub const IPPROTO_RVD: c_int = 66; -/// Pluribus Packet Core -pub const IPPROTO_IPPC: c_int = 67; -/// Any distributed FS -pub const IPPROTO_ADFS: c_int = 68; -/// Satnet Monitoring -pub const IPPROTO_SATMON: c_int = 69; -/// VISA Protocol -pub const IPPROTO_VISA: c_int = 70; -/// Packet Core Utility -pub const IPPROTO_IPCV: c_int = 71; -/// Comp. Prot. Net. Executive -pub const IPPROTO_CPNX: c_int = 72; -/// Comp. Prot. HeartBeat -pub const IPPROTO_CPHB: c_int = 73; -/// Wang Span Network -pub const IPPROTO_WSN: c_int = 74; -/// Packet Video Protocol -pub const IPPROTO_PVP: c_int = 75; -/// BackRoom SATNET Monitoring -pub const IPPROTO_BRSATMON: c_int = 76; -/// Sun net disk proto (temp.) -pub const IPPROTO_ND: c_int = 77; -/// WIDEBAND Monitoring -pub const IPPROTO_WBMON: c_int = 78; -/// WIDEBAND EXPAK -pub const IPPROTO_WBEXPAK: c_int = 79; -/// ISO cnlp -pub const IPPROTO_EON: c_int = 80; -/// VMTP -pub const IPPROTO_VMTP: c_int = 81; -/// Secure VMTP -pub const IPPROTO_SVMTP: c_int = 82; -/// Banyon VINES -pub const IPPROTO_VINES: c_int = 83; -/// TTP -pub const IPPROTO_TTP: c_int = 84; -/// NSFNET-IGP -pub const IPPROTO_IGP: c_int = 85; -/// dissimilar gateway prot. -pub const IPPROTO_DGP: c_int = 86; -/// TCF -pub const IPPROTO_TCF: c_int = 87; -/// Cisco/GXS IGRP -pub const IPPROTO_IGRP: c_int = 88; -/// OSPFIGP -pub const IPPROTO_OSPFIGP: c_int = 89; -/// Strite RPC protocol -pub const IPPROTO_SRPC: c_int = 90; -/// Locus Address Resoloution -pub const IPPROTO_LARP: c_int = 91; -/// Multicast Transport -pub const IPPROTO_MTP: c_int = 92; -/// AX.25 Frames -pub const IPPROTO_AX25: c_int = 93; -/// IP encapsulated in IP -pub const IPPROTO_IPEIP: c_int = 94; -/// Mobile Int.ing control -pub const IPPROTO_MICP: c_int = 95; -/// Semaphore Comm. security -pub const IPPROTO_SCCSP: c_int = 96; -/// Ethernet IP encapsulation -pub const IPPROTO_ETHERIP: c_int = 97; -/// encapsulation header -pub const IPPROTO_ENCAP: c_int = 98; -/// any private encr. 
scheme -pub const IPPROTO_APES: c_int = 99; -/// GMTP -pub const IPPROTO_GMTP: c_int = 100; - -/* 101-254: Partly Unassigned */ -/// Protocol Independent Mcast -pub const IPPROTO_PIM: c_int = 103; -/// payload compression (IPComp) -pub const IPPROTO_IPCOMP: c_int = 108; -/// PGM -pub const IPPROTO_PGM: c_int = 113; -/// SCTP -pub const IPPROTO_SCTP: c_int = 132; - -/* 255: Reserved */ -/* BSD Private, local use, namespace incursion */ -/// divert pseudo-protocol -pub const IPPROTO_DIVERT: c_int = 254; -/// raw IP packet -pub const IPPROTO_RAW: c_int = 255; -pub const IPPROTO_MAX: c_int = 256; -/// last return value of *_input(), meaning "all job for this pkt is done". -pub const IPPROTO_DONE: c_int = 257; - -pub const AF_UNSPEC: c_int = 0; -pub const AF_LOCAL: c_int = 1; -pub const AF_UNIX: c_int = AF_LOCAL; -pub const AF_INET: c_int = 2; -pub const AF_IMPLINK: c_int = 3; -pub const AF_PUP: c_int = 4; -pub const AF_CHAOS: c_int = 5; -pub const AF_NS: c_int = 6; -pub const AF_ISO: c_int = 7; -pub const AF_OSI: c_int = AF_ISO; -pub const AF_ECMA: c_int = 8; -pub const AF_DATAKIT: c_int = 9; -pub const AF_CCITT: c_int = 10; -pub const AF_SNA: c_int = 11; -pub const AF_DECnet: c_int = 12; -pub const AF_DLI: c_int = 13; -pub const AF_LAT: c_int = 14; -pub const AF_HYLINK: c_int = 15; -pub const AF_APPLETALK: c_int = 16; -pub const AF_ROUTE: c_int = 17; -pub const AF_LINK: c_int = 18; -pub const pseudo_AF_XTP: c_int = 19; -pub const AF_COIP: c_int = 20; -pub const AF_CNT: c_int = 21; -pub const pseudo_AF_RTIP: c_int = 22; -pub const AF_IPX: c_int = 23; -pub const AF_SIP: c_int = 24; -pub const pseudo_AF_PIP: c_int = 25; -pub const AF_NDRV: c_int = 27; -pub const AF_ISDN: c_int = 28; -pub const AF_E164: c_int = AF_ISDN; -pub const pseudo_AF_KEY: c_int = 29; -pub const AF_INET6: c_int = 30; -pub const AF_NATM: c_int = 31; -pub const AF_SYSTEM: c_int = 32; -pub const AF_NETBIOS: c_int = 33; -pub const AF_PPP: c_int = 34; -pub const pseudo_AF_HDRCMPLT: c_int = 35; -pub const AF_IEEE80211: c_int = 37; -pub const AF_UTUN: c_int = 38; -pub const AF_VSOCK: c_int = 40; -pub const AF_SYS_CONTROL: c_int = 2; - -pub const SYSPROTO_EVENT: c_int = 1; -pub const SYSPROTO_CONTROL: c_int = 2; - -pub const PF_UNSPEC: c_int = AF_UNSPEC; -pub const PF_LOCAL: c_int = AF_LOCAL; -pub const PF_UNIX: c_int = PF_LOCAL; -pub const PF_INET: c_int = AF_INET; -pub const PF_IMPLINK: c_int = AF_IMPLINK; -pub const PF_PUP: c_int = AF_PUP; -pub const PF_CHAOS: c_int = AF_CHAOS; -pub const PF_NS: c_int = AF_NS; -pub const PF_ISO: c_int = AF_ISO; -pub const PF_OSI: c_int = AF_ISO; -pub const PF_ECMA: c_int = AF_ECMA; -pub const PF_DATAKIT: c_int = AF_DATAKIT; -pub const PF_CCITT: c_int = AF_CCITT; -pub const PF_SNA: c_int = AF_SNA; -pub const PF_DECnet: c_int = AF_DECnet; -pub const PF_DLI: c_int = AF_DLI; -pub const PF_LAT: c_int = AF_LAT; -pub const PF_HYLINK: c_int = AF_HYLINK; -pub const PF_APPLETALK: c_int = AF_APPLETALK; -pub const PF_ROUTE: c_int = AF_ROUTE; -pub const PF_LINK: c_int = AF_LINK; -pub const PF_XTP: c_int = pseudo_AF_XTP; -pub const PF_COIP: c_int = AF_COIP; -pub const PF_CNT: c_int = AF_CNT; -pub const PF_SIP: c_int = AF_SIP; -pub const PF_IPX: c_int = AF_IPX; -pub const PF_RTIP: c_int = pseudo_AF_RTIP; -pub const PF_PIP: c_int = pseudo_AF_PIP; -pub const PF_NDRV: c_int = AF_NDRV; -pub const PF_ISDN: c_int = AF_ISDN; -pub const PF_KEY: c_int = pseudo_AF_KEY; -pub const PF_INET6: c_int = AF_INET6; -pub const PF_NATM: c_int = AF_NATM; -pub const PF_SYSTEM: c_int = AF_SYSTEM; -pub const PF_NETBIOS: c_int = 
AF_NETBIOS; -pub const PF_PPP: c_int = AF_PPP; -pub const PF_VSOCK: c_int = AF_VSOCK; - -pub const NET_RT_DUMP: c_int = 1; -pub const NET_RT_FLAGS: c_int = 2; -pub const NET_RT_IFLIST: c_int = 3; - -pub const SOMAXCONN: c_int = 128; - -pub const SOCK_MAXADDRLEN: c_int = 255; - -pub const SOCK_STREAM: c_int = 1; -pub const SOCK_DGRAM: c_int = 2; -pub const SOCK_RAW: c_int = 3; -pub const SOCK_RDM: c_int = 4; -pub const SOCK_SEQPACKET: c_int = 5; -pub const IP_TTL: c_int = 4; -pub const IP_HDRINCL: c_int = 2; -pub const IP_RECVDSTADDR: c_int = 7; -pub const IP_ADD_MEMBERSHIP: c_int = 12; -pub const IP_DROP_MEMBERSHIP: c_int = 13; -pub const IP_RECVIF: c_int = 20; -pub const IP_RECVTTL: c_int = 24; -pub const IP_BOUND_IF: c_int = 25; -pub const IP_PKTINFO: c_int = 26; -pub const IP_RECVTOS: c_int = 27; -pub const IP_DONTFRAG: c_int = 28; -pub const IPV6_JOIN_GROUP: c_int = 12; -pub const IPV6_LEAVE_GROUP: c_int = 13; -pub const IPV6_CHECKSUM: c_int = 26; -pub const IPV6_RECVTCLASS: c_int = 35; -pub const IPV6_TCLASS: c_int = 36; -pub const IPV6_RECVHOPLIMIT: c_int = 37; -pub const IPV6_PKTINFO: c_int = 46; -pub const IPV6_HOPLIMIT: c_int = 47; -pub const IPV6_RECVPKTINFO: c_int = 61; -pub const IP_ADD_SOURCE_MEMBERSHIP: c_int = 70; -pub const IP_DROP_SOURCE_MEMBERSHIP: c_int = 71; -pub const IP_BLOCK_SOURCE: c_int = 72; -pub const IP_UNBLOCK_SOURCE: c_int = 73; -pub const IPV6_BOUND_IF: c_int = 125; - -pub const TCP_NOPUSH: c_int = 4; -pub const TCP_NOOPT: c_int = 8; -pub const TCP_KEEPALIVE: c_int = 0x10; -pub const TCP_KEEPINTVL: c_int = 0x101; -pub const TCP_KEEPCNT: c_int = 0x102; -/// Enable/Disable TCP Fastopen on this socket -pub const TCP_FASTOPEN: c_int = 0x105; -pub const TCP_CONNECTION_INFO: c_int = 0x106; - -pub const SOL_LOCAL: c_int = 0; - -/// Retrieve peer credentials. -pub const LOCAL_PEERCRED: c_int = 0x001; -/// Retrieve peer PID. -pub const LOCAL_PEERPID: c_int = 0x002; -/// Retrieve effective peer PID. -pub const LOCAL_PEEREPID: c_int = 0x003; -/// Retrieve peer UUID. -pub const LOCAL_PEERUUID: c_int = 0x004; -/// Retrieve effective peer UUID. -pub const LOCAL_PEEREUUID: c_int = 0x005; -/// Retrieve peer audit token. 
-pub const LOCAL_PEERTOKEN: c_int = 0x006; - -pub const SOL_SOCKET: c_int = 0xffff; - -pub const SO_DEBUG: c_int = 0x01; -pub const SO_ACCEPTCONN: c_int = 0x0002; -pub const SO_REUSEADDR: c_int = 0x0004; -pub const SO_KEEPALIVE: c_int = 0x0008; -pub const SO_DONTROUTE: c_int = 0x0010; -pub const SO_BROADCAST: c_int = 0x0020; -pub const SO_USELOOPBACK: c_int = 0x0040; -pub const SO_LINGER: c_int = 0x0080; -pub const SO_OOBINLINE: c_int = 0x0100; -pub const SO_REUSEPORT: c_int = 0x0200; -pub const SO_TIMESTAMP: c_int = 0x0400; -pub const SO_TIMESTAMP_MONOTONIC: c_int = 0x0800; -pub const SO_DONTTRUNC: c_int = 0x2000; -pub const SO_WANTMORE: c_int = 0x4000; -pub const SO_WANTOOBFLAG: c_int = 0x8000; -pub const SO_SNDBUF: c_int = 0x1001; -pub const SO_RCVBUF: c_int = 0x1002; -pub const SO_SNDLOWAT: c_int = 0x1003; -pub const SO_RCVLOWAT: c_int = 0x1004; -pub const SO_SNDTIMEO: c_int = 0x1005; -pub const SO_RCVTIMEO: c_int = 0x1006; -pub const SO_ERROR: c_int = 0x1007; -pub const SO_TYPE: c_int = 0x1008; -pub const SO_LABEL: c_int = 0x1010; -pub const SO_PEERLABEL: c_int = 0x1011; -pub const SO_NREAD: c_int = 0x1020; -pub const SO_NKE: c_int = 0x1021; -pub const SO_NOSIGPIPE: c_int = 0x1022; -pub const SO_NOADDRERR: c_int = 0x1023; -pub const SO_NWRITE: c_int = 0x1024; -pub const SO_REUSESHAREUID: c_int = 0x1025; -pub const SO_NOTIFYCONFLICT: c_int = 0x1026; -pub const SO_LINGER_SEC: c_int = 0x1080; -pub const SO_RANDOMPORT: c_int = 0x1082; -pub const SO_NP_EXTENSIONS: c_int = 0x1083; - -pub const MSG_OOB: c_int = 0x1; -pub const MSG_PEEK: c_int = 0x2; -pub const MSG_DONTROUTE: c_int = 0x4; -pub const MSG_EOR: c_int = 0x8; -pub const MSG_TRUNC: c_int = 0x10; -pub const MSG_CTRUNC: c_int = 0x20; -pub const MSG_WAITALL: c_int = 0x40; -pub const MSG_DONTWAIT: c_int = 0x80; -pub const MSG_EOF: c_int = 0x100; -pub const MSG_FLUSH: c_int = 0x400; -pub const MSG_HOLD: c_int = 0x800; -pub const MSG_SEND: c_int = 0x1000; -pub const MSG_HAVEMORE: c_int = 0x2000; -pub const MSG_RCVMORE: c_int = 0x4000; -pub const MSG_NEEDSA: c_int = 0x10000; -pub const MSG_NOSIGNAL: c_int = 0x80000; - -pub const SCM_TIMESTAMP: c_int = 0x02; -pub const SCM_CREDS: c_int = 0x03; - -// https://github.com/aosm/xnu/blob/HEAD/bsd/net/if.h#L140-L156 -pub const IFF_UP: c_int = 0x1; // interface is up -pub const IFF_BROADCAST: c_int = 0x2; // broadcast address valid -pub const IFF_DEBUG: c_int = 0x4; // turn on debugging -pub const IFF_LOOPBACK: c_int = 0x8; // is a loopback net -pub const IFF_POINTOPOINT: c_int = 0x10; // interface is point-to-point link -pub const IFF_NOTRAILERS: c_int = 0x20; // obsolete: avoid use of trailers -pub const IFF_RUNNING: c_int = 0x40; // resources allocated -pub const IFF_NOARP: c_int = 0x80; // no address resolution protocol -pub const IFF_PROMISC: c_int = 0x100; // receive all packets -pub const IFF_ALLMULTI: c_int = 0x200; // receive all multicast packets -pub const IFF_OACTIVE: c_int = 0x400; // transmission in progress -pub const IFF_SIMPLEX: c_int = 0x800; // can't hear own transmissions -pub const IFF_LINK0: c_int = 0x1000; // per link layer defined bit -pub const IFF_LINK1: c_int = 0x2000; // per link layer defined bit -pub const IFF_LINK2: c_int = 0x4000; // per link layer defined bit -pub const IFF_ALTPHYS: c_int = IFF_LINK2; // use alternate physical connection -pub const IFF_MULTICAST: c_int = 0x8000; // supports multicast - -pub const SCOPE6_ID_MAX: size_t = 16; - -pub const SHUT_RD: c_int = 0; -pub const SHUT_WR: c_int = 1; -pub const SHUT_RDWR: c_int = 2; - -pub const SAE_ASSOCID_ANY: 
crate::sae_associd_t = 0; -/// ((sae_associd_t)(-1ULL)) -pub const SAE_ASSOCID_ALL: crate::sae_associd_t = 0xffffffff; - -pub const SAE_CONNID_ANY: crate::sae_connid_t = 0; -/// ((sae_connid_t)(-1ULL)) -pub const SAE_CONNID_ALL: crate::sae_connid_t = 0xffffffff; - -// connectx() flag parameters - -/// resume connect() on read/write -pub const CONNECT_RESUME_ON_READ_WRITE: c_uint = 0x1; -/// data is idempotent -pub const CONNECT_DATA_IDEMPOTENT: c_uint = 0x2; -/// data includes security that replaces the TFO-cookie -pub const CONNECT_DATA_AUTHENTICATED: c_uint = 0x4; - -pub const LOCK_SH: c_int = 1; -pub const LOCK_EX: c_int = 2; -pub const LOCK_NB: c_int = 4; -pub const LOCK_UN: c_int = 8; - -pub const MAP_COPY: c_int = 0x0002; -pub const MAP_RENAME: c_int = 0x0020; -pub const MAP_NORESERVE: c_int = 0x0040; -pub const MAP_NOEXTEND: c_int = 0x0100; -pub const MAP_HASSEMAPHORE: c_int = 0x0200; -pub const MAP_NOCACHE: c_int = 0x0400; -pub const MAP_JIT: c_int = 0x0800; - -pub const _SC_ARG_MAX: c_int = 1; -pub const _SC_CHILD_MAX: c_int = 2; -pub const _SC_CLK_TCK: c_int = 3; -pub const _SC_NGROUPS_MAX: c_int = 4; -pub const _SC_OPEN_MAX: c_int = 5; -pub const _SC_JOB_CONTROL: c_int = 6; -pub const _SC_SAVED_IDS: c_int = 7; -pub const _SC_VERSION: c_int = 8; -pub const _SC_BC_BASE_MAX: c_int = 9; -pub const _SC_BC_DIM_MAX: c_int = 10; -pub const _SC_BC_SCALE_MAX: c_int = 11; -pub const _SC_BC_STRING_MAX: c_int = 12; -pub const _SC_COLL_WEIGHTS_MAX: c_int = 13; -pub const _SC_EXPR_NEST_MAX: c_int = 14; -pub const _SC_LINE_MAX: c_int = 15; -pub const _SC_RE_DUP_MAX: c_int = 16; -pub const _SC_2_VERSION: c_int = 17; -pub const _SC_2_C_BIND: c_int = 18; -pub const _SC_2_C_DEV: c_int = 19; -pub const _SC_2_CHAR_TERM: c_int = 20; -pub const _SC_2_FORT_DEV: c_int = 21; -pub const _SC_2_FORT_RUN: c_int = 22; -pub const _SC_2_LOCALEDEF: c_int = 23; -pub const _SC_2_SW_DEV: c_int = 24; -pub const _SC_2_UPE: c_int = 25; -pub const _SC_STREAM_MAX: c_int = 26; -pub const _SC_TZNAME_MAX: c_int = 27; -pub const _SC_ASYNCHRONOUS_IO: c_int = 28; -pub const _SC_PAGESIZE: c_int = 29; -pub const _SC_MEMLOCK: c_int = 30; -pub const _SC_MEMLOCK_RANGE: c_int = 31; -pub const _SC_MEMORY_PROTECTION: c_int = 32; -pub const _SC_MESSAGE_PASSING: c_int = 33; -pub const _SC_PRIORITIZED_IO: c_int = 34; -pub const _SC_PRIORITY_SCHEDULING: c_int = 35; -pub const _SC_REALTIME_SIGNALS: c_int = 36; -pub const _SC_SEMAPHORES: c_int = 37; -pub const _SC_FSYNC: c_int = 38; -pub const _SC_SHARED_MEMORY_OBJECTS: c_int = 39; -pub const _SC_SYNCHRONIZED_IO: c_int = 40; -pub const _SC_TIMERS: c_int = 41; -pub const _SC_AIO_LISTIO_MAX: c_int = 42; -pub const _SC_AIO_MAX: c_int = 43; -pub const _SC_AIO_PRIO_DELTA_MAX: c_int = 44; -pub const _SC_DELAYTIMER_MAX: c_int = 45; -pub const _SC_MQ_OPEN_MAX: c_int = 46; -pub const _SC_MAPPED_FILES: c_int = 47; -pub const _SC_RTSIG_MAX: c_int = 48; -pub const _SC_SEM_NSEMS_MAX: c_int = 49; -pub const _SC_SEM_VALUE_MAX: c_int = 50; -pub const _SC_SIGQUEUE_MAX: c_int = 51; -pub const _SC_TIMER_MAX: c_int = 52; -pub const _SC_NPROCESSORS_CONF: c_int = 57; -pub const _SC_NPROCESSORS_ONLN: c_int = 58; -pub const _SC_2_PBS: c_int = 59; -pub const _SC_2_PBS_ACCOUNTING: c_int = 60; -pub const _SC_2_PBS_CHECKPOINT: c_int = 61; -pub const _SC_2_PBS_LOCATE: c_int = 62; -pub const _SC_2_PBS_MESSAGE: c_int = 63; -pub const _SC_2_PBS_TRACK: c_int = 64; -pub const _SC_ADVISORY_INFO: c_int = 65; -pub const _SC_BARRIERS: c_int = 66; -pub const _SC_CLOCK_SELECTION: c_int = 67; -pub const _SC_CPUTIME: c_int = 68; 
-pub const _SC_FILE_LOCKING: c_int = 69; -pub const _SC_HOST_NAME_MAX: c_int = 72; -pub const _SC_MONOTONIC_CLOCK: c_int = 74; -pub const _SC_READER_WRITER_LOCKS: c_int = 76; -pub const _SC_REGEXP: c_int = 77; -pub const _SC_SHELL: c_int = 78; -pub const _SC_SPAWN: c_int = 79; -pub const _SC_SPIN_LOCKS: c_int = 80; -pub const _SC_SPORADIC_SERVER: c_int = 81; -pub const _SC_THREAD_CPUTIME: c_int = 84; -pub const _SC_THREAD_SPORADIC_SERVER: c_int = 92; -pub const _SC_TIMEOUTS: c_int = 95; -pub const _SC_TRACE: c_int = 97; -pub const _SC_TRACE_EVENT_FILTER: c_int = 98; -pub const _SC_TRACE_INHERIT: c_int = 99; -pub const _SC_TRACE_LOG: c_int = 100; -pub const _SC_TYPED_MEMORY_OBJECTS: c_int = 102; -pub const _SC_V6_ILP32_OFF32: c_int = 103; -pub const _SC_V6_ILP32_OFFBIG: c_int = 104; -pub const _SC_V6_LP64_OFF64: c_int = 105; -pub const _SC_V6_LPBIG_OFFBIG: c_int = 106; -pub const _SC_IPV6: c_int = 118; -pub const _SC_RAW_SOCKETS: c_int = 119; -pub const _SC_SYMLOOP_MAX: c_int = 120; -pub const _SC_PAGE_SIZE: c_int = _SC_PAGESIZE; -pub const _SC_XOPEN_STREAMS: c_int = 114; -pub const _SC_XBS5_ILP32_OFF32: c_int = 122; -pub const _SC_XBS5_ILP32_OFFBIG: c_int = 123; -pub const _SC_XBS5_LP64_OFF64: c_int = 124; -pub const _SC_XBS5_LPBIG_OFFBIG: c_int = 125; -pub const _SC_SS_REPL_MAX: c_int = 126; -pub const _SC_TRACE_EVENT_NAME_MAX: c_int = 127; -pub const _SC_TRACE_NAME_MAX: c_int = 128; -pub const _SC_TRACE_SYS_MAX: c_int = 129; -pub const _SC_TRACE_USER_EVENT_MAX: c_int = 130; -pub const _SC_PASS_MAX: c_int = 131; -// `confstr` keys (only the values guaranteed by `man confstr`). -pub const _CS_PATH: c_int = 1; -pub const _CS_DARWIN_USER_DIR: c_int = 65536; -pub const _CS_DARWIN_USER_TEMP_DIR: c_int = 65537; -pub const _CS_DARWIN_USER_CACHE_DIR: c_int = 65538; - -pub const PTHREAD_MUTEX_NORMAL: c_int = 0; -pub const PTHREAD_MUTEX_ERRORCHECK: c_int = 1; -pub const PTHREAD_MUTEX_RECURSIVE: c_int = 2; -pub const PTHREAD_MUTEX_DEFAULT: c_int = PTHREAD_MUTEX_NORMAL; -pub const _PTHREAD_MUTEX_SIG_init: c_long = 0x32AAABA7; -pub const _PTHREAD_COND_SIG_init: c_long = 0x3CB0B1BB; -pub const _PTHREAD_RWLOCK_SIG_init: c_long = 0x2DA8B3B4; -pub const PTHREAD_MUTEX_INITIALIZER: pthread_mutex_t = pthread_mutex_t { - __sig: _PTHREAD_MUTEX_SIG_init, - __opaque: [0; __PTHREAD_MUTEX_SIZE__], -}; -pub const PTHREAD_COND_INITIALIZER: pthread_cond_t = pthread_cond_t { - __sig: _PTHREAD_COND_SIG_init, - __opaque: [0; __PTHREAD_COND_SIZE__], -}; -pub const PTHREAD_RWLOCK_INITIALIZER: pthread_rwlock_t = pthread_rwlock_t { - __sig: _PTHREAD_RWLOCK_SIG_init, - __opaque: [0; __PTHREAD_RWLOCK_SIZE__], -}; - -pub const OS_UNFAIR_LOCK_INIT: os_unfair_lock = os_unfair_lock { - _os_unfair_lock_opaque: 0, -}; - -pub const OS_LOG_TYPE_DEFAULT: crate::os_log_type_t = 0x00; -pub const OS_LOG_TYPE_INFO: crate::os_log_type_t = 0x01; -pub const OS_LOG_TYPE_DEBUG: crate::os_log_type_t = 0x02; -pub const OS_LOG_TYPE_ERROR: crate::os_log_type_t = 0x10; -pub const OS_LOG_TYPE_FAULT: crate::os_log_type_t = 0x11; - -pub const OS_SIGNPOST_EVENT: crate::os_signpost_type_t = 0x00; -pub const OS_SIGNPOST_INTERVAL_BEGIN: crate::os_signpost_type_t = 0x01; -pub const OS_SIGNPOST_INTERVAL_END: crate::os_signpost_type_t = 0x02; - -pub const MINSIGSTKSZ: size_t = 32768; -pub const SIGSTKSZ: size_t = 131072; - -pub const FD_SETSIZE: usize = 1024; - -pub const ST_NOSUID: c_ulong = 2; - -pub const SCHED_OTHER: c_int = 1; -pub const SCHED_FIFO: c_int = 4; -pub const SCHED_RR: c_int = 2; - -pub const EVFILT_READ: i16 = -1; -pub const EVFILT_WRITE: 
i16 = -2; -pub const EVFILT_AIO: i16 = -3; -pub const EVFILT_VNODE: i16 = -4; -pub const EVFILT_PROC: i16 = -5; -pub const EVFILT_SIGNAL: i16 = -6; -pub const EVFILT_TIMER: i16 = -7; -pub const EVFILT_MACHPORT: i16 = -8; -pub const EVFILT_FS: i16 = -9; -pub const EVFILT_USER: i16 = -10; -pub const EVFILT_VM: i16 = -12; - -pub const EV_ADD: u16 = 0x1; -pub const EV_DELETE: u16 = 0x2; -pub const EV_ENABLE: u16 = 0x4; -pub const EV_DISABLE: u16 = 0x8; -pub const EV_ONESHOT: u16 = 0x10; -pub const EV_CLEAR: u16 = 0x20; -pub const EV_RECEIPT: u16 = 0x40; -pub const EV_DISPATCH: u16 = 0x80; -pub const EV_FLAG0: u16 = 0x1000; -pub const EV_POLL: u16 = 0x1000; -pub const EV_FLAG1: u16 = 0x2000; -pub const EV_OOBAND: u16 = 0x2000; -pub const EV_ERROR: u16 = 0x4000; -pub const EV_EOF: u16 = 0x8000; -pub const EV_SYSFLAGS: u16 = 0xf000; - -pub const NOTE_TRIGGER: u32 = 0x01000000; -pub const NOTE_FFNOP: u32 = 0x00000000; -pub const NOTE_FFAND: u32 = 0x40000000; -pub const NOTE_FFOR: u32 = 0x80000000; -pub const NOTE_FFCOPY: u32 = 0xc0000000; -pub const NOTE_FFCTRLMASK: u32 = 0xc0000000; -pub const NOTE_FFLAGSMASK: u32 = 0x00ffffff; -pub const NOTE_LOWAT: u32 = 0x00000001; -pub const NOTE_DELETE: u32 = 0x00000001; -pub const NOTE_WRITE: u32 = 0x00000002; -pub const NOTE_EXTEND: u32 = 0x00000004; -pub const NOTE_ATTRIB: u32 = 0x00000008; -pub const NOTE_LINK: u32 = 0x00000010; -pub const NOTE_RENAME: u32 = 0x00000020; -pub const NOTE_REVOKE: u32 = 0x00000040; -pub const NOTE_NONE: u32 = 0x00000080; -pub const NOTE_EXIT: u32 = 0x80000000; -pub const NOTE_FORK: u32 = 0x40000000; -pub const NOTE_EXEC: u32 = 0x20000000; -#[doc(hidden)] -#[deprecated(since = "0.2.49", note = "Deprecated since MacOSX 10.9")] -pub const NOTE_REAP: u32 = 0x10000000; -pub const NOTE_SIGNAL: u32 = 0x08000000; -pub const NOTE_EXITSTATUS: u32 = 0x04000000; -pub const NOTE_EXIT_DETAIL: u32 = 0x02000000; -pub const NOTE_PDATAMASK: u32 = 0x000fffff; -pub const NOTE_PCTRLMASK: u32 = 0xfff00000; -#[doc(hidden)] -#[deprecated(since = "0.2.49", note = "Deprecated since MacOSX 10.9")] -pub const NOTE_EXIT_REPARENTED: u32 = 0x00080000; -pub const NOTE_EXIT_DETAIL_MASK: u32 = 0x00070000; -pub const NOTE_EXIT_DECRYPTFAIL: u32 = 0x00010000; -pub const NOTE_EXIT_MEMORY: u32 = 0x00020000; -pub const NOTE_EXIT_CSERROR: u32 = 0x00040000; -pub const NOTE_VM_PRESSURE: u32 = 0x80000000; -pub const NOTE_VM_PRESSURE_TERMINATE: u32 = 0x40000000; -pub const NOTE_VM_PRESSURE_SUDDEN_TERMINATE: u32 = 0x20000000; -pub const NOTE_VM_ERROR: u32 = 0x10000000; -pub const NOTE_SECONDS: u32 = 0x00000001; -pub const NOTE_USECONDS: u32 = 0x00000002; -pub const NOTE_NSECONDS: u32 = 0x00000004; -pub const NOTE_ABSOLUTE: u32 = 0x00000008; -pub const NOTE_LEEWAY: u32 = 0x00000010; -pub const NOTE_CRITICAL: u32 = 0x00000020; -pub const NOTE_BACKGROUND: u32 = 0x00000040; -pub const NOTE_MACH_CONTINUOUS_TIME: u32 = 0x00000080; -pub const NOTE_MACHTIME: u32 = 0x00000100; -pub const NOTE_TRACK: u32 = 0x00000001; -pub const NOTE_TRACKERR: u32 = 0x00000002; -pub const NOTE_CHILD: u32 = 0x00000004; - -pub const OCRNL: crate::tcflag_t = 0x00000010; -pub const ONOCR: crate::tcflag_t = 0x00000020; -pub const ONLRET: crate::tcflag_t = 0x00000040; -pub const OFILL: crate::tcflag_t = 0x00000080; -pub const NLDLY: crate::tcflag_t = 0x00000300; -pub const TABDLY: crate::tcflag_t = 0x00000c04; -pub const CRDLY: crate::tcflag_t = 0x00003000; -pub const FFDLY: crate::tcflag_t = 0x00004000; -pub const BSDLY: crate::tcflag_t = 0x00008000; -pub const VTDLY: crate::tcflag_t = 0x00010000; 
-pub const OFDEL: crate::tcflag_t = 0x00020000; - -pub const NL0: crate::tcflag_t = 0x00000000; -pub const NL1: crate::tcflag_t = 0x00000100; -pub const TAB0: crate::tcflag_t = 0x00000000; -pub const TAB1: crate::tcflag_t = 0x00000400; -pub const TAB2: crate::tcflag_t = 0x00000800; -pub const CR0: crate::tcflag_t = 0x00000000; -pub const CR1: crate::tcflag_t = 0x00001000; -pub const CR2: crate::tcflag_t = 0x00002000; -pub const CR3: crate::tcflag_t = 0x00003000; -pub const FF0: crate::tcflag_t = 0x00000000; -pub const FF1: crate::tcflag_t = 0x00004000; -pub const BS0: crate::tcflag_t = 0x00000000; -pub const BS1: crate::tcflag_t = 0x00008000; -pub const TAB3: crate::tcflag_t = 0x00000004; -pub const VT0: crate::tcflag_t = 0x00000000; -pub const VT1: crate::tcflag_t = 0x00010000; -pub const IUTF8: crate::tcflag_t = 0x00004000; -pub const CRTSCTS: crate::tcflag_t = 0x00030000; - -pub const NI_MAXHOST: crate::socklen_t = 1025; -pub const NI_MAXSERV: crate::socklen_t = 32; -pub const NI_NOFQDN: c_int = 0x00000001; -pub const NI_NUMERICHOST: c_int = 0x00000002; -pub const NI_NAMEREQD: c_int = 0x00000004; -pub const NI_NUMERICSERV: c_int = 0x00000008; -pub const NI_NUMERICSCOPE: c_int = 0x00000100; -pub const NI_DGRAM: c_int = 0x00000010; - -pub const Q_GETQUOTA: c_int = 0x300; -pub const Q_SETQUOTA: c_int = 0x400; - -pub const RENAME_SWAP: c_uint = 0x00000002; -pub const RENAME_EXCL: c_uint = 0x00000004; - -pub const RTLD_LOCAL: c_int = 0x4; -pub const RTLD_FIRST: c_int = 0x100; -pub const RTLD_NODELETE: c_int = 0x80; -pub const RTLD_NOLOAD: c_int = 0x10; -pub const RTLD_GLOBAL: c_int = 0x8; -pub const RTLD_MAIN_ONLY: *mut c_void = -5isize as *mut c_void; - -pub const _WSTOPPED: c_int = 0o177; - -pub const LOG_NETINFO: c_int = 12 << 3; -pub const LOG_REMOTEAUTH: c_int = 13 << 3; -pub const LOG_INSTALL: c_int = 14 << 3; -pub const LOG_RAS: c_int = 15 << 3; -pub const LOG_LAUNCHD: c_int = 24 << 3; -pub const LOG_NFACILITIES: c_int = 25; - -pub const CTLTYPE: c_int = 0xf; -pub const CTLTYPE_NODE: c_int = 1; -pub const CTLTYPE_INT: c_int = 2; -pub const CTLTYPE_STRING: c_int = 3; -pub const CTLTYPE_QUAD: c_int = 4; -pub const CTLTYPE_OPAQUE: c_int = 5; -pub const CTLTYPE_STRUCT: c_int = CTLTYPE_OPAQUE; -pub const CTLFLAG_RD: c_int = 0x80000000; -pub const CTLFLAG_WR: c_int = 0x40000000; -pub const CTLFLAG_RW: c_int = CTLFLAG_RD | CTLFLAG_WR; -pub const CTLFLAG_NOLOCK: c_int = 0x20000000; -pub const CTLFLAG_ANYBODY: c_int = 0x10000000; -pub const CTLFLAG_SECURE: c_int = 0x08000000; -pub const CTLFLAG_MASKED: c_int = 0x04000000; -pub const CTLFLAG_NOAUTO: c_int = 0x02000000; -pub const CTLFLAG_KERN: c_int = 0x01000000; -pub const CTLFLAG_LOCKED: c_int = 0x00800000; -pub const CTLFLAG_OID2: c_int = 0x00400000; -pub const CTL_UNSPEC: c_int = 0; -pub const CTL_KERN: c_int = 1; -pub const CTL_VM: c_int = 2; -pub const CTL_VFS: c_int = 3; -pub const CTL_NET: c_int = 4; -pub const CTL_DEBUG: c_int = 5; -pub const CTL_HW: c_int = 6; -pub const CTL_MACHDEP: c_int = 7; -pub const CTL_USER: c_int = 8; -pub const CTL_MAXID: c_int = 9; -pub const KERN_OSTYPE: c_int = 1; -pub const KERN_OSRELEASE: c_int = 2; -pub const KERN_OSREV: c_int = 3; -pub const KERN_VERSION: c_int = 4; -pub const KERN_MAXVNODES: c_int = 5; -pub const KERN_MAXPROC: c_int = 6; -pub const KERN_MAXFILES: c_int = 7; -pub const KERN_ARGMAX: c_int = 8; -pub const KERN_SECURELVL: c_int = 9; -pub const KERN_HOSTNAME: c_int = 10; -pub const KERN_HOSTID: c_int = 11; -pub const KERN_CLOCKRATE: c_int = 12; -pub const KERN_VNODE: c_int = 13; -pub const 
KERN_PROC: c_int = 14; -pub const KERN_FILE: c_int = 15; -pub const KERN_PROF: c_int = 16; -pub const KERN_POSIX1: c_int = 17; -pub const KERN_NGROUPS: c_int = 18; -pub const KERN_JOB_CONTROL: c_int = 19; -pub const KERN_SAVED_IDS: c_int = 20; -pub const KERN_BOOTTIME: c_int = 21; -pub const KERN_NISDOMAINNAME: c_int = 22; -pub const KERN_DOMAINNAME: c_int = KERN_NISDOMAINNAME; -pub const KERN_MAXPARTITIONS: c_int = 23; -pub const KERN_KDEBUG: c_int = 24; -pub const KERN_UPDATEINTERVAL: c_int = 25; -pub const KERN_OSRELDATE: c_int = 26; -pub const KERN_NTP_PLL: c_int = 27; -pub const KERN_BOOTFILE: c_int = 28; -pub const KERN_MAXFILESPERPROC: c_int = 29; -pub const KERN_MAXPROCPERUID: c_int = 30; -pub const KERN_DUMPDEV: c_int = 31; -pub const KERN_IPC: c_int = 32; -pub const KERN_DUMMY: c_int = 33; -pub const KERN_PS_STRINGS: c_int = 34; -pub const KERN_USRSTACK32: c_int = 35; -pub const KERN_LOGSIGEXIT: c_int = 36; -pub const KERN_SYMFILE: c_int = 37; -pub const KERN_PROCARGS: c_int = 38; -pub const KERN_NETBOOT: c_int = 40; -pub const KERN_SYSV: c_int = 42; -pub const KERN_AFFINITY: c_int = 43; -pub const KERN_TRANSLATE: c_int = 44; -pub const KERN_CLASSIC: c_int = KERN_TRANSLATE; -pub const KERN_EXEC: c_int = 45; -pub const KERN_CLASSICHANDLER: c_int = KERN_EXEC; -pub const KERN_AIOMAX: c_int = 46; -pub const KERN_AIOPROCMAX: c_int = 47; -pub const KERN_AIOTHREADS: c_int = 48; -pub const KERN_COREFILE: c_int = 50; -pub const KERN_COREDUMP: c_int = 51; -pub const KERN_SUGID_COREDUMP: c_int = 52; -pub const KERN_PROCDELAYTERM: c_int = 53; -pub const KERN_SHREG_PRIVATIZABLE: c_int = 54; -pub const KERN_LOW_PRI_WINDOW: c_int = 56; -pub const KERN_LOW_PRI_DELAY: c_int = 57; -pub const KERN_POSIX: c_int = 58; -pub const KERN_USRSTACK64: c_int = 59; -pub const KERN_NX_PROTECTION: c_int = 60; -pub const KERN_TFP: c_int = 61; -pub const KERN_PROCNAME: c_int = 62; -pub const KERN_THALTSTACK: c_int = 63; -pub const KERN_SPECULATIVE_READS: c_int = 64; -pub const KERN_OSVERSION: c_int = 65; -pub const KERN_SAFEBOOT: c_int = 66; -pub const KERN_RAGEVNODE: c_int = 68; -pub const KERN_TTY: c_int = 69; -pub const KERN_CHECKOPENEVT: c_int = 70; -pub const KERN_THREADNAME: c_int = 71; -pub const KERN_MAXID: c_int = 72; -pub const KERN_RAGE_PROC: c_int = 1; -pub const KERN_RAGE_THREAD: c_int = 2; -pub const KERN_UNRAGE_PROC: c_int = 3; -pub const KERN_UNRAGE_THREAD: c_int = 4; -pub const KERN_OPENEVT_PROC: c_int = 1; -pub const KERN_UNOPENEVT_PROC: c_int = 2; -pub const KERN_TFP_POLICY: c_int = 1; -pub const KERN_TFP_POLICY_DENY: c_int = 0; -pub const KERN_TFP_POLICY_DEFAULT: c_int = 2; -pub const KERN_KDEFLAGS: c_int = 1; -pub const KERN_KDDFLAGS: c_int = 2; -pub const KERN_KDENABLE: c_int = 3; -pub const KERN_KDSETBUF: c_int = 4; -pub const KERN_KDGETBUF: c_int = 5; -pub const KERN_KDSETUP: c_int = 6; -pub const KERN_KDREMOVE: c_int = 7; -pub const KERN_KDSETREG: c_int = 8; -pub const KERN_KDGETREG: c_int = 9; -pub const KERN_KDREADTR: c_int = 10; -pub const KERN_KDPIDTR: c_int = 11; -pub const KERN_KDTHRMAP: c_int = 12; -pub const KERN_KDPIDEX: c_int = 14; -pub const KERN_KDSETRTCDEC: c_int = 15; -pub const KERN_KDGETENTROPY: c_int = 16; -pub const KERN_KDWRITETR: c_int = 17; -pub const KERN_KDWRITEMAP: c_int = 18; -#[doc(hidden)] -#[deprecated(since = "0.2.49", note = "Removed in MacOSX 10.12")] -pub const KERN_KDENABLE_BG_TRACE: c_int = 19; -#[doc(hidden)] -#[deprecated(since = "0.2.49", note = "Removed in MacOSX 10.12")] -pub const KERN_KDDISABLE_BG_TRACE: c_int = 20; -pub const 
KERN_KDREADCURTHRMAP: c_int = 21; -pub const KERN_KDSET_TYPEFILTER: c_int = 22; -pub const KERN_KDBUFWAIT: c_int = 23; -pub const KERN_KDCPUMAP: c_int = 24; -pub const KERN_PROC_ALL: c_int = 0; -pub const KERN_PROC_PID: c_int = 1; -pub const KERN_PROC_PGRP: c_int = 2; -pub const KERN_PROC_SESSION: c_int = 3; -pub const KERN_PROC_TTY: c_int = 4; -pub const KERN_PROC_UID: c_int = 5; -pub const KERN_PROC_RUID: c_int = 6; -pub const KERN_PROC_LCID: c_int = 7; -pub const KERN_SUCCESS: c_int = 0; -pub const KERN_INVALID_ADDRESS: c_int = 1; -pub const KERN_PROTECTION_FAILURE: c_int = 2; -pub const KERN_NO_SPACE: c_int = 3; -pub const KERN_INVALID_ARGUMENT: c_int = 4; -pub const KERN_FAILURE: c_int = 5; -pub const KERN_RESOURCE_SHORTAGE: c_int = 6; -pub const KERN_NOT_RECEIVER: c_int = 7; -pub const KERN_NO_ACCESS: c_int = 8; -pub const KERN_MEMORY_FAILURE: c_int = 9; -pub const KERN_MEMORY_ERROR: c_int = 10; -pub const KERN_ALREADY_IN_SET: c_int = 11; -pub const KERN_NOT_IN_SET: c_int = 12; -pub const KERN_NAME_EXISTS: c_int = 13; -pub const KERN_ABORTED: c_int = 14; -pub const KERN_INVALID_NAME: c_int = 15; -pub const KERN_INVALID_TASK: c_int = 16; -pub const KERN_INVALID_RIGHT: c_int = 17; -pub const KERN_INVALID_VALUE: c_int = 18; -pub const KERN_UREFS_OVERFLOW: c_int = 19; -pub const KERN_INVALID_CAPABILITY: c_int = 20; -pub const KERN_RIGHT_EXISTS: c_int = 21; -pub const KERN_INVALID_HOST: c_int = 22; -pub const KERN_MEMORY_PRESENT: c_int = 23; -pub const KERN_MEMORY_DATA_MOVED: c_int = 24; -pub const KERN_MEMORY_RESTART_COPY: c_int = 25; -pub const KERN_INVALID_PROCESSOR_SET: c_int = 26; -pub const KERN_POLICY_LIMIT: c_int = 27; -pub const KERN_INVALID_POLICY: c_int = 28; -pub const KERN_INVALID_OBJECT: c_int = 29; -pub const KERN_ALREADY_WAITING: c_int = 30; -pub const KERN_DEFAULT_SET: c_int = 31; -pub const KERN_EXCEPTION_PROTECTED: c_int = 32; -pub const KERN_INVALID_LEDGER: c_int = 33; -pub const KERN_INVALID_MEMORY_CONTROL: c_int = 34; -pub const KERN_INVALID_SECURITY: c_int = 35; -pub const KERN_NOT_DEPRESSED: c_int = 36; -pub const KERN_TERMINATED: c_int = 37; -pub const KERN_LOCK_SET_DESTROYED: c_int = 38; -pub const KERN_LOCK_UNSTABLE: c_int = 39; -pub const KERN_LOCK_OWNED: c_int = 40; -pub const KERN_LOCK_OWNED_SELF: c_int = 41; -pub const KERN_SEMAPHORE_DESTROYED: c_int = 42; -pub const KERN_RPC_SERVER_TERMINATED: c_int = 43; -pub const KERN_RPC_TERMINATE_ORPHAN: c_int = 44; -pub const KERN_RPC_CONTINUE_ORPHAN: c_int = 45; -pub const KERN_NOT_SUPPORTED: c_int = 46; -pub const KERN_NODE_DOWN: c_int = 47; -pub const KERN_NOT_WAITING: c_int = 48; -pub const KERN_OPERATION_TIMED_OUT: c_int = 49; -pub const KERN_CODESIGN_ERROR: c_int = 50; -pub const KERN_POLICY_STATIC: c_int = 51; -pub const KERN_INSUFFICIENT_BUFFER_SIZE: c_int = 52; -pub const KIPC_MAXSOCKBUF: c_int = 1; -pub const KIPC_SOCKBUF_WASTE: c_int = 2; -pub const KIPC_SOMAXCONN: c_int = 3; -pub const KIPC_MAX_LINKHDR: c_int = 4; -pub const KIPC_MAX_PROTOHDR: c_int = 5; -pub const KIPC_MAX_HDR: c_int = 6; -pub const KIPC_MAX_DATALEN: c_int = 7; -pub const KIPC_MBSTAT: c_int = 8; -pub const KIPC_NMBCLUSTERS: c_int = 9; -pub const KIPC_SOQLIMITCOMPAT: c_int = 10; -pub const VM_METER: c_int = 1; -pub const VM_LOADAVG: c_int = 2; -pub const VM_MACHFACTOR: c_int = 4; -pub const VM_SWAPUSAGE: c_int = 5; -pub const VM_MAXID: c_int = 6; -pub const VM_PROT_NONE: crate::vm_prot_t = 0x00; -pub const VM_PROT_READ: crate::vm_prot_t = 0x01; -pub const VM_PROT_WRITE: crate::vm_prot_t = 0x02; -pub const VM_PROT_EXECUTE: crate::vm_prot_t 
= 0x04; -pub const MEMORY_OBJECT_NULL: crate::memory_object_t = 0; -pub const HW_MACHINE: c_int = 1; -pub const HW_MODEL: c_int = 2; -pub const HW_NCPU: c_int = 3; -pub const HW_BYTEORDER: c_int = 4; -pub const HW_PHYSMEM: c_int = 5; -pub const HW_USERMEM: c_int = 6; -pub const HW_PAGESIZE: c_int = 7; -pub const HW_DISKNAMES: c_int = 8; -pub const HW_DISKSTATS: c_int = 9; -pub const HW_EPOCH: c_int = 10; -pub const HW_FLOATINGPT: c_int = 11; -pub const HW_MACHINE_ARCH: c_int = 12; -pub const HW_VECTORUNIT: c_int = 13; -pub const HW_BUS_FREQ: c_int = 14; -pub const HW_CPU_FREQ: c_int = 15; -pub const HW_CACHELINE: c_int = 16; -pub const HW_L1ICACHESIZE: c_int = 17; -pub const HW_L1DCACHESIZE: c_int = 18; -pub const HW_L2SETTINGS: c_int = 19; -pub const HW_L2CACHESIZE: c_int = 20; -pub const HW_L3SETTINGS: c_int = 21; -pub const HW_L3CACHESIZE: c_int = 22; -pub const HW_TB_FREQ: c_int = 23; -pub const HW_MEMSIZE: c_int = 24; -pub const HW_AVAILCPU: c_int = 25; -pub const HW_TARGET: c_int = 26; -pub const HW_PRODUCT: c_int = 27; -pub const HW_MAXID: c_int = 28; -pub const USER_CS_PATH: c_int = 1; -pub const USER_BC_BASE_MAX: c_int = 2; -pub const USER_BC_DIM_MAX: c_int = 3; -pub const USER_BC_SCALE_MAX: c_int = 4; -pub const USER_BC_STRING_MAX: c_int = 5; -pub const USER_COLL_WEIGHTS_MAX: c_int = 6; -pub const USER_EXPR_NEST_MAX: c_int = 7; -pub const USER_LINE_MAX: c_int = 8; -pub const USER_RE_DUP_MAX: c_int = 9; -pub const USER_POSIX2_VERSION: c_int = 10; -pub const USER_POSIX2_C_BIND: c_int = 11; -pub const USER_POSIX2_C_DEV: c_int = 12; -pub const USER_POSIX2_CHAR_TERM: c_int = 13; -pub const USER_POSIX2_FORT_DEV: c_int = 14; -pub const USER_POSIX2_FORT_RUN: c_int = 15; -pub const USER_POSIX2_LOCALEDEF: c_int = 16; -pub const USER_POSIX2_SW_DEV: c_int = 17; -pub const USER_POSIX2_UPE: c_int = 18; -pub const USER_STREAM_MAX: c_int = 19; -pub const USER_TZNAME_MAX: c_int = 20; -pub const USER_MAXID: c_int = 21; -pub const CTL_DEBUG_NAME: c_int = 0; -pub const CTL_DEBUG_VALUE: c_int = 1; -pub const CTL_DEBUG_MAXID: c_int = 20; - -pub const PRIO_DARWIN_THREAD: c_int = 3; -pub const PRIO_DARWIN_PROCESS: c_int = 4; -pub const PRIO_DARWIN_BG: c_int = 0x1000; -pub const PRIO_DARWIN_NONUI: c_int = 0x1001; - -pub const SEM_FAILED: *mut sem_t = -1isize as *mut crate::sem_t; - -pub const AI_PASSIVE: c_int = 0x00000001; -pub const AI_CANONNAME: c_int = 0x00000002; -pub const AI_NUMERICHOST: c_int = 0x00000004; -pub const AI_NUMERICSERV: c_int = 0x00001000; -pub const AI_MASK: c_int = - AI_PASSIVE | AI_CANONNAME | AI_NUMERICHOST | AI_NUMERICSERV | AI_ADDRCONFIG; -pub const AI_ALL: c_int = 0x00000100; -pub const AI_V4MAPPED_CFG: c_int = 0x00000200; -pub const AI_ADDRCONFIG: c_int = 0x00000400; -pub const AI_V4MAPPED: c_int = 0x00000800; -pub const AI_DEFAULT: c_int = AI_V4MAPPED_CFG | AI_ADDRCONFIG; -pub const AI_UNUSABLE: c_int = 0x10000000; - -pub const SIGEV_NONE: c_int = 0; -pub const SIGEV_SIGNAL: c_int = 1; -pub const SIGEV_THREAD: c_int = 3; - -pub const AIO_CANCELED: c_int = 2; -pub const AIO_NOTCANCELED: c_int = 4; -pub const AIO_ALLDONE: c_int = 1; -#[deprecated( - since = "0.2.64", - note = "Can vary at runtime. 
Use sysconf(3) instead" -)] -pub const AIO_LISTIO_MAX: c_int = 16; -pub const LIO_NOP: c_int = 0; -pub const LIO_WRITE: c_int = 2; -pub const LIO_READ: c_int = 1; -pub const LIO_WAIT: c_int = 2; -pub const LIO_NOWAIT: c_int = 1; - -pub const WEXITED: c_int = 0x00000004; -pub const WSTOPPED: c_int = 0x00000008; -pub const WCONTINUED: c_int = 0x00000010; -pub const WNOWAIT: c_int = 0x00000020; - -pub const P_ALL: idtype_t = 0; -pub const P_PID: idtype_t = 1; -pub const P_PGID: idtype_t = 2; - -pub const UTIME_OMIT: c_long = -2; -pub const UTIME_NOW: c_long = -1; - -pub const XATTR_NOFOLLOW: c_int = 0x0001; -pub const XATTR_CREATE: c_int = 0x0002; -pub const XATTR_REPLACE: c_int = 0x0004; -pub const XATTR_NOSECURITY: c_int = 0x0008; -pub const XATTR_NODEFAULT: c_int = 0x0010; -pub const XATTR_SHOWCOMPRESSION: c_int = 0x0020; - -pub const NET_RT_IFLIST2: c_int = 0x0006; - -// net/route.h -pub const RTF_DELCLONE: c_int = 0x80; -pub const RTF_CLONING: c_int = 0x100; -pub const RTF_XRESOLVE: c_int = 0x200; -pub const RTF_LLINFO: c_int = 0x400; -pub const RTF_NOIFREF: c_int = 0x2000; -pub const RTF_PRCLONING: c_int = 0x10000; -pub const RTF_WASCLONED: c_int = 0x20000; -pub const RTF_PROTO3: c_int = 0x40000; -pub const RTF_PINNED: c_int = 0x100000; -pub const RTF_LOCAL: c_int = 0x200000; -pub const RTF_BROADCAST: c_int = 0x400000; -pub const RTF_MULTICAST: c_int = 0x800000; -pub const RTF_IFSCOPE: c_int = 0x1000000; -pub const RTF_CONDEMNED: c_int = 0x2000000; -pub const RTF_IFREF: c_int = 0x4000000; -pub const RTF_PROXY: c_int = 0x8000000; -pub const RTF_ROUTER: c_int = 0x10000000; -pub const RTF_DEAD: c_int = 0x20000000; -pub const RTF_GLOBAL: c_int = 0x40000000; - -pub const RTM_VERSION: c_int = 5; - -// Message types -pub const RTM_LOCK: c_int = 0x8; -pub const RTM_OLDADD: c_int = 0x9; -pub const RTM_OLDDEL: c_int = 0xa; -pub const RTM_RESOLVE: c_int = 0xb; -pub const RTM_NEWADDR: c_int = 0xc; -pub const RTM_DELADDR: c_int = 0xd; -pub const RTM_IFINFO: c_int = 0xe; -pub const RTM_NEWMADDR: c_int = 0xf; -pub const RTM_DELMADDR: c_int = 0x10; -pub const RTM_IFINFO2: c_int = 0x12; -pub const RTM_NEWMADDR2: c_int = 0x13; -pub const RTM_GET2: c_int = 0x14; - -// Bitmask values for rtm_inits and rmx_locks. 
-pub const RTV_MTU: c_int = 0x1; -pub const RTV_HOPCOUNT: c_int = 0x2; -pub const RTV_EXPIRE: c_int = 0x4; -pub const RTV_RPIPE: c_int = 0x8; -pub const RTV_SPIPE: c_int = 0x10; -pub const RTV_SSTHRESH: c_int = 0x20; -pub const RTV_RTT: c_int = 0x40; -pub const RTV_RTTVAR: c_int = 0x80; - -pub const RTAX_MAX: c_int = 8; - -pub const KERN_PROCARGS2: c_int = 49; - -pub const PROC_PIDTASKALLINFO: c_int = 2; -pub const PROC_PIDTBSDINFO: c_int = 3; -pub const PROC_PIDTASKINFO: c_int = 4; -pub const PROC_PIDTHREADINFO: c_int = 5; -pub const PROC_PIDVNODEPATHINFO: c_int = 9; -pub const PROC_PIDPATHINFO_MAXSIZE: c_int = 4096; - -pub const PROC_PIDLISTFDS: c_int = 1; -pub const PROC_PIDLISTFD_SIZE: c_int = size_of::<proc_fdinfo>() as c_int; -pub const PROX_FDTYPE_ATALK: c_int = 0; -pub const PROX_FDTYPE_VNODE: c_int = 1; -pub const PROX_FDTYPE_SOCKET: c_int = 2; -pub const PROX_FDTYPE_PSHM: c_int = 3; -pub const PROX_FDTYPE_PSEM: c_int = 4; -pub const PROX_FDTYPE_KQUEUE: c_int = 5; -pub const PROX_FDTYPE_PIPE: c_int = 6; -pub const PROX_FDTYPE_FSEVENTS: c_int = 7; -pub const PROX_FDTYPE_NETPOLICY: c_int = 9; -pub const PROX_FDTYPE_CHANNEL: c_int = 10; -pub const PROX_FDTYPE_NEXUS: c_int = 11; - -pub const PROC_CSM_ALL: c_uint = 0x0001; -pub const PROC_CSM_NOSMT: c_uint = 0x0002; -pub const PROC_CSM_TECS: c_uint = 0x0004; -pub const MAXCOMLEN: usize = 16; -pub const MAXTHREADNAMESIZE: usize = 64; - -pub const XUCRED_VERSION: c_uint = 0; - -pub const LC_SEGMENT: u32 = 0x1; -pub const LC_SEGMENT_64: u32 = 0x19; - -pub const MH_MAGIC: u32 = 0xfeedface; -pub const MH_MAGIC_64: u32 = 0xfeedfacf; - -// net/if_utun.h -pub const UTUN_OPT_FLAGS: c_int = 1; -pub const UTUN_OPT_IFNAME: c_int = 2; - -// net/bpf.h -pub const DLT_NULL: c_uint = 0; // no link-layer encapsulation -pub const DLT_EN10MB: c_uint = 1; // Ethernet (10Mb) -pub const DLT_EN3MB: c_uint = 2; // Experimental Ethernet (3Mb) -pub const DLT_AX25: c_uint = 3; // Amateur Radio AX.25 -pub const DLT_PRONET: c_uint = 4; // Proteon ProNET Token Ring -pub const DLT_CHAOS: c_uint = 5; // Chaos -pub const DLT_IEEE802: c_uint = 6; // IEEE 802 Networks -pub const DLT_ARCNET: c_uint = 7; // ARCNET -pub const DLT_SLIP: c_uint = 8; // Serial Line IP -pub const DLT_PPP: c_uint = 9; // Point-to-point Protocol -pub const DLT_FDDI: c_uint = 10; // FDDI -pub const DLT_ATM_RFC1483: c_uint = 11; // LLC/SNAP encapsulated atm -pub const DLT_RAW: c_uint = 12; // raw IP -pub const DLT_LOOP: c_uint = 108; - -// https://github.com/apple/darwin-xnu/blob/HEAD/bsd/net/bpf.h#L100 -// sizeof(i32) -pub const BPF_ALIGNMENT: c_int = 4; - -// sys/mount.h -pub const MNT_NODEV: c_int = 0x00000010; -pub const MNT_UNION: c_int = 0x00000020; -pub const MNT_CPROTECT: c_int = 0x00000080; - -// MAC labeled / "quarantined" flag -pub const MNT_QUARANTINE: c_int = 0x00000400; - -// Flags set by internal operations. -pub const MNT_LOCAL: c_int = 0x00001000; -pub const MNT_QUOTA: c_int = 0x00002000; -pub const MNT_ROOTFS: c_int = 0x00004000; -pub const MNT_DOVOLFS: c_int = 0x00008000; - -pub const MNT_DONTBROWSE: c_int = 0x00100000; -pub const MNT_IGNORE_OWNERSHIP: c_int = 0x00200000; -pub const MNT_AUTOMOUNTED: c_int = 0x00400000; -pub const MNT_JOURNALED: c_int = 0x00800000; -pub const MNT_NOUSERXATTR: c_int = 0x01000000; -pub const MNT_DEFWRITE: c_int = 0x02000000; -pub const MNT_MULTILABEL: c_int = 0x04000000; -pub const MNT_NOATIME: c_int = 0x10000000; -pub const MNT_SNAPSHOT: c_int = 0x40000000; - -// External filesystem command modifier flags. 
-pub const MNT_NOBLOCK: c_int = 0x00020000; - -// sys/spawn.h: -// DIFF(main): changed to `c_short` in f62eb023ab -pub const POSIX_SPAWN_RESETIDS: c_int = 0x0001; -pub const POSIX_SPAWN_SETPGROUP: c_int = 0x0002; -pub const POSIX_SPAWN_SETSIGDEF: c_int = 0x0004; -pub const POSIX_SPAWN_SETSIGMASK: c_int = 0x0008; -pub const POSIX_SPAWN_SETEXEC: c_int = 0x0040; -pub const POSIX_SPAWN_START_SUSPENDED: c_int = 0x0080; -pub const POSIX_SPAWN_CLOEXEC_DEFAULT: c_int = 0x4000; - -// sys/ipc.h: -pub const IPC_CREAT: c_int = 0x200; -pub const IPC_EXCL: c_int = 0x400; -pub const IPC_NOWAIT: c_int = 0x800; -pub const IPC_PRIVATE: key_t = 0; - -pub const IPC_RMID: c_int = 0; -pub const IPC_SET: c_int = 1; -pub const IPC_STAT: c_int = 2; - -pub const IPC_R: c_int = 0x100; -pub const IPC_W: c_int = 0x80; -pub const IPC_M: c_int = 0x1000; - -// sys/sem.h -pub const SEM_UNDO: c_int = 0o10000; - -pub const GETNCNT: c_int = 3; -pub const GETPID: c_int = 4; -pub const GETVAL: c_int = 5; -pub const GETALL: c_int = 6; -pub const GETZCNT: c_int = 7; -pub const SETVAL: c_int = 8; -pub const SETALL: c_int = 9; - -// sys/shm.h -pub const SHM_RDONLY: c_int = 0x1000; -pub const SHM_RND: c_int = 0x2000; -#[cfg(target_arch = "aarch64")] -pub const SHMLBA: c_int = 16 * 1024; -#[cfg(not(target_arch = "aarch64"))] -pub const SHMLBA: c_int = 4096; -pub const SHM_R: c_int = IPC_R; -pub const SHM_W: c_int = IPC_W; - -// Flags for chflags(2) -pub const UF_SETTABLE: c_uint = 0x0000ffff; -pub const UF_NODUMP: c_uint = 0x00000001; -pub const UF_IMMUTABLE: c_uint = 0x00000002; -pub const UF_APPEND: c_uint = 0x00000004; -pub const UF_OPAQUE: c_uint = 0x00000008; -pub const UF_COMPRESSED: c_uint = 0x00000020; -pub const UF_TRACKED: c_uint = 0x00000040; -pub const SF_SETTABLE: c_uint = 0xffff0000; -pub const SF_ARCHIVED: c_uint = 0x00010000; -pub const SF_IMMUTABLE: c_uint = 0x00020000; -pub const SF_APPEND: c_uint = 0x00040000; -pub const UF_HIDDEN: c_uint = 0x00008000; - -// -pub const NTP_API: c_int = 4; -pub const MAXPHASE: c_long = 500000000; -pub const MAXFREQ: c_long = 500000; -pub const MINSEC: c_int = 256; -pub const MAXSEC: c_int = 2048; -pub const NANOSECOND: c_long = 1000000000; -pub const SCALE_PPM: c_int = 65; -pub const MAXTC: c_int = 10; -pub const MOD_OFFSET: c_uint = 0x0001; -pub const MOD_FREQUENCY: c_uint = 0x0002; -pub const MOD_MAXERROR: c_uint = 0x0004; -pub const MOD_ESTERROR: c_uint = 0x0008; -pub const MOD_STATUS: c_uint = 0x0010; -pub const MOD_TIMECONST: c_uint = 0x0020; -pub const MOD_PPSMAX: c_uint = 0x0040; -pub const MOD_TAI: c_uint = 0x0080; -pub const MOD_MICRO: c_uint = 0x1000; -pub const MOD_NANO: c_uint = 0x2000; -pub const MOD_CLKB: c_uint = 0x4000; -pub const MOD_CLKA: c_uint = 0x8000; -pub const STA_PLL: c_int = 0x0001; -pub const STA_PPSFREQ: c_int = 0x0002; -pub const STA_PPSTIME: c_int = 0x0004; -pub const STA_FLL: c_int = 0x0008; -pub const STA_INS: c_int = 0x0010; -pub const STA_DEL: c_int = 0x0020; -pub const STA_UNSYNC: c_int = 0x0040; -pub const STA_FREQHOLD: c_int = 0x0080; -pub const STA_PPSSIGNAL: c_int = 0x0100; -pub const STA_PPSJITTER: c_int = 0x0200; -pub const STA_PPSWANDER: c_int = 0x0400; -pub const STA_PPSERROR: c_int = 0x0800; -pub const STA_CLOCKERR: c_int = 0x1000; -pub const STA_NANO: c_int = 0x2000; -pub const STA_MODE: c_int = 0x4000; -pub const STA_CLK: c_int = 0x8000; -pub const STA_RONLY: c_int = STA_PPSSIGNAL - | STA_PPSJITTER - | STA_PPSWANDER - | STA_PPSERROR - | STA_CLOCKERR - | STA_NANO - | STA_MODE - | STA_CLK; -pub const TIME_OK: c_int = 0; -pub const 
TIME_INS: c_int = 1; -pub const TIME_DEL: c_int = 2; -pub const TIME_OOP: c_int = 3; -pub const TIME_WAIT: c_int = 4; -pub const TIME_ERROR: c_int = 5; - -// -pub const MNT_WAIT: c_int = 1; -pub const MNT_NOWAIT: c_int = 2; - -// -pub const THREAD_STANDARD_POLICY: c_int = 1; -pub const THREAD_STANDARD_POLICY_COUNT: c_int = 0; -pub const THREAD_EXTENDED_POLICY: c_int = 1; -pub const THREAD_TIME_CONSTRAINT_POLICY: c_int = 2; -pub const THREAD_PRECEDENCE_POLICY: c_int = 3; -pub const THREAD_AFFINITY_POLICY: c_int = 4; -pub const THREAD_AFFINITY_TAG_NULL: c_int = 0; -pub const THREAD_BACKGROUND_POLICY: c_int = 5; -pub const THREAD_BACKGROUND_POLICY_DARWIN_BG: c_int = 0x1000; -pub const THREAD_LATENCY_QOS_POLICY: c_int = 7; -pub const THREAD_THROUGHPUT_QOS_POLICY: c_int = 8; - -// -pub const TH_STATE_RUNNING: c_int = 1; -pub const TH_STATE_STOPPED: c_int = 2; -pub const TH_STATE_WAITING: c_int = 3; -pub const TH_STATE_UNINTERRUPTIBLE: c_int = 4; -pub const TH_STATE_HALTED: c_int = 5; -pub const TH_FLAGS_SWAPPED: c_int = 0x1; -pub const TH_FLAGS_IDLE: c_int = 0x2; -pub const TH_FLAGS_GLOBAL_FORCED_IDLE: c_int = 0x4; -pub const THREAD_BASIC_INFO: c_int = 3; -pub const THREAD_IDENTIFIER_INFO: c_int = 4; -pub const THREAD_EXTENDED_INFO: c_int = 5; - -// CommonCrypto/CommonCryptoError.h -pub const kCCSuccess: i32 = 0; -pub const kCCParamError: i32 = -4300; -pub const kCCBufferTooSmall: i32 = -4301; -pub const kCCMemoryFailure: i32 = -4302; -pub const kCCAlignmentError: i32 = -4303; -pub const kCCDecodeError: i32 = -4304; -pub const kCCUnimplemented: i32 = -4305; -pub const kCCOverflow: i32 = -4306; -pub const kCCRNGFailure: i32 = -4307; -pub const kCCUnspecifiedError: i32 = -4308; -pub const kCCCallSequenceError: i32 = -4309; -pub const kCCKeySizeError: i32 = -4310; -pub const kCCInvalidKey: i32 = -4311; - -// mach/host_info.h -pub const HOST_LOAD_INFO: i32 = 1; -pub const HOST_VM_INFO: i32 = 2; -pub const HOST_CPU_LOAD_INFO: i32 = 3; -pub const HOST_VM_INFO64: i32 = 4; -pub const HOST_EXTMOD_INFO64: i32 = 5; -pub const HOST_EXPIRED_TASK_INFO: i32 = 6; - -// mach/vm_statistics.h -pub const VM_PAGE_QUERY_PAGE_PRESENT: i32 = 0x1; -pub const VM_PAGE_QUERY_PAGE_FICTITIOUS: i32 = 0x2; -pub const VM_PAGE_QUERY_PAGE_REF: i32 = 0x4; -pub const VM_PAGE_QUERY_PAGE_DIRTY: i32 = 0x8; -pub const VM_PAGE_QUERY_PAGE_PAGED_OUT: i32 = 0x10; -pub const VM_PAGE_QUERY_PAGE_COPIED: i32 = 0x20; -pub const VM_PAGE_QUERY_PAGE_SPECULATIVE: i32 = 0x40; -pub const VM_PAGE_QUERY_PAGE_EXTERNAL: i32 = 0x80; -pub const VM_PAGE_QUERY_PAGE_CS_VALIDATED: i32 = 0x100; -pub const VM_PAGE_QUERY_PAGE_CS_TAINTED: i32 = 0x200; -pub const VM_PAGE_QUERY_PAGE_CS_NX: i32 = 0x400; - -// mach/task_info.h -pub const TASK_THREAD_TIMES_INFO: u32 = 3; -pub const HOST_CPU_LOAD_INFO_COUNT: u32 = 4; -pub const MACH_TASK_BASIC_INFO: u32 = 20; - -pub const MACH_PORT_NULL: i32 = 0; - -pub const RUSAGE_INFO_V0: c_int = 0; -pub const RUSAGE_INFO_V1: c_int = 1; -pub const RUSAGE_INFO_V2: c_int = 2; -pub const RUSAGE_INFO_V3: c_int = 3; -pub const RUSAGE_INFO_V4: c_int = 4; - -// copyfile.h -pub const COPYFILE_ACL: crate::copyfile_flags_t = 1 << 0; -pub const COPYFILE_STAT: crate::copyfile_flags_t = 1 << 1; -pub const COPYFILE_XATTR: crate::copyfile_flags_t = 1 << 2; -pub const COPYFILE_DATA: crate::copyfile_flags_t = 1 << 3; -pub const COPYFILE_SECURITY: crate::copyfile_flags_t = COPYFILE_STAT | COPYFILE_ACL; -pub const COPYFILE_METADATA: crate::copyfile_flags_t = COPYFILE_SECURITY | COPYFILE_XATTR; -pub const COPYFILE_RECURSIVE: crate::copyfile_flags_t = 1 
<< 15; -pub const COPYFILE_CHECK: crate::copyfile_flags_t = 1 << 16; -pub const COPYFILE_EXCL: crate::copyfile_flags_t = 1 << 17; -pub const COPYFILE_NOFOLLOW_SRC: crate::copyfile_flags_t = 1 << 18; -pub const COPYFILE_NOFOLLOW_DST: crate::copyfile_flags_t = 1 << 19; -pub const COPYFILE_MOVE: crate::copyfile_flags_t = 1 << 20; -pub const COPYFILE_UNLINK: crate::copyfile_flags_t = 1 << 21; -pub const COPYFILE_NOFOLLOW: crate::copyfile_flags_t = - COPYFILE_NOFOLLOW_SRC | COPYFILE_NOFOLLOW_DST; -pub const COPYFILE_PACK: crate::copyfile_flags_t = 1 << 22; -pub const COPYFILE_UNPACK: crate::copyfile_flags_t = 1 << 23; -pub const COPYFILE_CLONE: crate::copyfile_flags_t = 1 << 24; -pub const COPYFILE_CLONE_FORCE: crate::copyfile_flags_t = 1 << 25; -pub const COPYFILE_RUN_IN_PLACE: crate::copyfile_flags_t = 1 << 26; -pub const COPYFILE_DATA_SPARSE: crate::copyfile_flags_t = 1 << 27; -pub const COPYFILE_PRESERVE_DST_TRACKED: crate::copyfile_flags_t = 1 << 28; -pub const COPYFILE_VERBOSE: crate::copyfile_flags_t = 1 << 30; -pub const COPYFILE_RECURSE_ERROR: c_int = 0; -pub const COPYFILE_RECURSE_FILE: c_int = 1; -pub const COPYFILE_RECURSE_DIR: c_int = 2; -pub const COPYFILE_RECURSE_DIR_CLEANUP: c_int = 3; -pub const COPYFILE_COPY_DATA: c_int = 4; -pub const COPYFILE_COPY_XATTR: c_int = 5; -pub const COPYFILE_START: c_int = 1; -pub const COPYFILE_FINISH: c_int = 2; -pub const COPYFILE_ERR: c_int = 3; -pub const COPYFILE_PROGRESS: c_int = 4; -pub const COPYFILE_CONTINUE: c_int = 0; -pub const COPYFILE_SKIP: c_int = 1; -pub const COPYFILE_QUIT: c_int = 2; -pub const COPYFILE_STATE_SRC_FD: c_int = 1; -pub const COPYFILE_STATE_SRC_FILENAME: c_int = 2; -pub const COPYFILE_STATE_DST_FD: c_int = 3; -pub const COPYFILE_STATE_DST_FILENAME: c_int = 4; -pub const COPYFILE_STATE_QUARANTINE: c_int = 5; -pub const COPYFILE_STATE_STATUS_CB: c_int = 6; -pub const COPYFILE_STATE_STATUS_CTX: c_int = 7; -pub const COPYFILE_STATE_COPIED: c_int = 8; -pub const COPYFILE_STATE_XATTRNAME: c_int = 9; -pub const COPYFILE_STATE_WAS_CLONED: c_int = 10; -pub const COPYFILE_STATE_SRC_BSIZE: c_int = 11; -pub const COPYFILE_STATE_DST_BSIZE: c_int = 12; -pub const COPYFILE_STATE_BSIZE: c_int = 13; - -// -pub const ATTR_BIT_MAP_COUNT: c_ushort = 5; -pub const FSOPT_NOFOLLOW: u32 = 0x1; -pub const FSOPT_NOFOLLOW_ANY: u32 = 0x800; -pub const FSOPT_REPORT_FULLSIZE: u32 = 0x4; -pub const FSOPT_PACK_INVAL_ATTRS: u32 = 0x8; -pub const FSOPT_ATTR_CMN_EXTENDED: u32 = 0x20; -pub const FSOPT_RETURN_REALDEV: u32 = 0x200; -pub const ATTR_CMN_NAME: attrgroup_t = 0x00000001; -pub const ATTR_CMN_DEVID: attrgroup_t = 0x00000002; -pub const ATTR_CMN_FSID: attrgroup_t = 0x00000004; -pub const ATTR_CMN_OBJTYPE: attrgroup_t = 0x00000008; -pub const ATTR_CMN_OBJTAG: attrgroup_t = 0x00000010; -pub const ATTR_CMN_OBJID: attrgroup_t = 0x00000020; -pub const ATTR_CMN_OBJPERMANENTID: attrgroup_t = 0x00000040; -pub const ATTR_CMN_PAROBJID: attrgroup_t = 0x00000080; -pub const ATTR_CMN_SCRIPT: attrgroup_t = 0x00000100; -pub const ATTR_CMN_CRTIME: attrgroup_t = 0x00000200; -pub const ATTR_CMN_MODTIME: attrgroup_t = 0x00000400; -pub const ATTR_CMN_CHGTIME: attrgroup_t = 0x00000800; -pub const ATTR_CMN_ACCTIME: attrgroup_t = 0x00001000; -pub const ATTR_CMN_BKUPTIME: attrgroup_t = 0x00002000; -pub const ATTR_CMN_FNDRINFO: attrgroup_t = 0x00004000; -pub const ATTR_CMN_OWNERID: attrgroup_t = 0x00008000; -pub const ATTR_CMN_GRPID: attrgroup_t = 0x00010000; -pub const ATTR_CMN_ACCESSMASK: attrgroup_t = 0x00020000; -pub const ATTR_CMN_FLAGS: attrgroup_t = 0x00040000; 
-pub const ATTR_CMN_GEN_COUNT: attrgroup_t = 0x00080000;
-pub const ATTR_CMN_DOCUMENT_ID: attrgroup_t = 0x00100000;
-pub const ATTR_CMN_USERACCESS: attrgroup_t = 0x00200000;
-pub const ATTR_CMN_EXTENDED_SECURITY: attrgroup_t = 0x00400000;
-pub const ATTR_CMN_UUID: attrgroup_t = 0x00800000;
-pub const ATTR_CMN_GRPUUID: attrgroup_t = 0x01000000;
-pub const ATTR_CMN_FILEID: attrgroup_t = 0x02000000;
-pub const ATTR_CMN_PARENTID: attrgroup_t = 0x04000000;
-pub const ATTR_CMN_FULLPATH: attrgroup_t = 0x08000000;
-pub const ATTR_CMN_ADDEDTIME: attrgroup_t = 0x10000000;
-pub const ATTR_CMN_DATA_PROTECT_FLAGS: attrgroup_t = 0x40000000;
-pub const ATTR_CMN_RETURNED_ATTRS: attrgroup_t = 0x80000000;
-pub const ATTR_VOL_FSTYPE: attrgroup_t = 0x00000001;
-pub const ATTR_VOL_SIGNATURE: attrgroup_t = 0x00000002;
-pub const ATTR_VOL_SIZE: attrgroup_t = 0x00000004;
-pub const ATTR_VOL_SPACEFREE: attrgroup_t = 0x00000008;
-pub const ATTR_VOL_SPACEAVAIL: attrgroup_t = 0x00000010;
-pub const ATTR_VOL_MINALLOCATION: attrgroup_t = 0x00000020;
-pub const ATTR_VOL_ALLOCATIONCLUMP: attrgroup_t = 0x00000040;
-pub const ATTR_VOL_IOBLOCKSIZE: attrgroup_t = 0x00000080;
-pub const ATTR_VOL_OBJCOUNT: attrgroup_t = 0x00000100;
-pub const ATTR_VOL_FILECOUNT: attrgroup_t = 0x00000200;
-pub const ATTR_VOL_DIRCOUNT: attrgroup_t = 0x00000400;
-pub const ATTR_VOL_MAXOBJCOUNT: attrgroup_t = 0x00000800;
-pub const ATTR_VOL_MOUNTPOINT: attrgroup_t = 0x00001000;
-pub const ATTR_VOL_NAME: attrgroup_t = 0x00002000;
-pub const ATTR_VOL_MOUNTFLAGS: attrgroup_t = 0x00004000;
-pub const ATTR_VOL_MOUNTEDDEVICE: attrgroup_t = 0x00008000;
-pub const ATTR_VOL_ENCODINGSUSED: attrgroup_t = 0x00010000;
-pub const ATTR_VOL_CAPABILITIES: attrgroup_t = 0x00020000;
-pub const ATTR_VOL_UUID: attrgroup_t = 0x00040000;
-pub const ATTR_VOL_SPACEUSED: attrgroup_t = 0x00800000;
-pub const ATTR_VOL_QUOTA_SIZE: attrgroup_t = 0x10000000;
-pub const ATTR_VOL_RESERVED_SIZE: attrgroup_t = 0x20000000;
-pub const ATTR_VOL_ATTRIBUTES: attrgroup_t = 0x40000000;
-pub const ATTR_VOL_INFO: attrgroup_t = 0x80000000;
-pub const ATTR_DIR_LINKCOUNT: attrgroup_t = 0x00000001;
-pub const ATTR_DIR_ENTRYCOUNT: attrgroup_t = 0x00000002;
-pub const ATTR_DIR_MOUNTSTATUS: attrgroup_t = 0x00000004;
-pub const ATTR_DIR_ALLOCSIZE: attrgroup_t = 0x00000008;
-pub const ATTR_DIR_IOBLOCKSIZE: attrgroup_t = 0x00000010;
-pub const ATTR_DIR_DATALENGTH: attrgroup_t = 0x00000020;
-pub const ATTR_FILE_LINKCOUNT: attrgroup_t = 0x00000001;
-pub const ATTR_FILE_TOTALSIZE: attrgroup_t = 0x00000002;
-pub const ATTR_FILE_ALLOCSIZE: attrgroup_t = 0x00000004;
-pub const ATTR_FILE_IOBLOCKSIZE: attrgroup_t = 0x00000008;
-pub const ATTR_FILE_DEVTYPE: attrgroup_t = 0x00000020;
-pub const ATTR_FILE_FORKCOUNT: attrgroup_t = 0x00000080;
-pub const ATTR_FILE_FORKLIST: attrgroup_t = 0x00000100;
-pub const ATTR_FILE_DATALENGTH: attrgroup_t = 0x00000200;
-pub const ATTR_FILE_DATAALLOCSIZE: attrgroup_t = 0x00000400;
-pub const ATTR_FILE_RSRCLENGTH: attrgroup_t = 0x00001000;
-pub const ATTR_FILE_RSRCALLOCSIZE: attrgroup_t = 0x00002000;
-pub const ATTR_CMNEXT_RELPATH: attrgroup_t = 0x00000004;
-pub const ATTR_CMNEXT_PRIVATESIZE: attrgroup_t = 0x00000008;
-pub const ATTR_CMNEXT_LINKID: attrgroup_t = 0x00000010;
-pub const ATTR_CMNEXT_NOFIRMLINKPATH: attrgroup_t = 0x00000020;
-pub const ATTR_CMNEXT_REALDEVID: attrgroup_t = 0x00000040;
-pub const ATTR_CMNEXT_REALFSID: attrgroup_t = 0x00000080;
-pub const ATTR_CMNEXT_CLONEID: attrgroup_t = 0x00000100;
-pub const ATTR_CMNEXT_EXT_FLAGS: attrgroup_t = 0x00000200;
-pub const ATTR_CMNEXT_RECURSIVE_GENCOUNT: attrgroup_t = 0x00000400;
-pub const DIR_MNTSTATUS_MNTPOINT: u32 = 0x1;
-pub const VOL_CAPABILITIES_FORMAT: usize = 0;
-pub const VOL_CAPABILITIES_INTERFACES: usize = 1;
-pub const VOL_CAP_FMT_PERSISTENTOBJECTIDS: attrgroup_t = 0x00000001;
-pub const VOL_CAP_FMT_SYMBOLICLINKS: attrgroup_t = 0x00000002;
-pub const VOL_CAP_FMT_HARDLINKS: attrgroup_t = 0x00000004;
-pub const VOL_CAP_FMT_JOURNAL: attrgroup_t = 0x00000008;
-pub const VOL_CAP_FMT_JOURNAL_ACTIVE: attrgroup_t = 0x00000010;
-pub const VOL_CAP_FMT_NO_ROOT_TIMES: attrgroup_t = 0x00000020;
-pub const VOL_CAP_FMT_SPARSE_FILES: attrgroup_t = 0x00000040;
-pub const VOL_CAP_FMT_ZERO_RUNS: attrgroup_t = 0x00000080;
-pub const VOL_CAP_FMT_CASE_SENSITIVE: attrgroup_t = 0x00000100;
-pub const VOL_CAP_FMT_CASE_PRESERVING: attrgroup_t = 0x00000200;
-pub const VOL_CAP_FMT_FAST_STATFS: attrgroup_t = 0x00000400;
-pub const VOL_CAP_FMT_2TB_FILESIZE: attrgroup_t = 0x00000800;
-pub const VOL_CAP_FMT_OPENDENYMODES: attrgroup_t = 0x00001000;
-pub const VOL_CAP_FMT_HIDDEN_FILES: attrgroup_t = 0x00002000;
-pub const VOL_CAP_FMT_PATH_FROM_ID: attrgroup_t = 0x00004000;
-pub const VOL_CAP_FMT_NO_VOLUME_SIZES: attrgroup_t = 0x00008000;
-pub const VOL_CAP_FMT_DECMPFS_COMPRESSION: attrgroup_t = 0x00010000;
-pub const VOL_CAP_FMT_64BIT_OBJECT_IDS: attrgroup_t = 0x00020000;
-pub const VOL_CAP_FMT_DIR_HARDLINKS: attrgroup_t = 0x00040000;
-pub const VOL_CAP_FMT_DOCUMENT_ID: attrgroup_t = 0x00080000;
-pub const VOL_CAP_FMT_WRITE_GENERATION_COUNT: attrgroup_t = 0x00100000;
-pub const VOL_CAP_FMT_NO_IMMUTABLE_FILES: attrgroup_t = 0x00200000;
-pub const VOL_CAP_FMT_NO_PERMISSIONS: attrgroup_t = 0x00400000;
-pub const VOL_CAP_FMT_SHARED_SPACE: attrgroup_t = 0x00800000;
-pub const VOL_CAP_FMT_VOL_GROUPS: attrgroup_t = 0x01000000;
-pub const VOL_CAP_FMT_SEALED: attrgroup_t = 0x02000000;
-pub const VOL_CAP_INT_SEARCHFS: attrgroup_t = 0x00000001;
-pub const VOL_CAP_INT_ATTRLIST: attrgroup_t = 0x00000002;
-pub const VOL_CAP_INT_NFSEXPORT: attrgroup_t = 0x00000004;
-pub const VOL_CAP_INT_READDIRATTR: attrgroup_t = 0x00000008;
-pub const VOL_CAP_INT_EXCHANGEDATA: attrgroup_t = 0x00000010;
-pub const VOL_CAP_INT_COPYFILE: attrgroup_t = 0x00000020;
-pub const VOL_CAP_INT_ALLOCATE: attrgroup_t = 0x00000040;
-pub const VOL_CAP_INT_VOL_RENAME: attrgroup_t = 0x00000080;
-pub const VOL_CAP_INT_ADVLOCK: attrgroup_t = 0x00000100;
-pub const VOL_CAP_INT_FLOCK: attrgroup_t = 0x00000200;
-pub const VOL_CAP_INT_EXTENDED_SECURITY: attrgroup_t = 0x00000400;
-pub const VOL_CAP_INT_USERACCESS: attrgroup_t = 0x00000800;
-pub const VOL_CAP_INT_MANLOCK: attrgroup_t = 0x00001000;
-pub const VOL_CAP_INT_NAMEDSTREAMS: attrgroup_t = 0x00002000;
-pub const VOL_CAP_INT_EXTENDED_ATTR: attrgroup_t = 0x00004000;
-pub const VOL_CAP_INT_CLONE: attrgroup_t = 0x00010000;
-pub const VOL_CAP_INT_SNAPSHOT: attrgroup_t = 0x00020000;
-pub const VOL_CAP_INT_RENAME_SWAP: attrgroup_t = 0x00040000;
-pub const VOL_CAP_INT_RENAME_EXCL: attrgroup_t = 0x00080000;
-pub const VOL_CAP_INT_RENAME_OPENFAIL: attrgroup_t = 0x00100000;
-
-// os/clock.h
-pub const OS_CLOCK_MACH_ABSOLUTE_TIME: os_clockid_t = 32;
-
-// os/os_sync_wait_on_address.h
-pub const OS_SYNC_WAIT_ON_ADDRESS_NONE: os_sync_wait_on_address_flags_t = 0x00000000;
-pub const OS_SYNC_WAIT_ON_ADDRESS_SHARED: os_sync_wait_on_address_flags_t = 0x00000001;
-pub const OS_SYNC_WAKE_BY_ADDRESS_NONE: os_sync_wake_by_address_flags_t = 0x00000000;
-pub const OS_SYNC_WAKE_BY_ADDRESS_SHARED: os_sync_wake_by_address_flags_t = 0x00000001;
-
-// -/// Process being created by fork. -pub const SIDL: u32 = 1; -/// Currently runnable. -pub const SRUN: u32 = 2; -/// Sleeping on an address. -pub const SSLEEP: u32 = 3; -/// Process debugging or suspension. -pub const SSTOP: u32 = 4; -/// Awaiting collection by parent. -pub const SZOMB: u32 = 5; - -// sys/vsock.h -pub const VMADDR_CID_ANY: c_uint = 0xFFFFFFFF; -pub const VMADDR_CID_HYPERVISOR: c_uint = 0; -pub const VMADDR_CID_RESERVED: c_uint = 1; -pub const VMADDR_CID_HOST: c_uint = 2; -pub const VMADDR_PORT_ANY: c_uint = 0xFFFFFFFF; - -const fn __DARWIN_ALIGN32(p: usize) -> usize { - const __DARWIN_ALIGNBYTES32: usize = size_of::() - 1; - (p + __DARWIN_ALIGNBYTES32) & !__DARWIN_ALIGNBYTES32 -} - -pub const THREAD_EXTENDED_POLICY_COUNT: mach_msg_type_number_t = - (size_of::() / size_of::()) as mach_msg_type_number_t; -pub const THREAD_TIME_CONSTRAINT_POLICY_COUNT: mach_msg_type_number_t = - (size_of::() / size_of::()) - as mach_msg_type_number_t; -pub const THREAD_PRECEDENCE_POLICY_COUNT: mach_msg_type_number_t = - (size_of::() / size_of::()) - as mach_msg_type_number_t; -pub const THREAD_AFFINITY_POLICY_COUNT: mach_msg_type_number_t = - (size_of::() / size_of::()) as mach_msg_type_number_t; -pub const THREAD_BACKGROUND_POLICY_COUNT: mach_msg_type_number_t = - (size_of::() / size_of::()) - as mach_msg_type_number_t; -pub const THREAD_LATENCY_QOS_POLICY_COUNT: mach_msg_type_number_t = - (size_of::() / size_of::()) - as mach_msg_type_number_t; -pub const THREAD_THROUGHPUT_QOS_POLICY_COUNT: mach_msg_type_number_t = - (size_of::() / size_of::()) - as mach_msg_type_number_t; -pub const THREAD_BASIC_INFO_COUNT: mach_msg_type_number_t = - (size_of::() / size_of::()) as mach_msg_type_number_t; -pub const THREAD_IDENTIFIER_INFO_COUNT: mach_msg_type_number_t = - (size_of::() / size_of::()) as mach_msg_type_number_t; -pub const THREAD_EXTENDED_INFO_COUNT: mach_msg_type_number_t = - (size_of::() / size_of::()) as mach_msg_type_number_t; - -pub const TASK_THREAD_TIMES_INFO_COUNT: u32 = - (size_of::() / size_of::()) as u32; -pub const MACH_TASK_BASIC_INFO_COUNT: u32 = - (size_of::() / size_of::()) as u32; -pub const HOST_VM_INFO64_COUNT: mach_msg_type_number_t = - (size_of::() / size_of::()) as mach_msg_type_number_t; - -// bsd/net/if_mib.h -/// Non-interface-specific -pub const IFMIB_SYSTEM: c_int = 1; -/// Per-interface data table -pub const IFMIB_IFDATA: c_int = 2; -/// All interfaces data at once -pub const IFMIB_IFALLDATA: c_int = 3; - -/// Generic stats for all kinds of ifaces -pub const IFDATA_GENERAL: c_int = 1; -/// Specific to the type of interface -pub const IFDATA_LINKSPECIFIC: c_int = 2; -/// Addresses assigned to interface -pub const IFDATA_ADDRS: c_int = 3; -/// Multicast addresses assigned to interface -pub const IFDATA_MULTIADDRS: c_int = 4; - -/// Number of interfaces configured -pub const IFMIB_IFCOUNT: c_int = 1; - -/// Functions not specific to a type of iface -pub const NETLINK_GENERIC: c_int = 0; - -pub const DOT3COMPLIANCE_STATS: c_int = 1; -pub const DOT3COMPLIANCE_COLLS: c_int = 2; - -// kern_control.h -pub const MAX_KCTL_NAME: usize = 96; - -f! 
{ - pub fn CMSG_NXTHDR(mhdr: *const crate::msghdr, cmsg: *const cmsghdr) -> *mut cmsghdr { - if cmsg.is_null() { - return crate::CMSG_FIRSTHDR(mhdr); - } - let cmsg_len = (*cmsg).cmsg_len as usize; - let next = cmsg as usize + __DARWIN_ALIGN32(cmsg_len); - let max = (*mhdr).msg_control as usize + (*mhdr).msg_controllen as usize; - if next + __DARWIN_ALIGN32(size_of::()) > max { - core::ptr::null_mut() - } else { - next as *mut cmsghdr - } - } - - pub fn CMSG_DATA(cmsg: *const cmsghdr) -> *mut c_uchar { - (cmsg as *mut c_uchar).add(__DARWIN_ALIGN32(size_of::())) - } - - pub const fn CMSG_SPACE(length: c_uint) -> c_uint { - (__DARWIN_ALIGN32(size_of::()) + __DARWIN_ALIGN32(length as usize)) as c_uint - } - - pub const fn CMSG_LEN(length: c_uint) -> c_uint { - (__DARWIN_ALIGN32(size_of::()) + length as usize) as c_uint - } - - pub const fn VM_MAKE_TAG(id: u8) -> u32 { - (id as u32) << 24u32 - } -} - -safe_f! { - pub const fn WSTOPSIG(status: c_int) -> c_int { - status >> 8 - } - - pub const fn _WSTATUS(status: c_int) -> c_int { - status & 0x7f - } - - pub const fn WIFCONTINUED(status: c_int) -> bool { - _WSTATUS(status) == _WSTOPPED && WSTOPSIG(status) == 0x13 - } - - pub const fn WIFSIGNALED(status: c_int) -> bool { - _WSTATUS(status) != _WSTOPPED && _WSTATUS(status) != 0 - } - - pub const fn WIFSTOPPED(status: c_int) -> bool { - _WSTATUS(status) == _WSTOPPED && WSTOPSIG(status) != 0x13 - } - - pub const fn makedev(major: i32, minor: i32) -> dev_t { - (major << 24) | minor - } - - pub const fn major(dev: dev_t) -> i32 { - (dev >> 24) & 0xff - } - - pub const fn minor(dev: dev_t) -> i32 { - dev & 0xffffff - } -} - -extern "C" { - pub fn setgrent(); - #[doc(hidden)] - #[deprecated(since = "0.2.49", note = "Deprecated in MacOSX 10.5")] - #[cfg_attr(not(target_arch = "aarch64"), link_name = "daemon$1050")] - pub fn daemon(nochdir: c_int, noclose: c_int) -> c_int; - #[doc(hidden)] - #[deprecated(since = "0.2.49", note = "Deprecated in MacOSX 10.10")] - pub fn sem_destroy(sem: *mut sem_t) -> c_int; - #[doc(hidden)] - #[deprecated(since = "0.2.49", note = "Deprecated in MacOSX 10.10")] - pub fn sem_init(sem: *mut sem_t, pshared: c_int, value: c_uint) -> c_int; - pub fn aio_read(aiocbp: *mut aiocb) -> c_int; - pub fn aio_write(aiocbp: *mut aiocb) -> c_int; - pub fn aio_fsync(op: c_int, aiocbp: *mut aiocb) -> c_int; - pub fn aio_error(aiocbp: *const aiocb) -> c_int; - pub fn aio_return(aiocbp: *mut aiocb) -> ssize_t; - #[cfg_attr( - all(target_os = "macos", target_arch = "x86"), - link_name = "aio_suspend$UNIX2003" - )] - pub fn aio_suspend( - aiocb_list: *const *const aiocb, - nitems: c_int, - timeout: *const crate::timespec, - ) -> c_int; - pub fn aio_cancel(fd: c_int, aiocbp: *mut aiocb) -> c_int; - pub fn chflags(path: *const c_char, flags: c_uint) -> c_int; - pub fn fchflags(fd: c_int, flags: c_uint) -> c_int; - pub fn clock_getres(clk_id: crate::clockid_t, tp: *mut crate::timespec) -> c_int; - pub fn clock_gettime(clk_id: crate::clockid_t, tp: *mut crate::timespec) -> c_int; - pub fn lio_listio( - mode: c_int, - aiocb_list: *const *mut aiocb, - nitems: c_int, - sevp: *mut sigevent, - ) -> c_int; - - pub fn dirfd(dirp: *mut crate::DIR) -> c_int; - - pub fn lutimes(file: *const c_char, times: *const crate::timeval) -> c_int; - - pub fn gettimeofday(tp: *mut crate::timeval, tz: *mut c_void) -> c_int; - pub fn getutxent() -> *mut utmpx; - pub fn getutxid(ut: *const utmpx) -> *mut utmpx; - pub fn getutxline(ut: *const utmpx) -> *mut utmpx; - pub fn pututxline(ut: *const utmpx) -> *mut utmpx; - pub 
fn setutxent(); - pub fn endutxent(); - pub fn utmpxname(file: *const c_char) -> c_int; - - pub fn asctime(tm: *const crate::tm) -> *mut c_char; - pub fn ctime(clock: *const time_t) -> *mut c_char; - pub fn getdate(datestr: *const c_char) -> *mut crate::tm; - pub fn strptime( - buf: *const c_char, - format: *const c_char, - timeptr: *mut crate::tm, - ) -> *mut c_char; - pub fn asctime_r(tm: *const crate::tm, result: *mut c_char) -> *mut c_char; - pub fn ctime_r(clock: *const time_t, result: *mut c_char) -> *mut c_char; - - pub fn getnameinfo( - sa: *const crate::sockaddr, - salen: crate::socklen_t, - host: *mut c_char, - hostlen: crate::socklen_t, - serv: *mut c_char, - servlen: crate::socklen_t, - flags: c_int, - ) -> c_int; - pub fn mincore(addr: *const c_void, len: size_t, vec: *mut c_char) -> c_int; - pub fn sysctlnametomib(name: *const c_char, mibp: *mut c_int, sizep: *mut size_t) -> c_int; - #[cfg_attr( - all(target_os = "macos", target_arch = "x86"), - link_name = "mprotect$UNIX2003" - )] - pub fn mprotect(addr: *mut c_void, len: size_t, prot: c_int) -> c_int; - pub fn semget(key: key_t, nsems: c_int, semflg: c_int) -> c_int; - #[cfg_attr( - all(target_os = "macos", target_arch = "x86"), - link_name = "semctl$UNIX2003" - )] - pub fn semctl(semid: c_int, semnum: c_int, cmd: c_int, ...) -> c_int; - pub fn semop(semid: c_int, sops: *mut sembuf, nsops: size_t) -> c_int; - pub fn shm_open(name: *const c_char, oflag: c_int, ...) -> c_int; - pub fn ftok(pathname: *const c_char, proj_id: c_int) -> key_t; - pub fn shmat(shmid: c_int, shmaddr: *const c_void, shmflg: c_int) -> *mut c_void; - pub fn shmdt(shmaddr: *const c_void) -> c_int; - #[cfg_attr( - all(target_os = "macos", target_arch = "x86"), - link_name = "shmctl$UNIX2003" - )] - pub fn shmctl(shmid: c_int, cmd: c_int, buf: *mut crate::shmid_ds) -> c_int; - pub fn shmget(key: key_t, size: size_t, shmflg: c_int) -> c_int; - pub fn sysctl( - name: *mut c_int, - namelen: c_uint, - oldp: *mut c_void, - oldlenp: *mut size_t, - newp: *mut c_void, - newlen: size_t, - ) -> c_int; - pub fn sysctlbyname( - name: *const c_char, - oldp: *mut c_void, - oldlenp: *mut size_t, - newp: *mut c_void, - newlen: size_t, - ) -> c_int; - #[deprecated(since = "0.2.55", note = "Use the `mach2` crate instead")] - pub fn mach_absolute_time() -> u64; - #[deprecated(since = "0.2.55", note = "Use the `mach2` crate instead")] - #[allow(deprecated)] - pub fn mach_timebase_info(info: *mut crate::mach_timebase_info) -> c_int; - #[deprecated(since = "0.2.55", note = "Use the `mach2` crate instead")] - pub fn mach_host_self() -> mach_port_t; - #[deprecated(since = "0.2.55", note = "Use the `mach2` crate instead")] - pub fn mach_thread_self() -> mach_port_t; - pub fn pthread_cond_timedwait_relative_np( - cond: *mut pthread_cond_t, - lock: *mut pthread_mutex_t, - timeout: *const crate::timespec, - ) -> c_int; - pub fn pthread_once( - once_control: *mut crate::pthread_once_t, - init_routine: Option, - ) -> c_int; - pub fn pthread_attr_getinheritsched( - attr: *const crate::pthread_attr_t, - inheritsched: *mut c_int, - ) -> c_int; - pub fn pthread_attr_getschedpolicy( - attr: *const crate::pthread_attr_t, - policy: *mut c_int, - ) -> c_int; - pub fn pthread_attr_getscope( - attr: *const crate::pthread_attr_t, - contentionscope: *mut c_int, - ) -> c_int; - pub fn pthread_attr_getstackaddr( - attr: *const crate::pthread_attr_t, - stackaddr: *mut *mut c_void, - ) -> c_int; - pub fn pthread_attr_getdetachstate( - attr: *const crate::pthread_attr_t, - detachstate: *mut c_int, - ) 
-> c_int; - pub fn pthread_attr_setinheritsched( - attr: *mut crate::pthread_attr_t, - inheritsched: c_int, - ) -> c_int; - pub fn pthread_attr_setschedpolicy(attr: *mut crate::pthread_attr_t, policy: c_int) -> c_int; - pub fn pthread_attr_setscope(attr: *mut crate::pthread_attr_t, contentionscope: c_int) - -> c_int; - pub fn pthread_attr_setstackaddr( - attr: *mut crate::pthread_attr_t, - stackaddr: *mut c_void, - ) -> c_int; - pub fn pthread_setname_np(name: *const c_char) -> c_int; - pub fn pthread_getname_np(thread: crate::pthread_t, name: *mut c_char, len: size_t) -> c_int; - pub fn pthread_mach_thread_np(thread: crate::pthread_t) -> crate::mach_port_t; - pub fn pthread_from_mach_thread_np(port: crate::mach_port_t) -> crate::pthread_t; - pub fn pthread_create_from_mach_thread( - thread: *mut crate::pthread_t, - attr: *const crate::pthread_attr_t, - f: extern "C" fn(*mut c_void) -> *mut c_void, - value: *mut c_void, - ) -> c_int; - pub fn pthread_stack_frame_decode_np( - frame_addr: crate::uintptr_t, - return_addr: *mut crate::uintptr_t, - ) -> crate::uintptr_t; - pub fn pthread_get_stackaddr_np(thread: crate::pthread_t) -> *mut c_void; - pub fn pthread_get_stacksize_np(thread: crate::pthread_t) -> size_t; - pub fn pthread_condattr_setpshared(attr: *mut pthread_condattr_t, pshared: c_int) -> c_int; - pub fn pthread_condattr_getpshared( - attr: *const pthread_condattr_t, - pshared: *mut c_int, - ) -> c_int; - pub fn pthread_main_np() -> c_int; - pub fn pthread_mutexattr_setpshared(attr: *mut pthread_mutexattr_t, pshared: c_int) -> c_int; - pub fn pthread_mutexattr_getpshared( - attr: *const pthread_mutexattr_t, - pshared: *mut c_int, - ) -> c_int; - pub fn pthread_rwlockattr_getpshared( - attr: *const pthread_rwlockattr_t, - val: *mut c_int, - ) -> c_int; - pub fn pthread_rwlockattr_setpshared(attr: *mut pthread_rwlockattr_t, val: c_int) -> c_int; - pub fn pthread_threadid_np(thread: crate::pthread_t, thread_id: *mut u64) -> c_int; - pub fn pthread_attr_set_qos_class_np( - attr: *mut pthread_attr_t, - class: qos_class_t, - priority: c_int, - ) -> c_int; - pub fn pthread_attr_get_qos_class_np( - attr: *mut pthread_attr_t, - class: *mut qos_class_t, - priority: *mut c_int, - ) -> c_int; - pub fn pthread_set_qos_class_self_np(class: qos_class_t, priority: c_int) -> c_int; - pub fn pthread_get_qos_class_np( - thread: crate::pthread_t, - class: *mut qos_class_t, - priority: *mut c_int, - ) -> c_int; - pub fn pthread_attr_getschedparam( - attr: *const crate::pthread_attr_t, - param: *mut sched_param, - ) -> c_int; - pub fn pthread_attr_setschedparam( - attr: *mut crate::pthread_attr_t, - param: *const sched_param, - ) -> c_int; - pub fn pthread_getschedparam( - thread: crate::pthread_t, - policy: *mut c_int, - param: *mut sched_param, - ) -> c_int; - pub fn pthread_setschedparam( - thread: crate::pthread_t, - policy: c_int, - param: *const sched_param, - ) -> c_int; - - // Available from Big Sur - pub fn pthread_introspection_hook_install( - hook: crate::pthread_introspection_hook_t, - ) -> crate::pthread_introspection_hook_t; - pub fn pthread_introspection_setspecific_np( - thread: crate::pthread_t, - key: crate::pthread_key_t, - value: *const c_void, - ) -> c_int; - pub fn pthread_introspection_getspecific_np( - thread: crate::pthread_t, - key: crate::pthread_key_t, - ) -> *mut c_void; - pub fn pthread_jit_write_protect_np(enabled: c_int); - pub fn pthread_jit_write_protect_supported_np() -> c_int; - // An array of pthread_jit_write_with_callback_np must declare - // the list of callbacks 
e.g. - // #[link_section = "__DATA_CONST,__pth_jit_func"] - // static callbacks: [libc::pthread_jit_write_callback_t; 2] = [native_jit_write_cb, - // std::mem::transmute::(std::ptr::null())]; - // (a handy PTHREAD_JIT_WRITE_CALLBACK_NP macro for other languages). - pub fn pthread_jit_write_with_callback_np( - callback: crate::pthread_jit_write_callback_t, - ctx: *mut c_void, - ) -> c_int; - pub fn pthread_jit_write_freeze_callbacks_np(); - pub fn pthread_cpu_number_np(cpu_number_out: *mut size_t) -> c_int; - - // Available starting with macOS 14.4. - pub fn os_sync_wait_on_address( - addr: *mut c_void, - value: u64, - size: size_t, - flags: os_sync_wait_on_address_flags_t, - ) -> c_int; - pub fn os_sync_wait_on_address_with_deadline( - addr: *mut c_void, - value: u64, - size: size_t, - flags: os_sync_wait_on_address_flags_t, - clockid: os_clockid_t, - deadline: u64, - ) -> c_int; - pub fn os_sync_wait_on_address_with_timeout( - addr: *mut c_void, - value: u64, - size: size_t, - flags: os_sync_wait_on_address_flags_t, - clockid: os_clockid_t, - timeout_ns: u64, - ) -> c_int; - pub fn os_sync_wake_by_address_any( - addr: *mut c_void, - size: size_t, - flags: os_sync_wake_by_address_flags_t, - ) -> c_int; - pub fn os_sync_wake_by_address_all( - addr: *mut c_void, - size: size_t, - flags: os_sync_wake_by_address_flags_t, - ) -> c_int; - - pub fn os_unfair_lock_lock(lock: os_unfair_lock_t); - pub fn os_unfair_lock_trylock(lock: os_unfair_lock_t) -> bool; - pub fn os_unfair_lock_unlock(lock: os_unfair_lock_t); - pub fn os_unfair_lock_assert_owner(lock: os_unfair_lock_t); - pub fn os_unfair_lock_assert_not_owner(lock: os_unfair_lock_t); - - pub fn os_log_create(subsystem: *const c_char, category: *const c_char) -> crate::os_log_t; - pub fn os_log_type_enabled(oslog: crate::os_log_t, tpe: crate::os_log_type_t) -> bool; - pub fn os_signpost_id_make_with_pointer( - log: crate::os_log_t, - ptr: *const c_void, - ) -> crate::os_signpost_id_t; - pub fn os_signpost_id_generate(log: crate::os_log_t) -> crate::os_signpost_id_t; - pub fn os_signpost_enabled(log: crate::os_log_t) -> bool; - - pub fn thread_policy_set( - thread: thread_t, - flavor: thread_policy_flavor_t, - policy_info: thread_policy_t, - count: mach_msg_type_number_t, - ) -> kern_return_t; - pub fn thread_policy_get( - thread: thread_t, - flavor: thread_policy_flavor_t, - policy_info: thread_policy_t, - count: *mut mach_msg_type_number_t, - get_default: *mut boolean_t, - ) -> kern_return_t; - pub fn thread_info( - target_act: thread_inspect_t, - flavor: thread_flavor_t, - thread_info_out: thread_info_t, - thread_info_outCnt: *mut mach_msg_type_number_t, - ) -> kern_return_t; - #[cfg_attr(doc, doc(alias = "__errno_location"))] - #[cfg_attr(doc, doc(alias = "errno"))] - pub fn __error() -> *mut c_int; - pub fn backtrace(buf: *mut *mut c_void, sz: c_int) -> c_int; - pub fn backtrace_symbols(addrs: *const *mut c_void, sz: c_int) -> *mut *mut c_char; - pub fn backtrace_symbols_fd(addrs: *const *mut c_void, sz: c_int, fd: c_int); - pub fn backtrace_from_fp(startfp: *mut c_void, array: *mut *mut c_void, size: c_int) -> c_int; - pub fn backtrace_image_offsets( - array: *const *mut c_void, - image_offsets: *mut image_offset, - size: c_int, - ); - pub fn backtrace_async(array: *mut *mut c_void, length: size_t, task_id: *mut u32) -> size_t; - #[cfg_attr( - all(target_os = "macos", not(target_arch = "aarch64")), - link_name = "statfs$INODE64" - )] - pub fn statfs(path: *const c_char, buf: *mut statfs) -> c_int; - #[cfg_attr( - all(target_os = "macos", 
not(target_arch = "aarch64")), - link_name = "fstatfs$INODE64" - )] - pub fn fstatfs(fd: c_int, buf: *mut statfs) -> c_int; - pub fn kevent( - kq: c_int, - changelist: *const crate::kevent, - nchanges: c_int, - eventlist: *mut crate::kevent, - nevents: c_int, - timeout: *const crate::timespec, - ) -> c_int; - pub fn kevent64( - kq: c_int, - changelist: *const crate::kevent64_s, - nchanges: c_int, - eventlist: *mut crate::kevent64_s, - nevents: c_int, - flags: c_uint, - timeout: *const crate::timespec, - ) -> c_int; - pub fn mount( - src: *const c_char, - target: *const c_char, - flags: c_int, - data: *mut c_void, - ) -> c_int; - pub fn fmount(src: *const c_char, fd: c_int, flags: c_int, data: *mut c_void) -> c_int; - pub fn ptrace(request: c_int, pid: crate::pid_t, addr: *mut c_char, data: c_int) -> c_int; - pub fn quotactl(special: *const c_char, cmd: c_int, id: c_int, data: *mut c_char) -> c_int; - pub fn sethostname(name: *const c_char, len: c_int) -> c_int; - pub fn sendfile( - fd: c_int, - s: c_int, - offset: off_t, - len: *mut off_t, - hdtr: *mut crate::sf_hdtr, - flags: c_int, - ) -> c_int; - pub fn futimens(fd: c_int, times: *const crate::timespec) -> c_int; - pub fn utimensat( - dirfd: c_int, - path: *const c_char, - times: *const crate::timespec, - flag: c_int, - ) -> c_int; - pub fn openpty( - amaster: *mut c_int, - aslave: *mut c_int, - name: *mut c_char, - termp: *mut termios, - winp: *mut crate::winsize, - ) -> c_int; - pub fn forkpty( - amaster: *mut c_int, - name: *mut c_char, - termp: *mut termios, - winp: *mut crate::winsize, - ) -> crate::pid_t; - pub fn login_tty(fd: c_int) -> c_int; - pub fn duplocale(base: crate::locale_t) -> crate::locale_t; - pub fn freelocale(loc: crate::locale_t) -> c_int; - pub fn localeconv_l(loc: crate::locale_t) -> *mut lconv; - pub fn newlocale(mask: c_int, locale: *const c_char, base: crate::locale_t) -> crate::locale_t; - pub fn uselocale(loc: crate::locale_t) -> crate::locale_t; - pub fn querylocale(mask: c_int, loc: crate::locale_t) -> *const c_char; - pub fn getpriority(which: c_int, who: crate::id_t) -> c_int; - pub fn setpriority(which: c_int, who: crate::id_t, prio: c_int) -> c_int; - pub fn getdomainname(name: *mut c_char, len: c_int) -> c_int; - pub fn setdomainname(name: *const c_char, len: c_int) -> c_int; - pub fn preadv(fd: c_int, iov: *const crate::iovec, iovcnt: c_int, offset: off_t) -> ssize_t; - pub fn pwritev(fd: c_int, iov: *const crate::iovec, iovcnt: c_int, offset: off_t) -> ssize_t; - pub fn getxattr( - path: *const c_char, - name: *const c_char, - value: *mut c_void, - size: size_t, - position: u32, - flags: c_int, - ) -> ssize_t; - pub fn fgetxattr( - filedes: c_int, - name: *const c_char, - value: *mut c_void, - size: size_t, - position: u32, - flags: c_int, - ) -> ssize_t; - pub fn setxattr( - path: *const c_char, - name: *const c_char, - value: *const c_void, - size: size_t, - position: u32, - flags: c_int, - ) -> c_int; - pub fn fsetxattr( - filedes: c_int, - name: *const c_char, - value: *const c_void, - size: size_t, - position: u32, - flags: c_int, - ) -> c_int; - pub fn listxattr(path: *const c_char, list: *mut c_char, size: size_t, flags: c_int) - -> ssize_t; - pub fn flistxattr(filedes: c_int, list: *mut c_char, size: size_t, flags: c_int) -> ssize_t; - pub fn removexattr(path: *const c_char, name: *const c_char, flags: c_int) -> c_int; - pub fn renamex_np(from: *const c_char, to: *const c_char, flags: c_uint) -> c_int; - pub fn renameatx_np( - fromfd: c_int, - from: *const c_char, - tofd: c_int, - to: 
*const c_char, - flags: c_uint, - ) -> c_int; - pub fn fremovexattr(filedes: c_int, name: *const c_char, flags: c_int) -> c_int; - - pub fn getgrouplist( - name: *const c_char, - basegid: c_int, - groups: *mut c_int, - ngroups: *mut c_int, - ) -> c_int; - pub fn initgroups(user: *const c_char, basegroup: c_int) -> c_int; - - #[cfg_attr( - all(target_os = "macos", target_arch = "x86"), - link_name = "waitid$UNIX2003" - )] - pub fn waitid( - idtype: idtype_t, - id: id_t, - infop: *mut crate::siginfo_t, - options: c_int, - ) -> c_int; - pub fn brk(addr: *const c_void) -> *mut c_void; - pub fn sbrk(increment: c_int) -> *mut c_void; - pub fn settimeofday(tv: *const crate::timeval, tz: *const crate::timezone) -> c_int; - #[deprecated(since = "0.2.55", note = "Use the `mach2` crate instead")] - pub fn _dyld_image_count() -> u32; - #[deprecated(since = "0.2.55", note = "Use the `mach2` crate instead")] - #[allow(deprecated)] - pub fn _dyld_get_image_header(image_index: u32) -> *const mach_header; - #[deprecated(since = "0.2.55", note = "Use the `mach2` crate instead")] - pub fn _dyld_get_image_vmaddr_slide(image_index: u32) -> intptr_t; - #[deprecated(since = "0.2.55", note = "Use the `mach2` crate instead")] - pub fn _dyld_get_image_name(image_index: u32) -> *const c_char; - - pub fn posix_spawn( - pid: *mut crate::pid_t, - path: *const c_char, - file_actions: *const crate::posix_spawn_file_actions_t, - attrp: *const crate::posix_spawnattr_t, - argv: *const *mut c_char, - envp: *const *mut c_char, - ) -> c_int; - pub fn posix_spawnp( - pid: *mut crate::pid_t, - file: *const c_char, - file_actions: *const crate::posix_spawn_file_actions_t, - attrp: *const crate::posix_spawnattr_t, - argv: *const *mut c_char, - envp: *const *mut c_char, - ) -> c_int; - pub fn posix_spawnattr_init(attr: *mut posix_spawnattr_t) -> c_int; - pub fn posix_spawnattr_destroy(attr: *mut posix_spawnattr_t) -> c_int; - pub fn posix_spawnattr_getsigdefault( - attr: *const posix_spawnattr_t, - default: *mut crate::sigset_t, - ) -> c_int; - pub fn posix_spawnattr_setsigdefault( - attr: *mut posix_spawnattr_t, - default: *const crate::sigset_t, - ) -> c_int; - pub fn posix_spawnattr_getsigmask( - attr: *const posix_spawnattr_t, - default: *mut crate::sigset_t, - ) -> c_int; - pub fn posix_spawnattr_setsigmask( - attr: *mut posix_spawnattr_t, - default: *const crate::sigset_t, - ) -> c_int; - pub fn posix_spawnattr_getflags(attr: *const posix_spawnattr_t, flags: *mut c_short) -> c_int; - pub fn posix_spawnattr_setflags(attr: *mut posix_spawnattr_t, flags: c_short) -> c_int; - pub fn posix_spawnattr_getpgroup( - attr: *const posix_spawnattr_t, - flags: *mut crate::pid_t, - ) -> c_int; - pub fn posix_spawnattr_setpgroup(attr: *mut posix_spawnattr_t, flags: crate::pid_t) -> c_int; - pub fn posix_spawnattr_setarchpref_np( - attr: *mut posix_spawnattr_t, - count: size_t, - pref: *mut crate::cpu_type_t, - subpref: *mut crate::cpu_subtype_t, - ocount: *mut size_t, - ) -> c_int; - pub fn posix_spawnattr_getarchpref_np( - attr: *const posix_spawnattr_t, - count: size_t, - pref: *mut crate::cpu_type_t, - subpref: *mut crate::cpu_subtype_t, - ocount: *mut size_t, - ) -> c_int; - pub fn posix_spawnattr_getbinpref_np( - attr: *const posix_spawnattr_t, - count: size_t, - pref: *mut crate::cpu_type_t, - ocount: *mut size_t, - ) -> c_int; - pub fn posix_spawnattr_setbinpref_np( - attr: *mut posix_spawnattr_t, - count: size_t, - pref: *mut crate::cpu_type_t, - ocount: *mut size_t, - ) -> c_int; - pub fn posix_spawnattr_set_qos_class_np( - attr: 
*mut posix_spawnattr_t, - qos_class: crate::qos_class_t, - ) -> c_int; - pub fn posix_spawnattr_get_qos_class_np( - attr: *const posix_spawnattr_t, - qos_class: *mut crate::qos_class_t, - ) -> c_int; - - pub fn posix_spawn_file_actions_init(actions: *mut posix_spawn_file_actions_t) -> c_int; - pub fn posix_spawn_file_actions_destroy(actions: *mut posix_spawn_file_actions_t) -> c_int; - pub fn posix_spawn_file_actions_addopen( - actions: *mut posix_spawn_file_actions_t, - fd: c_int, - path: *const c_char, - oflag: c_int, - mode: mode_t, - ) -> c_int; - pub fn posix_spawn_file_actions_addclose( - actions: *mut posix_spawn_file_actions_t, - fd: c_int, - ) -> c_int; - pub fn posix_spawn_file_actions_adddup2( - actions: *mut posix_spawn_file_actions_t, - fd: c_int, - newfd: c_int, - ) -> c_int; - pub fn uname(buf: *mut crate::utsname) -> c_int; - - pub fn connectx( - socket: c_int, - endpoints: *const sa_endpoints_t, - associd: sae_associd_t, - flags: c_uint, - iov: *const crate::iovec, - iovcnt: c_uint, - len: *mut size_t, - connid: *mut sae_connid_t, - ) -> c_int; - pub fn disconnectx(socket: c_int, associd: sae_associd_t, connid: sae_connid_t) -> c_int; - - pub fn ntp_adjtime(buf: *mut timex) -> c_int; - pub fn ntp_gettime(buf: *mut ntptimeval) -> c_int; - - #[cfg_attr( - all(target_os = "macos", not(target_arch = "aarch64")), - link_name = "getmntinfo$INODE64" - )] - pub fn getmntinfo(mntbufp: *mut *mut statfs, flags: c_int) -> c_int; - #[cfg_attr( - all(target_os = "macos", not(target_arch = "aarch64")), - link_name = "getfsstat$INODE64" - )] - pub fn getfsstat(mntbufp: *mut statfs, bufsize: c_int, flags: c_int) -> c_int; - - // Copy-on-write functions. - // According to the man page `flags` is an `int` but in the header - // this is a `uint32_t`. - pub fn clonefile(src: *const c_char, dst: *const c_char, flags: u32) -> c_int; - pub fn clonefileat( - src_dirfd: c_int, - src: *const c_char, - dst_dirfd: c_int, - dst: *const c_char, - flags: u32, - ) -> c_int; - pub fn fclonefileat(srcfd: c_int, dst_dirfd: c_int, dst: *const c_char, flags: u32) -> c_int; - - pub fn copyfile( - from: *const c_char, - to: *const c_char, - state: copyfile_state_t, - flags: copyfile_flags_t, - ) -> c_int; - pub fn fcopyfile( - from: c_int, - to: c_int, - state: copyfile_state_t, - flags: copyfile_flags_t, - ) -> c_int; - pub fn copyfile_state_free(s: copyfile_state_t) -> c_int; - pub fn copyfile_state_alloc() -> copyfile_state_t; - pub fn copyfile_state_get(s: copyfile_state_t, flags: u32, dst: *mut c_void) -> c_int; - pub fn copyfile_state_set(s: copyfile_state_t, flags: u32, src: *const c_void) -> c_int; - - pub fn mach_error_string(error_value: crate::mach_error_t) -> *mut c_char; - - // Added in macOS 10.13 - // ISO/IEC 9899:2011 ("ISO C11") K.3.7.4.1 - pub fn memset_s(s: *mut c_void, smax: size_t, c: c_int, n: size_t) -> c_int; - // Added in macOS 10.5 - pub fn memset_pattern4(b: *mut c_void, pattern4: *const c_void, len: size_t); - pub fn memset_pattern8(b: *mut c_void, pattern8: *const c_void, len: size_t); - pub fn memset_pattern16(b: *mut c_void, pattern16: *const c_void, len: size_t); - - // Inherited from BSD but available from Big Sur only - pub fn strtonum( - __numstr: *const c_char, - __minval: c_longlong, - __maxval: c_longlong, - errstrp: *mut *const c_char, - ) -> c_longlong; - - pub fn mstats() -> mstats; - pub fn malloc_printf(format: *const c_char, ...); - pub fn malloc_zone_check(zone: *mut crate::malloc_zone_t) -> crate::boolean_t; - pub fn malloc_zone_print(zone: *mut crate::malloc_zone_t, 
verbose: crate::boolean_t); - pub fn malloc_zone_statistics(zone: *mut crate::malloc_zone_t, stats: *mut malloc_statistics_t); - pub fn malloc_zone_log(zone: *mut crate::malloc_zone_t, address: *mut c_void); - pub fn malloc_zone_print_ptr_info(ptr: *mut c_void); - pub fn malloc_default_zone() -> *mut crate::malloc_zone_t; - pub fn malloc_zone_from_ptr(ptr: *const c_void) -> *mut crate::malloc_zone_t; - pub fn malloc_zone_malloc(zone: *mut crate::malloc_zone_t, size: size_t) -> *mut c_void; - pub fn malloc_zone_valloc(zone: *mut crate::malloc_zone_t, size: size_t) -> *mut c_void; - pub fn malloc_zone_calloc( - zone: *mut crate::malloc_zone_t, - num_items: size_t, - size: size_t, - ) -> *mut c_void; - pub fn malloc_zone_realloc( - zone: *mut crate::malloc_zone_t, - ptr: *mut c_void, - size: size_t, - ) -> *mut c_void; - pub fn malloc_zone_free(zone: *mut crate::malloc_zone_t, ptr: *mut c_void); - - pub fn proc_listpids(t: u32, typeinfo: u32, buffer: *mut c_void, buffersize: c_int) -> c_int; - pub fn proc_listallpids(buffer: *mut c_void, buffersize: c_int) -> c_int; - pub fn proc_listpgrppids(pgrpid: crate::pid_t, buffer: *mut c_void, buffersize: c_int) - -> c_int; - pub fn proc_listchildpids(ppid: crate::pid_t, buffer: *mut c_void, buffersize: c_int) -> c_int; - pub fn proc_pidinfo( - pid: c_int, - flavor: c_int, - arg: u64, - buffer: *mut c_void, - buffersize: c_int, - ) -> c_int; - pub fn proc_pidfdinfo( - pid: c_int, - fd: c_int, - flavor: c_int, - buffer: *mut c_void, - buffersize: c_int, - ) -> c_int; - pub fn proc_pidfileportinfo( - pid: c_int, - fileport: u32, - flavor: c_int, - buffer: *mut c_void, - buffersize: c_int, - ) -> c_int; - pub fn proc_pidpath(pid: c_int, buffer: *mut c_void, buffersize: u32) -> c_int; - pub fn proc_name(pid: c_int, buffer: *mut c_void, buffersize: u32) -> c_int; - pub fn proc_regionfilename( - pid: c_int, - address: u64, - buffer: *mut c_void, - buffersize: u32, - ) -> c_int; - pub fn proc_kmsgbuf(buffer: *mut c_void, buffersize: u32) -> c_int; - pub fn proc_libversion(major: *mut c_int, minor: *mut c_int) -> c_int; - pub fn proc_pid_rusage(pid: c_int, flavor: c_int, buffer: *mut rusage_info_t) -> c_int; - - // Available from Big Sur - pub fn proc_set_no_smt() -> c_int; - pub fn proc_setthread_no_smt() -> c_int; - pub fn proc_set_csm(flags: u32) -> c_int; - pub fn proc_setthread_csm(flags: u32) -> c_int; - /// # Notes - /// - /// `id` is of type [`uuid_t`]. - pub fn gethostuuid(id: *mut u8, timeout: *const crate::timespec) -> c_int; - - pub fn gethostid() -> c_long; - pub fn sethostid(hostid: c_long); - - pub fn CCRandomGenerateBytes(bytes: *mut c_void, size: size_t) -> crate::CCRNGStatus; - pub fn getentropy(buf: *mut c_void, buflen: size_t) -> c_int; - - // FIXME(1.0): should this actually be deprecated? 
- #[deprecated(since = "0.2.55", note = "Use the `mach2` crate instead")] - pub fn _NSGetExecutablePath(buf: *mut c_char, bufsize: *mut u32) -> c_int; - - // crt_externs.h - pub fn _NSGetArgv() -> *mut *mut *mut c_char; - pub fn _NSGetArgc() -> *mut c_int; - pub fn _NSGetEnviron() -> *mut *mut *mut c_char; - pub fn _NSGetProgname() -> *mut *mut c_char; - - #[deprecated(since = "0.2.55", note = "Use the `mach2` crate instead")] - pub fn mach_vm_map( - target_task: crate::vm_map_t, - address: *mut crate::mach_vm_address_t, - size: crate::mach_vm_size_t, - mask: crate::mach_vm_offset_t, - flags: c_int, - object: crate::mem_entry_name_port_t, - offset: crate::memory_object_offset_t, - copy: crate::boolean_t, - cur_protection: crate::vm_prot_t, - max_protection: crate::vm_prot_t, - inheritance: crate::vm_inherit_t, - ) -> crate::kern_return_t; - - pub fn vm_allocate( - target_task: vm_map_t, - address: *mut vm_address_t, - size: vm_size_t, - flags: c_int, - ) -> crate::kern_return_t; - - pub fn vm_deallocate( - target_task: vm_map_t, - address: vm_address_t, - size: vm_size_t, - ) -> crate::kern_return_t; - - pub fn host_statistics64( - host_priv: host_t, - flavor: host_flavor_t, - host_info64_out: host_info64_t, - host_info64_outCnt: *mut mach_msg_type_number_t, - ) -> crate::kern_return_t; - pub fn host_processor_info( - host: host_t, - flavor: processor_flavor_t, - out_processor_count: *mut natural_t, - out_processor_info: *mut processor_info_array_t, - out_processor_infoCnt: *mut mach_msg_type_number_t, - ) -> crate::kern_return_t; - - #[deprecated(since = "0.2.55", note = "Use the `mach2` crate instead")] - pub static mut mach_task_self_: crate::mach_port_t; - pub fn task_for_pid( - host: crate::mach_port_t, - pid: crate::pid_t, - task: *mut crate::mach_port_t, - ) -> crate::kern_return_t; - pub fn task_info( - host: crate::mach_port_t, - flavor: task_flavor_t, - task_info_out: task_info_t, - task_info_count: *mut mach_msg_type_number_t, - ) -> crate::kern_return_t; - pub fn task_create( - target_task: crate::task_t, - ledgers: crate::ledger_array_t, - ledgersCnt: crate::mach_msg_type_number_t, - inherit_memory: crate::boolean_t, - child_task: *mut crate::task_t, - ) -> crate::kern_return_t; - pub fn task_terminate(target_task: crate::task_t) -> crate::kern_return_t; - pub fn task_threads( - target_task: crate::task_inspect_t, - act_list: *mut crate::thread_act_array_t, - act_listCnt: *mut crate::mach_msg_type_number_t, - ) -> crate::kern_return_t; - pub fn host_statistics( - host_priv: host_t, - flavor: host_flavor_t, - host_info_out: host_info_t, - host_info_outCnt: *mut mach_msg_type_number_t, - ) -> crate::kern_return_t; - - // sysdir.h - pub fn sysdir_start_search_path_enumeration( - dir: sysdir_search_path_directory_t, - domainMask: sysdir_search_path_domain_mask_t, - ) -> crate::sysdir_search_path_enumeration_state; - pub fn sysdir_get_next_search_path_enumeration( - state: crate::sysdir_search_path_enumeration_state, - path: *mut c_char, - ) -> crate::sysdir_search_path_enumeration_state; - - pub static vm_page_size: vm_size_t; - - pub fn getattrlist( - path: *const c_char, - attrList: *mut c_void, - attrBuf: *mut c_void, - attrBufSize: size_t, - options: u32, - ) -> c_int; - pub fn fgetattrlist( - fd: c_int, - attrList: *mut c_void, - attrBuf: *mut c_void, - attrBufSize: size_t, - options: u32, - ) -> c_int; - pub fn getattrlistat( - fd: c_int, - path: *const c_char, - attrList: *mut c_void, - attrBuf: *mut c_void, - attrBufSize: size_t, - options: c_ulong, - ) -> c_int; - pub fn 
setattrlist( - path: *const c_char, - attrList: *mut c_void, - attrBuf: *mut c_void, - attrBufSize: size_t, - options: u32, - ) -> c_int; - pub fn fsetattrlist( - fd: c_int, - attrList: *mut c_void, - attrBuf: *mut c_void, - attrBufSize: size_t, - options: u32, - ) -> c_int; - pub fn setattrlistat( - dir_fd: c_int, - path: *const c_char, - attrList: *mut c_void, - attrBuf: *mut c_void, - attrBufSize: size_t, - options: u32, - ) -> c_int; - pub fn getattrlistbulk( - dirfd: c_int, - attrList: *mut c_void, - attrBuf: *mut c_void, - attrBufSize: size_t, - options: u64, - ) -> c_int; - - pub fn malloc_size(ptr: *const c_void) -> size_t; - pub fn malloc_good_size(size: size_t) -> size_t; - - pub fn dirname(path: *mut c_char) -> *mut c_char; - pub fn basename(path: *mut c_char) -> *mut c_char; - - pub fn mkfifoat(dirfd: c_int, pathname: *const c_char, mode: mode_t) -> c_int; - pub fn mknodat(dirfd: c_int, pathname: *const c_char, mode: mode_t, dev: dev_t) -> c_int; - pub fn freadlink(fd: c_int, buf: *mut c_char, size: size_t) -> c_int; - pub fn execvP( - file: *const c_char, - search_path: *const c_char, - argv: *const *mut c_char, - ) -> c_int; - - pub fn qsort_r( - base: *mut c_void, - num: size_t, - size: size_t, - arg: *mut c_void, - compar: Option c_int>, - ); -} - -#[allow(deprecated)] -#[deprecated(since = "0.2.55", note = "Use the `mach2` crate instead")] -pub unsafe fn mach_task_self() -> crate::mach_port_t { - mach_task_self_ -} - -cfg_if! { - if #[cfg(target_os = "macos")] { - extern "C" { - pub fn clock_settime(clock_id: crate::clockid_t, tp: *const crate::timespec) -> c_int; - } - } -} -cfg_if! { - if #[cfg(any( - target_os = "macos", - target_os = "ios", - target_os = "tvos", - target_os = "visionos" - ))] { - extern "C" { - pub fn memmem( - haystack: *const c_void, - haystacklen: size_t, - needle: *const c_void, - needlelen: size_t, - ) -> *mut c_void; - pub fn task_set_info( - target_task: crate::task_t, - flavor: crate::task_flavor_t, - task_info_in: crate::task_info_t, - task_info_inCnt: crate::mach_msg_type_number_t, - ) -> crate::kern_return_t; - } - } -} - -// These require a dependency on `libiconv`, and including this when built as -// part of `std` means every Rust program gets it. Ideally we would have a link -// modifier to only include these if they are used, but we do not. -#[cfg_attr(not(feature = "rustc-dep-of-std"), link(name = "iconv"))] -extern "C" { - #[deprecated(note = "Will be removed in 1.0 to avoid the `iconv` dependency")] - pub fn iconv_open(tocode: *const c_char, fromcode: *const c_char) -> iconv_t; - #[deprecated(note = "Will be removed in 1.0 to avoid the `iconv` dependency")] - pub fn iconv( - cd: iconv_t, - inbuf: *mut *mut c_char, - inbytesleft: *mut size_t, - outbuf: *mut *mut c_char, - outbytesleft: *mut size_t, - ) -> size_t; - #[deprecated(note = "Will be removed in 1.0 to avoid the `iconv` dependency")] - pub fn iconv_close(cd: iconv_t) -> c_int; -} - -cfg_if! 
{ - if #[cfg(target_pointer_width = "32")] { - mod b32; - pub use self::b32::*; - } else if #[cfg(target_pointer_width = "64")] { - mod b64; - pub use self::b64::*; - } else { - // Unknown target_arch - } -} diff --git a/vendor/libc/src/unix/bsd/freebsdlike/dragonfly/errno.rs b/vendor/libc/src/unix/bsd/freebsdlike/dragonfly/errno.rs deleted file mode 100644 index 874c1da84d3a58..00000000000000 --- a/vendor/libc/src/unix/bsd/freebsdlike/dragonfly/errno.rs +++ /dev/null @@ -1,17 +0,0 @@ -use crate::prelude::*; - -/* DIFF(main): module removed in de76fee6 */ - -// DragonFlyBSD's __error function is declared with "static inline", so it must -// be implemented in the libc crate, as a pointer to a static thread_local. -f! { - #[deprecated(since = "0.2.77", note = "Use `__errno_location()` instead")] - pub fn __error() -> *mut c_int { - &mut errno - } -} - -extern "C" { - #[thread_local] - pub static mut errno: c_int; -} diff --git a/vendor/libc/src/unix/bsd/freebsdlike/dragonfly/mod.rs b/vendor/libc/src/unix/bsd/freebsdlike/dragonfly/mod.rs deleted file mode 100644 index 8720bf7fb36495..00000000000000 --- a/vendor/libc/src/unix/bsd/freebsdlike/dragonfly/mod.rs +++ /dev/null @@ -1,1635 +0,0 @@ -use crate::prelude::*; -use crate::{cmsghdr, off_t}; - -pub type dev_t = u32; -pub type wchar_t = i32; -pub type clock_t = u64; -pub type ino_t = u64; -pub type lwpid_t = i32; -pub type nlink_t = u32; -pub type blksize_t = i64; -pub type clockid_t = c_ulong; - -pub type time_t = i64; -pub type suseconds_t = i64; - -pub type uuid_t = crate::uuid; - -pub type fsblkcnt_t = u64; -pub type fsfilcnt_t = u64; -pub type idtype_t = c_uint; -pub type shmatt_t = c_uint; - -pub type mqd_t = c_int; -pub type sem_t = *mut sem; - -pub type cpuset_t = cpumask_t; -pub type cpu_set_t = cpumask_t; - -pub type register_t = c_long; -pub type umtx_t = c_int; -pub type pthread_barrierattr_t = c_int; -pub type pthread_barrier_t = crate::uintptr_t; -pub type pthread_spinlock_t = crate::uintptr_t; - -pub type segsz_t = usize; - -pub type vm_prot_t = u8; -pub type vm_maptype_t = u8; -pub type vm_inherit_t = i8; -pub type vm_subsys_t = c_int; -pub type vm_eflags_t = c_uint; - -pub type vm_map_t = *mut __c_anonymous_vm_map; -pub type vm_map_entry_t = *mut vm_map_entry; - -pub type pmap = __c_anonymous_pmap; - -#[derive(Debug)] -pub enum sem {} -impl Copy for sem {} -impl Clone for sem { - fn clone(&self) -> sem { - *self - } -} - -e! { - #[repr(u32)] - pub enum lwpstat { - LSRUN = 1, - LSSTOP = 2, - LSSLEEP = 3, - } - - #[repr(u32)] - pub enum procstat { - SIDL = 1, - SACTIVE = 2, - SSTOP = 3, - SZOMB = 4, - SCORE = 5, - } -} - -s! 
{ - pub struct kevent { - pub ident: crate::uintptr_t, - pub filter: c_short, - pub flags: c_ushort, - pub fflags: c_uint, - pub data: intptr_t, - pub udata: *mut c_void, - } - - pub struct exit_status { - pub e_termination: u16, - pub e_exit: u16, - } - - pub struct aiocb { - pub aio_fildes: c_int, - pub aio_offset: off_t, - pub aio_buf: *mut c_void, - pub aio_nbytes: size_t, - pub aio_sigevent: sigevent, - pub aio_lio_opcode: c_int, - pub aio_reqprio: c_int, - _aio_val: c_int, - _aio_err: c_int, - } - - pub struct uuid { - pub time_low: u32, - pub time_mid: u16, - pub time_hi_and_version: u16, - pub clock_seq_hi_and_reserved: u8, - pub clock_seq_low: u8, - pub node: [u8; 6], - } - - pub struct mq_attr { - pub mq_flags: c_long, - pub mq_maxmsg: c_long, - pub mq_msgsize: c_long, - pub mq_curmsgs: c_long, - } - - pub struct statvfs { - pub f_bsize: c_ulong, - pub f_frsize: c_ulong, - pub f_blocks: crate::fsblkcnt_t, - pub f_bfree: crate::fsblkcnt_t, - pub f_bavail: crate::fsblkcnt_t, - pub f_files: crate::fsfilcnt_t, - pub f_ffree: crate::fsfilcnt_t, - pub f_favail: crate::fsfilcnt_t, - pub f_fsid: c_ulong, - pub f_flag: c_ulong, - pub f_namemax: c_ulong, - pub f_owner: crate::uid_t, - pub f_type: c_uint, - pub f_syncreads: u64, - pub f_syncwrites: u64, - pub f_asyncreads: u64, - pub f_asyncwrites: u64, - pub f_fsid_uuid: crate::uuid_t, - pub f_uid_uuid: crate::uuid_t, - } - - pub struct stat { - pub st_ino: crate::ino_t, - pub st_nlink: crate::nlink_t, - pub st_dev: crate::dev_t, - pub st_mode: crate::mode_t, - pub st_padding1: u16, - pub st_uid: crate::uid_t, - pub st_gid: crate::gid_t, - pub st_rdev: crate::dev_t, - pub st_atime: crate::time_t, - pub st_atime_nsec: c_long, - pub st_mtime: crate::time_t, - pub st_mtime_nsec: c_long, - pub st_ctime: crate::time_t, - pub st_ctime_nsec: c_long, - pub st_size: off_t, - pub st_blocks: i64, - pub __old_st_blksize: u32, - pub st_flags: u32, - pub st_gen: u32, - pub st_lspare: i32, - pub st_blksize: i64, - pub st_qspare2: i64, - } - - pub struct if_data { - pub ifi_type: c_uchar, - pub ifi_physical: c_uchar, - pub ifi_addrlen: c_uchar, - pub ifi_hdrlen: c_uchar, - pub ifi_recvquota: c_uchar, - pub ifi_xmitquota: c_uchar, - pub ifi_mtu: c_ulong, - pub ifi_metric: c_ulong, - pub ifi_link_state: c_ulong, - pub ifi_baudrate: u64, - pub ifi_ipackets: c_ulong, - pub ifi_ierrors: c_ulong, - pub ifi_opackets: c_ulong, - pub ifi_oerrors: c_ulong, - pub ifi_collisions: c_ulong, - pub ifi_ibytes: c_ulong, - pub ifi_obytes: c_ulong, - pub ifi_imcasts: c_ulong, - pub ifi_omcasts: c_ulong, - pub ifi_iqdrops: c_ulong, - pub ifi_noproto: c_ulong, - pub ifi_hwassist: c_ulong, - pub ifi_oqdrops: c_ulong, - pub ifi_lastchange: crate::timeval, - } - - pub struct if_msghdr { - pub ifm_msglen: c_ushort, - pub ifm_version: c_uchar, - pub ifm_type: c_uchar, - pub ifm_addrs: c_int, - pub ifm_flags: c_int, - pub ifm_index: c_ushort, - pub ifm_data: if_data, - } - - pub struct sockaddr_dl { - pub sdl_len: c_uchar, - pub sdl_family: c_uchar, - pub sdl_index: c_ushort, - pub sdl_type: c_uchar, - pub sdl_nlen: c_uchar, - pub sdl_alen: c_uchar, - pub sdl_slen: c_uchar, - pub sdl_data: [c_char; 12], - pub sdl_rcf: c_ushort, - pub sdl_route: [c_ushort; 16], - } - - pub struct xucred { - pub cr_version: c_uint, - pub cr_uid: crate::uid_t, - pub cr_ngroups: c_short, - pub cr_groups: [crate::gid_t; 16], - __cr_unused1: *mut c_void, - } - - pub struct stack_t { - pub ss_sp: *mut c_void, - pub ss_size: size_t, - pub ss_flags: c_int, - } - - pub struct cpumask_t { - ary: [u64; 4], - } 
- - pub struct shmid_ds { - pub shm_perm: crate::ipc_perm, - pub shm_segsz: size_t, - pub shm_lpid: crate::pid_t, - pub shm_cpid: crate::pid_t, - pub shm_nattch: crate::shmatt_t, - pub shm_atime: crate::time_t, - pub shm_dtime: crate::time_t, - pub shm_ctime: crate::time_t, - shm_internal: *mut c_void, - } - - pub struct kinfo_file { - pub f_size: size_t, - pub f_pid: crate::pid_t, - pub f_uid: crate::uid_t, - pub f_fd: c_int, - pub f_file: *mut c_void, - pub f_type: c_short, - pub f_count: c_int, - pub f_msgcount: c_int, - pub f_offset: off_t, - pub f_data: *mut c_void, - pub f_flag: c_uint, - } - - pub struct kinfo_cputime { - pub cp_user: u64, - pub cp_nice: u64, - pub cp_sys: u64, - pub cp_intr: u64, - pub cp_idel: u64, - cp_unused01: u64, - cp_unused02: u64, - pub cp_sample_pc: u64, - pub cp_sample_sp: u64, - pub cp_msg: [c_char; 32], - } - - pub struct kinfo_lwp { - pub kl_pid: crate::pid_t, - pub kl_tid: crate::lwpid_t, - pub kl_flags: c_int, - pub kl_stat: crate::lwpstat, - pub kl_lock: c_int, - pub kl_tdflags: c_int, - pub kl_mpcount: c_int, - pub kl_prio: c_int, - pub kl_tdprio: c_int, - pub kl_rtprio: crate::rtprio, - pub kl_uticks: u64, - pub kl_sticks: u64, - pub kl_iticks: u64, - pub kl_cpticks: u64, - pub kl_pctcpu: c_uint, - pub kl_slptime: c_uint, - pub kl_origcpu: c_int, - pub kl_estcpu: c_int, - pub kl_cpuid: c_int, - pub kl_ru: crate::rusage, - pub kl_siglist: crate::sigset_t, - pub kl_sigmask: crate::sigset_t, - pub kl_wchan: crate::uintptr_t, - pub kl_wmesg: [c_char; 9], - pub kl_comm: [c_char; MAXCOMLEN + 1], - } - - pub struct kinfo_proc { - pub kp_paddr: crate::uintptr_t, - pub kp_flags: c_int, - pub kp_stat: crate::procstat, - pub kp_lock: c_int, - pub kp_acflag: c_int, - pub kp_traceflag: c_int, - pub kp_fd: crate::uintptr_t, - pub kp_siglist: crate::sigset_t, - pub kp_sigignore: crate::sigset_t, - pub kp_sigcatch: crate::sigset_t, - pub kp_sigflag: c_int, - pub kp_start: crate::timeval, - pub kp_comm: [c_char; MAXCOMLEN + 1], - pub kp_uid: crate::uid_t, - pub kp_ngroups: c_short, - pub kp_groups: [crate::gid_t; NGROUPS], - pub kp_ruid: crate::uid_t, - pub kp_svuid: crate::uid_t, - pub kp_rgid: crate::gid_t, - pub kp_svgid: crate::gid_t, - pub kp_pid: crate::pid_t, - pub kp_ppid: crate::pid_t, - pub kp_pgid: crate::pid_t, - pub kp_jobc: c_int, - pub kp_sid: crate::pid_t, - pub kp_login: [c_char; 40], // MAXNAMELEN rounded up to the nearest sizeof(long) - pub kp_tdev: crate::dev_t, - pub kp_tpgid: crate::pid_t, - pub kp_tsid: crate::pid_t, - pub kp_exitstat: c_ushort, - pub kp_nthreads: c_int, - pub kp_nice: c_int, - pub kp_swtime: c_uint, - pub kp_vm_map_size: size_t, - pub kp_vm_rssize: crate::segsz_t, - pub kp_vm_swrss: crate::segsz_t, - pub kp_vm_tsize: crate::segsz_t, - pub kp_vm_dsize: crate::segsz_t, - pub kp_vm_ssize: crate::segsz_t, - pub kp_vm_prssize: c_uint, - pub kp_jailid: c_int, - pub kp_ru: crate::rusage, - pub kp_cru: crate::rusage, - pub kp_auxflags: c_int, - pub kp_lwp: crate::kinfo_lwp, - pub kp_ktaddr: crate::uintptr_t, - kp_spare: [c_int; 2], - } - - pub struct __c_anonymous_vm_map { - _priv: [crate::uintptr_t; 36], - } - - pub struct vm_map_entry { - _priv: [crate::uintptr_t; 15], - pub eflags: crate::vm_eflags_t, - pub maptype: crate::vm_maptype_t, - pub protection: crate::vm_prot_t, - pub max_protection: crate::vm_prot_t, - pub inheritance: crate::vm_inherit_t, - pub wired_count: c_int, - pub id: crate::vm_subsys_t, - } - - pub struct __c_anonymous_pmap { - _priv1: [crate::uintptr_t; 32], - _priv2: [crate::uintptr_t; 32], - _priv3: 
[crate::uintptr_t; 32], - _priv4: [crate::uintptr_t; 32], - _priv5: [crate::uintptr_t; 8], - } - - pub struct vmspace { - vm_map: __c_anonymous_vm_map, - vm_pmap: __c_anonymous_pmap, - pub vm_flags: c_int, - pub vm_shm: *mut c_char, - pub vm_rssize: crate::segsz_t, - pub vm_swrss: crate::segsz_t, - pub vm_tsize: crate::segsz_t, - pub vm_dsize: crate::segsz_t, - pub vm_ssize: crate::segsz_t, - pub vm_taddr: *mut c_char, - pub vm_daddr: *mut c_char, - pub vm_maxsaddr: *mut c_char, - pub vm_minsaddr: *mut c_char, - _unused1: c_int, - _unused2: c_int, - pub vm_pagesupply: c_int, - pub vm_holdcnt: c_uint, - pub vm_refcnt: c_uint, - } - - pub struct cpuctl_msr_args_t { - pub msr: c_int, - pub data: u64, - } - - pub struct cpuctl_cpuid_args_t { - pub level: c_int, - pub data: [u32; 4], - } - - pub struct cpuctl_cpuid_count_args_t { - pub level: c_int, - pub level_type: c_int, - pub data: [u32; 4], - } - - pub struct cpuctl_update_args_t { - pub data: *mut c_void, - pub size: size_t, - } -} - -s_no_extra_traits! { - pub struct utmpx { - pub ut_name: [c_char; 32], - pub ut_id: [c_char; 4], - - pub ut_line: [c_char; 32], - pub ut_host: [c_char; 256], - - pub ut_unused: [u8; 16], - pub ut_session: u16, - pub ut_type: u16, - pub ut_pid: crate::pid_t, - ut_exit: exit_status, - ut_ss: crate::sockaddr_storage, - pub ut_tv: crate::timeval, - pub ut_unused2: [u8; 16], - } - - pub struct lastlogx { - pub ll_tv: crate::timeval, - pub ll_line: [c_char; _UTX_LINESIZE], - pub ll_host: [c_char; _UTX_HOSTSIZE], - pub ll_ss: crate::sockaddr_storage, - } - - pub struct dirent { - pub d_fileno: crate::ino_t, - pub d_namlen: u16, - pub d_type: u8, - __unused1: u8, - __unused2: u32, - pub d_name: [c_char; 256], - } - - pub struct statfs { - __spare2: c_long, - pub f_bsize: c_long, - pub f_iosize: c_long, - pub f_blocks: c_long, - pub f_bfree: c_long, - pub f_bavail: c_long, - pub f_files: c_long, - pub f_ffree: c_long, - pub f_fsid: crate::fsid_t, - pub f_owner: crate::uid_t, - pub f_type: c_int, - pub f_flags: c_int, - pub f_syncwrites: c_long, - pub f_asyncwrites: c_long, - pub f_fstypename: [c_char; 16], - pub f_mntonname: [c_char; 80], - pub f_syncreads: c_long, - pub f_asyncreads: c_long, - __spares1: c_short, - pub f_mntfromname: [c_char; 80], - __spares2: c_short, - __spare: [c_long; 2], - } - - pub struct sigevent { - pub sigev_notify: c_int, - // The union is 8-byte in size, so it is aligned at a 8-byte offset. 
- #[cfg(target_pointer_width = "64")] - __unused1: c_int, - pub sigev_signo: c_int, //actually a union - // pad the union - #[cfg(target_pointer_width = "64")] - __unused2: c_int, - pub sigev_value: crate::sigval, - __unused3: *mut c_void, //actually a function pointer - } - - pub struct mcontext_t { - pub mc_onstack: register_t, - pub mc_rdi: register_t, - pub mc_rsi: register_t, - pub mc_rdx: register_t, - pub mc_rcx: register_t, - pub mc_r8: register_t, - pub mc_r9: register_t, - pub mc_rax: register_t, - pub mc_rbx: register_t, - pub mc_rbp: register_t, - pub mc_r10: register_t, - pub mc_r11: register_t, - pub mc_r12: register_t, - pub mc_r13: register_t, - pub mc_r14: register_t, - pub mc_r15: register_t, - pub mc_xflags: register_t, - pub mc_trapno: register_t, - pub mc_addr: register_t, - pub mc_flags: register_t, - pub mc_err: register_t, - pub mc_rip: register_t, - pub mc_cs: register_t, - pub mc_rflags: register_t, - pub mc_rsp: register_t, - pub mc_ss: register_t, - pub mc_len: c_uint, - pub mc_fpformat: c_uint, - pub mc_ownedfp: c_uint, - __reserved: c_uint, - __unused: [c_uint; 8], - pub mc_fpregs: [[c_uint; 8]; 32], - } - - // FIXME(1.0): This should not implement `PartialEq` - #[allow(unpredictable_function_pointer_comparisons)] - pub struct ucontext_t { - pub uc_sigmask: crate::sigset_t, - pub uc_mcontext: mcontext_t, - pub uc_link: *mut ucontext_t, - pub uc_stack: stack_t, - pub uc_cofunc: Option, - pub uc_arg: *mut c_void, - __pad: [c_int; 4], - } -} - -cfg_if! { - if #[cfg(feature = "extra_traits")] { - impl PartialEq for utmpx { - fn eq(&self, other: &utmpx) -> bool { - self.ut_name == other.ut_name - && self.ut_id == other.ut_id - && self.ut_line == other.ut_line - && self - .ut_host - .iter() - .zip(other.ut_host.iter()) - .all(|(a, b)| a == b) - && self.ut_unused == other.ut_unused - && self.ut_session == other.ut_session - && self.ut_type == other.ut_type - && self.ut_pid == other.ut_pid - && self.ut_exit == other.ut_exit - && self.ut_ss == other.ut_ss - && self.ut_tv == other.ut_tv - && self.ut_unused2 == other.ut_unused2 - } - } - impl Eq for utmpx {} - impl hash::Hash for utmpx { - fn hash(&self, state: &mut H) { - self.ut_name.hash(state); - self.ut_id.hash(state); - self.ut_line.hash(state); - self.ut_host.hash(state); - self.ut_unused.hash(state); - self.ut_session.hash(state); - self.ut_type.hash(state); - self.ut_pid.hash(state); - self.ut_exit.hash(state); - self.ut_ss.hash(state); - self.ut_tv.hash(state); - self.ut_unused2.hash(state); - } - } - impl PartialEq for lastlogx { - fn eq(&self, other: &lastlogx) -> bool { - self.ll_tv == other.ll_tv - && self.ll_line == other.ll_line - && self.ll_host == other.ll_host - && self.ll_ss == other.ll_ss - } - } - impl Eq for lastlogx {} - impl hash::Hash for lastlogx { - fn hash(&self, state: &mut H) { - self.ll_tv.hash(state); - self.ll_line.hash(state); - self.ll_host.hash(state); - self.ll_ss.hash(state); - } - } - - impl PartialEq for dirent { - fn eq(&self, other: &dirent) -> bool { - self.d_fileno == other.d_fileno - && self.d_namlen == other.d_namlen - && self.d_type == other.d_type - // Ignore __unused1 - // Ignore __unused2 - && self - .d_name - .iter() - .zip(other.d_name.iter()) - .all(|(a,b)| a == b) - } - } - impl Eq for dirent {} - impl hash::Hash for dirent { - fn hash(&self, state: &mut H) { - self.d_fileno.hash(state); - self.d_namlen.hash(state); - self.d_type.hash(state); - // Ignore __unused1 - // Ignore __unused2 - self.d_name.hash(state); - } - } - - impl PartialEq for statfs { - fn eq(&self, 
other: &statfs) -> bool { - self.f_bsize == other.f_bsize - && self.f_iosize == other.f_iosize - && self.f_blocks == other.f_blocks - && self.f_bfree == other.f_bfree - && self.f_bavail == other.f_bavail - && self.f_files == other.f_files - && self.f_ffree == other.f_ffree - && self.f_fsid == other.f_fsid - && self.f_owner == other.f_owner - && self.f_type == other.f_type - && self.f_flags == other.f_flags - && self.f_syncwrites == other.f_syncwrites - && self.f_asyncwrites == other.f_asyncwrites - && self.f_fstypename == other.f_fstypename - && self - .f_mntonname - .iter() - .zip(other.f_mntonname.iter()) - .all(|(a, b)| a == b) - && self.f_syncreads == other.f_syncreads - && self.f_asyncreads == other.f_asyncreads - && self - .f_mntfromname - .iter() - .zip(other.f_mntfromname.iter()) - .all(|(a, b)| a == b) - } - } - impl Eq for statfs {} - impl hash::Hash for statfs { - fn hash(&self, state: &mut H) { - self.f_bsize.hash(state); - self.f_iosize.hash(state); - self.f_blocks.hash(state); - self.f_bfree.hash(state); - self.f_bavail.hash(state); - self.f_files.hash(state); - self.f_ffree.hash(state); - self.f_fsid.hash(state); - self.f_owner.hash(state); - self.f_type.hash(state); - self.f_flags.hash(state); - self.f_syncwrites.hash(state); - self.f_asyncwrites.hash(state); - self.f_fstypename.hash(state); - self.f_mntonname.hash(state); - self.f_syncreads.hash(state); - self.f_asyncreads.hash(state); - self.f_mntfromname.hash(state); - } - } - - impl PartialEq for sigevent { - fn eq(&self, other: &sigevent) -> bool { - self.sigev_notify == other.sigev_notify - && self.sigev_signo == other.sigev_signo - && self.sigev_value == other.sigev_value - } - } - impl Eq for sigevent {} - impl hash::Hash for sigevent { - fn hash(&self, state: &mut H) { - self.sigev_notify.hash(state); - self.sigev_signo.hash(state); - self.sigev_value.hash(state); - } - } - impl PartialEq for mcontext_t { - fn eq(&self, other: &mcontext_t) -> bool { - self.mc_onstack == other.mc_onstack - && self.mc_rdi == other.mc_rdi - && self.mc_rsi == other.mc_rsi - && self.mc_rdx == other.mc_rdx - && self.mc_rcx == other.mc_rcx - && self.mc_r8 == other.mc_r8 - && self.mc_r9 == other.mc_r9 - && self.mc_rax == other.mc_rax - && self.mc_rbx == other.mc_rbx - && self.mc_rbp == other.mc_rbp - && self.mc_r10 == other.mc_r10 - && self.mc_r11 == other.mc_r11 - && self.mc_r12 == other.mc_r12 - && self.mc_r13 == other.mc_r13 - && self.mc_r14 == other.mc_r14 - && self.mc_r15 == other.mc_r15 - && self.mc_xflags == other.mc_xflags - && self.mc_trapno == other.mc_trapno - && self.mc_addr == other.mc_addr - && self.mc_flags == other.mc_flags - && self.mc_err == other.mc_err - && self.mc_rip == other.mc_rip - && self.mc_cs == other.mc_cs - && self.mc_rflags == other.mc_rflags - && self.mc_rsp == other.mc_rsp - && self.mc_ss == other.mc_ss - && self.mc_len == other.mc_len - && self.mc_fpformat == other.mc_fpformat - && self.mc_ownedfp == other.mc_ownedfp - && self.mc_fpregs == other.mc_fpregs - } - } - impl Eq for mcontext_t {} - impl hash::Hash for mcontext_t { - fn hash(&self, state: &mut H) { - self.mc_onstack.hash(state); - self.mc_rdi.hash(state); - self.mc_rsi.hash(state); - self.mc_rdx.hash(state); - self.mc_rcx.hash(state); - self.mc_r8.hash(state); - self.mc_r9.hash(state); - self.mc_rax.hash(state); - self.mc_rbx.hash(state); - self.mc_rbp.hash(state); - self.mc_r10.hash(state); - self.mc_r11.hash(state); - self.mc_r10.hash(state); - self.mc_r11.hash(state); - self.mc_r12.hash(state); - self.mc_r13.hash(state); - 
self.mc_r14.hash(state); - self.mc_r15.hash(state); - self.mc_xflags.hash(state); - self.mc_trapno.hash(state); - self.mc_addr.hash(state); - self.mc_flags.hash(state); - self.mc_err.hash(state); - self.mc_rip.hash(state); - self.mc_cs.hash(state); - self.mc_rflags.hash(state); - self.mc_rsp.hash(state); - self.mc_ss.hash(state); - self.mc_len.hash(state); - self.mc_fpformat.hash(state); - self.mc_ownedfp.hash(state); - self.mc_fpregs.hash(state); - } - } - // FIXME(msrv): suggested method was added in 1.85 - #[allow(unpredictable_function_pointer_comparisons)] - impl PartialEq for ucontext_t { - fn eq(&self, other: &ucontext_t) -> bool { - self.uc_sigmask == other.uc_sigmask - && self.uc_mcontext == other.uc_mcontext - && self.uc_link == other.uc_link - && self.uc_stack == other.uc_stack - && self.uc_cofunc == other.uc_cofunc - && self.uc_arg == other.uc_arg - } - } - impl Eq for ucontext_t {} - impl hash::Hash for ucontext_t { - fn hash(&self, state: &mut H) { - self.uc_sigmask.hash(state); - self.uc_mcontext.hash(state); - self.uc_link.hash(state); - self.uc_stack.hash(state); - self.uc_cofunc.hash(state); - self.uc_arg.hash(state); - } - } - } -} - -pub const RAND_MAX: c_int = 0x7fff_ffff; -pub const PTHREAD_STACK_MIN: size_t = 16384; -pub const SIGSTKSZ: size_t = 40960; -pub const SIGCKPT: c_int = 33; -pub const SIGCKPTEXIT: c_int = 34; -pub const CKPT_FREEZE: c_int = 0x1; -pub const CKPT_THAW: c_int = 0x2; -pub const MADV_INVAL: c_int = 10; -pub const MADV_SETMAP: c_int = 11; -pub const O_CLOEXEC: c_int = 0x00020000; -pub const O_DIRECTORY: c_int = 0x08000000; -pub const F_GETLK: c_int = 7; -pub const F_SETLK: c_int = 8; -pub const F_SETLKW: c_int = 9; -pub const F_GETPATH: c_int = 19; -pub const ENOMEDIUM: c_int = 93; -pub const ENOTRECOVERABLE: c_int = 94; -pub const EOWNERDEAD: c_int = 95; -pub const EASYNC: c_int = 99; -pub const ELAST: c_int = 99; -pub const RLIMIT_POSIXLOCKS: c_int = 11; -#[deprecated(since = "0.2.64", note = "Not stable across OS versions")] -pub const RLIM_NLIMITS: crate::rlim_t = 12; - -pub const Q_GETQUOTA: c_int = 0x300; -pub const Q_SETQUOTA: c_int = 0x400; - -pub const CTL_UNSPEC: c_int = 0; -pub const CTL_KERN: c_int = 1; -pub const CTL_VM: c_int = 2; -pub const CTL_VFS: c_int = 3; -pub const CTL_NET: c_int = 4; -pub const CTL_DEBUG: c_int = 5; -pub const CTL_HW: c_int = 6; -pub const CTL_MACHDEP: c_int = 7; -pub const CTL_USER: c_int = 8; -pub const CTL_P1003_1B: c_int = 9; -pub const CTL_LWKT: c_int = 10; -pub const CTL_MAXID: c_int = 11; -pub const KERN_OSTYPE: c_int = 1; -pub const KERN_OSRELEASE: c_int = 2; -pub const KERN_OSREV: c_int = 3; -pub const KERN_VERSION: c_int = 4; -pub const KERN_MAXVNODES: c_int = 5; -pub const KERN_MAXPROC: c_int = 6; -pub const KERN_MAXFILES: c_int = 7; -pub const KERN_ARGMAX: c_int = 8; -pub const KERN_SECURELVL: c_int = 9; -pub const KERN_HOSTNAME: c_int = 10; -pub const KERN_HOSTID: c_int = 11; -pub const KERN_CLOCKRATE: c_int = 12; -pub const KERN_VNODE: c_int = 13; -pub const KERN_PROC: c_int = 14; -pub const KERN_FILE: c_int = 15; -pub const KERN_PROF: c_int = 16; -pub const KERN_POSIX1: c_int = 17; -pub const KERN_NGROUPS: c_int = 18; -pub const KERN_JOB_CONTROL: c_int = 19; -pub const KERN_SAVED_IDS: c_int = 20; -pub const KERN_BOOTTIME: c_int = 21; -pub const KERN_NISDOMAINNAME: c_int = 22; -pub const KERN_UPDATEINTERVAL: c_int = 23; -pub const KERN_OSRELDATE: c_int = 24; -pub const KERN_NTP_PLL: c_int = 25; -pub const KERN_BOOTFILE: c_int = 26; -pub const KERN_MAXFILESPERPROC: c_int = 27; -pub const 
KERN_MAXPROCPERUID: c_int = 28; -pub const KERN_DUMPDEV: c_int = 29; -pub const KERN_IPC: c_int = 30; -pub const KERN_DUMMY: c_int = 31; -pub const KERN_PS_STRINGS: c_int = 32; -pub const KERN_USRSTACK: c_int = 33; -pub const KERN_LOGSIGEXIT: c_int = 34; -pub const KERN_IOV_MAX: c_int = 35; -pub const KERN_MAXPOSIXLOCKSPERUID: c_int = 36; -pub const KERN_MAXID: c_int = 37; -pub const KERN_PROC_ALL: c_int = 0; -pub const KERN_PROC_PID: c_int = 1; -pub const KERN_PROC_PGRP: c_int = 2; -pub const KERN_PROC_SESSION: c_int = 3; -pub const KERN_PROC_TTY: c_int = 4; -pub const KERN_PROC_UID: c_int = 5; -pub const KERN_PROC_RUID: c_int = 6; -pub const KERN_PROC_ARGS: c_int = 7; -pub const KERN_PROC_CWD: c_int = 8; -pub const KERN_PROC_PATHNAME: c_int = 9; -pub const KERN_PROC_FLAGMASK: c_int = 0x10; -pub const KERN_PROC_FLAG_LWP: c_int = 0x10; -pub const KIPC_MAXSOCKBUF: c_int = 1; -pub const KIPC_SOCKBUF_WASTE: c_int = 2; -pub const KIPC_SOMAXCONN: c_int = 3; -pub const KIPC_MAX_LINKHDR: c_int = 4; -pub const KIPC_MAX_PROTOHDR: c_int = 5; -pub const KIPC_MAX_HDR: c_int = 6; -pub const KIPC_MAX_DATALEN: c_int = 7; -pub const KIPC_MBSTAT: c_int = 8; -pub const KIPC_NMBCLUSTERS: c_int = 9; -pub const HW_MACHINE: c_int = 1; -pub const HW_MODEL: c_int = 2; -pub const HW_NCPU: c_int = 3; -pub const HW_BYTEORDER: c_int = 4; -pub const HW_PHYSMEM: c_int = 5; -pub const HW_USERMEM: c_int = 6; -pub const HW_PAGESIZE: c_int = 7; -pub const HW_DISKNAMES: c_int = 8; -pub const HW_DISKSTATS: c_int = 9; -pub const HW_FLOATINGPT: c_int = 10; -pub const HW_MACHINE_ARCH: c_int = 11; -pub const HW_MACHINE_PLATFORM: c_int = 12; -pub const HW_SENSORS: c_int = 13; -pub const HW_MAXID: c_int = 14; -pub const USER_CS_PATH: c_int = 1; -pub const USER_BC_BASE_MAX: c_int = 2; -pub const USER_BC_DIM_MAX: c_int = 3; -pub const USER_BC_SCALE_MAX: c_int = 4; -pub const USER_BC_STRING_MAX: c_int = 5; -pub const USER_COLL_WEIGHTS_MAX: c_int = 6; -pub const USER_EXPR_NEST_MAX: c_int = 7; -pub const USER_LINE_MAX: c_int = 8; -pub const USER_RE_DUP_MAX: c_int = 9; -pub const USER_POSIX2_VERSION: c_int = 10; -pub const USER_POSIX2_C_BIND: c_int = 11; -pub const USER_POSIX2_C_DEV: c_int = 12; -pub const USER_POSIX2_CHAR_TERM: c_int = 13; -pub const USER_POSIX2_FORT_DEV: c_int = 14; -pub const USER_POSIX2_FORT_RUN: c_int = 15; -pub const USER_POSIX2_LOCALEDEF: c_int = 16; -pub const USER_POSIX2_SW_DEV: c_int = 17; -pub const USER_POSIX2_UPE: c_int = 18; -pub const USER_STREAM_MAX: c_int = 19; -pub const USER_TZNAME_MAX: c_int = 20; -pub const USER_MAXID: c_int = 21; -pub const CTL_P1003_1B_ASYNCHRONOUS_IO: c_int = 1; -pub const CTL_P1003_1B_MAPPED_FILES: c_int = 2; -pub const CTL_P1003_1B_MEMLOCK: c_int = 3; -pub const CTL_P1003_1B_MEMLOCK_RANGE: c_int = 4; -pub const CTL_P1003_1B_MEMORY_PROTECTION: c_int = 5; -pub const CTL_P1003_1B_MESSAGE_PASSING: c_int = 6; -pub const CTL_P1003_1B_PRIORITIZED_IO: c_int = 7; -pub const CTL_P1003_1B_PRIORITY_SCHEDULING: c_int = 8; -pub const CTL_P1003_1B_REALTIME_SIGNALS: c_int = 9; -pub const CTL_P1003_1B_SEMAPHORES: c_int = 10; -pub const CTL_P1003_1B_FSYNC: c_int = 11; -pub const CTL_P1003_1B_SHARED_MEMORY_OBJECTS: c_int = 12; -pub const CTL_P1003_1B_SYNCHRONIZED_IO: c_int = 13; -pub const CTL_P1003_1B_TIMERS: c_int = 14; -pub const CTL_P1003_1B_AIO_LISTIO_MAX: c_int = 15; -pub const CTL_P1003_1B_AIO_MAX: c_int = 16; -pub const CTL_P1003_1B_AIO_PRIO_DELTA_MAX: c_int = 17; -pub const CTL_P1003_1B_DELAYTIMER_MAX: c_int = 18; -pub const CTL_P1003_1B_UNUSED1: c_int = 19; -pub const 
CTL_P1003_1B_PAGESIZE: c_int = 20; -pub const CTL_P1003_1B_RTSIG_MAX: c_int = 21; -pub const CTL_P1003_1B_SEM_NSEMS_MAX: c_int = 22; -pub const CTL_P1003_1B_SEM_VALUE_MAX: c_int = 23; -pub const CTL_P1003_1B_SIGQUEUE_MAX: c_int = 24; -pub const CTL_P1003_1B_TIMER_MAX: c_int = 25; -pub const CTL_P1003_1B_MAXID: c_int = 26; - -pub const CPUCTL_RSMSR: c_int = 0xc0106301; -pub const CPUCTL_WRMSR: c_int = 0xc0106302; -pub const CPUCTL_CPUID: c_int = 0xc0106303; -pub const CPUCTL_UPDATE: c_int = 0xc0106304; -pub const CPUCTL_MSRSBIT: c_int = 0xc0106305; -pub const CPUCTL_MSRCBIT: c_int = 0xc0106306; -pub const CPUCTL_CPUID_COUNT: c_int = 0xc0106307; - -pub const CPU_SETSIZE: size_t = size_of::() * 8; - -pub const EVFILT_READ: i16 = -1; -pub const EVFILT_WRITE: i16 = -2; -pub const EVFILT_AIO: i16 = -3; -pub const EVFILT_VNODE: i16 = -4; -pub const EVFILT_PROC: i16 = -5; -pub const EVFILT_SIGNAL: i16 = -6; -pub const EVFILT_TIMER: i16 = -7; -pub const EVFILT_EXCEPT: i16 = -8; -pub const EVFILT_USER: i16 = -9; -pub const EVFILT_FS: i16 = -10; - -pub const EV_ADD: u16 = 0x1; -pub const EV_DELETE: u16 = 0x2; -pub const EV_ENABLE: u16 = 0x4; -pub const EV_DISABLE: u16 = 0x8; -pub const EV_ONESHOT: u16 = 0x10; -pub const EV_CLEAR: u16 = 0x20; -pub const EV_RECEIPT: u16 = 0x40; -pub const EV_DISPATCH: u16 = 0x80; -pub const EV_NODATA: u16 = 0x1000; -pub const EV_FLAG1: u16 = 0x2000; -pub const EV_ERROR: u16 = 0x4000; -pub const EV_EOF: u16 = 0x8000; -pub const EV_HUP: u16 = 0x8000; -pub const EV_SYSFLAGS: u16 = 0xf000; - -pub const FIODNAME: c_ulong = 0x80106678; - -pub const NOTE_TRIGGER: u32 = 0x01000000; -pub const NOTE_FFNOP: u32 = 0x00000000; -pub const NOTE_FFAND: u32 = 0x40000000; -pub const NOTE_FFOR: u32 = 0x80000000; -pub const NOTE_FFCOPY: u32 = 0xc0000000; -pub const NOTE_FFCTRLMASK: u32 = 0xc0000000; -pub const NOTE_FFLAGSMASK: u32 = 0x00ffffff; -pub const NOTE_LOWAT: u32 = 0x00000001; -pub const NOTE_OOB: u32 = 0x00000002; -pub const NOTE_DELETE: u32 = 0x00000001; -pub const NOTE_WRITE: u32 = 0x00000002; -pub const NOTE_EXTEND: u32 = 0x00000004; -pub const NOTE_ATTRIB: u32 = 0x00000008; -pub const NOTE_LINK: u32 = 0x00000010; -pub const NOTE_RENAME: u32 = 0x00000020; -pub const NOTE_REVOKE: u32 = 0x00000040; -pub const NOTE_EXIT: u32 = 0x80000000; -pub const NOTE_FORK: u32 = 0x40000000; -pub const NOTE_EXEC: u32 = 0x20000000; -pub const NOTE_PDATAMASK: u32 = 0x000fffff; -pub const NOTE_PCTRLMASK: u32 = 0xf0000000; -pub const NOTE_TRACK: u32 = 0x00000001; -pub const NOTE_TRACKERR: u32 = 0x00000002; -pub const NOTE_CHILD: u32 = 0x00000004; - -pub const SO_SNDSPACE: c_int = 0x100a; -pub const SO_CPUHINT: c_int = 0x1030; -pub const SO_PASSCRED: c_int = 0x4000; - -pub const PT_FIRSTMACH: c_int = 32; - -pub const PROC_REAP_ACQUIRE: c_int = 0x0001; -pub const PROC_REAP_RELEASE: c_int = 0x0002; -pub const PROC_REAP_STATUS: c_int = 0x0003; -pub const PROC_PDEATHSIG_CTL: c_int = 0x0004; -pub const PROC_PDEATHSIG_STATUS: c_int = 0x0005; - -// https://github.com/DragonFlyBSD/DragonFlyBSD/blob/HEAD/sys/net/if.h#L101 -pub const IFF_UP: c_int = 0x1; // interface is up -pub const IFF_BROADCAST: c_int = 0x2; // broadcast address valid -pub const IFF_DEBUG: c_int = 0x4; // turn on debugging -pub const IFF_LOOPBACK: c_int = 0x8; // is a loopback net -pub const IFF_POINTOPOINT: c_int = 0x10; // interface is point-to-point link -pub const IFF_SMART: c_int = 0x20; // interface manages own routes -pub const IFF_RUNNING: c_int = 0x40; // resources allocated -pub const IFF_NOARP: c_int = 0x80; // no address 
resolution protocol -pub const IFF_PROMISC: c_int = 0x100; // receive all packets -pub const IFF_ALLMULTI: c_int = 0x200; // receive all multicast packets -pub const IFF_OACTIVE_COMPAT: c_int = 0x400; // was transmission in progress -pub const IFF_SIMPLEX: c_int = 0x800; // can't hear own transmissions -pub const IFF_LINK0: c_int = 0x1000; // per link layer defined bit -pub const IFF_LINK1: c_int = 0x2000; // per link layer defined bit -pub const IFF_LINK2: c_int = 0x4000; // per link layer defined bit -pub const IFF_ALTPHYS: c_int = IFF_LINK2; // use alternate physical connection -pub const IFF_MULTICAST: c_int = 0x8000; // supports multicast - // was interface is in polling mode -pub const IFF_POLLING_COMPAT: c_int = 0x10000; -pub const IFF_PPROMISC: c_int = 0x20000; // user-requested promisc mode -pub const IFF_MONITOR: c_int = 0x40000; // user-requested monitor mode -pub const IFF_STATICARP: c_int = 0x80000; // static ARP -pub const IFF_NPOLLING: c_int = 0x100000; // interface is in polling mode -pub const IFF_IDIRECT: c_int = 0x200000; // direct input - -// -// sys/netinet/in.h -// Protocols (RFC 1700) -// NOTE: These are in addition to the constants defined in src/unix/mod.rs - -// IPPROTO_IP defined in src/unix/mod.rs -/// IP6 hop-by-hop options -pub const IPPROTO_HOPOPTS: c_int = 0; -// IPPROTO_ICMP defined in src/unix/mod.rs -/// group mgmt protocol -pub const IPPROTO_IGMP: c_int = 2; -/// gateway^2 (deprecated) -pub const IPPROTO_GGP: c_int = 3; -/// for compatibility -pub const IPPROTO_IPIP: c_int = 4; -// IPPROTO_TCP defined in src/unix/mod.rs -/// Stream protocol II. -pub const IPPROTO_ST: c_int = 7; -/// exterior gateway protocol -pub const IPPROTO_EGP: c_int = 8; -/// private interior gateway -pub const IPPROTO_PIGP: c_int = 9; -/// BBN RCC Monitoring -pub const IPPROTO_RCCMON: c_int = 10; -/// network voice protocol -pub const IPPROTO_NVPII: c_int = 11; -/// pup -pub const IPPROTO_PUP: c_int = 12; -/// Argus -pub const IPPROTO_ARGUS: c_int = 13; -/// EMCON -pub const IPPROTO_EMCON: c_int = 14; -/// Cross Net Debugger -pub const IPPROTO_XNET: c_int = 15; -/// Chaos -pub const IPPROTO_CHAOS: c_int = 16; -// IPPROTO_UDP defined in src/unix/mod.rs -/// Multiplexing -pub const IPPROTO_MUX: c_int = 18; -/// DCN Measurement Subsystems -pub const IPPROTO_MEAS: c_int = 19; -/// Host Monitoring -pub const IPPROTO_HMP: c_int = 20; -/// Packet Radio Measurement -pub const IPPROTO_PRM: c_int = 21; -/// xns idp -pub const IPPROTO_IDP: c_int = 22; -/// Trunk-1 -pub const IPPROTO_TRUNK1: c_int = 23; -/// Trunk-2 -pub const IPPROTO_TRUNK2: c_int = 24; -/// Leaf-1 -pub const IPPROTO_LEAF1: c_int = 25; -/// Leaf-2 -pub const IPPROTO_LEAF2: c_int = 26; -/// Reliable Data -pub const IPPROTO_RDP: c_int = 27; -/// Reliable Transaction -pub const IPPROTO_IRTP: c_int = 28; -/// tp-4 w/ class negotiation -pub const IPPROTO_TP: c_int = 29; -/// Bulk Data Transfer -pub const IPPROTO_BLT: c_int = 30; -/// Network Services -pub const IPPROTO_NSP: c_int = 31; -/// Merit Internodal -pub const IPPROTO_INP: c_int = 32; -/// Sequential Exchange -pub const IPPROTO_SEP: c_int = 33; -/// Third Party Connect -pub const IPPROTO_3PC: c_int = 34; -/// InterDomain Policy Routing -pub const IPPROTO_IDPR: c_int = 35; -/// XTP -pub const IPPROTO_XTP: c_int = 36; -/// Datagram Delivery -pub const IPPROTO_DDP: c_int = 37; -/// Control Message Transport -pub const IPPROTO_CMTP: c_int = 38; -/// TP++ Transport -pub const IPPROTO_TPXX: c_int = 39; -/// IL transport protocol -pub const IPPROTO_IL: c_int = 40; -// 
IPPROTO_IPV6 defined in src/unix/mod.rs -/// Source Demand Routing -pub const IPPROTO_SDRP: c_int = 42; -/// IP6 routing header -pub const IPPROTO_ROUTING: c_int = 43; -/// IP6 fragmentation header -pub const IPPROTO_FRAGMENT: c_int = 44; -/// InterDomain Routing -pub const IPPROTO_IDRP: c_int = 45; -/// resource reservation -pub const IPPROTO_RSVP: c_int = 46; -/// General Routing Encap. -pub const IPPROTO_GRE: c_int = 47; -/// Mobile Host Routing -pub const IPPROTO_MHRP: c_int = 48; -/// BHA -pub const IPPROTO_BHA: c_int = 49; -/// IP6 Encap Sec. Payload -pub const IPPROTO_ESP: c_int = 50; -/// IP6 Auth Header -pub const IPPROTO_AH: c_int = 51; -/// Integ. Net Layer Security -pub const IPPROTO_INLSP: c_int = 52; -/// IP with encryption -pub const IPPROTO_SWIPE: c_int = 53; -/// Next Hop Resolution -pub const IPPROTO_NHRP: c_int = 54; -/// IP Mobility -pub const IPPROTO_MOBILE: c_int = 55; -/// Transport Layer Security -pub const IPPROTO_TLSP: c_int = 56; -/// SKIP -pub const IPPROTO_SKIP: c_int = 57; -// IPPROTO_ICMPV6 defined in src/unix/mod.rs -/// IP6 no next header -pub const IPPROTO_NONE: c_int = 59; -/// IP6 destination option -pub const IPPROTO_DSTOPTS: c_int = 60; -/// any host internal protocol -pub const IPPROTO_AHIP: c_int = 61; -/// CFTP -pub const IPPROTO_CFTP: c_int = 62; -/// "hello" routing protocol -pub const IPPROTO_HELLO: c_int = 63; -/// SATNET/Backroom EXPAK -pub const IPPROTO_SATEXPAK: c_int = 64; -/// Kryptolan -pub const IPPROTO_KRYPTOLAN: c_int = 65; -/// Remote Virtual Disk -pub const IPPROTO_RVD: c_int = 66; -/// Pluribus Packet Core -pub const IPPROTO_IPPC: c_int = 67; -/// Any distributed FS -pub const IPPROTO_ADFS: c_int = 68; -/// Satnet Monitoring -pub const IPPROTO_SATMON: c_int = 69; -/// VISA Protocol -pub const IPPROTO_VISA: c_int = 70; -/// Packet Core Utility -pub const IPPROTO_IPCV: c_int = 71; -/// Comp. Prot. Net. Executive -pub const IPPROTO_CPNX: c_int = 72; -/// Comp. Prot. HeartBeat -pub const IPPROTO_CPHB: c_int = 73; -/// Wang Span Network -pub const IPPROTO_WSN: c_int = 74; -/// Packet Video Protocol -pub const IPPROTO_PVP: c_int = 75; -/// BackRoom SATNET Monitoring -pub const IPPROTO_BRSATMON: c_int = 76; -/// Sun net disk proto (temp.) -pub const IPPROTO_ND: c_int = 77; -/// WIDEBAND Monitoring -pub const IPPROTO_WBMON: c_int = 78; -/// WIDEBAND EXPAK -pub const IPPROTO_WBEXPAK: c_int = 79; -/// ISO cnlp -pub const IPPROTO_EON: c_int = 80; -/// VMTP -pub const IPPROTO_VMTP: c_int = 81; -/// Secure VMTP -pub const IPPROTO_SVMTP: c_int = 82; -/// Banyon VINES -pub const IPPROTO_VINES: c_int = 83; -/// TTP -pub const IPPROTO_TTP: c_int = 84; -/// NSFNET-IGP -pub const IPPROTO_IGP: c_int = 85; -/// dissimilar gateway prot. -pub const IPPROTO_DGP: c_int = 86; -/// TCF -pub const IPPROTO_TCF: c_int = 87; -/// Cisco/GXS IGRP -pub const IPPROTO_IGRP: c_int = 88; -/// OSPFIGP -pub const IPPROTO_OSPFIGP: c_int = 89; -/// Strite RPC protocol -pub const IPPROTO_SRPC: c_int = 90; -/// Locus Address Resoloution -pub const IPPROTO_LARP: c_int = 91; -/// Multicast Transport -pub const IPPROTO_MTP: c_int = 92; -/// AX.25 Frames -pub const IPPROTO_AX25: c_int = 93; -/// IP encapsulated in IP -pub const IPPROTO_IPEIP: c_int = 94; -/// Mobile Int.ing control -pub const IPPROTO_MICP: c_int = 95; -/// Semaphore Comm. security -pub const IPPROTO_SCCSP: c_int = 96; -/// Ethernet IP encapsulation -pub const IPPROTO_ETHERIP: c_int = 97; -/// encapsulation header -pub const IPPROTO_ENCAP: c_int = 98; -/// any private encr. 
scheme -pub const IPPROTO_APES: c_int = 99; -/// GMTP -pub const IPPROTO_GMTP: c_int = 100; -/// payload compression (IPComp) -pub const IPPROTO_IPCOMP: c_int = 108; - -/* 101-254: Partly Unassigned */ -/// Protocol Independent Mcast -pub const IPPROTO_PIM: c_int = 103; -/// CARP -pub const IPPROTO_CARP: c_int = 112; -/// PGM -pub const IPPROTO_PGM: c_int = 113; -/// PFSYNC -pub const IPPROTO_PFSYNC: c_int = 240; - -/* 255: Reserved */ -/* BSD Private, local use, namespace incursion, no longer used */ -/// divert pseudo-protocol -pub const IPPROTO_DIVERT: c_int = 254; -pub const IPPROTO_MAX: c_int = 256; -/// last return value of *_input(), meaning "all job for this pkt is done". -pub const IPPROTO_DONE: c_int = 257; - -/// Used by RSS: the layer3 protocol is unknown -pub const IPPROTO_UNKNOWN: c_int = 258; - -// sys/netinet/tcp.h -pub const TCP_SIGNATURE_ENABLE: c_int = 16; -pub const TCP_KEEPINIT: c_int = 32; -pub const TCP_FASTKEEP: c_int = 128; - -pub const AF_BLUETOOTH: c_int = 33; -pub const AF_MPLS: c_int = 34; -pub const AF_IEEE80211: c_int = 35; - -pub const PF_BLUETOOTH: c_int = AF_BLUETOOTH; - -pub const NET_RT_DUMP: c_int = 1; -pub const NET_RT_FLAGS: c_int = 2; -pub const NET_RT_IFLIST: c_int = 3; -pub const NET_RT_MAXID: c_int = 4; - -pub const SOMAXOPT_SIZE: c_int = 65536; - -pub const MSG_UNUSED09: c_int = 0x00000200; -pub const MSG_NOSIGNAL: c_int = 0x00000400; -pub const MSG_SYNC: c_int = 0x00000800; -pub const MSG_CMSG_CLOEXEC: c_int = 0x00001000; -pub const MSG_FBLOCKING: c_int = 0x00010000; -pub const MSG_FNONBLOCKING: c_int = 0x00020000; -pub const MSG_FMASK: c_int = 0xFFFF0000; - -// sys/mount.h -pub const MNT_NODEV: c_int = 0x00000010; -pub const MNT_AUTOMOUNTED: c_int = 0x00000020; -pub const MNT_TRIM: c_int = 0x01000000; -pub const MNT_LOCAL: c_int = 0x00001000; -pub const MNT_QUOTA: c_int = 0x00002000; -pub const MNT_ROOTFS: c_int = 0x00004000; -pub const MNT_USER: c_int = 0x00008000; -pub const MNT_IGNORE: c_int = 0x00800000; - -// utmpx entry types -pub const EMPTY: c_short = 0; -pub const RUN_LVL: c_short = 1; -pub const BOOT_TIME: c_short = 2; -pub const OLD_TIME: c_short = 3; -pub const NEW_TIME: c_short = 4; -pub const INIT_PROCESS: c_short = 5; -pub const LOGIN_PROCESS: c_short = 6; -pub const USER_PROCESS: c_short = 7; -pub const DEAD_PROCESS: c_short = 8; -pub const ACCOUNTING: c_short = 9; -pub const SIGNATURE: c_short = 10; -pub const DOWNTIME: c_short = 11; -// utmpx database types -pub const UTX_DB_UTMPX: c_uint = 0; -pub const UTX_DB_WTMPX: c_uint = 1; -pub const UTX_DB_LASTLOG: c_uint = 2; -pub const _UTX_LINESIZE: usize = 32; -pub const _UTX_USERSIZE: usize = 32; -pub const _UTX_IDSIZE: usize = 4; -pub const _UTX_HOSTSIZE: usize = 256; - -pub const LC_COLLATE_MASK: c_int = 1 << 0; -pub const LC_CTYPE_MASK: c_int = 1 << 1; -pub const LC_MONETARY_MASK: c_int = 1 << 2; -pub const LC_NUMERIC_MASK: c_int = 1 << 3; -pub const LC_TIME_MASK: c_int = 1 << 4; -pub const LC_MESSAGES_MASK: c_int = 1 << 5; -pub const LC_ALL_MASK: c_int = LC_COLLATE_MASK - | LC_CTYPE_MASK - | LC_MESSAGES_MASK - | LC_MONETARY_MASK - | LC_NUMERIC_MASK - | LC_TIME_MASK; - -pub const TIOCSIG: c_ulong = 0x2000745f; -pub const BTUARTDISC: c_int = 0x7; -pub const TIOCDCDTIMESTAMP: c_ulong = 0x40107458; -pub const TIOCISPTMASTER: c_ulong = 0x20007455; -pub const TIOCMODG: c_ulong = 0x40047403; -pub const TIOCMODS: c_ulong = 0x80047404; -pub const TIOCREMOTE: c_ulong = 0x80047469; -pub const TIOCTIMESTAMP: c_ulong = 0x40107459; - -// Constants used by "at" family of system calls. 
-pub const AT_FDCWD: c_int = 0xFFFAFDCD; // invalid file descriptor -pub const AT_SYMLINK_NOFOLLOW: c_int = 1; -pub const AT_REMOVEDIR: c_int = 2; -pub const AT_EACCESS: c_int = 4; -pub const AT_SYMLINK_FOLLOW: c_int = 8; - -pub const VCHECKPT: usize = 19; - -pub const _PC_2_SYMLINKS: c_int = 22; -pub const _PC_TIMESTAMP_RESOLUTION: c_int = 23; - -pub const _CS_PATH: c_int = 1; - -pub const _SC_V7_ILP32_OFF32: c_int = 122; -pub const _SC_V7_ILP32_OFFBIG: c_int = 123; -pub const _SC_V7_LP64_OFF64: c_int = 124; -pub const _SC_V7_LPBIG_OFFBIG: c_int = 125; -pub const _SC_THREAD_ROBUST_PRIO_INHERIT: c_int = 126; -pub const _SC_THREAD_ROBUST_PRIO_PROTECT: c_int = 127; - -pub const WCONTINUED: c_int = 0x4; -pub const WSTOPPED: c_int = 0x2; -pub const WNOWAIT: c_int = 0x8; -pub const WEXITED: c_int = 0x10; -pub const WTRAPPED: c_int = 0x20; - -// Similar to FreeBSD, only the standardized ones are exposed. -// There are more. -pub const P_PID: idtype_t = 0; -pub const P_PGID: idtype_t = 2; -pub const P_ALL: idtype_t = 7; - -// Values for struct rtprio (type_ field) -pub const RTP_PRIO_REALTIME: c_ushort = 0; -pub const RTP_PRIO_NORMAL: c_ushort = 1; -pub const RTP_PRIO_IDLE: c_ushort = 2; -pub const RTP_PRIO_THREAD: c_ushort = 3; - -// Flags for chflags(2) -pub const UF_NOHISTORY: c_ulong = 0x00000040; -pub const UF_CACHE: c_ulong = 0x00000080; -pub const UF_XLINK: c_ulong = 0x00000100; -pub const SF_NOHISTORY: c_ulong = 0x00400000; -pub const SF_CACHE: c_ulong = 0x00800000; -pub const SF_XLINK: c_ulong = 0x01000000; - -// timespec constants -pub const UTIME_OMIT: c_long = -2; -pub const UTIME_NOW: c_long = -1; - -pub const MINCORE_SUPER: c_int = 0x20; - -// kinfo_proc constants -pub const MAXCOMLEN: usize = 16; -pub const MAXLOGNAME: usize = 33; -pub const NGROUPS: usize = 16; - -pub const RB_PAUSE: c_int = 0x40000; -pub const RB_VIDEO: c_int = 0x20000000; - -// net/route.h -pub const RTF_CLONING: c_int = 0x100; -pub const RTF_PRCLONING: c_int = 0x10000; -pub const RTF_WASCLONED: c_int = 0x20000; -pub const RTF_MPLSOPS: c_int = 0x1000000; - -pub const RTM_VERSION: c_int = 7; - -pub const RTAX_MPLS1: c_int = 8; -pub const RTAX_MPLS2: c_int = 9; -pub const RTAX_MPLS3: c_int = 10; -pub const RTAX_MAX: c_int = 11; - -const fn _CMSG_ALIGN(n: usize) -> usize { - (n + (size_of::() - 1)) & !(size_of::() - 1) -} - -f! 
{ - pub fn CMSG_DATA(cmsg: *const cmsghdr) -> *mut c_uchar { - (cmsg as *mut c_uchar).offset(_CMSG_ALIGN(size_of::()) as isize) - } - - pub const fn CMSG_LEN(length: c_uint) -> c_uint { - (_CMSG_ALIGN(size_of::()) + length as usize) as c_uint - } - - pub fn CMSG_NXTHDR(mhdr: *const crate::msghdr, cmsg: *const cmsghdr) -> *mut cmsghdr { - let next = cmsg as usize - + _CMSG_ALIGN((*cmsg).cmsg_len as usize) - + _CMSG_ALIGN(size_of::()); - let max = (*mhdr).msg_control as usize + (*mhdr).msg_controllen as usize; - if next <= max { - (cmsg as usize + _CMSG_ALIGN((*cmsg).cmsg_len as usize)) as *mut cmsghdr - } else { - core::ptr::null_mut::() - } - } - - pub const fn CMSG_SPACE(length: c_uint) -> c_uint { - (_CMSG_ALIGN(size_of::()) + _CMSG_ALIGN(length as usize)) as c_uint - } - - pub fn CPU_ZERO(cpuset: &mut cpu_set_t) -> () { - for slot in cpuset.ary.iter_mut() { - *slot = 0; - } - } - - pub fn CPU_SET(cpu: usize, cpuset: &mut cpu_set_t) -> () { - let (idx, offset) = ((cpu >> 6) & 3, cpu & 63); - cpuset.ary[idx] |= 1 << offset; - () - } - - pub fn CPU_CLR(cpu: usize, cpuset: &mut cpu_set_t) -> () { - let (idx, offset) = ((cpu >> 6) & 3, cpu & 63); - cpuset.ary[idx] &= !(1 << offset); - () - } - - pub fn CPU_ISSET(cpu: usize, cpuset: &cpu_set_t) -> bool { - let (idx, offset) = ((cpu >> 6) & 3, cpu & 63); - 0 != cpuset.ary[idx] & (1 << offset) - } -} - -safe_f! { - pub const fn WIFSIGNALED(status: c_int) -> bool { - (status & 0o177) != 0o177 && (status & 0o177) != 0 - } - - pub const fn makedev(major: c_uint, minor: c_uint) -> crate::dev_t { - let major = major as crate::dev_t; - let minor = minor as crate::dev_t; - let mut dev = 0; - dev |= major << 8; - dev |= minor; - dev - } - - pub const fn major(dev: crate::dev_t) -> c_int { - ((dev >> 8) & 0xff) as c_int - } - - pub const fn minor(dev: crate::dev_t) -> c_int { - (dev & 0xffff00ff) as c_int - } -} - -extern "C" { - pub fn __errno_location() -> *mut c_int; - pub fn setgrent(); - pub fn mprotect(addr: *mut c_void, len: size_t, prot: c_int) -> c_int; - - pub fn setutxdb(_type: c_uint, file: *mut c_char) -> c_int; - - pub fn aio_waitcomplete(iocbp: *mut *mut aiocb, timeout: *mut crate::timespec) -> c_int; - - pub fn devname_r( - dev: crate::dev_t, - mode: crate::mode_t, - buf: *mut c_char, - len: size_t, - ) -> *mut c_char; - - pub fn waitid( - idtype: idtype_t, - id: crate::id_t, - infop: *mut crate::siginfo_t, - options: c_int, - ) -> c_int; - - pub fn freelocale(loc: crate::locale_t); - - pub fn lwp_rtprio( - function: c_int, - pid: crate::pid_t, - lwpid: lwpid_t, - rtp: *mut super::rtprio, - ) -> c_int; - - pub fn statfs(path: *const c_char, buf: *mut statfs) -> c_int; - pub fn fstatfs(fd: c_int, buf: *mut statfs) -> c_int; - pub fn uname(buf: *mut crate::utsname) -> c_int; - pub fn memmem( - haystack: *const c_void, - haystacklen: size_t, - needle: *const c_void, - needlelen: size_t, - ) -> *mut c_void; - pub fn pthread_spin_init(lock: *mut pthread_spinlock_t, pshared: c_int) -> c_int; - pub fn pthread_spin_destroy(lock: *mut pthread_spinlock_t) -> c_int; - pub fn pthread_spin_lock(lock: *mut pthread_spinlock_t) -> c_int; - pub fn pthread_spin_trylock(lock: *mut pthread_spinlock_t) -> c_int; - pub fn pthread_spin_unlock(lock: *mut pthread_spinlock_t) -> c_int; - - pub fn sched_getaffinity(pid: crate::pid_t, cpusetsize: size_t, mask: *mut cpu_set_t) -> c_int; - pub fn sched_setaffinity( - pid: crate::pid_t, - cpusetsize: size_t, - mask: *const cpu_set_t, - ) -> c_int; - pub fn sched_getcpu() -> c_int; - pub fn setproctitle(fmt: *const 
c_char, ...); - - pub fn shmget(key: crate::key_t, size: size_t, shmflg: c_int) -> c_int; - pub fn shmat(shmid: c_int, shmaddr: *const c_void, shmflg: c_int) -> *mut c_void; - pub fn shmdt(shmaddr: *const c_void) -> c_int; - pub fn shmctl(shmid: c_int, cmd: c_int, buf: *mut crate::shmid_ds) -> c_int; - pub fn procctl( - idtype: crate::idtype_t, - id: crate::id_t, - cmd: c_int, - data: *mut c_void, - ) -> c_int; - - pub fn updwtmpx(file: *const c_char, ut: *const utmpx) -> c_int; - pub fn getlastlogx(fname: *const c_char, uid: crate::uid_t, ll: *mut lastlogx) - -> *mut lastlogx; - pub fn updlastlogx(fname: *const c_char, uid: crate::uid_t, ll: *mut lastlogx) -> c_int; - pub fn getutxuser(name: *const c_char) -> utmpx; - pub fn utmpxname(file: *const c_char) -> c_int; - - pub fn sys_checkpoint(tpe: c_int, fd: c_int, pid: crate::pid_t, retval: c_int) -> c_int; - - pub fn umtx_sleep(ptr: *const c_int, value: c_int, timeout: c_int) -> c_int; - pub fn umtx_wakeup(ptr: *const c_int, count: c_int) -> c_int; - - pub fn dirname(path: *mut c_char) -> *mut c_char; - pub fn basename(path: *mut c_char) -> *mut c_char; - pub fn getmntinfo(mntbufp: *mut *mut crate::statfs, flags: c_int) -> c_int; - pub fn getmntvinfo( - mntbufp: *mut *mut crate::statfs, - mntvbufp: *mut *mut crate::statvfs, - flags: c_int, - ) -> c_int; - - pub fn closefrom(lowfd: c_int) -> c_int; -} - -#[link(name = "rt")] -extern "C" { - pub fn aio_cancel(fd: c_int, aiocbp: *mut aiocb) -> c_int; - pub fn aio_error(aiocbp: *const aiocb) -> c_int; - pub fn aio_fsync(op: c_int, aiocbp: *mut aiocb) -> c_int; - pub fn aio_read(aiocbp: *mut aiocb) -> c_int; - pub fn aio_return(aiocbp: *mut aiocb) -> ssize_t; - pub fn aio_suspend( - aiocb_list: *const *const aiocb, - nitems: c_int, - timeout: *const crate::timespec, - ) -> c_int; - pub fn aio_write(aiocbp: *mut aiocb) -> c_int; - pub fn lio_listio( - mode: c_int, - aiocb_list: *const *mut aiocb, - nitems: c_int, - sevp: *mut sigevent, - ) -> c_int; - - pub fn reallocf(ptr: *mut c_void, size: size_t) -> *mut c_void; - pub fn freezero(ptr: *mut c_void, size: size_t); -} - -#[link(name = "kvm")] -extern "C" { - pub fn kvm_vm_map_entry_first( - kvm: *mut crate::kvm_t, - map: vm_map_t, - entry: vm_map_entry_t, - ) -> vm_map_entry_t; - pub fn kvm_vm_map_entry_next( - kvm: *mut crate::kvm_t, - map: vm_map_entry_t, - entry: vm_map_entry_t, - ) -> vm_map_entry_t; -} - -// DIFF(main): module removed in de76fee6 -cfg_if! { - if #[cfg(libc_thread_local)] { - mod errno; - pub use self::errno::*; - } -} diff --git a/vendor/libc/src/unix/bsd/freebsdlike/freebsd/aarch64.rs b/vendor/libc/src/unix/bsd/freebsdlike/freebsd/aarch64.rs deleted file mode 100644 index e74c26bb46e2c9..00000000000000 --- a/vendor/libc/src/unix/bsd/freebsdlike/freebsd/aarch64.rs +++ /dev/null @@ -1,110 +0,0 @@ -use crate::prelude::*; - -pub type clock_t = i32; -pub type wchar_t = u32; -pub type time_t = i64; -pub type suseconds_t = i64; -pub type register_t = i64; - -s_no_extra_traits! { - pub struct gpregs { - pub gp_x: [crate::register_t; 30], - pub gp_lr: crate::register_t, - pub gp_sp: crate::register_t, - pub gp_elr: crate::register_t, - pub gp_spsr: u32, - pub gp_pad: c_int, - } - - pub struct fpregs { - pub fp_q: u128, - pub fp_sr: u32, - pub fp_cr: u32, - pub fp_flags: c_int, - pub fp_pad: c_int, - } - - pub struct mcontext_t { - pub mc_gpregs: gpregs, - pub mc_fpregs: fpregs, - pub mc_flags: c_int, - pub mc_pad: c_int, - pub mc_spare: [u64; 8], - } -} - -pub(crate) const _ALIGNBYTES: usize = size_of::() - 1; - -cfg_if! 
{ - if #[cfg(feature = "extra_traits")] { - impl PartialEq for gpregs { - fn eq(&self, other: &gpregs) -> bool { - self.gp_x.iter().zip(other.gp_x.iter()).all(|(a, b)| a == b) - && self.gp_lr == other.gp_lr - && self.gp_sp == other.gp_sp - && self.gp_elr == other.gp_elr - && self.gp_spsr == other.gp_spsr - && self.gp_pad == other.gp_pad - } - } - impl Eq for gpregs {} - impl hash::Hash for gpregs { - fn hash(&self, state: &mut H) { - self.gp_x.hash(state); - self.gp_lr.hash(state); - self.gp_sp.hash(state); - self.gp_elr.hash(state); - self.gp_spsr.hash(state); - self.gp_pad.hash(state); - } - } - impl PartialEq for fpregs { - fn eq(&self, other: &fpregs) -> bool { - self.fp_q == other.fp_q - && self.fp_sr == other.fp_sr - && self.fp_cr == other.fp_cr - && self.fp_flags == other.fp_flags - && self.fp_pad == other.fp_pad - } - } - impl Eq for fpregs {} - impl hash::Hash for fpregs { - fn hash(&self, state: &mut H) { - self.fp_q.hash(state); - self.fp_sr.hash(state); - self.fp_cr.hash(state); - self.fp_flags.hash(state); - self.fp_pad.hash(state); - } - } - impl PartialEq for mcontext_t { - fn eq(&self, other: &mcontext_t) -> bool { - self.mc_gpregs == other.mc_gpregs - && self.mc_fpregs == other.mc_fpregs - && self.mc_flags == other.mc_flags - && self.mc_pad == other.mc_pad - && self - .mc_spare - .iter() - .zip(other.mc_spare.iter()) - .all(|(a, b)| a == b) - } - } - impl Eq for mcontext_t {} - impl hash::Hash for mcontext_t { - fn hash(&self, state: &mut H) { - self.mc_gpregs.hash(state); - self.mc_fpregs.hash(state); - self.mc_flags.hash(state); - self.mc_pad.hash(state); - self.mc_spare.hash(state); - } - } - } -} - -pub const BIOCSRTIMEOUT: c_ulong = 0x8010426d; -pub const BIOCGRTIMEOUT: c_ulong = 0x4010426e; -pub const MAP_32BIT: c_int = 0x00080000; -pub const MINSIGSTKSZ: size_t = 4096; // 1024 * 4 -pub const TIOCTIMESTAMP: c_ulong = 0x40107459; diff --git a/vendor/libc/src/unix/bsd/freebsdlike/freebsd/arm.rs b/vendor/libc/src/unix/bsd/freebsdlike/freebsd/arm.rs deleted file mode 100644 index c17e12913d8f82..00000000000000 --- a/vendor/libc/src/unix/bsd/freebsdlike/freebsd/arm.rs +++ /dev/null @@ -1,53 +0,0 @@ -use crate::prelude::*; - -pub type clock_t = u32; -pub type wchar_t = u32; -pub type time_t = i64; -pub type suseconds_t = i32; -pub type register_t = i32; -pub type __greg_t = c_uint; -pub type __gregset_t = [crate::__greg_t; 17]; - -s_no_extra_traits! { - pub struct mcontext_t { - pub __gregs: crate::__gregset_t, - pub mc_vfp_size: usize, - pub mc_vfp_ptr: *mut c_void, - pub mc_spare: [c_uint; 33], - } -} - -cfg_if! 
{ - if #[cfg(feature = "extra_traits")] { - impl PartialEq for mcontext_t { - fn eq(&self, other: &mcontext_t) -> bool { - self.__gregs == other.__gregs - && self.mc_vfp_size == other.mc_vfp_size - && self.mc_vfp_ptr == other.mc_vfp_ptr - && self - .mc_spare - .iter() - .zip(other.mc_spare.iter()) - .all(|(a, b)| a == b) - } - } - impl Eq for mcontext_t {} - impl hash::Hash for mcontext_t { - fn hash(&self, state: &mut H) { - self.__gregs.hash(state); - self.mc_vfp_size.hash(state); - self.mc_vfp_ptr.hash(state); - self.mc_spare.hash(state); - } - } - } -} - -pub(crate) const _ALIGNBYTES: usize = size_of::() - 1; - -pub const BIOCSRTIMEOUT: c_ulong = 0x8010426d; -pub const BIOCGRTIMEOUT: c_ulong = 0x4010426e; - -pub const MAP_32BIT: c_int = 0x00080000; -pub const MINSIGSTKSZ: size_t = 4096; // 1024 * 4 -pub const TIOCTIMESTAMP: c_ulong = 0x40107459; diff --git a/vendor/libc/src/unix/bsd/freebsdlike/freebsd/freebsd11/b32.rs b/vendor/libc/src/unix/bsd/freebsdlike/freebsd/freebsd11/b32.rs deleted file mode 100644 index dca7d6ee799888..00000000000000 --- a/vendor/libc/src/unix/bsd/freebsdlike/freebsd/freebsd11/b32.rs +++ /dev/null @@ -1,37 +0,0 @@ -use crate::off_t; -use crate::prelude::*; - -#[repr(C)] -#[derive(Debug)] -#[cfg_attr(feature = "extra_traits", derive(Eq, Hash, PartialEq))] -pub struct stat { - pub st_dev: crate::dev_t, - pub st_ino: crate::ino_t, - pub st_mode: crate::mode_t, - pub st_nlink: crate::nlink_t, - pub st_uid: crate::uid_t, - pub st_gid: crate::gid_t, - pub st_rdev: crate::dev_t, - pub st_atime: crate::time_t, - pub st_atime_nsec: c_long, - pub st_mtime: crate::time_t, - pub st_mtime_nsec: c_long, - pub st_ctime: crate::time_t, - pub st_ctime_nsec: c_long, - pub st_size: off_t, - pub st_blocks: crate::blkcnt_t, - pub st_blksize: crate::blksize_t, - pub st_flags: crate::fflags_t, - pub st_gen: u32, - pub st_lspare: i32, - pub st_birthtime: crate::time_t, - pub st_birthtime_nsec: c_long, - __unused: [u8; 8], -} - -impl Copy for crate::stat {} -impl Clone for crate::stat { - fn clone(&self) -> crate::stat { - *self - } -} diff --git a/vendor/libc/src/unix/bsd/freebsdlike/freebsd/freebsd11/b64.rs b/vendor/libc/src/unix/bsd/freebsdlike/freebsd/freebsd11/b64.rs deleted file mode 100644 index 1f31aac0e3d3d6..00000000000000 --- a/vendor/libc/src/unix/bsd/freebsdlike/freebsd/freebsd11/b64.rs +++ /dev/null @@ -1,36 +0,0 @@ -use crate::off_t; -use crate::prelude::*; - -#[repr(C)] -#[derive(Debug)] -#[cfg_attr(feature = "extra_traits", derive(Eq, Hash, PartialEq))] -pub struct stat { - pub st_dev: crate::dev_t, - pub st_ino: crate::ino_t, - pub st_mode: crate::mode_t, - pub st_nlink: crate::nlink_t, - pub st_uid: crate::uid_t, - pub st_gid: crate::gid_t, - pub st_rdev: crate::dev_t, - pub st_atime: crate::time_t, - pub st_atime_nsec: c_long, - pub st_mtime: crate::time_t, - pub st_mtime_nsec: c_long, - pub st_ctime: crate::time_t, - pub st_ctime_nsec: c_long, - pub st_size: off_t, - pub st_blocks: crate::blkcnt_t, - pub st_blksize: crate::blksize_t, - pub st_flags: crate::fflags_t, - pub st_gen: u32, - pub st_lspare: i32, - pub st_birthtime: crate::time_t, - pub st_birthtime_nsec: c_long, -} - -impl Copy for crate::stat {} -impl Clone for crate::stat { - fn clone(&self) -> crate::stat { - *self - } -} diff --git a/vendor/libc/src/unix/bsd/freebsdlike/freebsd/freebsd11/mod.rs b/vendor/libc/src/unix/bsd/freebsdlike/freebsd/freebsd11/mod.rs deleted file mode 100644 index b3b032bc66949f..00000000000000 --- a/vendor/libc/src/unix/bsd/freebsdlike/freebsd/freebsd11/mod.rs +++ /dev/null 
@@ -1,449 +0,0 @@ -use crate::prelude::*; - -// APIs that were changed after FreeBSD 11 - -// The type of `nlink_t` changed from `u16` to `u64` in FreeBSD 12: -pub type nlink_t = u16; -// Type of `dev_t` changed from `u32` to `u64` in FreeBSD 12: -pub type dev_t = u32; -// Type of `ino_t` changed from `__uint32_t` to `__uint64_t` in FreeBSD 12: -pub type ino_t = u32; - -s! { - pub struct kevent { - pub ident: crate::uintptr_t, - pub filter: c_short, - pub flags: c_ushort, - pub fflags: c_uint, - pub data: intptr_t, - pub udata: *mut c_void, - } - - pub struct shmid_ds { - pub shm_perm: crate::ipc_perm, - pub shm_segsz: size_t, - pub shm_lpid: crate::pid_t, - pub shm_cpid: crate::pid_t, - // Type of shm_nattc changed from `int` to `shmatt_t` (aka `unsigned - // int`) in FreeBSD 12: - pub shm_nattch: c_int, - pub shm_atime: crate::time_t, - pub shm_dtime: crate::time_t, - pub shm_ctime: crate::time_t, - } - - pub struct kinfo_proc { - /// Size of this structure. - pub ki_structsize: c_int, - /// Reserved: layout identifier. - pub ki_layout: c_int, - /// Address of command arguments. - pub ki_args: *mut crate::pargs, - // This is normally "struct proc". - /// Address of proc. - pub ki_paddr: *mut c_void, - // This is normally "struct user". - /// Kernel virtual address of u-area. - pub ki_addr: *mut c_void, - // This is normally "struct vnode". - /// Pointer to trace file. - pub ki_tracep: *mut c_void, - // This is normally "struct vnode". - /// Pointer to executable file. - pub ki_textvp: *mut c_void, - /// Pointer to open file info. - pub ki_fd: *mut crate::filedesc, - // This is normally "struct vmspace". - /// Pointer to kernel vmspace struct. - pub ki_vmspace: *mut c_void, - /// Sleep address. - pub ki_wchan: *mut c_void, - /// Process identifier. - pub ki_pid: crate::pid_t, - /// Parent process ID. - pub ki_ppid: crate::pid_t, - /// Process group ID. - pub ki_pgid: crate::pid_t, - /// tty process group ID. - pub ki_tpgid: crate::pid_t, - /// Process session ID. - pub ki_sid: crate::pid_t, - /// Terminal session ID. - pub ki_tsid: crate::pid_t, - /// Job control counter. - pub ki_jobc: c_short, - /// Unused (just here for alignment). - pub ki_spare_short1: c_short, - /// Controlling tty dev. - pub ki_tdev: crate::dev_t, - /// Signals arrived but not delivered. - pub ki_siglist: crate::sigset_t, - /// Current signal mask. - pub ki_sigmask: crate::sigset_t, - /// Signals being ignored. - pub ki_sigignore: crate::sigset_t, - /// Signals being caught by user. - pub ki_sigcatch: crate::sigset_t, - /// Effective user ID. - pub ki_uid: crate::uid_t, - /// Real user ID. - pub ki_ruid: crate::uid_t, - /// Saved effective user ID. - pub ki_svuid: crate::uid_t, - /// Real group ID. - pub ki_rgid: crate::gid_t, - /// Saved effective group ID. - pub ki_svgid: crate::gid_t, - /// Number of groups. - pub ki_ngroups: c_short, - /// Unused (just here for alignment). - pub ki_spare_short2: c_short, - /// Groups. - pub ki_groups: [crate::gid_t; crate::KI_NGROUPS], - /// Virtual size. - pub ki_size: crate::vm_size_t, - /// Current resident set size in pages. - pub ki_rssize: crate::segsz_t, - /// Resident set size before last swap. - pub ki_swrss: crate::segsz_t, - /// Text size (pages) XXX. - pub ki_tsize: crate::segsz_t, - /// Data size (pages) XXX. - pub ki_dsize: crate::segsz_t, - /// Stack size (pages). - pub ki_ssize: crate::segsz_t, - /// Exit status for wait & stop signal. - pub ki_xstat: crate::u_short, - /// Accounting flags. 
- pub ki_acflag: crate::u_short, - /// %cpu for process during `ki_swtime`. - pub ki_pctcpu: crate::fixpt_t, - /// Time averaged value of `ki_cpticks`. - pub ki_estcpu: crate::u_int, - /// Time since last blocked. - pub ki_slptime: crate::u_int, - /// Time swapped in or out. - pub ki_swtime: crate::u_int, - /// Number of copy-on-write faults. - pub ki_cow: crate::u_int, - /// Real time in microsec. - pub ki_runtime: u64, - /// Starting time. - pub ki_start: crate::timeval, - /// Time used by process children. - pub ki_childtime: crate::timeval, - /// P_* flags. - pub ki_flag: c_long, - /// KI_* flags (below). - pub ki_kiflag: c_long, - /// Kernel trace points. - pub ki_traceflag: c_int, - /// S* process status. - pub ki_stat: c_char, - /// Process "nice" value. - pub ki_nice: i8, // signed char - /// Process lock (prevent swap) count. - pub ki_lock: c_char, - /// Run queue index. - pub ki_rqindex: c_char, - /// Which cpu we are on. - pub ki_oncpu_old: c_uchar, - /// Last cpu we were on. - pub ki_lastcpu_old: c_uchar, - /// Thread name. - pub ki_tdname: [c_char; crate::TDNAMLEN + 1], - /// Wchan message. - pub ki_wmesg: [c_char; crate::WMESGLEN + 1], - /// Setlogin name. - pub ki_login: [c_char; crate::LOGNAMELEN + 1], - /// Lock name. - pub ki_lockname: [c_char; crate::LOCKNAMELEN + 1], - /// Command name. - pub ki_comm: [c_char; crate::COMMLEN + 1], - /// Emulation name. - pub ki_emul: [c_char; crate::KI_EMULNAMELEN + 1], - /// Login class. - pub ki_loginclass: [c_char; crate::LOGINCLASSLEN + 1], - /// More thread name. - pub ki_moretdname: [c_char; crate::MAXCOMLEN - crate::TDNAMLEN + 1], - /// Spare string space. - pub ki_sparestrings: [[c_char; 23]; 2], // little hack to allow PartialEq - /// Spare room for growth. - pub ki_spareints: [c_int; crate::KI_NSPARE_INT], - /// Which cpu we are on. - pub ki_oncpu: c_int, - /// Last cpu we were on. - pub ki_lastcpu: c_int, - /// PID of tracing process. - pub ki_tracer: c_int, - /// P2_* flags. - pub ki_flag2: c_int, - /// Default FIB number. - pub ki_fibnum: c_int, - /// Credential flags. - pub ki_cr_flags: crate::u_int, - /// Process jail ID. - pub ki_jid: c_int, - /// Number of threads in total. - pub ki_numthreads: c_int, - /// Thread ID. - pub ki_tid: crate::lwpid_t, - /// Process priority. - pub ki_pri: crate::priority, - /// Process rusage statistics. - pub ki_rusage: crate::rusage, - /// rusage of children processes. - pub ki_rusage_ch: crate::rusage, - // This is normally "struct pcb". - /// Kernel virtual addr of pcb. - pub ki_pcb: *mut c_void, - /// Kernel virtual addr of stack. - pub ki_kstack: *mut c_void, - /// User convenience pointer. - pub ki_udata: *mut c_void, - // This is normally "struct thread". - pub ki_tdaddr: *mut c_void, - pub ki_spareptrs: [*mut c_void; crate::KI_NSPARE_PTR], - pub ki_sparelongs: [c_long; crate::KI_NSPARE_LONG], - /// PS_* flags. - pub ki_sflag: c_long, - /// kthread flag. - pub ki_tdflags: c_long, - } -} - -s_no_extra_traits! 
{ - pub struct dirent { - pub d_fileno: crate::ino_t, - pub d_reclen: u16, - pub d_type: u8, - // Type of `d_namlen` changed from `char` to `u16` in FreeBSD 12: - pub d_namlen: u8, - pub d_name: [c_char; 256], - } - - pub struct statfs { - pub f_version: u32, - pub f_type: u32, - pub f_flags: u64, - pub f_bsize: u64, - pub f_iosize: u64, - pub f_blocks: u64, - pub f_bfree: u64, - pub f_bavail: i64, - pub f_files: u64, - pub f_ffree: i64, - pub f_syncwrites: u64, - pub f_asyncwrites: u64, - pub f_syncreads: u64, - pub f_asyncreads: u64, - f_spare: [u64; 10], - pub f_namemax: u32, - pub f_owner: crate::uid_t, - pub f_fsid: crate::fsid_t, - f_charspare: [c_char; 80], - pub f_fstypename: [c_char; 16], - // Array length changed from 88 to 1024 in FreeBSD 12: - pub f_mntfromname: [c_char; 88], - // Array length changed from 88 to 1024 in FreeBSD 12: - pub f_mntonname: [c_char; 88], - } - - pub struct vnstat { - pub vn_fileid: u64, - pub vn_size: u64, - pub vn_mntdir: *mut c_char, - pub vn_dev: u32, - pub vn_fsid: u32, - pub vn_type: c_int, - pub vn_mode: u16, - pub vn_devname: [c_char; crate::SPECNAMELEN as usize + 1], - } -} - -cfg_if! { - if #[cfg(feature = "extra_traits")] { - impl PartialEq for statfs { - fn eq(&self, other: &statfs) -> bool { - self.f_version == other.f_version - && self.f_type == other.f_type - && self.f_flags == other.f_flags - && self.f_bsize == other.f_bsize - && self.f_iosize == other.f_iosize - && self.f_blocks == other.f_blocks - && self.f_bfree == other.f_bfree - && self.f_bavail == other.f_bavail - && self.f_files == other.f_files - && self.f_ffree == other.f_ffree - && self.f_syncwrites == other.f_syncwrites - && self.f_asyncwrites == other.f_asyncwrites - && self.f_syncreads == other.f_syncreads - && self.f_asyncreads == other.f_asyncreads - && self.f_namemax == other.f_namemax - && self.f_owner == other.f_owner - && self.f_fsid == other.f_fsid - && self.f_fstypename == other.f_fstypename - && self - .f_mntfromname - .iter() - .zip(other.f_mntfromname.iter()) - .all(|(a, b)| a == b) - && self - .f_mntonname - .iter() - .zip(other.f_mntonname.iter()) - .all(|(a, b)| a == b) - } - } - impl Eq for statfs {} - impl hash::Hash for statfs { - fn hash(&self, state: &mut H) { - self.f_version.hash(state); - self.f_type.hash(state); - self.f_flags.hash(state); - self.f_bsize.hash(state); - self.f_iosize.hash(state); - self.f_blocks.hash(state); - self.f_bfree.hash(state); - self.f_bavail.hash(state); - self.f_files.hash(state); - self.f_ffree.hash(state); - self.f_syncwrites.hash(state); - self.f_asyncwrites.hash(state); - self.f_syncreads.hash(state); - self.f_asyncreads.hash(state); - self.f_namemax.hash(state); - self.f_owner.hash(state); - self.f_fsid.hash(state); - self.f_fstypename.hash(state); - self.f_mntfromname.hash(state); - self.f_mntonname.hash(state); - } - } - - impl PartialEq for dirent { - fn eq(&self, other: &dirent) -> bool { - self.d_fileno == other.d_fileno - && self.d_reclen == other.d_reclen - && self.d_type == other.d_type - && self.d_namlen == other.d_namlen - && self.d_name[..self.d_namlen as _] - .iter() - .zip(other.d_name.iter()) - .all(|(a, b)| a == b) - } - } - impl Eq for dirent {} - impl hash::Hash for dirent { - fn hash(&self, state: &mut H) { - self.d_fileno.hash(state); - self.d_reclen.hash(state); - self.d_type.hash(state); - self.d_namlen.hash(state); - self.d_name[..self.d_namlen as _].hash(state); - } - } - - impl PartialEq for vnstat { - fn eq(&self, other: &vnstat) -> bool { - let self_vn_devname: &[c_char] = &self.vn_devname; - 
let other_vn_devname: &[c_char] = &other.vn_devname; - - self.vn_fileid == other.vn_fileid - && self.vn_size == other.vn_size - && self.vn_mntdir == other.vn_mntdir - && self.vn_dev == other.vn_dev - && self.vn_fsid == other.vn_fsid - && self.vn_type == other.vn_type - && self.vn_mode == other.vn_mode - && self_vn_devname == other_vn_devname - } - } - impl Eq for vnstat {} - impl hash::Hash for vnstat { - fn hash(&self, state: &mut H) { - let self_vn_devname: &[c_char] = &self.vn_devname; - - self.vn_fileid.hash(state); - self.vn_size.hash(state); - self.vn_mntdir.hash(state); - self.vn_dev.hash(state); - self.vn_fsid.hash(state); - self.vn_type.hash(state); - self.vn_mode.hash(state); - self_vn_devname.hash(state); - } - } - } -} - -pub const ELAST: c_int = 96; -pub const RAND_MAX: c_int = 0x7fff_fffd; -pub const KI_NSPARE_PTR: usize = 6; -pub const MINCORE_SUPER: c_int = 0x20; -/// max length of devicename -pub const SPECNAMELEN: c_int = 63; - -safe_f! { - pub const fn makedev(major: c_uint, minor: c_uint) -> crate::dev_t { - let major = major as crate::dev_t; - let minor = minor as crate::dev_t; - (major << 8) | minor - } - - pub const fn major(dev: crate::dev_t) -> c_int { - ((dev >> 8) & 0xff) as c_int - } - - pub const fn minor(dev: crate::dev_t) -> c_int { - (dev & 0xffff00ff) as c_int - } -} - -extern "C" { - // Return type c_int was removed in FreeBSD 12 - pub fn setgrent() -> c_int; - - // Type of `addr` argument changed from `const void*` to `void*` - // in FreeBSD 12 - pub fn mprotect(addr: *const c_void, len: size_t, prot: c_int) -> c_int; - - // Return type c_int was removed in FreeBSD 12 - pub fn freelocale(loc: crate::locale_t) -> c_int; - - // Return type c_int changed to ssize_t in FreeBSD 12: - pub fn msgrcv( - msqid: c_int, - msgp: *mut c_void, - msgsz: size_t, - msgtyp: c_long, - msgflg: c_int, - ) -> c_int; - - // Type of `path` argument changed from `const void*` to `void*` - // in FreeBSD 12 - pub fn dirname(path: *const c_char) -> *mut c_char; - pub fn basename(path: *const c_char) -> *mut c_char; - - // Argument order of the function pointer changed in FreeBSD 14. From 14 onwards the signature - // matches the POSIX specification by having the third argument be a mutable pointer, on - // earlier versions the first argument is the mutable pointer. - #[link_name = "qsort_r@FBSD_1.0"] - pub fn qsort_r( - base: *mut c_void, - num: size_t, - size: size_t, - arg: *mut c_void, - compar: Option c_int>, - ); -} - -cfg_if! { - if #[cfg(target_pointer_width = "64")] { - mod b64; - pub use self::b64::*; - } else { - mod b32; - pub use self::b32::*; - } -} diff --git a/vendor/libc/src/unix/bsd/freebsdlike/freebsd/freebsd12/mod.rs b/vendor/libc/src/unix/bsd/freebsdlike/freebsd/freebsd12/mod.rs deleted file mode 100644 index 962d7817a2649c..00000000000000 --- a/vendor/libc/src/unix/bsd/freebsdlike/freebsd/freebsd12/mod.rs +++ /dev/null @@ -1,487 +0,0 @@ -use crate::off_t; -use crate::prelude::*; - -// APIs in FreeBSD 12 that have changed since 11. - -pub type nlink_t = u64; -pub type dev_t = u64; -pub type ino_t = u64; -pub type shmatt_t = c_uint; - -s! 
{ - pub struct shmid_ds { - pub shm_perm: crate::ipc_perm, - pub shm_segsz: size_t, - pub shm_lpid: crate::pid_t, - pub shm_cpid: crate::pid_t, - pub shm_nattch: crate::shmatt_t, - pub shm_atime: crate::time_t, - pub shm_dtime: crate::time_t, - pub shm_ctime: crate::time_t, - } - - pub struct kevent { - pub ident: crate::uintptr_t, - pub filter: c_short, - pub flags: c_ushort, - pub fflags: c_uint, - pub data: i64, - pub udata: *mut c_void, - pub ext: [u64; 4], - } - - pub struct kvm_page { - pub version: c_uint, - pub paddr: c_ulong, - pub kmap_vaddr: c_ulong, - pub dmap_vaddr: c_ulong, - pub prot: crate::vm_prot_t, - pub offset: crate::u_long, - pub len: size_t, - } - - pub struct kinfo_proc { - /// Size of this structure. - pub ki_structsize: c_int, - /// Reserved: layout identifier. - pub ki_layout: c_int, - /// Address of command arguments. - pub ki_args: *mut crate::pargs, - // This is normally "struct proc". - /// Address of proc. - pub ki_paddr: *mut c_void, - // This is normally "struct user". - /// Kernel virtual address of u-area. - pub ki_addr: *mut c_void, - // This is normally "struct vnode". - /// Pointer to trace file. - pub ki_tracep: *mut c_void, - // This is normally "struct vnode". - /// Pointer to executable file. - pub ki_textvp: *mut c_void, - /// Pointer to open file info. - pub ki_fd: *mut crate::filedesc, - // This is normally "struct vmspace". - /// Pointer to kernel vmspace struct. - pub ki_vmspace: *mut c_void, - /// Sleep address. - pub ki_wchan: *mut c_void, - /// Process identifier. - pub ki_pid: crate::pid_t, - /// Parent process ID. - pub ki_ppid: crate::pid_t, - /// Process group ID. - pub ki_pgid: crate::pid_t, - /// tty process group ID. - pub ki_tpgid: crate::pid_t, - /// Process session ID. - pub ki_sid: crate::pid_t, - /// Terminal session ID. - pub ki_tsid: crate::pid_t, - /// Job control counter. - pub ki_jobc: c_short, - /// Unused (just here for alignment). - pub ki_spare_short1: c_short, - /// Controlling tty dev. - pub ki_tdev_freebsd11: u32, - /// Signals arrived but not delivered. - pub ki_siglist: crate::sigset_t, - /// Current signal mask. - pub ki_sigmask: crate::sigset_t, - /// Signals being ignored. - pub ki_sigignore: crate::sigset_t, - /// Signals being caught by user. - pub ki_sigcatch: crate::sigset_t, - /// Effective user ID. - pub ki_uid: crate::uid_t, - /// Real user ID. - pub ki_ruid: crate::uid_t, - /// Saved effective user ID. - pub ki_svuid: crate::uid_t, - /// Real group ID. - pub ki_rgid: crate::gid_t, - /// Saved effective group ID. - pub ki_svgid: crate::gid_t, - /// Number of groups. - pub ki_ngroups: c_short, - /// Unused (just here for alignment). - pub ki_spare_short2: c_short, - /// Groups. - pub ki_groups: [crate::gid_t; crate::KI_NGROUPS], - /// Virtual size. - pub ki_size: crate::vm_size_t, - /// Current resident set size in pages. - pub ki_rssize: crate::segsz_t, - /// Resident set size before last swap. - pub ki_swrss: crate::segsz_t, - /// Text size (pages) XXX. - pub ki_tsize: crate::segsz_t, - /// Data size (pages) XXX. - pub ki_dsize: crate::segsz_t, - /// Stack size (pages). - pub ki_ssize: crate::segsz_t, - /// Exit status for wait & stop signal. - pub ki_xstat: crate::u_short, - /// Accounting flags. - pub ki_acflag: crate::u_short, - /// %cpu for process during `ki_swtime`. - pub ki_pctcpu: crate::fixpt_t, - /// Time averaged value of `ki_cpticks`. - pub ki_estcpu: crate::u_int, - /// Time since last blocked. - pub ki_slptime: crate::u_int, - /// Time swapped in or out. 
- pub ki_swtime: crate::u_int, - /// Number of copy-on-write faults. - pub ki_cow: crate::u_int, - /// Real time in microsec. - pub ki_runtime: u64, - /// Starting time. - pub ki_start: crate::timeval, - /// Time used by process children. - pub ki_childtime: crate::timeval, - /// P_* flags. - pub ki_flag: c_long, - /// KI_* flags (below). - pub ki_kiflag: c_long, - /// Kernel trace points. - pub ki_traceflag: c_int, - /// S* process status. - pub ki_stat: c_char, - /// Process "nice" value. - pub ki_nice: i8, // signed char - /// Process lock (prevent swap) count. - pub ki_lock: c_char, - /// Run queue index. - pub ki_rqindex: c_char, - /// Which cpu we are on. - pub ki_oncpu_old: c_uchar, - /// Last cpu we were on. - pub ki_lastcpu_old: c_uchar, - /// Thread name. - pub ki_tdname: [c_char; crate::TDNAMLEN + 1], - /// Wchan message. - pub ki_wmesg: [c_char; crate::WMESGLEN + 1], - /// Setlogin name. - pub ki_login: [c_char; crate::LOGNAMELEN + 1], - /// Lock name. - pub ki_lockname: [c_char; crate::LOCKNAMELEN + 1], - /// Command name. - pub ki_comm: [c_char; crate::COMMLEN + 1], - /// Emulation name. - pub ki_emul: [c_char; crate::KI_EMULNAMELEN + 1], - /// Login class. - pub ki_loginclass: [c_char; crate::LOGINCLASSLEN + 1], - /// More thread name. - pub ki_moretdname: [c_char; crate::MAXCOMLEN - crate::TDNAMLEN + 1], - /// Spare string space. - pub ki_sparestrings: [[c_char; 23]; 2], // little hack to allow PartialEq - /// Spare room for growth. - pub ki_spareints: [c_int; crate::KI_NSPARE_INT], - /// Controlling tty dev. - pub ki_tdev: crate::dev_t, - /// Which cpu we are on. - pub ki_oncpu: c_int, - /// Last cpu we were on. - pub ki_lastcpu: c_int, - /// PID of tracing process. - pub ki_tracer: c_int, - /// P2_* flags. - pub ki_flag2: c_int, - /// Default FIB number. - pub ki_fibnum: c_int, - /// Credential flags. - pub ki_cr_flags: crate::u_int, - /// Process jail ID. - pub ki_jid: c_int, - /// Number of threads in total. - pub ki_numthreads: c_int, - /// Thread ID. - pub ki_tid: crate::lwpid_t, - /// Process priority. - pub ki_pri: crate::priority, - /// Process rusage statistics. - pub ki_rusage: crate::rusage, - /// rusage of children processes. - pub ki_rusage_ch: crate::rusage, - // This is normally "struct pcb". - /// Kernel virtual addr of pcb. - pub ki_pcb: *mut c_void, - /// Kernel virtual addr of stack. - pub ki_kstack: *mut c_void, - /// User convenience pointer. - pub ki_udata: *mut c_void, - // This is normally "struct thread". - pub ki_tdaddr: *mut c_void, - pub ki_spareptrs: [*mut c_void; crate::KI_NSPARE_PTR], - pub ki_sparelongs: [c_long; crate::KI_NSPARE_LONG], - /// PS_* flags. - pub ki_sflag: c_long, - /// kthread flag. 
- pub ki_tdflags: c_long, - } - - pub struct stat { - pub st_dev: crate::dev_t, - pub st_ino: crate::ino_t, - pub st_nlink: crate::nlink_t, - pub st_mode: crate::mode_t, - st_padding0: i16, - pub st_uid: crate::uid_t, - pub st_gid: crate::gid_t, - st_padding1: i32, - pub st_rdev: crate::dev_t, - #[cfg(target_arch = "x86")] - st_atim_ext: i32, - pub st_atime: crate::time_t, - pub st_atime_nsec: c_long, - #[cfg(target_arch = "x86")] - st_mtim_ext: i32, - pub st_mtime: crate::time_t, - pub st_mtime_nsec: c_long, - #[cfg(target_arch = "x86")] - st_ctim_ext: i32, - pub st_ctime: crate::time_t, - pub st_ctime_nsec: c_long, - #[cfg(target_arch = "x86")] - st_btim_ext: i32, - pub st_birthtime: crate::time_t, - pub st_birthtime_nsec: c_long, - pub st_size: off_t, - pub st_blocks: crate::blkcnt_t, - pub st_blksize: crate::blksize_t, - pub st_flags: crate::fflags_t, - pub st_gen: u64, - pub st_spare: [u64; 10], - } -} - -s_no_extra_traits! { - pub struct dirent { - pub d_fileno: crate::ino_t, - pub d_off: off_t, - pub d_reclen: u16, - pub d_type: u8, - d_pad0: u8, - pub d_namlen: u16, - d_pad1: u16, - pub d_name: [c_char; 256], - } - - pub struct statfs { - pub f_version: u32, - pub f_type: u32, - pub f_flags: u64, - pub f_bsize: u64, - pub f_iosize: u64, - pub f_blocks: u64, - pub f_bfree: u64, - pub f_bavail: i64, - pub f_files: u64, - pub f_ffree: i64, - pub f_syncwrites: u64, - pub f_asyncwrites: u64, - pub f_syncreads: u64, - pub f_asyncreads: u64, - f_spare: [u64; 10], - pub f_namemax: u32, - pub f_owner: crate::uid_t, - pub f_fsid: crate::fsid_t, - f_charspare: [c_char; 80], - pub f_fstypename: [c_char; 16], - pub f_mntfromname: [c_char; 1024], - pub f_mntonname: [c_char; 1024], - } - - pub struct vnstat { - pub vn_fileid: u64, - pub vn_size: u64, - pub vn_dev: u64, - pub vn_fsid: u64, - pub vn_mntdir: *mut c_char, - pub vn_type: c_int, - pub vn_mode: u16, - pub vn_devname: [c_char; crate::SPECNAMELEN as usize + 1], - } -} - -cfg_if! 
{ - if #[cfg(feature = "extra_traits")] { - impl PartialEq for statfs { - fn eq(&self, other: &statfs) -> bool { - self.f_version == other.f_version - && self.f_type == other.f_type - && self.f_flags == other.f_flags - && self.f_bsize == other.f_bsize - && self.f_iosize == other.f_iosize - && self.f_blocks == other.f_blocks - && self.f_bfree == other.f_bfree - && self.f_bavail == other.f_bavail - && self.f_files == other.f_files - && self.f_ffree == other.f_ffree - && self.f_syncwrites == other.f_syncwrites - && self.f_asyncwrites == other.f_asyncwrites - && self.f_syncreads == other.f_syncreads - && self.f_asyncreads == other.f_asyncreads - && self.f_namemax == other.f_namemax - && self.f_owner == other.f_owner - && self.f_fsid == other.f_fsid - && self.f_fstypename == other.f_fstypename - && self - .f_mntfromname - .iter() - .zip(other.f_mntfromname.iter()) - .all(|(a, b)| a == b) - && self - .f_mntonname - .iter() - .zip(other.f_mntonname.iter()) - .all(|(a, b)| a == b) - } - } - impl Eq for statfs {} - impl hash::Hash for statfs { - fn hash(&self, state: &mut H) { - self.f_version.hash(state); - self.f_type.hash(state); - self.f_flags.hash(state); - self.f_bsize.hash(state); - self.f_iosize.hash(state); - self.f_blocks.hash(state); - self.f_bfree.hash(state); - self.f_bavail.hash(state); - self.f_files.hash(state); - self.f_ffree.hash(state); - self.f_syncwrites.hash(state); - self.f_asyncwrites.hash(state); - self.f_syncreads.hash(state); - self.f_asyncreads.hash(state); - self.f_namemax.hash(state); - self.f_owner.hash(state); - self.f_fsid.hash(state); - self.f_charspare.hash(state); - self.f_fstypename.hash(state); - self.f_mntfromname.hash(state); - self.f_mntonname.hash(state); - } - } - - impl PartialEq for dirent { - fn eq(&self, other: &dirent) -> bool { - self.d_fileno == other.d_fileno - && self.d_off == other.d_off - && self.d_reclen == other.d_reclen - && self.d_type == other.d_type - && self.d_namlen == other.d_namlen - && self.d_name[..self.d_namlen as _] - .iter() - .zip(other.d_name.iter()) - .all(|(a, b)| a == b) - } - } - impl Eq for dirent {} - impl hash::Hash for dirent { - fn hash(&self, state: &mut H) { - self.d_fileno.hash(state); - self.d_off.hash(state); - self.d_reclen.hash(state); - self.d_type.hash(state); - self.d_namlen.hash(state); - self.d_name[..self.d_namlen as _].hash(state); - } - } - - impl PartialEq for vnstat { - fn eq(&self, other: &vnstat) -> bool { - let self_vn_devname: &[c_char] = &self.vn_devname; - let other_vn_devname: &[c_char] = &other.vn_devname; - - self.vn_fileid == other.vn_fileid - && self.vn_size == other.vn_size - && self.vn_dev == other.vn_dev - && self.vn_fsid == other.vn_fsid - && self.vn_mntdir == other.vn_mntdir - && self.vn_type == other.vn_type - && self.vn_mode == other.vn_mode - && self_vn_devname == other_vn_devname - } - } - impl Eq for vnstat {} - impl hash::Hash for vnstat { - fn hash(&self, state: &mut H) { - let self_vn_devname: &[c_char] = &self.vn_devname; - - self.vn_fileid.hash(state); - self.vn_size.hash(state); - self.vn_dev.hash(state); - self.vn_fsid.hash(state); - self.vn_mntdir.hash(state); - self.vn_type.hash(state); - self.vn_mode.hash(state); - self_vn_devname.hash(state); - } - } - } -} - -pub const RAND_MAX: c_int = 0x7fff_fffd; -pub const ELAST: c_int = 97; - -/// max length of devicename -pub const SPECNAMELEN: c_int = 63; -pub const KI_NSPARE_PTR: usize = 6; - -pub const MINCORE_SUPER: c_int = 0x20; - -safe_f! 
{ - pub const fn makedev(major: c_uint, minor: c_uint) -> crate::dev_t { - let major = major as crate::dev_t; - let minor = minor as crate::dev_t; - let mut dev = 0; - dev |= ((major & 0xffffff00) as dev_t) << 32; - dev |= ((major & 0x000000ff) as dev_t) << 8; - dev |= ((minor & 0x0000ff00) as dev_t) << 24; - dev |= ((minor & 0xffff00ff) as dev_t) << 0; - dev - } - - pub const fn major(dev: crate::dev_t) -> c_int { - (((dev >> 32) & 0xffffff00) | ((dev >> 8) & 0xff)) as c_int - } - - pub const fn minor(dev: crate::dev_t) -> c_int { - (((dev >> 24) & 0xff00) | (dev & 0xffff00ff)) as c_int - } -} - -extern "C" { - pub fn setgrent(); - pub fn mprotect(addr: *mut c_void, len: size_t, prot: c_int) -> c_int; - pub fn freelocale(loc: crate::locale_t); - pub fn msgrcv( - msqid: c_int, - msgp: *mut c_void, - msgsz: size_t, - msgtyp: c_long, - msgflg: c_int, - ) -> ssize_t; - - pub fn dirname(path: *mut c_char) -> *mut c_char; - pub fn basename(path: *mut c_char) -> *mut c_char; - - #[link_name = "qsort_r@FBSD_1.0"] - pub fn qsort_r( - base: *mut c_void, - num: size_t, - size: size_t, - arg: *mut c_void, - compar: Option c_int>, - ); -} - -cfg_if! { - if #[cfg(target_arch = "x86_64")] { - mod x86_64; - pub use self::x86_64::*; - } -} diff --git a/vendor/libc/src/unix/bsd/freebsdlike/freebsd/freebsd12/x86_64.rs b/vendor/libc/src/unix/bsd/freebsdlike/freebsd/freebsd12/x86_64.rs deleted file mode 100644 index b29171cc509c51..00000000000000 --- a/vendor/libc/src/unix/bsd/freebsdlike/freebsd/freebsd12/x86_64.rs +++ /dev/null @@ -1,7 +0,0 @@ -use crate::prelude::*; - -pub const PROC_KPTI_CTL: c_int = crate::PROC_PROCCTL_MD_MIN; -pub const PROC_KPTI_CTL_ENABLE_ON_EXEC: c_int = 1; -pub const PROC_KPTI_CTL_DISABLE_ON_EXEC: c_int = 2; -pub const PROC_KPTI_STATUS: c_int = crate::PROC_PROCCTL_MD_MIN + 1; -pub const PROC_KPTI_STATUS_ACTIVE: c_int = 0x80000000; diff --git a/vendor/libc/src/unix/bsd/freebsdlike/freebsd/freebsd13/mod.rs b/vendor/libc/src/unix/bsd/freebsdlike/freebsd/freebsd13/mod.rs deleted file mode 100644 index 7b0e467ba375ef..00000000000000 --- a/vendor/libc/src/unix/bsd/freebsdlike/freebsd/freebsd13/mod.rs +++ /dev/null @@ -1,531 +0,0 @@ -use crate::off_t; -use crate::prelude::*; - -// APIs in FreeBSD 13 that have changed since 11. - -pub type nlink_t = u64; -pub type dev_t = u64; -pub type ino_t = u64; -pub type shmatt_t = c_uint; -pub type kpaddr_t = u64; -pub type kssize_t = i64; -pub type domainset_t = __c_anonymous_domainset; - -s! { - pub struct shmid_ds { - pub shm_perm: crate::ipc_perm, - pub shm_segsz: size_t, - pub shm_lpid: crate::pid_t, - pub shm_cpid: crate::pid_t, - pub shm_nattch: crate::shmatt_t, - pub shm_atime: crate::time_t, - pub shm_dtime: crate::time_t, - pub shm_ctime: crate::time_t, - } - - pub struct kevent { - pub ident: crate::uintptr_t, - pub filter: c_short, - pub flags: c_ushort, - pub fflags: c_uint, - pub data: i64, - pub udata: *mut c_void, - pub ext: [u64; 4], - } - - pub struct kvm_page { - pub kp_version: crate::u_int, - pub kp_paddr: crate::kpaddr_t, - pub kp_kmap_vaddr: crate::kvaddr_t, - pub kp_dmap_vaddr: crate::kvaddr_t, - pub kp_prot: crate::vm_prot_t, - pub kp_offset: off_t, - pub kp_len: size_t, - } - - pub struct __c_anonymous_domainset { - #[cfg(target_pointer_width = "64")] - _priv: [c_ulong; 4], - #[cfg(target_pointer_width = "32")] - _priv: [c_ulong; 8], - } - - pub struct kinfo_proc { - /// Size of this structure. - pub ki_structsize: c_int, - /// Reserved: layout identifier. - pub ki_layout: c_int, - /// Address of command arguments. 
- pub ki_args: *mut crate::pargs, - // This is normally "struct proc". - /// Address of proc. - pub ki_paddr: *mut c_void, - // This is normally "struct user". - /// Kernel virtual address of u-area. - pub ki_addr: *mut c_void, - // This is normally "struct vnode". - /// Pointer to trace file. - pub ki_tracep: *mut c_void, - // This is normally "struct vnode". - /// Pointer to executable file. - pub ki_textvp: *mut c_void, - /// Pointer to open file info. - pub ki_fd: *mut crate::filedesc, - // This is normally "struct vmspace". - /// Pointer to kernel vmspace struct. - pub ki_vmspace: *mut c_void, - /// Sleep address. - pub ki_wchan: *const c_void, - /// Process identifier. - pub ki_pid: crate::pid_t, - /// Parent process ID. - pub ki_ppid: crate::pid_t, - /// Process group ID. - pub ki_pgid: crate::pid_t, - /// tty process group ID. - pub ki_tpgid: crate::pid_t, - /// Process session ID. - pub ki_sid: crate::pid_t, - /// Terminal session ID. - pub ki_tsid: crate::pid_t, - /// Job control counter. - pub ki_jobc: c_short, - /// Unused (just here for alignment). - pub ki_spare_short1: c_short, - /// Controlling tty dev. - pub ki_tdev_freebsd11: u32, - /// Signals arrived but not delivered. - pub ki_siglist: crate::sigset_t, - /// Current signal mask. - pub ki_sigmask: crate::sigset_t, - /// Signals being ignored. - pub ki_sigignore: crate::sigset_t, - /// Signals being caught by user. - pub ki_sigcatch: crate::sigset_t, - /// Effective user ID. - pub ki_uid: crate::uid_t, - /// Real user ID. - pub ki_ruid: crate::uid_t, - /// Saved effective user ID. - pub ki_svuid: crate::uid_t, - /// Real group ID. - pub ki_rgid: crate::gid_t, - /// Saved effective group ID. - pub ki_svgid: crate::gid_t, - /// Number of groups. - pub ki_ngroups: c_short, - /// Unused (just here for alignment). - pub ki_spare_short2: c_short, - /// Groups. - pub ki_groups: [crate::gid_t; crate::KI_NGROUPS], - /// Virtual size. - pub ki_size: crate::vm_size_t, - /// Current resident set size in pages. - pub ki_rssize: crate::segsz_t, - /// Resident set size before last swap. - pub ki_swrss: crate::segsz_t, - /// Text size (pages) XXX. - pub ki_tsize: crate::segsz_t, - /// Data size (pages) XXX. - pub ki_dsize: crate::segsz_t, - /// Stack size (pages). - pub ki_ssize: crate::segsz_t, - /// Exit status for wait & stop signal. - pub ki_xstat: crate::u_short, - /// Accounting flags. - pub ki_acflag: crate::u_short, - /// %cpu for process during `ki_swtime`. - pub ki_pctcpu: crate::fixpt_t, - /// Time averaged value of `ki_cpticks`. - pub ki_estcpu: crate::u_int, - /// Time since last blocked. - pub ki_slptime: crate::u_int, - /// Time swapped in or out. - pub ki_swtime: crate::u_int, - /// Number of copy-on-write faults. - pub ki_cow: crate::u_int, - /// Real time in microsec. - pub ki_runtime: u64, - /// Starting time. - pub ki_start: crate::timeval, - /// Time used by process children. - pub ki_childtime: crate::timeval, - /// P_* flags. - pub ki_flag: c_long, - /// KI_* flags (below). - pub ki_kiflag: c_long, - /// Kernel trace points. - pub ki_traceflag: c_int, - /// S* process status. - pub ki_stat: c_char, - /// Process "nice" value. - pub ki_nice: i8, // signed char - /// Process lock (prevent swap) count. - pub ki_lock: c_char, - /// Run queue index. - pub ki_rqindex: c_char, - /// Which cpu we are on. - pub ki_oncpu_old: c_uchar, - /// Last cpu we were on. - pub ki_lastcpu_old: c_uchar, - /// Thread name. - pub ki_tdname: [c_char; crate::TDNAMLEN + 1], - /// Wchan message. 
- pub ki_wmesg: [c_char; crate::WMESGLEN + 1], - /// Setlogin name. - pub ki_login: [c_char; crate::LOGNAMELEN + 1], - /// Lock name. - pub ki_lockname: [c_char; crate::LOCKNAMELEN + 1], - /// Command name. - pub ki_comm: [c_char; crate::COMMLEN + 1], - /// Emulation name. - pub ki_emul: [c_char; crate::KI_EMULNAMELEN + 1], - /// Login class. - pub ki_loginclass: [c_char; crate::LOGINCLASSLEN + 1], - /// More thread name. - pub ki_moretdname: [c_char; crate::MAXCOMLEN - crate::TDNAMLEN + 1], - /// Spare string space. - pub ki_sparestrings: [[c_char; 23]; 2], // little hack to allow PartialEq - /// Spare room for growth. - pub ki_spareints: [c_int; crate::KI_NSPARE_INT], - /// Controlling tty dev. - pub ki_tdev: u64, - /// Which cpu we are on. - pub ki_oncpu: c_int, - /// Last cpu we were on. - pub ki_lastcpu: c_int, - /// PID of tracing process. - pub ki_tracer: c_int, - /// P2_* flags. - pub ki_flag2: c_int, - /// Default FIB number. - pub ki_fibnum: c_int, - /// Credential flags. - pub ki_cr_flags: crate::u_int, - /// Process jail ID. - pub ki_jid: c_int, - /// Number of threads in total. - pub ki_numthreads: c_int, - /// Thread ID. - pub ki_tid: crate::lwpid_t, - /// Process priority. - pub ki_pri: crate::priority, - /// Process rusage statistics. - pub ki_rusage: crate::rusage, - /// rusage of children processes. - pub ki_rusage_ch: crate::rusage, - // This is normally "struct pcb". - /// Kernel virtual addr of pcb. - pub ki_pcb: *mut c_void, - /// Kernel virtual addr of stack. - pub ki_kstack: *mut c_void, - /// User convenience pointer. - pub ki_udata: *mut c_void, - // This is normally "struct thread". - pub ki_tdaddr: *mut c_void, - // This is normally "struct pwddesc". - /// Pointer to process paths info. - pub ki_pd: *mut c_void, - pub ki_spareptrs: [*mut c_void; crate::KI_NSPARE_PTR], - pub ki_sparelongs: [c_long; crate::KI_NSPARE_LONG], - /// PS_* flags. - pub ki_sflag: c_long, - /// kthread flag. - pub ki_tdflags: c_long, - } - - pub struct stat { - pub st_dev: crate::dev_t, - pub st_ino: crate::ino_t, - pub st_nlink: crate::nlink_t, - pub st_mode: crate::mode_t, - st_padding0: i16, - pub st_uid: crate::uid_t, - pub st_gid: crate::gid_t, - st_padding1: i32, - pub st_rdev: crate::dev_t, - #[cfg(target_arch = "x86")] - st_atim_ext: i32, - pub st_atime: crate::time_t, - pub st_atime_nsec: c_long, - #[cfg(target_arch = "x86")] - st_mtim_ext: i32, - pub st_mtime: crate::time_t, - pub st_mtime_nsec: c_long, - #[cfg(target_arch = "x86")] - st_ctim_ext: i32, - pub st_ctime: crate::time_t, - pub st_ctime_nsec: c_long, - #[cfg(target_arch = "x86")] - st_btim_ext: i32, - pub st_birthtime: crate::time_t, - pub st_birthtime_nsec: c_long, - pub st_size: off_t, - pub st_blocks: crate::blkcnt_t, - pub st_blksize: crate::blksize_t, - pub st_flags: crate::fflags_t, - pub st_gen: u64, - pub st_spare: [u64; 10], - } -} - -s_no_extra_traits! 
{ - pub struct dirent { - pub d_fileno: crate::ino_t, - pub d_off: off_t, - pub d_reclen: u16, - pub d_type: u8, - d_pad0: u8, - pub d_namlen: u16, - d_pad1: u16, - pub d_name: [c_char; 256], - } - - pub struct statfs { - pub f_version: u32, - pub f_type: u32, - pub f_flags: u64, - pub f_bsize: u64, - pub f_iosize: u64, - pub f_blocks: u64, - pub f_bfree: u64, - pub f_bavail: i64, - pub f_files: u64, - pub f_ffree: i64, - pub f_syncwrites: u64, - pub f_asyncwrites: u64, - pub f_syncreads: u64, - pub f_asyncreads: u64, - f_spare: [u64; 10], - pub f_namemax: u32, - pub f_owner: crate::uid_t, - pub f_fsid: crate::fsid_t, - f_charspare: [c_char; 80], - pub f_fstypename: [c_char; 16], - pub f_mntfromname: [c_char; 1024], - pub f_mntonname: [c_char; 1024], - } - - pub struct vnstat { - pub vn_fileid: u64, - pub vn_size: u64, - pub vn_dev: u64, - pub vn_fsid: u64, - pub vn_mntdir: *mut c_char, - pub vn_type: c_int, - pub vn_mode: u16, - pub vn_devname: [c_char; crate::SPECNAMELEN as usize + 1], - } -} - -cfg_if! { - if #[cfg(feature = "extra_traits")] { - impl PartialEq for statfs { - fn eq(&self, other: &statfs) -> bool { - self.f_version == other.f_version - && self.f_type == other.f_type - && self.f_flags == other.f_flags - && self.f_bsize == other.f_bsize - && self.f_iosize == other.f_iosize - && self.f_blocks == other.f_blocks - && self.f_bfree == other.f_bfree - && self.f_bavail == other.f_bavail - && self.f_files == other.f_files - && self.f_ffree == other.f_ffree - && self.f_syncwrites == other.f_syncwrites - && self.f_asyncwrites == other.f_asyncwrites - && self.f_syncreads == other.f_syncreads - && self.f_asyncreads == other.f_asyncreads - && self.f_namemax == other.f_namemax - && self.f_owner == other.f_owner - && self.f_fsid == other.f_fsid - && self.f_fstypename == other.f_fstypename - && self - .f_mntfromname - .iter() - .zip(other.f_mntfromname.iter()) - .all(|(a, b)| a == b) - && self - .f_mntonname - .iter() - .zip(other.f_mntonname.iter()) - .all(|(a, b)| a == b) - } - } - impl Eq for statfs {} - impl hash::Hash for statfs { - fn hash(&self, state: &mut H) { - self.f_version.hash(state); - self.f_type.hash(state); - self.f_flags.hash(state); - self.f_bsize.hash(state); - self.f_iosize.hash(state); - self.f_blocks.hash(state); - self.f_bfree.hash(state); - self.f_bavail.hash(state); - self.f_files.hash(state); - self.f_ffree.hash(state); - self.f_syncwrites.hash(state); - self.f_asyncwrites.hash(state); - self.f_syncreads.hash(state); - self.f_asyncreads.hash(state); - self.f_namemax.hash(state); - self.f_owner.hash(state); - self.f_fsid.hash(state); - self.f_charspare.hash(state); - self.f_fstypename.hash(state); - self.f_mntfromname.hash(state); - self.f_mntonname.hash(state); - } - } - - impl PartialEq for dirent { - fn eq(&self, other: &dirent) -> bool { - self.d_fileno == other.d_fileno - && self.d_off == other.d_off - && self.d_reclen == other.d_reclen - && self.d_type == other.d_type - && self.d_namlen == other.d_namlen - && self.d_name[..self.d_namlen as _] - .iter() - .zip(other.d_name.iter()) - .all(|(a, b)| a == b) - } - } - impl Eq for dirent {} - impl hash::Hash for dirent { - fn hash(&self, state: &mut H) { - self.d_fileno.hash(state); - self.d_off.hash(state); - self.d_reclen.hash(state); - self.d_type.hash(state); - self.d_namlen.hash(state); - self.d_name[..self.d_namlen as _].hash(state); - } - } - - impl PartialEq for vnstat { - fn eq(&self, other: &vnstat) -> bool { - let self_vn_devname: &[c_char] = &self.vn_devname; - let other_vn_devname: &[c_char] = 
&other.vn_devname; - - self.vn_fileid == other.vn_fileid - && self.vn_size == other.vn_size - && self.vn_dev == other.vn_dev - && self.vn_fsid == other.vn_fsid - && self.vn_mntdir == other.vn_mntdir - && self.vn_type == other.vn_type - && self.vn_mode == other.vn_mode - && self_vn_devname == other_vn_devname - } - } - impl Eq for vnstat {} - impl hash::Hash for vnstat { - fn hash(&self, state: &mut H) { - let self_vn_devname: &[c_char] = &self.vn_devname; - - self.vn_fileid.hash(state); - self.vn_size.hash(state); - self.vn_dev.hash(state); - self.vn_fsid.hash(state); - self.vn_mntdir.hash(state); - self.vn_type.hash(state); - self.vn_mode.hash(state); - self_vn_devname.hash(state); - } - } - } -} - -pub const RAND_MAX: c_int = 0x7fff_ffff; -pub const ELAST: c_int = 97; - -pub const KF_TYPE_EVENTFD: c_int = 13; - -/// max length of devicename -pub const SPECNAMELEN: c_int = 255; -pub const KI_NSPARE_PTR: usize = 5; - -/// domainset policies -pub const DOMAINSET_POLICY_INVALID: c_int = 0; -pub const DOMAINSET_POLICY_ROUNDROBIN: c_int = 1; -pub const DOMAINSET_POLICY_FIRSTTOUCH: c_int = 2; -pub const DOMAINSET_POLICY_PREFER: c_int = 3; -pub const DOMAINSET_POLICY_INTERLEAVE: c_int = 4; - -pub const MINCORE_SUPER: c_int = 0x20; - -safe_f! { - pub const fn makedev(major: c_uint, minor: c_uint) -> crate::dev_t { - let major = major as crate::dev_t; - let minor = minor as crate::dev_t; - let mut dev = 0; - dev |= ((major & 0xffffff00) as dev_t) << 32; - dev |= ((major & 0x000000ff) as dev_t) << 8; - dev |= ((minor & 0x0000ff00) as dev_t) << 24; - dev |= ((minor & 0xffff00ff) as dev_t) << 0; - dev - } - - pub const fn major(dev: crate::dev_t) -> c_int { - (((dev >> 32) & 0xffffff00) | ((dev >> 8) & 0xff)) as c_int - } - - pub const fn minor(dev: crate::dev_t) -> c_int { - (((dev >> 24) & 0xff00) | (dev & 0xffff00ff)) as c_int - } -} - -extern "C" { - pub fn setgrent(); - pub fn mprotect(addr: *mut c_void, len: size_t, prot: c_int) -> c_int; - pub fn freelocale(loc: crate::locale_t); - pub fn msgrcv( - msqid: c_int, - msgp: *mut c_void, - msgsz: size_t, - msgtyp: c_long, - msgflg: c_int, - ) -> ssize_t; - - pub fn cpuset_getdomain( - level: crate::cpulevel_t, - which: crate::cpuwhich_t, - id: crate::id_t, - setsize: size_t, - mask: *mut crate::domainset_t, - policy: *mut c_int, - ) -> c_int; - pub fn cpuset_setdomain( - level: crate::cpulevel_t, - which: crate::cpuwhich_t, - id: crate::id_t, - setsize: size_t, - mask: *const crate::domainset_t, - policy: c_int, - ) -> c_int; - - pub fn dirname(path: *mut c_char) -> *mut c_char; - pub fn basename(path: *mut c_char) -> *mut c_char; - - #[link_name = "qsort_r@FBSD_1.0"] - pub fn qsort_r( - base: *mut c_void, - num: size_t, - size: size_t, - arg: *mut c_void, - compar: Option c_int>, - ); -} - -#[link(name = "kvm")] -extern "C" { - pub fn kvm_kerndisp(kd: *mut crate::kvm_t) -> crate::kssize_t; -} - -cfg_if! 
{ - if #[cfg(target_arch = "x86_64")] { - mod x86_64; - pub use self::x86_64::*; - } -} diff --git a/vendor/libc/src/unix/bsd/freebsdlike/freebsd/freebsd13/x86_64.rs b/vendor/libc/src/unix/bsd/freebsdlike/freebsd/freebsd13/x86_64.rs deleted file mode 100644 index b29171cc509c51..00000000000000 --- a/vendor/libc/src/unix/bsd/freebsdlike/freebsd/freebsd13/x86_64.rs +++ /dev/null @@ -1,7 +0,0 @@ -use crate::prelude::*; - -pub const PROC_KPTI_CTL: c_int = crate::PROC_PROCCTL_MD_MIN; -pub const PROC_KPTI_CTL_ENABLE_ON_EXEC: c_int = 1; -pub const PROC_KPTI_CTL_DISABLE_ON_EXEC: c_int = 2; -pub const PROC_KPTI_STATUS: c_int = crate::PROC_PROCCTL_MD_MIN + 1; -pub const PROC_KPTI_STATUS_ACTIVE: c_int = 0x80000000; diff --git a/vendor/libc/src/unix/bsd/freebsdlike/freebsd/freebsd14/mod.rs b/vendor/libc/src/unix/bsd/freebsdlike/freebsd/freebsd14/mod.rs deleted file mode 100644 index f20a46655665d8..00000000000000 --- a/vendor/libc/src/unix/bsd/freebsdlike/freebsd/freebsd14/mod.rs +++ /dev/null @@ -1,532 +0,0 @@ -use crate::off_t; -use crate::prelude::*; - -// APIs in FreeBSD 14 that have changed since 11. - -pub type nlink_t = u64; -pub type dev_t = u64; -pub type ino_t = u64; -pub type shmatt_t = c_uint; -pub type kpaddr_t = u64; -pub type kssize_t = i64; -pub type domainset_t = __c_anonymous_domainset; - -s! { - pub struct shmid_ds { - pub shm_perm: crate::ipc_perm, - pub shm_segsz: size_t, - pub shm_lpid: crate::pid_t, - pub shm_cpid: crate::pid_t, - pub shm_nattch: crate::shmatt_t, - pub shm_atime: crate::time_t, - pub shm_dtime: crate::time_t, - pub shm_ctime: crate::time_t, - } - - pub struct kevent { - pub ident: crate::uintptr_t, - pub filter: c_short, - pub flags: c_ushort, - pub fflags: c_uint, - pub data: i64, - pub udata: *mut c_void, - pub ext: [u64; 4], - } - - pub struct kvm_page { - pub kp_version: crate::u_int, - pub kp_paddr: crate::kpaddr_t, - pub kp_kmap_vaddr: crate::kvaddr_t, - pub kp_dmap_vaddr: crate::kvaddr_t, - pub kp_prot: crate::vm_prot_t, - pub kp_offset: off_t, - pub kp_len: size_t, - } - - pub struct __c_anonymous_domainset { - #[cfg(target_pointer_width = "64")] - _priv: [c_ulong; 4], - #[cfg(target_pointer_width = "32")] - _priv: [c_ulong; 8], - } - - pub struct kinfo_proc { - /// Size of this structure. - pub ki_structsize: c_int, - /// Reserved: layout identifier. - pub ki_layout: c_int, - /// Address of command arguments. - pub ki_args: *mut crate::pargs, - // This is normally "struct proc". - /// Address of proc. - pub ki_paddr: *mut c_void, - // This is normally "struct user". - /// Kernel virtual address of u-area. - pub ki_addr: *mut c_void, - // This is normally "struct vnode". - /// Pointer to trace file. - pub ki_tracep: *mut c_void, - // This is normally "struct vnode". - /// Pointer to executable file. - pub ki_textvp: *mut c_void, - /// Pointer to open file info. - pub ki_fd: *mut crate::filedesc, - // This is normally "struct vmspace". - /// Pointer to kernel vmspace struct. - pub ki_vmspace: *mut c_void, - /// Sleep address. - pub ki_wchan: *const c_void, - /// Process identifier. - pub ki_pid: crate::pid_t, - /// Parent process ID. - pub ki_ppid: crate::pid_t, - /// Process group ID. - pub ki_pgid: crate::pid_t, - /// tty process group ID. - pub ki_tpgid: crate::pid_t, - /// Process session ID. - pub ki_sid: crate::pid_t, - /// Terminal session ID. - pub ki_tsid: crate::pid_t, - /// Job control counter. - pub ki_jobc: c_short, - /// Unused (just here for alignment). - pub ki_spare_short1: c_short, - /// Controlling tty dev. 
- pub ki_tdev_freebsd11: u32, - /// Signals arrived but not delivered. - pub ki_siglist: crate::sigset_t, - /// Current signal mask. - pub ki_sigmask: crate::sigset_t, - /// Signals being ignored. - pub ki_sigignore: crate::sigset_t, - /// Signals being caught by user. - pub ki_sigcatch: crate::sigset_t, - /// Effective user ID. - pub ki_uid: crate::uid_t, - /// Real user ID. - pub ki_ruid: crate::uid_t, - /// Saved effective user ID. - pub ki_svuid: crate::uid_t, - /// Real group ID. - pub ki_rgid: crate::gid_t, - /// Saved effective group ID. - pub ki_svgid: crate::gid_t, - /// Number of groups. - pub ki_ngroups: c_short, - /// Unused (just here for alignment). - pub ki_spare_short2: c_short, - /// Groups. - pub ki_groups: [crate::gid_t; crate::KI_NGROUPS], - /// Virtual size. - pub ki_size: crate::vm_size_t, - /// Current resident set size in pages. - pub ki_rssize: crate::segsz_t, - /// Resident set size before last swap. - pub ki_swrss: crate::segsz_t, - /// Text size (pages) XXX. - pub ki_tsize: crate::segsz_t, - /// Data size (pages) XXX. - pub ki_dsize: crate::segsz_t, - /// Stack size (pages). - pub ki_ssize: crate::segsz_t, - /// Exit status for wait & stop signal. - pub ki_xstat: crate::u_short, - /// Accounting flags. - pub ki_acflag: crate::u_short, - /// %cpu for process during `ki_swtime`. - pub ki_pctcpu: crate::fixpt_t, - /// Time averaged value of `ki_cpticks`. - pub ki_estcpu: crate::u_int, - /// Time since last blocked. - pub ki_slptime: crate::u_int, - /// Time swapped in or out. - pub ki_swtime: crate::u_int, - /// Number of copy-on-write faults. - pub ki_cow: crate::u_int, - /// Real time in microsec. - pub ki_runtime: u64, - /// Starting time. - pub ki_start: crate::timeval, - /// Time used by process children. - pub ki_childtime: crate::timeval, - /// P_* flags. - pub ki_flag: c_long, - /// KI_* flags (below). - pub ki_kiflag: c_long, - /// Kernel trace points. - pub ki_traceflag: c_int, - /// S* process status. - pub ki_stat: c_char, - /// Process "nice" value. - pub ki_nice: i8, // signed char - /// Process lock (prevent swap) count. - pub ki_lock: c_char, - /// Run queue index. - pub ki_rqindex: c_char, - /// Which cpu we are on. - pub ki_oncpu_old: c_uchar, - /// Last cpu we were on. - pub ki_lastcpu_old: c_uchar, - /// Thread name. - pub ki_tdname: [c_char; crate::TDNAMLEN + 1], - /// Wchan message. - pub ki_wmesg: [c_char; crate::WMESGLEN + 1], - /// Setlogin name. - pub ki_login: [c_char; crate::LOGNAMELEN + 1], - /// Lock name. - pub ki_lockname: [c_char; crate::LOCKNAMELEN + 1], - /// Command name. - pub ki_comm: [c_char; crate::COMMLEN + 1], - /// Emulation name. - pub ki_emul: [c_char; crate::KI_EMULNAMELEN + 1], - /// Login class. - pub ki_loginclass: [c_char; crate::LOGINCLASSLEN + 1], - /// More thread name. - pub ki_moretdname: [c_char; crate::MAXCOMLEN - crate::TDNAMLEN + 1], - /// Spare string space. - pub ki_sparestrings: [[c_char; 23]; 2], // little hack to allow PartialEq - /// Spare room for growth. - pub ki_spareints: [c_int; crate::KI_NSPARE_INT], - /// Controlling tty dev. - pub ki_tdev: u64, - /// Which cpu we are on. - pub ki_oncpu: c_int, - /// Last cpu we were on. - pub ki_lastcpu: c_int, - /// PID of tracing process. - pub ki_tracer: c_int, - /// P2_* flags. - pub ki_flag2: c_int, - /// Default FIB number. - pub ki_fibnum: c_int, - /// Credential flags. - pub ki_cr_flags: crate::u_int, - /// Process jail ID. - pub ki_jid: c_int, - /// Number of threads in total. - pub ki_numthreads: c_int, - /// Thread ID. 
- pub ki_tid: crate::lwpid_t, - /// Process priority. - pub ki_pri: crate::priority, - /// Process rusage statistics. - pub ki_rusage: crate::rusage, - /// rusage of children processes. - pub ki_rusage_ch: crate::rusage, - // This is normally "struct pcb". - /// Kernel virtual addr of pcb. - pub ki_pcb: *mut c_void, - /// Kernel virtual addr of stack. - pub ki_kstack: *mut c_void, - /// User convenience pointer. - pub ki_udata: *mut c_void, - // This is normally "struct thread". - pub ki_tdaddr: *mut c_void, - // This is normally "struct pwddesc". - /// Pointer to process paths info. - pub ki_pd: *mut c_void, - pub ki_spareptrs: [*mut c_void; crate::KI_NSPARE_PTR], - pub ki_sparelongs: [c_long; crate::KI_NSPARE_LONG], - /// PS_* flags. - pub ki_sflag: c_long, - /// kthread flag. - pub ki_tdflags: c_long, - } - - #[non_exhaustive] - pub struct stat { - pub st_dev: crate::dev_t, - pub st_ino: crate::ino_t, - pub st_nlink: crate::nlink_t, - pub st_mode: crate::mode_t, - st_padding0: i16, - pub st_uid: crate::uid_t, - pub st_gid: crate::gid_t, - st_padding1: i32, - pub st_rdev: crate::dev_t, - #[cfg(target_arch = "x86")] - st_atim_ext: i32, - pub st_atime: crate::time_t, - pub st_atime_nsec: c_long, - #[cfg(target_arch = "x86")] - st_mtim_ext: i32, - pub st_mtime: crate::time_t, - pub st_mtime_nsec: c_long, - #[cfg(target_arch = "x86")] - st_ctim_ext: i32, - pub st_ctime: crate::time_t, - pub st_ctime_nsec: c_long, - #[cfg(target_arch = "x86")] - st_btim_ext: i32, - pub st_birthtime: crate::time_t, - pub st_birthtime_nsec: c_long, - pub st_size: off_t, - pub st_blocks: crate::blkcnt_t, - pub st_blksize: crate::blksize_t, - pub st_flags: crate::fflags_t, - pub st_gen: u64, - pub st_filerev: u64, - pub st_spare: [u64; 9], - } -} - -s_no_extra_traits! { - pub struct dirent { - pub d_fileno: crate::ino_t, - pub d_off: off_t, - pub d_reclen: u16, - pub d_type: u8, - d_pad0: u8, - pub d_namlen: u16, - d_pad1: u16, - pub d_name: [c_char; 256], - } - - pub struct statfs { - pub f_version: u32, - pub f_type: u32, - pub f_flags: u64, - pub f_bsize: u64, - pub f_iosize: u64, - pub f_blocks: u64, - pub f_bfree: u64, - pub f_bavail: i64, - pub f_files: u64, - pub f_ffree: i64, - pub f_syncwrites: u64, - pub f_asyncwrites: u64, - pub f_syncreads: u64, - pub f_asyncreads: u64, - f_spare: [u64; 10], - pub f_namemax: u32, - pub f_owner: crate::uid_t, - pub f_fsid: crate::fsid_t, - f_charspare: [c_char; 80], - pub f_fstypename: [c_char; 16], - pub f_mntfromname: [c_char; 1024], - pub f_mntonname: [c_char; 1024], - } - - pub struct vnstat { - pub vn_fileid: u64, - pub vn_size: u64, - pub vn_dev: u64, - pub vn_fsid: u64, - pub vn_mntdir: *mut c_char, - pub vn_type: c_int, - pub vn_mode: u16, - pub vn_devname: [c_char; crate::SPECNAMELEN as usize + 1], - } -} - -cfg_if! 
{ - if #[cfg(feature = "extra_traits")] { - impl PartialEq for statfs { - fn eq(&self, other: &statfs) -> bool { - self.f_version == other.f_version - && self.f_type == other.f_type - && self.f_flags == other.f_flags - && self.f_bsize == other.f_bsize - && self.f_iosize == other.f_iosize - && self.f_blocks == other.f_blocks - && self.f_bfree == other.f_bfree - && self.f_bavail == other.f_bavail - && self.f_files == other.f_files - && self.f_ffree == other.f_ffree - && self.f_syncwrites == other.f_syncwrites - && self.f_asyncwrites == other.f_asyncwrites - && self.f_syncreads == other.f_syncreads - && self.f_asyncreads == other.f_asyncreads - && self.f_namemax == other.f_namemax - && self.f_owner == other.f_owner - && self.f_fsid == other.f_fsid - && self.f_fstypename == other.f_fstypename - && self - .f_mntfromname - .iter() - .zip(other.f_mntfromname.iter()) - .all(|(a, b)| a == b) - && self - .f_mntonname - .iter() - .zip(other.f_mntonname.iter()) - .all(|(a, b)| a == b) - } - } - impl Eq for statfs {} - impl hash::Hash for statfs { - fn hash(&self, state: &mut H) { - self.f_version.hash(state); - self.f_type.hash(state); - self.f_flags.hash(state); - self.f_bsize.hash(state); - self.f_iosize.hash(state); - self.f_blocks.hash(state); - self.f_bfree.hash(state); - self.f_bavail.hash(state); - self.f_files.hash(state); - self.f_ffree.hash(state); - self.f_syncwrites.hash(state); - self.f_asyncwrites.hash(state); - self.f_syncreads.hash(state); - self.f_asyncreads.hash(state); - self.f_namemax.hash(state); - self.f_owner.hash(state); - self.f_fsid.hash(state); - self.f_charspare.hash(state); - self.f_fstypename.hash(state); - self.f_mntfromname.hash(state); - self.f_mntonname.hash(state); - } - } - - impl PartialEq for dirent { - fn eq(&self, other: &dirent) -> bool { - self.d_fileno == other.d_fileno - && self.d_off == other.d_off - && self.d_reclen == other.d_reclen - && self.d_type == other.d_type - && self.d_namlen == other.d_namlen - && self.d_name[..self.d_namlen as _] - .iter() - .zip(other.d_name.iter()) - .all(|(a, b)| a == b) - } - } - impl Eq for dirent {} - impl hash::Hash for dirent { - fn hash(&self, state: &mut H) { - self.d_fileno.hash(state); - self.d_off.hash(state); - self.d_reclen.hash(state); - self.d_type.hash(state); - self.d_namlen.hash(state); - self.d_name[..self.d_namlen as _].hash(state); - } - } - - impl PartialEq for vnstat { - fn eq(&self, other: &vnstat) -> bool { - let self_vn_devname: &[c_char] = &self.vn_devname; - let other_vn_devname: &[c_char] = &other.vn_devname; - - self.vn_fileid == other.vn_fileid - && self.vn_size == other.vn_size - && self.vn_dev == other.vn_dev - && self.vn_fsid == other.vn_fsid - && self.vn_mntdir == other.vn_mntdir - && self.vn_type == other.vn_type - && self.vn_mode == other.vn_mode - && self_vn_devname == other_vn_devname - } - } - impl Eq for vnstat {} - impl hash::Hash for vnstat { - fn hash(&self, state: &mut H) { - let self_vn_devname: &[c_char] = &self.vn_devname; - - self.vn_fileid.hash(state); - self.vn_size.hash(state); - self.vn_dev.hash(state); - self.vn_fsid.hash(state); - self.vn_mntdir.hash(state); - self.vn_type.hash(state); - self.vn_mode.hash(state); - self_vn_devname.hash(state); - } - } - } -} - -pub const RAND_MAX: c_int = 0x7fff_ffff; -pub const ELAST: c_int = 97; - -pub const KF_TYPE_EVENTFD: c_int = 13; - -/// max length of devicename -pub const SPECNAMELEN: c_int = 255; -pub const KI_NSPARE_PTR: usize = 5; - -/// domainset policies -pub const DOMAINSET_POLICY_INVALID: c_int = 0; -pub const 
DOMAINSET_POLICY_ROUNDROBIN: c_int = 1; -pub const DOMAINSET_POLICY_FIRSTTOUCH: c_int = 2; -pub const DOMAINSET_POLICY_PREFER: c_int = 3; -pub const DOMAINSET_POLICY_INTERLEAVE: c_int = 4; - -pub const MINCORE_SUPER: c_int = 0x60; - -safe_f! { - pub const fn makedev(major: c_uint, minor: c_uint) -> crate::dev_t { - let major = major as crate::dev_t; - let minor = minor as crate::dev_t; - let mut dev = 0; - dev |= ((major & 0xffffff00) as dev_t) << 32; - dev |= ((major & 0x000000ff) as dev_t) << 8; - dev |= ((minor & 0x0000ff00) as dev_t) << 24; - dev |= ((minor & 0xffff00ff) as dev_t) << 0; - dev - } - - pub const fn major(dev: crate::dev_t) -> c_int { - (((dev >> 32) & 0xffffff00) | ((dev >> 8) & 0xff)) as c_int - } - - pub const fn minor(dev: crate::dev_t) -> c_int { - (((dev >> 24) & 0xff00) | (dev & 0xffff00ff)) as c_int - } -} - -extern "C" { - pub fn setgrent(); - pub fn mprotect(addr: *mut c_void, len: size_t, prot: c_int) -> c_int; - pub fn freelocale(loc: crate::locale_t); - pub fn msgrcv( - msqid: c_int, - msgp: *mut c_void, - msgsz: size_t, - msgtyp: c_long, - msgflg: c_int, - ) -> ssize_t; - - pub fn cpuset_getdomain( - level: crate::cpulevel_t, - which: crate::cpuwhich_t, - id: crate::id_t, - setsize: size_t, - mask: *mut crate::domainset_t, - policy: *mut c_int, - ) -> c_int; - pub fn cpuset_setdomain( - level: crate::cpulevel_t, - which: crate::cpuwhich_t, - id: crate::id_t, - setsize: size_t, - mask: *const crate::domainset_t, - policy: c_int, - ) -> c_int; - - pub fn dirname(path: *mut c_char) -> *mut c_char; - pub fn basename(path: *mut c_char) -> *mut c_char; - - pub fn qsort_r( - base: *mut c_void, - num: size_t, - size: size_t, - compar: Option c_int>, - arg: *mut c_void, - ); -} - -#[link(name = "kvm")] -extern "C" { - pub fn kvm_kerndisp(kd: *mut crate::kvm_t) -> crate::kssize_t; -} - -cfg_if! { - if #[cfg(target_arch = "x86_64")] { - mod x86_64; - pub use self::x86_64::*; - } -} diff --git a/vendor/libc/src/unix/bsd/freebsdlike/freebsd/freebsd14/x86_64.rs b/vendor/libc/src/unix/bsd/freebsdlike/freebsd/freebsd14/x86_64.rs deleted file mode 100644 index 3e037471fbf68b..00000000000000 --- a/vendor/libc/src/unix/bsd/freebsdlike/freebsd/freebsd14/x86_64.rs +++ /dev/null @@ -1,14 +0,0 @@ -use crate::prelude::*; - -pub const PROC_KPTI_CTL: c_int = crate::PROC_PROCCTL_MD_MIN; -pub const PROC_KPTI_CTL_ENABLE_ON_EXEC: c_int = 1; -pub const PROC_KPTI_CTL_DISABLE_ON_EXEC: c_int = 2; -pub const PROC_KPTI_STATUS: c_int = crate::PROC_PROCCTL_MD_MIN + 1; -pub const PROC_KPTI_STATUS_ACTIVE: c_int = 0x80000000; -pub const PROC_LA_CTL: c_int = crate::PROC_PROCCTL_MD_MIN + 2; -pub const PROC_LA_STATUS: c_int = crate::PROC_PROCCTL_MD_MIN + 3; -pub const PROC_LA_CTL_LA48_ON_EXEC: c_int = 1; -pub const PROC_LA_CTL_LA57_ON_EXEC: c_int = 2; -pub const PROC_LA_CTL_DEFAULT_ON_EXEC: c_int = 3; -pub const PROC_LA_STATUS_LA48: c_int = 0x01000000; -pub const PROC_LA_STATUS_LA57: c_int = 0x02000000; diff --git a/vendor/libc/src/unix/bsd/freebsdlike/freebsd/freebsd15/mod.rs b/vendor/libc/src/unix/bsd/freebsdlike/freebsd/freebsd15/mod.rs deleted file mode 100644 index c0d27ef370e6f5..00000000000000 --- a/vendor/libc/src/unix/bsd/freebsdlike/freebsd/freebsd15/mod.rs +++ /dev/null @@ -1,534 +0,0 @@ -use crate::off_t; -use crate::prelude::*; - -// APIs in FreeBSD 15 that have changed since 11. - -pub type nlink_t = u64; -pub type dev_t = u64; -pub type ino_t = u64; -pub type shmatt_t = c_uint; -pub type kpaddr_t = u64; -pub type kssize_t = i64; -pub type domainset_t = __c_anonymous_domainset; - -s! 
{ - pub struct shmid_ds { - pub shm_perm: crate::ipc_perm, - pub shm_segsz: size_t, - pub shm_lpid: crate::pid_t, - pub shm_cpid: crate::pid_t, - pub shm_nattch: crate::shmatt_t, - pub shm_atime: crate::time_t, - pub shm_dtime: crate::time_t, - pub shm_ctime: crate::time_t, - } - - pub struct kevent { - pub ident: crate::uintptr_t, - pub filter: c_short, - pub flags: c_ushort, - pub fflags: c_uint, - pub data: i64, - pub udata: *mut c_void, - pub ext: [u64; 4], - } - - pub struct kvm_page { - pub kp_version: crate::u_int, - pub kp_paddr: crate::kpaddr_t, - pub kp_kmap_vaddr: crate::kvaddr_t, - pub kp_dmap_vaddr: crate::kvaddr_t, - pub kp_prot: crate::vm_prot_t, - pub kp_offset: off_t, - pub kp_len: size_t, - } - - pub struct __c_anonymous_domainset { - #[cfg(target_pointer_width = "64")] - _priv: [c_ulong; 4], - #[cfg(target_pointer_width = "32")] - _priv: [c_ulong; 8], - } - - #[non_exhaustive] - pub struct kinfo_proc { - /// Size of this structure. - pub ki_structsize: c_int, - /// Reserved: layout identifier. - pub ki_layout: c_int, - /// Address of command arguments. - pub ki_args: *mut crate::pargs, - // This is normally "struct proc". - /// Address of proc. - pub ki_paddr: *mut c_void, - // This is normally "struct user". - /// Kernel virtual address of u-area. - pub ki_addr: *mut c_void, - // This is normally "struct vnode". - /// Pointer to trace file. - pub ki_tracep: *mut c_void, - // This is normally "struct vnode". - /// Pointer to executable file. - pub ki_textvp: *mut c_void, - /// Pointer to open file info. - pub ki_fd: *mut crate::filedesc, - // This is normally "struct vmspace". - /// Pointer to kernel vmspace struct. - pub ki_vmspace: *mut c_void, - /// Sleep address. - pub ki_wchan: *const c_void, - /// Process identifier. - pub ki_pid: crate::pid_t, - /// Parent process ID. - pub ki_ppid: crate::pid_t, - /// Process group ID. - pub ki_pgid: crate::pid_t, - /// tty process group ID. - pub ki_tpgid: crate::pid_t, - /// Process session ID. - pub ki_sid: crate::pid_t, - /// Terminal session ID. - pub ki_tsid: crate::pid_t, - /// Job control counter. - pub ki_jobc: c_short, - /// Unused (just here for alignment). - pub ki_spare_short1: c_short, - /// Controlling tty dev. - pub ki_tdev_freebsd11: u32, - /// Signals arrived but not delivered. - pub ki_siglist: crate::sigset_t, - /// Current signal mask. - pub ki_sigmask: crate::sigset_t, - /// Signals being ignored. - pub ki_sigignore: crate::sigset_t, - /// Signals being caught by user. - pub ki_sigcatch: crate::sigset_t, - /// Effective user ID. - pub ki_uid: crate::uid_t, - /// Real user ID. - pub ki_ruid: crate::uid_t, - /// Saved effective user ID. - pub ki_svuid: crate::uid_t, - /// Real group ID. - pub ki_rgid: crate::gid_t, - /// Saved effective group ID. - pub ki_svgid: crate::gid_t, - /// Number of groups. - pub ki_ngroups: c_short, - /// Unused (just here for alignment). - pub ki_spare_short2: c_short, - /// Groups. - pub ki_groups: [crate::gid_t; crate::KI_NGROUPS], - /// Virtual size. - pub ki_size: crate::vm_size_t, - /// Current resident set size in pages. - pub ki_rssize: crate::segsz_t, - /// Resident set size before last swap. - pub ki_swrss: crate::segsz_t, - /// Text size (pages) XXX. - pub ki_tsize: crate::segsz_t, - /// Data size (pages) XXX. - pub ki_dsize: crate::segsz_t, - /// Stack size (pages). - pub ki_ssize: crate::segsz_t, - /// Exit status for wait & stop signal. - pub ki_xstat: crate::u_short, - /// Accounting flags. - pub ki_acflag: crate::u_short, - /// %cpu for process during `ki_swtime`. 
- pub ki_pctcpu: crate::fixpt_t, - /// Time averaged value of `ki_cpticks`. - pub ki_estcpu: crate::u_int, - /// Time since last blocked. - pub ki_slptime: crate::u_int, - /// Time swapped in or out. - pub ki_swtime: crate::u_int, - /// Number of copy-on-write faults. - pub ki_cow: crate::u_int, - /// Real time in microsec. - pub ki_runtime: u64, - /// Starting time. - pub ki_start: crate::timeval, - /// Time used by process children. - pub ki_childtime: crate::timeval, - /// P_* flags. - pub ki_flag: c_long, - /// KI_* flags (below). - pub ki_kiflag: c_long, - /// Kernel trace points. - pub ki_traceflag: c_int, - /// S* process status. - pub ki_stat: c_char, - /// Process "nice" value. - pub ki_nice: i8, // signed char - /// Process lock (prevent swap) count. - pub ki_lock: c_char, - /// Run queue index. - pub ki_rqindex: c_char, - /// Which cpu we are on. - pub ki_oncpu_old: c_uchar, - /// Last cpu we were on. - pub ki_lastcpu_old: c_uchar, - /// Thread name. - pub ki_tdname: [c_char; crate::TDNAMLEN + 1], - /// Wchan message. - pub ki_wmesg: [c_char; crate::WMESGLEN + 1], - /// Setlogin name. - pub ki_login: [c_char; crate::LOGNAMELEN + 1], - /// Lock name. - pub ki_lockname: [c_char; crate::LOCKNAMELEN + 1], - /// Command name. - pub ki_comm: [c_char; crate::COMMLEN + 1], - /// Emulation name. - pub ki_emul: [c_char; crate::KI_EMULNAMELEN + 1], - /// Login class. - pub ki_loginclass: [c_char; crate::LOGINCLASSLEN + 1], - /// More thread name. - pub ki_moretdname: [c_char; crate::MAXCOMLEN - crate::TDNAMLEN + 1], - /// Spare string space. - pub ki_sparestrings: [[c_char; 23]; 2], // little hack to allow PartialEq - /// Spare room for growth. - pub ki_spareints: [c_int; crate::KI_NSPARE_INT], - /// Controlling tty dev. - pub ki_tdev: u64, - /// Which cpu we are on. - pub ki_oncpu: c_int, - /// Last cpu we were on. - pub ki_lastcpu: c_int, - /// PID of tracing process. - pub ki_tracer: c_int, - /// P2_* flags. - pub ki_flag2: c_int, - /// Default FIB number. - pub ki_fibnum: c_int, - /// Credential flags. - pub ki_cr_flags: crate::u_int, - /// Process jail ID. - pub ki_jid: c_int, - /// Number of threads in total. - pub ki_numthreads: c_int, - /// Thread ID. - pub ki_tid: crate::lwpid_t, - /// Process priority. - pub ki_pri: crate::priority, - /// Process rusage statistics. - pub ki_rusage: crate::rusage, - /// rusage of children processes. - pub ki_rusage_ch: crate::rusage, - // This is normally "struct pcb". - /// Kernel virtual addr of pcb. - pub ki_pcb: *mut c_void, - /// Kernel virtual addr of stack. - pub ki_kstack: *mut c_void, - /// User convenience pointer. - pub ki_udata: *mut c_void, - // This is normally "struct thread". - pub ki_tdaddr: *mut c_void, - // This is normally "struct pwddesc". - /// Pointer to process paths info. - pub ki_pd: *mut c_void, - /// Address of the ext err msg place - pub ki_uerrmsg: *mut c_void, - pub ki_spareptrs: [*mut c_void; crate::KI_NSPARE_PTR], - pub ki_sparelongs: [c_long; crate::KI_NSPARE_LONG], - /// PS_* flags. - pub ki_sflag: c_long, - /// kthread flag. 
- pub ki_tdflags: c_long, - } - - pub struct stat { - pub st_dev: crate::dev_t, - pub st_ino: crate::ino_t, - pub st_nlink: crate::nlink_t, - pub st_mode: crate::mode_t, - st_padding0: i16, - pub st_uid: crate::uid_t, - pub st_gid: crate::gid_t, - st_padding1: i32, - pub st_rdev: crate::dev_t, - #[cfg(target_arch = "x86")] - st_atim_ext: i32, - pub st_atime: crate::time_t, - pub st_atime_nsec: c_long, - #[cfg(target_arch = "x86")] - st_mtim_ext: i32, - pub st_mtime: crate::time_t, - pub st_mtime_nsec: c_long, - #[cfg(target_arch = "x86")] - st_ctim_ext: i32, - pub st_ctime: crate::time_t, - pub st_ctime_nsec: c_long, - #[cfg(target_arch = "x86")] - st_btim_ext: i32, - pub st_birthtime: crate::time_t, - pub st_birthtime_nsec: c_long, - pub st_size: off_t, - pub st_blocks: crate::blkcnt_t, - pub st_blksize: crate::blksize_t, - pub st_flags: crate::fflags_t, - pub st_gen: u64, - pub st_filerev: u64, - pub st_spare: [u64; 9], - } -} - -s_no_extra_traits! { - pub struct dirent { - pub d_fileno: crate::ino_t, - pub d_off: off_t, - pub d_reclen: u16, - pub d_type: u8, - d_pad0: u8, - pub d_namlen: u16, - d_pad1: u16, - pub d_name: [c_char; 256], - } - - pub struct statfs { - pub f_version: u32, - pub f_type: u32, - pub f_flags: u64, - pub f_bsize: u64, - pub f_iosize: u64, - pub f_blocks: u64, - pub f_bfree: u64, - pub f_bavail: i64, - pub f_files: u64, - pub f_ffree: i64, - pub f_syncwrites: u64, - pub f_asyncwrites: u64, - pub f_syncreads: u64, - pub f_asyncreads: u64, - f_spare: [u64; 10], - pub f_namemax: u32, - pub f_owner: crate::uid_t, - pub f_fsid: crate::fsid_t, - f_charspare: [c_char; 80], - pub f_fstypename: [c_char; 16], - pub f_mntfromname: [c_char; 1024], - pub f_mntonname: [c_char; 1024], - } - - pub struct vnstat { - pub vn_fileid: u64, - pub vn_size: u64, - pub vn_dev: u64, - pub vn_fsid: u64, - pub vn_mntdir: *mut c_char, - pub vn_type: c_int, - pub vn_mode: u16, - pub vn_devname: [c_char; crate::SPECNAMELEN as usize + 1], - } -} - -cfg_if! 
{ - if #[cfg(feature = "extra_traits")] { - impl PartialEq for statfs { - fn eq(&self, other: &statfs) -> bool { - self.f_version == other.f_version - && self.f_type == other.f_type - && self.f_flags == other.f_flags - && self.f_bsize == other.f_bsize - && self.f_iosize == other.f_iosize - && self.f_blocks == other.f_blocks - && self.f_bfree == other.f_bfree - && self.f_bavail == other.f_bavail - && self.f_files == other.f_files - && self.f_ffree == other.f_ffree - && self.f_syncwrites == other.f_syncwrites - && self.f_asyncwrites == other.f_asyncwrites - && self.f_syncreads == other.f_syncreads - && self.f_asyncreads == other.f_asyncreads - && self.f_namemax == other.f_namemax - && self.f_owner == other.f_owner - && self.f_fsid == other.f_fsid - && self.f_fstypename == other.f_fstypename - && self - .f_mntfromname - .iter() - .zip(other.f_mntfromname.iter()) - .all(|(a, b)| a == b) - && self - .f_mntonname - .iter() - .zip(other.f_mntonname.iter()) - .all(|(a, b)| a == b) - } - } - impl Eq for statfs {} - impl hash::Hash for statfs { - fn hash(&self, state: &mut H) { - self.f_version.hash(state); - self.f_type.hash(state); - self.f_flags.hash(state); - self.f_bsize.hash(state); - self.f_iosize.hash(state); - self.f_blocks.hash(state); - self.f_bfree.hash(state); - self.f_bavail.hash(state); - self.f_files.hash(state); - self.f_ffree.hash(state); - self.f_syncwrites.hash(state); - self.f_asyncwrites.hash(state); - self.f_syncreads.hash(state); - self.f_asyncreads.hash(state); - self.f_namemax.hash(state); - self.f_owner.hash(state); - self.f_fsid.hash(state); - self.f_charspare.hash(state); - self.f_fstypename.hash(state); - self.f_mntfromname.hash(state); - self.f_mntonname.hash(state); - } - } - - impl PartialEq for dirent { - fn eq(&self, other: &dirent) -> bool { - self.d_fileno == other.d_fileno - && self.d_off == other.d_off - && self.d_reclen == other.d_reclen - && self.d_type == other.d_type - && self.d_namlen == other.d_namlen - && self.d_name[..self.d_namlen as _] - .iter() - .zip(other.d_name.iter()) - .all(|(a, b)| a == b) - } - } - impl Eq for dirent {} - impl hash::Hash for dirent { - fn hash(&self, state: &mut H) { - self.d_fileno.hash(state); - self.d_off.hash(state); - self.d_reclen.hash(state); - self.d_type.hash(state); - self.d_namlen.hash(state); - self.d_name[..self.d_namlen as _].hash(state); - } - } - - impl PartialEq for vnstat { - fn eq(&self, other: &vnstat) -> bool { - let self_vn_devname: &[c_char] = &self.vn_devname; - let other_vn_devname: &[c_char] = &other.vn_devname; - - self.vn_fileid == other.vn_fileid - && self.vn_size == other.vn_size - && self.vn_dev == other.vn_dev - && self.vn_fsid == other.vn_fsid - && self.vn_mntdir == other.vn_mntdir - && self.vn_type == other.vn_type - && self.vn_mode == other.vn_mode - && self_vn_devname == other_vn_devname - } - } - impl Eq for vnstat {} - impl hash::Hash for vnstat { - fn hash(&self, state: &mut H) { - let self_vn_devname: &[c_char] = &self.vn_devname; - - self.vn_fileid.hash(state); - self.vn_size.hash(state); - self.vn_dev.hash(state); - self.vn_fsid.hash(state); - self.vn_mntdir.hash(state); - self.vn_type.hash(state); - self.vn_mode.hash(state); - self_vn_devname.hash(state); - } - } - } -} - -pub const RAND_MAX: c_int = 0x7fff_ffff; -pub const ELAST: c_int = 97; - -pub const KF_TYPE_EVENTFD: c_int = 13; - -/// max length of devicename -pub const SPECNAMELEN: c_int = 255; -pub const KI_NSPARE_PTR: usize = 4; - -/// domainset policies -pub const DOMAINSET_POLICY_INVALID: c_int = 0; -pub const 
DOMAINSET_POLICY_ROUNDROBIN: c_int = 1;
-pub const DOMAINSET_POLICY_FIRSTTOUCH: c_int = 2;
-pub const DOMAINSET_POLICY_PREFER: c_int = 3;
-pub const DOMAINSET_POLICY_INTERLEAVE: c_int = 4;
-
-pub const MINCORE_SUPER: c_int = 0x60;
-
-safe_f! {
-    pub const fn makedev(major: c_uint, minor: c_uint) -> crate::dev_t {
-        let major = major as crate::dev_t;
-        let minor = minor as crate::dev_t;
-        let mut dev = 0;
-        dev |= ((major & 0xffffff00) as dev_t) << 32;
-        dev |= ((major & 0x000000ff) as dev_t) << 8;
-        dev |= ((minor & 0x0000ff00) as dev_t) << 24;
-        dev |= ((minor & 0xffff00ff) as dev_t) << 0;
-        dev
-    }
-
-    pub const fn major(dev: crate::dev_t) -> c_int {
-        (((dev >> 32) & 0xffffff00) | ((dev >> 8) & 0xff)) as c_int
-    }
-
-    pub const fn minor(dev: crate::dev_t) -> c_int {
-        (((dev >> 24) & 0xff00) | (dev & 0xffff00ff)) as c_int
-    }
-}
-
-extern "C" {
-    pub fn setgrent();
-    pub fn mprotect(addr: *mut c_void, len: size_t, prot: c_int) -> c_int;
-    pub fn freelocale(loc: crate::locale_t);
-    pub fn msgrcv(
-        msqid: c_int,
-        msgp: *mut c_void,
-        msgsz: size_t,
-        msgtyp: c_long,
-        msgflg: c_int,
-    ) -> ssize_t;
-
-    pub fn cpuset_getdomain(
-        level: crate::cpulevel_t,
-        which: crate::cpuwhich_t,
-        id: crate::id_t,
-        setsize: size_t,
-        mask: *mut crate::domainset_t,
-        policy: *mut c_int,
-    ) -> c_int;
-    pub fn cpuset_setdomain(
-        level: crate::cpulevel_t,
-        which: crate::cpuwhich_t,
-        id: crate::id_t,
-        setsize: size_t,
-        mask: *const crate::domainset_t,
-        policy: c_int,
-    ) -> c_int;
-
-    pub fn dirname(path: *mut c_char) -> *mut c_char;
-    pub fn basename(path: *mut c_char) -> *mut c_char;
-
-    pub fn qsort_r(
-        base: *mut c_void,
-        num: size_t,
-        size: size_t,
-        compar: Option<unsafe extern "C" fn(*const c_void, *const c_void, *mut c_void) -> c_int>,
-        arg: *mut c_void,
-    );
-}
-
-#[link(name = "kvm")]
-extern "C" {
-    pub fn kvm_kerndisp(kd: *mut crate::kvm_t) -> crate::kssize_t;
-}
-
-cfg_if!
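The makedev()/major()/minor() helpers deleted above encode FreeBSD's 64-bit dev_t: the major number's high bits sit above bit 40 and its low byte at bit 8, while the minor number is split around them. A minimal round-trip sketch against the libc crate (assuming a FreeBSD target; the values are hypothetical and the snippet is not part of this diff) illustrates the layout:

fn main() {
    // Hypothetical device numbers: major bits 8..32 land at bits 40..64,
    // the major low byte at bit 8, and minor bits 8..16 at bits 32..40.
    let (maj, min): (libc::c_uint, libc::c_uint) = (0x123, 0x4567);

    let dev: libc::dev_t = libc::makedev(maj, min);

    // Decoding with major()/minor() recovers the original numbers.
    assert_eq!(libc::major(dev), maj as libc::c_int);
    assert_eq!(libc::minor(dev), min as libc::c_int);
    println!("dev_t = {dev:#018x}");
}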
{ - if #[cfg(target_arch = "x86_64")] { - mod x86_64; - pub use self::x86_64::*; - } -} diff --git a/vendor/libc/src/unix/bsd/freebsdlike/freebsd/freebsd15/x86_64.rs b/vendor/libc/src/unix/bsd/freebsdlike/freebsd/freebsd15/x86_64.rs deleted file mode 100644 index 3e037471fbf68b..00000000000000 --- a/vendor/libc/src/unix/bsd/freebsdlike/freebsd/freebsd15/x86_64.rs +++ /dev/null @@ -1,14 +0,0 @@ -use crate::prelude::*; - -pub const PROC_KPTI_CTL: c_int = crate::PROC_PROCCTL_MD_MIN; -pub const PROC_KPTI_CTL_ENABLE_ON_EXEC: c_int = 1; -pub const PROC_KPTI_CTL_DISABLE_ON_EXEC: c_int = 2; -pub const PROC_KPTI_STATUS: c_int = crate::PROC_PROCCTL_MD_MIN + 1; -pub const PROC_KPTI_STATUS_ACTIVE: c_int = 0x80000000; -pub const PROC_LA_CTL: c_int = crate::PROC_PROCCTL_MD_MIN + 2; -pub const PROC_LA_STATUS: c_int = crate::PROC_PROCCTL_MD_MIN + 3; -pub const PROC_LA_CTL_LA48_ON_EXEC: c_int = 1; -pub const PROC_LA_CTL_LA57_ON_EXEC: c_int = 2; -pub const PROC_LA_CTL_DEFAULT_ON_EXEC: c_int = 3; -pub const PROC_LA_STATUS_LA48: c_int = 0x01000000; -pub const PROC_LA_STATUS_LA57: c_int = 0x02000000; diff --git a/vendor/libc/src/unix/bsd/freebsdlike/freebsd/mod.rs b/vendor/libc/src/unix/bsd/freebsdlike/freebsd/mod.rs deleted file mode 100644 index a5166d4e15c75a..00000000000000 --- a/vendor/libc/src/unix/bsd/freebsdlike/freebsd/mod.rs +++ /dev/null @@ -1,5659 +0,0 @@ -use crate::prelude::*; -use crate::{cmsghdr, off_t}; - -pub type fflags_t = u32; - -pub type vm_prot_t = u_char; -pub type kvaddr_t = u64; -pub type segsz_t = isize; -pub type __fixpt_t = u32; -pub type fixpt_t = __fixpt_t; -pub type __lwpid_t = i32; -pub type lwpid_t = __lwpid_t; -pub type blksize_t = i32; -pub type ksize_t = u64; -pub type inp_gen_t = u64; -pub type so_gen_t = u64; -pub type clockid_t = c_int; -pub type sem_t = _sem; -pub type timer_t = *mut __c_anonymous__timer; - -pub type fsblkcnt_t = u64; -pub type fsfilcnt_t = u64; -pub type idtype_t = c_uint; - -pub type msglen_t = c_ulong; -pub type msgqnum_t = c_ulong; - -pub type cpulevel_t = c_int; -pub type cpuwhich_t = c_int; - -pub type mqd_t = *mut c_void; - -pub type pthread_spinlock_t = *mut __c_anonymous_pthread_spinlock; -pub type pthread_barrierattr_t = *mut __c_anonymous_pthread_barrierattr; -pub type pthread_barrier_t = *mut __c_anonymous_pthread_barrier; - -pub type uuid_t = crate::uuid; -pub type u_int = c_uint; -pub type u_char = c_uchar; -pub type u_long = c_ulong; -pub type u_short = c_ushort; - -pub type caddr_t = *mut c_char; - -pub type fhandle_t = fhandle; - -pub type au_id_t = crate::uid_t; -pub type au_asid_t = crate::pid_t; - -pub type cpusetid_t = c_int; - -pub type sctp_assoc_t = u32; - -pub type eventfd_t = u64; - -#[derive(Debug)] -#[cfg_attr(feature = "extra_traits", derive(Hash, PartialEq, Eq))] -#[repr(u32)] -pub enum devstat_support_flags { - DEVSTAT_ALL_SUPPORTED = 0x00, - DEVSTAT_NO_BLOCKSIZE = 0x01, - DEVSTAT_NO_ORDERED_TAGS = 0x02, - DEVSTAT_BS_UNAVAILABLE = 0x04, -} -impl Copy for devstat_support_flags {} -impl Clone for devstat_support_flags { - fn clone(&self) -> devstat_support_flags { - *self - } -} - -#[derive(Debug)] -#[cfg_attr(feature = "extra_traits", derive(Hash, PartialEq, Eq))] -#[repr(u32)] -pub enum devstat_trans_flags { - DEVSTAT_NO_DATA = 0x00, - DEVSTAT_READ = 0x01, - DEVSTAT_WRITE = 0x02, - DEVSTAT_FREE = 0x03, -} - -impl Copy for devstat_trans_flags {} -impl Clone for devstat_trans_flags { - fn clone(&self) -> devstat_trans_flags { - *self - } -} - -#[derive(Debug)] -#[cfg_attr(feature = "extra_traits", derive(Hash, PartialEq, 
Eq))] -#[repr(u32)] -pub enum devstat_tag_type { - DEVSTAT_TAG_SIMPLE = 0x00, - DEVSTAT_TAG_HEAD = 0x01, - DEVSTAT_TAG_ORDERED = 0x02, - DEVSTAT_TAG_NONE = 0x03, -} -impl Copy for devstat_tag_type {} -impl Clone for devstat_tag_type { - fn clone(&self) -> devstat_tag_type { - *self - } -} - -#[derive(Debug)] -#[cfg_attr(feature = "extra_traits", derive(Hash, PartialEq, Eq))] -#[repr(u32)] -pub enum devstat_match_flags { - DEVSTAT_MATCH_NONE = 0x00, - DEVSTAT_MATCH_TYPE = 0x01, - DEVSTAT_MATCH_IF = 0x02, - DEVSTAT_MATCH_PASS = 0x04, -} -impl Copy for devstat_match_flags {} -impl Clone for devstat_match_flags { - fn clone(&self) -> devstat_match_flags { - *self - } -} - -#[derive(Debug)] -#[cfg_attr(feature = "extra_traits", derive(Hash, PartialEq, Eq))] -#[repr(u32)] -pub enum devstat_priority { - DEVSTAT_PRIORITY_MIN = 0x000, - DEVSTAT_PRIORITY_OTHER = 0x020, - DEVSTAT_PRIORITY_PASS = 0x030, - DEVSTAT_PRIORITY_FD = 0x040, - DEVSTAT_PRIORITY_WFD = 0x050, - DEVSTAT_PRIORITY_TAPE = 0x060, - DEVSTAT_PRIORITY_CD = 0x090, - DEVSTAT_PRIORITY_DISK = 0x110, - DEVSTAT_PRIORITY_ARRAY = 0x120, - DEVSTAT_PRIORITY_MAX = 0xfff, -} -impl Copy for devstat_priority {} -impl Clone for devstat_priority { - fn clone(&self) -> devstat_priority { - *self - } -} - -#[derive(Debug)] -#[cfg_attr(feature = "extra_traits", derive(Hash, PartialEq, Eq))] -#[repr(u32)] -pub enum devstat_type_flags { - DEVSTAT_TYPE_DIRECT = 0x000, - DEVSTAT_TYPE_SEQUENTIAL = 0x001, - DEVSTAT_TYPE_PRINTER = 0x002, - DEVSTAT_TYPE_PROCESSOR = 0x003, - DEVSTAT_TYPE_WORM = 0x004, - DEVSTAT_TYPE_CDROM = 0x005, - DEVSTAT_TYPE_SCANNER = 0x006, - DEVSTAT_TYPE_OPTICAL = 0x007, - DEVSTAT_TYPE_CHANGER = 0x008, - DEVSTAT_TYPE_COMM = 0x009, - DEVSTAT_TYPE_ASC0 = 0x00a, - DEVSTAT_TYPE_ASC1 = 0x00b, - DEVSTAT_TYPE_STORARRAY = 0x00c, - DEVSTAT_TYPE_ENCLOSURE = 0x00d, - DEVSTAT_TYPE_FLOPPY = 0x00e, - DEVSTAT_TYPE_MASK = 0x00f, - DEVSTAT_TYPE_IF_SCSI = 0x010, - DEVSTAT_TYPE_IF_IDE = 0x020, - DEVSTAT_TYPE_IF_OTHER = 0x030, - DEVSTAT_TYPE_IF_MASK = 0x0f0, - DEVSTAT_TYPE_PASS = 0x100, -} -impl Copy for devstat_type_flags {} -impl Clone for devstat_type_flags { - fn clone(&self) -> devstat_type_flags { - *self - } -} - -#[derive(Debug)] -#[cfg_attr(feature = "extra_traits", derive(Hash, PartialEq, Eq))] -#[repr(u32)] -pub enum devstat_metric { - DSM_NONE, - DSM_TOTAL_BYTES, - DSM_TOTAL_BYTES_READ, - DSM_TOTAL_BYTES_WRITE, - DSM_TOTAL_TRANSFERS, - DSM_TOTAL_TRANSFERS_READ, - DSM_TOTAL_TRANSFERS_WRITE, - DSM_TOTAL_TRANSFERS_OTHER, - DSM_TOTAL_BLOCKS, - DSM_TOTAL_BLOCKS_READ, - DSM_TOTAL_BLOCKS_WRITE, - DSM_KB_PER_TRANSFER, - DSM_KB_PER_TRANSFER_READ, - DSM_KB_PER_TRANSFER_WRITE, - DSM_TRANSFERS_PER_SECOND, - DSM_TRANSFERS_PER_SECOND_READ, - DSM_TRANSFERS_PER_SECOND_WRITE, - DSM_TRANSFERS_PER_SECOND_OTHER, - DSM_MB_PER_SECOND, - DSM_MB_PER_SECOND_READ, - DSM_MB_PER_SECOND_WRITE, - DSM_BLOCKS_PER_SECOND, - DSM_BLOCKS_PER_SECOND_READ, - DSM_BLOCKS_PER_SECOND_WRITE, - DSM_MS_PER_TRANSACTION, - DSM_MS_PER_TRANSACTION_READ, - DSM_MS_PER_TRANSACTION_WRITE, - DSM_SKIP, - DSM_TOTAL_BYTES_FREE, - DSM_TOTAL_TRANSFERS_FREE, - DSM_TOTAL_BLOCKS_FREE, - DSM_KB_PER_TRANSFER_FREE, - DSM_MB_PER_SECOND_FREE, - DSM_TRANSFERS_PER_SECOND_FREE, - DSM_BLOCKS_PER_SECOND_FREE, - DSM_MS_PER_TRANSACTION_OTHER, - DSM_MS_PER_TRANSACTION_FREE, - DSM_BUSY_PCT, - DSM_QUEUE_LENGTH, - DSM_TOTAL_DURATION, - DSM_TOTAL_DURATION_READ, - DSM_TOTAL_DURATION_WRITE, - DSM_TOTAL_DURATION_FREE, - DSM_TOTAL_DURATION_OTHER, - DSM_TOTAL_BUSY_TIME, - DSM_MAX, -} -impl Copy for devstat_metric {} -impl Clone for 
devstat_metric { - fn clone(&self) -> devstat_metric { - *self - } -} - -#[derive(Debug)] -#[cfg_attr(feature = "extra_traits", derive(Hash, PartialEq, Eq))] -#[repr(u32)] -pub enum devstat_select_mode { - DS_SELECT_ADD, - DS_SELECT_ONLY, - DS_SELECT_REMOVE, - DS_SELECT_ADDONLY, -} -impl Copy for devstat_select_mode {} -impl Clone for devstat_select_mode { - fn clone(&self) -> devstat_select_mode { - *self - } -} - -s! { - pub struct aiocb { - pub aio_fildes: c_int, - pub aio_offset: off_t, - pub aio_buf: *mut c_void, - pub aio_nbytes: size_t, - __unused1: [c_int; 2], - __unused2: *mut c_void, - pub aio_lio_opcode: c_int, - pub aio_reqprio: c_int, - // unused 3 through 5 are the __aiocb_private structure - __unused3: c_long, - __unused4: c_long, - __unused5: *mut c_void, - pub aio_sigevent: sigevent, - } - - pub struct jail { - pub version: u32, - pub path: *mut c_char, - pub hostname: *mut c_char, - pub jailname: *mut c_char, - pub ip4s: c_uint, - pub ip6s: c_uint, - pub ip4: *mut crate::in_addr, - pub ip6: *mut crate::in6_addr, - } - - pub struct statvfs { - pub f_bavail: crate::fsblkcnt_t, - pub f_bfree: crate::fsblkcnt_t, - pub f_blocks: crate::fsblkcnt_t, - pub f_favail: crate::fsfilcnt_t, - pub f_ffree: crate::fsfilcnt_t, - pub f_files: crate::fsfilcnt_t, - pub f_bsize: c_ulong, - pub f_flag: c_ulong, - pub f_frsize: c_ulong, - pub f_fsid: c_ulong, - pub f_namemax: c_ulong, - } - - // internal structure has changed over time - pub struct _sem { - data: [u32; 4], - } - pub struct sembuf { - pub sem_num: c_ushort, - pub sem_op: c_short, - pub sem_flg: c_short, - } - - pub struct input_event { - pub time: crate::timeval, - pub type_: crate::u_short, - pub code: crate::u_short, - pub value: i32, - } - - pub struct input_absinfo { - pub value: i32, - pub minimum: i32, - pub maximum: i32, - pub fuzz: i32, - pub flat: i32, - pub resolution: i32, - } - - pub struct msqid_ds { - pub msg_perm: crate::ipc_perm, - __unused1: *mut c_void, - __unused2: *mut c_void, - pub msg_cbytes: crate::msglen_t, - pub msg_qnum: crate::msgqnum_t, - pub msg_qbytes: crate::msglen_t, - pub msg_lspid: crate::pid_t, - pub msg_lrpid: crate::pid_t, - pub msg_stime: crate::time_t, - pub msg_rtime: crate::time_t, - pub msg_ctime: crate::time_t, - } - - pub struct stack_t { - pub ss_sp: *mut c_void, - pub ss_size: size_t, - pub ss_flags: c_int, - } - - pub struct mmsghdr { - pub msg_hdr: crate::msghdr, - pub msg_len: ssize_t, - } - - pub struct sockcred { - pub sc_uid: crate::uid_t, - pub sc_euid: crate::uid_t, - pub sc_gid: crate::gid_t, - pub sc_egid: crate::gid_t, - pub sc_ngroups: c_int, - pub sc_groups: [crate::gid_t; 1], - } - - pub struct ptrace_vm_entry { - pub pve_entry: c_int, - pub pve_timestamp: c_int, - pub pve_start: c_ulong, - pub pve_end: c_ulong, - pub pve_offset: c_ulong, - pub pve_prot: c_uint, - pub pve_pathlen: c_uint, - pub pve_fileid: c_long, - pub pve_fsid: u32, - pub pve_path: *mut c_char, - } - - pub struct ptrace_lwpinfo { - pub pl_lwpid: lwpid_t, - pub pl_event: c_int, - pub pl_flags: c_int, - pub pl_sigmask: crate::sigset_t, - pub pl_siglist: crate::sigset_t, - pub pl_siginfo: crate::siginfo_t, - pub pl_tdname: [c_char; crate::MAXCOMLEN as usize + 1], - pub pl_child_pid: crate::pid_t, - pub pl_syscall_code: c_uint, - pub pl_syscall_narg: c_uint, - } - - pub struct ptrace_sc_ret { - pub sr_retval: [crate::register_t; 2], - pub sr_error: c_int, - } - - pub struct ptrace_coredump { - pub pc_fd: c_int, - pub pc_flags: u32, - pub pc_limit: off_t, - } - - pub struct ptrace_sc_remote { - pub 
pscr_ret: ptrace_sc_ret, - pub pscr_syscall: c_uint, - pub pscr_nargs: c_uint, - pub pscr_args: *mut crate::register_t, - } - - pub struct cpuset_t { - #[cfg(all(any(freebsd15, freebsd14), target_pointer_width = "64"))] - __bits: [c_long; 16], - #[cfg(all(any(freebsd15, freebsd14), target_pointer_width = "32"))] - __bits: [c_long; 32], - #[cfg(all(not(any(freebsd15, freebsd14)), target_pointer_width = "64"))] - __bits: [c_long; 4], - #[cfg(all(not(any(freebsd15, freebsd14)), target_pointer_width = "32"))] - __bits: [c_long; 8], - } - - pub struct cap_rights_t { - cr_rights: [u64; 2], - } - - pub struct umutex { - m_owner: crate::lwpid_t, - m_flags: u32, - m_ceilings: [u32; 2], - m_rb_link: crate::uintptr_t, - #[cfg(target_pointer_width = "32")] - m_pad: u32, - m_spare: [u32; 2], - } - - pub struct ucond { - c_has_waiters: u32, - c_flags: u32, - c_clockid: u32, - c_spare: [u32; 1], - } - - pub struct uuid { - pub time_low: u32, - pub time_mid: u16, - pub time_hi_and_version: u16, - pub clock_seq_hi_and_reserved: u8, - pub clock_seq_low: u8, - pub node: [u8; _UUID_NODE_LEN], - } - - pub struct __c_anonymous_pthread_spinlock { - s_clock: umutex, - } - - pub struct __c_anonymous_pthread_barrierattr { - pshared: c_int, - } - - pub struct __c_anonymous_pthread_barrier { - b_lock: umutex, - b_cv: ucond, - b_cycle: i64, - b_count: c_int, - b_waiters: c_int, - b_refcount: c_int, - b_destroying: c_int, - } - - pub struct kinfo_vmentry { - pub kve_structsize: c_int, - pub kve_type: c_int, - pub kve_start: u64, - pub kve_end: u64, - pub kve_offset: u64, - pub kve_vn_fileid: u64, - #[cfg(not(freebsd11))] - pub kve_vn_fsid_freebsd11: u32, - #[cfg(freebsd11)] - pub kve_vn_fsid: u32, - pub kve_flags: c_int, - pub kve_resident: c_int, - pub kve_private_resident: c_int, - pub kve_protection: c_int, - pub kve_ref_count: c_int, - pub kve_shadow_count: c_int, - pub kve_vn_type: c_int, - pub kve_vn_size: u64, - #[cfg(not(freebsd11))] - pub kve_vn_rdev_freebsd11: u32, - #[cfg(freebsd11)] - pub kve_vn_rdev: u32, - pub kve_vn_mode: u16, - pub kve_status: u16, - #[cfg(not(freebsd11))] - pub kve_vn_fsid: u64, - #[cfg(not(freebsd11))] - pub kve_vn_rdev: u64, - #[cfg(not(freebsd11))] - _kve_is_spare: [c_int; 8], - #[cfg(freebsd11)] - _kve_is_spare: [c_int; 12], - pub kve_path: [[c_char; 32]; 32], - } - - pub struct __c_anonymous_filestat { - pub stqe_next: *mut filestat, - } - - pub struct filestat { - pub fs_type: c_int, - pub fs_flags: c_int, - pub fs_fflags: c_int, - pub fs_uflags: c_int, - pub fs_fd: c_int, - pub fs_ref_count: c_int, - pub fs_offset: off_t, - pub fs_typedep: *mut c_void, - pub fs_path: *mut c_char, - pub next: __c_anonymous_filestat, - pub fs_cap_rights: cap_rights_t, - } - - pub struct filestat_list { - pub stqh_first: *mut filestat, - pub stqh_last: *mut *mut filestat, - } - - pub struct procstat { - pub tpe: c_int, - pub kd: crate::uintptr_t, - pub vmentries: *mut c_void, - pub files: *mut c_void, - pub argv: *mut c_void, - pub envv: *mut c_void, - pub core: crate::uintptr_t, - } - - pub struct itimerspec { - pub it_interval: crate::timespec, - pub it_value: crate::timespec, - } - - pub struct __c_anonymous__timer { - _priv: [c_int; 3], - } - - /// Used to hold a copy of the command line, if it had a sane length. - pub struct pargs { - /// Reference count. - pub ar_ref: u_int, - /// Length. - pub ar_length: u_int, - /// Arguments. - pub ar_args: [c_uchar; 1], - } - - pub struct priority { - /// Scheduling class. - pub pri_class: u_char, - /// Normal priority level. 
- pub pri_level: u_char, - /// Priority before propagation. - pub pri_native: u_char, - /// User priority based on p_cpu and p_nice. - pub pri_user: u_char, - } - - pub struct kvm_swap { - pub ksw_devname: [c_char; 32], - pub ksw_used: u_int, - pub ksw_total: u_int, - pub ksw_flags: c_int, - pub ksw_reserved1: u_int, - pub ksw_reserved2: u_int, - } - - pub struct nlist { - /// symbol name (in memory) - pub n_name: *const c_char, - /// type defines - pub n_type: c_uchar, - /// "type" and binding information - pub n_other: c_char, - /// used by stab entries - pub n_desc: c_short, - pub n_value: c_ulong, - } - - pub struct kvm_nlist { - pub n_name: *const c_char, - pub n_type: c_uchar, - pub n_value: crate::kvaddr_t, - } - - pub struct __c_anonymous_sem { - _priv: crate::uintptr_t, - } - - pub struct semid_ds { - pub sem_perm: crate::ipc_perm, - pub __sem_base: *mut __c_anonymous_sem, - pub sem_nsems: c_ushort, - pub sem_otime: crate::time_t, - pub sem_ctime: crate::time_t, - } - - pub struct vmtotal { - pub t_vm: u64, - pub t_avm: u64, - pub t_rm: u64, - pub t_arm: u64, - pub t_vmshr: u64, - pub t_avmshr: u64, - pub t_rmshr: u64, - pub t_armshr: u64, - pub t_free: u64, - pub t_rq: i16, - pub t_dw: i16, - pub t_pw: i16, - pub t_sl: i16, - pub t_sw: i16, - pub t_pad: [u16; 3], - } - - pub struct sockstat { - pub inp_ppcb: u64, - pub so_addr: u64, - pub so_pcb: u64, - pub unp_conn: u64, - pub dom_family: c_int, - pub proto: c_int, - pub so_rcv_sb_state: c_int, - pub so_snd_sb_state: c_int, - /// Socket address. - pub sa_local: crate::sockaddr_storage, - /// Peer address. - pub sa_peer: crate::sockaddr_storage, - pub type_: c_int, - pub dname: [c_char; 32], - #[cfg(any(freebsd12, freebsd13, freebsd14, freebsd15))] - pub sendq: c_uint, - #[cfg(any(freebsd12, freebsd13, freebsd14, freebsd15))] - pub recvq: c_uint, - } - - pub struct shmstat { - pub size: u64, - pub mode: u16, - } - - pub struct spacectl_range { - pub r_offset: off_t, - pub r_len: off_t, - } - - pub struct rusage_ext { - pub rux_runtime: u64, - pub rux_uticks: u64, - pub rux_sticks: u64, - pub rux_iticks: u64, - pub rux_uu: u64, - pub rux_su: u64, - pub rux_tu: u64, - } - - pub struct if_clonereq { - pub ifcr_total: c_int, - pub ifcr_count: c_int, - pub ifcr_buffer: *mut c_char, - } - - pub struct if_msghdr { - /// to skip over non-understood messages - pub ifm_msglen: c_ushort, - /// future binary compatibility - pub ifm_version: c_uchar, - /// message type - pub ifm_type: c_uchar, - /// like rtm_addrs - pub ifm_addrs: c_int, - /// value of if_flags - pub ifm_flags: c_int, - /// index for associated ifp - pub ifm_index: c_ushort, - pub _ifm_spare1: c_ushort, - /// statistics and other data about if - pub ifm_data: if_data, - } - - pub struct if_msghdrl { - /// to skip over non-understood messages - pub ifm_msglen: c_ushort, - /// future binary compatibility - pub ifm_version: c_uchar, - /// message type - pub ifm_type: c_uchar, - /// like rtm_addrs - pub ifm_addrs: c_int, - /// value of if_flags - pub ifm_flags: c_int, - /// index for associated ifp - pub ifm_index: c_ushort, - /// spare space to grow if_index, see if_var.h - pub _ifm_spare1: c_ushort, - /// length of if_msghdrl incl. 
if_data - pub ifm_len: c_ushort, - /// offset of if_data from beginning - pub ifm_data_off: c_ushort, - pub _ifm_spare2: c_int, - /// statistics and other data about if - pub ifm_data: if_data, - } - - pub struct ifa_msghdr { - /// to skip over non-understood messages - pub ifam_msglen: c_ushort, - /// future binary compatibility - pub ifam_version: c_uchar, - /// message type - pub ifam_type: c_uchar, - /// like rtm_addrs - pub ifam_addrs: c_int, - /// value of ifa_flags - pub ifam_flags: c_int, - /// index for associated ifp - pub ifam_index: c_ushort, - pub _ifam_spare1: c_ushort, - /// value of ifa_ifp->if_metric - pub ifam_metric: c_int, - } - - pub struct ifa_msghdrl { - /// to skip over non-understood messages - pub ifam_msglen: c_ushort, - /// future binary compatibility - pub ifam_version: c_uchar, - /// message type - pub ifam_type: c_uchar, - /// like rtm_addrs - pub ifam_addrs: c_int, - /// value of ifa_flags - pub ifam_flags: c_int, - /// index for associated ifp - pub ifam_index: c_ushort, - /// spare space to grow if_index, see if_var.h - pub _ifam_spare1: c_ushort, - /// length of ifa_msghdrl incl. if_data - pub ifam_len: c_ushort, - /// offset of if_data from beginning - pub ifam_data_off: c_ushort, - /// value of ifa_ifp->if_metric - pub ifam_metric: c_int, - /// statistics and other data about if or address - pub ifam_data: if_data, - } - - pub struct ifma_msghdr { - /// to skip over non-understood messages - pub ifmam_msglen: c_ushort, - /// future binary compatibility - pub ifmam_version: c_uchar, - /// message type - pub ifmam_type: c_uchar, - /// like rtm_addrs - pub ifmam_addrs: c_int, - /// value of ifa_flags - pub ifmam_flags: c_int, - /// index for associated ifp - pub ifmam_index: c_ushort, - pub _ifmam_spare1: c_ushort, - } - - pub struct if_announcemsghdr { - /// to skip over non-understood messages - pub ifan_msglen: c_ushort, - /// future binary compatibility - pub ifan_version: c_uchar, - /// message type - pub ifan_type: c_uchar, - /// index for associated ifp - pub ifan_index: c_ushort, - /// if name, e.g. "en0" - pub ifan_name: [c_char; crate::IFNAMSIZ as usize], - /// what type of announcement - pub ifan_what: c_ushort, - } - - pub struct ifreq_buffer { - pub length: size_t, - pub buffer: *mut c_void, - } - - pub struct ifaliasreq { - /// if name, e.g. "en0" - pub ifra_name: [c_char; crate::IFNAMSIZ as usize], - pub ifra_addr: crate::sockaddr, - pub ifra_broadaddr: crate::sockaddr, - pub ifra_mask: crate::sockaddr, - pub ifra_vhid: c_int, - } - - /// 9.x compat - pub struct oifaliasreq { - /// if name, e.g. "en0" - pub ifra_name: [c_char; crate::IFNAMSIZ as usize], - pub ifra_addr: crate::sockaddr, - pub ifra_broadaddr: crate::sockaddr, - pub ifra_mask: crate::sockaddr, - } - - pub struct ifmediareq { - /// if name, e.g. "en0" - pub ifm_name: [c_char; crate::IFNAMSIZ as usize], - /// current media options - pub ifm_current: c_int, - /// don't care mask - pub ifm_mask: c_int, - /// media status - pub ifm_status: c_int, - /// active options - pub ifm_active: c_int, - /// # entries in ifm_ulist array - pub ifm_count: c_int, - /// media words - pub ifm_ulist: *mut c_int, - } - - pub struct ifdrv { - /// if name, e.g. 
"en0" - pub ifd_name: [c_char; crate::IFNAMSIZ as usize], - pub ifd_cmd: c_ulong, - pub ifd_len: size_t, - pub ifd_data: *mut c_void, - } - - pub struct ifi2creq { - /// i2c address (0xA0, 0xA2) - pub dev_addr: u8, - /// read offset - pub offset: u8, - /// read length - pub len: u8, - pub spare0: u8, - pub spare1: u32, - /// read buffer - pub data: [u8; 8], - } - - pub struct ifrsshash { - /// if name, e.g. "en0" - pub ifrh_name: [c_char; crate::IFNAMSIZ as usize], - /// RSS_FUNC_ - pub ifrh_func: u8, - pub ifrh_spare0: u8, - pub ifrh_spare1: u16, - /// RSS_TYPE_ - pub ifrh_types: u32, - } - - pub struct ifmibdata { - /// name of interface - pub ifmd_name: [c_char; crate::IFNAMSIZ as usize], - /// number of promiscuous listeners - pub ifmd_pcount: c_int, - /// interface flags - pub ifmd_flags: c_int, - /// instantaneous length of send queue - pub ifmd_snd_len: c_int, - /// maximum length of send queue - pub ifmd_snd_maxlen: c_int, - /// number of drops in send queue - pub ifmd_snd_drops: c_int, - /// for future expansion - pub ifmd_filler: [c_int; 4], - /// generic information and statistics - pub ifmd_data: if_data, - } - - pub struct ifmib_iso_8802_3 { - pub dot3StatsAlignmentErrors: u32, - pub dot3StatsFCSErrors: u32, - pub dot3StatsSingleCollisionFrames: u32, - pub dot3StatsMultipleCollisionFrames: u32, - pub dot3StatsSQETestErrors: u32, - pub dot3StatsDeferredTransmissions: u32, - pub dot3StatsLateCollisions: u32, - pub dot3StatsExcessiveCollisions: u32, - pub dot3StatsInternalMacTransmitErrors: u32, - pub dot3StatsCarrierSenseErrors: u32, - pub dot3StatsFrameTooLongs: u32, - pub dot3StatsInternalMacReceiveErrors: u32, - pub dot3StatsEtherChipSet: u32, - pub dot3StatsMissedFrames: u32, - pub dot3StatsCollFrequencies: [u32; 16], - pub dot3Compliance: u32, - } - - pub struct __c_anonymous_ph { - pub ph1: u64, - pub ph2: u64, - } - - pub struct fid { - pub fid_len: c_ushort, - pub fid_data0: c_ushort, - pub fid_data: [c_char; crate::MAXFIDSZ as usize], - } - - pub struct fhandle { - pub fh_fsid: crate::fsid_t, - pub fh_fid: fid, - } - - pub struct bintime { - pub sec: crate::time_t, - pub frac: u64, - } - - pub struct clockinfo { - /// clock frequency - pub hz: c_int, - /// micro-seconds per hz tick - pub tick: c_int, - pub spare: c_int, - /// statistics clock frequency - pub stathz: c_int, - /// profiling clock frequency - pub profhz: c_int, - } - - pub struct __c_anonymous_stailq_entry_devstat { - pub stqe_next: *mut devstat, - } - - pub struct devstat { - /// Update sequence - pub sequence0: crate::u_int, - /// Allocated entry - pub allocated: c_int, - /// started ops - pub start_count: crate::u_int, - /// completed ops - pub end_count: crate::u_int, - /// busy time unaccounted for since this time - pub busy_from: bintime, - pub dev_links: __c_anonymous_stailq_entry_devstat, - /// Devstat device number. - pub device_number: u32, - pub device_name: [c_char; DEVSTAT_NAME_LEN as usize], - pub unit_number: c_int, - pub bytes: [u64; DEVSTAT_N_TRANS_FLAGS as usize], - pub operations: [u64; DEVSTAT_N_TRANS_FLAGS as usize], - pub duration: [bintime; DEVSTAT_N_TRANS_FLAGS as usize], - pub busy_time: bintime, - /// Time the device was created. - pub creation_time: bintime, - /// Block size, bytes - pub block_size: u32, - /// The number of simple, ordered, and head of queue tags sent. - pub tag_types: [u64; 3], - /// Which statistics are supported by a given device. - pub flags: devstat_support_flags, - /// Device type - pub device_type: devstat_type_flags, - /// Controls list pos. 
- pub priority: devstat_priority, - /// Identification for GEOM nodes - pub id: *const c_void, - /// Update sequence - pub sequence1: crate::u_int, - } - - pub struct devstat_match { - pub match_fields: devstat_match_flags, - pub device_type: devstat_type_flags, - pub num_match_categories: c_int, - } - - pub struct devstat_match_table { - pub match_str: *const c_char, - pub type_: devstat_type_flags, - pub match_field: devstat_match_flags, - } - - pub struct device_selection { - pub device_number: u32, - pub device_name: [c_char; DEVSTAT_NAME_LEN as usize], - pub unit_number: c_int, - pub selected: c_int, - pub bytes: u64, - pub position: c_int, - } - - pub struct devinfo { - pub devices: *mut devstat, - pub mem_ptr: *mut u8, - pub generation: c_long, - pub numdevs: c_int, - } - - pub struct sockcred2 { - pub sc_version: c_int, - pub sc_pid: crate::pid_t, - pub sc_uid: crate::uid_t, - pub sc_euid: crate::uid_t, - pub sc_gid: crate::gid_t, - pub sc_egid: crate::gid_t, - pub sc_ngroups: c_int, - pub sc_groups: [crate::gid_t; 1], - } - - pub struct ifconf { - pub ifc_len: c_int, - pub ifc_ifcu: __c_anonymous_ifc_ifcu, - } - - pub struct au_mask_t { - pub am_success: c_uint, - pub am_failure: c_uint, - } - - pub struct au_tid_t { - pub port: u32, - pub machine: u32, - } - - pub struct auditinfo_t { - pub ai_auid: crate::au_id_t, - pub ai_mask: crate::au_mask_t, - pub ai_termid: au_tid_t, - pub ai_asid: crate::au_asid_t, - } - - pub struct tcp_fastopen { - pub enable: c_int, - pub psk: [u8; crate::TCP_FASTOPEN_PSK_LEN as usize], - } - - pub struct tcp_function_set { - pub function_set_name: [c_char; crate::TCP_FUNCTION_NAME_LEN_MAX as usize], - pub pcbcnt: u32, - } - - // Note: this structure will change in a backwards-incompatible way in - // FreeBSD 15. 
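The tcp_info structure defined just below is filled in by the kernel and read back with getsockopt(2); on FreeBSD only the fields without a leading underscore carry meaningful values. A short usage sketch (assuming the libc crate also exposes a TCP_INFO constant for this target; not part of this diff):

use std::net::TcpStream;
use std::os::fd::AsRawFd;
use std::{io, mem};

fn tcp_rtt(sock: &TcpStream) -> io::Result<u32> {
    // Zero-initialised buffer the kernel copies the statistics into.
    let mut info: libc::tcp_info = unsafe { mem::zeroed() };
    let mut len = mem::size_of::<libc::tcp_info>() as libc::socklen_t;
    let rc = unsafe {
        libc::getsockopt(
            sock.as_raw_fd(),
            libc::IPPROTO_TCP,
            libc::TCP_INFO, // assumed to be provided by the bindings
            &mut info as *mut libc::tcp_info as *mut libc::c_void,
            &mut len,
        )
    };
    if rc != 0 {
        return Err(io::Error::last_os_error());
    }
    Ok(info.tcpi_rtt) // smoothed RTT as reported by the kernel
}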
- pub struct tcp_info { - pub tcpi_state: u8, - pub __tcpi_ca_state: u8, - pub __tcpi_retransmits: u8, - pub __tcpi_probes: u8, - pub __tcpi_backoff: u8, - pub tcpi_options: u8, - pub tcp_snd_wscale: u8, - pub tcp_rcv_wscale: u8, - pub tcpi_rto: u32, - pub __tcpi_ato: u32, - pub tcpi_snd_mss: u32, - pub tcpi_rcv_mss: u32, - pub __tcpi_unacked: u32, - pub __tcpi_sacked: u32, - pub __tcpi_lost: u32, - pub __tcpi_retrans: u32, - pub __tcpi_fackets: u32, - pub __tcpi_last_data_sent: u32, - pub __tcpi_last_ack_sent: u32, - pub tcpi_last_data_recv: u32, - pub __tcpi_last_ack_recv: u32, - pub __tcpi_pmtu: u32, - pub __tcpi_rcv_ssthresh: u32, - pub tcpi_rtt: u32, - pub tcpi_rttvar: u32, - pub tcpi_snd_ssthresh: u32, - pub tcpi_snd_cwnd: u32, - pub __tcpi_advmss: u32, - pub __tcpi_reordering: u32, - pub __tcpi_rcv_rtt: u32, - pub tcpi_rcv_space: u32, - pub tcpi_snd_wnd: u32, - pub tcpi_snd_bwnd: u32, - pub tcpi_snd_nxt: u32, - pub tcpi_rcv_nxt: u32, - pub tcpi_toe_tid: u32, - pub tcpi_snd_rexmitpack: u32, - pub tcpi_rcv_ooopack: u32, - pub tcpi_snd_zerowin: u32, - #[cfg(any(freebsd15, freebsd14))] - pub tcpi_delivered_ce: u32, - #[cfg(any(freebsd15, freebsd14))] - pub tcpi_received_ce: u32, - #[cfg(any(freebsd15, freebsd14))] - pub __tcpi_delivered_e1_bytes: u32, - #[cfg(any(freebsd15, freebsd14))] - pub __tcpi_delivered_e0_bytes: u32, - #[cfg(any(freebsd15, freebsd14))] - pub __tcpi_delivered_ce_bytes: u32, - #[cfg(any(freebsd15, freebsd14))] - pub __tcpi_received_e1_bytes: u32, - #[cfg(any(freebsd15, freebsd14))] - pub __tcpi_received_e0_bytes: u32, - #[cfg(any(freebsd15, freebsd14))] - pub __tcpi_received_ce_bytes: u32, - #[cfg(any(freebsd15, freebsd14))] - pub tcpi_total_tlp: u32, - #[cfg(any(freebsd15, freebsd14))] - pub tcpi_total_tlp_bytes: u64, - #[cfg(any(freebsd15, freebsd14))] - pub tcpi_snd_una: u32, - #[cfg(any(freebsd15, freebsd14))] - pub tcpi_snd_max: u32, - #[cfg(any(freebsd15, freebsd14))] - pub tcpi_rcv_numsacks: u32, - #[cfg(any(freebsd15, freebsd14))] - pub tcpi_rcv_adv: u32, - #[cfg(any(freebsd15, freebsd14))] - pub tcpi_dupacks: u32, - #[cfg(freebsd14)] - pub __tcpi_pad: [u32; 10], - #[cfg(freebsd15)] - pub __tcpi_pad: [u32; 14], - #[cfg(not(any(freebsd15, freebsd14)))] - pub __tcpi_pad: [u32; 26], - } - - pub struct _umtx_time { - pub _timeout: crate::timespec, - pub _flags: u32, - pub _clockid: u32, - } - - pub struct shm_largepage_conf { - pub psind: c_int, - pub alloc_policy: c_int, - __pad: [c_int; 10], - } - - pub struct memory_type { - __priva: [crate::uintptr_t; 32], - __privb: [crate::uintptr_t; 26], - } - - pub struct memory_type_list { - __priv: [crate::uintptr_t; 2], - } - - pub struct pidfh { - __priva: [[crate::uintptr_t; 32]; 8], - __privb: [crate::uintptr_t; 2], - } - - pub struct sctp_event { - pub se_assoc_id: crate::sctp_assoc_t, - pub se_type: u16, - pub se_on: u8, - } - - pub struct sctp_event_subscribe { - pub sctp_data_io_event: u8, - pub sctp_association_event: u8, - pub sctp_address_event: u8, - pub sctp_send_failure_event: u8, - pub sctp_peer_error_event: u8, - pub sctp_shutdown_event: u8, - pub sctp_partial_delivery_event: u8, - pub sctp_adaptation_layer_event: u8, - pub sctp_authentication_event: u8, - pub sctp_sender_dry_event: u8, - pub sctp_stream_reset_event: u8, - } - - pub struct sctp_initmsg { - pub sinit_num_ostreams: u16, - pub sinit_max_instreams: u16, - pub sinit_max_attempts: u16, - pub sinit_max_init_timeo: u16, - } - - pub struct sctp_sndrcvinfo { - pub sinfo_stream: u16, - pub sinfo_ssn: u16, - pub sinfo_flags: u16, - pub sinfo_ppid: 
u32, - pub sinfo_context: u32, - pub sinfo_timetolive: u32, - pub sinfo_tsn: u32, - pub sinfo_cumtsn: u32, - pub sinfo_assoc_id: crate::sctp_assoc_t, - pub sinfo_keynumber: u16, - pub sinfo_keynumber_valid: u16, - pub __reserve_pad: [[u8; 23]; 4], - } - - pub struct sctp_extrcvinfo { - pub sinfo_stream: u16, - pub sinfo_ssn: u16, - pub sinfo_flags: u16, - pub sinfo_ppid: u32, - pub sinfo_context: u32, - pub sinfo_timetolive: u32, - pub sinfo_tsn: u32, - pub sinfo_cumtsn: u32, - pub sinfo_assoc_id: crate::sctp_assoc_t, - pub serinfo_next_flags: u16, - pub serinfo_next_stream: u16, - pub serinfo_next_aid: u32, - pub serinfo_next_length: u32, - pub serinfo_next_ppid: u32, - pub sinfo_keynumber: u16, - pub sinfo_keynumber_valid: u16, - pub __reserve_pad: [[u8; 19]; 4], - } - - pub struct sctp_sndinfo { - pub snd_sid: u16, - pub snd_flags: u16, - pub snd_ppid: u32, - pub snd_context: u32, - pub snd_assoc_id: crate::sctp_assoc_t, - } - - pub struct sctp_prinfo { - pub pr_policy: u16, - pub pr_value: u32, - } - - pub struct sctp_default_prinfo { - pub pr_policy: u16, - pub pr_value: u32, - pub pr_assoc_id: crate::sctp_assoc_t, - } - - pub struct sctp_authinfo { - pub auth_keynumber: u16, - } - - pub struct sctp_rcvinfo { - pub rcv_sid: u16, - pub rcv_ssn: u16, - pub rcv_flags: u16, - pub rcv_ppid: u32, - pub rcv_tsn: u32, - pub rcv_cumtsn: u32, - pub rcv_context: u32, - pub rcv_assoc_id: crate::sctp_assoc_t, - } - - pub struct sctp_nxtinfo { - pub nxt_sid: u16, - pub nxt_flags: u16, - pub nxt_ppid: u32, - pub nxt_length: u32, - pub nxt_assoc_id: crate::sctp_assoc_t, - } - - pub struct sctp_recvv_rn { - pub recvv_rcvinfo: sctp_rcvinfo, - pub recvv_nxtinfo: sctp_nxtinfo, - } - - pub struct sctp_sendv_spa { - pub sendv_flags: u32, - pub sendv_sndinfo: sctp_sndinfo, - pub sendv_prinfo: sctp_prinfo, - pub sendv_authinfo: sctp_authinfo, - } - - pub struct sctp_snd_all_completes { - pub sall_stream: u16, - pub sall_flags: u16, - pub sall_ppid: u32, - pub sall_context: u32, - pub sall_num_sent: u32, - pub sall_num_failed: u32, - } - - pub struct sctp_pcbinfo { - pub ep_count: u32, - pub asoc_count: u32, - pub laddr_count: u32, - pub raddr_count: u32, - pub chk_count: u32, - pub readq_count: u32, - pub free_chunks: u32, - pub stream_oque: u32, - } - - pub struct sctp_sockstat { - pub ss_assoc_id: crate::sctp_assoc_t, - pub ss_total_sndbuf: u32, - pub ss_total_recv_buf: u32, - } - - pub struct sctp_assoc_change { - pub sac_type: u16, - pub sac_flags: u16, - pub sac_length: u32, - pub sac_state: u16, - pub sac_error: u16, - pub sac_outbound_streams: u16, - pub sac_inbound_streams: u16, - pub sac_assoc_id: crate::sctp_assoc_t, - pub sac_info: [u8; 0], - } - - pub struct sctp_paddr_change { - pub spc_type: u16, - pub spc_flags: u16, - pub spc_length: u32, - pub spc_aaddr: crate::sockaddr_storage, - pub spc_state: u32, - pub spc_error: u32, - pub spc_assoc_id: crate::sctp_assoc_t, - } - - pub struct sctp_remote_error { - pub sre_type: u16, - pub sre_flags: u16, - pub sre_length: u32, - pub sre_error: u16, - pub sre_assoc_id: crate::sctp_assoc_t, - pub sre_data: [u8; 0], - } - - pub struct sctp_send_failed_event { - pub ssfe_type: u16, - pub ssfe_flags: u16, - pub ssfe_length: u32, - pub ssfe_error: u32, - pub ssfe_info: sctp_sndinfo, - pub ssfe_assoc_id: crate::sctp_assoc_t, - pub ssfe_data: [u8; 0], - } - - pub struct sctp_shutdown_event { - pub sse_type: u16, - pub sse_flags: u16, - pub sse_length: u32, - pub sse_assoc_id: crate::sctp_assoc_t, - } - - pub struct sctp_adaptation_event { - pub sai_type: u16, - 
pub sai_flags: u16, - pub sai_length: u32, - pub sai_adaptation_ind: u32, - pub sai_assoc_id: crate::sctp_assoc_t, - } - - pub struct sctp_setadaptation { - pub ssb_adaptation_ind: u32, - } - - pub struct sctp_pdapi_event { - pub pdapi_type: u16, - pub pdapi_flags: u16, - pub pdapi_length: u32, - pub pdapi_indication: u32, - pub pdapi_stream: u16, - pub pdapi_seq: u16, - pub pdapi_assoc_id: crate::sctp_assoc_t, - } - - pub struct sctp_sender_dry_event { - pub sender_dry_type: u16, - pub sender_dry_flags: u16, - pub sender_dry_length: u32, - pub sender_dry_assoc_id: crate::sctp_assoc_t, - } - - pub struct sctp_stream_reset_event { - pub strreset_type: u16, - pub strreset_flags: u16, - pub strreset_length: u32, - pub strreset_assoc_id: crate::sctp_assoc_t, - pub strreset_stream_list: [u16; 0], - } - - pub struct sctp_stream_change_event { - pub strchange_type: u16, - pub strchange_flags: u16, - pub strchange_length: u32, - pub strchange_assoc_id: crate::sctp_assoc_t, - pub strchange_instrms: u16, - pub strchange_outstrms: u16, - } - - pub struct filedesc { - pub fd_files: *mut fdescenttbl, - pub fd_map: *mut c_ulong, - pub fd_freefile: c_int, - pub fd_refcnt: c_int, - pub fd_holdcnt: c_int, - fd_sx: sx, - fd_kqlist: kqlist, - pub fd_holdleaderscount: c_int, - pub fd_holdleaderswakeup: c_int, - } - - pub struct fdescenttbl { - pub fdt_nfiles: c_int, - fdt_ofiles: [*mut c_void; 0], - } - - // FIXME: Should be private. - #[doc(hidden)] - pub struct sx { - lock_object: lock_object, - sx_lock: crate::uintptr_t, - } - - // FIXME: Should be private. - #[doc(hidden)] - pub struct lock_object { - lo_name: *const c_char, - lo_flags: c_uint, - lo_data: c_uint, - // This is normally `struct witness`. - lo_witness: *mut c_void, - } - - // FIXME: Should be private. - #[doc(hidden)] - pub struct kqlist { - tqh_first: *mut c_void, - tqh_last: *mut *mut c_void, - } - - pub struct splice { - pub sp_fd: c_int, - pub sp_max: off_t, - pub sp_idle: crate::timeval, - } -} - -s_no_extra_traits! { - pub struct utmpx { - pub ut_type: c_short, - pub ut_tv: crate::timeval, - pub ut_id: [c_char; 8], - pub ut_pid: crate::pid_t, - pub ut_user: [c_char; 32], - pub ut_line: [c_char; 16], - pub ut_host: [c_char; 128], - pub __ut_spare: [c_char; 64], - } - - pub union __c_anonymous_cr_pid { - __cr_unused: *mut c_void, - pub cr_pid: crate::pid_t, - } - - pub struct xucred { - pub cr_version: c_uint, - pub cr_uid: crate::uid_t, - pub cr_ngroups: c_short, - pub cr_groups: [crate::gid_t; 16], - pub cr_pid__c_anonymous_union: __c_anonymous_cr_pid, - } - - pub struct sockaddr_dl { - pub sdl_len: c_uchar, - pub sdl_family: c_uchar, - pub sdl_index: c_ushort, - pub sdl_type: c_uchar, - pub sdl_nlen: c_uchar, - pub sdl_alen: c_uchar, - pub sdl_slen: c_uchar, - pub sdl_data: [c_char; 46], - } - - pub struct mq_attr { - pub mq_flags: c_long, - pub mq_maxmsg: c_long, - pub mq_msgsize: c_long, - pub mq_curmsgs: c_long, - __reserved: [c_long; 4], - } - - pub struct sigevent { - pub sigev_notify: c_int, - pub sigev_signo: c_int, - pub sigev_value: crate::sigval, - //The rest of the structure is actually a union. We expose only - //sigev_notify_thread_id because it's the most useful union member. 
- pub sigev_notify_thread_id: crate::lwpid_t, - #[cfg(target_pointer_width = "64")] - __unused1: c_int, - __unused2: [c_long; 7], - } - - pub struct ptsstat { - #[cfg(any(freebsd12, freebsd13, freebsd14, freebsd15))] - pub dev: u64, - #[cfg(not(any(freebsd12, freebsd13, freebsd14, freebsd15)))] - pub dev: u32, - pub devname: [c_char; SPECNAMELEN as usize + 1], - } - - pub union __c_anonymous_elf32_auxv_union { - pub a_val: c_int, - } - - pub struct Elf32_Auxinfo { - pub a_type: c_int, - pub a_un: __c_anonymous_elf32_auxv_union, - } - - pub union __c_anonymous_ifi_epoch { - pub tt: crate::time_t, - pub ph: u64, - } - - pub union __c_anonymous_ifi_lastchange { - pub tv: crate::timeval, - pub ph: __c_anonymous_ph, - } - - pub struct if_data { - /// ethernet, tokenring, etc - pub ifi_type: u8, - /// e.g., AUI, Thinnet, 10base-T, etc - pub ifi_physical: u8, - /// media address length - pub ifi_addrlen: u8, - /// media header length - pub ifi_hdrlen: u8, - /// current link state - pub ifi_link_state: u8, - /// carp vhid - pub ifi_vhid: u8, - /// length of this data struct - pub ifi_datalen: u16, - /// maximum transmission unit - pub ifi_mtu: u32, - /// routing metric (external only) - pub ifi_metric: u32, - /// linespeed - pub ifi_baudrate: u64, - /// packets received on interface - pub ifi_ipackets: u64, - /// input errors on interface - pub ifi_ierrors: u64, - /// packets sent on interface - pub ifi_opackets: u64, - /// output errors on interface - pub ifi_oerrors: u64, - /// collisions on csma interfaces - pub ifi_collisions: u64, - /// total number of octets received - pub ifi_ibytes: u64, - /// total number of octets sent - pub ifi_obytes: u64, - /// packets received via multicast - pub ifi_imcasts: u64, - /// packets sent via multicast - pub ifi_omcasts: u64, - /// dropped on input - pub ifi_iqdrops: u64, - /// dropped on output - pub ifi_oqdrops: u64, - /// destined for unsupported protocol - pub ifi_noproto: u64, - /// HW offload capabilities, see IFCAP - pub ifi_hwassist: u64, - /// uptime at attach or stat reset - pub __ifi_epoch: __c_anonymous_ifi_epoch, - /// time of last administrative change - pub __ifi_lastchange: __c_anonymous_ifi_lastchange, - } - - pub union __c_anonymous_ifr_ifru { - pub ifru_addr: crate::sockaddr, - pub ifru_dstaddr: crate::sockaddr, - pub ifru_broadaddr: crate::sockaddr, - pub ifru_buffer: ifreq_buffer, - pub ifru_flags: [c_short; 2], - pub ifru_index: c_short, - pub ifru_jid: c_int, - pub ifru_metric: c_int, - pub ifru_mtu: c_int, - pub ifru_phys: c_int, - pub ifru_media: c_int, - pub ifru_data: crate::caddr_t, - pub ifru_cap: [c_int; 2], - pub ifru_fib: c_uint, - pub ifru_vlan_pcp: c_uchar, - } - - pub struct ifreq { - /// if name, e.g. "en0" - pub ifr_name: [c_char; crate::IFNAMSIZ], - pub ifr_ifru: __c_anonymous_ifr_ifru, - } - - pub union __c_anonymous_ifc_ifcu { - pub ifcu_buf: crate::caddr_t, - pub ifcu_req: *mut ifreq, - } - - pub struct ifstat { - /// if name, e.g. "en0" - pub ifs_name: [c_char; crate::IFNAMSIZ as usize], - pub ascii: [c_char; crate::IFSTATMAX as usize + 1], - } - - pub struct ifrsskey { - /// if name, e.g. 
"en0" - pub ifrk_name: [c_char; crate::IFNAMSIZ as usize], - /// RSS_FUNC_ - pub ifrk_func: u8, - pub ifrk_spare0: u8, - pub ifrk_keylen: u16, - pub ifrk_key: [u8; crate::RSS_KEYLEN as usize], - } - - pub struct ifdownreason { - pub ifdr_name: [c_char; crate::IFNAMSIZ as usize], - pub ifdr_reason: u32, - pub ifdr_vendor: u32, - pub ifdr_msg: [c_char; crate::IFDR_MSG_SIZE as usize], - } - - #[repr(packed)] - pub struct sctphdr { - pub src_port: u16, - pub dest_port: u16, - pub v_tag: u32, - pub checksum: u32, - } - - #[repr(packed)] - pub struct sctp_chunkhdr { - pub chunk_type: u8, - pub chunk_flags: u8, - pub chunk_length: u16, - } - - #[repr(packed)] - pub struct sctp_paramhdr { - pub param_type: u16, - pub param_length: u16, - } - - #[repr(packed)] - pub struct sctp_gen_error_cause { - pub code: u16, - pub length: u16, - pub info: [u8; 0], - } - - #[repr(packed)] - pub struct sctp_error_cause { - pub code: u16, - pub length: u16, - } - - #[repr(packed)] - pub struct sctp_error_invalid_stream { - pub cause: sctp_error_cause, - pub stream_id: u16, - __reserved: u16, - } - - #[repr(packed)] - pub struct sctp_error_missing_param { - pub cause: sctp_error_cause, - pub num_missing_params: u32, - pub tpe: [u8; 0], - } - - #[repr(packed)] - pub struct sctp_error_stale_cookie { - pub cause: sctp_error_cause, - pub stale_time: u32, - } - - #[repr(packed)] - pub struct sctp_error_out_of_resource { - pub cause: sctp_error_cause, - } - - #[repr(packed)] - pub struct sctp_error_unresolv_addr { - pub cause: sctp_error_cause, - } - - #[repr(packed)] - pub struct sctp_error_unrecognized_chunk { - pub cause: sctp_error_cause, - pub ch: sctp_chunkhdr, - } - - #[repr(packed)] - pub struct sctp_error_no_user_data { - pub cause: sctp_error_cause, - pub tsn: u32, - } - - #[repr(packed)] - pub struct sctp_error_auth_invalid_hmac { - pub cause: sctp_error_cause, - pub hmac_id: u16, - } - - pub struct kinfo_file { - pub kf_structsize: c_int, - pub kf_type: c_int, - pub kf_fd: c_int, - pub kf_ref_count: c_int, - pub kf_flags: c_int, - _kf_pad0: c_int, - pub kf_offset: i64, - _priv: [u8; 304], // FIXME(freebsd): this is really a giant union - pub kf_status: u16, - _kf_pad1: u16, - _kf_ispare0: c_int, - pub kf_cap_rights: crate::cap_rights_t, - _kf_cap_spare: u64, - pub kf_path: [c_char; crate::PATH_MAX as usize], - } - - pub struct ucontext_t { - pub uc_sigmask: crate::sigset_t, - pub uc_mcontext: crate::mcontext_t, - pub uc_link: *mut crate::ucontext_t, - pub uc_stack: crate::stack_t, - pub uc_flags: c_int, - __spare__: [c_int; 4], - } - - #[repr(align(8))] - pub struct xinpgen { - pub xig_len: ksize_t, - pub xig_count: u32, - _xig_spare32: u32, - pub xig_gen: inp_gen_t, - pub xig_sogen: so_gen_t, - _xig_spare64: [u64; 4], - } - - pub struct in_addr_4in6 { - _ia46_pad32: [u32; 3], - pub ia46_addr4: crate::in_addr, - } - - pub union in_dependaddr { - pub id46_addr: crate::in_addr_4in6, - pub id6_addr: crate::in6_addr, - } - - pub struct in_endpoints { - pub ie_fport: u16, - pub ie_lport: u16, - pub ie_dependfaddr: crate::in_dependaddr, - pub ie_dependladdr: crate::in_dependaddr, - pub ie6_zoneid: u32, - } - - pub struct in_conninfo { - pub inc_flags: u8, - pub inc_len: u8, - pub inc_fibnum: u16, - pub inc_ie: crate::in_endpoints, - } - - pub struct xktls_session_onedir { - // Note: this field is called `gen` in upstream FreeBSD, but `gen` is - // reserved keyword in Rust since the 2024 Edition, hence `gennum`. 
- pub gennum: u64, - _rsrv1: [u64; 8], - _rsrv2: [u32; 8], - pub iv: [u8; 32], - pub cipher_algorithm: i32, - pub auth_algorithm: i32, - pub cipher_key_len: u16, - pub iv_len: u16, - pub auth_key_len: u16, - pub max_frame_len: u16, - pub tls_vmajor: u8, - pub tls_vminor: u8, - pub tls_hlen: u8, - pub tls_tlen: u8, - pub tls_bs: u8, - pub flags: u8, - pub drv_st_len: u16, - pub ifnet: [c_char; 16], - } - - pub struct xktls_session { - pub tsz: u32, - pub fsz: u32, - pub inp_gencnt: u64, - pub so_pcb: kvaddr_t, - pub coninf: crate::in_conninfo, - pub rx_vlan_id: c_ushort, - pub rcv: crate::xktls_session_onedir, - pub snd: crate::xktls_session_onedir, - } -} - -cfg_if! { - if #[cfg(feature = "extra_traits")] { - impl PartialEq for utmpx { - fn eq(&self, other: &utmpx) -> bool { - self.ut_type == other.ut_type - && self.ut_tv == other.ut_tv - && self.ut_id == other.ut_id - && self.ut_pid == other.ut_pid - && self.ut_user == other.ut_user - && self.ut_line == other.ut_line - && self - .ut_host - .iter() - .zip(other.ut_host.iter()) - .all(|(a, b)| a == b) - && self - .__ut_spare - .iter() - .zip(other.__ut_spare.iter()) - .all(|(a, b)| a == b) - } - } - impl Eq for utmpx {} - impl hash::Hash for utmpx { - fn hash(&self, state: &mut H) { - self.ut_type.hash(state); - self.ut_tv.hash(state); - self.ut_id.hash(state); - self.ut_pid.hash(state); - self.ut_user.hash(state); - self.ut_line.hash(state); - self.ut_host.hash(state); - self.__ut_spare.hash(state); - } - } - - impl PartialEq for __c_anonymous_cr_pid { - fn eq(&self, other: &__c_anonymous_cr_pid) -> bool { - unsafe { self.cr_pid == other.cr_pid } - } - } - impl Eq for __c_anonymous_cr_pid {} - impl hash::Hash for __c_anonymous_cr_pid { - fn hash(&self, state: &mut H) { - unsafe { self.cr_pid.hash(state) }; - } - } - - impl PartialEq for xucred { - fn eq(&self, other: &xucred) -> bool { - self.cr_version == other.cr_version - && self.cr_uid == other.cr_uid - && self.cr_ngroups == other.cr_ngroups - && self.cr_groups == other.cr_groups - && self.cr_pid__c_anonymous_union == other.cr_pid__c_anonymous_union - } - } - impl Eq for xucred {} - impl hash::Hash for xucred { - fn hash(&self, state: &mut H) { - self.cr_version.hash(state); - self.cr_uid.hash(state); - self.cr_ngroups.hash(state); - self.cr_groups.hash(state); - self.cr_pid__c_anonymous_union.hash(state); - } - } - - impl PartialEq for sockaddr_dl { - fn eq(&self, other: &sockaddr_dl) -> bool { - self.sdl_len == other.sdl_len - && self.sdl_family == other.sdl_family - && self.sdl_index == other.sdl_index - && self.sdl_type == other.sdl_type - && self.sdl_nlen == other.sdl_nlen - && self.sdl_alen == other.sdl_alen - && self.sdl_slen == other.sdl_slen - && self - .sdl_data - .iter() - .zip(other.sdl_data.iter()) - .all(|(a, b)| a == b) - } - } - impl Eq for sockaddr_dl {} - impl hash::Hash for sockaddr_dl { - fn hash(&self, state: &mut H) { - self.sdl_len.hash(state); - self.sdl_family.hash(state); - self.sdl_index.hash(state); - self.sdl_type.hash(state); - self.sdl_nlen.hash(state); - self.sdl_alen.hash(state); - self.sdl_slen.hash(state); - self.sdl_data.hash(state); - } - } - - impl PartialEq for mq_attr { - fn eq(&self, other: &mq_attr) -> bool { - self.mq_flags == other.mq_flags - && self.mq_maxmsg == other.mq_maxmsg - && self.mq_msgsize == other.mq_msgsize - && self.mq_curmsgs == other.mq_curmsgs - } - } - impl Eq for mq_attr {} - impl hash::Hash for mq_attr { - fn hash(&self, state: &mut H) { - self.mq_flags.hash(state); - self.mq_maxmsg.hash(state); - 
self.mq_msgsize.hash(state); - self.mq_curmsgs.hash(state); - } - } - - impl PartialEq for sigevent { - fn eq(&self, other: &sigevent) -> bool { - self.sigev_notify == other.sigev_notify - && self.sigev_signo == other.sigev_signo - && self.sigev_value == other.sigev_value - && self.sigev_notify_thread_id == other.sigev_notify_thread_id - } - } - impl Eq for sigevent {} - impl hash::Hash for sigevent { - fn hash(&self, state: &mut H) { - self.sigev_notify.hash(state); - self.sigev_signo.hash(state); - self.sigev_value.hash(state); - self.sigev_notify_thread_id.hash(state); - } - } - - impl PartialEq for ptsstat { - fn eq(&self, other: &ptsstat) -> bool { - let self_devname: &[c_char] = &self.devname; - let other_devname: &[c_char] = &other.devname; - - self.dev == other.dev && self_devname == other_devname - } - } - impl Eq for ptsstat {} - impl hash::Hash for ptsstat { - fn hash(&self, state: &mut H) { - let self_devname: &[c_char] = &self.devname; - - self.dev.hash(state); - self_devname.hash(state); - } - } - - impl PartialEq for __c_anonymous_elf32_auxv_union { - fn eq(&self, other: &__c_anonymous_elf32_auxv_union) -> bool { - unsafe { self.a_val == other.a_val } - } - } - impl Eq for __c_anonymous_elf32_auxv_union {} - impl PartialEq for Elf32_Auxinfo { - fn eq(&self, other: &Elf32_Auxinfo) -> bool { - self.a_type == other.a_type && self.a_un == other.a_un - } - } - impl Eq for Elf32_Auxinfo {} - - impl PartialEq for __c_anonymous_ifr_ifru { - fn eq(&self, other: &__c_anonymous_ifr_ifru) -> bool { - unsafe { - self.ifru_addr == other.ifru_addr - && self.ifru_dstaddr == other.ifru_dstaddr - && self.ifru_broadaddr == other.ifru_broadaddr - && self.ifru_buffer == other.ifru_buffer - && self.ifru_flags == other.ifru_flags - && self.ifru_index == other.ifru_index - && self.ifru_jid == other.ifru_jid - && self.ifru_metric == other.ifru_metric - && self.ifru_mtu == other.ifru_mtu - && self.ifru_phys == other.ifru_phys - && self.ifru_media == other.ifru_media - && self.ifru_data == other.ifru_data - && self.ifru_cap == other.ifru_cap - && self.ifru_fib == other.ifru_fib - && self.ifru_vlan_pcp == other.ifru_vlan_pcp - } - } - } - impl Eq for __c_anonymous_ifr_ifru {} - impl hash::Hash for __c_anonymous_ifr_ifru { - fn hash(&self, state: &mut H) { - unsafe { self.ifru_addr.hash(state) }; - unsafe { self.ifru_dstaddr.hash(state) }; - unsafe { self.ifru_broadaddr.hash(state) }; - unsafe { self.ifru_buffer.hash(state) }; - unsafe { self.ifru_flags.hash(state) }; - unsafe { self.ifru_index.hash(state) }; - unsafe { self.ifru_jid.hash(state) }; - unsafe { self.ifru_metric.hash(state) }; - unsafe { self.ifru_mtu.hash(state) }; - unsafe { self.ifru_phys.hash(state) }; - unsafe { self.ifru_media.hash(state) }; - unsafe { self.ifru_data.hash(state) }; - unsafe { self.ifru_cap.hash(state) }; - unsafe { self.ifru_fib.hash(state) }; - unsafe { self.ifru_vlan_pcp.hash(state) }; - } - } - - impl PartialEq for ifreq { - fn eq(&self, other: &ifreq) -> bool { - self.ifr_name == other.ifr_name && self.ifr_ifru == other.ifr_ifru - } - } - impl Eq for ifreq {} - impl hash::Hash for ifreq { - fn hash(&self, state: &mut H) { - self.ifr_name.hash(state); - self.ifr_ifru.hash(state); - } - } - - impl Eq for __c_anonymous_ifc_ifcu {} - - impl PartialEq for __c_anonymous_ifc_ifcu { - fn eq(&self, other: &__c_anonymous_ifc_ifcu) -> bool { - unsafe { self.ifcu_buf == other.ifcu_buf && self.ifcu_req == other.ifcu_req } - } - } - - impl hash::Hash for __c_anonymous_ifc_ifcu { - fn hash(&self, state: &mut H) { - unsafe { 
self.ifcu_buf.hash(state) }; - unsafe { self.ifcu_req.hash(state) }; - } - } - - impl PartialEq for ifstat { - fn eq(&self, other: &ifstat) -> bool { - let self_ascii: &[c_char] = &self.ascii; - let other_ascii: &[c_char] = &other.ascii; - - self.ifs_name == other.ifs_name && self_ascii == other_ascii - } - } - impl Eq for ifstat {} - impl hash::Hash for ifstat { - fn hash(&self, state: &mut H) { - self.ifs_name.hash(state); - self.ascii.hash(state); - } - } - - impl PartialEq for ifrsskey { - fn eq(&self, other: &ifrsskey) -> bool { - let self_ifrk_key: &[u8] = &self.ifrk_key; - let other_ifrk_key: &[u8] = &other.ifrk_key; - - self.ifrk_name == other.ifrk_name - && self.ifrk_func == other.ifrk_func - && self.ifrk_spare0 == other.ifrk_spare0 - && self.ifrk_keylen == other.ifrk_keylen - && self_ifrk_key == other_ifrk_key - } - } - impl Eq for ifrsskey {} - impl hash::Hash for ifrsskey { - fn hash(&self, state: &mut H) { - self.ifrk_name.hash(state); - self.ifrk_func.hash(state); - self.ifrk_spare0.hash(state); - self.ifrk_keylen.hash(state); - self.ifrk_key.hash(state); - } - } - - impl PartialEq for ifdownreason { - fn eq(&self, other: &ifdownreason) -> bool { - let self_ifdr_msg: &[c_char] = &self.ifdr_msg; - let other_ifdr_msg: &[c_char] = &other.ifdr_msg; - - self.ifdr_name == other.ifdr_name - && self.ifdr_reason == other.ifdr_reason - && self.ifdr_vendor == other.ifdr_vendor - && self_ifdr_msg == other_ifdr_msg - } - } - impl Eq for ifdownreason {} - impl hash::Hash for ifdownreason { - fn hash(&self, state: &mut H) { - self.ifdr_name.hash(state); - self.ifdr_reason.hash(state); - self.ifdr_vendor.hash(state); - self.ifdr_msg.hash(state); - } - } - - impl PartialEq for __c_anonymous_ifi_epoch { - fn eq(&self, other: &__c_anonymous_ifi_epoch) -> bool { - unsafe { self.tt == other.tt && self.ph == other.ph } - } - } - impl Eq for __c_anonymous_ifi_epoch {} - impl hash::Hash for __c_anonymous_ifi_epoch { - fn hash(&self, state: &mut H) { - unsafe { - self.tt.hash(state); - self.ph.hash(state); - } - } - } - - impl PartialEq for __c_anonymous_ifi_lastchange { - fn eq(&self, other: &__c_anonymous_ifi_lastchange) -> bool { - unsafe { self.tv == other.tv && self.ph == other.ph } - } - } - impl Eq for __c_anonymous_ifi_lastchange {} - impl hash::Hash for __c_anonymous_ifi_lastchange { - fn hash(&self, state: &mut H) { - unsafe { - self.tv.hash(state); - self.ph.hash(state); - } - } - } - - impl PartialEq for if_data { - fn eq(&self, other: &if_data) -> bool { - self.ifi_type == other.ifi_type - && self.ifi_physical == other.ifi_physical - && self.ifi_addrlen == other.ifi_addrlen - && self.ifi_hdrlen == other.ifi_hdrlen - && self.ifi_link_state == other.ifi_link_state - && self.ifi_vhid == other.ifi_vhid - && self.ifi_datalen == other.ifi_datalen - && self.ifi_mtu == other.ifi_mtu - && self.ifi_metric == other.ifi_metric - && self.ifi_baudrate == other.ifi_baudrate - && self.ifi_ipackets == other.ifi_ipackets - && self.ifi_ierrors == other.ifi_ierrors - && self.ifi_opackets == other.ifi_opackets - && self.ifi_oerrors == other.ifi_oerrors - && self.ifi_collisions == other.ifi_collisions - && self.ifi_ibytes == other.ifi_ibytes - && self.ifi_obytes == other.ifi_obytes - && self.ifi_imcasts == other.ifi_imcasts - && self.ifi_omcasts == other.ifi_omcasts - && self.ifi_iqdrops == other.ifi_iqdrops - && self.ifi_oqdrops == other.ifi_oqdrops - && self.ifi_noproto == other.ifi_noproto - && self.ifi_hwassist == other.ifi_hwassist - && self.__ifi_epoch == other.__ifi_epoch - && self.__ifi_lastchange == 
other.__ifi_lastchange - } - } - impl Eq for if_data {} - impl hash::Hash for if_data { - fn hash(&self, state: &mut H) { - self.ifi_type.hash(state); - self.ifi_physical.hash(state); - self.ifi_addrlen.hash(state); - self.ifi_hdrlen.hash(state); - self.ifi_link_state.hash(state); - self.ifi_vhid.hash(state); - self.ifi_datalen.hash(state); - self.ifi_mtu.hash(state); - self.ifi_metric.hash(state); - self.ifi_baudrate.hash(state); - self.ifi_ipackets.hash(state); - self.ifi_ierrors.hash(state); - self.ifi_opackets.hash(state); - self.ifi_oerrors.hash(state); - self.ifi_collisions.hash(state); - self.ifi_ibytes.hash(state); - self.ifi_obytes.hash(state); - self.ifi_imcasts.hash(state); - self.ifi_omcasts.hash(state); - self.ifi_iqdrops.hash(state); - self.ifi_oqdrops.hash(state); - self.ifi_noproto.hash(state); - self.ifi_hwassist.hash(state); - self.__ifi_epoch.hash(state); - self.__ifi_lastchange.hash(state); - } - } - - impl PartialEq for sctphdr { - fn eq(&self, other: &sctphdr) -> bool { - return { self.src_port } == { other.src_port } - && { self.dest_port } == { other.dest_port } - && { self.v_tag } == { other.v_tag } - && { self.checksum } == { other.checksum }; - } - } - impl Eq for sctphdr {} - impl hash::Hash for sctphdr { - fn hash(&self, state: &mut H) { - { self.src_port }.hash(state); - { self.dest_port }.hash(state); - { self.v_tag }.hash(state); - { self.checksum }.hash(state); - } - } - - impl PartialEq for sctp_chunkhdr { - fn eq(&self, other: &sctp_chunkhdr) -> bool { - return { self.chunk_type } == { other.chunk_type } - && { self.chunk_flags } == { other.chunk_flags } - && { self.chunk_length } == { other.chunk_length }; - } - } - impl Eq for sctp_chunkhdr {} - impl hash::Hash for sctp_chunkhdr { - fn hash(&self, state: &mut H) { - { self.chunk_type }.hash(state); - { self.chunk_flags }.hash(state); - { self.chunk_length }.hash(state); - } - } - - impl PartialEq for sctp_paramhdr { - fn eq(&self, other: &sctp_paramhdr) -> bool { - return { self.param_type } == { other.param_type } && { self.param_length } == { - other.param_length - }; - } - } - impl Eq for sctp_paramhdr {} - impl hash::Hash for sctp_paramhdr { - fn hash(&self, state: &mut H) { - { self.param_type }.hash(state); - { self.param_length }.hash(state); - } - } - - impl PartialEq for sctp_gen_error_cause { - fn eq(&self, other: &sctp_gen_error_cause) -> bool { - return { self.code } == { other.code } && { self.length } == { other.length } && { - self.info - } - .iter() - .zip({ other.info }.iter()) - .all(|(a, b)| a == b); - } - } - impl Eq for sctp_gen_error_cause {} - impl hash::Hash for sctp_gen_error_cause { - fn hash(&self, state: &mut H) { - { self.code }.hash(state); - { self.length }.hash(state); - { self.info }.hash(state); - } - } - - impl PartialEq for sctp_error_cause { - fn eq(&self, other: &sctp_error_cause) -> bool { - return { self.code } == { other.code } && { self.length } == { other.length }; - } - } - impl Eq for sctp_error_cause {} - impl hash::Hash for sctp_error_cause { - fn hash(&self, state: &mut H) { - { self.code }.hash(state); - { self.length }.hash(state); - } - } - - impl PartialEq for sctp_error_invalid_stream { - fn eq(&self, other: &sctp_error_invalid_stream) -> bool { - return { self.cause } == { other.cause } && { self.stream_id } == { - other.stream_id - }; - } - } - impl Eq for sctp_error_invalid_stream {} - impl hash::Hash for sctp_error_invalid_stream { - fn hash(&self, state: &mut H) { - { self.cause }.hash(state); - { self.stream_id }.hash(state); - } - } - - impl 
PartialEq for sctp_error_missing_param { - fn eq(&self, other: &sctp_error_missing_param) -> bool { - return { self.cause } == { other.cause } - && { self.num_missing_params } == { other.num_missing_params } - && { self.tpe } - .iter() - .zip({ other.tpe }.iter()) - .all(|(a, b)| a == b); - } - } - impl Eq for sctp_error_missing_param {} - impl hash::Hash for sctp_error_missing_param { - fn hash(&self, state: &mut H) { - { self.cause }.hash(state); - { self.num_missing_params }.hash(state); - { self.tpe }.hash(state); - } - } - - impl PartialEq for sctp_error_stale_cookie { - fn eq(&self, other: &sctp_error_stale_cookie) -> bool { - return { self.cause } == { other.cause } && { self.stale_time } == { - other.stale_time - }; - } - } - impl Eq for sctp_error_stale_cookie {} - impl hash::Hash for sctp_error_stale_cookie { - fn hash(&self, state: &mut H) { - { self.cause }.hash(state); - { self.stale_time }.hash(state); - } - } - - impl PartialEq for sctp_error_out_of_resource { - fn eq(&self, other: &sctp_error_out_of_resource) -> bool { - return { self.cause } == { other.cause }; - } - } - impl Eq for sctp_error_out_of_resource {} - impl hash::Hash for sctp_error_out_of_resource { - fn hash(&self, state: &mut H) { - { self.cause }.hash(state); - } - } - - impl PartialEq for sctp_error_unresolv_addr { - fn eq(&self, other: &sctp_error_unresolv_addr) -> bool { - return { self.cause } == { other.cause }; - } - } - impl Eq for sctp_error_unresolv_addr {} - impl hash::Hash for sctp_error_unresolv_addr { - fn hash(&self, state: &mut H) { - { self.cause }.hash(state); - } - } - - impl PartialEq for sctp_error_unrecognized_chunk { - fn eq(&self, other: &sctp_error_unrecognized_chunk) -> bool { - return { self.cause } == { other.cause } && { self.ch } == { other.ch }; - } - } - impl Eq for sctp_error_unrecognized_chunk {} - impl hash::Hash for sctp_error_unrecognized_chunk { - fn hash(&self, state: &mut H) { - { self.cause }.hash(state); - { self.ch }.hash(state); - } - } - - impl PartialEq for sctp_error_no_user_data { - fn eq(&self, other: &sctp_error_no_user_data) -> bool { - return { self.cause } == { other.cause } && { self.tsn } == { other.tsn }; - } - } - impl Eq for sctp_error_no_user_data {} - impl hash::Hash for sctp_error_no_user_data { - fn hash(&self, state: &mut H) { - { self.cause }.hash(state); - { self.tsn }.hash(state); - } - } - - impl PartialEq for sctp_error_auth_invalid_hmac { - fn eq(&self, other: &sctp_error_auth_invalid_hmac) -> bool { - return { self.cause } == { other.cause } && { self.hmac_id } == { other.hmac_id }; - } - } - impl Eq for sctp_error_auth_invalid_hmac {} - impl hash::Hash for sctp_error_auth_invalid_hmac { - fn hash(&self, state: &mut H) { - { self.cause }.hash(state); - { self.hmac_id }.hash(state); - } - } - - impl PartialEq for kinfo_file { - fn eq(&self, other: &kinfo_file) -> bool { - self.kf_structsize == other.kf_structsize - && self.kf_type == other.kf_type - && self.kf_fd == other.kf_fd - && self.kf_ref_count == other.kf_ref_count - && self.kf_flags == other.kf_flags - && self.kf_offset == other.kf_offset - && self.kf_status == other.kf_status - && self.kf_cap_rights == other.kf_cap_rights - && self - .kf_path - .iter() - .zip(other.kf_path.iter()) - .all(|(a, b)| a == b) - } - } - impl Eq for kinfo_file {} - impl hash::Hash for kinfo_file { - fn hash(&self, state: &mut H) { - self.kf_structsize.hash(state); - self.kf_type.hash(state); - self.kf_fd.hash(state); - self.kf_ref_count.hash(state); - self.kf_flags.hash(state); - 
self.kf_offset.hash(state); - self.kf_status.hash(state); - self.kf_cap_rights.hash(state); - self.kf_path.hash(state); - } - } - } -} - -#[derive(Debug)] -#[repr(u32)] -pub enum dot3Vendors { - dot3VendorAMD = 1, - dot3VendorIntel = 2, - dot3VendorNational = 4, - dot3VendorFujitsu = 5, - dot3VendorDigital = 6, - dot3VendorWesternDigital = 7, -} -impl Copy for dot3Vendors {} -impl Clone for dot3Vendors { - fn clone(&self) -> dot3Vendors { - *self - } -} - -// aio.h -pub const LIO_VECTORED: c_int = 4; -pub const LIO_WRITEV: c_int = 5; -pub const LIO_READV: c_int = 6; - -// sys/caprights.h -pub const CAP_RIGHTS_VERSION_00: i32 = 0; -pub const CAP_RIGHTS_VERSION: i32 = CAP_RIGHTS_VERSION_00; - -// sys/capsicum.h -macro_rules! cap_right { - ($idx:expr, $bit:expr) => { - ((1u64 << (57 + ($idx))) | ($bit)) - }; -} -pub const CAP_READ: u64 = cap_right!(0, 0x0000000000000001u64); -pub const CAP_WRITE: u64 = cap_right!(0, 0x0000000000000002u64); -pub const CAP_SEEK_TELL: u64 = cap_right!(0, 0x0000000000000004u64); -pub const CAP_SEEK: u64 = CAP_SEEK_TELL | 0x0000000000000008u64; -pub const CAP_PREAD: u64 = CAP_SEEK | CAP_READ; -pub const CAP_PWRITE: u64 = CAP_SEEK | CAP_WRITE; -pub const CAP_MMAP: u64 = cap_right!(0, 0x0000000000000010u64); -pub const CAP_MMAP_R: u64 = CAP_MMAP | CAP_SEEK | CAP_READ; -pub const CAP_MMAP_W: u64 = CAP_MMAP | CAP_SEEK | CAP_WRITE; -pub const CAP_MMAP_X: u64 = CAP_MMAP | CAP_SEEK | 0x0000000000000020u64; -pub const CAP_MMAP_RW: u64 = CAP_MMAP_R | CAP_MMAP_W; -pub const CAP_MMAP_RX: u64 = CAP_MMAP_R | CAP_MMAP_X; -pub const CAP_MMAP_WX: u64 = CAP_MMAP_W | CAP_MMAP_X; -pub const CAP_MMAP_RWX: u64 = CAP_MMAP_R | CAP_MMAP_W | CAP_MMAP_X; -pub const CAP_CREATE: u64 = cap_right!(0, 0x0000000000000040u64); -pub const CAP_FEXECVE: u64 = cap_right!(0, 0x0000000000000080u64); -pub const CAP_FSYNC: u64 = cap_right!(0, 0x0000000000000100u64); -pub const CAP_FTRUNCATE: u64 = cap_right!(0, 0x0000000000000200u64); -pub const CAP_LOOKUP: u64 = cap_right!(0, 0x0000000000000400u64); -pub const CAP_FCHDIR: u64 = cap_right!(0, 0x0000000000000800u64); -pub const CAP_FCHFLAGS: u64 = cap_right!(0, 0x0000000000001000u64); -pub const CAP_CHFLAGSAT: u64 = CAP_FCHFLAGS | CAP_LOOKUP; -pub const CAP_FCHMOD: u64 = cap_right!(0, 0x0000000000002000u64); -pub const CAP_FCHMODAT: u64 = CAP_FCHMOD | CAP_LOOKUP; -pub const CAP_FCHOWN: u64 = cap_right!(0, 0x0000000000004000u64); -pub const CAP_FCHOWNAT: u64 = CAP_FCHOWN | CAP_LOOKUP; -pub const CAP_FCNTL: u64 = cap_right!(0, 0x0000000000008000u64); -pub const CAP_FLOCK: u64 = cap_right!(0, 0x0000000000010000u64); -pub const CAP_FPATHCONF: u64 = cap_right!(0, 0x0000000000020000u64); -pub const CAP_FSCK: u64 = cap_right!(0, 0x0000000000040000u64); -pub const CAP_FSTAT: u64 = cap_right!(0, 0x0000000000080000u64); -pub const CAP_FSTATAT: u64 = CAP_FSTAT | CAP_LOOKUP; -pub const CAP_FSTATFS: u64 = cap_right!(0, 0x0000000000100000u64); -pub const CAP_FUTIMES: u64 = cap_right!(0, 0x0000000000200000u64); -pub const CAP_FUTIMESAT: u64 = CAP_FUTIMES | CAP_LOOKUP; -// Note: this was named CAP_LINKAT prior to FreeBSD 11.0. -pub const CAP_LINKAT_TARGET: u64 = CAP_LOOKUP | 0x0000000000400000u64; -pub const CAP_MKDIRAT: u64 = CAP_LOOKUP | 0x0000000000800000u64; -pub const CAP_MKFIFOAT: u64 = CAP_LOOKUP | 0x0000000001000000u64; -pub const CAP_MKNODAT: u64 = CAP_LOOKUP | 0x0000000002000000u64; -// Note: this was named CAP_RENAMEAT prior to FreeBSD 11.0. 
-pub const CAP_RENAMEAT_SOURCE: u64 = CAP_LOOKUP | 0x0000000004000000u64; -pub const CAP_SYMLINKAT: u64 = CAP_LOOKUP | 0x0000000008000000u64; -pub const CAP_UNLINKAT: u64 = CAP_LOOKUP | 0x0000000010000000u64; -pub const CAP_ACCEPT: u64 = cap_right!(0, 0x0000000020000000u64); -pub const CAP_BIND: u64 = cap_right!(0, 0x0000000040000000u64); -pub const CAP_CONNECT: u64 = cap_right!(0, 0x0000000080000000u64); -pub const CAP_GETPEERNAME: u64 = cap_right!(0, 0x0000000100000000u64); -pub const CAP_GETSOCKNAME: u64 = cap_right!(0, 0x0000000200000000u64); -pub const CAP_GETSOCKOPT: u64 = cap_right!(0, 0x0000000400000000u64); -pub const CAP_LISTEN: u64 = cap_right!(0, 0x0000000800000000u64); -pub const CAP_PEELOFF: u64 = cap_right!(0, 0x0000001000000000u64); -pub const CAP_RECV: u64 = CAP_READ; -pub const CAP_SEND: u64 = CAP_WRITE; -pub const CAP_SETSOCKOPT: u64 = cap_right!(0, 0x0000002000000000u64); -pub const CAP_SHUTDOWN: u64 = cap_right!(0, 0x0000004000000000u64); -pub const CAP_BINDAT: u64 = CAP_LOOKUP | 0x0000008000000000u64; -pub const CAP_CONNECTAT: u64 = CAP_LOOKUP | 0x0000010000000000u64; -pub const CAP_LINKAT_SOURCE: u64 = CAP_LOOKUP | 0x0000020000000000u64; -pub const CAP_RENAMEAT_TARGET: u64 = CAP_LOOKUP | 0x0000040000000000u64; -pub const CAP_SOCK_CLIENT: u64 = CAP_CONNECT - | CAP_GETPEERNAME - | CAP_GETSOCKNAME - | CAP_GETSOCKOPT - | CAP_PEELOFF - | CAP_RECV - | CAP_SEND - | CAP_SETSOCKOPT - | CAP_SHUTDOWN; -pub const CAP_SOCK_SERVER: u64 = CAP_ACCEPT - | CAP_BIND - | CAP_GETPEERNAME - | CAP_GETSOCKNAME - | CAP_GETSOCKOPT - | CAP_LISTEN - | CAP_PEELOFF - | CAP_RECV - | CAP_SEND - | CAP_SETSOCKOPT - | CAP_SHUTDOWN; -#[deprecated(since = "0.2.165", note = "Not stable across OS versions")] -pub const CAP_ALL0: u64 = cap_right!(0, 0x000007FFFFFFFFFFu64); -#[deprecated(since = "0.2.165", note = "Not stable across OS versions")] -pub const CAP_UNUSED0_44: u64 = cap_right!(0, 0x0000080000000000u64); -#[deprecated(since = "0.2.165", note = "Not stable across OS versions")] -pub const CAP_UNUSED0_57: u64 = cap_right!(0, 0x0100000000000000u64); -pub const CAP_MAC_GET: u64 = cap_right!(1, 0x0000000000000001u64); -pub const CAP_MAC_SET: u64 = cap_right!(1, 0x0000000000000002u64); -pub const CAP_SEM_GETVALUE: u64 = cap_right!(1, 0x0000000000000004u64); -pub const CAP_SEM_POST: u64 = cap_right!(1, 0x0000000000000008u64); -pub const CAP_SEM_WAIT: u64 = cap_right!(1, 0x0000000000000010u64); -pub const CAP_EVENT: u64 = cap_right!(1, 0x0000000000000020u64); -pub const CAP_KQUEUE_EVENT: u64 = cap_right!(1, 0x0000000000000040u64); -pub const CAP_IOCTL: u64 = cap_right!(1, 0x0000000000000080u64); -pub const CAP_TTYHOOK: u64 = cap_right!(1, 0x0000000000000100u64); -pub const CAP_PDGETPID: u64 = cap_right!(1, 0x0000000000000200u64); -pub const CAP_PDWAIT: u64 = cap_right!(1, 0x0000000000000400u64); -pub const CAP_PDKILL: u64 = cap_right!(1, 0x0000000000000800u64); -pub const CAP_EXTATTR_DELETE: u64 = cap_right!(1, 0x0000000000001000u64); -pub const CAP_EXTATTR_GET: u64 = cap_right!(1, 0x0000000000002000u64); -pub const CAP_EXTATTR_LIST: u64 = cap_right!(1, 0x0000000000004000u64); -pub const CAP_EXTATTR_SET: u64 = cap_right!(1, 0x0000000000008000u64); -pub const CAP_ACL_CHECK: u64 = cap_right!(1, 0x0000000000010000u64); -pub const CAP_ACL_DELETE: u64 = cap_right!(1, 0x0000000000020000u64); -pub const CAP_ACL_GET: u64 = cap_right!(1, 0x0000000000040000u64); -pub const CAP_ACL_SET: u64 = cap_right!(1, 0x0000000000080000u64); -pub const CAP_KQUEUE_CHANGE: u64 = cap_right!(1, 0x0000000000100000u64); -pub const 
CAP_KQUEUE: u64 = CAP_KQUEUE_EVENT | CAP_KQUEUE_CHANGE; -#[deprecated(since = "0.2.165", note = "Not stable across OS versions")] -pub const CAP_ALL1: u64 = cap_right!(1, 0x00000000001FFFFFu64); -#[deprecated(since = "0.2.165", note = "Not stable across OS versions")] -pub const CAP_UNUSED1_22: u64 = cap_right!(1, 0x0000000000200000u64); -#[deprecated(since = "0.2.165", note = "Not stable across OS versions")] -pub const CAP_UNUSED1_57: u64 = cap_right!(1, 0x0100000000000000u64); -pub const CAP_FCNTL_GETFL: u32 = 1 << 3; -pub const CAP_FCNTL_SETFL: u32 = 1 << 4; -pub const CAP_FCNTL_GETOWN: u32 = 1 << 5; -pub const CAP_FCNTL_SETOWN: u32 = 1 << 6; - -// sys/devicestat.h -pub const DEVSTAT_N_TRANS_FLAGS: c_int = 4; -pub const DEVSTAT_NAME_LEN: c_int = 16; - -// sys/cpuset.h -cfg_if! { - if #[cfg(any(freebsd15, freebsd14))] { - pub const CPU_SETSIZE: c_int = 1024; - } else { - pub const CPU_SETSIZE: c_int = 256; - } -} - -pub const SIGEV_THREAD_ID: c_int = 4; - -pub const EXTATTR_NAMESPACE_EMPTY: c_int = 0; -pub const EXTATTR_NAMESPACE_USER: c_int = 1; -pub const EXTATTR_NAMESPACE_SYSTEM: c_int = 2; - -pub const PTHREAD_STACK_MIN: size_t = MINSIGSTKSZ; -pub const PTHREAD_MUTEX_ADAPTIVE_NP: c_int = 4; -pub const PTHREAD_MUTEX_STALLED: c_int = 0; -pub const PTHREAD_MUTEX_ROBUST: c_int = 1; -pub const SIGSTKSZ: size_t = MINSIGSTKSZ + 32768; -pub const SF_NODISKIO: c_int = 0x00000001; -pub const SF_MNOWAIT: c_int = 0x00000002; -pub const SF_SYNC: c_int = 0x00000004; -pub const SF_USER_READAHEAD: c_int = 0x00000008; -pub const SF_NOCACHE: c_int = 0x00000010; -pub const O_CLOEXEC: c_int = 0x00100000; -pub const O_DIRECTORY: c_int = 0x00020000; -pub const O_DSYNC: c_int = 0x01000000; -pub const O_EMPTY_PATH: c_int = 0x02000000; -pub const O_EXEC: c_int = 0x00040000; -pub const O_PATH: c_int = 0x00400000; -pub const O_RESOLVE_BENEATH: c_int = 0x00800000; -pub const O_SEARCH: c_int = O_EXEC; -pub const O_TTY_INIT: c_int = 0x00080000; -pub const O_VERIFY: c_int = 0x00200000; -pub const F_GETLK: c_int = 11; -pub const F_SETLK: c_int = 12; -pub const F_SETLKW: c_int = 13; -pub const ENOTCAPABLE: c_int = 93; -pub const ECAPMODE: c_int = 94; -pub const ENOTRECOVERABLE: c_int = 95; -pub const EOWNERDEAD: c_int = 96; -pub const EINTEGRITY: c_int = 97; -pub const RLIMIT_NPTS: c_int = 11; -pub const RLIMIT_SWAP: c_int = 12; -pub const RLIMIT_KQUEUES: c_int = 13; -pub const RLIMIT_UMTXP: c_int = 14; -#[deprecated(since = "0.2.64", note = "Not stable across OS versions")] -pub const RLIM_NLIMITS: crate::rlim_t = 15; -pub const RLIM_SAVED_MAX: crate::rlim_t = crate::RLIM_INFINITY; -pub const RLIM_SAVED_CUR: crate::rlim_t = crate::RLIM_INFINITY; - -pub const CP_USER: c_int = 0; -pub const CP_NICE: c_int = 1; -pub const CP_SYS: c_int = 2; -pub const CP_INTR: c_int = 3; -pub const CP_IDLE: c_int = 4; -pub const CPUSTATES: c_int = 5; - -pub const NI_NOFQDN: c_int = 0x00000001; -pub const NI_NUMERICHOST: c_int = 0x00000002; -pub const NI_NAMEREQD: c_int = 0x00000004; -pub const NI_NUMERICSERV: c_int = 0x00000008; -pub const NI_DGRAM: c_int = 0x00000010; -pub const NI_NUMERICSCOPE: c_int = 0x00000020; - -pub const XU_NGROUPS: c_int = 16; - -pub const Q_GETQUOTA: c_int = 0x700; -pub const Q_SETQUOTA: c_int = 0x800; - -pub const MAP_GUARD: c_int = 0x00002000; -pub const MAP_EXCL: c_int = 0x00004000; -pub const MAP_PREFAULT_READ: c_int = 0x00040000; -pub const MAP_ALIGNMENT_SHIFT: c_int = 24; -pub const MAP_ALIGNMENT_MASK: c_int = 0xff << MAP_ALIGNMENT_SHIFT; -pub const MAP_ALIGNED_SUPER: c_int = 1 << MAP_ALIGNMENT_SHIFT; 
- -pub const POSIX_FADV_NORMAL: c_int = 0; -pub const POSIX_FADV_RANDOM: c_int = 1; -pub const POSIX_FADV_SEQUENTIAL: c_int = 2; -pub const POSIX_FADV_WILLNEED: c_int = 3; -pub const POSIX_FADV_DONTNEED: c_int = 4; -pub const POSIX_FADV_NOREUSE: c_int = 5; - -pub const POLLINIGNEOF: c_short = 0x2000; -pub const POLLRDHUP: c_short = 0x4000; - -pub const EVFILT_READ: i16 = -1; -pub const EVFILT_WRITE: i16 = -2; -pub const EVFILT_AIO: i16 = -3; -pub const EVFILT_VNODE: i16 = -4; -pub const EVFILT_PROC: i16 = -5; -pub const EVFILT_SIGNAL: i16 = -6; -pub const EVFILT_TIMER: i16 = -7; -pub const EVFILT_PROCDESC: i16 = -8; -pub const EVFILT_FS: i16 = -9; -pub const EVFILT_LIO: i16 = -10; -pub const EVFILT_USER: i16 = -11; -pub const EVFILT_SENDFILE: i16 = -12; -pub const EVFILT_EMPTY: i16 = -13; - -pub const EV_ADD: u16 = 0x1; -pub const EV_DELETE: u16 = 0x2; -pub const EV_ENABLE: u16 = 0x4; -pub const EV_DISABLE: u16 = 0x8; -pub const EV_FORCEONESHOT: u16 = 0x100; -pub const EV_KEEPUDATA: u16 = 0x200; - -pub const EV_ONESHOT: u16 = 0x10; -pub const EV_CLEAR: u16 = 0x20; -pub const EV_RECEIPT: u16 = 0x40; -pub const EV_DISPATCH: u16 = 0x80; -pub const EV_SYSFLAGS: u16 = 0xf000; -pub const EV_DROP: u16 = 0x1000; -pub const EV_FLAG1: u16 = 0x2000; -pub const EV_FLAG2: u16 = 0x4000; - -pub const EV_EOF: u16 = 0x8000; -pub const EV_ERROR: u16 = 0x4000; - -pub const NOTE_TRIGGER: u32 = 0x01000000; -pub const NOTE_FFNOP: u32 = 0x00000000; -pub const NOTE_FFAND: u32 = 0x40000000; -pub const NOTE_FFOR: u32 = 0x80000000; -pub const NOTE_FFCOPY: u32 = 0xc0000000; -pub const NOTE_FFCTRLMASK: u32 = 0xc0000000; -pub const NOTE_FFLAGSMASK: u32 = 0x00ffffff; -pub const NOTE_LOWAT: u32 = 0x00000001; -pub const NOTE_FILE_POLL: u32 = 0x00000002; -pub const NOTE_DELETE: u32 = 0x00000001; -pub const NOTE_WRITE: u32 = 0x00000002; -pub const NOTE_EXTEND: u32 = 0x00000004; -pub const NOTE_ATTRIB: u32 = 0x00000008; -pub const NOTE_LINK: u32 = 0x00000010; -pub const NOTE_RENAME: u32 = 0x00000020; -pub const NOTE_REVOKE: u32 = 0x00000040; -pub const NOTE_OPEN: u32 = 0x00000080; -pub const NOTE_CLOSE: u32 = 0x00000100; -pub const NOTE_CLOSE_WRITE: u32 = 0x00000200; -pub const NOTE_READ: u32 = 0x00000400; -pub const NOTE_EXIT: u32 = 0x80000000; -pub const NOTE_FORK: u32 = 0x40000000; -pub const NOTE_EXEC: u32 = 0x20000000; -pub const NOTE_PDATAMASK: u32 = 0x000fffff; -pub const NOTE_PCTRLMASK: u32 = 0xf0000000; -pub const NOTE_TRACK: u32 = 0x00000001; -pub const NOTE_TRACKERR: u32 = 0x00000002; -pub const NOTE_CHILD: u32 = 0x00000004; -pub const NOTE_SECONDS: u32 = 0x00000001; -pub const NOTE_MSECONDS: u32 = 0x00000002; -pub const NOTE_USECONDS: u32 = 0x00000004; -pub const NOTE_NSECONDS: u32 = 0x00000008; -pub const NOTE_ABSTIME: u32 = 0x00000010; - -pub const MADV_PROTECT: c_int = 10; - -#[doc(hidden)] -#[deprecated( - since = "0.2.72", - note = "CTL_UNSPEC is deprecated. 
Use CTL_SYSCTL instead" -)] -pub const CTL_UNSPEC: c_int = 0; -pub const CTL_SYSCTL: c_int = 0; -pub const CTL_KERN: c_int = 1; -pub const CTL_VM: c_int = 2; -pub const CTL_VFS: c_int = 3; -pub const CTL_NET: c_int = 4; -pub const CTL_DEBUG: c_int = 5; -pub const CTL_HW: c_int = 6; -pub const CTL_MACHDEP: c_int = 7; -pub const CTL_USER: c_int = 8; -pub const CTL_P1003_1B: c_int = 9; - -// sys/sysctl.h -pub const CTL_MAXNAME: c_int = 24; - -pub const CTLTYPE: c_int = 0xf; -pub const CTLTYPE_NODE: c_int = 1; -pub const CTLTYPE_INT: c_int = 2; -pub const CTLTYPE_STRING: c_int = 3; -pub const CTLTYPE_S64: c_int = 4; -pub const CTLTYPE_OPAQUE: c_int = 5; -pub const CTLTYPE_STRUCT: c_int = CTLTYPE_OPAQUE; -pub const CTLTYPE_UINT: c_int = 6; -pub const CTLTYPE_LONG: c_int = 7; -pub const CTLTYPE_ULONG: c_int = 8; -pub const CTLTYPE_U64: c_int = 9; -pub const CTLTYPE_U8: c_int = 0xa; -pub const CTLTYPE_U16: c_int = 0xb; -pub const CTLTYPE_S8: c_int = 0xc; -pub const CTLTYPE_S16: c_int = 0xd; -pub const CTLTYPE_S32: c_int = 0xe; -pub const CTLTYPE_U32: c_int = 0xf; - -pub const CTLFLAG_RD: c_int = 0x80000000; -pub const CTLFLAG_WR: c_int = 0x40000000; -pub const CTLFLAG_RW: c_int = CTLFLAG_RD | CTLFLAG_WR; -pub const CTLFLAG_DORMANT: c_int = 0x20000000; -pub const CTLFLAG_ANYBODY: c_int = 0x10000000; -pub const CTLFLAG_SECURE: c_int = 0x08000000; -pub const CTLFLAG_PRISON: c_int = 0x04000000; -pub const CTLFLAG_DYN: c_int = 0x02000000; -pub const CTLFLAG_SKIP: c_int = 0x01000000; -pub const CTLMASK_SECURE: c_int = 0x00F00000; -pub const CTLFLAG_TUN: c_int = 0x00080000; -pub const CTLFLAG_RDTUN: c_int = CTLFLAG_RD | CTLFLAG_TUN; -pub const CTLFLAG_RWTUN: c_int = CTLFLAG_RW | CTLFLAG_TUN; -pub const CTLFLAG_MPSAFE: c_int = 0x00040000; -pub const CTLFLAG_VNET: c_int = 0x00020000; -pub const CTLFLAG_DYING: c_int = 0x00010000; -pub const CTLFLAG_CAPRD: c_int = 0x00008000; -pub const CTLFLAG_CAPWR: c_int = 0x00004000; -pub const CTLFLAG_STATS: c_int = 0x00002000; -pub const CTLFLAG_NOFETCH: c_int = 0x00001000; -pub const CTLFLAG_CAPRW: c_int = CTLFLAG_CAPRD | CTLFLAG_CAPWR; -pub const CTLFLAG_NEEDGIANT: c_int = 0x00000800; - -pub const CTLSHIFT_SECURE: c_int = 20; -pub const CTLFLAG_SECURE1: c_int = CTLFLAG_SECURE | (0 << CTLSHIFT_SECURE); -pub const CTLFLAG_SECURE2: c_int = CTLFLAG_SECURE | (1 << CTLSHIFT_SECURE); -pub const CTLFLAG_SECURE3: c_int = CTLFLAG_SECURE | (2 << CTLSHIFT_SECURE); - -pub const OID_AUTO: c_int = -1; - -pub const CTL_SYSCTL_DEBUG: c_int = 0; -pub const CTL_SYSCTL_NAME: c_int = 1; -pub const CTL_SYSCTL_NEXT: c_int = 2; -pub const CTL_SYSCTL_NAME2OID: c_int = 3; -pub const CTL_SYSCTL_OIDFMT: c_int = 4; -pub const CTL_SYSCTL_OIDDESCR: c_int = 5; -pub const CTL_SYSCTL_OIDLABEL: c_int = 6; -pub const CTL_SYSCTL_NEXTNOSKIP: c_int = 7; - -pub const KERN_OSTYPE: c_int = 1; -pub const KERN_OSRELEASE: c_int = 2; -pub const KERN_OSREV: c_int = 3; -pub const KERN_VERSION: c_int = 4; -pub const KERN_MAXVNODES: c_int = 5; -pub const KERN_MAXPROC: c_int = 6; -pub const KERN_MAXFILES: c_int = 7; -pub const KERN_ARGMAX: c_int = 8; -pub const KERN_SECURELVL: c_int = 9; -pub const KERN_HOSTNAME: c_int = 10; -pub const KERN_HOSTID: c_int = 11; -pub const KERN_CLOCKRATE: c_int = 12; -pub const KERN_VNODE: c_int = 13; -pub const KERN_PROC: c_int = 14; -pub const KERN_FILE: c_int = 15; -pub const KERN_PROF: c_int = 16; -pub const KERN_POSIX1: c_int = 17; -pub const KERN_NGROUPS: c_int = 18; -pub const KERN_JOB_CONTROL: c_int = 19; -pub const KERN_SAVED_IDS: c_int = 20; -pub const KERN_BOOTTIME: c_int = 
21; -pub const KERN_NISDOMAINNAME: c_int = 22; -pub const KERN_UPDATEINTERVAL: c_int = 23; -pub const KERN_OSRELDATE: c_int = 24; -pub const KERN_NTP_PLL: c_int = 25; -pub const KERN_BOOTFILE: c_int = 26; -pub const KERN_MAXFILESPERPROC: c_int = 27; -pub const KERN_MAXPROCPERUID: c_int = 28; -pub const KERN_DUMPDEV: c_int = 29; -pub const KERN_IPC: c_int = 30; -pub const KERN_DUMMY: c_int = 31; -pub const KERN_PS_STRINGS: c_int = 32; -pub const KERN_USRSTACK: c_int = 33; -pub const KERN_LOGSIGEXIT: c_int = 34; -pub const KERN_IOV_MAX: c_int = 35; -pub const KERN_HOSTUUID: c_int = 36; -pub const KERN_ARND: c_int = 37; -pub const KERN_MAXPHYS: c_int = 38; - -pub const KERN_PROC_ALL: c_int = 0; -pub const KERN_PROC_PID: c_int = 1; -pub const KERN_PROC_PGRP: c_int = 2; -pub const KERN_PROC_SESSION: c_int = 3; -pub const KERN_PROC_TTY: c_int = 4; -pub const KERN_PROC_UID: c_int = 5; -pub const KERN_PROC_RUID: c_int = 6; -pub const KERN_PROC_ARGS: c_int = 7; -pub const KERN_PROC_PROC: c_int = 8; -pub const KERN_PROC_SV_NAME: c_int = 9; -pub const KERN_PROC_RGID: c_int = 10; -pub const KERN_PROC_GID: c_int = 11; -pub const KERN_PROC_PATHNAME: c_int = 12; -pub const KERN_PROC_OVMMAP: c_int = 13; -pub const KERN_PROC_OFILEDESC: c_int = 14; -pub const KERN_PROC_KSTACK: c_int = 15; -pub const KERN_PROC_INC_THREAD: c_int = 0x10; -pub const KERN_PROC_VMMAP: c_int = 32; -pub const KERN_PROC_FILEDESC: c_int = 33; -pub const KERN_PROC_GROUPS: c_int = 34; -pub const KERN_PROC_ENV: c_int = 35; -pub const KERN_PROC_AUXV: c_int = 36; -pub const KERN_PROC_RLIMIT: c_int = 37; -pub const KERN_PROC_PS_STRINGS: c_int = 38; -pub const KERN_PROC_UMASK: c_int = 39; -pub const KERN_PROC_OSREL: c_int = 40; -pub const KERN_PROC_SIGTRAMP: c_int = 41; -pub const KERN_PROC_CWD: c_int = 42; -pub const KERN_PROC_NFDS: c_int = 43; -pub const KERN_PROC_SIGFASTBLK: c_int = 44; - -pub const KIPC_MAXSOCKBUF: c_int = 1; -pub const KIPC_SOCKBUF_WASTE: c_int = 2; -pub const KIPC_SOMAXCONN: c_int = 3; -pub const KIPC_MAX_LINKHDR: c_int = 4; -pub const KIPC_MAX_PROTOHDR: c_int = 5; -pub const KIPC_MAX_HDR: c_int = 6; -pub const KIPC_MAX_DATALEN: c_int = 7; - -pub const HW_MACHINE: c_int = 1; -pub const HW_MODEL: c_int = 2; -pub const HW_NCPU: c_int = 3; -pub const HW_BYTEORDER: c_int = 4; -pub const HW_PHYSMEM: c_int = 5; -pub const HW_USERMEM: c_int = 6; -pub const HW_PAGESIZE: c_int = 7; -pub const HW_DISKNAMES: c_int = 8; -pub const HW_DISKSTATS: c_int = 9; -pub const HW_FLOATINGPT: c_int = 10; -pub const HW_MACHINE_ARCH: c_int = 11; -pub const HW_REALMEM: c_int = 12; - -pub const USER_CS_PATH: c_int = 1; -pub const USER_BC_BASE_MAX: c_int = 2; -pub const USER_BC_DIM_MAX: c_int = 3; -pub const USER_BC_SCALE_MAX: c_int = 4; -pub const USER_BC_STRING_MAX: c_int = 5; -pub const USER_COLL_WEIGHTS_MAX: c_int = 6; -pub const USER_EXPR_NEST_MAX: c_int = 7; -pub const USER_LINE_MAX: c_int = 8; -pub const USER_RE_DUP_MAX: c_int = 9; -pub const USER_POSIX2_VERSION: c_int = 10; -pub const USER_POSIX2_C_BIND: c_int = 11; -pub const USER_POSIX2_C_DEV: c_int = 12; -pub const USER_POSIX2_CHAR_TERM: c_int = 13; -pub const USER_POSIX2_FORT_DEV: c_int = 14; -pub const USER_POSIX2_FORT_RUN: c_int = 15; -pub const USER_POSIX2_LOCALEDEF: c_int = 16; -pub const USER_POSIX2_SW_DEV: c_int = 17; -pub const USER_POSIX2_UPE: c_int = 18; -pub const USER_STREAM_MAX: c_int = 19; -pub const USER_TZNAME_MAX: c_int = 20; -pub const USER_LOCALBASE: c_int = 21; - -pub const CTL_P1003_1B_ASYNCHRONOUS_IO: c_int = 1; -pub const CTL_P1003_1B_MAPPED_FILES: c_int = 2; 
-pub const CTL_P1003_1B_MEMLOCK: c_int = 3; -pub const CTL_P1003_1B_MEMLOCK_RANGE: c_int = 4; -pub const CTL_P1003_1B_MEMORY_PROTECTION: c_int = 5; -pub const CTL_P1003_1B_MESSAGE_PASSING: c_int = 6; -pub const CTL_P1003_1B_PRIORITIZED_IO: c_int = 7; -pub const CTL_P1003_1B_PRIORITY_SCHEDULING: c_int = 8; -pub const CTL_P1003_1B_REALTIME_SIGNALS: c_int = 9; -pub const CTL_P1003_1B_SEMAPHORES: c_int = 10; -pub const CTL_P1003_1B_FSYNC: c_int = 11; -pub const CTL_P1003_1B_SHARED_MEMORY_OBJECTS: c_int = 12; -pub const CTL_P1003_1B_SYNCHRONIZED_IO: c_int = 13; -pub const CTL_P1003_1B_TIMERS: c_int = 14; -pub const CTL_P1003_1B_AIO_LISTIO_MAX: c_int = 15; -pub const CTL_P1003_1B_AIO_MAX: c_int = 16; -pub const CTL_P1003_1B_AIO_PRIO_DELTA_MAX: c_int = 17; -pub const CTL_P1003_1B_DELAYTIMER_MAX: c_int = 18; -pub const CTL_P1003_1B_MQ_OPEN_MAX: c_int = 19; -pub const CTL_P1003_1B_PAGESIZE: c_int = 20; -pub const CTL_P1003_1B_RTSIG_MAX: c_int = 21; -pub const CTL_P1003_1B_SEM_NSEMS_MAX: c_int = 22; -pub const CTL_P1003_1B_SEM_VALUE_MAX: c_int = 23; -pub const CTL_P1003_1B_SIGQUEUE_MAX: c_int = 24; -pub const CTL_P1003_1B_TIMER_MAX: c_int = 25; - -pub const TIOCGPTN: c_ulong = 0x4004740f; -pub const TIOCPTMASTER: c_ulong = 0x2000741c; -pub const TIOCSIG: c_ulong = 0x2004745f; -pub const TIOCM_DCD: c_int = 0x40; -pub const H4DISC: c_int = 0x7; - -pub const VM_TOTAL: c_int = 1; - -cfg_if! { - if #[cfg(target_pointer_width = "64")] { - pub const BIOCSETFNR: c_ulong = 0x80104282; - } else { - pub const BIOCSETFNR: c_ulong = 0x80084282; - } -} - -cfg_if! { - if #[cfg(target_pointer_width = "64")] { - pub const FIODGNAME: c_ulong = 0x80106678; - } else { - pub const FIODGNAME: c_ulong = 0x80086678; - } -} - -pub const FIONWRITE: c_ulong = 0x40046677; -pub const FIONSPACE: c_ulong = 0x40046676; -pub const FIOSEEKDATA: c_ulong = 0xc0086661; -pub const FIOSEEKHOLE: c_ulong = 0xc0086662; -pub const FIOSSHMLPGCNF: c_ulong = 0x80306664; - -pub const JAIL_API_VERSION: u32 = 2; -pub const JAIL_CREATE: c_int = 0x01; -pub const JAIL_UPDATE: c_int = 0x02; -pub const JAIL_ATTACH: c_int = 0x04; -pub const JAIL_DYING: c_int = 0x08; -pub const JAIL_SYS_DISABLE: c_int = 0; -pub const JAIL_SYS_NEW: c_int = 1; -pub const JAIL_SYS_INHERIT: c_int = 2; - -pub const MNT_ACLS: c_int = 0x08000000; -pub const MNT_BYFSID: c_int = 0x08000000; -pub const MNT_GJOURNAL: c_int = 0x02000000; -pub const MNT_MULTILABEL: c_int = 0x04000000; -pub const MNT_NFS4ACLS: c_int = 0x00000010; -pub const MNT_SNAPSHOT: c_int = 0x01000000; -pub const MNT_UNION: c_int = 0x00000020; -pub const MNT_NONBUSY: c_int = 0x04000000; - -pub const SCM_BINTIME: c_int = 0x04; -pub const SCM_REALTIME: c_int = 0x05; -pub const SCM_MONOTONIC: c_int = 0x06; -pub const SCM_TIME_INFO: c_int = 0x07; -pub const SCM_CREDS2: c_int = 0x08; - -pub const SO_BINTIME: c_int = 0x2000; -pub const SO_NO_OFFLOAD: c_int = 0x4000; -pub const SO_NO_DDP: c_int = 0x8000; -pub const SO_REUSEPORT_LB: c_int = 0x10000; -pub const SO_LABEL: c_int = 0x1009; -pub const SO_PEERLABEL: c_int = 0x1010; -pub const SO_LISTENQLIMIT: c_int = 0x1011; -pub const SO_LISTENQLEN: c_int = 0x1012; -pub const SO_LISTENINCQLEN: c_int = 0x1013; -pub const SO_SETFIB: c_int = 0x1014; -pub const SO_USER_COOKIE: c_int = 0x1015; -pub const SO_PROTOCOL: c_int = 0x1016; -pub const SO_PROTOTYPE: c_int = SO_PROTOCOL; -pub const SO_TS_CLOCK: c_int = 0x1017; -pub const SO_DOMAIN: c_int = 0x1019; -pub const SO_SPLICE: c_int = 0x1023; -pub const SO_VENDOR: c_int = 0x80000000; - -pub const SO_TS_REALTIME_MICRO: c_int = 0; 
-pub const SO_TS_BINTIME: c_int = 1; -pub const SO_TS_REALTIME: c_int = 2; -pub const SO_TS_MONOTONIC: c_int = 3; -pub const SO_TS_DEFAULT: c_int = SO_TS_REALTIME_MICRO; -pub const SO_TS_CLOCK_MAX: c_int = SO_TS_MONOTONIC; - -pub const LOCAL_CREDS: c_int = 2; -pub const LOCAL_CREDS_PERSISTENT: c_int = 3; -pub const LOCAL_CONNWAIT: c_int = 4; -pub const LOCAL_VENDOR: c_int = SO_VENDOR; - -pub const PL_EVENT_NONE: c_int = 0; -pub const PL_EVENT_SIGNAL: c_int = 1; -pub const PL_FLAG_SA: c_int = 0x01; -pub const PL_FLAG_BOUND: c_int = 0x02; -pub const PL_FLAG_SCE: c_int = 0x04; -pub const PL_FLAG_SCX: c_int = 0x08; -pub const PL_FLAG_EXEC: c_int = 0x10; -pub const PL_FLAG_SI: c_int = 0x20; -pub const PL_FLAG_FORKED: c_int = 0x40; -pub const PL_FLAG_CHILD: c_int = 0x80; -pub const PL_FLAG_BORN: c_int = 0x100; -pub const PL_FLAG_EXITED: c_int = 0x200; -pub const PL_FLAG_VFORKED: c_int = 0x400; -pub const PL_FLAG_VFORK_DONE: c_int = 0x800; - -pub const PT_LWPINFO: c_int = 13; -pub const PT_GETNUMLWPS: c_int = 14; -pub const PT_GETLWPLIST: c_int = 15; -pub const PT_CLEARSTEP: c_int = 16; -pub const PT_SETSTEP: c_int = 17; -pub const PT_SUSPEND: c_int = 18; -pub const PT_RESUME: c_int = 19; -pub const PT_TO_SCE: c_int = 20; -pub const PT_TO_SCX: c_int = 21; -pub const PT_SYSCALL: c_int = 22; -pub const PT_FOLLOW_FORK: c_int = 23; -pub const PT_LWP_EVENTS: c_int = 24; -pub const PT_GET_EVENT_MASK: c_int = 25; -pub const PT_SET_EVENT_MASK: c_int = 26; -pub const PT_GET_SC_ARGS: c_int = 27; -pub const PT_GET_SC_RET: c_int = 28; -pub const PT_COREDUMP: c_int = 29; -pub const PT_GETREGS: c_int = 33; -pub const PT_SETREGS: c_int = 34; -pub const PT_GETFPREGS: c_int = 35; -pub const PT_SETFPREGS: c_int = 36; -pub const PT_GETDBREGS: c_int = 37; -pub const PT_SETDBREGS: c_int = 38; -pub const PT_VM_TIMESTAMP: c_int = 40; -pub const PT_VM_ENTRY: c_int = 41; -pub const PT_GETREGSET: c_int = 42; -pub const PT_SETREGSET: c_int = 43; -pub const PT_SC_REMOTE: c_int = 44; -pub const PT_FIRSTMACH: c_int = 64; - -pub const PTRACE_EXEC: c_int = 0x0001; -pub const PTRACE_SCE: c_int = 0x0002; -pub const PTRACE_SCX: c_int = 0x0004; -pub const PTRACE_SYSCALL: c_int = PTRACE_SCE | PTRACE_SCX; -pub const PTRACE_FORK: c_int = 0x0008; -pub const PTRACE_LWP: c_int = 0x0010; -pub const PTRACE_VFORK: c_int = 0x0020; -pub const PTRACE_DEFAULT: c_int = PTRACE_EXEC; - -pub const PC_COMPRESS: u32 = 0x00000001; -pub const PC_ALL: u32 = 0x00000002; - -pub const PROC_SPROTECT: c_int = 1; -pub const PROC_REAP_ACQUIRE: c_int = 2; -pub const PROC_REAP_RELEASE: c_int = 3; -pub const PROC_REAP_STATUS: c_int = 4; -pub const PROC_REAP_GETPIDS: c_int = 5; -pub const PROC_REAP_KILL: c_int = 6; -pub const PROC_TRACE_CTL: c_int = 7; -pub const PROC_TRACE_STATUS: c_int = 8; -pub const PROC_TRAPCAP_CTL: c_int = 9; -pub const PROC_TRAPCAP_STATUS: c_int = 10; -pub const PROC_PDEATHSIG_CTL: c_int = 11; -pub const PROC_PDEATHSIG_STATUS: c_int = 12; -pub const PROC_ASLR_CTL: c_int = 13; -pub const PROC_ASLR_STATUS: c_int = 14; -pub const PROC_PROTMAX_CTL: c_int = 15; -pub const PROC_PROTMAX_STATUS: c_int = 16; -pub const PROC_STACKGAP_CTL: c_int = 17; -pub const PROC_STACKGAP_STATUS: c_int = 18; -pub const PROC_NO_NEW_PRIVS_CTL: c_int = 19; -pub const PROC_NO_NEW_PRIVS_STATUS: c_int = 20; -pub const PROC_WXMAP_CTL: c_int = 21; -pub const PROC_WXMAP_STATUS: c_int = 22; -pub const PROC_PROCCTL_MD_MIN: c_int = 0x10000000; - -pub const PPROT_SET: c_int = 1; -pub const PPROT_CLEAR: c_int = 2; -pub const PPROT_DESCEND: c_int = 0x10; -pub const 
PPROT_INHERIT: c_int = 0x20; - -pub const PROC_TRACE_CTL_ENABLE: c_int = 1; -pub const PROC_TRACE_CTL_DISABLE: c_int = 2; -pub const PROC_TRACE_CTL_DISABLE_EXEC: c_int = 3; - -pub const PROC_TRAPCAP_CTL_ENABLE: c_int = 1; -pub const PROC_TRAPCAP_CTL_DISABLE: c_int = 2; - -pub const PROC_ASLR_FORCE_ENABLE: c_int = 1; -pub const PROC_ASLR_FORCE_DISABLE: c_int = 2; -pub const PROC_ASLR_NOFORCE: c_int = 3; -pub const PROC_ASLR_ACTIVE: c_int = 0x80000000; - -pub const PROC_PROTMAX_FORCE_ENABLE: c_int = 1; -pub const PROC_PROTMAX_FORCE_DISABLE: c_int = 2; -pub const PROC_PROTMAX_NOFORCE: c_int = 3; -pub const PROC_PROTMAX_ACTIVE: c_int = 0x80000000; - -pub const PROC_STACKGAP_ENABLE: c_int = 0x0001; -pub const PROC_STACKGAP_DISABLE: c_int = 0x0002; -pub const PROC_STACKGAP_ENABLE_EXEC: c_int = 0x0004; -pub const PROC_STACKGAP_DISABLE_EXEC: c_int = 0x0008; - -pub const PROC_NO_NEW_PRIVS_ENABLE: c_int = 1; -pub const PROC_NO_NEW_PRIVS_DISABLE: c_int = 2; - -pub const PROC_WX_MAPPINGS_PERMIT: c_int = 0x0001; -pub const PROC_WX_MAPPINGS_DISALLOW_EXEC: c_int = 0x0002; -pub const PROC_WXORX_ENFORCE: c_int = 0x80000000; - -pub const AF_SLOW: c_int = 33; -pub const AF_SCLUSTER: c_int = 34; -pub const AF_ARP: c_int = 35; -pub const AF_BLUETOOTH: c_int = 36; -pub const AF_IEEE80211: c_int = 37; -pub const AF_INET_SDP: c_int = 40; -pub const AF_INET6_SDP: c_int = 42; - -// sys/net/if.h -pub const IF_MAXUNIT: c_int = 0x7fff; -/// (n) interface is up -pub const IFF_UP: c_int = 0x1; -/// (i) broadcast address valid -pub const IFF_BROADCAST: c_int = 0x2; -/// (n) turn on debugging -pub const IFF_DEBUG: c_int = 0x4; -/// (i) is a loopback net -pub const IFF_LOOPBACK: c_int = 0x8; -/// (i) is a point-to-point link -pub const IFF_POINTOPOINT: c_int = 0x10; -/// (i) calls if_input in net epoch -#[deprecated(since = "0.2.149", note = "Removed in FreeBSD 14")] -pub const IFF_KNOWSEPOCH: c_int = 0x20; -/// (d) resources allocated -pub const IFF_RUNNING: c_int = 0x40; -#[doc(hidden)] -#[deprecated( - since = "0.2.54", - note = "IFF_DRV_RUNNING is deprecated. 
Use the portable IFF_RUNNING instead" -)] -/// (d) resources allocate -pub const IFF_DRV_RUNNING: c_int = 0x40; -/// (n) no address resolution protocol -pub const IFF_NOARP: c_int = 0x80; -/// (n) receive all packets -pub const IFF_PROMISC: c_int = 0x100; -/// (n) receive all multicast packets -pub const IFF_ALLMULTI: c_int = 0x200; -/// (d) tx hardware queue is full -pub const IFF_OACTIVE: c_int = 0x400; -#[doc(hidden)] -#[deprecated(since = "0.2.54", note = "Use the portable `IFF_OACTIVE` instead")] -/// (d) tx hardware queue is full -pub const IFF_DRV_OACTIVE: c_int = 0x400; -/// (i) can't hear own transmissions -pub const IFF_SIMPLEX: c_int = 0x800; -/// per link layer defined bit -pub const IFF_LINK0: c_int = 0x1000; -/// per link layer defined bit -pub const IFF_LINK1: c_int = 0x2000; -/// per link layer defined bit -pub const IFF_LINK2: c_int = 0x4000; -/// use alternate physical connection -pub const IFF_ALTPHYS: c_int = IFF_LINK2; -/// (i) supports multicast -pub const IFF_MULTICAST: c_int = 0x8000; -/// (i) unconfigurable using ioctl(2) -pub const IFF_CANTCONFIG: c_int = 0x10000; -/// (n) user-requested promisc mode -pub const IFF_PPROMISC: c_int = 0x20000; -/// (n) user-requested monitor mode -pub const IFF_MONITOR: c_int = 0x40000; -/// (n) static ARP -pub const IFF_STATICARP: c_int = 0x80000; -/// (n) interface is winding down -pub const IFF_DYING: c_int = 0x200000; -/// (n) interface is being renamed -pub const IFF_RENAMING: c_int = 0x400000; -/// interface is not part of any groups -#[deprecated(since = "0.2.149", note = "Removed in FreeBSD 14")] -pub const IFF_NOGROUP: c_int = 0x800000; - -/// link invalid/unknown -pub const LINK_STATE_UNKNOWN: c_int = 0; -/// link is down -pub const LINK_STATE_DOWN: c_int = 1; -/// link is up -pub const LINK_STATE_UP: c_int = 2; - -/// can offload checksum on RX -pub const IFCAP_RXCSUM: c_int = 0x00001; -/// can offload checksum on TX -pub const IFCAP_TXCSUM: c_int = 0x00002; -/// can be a network console -pub const IFCAP_NETCONS: c_int = 0x00004; -/// VLAN-compatible MTU -pub const IFCAP_VLAN_MTU: c_int = 0x00008; -/// hardware VLAN tag support -pub const IFCAP_VLAN_HWTAGGING: c_int = 0x00010; -/// 9000 byte MTU supported -pub const IFCAP_JUMBO_MTU: c_int = 0x00020; -/// driver supports polling -pub const IFCAP_POLLING: c_int = 0x00040; -/// can do IFCAP_HWCSUM on VLANs -pub const IFCAP_VLAN_HWCSUM: c_int = 0x00080; -/// can do TCP Segmentation Offload -pub const IFCAP_TSO4: c_int = 0x00100; -/// can do TCP6 Segmentation Offload -pub const IFCAP_TSO6: c_int = 0x00200; -/// can do Large Receive Offload -pub const IFCAP_LRO: c_int = 0x00400; -/// wake on any unicast frame -pub const IFCAP_WOL_UCAST: c_int = 0x00800; -/// wake on any multicast frame -pub const IFCAP_WOL_MCAST: c_int = 0x01000; -/// wake on any Magic Packet -pub const IFCAP_WOL_MAGIC: c_int = 0x02000; -/// interface can offload TCP -pub const IFCAP_TOE4: c_int = 0x04000; -/// interface can offload TCP6 -pub const IFCAP_TOE6: c_int = 0x08000; -/// interface hw can filter vlan tag -pub const IFCAP_VLAN_HWFILTER: c_int = 0x10000; -/// can do SIOCGIFCAPNV/SIOCSIFCAPNV -pub const IFCAP_NV: c_int = 0x20000; -/// can do IFCAP_TSO on VLANs -pub const IFCAP_VLAN_HWTSO: c_int = 0x40000; -/// the runtime link state is dynamic -pub const IFCAP_LINKSTATE: c_int = 0x80000; -/// netmap mode supported/enabled -pub const IFCAP_NETMAP: c_int = 0x100000; -/// can offload checksum on IPv6 RX -pub const IFCAP_RXCSUM_IPV6: c_int = 0x200000; -/// can offload checksum on IPv6 TX -pub const 
IFCAP_TXCSUM_IPV6: c_int = 0x400000; -/// manages counters internally -pub const IFCAP_HWSTATS: c_int = 0x800000; -/// hardware supports TX rate limiting -pub const IFCAP_TXRTLMT: c_int = 0x1000000; -/// hardware rx timestamping -pub const IFCAP_HWRXTSTMP: c_int = 0x2000000; -/// understands M_EXTPG mbufs -pub const IFCAP_MEXTPG: c_int = 0x4000000; -/// can do TLS encryption and segmentation for TCP -pub const IFCAP_TXTLS4: c_int = 0x8000000; -/// can do TLS encryption and segmentation for TCP6 -pub const IFCAP_TXTLS6: c_int = 0x10000000; -/// can do IFCAN_HWCSUM on VXLANs -pub const IFCAP_VXLAN_HWCSUM: c_int = 0x20000000; -/// can do IFCAP_TSO on VXLANs -pub const IFCAP_VXLAN_HWTSO: c_int = 0x40000000; -/// can do TLS with rate limiting -pub const IFCAP_TXTLS_RTLMT: c_int = 0x80000000; - -pub const IFCAP_HWCSUM_IPV6: c_int = IFCAP_RXCSUM_IPV6 | IFCAP_TXCSUM_IPV6; -pub const IFCAP_HWCSUM: c_int = IFCAP_RXCSUM | IFCAP_TXCSUM; -pub const IFCAP_TSO: c_int = IFCAP_TSO4 | IFCAP_TSO6; -pub const IFCAP_WOL: c_int = IFCAP_WOL_UCAST | IFCAP_WOL_MCAST | IFCAP_WOL_MAGIC; -pub const IFCAP_TOE: c_int = IFCAP_TOE4 | IFCAP_TOE6; -pub const IFCAP_TXTLS: c_int = IFCAP_TXTLS4 | IFCAP_TXTLS6; -pub const IFCAP_CANTCHANGE: c_int = IFCAP_NETMAP | IFCAP_NV; - -pub const IFQ_MAXLEN: c_int = 50; -pub const IFNET_SLOWHZ: c_int = 1; - -pub const IFAN_ARRIVAL: c_int = 0; -pub const IFAN_DEPARTURE: c_int = 1; - -pub const IFSTATMAX: c_int = 800; - -pub const RSS_FUNC_NONE: c_int = 0; -pub const RSS_FUNC_PRIVATE: c_int = 1; -pub const RSS_FUNC_TOEPLITZ: c_int = 2; - -pub const RSS_TYPE_IPV4: c_int = 0x00000001; -pub const RSS_TYPE_TCP_IPV4: c_int = 0x00000002; -pub const RSS_TYPE_IPV6: c_int = 0x00000004; -pub const RSS_TYPE_IPV6_EX: c_int = 0x00000008; -pub const RSS_TYPE_TCP_IPV6: c_int = 0x00000010; -pub const RSS_TYPE_TCP_IPV6_EX: c_int = 0x00000020; -pub const RSS_TYPE_UDP_IPV4: c_int = 0x00000040; -pub const RSS_TYPE_UDP_IPV6: c_int = 0x00000080; -pub const RSS_TYPE_UDP_IPV6_EX: c_int = 0x00000100; -pub const RSS_KEYLEN: c_int = 128; - -pub const IFNET_PCP_NONE: c_int = 0xff; -pub const IFDR_MSG_SIZE: c_int = 64; -pub const IFDR_REASON_MSG: c_int = 1; -pub const IFDR_REASON_VENDOR: c_int = 2; - -// sys/net/if_mib.h - -/// non-interface-specific -pub const IFMIB_SYSTEM: c_int = 1; -/// per-interface data table -pub const IFMIB_IFDATA: c_int = 2; - -/// generic stats for all kinds of ifaces -pub const IFDATA_GENERAL: c_int = 1; -/// specific to the type of interface -pub const IFDATA_LINKSPECIFIC: c_int = 2; -/// driver name and unit -pub const IFDATA_DRIVERNAME: c_int = 3; - -/// number of interfaces configured -pub const IFMIB_IFCOUNT: c_int = 1; - -/// functions not specific to a type of iface -pub const NETLINK_GENERIC: c_int = 0; - -pub const DOT3COMPLIANCE_STATS: c_int = 1; -pub const DOT3COMPLIANCE_COLLS: c_int = 2; - -pub const dot3ChipSetAMD7990: c_int = 1; -pub const dot3ChipSetAMD79900: c_int = 2; -pub const dot3ChipSetAMD79C940: c_int = 3; - -pub const dot3ChipSetIntel82586: c_int = 1; -pub const dot3ChipSetIntel82596: c_int = 2; -pub const dot3ChipSetIntel82557: c_int = 3; - -pub const dot3ChipSetNational8390: c_int = 1; -pub const dot3ChipSetNationalSonic: c_int = 2; - -pub const dot3ChipSetFujitsu86950: c_int = 1; - -pub const dot3ChipSetDigitalDC21040: c_int = 1; -pub const dot3ChipSetDigitalDC21140: c_int = 2; -pub const dot3ChipSetDigitalDC21041: c_int = 3; -pub const dot3ChipSetDigitalDC21140A: c_int = 4; -pub const dot3ChipSetDigitalDC21142: c_int = 5; - -pub const 
dot3ChipSetWesternDigital83C690: c_int = 1; -pub const dot3ChipSetWesternDigital83C790: c_int = 2; - -// sys/netinet/in.h -// Protocols (RFC 1700) -// NOTE: These are in addition to the constants defined in src/unix/mod.rs - -// IPPROTO_IP defined in src/unix/mod.rs -/// IP6 hop-by-hop options -pub const IPPROTO_HOPOPTS: c_int = 0; -// IPPROTO_ICMP defined in src/unix/mod.rs -/// group mgmt protocol -pub const IPPROTO_IGMP: c_int = 2; -/// gateway^2 (deprecated) -pub const IPPROTO_GGP: c_int = 3; -/// for compatibility -pub const IPPROTO_IPIP: c_int = 4; -// IPPROTO_TCP defined in src/unix/mod.rs -/// Stream protocol II. -pub const IPPROTO_ST: c_int = 7; -/// exterior gateway protocol -pub const IPPROTO_EGP: c_int = 8; -/// private interior gateway -pub const IPPROTO_PIGP: c_int = 9; -/// BBN RCC Monitoring -pub const IPPROTO_RCCMON: c_int = 10; -/// network voice protocol -pub const IPPROTO_NVPII: c_int = 11; -/// pup -pub const IPPROTO_PUP: c_int = 12; -/// Argus -pub const IPPROTO_ARGUS: c_int = 13; -/// EMCON -pub const IPPROTO_EMCON: c_int = 14; -/// Cross Net Debugger -pub const IPPROTO_XNET: c_int = 15; -/// Chaos -pub const IPPROTO_CHAOS: c_int = 16; -// IPPROTO_UDP defined in src/unix/mod.rs -/// Multiplexing -pub const IPPROTO_MUX: c_int = 18; -/// DCN Measurement Subsystems -pub const IPPROTO_MEAS: c_int = 19; -/// Host Monitoring -pub const IPPROTO_HMP: c_int = 20; -/// Packet Radio Measurement -pub const IPPROTO_PRM: c_int = 21; -/// xns idp -pub const IPPROTO_IDP: c_int = 22; -/// Trunk-1 -pub const IPPROTO_TRUNK1: c_int = 23; -/// Trunk-2 -pub const IPPROTO_TRUNK2: c_int = 24; -/// Leaf-1 -pub const IPPROTO_LEAF1: c_int = 25; -/// Leaf-2 -pub const IPPROTO_LEAF2: c_int = 26; -/// Reliable Data -pub const IPPROTO_RDP: c_int = 27; -/// Reliable Transaction -pub const IPPROTO_IRTP: c_int = 28; -/// tp-4 w/ class negotiation -pub const IPPROTO_TP: c_int = 29; -/// Bulk Data Transfer -pub const IPPROTO_BLT: c_int = 30; -/// Network Services -pub const IPPROTO_NSP: c_int = 31; -/// Merit Internodal -pub const IPPROTO_INP: c_int = 32; -#[doc(hidden)] -#[deprecated( - since = "0.2.72", - note = "IPPROTO_SEP is deprecated. Use IPPROTO_DCCP instead" -)] -pub const IPPROTO_SEP: c_int = 33; -/// Datagram Congestion Control Protocol -pub const IPPROTO_DCCP: c_int = 33; -/// Third Party Connect -pub const IPPROTO_3PC: c_int = 34; -/// InterDomain Policy Routing -pub const IPPROTO_IDPR: c_int = 35; -/// XTP -pub const IPPROTO_XTP: c_int = 36; -/// Datagram Delivery -pub const IPPROTO_DDP: c_int = 37; -/// Control Message Transport -pub const IPPROTO_CMTP: c_int = 38; -/// TP++ Transport -pub const IPPROTO_TPXX: c_int = 39; -/// IL transport protocol -pub const IPPROTO_IL: c_int = 40; -// IPPROTO_IPV6 defined in src/unix/mod.rs -/// Source Demand Routing -pub const IPPROTO_SDRP: c_int = 42; -/// IP6 routing header -pub const IPPROTO_ROUTING: c_int = 43; -/// IP6 fragmentation header -pub const IPPROTO_FRAGMENT: c_int = 44; -/// InterDomain Routing -pub const IPPROTO_IDRP: c_int = 45; -/// resource reservation -pub const IPPROTO_RSVP: c_int = 46; -/// General Routing Encap. -pub const IPPROTO_GRE: c_int = 47; -/// Mobile Host Routing -pub const IPPROTO_MHRP: c_int = 48; -/// BHA -pub const IPPROTO_BHA: c_int = 49; -/// IP6 Encap Sec. Payload -pub const IPPROTO_ESP: c_int = 50; -/// IP6 Auth Header -pub const IPPROTO_AH: c_int = 51; -/// Integ. 
Net Layer Security -pub const IPPROTO_INLSP: c_int = 52; -/// IP with encryption -pub const IPPROTO_SWIPE: c_int = 53; -/// Next Hop Resolution -pub const IPPROTO_NHRP: c_int = 54; -/// IP Mobility -pub const IPPROTO_MOBILE: c_int = 55; -/// Transport Layer Security -pub const IPPROTO_TLSP: c_int = 56; -/// SKIP -pub const IPPROTO_SKIP: c_int = 57; -// IPPROTO_ICMPV6 defined in src/unix/mod.rs -/// IP6 no next header -pub const IPPROTO_NONE: c_int = 59; -/// IP6 destination option -pub const IPPROTO_DSTOPTS: c_int = 60; -/// any host internal protocol -pub const IPPROTO_AHIP: c_int = 61; -/// CFTP -pub const IPPROTO_CFTP: c_int = 62; -/// "hello" routing protocol -pub const IPPROTO_HELLO: c_int = 63; -/// SATNET/Backroom EXPAK -pub const IPPROTO_SATEXPAK: c_int = 64; -/// Kryptolan -pub const IPPROTO_KRYPTOLAN: c_int = 65; -/// Remote Virtual Disk -pub const IPPROTO_RVD: c_int = 66; -/// Pluribus Packet Core -pub const IPPROTO_IPPC: c_int = 67; -/// Any distributed FS -pub const IPPROTO_ADFS: c_int = 68; -/// Satnet Monitoring -pub const IPPROTO_SATMON: c_int = 69; -/// VISA Protocol -pub const IPPROTO_VISA: c_int = 70; -/// Packet Core Utility -pub const IPPROTO_IPCV: c_int = 71; -/// Comp. Prot. Net. Executive -pub const IPPROTO_CPNX: c_int = 72; -/// Comp. Prot. HeartBeat -pub const IPPROTO_CPHB: c_int = 73; -/// Wang Span Network -pub const IPPROTO_WSN: c_int = 74; -/// Packet Video Protocol -pub const IPPROTO_PVP: c_int = 75; -/// BackRoom SATNET Monitoring -pub const IPPROTO_BRSATMON: c_int = 76; -/// Sun net disk proto (temp.) -pub const IPPROTO_ND: c_int = 77; -/// WIDEBAND Monitoring -pub const IPPROTO_WBMON: c_int = 78; -/// WIDEBAND EXPAK -pub const IPPROTO_WBEXPAK: c_int = 79; -/// ISO cnlp -pub const IPPROTO_EON: c_int = 80; -/// VMTP -pub const IPPROTO_VMTP: c_int = 81; -/// Secure VMTP -pub const IPPROTO_SVMTP: c_int = 82; -/// Banyon VINES -pub const IPPROTO_VINES: c_int = 83; -/// TTP -pub const IPPROTO_TTP: c_int = 84; -/// NSFNET-IGP -pub const IPPROTO_IGP: c_int = 85; -/// dissimilar gateway prot. -pub const IPPROTO_DGP: c_int = 86; -/// TCF -pub const IPPROTO_TCF: c_int = 87; -/// Cisco/GXS IGRP -pub const IPPROTO_IGRP: c_int = 88; -/// OSPFIGP -pub const IPPROTO_OSPFIGP: c_int = 89; -/// Strite RPC protocol -pub const IPPROTO_SRPC: c_int = 90; -/// Locus Address Resoloution -pub const IPPROTO_LARP: c_int = 91; -/// Multicast Transport -pub const IPPROTO_MTP: c_int = 92; -/// AX.25 Frames -pub const IPPROTO_AX25: c_int = 93; -/// IP encapsulated in IP -pub const IPPROTO_IPEIP: c_int = 94; -/// Mobile Int.ing control -pub const IPPROTO_MICP: c_int = 95; -/// Semaphore Comm. security -pub const IPPROTO_SCCSP: c_int = 96; -/// Ethernet IP encapsulation -pub const IPPROTO_ETHERIP: c_int = 97; -/// encapsulation header -pub const IPPROTO_ENCAP: c_int = 98; -/// any private encr. 
scheme -pub const IPPROTO_APES: c_int = 99; -/// GMTP -pub const IPPROTO_GMTP: c_int = 100; -/// payload compression (IPComp) -pub const IPPROTO_IPCOMP: c_int = 108; -/// SCTP -pub const IPPROTO_SCTP: c_int = 132; -/// IPv6 Mobility Header -pub const IPPROTO_MH: c_int = 135; -/// UDP-Lite -pub const IPPROTO_UDPLITE: c_int = 136; -/// IP6 Host Identity Protocol -pub const IPPROTO_HIP: c_int = 139; -/// IP6 Shim6 Protocol -pub const IPPROTO_SHIM6: c_int = 140; - -/* 101-254: Partly Unassigned */ -/// Protocol Independent Mcast -pub const IPPROTO_PIM: c_int = 103; -/// CARP -pub const IPPROTO_CARP: c_int = 112; -/// PGM -pub const IPPROTO_PGM: c_int = 113; -/// MPLS-in-IP -pub const IPPROTO_MPLS: c_int = 137; -/// PFSYNC -pub const IPPROTO_PFSYNC: c_int = 240; - -/* 255: Reserved */ -/* BSD Private, local use, namespace incursion, no longer used */ -/// OLD divert pseudo-proto -pub const IPPROTO_OLD_DIVERT: c_int = 254; -pub const IPPROTO_MAX: c_int = 256; -/// last return value of *_input(), meaning "all job for this pkt is done". -pub const IPPROTO_DONE: c_int = 257; - -/* Only used internally, so can be outside the range of valid IP protocols. */ -/// divert pseudo-protocol -pub const IPPROTO_DIVERT: c_int = 258; -/// SeND pseudo-protocol -pub const IPPROTO_SEND: c_int = 259; - -// sys/netinet/TCP.h -pub const TCP_MD5SIG: c_int = 16; -pub const TCP_INFO: c_int = 32; -pub const TCP_CONGESTION: c_int = 64; -pub const TCP_CCALGOOPT: c_int = 65; -pub const TCP_MAXUNACKTIME: c_int = 68; -#[deprecated(since = "0.2.160", note = "Removed in FreeBSD 15")] -pub const TCP_MAXPEAKRATE: c_int = 69; -pub const TCP_IDLE_REDUCE: c_int = 70; -pub const TCP_REMOTE_UDP_ENCAPS_PORT: c_int = 71; -pub const TCP_DELACK: c_int = 72; -pub const TCP_FIN_IS_RST: c_int = 73; -pub const TCP_LOG_LIMIT: c_int = 74; -pub const TCP_SHARED_CWND_ALLOWED: c_int = 75; -pub const TCP_PROC_ACCOUNTING: c_int = 76; -pub const TCP_USE_CMP_ACKS: c_int = 77; -pub const TCP_PERF_INFO: c_int = 78; -pub const TCP_LRD: c_int = 79; -pub const TCP_KEEPINIT: c_int = 128; -pub const TCP_FASTOPEN: c_int = 1025; -#[deprecated(since = "0.2.171", note = "removed in FreeBSD 15")] -pub const TCP_PCAP_OUT: c_int = 2048; -#[deprecated(since = "0.2.171", note = "removed in FreeBSD 15")] -pub const TCP_PCAP_IN: c_int = 4096; -pub const TCP_FUNCTION_BLK: c_int = 8192; -pub const TCP_FUNCTION_ALIAS: c_int = 8193; -pub const TCP_FASTOPEN_PSK_LEN: c_int = 16; -pub const TCP_FUNCTION_NAME_LEN_MAX: c_int = 32; - -pub const TCP_REUSPORT_LB_NUMA: c_int = 1026; -pub const TCP_RACK_MBUF_QUEUE: c_int = 1050; -pub const TCP_RACK_TLP_REDUCE: c_int = 1052; -pub const TCP_RACK_PACE_MAX_SEG: c_int = 1054; -pub const TCP_RACK_PACE_ALWAYS: c_int = 1055; -pub const TCP_RACK_PRR_SENDALOT: c_int = 1057; -pub const TCP_RACK_MIN_TO: c_int = 1058; -pub const TCP_RACK_EARLY_SEG: c_int = 1060; -pub const TCP_RACK_REORD_THRESH: c_int = 1061; -pub const TCP_RACK_REORD_FADE: c_int = 1062; -pub const TCP_RACK_TLP_THRESH: c_int = 1063; -pub const TCP_RACK_PKT_DELAY: c_int = 1064; -pub const TCP_BBR_IWINTSO: c_int = 1067; -pub const TCP_BBR_STARTUP_PG: c_int = 1069; -pub const TCP_BBR_DRAIN_PG: c_int = 1070; -pub const TCP_BBR_PROBE_RTT_INT: c_int = 1072; -pub const TCP_BBR_STARTUP_LOSS_EXIT: c_int = 1074; -pub const TCP_BBR_TSLIMITS: c_int = 1076; -pub const TCP_BBR_PACE_OH: c_int = 1077; -pub const TCP_BBR_USEDEL_RATE: c_int = 1079; -pub const TCP_BBR_MIN_RTO: c_int = 1080; -pub const TCP_BBR_MAX_RTO: c_int = 1081; -pub const TCP_BBR_ALGORITHM: c_int = 1083; -pub const 
TCP_BBR_PACE_PER_SEC: c_int = 1086; -pub const TCP_BBR_PACE_DEL_TAR: c_int = 1087; -pub const TCP_BBR_PACE_SEG_MAX: c_int = 1088; -pub const TCP_BBR_PACE_SEG_MIN: c_int = 1089; -pub const TCP_BBR_PACE_CROSS: c_int = 1090; -pub const TCP_BBR_TMR_PACE_OH: c_int = 1096; -pub const TCP_BBR_RACK_RTT_USE: c_int = 1098; -pub const TCP_BBR_RETRAN_WTSO: c_int = 1099; -pub const TCP_BBR_PROBE_RTT_GAIN: c_int = 1101; -pub const TCP_BBR_PROBE_RTT_LEN: c_int = 1102; -pub const TCP_BBR_SEND_IWND_IN_TSO: c_int = 1103; -pub const TCP_BBR_USE_RACK_RR: c_int = 1104; -pub const TCP_BBR_HDWR_PACE: c_int = 1105; -pub const TCP_BBR_UTTER_MAX_TSO: c_int = 1106; -pub const TCP_BBR_EXTRA_STATE: c_int = 1107; -pub const TCP_BBR_FLOOR_MIN_TSO: c_int = 1108; -pub const TCP_BBR_MIN_TOPACEOUT: c_int = 1109; -pub const TCP_BBR_TSTMP_RAISES: c_int = 1110; -pub const TCP_BBR_POLICER_DETECT: c_int = 1111; -pub const TCP_BBR_RACK_INIT_RATE: c_int = 1112; - -pub const IP_BINDANY: c_int = 24; -pub const IP_BINDMULTI: c_int = 25; -pub const IP_RSS_LISTEN_BUCKET: c_int = 26; -pub const IP_ORIGDSTADDR: c_int = 27; -pub const IP_RECVORIGDSTADDR: c_int = IP_ORIGDSTADDR; - -pub const IP_DONTFRAG: c_int = 67; -pub const IP_RECVTOS: c_int = 68; - -pub const IPV6_BINDANY: c_int = 64; -pub const IPV6_ORIGDSTADDR: c_int = 72; -pub const IPV6_RECVORIGDSTADDR: c_int = IPV6_ORIGDSTADDR; - -pub const PF_SLOW: c_int = AF_SLOW; -pub const PF_SCLUSTER: c_int = AF_SCLUSTER; -pub const PF_ARP: c_int = AF_ARP; -pub const PF_BLUETOOTH: c_int = AF_BLUETOOTH; -pub const PF_IEEE80211: c_int = AF_IEEE80211; -pub const PF_INET_SDP: c_int = AF_INET_SDP; -pub const PF_INET6_SDP: c_int = AF_INET6_SDP; - -pub const NET_RT_DUMP: c_int = 1; -pub const NET_RT_FLAGS: c_int = 2; -pub const NET_RT_IFLIST: c_int = 3; -pub const NET_RT_IFMALIST: c_int = 4; -pub const NET_RT_IFLISTL: c_int = 5; - -// System V IPC -pub const IPC_INFO: c_int = 3; -pub const MSG_NOERROR: c_int = 0o10000; -pub const SHM_LOCK: c_int = 11; -pub const SHM_UNLOCK: c_int = 12; -pub const SHM_STAT: c_int = 13; -pub const SHM_INFO: c_int = 14; -pub const SHM_ANON: *mut c_char = 1 as *mut c_char; - -// The *_MAXID constants never should've been used outside of the -// FreeBSD base system. And with the exception of CTL_P1003_1B_MAXID, -// they were all removed in svn r262489. They remain here for backwards -// compatibility only, and are scheduled to be removed in libc 1.0.0. 
-#[doc(hidden)] -#[deprecated(since = "0.2.54", note = "Removed in FreeBSD 11")] -pub const CTL_MAXID: c_int = 10; -#[doc(hidden)] -#[deprecated(since = "0.2.54", note = "Removed in FreeBSD 11")] -pub const KERN_MAXID: c_int = 38; -#[doc(hidden)] -#[deprecated(since = "0.2.54", note = "Removed in FreeBSD 11")] -pub const HW_MAXID: c_int = 13; -#[doc(hidden)] -#[deprecated(since = "0.2.54", note = "Removed in FreeBSD 11")] -pub const USER_MAXID: c_int = 21; -#[doc(hidden)] -#[deprecated(since = "0.2.74", note = "Removed in FreeBSD 13")] -pub const CTL_P1003_1B_MAXID: c_int = 26; - -pub const MSG_NOTIFICATION: c_int = 0x00002000; -pub const MSG_NBIO: c_int = 0x00004000; -pub const MSG_COMPAT: c_int = 0x00008000; -pub const MSG_CMSG_CLOEXEC: c_int = 0x00040000; -pub const MSG_NOSIGNAL: c_int = 0x20000; -pub const MSG_WAITFORONE: c_int = 0x00080000; - -// utmpx entry types -pub const EMPTY: c_short = 0; -pub const BOOT_TIME: c_short = 1; -pub const OLD_TIME: c_short = 2; -pub const NEW_TIME: c_short = 3; -pub const USER_PROCESS: c_short = 4; -pub const INIT_PROCESS: c_short = 5; -pub const LOGIN_PROCESS: c_short = 6; -pub const DEAD_PROCESS: c_short = 7; -pub const SHUTDOWN_TIME: c_short = 8; -// utmp database types -pub const UTXDB_ACTIVE: c_int = 0; -pub const UTXDB_LASTLOGIN: c_int = 1; -pub const UTXDB_LOG: c_int = 2; - -pub const LC_COLLATE_MASK: c_int = 1 << 0; -pub const LC_CTYPE_MASK: c_int = 1 << 1; -pub const LC_MONETARY_MASK: c_int = 1 << 2; -pub const LC_NUMERIC_MASK: c_int = 1 << 3; -pub const LC_TIME_MASK: c_int = 1 << 4; -pub const LC_MESSAGES_MASK: c_int = 1 << 5; -pub const LC_ALL_MASK: c_int = LC_COLLATE_MASK - | LC_CTYPE_MASK - | LC_MESSAGES_MASK - | LC_MONETARY_MASK - | LC_NUMERIC_MASK - | LC_TIME_MASK; - -pub const WSTOPPED: c_int = 2; // same as WUNTRACED -pub const WCONTINUED: c_int = 4; -pub const WNOWAIT: c_int = 8; -pub const WEXITED: c_int = 16; -pub const WTRAPPED: c_int = 32; - -// FreeBSD defines a great many more of these, we only expose the -// standardized ones. 
-pub const P_PID: idtype_t = 0; -pub const P_PGID: idtype_t = 2; -pub const P_ALL: idtype_t = 7; - -pub const UTIME_OMIT: c_long = -2; -pub const UTIME_NOW: c_long = -1; - -pub const B460800: crate::speed_t = 460800; -pub const B921600: crate::speed_t = 921600; - -pub const AT_FDCWD: c_int = -100; -pub const AT_EACCESS: c_int = 0x100; -pub const AT_SYMLINK_NOFOLLOW: c_int = 0x200; -pub const AT_SYMLINK_FOLLOW: c_int = 0x400; -pub const AT_REMOVEDIR: c_int = 0x800; -pub const AT_RESOLVE_BENEATH: c_int = 0x2000; -pub const AT_EMPTY_PATH: c_int = 0x4000; - -pub const AT_NULL: c_int = 0; -pub const AT_IGNORE: c_int = 1; -pub const AT_EXECFD: c_int = 2; -pub const AT_PHDR: c_int = 3; -pub const AT_PHENT: c_int = 4; -pub const AT_PHNUM: c_int = 5; -pub const AT_PAGESZ: c_int = 6; -pub const AT_BASE: c_int = 7; -pub const AT_FLAGS: c_int = 8; -pub const AT_ENTRY: c_int = 9; -pub const AT_NOTELF: c_int = 10; -pub const AT_UID: c_int = 11; -pub const AT_EUID: c_int = 12; -pub const AT_GID: c_int = 13; -pub const AT_EGID: c_int = 14; -pub const AT_EXECPATH: c_int = 15; -pub const AT_CANARY: c_int = 16; -pub const AT_OSRELDATE: c_int = 18; -pub const AT_NCPUS: c_int = 19; -pub const AT_PAGESIZES: c_int = 20; -pub const AT_TIMEKEEP: c_int = 22; -pub const AT_HWCAP: c_int = 25; -pub const AT_HWCAP2: c_int = 26; -pub const AT_USRSTACKBASE: c_int = 35; -pub const AT_USRSTACKLIM: c_int = 36; - -pub const TABDLY: crate::tcflag_t = 0x00000004; -pub const TAB0: crate::tcflag_t = 0x00000000; -pub const TAB3: crate::tcflag_t = 0x00000004; - -pub const _PC_ACL_NFS4: c_int = 64; - -pub const _SC_CPUSET_SIZE: c_int = 122; - -pub const _UUID_NODE_LEN: usize = 6; - -// Flags which can be passed to pdfork(2) -pub const PD_DAEMON: c_int = 0x00000001; -pub const PD_CLOEXEC: c_int = 0x00000002; -pub const PD_ALLOWED_AT_FORK: c_int = PD_DAEMON | PD_CLOEXEC; - -// Values for struct rtprio (type_ field) -pub const RTP_PRIO_REALTIME: c_ushort = 2; -pub const RTP_PRIO_NORMAL: c_ushort = 3; -pub const RTP_PRIO_IDLE: c_ushort = 4; - -// Flags for chflags(2) -pub const UF_SYSTEM: c_ulong = 0x00000080; -pub const UF_SPARSE: c_ulong = 0x00000100; -pub const UF_OFFLINE: c_ulong = 0x00000200; -pub const UF_REPARSE: c_ulong = 0x00000400; -pub const UF_ARCHIVE: c_ulong = 0x00000800; -pub const UF_READONLY: c_ulong = 0x00001000; -pub const UF_HIDDEN: c_ulong = 0x00008000; -pub const SF_SNAPSHOT: c_ulong = 0x00200000; - -// fcntl commands -pub const F_ADD_SEALS: c_int = 19; -pub const F_GET_SEALS: c_int = 20; -pub const F_OGETLK: c_int = 7; -pub const F_OSETLK: c_int = 8; -pub const F_OSETLKW: c_int = 9; -pub const F_RDAHEAD: c_int = 16; -pub const F_READAHEAD: c_int = 15; -pub const F_SETLK_REMOTE: c_int = 14; -pub const F_KINFO: c_int = 22; - -// for use with F_ADD_SEALS -pub const F_SEAL_GROW: c_int = 4; -pub const F_SEAL_SEAL: c_int = 1; -pub const F_SEAL_SHRINK: c_int = 2; -pub const F_SEAL_WRITE: c_int = 8; - -// for use with fspacectl -pub const SPACECTL_DEALLOC: c_int = 1; - -// For realhostname* api -pub const HOSTNAME_FOUND: c_int = 0; -pub const HOSTNAME_INCORRECTNAME: c_int = 1; -pub const HOSTNAME_INVALIDADDR: c_int = 2; -pub const HOSTNAME_INVALIDNAME: c_int = 3; - -// For rfork -pub const RFFDG: c_int = 4; -pub const RFPROC: c_int = 16; -pub const RFMEM: c_int = 32; -pub const RFNOWAIT: c_int = 64; -pub const RFCFDG: c_int = 4096; -pub const RFTHREAD: c_int = 8192; -pub const RFSIGSHARE: c_int = 16384; -pub const RFLINUXTHPN: c_int = 65536; -pub const RFTSIGZMB: c_int = 524288; -pub const RFSPAWN: c_int = 2147483648; - 
-// For eventfd -pub const EFD_SEMAPHORE: c_int = 0x1; -pub const EFD_NONBLOCK: c_int = 0x4; -pub const EFD_CLOEXEC: c_int = 0x100000; - -pub const MALLOCX_ZERO: c_int = 0x40; - -/// size of returned wchan message -pub const WMESGLEN: usize = 8; -/// size of returned lock name -pub const LOCKNAMELEN: usize = 8; -/// size of returned thread name -pub const TDNAMLEN: usize = 16; -/// size of returned ki_comm name -pub const COMMLEN: usize = 19; -/// size of returned ki_emul -pub const KI_EMULNAMELEN: usize = 16; -/// number of groups in ki_groups -pub const KI_NGROUPS: usize = 16; -cfg_if! { - if #[cfg(freebsd11)] { - pub const KI_NSPARE_INT: usize = 4; - } else { - pub const KI_NSPARE_INT: usize = 2; - } -} -pub const KI_NSPARE_LONG: usize = 12; -/// Flags for the process credential. -pub const KI_CRF_CAPABILITY_MODE: usize = 0x00000001; -/// Steal a bit from ki_cr_flags to indicate that the cred had more than -/// KI_NGROUPS groups. -pub const KI_CRF_GRP_OVERFLOW: usize = 0x80000000; -/// controlling tty vnode active -pub const KI_CTTY: usize = 0x00000001; -/// session leader -pub const KI_SLEADER: usize = 0x00000002; -/// proc blocked on lock ki_lockname -pub const KI_LOCKBLOCK: usize = 0x00000004; -/// size of returned ki_login -pub const LOGNAMELEN: usize = 17; -/// size of returned ki_loginclass -pub const LOGINCLASSLEN: usize = 17; - -pub const KF_ATTR_VALID: c_int = 0x0001; -pub const KF_TYPE_NONE: c_int = 0; -pub const KF_TYPE_VNODE: c_int = 1; -pub const KF_TYPE_SOCKET: c_int = 2; -pub const KF_TYPE_PIPE: c_int = 3; -pub const KF_TYPE_FIFO: c_int = 4; -pub const KF_TYPE_KQUEUE: c_int = 5; -pub const KF_TYPE_MQUEUE: c_int = 7; -pub const KF_TYPE_SHM: c_int = 8; -pub const KF_TYPE_SEM: c_int = 9; -pub const KF_TYPE_PTS: c_int = 10; -pub const KF_TYPE_PROCDESC: c_int = 11; -pub const KF_TYPE_DEV: c_int = 12; -pub const KF_TYPE_UNKNOWN: c_int = 255; - -pub const KF_VTYPE_VNON: c_int = 0; -pub const KF_VTYPE_VREG: c_int = 1; -pub const KF_VTYPE_VDIR: c_int = 2; -pub const KF_VTYPE_VBLK: c_int = 3; -pub const KF_VTYPE_VCHR: c_int = 4; -pub const KF_VTYPE_VLNK: c_int = 5; -pub const KF_VTYPE_VSOCK: c_int = 6; -pub const KF_VTYPE_VFIFO: c_int = 7; -pub const KF_VTYPE_VBAD: c_int = 8; -pub const KF_VTYPE_UNKNOWN: c_int = 255; - -/// Current working directory -pub const KF_FD_TYPE_CWD: c_int = -1; -/// Root directory -pub const KF_FD_TYPE_ROOT: c_int = -2; -/// Jail directory -pub const KF_FD_TYPE_JAIL: c_int = -3; -/// Ktrace vnode -pub const KF_FD_TYPE_TRACE: c_int = -4; -pub const KF_FD_TYPE_TEXT: c_int = -5; -/// Controlling terminal -pub const KF_FD_TYPE_CTTY: c_int = -6; -pub const KF_FLAG_READ: c_int = 0x00000001; -pub const KF_FLAG_WRITE: c_int = 0x00000002; -pub const KF_FLAG_APPEND: c_int = 0x00000004; -pub const KF_FLAG_ASYNC: c_int = 0x00000008; -pub const KF_FLAG_FSYNC: c_int = 0x00000010; -pub const KF_FLAG_NONBLOCK: c_int = 0x00000020; -pub const KF_FLAG_DIRECT: c_int = 0x00000040; -pub const KF_FLAG_HASLOCK: c_int = 0x00000080; -pub const KF_FLAG_SHLOCK: c_int = 0x00000100; -pub const KF_FLAG_EXLOCK: c_int = 0x00000200; -pub const KF_FLAG_NOFOLLOW: c_int = 0x00000400; -pub const KF_FLAG_CREAT: c_int = 0x00000800; -pub const KF_FLAG_TRUNC: c_int = 0x00001000; -pub const KF_FLAG_EXCL: c_int = 0x00002000; -pub const KF_FLAG_EXEC: c_int = 0x00004000; - -pub const KVME_TYPE_NONE: c_int = 0; -pub const KVME_TYPE_DEFAULT: c_int = 1; -pub const KVME_TYPE_VNODE: c_int = 2; -pub const KVME_TYPE_SWAP: c_int = 3; -pub const KVME_TYPE_DEVICE: c_int = 4; -pub const KVME_TYPE_PHYS: c_int = 
5; -pub const KVME_TYPE_DEAD: c_int = 6; -pub const KVME_TYPE_SG: c_int = 7; -pub const KVME_TYPE_MGTDEVICE: c_int = 8; -// Present in `sys/user.h` but is undefined for whatever reason... -// pub const KVME_TYPE_GUARD: c_int = 9; -pub const KVME_TYPE_UNKNOWN: c_int = 255; -pub const KVME_PROT_READ: c_int = 0x00000001; -pub const KVME_PROT_WRITE: c_int = 0x00000002; -pub const KVME_PROT_EXEC: c_int = 0x00000004; -pub const KVME_FLAG_COW: c_int = 0x00000001; -pub const KVME_FLAG_NEEDS_COPY: c_int = 0x00000002; -pub const KVME_FLAG_NOCOREDUMP: c_int = 0x00000004; -pub const KVME_FLAG_SUPER: c_int = 0x00000008; -pub const KVME_FLAG_GROWS_UP: c_int = 0x00000010; -pub const KVME_FLAG_GROWS_DOWN: c_int = 0x00000020; -pub const KVME_FLAG_USER_WIRED: c_int = 0x00000040; - -pub const KKST_MAXLEN: c_int = 1024; -/// Stack is valid. -pub const KKST_STATE_STACKOK: c_int = 0; -/// Stack swapped out. -pub const KKST_STATE_SWAPPED: c_int = 1; -pub const KKST_STATE_RUNNING: c_int = 2; - -// Constants about priority. -pub const PRI_MIN: c_int = 0; -pub const PRI_MAX: c_int = 255; -pub const PRI_MIN_ITHD: c_int = PRI_MIN; -#[deprecated(since = "0.2.133", note = "Not stable across OS versions")] -#[allow(deprecated)] -pub const PRI_MAX_ITHD: c_int = PRI_MIN_REALTIME - 1; -pub const PI_REALTIME: c_int = PRI_MIN_ITHD + 0; -#[deprecated(since = "0.2.133", note = "Not stable across OS versions")] -pub const PI_AV: c_int = PRI_MIN_ITHD + 4; -#[deprecated(since = "0.2.133", note = "Not stable across OS versions")] -pub const PI_NET: c_int = PRI_MIN_ITHD + 8; -#[deprecated(since = "0.2.133", note = "Not stable across OS versions")] -pub const PI_DISK: c_int = PRI_MIN_ITHD + 12; -#[deprecated(since = "0.2.133", note = "Not stable across OS versions")] -pub const PI_TTY: c_int = PRI_MIN_ITHD + 16; -#[deprecated(since = "0.2.133", note = "Not stable across OS versions")] -pub const PI_DULL: c_int = PRI_MIN_ITHD + 20; -#[deprecated(since = "0.2.133", note = "Not stable across OS versions")] -pub const PI_SOFT: c_int = PRI_MIN_ITHD + 24; -#[deprecated(since = "0.2.133", note = "Not stable across OS versions")] -pub const PRI_MIN_REALTIME: c_int = 48; -#[deprecated(since = "0.2.133", note = "Not stable across OS versions")] -#[allow(deprecated)] -pub const PRI_MAX_REALTIME: c_int = PRI_MIN_KERN - 1; -#[deprecated(since = "0.2.133", note = "Not stable across OS versions")] -pub const PRI_MIN_KERN: c_int = 80; -#[deprecated(since = "0.2.133", note = "Not stable across OS versions")] -#[allow(deprecated)] -pub const PRI_MAX_KERN: c_int = PRI_MIN_TIMESHARE - 1; -#[deprecated(since = "0.2.133", note = "Not stable across OS versions")] -#[allow(deprecated)] -pub const PSWP: c_int = PRI_MIN_KERN + 0; -#[deprecated(since = "0.2.133", note = "Not stable across OS versions")] -#[allow(deprecated)] -pub const PVM: c_int = PRI_MIN_KERN + 4; -#[deprecated(since = "0.2.133", note = "Not stable across OS versions")] -#[allow(deprecated)] -pub const PINOD: c_int = PRI_MIN_KERN + 8; -#[deprecated(since = "0.2.133", note = "Not stable across OS versions")] -#[allow(deprecated)] -pub const PRIBIO: c_int = PRI_MIN_KERN + 12; -#[deprecated(since = "0.2.133", note = "Not stable across OS versions")] -#[allow(deprecated)] -pub const PVFS: c_int = PRI_MIN_KERN + 16; -#[deprecated(since = "0.2.133", note = "Not stable across OS versions")] -#[allow(deprecated)] -pub const PZERO: c_int = PRI_MIN_KERN + 20; -#[deprecated(since = "0.2.133", note = "Not stable across OS versions")] -#[allow(deprecated)] -pub const PSOCK: c_int = PRI_MIN_KERN + 24; 
-#[deprecated(since = "0.2.133", note = "Not stable across OS versions")] -#[allow(deprecated)] -pub const PWAIT: c_int = PRI_MIN_KERN + 28; -#[deprecated(since = "0.2.133", note = "Not stable across OS versions")] -#[allow(deprecated)] -pub const PLOCK: c_int = PRI_MIN_KERN + 32; -#[deprecated(since = "0.2.133", note = "Not stable across OS versions")] -#[allow(deprecated)] -pub const PPAUSE: c_int = PRI_MIN_KERN + 36; -#[deprecated(since = "0.2.133", note = "Not stable across OS versions")] -pub const PRI_MIN_TIMESHARE: c_int = 120; -pub const PRI_MAX_TIMESHARE: c_int = PRI_MIN_IDLE - 1; -#[deprecated(since = "0.2.133", note = "Not stable across OS versions")] -#[allow(deprecated)] -pub const PUSER: c_int = PRI_MIN_TIMESHARE; -pub const PRI_MIN_IDLE: c_int = 224; -pub const PRI_MAX_IDLE: c_int = PRI_MAX; - -pub const NZERO: c_int = 0; - -// Resource utilization information. -pub const RUSAGE_THREAD: c_int = 1; - -cfg_if! { - if #[cfg(any(freebsd11, target_pointer_width = "32"))] { - pub const ARG_MAX: c_int = 256 * 1024; - } else { - pub const ARG_MAX: c_int = 2 * 256 * 1024; - } -} -pub const CHILD_MAX: c_int = 40; -/// max command name remembered -pub const MAXCOMLEN: usize = 19; -/// max interpreter file name length -pub const MAXINTERP: c_int = crate::PATH_MAX; -/// max login name length (incl. NUL) -pub const MAXLOGNAME: c_int = 33; -/// max simultaneous processes -pub const MAXUPRC: c_int = CHILD_MAX; -/// max bytes for an exec function -pub const NCARGS: c_int = ARG_MAX; -/// /* max number groups -pub const NGROUPS: c_int = NGROUPS_MAX + 1; -/// max open files per process -pub const NOFILE: c_int = OPEN_MAX; -/// marker for empty group set member -pub const NOGROUP: c_int = 65535; -/// max hostname size -pub const MAXHOSTNAMELEN: c_int = 256; -/// max bytes in term canon input line -pub const MAX_CANON: c_int = 255; -/// max bytes in terminal input -pub const MAX_INPUT: c_int = 255; -/// max bytes in a file name -pub const NAME_MAX: c_int = 255; -pub const MAXSYMLINKS: c_int = 32; -/// max supplemental group id's -pub const NGROUPS_MAX: c_int = 1023; -/// max open files per process -pub const OPEN_MAX: c_int = 64; - -pub const _POSIX_ARG_MAX: c_int = 4096; -pub const _POSIX_LINK_MAX: c_int = 8; -pub const _POSIX_MAX_CANON: c_int = 255; -pub const _POSIX_MAX_INPUT: c_int = 255; -pub const _POSIX_NAME_MAX: c_int = 14; -pub const _POSIX_PIPE_BUF: c_int = 512; -pub const _POSIX_SSIZE_MAX: c_int = 32767; -pub const _POSIX_STREAM_MAX: c_int = 8; - -/// max ibase/obase values in bc(1) -pub const BC_BASE_MAX: c_int = 99; -/// max array elements in bc(1) -pub const BC_DIM_MAX: c_int = 2048; -/// max scale value in bc(1) -pub const BC_SCALE_MAX: c_int = 99; -/// max const string length in bc(1) -pub const BC_STRING_MAX: c_int = 1000; -/// max character class name size -pub const CHARCLASS_NAME_MAX: c_int = 14; -/// max weights for order keyword -pub const COLL_WEIGHTS_MAX: c_int = 10; -/// max expressions nested in expr(1) -pub const EXPR_NEST_MAX: c_int = 32; -/// max bytes in an input line -pub const LINE_MAX: c_int = 2048; -/// max RE's in interval notation -pub const RE_DUP_MAX: c_int = 255; - -pub const _POSIX2_BC_BASE_MAX: c_int = 99; -pub const _POSIX2_BC_DIM_MAX: c_int = 2048; -pub const _POSIX2_BC_SCALE_MAX: c_int = 99; -pub const _POSIX2_BC_STRING_MAX: c_int = 1000; -pub const _POSIX2_CHARCLASS_NAME_MAX: c_int = 14; -pub const _POSIX2_COLL_WEIGHTS_MAX: c_int = 2; -pub const _POSIX2_EQUIV_CLASS_MAX: c_int = 2; -pub const _POSIX2_EXPR_NEST_MAX: c_int = 32; -pub const 
_POSIX2_LINE_MAX: c_int = 2048; -pub const _POSIX2_RE_DUP_MAX: c_int = 255; - -// sys/proc.h -pub const TDF_BORROWING: c_int = 0x00000001; -pub const TDF_INPANIC: c_int = 0x00000002; -pub const TDF_INMEM: c_int = 0x00000004; -pub const TDF_SINTR: c_int = 0x00000008; -pub const TDF_TIMEOUT: c_int = 0x00000010; -pub const TDF_IDLETD: c_int = 0x00000020; -pub const TDF_CANSWAP: c_int = 0x00000040; -pub const TDF_KTH_SUSP: c_int = 0x00000100; -pub const TDF_ALLPROCSUSP: c_int = 0x00000200; -pub const TDF_BOUNDARY: c_int = 0x00000400; -#[deprecated(since = "0.2.133", note = "Not stable across OS versions")] -pub const TDF_ASTPENDING: c_int = 0x00000800; -pub const TDF_SBDRY: c_int = 0x00002000; -pub const TDF_UPIBLOCKED: c_int = 0x00004000; -#[deprecated(since = "0.2.133", note = "Not stable across OS versions")] -pub const TDF_NEEDSUSPCHK: c_int = 0x00008000; -#[deprecated(since = "0.2.133", note = "Not stable across OS versions")] -pub const TDF_NEEDRESCHED: c_int = 0x00010000; -#[deprecated(since = "0.2.133", note = "Not stable across OS versions")] -pub const TDF_NEEDSIGCHK: c_int = 0x00020000; -pub const TDF_NOLOAD: c_int = 0x00040000; -pub const TDF_SERESTART: c_int = 0x00080000; -pub const TDF_THRWAKEUP: c_int = 0x00100000; -pub const TDF_SEINTR: c_int = 0x00200000; -pub const TDF_SWAPINREQ: c_int = 0x00400000; -#[deprecated(since = "0.2.133", note = "Removed in FreeBSD 14")] -pub const TDF_UNUSED23: c_int = 0x00800000; -pub const TDF_SCHED0: c_int = 0x01000000; -pub const TDF_SCHED1: c_int = 0x02000000; -pub const TDF_SCHED2: c_int = 0x04000000; -pub const TDF_SCHED3: c_int = 0x08000000; -#[deprecated(since = "0.2.133", note = "Not stable across OS versions")] -pub const TDF_ALRMPEND: c_int = 0x10000000; -#[deprecated(since = "0.2.133", note = "Not stable across OS versions")] -pub const TDF_PROFPEND: c_int = 0x20000000; -#[deprecated(since = "0.2.133", note = "Not stable across OS versions")] -pub const TDF_MACPEND: c_int = 0x40000000; - -pub const TDB_SUSPEND: c_int = 0x00000001; -pub const TDB_XSIG: c_int = 0x00000002; -pub const TDB_USERWR: c_int = 0x00000004; -pub const TDB_SCE: c_int = 0x00000008; -pub const TDB_SCX: c_int = 0x00000010; -pub const TDB_EXEC: c_int = 0x00000020; -pub const TDB_FORK: c_int = 0x00000040; -pub const TDB_STOPATFORK: c_int = 0x00000080; -pub const TDB_CHILD: c_int = 0x00000100; -pub const TDB_BORN: c_int = 0x00000200; -pub const TDB_EXIT: c_int = 0x00000400; -pub const TDB_VFORK: c_int = 0x00000800; -pub const TDB_FSTP: c_int = 0x00001000; -pub const TDB_STEP: c_int = 0x00002000; - -pub const TDP_OLDMASK: c_int = 0x00000001; -pub const TDP_INKTR: c_int = 0x00000002; -pub const TDP_INKTRACE: c_int = 0x00000004; -pub const TDP_BUFNEED: c_int = 0x00000008; -pub const TDP_COWINPROGRESS: c_int = 0x00000010; -pub const TDP_ALTSTACK: c_int = 0x00000020; -pub const TDP_DEADLKTREAT: c_int = 0x00000040; -pub const TDP_NOFAULTING: c_int = 0x00000080; -pub const TDP_OWEUPC: c_int = 0x00000200; -pub const TDP_ITHREAD: c_int = 0x00000400; -pub const TDP_SYNCIO: c_int = 0x00000800; -pub const TDP_SCHED1: c_int = 0x00001000; -pub const TDP_SCHED2: c_int = 0x00002000; -pub const TDP_SCHED3: c_int = 0x00004000; -pub const TDP_SCHED4: c_int = 0x00008000; -pub const TDP_GEOM: c_int = 0x00010000; -pub const TDP_SOFTDEP: c_int = 0x00020000; -pub const TDP_NORUNNINGBUF: c_int = 0x00040000; -pub const TDP_WAKEUP: c_int = 0x00080000; -pub const TDP_INBDFLUSH: c_int = 0x00100000; -pub const TDP_KTHREAD: c_int = 0x00200000; -pub const TDP_CALLCHAIN: c_int = 0x00400000; -pub const 
TDP_IGNSUSP: c_int = 0x00800000; -pub const TDP_AUDITREC: c_int = 0x01000000; -pub const TDP_RFPPWAIT: c_int = 0x02000000; -pub const TDP_RESETSPUR: c_int = 0x04000000; -pub const TDP_NERRNO: c_int = 0x08000000; -pub const TDP_EXECVMSPC: c_int = 0x40000000; - -pub const TDI_SUSPENDED: c_int = 0x0001; -pub const TDI_SLEEPING: c_int = 0x0002; -pub const TDI_SWAPPED: c_int = 0x0004; -pub const TDI_LOCK: c_int = 0x0008; -pub const TDI_IWAIT: c_int = 0x0010; - -pub const P_ADVLOCK: c_int = 0x00000001; -pub const P_CONTROLT: c_int = 0x00000002; -pub const P_KPROC: c_int = 0x00000004; -#[deprecated(since = "1.0", note = "Replaced in FreeBSD 15 by P_IDLEPROC")] -pub const P_UNUSED3: c_int = 0x00000008; -#[cfg(freebsd15)] -pub const P_IDLEPROC: c_int = 0x00000008; -pub const P_PPWAIT: c_int = 0x00000010; -pub const P_PROFIL: c_int = 0x00000020; -pub const P_STOPPROF: c_int = 0x00000040; -pub const P_HADTHREADS: c_int = 0x00000080; -pub const P_SUGID: c_int = 0x00000100; -pub const P_SYSTEM: c_int = 0x00000200; -pub const P_SINGLE_EXIT: c_int = 0x00000400; -pub const P_TRACED: c_int = 0x00000800; -pub const P_WAITED: c_int = 0x00001000; -pub const P_WEXIT: c_int = 0x00002000; -pub const P_EXEC: c_int = 0x00004000; -pub const P_WKILLED: c_int = 0x00008000; -pub const P_CONTINUED: c_int = 0x00010000; -pub const P_STOPPED_SIG: c_int = 0x00020000; -pub const P_STOPPED_TRACE: c_int = 0x00040000; -pub const P_STOPPED_SINGLE: c_int = 0x00080000; -pub const P_PROTECTED: c_int = 0x00100000; -pub const P_SIGEVENT: c_int = 0x00200000; -pub const P_SINGLE_BOUNDARY: c_int = 0x00400000; -pub const P_HWPMC: c_int = 0x00800000; -pub const P_JAILED: c_int = 0x01000000; -pub const P_TOTAL_STOP: c_int = 0x02000000; -pub const P_INEXEC: c_int = 0x04000000; -pub const P_STATCHILD: c_int = 0x08000000; -pub const P_INMEM: c_int = 0x10000000; -pub const P_SWAPPINGOUT: c_int = 0x20000000; -pub const P_SWAPPINGIN: c_int = 0x40000000; -pub const P_PPTRACE: c_int = 0x80000000; -pub const P_STOPPED: c_int = P_STOPPED_SIG | P_STOPPED_SINGLE | P_STOPPED_TRACE; - -pub const P2_INHERIT_PROTECTED: c_int = 0x00000001; -pub const P2_NOTRACE: c_int = 0x00000002; -pub const P2_NOTRACE_EXEC: c_int = 0x00000004; -pub const P2_AST_SU: c_int = 0x00000008; -pub const P2_PTRACE_FSTP: c_int = 0x00000010; -pub const P2_TRAPCAP: c_int = 0x00000020; -pub const P2_STKGAP_DISABLE: c_int = 0x00000800; -pub const P2_STKGAP_DISABLE_EXEC: c_int = 0x00001000; - -pub const P_TREE_ORPHANED: c_int = 0x00000001; -pub const P_TREE_FIRST_ORPHAN: c_int = 0x00000002; -pub const P_TREE_REAPER: c_int = 0x00000004; - -pub const SIDL: c_char = 1; -pub const SRUN: c_char = 2; -pub const SSLEEP: c_char = 3; -pub const SSTOP: c_char = 4; -pub const SZOMB: c_char = 5; -pub const SWAIT: c_char = 6; -pub const SLOCK: c_char = 7; - -pub const P_MAGIC: c_int = 0xbeefface; - -pub const TDP_SIGFASTBLOCK: c_int = 0x00000100; -pub const TDP_UIOHELD: c_int = 0x10000000; -pub const TDP_SIGFASTPENDING: c_int = 0x80000000; -pub const TDP2_COMPAT32RB: c_int = 0x00000002; -pub const P2_PROTMAX_ENABLE: c_int = 0x00000200; -pub const P2_PROTMAX_DISABLE: c_int = 0x00000400; -pub const TDP2_SBPAGES: c_int = 0x00000001; -pub const P2_ASLR_ENABLE: c_int = 0x00000040; -pub const P2_ASLR_DISABLE: c_int = 0x00000080; -pub const P2_ASLR_IGNSTART: c_int = 0x00000100; -pub const P_TREE_GRPEXITED: c_int = 0x00000008; - -// libprocstat.h -pub const PS_FST_VTYPE_VNON: c_int = 1; -pub const PS_FST_VTYPE_VREG: c_int = 2; -pub const PS_FST_VTYPE_VDIR: c_int = 3; -pub const PS_FST_VTYPE_VBLK: c_int = 
4; -pub const PS_FST_VTYPE_VCHR: c_int = 5; -pub const PS_FST_VTYPE_VLNK: c_int = 6; -pub const PS_FST_VTYPE_VSOCK: c_int = 7; -pub const PS_FST_VTYPE_VFIFO: c_int = 8; -pub const PS_FST_VTYPE_VBAD: c_int = 9; -pub const PS_FST_VTYPE_UNKNOWN: c_int = 255; - -pub const PS_FST_TYPE_VNODE: c_int = 1; -pub const PS_FST_TYPE_FIFO: c_int = 2; -pub const PS_FST_TYPE_SOCKET: c_int = 3; -pub const PS_FST_TYPE_PIPE: c_int = 4; -pub const PS_FST_TYPE_PTS: c_int = 5; -pub const PS_FST_TYPE_KQUEUE: c_int = 6; -pub const PS_FST_TYPE_MQUEUE: c_int = 8; -pub const PS_FST_TYPE_SHM: c_int = 9; -pub const PS_FST_TYPE_SEM: c_int = 10; -pub const PS_FST_TYPE_UNKNOWN: c_int = 11; -pub const PS_FST_TYPE_NONE: c_int = 12; -pub const PS_FST_TYPE_PROCDESC: c_int = 13; -pub const PS_FST_TYPE_DEV: c_int = 14; -pub const PS_FST_TYPE_EVENTFD: c_int = 15; - -pub const PS_FST_UFLAG_RDIR: c_int = 0x0001; -pub const PS_FST_UFLAG_CDIR: c_int = 0x0002; -pub const PS_FST_UFLAG_JAIL: c_int = 0x0004; -pub const PS_FST_UFLAG_TRACE: c_int = 0x0008; -pub const PS_FST_UFLAG_TEXT: c_int = 0x0010; -pub const PS_FST_UFLAG_MMAP: c_int = 0x0020; -pub const PS_FST_UFLAG_CTTY: c_int = 0x0040; - -pub const PS_FST_FFLAG_READ: c_int = 0x0001; -pub const PS_FST_FFLAG_WRITE: c_int = 0x0002; -pub const PS_FST_FFLAG_NONBLOCK: c_int = 0x0004; -pub const PS_FST_FFLAG_APPEND: c_int = 0x0008; -pub const PS_FST_FFLAG_SHLOCK: c_int = 0x0010; -pub const PS_FST_FFLAG_EXLOCK: c_int = 0x0020; -pub const PS_FST_FFLAG_ASYNC: c_int = 0x0040; -pub const PS_FST_FFLAG_SYNC: c_int = 0x0080; -pub const PS_FST_FFLAG_NOFOLLOW: c_int = 0x0100; -pub const PS_FST_FFLAG_CREAT: c_int = 0x0200; -pub const PS_FST_FFLAG_TRUNC: c_int = 0x0400; -pub const PS_FST_FFLAG_EXCL: c_int = 0x0800; -pub const PS_FST_FFLAG_DIRECT: c_int = 0x1000; -pub const PS_FST_FFLAG_EXEC: c_int = 0x2000; -pub const PS_FST_FFLAG_HASLOCK: c_int = 0x4000; - -// sys/mount.h - -/// File identifier. -/// These are unique per filesystem on a single machine. -/// -/// Note that the offset of fid_data is 4 bytes, so care must be taken to avoid -/// undefined behavior accessing unaligned fields within an embedded struct. -pub const MAXFIDSZ: c_int = 16; -/// Length of type name including null. -pub const MFSNAMELEN: c_int = 16; -cfg_if! { - if #[cfg(any(freebsd10, freebsd11))] { - /// Size of on/from name bufs. - pub const MNAMELEN: c_int = 88; - } else { - /// Size of on/from name bufs. - pub const MNAMELEN: c_int = 1024; - } -} - -/// Using journaled soft updates. -pub const MNT_SUJ: u64 = 0x100000000; -/// Mounted by automountd(8). -pub const MNT_AUTOMOUNTED: u64 = 0x200000000; -/// Filesys metadata untrusted. -pub const MNT_UNTRUSTED: u64 = 0x800000000; - -/// Require TLS. -pub const MNT_EXTLS: u64 = 0x4000000000; -/// Require TLS with client cert. -pub const MNT_EXTLSCERT: u64 = 0x8000000000; -/// Require TLS with user cert. -pub const MNT_EXTLSCERTUSER: u64 = 0x10000000000; - -/// Filesystem is stored locally. -pub const MNT_LOCAL: u64 = 0x000001000; -/// Quotas are enabled on fs. -pub const MNT_QUOTA: u64 = 0x000002000; -/// Identifies the root fs. -pub const MNT_ROOTFS: u64 = 0x000004000; -/// Mounted by a user. -pub const MNT_USER: u64 = 0x000008000; -/// Do not show entry in df. -pub const MNT_IGNORE: u64 = 0x000800000; -/// Filesystem is verified. -pub const MNT_VERIFIED: u64 = 0x400000000; - -/// Do not cover a mount point. -pub const MNT_NOCOVER: u64 = 0x001000000000; -/// Only mount on empty dir. -pub const MNT_EMPTYDIR: u64 = 0x002000000000; -/// Recursively unmount uppers. 
-pub const MNT_RECURSE: u64 = 0x100000000000; -/// Unmount in async context. -pub const MNT_DEFERRED: u64 = 0x200000000000; - -/// Get configured filesystems. -pub const VFS_VFSCONF: c_int = 0; -/// Generic filesystem information. -pub const VFS_GENERIC: c_int = 0; - -/// int: highest defined filesystem type. -pub const VFS_MAXTYPENUM: c_int = 1; -/// struct: vfsconf for filesystem given as next argument. -pub const VFS_CONF: c_int = 2; - -/// Synchronously wait for I/O to complete. -pub const MNT_WAIT: c_int = 1; -/// Start all I/O, but do not wait for it. -pub const MNT_NOWAIT: c_int = 2; -/// Push data not written by filesystem syncer. -pub const MNT_LAZY: c_int = 3; -/// Suspend file system after sync. -pub const MNT_SUSPEND: c_int = 4; - -pub const MAXSECFLAVORS: c_int = 5; - -/// Statically compiled into kernel. -pub const VFCF_STATIC: c_int = 0x00010000; -/// May get data over the network. -pub const VFCF_NETWORK: c_int = 0x00020000; -/// Writes are not implemented. -pub const VFCF_READONLY: c_int = 0x00040000; -/// Data does not represent real files. -pub const VFCF_SYNTHETIC: c_int = 0x00080000; -/// Aliases some other mounted FS. -pub const VFCF_LOOPBACK: c_int = 0x00100000; -/// Stores file names as Unicode. -pub const VFCF_UNICODE: c_int = 0x00200000; -/// Can be mounted from within a jail. -pub const VFCF_JAIL: c_int = 0x00400000; -/// Supports delegated administration. -pub const VFCF_DELEGADMIN: c_int = 0x00800000; -/// Stop at Boundary: defer stop requests to kernel->user (AST) transition. -pub const VFCF_SBDRY: c_int = 0x01000000; - -// time.h - -/// not on dst -pub const DST_NONE: c_int = 0; -/// USA style dst -pub const DST_USA: c_int = 1; -/// Australian style dst -pub const DST_AUST: c_int = 2; -/// Western European dst -pub const DST_WET: c_int = 3; -/// Middle European dst -pub const DST_MET: c_int = 4; -/// Eastern European dst -pub const DST_EET: c_int = 5; -/// Canada -pub const DST_CAN: c_int = 6; - -pub const CPUCLOCK_WHICH_PID: c_int = 0; -pub const CPUCLOCK_WHICH_TID: c_int = 1; - -pub const MFD_CLOEXEC: c_uint = 0x00000001; -pub const MFD_ALLOW_SEALING: c_uint = 0x00000002; -pub const MFD_HUGETLB: c_uint = 0x00000004; -pub const MFD_HUGE_MASK: c_uint = 0xFC000000; -pub const MFD_HUGE_64KB: c_uint = 16 << 26; -pub const MFD_HUGE_512KB: c_uint = 19 << 26; -pub const MFD_HUGE_1MB: c_uint = 20 << 26; -pub const MFD_HUGE_2MB: c_uint = 21 << 26; -pub const MFD_HUGE_8MB: c_uint = 23 << 26; -pub const MFD_HUGE_16MB: c_uint = 24 << 26; -pub const MFD_HUGE_32MB: c_uint = 25 << 26; -pub const MFD_HUGE_256MB: c_uint = 28 << 26; -pub const MFD_HUGE_512MB: c_uint = 29 << 26; -pub const MFD_HUGE_1GB: c_uint = 30 << 26; -pub const MFD_HUGE_2GB: c_uint = 31 << 26; -pub const MFD_HUGE_16GB: c_uint = 34 << 26; - -pub const SHM_LARGEPAGE_ALLOC_DEFAULT: c_int = 0; -pub const SHM_LARGEPAGE_ALLOC_NOWAIT: c_int = 1; -pub const SHM_LARGEPAGE_ALLOC_HARD: c_int = 2; -pub const SHM_RENAME_NOREPLACE: c_int = 1 << 0; -pub const SHM_RENAME_EXCHANGE: c_int = 1 << 1; - -// sys/umtx.h - -pub const UMTX_OP_WAIT: c_int = 2; -pub const UMTX_OP_WAKE: c_int = 3; -pub const UMTX_OP_MUTEX_TRYLOCK: c_int = 4; -pub const UMTX_OP_MUTEX_LOCK: c_int = 5; -pub const UMTX_OP_MUTEX_UNLOCK: c_int = 6; -pub const UMTX_OP_SET_CEILING: c_int = 7; -pub const UMTX_OP_CV_WAIT: c_int = 8; -pub const UMTX_OP_CV_SIGNAL: c_int = 9; -pub const UMTX_OP_CV_BROADCAST: c_int = 10; -pub const UMTX_OP_WAIT_UINT: c_int = 11; -pub const UMTX_OP_RW_RDLOCK: c_int = 12; -pub const UMTX_OP_RW_WRLOCK: c_int = 13; -pub const 
UMTX_OP_RW_UNLOCK: c_int = 14; -pub const UMTX_OP_WAIT_UINT_PRIVATE: c_int = 15; -pub const UMTX_OP_WAKE_PRIVATE: c_int = 16; -pub const UMTX_OP_MUTEX_WAIT: c_int = 17; -pub const UMTX_OP_NWAKE_PRIVATE: c_int = 21; -pub const UMTX_OP_MUTEX_WAKE2: c_int = 22; -pub const UMTX_OP_SEM2_WAIT: c_int = 23; -pub const UMTX_OP_SEM2_WAKE: c_int = 24; -pub const UMTX_OP_SHM: c_int = 25; -pub const UMTX_OP_ROBUST_LISTS: c_int = 26; - -pub const UMTX_ABSTIME: u32 = 1; - -pub const CPU_LEVEL_ROOT: c_int = 1; -pub const CPU_LEVEL_CPUSET: c_int = 2; -pub const CPU_LEVEL_WHICH: c_int = 3; - -pub const CPU_WHICH_TID: c_int = 1; -pub const CPU_WHICH_PID: c_int = 2; -pub const CPU_WHICH_CPUSET: c_int = 3; -pub const CPU_WHICH_IRQ: c_int = 4; -pub const CPU_WHICH_JAIL: c_int = 5; - -// net/route.h -pub const RTF_LLDATA: c_int = 0x400; -pub const RTF_FIXEDMTU: c_int = 0x80000; - -pub const RTM_VERSION: c_int = 5; - -pub const RTAX_MAX: c_int = 8; - -// sys/signal.h -pub const SIGTHR: c_int = 32; -pub const SIGLWP: c_int = SIGTHR; -pub const SIGLIBRT: c_int = 33; - -// netinet/sctp.h -pub const SCTP_FUTURE_ASSOC: c_int = 0; -pub const SCTP_CURRENT_ASSOC: c_int = 1; -pub const SCTP_ALL_ASSOC: c_int = 2; - -pub const SCTP_NO_NEXT_MSG: c_int = 0x0000; -pub const SCTP_NEXT_MSG_AVAIL: c_int = 0x0001; -pub const SCTP_NEXT_MSG_ISCOMPLETE: c_int = 0x0002; -pub const SCTP_NEXT_MSG_IS_UNORDERED: c_int = 0x0004; -pub const SCTP_NEXT_MSG_IS_NOTIFICATION: c_int = 0x0008; - -pub const SCTP_RECVV_NOINFO: c_int = 0; -pub const SCTP_RECVV_RCVINFO: c_int = 1; -pub const SCTP_RECVV_NXTINFO: c_int = 2; -pub const SCTP_RECVV_RN: c_int = 3; - -pub const SCTP_SENDV_NOINFO: c_int = 0; -pub const SCTP_SENDV_SNDINFO: c_int = 1; -pub const SCTP_SENDV_PRINFO: c_int = 2; -pub const SCTP_SENDV_AUTHINFO: c_int = 3; -pub const SCTP_SENDV_SPA: c_int = 4; - -pub const SCTP_SEND_SNDINFO_VALID: c_int = 0x00000001; -pub const SCTP_SEND_PRINFO_VALID: c_int = 0x00000002; -pub const SCTP_SEND_AUTHINFO_VALID: c_int = 0x00000004; - -pub const SCTP_NOTIFICATION: c_int = 0x0010; -pub const SCTP_COMPLETE: c_int = 0x0020; -pub const SCTP_EOF: c_int = 0x0100; -pub const SCTP_ABORT: c_int = 0x0200; -pub const SCTP_UNORDERED: c_int = 0x0400; -pub const SCTP_ADDR_OVER: c_int = 0x0800; -pub const SCTP_SENDALL: c_int = 0x1000; -pub const SCTP_EOR: c_int = 0x2000; -pub const SCTP_SACK_IMMEDIATELY: c_int = 0x4000; -pub const SCTP_PR_SCTP_NONE: c_int = 0x0000; -pub const SCTP_PR_SCTP_TTL: c_int = 0x0001; -pub const SCTP_PR_SCTP_PRIO: c_int = 0x0002; -pub const SCTP_PR_SCTP_BUF: c_int = SCTP_PR_SCTP_PRIO; -pub const SCTP_PR_SCTP_RTX: c_int = 0x0003; -pub const SCTP_PR_SCTP_MAX: c_int = SCTP_PR_SCTP_RTX; -pub const SCTP_PR_SCTP_ALL: c_int = 0x000f; - -pub const SCTP_INIT: c_int = 0x0001; -pub const SCTP_SNDRCV: c_int = 0x0002; -pub const SCTP_EXTRCV: c_int = 0x0003; -pub const SCTP_SNDINFO: c_int = 0x0004; -pub const SCTP_RCVINFO: c_int = 0x0005; -pub const SCTP_NXTINFO: c_int = 0x0006; -pub const SCTP_PRINFO: c_int = 0x0007; -pub const SCTP_AUTHINFO: c_int = 0x0008; -pub const SCTP_DSTADDRV4: c_int = 0x0009; -pub const SCTP_DSTADDRV6: c_int = 0x000a; - -pub const SCTP_RTOINFO: c_int = 0x00000001; -pub const SCTP_ASSOCINFO: c_int = 0x00000002; -pub const SCTP_INITMSG: c_int = 0x00000003; -pub const SCTP_NODELAY: c_int = 0x00000004; -pub const SCTP_AUTOCLOSE: c_int = 0x00000005; -pub const SCTP_SET_PEER_PRIMARY_ADDR: c_int = 0x00000006; -pub const SCTP_PRIMARY_ADDR: c_int = 0x00000007; -pub const SCTP_ADAPTATION_LAYER: c_int = 0x00000008; -pub const 
SCTP_ADAPTION_LAYER: c_int = 0x00000008; -pub const SCTP_DISABLE_FRAGMENTS: c_int = 0x00000009; -pub const SCTP_PEER_ADDR_PARAMS: c_int = 0x0000000a; -pub const SCTP_DEFAULT_SEND_PARAM: c_int = 0x0000000b; -pub const SCTP_EVENTS: c_int = 0x0000000c; -pub const SCTP_I_WANT_MAPPED_V4_ADDR: c_int = 0x0000000d; -pub const SCTP_MAXSEG: c_int = 0x0000000e; -pub const SCTP_DELAYED_SACK: c_int = 0x0000000f; -pub const SCTP_FRAGMENT_INTERLEAVE: c_int = 0x00000010; -pub const SCTP_PARTIAL_DELIVERY_POINT: c_int = 0x00000011; -pub const SCTP_AUTH_CHUNK: c_int = 0x00000012; -pub const SCTP_AUTH_KEY: c_int = 0x00000013; -pub const SCTP_HMAC_IDENT: c_int = 0x00000014; -pub const SCTP_AUTH_ACTIVE_KEY: c_int = 0x00000015; -pub const SCTP_AUTH_DELETE_KEY: c_int = 0x00000016; -pub const SCTP_USE_EXT_RCVINFO: c_int = 0x00000017; -pub const SCTP_AUTO_ASCONF: c_int = 0x00000018; -pub const SCTP_MAXBURST: c_int = 0x00000019; -pub const SCTP_MAX_BURST: c_int = 0x00000019; -pub const SCTP_CONTEXT: c_int = 0x0000001a; -pub const SCTP_EXPLICIT_EOR: c_int = 0x00000001b; -pub const SCTP_REUSE_PORT: c_int = 0x00000001c; -pub const SCTP_AUTH_DEACTIVATE_KEY: c_int = 0x00000001d; -pub const SCTP_EVENT: c_int = 0x0000001e; -pub const SCTP_RECVRCVINFO: c_int = 0x0000001f; -pub const SCTP_RECVNXTINFO: c_int = 0x00000020; -pub const SCTP_DEFAULT_SNDINFO: c_int = 0x00000021; -pub const SCTP_DEFAULT_PRINFO: c_int = 0x00000022; -pub const SCTP_PEER_ADDR_THLDS: c_int = 0x00000023; -pub const SCTP_REMOTE_UDP_ENCAPS_PORT: c_int = 0x00000024; -pub const SCTP_ECN_SUPPORTED: c_int = 0x00000025; -pub const SCTP_AUTH_SUPPORTED: c_int = 0x00000027; -pub const SCTP_ASCONF_SUPPORTED: c_int = 0x00000028; -pub const SCTP_RECONFIG_SUPPORTED: c_int = 0x00000029; -pub const SCTP_NRSACK_SUPPORTED: c_int = 0x00000030; -pub const SCTP_PKTDROP_SUPPORTED: c_int = 0x00000031; -pub const SCTP_MAX_CWND: c_int = 0x00000032; - -pub const SCTP_STATUS: c_int = 0x00000100; -pub const SCTP_GET_PEER_ADDR_INFO: c_int = 0x00000101; -pub const SCTP_PEER_AUTH_CHUNKS: c_int = 0x00000102; -pub const SCTP_LOCAL_AUTH_CHUNKS: c_int = 0x00000103; -pub const SCTP_GET_ASSOC_NUMBER: c_int = 0x00000104; -pub const SCTP_GET_ASSOC_ID_LIST: c_int = 0x00000105; -pub const SCTP_TIMEOUTS: c_int = 0x00000106; -pub const SCTP_PR_STREAM_STATUS: c_int = 0x00000107; -pub const SCTP_PR_ASSOC_STATUS: c_int = 0x00000108; - -pub const SCTP_COMM_UP: c_int = 0x0001; -pub const SCTP_COMM_LOST: c_int = 0x0002; -pub const SCTP_RESTART: c_int = 0x0003; -pub const SCTP_SHUTDOWN_COMP: c_int = 0x0004; -pub const SCTP_CANT_STR_ASSOC: c_int = 0x0005; - -pub const SCTP_ASSOC_SUPPORTS_PR: c_int = 0x01; -pub const SCTP_ASSOC_SUPPORTS_AUTH: c_int = 0x02; -pub const SCTP_ASSOC_SUPPORTS_ASCONF: c_int = 0x03; -pub const SCTP_ASSOC_SUPPORTS_MULTIBUF: c_int = 0x04; -pub const SCTP_ASSOC_SUPPORTS_RE_CONFIG: c_int = 0x05; -pub const SCTP_ASSOC_SUPPORTS_INTERLEAVING: c_int = 0x06; -pub const SCTP_ASSOC_SUPPORTS_MAX: c_int = 0x06; - -pub const SCTP_ADDR_AVAILABLE: c_int = 0x0001; -pub const SCTP_ADDR_UNREACHABLE: c_int = 0x0002; -pub const SCTP_ADDR_REMOVED: c_int = 0x0003; -pub const SCTP_ADDR_ADDED: c_int = 0x0004; -pub const SCTP_ADDR_MADE_PRIM: c_int = 0x0005; -pub const SCTP_ADDR_CONFIRMED: c_int = 0x0006; - -pub const SCTP_ACTIVE: c_int = 0x0001; -pub const SCTP_INACTIVE: c_int = 0x0002; -pub const SCTP_UNCONFIRMED: c_int = 0x0200; - -pub const SCTP_DATA_UNSENT: c_int = 0x0001; -pub const SCTP_DATA_SENT: c_int = 0x0002; - -pub const SCTP_PARTIAL_DELIVERY_ABORTED: c_int = 0x0001; - -pub const 
SCTP_AUTH_NEW_KEY: c_int = 0x0001; -pub const SCTP_AUTH_NEWKEY: c_int = SCTP_AUTH_NEW_KEY; -pub const SCTP_AUTH_NO_AUTH: c_int = 0x0002; -pub const SCTP_AUTH_FREE_KEY: c_int = 0x0003; - -pub const SCTP_STREAM_RESET_INCOMING_SSN: c_int = 0x0001; -pub const SCTP_STREAM_RESET_OUTGOING_SSN: c_int = 0x0002; -pub const SCTP_STREAM_RESET_DENIED: c_int = 0x0004; -pub const SCTP_STREAM_RESET_FAILED: c_int = 0x0008; - -pub const SCTP_ASSOC_RESET_DENIED: c_int = 0x0004; -pub const SCTP_ASSOC_RESET_FAILED: c_int = 0x0008; - -pub const SCTP_STREAM_CHANGE_DENIED: c_int = 0x0004; -pub const SCTP_STREAM_CHANGE_FAILED: c_int = 0x0008; - -pub const KENV_DUMP_LOADER: c_int = 4; -pub const KENV_DUMP_STATIC: c_int = 5; - -pub const RB_PAUSE: c_int = 0x100000; -pub const RB_REROOT: c_int = 0x200000; -pub const RB_POWERCYCLE: c_int = 0x400000; -pub const RB_PROBE: c_int = 0x10000000; -pub const RB_MULTIPLE: c_int = 0x20000000; - -// netinet/in_pcb.h -pub const INC_ISIPV6: c_uchar = 0x01; -pub const INC_IPV6MINMTU: c_uchar = 0x02; - -// sys/time.h -pub const CLOCK_BOOTTIME: crate::clockid_t = crate::CLOCK_UPTIME; -pub const CLOCK_REALTIME_COARSE: crate::clockid_t = crate::CLOCK_REALTIME_FAST; -pub const CLOCK_MONOTONIC_COARSE: crate::clockid_t = crate::CLOCK_MONOTONIC_FAST; - -// sys/timerfd.h - -pub const TFD_NONBLOCK: c_int = crate::O_NONBLOCK; -pub const TFD_CLOEXEC: c_int = O_CLOEXEC; -pub const TFD_TIMER_ABSTIME: c_int = 0x01; -pub const TFD_TIMER_CANCEL_ON_SET: c_int = 0x02; - -// sys/unistd.h - -pub const CLOSE_RANGE_CLOEXEC: c_uint = 1 << 2; - -pub const KCMP_FILE: c_int = 100; -pub const KCMP_FILEOBJ: c_int = 101; -pub const KCMP_FILES: c_int = 102; -pub const KCMP_SIGHAND: c_int = 103; -pub const KCMP_VM: c_int = 104; - -pub const fn MAP_ALIGNED(a: c_int) -> c_int { - a << 24 -} - -const fn _ALIGN(p: usize) -> usize { - (p + _ALIGNBYTES) & !_ALIGNBYTES -} - -f! 
{
-    pub fn CMSG_DATA(cmsg: *const cmsghdr) -> *mut c_uchar {
-        (cmsg as *mut c_uchar).add(_ALIGN(size_of::<cmsghdr>()))
-    }
-
-    pub const fn CMSG_LEN(length: c_uint) -> c_uint {
-        _ALIGN(size_of::<cmsghdr>()) as c_uint + length
-    }
-
-    pub fn CMSG_NXTHDR(mhdr: *const crate::msghdr, cmsg: *const cmsghdr) -> *mut cmsghdr {
-        if cmsg.is_null() {
-            return crate::CMSG_FIRSTHDR(mhdr);
-        }
-        let next = cmsg as usize + _ALIGN((*cmsg).cmsg_len as usize) + _ALIGN(size_of::<cmsghdr>());
-        let max = (*mhdr).msg_control as usize + (*mhdr).msg_controllen as usize;
-        if next > max {
-            core::ptr::null_mut::<cmsghdr>()
-        } else {
-            (cmsg as usize + _ALIGN((*cmsg).cmsg_len as usize)) as *mut cmsghdr
-        }
-    }
-
-    pub const fn CMSG_SPACE(length: c_uint) -> c_uint {
-        (_ALIGN(size_of::<cmsghdr>()) + _ALIGN(length as usize)) as c_uint
-    }
-
-    pub fn MALLOCX_ALIGN(lg: c_uint) -> c_int {
-        ffsl(lg as c_long - 1)
-    }
-
-    pub const fn MALLOCX_TCACHE(tc: c_int) -> c_int {
-        (tc + 2) << 8 as c_int
-    }
-
-    pub const fn MALLOCX_ARENA(a: c_int) -> c_int {
-        (a + 1) << 20 as c_int
-    }
-
-    pub fn SOCKCREDSIZE(ngrps: usize) -> usize {
-        let ngrps = if ngrps > 0 { ngrps - 1 } else { 0 };
-        size_of::<sockcred>() + size_of::<crate::gid_t>() * ngrps
-    }
-
-    pub fn uname(buf: *mut crate::utsname) -> c_int {
-        __xuname(256, buf as *mut c_void)
-    }
-
-    pub fn CPU_ZERO(cpuset: &mut cpuset_t) -> () {
-        for slot in cpuset.__bits.iter_mut() {
-            *slot = 0;
-        }
-    }
-
-    pub fn CPU_FILL(cpuset: &mut cpuset_t) -> () {
-        for slot in cpuset.__bits.iter_mut() {
-            *slot = !0;
-        }
-    }
-
-    pub fn CPU_SET(cpu: usize, cpuset: &mut cpuset_t) -> () {
-        let bitset_bits = 8 * size_of::<c_long>();
-        let (idx, offset) = (cpu / bitset_bits, cpu % bitset_bits);
-        cpuset.__bits[idx] |= 1 << offset;
-    }
-
-    pub fn CPU_CLR(cpu: usize, cpuset: &mut cpuset_t) -> () {
-        let bitset_bits = 8 * size_of::<c_long>();
-        let (idx, offset) = (cpu / bitset_bits, cpu % bitset_bits);
-        cpuset.__bits[idx] &= !(1 << offset);
-    }
-
-    pub fn CPU_ISSET(cpu: usize, cpuset: &cpuset_t) -> bool {
-        let bitset_bits = 8 * size_of::<c_long>();
-        let (idx, offset) = (cpu / bitset_bits, cpu % bitset_bits);
-        0 != cpuset.__bits[idx] & (1 << offset)
-    }
-
-    pub fn CPU_COUNT(cpuset: &cpuset_t) -> c_int {
-        let mut s: u32 = 0;
-        let cpuset_size = size_of::<cpuset_t>();
-        let bitset_size = size_of::<c_long>();
-
-        for i in cpuset.__bits[..(cpuset_size / bitset_size)].iter() {
-            s += i.count_ones();
-        }
-        s as c_int
-    }
-
-    pub fn SOCKCRED2SIZE(ngrps: usize) -> usize {
-        let ngrps = if ngrps > 0 { ngrps - 1 } else { 0 };
-        size_of::<sockcred2>() + size_of::<crate::gid_t>() * ngrps
-    }
-
-    pub fn PROT_MAX(x: c_int) -> c_int {
-        x << 16
-    }
-
-    pub fn PROT_MAX_EXTRACT(x: c_int) -> c_int {
-        (x >> 16) & (crate::PROT_READ | crate::PROT_WRITE | crate::PROT_EXEC)
-    }
-}
-
-safe_f!
{ - pub const fn WIFSIGNALED(status: c_int) -> bool { - (status & 0o177) != 0o177 && (status & 0o177) != 0 && status != 0x13 - } - - pub const fn INVALID_SINFO_FLAG(x: c_int) -> bool { - (x) & 0xfffffff0 - & !(SCTP_EOF - | SCTP_ABORT - | SCTP_UNORDERED - | SCTP_ADDR_OVER - | SCTP_SENDALL - | SCTP_EOR - | SCTP_SACK_IMMEDIATELY) - != 0 - } - - pub const fn PR_SCTP_POLICY(x: c_int) -> c_int { - x & 0x0f - } - - pub const fn PR_SCTP_ENABLED(x: c_int) -> bool { - PR_SCTP_POLICY(x) != SCTP_PR_SCTP_NONE && PR_SCTP_POLICY(x) != SCTP_PR_SCTP_ALL - } - - pub const fn PR_SCTP_TTL_ENABLED(x: c_int) -> bool { - PR_SCTP_POLICY(x) == SCTP_PR_SCTP_TTL - } - - pub const fn PR_SCTP_BUF_ENABLED(x: c_int) -> bool { - PR_SCTP_POLICY(x) == SCTP_PR_SCTP_BUF - } - - pub const fn PR_SCTP_RTX_ENABLED(x: c_int) -> bool { - PR_SCTP_POLICY(x) == SCTP_PR_SCTP_RTX - } - - pub const fn PR_SCTP_INVALID_POLICY(x: c_int) -> bool { - PR_SCTP_POLICY(x) > SCTP_PR_SCTP_MAX - } - - pub const fn PR_SCTP_VALID_POLICY(x: c_int) -> bool { - PR_SCTP_POLICY(x) <= SCTP_PR_SCTP_MAX - } -} - -cfg_if! { - if #[cfg(not(any(freebsd10, freebsd11)))] { - extern "C" { - pub fn fhlink(fhp: *mut fhandle_t, to: *const c_char) -> c_int; - pub fn fhlinkat(fhp: *mut fhandle_t, tofd: c_int, to: *const c_char) -> c_int; - pub fn fhreadlink(fhp: *mut fhandle_t, buf: *mut c_char, bufsize: size_t) -> c_int; - pub fn getfhat(fd: c_int, path: *mut c_char, fhp: *mut fhandle, flag: c_int) -> c_int; - } - } -} - -extern "C" { - #[cfg_attr(doc, doc(alias = "__errno_location"))] - #[cfg_attr(doc, doc(alias = "errno"))] - pub fn __error() -> *mut c_int; - - pub fn aio_cancel(fd: c_int, aiocbp: *mut aiocb) -> c_int; - pub fn aio_error(aiocbp: *const aiocb) -> c_int; - pub fn aio_fsync(op: c_int, aiocbp: *mut aiocb) -> c_int; - pub fn aio_read(aiocbp: *mut aiocb) -> c_int; - pub fn aio_readv(aiocbp: *mut crate::aiocb) -> c_int; - pub fn aio_return(aiocbp: *mut aiocb) -> ssize_t; - pub fn aio_suspend( - aiocb_list: *const *const aiocb, - nitems: c_int, - timeout: *const crate::timespec, - ) -> c_int; - pub fn aio_write(aiocbp: *mut aiocb) -> c_int; - pub fn aio_writev(aiocbp: *mut crate::aiocb) -> c_int; - - pub fn copy_file_range( - infd: c_int, - inoffp: *mut off_t, - outfd: c_int, - outoffp: *mut off_t, - len: size_t, - flags: c_uint, - ) -> ssize_t; - - pub fn devname_r( - dev: crate::dev_t, - mode: crate::mode_t, - buf: *mut c_char, - len: c_int, - ) -> *mut c_char; - - pub fn extattr_delete_fd(fd: c_int, attrnamespace: c_int, attrname: *const c_char) -> c_int; - pub fn extattr_delete_file( - path: *const c_char, - attrnamespace: c_int, - attrname: *const c_char, - ) -> c_int; - pub fn extattr_delete_link( - path: *const c_char, - attrnamespace: c_int, - attrname: *const c_char, - ) -> c_int; - pub fn extattr_get_fd( - fd: c_int, - attrnamespace: c_int, - attrname: *const c_char, - data: *mut c_void, - nbytes: size_t, - ) -> ssize_t; - pub fn extattr_get_file( - path: *const c_char, - attrnamespace: c_int, - attrname: *const c_char, - data: *mut c_void, - nbytes: size_t, - ) -> ssize_t; - pub fn extattr_get_link( - path: *const c_char, - attrnamespace: c_int, - attrname: *const c_char, - data: *mut c_void, - nbytes: size_t, - ) -> ssize_t; - pub fn extattr_list_fd( - fd: c_int, - attrnamespace: c_int, - data: *mut c_void, - nbytes: size_t, - ) -> ssize_t; - pub fn extattr_list_file( - path: *const c_char, - attrnamespace: c_int, - data: *mut c_void, - nbytes: size_t, - ) -> ssize_t; - pub fn extattr_list_link( - path: *const c_char, - attrnamespace: c_int, - 
data: *mut c_void, - nbytes: size_t, - ) -> ssize_t; - pub fn extattr_set_fd( - fd: c_int, - attrnamespace: c_int, - attrname: *const c_char, - data: *const c_void, - nbytes: size_t, - ) -> ssize_t; - pub fn extattr_set_file( - path: *const c_char, - attrnamespace: c_int, - attrname: *const c_char, - data: *const c_void, - nbytes: size_t, - ) -> ssize_t; - pub fn extattr_set_link( - path: *const c_char, - attrnamespace: c_int, - attrname: *const c_char, - data: *const c_void, - nbytes: size_t, - ) -> ssize_t; - - pub fn fspacectl( - fd: c_int, - cmd: c_int, - rqsr: *const spacectl_range, - flags: c_int, - rmsr: *mut spacectl_range, - ) -> c_int; - - pub fn jail(jail: *mut crate::jail) -> c_int; - pub fn jail_attach(jid: c_int) -> c_int; - pub fn jail_remove(jid: c_int) -> c_int; - pub fn jail_get(iov: *mut crate::iovec, niov: c_uint, flags: c_int) -> c_int; - pub fn jail_set(iov: *mut crate::iovec, niov: c_uint, flags: c_int) -> c_int; - - pub fn lio_listio( - mode: c_int, - aiocb_list: *const *mut aiocb, - nitems: c_int, - sevp: *mut sigevent, - ) -> c_int; - - pub fn getutxuser(user: *const c_char) -> *mut utmpx; - pub fn setutxdb(_type: c_int, file: *const c_char) -> c_int; - - pub fn aio_waitcomplete(iocbp: *mut *mut aiocb, timeout: *mut crate::timespec) -> ssize_t; - pub fn mq_getfd_np(mqd: crate::mqd_t) -> c_int; - - pub fn waitid( - idtype: idtype_t, - id: crate::id_t, - infop: *mut crate::siginfo_t, - options: c_int, - ) -> c_int; - pub fn ptsname_r(fd: c_int, buf: *mut c_char, buflen: size_t) -> c_int; - - pub fn ftok(pathname: *const c_char, proj_id: c_int) -> crate::key_t; - pub fn shmget(key: crate::key_t, size: size_t, shmflg: c_int) -> c_int; - pub fn shmat(shmid: c_int, shmaddr: *const c_void, shmflg: c_int) -> *mut c_void; - pub fn shmdt(shmaddr: *const c_void) -> c_int; - pub fn shmctl(shmid: c_int, cmd: c_int, buf: *mut crate::shmid_ds) -> c_int; - pub fn semget(key: crate::key_t, nsems: c_int, semflg: c_int) -> c_int; - pub fn semctl(semid: c_int, semnum: c_int, cmd: c_int, ...) 
-> c_int; - pub fn semop(semid: c_int, sops: *mut sembuf, nsops: size_t) -> c_int; - pub fn msgctl(msqid: c_int, cmd: c_int, buf: *mut crate::msqid_ds) -> c_int; - pub fn msgget(key: crate::key_t, msgflg: c_int) -> c_int; - pub fn msgsnd(msqid: c_int, msgp: *const c_void, msgsz: size_t, msgflg: c_int) -> c_int; - pub fn cfmakesane(termios: *mut crate::termios); - - pub fn pdfork(fdp: *mut c_int, flags: c_int) -> crate::pid_t; - pub fn pdgetpid(fd: c_int, pidp: *mut crate::pid_t) -> c_int; - pub fn pdkill(fd: c_int, signum: c_int) -> c_int; - - pub fn rtprio_thread(function: c_int, lwpid: crate::lwpid_t, rtp: *mut super::rtprio) -> c_int; - - pub fn uuidgen(store: *mut uuid, count: c_int) -> c_int; - - pub fn thr_kill(id: c_long, sig: c_int) -> c_int; - pub fn thr_kill2(pid: crate::pid_t, id: c_long, sig: c_int) -> c_int; - pub fn thr_self(tid: *mut c_long) -> c_int; - pub fn pthread_getthreadid_np() -> c_int; - pub fn pthread_getaffinity_np( - td: crate::pthread_t, - cpusetsize: size_t, - cpusetp: *mut cpuset_t, - ) -> c_int; - pub fn pthread_setaffinity_np( - td: crate::pthread_t, - cpusetsize: size_t, - cpusetp: *const cpuset_t, - ) -> c_int; - - // sched.h linux compatibility api - pub fn sched_getaffinity( - pid: crate::pid_t, - cpusetsz: size_t, - cpuset: *mut crate::cpuset_t, - ) -> c_int; - pub fn sched_setaffinity( - pid: crate::pid_t, - cpusetsz: size_t, - cpuset: *const crate::cpuset_t, - ) -> c_int; - pub fn sched_getcpu() -> c_int; - - pub fn pthread_mutex_consistent(mutex: *mut crate::pthread_mutex_t) -> c_int; - - pub fn pthread_mutexattr_getrobust( - attr: *mut crate::pthread_mutexattr_t, - robust: *mut c_int, - ) -> c_int; - pub fn pthread_mutexattr_setrobust( - attr: *mut crate::pthread_mutexattr_t, - robust: c_int, - ) -> c_int; - - pub fn pthread_spin_init(lock: *mut pthread_spinlock_t, pshared: c_int) -> c_int; - pub fn pthread_spin_destroy(lock: *mut pthread_spinlock_t) -> c_int; - pub fn pthread_spin_lock(lock: *mut pthread_spinlock_t) -> c_int; - pub fn pthread_spin_trylock(lock: *mut pthread_spinlock_t) -> c_int; - pub fn pthread_spin_unlock(lock: *mut pthread_spinlock_t) -> c_int; - - #[cfg_attr(all(target_os = "freebsd", freebsd11), link_name = "statfs@FBSD_1.0")] - pub fn statfs(path: *const c_char, buf: *mut statfs) -> c_int; - #[cfg_attr(all(target_os = "freebsd", freebsd11), link_name = "fstatfs@FBSD_1.0")] - pub fn fstatfs(fd: c_int, buf: *mut statfs) -> c_int; - - pub fn dup3(src: c_int, dst: c_int, flags: c_int) -> c_int; - pub fn __xuname(nmln: c_int, buf: *mut c_void) -> c_int; - - pub fn sendmmsg( - sockfd: c_int, - msgvec: *mut crate::mmsghdr, - vlen: size_t, - flags: c_int, - ) -> ssize_t; - pub fn recvmmsg( - sockfd: c_int, - msgvec: *mut crate::mmsghdr, - vlen: size_t, - flags: c_int, - timeout: *const crate::timespec, - ) -> ssize_t; - pub fn memmem( - haystack: *const c_void, - haystacklen: size_t, - needle: *const c_void, - needlelen: size_t, - ) -> *mut c_void; - - pub fn fhopen(fhp: *const fhandle_t, flags: c_int) -> c_int; - pub fn fhstat(fhp: *const fhandle, buf: *mut crate::stat) -> c_int; - pub fn fhstatfs(fhp: *const fhandle_t, buf: *mut crate::statfs) -> c_int; - pub fn getfh(path: *const c_char, fhp: *mut fhandle_t) -> c_int; - pub fn lgetfh(path: *const c_char, fhp: *mut fhandle_t) -> c_int; - pub fn getfsstat(buf: *mut crate::statfs, bufsize: c_long, mode: c_int) -> c_int; - #[cfg_attr( - all(target_os = "freebsd", freebsd11), - link_name = "getmntinfo@FBSD_1.0" - )] - pub fn getmntinfo(mntbufp: *mut *mut crate::statfs, mode: c_int) -> 
c_int; - pub fn mount( - type_: *const c_char, - dir: *const c_char, - flags: c_int, - data: *mut c_void, - ) -> c_int; - pub fn nmount(iov: *mut crate::iovec, niov: c_uint, flags: c_int) -> c_int; - - pub fn setproctitle(fmt: *const c_char, ...); - pub fn rfork(flags: c_int) -> c_int; - pub fn cpuset_getaffinity( - level: cpulevel_t, - which: cpuwhich_t, - id: crate::id_t, - setsize: size_t, - mask: *mut cpuset_t, - ) -> c_int; - pub fn cpuset_setaffinity( - level: cpulevel_t, - which: cpuwhich_t, - id: crate::id_t, - setsize: size_t, - mask: *const cpuset_t, - ) -> c_int; - pub fn cpuset(setid: *mut crate::cpusetid_t) -> c_int; - pub fn cpuset_getid( - level: cpulevel_t, - which: cpuwhich_t, - id: crate::id_t, - setid: *mut crate::cpusetid_t, - ) -> c_int; - pub fn cpuset_setid(which: cpuwhich_t, id: crate::id_t, setid: crate::cpusetid_t) -> c_int; - pub fn cap_enter() -> c_int; - pub fn cap_getmode(modep: *mut c_uint) -> c_int; - pub fn cap_fcntls_get(fd: c_int, fcntlrightsp: *mut u32) -> c_int; - pub fn cap_fcntls_limit(fd: c_int, fcntlrights: u32) -> c_int; - pub fn cap_ioctls_get(fd: c_int, cmds: *mut u_long, maxcmds: usize) -> isize; - pub fn cap_ioctls_limit(fd: c_int, cmds: *const u_long, ncmds: usize) -> c_int; - pub fn __cap_rights_init(version: c_int, rights: *mut cap_rights_t, ...) -> *mut cap_rights_t; - pub fn __cap_rights_get(version: c_int, fd: c_int, rightsp: *mut cap_rights_t) -> c_int; - pub fn __cap_rights_set(rights: *mut cap_rights_t, ...) -> *mut cap_rights_t; - pub fn __cap_rights_clear(rights: *mut cap_rights_t, ...) -> *mut cap_rights_t; - pub fn __cap_rights_is_set(rights: *const cap_rights_t, ...) -> bool; - pub fn cap_rights_is_valid(rights: *const cap_rights_t) -> bool; - pub fn cap_rights_limit(fd: c_int, rights: *const cap_rights_t) -> c_int; - pub fn cap_rights_merge(dst: *mut cap_rights_t, src: *const cap_rights_t) -> *mut cap_rights_t; - pub fn cap_rights_remove(dst: *mut cap_rights_t, src: *const cap_rights_t) - -> *mut cap_rights_t; - pub fn cap_rights_contains(big: *const cap_rights_t, little: *const cap_rights_t) -> bool; - pub fn cap_sandboxed() -> bool; - - pub fn reallocarray(ptr: *mut c_void, nmemb: size_t, size: size_t) -> *mut c_void; - - pub fn ffs(value: c_int) -> c_int; - pub fn ffsl(value: c_long) -> c_int; - pub fn ffsll(value: c_longlong) -> c_int; - pub fn fls(value: c_int) -> c_int; - pub fn flsl(value: c_long) -> c_int; - pub fn flsll(value: c_longlong) -> c_int; - pub fn malloc_stats_print( - write_cb: unsafe extern "C" fn(*mut c_void, *const c_char), - cbopaque: *mut c_void, - opt: *const c_char, - ); - pub fn mallctl( - name: *const c_char, - oldp: *mut c_void, - oldlenp: *mut size_t, - newp: *mut c_void, - newlen: size_t, - ) -> c_int; - pub fn mallctlnametomib(name: *const c_char, mibp: *mut size_t, miplen: *mut size_t) -> c_int; - pub fn mallctlbymib( - mib: *const size_t, - mible: size_t, - oldp: *mut c_void, - oldlenp: *mut size_t, - newp: *mut c_void, - newlen: size_t, - ) -> c_int; - pub fn mallocx(size: size_t, flags: c_int) -> *mut c_void; - pub fn rallocx(ptr: *mut c_void, size: size_t, flags: c_int) -> *mut c_void; - pub fn xallocx(ptr: *mut c_void, size: size_t, extra: size_t, flags: c_int) -> size_t; - pub fn sallocx(ptr: *const c_void, flags: c_int) -> size_t; - pub fn dallocx(ptr: *mut c_void, flags: c_int); - pub fn sdallocx(ptr: *mut c_void, size: size_t, flags: c_int); - pub fn nallocx(size: size_t, flags: c_int) -> size_t; - - pub fn procctl( - idtype: crate::idtype_t, - id: crate::id_t, - cmd: c_int, - data: *mut 
c_void, - ) -> c_int; - - pub fn getpagesize() -> c_int; - pub fn getpagesizes(pagesize: *mut size_t, nelem: c_int) -> c_int; - - pub fn clock_getcpuclockid2(arg1: crate::id_t, arg2: c_int, arg3: *mut clockid_t) -> c_int; - pub fn strchrnul(s: *const c_char, c: c_int) -> *mut c_char; - - pub fn shm_create_largepage( - path: *const c_char, - flags: c_int, - psind: c_int, - alloc_policy: c_int, - mode: crate::mode_t, - ) -> c_int; - pub fn shm_rename(path_from: *const c_char, path_to: *const c_char, flags: c_int) -> c_int; - pub fn memfd_create(name: *const c_char, flags: c_uint) -> c_int; - pub fn setaudit(auditinfo: *const auditinfo_t) -> c_int; - - pub fn eventfd(init: c_uint, flags: c_int) -> c_int; - pub fn eventfd_read(fd: c_int, value: *mut eventfd_t) -> c_int; - pub fn eventfd_write(fd: c_int, value: eventfd_t) -> c_int; - - pub fn fdatasync(fd: c_int) -> c_int; - - pub fn elf_aux_info(aux: c_int, buf: *mut c_void, buflen: c_int) -> c_int; - pub fn setproctitle_fast(fmt: *const c_char, ...); - pub fn timingsafe_bcmp(a: *const c_void, b: *const c_void, len: size_t) -> c_int; - pub fn timingsafe_memcmp(a: *const c_void, b: *const c_void, len: size_t) -> c_int; - - pub fn _umtx_op( - obj: *mut c_void, - op: c_int, - val: c_ulong, - uaddr: *mut c_void, - uaddr2: *mut c_void, - ) -> c_int; - - pub fn sctp_peeloff(s: c_int, id: crate::sctp_assoc_t) -> c_int; - pub fn sctp_bindx(s: c_int, addrs: *mut crate::sockaddr, num: c_int, tpe: c_int) -> c_int; - pub fn sctp_connectx( - s: c_int, - addrs: *const crate::sockaddr, - addrcnt: c_int, - id: *mut crate::sctp_assoc_t, - ) -> c_int; - pub fn sctp_getaddrlen(family: crate::sa_family_t) -> c_int; - pub fn sctp_getpaddrs( - s: c_int, - asocid: crate::sctp_assoc_t, - addrs: *mut *mut crate::sockaddr, - ) -> c_int; - pub fn sctp_freepaddrs(addrs: *mut crate::sockaddr); - pub fn sctp_getladdrs( - s: c_int, - asocid: crate::sctp_assoc_t, - addrs: *mut *mut crate::sockaddr, - ) -> c_int; - pub fn sctp_freeladdrs(addrs: *mut crate::sockaddr); - pub fn sctp_opt_info( - s: c_int, - id: crate::sctp_assoc_t, - opt: c_int, - arg: *mut c_void, - size: *mut crate::socklen_t, - ) -> c_int; - pub fn sctp_sendv( - sd: c_int, - iov: *const crate::iovec, - iovcnt: c_int, - addrs: *mut crate::sockaddr, - addrcnt: c_int, - info: *mut c_void, - infolen: crate::socklen_t, - infotype: c_uint, - flags: c_int, - ) -> ssize_t; - pub fn sctp_recvv( - sd: c_int, - iov: *const crate::iovec, - iovcnt: c_int, - from: *mut crate::sockaddr, - fromlen: *mut crate::socklen_t, - info: *mut c_void, - infolen: *mut crate::socklen_t, - infotype: *mut c_uint, - flags: *mut c_int, - ) -> ssize_t; - - pub fn timerfd_create(clockid: c_int, flags: c_int) -> c_int; - pub fn timerfd_gettime(fd: c_int, curr_value: *mut itimerspec) -> c_int; - pub fn timerfd_settime( - fd: c_int, - flags: c_int, - new_value: *const itimerspec, - old_value: *mut itimerspec, - ) -> c_int; - pub fn closefrom(lowfd: c_int); - pub fn close_range(lowfd: c_uint, highfd: c_uint, flags: c_int) -> c_int; - - pub fn execvpe( - file: *const c_char, - argv: *const *const c_char, - envp: *const *const c_char, - ) -> c_int; - - pub fn kcmp( - pid1: crate::pid_t, - pid2: crate::pid_t, - type_: c_int, - idx1: c_ulong, - idx2: c_ulong, - ) -> c_int; - pub fn dlvsym( - handle: *mut c_void, - symbol: *const c_char, - version: *const c_char, - ) -> *mut c_void; -} - -#[link(name = "memstat")] -extern "C" { - pub fn memstat_strerror(error: c_int) -> *const c_char; - pub fn memstat_mtl_alloc() -> *mut memory_type_list; - pub fn 
memstat_mtl_first(list: *mut memory_type_list) -> *mut memory_type; - pub fn memstat_mtl_next(mtp: *mut memory_type) -> *mut memory_type; - pub fn memstat_mtl_find( - list: *mut memory_type_list, - allocator: c_int, - name: *const c_char, - ) -> *mut memory_type; - pub fn memstat_mtl_free(list: *mut memory_type_list); - pub fn memstat_mtl_geterror(list: *mut memory_type_list) -> c_int; - pub fn memstat_get_name(mtp: *const memory_type) -> *const c_char; -} - -#[link(name = "kvm")] -extern "C" { - pub fn kvm_dpcpu_setcpu(kd: *mut crate::kvm_t, cpu: c_uint) -> c_int; - pub fn kvm_getargv( - kd: *mut crate::kvm_t, - p: *const kinfo_proc, - nchr: c_int, - ) -> *mut *mut c_char; - pub fn kvm_getcptime(kd: *mut crate::kvm_t, cp_time: *mut c_long) -> c_int; - pub fn kvm_getenvv( - kd: *mut crate::kvm_t, - p: *const kinfo_proc, - nchr: c_int, - ) -> *mut *mut c_char; - pub fn kvm_geterr(kd: *mut crate::kvm_t) -> *mut c_char; - pub fn kvm_getmaxcpu(kd: *mut crate::kvm_t) -> c_int; - pub fn kvm_getncpus(kd: *mut crate::kvm_t) -> c_int; - pub fn kvm_getpcpu(kd: *mut crate::kvm_t, cpu: c_int) -> *mut c_void; - pub fn kvm_counter_u64_fetch(kd: *mut crate::kvm_t, base: c_ulong) -> u64; - pub fn kvm_getswapinfo( - kd: *mut crate::kvm_t, - info: *mut kvm_swap, - maxswap: c_int, - flags: c_int, - ) -> c_int; - pub fn kvm_native(kd: *mut crate::kvm_t) -> c_int; - pub fn kvm_nlist(kd: *mut crate::kvm_t, nl: *mut nlist) -> c_int; - pub fn kvm_nlist2(kd: *mut crate::kvm_t, nl: *mut kvm_nlist) -> c_int; - pub fn kvm_read_zpcpu( - kd: *mut crate::kvm_t, - base: c_ulong, - buf: *mut c_void, - size: size_t, - cpu: c_int, - ) -> ssize_t; - pub fn kvm_read2( - kd: *mut crate::kvm_t, - addr: kvaddr_t, - buf: *mut c_void, - nbytes: size_t, - ) -> ssize_t; -} - -#[link(name = "util")] -extern "C" { - pub fn extattr_namespace_to_string(attrnamespace: c_int, string: *mut *mut c_char) -> c_int; - pub fn extattr_string_to_namespace(string: *const c_char, attrnamespace: *mut c_int) -> c_int; - pub fn realhostname(host: *mut c_char, hsize: size_t, ip: *const crate::in_addr) -> c_int; - pub fn realhostname_sa( - host: *mut c_char, - hsize: size_t, - addr: *mut crate::sockaddr, - addrlen: c_int, - ) -> c_int; - - pub fn kld_isloaded(name: *const c_char) -> c_int; - pub fn kld_load(name: *const c_char) -> c_int; - - pub fn kinfo_getvmmap(pid: crate::pid_t, cntp: *mut c_int) -> *mut kinfo_vmentry; - - pub fn hexdump(ptr: *const c_void, length: c_int, hdr: *const c_char, flags: c_int); - pub fn humanize_number( - buf: *mut c_char, - len: size_t, - number: i64, - suffix: *const c_char, - scale: c_int, - flags: c_int, - ) -> c_int; - - pub fn flopen(path: *const c_char, flags: c_int, ...) -> c_int; - pub fn flopenat(fd: c_int, path: *const c_char, flags: c_int, ...) 
-> c_int; - - pub fn getlocalbase() -> *const c_char; - - pub fn pidfile_open( - path: *const c_char, - mode: crate::mode_t, - pidptr: *mut crate::pid_t, - ) -> *mut crate::pidfh; - pub fn pidfile_write(path: *mut crate::pidfh) -> c_int; - pub fn pidfile_close(path: *mut crate::pidfh) -> c_int; - pub fn pidfile_remove(path: *mut crate::pidfh) -> c_int; - pub fn pidfile_fileno(path: *const crate::pidfh) -> c_int; - // FIXME(freebsd): pidfile_signal in due time (both manpage present and updated image snapshot) -} - -#[link(name = "procstat")] -extern "C" { - pub fn procstat_open_sysctl() -> *mut procstat; - pub fn procstat_getfiles( - procstat: *mut procstat, - kp: *mut kinfo_proc, - mmapped: c_int, - ) -> *mut filestat_list; - pub fn procstat_freefiles(procstat: *mut procstat, head: *mut filestat_list); - pub fn procstat_getprocs( - procstat: *mut procstat, - what: c_int, - arg: c_int, - count: *mut c_uint, - ) -> *mut kinfo_proc; - pub fn procstat_freeprocs(procstat: *mut procstat, p: *mut kinfo_proc); - pub fn procstat_getvmmap( - procstat: *mut procstat, - kp: *mut kinfo_proc, - count: *mut c_uint, - ) -> *mut kinfo_vmentry; - pub fn procstat_freevmmap(procstat: *mut procstat, vmmap: *mut kinfo_vmentry); - pub fn procstat_close(procstat: *mut procstat); - pub fn procstat_freeargv(procstat: *mut procstat); - pub fn procstat_freeenvv(procstat: *mut procstat); - pub fn procstat_freegroups(procstat: *mut procstat, groups: *mut crate::gid_t); - pub fn procstat_freeptlwpinfo(procstat: *mut procstat, pl: *mut ptrace_lwpinfo); - pub fn procstat_getargv( - procstat: *mut procstat, - kp: *mut kinfo_proc, - nchr: size_t, - ) -> *mut *mut c_char; - pub fn procstat_getenvv( - procstat: *mut procstat, - kp: *mut kinfo_proc, - nchr: size_t, - ) -> *mut *mut c_char; - pub fn procstat_getgroups( - procstat: *mut procstat, - kp: *mut kinfo_proc, - count: *mut c_uint, - ) -> *mut crate::gid_t; - pub fn procstat_getosrel( - procstat: *mut procstat, - kp: *mut kinfo_proc, - osrelp: *mut c_int, - ) -> c_int; - pub fn procstat_getpathname( - procstat: *mut procstat, - kp: *mut kinfo_proc, - pathname: *mut c_char, - maxlen: size_t, - ) -> c_int; - pub fn procstat_getrlimit( - procstat: *mut procstat, - kp: *mut kinfo_proc, - which: c_int, - rlimit: *mut crate::rlimit, - ) -> c_int; - pub fn procstat_getumask( - procstat: *mut procstat, - kp: *mut kinfo_proc, - maskp: *mut c_ushort, - ) -> c_int; - pub fn procstat_open_core(filename: *const c_char) -> *mut procstat; - pub fn procstat_open_kvm(nlistf: *const c_char, memf: *const c_char) -> *mut procstat; - pub fn procstat_get_socket_info( - proc_: *mut procstat, - fst: *mut filestat, - sock: *mut sockstat, - errbuf: *mut c_char, - ) -> c_int; - pub fn procstat_get_vnode_info( - proc_: *mut procstat, - fst: *mut filestat, - vn: *mut vnstat, - errbuf: *mut c_char, - ) -> c_int; - pub fn procstat_get_pts_info( - proc_: *mut procstat, - fst: *mut filestat, - pts: *mut ptsstat, - errbuf: *mut c_char, - ) -> c_int; - pub fn procstat_get_shm_info( - proc_: *mut procstat, - fst: *mut filestat, - shm: *mut shmstat, - errbuf: *mut c_char, - ) -> c_int; -} - -#[link(name = "rt")] -extern "C" { - pub fn timer_create(clock_id: clockid_t, evp: *mut sigevent, timerid: *mut timer_t) -> c_int; - pub fn timer_delete(timerid: timer_t) -> c_int; - pub fn timer_getoverrun(timerid: timer_t) -> c_int; - pub fn timer_gettime(timerid: timer_t, value: *mut itimerspec) -> c_int; - pub fn timer_settime( - timerid: timer_t, - flags: c_int, - value: *const itimerspec, - ovalue: *mut 
itimerspec, - ) -> c_int; -} - -#[link(name = "devstat")] -extern "C" { - pub fn devstat_getnumdevs(kd: *mut crate::kvm_t) -> c_int; - pub fn devstat_getgeneration(kd: *mut crate::kvm_t) -> c_long; - pub fn devstat_getversion(kd: *mut crate::kvm_t) -> c_int; - pub fn devstat_checkversion(kd: *mut crate::kvm_t) -> c_int; - pub fn devstat_selectdevs( - dev_select: *mut *mut device_selection, - num_selected: *mut c_int, - num_selections: *mut c_int, - select_generation: *mut c_long, - current_generation: c_long, - devices: *mut devstat, - numdevs: c_int, - matches: *mut devstat_match, - num_matches: c_int, - dev_selections: *mut *mut c_char, - num_dev_selections: c_int, - select_mode: devstat_select_mode, - maxshowdevs: c_int, - perf_select: c_int, - ) -> c_int; - pub fn devstat_buildmatch( - match_str: *mut c_char, - matches: *mut *mut devstat_match, - num_matches: *mut c_int, - ) -> c_int; -} - -cfg_if! { - if #[cfg(freebsd15)] { - mod freebsd15; - pub use self::freebsd15::*; - } else if #[cfg(freebsd14)] { - mod freebsd14; - pub use self::freebsd14::*; - } else if #[cfg(freebsd13)] { - mod freebsd13; - pub use self::freebsd13::*; - } else if #[cfg(freebsd12)] { - mod freebsd12; - pub use self::freebsd12::*; - } else if #[cfg(any(freebsd10, freebsd11))] { - mod freebsd11; - pub use self::freebsd11::*; - } else { - // Unknown freebsd version - } -} - -cfg_if! { - if #[cfg(target_arch = "x86")] { - mod x86; - pub use self::x86::*; - } else if #[cfg(target_arch = "x86_64")] { - mod x86_64; - pub use self::x86_64::*; - } else if #[cfg(target_arch = "aarch64")] { - mod aarch64; - pub use self::aarch64::*; - } else if #[cfg(target_arch = "arm")] { - mod arm; - pub use self::arm::*; - } else if #[cfg(target_arch = "powerpc64")] { - mod powerpc64; - pub use self::powerpc64::*; - } else if #[cfg(target_arch = "powerpc")] { - mod powerpc; - pub use self::powerpc::*; - } else if #[cfg(target_arch = "riscv64")] { - mod riscv64; - pub use self::riscv64::*; - } else { - // Unknown target_arch - } -} diff --git a/vendor/libc/src/unix/bsd/freebsdlike/freebsd/powerpc.rs b/vendor/libc/src/unix/bsd/freebsdlike/freebsd/powerpc.rs deleted file mode 100644 index e4275b10ba508c..00000000000000 --- a/vendor/libc/src/unix/bsd/freebsdlike/freebsd/powerpc.rs +++ /dev/null @@ -1,62 +0,0 @@ -use crate::prelude::*; - -pub type clock_t = u32; -pub type wchar_t = i32; -pub type time_t = i64; -pub type suseconds_t = i32; -pub type register_t = i32; - -s_no_extra_traits! { - #[repr(align(16))] - pub struct mcontext_t { - pub mc_vers: c_int, - pub mc_flags: c_int, - pub mc_onstack: c_int, - pub mc_len: c_int, - pub mc_avec: [u64; 64], - pub mc_av: [u32; 2], - pub mc_frame: [crate::register_t; 42], - pub mc_fpreg: [u64; 33], - pub mc_vsxfpreg: [u64; 32], - } -} - -cfg_if! 
{ - if #[cfg(feature = "extra_traits")] { - impl PartialEq for mcontext_t { - fn eq(&self, other: &mcontext_t) -> bool { - self.mc_vers == other.mc_vers - && self.mc_flags == other.mc_flags - && self.mc_onstack == other.mc_onstack - && self.mc_len == other.mc_len - && self.mc_avec == other.mc_avec - && self.mc_av == other.mc_av - && self.mc_frame == other.mc_frame - && self.mc_fpreg == other.mc_fpreg - && self.mc_vsxfpreg == other.mc_vsxfpreg - } - } - impl Eq for mcontext_t {} - impl hash::Hash for mcontext_t { - fn hash(&self, state: &mut H) { - self.mc_vers.hash(state); - self.mc_flags.hash(state); - self.mc_onstack.hash(state); - self.mc_len.hash(state); - self.mc_avec.hash(state); - self.mc_av.hash(state); - self.mc_frame.hash(state); - self.mc_fpreg.hash(state); - self.mc_vsxfpreg.hash(state); - } - } - } -} - -pub(crate) const _ALIGNBYTES: usize = size_of::() - 1; - -pub const BIOCSRTIMEOUT: c_ulong = 0x8010426d; -pub const BIOCGRTIMEOUT: c_ulong = 0x4010426e; -pub const MAP_32BIT: c_int = 0x00080000; -pub const MINSIGSTKSZ: size_t = 2048; // 512 * 4 -pub const TIOCTIMESTAMP: c_ulong = 0x40107459; diff --git a/vendor/libc/src/unix/bsd/freebsdlike/freebsd/powerpc64.rs b/vendor/libc/src/unix/bsd/freebsdlike/freebsd/powerpc64.rs deleted file mode 100644 index b5a81311ecc60b..00000000000000 --- a/vendor/libc/src/unix/bsd/freebsdlike/freebsd/powerpc64.rs +++ /dev/null @@ -1,63 +0,0 @@ -use crate::prelude::*; - -pub type clock_t = u32; -pub type wchar_t = i32; -pub type time_t = i64; -pub type suseconds_t = i64; -pub type register_t = i64; - -s_no_extra_traits! { - #[repr(align(16))] - pub struct mcontext_t { - pub mc_vers: c_int, - pub mc_flags: c_int, - pub mc_onstack: c_int, - pub mc_len: c_int, - pub mc_avec: [u64; 64], - pub mc_av: [u32; 2], - pub mc_frame: [crate::register_t; 42], - pub mc_fpreg: [u64; 33], - pub mc_vsxfpreg: [u64; 32], - } -} - -cfg_if! { - if #[cfg(feature = "extra_traits")] { - impl PartialEq for mcontext_t { - fn eq(&self, other: &mcontext_t) -> bool { - self.mc_vers == other.mc_vers - && self.mc_flags == other.mc_flags - && self.mc_onstack == other.mc_onstack - && self.mc_len == other.mc_len - && self.mc_avec == other.mc_avec - && self.mc_av == other.mc_av - && self.mc_frame == other.mc_frame - && self.mc_fpreg == other.mc_fpreg - && self.mc_vsxfpreg == other.mc_vsxfpreg - } - } - impl Eq for mcontext_t {} - impl hash::Hash for mcontext_t { - fn hash(&self, state: &mut H) { - self.mc_vers.hash(state); - self.mc_flags.hash(state); - self.mc_onstack.hash(state); - self.mc_len.hash(state); - self.mc_avec.hash(state); - self.mc_av.hash(state); - self.mc_frame.hash(state); - self.mc_fpreg.hash(state); - self.mc_vsxfpreg.hash(state); - } - } - } -} - -pub(crate) const _ALIGNBYTES: usize = size_of::() - 1; - -pub const BIOCSRTIMEOUT: c_ulong = 0x8010426d; -pub const BIOCGRTIMEOUT: c_ulong = 0x4010426e; - -pub const MAP_32BIT: c_int = 0x00080000; -pub const MINSIGSTKSZ: size_t = 2048; // 512 * 4 -pub const TIOCTIMESTAMP: c_ulong = 0x40107459; diff --git a/vendor/libc/src/unix/bsd/freebsdlike/freebsd/riscv64.rs b/vendor/libc/src/unix/bsd/freebsdlike/freebsd/riscv64.rs deleted file mode 100644 index 5ae5d34a746605..00000000000000 --- a/vendor/libc/src/unix/bsd/freebsdlike/freebsd/riscv64.rs +++ /dev/null @@ -1,116 +0,0 @@ -use crate::prelude::*; - -pub type clock_t = i32; -pub type wchar_t = c_int; -pub type time_t = i64; -pub type suseconds_t = c_long; -pub type register_t = i64; - -s_no_extra_traits! 
{ - pub struct gpregs { - pub gp_ra: crate::register_t, - pub gp_sp: crate::register_t, - pub gp_gp: crate::register_t, - pub gp_tp: crate::register_t, - pub gp_t: [crate::register_t; 7], - pub gp_s: [crate::register_t; 12], - pub gp_a: [crate::register_t; 8], - pub gp_sepc: crate::register_t, - pub gp_sstatus: crate::register_t, - } - - pub struct fpregs { - pub fp_x: [[u64; 2]; 32], - pub fp_fcsr: u64, - pub fp_flags: c_int, - pub pad: c_int, - } - - pub struct mcontext_t { - pub mc_gpregs: gpregs, - pub mc_fpregs: fpregs, - pub mc_flags: c_int, - pub mc_pad: c_int, - pub mc_spare: [u64; 8], - } -} - -cfg_if! { - if #[cfg(feature = "extra_traits")] { - impl PartialEq for gpregs { - fn eq(&self, other: &gpregs) -> bool { - self.gp_ra == other.gp_ra - && self.gp_sp == other.gp_sp - && self.gp_gp == other.gp_gp - && self.gp_tp == other.gp_tp - && self.gp_t.iter().zip(other.gp_t.iter()).all(|(a, b)| a == b) - && self.gp_s.iter().zip(other.gp_s.iter()).all(|(a, b)| a == b) - && self.gp_a.iter().zip(other.gp_a.iter()).all(|(a, b)| a == b) - && self.gp_sepc == other.gp_sepc - && self.gp_sstatus == other.gp_sstatus - } - } - impl Eq for gpregs {} - impl hash::Hash for gpregs { - fn hash(&self, state: &mut H) { - self.gp_ra.hash(state); - self.gp_sp.hash(state); - self.gp_gp.hash(state); - self.gp_tp.hash(state); - self.gp_t.hash(state); - self.gp_s.hash(state); - self.gp_a.hash(state); - self.gp_sepc.hash(state); - self.gp_sstatus.hash(state); - } - } - impl PartialEq for fpregs { - fn eq(&self, other: &fpregs) -> bool { - self.fp_x == other.fp_x - && self.fp_fcsr == other.fp_fcsr - && self.fp_flags == other.fp_flags - && self.pad == other.pad - } - } - impl Eq for fpregs {} - impl hash::Hash for fpregs { - fn hash(&self, state: &mut H) { - self.fp_x.hash(state); - self.fp_fcsr.hash(state); - self.fp_flags.hash(state); - self.pad.hash(state); - } - } - impl PartialEq for mcontext_t { - fn eq(&self, other: &mcontext_t) -> bool { - self.mc_gpregs == other.mc_gpregs - && self.mc_fpregs == other.mc_fpregs - && self.mc_flags == other.mc_flags - && self.mc_pad == other.mc_pad - && self - .mc_spare - .iter() - .zip(other.mc_spare.iter()) - .all(|(a, b)| a == b) - } - } - impl Eq for mcontext_t {} - impl hash::Hash for mcontext_t { - fn hash(&self, state: &mut H) { - self.mc_gpregs.hash(state); - self.mc_fpregs.hash(state); - self.mc_flags.hash(state); - self.mc_pad.hash(state); - self.mc_spare.hash(state); - } - } - } -} - -pub(crate) const _ALIGNBYTES: usize = size_of::() - 1; - -pub const BIOCSRTIMEOUT: c_ulong = 0x8010426d; -pub const BIOCGRTIMEOUT: c_ulong = 0x4010426e; -pub const MAP_32BIT: c_int = 0x00080000; -pub const MINSIGSTKSZ: size_t = 4096; // 1024 * 4 -pub const TIOCTIMESTAMP: c_ulong = 0x40107459; diff --git a/vendor/libc/src/unix/bsd/freebsdlike/freebsd/x86.rs b/vendor/libc/src/unix/bsd/freebsdlike/freebsd/x86.rs deleted file mode 100644 index 5becde55db43ee..00000000000000 --- a/vendor/libc/src/unix/bsd/freebsdlike/freebsd/x86.rs +++ /dev/null @@ -1,134 +0,0 @@ -use crate::prelude::*; - -pub type clock_t = c_ulong; -pub type wchar_t = i32; -pub type time_t = i32; -pub type suseconds_t = i32; -pub type register_t = i32; - -s_no_extra_traits! 
{ - #[repr(align(16))] - pub struct mcontext_t { - pub mc_onstack: register_t, - pub mc_gs: register_t, - pub mc_fs: register_t, - pub mc_es: register_t, - pub mc_ds: register_t, - pub mc_edi: register_t, - pub mc_esi: register_t, - pub mc_ebp: register_t, - pub mc_isp: register_t, - pub mc_ebx: register_t, - pub mc_edx: register_t, - pub mc_ecx: register_t, - pub mc_eax: register_t, - pub mc_trapno: register_t, - pub mc_err: register_t, - pub mc_eip: register_t, - pub mc_cs: register_t, - pub mc_eflags: register_t, - pub mc_esp: register_t, - pub mc_ss: register_t, - pub mc_len: c_int, - pub mc_fpformat: c_int, - pub mc_ownedfp: c_int, - pub mc_flags: register_t, - pub mc_fpstate: [c_int; 128], - pub mc_fsbase: register_t, - pub mc_gsbase: register_t, - pub mc_xfpustate: register_t, - pub mc_xfpustate_len: register_t, - pub mc_spare2: [c_int; 4], - } -} - -cfg_if! { - if #[cfg(feature = "extra_traits")] { - impl PartialEq for mcontext_t { - fn eq(&self, other: &mcontext_t) -> bool { - self.mc_onstack == other.mc_onstack - && self.mc_gs == other.mc_gs - && self.mc_fs == other.mc_fs - && self.mc_es == other.mc_es - && self.mc_ds == other.mc_ds - && self.mc_edi == other.mc_edi - && self.mc_esi == other.mc_esi - && self.mc_ebp == other.mc_ebp - && self.mc_isp == other.mc_isp - && self.mc_ebx == other.mc_ebx - && self.mc_edx == other.mc_edx - && self.mc_ecx == other.mc_ecx - && self.mc_eax == other.mc_eax - && self.mc_trapno == other.mc_trapno - && self.mc_err == other.mc_err - && self.mc_eip == other.mc_eip - && self.mc_cs == other.mc_cs - && self.mc_eflags == other.mc_eflags - && self.mc_esp == other.mc_esp - && self.mc_ss == other.mc_ss - && self.mc_len == other.mc_len - && self.mc_fpformat == other.mc_fpformat - && self.mc_ownedfp == other.mc_ownedfp - && self.mc_flags == other.mc_flags - && self - .mc_fpstate - .iter() - .zip(other.mc_fpstate.iter()) - .all(|(a, b)| a == b) - && self.mc_fsbase == other.mc_fsbase - && self.mc_gsbase == other.mc_gsbase - && self.mc_xfpustate == other.mc_xfpustate - && self.mc_xfpustate_len == other.mc_xfpustate_len - && self - .mc_spare2 - .iter() - .zip(other.mc_spare2.iter()) - .all(|(a, b)| a == b) - } - } - impl Eq for mcontext_t {} - impl hash::Hash for mcontext_t { - fn hash(&self, state: &mut H) { - self.mc_onstack.hash(state); - self.mc_gs.hash(state); - self.mc_fs.hash(state); - self.mc_es.hash(state); - self.mc_ds.hash(state); - self.mc_edi.hash(state); - self.mc_esi.hash(state); - self.mc_ebp.hash(state); - self.mc_isp.hash(state); - self.mc_ebx.hash(state); - self.mc_edx.hash(state); - self.mc_ecx.hash(state); - self.mc_eax.hash(state); - self.mc_trapno.hash(state); - self.mc_err.hash(state); - self.mc_eip.hash(state); - self.mc_cs.hash(state); - self.mc_eflags.hash(state); - self.mc_esp.hash(state); - self.mc_ss.hash(state); - self.mc_len.hash(state); - self.mc_fpformat.hash(state); - self.mc_ownedfp.hash(state); - self.mc_flags.hash(state); - self.mc_fpstate.hash(state); - self.mc_fsbase.hash(state); - self.mc_gsbase.hash(state); - self.mc_xfpustate.hash(state); - self.mc_xfpustate_len.hash(state); - self.mc_spare2.hash(state); - } - } - } -} - -pub(crate) const _ALIGNBYTES: usize = size_of::() - 1; - -pub const MINSIGSTKSZ: size_t = 2048; // 512 * 4 - -pub const BIOCSRTIMEOUT: c_ulong = 0x8008426d; -pub const BIOCGRTIMEOUT: c_ulong = 0x4008426e; -pub const KINFO_FILE_SIZE: c_int = 1392; -pub const TIOCTIMESTAMP: c_ulong = 0x40087459; diff --git a/vendor/libc/src/unix/bsd/freebsdlike/freebsd/x86_64/mod.rs 
b/vendor/libc/src/unix/bsd/freebsdlike/freebsd/x86_64/mod.rs deleted file mode 100644 index d665e3da01e875..00000000000000 --- a/vendor/libc/src/unix/bsd/freebsdlike/freebsd/x86_64/mod.rs +++ /dev/null @@ -1,346 +0,0 @@ -use crate::prelude::*; - -pub type clock_t = i32; -pub type wchar_t = i32; -pub type time_t = i64; -pub type suseconds_t = i64; -pub type register_t = i64; - -s! { - pub struct reg32 { - pub r_fs: u32, - pub r_es: u32, - pub r_ds: u32, - pub r_edi: u32, - pub r_esi: u32, - pub r_ebp: u32, - pub r_isp: u32, - pub r_ebx: u32, - pub r_edx: u32, - pub r_ecx: u32, - pub r_eax: u32, - pub r_trapno: u32, - pub r_err: u32, - pub r_eip: u32, - pub r_cs: u32, - pub r_eflags: u32, - pub r_esp: u32, - pub r_ss: u32, - pub r_gs: u32, - } - - pub struct reg { - pub r_r15: i64, - pub r_r14: i64, - pub r_r13: i64, - pub r_r12: i64, - pub r_r11: i64, - pub r_r10: i64, - pub r_r9: i64, - pub r_r8: i64, - pub r_rdi: i64, - pub r_rsi: i64, - pub r_rbp: i64, - pub r_rbx: i64, - pub r_rdx: i64, - pub r_rcx: i64, - pub r_rax: i64, - pub r_trapno: u32, - pub r_fs: u16, - pub r_gs: u16, - pub r_err: u32, - pub r_es: u16, - pub r_ds: u16, - pub r_rip: i64, - pub r_cs: i64, - pub r_rflags: i64, - pub r_rsp: i64, - pub r_ss: i64, - } -} - -s_no_extra_traits! { - pub struct fpreg32 { - pub fpr_env: [u32; 7], - pub fpr_acc: [[u8; 10]; 8], - pub fpr_ex_sw: u32, - pub fpr_pad: [u8; 64], - } - - pub struct fpreg { - pub fpr_env: [u64; 4], - pub fpr_acc: [[u8; 16]; 8], - pub fpr_xacc: [[u8; 16]; 16], - pub fpr_spare: [u64; 12], - } - - pub struct xmmreg { - pub xmm_env: [u32; 8], - pub xmm_acc: [[u8; 16]; 8], - pub xmm_reg: [[u8; 16]; 8], - pub xmm_pad: [u8; 224], - } - - pub union __c_anonymous_elf64_auxv_union { - pub a_val: c_long, - pub a_ptr: *mut c_void, - pub a_fcn: extern "C" fn(), - } - - pub struct Elf64_Auxinfo { - pub a_type: c_long, - pub a_un: __c_anonymous_elf64_auxv_union, - } - - #[repr(align(16))] - pub struct max_align_t { - priv_: [f64; 4], - } - - #[repr(align(16))] - #[cfg_attr(not(any(freebsd11, freebsd12, freebsd13, freebsd14)), non_exhaustive)] - pub struct mcontext_t { - pub mc_onstack: register_t, - pub mc_rdi: register_t, - pub mc_rsi: register_t, - pub mc_rdx: register_t, - pub mc_rcx: register_t, - pub mc_r8: register_t, - pub mc_r9: register_t, - pub mc_rax: register_t, - pub mc_rbx: register_t, - pub mc_rbp: register_t, - pub mc_r10: register_t, - pub mc_r11: register_t, - pub mc_r12: register_t, - pub mc_r13: register_t, - pub mc_r14: register_t, - pub mc_r15: register_t, - pub mc_trapno: u32, - pub mc_fs: u16, - pub mc_gs: u16, - pub mc_addr: register_t, - pub mc_flags: u32, - pub mc_es: u16, - pub mc_ds: u16, - pub mc_err: register_t, - pub mc_rip: register_t, - pub mc_cs: register_t, - pub mc_rflags: register_t, - pub mc_rsp: register_t, - pub mc_ss: register_t, - pub mc_len: c_long, - pub mc_fpformat: c_long, - pub mc_ownedfp: c_long, - pub mc_fpstate: [c_long; 64], - pub mc_fsbase: register_t, - pub mc_gsbase: register_t, - pub mc_xfpustate: register_t, - pub mc_xfpustate_len: register_t, - // freebsd < 15 - #[cfg(any(freebsd11, freebsd12, freebsd13, freebsd14))] - pub mc_spare: [c_long; 4], - // freebsd >= 15 - #[cfg(not(any(freebsd11, freebsd12, freebsd13, freebsd14)))] - pub mc_tlsbase: register_t, - #[cfg(not(any(freebsd11, freebsd12, freebsd13, freebsd14)))] - pub mc_spare: [c_long; 3], - } -} - -cfg_if! 
{ - if #[cfg(feature = "extra_traits")] { - impl PartialEq for fpreg32 { - fn eq(&self, other: &fpreg32) -> bool { - self.fpr_env == other.fpr_env - && self.fpr_acc == other.fpr_acc - && self.fpr_ex_sw == other.fpr_ex_sw - && self - .fpr_pad - .iter() - .zip(other.fpr_pad.iter()) - .all(|(a, b)| a == b) - } - } - impl Eq for fpreg32 {} - impl hash::Hash for fpreg32 { - fn hash(&self, state: &mut H) { - self.fpr_env.hash(state); - self.fpr_acc.hash(state); - self.fpr_ex_sw.hash(state); - self.fpr_pad.hash(state); - } - } - - impl PartialEq for fpreg { - fn eq(&self, other: &fpreg) -> bool { - self.fpr_env == other.fpr_env - && self.fpr_acc == other.fpr_acc - && self.fpr_xacc == other.fpr_xacc - && self.fpr_spare == other.fpr_spare - } - } - impl Eq for fpreg {} - impl hash::Hash for fpreg { - fn hash(&self, state: &mut H) { - self.fpr_env.hash(state); - self.fpr_acc.hash(state); - self.fpr_xacc.hash(state); - self.fpr_spare.hash(state); - } - } - - impl PartialEq for xmmreg { - fn eq(&self, other: &xmmreg) -> bool { - self.xmm_env == other.xmm_env - && self.xmm_acc == other.xmm_acc - && self.xmm_reg == other.xmm_reg - && self - .xmm_pad - .iter() - .zip(other.xmm_pad.iter()) - .all(|(a, b)| a == b) - } - } - impl Eq for xmmreg {} - impl hash::Hash for xmmreg { - fn hash(&self, state: &mut H) { - self.xmm_env.hash(state); - self.xmm_acc.hash(state); - self.xmm_reg.hash(state); - self.xmm_pad.hash(state); - } - } - - // FIXME(msrv): suggested method was added in 1.85 - #[allow(unpredictable_function_pointer_comparisons)] - impl PartialEq for __c_anonymous_elf64_auxv_union { - fn eq(&self, other: &__c_anonymous_elf64_auxv_union) -> bool { - unsafe { - self.a_val == other.a_val - || self.a_ptr == other.a_ptr - || self.a_fcn == other.a_fcn - } - } - } - impl Eq for __c_anonymous_elf64_auxv_union {} - impl PartialEq for Elf64_Auxinfo { - fn eq(&self, other: &Elf64_Auxinfo) -> bool { - self.a_type == other.a_type && self.a_un == other.a_un - } - } - impl Eq for Elf64_Auxinfo {} - - impl PartialEq for mcontext_t { - fn eq(&self, other: &mcontext_t) -> bool { - self.mc_onstack == other.mc_onstack - && self.mc_rdi == other.mc_rdi - && self.mc_rsi == other.mc_rsi - && self.mc_rdx == other.mc_rdx - && self.mc_rcx == other.mc_rcx - && self.mc_r8 == other.mc_r8 - && self.mc_r9 == other.mc_r9 - && self.mc_rax == other.mc_rax - && self.mc_rbx == other.mc_rbx - && self.mc_rbp == other.mc_rbp - && self.mc_r10 == other.mc_r10 - && self.mc_r11 == other.mc_r11 - && self.mc_r12 == other.mc_r12 - && self.mc_r13 == other.mc_r13 - && self.mc_r14 == other.mc_r14 - && self.mc_r15 == other.mc_r15 - && self.mc_trapno == other.mc_trapno - && self.mc_fs == other.mc_fs - && self.mc_gs == other.mc_gs - && self.mc_addr == other.mc_addr - && self.mc_flags == other.mc_flags - && self.mc_es == other.mc_es - && self.mc_ds == other.mc_ds - && self.mc_err == other.mc_err - && self.mc_rip == other.mc_rip - && self.mc_cs == other.mc_cs - && self.mc_rflags == other.mc_rflags - && self.mc_rsp == other.mc_rsp - && self.mc_ss == other.mc_ss - && self.mc_len == other.mc_len - && self.mc_fpformat == other.mc_fpformat - && self.mc_ownedfp == other.mc_ownedfp - && self - .mc_fpstate - .iter() - .zip(other.mc_fpstate.iter()) - .all(|(a, b)| a == b) - && self.mc_fsbase == other.mc_fsbase - && self.mc_gsbase == other.mc_gsbase - && self.mc_xfpustate == other.mc_xfpustate - && self.mc_xfpustate_len == other.mc_xfpustate_len - && self.mc_spare == other.mc_spare - } - } - impl Eq for mcontext_t {} - impl hash::Hash for mcontext_t { - fn 
hash(&self, state: &mut H) { - self.mc_onstack.hash(state); - self.mc_rdi.hash(state); - self.mc_rsi.hash(state); - self.mc_rdx.hash(state); - self.mc_rcx.hash(state); - self.mc_r8.hash(state); - self.mc_r9.hash(state); - self.mc_rax.hash(state); - self.mc_rbx.hash(state); - self.mc_rbp.hash(state); - self.mc_r10.hash(state); - self.mc_r11.hash(state); - self.mc_r12.hash(state); - self.mc_r13.hash(state); - self.mc_r14.hash(state); - self.mc_r15.hash(state); - self.mc_trapno.hash(state); - self.mc_fs.hash(state); - self.mc_gs.hash(state); - self.mc_addr.hash(state); - self.mc_flags.hash(state); - self.mc_es.hash(state); - self.mc_ds.hash(state); - self.mc_err.hash(state); - self.mc_rip.hash(state); - self.mc_cs.hash(state); - self.mc_rflags.hash(state); - self.mc_rsp.hash(state); - self.mc_ss.hash(state); - self.mc_len.hash(state); - self.mc_fpformat.hash(state); - self.mc_ownedfp.hash(state); - self.mc_fpstate.hash(state); - self.mc_fsbase.hash(state); - self.mc_gsbase.hash(state); - self.mc_xfpustate.hash(state); - self.mc_xfpustate_len.hash(state); - self.mc_spare.hash(state); - } - } - } -} - -pub(crate) const _ALIGNBYTES: usize = size_of::() - 1; - -pub const BIOCSRTIMEOUT: c_ulong = 0x8010426d; -pub const BIOCGRTIMEOUT: c_ulong = 0x4010426e; - -pub const MAP_32BIT: c_int = 0x00080000; -pub const MINSIGSTKSZ: size_t = 2048; // 512 * 4 - -pub const _MC_HASSEGS: u32 = 0x1; -pub const _MC_HASBASES: u32 = 0x2; -pub const _MC_HASFPXSTATE: u32 = 0x4; - -pub const _MC_FPFMT_NODEV: c_long = 0x10000; -pub const _MC_FPFMT_XMM: c_long = 0x10002; -pub const _MC_FPOWNED_NONE: c_long = 0x20000; -pub const _MC_FPOWNED_FPU: c_long = 0x20001; -pub const _MC_FPOWNED_PCB: c_long = 0x20002; - -pub const KINFO_FILE_SIZE: c_int = 1392; - -pub const TIOCTIMESTAMP: c_ulong = 0x40107459; diff --git a/vendor/libc/src/unix/bsd/freebsdlike/mod.rs b/vendor/libc/src/unix/bsd/freebsdlike/mod.rs deleted file mode 100644 index 4bf62033474f04..00000000000000 --- a/vendor/libc/src/unix/bsd/freebsdlike/mod.rs +++ /dev/null @@ -1,2009 +0,0 @@ -use crate::off_t; -use crate::prelude::*; - -pub type mode_t = u16; -pub type pthread_attr_t = *mut c_void; -pub type rlim_t = i64; -pub type pthread_mutex_t = *mut c_void; -pub type pthread_mutexattr_t = *mut c_void; -pub type pthread_cond_t = *mut c_void; -pub type pthread_condattr_t = *mut c_void; -pub type pthread_rwlock_t = *mut c_void; -pub type pthread_rwlockattr_t = *mut c_void; -pub type pthread_key_t = c_int; -pub type tcflag_t = c_uint; -pub type speed_t = c_uint; -pub type nl_item = c_int; -pub type id_t = i64; -pub type vm_size_t = crate::uintptr_t; -pub type key_t = c_long; - -// elf.h - -pub type Elf32_Addr = u32; -pub type Elf32_Half = u16; -pub type Elf32_Lword = u64; -pub type Elf32_Off = u32; -pub type Elf32_Sword = i32; -pub type Elf32_Word = u32; - -pub type Elf64_Addr = u64; -pub type Elf64_Half = u16; -pub type Elf64_Lword = u64; -pub type Elf64_Off = u64; -pub type Elf64_Sword = i32; -pub type Elf64_Sxword = i64; -pub type Elf64_Word = u32; -pub type Elf64_Xword = u64; - -pub type iconv_t = *mut c_void; - -// It's an alias over "struct __kvm_t". However, its fields aren't supposed to be used directly, -// making the type definition system dependent. Better not bind it exactly. -pub type kvm_t = c_void; - -pub type posix_spawnattr_t = *mut c_void; -pub type posix_spawn_file_actions_t = *mut c_void; - -cfg_if! 
{ - if #[cfg(target_pointer_width = "64")] { - type Elf_Addr = Elf64_Addr; - type Elf_Half = Elf64_Half; - type Elf_Phdr = Elf64_Phdr; - } else if #[cfg(target_pointer_width = "32")] { - type Elf_Addr = Elf32_Addr; - type Elf_Half = Elf32_Half; - type Elf_Phdr = Elf32_Phdr; - } -} - -// link.h - -#[derive(Debug)] -pub enum timezone {} -impl Copy for timezone {} -impl Clone for timezone { - fn clone(&self) -> timezone { - *self - } -} - -impl siginfo_t { - pub unsafe fn si_addr(&self) -> *mut c_void { - self.si_addr - } - - pub unsafe fn si_value(&self) -> crate::sigval { - self.si_value - } - - pub unsafe fn si_pid(&self) -> crate::pid_t { - self.si_pid - } - - pub unsafe fn si_uid(&self) -> crate::uid_t { - self.si_uid - } - - pub unsafe fn si_status(&self) -> c_int { - self.si_status - } -} - -s! { - pub struct in_addr { - pub s_addr: crate::in_addr_t, - } - - pub struct ip_mreq { - pub imr_multiaddr: in_addr, - pub imr_interface: in_addr, - } - - pub struct ip_mreqn { - pub imr_multiaddr: in_addr, - pub imr_address: in_addr, - pub imr_ifindex: c_int, - } - - pub struct ip_mreq_source { - pub imr_multiaddr: in_addr, - pub imr_sourceaddr: in_addr, - pub imr_interface: in_addr, - } - - pub struct glob_t { - pub gl_pathc: size_t, - pub gl_matchc: size_t, - pub gl_offs: size_t, - pub gl_flags: c_int, - pub gl_pathv: *mut *mut c_char, - __unused3: *mut c_void, - __unused4: *mut c_void, - __unused5: *mut c_void, - __unused6: *mut c_void, - __unused7: *mut c_void, - __unused8: *mut c_void, - } - - pub struct addrinfo { - pub ai_flags: c_int, - pub ai_family: c_int, - pub ai_socktype: c_int, - pub ai_protocol: c_int, - pub ai_addrlen: crate::socklen_t, - pub ai_canonname: *mut c_char, - pub ai_addr: *mut crate::sockaddr, - pub ai_next: *mut addrinfo, - } - - pub struct sigset_t { - bits: [u32; 4], - } - - pub struct siginfo_t { - pub si_signo: c_int, - pub si_errno: c_int, - pub si_code: c_int, - pub si_pid: crate::pid_t, - pub si_uid: crate::uid_t, - pub si_status: c_int, - pub si_addr: *mut c_void, - pub si_value: crate::sigval, - _pad1: c_long, - _pad2: [c_int; 7], - } - - pub struct sigaction { - pub sa_sigaction: crate::sighandler_t, - pub sa_flags: c_int, - pub sa_mask: sigset_t, - } - - pub struct sched_param { - pub sched_priority: c_int, - } - - pub struct Dl_info { - pub dli_fname: *const c_char, - pub dli_fbase: *mut c_void, - pub dli_sname: *const c_char, - pub dli_saddr: *mut c_void, - } - - pub struct sockaddr_in { - pub sin_len: u8, - pub sin_family: crate::sa_family_t, - pub sin_port: crate::in_port_t, - pub sin_addr: crate::in_addr, - pub sin_zero: [c_char; 8], - } - - pub struct termios { - pub c_iflag: crate::tcflag_t, - pub c_oflag: crate::tcflag_t, - pub c_cflag: crate::tcflag_t, - pub c_lflag: crate::tcflag_t, - pub c_cc: [crate::cc_t; crate::NCCS], - pub c_ispeed: crate::speed_t, - pub c_ospeed: crate::speed_t, - } - - pub struct flock { - pub l_start: off_t, - pub l_len: off_t, - pub l_pid: crate::pid_t, - pub l_type: c_short, - pub l_whence: c_short, - #[cfg(not(target_os = "dragonfly"))] - pub l_sysid: c_int, - } - - pub struct sf_hdtr { - pub headers: *mut crate::iovec, - pub hdr_cnt: c_int, - pub trailers: *mut crate::iovec, - pub trl_cnt: c_int, - } - - pub struct lconv { - pub decimal_point: *mut c_char, - pub thousands_sep: *mut c_char, - pub grouping: *mut c_char, - pub int_curr_symbol: *mut c_char, - pub currency_symbol: *mut c_char, - pub mon_decimal_point: *mut c_char, - pub mon_thousands_sep: *mut c_char, - pub mon_grouping: *mut c_char, - pub positive_sign: 
*mut c_char, - pub negative_sign: *mut c_char, - pub int_frac_digits: c_char, - pub frac_digits: c_char, - pub p_cs_precedes: c_char, - pub p_sep_by_space: c_char, - pub n_cs_precedes: c_char, - pub n_sep_by_space: c_char, - pub p_sign_posn: c_char, - pub n_sign_posn: c_char, - pub int_p_cs_precedes: c_char, - pub int_n_cs_precedes: c_char, - pub int_p_sep_by_space: c_char, - pub int_n_sep_by_space: c_char, - pub int_p_sign_posn: c_char, - pub int_n_sign_posn: c_char, - } - - pub struct cmsgcred { - pub cmcred_pid: crate::pid_t, - pub cmcred_uid: crate::uid_t, - pub cmcred_euid: crate::uid_t, - pub cmcred_gid: crate::gid_t, - pub cmcred_ngroups: c_short, - pub cmcred_groups: [crate::gid_t; CMGROUP_MAX], - } - - pub struct rtprio { - pub type_: c_ushort, - pub prio: c_ushort, - } - - pub struct in6_pktinfo { - pub ipi6_addr: crate::in6_addr, - pub ipi6_ifindex: c_uint, - } - - pub struct arphdr { - pub ar_hrd: u16, - pub ar_pro: u16, - pub ar_hln: u8, - pub ar_pln: u8, - pub ar_op: u16, - } - - pub struct timex { - pub modes: c_uint, - pub offset: c_long, - pub freq: c_long, - pub maxerror: c_long, - pub esterror: c_long, - pub status: c_int, - pub constant: c_long, - pub precision: c_long, - pub tolerance: c_long, - pub ppsfreq: c_long, - pub jitter: c_long, - pub shift: c_int, - pub stabil: c_long, - pub jitcnt: c_long, - pub calcnt: c_long, - pub errcnt: c_long, - pub stbcnt: c_long, - } - - pub struct ntptimeval { - pub time: crate::timespec, - pub maxerror: c_long, - pub esterror: c_long, - pub tai: c_long, - pub time_state: c_int, - } - - pub struct accept_filter_arg { - pub af_name: [c_char; 16], - af_arg: [c_char; 256 - 16], - } - - pub struct ptrace_io_desc { - pub piod_op: c_int, - pub piod_offs: *mut c_void, - pub piod_addr: *mut c_void, - pub piod_len: size_t, - } - - // bpf.h - - pub struct bpf_program { - pub bf_len: c_uint, - pub bf_insns: *mut bpf_insn, - } - - pub struct bpf_stat { - pub bs_recv: c_uint, - pub bs_drop: c_uint, - } - - pub struct bpf_version { - pub bv_major: c_ushort, - pub bv_minor: c_ushort, - } - - pub struct bpf_hdr { - pub bh_tstamp: crate::timeval, - pub bh_caplen: u32, - pub bh_datalen: u32, - pub bh_hdrlen: c_ushort, - } - - pub struct bpf_insn { - pub code: c_ushort, - pub jt: c_uchar, - pub jf: c_uchar, - pub k: u32, - } - - pub struct bpf_dltlist { - bfl_len: c_uint, - bfl_list: *mut c_uint, - } - - // elf.h - - pub struct Elf32_Phdr { - pub p_type: Elf32_Word, - pub p_offset: Elf32_Off, - pub p_vaddr: Elf32_Addr, - pub p_paddr: Elf32_Addr, - pub p_filesz: Elf32_Word, - pub p_memsz: Elf32_Word, - pub p_flags: Elf32_Word, - pub p_align: Elf32_Word, - } - - pub struct Elf64_Phdr { - pub p_type: Elf64_Word, - pub p_flags: Elf64_Word, - pub p_offset: Elf64_Off, - pub p_vaddr: Elf64_Addr, - pub p_paddr: Elf64_Addr, - pub p_filesz: Elf64_Xword, - pub p_memsz: Elf64_Xword, - pub p_align: Elf64_Xword, - } - - // link.h - - pub struct dl_phdr_info { - pub dlpi_addr: Elf_Addr, - pub dlpi_name: *const c_char, - pub dlpi_phdr: *const Elf_Phdr, - pub dlpi_phnum: Elf_Half, - pub dlpi_adds: c_ulonglong, - pub dlpi_subs: c_ulonglong, - pub dlpi_tls_modid: usize, - pub dlpi_tls_data: *mut c_void, - } - - pub struct ipc_perm { - pub cuid: crate::uid_t, - pub cgid: crate::gid_t, - pub uid: crate::uid_t, - pub gid: crate::gid_t, - pub mode: mode_t, - pub seq: c_ushort, - pub key: crate::key_t, - } - - pub struct eui64 { - pub octet: [u8; EUI64_LEN], - } -} - -s_no_extra_traits! 
{ - pub struct sockaddr_storage { - pub ss_len: u8, - pub ss_family: crate::sa_family_t, - __ss_pad1: [u8; 6], - __ss_align: i64, - __ss_pad2: [u8; 112], - } -} - -cfg_if! { - if #[cfg(feature = "extra_traits")] { - impl PartialEq for sockaddr_storage { - fn eq(&self, other: &sockaddr_storage) -> bool { - self.ss_len == other.ss_len - && self.ss_family == other.ss_family - && self.__ss_pad1 == other.__ss_pad1 - && self.__ss_align == other.__ss_align - && self - .__ss_pad2 - .iter() - .zip(other.__ss_pad2.iter()) - .all(|(a, b)| a == b) - } - } - impl Eq for sockaddr_storage {} - impl hash::Hash for sockaddr_storage { - fn hash(&self, state: &mut H) { - self.ss_len.hash(state); - self.ss_family.hash(state); - self.__ss_pad1.hash(state); - self.__ss_align.hash(state); - self.__ss_pad2.hash(state); - } - } - } -} - -// Non-public helper constant -const SIZEOF_LONG: usize = size_of::(); - -#[deprecated( - since = "0.2.64", - note = "Can vary at runtime. Use sysconf(3) instead" -)] -pub const AIO_LISTIO_MAX: c_int = 16; -pub const AIO_CANCELED: c_int = 1; -pub const AIO_NOTCANCELED: c_int = 2; -pub const AIO_ALLDONE: c_int = 3; -pub const LIO_NOP: c_int = 0; -pub const LIO_WRITE: c_int = 1; -pub const LIO_READ: c_int = 2; -pub const LIO_WAIT: c_int = 1; -pub const LIO_NOWAIT: c_int = 0; - -pub const SIGEV_NONE: c_int = 0; -pub const SIGEV_SIGNAL: c_int = 1; -pub const SIGEV_THREAD: c_int = 2; -pub const SIGEV_KEVENT: c_int = 3; - -pub const CODESET: crate::nl_item = 0; -pub const D_T_FMT: crate::nl_item = 1; -pub const D_FMT: crate::nl_item = 2; -pub const T_FMT: crate::nl_item = 3; -pub const T_FMT_AMPM: crate::nl_item = 4; -pub const AM_STR: crate::nl_item = 5; -pub const PM_STR: crate::nl_item = 6; - -pub const DAY_1: crate::nl_item = 7; -pub const DAY_2: crate::nl_item = 8; -pub const DAY_3: crate::nl_item = 9; -pub const DAY_4: crate::nl_item = 10; -pub const DAY_5: crate::nl_item = 11; -pub const DAY_6: crate::nl_item = 12; -pub const DAY_7: crate::nl_item = 13; - -pub const ABDAY_1: crate::nl_item = 14; -pub const ABDAY_2: crate::nl_item = 15; -pub const ABDAY_3: crate::nl_item = 16; -pub const ABDAY_4: crate::nl_item = 17; -pub const ABDAY_5: crate::nl_item = 18; -pub const ABDAY_6: crate::nl_item = 19; -pub const ABDAY_7: crate::nl_item = 20; - -pub const MON_1: crate::nl_item = 21; -pub const MON_2: crate::nl_item = 22; -pub const MON_3: crate::nl_item = 23; -pub const MON_4: crate::nl_item = 24; -pub const MON_5: crate::nl_item = 25; -pub const MON_6: crate::nl_item = 26; -pub const MON_7: crate::nl_item = 27; -pub const MON_8: crate::nl_item = 28; -pub const MON_9: crate::nl_item = 29; -pub const MON_10: crate::nl_item = 30; -pub const MON_11: crate::nl_item = 31; -pub const MON_12: crate::nl_item = 32; - -pub const ABMON_1: crate::nl_item = 33; -pub const ABMON_2: crate::nl_item = 34; -pub const ABMON_3: crate::nl_item = 35; -pub const ABMON_4: crate::nl_item = 36; -pub const ABMON_5: crate::nl_item = 37; -pub const ABMON_6: crate::nl_item = 38; -pub const ABMON_7: crate::nl_item = 39; -pub const ABMON_8: crate::nl_item = 40; -pub const ABMON_9: crate::nl_item = 41; -pub const ABMON_10: crate::nl_item = 42; -pub const ABMON_11: crate::nl_item = 43; -pub const ABMON_12: crate::nl_item = 44; - -pub const ERA: crate::nl_item = 45; -pub const ERA_D_FMT: crate::nl_item = 46; -pub const ERA_D_T_FMT: crate::nl_item = 47; -pub const ERA_T_FMT: crate::nl_item = 48; -pub const ALT_DIGITS: crate::nl_item = 49; - -pub const RADIXCHAR: crate::nl_item = 50; -pub const THOUSEP: crate::nl_item = 
51; - -pub const YESEXPR: crate::nl_item = 52; -pub const NOEXPR: crate::nl_item = 53; - -pub const YESSTR: crate::nl_item = 54; -pub const NOSTR: crate::nl_item = 55; - -pub const CRNCYSTR: crate::nl_item = 56; - -pub const D_MD_ORDER: crate::nl_item = 57; - -pub const ALTMON_1: crate::nl_item = 58; -pub const ALTMON_2: crate::nl_item = 59; -pub const ALTMON_3: crate::nl_item = 60; -pub const ALTMON_4: crate::nl_item = 61; -pub const ALTMON_5: crate::nl_item = 62; -pub const ALTMON_6: crate::nl_item = 63; -pub const ALTMON_7: crate::nl_item = 64; -pub const ALTMON_8: crate::nl_item = 65; -pub const ALTMON_9: crate::nl_item = 66; -pub const ALTMON_10: crate::nl_item = 67; -pub const ALTMON_11: crate::nl_item = 68; -pub const ALTMON_12: crate::nl_item = 69; - -pub const EXIT_FAILURE: c_int = 1; -pub const EXIT_SUCCESS: c_int = 0; -pub const EOF: c_int = -1; -pub const SEEK_SET: c_int = 0; -pub const SEEK_CUR: c_int = 1; -pub const SEEK_END: c_int = 2; -pub const SEEK_DATA: c_int = 3; -pub const SEEK_HOLE: c_int = 4; -pub const _IOFBF: c_int = 0; -pub const _IONBF: c_int = 2; -pub const _IOLBF: c_int = 1; -pub const BUFSIZ: c_uint = 1024; -pub const FOPEN_MAX: c_uint = 20; -pub const FILENAME_MAX: c_uint = 1024; -pub const L_tmpnam: c_uint = 1024; -pub const TMP_MAX: c_uint = 308915776; - -pub const O_NOCTTY: c_int = 32768; -pub const O_DIRECT: c_int = 0x00010000; - -pub const S_IFIFO: mode_t = 0o1_0000; -pub const S_IFCHR: mode_t = 0o2_0000; -pub const S_IFBLK: mode_t = 0o6_0000; -pub const S_IFDIR: mode_t = 0o4_0000; -pub const S_IFREG: mode_t = 0o10_0000; -pub const S_IFLNK: mode_t = 0o12_0000; -pub const S_IFSOCK: mode_t = 0o14_0000; -pub const S_IFMT: mode_t = 0o17_0000; -pub const S_IEXEC: mode_t = 0o0100; -pub const S_IWRITE: mode_t = 0o0200; -pub const S_IREAD: mode_t = 0o0400; -pub const S_IRWXU: mode_t = 0o0700; -pub const S_IXUSR: mode_t = 0o0100; -pub const S_IWUSR: mode_t = 0o0200; -pub const S_IRUSR: mode_t = 0o0400; -pub const S_IRWXG: mode_t = 0o0070; -pub const S_IXGRP: mode_t = 0o0010; -pub const S_IWGRP: mode_t = 0o0020; -pub const S_IRGRP: mode_t = 0o0040; -pub const S_IRWXO: mode_t = 0o0007; -pub const S_IXOTH: mode_t = 0o0001; -pub const S_IWOTH: mode_t = 0o0002; -pub const S_IROTH: mode_t = 0o0004; -pub const F_OK: c_int = 0; -pub const R_OK: c_int = 4; -pub const W_OK: c_int = 2; -pub const X_OK: c_int = 1; -pub const STDIN_FILENO: c_int = 0; -pub const STDOUT_FILENO: c_int = 1; -pub const STDERR_FILENO: c_int = 2; -pub const F_LOCK: c_int = 1; -pub const F_TEST: c_int = 3; -pub const F_TLOCK: c_int = 2; -pub const F_ULOCK: c_int = 0; -pub const F_DUPFD_CLOEXEC: c_int = 17; -pub const F_DUP2FD: c_int = 10; -pub const F_DUP2FD_CLOEXEC: c_int = 18; -pub const SIGHUP: c_int = 1; -pub const SIGINT: c_int = 2; -pub const SIGQUIT: c_int = 3; -pub const SIGILL: c_int = 4; -pub const SIGABRT: c_int = 6; -pub const SIGEMT: c_int = 7; -pub const SIGFPE: c_int = 8; -pub const SIGKILL: c_int = 9; -pub const SIGSEGV: c_int = 11; -pub const SIGPIPE: c_int = 13; -pub const SIGALRM: c_int = 14; -pub const SIGTERM: c_int = 15; - -pub const PROT_NONE: c_int = 0; -pub const PROT_READ: c_int = 1; -pub const PROT_WRITE: c_int = 2; -pub const PROT_EXEC: c_int = 4; - -pub const MAP_FILE: c_int = 0x0000; -pub const MAP_SHARED: c_int = 0x0001; -pub const MAP_PRIVATE: c_int = 0x0002; -pub const MAP_FIXED: c_int = 0x0010; -pub const MAP_ANON: c_int = 0x1000; -pub const MAP_ANONYMOUS: c_int = MAP_ANON; - -pub const MAP_FAILED: *mut c_void = !0 as *mut c_void; - -pub const MCL_CURRENT: c_int = 
0x0001; -pub const MCL_FUTURE: c_int = 0x0002; - -pub const MNT_EXPUBLIC: c_int = 0x20000000; -pub const MNT_NOATIME: c_int = 0x10000000; -pub const MNT_NOCLUSTERR: c_int = 0x40000000; -pub const MNT_NOCLUSTERW: c_int = 0x80000000; -pub const MNT_NOSYMFOLLOW: c_int = 0x00400000; -pub const MNT_SOFTDEP: c_int = 0x00200000; -pub const MNT_SUIDDIR: c_int = 0x00100000; -pub const MNT_EXRDONLY: c_int = 0x00000080; -pub const MNT_DEFEXPORTED: c_int = 0x00000200; -pub const MNT_EXPORTANON: c_int = 0x00000400; -pub const MNT_EXKERB: c_int = 0x00000800; -pub const MNT_DELEXPORT: c_int = 0x00020000; - -pub const MS_SYNC: c_int = 0x0000; -pub const MS_ASYNC: c_int = 0x0001; -pub const MS_INVALIDATE: c_int = 0x0002; - -pub const EPERM: c_int = 1; -pub const ENOENT: c_int = 2; -pub const ESRCH: c_int = 3; -pub const EINTR: c_int = 4; -pub const EIO: c_int = 5; -pub const ENXIO: c_int = 6; -pub const E2BIG: c_int = 7; -pub const ENOEXEC: c_int = 8; -pub const EBADF: c_int = 9; -pub const ECHILD: c_int = 10; -pub const EDEADLK: c_int = 11; -pub const ENOMEM: c_int = 12; -pub const EACCES: c_int = 13; -pub const EFAULT: c_int = 14; -pub const ENOTBLK: c_int = 15; -pub const EBUSY: c_int = 16; -pub const EEXIST: c_int = 17; -pub const EXDEV: c_int = 18; -pub const ENODEV: c_int = 19; -pub const ENOTDIR: c_int = 20; -pub const EISDIR: c_int = 21; -pub const EINVAL: c_int = 22; -pub const ENFILE: c_int = 23; -pub const EMFILE: c_int = 24; -pub const ENOTTY: c_int = 25; -pub const ETXTBSY: c_int = 26; -pub const EFBIG: c_int = 27; -pub const ENOSPC: c_int = 28; -pub const ESPIPE: c_int = 29; -pub const EROFS: c_int = 30; -pub const EMLINK: c_int = 31; -pub const EPIPE: c_int = 32; -pub const EDOM: c_int = 33; -pub const ERANGE: c_int = 34; -pub const EAGAIN: c_int = 35; -pub const EWOULDBLOCK: c_int = 35; -pub const EINPROGRESS: c_int = 36; -pub const EALREADY: c_int = 37; -pub const ENOTSOCK: c_int = 38; -pub const EDESTADDRREQ: c_int = 39; -pub const EMSGSIZE: c_int = 40; -pub const EPROTOTYPE: c_int = 41; -pub const ENOPROTOOPT: c_int = 42; -pub const EPROTONOSUPPORT: c_int = 43; -pub const ESOCKTNOSUPPORT: c_int = 44; -pub const EOPNOTSUPP: c_int = 45; -pub const ENOTSUP: c_int = EOPNOTSUPP; -pub const EPFNOSUPPORT: c_int = 46; -pub const EAFNOSUPPORT: c_int = 47; -pub const EADDRINUSE: c_int = 48; -pub const EADDRNOTAVAIL: c_int = 49; -pub const ENETDOWN: c_int = 50; -pub const ENETUNREACH: c_int = 51; -pub const ENETRESET: c_int = 52; -pub const ECONNABORTED: c_int = 53; -pub const ECONNRESET: c_int = 54; -pub const ENOBUFS: c_int = 55; -pub const EISCONN: c_int = 56; -pub const ENOTCONN: c_int = 57; -pub const ESHUTDOWN: c_int = 58; -pub const ETOOMANYREFS: c_int = 59; -pub const ETIMEDOUT: c_int = 60; -pub const ECONNREFUSED: c_int = 61; -pub const ELOOP: c_int = 62; -pub const ENAMETOOLONG: c_int = 63; -pub const EHOSTDOWN: c_int = 64; -pub const EHOSTUNREACH: c_int = 65; -pub const ENOTEMPTY: c_int = 66; -pub const EPROCLIM: c_int = 67; -pub const EUSERS: c_int = 68; -pub const EDQUOT: c_int = 69; -pub const ESTALE: c_int = 70; -pub const EREMOTE: c_int = 71; -pub const EBADRPC: c_int = 72; -pub const ERPCMISMATCH: c_int = 73; -pub const EPROGUNAVAIL: c_int = 74; -pub const EPROGMISMATCH: c_int = 75; -pub const EPROCUNAVAIL: c_int = 76; -pub const ENOLCK: c_int = 77; -pub const ENOSYS: c_int = 78; -pub const EFTYPE: c_int = 79; -pub const EAUTH: c_int = 80; -pub const ENEEDAUTH: c_int = 81; -pub const EIDRM: c_int = 82; -pub const ENOMSG: c_int = 83; -pub const EOVERFLOW: c_int = 84; -pub const 
ECANCELED: c_int = 85; -pub const EILSEQ: c_int = 86; -pub const ENOATTR: c_int = 87; -pub const EDOOFUS: c_int = 88; -pub const EBADMSG: c_int = 89; -pub const EMULTIHOP: c_int = 90; -pub const ENOLINK: c_int = 91; -pub const EPROTO: c_int = 92; - -pub const POLLSTANDARD: c_short = crate::POLLIN - | crate::POLLPRI - | crate::POLLOUT - | crate::POLLRDNORM - | crate::POLLRDBAND - | crate::POLLWRBAND - | crate::POLLERR - | crate::POLLHUP - | crate::POLLNVAL; - -pub const AI_PASSIVE: c_int = 0x00000001; -pub const AI_CANONNAME: c_int = 0x00000002; -pub const AI_NUMERICHOST: c_int = 0x00000004; -pub const AI_NUMERICSERV: c_int = 0x00000008; -pub const AI_ALL: c_int = 0x00000100; -pub const AI_ADDRCONFIG: c_int = 0x00000400; -pub const AI_V4MAPPED: c_int = 0x00000800; - -pub const EAI_AGAIN: c_int = 2; -pub const EAI_BADFLAGS: c_int = 3; -pub const EAI_FAIL: c_int = 4; -pub const EAI_FAMILY: c_int = 5; -pub const EAI_MEMORY: c_int = 6; -pub const EAI_NONAME: c_int = 8; -pub const EAI_SERVICE: c_int = 9; -pub const EAI_SOCKTYPE: c_int = 10; -pub const EAI_SYSTEM: c_int = 11; -pub const EAI_OVERFLOW: c_int = 14; - -pub const F_DUPFD: c_int = 0; -pub const F_GETFD: c_int = 1; -pub const F_SETFD: c_int = 2; -pub const F_GETFL: c_int = 3; -pub const F_SETFL: c_int = 4; - -pub const SIGTRAP: c_int = 5; - -pub const GLOB_APPEND: c_int = 0x0001; -pub const GLOB_DOOFFS: c_int = 0x0002; -pub const GLOB_ERR: c_int = 0x0004; -pub const GLOB_MARK: c_int = 0x0008; -pub const GLOB_NOCHECK: c_int = 0x0010; -pub const GLOB_NOSORT: c_int = 0x0020; -pub const GLOB_NOESCAPE: c_int = 0x2000; - -pub const GLOB_NOSPACE: c_int = -1; -pub const GLOB_ABORTED: c_int = -2; -pub const GLOB_NOMATCH: c_int = -3; - -pub const POSIX_MADV_NORMAL: c_int = 0; -pub const POSIX_MADV_RANDOM: c_int = 1; -pub const POSIX_MADV_SEQUENTIAL: c_int = 2; -pub const POSIX_MADV_WILLNEED: c_int = 3; -pub const POSIX_MADV_DONTNEED: c_int = 4; - -pub const PTHREAD_BARRIER_SERIAL_THREAD: c_int = -1; -pub const PTHREAD_PROCESS_PRIVATE: c_int = 0; -pub const PTHREAD_PROCESS_SHARED: c_int = 1; -pub const PTHREAD_CREATE_JOINABLE: c_int = 0; -pub const PTHREAD_CREATE_DETACHED: c_int = 1; - -pub const RLIMIT_CPU: c_int = 0; -pub const RLIMIT_FSIZE: c_int = 1; -pub const RLIMIT_DATA: c_int = 2; -pub const RLIMIT_STACK: c_int = 3; -pub const RLIMIT_CORE: c_int = 4; -pub const RLIMIT_RSS: c_int = 5; -pub const RLIMIT_MEMLOCK: c_int = 6; -pub const RLIMIT_NPROC: c_int = 7; -pub const RLIMIT_NOFILE: c_int = 8; -pub const RLIMIT_SBSIZE: c_int = 9; -pub const RLIMIT_VMEM: c_int = 10; -pub const RLIMIT_AS: c_int = RLIMIT_VMEM; -pub const RLIM_INFINITY: rlim_t = 0x7fff_ffff_ffff_ffff; - -pub const RUSAGE_SELF: c_int = 0; -pub const RUSAGE_CHILDREN: c_int = -1; - -pub const CLOCK_REALTIME: crate::clockid_t = 0; -pub const CLOCK_VIRTUAL: crate::clockid_t = 1; -pub const CLOCK_PROF: crate::clockid_t = 2; -pub const CLOCK_MONOTONIC: crate::clockid_t = 4; -pub const CLOCK_UPTIME: crate::clockid_t = 5; -pub const CLOCK_UPTIME_PRECISE: crate::clockid_t = 7; -pub const CLOCK_UPTIME_FAST: crate::clockid_t = 8; -pub const CLOCK_REALTIME_PRECISE: crate::clockid_t = 9; -pub const CLOCK_REALTIME_FAST: crate::clockid_t = 10; -pub const CLOCK_MONOTONIC_PRECISE: crate::clockid_t = 11; -pub const CLOCK_MONOTONIC_FAST: crate::clockid_t = 12; -pub const CLOCK_SECOND: crate::clockid_t = 13; -pub const CLOCK_THREAD_CPUTIME_ID: crate::clockid_t = 14; -pub const CLOCK_PROCESS_CPUTIME_ID: crate::clockid_t = 15; - -pub const MADV_NORMAL: c_int = 0; -pub const MADV_RANDOM: c_int = 1; 
-pub const MADV_SEQUENTIAL: c_int = 2; -pub const MADV_WILLNEED: c_int = 3; -pub const MADV_DONTNEED: c_int = 4; -pub const MADV_FREE: c_int = 5; -pub const MADV_NOSYNC: c_int = 6; -pub const MADV_AUTOSYNC: c_int = 7; -pub const MADV_NOCORE: c_int = 8; -pub const MADV_CORE: c_int = 9; - -pub const MINCORE_INCORE: c_int = 0x1; -pub const MINCORE_REFERENCED: c_int = 0x2; -pub const MINCORE_MODIFIED: c_int = 0x4; -pub const MINCORE_REFERENCED_OTHER: c_int = 0x8; -pub const MINCORE_MODIFIED_OTHER: c_int = 0x10; - -pub const AF_UNSPEC: c_int = 0; -pub const AF_LOCAL: c_int = 1; -pub const AF_UNIX: c_int = AF_LOCAL; -pub const AF_INET: c_int = 2; -pub const AF_IMPLINK: c_int = 3; -pub const AF_PUP: c_int = 4; -pub const AF_CHAOS: c_int = 5; -pub const AF_NETBIOS: c_int = 6; -pub const AF_ISO: c_int = 7; -pub const AF_OSI: c_int = AF_ISO; -pub const AF_ECMA: c_int = 8; -pub const AF_DATAKIT: c_int = 9; -pub const AF_CCITT: c_int = 10; -pub const AF_SNA: c_int = 11; -pub const AF_DECnet: c_int = 12; -pub const AF_DLI: c_int = 13; -pub const AF_LAT: c_int = 14; -pub const AF_HYLINK: c_int = 15; -pub const AF_APPLETALK: c_int = 16; -pub const AF_ROUTE: c_int = 17; -pub const AF_LINK: c_int = 18; -pub const pseudo_AF_XTP: c_int = 19; -pub const AF_COIP: c_int = 20; -pub const AF_CNT: c_int = 21; -pub const pseudo_AF_RTIP: c_int = 22; -pub const AF_IPX: c_int = 23; -pub const AF_SIP: c_int = 24; -pub const pseudo_AF_PIP: c_int = 25; -pub const AF_ISDN: c_int = 26; -pub const AF_E164: c_int = AF_ISDN; -pub const pseudo_AF_KEY: c_int = 27; -pub const AF_INET6: c_int = 28; -pub const AF_NATM: c_int = 29; -pub const AF_ATM: c_int = 30; -pub const pseudo_AF_HDRCMPLT: c_int = 31; -pub const AF_NETGRAPH: c_int = 32; - -pub const PF_UNSPEC: c_int = AF_UNSPEC; -pub const PF_LOCAL: c_int = AF_LOCAL; -pub const PF_UNIX: c_int = PF_LOCAL; -pub const PF_INET: c_int = AF_INET; -pub const PF_IMPLINK: c_int = AF_IMPLINK; -pub const PF_PUP: c_int = AF_PUP; -pub const PF_CHAOS: c_int = AF_CHAOS; -pub const PF_NETBIOS: c_int = AF_NETBIOS; -pub const PF_ISO: c_int = AF_ISO; -pub const PF_OSI: c_int = AF_ISO; -pub const PF_ECMA: c_int = AF_ECMA; -pub const PF_DATAKIT: c_int = AF_DATAKIT; -pub const PF_CCITT: c_int = AF_CCITT; -pub const PF_SNA: c_int = AF_SNA; -pub const PF_DECnet: c_int = AF_DECnet; -pub const PF_DLI: c_int = AF_DLI; -pub const PF_LAT: c_int = AF_LAT; -pub const PF_HYLINK: c_int = AF_HYLINK; -pub const PF_APPLETALK: c_int = AF_APPLETALK; -pub const PF_ROUTE: c_int = AF_ROUTE; -pub const PF_LINK: c_int = AF_LINK; -pub const PF_XTP: c_int = pseudo_AF_XTP; -pub const PF_COIP: c_int = AF_COIP; -pub const PF_CNT: c_int = AF_CNT; -pub const PF_SIP: c_int = AF_SIP; -pub const PF_IPX: c_int = AF_IPX; -pub const PF_RTIP: c_int = pseudo_AF_RTIP; -pub const PF_PIP: c_int = pseudo_AF_PIP; -pub const PF_ISDN: c_int = AF_ISDN; -pub const PF_KEY: c_int = pseudo_AF_KEY; -pub const PF_INET6: c_int = AF_INET6; -pub const PF_NATM: c_int = AF_NATM; -pub const PF_ATM: c_int = AF_ATM; -pub const PF_NETGRAPH: c_int = AF_NETGRAPH; - -pub const PIOD_READ_D: c_int = 1; -pub const PIOD_WRITE_D: c_int = 2; -pub const PIOD_READ_I: c_int = 3; -pub const PIOD_WRITE_I: c_int = 4; - -pub const PT_TRACE_ME: c_int = 0; -pub const PT_READ_I: c_int = 1; -pub const PT_READ_D: c_int = 2; -pub const PT_WRITE_I: c_int = 4; -pub const PT_WRITE_D: c_int = 5; -pub const PT_CONTINUE: c_int = 7; -pub const PT_KILL: c_int = 8; -pub const PT_STEP: c_int = 9; -pub const PT_ATTACH: c_int = 10; -pub const PT_DETACH: c_int = 11; -pub const PT_IO: c_int = 
12; - -pub const SOMAXCONN: c_int = 128; - -pub const MSG_OOB: c_int = 0x00000001; -pub const MSG_PEEK: c_int = 0x00000002; -pub const MSG_DONTROUTE: c_int = 0x00000004; -pub const MSG_EOR: c_int = 0x00000008; -pub const MSG_TRUNC: c_int = 0x00000010; -pub const MSG_CTRUNC: c_int = 0x00000020; -pub const MSG_WAITALL: c_int = 0x00000040; -pub const MSG_DONTWAIT: c_int = 0x00000080; -pub const MSG_EOF: c_int = 0x00000100; - -pub const SCM_TIMESTAMP: c_int = 0x02; -pub const SCM_CREDS: c_int = 0x03; - -pub const SOCK_STREAM: c_int = 1; -pub const SOCK_DGRAM: c_int = 2; -pub const SOCK_RAW: c_int = 3; -pub const SOCK_RDM: c_int = 4; -pub const SOCK_SEQPACKET: c_int = 5; -pub const SOCK_CLOEXEC: c_int = 0x10000000; -pub const SOCK_NONBLOCK: c_int = 0x20000000; -pub const SOCK_MAXADDRLEN: c_int = 255; -pub const IP_TTL: c_int = 4; -pub const IP_HDRINCL: c_int = 2; -pub const IP_RECVDSTADDR: c_int = 7; -pub const IP_SENDSRCADDR: c_int = IP_RECVDSTADDR; -pub const IP_ADD_MEMBERSHIP: c_int = 12; -pub const IP_DROP_MEMBERSHIP: c_int = 13; -pub const IP_RECVIF: c_int = 20; -pub const IP_RECVTTL: c_int = 65; -pub const IPV6_RECVHOPLIMIT: c_int = 37; -pub const IPV6_JOIN_GROUP: c_int = 12; -pub const IPV6_LEAVE_GROUP: c_int = 13; -pub const IPV6_CHECKSUM: c_int = 26; -pub const IPV6_RECVPKTINFO: c_int = 36; -pub const IPV6_PKTINFO: c_int = 46; -pub const IPV6_HOPLIMIT: c_int = 47; -pub const IPV6_RECVTCLASS: c_int = 57; -pub const IPV6_TCLASS: c_int = 61; -pub const IP_ADD_SOURCE_MEMBERSHIP: c_int = 70; -pub const IP_DROP_SOURCE_MEMBERSHIP: c_int = 71; -pub const IP_BLOCK_SOURCE: c_int = 72; -pub const IP_UNBLOCK_SOURCE: c_int = 73; - -pub const TCP_NOPUSH: c_int = 4; -pub const TCP_NOOPT: c_int = 8; -pub const TCP_KEEPIDLE: c_int = 256; -pub const TCP_KEEPINTVL: c_int = 512; -pub const TCP_KEEPCNT: c_int = 1024; - -pub const SOL_SOCKET: c_int = 0xffff; -pub const SO_DEBUG: c_int = 0x01; -pub const SO_ACCEPTCONN: c_int = 0x0002; -pub const SO_REUSEADDR: c_int = 0x0004; -pub const SO_KEEPALIVE: c_int = 0x0008; -pub const SO_DONTROUTE: c_int = 0x0010; -pub const SO_BROADCAST: c_int = 0x0020; -pub const SO_USELOOPBACK: c_int = 0x0040; -pub const SO_LINGER: c_int = 0x0080; -pub const SO_OOBINLINE: c_int = 0x0100; -pub const SO_REUSEPORT: c_int = 0x0200; -pub const SO_TIMESTAMP: c_int = 0x0400; -pub const SO_NOSIGPIPE: c_int = 0x0800; -pub const SO_ACCEPTFILTER: c_int = 0x1000; -pub const SO_SNDBUF: c_int = 0x1001; -pub const SO_RCVBUF: c_int = 0x1002; -pub const SO_SNDLOWAT: c_int = 0x1003; -pub const SO_RCVLOWAT: c_int = 0x1004; -pub const SO_SNDTIMEO: c_int = 0x1005; -pub const SO_RCVTIMEO: c_int = 0x1006; -pub const SO_ERROR: c_int = 0x1007; -pub const SO_TYPE: c_int = 0x1008; - -pub const LOCAL_PEERCRED: c_int = 1; - -// net/route.h -pub const RTF_XRESOLVE: c_int = 0x200; -pub const RTF_LLINFO: c_int = 0x400; -pub const RTF_PROTO3: c_int = 0x40000; -pub const RTF_PINNED: c_int = 0x100000; -pub const RTF_LOCAL: c_int = 0x200000; -pub const RTF_BROADCAST: c_int = 0x400000; -pub const RTF_MULTICAST: c_int = 0x800000; - -pub const RTM_LOCK: c_int = 0x8; -pub const RTM_RESOLVE: c_int = 0xb; -pub const RTM_NEWADDR: c_int = 0xc; -pub const RTM_DELADDR: c_int = 0xd; -pub const RTM_IFINFO: c_int = 0xe; -pub const RTM_NEWMADDR: c_int = 0xf; -pub const RTM_DELMADDR: c_int = 0x10; -pub const RTM_IFANNOUNCE: c_int = 0x11; -pub const RTM_IEEE80211: c_int = 0x12; - -pub const SHUT_RD: c_int = 0; -pub const SHUT_WR: c_int = 1; -pub const SHUT_RDWR: c_int = 2; - -pub const LOCK_SH: c_int = 1; -pub const LOCK_EX: c_int 
= 2; -pub const LOCK_NB: c_int = 4; -pub const LOCK_UN: c_int = 8; - -pub const MAP_COPY: c_int = 0x0002; -#[doc(hidden)] -#[deprecated( - since = "0.2.54", - note = "Removed in FreeBSD 11, unused in DragonFlyBSD" -)] -pub const MAP_RENAME: c_int = 0x0020; -#[doc(hidden)] -#[deprecated( - since = "0.2.54", - note = "Removed in FreeBSD 11, unused in DragonFlyBSD" -)] -pub const MAP_NORESERVE: c_int = 0x0040; -pub const MAP_HASSEMAPHORE: c_int = 0x0200; -pub const MAP_STACK: c_int = 0x0400; -pub const MAP_NOSYNC: c_int = 0x0800; -pub const MAP_NOCORE: c_int = 0x020000; - -pub const IPPROTO_RAW: c_int = 255; - -pub const _PC_LINK_MAX: c_int = 1; -pub const _PC_MAX_CANON: c_int = 2; -pub const _PC_MAX_INPUT: c_int = 3; -pub const _PC_NAME_MAX: c_int = 4; -pub const _PC_PATH_MAX: c_int = 5; -pub const _PC_PIPE_BUF: c_int = 6; -pub const _PC_CHOWN_RESTRICTED: c_int = 7; -pub const _PC_NO_TRUNC: c_int = 8; -pub const _PC_VDISABLE: c_int = 9; -pub const _PC_ALLOC_SIZE_MIN: c_int = 10; -pub const _PC_FILESIZEBITS: c_int = 12; -pub const _PC_REC_INCR_XFER_SIZE: c_int = 14; -pub const _PC_REC_MAX_XFER_SIZE: c_int = 15; -pub const _PC_REC_MIN_XFER_SIZE: c_int = 16; -pub const _PC_REC_XFER_ALIGN: c_int = 17; -pub const _PC_SYMLINK_MAX: c_int = 18; -pub const _PC_MIN_HOLE_SIZE: c_int = 21; -pub const _PC_ASYNC_IO: c_int = 53; -pub const _PC_PRIO_IO: c_int = 54; -pub const _PC_SYNC_IO: c_int = 55; -pub const _PC_ACL_EXTENDED: c_int = 59; -pub const _PC_ACL_PATH_MAX: c_int = 60; -pub const _PC_CAP_PRESENT: c_int = 61; -pub const _PC_INF_PRESENT: c_int = 62; -pub const _PC_MAC_PRESENT: c_int = 63; - -pub const _SC_ARG_MAX: c_int = 1; -pub const _SC_CHILD_MAX: c_int = 2; -pub const _SC_CLK_TCK: c_int = 3; -pub const _SC_NGROUPS_MAX: c_int = 4; -pub const _SC_OPEN_MAX: c_int = 5; -pub const _SC_JOB_CONTROL: c_int = 6; -pub const _SC_SAVED_IDS: c_int = 7; -pub const _SC_VERSION: c_int = 8; -pub const _SC_BC_BASE_MAX: c_int = 9; -pub const _SC_BC_DIM_MAX: c_int = 10; -pub const _SC_BC_SCALE_MAX: c_int = 11; -pub const _SC_BC_STRING_MAX: c_int = 12; -pub const _SC_COLL_WEIGHTS_MAX: c_int = 13; -pub const _SC_EXPR_NEST_MAX: c_int = 14; -pub const _SC_LINE_MAX: c_int = 15; -pub const _SC_RE_DUP_MAX: c_int = 16; -pub const _SC_2_VERSION: c_int = 17; -pub const _SC_2_C_BIND: c_int = 18; -pub const _SC_2_C_DEV: c_int = 19; -pub const _SC_2_CHAR_TERM: c_int = 20; -pub const _SC_2_FORT_DEV: c_int = 21; -pub const _SC_2_FORT_RUN: c_int = 22; -pub const _SC_2_LOCALEDEF: c_int = 23; -pub const _SC_2_SW_DEV: c_int = 24; -pub const _SC_2_UPE: c_int = 25; -pub const _SC_STREAM_MAX: c_int = 26; -pub const _SC_TZNAME_MAX: c_int = 27; -pub const _SC_ASYNCHRONOUS_IO: c_int = 28; -pub const _SC_MAPPED_FILES: c_int = 29; -pub const _SC_MEMLOCK: c_int = 30; -pub const _SC_MEMLOCK_RANGE: c_int = 31; -pub const _SC_MEMORY_PROTECTION: c_int = 32; -pub const _SC_MESSAGE_PASSING: c_int = 33; -pub const _SC_PRIORITIZED_IO: c_int = 34; -pub const _SC_PRIORITY_SCHEDULING: c_int = 35; -pub const _SC_REALTIME_SIGNALS: c_int = 36; -pub const _SC_SEMAPHORES: c_int = 37; -pub const _SC_FSYNC: c_int = 38; -pub const _SC_SHARED_MEMORY_OBJECTS: c_int = 39; -pub const _SC_SYNCHRONIZED_IO: c_int = 40; -pub const _SC_TIMERS: c_int = 41; -pub const _SC_AIO_LISTIO_MAX: c_int = 42; -pub const _SC_AIO_MAX: c_int = 43; -pub const _SC_AIO_PRIO_DELTA_MAX: c_int = 44; -pub const _SC_DELAYTIMER_MAX: c_int = 45; -pub const _SC_MQ_OPEN_MAX: c_int = 46; -pub const _SC_PAGESIZE: c_int = 47; -pub const _SC_PAGE_SIZE: c_int = _SC_PAGESIZE; -pub const 
_SC_RTSIG_MAX: c_int = 48; -pub const _SC_SEM_NSEMS_MAX: c_int = 49; -pub const _SC_SEM_VALUE_MAX: c_int = 50; -pub const _SC_SIGQUEUE_MAX: c_int = 51; -pub const _SC_TIMER_MAX: c_int = 52; -pub const _SC_IOV_MAX: c_int = 56; -pub const _SC_NPROCESSORS_CONF: c_int = 57; -pub const _SC_2_PBS: c_int = 59; -pub const _SC_2_PBS_ACCOUNTING: c_int = 60; -pub const _SC_2_PBS_CHECKPOINT: c_int = 61; -pub const _SC_2_PBS_LOCATE: c_int = 62; -pub const _SC_2_PBS_MESSAGE: c_int = 63; -pub const _SC_2_PBS_TRACK: c_int = 64; -pub const _SC_ADVISORY_INFO: c_int = 65; -pub const _SC_BARRIERS: c_int = 66; -pub const _SC_CLOCK_SELECTION: c_int = 67; -pub const _SC_CPUTIME: c_int = 68; -pub const _SC_FILE_LOCKING: c_int = 69; -pub const _SC_NPROCESSORS_ONLN: c_int = 58; -pub const _SC_GETGR_R_SIZE_MAX: c_int = 70; -pub const _SC_GETPW_R_SIZE_MAX: c_int = 71; -pub const _SC_HOST_NAME_MAX: c_int = 72; -pub const _SC_LOGIN_NAME_MAX: c_int = 73; -pub const _SC_MONOTONIC_CLOCK: c_int = 74; -pub const _SC_MQ_PRIO_MAX: c_int = 75; -pub const _SC_READER_WRITER_LOCKS: c_int = 76; -pub const _SC_REGEXP: c_int = 77; -pub const _SC_SHELL: c_int = 78; -pub const _SC_SPAWN: c_int = 79; -pub const _SC_SPIN_LOCKS: c_int = 80; -pub const _SC_SPORADIC_SERVER: c_int = 81; -pub const _SC_THREAD_ATTR_STACKADDR: c_int = 82; -pub const _SC_THREAD_ATTR_STACKSIZE: c_int = 83; -pub const _SC_THREAD_DESTRUCTOR_ITERATIONS: c_int = 85; -pub const _SC_THREAD_KEYS_MAX: c_int = 86; -pub const _SC_THREAD_PRIO_INHERIT: c_int = 87; -pub const _SC_THREAD_PRIO_PROTECT: c_int = 88; -pub const _SC_THREAD_PRIORITY_SCHEDULING: c_int = 89; -pub const _SC_THREAD_PROCESS_SHARED: c_int = 90; -pub const _SC_THREAD_SAFE_FUNCTIONS: c_int = 91; -pub const _SC_THREAD_SPORADIC_SERVER: c_int = 92; -pub const _SC_THREAD_STACK_MIN: c_int = 93; -pub const _SC_THREAD_THREADS_MAX: c_int = 94; -pub const _SC_TIMEOUTS: c_int = 95; -pub const _SC_THREADS: c_int = 96; -pub const _SC_TRACE: c_int = 97; -pub const _SC_TRACE_EVENT_FILTER: c_int = 98; -pub const _SC_TRACE_INHERIT: c_int = 99; -pub const _SC_TRACE_LOG: c_int = 100; -pub const _SC_TTY_NAME_MAX: c_int = 101; -pub const _SC_TYPED_MEMORY_OBJECTS: c_int = 102; -pub const _SC_V6_ILP32_OFF32: c_int = 103; -pub const _SC_V6_ILP32_OFFBIG: c_int = 104; -pub const _SC_V6_LP64_OFF64: c_int = 105; -pub const _SC_V6_LPBIG_OFFBIG: c_int = 106; -pub const _SC_ATEXIT_MAX: c_int = 107; -pub const _SC_XOPEN_CRYPT: c_int = 108; -pub const _SC_XOPEN_ENH_I18N: c_int = 109; -pub const _SC_XOPEN_LEGACY: c_int = 110; -pub const _SC_XOPEN_REALTIME: c_int = 111; -pub const _SC_XOPEN_REALTIME_THREADS: c_int = 112; -pub const _SC_XOPEN_SHM: c_int = 113; -pub const _SC_XOPEN_STREAMS: c_int = 114; -pub const _SC_XOPEN_UNIX: c_int = 115; -pub const _SC_XOPEN_VERSION: c_int = 116; -pub const _SC_XOPEN_XCU_VERSION: c_int = 117; -pub const _SC_IPV6: c_int = 118; -pub const _SC_RAW_SOCKETS: c_int = 119; -pub const _SC_SYMLOOP_MAX: c_int = 120; -pub const _SC_PHYS_PAGES: c_int = 121; - -pub const _CS_PATH: c_int = 1; - -pub const PTHREAD_MUTEX_INITIALIZER: pthread_mutex_t = ptr::null_mut(); -pub const PTHREAD_COND_INITIALIZER: pthread_cond_t = ptr::null_mut(); -pub const PTHREAD_RWLOCK_INITIALIZER: pthread_rwlock_t = ptr::null_mut(); -pub const PTHREAD_MUTEX_ERRORCHECK: c_int = 1; -pub const PTHREAD_MUTEX_RECURSIVE: c_int = 2; -pub const PTHREAD_MUTEX_NORMAL: c_int = 3; -pub const PTHREAD_MUTEX_DEFAULT: c_int = PTHREAD_MUTEX_ERRORCHECK; - -pub const SCHED_FIFO: c_int = 1; -pub const SCHED_OTHER: c_int = 2; -pub const SCHED_RR: c_int = 3; - 
-pub const FD_SETSIZE: usize = 1024; - -pub const ST_NOSUID: c_ulong = 2; - -pub const NI_MAXHOST: size_t = 1025; - -pub const XUCRED_VERSION: c_uint = 0; - -pub const RTLD_LOCAL: c_int = 0; -pub const RTLD_NODELETE: c_int = 0x1000; -pub const RTLD_NOLOAD: c_int = 0x2000; -pub const RTLD_GLOBAL: c_int = 0x100; - -pub const LOG_NTP: c_int = 12 << 3; -pub const LOG_SECURITY: c_int = 13 << 3; -pub const LOG_CONSOLE: c_int = 14 << 3; -pub const LOG_NFACILITIES: c_int = 24; - -pub const TIOCEXCL: c_ulong = 0x2000740d; -pub const TIOCNXCL: c_ulong = 0x2000740e; -pub const TIOCFLUSH: c_ulong = 0x80047410; -pub const TIOCGETA: c_ulong = 0x402c7413; -pub const TIOCSETA: c_ulong = 0x802c7414; -pub const TIOCSETAW: c_ulong = 0x802c7415; -pub const TIOCSETAF: c_ulong = 0x802c7416; -pub const TIOCGETD: c_ulong = 0x4004741a; -pub const TIOCSETD: c_ulong = 0x8004741b; -pub const TIOCGDRAINWAIT: c_ulong = 0x40047456; -pub const TIOCSDRAINWAIT: c_ulong = 0x80047457; -#[cfg_attr( - not(target_os = "dragonfly"), - deprecated = "unused since FreeBSD 8, removed in FreeBSD 15" -)] -pub const TIOCMGDTRWAIT: c_ulong = 0x4004745a; -#[cfg_attr( - not(target_os = "dragonfly"), - deprecated = "unused since FreeBSD 8, removed in FreeBSD 15" -)] -pub const TIOCMSDTRWAIT: c_ulong = 0x8004745b; -pub const TIOCDRAIN: c_ulong = 0x2000745e; -pub const TIOCEXT: c_ulong = 0x80047460; -pub const TIOCSCTTY: c_ulong = 0x20007461; -pub const TIOCCONS: c_ulong = 0x80047462; -pub const TIOCGSID: c_ulong = 0x40047463; -pub const TIOCSTAT: c_ulong = 0x20007465; -pub const TIOCUCNTL: c_ulong = 0x80047466; -pub const TIOCSWINSZ: c_ulong = 0x80087467; -pub const TIOCGWINSZ: c_ulong = 0x40087468; -pub const TIOCMGET: c_ulong = 0x4004746a; -pub const TIOCM_LE: c_int = 0x1; -pub const TIOCM_DTR: c_int = 0x2; -pub const TIOCM_RTS: c_int = 0x4; -pub const TIOCM_ST: c_int = 0x8; -pub const TIOCM_SR: c_int = 0x10; -pub const TIOCM_CTS: c_int = 0x20; -pub const TIOCM_RI: c_int = 0x80; -pub const TIOCM_DSR: c_int = 0x100; -pub const TIOCM_CD: c_int = 0x40; -pub const TIOCM_CAR: c_int = 0x40; -pub const TIOCM_RNG: c_int = 0x80; -pub const TIOCMBIC: c_ulong = 0x8004746b; -pub const TIOCMBIS: c_ulong = 0x8004746c; -pub const TIOCMSET: c_ulong = 0x8004746d; -pub const TIOCSTART: c_ulong = 0x2000746e; -pub const TIOCSTOP: c_ulong = 0x2000746f; -pub const TIOCPKT: c_ulong = 0x80047470; -pub const TIOCPKT_DATA: c_int = 0x0; -pub const TIOCPKT_FLUSHREAD: c_int = 0x1; -pub const TIOCPKT_FLUSHWRITE: c_int = 0x2; -pub const TIOCPKT_STOP: c_int = 0x4; -pub const TIOCPKT_START: c_int = 0x8; -pub const TIOCPKT_NOSTOP: c_int = 0x10; -pub const TIOCPKT_DOSTOP: c_int = 0x20; -pub const TIOCPKT_IOCTL: c_int = 0x40; -pub const TIOCNOTTY: c_ulong = 0x20007471; -pub const TIOCSTI: c_ulong = 0x80017472; -pub const TIOCOUTQ: c_ulong = 0x40047473; -pub const TIOCSPGRP: c_ulong = 0x80047476; -pub const TIOCGPGRP: c_ulong = 0x40047477; -pub const TIOCCDTR: c_ulong = 0x20007478; -pub const TIOCSDTR: c_ulong = 0x20007479; -pub const TTYDISC: c_int = 0x0; -pub const SLIPDISC: c_int = 0x4; -pub const PPPDISC: c_int = 0x5; -pub const NETGRAPHDISC: c_int = 0x6; - -pub const BIOCGRSIG: c_ulong = 0x40044272; -pub const BIOCSRSIG: c_ulong = 0x80044273; -pub const BIOCSDLT: c_ulong = 0x80044278; -pub const BIOCGSEESENT: c_ulong = 0x40044276; -pub const BIOCSSEESENT: c_ulong = 0x80044277; -cfg_if! 
{ - if #[cfg(target_pointer_width = "64")] { - pub const BIOCGDLTLIST: c_ulong = 0xc0104279; - pub const BIOCSETF: c_ulong = 0x80104267; - } else if #[cfg(target_pointer_width = "32")] { - pub const BIOCGDLTLIST: c_ulong = 0xc0084279; - pub const BIOCSETF: c_ulong = 0x80084267; - } -} - -pub const FIODTYPE: c_ulong = 0x4004667a; -pub const FIOGETLBA: c_ulong = 0x40046679; - -pub const B0: speed_t = 0; -pub const B50: speed_t = 50; -pub const B75: speed_t = 75; -pub const B110: speed_t = 110; -pub const B134: speed_t = 134; -pub const B150: speed_t = 150; -pub const B200: speed_t = 200; -pub const B300: speed_t = 300; -pub const B600: speed_t = 600; -pub const B1200: speed_t = 1200; -pub const B1800: speed_t = 1800; -pub const B2400: speed_t = 2400; -pub const B4800: speed_t = 4800; -pub const B9600: speed_t = 9600; -pub const B19200: speed_t = 19200; -pub const B38400: speed_t = 38400; -pub const B7200: speed_t = 7200; -pub const B14400: speed_t = 14400; -pub const B28800: speed_t = 28800; -pub const B57600: speed_t = 57600; -pub const B76800: speed_t = 76800; -pub const B115200: speed_t = 115200; -pub const B230400: speed_t = 230400; -pub const EXTA: speed_t = 19200; -pub const EXTB: speed_t = 38400; - -pub const SEM_FAILED: *mut sem_t = ptr::null_mut(); - -pub const CRTSCTS: crate::tcflag_t = 0x00030000; -pub const CCTS_OFLOW: crate::tcflag_t = 0x00010000; -pub const CRTS_IFLOW: crate::tcflag_t = 0x00020000; -pub const CDTR_IFLOW: crate::tcflag_t = 0x00040000; -pub const CDSR_OFLOW: crate::tcflag_t = 0x00080000; -pub const CCAR_OFLOW: crate::tcflag_t = 0x00100000; -pub const VERASE2: usize = 7; -pub const OCRNL: crate::tcflag_t = 0x10; -pub const ONOCR: crate::tcflag_t = 0x20; -pub const ONLRET: crate::tcflag_t = 0x40; - -pub const CMGROUP_MAX: usize = 16; - -pub const EUI64_LEN: usize = 8; - -// https://github.com/freebsd/freebsd/blob/HEAD/sys/net/bpf.h -pub const BPF_ALIGNMENT: usize = SIZEOF_LONG; - -// Values for rtprio struct (prio field) and syscall (function argument) -pub const RTP_PRIO_MIN: c_ushort = 0; -pub const RTP_PRIO_MAX: c_ushort = 31; -pub const RTP_LOOKUP: c_int = 0; -pub const RTP_SET: c_int = 1; - -// Flags for chflags(2) -pub const UF_SETTABLE: c_ulong = 0x0000ffff; -pub const UF_NODUMP: c_ulong = 0x00000001; -pub const UF_IMMUTABLE: c_ulong = 0x00000002; -pub const UF_APPEND: c_ulong = 0x00000004; -pub const UF_OPAQUE: c_ulong = 0x00000008; -pub const UF_NOUNLINK: c_ulong = 0x00000010; -pub const SF_SETTABLE: c_ulong = 0xffff0000; -pub const SF_ARCHIVED: c_ulong = 0x00010000; -pub const SF_IMMUTABLE: c_ulong = 0x00020000; -pub const SF_APPEND: c_ulong = 0x00040000; -pub const SF_NOUNLINK: c_ulong = 0x00100000; - -pub const TIMER_ABSTIME: c_int = 1; - -// -pub const NTP_API: c_int = 4; -pub const MAXPHASE: c_long = 500000000; -pub const MAXFREQ: c_long = 500000; -pub const MINSEC: c_int = 256; -pub const MAXSEC: c_int = 2048; -pub const NANOSECOND: c_long = 1000000000; -pub const SCALE_PPM: c_int = 65; -pub const MAXTC: c_int = 10; -pub const MOD_OFFSET: c_uint = 0x0001; -pub const MOD_FREQUENCY: c_uint = 0x0002; -pub const MOD_MAXERROR: c_uint = 0x0004; -pub const MOD_ESTERROR: c_uint = 0x0008; -pub const MOD_STATUS: c_uint = 0x0010; -pub const MOD_TIMECONST: c_uint = 0x0020; -pub const MOD_PPSMAX: c_uint = 0x0040; -pub const MOD_TAI: c_uint = 0x0080; -pub const MOD_MICRO: c_uint = 0x1000; -pub const MOD_NANO: c_uint = 0x2000; -pub const MOD_CLKB: c_uint = 0x4000; -pub const MOD_CLKA: c_uint = 0x8000; -pub const STA_PLL: c_int = 0x0001; -pub const STA_PPSFREQ: 
c_int = 0x0002; -pub const STA_PPSTIME: c_int = 0x0004; -pub const STA_FLL: c_int = 0x0008; -pub const STA_INS: c_int = 0x0010; -pub const STA_DEL: c_int = 0x0020; -pub const STA_UNSYNC: c_int = 0x0040; -pub const STA_FREQHOLD: c_int = 0x0080; -pub const STA_PPSSIGNAL: c_int = 0x0100; -pub const STA_PPSJITTER: c_int = 0x0200; -pub const STA_PPSWANDER: c_int = 0x0400; -pub const STA_PPSERROR: c_int = 0x0800; -pub const STA_CLOCKERR: c_int = 0x1000; -pub const STA_NANO: c_int = 0x2000; -pub const STA_MODE: c_int = 0x4000; -pub const STA_CLK: c_int = 0x8000; -pub const STA_RONLY: c_int = STA_PPSSIGNAL - | STA_PPSJITTER - | STA_PPSWANDER - | STA_PPSERROR - | STA_CLOCKERR - | STA_NANO - | STA_MODE - | STA_CLK; -pub const TIME_OK: c_int = 0; -pub const TIME_INS: c_int = 1; -pub const TIME_DEL: c_int = 2; -pub const TIME_OOP: c_int = 3; -pub const TIME_WAIT: c_int = 4; -pub const TIME_ERROR: c_int = 5; - -pub const REG_ENOSYS: c_int = -1; -pub const REG_ILLSEQ: c_int = 17; - -pub const IPC_PRIVATE: crate::key_t = 0; -pub const IPC_CREAT: c_int = 0o1000; -pub const IPC_EXCL: c_int = 0o2000; -pub const IPC_NOWAIT: c_int = 0o4000; -pub const IPC_RMID: c_int = 0; -pub const IPC_SET: c_int = 1; -pub const IPC_STAT: c_int = 2; -pub const IPC_R: c_int = 0o400; -pub const IPC_W: c_int = 0o200; -pub const IPC_M: c_int = 0o10000; - -pub const SHM_RDONLY: c_int = 0o10000; -pub const SHM_RND: c_int = 0o20000; -pub const SHM_R: c_int = 0o400; -pub const SHM_W: c_int = 0o200; - -pub const KENV_GET: c_int = 0; -pub const KENV_SET: c_int = 1; -pub const KENV_UNSET: c_int = 2; -pub const KENV_DUMP: c_int = 3; -pub const KENV_MNAMELEN: c_int = 128; -pub const KENV_MVALLEN: c_int = 128; - -pub const RB_ASKNAME: c_int = 0x001; -pub const RB_SINGLE: c_int = 0x002; -pub const RB_NOSYNC: c_int = 0x004; -pub const RB_HALT: c_int = 0x008; -pub const RB_INITNAME: c_int = 0x010; -pub const RB_DFLTROOT: c_int = 0x020; -pub const RB_KDB: c_int = 0x040; -pub const RB_RDONLY: c_int = 0x080; -pub const RB_DUMP: c_int = 0x100; -pub const RB_MINIROOT: c_int = 0x200; -pub const RB_VERBOSE: c_int = 0x800; -pub const RB_SERIAL: c_int = 0x1000; -pub const RB_CDROM: c_int = 0x2000; -pub const RB_POWEROFF: c_int = 0x4000; -pub const RB_GDB: c_int = 0x8000; -pub const RB_MUTE: c_int = 0x10000; -pub const RB_SELFTEST: c_int = 0x20000; - -// For getrandom() -pub const GRND_NONBLOCK: c_uint = 0x1; -pub const GRND_RANDOM: c_uint = 0x2; -pub const GRND_INSECURE: c_uint = 0x4; - -// DIFF(main): changed to `c_short` in f62eb023ab -pub const POSIX_SPAWN_RESETIDS: c_int = 0x01; -pub const POSIX_SPAWN_SETPGROUP: c_int = 0x02; -pub const POSIX_SPAWN_SETSCHEDPARAM: c_int = 0x04; -pub const POSIX_SPAWN_SETSCHEDULER: c_int = 0x08; -pub const POSIX_SPAWN_SETSIGDEF: c_int = 0x10; -pub const POSIX_SPAWN_SETSIGMASK: c_int = 0x20; - -safe_f! 
{ - pub const fn WIFCONTINUED(status: c_int) -> bool { - status == 0x13 - } - - pub const fn WSTOPSIG(status: c_int) -> c_int { - status >> 8 - } - - pub const fn WIFSTOPPED(status: c_int) -> bool { - (status & 0o177) == 0o177 - } -} - -extern "C" { - pub fn sem_destroy(sem: *mut sem_t) -> c_int; - pub fn sem_init(sem: *mut sem_t, pshared: c_int, value: c_uint) -> c_int; - - pub fn daemon(nochdir: c_int, noclose: c_int) -> c_int; - pub fn gettimeofday(tp: *mut crate::timeval, tz: *mut crate::timezone) -> c_int; - pub fn accept4( - s: c_int, - addr: *mut crate::sockaddr, - addrlen: *mut crate::socklen_t, - flags: c_int, - ) -> c_int; - pub fn chflags(path: *const c_char, flags: c_ulong) -> c_int; - pub fn chflagsat(fd: c_int, path: *const c_char, flags: c_ulong, atflag: c_int) -> c_int; - - pub fn clock_nanosleep( - clk_id: crate::clockid_t, - flags: c_int, - rqtp: *const crate::timespec, - rmtp: *mut crate::timespec, - ) -> c_int; - - pub fn clock_getres(clk_id: crate::clockid_t, tp: *mut crate::timespec) -> c_int; - pub fn clock_gettime(clk_id: crate::clockid_t, tp: *mut crate::timespec) -> c_int; - pub fn clock_settime(clk_id: crate::clockid_t, tp: *const crate::timespec) -> c_int; - pub fn clock_getcpuclockid(pid: crate::pid_t, clk_id: *mut crate::clockid_t) -> c_int; - - pub fn pthread_getcpuclockid(thread: crate::pthread_t, clk_id: *mut crate::clockid_t) -> c_int; - - pub fn dirfd(dirp: *mut crate::DIR) -> c_int; - pub fn duplocale(base: crate::locale_t) -> crate::locale_t; - pub fn endutxent(); - pub fn fchflags(fd: c_int, flags: c_ulong) -> c_int; - - // DIFF(main): changed to `*const *mut` in e77f551de9 - pub fn fexecve(fd: c_int, argv: *const *const c_char, envp: *const *const c_char) -> c_int; - - pub fn futimens(fd: c_int, times: *const crate::timespec) -> c_int; - pub fn getdomainname(name: *mut c_char, len: c_int) -> c_int; - pub fn getgrent_r( - grp: *mut crate::group, - buf: *mut c_char, - buflen: size_t, - result: *mut *mut crate::group, - ) -> c_int; - pub fn getpwent_r( - pwd: *mut crate::passwd, - buf: *mut c_char, - buflen: size_t, - result: *mut *mut crate::passwd, - ) -> c_int; - pub fn getgrouplist( - name: *const c_char, - basegid: crate::gid_t, - groups: *mut crate::gid_t, - ngroups: *mut c_int, - ) -> c_int; - pub fn getnameinfo( - sa: *const crate::sockaddr, - salen: crate::socklen_t, - host: *mut c_char, - hostlen: size_t, - serv: *mut c_char, - servlen: size_t, - flags: c_int, - ) -> c_int; - pub fn getpriority(which: c_int, who: c_int) -> c_int; - pub fn getresgid( - rgid: *mut crate::gid_t, - egid: *mut crate::gid_t, - sgid: *mut crate::gid_t, - ) -> c_int; - pub fn getresuid( - ruid: *mut crate::uid_t, - euid: *mut crate::uid_t, - suid: *mut crate::uid_t, - ) -> c_int; - pub fn getutxent() -> *mut utmpx; - pub fn getutxid(ut: *const utmpx) -> *mut utmpx; - pub fn getutxline(ut: *const utmpx) -> *mut utmpx; - pub fn initgroups(name: *const c_char, basegid: crate::gid_t) -> c_int; - #[cfg_attr( - all(target_os = "freebsd", any(freebsd11, freebsd10)), - link_name = "kevent@FBSD_1.0" - )] - pub fn kevent( - kq: c_int, - changelist: *const crate::kevent, - nchanges: c_int, - eventlist: *mut crate::kevent, - nevents: c_int, - timeout: *const crate::timespec, - ) -> c_int; - pub fn lchflags(path: *const c_char, flags: c_ulong) -> c_int; - pub fn lutimes(file: *const c_char, times: *const crate::timeval) -> c_int; - pub fn memrchr(cx: *const c_void, c: c_int, n: size_t) -> *mut c_void; - pub fn mkfifoat(dirfd: c_int, pathname: *const c_char, mode: mode_t) -> c_int; 
- #[cfg_attr( - all(target_os = "freebsd", any(freebsd11, freebsd10)), - link_name = "mknodat@FBSD_1.1" - )] - pub fn mknodat(dirfd: c_int, pathname: *const c_char, mode: mode_t, dev: dev_t) -> c_int; - pub fn malloc_usable_size(ptr: *const c_void) -> size_t; - pub fn mincore(addr: *const c_void, len: size_t, vec: *mut c_char) -> c_int; - pub fn newlocale(mask: c_int, locale: *const c_char, base: crate::locale_t) -> crate::locale_t; - pub fn nl_langinfo_l(item: crate::nl_item, locale: crate::locale_t) -> *mut c_char; - pub fn pipe2(fds: *mut c_int, flags: c_int) -> c_int; - pub fn posix_fallocate(fd: c_int, offset: off_t, len: off_t) -> c_int; - pub fn posix_fadvise(fd: c_int, offset: off_t, len: off_t, advise: c_int) -> c_int; - pub fn ppoll( - fds: *mut crate::pollfd, - nfds: crate::nfds_t, - timeout: *const crate::timespec, - sigmask: *const sigset_t, - ) -> c_int; - pub fn preadv(fd: c_int, iov: *const crate::iovec, iovcnt: c_int, offset: off_t) -> ssize_t; - pub fn pthread_attr_get_np(tid: crate::pthread_t, attr: *mut crate::pthread_attr_t) -> c_int; - pub fn pthread_attr_getguardsize( - attr: *const crate::pthread_attr_t, - guardsize: *mut size_t, - ) -> c_int; - pub fn pthread_attr_setguardsize(attr: *mut crate::pthread_attr_t, guardsize: size_t) -> c_int; - pub fn pthread_attr_getstack( - attr: *const crate::pthread_attr_t, - stackaddr: *mut *mut c_void, - stacksize: *mut size_t, - ) -> c_int; - pub fn pthread_condattr_getclock( - attr: *const pthread_condattr_t, - clock_id: *mut clockid_t, - ) -> c_int; - pub fn pthread_condattr_getpshared( - attr: *const pthread_condattr_t, - pshared: *mut c_int, - ) -> c_int; - pub fn pthread_condattr_setclock( - attr: *mut pthread_condattr_t, - clock_id: crate::clockid_t, - ) -> c_int; - pub fn pthread_condattr_setpshared(attr: *mut pthread_condattr_t, pshared: c_int) -> c_int; - pub fn pthread_main_np() -> c_int; - pub fn pthread_mutex_timedlock( - lock: *mut pthread_mutex_t, - abstime: *const crate::timespec, - ) -> c_int; - pub fn pthread_mutexattr_getpshared( - attr: *const pthread_mutexattr_t, - pshared: *mut c_int, - ) -> c_int; - pub fn pthread_mutexattr_setpshared(attr: *mut pthread_mutexattr_t, pshared: c_int) -> c_int; - pub fn pthread_rwlockattr_getpshared( - attr: *const pthread_rwlockattr_t, - val: *mut c_int, - ) -> c_int; - pub fn pthread_rwlockattr_setpshared(attr: *mut pthread_rwlockattr_t, val: c_int) -> c_int; - pub fn pthread_barrierattr_init(attr: *mut crate::pthread_barrierattr_t) -> c_int; - pub fn pthread_barrierattr_destroy(attr: *mut crate::pthread_barrierattr_t) -> c_int; - pub fn pthread_barrierattr_getpshared( - attr: *const crate::pthread_barrierattr_t, - shared: *mut c_int, - ) -> c_int; - pub fn pthread_barrierattr_setpshared( - attr: *mut crate::pthread_barrierattr_t, - shared: c_int, - ) -> c_int; - pub fn pthread_barrier_init( - barrier: *mut pthread_barrier_t, - attr: *const crate::pthread_barrierattr_t, - count: c_uint, - ) -> c_int; - pub fn pthread_barrier_destroy(barrier: *mut pthread_barrier_t) -> c_int; - pub fn pthread_barrier_wait(barrier: *mut pthread_barrier_t) -> c_int; - pub fn pthread_get_name_np(tid: crate::pthread_t, name: *mut c_char, len: size_t); - pub fn pthread_set_name_np(tid: crate::pthread_t, name: *const c_char); - pub fn pthread_getname_np( - thread: crate::pthread_t, - buffer: *mut c_char, - length: size_t, - ) -> c_int; - pub fn pthread_setname_np(thread: crate::pthread_t, name: *const c_char) -> c_int; - pub fn pthread_setschedparam( - native: crate::pthread_t, - policy: c_int, - 
param: *const sched_param, - ) -> c_int; - pub fn pthread_getschedparam( - native: crate::pthread_t, - policy: *mut c_int, - param: *mut sched_param, - ) -> c_int; - pub fn ptrace(request: c_int, pid: crate::pid_t, addr: *mut c_char, data: c_int) -> c_int; - pub fn utrace(addr: *const c_void, len: size_t) -> c_int; - pub fn pututxline(ut: *const utmpx) -> *mut utmpx; - pub fn pwritev(fd: c_int, iov: *const crate::iovec, iovcnt: c_int, offset: off_t) -> ssize_t; - pub fn querylocale(mask: c_int, loc: crate::locale_t) -> *const c_char; - pub fn rtprio(function: c_int, pid: crate::pid_t, rtp: *mut rtprio) -> c_int; - pub fn sched_rr_get_interval(pid: crate::pid_t, t: *mut crate::timespec) -> c_int; - pub fn sched_getparam(pid: crate::pid_t, param: *mut sched_param) -> c_int; - pub fn sched_setparam(pid: crate::pid_t, param: *const sched_param) -> c_int; - pub fn sched_getscheduler(pid: crate::pid_t) -> c_int; - pub fn sched_setscheduler( - pid: crate::pid_t, - policy: c_int, - param: *const crate::sched_param, - ) -> c_int; - pub fn sem_getvalue(sem: *mut sem_t, sval: *mut c_int) -> c_int; - pub fn sem_timedwait(sem: *mut sem_t, abstime: *const crate::timespec) -> c_int; - pub fn sendfile( - fd: c_int, - s: c_int, - offset: off_t, - nbytes: size_t, - hdtr: *mut crate::sf_hdtr, - sbytes: *mut off_t, - flags: c_int, - ) -> c_int; - pub fn setdomainname(name: *const c_char, len: c_int) -> c_int; - pub fn sethostname(name: *const c_char, len: c_int) -> c_int; - pub fn setpriority(which: c_int, who: c_int, prio: c_int) -> c_int; - pub fn setresgid(rgid: crate::gid_t, egid: crate::gid_t, sgid: crate::gid_t) -> c_int; - pub fn setresuid(ruid: crate::uid_t, euid: crate::uid_t, suid: crate::uid_t) -> c_int; - pub fn settimeofday(tv: *const crate::timeval, tz: *const crate::timezone) -> c_int; - pub fn setutxent(); - pub fn shm_open(name: *const c_char, oflag: c_int, mode: mode_t) -> c_int; - pub fn sigtimedwait( - set: *const sigset_t, - info: *mut siginfo_t, - timeout: *const crate::timespec, - ) -> c_int; - pub fn sigwaitinfo(set: *const sigset_t, info: *mut siginfo_t) -> c_int; - pub fn sysctl( - name: *const c_int, - namelen: c_uint, - oldp: *mut c_void, - oldlenp: *mut size_t, - newp: *const c_void, - newlen: size_t, - ) -> c_int; - pub fn sysctlbyname( - name: *const c_char, - oldp: *mut c_void, - oldlenp: *mut size_t, - newp: *const c_void, - newlen: size_t, - ) -> c_int; - pub fn sysctlnametomib(name: *const c_char, mibp: *mut c_int, sizep: *mut size_t) -> c_int; - pub fn uselocale(loc: crate::locale_t) -> crate::locale_t; - pub fn utimensat( - dirfd: c_int, - path: *const c_char, - times: *const crate::timespec, - flag: c_int, - ) -> c_int; - - pub fn ntp_adjtime(buf: *mut timex) -> c_int; - pub fn ntp_gettime(buf: *mut ntptimeval) -> c_int; - - // #include - pub fn dl_iterate_phdr( - callback: Option< - unsafe extern "C" fn(info: *mut dl_phdr_info, size: usize, data: *mut c_void) -> c_int, - >, - data: *mut c_void, - ) -> c_int; - - pub fn iconv_open(tocode: *const c_char, fromcode: *const c_char) -> iconv_t; - pub fn iconv( - cd: iconv_t, - inbuf: *mut *mut c_char, - inbytesleft: *mut size_t, - outbuf: *mut *mut c_char, - outbytesleft: *mut size_t, - ) -> size_t; - pub fn iconv_close(cd: iconv_t) -> c_int; - - // Added in `FreeBSD` 11.0 - // Added in `DragonFly BSD` 5.4 - pub fn explicit_bzero(s: *mut c_void, len: size_t); - // ISO/IEC 9899:2011 ("ISO C11") K.3.7.4.1 - pub fn memset_s(s: *mut c_void, smax: size_t, c: c_int, n: size_t) -> c_int; - pub fn gethostid() -> c_long; - pub fn 
sethostid(hostid: c_long); - - pub fn eui64_aton(a: *const c_char, e: *mut eui64) -> c_int; - pub fn eui64_ntoa(id: *const eui64, a: *mut c_char, len: size_t) -> c_int; - pub fn eui64_ntohost(hostname: *mut c_char, len: size_t, id: *const eui64) -> c_int; - pub fn eui64_hostton(hostname: *const c_char, id: *mut eui64) -> c_int; - - pub fn eaccess(path: *const c_char, mode: c_int) -> c_int; - - pub fn kenv(action: c_int, name: *const c_char, value: *mut c_char, len: c_int) -> c_int; - pub fn reboot(howto: c_int) -> c_int; - - pub fn exect(path: *const c_char, argv: *const *mut c_char, envp: *const *mut c_char) -> c_int; - pub fn execvP( - file: *const c_char, - search_path: *const c_char, - argv: *const *mut c_char, - ) -> c_int; - - pub fn mkostemp(template: *mut c_char, flags: c_int) -> c_int; - pub fn mkostemps(template: *mut c_char, suffixlen: c_int, flags: c_int) -> c_int; - - pub fn posix_spawn( - pid: *mut crate::pid_t, - path: *const c_char, - file_actions: *const crate::posix_spawn_file_actions_t, - attrp: *const crate::posix_spawnattr_t, - argv: *const *mut c_char, - envp: *const *mut c_char, - ) -> c_int; - pub fn posix_spawnp( - pid: *mut crate::pid_t, - file: *const c_char, - file_actions: *const crate::posix_spawn_file_actions_t, - attrp: *const crate::posix_spawnattr_t, - argv: *const *mut c_char, - envp: *const *mut c_char, - ) -> c_int; - pub fn posix_spawnattr_init(attr: *mut posix_spawnattr_t) -> c_int; - pub fn posix_spawnattr_destroy(attr: *mut posix_spawnattr_t) -> c_int; - pub fn posix_spawnattr_getsigdefault( - attr: *const posix_spawnattr_t, - default: *mut crate::sigset_t, - ) -> c_int; - pub fn posix_spawnattr_setsigdefault( - attr: *mut posix_spawnattr_t, - default: *const crate::sigset_t, - ) -> c_int; - pub fn posix_spawnattr_getsigmask( - attr: *const posix_spawnattr_t, - default: *mut crate::sigset_t, - ) -> c_int; - pub fn posix_spawnattr_setsigmask( - attr: *mut posix_spawnattr_t, - default: *const crate::sigset_t, - ) -> c_int; - pub fn posix_spawnattr_getflags(attr: *const posix_spawnattr_t, flags: *mut c_short) -> c_int; - pub fn posix_spawnattr_setflags(attr: *mut posix_spawnattr_t, flags: c_short) -> c_int; - pub fn posix_spawnattr_getpgroup( - attr: *const posix_spawnattr_t, - flags: *mut crate::pid_t, - ) -> c_int; - pub fn posix_spawnattr_setpgroup(attr: *mut posix_spawnattr_t, flags: crate::pid_t) -> c_int; - pub fn posix_spawnattr_getschedpolicy( - attr: *const posix_spawnattr_t, - flags: *mut c_int, - ) -> c_int; - pub fn posix_spawnattr_setschedpolicy(attr: *mut posix_spawnattr_t, flags: c_int) -> c_int; - pub fn posix_spawnattr_getschedparam( - attr: *const posix_spawnattr_t, - param: *mut crate::sched_param, - ) -> c_int; - pub fn posix_spawnattr_setschedparam( - attr: *mut posix_spawnattr_t, - param: *const crate::sched_param, - ) -> c_int; - - pub fn posix_spawn_file_actions_init(actions: *mut posix_spawn_file_actions_t) -> c_int; - pub fn posix_spawn_file_actions_destroy(actions: *mut posix_spawn_file_actions_t) -> c_int; - pub fn posix_spawn_file_actions_addopen( - actions: *mut posix_spawn_file_actions_t, - fd: c_int, - path: *const c_char, - oflag: c_int, - mode: mode_t, - ) -> c_int; - pub fn posix_spawn_file_actions_addclose( - actions: *mut posix_spawn_file_actions_t, - fd: c_int, - ) -> c_int; - pub fn posix_spawn_file_actions_adddup2( - actions: *mut posix_spawn_file_actions_t, - fd: c_int, - newfd: c_int, - ) -> c_int; -} - -#[link(name = "rt")] -extern "C" { - pub fn mq_close(mqd: crate::mqd_t) -> c_int; - pub fn mq_getattr(mqd: 
crate::mqd_t, attr: *mut crate::mq_attr) -> c_int; - pub fn mq_notify(mqd: crate::mqd_t, notification: *const crate::sigevent) -> c_int; - pub fn mq_open(name: *const c_char, oflag: c_int, ...) -> crate::mqd_t; - pub fn mq_receive( - mqd: crate::mqd_t, - msg_ptr: *mut c_char, - msg_len: size_t, - msg_prio: *mut c_uint, - ) -> ssize_t; - pub fn mq_send( - mqd: crate::mqd_t, - msg_ptr: *const c_char, - msg_len: size_t, - msg_prio: c_uint, - ) -> c_int; - pub fn mq_setattr( - mqd: crate::mqd_t, - newattr: *const crate::mq_attr, - oldattr: *mut crate::mq_attr, - ) -> c_int; - pub fn mq_timedreceive( - mqd: crate::mqd_t, - msg_ptr: *mut c_char, - msg_len: size_t, - msg_prio: *mut c_uint, - abs_timeout: *const crate::timespec, - ) -> ssize_t; - pub fn mq_timedsend( - mqd: crate::mqd_t, - msg_ptr: *const c_char, - msg_len: size_t, - msg_prio: c_uint, - abs_timeout: *const crate::timespec, - ) -> c_int; - pub fn mq_unlink(name: *const c_char) -> c_int; - - pub fn getrandom(buf: *mut c_void, buflen: size_t, flags: c_uint) -> ssize_t; - pub fn getentropy(buf: *mut c_void, buflen: size_t) -> c_int; -} - -#[link(name = "util")] -extern "C" { - pub fn openpty( - amaster: *mut c_int, - aslave: *mut c_int, - name: *mut c_char, - termp: *mut termios, - winp: *mut crate::winsize, - ) -> c_int; - pub fn forkpty( - amaster: *mut c_int, - name: *mut c_char, - termp: *mut termios, - winp: *mut crate::winsize, - ) -> crate::pid_t; - pub fn login_tty(fd: c_int) -> c_int; - pub fn fparseln( - stream: *mut crate::FILE, - len: *mut size_t, - lineno: *mut size_t, - delim: *const c_char, - flags: c_int, - ) -> *mut c_char; -} - -#[link(name = "execinfo")] -extern "C" { - pub fn backtrace(addrlist: *mut *mut c_void, len: size_t) -> size_t; - pub fn backtrace_symbols(addrlist: *const *mut c_void, len: size_t) -> *mut *mut c_char; - pub fn backtrace_symbols_fd(addrlist: *const *mut c_void, len: size_t, fd: c_int) -> c_int; -} - -#[link(name = "kvm")] -extern "C" { - pub fn kvm_open( - execfile: *const c_char, - corefile: *const c_char, - swapfile: *const c_char, - flags: c_int, - errstr: *const c_char, - ) -> *mut crate::kvm_t; - pub fn kvm_close(kd: *mut crate::kvm_t) -> c_int; - pub fn kvm_getprocs( - kd: *mut crate::kvm_t, - op: c_int, - arg: c_int, - cnt: *mut c_int, - ) -> *mut crate::kinfo_proc; - pub fn kvm_getloadavg(kd: *mut kvm_t, loadavg: *mut c_double, nelem: c_int) -> c_int; - pub fn kvm_openfiles( - execfile: *const c_char, - corefile: *const c_char, - swapfile: *const c_char, - flags: c_int, - errbuf: *mut c_char, - ) -> *mut crate::kvm_t; - pub fn kvm_read( - kd: *mut crate::kvm_t, - addr: c_ulong, - buf: *mut c_void, - nbytes: size_t, - ) -> ssize_t; - pub fn kvm_write( - kd: *mut crate::kvm_t, - addr: c_ulong, - buf: *const c_void, - nbytes: size_t, - ) -> ssize_t; -} - -cfg_if! { - if #[cfg(target_os = "freebsd")] { - mod freebsd; - pub use self::freebsd::*; - } else if #[cfg(target_os = "dragonfly")] { - mod dragonfly; - pub use self::dragonfly::*; - } else { - // ... - } -} diff --git a/vendor/libc/src/unix/bsd/mod.rs b/vendor/libc/src/unix/bsd/mod.rs deleted file mode 100644 index 24531db8531453..00000000000000 --- a/vendor/libc/src/unix/bsd/mod.rs +++ /dev/null @@ -1,969 +0,0 @@ -use crate::prelude::*; - -pub type off_t = i64; -pub type useconds_t = u32; -pub type blkcnt_t = i64; -pub type socklen_t = u32; -pub type sa_family_t = u8; -pub type pthread_t = crate::uintptr_t; -pub type nfds_t = c_uint; -pub type regoff_t = off_t; - -s! 
{ - pub struct sockaddr { - pub sa_len: u8, - pub sa_family: sa_family_t, - pub sa_data: [c_char; 14], - } - - pub struct sockaddr_in6 { - pub sin6_len: u8, - pub sin6_family: sa_family_t, - pub sin6_port: crate::in_port_t, - pub sin6_flowinfo: u32, - pub sin6_addr: crate::in6_addr, - pub sin6_scope_id: u32, - } - - pub struct passwd { - pub pw_name: *mut c_char, - pub pw_passwd: *mut c_char, - pub pw_uid: crate::uid_t, - pub pw_gid: crate::gid_t, - pub pw_change: crate::time_t, - pub pw_class: *mut c_char, - pub pw_gecos: *mut c_char, - pub pw_dir: *mut c_char, - pub pw_shell: *mut c_char, - pub pw_expire: crate::time_t, - - #[cfg(not(any( - target_os = "macos", - target_os = "ios", - target_os = "tvos", - target_os = "watchos", - target_os = "visionos", - target_os = "netbsd", - target_os = "openbsd" - )))] - pub pw_fields: c_int, - } - - pub struct ifaddrs { - pub ifa_next: *mut ifaddrs, - pub ifa_name: *mut c_char, - pub ifa_flags: c_uint, - pub ifa_addr: *mut crate::sockaddr, - pub ifa_netmask: *mut crate::sockaddr, - pub ifa_dstaddr: *mut crate::sockaddr, - pub ifa_data: *mut c_void, - #[cfg(target_os = "netbsd")] - pub ifa_addrflags: c_uint, - } - - pub struct fd_set { - #[cfg(all( - target_pointer_width = "64", - any(target_os = "freebsd", target_os = "dragonfly") - ))] - fds_bits: [i64; FD_SETSIZE as usize / 64], - #[cfg(not(all( - target_pointer_width = "64", - any(target_os = "freebsd", target_os = "dragonfly") - )))] - fds_bits: [i32; FD_SETSIZE as usize / 32], - } - - pub struct tm { - pub tm_sec: c_int, - pub tm_min: c_int, - pub tm_hour: c_int, - pub tm_mday: c_int, - pub tm_mon: c_int, - pub tm_year: c_int, - pub tm_wday: c_int, - pub tm_yday: c_int, - pub tm_isdst: c_int, - pub tm_gmtoff: c_long, - pub tm_zone: *mut c_char, - } - - pub struct msghdr { - pub msg_name: *mut c_void, - pub msg_namelen: crate::socklen_t, - pub msg_iov: *mut crate::iovec, - pub msg_iovlen: c_int, - pub msg_control: *mut c_void, - pub msg_controllen: crate::socklen_t, - pub msg_flags: c_int, - } - - pub struct cmsghdr { - pub cmsg_len: crate::socklen_t, - pub cmsg_level: c_int, - pub cmsg_type: c_int, - } - - pub struct fsid_t { - __fsid_val: [i32; 2], - } - - pub struct if_nameindex { - pub if_index: c_uint, - pub if_name: *mut c_char, - } - - pub struct regex_t { - __re_magic: c_int, - __re_nsub: size_t, - __re_endp: *const c_char, - __re_g: *mut c_void, - } - - pub struct regmatch_t { - pub rm_so: regoff_t, - pub rm_eo: regoff_t, - } - - pub struct option { - pub name: *const c_char, - pub has_arg: c_int, - pub flag: *mut c_int, - pub val: c_int, - } -} - -s_no_extra_traits! { - pub struct sockaddr_un { - pub sun_len: u8, - pub sun_family: sa_family_t, - pub sun_path: [c_char; 104], - } - - pub struct utsname { - #[cfg(not(target_os = "dragonfly"))] - pub sysname: [c_char; 256], - #[cfg(target_os = "dragonfly")] - pub sysname: [c_char; 32], - #[cfg(not(target_os = "dragonfly"))] - pub nodename: [c_char; 256], - #[cfg(target_os = "dragonfly")] - pub nodename: [c_char; 32], - #[cfg(not(target_os = "dragonfly"))] - pub release: [c_char; 256], - #[cfg(target_os = "dragonfly")] - pub release: [c_char; 32], - #[cfg(not(target_os = "dragonfly"))] - pub version: [c_char; 256], - #[cfg(target_os = "dragonfly")] - pub version: [c_char; 32], - #[cfg(not(target_os = "dragonfly"))] - pub machine: [c_char; 256], - #[cfg(target_os = "dragonfly")] - pub machine: [c_char; 32], - } -} - -cfg_if! 
{ - if #[cfg(feature = "extra_traits")] { - impl PartialEq for sockaddr_un { - fn eq(&self, other: &sockaddr_un) -> bool { - self.sun_len == other.sun_len - && self.sun_family == other.sun_family - && self - .sun_path - .iter() - .zip(other.sun_path.iter()) - .all(|(a, b)| a == b) - } - } - - impl Eq for sockaddr_un {} - - impl hash::Hash for sockaddr_un { - fn hash(&self, state: &mut H) { - self.sun_len.hash(state); - self.sun_family.hash(state); - self.sun_path.hash(state); - } - } - - impl PartialEq for utsname { - fn eq(&self, other: &utsname) -> bool { - self.sysname - .iter() - .zip(other.sysname.iter()) - .all(|(a, b)| a == b) - && self - .nodename - .iter() - .zip(other.nodename.iter()) - .all(|(a, b)| a == b) - && self - .release - .iter() - .zip(other.release.iter()) - .all(|(a, b)| a == b) - && self - .version - .iter() - .zip(other.version.iter()) - .all(|(a, b)| a == b) - && self - .machine - .iter() - .zip(other.machine.iter()) - .all(|(a, b)| a == b) - } - } - - impl Eq for utsname {} - - impl hash::Hash for utsname { - fn hash(&self, state: &mut H) { - self.sysname.hash(state); - self.nodename.hash(state); - self.release.hash(state); - self.version.hash(state); - self.machine.hash(state); - } - } - } -} - -pub const LC_ALL: c_int = 0; -pub const LC_COLLATE: c_int = 1; -pub const LC_CTYPE: c_int = 2; -pub const LC_MONETARY: c_int = 3; -pub const LC_NUMERIC: c_int = 4; -pub const LC_TIME: c_int = 5; -pub const LC_MESSAGES: c_int = 6; - -pub const FIOCLEX: c_ulong = 0x20006601; -pub const FIONCLEX: c_ulong = 0x20006602; -pub const FIONREAD: c_ulong = 0x4004667f; -pub const FIONBIO: c_ulong = 0x8004667e; -pub const FIOASYNC: c_ulong = 0x8004667d; -pub const FIOSETOWN: c_ulong = 0x8004667c; -pub const FIOGETOWN: c_ulong = 0x4004667b; - -pub const PATH_MAX: c_int = 1024; -pub const MAXPATHLEN: c_int = PATH_MAX; - -pub const IOV_MAX: c_int = 1024; - -pub const SA_ONSTACK: c_int = 0x0001; -pub const SA_SIGINFO: c_int = 0x0040; -pub const SA_RESTART: c_int = 0x0002; -pub const SA_RESETHAND: c_int = 0x0004; -pub const SA_NOCLDSTOP: c_int = 0x0008; -pub const SA_NODEFER: c_int = 0x0010; -pub const SA_NOCLDWAIT: c_int = 0x0020; - -pub const SS_ONSTACK: c_int = 1; -pub const SS_DISABLE: c_int = 4; - -pub const SIGCHLD: c_int = 20; -pub const SIGBUS: c_int = 10; -pub const SIGUSR1: c_int = 30; -pub const SIGUSR2: c_int = 31; -pub const SIGCONT: c_int = 19; -pub const SIGSTOP: c_int = 17; -pub const SIGTSTP: c_int = 18; -pub const SIGURG: c_int = 16; -pub const SIGIO: c_int = 23; -pub const SIGSYS: c_int = 12; -pub const SIGTTIN: c_int = 21; -pub const SIGTTOU: c_int = 22; -pub const SIGXCPU: c_int = 24; -pub const SIGXFSZ: c_int = 25; -pub const SIGVTALRM: c_int = 26; -pub const SIGPROF: c_int = 27; -pub const SIGWINCH: c_int = 28; -pub const SIGINFO: c_int = 29; - -pub const SIG_SETMASK: c_int = 3; -pub const SIG_BLOCK: c_int = 0x1; -pub const SIG_UNBLOCK: c_int = 0x2; - -pub const IP_TOS: c_int = 3; -pub const IP_MULTICAST_IF: c_int = 9; -pub const IP_MULTICAST_TTL: c_int = 10; -pub const IP_MULTICAST_LOOP: c_int = 11; - -pub const IPV6_UNICAST_HOPS: c_int = 4; -pub const IPV6_MULTICAST_IF: c_int = 9; -pub const IPV6_MULTICAST_HOPS: c_int = 10; -pub const IPV6_MULTICAST_LOOP: c_int = 11; -pub const IPV6_V6ONLY: c_int = 27; -pub const IPV6_DONTFRAG: c_int = 62; - -pub const IPTOS_ECN_NOTECT: u8 = 0x00; -pub const IPTOS_ECN_MASK: u8 = 0x03; -pub const IPTOS_ECN_ECT1: u8 = 0x01; -pub const IPTOS_ECN_ECT0: u8 = 0x02; -pub const IPTOS_ECN_CE: u8 = 0x03; - -pub const ST_RDONLY: c_ulong = 1; 
- -pub const SCM_RIGHTS: c_int = 0x01; - -pub const NCCS: usize = 20; - -pub const O_ACCMODE: c_int = 0x3; -pub const O_RDONLY: c_int = 0; -pub const O_WRONLY: c_int = 1; -pub const O_RDWR: c_int = 2; -pub const O_APPEND: c_int = 8; -pub const O_CREAT: c_int = 512; -pub const O_TRUNC: c_int = 1024; -pub const O_EXCL: c_int = 2048; -pub const O_ASYNC: c_int = 0x40; -pub const O_SYNC: c_int = 0x80; -pub const O_NONBLOCK: c_int = 0x4; -pub const O_NOFOLLOW: c_int = 0x100; -pub const O_SHLOCK: c_int = 0x10; -pub const O_EXLOCK: c_int = 0x20; -pub const O_FSYNC: c_int = O_SYNC; -pub const O_NDELAY: c_int = O_NONBLOCK; - -pub const F_GETOWN: c_int = 5; -pub const F_SETOWN: c_int = 6; - -pub const F_RDLCK: c_short = 1; -pub const F_UNLCK: c_short = 2; -pub const F_WRLCK: c_short = 3; - -pub const MNT_RDONLY: c_int = 0x00000001; -pub const MNT_SYNCHRONOUS: c_int = 0x00000002; -pub const MNT_NOEXEC: c_int = 0x00000004; -pub const MNT_NOSUID: c_int = 0x00000008; -pub const MNT_ASYNC: c_int = 0x00000040; -pub const MNT_EXPORTED: c_int = 0x00000100; -pub const MNT_UPDATE: c_int = 0x00010000; -pub const MNT_RELOAD: c_int = 0x00040000; -pub const MNT_FORCE: c_int = 0x00080000; - -pub const Q_SYNC: c_int = 0x600; -pub const Q_QUOTAON: c_int = 0x100; -pub const Q_QUOTAOFF: c_int = 0x200; - -pub const TCIOFF: c_int = 3; -pub const TCION: c_int = 4; -pub const TCOOFF: c_int = 1; -pub const TCOON: c_int = 2; -pub const TCIFLUSH: c_int = 1; -pub const TCOFLUSH: c_int = 2; -pub const TCIOFLUSH: c_int = 3; -pub const TCSANOW: c_int = 0; -pub const TCSADRAIN: c_int = 1; -pub const TCSAFLUSH: c_int = 2; -pub const VEOF: usize = 0; -pub const VEOL: usize = 1; -pub const VEOL2: usize = 2; -pub const VERASE: usize = 3; -pub const VWERASE: usize = 4; -pub const VKILL: usize = 5; -pub const VREPRINT: usize = 6; -pub const VINTR: usize = 8; -pub const VQUIT: usize = 9; -pub const VSUSP: usize = 10; -pub const VDSUSP: usize = 11; -pub const VSTART: usize = 12; -pub const VSTOP: usize = 13; -pub const VLNEXT: usize = 14; -pub const VDISCARD: usize = 15; -pub const VMIN: usize = 16; -pub const VTIME: usize = 17; -pub const VSTATUS: usize = 18; -pub const _POSIX_VDISABLE: crate::cc_t = 0xff; -pub const IGNBRK: crate::tcflag_t = 0x00000001; -pub const BRKINT: crate::tcflag_t = 0x00000002; -pub const IGNPAR: crate::tcflag_t = 0x00000004; -pub const PARMRK: crate::tcflag_t = 0x00000008; -pub const INPCK: crate::tcflag_t = 0x00000010; -pub const ISTRIP: crate::tcflag_t = 0x00000020; -pub const INLCR: crate::tcflag_t = 0x00000040; -pub const IGNCR: crate::tcflag_t = 0x00000080; -pub const ICRNL: crate::tcflag_t = 0x00000100; -pub const IXON: crate::tcflag_t = 0x00000200; -pub const IXOFF: crate::tcflag_t = 0x00000400; -pub const IXANY: crate::tcflag_t = 0x00000800; -pub const IMAXBEL: crate::tcflag_t = 0x00002000; -pub const OPOST: crate::tcflag_t = 0x1; -pub const ONLCR: crate::tcflag_t = 0x2; -pub const OXTABS: crate::tcflag_t = 0x4; -pub const ONOEOT: crate::tcflag_t = 0x8; -pub const CIGNORE: crate::tcflag_t = 0x00000001; -pub const CSIZE: crate::tcflag_t = 0x00000300; -pub const CS5: crate::tcflag_t = 0x00000000; -pub const CS6: crate::tcflag_t = 0x00000100; -pub const CS7: crate::tcflag_t = 0x00000200; -pub const CS8: crate::tcflag_t = 0x00000300; -pub const CSTOPB: crate::tcflag_t = 0x00000400; -pub const CREAD: crate::tcflag_t = 0x00000800; -pub const PARENB: crate::tcflag_t = 0x00001000; -pub const PARODD: crate::tcflag_t = 0x00002000; -pub const HUPCL: crate::tcflag_t = 0x00004000; -pub const CLOCAL: crate::tcflag_t = 
0x00008000; -pub const ECHOKE: crate::tcflag_t = 0x00000001; -pub const ECHOE: crate::tcflag_t = 0x00000002; -pub const ECHOK: crate::tcflag_t = 0x00000004; -pub const ECHO: crate::tcflag_t = 0x00000008; -pub const ECHONL: crate::tcflag_t = 0x00000010; -pub const ECHOPRT: crate::tcflag_t = 0x00000020; -pub const ECHOCTL: crate::tcflag_t = 0x00000040; -pub const ISIG: crate::tcflag_t = 0x00000080; -pub const ICANON: crate::tcflag_t = 0x00000100; -pub const ALTWERASE: crate::tcflag_t = 0x00000200; -pub const IEXTEN: crate::tcflag_t = 0x00000400; -pub const EXTPROC: crate::tcflag_t = 0x00000800; -pub const TOSTOP: crate::tcflag_t = 0x00400000; -pub const FLUSHO: crate::tcflag_t = 0x00800000; -pub const NOKERNINFO: crate::tcflag_t = 0x02000000; -pub const PENDIN: crate::tcflag_t = 0x20000000; -pub const NOFLSH: crate::tcflag_t = 0x80000000; -pub const MDMBUF: crate::tcflag_t = 0x00100000; - -pub const WNOHANG: c_int = 0x00000001; -pub const WUNTRACED: c_int = 0x00000002; - -pub const RTLD_LAZY: c_int = 0x1; -pub const RTLD_NOW: c_int = 0x2; -pub const RTLD_NEXT: *mut c_void = -1isize as *mut c_void; -pub const RTLD_DEFAULT: *mut c_void = -2isize as *mut c_void; -pub const RTLD_SELF: *mut c_void = -3isize as *mut c_void; - -pub const LOG_CRON: c_int = 9 << 3; -pub const LOG_AUTHPRIV: c_int = 10 << 3; -pub const LOG_FTP: c_int = 11 << 3; -pub const LOG_PERROR: c_int = 0x20; - -pub const TCP_NODELAY: c_int = 1; -pub const TCP_MAXSEG: c_int = 2; - -pub const PIPE_BUF: usize = 512; - -// si_code values for SIGBUS signal -pub const BUS_ADRALN: c_int = 1; -pub const BUS_ADRERR: c_int = 2; -pub const BUS_OBJERR: c_int = 3; - -// si_code values for SIGCHLD signal -pub const CLD_EXITED: c_int = 1; -pub const CLD_KILLED: c_int = 2; -pub const CLD_DUMPED: c_int = 3; -pub const CLD_TRAPPED: c_int = 4; -pub const CLD_STOPPED: c_int = 5; -pub const CLD_CONTINUED: c_int = 6; - -pub const POLLIN: c_short = 0x1; -pub const POLLPRI: c_short = 0x2; -pub const POLLOUT: c_short = 0x4; -pub const POLLERR: c_short = 0x8; -pub const POLLHUP: c_short = 0x10; -pub const POLLNVAL: c_short = 0x20; -pub const POLLRDNORM: c_short = 0x040; -pub const POLLWRNORM: c_short = 0x004; -pub const POLLRDBAND: c_short = 0x080; -pub const POLLWRBAND: c_short = 0x100; - -pub const BIOCGBLEN: c_ulong = 0x40044266; -pub const BIOCSBLEN: c_ulong = 0xc0044266; -pub const BIOCFLUSH: c_uint = 0x20004268; -pub const BIOCPROMISC: c_uint = 0x20004269; -pub const BIOCGDLT: c_ulong = 0x4004426a; -pub const BIOCGETIF: c_ulong = 0x4020426b; -pub const BIOCSETIF: c_ulong = 0x8020426c; -pub const BIOCGSTATS: c_ulong = 0x4008426f; -pub const BIOCIMMEDIATE: c_ulong = 0x80044270; -pub const BIOCVERSION: c_ulong = 0x40044271; -pub const BIOCGHDRCMPLT: c_ulong = 0x40044274; -pub const BIOCSHDRCMPLT: c_ulong = 0x80044275; -pub const SIOCGIFADDR: c_ulong = 0xc0206921; - -pub const REG_BASIC: c_int = 0o0000; -pub const REG_EXTENDED: c_int = 0o0001; -pub const REG_ICASE: c_int = 0o0002; -pub const REG_NOSUB: c_int = 0o0004; -pub const REG_NEWLINE: c_int = 0o0010; -pub const REG_NOSPEC: c_int = 0o0020; -pub const REG_PEND: c_int = 0o0040; -pub const REG_DUMP: c_int = 0o0200; - -pub const REG_NOMATCH: c_int = 1; -pub const REG_BADPAT: c_int = 2; -pub const REG_ECOLLATE: c_int = 3; -pub const REG_ECTYPE: c_int = 4; -pub const REG_EESCAPE: c_int = 5; -pub const REG_ESUBREG: c_int = 6; -pub const REG_EBRACK: c_int = 7; -pub const REG_EPAREN: c_int = 8; -pub const REG_EBRACE: c_int = 9; -pub const REG_BADBR: c_int = 10; -pub const REG_ERANGE: c_int = 11; -pub const 
REG_ESPACE: c_int = 12; -pub const REG_BADRPT: c_int = 13; -pub const REG_EMPTY: c_int = 14; -pub const REG_ASSERT: c_int = 15; -pub const REG_INVARG: c_int = 16; -pub const REG_ATOI: c_int = 255; -pub const REG_ITOA: c_int = 0o0400; - -pub const REG_NOTBOL: c_int = 0o00001; -pub const REG_NOTEOL: c_int = 0o00002; -pub const REG_STARTEND: c_int = 0o00004; -pub const REG_TRACE: c_int = 0o00400; -pub const REG_LARGE: c_int = 0o01000; -pub const REG_BACKR: c_int = 0o02000; - -pub const TIOCCBRK: c_uint = 0x2000747a; -pub const TIOCSBRK: c_uint = 0x2000747b; - -pub const PRIO_PROCESS: c_int = 0; -pub const PRIO_PGRP: c_int = 1; -pub const PRIO_USER: c_int = 2; - -pub const ITIMER_REAL: c_int = 0; -pub const ITIMER_VIRTUAL: c_int = 1; -pub const ITIMER_PROF: c_int = 2; - -// net/route.h - -pub const RTF_UP: c_int = 0x1; -pub const RTF_GATEWAY: c_int = 0x2; -pub const RTF_HOST: c_int = 0x4; -pub const RTF_REJECT: c_int = 0x8; -pub const RTF_DYNAMIC: c_int = 0x10; -pub const RTF_MODIFIED: c_int = 0x20; -pub const RTF_DONE: c_int = 0x40; -pub const RTF_STATIC: c_int = 0x800; -pub const RTF_BLACKHOLE: c_int = 0x1000; -pub const RTF_PROTO2: c_int = 0x4000; -pub const RTF_PROTO1: c_int = 0x8000; - -// Message types -pub const RTM_ADD: c_int = 0x1; -pub const RTM_DELETE: c_int = 0x2; -pub const RTM_CHANGE: c_int = 0x3; -pub const RTM_GET: c_int = 0x4; -pub const RTM_LOSING: c_int = 0x5; -pub const RTM_REDIRECT: c_int = 0x6; -pub const RTM_MISS: c_int = 0x7; - -// Bitmask values for rtm_addrs. -pub const RTA_DST: c_int = 0x1; -pub const RTA_GATEWAY: c_int = 0x2; -pub const RTA_NETMASK: c_int = 0x4; -pub const RTA_GENMASK: c_int = 0x8; -pub const RTA_IFP: c_int = 0x10; -pub const RTA_IFA: c_int = 0x20; -pub const RTA_AUTHOR: c_int = 0x40; -pub const RTA_BRD: c_int = 0x80; - -// Index offsets for sockaddr array for alternate internal encoding. -pub const RTAX_DST: c_int = 0; -pub const RTAX_GATEWAY: c_int = 1; -pub const RTAX_NETMASK: c_int = 2; -pub const RTAX_GENMASK: c_int = 3; -pub const RTAX_IFP: c_int = 4; -pub const RTAX_IFA: c_int = 5; -pub const RTAX_AUTHOR: c_int = 6; -pub const RTAX_BRD: c_int = 7; - -f! { - pub fn CMSG_FIRSTHDR(mhdr: *const crate::msghdr) -> *mut cmsghdr { - if (*mhdr).msg_controllen as usize >= size_of::() { - (*mhdr).msg_control.cast::() - } else { - core::ptr::null_mut() - } - } - - pub fn FD_CLR(fd: c_int, set: *mut fd_set) -> () { - let bits = size_of_val(&(*set).fds_bits[0]) * 8; - let fd = fd as usize; - (*set).fds_bits[fd / bits] &= !(1 << (fd % bits)); - return; - } - - pub fn FD_ISSET(fd: c_int, set: *const fd_set) -> bool { - let bits = size_of_val(&(*set).fds_bits[0]) * 8; - let fd = fd as usize; - return ((*set).fds_bits[fd / bits] & (1 << (fd % bits))) != 0; - } - - pub fn FD_SET(fd: c_int, set: *mut fd_set) -> () { - let bits = size_of_val(&(*set).fds_bits[0]) * 8; - let fd = fd as usize; - (*set).fds_bits[fd / bits] |= 1 << (fd % bits); - return; - } - - pub fn FD_ZERO(set: *mut fd_set) -> () { - for slot in &mut (*set).fds_bits { - *slot = 0; - } - } -} - -safe_f! 
{ - pub const fn WTERMSIG(status: c_int) -> c_int { - status & 0o177 - } - - pub const fn WIFEXITED(status: c_int) -> bool { - (status & 0o177) == 0 - } - - pub const fn WEXITSTATUS(status: c_int) -> c_int { - (status >> 8) & 0x00ff - } - - pub const fn WCOREDUMP(status: c_int) -> bool { - (status & 0o200) != 0 - } - - pub const fn QCMD(cmd: c_int, type_: c_int) -> c_int { - (cmd << 8) | (type_ & 0x00ff) - } -} - -extern "C" { - #[cfg_attr( - all(target_os = "macos", target_arch = "x86"), - link_name = "getrlimit$UNIX2003" - )] - pub fn getrlimit(resource: c_int, rlim: *mut crate::rlimit) -> c_int; - #[cfg_attr( - all(target_os = "macos", target_arch = "x86"), - link_name = "setrlimit$UNIX2003" - )] - pub fn setrlimit(resource: c_int, rlim: *const crate::rlimit) -> c_int; - - pub fn strerror_r(errnum: c_int, buf: *mut c_char, buflen: size_t) -> c_int; - pub fn abs(i: c_int) -> c_int; - pub fn labs(i: c_long) -> c_long; - #[cfg_attr( - all(target_os = "freebsd", any(freebsd12, freebsd11, freebsd10)), - link_name = "rand@FBSD_1.0" - )] - pub fn rand() -> c_int; - #[cfg_attr( - all(target_os = "freebsd", any(freebsd12, freebsd11, freebsd10)), - link_name = "srand@FBSD_1.0" - )] - pub fn srand(seed: c_uint); - - pub fn getifaddrs(ifap: *mut *mut crate::ifaddrs) -> c_int; - pub fn freeifaddrs(ifa: *mut crate::ifaddrs); - pub fn setgroups(ngroups: c_int, ptr: *const crate::gid_t) -> c_int; - pub fn setlogin(name: *const c_char) -> c_int; - pub fn ioctl(fd: c_int, request: c_ulong, ...) -> c_int; - pub fn kqueue() -> c_int; - pub fn unmount(target: *const c_char, arg: c_int) -> c_int; - pub fn syscall(num: c_int, ...) -> c_int; - #[cfg_attr(target_os = "netbsd", link_name = "__getpwent50")] - pub fn getpwent() -> *mut passwd; - pub fn setpwent(); - pub fn endpwent(); - pub fn endgrent(); - pub fn getgrent() -> *mut crate::group; - - pub fn getprogname() -> *const c_char; - pub fn setprogname(name: *const c_char); - pub fn getloadavg(loadavg: *mut c_double, nelem: c_int) -> c_int; - pub fn if_nameindex() -> *mut if_nameindex; - pub fn if_freenameindex(ptr: *mut if_nameindex); - - pub fn getpeereid(socket: c_int, euid: *mut crate::uid_t, egid: *mut crate::gid_t) -> c_int; - - #[cfg_attr( - all(target_os = "macos", not(target_arch = "aarch64")), - link_name = "glob$INODE64" - )] - #[cfg_attr(target_os = "netbsd", link_name = "__glob30")] - #[cfg_attr( - all(target_os = "freebsd", any(freebsd11, freebsd10)), - link_name = "glob@FBSD_1.0" - )] - pub fn glob( - pattern: *const c_char, - flags: c_int, - errfunc: Option c_int>, - pglob: *mut crate::glob_t, - ) -> c_int; - #[cfg_attr(target_os = "netbsd", link_name = "__globfree30")] - #[cfg_attr( - all(target_os = "freebsd", any(freebsd11, freebsd10)), - link_name = "globfree@FBSD_1.0" - )] - pub fn globfree(pglob: *mut crate::glob_t); - - pub fn posix_madvise(addr: *mut c_void, len: size_t, advice: c_int) -> c_int; - - pub fn shm_unlink(name: *const c_char) -> c_int; - - #[cfg_attr( - all(target_os = "macos", target_arch = "x86_64"), - link_name = "seekdir$INODE64" - )] - #[cfg_attr( - all(target_os = "macos", target_arch = "x86"), - link_name = "seekdir$INODE64$UNIX2003" - )] - pub fn seekdir(dirp: *mut crate::DIR, loc: c_long); - - #[cfg_attr( - all(target_os = "macos", target_arch = "x86_64"), - link_name = "telldir$INODE64" - )] - #[cfg_attr( - all(target_os = "macos", target_arch = "x86"), - link_name = "telldir$INODE64$UNIX2003" - )] - pub fn telldir(dirp: *mut crate::DIR) -> c_long; - pub fn madvise(addr: *mut c_void, len: size_t, advice: c_int) 
-> c_int; - - #[cfg_attr( - all(target_os = "macos", target_arch = "x86"), - link_name = "msync$UNIX2003" - )] - #[cfg_attr(target_os = "netbsd", link_name = "__msync13")] - pub fn msync(addr: *mut c_void, len: size_t, flags: c_int) -> c_int; - - #[cfg_attr( - all(target_os = "macos", target_arch = "x86"), - link_name = "recvfrom$UNIX2003" - )] - pub fn recvfrom( - socket: c_int, - buf: *mut c_void, - len: size_t, - flags: c_int, - addr: *mut crate::sockaddr, - addrlen: *mut crate::socklen_t, - ) -> ssize_t; - pub fn mkstemps(template: *mut c_char, suffixlen: c_int) -> c_int; - #[cfg_attr(target_os = "netbsd", link_name = "__futimes50")] - pub fn futimes(fd: c_int, times: *const crate::timeval) -> c_int; - pub fn nl_langinfo(item: crate::nl_item) -> *mut c_char; - - #[cfg_attr( - all(target_os = "macos", target_arch = "x86"), - link_name = "bind$UNIX2003" - )] - pub fn bind( - socket: c_int, - address: *const crate::sockaddr, - address_len: crate::socklen_t, - ) -> c_int; - - #[cfg_attr( - all(target_os = "macos", target_arch = "x86"), - link_name = "writev$UNIX2003" - )] - pub fn writev(fd: c_int, iov: *const crate::iovec, iovcnt: c_int) -> ssize_t; - #[cfg_attr( - all(target_os = "macos", target_arch = "x86"), - link_name = "readv$UNIX2003" - )] - pub fn readv(fd: c_int, iov: *const crate::iovec, iovcnt: c_int) -> ssize_t; - - #[cfg_attr( - all(target_os = "macos", target_arch = "x86"), - link_name = "sendmsg$UNIX2003" - )] - pub fn sendmsg(fd: c_int, msg: *const crate::msghdr, flags: c_int) -> ssize_t; - #[cfg_attr( - all(target_os = "macos", target_arch = "x86"), - link_name = "recvmsg$UNIX2003" - )] - pub fn recvmsg(fd: c_int, msg: *mut crate::msghdr, flags: c_int) -> ssize_t; - - pub fn sync(); - pub fn getgrgid_r( - gid: crate::gid_t, - grp: *mut crate::group, - buf: *mut c_char, - buflen: size_t, - result: *mut *mut crate::group, - ) -> c_int; - #[cfg_attr( - all(target_os = "macos", target_arch = "x86"), - link_name = "sigaltstack$UNIX2003" - )] - #[cfg_attr(target_os = "netbsd", link_name = "__sigaltstack14")] - pub fn sigaltstack(ss: *const stack_t, oss: *mut stack_t) -> c_int; - pub fn sigsuspend(mask: *const crate::sigset_t) -> c_int; - pub fn sem_close(sem: *mut sem_t) -> c_int; - pub fn getdtablesize() -> c_int; - pub fn getgrnam_r( - name: *const c_char, - grp: *mut crate::group, - buf: *mut c_char, - buflen: size_t, - result: *mut *mut crate::group, - ) -> c_int; - #[cfg_attr( - all(target_os = "macos", target_arch = "x86"), - link_name = "pthread_sigmask$UNIX2003" - )] - pub fn pthread_sigmask(how: c_int, set: *const sigset_t, oldset: *mut sigset_t) -> c_int; - pub fn sem_open(name: *const c_char, oflag: c_int, ...) 
-> *mut sem_t; - pub fn getgrnam(name: *const c_char) -> *mut crate::group; - #[cfg_attr( - all(target_os = "macos", target_arch = "x86"), - link_name = "pthread_cancel$UNIX2003" - )] - pub fn pthread_cancel(thread: crate::pthread_t) -> c_int; - pub fn pthread_kill(thread: crate::pthread_t, sig: c_int) -> c_int; - pub fn sched_get_priority_min(policy: c_int) -> c_int; - pub fn sched_get_priority_max(policy: c_int) -> c_int; - pub fn sem_unlink(name: *const c_char) -> c_int; - #[cfg_attr(target_os = "netbsd", link_name = "__getpwnam_r50")] - pub fn getpwnam_r( - name: *const c_char, - pwd: *mut passwd, - buf: *mut c_char, - buflen: size_t, - result: *mut *mut passwd, - ) -> c_int; - #[cfg_attr(target_os = "netbsd", link_name = "__getpwuid_r50")] - pub fn getpwuid_r( - uid: crate::uid_t, - pwd: *mut passwd, - buf: *mut c_char, - buflen: size_t, - result: *mut *mut passwd, - ) -> c_int; - #[cfg_attr( - all(target_os = "macos", target_arch = "x86"), - link_name = "sigwait$UNIX2003" - )] - pub fn sigwait(set: *const sigset_t, sig: *mut c_int) -> c_int; - pub fn pthread_atfork( - prepare: Option, - parent: Option, - child: Option, - ) -> c_int; - pub fn getgrgid(gid: crate::gid_t) -> *mut crate::group; - #[cfg_attr( - all(target_os = "macos", target_arch = "x86"), - link_name = "popen$UNIX2003" - )] - pub fn popen(command: *const c_char, mode: *const c_char) -> *mut crate::FILE; - pub fn faccessat(dirfd: c_int, pathname: *const c_char, mode: c_int, flags: c_int) -> c_int; - pub fn pthread_create( - native: *mut crate::pthread_t, - attr: *const crate::pthread_attr_t, - f: extern "C" fn(*mut c_void) -> *mut c_void, - value: *mut c_void, - ) -> c_int; - pub fn acct(filename: *const c_char) -> c_int; - #[cfg_attr( - all(target_os = "macos", target_arch = "x86"), - link_name = "wait4$UNIX2003" - )] - #[cfg_attr( - all(target_os = "freebsd", any(freebsd12, freebsd11, freebsd10)), - link_name = "wait4@FBSD_1.0" - )] - pub fn wait4( - pid: crate::pid_t, - status: *mut c_int, - options: c_int, - rusage: *mut crate::rusage, - ) -> crate::pid_t; - #[cfg_attr( - all(target_os = "macos", target_arch = "x86"), - link_name = "getitimer$UNIX2003" - )] - pub fn getitimer(which: c_int, curr_value: *mut crate::itimerval) -> c_int; - #[cfg_attr( - all(target_os = "macos", target_arch = "x86"), - link_name = "setitimer$UNIX2003" - )] - pub fn setitimer( - which: c_int, - new_value: *const crate::itimerval, - old_value: *mut crate::itimerval, - ) -> c_int; - - pub fn regcomp(preg: *mut regex_t, pattern: *const c_char, cflags: c_int) -> c_int; - - pub fn regexec( - preg: *const regex_t, - input: *const c_char, - nmatch: size_t, - pmatch: *mut regmatch_t, - eflags: c_int, - ) -> c_int; - - pub fn regerror( - errcode: c_int, - preg: *const regex_t, - errbuf: *mut c_char, - errbuf_size: size_t, - ) -> size_t; - - pub fn regfree(preg: *mut regex_t); - - pub fn arc4random() -> u32; - pub fn arc4random_buf(buf: *mut c_void, size: size_t); - pub fn arc4random_uniform(l: u32) -> u32; - - pub fn drand48() -> c_double; - pub fn erand48(xseed: *mut c_ushort) -> c_double; - pub fn lrand48() -> c_long; - pub fn nrand48(xseed: *mut c_ushort) -> c_long; - pub fn mrand48() -> c_long; - pub fn jrand48(xseed: *mut c_ushort) -> c_long; - pub fn srand48(seed: c_long); - pub fn seed48(xseed: *mut c_ushort) -> *mut c_ushort; - pub fn lcong48(p: *mut c_ushort); - pub fn getopt_long( - argc: c_int, - argv: *const *mut c_char, - optstring: *const c_char, - longopts: *const option, - longindex: *mut c_int, - ) -> c_int; - - pub fn strftime( - 
buf: *mut c_char, - maxsize: size_t, - format: *const c_char, - timeptr: *const crate::tm, - ) -> size_t; - pub fn strftime_l( - buf: *mut c_char, - maxsize: size_t, - format: *const c_char, - timeptr: *const crate::tm, - locale: crate::locale_t, - ) -> size_t; - - pub fn devname(dev: crate::dev_t, mode_t: crate::mode_t) -> *mut c_char; -} - -cfg_if! { - if #[cfg(any( - target_os = "macos", - target_os = "ios", - target_os = "tvos", - target_os = "watchos", - target_os = "visionos" - ))] { - mod apple; - pub use self::apple::*; - } else if #[cfg(any(target_os = "openbsd", target_os = "netbsd"))] { - mod netbsdlike; - pub use self::netbsdlike::*; - } else if #[cfg(any(target_os = "freebsd", target_os = "dragonfly"))] { - mod freebsdlike; - pub use self::freebsdlike::*; - } else { - // Unknown target_os - } -} diff --git a/vendor/libc/src/unix/bsd/netbsdlike/mod.rs b/vendor/libc/src/unix/bsd/netbsdlike/mod.rs deleted file mode 100644 index bc3e4cdf094ff3..00000000000000 --- a/vendor/libc/src/unix/bsd/netbsdlike/mod.rs +++ /dev/null @@ -1,905 +0,0 @@ -use crate::off_t; -use crate::prelude::*; - -pub type wchar_t = i32; -pub type time_t = i64; -pub type mode_t = u32; -pub type nlink_t = u32; -pub type ino_t = u64; -pub type pthread_key_t = c_int; -pub type rlim_t = u64; -pub type speed_t = c_uint; -pub type tcflag_t = c_uint; -pub type nl_item = c_long; -pub type clockid_t = c_int; -pub type id_t = u32; -pub type sem_t = *mut sem; -pub type key_t = c_long; - -#[derive(Debug)] -pub enum timezone {} -impl Copy for timezone {} -impl Clone for timezone { - fn clone(&self) -> timezone { - *self - } -} -#[derive(Debug)] -pub enum sem {} -impl Copy for sem {} -impl Clone for sem { - fn clone(&self) -> sem { - *self - } -} - -s! { - pub struct sched_param { - pub sched_priority: c_int, - } - - pub struct sigaction { - pub sa_sigaction: crate::sighandler_t, - pub sa_mask: crate::sigset_t, - pub sa_flags: c_int, - } - - pub struct stack_t { - pub ss_sp: *mut c_void, - pub ss_size: size_t, - pub ss_flags: c_int, - } - - pub struct in6_pktinfo { - pub ipi6_addr: crate::in6_addr, - pub ipi6_ifindex: c_uint, - } - - pub struct termios { - pub c_iflag: crate::tcflag_t, - pub c_oflag: crate::tcflag_t, - pub c_cflag: crate::tcflag_t, - pub c_lflag: crate::tcflag_t, - pub c_cc: [crate::cc_t; crate::NCCS], - pub c_ispeed: c_int, - pub c_ospeed: c_int, - } - - pub struct flock { - pub l_start: off_t, - pub l_len: off_t, - pub l_pid: crate::pid_t, - pub l_type: c_short, - pub l_whence: c_short, - } - - pub struct ipc_perm { - pub cuid: crate::uid_t, - pub cgid: crate::gid_t, - pub uid: crate::uid_t, - pub gid: crate::gid_t, - pub mode: mode_t, - #[cfg(target_os = "openbsd")] - pub seq: c_ushort, - #[cfg(target_os = "netbsd")] - pub _seq: c_ushort, - #[cfg(target_os = "openbsd")] - pub key: crate::key_t, - #[cfg(target_os = "netbsd")] - pub _key: crate::key_t, - } - - pub struct ptrace_io_desc { - pub piod_op: c_int, - pub piod_offs: *mut c_void, - pub piod_addr: *mut c_void, - pub piod_len: size_t, - } - - pub struct mmsghdr { - pub msg_hdr: crate::msghdr, - pub msg_len: c_uint, - } -} - -pub const D_T_FMT: crate::nl_item = 0; -pub const D_FMT: crate::nl_item = 1; -pub const T_FMT: crate::nl_item = 2; -pub const T_FMT_AMPM: crate::nl_item = 3; -pub const AM_STR: crate::nl_item = 4; -pub const PM_STR: crate::nl_item = 5; - -pub const DAY_1: crate::nl_item = 6; -pub const DAY_2: crate::nl_item = 7; -pub const DAY_3: crate::nl_item = 8; -pub const DAY_4: crate::nl_item = 9; -pub const DAY_5: crate::nl_item = 10; -pub 
const DAY_6: crate::nl_item = 11; -pub const DAY_7: crate::nl_item = 12; - -pub const ABDAY_1: crate::nl_item = 13; -pub const ABDAY_2: crate::nl_item = 14; -pub const ABDAY_3: crate::nl_item = 15; -pub const ABDAY_4: crate::nl_item = 16; -pub const ABDAY_5: crate::nl_item = 17; -pub const ABDAY_6: crate::nl_item = 18; -pub const ABDAY_7: crate::nl_item = 19; - -pub const MON_1: crate::nl_item = 20; -pub const MON_2: crate::nl_item = 21; -pub const MON_3: crate::nl_item = 22; -pub const MON_4: crate::nl_item = 23; -pub const MON_5: crate::nl_item = 24; -pub const MON_6: crate::nl_item = 25; -pub const MON_7: crate::nl_item = 26; -pub const MON_8: crate::nl_item = 27; -pub const MON_9: crate::nl_item = 28; -pub const MON_10: crate::nl_item = 29; -pub const MON_11: crate::nl_item = 30; -pub const MON_12: crate::nl_item = 31; - -pub const ABMON_1: crate::nl_item = 32; -pub const ABMON_2: crate::nl_item = 33; -pub const ABMON_3: crate::nl_item = 34; -pub const ABMON_4: crate::nl_item = 35; -pub const ABMON_5: crate::nl_item = 36; -pub const ABMON_6: crate::nl_item = 37; -pub const ABMON_7: crate::nl_item = 38; -pub const ABMON_8: crate::nl_item = 39; -pub const ABMON_9: crate::nl_item = 40; -pub const ABMON_10: crate::nl_item = 41; -pub const ABMON_11: crate::nl_item = 42; -pub const ABMON_12: crate::nl_item = 43; - -pub const RADIXCHAR: crate::nl_item = 44; -pub const THOUSEP: crate::nl_item = 45; -pub const YESSTR: crate::nl_item = 46; -pub const YESEXPR: crate::nl_item = 47; -pub const NOSTR: crate::nl_item = 48; -pub const NOEXPR: crate::nl_item = 49; -pub const CRNCYSTR: crate::nl_item = 50; - -pub const CODESET: crate::nl_item = 51; - -pub const EXIT_FAILURE: c_int = 1; -pub const EXIT_SUCCESS: c_int = 0; -pub const RAND_MAX: c_int = 2147483647; -pub const EOF: c_int = -1; -pub const SEEK_SET: c_int = 0; -pub const SEEK_CUR: c_int = 1; -pub const SEEK_END: c_int = 2; -pub const _IOFBF: c_int = 0; -pub const _IONBF: c_int = 2; -pub const _IOLBF: c_int = 1; -pub const BUFSIZ: c_uint = 1024; -pub const FOPEN_MAX: c_uint = 20; -pub const FILENAME_MAX: c_uint = 1024; -pub const L_tmpnam: c_uint = 1024; -pub const O_NOCTTY: c_int = 32768; -pub const S_IFIFO: mode_t = 0o1_0000; -pub const S_IFCHR: mode_t = 0o2_0000; -pub const S_IFBLK: mode_t = 0o6_0000; -pub const S_IFDIR: mode_t = 0o4_0000; -pub const S_IFREG: mode_t = 0o10_0000; -pub const S_IFLNK: mode_t = 0o12_0000; -pub const S_IFSOCK: mode_t = 0o14_0000; -pub const S_IFMT: mode_t = 0o17_0000; -pub const S_IEXEC: mode_t = 0o0100; -pub const S_IWRITE: mode_t = 0o0200; -pub const S_IREAD: mode_t = 0o0400; -pub const S_IRWXU: mode_t = 0o0700; -pub const S_IXUSR: mode_t = 0o0100; -pub const S_IWUSR: mode_t = 0o0200; -pub const S_IRUSR: mode_t = 0o0400; -pub const S_IRWXG: mode_t = 0o0070; -pub const S_IXGRP: mode_t = 0o0010; -pub const S_IWGRP: mode_t = 0o0020; -pub const S_IRGRP: mode_t = 0o0040; -pub const S_IRWXO: mode_t = 0o0007; -pub const S_IXOTH: mode_t = 0o0001; -pub const S_IWOTH: mode_t = 0o0002; -pub const S_IROTH: mode_t = 0o0004; -pub const F_OK: c_int = 0; -pub const R_OK: c_int = 4; -pub const W_OK: c_int = 2; -pub const X_OK: c_int = 1; -pub const STDIN_FILENO: c_int = 0; -pub const STDOUT_FILENO: c_int = 1; -pub const STDERR_FILENO: c_int = 2; -pub const F_LOCK: c_int = 1; -pub const F_TEST: c_int = 3; -pub const F_TLOCK: c_int = 2; -pub const F_ULOCK: c_int = 0; -pub const F_GETLK: c_int = 7; -pub const F_SETLK: c_int = 8; -pub const F_SETLKW: c_int = 9; -pub const SIGHUP: c_int = 1; -pub const SIGINT: c_int = 2; -pub const 
SIGQUIT: c_int = 3; -pub const SIGILL: c_int = 4; -pub const SIGABRT: c_int = 6; -pub const SIGEMT: c_int = 7; -pub const SIGFPE: c_int = 8; -pub const SIGKILL: c_int = 9; -pub const SIGSEGV: c_int = 11; -pub const SIGPIPE: c_int = 13; -pub const SIGALRM: c_int = 14; -pub const SIGTERM: c_int = 15; - -pub const PROT_NONE: c_int = 0; -pub const PROT_READ: c_int = 1; -pub const PROT_WRITE: c_int = 2; -pub const PROT_EXEC: c_int = 4; - -pub const MAP_FILE: c_int = 0x0000; -pub const MAP_SHARED: c_int = 0x0001; -pub const MAP_PRIVATE: c_int = 0x0002; -pub const MAP_FIXED: c_int = 0x0010; -pub const MAP_ANON: c_int = 0x1000; -pub const MAP_ANONYMOUS: c_int = MAP_ANON; - -pub const MAP_FAILED: *mut c_void = !0 as *mut c_void; - -pub const IPC_CREAT: c_int = 0o001000; -pub const IPC_EXCL: c_int = 0o002000; -pub const IPC_NOWAIT: c_int = 0o004000; - -pub const IPC_PRIVATE: crate::key_t = 0; - -pub const IPC_RMID: c_int = 0; -pub const IPC_SET: c_int = 1; -pub const IPC_STAT: c_int = 2; - -pub const IPC_R: c_int = 0o000400; -pub const IPC_W: c_int = 0o000200; -pub const IPC_M: c_int = 0o010000; - -pub const SHM_R: c_int = IPC_R; -pub const SHM_W: c_int = IPC_W; - -pub const MCL_CURRENT: c_int = 0x0001; -pub const MCL_FUTURE: c_int = 0x0002; - -pub const MS_ASYNC: c_int = 0x0001; - -pub const EPERM: c_int = 1; -pub const ENOENT: c_int = 2; -pub const ESRCH: c_int = 3; -pub const EINTR: c_int = 4; -pub const EIO: c_int = 5; -pub const ENXIO: c_int = 6; -pub const E2BIG: c_int = 7; -pub const ENOEXEC: c_int = 8; -pub const EBADF: c_int = 9; -pub const ECHILD: c_int = 10; -pub const EDEADLK: c_int = 11; -pub const ENOMEM: c_int = 12; -pub const EACCES: c_int = 13; -pub const EFAULT: c_int = 14; -pub const ENOTBLK: c_int = 15; -pub const EBUSY: c_int = 16; -pub const EEXIST: c_int = 17; -pub const EXDEV: c_int = 18; -pub const ENODEV: c_int = 19; -pub const ENOTDIR: c_int = 20; -pub const EISDIR: c_int = 21; -pub const EINVAL: c_int = 22; -pub const ENFILE: c_int = 23; -pub const EMFILE: c_int = 24; -pub const ENOTTY: c_int = 25; -pub const ETXTBSY: c_int = 26; -pub const EFBIG: c_int = 27; -pub const ENOSPC: c_int = 28; -pub const ESPIPE: c_int = 29; -pub const EROFS: c_int = 30; -pub const EMLINK: c_int = 31; -pub const EPIPE: c_int = 32; -pub const EDOM: c_int = 33; -pub const ERANGE: c_int = 34; -pub const EAGAIN: c_int = 35; -pub const EWOULDBLOCK: c_int = 35; -pub const EINPROGRESS: c_int = 36; -pub const EALREADY: c_int = 37; -pub const ENOTSOCK: c_int = 38; -pub const EDESTADDRREQ: c_int = 39; -pub const EMSGSIZE: c_int = 40; -pub const EPROTOTYPE: c_int = 41; -pub const ENOPROTOOPT: c_int = 42; -pub const EPROTONOSUPPORT: c_int = 43; -pub const ESOCKTNOSUPPORT: c_int = 44; -pub const EOPNOTSUPP: c_int = 45; -pub const EPFNOSUPPORT: c_int = 46; -pub const EAFNOSUPPORT: c_int = 47; -pub const EADDRINUSE: c_int = 48; -pub const EADDRNOTAVAIL: c_int = 49; -pub const ENETDOWN: c_int = 50; -pub const ENETUNREACH: c_int = 51; -pub const ENETRESET: c_int = 52; -pub const ECONNABORTED: c_int = 53; -pub const ECONNRESET: c_int = 54; -pub const ENOBUFS: c_int = 55; -pub const EISCONN: c_int = 56; -pub const ENOTCONN: c_int = 57; -pub const ESHUTDOWN: c_int = 58; -pub const ETOOMANYREFS: c_int = 59; -pub const ETIMEDOUT: c_int = 60; -pub const ECONNREFUSED: c_int = 61; -pub const ELOOP: c_int = 62; -pub const ENAMETOOLONG: c_int = 63; -pub const EHOSTDOWN: c_int = 64; -pub const EHOSTUNREACH: c_int = 65; -pub const ENOTEMPTY: c_int = 66; -pub const EPROCLIM: c_int = 67; -pub const EUSERS: c_int = 68; -pub 
const EDQUOT: c_int = 69; -pub const ESTALE: c_int = 70; -pub const EREMOTE: c_int = 71; -pub const EBADRPC: c_int = 72; -pub const ERPCMISMATCH: c_int = 73; -pub const EPROGUNAVAIL: c_int = 74; -pub const EPROGMISMATCH: c_int = 75; -pub const EPROCUNAVAIL: c_int = 76; -pub const ENOLCK: c_int = 77; -pub const ENOSYS: c_int = 78; -pub const EFTYPE: c_int = 79; -pub const EAUTH: c_int = 80; -pub const ENEEDAUTH: c_int = 81; - -pub const F_DUPFD: c_int = 0; -pub const F_GETFD: c_int = 1; -pub const F_SETFD: c_int = 2; -pub const F_GETFL: c_int = 3; -pub const F_SETFL: c_int = 4; - -pub const SIGTRAP: c_int = 5; - -pub const GLOB_APPEND: c_int = 0x0001; -pub const GLOB_DOOFFS: c_int = 0x0002; -pub const GLOB_ERR: c_int = 0x0004; -pub const GLOB_MARK: c_int = 0x0008; -pub const GLOB_NOCHECK: c_int = 0x0010; -pub const GLOB_NOSORT: c_int = 0x0020; -pub const GLOB_NOESCAPE: c_int = 0x1000; - -pub const GLOB_NOSPACE: c_int = -1; -pub const GLOB_ABORTED: c_int = -2; -pub const GLOB_NOMATCH: c_int = -3; -pub const GLOB_NOSYS: c_int = -4; - -pub const POSIX_MADV_NORMAL: c_int = 0; -pub const POSIX_MADV_RANDOM: c_int = 1; -pub const POSIX_MADV_SEQUENTIAL: c_int = 2; -pub const POSIX_MADV_WILLNEED: c_int = 3; -pub const POSIX_MADV_DONTNEED: c_int = 4; - -// DIFF(main): changed to `c_short` in f62eb023ab -pub const POSIX_SPAWN_RESETIDS: c_int = 0x01; -pub const POSIX_SPAWN_SETPGROUP: c_int = 0x02; -pub const POSIX_SPAWN_SETSCHEDPARAM: c_int = 0x04; -pub const POSIX_SPAWN_SETSCHEDULER: c_int = 0x08; -pub const POSIX_SPAWN_SETSIGDEF: c_int = 0x10; -pub const POSIX_SPAWN_SETSIGMASK: c_int = 0x20; - -pub const PTHREAD_CREATE_JOINABLE: c_int = 0; -pub const PTHREAD_CREATE_DETACHED: c_int = 1; - -pub const PIOD_READ_D: c_int = 1; -pub const PIOD_WRITE_D: c_int = 2; -pub const PIOD_READ_I: c_int = 3; -pub const PIOD_WRITE_I: c_int = 4; -pub const PIOD_READ_AUXV: c_int = 5; - -pub const PT_TRACE_ME: c_int = 0; -pub const PT_READ_I: c_int = 1; -pub const PT_READ_D: c_int = 2; -pub const PT_WRITE_I: c_int = 4; -pub const PT_WRITE_D: c_int = 5; -pub const PT_CONTINUE: c_int = 7; -pub const PT_KILL: c_int = 8; -pub const PT_ATTACH: c_int = 9; -pub const PT_DETACH: c_int = 10; -pub const PT_IO: c_int = 11; - -// http://man.openbsd.org/OpenBSD-current/man2/clock_getres.2 -// The man page says clock_gettime(3) can accept various values as clockid_t but -// http://fxr.watson.org/fxr/source/kern/kern_time.c?v=OPENBSD;im=excerpts#L161 -// the implementation rejects anything other than the below two -// -// http://netbsd.gw.com/cgi-bin/man-cgi?clock_gettime -// https://github.com/jsonn/src/blob/HEAD/sys/kern/subr_time.c#L222 -// Basically the same goes for NetBSD -pub const CLOCK_REALTIME: crate::clockid_t = 0; -pub const CLOCK_MONOTONIC: crate::clockid_t = 3; - -pub const RLIMIT_CPU: c_int = 0; -pub const RLIMIT_FSIZE: c_int = 1; -pub const RLIMIT_DATA: c_int = 2; -pub const RLIMIT_STACK: c_int = 3; -pub const RLIMIT_CORE: c_int = 4; -pub const RLIMIT_RSS: c_int = 5; -pub const RLIMIT_MEMLOCK: c_int = 6; -pub const RLIMIT_NPROC: c_int = 7; -pub const RLIMIT_NOFILE: c_int = 8; - -pub const RLIM_INFINITY: rlim_t = 0x7fff_ffff_ffff_ffff; -pub const RLIM_SAVED_MAX: rlim_t = RLIM_INFINITY; -pub const RLIM_SAVED_CUR: rlim_t = RLIM_INFINITY; - -pub const RUSAGE_SELF: c_int = 0; -pub const RUSAGE_CHILDREN: c_int = -1; - -pub const MADV_NORMAL: c_int = 0; -pub const MADV_RANDOM: c_int = 1; -pub const MADV_SEQUENTIAL: c_int = 2; -pub const MADV_WILLNEED: c_int = 3; -pub const MADV_DONTNEED: c_int = 4; -pub const MADV_FREE: c_int 
= 6; - -// sys/fstypes.h in NetBSD, or sys/mount.h in OpenBSD -pub const MNT_NODEV: c_int = 0x00000010; -pub const MNT_LOCAL: c_int = 0x00001000; -pub const MNT_QUOTA: c_int = 0x00002000; - -// sys/ioccom.h in NetBSD and OpenBSD -pub const IOCPARM_MASK: u32 = 0x1fff; - -pub const IOC_VOID: c_ulong = 0x20000000; -pub const IOC_OUT: c_ulong = 0x40000000; -pub const IOC_IN: c_ulong = 0x80000000; -pub const IOC_INOUT: c_ulong = IOC_IN | IOC_OUT; -pub const IOC_DIRMASK: c_ulong = 0xe0000000; - -pub const fn _IO(g: c_ulong, n: c_ulong) -> c_ulong { - _IOC(IOC_VOID, g, n, 0) -} - -/// Build an ioctl number for an read-only ioctl. -pub const fn _IOR(g: c_ulong, n: c_ulong) -> c_ulong { - _IOC(IOC_OUT, g, n, mem::size_of::() as c_ulong) -} - -/// Build an ioctl number for an write-only ioctl. -pub const fn _IOW(g: c_ulong, n: c_ulong) -> c_ulong { - _IOC(IOC_IN, g, n, mem::size_of::() as c_ulong) -} - -/// Build an ioctl number for a read-write ioctl. -pub const fn _IOWR(g: c_ulong, n: c_ulong) -> c_ulong { - _IOC(IOC_INOUT, g, n, mem::size_of::() as c_ulong) -} - -pub const AF_UNSPEC: c_int = 0; -pub const AF_LOCAL: c_int = 1; -pub const AF_UNIX: c_int = AF_LOCAL; -pub const AF_INET: c_int = 2; -pub const AF_IMPLINK: c_int = 3; -pub const AF_PUP: c_int = 4; -pub const AF_CHAOS: c_int = 5; -pub const AF_NS: c_int = 6; -pub const AF_ISO: c_int = 7; -pub const AF_OSI: c_int = AF_ISO; -pub const AF_DATAKIT: c_int = 9; -pub const AF_CCITT: c_int = 10; -pub const AF_SNA: c_int = 11; -pub const AF_DECnet: c_int = 12; -pub const AF_DLI: c_int = 13; -pub const AF_LAT: c_int = 14; -pub const AF_HYLINK: c_int = 15; -pub const AF_APPLETALK: c_int = 16; -pub const AF_LINK: c_int = 18; -pub const pseudo_AF_XTP: c_int = 19; -pub const AF_COIP: c_int = 20; -pub const AF_CNT: c_int = 21; -pub const pseudo_AF_RTIP: c_int = 22; -pub const AF_IPX: c_int = 23; -pub const AF_INET6: c_int = 24; -pub const pseudo_AF_PIP: c_int = 25; -pub const AF_ISDN: c_int = 26; -pub const AF_E164: c_int = AF_ISDN; -pub const AF_NATM: c_int = 27; - -pub const PF_UNSPEC: c_int = AF_UNSPEC; -pub const PF_LOCAL: c_int = AF_LOCAL; -pub const PF_UNIX: c_int = PF_LOCAL; -pub const PF_INET: c_int = AF_INET; -pub const PF_IMPLINK: c_int = AF_IMPLINK; -pub const PF_PUP: c_int = AF_PUP; -pub const PF_CHAOS: c_int = AF_CHAOS; -pub const PF_NS: c_int = AF_NS; -pub const PF_ISO: c_int = AF_ISO; -pub const PF_OSI: c_int = AF_ISO; -pub const PF_DATAKIT: c_int = AF_DATAKIT; -pub const PF_CCITT: c_int = AF_CCITT; -pub const PF_SNA: c_int = AF_SNA; -pub const PF_DECnet: c_int = AF_DECnet; -pub const PF_DLI: c_int = AF_DLI; -pub const PF_LAT: c_int = AF_LAT; -pub const PF_HYLINK: c_int = AF_HYLINK; -pub const PF_APPLETALK: c_int = AF_APPLETALK; -pub const PF_LINK: c_int = AF_LINK; -pub const PF_XTP: c_int = pseudo_AF_XTP; -pub const PF_COIP: c_int = AF_COIP; -pub const PF_CNT: c_int = AF_CNT; -pub const PF_IPX: c_int = AF_IPX; -pub const PF_INET6: c_int = AF_INET6; -pub const PF_RTIP: c_int = pseudo_AF_RTIP; -pub const PF_PIP: c_int = pseudo_AF_PIP; -pub const PF_ISDN: c_int = AF_ISDN; -pub const PF_NATM: c_int = AF_NATM; - -pub const SOCK_STREAM: c_int = 1; -pub const SOCK_DGRAM: c_int = 2; -pub const SOCK_RAW: c_int = 3; -pub const SOCK_RDM: c_int = 4; -pub const SOCK_SEQPACKET: c_int = 5; -pub const IP_TTL: c_int = 4; -pub const IP_HDRINCL: c_int = 2; -pub const IP_ADD_MEMBERSHIP: c_int = 12; -pub const IP_DROP_MEMBERSHIP: c_int = 13; -pub const IPV6_RECVPKTINFO: c_int = 36; -pub const IPV6_PKTINFO: c_int = 46; -pub const IPV6_RECVTCLASS: c_int = 57; 
-pub const IPV6_TCLASS: c_int = 61; - -pub const SOL_SOCKET: c_int = 0xffff; -pub const SO_DEBUG: c_int = 0x01; -pub const SO_ACCEPTCONN: c_int = 0x0002; -pub const SO_REUSEADDR: c_int = 0x0004; -pub const SO_KEEPALIVE: c_int = 0x0008; -pub const SO_DONTROUTE: c_int = 0x0010; -pub const SO_BROADCAST: c_int = 0x0020; -pub const SO_USELOOPBACK: c_int = 0x0040; -pub const SO_LINGER: c_int = 0x0080; -pub const SO_OOBINLINE: c_int = 0x0100; -pub const SO_REUSEPORT: c_int = 0x0200; -pub const SO_SNDBUF: c_int = 0x1001; -pub const SO_RCVBUF: c_int = 0x1002; -pub const SO_SNDLOWAT: c_int = 0x1003; -pub const SO_RCVLOWAT: c_int = 0x1004; -pub const SO_ERROR: c_int = 0x1007; -pub const SO_TYPE: c_int = 0x1008; - -pub const SOMAXCONN: c_int = 128; - -pub const MSG_OOB: c_int = 0x1; -pub const MSG_PEEK: c_int = 0x2; -pub const MSG_DONTROUTE: c_int = 0x4; -pub const MSG_EOR: c_int = 0x8; -pub const MSG_TRUNC: c_int = 0x10; -pub const MSG_CTRUNC: c_int = 0x20; -pub const MSG_WAITALL: c_int = 0x40; -pub const MSG_DONTWAIT: c_int = 0x80; -pub const MSG_BCAST: c_int = 0x100; -pub const MSG_MCAST: c_int = 0x200; -pub const MSG_NOSIGNAL: c_int = 0x400; -pub const MSG_CMSG_CLOEXEC: c_int = 0x800; - -pub const SHUT_RD: c_int = 0; -pub const SHUT_WR: c_int = 1; -pub const SHUT_RDWR: c_int = 2; - -pub const LOCK_SH: c_int = 1; -pub const LOCK_EX: c_int = 2; -pub const LOCK_NB: c_int = 4; -pub const LOCK_UN: c_int = 8; - -pub const IPPROTO_RAW: c_int = 255; - -pub const _SC_ARG_MAX: c_int = 1; -pub const _SC_CHILD_MAX: c_int = 2; -pub const _SC_NGROUPS_MAX: c_int = 4; -pub const _SC_OPEN_MAX: c_int = 5; -pub const _SC_JOB_CONTROL: c_int = 6; -pub const _SC_SAVED_IDS: c_int = 7; -pub const _SC_VERSION: c_int = 8; -pub const _SC_BC_BASE_MAX: c_int = 9; -pub const _SC_BC_DIM_MAX: c_int = 10; -pub const _SC_BC_SCALE_MAX: c_int = 11; -pub const _SC_BC_STRING_MAX: c_int = 12; -pub const _SC_COLL_WEIGHTS_MAX: c_int = 13; -pub const _SC_EXPR_NEST_MAX: c_int = 14; -pub const _SC_LINE_MAX: c_int = 15; -pub const _SC_RE_DUP_MAX: c_int = 16; -pub const _SC_2_VERSION: c_int = 17; -pub const _SC_2_C_BIND: c_int = 18; -pub const _SC_2_C_DEV: c_int = 19; -pub const _SC_2_CHAR_TERM: c_int = 20; -pub const _SC_2_FORT_DEV: c_int = 21; -pub const _SC_2_FORT_RUN: c_int = 22; -pub const _SC_2_LOCALEDEF: c_int = 23; -pub const _SC_2_SW_DEV: c_int = 24; -pub const _SC_2_UPE: c_int = 25; -pub const _SC_STREAM_MAX: c_int = 26; -pub const _SC_TZNAME_MAX: c_int = 27; -pub const _SC_PAGESIZE: c_int = 28; -pub const _SC_PAGE_SIZE: c_int = _SC_PAGESIZE; -pub const _SC_FSYNC: c_int = 29; -pub const _SC_XOPEN_SHM: c_int = 30; - -pub const Q_GETQUOTA: c_int = 0x300; -pub const Q_SETQUOTA: c_int = 0x400; - -pub const RTLD_GLOBAL: c_int = 0x100; - -pub const LOG_NFACILITIES: c_int = 24; - -pub const HW_NCPU: c_int = 3; - -pub const B0: speed_t = 0; -pub const B50: speed_t = 50; -pub const B75: speed_t = 75; -pub const B110: speed_t = 110; -pub const B134: speed_t = 134; -pub const B150: speed_t = 150; -pub const B200: speed_t = 200; -pub const B300: speed_t = 300; -pub const B600: speed_t = 600; -pub const B1200: speed_t = 1200; -pub const B1800: speed_t = 1800; -pub const B2400: speed_t = 2400; -pub const B4800: speed_t = 4800; -pub const B9600: speed_t = 9600; -pub const B19200: speed_t = 19200; -pub const B38400: speed_t = 38400; -pub const B7200: speed_t = 7200; -pub const B14400: speed_t = 14400; -pub const B28800: speed_t = 28800; -pub const B57600: speed_t = 57600; -pub const B76800: speed_t = 76800; -pub const B115200: speed_t = 115200; 
-pub const B230400: speed_t = 230400; -pub const EXTA: speed_t = 19200; -pub const EXTB: speed_t = 38400; - -pub const SEM_FAILED: *mut sem_t = ptr::null_mut(); - -pub const CRTSCTS: crate::tcflag_t = 0x00010000; -pub const CRTS_IFLOW: crate::tcflag_t = CRTSCTS; -pub const CCTS_OFLOW: crate::tcflag_t = CRTSCTS; -pub const OCRNL: crate::tcflag_t = 0x10; - -pub const TIOCEXCL: c_ulong = 0x2000740d; -pub const TIOCNXCL: c_ulong = 0x2000740e; -pub const TIOCFLUSH: c_ulong = 0x80047410; -pub const TIOCGETA: c_ulong = 0x402c7413; -pub const TIOCSETA: c_ulong = 0x802c7414; -pub const TIOCSETAW: c_ulong = 0x802c7415; -pub const TIOCSETAF: c_ulong = 0x802c7416; -pub const TIOCGETD: c_ulong = 0x4004741a; -pub const TIOCSETD: c_ulong = 0x8004741b; -pub const TIOCMGET: c_ulong = 0x4004746a; -pub const TIOCMBIC: c_ulong = 0x8004746b; -pub const TIOCMBIS: c_ulong = 0x8004746c; -pub const TIOCMSET: c_ulong = 0x8004746d; -pub const TIOCSTART: c_ulong = 0x2000746e; -pub const TIOCSTOP: c_ulong = 0x2000746f; -pub const TIOCSCTTY: c_ulong = 0x20007461; -pub const TIOCGWINSZ: c_ulong = 0x40087468; -pub const TIOCSWINSZ: c_ulong = 0x80087467; -pub const TIOCM_LE: c_int = 0o0001; -pub const TIOCM_DTR: c_int = 0o0002; -pub const TIOCM_RTS: c_int = 0o0004; -pub const TIOCM_ST: c_int = 0o0010; -pub const TIOCM_SR: c_int = 0o0020; -pub const TIOCM_CTS: c_int = 0o0040; -pub const TIOCM_CAR: c_int = 0o0100; -pub const TIOCM_RNG: c_int = 0o0200; -pub const TIOCM_DSR: c_int = 0o0400; -pub const TIOCM_CD: c_int = TIOCM_CAR; -pub const TIOCM_RI: c_int = TIOCM_RNG; - -pub const TIMER_ABSTIME: c_int = 1; - -// sys/reboot.h - -pub const RB_AUTOBOOT: c_int = 0; - -pub const TCP_INFO: c_int = 9; - -#[link(name = "util")] -extern "C" { - pub fn setgrent(); - pub fn sem_destroy(sem: *mut sem_t) -> c_int; - pub fn sem_init(sem: *mut sem_t, pshared: c_int, value: c_uint) -> c_int; - - pub fn daemon(nochdir: c_int, noclose: c_int) -> c_int; - pub fn accept4( - s: c_int, - addr: *mut crate::sockaddr, - addrlen: *mut crate::socklen_t, - flags: c_int, - ) -> c_int; - pub fn mincore(addr: *mut c_void, len: size_t, vec: *mut c_char) -> c_int; - #[cfg_attr(target_os = "netbsd", link_name = "__clock_getres50")] - pub fn clock_getres(clk_id: crate::clockid_t, tp: *mut crate::timespec) -> c_int; - #[cfg_attr(target_os = "netbsd", link_name = "__clock_gettime50")] - pub fn clock_gettime(clk_id: crate::clockid_t, tp: *mut crate::timespec) -> c_int; - #[cfg_attr(target_os = "netbsd", link_name = "__clock_settime50")] - pub fn clock_settime(clk_id: crate::clockid_t, tp: *const crate::timespec) -> c_int; - pub fn __errno() -> *mut c_int; - pub fn shm_open(name: *const c_char, oflag: c_int, mode: mode_t) -> c_int; - pub fn memrchr(cx: *const c_void, c: c_int, n: size_t) -> *mut c_void; - pub fn mkostemp(template: *mut c_char, flags: c_int) -> c_int; - pub fn mkostemps(template: *mut c_char, suffixlen: c_int, flags: c_int) -> c_int; - pub fn pwritev(fd: c_int, iov: *const crate::iovec, iovcnt: c_int, offset: off_t) -> ssize_t; - pub fn preadv(fd: c_int, iov: *const crate::iovec, iovcnt: c_int, offset: off_t) -> ssize_t; - pub fn futimens(fd: c_int, times: *const crate::timespec) -> c_int; - pub fn utimensat( - dirfd: c_int, - path: *const c_char, - times: *const crate::timespec, - flag: c_int, - ) -> c_int; - pub fn fdatasync(fd: c_int) -> c_int; - pub fn login_tty(fd: c_int) -> c_int; - pub fn getpriority(which: c_int, who: crate::id_t) -> c_int; - pub fn setpriority(which: c_int, who: crate::id_t, prio: c_int) -> c_int; - - pub fn 
mknodat(dirfd: c_int, pathname: *const c_char, mode: mode_t, dev: dev_t) -> c_int; - pub fn mkfifoat(dirfd: c_int, pathname: *const c_char, mode: mode_t) -> c_int; - pub fn sem_timedwait(sem: *mut sem_t, abstime: *const crate::timespec) -> c_int; - pub fn sem_getvalue(sem: *mut sem_t, sval: *mut c_int) -> c_int; - pub fn pthread_condattr_setclock( - attr: *mut pthread_condattr_t, - clock_id: crate::clockid_t, - ) -> c_int; - pub fn sethostname(name: *const c_char, len: size_t) -> c_int; - pub fn pthread_mutex_timedlock( - lock: *mut pthread_mutex_t, - abstime: *const crate::timespec, - ) -> c_int; - pub fn pthread_spin_init(lock: *mut pthread_spinlock_t, pshared: c_int) -> c_int; - pub fn pthread_spin_destroy(lock: *mut pthread_spinlock_t) -> c_int; - pub fn pthread_spin_lock(lock: *mut pthread_spinlock_t) -> c_int; - pub fn pthread_spin_trylock(lock: *mut pthread_spinlock_t) -> c_int; - pub fn pthread_spin_unlock(lock: *mut pthread_spinlock_t) -> c_int; - pub fn pthread_setschedparam( - native: crate::pthread_t, - policy: c_int, - param: *const sched_param, - ) -> c_int; - pub fn pthread_getschedparam( - native: crate::pthread_t, - policy: *mut c_int, - param: *mut sched_param, - ) -> c_int; - pub fn pipe2(fds: *mut c_int, flags: c_int) -> c_int; - - pub fn getgrouplist( - name: *const c_char, - basegid: crate::gid_t, - groups: *mut crate::gid_t, - ngroups: *mut c_int, - ) -> c_int; - pub fn initgroups(name: *const c_char, basegid: crate::gid_t) -> c_int; - pub fn getdomainname(name: *mut c_char, len: size_t) -> c_int; - pub fn setdomainname(name: *const c_char, len: size_t) -> c_int; - pub fn uname(buf: *mut crate::utsname) -> c_int; - - pub fn shmget(key: crate::key_t, size: size_t, shmflg: c_int) -> c_int; - pub fn shmat(shmid: c_int, shmaddr: *const c_void, shmflg: c_int) -> *mut c_void; - pub fn shmdt(shmaddr: *const c_void) -> c_int; - pub fn shmctl(shmid: c_int, cmd: c_int, buf: *mut crate::shmid_ds) -> c_int; - - // DIFF(main): changed to `*const *mut` in e77f551de9 - pub fn execvpe( - file: *const c_char, - argv: *const *const c_char, - envp: *const *const c_char, - ) -> c_int; - - pub fn waitid( - idtype: idtype_t, - id: crate::id_t, - infop: *mut crate::siginfo_t, - options: c_int, - ) -> c_int; - - pub fn posix_spawn( - pid: *mut crate::pid_t, - path: *const c_char, - file_actions: *const crate::posix_spawn_file_actions_t, - attrp: *const crate::posix_spawnattr_t, - argv: *const *mut c_char, - envp: *const *mut c_char, - ) -> c_int; - pub fn posix_spawnp( - pid: *mut crate::pid_t, - file: *const c_char, - file_actions: *const crate::posix_spawn_file_actions_t, - attrp: *const crate::posix_spawnattr_t, - argv: *const *mut c_char, - envp: *const *mut c_char, - ) -> c_int; - pub fn posix_spawnattr_init(attr: *mut posix_spawnattr_t) -> c_int; - pub fn posix_spawnattr_destroy(attr: *mut posix_spawnattr_t) -> c_int; - pub fn posix_spawnattr_getsigdefault( - attr: *const posix_spawnattr_t, - default: *mut crate::sigset_t, - ) -> c_int; - pub fn posix_spawnattr_setsigdefault( - attr: *mut posix_spawnattr_t, - default: *const crate::sigset_t, - ) -> c_int; - pub fn posix_spawnattr_getsigmask( - attr: *const posix_spawnattr_t, - default: *mut crate::sigset_t, - ) -> c_int; - pub fn posix_spawnattr_setsigmask( - attr: *mut posix_spawnattr_t, - default: *const crate::sigset_t, - ) -> c_int; - pub fn posix_spawnattr_getflags(attr: *const posix_spawnattr_t, flags: *mut c_short) -> c_int; - pub fn posix_spawnattr_setflags(attr: *mut posix_spawnattr_t, flags: c_short) -> c_int; - pub fn 
posix_spawnattr_getpgroup( - attr: *const posix_spawnattr_t, - flags: *mut crate::pid_t, - ) -> c_int; - pub fn posix_spawnattr_setpgroup(attr: *mut posix_spawnattr_t, flags: crate::pid_t) -> c_int; - pub fn posix_spawnattr_getschedpolicy( - attr: *const posix_spawnattr_t, - flags: *mut c_int, - ) -> c_int; - pub fn posix_spawnattr_setschedpolicy(attr: *mut posix_spawnattr_t, flags: c_int) -> c_int; - pub fn posix_spawnattr_getschedparam( - attr: *const posix_spawnattr_t, - param: *mut crate::sched_param, - ) -> c_int; - pub fn posix_spawnattr_setschedparam( - attr: *mut posix_spawnattr_t, - param: *const crate::sched_param, - ) -> c_int; - - pub fn posix_spawn_file_actions_init(actions: *mut posix_spawn_file_actions_t) -> c_int; - pub fn posix_spawn_file_actions_destroy(actions: *mut posix_spawn_file_actions_t) -> c_int; - pub fn posix_spawn_file_actions_addopen( - actions: *mut posix_spawn_file_actions_t, - fd: c_int, - path: *const c_char, - oflag: c_int, - mode: mode_t, - ) -> c_int; - pub fn posix_spawn_file_actions_addclose( - actions: *mut posix_spawn_file_actions_t, - fd: c_int, - ) -> c_int; - pub fn posix_spawn_file_actions_adddup2( - actions: *mut posix_spawn_file_actions_t, - fd: c_int, - newfd: c_int, - ) -> c_int; -} - -extern "C" { - pub fn reallocarray(ptr: *mut c_void, nmemb: size_t, size: size_t) -> *mut c_void; - pub fn gethostid() -> c_long; - pub fn sethostid(hostid: c_long) -> c_int; - pub fn ftok(path: *const c_char, id: c_int) -> crate::key_t; - - pub fn dirname(path: *mut c_char) -> *mut c_char; - pub fn basename(path: *mut c_char) -> *mut c_char; - pub fn getentropy(buf: *mut c_void, buflen: size_t) -> c_int; - - pub fn sendmmsg(sockfd: c_int, mmsg: *mut crate::mmsghdr, vlen: c_uint, flags: c_int) -> c_int; - pub fn recvmmsg( - sockfd: c_int, - mmsg: *mut crate::mmsghdr, - vlen: c_uint, - flags: c_int, - timeout: *mut crate::timespec, - ) -> c_int; - - pub fn closefrom(lowfd: c_int) -> c_int; -} - -cfg_if! { - if #[cfg(target_os = "netbsd")] { - mod netbsd; - pub use self::netbsd::*; - } else if #[cfg(target_os = "openbsd")] { - mod openbsd; - pub use self::openbsd::*; - } else { - // Unknown target_os - } -} diff --git a/vendor/libc/src/unix/bsd/netbsdlike/netbsd/aarch64.rs b/vendor/libc/src/unix/bsd/netbsdlike/netbsd/aarch64.rs deleted file mode 100644 index e0206af04f8f1b..00000000000000 --- a/vendor/libc/src/unix/bsd/netbsdlike/netbsd/aarch64.rs +++ /dev/null @@ -1,132 +0,0 @@ -use crate::prelude::*; -use crate::PT_FIRSTMACH; - -pub type greg_t = u64; -pub type __cpu_simple_lock_nv_t = c_uchar; - -s! { - pub struct __fregset { - pub __qregs: [__c_anonymous__freg; 32], - pub __fpcr: u32, - pub __fpsr: u32, - } - - pub struct mcontext_t { - pub __gregs: [crate::greg_t; 32], - pub __fregs: __fregset, - __spare: [crate::greg_t; 8], - } - - pub struct ucontext_t { - pub uc_flags: c_uint, - pub uc_link: *mut ucontext_t, - pub uc_sigmask: crate::sigset_t, - pub uc_stack: crate::stack_t, - pub uc_mcontext: mcontext_t, - } -} - -s_no_extra_traits! { - #[repr(align(16))] - pub union __c_anonymous__freg { - pub __b8: [u8; 16], - pub __h16: [u16; 8], - pub __s32: [u32; 4], - pub __d64: [u64; 2], - pub __q128: [u128; 1], - } -} - -cfg_if! 
{ - if #[cfg(feature = "extra_traits")] { - impl PartialEq for __c_anonymous__freg { - fn eq(&self, other: &__c_anonymous__freg) -> bool { - unsafe { - self.__b8 == other.__b8 - || self.__h16 == other.__h16 - || self.__s32 == other.__s32 - || self.__d64 == other.__d64 - || self.__q128 == other.__q128 - } - } - } - impl Eq for __c_anonymous__freg {} - impl hash::Hash for __c_anonymous__freg { - fn hash(&self, state: &mut H) { - unsafe { - self.__b8.hash(state); - self.__h16.hash(state); - self.__s32.hash(state); - self.__d64.hash(state); - self.__q128.hash(state); - } - } - } - } -} - -pub(crate) const _ALIGNBYTES: usize = size_of::() - 1; - -pub const PT_GETREGS: c_int = PT_FIRSTMACH + 0; -pub const PT_SETREGS: c_int = PT_FIRSTMACH + 1; -pub const PT_GETFPREGS: c_int = PT_FIRSTMACH + 2; -pub const PT_SETFPREGS: c_int = PT_FIRSTMACH + 3; - -pub const _REG_R0: c_int = 0; -pub const _REG_R1: c_int = 1; -pub const _REG_R2: c_int = 2; -pub const _REG_R3: c_int = 3; -pub const _REG_R4: c_int = 4; -pub const _REG_R5: c_int = 5; -pub const _REG_R6: c_int = 6; -pub const _REG_R7: c_int = 7; -pub const _REG_R8: c_int = 8; -pub const _REG_R9: c_int = 9; -pub const _REG_R10: c_int = 10; -pub const _REG_R11: c_int = 11; -pub const _REG_R12: c_int = 12; -pub const _REG_R13: c_int = 13; -pub const _REG_R14: c_int = 14; -pub const _REG_R15: c_int = 15; -pub const _REG_CPSR: c_int = 16; -pub const _REG_X0: c_int = 0; -pub const _REG_X1: c_int = 1; -pub const _REG_X2: c_int = 2; -pub const _REG_X3: c_int = 3; -pub const _REG_X4: c_int = 4; -pub const _REG_X5: c_int = 5; -pub const _REG_X6: c_int = 6; -pub const _REG_X7: c_int = 7; -pub const _REG_X8: c_int = 8; -pub const _REG_X9: c_int = 9; -pub const _REG_X10: c_int = 10; -pub const _REG_X11: c_int = 11; -pub const _REG_X12: c_int = 12; -pub const _REG_X13: c_int = 13; -pub const _REG_X14: c_int = 14; -pub const _REG_X15: c_int = 15; -pub const _REG_X16: c_int = 16; -pub const _REG_X17: c_int = 17; -pub const _REG_X18: c_int = 18; -pub const _REG_X19: c_int = 19; -pub const _REG_X20: c_int = 20; -pub const _REG_X21: c_int = 21; -pub const _REG_X22: c_int = 22; -pub const _REG_X23: c_int = 23; -pub const _REG_X24: c_int = 24; -pub const _REG_X25: c_int = 25; -pub const _REG_X26: c_int = 26; -pub const _REG_X27: c_int = 27; -pub const _REG_X28: c_int = 28; -pub const _REG_X29: c_int = 29; -pub const _REG_X30: c_int = 30; -pub const _REG_X31: c_int = 31; -pub const _REG_ELR: c_int = 32; -pub const _REG_SPSR: c_int = 33; -pub const _REG_TIPDR: c_int = 34; - -pub const _REG_RV: c_int = _REG_X0; -pub const _REG_FP: c_int = _REG_X29; -pub const _REG_LR: c_int = _REG_X30; -pub const _REG_SP: c_int = _REG_X31; -pub const _REG_PC: c_int = _REG_ELR; diff --git a/vendor/libc/src/unix/bsd/netbsdlike/netbsd/arm.rs b/vendor/libc/src/unix/bsd/netbsdlike/netbsd/arm.rs deleted file mode 100644 index 9ff44bd40826a2..00000000000000 --- a/vendor/libc/src/unix/bsd/netbsdlike/netbsd/arm.rs +++ /dev/null @@ -1,70 +0,0 @@ -use crate::prelude::*; -use crate::PT_FIRSTMACH; - -pub type __cpu_simple_lock_nv_t = c_int; - -pub(crate) const _ALIGNBYTES: usize = size_of::() - 1; - -pub const PT_GETREGS: c_int = PT_FIRSTMACH + 1; -pub const PT_SETREGS: c_int = PT_FIRSTMACH + 2; -pub const PT_GETFPREGS: c_int = PT_FIRSTMACH + 3; -pub const PT_SETFPREGS: c_int = PT_FIRSTMACH + 4; - -pub const _REG_R0: c_int = 0; -pub const _REG_R1: c_int = 1; -pub const _REG_R2: c_int = 2; -pub const _REG_R3: c_int = 3; -pub const _REG_R4: c_int = 4; -pub const _REG_R5: c_int = 5; -pub const _REG_R6: c_int 
= 6; -pub const _REG_R7: c_int = 7; -pub const _REG_R8: c_int = 8; -pub const _REG_R9: c_int = 9; -pub const _REG_R10: c_int = 10; -pub const _REG_R11: c_int = 11; -pub const _REG_R12: c_int = 12; -pub const _REG_R13: c_int = 13; -pub const _REG_R14: c_int = 14; -pub const _REG_R15: c_int = 15; -pub const _REG_CPSR: c_int = 16; -pub const _REG_X0: c_int = 0; -pub const _REG_X1: c_int = 1; -pub const _REG_X2: c_int = 2; -pub const _REG_X3: c_int = 3; -pub const _REG_X4: c_int = 4; -pub const _REG_X5: c_int = 5; -pub const _REG_X6: c_int = 6; -pub const _REG_X7: c_int = 7; -pub const _REG_X8: c_int = 8; -pub const _REG_X9: c_int = 9; -pub const _REG_X10: c_int = 10; -pub const _REG_X11: c_int = 11; -pub const _REG_X12: c_int = 12; -pub const _REG_X13: c_int = 13; -pub const _REG_X14: c_int = 14; -pub const _REG_X15: c_int = 15; -pub const _REG_X16: c_int = 16; -pub const _REG_X17: c_int = 17; -pub const _REG_X18: c_int = 18; -pub const _REG_X19: c_int = 19; -pub const _REG_X20: c_int = 20; -pub const _REG_X21: c_int = 21; -pub const _REG_X22: c_int = 22; -pub const _REG_X23: c_int = 23; -pub const _REG_X24: c_int = 24; -pub const _REG_X25: c_int = 25; -pub const _REG_X26: c_int = 26; -pub const _REG_X27: c_int = 27; -pub const _REG_X28: c_int = 28; -pub const _REG_X29: c_int = 29; -pub const _REG_X30: c_int = 30; -pub const _REG_X31: c_int = 31; -pub const _REG_ELR: c_int = 32; -pub const _REG_SPSR: c_int = 33; -pub const _REG_TIPDR: c_int = 34; - -pub const _REG_RV: c_int = _REG_R0; -pub const _REG_FP: c_int = _REG_R11; -pub const _REG_LR: c_int = _REG_R13; -pub const _REG_SP: c_int = _REG_R14; -pub const _REG_PC: c_int = _REG_R15; diff --git a/vendor/libc/src/unix/bsd/netbsdlike/netbsd/mips.rs b/vendor/libc/src/unix/bsd/netbsdlike/netbsd/mips.rs deleted file mode 100644 index 1b24b4f6e3159a..00000000000000 --- a/vendor/libc/src/unix/bsd/netbsdlike/netbsd/mips.rs +++ /dev/null @@ -1,11 +0,0 @@ -use crate::prelude::*; -use crate::PT_FIRSTMACH; - -pub type __cpu_simple_lock_nv_t = c_int; - -pub(crate) const _ALIGNBYTES: usize = size_of::() - 1; - -pub const PT_GETREGS: c_int = PT_FIRSTMACH + 1; -pub const PT_SETREGS: c_int = PT_FIRSTMACH + 2; -pub const PT_GETFPREGS: c_int = PT_FIRSTMACH + 3; -pub const PT_SETFPREGS: c_int = PT_FIRSTMACH + 4; diff --git a/vendor/libc/src/unix/bsd/netbsdlike/netbsd/mod.rs b/vendor/libc/src/unix/bsd/netbsdlike/netbsd/mod.rs deleted file mode 100644 index 9f0831323af798..00000000000000 --- a/vendor/libc/src/unix/bsd/netbsdlike/netbsd/mod.rs +++ /dev/null @@ -1,3007 +0,0 @@ -use crate::prelude::*; -use crate::{cmsghdr, off_t}; - -pub type clock_t = c_uint; -pub type suseconds_t = c_int; -pub type dev_t = u64; -pub type blksize_t = i32; -pub type fsblkcnt_t = u64; -pub type fsfilcnt_t = u64; -pub type idtype_t = c_int; -pub type mqd_t = c_int; -type __pthread_spin_t = __cpu_simple_lock_nv_t; -pub type vm_size_t = crate::uintptr_t; // FIXME(deprecated): deprecated since long time -pub type lwpid_t = c_uint; -pub type shmatt_t = c_uint; -pub type cpuid_t = c_ulong; -pub type cpuset_t = _cpuset; -pub type pthread_spin_t = c_uchar; -pub type timer_t = c_int; - -// elf.h - -pub type Elf32_Addr = u32; -pub type Elf32_Half = u16; -pub type Elf32_Lword = u64; -pub type Elf32_Off = u32; -pub type Elf32_Sword = i32; -pub type Elf32_Word = u32; - -pub type Elf64_Addr = u64; -pub type Elf64_Half = u16; -pub type Elf64_Lword = u64; -pub type Elf64_Off = u64; -pub type Elf64_Sword = i32; -pub type Elf64_Sxword = i64; -pub type Elf64_Word = u32; -pub type Elf64_Xword = u64; - 
-pub type iconv_t = *mut c_void; - -e! { - #[repr(C)] - pub enum fae_action { - FAE_OPEN, - FAE_DUP2, - FAE_CLOSE, - } -} - -cfg_if! { - if #[cfg(target_pointer_width = "64")] { - type Elf_Addr = Elf64_Addr; - type Elf_Half = Elf64_Half; - type Elf_Phdr = Elf64_Phdr; - } else if #[cfg(target_pointer_width = "32")] { - type Elf_Addr = Elf32_Addr; - type Elf_Half = Elf32_Half; - type Elf_Phdr = Elf32_Phdr; - } -} - -impl siginfo_t { - pub unsafe fn si_addr(&self) -> *mut c_void { - self.si_addr - } - - pub unsafe fn si_code(&self) -> c_int { - self.si_code - } - - pub unsafe fn si_errno(&self) -> c_int { - self.si_errno - } - - pub unsafe fn si_pid(&self) -> crate::pid_t { - #[repr(C)] - struct siginfo_timer { - _si_signo: c_int, - _si_errno: c_int, - _si_code: c_int, - __pad1: c_int, - _pid: crate::pid_t, - } - (*(self as *const siginfo_t as *const siginfo_timer))._pid - } - - pub unsafe fn si_uid(&self) -> crate::uid_t { - #[repr(C)] - struct siginfo_timer { - _si_signo: c_int, - _si_errno: c_int, - _si_code: c_int, - __pad1: c_int, - _pid: crate::pid_t, - _uid: crate::uid_t, - } - (*(self as *const siginfo_t as *const siginfo_timer))._uid - } - - pub unsafe fn si_value(&self) -> crate::sigval { - #[repr(C)] - struct siginfo_timer { - _si_signo: c_int, - _si_errno: c_int, - _si_code: c_int, - __pad1: c_int, - _pid: crate::pid_t, - _uid: crate::uid_t, - value: crate::sigval, - } - (*(self as *const siginfo_t as *const siginfo_timer)).value - } - - pub unsafe fn si_status(&self) -> c_int { - #[repr(C)] - struct siginfo_timer { - _si_signo: c_int, - _si_errno: c_int, - _si_code: c_int, - __pad1: c_int, - _pid: crate::pid_t, - _uid: crate::uid_t, - _value: crate::sigval, - _cpid: crate::pid_t, - _cuid: crate::uid_t, - status: c_int, - } - (*(self as *const siginfo_t as *const siginfo_timer)).status - } -} - -s! 
{ - pub struct aiocb { - pub aio_offset: off_t, - pub aio_buf: *mut c_void, - pub aio_nbytes: size_t, - pub aio_fildes: c_int, - pub aio_lio_opcode: c_int, - pub aio_reqprio: c_int, - pub aio_sigevent: crate::sigevent, - _state: c_int, - _errno: c_int, - _retval: ssize_t, - } - - pub struct glob_t { - pub gl_pathc: size_t, - pub gl_matchc: size_t, - pub gl_offs: size_t, - pub gl_flags: c_int, - pub gl_pathv: *mut *mut c_char, - - __unused3: *mut c_void, - - __unused4: *mut c_void, - __unused5: *mut c_void, - __unused6: *mut c_void, - __unused7: *mut c_void, - __unused8: *mut c_void, - } - - pub struct mq_attr { - pub mq_flags: c_long, - pub mq_maxmsg: c_long, - pub mq_msgsize: c_long, - pub mq_curmsgs: c_long, - } - - pub struct itimerspec { - pub it_interval: crate::timespec, - pub it_value: crate::timespec, - } - - pub struct sigset_t { - __bits: [u32; 4], - } - - pub struct stat { - pub st_dev: crate::dev_t, - pub st_mode: crate::mode_t, - pub st_ino: crate::ino_t, - pub st_nlink: crate::nlink_t, - pub st_uid: crate::uid_t, - pub st_gid: crate::gid_t, - pub st_rdev: crate::dev_t, - pub st_atime: crate::time_t, - pub st_atimensec: c_long, - pub st_mtime: crate::time_t, - pub st_mtimensec: c_long, - pub st_ctime: crate::time_t, - pub st_ctimensec: c_long, - pub st_birthtime: crate::time_t, - pub st_birthtimensec: c_long, - pub st_size: off_t, - pub st_blocks: crate::blkcnt_t, - pub st_blksize: crate::blksize_t, - pub st_flags: u32, - pub st_gen: u32, - pub st_spare: [u32; 2], - } - - pub struct addrinfo { - pub ai_flags: c_int, - pub ai_family: c_int, - pub ai_socktype: c_int, - pub ai_protocol: c_int, - pub ai_addrlen: crate::socklen_t, - pub ai_canonname: *mut c_char, - pub ai_addr: *mut crate::sockaddr, - pub ai_next: *mut crate::addrinfo, - } - - pub struct siginfo_t { - pub si_signo: c_int, - pub si_code: c_int, - pub si_errno: c_int, - __pad1: c_int, - pub si_addr: *mut c_void, - __pad2: [u64; 13], - } - - pub struct pthread_attr_t { - pta_magic: c_uint, - pta_flags: c_int, - pta_private: *mut c_void, - } - - pub struct pthread_mutex_t { - ptm_magic: c_uint, - ptm_errorcheck: __pthread_spin_t, - #[cfg(any( - target_arch = "sparc", - target_arch = "sparc64", - target_arch = "x86", - target_arch = "x86_64" - ))] - ptm_pad1: [u8; 3], - // actually a union with a non-unused, 0-initialized field - ptm_unused: __pthread_spin_t, - #[cfg(any( - target_arch = "sparc", - target_arch = "sparc64", - target_arch = "x86", - target_arch = "x86_64" - ))] - ptm_pad2: [u8; 3], - ptm_owner: crate::pthread_t, - ptm_waiters: *mut u8, - ptm_recursed: c_uint, - ptm_spare2: *mut c_void, - } - - pub struct pthread_mutexattr_t { - ptma_magic: c_uint, - ptma_private: *mut c_void, - } - - pub struct pthread_rwlockattr_t { - ptra_magic: c_uint, - ptra_private: *mut c_void, - } - - pub struct pthread_cond_t { - ptc_magic: c_uint, - ptc_lock: __pthread_spin_t, - ptc_waiters_first: *mut u8, - ptc_waiters_last: *mut u8, - ptc_mutex: *mut crate::pthread_mutex_t, - ptc_private: *mut c_void, - } - - pub struct pthread_condattr_t { - ptca_magic: c_uint, - ptca_private: *mut c_void, - } - - pub struct pthread_rwlock_t { - ptr_magic: c_uint, - ptr_interlock: __pthread_spin_t, - ptr_rblocked_first: *mut u8, - ptr_rblocked_last: *mut u8, - ptr_wblocked_first: *mut u8, - ptr_wblocked_last: *mut u8, - ptr_nreaders: c_uint, - ptr_owner: crate::pthread_t, - ptr_private: *mut c_void, - } - - pub struct pthread_spinlock_t { - pts_magic: c_uint, - pts_spin: crate::pthread_spin_t, - pts_flags: c_int, - } - - pub struct kevent { - 
pub ident: crate::uintptr_t, - pub filter: u32, - pub flags: u32, - pub fflags: u32, - pub data: i64, - // FIXME(netbsd): NetBSD 10.0 will finally have same layout as other BSD - pub udata: intptr_t, - } - - pub struct dqblk { - pub dqb_bhardlimit: u32, - pub dqb_bsoftlimit: u32, - pub dqb_curblocks: u32, - pub dqb_ihardlimit: u32, - pub dqb_isoftlimit: u32, - pub dqb_curinodes: u32, - pub dqb_btime: i32, - pub dqb_itime: i32, - } - - pub struct Dl_info { - pub dli_fname: *const c_char, - pub dli_fbase: *mut c_void, - pub dli_sname: *const c_char, - pub dli_saddr: *const c_void, - } - - pub struct lconv { - pub decimal_point: *mut c_char, - pub thousands_sep: *mut c_char, - pub grouping: *mut c_char, - pub int_curr_symbol: *mut c_char, - pub currency_symbol: *mut c_char, - pub mon_decimal_point: *mut c_char, - pub mon_thousands_sep: *mut c_char, - pub mon_grouping: *mut c_char, - pub positive_sign: *mut c_char, - pub negative_sign: *mut c_char, - pub int_frac_digits: c_char, - pub frac_digits: c_char, - pub p_cs_precedes: c_char, - pub p_sep_by_space: c_char, - pub n_cs_precedes: c_char, - pub n_sep_by_space: c_char, - pub p_sign_posn: c_char, - pub n_sign_posn: c_char, - pub int_p_cs_precedes: c_char, - pub int_n_cs_precedes: c_char, - pub int_p_sep_by_space: c_char, - pub int_n_sep_by_space: c_char, - pub int_p_sign_posn: c_char, - pub int_n_sign_posn: c_char, - } - - pub struct if_data { - pub ifi_type: c_uchar, - pub ifi_addrlen: c_uchar, - pub ifi_hdrlen: c_uchar, - pub ifi_link_state: c_int, - pub ifi_mtu: u64, - pub ifi_metric: u64, - pub ifi_baudrate: u64, - pub ifi_ipackets: u64, - pub ifi_ierrors: u64, - pub ifi_opackets: u64, - pub ifi_oerrors: u64, - pub ifi_collisions: u64, - pub ifi_ibytes: u64, - pub ifi_obytes: u64, - pub ifi_imcasts: u64, - pub ifi_omcasts: u64, - pub ifi_iqdrops: u64, - pub ifi_noproto: u64, - pub ifi_lastchange: crate::timespec, - } - - pub struct if_msghdr { - pub ifm_msglen: c_ushort, - pub ifm_version: c_uchar, - pub ifm_type: c_uchar, - pub ifm_addrs: c_int, - pub ifm_flags: c_int, - pub ifm_index: c_ushort, - pub ifm_data: if_data, - } - - pub struct sockcred { - pub sc_pid: crate::pid_t, - pub sc_uid: crate::uid_t, - pub sc_euid: crate::uid_t, - pub sc_gid: crate::gid_t, - pub sc_egid: crate::gid_t, - pub sc_ngroups: c_int, - pub sc_groups: [crate::gid_t; 1], - } - - pub struct uucred { - pub cr_unused: c_ushort, - pub cr_uid: crate::uid_t, - pub cr_gid: crate::gid_t, - pub cr_ngroups: c_int, - pub cr_groups: [crate::gid_t; NGROUPS_MAX as usize], - } - - pub struct unpcbid { - pub unp_pid: crate::pid_t, - pub unp_euid: crate::uid_t, - pub unp_egid: crate::gid_t, - } - - pub struct sockaddr_dl { - pub sdl_len: c_uchar, - pub sdl_family: c_uchar, - pub sdl_index: c_ushort, - pub sdl_type: u8, - pub sdl_nlen: u8, - pub sdl_alen: u8, - pub sdl_slen: u8, - pub sdl_data: [c_char; 12], - } - - pub struct __exit_status { - pub e_termination: u16, - pub e_exit: u16, - } - - pub struct shmid_ds { - pub shm_perm: crate::ipc_perm, - pub shm_segsz: size_t, - pub shm_lpid: crate::pid_t, - pub shm_cpid: crate::pid_t, - pub shm_nattch: crate::shmatt_t, - pub shm_atime: crate::time_t, - pub shm_dtime: crate::time_t, - pub shm_ctime: crate::time_t, - _shm_internal: *mut c_void, - } - - pub struct utmp { - pub ut_line: [c_char; UT_LINESIZE], - pub ut_name: [c_char; UT_NAMESIZE], - pub ut_host: [c_char; UT_HOSTSIZE], - pub ut_time: crate::time_t, - } - - pub struct lastlog { - pub ll_line: [c_char; UT_LINESIZE], - pub ll_host: [c_char; UT_HOSTSIZE], - pub ll_time: 
crate::time_t, - } - - pub struct timex { - pub modes: c_uint, - pub offset: c_long, - pub freq: c_long, - pub maxerror: c_long, - pub esterror: c_long, - pub status: c_int, - pub constant: c_long, - pub precision: c_long, - pub tolerance: c_long, - pub ppsfreq: c_long, - pub jitter: c_long, - pub shift: c_int, - pub stabil: c_long, - pub jitcnt: c_long, - pub calcnt: c_long, - pub errcnt: c_long, - pub stbcnt: c_long, - } - - pub struct ntptimeval { - pub time: crate::timespec, - pub maxerror: c_long, - pub esterror: c_long, - pub tai: c_long, - pub time_state: c_int, - } - - // elf.h - - pub struct Elf32_Phdr { - pub p_type: Elf32_Word, - pub p_offset: Elf32_Off, - pub p_vaddr: Elf32_Addr, - pub p_paddr: Elf32_Addr, - pub p_filesz: Elf32_Word, - pub p_memsz: Elf32_Word, - pub p_flags: Elf32_Word, - pub p_align: Elf32_Word, - } - - pub struct Elf64_Phdr { - pub p_type: Elf64_Word, - pub p_flags: Elf64_Word, - pub p_offset: Elf64_Off, - pub p_vaddr: Elf64_Addr, - pub p_paddr: Elf64_Addr, - pub p_filesz: Elf64_Xword, - pub p_memsz: Elf64_Xword, - pub p_align: Elf64_Xword, - } - - pub struct Aux32Info { - pub a_type: Elf32_Word, - pub a_v: Elf32_Word, - } - - pub struct Aux64Info { - pub a_type: Elf64_Word, - pub a_v: Elf64_Xword, - } - - // link.h - - pub struct dl_phdr_info { - pub dlpi_addr: Elf_Addr, - pub dlpi_name: *const c_char, - pub dlpi_phdr: *const Elf_Phdr, - pub dlpi_phnum: Elf_Half, - pub dlpi_adds: c_ulonglong, - pub dlpi_subs: c_ulonglong, - pub dlpi_tls_modid: usize, - pub dlpi_tls_data: *mut c_void, - } - - pub struct _cpuset { - bits: [u32; 0], - } - - pub struct accept_filter_arg { - pub af_name: [c_char; 16], - af_arg: [c_char; 256 - 16], - } - - pub struct ki_sigset_t { - pub __bits: [u32; 4], - } - - pub struct kinfo_proc2 { - pub p_forw: u64, - pub p_back: u64, - pub p_paddr: u64, - pub p_addr: u64, - pub p_fd: u64, - pub p_cwdi: u64, - pub p_stats: u64, - pub p_limit: u64, - pub p_vmspace: u64, - pub p_sigacts: u64, - pub p_sess: u64, - pub p_tsess: u64, - pub p_ru: u64, - pub p_eflag: i32, - pub p_exitsig: i32, - pub p_flag: i32, - pub p_pid: i32, - pub p_ppid: i32, - pub p_sid: i32, - pub p__pgid: i32, - pub p_tpgid: i32, - pub p_uid: u32, - pub p_ruid: u32, - pub p_gid: u32, - pub p_rgid: u32, - pub p_groups: [u32; KI_NGROUPS as usize], - pub p_ngroups: i16, - pub p_jobc: i16, - pub p_tdev: u32, - pub p_estcpu: u32, - pub p_rtime_sec: u32, - pub p_rtime_usec: u32, - pub p_cpticks: i32, - pub p_pctcpu: u32, - pub p_swtime: u32, - pub p_slptime: u32, - pub p_schedflags: i32, - pub p_uticks: u64, - pub p_sticks: u64, - pub p_iticks: u64, - pub p_tracep: u64, - pub p_traceflag: i32, - pub p_holdcnt: i32, - pub p_siglist: ki_sigset_t, - pub p_sigmask: ki_sigset_t, - pub p_sigignore: ki_sigset_t, - pub p_sigcatch: ki_sigset_t, - pub p_stat: i8, - pub p_priority: u8, - pub p_usrpri: u8, - pub p_nice: u8, - pub p_xstat: u16, - pub p_acflag: u16, - pub p_comm: [c_char; KI_MAXCOMLEN as usize], - pub p_wmesg: [c_char; KI_WMESGLEN as usize], - pub p_wchan: u64, - pub p_login: [c_char; KI_MAXLOGNAME as usize], - pub p_vm_rssize: i32, - pub p_vm_tsize: i32, - pub p_vm_dsize: i32, - pub p_vm_ssize: i32, - pub p_uvalid: i64, - pub p_ustart_sec: u32, - pub p_ustart_usec: u32, - pub p_uutime_sec: u32, - pub p_uutime_usec: u32, - pub p_ustime_sec: u32, - pub p_ustime_usec: u32, - pub p_uru_maxrss: u64, - pub p_uru_ixrss: u64, - pub p_uru_idrss: u64, - pub p_uru_isrss: u64, - pub p_uru_minflt: u64, - pub p_uru_majflt: u64, - pub p_uru_nswap: u64, - pub p_uru_inblock: u64, - pub 
p_uru_oublock: u64, - pub p_uru_msgsnd: u64, - pub p_uru_msgrcv: u64, - pub p_uru_nsignals: u64, - pub p_uru_nvcsw: u64, - pub p_uru_nivcsw: u64, - pub p_uctime_sec: u32, - pub p_uctime_usec: u32, - pub p_cpuid: u64, - pub p_realflag: u64, - pub p_nlwps: u64, - pub p_nrlwps: u64, - pub p_realstat: u64, - pub p_svuid: u32, - pub p_svgid: u32, - pub p_ename: [c_char; KI_MAXEMULLEN as usize], - pub p_vm_vsize: i64, - pub p_vm_msize: i64, - } - - pub struct kinfo_lwp { - pub l_forw: u64, - pub l_back: u64, - pub l_laddr: u64, - pub l_addr: u64, - pub l_lid: i32, - pub l_flag: i32, - pub l_swtime: u32, - pub l_slptime: u32, - pub l_schedflags: i32, - pub l_holdcnt: i32, - pub l_priority: u8, - pub l_usrpri: u8, - pub l_stat: i8, - l_pad1: i8, - l_pad2: i32, - pub l_wmesg: [c_char; KI_WMESGLEN as usize], - pub l_wchan: u64, - pub l_cpuid: u64, - pub l_rtime_sec: u32, - pub l_rtime_usec: u32, - pub l_cpticks: u32, - pub l_pctcpu: u32, - pub l_pid: u32, - pub l_name: [c_char; KI_LNAMELEN as usize], - } - - pub struct kinfo_vmentry { - pub kve_start: u64, - pub kve_end: u64, - pub kve_offset: u64, - pub kve_type: u32, - pub kve_flags: u32, - pub kve_count: u32, - pub kve_wired_count: u32, - pub kve_advice: u32, - pub kve_attributes: u32, - pub kve_protection: u32, - pub kve_max_protection: u32, - pub kve_ref_count: u32, - pub kve_inheritance: u32, - pub kve_vn_fileid: u64, - pub kve_vn_size: u64, - pub kve_vn_fsid: u64, - pub kve_vn_rdev: u64, - pub kve_vn_type: u32, - pub kve_vn_mode: u32, - pub kve_path: [[c_char; 32]; 32], - } - - pub struct __c_anonymous_posix_spawn_fae_open { - pub path: *mut c_char, - pub oflag: c_int, - pub mode: crate::mode_t, - } - - pub struct __c_anonymous_posix_spawn_fae_dup2 { - pub newfildes: c_int, - } - - pub struct posix_spawnattr_t { - pub sa_flags: c_short, - pub sa_pgroup: crate::pid_t, - pub sa_schedparam: crate::sched_param, - pub sa_schedpolicy: c_int, - pub sa_sigdefault: sigset_t, - pub sa_sigmask: sigset_t, - } - - pub struct posix_spawn_file_actions_entry_t { - pub fae_action: fae_action, - pub fae_fildes: c_int, - pub fae_data: __c_anonymous_posix_spawn_fae, - } - - pub struct posix_spawn_file_actions_t { - pub size: c_uint, - pub len: c_uint, - pub fae: *mut posix_spawn_file_actions_entry_t, - } - - pub struct ptrace_lwpinfo { - pub pl_lwpid: lwpid_t, - pub pl_event: c_int, - } - - pub struct ptrace_lwpstatus { - pub pl_lwpid: lwpid_t, - pub pl_sigpend: sigset_t, - pub pl_sigmask: sigset_t, - pub pl_name: [c_char; 20], - pub pl_private: *mut c_void, - } - - pub struct ptrace_siginfo { - pub psi_siginfo: siginfo_t, - pub psi_lwpid: lwpid_t, - } - - pub struct ptrace_event { - pub pe_set_event: c_int, - } - - pub struct sysctldesc { - pub descr_num: i32, - pub descr_ver: u32, - pub descr_len: u32, - pub descr_str: [c_char; 1], - } - - pub struct ifreq { - pub _priv: [[c_char; 6]; 24], - } - - pub struct ifconf { - pub ifc_len: c_int, - pub ifc_ifcu: __c_anonymous_ifc_ifcu, - } - - pub struct tcp_info { - pub tcpi_state: u8, - pub __tcpi_ca_state: u8, - pub __tcpi_retransmits: u8, - pub __tcpi_probes: u8, - pub __tcpi_backoff: u8, - pub tcpi_options: u8, - pub tcp_snd_wscale: u8, - pub tcp_rcv_wscale: u8, - pub tcpi_rto: u32, - pub __tcpi_ato: u32, - pub tcpi_snd_mss: u32, - pub tcpi_rcv_mss: u32, - pub __tcpi_unacked: u32, - pub __tcpi_sacked: u32, - pub __tcpi_lost: u32, - pub __tcpi_retrans: u32, - pub __tcpi_fackets: u32, - pub __tcpi_last_data_sent: u32, - pub __tcpi_last_ack_sent: u32, - pub tcpi_last_data_recv: u32, - pub __tcpi_last_ack_recv: u32, 
- pub __tcpi_pmtu: u32, - pub __tcpi_rcv_ssthresh: u32, - pub tcpi_rtt: u32, - pub tcpi_rttvar: u32, - pub tcpi_snd_ssthresh: u32, - pub tcpi_snd_cwnd: u32, - pub __tcpi_advmss: u32, - pub __tcpi_reordering: u32, - pub __tcpi_rcv_rtt: u32, - pub tcpi_rcv_space: u32, - pub tcpi_snd_wnd: u32, - pub tcpi_snd_bwnd: u32, - pub tcpi_snd_nxt: u32, - pub tcpi_rcv_nxt: u32, - pub tcpi_toe_tid: u32, - pub tcpi_snd_rexmitpack: u32, - pub tcpi_rcv_ooopack: u32, - pub tcpi_snd_zerowin: u32, - pub __tcpi_pad: [u32; 26], - } -} - -s_no_extra_traits! { - pub struct utmpx { - pub ut_name: [c_char; _UTX_USERSIZE], - pub ut_id: [c_char; _UTX_IDSIZE], - pub ut_line: [c_char; _UTX_LINESIZE], - pub ut_host: [c_char; _UTX_HOSTSIZE], - pub ut_session: u16, - pub ut_type: u16, - pub ut_pid: crate::pid_t, - pub ut_exit: __exit_status, // FIXME(netbsd): when anonymous struct are supported - pub ut_ss: sockaddr_storage, - pub ut_tv: crate::timeval, - pub ut_pad: [u8; _UTX_PADSIZE], - } - - pub struct lastlogx { - pub ll_tv: crate::timeval, - pub ll_line: [c_char; _UTX_LINESIZE], - pub ll_host: [c_char; _UTX_HOSTSIZE], - pub ll_ss: sockaddr_storage, - } - - pub struct in_pktinfo { - pub ipi_addr: crate::in_addr, - pub ipi_ifindex: c_uint, - } - - pub struct arphdr { - pub ar_hrd: u16, - pub ar_pro: u16, - pub ar_hln: u8, - pub ar_pln: u8, - pub ar_op: u16, - } - - pub struct in_addr { - pub s_addr: crate::in_addr_t, - } - - pub struct ip_mreq { - pub imr_multiaddr: in_addr, - pub imr_interface: in_addr, - } - - pub struct sockaddr_in { - pub sin_len: u8, - pub sin_family: crate::sa_family_t, - pub sin_port: crate::in_port_t, - pub sin_addr: crate::in_addr, - pub sin_zero: [i8; 8], - } - - pub struct dirent { - pub d_fileno: crate::ino_t, - pub d_reclen: u16, - pub d_namlen: u16, - pub d_type: u8, - pub d_name: [c_char; 512], - } - - pub struct statvfs { - pub f_flag: c_ulong, - pub f_bsize: c_ulong, - pub f_frsize: c_ulong, - pub f_iosize: c_ulong, - - pub f_blocks: crate::fsblkcnt_t, - pub f_bfree: crate::fsblkcnt_t, - pub f_bavail: crate::fsblkcnt_t, - pub f_bresvd: crate::fsblkcnt_t, - - pub f_files: crate::fsfilcnt_t, - pub f_ffree: crate::fsfilcnt_t, - pub f_favail: crate::fsfilcnt_t, - pub f_fresvd: crate::fsfilcnt_t, - - pub f_syncreads: u64, - pub f_syncwrites: u64, - - pub f_asyncreads: u64, - pub f_asyncwrites: u64, - - pub f_fsidx: crate::fsid_t, - pub f_fsid: c_ulong, - pub f_namemax: c_ulong, - pub f_owner: crate::uid_t, - - pub f_spare: [u32; 4], - - pub f_fstypename: [c_char; 32], - pub f_mntonname: [c_char; 1024], - pub f_mntfromname: [c_char; 1024], - } - - pub struct sockaddr_storage { - pub ss_len: u8, - pub ss_family: crate::sa_family_t, - __ss_pad1: [u8; 6], - __ss_pad2: i64, - __ss_pad3: [u8; 112], - } - - pub struct sigevent { - pub sigev_notify: c_int, - pub sigev_signo: c_int, - pub sigev_value: crate::sigval, - __unused1: *mut c_void, //actually a function pointer - pub sigev_notify_attributes: *mut c_void, - } - - pub union __c_anonymous_posix_spawn_fae { - pub open: __c_anonymous_posix_spawn_fae_open, - pub dup2: __c_anonymous_posix_spawn_fae_dup2, - } - - pub union __c_anonymous_ifc_ifcu { - pub ifcu_buf: *mut c_void, - pub ifcu_req: *mut ifreq, - } -} - -cfg_if! 
{ - if #[cfg(feature = "extra_traits")] { - impl PartialEq for utmpx { - fn eq(&self, other: &utmpx) -> bool { - self.ut_type == other.ut_type - && self.ut_pid == other.ut_pid - && self.ut_name == other.ut_name - && self.ut_line == other.ut_line - && self.ut_id == other.ut_id - && self.ut_exit == other.ut_exit - && self.ut_session == other.ut_session - && self.ut_tv == other.ut_tv - && self.ut_ss == other.ut_ss - && self - .ut_pad - .iter() - .zip(other.ut_pad.iter()) - .all(|(a, b)| a == b) - && self - .ut_host - .iter() - .zip(other.ut_host.iter()) - .all(|(a, b)| a == b) - } - } - - impl Eq for utmpx {} - - impl hash::Hash for utmpx { - fn hash(&self, state: &mut H) { - self.ut_name.hash(state); - self.ut_type.hash(state); - self.ut_pid.hash(state); - self.ut_line.hash(state); - self.ut_id.hash(state); - self.ut_host.hash(state); - self.ut_exit.hash(state); - self.ut_session.hash(state); - self.ut_tv.hash(state); - self.ut_ss.hash(state); - self.ut_pad.hash(state); - } - } - - impl PartialEq for lastlogx { - fn eq(&self, other: &lastlogx) -> bool { - self.ll_tv == other.ll_tv - && self.ll_line == other.ll_line - && self.ll_ss == other.ll_ss - && self - .ll_host - .iter() - .zip(other.ll_host.iter()) - .all(|(a, b)| a == b) - } - } - - impl Eq for lastlogx {} - - impl hash::Hash for lastlogx { - fn hash(&self, state: &mut H) { - self.ll_tv.hash(state); - self.ll_line.hash(state); - self.ll_host.hash(state); - self.ll_ss.hash(state); - } - } - - impl PartialEq for in_pktinfo { - fn eq(&self, other: &in_pktinfo) -> bool { - self.ipi_addr == other.ipi_addr && self.ipi_ifindex == other.ipi_ifindex - } - } - impl Eq for in_pktinfo {} - impl hash::Hash for in_pktinfo { - fn hash(&self, state: &mut H) { - self.ipi_addr.hash(state); - self.ipi_ifindex.hash(state); - } - } - - impl PartialEq for arphdr { - fn eq(&self, other: &arphdr) -> bool { - self.ar_hrd == other.ar_hrd - && self.ar_pro == other.ar_pro - && self.ar_hln == other.ar_hln - && self.ar_pln == other.ar_pln - && self.ar_op == other.ar_op - } - } - impl Eq for arphdr {} - impl hash::Hash for arphdr { - fn hash(&self, state: &mut H) { - let ar_hrd = self.ar_hrd; - let ar_pro = self.ar_pro; - let ar_op = self.ar_op; - ar_hrd.hash(state); - ar_pro.hash(state); - self.ar_hln.hash(state); - self.ar_pln.hash(state); - ar_op.hash(state); - } - } - - impl PartialEq for in_addr { - fn eq(&self, other: &in_addr) -> bool { - self.s_addr == other.s_addr - } - } - impl Eq for in_addr {} - impl hash::Hash for in_addr { - fn hash(&self, state: &mut H) { - let s_addr = self.s_addr; - s_addr.hash(state); - } - } - - impl PartialEq for ip_mreq { - fn eq(&self, other: &ip_mreq) -> bool { - self.imr_multiaddr == other.imr_multiaddr - && self.imr_interface == other.imr_interface - } - } - impl Eq for ip_mreq {} - impl hash::Hash for ip_mreq { - fn hash(&self, state: &mut H) { - self.imr_multiaddr.hash(state); - self.imr_interface.hash(state); - } - } - - impl PartialEq for sockaddr_in { - fn eq(&self, other: &sockaddr_in) -> bool { - self.sin_len == other.sin_len - && self.sin_family == other.sin_family - && self.sin_port == other.sin_port - && self.sin_addr == other.sin_addr - && self.sin_zero == other.sin_zero - } - } - impl Eq for sockaddr_in {} - impl hash::Hash for sockaddr_in { - fn hash(&self, state: &mut H) { - self.sin_len.hash(state); - self.sin_family.hash(state); - self.sin_port.hash(state); - self.sin_addr.hash(state); - self.sin_zero.hash(state); - } - } - - impl PartialEq for dirent { - fn eq(&self, other: &dirent) -> bool { - 
self.d_fileno == other.d_fileno - && self.d_reclen == other.d_reclen - && self.d_namlen == other.d_namlen - && self.d_type == other.d_type - && self - .d_name - .iter() - .zip(other.d_name.iter()) - .all(|(a, b)| a == b) - } - } - impl Eq for dirent {} - impl hash::Hash for dirent { - fn hash(&self, state: &mut H) { - self.d_fileno.hash(state); - self.d_reclen.hash(state); - self.d_namlen.hash(state); - self.d_type.hash(state); - self.d_name.hash(state); - } - } - - impl PartialEq for statvfs { - fn eq(&self, other: &statvfs) -> bool { - self.f_flag == other.f_flag - && self.f_bsize == other.f_bsize - && self.f_frsize == other.f_frsize - && self.f_iosize == other.f_iosize - && self.f_blocks == other.f_blocks - && self.f_bfree == other.f_bfree - && self.f_bavail == other.f_bavail - && self.f_bresvd == other.f_bresvd - && self.f_files == other.f_files - && self.f_ffree == other.f_ffree - && self.f_favail == other.f_favail - && self.f_fresvd == other.f_fresvd - && self.f_syncreads == other.f_syncreads - && self.f_syncwrites == other.f_syncwrites - && self.f_asyncreads == other.f_asyncreads - && self.f_asyncwrites == other.f_asyncwrites - && self.f_fsidx == other.f_fsidx - && self.f_fsid == other.f_fsid - && self.f_namemax == other.f_namemax - && self.f_owner == other.f_owner - && self.f_spare == other.f_spare - && self.f_fstypename == other.f_fstypename - && self - .f_mntonname - .iter() - .zip(other.f_mntonname.iter()) - .all(|(a, b)| a == b) - && self - .f_mntfromname - .iter() - .zip(other.f_mntfromname.iter()) - .all(|(a, b)| a == b) - } - } - impl Eq for statvfs {} - impl hash::Hash for statvfs { - fn hash(&self, state: &mut H) { - self.f_flag.hash(state); - self.f_bsize.hash(state); - self.f_frsize.hash(state); - self.f_iosize.hash(state); - self.f_blocks.hash(state); - self.f_bfree.hash(state); - self.f_bavail.hash(state); - self.f_bresvd.hash(state); - self.f_files.hash(state); - self.f_ffree.hash(state); - self.f_favail.hash(state); - self.f_fresvd.hash(state); - self.f_syncreads.hash(state); - self.f_syncwrites.hash(state); - self.f_asyncreads.hash(state); - self.f_asyncwrites.hash(state); - self.f_fsidx.hash(state); - self.f_fsid.hash(state); - self.f_namemax.hash(state); - self.f_owner.hash(state); - self.f_spare.hash(state); - self.f_fstypename.hash(state); - self.f_mntonname.hash(state); - self.f_mntfromname.hash(state); - } - } - - impl PartialEq for sockaddr_storage { - fn eq(&self, other: &sockaddr_storage) -> bool { - self.ss_len == other.ss_len - && self.ss_family == other.ss_family - && self.__ss_pad1 == other.__ss_pad1 - && self.__ss_pad2 == other.__ss_pad2 - && self - .__ss_pad3 - .iter() - .zip(other.__ss_pad3.iter()) - .all(|(a, b)| a == b) - } - } - impl Eq for sockaddr_storage {} - impl hash::Hash for sockaddr_storage { - fn hash(&self, state: &mut H) { - self.ss_len.hash(state); - self.ss_family.hash(state); - self.__ss_pad1.hash(state); - self.__ss_pad2.hash(state); - self.__ss_pad3.hash(state); - } - } - - impl PartialEq for sigevent { - fn eq(&self, other: &sigevent) -> bool { - self.sigev_notify == other.sigev_notify - && self.sigev_signo == other.sigev_signo - && self.sigev_value == other.sigev_value - && self.sigev_notify_attributes == other.sigev_notify_attributes - } - } - impl Eq for sigevent {} - impl hash::Hash for sigevent { - fn hash(&self, state: &mut H) { - self.sigev_notify.hash(state); - self.sigev_signo.hash(state); - self.sigev_value.hash(state); - self.sigev_notify_attributes.hash(state); - } - } - - impl Eq for __c_anonymous_posix_spawn_fae {} - 
- impl PartialEq for __c_anonymous_posix_spawn_fae { - fn eq(&self, other: &__c_anonymous_posix_spawn_fae) -> bool { - unsafe { self.open == other.open || self.dup2 == other.dup2 } - } - } - - impl hash::Hash for __c_anonymous_posix_spawn_fae { - fn hash(&self, state: &mut H) { - unsafe { - self.open.hash(state); - self.dup2.hash(state); - } - } - } - - impl Eq for __c_anonymous_ifc_ifcu {} - - impl PartialEq for __c_anonymous_ifc_ifcu { - fn eq(&self, other: &__c_anonymous_ifc_ifcu) -> bool { - unsafe { self.ifcu_buf == other.ifcu_buf || self.ifcu_req == other.ifcu_req } - } - } - - impl hash::Hash for __c_anonymous_ifc_ifcu { - fn hash(&self, state: &mut H) { - unsafe { - self.ifcu_buf.hash(state); - self.ifcu_req.hash(state); - } - } - } - } -} - -pub const AT_FDCWD: c_int = -100; -pub const AT_EACCESS: c_int = 0x100; -pub const AT_SYMLINK_NOFOLLOW: c_int = 0x200; -pub const AT_SYMLINK_FOLLOW: c_int = 0x400; -pub const AT_REMOVEDIR: c_int = 0x800; - -pub const AT_NULL: c_int = 0; -pub const AT_IGNORE: c_int = 1; -pub const AT_EXECFD: c_int = 2; -pub const AT_PHDR: c_int = 3; -pub const AT_PHENT: c_int = 4; -pub const AT_PHNUM: c_int = 5; -pub const AT_PAGESZ: c_int = 6; -pub const AT_BASE: c_int = 7; -pub const AT_FLAGS: c_int = 8; -pub const AT_ENTRY: c_int = 9; -pub const AT_DCACHEBSIZE: c_int = 10; -pub const AT_ICACHEBSIZE: c_int = 11; -pub const AT_UCACHEBSIZE: c_int = 12; -pub const AT_STACKBASE: c_int = 13; -pub const AT_EUID: c_int = 2000; -pub const AT_RUID: c_int = 2001; -pub const AT_EGID: c_int = 2002; -pub const AT_RGID: c_int = 2003; -pub const AT_SUN_LDELF: c_int = 2004; -pub const AT_SUN_LDSHDR: c_int = 2005; -pub const AT_SUN_LDNAME: c_int = 2006; -pub const AT_SUN_LDPGSIZE: c_int = 2007; -pub const AT_SUN_PLATFORM: c_int = 2008; -pub const AT_SUN_HWCAP: c_int = 2009; -pub const AT_SUN_IFLUSH: c_int = 2010; -pub const AT_SUN_CPU: c_int = 2011; -pub const AT_SUN_EMUL_ENTRY: c_int = 2012; -pub const AT_SUN_EMUL_EXECFD: c_int = 2013; -pub const AT_SUN_EXECNAME: c_int = 2014; - -pub const EXTATTR_NAMESPACE_USER: c_int = 1; -pub const EXTATTR_NAMESPACE_SYSTEM: c_int = 2; - -pub const LC_COLLATE_MASK: c_int = 1 << crate::LC_COLLATE; -pub const LC_CTYPE_MASK: c_int = 1 << crate::LC_CTYPE; -pub const LC_MONETARY_MASK: c_int = 1 << crate::LC_MONETARY; -pub const LC_NUMERIC_MASK: c_int = 1 << crate::LC_NUMERIC; -pub const LC_TIME_MASK: c_int = 1 << crate::LC_TIME; -pub const LC_MESSAGES_MASK: c_int = 1 << crate::LC_MESSAGES; -pub const LC_ALL_MASK: c_int = !0; - -pub const ERA: crate::nl_item = 52; -pub const ERA_D_FMT: crate::nl_item = 53; -pub const ERA_D_T_FMT: crate::nl_item = 54; -pub const ERA_T_FMT: crate::nl_item = 55; -pub const ALT_DIGITS: crate::nl_item = 56; - -pub const O_CLOEXEC: c_int = 0x400000; -pub const O_ALT_IO: c_int = 0x40000; -pub const O_NOSIGPIPE: c_int = 0x1000000; -pub const O_SEARCH: c_int = 0x800000; -pub const O_DIRECTORY: c_int = 0x200000; -pub const O_DIRECT: c_int = 0x00080000; -pub const O_RSYNC: c_int = 0x00020000; - -pub const MS_SYNC: c_int = 0x4; -pub const MS_INVALIDATE: c_int = 0x2; - -// Here because they are not present on OpenBSD -// (https://github.com/openbsd/src/blob/HEAD/sys/sys/resource.h) -pub const RLIMIT_SBSIZE: c_int = 9; -pub const RLIMIT_AS: c_int = 10; -pub const RLIMIT_NTHR: c_int = 11; - -#[deprecated(since = "0.2.64", note = "Not stable across OS versions")] -pub const RLIM_NLIMITS: c_int = 12; - -pub const EIDRM: c_int = 82; -pub const ENOMSG: c_int = 83; -pub const EOVERFLOW: c_int = 84; -pub const EILSEQ: c_int = 85; 
-pub const ENOTSUP: c_int = 86; -pub const ECANCELED: c_int = 87; -pub const EBADMSG: c_int = 88; -pub const ENODATA: c_int = 89; -pub const ENOSR: c_int = 90; -pub const ENOSTR: c_int = 91; -pub const ETIME: c_int = 92; -pub const ENOATTR: c_int = 93; -pub const EMULTIHOP: c_int = 94; -pub const ENOLINK: c_int = 95; -pub const EPROTO: c_int = 96; -pub const EOWNERDEAD: c_int = 97; -pub const ENOTRECOVERABLE: c_int = 98; -#[deprecated( - since = "0.2.143", - note = "This value will always match the highest defined error number \ - and thus is not stable. \ - See #3040 for more info." -)] -pub const ELAST: c_int = 98; - -pub const F_DUPFD_CLOEXEC: c_int = 12; -pub const F_CLOSEM: c_int = 10; -pub const F_GETNOSIGPIPE: c_int = 13; -pub const F_SETNOSIGPIPE: c_int = 14; -pub const F_MAXFD: c_int = 11; -pub const F_GETPATH: c_int = 15; - -pub const FUTEX_WAIT: c_int = 0; -pub const FUTEX_WAKE: c_int = 1; -pub const FUTEX_FD: c_int = 2; -pub const FUTEX_REQUEUE: c_int = 3; -pub const FUTEX_CMP_REQUEUE: c_int = 4; -pub const FUTEX_WAKE_OP: c_int = 5; -pub const FUTEX_LOCK_PI: c_int = 6; -pub const FUTEX_UNLOCK_PI: c_int = 7; -pub const FUTEX_TRYLOCK_PI: c_int = 8; -pub const FUTEX_WAIT_BITSET: c_int = 9; -pub const FUTEX_WAKE_BITSET: c_int = 10; -pub const FUTEX_WAIT_REQUEUE_PI: c_int = 11; -pub const FUTEX_CMP_REQUEUE_PI: c_int = 12; -pub const FUTEX_PRIVATE_FLAG: c_int = 1 << 7; -pub const FUTEX_CLOCK_REALTIME: c_int = 1 << 8; -pub const FUTEX_CMD_MASK: c_int = !(FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME); -pub const FUTEX_WAITERS: u32 = 1 << 31; -pub const FUTEX_OWNER_DIED: u32 = 1 << 30; -pub const FUTEX_SYNCOBJ_1: u32 = 1 << 29; -pub const FUTEX_SYNCOBJ_0: u32 = 1 << 28; -pub const FUTEX_TID_MASK: u32 = (1 << 28) - 1; -pub const FUTEX_BITSET_MATCH_ANY: u32 = !0; - -pub const IP_RECVDSTADDR: c_int = 7; -pub const IP_SENDSRCADDR: c_int = IP_RECVDSTADDR; -pub const IP_RECVIF: c_int = 20; -pub const IP_PKTINFO: c_int = 25; -pub const IP_RECVPKTINFO: c_int = 26; -pub const IPV6_JOIN_GROUP: c_int = 12; -pub const IPV6_LEAVE_GROUP: c_int = 13; - -pub const TCP_KEEPIDLE: c_int = 3; -pub const TCP_KEEPINTVL: c_int = 5; -pub const TCP_KEEPCNT: c_int = 6; -pub const TCP_KEEPINIT: c_int = 7; -pub const TCP_MD5SIG: c_int = 0x10; -pub const TCP_CONGCTL: c_int = 0x20; - -pub const SOCK_CONN_DGRAM: c_int = 6; -pub const SOCK_DCCP: c_int = SOCK_CONN_DGRAM; -pub const SOCK_NOSIGPIPE: c_int = 0x40000000; -pub const SOCK_FLAGS_MASK: c_int = 0xf0000000; - -pub const SO_SNDTIMEO: c_int = 0x100b; -pub const SO_RCVTIMEO: c_int = 0x100c; -pub const SO_NOSIGPIPE: c_int = 0x0800; -pub const SO_ACCEPTFILTER: c_int = 0x1000; -pub const SO_TIMESTAMP: c_int = 0x2000; -pub const SO_OVERFLOWED: c_int = 0x1009; -pub const SO_NOHEADER: c_int = 0x100a; - -// http://cvsweb.netbsd.org/bsdweb.cgi/src/sys/sys/un.h?annotate -pub const LOCAL_OCREDS: c_int = 0x0001; // pass credentials to receiver -pub const LOCAL_CONNWAIT: c_int = 0x0002; // connects block until accepted -pub const LOCAL_PEEREID: c_int = 0x0003; // get peer identification -pub const LOCAL_CREDS: c_int = 0x0004; // pass credentials to receiver - -// https://github.com/NetBSD/src/blob/trunk/sys/net/if.h#L373 -pub const IFF_UP: c_int = 0x0001; // interface is up -pub const IFF_BROADCAST: c_int = 0x0002; // broadcast address valid -pub const IFF_DEBUG: c_int = 0x0004; // turn on debugging -pub const IFF_LOOPBACK: c_int = 0x0008; // is a loopback net -pub const IFF_POINTOPOINT: c_int = 0x0010; // interface is point-to-point link -pub const IFF_NOTRAILERS: c_int = 
0x0020; // avoid use of trailers -pub const IFF_RUNNING: c_int = 0x0040; // resources allocated -pub const IFF_NOARP: c_int = 0x0080; // no address resolution protocol -pub const IFF_PROMISC: c_int = 0x0100; // receive all packets -pub const IFF_ALLMULTI: c_int = 0x0200; // receive all multicast packets -pub const IFF_OACTIVE: c_int = 0x0400; // transmission in progress -pub const IFF_SIMPLEX: c_int = 0x0800; // can't hear own transmissions -pub const IFF_LINK0: c_int = 0x1000; // per link layer defined bit -pub const IFF_LINK1: c_int = 0x2000; // per link layer defined bit -pub const IFF_LINK2: c_int = 0x4000; // per link layer defined bit -pub const IFF_MULTICAST: c_int = 0x8000; // supports multicast - -// sys/netinet/in.h -// Protocols (RFC 1700) -// NOTE: These are in addition to the constants defined in src/unix/mod.rs - -// IPPROTO_IP defined in src/unix/mod.rs -/// Hop-by-hop option header -pub const IPPROTO_HOPOPTS: c_int = 0; -// IPPROTO_ICMP defined in src/unix/mod.rs -/// group mgmt protocol -pub const IPPROTO_IGMP: c_int = 2; -/// gateway^2 (deprecated) -pub const IPPROTO_GGP: c_int = 3; -/// for compatibility -pub const IPPROTO_IPIP: c_int = 4; -// IPPROTO_TCP defined in src/unix/mod.rs -/// exterior gateway protocol -pub const IPPROTO_EGP: c_int = 8; -/// pup -pub const IPPROTO_PUP: c_int = 12; -// IPPROTO_UDP defined in src/unix/mod.rs -/// xns idp -pub const IPPROTO_IDP: c_int = 22; -/// tp-4 w/ class negotiation -pub const IPPROTO_TP: c_int = 29; -/// DCCP -pub const IPPROTO_DCCP: c_int = 33; -// IPPROTO_IPV6 defined in src/unix/mod.rs -/// IP6 routing header -pub const IPPROTO_ROUTING: c_int = 43; -/// IP6 fragmentation header -pub const IPPROTO_FRAGMENT: c_int = 44; -/// resource reservation -pub const IPPROTO_RSVP: c_int = 46; -/// General Routing Encap. -pub const IPPROTO_GRE: c_int = 47; -/// IP6 Encap Sec. Payload -pub const IPPROTO_ESP: c_int = 50; -/// IP6 Auth Header -pub const IPPROTO_AH: c_int = 51; -/// IP Mobility RFC 2004 -pub const IPPROTO_MOBILE: c_int = 55; -/// IPv6 ICMP -pub const IPPROTO_IPV6_ICMP: c_int = 58; -// IPPROTO_ICMPV6 defined in src/unix/mod.rs -/// IP6 no next header -pub const IPPROTO_NONE: c_int = 59; -/// IP6 destination option -pub const IPPROTO_DSTOPTS: c_int = 60; -/// ISO cnlp -pub const IPPROTO_EON: c_int = 80; -/// Ethernet-in-IP -pub const IPPROTO_ETHERIP: c_int = 97; -/// encapsulation header -pub const IPPROTO_ENCAP: c_int = 98; -/// Protocol indep. multicast -pub const IPPROTO_PIM: c_int = 103; -/// IP Payload Comp. Protocol -pub const IPPROTO_IPCOMP: c_int = 108; -/// VRRP RFC 2338 -pub const IPPROTO_VRRP: c_int = 112; -/// Common Address Resolution Protocol -pub const IPPROTO_CARP: c_int = 112; -/// L2TPv3 -pub const IPPROTO_L2TP: c_int = 115; -/// SCTP -pub const IPPROTO_SCTP: c_int = 132; -/// PFSYNC -pub const IPPROTO_PFSYNC: c_int = 240; -pub const IPPROTO_MAX: c_int = 256; - -/// last return value of *_input(), meaning "all job for this pkt is done". 
-pub const IPPROTO_DONE: c_int = 257; - -/// sysctl placeholder for (FAST_)IPSEC -pub const CTL_IPPROTO_IPSEC: c_int = 258; - -pub const AF_OROUTE: c_int = 17; -pub const AF_ARP: c_int = 28; -pub const pseudo_AF_KEY: c_int = 29; -pub const pseudo_AF_HDRCMPLT: c_int = 30; -pub const AF_BLUETOOTH: c_int = 31; -pub const AF_IEEE80211: c_int = 32; -pub const AF_MPLS: c_int = 33; -pub const AF_ROUTE: c_int = 34; -pub const NET_RT_DUMP: c_int = 1; -pub const NET_RT_FLAGS: c_int = 2; -pub const NET_RT_OOOIFLIST: c_int = 3; -pub const NET_RT_OOIFLIST: c_int = 4; -pub const NET_RT_OIFLIST: c_int = 5; -pub const NET_RT_IFLIST: c_int = 6; -pub const NET_RT_MAXID: c_int = 7; - -pub const PF_OROUTE: c_int = AF_OROUTE; -pub const PF_ARP: c_int = AF_ARP; -pub const PF_KEY: c_int = pseudo_AF_KEY; -pub const PF_BLUETOOTH: c_int = AF_BLUETOOTH; -pub const PF_MPLS: c_int = AF_MPLS; -pub const PF_ROUTE: c_int = AF_ROUTE; - -pub const MSG_NBIO: c_int = 0x1000; -pub const MSG_WAITFORONE: c_int = 0x2000; -pub const MSG_NOTIFICATION: c_int = 0x4000; - -pub const SCM_TIMESTAMP: c_int = 0x08; -pub const SCM_CREDS: c_int = 0x10; - -pub const O_DSYNC: c_int = 0x10000; - -pub const MAP_RENAME: c_int = 0x20; -pub const MAP_NORESERVE: c_int = 0x40; -pub const MAP_HASSEMAPHORE: c_int = 0x200; -pub const MAP_TRYFIXED: c_int = 0x400; -pub const MAP_WIRED: c_int = 0x800; -pub const MAP_STACK: c_int = 0x2000; -// map alignment aliases for MAP_ALIGNED -pub const MAP_ALIGNMENT_SHIFT: c_int = 24; -pub const MAP_ALIGNMENT_MASK: c_int = 0xff << MAP_ALIGNMENT_SHIFT; -pub const MAP_ALIGNMENT_64KB: c_int = 16 << MAP_ALIGNMENT_SHIFT; -pub const MAP_ALIGNMENT_16MB: c_int = 24 << MAP_ALIGNMENT_SHIFT; -pub const MAP_ALIGNMENT_4GB: c_int = 32 << MAP_ALIGNMENT_SHIFT; -pub const MAP_ALIGNMENT_1TB: c_int = 40 << MAP_ALIGNMENT_SHIFT; -pub const MAP_ALIGNMENT_256TB: c_int = 48 << MAP_ALIGNMENT_SHIFT; -pub const MAP_ALIGNMENT_64PB: c_int = 56 << MAP_ALIGNMENT_SHIFT; -// mremap flag -pub const MAP_REMAPDUP: c_int = 0x004; - -pub const DCCP_TYPE_REQUEST: c_int = 0; -pub const DCCP_TYPE_RESPONSE: c_int = 1; -pub const DCCP_TYPE_DATA: c_int = 2; -pub const DCCP_TYPE_ACK: c_int = 3; -pub const DCCP_TYPE_DATAACK: c_int = 4; -pub const DCCP_TYPE_CLOSEREQ: c_int = 5; -pub const DCCP_TYPE_CLOSE: c_int = 6; -pub const DCCP_TYPE_RESET: c_int = 7; -pub const DCCP_TYPE_MOVE: c_int = 8; - -pub const DCCP_FEATURE_CC: c_int = 1; -pub const DCCP_FEATURE_ECN: c_int = 2; -pub const DCCP_FEATURE_ACKRATIO: c_int = 3; -pub const DCCP_FEATURE_ACKVECTOR: c_int = 4; -pub const DCCP_FEATURE_MOBILITY: c_int = 5; -pub const DCCP_FEATURE_LOSSWINDOW: c_int = 6; -pub const DCCP_FEATURE_CONN_NONCE: c_int = 8; -pub const DCCP_FEATURE_IDENTREG: c_int = 7; - -pub const DCCP_OPT_PADDING: c_int = 0; -pub const DCCP_OPT_DATA_DISCARD: c_int = 1; -pub const DCCP_OPT_SLOW_RECV: c_int = 2; -pub const DCCP_OPT_BUF_CLOSED: c_int = 3; -pub const DCCP_OPT_CHANGE_L: c_int = 32; -pub const DCCP_OPT_CONFIRM_L: c_int = 33; -pub const DCCP_OPT_CHANGE_R: c_int = 34; -pub const DCCP_OPT_CONFIRM_R: c_int = 35; -pub const DCCP_OPT_INIT_COOKIE: c_int = 36; -pub const DCCP_OPT_NDP_COUNT: c_int = 37; -pub const DCCP_OPT_ACK_VECTOR0: c_int = 38; -pub const DCCP_OPT_ACK_VECTOR1: c_int = 39; -pub const DCCP_OPT_RECV_BUF_DROPS: c_int = 40; -pub const DCCP_OPT_TIMESTAMP: c_int = 41; -pub const DCCP_OPT_TIMESTAMP_ECHO: c_int = 42; -pub const DCCP_OPT_ELAPSEDTIME: c_int = 43; -pub const DCCP_OPT_DATACHECKSUM: c_int = 44; - -pub const DCCP_REASON_UNSPEC: c_int = 0; -pub const DCCP_REASON_CLOSED: c_int = 1; 
-pub const DCCP_REASON_INVALID: c_int = 2; -pub const DCCP_REASON_OPTION_ERR: c_int = 3; -pub const DCCP_REASON_FEA_ERR: c_int = 4; -pub const DCCP_REASON_CONN_REF: c_int = 5; -pub const DCCP_REASON_BAD_SNAME: c_int = 6; -pub const DCCP_REASON_BAD_COOKIE: c_int = 7; -pub const DCCP_REASON_INV_MOVE: c_int = 8; -pub const DCCP_REASON_UNANSW_CH: c_int = 10; -pub const DCCP_REASON_FRUITLESS_NEG: c_int = 11; - -pub const DCCP_CCID: c_int = 1; -pub const DCCP_CSLEN: c_int = 2; -pub const DCCP_MAXSEG: c_int = 4; -pub const DCCP_SERVICE: c_int = 8; - -pub const DCCP_NDP_LIMIT: c_int = 16; -pub const DCCP_SEQ_NUM_LIMIT: c_int = 16777216; -pub const DCCP_MAX_OPTIONS: c_int = 32; -pub const DCCP_MAX_PKTS: c_int = 100; - -pub const _PC_LINK_MAX: c_int = 1; -pub const _PC_MAX_CANON: c_int = 2; -pub const _PC_MAX_INPUT: c_int = 3; -pub const _PC_NAME_MAX: c_int = 4; -pub const _PC_PATH_MAX: c_int = 5; -pub const _PC_PIPE_BUF: c_int = 6; -pub const _PC_CHOWN_RESTRICTED: c_int = 7; -pub const _PC_NO_TRUNC: c_int = 8; -pub const _PC_VDISABLE: c_int = 9; -pub const _PC_SYNC_IO: c_int = 10; -pub const _PC_FILESIZEBITS: c_int = 11; -pub const _PC_SYMLINK_MAX: c_int = 12; -pub const _PC_2_SYMLINKS: c_int = 13; -pub const _PC_ACL_EXTENDED: c_int = 14; -pub const _PC_MIN_HOLE_SIZE: c_int = 15; - -pub const _CS_PATH: c_int = 1; - -pub const _SC_SYNCHRONIZED_IO: c_int = 31; -pub const _SC_IOV_MAX: c_int = 32; -pub const _SC_MAPPED_FILES: c_int = 33; -pub const _SC_MEMLOCK: c_int = 34; -pub const _SC_MEMLOCK_RANGE: c_int = 35; -pub const _SC_MEMORY_PROTECTION: c_int = 36; -pub const _SC_LOGIN_NAME_MAX: c_int = 37; -pub const _SC_MONOTONIC_CLOCK: c_int = 38; -pub const _SC_CLK_TCK: c_int = 39; -pub const _SC_ATEXIT_MAX: c_int = 40; -pub const _SC_THREADS: c_int = 41; -pub const _SC_SEMAPHORES: c_int = 42; -pub const _SC_BARRIERS: c_int = 43; -pub const _SC_TIMERS: c_int = 44; -pub const _SC_SPIN_LOCKS: c_int = 45; -pub const _SC_READER_WRITER_LOCKS: c_int = 46; -pub const _SC_GETGR_R_SIZE_MAX: c_int = 47; -pub const _SC_GETPW_R_SIZE_MAX: c_int = 48; -pub const _SC_CLOCK_SELECTION: c_int = 49; -pub const _SC_ASYNCHRONOUS_IO: c_int = 50; -pub const _SC_AIO_LISTIO_MAX: c_int = 51; -pub const _SC_AIO_MAX: c_int = 52; -pub const _SC_MESSAGE_PASSING: c_int = 53; -pub const _SC_MQ_OPEN_MAX: c_int = 54; -pub const _SC_MQ_PRIO_MAX: c_int = 55; -pub const _SC_PRIORITY_SCHEDULING: c_int = 56; -pub const _SC_THREAD_DESTRUCTOR_ITERATIONS: c_int = 57; -pub const _SC_THREAD_KEYS_MAX: c_int = 58; -pub const _SC_THREAD_STACK_MIN: c_int = 59; -pub const _SC_THREAD_THREADS_MAX: c_int = 60; -pub const _SC_THREAD_ATTR_STACKADDR: c_int = 61; -pub const _SC_THREAD_ATTR_STACKSIZE: c_int = 62; -pub const _SC_THREAD_PRIORITY_SCHEDULING: c_int = 63; -pub const _SC_THREAD_PRIO_INHERIT: c_int = 64; -pub const _SC_THREAD_PRIO_PROTECT: c_int = 65; -pub const _SC_THREAD_PROCESS_SHARED: c_int = 66; -pub const _SC_THREAD_SAFE_FUNCTIONS: c_int = 67; -pub const _SC_TTY_NAME_MAX: c_int = 68; -pub const _SC_HOST_NAME_MAX: c_int = 69; -pub const _SC_PASS_MAX: c_int = 70; -pub const _SC_REGEXP: c_int = 71; -pub const _SC_SHELL: c_int = 72; -pub const _SC_SYMLOOP_MAX: c_int = 73; -pub const _SC_V6_ILP32_OFF32: c_int = 74; -pub const _SC_V6_ILP32_OFFBIG: c_int = 75; -pub const _SC_V6_LP64_OFF64: c_int = 76; -pub const _SC_V6_LPBIG_OFFBIG: c_int = 77; -pub const _SC_2_PBS: c_int = 80; -pub const _SC_2_PBS_ACCOUNTING: c_int = 81; -pub const _SC_2_PBS_CHECKPOINT: c_int = 82; -pub const _SC_2_PBS_LOCATE: c_int = 83; -pub const _SC_2_PBS_MESSAGE: c_int = 84; 
-pub const _SC_2_PBS_TRACK: c_int = 85; -pub const _SC_SPAWN: c_int = 86; -pub const _SC_SHARED_MEMORY_OBJECTS: c_int = 87; -pub const _SC_TIMER_MAX: c_int = 88; -pub const _SC_SEM_NSEMS_MAX: c_int = 89; -pub const _SC_CPUTIME: c_int = 90; -pub const _SC_THREAD_CPUTIME: c_int = 91; -pub const _SC_DELAYTIMER_MAX: c_int = 92; -// These two variables will be supported in NetBSD 8.0 -// pub const _SC_SIGQUEUE_MAX : c_int = 93; -// pub const _SC_REALTIME_SIGNALS : c_int = 94; -pub const _SC_PHYS_PAGES: c_int = 121; -pub const _SC_NPROCESSORS_CONF: c_int = 1001; -pub const _SC_NPROCESSORS_ONLN: c_int = 1002; -pub const _SC_SCHED_RT_TS: c_int = 2001; -pub const _SC_SCHED_PRI_MIN: c_int = 2002; -pub const _SC_SCHED_PRI_MAX: c_int = 2003; - -pub const FD_SETSIZE: usize = 0x100; - -pub const ST_NOSUID: c_ulong = 8; - -pub const BIOCGRSIG: c_ulong = 0x40044272; -pub const BIOCSRSIG: c_ulong = 0x80044273; -pub const BIOCSDLT: c_ulong = 0x80044278; -pub const BIOCGSEESENT: c_ulong = 0x40044276; -pub const BIOCSSEESENT: c_ulong = 0x80044277; - -// -pub const MNT_UNION: c_int = 0x00000020; -pub const MNT_NOCOREDUMP: c_int = 0x00008000; -pub const MNT_RELATIME: c_int = 0x00020000; -pub const MNT_IGNORE: c_int = 0x00100000; -pub const MNT_NFS4ACLS: c_int = 0x00200000; -pub const MNT_DISCARD: c_int = 0x00800000; -pub const MNT_EXTATTR: c_int = 0x01000000; -pub const MNT_LOG: c_int = 0x02000000; -pub const MNT_NOATIME: c_int = 0x04000000; -pub const MNT_AUTOMOUNTED: c_int = 0x10000000; -pub const MNT_SYMPERM: c_int = 0x20000000; -pub const MNT_NODEVMTIME: c_int = 0x40000000; -pub const MNT_SOFTDEP: c_int = 0x80000000; -pub const MNT_POSIX1EACLS: c_int = 0x00000800; -pub const MNT_ACLS: c_int = MNT_POSIX1EACLS; -pub const MNT_WAIT: c_int = 1; -pub const MNT_NOWAIT: c_int = 2; -pub const MNT_LAZY: c_int = 3; - -// sys/ioccom.h -pub const IOCPARM_SHIFT: u32 = 16; -pub const IOCGROUP_SHIFT: u32 = 8; - -pub const fn IOCPARM_LEN(x: u32) -> u32 { - (x >> IOCPARM_SHIFT) & crate::IOCPARM_MASK -} - -pub const fn IOCBASECMD(x: u32) -> u32 { - x & (!(crate::IOCPARM_MASK << IOCPARM_SHIFT)) -} - -pub const fn IOCGROUP(x: u32) -> u32 { - (x >> IOCGROUP_SHIFT) & 0xff -} - -pub const fn _IOC(inout: c_ulong, group: c_ulong, num: c_ulong, len: c_ulong) -> c_ulong { - (inout) - | (((len) & crate::IOCPARM_MASK as c_ulong) << IOCPARM_SHIFT) - | ((group) << IOCGROUP_SHIFT) - | (num) -} - -// -pub const CLOCK_PROCESS_CPUTIME_ID: crate::clockid_t = 2; -pub const CLOCK_THREAD_CPUTIME_ID: crate::clockid_t = 4; -pub const NTP_API: c_int = 4; -pub const MAXPHASE: c_long = 500000000; -pub const MAXFREQ: c_long = 500000; -pub const MINSEC: c_int = 256; -pub const MAXSEC: c_int = 2048; -pub const NANOSECOND: c_long = 1000000000; -pub const SCALE_PPM: c_int = 65; -pub const MAXTC: c_int = 10; -pub const MOD_OFFSET: c_uint = 0x0001; -pub const MOD_FREQUENCY: c_uint = 0x0002; -pub const MOD_MAXERROR: c_uint = 0x0004; -pub const MOD_ESTERROR: c_uint = 0x0008; -pub const MOD_STATUS: c_uint = 0x0010; -pub const MOD_TIMECONST: c_uint = 0x0020; -pub const MOD_PPSMAX: c_uint = 0x0040; -pub const MOD_TAI: c_uint = 0x0080; -pub const MOD_MICRO: c_uint = 0x1000; -pub const MOD_NANO: c_uint = 0x2000; -pub const MOD_CLKB: c_uint = 0x4000; -pub const MOD_CLKA: c_uint = 0x8000; -pub const STA_PLL: c_int = 0x0001; -pub const STA_PPSFREQ: c_int = 0x0002; -pub const STA_PPSTIME: c_int = 0x0004; -pub const STA_FLL: c_int = 0x0008; -pub const STA_INS: c_int = 0x0010; -pub const STA_DEL: c_int = 0x0020; -pub const STA_UNSYNC: c_int = 0x0040; -pub const 
STA_FREQHOLD: c_int = 0x0080; -pub const STA_PPSSIGNAL: c_int = 0x0100; -pub const STA_PPSJITTER: c_int = 0x0200; -pub const STA_PPSWANDER: c_int = 0x0400; -pub const STA_PPSERROR: c_int = 0x0800; -pub const STA_CLOCKERR: c_int = 0x1000; -pub const STA_NANO: c_int = 0x2000; -pub const STA_MODE: c_int = 0x4000; -pub const STA_CLK: c_int = 0x8000; -pub const STA_RONLY: c_int = STA_PPSSIGNAL - | STA_PPSJITTER - | STA_PPSWANDER - | STA_PPSERROR - | STA_CLOCKERR - | STA_NANO - | STA_MODE - | STA_CLK; -pub const TIME_OK: c_int = 0; -pub const TIME_INS: c_int = 1; -pub const TIME_DEL: c_int = 2; -pub const TIME_OOP: c_int = 3; -pub const TIME_WAIT: c_int = 4; -pub const TIME_ERROR: c_int = 5; - -pub const LITTLE_ENDIAN: c_int = 1234; -pub const BIG_ENDIAN: c_int = 4321; - -pub const PL_EVENT_NONE: c_int = 0; -pub const PL_EVENT_SIGNAL: c_int = 1; -pub const PL_EVENT_SUSPENDED: c_int = 2; - -cfg_if! { - if #[cfg(any( - target_arch = "sparc", - target_arch = "sparc64", - target_arch = "x86", - target_arch = "x86_64" - ))] { - pub const PTHREAD_MUTEX_INITIALIZER: pthread_mutex_t = pthread_mutex_t { - ptm_magic: 0x33330003, - ptm_errorcheck: 0, - ptm_pad1: [0; 3], - ptm_unused: 0, - ptm_pad2: [0; 3], - ptm_waiters: 0 as *mut _, - ptm_owner: 0, - ptm_recursed: 0, - ptm_spare2: 0 as *mut _, - }; - } else { - pub const PTHREAD_MUTEX_INITIALIZER: pthread_mutex_t = pthread_mutex_t { - ptm_magic: 0x33330003, - ptm_errorcheck: 0, - ptm_unused: 0, - ptm_waiters: 0 as *mut _, - ptm_owner: 0, - ptm_recursed: 0, - ptm_spare2: 0 as *mut _, - }; - } -} - -pub const PTHREAD_COND_INITIALIZER: pthread_cond_t = pthread_cond_t { - ptc_magic: 0x55550005, - ptc_lock: 0, - ptc_waiters_first: 0 as *mut _, - ptc_waiters_last: 0 as *mut _, - ptc_mutex: 0 as *mut _, - ptc_private: 0 as *mut _, -}; -pub const PTHREAD_RWLOCK_INITIALIZER: pthread_rwlock_t = pthread_rwlock_t { - ptr_magic: 0x99990009, - ptr_interlock: 0, - ptr_rblocked_first: 0 as *mut _, - ptr_rblocked_last: 0 as *mut _, - ptr_wblocked_first: 0 as *mut _, - ptr_wblocked_last: 0 as *mut _, - ptr_nreaders: 0, - ptr_owner: 0, - ptr_private: 0 as *mut _, -}; -pub const PTHREAD_MUTEX_NORMAL: c_int = 0; -pub const PTHREAD_MUTEX_ERRORCHECK: c_int = 1; -pub const PTHREAD_MUTEX_RECURSIVE: c_int = 2; -pub const PTHREAD_MUTEX_DEFAULT: c_int = PTHREAD_MUTEX_NORMAL; - -pub const SCHED_NONE: c_int = -1; -pub const SCHED_OTHER: c_int = 0; -pub const SCHED_FIFO: c_int = 1; -pub const SCHED_RR: c_int = 2; - -pub const EVFILT_AIO: u32 = 2; -pub const EVFILT_PROC: u32 = 4; -pub const EVFILT_READ: u32 = 0; -pub const EVFILT_SIGNAL: u32 = 5; -pub const EVFILT_TIMER: u32 = 6; -pub const EVFILT_VNODE: u32 = 3; -pub const EVFILT_WRITE: u32 = 1; -pub const EVFILT_FS: u32 = 7; -pub const EVFILT_USER: u32 = 8; -pub const EVFILT_EMPTY: u32 = 9; - -pub const EV_ADD: u32 = 0x1; -pub const EV_DELETE: u32 = 0x2; -pub const EV_ENABLE: u32 = 0x4; -pub const EV_DISABLE: u32 = 0x8; -pub const EV_ONESHOT: u32 = 0x10; -pub const EV_CLEAR: u32 = 0x20; -pub const EV_RECEIPT: u32 = 0x40; -pub const EV_DISPATCH: u32 = 0x80; -pub const EV_FLAG1: u32 = 0x2000; -pub const EV_ERROR: u32 = 0x4000; -pub const EV_EOF: u32 = 0x8000; -pub const EV_SYSFLAGS: u32 = 0xf000; - -pub const NOTE_TRIGGER: u32 = 0x01000000; -pub const NOTE_FFNOP: u32 = 0x00000000; -pub const NOTE_FFAND: u32 = 0x40000000; -pub const NOTE_FFOR: u32 = 0x80000000; -pub const NOTE_FFCOPY: u32 = 0xc0000000; -pub const NOTE_FFCTRLMASK: u32 = 0xc0000000; -pub const NOTE_FFLAGSMASK: u32 = 0x00ffffff; -pub const NOTE_LOWAT: u32 = 0x00000001; -pub 
const NOTE_DELETE: u32 = 0x00000001; -pub const NOTE_WRITE: u32 = 0x00000002; -pub const NOTE_EXTEND: u32 = 0x00000004; -pub const NOTE_ATTRIB: u32 = 0x00000008; -pub const NOTE_LINK: u32 = 0x00000010; -pub const NOTE_RENAME: u32 = 0x00000020; -pub const NOTE_REVOKE: u32 = 0x00000040; -pub const NOTE_EXIT: u32 = 0x80000000; -pub const NOTE_FORK: u32 = 0x40000000; -pub const NOTE_EXEC: u32 = 0x20000000; -pub const NOTE_PDATAMASK: u32 = 0x000fffff; -pub const NOTE_PCTRLMASK: u32 = 0xf0000000; -pub const NOTE_TRACK: u32 = 0x00000001; -pub const NOTE_TRACKERR: u32 = 0x00000002; -pub const NOTE_CHILD: u32 = 0x00000004; -pub const NOTE_MSECONDS: u32 = 0x00000000; -pub const NOTE_SECONDS: u32 = 0x00000001; -pub const NOTE_USECONDS: u32 = 0x00000002; -pub const NOTE_NSECONDS: u32 = 0x00000003; -pub const NOTE_ABSTIME: u32 = 0x000000010; - -pub const TMP_MAX: c_uint = 308915776; - -pub const AI_PASSIVE: c_int = 0x00000001; -pub const AI_CANONNAME: c_int = 0x00000002; -pub const AI_NUMERICHOST: c_int = 0x00000004; -pub const AI_NUMERICSERV: c_int = 0x00000008; -pub const AI_ADDRCONFIG: c_int = 0x00000400; -pub const AI_SRV: c_int = 0x00000800; - -pub const NI_MAXHOST: crate::socklen_t = 1025; -pub const NI_MAXSERV: crate::socklen_t = 32; - -pub const NI_NOFQDN: c_int = 0x00000001; -pub const NI_NUMERICHOST: c_int = 0x000000002; -pub const NI_NAMEREQD: c_int = 0x000000004; -pub const NI_NUMERICSERV: c_int = 0x000000008; -pub const NI_DGRAM: c_int = 0x00000010; -pub const NI_WITHSCOPEID: c_int = 0x00000020; -pub const NI_NUMERICSCOPE: c_int = 0x00000040; - -pub const RTLD_NOLOAD: c_int = 0x2000; -pub const RTLD_LOCAL: c_int = 0x200; - -pub const CTL_MAXNAME: c_int = 12; -pub const SYSCTL_NAMELEN: c_int = 32; -pub const SYSCTL_DEFSIZE: c_int = 8; -pub const CTLTYPE_NODE: c_int = 1; -pub const CTLTYPE_INT: c_int = 2; -pub const CTLTYPE_STRING: c_int = 3; -pub const CTLTYPE_QUAD: c_int = 4; -pub const CTLTYPE_STRUCT: c_int = 5; -pub const CTLTYPE_BOOL: c_int = 6; -pub const CTLFLAG_READONLY: c_int = 0x00000000; -pub const CTLFLAG_READWRITE: c_int = 0x00000070; -pub const CTLFLAG_ANYWRITE: c_int = 0x00000080; -pub const CTLFLAG_PRIVATE: c_int = 0x00000100; -pub const CTLFLAG_PERMANENT: c_int = 0x00000200; -pub const CTLFLAG_OWNDATA: c_int = 0x00000400; -pub const CTLFLAG_IMMEDIATE: c_int = 0x00000800; -pub const CTLFLAG_HEX: c_int = 0x00001000; -pub const CTLFLAG_ROOT: c_int = 0x00002000; -pub const CTLFLAG_ANYNUMBER: c_int = 0x00004000; -pub const CTLFLAG_HIDDEN: c_int = 0x00008000; -pub const CTLFLAG_ALIAS: c_int = 0x00010000; -pub const CTLFLAG_MMAP: c_int = 0x00020000; -pub const CTLFLAG_OWNDESC: c_int = 0x00040000; -pub const CTLFLAG_UNSIGNED: c_int = 0x00080000; -pub const SYSCTL_VERS_MASK: c_int = 0xff000000; -pub const SYSCTL_VERS_0: c_int = 0x00000000; -pub const SYSCTL_VERS_1: c_int = 0x01000000; -pub const SYSCTL_VERSION: c_int = SYSCTL_VERS_1; -pub const CTL_EOL: c_int = -1; -pub const CTL_QUERY: c_int = -2; -pub const CTL_CREATE: c_int = -3; -pub const CTL_CREATESYM: c_int = -4; -pub const CTL_DESTROY: c_int = -5; -pub const CTL_MMAP: c_int = -6; -pub const CTL_DESCRIBE: c_int = -7; -pub const CTL_UNSPEC: c_int = 0; -pub const CTL_KERN: c_int = 1; -pub const CTL_VM: c_int = 2; -pub const CTL_VFS: c_int = 3; -pub const CTL_NET: c_int = 4; -pub const CTL_DEBUG: c_int = 5; -pub const CTL_HW: c_int = 6; -pub const CTL_MACHDEP: c_int = 7; -pub const CTL_USER: c_int = 8; -pub const CTL_DDB: c_int = 9; -pub const CTL_PROC: c_int = 10; -pub const CTL_VENDOR: c_int = 11; -pub const CTL_EMUL: c_int = 
12; -pub const CTL_SECURITY: c_int = 13; -pub const CTL_MAXID: c_int = 14; -pub const KERN_OSTYPE: c_int = 1; -pub const KERN_OSRELEASE: c_int = 2; -pub const KERN_OSREV: c_int = 3; -pub const KERN_VERSION: c_int = 4; -pub const KERN_MAXVNODES: c_int = 5; -pub const KERN_MAXPROC: c_int = 6; -pub const KERN_MAXFILES: c_int = 7; -pub const KERN_ARGMAX: c_int = 8; -pub const KERN_SECURELVL: c_int = 9; -pub const KERN_HOSTNAME: c_int = 10; -pub const KERN_HOSTID: c_int = 11; -pub const KERN_CLOCKRATE: c_int = 12; -pub const KERN_VNODE: c_int = 13; -pub const KERN_PROC: c_int = 14; -pub const KERN_FILE: c_int = 15; -pub const KERN_PROF: c_int = 16; -pub const KERN_POSIX1: c_int = 17; -pub const KERN_NGROUPS: c_int = 18; -pub const KERN_JOB_CONTROL: c_int = 19; -pub const KERN_SAVED_IDS: c_int = 20; -pub const KERN_OBOOTTIME: c_int = 21; -pub const KERN_DOMAINNAME: c_int = 22; -pub const KERN_MAXPARTITIONS: c_int = 23; -pub const KERN_RAWPARTITION: c_int = 24; -pub const KERN_NTPTIME: c_int = 25; -pub const KERN_TIMEX: c_int = 26; -pub const KERN_AUTONICETIME: c_int = 27; -pub const KERN_AUTONICEVAL: c_int = 28; -pub const KERN_RTC_OFFSET: c_int = 29; -pub const KERN_ROOT_DEVICE: c_int = 30; -pub const KERN_MSGBUFSIZE: c_int = 31; -pub const KERN_FSYNC: c_int = 32; -pub const KERN_OLDSYSVMSG: c_int = 33; -pub const KERN_OLDSYSVSEM: c_int = 34; -pub const KERN_OLDSYSVSHM: c_int = 35; -pub const KERN_OLDSHORTCORENAME: c_int = 36; -pub const KERN_SYNCHRONIZED_IO: c_int = 37; -pub const KERN_IOV_MAX: c_int = 38; -pub const KERN_MBUF: c_int = 39; -pub const KERN_MAPPED_FILES: c_int = 40; -pub const KERN_MEMLOCK: c_int = 41; -pub const KERN_MEMLOCK_RANGE: c_int = 42; -pub const KERN_MEMORY_PROTECTION: c_int = 43; -pub const KERN_LOGIN_NAME_MAX: c_int = 44; -pub const KERN_DEFCORENAME: c_int = 45; -pub const KERN_LOGSIGEXIT: c_int = 46; -pub const KERN_PROC2: c_int = 47; -pub const KERN_PROC_ARGS: c_int = 48; -pub const KERN_FSCALE: c_int = 49; -pub const KERN_CCPU: c_int = 50; -pub const KERN_CP_TIME: c_int = 51; -pub const KERN_OLDSYSVIPC_INFO: c_int = 52; -pub const KERN_MSGBUF: c_int = 53; -pub const KERN_CONSDEV: c_int = 54; -pub const KERN_MAXPTYS: c_int = 55; -pub const KERN_PIPE: c_int = 56; -pub const KERN_MAXPHYS: c_int = 57; -pub const KERN_SBMAX: c_int = 58; -pub const KERN_TKSTAT: c_int = 59; -pub const KERN_MONOTONIC_CLOCK: c_int = 60; -pub const KERN_URND: c_int = 61; -pub const KERN_LABELSECTOR: c_int = 62; -pub const KERN_LABELOFFSET: c_int = 63; -pub const KERN_LWP: c_int = 64; -pub const KERN_FORKFSLEEP: c_int = 65; -pub const KERN_POSIX_THREADS: c_int = 66; -pub const KERN_POSIX_SEMAPHORES: c_int = 67; -pub const KERN_POSIX_BARRIERS: c_int = 68; -pub const KERN_POSIX_TIMERS: c_int = 69; -pub const KERN_POSIX_SPIN_LOCKS: c_int = 70; -pub const KERN_POSIX_READER_WRITER_LOCKS: c_int = 71; -pub const KERN_DUMP_ON_PANIC: c_int = 72; -pub const KERN_SOMAXKVA: c_int = 73; -pub const KERN_ROOT_PARTITION: c_int = 74; -pub const KERN_DRIVERS: c_int = 75; -pub const KERN_BUF: c_int = 76; -pub const KERN_FILE2: c_int = 77; -pub const KERN_VERIEXEC: c_int = 78; -pub const KERN_CP_ID: c_int = 79; -pub const KERN_HARDCLOCK_TICKS: c_int = 80; -pub const KERN_ARND: c_int = 81; -pub const KERN_SYSVIPC: c_int = 82; -pub const KERN_BOOTTIME: c_int = 83; -pub const KERN_EVCNT: c_int = 84; -pub const KERN_MAXID: c_int = 85; -pub const KERN_PROC_ALL: c_int = 0; -pub const KERN_PROC_PID: c_int = 1; -pub const KERN_PROC_PGRP: c_int = 2; -pub const KERN_PROC_SESSION: c_int = 3; -pub const KERN_PROC_TTY: 
c_int = 4; -pub const KERN_PROC_UID: c_int = 5; -pub const KERN_PROC_RUID: c_int = 6; -pub const KERN_PROC_GID: c_int = 7; -pub const KERN_PROC_RGID: c_int = 8; -pub const KERN_PROC_ARGV: c_int = 1; -pub const KERN_PROC_NARGV: c_int = 2; -pub const KERN_PROC_ENV: c_int = 3; -pub const KERN_PROC_NENV: c_int = 4; -pub const KERN_PROC_PATHNAME: c_int = 5; -pub const VM_PROC: c_int = 16; -pub const VM_PROC_MAP: c_int = 1; - -pub const EAI_AGAIN: c_int = 2; -pub const EAI_BADFLAGS: c_int = 3; -pub const EAI_FAIL: c_int = 4; -pub const EAI_FAMILY: c_int = 5; -pub const EAI_MEMORY: c_int = 6; -pub const EAI_NODATA: c_int = 7; -pub const EAI_NONAME: c_int = 8; -pub const EAI_SERVICE: c_int = 9; -pub const EAI_SOCKTYPE: c_int = 10; -pub const EAI_SYSTEM: c_int = 11; -pub const EAI_OVERFLOW: c_int = 14; - -pub const AIO_CANCELED: c_int = 1; -pub const AIO_NOTCANCELED: c_int = 2; -pub const AIO_ALLDONE: c_int = 3; -pub const LIO_NOP: c_int = 0; -pub const LIO_WRITE: c_int = 1; -pub const LIO_READ: c_int = 2; -pub const LIO_WAIT: c_int = 1; -pub const LIO_NOWAIT: c_int = 0; - -pub const SIGEV_NONE: c_int = 0; -pub const SIGEV_SIGNAL: c_int = 1; -pub const SIGEV_THREAD: c_int = 2; - -pub const WSTOPPED: c_int = 0x00000002; // same as WUNTRACED -pub const WCONTINUED: c_int = 0x00000010; -pub const WEXITED: c_int = 0x000000020; -pub const WNOWAIT: c_int = 0x00010000; - -pub const WALTSIG: c_int = 0x00000004; -pub const WALLSIG: c_int = 0x00000008; -pub const WTRAPPED: c_int = 0x00000040; -pub const WNOZOMBIE: c_int = 0x00020000; - -pub const P_ALL: idtype_t = 0; -pub const P_PID: idtype_t = 1; -pub const P_PGID: idtype_t = 4; - -pub const UTIME_OMIT: c_long = 1073741822; -pub const UTIME_NOW: c_long = 1073741823; - -pub const B460800: crate::speed_t = 460800; -pub const B921600: crate::speed_t = 921600; - -pub const ONOCR: crate::tcflag_t = 0x20; -pub const ONLRET: crate::tcflag_t = 0x40; -pub const CDTRCTS: crate::tcflag_t = 0x00020000; -pub const CHWFLOW: crate::tcflag_t = crate::MDMBUF | crate::CRTSCTS | crate::CDTRCTS; - -// pub const _PATH_UTMPX: &[c_char; 14] = b"/var/run/utmpx"; -// pub const _PATH_WTMPX: &[c_char; 14] = b"/var/log/wtmpx"; -// pub const _PATH_LASTLOGX: &[c_char; 17] = b"/var/log/lastlogx"; -// pub const _PATH_UTMP_UPDATE: &[c_char; 24] = b"/usr/libexec/utmp_update"; -pub const UT_NAMESIZE: usize = 8; -pub const UT_LINESIZE: usize = 8; -pub const UT_HOSTSIZE: usize = 16; -pub const _UTX_USERSIZE: usize = 32; -pub const _UTX_LINESIZE: usize = 32; -pub const _UTX_PADSIZE: usize = 40; -pub const _UTX_IDSIZE: usize = 4; -pub const _UTX_HOSTSIZE: usize = 256; -pub const EMPTY: u16 = 0; -pub const RUN_LVL: u16 = 1; -pub const BOOT_TIME: u16 = 2; -pub const OLD_TIME: u16 = 3; -pub const NEW_TIME: u16 = 4; -pub const INIT_PROCESS: u16 = 5; -pub const LOGIN_PROCESS: u16 = 6; -pub const USER_PROCESS: u16 = 7; -pub const DEAD_PROCESS: u16 = 8; -pub const ACCOUNTING: u16 = 9; -pub const SIGNATURE: u16 = 10; -pub const DOWN_TIME: u16 = 11; - -pub const SOCK_CLOEXEC: c_int = 0x10000000; -pub const SOCK_NONBLOCK: c_int = 0x20000000; - -// Uncomment on next NetBSD release -// pub const FIOSEEKDATA: c_ulong = 0xc0086661; -// pub const FIOSEEKHOLE: c_ulong = 0xc0086662; -pub const OFIOGETBMAP: c_ulong = 0xc004667a; -pub const FIOGETBMAP: c_ulong = 0xc008667a; -pub const FIONWRITE: c_ulong = 0x40046679; -pub const FIONSPACE: c_ulong = 0x40046678; -pub const FIBMAP: c_ulong = 0xc008667a; - -pub const SIGSTKSZ: size_t = 40960; - -pub const REG_ENOSYS: c_int = 17; - -pub const PT_DUMPCORE: c_int = 12; 
-pub const PT_LWPINFO: c_int = 13; -pub const PT_SYSCALL: c_int = 14; -pub const PT_SYSCALLEMU: c_int = 15; -pub const PT_SET_EVENT_MASK: c_int = 16; -pub const PT_GET_EVENT_MASK: c_int = 17; -pub const PT_GET_PROCESS_STATE: c_int = 18; -pub const PT_SET_SIGINFO: c_int = 19; -pub const PT_GET_SIGINFO: c_int = 20; -pub const PT_RESUME: c_int = 21; -pub const PT_SUSPEND: c_int = 23; -pub const PT_STOP: c_int = 23; -pub const PT_LWPSTATUS: c_int = 24; -pub const PT_LWPNEXT: c_int = 25; -pub const PT_SET_SIGPASS: c_int = 26; -pub const PT_GET_SIGPASS: c_int = 27; -pub const PT_FIRSTMACH: c_int = 32; -pub const POSIX_SPAWN_RETURNERROR: c_int = 0x40; - -// Flags for chflags(2) -pub const SF_APPEND: c_ulong = 0x00040000; -pub const SF_ARCHIVED: c_ulong = 0x00010000; -pub const SF_IMMUTABLE: c_ulong = 0x00020000; -pub const SF_LOG: c_ulong = 0x00400000; -pub const SF_SETTABLE: c_ulong = 0xffff0000; -pub const SF_SNAPINVAL: c_ulong = 0x00800000; -pub const SF_SNAPSHOT: c_ulong = 0x00200000; -pub const UF_APPEND: c_ulong = 0x00000004; -pub const UF_IMMUTABLE: c_ulong = 0x00000002; -pub const UF_NODUMP: c_ulong = 0x00000001; -pub const UF_OPAQUE: c_ulong = 0x00000008; -pub const UF_SETTABLE: c_ulong = 0x0000ffff; - -// sys/sysctl.h -pub const KVME_PROT_READ: c_int = 0x00000001; -pub const KVME_PROT_WRITE: c_int = 0x00000002; -pub const KVME_PROT_EXEC: c_int = 0x00000004; - -pub const KVME_FLAG_COW: c_int = 0x00000001; -pub const KVME_FLAG_NEEDS_COPY: c_int = 0x00000002; -pub const KVME_FLAG_NOCOREDUMP: c_int = 0x000000004; -pub const KVME_FLAG_PAGEABLE: c_int = 0x000000008; -pub const KVME_FLAG_GROWS_UP: c_int = 0x000000010; -pub const KVME_FLAG_GROWS_DOWN: c_int = 0x000000020; - -pub const NGROUPS_MAX: c_int = 16; - -pub const KI_NGROUPS: c_int = 16; -pub const KI_MAXCOMLEN: c_int = 24; -pub const KI_WMESGLEN: c_int = 8; -pub const KI_MAXLOGNAME: c_int = 24; -pub const KI_MAXEMULLEN: c_int = 16; -pub const KI_LNAMELEN: c_int = 20; - -// sys/lwp.h -pub const LSIDL: c_int = 1; -pub const LSRUN: c_int = 2; -pub const LSSLEEP: c_int = 3; -pub const LSSTOP: c_int = 4; -pub const LSZOMB: c_int = 5; -pub const LSONPROC: c_int = 7; -pub const LSSUSPENDED: c_int = 8; - -// sys/xattr.h -pub const XATTR_CREATE: c_int = 0x01; -pub const XATTR_REPLACE: c_int = 0x02; -// sys/extattr.h -pub const EXTATTR_NAMESPACE_EMPTY: c_int = 0; - -// For getrandom() -pub const GRND_NONBLOCK: c_uint = 0x1; -pub const GRND_RANDOM: c_uint = 0x2; -pub const GRND_INSECURE: c_uint = 0x4; - -// sys/reboot.h -pub const RB_ASKNAME: c_int = 0x000000001; -pub const RB_SINGLE: c_int = 0x000000002; -pub const RB_NOSYNC: c_int = 0x000000004; -pub const RB_HALT: c_int = 0x000000008; -pub const RB_INITNAME: c_int = 0x000000010; -pub const RB_KDB: c_int = 0x000000040; -pub const RB_RDONLY: c_int = 0x000000080; -pub const RB_DUMP: c_int = 0x000000100; -pub const RB_MINIROOT: c_int = 0x000000200; -pub const RB_STRING: c_int = 0x000000400; -pub const RB_POWERDOWN: c_int = RB_HALT | 0x000000800; -pub const RB_USERCONF: c_int = 0x000001000; - -pub const fn MAP_ALIGNED(alignment: c_int) -> c_int { - alignment << MAP_ALIGNMENT_SHIFT -} - -// net/route.h -pub const RTF_MASK: c_int = 0x80; -pub const RTF_CONNECTED: c_int = 0x100; -pub const RTF_ANNOUNCE: c_int = 0x20000; -pub const RTF_SRC: c_int = 0x10000; -pub const RTF_LOCAL: c_int = 0x40000; -pub const RTF_BROADCAST: c_int = 0x80000; -pub const RTF_UPDATING: c_int = 0x100000; -pub const RTF_DONTCHANGEIFA: c_int = 0x200000; - -pub const RTM_VERSION: c_int = 4; -pub const RTM_LOCK: c_int = 0x8; -pub 
const RTM_IFANNOUNCE: c_int = 0x10; -pub const RTM_IEEE80211: c_int = 0x11; -pub const RTM_SETGATE: c_int = 0x12; -pub const RTM_LLINFO_UPD: c_int = 0x13; -pub const RTM_IFINFO: c_int = 0x14; -pub const RTM_OCHGADDR: c_int = 0x15; -pub const RTM_NEWADDR: c_int = 0x16; -pub const RTM_DELADDR: c_int = 0x17; -pub const RTM_CHGADDR: c_int = 0x18; - -pub const RTA_TAG: c_int = 0x100; - -pub const RTAX_TAG: c_int = 8; -pub const RTAX_MAX: c_int = 9; - -// sys/timerfd.h -pub const TFD_CLOEXEC: i32 = crate::O_CLOEXEC; -pub const TFD_NONBLOCK: i32 = crate::O_NONBLOCK; -pub const TFD_TIMER_ABSTIME: i32 = crate::O_WRONLY; -pub const TFD_TIMER_CANCEL_ON_SET: i32 = crate::O_RDWR; - -const fn _ALIGN(p: usize) -> usize { - (p + _ALIGNBYTES) & !_ALIGNBYTES -} - -f! { - pub fn CMSG_DATA(cmsg: *const cmsghdr) -> *mut c_uchar { - (cmsg as *mut c_uchar).add(_ALIGN(size_of::())) - } - - pub const fn CMSG_LEN(length: c_uint) -> c_uint { - _ALIGN(size_of::()) as c_uint + length - } - - pub fn CMSG_NXTHDR(mhdr: *const crate::msghdr, cmsg: *const cmsghdr) -> *mut cmsghdr { - if cmsg.is_null() { - return crate::CMSG_FIRSTHDR(mhdr); - } - let next = cmsg as usize + _ALIGN((*cmsg).cmsg_len as usize) + _ALIGN(size_of::()); - let max = (*mhdr).msg_control as usize + (*mhdr).msg_controllen as usize; - if next > max { - core::ptr::null_mut::() - } else { - (cmsg as usize + _ALIGN((*cmsg).cmsg_len as usize)) as *mut cmsghdr - } - } - - pub const fn CMSG_SPACE(length: c_uint) -> c_uint { - (_ALIGN(size_of::()) + _ALIGN(length as usize)) as c_uint - } - - // dirfd() is a macro on netbsd to access - // the first field of the struct where dirp points to: - // http://cvsweb.netbsd.org/bsdweb.cgi/src/include/dirent.h?rev=1.36 - pub fn dirfd(dirp: *mut crate::DIR) -> c_int { - *(dirp as *const c_int) - } - - pub fn SOCKCREDSIZE(ngrps: usize) -> usize { - let ngrps = if ngrps > 0 { ngrps - 1 } else { 0 }; - size_of::() + size_of::() * ngrps - } - - pub fn PROT_MPROTECT(x: c_int) -> c_int { - x << 3 - } - - pub fn PROT_MPROTECT_EXTRACT(x: c_int) -> c_int { - (x >> 3) & 0x7 - } -} - -safe_f! 
{ - pub const fn WSTOPSIG(status: c_int) -> c_int { - status >> 8 - } - - pub const fn WIFSIGNALED(status: c_int) -> bool { - (status & 0o177) != 0o177 && (status & 0o177) != 0 - } - - pub const fn WIFSTOPPED(status: c_int) -> bool { - (status & 0o177) == 0o177 - } - - pub const fn WIFCONTINUED(status: c_int) -> bool { - status == 0xffff - } - - pub const fn makedev(major: c_uint, minor: c_uint) -> crate::dev_t { - let major = major as crate::dev_t; - let minor = minor as crate::dev_t; - let mut dev = 0; - dev |= (major << 8) & 0x000ff00; - dev |= (minor << 12) & 0xfff00000; - dev |= minor & 0xff; - dev - } - - pub const fn major(dev: crate::dev_t) -> c_int { - (((dev as u32) & 0x000fff00) >> 8) as c_int - } - - pub const fn minor(dev: crate::dev_t) -> c_int { - let mut res = 0; - res |= ((dev as u32) & 0xfff00000) >> 12; - res |= (dev as u32) & 0x000000ff; - res as c_int - } -} - -extern "C" { - pub fn ntp_adjtime(buf: *mut timex) -> c_int; - pub fn ntp_gettime(buf: *mut ntptimeval) -> c_int; - pub fn clock_nanosleep( - clk_id: crate::clockid_t, - flags: c_int, - rqtp: *const crate::timespec, - rmtp: *mut crate::timespec, - ) -> c_int; - - pub fn reallocarr(ptr: *mut c_void, number: size_t, size: size_t) -> c_int; - - pub fn chflags(path: *const c_char, flags: c_ulong) -> c_int; - pub fn fchflags(fd: c_int, flags: c_ulong) -> c_int; - pub fn lchflags(path: *const c_char, flags: c_ulong) -> c_int; - - pub fn extattr_list_fd( - fd: c_int, - attrnamespace: c_int, - data: *mut c_void, - nbytes: size_t, - ) -> ssize_t; - pub fn extattr_list_file( - path: *const c_char, - attrnamespace: c_int, - data: *mut c_void, - nbytes: size_t, - ) -> ssize_t; - pub fn extattr_list_link( - path: *const c_char, - attrnamespace: c_int, - data: *mut c_void, - nbytes: size_t, - ) -> ssize_t; - pub fn extattr_delete_fd(fd: c_int, attrnamespace: c_int, attrname: *const c_char) -> c_int; - pub fn extattr_delete_file( - path: *const c_char, - attrnamespace: c_int, - attrname: *const c_char, - ) -> c_int; - pub fn extattr_delete_link( - path: *const c_char, - attrnamespace: c_int, - attrname: *const c_char, - ) -> c_int; - pub fn extattr_get_fd( - fd: c_int, - attrnamespace: c_int, - attrname: *const c_char, - data: *mut c_void, - nbytes: size_t, - ) -> ssize_t; - pub fn extattr_get_file( - path: *const c_char, - attrnamespace: c_int, - attrname: *const c_char, - data: *mut c_void, - nbytes: size_t, - ) -> ssize_t; - pub fn extattr_get_link( - path: *const c_char, - attrnamespace: c_int, - attrname: *const c_char, - data: *mut c_void, - nbytes: size_t, - ) -> ssize_t; - pub fn extattr_namespace_to_string(attrnamespace: c_int, string: *mut *mut c_char) -> c_int; - pub fn extattr_set_fd( - fd: c_int, - attrnamespace: c_int, - attrname: *const c_char, - data: *const c_void, - nbytes: size_t, - ) -> c_int; - pub fn extattr_set_file( - path: *const c_char, - attrnamespace: c_int, - attrname: *const c_char, - data: *const c_void, - nbytes: size_t, - ) -> c_int; - pub fn extattr_set_link( - path: *const c_char, - attrnamespace: c_int, - attrname: *const c_char, - data: *const c_void, - nbytes: size_t, - ) -> c_int; - pub fn extattr_string_to_namespace(string: *const c_char, attrnamespace: *mut c_int) -> c_int; - - pub fn openpty( - amaster: *mut c_int, - aslave: *mut c_int, - name: *mut c_char, - termp: *mut crate::termios, - winp: *mut crate::winsize, - ) -> c_int; - pub fn forkpty( - amaster: *mut c_int, - name: *mut c_char, - termp: *mut crate::termios, - winp: *mut crate::winsize, - ) -> crate::pid_t; - - pub fn 
ptsname_r(fd: c_int, buf: *mut c_char, buflen: size_t) -> c_int; - - #[link_name = "__lutimes50"] - pub fn lutimes(file: *const c_char, times: *const crate::timeval) -> c_int; - #[link_name = "__gettimeofday50"] - pub fn gettimeofday(tp: *mut crate::timeval, tz: *mut c_void) -> c_int; - pub fn getnameinfo( - sa: *const crate::sockaddr, - salen: crate::socklen_t, - host: *mut c_char, - hostlen: crate::socklen_t, - serv: *mut c_char, - servlen: crate::socklen_t, - flags: c_int, - ) -> c_int; - pub fn mprotect(addr: *mut c_void, len: size_t, prot: c_int) -> c_int; - pub fn sysctl( - name: *const c_int, - namelen: c_uint, - oldp: *mut c_void, - oldlenp: *mut size_t, - newp: *const c_void, - newlen: size_t, - ) -> c_int; - pub fn sysctlbyname( - name: *const c_char, - oldp: *mut c_void, - oldlenp: *mut size_t, - newp: *const c_void, - newlen: size_t, - ) -> c_int; - pub fn sysctlnametomib(sname: *const c_char, name: *mut c_int, namelenp: *mut size_t) -> c_int; - #[link_name = "__kevent50"] - pub fn kevent( - kq: c_int, - changelist: *const crate::kevent, - nchanges: size_t, - eventlist: *mut crate::kevent, - nevents: size_t, - timeout: *const crate::timespec, - ) -> c_int; - #[link_name = "__mount50"] - pub fn mount( - src: *const c_char, - target: *const c_char, - flags: c_int, - data: *mut c_void, - size: size_t, - ) -> c_int; - pub fn mq_open(name: *const c_char, oflag: c_int, ...) -> crate::mqd_t; - pub fn mq_close(mqd: crate::mqd_t) -> c_int; - pub fn mq_getattr(mqd: crate::mqd_t, attr: *mut crate::mq_attr) -> c_int; - pub fn mq_notify(mqd: crate::mqd_t, notification: *const crate::sigevent) -> c_int; - pub fn mq_receive( - mqd: crate::mqd_t, - msg_ptr: *mut c_char, - msg_len: size_t, - msg_prio: *mut c_uint, - ) -> ssize_t; - pub fn mq_send( - mqd: crate::mqd_t, - msg_ptr: *const c_char, - msg_len: size_t, - msg_prio: c_uint, - ) -> c_int; - pub fn mq_setattr( - mqd: crate::mqd_t, - newattr: *const crate::mq_attr, - oldattr: *mut crate::mq_attr, - ) -> c_int; - #[link_name = "__mq_timedreceive50"] - pub fn mq_timedreceive( - mqd: crate::mqd_t, - msg_ptr: *mut c_char, - msg_len: size_t, - msg_prio: *mut c_uint, - abs_timeout: *const crate::timespec, - ) -> ssize_t; - #[link_name = "__mq_timedsend50"] - pub fn mq_timedsend( - mqd: crate::mqd_t, - msg_ptr: *const c_char, - msg_len: size_t, - msg_prio: c_uint, - abs_timeout: *const crate::timespec, - ) -> c_int; - pub fn mq_unlink(name: *const c_char) -> c_int; - pub fn ptrace(request: c_int, pid: crate::pid_t, addr: *mut c_void, data: c_int) -> c_int; - pub fn utrace(label: *const c_char, addr: *mut c_void, len: size_t) -> c_int; - pub fn pthread_getname_np(t: crate::pthread_t, name: *mut c_char, len: size_t) -> c_int; - pub fn pthread_setname_np( - t: crate::pthread_t, - name: *const c_char, - arg: *const c_void, - ) -> c_int; - pub fn pthread_attr_get_np(thread: crate::pthread_t, attr: *mut crate::pthread_attr_t) - -> c_int; - pub fn pthread_getattr_np(native: crate::pthread_t, attr: *mut crate::pthread_attr_t) -> c_int; - pub fn pthread_attr_getguardsize( - attr: *const crate::pthread_attr_t, - guardsize: *mut size_t, - ) -> c_int; - pub fn pthread_attr_setguardsize(attr: *mut crate::pthread_attr_t, guardsize: size_t) -> c_int; - pub fn pthread_attr_getstack( - attr: *const crate::pthread_attr_t, - stackaddr: *mut *mut c_void, - stacksize: *mut size_t, - ) -> c_int; - pub fn pthread_getaffinity_np( - thread: crate::pthread_t, - size: size_t, - set: *mut cpuset_t, - ) -> c_int; - pub fn pthread_setaffinity_np( - thread: crate::pthread_t, - 
size: size_t, - set: *mut cpuset_t, - ) -> c_int; - - pub fn _cpuset_create() -> *mut cpuset_t; - pub fn _cpuset_destroy(set: *mut cpuset_t); - pub fn _cpuset_clr(cpu: cpuid_t, set: *mut cpuset_t) -> c_int; - pub fn _cpuset_set(cpu: cpuid_t, set: *mut cpuset_t) -> c_int; - pub fn _cpuset_isset(cpu: cpuid_t, set: *const cpuset_t) -> c_int; - pub fn _cpuset_size(set: *const cpuset_t) -> size_t; - pub fn _cpuset_zero(set: *mut cpuset_t); - #[link_name = "__sigtimedwait50"] - pub fn sigtimedwait( - set: *const sigset_t, - info: *mut siginfo_t, - timeout: *const crate::timespec, - ) -> c_int; - pub fn sigwaitinfo(set: *const sigset_t, info: *mut siginfo_t) -> c_int; - - pub fn duplocale(base: crate::locale_t) -> crate::locale_t; - pub fn freelocale(loc: crate::locale_t); - pub fn localeconv_l(loc: crate::locale_t) -> *mut lconv; - pub fn newlocale(mask: c_int, locale: *const c_char, base: crate::locale_t) -> crate::locale_t; - #[link_name = "__settimeofday50"] - pub fn settimeofday(tv: *const crate::timeval, tz: *const c_void) -> c_int; - - pub fn dup3(src: c_int, dst: c_int, flags: c_int) -> c_int; - - pub fn kqueue1(flags: c_int) -> c_int; - - pub fn _lwp_self() -> lwpid_t; - pub fn memmem( - haystack: *const c_void, - haystacklen: size_t, - needle: *const c_void, - needlelen: size_t, - ) -> *mut c_void; - - // link.h - - pub fn dl_iterate_phdr( - callback: Option< - unsafe extern "C" fn(info: *mut dl_phdr_info, size: usize, data: *mut c_void) -> c_int, - >, - data: *mut c_void, - ) -> c_int; - - // dlfcn.h - - pub fn _dlauxinfo() -> *mut c_void; - - pub fn iconv_open(tocode: *const c_char, fromcode: *const c_char) -> iconv_t; - pub fn iconv( - cd: iconv_t, - inbuf: *mut *mut c_char, - inbytesleft: *mut size_t, - outbuf: *mut *mut c_char, - outbytesleft: *mut size_t, - ) -> size_t; - pub fn iconv_close(cd: iconv_t) -> c_int; - - pub fn timer_create( - clockid: crate::clockid_t, - sevp: *mut crate::sigevent, - timerid: *mut crate::timer_t, - ) -> c_int; - pub fn timer_delete(timerid: crate::timer_t) -> c_int; - pub fn timer_getoverrun(timerid: crate::timer_t) -> c_int; - pub fn timer_gettime(timerid: crate::timer_t, curr_value: *mut crate::itimerspec) -> c_int; - pub fn timer_settime( - timerid: crate::timer_t, - flags: c_int, - new_value: *const crate::itimerspec, - old_value: *mut crate::itimerspec, - ) -> c_int; - pub fn dlvsym( - handle: *mut c_void, - symbol: *const c_char, - version: *const c_char, - ) -> *mut c_void; - - // Added in `NetBSD` 7.0 - pub fn explicit_memset(b: *mut c_void, c: c_int, len: size_t); - pub fn consttime_memequal(a: *const c_void, b: *const c_void, len: size_t) -> c_int; - - pub fn setproctitle(fmt: *const c_char, ...); - pub fn mremap( - oldp: *mut c_void, - oldsize: size_t, - newp: *mut c_void, - newsize: size_t, - flags: c_int, - ) -> *mut c_void; - - pub fn sched_rr_get_interval(pid: crate::pid_t, t: *mut crate::timespec) -> c_int; - pub fn sched_setparam(pid: crate::pid_t, param: *const crate::sched_param) -> c_int; - pub fn sched_getparam(pid: crate::pid_t, param: *mut crate::sched_param) -> c_int; - pub fn sched_getscheduler(pid: crate::pid_t) -> c_int; - pub fn sched_setscheduler( - pid: crate::pid_t, - policy: c_int, - param: *const crate::sched_param, - ) -> c_int; - - #[link_name = "__pollts50"] - pub fn pollts( - fds: *mut crate::pollfd, - nfds: crate::nfds_t, - ts: *const crate::timespec, - sigmask: *const crate::sigset_t, - ) -> c_int; - pub fn ppoll( - fds: *mut crate::pollfd, - nfds: crate::nfds_t, - ts: *const crate::timespec, - sigmask: *const 
crate::sigset_t, - ) -> c_int; - pub fn getrandom(buf: *mut c_void, buflen: size_t, flags: c_uint) -> ssize_t; - - pub fn reboot(mode: c_int, bootstr: *mut c_char) -> c_int; - - #[link_name = "___lwp_park60"] - pub fn _lwp_park( - clock: crate::clockid_t, - flags: c_int, - ts: *const crate::timespec, - unpark: crate::lwpid_t, - hint: *const c_void, - unparkhint: *mut c_void, - ) -> c_int; - pub fn _lwp_unpark(lwp: crate::lwpid_t, hint: *const c_void) -> c_int; - pub fn _lwp_unpark_all( - targets: *const crate::lwpid_t, - ntargets: size_t, - hint: *const c_void, - ) -> c_int; - #[link_name = "__getmntinfo13"] - pub fn getmntinfo(mntbufp: *mut *mut crate::statvfs, flags: c_int) -> c_int; - pub fn getvfsstat(buf: *mut statvfs, bufsize: size_t, flags: c_int) -> c_int; - - // Added in `NetBSD` 10.0 - pub fn timerfd_create(clockid: crate::clockid_t, flags: c_int) -> c_int; - pub fn timerfd_gettime(fd: c_int, curr_value: *mut itimerspec) -> c_int; - pub fn timerfd_settime( - fd: c_int, - flags: c_int, - new_value: *const itimerspec, - old_value: *mut itimerspec, - ) -> c_int; - - pub fn qsort_r( - base: *mut c_void, - num: size_t, - size: size_t, - compar: Option c_int>, - arg: *mut c_void, - ); -} - -#[link(name = "rt")] -extern "C" { - pub fn aio_read(aiocbp: *mut aiocb) -> c_int; - pub fn aio_write(aiocbp: *mut aiocb) -> c_int; - pub fn aio_fsync(op: c_int, aiocbp: *mut aiocb) -> c_int; - pub fn aio_error(aiocbp: *const aiocb) -> c_int; - pub fn aio_return(aiocbp: *mut aiocb) -> ssize_t; - #[link_name = "__aio_suspend50"] - pub fn aio_suspend( - aiocb_list: *const *const aiocb, - nitems: c_int, - timeout: *const crate::timespec, - ) -> c_int; - pub fn aio_cancel(fd: c_int, aiocbp: *mut aiocb) -> c_int; - pub fn lio_listio( - mode: c_int, - aiocb_list: *const *mut aiocb, - nitems: c_int, - sevp: *mut sigevent, - ) -> c_int; -} - -#[link(name = "util")] -extern "C" { - #[cfg_attr(target_os = "netbsd", link_name = "__getpwent_r50")] - pub fn getpwent_r( - pwd: *mut crate::passwd, - buf: *mut c_char, - buflen: size_t, - result: *mut *mut crate::passwd, - ) -> c_int; - pub fn getgrent_r( - grp: *mut crate::group, - buf: *mut c_char, - buflen: size_t, - result: *mut *mut crate::group, - ) -> c_int; - - pub fn updwtmpx(file: *const c_char, ut: *const utmpx) -> c_int; - pub fn getlastlogx(fname: *const c_char, uid: crate::uid_t, ll: *mut lastlogx) - -> *mut lastlogx; - pub fn updlastlogx(fname: *const c_char, uid: crate::uid_t, ll: *mut lastlogx) -> c_int; - pub fn utmpxname(file: *const c_char) -> c_int; - pub fn getutxent() -> *mut utmpx; - pub fn getutxid(ut: *const utmpx) -> *mut utmpx; - pub fn getutxline(ut: *const utmpx) -> *mut utmpx; - pub fn pututxline(ut: *const utmpx) -> *mut utmpx; - pub fn setutxent(); - pub fn endutxent(); - - pub fn getutmp(ux: *const utmpx, u: *mut utmp); - pub fn getutmpx(u: *const utmp, ux: *mut utmpx); - - pub fn utpname(file: *const c_char) -> c_int; - pub fn setutent(); - pub fn endutent(); - pub fn getutent() -> *mut utmp; - - pub fn efopen(p: *const c_char, m: *const c_char) -> crate::FILE; - pub fn emalloc(n: size_t) -> *mut c_void; - pub fn ecalloc(n: size_t, c: size_t) -> *mut c_void; - pub fn erealloc(p: *mut c_void, n: size_t) -> *mut c_void; - pub fn ereallocarr(p: *mut c_void, n: size_t, s: size_t); - pub fn estrdup(s: *const c_char) -> *mut c_char; - pub fn estrndup(s: *const c_char, len: size_t) -> *mut c_char; - pub fn estrlcpy(dst: *mut c_char, src: *const c_char, len: size_t) -> size_t; - pub fn estrlcat(dst: *mut c_char, src: *const c_char, len: 
size_t) -> size_t; - pub fn estrtoi( - nptr: *const c_char, - base: c_int, - lo: crate::intmax_t, - hi: crate::intmax_t, - ) -> crate::intmax_t; - pub fn estrtou( - nptr: *const c_char, - base: c_int, - lo: crate::uintmax_t, - hi: crate::uintmax_t, - ) -> crate::uintmax_t; - pub fn easprintf(string: *mut *mut c_char, fmt: *const c_char, ...) -> c_int; - pub fn evasprintf(string: *mut *mut c_char, fmt: *const c_char, ...) -> c_int; - pub fn esetfunc( - cb: Option, - ) -> Option; - pub fn secure_path(path: *const c_char) -> c_int; - pub fn snprintb(buf: *mut c_char, buflen: size_t, fmt: *const c_char, val: u64) -> c_int; - pub fn snprintb_m( - buf: *mut c_char, - buflen: size_t, - fmt: *const c_char, - val: u64, - max: size_t, - ) -> c_int; - - pub fn getbootfile() -> *const c_char; - pub fn getbyteorder() -> c_int; - pub fn getdiskrawname(buf: *mut c_char, buflen: size_t, name: *const c_char) -> *const c_char; - pub fn getdiskcookedname( - buf: *mut c_char, - buflen: size_t, - name: *const c_char, - ) -> *const c_char; - pub fn getfsspecname(buf: *mut c_char, buflen: size_t, spec: *const c_char) -> *const c_char; - - pub fn strpct( - buf: *mut c_char, - bufsiz: size_t, - numerator: crate::uintmax_t, - denominator: crate::uintmax_t, - precision: size_t, - ) -> *mut c_char; - pub fn strspct( - buf: *mut c_char, - bufsiz: size_t, - numerator: crate::intmax_t, - denominator: crate::intmax_t, - precision: size_t, - ) -> *mut c_char; - #[link_name = "__login50"] - pub fn login(ut: *const utmp); - #[link_name = "__loginx50"] - pub fn loginx(ut: *const utmpx); - pub fn logout(line: *const c_char); - pub fn logoutx(line: *const c_char, status: c_int, tpe: c_int); - pub fn logwtmp(line: *const c_char, name: *const c_char, host: *const c_char); - pub fn logwtmpx( - line: *const c_char, - name: *const c_char, - host: *const c_char, - status: c_int, - tpe: c_int, - ); - - pub fn getxattr( - path: *const c_char, - name: *const c_char, - value: *mut c_void, - size: size_t, - ) -> ssize_t; - pub fn lgetxattr( - path: *const c_char, - name: *const c_char, - value: *mut c_void, - size: size_t, - ) -> ssize_t; - pub fn fgetxattr( - filedes: c_int, - name: *const c_char, - value: *mut c_void, - size: size_t, - ) -> ssize_t; - pub fn setxattr( - path: *const c_char, - name: *const c_char, - value: *const c_void, - size: size_t, - ) -> c_int; - pub fn lsetxattr( - path: *const c_char, - name: *const c_char, - value: *const c_void, - size: size_t, - ) -> c_int; - pub fn fsetxattr( - filedes: c_int, - name: *const c_char, - value: *const c_void, - size: size_t, - flags: c_int, - ) -> c_int; - pub fn listxattr(path: *const c_char, list: *mut c_char, size: size_t) -> ssize_t; - pub fn llistxattr(path: *const c_char, list: *mut c_char, size: size_t) -> ssize_t; - pub fn flistxattr(filedes: c_int, list: *mut c_char, size: size_t) -> ssize_t; - pub fn removexattr(path: *const c_char, name: *const c_char) -> c_int; - pub fn lremovexattr(path: *const c_char, name: *const c_char) -> c_int; - pub fn fremovexattr(fd: c_int, path: *const c_char, name: *const c_char) -> c_int; - - pub fn string_to_flags( - string_p: *mut *mut c_char, - setp: *mut c_ulong, - clrp: *mut c_ulong, - ) -> c_int; - pub fn flags_to_string(flags: c_ulong, def: *const c_char) -> c_int; - - pub fn kinfo_getvmmap(pid: crate::pid_t, cntp: *mut size_t) -> *mut kinfo_vmentry; -} - -#[link(name = "execinfo")] -extern "C" { - pub fn backtrace(addrlist: *mut *mut c_void, len: size_t) -> size_t; - pub fn backtrace_symbols(addrlist: *const *mut c_void, len: 
size_t) -> *mut *mut c_char; - pub fn backtrace_symbols_fd(addrlist: *const *mut c_void, len: size_t, fd: c_int) -> c_int; - pub fn backtrace_symbols_fmt( - addrlist: *const *mut c_void, - len: size_t, - fmt: *const c_char, - ) -> *mut *mut c_char; - pub fn backtrace_symbols_fd_fmt( - addrlist: *const *mut c_void, - len: size_t, - fd: c_int, - fmt: *const c_char, - ) -> c_int; -} - -cfg_if! { - if #[cfg(target_arch = "aarch64")] { - mod aarch64; - pub use self::aarch64::*; - } else if #[cfg(target_arch = "arm")] { - mod arm; - pub use self::arm::*; - } else if #[cfg(target_arch = "powerpc")] { - mod powerpc; - pub use self::powerpc::*; - } else if #[cfg(target_arch = "sparc64")] { - mod sparc64; - pub use self::sparc64::*; - } else if #[cfg(target_arch = "x86_64")] { - mod x86_64; - pub use self::x86_64::*; - } else if #[cfg(target_arch = "x86")] { - mod x86; - pub use self::x86::*; - } else if #[cfg(target_arch = "mips")] { - mod mips; - pub use self::mips::*; - } else if #[cfg(target_arch = "riscv64")] { - mod riscv64; - pub use self::riscv64::*; - } else { - // Unknown target_arch - } -} diff --git a/vendor/libc/src/unix/bsd/netbsdlike/netbsd/powerpc.rs b/vendor/libc/src/unix/bsd/netbsdlike/netbsd/powerpc.rs deleted file mode 100644 index f8f2d56c0d3742..00000000000000 --- a/vendor/libc/src/unix/bsd/netbsdlike/netbsd/powerpc.rs +++ /dev/null @@ -1,10 +0,0 @@ -use crate::prelude::*; -use crate::PT_FIRSTMACH; - -pub type __cpu_simple_lock_nv_t = c_int; - -pub(crate) const _ALIGNBYTES: usize = size_of::() - 1; - -pub const PT_STEP: c_int = PT_FIRSTMACH + 0; -pub const PT_GETREGS: c_int = PT_FIRSTMACH + 1; -pub const PT_SETREGS: c_int = PT_FIRSTMACH + 2; diff --git a/vendor/libc/src/unix/bsd/netbsdlike/netbsd/riscv64.rs b/vendor/libc/src/unix/bsd/netbsdlike/netbsd/riscv64.rs deleted file mode 100644 index 47240cb2818c0b..00000000000000 --- a/vendor/libc/src/unix/bsd/netbsdlike/netbsd/riscv64.rs +++ /dev/null @@ -1,77 +0,0 @@ -use PT_FIRSTMACH; - -use crate::prelude::*; - -pub type __greg_t = u64; -pub type __cpu_simple_lock_nv_t = c_int; -pub type __gregset = [__greg_t; _NGREG]; -pub type __fregset = [__freg; _NFREG]; - -s! { - pub struct mcontext_t { - pub __gregs: __gregset, - pub __fregs: __fpregset, - __spare: [crate::__greg_t; 7], - } -} - -s_no_extra_traits! 
{ - pub union __fpreg { - pub u_u64: u64, - pub u_d: c_double, - } -} - -pub(crate) const _ALIGNBYTES: usize = size_of::() - 1; - -pub const PT_GETREGS: c_int = PT_FIRSTMACH + 0; -pub const PT_SETREGS: c_int = PT_FIRSTMACH + 1; -pub const PT_GETFPREGS: c_int = PT_FIRSTMACH + 2; -pub const PT_SETFPREGS: c_int = PT_FIRSTMACH + 3; - -pub const _NGREG: usize = 32; -pub const _NFREG: usize = 33; - -pub const _REG_X1: c_int = 0; -pub const _REG_X2: c_int = 1; -pub const _REG_X3: c_int = 2; -pub const _REG_X4: c_int = 3; -pub const _REG_X5: c_int = 4; -pub const _REG_X6: c_int = 5; -pub const _REG_X7: c_int = 6; -pub const _REG_X8: c_int = 7; -pub const _REG_X9: c_int = 8; -pub const _REG_X10: c_int = 9; -pub const _REG_X11: c_int = 10; -pub const _REG_X12: c_int = 11; -pub const _REG_X13: c_int = 12; -pub const _REG_X14: c_int = 13; -pub const _REG_X15: c_int = 14; -pub const _REG_X16: c_int = 15; -pub const _REG_X17: c_int = 16; -pub const _REG_X18: c_int = 17; -pub const _REG_X19: c_int = 18; -pub const _REG_X20: c_int = 19; -pub const _REG_X21: c_int = 20; -pub const _REG_X22: c_int = 21; -pub const _REG_X23: c_int = 22; -pub const _REG_X24: c_int = 23; -pub const _REG_X25: c_int = 24; -pub const _REG_X26: c_int = 25; -pub const _REG_X27: c_int = 26; -pub const _REG_X28: c_int = 27; -pub const _REG_X29: c_int = 28; -pub const _REG_X30: c_int = 29; -pub const _REG_X31: c_int = 30; -pub const _REG_PC: c_int = 31; - -pub const _REG_RA: c_int = _REG_X1; -pub const _REG_SP: c_int = _REG_X2; -pub const _REG_GP: c_int = _REG_X3; -pub const _REG_TP: c_int = _REG_X4; -pub const _REG_S0: c_int = _REG_X8; -pub const _REG_RV: c_int = _REG_X10; -pub const _REG_A0: c_int = _REG_X10; - -pub const _REG_F0: c_int = 0; -pub const _REG_FPCSR: c_int = 32; diff --git a/vendor/libc/src/unix/bsd/netbsdlike/netbsd/sparc64.rs b/vendor/libc/src/unix/bsd/netbsdlike/netbsd/sparc64.rs deleted file mode 100644 index 91622f7eea3fab..00000000000000 --- a/vendor/libc/src/unix/bsd/netbsdlike/netbsd/sparc64.rs +++ /dev/null @@ -1,7 +0,0 @@ -use crate::prelude::*; - -pub type __cpu_simple_lock_nv_t = c_uchar; - -// should be pub(crate), but that requires Rust 1.18.0 -#[doc(hidden)] -pub const _ALIGNBYTES: usize = 0xf; diff --git a/vendor/libc/src/unix/bsd/netbsdlike/netbsd/x86.rs b/vendor/libc/src/unix/bsd/netbsdlike/netbsd/x86.rs deleted file mode 100644 index 95f55768973ca3..00000000000000 --- a/vendor/libc/src/unix/bsd/netbsdlike/netbsd/x86.rs +++ /dev/null @@ -1,5 +0,0 @@ -use crate::prelude::*; - -pub type __cpu_simple_lock_nv_t = c_uchar; - -pub(crate) const _ALIGNBYTES: usize = size_of::() - 1; diff --git a/vendor/libc/src/unix/bsd/netbsdlike/netbsd/x86_64.rs b/vendor/libc/src/unix/bsd/netbsdlike/netbsd/x86_64.rs deleted file mode 100644 index 77daa4b1e9eb28..00000000000000 --- a/vendor/libc/src/unix/bsd/netbsdlike/netbsd/x86_64.rs +++ /dev/null @@ -1,56 +0,0 @@ -use crate::prelude::*; -use crate::PT_FIRSTMACH; - -pub type c___greg_t = u64; -pub type __cpu_simple_lock_nv_t = c_uchar; - -s! 
{ - pub struct mcontext_t { - pub __gregs: [c___greg_t; 26], - pub _mc_tlsbase: c___greg_t, - pub __fpregs: [[c_char; 32]; 16], - } - - pub struct ucontext_t { - pub uc_flags: c_uint, - pub uc_link: *mut crate::ucontext_t, - pub uc_sigmask: crate::sigset_t, - pub uc_stack: crate::stack_t, - pub uc_mcontext: crate::mcontext_t, - } -} - -pub(crate) const _ALIGNBYTES: usize = size_of::() - 1; - -pub const PT_STEP: c_int = PT_FIRSTMACH + 0; -pub const PT_GETREGS: c_int = PT_FIRSTMACH + 1; -pub const PT_SETREGS: c_int = PT_FIRSTMACH + 2; -pub const PT_GETFPREGS: c_int = PT_FIRSTMACH + 3; -pub const PT_SETFPREGS: c_int = PT_FIRSTMACH + 4; - -pub const _REG_RDI: c_int = 0; -pub const _REG_RSI: c_int = 1; -pub const _REG_RDX: c_int = 2; -pub const _REG_RCX: c_int = 3; -pub const _REG_R8: c_int = 4; -pub const _REG_R9: c_int = 5; -pub const _REG_R10: c_int = 6; -pub const _REG_R11: c_int = 7; -pub const _REG_R12: c_int = 8; -pub const _REG_R13: c_int = 9; -pub const _REG_R14: c_int = 10; -pub const _REG_R15: c_int = 11; -pub const _REG_RBP: c_int = 12; -pub const _REG_RBX: c_int = 13; -pub const _REG_RAX: c_int = 14; -pub const _REG_GS: c_int = 15; -pub const _REG_FS: c_int = 16; -pub const _REG_ES: c_int = 17; -pub const _REG_DS: c_int = 18; -pub const _REG_TRAPNO: c_int = 19; -pub const _REG_ERR: c_int = 20; -pub const _REG_RIP: c_int = 21; -pub const _REG_CS: c_int = 22; -pub const _REG_RFLAGS: c_int = 23; -pub const _REG_RSP: c_int = 24; -pub const _REG_SS: c_int = 25; diff --git a/vendor/libc/src/unix/bsd/netbsdlike/openbsd/aarch64.rs b/vendor/libc/src/unix/bsd/netbsdlike/openbsd/aarch64.rs deleted file mode 100644 index e0d347fb5e6b87..00000000000000 --- a/vendor/libc/src/unix/bsd/netbsdlike/openbsd/aarch64.rs +++ /dev/null @@ -1,20 +0,0 @@ -use crate::prelude::*; - -pub type ucontext_t = sigcontext; - -s! 
{ - pub struct sigcontext { - __sc_unused: c_int, - pub sc_mask: c_int, - pub sc_sp: c_ulong, - pub sc_lr: c_ulong, - pub sc_elr: c_ulong, - pub sc_spsr: c_ulong, - pub sc_x: [c_ulong; 30], - pub sc_cookie: c_long, - } -} - -pub(crate) const _ALIGNBYTES: usize = size_of::() - 1; - -pub const _MAX_PAGE_SHIFT: u32 = 12; diff --git a/vendor/libc/src/unix/bsd/netbsdlike/openbsd/arm.rs b/vendor/libc/src/unix/bsd/netbsdlike/openbsd/arm.rs deleted file mode 100644 index 8b3f72139d86e9..00000000000000 --- a/vendor/libc/src/unix/bsd/netbsdlike/openbsd/arm.rs +++ /dev/null @@ -1,5 +0,0 @@ -use crate::prelude::*; - -pub(crate) const _ALIGNBYTES: usize = size_of::() - 1; - -pub const _MAX_PAGE_SHIFT: u32 = 12; diff --git a/vendor/libc/src/unix/bsd/netbsdlike/openbsd/mips64.rs b/vendor/libc/src/unix/bsd/netbsdlike/openbsd/mips64.rs deleted file mode 100644 index 162ceda265df91..00000000000000 --- a/vendor/libc/src/unix/bsd/netbsdlike/openbsd/mips64.rs +++ /dev/null @@ -1,4 +0,0 @@ -#[doc(hidden)] -pub const _ALIGNBYTES: usize = 7; - -pub const _MAX_PAGE_SHIFT: u32 = 14; diff --git a/vendor/libc/src/unix/bsd/netbsdlike/openbsd/mod.rs b/vendor/libc/src/unix/bsd/netbsdlike/openbsd/mod.rs deleted file mode 100644 index b28f4557f52187..00000000000000 --- a/vendor/libc/src/unix/bsd/netbsdlike/openbsd/mod.rs +++ /dev/null @@ -1,2149 +0,0 @@ -use crate::prelude::*; -use crate::unix::bsd::O_SYNC; -use crate::{cmsghdr, off_t}; - -pub type clock_t = i64; -pub type suseconds_t = c_long; -pub type dev_t = i32; -pub type sigset_t = c_uint; -pub type blksize_t = i32; -pub type fsblkcnt_t = u64; -pub type fsfilcnt_t = u64; -pub type idtype_t = c_uint; -pub type pthread_attr_t = *mut c_void; -pub type pthread_mutex_t = *mut c_void; -pub type pthread_mutexattr_t = *mut c_void; -pub type pthread_cond_t = *mut c_void; -pub type pthread_condattr_t = *mut c_void; -pub type pthread_rwlock_t = *mut c_void; -pub type pthread_rwlockattr_t = *mut c_void; -pub type pthread_spinlock_t = crate::uintptr_t; -pub type caddr_t = *mut c_char; - -// elf.h - -pub type Elf32_Addr = u32; -pub type Elf32_Half = u16; -pub type Elf32_Lword = u64; -pub type Elf32_Off = u32; -pub type Elf32_Sword = i32; -pub type Elf32_Word = u32; - -pub type Elf64_Addr = u64; -pub type Elf64_Half = u16; -pub type Elf64_Lword = u64; -pub type Elf64_Off = u64; -pub type Elf64_Sword = i32; -pub type Elf64_Sxword = i64; -pub type Elf64_Word = u32; -pub type Elf64_Xword = u64; - -// search.h - -pub type ENTRY = entry; -pub type ACTION = c_uint; - -// spawn.h -pub type posix_spawnattr_t = *mut c_void; -pub type posix_spawn_file_actions_t = *mut c_void; - -cfg_if! { - if #[cfg(target_pointer_width = "64")] { - type Elf_Addr = Elf64_Addr; - type Elf_Half = Elf64_Half; - type Elf_Phdr = Elf64_Phdr; - } else if #[cfg(target_pointer_width = "32")] { - type Elf_Addr = Elf32_Addr; - type Elf_Half = Elf32_Half; - type Elf_Phdr = Elf32_Phdr; - } -} - -s! 
{ - pub struct ip_mreqn { - pub imr_multiaddr: in_addr, - pub imr_address: in_addr, - pub imr_ifindex: c_int, - } - - pub struct glob_t { - pub gl_pathc: size_t, - pub gl_matchc: size_t, - pub gl_offs: size_t, - pub gl_flags: c_int, - pub gl_pathv: *mut *mut c_char, - __unused1: *mut c_void, - __unused2: *mut c_void, - __unused3: *mut c_void, - __unused4: *mut c_void, - __unused5: *mut c_void, - __unused6: *mut c_void, - __unused7: *mut c_void, - } - - pub struct lconv { - pub decimal_point: *mut c_char, - pub thousands_sep: *mut c_char, - pub grouping: *mut c_char, - pub int_curr_symbol: *mut c_char, - pub currency_symbol: *mut c_char, - pub mon_decimal_point: *mut c_char, - pub mon_thousands_sep: *mut c_char, - pub mon_grouping: *mut c_char, - pub positive_sign: *mut c_char, - pub negative_sign: *mut c_char, - pub int_frac_digits: c_char, - pub frac_digits: c_char, - pub p_cs_precedes: c_char, - pub p_sep_by_space: c_char, - pub n_cs_precedes: c_char, - pub n_sep_by_space: c_char, - pub p_sign_posn: c_char, - pub n_sign_posn: c_char, - pub int_p_cs_precedes: c_char, - pub int_p_sep_by_space: c_char, - pub int_n_cs_precedes: c_char, - pub int_n_sep_by_space: c_char, - pub int_p_sign_posn: c_char, - pub int_n_sign_posn: c_char, - } - - pub struct ufs_args { - pub fspec: *mut c_char, - pub export_info: export_args, - } - - pub struct mfs_args { - pub fspec: *mut c_char, - pub export_info: export_args, - // https://github.com/openbsd/src/blob/HEAD/sys/sys/types.h#L134 - pub base: *mut c_char, - pub size: c_ulong, - } - - pub struct iso_args { - pub fspec: *mut c_char, - pub export_info: export_args, - pub flags: c_int, - pub sess: c_int, - } - - pub struct nfs_args { - pub version: c_int, - pub addr: *mut crate::sockaddr, - pub addrlen: c_int, - pub sotype: c_int, - pub proto: c_int, - pub fh: *mut c_uchar, - pub fhsize: c_int, - pub flags: c_int, - pub wsize: c_int, - pub rsize: c_int, - pub readdirsize: c_int, - pub timeo: c_int, - pub retrans: c_int, - pub maxgrouplist: c_int, - pub readahead: c_int, - pub leaseterm: c_int, - pub deadthresh: c_int, - pub hostname: *mut c_char, - pub acregmin: c_int, - pub acregmax: c_int, - pub acdirmin: c_int, - pub acdirmax: c_int, - } - - pub struct msdosfs_args { - pub fspec: *mut c_char, - pub export_info: export_args, - pub uid: crate::uid_t, - pub gid: crate::gid_t, - pub mask: crate::mode_t, - pub flags: c_int, - } - - pub struct ntfs_args { - pub fspec: *mut c_char, - pub export_info: export_args, - pub uid: crate::uid_t, - pub gid: crate::gid_t, - pub mode: crate::mode_t, - pub flag: c_ulong, - } - - pub struct udf_args { - pub fspec: *mut c_char, - pub lastblock: u32, - } - - pub struct tmpfs_args { - pub ta_version: c_int, - pub ta_nodes_max: crate::ino_t, - pub ta_size_max: off_t, - pub ta_root_uid: crate::uid_t, - pub ta_root_gid: crate::gid_t, - pub ta_root_mode: crate::mode_t, - } - - pub struct fusefs_args { - pub name: *mut c_char, - pub fd: c_int, - pub max_read: c_int, - pub allow_other: c_int, - } - - pub struct xucred { - pub cr_uid: crate::uid_t, - pub cr_gid: crate::gid_t, - pub cr_ngroups: c_short, - //https://github.com/openbsd/src/blob/HEAD/sys/sys/syslimits.h#L44 - pub cr_groups: [crate::gid_t; 16], - } - - pub struct export_args { - pub ex_flags: c_int, - pub ex_root: crate::uid_t, - pub ex_anon: xucred, - pub ex_addr: *mut crate::sockaddr, - pub ex_addrlen: c_int, - pub ex_mask: *mut crate::sockaddr, - pub ex_masklen: c_int, - } - - pub struct ip_mreq { - pub imr_multiaddr: in_addr, - pub imr_interface: in_addr, - } - - pub 
struct in_addr { - pub s_addr: crate::in_addr_t, - } - - pub struct sockaddr_in { - pub sin_len: u8, - pub sin_family: crate::sa_family_t, - pub sin_port: crate::in_port_t, - pub sin_addr: crate::in_addr, - pub sin_zero: [i8; 8], - } - - pub struct splice { - pub sp_fd: c_int, - pub sp_max: off_t, - pub sp_idle: crate::timeval, - } - - pub struct kevent { - pub ident: crate::uintptr_t, - pub filter: c_short, - pub flags: c_ushort, - pub fflags: c_uint, - pub data: i64, - pub udata: *mut c_void, - } - - pub struct stat { - pub st_mode: crate::mode_t, - pub st_dev: crate::dev_t, - pub st_ino: crate::ino_t, - pub st_nlink: crate::nlink_t, - pub st_uid: crate::uid_t, - pub st_gid: crate::gid_t, - pub st_rdev: crate::dev_t, - pub st_atime: crate::time_t, - pub st_atime_nsec: c_long, - pub st_mtime: crate::time_t, - pub st_mtime_nsec: c_long, - pub st_ctime: crate::time_t, - pub st_ctime_nsec: c_long, - pub st_size: off_t, - pub st_blocks: crate::blkcnt_t, - pub st_blksize: crate::blksize_t, - pub st_flags: u32, - pub st_gen: u32, - pub st_birthtime: crate::time_t, - pub st_birthtime_nsec: c_long, - } - - pub struct statvfs { - pub f_bsize: c_ulong, - pub f_frsize: c_ulong, - pub f_blocks: crate::fsblkcnt_t, - pub f_bfree: crate::fsblkcnt_t, - pub f_bavail: crate::fsblkcnt_t, - pub f_files: crate::fsfilcnt_t, - pub f_ffree: crate::fsfilcnt_t, - pub f_favail: crate::fsfilcnt_t, - pub f_fsid: c_ulong, - pub f_flag: c_ulong, - pub f_namemax: c_ulong, - } - - pub struct addrinfo { - pub ai_flags: c_int, - pub ai_family: c_int, - pub ai_socktype: c_int, - pub ai_protocol: c_int, - pub ai_addrlen: crate::socklen_t, - pub ai_addr: *mut crate::sockaddr, - pub ai_canonname: *mut c_char, - pub ai_next: *mut crate::addrinfo, - } - - pub struct Dl_info { - pub dli_fname: *const c_char, - pub dli_fbase: *mut c_void, - pub dli_sname: *const c_char, - pub dli_saddr: *mut c_void, - } - - pub struct if_data { - pub ifi_type: c_uchar, - pub ifi_addrlen: c_uchar, - pub ifi_hdrlen: c_uchar, - pub ifi_link_state: c_uchar, - pub ifi_mtu: u32, - pub ifi_metric: u32, - pub ifi_rdomain: u32, - pub ifi_baudrate: u64, - pub ifi_ipackets: u64, - pub ifi_ierrors: u64, - pub ifi_opackets: u64, - pub ifi_oerrors: u64, - pub ifi_collisions: u64, - pub ifi_ibytes: u64, - pub ifi_obytes: u64, - pub ifi_imcasts: u64, - pub ifi_omcasts: u64, - pub ifi_iqdrops: u64, - pub ifi_oqdrops: u64, - pub ifi_noproto: u64, - pub ifi_capabilities: u32, - pub ifi_lastchange: crate::timeval, - } - - pub struct if_msghdr { - pub ifm_msglen: c_ushort, - pub ifm_version: c_uchar, - pub ifm_type: c_uchar, - pub ifm_hdrlen: c_ushort, - pub ifm_index: c_ushort, - pub ifm_tableid: c_ushort, - pub ifm_pad1: c_uchar, - pub ifm_pad2: c_uchar, - pub ifm_addrs: c_int, - pub ifm_flags: c_int, - pub ifm_xflags: c_int, - pub ifm_data: if_data, - } - - pub struct sockaddr_dl { - pub sdl_len: c_uchar, - pub sdl_family: c_uchar, - pub sdl_index: c_ushort, - pub sdl_type: c_uchar, - pub sdl_nlen: c_uchar, - pub sdl_alen: c_uchar, - pub sdl_slen: c_uchar, - pub sdl_data: [c_char; 24], - } - - pub struct sockpeercred { - pub uid: crate::uid_t, - pub gid: crate::gid_t, - pub pid: crate::pid_t, - } - - pub struct arphdr { - pub ar_hrd: u16, - pub ar_pro: u16, - pub ar_hln: u8, - pub ar_pln: u8, - pub ar_op: u16, - } - - pub struct shmid_ds { - pub shm_perm: crate::ipc_perm, - pub shm_segsz: c_int, - pub shm_lpid: crate::pid_t, - pub shm_cpid: crate::pid_t, - pub shm_nattch: c_short, - pub shm_atime: crate::time_t, - __shm_atimensec: c_long, - pub shm_dtime: 
crate::time_t, - __shm_dtimensec: c_long, - pub shm_ctime: crate::time_t, - __shm_ctimensec: c_long, - pub shm_internal: *mut c_void, - } - - // elf.h - pub struct Elf32_Phdr { - pub p_type: Elf32_Word, - pub p_offset: Elf32_Off, - pub p_vaddr: Elf32_Addr, - pub p_paddr: Elf32_Addr, - pub p_filesz: Elf32_Word, - pub p_memsz: Elf32_Word, - pub p_flags: Elf32_Word, - pub p_align: Elf32_Word, - } - - pub struct Elf64_Phdr { - pub p_type: Elf64_Word, - pub p_flags: Elf64_Word, - pub p_offset: Elf64_Off, - pub p_vaddr: Elf64_Addr, - pub p_paddr: Elf64_Addr, - pub p_filesz: Elf64_Xword, - pub p_memsz: Elf64_Xword, - pub p_align: Elf64_Xword, - } - - // link.h - - pub struct dl_phdr_info { - pub dlpi_addr: Elf_Addr, - pub dlpi_name: *const c_char, - pub dlpi_phdr: *const Elf_Phdr, - pub dlpi_phnum: Elf_Half, - } - - // sys/sysctl.h - pub struct kinfo_proc { - pub p_forw: u64, - pub p_back: u64, - pub p_paddr: u64, - pub p_addr: u64, - pub p_fd: u64, - pub p_stats: u64, - pub p_limit: u64, - pub p_vmspace: u64, - pub p_sigacts: u64, - pub p_sess: u64, - pub p_tsess: u64, - pub p_ru: u64, - pub p_eflag: i32, - pub p_exitsig: i32, - pub p_flag: i32, - pub p_pid: i32, - pub p_ppid: i32, - pub p_sid: i32, - pub p__pgid: i32, - pub p_tpgid: i32, - pub p_uid: u32, - pub p_ruid: u32, - pub p_gid: u32, - pub p_rgid: u32, - pub p_groups: [u32; KI_NGROUPS as usize], - pub p_ngroups: i16, - pub p_jobc: i16, - pub p_tdev: u32, - pub p_estcpu: u32, - pub p_rtime_sec: u32, - pub p_rtime_usec: u32, - pub p_cpticks: i32, - pub p_pctcpu: u32, - pub p_swtime: u32, - pub p_slptime: u32, - pub p_schedflags: i32, - pub p_uticks: u64, - pub p_sticks: u64, - pub p_iticks: u64, - pub p_tracep: u64, - pub p_traceflag: i32, - pub p_holdcnt: i32, - pub p_siglist: i32, - pub p_sigmask: u32, - pub p_sigignore: u32, - pub p_sigcatch: u32, - pub p_stat: i8, - pub p_priority: u8, - pub p_usrpri: u8, - pub p_nice: u8, - pub p_xstat: u16, - pub p_spare: u16, - pub p_comm: [c_char; KI_MAXCOMLEN as usize], - pub p_wmesg: [c_char; KI_WMESGLEN as usize], - pub p_wchan: u64, - pub p_login: [c_char; KI_MAXLOGNAME as usize], - pub p_vm_rssize: i32, - pub p_vm_tsize: i32, - pub p_vm_dsize: i32, - pub p_vm_ssize: i32, - pub p_uvalid: i64, - pub p_ustart_sec: u64, - pub p_ustart_usec: u32, - pub p_uutime_sec: u32, - pub p_uutime_usec: u32, - pub p_ustime_sec: u32, - pub p_ustime_usec: u32, - pub p_uru_maxrss: u64, - pub p_uru_ixrss: u64, - pub p_uru_idrss: u64, - pub p_uru_isrss: u64, - pub p_uru_minflt: u64, - pub p_uru_majflt: u64, - pub p_uru_nswap: u64, - pub p_uru_inblock: u64, - pub p_uru_oublock: u64, - pub p_uru_msgsnd: u64, - pub p_uru_msgrcv: u64, - pub p_uru_nsignals: u64, - pub p_uru_nvcsw: u64, - pub p_uru_nivcsw: u64, - pub p_uctime_sec: u32, - pub p_uctime_usec: u32, - pub p_psflags: u32, - pub p_acflag: u32, - pub p_svuid: u32, - pub p_svgid: u32, - pub p_emul: [c_char; KI_EMULNAMELEN as usize], - pub p_rlim_rss_cur: u64, - pub p_cpuid: u64, - pub p_vm_map_size: u64, - pub p_tid: i32, - pub p_rtableid: u32, - pub p_pledge: u64, - pub p_name: [c_char; KI_MAXCOMLEN as usize], - } - - pub struct kinfo_vmentry { - pub kve_start: c_ulong, - pub kve_end: c_ulong, - pub kve_guard: c_ulong, - pub kve_fspace: c_ulong, - pub kve_fspace_augment: c_ulong, - pub kve_offset: u64, - pub kve_wired_count: c_int, - pub kve_etype: c_int, - pub kve_protection: c_int, - pub kve_max_protection: c_int, - pub kve_advice: c_int, - pub kve_inheritance: c_int, - pub kve_flags: u8, - } - - pub struct ptrace_state { - pub pe_report_event: c_int, - pub 
pe_other_pid: crate::pid_t, - pub pe_tid: crate::pid_t, - } - - pub struct ptrace_thread_state { - pub pts_tid: crate::pid_t, - } - - // search.h - pub struct entry { - pub key: *mut c_char, - pub data: *mut c_void, - } - - pub struct ifreq { - pub ifr_name: [c_char; crate::IFNAMSIZ], - pub ifr_ifru: __c_anonymous_ifr_ifru, - } - - pub struct tcp_info { - pub tcpi_state: u8, - pub __tcpi_ca_state: u8, - pub __tcpi_retransmits: u8, - pub __tcpi_probes: u8, - pub __tcpi_backoff: u8, - pub tcpi_options: u8, - pub tcpi_snd_wscale: u8, - pub tcpi_rcv_wscale: u8, - pub tcpi_rto: u32, - pub __tcpi_ato: u32, - pub tcpi_snd_mss: u32, - pub tcpi_rcv_mss: u32, - pub __tcpi_unacked: u32, - pub __tcpi_sacked: u32, - pub __tcpi_lost: u32, - pub __tcpi_retrans: u32, - pub __tcpi_fackets: u32, - pub tcpi_last_data_sent: u32, - pub tcpi_last_ack_sent: u32, - pub tcpi_last_data_recv: u32, - pub tcpi_last_ack_recv: u32, - pub __tcpi_pmtu: u32, - pub __tcpi_rcv_ssthresh: u32, - pub tcpi_rtt: u32, - pub tcpi_rttvar: u32, - pub tcpi_snd_ssthresh: u32, - pub tcpi_snd_cwnd: u32, - pub __tcpi_advmss: u32, - pub __tcpi_reordering: u32, - pub __tcpi_rcv_rtt: u32, - pub tcpi_rcv_space: u32, - pub tcpi_snd_wnd: u32, - pub tcpi_snd_nxt: u32, - pub tcpi_rcv_nxt: u32, - pub tcpi_toe_tid: u32, - pub tcpi_snd_rexmitpack: u32, - pub tcpi_rcv_ooopack: u32, - pub tcpi_snd_zerowin: u32, - pub tcpi_rttmin: u32, - pub tcpi_max_sndwnd: u32, - pub tcpi_rcv_adv: u32, - pub tcpi_rcv_up: u32, - pub tcpi_snd_una: u32, - pub tcpi_snd_up: u32, - pub tcpi_snd_wl1: u32, - pub tcpi_snd_wl2: u32, - pub tcpi_snd_max: u32, - pub tcpi_ts_recent: u32, - pub tcpi_ts_recent_age: u32, - pub tcpi_rfbuf_cnt: u32, - pub tcpi_rfbuf_ts: u32, - pub tcpi_so_rcv_sb_cc: u32, - pub tcpi_so_rcv_sb_hiwat: u32, - pub tcpi_so_rcv_sb_lowat: u32, - pub tcpi_so_rcv_sb_wat: u32, - pub tcpi_so_snd_sb_cc: u32, - pub tcpi_so_snd_sb_hiwat: u32, - pub tcpi_so_snd_sb_lowat: u32, - pub tcpi_so_snd_sb_wat: u32, - } -} - -impl siginfo_t { - pub unsafe fn si_addr(&self) -> *mut c_char { - self.si_addr - } - - pub unsafe fn si_code(&self) -> c_int { - self.si_code - } - - pub unsafe fn si_errno(&self) -> c_int { - self.si_errno - } - - pub unsafe fn si_pid(&self) -> crate::pid_t { - #[repr(C)] - struct siginfo_timer { - _si_signo: c_int, - _si_code: c_int, - _si_errno: c_int, - _pad: [c_int; SI_PAD], - _pid: crate::pid_t, - } - (*(self as *const siginfo_t).cast::())._pid - } - - pub unsafe fn si_uid(&self) -> crate::uid_t { - #[repr(C)] - struct siginfo_timer { - _si_signo: c_int, - _si_code: c_int, - _si_errno: c_int, - _pad: [c_int; SI_PAD], - _pid: crate::pid_t, - _uid: crate::uid_t, - } - (*(self as *const siginfo_t).cast::())._uid - } - - pub unsafe fn si_value(&self) -> crate::sigval { - #[repr(C)] - struct siginfo_timer { - _si_signo: c_int, - _si_code: c_int, - _si_errno: c_int, - _pad: [c_int; SI_PAD], - _pid: crate::pid_t, - _uid: crate::uid_t, - value: crate::sigval, - } - (*(self as *const siginfo_t).cast::()).value - } -} - -s_no_extra_traits! 
{ - pub struct dirent { - pub d_fileno: crate::ino_t, - pub d_off: off_t, - pub d_reclen: u16, - pub d_type: u8, - pub d_namlen: u8, - __d_padding: [u8; 4], - pub d_name: [c_char; 256], - } - - pub struct sockaddr_storage { - pub ss_len: u8, - pub ss_family: crate::sa_family_t, - __ss_pad1: [u8; 6], - __ss_pad2: i64, - __ss_pad3: [u8; 240], - } - - pub struct siginfo_t { - pub si_signo: c_int, - pub si_code: c_int, - pub si_errno: c_int, - pub si_addr: *mut c_char, - #[cfg(target_pointer_width = "32")] - __pad: [u8; 112], - #[cfg(target_pointer_width = "64")] - __pad: [u8; 108], - } - - pub struct lastlog { - ll_time: crate::time_t, - ll_line: [c_char; UT_LINESIZE], - ll_host: [c_char; UT_HOSTSIZE], - } - - pub struct utmp { - pub ut_line: [c_char; UT_LINESIZE], - pub ut_name: [c_char; UT_NAMESIZE], - pub ut_host: [c_char; UT_HOSTSIZE], - pub ut_time: crate::time_t, - } - - pub union mount_info { - pub ufs_args: ufs_args, - pub mfs_args: mfs_args, - pub nfs_args: nfs_args, - pub iso_args: iso_args, - pub msdosfs_args: msdosfs_args, - pub ntfs_args: ntfs_args, - pub tmpfs_args: tmpfs_args, - align: [c_char; 160], - } - - pub union __c_anonymous_ifr_ifru { - pub ifru_addr: crate::sockaddr, - pub ifru_dstaddr: crate::sockaddr, - pub ifru_broadaddr: crate::sockaddr, - pub ifru_flags: c_short, - pub ifru_metric: c_int, - pub ifru_vnetid: i64, - pub ifru_media: u64, - pub ifru_data: crate::caddr_t, - pub ifru_index: c_uint, - } - - // This type uses the union mount_info: - pub struct statfs { - pub f_flags: u32, - pub f_bsize: u32, - pub f_iosize: u32, - pub f_blocks: u64, - pub f_bfree: u64, - pub f_bavail: i64, - pub f_files: u64, - pub f_ffree: u64, - pub f_favail: i64, - pub f_syncwrites: u64, - pub f_syncreads: u64, - pub f_asyncwrites: u64, - pub f_asyncreads: u64, - pub f_fsid: crate::fsid_t, - pub f_namemax: u32, - pub f_owner: crate::uid_t, - pub f_ctime: u64, - pub f_fstypename: [c_char; 16], - pub f_mntonname: [c_char; 90], - pub f_mntfromname: [c_char; 90], - pub f_mntfromspec: [c_char; 90], - pub mount_info: mount_info, - } -} - -cfg_if! 
{ - if #[cfg(feature = "extra_traits")] { - impl PartialEq for dirent { - fn eq(&self, other: &dirent) -> bool { - self.d_fileno == other.d_fileno - && self.d_off == other.d_off - && self.d_reclen == other.d_reclen - && self.d_type == other.d_type - && self.d_namlen == other.d_namlen - && self - .d_name - .iter() - .zip(other.d_name.iter()) - .all(|(a, b)| a == b) - } - } - - impl Eq for dirent {} - - impl hash::Hash for dirent { - fn hash(&self, state: &mut H) { - self.d_fileno.hash(state); - self.d_off.hash(state); - self.d_reclen.hash(state); - self.d_type.hash(state); - self.d_namlen.hash(state); - self.d_name.hash(state); - } - } - - impl PartialEq for sockaddr_storage { - fn eq(&self, other: &sockaddr_storage) -> bool { - self.ss_len == other.ss_len && self.ss_family == other.ss_family - } - } - - impl Eq for sockaddr_storage {} - - impl hash::Hash for sockaddr_storage { - fn hash(&self, state: &mut H) { - self.ss_len.hash(state); - self.ss_family.hash(state); - } - } - - impl PartialEq for siginfo_t { - fn eq(&self, other: &siginfo_t) -> bool { - self.si_signo == other.si_signo - && self.si_code == other.si_code - && self.si_errno == other.si_errno - && self.si_addr == other.si_addr - } - } - - impl Eq for siginfo_t {} - - impl hash::Hash for siginfo_t { - fn hash(&self, state: &mut H) { - self.si_signo.hash(state); - self.si_code.hash(state); - self.si_errno.hash(state); - self.si_addr.hash(state); - } - } - - impl PartialEq for lastlog { - fn eq(&self, other: &lastlog) -> bool { - self.ll_time == other.ll_time - && self - .ll_line - .iter() - .zip(other.ll_line.iter()) - .all(|(a, b)| a == b) - && self - .ll_host - .iter() - .zip(other.ll_host.iter()) - .all(|(a, b)| a == b) - } - } - - impl Eq for lastlog {} - - impl hash::Hash for lastlog { - fn hash(&self, state: &mut H) { - self.ll_time.hash(state); - self.ll_line.hash(state); - self.ll_host.hash(state); - } - } - - impl PartialEq for utmp { - fn eq(&self, other: &utmp) -> bool { - self.ut_time == other.ut_time - && self - .ut_line - .iter() - .zip(other.ut_line.iter()) - .all(|(a, b)| a == b) - && self - .ut_name - .iter() - .zip(other.ut_name.iter()) - .all(|(a, b)| a == b) - && self - .ut_host - .iter() - .zip(other.ut_host.iter()) - .all(|(a, b)| a == b) - } - } - - impl Eq for utmp {} - - impl hash::Hash for utmp { - fn hash(&self, state: &mut H) { - self.ut_line.hash(state); - self.ut_name.hash(state); - self.ut_host.hash(state); - self.ut_time.hash(state); - } - } - - impl PartialEq for mount_info { - fn eq(&self, other: &mount_info) -> bool { - unsafe { - self.align - .iter() - .zip(other.align.iter()) - .all(|(a, b)| a == b) - } - } - } - - impl Eq for mount_info {} - - impl hash::Hash for mount_info { - fn hash(&self, state: &mut H) { - unsafe { self.align.hash(state) }; - } - } - - impl PartialEq for __c_anonymous_ifr_ifru { - fn eq(&self, other: &__c_anonymous_ifr_ifru) -> bool { - unsafe { - self.ifru_addr == other.ifru_addr - && self.ifru_dstaddr == other.ifru_dstaddr - && self.ifru_broadaddr == other.ifru_broadaddr - && self.ifru_flags == other.ifru_flags - && self.ifru_metric == other.ifru_metric - && self.ifru_vnetid == other.ifru_vnetid - && self.ifru_media == other.ifru_media - && self.ifru_data == other.ifru_data - && self.ifru_index == other.ifru_index - } - } - } - - impl Eq for __c_anonymous_ifr_ifru {} - - impl hash::Hash for __c_anonymous_ifr_ifru { - fn hash(&self, state: &mut H) { - unsafe { - self.ifru_addr.hash(state); - self.ifru_dstaddr.hash(state); - self.ifru_broadaddr.hash(state); - 
self.ifru_flags.hash(state); - self.ifru_metric.hash(state); - self.ifru_vnetid.hash(state); - self.ifru_media.hash(state); - self.ifru_data.hash(state); - self.ifru_index.hash(state); - } - } - } - - impl PartialEq for statfs { - fn eq(&self, other: &statfs) -> bool { - self.f_flags == other.f_flags - && self.f_bsize == other.f_bsize - && self.f_iosize == other.f_iosize - && self.f_blocks == other.f_blocks - && self.f_bfree == other.f_bfree - && self.f_bavail == other.f_bavail - && self.f_files == other.f_files - && self.f_ffree == other.f_ffree - && self.f_favail == other.f_favail - && self.f_syncwrites == other.f_syncwrites - && self.f_syncreads == other.f_syncreads - && self.f_asyncwrites == other.f_asyncwrites - && self.f_asyncreads == other.f_asyncreads - && self.f_fsid == other.f_fsid - && self.f_namemax == other.f_namemax - && self.f_owner == other.f_owner - && self.f_ctime == other.f_ctime - && self - .f_fstypename - .iter() - .zip(other.f_fstypename.iter()) - .all(|(a, b)| a == b) - && self - .f_mntonname - .iter() - .zip(other.f_mntonname.iter()) - .all(|(a, b)| a == b) - && self - .f_mntfromname - .iter() - .zip(other.f_mntfromname.iter()) - .all(|(a, b)| a == b) - && self - .f_mntfromspec - .iter() - .zip(other.f_mntfromspec.iter()) - .all(|(a, b)| a == b) - && self.mount_info == other.mount_info - } - } - - impl Eq for statfs {} - - impl hash::Hash for statfs { - fn hash(&self, state: &mut H) { - self.f_flags.hash(state); - self.f_bsize.hash(state); - self.f_iosize.hash(state); - self.f_blocks.hash(state); - self.f_bfree.hash(state); - self.f_bavail.hash(state); - self.f_files.hash(state); - self.f_ffree.hash(state); - self.f_favail.hash(state); - self.f_syncwrites.hash(state); - self.f_syncreads.hash(state); - self.f_asyncwrites.hash(state); - self.f_asyncreads.hash(state); - self.f_fsid.hash(state); - self.f_namemax.hash(state); - self.f_owner.hash(state); - self.f_ctime.hash(state); - self.f_fstypename.hash(state); - self.f_mntonname.hash(state); - self.f_mntfromname.hash(state); - self.f_mntfromspec.hash(state); - self.mount_info.hash(state); - } - } - } -} - -pub const UT_NAMESIZE: usize = 32; -pub const UT_LINESIZE: usize = 8; -pub const UT_HOSTSIZE: usize = 256; - -pub const O_CLOEXEC: c_int = 0x10000; -pub const O_DIRECTORY: c_int = 0x20000; -pub const O_RSYNC: c_int = O_SYNC; - -pub const MS_SYNC: c_int = 0x0002; -pub const MS_INVALIDATE: c_int = 0x0004; - -pub const POLLNORM: c_short = crate::POLLRDNORM; - -pub const ENOATTR: c_int = 83; -pub const EILSEQ: c_int = 84; -pub const EOVERFLOW: c_int = 87; -pub const ECANCELED: c_int = 88; -pub const EIDRM: c_int = 89; -pub const ENOMSG: c_int = 90; -pub const ENOTSUP: c_int = 91; -pub const EBADMSG: c_int = 92; -pub const ENOTRECOVERABLE: c_int = 93; -pub const EOWNERDEAD: c_int = 94; -pub const EPROTO: c_int = 95; -pub const ELAST: c_int = 95; - -pub const F_DUPFD_CLOEXEC: c_int = 10; - -pub const UTIME_OMIT: c_long = -1; -pub const UTIME_NOW: c_long = -2; - -pub const AT_FDCWD: c_int = -100; -pub const AT_EACCESS: c_int = 0x01; -pub const AT_SYMLINK_NOFOLLOW: c_int = 0x02; -pub const AT_SYMLINK_FOLLOW: c_int = 0x04; -pub const AT_REMOVEDIR: c_int = 0x08; - -pub const AT_NULL: c_int = 0; -pub const AT_IGNORE: c_int = 1; -pub const AT_PAGESZ: c_int = 6; -pub const AT_HWCAP: c_int = 25; -pub const AT_HWCAP2: c_int = 26; - -#[deprecated(since = "0.2.64", note = "Not stable across OS versions")] -pub const RLIM_NLIMITS: c_int = 9; - -pub const SO_TIMESTAMP: c_int = 0x0800; -pub const SO_SNDTIMEO: c_int = 0x1005; -pub const 
SO_RCVTIMEO: c_int = 0x1006; -pub const SO_BINDANY: c_int = 0x1000; -pub const SO_NETPROC: c_int = 0x1020; -pub const SO_RTABLE: c_int = 0x1021; -pub const SO_PEERCRED: c_int = 0x1022; -pub const SO_SPLICE: c_int = 0x1023; -pub const SO_DOMAIN: c_int = 0x1024; -pub const SO_PROTOCOL: c_int = 0x1025; - -// sys/netinet/in.h -// Protocols (RFC 1700) -// NOTE: These are in addition to the constants defined in src/unix/mod.rs - -// IPPROTO_IP defined in src/unix/mod.rs -/// Hop-by-hop option header -pub const IPPROTO_HOPOPTS: c_int = 0; -// IPPROTO_ICMP defined in src/unix/mod.rs -/// group mgmt protocol -pub const IPPROTO_IGMP: c_int = 2; -/// gateway^2 (deprecated) -pub const IPPROTO_GGP: c_int = 3; -/// for compatibility -pub const IPPROTO_IPIP: c_int = 4; -// IPPROTO_TCP defined in src/unix/mod.rs -/// exterior gateway protocol -pub const IPPROTO_EGP: c_int = 8; -/// pup -pub const IPPROTO_PUP: c_int = 12; -// IPPROTO_UDP defined in src/unix/mod.rs -/// xns idp -pub const IPPROTO_IDP: c_int = 22; -/// tp-4 w/ class negotiation -pub const IPPROTO_TP: c_int = 29; -// IPPROTO_IPV6 defined in src/unix/mod.rs -/// IP6 routing header -pub const IPPROTO_ROUTING: c_int = 43; -/// IP6 fragmentation header -pub const IPPROTO_FRAGMENT: c_int = 44; -/// resource reservation -pub const IPPROTO_RSVP: c_int = 46; -/// General Routing Encap. -pub const IPPROTO_GRE: c_int = 47; -/// IP6 Encap Sec. Payload -pub const IPPROTO_ESP: c_int = 50; -/// IP6 Auth Header -pub const IPPROTO_AH: c_int = 51; -/// IP Mobility RFC 2004 -pub const IPPROTO_MOBILE: c_int = 55; -// IPPROTO_ICMPV6 defined in src/unix/mod.rs -/// IP6 no next header -pub const IPPROTO_NONE: c_int = 59; -/// IP6 destination option -pub const IPPROTO_DSTOPTS: c_int = 60; -/// ISO cnlp -pub const IPPROTO_EON: c_int = 80; -/// Ethernet-in-IP -pub const IPPROTO_ETHERIP: c_int = 97; -/// encapsulation header -pub const IPPROTO_ENCAP: c_int = 98; -/// Protocol indep. multicast -pub const IPPROTO_PIM: c_int = 103; -/// IP Payload Comp. 
Protocol -pub const IPPROTO_IPCOMP: c_int = 108; -/// CARP -pub const IPPROTO_CARP: c_int = 112; -/// unicast MPLS packet -pub const IPPROTO_MPLS: c_int = 137; -/// PFSYNC -pub const IPPROTO_PFSYNC: c_int = 240; -pub const IPPROTO_MAX: c_int = 256; - -// Only used internally, so it can be outside the range of valid IP protocols -pub const IPPROTO_DIVERT: c_int = 258; - -pub const IP_RECVDSTADDR: c_int = 7; -pub const IP_SENDSRCADDR: c_int = IP_RECVDSTADDR; -pub const IP_RECVIF: c_int = 30; - -// sys/netinet/in.h -pub const TCP_MD5SIG: c_int = 0x04; -pub const TCP_NOPUSH: c_int = 0x10; - -pub const MSG_WAITFORONE: c_int = 0x1000; - -pub const AF_ECMA: c_int = 8; -pub const AF_ROUTE: c_int = 17; -pub const AF_ENCAP: c_int = 28; -pub const AF_SIP: c_int = 29; -pub const AF_KEY: c_int = 30; -pub const pseudo_AF_HDRCMPLT: c_int = 31; -pub const AF_BLUETOOTH: c_int = 32; -pub const AF_MPLS: c_int = 33; -pub const pseudo_AF_PFLOW: c_int = 34; -pub const pseudo_AF_PIPEX: c_int = 35; -pub const NET_RT_DUMP: c_int = 1; -pub const NET_RT_FLAGS: c_int = 2; -pub const NET_RT_IFLIST: c_int = 3; -pub const NET_RT_STATS: c_int = 4; -pub const NET_RT_TABLE: c_int = 5; -pub const NET_RT_IFNAMES: c_int = 6; -#[doc(hidden)] -#[deprecated( - since = "0.2.95", - note = "Possibly increasing over the releases and might not be so used in the field" -)] -pub const NET_RT_MAXID: c_int = 7; - -pub const IPV6_JOIN_GROUP: c_int = 12; -pub const IPV6_LEAVE_GROUP: c_int = 13; - -pub const PF_ROUTE: c_int = AF_ROUTE; -pub const PF_ECMA: c_int = AF_ECMA; -pub const PF_ENCAP: c_int = AF_ENCAP; -pub const PF_SIP: c_int = AF_SIP; -pub const PF_KEY: c_int = AF_KEY; -pub const PF_BPF: c_int = pseudo_AF_HDRCMPLT; -pub const PF_BLUETOOTH: c_int = AF_BLUETOOTH; -pub const PF_MPLS: c_int = AF_MPLS; -pub const PF_PFLOW: c_int = pseudo_AF_PFLOW; -pub const PF_PIPEX: c_int = pseudo_AF_PIPEX; - -pub const SCM_TIMESTAMP: c_int = 0x04; - -pub const O_DSYNC: c_int = 128; - -pub const MAP_RENAME: c_int = 0x0000; -pub const MAP_NORESERVE: c_int = 0x0000; -pub const MAP_HASSEMAPHORE: c_int = 0x0000; -pub const MAP_TRYFIXED: c_int = 0; - -pub const EIPSEC: c_int = 82; -pub const ENOMEDIUM: c_int = 85; -pub const EMEDIUMTYPE: c_int = 86; - -pub const EAI_BADFLAGS: c_int = -1; -pub const EAI_NONAME: c_int = -2; -pub const EAI_AGAIN: c_int = -3; -pub const EAI_FAIL: c_int = -4; -pub const EAI_NODATA: c_int = -5; -pub const EAI_FAMILY: c_int = -6; -pub const EAI_SOCKTYPE: c_int = -7; -pub const EAI_SERVICE: c_int = -8; -pub const EAI_MEMORY: c_int = -10; -pub const EAI_SYSTEM: c_int = -11; -pub const EAI_OVERFLOW: c_int = -14; - -pub const RUSAGE_THREAD: c_int = 1; - -pub const MAP_COPY: c_int = 0x0002; -pub const MAP_NOEXTEND: c_int = 0x0000; - -pub const _PC_LINK_MAX: c_int = 1; -pub const _PC_MAX_CANON: c_int = 2; -pub const _PC_MAX_INPUT: c_int = 3; -pub const _PC_NAME_MAX: c_int = 4; -pub const _PC_PATH_MAX: c_int = 5; -pub const _PC_PIPE_BUF: c_int = 6; -pub const _PC_CHOWN_RESTRICTED: c_int = 7; -pub const _PC_NO_TRUNC: c_int = 8; -pub const _PC_VDISABLE: c_int = 9; -pub const _PC_2_SYMLINKS: c_int = 10; -pub const _PC_ALLOC_SIZE_MIN: c_int = 11; -pub const _PC_ASYNC_IO: c_int = 12; -pub const _PC_FILESIZEBITS: c_int = 13; -pub const _PC_PRIO_IO: c_int = 14; -pub const _PC_REC_INCR_XFER_SIZE: c_int = 15; -pub const _PC_REC_MAX_XFER_SIZE: c_int = 16; -pub const _PC_REC_MIN_XFER_SIZE: c_int = 17; -pub const _PC_REC_XFER_ALIGN: c_int = 18; -pub const _PC_SYMLINK_MAX: c_int = 19; -pub const _PC_SYNC_IO: c_int = 20; -pub const 
_PC_TIMESTAMP_RESOLUTION: c_int = 21; - -pub const _CS_PATH: c_int = 1; - -pub const _SC_CLK_TCK: c_int = 3; -pub const _SC_SEM_NSEMS_MAX: c_int = 31; -pub const _SC_SEM_VALUE_MAX: c_int = 32; -pub const _SC_HOST_NAME_MAX: c_int = 33; -pub const _SC_MONOTONIC_CLOCK: c_int = 34; -pub const _SC_2_PBS: c_int = 35; -pub const _SC_2_PBS_ACCOUNTING: c_int = 36; -pub const _SC_2_PBS_CHECKPOINT: c_int = 37; -pub const _SC_2_PBS_LOCATE: c_int = 38; -pub const _SC_2_PBS_MESSAGE: c_int = 39; -pub const _SC_2_PBS_TRACK: c_int = 40; -pub const _SC_ADVISORY_INFO: c_int = 41; -pub const _SC_AIO_LISTIO_MAX: c_int = 42; -pub const _SC_AIO_MAX: c_int = 43; -pub const _SC_AIO_PRIO_DELTA_MAX: c_int = 44; -pub const _SC_ASYNCHRONOUS_IO: c_int = 45; -pub const _SC_ATEXIT_MAX: c_int = 46; -pub const _SC_BARRIERS: c_int = 47; -pub const _SC_CLOCK_SELECTION: c_int = 48; -pub const _SC_CPUTIME: c_int = 49; -pub const _SC_DELAYTIMER_MAX: c_int = 50; -pub const _SC_IOV_MAX: c_int = 51; -pub const _SC_IPV6: c_int = 52; -pub const _SC_MAPPED_FILES: c_int = 53; -pub const _SC_MEMLOCK: c_int = 54; -pub const _SC_MEMLOCK_RANGE: c_int = 55; -pub const _SC_MEMORY_PROTECTION: c_int = 56; -pub const _SC_MESSAGE_PASSING: c_int = 57; -pub const _SC_MQ_OPEN_MAX: c_int = 58; -pub const _SC_MQ_PRIO_MAX: c_int = 59; -pub const _SC_PRIORITIZED_IO: c_int = 60; -pub const _SC_PRIORITY_SCHEDULING: c_int = 61; -pub const _SC_RAW_SOCKETS: c_int = 62; -pub const _SC_READER_WRITER_LOCKS: c_int = 63; -pub const _SC_REALTIME_SIGNALS: c_int = 64; -pub const _SC_REGEXP: c_int = 65; -pub const _SC_RTSIG_MAX: c_int = 66; -pub const _SC_SEMAPHORES: c_int = 67; -pub const _SC_SHARED_MEMORY_OBJECTS: c_int = 68; -pub const _SC_SHELL: c_int = 69; -pub const _SC_SIGQUEUE_MAX: c_int = 70; -pub const _SC_SPAWN: c_int = 71; -pub const _SC_SPIN_LOCKS: c_int = 72; -pub const _SC_SPORADIC_SERVER: c_int = 73; -pub const _SC_SS_REPL_MAX: c_int = 74; -pub const _SC_SYNCHRONIZED_IO: c_int = 75; -pub const _SC_SYMLOOP_MAX: c_int = 76; -pub const _SC_THREAD_ATTR_STACKADDR: c_int = 77; -pub const _SC_THREAD_ATTR_STACKSIZE: c_int = 78; -pub const _SC_THREAD_CPUTIME: c_int = 79; -pub const _SC_THREAD_DESTRUCTOR_ITERATIONS: c_int = 80; -pub const _SC_THREAD_KEYS_MAX: c_int = 81; -pub const _SC_THREAD_PRIO_INHERIT: c_int = 82; -pub const _SC_THREAD_PRIO_PROTECT: c_int = 83; -pub const _SC_THREAD_PRIORITY_SCHEDULING: c_int = 84; -pub const _SC_THREAD_PROCESS_SHARED: c_int = 85; -pub const _SC_THREAD_ROBUST_PRIO_INHERIT: c_int = 86; -pub const _SC_THREAD_ROBUST_PRIO_PROTECT: c_int = 87; -pub const _SC_THREAD_SPORADIC_SERVER: c_int = 88; -pub const _SC_THREAD_STACK_MIN: c_int = 89; -pub const _SC_THREAD_THREADS_MAX: c_int = 90; -pub const _SC_THREADS: c_int = 91; -pub const _SC_TIMEOUTS: c_int = 92; -pub const _SC_TIMER_MAX: c_int = 93; -pub const _SC_TIMERS: c_int = 94; -pub const _SC_TRACE: c_int = 95; -pub const _SC_TRACE_EVENT_FILTER: c_int = 96; -pub const _SC_TRACE_EVENT_NAME_MAX: c_int = 97; -pub const _SC_TRACE_INHERIT: c_int = 98; -pub const _SC_TRACE_LOG: c_int = 99; -pub const _SC_GETGR_R_SIZE_MAX: c_int = 100; -pub const _SC_GETPW_R_SIZE_MAX: c_int = 101; -pub const _SC_LOGIN_NAME_MAX: c_int = 102; -pub const _SC_THREAD_SAFE_FUNCTIONS: c_int = 103; -pub const _SC_TRACE_NAME_MAX: c_int = 104; -pub const _SC_TRACE_SYS_MAX: c_int = 105; -pub const _SC_TRACE_USER_EVENT_MAX: c_int = 106; -pub const _SC_TTY_NAME_MAX: c_int = 107; -pub const _SC_TYPED_MEMORY_OBJECTS: c_int = 108; -pub const _SC_V6_ILP32_OFF32: c_int = 109; -pub const _SC_V6_ILP32_OFFBIG: c_int = 
110; -pub const _SC_V6_LP64_OFF64: c_int = 111; -pub const _SC_V6_LPBIG_OFFBIG: c_int = 112; -pub const _SC_V7_ILP32_OFF32: c_int = 113; -pub const _SC_V7_ILP32_OFFBIG: c_int = 114; -pub const _SC_V7_LP64_OFF64: c_int = 115; -pub const _SC_V7_LPBIG_OFFBIG: c_int = 116; -pub const _SC_XOPEN_CRYPT: c_int = 117; -pub const _SC_XOPEN_ENH_I18N: c_int = 118; -pub const _SC_XOPEN_LEGACY: c_int = 119; -pub const _SC_XOPEN_REALTIME: c_int = 120; -pub const _SC_XOPEN_REALTIME_THREADS: c_int = 121; -pub const _SC_XOPEN_STREAMS: c_int = 122; -pub const _SC_XOPEN_UNIX: c_int = 123; -pub const _SC_XOPEN_UUCP: c_int = 124; -pub const _SC_XOPEN_VERSION: c_int = 125; -pub const _SC_PHYS_PAGES: c_int = 500; -pub const _SC_AVPHYS_PAGES: c_int = 501; -pub const _SC_NPROCESSORS_CONF: c_int = 502; -pub const _SC_NPROCESSORS_ONLN: c_int = 503; - -pub const FD_SETSIZE: usize = 1024; - -pub const SCHED_FIFO: c_int = 1; -pub const SCHED_OTHER: c_int = 2; -pub const SCHED_RR: c_int = 3; - -pub const ST_NOSUID: c_ulong = 2; - -pub const PTHREAD_MUTEX_INITIALIZER: pthread_mutex_t = ptr::null_mut(); -pub const PTHREAD_COND_INITIALIZER: pthread_cond_t = ptr::null_mut(); -pub const PTHREAD_RWLOCK_INITIALIZER: pthread_rwlock_t = ptr::null_mut(); - -pub const PTHREAD_MUTEX_ERRORCHECK: c_int = 1; -pub const PTHREAD_MUTEX_RECURSIVE: c_int = 2; -pub const PTHREAD_MUTEX_NORMAL: c_int = 3; -pub const PTHREAD_MUTEX_STRICT_NP: c_int = 4; -pub const PTHREAD_MUTEX_DEFAULT: c_int = PTHREAD_MUTEX_STRICT_NP; - -pub const EVFILT_READ: i16 = -1; -pub const EVFILT_WRITE: i16 = -2; -pub const EVFILT_AIO: i16 = -3; -pub const EVFILT_VNODE: i16 = -4; -pub const EVFILT_PROC: i16 = -5; -pub const EVFILT_SIGNAL: i16 = -6; -pub const EVFILT_TIMER: i16 = -7; -pub const EVFILT_DEVICE: i16 = -8; -pub const EVFILT_EXCEPT: i16 = -9; - -pub const EV_ADD: u16 = 0x1; -pub const EV_DELETE: u16 = 0x2; -pub const EV_ENABLE: u16 = 0x4; -pub const EV_DISABLE: u16 = 0x8; -pub const EV_ONESHOT: u16 = 0x10; -pub const EV_CLEAR: u16 = 0x20; -pub const EV_RECEIPT: u16 = 0x40; -pub const EV_DISPATCH: u16 = 0x80; -pub const EV_FLAG1: u16 = 0x2000; -pub const EV_ERROR: u16 = 0x4000; -pub const EV_EOF: u16 = 0x8000; - -#[deprecated(since = "0.2.113", note = "Not stable across OS versions")] -pub const EV_SYSFLAGS: u16 = 0xf800; - -pub const NOTE_LOWAT: u32 = 0x00000001; -pub const NOTE_EOF: u32 = 0x00000002; -pub const NOTE_OOB: u32 = 0x00000004; -pub const NOTE_DELETE: u32 = 0x00000001; -pub const NOTE_WRITE: u32 = 0x00000002; -pub const NOTE_EXTEND: u32 = 0x00000004; -pub const NOTE_ATTRIB: u32 = 0x00000008; -pub const NOTE_LINK: u32 = 0x00000010; -pub const NOTE_RENAME: u32 = 0x00000020; -pub const NOTE_REVOKE: u32 = 0x00000040; -pub const NOTE_TRUNCATE: u32 = 0x00000080; -pub const NOTE_EXIT: u32 = 0x80000000; -pub const NOTE_FORK: u32 = 0x40000000; -pub const NOTE_EXEC: u32 = 0x20000000; -pub const NOTE_PDATAMASK: u32 = 0x000fffff; -pub const NOTE_PCTRLMASK: u32 = 0xf0000000; -pub const NOTE_TRACK: u32 = 0x00000001; -pub const NOTE_TRACKERR: u32 = 0x00000002; -pub const NOTE_CHILD: u32 = 0x00000004; -pub const NOTE_CHANGE: u32 = 0x00000001; - -pub const TMP_MAX: c_uint = 0x7fffffff; - -pub const AI_PASSIVE: c_int = 1; -pub const AI_CANONNAME: c_int = 2; -pub const AI_NUMERICHOST: c_int = 4; -pub const AI_EXT: c_int = 8; -pub const AI_NUMERICSERV: c_int = 16; -pub const AI_FQDN: c_int = 32; -pub const AI_ADDRCONFIG: c_int = 64; - -pub const NI_NUMERICHOST: c_int = 1; -pub const NI_NUMERICSERV: c_int = 2; -pub const NI_NOFQDN: c_int = 4; -pub const NI_NAMEREQD: 
c_int = 8; -pub const NI_DGRAM: c_int = 16; - -pub const NI_MAXHOST: size_t = 256; - -pub const RTLD_LOCAL: c_int = 0; - -pub const CTL_MAXNAME: c_int = 12; - -pub const CTLTYPE_NODE: c_int = 1; -pub const CTLTYPE_INT: c_int = 2; -pub const CTLTYPE_STRING: c_int = 3; -pub const CTLTYPE_QUAD: c_int = 4; -pub const CTLTYPE_STRUCT: c_int = 5; - -pub const CTL_UNSPEC: c_int = 0; -pub const CTL_KERN: c_int = 1; -pub const CTL_VM: c_int = 2; -pub const CTL_FS: c_int = 3; -pub const CTL_NET: c_int = 4; -pub const CTL_DEBUG: c_int = 5; -pub const CTL_HW: c_int = 6; -pub const CTL_MACHDEP: c_int = 7; -pub const CTL_DDB: c_int = 9; -pub const CTL_VFS: c_int = 10; -pub const CTL_MAXID: c_int = 11; - -pub const HW_NCPUONLINE: c_int = 25; - -pub const KERN_OSTYPE: c_int = 1; -pub const KERN_OSRELEASE: c_int = 2; -pub const KERN_OSREV: c_int = 3; -pub const KERN_VERSION: c_int = 4; -pub const KERN_MAXVNODES: c_int = 5; -pub const KERN_MAXPROC: c_int = 6; -pub const KERN_MAXFILES: c_int = 7; -pub const KERN_ARGMAX: c_int = 8; -pub const KERN_SECURELVL: c_int = 9; -pub const KERN_HOSTNAME: c_int = 10; -pub const KERN_HOSTID: c_int = 11; -pub const KERN_CLOCKRATE: c_int = 12; -pub const KERN_PROF: c_int = 16; -pub const KERN_POSIX1: c_int = 17; -pub const KERN_NGROUPS: c_int = 18; -pub const KERN_JOB_CONTROL: c_int = 19; -pub const KERN_SAVED_IDS: c_int = 20; -pub const KERN_BOOTTIME: c_int = 21; -pub const KERN_DOMAINNAME: c_int = 22; -pub const KERN_MAXPARTITIONS: c_int = 23; -pub const KERN_RAWPARTITION: c_int = 24; -pub const KERN_MAXTHREAD: c_int = 25; -pub const KERN_NTHREADS: c_int = 26; -pub const KERN_OSVERSION: c_int = 27; -pub const KERN_SOMAXCONN: c_int = 28; -pub const KERN_SOMINCONN: c_int = 29; -#[deprecated(since = "0.2.71", note = "Removed in OpenBSD 6.0")] -pub const KERN_USERMOUNT: c_int = 30; -pub const KERN_NOSUIDCOREDUMP: c_int = 32; -pub const KERN_FSYNC: c_int = 33; -pub const KERN_SYSVMSG: c_int = 34; -pub const KERN_SYSVSEM: c_int = 35; -pub const KERN_SYSVSHM: c_int = 36; -#[deprecated(since = "0.2.71", note = "Removed in OpenBSD 6.0")] -pub const KERN_ARND: c_int = 37; -pub const KERN_MSGBUFSIZE: c_int = 38; -pub const KERN_MALLOCSTATS: c_int = 39; -pub const KERN_CPTIME: c_int = 40; -pub const KERN_NCHSTATS: c_int = 41; -pub const KERN_FORKSTAT: c_int = 42; -pub const KERN_NSELCOLL: c_int = 43; -pub const KERN_TTY: c_int = 44; -pub const KERN_CCPU: c_int = 45; -pub const KERN_FSCALE: c_int = 46; -pub const KERN_NPROCS: c_int = 47; -pub const KERN_MSGBUF: c_int = 48; -pub const KERN_POOL: c_int = 49; -pub const KERN_STACKGAPRANDOM: c_int = 50; -pub const KERN_SYSVIPC_INFO: c_int = 51; -pub const KERN_SPLASSERT: c_int = 54; -pub const KERN_PROC_ARGS: c_int = 55; -pub const KERN_NFILES: c_int = 56; -pub const KERN_TTYCOUNT: c_int = 57; -pub const KERN_NUMVNODES: c_int = 58; -pub const KERN_MBSTAT: c_int = 59; -pub const KERN_SEMINFO: c_int = 61; -pub const KERN_SHMINFO: c_int = 62; -pub const KERN_INTRCNT: c_int = 63; -pub const KERN_WATCHDOG: c_int = 64; -pub const KERN_PROC: c_int = 66; -pub const KERN_MAXCLUSTERS: c_int = 67; -pub const KERN_EVCOUNT: c_int = 68; -pub const KERN_TIMECOUNTER: c_int = 69; -pub const KERN_MAXLOCKSPERUID: c_int = 70; -pub const KERN_CPTIME2: c_int = 71; -pub const KERN_CACHEPCT: c_int = 72; -pub const KERN_FILE: c_int = 73; -pub const KERN_CONSDEV: c_int = 75; -pub const KERN_NETLIVELOCKS: c_int = 76; -pub const KERN_POOL_DEBUG: c_int = 77; -pub const KERN_PROC_CWD: c_int = 78; -pub const KERN_PROC_NOBROADCASTKILL: c_int = 79; -pub const 
KERN_PROC_VMMAP: c_int = 80; -pub const KERN_GLOBAL_PTRACE: c_int = 81; -pub const KERN_CONSBUFSIZE: c_int = 82; -pub const KERN_CONSBUF: c_int = 83; -pub const KERN_AUDIO: c_int = 84; -pub const KERN_CPUSTATS: c_int = 85; -pub const KERN_PFSTATUS: c_int = 86; -pub const KERN_TIMEOUT_STATS: c_int = 87; -#[deprecated( - since = "0.2.95", - note = "Possibly increasing over the releases and might not be so used in the field" -)] -pub const KERN_MAXID: c_int = 88; - -pub const KERN_PROC_ALL: c_int = 0; -pub const KERN_PROC_PID: c_int = 1; -pub const KERN_PROC_PGRP: c_int = 2; -pub const KERN_PROC_SESSION: c_int = 3; -pub const KERN_PROC_TTY: c_int = 4; -pub const KERN_PROC_UID: c_int = 5; -pub const KERN_PROC_RUID: c_int = 6; -pub const KERN_PROC_KTHREAD: c_int = 7; -pub const KERN_PROC_SHOW_THREADS: c_int = 0x40000000; - -pub const KERN_SYSVIPC_MSG_INFO: c_int = 1; -pub const KERN_SYSVIPC_SEM_INFO: c_int = 2; -pub const KERN_SYSVIPC_SHM_INFO: c_int = 3; - -pub const KERN_PROC_ARGV: c_int = 1; -pub const KERN_PROC_NARGV: c_int = 2; -pub const KERN_PROC_ENV: c_int = 3; -pub const KERN_PROC_NENV: c_int = 4; - -pub const KI_NGROUPS: c_int = 16; -pub const KI_MAXCOMLEN: c_int = 24; -pub const KI_WMESGLEN: c_int = 8; -pub const KI_MAXLOGNAME: c_int = 32; -pub const KI_EMULNAMELEN: c_int = 8; - -pub const KVE_ET_OBJ: c_int = 0x00000001; -pub const KVE_ET_SUBMAP: c_int = 0x00000002; -pub const KVE_ET_COPYONWRITE: c_int = 0x00000004; -pub const KVE_ET_NEEDSCOPY: c_int = 0x00000008; -pub const KVE_ET_HOLE: c_int = 0x00000010; -pub const KVE_ET_NOFAULT: c_int = 0x00000020; -pub const KVE_ET_STACK: c_int = 0x00000040; -pub const KVE_ET_WC: c_int = 0x000000080; -pub const KVE_ET_CONCEAL: c_int = 0x000000100; -pub const KVE_ET_SYSCALL: c_int = 0x000000200; -pub const KVE_ET_FREEMAPPED: c_int = 0x000000800; - -pub const KVE_PROT_NONE: c_int = 0x00000000; -pub const KVE_PROT_READ: c_int = 0x00000001; -pub const KVE_PROT_WRITE: c_int = 0x00000002; -pub const KVE_PROT_EXEC: c_int = 0x00000004; - -pub const KVE_ADV_NORMAL: c_int = 0x00000000; -pub const KVE_ADV_RANDOM: c_int = 0x00000001; -pub const KVE_ADV_SEQUENTIAL: c_int = 0x00000002; - -pub const KVE_INH_SHARE: c_int = 0x00000000; -pub const KVE_INH_COPY: c_int = 0x00000010; -pub const KVE_INH_NONE: c_int = 0x00000020; -pub const KVE_INH_ZERO: c_int = 0x00000030; - -pub const KVE_F_STATIC: c_int = 0x1; -pub const KVE_F_KMEM: c_int = 0x2; - -pub const CHWFLOW: crate::tcflag_t = crate::MDMBUF | crate::CRTSCTS; -pub const OLCUC: crate::tcflag_t = 0x20; -pub const ONOCR: crate::tcflag_t = 0x40; -pub const ONLRET: crate::tcflag_t = 0x80; - -//https://github.com/openbsd/src/blob/HEAD/sys/sys/mount.h -pub const ISOFSMNT_NORRIP: c_int = 0x1; // disable Rock Ridge Ext -pub const ISOFSMNT_GENS: c_int = 0x2; // enable generation numbers -pub const ISOFSMNT_EXTATT: c_int = 0x4; // enable extended attr -pub const ISOFSMNT_NOJOLIET: c_int = 0x8; // disable Joliet Ext -pub const ISOFSMNT_SESS: c_int = 0x10; // use iso_args.sess - -pub const NFS_ARGSVERSION: c_int = 4; // change when nfs_args changes - -pub const NFSMNT_RESVPORT: c_int = 0; // always use reserved ports -pub const NFSMNT_SOFT: c_int = 0x1; // soft mount (hard is default) -pub const NFSMNT_WSIZE: c_int = 0x2; // set write size -pub const NFSMNT_RSIZE: c_int = 0x4; // set read size -pub const NFSMNT_TIMEO: c_int = 0x8; // set initial timeout -pub const NFSMNT_RETRANS: c_int = 0x10; // set number of request retries -pub const NFSMNT_MAXGRPS: c_int = 0x20; // set maximum grouplist size -pub const NFSMNT_INT: 
c_int = 0x40; // allow interrupts on hard mount -pub const NFSMNT_NOCONN: c_int = 0x80; // Don't Connect the socket -pub const NFSMNT_NQNFS: c_int = 0x100; // Use Nqnfs protocol -pub const NFSMNT_NFSV3: c_int = 0x200; // Use NFS Version 3 protocol -pub const NFSMNT_KERB: c_int = 0x400; // Use Kerberos authentication -pub const NFSMNT_DUMBTIMR: c_int = 0x800; // Don't estimate rtt dynamically -pub const NFSMNT_LEASETERM: c_int = 0x1000; // set lease term (nqnfs) -pub const NFSMNT_READAHEAD: c_int = 0x2000; // set read ahead -pub const NFSMNT_DEADTHRESH: c_int = 0x4000; // set dead server retry thresh -pub const NFSMNT_NOAC: c_int = 0x8000; // disable attribute cache -pub const NFSMNT_RDIRPLUS: c_int = 0x10000; // Use Readdirplus for V3 -pub const NFSMNT_READDIRSIZE: c_int = 0x20000; // Set readdir size - -/* Flags valid only in mount syscall arguments */ -pub const NFSMNT_ACREGMIN: c_int = 0x40000; // acregmin field valid -pub const NFSMNT_ACREGMAX: c_int = 0x80000; // acregmax field valid -pub const NFSMNT_ACDIRMIN: c_int = 0x100000; // acdirmin field valid -pub const NFSMNT_ACDIRMAX: c_int = 0x200000; // acdirmax field valid - -/* Flags valid only in kernel */ -pub const NFSMNT_INTERNAL: c_int = 0xfffc0000; // Bits set internally -pub const NFSMNT_HASWRITEVERF: c_int = 0x40000; // Has write verifier for V3 -pub const NFSMNT_GOTPATHCONF: c_int = 0x80000; // Got the V3 pathconf info -pub const NFSMNT_GOTFSINFO: c_int = 0x100000; // Got the V3 fsinfo -pub const NFSMNT_MNTD: c_int = 0x200000; // Mnt server for mnt point -pub const NFSMNT_DISMINPROG: c_int = 0x400000; // Dismount in progress -pub const NFSMNT_DISMNT: c_int = 0x800000; // Dismounted -pub const NFSMNT_SNDLOCK: c_int = 0x1000000; // Send socket lock -pub const NFSMNT_WANTSND: c_int = 0x2000000; // Want above -pub const NFSMNT_RCVLOCK: c_int = 0x4000000; // Rcv socket lock -pub const NFSMNT_WANTRCV: c_int = 0x8000000; // Want above -pub const NFSMNT_WAITAUTH: c_int = 0x10000000; // Wait for authentication -pub const NFSMNT_HASAUTH: c_int = 0x20000000; // Has authenticator -pub const NFSMNT_WANTAUTH: c_int = 0x40000000; // Wants an authenticator -pub const NFSMNT_AUTHERR: c_int = 0x80000000; // Authentication error - -pub const MSDOSFSMNT_SHORTNAME: c_int = 0x1; // Force old DOS short names only -pub const MSDOSFSMNT_LONGNAME: c_int = 0x2; // Force Win'95 long names -pub const MSDOSFSMNT_NOWIN95: c_int = 0x4; // Completely ignore Win95 entries - -pub const NTFS_MFLAG_CASEINS: c_int = 0x1; -pub const NTFS_MFLAG_ALLNAMES: c_int = 0x2; - -pub const TMPFS_ARGS_VERSION: c_int = 1; - -const SI_MAXSZ: size_t = 128; -const SI_PAD: size_t = (SI_MAXSZ / size_of::()) - 3; - -pub const MAP_STACK: c_int = 0x4000; -pub const MAP_CONCEAL: c_int = 0x8000; - -// https://github.com/openbsd/src/blob/HEAD/sys/net/if.h#L187 -pub const IFF_UP: c_int = 0x1; // interface is up -pub const IFF_BROADCAST: c_int = 0x2; // broadcast address valid -pub const IFF_DEBUG: c_int = 0x4; // turn on debugging -pub const IFF_LOOPBACK: c_int = 0x8; // is a loopback net -pub const IFF_POINTOPOINT: c_int = 0x10; // interface is point-to-point link -pub const IFF_STATICARP: c_int = 0x20; // only static ARP -pub const IFF_RUNNING: c_int = 0x40; // resources allocated -pub const IFF_NOARP: c_int = 0x80; // no address resolution protocol -pub const IFF_PROMISC: c_int = 0x100; // receive all packets -pub const IFF_ALLMULTI: c_int = 0x200; // receive all multicast packets -pub const IFF_OACTIVE: c_int = 0x400; // transmission in progress -pub const IFF_SIMPLEX: c_int = 0x800; 
// can't hear own transmissions -pub const IFF_LINK0: c_int = 0x1000; // per link layer defined bit -pub const IFF_LINK1: c_int = 0x2000; // per link layer defined bit -pub const IFF_LINK2: c_int = 0x4000; // per link layer defined bit -pub const IFF_MULTICAST: c_int = 0x8000; // supports multicast - -pub const PTHREAD_STACK_MIN: size_t = 1_usize << _MAX_PAGE_SHIFT; -pub const MINSIGSTKSZ: size_t = 3_usize << _MAX_PAGE_SHIFT; -pub const SIGSTKSZ: size_t = MINSIGSTKSZ + (1_usize << _MAX_PAGE_SHIFT) * 4; - -pub const PT_SET_EVENT_MASK: c_int = 12; -pub const PT_GET_EVENT_MASK: c_int = 13; -pub const PT_GET_PROCESS_STATE: c_int = 14; -pub const PT_GET_THREAD_FIRST: c_int = 15; -pub const PT_GET_THREAD_NEXT: c_int = 16; -pub const PT_FIRSTMACH: c_int = 32; - -pub const SOCK_CLOEXEC: c_int = 0x8000; -pub const SOCK_NONBLOCK: c_int = 0x4000; -pub const SOCK_DNS: c_int = 0x1000; - -pub const BIOCGRSIG: c_ulong = 0x40044273; -pub const BIOCSRSIG: c_ulong = 0x80044272; -pub const BIOCSDLT: c_ulong = 0x8004427a; - -pub const PTRACE_FORK: c_int = 0x0002; - -pub const WCONTINUED: c_int = 0x08; -pub const WEXITED: c_int = 0x04; -pub const WSTOPPED: c_int = 0x02; // same as WUNTRACED -pub const WNOWAIT: c_int = 0x10; -pub const WTRAPPED: c_int = 0x20; - -pub const P_ALL: crate::idtype_t = 0; -pub const P_PGID: crate::idtype_t = 1; -pub const P_PID: crate::idtype_t = 2; - -// search.h -pub const FIND: crate::ACTION = 0; -pub const ENTER: crate::ACTION = 1; - -// futex.h -pub const FUTEX_WAIT: c_int = 1; -pub const FUTEX_WAKE: c_int = 2; -pub const FUTEX_REQUEUE: c_int = 3; -pub const FUTEX_PRIVATE_FLAG: c_int = 128; - -// sysctl.h, kinfo_proc p_eflag constants -pub const EPROC_CTTY: i32 = 0x01; // controlling tty vnode active -pub const EPROC_SLEADER: i32 = 0x02; // session leader -pub const EPROC_UNVEIL: i32 = 0x04; // has unveil settings -pub const EPROC_LKUNVEIL: i32 = 0x08; // unveil is locked - -// Flags for chflags(2) -pub const UF_SETTABLE: c_uint = 0x0000ffff; -pub const UF_NODUMP: c_uint = 0x00000001; -pub const UF_IMMUTABLE: c_uint = 0x00000002; -pub const UF_APPEND: c_uint = 0x00000004; -pub const UF_OPAQUE: c_uint = 0x00000008; -pub const SF_SETTABLE: c_uint = 0xffff0000; -pub const SF_ARCHIVED: c_uint = 0x00010000; -pub const SF_IMMUTABLE: c_uint = 0x00020000; -pub const SF_APPEND: c_uint = 0x00040000; - -// sys/exec_elf.h - Legal values for p_type (segment type). -pub const PT_NULL: u32 = 0; -pub const PT_LOAD: u32 = 1; -pub const PT_DYNAMIC: u32 = 2; -pub const PT_INTERP: u32 = 3; -pub const PT_NOTE: u32 = 4; -pub const PT_SHLIB: u32 = 5; -pub const PT_PHDR: u32 = 6; -pub const PT_TLS: u32 = 7; -pub const PT_LOOS: u32 = 0x60000000; -pub const PT_HIOS: u32 = 0x6fffffff; -pub const PT_LOPROC: u32 = 0x70000000; -pub const PT_HIPROC: u32 = 0x7fffffff; - -pub const PT_GNU_EH_FRAME: u32 = 0x6474e550; -pub const PT_GNU_RELRO: u32 = 0x6474e552; - -// sys/exec_elf.h - Legal values for p_flags (segment flags). 
-pub const PF_X: u32 = 0x1; -pub const PF_W: u32 = 0x2; -pub const PF_R: u32 = 0x4; -pub const PF_MASKOS: u32 = 0x0ff00000; -pub const PF_MASKPROC: u32 = 0xf0000000; - -// sys/ioccom.h -pub const fn IOCPARM_LEN(x: u32) -> u32 { - (x >> 16) & crate::IOCPARM_MASK -} - -pub const fn IOCBASECMD(x: u32) -> u32 { - x & (!(crate::IOCPARM_MASK << 16)) -} - -pub const fn IOCGROUP(x: u32) -> u32 { - (x >> 8) & 0xff -} - -pub const fn _IOC(inout: c_ulong, group: c_ulong, num: c_ulong, len: c_ulong) -> c_ulong { - (inout) | (((len) & crate::IOCPARM_MASK as c_ulong) << 16) | ((group) << 8) | (num) -} - -// sys/mount.h -pub const MNT_NOPERM: c_int = 0x00000020; -pub const MNT_WXALLOWED: c_int = 0x00000800; -pub const MNT_EXRDONLY: c_int = 0x00000080; -pub const MNT_DEFEXPORTED: c_int = 0x00000200; -pub const MNT_EXPORTANON: c_int = 0x00000400; -pub const MNT_ROOTFS: c_int = 0x00004000; -pub const MNT_NOATIME: c_int = 0x00008000; -pub const MNT_DELEXPORT: c_int = 0x00020000; -pub const MNT_STALLED: c_int = 0x00100000; -pub const MNT_SWAPPABLE: c_int = 0x00200000; -pub const MNT_WANTRDWR: c_int = 0x02000000; -pub const MNT_SOFTDEP: c_int = 0x04000000; -pub const MNT_DOOMED: c_int = 0x08000000; - -// For use with vfs_fsync and getfsstat -pub const MNT_WAIT: c_int = 1; -pub const MNT_NOWAIT: c_int = 2; -pub const MNT_LAZY: c_int = 3; - -// sys/_time.h -pub const CLOCK_PROCESS_CPUTIME_ID: crate::clockid_t = 2; -pub const CLOCK_THREAD_CPUTIME_ID: crate::clockid_t = 4; -pub const CLOCK_UPTIME: crate::clockid_t = 5; -pub const CLOCK_BOOTTIME: crate::clockid_t = 6; - -pub const LC_COLLATE_MASK: c_int = 1 << crate::LC_COLLATE; -pub const LC_CTYPE_MASK: c_int = 1 << crate::LC_CTYPE; -pub const LC_MONETARY_MASK: c_int = 1 << crate::LC_MONETARY; -pub const LC_NUMERIC_MASK: c_int = 1 << crate::LC_NUMERIC; -pub const LC_TIME_MASK: c_int = 1 << crate::LC_TIME; -pub const LC_MESSAGES_MASK: c_int = 1 << crate::LC_MESSAGES; - -const _LC_LAST: c_int = 7; -pub const LC_ALL_MASK: c_int = (1 << _LC_LAST) - 2; - -pub const LC_GLOBAL_LOCALE: crate::locale_t = -1isize as crate::locale_t; - -// sys/reboot.h -pub const RB_ASKNAME: c_int = 0x00001; -pub const RB_SINGLE: c_int = 0x00002; -pub const RB_NOSYNC: c_int = 0x00004; -pub const RB_HALT: c_int = 0x00008; -pub const RB_INITNAME: c_int = 0x00010; -pub const RB_KDB: c_int = 0x00040; -pub const RB_RDONLY: c_int = 0x00080; -pub const RB_DUMP: c_int = 0x00100; -pub const RB_MINIROOT: c_int = 0x00200; -pub const RB_CONFIG: c_int = 0x00400; -pub const RB_TIMEBAD: c_int = 0x00800; -pub const RB_POWERDOWN: c_int = 0x01000; -pub const RB_SERCONS: c_int = 0x02000; -pub const RB_USERREQ: c_int = 0x04000; -pub const RB_RESET: c_int = 0x08000; -pub const RB_GOODRANDOM: c_int = 0x10000; -pub const RB_UNHIBERNATE: c_int = 0x20000; - -// net/route.h -pub const RTF_CLONING: c_int = 0x100; -pub const RTF_MULTICAST: c_int = 0x200; -pub const RTF_LLINFO: c_int = 0x400; -pub const RTF_PROTO3: c_int = 0x2000; -pub const RTF_ANNOUNCE: c_int = crate::RTF_PROTO2; - -pub const RTF_CLONED: c_int = 0x10000; -pub const RTF_CACHED: c_int = 0x20000; -pub const RTF_MPATH: c_int = 0x40000; -pub const RTF_MPLS: c_int = 0x100000; -pub const RTF_LOCAL: c_int = 0x200000; -pub const RTF_BROADCAST: c_int = 0x400000; -pub const RTF_CONNECTED: c_int = 0x800000; -pub const RTF_BFD: c_int = 0x1000000; -pub const RTF_FMASK: c_int = crate::RTF_LLINFO - | crate::RTF_PROTO1 - | crate::RTF_PROTO2 - | crate::RTF_PROTO3 - | crate::RTF_BLACKHOLE - | crate::RTF_REJECT - | crate::RTF_STATIC - | crate::RTF_MPLS - | 
crate::RTF_BFD; - -pub const RTM_VERSION: c_int = 5; -pub const RTM_RESOLVE: c_int = 0xb; -pub const RTM_NEWADDR: c_int = 0xc; -pub const RTM_DELADDR: c_int = 0xd; -pub const RTM_IFINFO: c_int = 0xe; -pub const RTM_IFANNOUNCE: c_int = 0xf; -pub const RTM_DESYNC: c_int = 0x10; -pub const RTM_INVALIDATE: c_int = 0x11; -pub const RTM_BFD: c_int = 0x12; -pub const RTM_PROPOSAL: c_int = 0x13; -pub const RTM_CHGADDRATTR: c_int = 0x14; -pub const RTM_80211INFO: c_int = 0x15; -pub const RTM_SOURCE: c_int = 0x16; - -pub const RTA_SRC: c_int = 0x100; -pub const RTA_SRCMASK: c_int = 0x200; -pub const RTA_LABEL: c_int = 0x400; -pub const RTA_BFD: c_int = 0x800; -pub const RTA_DNS: c_int = 0x1000; -pub const RTA_STATIC: c_int = 0x2000; -pub const RTA_SEARCH: c_int = 0x4000; - -pub const RTAX_SRC: c_int = 8; -pub const RTAX_SRCMASK: c_int = 9; -pub const RTAX_LABEL: c_int = 10; -pub const RTAX_BFD: c_int = 11; -pub const RTAX_DNS: c_int = 12; -pub const RTAX_STATIC: c_int = 13; -pub const RTAX_SEARCH: c_int = 14; -pub const RTAX_MAX: c_int = 15; - -const fn _ALIGN(p: usize) -> usize { - (p + _ALIGNBYTES) & !_ALIGNBYTES -} - -f! { - pub fn CMSG_DATA(cmsg: *const cmsghdr) -> *mut c_uchar { - (cmsg as *mut c_uchar).offset(_ALIGN(size_of::()) as isize) - } - - pub const fn CMSG_LEN(length: c_uint) -> c_uint { - _ALIGN(size_of::()) as c_uint + length - } - - pub fn CMSG_NXTHDR(mhdr: *const crate::msghdr, cmsg: *const cmsghdr) -> *mut cmsghdr { - if cmsg.is_null() { - return crate::CMSG_FIRSTHDR(mhdr); - } - let next = cmsg as usize + _ALIGN((*cmsg).cmsg_len as usize) + _ALIGN(size_of::()); - let max = (*mhdr).msg_control as usize + (*mhdr).msg_controllen as usize; - if next > max { - core::ptr::null_mut::() - } else { - (cmsg as usize + _ALIGN((*cmsg).cmsg_len as usize)) as *mut cmsghdr - } - } - - pub const fn CMSG_SPACE(length: c_uint) -> c_uint { - (_ALIGN(size_of::()) + _ALIGN(length as usize)) as c_uint - } -} - -safe_f! 
{ - pub const fn WSTOPSIG(status: c_int) -> c_int { - status >> 8 - } - - pub const fn WIFSIGNALED(status: c_int) -> bool { - (status & 0o177) != 0o177 && (status & 0o177) != 0 - } - - pub const fn WIFSTOPPED(status: c_int) -> bool { - (status & 0xff) == 0o177 - } - - pub const fn WIFCONTINUED(status: c_int) -> bool { - (status & 0o177777) == 0o177777 - } - - pub const fn makedev(major: c_uint, minor: c_uint) -> crate::dev_t { - let major = major as crate::dev_t; - let minor = minor as crate::dev_t; - let mut dev = 0; - dev |= (major & 0xff) << 8; - dev |= minor & 0xff; - dev |= (minor & 0xffff00) << 8; - dev - } - - pub const fn major(dev: crate::dev_t) -> c_uint { - ((dev as c_uint) >> 8) & 0xff - } - - pub const fn minor(dev: crate::dev_t) -> c_uint { - let dev = dev as c_uint; - let mut res = 0; - res |= (dev) & 0xff; - res |= ((dev) & 0xffff0000) >> 8; - res - } -} - -extern "C" { - pub fn gettimeofday(tp: *mut crate::timeval, tz: *mut crate::timezone) -> c_int; - pub fn settimeofday(tp: *const crate::timeval, tz: *const crate::timezone) -> c_int; - pub fn pledge(promises: *const c_char, execpromises: *const c_char) -> c_int; - pub fn unveil(path: *const c_char, permissions: *const c_char) -> c_int; - pub fn strtonum( - nptr: *const c_char, - minval: c_longlong, - maxval: c_longlong, - errstr: *mut *const c_char, - ) -> c_longlong; - pub fn dup3(src: c_int, dst: c_int, flags: c_int) -> c_int; - pub fn chflags(path: *const c_char, flags: c_uint) -> c_int; - pub fn fchflags(fd: c_int, flags: c_uint) -> c_int; - pub fn chflagsat(fd: c_int, path: *const c_char, flags: c_uint, atflag: c_int) -> c_int; - pub fn dirfd(dirp: *mut crate::DIR) -> c_int; - pub fn getnameinfo( - sa: *const crate::sockaddr, - salen: crate::socklen_t, - host: *mut c_char, - hostlen: size_t, - serv: *mut c_char, - servlen: size_t, - flags: c_int, - ) -> c_int; - pub fn getresgid( - rgid: *mut crate::gid_t, - egid: *mut crate::gid_t, - sgid: *mut crate::gid_t, - ) -> c_int; - pub fn getresuid( - ruid: *mut crate::uid_t, - euid: *mut crate::uid_t, - suid: *mut crate::uid_t, - ) -> c_int; - pub fn kevent( - kq: c_int, - changelist: *const crate::kevent, - nchanges: c_int, - eventlist: *mut crate::kevent, - nevents: c_int, - timeout: *const crate::timespec, - ) -> c_int; - pub fn mprotect(addr: *mut c_void, len: size_t, prot: c_int) -> c_int; - pub fn getthrid() -> crate::pid_t; - pub fn pthread_attr_getguardsize( - attr: *const crate::pthread_attr_t, - guardsize: *mut size_t, - ) -> c_int; - pub fn pthread_attr_setguardsize(attr: *mut crate::pthread_attr_t, guardsize: size_t) -> c_int; - pub fn pthread_attr_getstack( - attr: *const crate::pthread_attr_t, - stackaddr: *mut *mut c_void, - stacksize: *mut size_t, - ) -> c_int; - pub fn pthread_main_np() -> c_int; - pub fn pthread_get_name_np(tid: crate::pthread_t, name: *mut c_char, len: size_t); - pub fn pthread_set_name_np(tid: crate::pthread_t, name: *const c_char); - pub fn pthread_stackseg_np(thread: crate::pthread_t, sinfo: *mut crate::stack_t) -> c_int; - - pub fn openpty( - amaster: *mut c_int, - aslave: *mut c_int, - name: *mut c_char, - termp: *const crate::termios, - winp: *const crate::winsize, - ) -> c_int; - pub fn forkpty( - amaster: *mut c_int, - name: *mut c_char, - termp: *const crate::termios, - winp: *const crate::winsize, - ) -> crate::pid_t; - - pub fn sysctl( - name: *const c_int, - namelen: c_uint, - oldp: *mut c_void, - oldlenp: *mut size_t, - newp: *mut c_void, - newlen: size_t, - ) -> c_int; - pub fn setresgid(rgid: crate::gid_t, egid: 
crate::gid_t, sgid: crate::gid_t) -> c_int; - pub fn setresuid(ruid: crate::uid_t, euid: crate::uid_t, suid: crate::uid_t) -> c_int; - pub fn ptrace(request: c_int, pid: crate::pid_t, addr: caddr_t, data: c_int) -> c_int; - pub fn utrace(label: *const c_char, addr: *const c_void, len: size_t) -> c_int; - pub fn memmem( - haystack: *const c_void, - haystacklen: size_t, - needle: *const c_void, - needlelen: size_t, - ) -> *mut c_void; - // #include - pub fn dl_iterate_phdr( - callback: Option< - unsafe extern "C" fn(info: *mut dl_phdr_info, size: usize, data: *mut c_void) -> c_int, - >, - data: *mut c_void, - ) -> c_int; - pub fn uselocale(loc: crate::locale_t) -> crate::locale_t; - pub fn freelocale(loc: crate::locale_t); - pub fn newlocale(mask: c_int, locale: *const c_char, base: crate::locale_t) -> crate::locale_t; - pub fn duplocale(base: crate::locale_t) -> crate::locale_t; - - // Added in `OpenBSD` 5.5 - pub fn explicit_bzero(s: *mut c_void, len: size_t); - - pub fn setproctitle(fmt: *const c_char, ...); - - pub fn freezero(ptr: *mut c_void, size: size_t); - pub fn malloc_conceal(size: size_t) -> *mut c_void; - pub fn calloc_conceal(nmemb: size_t, size: size_t) -> *mut c_void; - - pub fn srand48_deterministic(seed: c_long); - pub fn seed48_deterministic(xseed: *mut c_ushort) -> *mut c_ushort; - pub fn lcong48_deterministic(p: *mut c_ushort); - - pub fn lsearch( - key: *const c_void, - base: *mut c_void, - nelp: *mut size_t, - width: size_t, - compar: Option c_int>, - ) -> *mut c_void; - pub fn lfind( - key: *const c_void, - base: *const c_void, - nelp: *mut size_t, - width: size_t, - compar: Option c_int>, - ) -> *mut c_void; - pub fn hcreate(nelt: size_t) -> c_int; - pub fn hdestroy(); - pub fn hsearch(entry: crate::ENTRY, action: crate::ACTION) -> *mut crate::ENTRY; - - // futex.h - pub fn futex( - uaddr: *mut u32, - op: c_int, - val: c_int, - timeout: *const crate::timespec, - uaddr2: *mut u32, - ) -> c_int; - - pub fn mimmutable(addr: *mut c_void, len: size_t) -> c_int; - - pub fn reboot(mode: c_int) -> c_int; - - pub fn statfs(path: *const c_char, buf: *mut statfs) -> c_int; - pub fn fstatfs(fd: c_int, buf: *mut statfs) -> c_int; - pub fn getmntinfo(mntbufp: *mut *mut crate::statfs, flags: c_int) -> c_int; - pub fn getfsstat(buf: *mut statfs, bufsize: size_t, flags: c_int) -> c_int; - - pub fn elf_aux_info(aux: c_int, buf: *mut c_void, buflen: c_int) -> c_int; -} - -#[link(name = "execinfo")] -extern "C" { - pub fn backtrace(addrlist: *mut *mut c_void, len: size_t) -> size_t; - pub fn backtrace_symbols(addrlist: *const *mut c_void, len: size_t) -> *mut *mut c_char; - pub fn backtrace_symbols_fd(addrlist: *const *mut c_void, len: size_t, fd: c_int) -> c_int; - pub fn backtrace_symbols_fmt( - addrlist: *const *mut c_void, - len: size_t, - fmt: *const c_char, - ) -> *mut *mut c_char; -} - -cfg_if! 
{ - if #[cfg(target_arch = "aarch64")] { - mod aarch64; - pub use self::aarch64::*; - } else if #[cfg(target_arch = "arm")] { - mod arm; - pub use self::arm::*; - } else if #[cfg(target_arch = "mips64")] { - mod mips64; - pub use self::mips64::*; - } else if #[cfg(target_arch = "powerpc")] { - mod powerpc; - pub use self::powerpc::*; - } else if #[cfg(target_arch = "powerpc64")] { - mod powerpc64; - pub use self::powerpc64::*; - } else if #[cfg(target_arch = "riscv64")] { - mod riscv64; - pub use self::riscv64::*; - } else if #[cfg(target_arch = "sparc64")] { - mod sparc64; - pub use self::sparc64::*; - } else if #[cfg(target_arch = "x86")] { - mod x86; - pub use self::x86::*; - } else if #[cfg(target_arch = "x86_64")] { - mod x86_64; - pub use self::x86_64::*; - } else { - // Unknown target_arch - } -} diff --git a/vendor/libc/src/unix/bsd/netbsdlike/openbsd/powerpc.rs b/vendor/libc/src/unix/bsd/netbsdlike/openbsd/powerpc.rs deleted file mode 100644 index 8b3f72139d86e9..00000000000000 --- a/vendor/libc/src/unix/bsd/netbsdlike/openbsd/powerpc.rs +++ /dev/null @@ -1,5 +0,0 @@ -use crate::prelude::*; - -pub(crate) const _ALIGNBYTES: usize = size_of::() - 1; - -pub const _MAX_PAGE_SHIFT: u32 = 12; diff --git a/vendor/libc/src/unix/bsd/netbsdlike/openbsd/powerpc64.rs b/vendor/libc/src/unix/bsd/netbsdlike/openbsd/powerpc64.rs deleted file mode 100644 index 5ebe85741454ee..00000000000000 --- a/vendor/libc/src/unix/bsd/netbsdlike/openbsd/powerpc64.rs +++ /dev/null @@ -1,5 +0,0 @@ -use crate::prelude::*; - -pub(crate) const _ALIGNBYTES: usize = size_of::() - 1; - -pub const _MAX_PAGE_SHIFT: u32 = 12; diff --git a/vendor/libc/src/unix/bsd/netbsdlike/openbsd/riscv64.rs b/vendor/libc/src/unix/bsd/netbsdlike/openbsd/riscv64.rs deleted file mode 100644 index 3545763d12c540..00000000000000 --- a/vendor/libc/src/unix/bsd/netbsdlike/openbsd/riscv64.rs +++ /dev/null @@ -1,25 +0,0 @@ -use crate::prelude::*; - -pub type ucontext_t = sigcontext; - -s! { - pub struct sigcontext { - __sc_unused: c_int, - pub sc_mask: c_int, - pub sc_ra: c_long, - pub sc_sp: c_long, - pub sc_gp: c_long, - pub sc_tp: c_long, - pub sc_t: [c_long; 7], - pub sc_s: [c_long; 12], - pub sc_a: [c_long; 8], - pub sc_sepc: c_long, - pub sc_f: [c_long; 32], - pub sc_fcsr: c_long, - pub sc_cookie: c_long, - } -} - -pub(crate) const _ALIGNBYTES: usize = size_of::() - 1; - -pub const _MAX_PAGE_SHIFT: u32 = 12; diff --git a/vendor/libc/src/unix/bsd/netbsdlike/openbsd/sparc64.rs b/vendor/libc/src/unix/bsd/netbsdlike/openbsd/sparc64.rs deleted file mode 100644 index 88481f4f014e81..00000000000000 --- a/vendor/libc/src/unix/bsd/netbsdlike/openbsd/sparc64.rs +++ /dev/null @@ -1,4 +0,0 @@ -#[doc(hidden)] -pub const _ALIGNBYTES: usize = 0xf; - -pub const _MAX_PAGE_SHIFT: u32 = 13; diff --git a/vendor/libc/src/unix/bsd/netbsdlike/openbsd/x86.rs b/vendor/libc/src/unix/bsd/netbsdlike/openbsd/x86.rs deleted file mode 100644 index 97dc58327d2226..00000000000000 --- a/vendor/libc/src/unix/bsd/netbsdlike/openbsd/x86.rs +++ /dev/null @@ -1,5 +0,0 @@ -use crate::prelude::*; - -pub(crate) const _ALIGNBYTES: usize = size_of::() - 1; - -pub const _MAX_PAGE_SHIFT: u32 = 12; diff --git a/vendor/libc/src/unix/bsd/netbsdlike/openbsd/x86_64.rs b/vendor/libc/src/unix/bsd/netbsdlike/openbsd/x86_64.rs deleted file mode 100644 index 984570c3870130..00000000000000 --- a/vendor/libc/src/unix/bsd/netbsdlike/openbsd/x86_64.rs +++ /dev/null @@ -1,109 +0,0 @@ -use crate::prelude::*; -use crate::PT_FIRSTMACH; - -pub type ucontext_t = sigcontext; - -s! 
{ - pub struct sigcontext { - pub sc_rdi: c_long, - pub sc_rsi: c_long, - pub sc_rdx: c_long, - pub sc_rcx: c_long, - pub sc_r8: c_long, - pub sc_r9: c_long, - pub sc_r10: c_long, - pub sc_r11: c_long, - pub sc_r12: c_long, - pub sc_r13: c_long, - pub sc_r14: c_long, - pub sc_r15: c_long, - pub sc_rbp: c_long, - pub sc_rbx: c_long, - pub sc_rax: c_long, - pub sc_gs: c_long, - pub sc_fs: c_long, - pub sc_es: c_long, - pub sc_ds: c_long, - pub sc_trapno: c_long, - pub sc_err: c_long, - pub sc_rip: c_long, - pub sc_cs: c_long, - pub sc_rflags: c_long, - pub sc_rsp: c_long, - pub sc_ss: c_long, - pub sc_fpstate: *mut fxsave64, - __sc_unused: c_int, - pub sc_mask: c_int, - pub sc_cookie: c_long, - } -} - -s_no_extra_traits! { - #[repr(packed)] - pub struct fxsave64 { - pub fx_fcw: u16, - pub fx_fsw: u16, - pub fx_ftw: u8, - __fx_unused1: u8, - pub fx_fop: u16, - pub fx_rip: u64, - pub fx_rdp: u64, - pub fx_mxcsr: u32, - pub fx_mxcsr_mask: u32, - pub fx_st: [[u64; 2]; 8], - pub fx_xmm: [[u64; 2]; 16], - __fx_unused3: [u8; 96], - } -} - -cfg_if! { - if #[cfg(feature = "extra_traits")] { - // `fxsave64` is packed, so field access is unaligned. - // use {x} to create temporary storage, copy field to it, and do aligned access. - impl PartialEq for fxsave64 { - fn eq(&self, other: &fxsave64) -> bool { - return { self.fx_fcw } == { other.fx_fcw } - && { self.fx_fsw } == { other.fx_fsw } - && { self.fx_ftw } == { other.fx_ftw } - && { self.fx_fop } == { other.fx_fop } - && { self.fx_rip } == { other.fx_rip } - && { self.fx_rdp } == { other.fx_rdp } - && { self.fx_mxcsr } == { other.fx_mxcsr } - && { self.fx_mxcsr_mask } == { other.fx_mxcsr_mask } - && { self.fx_st } - .iter() - .zip({ other.fx_st }.iter()) - .all(|(a, b)| a == b) - && { self.fx_xmm } - .iter() - .zip({ other.fx_xmm }.iter()) - .all(|(a, b)| a == b); - } - } - impl Eq for fxsave64 {} - impl hash::Hash for fxsave64 { - fn hash(&self, state: &mut H) { - { self.fx_fcw }.hash(state); - { self.fx_fsw }.hash(state); - { self.fx_ftw }.hash(state); - { self.fx_fop }.hash(state); - { self.fx_rip }.hash(state); - { self.fx_rdp }.hash(state); - { self.fx_mxcsr }.hash(state); - { self.fx_mxcsr_mask }.hash(state); - { self.fx_st }.hash(state); - { self.fx_xmm }.hash(state); - } - } - } -} - -pub(crate) const _ALIGNBYTES: usize = size_of::() - 1; - -pub const _MAX_PAGE_SHIFT: u32 = 12; - -pub const PT_STEP: c_int = PT_FIRSTMACH + 0; -pub const PT_GETREGS: c_int = PT_FIRSTMACH + 1; -pub const PT_SETREGS: c_int = PT_FIRSTMACH + 2; -pub const PT_GETFPREGS: c_int = PT_FIRSTMACH + 3; -pub const PT_SETFPREGS: c_int = PT_FIRSTMACH + 4; diff --git a/vendor/libc/src/unix/cygwin/mod.rs b/vendor/libc/src/unix/cygwin/mod.rs deleted file mode 100644 index 12e30f3f9016c5..00000000000000 --- a/vendor/libc/src/unix/cygwin/mod.rs +++ /dev/null @@ -1,2477 +0,0 @@ -use crate::prelude::*; -use crate::*; - -pub type wchar_t = c_ushort; - -pub type blkcnt_t = i64; -pub type blksize_t = i32; -pub type dev_t = u32; -pub type fsblkcnt_t = c_ulong; -pub type fsfilcnt_t = c_ulong; -pub type ino_t = u64; -pub type key_t = c_longlong; -pub type sa_family_t = u16; -pub type socklen_t = c_int; - -pub type off_t = c_long; -pub type id_t = u32; -pub type mode_t = u32; -pub type _off64_t = c_longlong; -pub type loff_t = _off64_t; -pub type iconv_t = *mut c_void; -pub type clock_t = c_ulong; -pub type time_t = c_long; -pub type clockid_t = c_ulong; -pub type timer_t = c_ulong; -pub type nl_item = c_int; -pub type nlink_t = c_ushort; -pub type suseconds_t = c_long; -pub type useconds_t 
= c_ulong; - -#[derive(Debug)] -pub enum timezone {} -impl Copy for timezone {} -impl Clone for timezone { - fn clone(&self) -> timezone { - *self - } -} - -pub type sigset_t = c_ulong; - -pub type fd_mask = c_ulong; - -pub type pthread_t = *mut c_void; -pub type pthread_mutex_t = *mut c_void; - -// Must be usize due to libstd/sys_common/thread_local.rs, -// should technically be *mut c_void -pub type pthread_key_t = usize; - -pub type pthread_attr_t = *mut c_void; -pub type pthread_mutexattr_t = *mut c_void; -pub type pthread_condattr_t = *mut c_void; -pub type pthread_cond_t = *mut c_void; - -// The following ones should be *mut c_void -pub type pthread_barrierattr_t = usize; -pub type pthread_barrier_t = usize; -pub type pthread_spinlock_t = usize; - -pub type pthread_rwlock_t = *mut c_void; -pub type pthread_rwlockattr_t = *mut c_void; - -pub type register_t = intptr_t; -pub type u_char = c_uchar; -pub type u_short = c_ushort; -pub type u_long = c_ulong; -pub type u_int = c_uint; -pub type caddr_t = *mut c_char; -pub type vm_size_t = c_ulong; - -pub type rlim_t = c_ulong; - -pub type nfds_t = c_uint; - -pub type sem_t = *mut sem; - -#[derive(Debug)] -pub enum sem {} -impl Copy for sem {} -impl Clone for sem { - fn clone(&self) -> sem { - *self - } -} - -pub type tcflag_t = c_uint; -pub type speed_t = c_uint; - -pub type vm_offset_t = c_ulong; - -pub type posix_spawn_file_actions_t = *mut c_void; -pub type posix_spawnattr_t = *mut c_void; - -s! { - pub struct itimerspec { - pub it_interval: timespec, - pub it_value: timespec, - } - - pub struct cpu_set_t { - bits: [u64; 16], - } - - pub struct sigaction { - pub sa_sigaction: sighandler_t, - pub sa_mask: sigset_t, - pub sa_flags: c_int, - } - - pub struct stack_t { - pub ss_sp: *mut c_void, - pub ss_flags: c_int, - pub ss_size: size_t, - } - - pub struct tm { - pub tm_sec: c_int, - pub tm_min: c_int, - pub tm_hour: c_int, - pub tm_mday: c_int, - pub tm_mon: c_int, - pub tm_year: c_int, - pub tm_wday: c_int, - pub tm_yday: c_int, - pub tm_isdst: c_int, - pub tm_gmtoff: c_long, - pub tm_zone: *const c_char, - } - - pub struct bintime { - pub sec: time_t, - pub frac: u64, - } - - pub struct passwd { - pub pw_name: *mut c_char, - pub pw_passwd: *mut c_char, - pub pw_uid: uid_t, - pub pw_gid: gid_t, - pub pw_comment: *mut c_char, - pub pw_gecos: *mut c_char, - pub pw_dir: *mut c_char, - pub pw_shell: *mut c_char, - } - - pub struct if_nameindex { - pub if_index: c_uint, - pub if_name: *mut c_char, - } - - pub struct ucred { - pub pid: pid_t, - pub uid: uid_t, - pub gid: gid_t, - } - - pub struct msghdr { - pub msg_name: *mut c_void, - pub msg_namelen: socklen_t, - pub msg_iov: *mut iovec, - pub msg_iovlen: c_int, - pub msg_control: *mut c_void, - pub msg_controllen: socklen_t, - pub msg_flags: c_int, - } - - pub struct cmsghdr { - pub cmsg_len: size_t, - pub cmsg_level: c_int, - pub cmsg_type: c_int, - } - - pub struct Dl_info { - pub dli_fname: [c_char; PATH_MAX as usize], - pub dli_fbase: *mut c_void, - pub dli_sname: *const c_char, - pub dli_saddr: *mut c_void, - } - - pub struct in6_pktinfo { - pub ipi6_addr: in6_addr, - pub ipi6_ifindex: u32, - } - - pub struct sockaddr_in6 { - pub sin6_family: sa_family_t, - pub sin6_port: in_port_t, - pub sin6_flowinfo: u32, - pub sin6_addr: in6_addr, - pub sin6_scope_id: u32, - } - - pub struct ip_mreq_source { - pub imr_multiaddr: in_addr, - pub imr_sourceaddr: in_addr, - pub imr_interface: in_addr, - } - - pub struct addrinfo { - pub ai_flags: c_int, - pub ai_family: c_int, - pub ai_socktype: c_int, 
- pub ai_protocol: c_int, - pub ai_addrlen: socklen_t, - pub ai_canonname: *mut c_char, - pub ai_addr: *mut sockaddr, - pub ai_next: *mut addrinfo, - } - - pub struct lconv { - pub decimal_point: *mut c_char, - pub thousands_sep: *mut c_char, - pub grouping: *mut c_char, - pub int_curr_symbol: *mut c_char, - pub currency_symbol: *mut c_char, - pub mon_decimal_point: *mut c_char, - pub mon_thousands_sep: *mut c_char, - pub mon_grouping: *mut c_char, - pub positive_sign: *mut c_char, - pub negative_sign: *mut c_char, - pub int_frac_digits: c_char, - pub frac_digits: c_char, - pub p_cs_precedes: c_char, - pub p_sep_by_space: c_char, - pub n_cs_precedes: c_char, - pub n_sep_by_space: c_char, - pub p_sign_posn: c_char, - pub n_sign_posn: c_char, - pub int_n_cs_precedes: c_char, - pub int_n_sep_by_space: c_char, - pub int_n_sign_posn: c_char, - pub int_p_cs_precedes: c_char, - pub int_p_sep_by_space: c_char, - pub int_p_sign_posn: c_char, - } - - pub struct termios { - pub c_iflag: tcflag_t, - pub c_oflag: tcflag_t, - pub c_cflag: tcflag_t, - pub c_lflag: tcflag_t, - pub c_line: c_char, - pub c_cc: [cc_t; NCCS], - pub c_ispeed: speed_t, - pub c_ospeed: speed_t, - } - - pub struct sched_param { - pub sched_priority: c_int, - } - - pub struct flock { - pub l_type: c_short, - pub l_whence: c_short, - pub l_start: off_t, - pub l_len: off_t, - pub l_pid: pid_t, - } - - pub struct hostent { - pub h_name: *const c_char, - pub h_aliases: *mut *mut c_char, - pub h_addrtype: c_short, - pub h_length: c_short, - pub h_addr_list: *mut *mut c_char, - } - - pub struct linger { - pub l_onoff: c_ushort, - pub l_linger: c_ushort, - } - - pub struct fd_set { - fds_bits: [fd_mask; FD_SETSIZE / size_of::() / 8], - } - - pub struct _uc_fpxreg { - pub significand: [u16; 4], - pub exponent: u16, - pub padding: [u16; 3], - } - - pub struct _uc_xmmreg { - pub element: [u32; 4], - } - - pub struct _fpstate { - pub cwd: u16, - pub swd: u16, - pub ftw: u16, - pub fop: u16, - pub rip: u64, - pub rdp: u64, - pub mxcsr: u32, - pub mxcr_mask: u32, - pub st: [_uc_fpxreg; 8], - pub xmm: [_uc_xmmreg; 16], - pub padding: [u32; 24], - } - - #[repr(align(16))] - pub struct mcontext_t { - pub p1home: u64, - pub p2home: u64, - pub p3home: u64, - pub p4home: u64, - pub p5home: u64, - pub p6home: u64, - pub ctxflags: u32, - pub mxcsr: u32, - pub cs: u16, - pub ds: u16, - pub es: u16, - pub fs: u16, - pub gs: u16, - pub ss: u16, - pub eflags: u32, - pub dr0: u64, - pub dr1: u64, - pub dr2: u64, - pub dr3: u64, - pub dr6: u64, - pub dr7: u64, - pub rax: u64, - pub rcx: u64, - pub rdx: u64, - pub rbx: u64, - pub rsp: u64, - pub rbp: u64, - pub rsi: u64, - pub rdi: u64, - pub r8: u64, - pub r9: u64, - pub r10: u64, - pub r11: u64, - pub r12: u64, - pub r13: u64, - pub r14: u64, - pub r15: u64, - pub rip: u64, - pub fpregs: _fpstate, - pub vregs: [u64; 52], - pub vcx: u64, - pub dbc: u64, - pub btr: u64, - pub bfr: u64, - pub etr: u64, - pub efr: u64, - pub oldmask: u64, - pub cr2: u64, - } - - // FIXME(1.0): This should not implement `PartialEq` - #[allow(unpredictable_function_pointer_comparisons)] - pub struct sigevent { - pub sigev_value: sigval, - pub sigev_signo: c_int, - pub sigev_notify: c_int, - pub sigev_notify_function: Option, - pub sigev_notify_attributes: *mut pthread_attr_t, - } - - #[repr(align(8))] - pub struct ucontext_t { - pub uc_mcontext: mcontext_t, - pub uc_link: *mut ucontext_t, - pub uc_sigmask: sigset_t, - pub uc_stack: stack_t, - pub uc_flags: c_ulong, - } - - pub struct sockaddr { - pub sa_family: sa_family_t, - 
pub sa_data: [c_char; 14], - } - - pub struct sockaddr_storage { - pub ss_family: sa_family_t, - __ss_pad1: [c_char; 6], - __ss_align: i64, - __ss_pad2: [c_char; 112], - } - - pub struct stat { - pub st_dev: dev_t, - pub st_ino: ino_t, - pub st_mode: mode_t, - pub st_nlink: nlink_t, - pub st_uid: uid_t, - pub st_gid: gid_t, - pub st_rdev: dev_t, - pub st_size: off_t, - pub st_atime: time_t, - pub st_atime_nsec: c_long, - pub st_mtime: time_t, - pub st_mtime_nsec: c_long, - pub st_ctime: time_t, - pub st_ctime_nsec: c_long, - pub st_blksize: blksize_t, - pub st_blocks: blkcnt_t, - pub st_birthtime: time_t, - pub st_birthtime_nsec: c_long, - } - - pub struct in_addr { - pub s_addr: in_addr_t, - } - - pub struct ip_mreq { - pub imr_multiaddr: in_addr, - pub imr_interface: in_addr, - } - - pub struct in_pktinfo { - pub ipi_addr: in_addr, - pub ipi_ifindex: u32, - } - - pub struct sockaddr_in { - pub sin_family: sa_family_t, - pub sin_port: in_port_t, - pub sin_addr: in_addr, - pub sin_zero: [u8; 8], - } - - pub struct statvfs { - pub f_bsize: c_ulong, - pub f_frsize: c_ulong, - pub f_blocks: fsblkcnt_t, - pub f_bfree: fsblkcnt_t, - pub f_bavail: fsblkcnt_t, - pub f_files: fsfilcnt_t, - pub f_ffree: fsfilcnt_t, - pub f_favail: fsfilcnt_t, - pub f_fsid: c_ulong, - pub f_flag: c_ulong, - pub f_namemax: c_ulong, - } - - pub struct statfs { - pub f_type: c_long, - pub f_bsize: c_long, - pub f_blocks: c_long, - pub f_bfree: c_long, - pub f_bavail: c_long, - pub f_files: c_long, - pub f_ffree: c_long, - pub f_fsid: c_long, - pub f_namelen: c_long, - pub f_spare: [c_long; 6], - } -} - -s_no_extra_traits! { - #[repr(align(16))] - pub struct max_align_t { - priv_: [f64; 4], - } - - pub struct siginfo_t { - pub si_signo: c_int, - pub si_code: c_int, - pub si_pid: pid_t, - pub si_uid: uid_t, - pub si_errno: c_int, - __pad: [u32; 32], - } - - pub union __c_anonymous_ifr_ifru { - pub ifru_addr: sockaddr, - pub ifru_broadaddr: sockaddr, - pub ifru_dstaddr: sockaddr, - pub ifru_netmask: sockaddr, - pub ifru_hwaddr: sockaddr, - pub ifru_flags: c_int, - pub ifru_metric: c_int, - pub ifru_mtu: c_int, - pub ifru_ifindex: c_int, - pub ifru_data: *mut c_char, - __ifru_pad: [c_char; 28], - } - - pub struct ifreq { - /// if name, e.g. 
"en0" - pub ifr_name: [c_char; IFNAMSIZ], - pub ifr_ifru: __c_anonymous_ifr_ifru, - } - - pub union __c_anonymous_ifc_ifcu { - pub ifcu_buf: caddr_t, - pub ifcu_req: *mut ifreq, - } - - pub struct ifconf { - pub ifc_len: c_int, - pub ifc_ifcu: __c_anonymous_ifc_ifcu, - } - - pub struct dirent { - __d_version: u32, - pub d_ino: ino_t, - pub d_type: c_uchar, - __d_unused1: [c_uchar; 3], - __d_internal1: u32, - pub d_name: [c_char; 256], - } - - pub struct sockaddr_un { - pub sun_family: sa_family_t, - pub sun_path: [c_char; 108], - } - - pub struct utsname { - pub sysname: [c_char; 66], - pub nodename: [c_char; 65], - pub release: [c_char; 65], - pub version: [c_char; 65], - pub machine: [c_char; 65], - pub domainname: [c_char; 65], - } -} - -impl siginfo_t { - pub unsafe fn si_addr(&self) -> *mut c_void { - #[repr(C)] - struct siginfo_si_addr { - _si_signo: c_int, - _si_code: c_int, - _si_pid: pid_t, - _si_uid: uid_t, - _si_errno: c_int, - si_addr: *mut c_void, - } - (*(self as *const siginfo_t as *const siginfo_si_addr)).si_addr - } - - pub unsafe fn si_status(&self) -> c_int { - #[repr(C)] - struct siginfo_sigchld { - _si_signo: c_int, - _si_code: c_int, - _si_pid: pid_t, - _si_uid: uid_t, - _si_errno: c_int, - si_status: c_int, - } - (*(self as *const siginfo_t as *const siginfo_sigchld)).si_status - } - - pub unsafe fn si_pid(&self) -> pid_t { - self.si_pid - } - - pub unsafe fn si_uid(&self) -> uid_t { - self.si_uid - } - - pub unsafe fn si_value(&self) -> sigval { - #[repr(C)] - struct siginfo_si_value { - _si_signo: c_int, - _si_code: c_int, - _si_pid: pid_t, - _si_uid: uid_t, - _si_errno: c_int, - si_value: sigval, - } - (*(self as *const siginfo_t as *const siginfo_si_value)).si_value - } -} - -cfg_if! { - if #[cfg(feature = "extra_traits")] { - impl PartialEq for siginfo_t { - fn eq(&self, other: &siginfo_t) -> bool { - self.si_signo == other.si_signo - && self.si_code == other.si_code - && self.si_pid == other.si_pid - && self.si_uid == other.si_uid - && self.si_errno == other.si_errno - } - } - - impl Eq for siginfo_t {} - - impl hash::Hash for siginfo_t { - fn hash(&self, state: &mut H) { - self.si_signo.hash(state); - self.si_code.hash(state); - self.si_pid.hash(state); - self.si_uid.hash(state); - self.si_errno.hash(state); - // Ignore __pad - } - } - - impl PartialEq for dirent { - fn eq(&self, other: &dirent) -> bool { - self.d_ino == other.d_ino - && self.d_type == other.d_type - && self - .d_name - .iter() - .zip(other.d_name.iter()) - .all(|(a, b)| a == b) - } - } - - impl Eq for dirent {} - - impl hash::Hash for dirent { - fn hash(&self, state: &mut H) { - self.d_ino.hash(state); - self.d_type.hash(state); - self.d_name.hash(state); - } - } - - impl PartialEq for sockaddr_un { - fn eq(&self, other: &sockaddr_un) -> bool { - self.sun_family == other.sun_family - && self - .sun_path - .iter() - .zip(other.sun_path.iter()) - .all(|(a, b)| a == b) - } - } - - impl Eq for sockaddr_un {} - - impl hash::Hash for sockaddr_un { - fn hash(&self, state: &mut H) { - self.sun_family.hash(state); - self.sun_path.hash(state); - } - } - - impl PartialEq for utsname { - fn eq(&self, other: &utsname) -> bool { - self.sysname - .iter() - .zip(other.sysname.iter()) - .all(|(a, b)| a == b) - && self - .nodename - .iter() - .zip(other.nodename.iter()) - .all(|(a, b)| a == b) - && self - .release - .iter() - .zip(other.release.iter()) - .all(|(a, b)| a == b) - && self - .version - .iter() - .zip(other.version.iter()) - .all(|(a, b)| a == b) - && self - .machine - .iter() - 
.zip(other.machine.iter()) - .all(|(a, b)| a == b) - && self - .domainname - .iter() - .zip(other.domainname.iter()) - .all(|(a, b)| a == b) - } - } - - impl Eq for utsname {} - - impl hash::Hash for utsname { - fn hash(&self, state: &mut H) { - self.sysname.hash(state); - self.nodename.hash(state); - self.release.hash(state); - self.version.hash(state); - self.machine.hash(state); - self.domainname.hash(state); - } - } - } -} - -pub const FD_SETSIZE: usize = 1024; - -pub const CPU_SETSIZE: c_int = 0x400; - -// si_code values for SIGBUS signal -pub const BUS_ADRALN: c_int = 25; -pub const BUS_ADRERR: c_int = 26; -pub const BUS_OBJERR: c_int = 27; - -// si_code values for SIGCHLD signal -pub const CLD_EXITED: c_int = 28; -pub const CLD_KILLED: c_int = 29; -pub const CLD_DUMPED: c_int = 30; -pub const CLD_TRAPPED: c_int = 31; -pub const CLD_STOPPED: c_int = 32; -pub const CLD_CONTINUED: c_int = 33; - -pub const SIGEV_SIGNAL: c_int = 0; -pub const SIGEV_NONE: c_int = 1; -pub const SIGEV_THREAD: c_int = 2; - -pub const SA_NOCLDSTOP: c_int = 0x00000001; -pub const SA_NOCLDWAIT: c_int = 0; // FIXME: does not exist on Cygwin! -pub const SA_SIGINFO: c_int = 0x00000002; -pub const SA_RESTART: c_int = 0x10000000; -pub const SA_ONSTACK: c_int = 0x20000000; -pub const SA_NODEFER: c_int = 0x40000000; -pub const SA_RESETHAND: c_int = 0x80000000; -pub const MINSIGSTKSZ: size_t = 8192; -pub const SIGSTKSZ: size_t = 32768; -pub const SIGHUP: c_int = 1; -pub const SIGINT: c_int = 2; -pub const SIGQUIT: c_int = 3; -pub const SIGILL: c_int = 4; -pub const SIGTRAP: c_int = 5; -pub const SIGABRT: c_int = 6; -pub const SIGEMT: c_int = 7; -pub const SIGFPE: c_int = 8; -pub const SIGKILL: c_int = 9; -pub const SIGBUS: c_int = 10; -pub const SIGSEGV: c_int = 11; -pub const SIGSYS: c_int = 12; -pub const SIGPIPE: c_int = 13; -pub const SIGALRM: c_int = 14; -pub const SIGTERM: c_int = 15; -pub const SIGURG: c_int = 16; -pub const SIGSTOP: c_int = 17; -pub const SIGTSTP: c_int = 18; -pub const SIGCONT: c_int = 19; -pub const SIGCHLD: c_int = 20; -pub const SIGTTIN: c_int = 21; -pub const SIGTTOU: c_int = 22; -pub const SIGIO: c_int = 23; -pub const SIGPOLL: c_int = 23; -pub const SIGXCPU: c_int = 24; -pub const SIGXFSZ: c_int = 25; -pub const SIGVTALRM: c_int = 26; -pub const SIGPROF: c_int = 27; -pub const SIGWINCH: c_int = 28; -pub const SIGPWR: c_int = 29; -pub const SIGUSR1: c_int = 30; -pub const SIGUSR2: c_int = 31; - -pub const SS_ONSTACK: c_int = 0x1; -pub const SS_DISABLE: c_int = 0x2; - -pub const SIG_SETMASK: c_int = 0; -pub const SIG_BLOCK: c_int = 1; -pub const SIG_UNBLOCK: c_int = 2; - -pub const TIMER_ABSTIME: c_int = 4; -pub const CLOCK_REALTIME_COARSE: clockid_t = 0; -pub const CLOCK_REALTIME: clockid_t = 1; -pub const CLOCK_PROCESS_CPUTIME_ID: clockid_t = 2; -pub const CLOCK_THREAD_CPUTIME_ID: clockid_t = 3; -pub const CLOCK_MONOTONIC: clockid_t = 4; -pub const CLOCK_MONOTONIC_RAW: clockid_t = 5; -pub const CLOCK_MONOTONIC_COARSE: clockid_t = 6; -pub const CLOCK_BOOTTIME: clockid_t = 7; -pub const CLOCK_REALTIME_ALARM: clockid_t = 8; -pub const CLOCK_BOOTTIME_ALARM: clockid_t = 9; - -pub const ITIMER_REAL: c_int = 0; -pub const ITIMER_VIRTUAL: c_int = 1; -pub const ITIMER_PROF: c_int = 2; - -pub const PRIO_PROCESS: c_int = 0; -pub const PRIO_PGRP: c_int = 1; -pub const PRIO_USER: c_int = 2; -pub const RLIMIT_CPU: c_int = 0; -pub const RLIMIT_FSIZE: c_int = 1; -pub const RLIMIT_DATA: c_int = 2; -pub const RLIMIT_STACK: c_int = 3; -pub const RLIMIT_CORE: c_int = 4; -pub const RLIMIT_NOFILE: c_int = 5; 
-pub const RLIMIT_AS: c_int = 6; -pub const RLIM_NLIMITS: c_int = 7; -pub const RLIMIT_NLIMITS: c_int = RLIM_NLIMITS; -pub const RLIM_INFINITY: rlim_t = !0; -pub const RLIM_SAVED_MAX: rlim_t = RLIM_INFINITY; -pub const RLIM_SAVED_CUR: rlim_t = RLIM_INFINITY; - -pub const RUSAGE_SELF: c_int = 0; -pub const RUSAGE_CHILDREN: c_int = -1; - -pub const IFF_UP: c_int = 0x1; // interface is up -pub const IFF_BROADCAST: c_int = 0x2; // broadcast address valid -pub const IFF_LOOPBACK: c_int = 0x8; // is a loopback net -pub const IFF_POINTOPOINT: c_int = 0x10; // interface is point-to-point link -pub const IFF_NOTRAILERS: c_int = 0x20; // avoid use of trailers -pub const IFF_RUNNING: c_int = 0x40; // resources allocated -pub const IFF_NOARP: c_int = 0x80; // no address resolution protocol -pub const IFF_PROMISC: c_int = 0x100; // receive all packets -pub const IFF_MULTICAST: c_int = 0x1000; // supports multicast -pub const IFF_LOWER_UP: c_int = 0x10000; // driver signals L1 up -pub const IFF_DORMANT: c_int = 0x20000; // driver signals dormant - -pub const IF_NAMESIZE: size_t = 44; -pub const IFNAMSIZ: size_t = IF_NAMESIZE; - -pub const FIONREAD: c_int = 0x4008667f; -pub const FIONBIO: c_int = 0x8004667e; -pub const FIOASYNC: c_int = 0x8008667d; -pub const FIOCLEX: c_int = 0; // FIXME: does not exist on Cygwin! -pub const SIOCGIFCONF: c_ulong = 0x80107364; -pub const SIOCGIFFLAGS: c_ulong = 0x80507365; -pub const SIOCGIFADDR: c_ulong = 0x80507366; -pub const SIOCGIFBRDADDR: c_ulong = 0x80507367; -pub const SIOCGIFNETMASK: c_ulong = 0x80507368; -pub const SIOCGIFHWADDR: c_ulong = 0x80507369; -pub const SIOCGIFMETRIC: c_ulong = 0x8050736a; -pub const SIOCGIFMTU: c_ulong = 0x8050736b; -pub const SIOCGIFINDEX: c_ulong = 0x8050736c; -pub const SIOGIFINDEX: c_ulong = SIOCGIFINDEX; -pub const SIOCGIFDSTADDR: c_ulong = 0x8050736e; -pub const SOL_SOCKET: c_int = 0xffff; -pub const SO_DEBUG: c_int = 1; -pub const SO_ACCEPTCONN: c_int = 0x0002; -pub const SO_REUSEADDR: c_int = 0x0004; -pub const SO_KEEPALIVE: c_int = 0x0008; -pub const SO_DONTROUTE: c_int = 0x0010; -pub const SO_BROADCAST: c_int = 0x0020; -pub const SO_USELOOPBACK: c_int = 0x0040; -pub const SO_LINGER: c_int = 0x0080; -pub const SO_OOBINLINE: c_int = 0x0100; -pub const SO_PEERCRED: c_int = 0x0200; -pub const SO_PASSCRED: c_int = 0x0400; -pub const SO_SNDBUF: c_int = 0x1001; -pub const SO_RCVBUF: c_int = 0x1002; -pub const SO_SNDLOWAT: c_int = 0x1003; -pub const SO_RCVLOWAT: c_int = 0x1004; -pub const SO_SNDTIMEO: c_int = 0x1005; -pub const SO_RCVTIMEO: c_int = 0x1006; -pub const SO_ERROR: c_int = 0x1007; -pub const SO_TYPE: c_int = 0x1008; - -pub const SCM_RIGHTS: c_int = 0x01; -pub const SCM_CREDENTIALS: c_int = 0x02; -pub const SOCK_STREAM: c_int = 1; -pub const SOCK_DGRAM: c_int = 2; -pub const SOCK_RAW: c_int = 3; -pub const SOCK_RDM: c_int = 4; -pub const SOCK_SEQPACKET: c_int = 5; -pub const SOCK_NONBLOCK: c_int = 0x01000000; -pub const SOCK_CLOEXEC: c_int = 0x02000000; -pub const AF_UNSPEC: c_int = 0; -pub const AF_LOCAL: c_int = 1; -pub const AF_UNIX: c_int = AF_LOCAL; -pub const AF_INET: c_int = 2; -pub const AF_IMPLINK: c_int = 3; -pub const AF_PUP: c_int = 4; -pub const AF_CHAOS: c_int = 5; -pub const AF_NS: c_int = 6; -pub const AF_ISO: c_int = 7; -pub const AF_OSI: c_int = AF_ISO; -pub const AF_ECMA: c_int = 8; -pub const AF_DATAKIT: c_int = 9; -pub const AF_CCITT: c_int = 10; -pub const AF_SNA: c_int = 11; -pub const AF_DECnet: c_int = 12; -pub const AF_DLI: c_int = 13; -pub const AF_LAT: c_int = 14; -pub const AF_HYLINK: c_int = 
15; -pub const AF_APPLETALK: c_int = 16; -pub const AF_NETBIOS: c_int = 17; -pub const AF_INET6: c_int = 23; -pub const PF_UNSPEC: c_int = AF_UNSPEC; -pub const PF_LOCAL: c_int = AF_LOCAL; -pub const PF_UNIX: c_int = PF_LOCAL; -pub const PF_INET: c_int = AF_INET; -pub const PF_IMPLINK: c_int = AF_IMPLINK; -pub const PF_PUP: c_int = AF_PUP; -pub const PF_CHAOS: c_int = AF_CHAOS; -pub const PF_NS: c_int = AF_NS; -pub const PF_ISO: c_int = AF_ISO; -pub const PF_OSI: c_int = AF_ISO; -pub const PF_DATAKIT: c_int = AF_DATAKIT; -pub const PF_CCITT: c_int = AF_CCITT; -pub const PF_SNA: c_int = AF_SNA; -pub const PF_DECnet: c_int = AF_DECnet; -pub const PF_DLI: c_int = AF_DLI; -pub const PF_LAT: c_int = AF_LAT; -pub const PF_HYLINK: c_int = AF_HYLINK; -pub const PF_APPLETALK: c_int = AF_APPLETALK; -pub const PF_NETBIOS: c_int = AF_NETBIOS; -pub const PF_INET6: c_int = AF_INET6; -pub const SOMAXCONN: c_int = 0x7fffffff; -pub const MSG_OOB: c_int = 0x1; -pub const MSG_PEEK: c_int = 0x2; -pub const MSG_DONTROUTE: c_int = 0x4; -pub const MSG_WAITALL: c_int = 0x8; -pub const MSG_DONTWAIT: c_int = 0x10; -pub const MSG_NOSIGNAL: c_int = 0x20; -pub const MSG_TRUNC: c_int = 0x0100; -pub const MSG_CTRUNC: c_int = 0x0200; -pub const MSG_BCAST: c_int = 0x0400; -pub const MSG_MCAST: c_int = 0x0800; -pub const MSG_CMSG_CLOEXEC: c_int = 0x1000; -pub const MSG_EOR: c_int = 0x8000; -pub const SOL_IP: c_int = 0; -pub const SOL_IPV6: c_int = 41; -pub const SOL_TCP: c_int = 6; -pub const SOL_UDP: c_int = 17; -pub const IPTOS_LOWDELAY: u8 = 0x10; -pub const IPTOS_THROUGHPUT: u8 = 0x08; -pub const IPTOS_RELIABILITY: u8 = 0x04; -pub const IPTOS_LOWCOST: u8 = 0x02; -pub const IPTOS_MINCOST: u8 = IPTOS_LOWCOST; -pub const IP_DEFAULT_MULTICAST_TTL: c_int = 1; -pub const IP_DEFAULT_MULTICAST_LOOP: c_int = 1; -pub const IP_OPTIONS: c_int = 1; -pub const IP_HDRINCL: c_int = 2; -pub const IP_TOS: c_int = 3; -pub const IP_TTL: c_int = 4; -pub const IP_MULTICAST_IF: c_int = 9; -pub const IP_MULTICAST_TTL: c_int = 10; -pub const IP_MULTICAST_LOOP: c_int = 11; -pub const IP_ADD_MEMBERSHIP: c_int = 12; -pub const IP_DROP_MEMBERSHIP: c_int = 13; -pub const IP_ADD_SOURCE_MEMBERSHIP: c_int = 15; -pub const IP_DROP_SOURCE_MEMBERSHIP: c_int = 16; -pub const IP_BLOCK_SOURCE: c_int = 17; -pub const IP_UNBLOCK_SOURCE: c_int = 18; -pub const IP_PKTINFO: c_int = 19; -pub const IP_RECVTTL: c_int = 21; -pub const IP_UNICAST_IF: c_int = 31; -pub const IP_RECVTOS: c_int = 40; -pub const IP_MTU_DISCOVER: c_int = 71; -pub const IP_MTU: c_int = 73; -pub const IP_RECVERR: c_int = 75; -pub const IP_PMTUDISC_WANT: c_int = 0; -pub const IP_PMTUDISC_DO: c_int = 1; -pub const IP_PMTUDISC_DONT: c_int = 2; -pub const IP_PMTUDISC_PROBE: c_int = 3; -pub const IPV6_HOPOPTS: c_int = 1; -pub const IPV6_HDRINCL: c_int = 2; -pub const IPV6_UNICAST_HOPS: c_int = 4; -pub const IPV6_MULTICAST_IF: c_int = 9; -pub const IPV6_MULTICAST_HOPS: c_int = 10; -pub const IPV6_MULTICAST_LOOP: c_int = 11; -pub const IPV6_ADD_MEMBERSHIP: c_int = 12; -pub const IPV6_DROP_MEMBERSHIP: c_int = 13; -pub const IPV6_JOIN_GROUP: c_int = 12; -pub const IPV6_LEAVE_GROUP: c_int = 13; -pub const IPV6_DONTFRAG: c_int = 14; -pub const IPV6_PKTINFO: c_int = 19; -pub const IPV6_HOPLIMIT: c_int = 21; -pub const IPV6_CHECKSUM: c_int = 26; -pub const IPV6_V6ONLY: c_int = 27; -pub const IPV6_UNICAST_IF: c_int = 31; -pub const IPV6_RTHDR: c_int = 32; -pub const IPV6_RECVRTHDR: c_int = 38; -pub const IPV6_TCLASS: c_int = 39; -pub const IPV6_RECVTCLASS: c_int = 40; -pub const IPV6_MTU_DISCOVER: c_int = 
71; -pub const IPV6_MTU: c_int = 72; -pub const IPV6_RECVERR: c_int = 75; -pub const IPV6_PMTUDISC_WANT: c_int = 0; -pub const IPV6_PMTUDISC_DO: c_int = 1; -pub const IPV6_PMTUDISC_DONT: c_int = 2; -pub const IPV6_PMTUDISC_PROBE: c_int = 3; -pub const MCAST_JOIN_GROUP: c_int = 41; -pub const MCAST_LEAVE_GROUP: c_int = 42; -pub const MCAST_BLOCK_SOURCE: c_int = 43; -pub const MCAST_UNBLOCK_SOURCE: c_int = 44; -pub const MCAST_JOIN_SOURCE_GROUP: c_int = 45; -pub const MCAST_LEAVE_SOURCE_GROUP: c_int = 46; -pub const MCAST_INCLUDE: c_int = 0; -pub const MCAST_EXCLUDE: c_int = 1; -pub const SHUT_RD: c_int = 0; -pub const SHUT_WR: c_int = 1; -pub const SHUT_RDWR: c_int = 2; - -pub const S_BLKSIZE: mode_t = 1024; -pub const S_IREAD: mode_t = 256; -pub const S_IWRITE: mode_t = 128; -pub const S_IEXEC: mode_t = 64; -pub const S_ENFMT: mode_t = 1024; -pub const S_IFMT: mode_t = 61440; -pub const S_IFDIR: mode_t = 16384; -pub const S_IFCHR: mode_t = 8192; -pub const S_IFBLK: mode_t = 24576; -pub const S_IFREG: mode_t = 32768; -pub const S_IFLNK: mode_t = 40960; -pub const S_IFSOCK: mode_t = 49152; -pub const S_IFIFO: mode_t = 4096; -pub const S_IRWXU: mode_t = 448; -pub const S_IRUSR: mode_t = 256; -pub const S_IWUSR: mode_t = 128; -pub const S_IXUSR: mode_t = 64; -pub const S_IRWXG: mode_t = 56; -pub const S_IRGRP: mode_t = 32; -pub const S_IWGRP: mode_t = 16; -pub const S_IXGRP: mode_t = 8; -pub const S_IRWXO: mode_t = 7; -pub const S_IROTH: mode_t = 4; -pub const S_IWOTH: mode_t = 2; -pub const S_IXOTH: mode_t = 1; -pub const UTIME_NOW: c_long = -2; -pub const UTIME_OMIT: c_long = -1; - -pub const ARG_MAX: c_int = 32000; -pub const CHILD_MAX: c_int = 256; -pub const IOV_MAX: c_int = 1024; -pub const PTHREAD_STACK_MIN: size_t = 65536; -pub const PATH_MAX: c_int = 4096; -pub const PIPE_BUF: usize = 4096; -pub const NGROUPS_MAX: c_int = 1024; - -pub const FORK_RELOAD: c_int = 1; -pub const FORK_NO_RELOAD: c_int = 0; - -pub const RTLD_DEFAULT: *mut c_void = ptr::null_mut(); -pub const RTLD_LOCAL: c_int = 0; -pub const RTLD_LAZY: c_int = 1; -pub const RTLD_NOW: c_int = 2; -pub const RTLD_GLOBAL: c_int = 4; -pub const RTLD_NODELETE: c_int = 8; -pub const RTLD_NOLOAD: c_int = 16; -pub const RTLD_DEEPBIND: c_int = 32; - -/// IP6 hop-by-hop options -pub const IPPROTO_HOPOPTS: c_int = 0; - -/// gateway mgmt protocol -pub const IPPROTO_IGMP: c_int = 2; - -/// IPIP tunnels (older KA9Q tunnels use 94) -pub const IPPROTO_IPIP: c_int = 4; - -/// exterior gateway protocol -pub const IPPROTO_EGP: c_int = 8; - -/// pup -pub const IPPROTO_PUP: c_int = 12; - -/// xns idp -pub const IPPROTO_IDP: c_int = 22; - -/// IP6 routing header -pub const IPPROTO_ROUTING: c_int = 43; - -/// IP6 fragmentation header -pub const IPPROTO_FRAGMENT: c_int = 44; - -/// IP6 Encap Sec. 
Payload -pub const IPPROTO_ESP: c_int = 50; - -/// IP6 Auth Header -pub const IPPROTO_AH: c_int = 51; - -/// IP6 no next header -pub const IPPROTO_NONE: c_int = 59; - -/// IP6 destination option -pub const IPPROTO_DSTOPTS: c_int = 60; - -pub const IPPROTO_RAW: c_int = 255; -pub const IPPROTO_MAX: c_int = 256; - -pub const AI_PASSIVE: c_int = 0x1; -pub const AI_CANONNAME: c_int = 0x2; -pub const AI_NUMERICHOST: c_int = 0x4; -pub const AI_NUMERICSERV: c_int = 0x8; -pub const AI_ALL: c_int = 0x100; -pub const AI_ADDRCONFIG: c_int = 0x400; -pub const AI_V4MAPPED: c_int = 0x800; -pub const NI_NOFQDN: c_int = 0x1; -pub const NI_NUMERICHOST: c_int = 0x2; -pub const NI_NAMEREQD: c_int = 0x4; -pub const NI_NUMERICSERV: c_int = 0x8; -pub const NI_DGRAM: c_int = 0x10; -pub const NI_MAXHOST: c_int = 1025; -pub const NI_MAXSERV: c_int = 32; -pub const EAI_AGAIN: c_int = 2; -pub const EAI_BADFLAGS: c_int = 3; -pub const EAI_FAIL: c_int = 4; -pub const EAI_FAMILY: c_int = 5; -pub const EAI_MEMORY: c_int = 6; -pub const EAI_NODATA: c_int = 7; -pub const EAI_NONAME: c_int = 8; -pub const EAI_SERVICE: c_int = 9; -pub const EAI_SOCKTYPE: c_int = 10; -pub const EAI_SYSTEM: c_int = 11; -pub const EAI_OVERFLOW: c_int = 14; - -pub const POLLIN: c_short = 0x1; -pub const POLLPRI: c_short = 0x2; -pub const POLLOUT: c_short = 0x4; -pub const POLLERR: c_short = 0x8; -pub const POLLHUP: c_short = 0x10; -pub const POLLNVAL: c_short = 0x20; -pub const POLLRDNORM: c_short = 0x1; -pub const POLLRDBAND: c_short = 0x2; -pub const POLLWRNORM: c_short = 0x4; -pub const POLLWRBAND: c_short = 0x4; - -pub const LC_ALL: c_int = 0; -pub const LC_COLLATE: c_int = 1; -pub const LC_CTYPE: c_int = 2; -pub const LC_MONETARY: c_int = 3; -pub const LC_NUMERIC: c_int = 4; -pub const LC_TIME: c_int = 5; -pub const LC_MESSAGES: c_int = 6; -pub const LC_ALL_MASK: c_int = 1 << 0; -pub const LC_COLLATE_MASK: c_int = 1 << 1; -pub const LC_CTYPE_MASK: c_int = 1 << 2; -pub const LC_MONETARY_MASK: c_int = 1 << 3; -pub const LC_NUMERIC_MASK: c_int = 1 << 4; -pub const LC_TIME_MASK: c_int = 1 << 5; -pub const LC_MESSAGES_MASK: c_int = 1 << 6; -pub const LC_GLOBAL_LOCALE: locale_t = -1isize as locale_t; - -pub const SEM_FAILED: *mut sem_t = core::ptr::null_mut(); - -pub const ST_RDONLY: c_ulong = 0x80000; -pub const ST_NOSUID: c_ulong = 0; - -pub const TIOCMGET: c_int = 0x5415; -pub const TIOCMBIS: c_int = 0x5416; -pub const TIOCMBIC: c_int = 0x5417; -pub const TIOCMSET: c_int = 0x5418; -pub const TIOCINQ: c_int = 0x541B; -pub const TIOCSCTTY: c_int = 0x540E; -pub const TIOCSBRK: c_int = 0x5427; -pub const TIOCCBRK: c_int = 0x5428; -pub const TIOCM_DTR: c_int = 0x002; -pub const TIOCM_RTS: c_int = 0x004; -pub const TIOCM_CTS: c_int = 0x020; -pub const TIOCM_CAR: c_int = 0x040; -pub const TIOCM_RNG: c_int = 0x080; -pub const TIOCM_CD: c_int = TIOCM_CAR; -pub const TIOCM_RI: c_int = TIOCM_RNG; -pub const TCOOFF: c_int = 0; -pub const TCOON: c_int = 1; -pub const TCIOFF: c_int = 2; -pub const TCION: c_int = 3; -pub const TCGETA: c_int = 5; -pub const TCSETA: c_int = 6; -pub const TCSETAW: c_int = 7; -pub const TCSETAF: c_int = 8; -pub const TCIFLUSH: c_int = 0; -pub const TCOFLUSH: c_int = 1; -pub const TCIOFLUSH: c_int = 2; -pub const TCFLSH: c_int = 3; -pub const TCSAFLUSH: c_int = 1; -pub const TCSANOW: c_int = 2; -pub const TCSADRAIN: c_int = 3; -pub const TIOCPKT: c_int = 6; -pub const TIOCPKT_DATA: c_int = 0x0; -pub const TIOCPKT_FLUSHREAD: c_int = 0x1; -pub const TIOCPKT_FLUSHWRITE: c_int = 0x2; -pub const TIOCPKT_STOP: c_int = 0x4; -pub const 
TIOCPKT_START: c_int = 0x8; -pub const TIOCPKT_NOSTOP: c_int = 0x10; -pub const TIOCPKT_DOSTOP: c_int = 0x20; -pub const IGNBRK: tcflag_t = 0x00001; -pub const BRKINT: tcflag_t = 0x00002; -pub const IGNPAR: tcflag_t = 0x00004; -pub const IMAXBEL: tcflag_t = 0x00008; -pub const INPCK: tcflag_t = 0x00010; -pub const ISTRIP: tcflag_t = 0x00020; -pub const INLCR: tcflag_t = 0x00040; -pub const IGNCR: tcflag_t = 0x00080; -pub const ICRNL: tcflag_t = 0x00100; -pub const IXON: tcflag_t = 0x00400; -pub const IXOFF: tcflag_t = 0x01000; -pub const IUCLC: tcflag_t = 0x04000; -pub const IXANY: tcflag_t = 0x08000; -pub const PARMRK: tcflag_t = 0x10000; -pub const IUTF8: tcflag_t = 0x20000; -pub const OPOST: tcflag_t = 0x00001; -pub const OLCUC: tcflag_t = 0x00002; -pub const OCRNL: tcflag_t = 0x00004; -pub const ONLCR: tcflag_t = 0x00008; -pub const ONOCR: tcflag_t = 0x00010; -pub const ONLRET: tcflag_t = 0x00020; -pub const OFILL: tcflag_t = 0x00040; -pub const CRDLY: tcflag_t = 0x00180; -pub const CR0: tcflag_t = 0x00000; -pub const CR1: tcflag_t = 0x00080; -pub const CR2: tcflag_t = 0x00100; -pub const CR3: tcflag_t = 0x00180; -pub const NLDLY: tcflag_t = 0x00200; -pub const NL0: tcflag_t = 0x00000; -pub const NL1: tcflag_t = 0x00200; -pub const BSDLY: tcflag_t = 0x00400; -pub const BS0: tcflag_t = 0x00000; -pub const BS1: tcflag_t = 0x00400; -pub const TABDLY: tcflag_t = 0x01800; -pub const TAB0: tcflag_t = 0x00000; -pub const TAB1: tcflag_t = 0x00800; -pub const TAB2: tcflag_t = 0x01000; -pub const TAB3: tcflag_t = 0x01800; -pub const XTABS: tcflag_t = 0x01800; -pub const VTDLY: tcflag_t = 0x02000; -pub const VT0: tcflag_t = 0x00000; -pub const VT1: tcflag_t = 0x02000; -pub const FFDLY: tcflag_t = 0x04000; -pub const FF0: tcflag_t = 0x00000; -pub const FF1: tcflag_t = 0x04000; -pub const OFDEL: tcflag_t = 0x08000; -pub const CBAUD: tcflag_t = 0x0100f; -pub const B0: speed_t = 0x00000; -pub const B50: speed_t = 0x00001; -pub const B75: speed_t = 0x00002; -pub const B110: speed_t = 0x00003; -pub const B134: speed_t = 0x00004; -pub const B150: speed_t = 0x00005; -pub const B200: speed_t = 0x00006; -pub const B300: speed_t = 0x00007; -pub const B600: speed_t = 0x00008; -pub const B1200: speed_t = 0x00009; -pub const B1800: speed_t = 0x0000a; -pub const B2400: speed_t = 0x0000b; -pub const B4800: speed_t = 0x0000c; -pub const B9600: speed_t = 0x0000d; -pub const B19200: speed_t = 0x0000e; -pub const B38400: speed_t = 0x0000f; -pub const CSIZE: tcflag_t = 0x00030; -pub const CS5: tcflag_t = 0x00000; -pub const CS6: tcflag_t = 0x00010; -pub const CS7: tcflag_t = 0x00020; -pub const CS8: tcflag_t = 0x00030; -pub const CSTOPB: tcflag_t = 0x00040; -pub const CREAD: tcflag_t = 0x00080; -pub const PARENB: tcflag_t = 0x00100; -pub const PARODD: tcflag_t = 0x00200; -pub const HUPCL: tcflag_t = 0x00400; -pub const CLOCAL: tcflag_t = 0x00800; -pub const CBAUDEX: tcflag_t = 0x0100f; -pub const B57600: speed_t = 0x01001; -pub const B115200: speed_t = 0x01002; -pub const B230400: speed_t = 0x01004; -pub const B460800: speed_t = 0x01006; -pub const B500000: speed_t = 0x01007; -pub const B576000: speed_t = 0x01008; -pub const B921600: speed_t = 0x01009; -pub const B1000000: speed_t = 0x0100a; -pub const B1152000: speed_t = 0x0100b; -pub const B1500000: speed_t = 0x0100c; -pub const B2000000: speed_t = 0x0100d; -pub const B2500000: speed_t = 0x0100e; -pub const B3000000: speed_t = 0x0100f; -pub const CRTSCTS: tcflag_t = 0x08000; -pub const CMSPAR: tcflag_t = 0x40000000; -pub const ISIG: tcflag_t = 0x0001; -pub const 
ICANON: tcflag_t = 0x0002; -pub const ECHO: tcflag_t = 0x0004; -pub const ECHOE: tcflag_t = 0x0008; -pub const ECHOK: tcflag_t = 0x0010; -pub const ECHONL: tcflag_t = 0x0020; -pub const NOFLSH: tcflag_t = 0x0040; -pub const TOSTOP: tcflag_t = 0x0080; -pub const IEXTEN: tcflag_t = 0x0100; -pub const FLUSHO: tcflag_t = 0x0200; -pub const ECHOKE: tcflag_t = 0x0400; -pub const ECHOCTL: tcflag_t = 0x0800; -pub const VDISCARD: usize = 1; -pub const VEOL: usize = 2; -pub const VEOL2: usize = 3; -pub const VEOF: usize = 4; -pub const VERASE: usize = 5; -pub const VINTR: usize = 6; -pub const VKILL: usize = 7; -pub const VLNEXT: usize = 8; -pub const VMIN: usize = 9; -pub const VQUIT: usize = 10; -pub const VREPRINT: usize = 11; -pub const VSTART: usize = 12; -pub const VSTOP: usize = 13; -pub const VSUSP: usize = 14; -pub const VSWTC: usize = 15; -pub const VTIME: usize = 16; -pub const VWERASE: usize = 17; -pub const NCCS: usize = 18; - -pub const TIOCGWINSZ: c_int = 0x5401; -pub const TIOCSWINSZ: c_int = 0x5402; -pub const TIOCLINUX: c_int = 0x5403; -pub const TIOCGPGRP: c_int = 0x540f; -pub const TIOCSPGRP: c_int = 0x5410; - -pub const WNOHANG: c_int = 1; -pub const WUNTRACED: c_int = 2; -pub const WCONTINUED: c_int = 8; - -pub const EXIT_FAILURE: c_int = 1; -pub const EXIT_SUCCESS: c_int = 0; - -pub const PROT_NONE: c_int = 0; -pub const PROT_READ: c_int = 1; -pub const PROT_WRITE: c_int = 2; -pub const PROT_EXEC: c_int = 4; -pub const MAP_FILE: c_int = 0; -pub const MAP_SHARED: c_int = 1; -pub const MAP_PRIVATE: c_int = 2; -pub const MAP_TYPE: c_int = 0xf; -pub const MAP_FIXED: c_int = 0x10; -pub const MAP_ANON: c_int = 0x20; -pub const MAP_ANONYMOUS: c_int = MAP_ANON; -pub const MAP_NORESERVE: c_int = 0x4000; -pub const MAP_FAILED: *mut c_void = !0 as *mut c_void; -pub const MS_ASYNC: c_int = 1; -pub const MS_SYNC: c_int = 2; -pub const MS_INVALIDATE: c_int = 4; -pub const POSIX_MADV_NORMAL: c_int = 0; -pub const POSIX_MADV_SEQUENTIAL: c_int = 1; -pub const POSIX_MADV_RANDOM: c_int = 2; -pub const POSIX_MADV_WILLNEED: c_int = 3; -pub const POSIX_MADV_DONTNEED: c_int = 4; -pub const MADV_NORMAL: c_int = 0; -pub const MADV_SEQUENTIAL: c_int = 1; -pub const MADV_RANDOM: c_int = 2; -pub const MADV_WILLNEED: c_int = 3; -pub const MADV_DONTNEED: c_int = 4; - -pub const F_ULOCK: c_int = 0; -pub const F_LOCK: c_int = 1; -pub const F_TLOCK: c_int = 2; -pub const F_TEST: c_int = 3; - -pub const F_OK: c_int = 0; -pub const R_OK: c_int = 4; -pub const W_OK: c_int = 2; -pub const X_OK: c_int = 1; -pub const SEEK_SET: c_int = 0; -pub const SEEK_CUR: c_int = 1; -pub const SEEK_END: c_int = 2; -pub const STDIN_FILENO: c_int = 0; -pub const STDOUT_FILENO: c_int = 1; -pub const STDERR_FILENO: c_int = 2; -pub const _SC_ARG_MAX: c_int = 0; -pub const _SC_CHILD_MAX: c_int = 1; -pub const _SC_CLK_TCK: c_int = 2; -pub const _SC_NGROUPS_MAX: c_int = 3; -pub const _SC_OPEN_MAX: c_int = 4; -pub const _SC_JOB_CONTROL: c_int = 5; -pub const _SC_SAVED_IDS: c_int = 6; -pub const _SC_VERSION: c_int = 7; -pub const _SC_PAGESIZE: c_int = 8; -pub const _SC_PAGE_SIZE: c_int = _SC_PAGESIZE; -pub const _SC_NPROCESSORS_CONF: c_int = 9; -pub const _SC_NPROCESSORS_ONLN: c_int = 10; -pub const _SC_PHYS_PAGES: c_int = 11; -pub const _SC_AVPHYS_PAGES: c_int = 12; -pub const _SC_MQ_OPEN_MAX: c_int = 13; -pub const _SC_MQ_PRIO_MAX: c_int = 14; -pub const _SC_RTSIG_MAX: c_int = 15; -pub const _SC_SEM_NSEMS_MAX: c_int = 16; -pub const _SC_SEM_VALUE_MAX: c_int = 17; -pub const _SC_SIGQUEUE_MAX: c_int = 18; -pub const _SC_TIMER_MAX: c_int 
= 19; -pub const _SC_TZNAME_MAX: c_int = 20; -pub const _SC_ASYNCHRONOUS_IO: c_int = 21; -pub const _SC_FSYNC: c_int = 22; -pub const _SC_MAPPED_FILES: c_int = 23; -pub const _SC_MEMLOCK: c_int = 24; -pub const _SC_MEMLOCK_RANGE: c_int = 25; -pub const _SC_MEMORY_PROTECTION: c_int = 26; -pub const _SC_MESSAGE_PASSING: c_int = 27; -pub const _SC_PRIORITIZED_IO: c_int = 28; -pub const _SC_REALTIME_SIGNALS: c_int = 29; -pub const _SC_SEMAPHORES: c_int = 30; -pub const _SC_SHARED_MEMORY_OBJECTS: c_int = 31; -pub const _SC_SYNCHRONIZED_IO: c_int = 32; -pub const _SC_TIMERS: c_int = 33; -pub const _SC_AIO_LISTIO_MAX: c_int = 34; -pub const _SC_AIO_MAX: c_int = 35; -pub const _SC_AIO_PRIO_DELTA_MAX: c_int = 36; -pub const _SC_DELAYTIMER_MAX: c_int = 37; -pub const _SC_THREAD_KEYS_MAX: c_int = 38; -pub const _SC_THREAD_STACK_MIN: c_int = 39; -pub const _SC_THREAD_THREADS_MAX: c_int = 40; -pub const _SC_TTY_NAME_MAX: c_int = 41; -pub const _SC_THREADS: c_int = 42; -pub const _SC_THREAD_ATTR_STACKADDR: c_int = 43; -pub const _SC_THREAD_ATTR_STACKSIZE: c_int = 44; -pub const _SC_THREAD_PRIORITY_SCHEDULING: c_int = 45; -pub const _SC_THREAD_PRIO_INHERIT: c_int = 46; -pub const _SC_THREAD_PRIO_PROTECT: c_int = 47; -pub const _SC_THREAD_PRIO_CEILING: c_int = _SC_THREAD_PRIO_PROTECT; -pub const _SC_THREAD_PROCESS_SHARED: c_int = 48; -pub const _SC_THREAD_SAFE_FUNCTIONS: c_int = 49; -pub const _SC_GETGR_R_SIZE_MAX: c_int = 50; -pub const _SC_GETPW_R_SIZE_MAX: c_int = 51; -pub const _SC_LOGIN_NAME_MAX: c_int = 52; -pub const _SC_THREAD_DESTRUCTOR_ITERATIONS: c_int = 53; -pub const _SC_ADVISORY_INFO: c_int = 54; -pub const _SC_ATEXIT_MAX: c_int = 55; -pub const _SC_BARRIERS: c_int = 56; -pub const _SC_BC_BASE_MAX: c_int = 57; -pub const _SC_BC_DIM_MAX: c_int = 58; -pub const _SC_BC_SCALE_MAX: c_int = 59; -pub const _SC_BC_STRING_MAX: c_int = 60; -pub const _SC_CLOCK_SELECTION: c_int = 61; -pub const _SC_COLL_WEIGHTS_MAX: c_int = 62; -pub const _SC_CPUTIME: c_int = 63; -pub const _SC_EXPR_NEST_MAX: c_int = 64; -pub const _SC_HOST_NAME_MAX: c_int = 65; -pub const _SC_IOV_MAX: c_int = 66; -pub const _SC_IPV6: c_int = 67; -pub const _SC_LINE_MAX: c_int = 68; -pub const _SC_MONOTONIC_CLOCK: c_int = 69; -pub const _SC_RAW_SOCKETS: c_int = 70; -pub const _SC_READER_WRITER_LOCKS: c_int = 71; -pub const _SC_REGEXP: c_int = 72; -pub const _SC_RE_DUP_MAX: c_int = 73; -pub const _SC_SHELL: c_int = 74; -pub const _SC_SPAWN: c_int = 75; -pub const _SC_SPIN_LOCKS: c_int = 76; -pub const _SC_SPORADIC_SERVER: c_int = 77; -pub const _SC_SS_REPL_MAX: c_int = 78; -pub const _SC_SYMLOOP_MAX: c_int = 79; -pub const _SC_THREAD_CPUTIME: c_int = 80; -pub const _SC_THREAD_SPORADIC_SERVER: c_int = 81; -pub const _SC_TIMEOUTS: c_int = 82; -pub const _SC_TRACE: c_int = 83; -pub const _SC_TRACE_EVENT_FILTER: c_int = 84; -pub const _SC_TRACE_EVENT_NAME_MAX: c_int = 85; -pub const _SC_TRACE_INHERIT: c_int = 86; -pub const _SC_TRACE_LOG: c_int = 87; -pub const _SC_TRACE_NAME_MAX: c_int = 88; -pub const _SC_TRACE_SYS_MAX: c_int = 89; -pub const _SC_TRACE_USER_EVENT_MAX: c_int = 90; -pub const _SC_TYPED_MEMORY_OBJECTS: c_int = 91; -pub const _SC_V7_ILP32_OFF32: c_int = 92; -pub const _SC_V6_ILP32_OFF32: c_int = _SC_V7_ILP32_OFF32; -pub const _SC_XBS5_ILP32_OFF32: c_int = _SC_V7_ILP32_OFF32; -pub const _SC_V7_ILP32_OFFBIG: c_int = 93; -pub const _SC_V6_ILP32_OFFBIG: c_int = _SC_V7_ILP32_OFFBIG; -pub const _SC_XBS5_ILP32_OFFBIG: c_int = _SC_V7_ILP32_OFFBIG; -pub const _SC_V7_LP64_OFF64: c_int = 94; -pub const _SC_V6_LP64_OFF64: c_int = 
_SC_V7_LP64_OFF64; -pub const _SC_XBS5_LP64_OFF64: c_int = _SC_V7_LP64_OFF64; -pub const _SC_V7_LPBIG_OFFBIG: c_int = 95; -pub const _SC_V6_LPBIG_OFFBIG: c_int = _SC_V7_LPBIG_OFFBIG; -pub const _SC_XBS5_LPBIG_OFFBIG: c_int = _SC_V7_LPBIG_OFFBIG; -pub const _SC_XOPEN_CRYPT: c_int = 96; -pub const _SC_XOPEN_ENH_I18N: c_int = 97; -pub const _SC_XOPEN_LEGACY: c_int = 98; -pub const _SC_XOPEN_REALTIME: c_int = 99; -pub const _SC_STREAM_MAX: c_int = 100; -pub const _SC_PRIORITY_SCHEDULING: c_int = 101; -pub const _SC_XOPEN_REALTIME_THREADS: c_int = 102; -pub const _SC_XOPEN_SHM: c_int = 103; -pub const _SC_XOPEN_STREAMS: c_int = 104; -pub const _SC_XOPEN_UNIX: c_int = 105; -pub const _SC_XOPEN_VERSION: c_int = 106; -pub const _SC_2_CHAR_TERM: c_int = 107; -pub const _SC_2_C_BIND: c_int = 108; -pub const _SC_2_C_DEV: c_int = 109; -pub const _SC_2_FORT_DEV: c_int = 110; -pub const _SC_2_FORT_RUN: c_int = 111; -pub const _SC_2_LOCALEDEF: c_int = 112; -pub const _SC_2_PBS: c_int = 113; -pub const _SC_2_PBS_ACCOUNTING: c_int = 114; -pub const _SC_2_PBS_CHECKPOINT: c_int = 115; -pub const _SC_2_PBS_LOCATE: c_int = 116; -pub const _SC_2_PBS_MESSAGE: c_int = 117; -pub const _SC_2_PBS_TRACK: c_int = 118; -pub const _SC_2_SW_DEV: c_int = 119; -pub const _SC_2_UPE: c_int = 120; -pub const _SC_2_VERSION: c_int = 121; -pub const _SC_THREAD_ROBUST_PRIO_INHERIT: c_int = 122; -pub const _SC_THREAD_ROBUST_PRIO_PROTECT: c_int = 123; -pub const _SC_XOPEN_UUCP: c_int = 124; -pub const _SC_LEVEL1_ICACHE_SIZE: c_int = 125; -pub const _SC_LEVEL1_ICACHE_ASSOC: c_int = 126; -pub const _SC_LEVEL1_ICACHE_LINESIZE: c_int = 127; -pub const _SC_LEVEL1_DCACHE_SIZE: c_int = 128; -pub const _SC_LEVEL1_DCACHE_ASSOC: c_int = 129; -pub const _SC_LEVEL1_DCACHE_LINESIZE: c_int = 130; -pub const _SC_LEVEL2_CACHE_SIZE: c_int = 131; -pub const _SC_LEVEL2_CACHE_ASSOC: c_int = 132; -pub const _SC_LEVEL2_CACHE_LINESIZE: c_int = 133; -pub const _SC_LEVEL3_CACHE_SIZE: c_int = 134; -pub const _SC_LEVEL3_CACHE_ASSOC: c_int = 135; -pub const _SC_LEVEL3_CACHE_LINESIZE: c_int = 136; -pub const _SC_LEVEL4_CACHE_SIZE: c_int = 137; -pub const _SC_LEVEL4_CACHE_ASSOC: c_int = 138; -pub const _SC_LEVEL4_CACHE_LINESIZE: c_int = 139; -pub const _PC_LINK_MAX: c_int = 0; -pub const _PC_MAX_CANON: c_int = 1; -pub const _PC_MAX_INPUT: c_int = 2; -pub const _PC_NAME_MAX: c_int = 3; -pub const _PC_PATH_MAX: c_int = 4; -pub const _PC_PIPE_BUF: c_int = 5; -pub const _PC_CHOWN_RESTRICTED: c_int = 6; -pub const _PC_NO_TRUNC: c_int = 7; -pub const _PC_VDISABLE: c_int = 8; -pub const _PC_ASYNC_IO: c_int = 9; -pub const _PC_PRIO_IO: c_int = 10; -pub const _PC_SYNC_IO: c_int = 11; -pub const _PC_FILESIZEBITS: c_int = 12; -pub const _PC_2_SYMLINKS: c_int = 13; -pub const _PC_SYMLINK_MAX: c_int = 14; -pub const _PC_ALLOC_SIZE_MIN: c_int = 15; -pub const _PC_REC_INCR_XFER_SIZE: c_int = 16; -pub const _PC_REC_MAX_XFER_SIZE: c_int = 17; -pub const _PC_REC_MIN_XFER_SIZE: c_int = 18; -pub const _PC_REC_XFER_ALIGN: c_int = 19; -pub const _PC_TIMESTAMP_RESOLUTION: c_int = 20; -pub const _CS_PATH: c_int = 0; - -pub const O_ACCMODE: c_int = 0x3; -pub const O_RDONLY: c_int = 0; -pub const O_WRONLY: c_int = 1; -pub const O_RDWR: c_int = 2; -pub const O_APPEND: c_int = 0x0008; -pub const O_CREAT: c_int = 0x0200; -pub const O_TRUNC: c_int = 0x0400; -pub const O_EXCL: c_int = 0x0800; -pub const O_SYNC: c_int = 0x2000; -pub const O_NONBLOCK: c_int = 0x4000; -pub const O_NOCTTY: c_int = 0x8000; -pub const O_CLOEXEC: c_int = 0x40000; -pub const O_NOFOLLOW: c_int = 0x100000; -pub const 
O_DIRECTORY: c_int = 0x200000; -pub const O_EXEC: c_int = 0x400000; -pub const O_SEARCH: c_int = 0x400000; -pub const O_DIRECT: c_int = 0x80000; -pub const O_DSYNC: c_int = 0x2000; -pub const O_RSYNC: c_int = 0x2000; -pub const O_TMPFILE: c_int = 0x800000; -pub const O_NOATIME: c_int = 0x1000000; -pub const O_PATH: c_int = 0x2000000; -pub const F_DUPFD: c_int = 0; -pub const F_GETFD: c_int = 1; -pub const F_SETFD: c_int = 2; -pub const F_GETFL: c_int = 3; -pub const F_SETFL: c_int = 4; -pub const F_GETOWN: c_int = 5; -pub const F_SETOWN: c_int = 6; -pub const F_GETLK: c_int = 7; -pub const F_SETLK: c_int = 8; -pub const F_SETLKW: c_int = 9; -pub const F_RGETLK: c_int = 10; -pub const F_RSETLK: c_int = 11; -pub const F_CNVT: c_int = 12; -pub const F_RSETLKW: c_int = 13; -pub const F_DUPFD_CLOEXEC: c_int = 14; -pub const F_RDLCK: c_int = 1; -pub const F_WRLCK: c_int = 2; -pub const F_UNLCK: c_int = 3; -pub const AT_FDCWD: c_int = -2; -pub const AT_EACCESS: c_int = 1; -pub const AT_SYMLINK_NOFOLLOW: c_int = 2; -pub const AT_SYMLINK_FOLLOW: c_int = 4; -pub const AT_REMOVEDIR: c_int = 8; -pub const AT_EMPTY_PATH: c_int = 16; -pub const LOCK_SH: c_int = 1; -pub const LOCK_EX: c_int = 2; -pub const LOCK_NB: c_int = 4; -pub const LOCK_UN: c_int = 8; - -pub const EPERM: c_int = 1; -pub const ENOENT: c_int = 2; -pub const ESRCH: c_int = 3; -pub const EINTR: c_int = 4; -pub const EIO: c_int = 5; -pub const ENXIO: c_int = 6; -pub const E2BIG: c_int = 7; -pub const ENOEXEC: c_int = 8; -pub const EBADF: c_int = 9; -pub const ECHILD: c_int = 10; -pub const EAGAIN: c_int = 11; -pub const ENOMEM: c_int = 12; -pub const EACCES: c_int = 13; -pub const EFAULT: c_int = 14; -pub const ENOTBLK: c_int = 15; -pub const EBUSY: c_int = 16; -pub const EEXIST: c_int = 17; -pub const EXDEV: c_int = 18; -pub const ENODEV: c_int = 19; -pub const ENOTDIR: c_int = 20; -pub const EISDIR: c_int = 21; -pub const EINVAL: c_int = 22; -pub const ENFILE: c_int = 23; -pub const EMFILE: c_int = 24; -pub const ENOTTY: c_int = 25; -pub const ETXTBSY: c_int = 26; -pub const EFBIG: c_int = 27; -pub const ENOSPC: c_int = 28; -pub const ESPIPE: c_int = 29; -pub const EROFS: c_int = 30; -pub const EMLINK: c_int = 31; -pub const EPIPE: c_int = 32; -pub const EDOM: c_int = 33; -pub const ERANGE: c_int = 34; -pub const ENOMSG: c_int = 35; -pub const EIDRM: c_int = 36; -pub const ECHRNG: c_int = 37; -pub const EL2NSYNC: c_int = 38; -pub const EL3HLT: c_int = 39; -pub const EL3RST: c_int = 40; -pub const ELNRNG: c_int = 41; -pub const EUNATCH: c_int = 42; -pub const ENOCSI: c_int = 43; -pub const EL2HLT: c_int = 44; -pub const EDEADLK: c_int = 45; -pub const ENOLCK: c_int = 46; -pub const EBADE: c_int = 50; -pub const EBADR: c_int = 51; -pub const EXFULL: c_int = 52; -pub const ENOANO: c_int = 53; -pub const EBADRQC: c_int = 54; -pub const EBADSLT: c_int = 55; -pub const EDEADLOCK: c_int = 56; -pub const EBFONT: c_int = 57; -pub const ENOSTR: c_int = 60; -pub const ENODATA: c_int = 61; -pub const ETIME: c_int = 62; -pub const ENOSR: c_int = 63; -pub const ENONET: c_int = 64; -pub const ENOPKG: c_int = 65; -pub const EREMOTE: c_int = 66; -pub const ENOLINK: c_int = 67; -pub const EADV: c_int = 68; -pub const ESRMNT: c_int = 69; -pub const ECOMM: c_int = 70; -pub const EPROTO: c_int = 71; -pub const EMULTIHOP: c_int = 74; -pub const EDOTDOT: c_int = 76; -pub const EBADMSG: c_int = 77; -pub const EFTYPE: c_int = 79; -pub const ENOTUNIQ: c_int = 80; -pub const EBADFD: c_int = 81; -pub const EREMCHG: c_int = 82; -pub const ELIBACC: c_int = 83; -pub 
const ELIBBAD: c_int = 84; -pub const ELIBSCN: c_int = 85; -pub const ELIBMAX: c_int = 86; -pub const ELIBEXEC: c_int = 87; -pub const ENOSYS: c_int = 88; -pub const ENOTEMPTY: c_int = 90; -pub const ENAMETOOLONG: c_int = 91; -pub const ELOOP: c_int = 92; -pub const EOPNOTSUPP: c_int = 95; -pub const EPFNOSUPPORT: c_int = 96; -pub const ECONNRESET: c_int = 104; -pub const ENOBUFS: c_int = 105; -pub const EAFNOSUPPORT: c_int = 106; -pub const EPROTOTYPE: c_int = 107; -pub const ENOTSOCK: c_int = 108; -pub const ENOPROTOOPT: c_int = 109; -pub const ESHUTDOWN: c_int = 110; -pub const ECONNREFUSED: c_int = 111; -pub const EADDRINUSE: c_int = 112; -pub const ECONNABORTED: c_int = 113; -pub const ENETUNREACH: c_int = 114; -pub const ENETDOWN: c_int = 115; -pub const ETIMEDOUT: c_int = 116; -pub const EHOSTDOWN: c_int = 117; -pub const EHOSTUNREACH: c_int = 118; -pub const EINPROGRESS: c_int = 119; -pub const EALREADY: c_int = 120; -pub const EDESTADDRREQ: c_int = 121; -pub const EMSGSIZE: c_int = 122; -pub const EPROTONOSUPPORT: c_int = 123; -pub const ESOCKTNOSUPPORT: c_int = 124; -pub const EADDRNOTAVAIL: c_int = 125; -pub const ENETRESET: c_int = 126; -pub const EISCONN: c_int = 127; -pub const ENOTCONN: c_int = 128; -pub const ETOOMANYREFS: c_int = 129; -pub const EPROCLIM: c_int = 130; -pub const EUSERS: c_int = 131; -pub const EDQUOT: c_int = 132; -pub const ESTALE: c_int = 133; -pub const ENOTSUP: c_int = 134; -pub const ENOMEDIUM: c_int = 135; -pub const EILSEQ: c_int = 138; -pub const EOVERFLOW: c_int = 139; -pub const ECANCELED: c_int = 140; -pub const ENOTRECOVERABLE: c_int = 141; -pub const EOWNERDEAD: c_int = 142; -pub const ESTRPIPE: c_int = 143; -pub const EWOULDBLOCK: c_int = EAGAIN; /* Operation would block */ - -pub const SCHED_OTHER: c_int = 3; -pub const SCHED_FIFO: c_int = 1; -pub const SCHED_RR: c_int = 2; - -pub const PTHREAD_COND_INITIALIZER: pthread_cond_t = 21 as *mut _; -pub const PTHREAD_CREATE_DETACHED: c_int = 1; -pub const PTHREAD_CREATE_JOINABLE: c_int = 0; -pub const PTHREAD_MUTEX_RECURSIVE: c_int = 0; -pub const PTHREAD_MUTEX_ERRORCHECK: c_int = 1; -pub const PTHREAD_MUTEX_NORMAL: c_int = 2; -pub const PTHREAD_MUTEX_DEFAULT: c_int = PTHREAD_MUTEX_NORMAL; -pub const PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP: pthread_mutex_t = 18 as *mut _; -pub const PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP: pthread_mutex_t = 20 as *mut _; -pub const PTHREAD_MUTEX_INITIALIZER: pthread_mutex_t = 19 as *mut _; -pub const PTHREAD_PROCESS_SHARED: c_int = 1; -pub const PTHREAD_PROCESS_PRIVATE: c_int = 0; -pub const PTHREAD_RWLOCK_INITIALIZER: pthread_rwlock_t = 22 as *mut _; - -pub const LITTLE_ENDIAN: c_int = 1234; -pub const BIG_ENDIAN: c_int = 4321; - -pub const TCP_NODELAY: c_int = 1; -pub const TCP_KEEPIDLE: c_int = 3; -pub const TCP_MAXSEG: c_int = 4; -pub const TCP_QUICKACK: c_int = 12; -pub const TCP_USER_TIMEOUT: c_int = 14; -pub const TCP_FASTOPEN: c_int = 15; -pub const TCP_KEEPCNT: c_int = 16; -pub const TCP_KEEPINTVL: c_int = 17; - -pub const WINDOWS_POST: c_int = 0; -pub const WINDOWS_SEND: c_int = 1; -pub const WINDOWS_HWND: c_int = 2; - -pub const MOUNT_TEXT: c_uint = 0x01; -pub const MOUNT_SYSTEM: c_uint = 0x08; -pub const MOUNT_EXEC: c_uint = 0x10; -pub const MOUNT_CYGDRIVE: c_uint = 0x20; -pub const MOUNT_CYGWIN_EXEC: c_uint = 0x40; -pub const MOUNT_SPARSE: c_uint = 0x80; -pub const MOUNT_NOTEXEC: c_uint = 0x100; -pub const MOUNT_DEVFS: c_uint = 0x200; -pub const MOUNT_PROC: c_uint = 0x400; -pub const MOUNT_RO: c_uint = 0x1000; -pub const MOUNT_NOACL: c_uint = 0x2000; 
-pub const MOUNT_NOPOSIX: c_uint = 0x4000; -pub const MOUNT_OVERRIDE: c_uint = 0x8000; -pub const MOUNT_IMMUTABLE: c_uint = 0x10000; -pub const MOUNT_AUTOMATIC: c_uint = 0x20000; -pub const MOUNT_DOS: c_uint = 0x40000; -pub const MOUNT_IHASH: c_uint = 0x80000; -pub const MOUNT_BIND: c_uint = 0x100000; -pub const MOUNT_USER_TEMP: c_uint = 0x200000; -pub const MOUNT_DONT_USE: c_uint = 0x80000000; - -pub const _POSIX_VDISABLE: cc_t = 0; - -pub const GRND_NONBLOCK: c_uint = 0x1; -pub const GRND_RANDOM: c_uint = 0x2; - -pub const _IONBF: c_int = 2; -pub const BUFSIZ: c_int = 1024; - -pub const POSIX_SPAWN_RESETIDS: c_int = 0x01; -pub const POSIX_SPAWN_SETPGROUP: c_int = 0x02; -pub const POSIX_SPAWN_SETSCHEDPARAM: c_int = 0x04; -pub const POSIX_SPAWN_SETSCHEDULER: c_int = 0x08; -pub const POSIX_SPAWN_SETSIGDEF: c_int = 0x10; -pub const POSIX_SPAWN_SETSIGMASK: c_int = 0x20; - -pub const POSIX_FADV_NORMAL: c_int = 0; -pub const POSIX_FADV_SEQUENTIAL: c_int = 1; -pub const POSIX_FADV_RANDOM: c_int = 2; -pub const POSIX_FADV_WILLNEED: c_int = 3; -pub const POSIX_FADV_DONTNEED: c_int = 4; -pub const POSIX_FADV_NOREUSE: c_int = 5; - -pub const FALLOC_FL_PUNCH_HOLE: c_int = 0x0001; -pub const FALLOC_FL_ZERO_RANGE: c_int = 0x0002; -pub const FALLOC_FL_UNSHARE_RANGE: c_int = 0x0004; -pub const FALLOC_FL_COLLAPSE_RANGE: c_int = 0x0008; -pub const FALLOC_FL_INSERT_RANGE: c_int = 0x0010; -pub const FALLOC_FL_KEEP_SIZE: c_int = 0x1000; - -f! { - pub fn FD_CLR(fd: c_int, set: *mut fd_set) -> () { - let fd = fd as usize; - let size = size_of_val(&(*set).fds_bits[0]) * 8; - (*set).fds_bits[fd / size] &= !(1 << (fd % size)); - } - - pub fn FD_ISSET(fd: c_int, set: *const fd_set) -> bool { - let fd = fd as usize; - let size = size_of_val(&(*set).fds_bits[0]) * 8; - ((*set).fds_bits[fd / size] & (1 << (fd % size))) != 0 - } - - pub fn FD_SET(fd: c_int, set: *mut fd_set) -> () { - let fd = fd as usize; - let size = size_of_val(&(*set).fds_bits[0]) * 8; - (*set).fds_bits[fd / size] |= 1 << (fd % size); - } - - pub fn FD_ZERO(set: *mut fd_set) -> () { - for slot in (*set).fds_bits.iter_mut() { - *slot = 0; - } - } - - pub fn CPU_ALLOC_SIZE(count: c_int) -> size_t { - let _dummy: cpu_set_t = cpu_set_t { bits: [0; 16] }; - let size_in_bits = 8 * size_of_val(&_dummy.bits[0]); - ((count as size_t + size_in_bits - 1) / 8) as size_t - } - - pub fn CPU_COUNT_S(size: usize, cpuset: &cpu_set_t) -> c_int { - let mut s: u32 = 0; - let size_of_mask = size_of_val(&cpuset.bits[0]); - for i in cpuset.bits[..(size / size_of_mask)].iter() { - s += i.count_ones(); - } - s as c_int - } - - pub fn CPU_ZERO(cpuset: &mut cpu_set_t) -> () { - for slot in cpuset.bits.iter_mut() { - *slot = 0; - } - } - pub fn CPU_SET(cpu: usize, cpuset: &mut cpu_set_t) -> () { - let size_in_bits = 8 * size_of_val(&cpuset.bits[0]); - if cpu < size_in_bits { - let (idx, offset) = (cpu / size_in_bits, cpu % size_in_bits); - cpuset.bits[idx] |= 1 << offset; - } - } - - pub fn CPU_CLR(cpu: usize, cpuset: &mut cpu_set_t) -> () { - let size_in_bits = 8 * size_of_val(&cpuset.bits[0]); - if cpu < size_in_bits { - let (idx, offset) = (cpu / size_in_bits, cpu % size_in_bits); - cpuset.bits[idx] &= !(1 << offset); - } - } - - pub fn CPU_ISSET(cpu: usize, cpuset: &cpu_set_t) -> bool { - let size_in_bits = 8 * size_of_val(&cpuset.bits[0]); - if cpu < size_in_bits { - let (idx, offset) = (cpu / size_in_bits, cpu % size_in_bits); - 0 != (cpuset.bits[idx] & (1 << offset)) - } else { - false - } - } - - pub fn CPU_COUNT(cpuset: &cpu_set_t) -> c_int { - 
CPU_COUNT_S(size_of::<cpu_set_t>(), cpuset) - } - - pub fn CPU_EQUAL(set1: &cpu_set_t, set2: &cpu_set_t) -> bool { - set1.bits == set2.bits - } - - pub fn CMSG_LEN(length: c_uint) -> c_uint { - CMSG_ALIGN(size_of::<cmsghdr>()) as c_uint + length - } - - pub const fn CMSG_SPACE(length: c_uint) -> c_uint { - (CMSG_ALIGN(length as usize) + CMSG_ALIGN(size_of::<cmsghdr>())) as c_uint - } - - pub fn CMSG_FIRSTHDR(mhdr: *const msghdr) -> *mut cmsghdr { - if (*mhdr).msg_controllen as usize >= size_of::<cmsghdr>() { - (*mhdr).msg_control.cast() - } else { - core::ptr::null_mut() - } - } - - pub fn CMSG_NXTHDR(mhdr: *const msghdr, cmsg: *const cmsghdr) -> *mut cmsghdr { - let next = (cmsg as usize + CMSG_ALIGN((*cmsg).cmsg_len as usize)) as *mut cmsghdr; - let max = (*mhdr).msg_control as usize + (*mhdr).msg_controllen as usize; - if next as usize + CMSG_ALIGN(size_of::<cmsghdr>()) as usize > max { - core::ptr::null_mut() - } else { - next - } - } - - pub fn CMSG_DATA(cmsg: *const cmsghdr) -> *mut c_uchar { - cmsg.offset(1).cast_mut().cast() - } -} - -safe_f! { - pub const fn makedev(ma: c_uint, mi: c_uint) -> dev_t { - let ma = ma as dev_t; - let mi = mi as dev_t; - (ma << 16) | (mi & 0xffff) - } - - pub const fn major(dev: dev_t) -> c_uint { - ((dev >> 16) & 0xffff) as c_uint - } - - pub const fn minor(dev: dev_t) -> c_uint { - (dev & 0xffff) as c_uint - } - - pub const fn WIFEXITED(status: c_int) -> bool { - (status & 0xff) == 0 - } - - pub const fn WIFSIGNALED(status: c_int) -> bool { - (status & 0o177) != 0o177 && (status & 0o177) != 0 - } - - pub const fn WIFSTOPPED(status: c_int) -> bool { - (status & 0xff) == 0o177 - } - - pub const fn WIFCONTINUED(status: c_int) -> bool { - (status & 0o177777) == 0o177777 - } - - pub const fn WEXITSTATUS(status: c_int) -> c_int { - (status >> 8) & 0xff - } - - pub const fn WTERMSIG(status: c_int) -> c_int { - status & 0o177 - } - - pub const fn WSTOPSIG(status: c_int) -> c_int { - (status >> 8) & 0xff - } - - pub const fn WCOREDUMP(status: c_int) -> bool { - WIFSIGNALED(status) && (status & 0x80) != 0 - } -} - -const fn CMSG_ALIGN(len: usize) -> usize { - len + size_of::<usize>() - 1 & !(size_of::<usize>() - 1) -} - -extern "C" { - pub fn sigwait(set: *const sigset_t, sig: *mut c_int) -> c_int; - pub fn sigwaitinfo(set: *const sigset_t, info: *mut siginfo_t) -> c_int; - - pub fn pthread_sigmask(how: c_int, set: *const sigset_t, oldset: *mut sigset_t) -> c_int; - pub fn sigsuspend(mask: *const sigset_t) -> c_int; - pub fn sigaltstack(ss: *const stack_t, oss: *mut stack_t) -> c_int; - pub fn pthread_kill(thread: pthread_t, sig: c_int) -> c_int; - - pub fn sigtimedwait( - set: *const sigset_t, - info: *mut siginfo_t, - timeout: *const timespec, - ) -> c_int; - - pub fn strftime(s: *mut c_char, max: size_t, format: *const c_char, tm: *const tm) -> size_t; - - pub fn asctime_r(tm: *const tm, buf: *mut c_char) -> *mut c_char; - pub fn ctime_r(timep: *const time_t, buf: *mut c_char) -> *mut c_char; - pub fn strptime(s: *const c_char, format: *const c_char, tm: *mut tm) -> *mut c_char; - pub fn clock_settime(clk_id: clockid_t, tp: *const timespec) -> c_int; - pub fn clock_gettime(clk_id: clockid_t, tp: *mut timespec) -> c_int; - pub fn clock_getres(clk_id: clockid_t, tp: *mut timespec) -> c_int; - - pub fn timer_create(clockid: clockid_t, sevp: *mut sigevent, timerid: *mut timer_t) -> c_int; - - pub fn timer_delete(timerid: timer_t) -> c_int; - - pub fn timer_settime( - timerid: timer_t, - flags: c_int, - new_value: *const itimerspec, - old_value: *mut itimerspec, - ) -> c_int; - - pub fn timer_gettime(timerid: timer_t, 
curr_value: *mut itimerspec) -> c_int; - pub fn timer_getoverrun(timerid: timer_t) -> c_int; - - pub fn clock_nanosleep( - clk_id: clockid_t, - flags: c_int, - rqtp: *const timespec, - rmtp: *mut timespec, - ) -> c_int; - - pub fn clock_getcpuclockid(pid: pid_t, clk_id: *mut clockid_t) -> c_int; - - pub fn futimes(fd: c_int, times: *const timeval) -> c_int; - pub fn lutimes(file: *const c_char, times: *const timeval) -> c_int; - pub fn settimeofday(tv: *const timeval, tz: *const timezone) -> c_int; - pub fn getitimer(which: c_int, curr_value: *mut itimerval) -> c_int; - - pub fn setitimer(which: c_int, new_value: *const itimerval, old_value: *mut itimerval) - -> c_int; - - pub fn gettimeofday(tp: *mut timeval, tz: *mut c_void) -> c_int; - pub fn futimesat(fd: c_int, path: *const c_char, times: *const timeval) -> c_int; - - pub fn getrlimit(resource: c_int, rlim: *mut rlimit) -> c_int; - pub fn setrlimit(resource: c_int, rlim: *const rlimit) -> c_int; - pub fn getpriority(which: c_int, who: id_t) -> c_int; - pub fn setpriority(which: c_int, who: id_t, prio: c_int) -> c_int; - - pub fn getpwnam_r( - name: *const c_char, - pwd: *mut passwd, - buf: *mut c_char, - buflen: size_t, - result: *mut *mut passwd, - ) -> c_int; - - pub fn getpwuid_r( - uid: uid_t, - pwd: *mut passwd, - buf: *mut c_char, - buflen: size_t, - result: *mut *mut passwd, - ) -> c_int; - - pub fn getpwent() -> *mut passwd; - pub fn setpwent(); - pub fn endpwent(); - - pub fn if_nameindex() -> *mut if_nameindex; - pub fn if_freenameindex(ptr: *mut if_nameindex); - - pub fn readv(fd: c_int, iov: *const iovec, iovcnt: c_int) -> ssize_t; - pub fn writev(fd: c_int, iov: *const iovec, iovcnt: c_int) -> ssize_t; - - pub fn mkfifoat(dirfd: c_int, pathname: *const c_char, mode: mode_t) -> c_int; - - pub fn mknodat(dirfd: c_int, pathname: *const c_char, mode: mode_t, dev: dev_t) -> c_int; - - pub fn utimensat( - dirfd: c_int, - path: *const c_char, - times: *const timespec, - flag: c_int, - ) -> c_int; - - pub fn futimens(fd: c_int, times: *const timespec) -> c_int; - - pub fn dlfork(val: c_int); - - pub fn accept4(s: c_int, addr: *mut sockaddr, addrlen: *mut socklen_t, flags: c_int) -> c_int; - - pub fn bind(socket: c_int, address: *const sockaddr, address_len: socklen_t) -> c_int; - - pub fn recvfrom( - socket: c_int, - buf: *mut c_void, - len: size_t, - flags: c_int, - addr: *mut sockaddr, - addrlen: *mut socklen_t, - ) -> ssize_t; - - pub fn recvmsg(fd: c_int, msg: *mut msghdr, flags: c_int) -> ssize_t; - pub fn sendmsg(fd: c_int, msg: *const msghdr, flags: c_int) -> ssize_t; - - pub fn getnameinfo( - sa: *const sockaddr, - salen: socklen_t, - host: *mut c_char, - hostlen: socklen_t, - serv: *mut c_char, - sevlen: socklen_t, - flags: c_int, - ) -> c_int; - - pub fn ppoll( - fds: *mut pollfd, - nfds: nfds_t, - timeout: *const timespec, - sigmask: *const sigset_t, - ) -> c_int; - - pub fn newlocale(mask: c_int, locale: *const c_char, base: locale_t) -> locale_t; - pub fn freelocale(loc: locale_t); - pub fn duplocale(base: locale_t) -> locale_t; - pub fn uselocale(loc: locale_t) -> locale_t; - - pub fn sem_init(sem: *mut sem_t, pshared: c_int, value: c_uint) -> c_int; - pub fn sem_destroy(sem: *mut sem_t) -> c_int; - pub fn sem_open(name: *const c_char, oflag: c_int, ...) 
-> *mut sem_t; - pub fn sem_close(sem: *mut sem_t) -> c_int; - pub fn sem_unlink(name: *const c_char) -> c_int; - pub fn sem_timedwait(sem: *mut sem_t, abstime: *const timespec) -> c_int; - pub fn sem_getvalue(sem: *mut sem_t, sval: *mut c_int) -> c_int; - - pub fn clearenv() -> c_int; - pub fn ptsname_r(fd: c_int, buf: *mut c_char, buflen: size_t) -> c_int; - pub fn getpt() -> c_int; - pub fn memalign(align: size_t, size: size_t) -> *mut c_void; - pub fn getloadavg(loadavg: *mut c_double, nelem: c_int) -> c_int; - - pub fn abs(i: c_int) -> c_int; - pub fn arc4random() -> u32; - pub fn arc4random_uniform(l: u32) -> u32; - pub fn arc4random_buf(buf: *mut c_void, size: size_t); - pub fn labs(i: c_long) -> c_long; - pub fn mkostemp(template: *mut c_char, flags: c_int) -> c_int; - pub fn mkostemps(template: *mut c_char, suffixlen: c_int, flags: c_int) -> c_int; - pub fn mkstemps(template: *mut c_char, suffixlen: c_int) -> c_int; - pub fn rand() -> c_int; - pub fn reallocarray(ptr: *mut c_void, nmemb: size_t, size: size_t) -> *mut c_void; - pub fn reallocf(ptr: *mut c_void, size: size_t) -> *mut c_void; - pub fn srand(seed: c_uint); - pub fn drand48() -> c_double; - pub fn erand48(xseed: *mut c_ushort) -> c_double; - pub fn jrand48(xseed: *mut c_ushort) -> c_long; - pub fn lcong48(p: *mut c_ushort); - pub fn lrand48() -> c_long; - pub fn mrand48() -> c_long; - pub fn nrand48(xseed: *mut c_ushort) -> c_long; - pub fn seed48(xseed: *mut c_ushort) -> *mut c_ushort; - pub fn srand48(seed: c_long); - - pub fn qsort_r( - base: *mut c_void, - num: size_t, - size: size_t, - compar: Option c_int>, - arg: *mut c_void, - ); - - pub fn mprotect(addr: *mut c_void, len: size_t, prot: c_int) -> c_int; - pub fn msync(addr: *mut c_void, len: size_t, flags: c_int) -> c_int; - pub fn posix_madvise(addr: *mut c_void, len: size_t, advice: c_int) -> c_int; - pub fn madvise(addr: *mut c_void, len: size_t, advice: c_int) -> c_int; - pub fn shm_open(name: *const c_char, oflag: c_int, mode: mode_t) -> c_int; - pub fn shm_unlink(name: *const c_char) -> c_int; - - pub fn explicit_bzero(s: *mut c_void, len: size_t); - pub fn ffs(value: c_int) -> c_int; - pub fn ffsl(value: c_long) -> c_int; - pub fn ffsll(value: c_longlong) -> c_int; - pub fn fls(value: c_int) -> c_int; - pub fn flsl(value: c_long) -> c_int; - pub fn flsll(value: c_longlong) -> c_int; - pub fn strcasecmp_l(s1: *const c_char, s2: *const c_char, loc: locale_t) -> c_int; - - pub fn strncasecmp_l(s1: *const c_char, s2: *const c_char, n: size_t, loc: locale_t) -> c_int; - - pub fn timingsafe_bcmp(a: *const c_void, b: *const c_void, len: size_t) -> c_int; - pub fn timingsafe_memcmp(a: *const c_void, b: *const c_void, len: size_t) -> c_int; - - pub fn memmem( - haystack: *const c_void, - haystacklen: size_t, - needle: *const c_void, - needlelen: size_t, - ) -> *mut c_void; - - pub fn memrchr(cx: *const c_void, c: c_int, n: size_t) -> *mut c_void; - #[link_name = "__xpg_strerror_r"] - pub fn strerror_r(errnum: c_int, buf: *mut c_char, buflen: size_t) -> c_int; - pub fn strsep(string: *mut *mut c_char, delim: *const c_char) -> *mut c_char; - - #[link_name = "__gnu_basename"] - pub fn basename(path: *const c_char) -> *mut c_char; - - pub fn daemon(nochdir: c_int, noclose: c_int) -> c_int; - pub fn dup3(src: c_int, dst: c_int, flags: c_int) -> c_int; - pub fn eaccess(pathname: *const c_char, mode: c_int) -> c_int; - pub fn euidaccess(pathname: *const c_char, mode: c_int) -> c_int; - - pub fn execvpe( - file: *const c_char, - argv: *const *mut c_char, - envp: *const 
*mut c_char, - ) -> c_int; - - pub fn faccessat(dirfd: c_int, pathname: *const c_char, mode: c_int, flags: c_int) -> c_int; - - pub fn fexecve(fd: c_int, argv: *const *mut c_char, envp: *const *mut c_char) -> c_int; - - pub fn fdatasync(fd: c_int) -> c_int; - pub fn getdomainname(name: *mut c_char, len: size_t) -> c_int; - pub fn getentropy(buf: *mut c_void, buflen: size_t) -> c_int; - pub fn gethostid() -> c_long; - pub fn getpagesize() -> c_int; - pub fn getpeereid(socket: c_int, euid: *mut uid_t, egid: *mut gid_t) -> c_int; - - pub fn pthread_atfork( - prepare: Option, - parent: Option, - child: Option, - ) -> c_int; - - pub fn pipe2(fds: *mut c_int, flags: c_int) -> c_int; - pub fn sbrk(increment: intptr_t) -> *mut c_void; - pub fn setgroups(ngroups: c_int, ptr: *const gid_t) -> c_int; - pub fn sethostname(name: *const c_char, len: size_t) -> c_int; - pub fn vhangup() -> c_int; - pub fn getdtablesize() -> c_int; - pub fn sync(); - - pub fn __errno() -> *mut c_int; - - pub fn sched_setparam(pid: pid_t, param: *const sched_param) -> c_int; - pub fn sched_getparam(pid: pid_t, param: *mut sched_param) -> c_int; - - pub fn sched_setscheduler(pid: pid_t, policy: c_int, param: *const sched_param) -> c_int; - - pub fn sched_getscheduler(pid: pid_t) -> c_int; - pub fn sched_get_priority_max(policy: c_int) -> c_int; - pub fn sched_get_priority_min(policy: c_int) -> c_int; - pub fn sched_rr_get_interval(pid: pid_t, t: *mut timespec) -> c_int; - pub fn sched_getcpu() -> c_int; - pub fn sched_getaffinity(pid: pid_t, cpusetsize: size_t, mask: *mut cpu_set_t) -> c_int; - - pub fn sched_setaffinity(pid: pid_t, cpusetsize: size_t, cpuset: *const cpu_set_t) -> c_int; - - pub fn pthread_attr_getguardsize(attr: *const pthread_attr_t, guardsize: *mut size_t) -> c_int; - - pub fn pthread_attr_getschedparam( - attr: *const pthread_attr_t, - param: *mut sched_param, - ) -> c_int; - - pub fn pthread_attr_setschedparam( - attr: *mut pthread_attr_t, - param: *const sched_param, - ) -> c_int; - - pub fn pthread_attr_getstack( - attr: *const pthread_attr_t, - stackaddr: *mut *mut c_void, - stacksize: *mut size_t, - ) -> c_int; - - pub fn pthread_cancel(thread: pthread_t) -> c_int; - - pub fn pthread_condattr_getclock( - attr: *const pthread_condattr_t, - clock_id: *mut clockid_t, - ) -> c_int; - - pub fn pthread_condattr_getpshared( - attr: *const pthread_condattr_t, - pshared: *mut c_int, - ) -> c_int; - - pub fn pthread_condattr_setclock(attr: *mut pthread_condattr_t, clock_id: clockid_t) -> c_int; - - pub fn pthread_condattr_setpshared(attr: *mut pthread_condattr_t, pshared: c_int) -> c_int; - pub fn pthread_barrierattr_init(attr: *mut pthread_barrierattr_t) -> c_int; - - pub fn pthread_barrierattr_setpshared(attr: *mut pthread_barrierattr_t, shared: c_int) - -> c_int; - - pub fn pthread_barrierattr_getpshared( - attr: *const pthread_barrierattr_t, - shared: *mut c_int, - ) -> c_int; - - pub fn pthread_barrierattr_destroy(attr: *mut pthread_barrierattr_t) -> c_int; - - pub fn pthread_barrier_init( - barrier: *mut pthread_barrier_t, - attr: *const pthread_barrierattr_t, - count: c_uint, - ) -> c_int; - - pub fn pthread_barrier_destroy(barrier: *mut pthread_barrier_t) -> c_int; - pub fn pthread_barrier_wait(barrier: *mut pthread_barrier_t) -> c_int; - - pub fn pthread_create( - native: *mut pthread_t, - attr: *const pthread_attr_t, - f: extern "C" fn(*mut c_void) -> *mut c_void, - value: *mut c_void, - ) -> c_int; - - pub fn pthread_getcpuclockid(thread: pthread_t, clk_id: *mut clockid_t) -> c_int; - - pub fn 
pthread_getschedparam( - native: pthread_t, - policy: *mut c_int, - param: *mut sched_param, - ) -> c_int; - - pub fn pthread_mutex_timedlock(lock: *mut pthread_mutex_t, abstime: *const timespec) -> c_int; - - pub fn pthread_mutexattr_getprotocol( - attr: *const pthread_mutexattr_t, - protocol: *mut c_int, - ) -> c_int; - - pub fn pthread_mutexattr_getpshared( - attr: *const pthread_mutexattr_t, - pshared: *mut c_int, - ) -> c_int; - - pub fn pthread_mutexattr_setprotocol(attr: *mut pthread_mutexattr_t, protocol: c_int) -> c_int; - - pub fn pthread_mutexattr_setpshared(attr: *mut pthread_mutexattr_t, pshared: c_int) -> c_int; - - pub fn pthread_spin_destroy(lock: *mut pthread_spinlock_t) -> c_int; - pub fn pthread_spin_init(lock: *mut pthread_spinlock_t, pshared: c_int) -> c_int; - pub fn pthread_spin_lock(lock: *mut pthread_spinlock_t) -> c_int; - pub fn pthread_spin_trylock(lock: *mut pthread_spinlock_t) -> c_int; - pub fn pthread_spin_unlock(lock: *mut pthread_spinlock_t) -> c_int; - - pub fn pthread_rwlockattr_getpshared( - attr: *const pthread_rwlockattr_t, - val: *mut c_int, - ) -> c_int; - - pub fn pthread_rwlockattr_setpshared(attr: *mut pthread_rwlockattr_t, val: c_int) -> c_int; - - pub fn pthread_setschedparam( - native: pthread_t, - policy: c_int, - param: *const sched_param, - ) -> c_int; - - pub fn pthread_setschedprio(native: pthread_t, priority: c_int) -> c_int; - - pub fn pthread_getaffinity_np( - thread: pthread_t, - cpusetsize: size_t, - cpuset: *mut cpu_set_t, - ) -> c_int; - - pub fn pthread_getattr_np(native: pthread_t, attr: *mut pthread_attr_t) -> c_int; - pub fn pthread_getname_np(thread: pthread_t, name: *mut c_char, len: size_t) -> c_int; - - pub fn pthread_setaffinity_np( - thread: pthread_t, - cpusetsize: size_t, - cpuset: *const cpu_set_t, - ) -> c_int; - - pub fn pthread_setname_np(thread: pthread_t, name: *const c_char) -> c_int; - pub fn pthread_sigqueue(thread: pthread_t, sig: c_int, value: sigval) -> c_int; - - pub fn ioctl(fd: c_int, request: c_int, ...) 
-> c_int; - - pub fn getrandom(buf: *mut c_void, buflen: size_t, flags: c_uint) -> ssize_t; - - pub fn mount(src: *const c_char, target: *const c_char, flags: c_uint) -> c_int; - - pub fn umount(target: *const c_char) -> c_int; - pub fn cygwin_umount(target: *const c_char, flags: c_uint) -> c_int; - - pub fn dirfd(dirp: *mut DIR) -> c_int; - pub fn seekdir(dirp: *mut DIR, loc: c_long); - pub fn telldir(dirp: *mut DIR) -> c_long; - - pub fn uname(buf: *mut utsname) -> c_int; - - pub fn posix_spawn( - pid: *mut pid_t, - path: *const c_char, - file_actions: *const posix_spawn_file_actions_t, - attrp: *const posix_spawnattr_t, - argv: *const *mut c_char, - envp: *const *mut c_char, - ) -> c_int; - pub fn posix_spawnp( - pid: *mut pid_t, - file: *const c_char, - file_actions: *const posix_spawn_file_actions_t, - attrp: *const posix_spawnattr_t, - argv: *const *mut c_char, - envp: *const *mut c_char, - ) -> c_int; - pub fn posix_spawnattr_init(attr: *mut posix_spawnattr_t) -> c_int; - pub fn posix_spawnattr_destroy(attr: *mut posix_spawnattr_t) -> c_int; - pub fn posix_spawnattr_getsigdefault( - attr: *const posix_spawnattr_t, - default: *mut sigset_t, - ) -> c_int; - pub fn posix_spawnattr_setsigdefault( - attr: *mut posix_spawnattr_t, - default: *const sigset_t, - ) -> c_int; - pub fn posix_spawnattr_getsigmask( - attr: *const posix_spawnattr_t, - default: *mut sigset_t, - ) -> c_int; - pub fn posix_spawnattr_setsigmask( - attr: *mut posix_spawnattr_t, - default: *const sigset_t, - ) -> c_int; - pub fn posix_spawnattr_getflags(attr: *const posix_spawnattr_t, flags: *mut c_short) -> c_int; - pub fn posix_spawnattr_setflags(attr: *mut posix_spawnattr_t, flags: c_short) -> c_int; - pub fn posix_spawnattr_getpgroup(attr: *const posix_spawnattr_t, flags: *mut pid_t) -> c_int; - pub fn posix_spawnattr_setpgroup(attr: *mut posix_spawnattr_t, flags: pid_t) -> c_int; - pub fn posix_spawnattr_getschedpolicy( - attr: *const posix_spawnattr_t, - flags: *mut c_int, - ) -> c_int; - pub fn posix_spawnattr_setschedpolicy(attr: *mut posix_spawnattr_t, flags: c_int) -> c_int; - pub fn posix_spawnattr_getschedparam( - attr: *const posix_spawnattr_t, - param: *mut sched_param, - ) -> c_int; - pub fn posix_spawnattr_setschedparam( - attr: *mut posix_spawnattr_t, - param: *const sched_param, - ) -> c_int; - - pub fn posix_spawn_file_actions_init(actions: *mut posix_spawn_file_actions_t) -> c_int; - pub fn posix_spawn_file_actions_destroy(actions: *mut posix_spawn_file_actions_t) -> c_int; - pub fn posix_spawn_file_actions_addopen( - actions: *mut posix_spawn_file_actions_t, - fd: c_int, - path: *const c_char, - oflag: c_int, - mode: mode_t, - ) -> c_int; - pub fn posix_spawn_file_actions_addclose( - actions: *mut posix_spawn_file_actions_t, - fd: c_int, - ) -> c_int; - pub fn posix_spawn_file_actions_adddup2( - actions: *mut posix_spawn_file_actions_t, - fd: c_int, - newfd: c_int, - ) -> c_int; - pub fn posix_spawn_file_actions_addchdir( - actions: *mut crate::posix_spawn_file_actions_t, - path: *const c_char, - ) -> c_int; - pub fn posix_spawn_file_actions_addfchdir( - actions: *mut crate::posix_spawn_file_actions_t, - fd: c_int, - ) -> c_int; - pub fn posix_spawn_file_actions_addchdir_np( - actions: *mut crate::posix_spawn_file_actions_t, - path: *const c_char, - ) -> c_int; - pub fn posix_spawn_file_actions_addfchdir_np( - actions: *mut crate::posix_spawn_file_actions_t, - fd: c_int, - ) -> c_int; - - pub fn forkpty( - amaster: *mut c_int, - name: *mut c_char, - termp: *const termios, - winp: *const 
crate::winsize, - ) -> crate::pid_t; - pub fn openpty( - amaster: *mut c_int, - aslave: *mut c_int, - name: *mut c_char, - termp: *const termios, - winp: *const crate::winsize, - ) -> c_int; - - pub fn getgrgid_r( - gid: crate::gid_t, - grp: *mut crate::group, - buf: *mut c_char, - buflen: size_t, - result: *mut *mut crate::group, - ) -> c_int; - pub fn getgrouplist( - user: *const c_char, - group: crate::gid_t, - groups: *mut crate::gid_t, - ngroups: *mut c_int, - ) -> c_int; - pub fn getgrnam_r( - name: *const c_char, - grp: *mut crate::group, - buf: *mut c_char, - buflen: size_t, - result: *mut *mut crate::group, - ) -> c_int; - pub fn initgroups(user: *const c_char, group: crate::gid_t) -> c_int; - - pub fn statfs(path: *const c_char, buf: *mut statfs) -> c_int; - pub fn fstatfs(fd: c_int, buf: *mut statfs) -> c_int; - - pub fn posix_fadvise(fd: c_int, offset: off_t, len: off_t, advise: c_int) -> c_int; - pub fn posix_fallocate(fd: c_int, offset: off_t, len: off_t) -> c_int; - pub fn fallocate(fd: c_int, mode: c_int, offset: off_t, len: off_t) -> c_int; -} diff --git a/vendor/libc/src/unix/haiku/b32.rs b/vendor/libc/src/unix/haiku/b32.rs deleted file mode 100644 index 1aa27e615ca4ea..00000000000000 --- a/vendor/libc/src/unix/haiku/b32.rs +++ /dev/null @@ -1,18 +0,0 @@ -pub type time_t = i32; - -pub type Elf_Addr = crate::Elf32_Addr; -pub type Elf_Half = crate::Elf32_Half; -pub type Elf_Phdr = crate::Elf32_Phdr; - -s! { - pub struct Elf32_Phdr { - pub p_type: crate::Elf32_Word, - pub p_offset: crate::Elf32_Off, - pub p_vaddr: crate::Elf32_Addr, - pub p_paddr: crate::Elf32_Addr, - pub p_filesz: crate::Elf32_Word, - pub p_memsz: crate::Elf32_Word, - pub p_flags: crate::Elf32_Word, - pub p_align: crate::Elf32_Word, - } -} diff --git a/vendor/libc/src/unix/haiku/b64.rs b/vendor/libc/src/unix/haiku/b64.rs deleted file mode 100644 index 3355241fdb7971..00000000000000 --- a/vendor/libc/src/unix/haiku/b64.rs +++ /dev/null @@ -1,18 +0,0 @@ -pub type time_t = i64; - -pub type Elf_Addr = crate::Elf64_Addr; -pub type Elf_Half = crate::Elf64_Half; -pub type Elf_Phdr = crate::Elf64_Phdr; - -s! { - pub struct Elf64_Phdr { - pub p_type: crate::Elf64_Word, - pub p_flags: crate::Elf64_Word, - pub p_offset: crate::Elf64_Off, - pub p_vaddr: crate::Elf64_Addr, - pub p_paddr: crate::Elf64_Addr, - pub p_filesz: crate::Elf64_Xword, - pub p_memsz: crate::Elf64_Xword, - pub p_align: crate::Elf64_Xword, - } -} diff --git a/vendor/libc/src/unix/haiku/bsd.rs b/vendor/libc/src/unix/haiku/bsd.rs deleted file mode 100644 index 1e3881e2c67ff5..00000000000000 --- a/vendor/libc/src/unix/haiku/bsd.rs +++ /dev/null @@ -1,151 +0,0 @@ -//! This file contains the BSD APIs available in Haiku. It corresponds to the -//! header files in `headers/compatibility/bsd`. -//! -//! Note that Haiku's BSD compatibility is a combination of system APIs and -//! utility libraries. There should only be system APIs in `libc`. When you are -//! trying to determine whether something should be included in this file, the -//! best indicator is whether it also exists in the BSD-specific definitions in -//! this libc crate. - -use crate::prelude::*; - -// stringlist.h (utility library) -// Note: this is kept because it was previously introduced -pub type StringList = _stringlist; - -s! 
{ - // stringlist.h (utility library) - // Note: this is kept because it was previously introduced - pub struct _stringlist { - pub sl_str: *mut *mut c_char, - pub sl_max: size_t, - pub sl_cur: size_t, - } - - // sys/event.h - pub struct kevent { - pub ident: crate::uintptr_t, - pub filter: c_short, - pub flags: c_ushort, - pub fflags: c_uint, - pub data: i64, - pub udata: *mut c_void, - pub ext: [u64; 4], - } - - // sys/link_elf.h - pub struct dl_phdr_info { - pub dlpi_addr: crate::Elf_Addr, - pub dlpi_name: *const c_char, - pub dlpi_phdr: *const crate::Elf_Phdr, - pub dlpi_phnum: crate::Elf_Half, - } -} - -// sys/event.h -pub const EVFILT_READ: i16 = -1; -pub const EVFILT_WRITE: i16 = -2; -pub const EVFILT_PROC: i16 = -5; -pub const EV_ADD: u16 = 0x0001; -pub const EV_DELETE: u16 = 0x0002; -pub const EV_ONESHOT: u16 = 0x0010; -pub const EV_CLEAR: u16 = 0x0020; -pub const EV_EOF: u16 = 0x8000; -pub const EV_ERROR: u16 = 0x4000; -pub const NOTE_EXIT: u32 = 0x80000000; - -// sys/ioccom.h -pub const IOC_VOID: c_ulong = 0x20000000; -pub const IOC_OUT: c_ulong = 0x40000000; -pub const IOC_IN: c_ulong = 0x80000000; -pub const IOC_INOUT: c_ulong = IOC_IN | IOC_OUT; -pub const IOC_DIRMASK: c_ulong = 0xe0000000; - -#[link(name = "bsd")] -extern "C" { - // stdlib.h - pub fn daemon(nochdir: c_int, noclose: c_int) -> c_int; - pub fn getprogname() -> *const c_char; - pub fn setprogname(progname: *const c_char); - pub fn arc4random() -> u32; - pub fn arc4random_uniform(upper_bound: u32) -> u32; - pub fn arc4random_buf(buf: *mut c_void, n: size_t); - pub fn mkstemps(template: *mut c_char, suffixlen: c_int) -> c_int; - pub fn strtonum( - nptr: *const c_char, - minval: c_longlong, - maxval: c_longlong, - errstr: *mut *const c_char, - ) -> c_longlong; - - // pty.h - pub fn openpty( - amaster: *mut c_int, - aslave: *mut c_int, - name: *mut c_char, - termp: *mut crate::termios, - winp: *mut crate::winsize, - ) -> c_int; - pub fn login_tty(_fd: c_int) -> c_int; - pub fn forkpty( - amaster: *mut c_int, - name: *mut c_char, - termp: *mut crate::termios, - winp: *mut crate::winsize, - ) -> crate::pid_t; - - // string.h - pub fn strsep(string: *mut *mut c_char, delimiters: *const c_char) -> *mut c_char; - pub fn explicit_bzero(buf: *mut c_void, len: size_t); - - // stringlist.h (utility library) - // Note: this is kept because it was previously introduced - pub fn sl_init() -> *mut StringList; - pub fn sl_add(sl: *mut StringList, n: *mut c_char) -> c_int; - pub fn sl_free(sl: *mut StringList, i: c_int); - pub fn sl_find(sl: *mut StringList, n: *mut c_char) -> *mut c_char; - - // sys/event.h - pub fn kqueue() -> c_int; - pub fn kevent( - kq: c_int, - changelist: *const kevent, - nchanges: c_int, - eventlist: *mut kevent, - nevents: c_int, - timeout: *const crate::timespec, - ) -> c_int; - - // sys/link_elf.h - pub fn dl_iterate_phdr( - callback: Option< - unsafe extern "C" fn(info: *mut dl_phdr_info, size: usize, data: *mut c_void) -> c_int, - >, - data: *mut c_void, - ) -> c_int; - - // sys/time.h - pub fn lutimes(file: *const c_char, times: *const crate::timeval) -> c_int; - - // sys/uov.h - pub fn preadv( - fd: c_int, - iov: *const crate::iovec, - iovcnt: c_int, - offset: crate::off_t, - ) -> ssize_t; - pub fn pwritev( - fd: c_int, - iov: *const crate::iovec, - iovcnt: c_int, - offset: crate::off_t, - ) -> ssize_t; - - // sys/wait.h - pub fn wait4( - pid: crate::pid_t, - status: *mut c_int, - options: c_int, - rusage: *mut crate::rusage, - ) -> crate::pid_t; -} diff --git a/vendor/libc/src/unix/haiku/mod.rs 
b/vendor/libc/src/unix/haiku/mod.rs deleted file mode 100644 index 964598e97ca35e..00000000000000 --- a/vendor/libc/src/unix/haiku/mod.rs +++ /dev/null @@ -1,2097 +0,0 @@ -use crate::prelude::*; - -// This module contains bindings to the native Haiku API. The Haiku API -// originates from BeOS, and it was the original way to perform low level -// system and IO operations. The POSIX API was in that era was like a -// compatibility layer. In current Haiku development, both the POSIX API and -// the Haiku API are considered to be co-equal status. However, they are not -// integrated like they are on other UNIX platforms, which means that for many -// low level concepts there are two versions, like processes (POSIX) and -// teams (Haiku), or pthreads and native threads. -// -// Both the POSIX API and the Haiku API live in libroot.so, the library that is -// linked to any binary by default. Additionally, Haiku supports several -// non-POSIX APIs from BSD and GNU, which live in libbsd.so and libgnu.so. These -// modules are also supported. -// -// The module is comprised of the following files: -// - `mod.rs` (this file) implements the C11 and POSIX API found in -// `headers/posix` -// - `b32.rs`, `b64.rs` and `x86_64.rs` contain platform-specific definitions -// of the C11 and POSIX APIs -// - `native.rs` defines the native Haiku API that is implemented in -// `libroot.so` and that are found in `headers/os`. -// - `bsd.rs` defines the BSD customizations available on Haiku found in -// `headers/compatibility/bsd` - -pub type rlim_t = crate::uintptr_t; -pub type sa_family_t = u8; -pub type pthread_key_t = c_int; -pub type nfds_t = c_ulong; -pub type tcflag_t = c_uint; -pub type speed_t = c_uchar; -pub type clock_t = i32; -pub type clockid_t = i32; -pub type suseconds_t = i32; -pub type wchar_t = i32; -pub type off_t = i64; -pub type ino_t = i64; -pub type blkcnt_t = i64; -pub type blksize_t = i32; -pub type dev_t = i32; -pub type mode_t = u32; -pub type nlink_t = i32; -pub type useconds_t = u32; -pub type socklen_t = u32; -pub type pthread_t = crate::uintptr_t; -pub type pthread_condattr_t = crate::uintptr_t; -pub type pthread_mutexattr_t = crate::uintptr_t; -pub type pthread_rwlockattr_t = crate::uintptr_t; -pub type sigset_t = u64; -pub type fsblkcnt_t = i64; -pub type fsfilcnt_t = i64; -pub type pthread_attr_t = *mut c_void; -pub type nl_item = c_int; -pub type id_t = i32; -pub type idtype_t = c_int; -pub type fd_mask = u32; -pub type regoff_t = c_int; -pub type key_t = i32; -pub type msgqnum_t = u32; -pub type msglen_t = u32; - -pub type Elf32_Addr = u32; -pub type Elf32_Half = u16; -pub type Elf32_Off = u32; -pub type Elf32_Sword = i32; -pub type Elf32_Word = u32; - -pub type Elf64_Addr = u64; -pub type Elf64_Half = u16; -pub type Elf64_Off = u64; -pub type Elf64_Sword = i32; -pub type Elf64_Sxword = i64; -pub type Elf64_Word = u32; -pub type Elf64_Xword = u64; - -pub type ENTRY = entry; -pub type ACTION = c_int; - -pub type posix_spawnattr_t = *mut c_void; -pub type posix_spawn_file_actions_t = *mut c_void; - -#[derive(Debug)] -pub enum timezone {} -impl Copy for timezone {} -impl Clone for timezone { - fn clone(&self) -> timezone { - *self - } -} - -impl siginfo_t { - pub unsafe fn si_addr(&self) -> *mut c_void { - self.si_addr - } - - pub unsafe fn si_pid(&self) -> crate::pid_t { - self.si_pid - } - - pub unsafe fn si_uid(&self) -> crate::uid_t { - self.si_uid - } - - pub unsafe fn si_status(&self) -> c_int { - self.si_status - } -} - -s! 
{ - pub struct in_addr { - pub s_addr: crate::in_addr_t, - } - - pub struct ip_mreq { - pub imr_multiaddr: in_addr, - pub imr_interface: in_addr, - } - - pub struct sockaddr { - pub sa_len: u8, - pub sa_family: sa_family_t, - pub sa_data: [u8; 30], - } - - pub struct sockaddr_in { - pub sin_len: u8, - pub sin_family: sa_family_t, - pub sin_port: crate::in_port_t, - pub sin_addr: crate::in_addr, - pub sin_zero: [i8; 24], - } - - pub struct sockaddr_in6 { - pub sin6_len: u8, - pub sin6_family: u8, - pub sin6_port: u16, - pub sin6_flowinfo: u32, - pub sin6_addr: crate::in6_addr, - pub sin6_scope_id: u32, - } - - pub struct addrinfo { - pub ai_flags: c_int, - pub ai_family: c_int, - pub ai_socktype: c_int, - pub ai_protocol: c_int, - pub ai_addrlen: socklen_t, - pub ai_canonname: *mut c_char, - pub ai_addr: *mut crate::sockaddr, - pub ai_next: *mut addrinfo, - } - - pub struct ifaddrs { - pub ifa_next: *mut ifaddrs, - pub ifa_name: *const c_char, - pub ifa_flags: c_uint, - pub ifa_addr: *mut crate::sockaddr, - pub ifa_netmask: *mut crate::sockaddr, - pub ifa_dstaddr: *mut crate::sockaddr, - pub ifa_data: *mut c_void, - } - - pub struct fd_set { - // size for 1024 bits, and a fd_mask with size u32 - fds_bits: [fd_mask; 32], - } - - pub struct tm { - pub tm_sec: c_int, - pub tm_min: c_int, - pub tm_hour: c_int, - pub tm_mday: c_int, - pub tm_mon: c_int, - pub tm_year: c_int, - pub tm_wday: c_int, - pub tm_yday: c_int, - pub tm_isdst: c_int, - pub tm_gmtoff: c_int, - pub tm_zone: *mut c_char, - } - - pub struct utsname { - pub sysname: [c_char; 32], - pub nodename: [c_char; 32], - pub release: [c_char; 32], - pub version: [c_char; 32], - pub machine: [c_char; 32], - } - - pub struct lconv { - pub decimal_point: *mut c_char, - pub thousands_sep: *mut c_char, - pub grouping: *mut c_char, - pub int_curr_symbol: *mut c_char, - pub currency_symbol: *mut c_char, - pub mon_decimal_point: *mut c_char, - pub mon_thousands_sep: *mut c_char, - pub mon_grouping: *mut c_char, - pub positive_sign: *mut c_char, - pub negative_sign: *mut c_char, - pub int_frac_digits: c_char, - pub frac_digits: c_char, - pub p_cs_precedes: c_char, - pub p_sep_by_space: c_char, - pub n_cs_precedes: c_char, - pub n_sep_by_space: c_char, - pub p_sign_posn: c_char, - pub n_sign_posn: c_char, - pub int_p_cs_precedes: c_char, - pub int_p_sep_by_space: c_char, - pub int_n_cs_precedes: c_char, - pub int_n_sep_by_space: c_char, - pub int_p_sign_posn: c_char, - pub int_n_sign_posn: c_char, - } - - pub struct msghdr { - pub msg_name: *mut c_void, - pub msg_namelen: socklen_t, - pub msg_iov: *mut crate::iovec, - pub msg_iovlen: c_int, - pub msg_control: *mut c_void, - pub msg_controllen: socklen_t, - pub msg_flags: c_int, - } - - pub struct cmsghdr { - pub cmsg_len: crate::socklen_t, - pub cmsg_level: c_int, - pub cmsg_type: c_int, - } - - pub struct Dl_info { - pub dli_fname: *const c_char, - pub dli_fbase: *mut c_void, - pub dli_sname: *const c_char, - pub dli_saddr: *mut c_void, - } - - pub struct termios { - pub c_iflag: crate::tcflag_t, - pub c_oflag: crate::tcflag_t, - pub c_cflag: crate::tcflag_t, - pub c_lflag: crate::tcflag_t, - pub c_line: c_char, - pub c_ispeed: crate::speed_t, - pub c_ospeed: crate::speed_t, - pub c_cc: [crate::cc_t; crate::NCCS], - } - - pub struct flock { - pub l_type: c_short, - pub l_whence: c_short, - pub l_start: off_t, - pub l_len: off_t, - pub l_pid: crate::pid_t, - } - - pub struct stat { - pub st_dev: dev_t, - pub st_ino: ino_t, - pub st_mode: mode_t, - pub st_nlink: nlink_t, - pub st_uid: 
crate::uid_t, - pub st_gid: crate::gid_t, - pub st_size: off_t, - pub st_rdev: dev_t, - pub st_blksize: blksize_t, - pub st_atime: time_t, - pub st_atime_nsec: c_long, - pub st_mtime: time_t, - pub st_mtime_nsec: c_long, - pub st_ctime: time_t, - pub st_ctime_nsec: c_long, - pub st_crtime: time_t, - pub st_crtime_nsec: c_long, - pub st_type: u32, - pub st_blocks: blkcnt_t, - } - - pub struct glob_t { - pub gl_pathc: size_t, - __unused1: size_t, - pub gl_offs: size_t, - __unused2: size_t, - pub gl_pathv: *mut *mut c_char, - - __unused3: *mut c_void, - __unused4: *mut c_void, - __unused5: *mut c_void, - __unused6: *mut c_void, - __unused7: *mut c_void, - __unused8: *mut c_void, - } - - pub struct pthread_mutex_t { - flags: u32, - lock: i32, - unused: i32, - owner: i32, - owner_count: i32, - } - - pub struct pthread_cond_t { - flags: u32, - unused: i32, - mutex: *mut c_void, - waiter_count: i32, - lock: i32, - } - - pub struct pthread_rwlock_t { - flags: u32, - owner: i32, - lock_sem: i32, // this is actually a union - lock_count: i32, - reader_count: i32, - writer_count: i32, - waiters: [*mut c_void; 2], - } - - pub struct pthread_spinlock_t { - lock: u32, - } - - pub struct passwd { - pub pw_name: *mut c_char, - pub pw_passwd: *mut c_char, - pub pw_uid: crate::uid_t, - pub pw_gid: crate::gid_t, - pub pw_dir: *mut c_char, - pub pw_shell: *mut c_char, - pub pw_gecos: *mut c_char, - } - - pub struct statvfs { - pub f_bsize: c_ulong, - pub f_frsize: c_ulong, - pub f_blocks: crate::fsblkcnt_t, - pub f_bfree: crate::fsblkcnt_t, - pub f_bavail: crate::fsblkcnt_t, - pub f_files: crate::fsfilcnt_t, - pub f_ffree: crate::fsfilcnt_t, - pub f_favail: crate::fsfilcnt_t, - pub f_fsid: c_ulong, - pub f_flag: c_ulong, - pub f_namemax: c_ulong, - } - - pub struct stack_t { - pub ss_sp: *mut c_void, - pub ss_size: size_t, - pub ss_flags: c_int, - } - - pub struct siginfo_t { - pub si_signo: c_int, - pub si_code: c_int, - pub si_errno: c_int, - pub si_pid: crate::pid_t, - pub si_uid: crate::uid_t, - pub si_addr: *mut c_void, - pub si_status: c_int, - pub si_band: c_long, - pub sigval: *mut c_void, - } - - pub struct sigaction { - pub sa_sigaction: crate::sighandler_t, //actually a union with sa_handler - pub sa_mask: crate::sigset_t, - pub sa_flags: c_int, - sa_userdata: *mut c_void, - } - - pub struct sem_t { - pub type_: i32, - pub named_sem_id: i32, // actually a union with unnamed_sem (i32) - pub padding: [i32; 2], - } - - pub struct ucred { - pub pid: crate::pid_t, - pub uid: crate::uid_t, - pub gid: crate::gid_t, - } - - pub struct sockaddr_dl { - pub sdl_len: u8, - pub sdl_family: u8, - pub sdl_e_type: u16, - pub sdl_index: u32, - pub sdl_type: u8, - pub sdl_nlen: u8, - pub sdl_alen: u8, - pub sdl_slen: u8, - pub sdl_data: [u8; 46], - } - - pub struct spwd { - pub sp_namp: *mut c_char, - pub sp_pwdp: *mut c_char, - pub sp_lstchg: c_int, - pub sp_min: c_int, - pub sp_max: c_int, - pub sp_warn: c_int, - pub sp_inact: c_int, - pub sp_expire: c_int, - pub sp_flag: c_int, - } - - pub struct regex_t { - __buffer: *mut c_void, - __allocated: size_t, - __used: size_t, - __syntax: c_ulong, - __fastmap: *mut c_char, - __translate: *mut c_char, - __re_nsub: size_t, - __bitfield: u8, - } - - pub struct regmatch_t { - pub rm_so: regoff_t, - pub rm_eo: regoff_t, - } - - pub struct msqid_ds { - pub msg_perm: crate::ipc_perm, - pub msg_qnum: crate::msgqnum_t, - pub msg_qbytes: crate::msglen_t, - pub msg_lspid: crate::pid_t, - pub msg_lrpid: crate::pid_t, - pub msg_stime: crate::time_t, - pub msg_rtime: crate::time_t, 
- pub msg_ctime: crate::time_t, - } - - pub struct ipc_perm { - pub key: crate::key_t, - pub uid: crate::uid_t, - pub gid: crate::gid_t, - pub cuid: crate::uid_t, - pub cgid: crate::gid_t, - pub mode: mode_t, - } - - pub struct sembuf { - pub sem_num: c_ushort, - pub sem_op: c_short, - pub sem_flg: c_short, - } - - pub struct entry { - pub key: *mut c_char, - pub data: *mut c_void, - } - - pub struct option { - pub name: *const c_char, - pub has_arg: c_int, - pub flag: *mut c_int, - pub val: c_int, - } -} - -s_no_extra_traits! { - pub struct sockaddr_un { - pub sun_len: u8, - pub sun_family: sa_family_t, - pub sun_path: [c_char; 126], - } - pub struct sockaddr_storage { - pub ss_len: u8, - pub ss_family: sa_family_t, - __ss_pad1: [u8; 6], - __ss_pad2: u64, - __ss_pad3: [u8; 112], - } - pub struct dirent { - pub d_dev: dev_t, - pub d_pdev: dev_t, - pub d_ino: ino_t, - pub d_pino: i64, - pub d_reclen: c_ushort, - pub d_name: [c_char; 1024], // Max length is _POSIX_PATH_MAX - } - - pub struct sigevent { - pub sigev_notify: c_int, - pub sigev_signo: c_int, - pub sigev_value: crate::sigval, - __unused1: *mut c_void, // actually a function pointer - pub sigev_notify_attributes: *mut crate::pthread_attr_t, - } - - pub struct utmpx { - pub ut_type: c_short, - pub ut_tv: crate::timeval, - pub ut_id: [c_char; 8], - pub ut_pid: crate::pid_t, - pub ut_user: [c_char; 32], - pub ut_line: [c_char; 16], - pub ut_host: [c_char; 128], - __ut_reserved: [c_char; 64], - } -} - -cfg_if! { - if #[cfg(feature = "extra_traits")] { - impl PartialEq for utmpx { - fn eq(&self, other: &utmpx) -> bool { - self.ut_type == other.ut_type - && self.ut_tv == other.ut_tv - && self.ut_id == other.ut_id - && self.ut_pid == other.ut_pid - && self.ut_user == other.ut_user - && self.ut_line == other.ut_line - && self - .ut_host - .iter() - .zip(other.ut_host.iter()) - .all(|(a, b)| a == b) - && self.__ut_reserved == other.__ut_reserved - } - } - - impl Eq for utmpx {} - impl hash::Hash for utmpx { - fn hash(&self, state: &mut H) { - self.ut_type.hash(state); - self.ut_tv.hash(state); - self.ut_id.hash(state); - self.ut_pid.hash(state); - self.ut_user.hash(state); - self.ut_line.hash(state); - self.ut_host.hash(state); - self.__ut_reserved.hash(state); - } - } - impl PartialEq for sockaddr_un { - fn eq(&self, other: &sockaddr_un) -> bool { - self.sun_len == other.sun_len - && self.sun_family == other.sun_family - && self - .sun_path - .iter() - .zip(other.sun_path.iter()) - .all(|(a, b)| a == b) - } - } - impl Eq for sockaddr_un {} - impl hash::Hash for sockaddr_un { - fn hash(&self, state: &mut H) { - self.sun_len.hash(state); - self.sun_family.hash(state); - self.sun_path.hash(state); - } - } - - impl PartialEq for sockaddr_storage { - fn eq(&self, other: &sockaddr_storage) -> bool { - self.ss_len == other.ss_len - && self.ss_family == other.ss_family - && self - .__ss_pad1 - .iter() - .zip(other.__ss_pad1.iter()) - .all(|(a, b)| a == b) - && self.__ss_pad2 == other.__ss_pad2 - && self - .__ss_pad3 - .iter() - .zip(other.__ss_pad3.iter()) - .all(|(a, b)| a == b) - } - } - impl Eq for sockaddr_storage {} - impl hash::Hash for sockaddr_storage { - fn hash(&self, state: &mut H) { - self.ss_len.hash(state); - self.ss_family.hash(state); - self.__ss_pad1.hash(state); - self.__ss_pad2.hash(state); - self.__ss_pad3.hash(state); - } - } - - impl PartialEq for dirent { - fn eq(&self, other: &dirent) -> bool { - self.d_dev == other.d_dev - && self.d_pdev == other.d_pdev - && self.d_ino == other.d_ino - && self.d_pino == other.d_pino - && 
self.d_reclen == other.d_reclen - && self - .d_name - .iter() - .zip(other.d_name.iter()) - .all(|(a, b)| a == b) - } - } - impl Eq for dirent {} - impl hash::Hash for dirent { - fn hash(&self, state: &mut H) { - self.d_dev.hash(state); - self.d_pdev.hash(state); - self.d_ino.hash(state); - self.d_pino.hash(state); - self.d_reclen.hash(state); - self.d_name.hash(state); - } - } - - impl PartialEq for sigevent { - fn eq(&self, other: &sigevent) -> bool { - self.sigev_notify == other.sigev_notify - && self.sigev_signo == other.sigev_signo - && self.sigev_value == other.sigev_value - && self.sigev_notify_attributes == other.sigev_notify_attributes - } - } - impl Eq for sigevent {} - impl hash::Hash for sigevent { - fn hash(&self, state: &mut H) { - self.sigev_notify.hash(state); - self.sigev_signo.hash(state); - self.sigev_value.hash(state); - self.sigev_notify_attributes.hash(state); - } - } - } -} - -pub const EXIT_FAILURE: c_int = 1; -pub const EXIT_SUCCESS: c_int = 0; -pub const RAND_MAX: c_int = 2147483647; -pub const EOF: c_int = -1; -pub const SEEK_SET: c_int = 0; -pub const SEEK_CUR: c_int = 1; -pub const SEEK_END: c_int = 2; -pub const L_SET: c_int = SEEK_SET; -pub const L_INCR: c_int = SEEK_CUR; -pub const L_XTND: c_int = SEEK_END; -pub const _IOFBF: c_int = 0; -pub const _IONBF: c_int = 2; -pub const _IOLBF: c_int = 1; - -pub const F_DUPFD: c_int = 0x0001; -pub const F_GETFD: c_int = 0x0002; -pub const F_SETFD: c_int = 0x0004; -pub const F_GETFL: c_int = 0x0008; -pub const F_SETFL: c_int = 0x0010; -pub const F_GETLK: c_int = 0x0020; -pub const F_SETLK: c_int = 0x0080; -pub const F_SETLKW: c_int = 0x0100; -pub const F_DUPFD_CLOEXEC: c_int = 0x0200; - -pub const F_RDLCK: c_int = 0x0040; -pub const F_UNLCK: c_int = 0x0200; -pub const F_WRLCK: c_int = 0x0400; - -pub const AT_FDCWD: c_int = -100; -pub const AT_SYMLINK_NOFOLLOW: c_int = 0x01; -pub const AT_SYMLINK_FOLLOW: c_int = 0x02; -pub const AT_REMOVEDIR: c_int = 0x04; -pub const AT_EACCESS: c_int = 0x08; - -pub const POLLIN: c_short = 0x0001; -pub const POLLOUT: c_short = 0x0002; -pub const POLLRDNORM: c_short = POLLIN; -pub const POLLWRNORM: c_short = POLLOUT; -pub const POLLRDBAND: c_short = 0x0008; -pub const POLLWRBAND: c_short = 0x0010; -pub const POLLPRI: c_short = 0x0020; -pub const POLLERR: c_short = 0x0004; -pub const POLLHUP: c_short = 0x0080; -pub const POLLNVAL: c_short = 0x1000; - -pub const PTHREAD_CREATE_JOINABLE: c_int = 0; -pub const PTHREAD_CREATE_DETACHED: c_int = 1; - -pub const CLOCK_REALTIME: c_int = -1; -pub const CLOCK_MONOTONIC: c_int = 0; -pub const CLOCK_PROCESS_CPUTIME_ID: c_int = -2; -pub const CLOCK_THREAD_CPUTIME_ID: c_int = -3; - -pub const RLIMIT_CORE: c_int = 0; -pub const RLIMIT_CPU: c_int = 1; -pub const RLIMIT_DATA: c_int = 2; -pub const RLIMIT_FSIZE: c_int = 3; -pub const RLIMIT_NOFILE: c_int = 4; -pub const RLIMIT_STACK: c_int = 5; -pub const RLIMIT_AS: c_int = 6; -pub const RLIM_INFINITY: crate::rlim_t = 0xffffffff; -// Haiku specific -pub const RLIMIT_NOVMON: c_int = 7; -#[deprecated(since = "0.2.64", note = "Not stable across OS versions")] -pub const RLIM_NLIMITS: c_int = 8; - -pub const RUSAGE_SELF: c_int = 0; - -pub const RTLD_LAZY: c_int = 0; - -pub const NCCS: usize = 11; - -pub const O_RDONLY: c_int = 0x0000; -pub const O_WRONLY: c_int = 0x0001; -pub const O_RDWR: c_int = 0x0002; -pub const O_ACCMODE: c_int = 0x0003; - -pub const O_EXCL: c_int = 0x0100; -pub const O_CREAT: c_int = 0x0200; -pub const O_TRUNC: c_int = 0x0400; -pub const O_NOCTTY: c_int = 0x1000; -pub const O_NOTRAVERSE: 
c_int = 0x2000; - -pub const O_CLOEXEC: c_int = 0x00000040; -pub const O_NONBLOCK: c_int = 0x00000080; -pub const O_APPEND: c_int = 0x00000800; -pub const O_SYNC: c_int = 0x00010000; -pub const O_RSYNC: c_int = 0x00020000; -pub const O_DSYNC: c_int = 0x00040000; -pub const O_NOFOLLOW: c_int = 0x00080000; -pub const O_NOCACHE: c_int = 0x00100000; -pub const O_DIRECTORY: c_int = 0x00200000; - -pub const S_IFIFO: mode_t = 0o1_0000; -pub const S_IFCHR: mode_t = 0o2_0000; -pub const S_IFBLK: mode_t = 0o6_0000; -pub const S_IFDIR: mode_t = 0o4_0000; -pub const S_IFREG: mode_t = 0o10_0000; -pub const S_IFLNK: mode_t = 0o12_0000; -pub const S_IFSOCK: mode_t = 0o14_0000; -pub const S_IFMT: mode_t = 0o17_0000; - -pub const S_IRWXU: mode_t = 0o0700; -pub const S_IRUSR: mode_t = 0o0400; -pub const S_IWUSR: mode_t = 0o0200; -pub const S_IXUSR: mode_t = 0o0100; -pub const S_IRWXG: mode_t = 0o0070; -pub const S_IRGRP: mode_t = 0o0040; -pub const S_IWGRP: mode_t = 0o0020; -pub const S_IXGRP: mode_t = 0o0010; -pub const S_IRWXO: mode_t = 0o0007; -pub const S_IROTH: mode_t = 0o0004; -pub const S_IWOTH: mode_t = 0o0002; -pub const S_IXOTH: mode_t = 0o0001; - -pub const F_OK: c_int = 0; -pub const R_OK: c_int = 4; -pub const W_OK: c_int = 2; -pub const X_OK: c_int = 1; -pub const STDIN_FILENO: c_int = 0; -pub const STDOUT_FILENO: c_int = 1; -pub const STDERR_FILENO: c_int = 2; - -pub const SIGHUP: c_int = 1; -pub const SIGINT: c_int = 2; -pub const SIGQUIT: c_int = 3; -pub const SIGILL: c_int = 4; -pub const SIGCHLD: c_int = 5; -pub const SIGABRT: c_int = 6; -pub const SIGPIPE: c_int = 7; -pub const SIGFPE: c_int = 8; -pub const SIGKILL: c_int = 9; -pub const SIGSTOP: c_int = 10; -pub const SIGSEGV: c_int = 11; -pub const SIGCONT: c_int = 12; -pub const SIGTSTP: c_int = 13; -pub const SIGALRM: c_int = 14; -pub const SIGTERM: c_int = 15; -pub const SIGTTIN: c_int = 16; -pub const SIGTTOU: c_int = 17; -pub const SIGUSR1: c_int = 18; -pub const SIGUSR2: c_int = 19; -pub const SIGWINCH: c_int = 20; -pub const SIGKILLTHR: c_int = 21; -pub const SIGTRAP: c_int = 22; -pub const SIGPOLL: c_int = 23; -pub const SIGPROF: c_int = 24; -pub const SIGSYS: c_int = 25; -pub const SIGURG: c_int = 26; -pub const SIGVTALRM: c_int = 27; -pub const SIGXCPU: c_int = 28; -pub const SIGXFSZ: c_int = 29; -pub const SIGBUS: c_int = 30; - -pub const SIG_BLOCK: c_int = 1; -pub const SIG_UNBLOCK: c_int = 2; -pub const SIG_SETMASK: c_int = 3; - -pub const SIGEV_NONE: c_int = 0; -pub const SIGEV_SIGNAL: c_int = 1; -pub const SIGEV_THREAD: c_int = 2; - -pub const EAI_AGAIN: c_int = 2; -pub const EAI_BADFLAGS: c_int = 3; -pub const EAI_FAIL: c_int = 4; -pub const EAI_FAMILY: c_int = 5; -pub const EAI_MEMORY: c_int = 6; -pub const EAI_NODATA: c_int = 7; -pub const EAI_NONAME: c_int = 8; -pub const EAI_SERVICE: c_int = 9; -pub const EAI_SOCKTYPE: c_int = 10; -pub const EAI_SYSTEM: c_int = 11; -pub const EAI_OVERFLOW: c_int = 14; - -pub const PROT_NONE: c_int = 0; -pub const PROT_READ: c_int = 1; -pub const PROT_WRITE: c_int = 2; -pub const PROT_EXEC: c_int = 4; - -pub const LC_ALL: c_int = 0; -pub const LC_COLLATE: c_int = 1; -pub const LC_CTYPE: c_int = 2; -pub const LC_MONETARY: c_int = 3; -pub const LC_NUMERIC: c_int = 4; -pub const LC_TIME: c_int = 5; -pub const LC_MESSAGES: c_int = 6; - -// FIXME(haiku): Haiku does not have MAP_FILE, but library/std/os.rs requires it -pub const MAP_FILE: c_int = 0x00; -pub const MAP_SHARED: c_int = 0x01; -pub const MAP_PRIVATE: c_int = 0x02; -pub const MAP_FIXED: c_int = 0x04; -pub const MAP_ANONYMOUS: 
c_int = 0x08; -pub const MAP_NORESERVE: c_int = 0x10; -pub const MAP_ANON: c_int = MAP_ANONYMOUS; - -pub const MAP_FAILED: *mut c_void = !0 as *mut c_void; - -pub const MS_ASYNC: c_int = 0x01; -pub const MS_INVALIDATE: c_int = 0x04; -pub const MS_SYNC: c_int = 0x02; - -pub const E2BIG: c_int = -2147454975; -pub const ECHILD: c_int = -2147454974; -pub const EDEADLK: c_int = -2147454973; -pub const EFBIG: c_int = -2147454972; -pub const EMLINK: c_int = -2147454971; -pub const ENFILE: c_int = -2147454970; -pub const ENODEV: c_int = -2147454969; -pub const ENOLCK: c_int = -2147454968; -pub const ENOSYS: c_int = -2147454967; -pub const ENOTTY: c_int = -2147454966; -pub const ENXIO: c_int = -2147454965; -pub const ESPIPE: c_int = -2147454964; -pub const ESRCH: c_int = -2147454963; -pub const EFPOS: c_int = -2147454962; -pub const ESIGPARM: c_int = -2147454961; -pub const EDOM: c_int = -2147454960; -pub const ERANGE: c_int = -2147454959; -pub const EPROTOTYPE: c_int = -2147454958; -pub const EPROTONOSUPPORT: c_int = -2147454957; -pub const EPFNOSUPPORT: c_int = -2147454956; -pub const EAFNOSUPPORT: c_int = -2147454955; -pub const EADDRINUSE: c_int = -2147454954; -pub const EADDRNOTAVAIL: c_int = -2147454953; -pub const ENETDOWN: c_int = -2147454952; -pub const ENETUNREACH: c_int = -2147454951; -pub const ENETRESET: c_int = -2147454950; -pub const ECONNABORTED: c_int = -2147454949; -pub const ECONNRESET: c_int = -2147454948; -pub const EISCONN: c_int = -2147454947; -pub const ENOTCONN: c_int = -2147454946; -pub const ESHUTDOWN: c_int = -2147454945; -pub const ECONNREFUSED: c_int = -2147454944; -pub const EHOSTUNREACH: c_int = -2147454943; -pub const ENOPROTOOPT: c_int = -2147454942; -pub const ENOBUFS: c_int = -2147454941; -pub const EINPROGRESS: c_int = -2147454940; -pub const EALREADY: c_int = -2147454939; -pub const EILSEQ: c_int = -2147454938; -pub const ENOMSG: c_int = -2147454937; -pub const ESTALE: c_int = -2147454936; -pub const EOVERFLOW: c_int = -2147454935; -pub const EMSGSIZE: c_int = -2147454934; -pub const EOPNOTSUPP: c_int = -2147454933; -pub const ENOTSOCK: c_int = -2147454932; -pub const EHOSTDOWN: c_int = -2147454931; -pub const EBADMSG: c_int = -2147454930; -pub const ECANCELED: c_int = -2147454929; -pub const EDESTADDRREQ: c_int = -2147454928; -pub const EDQUOT: c_int = -2147454927; -pub const EIDRM: c_int = -2147454926; -pub const EMULTIHOP: c_int = -2147454925; -pub const ENODATA: c_int = -2147454924; -pub const ENOLINK: c_int = -2147454923; -pub const ENOSR: c_int = -2147454922; -pub const ENOSTR: c_int = -2147454921; -pub const ENOTSUP: c_int = -2147454920; -pub const EPROTO: c_int = -2147454919; -pub const ETIME: c_int = -2147454918; -pub const ETXTBSY: c_int = -2147454917; -pub const ENOATTR: c_int = -2147454916; - -// INT_MIN -pub const ENOMEM: c_int = -2147483648; - -// POSIX errors that can be mapped to BeOS error codes -pub const EACCES: c_int = -2147483646; -pub const EINTR: c_int = -2147483638; -pub const EIO: c_int = -2147483647; -pub const EBUSY: c_int = -2147483634; -pub const EFAULT: c_int = -2147478783; -pub const ETIMEDOUT: c_int = -2147483639; -pub const EAGAIN: c_int = -2147483637; -pub const EWOULDBLOCK: c_int = -2147483637; -pub const EBADF: c_int = -2147459072; -pub const EEXIST: c_int = -2147459070; -pub const EINVAL: c_int = -2147483643; -pub const ENAMETOOLONG: c_int = -2147459068; -pub const ENOENT: c_int = -2147459069; -pub const EPERM: c_int = -2147483633; -pub const ENOTDIR: c_int = -2147459067; -pub const EISDIR: c_int = -2147459063; -pub const 
ENOTEMPTY: c_int = -2147459066; -pub const ENOSPC: c_int = -2147459065; -pub const EROFS: c_int = -2147459064; -pub const EMFILE: c_int = -2147459062; -pub const EXDEV: c_int = -2147459061; -pub const ELOOP: c_int = -2147459060; -pub const ENOEXEC: c_int = -2147478782; -pub const EPIPE: c_int = -2147459059; - -pub const IPPROTO_RAW: c_int = 255; - -// These are prefixed with POSIX_ on Haiku -pub const MADV_NORMAL: c_int = 1; -pub const MADV_SEQUENTIAL: c_int = 2; -pub const MADV_RANDOM: c_int = 3; -pub const MADV_WILLNEED: c_int = 4; -pub const MADV_DONTNEED: c_int = 5; -pub const MADV_FREE: c_int = 6; - -// https://github.com/haiku/haiku/blob/HEAD/headers/posix/net/if.h#L80 -pub const IFF_UP: c_int = 0x0001; -pub const IFF_BROADCAST: c_int = 0x0002; // valid broadcast address -pub const IFF_LOOPBACK: c_int = 0x0008; -pub const IFF_POINTOPOINT: c_int = 0x0010; // point-to-point link -pub const IFF_NOARP: c_int = 0x0040; // no address resolution -pub const IFF_AUTOUP: c_int = 0x0080; // auto dial -pub const IFF_PROMISC: c_int = 0x0100; // receive all packets -pub const IFF_ALLMULTI: c_int = 0x0200; // receive all multicast packets -pub const IFF_SIMPLEX: c_int = 0x0800; // doesn't receive own transmissions -pub const IFF_LINK: c_int = 0x1000; // has link -pub const IFF_AUTO_CONFIGURED: c_int = 0x2000; -pub const IFF_CONFIGURING: c_int = 0x4000; -pub const IFF_MULTICAST: c_int = 0x8000; // supports multicast - -pub const AF_UNSPEC: c_int = 0; -pub const AF_INET: c_int = 1; -pub const AF_APPLETALK: c_int = 2; -pub const AF_ROUTE: c_int = 3; -pub const AF_LINK: c_int = 4; -pub const AF_INET6: c_int = 5; -pub const AF_DLI: c_int = 6; -pub const AF_IPX: c_int = 7; -pub const AF_NOTIFY: c_int = 8; -pub const AF_LOCAL: c_int = 9; -pub const AF_UNIX: c_int = AF_LOCAL; -pub const AF_BLUETOOTH: c_int = 10; - -pub const PF_UNSPEC: c_int = AF_UNSPEC; -pub const PF_INET: c_int = AF_INET; -pub const PF_ROUTE: c_int = AF_ROUTE; -pub const PF_LINK: c_int = AF_LINK; -pub const PF_INET6: c_int = AF_INET6; -pub const PF_LOCAL: c_int = AF_LOCAL; -pub const PF_UNIX: c_int = AF_UNIX; -pub const PF_BLUETOOTH: c_int = AF_BLUETOOTH; - -pub const IP_OPTIONS: c_int = 1; -pub const IP_HDRINCL: c_int = 2; -pub const IP_TOS: c_int = 3; -pub const IP_TTL: c_int = 4; -pub const IP_RECVOPTS: c_int = 5; -pub const IP_RECVRETOPTS: c_int = 6; -pub const IP_RECVDSTADDR: c_int = 7; -pub const IP_RETOPTS: c_int = 8; -pub const IP_MULTICAST_IF: c_int = 9; -pub const IP_MULTICAST_TTL: c_int = 10; -pub const IP_MULTICAST_LOOP: c_int = 11; -pub const IP_ADD_MEMBERSHIP: c_int = 12; -pub const IP_DROP_MEMBERSHIP: c_int = 13; -pub const IP_BLOCK_SOURCE: c_int = 14; -pub const IP_UNBLOCK_SOURCE: c_int = 15; -pub const IP_ADD_SOURCE_MEMBERSHIP: c_int = 16; -pub const IP_DROP_SOURCE_MEMBERSHIP: c_int = 17; - -pub const TCP_NODELAY: c_int = 0x01; -pub const TCP_MAXSEG: c_int = 0x02; -pub const TCP_NOPUSH: c_int = 0x04; -pub const TCP_NOOPT: c_int = 0x08; - -pub const IF_NAMESIZE: size_t = 32; -pub const IFNAMSIZ: size_t = IF_NAMESIZE; - -pub const IPV6_MULTICAST_IF: c_int = 24; -pub const IPV6_MULTICAST_HOPS: c_int = 25; -pub const IPV6_MULTICAST_LOOP: c_int = 26; -pub const IPV6_UNICAST_HOPS: c_int = 27; -pub const IPV6_JOIN_GROUP: c_int = 28; -pub const IPV6_LEAVE_GROUP: c_int = 29; -pub const IPV6_V6ONLY: c_int = 30; -pub const IPV6_PKTINFO: c_int = 31; -pub const IPV6_RECVPKTINFO: c_int = 32; -pub const IPV6_HOPLIMIT: c_int = 33; -pub const IPV6_RECVHOPLIMIT: c_int = 34; -pub const IPV6_HOPOPTS: c_int = 35; -pub const IPV6_DSTOPTS: 
c_int = 36; -pub const IPV6_RTHDR: c_int = 37; - -pub const MSG_OOB: c_int = 0x0001; -pub const MSG_PEEK: c_int = 0x0002; -pub const MSG_DONTROUTE: c_int = 0x0004; -pub const MSG_EOR: c_int = 0x0008; -pub const MSG_TRUNC: c_int = 0x0010; -pub const MSG_CTRUNC: c_int = 0x0020; -pub const MSG_WAITALL: c_int = 0x0040; -pub const MSG_DONTWAIT: c_int = 0x0080; -pub const MSG_BCAST: c_int = 0x0100; -pub const MSG_MCAST: c_int = 0x0200; -pub const MSG_EOF: c_int = 0x0400; -pub const MSG_NOSIGNAL: c_int = 0x0800; - -pub const SHUT_RD: c_int = 0; -pub const SHUT_WR: c_int = 1; -pub const SHUT_RDWR: c_int = 2; - -pub const LOCK_SH: c_int = 0x01; -pub const LOCK_EX: c_int = 0x02; -pub const LOCK_NB: c_int = 0x04; -pub const LOCK_UN: c_int = 0x08; - -pub const MINSIGSTKSZ: size_t = 8192; -pub const SIGSTKSZ: size_t = 16384; - -pub const IOV_MAX: c_int = 1024; -pub const PATH_MAX: c_int = 1024; - -pub const SA_NOCLDSTOP: c_int = 0x01; -pub const SA_NOCLDWAIT: c_int = 0x02; -pub const SA_RESETHAND: c_int = 0x04; -pub const SA_NODEFER: c_int = 0x08; -pub const SA_RESTART: c_int = 0x10; -pub const SA_ONSTACK: c_int = 0x20; -pub const SA_SIGINFO: c_int = 0x40; -pub const SA_NOMASK: c_int = SA_NODEFER; -pub const SA_STACK: c_int = SA_ONSTACK; -pub const SA_ONESHOT: c_int = SA_RESETHAND; - -pub const SS_ONSTACK: c_int = 0x1; -pub const SS_DISABLE: c_int = 0x2; - -// DIFF(main): changed to `c_int` in 500365e1 -pub const FD_SETSIZE: usize = 1024; - -pub const RTLD_LOCAL: c_int = 0x0; -pub const RTLD_NOW: c_int = 0x1; -pub const RTLD_GLOBAL: c_int = 0x2; -pub const RTLD_DEFAULT: *mut c_void = ptr::null_mut(); - -pub const BUFSIZ: c_uint = 8192; -pub const FILENAME_MAX: c_uint = 256; -pub const FOPEN_MAX: c_uint = 128; -pub const L_tmpnam: c_uint = 512; -pub const TMP_MAX: c_uint = 32768; - -pub const _PC_CHOWN_RESTRICTED: c_int = 1; -pub const _PC_MAX_CANON: c_int = 2; -pub const _PC_MAX_INPUT: c_int = 3; -pub const _PC_NAME_MAX: c_int = 4; -pub const _PC_NO_TRUNC: c_int = 5; -pub const _PC_PATH_MAX: c_int = 6; -pub const _PC_PIPE_BUF: c_int = 7; -pub const _PC_VDISABLE: c_int = 8; -pub const _PC_LINK_MAX: c_int = 25; -pub const _PC_SYNC_IO: c_int = 26; -pub const _PC_ASYNC_IO: c_int = 27; -pub const _PC_PRIO_IO: c_int = 28; -pub const _PC_SOCK_MAXBUF: c_int = 29; -pub const _PC_FILESIZEBITS: c_int = 30; -pub const _PC_REC_INCR_XFER_SIZE: c_int = 31; -pub const _PC_REC_MAX_XFER_SIZE: c_int = 32; -pub const _PC_REC_MIN_XFER_SIZE: c_int = 33; -pub const _PC_REC_XFER_ALIGN: c_int = 34; -pub const _PC_ALLOC_SIZE_MIN: c_int = 35; -pub const _PC_SYMLINK_MAX: c_int = 36; -pub const _PC_2_SYMLINKS: c_int = 37; -pub const _PC_XATTR_EXISTS: c_int = 38; -pub const _PC_XATTR_ENABLED: c_int = 39; - -pub const FIONBIO: c_ulong = 0xbe000000; -pub const FIONREAD: c_ulong = 0xbe000001; -pub const FIOSEEKDATA: c_ulong = 0xbe000002; -pub const FIOSEEKHOLE: c_ulong = 0xbe000003; - -pub const _SC_ARG_MAX: c_int = 15; -pub const _SC_CHILD_MAX: c_int = 16; -pub const _SC_CLK_TCK: c_int = 17; -pub const _SC_JOB_CONTROL: c_int = 18; -pub const _SC_NGROUPS_MAX: c_int = 19; -pub const _SC_OPEN_MAX: c_int = 20; -pub const _SC_SAVED_IDS: c_int = 21; -pub const _SC_STREAM_MAX: c_int = 22; -pub const _SC_TZNAME_MAX: c_int = 23; -pub const _SC_VERSION: c_int = 24; -pub const _SC_GETGR_R_SIZE_MAX: c_int = 25; -pub const _SC_GETPW_R_SIZE_MAX: c_int = 26; -pub const _SC_PAGESIZE: c_int = 27; -pub const _SC_PAGE_SIZE: c_int = 27; -pub const _SC_SEM_NSEMS_MAX: c_int = 28; -pub const _SC_SEM_VALUE_MAX: c_int = 29; -pub const _SC_SEMAPHORES: c_int = 
30; -pub const _SC_THREADS: c_int = 31; -pub const _SC_IOV_MAX: c_int = 32; -pub const _SC_UIO_MAXIOV: c_int = 32; -pub const _SC_NPROCESSORS_CONF: c_int = 34; -pub const _SC_NPROCESSORS_ONLN: c_int = 35; -pub const _SC_ATEXIT_MAX: c_int = 37; -pub const _SC_PASS_MAX: c_int = 39; -pub const _SC_PHYS_PAGES: c_int = 40; -pub const _SC_AVPHYS_PAGES: c_int = 41; -pub const _SC_PIPE: c_int = 42; -pub const _SC_SELECT: c_int = 43; -pub const _SC_POLL: c_int = 44; -pub const _SC_MAPPED_FILES: c_int = 45; -pub const _SC_THREAD_PROCESS_SHARED: c_int = 46; -pub const _SC_THREAD_STACK_MIN: c_int = 47; -pub const _SC_THREAD_ATTR_STACKADDR: c_int = 48; -pub const _SC_THREAD_ATTR_STACKSIZE: c_int = 49; -pub const _SC_THREAD_PRIORITY_SCHEDULING: c_int = 50; -pub const _SC_REALTIME_SIGNALS: c_int = 51; -pub const _SC_MEMORY_PROTECTION: c_int = 52; -pub const _SC_SIGQUEUE_MAX: c_int = 53; -pub const _SC_RTSIG_MAX: c_int = 54; -pub const _SC_MONOTONIC_CLOCK: c_int = 55; -pub const _SC_DELAYTIMER_MAX: c_int = 56; -pub const _SC_TIMER_MAX: c_int = 57; -pub const _SC_TIMERS: c_int = 58; -pub const _SC_CPUTIME: c_int = 59; -pub const _SC_THREAD_CPUTIME: c_int = 60; -pub const _SC_HOST_NAME_MAX: c_int = 61; -pub const _SC_REGEXP: c_int = 62; -pub const _SC_SYMLOOP_MAX: c_int = 63; -pub const _SC_SHELL: c_int = 64; -pub const _SC_TTY_NAME_MAX: c_int = 65; -pub const _SC_ADVISORY_INFO: c_int = 66; -pub const _SC_BARRIERS: c_int = 67; -pub const _SC_CLOCK_SELECTION: c_int = 68; -pub const _SC_FSYNC: c_int = 69; -pub const _SC_IPV6: c_int = 70; -pub const _SC_MEMLOCK: c_int = 71; -pub const _SC_MEMLOCK_RANGE: c_int = 72; -pub const _SC_MESSAGE_PASSING: c_int = 73; -pub const _SC_PRIORITIZED_IO: c_int = 74; -pub const _SC_PRIORITY_SCHEDULING: c_int = 75; -pub const _SC_READER_WRITER_LOCKS: c_int = 76; -pub const _SC_SHARED_MEMORY_OBJECTS: c_int = 77; -pub const _SC_SPAWN: c_int = 78; -pub const _SC_SPIN_LOCKS: c_int = 79; -pub const _SC_SPORADIC_SERVER: c_int = 80; -pub const _SC_SYNCHRONIZED_IO: c_int = 81; -pub const _SC_THREAD_PRIO_INHERIT: c_int = 82; -pub const _SC_THREAD_PRIO_PROTECT: c_int = 83; -pub const _SC_THREAD_ROBUST_PRIO_INHERIT: c_int = 84; -pub const _SC_THREAD_ROBUST_PRIO_PROTECT: c_int = 85; -pub const _SC_THREAD_SAFE_FUNCTIONS: c_int = 86; -pub const _SC_THREAD_SPORADIC_SERVER: c_int = 87; -pub const _SC_TIMEOUTS: c_int = 88; -pub const _SC_TRACE: c_int = 89; -pub const _SC_TRACE_EVENT_FILTER: c_int = 90; -pub const _SC_TRACE_INHERIT: c_int = 91; -pub const _SC_TRACE_LOG: c_int = 92; -pub const _SC_TYPED_MEMORY_OBJECTS: c_int = 93; -pub const _SC_V6_ILP32_OFF32: c_int = 94; -pub const _SC_V6_ILP32_OFFBIG: c_int = 95; -pub const _SC_V6_LP64_OFF64: c_int = 96; -pub const _SC_V6_LPBIG_OFFBIG: c_int = 97; -pub const _SC_V7_ILP32_OFF32: c_int = 98; -pub const _SC_V7_ILP32_OFFBIG: c_int = 99; -pub const _SC_V7_LP64_OFF64: c_int = 100; -pub const _SC_V7_LPBIG_OFFBIG: c_int = 101; -pub const _SC_2_C_BIND: c_int = 102; -pub const _SC_2_C_DEV: c_int = 103; -pub const _SC_2_CHAR_TERM: c_int = 104; -pub const _SC_2_FORT_DEV: c_int = 105; -pub const _SC_2_FORT_RUN: c_int = 106; -pub const _SC_2_LOCALEDEF: c_int = 107; -pub const _SC_2_PBS: c_int = 108; -pub const _SC_2_PBS_ACCOUNTING: c_int = 109; -pub const _SC_2_PBS_CHECKPOINT: c_int = 110; -pub const _SC_2_PBS_LOCATE: c_int = 111; -pub const _SC_2_PBS_MESSAGE: c_int = 112; -pub const _SC_2_PBS_TRACK: c_int = 113; -pub const _SC_2_SW_DEV: c_int = 114; -pub const _SC_2_UPE: c_int = 115; -pub const _SC_2_VERSION: c_int = 116; -pub const _SC_XOPEN_CRYPT: c_int 
= 117; -pub const _SC_XOPEN_ENH_I18N: c_int = 118; -pub const _SC_XOPEN_REALTIME: c_int = 119; -pub const _SC_XOPEN_REALTIME_THREADS: c_int = 120; -pub const _SC_XOPEN_SHM: c_int = 121; -pub const _SC_XOPEN_STREAMS: c_int = 122; -pub const _SC_XOPEN_UNIX: c_int = 123; -pub const _SC_XOPEN_UUCP: c_int = 124; -pub const _SC_XOPEN_VERSION: c_int = 125; -pub const _SC_BC_BASE_MAX: c_int = 129; -pub const _SC_BC_DIM_MAX: c_int = 130; -pub const _SC_BC_SCALE_MAX: c_int = 131; -pub const _SC_BC_STRING_MAX: c_int = 132; -pub const _SC_COLL_WEIGHTS_MAX: c_int = 133; -pub const _SC_EXPR_NEST_MAX: c_int = 134; -pub const _SC_LINE_MAX: c_int = 135; -pub const _SC_LOGIN_NAME_MAX: c_int = 136; -pub const _SC_MQ_OPEN_MAX: c_int = 137; -pub const _SC_MQ_PRIO_MAX: c_int = 138; -pub const _SC_THREAD_DESTRUCTOR_ITERATIONS: c_int = 139; -pub const _SC_THREAD_KEYS_MAX: c_int = 140; -pub const _SC_THREAD_THREADS_MAX: c_int = 141; -pub const _SC_RE_DUP_MAX: c_int = 142; - -pub const PTHREAD_STACK_MIN: size_t = 8192; - -pub const PTHREAD_MUTEX_INITIALIZER: pthread_mutex_t = pthread_mutex_t { - flags: 0, - lock: 0, - unused: -42, - owner: -1, - owner_count: 0, -}; -pub const PTHREAD_COND_INITIALIZER: pthread_cond_t = pthread_cond_t { - flags: 0, - unused: -42, - mutex: 0 as *mut _, - waiter_count: 0, - lock: 0, -}; -pub const PTHREAD_RWLOCK_INITIALIZER: pthread_rwlock_t = pthread_rwlock_t { - flags: 0, - owner: -1, - lock_sem: 0, - lock_count: 0, - reader_count: 0, - writer_count: 0, - waiters: [0 as *mut _; 2], -}; - -pub const PTHREAD_MUTEX_DEFAULT: c_int = 0; -pub const PTHREAD_MUTEX_NORMAL: c_int = 1; -pub const PTHREAD_MUTEX_ERRORCHECK: c_int = 2; -pub const PTHREAD_MUTEX_RECURSIVE: c_int = 3; - -pub const FIOCLEX: c_ulong = 0; // FIXME(haiku): does not exist on Haiku! 
- -pub const RUSAGE_CHILDREN: c_int = -1; - -pub const SOCK_STREAM: c_int = 1; -pub const SOCK_DGRAM: c_int = 2; -pub const SOCK_RAW: c_int = 3; -pub const SOCK_SEQPACKET: c_int = 5; -pub const SOCK_NONBLOCK: c_int = 0x00040000; -pub const SOCK_CLOEXEC: c_int = 0x00080000; - -pub const SOL_SOCKET: c_int = -1; -pub const SO_ACCEPTCONN: c_int = 0x00000001; -pub const SO_BROADCAST: c_int = 0x00000002; -pub const SO_DEBUG: c_int = 0x00000004; -pub const SO_DONTROUTE: c_int = 0x00000008; -pub const SO_KEEPALIVE: c_int = 0x00000010; -pub const SO_OOBINLINE: c_int = 0x00000020; -pub const SO_REUSEADDR: c_int = 0x00000040; -pub const SO_REUSEPORT: c_int = 0x00000080; -pub const SO_USELOOPBACK: c_int = 0x00000100; -pub const SO_LINGER: c_int = 0x00000200; -pub const SO_SNDBUF: c_int = 0x40000001; -pub const SO_SNDLOWAT: c_int = 0x40000002; -pub const SO_SNDTIMEO: c_int = 0x40000003; -pub const SO_RCVBUF: c_int = 0x40000004; -pub const SO_RCVLOWAT: c_int = 0x40000005; -pub const SO_RCVTIMEO: c_int = 0x40000006; -pub const SO_ERROR: c_int = 0x40000007; -pub const SO_TYPE: c_int = 0x40000008; -pub const SO_NONBLOCK: c_int = 0x40000009; -pub const SO_BINDTODEVICE: c_int = 0x4000000a; -pub const SO_PEERCRED: c_int = 0x4000000b; - -pub const SCM_RIGHTS: c_int = 0x01; - -pub const SOMAXCONN: c_int = 32; - -pub const NI_MAXHOST: size_t = 1025; - -pub const WNOHANG: c_int = 0x01; -pub const WUNTRACED: c_int = 0x02; -pub const WCONTINUED: c_int = 0x04; -pub const WEXITED: c_int = 0x08; -pub const WSTOPPED: c_int = 0x10; -pub const WNOWAIT: c_int = 0x20; - -// si_code values for SIGBUS signal -pub const BUS_ADRALN: c_int = 40; -pub const BUS_ADRERR: c_int = 41; -pub const BUS_OBJERR: c_int = 42; - -// si_code values for SIGCHLD signal -pub const CLD_EXITED: c_int = 60; -pub const CLD_KILLED: c_int = 61; -pub const CLD_DUMPED: c_int = 62; -pub const CLD_TRAPPED: c_int = 63; -pub const CLD_STOPPED: c_int = 64; -pub const CLD_CONTINUED: c_int = 65; - -pub const P_ALL: idtype_t = 0; -pub const P_PID: idtype_t = 1; -pub const P_PGID: idtype_t = 2; - -pub const UTIME_OMIT: c_long = 1000000001; -pub const UTIME_NOW: c_long = 1000000000; - -pub const VINTR: usize = 0; -pub const VQUIT: usize = 1; -pub const VERASE: usize = 2; -pub const VKILL: usize = 3; -pub const VEOF: usize = 4; -pub const VEOL: usize = 5; -pub const VMIN: usize = 4; -pub const VTIME: usize = 5; -pub const VEOL2: usize = 6; -pub const VSWTCH: usize = 7; -pub const VSTART: usize = 8; -pub const VSTOP: usize = 9; -pub const VSUSP: usize = 10; - -pub const IGNBRK: crate::tcflag_t = 0x01; -pub const BRKINT: crate::tcflag_t = 0x02; -pub const IGNPAR: crate::tcflag_t = 0x04; -pub const PARMRK: crate::tcflag_t = 0x08; -pub const INPCK: crate::tcflag_t = 0x10; -pub const ISTRIP: crate::tcflag_t = 0x20; -pub const INLCR: crate::tcflag_t = 0x40; -pub const IGNCR: crate::tcflag_t = 0x80; -pub const ICRNL: crate::tcflag_t = 0x100; -pub const IUCLC: crate::tcflag_t = 0x200; -pub const IXON: crate::tcflag_t = 0x400; -pub const IXANY: crate::tcflag_t = 0x800; -pub const IXOFF: crate::tcflag_t = 0x1000; - -pub const OPOST: crate::tcflag_t = 0x00000001; -pub const OLCUC: crate::tcflag_t = 0x00000002; -pub const ONLCR: crate::tcflag_t = 0x00000004; -pub const OCRNL: crate::tcflag_t = 0x00000008; -pub const ONOCR: crate::tcflag_t = 0x00000010; -pub const ONLRET: crate::tcflag_t = 0x00000020; -pub const OFILL: crate::tcflag_t = 0x00000040; -pub const OFDEL: crate::tcflag_t = 0x00000080; -pub const NLDLY: crate::tcflag_t = 0x00000100; -pub const NL0: crate::tcflag_t = 
0x00000000; -pub const NL1: crate::tcflag_t = 0x00000100; -pub const CRDLY: crate::tcflag_t = 0x00000600; -pub const CR0: crate::tcflag_t = 0x00000000; -pub const CR1: crate::tcflag_t = 0x00000200; -pub const CR2: crate::tcflag_t = 0x00000400; -pub const CR3: crate::tcflag_t = 0x00000600; -pub const TABDLY: crate::tcflag_t = 0x00001800; -pub const TAB0: crate::tcflag_t = 0x00000000; -pub const TAB1: crate::tcflag_t = 0x00000800; -pub const TAB2: crate::tcflag_t = 0x00001000; -pub const TAB3: crate::tcflag_t = 0x00001800; -pub const BSDLY: crate::tcflag_t = 0x00002000; -pub const BS0: crate::tcflag_t = 0x00000000; -pub const BS1: crate::tcflag_t = 0x00002000; -pub const VTDLY: crate::tcflag_t = 0x00004000; -pub const VT0: crate::tcflag_t = 0x00000000; -pub const VT1: crate::tcflag_t = 0x00004000; -pub const FFDLY: crate::tcflag_t = 0x00008000; -pub const FF0: crate::tcflag_t = 0x00000000; -pub const FF1: crate::tcflag_t = 0x00008000; - -pub const CSIZE: crate::tcflag_t = 0x00000020; -pub const CS5: crate::tcflag_t = 0x00000000; -pub const CS6: crate::tcflag_t = 0x00000000; -pub const CS7: crate::tcflag_t = 0x00000000; -pub const CS8: crate::tcflag_t = 0x00000020; -pub const CSTOPB: crate::tcflag_t = 0x00000040; -pub const CREAD: crate::tcflag_t = 0x00000080; -pub const PARENB: crate::tcflag_t = 0x00000100; -pub const PARODD: crate::tcflag_t = 0x00000200; -pub const HUPCL: crate::tcflag_t = 0x00000400; -pub const CLOCAL: crate::tcflag_t = 0x00000800; -pub const XLOBLK: crate::tcflag_t = 0x00001000; -pub const CTSFLOW: crate::tcflag_t = 0x00002000; -pub const RTSFLOW: crate::tcflag_t = 0x00004000; -pub const CRTSCTS: crate::tcflag_t = RTSFLOW | CTSFLOW; - -pub const ISIG: crate::tcflag_t = 0x00000001; -pub const ICANON: crate::tcflag_t = 0x00000002; -pub const XCASE: crate::tcflag_t = 0x00000004; -pub const ECHO: crate::tcflag_t = 0x00000008; -pub const ECHOE: crate::tcflag_t = 0x00000010; -pub const ECHOK: crate::tcflag_t = 0x00000020; -pub const ECHONL: crate::tcflag_t = 0x00000040; -pub const NOFLSH: crate::tcflag_t = 0x00000080; -pub const TOSTOP: crate::tcflag_t = 0x00000100; -pub const IEXTEN: crate::tcflag_t = 0x00000200; -pub const ECHOCTL: crate::tcflag_t = 0x00000400; -pub const ECHOPRT: crate::tcflag_t = 0x00000800; -pub const ECHOKE: crate::tcflag_t = 0x00001000; -pub const FLUSHO: crate::tcflag_t = 0x00002000; -pub const PENDIN: crate::tcflag_t = 0x00004000; - -pub const TCGB_CTS: c_int = 0x01; -pub const TCGB_DSR: c_int = 0x02; -pub const TCGB_RI: c_int = 0x04; -pub const TCGB_DCD: c_int = 0x08; -pub const TIOCM_CTS: c_int = TCGB_CTS; -pub const TIOCM_CD: c_int = TCGB_DCD; -pub const TIOCM_CAR: c_int = TCGB_DCD; -pub const TIOCM_RI: c_int = TCGB_RI; -pub const TIOCM_RNG: c_int = TCGB_RI; -pub const TIOCM_DSR: c_int = TCGB_DSR; -pub const TIOCM_DTR: c_int = 0x10; -pub const TIOCM_RTS: c_int = 0x20; - -pub const B0: speed_t = 0x00; -pub const B50: speed_t = 0x01; -pub const B75: speed_t = 0x02; -pub const B110: speed_t = 0x03; -pub const B134: speed_t = 0x04; -pub const B150: speed_t = 0x05; -pub const B200: speed_t = 0x06; -pub const B300: speed_t = 0x07; -pub const B600: speed_t = 0x08; -pub const B1200: speed_t = 0x09; -pub const B1800: speed_t = 0x0A; -pub const B2400: speed_t = 0x0B; -pub const B4800: speed_t = 0x0C; -pub const B9600: speed_t = 0x0D; -pub const B19200: speed_t = 0x0E; -pub const B38400: speed_t = 0x0F; -pub const B57600: speed_t = 0x10; -pub const B115200: speed_t = 0x11; -pub const B230400: speed_t = 0x12; -pub const B31250: speed_t = 0x13; - -pub const 
TCSANOW: c_int = 0x01; -pub const TCSADRAIN: c_int = 0x02; -pub const TCSAFLUSH: c_int = 0x04; - -pub const TCOOFF: c_int = 0x01; -pub const TCOON: c_int = 0x02; -pub const TCIOFF: c_int = 0x04; -pub const TCION: c_int = 0x08; - -pub const TCIFLUSH: c_int = 0x01; -pub const TCOFLUSH: c_int = 0x02; -pub const TCIOFLUSH: c_int = 0x03; - -pub const TCGETA: c_ulong = 0x8000; -pub const TCSETA: c_ulong = TCGETA + 1; -pub const TCSETAF: c_ulong = TCGETA + 2; -pub const TCSETAW: c_ulong = TCGETA + 3; -pub const TCSBRK: c_ulong = TCGETA + 5; -pub const TCFLSH: c_ulong = TCGETA + 6; -pub const TCXONC: c_ulong = TCGETA + 7; -pub const TCGETBITS: c_ulong = TCGETA + 9; -pub const TCSETDTR: c_ulong = TCGETA + 10; -pub const TCSETRTS: c_ulong = TCGETA + 11; -pub const TIOCGWINSZ: c_ulong = TCGETA + 12; -pub const TIOCSWINSZ: c_ulong = TCGETA + 13; -pub const TIOCGPGRP: c_ulong = TCGETA + 15; -pub const TIOCSPGRP: c_ulong = TCGETA + 16; -pub const TIOCSCTTY: c_ulong = TCGETA + 17; -pub const TIOCMGET: c_ulong = TCGETA + 18; -pub const TIOCMSET: c_ulong = TCGETA + 19; -pub const TIOCSBRK: c_ulong = TCGETA + 20; -pub const TIOCCBRK: c_ulong = TCGETA + 21; -pub const TIOCMBIS: c_ulong = TCGETA + 22; -pub const TIOCMBIC: c_ulong = TCGETA + 23; -pub const TIOCGSID: c_ulong = TCGETA + 24; -pub const TIOCOUTQ: c_ulong = TCGETA + 25; -pub const TIOCEXCL: c_ulong = TCGETA + 26; -pub const TIOCNXCL: c_ulong = TCGETA + 27; - -pub const PRIO_PROCESS: c_int = 0; -pub const PRIO_PGRP: c_int = 1; -pub const PRIO_USER: c_int = 2; - -// utmpx entry types -pub const EMPTY: c_short = 0; -pub const BOOT_TIME: c_short = 1; -pub const OLD_TIME: c_short = 2; -pub const NEW_TIME: c_short = 3; -pub const USER_PROCESS: c_short = 4; -pub const INIT_PROCESS: c_short = 5; -pub const LOGIN_PROCESS: c_short = 6; -pub const DEAD_PROCESS: c_short = 7; - -pub const LOG_PID: c_int = 1 << 12; -pub const LOG_CONS: c_int = 2 << 12; -pub const LOG_ODELAY: c_int = 4 << 12; -pub const LOG_NDELAY: c_int = 8 << 12; -pub const LOG_SERIAL: c_int = 16 << 12; -pub const LOG_PERROR: c_int = 32 << 12; -pub const LOG_NOWAIT: c_int = 64 << 12; - -// spawn.h -// DIFF(main): changed to `c_short` in f62eb023ab -pub const POSIX_SPAWN_RESETIDS: c_int = 0x01; -pub const POSIX_SPAWN_SETPGROUP: c_int = 0x02; -pub const POSIX_SPAWN_SETSIGDEF: c_int = 0x10; -pub const POSIX_SPAWN_SETSIGMASK: c_int = 0x20; -pub const POSIX_SPAWN_SETSID: c_int = 0x40; - -const fn CMSG_ALIGN(len: usize) -> usize { - len + size_of::() - 1 & !(size_of::() - 1) -} - -f! 
{ - pub fn CMSG_FIRSTHDR(mhdr: *const msghdr) -> *mut cmsghdr { - if (*mhdr).msg_controllen as usize >= size_of::() { - (*mhdr).msg_control as *mut cmsghdr - } else { - core::ptr::null_mut::() - } - } - - pub fn CMSG_DATA(cmsg: *const cmsghdr) -> *mut c_uchar { - (cmsg as *mut c_uchar).offset(CMSG_ALIGN(size_of::()) as isize) - } - - pub const fn CMSG_SPACE(length: c_uint) -> c_uint { - (CMSG_ALIGN(length as usize) + CMSG_ALIGN(size_of::())) as c_uint - } - - pub const fn CMSG_LEN(length: c_uint) -> c_uint { - CMSG_ALIGN(size_of::()) as c_uint + length - } - - pub fn CMSG_NXTHDR(mhdr: *const msghdr, cmsg: *const cmsghdr) -> *mut cmsghdr { - if cmsg.is_null() { - return crate::CMSG_FIRSTHDR(mhdr); - } - let next = cmsg as usize - + CMSG_ALIGN((*cmsg).cmsg_len as usize) - + CMSG_ALIGN(size_of::()); - let max = (*mhdr).msg_control as usize + (*mhdr).msg_controllen as usize; - if next > max { - core::ptr::null_mut::() - } else { - (cmsg as usize + CMSG_ALIGN((*cmsg).cmsg_len as usize)) as *mut cmsghdr - } - } - - pub fn FD_CLR(fd: c_int, set: *mut fd_set) -> () { - let fd = fd as usize; - let size = size_of_val(&(*set).fds_bits[0]) * 8; - (*set).fds_bits[fd / size] &= !(1 << (fd % size)); - return; - } - - pub fn FD_ISSET(fd: c_int, set: *const fd_set) -> bool { - let fd = fd as usize; - let size = size_of_val(&(*set).fds_bits[0]) * 8; - return ((*set).fds_bits[fd / size] & (1 << (fd % size))) != 0; - } - - pub fn FD_SET(fd: c_int, set: *mut fd_set) -> () { - let fd = fd as usize; - let size = size_of_val(&(*set).fds_bits[0]) * 8; - (*set).fds_bits[fd / size] |= 1 << (fd % size); - return; - } - - pub fn FD_ZERO(set: *mut fd_set) -> () { - for slot in (*set).fds_bits.iter_mut() { - *slot = 0; - } - } -} - -safe_f! { - pub const fn WIFEXITED(status: c_int) -> bool { - (status & !0xff) == 0 - } - - pub const fn WEXITSTATUS(status: c_int) -> c_int { - status & 0xff - } - - pub const fn WIFSIGNALED(status: c_int) -> bool { - ((status >> 8) & 0xff) != 0 - } - - pub const fn WTERMSIG(status: c_int) -> c_int { - (status >> 8) & 0xff - } - - pub const fn WIFSTOPPED(status: c_int) -> bool { - ((status >> 16) & 0xff) != 0 - } - - pub const fn WSTOPSIG(status: c_int) -> c_int { - (status >> 16) & 0xff - } - - // actually WIFCORED, but this is used everywhere else - pub const fn WCOREDUMP(status: c_int) -> bool { - (status & 0x10000) != 0 - } - - pub const fn WIFCONTINUED(status: c_int) -> bool { - (status & 0x20000) != 0 - } -} - -extern "C" { - pub fn getrlimit(resource: c_int, rlim: *mut crate::rlimit) -> c_int; - pub fn setrlimit(resource: c_int, rlim: *const crate::rlimit) -> c_int; - pub fn getpriority(which: c_int, who: id_t) -> c_int; - pub fn setpriority(which: c_int, who: id_t, priority: c_int) -> c_int; - - pub fn endusershell(); - pub fn getpass(prompt: *const c_char) -> *mut c_char; - pub fn getusershell() -> *mut c_char; - pub fn issetugid() -> c_int; - pub fn setusershell(); - - pub fn utimensat( - fd: c_int, - path: *const c_char, - times: *const crate::timespec, - flag: c_int, - ) -> c_int; - pub fn futimens(fd: c_int, times: *const crate::timespec) -> c_int; - pub fn strerror_r(errnum: c_int, buf: *mut c_char, buflen: size_t) -> c_int; - pub fn _errnop() -> *mut c_int; - - pub fn abs(i: c_int) -> c_int; - pub fn labs(i: c_long) -> c_long; - pub fn rand() -> c_int; - pub fn srand(seed: c_uint); - pub fn getifaddrs(ifap: *mut *mut crate::ifaddrs) -> c_int; - pub fn freeifaddrs(ifa: *mut crate::ifaddrs); - pub fn ppoll( - fds: *mut crate::pollfd, - numfds: crate::nfds_t, - timeout: *const 
crate::timespec, - sigMask: *const sigset_t, - ) -> c_int; - - pub fn getspent() -> *mut spwd; - pub fn getspent_r( - pwd: *mut spwd, - buf: *mut c_char, - bufferSize: size_t, - res: *mut *mut spwd, - ) -> c_int; - pub fn setspent(); - pub fn endspent(); - pub fn getspnam(name: *const c_char) -> *mut spwd; - pub fn getspnam_r( - name: *const c_char, - spwd: *mut spwd, - buffer: *mut c_char, - bufferSize: size_t, - res: *mut *mut spwd, - ) -> c_int; - pub fn sgetspent(line: *const c_char) -> *mut spwd; - pub fn sgetspent_r( - line: *const c_char, - spwd: *mut spwd, - buffer: *mut c_char, - bufferSize: size_t, - res: *mut *mut spwd, - ) -> c_int; - pub fn fgetspent(file: *mut crate::FILE) -> *mut spwd; - pub fn fgetspent_r( - file: *mut crate::FILE, - spwd: *mut spwd, - buffer: *mut c_char, - bufferSize: size_t, - res: *mut *mut spwd, - ) -> c_int; - pub fn mkfifoat(dirfd: c_int, pathname: *const c_char, mode: mode_t) -> c_int; - pub fn mknodat(dirfd: c_int, pathname: *const c_char, mode: mode_t, dev: dev_t) -> c_int; - pub fn sem_destroy(sem: *mut sem_t) -> c_int; - pub fn sem_init(sem: *mut sem_t, pshared: c_int, value: c_uint) -> c_int; - - pub fn clock_getres(clk_id: crate::clockid_t, tp: *mut crate::timespec) -> c_int; - pub fn clock_gettime(clk_id: crate::clockid_t, tp: *mut crate::timespec) -> c_int; - pub fn clock_settime(clk_id: crate::clockid_t, tp: *const crate::timespec) -> c_int; - pub fn clock_getcpuclockid(pid: crate::pid_t, clk_id: *mut crate::clockid_t) -> c_int; - pub fn pthread_create( - thread: *mut crate::pthread_t, - attr: *const crate::pthread_attr_t, - f: extern "C" fn(*mut c_void) -> *mut c_void, - value: *mut c_void, - ) -> c_int; - pub fn pthread_attr_getguardsize( - attr: *const crate::pthread_attr_t, - guardsize: *mut size_t, - ) -> c_int; - pub fn pthread_attr_setguardsize(attr: *mut crate::pthread_attr_t, guardsize: size_t) -> c_int; - pub fn pthread_attr_getstack( - attr: *const crate::pthread_attr_t, - stackaddr: *mut *mut c_void, - stacksize: *mut size_t, - ) -> c_int; - pub fn pthread_condattr_getclock( - attr: *const pthread_condattr_t, - clock_id: *mut clockid_t, - ) -> c_int; - pub fn pthread_condattr_setclock( - attr: *mut pthread_condattr_t, - clock_id: crate::clockid_t, - ) -> c_int; - pub fn valloc(numBytes: size_t) -> *mut c_void; - pub fn malloc_usable_size(ptr: *mut c_void) -> size_t; - pub fn memalign(align: size_t, size: size_t) -> *mut c_void; - pub fn setgroups(ngroups: c_int, ptr: *const crate::gid_t) -> c_int; - pub fn initgroups(name: *const c_char, basegid: crate::gid_t) -> c_int; - pub fn ioctl(fd: c_int, request: c_ulong, ...) 
-> c_int; - pub fn mprotect(addr: *mut c_void, len: size_t, prot: c_int) -> c_int; - pub fn dirfd(dirp: *mut crate::DIR) -> c_int; - pub fn getnameinfo( - sa: *const crate::sockaddr, - salen: crate::socklen_t, - host: *mut c_char, - hostlen: crate::socklen_t, - serv: *mut c_char, - servlen: crate::socklen_t, - flags: c_int, - ) -> c_int; - pub fn pthread_mutex_timedlock( - lock: *mut pthread_mutex_t, - abstime: *const crate::timespec, - ) -> c_int; - pub fn pthread_sigqueue(thread: crate::pthread_t, sig: c_int, value: crate::sigval) -> c_int; - pub fn pthread_spin_init(lock: *mut crate::pthread_spinlock_t, pshared: c_int) -> c_int; - pub fn pthread_spin_destroy(lock: *mut crate::pthread_spinlock_t) -> c_int; - pub fn pthread_spin_lock(lock: *mut crate::pthread_spinlock_t) -> c_int; - pub fn pthread_spin_trylock(lock: *mut crate::pthread_spinlock_t) -> c_int; - pub fn pthread_spin_unlock(lock: *mut crate::pthread_spinlock_t) -> c_int; - pub fn waitid( - idtype: idtype_t, - id: id_t, - infop: *mut crate::siginfo_t, - options: c_int, - ) -> c_int; - - pub fn glob( - pattern: *const c_char, - flags: c_int, - errfunc: Option c_int>, - pglob: *mut crate::glob_t, - ) -> c_int; - pub fn globfree(pglob: *mut crate::glob_t); - pub fn gettimeofday(tp: *mut crate::timeval, tz: *mut c_void) -> c_int; - pub fn posix_madvise(addr: *mut c_void, len: size_t, advice: c_int) -> c_int; - pub fn posix_fadvise(fd: c_int, offset: off_t, len: off_t, advice: c_int) -> c_int; - pub fn posix_fallocate(fd: c_int, offset: off_t, len: off_t) -> c_int; - - pub fn shm_open(name: *const c_char, oflag: c_int, mode: mode_t) -> c_int; - pub fn shm_unlink(name: *const c_char) -> c_int; - - pub fn seekdir(dirp: *mut crate::DIR, loc: c_long); - - pub fn telldir(dirp: *mut crate::DIR) -> c_long; - pub fn madvise(addr: *mut c_void, len: size_t, advice: c_int) -> c_int; - - pub fn msync(addr: *mut c_void, len: size_t, flags: c_int) -> c_int; - - pub fn recvfrom( - socket: c_int, - buf: *mut c_void, - len: size_t, - flags: c_int, - addr: *mut crate::sockaddr, - addrlen: *mut crate::socklen_t, - ) -> ssize_t; - pub fn nl_langinfo(item: crate::nl_item) -> *mut c_char; - - pub fn bind( - socket: c_int, - address: *const crate::sockaddr, - address_len: crate::socklen_t, - ) -> c_int; - - pub fn accept4( - socket: c_int, - address: *mut crate::sockaddr, - addressLength: *mut crate::socklen_t, - flags: c_int, - ) -> c_int; - - pub fn writev(fd: c_int, iov: *const crate::iovec, count: c_int) -> ssize_t; - pub fn readv(fd: c_int, iov: *const crate::iovec, count: c_int) -> ssize_t; - - pub fn sendmsg(fd: c_int, msg: *const crate::msghdr, flags: c_int) -> ssize_t; - pub fn recvmsg(fd: c_int, msg: *mut crate::msghdr, flags: c_int) -> ssize_t; - - // DIFF(main): changed to `*const *mut` in e77f551de9 - pub fn execvpe( - file: *const c_char, - argv: *const *const c_char, - environment: *const *const c_char, - ) -> c_int; - - pub fn getgrgid_r( - gid: crate::gid_t, - grp: *mut crate::group, - buf: *mut c_char, - buflen: size_t, - result: *mut *mut crate::group, - ) -> c_int; - pub fn getgrouplist( - user: *const c_char, - basegroup: crate::gid_t, - grouplist: *mut crate::gid_t, - groupcount: *mut c_int, - ) -> c_int; - pub fn sigaltstack(ss: *const stack_t, oss: *mut stack_t) -> c_int; - pub fn sigsuspend(mask: *const crate::sigset_t) -> c_int; - pub fn sem_close(sem: *mut sem_t) -> c_int; - pub fn getdtablesize() -> c_int; - pub fn getgrnam_r( - name: *const c_char, - grp: *mut crate::group, - buf: *mut c_char, - buflen: size_t, - result: *mut 
*mut crate::group, - ) -> c_int; - pub fn pthread_sigmask(how: c_int, set: *const sigset_t, oldset: *mut sigset_t) -> c_int; - pub fn sem_open(name: *const c_char, oflag: c_int, ...) -> *mut sem_t; - pub fn getgrnam(name: *const c_char) -> *mut crate::group; - pub fn pthread_kill(thread: crate::pthread_t, sig: c_int) -> c_int; - pub fn sem_unlink(name: *const c_char) -> c_int; - pub fn getpwnam_r( - name: *const c_char, - pwd: *mut passwd, - buf: *mut c_char, - buflen: size_t, - result: *mut *mut passwd, - ) -> c_int; - pub fn getpwuid_r( - uid: crate::uid_t, - pwd: *mut passwd, - buf: *mut c_char, - buflen: size_t, - result: *mut *mut passwd, - ) -> c_int; - pub fn getpwent() -> *mut passwd; - pub fn setpwent(); - pub fn endpwent(); - pub fn endgrent(); - pub fn getgrent() -> *mut crate::group; - pub fn setgrent(); - pub fn sigwait(set: *const sigset_t, sig: *mut c_int) -> c_int; - pub fn pthread_atfork( - prepare: Option, - parent: Option, - child: Option, - ) -> c_int; - pub fn getgrgid(gid: crate::gid_t) -> *mut crate::group; - pub fn popen(command: *const c_char, mode: *const c_char) -> *mut crate::FILE; - pub fn sethostname(name: *const c_char, len: size_t) -> c_int; - pub fn uname(buf: *mut crate::utsname) -> c_int; - pub fn getutxent() -> *mut utmpx; - pub fn getutxid(ut: *const utmpx) -> *mut utmpx; - pub fn getutxline(ut: *const utmpx) -> *mut utmpx; - pub fn pututxline(ut: *const utmpx) -> *mut utmpx; - pub fn setutxent(); - pub fn endutxent(); - pub fn faccessat(dirfd: c_int, pathname: *const c_char, mode: c_int, flags: c_int) -> c_int; - - pub fn sigtimedwait( - set: *const sigset_t, - info: *mut siginfo_t, - timeout: *const crate::timespec, - ) -> c_int; - pub fn sigwaitinfo(set: *const sigset_t, info: *mut siginfo_t) -> c_int; - - pub fn getitimer(which: c_int, curr_value: *mut crate::itimerval) -> c_int; - pub fn setitimer( - which: c_int, - new_value: *const crate::itimerval, - old_value: *mut crate::itimerval, - ) -> c_int; - - pub fn regcomp(preg: *mut regex_t, pattern: *const c_char, cflags: c_int) -> c_int; - - pub fn regexec( - preg: *const regex_t, - input: *const c_char, - nmatch: size_t, - pmatch: *mut regmatch_t, - eflags: c_int, - ) -> c_int; - - pub fn regerror( - errcode: c_int, - preg: *const regex_t, - errbuf: *mut c_char, - errbuf_size: size_t, - ) -> size_t; - - pub fn regfree(preg: *mut regex_t); - - pub fn msgctl(msqid: c_int, cmd: c_int, buf: *mut msqid_ds) -> c_int; - pub fn msgget(key: crate::key_t, msgflg: c_int) -> c_int; - pub fn msgrcv( - msqid: c_int, - msgp: *mut c_void, - msgsz: size_t, - msgtype: c_long, - msgflg: c_int, - ) -> ssize_t; - pub fn msgsnd(msqid: c_int, msgp: *const c_void, msgsz: size_t, msgflg: c_int) -> c_int; - pub fn semget(key: crate::key_t, nsems: c_int, semflg: c_int) -> c_int; - pub fn semctl(semid: c_int, semnum: c_int, cmd: c_int, ...) 
-> c_int; - pub fn semop(semid: c_int, sops: *mut sembuf, nsops: size_t) -> c_int; - pub fn ftok(pathname: *const c_char, proj_id: c_int) -> crate::key_t; - - pub fn memrchr(cx: *const c_void, c: c_int, n: size_t) -> *mut c_void; - - pub fn lsearch( - key: *const c_void, - base: *mut c_void, - nelp: *mut size_t, - width: size_t, - compar: Option c_int>, - ) -> *mut c_void; - pub fn lfind( - key: *const c_void, - base: *const c_void, - nelp: *mut size_t, - width: size_t, - compar: Option c_int>, - ) -> *mut c_void; - pub fn hcreate(nelt: size_t) -> c_int; - pub fn hdestroy(); - pub fn hsearch(entry: crate::ENTRY, action: crate::ACTION) -> *mut crate::ENTRY; - - pub fn drand48() -> c_double; - pub fn erand48(xseed: *mut c_ushort) -> c_double; - pub fn lrand48() -> c_long; - pub fn nrand48(xseed: *mut c_ushort) -> c_long; - pub fn mrand48() -> c_long; - pub fn jrand48(xseed: *mut c_ushort) -> c_long; - pub fn srand48(seed: c_long); - pub fn seed48(xseed: *mut c_ushort) -> *mut c_ushort; - pub fn lcong48(p: *mut c_ushort); - - pub fn clearenv() -> c_int; - pub fn ctermid(s: *mut c_char) -> *mut c_char; - - pub fn sync(); - pub fn getpagesize() -> c_int; - - pub fn brk(addr: *mut c_void) -> c_int; - pub fn sbrk(increment: intptr_t) -> *mut c_void; - - pub fn posix_spawn( - pid: *mut crate::pid_t, - path: *const c_char, - file_actions: *const crate::posix_spawn_file_actions_t, - attrp: *const crate::posix_spawnattr_t, - argv: *const *mut c_char, - envp: *const *mut c_char, - ) -> c_int; - pub fn posix_spawnp( - pid: *mut crate::pid_t, - file: *const c_char, - file_actions: *const crate::posix_spawn_file_actions_t, - attrp: *const crate::posix_spawnattr_t, - argv: *const *mut c_char, - envp: *const *mut c_char, - ) -> c_int; - - pub fn posix_spawn_file_actions_init(file_actions: *mut posix_spawn_file_actions_t) -> c_int; - pub fn posix_spawn_file_actions_destroy(file_actions: *mut posix_spawn_file_actions_t) - -> c_int; - pub fn posix_spawn_file_actions_addopen( - file_actions: *mut posix_spawn_file_actions_t, - fildes: c_int, - path: *const c_char, - oflag: c_int, - mode: mode_t, - ) -> c_int; - pub fn posix_spawn_file_actions_addclose( - file_actions: *mut posix_spawn_file_actions_t, - fildes: c_int, - ) -> c_int; - pub fn posix_spawn_file_actions_adddup2( - file_actions: *mut posix_spawn_file_actions_t, - fildes: c_int, - newfildes: c_int, - ) -> c_int; - - pub fn posix_spawnattr_init(attr: *mut posix_spawnattr_t) -> c_int; - pub fn posix_spawnattr_destroy(attr: *mut posix_spawnattr_t) -> c_int; - pub fn posix_spawnattr_getflags(attr: *const posix_spawnattr_t, _flags: *mut c_short) -> c_int; - pub fn posix_spawnattr_setflags(attr: *mut posix_spawnattr_t, flags: c_short) -> c_int; - pub fn posix_spawnattr_getpgroup( - attr: *const posix_spawnattr_t, - _pgroup: *mut crate::pid_t, - ) -> c_int; - pub fn posix_spawnattr_setpgroup(attr: *mut posix_spawnattr_t, pgroup: crate::pid_t) -> c_int; - pub fn posix_spawnattr_getsigdefault( - attr: *const posix_spawnattr_t, - sigdefault: *mut crate::sigset_t, - ) -> c_int; - pub fn posix_spawnattr_setsigdefault( - attr: *mut posix_spawnattr_t, - sigdefault: *const crate::sigset_t, - ) -> c_int; - pub fn posix_spawnattr_getsigmask( - attr: *const posix_spawnattr_t, - _sigmask: *mut crate::sigset_t, - ) -> c_int; - pub fn posix_spawnattr_setsigmask( - attr: *mut posix_spawnattr_t, - sigmask: *const crate::sigset_t, - ) -> c_int; - pub fn getopt_long( - argc: c_int, - argv: *const *mut c_char, - optstring: *const c_char, - longopts: *const option, - longindex: 
*mut c_int, - ) -> c_int; - pub fn strcasecmp_l( - string1: *const c_char, - string2: *const c_char, - locale: crate::locale_t, - ) -> c_int; - pub fn strncasecmp_l( - string1: *const c_char, - string2: *const c_char, - length: size_t, - locale: crate::locale_t, - ) -> c_int; - - pub fn getentropy(buf: *mut c_void, buflen: size_t) -> c_int; -} - -#[link(name = "gnu")] -extern "C" { - pub fn memmem( - source: *const c_void, - sourceLength: size_t, - search: *const c_void, - searchLength: size_t, - ) -> *mut c_void; - - pub fn pthread_getattr_np(thread: crate::pthread_t, attr: *mut crate::pthread_attr_t) -> c_int; - pub fn pthread_getname_np( - thread: crate::pthread_t, - buffer: *mut c_char, - length: size_t, - ) -> c_int; - pub fn pthread_setname_np(thread: crate::pthread_t, name: *const c_char) -> c_int; -} - -cfg_if! { - if #[cfg(target_pointer_width = "64")] { - mod b64; - pub use self::b64::*; - } else { - mod b32; - pub use self::b32::*; - } -} - -cfg_if! { - if #[cfg(target_arch = "x86")] { - // TODO - // mod x86; - // pub use self::x86::*; - } else if #[cfg(target_arch = "x86_64")] { - mod x86_64; - pub use self::x86_64::*; - } else if #[cfg(target_arch = "aarch64")] { - // TODO - // mod aarch64; - // pub use self::aarch64::*; - } -} - -mod bsd; -pub use self::bsd::*; - -mod native; -pub use self::native::*; diff --git a/vendor/libc/src/unix/haiku/native.rs b/vendor/libc/src/unix/haiku/native.rs deleted file mode 100644 index 13a203f92ff565..00000000000000 --- a/vendor/libc/src/unix/haiku/native.rs +++ /dev/null @@ -1,1388 +0,0 @@ -use crate::off_t; -use crate::prelude::*; - -// This file follows the Haiku API for Haiku R1 beta 5. It is organized by the -// C/C++ header files in which the concepts can be found, while adhering to the -// style guide for this crate. - -// Helper macro to generate u32 constants. The Haiku API uses (non-standard) -// multi-character constants (like 'UPDA' or 'MSGM') to represent 32 bit -// integer constants. - -macro_rules! haiku_constant { - ($a:tt, $b:tt, $c:tt, $d:tt) => { - (($a as u32) << 24) + (($b as u32) << 16) + (($c as u32) << 8) + ($d as u32) - }; -} - -// support/SupportDefs.h -pub type status_t = i32; -pub type bigtime_t = i64; -pub type nanotime_t = i64; -pub type type_code = u32; -pub type perform_code = u32; - -// kernel/OS.h -pub type area_id = i32; -pub type port_id = i32; -pub type sem_id = i32; -pub type team_id = i32; -pub type thread_id = i32; - -pub type thread_func = extern "C" fn(*mut c_void) -> status_t; - -// kernel/image.h -pub type image_id = i32; - -c_enum! 
{ - // kernel/OS.h - pub enum thread_state { - B_THREAD_RUNNING = 1, - B_THREAD_READY, - B_THREAD_RECEIVING, - B_THREAD_ASLEEP, - B_THREAD_SUSPENDED, - B_THREAD_WAITING, - } - - // kernel/image.h - pub enum image_type { - B_APP_IMAGE = 1, - B_LIBRARY_IMAGE, - B_ADD_ON_IMAGE, - B_SYSTEM_IMAGE, - } - - // kernel/scheduler.h - - pub enum be_task_flags { - B_DEFAULT_MEDIA_PRIORITY = 0x000, - B_OFFLINE_PROCESSING = 0x001, - B_STATUS_RENDERING = 0x002, - B_USER_INPUT_HANDLING = 0x004, - B_LIVE_VIDEO_MANIPULATION = 0x008, - B_VIDEO_PLAYBACK = 0x010, - B_VIDEO_RECORDING = 0x020, - B_LIVE_AUDIO_MANIPULATION = 0x040, - B_AUDIO_PLAYBACK = 0x080, - B_AUDIO_RECORDING = 0x100, - B_LIVE_3D_RENDERING = 0x200, - B_NUMBER_CRUNCHING = 0x400, - B_MIDI_PROCESSING = 0x800, - } - - pub enum schduler_mode { - SCHEDULER_MODE_LOW_LATENCY, - SCHEDULER_MODE_POWER_SAVING, - } - - // FindDirectory.h - pub enum path_base_directory { - B_FIND_PATH_INSTALLATION_LOCATION_DIRECTORY, - B_FIND_PATH_ADD_ONS_DIRECTORY, - B_FIND_PATH_APPS_DIRECTORY, - B_FIND_PATH_BIN_DIRECTORY, - B_FIND_PATH_BOOT_DIRECTORY, - B_FIND_PATH_CACHE_DIRECTORY, - B_FIND_PATH_DATA_DIRECTORY, - B_FIND_PATH_DEVELOP_DIRECTORY, - B_FIND_PATH_DEVELOP_LIB_DIRECTORY, - B_FIND_PATH_DOCUMENTATION_DIRECTORY, - B_FIND_PATH_ETC_DIRECTORY, - B_FIND_PATH_FONTS_DIRECTORY, - B_FIND_PATH_HEADERS_DIRECTORY, - B_FIND_PATH_LIB_DIRECTORY, - B_FIND_PATH_LOG_DIRECTORY, - B_FIND_PATH_MEDIA_NODES_DIRECTORY, - B_FIND_PATH_PACKAGES_DIRECTORY, - B_FIND_PATH_PREFERENCES_DIRECTORY, - B_FIND_PATH_SERVERS_DIRECTORY, - B_FIND_PATH_SETTINGS_DIRECTORY, - B_FIND_PATH_SOUNDS_DIRECTORY, - B_FIND_PATH_SPOOL_DIRECTORY, - B_FIND_PATH_TRANSLATORS_DIRECTORY, - B_FIND_PATH_VAR_DIRECTORY, - B_FIND_PATH_IMAGE_PATH = 1000, - B_FIND_PATH_PACKAGE_PATH, - } - - pub enum directory_which { - B_DESKTOP_DIRECTORY = 0, - B_TRASH_DIRECTORY, - B_SYSTEM_DIRECTORY = 1000, - B_SYSTEM_ADDONS_DIRECTORY = 1002, - B_SYSTEM_BOOT_DIRECTORY, - B_SYSTEM_FONTS_DIRECTORY, - B_SYSTEM_LIB_DIRECTORY, - B_SYSTEM_SERVERS_DIRECTORY, - B_SYSTEM_APPS_DIRECTORY, - B_SYSTEM_BIN_DIRECTORY, - B_SYSTEM_DOCUMENTATION_DIRECTORY = 1010, - B_SYSTEM_PREFERENCES_DIRECTORY, - B_SYSTEM_TRANSLATORS_DIRECTORY, - B_SYSTEM_MEDIA_NODES_DIRECTORY, - B_SYSTEM_SOUNDS_DIRECTORY, - B_SYSTEM_DATA_DIRECTORY, - B_SYSTEM_DEVELOP_DIRECTORY, - B_SYSTEM_PACKAGES_DIRECTORY, - B_SYSTEM_HEADERS_DIRECTORY, - B_SYSTEM_ETC_DIRECTORY = 2008, - B_SYSTEM_SETTINGS_DIRECTORY = 2010, - B_SYSTEM_LOG_DIRECTORY = 2012, - B_SYSTEM_SPOOL_DIRECTORY, - B_SYSTEM_TEMP_DIRECTORY, - B_SYSTEM_VAR_DIRECTORY, - B_SYSTEM_CACHE_DIRECTORY = 2020, - B_SYSTEM_NONPACKAGED_DIRECTORY = 2023, - B_SYSTEM_NONPACKAGED_ADDONS_DIRECTORY, - B_SYSTEM_NONPACKAGED_TRANSLATORS_DIRECTORY, - B_SYSTEM_NONPACKAGED_MEDIA_NODES_DIRECTORY, - B_SYSTEM_NONPACKAGED_BIN_DIRECTORY, - B_SYSTEM_NONPACKAGED_DATA_DIRECTORY, - B_SYSTEM_NONPACKAGED_FONTS_DIRECTORY, - B_SYSTEM_NONPACKAGED_SOUNDS_DIRECTORY, - B_SYSTEM_NONPACKAGED_DOCUMENTATION_DIRECTORY, - B_SYSTEM_NONPACKAGED_LIB_DIRECTORY, - B_SYSTEM_NONPACKAGED_HEADERS_DIRECTORY, - B_SYSTEM_NONPACKAGED_DEVELOP_DIRECTORY, - B_USER_DIRECTORY = 3000, - B_USER_CONFIG_DIRECTORY, - B_USER_ADDONS_DIRECTORY, - B_USER_BOOT_DIRECTORY, - B_USER_FONTS_DIRECTORY, - B_USER_LIB_DIRECTORY, - B_USER_SETTINGS_DIRECTORY, - B_USER_DESKBAR_DIRECTORY, - B_USER_PRINTERS_DIRECTORY, - B_USER_TRANSLATORS_DIRECTORY, - B_USER_MEDIA_NODES_DIRECTORY, - B_USER_SOUNDS_DIRECTORY, - B_USER_DATA_DIRECTORY, - B_USER_CACHE_DIRECTORY, - B_USER_PACKAGES_DIRECTORY, - B_USER_HEADERS_DIRECTORY, - 
B_USER_NONPACKAGED_DIRECTORY, - B_USER_NONPACKAGED_ADDONS_DIRECTORY, - B_USER_NONPACKAGED_TRANSLATORS_DIRECTORY, - B_USER_NONPACKAGED_MEDIA_NODES_DIRECTORY, - B_USER_NONPACKAGED_BIN_DIRECTORY, - B_USER_NONPACKAGED_DATA_DIRECTORY, - B_USER_NONPACKAGED_FONTS_DIRECTORY, - B_USER_NONPACKAGED_SOUNDS_DIRECTORY, - B_USER_NONPACKAGED_DOCUMENTATION_DIRECTORY, - B_USER_NONPACKAGED_LIB_DIRECTORY, - B_USER_NONPACKAGED_HEADERS_DIRECTORY, - B_USER_NONPACKAGED_DEVELOP_DIRECTORY, - B_USER_DEVELOP_DIRECTORY, - B_USER_DOCUMENTATION_DIRECTORY, - B_USER_SERVERS_DIRECTORY, - B_USER_APPS_DIRECTORY, - B_USER_BIN_DIRECTORY, - B_USER_PREFERENCES_DIRECTORY, - B_USER_ETC_DIRECTORY, - B_USER_LOG_DIRECTORY, - B_USER_SPOOL_DIRECTORY, - B_USER_VAR_DIRECTORY, - B_APPS_DIRECTORY = 4000, - B_PREFERENCES_DIRECTORY, - B_UTILITIES_DIRECTORY, - B_PACKAGE_LINKS_DIRECTORY, - } - - // kernel/OS.h - - pub enum topology_level_type { - B_TOPOLOGY_UNKNOWN, - B_TOPOLOGY_ROOT, - B_TOPOLOGY_SMT, - B_TOPOLOGY_CORE, - B_TOPOLOGY_PACKAGE, - } - - pub enum cpu_platform { - B_CPU_UNKNOWN, - B_CPU_x86, - B_CPU_x86_64, - B_CPU_PPC, - B_CPU_PPC_64, - B_CPU_M68K, - B_CPU_ARM, - B_CPU_ARM_64, - B_CPU_ALPHA, - B_CPU_MIPS, - B_CPU_SH, - B_CPU_SPARC, - B_CPU_RISC_V, - } - - pub enum cpu_vendor { - B_CPU_VENDOR_UNKNOWN, - B_CPU_VENDOR_AMD, - B_CPU_VENDOR_CYRIX, - B_CPU_VENDOR_IDT, - B_CPU_VENDOR_INTEL, - B_CPU_VENDOR_NATIONAL_SEMICONDUCTOR, - B_CPU_VENDOR_RISE, - B_CPU_VENDOR_TRANSMETA, - B_CPU_VENDOR_VIA, - B_CPU_VENDOR_IBM, - B_CPU_VENDOR_MOTOROLA, - B_CPU_VENDOR_NEC, - B_CPU_VENDOR_HYGON, - B_CPU_VENDOR_SUN, - B_CPU_VENDOR_FUJITSU, - } -} - -s! { - // kernel/OS.h - pub struct area_info { - pub area: area_id, - pub name: [c_char; B_OS_NAME_LENGTH], - pub size: usize, - pub lock: u32, - pub protection: u32, - pub team: team_id, - pub ram_size: u32, - pub copy_count: u32, - pub in_count: u32, - pub out_count: u32, - pub address: *mut c_void, - } - - pub struct port_info { - pub port: port_id, - pub team: team_id, - pub name: [c_char; B_OS_NAME_LENGTH], - pub capacity: i32, - pub queue_count: i32, - pub total_count: i32, - } - - pub struct port_message_info { - pub size: size_t, - pub sender: crate::uid_t, - pub sender_group: crate::gid_t, - pub sender_team: crate::team_id, - } - - pub struct team_info { - pub team: team_id, - pub thread_count: i32, - pub image_count: i32, - pub area_count: i32, - pub debugger_nub_thread: thread_id, - pub debugger_nub_port: port_id, - pub argc: i32, - pub args: [c_char; 64], - pub uid: crate::uid_t, - pub gid: crate::gid_t, - } - - pub struct sem_info { - pub sem: sem_id, - pub team: team_id, - pub name: [c_char; B_OS_NAME_LENGTH], - pub count: i32, - pub latest_holder: thread_id, - } - - pub struct team_usage_info { - pub user_time: bigtime_t, - pub kernel_time: bigtime_t, - } - - pub struct thread_info { - pub thread: thread_id, - pub team: team_id, - pub name: [c_char; B_OS_NAME_LENGTH], - pub state: thread_state, - pub priority: i32, - pub sem: sem_id, - pub user_time: bigtime_t, - pub kernel_time: bigtime_t, - pub stack_base: *mut c_void, - pub stack_end: *mut c_void, - } - - pub struct cpu_info { - pub active_time: bigtime_t, - pub enabled: bool, - pub current_frequency: u64, - } - - pub struct system_info { - pub boot_time: bigtime_t, - pub cpu_count: u32, - pub max_pages: u64, - pub used_pages: u64, - pub cached_pages: u64, - pub block_cache_pages: u64, - pub ignored_pages: u64, - pub needed_memory: u64, - pub free_memory: u64, - pub max_swap_pages: u64, - pub free_swap_pages: u64, - pub page_faults: u32, - 
pub max_sems: u32, - pub used_sems: u32, - pub max_ports: u32, - pub used_ports: u32, - pub max_threads: u32, - pub used_threads: u32, - pub max_teams: u32, - pub used_teams: u32, - pub kernel_name: [c_char; B_FILE_NAME_LENGTH], - pub kernel_build_date: [c_char; B_OS_NAME_LENGTH], - pub kernel_build_time: [c_char; B_OS_NAME_LENGTH], - pub kernel_version: i64, - pub abi: u32, - } - - pub struct object_wait_info { - pub object: i32, - pub type_: u16, - pub events: u16, - } - - pub struct cpu_topology_root_info { - pub platform: cpu_platform, - } - - pub struct cpu_topology_package_info { - pub vendor: cpu_vendor, - pub cache_line_size: u32, - } - - pub struct cpu_topology_core_info { - pub model: u32, - pub default_frequency: u64, - } - // kernel/fs_attr.h - pub struct attr_info { - pub type_: u32, - pub size: off_t, - } - - // kernel/fs_index.h - pub struct index_info { - pub type_: u32, - pub size: off_t, - pub modification_time: crate::time_t, - pub creation_time: crate::time_t, - pub uid: crate::uid_t, - pub gid: crate::gid_t, - } - - //kernel/fs_info.h - pub struct fs_info { - pub dev: crate::dev_t, - pub root: crate::ino_t, - pub flags: u32, - pub block_size: off_t, - pub io_size: off_t, - pub total_blocks: off_t, - pub free_blocks: off_t, - pub total_nodes: off_t, - pub free_nodes: off_t, - pub device_name: [c_char; 128], - pub volume_name: [c_char; B_FILE_NAME_LENGTH], - pub fsh_name: [c_char; B_OS_NAME_LENGTH], - } - - // kernel/image.h - // FIXME(1.0): This should not implement `PartialEq` - #[allow(unpredictable_function_pointer_comparisons)] - pub struct image_info { - pub id: image_id, - pub image_type: c_int, - pub sequence: i32, - pub init_order: i32, - // FIXME(1.0): these should be made optional - pub init_routine: extern "C" fn(), - pub term_routine: extern "C" fn(), - pub device: crate::dev_t, - pub node: crate::ino_t, - pub name: [c_char; crate::PATH_MAX as usize], - pub text: *mut c_void, - pub data: *mut c_void, - pub text_size: i32, - pub data_size: i32, - pub api_version: i32, - pub abi: i32, - } - - pub struct __c_anonymous_eax_0 { - pub max_eax: u32, - pub vendor_id: [c_char; 12], - } - - pub struct __c_anonymous_eax_1 { - pub stepping: u32, - pub model: u32, - pub family: u32, - pub tpe: u32, - __reserved_0: u32, - pub extended_model: u32, - pub extended_family: u32, - __reserved_1: u32, - pub brand_index: u32, - pub clflush: u32, - pub logical_cpus: u32, - pub apic_id: u32, - pub features: u32, - pub extended_features: u32, - } - - pub struct __c_anonymous_eax_2 { - pub call_num: u8, - pub cache_descriptors: [u8; 15], - } - - pub struct __c_anonymous_eax_3 { - __reserved: [u32; 2], - pub serial_number_high: u32, - pub serial_number_low: u32, - } - - pub struct __c_anonymous_regs { - pub eax: u32, - pub ebx: u32, - pub edx: u32, - pub ecx: u32, - } -} - -s_no_extra_traits! { - pub union cpuid_info { - pub eax_0: __c_anonymous_eax_0, - pub eax_1: __c_anonymous_eax_1, - pub eax_2: __c_anonymous_eax_2, - pub eax_3: __c_anonymous_eax_3, - pub as_chars: [c_char; 16], - pub regs: __c_anonymous_regs, - } - - pub union __c_anonymous_cpu_topology_info_data { - pub root: cpu_topology_root_info, - pub package: cpu_topology_package_info, - pub core: cpu_topology_core_info, - } - - pub struct cpu_topology_node_info { - pub id: u32, - pub type_: topology_level_type, - pub level: u32, - pub data: __c_anonymous_cpu_topology_info_data, - } -} - -cfg_if! 
{ - if #[cfg(feature = "extra_traits")] { - impl PartialEq for cpuid_info { - fn eq(&self, other: &cpuid_info) -> bool { - unsafe { - self.eax_0 == other.eax_0 - || self.eax_1 == other.eax_1 - || self.eax_2 == other.eax_2 - || self.eax_3 == other.eax_3 - || self.as_chars == other.as_chars - || self.regs == other.regs - } - } - } - impl Eq for cpuid_info {} - - impl PartialEq for __c_anonymous_cpu_topology_info_data { - fn eq(&self, other: &__c_anonymous_cpu_topology_info_data) -> bool { - unsafe { - self.root == other.root - || self.package == other.package - || self.core == other.core - } - } - } - impl Eq for __c_anonymous_cpu_topology_info_data {} - - impl PartialEq for cpu_topology_node_info { - fn eq(&self, other: &cpu_topology_node_info) -> bool { - self.id == other.id && self.type_ == other.type_ && self.level == other.level - } - } - - impl Eq for cpu_topology_node_info {} - } -} - -// kernel/OS.h -pub const B_OS_NAME_LENGTH: usize = 32; -pub const B_PAGE_SIZE: usize = 4096; -pub const B_INFINITE_TIMEOUT: usize = 9223372036854775807; - -pub const B_RELATIVE_TIMEOUT: u32 = 0x8; -pub const B_ABSOLUTE_TIMEOUT: u32 = 0x10; -pub const B_TIMEOUT_REAL_TIME_BASE: u32 = 0x40; -pub const B_ABSOLUTE_REAL_TIME_TIMEOUT: u32 = B_ABSOLUTE_TIMEOUT | B_TIMEOUT_REAL_TIME_BASE; - -pub const B_NO_LOCK: u32 = 0; -pub const B_LAZY_LOCK: u32 = 1; -pub const B_FULL_LOCK: u32 = 2; -pub const B_CONTIGUOUS: u32 = 3; -pub const B_LOMEM: u32 = 4; -pub const B_32_BIT_FULL_LOCK: u32 = 5; -pub const B_32_BIT_CONTIGUOUS: u32 = 6; - -pub const B_ANY_ADDRESS: u32 = 0; -pub const B_EXACT_ADDRESS: u32 = 1; -pub const B_BASE_ADDRESS: u32 = 2; -pub const B_CLONE_ADDRESS: u32 = 3; -pub const B_ANY_KERNEL_ADDRESS: u32 = 4; -pub const B_RANDOMIZED_ANY_ADDRESS: u32 = 6; -pub const B_RANDOMIZED_BASE_ADDRESS: u32 = 7; - -pub const B_READ_AREA: u32 = 1 << 0; -pub const B_WRITE_AREA: u32 = 1 << 1; -pub const B_EXECUTE_AREA: u32 = 1 << 2; -pub const B_STACK_AREA: u32 = 1 << 3; -pub const B_CLONEABLE_AREA: u32 = 1 << 8; - -pub const B_CAN_INTERRUPT: u32 = 0x01; -pub const B_CHECK_PERMISSION: u32 = 0x04; -pub const B_KILL_CAN_INTERRUPT: u32 = 0x20; -pub const B_DO_NOT_RESCHEDULE: u32 = 0x02; -pub const B_RELEASE_ALL: u32 = 0x08; -pub const B_RELEASE_IF_WAITING_ONLY: u32 = 0x10; - -pub const B_CURRENT_TEAM: team_id = 0; -pub const B_SYSTEM_TEAM: team_id = 1; - -pub const B_TEAM_USAGE_SELF: i32 = 0; -pub const B_TEAM_USAGE_CHILDREN: i32 = -1; - -pub const B_IDLE_PRIORITY: i32 = 0; -pub const B_LOWEST_ACTIVE_PRIORITY: i32 = 1; -pub const B_LOW_PRIORITY: i32 = 5; -pub const B_NORMAL_PRIORITY: i32 = 10; -pub const B_DISPLAY_PRIORITY: i32 = 15; -pub const B_URGENT_DISPLAY_PRIORITY: i32 = 20; -pub const B_REAL_TIME_DISPLAY_PRIORITY: i32 = 100; -pub const B_URGENT_PRIORITY: i32 = 110; -pub const B_REAL_TIME_PRIORITY: i32 = 120; - -pub const B_SYSTEM_TIMEBASE: i32 = 0; -pub const B_FIRST_REAL_TIME_PRIORITY: i32 = B_REAL_TIME_DISPLAY_PRIORITY; - -pub const B_ONE_SHOT_ABSOLUTE_ALARM: u32 = 1; -pub const B_ONE_SHOT_RELATIVE_ALARM: u32 = 2; -pub const B_PERIODIC_ALARM: u32 = 3; - -pub const B_OBJECT_TYPE_FD: u16 = 0; -pub const B_OBJECT_TYPE_SEMAPHORE: u16 = 1; -pub const B_OBJECT_TYPE_PORT: u16 = 2; -pub const B_OBJECT_TYPE_THREAD: u16 = 3; - -pub const B_EVENT_READ: u16 = 0x0001; -pub const B_EVENT_WRITE: u16 = 0x0002; -pub const B_EVENT_ERROR: u16 = 0x0004; -pub const B_EVENT_PRIORITY_READ: u16 = 0x0008; -pub const B_EVENT_PRIORITY_WRITE: u16 = 0x0010; -pub const B_EVENT_HIGH_PRIORITY_READ: u16 = 0x0020; -pub const 
B_EVENT_HIGH_PRIORITY_WRITE: u16 = 0x0040; -pub const B_EVENT_DISCONNECTED: u16 = 0x0080; -pub const B_EVENT_ACQUIRE_SEMAPHORE: u16 = 0x0001; -pub const B_EVENT_INVALID: u16 = 0x1000; - -// kernel/fs_info.h -pub const B_FS_IS_READONLY: u32 = 0x00000001; -pub const B_FS_IS_REMOVABLE: u32 = 0x00000002; -pub const B_FS_IS_PERSISTENT: u32 = 0x00000004; -pub const B_FS_IS_SHARED: u32 = 0x00000008; -pub const B_FS_HAS_MIME: u32 = 0x00010000; -pub const B_FS_HAS_ATTR: u32 = 0x00020000; -pub const B_FS_HAS_QUERY: u32 = 0x00040000; -pub const B_FS_HAS_SELF_HEALING_LINKS: u32 = 0x00080000; -pub const B_FS_HAS_ALIASES: u32 = 0x00100000; -pub const B_FS_SUPPORTS_NODE_MONITORING: u32 = 0x00200000; -pub const B_FS_SUPPORTS_MONITOR_CHILDREN: u32 = 0x00400000; - -// kernel/fs_query.h -pub const B_LIVE_QUERY: u32 = 0x00000001; -pub const B_QUERY_NON_INDEXED: u32 = 0x00000002; - -// kernel/fs_volume.h -pub const B_MOUNT_READ_ONLY: u32 = 1; -pub const B_MOUNT_VIRTUAL_DEVICE: u32 = 2; -pub const B_FORCE_UNMOUNT: u32 = 1; - -// kernel/image.h -pub const B_FLUSH_DCACHE: u32 = 0x0001; -pub const B_FLUSH_ICACHE: u32 = 0x0004; -pub const B_INVALIDATE_DCACHE: u32 = 0x0002; -pub const B_INVALIDATE_ICACHE: u32 = 0x0008; - -pub const B_SYMBOL_TYPE_DATA: i32 = 0x1; -pub const B_SYMBOL_TYPE_TEXT: i32 = 0x2; -pub const B_SYMBOL_TYPE_ANY: i32 = 0x5; - -// storage/StorageDefs.h -pub const B_DEV_NAME_LENGTH: usize = 128; -pub const B_FILE_NAME_LENGTH: usize = crate::FILENAME_MAX as usize; -pub const B_PATH_NAME_LENGTH: usize = crate::PATH_MAX as usize; -pub const B_ATTR_NAME_LENGTH: usize = B_FILE_NAME_LENGTH - 1; -pub const B_MIME_TYPE_LENGTH: usize = B_ATTR_NAME_LENGTH - 15; -pub const B_MAX_SYMLINKS: usize = 16; - -// Haiku open modes in BFile are passed as u32 -pub const B_READ_ONLY: u32 = crate::O_RDONLY as u32; -pub const B_WRITE_ONLY: u32 = crate::O_WRONLY as u32; -pub const B_READ_WRITE: u32 = crate::O_RDWR as u32; - -pub const B_FAIL_IF_EXISTS: u32 = crate::O_EXCL as u32; -pub const B_CREATE_FILE: u32 = crate::O_CREAT as u32; -pub const B_ERASE_FILE: u32 = crate::O_TRUNC as u32; -pub const B_OPEN_AT_END: u32 = crate::O_APPEND as u32; - -pub const B_FILE_NODE: u32 = 0x01; -pub const B_SYMLINK_NODE: u32 = 0x02; -pub const B_DIRECTORY_NODE: u32 = 0x04; -pub const B_ANY_NODE: u32 = 0x07; - -// support/Errors.h -pub const B_GENERAL_ERROR_BASE: status_t = core::i32::MIN; -pub const B_OS_ERROR_BASE: status_t = B_GENERAL_ERROR_BASE + 0x1000; -pub const B_APP_ERROR_BASE: status_t = B_GENERAL_ERROR_BASE + 0x2000; -pub const B_INTERFACE_ERROR_BASE: status_t = B_GENERAL_ERROR_BASE + 0x3000; -pub const B_MEDIA_ERROR_BASE: status_t = B_GENERAL_ERROR_BASE + 0x4000; -pub const B_TRANSLATION_ERROR_BASE: status_t = B_GENERAL_ERROR_BASE + 0x4800; -pub const B_MIDI_ERROR_BASE: status_t = B_GENERAL_ERROR_BASE + 0x5000; -pub const B_STORAGE_ERROR_BASE: status_t = B_GENERAL_ERROR_BASE + 0x6000; -pub const B_POSIX_ERROR_BASE: status_t = B_GENERAL_ERROR_BASE + 0x7000; -pub const B_MAIL_ERROR_BASE: status_t = B_GENERAL_ERROR_BASE + 0x8000; -pub const B_PRINT_ERROR_BASE: status_t = B_GENERAL_ERROR_BASE + 0x9000; -pub const B_DEVICE_ERROR_BASE: status_t = B_GENERAL_ERROR_BASE + 0xa000; -pub const B_ERRORS_END: status_t = B_GENERAL_ERROR_BASE + 0xffff; - -// General errors -pub const B_NO_MEMORY: status_t = B_GENERAL_ERROR_BASE + 0; -pub const B_IO_ERROR: status_t = B_GENERAL_ERROR_BASE + 1; -pub const B_PERMISSION_DENIED: status_t = B_GENERAL_ERROR_BASE + 2; -pub const B_BAD_INDEX: status_t = B_GENERAL_ERROR_BASE + 3; -pub const B_BAD_TYPE: 
status_t = B_GENERAL_ERROR_BASE + 4; -pub const B_BAD_VALUE: status_t = B_GENERAL_ERROR_BASE + 5; -pub const B_MISMATCHED_VALUES: status_t = B_GENERAL_ERROR_BASE + 6; -pub const B_NAME_NOT_FOUND: status_t = B_GENERAL_ERROR_BASE + 7; -pub const B_NAME_IN_USE: status_t = B_GENERAL_ERROR_BASE + 8; -pub const B_TIMED_OUT: status_t = B_GENERAL_ERROR_BASE + 9; -pub const B_INTERRUPTED: status_t = B_GENERAL_ERROR_BASE + 10; -pub const B_WOULD_BLOCK: status_t = B_GENERAL_ERROR_BASE + 11; -pub const B_CANCELED: status_t = B_GENERAL_ERROR_BASE + 12; -pub const B_NO_INIT: status_t = B_GENERAL_ERROR_BASE + 13; -pub const B_NOT_INITIALIZED: status_t = B_GENERAL_ERROR_BASE + 13; -pub const B_BUSY: status_t = B_GENERAL_ERROR_BASE + 14; -pub const B_NOT_ALLOWED: status_t = B_GENERAL_ERROR_BASE + 15; -pub const B_BAD_DATA: status_t = B_GENERAL_ERROR_BASE + 16; -pub const B_DONT_DO_THAT: status_t = B_GENERAL_ERROR_BASE + 17; - -pub const B_ERROR: status_t = -1; -pub const B_OK: status_t = 0; -pub const B_NO_ERROR: status_t = 0; - -// Kernel kit errors -pub const B_BAD_SEM_ID: status_t = B_OS_ERROR_BASE + 0; -pub const B_NO_MORE_SEMS: status_t = B_OS_ERROR_BASE + 1; - -pub const B_BAD_THREAD_ID: status_t = B_OS_ERROR_BASE + 0x100; -pub const B_NO_MORE_THREADS: status_t = B_OS_ERROR_BASE + 0x101; -pub const B_BAD_THREAD_STATE: status_t = B_OS_ERROR_BASE + 0x102; -pub const B_BAD_TEAM_ID: status_t = B_OS_ERROR_BASE + 0x103; -pub const B_NO_MORE_TEAMS: status_t = B_OS_ERROR_BASE + 0x104; - -pub const B_BAD_PORT_ID: status_t = B_OS_ERROR_BASE + 0x200; -pub const B_NO_MORE_PORTS: status_t = B_OS_ERROR_BASE + 0x201; - -pub const B_BAD_IMAGE_ID: status_t = B_OS_ERROR_BASE + 0x300; -pub const B_BAD_ADDRESS: status_t = B_OS_ERROR_BASE + 0x301; -pub const B_NOT_AN_EXECUTABLE: status_t = B_OS_ERROR_BASE + 0x302; -pub const B_MISSING_LIBRARY: status_t = B_OS_ERROR_BASE + 0x303; -pub const B_MISSING_SYMBOL: status_t = B_OS_ERROR_BASE + 0x304; -pub const B_UNKNOWN_EXECUTABLE: status_t = B_OS_ERROR_BASE + 0x305; -pub const B_LEGACY_EXECUTABLE: status_t = B_OS_ERROR_BASE + 0x306; - -pub const B_DEBUGGER_ALREADY_INSTALLED: status_t = B_OS_ERROR_BASE + 0x400; - -// Application kit errors -pub const B_BAD_REPLY: status_t = B_APP_ERROR_BASE + 0; -pub const B_DUPLICATE_REPLY: status_t = B_APP_ERROR_BASE + 1; -pub const B_MESSAGE_TO_SELF: status_t = B_APP_ERROR_BASE + 2; -pub const B_BAD_HANDLER: status_t = B_APP_ERROR_BASE + 3; -pub const B_ALREADY_RUNNING: status_t = B_APP_ERROR_BASE + 4; -pub const B_LAUNCH_FAILED: status_t = B_APP_ERROR_BASE + 5; -pub const B_AMBIGUOUS_APP_LAUNCH: status_t = B_APP_ERROR_BASE + 6; -pub const B_UNKNOWN_MIME_TYPE: status_t = B_APP_ERROR_BASE + 7; -pub const B_BAD_SCRIPT_SYNTAX: status_t = B_APP_ERROR_BASE + 8; -pub const B_LAUNCH_FAILED_NO_RESOLVE_LINK: status_t = B_APP_ERROR_BASE + 9; -pub const B_LAUNCH_FAILED_EXECUTABLE: status_t = B_APP_ERROR_BASE + 10; -pub const B_LAUNCH_FAILED_APP_NOT_FOUND: status_t = B_APP_ERROR_BASE + 11; -pub const B_LAUNCH_FAILED_APP_IN_TRASH: status_t = B_APP_ERROR_BASE + 12; -pub const B_LAUNCH_FAILED_NO_PREFERRED_APP: status_t = B_APP_ERROR_BASE + 13; -pub const B_LAUNCH_FAILED_FILES_APP_NOT_FOUND: status_t = B_APP_ERROR_BASE + 14; -pub const B_BAD_MIME_SNIFFER_RULE: status_t = B_APP_ERROR_BASE + 15; -pub const B_NOT_A_MESSAGE: status_t = B_APP_ERROR_BASE + 16; -pub const B_SHUTDOWN_CANCELLED: status_t = B_APP_ERROR_BASE + 17; -pub const B_SHUTTING_DOWN: status_t = B_APP_ERROR_BASE + 18; - -// Storage kit errors -pub const B_FILE_ERROR: status_t = 
B_STORAGE_ERROR_BASE + 0; -pub const B_FILE_EXISTS: status_t = B_STORAGE_ERROR_BASE + 2; -pub const B_ENTRY_NOT_FOUND: status_t = B_STORAGE_ERROR_BASE + 3; -pub const B_NAME_TOO_LONG: status_t = B_STORAGE_ERROR_BASE + 4; -pub const B_NOT_A_DIRECTORY: status_t = B_STORAGE_ERROR_BASE + 5; -pub const B_DIRECTORY_NOT_EMPTY: status_t = B_STORAGE_ERROR_BASE + 6; -pub const B_DEVICE_FULL: status_t = B_STORAGE_ERROR_BASE + 7; -pub const B_READ_ONLY_DEVICE: status_t = B_STORAGE_ERROR_BASE + 8; -pub const B_IS_A_DIRECTORY: status_t = B_STORAGE_ERROR_BASE + 9; -pub const B_NO_MORE_FDS: status_t = B_STORAGE_ERROR_BASE + 10; -pub const B_CROSS_DEVICE_LINK: status_t = B_STORAGE_ERROR_BASE + 11; -pub const B_LINK_LIMIT: status_t = B_STORAGE_ERROR_BASE + 12; -pub const B_BUSTED_PIPE: status_t = B_STORAGE_ERROR_BASE + 13; -pub const B_UNSUPPORTED: status_t = B_STORAGE_ERROR_BASE + 14; -pub const B_PARTITION_TOO_SMALL: status_t = B_STORAGE_ERROR_BASE + 15; -pub const B_PARTIAL_READ: status_t = B_STORAGE_ERROR_BASE + 16; -pub const B_PARTIAL_WRITE: status_t = B_STORAGE_ERROR_BASE + 17; - -// Mapped posix errors -pub const B_BUFFER_OVERFLOW: status_t = crate::EOVERFLOW; -pub const B_TOO_MANY_ARGS: status_t = crate::E2BIG; -pub const B_FILE_TOO_LARGE: status_t = crate::EFBIG; -pub const B_RESULT_NOT_REPRESENTABLE: status_t = crate::ERANGE; -pub const B_DEVICE_NOT_FOUND: status_t = crate::ENODEV; -pub const B_NOT_SUPPORTED: status_t = crate::EOPNOTSUPP; - -// Media kit errors -pub const B_STREAM_NOT_FOUND: status_t = B_MEDIA_ERROR_BASE + 0; -pub const B_SERVER_NOT_FOUND: status_t = B_MEDIA_ERROR_BASE + 1; -pub const B_RESOURCE_NOT_FOUND: status_t = B_MEDIA_ERROR_BASE + 2; -pub const B_RESOURCE_UNAVAILABLE: status_t = B_MEDIA_ERROR_BASE + 3; -pub const B_BAD_SUBSCRIBER: status_t = B_MEDIA_ERROR_BASE + 4; -pub const B_SUBSCRIBER_NOT_ENTERED: status_t = B_MEDIA_ERROR_BASE + 5; -pub const B_BUFFER_NOT_AVAILABLE: status_t = B_MEDIA_ERROR_BASE + 6; -pub const B_LAST_BUFFER_ERROR: status_t = B_MEDIA_ERROR_BASE + 7; - -pub const B_MEDIA_SYSTEM_FAILURE: status_t = B_MEDIA_ERROR_BASE + 100; -pub const B_MEDIA_BAD_NODE: status_t = B_MEDIA_ERROR_BASE + 101; -pub const B_MEDIA_NODE_BUSY: status_t = B_MEDIA_ERROR_BASE + 102; -pub const B_MEDIA_BAD_FORMAT: status_t = B_MEDIA_ERROR_BASE + 103; -pub const B_MEDIA_BAD_BUFFER: status_t = B_MEDIA_ERROR_BASE + 104; -pub const B_MEDIA_TOO_MANY_NODES: status_t = B_MEDIA_ERROR_BASE + 105; -pub const B_MEDIA_TOO_MANY_BUFFERS: status_t = B_MEDIA_ERROR_BASE + 106; -pub const B_MEDIA_NODE_ALREADY_EXISTS: status_t = B_MEDIA_ERROR_BASE + 107; -pub const B_MEDIA_BUFFER_ALREADY_EXISTS: status_t = B_MEDIA_ERROR_BASE + 108; -pub const B_MEDIA_CANNOT_SEEK: status_t = B_MEDIA_ERROR_BASE + 109; -pub const B_MEDIA_CANNOT_CHANGE_RUN_MODE: status_t = B_MEDIA_ERROR_BASE + 110; -pub const B_MEDIA_APP_ALREADY_REGISTERED: status_t = B_MEDIA_ERROR_BASE + 111; -pub const B_MEDIA_APP_NOT_REGISTERED: status_t = B_MEDIA_ERROR_BASE + 112; -pub const B_MEDIA_CANNOT_RECLAIM_BUFFERS: status_t = B_MEDIA_ERROR_BASE + 113; -pub const B_MEDIA_BUFFERS_NOT_RECLAIMED: status_t = B_MEDIA_ERROR_BASE + 114; -pub const B_MEDIA_TIME_SOURCE_STOPPED: status_t = B_MEDIA_ERROR_BASE + 115; -pub const B_MEDIA_TIME_SOURCE_BUSY: status_t = B_MEDIA_ERROR_BASE + 116; -pub const B_MEDIA_BAD_SOURCE: status_t = B_MEDIA_ERROR_BASE + 117; -pub const B_MEDIA_BAD_DESTINATION: status_t = B_MEDIA_ERROR_BASE + 118; -pub const B_MEDIA_ALREADY_CONNECTED: status_t = B_MEDIA_ERROR_BASE + 119; -pub const B_MEDIA_NOT_CONNECTED: status_t = 
B_MEDIA_ERROR_BASE + 120; -pub const B_MEDIA_BAD_CLIP_FORMAT: status_t = B_MEDIA_ERROR_BASE + 121; -pub const B_MEDIA_ADDON_FAILED: status_t = B_MEDIA_ERROR_BASE + 122; -pub const B_MEDIA_ADDON_DISABLED: status_t = B_MEDIA_ERROR_BASE + 123; -pub const B_MEDIA_CHANGE_IN_PROGRESS: status_t = B_MEDIA_ERROR_BASE + 124; -pub const B_MEDIA_STALE_CHANGE_COUNT: status_t = B_MEDIA_ERROR_BASE + 125; -pub const B_MEDIA_ADDON_RESTRICTED: status_t = B_MEDIA_ERROR_BASE + 126; -pub const B_MEDIA_NO_HANDLER: status_t = B_MEDIA_ERROR_BASE + 127; -pub const B_MEDIA_DUPLICATE_FORMAT: status_t = B_MEDIA_ERROR_BASE + 128; -pub const B_MEDIA_REALTIME_DISABLED: status_t = B_MEDIA_ERROR_BASE + 129; -pub const B_MEDIA_REALTIME_UNAVAILABLE: status_t = B_MEDIA_ERROR_BASE + 130; - -// Mail kit errors -pub const B_MAIL_NO_DAEMON: status_t = B_MAIL_ERROR_BASE + 0; -pub const B_MAIL_UNKNOWN_USER: status_t = B_MAIL_ERROR_BASE + 1; -pub const B_MAIL_WRONG_PASSWORD: status_t = B_MAIL_ERROR_BASE + 2; -pub const B_MAIL_UNKNOWN_HOST: status_t = B_MAIL_ERROR_BASE + 3; -pub const B_MAIL_ACCESS_ERROR: status_t = B_MAIL_ERROR_BASE + 4; -pub const B_MAIL_UNKNOWN_FIELD: status_t = B_MAIL_ERROR_BASE + 5; -pub const B_MAIL_NO_RECIPIENT: status_t = B_MAIL_ERROR_BASE + 6; -pub const B_MAIL_INVALID_MAIL: status_t = B_MAIL_ERROR_BASE + 7; - -// Print kit errors -pub const B_NO_PRINT_SERVER: status_t = B_PRINT_ERROR_BASE + 0; - -// Device kit errors -pub const B_DEV_INVALID_IOCTL: status_t = B_DEVICE_ERROR_BASE + 0; -pub const B_DEV_NO_MEMORY: status_t = B_DEVICE_ERROR_BASE + 1; -pub const B_DEV_BAD_DRIVE_NUM: status_t = B_DEVICE_ERROR_BASE + 2; -pub const B_DEV_NO_MEDIA: status_t = B_DEVICE_ERROR_BASE + 3; -pub const B_DEV_UNREADABLE: status_t = B_DEVICE_ERROR_BASE + 4; -pub const B_DEV_FORMAT_ERROR: status_t = B_DEVICE_ERROR_BASE + 5; -pub const B_DEV_TIMEOUT: status_t = B_DEVICE_ERROR_BASE + 6; -pub const B_DEV_RECALIBRATE_ERROR: status_t = B_DEVICE_ERROR_BASE + 7; -pub const B_DEV_SEEK_ERROR: status_t = B_DEVICE_ERROR_BASE + 8; -pub const B_DEV_ID_ERROR: status_t = B_DEVICE_ERROR_BASE + 9; -pub const B_DEV_READ_ERROR: status_t = B_DEVICE_ERROR_BASE + 10; -pub const B_DEV_WRITE_ERROR: status_t = B_DEVICE_ERROR_BASE + 11; -pub const B_DEV_NOT_READY: status_t = B_DEVICE_ERROR_BASE + 12; -pub const B_DEV_MEDIA_CHANGED: status_t = B_DEVICE_ERROR_BASE + 13; -pub const B_DEV_MEDIA_CHANGE_REQUESTED: status_t = B_DEVICE_ERROR_BASE + 14; -pub const B_DEV_RESOURCE_CONFLICT: status_t = B_DEVICE_ERROR_BASE + 15; -pub const B_DEV_CONFIGURATION_ERROR: status_t = B_DEVICE_ERROR_BASE + 16; -pub const B_DEV_DISABLED_BY_USER: status_t = B_DEVICE_ERROR_BASE + 17; -pub const B_DEV_DOOR_OPEN: status_t = B_DEVICE_ERROR_BASE + 18; - -pub const B_DEV_INVALID_PIPE: status_t = B_DEVICE_ERROR_BASE + 19; -pub const B_DEV_CRC_ERROR: status_t = B_DEVICE_ERROR_BASE + 20; -pub const B_DEV_STALLED: status_t = B_DEVICE_ERROR_BASE + 21; -pub const B_DEV_BAD_PID: status_t = B_DEVICE_ERROR_BASE + 22; -pub const B_DEV_UNEXPECTED_PID: status_t = B_DEVICE_ERROR_BASE + 23; -pub const B_DEV_DATA_OVERRUN: status_t = B_DEVICE_ERROR_BASE + 24; -pub const B_DEV_DATA_UNDERRUN: status_t = B_DEVICE_ERROR_BASE + 25; -pub const B_DEV_FIFO_OVERRUN: status_t = B_DEVICE_ERROR_BASE + 26; -pub const B_DEV_FIFO_UNDERRUN: status_t = B_DEVICE_ERROR_BASE + 27; -pub const B_DEV_PENDING: status_t = B_DEVICE_ERROR_BASE + 28; -pub const B_DEV_MULTIPLE_ERRORS: status_t = B_DEVICE_ERROR_BASE + 29; -pub const B_DEV_TOO_LATE: status_t = B_DEVICE_ERROR_BASE + 30; - -// translation kit errors -pub const 
B_TRANSLATION_BASE_ERROR: status_t = B_TRANSLATION_ERROR_BASE + 0; -pub const B_NO_TRANSLATOR: status_t = B_TRANSLATION_ERROR_BASE + 1; -pub const B_ILLEGAL_DATA: status_t = B_TRANSLATION_ERROR_BASE + 2; - -// support/TypeConstants.h -pub const B_AFFINE_TRANSFORM_TYPE: u32 = haiku_constant!('A', 'M', 'T', 'X'); -pub const B_ALIGNMENT_TYPE: u32 = haiku_constant!('A', 'L', 'G', 'N'); -pub const B_ANY_TYPE: u32 = haiku_constant!('A', 'N', 'Y', 'T'); -pub const B_ATOM_TYPE: u32 = haiku_constant!('A', 'T', 'O', 'M'); -pub const B_ATOMREF_TYPE: u32 = haiku_constant!('A', 'T', 'M', 'R'); -pub const B_BOOL_TYPE: u32 = haiku_constant!('B', 'O', 'O', 'L'); -pub const B_CHAR_TYPE: u32 = haiku_constant!('C', 'H', 'A', 'R'); -pub const B_COLOR_8_BIT_TYPE: u32 = haiku_constant!('C', 'L', 'R', 'B'); -pub const B_DOUBLE_TYPE: u32 = haiku_constant!('D', 'B', 'L', 'E'); -pub const B_FLOAT_TYPE: u32 = haiku_constant!('F', 'L', 'O', 'T'); -pub const B_GRAYSCALE_8_BIT_TYPE: u32 = haiku_constant!('G', 'R', 'Y', 'B'); -pub const B_INT16_TYPE: u32 = haiku_constant!('S', 'H', 'R', 'T'); -pub const B_INT32_TYPE: u32 = haiku_constant!('L', 'O', 'N', 'G'); -pub const B_INT64_TYPE: u32 = haiku_constant!('L', 'L', 'N', 'G'); -pub const B_INT8_TYPE: u32 = haiku_constant!('B', 'Y', 'T', 'E'); -pub const B_LARGE_ICON_TYPE: u32 = haiku_constant!('I', 'C', 'O', 'N'); -pub const B_MEDIA_PARAMETER_GROUP_TYPE: u32 = haiku_constant!('B', 'M', 'C', 'G'); -pub const B_MEDIA_PARAMETER_TYPE: u32 = haiku_constant!('B', 'M', 'C', 'T'); -pub const B_MEDIA_PARAMETER_WEB_TYPE: u32 = haiku_constant!('B', 'M', 'C', 'W'); -pub const B_MESSAGE_TYPE: u32 = haiku_constant!('M', 'S', 'G', 'G'); -pub const B_MESSENGER_TYPE: u32 = haiku_constant!('M', 'S', 'N', 'G'); -pub const B_MIME_TYPE: u32 = haiku_constant!('M', 'I', 'M', 'E'); -pub const B_MINI_ICON_TYPE: u32 = haiku_constant!('M', 'I', 'C', 'N'); -pub const B_MONOCHROME_1_BIT_TYPE: u32 = haiku_constant!('M', 'N', 'O', 'B'); -pub const B_OBJECT_TYPE: u32 = haiku_constant!('O', 'P', 'T', 'R'); -pub const B_OFF_T_TYPE: u32 = haiku_constant!('O', 'F', 'F', 'T'); -pub const B_PATTERN_TYPE: u32 = haiku_constant!('P', 'A', 'T', 'N'); -pub const B_POINTER_TYPE: u32 = haiku_constant!('P', 'N', 'T', 'R'); -pub const B_POINT_TYPE: u32 = haiku_constant!('B', 'P', 'N', 'T'); -pub const B_PROPERTY_INFO_TYPE: u32 = haiku_constant!('S', 'C', 'T', 'D'); -pub const B_RAW_TYPE: u32 = haiku_constant!('R', 'A', 'W', 'T'); -pub const B_RECT_TYPE: u32 = haiku_constant!('R', 'E', 'C', 'T'); -pub const B_REF_TYPE: u32 = haiku_constant!('R', 'R', 'E', 'F'); -pub const B_RGB_32_BIT_TYPE: u32 = haiku_constant!('R', 'G', 'B', 'B'); -pub const B_RGB_COLOR_TYPE: u32 = haiku_constant!('R', 'G', 'B', 'C'); -pub const B_SIZE_TYPE: u32 = haiku_constant!('S', 'I', 'Z', 'E'); -pub const B_SIZE_T_TYPE: u32 = haiku_constant!('S', 'I', 'Z', 'T'); -pub const B_SSIZE_T_TYPE: u32 = haiku_constant!('S', 'S', 'Z', 'T'); -pub const B_STRING_TYPE: u32 = haiku_constant!('C', 'S', 'T', 'R'); -pub const B_STRING_LIST_TYPE: u32 = haiku_constant!('S', 'T', 'R', 'L'); -pub const B_TIME_TYPE: u32 = haiku_constant!('T', 'I', 'M', 'E'); -pub const B_UINT16_TYPE: u32 = haiku_constant!('U', 'S', 'H', 'T'); -pub const B_UINT32_TYPE: u32 = haiku_constant!('U', 'L', 'N', 'G'); -pub const B_UINT64_TYPE: u32 = haiku_constant!('U', 'L', 'L', 'G'); -pub const B_UINT8_TYPE: u32 = haiku_constant!('U', 'B', 'Y', 'T'); -pub const B_VECTOR_ICON_TYPE: u32 = haiku_constant!('V', 'I', 'C', 'N'); -pub const B_XATTR_TYPE: u32 = haiku_constant!('X', 'A', 'T', 
'R'); -pub const B_NETWORK_ADDRESS_TYPE: u32 = haiku_constant!('N', 'W', 'A', 'D'); -pub const B_MIME_STRING_TYPE: u32 = haiku_constant!('M', 'I', 'M', 'S'); -pub const B_ASCII_TYPE: u32 = haiku_constant!('T', 'E', 'X', 'T'); -pub const B_APP_IMAGE_SYMBOL: *const c_void = core::ptr::null(); - -extern "C" { - // kernel/OS.h - pub fn create_area( - name: *const c_char, - startAddress: *mut *mut c_void, - addressSpec: u32, - size: usize, - lock: u32, - protection: u32, - ) -> area_id; - pub fn clone_area( - name: *const c_char, - destAddress: *mut *mut c_void, - addressSpec: u32, - protection: u32, - source: area_id, - ) -> area_id; - pub fn find_area(name: *const c_char) -> area_id; - pub fn area_for(address: *mut c_void) -> area_id; - pub fn delete_area(id: area_id) -> status_t; - pub fn resize_area(id: area_id, newSize: usize) -> status_t; - pub fn set_area_protection(id: area_id, newProtection: u32) -> status_t; - pub fn _get_area_info(id: area_id, areaInfo: *mut area_info, size: usize) -> status_t; - pub fn _get_next_area_info( - team: team_id, - cookie: *mut isize, - areaInfo: *mut area_info, - size: usize, - ) -> status_t; - - pub fn create_port(capacity: i32, name: *const c_char) -> port_id; - pub fn find_port(name: *const c_char) -> port_id; - pub fn read_port( - port: port_id, - code: *mut i32, - buffer: *mut c_void, - bufferSize: size_t, - ) -> ssize_t; - pub fn read_port_etc( - port: port_id, - code: *mut i32, - buffer: *mut c_void, - bufferSize: size_t, - flags: u32, - timeout: bigtime_t, - ) -> ssize_t; - pub fn write_port( - port: port_id, - code: i32, - buffer: *const c_void, - bufferSize: size_t, - ) -> status_t; - pub fn write_port_etc( - port: port_id, - code: i32, - buffer: *const c_void, - bufferSize: size_t, - flags: u32, - timeout: bigtime_t, - ) -> status_t; - pub fn close_port(port: port_id) -> status_t; - pub fn delete_port(port: port_id) -> status_t; - pub fn port_buffer_size(port: port_id) -> ssize_t; - pub fn port_buffer_size_etc(port: port_id, flags: u32, timeout: bigtime_t) -> ssize_t; - pub fn port_count(port: port_id) -> ssize_t; - pub fn set_port_owner(port: port_id, team: team_id) -> status_t; - - pub fn _get_port_info(port: port_id, buf: *mut port_info, portInfoSize: size_t) -> status_t; - pub fn _get_next_port_info( - port: port_id, - cookie: *mut i32, - portInfo: *mut port_info, - portInfoSize: size_t, - ) -> status_t; - pub fn _get_port_message_info_etc( - port: port_id, - info: *mut port_message_info, - infoSize: size_t, - flags: u32, - timeout: bigtime_t, - ) -> status_t; - - pub fn create_sem(count: i32, name: *const c_char) -> sem_id; - pub fn delete_sem(id: sem_id) -> status_t; - pub fn acquire_sem(id: sem_id) -> status_t; - pub fn acquire_sem_etc(id: sem_id, count: i32, flags: u32, timeout: bigtime_t) -> status_t; - pub fn release_sem(id: sem_id) -> status_t; - pub fn release_sem_etc(id: sem_id, count: i32, flags: u32) -> status_t; - pub fn switch_sem(semToBeReleased: sem_id, id: sem_id) -> status_t; - pub fn switch_sem_etc( - semToBeReleased: sem_id, - id: sem_id, - count: i32, - flags: u32, - timeout: bigtime_t, - ) -> status_t; - pub fn get_sem_count(id: sem_id, threadCount: *mut i32) -> status_t; - pub fn set_sem_owner(id: sem_id, team: team_id) -> status_t; - pub fn _get_sem_info(id: sem_id, info: *mut sem_info, infoSize: size_t) -> status_t; - pub fn _get_next_sem_info( - team: team_id, - cookie: *mut i32, - info: *mut sem_info, - infoSize: size_t, - ) -> status_t; - - pub fn kill_team(team: team_id) -> status_t; - pub fn _get_team_info(team: 
team_id, info: *mut team_info, size: size_t) -> status_t; - pub fn _get_next_team_info(cookie: *mut i32, info: *mut team_info, size: size_t) -> status_t; - - pub fn spawn_thread( - func: thread_func, - name: *const c_char, - priority: i32, - data: *mut c_void, - ) -> thread_id; - pub fn kill_thread(thread: thread_id) -> status_t; - pub fn resume_thread(thread: thread_id) -> status_t; - pub fn suspend_thread(thread: thread_id) -> status_t; - - pub fn rename_thread(thread: thread_id, newName: *const c_char) -> status_t; - pub fn set_thread_priority(thread: thread_id, newPriority: i32) -> status_t; - pub fn suggest_thread_priority( - what: u32, - period: i32, - jitter: crate::bigtime_t, - length: crate::bigtime_t, - ) -> i32; - pub fn estimate_max_scheduling_latency(th: crate::thread_id) -> crate::bigtime_t; - pub fn exit_thread(status: status_t); - pub fn wait_for_thread(thread: thread_id, returnValue: *mut status_t) -> status_t; - pub fn on_exit_thread(callback: extern "C" fn(*mut c_void), data: *mut c_void) -> status_t; - - pub fn find_thread(name: *const c_char) -> thread_id; - - pub fn get_scheduler_mode() -> i32; - pub fn set_scheduler_mode(mode: i32) -> status_t; - - pub fn send_data( - thread: thread_id, - code: i32, - buffer: *const c_void, - bufferSize: size_t, - ) -> status_t; - pub fn receive_data(sender: *mut thread_id, buffer: *mut c_void, bufferSize: size_t) -> i32; - pub fn has_data(thread: thread_id) -> bool; - - pub fn snooze(amount: bigtime_t) -> status_t; - pub fn snooze_etc(amount: bigtime_t, timeBase: c_int, flags: u32) -> status_t; - pub fn snooze_until(time: bigtime_t, timeBase: c_int) -> status_t; - - pub fn _get_thread_info(id: thread_id, info: *mut thread_info, size: size_t) -> status_t; - pub fn _get_next_thread_info( - team: team_id, - cookie: *mut i32, - info: *mut thread_info, - size: size_t, - ) -> status_t; - - pub fn get_pthread_thread_id(thread: crate::pthread_t) -> thread_id; - - pub fn _get_team_usage_info( - team: team_id, - who: i32, - info: *mut team_usage_info, - size: size_t, - ) -> status_t; - - pub fn real_time_clock() -> c_ulong; - pub fn set_real_time_clock(secsSinceJan1st1970: c_ulong); - pub fn real_time_clock_usecs() -> bigtime_t; - pub fn system_time() -> bigtime_t; - pub fn system_time_nsecs() -> nanotime_t; - // set_timezone() is deprecated and a no-op - - pub fn set_alarm(when: bigtime_t, flags: u32) -> bigtime_t; - pub fn debugger(message: *const c_char); - pub fn disable_debugger(state: c_int) -> c_int; - - pub fn get_system_info(info: *mut system_info) -> status_t; - pub fn _get_cpu_info_etc( - firstCPU: u32, - cpuCount: u32, - info: *mut cpu_info, - size: size_t, - ) -> status_t; - pub fn get_cpu_topology_info( - topologyInfos: *mut cpu_topology_node_info, - topologyInfoCount: *mut u32, - ) -> status_t; - pub fn is_computer_on() -> i32; - pub fn is_computer_on_fire() -> c_double; - pub fn send_signal(threadID: thread_id, signal: c_uint) -> c_int; - pub fn set_signal_stack(base: *mut c_void, size: size_t); - - pub fn wait_for_objects(infos: *mut object_wait_info, numInfos: c_int) -> ssize_t; - pub fn wait_for_objects_etc( - infos: *mut object_wait_info, - numInfos: c_int, - flags: u32, - timeout: bigtime_t, - ) -> ssize_t; - - // kernel/fs_attr.h - pub fn fs_read_attr( - fd: c_int, - attribute: *const c_char, - type_: u32, - pos: off_t, - buffer: *mut c_void, - readBytes: size_t, - ) -> ssize_t; - pub fn fs_write_attr( - fd: c_int, - attribute: *const c_char, - type_: u32, - pos: off_t, - buffer: *const c_void, - writeBytes: size_t, - ) 
-> ssize_t; - pub fn fs_remove_attr(fd: c_int, attribute: *const c_char) -> c_int; - pub fn fs_stat_attr(fd: c_int, attribute: *const c_char, attrInfo: *mut attr_info) -> c_int; - - pub fn fs_open_attr( - path: *const c_char, - attribute: *const c_char, - type_: u32, - openMode: c_int, - ) -> c_int; - pub fn fs_fopen_attr(fd: c_int, attribute: *const c_char, type_: u32, openMode: c_int) - -> c_int; - pub fn fs_close_attr(fd: c_int) -> c_int; - - pub fn fs_open_attr_dir(path: *const c_char) -> *mut crate::DIR; - pub fn fs_lopen_attr_dir(path: *const c_char) -> *mut crate::DIR; - pub fn fs_fopen_attr_dir(fd: c_int) -> *mut crate::DIR; - pub fn fs_close_attr_dir(dir: *mut crate::DIR) -> c_int; - pub fn fs_read_attr_dir(dir: *mut crate::DIR) -> *mut crate::dirent; - pub fn fs_rewind_attr_dir(dir: *mut crate::DIR); - - // kernel/fs_image.h - pub fn fs_create_index( - device: crate::dev_t, - name: *const c_char, - type_: u32, - flags: u32, - ) -> c_int; - pub fn fs_remove_index(device: crate::dev_t, name: *const c_char) -> c_int; - pub fn fs_stat_index( - device: crate::dev_t, - name: *const c_char, - indexInfo: *mut index_info, - ) -> c_int; - - pub fn fs_open_index_dir(device: crate::dev_t) -> *mut crate::DIR; - pub fn fs_close_index_dir(indexDirectory: *mut crate::DIR) -> c_int; - pub fn fs_read_index_dir(indexDirectory: *mut crate::DIR) -> *mut crate::dirent; - pub fn fs_rewind_index_dir(indexDirectory: *mut crate::DIR); - - // kernel/fs_info.h - pub fn dev_for_path(path: *const c_char) -> crate::dev_t; - pub fn next_dev(pos: *mut i32) -> crate::dev_t; - pub fn fs_stat_dev(dev: crate::dev_t, info: *mut fs_info) -> c_int; - - // kernel/fs_query.h - pub fn fs_open_query(device: crate::dev_t, query: *const c_char, flags: u32) - -> *mut crate::DIR; - pub fn fs_open_live_query( - device: crate::dev_t, - query: *const c_char, - flags: u32, - port: port_id, - token: i32, - ) -> *mut crate::DIR; - pub fn fs_close_query(d: *mut crate::DIR) -> c_int; - pub fn fs_read_query(d: *mut crate::DIR) -> *mut crate::dirent; - pub fn get_path_for_dirent(dent: *mut crate::dirent, buf: *mut c_char, len: size_t) - -> status_t; - - // kernel/fs_volume.h - pub fn fs_mount_volume( - where_: *const c_char, - device: *const c_char, - filesystem: *const c_char, - flags: u32, - parameters: *const c_char, - ) -> crate::dev_t; - pub fn fs_unmount_volume(path: *const c_char, flags: u32) -> status_t; - - // kernel/image.h - pub fn load_image( - argc: i32, - argv: *mut *const c_char, - environ: *mut *const c_char, - ) -> thread_id; - pub fn load_add_on(path: *const c_char) -> image_id; - pub fn unload_add_on(image: image_id) -> status_t; - pub fn get_image_symbol( - image: image_id, - name: *const c_char, - symbolType: i32, - symbolLocation: *mut *mut c_void, - ) -> status_t; - pub fn get_nth_image_symbol( - image: image_id, - n: i32, - nameBuffer: *mut c_char, - nameLength: *mut i32, - symbolType: *mut i32, - symbolLocation: *mut *mut c_void, - ) -> status_t; - pub fn clear_caches(address: *mut c_void, length: size_t, flags: u32); - pub fn _get_image_info(image: image_id, info: *mut image_info, size: size_t) -> status_t; - pub fn _get_next_image_info( - team: team_id, - cookie: *mut i32, - info: *mut image_info, - size: size_t, - ) -> status_t; - pub fn find_path( - codePointer: *const c_void, - baseDirectory: path_base_directory, - subPath: *const c_char, - pathBuffer: *mut c_char, - bufferSize: usize, - ) -> status_t; - pub fn find_path_etc( - codePointer: *const c_void, - dependency: *const c_char, - architecture: *const 
c_char, - baseDirectory: path_base_directory, - subPath: *const c_char, - flags: u32, - pathBuffer: *mut c_char, - bufferSize: size_t, - ) -> status_t; - pub fn find_path_for_path( - path: *const c_char, - baseDirectory: path_base_directory, - subPath: *const c_char, - pathBuffer: *mut c_char, - bufferSize: size_t, - ) -> status_t; - pub fn find_path_for_path_etc( - path: *const c_char, - dependency: *const c_char, - architecture: *const c_char, - baseDirectory: path_base_directory, - subPath: *const c_char, - flags: u32, - pathBuffer: *mut c_char, - bufferSize: size_t, - ) -> status_t; - pub fn find_paths( - baseDirectory: path_base_directory, - subPath: *const c_char, - _paths: *mut *mut *mut c_char, - pathCount: *mut size_t, - ) -> status_t; - pub fn find_paths_etc( - architecture: *const c_char, - baseDirectory: path_base_directory, - subPath: *const c_char, - flags: u32, - _paths: *mut *mut *mut c_char, - pathCount: *mut size_t, - ) -> status_t; - pub fn find_directory( - which: directory_which, - volume: crate::dev_t, - createIt: bool, - pathString: *mut c_char, - length: i32, - ) -> status_t; - - pub fn get_cpuid(info: *mut cpuid_info, eaxRegister: u32, cpuNum: u32) -> status_t; -} - -// The following functions are defined as macros in C/C++ -#[inline] -pub unsafe fn get_cpu_info(firstCPU: u32, cpuCount: u32, info: *mut cpu_info) -> status_t { - _get_cpu_info_etc(firstCPU, cpuCount, info, size_of::<cpu_info>() as size_t) -} - -#[inline] -pub unsafe fn get_area_info(id: area_id, info: *mut area_info) -> status_t { - _get_area_info(id, info, size_of::<area_info>() as usize) -} - -#[inline] -pub unsafe fn get_next_area_info( - team: team_id, - cookie: *mut isize, - info: *mut area_info, -) -> status_t { - _get_next_area_info(team, cookie, info, size_of::<area_info>() as usize) -} - -#[inline] -pub unsafe fn get_port_info(port: port_id, buf: *mut port_info) -> status_t { - _get_port_info(port, buf, size_of::<port_info>() as size_t) -} - -#[inline] -pub unsafe fn get_next_port_info( - port: port_id, - cookie: *mut i32, - portInfo: *mut port_info, -) -> status_t { - _get_next_port_info(port, cookie, portInfo, size_of::<port_info>() as size_t) -} - -#[inline] -pub unsafe fn get_port_message_info_etc( - port: port_id, - info: *mut port_message_info, - flags: u32, - timeout: bigtime_t, -) -> status_t { - _get_port_message_info_etc( - port, - info, - size_of::<port_message_info>() as size_t, - flags, - timeout, - ) -} - -#[inline] -pub unsafe fn get_sem_info(id: sem_id, info: *mut sem_info) -> status_t { - _get_sem_info(id, info, size_of::<sem_info>() as size_t) -} - -#[inline] -pub unsafe fn get_next_sem_info(team: team_id, cookie: *mut i32, info: *mut sem_info) -> status_t { - _get_next_sem_info(team, cookie, info, size_of::<sem_info>() as size_t) -} - -#[inline] -pub unsafe fn get_team_info(team: team_id, info: *mut team_info) -> status_t { - _get_team_info(team, info, size_of::<team_info>() as size_t) -} - -#[inline] -pub unsafe fn get_next_team_info(cookie: *mut i32, info: *mut team_info) -> status_t { - _get_next_team_info(cookie, info, size_of::<team_info>() as size_t) -} - -#[inline] -pub unsafe fn get_team_usage_info(team: team_id, who: i32, info: *mut team_usage_info) -> status_t { - _get_team_usage_info(team, who, info, size_of::<team_usage_info>() as size_t) -} - -#[inline] -pub unsafe fn get_thread_info(id: thread_id, info: *mut thread_info) -> status_t { - _get_thread_info(id, info, size_of::<thread_info>() as size_t) -} - -#[inline] -pub unsafe fn get_next_thread_info( - team: team_id, - cookie: *mut i32, - info: *mut thread_info, -) -> status_t { - _get_next_thread_info(team, cookie, info, size_of::<thread_info>() as size_t) -} -
-// kernel/image.h -#[inline] -pub unsafe fn get_image_info(image: image_id, info: *mut image_info) -> status_t { - _get_image_info(image, info, size_of::<image_info>() as size_t) -} - -#[inline] -pub unsafe fn get_next_image_info( - team: team_id, - cookie: *mut i32, - info: *mut image_info, -) -> status_t { - _get_next_image_info(team, cookie, info, size_of::<image_info>() as size_t) -} diff --git a/vendor/libc/src/unix/haiku/x86_64.rs b/vendor/libc/src/unix/haiku/x86_64.rs deleted file mode 100644 index 16e2612ed760d1..00000000000000 --- a/vendor/libc/src/unix/haiku/x86_64.rs +++ /dev/null @@ -1,208 +0,0 @@ -use crate::prelude::*; - -s_no_extra_traits! { - pub struct fpu_state { - pub control: c_ushort, - pub status: c_ushort, - pub tag: c_ushort, - pub opcode: c_ushort, - pub rip: c_ulong, - pub rdp: c_ulong, - pub mxcsr: c_uint, - pub mscsr_mask: c_uint, - pub _fpreg: [[c_uchar; 8]; 16], - pub _xmm: [[c_uchar; 16]; 16], - pub _reserved_416_511: [c_uchar; 96], - } - - pub struct xstate_hdr { - pub bv: c_ulong, - pub xcomp_bv: c_ulong, - pub _reserved: [c_uchar; 48], - } - - pub struct savefpu { - pub fp_fxsave: fpu_state, - pub fp_xstate: xstate_hdr, - pub _fp_ymm: [[c_uchar; 16]; 16], - } - - pub struct mcontext_t { - pub rax: c_ulong, - pub rbx: c_ulong, - pub rcx: c_ulong, - pub rdx: c_ulong, - pub rdi: c_ulong, - pub rsi: c_ulong, - pub rbp: c_ulong, - pub r8: c_ulong, - pub r9: c_ulong, - pub r10: c_ulong, - pub r11: c_ulong, - pub r12: c_ulong, - pub r13: c_ulong, - pub r14: c_ulong, - pub r15: c_ulong, - pub rsp: c_ulong, - pub rip: c_ulong, - pub rflags: c_ulong, - pub fpu: savefpu, - } - - pub struct ucontext_t { - pub uc_link: *mut ucontext_t, - pub uc_sigmask: crate::sigset_t, - pub uc_stack: crate::stack_t, - pub uc_mcontext: mcontext_t, - } -} - -cfg_if! { - if #[cfg(feature = "extra_traits")] { - impl PartialEq for fpu_state { - fn eq(&self, other: &fpu_state) -> bool { - self.control == other.control - && self.status == other.status - && self.tag == other.tag - && self.opcode == other.opcode - && self.rip == other.rip - && self.rdp == other.rdp - && self.mxcsr == other.mxcsr - && self.mscsr_mask == other.mscsr_mask - && self - ._fpreg - .iter() - .zip(other._fpreg.iter()) - .all(|(a, b)| a == b) - && self._xmm.iter().zip(other._xmm.iter()).all(|(a, b)| a == b) - && self - ._reserved_416_511 - .iter() - .zip(other._reserved_416_511.iter()) - .all(|(a, b)| a == b) - } - } - impl Eq for fpu_state {} - impl hash::Hash for fpu_state { - fn hash<H: hash::Hasher>(&self, state: &mut H) { - self.control.hash(state); - self.status.hash(state); - self.tag.hash(state); - self.opcode.hash(state); - self.rip.hash(state); - self.rdp.hash(state); - self.mxcsr.hash(state); - self.mscsr_mask.hash(state); - self._fpreg.hash(state); - self._xmm.hash(state); - self._reserved_416_511.hash(state); - } - } - - impl PartialEq for xstate_hdr { - fn eq(&self, other: &xstate_hdr) -> bool { - self.bv == other.bv - && self.xcomp_bv == other.xcomp_bv - && self - ._reserved - .iter() - .zip(other._reserved.iter()) - .all(|(a, b)| a == b) - } - } - impl Eq for xstate_hdr {} - impl hash::Hash for xstate_hdr { - fn hash<H: hash::Hasher>(&self, state: &mut H) { - self.bv.hash(state); - self.xcomp_bv.hash(state); - self._reserved.hash(state); - } - } - - impl PartialEq for savefpu { - fn eq(&self, other: &savefpu) -> bool { - self.fp_fxsave == other.fp_fxsave - && self.fp_xstate == other.fp_xstate - && self - ._fp_ymm - .iter() - .zip(other._fp_ymm.iter()) - .all(|(a, b)| a == b) - } - } - impl Eq for savefpu {} - impl hash::Hash for savefpu { - fn hash<H: hash::Hasher>(&self, 
state: &mut H) { - self.fp_fxsave.hash(state); - self.fp_xstate.hash(state); - self._fp_ymm.hash(state); - } - } - - impl PartialEq for mcontext_t { - fn eq(&self, other: &mcontext_t) -> bool { - self.rax == other.rax - && self.rbx == other.rbx - && self.rbx == other.rbx - && self.rcx == other.rcx - && self.rdx == other.rdx - && self.rdi == other.rdi - && self.rsi == other.rsi - && self.r8 == other.r8 - && self.r9 == other.r9 - && self.r10 == other.r10 - && self.r11 == other.r11 - && self.r12 == other.r12 - && self.r13 == other.r13 - && self.r14 == other.r14 - && self.r15 == other.r15 - && self.rsp == other.rsp - && self.rip == other.rip - && self.rflags == other.rflags - && self.fpu == other.fpu - } - } - impl Eq for mcontext_t {} - impl hash::Hash for mcontext_t { - fn hash<H: hash::Hasher>(&self, state: &mut H) { - self.rax.hash(state); - self.rbx.hash(state); - self.rcx.hash(state); - self.rdx.hash(state); - self.rdi.hash(state); - self.rsi.hash(state); - self.rbp.hash(state); - self.r8.hash(state); - self.r9.hash(state); - self.r10.hash(state); - self.r11.hash(state); - self.r12.hash(state); - self.r13.hash(state); - self.r14.hash(state); - self.r15.hash(state); - self.rsp.hash(state); - self.rip.hash(state); - self.rflags.hash(state); - self.fpu.hash(state); - } - } - - impl PartialEq for ucontext_t { - fn eq(&self, other: &ucontext_t) -> bool { - self.uc_link == other.uc_link - && self.uc_sigmask == other.uc_sigmask - && self.uc_stack == other.uc_stack - && self.uc_mcontext == other.uc_mcontext - } - } - impl Eq for ucontext_t {} - impl hash::Hash for ucontext_t { - fn hash<H: hash::Hasher>(&self, state: &mut H) { - self.uc_link.hash(state); - self.uc_sigmask.hash(state); - self.uc_stack.hash(state); - self.uc_mcontext.hash(state); - } - } - } -} diff --git a/vendor/libc/src/unix/hurd/b32.rs b/vendor/libc/src/unix/hurd/b32.rs deleted file mode 100644 index e706789006dbaa..00000000000000 --- a/vendor/libc/src/unix/hurd/b32.rs +++ /dev/null @@ -1,92 +0,0 @@ -use crate::prelude::*; - -pub type __int64_t = c_longlong; -pub type __uint64_t = c_ulonglong; - -pub type int_fast16_t = c_int; -pub type int_fast32_t = c_int; -pub type int_fast64_t = c_longlong; -pub type uint_fast16_t = c_uint; -pub type uint_fast32_t = c_uint; -pub type uint_fast64_t = c_ulonglong; - -pub type __quad_t = c_longlong; -pub type __u_quad_t = c_ulonglong; -pub type __intmax_t = c_longlong; -pub type __uintmax_t = c_ulonglong; - -pub type __squad_type = crate::__int64_t; -pub type __uquad_type = crate::__uint64_t; -pub type __sword_type = c_int; -pub type __uword_type = c_uint; -pub type __slong32_type = c_long; -pub type __ulong32_type = c_ulong; -pub type __s64_type = crate::__int64_t; -pub type __u64_type = crate::__uint64_t; - -pub type __ipc_pid_t = c_ushort; - -pub type Elf32_Half = u16; -pub type Elf32_Word = u32; -pub type Elf32_Off = u32; -pub type Elf32_Addr = u32; -pub type Elf32_Section = u16; - -pub type Elf_Addr = crate::Elf32_Addr; -pub type Elf_Half = crate::Elf32_Half; -pub type Elf_Ehdr = crate::Elf32_Ehdr; -pub type Elf_Phdr = crate::Elf32_Phdr; -pub type Elf_Shdr = crate::Elf32_Shdr; -pub type Elf_Sym = crate::Elf32_Sym; - -s! 
{ - pub struct Elf32_Ehdr { - pub e_ident: [c_uchar; 16], - pub e_type: Elf32_Half, - pub e_machine: Elf32_Half, - pub e_version: Elf32_Word, - pub e_entry: Elf32_Addr, - pub e_phoff: Elf32_Off, - pub e_shoff: Elf32_Off, - pub e_flags: Elf32_Word, - pub e_ehsize: Elf32_Half, - pub e_phentsize: Elf32_Half, - pub e_phnum: Elf32_Half, - pub e_shentsize: Elf32_Half, - pub e_shnum: Elf32_Half, - pub e_shstrndx: Elf32_Half, - } - - pub struct Elf32_Shdr { - pub sh_name: Elf32_Word, - pub sh_type: Elf32_Word, - pub sh_flags: Elf32_Word, - pub sh_addr: Elf32_Addr, - pub sh_offset: Elf32_Off, - pub sh_size: Elf32_Word, - pub sh_link: Elf32_Word, - pub sh_info: Elf32_Word, - pub sh_addralign: Elf32_Word, - pub sh_entsize: Elf32_Word, - } - - pub struct Elf32_Sym { - pub st_name: Elf32_Word, - pub st_value: Elf32_Addr, - pub st_size: Elf32_Word, - pub st_info: c_uchar, - pub st_other: c_uchar, - pub st_shndx: Elf32_Section, - } - - pub struct Elf32_Phdr { - pub p_type: crate::Elf32_Word, - pub p_offset: crate::Elf32_Off, - pub p_vaddr: crate::Elf32_Addr, - pub p_paddr: crate::Elf32_Addr, - pub p_filesz: crate::Elf32_Word, - pub p_memsz: crate::Elf32_Word, - pub p_flags: crate::Elf32_Word, - pub p_align: crate::Elf32_Word, - } -} diff --git a/vendor/libc/src/unix/hurd/b64.rs b/vendor/libc/src/unix/hurd/b64.rs deleted file mode 100644 index a44428c575adfc..00000000000000 --- a/vendor/libc/src/unix/hurd/b64.rs +++ /dev/null @@ -1,94 +0,0 @@ -use crate::prelude::*; - -pub type __int64_t = c_long; -pub type __uint64_t = c_ulong; - -pub type int_fast16_t = c_long; -pub type int_fast32_t = c_long; -pub type int_fast64_t = c_long; -pub type uint_fast16_t = c_ulong; -pub type uint_fast32_t = c_ulong; -pub type uint_fast64_t = c_ulong; - -pub type __quad_t = c_long; -pub type __u_quad_t = c_ulong; -pub type __intmax_t = c_long; -pub type __uintmax_t = c_ulong; - -pub type __squad_type = c_long; -pub type __uquad_type = c_ulong; -pub type __sword_type = c_long; -pub type __uword_type = c_ulong; -pub type __slong32_type = c_int; -pub type __ulong32_type = c_uint; -pub type __s64_type = c_long; -pub type __u64_type = c_ulong; - -pub type __ipc_pid_t = c_int; - -pub type Elf64_Half = u16; -pub type Elf64_Word = u32; -pub type Elf64_Off = u64; -pub type Elf64_Addr = u64; -pub type Elf64_Xword = u64; -pub type Elf64_Sxword = i64; -pub type Elf64_Section = u16; - -pub type Elf_Addr = crate::Elf64_Addr; -pub type Elf_Half = crate::Elf64_Half; -pub type Elf_Ehdr = crate::Elf64_Ehdr; -pub type Elf_Phdr = crate::Elf64_Phdr; -pub type Elf_Shdr = crate::Elf64_Shdr; -pub type Elf_Sym = crate::Elf64_Sym; - -s! 
{ - pub struct Elf64_Ehdr { - pub e_ident: [c_uchar; 16], - pub e_type: Elf64_Half, - pub e_machine: Elf64_Half, - pub e_version: Elf64_Word, - pub e_entry: Elf64_Addr, - pub e_phoff: Elf64_Off, - pub e_shoff: Elf64_Off, - pub e_flags: Elf64_Word, - pub e_ehsize: Elf64_Half, - pub e_phentsize: Elf64_Half, - pub e_phnum: Elf64_Half, - pub e_shentsize: Elf64_Half, - pub e_shnum: Elf64_Half, - pub e_shstrndx: Elf64_Half, - } - - pub struct Elf64_Shdr { - pub sh_name: Elf64_Word, - pub sh_type: Elf64_Word, - pub sh_flags: Elf64_Xword, - pub sh_addr: Elf64_Addr, - pub sh_offset: Elf64_Off, - pub sh_size: Elf64_Xword, - pub sh_link: Elf64_Word, - pub sh_info: Elf64_Word, - pub sh_addralign: Elf64_Xword, - pub sh_entsize: Elf64_Xword, - } - - pub struct Elf64_Sym { - pub st_name: Elf64_Word, - pub st_info: c_uchar, - pub st_other: c_uchar, - pub st_shndx: Elf64_Section, - pub st_value: Elf64_Addr, - pub st_size: Elf64_Xword, - } - - pub struct Elf64_Phdr { - pub p_type: crate::Elf64_Word, - pub p_flags: crate::Elf64_Word, - pub p_offset: crate::Elf64_Off, - pub p_vaddr: crate::Elf64_Addr, - pub p_paddr: crate::Elf64_Addr, - pub p_filesz: crate::Elf64_Xword, - pub p_memsz: crate::Elf64_Xword, - pub p_align: crate::Elf64_Xword, - } -} diff --git a/vendor/libc/src/unix/hurd/mod.rs b/vendor/libc/src/unix/hurd/mod.rs deleted file mode 100644 index 24e9fe56f392dc..00000000000000 --- a/vendor/libc/src/unix/hurd/mod.rs +++ /dev/null @@ -1,4623 +0,0 @@ -#![allow(dead_code)] - -use crate::c_schar; -use crate::prelude::*; - -// types -pub type __s16_type = c_short; -pub type __u16_type = c_ushort; -pub type __s32_type = c_int; -pub type __u32_type = c_uint; -pub type __slongword_type = c_long; -pub type __ulongword_type = c_ulong; - -pub type __u_char = c_uchar; -pub type __u_short = c_ushort; -pub type __u_int = c_uint; -pub type __u_long = c_ulong; -pub type __int8_t = c_schar; -pub type __uint8_t = c_uchar; -pub type __int16_t = c_short; -pub type __uint16_t = c_ushort; -pub type __int32_t = c_int; -pub type __uint32_t = c_uint; -pub type __int_least8_t = __int8_t; -pub type __uint_least8_t = __uint8_t; -pub type __int_least16_t = __int16_t; -pub type __uint_least16_t = __uint16_t; -pub type __int_least32_t = __int32_t; -pub type __uint_least32_t = __uint32_t; -pub type __int_least64_t = __int64_t; -pub type __uint_least64_t = __uint64_t; - -pub type __dev_t = __uword_type; -pub type __uid_t = __u32_type; -pub type __gid_t = __u32_type; -pub type __ino_t = __ulongword_type; -pub type __ino64_t = __uquad_type; -pub type __mode_t = __u32_type; -pub type __nlink_t = __uword_type; -pub type __off_t = __slongword_type; -pub type __off64_t = __squad_type; -pub type __pid_t = __s32_type; -pub type __rlim_t = __ulongword_type; -pub type __rlim64_t = __uquad_type; -pub type __blkcnt_t = __slongword_type; -pub type __blkcnt64_t = __squad_type; -pub type __fsblkcnt_t = __ulongword_type; -pub type __fsblkcnt64_t = __uquad_type; -pub type __fsfilcnt_t = __ulongword_type; -pub type __fsfilcnt64_t = __uquad_type; -pub type __fsword_t = __sword_type; -pub type __id_t = __u32_type; -pub type __clock_t = __slongword_type; -pub type __time_t = __slongword_type; -pub type __useconds_t = __u32_type; -pub type __suseconds_t = __slongword_type; -pub type __suseconds64_t = __squad_type; -pub type __daddr_t = __s32_type; -pub type __key_t = __s32_type; -pub type __clockid_t = __s32_type; -pub type __timer_t = __uword_type; -pub type __blksize_t = __slongword_type; -pub type __fsid_t = __uquad_type; -pub type __ssize_t = 
__sword_type; -pub type __syscall_slong_t = __slongword_type; -pub type __syscall_ulong_t = __ulongword_type; -pub type __cpu_mask = __ulongword_type; - -pub type __loff_t = __off64_t; -pub type __caddr_t = *mut c_char; -pub type __intptr_t = __sword_type; -pub type __ptrdiff_t = __sword_type; -pub type __socklen_t = __u32_type; -pub type __sig_atomic_t = c_int; -pub type __time64_t = __int64_t; -pub type wchar_t = c_int; -pub type wint_t = c_uint; -pub type gid_t = __gid_t; -pub type uid_t = __uid_t; -pub type off_t = __off_t; -pub type off64_t = __off64_t; -pub type useconds_t = __useconds_t; -pub type pid_t = __pid_t; -pub type socklen_t = __socklen_t; - -pub type in_addr_t = u32; - -pub type _Float32 = f32; -pub type _Float64 = f64; -pub type _Float32x = f64; -pub type _Float64x = f64; - -pub type __locale_t = *mut __locale_struct; -pub type locale_t = __locale_t; - -pub type u_char = __u_char; -pub type u_short = __u_short; -pub type u_int = __u_int; -pub type u_long = __u_long; -pub type quad_t = __quad_t; -pub type u_quad_t = __u_quad_t; -pub type fsid_t = __fsid_t; -pub type loff_t = __loff_t; -pub type ino_t = __ino_t; -pub type ino64_t = __ino64_t; -pub type dev_t = __dev_t; -pub type mode_t = __mode_t; -pub type nlink_t = __nlink_t; -pub type id_t = __id_t; -pub type daddr_t = __daddr_t; -pub type caddr_t = __caddr_t; -pub type key_t = __key_t; -pub type clock_t = __clock_t; -pub type clockid_t = __clockid_t; -pub type time_t = __time_t; -pub type timer_t = __timer_t; -pub type suseconds_t = __suseconds_t; -pub type ulong = c_ulong; -pub type ushort = c_ushort; -pub type uint = c_uint; -pub type u_int8_t = __uint8_t; -pub type u_int16_t = __uint16_t; -pub type u_int32_t = __uint32_t; -pub type u_int64_t = __uint64_t; -pub type register_t = c_int; -pub type __sigset_t = c_ulong; -pub type sigset_t = __sigset_t; - -pub type __fd_mask = c_long; -pub type fd_mask = __fd_mask; -pub type blksize_t = __blksize_t; -pub type blkcnt_t = __blkcnt_t; -pub type fsblkcnt_t = __fsblkcnt_t; -pub type fsfilcnt_t = __fsfilcnt_t; -pub type blkcnt64_t = __blkcnt64_t; -pub type fsblkcnt64_t = __fsblkcnt64_t; -pub type fsfilcnt64_t = __fsfilcnt64_t; - -pub type __pthread_spinlock_t = c_int; -pub type __tss_t = c_int; -pub type __thrd_t = c_long; -pub type __pthread_t = c_long; -pub type pthread_t = __pthread_t; -pub type __pthread_process_shared = c_uint; -pub type __pthread_inheritsched = c_uint; -pub type __pthread_contentionscope = c_uint; -pub type __pthread_detachstate = c_uint; -pub type pthread_attr_t = __pthread_attr; -pub type __pthread_mutex_protocol = c_uint; -pub type __pthread_mutex_type = c_uint; -pub type __pthread_mutex_robustness = c_uint; -pub type pthread_mutexattr_t = __pthread_mutexattr; -pub type pthread_mutex_t = __pthread_mutex; -pub type pthread_condattr_t = __pthread_condattr; -pub type pthread_cond_t = __pthread_cond; -pub type pthread_spinlock_t = __pthread_spinlock_t; -pub type pthread_rwlockattr_t = __pthread_rwlockattr; -pub type pthread_rwlock_t = __pthread_rwlock; -pub type pthread_barrierattr_t = __pthread_barrierattr; -pub type pthread_barrier_t = __pthread_barrier; -pub type __pthread_key = c_int; -pub type pthread_key_t = __pthread_key; -pub type pthread_once_t = __pthread_once; - -pub type __rlimit_resource = c_uint; -pub type __rlimit_resource_t = __rlimit_resource; -pub type rlim_t = __rlim_t; -pub type rlim64_t = __rlim64_t; - -pub type __rusage_who = c_int; - -pub type __priority_which = c_uint; - -pub type sa_family_t = c_uchar; - -pub type in_port_t = u16; 
- -pub type __sigval_t = crate::sigval; - -pub type sigevent_t = sigevent; - -pub type nfds_t = c_ulong; - -pub type tcflag_t = c_uint; -pub type cc_t = c_uchar; -pub type speed_t = c_int; - -pub type sigval_t = crate::sigval; - -pub type greg_t = c_int; -pub type gregset_t = [greg_t; 19usize]; - -pub type __ioctl_dir = c_uint; - -pub type __ioctl_datum = c_uint; - -pub type __error_t_codes = c_int; - -pub type int_least8_t = __int_least8_t; -pub type int_least16_t = __int_least16_t; -pub type int_least32_t = __int_least32_t; -pub type int_least64_t = __int_least64_t; -pub type uint_least8_t = __uint_least8_t; -pub type uint_least16_t = __uint_least16_t; -pub type uint_least32_t = __uint_least32_t; -pub type uint_least64_t = __uint_least64_t; -pub type int_fast8_t = c_schar; -pub type uint_fast8_t = c_uchar; -pub type intmax_t = __intmax_t; -pub type uintmax_t = __uintmax_t; - -pub type tcp_seq = u32; - -pub type tcp_ca_state = c_uint; - -pub type idtype_t = c_uint; - -pub type mqd_t = c_int; - -pub type Lmid_t = c_long; - -pub type regoff_t = c_int; - -pub type nl_item = c_int; - -pub type iconv_t = *mut c_void; - -#[derive(Debug)] -pub enum fpos64_t {} // FIXME(hurd): fill this out with a struct -impl Copy for fpos64_t {} -impl Clone for fpos64_t { - fn clone(&self) -> fpos64_t { - *self - } -} - -#[derive(Debug)] -pub enum timezone {} -impl Copy for timezone {} -impl Clone for timezone { - fn clone(&self) -> timezone { - *self - } -} - -// structs -s! { - pub struct ip_mreq { - pub imr_multiaddr: in_addr, - pub imr_interface: in_addr, - } - - pub struct ip_mreqn { - pub imr_multiaddr: in_addr, - pub imr_address: in_addr, - pub imr_ifindex: c_int, - } - - pub struct ip_mreq_source { - pub imr_multiaddr: in_addr, - pub imr_interface: in_addr, - pub imr_sourceaddr: in_addr, - } - - pub struct sockaddr { - pub sa_len: c_uchar, - pub sa_family: sa_family_t, - pub sa_data: [c_char; 14usize], - } - - pub struct in_addr { - pub s_addr: in_addr_t, - } - - pub struct sockaddr_in { - pub sin_len: c_uchar, - pub sin_family: sa_family_t, - pub sin_port: in_port_t, - pub sin_addr: crate::in_addr, - pub sin_zero: [c_uchar; 8usize], - } - - pub struct sockaddr_in6 { - pub sin6_len: c_uchar, - pub sin6_family: sa_family_t, - pub sin6_port: in_port_t, - pub sin6_flowinfo: u32, - pub sin6_addr: crate::in6_addr, - pub sin6_scope_id: u32, - } - - pub struct sockaddr_un { - pub sun_len: c_uchar, - pub sun_family: sa_family_t, - pub sun_path: [c_char; 108usize], - } - - pub struct sockaddr_storage { - pub ss_len: c_uchar, - pub ss_family: sa_family_t, - pub __ss_padding: [c_char; 122usize], - pub __ss_align: __uint32_t, - } - - pub struct sockaddr_at { - pub _address: u8, - } - - pub struct sockaddr_ax25 { - pub _address: u8, - } - - pub struct sockaddr_x25 { - pub _address: u8, - } - - pub struct sockaddr_dl { - pub _address: u8, - } - pub struct sockaddr_eon { - pub _address: u8, - } - pub struct sockaddr_inarp { - pub _address: u8, - } - - pub struct sockaddr_ipx { - pub _address: u8, - } - pub struct sockaddr_iso { - pub _address: u8, - } - - pub struct sockaddr_ns { - pub _address: u8, - } - - pub struct addrinfo { - pub ai_flags: c_int, - pub ai_family: c_int, - pub ai_socktype: c_int, - pub ai_protocol: c_int, - pub ai_addrlen: crate::socklen_t, - pub ai_addr: *mut sockaddr, - pub ai_canonname: *mut c_char, - pub ai_next: *mut addrinfo, - } - - pub struct msghdr { - pub msg_name: *mut c_void, - pub msg_namelen: crate::socklen_t, - pub msg_iov: *mut crate::iovec, - pub msg_iovlen: c_int, - pub 
msg_control: *mut c_void, - pub msg_controllen: crate::socklen_t, - pub msg_flags: c_int, - } - - pub struct cmsghdr { - pub cmsg_len: crate::socklen_t, - pub cmsg_level: c_int, - pub cmsg_type: c_int, - } - - pub struct dirent { - pub d_ino: __ino_t, - pub d_reclen: c_ushort, - pub d_type: c_uchar, - pub d_namlen: c_uchar, - pub d_name: [c_char; 1usize], - } - - pub struct dirent64 { - pub d_ino: __ino64_t, - pub d_reclen: c_ushort, - pub d_type: c_uchar, - pub d_namlen: c_uchar, - pub d_name: [c_char; 1usize], - } - - pub struct fd_set { - pub fds_bits: [__fd_mask; 8usize], - } - - pub struct termios { - pub c_iflag: crate::tcflag_t, - pub c_oflag: crate::tcflag_t, - pub c_cflag: crate::tcflag_t, - pub c_lflag: crate::tcflag_t, - pub c_cc: [crate::cc_t; 20usize], - pub __ispeed: crate::speed_t, - pub __ospeed: crate::speed_t, - } - - pub struct mallinfo { - pub arena: c_int, - pub ordblks: c_int, - pub smblks: c_int, - pub hblks: c_int, - pub hblkhd: c_int, - pub usmblks: c_int, - pub fsmblks: c_int, - pub uordblks: c_int, - pub fordblks: c_int, - pub keepcost: c_int, - } - - pub struct mallinfo2 { - pub arena: size_t, - pub ordblks: size_t, - pub smblks: size_t, - pub hblks: size_t, - pub hblkhd: size_t, - pub usmblks: size_t, - pub fsmblks: size_t, - pub uordblks: size_t, - pub fordblks: size_t, - pub keepcost: size_t, - } - - pub struct sigaction { - pub sa_sigaction: crate::sighandler_t, - pub sa_mask: __sigset_t, - pub sa_flags: c_int, - } - - pub struct sigevent { - pub sigev_value: crate::sigval, - pub sigev_signo: c_int, - pub sigev_notify: c_int, - __unused1: *mut c_void, //actually a function pointer - pub sigev_notify_attributes: *mut pthread_attr_t, - } - - pub struct siginfo_t { - pub si_signo: c_int, - pub si_errno: c_int, - pub si_code: c_int, - pub si_pid: __pid_t, - pub si_uid: __uid_t, - pub si_addr: *mut c_void, - pub si_status: c_int, - pub si_band: c_long, - pub si_value: crate::sigval, - } - - pub struct timespec { - pub tv_sec: __time_t, - pub tv_nsec: __syscall_slong_t, - } - - pub struct __timeval { - pub tv_sec: i32, - pub tv_usec: i32, - } - - pub struct __locale_data { - pub _address: u8, - } - - pub struct stat { - pub st_fstype: c_int, - pub st_dev: __fsid_t, /* Actually st_fsid */ - pub st_ino: __ino_t, - pub st_gen: c_uint, - pub st_rdev: __dev_t, - pub st_mode: __mode_t, - pub st_nlink: __nlink_t, - pub st_uid: __uid_t, - pub st_gid: __gid_t, - pub st_size: __off_t, - pub st_atim: crate::timespec, - pub st_mtim: crate::timespec, - pub st_ctim: crate::timespec, - pub st_blksize: __blksize_t, - pub st_blocks: __blkcnt_t, - pub st_author: __uid_t, - pub st_flags: c_uint, - pub st_spare: [c_int; 11usize], - } - - pub struct stat64 { - pub st_fstype: c_int, - pub st_dev: __fsid_t, /* Actually st_fsid */ - pub st_ino: __ino64_t, - pub st_gen: c_uint, - pub st_rdev: __dev_t, - pub st_mode: __mode_t, - pub st_nlink: __nlink_t, - pub st_uid: __uid_t, - pub st_gid: __gid_t, - pub st_size: __off64_t, - pub st_atim: crate::timespec, - pub st_mtim: crate::timespec, - pub st_ctim: crate::timespec, - pub st_blksize: __blksize_t, - pub st_blocks: __blkcnt64_t, - pub st_author: __uid_t, - pub st_flags: c_uint, - pub st_spare: [c_int; 8usize], - } - - pub struct statx { - pub stx_mask: u32, - pub stx_blksize: u32, - pub stx_attributes: u64, - pub stx_nlink: u32, - pub stx_uid: u32, - pub stx_gid: u32, - pub stx_mode: u16, - __statx_pad1: [u16; 1], - pub stx_ino: u64, - pub stx_size: u64, - pub stx_blocks: u64, - pub stx_attributes_mask: u64, - pub stx_atime: 
crate::statx_timestamp, - pub stx_btime: crate::statx_timestamp, - pub stx_ctime: crate::statx_timestamp, - pub stx_mtime: crate::statx_timestamp, - pub stx_rdev_major: u32, - pub stx_rdev_minor: u32, - pub stx_dev_major: u32, - pub stx_dev_minor: u32, - __statx_pad2: [u64; 14], - } - - pub struct statx_timestamp { - pub tv_sec: i64, - pub tv_nsec: u32, - pub __statx_timestamp_pad1: [i32; 1], - } - - pub struct statfs { - pub f_type: c_uint, - pub f_bsize: c_ulong, - pub f_blocks: __fsblkcnt_t, - pub f_bfree: __fsblkcnt_t, - pub f_bavail: __fsblkcnt_t, - pub f_files: __fsblkcnt_t, - pub f_ffree: __fsblkcnt_t, - pub f_fsid: __fsid_t, - pub f_namelen: c_ulong, - pub f_favail: __fsfilcnt_t, - pub f_frsize: c_ulong, - pub f_flag: c_ulong, - pub f_spare: [c_uint; 3usize], - } - - pub struct statfs64 { - pub f_type: c_uint, - pub f_bsize: c_ulong, - pub f_blocks: __fsblkcnt64_t, - pub f_bfree: __fsblkcnt64_t, - pub f_bavail: __fsblkcnt64_t, - pub f_files: __fsblkcnt64_t, - pub f_ffree: __fsblkcnt64_t, - pub f_fsid: __fsid_t, - pub f_namelen: c_ulong, - pub f_favail: __fsfilcnt64_t, - pub f_frsize: c_ulong, - pub f_flag: c_ulong, - pub f_spare: [c_uint; 3usize], - } - - pub struct statvfs { - pub __f_type: c_uint, - pub f_bsize: c_ulong, - pub f_blocks: __fsblkcnt_t, - pub f_bfree: __fsblkcnt_t, - pub f_bavail: __fsblkcnt_t, - pub f_files: __fsfilcnt_t, - pub f_ffree: __fsfilcnt_t, - pub f_fsid: __fsid_t, - pub f_namemax: c_ulong, - pub f_favail: __fsfilcnt_t, - pub f_frsize: c_ulong, - pub f_flag: c_ulong, - pub f_spare: [c_uint; 3usize], - } - - pub struct statvfs64 { - pub __f_type: c_uint, - pub f_bsize: c_ulong, - pub f_blocks: __fsblkcnt64_t, - pub f_bfree: __fsblkcnt64_t, - pub f_bavail: __fsblkcnt64_t, - pub f_files: __fsfilcnt64_t, - pub f_ffree: __fsfilcnt64_t, - pub f_fsid: __fsid_t, - pub f_namemax: c_ulong, - pub f_favail: __fsfilcnt64_t, - pub f_frsize: c_ulong, - pub f_flag: c_ulong, - pub f_spare: [c_uint; 3usize], - } - - pub struct aiocb { - pub aio_fildes: c_int, - pub aio_lio_opcode: c_int, - pub aio_reqprio: c_int, - pub aio_buf: *mut c_void, - pub aio_nbytes: size_t, - pub aio_sigevent: crate::sigevent, - __next_prio: *mut aiocb, - __abs_prio: c_int, - __policy: c_int, - __error_code: c_int, - __return_value: ssize_t, - pub aio_offset: off_t, - #[cfg(all(not(target_arch = "x86_64"), target_pointer_width = "32"))] - __unused1: [c_char; 4], - __glibc_reserved: [c_char; 32], - } - - pub struct mq_attr { - pub mq_flags: c_long, - pub mq_maxmsg: c_long, - pub mq_msgsize: c_long, - pub mq_curmsgs: c_long, - } - - pub struct __exit_status { - pub e_termination: c_short, - pub e_exit: c_short, - } - - #[cfg_attr(target_pointer_width = "32", repr(align(4)))] - #[cfg_attr(target_pointer_width = "64", repr(align(8)))] - pub struct sem_t { - __size: [c_char; 20usize], - } - - pub struct __pthread { - pub _address: u8, - } - - pub struct __pthread_mutexattr { - pub __prioceiling: c_int, - pub __protocol: __pthread_mutex_protocol, - pub __pshared: __pthread_process_shared, - pub __mutex_type: __pthread_mutex_type, - } - pub struct __pthread_mutex { - pub __lock: c_uint, - pub __owner_id: c_uint, - pub __cnt: c_uint, - pub __shpid: c_int, - pub __type: c_int, - pub __flags: c_int, - pub __reserved1: c_uint, - pub __reserved2: c_uint, - } - - pub struct __pthread_condattr { - pub __pshared: __pthread_process_shared, - pub __clock: __clockid_t, - } - - pub struct __pthread_rwlockattr { - pub __pshared: __pthread_process_shared, - } - - pub struct __pthread_barrierattr { - pub __pshared: 
__pthread_process_shared, - } - - pub struct __pthread_once { - pub __run: c_int, - pub __lock: __pthread_spinlock_t, - } - - pub struct __pthread_cond { - pub __lock: __pthread_spinlock_t, - pub __queue: *mut __pthread, - pub __attr: *mut __pthread_condattr, - pub __wrefs: c_uint, - pub __data: *mut c_void, - } - - pub struct __pthread_attr { - pub __schedparam: sched_param, - pub __stackaddr: *mut c_void, - pub __stacksize: size_t, - pub __guardsize: size_t, - pub __detachstate: __pthread_detachstate, - pub __inheritsched: __pthread_inheritsched, - pub __contentionscope: __pthread_contentionscope, - pub __schedpolicy: c_int, - } - - pub struct __pthread_rwlock { - pub __held: __pthread_spinlock_t, - pub __lock: __pthread_spinlock_t, - pub __readers: c_int, - pub __readerqueue: *mut __pthread, - pub __writerqueue: *mut __pthread, - pub __attr: *mut __pthread_rwlockattr, - pub __data: *mut c_void, - } - - pub struct __pthread_barrier { - pub __lock: __pthread_spinlock_t, - pub __queue: *mut __pthread, - pub __pending: c_uint, - pub __count: c_uint, - pub __attr: *mut __pthread_barrierattr, - pub __data: *mut c_void, - } - - pub struct seminfo { - pub semmap: c_int, - pub semmni: c_int, - pub semmns: c_int, - pub semmnu: c_int, - pub semmsl: c_int, - pub semopm: c_int, - pub semume: c_int, - pub semusz: c_int, - pub semvmx: c_int, - pub semaem: c_int, - } - - pub struct _IO_FILE { - _unused: [u8; 0], - } - - pub struct sched_param { - pub sched_priority: c_int, - } - - pub struct iovec { - pub iov_base: *mut c_void, - pub iov_len: size_t, - } - - pub struct passwd { - pub pw_name: *mut c_char, - pub pw_passwd: *mut c_char, - pub pw_uid: __uid_t, - pub pw_gid: __gid_t, - pub pw_gecos: *mut c_char, - pub pw_dir: *mut c_char, - pub pw_shell: *mut c_char, - } - - pub struct spwd { - pub sp_namp: *mut c_char, - pub sp_pwdp: *mut c_char, - pub sp_lstchg: c_long, - pub sp_min: c_long, - pub sp_max: c_long, - pub sp_warn: c_long, - pub sp_inact: c_long, - pub sp_expire: c_long, - pub sp_flag: c_ulong, - } - - pub struct itimerspec { - pub it_interval: crate::timespec, - pub it_value: crate::timespec, - } - - pub struct tm { - pub tm_sec: c_int, - pub tm_min: c_int, - pub tm_hour: c_int, - pub tm_mday: c_int, - pub tm_mon: c_int, - pub tm_year: c_int, - pub tm_wday: c_int, - pub tm_yday: c_int, - pub tm_isdst: c_int, - pub tm_gmtoff: c_long, - pub tm_zone: *const c_char, - } - - pub struct lconv { - pub decimal_point: *mut c_char, - pub thousands_sep: *mut c_char, - pub grouping: *mut c_char, - pub int_curr_symbol: *mut c_char, - pub currency_symbol: *mut c_char, - pub mon_decimal_point: *mut c_char, - pub mon_thousands_sep: *mut c_char, - pub mon_grouping: *mut c_char, - pub positive_sign: *mut c_char, - pub negative_sign: *mut c_char, - pub int_frac_digits: c_char, - pub frac_digits: c_char, - pub p_cs_precedes: c_char, - pub p_sep_by_space: c_char, - pub n_cs_precedes: c_char, - pub n_sep_by_space: c_char, - pub p_sign_posn: c_char, - pub n_sign_posn: c_char, - pub int_p_cs_precedes: c_char, - pub int_p_sep_by_space: c_char, - pub int_n_cs_precedes: c_char, - pub int_n_sep_by_space: c_char, - pub int_p_sign_posn: c_char, - pub int_n_sign_posn: c_char, - } - - pub struct Dl_info { - pub dli_fname: *const c_char, - pub dli_fbase: *mut c_void, - pub dli_sname: *const c_char, - pub dli_saddr: *mut c_void, - } - - pub struct ifaddrs { - pub ifa_next: *mut ifaddrs, - pub ifa_name: *mut c_char, - pub ifa_flags: c_uint, - pub ifa_addr: *mut crate::sockaddr, - pub ifa_netmask: *mut crate::sockaddr, - pub 
ifa_ifu: *mut crate::sockaddr, // FIXME(union) This should be a union - pub ifa_data: *mut c_void, - } - - pub struct arpreq { - pub arp_pa: crate::sockaddr, - pub arp_ha: crate::sockaddr, - pub arp_flags: c_int, - pub arp_netmask: crate::sockaddr, - pub arp_dev: [c_char; 16], - } - - pub struct arpreq_old { - pub arp_pa: crate::sockaddr, - pub arp_ha: crate::sockaddr, - pub arp_flags: c_int, - pub arp_netmask: crate::sockaddr, - } - - pub struct arphdr { - pub ar_hrd: u16, - pub ar_pro: u16, - pub ar_hln: u8, - pub ar_pln: u8, - pub ar_op: u16, - } - - pub struct arpd_request { - pub req: c_ushort, - pub ip: u32, - pub dev: c_ulong, - pub stamp: c_ulong, - pub updated: c_ulong, - pub ha: [c_uchar; crate::MAX_ADDR_LEN], - } - - pub struct mmsghdr { - pub msg_hdr: crate::msghdr, - pub msg_len: c_uint, - } - - pub struct ifreq { - /// interface name, e.g. "en0" - pub ifr_name: [c_char; crate::IFNAMSIZ], - pub ifr_ifru: crate::sockaddr, - } - - pub struct __locale_struct { - pub __locales: [*mut __locale_data; 13usize], - pub __ctype_b: *const c_ushort, - pub __ctype_tolower: *const c_int, - pub __ctype_toupper: *const c_int, - pub __names: [*const c_char; 13usize], - } - - pub struct utsname { - pub sysname: [c_char; _UTSNAME_LENGTH], - pub nodename: [c_char; _UTSNAME_LENGTH], - pub release: [c_char; _UTSNAME_LENGTH], - pub version: [c_char; _UTSNAME_LENGTH], - pub machine: [c_char; _UTSNAME_LENGTH], - pub domainname: [c_char; _UTSNAME_LENGTH], - } - - pub struct rlimit64 { - pub rlim_cur: rlim64_t, - pub rlim_max: rlim64_t, - } - - pub struct stack_t { - pub ss_sp: *mut c_void, - pub ss_size: size_t, - pub ss_flags: c_int, - } - - pub struct dl_phdr_info { - pub dlpi_addr: Elf_Addr, - pub dlpi_name: *const c_char, - pub dlpi_phdr: *const Elf_Phdr, - pub dlpi_phnum: Elf_Half, - pub dlpi_adds: c_ulonglong, - pub dlpi_subs: c_ulonglong, - pub dlpi_tls_modid: size_t, - pub dlpi_tls_data: *mut c_void, - } - - pub struct flock { - #[cfg(target_pointer_width = "32")] - pub l_type: c_int, - #[cfg(target_pointer_width = "32")] - pub l_whence: c_int, - #[cfg(target_pointer_width = "64")] - pub l_type: c_short, - #[cfg(target_pointer_width = "64")] - pub l_whence: c_short, - pub l_start: __off_t, - pub l_len: __off_t, - pub l_pid: __pid_t, - } - - pub struct flock64 { - #[cfg(target_pointer_width = "32")] - pub l_type: c_int, - #[cfg(target_pointer_width = "32")] - pub l_whence: c_int, - #[cfg(target_pointer_width = "64")] - pub l_type: c_short, - #[cfg(target_pointer_width = "64")] - pub l_whence: c_short, - pub l_start: __off_t, - pub l_len: __off64_t, - pub l_pid: __pid_t, - } - - pub struct glob_t { - pub gl_pathc: size_t, - pub gl_pathv: *mut *mut c_char, - pub gl_offs: size_t, - pub gl_flags: c_int, - - __unused1: *mut c_void, - __unused2: *mut c_void, - __unused3: *mut c_void, - __unused4: *mut c_void, - __unused5: *mut c_void, - } - - pub struct glob64_t { - pub gl_pathc: size_t, - pub gl_pathv: *mut *mut c_char, - pub gl_offs: size_t, - pub gl_flags: c_int, - - __unused1: *mut c_void, - __unused2: *mut c_void, - __unused3: *mut c_void, - __unused4: *mut c_void, - __unused5: *mut c_void, - } - - pub struct regex_t { - __buffer: *mut c_void, - __allocated: size_t, - __used: size_t, - __syntax: c_ulong, - __fastmap: *mut c_char, - __translate: *mut c_char, - __re_nsub: size_t, - __bitfield: u8, - } - - pub struct cpu_set_t { - #[cfg(all(target_pointer_width = "32", not(target_arch = "x86_64")))] - bits: [u32; 32], - #[cfg(not(all(target_pointer_width = "32", not(target_arch = "x86_64"))))] - 
bits: [u64; 16], - } - - pub struct if_nameindex { - pub if_index: c_uint, - pub if_name: *mut c_char, - } - - // System V IPC - pub struct msginfo { - pub msgpool: c_int, - pub msgmap: c_int, - pub msgmax: c_int, - pub msgmnb: c_int, - pub msgmni: c_int, - pub msgssz: c_int, - pub msgtql: c_int, - pub msgseg: c_ushort, - } - - pub struct sembuf { - pub sem_num: c_ushort, - pub sem_op: c_short, - pub sem_flg: c_short, - } - - pub struct mntent { - pub mnt_fsname: *mut c_char, - pub mnt_dir: *mut c_char, - pub mnt_type: *mut c_char, - pub mnt_opts: *mut c_char, - pub mnt_freq: c_int, - pub mnt_passno: c_int, - } - - pub struct posix_spawn_file_actions_t { - __allocated: c_int, - __used: c_int, - __actions: *mut c_int, - __pad: [c_int; 16], - } - - pub struct posix_spawnattr_t { - __flags: c_short, - __pgrp: crate::pid_t, - __sd: crate::sigset_t, - __ss: crate::sigset_t, - __sp: crate::sched_param, - __policy: c_int, - __pad: [c_int; 16], - } - - pub struct regmatch_t { - pub rm_so: regoff_t, - pub rm_eo: regoff_t, - } - - pub struct option { - pub name: *const c_char, - pub has_arg: c_int, - pub flag: *mut c_int, - pub val: c_int, - } -} - -s_no_extra_traits! { - pub struct utmpx { - pub ut_type: c_short, - pub ut_pid: crate::pid_t, - pub ut_line: [c_char; __UT_LINESIZE], - pub ut_id: [c_char; 4], - - pub ut_user: [c_char; __UT_NAMESIZE], - pub ut_host: [c_char; __UT_HOSTSIZE], - pub ut_exit: __exit_status, - - #[cfg(any(all(target_pointer_width = "32", not(target_arch = "x86_64"))))] - pub ut_session: c_long, - #[cfg(any(all(target_pointer_width = "32", not(target_arch = "x86_64"))))] - pub ut_tv: crate::timeval, - - #[cfg(not(any(all(target_pointer_width = "32", not(target_arch = "x86_64")))))] - pub ut_session: i32, - #[cfg(not(any(all(target_pointer_width = "32", not(target_arch = "x86_64")))))] - pub ut_tv: __timeval, - - pub ut_addr_v6: [i32; 4], - __glibc_reserved: [c_char; 20], - } -} - -cfg_if! 
{ - if #[cfg(feature = "extra_traits")] { - impl PartialEq for utmpx { - fn eq(&self, other: &utmpx) -> bool { - self.ut_type == other.ut_type - && self.ut_pid == other.ut_pid - && self.ut_line == other.ut_line - && self.ut_id == other.ut_id - && self.ut_user == other.ut_user - && self - .ut_host - .iter() - .zip(other.ut_host.iter()) - .all(|(a, b)| a == b) - && self.ut_exit == other.ut_exit - && self.ut_session == other.ut_session - && self.ut_tv == other.ut_tv - && self.ut_addr_v6 == other.ut_addr_v6 - && self.__glibc_reserved == other.__glibc_reserved - } - } - - impl Eq for utmpx {} - - impl hash::Hash for utmpx { - fn hash(&self, state: &mut H) { - self.ut_type.hash(state); - self.ut_pid.hash(state); - self.ut_line.hash(state); - self.ut_id.hash(state); - self.ut_user.hash(state); - self.ut_host.hash(state); - self.ut_exit.hash(state); - self.ut_session.hash(state); - self.ut_tv.hash(state); - self.ut_addr_v6.hash(state); - self.__glibc_reserved.hash(state); - } - } - } -} - -impl siginfo_t { - pub unsafe fn si_addr(&self) -> *mut c_void { - self.si_addr - } - - pub unsafe fn si_value(&self) -> crate::sigval { - self.si_value - } - - pub unsafe fn si_pid(&self) -> crate::pid_t { - self.si_pid - } - - pub unsafe fn si_uid(&self) -> crate::uid_t { - self.si_uid - } - - pub unsafe fn si_status(&self) -> c_int { - self.si_status - } -} - -// const - -// aio.h -pub const AIO_CANCELED: c_int = 0; -pub const AIO_NOTCANCELED: c_int = 1; -pub const AIO_ALLDONE: c_int = 2; -pub const LIO_READ: c_int = 0; -pub const LIO_WRITE: c_int = 1; -pub const LIO_NOP: c_int = 2; -pub const LIO_WAIT: c_int = 0; -pub const LIO_NOWAIT: c_int = 1; - -// glob.h -pub const GLOB_ERR: c_int = 1 << 0; -pub const GLOB_MARK: c_int = 1 << 1; -pub const GLOB_NOSORT: c_int = 1 << 2; -pub const GLOB_DOOFFS: c_int = 1 << 3; -pub const GLOB_NOCHECK: c_int = 1 << 4; -pub const GLOB_APPEND: c_int = 1 << 5; -pub const GLOB_NOESCAPE: c_int = 1 << 6; - -pub const GLOB_NOSPACE: c_int = 1; -pub const GLOB_ABORTED: c_int = 2; -pub const GLOB_NOMATCH: c_int = 3; - -pub const GLOB_PERIOD: c_int = 1 << 7; -pub const GLOB_ALTDIRFUNC: c_int = 1 << 9; -pub const GLOB_BRACE: c_int = 1 << 10; -pub const GLOB_NOMAGIC: c_int = 1 << 11; -pub const GLOB_TILDE: c_int = 1 << 12; -pub const GLOB_ONLYDIR: c_int = 1 << 13; -pub const GLOB_TILDE_CHECK: c_int = 1 << 14; - -// ipc.h -pub const IPC_PRIVATE: crate::key_t = 0; - -pub const IPC_CREAT: c_int = 0o1000; -pub const IPC_EXCL: c_int = 0o2000; -pub const IPC_NOWAIT: c_int = 0o4000; - -pub const IPC_RMID: c_int = 0; -pub const IPC_SET: c_int = 1; -pub const IPC_STAT: c_int = 2; -pub const IPC_INFO: c_int = 3; -pub const MSG_STAT: c_int = 11; -pub const MSG_INFO: c_int = 12; - -pub const MSG_NOERROR: c_int = 0o10000; -pub const MSG_EXCEPT: c_int = 0o20000; - -// shm.h -pub const SHM_R: c_int = 0o400; -pub const SHM_W: c_int = 0o200; - -pub const SHM_RDONLY: c_int = 0o10000; -pub const SHM_RND: c_int = 0o20000; -pub const SHM_REMAP: c_int = 0o40000; - -pub const SHM_LOCK: c_int = 11; -pub const SHM_UNLOCK: c_int = 12; -// unistd.h -pub const STDIN_FILENO: c_int = 0; -pub const STDOUT_FILENO: c_int = 1; -pub const STDERR_FILENO: c_int = 2; -pub const __FD_SETSIZE: usize = 256; -pub const R_OK: c_int = 4; -pub const W_OK: c_int = 2; -pub const X_OK: c_int = 1; -pub const F_OK: c_int = 0; -pub const SEEK_SET: c_int = 0; -pub const SEEK_CUR: c_int = 1; -pub const SEEK_END: c_int = 2; -pub const SEEK_DATA: c_int = 3; -pub const SEEK_HOLE: c_int = 4; -pub const L_SET: c_int = 0; -pub const L_INCR: 
c_int = 1; -pub const L_XTND: c_int = 2; -pub const F_ULOCK: c_int = 0; -pub const F_LOCK: c_int = 1; -pub const F_TLOCK: c_int = 2; -pub const F_TEST: c_int = 3; -pub const CLOSE_RANGE_CLOEXEC: c_int = 4; - -// stdio.h -pub const EOF: c_int = -1; - -// stdlib.h -pub const WNOHANG: c_int = 1; -pub const WUNTRACED: c_int = 2; -pub const WSTOPPED: c_int = 2; -pub const WCONTINUED: c_int = 4; -pub const WNOWAIT: c_int = 8; -pub const WEXITED: c_int = 16; -pub const __W_CONTINUED: c_int = 65535; -pub const __WCOREFLAG: c_int = 128; -pub const RAND_MAX: c_int = 2147483647; -pub const EXIT_FAILURE: c_int = 1; -pub const EXIT_SUCCESS: c_int = 0; -pub const __LITTLE_ENDIAN: usize = 1234; -pub const __BIG_ENDIAN: usize = 4321; -pub const __PDP_ENDIAN: usize = 3412; -pub const __BYTE_ORDER: usize = 1234; -pub const __FLOAT_WORD_ORDER: usize = 1234; -pub const LITTLE_ENDIAN: usize = 1234; -pub const BIG_ENDIAN: usize = 4321; -pub const PDP_ENDIAN: usize = 3412; -pub const BYTE_ORDER: usize = 1234; - -// sys/select.h -pub const FD_SETSIZE: usize = 256; -pub const __SIZEOF_PTHREAD_MUTEX_T: usize = 32; -pub const __SIZEOF_PTHREAD_ATTR_T: usize = 32; -pub const __SIZEOF_PTHREAD_RWLOCK_T: usize = 28; -pub const __SIZEOF_PTHREAD_BARRIER_T: usize = 24; -pub const __SIZEOF_PTHREAD_MUTEXATTR_T: usize = 16; -pub const __SIZEOF_PTHREAD_COND_T: usize = 20; -pub const __SIZEOF_PTHREAD_CONDATTR_T: usize = 8; -pub const __SIZEOF_PTHREAD_RWLOCKATTR_T: usize = 4; -pub const __SIZEOF_PTHREAD_BARRIERATTR_T: usize = 4; -pub const __SIZEOF_PTHREAD_ONCE_T: usize = 8; -pub const __PTHREAD_SPIN_LOCK_INITIALIZER: c_int = 0; -pub const PTHREAD_MUTEX_NORMAL: c_int = 0; - -// sys/resource.h -pub const RLIM_INFINITY: crate::rlim_t = 2147483647; -pub const RLIM64_INFINITY: crate::rlim64_t = 9223372036854775807; -pub const RLIM_SAVED_MAX: crate::rlim_t = RLIM_INFINITY; -pub const RLIM_SAVED_CUR: crate::rlim_t = RLIM_INFINITY; -pub const PRIO_MIN: c_int = -20; -pub const PRIO_MAX: c_int = 20; - -// pwd.h -pub const NSS_BUFLEN_PASSWD: usize = 1024; - -// sys/socket.h -pub const SOCK_TYPE_MASK: usize = 15; -pub const PF_UNSPEC: c_int = 0; -pub const PF_LOCAL: c_int = 1; -pub const PF_UNIX: c_int = 1; -pub const PF_FILE: c_int = 1; -pub const PF_INET: c_int = 2; -pub const PF_IMPLINK: c_int = 3; -pub const PF_PUP: c_int = 4; -pub const PF_CHAOS: c_int = 5; -pub const PF_NS: c_int = 6; -pub const PF_ISO: c_int = 7; -pub const PF_OSI: c_int = 7; -pub const PF_ECMA: c_int = 8; -pub const PF_DATAKIT: c_int = 9; -pub const PF_CCITT: c_int = 10; -pub const PF_SNA: c_int = 11; -pub const PF_DECnet: c_int = 12; -pub const PF_DLI: c_int = 13; -pub const PF_LAT: c_int = 14; -pub const PF_HYLINK: c_int = 15; -pub const PF_APPLETALK: c_int = 16; -pub const PF_ROUTE: c_int = 17; -pub const PF_XTP: c_int = 19; -pub const PF_COIP: c_int = 20; -pub const PF_CNT: c_int = 21; -pub const PF_RTIP: c_int = 22; -pub const PF_IPX: c_int = 23; -pub const PF_SIP: c_int = 24; -pub const PF_PIP: c_int = 25; -pub const PF_INET6: c_int = 26; -pub const PF_MAX: c_int = 27; -pub const AF_UNSPEC: c_int = 0; -pub const AF_LOCAL: c_int = 1; -pub const AF_UNIX: c_int = 1; -pub const AF_FILE: c_int = 1; -pub const AF_INET: c_int = 2; -pub const AF_IMPLINK: c_int = 3; -pub const AF_PUP: c_int = 4; -pub const AF_CHAOS: c_int = 5; -pub const AF_NS: c_int = 6; -pub const AF_ISO: c_int = 7; -pub const AF_OSI: c_int = 7; -pub const AF_ECMA: c_int = 8; -pub const AF_DATAKIT: c_int = 9; -pub const AF_CCITT: c_int = 10; -pub const AF_SNA: c_int = 11; -pub const AF_DECnet: c_int 
= 12; -pub const AF_DLI: c_int = 13; -pub const AF_LAT: c_int = 14; -pub const AF_HYLINK: c_int = 15; -pub const AF_APPLETALK: c_int = 16; -pub const AF_ROUTE: c_int = 17; -pub const pseudo_AF_XTP: c_int = 19; -pub const AF_COIP: c_int = 20; -pub const AF_CNT: c_int = 21; -pub const pseudo_AF_RTIP: c_int = 22; -pub const AF_IPX: c_int = 23; -pub const AF_SIP: c_int = 24; -pub const pseudo_AF_PIP: c_int = 25; -pub const AF_INET6: c_int = 26; -pub const AF_MAX: c_int = 27; -pub const SOMAXCONN: c_int = 4096; -pub const _SS_SIZE: usize = 128; -pub const CMGROUP_MAX: usize = 16; -pub const SOL_SOCKET: c_int = 65535; - -// sys/time.h -pub const ITIMER_REAL: c_int = 0; -pub const ITIMER_VIRTUAL: c_int = 1; -pub const ITIMER_PROF: c_int = 2; - -// netinet/in.h -pub const SOL_IP: c_int = 0; -pub const SOL_TCP: c_int = 6; -pub const SOL_UDP: c_int = 17; -pub const SOL_IPV6: c_int = 41; -pub const SOL_ICMPV6: c_int = 58; -pub const IP_OPTIONS: c_int = 1; -pub const IP_HDRINCL: c_int = 2; -pub const IP_TOS: c_int = 3; -pub const IP_TTL: c_int = 4; -pub const IP_RECVOPTS: c_int = 5; -pub const IP_RECVRETOPTS: c_int = 6; -pub const IP_RECVDSTADDR: c_int = 7; -pub const IP_RETOPTS: c_int = 8; -pub const IP_MULTICAST_IF: c_int = 9; -pub const IP_MULTICAST_TTL: c_int = 10; -pub const IP_MULTICAST_LOOP: c_int = 11; -pub const IP_ADD_MEMBERSHIP: c_int = 12; -pub const IP_DROP_MEMBERSHIP: c_int = 13; -pub const IPV6_ADDRFORM: c_int = 1; -pub const IPV6_2292PKTINFO: c_int = 2; -pub const IPV6_2292HOPOPTS: c_int = 3; -pub const IPV6_2292DSTOPTS: c_int = 4; -pub const IPV6_2292RTHDR: c_int = 5; -pub const IPV6_2292PKTOPTIONS: c_int = 6; -pub const IPV6_CHECKSUM: c_int = 7; -pub const IPV6_2292HOPLIMIT: c_int = 8; -pub const IPV6_RXINFO: c_int = 2; -pub const IPV6_TXINFO: c_int = 2; -pub const SCM_SRCINFO: c_int = 2; -pub const IPV6_UNICAST_HOPS: c_int = 16; -pub const IPV6_MULTICAST_IF: c_int = 17; -pub const IPV6_MULTICAST_HOPS: c_int = 18; -pub const IPV6_MULTICAST_LOOP: c_int = 19; -pub const IPV6_JOIN_GROUP: c_int = 20; -pub const IPV6_LEAVE_GROUP: c_int = 21; -pub const IPV6_ROUTER_ALERT: c_int = 22; -pub const IPV6_MTU_DISCOVER: c_int = 23; -pub const IPV6_MTU: c_int = 24; -pub const IPV6_RECVERR: c_int = 25; -pub const IPV6_V6ONLY: c_int = 26; -pub const IPV6_JOIN_ANYCAST: c_int = 27; -pub const IPV6_LEAVE_ANYCAST: c_int = 28; -pub const IPV6_RECVPKTINFO: c_int = 49; -pub const IPV6_PKTINFO: c_int = 50; -pub const IPV6_RECVHOPLIMIT: c_int = 51; -pub const IPV6_HOPLIMIT: c_int = 52; -pub const IPV6_RECVHOPOPTS: c_int = 53; -pub const IPV6_HOPOPTS: c_int = 54; -pub const IPV6_RTHDRDSTOPTS: c_int = 55; -pub const IPV6_RECVRTHDR: c_int = 56; -pub const IPV6_RTHDR: c_int = 57; -pub const IPV6_RECVDSTOPTS: c_int = 58; -pub const IPV6_DSTOPTS: c_int = 59; -pub const IPV6_RECVPATHMTU: c_int = 60; -pub const IPV6_PATHMTU: c_int = 61; -pub const IPV6_DONTFRAG: c_int = 62; -pub const IPV6_RECVTCLASS: c_int = 66; -pub const IPV6_TCLASS: c_int = 67; -pub const IPV6_ADDR_PREFERENCES: c_int = 72; -pub const IPV6_MINHOPCOUNT: c_int = 73; -pub const IPV6_ADD_MEMBERSHIP: c_int = 20; -pub const IPV6_DROP_MEMBERSHIP: c_int = 21; -pub const IPV6_RXHOPOPTS: c_int = 3; -pub const IPV6_RXDSTOPTS: c_int = 4; -pub const IPV6_RTHDR_LOOSE: c_int = 0; -pub const IPV6_RTHDR_STRICT: c_int = 1; -pub const IPV6_RTHDR_TYPE_0: c_int = 0; -pub const IN_CLASSA_NET: u32 = 4278190080; -pub const IN_CLASSA_NSHIFT: usize = 24; -pub const IN_CLASSA_HOST: u32 = 16777215; -pub const IN_CLASSA_MAX: u32 = 128; -pub const IN_CLASSB_NET: u32 = 
4294901760; -pub const IN_CLASSB_NSHIFT: usize = 16; -pub const IN_CLASSB_HOST: u32 = 65535; -pub const IN_CLASSB_MAX: u32 = 65536; -pub const IN_CLASSC_NET: u32 = 4294967040; -pub const IN_CLASSC_NSHIFT: usize = 8; -pub const IN_CLASSC_HOST: u32 = 255; -pub const IN_LOOPBACKNET: u32 = 127; -pub const INET_ADDRSTRLEN: usize = 16; -pub const INET6_ADDRSTRLEN: usize = 46; - -// netinet/ip.h -pub const IPTOS_TOS_MASK: u8 = 0x1E; -pub const IPTOS_PREC_MASK: u8 = 0xE0; - -pub const IPTOS_ECN_NOT_ECT: u8 = 0x00; - -pub const IPTOS_LOWDELAY: u8 = 0x10; -pub const IPTOS_THROUGHPUT: u8 = 0x08; -pub const IPTOS_RELIABILITY: u8 = 0x04; -pub const IPTOS_MINCOST: u8 = 0x02; - -pub const IPTOS_PREC_NETCONTROL: u8 = 0xe0; -pub const IPTOS_PREC_INTERNETCONTROL: u8 = 0xc0; -pub const IPTOS_PREC_CRITIC_ECP: u8 = 0xa0; -pub const IPTOS_PREC_FLASHOVERRIDE: u8 = 0x80; -pub const IPTOS_PREC_FLASH: u8 = 0x60; -pub const IPTOS_PREC_IMMEDIATE: u8 = 0x40; -pub const IPTOS_PREC_PRIORITY: u8 = 0x20; -pub const IPTOS_PREC_ROUTINE: u8 = 0x00; - -pub const IPTOS_ECN_MASK: u8 = 0x03; -pub const IPTOS_ECN_ECT1: u8 = 0x01; -pub const IPTOS_ECN_ECT0: u8 = 0x02; -pub const IPTOS_ECN_CE: u8 = 0x03; - -pub const IPOPT_COPY: u8 = 0x80; -pub const IPOPT_CLASS_MASK: u8 = 0x60; -pub const IPOPT_NUMBER_MASK: u8 = 0x1f; - -pub const IPOPT_CONTROL: u8 = 0x00; -pub const IPOPT_RESERVED1: u8 = 0x20; -pub const IPOPT_MEASUREMENT: u8 = 0x40; -pub const IPOPT_RESERVED2: u8 = 0x60; -pub const IPOPT_END: u8 = 0 | IPOPT_CONTROL; -pub const IPOPT_NOOP: u8 = 1 | IPOPT_CONTROL; -pub const IPOPT_SEC: u8 = 2 | IPOPT_CONTROL | IPOPT_COPY; -pub const IPOPT_LSRR: u8 = 3 | IPOPT_CONTROL | IPOPT_COPY; -pub const IPOPT_TIMESTAMP: u8 = 4 | IPOPT_MEASUREMENT; -pub const IPOPT_RR: u8 = 7 | IPOPT_CONTROL; -pub const IPOPT_SID: u8 = 8 | IPOPT_CONTROL | IPOPT_COPY; -pub const IPOPT_SSRR: u8 = 9 | IPOPT_CONTROL | IPOPT_COPY; -pub const IPOPT_RA: u8 = 20 | IPOPT_CONTROL | IPOPT_COPY; -pub const IPVERSION: u8 = 4; -pub const MAXTTL: u8 = 255; -pub const IPDEFTTL: u8 = 64; -pub const IPOPT_OPTVAL: u8 = 0; -pub const IPOPT_OLEN: u8 = 1; -pub const IPOPT_OFFSET: u8 = 2; -pub const IPOPT_MINOFF: u8 = 4; -pub const MAX_IPOPTLEN: u8 = 40; -pub const IPOPT_NOP: u8 = IPOPT_NOOP; -pub const IPOPT_EOL: u8 = IPOPT_END; -pub const IPOPT_TS: u8 = IPOPT_TIMESTAMP; -pub const IPOPT_TS_TSONLY: u8 = 0; -pub const IPOPT_TS_TSANDADDR: u8 = 1; -pub const IPOPT_TS_PRESPEC: u8 = 3; - -// net/if_arp.h -pub const ARPOP_REQUEST: u16 = 1; -pub const ARPOP_REPLY: u16 = 2; -pub const ARPOP_RREQUEST: u16 = 3; -pub const ARPOP_RREPLY: u16 = 4; -pub const ARPOP_InREQUEST: u16 = 8; -pub const ARPOP_InREPLY: u16 = 9; -pub const ARPOP_NAK: u16 = 10; - -pub const MAX_ADDR_LEN: usize = 7; -pub const ARPD_UPDATE: c_ushort = 0x01; -pub const ARPD_LOOKUP: c_ushort = 0x02; -pub const ARPD_FLUSH: c_ushort = 0x03; -pub const ATF_MAGIC: c_int = 0x80; - -pub const ATF_NETMASK: c_int = 0x20; -pub const ATF_DONTPUB: c_int = 0x40; - -pub const ARPHRD_NETROM: u16 = 0; -pub const ARPHRD_ETHER: u16 = 1; -pub const ARPHRD_EETHER: u16 = 2; -pub const ARPHRD_AX25: u16 = 3; -pub const ARPHRD_PRONET: u16 = 4; -pub const ARPHRD_CHAOS: u16 = 5; -pub const ARPHRD_IEEE802: u16 = 6; -pub const ARPHRD_ARCNET: u16 = 7; -pub const ARPHRD_APPLETLK: u16 = 8; -pub const ARPHRD_DLCI: u16 = 15; -pub const ARPHRD_ATM: u16 = 19; -pub const ARPHRD_METRICOM: u16 = 23; -pub const ARPHRD_IEEE1394: u16 = 24; -pub const ARPHRD_EUI64: u16 = 27; -pub const ARPHRD_INFINIBAND: u16 = 32; - -pub const ARPHRD_SLIP: u16 = 256; -pub const 
ARPHRD_CSLIP: u16 = 257; -pub const ARPHRD_SLIP6: u16 = 258; -pub const ARPHRD_CSLIP6: u16 = 259; -pub const ARPHRD_RSRVD: u16 = 260; -pub const ARPHRD_ADAPT: u16 = 264; -pub const ARPHRD_ROSE: u16 = 270; -pub const ARPHRD_X25: u16 = 271; -pub const ARPHRD_HWX25: u16 = 272; -pub const ARPHRD_CAN: u16 = 280; -pub const ARPHRD_PPP: u16 = 512; -pub const ARPHRD_CISCO: u16 = 513; -pub const ARPHRD_HDLC: u16 = ARPHRD_CISCO; -pub const ARPHRD_LAPB: u16 = 516; -pub const ARPHRD_DDCMP: u16 = 517; -pub const ARPHRD_RAWHDLC: u16 = 518; - -pub const ARPHRD_TUNNEL: u16 = 768; -pub const ARPHRD_TUNNEL6: u16 = 769; -pub const ARPHRD_FRAD: u16 = 770; -pub const ARPHRD_SKIP: u16 = 771; -pub const ARPHRD_LOOPBACK: u16 = 772; -pub const ARPHRD_LOCALTLK: u16 = 773; -pub const ARPHRD_FDDI: u16 = 774; -pub const ARPHRD_BIF: u16 = 775; -pub const ARPHRD_SIT: u16 = 776; -pub const ARPHRD_IPDDP: u16 = 777; -pub const ARPHRD_IPGRE: u16 = 778; -pub const ARPHRD_PIMREG: u16 = 779; -pub const ARPHRD_HIPPI: u16 = 780; -pub const ARPHRD_ASH: u16 = 781; -pub const ARPHRD_ECONET: u16 = 782; -pub const ARPHRD_IRDA: u16 = 783; -pub const ARPHRD_FCPP: u16 = 784; -pub const ARPHRD_FCAL: u16 = 785; -pub const ARPHRD_FCPL: u16 = 786; -pub const ARPHRD_FCFABRIC: u16 = 787; -pub const ARPHRD_IEEE802_TR: u16 = 800; -pub const ARPHRD_IEEE80211: u16 = 801; -pub const ARPHRD_IEEE80211_PRISM: u16 = 802; -pub const ARPHRD_IEEE80211_RADIOTAP: u16 = 803; -pub const ARPHRD_IEEE802154: u16 = 804; - -pub const ARPHRD_VOID: u16 = 0xFFFF; -pub const ARPHRD_NONE: u16 = 0xFFFE; - -// bits/posix1_lim.h -pub const _POSIX_AIO_LISTIO_MAX: usize = 2; -pub const _POSIX_AIO_MAX: usize = 1; -pub const _POSIX_ARG_MAX: usize = 4096; -pub const _POSIX_CHILD_MAX: usize = 25; -pub const _POSIX_DELAYTIMER_MAX: usize = 32; -pub const _POSIX_HOST_NAME_MAX: usize = 255; -pub const _POSIX_LINK_MAX: usize = 8; -pub const _POSIX_LOGIN_NAME_MAX: usize = 9; -pub const _POSIX_MAX_CANON: usize = 255; -pub const _POSIX_MAX_INPUT: usize = 255; -pub const _POSIX_MQ_OPEN_MAX: usize = 8; -pub const _POSIX_MQ_PRIO_MAX: usize = 32; -pub const _POSIX_NAME_MAX: usize = 14; -pub const _POSIX_NGROUPS_MAX: usize = 8; -pub const _POSIX_OPEN_MAX: usize = 20; -pub const _POSIX_FD_SETSIZE: usize = 20; -pub const _POSIX_PATH_MAX: usize = 256; -pub const _POSIX_PIPE_BUF: usize = 512; -pub const _POSIX_RE_DUP_MAX: usize = 255; -pub const _POSIX_RTSIG_MAX: usize = 8; -pub const _POSIX_SEM_NSEMS_MAX: usize = 256; -pub const _POSIX_SEM_VALUE_MAX: usize = 32767; -pub const _POSIX_SIGQUEUE_MAX: usize = 32; -pub const _POSIX_SSIZE_MAX: usize = 32767; -pub const _POSIX_STREAM_MAX: usize = 8; -pub const _POSIX_SYMLINK_MAX: usize = 255; -pub const _POSIX_SYMLOOP_MAX: usize = 8; -pub const _POSIX_TIMER_MAX: usize = 32; -pub const _POSIX_TTY_NAME_MAX: usize = 9; -pub const _POSIX_TZNAME_MAX: usize = 6; -pub const _POSIX_QLIMIT: usize = 1; -pub const _POSIX_HIWAT: usize = 512; -pub const _POSIX_UIO_MAXIOV: usize = 16; -pub const _POSIX_CLOCKRES_MIN: usize = 20000000; -pub const NAME_MAX: usize = 255; -pub const NGROUPS_MAX: usize = 256; -pub const _POSIX_THREAD_KEYS_MAX: usize = 128; -pub const _POSIX_THREAD_DESTRUCTOR_ITERATIONS: usize = 4; -pub const _POSIX_THREAD_THREADS_MAX: usize = 64; -pub const SEM_VALUE_MAX: c_int = 2147483647; -pub const MAXNAMLEN: usize = 255; - -// netdb.h -pub const _PATH_HEQUIV: &[u8; 17usize] = b"/etc/hosts.equiv\0"; -pub const _PATH_HOSTS: &[u8; 11usize] = b"/etc/hosts\0"; -pub const _PATH_NETWORKS: &[u8; 14usize] = b"/etc/networks\0"; -pub const 
_PATH_NSSWITCH_CONF: &[u8; 19usize] = b"/etc/nsswitch.conf\0"; -pub const _PATH_PROTOCOLS: &[u8; 15usize] = b"/etc/protocols\0"; -pub const _PATH_SERVICES: &[u8; 14usize] = b"/etc/services\0"; -pub const HOST_NOT_FOUND: c_int = 1; -pub const TRY_AGAIN: c_int = 2; -pub const NO_RECOVERY: c_int = 3; -pub const NO_DATA: c_int = 4; -pub const NETDB_INTERNAL: c_int = -1; -pub const NETDB_SUCCESS: c_int = 0; -pub const NO_ADDRESS: c_int = 4; -pub const IPPORT_RESERVED: c_int = 1024; -pub const SCOPE_DELIMITER: u8 = 37u8; -pub const GAI_WAIT: c_int = 0; -pub const GAI_NOWAIT: c_int = 1; -pub const AI_PASSIVE: c_int = 1; -pub const AI_CANONNAME: c_int = 2; -pub const AI_NUMERICHOST: c_int = 4; -pub const AI_V4MAPPED: c_int = 8; -pub const AI_ALL: c_int = 16; -pub const AI_ADDRCONFIG: c_int = 32; -pub const AI_IDN: c_int = 64; -pub const AI_CANONIDN: c_int = 128; -pub const AI_NUMERICSERV: c_int = 1024; -pub const EAI_BADFLAGS: c_int = -1; -pub const EAI_NONAME: c_int = -2; -pub const EAI_AGAIN: c_int = -3; -pub const EAI_FAIL: c_int = -4; -pub const EAI_FAMILY: c_int = -6; -pub const EAI_SOCKTYPE: c_int = -7; -pub const EAI_SERVICE: c_int = -8; -pub const EAI_MEMORY: c_int = -10; -pub const EAI_SYSTEM: c_int = -11; -pub const EAI_OVERFLOW: c_int = -12; -pub const EAI_NODATA: c_int = -5; -pub const EAI_ADDRFAMILY: c_int = -9; -pub const EAI_INPROGRESS: c_int = -100; -pub const EAI_CANCELED: c_int = -101; -pub const EAI_NOTCANCELED: c_int = -102; -pub const EAI_ALLDONE: c_int = -103; -pub const EAI_INTR: c_int = -104; -pub const EAI_IDN_ENCODE: c_int = -105; -pub const NI_MAXHOST: usize = 1025; -pub const NI_MAXSERV: usize = 32; -pub const NI_NUMERICHOST: c_int = 1; -pub const NI_NUMERICSERV: c_int = 2; -pub const NI_NOFQDN: c_int = 4; -pub const NI_NAMEREQD: c_int = 8; -pub const NI_DGRAM: c_int = 16; -pub const NI_IDN: c_int = 32; - -// time.h -pub const CLOCK_REALTIME: crate::clockid_t = 0; -pub const CLOCK_MONOTONIC: crate::clockid_t = 1; -pub const CLOCK_PROCESS_CPUTIME_ID: crate::clockid_t = 2; -pub const CLOCK_THREAD_CPUTIME_ID: crate::clockid_t = 3; -pub const CLOCK_MONOTONIC_RAW: crate::clockid_t = 4; -pub const CLOCK_REALTIME_COARSE: crate::clockid_t = 5; -pub const CLOCK_MONOTONIC_COARSE: crate::clockid_t = 6; -pub const TIMER_ABSTIME: c_int = 1; -pub const TIME_UTC: c_int = 1; - -// sys/poll.h -pub const POLLIN: i16 = 1; -pub const POLLPRI: i16 = 2; -pub const POLLOUT: i16 = 4; -pub const POLLRDNORM: i16 = 1; -pub const POLLRDBAND: i16 = 2; -pub const POLLWRNORM: i16 = 4; -pub const POLLWRBAND: i16 = 4; -pub const POLLERR: i16 = 8; -pub const POLLHUP: i16 = 16; -pub const POLLNVAL: i16 = 32; - -// locale.h -pub const __LC_CTYPE: usize = 0; -pub const __LC_NUMERIC: usize = 1; -pub const __LC_TIME: usize = 2; -pub const __LC_COLLATE: usize = 3; -pub const __LC_MONETARY: usize = 4; -pub const __LC_MESSAGES: usize = 5; -pub const __LC_ALL: usize = 6; -pub const __LC_PAPER: usize = 7; -pub const __LC_NAME: usize = 8; -pub const __LC_ADDRESS: usize = 9; -pub const __LC_TELEPHONE: usize = 10; -pub const __LC_MEASUREMENT: usize = 11; -pub const __LC_IDENTIFICATION: usize = 12; -pub const LC_CTYPE: c_int = 0; -pub const LC_NUMERIC: c_int = 1; -pub const LC_TIME: c_int = 2; -pub const LC_COLLATE: c_int = 3; -pub const LC_MONETARY: c_int = 4; -pub const LC_MESSAGES: c_int = 5; -pub const LC_ALL: c_int = 6; -pub const LC_PAPER: c_int = 7; -pub const LC_NAME: c_int = 8; -pub const LC_ADDRESS: c_int = 9; -pub const LC_TELEPHONE: c_int = 10; -pub const LC_MEASUREMENT: c_int = 11; -pub const 
LC_IDENTIFICATION: c_int = 12; -pub const LC_CTYPE_MASK: c_int = 1; -pub const LC_NUMERIC_MASK: c_int = 2; -pub const LC_TIME_MASK: c_int = 4; -pub const LC_COLLATE_MASK: c_int = 8; -pub const LC_MONETARY_MASK: c_int = 16; -pub const LC_MESSAGES_MASK: c_int = 32; -pub const LC_PAPER_MASK: c_int = 128; -pub const LC_NAME_MASK: c_int = 256; -pub const LC_ADDRESS_MASK: c_int = 512; -pub const LC_TELEPHONE_MASK: c_int = 1024; -pub const LC_MEASUREMENT_MASK: c_int = 2048; -pub const LC_IDENTIFICATION_MASK: c_int = 4096; -pub const LC_ALL_MASK: c_int = 8127; - -pub const ABDAY_1: crate::nl_item = 0x20000; -pub const ABDAY_2: crate::nl_item = 0x20001; -pub const ABDAY_3: crate::nl_item = 0x20002; -pub const ABDAY_4: crate::nl_item = 0x20003; -pub const ABDAY_5: crate::nl_item = 0x20004; -pub const ABDAY_6: crate::nl_item = 0x20005; -pub const ABDAY_7: crate::nl_item = 0x20006; - -pub const DAY_1: crate::nl_item = 0x20007; -pub const DAY_2: crate::nl_item = 0x20008; -pub const DAY_3: crate::nl_item = 0x20009; -pub const DAY_4: crate::nl_item = 0x2000A; -pub const DAY_5: crate::nl_item = 0x2000B; -pub const DAY_6: crate::nl_item = 0x2000C; -pub const DAY_7: crate::nl_item = 0x2000D; - -pub const ABMON_1: crate::nl_item = 0x2000E; -pub const ABMON_2: crate::nl_item = 0x2000F; -pub const ABMON_3: crate::nl_item = 0x20010; -pub const ABMON_4: crate::nl_item = 0x20011; -pub const ABMON_5: crate::nl_item = 0x20012; -pub const ABMON_6: crate::nl_item = 0x20013; -pub const ABMON_7: crate::nl_item = 0x20014; -pub const ABMON_8: crate::nl_item = 0x20015; -pub const ABMON_9: crate::nl_item = 0x20016; -pub const ABMON_10: crate::nl_item = 0x20017; -pub const ABMON_11: crate::nl_item = 0x20018; -pub const ABMON_12: crate::nl_item = 0x20019; - -pub const MON_1: crate::nl_item = 0x2001A; -pub const MON_2: crate::nl_item = 0x2001B; -pub const MON_3: crate::nl_item = 0x2001C; -pub const MON_4: crate::nl_item = 0x2001D; -pub const MON_5: crate::nl_item = 0x2001E; -pub const MON_6: crate::nl_item = 0x2001F; -pub const MON_7: crate::nl_item = 0x20020; -pub const MON_8: crate::nl_item = 0x20021; -pub const MON_9: crate::nl_item = 0x20022; -pub const MON_10: crate::nl_item = 0x20023; -pub const MON_11: crate::nl_item = 0x20024; -pub const MON_12: crate::nl_item = 0x20025; - -pub const AM_STR: crate::nl_item = 0x20026; -pub const PM_STR: crate::nl_item = 0x20027; - -pub const D_T_FMT: crate::nl_item = 0x20028; -pub const D_FMT: crate::nl_item = 0x20029; -pub const T_FMT: crate::nl_item = 0x2002A; -pub const T_FMT_AMPM: crate::nl_item = 0x2002B; - -pub const ERA: crate::nl_item = 0x2002C; -pub const ERA_D_FMT: crate::nl_item = 0x2002E; -pub const ALT_DIGITS: crate::nl_item = 0x2002F; -pub const ERA_D_T_FMT: crate::nl_item = 0x20030; -pub const ERA_T_FMT: crate::nl_item = 0x20031; - -pub const CODESET: crate::nl_item = 14; -pub const CRNCYSTR: crate::nl_item = 0x4000F; -pub const RADIXCHAR: crate::nl_item = 0x10000; -pub const THOUSEP: crate::nl_item = 0x10001; -pub const YESEXPR: crate::nl_item = 0x50000; -pub const NOEXPR: crate::nl_item = 0x50001; -pub const YESSTR: crate::nl_item = 0x50002; -pub const NOSTR: crate::nl_item = 0x50003; - -// reboot.h -pub const RB_AUTOBOOT: c_int = 0x0; -pub const RB_ASKNAME: c_int = 0x1; -pub const RB_SINGLE: c_int = 0x2; -pub const RB_KBD: c_int = 0x4; -pub const RB_HALT: c_int = 0x8; -pub const RB_INITNAME: c_int = 0x10; -pub const RB_DFLTROOT: c_int = 0x20; -pub const RB_NOBOOTRC: c_int = 0x20; -pub const RB_ALTBOOT: c_int = 0x40; -pub const RB_UNIPROC: c_int = 0x80; -pub const 
RB_DEBUGGER: c_int = 0x1000; - -// semaphore.h -pub const __SIZEOF_SEM_T: usize = 20; -pub const SEM_FAILED: *mut crate::sem_t = ptr::null_mut(); - -// termios.h -pub const IGNBRK: crate::tcflag_t = 1; -pub const BRKINT: crate::tcflag_t = 2; -pub const IGNPAR: crate::tcflag_t = 4; -pub const PARMRK: crate::tcflag_t = 8; -pub const INPCK: crate::tcflag_t = 16; -pub const ISTRIP: crate::tcflag_t = 32; -pub const INLCR: crate::tcflag_t = 64; -pub const IGNCR: crate::tcflag_t = 128; -pub const ICRNL: crate::tcflag_t = 256; -pub const IXON: crate::tcflag_t = 512; -pub const IXOFF: crate::tcflag_t = 1024; -pub const IXANY: crate::tcflag_t = 2048; -pub const IMAXBEL: crate::tcflag_t = 8192; -pub const IUCLC: crate::tcflag_t = 16384; -pub const OPOST: crate::tcflag_t = 1; -pub const ONLCR: crate::tcflag_t = 2; -pub const ONOEOT: crate::tcflag_t = 8; -pub const OCRNL: crate::tcflag_t = 16; -pub const ONOCR: crate::tcflag_t = 32; -pub const ONLRET: crate::tcflag_t = 64; -pub const NLDLY: crate::tcflag_t = 768; -pub const NL0: crate::tcflag_t = 0; -pub const NL1: crate::tcflag_t = 256; -pub const TABDLY: crate::tcflag_t = 3076; -pub const TAB0: crate::tcflag_t = 0; -pub const TAB1: crate::tcflag_t = 1024; -pub const TAB2: crate::tcflag_t = 2048; -pub const TAB3: crate::tcflag_t = 4; -pub const CRDLY: crate::tcflag_t = 12288; -pub const CR0: crate::tcflag_t = 0; -pub const CR1: crate::tcflag_t = 4096; -pub const CR2: crate::tcflag_t = 8192; -pub const CR3: crate::tcflag_t = 12288; -pub const FFDLY: crate::tcflag_t = 16384; -pub const FF0: crate::tcflag_t = 0; -pub const FF1: crate::tcflag_t = 16384; -pub const BSDLY: crate::tcflag_t = 32768; -pub const BS0: crate::tcflag_t = 0; -pub const BS1: crate::tcflag_t = 32768; -pub const VTDLY: crate::tcflag_t = 65536; -pub const VT0: crate::tcflag_t = 0; -pub const VT1: crate::tcflag_t = 65536; -pub const OLCUC: crate::tcflag_t = 131072; -pub const OFILL: crate::tcflag_t = 262144; -pub const OFDEL: crate::tcflag_t = 524288; -pub const CIGNORE: crate::tcflag_t = 1; -pub const CSIZE: crate::tcflag_t = 768; -pub const CS5: crate::tcflag_t = 0; -pub const CS6: crate::tcflag_t = 256; -pub const CS7: crate::tcflag_t = 512; -pub const CS8: crate::tcflag_t = 768; -pub const CSTOPB: crate::tcflag_t = 1024; -pub const CREAD: crate::tcflag_t = 2048; -pub const PARENB: crate::tcflag_t = 4096; -pub const PARODD: crate::tcflag_t = 8192; -pub const HUPCL: crate::tcflag_t = 16384; -pub const CLOCAL: crate::tcflag_t = 32768; -pub const CRTSCTS: crate::tcflag_t = 65536; -pub const CRTS_IFLOW: crate::tcflag_t = 65536; -pub const CCTS_OFLOW: crate::tcflag_t = 65536; -pub const CDTRCTS: crate::tcflag_t = 131072; -pub const MDMBUF: crate::tcflag_t = 1048576; -pub const CHWFLOW: crate::tcflag_t = 1245184; -pub const ECHOKE: crate::tcflag_t = 1; -pub const _ECHOE: crate::tcflag_t = 2; -pub const ECHOE: crate::tcflag_t = 2; -pub const _ECHOK: crate::tcflag_t = 4; -pub const ECHOK: crate::tcflag_t = 4; -pub const _ECHO: crate::tcflag_t = 8; -pub const ECHO: crate::tcflag_t = 8; -pub const _ECHONL: crate::tcflag_t = 16; -pub const ECHONL: crate::tcflag_t = 16; -pub const ECHOPRT: crate::tcflag_t = 32; -pub const ECHOCTL: crate::tcflag_t = 64; -pub const _ISIG: crate::tcflag_t = 128; -pub const ISIG: crate::tcflag_t = 128; -pub const _ICANON: crate::tcflag_t = 256; -pub const ICANON: crate::tcflag_t = 256; -pub const ALTWERASE: crate::tcflag_t = 512; -pub const _IEXTEN: crate::tcflag_t = 1024; -pub const IEXTEN: crate::tcflag_t = 1024; -pub const EXTPROC: crate::tcflag_t = 2048; -pub 
const _TOSTOP: crate::tcflag_t = 4194304; -pub const TOSTOP: crate::tcflag_t = 4194304; -pub const FLUSHO: crate::tcflag_t = 8388608; -pub const NOKERNINFO: crate::tcflag_t = 33554432; -pub const PENDIN: crate::tcflag_t = 536870912; -pub const _NOFLSH: crate::tcflag_t = 2147483648; -pub const NOFLSH: crate::tcflag_t = 2147483648; -pub const VEOF: usize = 0; -pub const VEOL: usize = 1; -pub const VEOL2: usize = 2; -pub const VERASE: usize = 3; -pub const VWERASE: usize = 4; -pub const VKILL: usize = 5; -pub const VREPRINT: usize = 6; -pub const VINTR: usize = 8; -pub const VQUIT: usize = 9; -pub const VSUSP: usize = 10; -pub const VDSUSP: usize = 11; -pub const VSTART: usize = 12; -pub const VSTOP: usize = 13; -pub const VLNEXT: usize = 14; -pub const VDISCARD: usize = 15; -pub const VMIN: usize = 16; -pub const VTIME: usize = 17; -pub const VSTATUS: usize = 18; -pub const NCCS: usize = 20; -pub const B0: crate::speed_t = 0; -pub const B50: crate::speed_t = 50; -pub const B75: crate::speed_t = 75; -pub const B110: crate::speed_t = 110; -pub const B134: crate::speed_t = 134; -pub const B150: crate::speed_t = 150; -pub const B200: crate::speed_t = 200; -pub const B300: crate::speed_t = 300; -pub const B600: crate::speed_t = 600; -pub const B1200: crate::speed_t = 1200; -pub const B1800: crate::speed_t = 1800; -pub const B2400: crate::speed_t = 2400; -pub const B4800: crate::speed_t = 4800; -pub const B9600: crate::speed_t = 9600; -pub const B7200: crate::speed_t = 7200; -pub const B14400: crate::speed_t = 14400; -pub const B19200: crate::speed_t = 19200; -pub const B28800: crate::speed_t = 28800; -pub const B38400: crate::speed_t = 38400; -pub const EXTA: crate::speed_t = B19200; -pub const EXTB: crate::speed_t = B38400; -pub const B57600: crate::speed_t = 57600; -pub const B76800: crate::speed_t = 76800; -pub const B115200: crate::speed_t = 115200; -pub const B230400: crate::speed_t = 230400; -pub const B460800: crate::speed_t = 460800; -pub const B500000: crate::speed_t = 500000; -pub const B576000: crate::speed_t = 576000; -pub const B921600: crate::speed_t = 921600; -pub const B1000000: crate::speed_t = 1000000; -pub const B1152000: crate::speed_t = 1152000; -pub const B1500000: crate::speed_t = 1500000; -pub const B2000000: crate::speed_t = 2000000; -pub const B2500000: crate::speed_t = 2500000; -pub const B3000000: crate::speed_t = 3000000; -pub const B3500000: crate::speed_t = 3500000; -pub const B4000000: crate::speed_t = 4000000; -pub const TCSANOW: c_int = 0; -pub const TCSADRAIN: c_int = 1; -pub const TCSAFLUSH: c_int = 2; -pub const TCSASOFT: c_int = 16; -pub const TCIFLUSH: c_int = 1; -pub const TCOFLUSH: c_int = 2; -pub const TCIOFLUSH: c_int = 3; -pub const TCOOFF: c_int = 1; -pub const TCOON: c_int = 2; -pub const TCIOFF: c_int = 3; -pub const TCION: c_int = 4; -pub const TTYDEF_IFLAG: crate::tcflag_t = 11042; -pub const TTYDEF_LFLAG: crate::tcflag_t = 1483; -pub const TTYDEF_CFLAG: crate::tcflag_t = 23040; -pub const TTYDEF_SPEED: crate::tcflag_t = 9600; -pub const CEOL: u8 = 0u8; -pub const CERASE: u8 = 127; -pub const CMIN: u8 = 1; -pub const CQUIT: u8 = 28; -pub const CTIME: u8 = 0; -pub const CBRK: u8 = 0u8; - -// dlfcn.h -pub const RTLD_DEFAULT: *mut c_void = ptr::null_mut(); -pub const RTLD_NEXT: *mut c_void = -1i64 as *mut c_void; -pub const RTLD_LAZY: c_int = 1; -pub const RTLD_NOW: c_int = 2; -pub const RTLD_BINDING_MASK: c_int = 3; -pub const RTLD_NOLOAD: c_int = 4; -pub const RTLD_DEEPBIND: c_int = 8; -pub const RTLD_GLOBAL: c_int = 256; -pub const RTLD_LOCAL: 
c_int = 0; -pub const RTLD_NODELETE: c_int = 4096; -pub const DLFO_STRUCT_HAS_EH_DBASE: usize = 1; -pub const DLFO_STRUCT_HAS_EH_COUNT: usize = 0; -pub const LM_ID_BASE: c_long = 0; -pub const LM_ID_NEWLM: c_long = -1; - -// bits/signum_generic.h -pub const SIGINT: c_int = 2; -pub const SIGILL: c_int = 4; -pub const SIGABRT: c_int = 6; -pub const SIGFPE: c_int = 8; -pub const SIGSEGV: c_int = 11; -pub const SIGTERM: c_int = 15; -pub const SIGHUP: c_int = 1; -pub const SIGQUIT: c_int = 3; -pub const SIGTRAP: c_int = 5; -pub const SIGKILL: c_int = 9; -pub const SIGPIPE: c_int = 13; -pub const SIGALRM: c_int = 14; -pub const SIGIOT: c_int = 6; -pub const SIGBUS: c_int = 10; -pub const SIGSYS: c_int = 12; -pub const SIGEMT: c_int = 7; -pub const SIGINFO: c_int = 29; -pub const SIGLOST: c_int = 32; -pub const SIGURG: c_int = 16; -pub const SIGSTOP: c_int = 17; -pub const SIGTSTP: c_int = 18; -pub const SIGCONT: c_int = 19; -pub const SIGCHLD: c_int = 20; -pub const SIGTTIN: c_int = 21; -pub const SIGTTOU: c_int = 22; -pub const SIGPOLL: c_int = 23; -pub const SIGXCPU: c_int = 24; -pub const SIGVTALRM: c_int = 26; -pub const SIGPROF: c_int = 27; -pub const SIGXFSZ: c_int = 25; -pub const SIGUSR1: c_int = 30; -pub const SIGUSR2: c_int = 31; -pub const SIGWINCH: c_int = 28; -pub const SIGIO: c_int = 23; -pub const SIGCLD: c_int = 20; -pub const __SIGRTMIN: usize = 32; -pub const __SIGRTMAX: usize = 32; -pub const _NSIG: usize = 33; -pub const NSIG: usize = 33; - -// bits/sigaction.h -pub const SA_ONSTACK: c_int = 1; -pub const SA_RESTART: c_int = 2; -pub const SA_NODEFER: c_int = 16; -pub const SA_RESETHAND: c_int = 4; -pub const SA_NOCLDSTOP: c_int = 8; -pub const SA_SIGINFO: c_int = 64; -pub const SA_INTERRUPT: c_int = 0; -pub const SA_NOMASK: c_int = 16; -pub const SA_ONESHOT: c_int = 4; -pub const SA_STACK: c_int = 1; -pub const SIG_BLOCK: c_int = 1; -pub const SIG_UNBLOCK: c_int = 2; -pub const SIG_SETMASK: c_int = 3; - -// bits/sigcontext.h -pub const FPC_IE: u16 = 1; -pub const FPC_IM: u16 = 1; -pub const FPC_DE: u16 = 2; -pub const FPC_DM: u16 = 2; -pub const FPC_ZE: u16 = 4; -pub const FPC_ZM: u16 = 4; -pub const FPC_OE: u16 = 8; -pub const FPC_OM: u16 = 8; -pub const FPC_UE: u16 = 16; -pub const FPC_PE: u16 = 32; -pub const FPC_PC: u16 = 768; -pub const FPC_PC_24: u16 = 0; -pub const FPC_PC_53: u16 = 512; -pub const FPC_PC_64: u16 = 768; -pub const FPC_RC: u16 = 3072; -pub const FPC_RC_RN: u16 = 0; -pub const FPC_RC_RD: u16 = 1024; -pub const FPC_RC_RU: u16 = 2048; -pub const FPC_RC_CHOP: u16 = 3072; -pub const FPC_IC: u16 = 4096; -pub const FPC_IC_PROJ: u16 = 0; -pub const FPC_IC_AFF: u16 = 4096; -pub const FPS_IE: u16 = 1; -pub const FPS_DE: u16 = 2; -pub const FPS_ZE: u16 = 4; -pub const FPS_OE: u16 = 8; -pub const FPS_UE: u16 = 16; -pub const FPS_PE: u16 = 32; -pub const FPS_SF: u16 = 64; -pub const FPS_ES: u16 = 128; -pub const FPS_C0: u16 = 256; -pub const FPS_C1: u16 = 512; -pub const FPS_C2: u16 = 1024; -pub const FPS_TOS: u16 = 14336; -pub const FPS_TOS_SHIFT: u16 = 11; -pub const FPS_C3: u16 = 16384; -pub const FPS_BUSY: u16 = 32768; -pub const FPE_INTOVF_TRAP: c_int = 1; -pub const FPE_INTDIV_FAULT: c_int = 2; -pub const FPE_FLTOVF_FAULT: c_int = 3; -pub const FPE_FLTDIV_FAULT: c_int = 4; -pub const FPE_FLTUND_FAULT: c_int = 5; -pub const FPE_SUBRNG_FAULT: c_int = 7; -pub const FPE_FLTDNR_FAULT: c_int = 8; -pub const FPE_FLTINX_FAULT: c_int = 9; -pub const FPE_EMERR_FAULT: c_int = 10; -pub const FPE_EMBND_FAULT: c_int = 11; -pub const ILL_INVOPR_FAULT: c_int = 1; -pub const 
ILL_STACK_FAULT: c_int = 2; -pub const ILL_FPEOPR_FAULT: c_int = 3; -pub const DBG_SINGLE_TRAP: c_int = 1; -pub const DBG_BRKPNT_FAULT: c_int = 2; -pub const __NGREG: usize = 19; -pub const NGREG: usize = 19; - -// bits/sigstack.h -pub const MINSIGSTKSZ: usize = 8192; -pub const SIGSTKSZ: usize = 40960; - -// sys/stat.h -pub const __S_IFMT: mode_t = 0o17_0000; -pub const __S_IFDIR: mode_t = 0o4_0000; -pub const __S_IFCHR: mode_t = 0o2_0000; -pub const __S_IFBLK: mode_t = 0o6_0000; -pub const __S_IFREG: mode_t = 0o10_0000; -pub const __S_IFLNK: mode_t = 0o12_0000; -pub const __S_IFSOCK: mode_t = 0o14_0000; -pub const __S_IFIFO: mode_t = 0o1_0000; -pub const __S_ISUID: mode_t = 0o4000; -pub const __S_ISGID: mode_t = 0o2000; -pub const __S_ISVTX: mode_t = 0o1000; -pub const __S_IREAD: mode_t = 0o0400; -pub const __S_IWRITE: mode_t = 0o0200; -pub const __S_IEXEC: mode_t = 0o0100; -pub const S_INOCACHE: mode_t = 0o20_0000; -pub const S_IUSEUNK: mode_t = 0o40_0000; -pub const S_IUNKNOWN: mode_t = 0o700_0000; -pub const S_IUNKSHIFT: mode_t = 0o0014; -pub const S_IPTRANS: mode_t = 0o1000_0000; -pub const S_IATRANS: mode_t = 0o2000_0000; -pub const S_IROOT: mode_t = 0o4000_0000; -pub const S_ITRANS: mode_t = 0o7000_0000; -pub const S_IMMAP0: mode_t = 0o10000_0000; -pub const CMASK: mode_t = 18; -pub const UF_SETTABLE: c_uint = 65535; -pub const UF_NODUMP: c_uint = 1; -pub const UF_IMMUTABLE: c_uint = 2; -pub const UF_APPEND: c_uint = 4; -pub const UF_OPAQUE: c_uint = 8; -pub const UF_NOUNLINK: c_uint = 16; -pub const SF_SETTABLE: c_uint = 4294901760; -pub const SF_ARCHIVED: c_uint = 65536; -pub const SF_IMMUTABLE: c_uint = 131072; -pub const SF_APPEND: c_uint = 262144; -pub const SF_NOUNLINK: c_uint = 1048576; -pub const SF_SNAPSHOT: c_uint = 2097152; -pub const UTIME_NOW: c_long = -1; -pub const UTIME_OMIT: c_long = -2; -pub const S_IFMT: mode_t = 0o17_0000; -pub const S_IFDIR: mode_t = 0o4_0000; -pub const S_IFCHR: mode_t = 0o2_0000; -pub const S_IFBLK: mode_t = 0o6_0000; -pub const S_IFREG: mode_t = 0o10_0000; -pub const S_IFIFO: mode_t = 0o1_0000; -pub const S_IFLNK: mode_t = 0o12_0000; -pub const S_IFSOCK: mode_t = 0o14_0000; -pub const S_ISUID: mode_t = 0o4000; -pub const S_ISGID: mode_t = 0o2000; -pub const S_ISVTX: mode_t = 0o1000; -pub const S_IRUSR: mode_t = 0o0400; -pub const S_IWUSR: mode_t = 0o0200; -pub const S_IXUSR: mode_t = 0o0100; -pub const S_IRWXU: mode_t = 0o0700; -pub const S_IREAD: mode_t = 0o0400; -pub const S_IWRITE: mode_t = 0o0200; -pub const S_IEXEC: mode_t = 0o0100; -pub const S_IRGRP: mode_t = 0o0040; -pub const S_IWGRP: mode_t = 0o0020; -pub const S_IXGRP: mode_t = 0o0010; -pub const S_IRWXG: mode_t = 0o0070; -pub const S_IROTH: mode_t = 0o0004; -pub const S_IWOTH: mode_t = 0o0002; -pub const S_IXOTH: mode_t = 0o0001; -pub const S_IRWXO: mode_t = 0o0007; -pub const ACCESSPERMS: mode_t = 511; -pub const ALLPERMS: mode_t = 4095; -pub const DEFFILEMODE: mode_t = 438; -pub const S_BLKSIZE: usize = 512; -pub const STATX_TYPE: c_uint = 1; -pub const STATX_MODE: c_uint = 2; -pub const STATX_NLINK: c_uint = 4; -pub const STATX_UID: c_uint = 8; -pub const STATX_GID: c_uint = 16; -pub const STATX_ATIME: c_uint = 32; -pub const STATX_MTIME: c_uint = 64; -pub const STATX_CTIME: c_uint = 128; -pub const STATX_INO: c_uint = 256; -pub const STATX_SIZE: c_uint = 512; -pub const STATX_BLOCKS: c_uint = 1024; -pub const STATX_BASIC_STATS: c_uint = 2047; -pub const STATX_ALL: c_uint = 4095; -pub const STATX_BTIME: c_uint = 2048; -pub const STATX_MNT_ID: c_uint = 4096; -pub const 
STATX_DIOALIGN: c_uint = 8192; -pub const STATX__RESERVED: c_uint = 2147483648; -pub const STATX_ATTR_COMPRESSED: c_uint = 4; -pub const STATX_ATTR_IMMUTABLE: c_uint = 16; -pub const STATX_ATTR_APPEND: c_uint = 32; -pub const STATX_ATTR_NODUMP: c_uint = 64; -pub const STATX_ATTR_ENCRYPTED: c_uint = 2048; -pub const STATX_ATTR_AUTOMOUNT: c_uint = 4096; -pub const STATX_ATTR_MOUNT_ROOT: c_uint = 8192; -pub const STATX_ATTR_VERITY: c_uint = 1048576; -pub const STATX_ATTR_DAX: c_uint = 2097152; - -// sys/ioctl.h -pub const TIOCM_LE: c_int = 1; -pub const TIOCM_DTR: c_int = 2; -pub const TIOCM_RTS: c_int = 4; -pub const TIOCM_ST: c_int = 8; -pub const TIOCM_SR: c_int = 16; -pub const TIOCM_CTS: c_int = 32; -pub const TIOCM_CAR: c_int = 64; -pub const TIOCM_CD: c_int = 64; -pub const TIOCM_RNG: c_int = 128; -pub const TIOCM_RI: c_int = 128; -pub const TIOCM_DSR: c_int = 256; -pub const TIOCPKT_DATA: c_int = 0; -pub const TIOCPKT_FLUSHREAD: c_int = 1; -pub const TIOCPKT_FLUSHWRITE: c_int = 2; -pub const TIOCPKT_STOP: c_int = 4; -pub const TIOCPKT_START: c_int = 8; -pub const TIOCPKT_NOSTOP: c_int = 16; -pub const TIOCPKT_DOSTOP: c_int = 32; -pub const TIOCPKT_IOCTL: c_int = 64; -pub const TTYDISC: c_int = 0; -pub const TABLDISC: c_int = 3; -pub const SLIPDISC: c_int = 4; -pub const TANDEM: crate::tcflag_t = 1; -pub const CBREAK: crate::tcflag_t = 2; -pub const LCASE: crate::tcflag_t = 4; -pub const CRMOD: crate::tcflag_t = 16; -pub const RAW: crate::tcflag_t = 32; -pub const ODDP: crate::tcflag_t = 64; -pub const EVENP: crate::tcflag_t = 128; -pub const ANYP: crate::tcflag_t = 192; -pub const NLDELAY: crate::tcflag_t = 768; -pub const NL2: crate::tcflag_t = 512; -pub const NL3: crate::tcflag_t = 768; -pub const TBDELAY: crate::tcflag_t = 3072; -pub const XTABS: crate::tcflag_t = 3072; -pub const CRDELAY: crate::tcflag_t = 12288; -pub const VTDELAY: crate::tcflag_t = 16384; -pub const BSDELAY: crate::tcflag_t = 32768; -pub const ALLDELAY: crate::tcflag_t = 65280; -pub const CRTBS: crate::tcflag_t = 65536; -pub const PRTERA: crate::tcflag_t = 131072; -pub const CRTERA: crate::tcflag_t = 262144; -pub const TILDE: crate::tcflag_t = 524288; -pub const LITOUT: crate::tcflag_t = 2097152; -pub const NOHANG: crate::tcflag_t = 16777216; -pub const L001000: crate::tcflag_t = 33554432; -pub const CRTKIL: crate::tcflag_t = 67108864; -pub const PASS8: crate::tcflag_t = 134217728; -pub const CTLECH: crate::tcflag_t = 268435456; -pub const DECCTQ: crate::tcflag_t = 1073741824; - -pub const FIONBIO: c_ulong = 0xa008007e; -pub const FIONREAD: c_ulong = 0x6008007f; -pub const TIOCSWINSZ: c_ulong = 0x90200767; -pub const TIOCGWINSZ: c_ulong = 0x50200768; -pub const TIOCEXCL: c_ulong = 0x70d; -pub const TIOCNXCL: c_ulong = 0x70e; -pub const TIOCSCTTY: c_ulong = 0x761; - -pub const FIOCLEX: c_ulong = 1; - -// fcntl.h -pub const O_EXEC: c_int = 4; -pub const O_NORW: c_int = 0; -pub const O_RDONLY: c_int = 1; -pub const O_WRONLY: c_int = 2; -pub const O_RDWR: c_int = 3; -pub const O_ACCMODE: c_int = 3; -pub const O_LARGEFILE: c_int = 0; -pub const O_CREAT: c_int = 16; -pub const O_EXCL: c_int = 32; -pub const O_NOLINK: c_int = 64; -pub const O_NOTRANS: c_int = 128; -pub const O_NOFOLLOW: c_int = 1048576; -pub const O_DIRECTORY: c_int = 2097152; -pub const O_APPEND: c_int = 256; -pub const O_ASYNC: c_int = 512; -pub const O_FSYNC: c_int = 1024; -pub const O_SYNC: c_int = 1024; -pub const O_NOATIME: c_int = 2048; -pub const O_SHLOCK: c_int = 131072; -pub const O_EXLOCK: c_int = 262144; -pub const O_DSYNC: c_int = 1024; 
-pub const O_RSYNC: c_int = 1024; -pub const O_NONBLOCK: c_int = 8; -pub const O_NDELAY: c_int = 8; -pub const O_HURD: c_int = 458751; -pub const O_TRUNC: c_int = 65536; -pub const O_CLOEXEC: c_int = 4194304; -pub const O_IGNORE_CTTY: c_int = 524288; -pub const O_TMPFILE: c_int = 8388608; -pub const O_NOCTTY: c_int = 0; -pub const FREAD: c_int = 1; -pub const FWRITE: c_int = 2; -pub const FASYNC: c_int = 512; -pub const FCREAT: c_int = 16; -pub const FEXCL: c_int = 32; -pub const FTRUNC: c_int = 65536; -pub const FNOCTTY: c_int = 0; -pub const FFSYNC: c_int = 1024; -pub const FSYNC: c_int = 1024; -pub const FAPPEND: c_int = 256; -pub const FNONBLOCK: c_int = 8; -pub const FNDELAY: c_int = 8; -pub const F_DUPFD: c_int = 0; -pub const F_GETFD: c_int = 1; -pub const F_SETFD: c_int = 2; -pub const F_GETFL: c_int = 3; -pub const F_SETFL: c_int = 4; -pub const F_GETOWN: c_int = 5; -pub const F_SETOWN: c_int = 6; -pub const F_GETLK: c_int = 7; -pub const F_SETLK: c_int = 8; -pub const F_SETLKW: c_int = 9; -pub const F_GETLK64: c_int = 10; -pub const F_SETLK64: c_int = 11; -pub const F_SETLKW64: c_int = 12; -pub const F_DUPFD_CLOEXEC: c_int = 1030; -pub const FD_CLOEXEC: c_int = 1; -pub const F_RDLCK: c_int = 1; -pub const F_WRLCK: c_int = 2; -pub const F_UNLCK: c_int = 3; -pub const POSIX_FADV_NORMAL: c_int = 0; -pub const POSIX_FADV_RANDOM: c_int = 1; -pub const POSIX_FADV_SEQUENTIAL: c_int = 2; -pub const POSIX_FADV_WILLNEED: c_int = 3; -pub const POSIX_FADV_DONTNEED: c_int = 4; -pub const POSIX_FADV_NOREUSE: c_int = 5; -pub const AT_FDCWD: c_int = -100; -pub const AT_SYMLINK_NOFOLLOW: c_int = 256; -pub const AT_REMOVEDIR: c_int = 512; -pub const AT_SYMLINK_FOLLOW: c_int = 1024; -pub const AT_NO_AUTOMOUNT: c_int = 2048; -pub const AT_EMPTY_PATH: c_int = 4096; -pub const AT_STATX_SYNC_TYPE: c_int = 24576; -pub const AT_STATX_SYNC_AS_STAT: c_int = 0; -pub const AT_STATX_FORCE_SYNC: c_int = 8192; -pub const AT_STATX_DONT_SYNC: c_int = 16384; -pub const AT_RECURSIVE: c_int = 32768; -pub const AT_EACCESS: c_int = 512; - -// sys/uio.h -pub const RWF_HIPRI: c_int = 1; -pub const RWF_DSYNC: c_int = 2; -pub const RWF_SYNC: c_int = 4; -pub const RWF_NOWAIT: c_int = 8; -pub const RWF_APPEND: c_int = 16; - -// errno.h -pub const EPERM: c_int = 1073741825; -pub const ENOENT: c_int = 1073741826; -pub const ESRCH: c_int = 1073741827; -pub const EINTR: c_int = 1073741828; -pub const EIO: c_int = 1073741829; -pub const ENXIO: c_int = 1073741830; -pub const E2BIG: c_int = 1073741831; -pub const ENOEXEC: c_int = 1073741832; -pub const EBADF: c_int = 1073741833; -pub const ECHILD: c_int = 1073741834; -pub const EDEADLK: c_int = 1073741835; -pub const ENOMEM: c_int = 1073741836; -pub const EACCES: c_int = 1073741837; -pub const EFAULT: c_int = 1073741838; -pub const ENOTBLK: c_int = 1073741839; -pub const EBUSY: c_int = 1073741840; -pub const EEXIST: c_int = 1073741841; -pub const EXDEV: c_int = 1073741842; -pub const ENODEV: c_int = 1073741843; -pub const ENOTDIR: c_int = 1073741844; -pub const EISDIR: c_int = 1073741845; -pub const EINVAL: c_int = 1073741846; -pub const EMFILE: c_int = 1073741848; -pub const ENFILE: c_int = 1073741847; -pub const ENOTTY: c_int = 1073741849; -pub const ETXTBSY: c_int = 1073741850; -pub const EFBIG: c_int = 1073741851; -pub const ENOSPC: c_int = 1073741852; -pub const ESPIPE: c_int = 1073741853; -pub const EROFS: c_int = 1073741854; -pub const EMLINK: c_int = 1073741855; -pub const EPIPE: c_int = 1073741856; -pub const EDOM: c_int = 1073741857; -pub const ERANGE: c_int = 
1073741858; -pub const EAGAIN: c_int = 1073741859; -pub const EWOULDBLOCK: c_int = 1073741859; -pub const EINPROGRESS: c_int = 1073741860; -pub const EALREADY: c_int = 1073741861; -pub const ENOTSOCK: c_int = 1073741862; -pub const EMSGSIZE: c_int = 1073741864; -pub const EPROTOTYPE: c_int = 1073741865; -pub const ENOPROTOOPT: c_int = 1073741866; -pub const EPROTONOSUPPORT: c_int = 1073741867; -pub const ESOCKTNOSUPPORT: c_int = 1073741868; -pub const EOPNOTSUPP: c_int = 1073741869; -pub const EPFNOSUPPORT: c_int = 1073741870; -pub const EAFNOSUPPORT: c_int = 1073741871; -pub const EADDRINUSE: c_int = 1073741872; -pub const EADDRNOTAVAIL: c_int = 1073741873; -pub const ENETDOWN: c_int = 1073741874; -pub const ENETUNREACH: c_int = 1073741875; -pub const ENETRESET: c_int = 1073741876; -pub const ECONNABORTED: c_int = 1073741877; -pub const ECONNRESET: c_int = 1073741878; -pub const ENOBUFS: c_int = 1073741879; -pub const EISCONN: c_int = 1073741880; -pub const ENOTCONN: c_int = 1073741881; -pub const EDESTADDRREQ: c_int = 1073741863; -pub const ESHUTDOWN: c_int = 1073741882; -pub const ETOOMANYREFS: c_int = 1073741883; -pub const ETIMEDOUT: c_int = 1073741884; -pub const ECONNREFUSED: c_int = 1073741885; -pub const ELOOP: c_int = 1073741886; -pub const ENAMETOOLONG: c_int = 1073741887; -pub const EHOSTDOWN: c_int = 1073741888; -pub const EHOSTUNREACH: c_int = 1073741889; -pub const ENOTEMPTY: c_int = 1073741890; -pub const EPROCLIM: c_int = 1073741891; -pub const EUSERS: c_int = 1073741892; -pub const EDQUOT: c_int = 1073741893; -pub const ESTALE: c_int = 1073741894; -pub const EREMOTE: c_int = 1073741895; -pub const EBADRPC: c_int = 1073741896; -pub const ERPCMISMATCH: c_int = 1073741897; -pub const EPROGUNAVAIL: c_int = 1073741898; -pub const EPROGMISMATCH: c_int = 1073741899; -pub const EPROCUNAVAIL: c_int = 1073741900; -pub const ENOLCK: c_int = 1073741901; -pub const EFTYPE: c_int = 1073741903; -pub const EAUTH: c_int = 1073741904; -pub const ENEEDAUTH: c_int = 1073741905; -pub const ENOSYS: c_int = 1073741902; -pub const ELIBEXEC: c_int = 1073741907; -pub const ENOTSUP: c_int = 1073741942; -pub const EILSEQ: c_int = 1073741930; -pub const EBACKGROUND: c_int = 1073741924; -pub const EDIED: c_int = 1073741925; -pub const EGREGIOUS: c_int = 1073741927; -pub const EIEIO: c_int = 1073741928; -pub const EGRATUITOUS: c_int = 1073741929; -pub const EBADMSG: c_int = 1073741931; -pub const EIDRM: c_int = 1073741932; -pub const EMULTIHOP: c_int = 1073741933; -pub const ENODATA: c_int = 1073741934; -pub const ENOLINK: c_int = 1073741935; -pub const ENOMSG: c_int = 1073741936; -pub const ENOSR: c_int = 1073741937; -pub const ENOSTR: c_int = 1073741938; -pub const EOVERFLOW: c_int = 1073741939; -pub const EPROTO: c_int = 1073741940; -pub const ETIME: c_int = 1073741941; -pub const ECANCELED: c_int = 1073741943; -pub const EOWNERDEAD: c_int = 1073741944; -pub const ENOTRECOVERABLE: c_int = 1073741945; -pub const EMACH_SEND_IN_PROGRESS: c_int = 268435457; -pub const EMACH_SEND_INVALID_DATA: c_int = 268435458; -pub const EMACH_SEND_INVALID_DEST: c_int = 268435459; -pub const EMACH_SEND_TIMED_OUT: c_int = 268435460; -pub const EMACH_SEND_WILL_NOTIFY: c_int = 268435461; -pub const EMACH_SEND_NOTIFY_IN_PROGRESS: c_int = 268435462; -pub const EMACH_SEND_INTERRUPTED: c_int = 268435463; -pub const EMACH_SEND_MSG_TOO_SMALL: c_int = 268435464; -pub const EMACH_SEND_INVALID_REPLY: c_int = 268435465; -pub const EMACH_SEND_INVALID_RIGHT: c_int = 268435466; -pub const EMACH_SEND_INVALID_NOTIFY: c_int = 268435467; 
-pub const EMACH_SEND_INVALID_MEMORY: c_int = 268435468; -pub const EMACH_SEND_NO_BUFFER: c_int = 268435469; -pub const EMACH_SEND_NO_NOTIFY: c_int = 268435470; -pub const EMACH_SEND_INVALID_TYPE: c_int = 268435471; -pub const EMACH_SEND_INVALID_HEADER: c_int = 268435472; -pub const EMACH_RCV_IN_PROGRESS: c_int = 268451841; -pub const EMACH_RCV_INVALID_NAME: c_int = 268451842; -pub const EMACH_RCV_TIMED_OUT: c_int = 268451843; -pub const EMACH_RCV_TOO_LARGE: c_int = 268451844; -pub const EMACH_RCV_INTERRUPTED: c_int = 268451845; -pub const EMACH_RCV_PORT_CHANGED: c_int = 268451846; -pub const EMACH_RCV_INVALID_NOTIFY: c_int = 268451847; -pub const EMACH_RCV_INVALID_DATA: c_int = 268451848; -pub const EMACH_RCV_PORT_DIED: c_int = 268451849; -pub const EMACH_RCV_IN_SET: c_int = 268451850; -pub const EMACH_RCV_HEADER_ERROR: c_int = 268451851; -pub const EMACH_RCV_BODY_ERROR: c_int = 268451852; -pub const EKERN_INVALID_ADDRESS: c_int = 1; -pub const EKERN_PROTECTION_FAILURE: c_int = 2; -pub const EKERN_NO_SPACE: c_int = 3; -pub const EKERN_INVALID_ARGUMENT: c_int = 4; -pub const EKERN_FAILURE: c_int = 5; -pub const EKERN_RESOURCE_SHORTAGE: c_int = 6; -pub const EKERN_NOT_RECEIVER: c_int = 7; -pub const EKERN_NO_ACCESS: c_int = 8; -pub const EKERN_MEMORY_FAILURE: c_int = 9; -pub const EKERN_MEMORY_ERROR: c_int = 10; -pub const EKERN_NOT_IN_SET: c_int = 12; -pub const EKERN_NAME_EXISTS: c_int = 13; -pub const EKERN_ABORTED: c_int = 14; -pub const EKERN_INVALID_NAME: c_int = 15; -pub const EKERN_INVALID_TASK: c_int = 16; -pub const EKERN_INVALID_RIGHT: c_int = 17; -pub const EKERN_INVALID_VALUE: c_int = 18; -pub const EKERN_UREFS_OVERFLOW: c_int = 19; -pub const EKERN_INVALID_CAPABILITY: c_int = 20; -pub const EKERN_RIGHT_EXISTS: c_int = 21; -pub const EKERN_INVALID_HOST: c_int = 22; -pub const EKERN_MEMORY_PRESENT: c_int = 23; -pub const EKERN_WRITE_PROTECTION_FAILURE: c_int = 24; -pub const EKERN_TERMINATED: c_int = 26; -pub const EKERN_TIMEDOUT: c_int = 27; -pub const EKERN_INTERRUPTED: c_int = 28; -pub const EMIG_TYPE_ERROR: c_int = -300; -pub const EMIG_REPLY_MISMATCH: c_int = -301; -pub const EMIG_REMOTE_ERROR: c_int = -302; -pub const EMIG_BAD_ID: c_int = -303; -pub const EMIG_BAD_ARGUMENTS: c_int = -304; -pub const EMIG_NO_REPLY: c_int = -305; -pub const EMIG_EXCEPTION: c_int = -306; -pub const EMIG_ARRAY_TOO_LARGE: c_int = -307; -pub const EMIG_SERVER_DIED: c_int = -308; -pub const EMIG_DESTROY_REQUEST: c_int = -309; -pub const ED_IO_ERROR: c_int = 2500; -pub const ED_WOULD_BLOCK: c_int = 2501; -pub const ED_NO_SUCH_DEVICE: c_int = 2502; -pub const ED_ALREADY_OPEN: c_int = 2503; -pub const ED_DEVICE_DOWN: c_int = 2504; -pub const ED_INVALID_OPERATION: c_int = 2505; -pub const ED_INVALID_RECNUM: c_int = 2506; -pub const ED_INVALID_SIZE: c_int = 2507; -pub const ED_NO_MEMORY: c_int = 2508; -pub const ED_READ_ONLY: c_int = 2509; -pub const _HURD_ERRNOS: usize = 122; - -// sched.h -pub const SCHED_OTHER: c_int = 0; -pub const SCHED_FIFO: c_int = 1; -pub const SCHED_RR: c_int = 2; -pub const _BITS_TYPES_STRUCT_SCHED_PARAM: usize = 1; -pub const __CPU_SETSIZE: usize = 1024; -pub const CPU_SETSIZE: usize = 1024; - -// pthread.h -pub const PTHREAD_SPINLOCK_INITIALIZER: c_int = 0; -pub const PTHREAD_CANCEL_DISABLE: c_int = 0; -pub const PTHREAD_CANCEL_ENABLE: c_int = 1; -pub const PTHREAD_CANCEL_DEFERRED: c_int = 0; -pub const PTHREAD_CANCEL_ASYNCHRONOUS: c_int = 1; -pub const PTHREAD_BARRIER_SERIAL_THREAD: c_int = -1; - -// netinet/tcp.h -pub const TCP_NODELAY: c_int = 1; -pub const TCP_MAXSEG: 
c_int = 2; -pub const TCP_CORK: c_int = 3; -pub const TCP_KEEPIDLE: c_int = 4; -pub const TCP_KEEPINTVL: c_int = 5; -pub const TCP_KEEPCNT: c_int = 6; -pub const TCP_SYNCNT: c_int = 7; -pub const TCP_LINGER2: c_int = 8; -pub const TCP_DEFER_ACCEPT: c_int = 9; -pub const TCP_WINDOW_CLAMP: c_int = 10; -pub const TCP_INFO: c_int = 11; -pub const TCP_QUICKACK: c_int = 12; -pub const TCP_CONGESTION: c_int = 13; -pub const TCP_MD5SIG: c_int = 14; -pub const TCP_COOKIE_TRANSACTIONS: c_int = 15; -pub const TCP_THIN_LINEAR_TIMEOUTS: c_int = 16; -pub const TCP_THIN_DUPACK: c_int = 17; -pub const TCP_USER_TIMEOUT: c_int = 18; -pub const TCP_REPAIR: c_int = 19; -pub const TCP_REPAIR_QUEUE: c_int = 20; -pub const TCP_QUEUE_SEQ: c_int = 21; -pub const TCP_REPAIR_OPTIONS: c_int = 22; -pub const TCP_FASTOPEN: c_int = 23; -pub const TCP_TIMESTAMP: c_int = 24; -pub const TCP_NOTSENT_LOWAT: c_int = 25; -pub const TCP_CC_INFO: c_int = 26; -pub const TCP_SAVE_SYN: c_int = 27; -pub const TCP_SAVED_SYN: c_int = 28; -pub const TCP_REPAIR_WINDOW: c_int = 29; -pub const TCP_FASTOPEN_CONNECT: c_int = 30; -pub const TCP_ULP: c_int = 31; -pub const TCP_MD5SIG_EXT: c_int = 32; -pub const TCP_FASTOPEN_KEY: c_int = 33; -pub const TCP_FASTOPEN_NO_COOKIE: c_int = 34; -pub const TCP_ZEROCOPY_RECEIVE: c_int = 35; -pub const TCP_INQ: c_int = 36; -pub const TCP_CM_INQ: c_int = 36; -pub const TCP_TX_DELAY: c_int = 37; -pub const TCP_REPAIR_ON: c_int = 1; -pub const TCP_REPAIR_OFF: c_int = 0; -pub const TCP_REPAIR_OFF_NO_WP: c_int = -1; - -// stdint.h -pub const INT8_MIN: i8 = -128; -pub const INT16_MIN: i16 = -32768; -pub const INT32_MIN: i32 = -2147483648; -pub const INT8_MAX: i8 = 127; -pub const INT16_MAX: i16 = 32767; -pub const INT32_MAX: i32 = 2147483647; -pub const UINT8_MAX: u8 = 255; -pub const UINT16_MAX: u16 = 65535; -pub const UINT32_MAX: u32 = 4294967295; -pub const INT_LEAST8_MIN: int_least8_t = -128; -pub const INT_LEAST16_MIN: int_least16_t = -32768; -pub const INT_LEAST32_MIN: int_least32_t = -2147483648; -pub const INT_LEAST8_MAX: int_least8_t = 127; -pub const INT_LEAST16_MAX: int_least16_t = 32767; -pub const INT_LEAST32_MAX: int_least32_t = 2147483647; -pub const UINT_LEAST8_MAX: uint_least8_t = 255; -pub const UINT_LEAST16_MAX: uint_least16_t = 65535; -pub const UINT_LEAST32_MAX: uint_least32_t = 4294967295; -pub const INT_FAST8_MIN: int_fast8_t = -128; -pub const INT_FAST16_MIN: int_fast16_t = -2147483648; -pub const INT_FAST32_MIN: int_fast32_t = -2147483648; -pub const INT_FAST8_MAX: int_fast8_t = 127; -pub const INT_FAST16_MAX: int_fast16_t = 2147483647; -pub const INT_FAST32_MAX: int_fast32_t = 2147483647; -pub const UINT_FAST8_MAX: uint_fast8_t = 255; -pub const UINT_FAST16_MAX: uint_fast16_t = 4294967295; -pub const UINT_FAST32_MAX: uint_fast32_t = 4294967295; -pub const INTPTR_MIN: __intptr_t = -2147483648; -pub const INTPTR_MAX: __intptr_t = 2147483647; -pub const UINTPTR_MAX: usize = 4294967295; -pub const PTRDIFF_MIN: __ptrdiff_t = -2147483648; -pub const PTRDIFF_MAX: __ptrdiff_t = 2147483647; -pub const SIG_ATOMIC_MIN: __sig_atomic_t = -2147483648; -pub const SIG_ATOMIC_MAX: __sig_atomic_t = 2147483647; -pub const SIZE_MAX: usize = 4294967295; -pub const WINT_MIN: wint_t = 0; -pub const WINT_MAX: wint_t = 4294967295; -pub const INT8_WIDTH: usize = 8; -pub const UINT8_WIDTH: usize = 8; -pub const INT16_WIDTH: usize = 16; -pub const UINT16_WIDTH: usize = 16; -pub const INT32_WIDTH: usize = 32; -pub const UINT32_WIDTH: usize = 32; -pub const INT64_WIDTH: usize = 64; -pub const UINT64_WIDTH: usize 
= 64; -pub const INT_LEAST8_WIDTH: usize = 8; -pub const UINT_LEAST8_WIDTH: usize = 8; -pub const INT_LEAST16_WIDTH: usize = 16; -pub const UINT_LEAST16_WIDTH: usize = 16; -pub const INT_LEAST32_WIDTH: usize = 32; -pub const UINT_LEAST32_WIDTH: usize = 32; -pub const INT_LEAST64_WIDTH: usize = 64; -pub const UINT_LEAST64_WIDTH: usize = 64; -pub const INT_FAST8_WIDTH: usize = 8; -pub const UINT_FAST8_WIDTH: usize = 8; -pub const INT_FAST16_WIDTH: usize = 32; -pub const UINT_FAST16_WIDTH: usize = 32; -pub const INT_FAST32_WIDTH: usize = 32; -pub const UINT_FAST32_WIDTH: usize = 32; -pub const INT_FAST64_WIDTH: usize = 64; -pub const UINT_FAST64_WIDTH: usize = 64; -pub const INTPTR_WIDTH: usize = 32; -pub const UINTPTR_WIDTH: usize = 32; -pub const INTMAX_WIDTH: usize = 64; -pub const UINTMAX_WIDTH: usize = 64; -pub const PTRDIFF_WIDTH: usize = 32; -pub const SIG_ATOMIC_WIDTH: usize = 32; -pub const SIZE_WIDTH: usize = 32; -pub const WCHAR_WIDTH: usize = 32; -pub const WINT_WIDTH: usize = 32; - -pub const TH_FIN: u8 = 1; -pub const TH_SYN: u8 = 2; -pub const TH_RST: u8 = 4; -pub const TH_PUSH: u8 = 8; -pub const TH_ACK: u8 = 16; -pub const TH_URG: u8 = 32; -pub const TCPOPT_EOL: u8 = 0; -pub const TCPOPT_NOP: u8 = 1; -pub const TCPOPT_MAXSEG: u8 = 2; -pub const TCPOLEN_MAXSEG: u8 = 4; -pub const TCPOPT_WINDOW: u8 = 3; -pub const TCPOLEN_WINDOW: u8 = 3; -pub const TCPOPT_SACK_PERMITTED: u8 = 4; -pub const TCPOLEN_SACK_PERMITTED: u8 = 2; -pub const TCPOPT_SACK: u8 = 5; -pub const TCPOPT_TIMESTAMP: u8 = 8; -pub const TCPOLEN_TIMESTAMP: u8 = 10; -pub const TCPOLEN_TSTAMP_APPA: u8 = 12; -pub const TCPOPT_TSTAMP_HDR: u32 = 16844810; -pub const TCP_MSS: usize = 512; -pub const TCP_MAXWIN: usize = 65535; -pub const TCP_MAX_WINSHIFT: usize = 14; -pub const TCPI_OPT_TIMESTAMPS: u8 = 1; -pub const TCPI_OPT_SACK: u8 = 2; -pub const TCPI_OPT_WSCALE: u8 = 4; -pub const TCPI_OPT_ECN: u8 = 8; -pub const TCPI_OPT_ECN_SEEN: u8 = 16; -pub const TCPI_OPT_SYN_DATA: u8 = 32; -pub const TCP_MD5SIG_MAXKEYLEN: usize = 80; -pub const TCP_MD5SIG_FLAG_PREFIX: usize = 1; -pub const TCP_COOKIE_MIN: usize = 8; -pub const TCP_COOKIE_MAX: usize = 16; -pub const TCP_COOKIE_PAIR_SIZE: usize = 32; -pub const TCP_COOKIE_IN_ALWAYS: c_int = 1; -pub const TCP_COOKIE_OUT_NEVER: c_int = 2; -pub const TCP_S_DATA_IN: c_int = 4; -pub const TCP_S_DATA_OUT: c_int = 8; -pub const TCP_MSS_DEFAULT: usize = 536; -pub const TCP_MSS_DESIRED: usize = 1220; - -// sys/wait.h -pub const WCOREFLAG: c_int = 128; -pub const WAIT_ANY: pid_t = -1; -pub const WAIT_MYPGRP: pid_t = 0; - -// sys/file.h -pub const LOCK_SH: c_int = 1; -pub const LOCK_EX: c_int = 2; -pub const LOCK_UN: c_int = 8; -pub const LOCK_NB: c_int = 4; - -// sys/mman.h -pub const PROT_NONE: c_int = 0; -pub const PROT_READ: c_int = 4; -pub const PROT_WRITE: c_int = 2; -pub const PROT_EXEC: c_int = 1; -pub const MAP_FILE: c_int = 1; -pub const MAP_ANON: c_int = 2; -pub const MAP_ANONYMOUS: c_int = MAP_ANON; -pub const MAP_TYPE: c_int = 15; -pub const MAP_COPY: c_int = 32; -pub const MAP_SHARED: c_int = 16; -pub const MAP_PRIVATE: c_int = 0; -pub const MAP_FIXED: c_int = 256; -pub const MAP_NOEXTEND: c_int = 512; -pub const MAP_HASSEMAPHORE: c_int = 1024; -pub const MAP_INHERIT: c_int = 2048; -pub const MAP_32BIT: c_int = 4096; -pub const MAP_EXCL: c_int = 16384; -pub const MAP_FAILED: *mut c_void = !0 as *mut c_void; -pub const MADV_NORMAL: c_int = 0; -pub const MADV_RANDOM: c_int = 1; -pub const MADV_SEQUENTIAL: c_int = 2; -pub const MADV_WILLNEED: c_int = 3; -pub const MADV_DONTNEED: 
c_int = 4; -pub const POSIX_MADV_NORMAL: c_int = 0; -pub const POSIX_MADV_RANDOM: c_int = 1; -pub const POSIX_MADV_SEQUENTIAL: c_int = 2; -pub const POSIX_MADV_WILLNEED: c_int = 3; -pub const POSIX_MADV_WONTNEED: c_int = 4; - -pub const MS_ASYNC: c_int = 1; -pub const MS_SYNC: c_int = 0; -pub const MS_INVALIDATE: c_int = 2; -pub const MREMAP_MAYMOVE: c_int = 1; -pub const MREMAP_FIXED: c_int = 2; -pub const MCL_CURRENT: c_int = 0x0001; -pub const MCL_FUTURE: c_int = 0x0002; - -// sys/xattr.h -pub const XATTR_CREATE: c_int = 0x1; -pub const XATTR_REPLACE: c_int = 0x2; - -// spawn.h -pub const POSIX_SPAWN_USEVFORK: c_int = 64; -pub const POSIX_SPAWN_SETSID: c_int = 128; - -// sys/syslog.h -pub const LOG_CRON: c_int = 9 << 3; -pub const LOG_AUTHPRIV: c_int = 10 << 3; -pub const LOG_FTP: c_int = 11 << 3; -pub const LOG_PERROR: c_int = 0x20; - -// net/if.h -pub const IFF_UP: c_int = 0x1; -pub const IFF_BROADCAST: c_int = 0x2; -pub const IFF_DEBUG: c_int = 0x4; -pub const IFF_LOOPBACK: c_int = 0x8; -pub const IFF_POINTOPOINT: c_int = 0x10; -pub const IFF_NOTRAILERS: c_int = 0x20; -pub const IFF_RUNNING: c_int = 0x40; -pub const IFF_NOARP: c_int = 0x80; -pub const IFF_PROMISC: c_int = 0x100; -pub const IFF_ALLMULTI: c_int = 0x200; -pub const IFF_MASTER: c_int = 0x400; -pub const IFF_SLAVE: c_int = 0x800; -pub const IFF_MULTICAST: c_int = 0x1000; -pub const IFF_PORTSEL: c_int = 0x2000; -pub const IFF_AUTOMEDIA: c_int = 0x4000; -pub const IFF_DYNAMIC: c_int = 0x8000; - -// random.h -pub const GRND_NONBLOCK: c_uint = 1; -pub const GRND_RANDOM: c_uint = 2; -pub const GRND_INSECURE: c_uint = 4; - -pub const _PC_LINK_MAX: c_int = 0; -pub const _PC_MAX_CANON: c_int = 1; -pub const _PC_MAX_INPUT: c_int = 2; -pub const _PC_NAME_MAX: c_int = 3; -pub const _PC_PATH_MAX: c_int = 4; -pub const _PC_PIPE_BUF: c_int = 5; -pub const _PC_CHOWN_RESTRICTED: c_int = 6; -pub const _PC_NO_TRUNC: c_int = 7; -pub const _PC_VDISABLE: c_int = 8; -pub const _PC_SYNC_IO: c_int = 9; -pub const _PC_ASYNC_IO: c_int = 10; -pub const _PC_PRIO_IO: c_int = 11; -pub const _PC_SOCK_MAXBUF: c_int = 12; -pub const _PC_FILESIZEBITS: c_int = 13; -pub const _PC_REC_INCR_XFER_SIZE: c_int = 14; -pub const _PC_REC_MAX_XFER_SIZE: c_int = 15; -pub const _PC_REC_MIN_XFER_SIZE: c_int = 16; -pub const _PC_REC_XFER_ALIGN: c_int = 17; -pub const _PC_ALLOC_SIZE_MIN: c_int = 18; -pub const _PC_SYMLINK_MAX: c_int = 19; -pub const _PC_2_SYMLINKS: c_int = 20; -pub const _SC_ARG_MAX: c_int = 0; -pub const _SC_CHILD_MAX: c_int = 1; -pub const _SC_CLK_TCK: c_int = 2; -pub const _SC_NGROUPS_MAX: c_int = 3; -pub const _SC_OPEN_MAX: c_int = 4; -pub const _SC_STREAM_MAX: c_int = 5; -pub const _SC_TZNAME_MAX: c_int = 6; -pub const _SC_JOB_CONTROL: c_int = 7; -pub const _SC_SAVED_IDS: c_int = 8; -pub const _SC_REALTIME_SIGNALS: c_int = 9; -pub const _SC_PRIORITY_SCHEDULING: c_int = 10; -pub const _SC_TIMERS: c_int = 11; -pub const _SC_ASYNCHRONOUS_IO: c_int = 12; -pub const _SC_PRIORITIZED_IO: c_int = 13; -pub const _SC_SYNCHRONIZED_IO: c_int = 14; -pub const _SC_FSYNC: c_int = 15; -pub const _SC_MAPPED_FILES: c_int = 16; -pub const _SC_MEMLOCK: c_int = 17; -pub const _SC_MEMLOCK_RANGE: c_int = 18; -pub const _SC_MEMORY_PROTECTION: c_int = 19; -pub const _SC_MESSAGE_PASSING: c_int = 20; -pub const _SC_SEMAPHORES: c_int = 21; -pub const _SC_SHARED_MEMORY_OBJECTS: c_int = 22; -pub const _SC_AIO_LISTIO_MAX: c_int = 23; -pub const _SC_AIO_MAX: c_int = 24; -pub const _SC_AIO_PRIO_DELTA_MAX: c_int = 25; -pub const _SC_DELAYTIMER_MAX: c_int = 26; -pub const 
_SC_MQ_OPEN_MAX: c_int = 27; -pub const _SC_MQ_PRIO_MAX: c_int = 28; -pub const _SC_VERSION: c_int = 29; -pub const _SC_PAGESIZE: c_int = 30; -pub const _SC_PAGE_SIZE: c_int = 30; -pub const _SC_RTSIG_MAX: c_int = 31; -pub const _SC_SEM_NSEMS_MAX: c_int = 32; -pub const _SC_SEM_VALUE_MAX: c_int = 33; -pub const _SC_SIGQUEUE_MAX: c_int = 34; -pub const _SC_TIMER_MAX: c_int = 35; -pub const _SC_BC_BASE_MAX: c_int = 36; -pub const _SC_BC_DIM_MAX: c_int = 37; -pub const _SC_BC_SCALE_MAX: c_int = 38; -pub const _SC_BC_STRING_MAX: c_int = 39; -pub const _SC_COLL_WEIGHTS_MAX: c_int = 40; -pub const _SC_EQUIV_CLASS_MAX: c_int = 41; -pub const _SC_EXPR_NEST_MAX: c_int = 42; -pub const _SC_LINE_MAX: c_int = 43; -pub const _SC_RE_DUP_MAX: c_int = 44; -pub const _SC_CHARCLASS_NAME_MAX: c_int = 45; -pub const _SC_2_VERSION: c_int = 46; -pub const _SC_2_C_BIND: c_int = 47; -pub const _SC_2_C_DEV: c_int = 48; -pub const _SC_2_FORT_DEV: c_int = 49; -pub const _SC_2_FORT_RUN: c_int = 50; -pub const _SC_2_SW_DEV: c_int = 51; -pub const _SC_2_LOCALEDEF: c_int = 52; -pub const _SC_PII: c_int = 53; -pub const _SC_PII_XTI: c_int = 54; -pub const _SC_PII_SOCKET: c_int = 55; -pub const _SC_PII_INTERNET: c_int = 56; -pub const _SC_PII_OSI: c_int = 57; -pub const _SC_POLL: c_int = 58; -pub const _SC_SELECT: c_int = 59; -pub const _SC_UIO_MAXIOV: c_int = 60; -pub const _SC_IOV_MAX: c_int = 60; -pub const _SC_PII_INTERNET_STREAM: c_int = 61; -pub const _SC_PII_INTERNET_DGRAM: c_int = 62; -pub const _SC_PII_OSI_COTS: c_int = 63; -pub const _SC_PII_OSI_CLTS: c_int = 64; -pub const _SC_PII_OSI_M: c_int = 65; -pub const _SC_T_IOV_MAX: c_int = 66; -pub const _SC_THREADS: c_int = 67; -pub const _SC_THREAD_SAFE_FUNCTIONS: c_int = 68; -pub const _SC_GETGR_R_SIZE_MAX: c_int = 69; -pub const _SC_GETPW_R_SIZE_MAX: c_int = 70; -pub const _SC_LOGIN_NAME_MAX: c_int = 71; -pub const _SC_TTY_NAME_MAX: c_int = 72; -pub const _SC_THREAD_DESTRUCTOR_ITERATIONS: c_int = 73; -pub const _SC_THREAD_KEYS_MAX: c_int = 74; -pub const _SC_THREAD_STACK_MIN: c_int = 75; -pub const _SC_THREAD_THREADS_MAX: c_int = 76; -pub const _SC_THREAD_ATTR_STACKADDR: c_int = 77; -pub const _SC_THREAD_ATTR_STACKSIZE: c_int = 78; -pub const _SC_THREAD_PRIORITY_SCHEDULING: c_int = 79; -pub const _SC_THREAD_PRIO_INHERIT: c_int = 80; -pub const _SC_THREAD_PRIO_PROTECT: c_int = 81; -pub const _SC_THREAD_PROCESS_SHARED: c_int = 82; -pub const _SC_NPROCESSORS_CONF: c_int = 83; -pub const _SC_NPROCESSORS_ONLN: c_int = 84; -pub const _SC_PHYS_PAGES: c_int = 85; -pub const _SC_AVPHYS_PAGES: c_int = 86; -pub const _SC_ATEXIT_MAX: c_int = 87; -pub const _SC_PASS_MAX: c_int = 88; -pub const _SC_XOPEN_VERSION: c_int = 89; -pub const _SC_XOPEN_XCU_VERSION: c_int = 90; -pub const _SC_XOPEN_UNIX: c_int = 91; -pub const _SC_XOPEN_CRYPT: c_int = 92; -pub const _SC_XOPEN_ENH_I18N: c_int = 93; -pub const _SC_XOPEN_SHM: c_int = 94; -pub const _SC_2_CHAR_TERM: c_int = 95; -pub const _SC_2_C_VERSION: c_int = 96; -pub const _SC_2_UPE: c_int = 97; -pub const _SC_XOPEN_XPG2: c_int = 98; -pub const _SC_XOPEN_XPG3: c_int = 99; -pub const _SC_XOPEN_XPG4: c_int = 100; -pub const _SC_CHAR_BIT: c_int = 101; -pub const _SC_CHAR_MAX: c_int = 102; -pub const _SC_CHAR_MIN: c_int = 103; -pub const _SC_INT_MAX: c_int = 104; -pub const _SC_INT_MIN: c_int = 105; -pub const _SC_LONG_BIT: c_int = 106; -pub const _SC_WORD_BIT: c_int = 107; -pub const _SC_MB_LEN_MAX: c_int = 108; -pub const _SC_NZERO: c_int = 109; -pub const _SC_SSIZE_MAX: c_int = 110; -pub const _SC_SCHAR_MAX: c_int = 111; -pub const 
_SC_SCHAR_MIN: c_int = 112; -pub const _SC_SHRT_MAX: c_int = 113; -pub const _SC_SHRT_MIN: c_int = 114; -pub const _SC_UCHAR_MAX: c_int = 115; -pub const _SC_UINT_MAX: c_int = 116; -pub const _SC_ULONG_MAX: c_int = 117; -pub const _SC_USHRT_MAX: c_int = 118; -pub const _SC_NL_ARGMAX: c_int = 119; -pub const _SC_NL_LANGMAX: c_int = 120; -pub const _SC_NL_MSGMAX: c_int = 121; -pub const _SC_NL_NMAX: c_int = 122; -pub const _SC_NL_SETMAX: c_int = 123; -pub const _SC_NL_TEXTMAX: c_int = 124; -pub const _SC_XBS5_ILP32_OFF32: c_int = 125; -pub const _SC_XBS5_ILP32_OFFBIG: c_int = 126; -pub const _SC_XBS5_LP64_OFF64: c_int = 127; -pub const _SC_XBS5_LPBIG_OFFBIG: c_int = 128; -pub const _SC_XOPEN_LEGACY: c_int = 129; -pub const _SC_XOPEN_REALTIME: c_int = 130; -pub const _SC_XOPEN_REALTIME_THREADS: c_int = 131; -pub const _SC_ADVISORY_INFO: c_int = 132; -pub const _SC_BARRIERS: c_int = 133; -pub const _SC_BASE: c_int = 134; -pub const _SC_C_LANG_SUPPORT: c_int = 135; -pub const _SC_C_LANG_SUPPORT_R: c_int = 136; -pub const _SC_CLOCK_SELECTION: c_int = 137; -pub const _SC_CPUTIME: c_int = 138; -pub const _SC_THREAD_CPUTIME: c_int = 139; -pub const _SC_DEVICE_IO: c_int = 140; -pub const _SC_DEVICE_SPECIFIC: c_int = 141; -pub const _SC_DEVICE_SPECIFIC_R: c_int = 142; -pub const _SC_FD_MGMT: c_int = 143; -pub const _SC_FIFO: c_int = 144; -pub const _SC_PIPE: c_int = 145; -pub const _SC_FILE_ATTRIBUTES: c_int = 146; -pub const _SC_FILE_LOCKING: c_int = 147; -pub const _SC_FILE_SYSTEM: c_int = 148; -pub const _SC_MONOTONIC_CLOCK: c_int = 149; -pub const _SC_MULTI_PROCESS: c_int = 150; -pub const _SC_SINGLE_PROCESS: c_int = 151; -pub const _SC_NETWORKING: c_int = 152; -pub const _SC_READER_WRITER_LOCKS: c_int = 153; -pub const _SC_SPIN_LOCKS: c_int = 154; -pub const _SC_REGEXP: c_int = 155; -pub const _SC_REGEX_VERSION: c_int = 156; -pub const _SC_SHELL: c_int = 157; -pub const _SC_SIGNALS: c_int = 158; -pub const _SC_SPAWN: c_int = 159; -pub const _SC_SPORADIC_SERVER: c_int = 160; -pub const _SC_THREAD_SPORADIC_SERVER: c_int = 161; -pub const _SC_SYSTEM_DATABASE: c_int = 162; -pub const _SC_SYSTEM_DATABASE_R: c_int = 163; -pub const _SC_TIMEOUTS: c_int = 164; -pub const _SC_TYPED_MEMORY_OBJECTS: c_int = 165; -pub const _SC_USER_GROUPS: c_int = 166; -pub const _SC_USER_GROUPS_R: c_int = 167; -pub const _SC_2_PBS: c_int = 168; -pub const _SC_2_PBS_ACCOUNTING: c_int = 169; -pub const _SC_2_PBS_LOCATE: c_int = 170; -pub const _SC_2_PBS_MESSAGE: c_int = 171; -pub const _SC_2_PBS_TRACK: c_int = 172; -pub const _SC_SYMLOOP_MAX: c_int = 173; -pub const _SC_STREAMS: c_int = 174; -pub const _SC_2_PBS_CHECKPOINT: c_int = 175; -pub const _SC_V6_ILP32_OFF32: c_int = 176; -pub const _SC_V6_ILP32_OFFBIG: c_int = 177; -pub const _SC_V6_LP64_OFF64: c_int = 178; -pub const _SC_V6_LPBIG_OFFBIG: c_int = 179; -pub const _SC_HOST_NAME_MAX: c_int = 180; -pub const _SC_TRACE: c_int = 181; -pub const _SC_TRACE_EVENT_FILTER: c_int = 182; -pub const _SC_TRACE_INHERIT: c_int = 183; -pub const _SC_TRACE_LOG: c_int = 184; -pub const _SC_LEVEL1_ICACHE_SIZE: c_int = 185; -pub const _SC_LEVEL1_ICACHE_ASSOC: c_int = 186; -pub const _SC_LEVEL1_ICACHE_LINESIZE: c_int = 187; -pub const _SC_LEVEL1_DCACHE_SIZE: c_int = 188; -pub const _SC_LEVEL1_DCACHE_ASSOC: c_int = 189; -pub const _SC_LEVEL1_DCACHE_LINESIZE: c_int = 190; -pub const _SC_LEVEL2_CACHE_SIZE: c_int = 191; -pub const _SC_LEVEL2_CACHE_ASSOC: c_int = 192; -pub const _SC_LEVEL2_CACHE_LINESIZE: c_int = 193; -pub const _SC_LEVEL3_CACHE_SIZE: c_int = 194; -pub const 
_SC_LEVEL3_CACHE_ASSOC: c_int = 195; -pub const _SC_LEVEL3_CACHE_LINESIZE: c_int = 196; -pub const _SC_LEVEL4_CACHE_SIZE: c_int = 197; -pub const _SC_LEVEL4_CACHE_ASSOC: c_int = 198; -pub const _SC_LEVEL4_CACHE_LINESIZE: c_int = 199; -pub const _SC_IPV6: c_int = 235; -pub const _SC_RAW_SOCKETS: c_int = 236; -pub const _SC_V7_ILP32_OFF32: c_int = 237; -pub const _SC_V7_ILP32_OFFBIG: c_int = 238; -pub const _SC_V7_LP64_OFF64: c_int = 239; -pub const _SC_V7_LPBIG_OFFBIG: c_int = 240; -pub const _SC_SS_REPL_MAX: c_int = 241; -pub const _SC_TRACE_EVENT_NAME_MAX: c_int = 242; -pub const _SC_TRACE_NAME_MAX: c_int = 243; -pub const _SC_TRACE_SYS_MAX: c_int = 244; -pub const _SC_TRACE_USER_EVENT_MAX: c_int = 245; -pub const _SC_XOPEN_STREAMS: c_int = 246; -pub const _SC_THREAD_ROBUST_PRIO_INHERIT: c_int = 247; -pub const _SC_THREAD_ROBUST_PRIO_PROTECT: c_int = 248; -pub const _SC_MINSIGSTKSZ: c_int = 249; -pub const _SC_SIGSTKSZ: c_int = 250; - -pub const _CS_PATH: c_int = 0; -pub const _CS_V6_WIDTH_RESTRICTED_ENVS: c_int = 1; -pub const _CS_GNU_LIBC_VERSION: c_int = 2; -pub const _CS_GNU_LIBPTHREAD_VERSION: c_int = 3; -pub const _CS_V5_WIDTH_RESTRICTED_ENVS: c_int = 4; -pub const _CS_V7_WIDTH_RESTRICTED_ENVS: c_int = 5; -pub const _CS_LFS_CFLAGS: c_int = 1000; -pub const _CS_LFS_LDFLAGS: c_int = 1001; -pub const _CS_LFS_LIBS: c_int = 1002; -pub const _CS_LFS_LINTFLAGS: c_int = 1003; -pub const _CS_LFS64_CFLAGS: c_int = 1004; -pub const _CS_LFS64_LDFLAGS: c_int = 1005; -pub const _CS_LFS64_LIBS: c_int = 1006; -pub const _CS_LFS64_LINTFLAGS: c_int = 1007; -pub const _CS_XBS5_ILP32_OFF32_CFLAGS: c_int = 1100; -pub const _CS_XBS5_ILP32_OFF32_LDFLAGS: c_int = 1101; -pub const _CS_XBS5_ILP32_OFF32_LIBS: c_int = 1102; -pub const _CS_XBS5_ILP32_OFF32_LINTFLAGS: c_int = 1103; -pub const _CS_XBS5_ILP32_OFFBIG_CFLAGS: c_int = 1104; -pub const _CS_XBS5_ILP32_OFFBIG_LDFLAGS: c_int = 1105; -pub const _CS_XBS5_ILP32_OFFBIG_LIBS: c_int = 1106; -pub const _CS_XBS5_ILP32_OFFBIG_LINTFLAGS: c_int = 1107; -pub const _CS_XBS5_LP64_OFF64_CFLAGS: c_int = 1108; -pub const _CS_XBS5_LP64_OFF64_LDFLAGS: c_int = 1109; -pub const _CS_XBS5_LP64_OFF64_LIBS: c_int = 1110; -pub const _CS_XBS5_LP64_OFF64_LINTFLAGS: c_int = 1111; -pub const _CS_XBS5_LPBIG_OFFBIG_CFLAGS: c_int = 1112; -pub const _CS_XBS5_LPBIG_OFFBIG_LDFLAGS: c_int = 1113; -pub const _CS_XBS5_LPBIG_OFFBIG_LIBS: c_int = 1114; -pub const _CS_XBS5_LPBIG_OFFBIG_LINTFLAGS: c_int = 1115; -pub const _CS_POSIX_V6_ILP32_OFF32_CFLAGS: c_int = 1116; -pub const _CS_POSIX_V6_ILP32_OFF32_LDFLAGS: c_int = 1117; -pub const _CS_POSIX_V6_ILP32_OFF32_LIBS: c_int = 1118; -pub const _CS_POSIX_V6_ILP32_OFF32_LINTFLAGS: c_int = 1119; -pub const _CS_POSIX_V6_ILP32_OFFBIG_CFLAGS: c_int = 1120; -pub const _CS_POSIX_V6_ILP32_OFFBIG_LDFLAGS: c_int = 1121; -pub const _CS_POSIX_V6_ILP32_OFFBIG_LIBS: c_int = 1122; -pub const _CS_POSIX_V6_ILP32_OFFBIG_LINTFLAGS: c_int = 1123; -pub const _CS_POSIX_V6_LP64_OFF64_CFLAGS: c_int = 1124; -pub const _CS_POSIX_V6_LP64_OFF64_LDFLAGS: c_int = 1125; -pub const _CS_POSIX_V6_LP64_OFF64_LIBS: c_int = 1126; -pub const _CS_POSIX_V6_LP64_OFF64_LINTFLAGS: c_int = 1127; -pub const _CS_POSIX_V6_LPBIG_OFFBIG_CFLAGS: c_int = 1128; -pub const _CS_POSIX_V6_LPBIG_OFFBIG_LDFLAGS: c_int = 1129; -pub const _CS_POSIX_V6_LPBIG_OFFBIG_LIBS: c_int = 1130; -pub const _CS_POSIX_V6_LPBIG_OFFBIG_LINTFLAGS: c_int = 1131; -pub const _CS_POSIX_V7_ILP32_OFF32_CFLAGS: c_int = 1132; -pub const _CS_POSIX_V7_ILP32_OFF32_LDFLAGS: c_int = 1133; -pub const _CS_POSIX_V7_ILP32_OFF32_LIBS: c_int = 
1134; -pub const _CS_POSIX_V7_ILP32_OFF32_LINTFLAGS: c_int = 1135; -pub const _CS_POSIX_V7_ILP32_OFFBIG_CFLAGS: c_int = 1136; -pub const _CS_POSIX_V7_ILP32_OFFBIG_LDFLAGS: c_int = 1137; -pub const _CS_POSIX_V7_ILP32_OFFBIG_LIBS: c_int = 1138; -pub const _CS_POSIX_V7_ILP32_OFFBIG_LINTFLAGS: c_int = 1139; -pub const _CS_POSIX_V7_LP64_OFF64_CFLAGS: c_int = 1140; -pub const _CS_POSIX_V7_LP64_OFF64_LDFLAGS: c_int = 1141; -pub const _CS_POSIX_V7_LP64_OFF64_LIBS: c_int = 1142; -pub const _CS_POSIX_V7_LP64_OFF64_LINTFLAGS: c_int = 1143; -pub const _CS_POSIX_V7_LPBIG_OFFBIG_CFLAGS: c_int = 1144; -pub const _CS_POSIX_V7_LPBIG_OFFBIG_LDFLAGS: c_int = 1145; -pub const _CS_POSIX_V7_LPBIG_OFFBIG_LIBS: c_int = 1146; -pub const _CS_POSIX_V7_LPBIG_OFFBIG_LINTFLAGS: c_int = 1147; -pub const _CS_V6_ENV: c_int = 1148; -pub const _CS_V7_ENV: c_int = 1149; - -pub const PTHREAD_PROCESS_PRIVATE: __pthread_process_shared = 0; -pub const PTHREAD_PROCESS_SHARED: __pthread_process_shared = 1; - -pub const PTHREAD_EXPLICIT_SCHED: __pthread_inheritsched = 0; -pub const PTHREAD_INHERIT_SCHED: __pthread_inheritsched = 1; - -pub const PTHREAD_SCOPE_SYSTEM: __pthread_contentionscope = 0; -pub const PTHREAD_SCOPE_PROCESS: __pthread_contentionscope = 1; - -pub const PTHREAD_CREATE_JOINABLE: __pthread_detachstate = 0; -pub const PTHREAD_CREATE_DETACHED: __pthread_detachstate = 1; - -pub const PTHREAD_PRIO_NONE: __pthread_mutex_protocol = 0; -pub const PTHREAD_PRIO_INHERIT: __pthread_mutex_protocol = 1; -pub const PTHREAD_PRIO_PROTECT: __pthread_mutex_protocol = 2; - -pub const PTHREAD_MUTEX_TIMED: __pthread_mutex_type = 0; -pub const PTHREAD_MUTEX_ERRORCHECK: __pthread_mutex_type = 1; -pub const PTHREAD_MUTEX_RECURSIVE: __pthread_mutex_type = 2; - -pub const PTHREAD_MUTEX_STALLED: __pthread_mutex_robustness = 0; -pub const PTHREAD_MUTEX_ROBUST: __pthread_mutex_robustness = 256; - -pub const RLIMIT_CPU: crate::__rlimit_resource_t = 0; -pub const RLIMIT_FSIZE: crate::__rlimit_resource_t = 1; -pub const RLIMIT_DATA: crate::__rlimit_resource_t = 2; -pub const RLIMIT_STACK: crate::__rlimit_resource_t = 3; -pub const RLIMIT_CORE: crate::__rlimit_resource_t = 4; -pub const RLIMIT_RSS: crate::__rlimit_resource_t = 5; -pub const RLIMIT_MEMLOCK: crate::__rlimit_resource_t = 6; -pub const RLIMIT_NPROC: crate::__rlimit_resource_t = 7; -pub const RLIMIT_OFILE: crate::__rlimit_resource_t = 8; -pub const RLIMIT_NOFILE: crate::__rlimit_resource_t = 8; -pub const RLIMIT_SBSIZE: crate::__rlimit_resource_t = 9; -pub const RLIMIT_AS: crate::__rlimit_resource_t = 10; -pub const RLIMIT_VMEM: crate::__rlimit_resource_t = 10; -pub const RLIMIT_NLIMITS: crate::__rlimit_resource_t = 11; -pub const RLIM_NLIMITS: crate::__rlimit_resource_t = 11; - -pub const RUSAGE_SELF: __rusage_who = 0; -pub const RUSAGE_CHILDREN: __rusage_who = -1; - -pub const PRIO_PROCESS: __priority_which = 0; -pub const PRIO_PGRP: __priority_which = 1; -pub const PRIO_USER: __priority_which = 2; - -pub const __UT_LINESIZE: usize = 32; -pub const __UT_NAMESIZE: usize = 32; -pub const __UT_HOSTSIZE: usize = 256; - -pub const SOCK_STREAM: c_int = 1; -pub const SOCK_DGRAM: c_int = 2; -pub const SOCK_RAW: c_int = 3; -pub const SOCK_RDM: c_int = 4; -pub const SOCK_SEQPACKET: c_int = 5; -pub const SOCK_CLOEXEC: c_int = 4194304; -pub const SOCK_NONBLOCK: c_int = 2048; - -pub const MSG_OOB: c_int = 1; -pub const MSG_PEEK: c_int = 2; -pub const MSG_DONTROUTE: c_int = 4; -pub const MSG_EOR: c_int = 8; -pub const MSG_TRUNC: c_int = 16; -pub const MSG_CTRUNC: c_int = 32; -pub const 
MSG_WAITALL: c_int = 64; -pub const MSG_DONTWAIT: c_int = 128; -pub const MSG_NOSIGNAL: c_int = 1024; -pub const MSG_CMSG_CLOEXEC: c_int = 0x40000000; - -pub const SCM_RIGHTS: c_int = 1; -pub const SCM_TIMESTAMP: c_int = 2; -pub const SCM_CREDS: c_int = 3; - -pub const SO_DEBUG: c_int = 1; -pub const SO_ACCEPTCONN: c_int = 2; -pub const SO_REUSEADDR: c_int = 4; -pub const SO_KEEPALIVE: c_int = 8; -pub const SO_DONTROUTE: c_int = 16; -pub const SO_BROADCAST: c_int = 32; -pub const SO_USELOOPBACK: c_int = 64; -pub const SO_LINGER: c_int = 128; -pub const SO_OOBINLINE: c_int = 256; -pub const SO_REUSEPORT: c_int = 512; -pub const SO_SNDBUF: c_int = 4097; -pub const SO_RCVBUF: c_int = 4098; -pub const SO_SNDLOWAT: c_int = 4099; -pub const SO_RCVLOWAT: c_int = 4100; -pub const SO_SNDTIMEO: c_int = 4101; -pub const SO_RCVTIMEO: c_int = 4102; -pub const SO_ERROR: c_int = 4103; -pub const SO_STYLE: c_int = 4104; -pub const SO_TYPE: c_int = 4104; - -pub const IPPROTO_IP: c_int = 0; -pub const IPPROTO_ICMP: c_int = 1; -pub const IPPROTO_IGMP: c_int = 2; -pub const IPPROTO_IPIP: c_int = 4; -pub const IPPROTO_TCP: c_int = 6; -pub const IPPROTO_EGP: c_int = 8; -pub const IPPROTO_PUP: c_int = 12; -pub const IPPROTO_UDP: c_int = 17; -pub const IPPROTO_IDP: c_int = 22; -pub const IPPROTO_TP: c_int = 29; -pub const IPPROTO_DCCP: c_int = 33; -pub const IPPROTO_IPV6: c_int = 41; -pub const IPPROTO_RSVP: c_int = 46; -pub const IPPROTO_GRE: c_int = 47; -pub const IPPROTO_ESP: c_int = 50; -pub const IPPROTO_AH: c_int = 51; -pub const IPPROTO_MTP: c_int = 92; -pub const IPPROTO_BEETPH: c_int = 94; -pub const IPPROTO_ENCAP: c_int = 98; -pub const IPPROTO_PIM: c_int = 103; -pub const IPPROTO_COMP: c_int = 108; -pub const IPPROTO_L2TP: c_int = 115; -pub const IPPROTO_SCTP: c_int = 132; -pub const IPPROTO_UDPLITE: c_int = 136; -pub const IPPROTO_MPLS: c_int = 137; -pub const IPPROTO_ETHERNET: c_int = 143; -pub const IPPROTO_RAW: c_int = 255; -pub const IPPROTO_MPTCP: c_int = 262; -pub const IPPROTO_MAX: c_int = 263; - -pub const IPPROTO_HOPOPTS: c_int = 0; -pub const IPPROTO_ROUTING: c_int = 43; -pub const IPPROTO_FRAGMENT: c_int = 44; -pub const IPPROTO_ICMPV6: c_int = 58; -pub const IPPROTO_NONE: c_int = 59; -pub const IPPROTO_DSTOPTS: c_int = 60; -pub const IPPROTO_MH: c_int = 135; - -pub const IPPORT_ECHO: in_port_t = 7; -pub const IPPORT_DISCARD: in_port_t = 9; -pub const IPPORT_SYSTAT: in_port_t = 11; -pub const IPPORT_DAYTIME: in_port_t = 13; -pub const IPPORT_NETSTAT: in_port_t = 15; -pub const IPPORT_FTP: in_port_t = 21; -pub const IPPORT_TELNET: in_port_t = 23; -pub const IPPORT_SMTP: in_port_t = 25; -pub const IPPORT_TIMESERVER: in_port_t = 37; -pub const IPPORT_NAMESERVER: in_port_t = 42; -pub const IPPORT_WHOIS: in_port_t = 43; -pub const IPPORT_MTP: in_port_t = 57; -pub const IPPORT_TFTP: in_port_t = 69; -pub const IPPORT_RJE: in_port_t = 77; -pub const IPPORT_FINGER: in_port_t = 79; -pub const IPPORT_TTYLINK: in_port_t = 87; -pub const IPPORT_SUPDUP: in_port_t = 95; -pub const IPPORT_EXECSERVER: in_port_t = 512; -pub const IPPORT_LOGINSERVER: in_port_t = 513; -pub const IPPORT_CMDSERVER: in_port_t = 514; -pub const IPPORT_EFSSERVER: in_port_t = 520; -pub const IPPORT_BIFFUDP: in_port_t = 512; -pub const IPPORT_WHOSERVER: in_port_t = 513; -pub const IPPORT_ROUTESERVER: in_port_t = 520; -pub const IPPORT_USERRESERVED: in_port_t = 5000; - -pub const DT_UNKNOWN: c_uchar = 0; -pub const DT_FIFO: c_uchar = 1; -pub const DT_CHR: c_uchar = 2; -pub const DT_DIR: c_uchar = 4; -pub const DT_BLK: c_uchar = 6; 
-pub const DT_REG: c_uchar = 8; -pub const DT_LNK: c_uchar = 10; -pub const DT_SOCK: c_uchar = 12; -pub const DT_WHT: c_uchar = 14; - -pub const ST_RDONLY: c_ulong = 1; -pub const ST_NOSUID: c_ulong = 2; -pub const ST_NOEXEC: c_ulong = 8; -pub const ST_SYNCHRONOUS: c_ulong = 16; -pub const ST_NOATIME: c_ulong = 32; -pub const ST_RELATIME: c_ulong = 64; - -pub const RTLD_DI_LMID: c_int = 1; -pub const RTLD_DI_LINKMAP: c_int = 2; -pub const RTLD_DI_CONFIGADDR: c_int = 3; -pub const RTLD_DI_SERINFO: c_int = 4; -pub const RTLD_DI_SERINFOSIZE: c_int = 5; -pub const RTLD_DI_ORIGIN: c_int = 6; -pub const RTLD_DI_PROFILENAME: c_int = 7; -pub const RTLD_DI_PROFILEOUT: c_int = 8; -pub const RTLD_DI_TLS_MODID: c_int = 9; -pub const RTLD_DI_TLS_DATA: c_int = 10; -pub const RTLD_DI_PHDR: c_int = 11; -pub const RTLD_DI_MAX: c_int = 11; - -pub const SI_ASYNCIO: c_int = -4; -pub const SI_MESGQ: c_int = -3; -pub const SI_TIMER: c_int = -2; -pub const SI_QUEUE: c_int = -1; -pub const SI_USER: c_int = 0; - -pub const ILL_ILLOPC: c_int = 1; -pub const ILL_ILLOPN: c_int = 2; -pub const ILL_ILLADR: c_int = 3; -pub const ILL_ILLTRP: c_int = 4; -pub const ILL_PRVOPC: c_int = 5; -pub const ILL_PRVREG: c_int = 6; -pub const ILL_COPROC: c_int = 7; -pub const ILL_BADSTK: c_int = 8; - -pub const FPE_INTDIV: c_int = 1; -pub const FPE_INTOVF: c_int = 2; -pub const FPE_FLTDIV: c_int = 3; -pub const FPE_FLTOVF: c_int = 4; -pub const FPE_FLTUND: c_int = 5; -pub const FPE_FLTRES: c_int = 6; -pub const FPE_FLTINV: c_int = 7; -pub const FPE_FLTSUB: c_int = 8; - -pub const SEGV_MAPERR: c_int = 1; -pub const SEGV_ACCERR: c_int = 2; - -pub const BUS_ADRALN: c_int = 1; -pub const BUS_ADRERR: c_int = 2; -pub const BUS_OBJERR: c_int = 3; - -pub const TRAP_BRKPT: c_int = 1; -pub const TRAP_TRACE: c_int = 2; - -pub const CLD_EXITED: c_int = 1; -pub const CLD_KILLED: c_int = 2; -pub const CLD_DUMPED: c_int = 3; -pub const CLD_TRAPPED: c_int = 4; -pub const CLD_STOPPED: c_int = 5; -pub const CLD_CONTINUED: c_int = 6; - -pub const POLL_IN: c_int = 1; -pub const POLL_OUT: c_int = 2; -pub const POLL_MSG: c_int = 3; -pub const POLL_ERR: c_int = 4; -pub const POLL_PRI: c_int = 5; -pub const POLL_HUP: c_int = 6; - -pub const SIGEV_SIGNAL: c_int = 0; -pub const SIGEV_NONE: c_int = 1; -pub const SIGEV_THREAD: c_int = 2; - -pub const REG_GS: c_uint = 0; -pub const REG_FS: c_uint = 1; -pub const REG_ES: c_uint = 2; -pub const REG_DS: c_uint = 3; -pub const REG_EDI: c_uint = 4; -pub const REG_ESI: c_uint = 5; -pub const REG_EBP: c_uint = 6; -pub const REG_ESP: c_uint = 7; -pub const REG_EBX: c_uint = 8; -pub const REG_EDX: c_uint = 9; -pub const REG_ECX: c_uint = 10; -pub const REG_EAX: c_uint = 11; -pub const REG_TRAPNO: c_uint = 12; -pub const REG_ERR: c_uint = 13; -pub const REG_EIP: c_uint = 14; -pub const REG_CS: c_uint = 15; -pub const REG_EFL: c_uint = 16; -pub const REG_UESP: c_uint = 17; -pub const REG_SS: c_uint = 18; - -pub const IOC_VOID: __ioctl_dir = 0; -pub const IOC_OUT: __ioctl_dir = 1; -pub const IOC_IN: __ioctl_dir = 2; -pub const IOC_INOUT: __ioctl_dir = 3; - -pub const IOC_8: __ioctl_datum = 0; -pub const IOC_16: __ioctl_datum = 1; -pub const IOC_32: __ioctl_datum = 2; -pub const IOC_64: __ioctl_datum = 3; - -pub const TCP_ESTABLISHED: c_uint = 1; -pub const TCP_SYN_SENT: c_uint = 2; -pub const TCP_SYN_RECV: c_uint = 3; -pub const TCP_FIN_WAIT1: c_uint = 4; -pub const TCP_FIN_WAIT2: c_uint = 5; -pub const TCP_TIME_WAIT: c_uint = 6; -pub const TCP_CLOSE: c_uint = 7; -pub const TCP_CLOSE_WAIT: c_uint = 8; -pub const 
TCP_LAST_ACK: c_uint = 9;
-pub const TCP_LISTEN: c_uint = 10;
-pub const TCP_CLOSING: c_uint = 11;
-
-pub const TCP_CA_Open: tcp_ca_state = 0;
-pub const TCP_CA_Disorder: tcp_ca_state = 1;
-pub const TCP_CA_CWR: tcp_ca_state = 2;
-pub const TCP_CA_Recovery: tcp_ca_state = 3;
-pub const TCP_CA_Loss: tcp_ca_state = 4;
-
-pub const TCP_NO_QUEUE: c_uint = 0;
-pub const TCP_RECV_QUEUE: c_uint = 1;
-pub const TCP_SEND_QUEUE: c_uint = 2;
-pub const TCP_QUEUES_NR: c_uint = 3;
-
-pub const P_ALL: idtype_t = 0;
-pub const P_PID: idtype_t = 1;
-pub const P_PGID: idtype_t = 2;
-
-pub const SS_ONSTACK: c_int = 1;
-pub const SS_DISABLE: c_int = 4;
-
-pub const SHUT_RD: c_int = 0;
-pub const SHUT_WR: c_int = 1;
-pub const SHUT_RDWR: c_int = 2;
-pub const PTHREAD_MUTEX_INITIALIZER: pthread_mutex_t = pthread_mutex_t {
-    __lock: 0,
-    __owner_id: 0,
-    __cnt: 0,
-    __shpid: 0,
-    __type: PTHREAD_MUTEX_TIMED as c_int,
-    __flags: 0,
-    __reserved1: 0,
-    __reserved2: 0,
-};
-pub const PTHREAD_COND_INITIALIZER: pthread_cond_t = pthread_cond_t {
-    __lock: __PTHREAD_SPIN_LOCK_INITIALIZER,
-    __queue: 0i64 as *mut __pthread,
-    __attr: 0i64 as *mut __pthread_condattr,
-    __wrefs: 0,
-    __data: 0i64 as *mut c_void,
-};
-pub const PTHREAD_RWLOCK_INITIALIZER: pthread_rwlock_t = pthread_rwlock_t {
-    __held: __PTHREAD_SPIN_LOCK_INITIALIZER,
-    __lock: __PTHREAD_SPIN_LOCK_INITIALIZER,
-    __readers: 0,
-    __readerqueue: 0i64 as *mut __pthread,
-    __writerqueue: 0i64 as *mut __pthread,
-    __attr: 0i64 as *mut __pthread_rwlockattr,
-    __data: 0i64 as *mut c_void,
-};
-pub const PTHREAD_STACK_MIN: size_t = 0;
-
-// Non-public helper constants
-const _UTSNAME_LENGTH: usize = 1024;
-
-const fn CMSG_ALIGN(len: usize) -> usize {
-    (len + size_of::<usize>() - 1) & !(size_of::<usize>() - 1)
-}
-
-// functions
-f! {
-    pub fn CMSG_FIRSTHDR(mhdr: *const msghdr) -> *mut cmsghdr {
-        if (*mhdr).msg_controllen as usize >= size_of::<cmsghdr>() {
-            (*mhdr).msg_control.cast::<cmsghdr>()
-        } else {
-            core::ptr::null_mut::<cmsghdr>()
-        }
-    }
-
-    pub fn CMSG_DATA(cmsg: *const cmsghdr) -> *mut c_uchar {
-        (cmsg as *mut c_uchar).offset(CMSG_ALIGN(size_of::<cmsghdr>()) as isize)
-    }
-
-    pub const fn CMSG_SPACE(length: c_uint) -> c_uint {
-        (CMSG_ALIGN(length as usize) + CMSG_ALIGN(size_of::<cmsghdr>())) as c_uint
-    }
-
-    pub const fn CMSG_LEN(length: c_uint) -> c_uint {
-        CMSG_ALIGN(size_of::<cmsghdr>()) as c_uint + length
-    }
-
-    pub fn CMSG_NXTHDR(mhdr: *const msghdr, cmsg: *const cmsghdr) -> *mut cmsghdr {
-        if ((*cmsg).cmsg_len as usize) < size_of::<cmsghdr>() {
-            return core::ptr::null_mut::<cmsghdr>();
-        }
-        let next = (cmsg as usize + CMSG_ALIGN((*cmsg).cmsg_len as usize)) as *mut cmsghdr;
-        let max = (*mhdr).msg_control as usize + (*mhdr).msg_controllen as usize;
-        if (next.offset(1)) as usize > max
-            || next as usize + CMSG_ALIGN((*next).cmsg_len as usize) > max
-        {
-            core::ptr::null_mut::<cmsghdr>()
-        } else {
-            next.cast::<cmsghdr>()
-        }
-    }
-
-    pub fn CPU_ALLOC_SIZE(count: c_int) -> size_t {
-        let _dummy: cpu_set_t = mem::zeroed();
-        let size_in_bits = 8 * size_of_val(&_dummy.bits[0]);
-        ((count as size_t + size_in_bits - 1) / 8) as size_t
-    }
-
-    pub fn CPU_ZERO(cpuset: &mut cpu_set_t) -> () {
-        for slot in cpuset.bits.iter_mut() {
-            *slot = 0;
-        }
-    }
-
-    pub fn CPU_SET(cpu: usize, cpuset: &mut cpu_set_t) -> () {
-        let size_in_bits = 8 * size_of_val(&cpuset.bits[0]); // 32, 64 etc
-        let (idx, offset) = (cpu / size_in_bits, cpu % size_in_bits);
-        cpuset.bits[idx] |= 1 << offset;
-    }
-
-    pub fn CPU_CLR(cpu: usize, cpuset: &mut cpu_set_t) -> () {
-        let size_in_bits = 8 * size_of_val(&cpuset.bits[0]); // 32, 64 etc
-        let (idx, offset) = (cpu / size_in_bits, cpu % size_in_bits);
-        cpuset.bits[idx] &= !(1 << offset);
-    }
-
-    pub fn CPU_ISSET(cpu: usize, cpuset: &cpu_set_t) -> bool {
-        let size_in_bits = 8 * size_of_val(&cpuset.bits[0]);
-        let (idx, offset) = (cpu / size_in_bits, cpu % size_in_bits);
-        0 != (cpuset.bits[idx] & (1 << offset))
-    }
-
-    pub fn CPU_COUNT_S(size: usize, cpuset: &cpu_set_t) -> c_int {
-        let mut s: u32 = 0;
-        let size_of_mask = size_of_val(&cpuset.bits[0]);
-        for i in cpuset.bits[..(size / size_of_mask)].iter() {
-            s += i.count_ones();
-        }
-        s as c_int
-    }
-
-    pub fn CPU_COUNT(cpuset: &cpu_set_t) -> c_int {
-        CPU_COUNT_S(size_of::<cpu_set_t>(), cpuset)
-    }
-
-    pub fn CPU_EQUAL(set1: &cpu_set_t, set2: &cpu_set_t) -> bool {
-        set1.bits == set2.bits
-    }
-
-    pub fn IPTOS_TOS(tos: u8) -> u8 {
-        tos & IPTOS_TOS_MASK
-    }
-
-    pub fn IPTOS_PREC(tos: u8) -> u8 {
-        tos & IPTOS_PREC_MASK
-    }
-
-    pub fn FD_CLR(fd: c_int, set: *mut fd_set) -> () {
-        let fd = fd as usize;
-        let size = size_of_val(&(*set).fds_bits[0]) * 8;
-        (*set).fds_bits[fd / size] &= !(1 << (fd % size));
-        return;
-    }
-
-    pub fn FD_ISSET(fd: c_int, set: *const fd_set) -> bool {
-        let fd = fd as usize;
-        let size = size_of_val(&(*set).fds_bits[0]) * 8;
-        return ((*set).fds_bits[fd / size] & (1 << (fd % size))) != 0;
-    }
-
-    pub fn FD_SET(fd: c_int, set: *mut fd_set) -> () {
-        let fd = fd as usize;
-        let size = size_of_val(&(*set).fds_bits[0]) * 8;
-        (*set).fds_bits[fd / size] |= 1 << (fd % size);
-        return;
-    }
-
-    pub fn FD_ZERO(set: *mut fd_set) -> () {
-        for slot in (*set).fds_bits.iter_mut() {
-            *slot = 0;
-        }
-    }
-}
-
-extern "C" {
-    pub fn lutimes(file: *const c_char, times: *const crate::timeval) -> c_int;
-
-    pub fn futimes(fd: c_int, times: *const crate::timeval) -> c_int;
-    pub fn futimens(__fd: c_int, __times: *const crate::timespec) -> c_int;
-
-    pub fn utimensat(
-        dirfd: c_int,
-        path: *const c_char,
-        times: *const crate::timespec,
-        flag: c_int,
-    ) -> c_int;
-
-    pub fn mkfifoat(__fd: c_int, __path: *const c_char, __mode: __mode_t) -> c_int;
-
-    pub fn mknodat(dirfd: c_int, pathname: *const c_char, mode: mode_t, dev: dev_t) -> c_int;
-
-    pub fn __libc_current_sigrtmin() -> c_int;
-
-    pub fn __libc_current_sigrtmax() -> c_int;
-
-    pub fn wait4(
-        pid: crate::pid_t,
-        status: *mut c_int,
-        options: c_int,
-        rusage: *mut crate::rusage,
-    ) -> crate::pid_t;
-
-    pub fn waitid(
-        idtype: idtype_t,
-        id: id_t,
-        infop: *mut crate::siginfo_t,
-        options: c_int,
-    ) -> c_int;
-
-    pub fn sigwait(__set: *const sigset_t, __sig: *mut c_int) -> c_int;
-
-    pub fn sigsuspend(mask: *const crate::sigset_t) -> c_int;
-    pub fn sigtimedwait(
-        set: *const sigset_t,
-        info: *mut siginfo_t,
-        timeout: *const crate::timespec,
-    ) -> c_int;
-    pub fn sigwaitinfo(set: *const sigset_t, info: *mut siginfo_t) -> c_int;
-
-    pub fn sigaltstack(ss: *const stack_t, oss: *mut stack_t) -> c_int;
-
-    pub fn ioctl(__fd: c_int, __request: c_ulong, ...)
-> c_int; - - pub fn pipe2(fds: *mut c_int, flags: c_int) -> c_int; - - pub fn dup3(oldfd: c_int, newfd: c_int, flags: c_int) -> c_int; - - pub fn pread64(fd: c_int, buf: *mut c_void, count: size_t, offset: off64_t) -> ssize_t; - pub fn pwrite64(fd: c_int, buf: *const c_void, count: size_t, offset: off64_t) -> ssize_t; - - pub fn readv(__fd: c_int, __iovec: *const crate::iovec, __count: c_int) -> ssize_t; - pub fn writev(__fd: c_int, __iovec: *const crate::iovec, __count: c_int) -> ssize_t; - - pub fn preadv( - __fd: c_int, - __iovec: *const crate::iovec, - __count: c_int, - __offset: __off_t, - ) -> ssize_t; - pub fn pwritev( - __fd: c_int, - __iovec: *const crate::iovec, - __count: c_int, - __offset: __off_t, - ) -> ssize_t; - - pub fn preadv64(fd: c_int, iov: *const crate::iovec, iovcnt: c_int, offset: off64_t) - -> ssize_t; - pub fn pwritev64( - fd: c_int, - iov: *const crate::iovec, - iovcnt: c_int, - offset: off64_t, - ) -> ssize_t; - - pub fn fread_unlocked( - buf: *mut c_void, - size: size_t, - nobj: size_t, - stream: *mut crate::FILE, - ) -> size_t; - - pub fn aio_read(aiocbp: *mut aiocb) -> c_int; - pub fn aio_write(aiocbp: *mut aiocb) -> c_int; - pub fn aio_fsync(op: c_int, aiocbp: *mut aiocb) -> c_int; - pub fn aio_error(aiocbp: *const aiocb) -> c_int; - pub fn aio_return(aiocbp: *mut aiocb) -> ssize_t; - pub fn aio_suspend( - aiocb_list: *const *const aiocb, - nitems: c_int, - timeout: *const crate::timespec, - ) -> c_int; - pub fn aio_cancel(fd: c_int, aiocbp: *mut aiocb) -> c_int; - pub fn lio_listio( - mode: c_int, - aiocb_list: *const *mut aiocb, - nitems: c_int, - sevp: *mut crate::sigevent, - ) -> c_int; - - pub fn mq_open(name: *const c_char, oflag: c_int, ...) -> crate::mqd_t; - pub fn mq_close(mqd: crate::mqd_t) -> c_int; - pub fn mq_unlink(name: *const c_char) -> c_int; - pub fn mq_receive( - mqd: crate::mqd_t, - msg_ptr: *mut c_char, - msg_len: size_t, - msg_prio: *mut c_uint, - ) -> ssize_t; - pub fn mq_timedreceive( - mqd: crate::mqd_t, - msg_ptr: *mut c_char, - msg_len: size_t, - msg_prio: *mut c_uint, - abs_timeout: *const crate::timespec, - ) -> ssize_t; - pub fn mq_send( - mqd: crate::mqd_t, - msg_ptr: *const c_char, - msg_len: size_t, - msg_prio: c_uint, - ) -> c_int; - pub fn mq_timedsend( - mqd: crate::mqd_t, - msg_ptr: *const c_char, - msg_len: size_t, - msg_prio: c_uint, - abs_timeout: *const crate::timespec, - ) -> c_int; - pub fn mq_getattr(mqd: crate::mqd_t, attr: *mut crate::mq_attr) -> c_int; - pub fn mq_setattr( - mqd: crate::mqd_t, - newattr: *const crate::mq_attr, - oldattr: *mut crate::mq_attr, - ) -> c_int; - - pub fn lseek64(__fd: c_int, __offset: __off64_t, __whence: c_int) -> __off64_t; - - pub fn lseek(__fd: c_int, __offset: __off_t, __whence: c_int) -> __off_t; - - pub fn fgetpos64(stream: *mut crate::FILE, ptr: *mut fpos64_t) -> c_int; - pub fn fseeko64(stream: *mut crate::FILE, offset: off64_t, whence: c_int) -> c_int; - pub fn fsetpos64(stream: *mut crate::FILE, ptr: *const fpos64_t) -> c_int; - pub fn ftello64(stream: *mut crate::FILE) -> off64_t; - - pub fn bind(__fd: c_int, __addr: *const sockaddr, __len: crate::socklen_t) -> c_int; - - pub fn accept4( - fd: c_int, - addr: *mut crate::sockaddr, - len: *mut crate::socklen_t, - flg: c_int, - ) -> c_int; - - pub fn ppoll( - fds: *mut crate::pollfd, - nfds: nfds_t, - timeout: *const crate::timespec, - sigmask: *const sigset_t, - ) -> c_int; - - pub fn recvmsg(__fd: c_int, __message: *mut msghdr, __flags: c_int) -> ssize_t; - - pub fn sendmsg(__fd: c_int, __message: *const msghdr, __flags: 
c_int) -> ssize_t; - - pub fn recvfrom( - socket: c_int, - buf: *mut c_void, - len: size_t, - flags: c_int, - addr: *mut crate::sockaddr, - addrlen: *mut crate::socklen_t, - ) -> ssize_t; - - pub fn sendfile(out_fd: c_int, in_fd: c_int, offset: *mut off_t, count: size_t) -> ssize_t; - pub fn sendfile64(out_fd: c_int, in_fd: c_int, offset: *mut off64_t, count: size_t) -> ssize_t; - - pub fn shutdown(__fd: c_int, __how: c_int) -> c_int; - - pub fn sethostname(name: *const c_char, len: size_t) -> c_int; - pub fn getdomainname(name: *mut c_char, len: size_t) -> c_int; - pub fn setdomainname(name: *const c_char, len: size_t) -> c_int; - pub fn if_nameindex() -> *mut if_nameindex; - pub fn if_freenameindex(ptr: *mut if_nameindex); - - pub fn getnameinfo( - sa: *const crate::sockaddr, - salen: crate::socklen_t, - host: *mut c_char, - hostlen: crate::socklen_t, - serv: *mut c_char, - servlen: crate::socklen_t, - flags: c_int, - ) -> c_int; - - pub fn getifaddrs(ifap: *mut *mut crate::ifaddrs) -> c_int; - pub fn freeifaddrs(ifa: *mut crate::ifaddrs); - - pub fn uname(buf: *mut crate::utsname) -> c_int; - - pub fn gethostid() -> c_long; - pub fn sethostid(hostid: c_long) -> c_int; - - pub fn setpwent(); - pub fn endpwent(); - pub fn getpwent() -> *mut passwd; - pub fn setgrent(); - pub fn endgrent(); - pub fn getgrent() -> *mut crate::group; - pub fn setspent(); - pub fn endspent(); - pub fn getspent() -> *mut spwd; - - pub fn getspnam(name: *const c_char) -> *mut spwd; - - pub fn getpwent_r( - pwd: *mut crate::passwd, - buf: *mut c_char, - buflen: size_t, - result: *mut *mut crate::passwd, - ) -> c_int; - pub fn getgrent_r( - grp: *mut crate::group, - buf: *mut c_char, - buflen: size_t, - result: *mut *mut crate::group, - ) -> c_int; - pub fn fgetpwent_r( - stream: *mut crate::FILE, - pwd: *mut crate::passwd, - buf: *mut c_char, - buflen: size_t, - result: *mut *mut crate::passwd, - ) -> c_int; - pub fn fgetgrent_r( - stream: *mut crate::FILE, - grp: *mut crate::group, - buf: *mut c_char, - buflen: size_t, - result: *mut *mut crate::group, - ) -> c_int; - - pub fn putpwent(p: *const crate::passwd, stream: *mut crate::FILE) -> c_int; - pub fn putgrent(grp: *const crate::group, stream: *mut crate::FILE) -> c_int; - - pub fn getpwnam_r( - name: *const c_char, - pwd: *mut passwd, - buf: *mut c_char, - buflen: size_t, - result: *mut *mut passwd, - ) -> c_int; - - pub fn getpwuid_r( - uid: crate::uid_t, - pwd: *mut passwd, - buf: *mut c_char, - buflen: size_t, - result: *mut *mut passwd, - ) -> c_int; - - pub fn fgetspent_r( - fp: *mut crate::FILE, - spbuf: *mut crate::spwd, - buf: *mut c_char, - buflen: size_t, - spbufp: *mut *mut crate::spwd, - ) -> c_int; - pub fn sgetspent_r( - s: *const c_char, - spbuf: *mut crate::spwd, - buf: *mut c_char, - buflen: size_t, - spbufp: *mut *mut crate::spwd, - ) -> c_int; - pub fn getspent_r( - spbuf: *mut crate::spwd, - buf: *mut c_char, - buflen: size_t, - spbufp: *mut *mut crate::spwd, - ) -> c_int; - - pub fn getspnam_r( - name: *const c_char, - spbuf: *mut spwd, - buf: *mut c_char, - buflen: size_t, - spbufp: *mut *mut spwd, - ) -> c_int; - - // mntent.h - pub fn getmntent_r( - stream: *mut crate::FILE, - mntbuf: *mut crate::mntent, - buf: *mut c_char, - buflen: c_int, - ) -> *mut crate::mntent; - - pub fn utmpname(file: *const c_char) -> c_int; - pub fn utmpxname(file: *const c_char) -> c_int; - pub fn getutxent() -> *mut utmpx; - pub fn getutxid(ut: *const utmpx) -> *mut utmpx; - pub fn getutxline(ut: *const utmpx) -> *mut utmpx; - pub fn pututxline(ut: *const 
utmpx) -> *mut utmpx; - pub fn setutxent(); - pub fn endutxent(); - - pub fn getresuid( - ruid: *mut crate::uid_t, - euid: *mut crate::uid_t, - suid: *mut crate::uid_t, - ) -> c_int; - pub fn getresgid( - rgid: *mut crate::gid_t, - egid: *mut crate::gid_t, - sgid: *mut crate::gid_t, - ) -> c_int; - pub fn setresuid(ruid: crate::uid_t, euid: crate::uid_t, suid: crate::uid_t) -> c_int; - pub fn setresgid(rgid: crate::gid_t, egid: crate::gid_t, sgid: crate::gid_t) -> c_int; - - pub fn initgroups(user: *const c_char, group: crate::gid_t) -> c_int; - - pub fn getgrgid(gid: crate::gid_t) -> *mut crate::group; - pub fn getgrgid_r( - gid: crate::gid_t, - grp: *mut crate::group, - buf: *mut c_char, - buflen: size_t, - result: *mut *mut crate::group, - ) -> c_int; - - pub fn getgrnam(name: *const c_char) -> *mut crate::group; - pub fn getgrnam_r( - name: *const c_char, - grp: *mut crate::group, - buf: *mut c_char, - buflen: size_t, - result: *mut *mut crate::group, - ) -> c_int; - - pub fn getgrouplist( - user: *const c_char, - group: crate::gid_t, - groups: *mut crate::gid_t, - ngroups: *mut c_int, - ) -> c_int; - - pub fn setgroups(ngroups: size_t, ptr: *const crate::gid_t) -> c_int; - - pub fn acct(filename: *const c_char) -> c_int; - - pub fn setmntent(filename: *const c_char, ty: *const c_char) -> *mut crate::FILE; - pub fn getmntent(stream: *mut crate::FILE) -> *mut crate::mntent; - pub fn addmntent(stream: *mut crate::FILE, mnt: *const crate::mntent) -> c_int; - pub fn endmntent(streamp: *mut crate::FILE) -> c_int; - pub fn hasmntopt(mnt: *const crate::mntent, opt: *const c_char) -> *mut c_char; - - pub fn pthread_create( - native: *mut crate::pthread_t, - attr: *const crate::pthread_attr_t, - f: extern "C" fn(*mut c_void) -> *mut c_void, - value: *mut c_void, - ) -> c_int; - pub fn pthread_kill(__threadid: crate::pthread_t, __signo: c_int) -> c_int; - pub fn pthread_cancel(thread: crate::pthread_t) -> c_int; - pub fn __pthread_equal(__t1: __pthread_t, __t2: __pthread_t) -> c_int; - - pub fn pthread_getattr_np(__thr: crate::pthread_t, __attr: *mut pthread_attr_t) -> c_int; - - pub fn pthread_attr_getguardsize( - __attr: *const pthread_attr_t, - __guardsize: *mut size_t, - ) -> c_int; - pub fn pthread_attr_setguardsize(attr: *mut crate::pthread_attr_t, guardsize: size_t) -> c_int; - - pub fn pthread_attr_getstack( - __attr: *const pthread_attr_t, - __stackaddr: *mut *mut c_void, - __stacksize: *mut size_t, - ) -> c_int; - - pub fn pthread_mutexattr_getpshared( - attr: *const pthread_mutexattr_t, - pshared: *mut c_int, - ) -> c_int; - pub fn pthread_mutexattr_setpshared(attr: *mut pthread_mutexattr_t, pshared: c_int) -> c_int; - - pub fn pthread_mutex_timedlock( - lock: *mut pthread_mutex_t, - abstime: *const crate::timespec, - ) -> c_int; - - pub fn pthread_rwlockattr_getpshared( - attr: *const pthread_rwlockattr_t, - val: *mut c_int, - ) -> c_int; - pub fn pthread_rwlockattr_setpshared(attr: *mut pthread_rwlockattr_t, val: c_int) -> c_int; - - pub fn pthread_condattr_getclock( - attr: *const pthread_condattr_t, - clock_id: *mut clockid_t, - ) -> c_int; - pub fn pthread_condattr_setclock( - __attr: *mut pthread_condattr_t, - __clock_id: __clockid_t, - ) -> c_int; - pub fn pthread_condattr_getpshared( - attr: *const pthread_condattr_t, - pshared: *mut c_int, - ) -> c_int; - pub fn pthread_condattr_setpshared(attr: *mut pthread_condattr_t, pshared: c_int) -> c_int; - - pub fn pthread_once(control: *mut pthread_once_t, routine: extern "C" fn()) -> c_int; - - pub fn pthread_barrierattr_init(attr: 
*mut crate::pthread_barrierattr_t) -> c_int; - pub fn pthread_barrierattr_destroy(attr: *mut crate::pthread_barrierattr_t) -> c_int; - pub fn pthread_barrierattr_getpshared( - attr: *const crate::pthread_barrierattr_t, - shared: *mut c_int, - ) -> c_int; - pub fn pthread_barrierattr_setpshared( - attr: *mut crate::pthread_barrierattr_t, - shared: c_int, - ) -> c_int; - pub fn pthread_barrier_init( - barrier: *mut pthread_barrier_t, - attr: *const crate::pthread_barrierattr_t, - count: c_uint, - ) -> c_int; - pub fn pthread_barrier_destroy(barrier: *mut pthread_barrier_t) -> c_int; - pub fn pthread_barrier_wait(barrier: *mut pthread_barrier_t) -> c_int; - pub fn pthread_spin_init(lock: *mut crate::pthread_spinlock_t, pshared: c_int) -> c_int; - pub fn pthread_spin_destroy(lock: *mut crate::pthread_spinlock_t) -> c_int; - pub fn pthread_spin_lock(lock: *mut crate::pthread_spinlock_t) -> c_int; - pub fn pthread_spin_trylock(lock: *mut crate::pthread_spinlock_t) -> c_int; - pub fn pthread_spin_unlock(lock: *mut crate::pthread_spinlock_t) -> c_int; - pub fn pthread_atfork( - prepare: Option, - parent: Option, - child: Option, - ) -> c_int; - - pub fn pthread_sigmask( - __how: c_int, - __newmask: *const __sigset_t, - __oldmask: *mut __sigset_t, - ) -> c_int; - - pub fn sched_getparam(pid: crate::pid_t, param: *mut crate::sched_param) -> c_int; - pub fn sched_setparam(pid: crate::pid_t, param: *const crate::sched_param) -> c_int; - pub fn sched_getscheduler(pid: crate::pid_t) -> c_int; - pub fn sched_setscheduler( - pid: crate::pid_t, - policy: c_int, - param: *const crate::sched_param, - ) -> c_int; - pub fn pthread_getschedparam( - native: crate::pthread_t, - policy: *mut c_int, - param: *mut crate::sched_param, - ) -> c_int; - pub fn pthread_setschedparam( - native: crate::pthread_t, - policy: c_int, - param: *const crate::sched_param, - ) -> c_int; - - pub fn pthread_getcpuclockid(thread: crate::pthread_t, clk_id: *mut crate::clockid_t) -> c_int; - - pub fn sem_init(sem: *mut sem_t, pshared: c_int, value: c_uint) -> c_int; - pub fn sem_destroy(sem: *mut sem_t) -> c_int; - pub fn sem_timedwait(sem: *mut sem_t, abstime: *const crate::timespec) -> c_int; - pub fn sem_getvalue(sem: *mut sem_t, sval: *mut c_int) -> c_int; - - pub fn clock_getres(__clock_id: clockid_t, __res: *mut crate::timespec) -> c_int; - pub fn clock_gettime(__clock_id: clockid_t, __tp: *mut crate::timespec) -> c_int; - pub fn clock_settime(__clock_id: clockid_t, __tp: *const crate::timespec) -> c_int; - pub fn clock_getcpuclockid(pid: crate::pid_t, clk_id: *mut crate::clockid_t) -> c_int; - - pub fn clock_nanosleep( - clk_id: crate::clockid_t, - flags: c_int, - rqtp: *const crate::timespec, - rmtp: *mut crate::timespec, - ) -> c_int; - - pub fn gettimeofday(tp: *mut crate::timeval, tz: *mut crate::timezone) -> c_int; - pub fn settimeofday(tv: *const crate::timeval, tz: *const crate::timezone) -> c_int; - - pub fn asctime_r(tm: *const crate::tm, buf: *mut c_char) -> *mut c_char; - pub fn ctime_r(timep: *const time_t, buf: *mut c_char) -> *mut c_char; - - pub fn strftime( - s: *mut c_char, - max: size_t, - format: *const c_char, - tm: *const crate::tm, - ) -> size_t; - pub fn strptime(s: *const c_char, format: *const c_char, tm: *mut crate::tm) -> *mut c_char; - - pub fn timer_create( - clockid: crate::clockid_t, - sevp: *mut crate::sigevent, - timerid: *mut crate::timer_t, - ) -> c_int; - pub fn timer_delete(timerid: crate::timer_t) -> c_int; - pub fn timer_getoverrun(timerid: crate::timer_t) -> c_int; - pub fn 
timer_gettime(timerid: crate::timer_t, curr_value: *mut crate::itimerspec) -> c_int; - pub fn timer_settime( - timerid: crate::timer_t, - flags: c_int, - new_value: *const crate::itimerspec, - old_value: *mut crate::itimerspec, - ) -> c_int; - - pub fn fstat(__fd: c_int, __buf: *mut stat) -> c_int; - pub fn fstat64(__fd: c_int, __buf: *mut stat64) -> c_int; - - pub fn fstatat(__fd: c_int, __file: *const c_char, __buf: *mut stat, __flag: c_int) -> c_int; - pub fn fstatat64( - __fd: c_int, - __file: *const c_char, - __buf: *mut stat64, - __flag: c_int, - ) -> c_int; - - pub fn statx( - dirfd: c_int, - pathname: *const c_char, - flags: c_int, - mask: c_uint, - statxbuf: *mut statx, - ) -> c_int; - - pub fn ftruncate(__fd: c_int, __length: __off_t) -> c_int; - pub fn ftruncate64(__fd: c_int, __length: __off64_t) -> c_int; - pub fn truncate64(__file: *const c_char, __length: __off64_t) -> c_int; - - pub fn lstat(__file: *const c_char, __buf: *mut stat) -> c_int; - pub fn lstat64(__file: *const c_char, __buf: *mut stat64) -> c_int; - - pub fn statfs(path: *const c_char, buf: *mut statfs) -> c_int; - pub fn statfs64(__file: *const c_char, __buf: *mut statfs64) -> c_int; - pub fn fstatfs(fd: c_int, buf: *mut statfs) -> c_int; - pub fn fstatfs64(__fildes: c_int, __buf: *mut statfs64) -> c_int; - - pub fn statvfs(__file: *const c_char, __buf: *mut statvfs) -> c_int; - pub fn statvfs64(__file: *const c_char, __buf: *mut statvfs64) -> c_int; - pub fn fstatvfs(__fildes: c_int, __buf: *mut statvfs) -> c_int; - pub fn fstatvfs64(__fildes: c_int, __buf: *mut statvfs64) -> c_int; - - pub fn open(__file: *const c_char, __oflag: c_int, ...) -> c_int; - pub fn open64(__file: *const c_char, __oflag: c_int, ...) -> c_int; - - pub fn openat(__fd: c_int, __file: *const c_char, __oflag: c_int, ...) -> c_int; - pub fn openat64(__fd: c_int, __file: *const c_char, __oflag: c_int, ...) 
-> c_int; - - pub fn fopen64(filename: *const c_char, mode: *const c_char) -> *mut crate::FILE; - pub fn freopen64( - filename: *const c_char, - mode: *const c_char, - file: *mut crate::FILE, - ) -> *mut crate::FILE; - - pub fn creat64(path: *const c_char, mode: mode_t) -> c_int; - - pub fn mkostemp(template: *mut c_char, flags: c_int) -> c_int; - pub fn mkostemps(template: *mut c_char, suffixlen: c_int, flags: c_int) -> c_int; - pub fn mkstemps(template: *mut c_char, suffixlen: c_int) -> c_int; - pub fn tmpfile64() -> *mut crate::FILE; - - pub fn popen(command: *const c_char, mode: *const c_char) -> *mut crate::FILE; - - pub fn getdtablesize() -> c_int; - - // Added in `glibc` 2.34 - pub fn close_range(first: c_uint, last: c_uint, flags: c_int) -> c_int; - - pub fn openpty( - __amaster: *mut c_int, - __aslave: *mut c_int, - __name: *mut c_char, - __termp: *const termios, - __winp: *const crate::winsize, - ) -> c_int; - - pub fn forkpty( - __amaster: *mut c_int, - __name: *mut c_char, - __termp: *const termios, - __winp: *const crate::winsize, - ) -> crate::pid_t; - - pub fn getpt() -> c_int; - pub fn ptsname_r(fd: c_int, buf: *mut c_char, buflen: size_t) -> c_int; - pub fn login_tty(fd: c_int) -> c_int; - - pub fn ctermid(s: *mut c_char) -> *mut c_char; - - pub fn clearenv() -> c_int; - - pub fn execveat( - dirfd: c_int, - pathname: *const c_char, - argv: *const *mut c_char, - envp: *const *mut c_char, - flags: c_int, - ) -> c_int; - - // DIFF(main): changed to `*const *mut` in e77f551de9 - pub fn execvpe( - file: *const c_char, - argv: *const *const c_char, - envp: *const *const c_char, - ) -> c_int; - pub fn fexecve(fd: c_int, argv: *const *const c_char, envp: *const *const c_char) -> c_int; - - pub fn daemon(nochdir: c_int, noclose: c_int) -> c_int; - - // posix/spawn.h - pub fn posix_spawn( - pid: *mut crate::pid_t, - path: *const c_char, - file_actions: *const crate::posix_spawn_file_actions_t, - attrp: *const crate::posix_spawnattr_t, - argv: *const *mut c_char, - envp: *const *mut c_char, - ) -> c_int; - pub fn posix_spawnp( - pid: *mut crate::pid_t, - file: *const c_char, - file_actions: *const crate::posix_spawn_file_actions_t, - attrp: *const crate::posix_spawnattr_t, - argv: *const *mut c_char, - envp: *const *mut c_char, - ) -> c_int; - pub fn posix_spawnattr_init(attr: *mut posix_spawnattr_t) -> c_int; - pub fn posix_spawnattr_destroy(attr: *mut posix_spawnattr_t) -> c_int; - pub fn posix_spawnattr_getsigdefault( - attr: *const posix_spawnattr_t, - default: *mut crate::sigset_t, - ) -> c_int; - pub fn posix_spawnattr_setsigdefault( - attr: *mut posix_spawnattr_t, - default: *const crate::sigset_t, - ) -> c_int; - pub fn posix_spawnattr_getsigmask( - attr: *const posix_spawnattr_t, - default: *mut crate::sigset_t, - ) -> c_int; - pub fn posix_spawnattr_setsigmask( - attr: *mut posix_spawnattr_t, - default: *const crate::sigset_t, - ) -> c_int; - pub fn posix_spawnattr_getflags(attr: *const posix_spawnattr_t, flags: *mut c_short) -> c_int; - pub fn posix_spawnattr_setflags(attr: *mut posix_spawnattr_t, flags: c_short) -> c_int; - pub fn posix_spawnattr_getpgroup( - attr: *const posix_spawnattr_t, - flags: *mut crate::pid_t, - ) -> c_int; - pub fn posix_spawnattr_setpgroup(attr: *mut posix_spawnattr_t, flags: crate::pid_t) -> c_int; - pub fn posix_spawnattr_getschedpolicy( - attr: *const posix_spawnattr_t, - flags: *mut c_int, - ) -> c_int; - pub fn posix_spawnattr_setschedpolicy(attr: *mut posix_spawnattr_t, flags: c_int) -> c_int; - pub fn posix_spawnattr_getschedparam( - 
attr: *const posix_spawnattr_t, - param: *mut crate::sched_param, - ) -> c_int; - pub fn posix_spawnattr_setschedparam( - attr: *mut posix_spawnattr_t, - param: *const crate::sched_param, - ) -> c_int; - - pub fn posix_spawn_file_actions_init(actions: *mut posix_spawn_file_actions_t) -> c_int; - pub fn posix_spawn_file_actions_destroy(actions: *mut posix_spawn_file_actions_t) -> c_int; - pub fn posix_spawn_file_actions_addopen( - actions: *mut posix_spawn_file_actions_t, - fd: c_int, - path: *const c_char, - oflag: c_int, - mode: mode_t, - ) -> c_int; - pub fn posix_spawn_file_actions_addclose( - actions: *mut posix_spawn_file_actions_t, - fd: c_int, - ) -> c_int; - pub fn posix_spawn_file_actions_adddup2( - actions: *mut posix_spawn_file_actions_t, - fd: c_int, - newfd: c_int, - ) -> c_int; - - // Added in `glibc` 2.29 - pub fn posix_spawn_file_actions_addchdir_np( - actions: *mut crate::posix_spawn_file_actions_t, - path: *const c_char, - ) -> c_int; - // Added in `glibc` 2.29 - pub fn posix_spawn_file_actions_addfchdir_np( - actions: *mut crate::posix_spawn_file_actions_t, - fd: c_int, - ) -> c_int; - // Added in `glibc` 2.34 - pub fn posix_spawn_file_actions_addclosefrom_np( - actions: *mut crate::posix_spawn_file_actions_t, - from: c_int, - ) -> c_int; - // Added in `glibc` 2.35 - pub fn posix_spawn_file_actions_addtcsetpgrp_np( - actions: *mut crate::posix_spawn_file_actions_t, - tcfd: c_int, - ) -> c_int; - - pub fn shm_open(name: *const c_char, oflag: c_int, mode: mode_t) -> c_int; - pub fn shm_unlink(name: *const c_char) -> c_int; - - pub fn euidaccess(pathname: *const c_char, mode: c_int) -> c_int; - pub fn eaccess(pathname: *const c_char, mode: c_int) -> c_int; - - pub fn faccessat(dirfd: c_int, pathname: *const c_char, mode: c_int, flags: c_int) -> c_int; - - pub fn stat(__file: *const c_char, __buf: *mut stat) -> c_int; - pub fn stat64(__file: *const c_char, __buf: *mut stat64) -> c_int; - - pub fn readdir(dirp: *mut crate::DIR) -> *mut crate::dirent; - pub fn readdir64(dirp: *mut crate::DIR) -> *mut crate::dirent64; - pub fn readdir_r( - dirp: *mut crate::DIR, - entry: *mut crate::dirent, - result: *mut *mut crate::dirent, - ) -> c_int; - pub fn readdir64_r( - dirp: *mut crate::DIR, - entry: *mut crate::dirent64, - result: *mut *mut crate::dirent64, - ) -> c_int; - pub fn seekdir(dirp: *mut crate::DIR, loc: c_long); - pub fn telldir(dirp: *mut crate::DIR) -> c_long; - - pub fn dirfd(dirp: *mut crate::DIR) -> c_int; - - #[link_name = "__xpg_strerror_r"] - pub fn strerror_r(__errnum: c_int, __buf: *mut c_char, __buflen: size_t) -> c_int; - - pub fn __errno_location() -> *mut c_int; - - pub fn mmap64( - __addr: *mut c_void, - __len: size_t, - __prot: c_int, - __flags: c_int, - __fd: c_int, - __offset: __off64_t, - ) -> *mut c_void; - - pub fn mremap( - addr: *mut c_void, - len: size_t, - new_len: size_t, - flags: c_int, - ... 
- ) -> *mut c_void; - - pub fn mprotect(__addr: *mut c_void, __len: size_t, __prot: c_int) -> c_int; - - pub fn msync(__addr: *mut c_void, __len: size_t, __flags: c_int) -> c_int; - pub fn sync(); - pub fn syncfs(fd: c_int) -> c_int; - pub fn fdatasync(fd: c_int) -> c_int; - - pub fn fallocate64(fd: c_int, mode: c_int, offset: off64_t, len: off64_t) -> c_int; - pub fn posix_fallocate(fd: c_int, offset: off_t, len: off_t) -> c_int; - pub fn posix_fallocate64(fd: c_int, offset: off64_t, len: off64_t) -> c_int; - - pub fn posix_fadvise(fd: c_int, offset: off_t, len: off_t, advise: c_int) -> c_int; - - pub fn posix_fadvise64(fd: c_int, offset: off64_t, len: off64_t, advise: c_int) -> c_int; - - pub fn madvise(__addr: *mut c_void, __len: size_t, __advice: c_int) -> c_int; - - pub fn posix_madvise(addr: *mut c_void, len: size_t, advice: c_int) -> c_int; - - pub fn getrlimit(resource: crate::__rlimit_resource_t, rlim: *mut crate::rlimit) -> c_int; - pub fn getrlimit64(resource: crate::__rlimit_resource_t, rlim: *mut crate::rlimit64) -> c_int; - pub fn setrlimit(resource: crate::__rlimit_resource_t, rlim: *const crate::rlimit) -> c_int; - pub fn setrlimit64(resource: crate::__rlimit_resource_t, rlim: *const crate::rlimit64) - -> c_int; - - pub fn getpriority(which: crate::__priority_which, who: crate::id_t) -> c_int; - pub fn setpriority(which: crate::__priority_which, who: crate::id_t, prio: c_int) -> c_int; - - pub fn getrandom(__buffer: *mut c_void, __length: size_t, __flags: c_uint) -> ssize_t; - pub fn getentropy(__buffer: *mut c_void, __length: size_t) -> c_int; - - pub fn memrchr(cx: *const c_void, c: c_int, n: size_t) -> *mut c_void; - pub fn memmem( - haystack: *const c_void, - haystacklen: size_t, - needle: *const c_void, - needlelen: size_t, - ) -> *mut c_void; - pub fn strchrnul(s: *const c_char, c: c_int) -> *mut c_char; - - pub fn abs(i: c_int) -> c_int; - pub fn labs(i: c_long) -> c_long; - pub fn rand() -> c_int; - pub fn srand(seed: c_uint); - - pub fn drand48() -> c_double; - pub fn erand48(xseed: *mut c_ushort) -> c_double; - pub fn lrand48() -> c_long; - pub fn nrand48(xseed: *mut c_ushort) -> c_long; - pub fn mrand48() -> c_long; - pub fn jrand48(xseed: *mut c_ushort) -> c_long; - pub fn srand48(seed: c_long); - pub fn seed48(xseed: *mut c_ushort) -> *mut c_ushort; - pub fn lcong48(p: *mut c_ushort); - - pub fn qsort_r( - base: *mut c_void, - num: size_t, - size: size_t, - compar: Option c_int>, - arg: *mut c_void, - ); - - pub fn brk(addr: *mut c_void) -> c_int; - pub fn sbrk(increment: intptr_t) -> *mut c_void; - - pub fn memalign(align: size_t, size: size_t) -> *mut c_void; - pub fn mallopt(param: c_int, value: c_int) -> c_int; - - pub fn mallinfo() -> crate::mallinfo; - pub fn mallinfo2() -> crate::mallinfo2; - pub fn malloc_info(options: c_int, stream: *mut crate::FILE) -> c_int; - pub fn malloc_usable_size(ptr: *mut c_void) -> size_t; - pub fn malloc_trim(__pad: size_t) -> c_int; - - pub fn iconv_open(tocode: *const c_char, fromcode: *const c_char) -> iconv_t; - pub fn iconv( - cd: iconv_t, - inbuf: *mut *mut c_char, - inbytesleft: *mut size_t, - outbuf: *mut *mut c_char, - outbytesleft: *mut size_t, - ) -> size_t; - pub fn iconv_close(cd: iconv_t) -> c_int; - - pub fn getopt_long( - argc: c_int, - argv: *const *mut c_char, - optstring: *const c_char, - longopts: *const option, - longindex: *mut c_int, - ) -> c_int; - - pub fn backtrace(buf: *mut *mut c_void, sz: c_int) -> c_int; - - pub fn reboot(how_to: c_int) -> c_int; - - pub fn getloadavg(loadavg: *mut c_double, 
nelem: c_int) -> c_int; - - pub fn regexec( - preg: *const crate::regex_t, - input: *const c_char, - nmatch: size_t, - pmatch: *mut regmatch_t, - eflags: c_int, - ) -> c_int; - - pub fn regerror( - errcode: c_int, - preg: *const crate::regex_t, - errbuf: *mut c_char, - errbuf_size: size_t, - ) -> size_t; - - pub fn regfree(preg: *mut crate::regex_t); - - pub fn glob( - pattern: *const c_char, - flags: c_int, - errfunc: Option c_int>, - pglob: *mut crate::glob_t, - ) -> c_int; - pub fn globfree(pglob: *mut crate::glob_t); - - pub fn glob64( - pattern: *const c_char, - flags: c_int, - errfunc: Option c_int>, - pglob: *mut glob64_t, - ) -> c_int; - pub fn globfree64(pglob: *mut glob64_t); - - pub fn getxattr( - path: *const c_char, - name: *const c_char, - value: *mut c_void, - size: size_t, - ) -> ssize_t; - pub fn lgetxattr( - path: *const c_char, - name: *const c_char, - value: *mut c_void, - size: size_t, - ) -> ssize_t; - pub fn fgetxattr( - filedes: c_int, - name: *const c_char, - value: *mut c_void, - size: size_t, - ) -> ssize_t; - pub fn setxattr( - path: *const c_char, - name: *const c_char, - value: *const c_void, - size: size_t, - flags: c_int, - ) -> c_int; - pub fn lsetxattr( - path: *const c_char, - name: *const c_char, - value: *const c_void, - size: size_t, - flags: c_int, - ) -> c_int; - pub fn fsetxattr( - filedes: c_int, - name: *const c_char, - value: *const c_void, - size: size_t, - flags: c_int, - ) -> c_int; - pub fn listxattr(path: *const c_char, list: *mut c_char, size: size_t) -> ssize_t; - pub fn llistxattr(path: *const c_char, list: *mut c_char, size: size_t) -> ssize_t; - pub fn flistxattr(filedes: c_int, list: *mut c_char, size: size_t) -> ssize_t; - pub fn removexattr(path: *const c_char, name: *const c_char) -> c_int; - pub fn lremovexattr(path: *const c_char, name: *const c_char) -> c_int; - pub fn fremovexattr(filedes: c_int, name: *const c_char) -> c_int; - - pub fn dirname(path: *mut c_char) -> *mut c_char; - /// POSIX version of `basename(3)`, defined in `libgen.h`. - #[link_name = "__xpg_basename"] - pub fn posix_basename(path: *mut c_char) -> *mut c_char; - /// GNU version of `basename(3)`, defined in `string.h`. - #[link_name = "basename"] - pub fn gnu_basename(path: *const c_char) -> *mut c_char; - - pub fn dlmopen(lmid: Lmid_t, filename: *const c_char, flag: c_int) -> *mut c_void; - pub fn dlinfo(handle: *mut c_void, request: c_int, info: *mut c_void) -> c_int; - pub fn dladdr1( - addr: *const c_void, - info: *mut crate::Dl_info, - extra_info: *mut *mut c_void, - flags: c_int, - ) -> c_int; - - pub fn duplocale(base: crate::locale_t) -> crate::locale_t; - pub fn freelocale(loc: crate::locale_t); - pub fn newlocale(mask: c_int, locale: *const c_char, base: crate::locale_t) -> crate::locale_t; - pub fn uselocale(loc: crate::locale_t) -> crate::locale_t; - pub fn nl_langinfo(item: crate::nl_item) -> *mut c_char; - pub fn nl_langinfo_l(item: crate::nl_item, locale: crate::locale_t) -> *mut c_char; - - pub fn dl_iterate_phdr( - callback: Option< - unsafe extern "C" fn( - info: *mut crate::dl_phdr_info, - size: size_t, - data: *mut c_void, - ) -> c_int, - >, - data: *mut c_void, - ) -> c_int; - - pub fn gnu_get_libc_release() -> *const c_char; - pub fn gnu_get_libc_version() -> *const c_char; -} - -safe_f! 
{ - pub const fn makedev(major: c_uint, minor: c_uint) -> crate::dev_t { - let major = major as crate::dev_t; - let minor = minor as crate::dev_t; - let mut dev = 0; - dev |= major << 8; - dev |= minor; - dev - } - - pub const fn major(dev: crate::dev_t) -> c_uint { - ((dev >> 8) & 0xff) as c_uint - } - - pub const fn minor(dev: crate::dev_t) -> c_uint { - (dev & 0xffff00ff) as c_uint - } - - pub fn SIGRTMAX() -> c_int { - unsafe { __libc_current_sigrtmax() } - } - - pub fn SIGRTMIN() -> c_int { - unsafe { __libc_current_sigrtmin() } - } - - pub const fn WIFSTOPPED(status: c_int) -> bool { - (status & 0xff) == 0x7f - } - - pub const fn WSTOPSIG(status: c_int) -> c_int { - (status >> 8) & 0xff - } - - pub const fn WIFCONTINUED(status: c_int) -> bool { - status == 0xffff - } - - pub const fn WIFSIGNALED(status: c_int) -> bool { - ((status & 0x7f) + 1) as i8 >= 2 - } - - pub const fn WTERMSIG(status: c_int) -> c_int { - status & 0x7f - } - - pub const fn WIFEXITED(status: c_int) -> bool { - (status & 0x7f) == 0 - } - - pub const fn WEXITSTATUS(status: c_int) -> c_int { - (status >> 8) & 0xff - } - - pub const fn WCOREDUMP(status: c_int) -> bool { - (status & 0x80) != 0 - } - - pub const fn W_EXITCODE(ret: c_int, sig: c_int) -> c_int { - (ret << 8) | sig - } - - pub const fn W_STOPCODE(sig: c_int) -> c_int { - (sig << 8) | 0x7f - } - - pub const fn QCMD(cmd: c_int, type_: c_int) -> c_int { - (cmd << 8) | (type_ & 0x00ff) - } - - pub const fn IPOPT_COPIED(o: u8) -> u8 { - o & IPOPT_COPY - } - - pub const fn IPOPT_CLASS(o: u8) -> u8 { - o & IPOPT_CLASS_MASK - } - - pub const fn IPOPT_NUMBER(o: u8) -> u8 { - o & IPOPT_NUMBER_MASK - } - - pub const fn IPTOS_ECN(x: u8) -> u8 { - x & crate::IPTOS_ECN_MASK - } -} - -cfg_if! { - if #[cfg(target_pointer_width = "64")] { - mod b64; - pub use self::b64::*; - } else { - mod b32; - pub use self::b32::*; - } -} diff --git a/vendor/libc/src/unix/linux_like/android/b32/arm.rs b/vendor/libc/src/unix/linux_like/android/b32/arm.rs deleted file mode 100644 index b78c8a83623eaf..00000000000000 --- a/vendor/libc/src/unix/linux_like/android/b32/arm.rs +++ /dev/null @@ -1,532 +0,0 @@ -use crate::prelude::*; - -pub type wchar_t = u32; -pub type greg_t = i32; -pub type mcontext_t = sigcontext; - -s! { - pub struct sigcontext { - pub trap_no: c_ulong, - pub error_code: c_ulong, - pub oldmask: c_ulong, - pub arm_r0: c_ulong, - pub arm_r1: c_ulong, - pub arm_r2: c_ulong, - pub arm_r3: c_ulong, - pub arm_r4: c_ulong, - pub arm_r5: c_ulong, - pub arm_r6: c_ulong, - pub arm_r7: c_ulong, - pub arm_r8: c_ulong, - pub arm_r9: c_ulong, - pub arm_r10: c_ulong, - pub arm_fp: c_ulong, - pub arm_ip: c_ulong, - pub arm_sp: c_ulong, - pub arm_lr: c_ulong, - pub arm_pc: c_ulong, - pub arm_cpsr: c_ulong, - pub fault_address: c_ulong, - } -} - -s_no_extra_traits! { - pub struct __c_anonymous_uc_sigmask_with_padding { - pub uc_sigmask: crate::sigset_t, - /* Android has a wrong (smaller) sigset_t on x86. */ - __padding_rt_sigset: u32, - } - - pub union __c_anonymous_uc_sigmask { - uc_sigmask: __c_anonymous_uc_sigmask_with_padding, - uc_sigmask64: crate::sigset64_t, - } - - pub struct ucontext_t { - pub uc_flags: c_ulong, - pub uc_link: *mut ucontext_t, - pub uc_stack: crate::stack_t, - pub uc_mcontext: mcontext_t, - pub uc_sigmask__c_anonymous_union: __c_anonymous_uc_sigmask, - /* The kernel adds extra padding after uc_sigmask to match - * glibc sigset_t on ARM. */ - __padding: [c_char; 120], - __align: [c_longlong; 0], - uc_regspace: [c_ulong; 128], - } -} - -cfg_if! 
{ - if #[cfg(feature = "extra_traits")] { - impl PartialEq for __c_anonymous_uc_sigmask_with_padding { - fn eq(&self, other: &__c_anonymous_uc_sigmask_with_padding) -> bool { - self.uc_sigmask == other.uc_sigmask - // Ignore padding - } - } - impl Eq for __c_anonymous_uc_sigmask_with_padding {} - impl hash::Hash for __c_anonymous_uc_sigmask_with_padding { - fn hash(&self, state: &mut H) { - self.uc_sigmask.hash(state) - // Ignore padding - } - } - - impl PartialEq for __c_anonymous_uc_sigmask { - fn eq(&self, other: &__c_anonymous_uc_sigmask) -> bool { - unsafe { self.uc_sigmask == other.uc_sigmask } - } - } - impl Eq for __c_anonymous_uc_sigmask {} - impl hash::Hash for __c_anonymous_uc_sigmask { - fn hash(&self, state: &mut H) { - unsafe { self.uc_sigmask.hash(state) } - } - } - - impl PartialEq for ucontext_t { - fn eq(&self, other: &Self) -> bool { - self.uc_flags == other.uc_flags - && self.uc_link == other.uc_link - && self.uc_stack == other.uc_stack - && self.uc_mcontext == other.uc_mcontext - && self.uc_sigmask__c_anonymous_union == other.uc_sigmask__c_anonymous_union - && &self.uc_regspace[..] == &other.uc_regspace[..] - // Ignore padding field - } - } - impl Eq for ucontext_t {} - impl hash::Hash for ucontext_t { - fn hash(&self, state: &mut H) { - self.uc_flags.hash(state); - self.uc_link.hash(state); - self.uc_stack.hash(state); - self.uc_mcontext.hash(state); - self.uc_sigmask__c_anonymous_union.hash(state); - self.uc_regspace[..].hash(state); - // Ignore padding field - } - } - } -} - -pub const O_DIRECT: c_int = 0x10000; -pub const O_DIRECTORY: c_int = 0x4000; -pub const O_NOFOLLOW: c_int = 0x8000; -pub const O_LARGEFILE: c_int = 0o400000; - -pub const SYS_restart_syscall: c_long = 0; -pub const SYS_exit: c_long = 1; -pub const SYS_fork: c_long = 2; -pub const SYS_read: c_long = 3; -pub const SYS_write: c_long = 4; -pub const SYS_open: c_long = 5; -pub const SYS_close: c_long = 6; -pub const SYS_creat: c_long = 8; -pub const SYS_link: c_long = 9; -pub const SYS_unlink: c_long = 10; -pub const SYS_execve: c_long = 11; -pub const SYS_chdir: c_long = 12; -pub const SYS_mknod: c_long = 14; -pub const SYS_chmod: c_long = 15; -pub const SYS_lchown: c_long = 16; -pub const SYS_lseek: c_long = 19; -pub const SYS_getpid: c_long = 20; -pub const SYS_mount: c_long = 21; -pub const SYS_setuid: c_long = 23; -pub const SYS_getuid: c_long = 24; -pub const SYS_ptrace: c_long = 26; -pub const SYS_pause: c_long = 29; -pub const SYS_access: c_long = 33; -pub const SYS_nice: c_long = 34; -pub const SYS_sync: c_long = 36; -pub const SYS_kill: c_long = 37; -pub const SYS_rename: c_long = 38; -pub const SYS_mkdir: c_long = 39; -pub const SYS_rmdir: c_long = 40; -pub const SYS_dup: c_long = 41; -pub const SYS_pipe: c_long = 42; -pub const SYS_times: c_long = 43; -pub const SYS_brk: c_long = 45; -pub const SYS_setgid: c_long = 46; -pub const SYS_getgid: c_long = 47; -pub const SYS_geteuid: c_long = 49; -pub const SYS_getegid: c_long = 50; -pub const SYS_acct: c_long = 51; -pub const SYS_umount2: c_long = 52; -pub const SYS_ioctl: c_long = 54; -pub const SYS_fcntl: c_long = 55; -pub const SYS_setpgid: c_long = 57; -pub const SYS_umask: c_long = 60; -pub const SYS_chroot: c_long = 61; -pub const SYS_ustat: c_long = 62; -pub const SYS_dup2: c_long = 63; -pub const SYS_getppid: c_long = 64; -pub const SYS_getpgrp: c_long = 65; -pub const SYS_setsid: c_long = 66; -pub const SYS_sigaction: c_long = 67; -pub const SYS_setreuid: c_long = 70; -pub const SYS_setregid: c_long = 71; -pub const SYS_sigsuspend: 
c_long = 72; -pub const SYS_sigpending: c_long = 73; -pub const SYS_sethostname: c_long = 74; -pub const SYS_setrlimit: c_long = 75; -pub const SYS_getrusage: c_long = 77; -pub const SYS_gettimeofday: c_long = 78; -pub const SYS_settimeofday: c_long = 79; -pub const SYS_getgroups: c_long = 80; -pub const SYS_setgroups: c_long = 81; -pub const SYS_symlink: c_long = 83; -pub const SYS_readlink: c_long = 85; -pub const SYS_uselib: c_long = 86; -pub const SYS_swapon: c_long = 87; -pub const SYS_reboot: c_long = 88; -pub const SYS_munmap: c_long = 91; -pub const SYS_truncate: c_long = 92; -pub const SYS_ftruncate: c_long = 93; -pub const SYS_fchmod: c_long = 94; -pub const SYS_fchown: c_long = 95; -pub const SYS_getpriority: c_long = 96; -pub const SYS_setpriority: c_long = 97; -pub const SYS_statfs: c_long = 99; -pub const SYS_fstatfs: c_long = 100; -pub const SYS_syslog: c_long = 103; -pub const SYS_setitimer: c_long = 104; -pub const SYS_getitimer: c_long = 105; -pub const SYS_stat: c_long = 106; -pub const SYS_lstat: c_long = 107; -pub const SYS_fstat: c_long = 108; -pub const SYS_vhangup: c_long = 111; -pub const SYS_wait4: c_long = 114; -pub const SYS_swapoff: c_long = 115; -pub const SYS_sysinfo: c_long = 116; -pub const SYS_fsync: c_long = 118; -pub const SYS_sigreturn: c_long = 119; -pub const SYS_clone: c_long = 120; -pub const SYS_setdomainname: c_long = 121; -pub const SYS_uname: c_long = 122; -pub const SYS_adjtimex: c_long = 124; -pub const SYS_mprotect: c_long = 125; -pub const SYS_sigprocmask: c_long = 126; -pub const SYS_init_module: c_long = 128; -pub const SYS_delete_module: c_long = 129; -pub const SYS_quotactl: c_long = 131; -pub const SYS_getpgid: c_long = 132; -pub const SYS_fchdir: c_long = 133; -pub const SYS_bdflush: c_long = 134; -pub const SYS_sysfs: c_long = 135; -pub const SYS_personality: c_long = 136; -pub const SYS_setfsuid: c_long = 138; -pub const SYS_setfsgid: c_long = 139; -pub const SYS_getdents: c_long = 141; -pub const SYS_flock: c_long = 143; -pub const SYS_msync: c_long = 144; -pub const SYS_readv: c_long = 145; -pub const SYS_writev: c_long = 146; -pub const SYS_getsid: c_long = 147; -pub const SYS_fdatasync: c_long = 148; -pub const SYS_mlock: c_long = 150; -pub const SYS_munlock: c_long = 151; -pub const SYS_mlockall: c_long = 152; -pub const SYS_munlockall: c_long = 153; -pub const SYS_sched_setparam: c_long = 154; -pub const SYS_sched_getparam: c_long = 155; -pub const SYS_sched_setscheduler: c_long = 156; -pub const SYS_sched_getscheduler: c_long = 157; -pub const SYS_sched_yield: c_long = 158; -pub const SYS_sched_get_priority_max: c_long = 159; -pub const SYS_sched_get_priority_min: c_long = 160; -pub const SYS_sched_rr_get_interval: c_long = 161; -pub const SYS_nanosleep: c_long = 162; -pub const SYS_mremap: c_long = 163; -pub const SYS_setresuid: c_long = 164; -pub const SYS_getresuid: c_long = 165; -pub const SYS_poll: c_long = 168; -pub const SYS_nfsservctl: c_long = 169; -pub const SYS_setresgid: c_long = 170; -pub const SYS_getresgid: c_long = 171; -pub const SYS_prctl: c_long = 172; -pub const SYS_rt_sigreturn: c_long = 173; -pub const SYS_rt_sigaction: c_long = 174; -pub const SYS_rt_sigprocmask: c_long = 175; -pub const SYS_rt_sigpending: c_long = 176; -pub const SYS_rt_sigtimedwait: c_long = 177; -pub const SYS_rt_sigqueueinfo: c_long = 178; -pub const SYS_rt_sigsuspend: c_long = 179; -pub const SYS_pread64: c_long = 180; -pub const SYS_pwrite64: c_long = 181; -pub const SYS_chown: c_long = 182; -pub const SYS_getcwd: c_long = 183; 
-pub const SYS_capget: c_long = 184; -pub const SYS_capset: c_long = 185; -pub const SYS_sigaltstack: c_long = 186; -pub const SYS_sendfile: c_long = 187; -pub const SYS_vfork: c_long = 190; -pub const SYS_ugetrlimit: c_long = 191; -pub const SYS_mmap2: c_long = 192; -pub const SYS_truncate64: c_long = 193; -pub const SYS_ftruncate64: c_long = 194; -pub const SYS_stat64: c_long = 195; -pub const SYS_lstat64: c_long = 196; -pub const SYS_fstat64: c_long = 197; -pub const SYS_lchown32: c_long = 198; -pub const SYS_getuid32: c_long = 199; -pub const SYS_getgid32: c_long = 200; -pub const SYS_geteuid32: c_long = 201; -pub const SYS_getegid32: c_long = 202; -pub const SYS_setreuid32: c_long = 203; -pub const SYS_setregid32: c_long = 204; -pub const SYS_getgroups32: c_long = 205; -pub const SYS_setgroups32: c_long = 206; -pub const SYS_fchown32: c_long = 207; -pub const SYS_setresuid32: c_long = 208; -pub const SYS_getresuid32: c_long = 209; -pub const SYS_setresgid32: c_long = 210; -pub const SYS_getresgid32: c_long = 211; -pub const SYS_chown32: c_long = 212; -pub const SYS_setuid32: c_long = 213; -pub const SYS_setgid32: c_long = 214; -pub const SYS_setfsuid32: c_long = 215; -pub const SYS_setfsgid32: c_long = 216; -pub const SYS_getdents64: c_long = 217; -pub const SYS_pivot_root: c_long = 218; -pub const SYS_mincore: c_long = 219; -pub const SYS_madvise: c_long = 220; -pub const SYS_fcntl64: c_long = 221; -pub const SYS_gettid: c_long = 224; -pub const SYS_readahead: c_long = 225; -pub const SYS_setxattr: c_long = 226; -pub const SYS_lsetxattr: c_long = 227; -pub const SYS_fsetxattr: c_long = 228; -pub const SYS_getxattr: c_long = 229; -pub const SYS_lgetxattr: c_long = 230; -pub const SYS_fgetxattr: c_long = 231; -pub const SYS_listxattr: c_long = 232; -pub const SYS_llistxattr: c_long = 233; -pub const SYS_flistxattr: c_long = 234; -pub const SYS_removexattr: c_long = 235; -pub const SYS_lremovexattr: c_long = 236; -pub const SYS_fremovexattr: c_long = 237; -pub const SYS_tkill: c_long = 238; -pub const SYS_sendfile64: c_long = 239; -pub const SYS_futex: c_long = 240; -pub const SYS_sched_setaffinity: c_long = 241; -pub const SYS_sched_getaffinity: c_long = 242; -pub const SYS_io_setup: c_long = 243; -pub const SYS_io_destroy: c_long = 244; -pub const SYS_io_getevents: c_long = 245; -pub const SYS_io_submit: c_long = 246; -pub const SYS_io_cancel: c_long = 247; -pub const SYS_exit_group: c_long = 248; -pub const SYS_lookup_dcookie: c_long = 249; -pub const SYS_epoll_create: c_long = 250; -pub const SYS_epoll_ctl: c_long = 251; -pub const SYS_epoll_wait: c_long = 252; -pub const SYS_remap_file_pages: c_long = 253; -pub const SYS_set_tid_address: c_long = 256; -pub const SYS_timer_create: c_long = 257; -pub const SYS_timer_settime: c_long = 258; -pub const SYS_timer_gettime: c_long = 259; -pub const SYS_timer_getoverrun: c_long = 260; -pub const SYS_timer_delete: c_long = 261; -pub const SYS_clock_settime: c_long = 262; -pub const SYS_clock_gettime: c_long = 263; -pub const SYS_clock_getres: c_long = 264; -pub const SYS_clock_nanosleep: c_long = 265; -pub const SYS_statfs64: c_long = 266; -pub const SYS_fstatfs64: c_long = 267; -pub const SYS_tgkill: c_long = 268; -pub const SYS_utimes: c_long = 269; -pub const SYS_arm_fadvise64_64: c_long = 270; -pub const SYS_pciconfig_iobase: c_long = 271; -pub const SYS_pciconfig_read: c_long = 272; -pub const SYS_pciconfig_write: c_long = 273; -pub const SYS_mq_open: c_long = 274; -pub const SYS_mq_unlink: c_long = 275; -pub const SYS_mq_timedsend: 
c_long = 276; -pub const SYS_mq_timedreceive: c_long = 277; -pub const SYS_mq_notify: c_long = 278; -pub const SYS_mq_getsetattr: c_long = 279; -pub const SYS_waitid: c_long = 280; -pub const SYS_socket: c_long = 281; -pub const SYS_bind: c_long = 282; -pub const SYS_connect: c_long = 283; -pub const SYS_listen: c_long = 284; -pub const SYS_accept: c_long = 285; -pub const SYS_getsockname: c_long = 286; -pub const SYS_getpeername: c_long = 287; -pub const SYS_socketpair: c_long = 288; -pub const SYS_send: c_long = 289; -pub const SYS_sendto: c_long = 290; -pub const SYS_recv: c_long = 291; -pub const SYS_recvfrom: c_long = 292; -pub const SYS_shutdown: c_long = 293; -pub const SYS_setsockopt: c_long = 294; -pub const SYS_getsockopt: c_long = 295; -pub const SYS_sendmsg: c_long = 296; -pub const SYS_recvmsg: c_long = 297; -pub const SYS_semop: c_long = 298; -pub const SYS_semget: c_long = 299; -pub const SYS_semctl: c_long = 300; -pub const SYS_msgsnd: c_long = 301; -pub const SYS_msgrcv: c_long = 302; -pub const SYS_msgget: c_long = 303; -pub const SYS_msgctl: c_long = 304; -pub const SYS_shmat: c_long = 305; -pub const SYS_shmdt: c_long = 306; -pub const SYS_shmget: c_long = 307; -pub const SYS_shmctl: c_long = 308; -pub const SYS_add_key: c_long = 309; -pub const SYS_request_key: c_long = 310; -pub const SYS_keyctl: c_long = 311; -pub const SYS_semtimedop: c_long = 312; -pub const SYS_vserver: c_long = 313; -pub const SYS_ioprio_set: c_long = 314; -pub const SYS_ioprio_get: c_long = 315; -pub const SYS_inotify_init: c_long = 316; -pub const SYS_inotify_add_watch: c_long = 317; -pub const SYS_inotify_rm_watch: c_long = 318; -pub const SYS_mbind: c_long = 319; -pub const SYS_get_mempolicy: c_long = 320; -pub const SYS_set_mempolicy: c_long = 321; -pub const SYS_openat: c_long = 322; -pub const SYS_mkdirat: c_long = 323; -pub const SYS_mknodat: c_long = 324; -pub const SYS_fchownat: c_long = 325; -pub const SYS_futimesat: c_long = 326; -pub const SYS_fstatat64: c_long = 327; -pub const SYS_unlinkat: c_long = 328; -pub const SYS_renameat: c_long = 329; -pub const SYS_linkat: c_long = 330; -pub const SYS_symlinkat: c_long = 331; -pub const SYS_readlinkat: c_long = 332; -pub const SYS_fchmodat: c_long = 333; -pub const SYS_faccessat: c_long = 334; -pub const SYS_pselect6: c_long = 335; -pub const SYS_ppoll: c_long = 336; -pub const SYS_unshare: c_long = 337; -pub const SYS_set_robust_list: c_long = 338; -pub const SYS_get_robust_list: c_long = 339; -pub const SYS_splice: c_long = 340; -pub const SYS_arm_sync_file_range: c_long = 341; -pub const SYS_tee: c_long = 342; -pub const SYS_vmsplice: c_long = 343; -pub const SYS_move_pages: c_long = 344; -pub const SYS_getcpu: c_long = 345; -pub const SYS_epoll_pwait: c_long = 346; -pub const SYS_kexec_load: c_long = 347; -pub const SYS_utimensat: c_long = 348; -pub const SYS_signalfd: c_long = 349; -pub const SYS_timerfd_create: c_long = 350; -pub const SYS_eventfd: c_long = 351; -pub const SYS_fallocate: c_long = 352; -pub const SYS_timerfd_settime: c_long = 353; -pub const SYS_timerfd_gettime: c_long = 354; -pub const SYS_signalfd4: c_long = 355; -pub const SYS_eventfd2: c_long = 356; -pub const SYS_epoll_create1: c_long = 357; -pub const SYS_dup3: c_long = 358; -pub const SYS_pipe2: c_long = 359; -pub const SYS_inotify_init1: c_long = 360; -pub const SYS_preadv: c_long = 361; -pub const SYS_pwritev: c_long = 362; -pub const SYS_rt_tgsigqueueinfo: c_long = 363; -pub const SYS_perf_event_open: c_long = 364; -pub const SYS_recvmmsg: c_long = 365; -pub 
const SYS_accept4: c_long = 366; -pub const SYS_fanotify_init: c_long = 367; -pub const SYS_fanotify_mark: c_long = 368; -pub const SYS_prlimit64: c_long = 369; -pub const SYS_name_to_handle_at: c_long = 370; -pub const SYS_open_by_handle_at: c_long = 371; -pub const SYS_clock_adjtime: c_long = 372; -pub const SYS_syncfs: c_long = 373; -pub const SYS_sendmmsg: c_long = 374; -pub const SYS_setns: c_long = 375; -pub const SYS_process_vm_readv: c_long = 376; -pub const SYS_process_vm_writev: c_long = 377; -pub const SYS_kcmp: c_long = 378; -pub const SYS_finit_module: c_long = 379; -pub const SYS_sched_setattr: c_long = 380; -pub const SYS_sched_getattr: c_long = 381; -pub const SYS_renameat2: c_long = 382; -pub const SYS_seccomp: c_long = 383; -pub const SYS_getrandom: c_long = 384; -pub const SYS_memfd_create: c_long = 385; -pub const SYS_bpf: c_long = 386; -pub const SYS_execveat: c_long = 387; -pub const SYS_userfaultfd: c_long = 388; -pub const SYS_membarrier: c_long = 389; -pub const SYS_mlock2: c_long = 390; -pub const SYS_copy_file_range: c_long = 391; -pub const SYS_preadv2: c_long = 392; -pub const SYS_pwritev2: c_long = 393; -pub const SYS_pkey_mprotect: c_long = 394; -pub const SYS_pkey_alloc: c_long = 395; -pub const SYS_pkey_free: c_long = 396; -pub const SYS_statx: c_long = 397; -pub const SYS_pidfd_send_signal: c_long = 424; -pub const SYS_io_uring_setup: c_long = 425; -pub const SYS_io_uring_enter: c_long = 426; -pub const SYS_io_uring_register: c_long = 427; -pub const SYS_open_tree: c_long = 428; -pub const SYS_move_mount: c_long = 429; -pub const SYS_fsopen: c_long = 430; -pub const SYS_fsconfig: c_long = 431; -pub const SYS_fsmount: c_long = 432; -pub const SYS_fspick: c_long = 433; -pub const SYS_pidfd_open: c_long = 434; -pub const SYS_clone3: c_long = 435; -pub const SYS_close_range: c_long = 436; -pub const SYS_openat2: c_long = 437; -pub const SYS_pidfd_getfd: c_long = 438; -pub const SYS_faccessat2: c_long = 439; -pub const SYS_process_madvise: c_long = 440; -pub const SYS_epoll_pwait2: c_long = 441; -pub const SYS_mount_setattr: c_long = 442; -pub const SYS_quotactl_fd: c_long = 443; -pub const SYS_landlock_create_ruleset: c_long = 444; -pub const SYS_landlock_add_rule: c_long = 445; -pub const SYS_landlock_restrict_self: c_long = 446; -pub const SYS_process_mrelease: c_long = 448; -pub const SYS_futex_waitv: c_long = 449; -pub const SYS_set_mempolicy_home_node: c_long = 450; - -// offsets in mcontext_t.gregs from sys/ucontext.h -pub const REG_R0: c_int = 0; -pub const REG_R1: c_int = 1; -pub const REG_R2: c_int = 2; -pub const REG_R3: c_int = 3; -pub const REG_R4: c_int = 4; -pub const REG_R5: c_int = 5; -pub const REG_R6: c_int = 6; -pub const REG_R7: c_int = 7; -pub const REG_R8: c_int = 8; -pub const REG_R9: c_int = 9; -pub const REG_R10: c_int = 10; -pub const REG_R11: c_int = 11; -pub const REG_R12: c_int = 12; -pub const REG_R13: c_int = 13; -pub const REG_R14: c_int = 14; -pub const REG_R15: c_int = 15; - -pub const NGREG: c_int = 18; - -// From NDK's asm/auxvec.h -pub const AT_SYSINFO_EHDR: c_ulong = 33; - -f! { - // Sadly, Android before 5.0 (API level 21), the accept4 syscall is not - // exposed by the libc. As work-around, we implement it through `syscall` - // directly. This workaround can be removed if the minimum version of - // Android is bumped. 
When the workaround is removed, `accept4` can be - // moved back to `linux_like/mod.rs` - pub fn accept4( - fd: c_int, - addr: *mut crate::sockaddr, - len: *mut crate::socklen_t, - flg: c_int, - ) -> c_int { - crate::syscall(SYS_accept4, fd, addr, len, flg) as c_int - } -} diff --git a/vendor/libc/src/unix/linux_like/android/b32/mod.rs b/vendor/libc/src/unix/linux_like/android/b32/mod.rs deleted file mode 100644 index d02dbf92d79246..00000000000000 --- a/vendor/libc/src/unix/linux_like/android/b32/mod.rs +++ /dev/null @@ -1,239 +0,0 @@ -use crate::prelude::*; - -// The following definitions are correct for arm and i686, -// but may be wrong for mips - -pub type mode_t = u16; -pub type off64_t = c_longlong; -pub type sigset_t = c_ulong; -pub type socklen_t = i32; -pub type time64_t = i64; -pub type __u64 = c_ulonglong; -pub type __s64 = c_longlong; - -s! { - // FIXME(1.0): This should not implement `PartialEq` - #[allow(unpredictable_function_pointer_comparisons)] - pub struct sigaction { - pub sa_sigaction: crate::sighandler_t, - pub sa_mask: crate::sigset_t, - pub sa_flags: c_int, - pub sa_restorer: Option, - } - - pub struct rlimit64 { - pub rlim_cur: u64, - pub rlim_max: u64, - } - - pub struct stat { - pub st_dev: c_ulonglong, - __pad0: [c_uchar; 4], - __st_ino: crate::ino_t, - pub st_mode: c_uint, - pub st_nlink: crate::nlink_t, - pub st_uid: crate::uid_t, - pub st_gid: crate::gid_t, - pub st_rdev: c_ulonglong, - __pad3: [c_uchar; 4], - pub st_size: c_longlong, - pub st_blksize: crate::blksize_t, - pub st_blocks: c_ulonglong, - pub st_atime: c_long, - pub st_atime_nsec: c_long, - pub st_mtime: c_long, - pub st_mtime_nsec: c_long, - pub st_ctime: c_long, - pub st_ctime_nsec: c_long, - pub st_ino: c_ulonglong, - } - - pub struct stat64 { - pub st_dev: c_ulonglong, - __pad0: [c_uchar; 4], - __st_ino: crate::ino_t, - pub st_mode: c_uint, - pub st_nlink: crate::nlink_t, - pub st_uid: crate::uid_t, - pub st_gid: crate::gid_t, - pub st_rdev: c_ulonglong, - __pad3: [c_uchar; 4], - pub st_size: c_longlong, - pub st_blksize: crate::blksize_t, - pub st_blocks: c_ulonglong, - pub st_atime: c_long, - pub st_atime_nsec: c_long, - pub st_mtime: c_long, - pub st_mtime_nsec: c_long, - pub st_ctime: c_long, - pub st_ctime_nsec: c_long, - pub st_ino: c_ulonglong, - } - - pub struct statfs64 { - pub f_type: u32, - pub f_bsize: u32, - pub f_blocks: u64, - pub f_bfree: u64, - pub f_bavail: u64, - pub f_files: u64, - pub f_ffree: u64, - pub f_fsid: crate::__fsid_t, - pub f_namelen: u32, - pub f_frsize: u32, - pub f_flags: u32, - pub f_spare: [u32; 4], - } - - pub struct statvfs64 { - pub f_bsize: c_ulong, - pub f_frsize: c_ulong, - pub f_blocks: c_ulong, - pub f_bfree: c_ulong, - pub f_bavail: c_ulong, - pub f_files: c_ulong, - pub f_ffree: c_ulong, - pub f_favail: c_ulong, - pub f_fsid: c_ulong, - pub f_flag: c_ulong, - pub f_namemax: c_ulong, - } - - pub struct pthread_attr_t { - pub flags: u32, - pub stack_base: *mut c_void, - pub stack_size: size_t, - pub guard_size: size_t, - pub sched_policy: i32, - pub sched_priority: i32, - } - - pub struct pthread_mutex_t { - value: c_int, - } - - pub struct pthread_cond_t { - value: c_int, - } - - pub struct pthread_rwlock_t { - lock: pthread_mutex_t, - cond: pthread_cond_t, - numLocks: c_int, - writerThreadId: c_int, - pendingReaders: c_int, - pendingWriters: c_int, - attr: i32, - __reserved: [c_char; 12], - } - - pub struct pthread_barrier_t { - __private: [i32; 8], - } - - pub struct pthread_spinlock_t { - __private: [i32; 2], - } - - pub struct passwd { - pub 
pw_name: *mut c_char, - pub pw_passwd: *mut c_char, - pub pw_uid: crate::uid_t, - pub pw_gid: crate::gid_t, - pub pw_dir: *mut c_char, - pub pw_shell: *mut c_char, - } - - pub struct statfs { - pub f_type: u32, - pub f_bsize: u32, - pub f_blocks: u64, - pub f_bfree: u64, - pub f_bavail: u64, - pub f_files: u64, - pub f_ffree: u64, - pub f_fsid: crate::__fsid_t, - pub f_namelen: u32, - pub f_frsize: u32, - pub f_flags: u32, - pub f_spare: [u32; 4], - } - - pub struct sysinfo { - pub uptime: c_long, - pub loads: [c_ulong; 3], - pub totalram: c_ulong, - pub freeram: c_ulong, - pub sharedram: c_ulong, - pub bufferram: c_ulong, - pub totalswap: c_ulong, - pub freeswap: c_ulong, - pub procs: c_ushort, - pub pad: c_ushort, - pub totalhigh: c_ulong, - pub freehigh: c_ulong, - pub mem_unit: c_uint, - pub _f: [c_char; 8], - } -} - -s_no_extra_traits! { - pub struct sigset64_t { - __bits: [c_ulong; 2], - } -} - -// These constants must be of the same type of sigaction.sa_flags -pub const SA_NOCLDSTOP: c_int = 0x00000001; -pub const SA_NOCLDWAIT: c_int = 0x00000002; -pub const SA_NODEFER: c_int = 0x40000000; -pub const SA_ONSTACK: c_int = 0x08000000; -pub const SA_RESETHAND: c_int = 0x80000000; -pub const SA_RESTART: c_int = 0x10000000; -pub const SA_SIGINFO: c_int = 0x00000004; - -pub const RTLD_GLOBAL: c_int = 2; -pub const RTLD_NOW: c_int = 0; -pub const RTLD_DEFAULT: *mut c_void = -1isize as *mut c_void; - -pub const PTRACE_GETFPREGS: c_int = 14; -pub const PTRACE_SETFPREGS: c_int = 15; - -pub const PTHREAD_MUTEX_INITIALIZER: pthread_mutex_t = pthread_mutex_t { value: 0 }; -pub const PTHREAD_COND_INITIALIZER: pthread_cond_t = pthread_cond_t { value: 0 }; -pub const PTHREAD_RWLOCK_INITIALIZER: pthread_rwlock_t = pthread_rwlock_t { - lock: PTHREAD_MUTEX_INITIALIZER, - cond: PTHREAD_COND_INITIALIZER, - numLocks: 0, - writerThreadId: 0, - pendingReaders: 0, - pendingWriters: 0, - attr: 0, - __reserved: [0; 12], -}; -pub const PTHREAD_STACK_MIN: size_t = 4096 * 2; -pub const CPU_SETSIZE: size_t = 32; -pub const __CPU_BITS: size_t = 32; - -pub const UT_LINESIZE: usize = 8; -pub const UT_NAMESIZE: usize = 8; -pub const UT_HOSTSIZE: usize = 16; - -pub const SIGSTKSZ: size_t = 8192; -pub const MINSIGSTKSZ: size_t = 2048; - -extern "C" { - pub fn timegm64(tm: *const crate::tm) -> crate::time64_t; -} - -cfg_if! { - if #[cfg(target_arch = "x86")] { - mod x86; - pub use self::x86::*; - } else if #[cfg(target_arch = "arm")] { - mod arm; - pub use self::arm::*; - } else { - // Unknown target_arch - } -} diff --git a/vendor/libc/src/unix/linux_like/android/b32/x86/mod.rs b/vendor/libc/src/unix/linux_like/android/b32/x86/mod.rs deleted file mode 100644 index ca46c3c462246a..00000000000000 --- a/vendor/libc/src/unix/linux_like/android/b32/x86/mod.rs +++ /dev/null @@ -1,604 +0,0 @@ -use crate::prelude::*; - -pub type wchar_t = i32; -pub type greg_t = i32; - -s! { - pub struct _libc_fpreg { - pub significand: [u16; 4], - pub exponent: u16, - } - - pub struct _libc_fpstate { - pub cw: c_ulong, - pub sw: c_ulong, - pub tag: c_ulong, - pub ipoff: c_ulong, - pub cssel: c_ulong, - pub dataoff: c_ulong, - pub datasel: c_ulong, - pub _st: [_libc_fpreg; 8], - pub status: c_ulong, - } - - pub struct mcontext_t { - pub gregs: [greg_t; 19], - pub fpregs: *mut _libc_fpstate, - pub oldmask: c_ulong, - pub cr2: c_ulong, - } -} - -s_no_extra_traits! { - pub struct __c_anonymous_uc_sigmask_with_padding { - pub uc_sigmask: crate::sigset_t, - /* Android has a wrong (smaller) sigset_t on x86. 
*/ - __padding_rt_sigset: u32, - } - - pub union __c_anonymous_uc_sigmask { - uc_sigmask: __c_anonymous_uc_sigmask_with_padding, - uc_sigmask64: crate::sigset64_t, - } - - pub struct ucontext_t { - pub uc_flags: c_ulong, - pub uc_link: *mut ucontext_t, - pub uc_stack: crate::stack_t, - pub uc_mcontext: mcontext_t, - pub uc_sigmask__c_anonymous_union: __c_anonymous_uc_sigmask, - __padding_rt_sigset: u32, - __fpregs_mem: _libc_fpstate, - } - - #[repr(align(8))] - pub struct max_align_t { - priv_: [f64; 2], - } -} - -cfg_if! { - if #[cfg(feature = "extra_traits")] { - impl PartialEq for __c_anonymous_uc_sigmask_with_padding { - fn eq(&self, other: &__c_anonymous_uc_sigmask_with_padding) -> bool { - self.uc_sigmask == other.uc_sigmask - // Ignore padding - } - } - impl Eq for __c_anonymous_uc_sigmask_with_padding {} - impl hash::Hash for __c_anonymous_uc_sigmask_with_padding { - fn hash(&self, state: &mut H) { - self.uc_sigmask.hash(state) - // Ignore padding - } - } - - impl PartialEq for __c_anonymous_uc_sigmask { - fn eq(&self, other: &__c_anonymous_uc_sigmask) -> bool { - unsafe { self.uc_sigmask == other.uc_sigmask } - } - } - impl Eq for __c_anonymous_uc_sigmask {} - impl hash::Hash for __c_anonymous_uc_sigmask { - fn hash(&self, state: &mut H) { - unsafe { self.uc_sigmask.hash(state) } - } - } - - impl PartialEq for ucontext_t { - fn eq(&self, other: &Self) -> bool { - self.uc_flags == other.uc_flags - && self.uc_link == other.uc_link - && self.uc_stack == other.uc_stack - && self.uc_mcontext == other.uc_mcontext - && self.uc_sigmask__c_anonymous_union == other.uc_sigmask__c_anonymous_union - // Ignore padding field - } - } - impl Eq for ucontext_t {} - impl hash::Hash for ucontext_t { - fn hash(&self, state: &mut H) { - self.uc_flags.hash(state); - self.uc_link.hash(state); - self.uc_stack.hash(state); - self.uc_mcontext.hash(state); - self.uc_sigmask__c_anonymous_union.hash(state); - // Ignore padding field - } - } - } -} - -pub const O_DIRECT: c_int = 0x4000; -pub const O_DIRECTORY: c_int = 0x10000; -pub const O_NOFOLLOW: c_int = 0x20000; -pub const O_LARGEFILE: c_int = 0o0100000; - -pub const MAP_32BIT: c_int = 0x40; - -// Syscall table -pub const SYS_restart_syscall: c_long = 0; -pub const SYS_exit: c_long = 1; -pub const SYS_fork: c_long = 2; -pub const SYS_read: c_long = 3; -pub const SYS_write: c_long = 4; -pub const SYS_open: c_long = 5; -pub const SYS_close: c_long = 6; -pub const SYS_waitpid: c_long = 7; -pub const SYS_creat: c_long = 8; -pub const SYS_link: c_long = 9; -pub const SYS_unlink: c_long = 10; -pub const SYS_execve: c_long = 11; -pub const SYS_chdir: c_long = 12; -pub const SYS_time: c_long = 13; -pub const SYS_mknod: c_long = 14; -pub const SYS_chmod: c_long = 15; -pub const SYS_lchown: c_long = 16; -pub const SYS_break: c_long = 17; -pub const SYS_oldstat: c_long = 18; -pub const SYS_lseek: c_long = 19; -pub const SYS_getpid: c_long = 20; -pub const SYS_mount: c_long = 21; -pub const SYS_umount: c_long = 22; -pub const SYS_setuid: c_long = 23; -pub const SYS_getuid: c_long = 24; -pub const SYS_stime: c_long = 25; -pub const SYS_ptrace: c_long = 26; -pub const SYS_alarm: c_long = 27; -pub const SYS_oldfstat: c_long = 28; -pub const SYS_pause: c_long = 29; -pub const SYS_utime: c_long = 30; -pub const SYS_stty: c_long = 31; -pub const SYS_gtty: c_long = 32; -pub const SYS_access: c_long = 33; -pub const SYS_nice: c_long = 34; -pub const SYS_ftime: c_long = 35; -pub const SYS_sync: c_long = 36; -pub const SYS_kill: c_long = 37; -pub const SYS_rename: c_long = 38; 
-pub const SYS_mkdir: c_long = 39; -pub const SYS_rmdir: c_long = 40; -pub const SYS_dup: c_long = 41; -pub const SYS_pipe: c_long = 42; -pub const SYS_times: c_long = 43; -pub const SYS_prof: c_long = 44; -pub const SYS_brk: c_long = 45; -pub const SYS_setgid: c_long = 46; -pub const SYS_getgid: c_long = 47; -pub const SYS_signal: c_long = 48; -pub const SYS_geteuid: c_long = 49; -pub const SYS_getegid: c_long = 50; -pub const SYS_acct: c_long = 51; -pub const SYS_umount2: c_long = 52; -pub const SYS_lock: c_long = 53; -pub const SYS_ioctl: c_long = 54; -pub const SYS_fcntl: c_long = 55; -pub const SYS_mpx: c_long = 56; -pub const SYS_setpgid: c_long = 57; -pub const SYS_ulimit: c_long = 58; -pub const SYS_oldolduname: c_long = 59; -pub const SYS_umask: c_long = 60; -pub const SYS_chroot: c_long = 61; -pub const SYS_ustat: c_long = 62; -pub const SYS_dup2: c_long = 63; -pub const SYS_getppid: c_long = 64; -pub const SYS_getpgrp: c_long = 65; -pub const SYS_setsid: c_long = 66; -pub const SYS_sigaction: c_long = 67; -pub const SYS_sgetmask: c_long = 68; -pub const SYS_ssetmask: c_long = 69; -pub const SYS_setreuid: c_long = 70; -pub const SYS_setregid: c_long = 71; -pub const SYS_sigsuspend: c_long = 72; -pub const SYS_sigpending: c_long = 73; -pub const SYS_sethostname: c_long = 74; -pub const SYS_setrlimit: c_long = 75; -pub const SYS_getrlimit: c_long = 76; -pub const SYS_getrusage: c_long = 77; -pub const SYS_gettimeofday: c_long = 78; -pub const SYS_settimeofday: c_long = 79; -pub const SYS_getgroups: c_long = 80; -pub const SYS_setgroups: c_long = 81; -pub const SYS_select: c_long = 82; -pub const SYS_symlink: c_long = 83; -pub const SYS_oldlstat: c_long = 84; -pub const SYS_readlink: c_long = 85; -pub const SYS_uselib: c_long = 86; -pub const SYS_swapon: c_long = 87; -pub const SYS_reboot: c_long = 88; -pub const SYS_readdir: c_long = 89; -pub const SYS_mmap: c_long = 90; -pub const SYS_munmap: c_long = 91; -pub const SYS_truncate: c_long = 92; -pub const SYS_ftruncate: c_long = 93; -pub const SYS_fchmod: c_long = 94; -pub const SYS_fchown: c_long = 95; -pub const SYS_getpriority: c_long = 96; -pub const SYS_setpriority: c_long = 97; -pub const SYS_profil: c_long = 98; -pub const SYS_statfs: c_long = 99; -pub const SYS_fstatfs: c_long = 100; -pub const SYS_ioperm: c_long = 101; -pub const SYS_socketcall: c_long = 102; -pub const SYS_syslog: c_long = 103; -pub const SYS_setitimer: c_long = 104; -pub const SYS_getitimer: c_long = 105; -pub const SYS_stat: c_long = 106; -pub const SYS_lstat: c_long = 107; -pub const SYS_fstat: c_long = 108; -pub const SYS_olduname: c_long = 109; -pub const SYS_iopl: c_long = 110; -pub const SYS_vhangup: c_long = 111; -pub const SYS_idle: c_long = 112; -pub const SYS_vm86old: c_long = 113; -pub const SYS_wait4: c_long = 114; -pub const SYS_swapoff: c_long = 115; -pub const SYS_sysinfo: c_long = 116; -pub const SYS_ipc: c_long = 117; -pub const SYS_fsync: c_long = 118; -pub const SYS_sigreturn: c_long = 119; -pub const SYS_clone: c_long = 120; -pub const SYS_setdomainname: c_long = 121; -pub const SYS_uname: c_long = 122; -pub const SYS_modify_ldt: c_long = 123; -pub const SYS_adjtimex: c_long = 124; -pub const SYS_mprotect: c_long = 125; -pub const SYS_sigprocmask: c_long = 126; -#[deprecated(since = "0.2.70", note = "Functional up to 2.6 kernel")] -pub const SYS_create_module: c_long = 127; -pub const SYS_init_module: c_long = 128; -pub const SYS_delete_module: c_long = 129; -#[deprecated(since = "0.2.70", note = "Functional up to 2.6 kernel")] -pub 
const SYS_get_kernel_syms: c_long = 130; -pub const SYS_quotactl: c_long = 131; -pub const SYS_getpgid: c_long = 132; -pub const SYS_fchdir: c_long = 133; -pub const SYS_bdflush: c_long = 134; -pub const SYS_sysfs: c_long = 135; -pub const SYS_personality: c_long = 136; -pub const SYS_afs_syscall: c_long = 137; -pub const SYS_setfsuid: c_long = 138; -pub const SYS_setfsgid: c_long = 139; -// FIXME(android): SYS__llseek is in the NDK sources but for some reason is -// not available in the tests -// pub const SYS__llseek: c_long = 140; -pub const SYS_getdents: c_long = 141; -// FIXME(android): SYS__newselect is in the NDK sources but for some reason is -// not available in the tests -// pub const SYS__newselect: c_long = 142; -pub const SYS_flock: c_long = 143; -pub const SYS_msync: c_long = 144; -pub const SYS_readv: c_long = 145; -pub const SYS_writev: c_long = 146; -pub const SYS_getsid: c_long = 147; -pub const SYS_fdatasync: c_long = 148; -// FIXME(android): SYS__llseek is in the NDK sources but for some reason is -// not available in the tests -// pub const SYS__sysctl: c_long = 149; -pub const SYS_mlock: c_long = 150; -pub const SYS_munlock: c_long = 151; -pub const SYS_mlockall: c_long = 152; -pub const SYS_munlockall: c_long = 153; -pub const SYS_sched_setparam: c_long = 154; -pub const SYS_sched_getparam: c_long = 155; -pub const SYS_sched_setscheduler: c_long = 156; -pub const SYS_sched_getscheduler: c_long = 157; -pub const SYS_sched_yield: c_long = 158; -pub const SYS_sched_get_priority_max: c_long = 159; -pub const SYS_sched_get_priority_min: c_long = 160; -pub const SYS_sched_rr_get_interval: c_long = 161; -pub const SYS_nanosleep: c_long = 162; -pub const SYS_mremap: c_long = 163; -pub const SYS_setresuid: c_long = 164; -pub const SYS_getresuid: c_long = 165; -pub const SYS_vm86: c_long = 166; -#[deprecated(since = "0.2.70", note = "Functional up to 2.6 kernel")] -pub const SYS_query_module: c_long = 167; -pub const SYS_poll: c_long = 168; -pub const SYS_nfsservctl: c_long = 169; -pub const SYS_setresgid: c_long = 170; -pub const SYS_getresgid: c_long = 171; -pub const SYS_prctl: c_long = 172; -pub const SYS_rt_sigreturn: c_long = 173; -pub const SYS_rt_sigaction: c_long = 174; -pub const SYS_rt_sigprocmask: c_long = 175; -pub const SYS_rt_sigpending: c_long = 176; -pub const SYS_rt_sigtimedwait: c_long = 177; -pub const SYS_rt_sigqueueinfo: c_long = 178; -pub const SYS_rt_sigsuspend: c_long = 179; -pub const SYS_pread64: c_long = 180; -pub const SYS_pwrite64: c_long = 181; -pub const SYS_chown: c_long = 182; -pub const SYS_getcwd: c_long = 183; -pub const SYS_capget: c_long = 184; -pub const SYS_capset: c_long = 185; -pub const SYS_sigaltstack: c_long = 186; -pub const SYS_sendfile: c_long = 187; -pub const SYS_getpmsg: c_long = 188; -pub const SYS_putpmsg: c_long = 189; -pub const SYS_vfork: c_long = 190; -pub const SYS_ugetrlimit: c_long = 191; -pub const SYS_mmap2: c_long = 192; -pub const SYS_truncate64: c_long = 193; -pub const SYS_ftruncate64: c_long = 194; -pub const SYS_stat64: c_long = 195; -pub const SYS_lstat64: c_long = 196; -pub const SYS_fstat64: c_long = 197; -pub const SYS_lchown32: c_long = 198; -pub const SYS_getuid32: c_long = 199; -pub const SYS_getgid32: c_long = 200; -pub const SYS_geteuid32: c_long = 201; -pub const SYS_getegid32: c_long = 202; -pub const SYS_setreuid32: c_long = 203; -pub const SYS_setregid32: c_long = 204; -pub const SYS_getgroups32: c_long = 205; -pub const SYS_setgroups32: c_long = 206; -pub const SYS_fchown32: c_long = 207; -pub 
const SYS_setresuid32: c_long = 208; -pub const SYS_getresuid32: c_long = 209; -pub const SYS_setresgid32: c_long = 210; -pub const SYS_getresgid32: c_long = 211; -pub const SYS_chown32: c_long = 212; -pub const SYS_setuid32: c_long = 213; -pub const SYS_setgid32: c_long = 214; -pub const SYS_setfsuid32: c_long = 215; -pub const SYS_setfsgid32: c_long = 216; -pub const SYS_pivot_root: c_long = 217; -pub const SYS_mincore: c_long = 218; -pub const SYS_madvise: c_long = 219; -pub const SYS_getdents64: c_long = 220; -pub const SYS_fcntl64: c_long = 221; -pub const SYS_gettid: c_long = 224; -pub const SYS_readahead: c_long = 225; -pub const SYS_setxattr: c_long = 226; -pub const SYS_lsetxattr: c_long = 227; -pub const SYS_fsetxattr: c_long = 228; -pub const SYS_getxattr: c_long = 229; -pub const SYS_lgetxattr: c_long = 230; -pub const SYS_fgetxattr: c_long = 231; -pub const SYS_listxattr: c_long = 232; -pub const SYS_llistxattr: c_long = 233; -pub const SYS_flistxattr: c_long = 234; -pub const SYS_removexattr: c_long = 235; -pub const SYS_lremovexattr: c_long = 236; -pub const SYS_fremovexattr: c_long = 237; -pub const SYS_tkill: c_long = 238; -pub const SYS_sendfile64: c_long = 239; -pub const SYS_futex: c_long = 240; -pub const SYS_sched_setaffinity: c_long = 241; -pub const SYS_sched_getaffinity: c_long = 242; -pub const SYS_set_thread_area: c_long = 243; -pub const SYS_get_thread_area: c_long = 244; -pub const SYS_io_setup: c_long = 245; -pub const SYS_io_destroy: c_long = 246; -pub const SYS_io_getevents: c_long = 247; -pub const SYS_io_submit: c_long = 248; -pub const SYS_io_cancel: c_long = 249; -pub const SYS_fadvise64: c_long = 250; -pub const SYS_exit_group: c_long = 252; -pub const SYS_lookup_dcookie: c_long = 253; -pub const SYS_epoll_create: c_long = 254; -pub const SYS_epoll_ctl: c_long = 255; -pub const SYS_epoll_wait: c_long = 256; -pub const SYS_remap_file_pages: c_long = 257; -pub const SYS_set_tid_address: c_long = 258; -pub const SYS_timer_create: c_long = 259; -pub const SYS_timer_settime: c_long = 260; -pub const SYS_timer_gettime: c_long = 261; -pub const SYS_timer_getoverrun: c_long = 262; -pub const SYS_timer_delete: c_long = 263; -pub const SYS_clock_settime: c_long = 264; -pub const SYS_clock_gettime: c_long = 265; -pub const SYS_clock_getres: c_long = 266; -pub const SYS_clock_nanosleep: c_long = 267; -pub const SYS_statfs64: c_long = 268; -pub const SYS_fstatfs64: c_long = 269; -pub const SYS_tgkill: c_long = 270; -pub const SYS_utimes: c_long = 271; -pub const SYS_fadvise64_64: c_long = 272; -pub const SYS_vserver: c_long = 273; -pub const SYS_mbind: c_long = 274; -pub const SYS_get_mempolicy: c_long = 275; -pub const SYS_set_mempolicy: c_long = 276; -pub const SYS_mq_open: c_long = 277; -pub const SYS_mq_unlink: c_long = 278; -pub const SYS_mq_timedsend: c_long = 279; -pub const SYS_mq_timedreceive: c_long = 280; -pub const SYS_mq_notify: c_long = 281; -pub const SYS_mq_getsetattr: c_long = 282; -pub const SYS_kexec_load: c_long = 283; -pub const SYS_waitid: c_long = 284; -pub const SYS_add_key: c_long = 286; -pub const SYS_request_key: c_long = 287; -pub const SYS_keyctl: c_long = 288; -pub const SYS_ioprio_set: c_long = 289; -pub const SYS_ioprio_get: c_long = 290; -pub const SYS_inotify_init: c_long = 291; -pub const SYS_inotify_add_watch: c_long = 292; -pub const SYS_inotify_rm_watch: c_long = 293; -pub const SYS_migrate_pages: c_long = 294; -pub const SYS_openat: c_long = 295; -pub const SYS_mkdirat: c_long = 296; -pub const SYS_mknodat: c_long = 297; -pub 
const SYS_fchownat: c_long = 298; -pub const SYS_futimesat: c_long = 299; -pub const SYS_fstatat64: c_long = 300; -pub const SYS_unlinkat: c_long = 301; -pub const SYS_renameat: c_long = 302; -pub const SYS_linkat: c_long = 303; -pub const SYS_symlinkat: c_long = 304; -pub const SYS_readlinkat: c_long = 305; -pub const SYS_fchmodat: c_long = 306; -pub const SYS_faccessat: c_long = 307; -pub const SYS_pselect6: c_long = 308; -pub const SYS_ppoll: c_long = 309; -pub const SYS_unshare: c_long = 310; -pub const SYS_set_robust_list: c_long = 311; -pub const SYS_get_robust_list: c_long = 312; -pub const SYS_splice: c_long = 313; -pub const SYS_sync_file_range: c_long = 314; -pub const SYS_tee: c_long = 315; -pub const SYS_vmsplice: c_long = 316; -pub const SYS_move_pages: c_long = 317; -pub const SYS_getcpu: c_long = 318; -pub const SYS_epoll_pwait: c_long = 319; -pub const SYS_utimensat: c_long = 320; -pub const SYS_signalfd: c_long = 321; -pub const SYS_timerfd_create: c_long = 322; -pub const SYS_eventfd: c_long = 323; -pub const SYS_fallocate: c_long = 324; -pub const SYS_timerfd_settime: c_long = 325; -pub const SYS_timerfd_gettime: c_long = 326; -pub const SYS_signalfd4: c_long = 327; -pub const SYS_eventfd2: c_long = 328; -pub const SYS_epoll_create1: c_long = 329; -pub const SYS_dup3: c_long = 330; -pub const SYS_pipe2: c_long = 331; -pub const SYS_inotify_init1: c_long = 332; -pub const SYS_preadv: c_long = 333; -pub const SYS_pwritev: c_long = 334; -pub const SYS_rt_tgsigqueueinfo: c_long = 335; -pub const SYS_perf_event_open: c_long = 336; -pub const SYS_recvmmsg: c_long = 337; -pub const SYS_fanotify_init: c_long = 338; -pub const SYS_fanotify_mark: c_long = 339; -pub const SYS_prlimit64: c_long = 340; -pub const SYS_name_to_handle_at: c_long = 341; -pub const SYS_open_by_handle_at: c_long = 342; -pub const SYS_clock_adjtime: c_long = 343; -pub const SYS_syncfs: c_long = 344; -pub const SYS_sendmmsg: c_long = 345; -pub const SYS_setns: c_long = 346; -pub const SYS_process_vm_readv: c_long = 347; -pub const SYS_process_vm_writev: c_long = 348; -pub const SYS_kcmp: c_long = 349; -pub const SYS_finit_module: c_long = 350; -pub const SYS_sched_setattr: c_long = 351; -pub const SYS_sched_getattr: c_long = 352; -pub const SYS_renameat2: c_long = 353; -pub const SYS_seccomp: c_long = 354; -pub const SYS_getrandom: c_long = 355; -pub const SYS_memfd_create: c_long = 356; -pub const SYS_bpf: c_long = 357; -pub const SYS_execveat: c_long = 358; -pub const SYS_socket: c_long = 359; -pub const SYS_socketpair: c_long = 360; -pub const SYS_bind: c_long = 361; -pub const SYS_connect: c_long = 362; -pub const SYS_listen: c_long = 363; -pub const SYS_accept4: c_long = 364; -pub const SYS_getsockopt: c_long = 365; -pub const SYS_setsockopt: c_long = 366; -pub const SYS_getsockname: c_long = 367; -pub const SYS_getpeername: c_long = 368; -pub const SYS_sendto: c_long = 369; -pub const SYS_sendmsg: c_long = 370; -pub const SYS_recvfrom: c_long = 371; -pub const SYS_recvmsg: c_long = 372; -pub const SYS_shutdown: c_long = 373; -pub const SYS_userfaultfd: c_long = 374; -pub const SYS_membarrier: c_long = 375; -pub const SYS_mlock2: c_long = 376; -pub const SYS_copy_file_range: c_long = 377; -pub const SYS_preadv2: c_long = 378; -pub const SYS_pwritev2: c_long = 379; -pub const SYS_pkey_mprotect: c_long = 380; -pub const SYS_pkey_alloc: c_long = 381; -pub const SYS_pkey_free: c_long = 382; -pub const SYS_statx: c_long = 383; -pub const SYS_pidfd_send_signal: c_long = 424; -pub const SYS_io_uring_setup: 
c_long = 425; -pub const SYS_io_uring_enter: c_long = 426; -pub const SYS_io_uring_register: c_long = 427; -pub const SYS_open_tree: c_long = 428; -pub const SYS_move_mount: c_long = 429; -pub const SYS_fsopen: c_long = 430; -pub const SYS_fsconfig: c_long = 431; -pub const SYS_fsmount: c_long = 432; -pub const SYS_fspick: c_long = 433; -pub const SYS_pidfd_open: c_long = 434; -pub const SYS_clone3: c_long = 435; -pub const SYS_close_range: c_long = 436; -pub const SYS_openat2: c_long = 437; -pub const SYS_pidfd_getfd: c_long = 438; -pub const SYS_faccessat2: c_long = 439; -pub const SYS_process_madvise: c_long = 440; -pub const SYS_epoll_pwait2: c_long = 441; -pub const SYS_mount_setattr: c_long = 442; -pub const SYS_quotactl_fd: c_long = 443; -pub const SYS_landlock_create_ruleset: c_long = 444; -pub const SYS_landlock_add_rule: c_long = 445; -pub const SYS_landlock_restrict_self: c_long = 446; -pub const SYS_memfd_secret: c_long = 447; -pub const SYS_process_mrelease: c_long = 448; -pub const SYS_futex_waitv: c_long = 449; -pub const SYS_set_mempolicy_home_node: c_long = 450; - -// offsets in user_regs_structs, from sys/reg.h -pub const EBX: c_int = 0; -pub const ECX: c_int = 1; -pub const EDX: c_int = 2; -pub const ESI: c_int = 3; -pub const EDI: c_int = 4; -pub const EBP: c_int = 5; -pub const EAX: c_int = 6; -pub const DS: c_int = 7; -pub const ES: c_int = 8; -pub const FS: c_int = 9; -pub const GS: c_int = 10; -pub const ORIG_EAX: c_int = 11; -pub const EIP: c_int = 12; -pub const CS: c_int = 13; -pub const EFL: c_int = 14; -pub const UESP: c_int = 15; -pub const SS: c_int = 16; - -// offsets in mcontext_t.gregs from sys/ucontext.h -pub const REG_GS: c_int = 0; -pub const REG_FS: c_int = 1; -pub const REG_ES: c_int = 2; -pub const REG_DS: c_int = 3; -pub const REG_EDI: c_int = 4; -pub const REG_ESI: c_int = 5; -pub const REG_EBP: c_int = 6; -pub const REG_ESP: c_int = 7; -pub const REG_EBX: c_int = 8; -pub const REG_EDX: c_int = 9; -pub const REG_ECX: c_int = 10; -pub const REG_EAX: c_int = 11; -pub const REG_TRAPNO: c_int = 12; -pub const REG_ERR: c_int = 13; -pub const REG_EIP: c_int = 14; -pub const REG_CS: c_int = 15; -pub const REG_EFL: c_int = 16; -pub const REG_UESP: c_int = 17; -pub const REG_SS: c_int = 18; - -// From NDK's asm/auxvec.h -pub const AT_SYSINFO: c_ulong = 32; -pub const AT_SYSINFO_EHDR: c_ulong = 33; -pub const AT_VECTOR_SIZE_ARCH: c_ulong = 3; - -// socketcall values from linux/net.h (only the needed ones, and not public) -const SYS_ACCEPT4: c_int = 18; - -f! { - // Sadly, Android before 5.0 (API level 21), the accept4 syscall is not - // exposed by the libc. As work-around, we implement it as raw syscall. - // Note that for x86, the `accept4` syscall is not available either, - // and we must use the `socketcall` syscall instead. - // This workaround can be removed if the minimum Android version is bumped. - // When the workaround is removed, `accept4` can be moved back - // to `linux_like/mod.rs` - pub fn accept4( - fd: c_int, - addr: *mut crate::sockaddr, - len: *mut crate::socklen_t, - flg: c_int, - ) -> c_int { - // Arguments are passed as array of `long int` - // (which is big enough on x86 for a pointer). 
- let mut args = [fd as c_long, addr as c_long, len as c_long, flg as c_long]; - crate::syscall(SYS_socketcall, SYS_ACCEPT4, args[..].as_mut_ptr()) - } -} diff --git a/vendor/libc/src/unix/linux_like/android/b64/aarch64/mod.rs b/vendor/libc/src/unix/linux_like/android/b64/aarch64/mod.rs deleted file mode 100644 index 3c6131089ee892..00000000000000 --- a/vendor/libc/src/unix/linux_like/android/b64/aarch64/mod.rs +++ /dev/null @@ -1,473 +0,0 @@ -use crate::off64_t; -use crate::prelude::*; - -pub type wchar_t = u32; -pub type __u64 = c_ulonglong; -pub type __s64 = c_longlong; - -s! { - pub struct stat { - pub st_dev: crate::dev_t, - pub st_ino: crate::ino_t, - pub st_mode: c_uint, - pub st_nlink: crate::nlink_t, - pub st_uid: crate::uid_t, - pub st_gid: crate::gid_t, - pub st_rdev: crate::dev_t, - __pad1: c_ulong, - pub st_size: off64_t, - pub st_blksize: c_int, - __pad2: c_int, - pub st_blocks: c_long, - pub st_atime: crate::time_t, - pub st_atime_nsec: c_long, - pub st_mtime: crate::time_t, - pub st_mtime_nsec: c_long, - pub st_ctime: crate::time_t, - pub st_ctime_nsec: c_long, - __unused4: c_uint, - __unused5: c_uint, - } - - pub struct stat64 { - pub st_dev: crate::dev_t, - pub st_ino: crate::ino_t, - pub st_mode: c_uint, - pub st_nlink: crate::nlink_t, - pub st_uid: crate::uid_t, - pub st_gid: crate::gid_t, - pub st_rdev: crate::dev_t, - __pad1: c_ulong, - pub st_size: off64_t, - pub st_blksize: c_int, - __pad2: c_int, - pub st_blocks: c_long, - pub st_atime: crate::time_t, - pub st_atime_nsec: c_long, - pub st_mtime: crate::time_t, - pub st_mtime_nsec: c_long, - pub st_ctime: crate::time_t, - pub st_ctime_nsec: c_long, - __unused4: c_uint, - __unused5: c_uint, - } - - pub struct user_regs_struct { - pub regs: [u64; 31], - pub sp: u64, - pub pc: u64, - pub pstate: u64, - } - - pub struct ucontext_t { - pub uc_flags: c_ulong, - pub uc_link: *mut ucontext_t, - pub uc_stack: crate::stack_t, - pub uc_sigmask: crate::sigset_t, - pub uc_mcontext: mcontext_t, - } - - #[repr(align(16))] - pub struct mcontext_t { - pub fault_address: c_ulonglong, - pub regs: [c_ulonglong; 31], - pub sp: c_ulonglong, - pub pc: c_ulonglong, - pub pstate: c_ulonglong, - __reserved: [u64; 512], - } - - pub struct user_fpsimd_struct { - pub vregs: [crate::__uint128_t; 32], - pub fpsr: u32, - pub fpcr: u32, - } -} - -s_no_extra_traits! 
{ - #[repr(align(16))] - pub struct max_align_t { - priv_: [f32; 8], - } -} - -pub const O_DIRECT: c_int = 0x10000; -pub const O_DIRECTORY: c_int = 0x4000; -pub const O_NOFOLLOW: c_int = 0x8000; -pub const O_LARGEFILE: c_int = 0o400000; - -pub const SIGSTKSZ: size_t = 16384; -pub const MINSIGSTKSZ: size_t = 5120; - -// From NDK's asm/hwcap.h -pub const HWCAP_FP: c_ulong = 1 << 0; -pub const HWCAP_ASIMD: c_ulong = 1 << 1; -pub const HWCAP_EVTSTRM: c_ulong = 1 << 2; -pub const HWCAP_AES: c_ulong = 1 << 3; -pub const HWCAP_PMULL: c_ulong = 1 << 4; -pub const HWCAP_SHA1: c_ulong = 1 << 5; -pub const HWCAP_SHA2: c_ulong = 1 << 6; -pub const HWCAP_CRC32: c_ulong = 1 << 7; -pub const HWCAP_ATOMICS: c_ulong = 1 << 8; -pub const HWCAP_FPHP: c_ulong = 1 << 9; -pub const HWCAP_ASIMDHP: c_ulong = 1 << 10; -pub const HWCAP_CPUID: c_ulong = 1 << 11; -pub const HWCAP_ASIMDRDM: c_ulong = 1 << 12; -pub const HWCAP_JSCVT: c_ulong = 1 << 13; -pub const HWCAP_FCMA: c_ulong = 1 << 14; -pub const HWCAP_LRCPC: c_ulong = 1 << 15; -pub const HWCAP_DCPOP: c_ulong = 1 << 16; -pub const HWCAP_SHA3: c_ulong = 1 << 17; -pub const HWCAP_SM3: c_ulong = 1 << 18; -pub const HWCAP_SM4: c_ulong = 1 << 19; -pub const HWCAP_ASIMDDP: c_ulong = 1 << 20; -pub const HWCAP_SHA512: c_ulong = 1 << 21; -pub const HWCAP_SVE: c_ulong = 1 << 22; -pub const HWCAP_ASIMDFHM: c_ulong = 1 << 23; -pub const HWCAP_DIT: c_ulong = 1 << 24; -pub const HWCAP_USCAT: c_ulong = 1 << 25; -pub const HWCAP_ILRCPC: c_ulong = 1 << 26; -pub const HWCAP_FLAGM: c_ulong = 1 << 27; -pub const HWCAP_SSBS: c_ulong = 1 << 28; -pub const HWCAP_SB: c_ulong = 1 << 29; -pub const HWCAP_PACA: c_ulong = 1 << 30; -pub const HWCAP_PACG: c_ulong = 1 << 31; -pub const HWCAP2_DCPODP: c_ulong = 1 << 0; -pub const HWCAP2_SVE2: c_ulong = 1 << 1; -pub const HWCAP2_SVEAES: c_ulong = 1 << 2; -pub const HWCAP2_SVEPMULL: c_ulong = 1 << 3; -pub const HWCAP2_SVEBITPERM: c_ulong = 1 << 4; -pub const HWCAP2_SVESHA3: c_ulong = 1 << 5; -pub const HWCAP2_SVESM4: c_ulong = 1 << 6; -pub const HWCAP2_FLAGM2: c_ulong = 1 << 7; -pub const HWCAP2_FRINT: c_ulong = 1 << 8; -pub const HWCAP2_SVEI8MM: c_ulong = 1 << 9; -pub const HWCAP2_SVEF32MM: c_ulong = 1 << 10; -pub const HWCAP2_SVEF64MM: c_ulong = 1 << 11; -pub const HWCAP2_SVEBF16: c_ulong = 1 << 12; -pub const HWCAP2_I8MM: c_ulong = 1 << 13; -pub const HWCAP2_BF16: c_ulong = 1 << 14; -pub const HWCAP2_DGH: c_ulong = 1 << 15; -pub const HWCAP2_RNG: c_ulong = 1 << 16; -pub const HWCAP2_BTI: c_ulong = 1 << 17; -pub const HWCAP2_MTE: c_ulong = 1 << 18; -pub const HWCAP2_ECV: c_ulong = 1 << 19; -pub const HWCAP2_AFP: c_ulong = 1 << 20; -pub const HWCAP2_RPRES: c_ulong = 1 << 21; -pub const HWCAP2_MTE3: c_ulong = 1 << 22; -pub const HWCAP2_SME: c_ulong = 1 << 23; -pub const HWCAP2_SME_I16I64: c_ulong = 1 << 24; -pub const HWCAP2_SME_F64F64: c_ulong = 1 << 25; -pub const HWCAP2_SME_I8I32: c_ulong = 1 << 26; -pub const HWCAP2_SME_F16F32: c_ulong = 1 << 27; -pub const HWCAP2_SME_B16F32: c_ulong = 1 << 28; -pub const HWCAP2_SME_F32F32: c_ulong = 1 << 29; -pub const HWCAP2_SME_FA64: c_ulong = 1 << 30; -pub const HWCAP2_WFXT: c_ulong = 1 << 31; -pub const HWCAP2_EBF16: c_ulong = 1 << 32; -pub const HWCAP2_SVE_EBF16: c_ulong = 1 << 33; - -pub const SYS_io_setup: c_long = 0; -pub const SYS_io_destroy: c_long = 1; -pub const SYS_io_submit: c_long = 2; -pub const SYS_io_cancel: c_long = 3; -pub const SYS_io_getevents: c_long = 4; -pub const SYS_setxattr: c_long = 5; -pub const SYS_lsetxattr: c_long = 6; -pub const SYS_fsetxattr: c_long = 7; -pub const 
SYS_getxattr: c_long = 8; -pub const SYS_lgetxattr: c_long = 9; -pub const SYS_fgetxattr: c_long = 10; -pub const SYS_listxattr: c_long = 11; -pub const SYS_llistxattr: c_long = 12; -pub const SYS_flistxattr: c_long = 13; -pub const SYS_removexattr: c_long = 14; -pub const SYS_lremovexattr: c_long = 15; -pub const SYS_fremovexattr: c_long = 16; -pub const SYS_getcwd: c_long = 17; -pub const SYS_lookup_dcookie: c_long = 18; -pub const SYS_eventfd2: c_long = 19; -pub const SYS_epoll_create1: c_long = 20; -pub const SYS_epoll_ctl: c_long = 21; -pub const SYS_epoll_pwait: c_long = 22; -pub const SYS_dup: c_long = 23; -pub const SYS_dup3: c_long = 24; -pub const SYS_fcntl: c_long = 25; -pub const SYS_inotify_init1: c_long = 26; -pub const SYS_inotify_add_watch: c_long = 27; -pub const SYS_inotify_rm_watch: c_long = 28; -pub const SYS_ioctl: c_long = 29; -pub const SYS_ioprio_set: c_long = 30; -pub const SYS_ioprio_get: c_long = 31; -pub const SYS_flock: c_long = 32; -pub const SYS_mknodat: c_long = 33; -pub const SYS_mkdirat: c_long = 34; -pub const SYS_unlinkat: c_long = 35; -pub const SYS_symlinkat: c_long = 36; -pub const SYS_linkat: c_long = 37; -pub const SYS_renameat: c_long = 38; -pub const SYS_umount2: c_long = 39; -pub const SYS_mount: c_long = 40; -pub const SYS_pivot_root: c_long = 41; -pub const SYS_nfsservctl: c_long = 42; -pub const SYS_fallocate: c_long = 47; -pub const SYS_faccessat: c_long = 48; -pub const SYS_chdir: c_long = 49; -pub const SYS_fchdir: c_long = 50; -pub const SYS_chroot: c_long = 51; -pub const SYS_fchmod: c_long = 52; -pub const SYS_fchmodat: c_long = 53; -pub const SYS_fchownat: c_long = 54; -pub const SYS_fchown: c_long = 55; -pub const SYS_openat: c_long = 56; -pub const SYS_close: c_long = 57; -pub const SYS_vhangup: c_long = 58; -pub const SYS_pipe2: c_long = 59; -pub const SYS_quotactl: c_long = 60; -pub const SYS_getdents64: c_long = 61; -pub const SYS_lseek: c_long = 62; -pub const SYS_read: c_long = 63; -pub const SYS_write: c_long = 64; -pub const SYS_readv: c_long = 65; -pub const SYS_writev: c_long = 66; -pub const SYS_pread64: c_long = 67; -pub const SYS_pwrite64: c_long = 68; -pub const SYS_preadv: c_long = 69; -pub const SYS_pwritev: c_long = 70; -pub const SYS_pselect6: c_long = 72; -pub const SYS_ppoll: c_long = 73; -pub const SYS_signalfd4: c_long = 74; -pub const SYS_vmsplice: c_long = 75; -pub const SYS_splice: c_long = 76; -pub const SYS_tee: c_long = 77; -pub const SYS_readlinkat: c_long = 78; -pub const SYS_sync: c_long = 81; -pub const SYS_fsync: c_long = 82; -pub const SYS_fdatasync: c_long = 83; -pub const SYS_sync_file_range: c_long = 84; -pub const SYS_timerfd_create: c_long = 85; -pub const SYS_timerfd_settime: c_long = 86; -pub const SYS_timerfd_gettime: c_long = 87; -pub const SYS_utimensat: c_long = 88; -pub const SYS_acct: c_long = 89; -pub const SYS_capget: c_long = 90; -pub const SYS_capset: c_long = 91; -pub const SYS_personality: c_long = 92; -pub const SYS_exit: c_long = 93; -pub const SYS_exit_group: c_long = 94; -pub const SYS_waitid: c_long = 95; -pub const SYS_set_tid_address: c_long = 96; -pub const SYS_unshare: c_long = 97; -pub const SYS_futex: c_long = 98; -pub const SYS_set_robust_list: c_long = 99; -pub const SYS_get_robust_list: c_long = 100; -pub const SYS_nanosleep: c_long = 101; -pub const SYS_getitimer: c_long = 102; -pub const SYS_setitimer: c_long = 103; -pub const SYS_kexec_load: c_long = 104; -pub const SYS_init_module: c_long = 105; -pub const SYS_delete_module: c_long = 106; -pub const 
SYS_timer_create: c_long = 107; -pub const SYS_timer_gettime: c_long = 108; -pub const SYS_timer_getoverrun: c_long = 109; -pub const SYS_timer_settime: c_long = 110; -pub const SYS_timer_delete: c_long = 111; -pub const SYS_clock_settime: c_long = 112; -pub const SYS_clock_gettime: c_long = 113; -pub const SYS_clock_getres: c_long = 114; -pub const SYS_clock_nanosleep: c_long = 115; -pub const SYS_syslog: c_long = 116; -pub const SYS_ptrace: c_long = 117; -pub const SYS_sched_setparam: c_long = 118; -pub const SYS_sched_setscheduler: c_long = 119; -pub const SYS_sched_getscheduler: c_long = 120; -pub const SYS_sched_getparam: c_long = 121; -pub const SYS_sched_setaffinity: c_long = 122; -pub const SYS_sched_getaffinity: c_long = 123; -pub const SYS_sched_yield: c_long = 124; -pub const SYS_sched_get_priority_max: c_long = 125; -pub const SYS_sched_get_priority_min: c_long = 126; -pub const SYS_sched_rr_get_interval: c_long = 127; -pub const SYS_restart_syscall: c_long = 128; -pub const SYS_kill: c_long = 129; -pub const SYS_tkill: c_long = 130; -pub const SYS_tgkill: c_long = 131; -pub const SYS_sigaltstack: c_long = 132; -pub const SYS_rt_sigsuspend: c_long = 133; -pub const SYS_rt_sigaction: c_long = 134; -pub const SYS_rt_sigprocmask: c_long = 135; -pub const SYS_rt_sigpending: c_long = 136; -pub const SYS_rt_sigtimedwait: c_long = 137; -pub const SYS_rt_sigqueueinfo: c_long = 138; -pub const SYS_rt_sigreturn: c_long = 139; -pub const SYS_setpriority: c_long = 140; -pub const SYS_getpriority: c_long = 141; -pub const SYS_reboot: c_long = 142; -pub const SYS_setregid: c_long = 143; -pub const SYS_setgid: c_long = 144; -pub const SYS_setreuid: c_long = 145; -pub const SYS_setuid: c_long = 146; -pub const SYS_setresuid: c_long = 147; -pub const SYS_getresuid: c_long = 148; -pub const SYS_setresgid: c_long = 149; -pub const SYS_getresgid: c_long = 150; -pub const SYS_setfsuid: c_long = 151; -pub const SYS_setfsgid: c_long = 152; -pub const SYS_times: c_long = 153; -pub const SYS_setpgid: c_long = 154; -pub const SYS_getpgid: c_long = 155; -pub const SYS_getsid: c_long = 156; -pub const SYS_setsid: c_long = 157; -pub const SYS_getgroups: c_long = 158; -pub const SYS_setgroups: c_long = 159; -pub const SYS_uname: c_long = 160; -pub const SYS_sethostname: c_long = 161; -pub const SYS_setdomainname: c_long = 162; -pub const SYS_getrlimit: c_long = 163; -pub const SYS_setrlimit: c_long = 164; -pub const SYS_getrusage: c_long = 165; -pub const SYS_umask: c_long = 166; -pub const SYS_prctl: c_long = 167; -pub const SYS_getcpu: c_long = 168; -pub const SYS_gettimeofday: c_long = 169; -pub const SYS_settimeofday: c_long = 170; -pub const SYS_adjtimex: c_long = 171; -pub const SYS_getpid: c_long = 172; -pub const SYS_getppid: c_long = 173; -pub const SYS_getuid: c_long = 174; -pub const SYS_geteuid: c_long = 175; -pub const SYS_getgid: c_long = 176; -pub const SYS_getegid: c_long = 177; -pub const SYS_gettid: c_long = 178; -pub const SYS_sysinfo: c_long = 179; -pub const SYS_mq_open: c_long = 180; -pub const SYS_mq_unlink: c_long = 181; -pub const SYS_mq_timedsend: c_long = 182; -pub const SYS_mq_timedreceive: c_long = 183; -pub const SYS_mq_notify: c_long = 184; -pub const SYS_mq_getsetattr: c_long = 185; -pub const SYS_msgget: c_long = 186; -pub const SYS_msgctl: c_long = 187; -pub const SYS_msgrcv: c_long = 188; -pub const SYS_msgsnd: c_long = 189; -pub const SYS_semget: c_long = 190; -pub const SYS_semctl: c_long = 191; -pub const SYS_semtimedop: c_long = 192; -pub const SYS_semop: c_long = 193; 
-pub const SYS_shmget: c_long = 194; -pub const SYS_shmctl: c_long = 195; -pub const SYS_shmat: c_long = 196; -pub const SYS_shmdt: c_long = 197; -pub const SYS_socket: c_long = 198; -pub const SYS_socketpair: c_long = 199; -pub const SYS_bind: c_long = 200; -pub const SYS_listen: c_long = 201; -pub const SYS_accept: c_long = 202; -pub const SYS_connect: c_long = 203; -pub const SYS_getsockname: c_long = 204; -pub const SYS_getpeername: c_long = 205; -pub const SYS_sendto: c_long = 206; -pub const SYS_recvfrom: c_long = 207; -pub const SYS_setsockopt: c_long = 208; -pub const SYS_getsockopt: c_long = 209; -pub const SYS_shutdown: c_long = 210; -pub const SYS_sendmsg: c_long = 211; -pub const SYS_recvmsg: c_long = 212; -pub const SYS_readahead: c_long = 213; -pub const SYS_brk: c_long = 214; -pub const SYS_munmap: c_long = 215; -pub const SYS_mremap: c_long = 216; -pub const SYS_add_key: c_long = 217; -pub const SYS_request_key: c_long = 218; -pub const SYS_keyctl: c_long = 219; -pub const SYS_clone: c_long = 220; -pub const SYS_execve: c_long = 221; -pub const SYS_mmap: c_long = 222; -pub const SYS_swapon: c_long = 224; -pub const SYS_swapoff: c_long = 225; -pub const SYS_mprotect: c_long = 226; -pub const SYS_msync: c_long = 227; -pub const SYS_mlock: c_long = 228; -pub const SYS_munlock: c_long = 229; -pub const SYS_mlockall: c_long = 230; -pub const SYS_munlockall: c_long = 231; -pub const SYS_mincore: c_long = 232; -pub const SYS_madvise: c_long = 233; -pub const SYS_remap_file_pages: c_long = 234; -pub const SYS_mbind: c_long = 235; -pub const SYS_get_mempolicy: c_long = 236; -pub const SYS_set_mempolicy: c_long = 237; -pub const SYS_migrate_pages: c_long = 238; -pub const SYS_move_pages: c_long = 239; -pub const SYS_rt_tgsigqueueinfo: c_long = 240; -pub const SYS_perf_event_open: c_long = 241; -pub const SYS_accept4: c_long = 242; -pub const SYS_recvmmsg: c_long = 243; -pub const SYS_arch_specific_syscall: c_long = 244; -pub const SYS_wait4: c_long = 260; -pub const SYS_prlimit64: c_long = 261; -pub const SYS_fanotify_init: c_long = 262; -pub const SYS_fanotify_mark: c_long = 263; -pub const SYS_name_to_handle_at: c_long = 264; -pub const SYS_open_by_handle_at: c_long = 265; -pub const SYS_clock_adjtime: c_long = 266; -pub const SYS_syncfs: c_long = 267; -pub const SYS_setns: c_long = 268; -pub const SYS_sendmmsg: c_long = 269; -pub const SYS_process_vm_readv: c_long = 270; -pub const SYS_process_vm_writev: c_long = 271; -pub const SYS_kcmp: c_long = 272; -pub const SYS_finit_module: c_long = 273; -pub const SYS_sched_setattr: c_long = 274; -pub const SYS_sched_getattr: c_long = 275; -pub const SYS_renameat2: c_long = 276; -pub const SYS_seccomp: c_long = 277; -pub const SYS_getrandom: c_long = 278; -pub const SYS_memfd_create: c_long = 279; -pub const SYS_bpf: c_long = 280; -pub const SYS_execveat: c_long = 281; -pub const SYS_userfaultfd: c_long = 282; -pub const SYS_membarrier: c_long = 283; -pub const SYS_mlock2: c_long = 284; -pub const SYS_copy_file_range: c_long = 285; -pub const SYS_preadv2: c_long = 286; -pub const SYS_pwritev2: c_long = 287; -pub const SYS_pkey_mprotect: c_long = 288; -pub const SYS_pkey_alloc: c_long = 289; -pub const SYS_pkey_free: c_long = 290; -pub const SYS_statx: c_long = 291; -pub const SYS_pidfd_send_signal: c_long = 424; -pub const SYS_io_uring_setup: c_long = 425; -pub const SYS_io_uring_enter: c_long = 426; -pub const SYS_io_uring_register: c_long = 427; -pub const SYS_open_tree: c_long = 428; -pub const SYS_move_mount: c_long = 429; -pub const 
SYS_fsopen: c_long = 430; -pub const SYS_fsconfig: c_long = 431; -pub const SYS_fsmount: c_long = 432; -pub const SYS_fspick: c_long = 433; -pub const SYS_pidfd_open: c_long = 434; -pub const SYS_clone3: c_long = 435; -pub const SYS_close_range: c_long = 436; -pub const SYS_openat2: c_long = 437; -pub const SYS_pidfd_getfd: c_long = 438; -pub const SYS_faccessat2: c_long = 439; -pub const SYS_process_madvise: c_long = 440; -pub const SYS_epoll_pwait2: c_long = 441; -pub const SYS_mount_setattr: c_long = 442; -pub const SYS_quotactl_fd: c_long = 443; -pub const SYS_landlock_create_ruleset: c_long = 444; -pub const SYS_landlock_add_rule: c_long = 445; -pub const SYS_landlock_restrict_self: c_long = 446; -pub const SYS_memfd_secret: c_long = 447; -pub const SYS_process_mrelease: c_long = 448; -pub const SYS_futex_waitv: c_long = 449; -pub const SYS_set_mempolicy_home_node: c_long = 450; -pub const SYS_syscalls: c_long = 451; - -pub const PROT_BTI: c_int = 0x10; -pub const PROT_MTE: c_int = 0x20; - -// From NDK's asm/auxvec.h -pub const AT_SYSINFO_EHDR: c_ulong = 33; -pub const AT_VECTOR_SIZE_ARCH: c_ulong = 2; diff --git a/vendor/libc/src/unix/linux_like/android/b64/mod.rs b/vendor/libc/src/unix/linux_like/android/b64/mod.rs deleted file mode 100644 index 46ceed4c6dcba2..00000000000000 --- a/vendor/libc/src/unix/linux_like/android/b64/mod.rs +++ /dev/null @@ -1,292 +0,0 @@ -use crate::prelude::*; - -// The following definitions are correct for aarch64 and x86_64, -// but may be wrong for mips64 - -pub type mode_t = u32; -pub type off64_t = i64; -pub type socklen_t = u32; - -s! { - pub struct sigset_t { - __val: [c_ulong; 1], - } - - // FIXME(1.0): This should not implement `PartialEq` - #[allow(unpredictable_function_pointer_comparisons)] - pub struct sigaction { - pub sa_flags: c_int, - pub sa_sigaction: crate::sighandler_t, - pub sa_mask: crate::sigset_t, - pub sa_restorer: Option, - } - - pub struct rlimit64 { - pub rlim_cur: c_ulonglong, - pub rlim_max: c_ulonglong, - } - - pub struct pthread_attr_t { - pub flags: u32, - pub stack_base: *mut c_void, - pub stack_size: size_t, - pub guard_size: size_t, - pub sched_policy: i32, - pub sched_priority: i32, - __reserved: [c_char; 16], - } - - pub struct passwd { - pub pw_name: *mut c_char, - pub pw_passwd: *mut c_char, - pub pw_uid: crate::uid_t, - pub pw_gid: crate::gid_t, - pub pw_gecos: *mut c_char, - pub pw_dir: *mut c_char, - pub pw_shell: *mut c_char, - } - - pub struct statfs { - pub f_type: u64, - pub f_bsize: u64, - pub f_blocks: u64, - pub f_bfree: u64, - pub f_bavail: u64, - pub f_files: u64, - pub f_ffree: u64, - pub f_fsid: crate::__fsid_t, - pub f_namelen: u64, - pub f_frsize: u64, - pub f_flags: u64, - pub f_spare: [u64; 4], - } - - pub struct sysinfo { - pub uptime: c_long, - pub loads: [c_ulong; 3], - pub totalram: c_ulong, - pub freeram: c_ulong, - pub sharedram: c_ulong, - pub bufferram: c_ulong, - pub totalswap: c_ulong, - pub freeswap: c_ulong, - pub procs: c_ushort, - pub pad: c_ushort, - pub totalhigh: c_ulong, - pub freehigh: c_ulong, - pub mem_unit: c_uint, - pub _f: [c_char; 0], - } - - pub struct statfs64 { - pub f_type: u64, - pub f_bsize: u64, - pub f_blocks: u64, - pub f_bfree: u64, - pub f_bavail: u64, - pub f_files: u64, - pub f_ffree: u64, - pub f_fsid: crate::__fsid_t, - pub f_namelen: u64, - pub f_frsize: u64, - pub f_flags: u64, - pub f_spare: [u64; 4], - } - - pub struct statvfs64 { - pub f_bsize: c_ulong, - pub f_frsize: c_ulong, - pub f_blocks: u64, - pub f_bfree: u64, - pub f_bavail: u64, - pub f_files: 
u64, - pub f_ffree: u64, - pub f_favail: u64, - pub f_fsid: c_ulong, - pub f_flag: c_ulong, - pub f_namemax: c_ulong, - __f_spare: [c_int; 6], - } - - pub struct pthread_barrier_t { - __private: [i64; 4], - } - - pub struct pthread_spinlock_t { - __private: i64, - } -} - -s_no_extra_traits! { - pub struct pthread_mutex_t { - value: c_int, - __reserved: [c_char; 36], - } - - pub struct pthread_cond_t { - value: c_int, - __reserved: [c_char; 44], - } - - pub struct pthread_rwlock_t { - numLocks: c_int, - writerThreadId: c_int, - pendingReaders: c_int, - pendingWriters: c_int, - attr: i32, - __reserved: [c_char; 36], - } - - pub struct sigset64_t { - __bits: [c_ulong; 1], - } -} - -cfg_if! { - if #[cfg(feature = "extra_traits")] { - impl PartialEq for pthread_mutex_t { - fn eq(&self, other: &pthread_mutex_t) -> bool { - self.value == other.value - && self - .__reserved - .iter() - .zip(other.__reserved.iter()) - .all(|(a, b)| a == b) - } - } - - impl Eq for pthread_mutex_t {} - - impl hash::Hash for pthread_mutex_t { - fn hash(&self, state: &mut H) { - self.value.hash(state); - self.__reserved.hash(state); - } - } - - impl PartialEq for pthread_cond_t { - fn eq(&self, other: &pthread_cond_t) -> bool { - self.value == other.value - && self - .__reserved - .iter() - .zip(other.__reserved.iter()) - .all(|(a, b)| a == b) - } - } - - impl Eq for pthread_cond_t {} - - impl hash::Hash for pthread_cond_t { - fn hash(&self, state: &mut H) { - self.value.hash(state); - self.__reserved.hash(state); - } - } - - impl PartialEq for pthread_rwlock_t { - fn eq(&self, other: &pthread_rwlock_t) -> bool { - self.numLocks == other.numLocks - && self.writerThreadId == other.writerThreadId - && self.pendingReaders == other.pendingReaders - && self.pendingWriters == other.pendingWriters - && self.attr == other.attr - && self - .__reserved - .iter() - .zip(other.__reserved.iter()) - .all(|(a, b)| a == b) - } - } - - impl Eq for pthread_rwlock_t {} - - impl hash::Hash for pthread_rwlock_t { - fn hash(&self, state: &mut H) { - self.numLocks.hash(state); - self.writerThreadId.hash(state); - self.pendingReaders.hash(state); - self.pendingWriters.hash(state); - self.attr.hash(state); - self.__reserved.hash(state); - } - } - } -} - -// These constants must be of the same type of sigaction.sa_flags -pub const SA_NOCLDSTOP: c_int = 0x00000001; -pub const SA_NOCLDWAIT: c_int = 0x00000002; -pub const SA_NODEFER: c_int = 0x40000000; -pub const SA_ONSTACK: c_int = 0x08000000; -pub const SA_RESETHAND: c_int = 0x80000000; -pub const SA_RESTART: c_int = 0x10000000; -pub const SA_SIGINFO: c_int = 0x00000004; - -pub const RTLD_GLOBAL: c_int = 0x00100; -pub const RTLD_NOW: c_int = 2; -pub const RTLD_DEFAULT: *mut c_void = ptr::null_mut(); - -pub const PTHREAD_MUTEX_INITIALIZER: pthread_mutex_t = pthread_mutex_t { - value: 0, - __reserved: [0; 36], -}; -pub const PTHREAD_COND_INITIALIZER: pthread_cond_t = pthread_cond_t { - value: 0, - __reserved: [0; 44], -}; -pub const PTHREAD_RWLOCK_INITIALIZER: pthread_rwlock_t = pthread_rwlock_t { - numLocks: 0, - writerThreadId: 0, - pendingReaders: 0, - pendingWriters: 0, - attr: 0, - __reserved: [0; 36], -}; -pub const PTHREAD_STACK_MIN: size_t = 4096 * 4; -pub const CPU_SETSIZE: size_t = 1024; -pub const __CPU_BITS: size_t = 64; - -pub const UT_LINESIZE: usize = 32; -pub const UT_NAMESIZE: usize = 32; -pub const UT_HOSTSIZE: usize = 256; - -f! { - // Sadly, Android before 5.0 (API level 21), the accept4 syscall is not - // exposed by the libc. 
As work-around, we implement it through `syscall` - // directly. This workaround can be removed if the minimum version of - // Android is bumped. When the workaround is removed, `accept4` can be - // moved back to `linux_like/mod.rs` - pub fn accept4( - fd: c_int, - addr: *mut crate::sockaddr, - len: *mut crate::socklen_t, - flg: c_int, - ) -> c_int { - crate::syscall(SYS_accept4, fd, addr, len, flg) as c_int - } -} - -extern "C" { - pub fn __system_property_wait( - pi: *const crate::prop_info, - __old_serial: u32, - __new_serial_ptr: *mut u32, - __relative_timeout: *const crate::timespec, - ) -> bool; -} - -cfg_if! { - if #[cfg(target_arch = "x86_64")] { - mod x86_64; - pub use self::x86_64::*; - } else if #[cfg(target_arch = "aarch64")] { - mod aarch64; - pub use self::aarch64::*; - } else if #[cfg(target_arch = "riscv64")] { - mod riscv64; - pub use self::riscv64::*; - } else { - // Unknown target_arch - } -} diff --git a/vendor/libc/src/unix/linux_like/android/b64/riscv64/mod.rs b/vendor/libc/src/unix/linux_like/android/b64/riscv64/mod.rs deleted file mode 100644 index ca8c727164ad74..00000000000000 --- a/vendor/libc/src/unix/linux_like/android/b64/riscv64/mod.rs +++ /dev/null @@ -1,384 +0,0 @@ -use crate::off64_t; -use crate::prelude::*; - -pub type wchar_t = u32; -pub type greg_t = i64; -pub type __u64 = c_ulonglong; -pub type __s64 = c_longlong; - -s! { - pub struct stat { - pub st_dev: crate::dev_t, - pub st_ino: crate::ino_t, - pub st_mode: c_uint, - pub st_nlink: c_uint, - pub st_uid: crate::uid_t, - pub st_gid: crate::gid_t, - pub st_rdev: crate::dev_t, - __pad1: c_ulong, - pub st_size: off64_t, - pub st_blksize: c_int, - __pad2: c_int, - pub st_blocks: c_long, - pub st_atime: crate::time_t, - pub st_atime_nsec: c_long, - pub st_mtime: crate::time_t, - pub st_mtime_nsec: c_long, - pub st_ctime: crate::time_t, - pub st_ctime_nsec: c_long, - __unused4: c_uint, - __unused5: c_uint, - } - - pub struct stat64 { - pub st_dev: crate::dev_t, - pub st_ino: crate::ino_t, - pub st_mode: c_uint, - pub st_nlink: c_uint, - pub st_uid: crate::uid_t, - pub st_gid: crate::gid_t, - pub st_rdev: crate::dev_t, - __pad1: c_ulong, - pub st_size: off64_t, - pub st_blksize: c_int, - __pad2: c_int, - pub st_blocks: c_long, - pub st_atime: crate::time_t, - pub st_atime_nsec: c_long, - pub st_mtime: crate::time_t, - pub st_mtime_nsec: c_long, - pub st_ctime: crate::time_t, - pub st_ctime_nsec: c_long, - __unused4: c_uint, - __unused5: c_uint, - } -} - -s_no_extra_traits! 
{ - #[repr(align(16))] - pub struct max_align_t { - priv_: [f32; 8], - } -} - -pub const O_DIRECT: c_int = 0x40000; -pub const O_DIRECTORY: c_int = 0x200000; -pub const O_NOFOLLOW: c_int = 0x400000; -pub const O_LARGEFILE: c_int = 0x100000; - -pub const SIGSTKSZ: size_t = 8192; -pub const MINSIGSTKSZ: size_t = 2048; - -// From NDK's asm/hwcap.h -pub const COMPAT_HWCAP_ISA_I: c_ulong = 1 << (b'I' - b'A'); -pub const COMPAT_HWCAP_ISA_M: c_ulong = 1 << (b'M' - b'A'); -pub const COMPAT_HWCAP_ISA_A: c_ulong = 1 << (b'A' - b'A'); -pub const COMPAT_HWCAP_ISA_F: c_ulong = 1 << (b'F' - b'A'); -pub const COMPAT_HWCAP_ISA_D: c_ulong = 1 << (b'D' - b'A'); -pub const COMPAT_HWCAP_ISA_C: c_ulong = 1 << (b'C' - b'A'); - -pub const SYS_io_setup: c_long = 0; -pub const SYS_io_destroy: c_long = 1; -pub const SYS_io_submit: c_long = 2; -pub const SYS_io_cancel: c_long = 3; -pub const SYS_io_getevents: c_long = 4; -pub const SYS_setxattr: c_long = 5; -pub const SYS_lsetxattr: c_long = 6; -pub const SYS_fsetxattr: c_long = 7; -pub const SYS_getxattr: c_long = 8; -pub const SYS_lgetxattr: c_long = 9; -pub const SYS_fgetxattr: c_long = 10; -pub const SYS_listxattr: c_long = 11; -pub const SYS_llistxattr: c_long = 12; -pub const SYS_flistxattr: c_long = 13; -pub const SYS_removexattr: c_long = 14; -pub const SYS_lremovexattr: c_long = 15; -pub const SYS_fremovexattr: c_long = 16; -pub const SYS_getcwd: c_long = 17; -pub const SYS_lookup_dcookie: c_long = 18; -pub const SYS_eventfd2: c_long = 19; -pub const SYS_epoll_create1: c_long = 20; -pub const SYS_epoll_ctl: c_long = 21; -pub const SYS_epoll_pwait: c_long = 22; -pub const SYS_dup: c_long = 23; -pub const SYS_dup3: c_long = 24; -pub const SYS_inotify_init1: c_long = 26; -pub const SYS_inotify_add_watch: c_long = 27; -pub const SYS_inotify_rm_watch: c_long = 28; -pub const SYS_ioctl: c_long = 29; -pub const SYS_ioprio_set: c_long = 30; -pub const SYS_ioprio_get: c_long = 31; -pub const SYS_flock: c_long = 32; -pub const SYS_mknodat: c_long = 33; -pub const SYS_mkdirat: c_long = 34; -pub const SYS_unlinkat: c_long = 35; -pub const SYS_symlinkat: c_long = 36; -pub const SYS_linkat: c_long = 37; -pub const SYS_renameat: c_long = 38; -pub const SYS_umount2: c_long = 39; -pub const SYS_mount: c_long = 40; -pub const SYS_pivot_root: c_long = 41; -pub const SYS_nfsservctl: c_long = 42; -pub const SYS_fallocate: c_long = 47; -pub const SYS_faccessat: c_long = 48; -pub const SYS_chdir: c_long = 49; -pub const SYS_fchdir: c_long = 50; -pub const SYS_chroot: c_long = 51; -pub const SYS_fchmod: c_long = 52; -pub const SYS_fchmodat: c_long = 53; -pub const SYS_fchownat: c_long = 54; -pub const SYS_fchown: c_long = 55; -pub const SYS_openat: c_long = 56; -pub const SYS_close: c_long = 57; -pub const SYS_vhangup: c_long = 58; -pub const SYS_pipe2: c_long = 59; -pub const SYS_quotactl: c_long = 60; -pub const SYS_getdents64: c_long = 61; -pub const SYS_read: c_long = 63; -pub const SYS_write: c_long = 64; -pub const SYS_readv: c_long = 65; -pub const SYS_writev: c_long = 66; -pub const SYS_pread64: c_long = 67; -pub const SYS_pwrite64: c_long = 68; -pub const SYS_preadv: c_long = 69; -pub const SYS_pwritev: c_long = 70; -pub const SYS_pselect6: c_long = 72; -pub const SYS_ppoll: c_long = 73; -pub const SYS_signalfd4: c_long = 74; -pub const SYS_vmsplice: c_long = 75; -pub const SYS_splice: c_long = 76; -pub const SYS_tee: c_long = 77; -pub const SYS_readlinkat: c_long = 78; -pub const SYS_sync: c_long = 81; -pub const SYS_fsync: c_long = 82; -pub const SYS_fdatasync: c_long = 
83; -pub const SYS_sync_file_range: c_long = 84; -pub const SYS_timerfd_create: c_long = 85; -pub const SYS_timerfd_settime: c_long = 86; -pub const SYS_timerfd_gettime: c_long = 87; -pub const SYS_utimensat: c_long = 88; -pub const SYS_acct: c_long = 89; -pub const SYS_capget: c_long = 90; -pub const SYS_capset: c_long = 91; -pub const SYS_personality: c_long = 92; -pub const SYS_exit: c_long = 93; -pub const SYS_exit_group: c_long = 94; -pub const SYS_waitid: c_long = 95; -pub const SYS_set_tid_address: c_long = 96; -pub const SYS_unshare: c_long = 97; -pub const SYS_futex: c_long = 98; -pub const SYS_set_robust_list: c_long = 99; -pub const SYS_get_robust_list: c_long = 100; -pub const SYS_nanosleep: c_long = 101; -pub const SYS_getitimer: c_long = 102; -pub const SYS_setitimer: c_long = 103; -pub const SYS_kexec_load: c_long = 104; -pub const SYS_init_module: c_long = 105; -pub const SYS_delete_module: c_long = 106; -pub const SYS_timer_create: c_long = 107; -pub const SYS_timer_gettime: c_long = 108; -pub const SYS_timer_getoverrun: c_long = 109; -pub const SYS_timer_settime: c_long = 110; -pub const SYS_timer_delete: c_long = 111; -pub const SYS_clock_settime: c_long = 112; -pub const SYS_clock_gettime: c_long = 113; -pub const SYS_clock_getres: c_long = 114; -pub const SYS_clock_nanosleep: c_long = 115; -pub const SYS_syslog: c_long = 116; -pub const SYS_ptrace: c_long = 117; -pub const SYS_sched_setparam: c_long = 118; -pub const SYS_sched_setscheduler: c_long = 119; -pub const SYS_sched_getscheduler: c_long = 120; -pub const SYS_sched_getparam: c_long = 121; -pub const SYS_sched_setaffinity: c_long = 122; -pub const SYS_sched_getaffinity: c_long = 123; -pub const SYS_sched_yield: c_long = 124; -pub const SYS_sched_get_priority_max: c_long = 125; -pub const SYS_sched_get_priority_min: c_long = 126; -pub const SYS_sched_rr_get_interval: c_long = 127; -pub const SYS_restart_syscall: c_long = 128; -pub const SYS_kill: c_long = 129; -pub const SYS_tkill: c_long = 130; -pub const SYS_tgkill: c_long = 131; -pub const SYS_sigaltstack: c_long = 132; -pub const SYS_rt_sigsuspend: c_long = 133; -pub const SYS_rt_sigaction: c_long = 134; -pub const SYS_rt_sigprocmask: c_long = 135; -pub const SYS_rt_sigpending: c_long = 136; -pub const SYS_rt_sigtimedwait: c_long = 137; -pub const SYS_rt_sigqueueinfo: c_long = 138; -pub const SYS_rt_sigreturn: c_long = 139; -pub const SYS_setpriority: c_long = 140; -pub const SYS_getpriority: c_long = 141; -pub const SYS_reboot: c_long = 142; -pub const SYS_setregid: c_long = 143; -pub const SYS_setgid: c_long = 144; -pub const SYS_setreuid: c_long = 145; -pub const SYS_setuid: c_long = 146; -pub const SYS_setresuid: c_long = 147; -pub const SYS_getresuid: c_long = 148; -pub const SYS_setresgid: c_long = 149; -pub const SYS_getresgid: c_long = 150; -pub const SYS_setfsuid: c_long = 151; -pub const SYS_setfsgid: c_long = 152; -pub const SYS_times: c_long = 153; -pub const SYS_setpgid: c_long = 154; -pub const SYS_getpgid: c_long = 155; -pub const SYS_getsid: c_long = 156; -pub const SYS_setsid: c_long = 157; -pub const SYS_getgroups: c_long = 158; -pub const SYS_setgroups: c_long = 159; -pub const SYS_uname: c_long = 160; -pub const SYS_sethostname: c_long = 161; -pub const SYS_setdomainname: c_long = 162; -pub const SYS_getrlimit: c_long = 163; -pub const SYS_setrlimit: c_long = 164; -pub const SYS_getrusage: c_long = 165; -pub const SYS_umask: c_long = 166; -pub const SYS_prctl: c_long = 167; -pub const SYS_getcpu: c_long = 168; -pub const SYS_gettimeofday: 
c_long = 169; -pub const SYS_settimeofday: c_long = 170; -pub const SYS_adjtimex: c_long = 171; -pub const SYS_getpid: c_long = 172; -pub const SYS_getppid: c_long = 173; -pub const SYS_getuid: c_long = 174; -pub const SYS_geteuid: c_long = 175; -pub const SYS_getgid: c_long = 176; -pub const SYS_getegid: c_long = 177; -pub const SYS_gettid: c_long = 178; -pub const SYS_sysinfo: c_long = 179; -pub const SYS_mq_open: c_long = 180; -pub const SYS_mq_unlink: c_long = 181; -pub const SYS_mq_timedsend: c_long = 182; -pub const SYS_mq_timedreceive: c_long = 183; -pub const SYS_mq_notify: c_long = 184; -pub const SYS_mq_getsetattr: c_long = 185; -pub const SYS_msgget: c_long = 186; -pub const SYS_msgctl: c_long = 187; -pub const SYS_msgrcv: c_long = 188; -pub const SYS_msgsnd: c_long = 189; -pub const SYS_semget: c_long = 190; -pub const SYS_semctl: c_long = 191; -pub const SYS_semtimedop: c_long = 192; -pub const SYS_semop: c_long = 193; -pub const SYS_shmget: c_long = 194; -pub const SYS_shmctl: c_long = 195; -pub const SYS_shmat: c_long = 196; -pub const SYS_shmdt: c_long = 197; -pub const SYS_socket: c_long = 198; -pub const SYS_socketpair: c_long = 199; -pub const SYS_bind: c_long = 200; -pub const SYS_listen: c_long = 201; -pub const SYS_accept: c_long = 202; -pub const SYS_connect: c_long = 203; -pub const SYS_getsockname: c_long = 204; -pub const SYS_getpeername: c_long = 205; -pub const SYS_sendto: c_long = 206; -pub const SYS_recvfrom: c_long = 207; -pub const SYS_setsockopt: c_long = 208; -pub const SYS_getsockopt: c_long = 209; -pub const SYS_shutdown: c_long = 210; -pub const SYS_sendmsg: c_long = 211; -pub const SYS_recvmsg: c_long = 212; -pub const SYS_readahead: c_long = 213; -pub const SYS_brk: c_long = 214; -pub const SYS_munmap: c_long = 215; -pub const SYS_mremap: c_long = 216; -pub const SYS_add_key: c_long = 217; -pub const SYS_request_key: c_long = 218; -pub const SYS_keyctl: c_long = 219; -pub const SYS_clone: c_long = 220; -pub const SYS_execve: c_long = 221; -pub const SYS_swapon: c_long = 224; -pub const SYS_swapoff: c_long = 225; -pub const SYS_mprotect: c_long = 226; -pub const SYS_msync: c_long = 227; -pub const SYS_mlock: c_long = 228; -pub const SYS_munlock: c_long = 229; -pub const SYS_mlockall: c_long = 230; -pub const SYS_munlockall: c_long = 231; -pub const SYS_mincore: c_long = 232; -pub const SYS_madvise: c_long = 233; -pub const SYS_remap_file_pages: c_long = 234; -pub const SYS_mbind: c_long = 235; -pub const SYS_get_mempolicy: c_long = 236; -pub const SYS_set_mempolicy: c_long = 237; -pub const SYS_migrate_pages: c_long = 238; -pub const SYS_move_pages: c_long = 239; -pub const SYS_rt_tgsigqueueinfo: c_long = 240; -pub const SYS_perf_event_open: c_long = 241; -pub const SYS_accept4: c_long = 242; -pub const SYS_recvmmsg: c_long = 243; -pub const SYS_arch_specific_syscall: c_long = 244; -pub const SYS_wait4: c_long = 260; -pub const SYS_prlimit64: c_long = 261; -pub const SYS_fanotify_init: c_long = 262; -pub const SYS_fanotify_mark: c_long = 263; -pub const SYS_name_to_handle_at: c_long = 264; -pub const SYS_open_by_handle_at: c_long = 265; -pub const SYS_clock_adjtime: c_long = 266; -pub const SYS_syncfs: c_long = 267; -pub const SYS_setns: c_long = 268; -pub const SYS_sendmmsg: c_long = 269; -pub const SYS_process_vm_readv: c_long = 270; -pub const SYS_process_vm_writev: c_long = 271; -pub const SYS_kcmp: c_long = 272; -pub const SYS_finit_module: c_long = 273; -pub const SYS_sched_setattr: c_long = 274; -pub const SYS_sched_getattr: c_long = 275; -pub 
const SYS_renameat2: c_long = 276; -pub const SYS_seccomp: c_long = 277; -pub const SYS_getrandom: c_long = 278; -pub const SYS_memfd_create: c_long = 279; -pub const SYS_bpf: c_long = 280; -pub const SYS_execveat: c_long = 281; -pub const SYS_userfaultfd: c_long = 282; -pub const SYS_membarrier: c_long = 283; -pub const SYS_mlock2: c_long = 284; -pub const SYS_copy_file_range: c_long = 285; -pub const SYS_preadv2: c_long = 286; -pub const SYS_pwritev2: c_long = 287; -pub const SYS_pkey_mprotect: c_long = 288; -pub const SYS_pkey_alloc: c_long = 289; -pub const SYS_pkey_free: c_long = 290; -pub const SYS_statx: c_long = 291; -pub const SYS_pidfd_send_signal: c_long = 424; -pub const SYS_io_uring_setup: c_long = 425; -pub const SYS_io_uring_enter: c_long = 426; -pub const SYS_io_uring_register: c_long = 427; -pub const SYS_open_tree: c_long = 428; -pub const SYS_move_mount: c_long = 429; -pub const SYS_fsopen: c_long = 430; -pub const SYS_fsconfig: c_long = 431; -pub const SYS_fsmount: c_long = 432; -pub const SYS_fspick: c_long = 433; -pub const SYS_pidfd_open: c_long = 434; -pub const SYS_clone3: c_long = 435; -pub const SYS_close_range: c_long = 436; -pub const SYS_openat2: c_long = 437; -pub const SYS_pidfd_getfd: c_long = 438; -pub const SYS_faccessat2: c_long = 439; -pub const SYS_process_madvise: c_long = 440; -pub const SYS_epoll_pwait2: c_long = 441; -pub const SYS_mount_setattr: c_long = 442; -pub const SYS_quotactl_fd: c_long = 443; -pub const SYS_landlock_create_ruleset: c_long = 444; -pub const SYS_landlock_add_rule: c_long = 445; -pub const SYS_landlock_restrict_self: c_long = 446; -pub const SYS_memfd_secret: c_long = 447; -pub const SYS_process_mrelease: c_long = 448; -pub const SYS_futex_waitv: c_long = 449; -pub const SYS_set_mempolicy_home_node: c_long = 450; - -// From NDK's asm/auxvec.h -pub const AT_SYSINFO_EHDR: c_ulong = 33; -pub const AT_L1I_CACHESIZE: c_ulong = 40; -pub const AT_L1I_CACHEGEOMETRY: c_ulong = 41; -pub const AT_L1D_CACHESIZE: c_ulong = 42; -pub const AT_L1D_CACHEGEOMETRY: c_ulong = 43; -pub const AT_L2_CACHESIZE: c_ulong = 44; -pub const AT_L2_CACHEGEOMETRY: c_ulong = 45; -pub const AT_L3_CACHESIZE: c_ulong = 46; -pub const AT_L3_CACHEGEOMETRY: c_ulong = 47; -pub const AT_VECTOR_SIZE_ARCH: c_ulong = 9; diff --git a/vendor/libc/src/unix/linux_like/android/b64/x86_64/mod.rs b/vendor/libc/src/unix/linux_like/android/b64/x86_64/mod.rs deleted file mode 100644 index 0fddeb7bc267f5..00000000000000 --- a/vendor/libc/src/unix/linux_like/android/b64/x86_64/mod.rs +++ /dev/null @@ -1,748 +0,0 @@ -use crate::off64_t; -use crate::prelude::*; - -pub type wchar_t = i32; -pub type greg_t = i64; -pub type __u64 = c_ulonglong; -pub type __s64 = c_longlong; - -s! 
{ - pub struct stat { - pub st_dev: crate::dev_t, - pub st_ino: crate::ino_t, - pub st_nlink: c_ulong, - pub st_mode: c_uint, - pub st_uid: crate::uid_t, - pub st_gid: crate::gid_t, - pub st_rdev: crate::dev_t, - pub st_size: off64_t, - pub st_blksize: c_long, - pub st_blocks: c_long, - pub st_atime: c_long, - pub st_atime_nsec: c_long, - pub st_mtime: c_long, - pub st_mtime_nsec: c_long, - pub st_ctime: c_long, - pub st_ctime_nsec: c_long, - __unused: [c_long; 3], - } - - pub struct stat64 { - pub st_dev: crate::dev_t, - pub st_ino: crate::ino_t, - pub st_nlink: c_ulong, - pub st_mode: c_uint, - pub st_uid: crate::uid_t, - pub st_gid: crate::gid_t, - pub st_rdev: crate::dev_t, - pub st_size: off64_t, - pub st_blksize: c_long, - pub st_blocks: c_long, - pub st_atime: c_long, - pub st_atime_nsec: c_long, - pub st_mtime: c_long, - pub st_mtime_nsec: c_long, - pub st_ctime: c_long, - pub st_ctime_nsec: c_long, - __unused: [c_long; 3], - } - - pub struct _libc_xmmreg { - pub element: [u32; 4], - } - - pub struct user_regs_struct { - pub r15: c_ulong, - pub r14: c_ulong, - pub r13: c_ulong, - pub r12: c_ulong, - pub rbp: c_ulong, - pub rbx: c_ulong, - pub r11: c_ulong, - pub r10: c_ulong, - pub r9: c_ulong, - pub r8: c_ulong, - pub rax: c_ulong, - pub rcx: c_ulong, - pub rdx: c_ulong, - pub rsi: c_ulong, - pub rdi: c_ulong, - pub orig_rax: c_ulong, - pub rip: c_ulong, - pub cs: c_ulong, - pub eflags: c_ulong, - pub rsp: c_ulong, - pub ss: c_ulong, - pub fs_base: c_ulong, - pub gs_base: c_ulong, - pub ds: c_ulong, - pub es: c_ulong, - pub fs: c_ulong, - pub gs: c_ulong, - } - - pub struct user { - pub regs: user_regs_struct, - pub u_fpvalid: c_int, - pub i387: user_fpregs_struct, - pub u_tsize: c_ulong, - pub u_dsize: c_ulong, - pub u_ssize: c_ulong, - pub start_code: c_ulong, - pub start_stack: c_ulong, - pub signal: c_long, - __reserved: c_int, - #[cfg(target_pointer_width = "32")] - __pad1: u32, - pub u_ar0: *mut user_regs_struct, - #[cfg(target_pointer_width = "32")] - __pad2: u32, - pub u_fpstate: *mut user_fpregs_struct, - pub magic: c_ulong, - pub u_comm: [c_char; 32], - pub u_debugreg: [c_ulong; 8], - pub error_code: c_ulong, - pub fault_address: c_ulong, - } -} - -s_no_extra_traits! { - pub union __c_anonymous_uc_sigmask { - uc_sigmask: crate::sigset_t, - uc_sigmask64: crate::sigset64_t, - } - - #[repr(align(16))] - pub struct max_align_t { - priv_: [f64; 4], - } -} - -cfg_if! { - if #[cfg(feature = "extra_traits")] { - impl PartialEq for __c_anonymous_uc_sigmask { - fn eq(&self, other: &__c_anonymous_uc_sigmask) -> bool { - unsafe { self.uc_sigmask == other.uc_sigmask } - } - } - impl Eq for __c_anonymous_uc_sigmask {} - impl hash::Hash for __c_anonymous_uc_sigmask { - fn hash(&self, state: &mut H) { - unsafe { self.uc_sigmask.hash(state) } - } - } - } -} - -s_no_extra_traits! 
{ - pub struct _libc_fpxreg { - pub significand: [u16; 4], - pub exponent: u16, - __padding: [u16; 3], - } - - pub struct _libc_fpstate { - pub cwd: u16, - pub swd: u16, - pub ftw: u16, - pub fop: u16, - pub rip: u64, - pub rdp: u64, - pub mxcsr: u32, - pub mxcr_mask: u32, - pub _st: [_libc_fpxreg; 8], - pub _xmm: [_libc_xmmreg; 16], - __private: [u32; 24], - } - - pub struct mcontext_t { - pub gregs: [greg_t; 23], - pub fpregs: *mut _libc_fpstate, - __private: [u64; 8], - } - - pub struct ucontext_t { - pub uc_flags: c_ulong, - pub uc_link: *mut ucontext_t, - pub uc_stack: crate::stack_t, - pub uc_mcontext: mcontext_t, - pub uc_sigmask64: __c_anonymous_uc_sigmask, - __fpregs_mem: _libc_fpstate, - } - - pub struct user_fpregs_struct { - pub cwd: c_ushort, - pub swd: c_ushort, - pub ftw: c_ushort, - pub fop: c_ushort, - pub rip: c_ulong, - pub rdp: c_ulong, - pub mxcsr: c_uint, - pub mxcr_mask: c_uint, - pub st_space: [c_uint; 32], - pub xmm_space: [c_uint; 64], - padding: [c_uint; 24], - } -} - -cfg_if! { - if #[cfg(feature = "extra_traits")] { - impl PartialEq for _libc_fpxreg { - fn eq(&self, other: &Self) -> bool { - self.significand == other.significand && self.exponent == other.exponent - // Ignore padding field - } - } - impl Eq for _libc_fpxreg {} - impl hash::Hash for _libc_fpxreg { - fn hash(&self, state: &mut H) { - self.significand.hash(state); - self.exponent.hash(state); - // Ignore padding field - } - } - - impl PartialEq for _libc_fpstate { - fn eq(&self, other: &Self) -> bool { - self.cwd == other.cwd - && self.swd == other.swd - && self.ftw == other.ftw - && self.fop == other.fop - && self.rip == other.rip - && self.rdp == other.rdp - && self.mxcsr == other.mxcsr - && self.mxcr_mask == other.mxcr_mask - && self._st == other._st - && self._xmm == other._xmm - // Ignore padding field - } - } - impl Eq for _libc_fpstate {} - impl hash::Hash for _libc_fpstate { - fn hash(&self, state: &mut H) { - self.cwd.hash(state); - self.swd.hash(state); - self.ftw.hash(state); - self.fop.hash(state); - self.rip.hash(state); - self.rdp.hash(state); - self.mxcsr.hash(state); - self.mxcr_mask.hash(state); - self._st.hash(state); - self._xmm.hash(state); - // Ignore padding field - } - } - - impl PartialEq for mcontext_t { - fn eq(&self, other: &Self) -> bool { - self.gregs == other.gregs && self.fpregs == other.fpregs - // Ignore padding field - } - } - impl Eq for mcontext_t {} - impl hash::Hash for mcontext_t { - fn hash(&self, state: &mut H) { - self.gregs.hash(state); - self.fpregs.hash(state); - // Ignore padding field - } - } - - impl PartialEq for ucontext_t { - fn eq(&self, other: &Self) -> bool { - self.uc_flags == other.uc_flags - && self.uc_link == other.uc_link - && self.uc_stack == other.uc_stack - && self.uc_mcontext == other.uc_mcontext - && self.uc_sigmask64 == other.uc_sigmask64 - // Ignore padding field - } - } - impl Eq for ucontext_t {} - impl hash::Hash for ucontext_t { - fn hash(&self, state: &mut H) { - self.uc_flags.hash(state); - self.uc_link.hash(state); - self.uc_stack.hash(state); - self.uc_mcontext.hash(state); - self.uc_sigmask64.hash(state); - // Ignore padding field - } - } - - impl PartialEq for user_fpregs_struct { - fn eq(&self, other: &user_fpregs_struct) -> bool { - self.cwd == other.cwd - && self.swd == other.swd - && self.ftw == other.ftw - && self.fop == other.fop - && self.rip == other.rip - && self.rdp == other.rdp - && self.mxcsr == other.mxcsr - && self.mxcr_mask == other.mxcr_mask - && self.st_space == other.st_space - && self - .xmm_space - 
diff --git a/vendor/libc/src/unix/linux_like/android/mod.rs b/vendor/libc/src/unix/linux_like/android/mod.rs
deleted file mode 100644
index fbd8ac2f87cfc9..00000000000000
--- a/vendor/libc/src/unix/linux_like/android/mod.rs
+++ /dev/null
@@ -1,4157 +0,0 @@
-//! Android-specific definitions for linux-like values
-
-use crate::prelude::*;
-use crate::{cmsghdr, msghdr};
c_int = 0x10; - -// linux/netfilter/nfnetlink.h -pub const NFNLGRP_NONE: c_int = 0; -pub const NFNLGRP_CONNTRACK_NEW: c_int = 1; -pub const NFNLGRP_CONNTRACK_UPDATE: c_int = 2; -pub const NFNLGRP_CONNTRACK_DESTROY: c_int = 3; -pub const NFNLGRP_CONNTRACK_EXP_NEW: c_int = 4; -pub const NFNLGRP_CONNTRACK_EXP_UPDATE: c_int = 5; -pub const NFNLGRP_CONNTRACK_EXP_DESTROY: c_int = 6; -pub const NFNLGRP_NFTABLES: c_int = 7; -pub const NFNLGRP_ACCT_QUOTA: c_int = 8; - -pub const NFNETLINK_V0: c_int = 0; - -pub const NFNL_SUBSYS_NONE: c_int = 0; -pub const NFNL_SUBSYS_CTNETLINK: c_int = 1; -pub const NFNL_SUBSYS_CTNETLINK_EXP: c_int = 2; -pub const NFNL_SUBSYS_QUEUE: c_int = 3; -pub const NFNL_SUBSYS_ULOG: c_int = 4; -pub const NFNL_SUBSYS_OSF: c_int = 5; -pub const NFNL_SUBSYS_IPSET: c_int = 6; -pub const NFNL_SUBSYS_ACCT: c_int = 7; -pub const NFNL_SUBSYS_CTNETLINK_TIMEOUT: c_int = 8; -pub const NFNL_SUBSYS_CTHELPER: c_int = 9; -pub const NFNL_SUBSYS_NFTABLES: c_int = 10; -pub const NFNL_SUBSYS_NFT_COMPAT: c_int = 11; -pub const NFNL_SUBSYS_COUNT: c_int = 12; - -pub const NFNL_MSG_BATCH_BEGIN: c_int = NLMSG_MIN_TYPE; -pub const NFNL_MSG_BATCH_END: c_int = NLMSG_MIN_TYPE + 1; - -// linux/netfilter/nfnetlink_log.h -pub const NFULNL_MSG_PACKET: c_int = 0; -pub const NFULNL_MSG_CONFIG: c_int = 1; - -pub const NFULA_UNSPEC: c_int = 0; -pub const NFULA_PACKET_HDR: c_int = 1; -pub const NFULA_MARK: c_int = 2; -pub const NFULA_TIMESTAMP: c_int = 3; -pub const NFULA_IFINDEX_INDEV: c_int = 4; -pub const NFULA_IFINDEX_OUTDEV: c_int = 5; -pub const NFULA_IFINDEX_PHYSINDEV: c_int = 6; -pub const NFULA_IFINDEX_PHYSOUTDEV: c_int = 7; -pub const NFULA_HWADDR: c_int = 8; -pub const NFULA_PAYLOAD: c_int = 9; -pub const NFULA_PREFIX: c_int = 10; -pub const NFULA_UID: c_int = 11; -pub const NFULA_SEQ: c_int = 12; -pub const NFULA_SEQ_GLOBAL: c_int = 13; -pub const NFULA_GID: c_int = 14; -pub const NFULA_HWTYPE: c_int = 15; -pub const NFULA_HWHEADER: c_int = 16; -pub const NFULA_HWLEN: c_int = 17; -pub const NFULA_CT: c_int = 18; -pub const NFULA_CT_INFO: c_int = 19; - -pub const NFULNL_CFG_CMD_NONE: c_int = 0; -pub const NFULNL_CFG_CMD_BIND: c_int = 1; -pub const NFULNL_CFG_CMD_UNBIND: c_int = 2; -pub const NFULNL_CFG_CMD_PF_BIND: c_int = 3; -pub const NFULNL_CFG_CMD_PF_UNBIND: c_int = 4; - -pub const NFULA_CFG_UNSPEC: c_int = 0; -pub const NFULA_CFG_CMD: c_int = 1; -pub const NFULA_CFG_MODE: c_int = 2; -pub const NFULA_CFG_NLBUFSIZ: c_int = 3; -pub const NFULA_CFG_TIMEOUT: c_int = 4; -pub const NFULA_CFG_QTHRESH: c_int = 5; -pub const NFULA_CFG_FLAGS: c_int = 6; - -pub const NFULNL_COPY_NONE: c_int = 0x00; -pub const NFULNL_COPY_META: c_int = 0x01; -pub const NFULNL_COPY_PACKET: c_int = 0x02; - -pub const NFULNL_CFG_F_SEQ: c_int = 0x0001; -pub const NFULNL_CFG_F_SEQ_GLOBAL: c_int = 0x0002; -pub const NFULNL_CFG_F_CONNTRACK: c_int = 0x0004; - -// linux/netfilter/nfnetlink_log.h -pub const NFQNL_MSG_PACKET: c_int = 0; -pub const NFQNL_MSG_VERDICT: c_int = 1; -pub const NFQNL_MSG_CONFIG: c_int = 2; -pub const NFQNL_MSG_VERDICT_BATCH: c_int = 3; - -pub const NFQA_UNSPEC: c_int = 0; -pub const NFQA_PACKET_HDR: c_int = 1; -pub const NFQA_VERDICT_HDR: c_int = 2; -pub const NFQA_MARK: c_int = 3; -pub const NFQA_TIMESTAMP: c_int = 4; -pub const NFQA_IFINDEX_INDEV: c_int = 5; -pub const NFQA_IFINDEX_OUTDEV: c_int = 6; -pub const NFQA_IFINDEX_PHYSINDEV: c_int = 7; -pub const NFQA_IFINDEX_PHYSOUTDEV: c_int = 8; -pub const NFQA_HWADDR: c_int = 9; -pub const NFQA_PAYLOAD: c_int = 10; -pub const NFQA_CT: c_int = 11; -pub const 
NFQA_CT_INFO: c_int = 12; -pub const NFQA_CAP_LEN: c_int = 13; -pub const NFQA_SKB_INFO: c_int = 14; -pub const NFQA_EXP: c_int = 15; -pub const NFQA_UID: c_int = 16; -pub const NFQA_GID: c_int = 17; -pub const NFQA_SECCTX: c_int = 18; -/* - FIXME: These are not yet available in musl sanitized kernel headers and - make the tests fail. Enable them once musl has them. - - See https://github.com/rust-lang/libc/pull/1628 for more details. -pub const NFQA_VLAN: c_int = 19; -pub const NFQA_L2HDR: c_int = 20; - -pub const NFQA_VLAN_UNSPEC: c_int = 0; -pub const NFQA_VLAN_PROTO: c_int = 1; -pub const NFQA_VLAN_TCI: c_int = 2; -*/ - -pub const NFQNL_CFG_CMD_NONE: c_int = 0; -pub const NFQNL_CFG_CMD_BIND: c_int = 1; -pub const NFQNL_CFG_CMD_UNBIND: c_int = 2; -pub const NFQNL_CFG_CMD_PF_BIND: c_int = 3; -pub const NFQNL_CFG_CMD_PF_UNBIND: c_int = 4; - -pub const NFQNL_COPY_NONE: c_int = 0; -pub const NFQNL_COPY_META: c_int = 1; -pub const NFQNL_COPY_PACKET: c_int = 2; - -pub const NFQA_CFG_UNSPEC: c_int = 0; -pub const NFQA_CFG_CMD: c_int = 1; -pub const NFQA_CFG_PARAMS: c_int = 2; -pub const NFQA_CFG_QUEUE_MAXLEN: c_int = 3; -pub const NFQA_CFG_MASK: c_int = 4; -pub const NFQA_CFG_FLAGS: c_int = 5; - -pub const NFQA_CFG_F_FAIL_OPEN: c_int = 0x0001; -pub const NFQA_CFG_F_CONNTRACK: c_int = 0x0002; -pub const NFQA_CFG_F_GSO: c_int = 0x0004; -pub const NFQA_CFG_F_UID_GID: c_int = 0x0008; -pub const NFQA_CFG_F_SECCTX: c_int = 0x0010; -pub const NFQA_CFG_F_MAX: c_int = 0x0020; - -pub const NFQA_SKB_CSUMNOTREADY: c_int = 0x0001; -pub const NFQA_SKB_GSO: c_int = 0x0002; -pub const NFQA_SKB_CSUM_NOTVERIFIED: c_int = 0x0004; - -pub const GENL_NAMSIZ: c_int = 16; - -pub const GENL_MIN_ID: c_int = NLMSG_MIN_TYPE; -pub const GENL_MAX_ID: c_int = 1023; - -pub const GENL_ADMIN_PERM: c_int = 0x01; -pub const GENL_CMD_CAP_DO: c_int = 0x02; -pub const GENL_CMD_CAP_DUMP: c_int = 0x04; -pub const GENL_CMD_CAP_HASPOL: c_int = 0x08; -pub const GENL_UNS_ADMIN_PERM: c_int = 0x10; - -pub const GENL_ID_CTRL: c_int = NLMSG_MIN_TYPE; -pub const GENL_ID_VFS_DQUOT: c_int = NLMSG_MIN_TYPE + 1; -pub const GENL_ID_PMCRAID: c_int = NLMSG_MIN_TYPE + 2; - -pub const CTRL_CMD_UNSPEC: c_int = 0; -pub const CTRL_CMD_NEWFAMILY: c_int = 1; -pub const CTRL_CMD_DELFAMILY: c_int = 2; -pub const CTRL_CMD_GETFAMILY: c_int = 3; -pub const CTRL_CMD_NEWOPS: c_int = 4; -pub const CTRL_CMD_DELOPS: c_int = 5; -pub const CTRL_CMD_GETOPS: c_int = 6; -pub const CTRL_CMD_NEWMCAST_GRP: c_int = 7; -pub const CTRL_CMD_DELMCAST_GRP: c_int = 8; -pub const CTRL_CMD_GETMCAST_GRP: c_int = 9; - -pub const CTRL_ATTR_UNSPEC: c_int = 0; -pub const CTRL_ATTR_FAMILY_ID: c_int = 1; -pub const CTRL_ATTR_FAMILY_NAME: c_int = 2; -pub const CTRL_ATTR_VERSION: c_int = 3; -pub const CTRL_ATTR_HDRSIZE: c_int = 4; -pub const CTRL_ATTR_MAXATTR: c_int = 5; -pub const CTRL_ATTR_OPS: c_int = 6; -pub const CTRL_ATTR_MCAST_GROUPS: c_int = 7; - -pub const CTRL_ATTR_OP_UNSPEC: c_int = 0; -pub const CTRL_ATTR_OP_ID: c_int = 1; -pub const CTRL_ATTR_OP_FLAGS: c_int = 2; - -pub const CTRL_ATTR_MCAST_GRP_UNSPEC: c_int = 0; -pub const CTRL_ATTR_MCAST_GRP_NAME: c_int = 1; -pub const CTRL_ATTR_MCAST_GRP_ID: c_int = 2; - -pub const NETLINK_ADD_MEMBERSHIP: c_int = 1; -pub const NETLINK_DROP_MEMBERSHIP: c_int = 2; -pub const NETLINK_PKTINFO: c_int = 3; -pub const NETLINK_BROADCAST_ERROR: c_int = 4; -pub const NETLINK_NO_ENOBUFS: c_int = 5; -pub const NETLINK_RX_RING: c_int = 6; -pub const NETLINK_TX_RING: c_int = 7; -pub const NETLINK_LISTEN_ALL_NSID: c_int = 8; -pub const 
NETLINK_LIST_MEMBERSHIPS: c_int = 9; -pub const NETLINK_CAP_ACK: c_int = 10; -pub const NETLINK_EXT_ACK: c_int = 11; -pub const NETLINK_GET_STRICT_CHK: c_int = 12; - -pub const GRND_NONBLOCK: c_uint = 0x0001; -pub const GRND_RANDOM: c_uint = 0x0002; -pub const GRND_INSECURE: c_uint = 0x0004; - -// -pub const SECCOMP_MODE_DISABLED: c_uint = 0; -pub const SECCOMP_MODE_STRICT: c_uint = 1; -pub const SECCOMP_MODE_FILTER: c_uint = 2; - -pub const SECCOMP_SET_MODE_STRICT: c_uint = 0; -pub const SECCOMP_SET_MODE_FILTER: c_uint = 1; -pub const SECCOMP_GET_ACTION_AVAIL: c_uint = 2; -pub const SECCOMP_GET_NOTIF_SIZES: c_uint = 3; - -pub const SECCOMP_FILTER_FLAG_TSYNC: c_ulong = 1 << 0; -pub const SECCOMP_FILTER_FLAG_LOG: c_ulong = 1 << 1; -pub const SECCOMP_FILTER_FLAG_SPEC_ALLOW: c_ulong = 1 << 2; -pub const SECCOMP_FILTER_FLAG_NEW_LISTENER: c_ulong = 1 << 3; -pub const SECCOMP_FILTER_FLAG_TSYNC_ESRCH: c_ulong = 1 << 4; -pub const SECCOMP_FILTER_FLAG_WAIT_KILLABLE_RECV: c_ulong = 1 << 5; - -pub const SECCOMP_RET_KILL_PROCESS: c_uint = 0x80000000; -pub const SECCOMP_RET_KILL_THREAD: c_uint = 0x00000000; -pub const SECCOMP_RET_KILL: c_uint = SECCOMP_RET_KILL_THREAD; -pub const SECCOMP_RET_TRAP: c_uint = 0x00030000; -pub const SECCOMP_RET_ERRNO: c_uint = 0x00050000; -pub const SECCOMP_RET_USER_NOTIF: c_uint = 0x7fc00000; -pub const SECCOMP_RET_TRACE: c_uint = 0x7ff00000; -pub const SECCOMP_RET_LOG: c_uint = 0x7ffc0000; -pub const SECCOMP_RET_ALLOW: c_uint = 0x7fff0000; - -pub const SECCOMP_RET_ACTION_FULL: c_uint = 0xffff0000; -pub const SECCOMP_RET_ACTION: c_uint = 0x7fff0000; -pub const SECCOMP_RET_DATA: c_uint = 0x0000ffff; - -pub const SECCOMP_USER_NOTIF_FLAG_CONTINUE: c_ulong = 1; - -pub const SECCOMP_ADDFD_FLAG_SETFD: c_ulong = 1; -pub const SECCOMP_ADDFD_FLAG_SEND: c_ulong = 2; - -pub const NLA_F_NESTED: c_int = 1 << 15; -pub const NLA_F_NET_BYTEORDER: c_int = 1 << 14; -pub const NLA_TYPE_MASK: c_int = !(NLA_F_NESTED | NLA_F_NET_BYTEORDER); - -pub const NLA_ALIGNTO: c_int = 4; - -pub const SIGEV_THREAD_ID: c_int = 4; - -pub const CIBAUD: crate::tcflag_t = 0o02003600000; -pub const CBAUDEX: crate::tcflag_t = 0o010000; - -pub const TIOCM_LE: c_int = 0x001; -pub const TIOCM_DTR: c_int = 0x002; -pub const TIOCM_RTS: c_int = 0x004; -pub const TIOCM_ST: c_int = 0x008; -pub const TIOCM_SR: c_int = 0x010; -pub const TIOCM_CTS: c_int = 0x020; -pub const TIOCM_CAR: c_int = 0x040; -pub const TIOCM_RNG: c_int = 0x080; -pub const TIOCM_DSR: c_int = 0x100; -pub const TIOCM_CD: c_int = TIOCM_CAR; -pub const TIOCM_RI: c_int = TIOCM_RNG; - -pub const POLLWRNORM: c_short = 0x100; -pub const POLLWRBAND: c_short = 0x200; - -pub const SFD_CLOEXEC: c_int = O_CLOEXEC; -pub const SFD_NONBLOCK: c_int = O_NONBLOCK; - -pub const SOCK_NONBLOCK: c_int = O_NONBLOCK; - -pub const SO_ORIGINAL_DST: c_int = 80; - -pub const IP_RECVFRAGSIZE: c_int = 25; - -pub const IPV6_FLOWINFO: c_int = 11; -pub const IPV6_MULTICAST_ALL: c_int = 29; -pub const IPV6_ROUTER_ALERT_ISOLATE: c_int = 30; -pub const IPV6_FLOWLABEL_MGR: c_int = 32; -pub const IPV6_FLOWINFO_SEND: c_int = 33; -pub const IPV6_RECVFRAGSIZE: c_int = 77; -pub const IPV6_FREEBIND: c_int = 78; -pub const IPV6_FLOWINFO_FLOWLABEL: c_int = 0x000fffff; -pub const IPV6_FLOWINFO_PRIORITY: c_int = 0x0ff00000; - -pub const IUTF8: crate::tcflag_t = 0x00004000; -pub const CMSPAR: crate::tcflag_t = 0o10000000000; -pub const O_TMPFILE: c_int = 0o20000000 | O_DIRECTORY; - -pub const MFD_CLOEXEC: c_uint = 0x0001; -pub const MFD_ALLOW_SEALING: c_uint = 0x0002; -pub const MFD_HUGETLB: 
c_uint = 0x0004; -pub const MFD_NOEXEC_SEAL: c_uint = 0x0008; -pub const MFD_EXEC: c_uint = 0x0010; -pub const MFD_HUGE_64KB: c_uint = 0x40000000; -pub const MFD_HUGE_512KB: c_uint = 0x4c000000; -pub const MFD_HUGE_1MB: c_uint = 0x50000000; -pub const MFD_HUGE_2MB: c_uint = 0x54000000; -pub const MFD_HUGE_8MB: c_uint = 0x5c000000; -pub const MFD_HUGE_16MB: c_uint = 0x60000000; -pub const MFD_HUGE_32MB: c_uint = 0x64000000; -pub const MFD_HUGE_256MB: c_uint = 0x70000000; -pub const MFD_HUGE_512MB: c_uint = 0x74000000; -pub const MFD_HUGE_1GB: c_uint = 0x78000000; -pub const MFD_HUGE_2GB: c_uint = 0x7c000000; -pub const MFD_HUGE_16GB: c_uint = 0x88000000; -pub const MFD_HUGE_MASK: c_uint = 63; -pub const MFD_HUGE_SHIFT: c_uint = 26; - -// these are used in the p_type field of Elf32_Phdr and Elf64_Phdr, which has -// the type Elf32Word and Elf64Word respectively. Luckily, both of those are u32 -// so we can use that type here to avoid having to cast. -pub const PT_NULL: u32 = 0; -pub const PT_LOAD: u32 = 1; -pub const PT_DYNAMIC: u32 = 2; -pub const PT_INTERP: u32 = 3; -pub const PT_NOTE: u32 = 4; -pub const PT_SHLIB: u32 = 5; -pub const PT_PHDR: u32 = 6; -pub const PT_TLS: u32 = 7; -pub const PT_LOOS: u32 = 0x60000000; -pub const PT_GNU_EH_FRAME: u32 = 0x6474e550; -pub const PT_GNU_STACK: u32 = 0x6474e551; -pub const PT_GNU_RELRO: u32 = 0x6474e552; -pub const PT_HIOS: u32 = 0x6fffffff; -pub const PT_LOPROC: u32 = 0x70000000; -pub const PT_HIPROC: u32 = 0x7fffffff; - -// uapi/linux/mount.h -pub const OPEN_TREE_CLONE: c_uint = 0x01; -pub const OPEN_TREE_CLOEXEC: c_uint = O_CLOEXEC as c_uint; - -// linux/netfilter.h -pub const NF_DROP: c_int = 0; -pub const NF_ACCEPT: c_int = 1; -pub const NF_STOLEN: c_int = 2; -pub const NF_QUEUE: c_int = 3; -pub const NF_REPEAT: c_int = 4; -pub const NF_STOP: c_int = 5; -pub const NF_MAX_VERDICT: c_int = NF_STOP; - -pub const NF_VERDICT_MASK: c_int = 0x000000ff; -pub const NF_VERDICT_FLAG_QUEUE_BYPASS: c_int = 0x00008000; - -pub const NF_VERDICT_QMASK: c_int = 0xffff0000; -pub const NF_VERDICT_QBITS: c_int = 16; - -pub const NF_VERDICT_BITS: c_int = 16; - -pub const NF_INET_PRE_ROUTING: c_int = 0; -pub const NF_INET_LOCAL_IN: c_int = 1; -pub const NF_INET_FORWARD: c_int = 2; -pub const NF_INET_LOCAL_OUT: c_int = 3; -pub const NF_INET_POST_ROUTING: c_int = 4; -pub const NF_INET_NUMHOOKS: c_int = 5; -pub const NF_INET_INGRESS: c_int = NF_INET_NUMHOOKS; - -pub const NF_NETDEV_INGRESS: c_int = 0; -pub const NF_NETDEV_EGRESS: c_int = 1; -pub const NF_NETDEV_NUMHOOKS: c_int = 2; - -pub const NFPROTO_UNSPEC: c_int = 0; -pub const NFPROTO_INET: c_int = 1; -pub const NFPROTO_IPV4: c_int = 2; -pub const NFPROTO_ARP: c_int = 3; -pub const NFPROTO_NETDEV: c_int = 5; -pub const NFPROTO_BRIDGE: c_int = 7; -pub const NFPROTO_IPV6: c_int = 10; -pub const NFPROTO_DECNET: c_int = 12; -pub const NFPROTO_NUMPROTO: c_int = 13; - -// linux/netfilter_arp.h -pub const NF_ARP: c_int = 0; -pub const NF_ARP_IN: c_int = 0; -pub const NF_ARP_OUT: c_int = 1; -pub const NF_ARP_FORWARD: c_int = 2; -pub const NF_ARP_NUMHOOKS: c_int = 3; - -// linux/netfilter_bridge.h -pub const NF_BR_PRE_ROUTING: c_int = 0; -pub const NF_BR_LOCAL_IN: c_int = 1; -pub const NF_BR_FORWARD: c_int = 2; -pub const NF_BR_LOCAL_OUT: c_int = 3; -pub const NF_BR_POST_ROUTING: c_int = 4; -pub const NF_BR_BROUTING: c_int = 5; -pub const NF_BR_NUMHOOKS: c_int = 6; - -pub const NF_BR_PRI_FIRST: c_int = crate::INT_MIN; -pub const NF_BR_PRI_NAT_DST_BRIDGED: c_int = -300; -pub const NF_BR_PRI_FILTER_BRIDGED: c_int = -200; 
-pub const NF_BR_PRI_BRNF: c_int = 0; -pub const NF_BR_PRI_NAT_DST_OTHER: c_int = 100; -pub const NF_BR_PRI_FILTER_OTHER: c_int = 200; -pub const NF_BR_PRI_NAT_SRC: c_int = 300; -pub const NF_BR_PRI_LAST: c_int = crate::INT_MAX; - -// linux/netfilter_ipv4.h -pub const NF_IP_PRE_ROUTING: c_int = 0; -pub const NF_IP_LOCAL_IN: c_int = 1; -pub const NF_IP_FORWARD: c_int = 2; -pub const NF_IP_LOCAL_OUT: c_int = 3; -pub const NF_IP_POST_ROUTING: c_int = 4; -pub const NF_IP_NUMHOOKS: c_int = 5; - -pub const NF_IP_PRI_FIRST: c_int = crate::INT_MIN; -pub const NF_IP_PRI_RAW_BEFORE_DEFRAG: c_int = -450; -pub const NF_IP_PRI_CONNTRACK_DEFRAG: c_int = -400; -pub const NF_IP_PRI_RAW: c_int = -300; -pub const NF_IP_PRI_SELINUX_FIRST: c_int = -225; -pub const NF_IP_PRI_CONNTRACK: c_int = -200; -pub const NF_IP_PRI_MANGLE: c_int = -150; -pub const NF_IP_PRI_NAT_DST: c_int = -100; -pub const NF_IP_PRI_FILTER: c_int = 0; -pub const NF_IP_PRI_SECURITY: c_int = 50; -pub const NF_IP_PRI_NAT_SRC: c_int = 100; -pub const NF_IP_PRI_SELINUX_LAST: c_int = 225; -pub const NF_IP_PRI_CONNTRACK_HELPER: c_int = 300; -pub const NF_IP_PRI_CONNTRACK_CONFIRM: c_int = crate::INT_MAX; -pub const NF_IP_PRI_LAST: c_int = crate::INT_MAX; - -// linux/netfilter_ipv6.h -pub const NF_IP6_PRE_ROUTING: c_int = 0; -pub const NF_IP6_LOCAL_IN: c_int = 1; -pub const NF_IP6_FORWARD: c_int = 2; -pub const NF_IP6_LOCAL_OUT: c_int = 3; -pub const NF_IP6_POST_ROUTING: c_int = 4; -pub const NF_IP6_NUMHOOKS: c_int = 5; - -pub const NF_IP6_PRI_FIRST: c_int = crate::INT_MIN; -pub const NF_IP6_PRI_RAW_BEFORE_DEFRAG: c_int = -450; -pub const NF_IP6_PRI_CONNTRACK_DEFRAG: c_int = -400; -pub const NF_IP6_PRI_RAW: c_int = -300; -pub const NF_IP6_PRI_SELINUX_FIRST: c_int = -225; -pub const NF_IP6_PRI_CONNTRACK: c_int = -200; -pub const NF_IP6_PRI_MANGLE: c_int = -150; -pub const NF_IP6_PRI_NAT_DST: c_int = -100; -pub const NF_IP6_PRI_FILTER: c_int = 0; -pub const NF_IP6_PRI_SECURITY: c_int = 50; -pub const NF_IP6_PRI_NAT_SRC: c_int = 100; -pub const NF_IP6_PRI_SELINUX_LAST: c_int = 225; -pub const NF_IP6_PRI_CONNTRACK_HELPER: c_int = 300; -pub const NF_IP6_PRI_LAST: c_int = crate::INT_MAX; - -// linux/netfilter_ipv6/ip6_tables.h -pub const IP6T_SO_ORIGINAL_DST: c_int = 80; - -// linux/netfilter/nf_tables.h -pub const NFT_TABLE_MAXNAMELEN: c_int = 256; -pub const NFT_CHAIN_MAXNAMELEN: c_int = 256; -pub const NFT_SET_MAXNAMELEN: c_int = 256; -pub const NFT_OBJ_MAXNAMELEN: c_int = 256; -pub const NFT_USERDATA_MAXLEN: c_int = 256; - -pub const NFT_REG_VERDICT: c_int = 0; -pub const NFT_REG_1: c_int = 1; -pub const NFT_REG_2: c_int = 2; -pub const NFT_REG_3: c_int = 3; -pub const NFT_REG_4: c_int = 4; -pub const __NFT_REG_MAX: c_int = 5; -pub const NFT_REG32_00: c_int = 8; -pub const NFT_REG32_01: c_int = 9; -pub const NFT_REG32_02: c_int = 10; -pub const NFT_REG32_03: c_int = 11; -pub const NFT_REG32_04: c_int = 12; -pub const NFT_REG32_05: c_int = 13; -pub const NFT_REG32_06: c_int = 14; -pub const NFT_REG32_07: c_int = 15; -pub const NFT_REG32_08: c_int = 16; -pub const NFT_REG32_09: c_int = 17; -pub const NFT_REG32_10: c_int = 18; -pub const NFT_REG32_11: c_int = 19; -pub const NFT_REG32_12: c_int = 20; -pub const NFT_REG32_13: c_int = 21; -pub const NFT_REG32_14: c_int = 22; -pub const NFT_REG32_15: c_int = 23; - -pub const NFT_REG_SIZE: c_int = 16; -pub const NFT_REG32_SIZE: c_int = 4; - -pub const NFT_CONTINUE: c_int = -1; -pub const NFT_BREAK: c_int = -2; -pub const NFT_JUMP: c_int = -3; -pub const NFT_GOTO: c_int = -4; -pub const NFT_RETURN: c_int = 
-5; - -pub const NFT_MSG_NEWTABLE: c_int = 0; -pub const NFT_MSG_GETTABLE: c_int = 1; -pub const NFT_MSG_DELTABLE: c_int = 2; -pub const NFT_MSG_NEWCHAIN: c_int = 3; -pub const NFT_MSG_GETCHAIN: c_int = 4; -pub const NFT_MSG_DELCHAIN: c_int = 5; -pub const NFT_MSG_NEWRULE: c_int = 6; -pub const NFT_MSG_GETRULE: c_int = 7; -pub const NFT_MSG_DELRULE: c_int = 8; -pub const NFT_MSG_NEWSET: c_int = 9; -pub const NFT_MSG_GETSET: c_int = 10; -pub const NFT_MSG_DELSET: c_int = 11; -pub const NFT_MSG_NEWSETELEM: c_int = 12; -pub const NFT_MSG_GETSETELEM: c_int = 13; -pub const NFT_MSG_DELSETELEM: c_int = 14; -pub const NFT_MSG_NEWGEN: c_int = 15; -pub const NFT_MSG_GETGEN: c_int = 16; -pub const NFT_MSG_TRACE: c_int = 17; -pub const NFT_MSG_NEWOBJ: c_int = 18; -pub const NFT_MSG_GETOBJ: c_int = 19; -pub const NFT_MSG_DELOBJ: c_int = 20; -pub const NFT_MSG_GETOBJ_RESET: c_int = 21; -pub const NFT_MSG_MAX: c_int = 25; - -pub const NFT_SET_ANONYMOUS: c_int = 0x1; -pub const NFT_SET_CONSTANT: c_int = 0x2; -pub const NFT_SET_INTERVAL: c_int = 0x4; -pub const NFT_SET_MAP: c_int = 0x8; -pub const NFT_SET_TIMEOUT: c_int = 0x10; -pub const NFT_SET_EVAL: c_int = 0x20; - -pub const NFT_SET_POL_PERFORMANCE: c_int = 0; -pub const NFT_SET_POL_MEMORY: c_int = 1; - -pub const NFT_SET_ELEM_INTERVAL_END: c_int = 0x1; - -pub const NFT_DATA_VALUE: c_uint = 0; -pub const NFT_DATA_VERDICT: c_uint = 0xffffff00; - -pub const NFT_DATA_RESERVED_MASK: c_uint = 0xffffff00; - -pub const NFT_DATA_VALUE_MAXLEN: c_int = 64; - -pub const NFT_BYTEORDER_NTOH: c_int = 0; -pub const NFT_BYTEORDER_HTON: c_int = 1; - -pub const NFT_CMP_EQ: c_int = 0; -pub const NFT_CMP_NEQ: c_int = 1; -pub const NFT_CMP_LT: c_int = 2; -pub const NFT_CMP_LTE: c_int = 3; -pub const NFT_CMP_GT: c_int = 4; -pub const NFT_CMP_GTE: c_int = 5; - -pub const NFT_RANGE_EQ: c_int = 0; -pub const NFT_RANGE_NEQ: c_int = 1; - -pub const NFT_LOOKUP_F_INV: c_int = 1 << 0; - -pub const NFT_DYNSET_OP_ADD: c_int = 0; -pub const NFT_DYNSET_OP_UPDATE: c_int = 1; - -pub const NFT_DYNSET_F_INV: c_int = 1 << 0; - -pub const NFT_PAYLOAD_LL_HEADER: c_int = 0; -pub const NFT_PAYLOAD_NETWORK_HEADER: c_int = 1; -pub const NFT_PAYLOAD_TRANSPORT_HEADER: c_int = 2; - -pub const NFT_PAYLOAD_CSUM_NONE: c_int = 0; -pub const NFT_PAYLOAD_CSUM_INET: c_int = 1; - -pub const NFT_META_LEN: c_int = 0; -pub const NFT_META_PROTOCOL: c_int = 1; -pub const NFT_META_PRIORITY: c_int = 2; -pub const NFT_META_MARK: c_int = 3; -pub const NFT_META_IIF: c_int = 4; -pub const NFT_META_OIF: c_int = 5; -pub const NFT_META_IIFNAME: c_int = 6; -pub const NFT_META_OIFNAME: c_int = 7; -pub const NFT_META_IIFTYPE: c_int = 8; -pub const NFT_META_OIFTYPE: c_int = 9; -pub const NFT_META_SKUID: c_int = 10; -pub const NFT_META_SKGID: c_int = 11; -pub const NFT_META_NFTRACE: c_int = 12; -pub const NFT_META_RTCLASSID: c_int = 13; -pub const NFT_META_SECMARK: c_int = 14; -pub const NFT_META_NFPROTO: c_int = 15; -pub const NFT_META_L4PROTO: c_int = 16; -pub const NFT_META_BRI_IIFNAME: c_int = 17; -pub const NFT_META_BRI_OIFNAME: c_int = 18; -pub const NFT_META_PKTTYPE: c_int = 19; -pub const NFT_META_CPU: c_int = 20; -pub const NFT_META_IIFGROUP: c_int = 21; -pub const NFT_META_OIFGROUP: c_int = 22; -pub const NFT_META_CGROUP: c_int = 23; -pub const NFT_META_PRANDOM: c_int = 24; - -pub const NFT_CT_STATE: c_int = 0; -pub const NFT_CT_DIRECTION: c_int = 1; -pub const NFT_CT_STATUS: c_int = 2; -pub const NFT_CT_MARK: c_int = 3; -pub const NFT_CT_SECMARK: c_int = 4; -pub const NFT_CT_EXPIRATION: c_int = 5; -pub const 
NFT_CT_HELPER: c_int = 6; -pub const NFT_CT_L3PROTOCOL: c_int = 7; -pub const NFT_CT_SRC: c_int = 8; -pub const NFT_CT_DST: c_int = 9; -pub const NFT_CT_PROTOCOL: c_int = 10; -pub const NFT_CT_PROTO_SRC: c_int = 11; -pub const NFT_CT_PROTO_DST: c_int = 12; -pub const NFT_CT_LABELS: c_int = 13; -pub const NFT_CT_PKTS: c_int = 14; -pub const NFT_CT_BYTES: c_int = 15; -pub const NFT_CT_AVGPKT: c_int = 16; -pub const NFT_CT_ZONE: c_int = 17; -pub const NFT_CT_EVENTMASK: c_int = 18; -pub const NFT_CT_SRC_IP: c_int = 19; -pub const NFT_CT_DST_IP: c_int = 20; -pub const NFT_CT_SRC_IP6: c_int = 21; -pub const NFT_CT_DST_IP6: c_int = 22; -pub const NFT_CT_ID: c_int = 23; - -pub const NFT_LIMIT_PKTS: c_int = 0; -pub const NFT_LIMIT_PKT_BYTES: c_int = 1; - -pub const NFT_LIMIT_F_INV: c_int = 1 << 0; - -pub const NFT_QUEUE_FLAG_BYPASS: c_int = 0x01; -pub const NFT_QUEUE_FLAG_CPU_FANOUT: c_int = 0x02; -pub const NFT_QUEUE_FLAG_MASK: c_int = 0x03; - -pub const NFT_QUOTA_F_INV: c_int = 1 << 0; - -pub const NFT_REJECT_ICMP_UNREACH: c_int = 0; -pub const NFT_REJECT_TCP_RST: c_int = 1; -pub const NFT_REJECT_ICMPX_UNREACH: c_int = 2; - -pub const NFT_REJECT_ICMPX_NO_ROUTE: c_int = 0; -pub const NFT_REJECT_ICMPX_PORT_UNREACH: c_int = 1; -pub const NFT_REJECT_ICMPX_HOST_UNREACH: c_int = 2; -pub const NFT_REJECT_ICMPX_ADMIN_PROHIBITED: c_int = 3; - -pub const NFT_NAT_SNAT: c_int = 0; -pub const NFT_NAT_DNAT: c_int = 1; - -pub const NFT_TRACETYPE_UNSPEC: c_int = 0; -pub const NFT_TRACETYPE_POLICY: c_int = 1; -pub const NFT_TRACETYPE_RETURN: c_int = 2; -pub const NFT_TRACETYPE_RULE: c_int = 3; - -pub const NFT_NG_INCREMENTAL: c_int = 0; -pub const NFT_NG_RANDOM: c_int = 1; - -// linux/input.h -pub const FF_MAX: crate::__u16 = 0x7f; -pub const FF_CNT: usize = FF_MAX as usize + 1; - -// linux/input-event-codes.h -pub const INPUT_PROP_MAX: crate::__u16 = 0x1f; -pub const INPUT_PROP_CNT: usize = INPUT_PROP_MAX as usize + 1; -pub const EV_MAX: crate::__u16 = 0x1f; -pub const EV_CNT: usize = EV_MAX as usize + 1; -pub const SYN_MAX: crate::__u16 = 0xf; -pub const SYN_CNT: usize = SYN_MAX as usize + 1; -pub const KEY_MAX: crate::__u16 = 0x2ff; -pub const KEY_CNT: usize = KEY_MAX as usize + 1; -pub const REL_MAX: crate::__u16 = 0x0f; -pub const REL_CNT: usize = REL_MAX as usize + 1; -pub const ABS_MAX: crate::__u16 = 0x3f; -pub const ABS_CNT: usize = ABS_MAX as usize + 1; -pub const SW_MAX: crate::__u16 = 0x0f; -pub const SW_CNT: usize = SW_MAX as usize + 1; -pub const MSC_MAX: crate::__u16 = 0x07; -pub const MSC_CNT: usize = MSC_MAX as usize + 1; -pub const LED_MAX: crate::__u16 = 0x0f; -pub const LED_CNT: usize = LED_MAX as usize + 1; -pub const REP_MAX: crate::__u16 = 0x01; -pub const REP_CNT: usize = REP_MAX as usize + 1; -pub const SND_MAX: crate::__u16 = 0x07; -pub const SND_CNT: usize = SND_MAX as usize + 1; - -// linux/uinput.h -pub const UINPUT_VERSION: c_uint = 5; -pub const UINPUT_MAX_NAME_SIZE: usize = 80; - -// start android/platform/bionic/libc/kernel/uapi/linux/if_ether.h -// from https://android.googlesource.com/platform/bionic/+/HEAD/libc/kernel/uapi/linux/if_ether.h -pub const ETH_ALEN: c_int = 6; -pub const ETH_HLEN: c_int = 14; -pub const ETH_ZLEN: c_int = 60; -pub const ETH_DATA_LEN: c_int = 1500; -pub const ETH_FRAME_LEN: c_int = 1514; -pub const ETH_FCS_LEN: c_int = 4; -pub const ETH_MIN_MTU: c_int = 68; -pub const ETH_MAX_MTU: c_int = 0xFFFF; -pub const ETH_P_LOOP: c_int = 0x0060; -pub const ETH_P_PUP: c_int = 0x0200; -pub const ETH_P_PUPAT: c_int = 0x0201; -pub const ETH_P_TSN: c_int = 0x22F0; 
-pub const ETH_P_IP: c_int = 0x0800; -pub const ETH_P_X25: c_int = 0x0805; -pub const ETH_P_ARP: c_int = 0x0806; -pub const ETH_P_BPQ: c_int = 0x08FF; -pub const ETH_P_IEEEPUP: c_int = 0x0a00; -pub const ETH_P_IEEEPUPAT: c_int = 0x0a01; -pub const ETH_P_BATMAN: c_int = 0x4305; -pub const ETH_P_DEC: c_int = 0x6000; -pub const ETH_P_DNA_DL: c_int = 0x6001; -pub const ETH_P_DNA_RC: c_int = 0x6002; -pub const ETH_P_DNA_RT: c_int = 0x6003; -pub const ETH_P_LAT: c_int = 0x6004; -pub const ETH_P_DIAG: c_int = 0x6005; -pub const ETH_P_CUST: c_int = 0x6006; -pub const ETH_P_SCA: c_int = 0x6007; -pub const ETH_P_TEB: c_int = 0x6558; -pub const ETH_P_RARP: c_int = 0x8035; -pub const ETH_P_ATALK: c_int = 0x809B; -pub const ETH_P_AARP: c_int = 0x80F3; -pub const ETH_P_8021Q: c_int = 0x8100; -/* see rust-lang/libc#924 pub const ETH_P_ERSPAN: c_int = 0x88BE;*/ -pub const ETH_P_IPX: c_int = 0x8137; -pub const ETH_P_IPV6: c_int = 0x86DD; -pub const ETH_P_PAUSE: c_int = 0x8808; -pub const ETH_P_SLOW: c_int = 0x8809; -pub const ETH_P_WCCP: c_int = 0x883E; -pub const ETH_P_MPLS_UC: c_int = 0x8847; -pub const ETH_P_MPLS_MC: c_int = 0x8848; -pub const ETH_P_ATMMPOA: c_int = 0x884c; -pub const ETH_P_PPP_DISC: c_int = 0x8863; -pub const ETH_P_PPP_SES: c_int = 0x8864; -pub const ETH_P_LINK_CTL: c_int = 0x886c; -pub const ETH_P_ATMFATE: c_int = 0x8884; -pub const ETH_P_PAE: c_int = 0x888E; -pub const ETH_P_AOE: c_int = 0x88A2; -pub const ETH_P_8021AD: c_int = 0x88A8; -pub const ETH_P_802_EX1: c_int = 0x88B5; -pub const ETH_P_TIPC: c_int = 0x88CA; -pub const ETH_P_MACSEC: c_int = 0x88E5; -pub const ETH_P_8021AH: c_int = 0x88E7; -pub const ETH_P_MVRP: c_int = 0x88F5; -pub const ETH_P_1588: c_int = 0x88F7; -pub const ETH_P_NCSI: c_int = 0x88F8; -pub const ETH_P_PRP: c_int = 0x88FB; -pub const ETH_P_FCOE: c_int = 0x8906; -/* see rust-lang/libc#924 pub const ETH_P_IBOE: c_int = 0x8915;*/ -pub const ETH_P_TDLS: c_int = 0x890D; -pub const ETH_P_FIP: c_int = 0x8914; -pub const ETH_P_80221: c_int = 0x8917; -pub const ETH_P_HSR: c_int = 0x892F; -/* see rust-lang/libc#924 pub const ETH_P_NSH: c_int = 0x894F;*/ -pub const ETH_P_LOOPBACK: c_int = 0x9000; -pub const ETH_P_QINQ1: c_int = 0x9100; -pub const ETH_P_QINQ2: c_int = 0x9200; -pub const ETH_P_QINQ3: c_int = 0x9300; -pub const ETH_P_EDSA: c_int = 0xDADA; -/* see rust-lang/libc#924 pub const ETH_P_IFE: c_int = 0xED3E;*/ -pub const ETH_P_AF_IUCV: c_int = 0xFBFB; -pub const ETH_P_802_3_MIN: c_int = 0x0600; -pub const ETH_P_802_3: c_int = 0x0001; -pub const ETH_P_AX25: c_int = 0x0002; -pub const ETH_P_ALL: c_int = 0x0003; -pub const ETH_P_802_2: c_int = 0x0004; -pub const ETH_P_SNAP: c_int = 0x0005; -pub const ETH_P_DDCMP: c_int = 0x0006; -pub const ETH_P_WAN_PPP: c_int = 0x0007; -pub const ETH_P_PPP_MP: c_int = 0x0008; -pub const ETH_P_LOCALTALK: c_int = 0x0009; -pub const ETH_P_CAN: c_int = 0x000C; -pub const ETH_P_CANFD: c_int = 0x000D; -pub const ETH_P_PPPTALK: c_int = 0x0010; -pub const ETH_P_TR_802_2: c_int = 0x0011; -pub const ETH_P_MOBITEX: c_int = 0x0015; -pub const ETH_P_CONTROL: c_int = 0x0016; -pub const ETH_P_IRDA: c_int = 0x0017; -pub const ETH_P_ECONET: c_int = 0x0018; -pub const ETH_P_HDLC: c_int = 0x0019; -pub const ETH_P_ARCNET: c_int = 0x001A; -pub const ETH_P_DSA: c_int = 0x001B; -pub const ETH_P_TRAILER: c_int = 0x001C; -pub const ETH_P_PHONET: c_int = 0x00F5; -pub const ETH_P_IEEE802154: c_int = 0x00F6; -pub const ETH_P_CAIF: c_int = 0x00F7; -pub const ETH_P_XDSA: c_int = 0x00F8; -/* see rust-lang/libc#924 pub const ETH_P_MAP: c_int = 0x00F9;*/ -// end 
android/platform/bionic/libc/kernel/uapi/linux/if_ether.h - -// start android/platform/bionic/libc/kernel/uapi/linux/neighbour.h -pub const NDA_UNSPEC: c_ushort = 0; -pub const NDA_DST: c_ushort = 1; -pub const NDA_LLADDR: c_ushort = 2; -pub const NDA_CACHEINFO: c_ushort = 3; -pub const NDA_PROBES: c_ushort = 4; -pub const NDA_VLAN: c_ushort = 5; -pub const NDA_PORT: c_ushort = 6; -pub const NDA_VNI: c_ushort = 7; -pub const NDA_IFINDEX: c_ushort = 8; -pub const NDA_MASTER: c_ushort = 9; -pub const NDA_LINK_NETNSID: c_ushort = 10; -pub const NDA_SRC_VNI: c_ushort = 11; -pub const NDA_PROTOCOL: c_ushort = 12; -pub const NDA_NH_ID: c_ushort = 13; -pub const NDA_FDB_EXT_ATTRS: c_ushort = 14; -pub const NDA_FLAGS_EXT: c_ushort = 15; -pub const NDA_NDM_STATE_MASK: c_ushort = 16; -pub const NDA_NDM_FLAGS_MASK: c_ushort = 17; - -pub const NTF_USE: u8 = 0x01; -pub const NTF_SELF: u8 = 0x02; -pub const NTF_MASTER: u8 = 0x04; -pub const NTF_PROXY: u8 = 0x08; -pub const NTF_EXT_LEARNED: u8 = 0x10; -pub const NTF_OFFLOADED: u8 = 0x20; -pub const NTF_STICKY: u8 = 0x40; -pub const NTF_ROUTER: u8 = 0x80; - -pub const NTF_EXT_MANAGED: u8 = 0x01; -pub const NTF_EXT_LOCKED: u8 = 0x02; - -pub const NUD_NONE: u16 = 0x00; -pub const NUD_INCOMPLETE: u16 = 0x01; -pub const NUD_REACHABLE: u16 = 0x02; -pub const NUD_STALE: u16 = 0x04; -pub const NUD_DELAY: u16 = 0x08; -pub const NUD_PROBE: u16 = 0x10; -pub const NUD_FAILED: u16 = 0x20; -pub const NUD_NOARP: u16 = 0x40; -pub const NUD_PERMANENT: u16 = 0x80; - -pub const NDTPA_UNSPEC: c_ushort = 0; -pub const NDTPA_IFINDEX: c_ushort = 1; -pub const NDTPA_REFCNT: c_ushort = 2; -pub const NDTPA_REACHABLE_TIME: c_ushort = 3; -pub const NDTPA_BASE_REACHABLE_TIME: c_ushort = 4; -pub const NDTPA_RETRANS_TIME: c_ushort = 5; -pub const NDTPA_GC_STALETIME: c_ushort = 6; -pub const NDTPA_DELAY_PROBE_TIME: c_ushort = 7; -pub const NDTPA_QUEUE_LEN: c_ushort = 8; -pub const NDTPA_APP_PROBES: c_ushort = 9; -pub const NDTPA_UCAST_PROBES: c_ushort = 10; -pub const NDTPA_MCAST_PROBES: c_ushort = 11; -pub const NDTPA_ANYCAST_DELAY: c_ushort = 12; -pub const NDTPA_PROXY_DELAY: c_ushort = 13; -pub const NDTPA_PROXY_QLEN: c_ushort = 14; -pub const NDTPA_LOCKTIME: c_ushort = 15; -pub const NDTPA_QUEUE_LENBYTES: c_ushort = 16; -pub const NDTPA_MCAST_REPROBES: c_ushort = 17; -pub const NDTPA_PAD: c_ushort = 18; -pub const NDTPA_INTERVAL_PROBE_TIME_MS: c_ushort = 19; - -pub const NDTA_UNSPEC: c_ushort = 0; -pub const NDTA_NAME: c_ushort = 1; -pub const NDTA_THRESH1: c_ushort = 2; -pub const NDTA_THRESH2: c_ushort = 3; -pub const NDTA_THRESH3: c_ushort = 4; -pub const NDTA_CONFIG: c_ushort = 5; -pub const NDTA_PARMS: c_ushort = 6; -pub const NDTA_STATS: c_ushort = 7; -pub const NDTA_GC_INTERVAL: c_ushort = 8; -pub const NDTA_PAD: c_ushort = 9; - -pub const FDB_NOTIFY_BIT: u16 = 0x01; -pub const FDB_NOTIFY_INACTIVE_BIT: u16 = 0x02; - -pub const NFEA_UNSPEC: c_ushort = 0; -pub const NFEA_ACTIVITY_NOTIFY: c_ushort = 1; -pub const NFEA_DONT_REFRESH: c_ushort = 2; -// end android/platform/bionic/libc/kernel/uapi/linux/neighbour.h - -pub const SIOCADDRT: c_ulong = 0x0000890B; -pub const SIOCDELRT: c_ulong = 0x0000890C; -pub const SIOCRTMSG: c_ulong = 0x0000890D; -pub const SIOCGIFNAME: c_ulong = 0x00008910; -pub const SIOCSIFLINK: c_ulong = 0x00008911; -pub const SIOCGIFCONF: c_ulong = 0x00008912; -pub const SIOCGIFFLAGS: c_ulong = 0x00008913; -pub const SIOCSIFFLAGS: c_ulong = 0x00008914; -pub const SIOCGIFADDR: c_ulong = 0x00008915; -pub const SIOCSIFADDR: c_ulong = 0x00008916; -pub const 
SIOCGIFDSTADDR: c_ulong = 0x00008917; -pub const SIOCSIFDSTADDR: c_ulong = 0x00008918; -pub const SIOCGIFBRDADDR: c_ulong = 0x00008919; -pub const SIOCSIFBRDADDR: c_ulong = 0x0000891A; -pub const SIOCGIFNETMASK: c_ulong = 0x0000891B; -pub const SIOCSIFNETMASK: c_ulong = 0x0000891C; -pub const SIOCGIFMETRIC: c_ulong = 0x0000891D; -pub const SIOCSIFMETRIC: c_ulong = 0x0000891E; -pub const SIOCGIFMEM: c_ulong = 0x0000891F; -pub const SIOCSIFMEM: c_ulong = 0x00008920; -pub const SIOCGIFMTU: c_ulong = 0x00008921; -pub const SIOCSIFMTU: c_ulong = 0x00008922; -pub const SIOCSIFNAME: c_ulong = 0x00008923; -pub const SIOCSIFHWADDR: c_ulong = 0x00008924; -pub const SIOCGIFENCAP: c_ulong = 0x00008925; -pub const SIOCSIFENCAP: c_ulong = 0x00008926; -pub const SIOCGIFHWADDR: c_ulong = 0x00008927; -pub const SIOCGIFSLAVE: c_ulong = 0x00008929; -pub const SIOCSIFSLAVE: c_ulong = 0x00008930; -pub const SIOCADDMULTI: c_ulong = 0x00008931; -pub const SIOCDELMULTI: c_ulong = 0x00008932; -pub const SIOCGIFINDEX: c_ulong = 0x00008933; -pub const SIOGIFINDEX: c_ulong = SIOCGIFINDEX; -pub const SIOCSIFPFLAGS: c_ulong = 0x00008934; -pub const SIOCGIFPFLAGS: c_ulong = 0x00008935; -pub const SIOCDIFADDR: c_ulong = 0x00008936; -pub const SIOCSIFHWBROADCAST: c_ulong = 0x00008937; -pub const SIOCGIFCOUNT: c_ulong = 0x00008938; -pub const SIOCGIFBR: c_ulong = 0x00008940; -pub const SIOCSIFBR: c_ulong = 0x00008941; -pub const SIOCGIFTXQLEN: c_ulong = 0x00008942; -pub const SIOCSIFTXQLEN: c_ulong = 0x00008943; -pub const SIOCETHTOOL: c_ulong = 0x00008946; -pub const SIOCGMIIPHY: c_ulong = 0x00008947; -pub const SIOCGMIIREG: c_ulong = 0x00008948; -pub const SIOCSMIIREG: c_ulong = 0x00008949; -pub const SIOCWANDEV: c_ulong = 0x0000894A; -pub const SIOCOUTQNSD: c_ulong = 0x0000894B; -pub const SIOCGSKNS: c_ulong = 0x0000894C; -pub const SIOCDARP: c_ulong = 0x00008953; -pub const SIOCGARP: c_ulong = 0x00008954; -pub const SIOCSARP: c_ulong = 0x00008955; -pub const SIOCDRARP: c_ulong = 0x00008960; -pub const SIOCGRARP: c_ulong = 0x00008961; -pub const SIOCSRARP: c_ulong = 0x00008962; -pub const SIOCGIFMAP: c_ulong = 0x00008970; -pub const SIOCSIFMAP: c_ulong = 0x00008971; -pub const SIOCADDDLCI: c_ulong = 0x00008980; -pub const SIOCDELDLCI: c_ulong = 0x00008981; -pub const SIOCGIFVLAN: c_ulong = 0x00008982; -pub const SIOCSIFVLAN: c_ulong = 0x00008983; -pub const SIOCBONDENSLAVE: c_ulong = 0x00008990; -pub const SIOCBONDRELEASE: c_ulong = 0x00008991; -pub const SIOCBONDSETHWADDR: c_ulong = 0x00008992; -pub const SIOCBONDSLAVEINFOQUERY: c_ulong = 0x00008993; -pub const SIOCBONDINFOQUERY: c_ulong = 0x00008994; -pub const SIOCBONDCHANGEACTIVE: c_ulong = 0x00008995; -pub const SIOCBRADDBR: c_ulong = 0x000089a0; -pub const SIOCBRDELBR: c_ulong = 0x000089a1; -pub const SIOCBRADDIF: c_ulong = 0x000089a2; -pub const SIOCBRDELIF: c_ulong = 0x000089a3; -pub const SIOCSHWTSTAMP: c_ulong = 0x000089b0; -pub const SIOCGHWTSTAMP: c_ulong = 0x000089b1; -pub const SIOCDEVPRIVATE: c_ulong = 0x000089F0; -pub const SIOCPROTOPRIVATE: c_ulong = 0x000089E0; - -// linux/module.h -pub const MODULE_INIT_IGNORE_MODVERSIONS: c_uint = 0x0001; -pub const MODULE_INIT_IGNORE_VERMAGIC: c_uint = 0x0002; - -// linux/net_tstamp.h -pub const SOF_TIMESTAMPING_TX_HARDWARE: c_uint = 1 << 0; -pub const SOF_TIMESTAMPING_TX_SOFTWARE: c_uint = 1 << 1; -pub const SOF_TIMESTAMPING_RX_HARDWARE: c_uint = 1 << 2; -pub const SOF_TIMESTAMPING_RX_SOFTWARE: c_uint = 1 << 3; -pub const SOF_TIMESTAMPING_SOFTWARE: c_uint = 1 << 4; -pub const SOF_TIMESTAMPING_SYS_HARDWARE: c_uint = 
1 << 5; -pub const SOF_TIMESTAMPING_RAW_HARDWARE: c_uint = 1 << 6; -pub const SOF_TIMESTAMPING_OPT_ID: c_uint = 1 << 7; -pub const SOF_TIMESTAMPING_TX_SCHED: c_uint = 1 << 8; -pub const SOF_TIMESTAMPING_TX_ACK: c_uint = 1 << 9; -pub const SOF_TIMESTAMPING_OPT_CMSG: c_uint = 1 << 10; -pub const SOF_TIMESTAMPING_OPT_TSONLY: c_uint = 1 << 11; -pub const SOF_TIMESTAMPING_OPT_STATS: c_uint = 1 << 12; -pub const SOF_TIMESTAMPING_OPT_PKTINFO: c_uint = 1 << 13; -pub const SOF_TIMESTAMPING_OPT_TX_SWHW: c_uint = 1 << 14; -pub const SOF_TIMESTAMPING_BIND_PHC: c_uint = 1 << 15; -pub const SOF_TIMESTAMPING_OPT_ID_TCP: c_uint = 1 << 16; -pub const SOF_TIMESTAMPING_OPT_RX_FILTER: c_uint = 1 << 17; - -#[deprecated( - since = "0.2.55", - note = "ENOATTR is not available on Android; use ENODATA instead" -)] -pub const ENOATTR: c_int = crate::ENODATA; - -// linux/if_alg.h -pub const ALG_SET_KEY: c_int = 1; -pub const ALG_SET_IV: c_int = 2; -pub const ALG_SET_OP: c_int = 3; -pub const ALG_SET_AEAD_ASSOCLEN: c_int = 4; -pub const ALG_SET_AEAD_AUTHSIZE: c_int = 5; -pub const ALG_SET_DRBG_ENTROPY: c_int = 6; - -pub const ALG_OP_DECRYPT: c_int = 0; -pub const ALG_OP_ENCRYPT: c_int = 1; - -// sys/mman.h -pub const MLOCK_ONFAULT: c_int = 0x01; - -// uapi/linux/vm_sockets.h -pub const VMADDR_CID_ANY: c_uint = 0xFFFFFFFF; -pub const VMADDR_CID_HYPERVISOR: c_uint = 0; -pub const VMADDR_CID_LOCAL: c_uint = 1; -pub const VMADDR_CID_HOST: c_uint = 2; -pub const VMADDR_PORT_ANY: c_uint = 0xFFFFFFFF; - -// uapi/linux/inotify.h -pub const IN_ACCESS: u32 = 0x0000_0001; -pub const IN_MODIFY: u32 = 0x0000_0002; -pub const IN_ATTRIB: u32 = 0x0000_0004; -pub const IN_CLOSE_WRITE: u32 = 0x0000_0008; -pub const IN_CLOSE_NOWRITE: u32 = 0x0000_0010; -pub const IN_CLOSE: u32 = IN_CLOSE_WRITE | IN_CLOSE_NOWRITE; -pub const IN_OPEN: u32 = 0x0000_0020; -pub const IN_MOVED_FROM: u32 = 0x0000_0040; -pub const IN_MOVED_TO: u32 = 0x0000_0080; -pub const IN_MOVE: u32 = IN_MOVED_FROM | IN_MOVED_TO; -pub const IN_CREATE: u32 = 0x0000_0100; -pub const IN_DELETE: u32 = 0x0000_0200; -pub const IN_DELETE_SELF: u32 = 0x0000_0400; -pub const IN_MOVE_SELF: u32 = 0x0000_0800; -pub const IN_UNMOUNT: u32 = 0x0000_2000; -pub const IN_Q_OVERFLOW: u32 = 0x0000_4000; -pub const IN_IGNORED: u32 = 0x0000_8000; -pub const IN_ONLYDIR: u32 = 0x0100_0000; -pub const IN_DONT_FOLLOW: u32 = 0x0200_0000; -pub const IN_EXCL_UNLINK: u32 = 0x0400_0000; - -pub const IN_MASK_CREATE: u32 = 0x1000_0000; -pub const IN_MASK_ADD: u32 = 0x2000_0000; -pub const IN_ISDIR: u32 = 0x4000_0000; -pub const IN_ONESHOT: u32 = 0x8000_0000; - -pub const IN_ALL_EVENTS: u32 = IN_ACCESS - | IN_MODIFY - | IN_ATTRIB - | IN_CLOSE_WRITE - | IN_CLOSE_NOWRITE - | IN_OPEN - | IN_MOVED_FROM - | IN_MOVED_TO - | IN_DELETE - | IN_CREATE - | IN_DELETE_SELF - | IN_MOVE_SELF; - -pub const IN_CLOEXEC: c_int = O_CLOEXEC; -pub const IN_NONBLOCK: c_int = O_NONBLOCK; - -pub const FUTEX_WAIT: c_int = 0; -pub const FUTEX_WAKE: c_int = 1; -pub const FUTEX_FD: c_int = 2; -pub const FUTEX_REQUEUE: c_int = 3; -pub const FUTEX_CMP_REQUEUE: c_int = 4; -pub const FUTEX_WAKE_OP: c_int = 5; -pub const FUTEX_LOCK_PI: c_int = 6; -pub const FUTEX_UNLOCK_PI: c_int = 7; -pub const FUTEX_TRYLOCK_PI: c_int = 8; -pub const FUTEX_WAIT_BITSET: c_int = 9; -pub const FUTEX_WAKE_BITSET: c_int = 10; -pub const FUTEX_WAIT_REQUEUE_PI: c_int = 11; -pub const FUTEX_CMP_REQUEUE_PI: c_int = 12; -pub const FUTEX_LOCK_PI2: c_int = 13; - -pub const FUTEX_PRIVATE_FLAG: c_int = 128; -pub const FUTEX_CLOCK_REALTIME: c_int = 256; -pub const 
FUTEX_CMD_MASK: c_int = !(FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME); - -// linux/errqueue.h -pub const SO_EE_ORIGIN_NONE: u8 = 0; -pub const SO_EE_ORIGIN_LOCAL: u8 = 1; -pub const SO_EE_ORIGIN_ICMP: u8 = 2; -pub const SO_EE_ORIGIN_ICMP6: u8 = 3; -pub const SO_EE_ORIGIN_TXSTATUS: u8 = 4; -pub const SO_EE_ORIGIN_TIMESTAMPING: u8 = SO_EE_ORIGIN_TXSTATUS; - -// errno.h -pub const EPERM: c_int = 1; -pub const ENOENT: c_int = 2; -pub const ESRCH: c_int = 3; -pub const EINTR: c_int = 4; -pub const EIO: c_int = 5; -pub const ENXIO: c_int = 6; -pub const E2BIG: c_int = 7; -pub const ENOEXEC: c_int = 8; -pub const EBADF: c_int = 9; -pub const ECHILD: c_int = 10; -pub const EAGAIN: c_int = 11; -pub const ENOMEM: c_int = 12; -pub const EACCES: c_int = 13; -pub const EFAULT: c_int = 14; -pub const ENOTBLK: c_int = 15; -pub const EBUSY: c_int = 16; -pub const EEXIST: c_int = 17; -pub const EXDEV: c_int = 18; -pub const ENODEV: c_int = 19; -pub const ENOTDIR: c_int = 20; -pub const EISDIR: c_int = 21; -pub const EINVAL: c_int = 22; -pub const ENFILE: c_int = 23; -pub const EMFILE: c_int = 24; -pub const ENOTTY: c_int = 25; -pub const ETXTBSY: c_int = 26; -pub const EFBIG: c_int = 27; -pub const ENOSPC: c_int = 28; -pub const ESPIPE: c_int = 29; -pub const EROFS: c_int = 30; -pub const EMLINK: c_int = 31; -pub const EPIPE: c_int = 32; -pub const EDOM: c_int = 33; -pub const ERANGE: c_int = 34; -pub const EWOULDBLOCK: c_int = EAGAIN; - -pub const PRIO_PROCESS: c_int = 0; -pub const PRIO_PGRP: c_int = 1; -pub const PRIO_USER: c_int = 2; - -// linux/sched.h -pub const SCHED_NORMAL: c_int = 0; -pub const SCHED_FIFO: c_int = 1; -pub const SCHED_RR: c_int = 2; -pub const SCHED_BATCH: c_int = 3; -pub const SCHED_IDLE: c_int = 5; -pub const SCHED_DEADLINE: c_int = 6; - -pub const SCHED_RESET_ON_FORK: c_int = 0x40000000; - -pub const CLONE_PIDFD: c_int = 0x1000; -pub const CLONE_CLEAR_SIGHAND: c_ulonglong = 0x100000000; -pub const CLONE_INTO_CGROUP: c_ulonglong = 0x200000000; - -// linux/membarrier.h -pub const MEMBARRIER_CMD_QUERY: c_int = 0; -pub const MEMBARRIER_CMD_GLOBAL: c_int = 1 << 0; -pub const MEMBARRIER_CMD_GLOBAL_EXPEDITED: c_int = 1 << 1; -pub const MEMBARRIER_CMD_REGISTER_GLOBAL_EXPEDITED: c_int = 1 << 2; -pub const MEMBARRIER_CMD_PRIVATE_EXPEDITED: c_int = 1 << 3; -pub const MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED: c_int = 1 << 4; -pub const MEMBARRIER_CMD_PRIVATE_EXPEDITED_SYNC_CORE: c_int = 1 << 5; -pub const MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_SYNC_CORE: c_int = 1 << 6; -pub const MEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ: c_int = 1 << 7; -pub const MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_RSEQ: c_int = 1 << 8; - -// linux/mempolicy.h -pub const MPOL_DEFAULT: c_int = 0; -pub const MPOL_PREFERRED: c_int = 1; -pub const MPOL_BIND: c_int = 2; -pub const MPOL_INTERLEAVE: c_int = 3; -pub const MPOL_LOCAL: c_int = 4; -pub const MPOL_F_NUMA_BALANCING: c_int = 1 << 13; -pub const MPOL_F_RELATIVE_NODES: c_int = 1 << 14; -pub const MPOL_F_STATIC_NODES: c_int = 1 << 15; - -// bits/seek_constants.h -pub const SEEK_DATA: c_int = 3; -pub const SEEK_HOLE: c_int = 4; - -// sys/socket.h -pub const AF_NFC: c_int = 39; -pub const AF_VSOCK: c_int = 40; -pub const PF_NFC: c_int = AF_NFC; -pub const PF_VSOCK: c_int = AF_VSOCK; - -pub const SOMAXCONN: c_int = 128; - -// sys/system_properties.h -pub const PROP_VALUE_MAX: c_int = 92; -pub const PROP_NAME_MAX: c_int = 32; - -// sys/prctl.h -pub const PR_SET_PDEATHSIG: c_int = 1; -pub const PR_GET_PDEATHSIG: c_int = 2; -pub const PR_GET_DUMPABLE: c_int = 3; -pub 
const PR_SET_DUMPABLE: c_int = 4; -pub const PR_GET_UNALIGN: c_int = 5; -pub const PR_SET_UNALIGN: c_int = 6; -pub const PR_UNALIGN_NOPRINT: c_int = 1; -pub const PR_UNALIGN_SIGBUS: c_int = 2; -pub const PR_GET_KEEPCAPS: c_int = 7; -pub const PR_SET_KEEPCAPS: c_int = 8; -pub const PR_GET_FPEMU: c_int = 9; -pub const PR_SET_FPEMU: c_int = 10; -pub const PR_FPEMU_NOPRINT: c_int = 1; -pub const PR_FPEMU_SIGFPE: c_int = 2; -pub const PR_GET_FPEXC: c_int = 11; -pub const PR_SET_FPEXC: c_int = 12; -pub const PR_FP_EXC_SW_ENABLE: c_int = 0x80; -pub const PR_FP_EXC_DIV: c_int = 0x010000; -pub const PR_FP_EXC_OVF: c_int = 0x020000; -pub const PR_FP_EXC_UND: c_int = 0x040000; -pub const PR_FP_EXC_RES: c_int = 0x080000; -pub const PR_FP_EXC_INV: c_int = 0x100000; -pub const PR_FP_EXC_DISABLED: c_int = 0; -pub const PR_FP_EXC_NONRECOV: c_int = 1; -pub const PR_FP_EXC_ASYNC: c_int = 2; -pub const PR_FP_EXC_PRECISE: c_int = 3; -pub const PR_GET_TIMING: c_int = 13; -pub const PR_SET_TIMING: c_int = 14; -pub const PR_TIMING_STATISTICAL: c_int = 0; -pub const PR_TIMING_TIMESTAMP: c_int = 1; -pub const PR_SET_NAME: c_int = 15; -pub const PR_GET_NAME: c_int = 16; -pub const PR_GET_ENDIAN: c_int = 19; -pub const PR_SET_ENDIAN: c_int = 20; -pub const PR_ENDIAN_BIG: c_int = 0; -pub const PR_ENDIAN_LITTLE: c_int = 1; -pub const PR_ENDIAN_PPC_LITTLE: c_int = 2; -pub const PR_GET_SECCOMP: c_int = 21; -pub const PR_SET_SECCOMP: c_int = 22; -pub const PR_CAPBSET_READ: c_int = 23; -pub const PR_CAPBSET_DROP: c_int = 24; -pub const PR_GET_TSC: c_int = 25; -pub const PR_SET_TSC: c_int = 26; -pub const PR_TSC_ENABLE: c_int = 1; -pub const PR_TSC_SIGSEGV: c_int = 2; -pub const PR_GET_SECUREBITS: c_int = 27; -pub const PR_SET_SECUREBITS: c_int = 28; -pub const PR_SET_TIMERSLACK: c_int = 29; -pub const PR_GET_TIMERSLACK: c_int = 30; -pub const PR_TASK_PERF_EVENTS_DISABLE: c_int = 31; -pub const PR_TASK_PERF_EVENTS_ENABLE: c_int = 32; -pub const PR_MCE_KILL: c_int = 33; -pub const PR_MCE_KILL_CLEAR: c_int = 0; -pub const PR_MCE_KILL_SET: c_int = 1; -pub const PR_MCE_KILL_LATE: c_int = 0; -pub const PR_MCE_KILL_EARLY: c_int = 1; -pub const PR_MCE_KILL_DEFAULT: c_int = 2; -pub const PR_MCE_KILL_GET: c_int = 34; -pub const PR_SET_MM: c_int = 35; -pub const PR_SET_MM_START_CODE: c_int = 1; -pub const PR_SET_MM_END_CODE: c_int = 2; -pub const PR_SET_MM_START_DATA: c_int = 3; -pub const PR_SET_MM_END_DATA: c_int = 4; -pub const PR_SET_MM_START_STACK: c_int = 5; -pub const PR_SET_MM_START_BRK: c_int = 6; -pub const PR_SET_MM_BRK: c_int = 7; -pub const PR_SET_MM_ARG_START: c_int = 8; -pub const PR_SET_MM_ARG_END: c_int = 9; -pub const PR_SET_MM_ENV_START: c_int = 10; -pub const PR_SET_MM_ENV_END: c_int = 11; -pub const PR_SET_MM_AUXV: c_int = 12; -pub const PR_SET_MM_EXE_FILE: c_int = 13; -pub const PR_SET_MM_MAP: c_int = 14; -pub const PR_SET_MM_MAP_SIZE: c_int = 15; -pub const PR_SET_PTRACER: c_int = 0x59616d61; -pub const PR_SET_PTRACER_ANY: c_ulong = 0xffffffffffffffff; -pub const PR_SET_CHILD_SUBREAPER: c_int = 36; -pub const PR_GET_CHILD_SUBREAPER: c_int = 37; -pub const PR_SET_NO_NEW_PRIVS: c_int = 38; -pub const PR_GET_NO_NEW_PRIVS: c_int = 39; -pub const PR_GET_TID_ADDRESS: c_int = 40; -pub const PR_SET_THP_DISABLE: c_int = 41; -pub const PR_GET_THP_DISABLE: c_int = 42; -pub const PR_MPX_ENABLE_MANAGEMENT: c_int = 43; -pub const PR_MPX_DISABLE_MANAGEMENT: c_int = 44; -pub const PR_SET_FP_MODE: c_int = 45; -pub const PR_GET_FP_MODE: c_int = 46; -pub const PR_FP_MODE_FR: c_int = 1 << 0; -pub const PR_FP_MODE_FRE: c_int = 1 
<< 1; -pub const PR_CAP_AMBIENT: c_int = 47; -pub const PR_CAP_AMBIENT_IS_SET: c_int = 1; -pub const PR_CAP_AMBIENT_RAISE: c_int = 2; -pub const PR_CAP_AMBIENT_LOWER: c_int = 3; -pub const PR_CAP_AMBIENT_CLEAR_ALL: c_int = 4; -pub const PR_SVE_SET_VL: c_int = 50; -pub const PR_SVE_SET_VL_ONEXEC: c_int = 1 << 18; -pub const PR_SVE_GET_VL: c_int = 51; -pub const PR_SVE_VL_LEN_MASK: c_int = 0xffff; -pub const PR_SVE_VL_INHERIT: c_int = 1 << 17; -pub const PR_GET_SPECULATION_CTRL: c_int = 52; -pub const PR_SET_SPECULATION_CTRL: c_int = 53; -pub const PR_SPEC_STORE_BYPASS: c_int = 0; -pub const PR_SPEC_INDIRECT_BRANCH: c_int = 1; -pub const PR_SPEC_L1D_FLUSH: c_int = 2; -pub const PR_SPEC_NOT_AFFECTED: c_int = 0; -pub const PR_SPEC_PRCTL: c_ulong = 1 << 0; -pub const PR_SPEC_ENABLE: c_ulong = 1 << 1; -pub const PR_SPEC_DISABLE: c_ulong = 1 << 2; -pub const PR_SPEC_FORCE_DISABLE: c_ulong = 1 << 3; -pub const PR_SPEC_DISABLE_NOEXEC: c_ulong = 1 << 4; -pub const PR_PAC_RESET_KEYS: c_int = 54; -pub const PR_PAC_APIAKEY: c_ulong = 1 << 0; -pub const PR_PAC_APIBKEY: c_ulong = 1 << 1; -pub const PR_PAC_APDAKEY: c_ulong = 1 << 2; -pub const PR_PAC_APDBKEY: c_ulong = 1 << 3; -pub const PR_PAC_APGAKEY: c_ulong = 1 << 4; -pub const PR_SET_TAGGED_ADDR_CTRL: c_int = 55; -pub const PR_GET_TAGGED_ADDR_CTRL: c_int = 56; -pub const PR_TAGGED_ADDR_ENABLE: c_ulong = 1 << 0; -pub const PR_MTE_TCF_NONE: c_ulong = 0; -pub const PR_MTE_TCF_SYNC: c_ulong = 1 << 1; -pub const PR_MTE_TCF_ASYNC: c_ulong = 1 << 2; -pub const PR_MTE_TCF_MASK: c_ulong = PR_MTE_TCF_SYNC | PR_MTE_TCF_ASYNC; -pub const PR_MTE_TAG_SHIFT: c_ulong = 3; -pub const PR_MTE_TAG_MASK: c_ulong = 0xffff << PR_MTE_TAG_SHIFT; -pub const PR_MTE_TCF_SHIFT: c_ulong = 1; -pub const PR_SET_IO_FLUSHER: c_int = 57; -pub const PR_GET_IO_FLUSHER: c_int = 58; -pub const PR_SET_SYSCALL_USER_DISPATCH: c_int = 59; -pub const PR_SYS_DISPATCH_OFF: c_int = 0; -pub const PR_SYS_DISPATCH_ON: c_int = 1; -pub const SYSCALL_DISPATCH_FILTER_ALLOW: c_int = 0; -pub const SYSCALL_DISPATCH_FILTER_BLOCK: c_int = 1; -pub const PR_PAC_SET_ENABLED_KEYS: c_int = 60; -pub const PR_PAC_GET_ENABLED_KEYS: c_int = 61; -pub const PR_SCHED_CORE: c_int = 62; -pub const PR_SCHED_CORE_GET: c_int = 0; -pub const PR_SCHED_CORE_CREATE: c_int = 1; -pub const PR_SCHED_CORE_SHARE_TO: c_int = 2; -pub const PR_SCHED_CORE_SHARE_FROM: c_int = 3; -pub const PR_SCHED_CORE_MAX: c_int = 4; -pub const PR_SCHED_CORE_SCOPE_THREAD: c_int = 0; -pub const PR_SCHED_CORE_SCOPE_THREAD_GROUP: c_int = 1; -pub const PR_SCHED_CORE_SCOPE_PROCESS_GROUP: c_int = 2; -pub const PR_SME_SET_VL: c_int = 63; -pub const PR_SME_SET_VL_ONEXEC: c_int = 1 << 18; -pub const PR_SME_GET_VL: c_int = 64; -pub const PR_SME_VL_LEN_MASK: c_int = 0xffff; -pub const PR_SME_VL_INHERIT: c_int = 1 << 17; -pub const PR_SET_MDWE: c_int = 65; -pub const PR_MDWE_REFUSE_EXEC_GAIN: c_ulong = 1 << 0; -pub const PR_MDWE_NO_INHERIT: c_ulong = 1 << 1; -pub const PR_GET_MDWE: c_int = 66; -pub const PR_SET_VMA: c_int = 0x53564d41; -pub const PR_SET_VMA_ANON_NAME: c_int = 0; -pub const PR_GET_AUXV: c_int = 0x41555856; -pub const PR_SET_MEMORY_MERGE: c_int = 67; -pub const PR_GET_MEMORY_MERGE: c_int = 68; -pub const PR_RISCV_V_SET_CONTROL: c_int = 69; -pub const PR_RISCV_V_GET_CONTROL: c_int = 70; -pub const PR_RISCV_V_VSTATE_CTRL_DEFAULT: c_int = 0; -pub const PR_RISCV_V_VSTATE_CTRL_OFF: c_int = 1; -pub const PR_RISCV_V_VSTATE_CTRL_ON: c_int = 2; -pub const PR_RISCV_V_VSTATE_CTRL_INHERIT: c_int = 1 << 4; -pub const PR_RISCV_V_VSTATE_CTRL_CUR_MASK: c_int = 0x3; 
-pub const PR_RISCV_V_VSTATE_CTRL_NEXT_MASK: c_int = 0xc; -pub const PR_RISCV_V_VSTATE_CTRL_MASK: c_int = 0x1f; - -// linux/if_addr.h -pub const IFA_UNSPEC: c_ushort = 0; -pub const IFA_ADDRESS: c_ushort = 1; -pub const IFA_LOCAL: c_ushort = 2; -pub const IFA_LABEL: c_ushort = 3; -pub const IFA_BROADCAST: c_ushort = 4; -pub const IFA_ANYCAST: c_ushort = 5; -pub const IFA_CACHEINFO: c_ushort = 6; -pub const IFA_MULTICAST: c_ushort = 7; - -pub const IFA_F_SECONDARY: u32 = 0x01; -pub const IFA_F_TEMPORARY: u32 = 0x01; -pub const IFA_F_NODAD: u32 = 0x02; -pub const IFA_F_OPTIMISTIC: u32 = 0x04; -pub const IFA_F_DADFAILED: u32 = 0x08; -pub const IFA_F_HOMEADDRESS: u32 = 0x10; -pub const IFA_F_DEPRECATED: u32 = 0x20; -pub const IFA_F_TENTATIVE: u32 = 0x40; -pub const IFA_F_PERMANENT: u32 = 0x80; - -// linux/if_link.h -pub const IFLA_UNSPEC: c_ushort = 0; -pub const IFLA_ADDRESS: c_ushort = 1; -pub const IFLA_BROADCAST: c_ushort = 2; -pub const IFLA_IFNAME: c_ushort = 3; -pub const IFLA_MTU: c_ushort = 4; -pub const IFLA_LINK: c_ushort = 5; -pub const IFLA_QDISC: c_ushort = 6; -pub const IFLA_STATS: c_ushort = 7; -pub const IFLA_COST: c_ushort = 8; -pub const IFLA_PRIORITY: c_ushort = 9; -pub const IFLA_MASTER: c_ushort = 10; -pub const IFLA_WIRELESS: c_ushort = 11; -pub const IFLA_PROTINFO: c_ushort = 12; -pub const IFLA_TXQLEN: c_ushort = 13; -pub const IFLA_MAP: c_ushort = 14; -pub const IFLA_WEIGHT: c_ushort = 15; -pub const IFLA_OPERSTATE: c_ushort = 16; -pub const IFLA_LINKMODE: c_ushort = 17; -pub const IFLA_LINKINFO: c_ushort = 18; -pub const IFLA_NET_NS_PID: c_ushort = 19; -pub const IFLA_IFALIAS: c_ushort = 20; -pub const IFLA_NUM_VF: c_ushort = 21; -pub const IFLA_VFINFO_LIST: c_ushort = 22; -pub const IFLA_STATS64: c_ushort = 23; -pub const IFLA_VF_PORTS: c_ushort = 24; -pub const IFLA_PORT_SELF: c_ushort = 25; -pub const IFLA_AF_SPEC: c_ushort = 26; -pub const IFLA_GROUP: c_ushort = 27; -pub const IFLA_NET_NS_FD: c_ushort = 28; -pub const IFLA_EXT_MASK: c_ushort = 29; -pub const IFLA_PROMISCUITY: c_ushort = 30; -pub const IFLA_NUM_TX_QUEUES: c_ushort = 31; -pub const IFLA_NUM_RX_QUEUES: c_ushort = 32; -pub const IFLA_CARRIER: c_ushort = 33; -pub const IFLA_PHYS_PORT_ID: c_ushort = 34; -pub const IFLA_CARRIER_CHANGES: c_ushort = 35; -pub const IFLA_PHYS_SWITCH_ID: c_ushort = 36; -pub const IFLA_LINK_NETNSID: c_ushort = 37; -pub const IFLA_PHYS_PORT_NAME: c_ushort = 38; -pub const IFLA_PROTO_DOWN: c_ushort = 39; -pub const IFLA_GSO_MAX_SEGS: c_ushort = 40; -pub const IFLA_GSO_MAX_SIZE: c_ushort = 41; -pub const IFLA_PAD: c_ushort = 42; -pub const IFLA_XDP: c_ushort = 43; -pub const IFLA_EVENT: c_ushort = 44; -pub const IFLA_NEW_NETNSID: c_ushort = 45; -pub const IFLA_IF_NETNSID: c_ushort = 46; -pub const IFLA_TARGET_NETNSID: c_ushort = IFLA_IF_NETNSID; -pub const IFLA_CARRIER_UP_COUNT: c_ushort = 47; -pub const IFLA_CARRIER_DOWN_COUNT: c_ushort = 48; -pub const IFLA_NEW_IFINDEX: c_ushort = 49; -pub const IFLA_MIN_MTU: c_ushort = 50; -pub const IFLA_MAX_MTU: c_ushort = 51; -pub const IFLA_PROP_LIST: c_ushort = 52; -pub const IFLA_ALT_IFNAME: c_ushort = 53; -pub const IFLA_PERM_ADDRESS: c_ushort = 54; -pub const IFLA_PROTO_DOWN_REASON: c_ushort = 55; -pub const IFLA_PARENT_DEV_NAME: c_ushort = 56; -pub const IFLA_PARENT_DEV_BUS_NAME: c_ushort = 57; -pub const IFLA_GRO_MAX_SIZE: c_ushort = 58; -pub const IFLA_TSO_MAX_SIZE: c_ushort = 59; -pub const IFLA_TSO_MAX_SEGS: c_ushort = 60; -pub const IFLA_ALLMULTI: c_ushort = 61; -pub const IFLA_DEVLINK_PORT: c_ushort = 62; -pub const 
IFLA_GSO_IPV4_MAX_SIZE: c_ushort = 63; -pub const IFLA_GRO_IPV4_MAX_SIZE: c_ushort = 64; - -pub const IFLA_INFO_UNSPEC: c_ushort = 0; -pub const IFLA_INFO_KIND: c_ushort = 1; -pub const IFLA_INFO_DATA: c_ushort = 2; -pub const IFLA_INFO_XSTATS: c_ushort = 3; -pub const IFLA_INFO_SLAVE_KIND: c_ushort = 4; -pub const IFLA_INFO_SLAVE_DATA: c_ushort = 5; - -// linux/rtnetlink.h -pub const TCA_UNSPEC: c_ushort = 0; -pub const TCA_KIND: c_ushort = 1; -pub const TCA_OPTIONS: c_ushort = 2; -pub const TCA_STATS: c_ushort = 3; -pub const TCA_XSTATS: c_ushort = 4; -pub const TCA_RATE: c_ushort = 5; -pub const TCA_FCNT: c_ushort = 6; -pub const TCA_STATS2: c_ushort = 7; -pub const TCA_STAB: c_ushort = 8; - -pub const RTM_NEWLINK: u16 = 16; -pub const RTM_DELLINK: u16 = 17; -pub const RTM_GETLINK: u16 = 18; -pub const RTM_SETLINK: u16 = 19; -pub const RTM_NEWADDR: u16 = 20; -pub const RTM_DELADDR: u16 = 21; -pub const RTM_GETADDR: u16 = 22; -pub const RTM_NEWROUTE: u16 = 24; -pub const RTM_DELROUTE: u16 = 25; -pub const RTM_GETROUTE: u16 = 26; -pub const RTM_NEWNEIGH: u16 = 28; -pub const RTM_DELNEIGH: u16 = 29; -pub const RTM_GETNEIGH: u16 = 30; -pub const RTM_NEWRULE: u16 = 32; -pub const RTM_DELRULE: u16 = 33; -pub const RTM_GETRULE: u16 = 34; -pub const RTM_NEWQDISC: u16 = 36; -pub const RTM_DELQDISC: u16 = 37; -pub const RTM_GETQDISC: u16 = 38; -pub const RTM_NEWTCLASS: u16 = 40; -pub const RTM_DELTCLASS: u16 = 41; -pub const RTM_GETTCLASS: u16 = 42; -pub const RTM_NEWTFILTER: u16 = 44; -pub const RTM_DELTFILTER: u16 = 45; -pub const RTM_GETTFILTER: u16 = 46; -pub const RTM_NEWACTION: u16 = 48; -pub const RTM_DELACTION: u16 = 49; -pub const RTM_GETACTION: u16 = 50; -pub const RTM_NEWPREFIX: u16 = 52; -pub const RTM_GETMULTICAST: u16 = 58; -pub const RTM_GETANYCAST: u16 = 62; -pub const RTM_NEWNEIGHTBL: u16 = 64; -pub const RTM_GETNEIGHTBL: u16 = 66; -pub const RTM_SETNEIGHTBL: u16 = 67; -pub const RTM_NEWNDUSEROPT: u16 = 68; -pub const RTM_NEWADDRLABEL: u16 = 72; -pub const RTM_DELADDRLABEL: u16 = 73; -pub const RTM_GETADDRLABEL: u16 = 74; -pub const RTM_GETDCB: u16 = 78; -pub const RTM_SETDCB: u16 = 79; -pub const RTM_NEWNETCONF: u16 = 80; -pub const RTM_GETNETCONF: u16 = 82; -pub const RTM_NEWMDB: u16 = 84; -pub const RTM_DELMDB: u16 = 85; -pub const RTM_GETMDB: u16 = 86; -pub const RTM_NEWNSID: u16 = 88; -pub const RTM_DELNSID: u16 = 89; -pub const RTM_GETNSID: u16 = 90; - -pub const RTM_F_NOTIFY: c_uint = 0x100; -pub const RTM_F_CLONED: c_uint = 0x200; -pub const RTM_F_EQUALIZE: c_uint = 0x400; -pub const RTM_F_PREFIX: c_uint = 0x800; - -pub const RTA_UNSPEC: c_ushort = 0; -pub const RTA_DST: c_ushort = 1; -pub const RTA_SRC: c_ushort = 2; -pub const RTA_IIF: c_ushort = 3; -pub const RTA_OIF: c_ushort = 4; -pub const RTA_GATEWAY: c_ushort = 5; -pub const RTA_PRIORITY: c_ushort = 6; -pub const RTA_PREFSRC: c_ushort = 7; -pub const RTA_METRICS: c_ushort = 8; -pub const RTA_MULTIPATH: c_ushort = 9; -pub const RTA_PROTOINFO: c_ushort = 10; // No longer used -pub const RTA_FLOW: c_ushort = 11; -pub const RTA_CACHEINFO: c_ushort = 12; -pub const RTA_SESSION: c_ushort = 13; // No longer used -pub const RTA_MP_ALGO: c_ushort = 14; // No longer used -pub const RTA_TABLE: c_ushort = 15; -pub const RTA_MARK: c_ushort = 16; -pub const RTA_MFC_STATS: c_ushort = 17; - -pub const RTN_UNSPEC: c_uchar = 0; -pub const RTN_UNICAST: c_uchar = 1; -pub const RTN_LOCAL: c_uchar = 2; -pub const RTN_BROADCAST: c_uchar = 3; -pub const RTN_ANYCAST: c_uchar = 4; -pub const RTN_MULTICAST: c_uchar = 5; -pub const 
RTN_BLACKHOLE: c_uchar = 6; -pub const RTN_UNREACHABLE: c_uchar = 7; -pub const RTN_PROHIBIT: c_uchar = 8; -pub const RTN_THROW: c_uchar = 9; -pub const RTN_NAT: c_uchar = 10; -pub const RTN_XRESOLVE: c_uchar = 11; - -pub const RTPROT_UNSPEC: c_uchar = 0; -pub const RTPROT_REDIRECT: c_uchar = 1; -pub const RTPROT_KERNEL: c_uchar = 2; -pub const RTPROT_BOOT: c_uchar = 3; -pub const RTPROT_STATIC: c_uchar = 4; - -pub const RT_SCOPE_UNIVERSE: c_uchar = 0; -pub const RT_SCOPE_SITE: c_uchar = 200; -pub const RT_SCOPE_LINK: c_uchar = 253; -pub const RT_SCOPE_HOST: c_uchar = 254; -pub const RT_SCOPE_NOWHERE: c_uchar = 255; - -pub const RT_TABLE_UNSPEC: c_uchar = 0; -pub const RT_TABLE_COMPAT: c_uchar = 252; -pub const RT_TABLE_DEFAULT: c_uchar = 253; -pub const RT_TABLE_MAIN: c_uchar = 254; -pub const RT_TABLE_LOCAL: c_uchar = 255; - -pub const RTMSG_NEWDEVICE: u32 = 0x11; -pub const RTMSG_DELDEVICE: u32 = 0x12; -pub const RTMSG_NEWROUTE: u32 = 0x21; -pub const RTMSG_DELROUTE: u32 = 0x22; - -pub const CTL_KERN: c_int = 1; -pub const CTL_VM: c_int = 2; -pub const CTL_NET: c_int = 3; -pub const CTL_FS: c_int = 5; -pub const CTL_DEBUG: c_int = 6; -pub const CTL_DEV: c_int = 7; -pub const CTL_BUS: c_int = 8; -pub const CTL_ABI: c_int = 9; -pub const CTL_CPU: c_int = 10; - -pub const CTL_BUS_ISA: c_int = 1; - -pub const INOTIFY_MAX_USER_INSTANCES: c_int = 1; -pub const INOTIFY_MAX_USER_WATCHES: c_int = 2; -pub const INOTIFY_MAX_QUEUED_EVENTS: c_int = 3; - -pub const KERN_OSTYPE: c_int = 1; -pub const KERN_OSRELEASE: c_int = 2; -pub const KERN_OSREV: c_int = 3; -pub const KERN_VERSION: c_int = 4; -pub const KERN_SECUREMASK: c_int = 5; -pub const KERN_PROF: c_int = 6; -pub const KERN_NODENAME: c_int = 7; -pub const KERN_DOMAINNAME: c_int = 8; -pub const KERN_PANIC: c_int = 15; -pub const KERN_REALROOTDEV: c_int = 16; -pub const KERN_SPARC_REBOOT: c_int = 21; -pub const KERN_CTLALTDEL: c_int = 22; -pub const KERN_PRINTK: c_int = 23; -pub const KERN_NAMETRANS: c_int = 24; -pub const KERN_PPC_HTABRECLAIM: c_int = 25; -pub const KERN_PPC_ZEROPAGED: c_int = 26; -pub const KERN_PPC_POWERSAVE_NAP: c_int = 27; -pub const KERN_MODPROBE: c_int = 28; -pub const KERN_SG_BIG_BUFF: c_int = 29; -pub const KERN_ACCT: c_int = 30; -pub const KERN_PPC_L2CR: c_int = 31; -pub const KERN_RTSIGNR: c_int = 32; -pub const KERN_RTSIGMAX: c_int = 33; -pub const KERN_SHMMAX: c_int = 34; -pub const KERN_MSGMAX: c_int = 35; -pub const KERN_MSGMNB: c_int = 36; -pub const KERN_MSGPOOL: c_int = 37; -pub const KERN_SYSRQ: c_int = 38; -pub const KERN_MAX_THREADS: c_int = 39; -pub const KERN_RANDOM: c_int = 40; -pub const KERN_SHMALL: c_int = 41; -pub const KERN_MSGMNI: c_int = 42; -pub const KERN_SEM: c_int = 43; -pub const KERN_SPARC_STOP_A: c_int = 44; -pub const KERN_SHMMNI: c_int = 45; -pub const KERN_OVERFLOWUID: c_int = 46; -pub const KERN_OVERFLOWGID: c_int = 47; -pub const KERN_SHMPATH: c_int = 48; -pub const KERN_HOTPLUG: c_int = 49; -pub const KERN_IEEE_EMULATION_WARNINGS: c_int = 50; -pub const KERN_S390_USER_DEBUG_LOGGING: c_int = 51; -pub const KERN_CORE_USES_PID: c_int = 52; -pub const KERN_TAINTED: c_int = 53; -pub const KERN_CADPID: c_int = 54; -pub const KERN_PIDMAX: c_int = 55; -pub const KERN_CORE_PATTERN: c_int = 56; -pub const KERN_PANIC_ON_OOPS: c_int = 57; -pub const KERN_HPPA_PWRSW: c_int = 58; -pub const KERN_HPPA_UNALIGNED: c_int = 59; -pub const KERN_PRINTK_RATELIMIT: c_int = 60; -pub const KERN_PRINTK_RATELIMIT_BURST: c_int = 61; -pub const KERN_PTY: c_int = 62; -pub const KERN_NGROUPS_MAX: c_int = 63; -pub 
const KERN_SPARC_SCONS_PWROFF: c_int = 64; -pub const KERN_HZ_TIMER: c_int = 65; -pub const KERN_UNKNOWN_NMI_PANIC: c_int = 66; -pub const KERN_BOOTLOADER_TYPE: c_int = 67; -pub const KERN_RANDOMIZE: c_int = 68; -pub const KERN_SETUID_DUMPABLE: c_int = 69; -pub const KERN_SPIN_RETRY: c_int = 70; -pub const KERN_ACPI_VIDEO_FLAGS: c_int = 71; -pub const KERN_IA64_UNALIGNED: c_int = 72; -pub const KERN_COMPAT_LOG: c_int = 73; -pub const KERN_MAX_LOCK_DEPTH: c_int = 74; - -pub const VM_OVERCOMMIT_MEMORY: c_int = 5; -pub const VM_PAGE_CLUSTER: c_int = 10; -pub const VM_DIRTY_BACKGROUND: c_int = 11; -pub const VM_DIRTY_RATIO: c_int = 12; -pub const VM_DIRTY_WB_CS: c_int = 13; -pub const VM_DIRTY_EXPIRE_CS: c_int = 14; -pub const VM_NR_PDFLUSH_THREADS: c_int = 15; -pub const VM_OVERCOMMIT_RATIO: c_int = 16; -pub const VM_PAGEBUF: c_int = 17; -pub const VM_HUGETLB_PAGES: c_int = 18; -pub const VM_SWAPPINESS: c_int = 19; -pub const VM_LOWMEM_RESERVE_RATIO: c_int = 20; -pub const VM_MIN_FREE_KBYTES: c_int = 21; -pub const VM_MAX_MAP_COUNT: c_int = 22; -pub const VM_LAPTOP_MODE: c_int = 23; -pub const VM_BLOCK_DUMP: c_int = 24; -pub const VM_HUGETLB_GROUP: c_int = 25; -pub const VM_VFS_CACHE_PRESSURE: c_int = 26; -pub const VM_LEGACY_VA_LAYOUT: c_int = 27; -pub const VM_SWAP_TOKEN_TIMEOUT: c_int = 28; -pub const VM_DROP_PAGECACHE: c_int = 29; -pub const VM_PERCPU_PAGELIST_FRACTION: c_int = 30; -pub const VM_ZONE_RECLAIM_MODE: c_int = 31; -pub const VM_MIN_UNMAPPED: c_int = 32; -pub const VM_PANIC_ON_OOM: c_int = 33; -pub const VM_VDSO_ENABLED: c_int = 34; - -pub const NET_CORE: c_int = 1; -pub const NET_ETHER: c_int = 2; -pub const NET_802: c_int = 3; -pub const NET_UNIX: c_int = 4; -pub const NET_IPV4: c_int = 5; -pub const NET_IPX: c_int = 6; -pub const NET_ATALK: c_int = 7; -pub const NET_NETROM: c_int = 8; -pub const NET_AX25: c_int = 9; -pub const NET_BRIDGE: c_int = 10; -pub const NET_ROSE: c_int = 11; -pub const NET_IPV6: c_int = 12; -pub const NET_X25: c_int = 13; -pub const NET_TR: c_int = 14; -pub const NET_DECNET: c_int = 15; -pub const NET_ECONET: c_int = 16; -pub const NET_SCTP: c_int = 17; -pub const NET_LLC: c_int = 18; -pub const NET_NETFILTER: c_int = 19; -pub const NET_DCCP: c_int = 20; -pub const HUGETLB_FLAG_ENCODE_SHIFT: c_int = 26; -pub const MAP_HUGE_SHIFT: c_int = HUGETLB_FLAG_ENCODE_SHIFT; - -// include/linux/sched.h -pub const PF_VCPU: c_int = 0x00000001; -pub const PF_IDLE: c_int = 0x00000002; -pub const PF_EXITING: c_int = 0x00000004; -pub const PF_POSTCOREDUMP: c_int = 0x00000008; -pub const PF_IO_WORKER: c_int = 0x00000010; -pub const PF_WQ_WORKER: c_int = 0x00000020; -pub const PF_FORKNOEXEC: c_int = 0x00000040; -pub const PF_MCE_PROCESS: c_int = 0x00000080; -pub const PF_SUPERPRIV: c_int = 0x00000100; -pub const PF_DUMPCORE: c_int = 0x00000200; -pub const PF_SIGNALED: c_int = 0x00000400; -pub const PF_MEMALLOC: c_int = 0x00000800; -pub const PF_NPROC_EXCEEDED: c_int = 0x00001000; -pub const PF_USED_MATH: c_int = 0x00002000; -pub const PF_USER_WORKER: c_int = 0x00004000; -pub const PF_NOFREEZE: c_int = 0x00008000; - -pub const PF_KSWAPD: c_int = 0x00020000; -pub const PF_MEMALLOC_NOFS: c_int = 0x00040000; -pub const PF_MEMALLOC_NOIO: c_int = 0x00080000; -pub const PF_LOCAL_THROTTLE: c_int = 0x00100000; -pub const PF_KTHREAD: c_int = 0x00200000; -pub const PF_RANDOMIZE: c_int = 0x00400000; - -pub const PF_NO_SETAFFINITY: c_int = 0x04000000; -pub const PF_MCE_EARLY: c_int = 0x08000000; -pub const PF_MEMALLOC_PIN: c_int = 0x10000000; - -pub const PF_SUSPEND_TASK: c_int = 
0x80000000; - -pub const KLOG_CLOSE: c_int = 0; -pub const KLOG_OPEN: c_int = 1; -pub const KLOG_READ: c_int = 2; -pub const KLOG_READ_ALL: c_int = 3; -pub const KLOG_READ_CLEAR: c_int = 4; -pub const KLOG_CLEAR: c_int = 5; -pub const KLOG_CONSOLE_OFF: c_int = 6; -pub const KLOG_CONSOLE_ON: c_int = 7; -pub const KLOG_CONSOLE_LEVEL: c_int = 8; -pub const KLOG_SIZE_UNREAD: c_int = 9; -pub const KLOG_SIZE_BUFFER: c_int = 10; - -// From NDK's linux/auxvec.h -pub const AT_NULL: c_ulong = 0; -pub const AT_IGNORE: c_ulong = 1; -pub const AT_EXECFD: c_ulong = 2; -pub const AT_PHDR: c_ulong = 3; -pub const AT_PHENT: c_ulong = 4; -pub const AT_PHNUM: c_ulong = 5; -pub const AT_PAGESZ: c_ulong = 6; -pub const AT_BASE: c_ulong = 7; -pub const AT_FLAGS: c_ulong = 8; -pub const AT_ENTRY: c_ulong = 9; -pub const AT_NOTELF: c_ulong = 10; -pub const AT_UID: c_ulong = 11; -pub const AT_EUID: c_ulong = 12; -pub const AT_GID: c_ulong = 13; -pub const AT_EGID: c_ulong = 14; -pub const AT_PLATFORM: c_ulong = 15; -pub const AT_HWCAP: c_ulong = 16; -pub const AT_CLKTCK: c_ulong = 17; -pub const AT_SECURE: c_ulong = 23; -pub const AT_BASE_PLATFORM: c_ulong = 24; -pub const AT_RANDOM: c_ulong = 25; -pub const AT_HWCAP2: c_ulong = 26; -pub const AT_RSEQ_FEATURE_SIZE: c_ulong = 27; -pub const AT_RSEQ_ALIGN: c_ulong = 28; -pub const AT_EXECFN: c_ulong = 31; -pub const AT_MINSIGSTKSZ: c_ulong = 51; - -// siginfo.h -pub const SI_DETHREAD: c_int = -7; -pub const TRAP_PERF: c_int = 6; - -// Most `*_SUPER_MAGIC` constants are defined at the `linux_like` level; the -// following are only available on newer Linux versions than the versions -// currently used in CI in some configurations, so we define them here. -cfg_if! { - if #[cfg(not(target_arch = "s390x"))] { - pub const XFS_SUPER_MAGIC: c_long = 0x58465342; - } else if #[cfg(target_arch = "s390x")] { - pub const XFS_SUPER_MAGIC: c_uint = 0x58465342; - } -} - -f! 
{
-    pub fn CMSG_NXTHDR(mhdr: *const msghdr, cmsg: *const cmsghdr) -> *mut cmsghdr {
-        let next = (cmsg as usize + super::CMSG_ALIGN((*cmsg).cmsg_len as usize)) as *mut cmsghdr;
-        let max = (*mhdr).msg_control as usize + (*mhdr).msg_controllen as usize;
-        if (next.offset(1)) as usize > max {
-            core::ptr::null_mut::<cmsghdr>()
-        } else {
-            next as *mut cmsghdr
-        }
-    }
-
-    pub fn CPU_ALLOC_SIZE(count: c_int) -> size_t {
-        let _dummy: cpu_set_t = mem::zeroed();
-        let size_in_bits = 8 * size_of_val(&_dummy.__bits[0]);
-        ((count as size_t + size_in_bits - 1) / 8) as size_t
-    }
-
-    pub fn CPU_ZERO(cpuset: &mut cpu_set_t) -> () {
-        for slot in cpuset.__bits.iter_mut() {
-            *slot = 0;
-        }
-    }
-
-    pub fn CPU_SET(cpu: usize, cpuset: &mut cpu_set_t) -> () {
-        let size_in_bits = 8 * size_of_val(&cpuset.__bits[0]); // 32, 64 etc
-        let (idx, offset) = (cpu / size_in_bits, cpu % size_in_bits);
-        cpuset.__bits[idx] |= 1 << offset;
-        ()
-    }
-
-    pub fn CPU_CLR(cpu: usize, cpuset: &mut cpu_set_t) -> () {
-        let size_in_bits = 8 * size_of_val(&cpuset.__bits[0]); // 32, 64 etc
-        let (idx, offset) = (cpu / size_in_bits, cpu % size_in_bits);
-        cpuset.__bits[idx] &= !(1 << offset);
-        ()
-    }
-
-    pub fn CPU_ISSET(cpu: usize, cpuset: &cpu_set_t) -> bool {
-        let size_in_bits = 8 * size_of_val(&cpuset.__bits[0]);
-        let (idx, offset) = (cpu / size_in_bits, cpu % size_in_bits);
-        0 != (cpuset.__bits[idx] & (1 << offset))
-    }
-
-    pub fn CPU_COUNT_S(size: usize, cpuset: &cpu_set_t) -> c_int {
-        let mut s: u32 = 0;
-        let size_of_mask = size_of_val(&cpuset.__bits[0]);
-        for i in cpuset.__bits[..(size / size_of_mask)].iter() {
-            s += i.count_ones();
-        }
-        s as c_int
-    }
-
-    pub fn CPU_COUNT(cpuset: &cpu_set_t) -> c_int {
-        CPU_COUNT_S(size_of::<cpu_set_t>(), cpuset)
-    }
-
-    pub fn CPU_EQUAL(set1: &cpu_set_t, set2: &cpu_set_t) -> bool {
-        set1.__bits == set2.__bits
-    }
-
-    pub fn NLA_ALIGN(len: c_int) -> c_int {
-        return ((len) + NLA_ALIGNTO - 1) & !(NLA_ALIGNTO - 1);
-    }
-
-    pub fn SO_EE_OFFENDER(ee: *const crate::sock_extended_err) -> *mut crate::sockaddr {
-        ee.offset(1) as *mut crate::sockaddr
-    }
-}
-
-safe_f!
{ - pub const fn makedev(ma: c_uint, mi: c_uint) -> crate::dev_t { - let ma = ma as crate::dev_t; - let mi = mi as crate::dev_t; - ((ma & 0xfff) << 8) | (mi & 0xff) | ((mi & 0xfff00) << 12) - } - - pub const fn major(dev: crate::dev_t) -> c_int { - ((dev >> 8) & 0xfff) as c_int - } - - pub const fn minor(dev: crate::dev_t) -> c_int { - ((dev & 0xff) | ((dev >> 12) & 0xfff00)) as c_int - } -} - -extern "C" { - pub fn setgrent(); - pub fn endgrent(); - pub fn getgrent() -> *mut crate::group; - pub fn getrlimit64(resource: c_int, rlim: *mut rlimit64) -> c_int; - pub fn setrlimit64(resource: c_int, rlim: *const rlimit64) -> c_int; - pub fn getrlimit(resource: c_int, rlim: *mut crate::rlimit) -> c_int; - pub fn setrlimit(resource: c_int, rlim: *const crate::rlimit) -> c_int; - pub fn prlimit( - pid: crate::pid_t, - resource: c_int, - new_limit: *const crate::rlimit, - old_limit: *mut crate::rlimit, - ) -> c_int; - pub fn prlimit64( - pid: crate::pid_t, - resource: c_int, - new_limit: *const crate::rlimit64, - old_limit: *mut crate::rlimit64, - ) -> c_int; - pub fn strerror_r(errnum: c_int, buf: *mut c_char, buflen: size_t) -> c_int; - - pub fn gettimeofday(tp: *mut crate::timeval, tz: *mut crate::timezone) -> c_int; - pub fn mlock2(addr: *const c_void, len: size_t, flags: c_int) -> c_int; - pub fn madvise(addr: *mut c_void, len: size_t, advice: c_int) -> c_int; - pub fn msync(addr: *mut c_void, len: size_t, flags: c_int) -> c_int; - pub fn mprotect(addr: *mut c_void, len: size_t, prot: c_int) -> c_int; - pub fn getnameinfo( - sa: *const crate::sockaddr, - salen: crate::socklen_t, - host: *mut c_char, - hostlen: size_t, - serv: *mut c_char, - servlen: size_t, - flags: c_int, - ) -> c_int; - pub fn preadv(fd: c_int, iov: *const crate::iovec, count: c_int, offset: off_t) -> ssize_t; - pub fn pwritev(fd: c_int, iov: *const crate::iovec, count: c_int, offset: off_t) -> ssize_t; - pub fn process_vm_readv( - pid: crate::pid_t, - local_iov: *const crate::iovec, - local_iov_count: c_ulong, - remote_iov: *const crate::iovec, - remote_iov_count: c_ulong, - flags: c_ulong, - ) -> ssize_t; - pub fn process_vm_writev( - pid: crate::pid_t, - local_iov: *const crate::iovec, - local_iov_count: c_ulong, - remote_iov: *const crate::iovec, - remote_iov_count: c_ulong, - flags: c_ulong, - ) -> ssize_t; - pub fn ptrace(request: c_int, ...) 
-> c_long; - pub fn getpriority(which: c_int, who: crate::id_t) -> c_int; - pub fn setpriority(which: c_int, who: crate::id_t, prio: c_int) -> c_int; - pub fn __sched_cpualloc(count: size_t) -> *mut crate::cpu_set_t; - pub fn __sched_cpufree(set: *mut crate::cpu_set_t); - pub fn __sched_cpucount(setsize: size_t, set: *const cpu_set_t) -> c_int; - pub fn sched_getcpu() -> c_int; - pub fn mallinfo() -> crate::mallinfo; - // available from API 23 - pub fn malloc_info(options: c_int, stream: *mut crate::FILE) -> c_int; - - pub fn malloc_usable_size(ptr: *const c_void) -> size_t; - - pub fn utmpname(name: *const c_char) -> c_int; - pub fn setutent(); - pub fn getutent() -> *mut utmp; - - pub fn seekdir(dirp: *mut crate::DIR, loc: c_long); - pub fn telldir(dirp: *mut crate::DIR) -> c_long; - pub fn fallocate(fd: c_int, mode: c_int, offset: off_t, len: off_t) -> c_int; - pub fn fallocate64(fd: c_int, mode: c_int, offset: off64_t, len: off64_t) -> c_int; - pub fn posix_fallocate(fd: c_int, offset: off_t, len: off_t) -> c_int; - pub fn posix_fallocate64(fd: c_int, offset: off64_t, len: off64_t) -> c_int; - pub fn getxattr( - path: *const c_char, - name: *const c_char, - value: *mut c_void, - size: size_t, - ) -> ssize_t; - pub fn lgetxattr( - path: *const c_char, - name: *const c_char, - value: *mut c_void, - size: size_t, - ) -> ssize_t; - pub fn fgetxattr( - filedes: c_int, - name: *const c_char, - value: *mut c_void, - size: size_t, - ) -> ssize_t; - pub fn setxattr( - path: *const c_char, - name: *const c_char, - value: *const c_void, - size: size_t, - flags: c_int, - ) -> c_int; - pub fn lsetxattr( - path: *const c_char, - name: *const c_char, - value: *const c_void, - size: size_t, - flags: c_int, - ) -> c_int; - pub fn fsetxattr( - filedes: c_int, - name: *const c_char, - value: *const c_void, - size: size_t, - flags: c_int, - ) -> c_int; - pub fn listxattr(path: *const c_char, list: *mut c_char, size: size_t) -> ssize_t; - pub fn llistxattr(path: *const c_char, list: *mut c_char, size: size_t) -> ssize_t; - pub fn flistxattr(filedes: c_int, list: *mut c_char, size: size_t) -> ssize_t; - pub fn removexattr(path: *const c_char, name: *const c_char) -> c_int; - pub fn lremovexattr(path: *const c_char, name: *const c_char) -> c_int; - pub fn fremovexattr(filedes: c_int, name: *const c_char) -> c_int; - pub fn signalfd(fd: c_int, mask: *const crate::sigset_t, flags: c_int) -> c_int; - pub fn timerfd_create(clock: crate::clockid_t, flags: c_int) -> c_int; - pub fn timerfd_gettime(fd: c_int, current_value: *mut itimerspec) -> c_int; - pub fn timerfd_settime( - fd: c_int, - flags: c_int, - new_value: *const itimerspec, - old_value: *mut itimerspec, - ) -> c_int; - pub fn syscall(num: c_long, ...) 
-> c_long; - pub fn sched_getaffinity( - pid: crate::pid_t, - cpusetsize: size_t, - cpuset: *mut cpu_set_t, - ) -> c_int; - pub fn sched_setaffinity( - pid: crate::pid_t, - cpusetsize: size_t, - cpuset: *const cpu_set_t, - ) -> c_int; - pub fn epoll_create(size: c_int) -> c_int; - pub fn epoll_create1(flags: c_int) -> c_int; - pub fn epoll_wait( - epfd: c_int, - events: *mut crate::epoll_event, - maxevents: c_int, - timeout: c_int, - ) -> c_int; - pub fn epoll_ctl(epfd: c_int, op: c_int, fd: c_int, event: *mut crate::epoll_event) -> c_int; - pub fn pthread_getschedparam( - native: crate::pthread_t, - policy: *mut c_int, - param: *mut crate::sched_param, - ) -> c_int; - pub fn unshare(flags: c_int) -> c_int; - pub fn umount(target: *const c_char) -> c_int; - pub fn sched_get_priority_max(policy: c_int) -> c_int; - pub fn tee(fd_in: c_int, fd_out: c_int, len: size_t, flags: c_uint) -> ssize_t; - pub fn settimeofday(tv: *const crate::timeval, tz: *const crate::timezone) -> c_int; - pub fn splice( - fd_in: c_int, - off_in: *mut crate::loff_t, - fd_out: c_int, - off_out: *mut crate::loff_t, - len: size_t, - flags: c_uint, - ) -> ssize_t; - pub fn eventfd(init: c_uint, flags: c_int) -> c_int; - pub fn eventfd_read(fd: c_int, value: *mut eventfd_t) -> c_int; - pub fn eventfd_write(fd: c_int, value: eventfd_t) -> c_int; - pub fn sched_rr_get_interval(pid: crate::pid_t, tp: *mut crate::timespec) -> c_int; - pub fn sem_timedwait(sem: *mut sem_t, abstime: *const crate::timespec) -> c_int; - pub fn sem_getvalue(sem: *mut sem_t, sval: *mut c_int) -> c_int; - pub fn sched_setparam(pid: crate::pid_t, param: *const crate::sched_param) -> c_int; - pub fn setns(fd: c_int, nstype: c_int) -> c_int; - pub fn swapoff(puath: *const c_char) -> c_int; - pub fn vmsplice(fd: c_int, iov: *const crate::iovec, nr_segs: size_t, flags: c_uint) - -> ssize_t; - pub fn mount( - src: *const c_char, - target: *const c_char, - fstype: *const c_char, - flags: c_ulong, - data: *const c_void, - ) -> c_int; - pub fn personality(persona: c_uint) -> c_int; - pub fn prctl(option: c_int, ...) 
-> c_int; - pub fn sched_getparam(pid: crate::pid_t, param: *mut crate::sched_param) -> c_int; - pub fn ppoll( - fds: *mut crate::pollfd, - nfds: nfds_t, - timeout: *const crate::timespec, - sigmask: *const sigset_t, - ) -> c_int; - pub fn pthread_mutex_timedlock( - lock: *mut pthread_mutex_t, - abstime: *const crate::timespec, - ) -> c_int; - pub fn pthread_barrierattr_init(attr: *mut crate::pthread_barrierattr_t) -> c_int; - pub fn pthread_barrierattr_destroy(attr: *mut crate::pthread_barrierattr_t) -> c_int; - pub fn pthread_barrierattr_getpshared( - attr: *const crate::pthread_barrierattr_t, - shared: *mut c_int, - ) -> c_int; - pub fn pthread_barrierattr_setpshared( - attr: *mut crate::pthread_barrierattr_t, - shared: c_int, - ) -> c_int; - pub fn pthread_barrier_init( - barrier: *mut pthread_barrier_t, - attr: *const crate::pthread_barrierattr_t, - count: c_uint, - ) -> c_int; - pub fn pthread_barrier_destroy(barrier: *mut pthread_barrier_t) -> c_int; - pub fn pthread_barrier_wait(barrier: *mut pthread_barrier_t) -> c_int; - pub fn pthread_spin_init(lock: *mut crate::pthread_spinlock_t, pshared: c_int) -> c_int; - pub fn pthread_spin_destroy(lock: *mut crate::pthread_spinlock_t) -> c_int; - pub fn pthread_spin_lock(lock: *mut crate::pthread_spinlock_t) -> c_int; - pub fn pthread_spin_trylock(lock: *mut crate::pthread_spinlock_t) -> c_int; - pub fn pthread_spin_unlock(lock: *mut crate::pthread_spinlock_t) -> c_int; - pub fn clone( - cb: extern "C" fn(*mut c_void) -> c_int, - child_stack: *mut c_void, - flags: c_int, - arg: *mut c_void, - ... - ) -> c_int; - pub fn sched_getscheduler(pid: crate::pid_t) -> c_int; - pub fn clock_nanosleep( - clk_id: crate::clockid_t, - flags: c_int, - rqtp: *const crate::timespec, - rmtp: *mut crate::timespec, - ) -> c_int; - pub fn pthread_attr_getguardsize( - attr: *const crate::pthread_attr_t, - guardsize: *mut size_t, - ) -> c_int; - pub fn pthread_attr_setguardsize(attr: *mut crate::pthread_attr_t, guardsize: size_t) -> c_int; - pub fn pthread_attr_getinheritsched( - attr: *const crate::pthread_attr_t, - flag: *mut c_int, - ) -> c_int; - pub fn pthread_attr_setinheritsched(attr: *mut crate::pthread_attr_t, flag: c_int) -> c_int; - pub fn sethostname(name: *const c_char, len: size_t) -> c_int; - pub fn sched_get_priority_min(policy: c_int) -> c_int; - pub fn pthread_condattr_getpshared( - attr: *const pthread_condattr_t, - pshared: *mut c_int, - ) -> c_int; - pub fn sysinfo(info: *mut crate::sysinfo) -> c_int; - pub fn umount2(target: *const c_char, flags: c_int) -> c_int; - pub fn pthread_setschedparam( - native: crate::pthread_t, - policy: c_int, - param: *const crate::sched_param, - ) -> c_int; - pub fn swapon(path: *const c_char, swapflags: c_int) -> c_int; - pub fn sched_setscheduler( - pid: crate::pid_t, - policy: c_int, - param: *const crate::sched_param, - ) -> c_int; - pub fn sendfile(out_fd: c_int, in_fd: c_int, offset: *mut off_t, count: size_t) -> ssize_t; - pub fn sendfile64(out_fd: c_int, in_fd: c_int, offset: *mut off64_t, count: size_t) -> ssize_t; - pub fn setfsgid(gid: crate::gid_t) -> c_int; - pub fn setfsuid(uid: crate::uid_t) -> c_int; - pub fn sigsuspend(mask: *const crate::sigset_t) -> c_int; - pub fn getgrgid_r( - gid: crate::gid_t, - grp: *mut crate::group, - buf: *mut c_char, - buflen: size_t, - result: *mut *mut crate::group, - ) -> c_int; - pub fn sigaltstack(ss: *const stack_t, oss: *mut stack_t) -> c_int; - pub fn sem_close(sem: *mut sem_t) -> c_int; - pub fn getgrnam_r( - name: *const c_char, - grp: *mut crate::group, - 
buf: *mut c_char, - buflen: size_t, - result: *mut *mut crate::group, - ) -> c_int; - pub fn pthread_sigmask(how: c_int, set: *const sigset_t, oldset: *mut sigset_t) -> c_int; - pub fn sem_open(name: *const c_char, oflag: c_int, ...) -> *mut sem_t; - pub fn getgrnam(name: *const c_char) -> *mut crate::group; - pub fn pthread_kill(thread: crate::pthread_t, sig: c_int) -> c_int; - pub fn sem_unlink(name: *const c_char) -> c_int; - pub fn daemon(nochdir: c_int, noclose: c_int) -> c_int; - pub fn getpwnam_r( - name: *const c_char, - pwd: *mut passwd, - buf: *mut c_char, - buflen: size_t, - result: *mut *mut passwd, - ) -> c_int; - pub fn getpwuid_r( - uid: crate::uid_t, - pwd: *mut passwd, - buf: *mut c_char, - buflen: size_t, - result: *mut *mut passwd, - ) -> c_int; - pub fn sigtimedwait( - set: *const sigset_t, - info: *mut siginfo_t, - timeout: *const crate::timespec, - ) -> c_int; - pub fn sigwait(set: *const sigset_t, sig: *mut c_int) -> c_int; - pub fn pthread_atfork( - prepare: Option, - parent: Option, - child: Option, - ) -> c_int; - pub fn getgrgid(gid: crate::gid_t) -> *mut crate::group; - pub fn getgrouplist( - user: *const c_char, - group: crate::gid_t, - groups: *mut crate::gid_t, - ngroups: *mut c_int, - ) -> c_int; - pub fn initgroups(user: *const c_char, group: crate::gid_t) -> c_int; - pub fn pthread_mutexattr_getpshared( - attr: *const pthread_mutexattr_t, - pshared: *mut c_int, - ) -> c_int; - pub fn popen(command: *const c_char, mode: *const c_char) -> *mut crate::FILE; - pub fn faccessat(dirfd: c_int, pathname: *const c_char, mode: c_int, flags: c_int) -> c_int; - pub fn pthread_create( - native: *mut crate::pthread_t, - attr: *const crate::pthread_attr_t, - f: extern "C" fn(*mut c_void) -> *mut c_void, - value: *mut c_void, - ) -> c_int; - pub fn __errno() -> *mut c_int; - pub fn inotify_rm_watch(fd: c_int, wd: u32) -> c_int; - pub fn inotify_init() -> c_int; - pub fn inotify_init1(flags: c_int) -> c_int; - pub fn inotify_add_watch(fd: c_int, path: *const c_char, mask: u32) -> c_int; - - pub fn regcomp(preg: *mut crate::regex_t, pattern: *const c_char, cflags: c_int) -> c_int; - - pub fn regexec( - preg: *const crate::regex_t, - input: *const c_char, - nmatch: size_t, - pmatch: *mut regmatch_t, - eflags: c_int, - ) -> c_int; - - pub fn regerror( - errcode: c_int, - preg: *const crate::regex_t, - errbuf: *mut c_char, - errbuf_size: size_t, - ) -> size_t; - - pub fn regfree(preg: *mut crate::regex_t); - - pub fn android_set_abort_message(msg: *const c_char); - - pub fn gettid() -> crate::pid_t; - - pub fn getauxval(type_: c_ulong) -> c_ulong; - - /// Only available in API Version 28+ - pub fn getrandom(buf: *mut c_void, buflen: size_t, flags: c_uint) -> ssize_t; - pub fn getentropy(buf: *mut c_void, buflen: size_t) -> c_int; - - pub fn pthread_setname_np(thread: crate::pthread_t, name: *const c_char) -> c_int; - - pub fn __system_property_set(__name: *const c_char, __value: *const c_char) -> c_int; - pub fn __system_property_get(__name: *const c_char, __value: *mut c_char) -> c_int; - pub fn __system_property_find(__name: *const c_char) -> *const prop_info; - pub fn __system_property_find_nth(__n: c_uint) -> *const prop_info; - pub fn __system_property_foreach( - __callback: unsafe extern "C" fn(__pi: *const prop_info, __cookie: *mut c_void), - __cookie: *mut c_void, - ) -> c_int; - - // #include - /// Only available in API Version 21+ - pub fn dl_iterate_phdr( - callback: Option< - unsafe extern "C" fn(info: *mut dl_phdr_info, size: usize, data: *mut c_void) -> c_int, - 
>, - data: *mut c_void, - ) -> c_int; - - pub fn arc4random() -> u32; - pub fn arc4random_uniform(__upper_bound: u32) -> u32; - pub fn arc4random_buf(__buf: *mut c_void, __n: size_t); - - pub fn reallocarray(ptr: *mut c_void, nmemb: size_t, size: size_t) -> *mut c_void; - - pub fn pthread_getcpuclockid(thread: crate::pthread_t, clk_id: *mut crate::clockid_t) -> c_int; - - pub fn dirname(path: *const c_char) -> *mut c_char; - pub fn basename(path: *const c_char) -> *mut c_char; - pub fn getopt_long( - argc: c_int, - argv: *const *mut c_char, - optstring: *const c_char, - longopts: *const option, - longindex: *mut c_int, - ) -> c_int; - - pub fn sync(); - pub fn syncfs(fd: c_int) -> c_int; - - pub fn memmem( - haystack: *const c_void, - haystacklen: size_t, - needle: *const c_void, - needlelen: size_t, - ) -> *mut c_void; - pub fn fread_unlocked( - buf: *mut c_void, - size: size_t, - nobj: size_t, - stream: *mut crate::FILE, - ) -> size_t; - pub fn fwrite_unlocked( - buf: *const c_void, - size: size_t, - nobj: size_t, - stream: *mut crate::FILE, - ) -> size_t; - pub fn fflush_unlocked(stream: *mut crate::FILE) -> c_int; - pub fn fgets_unlocked(buf: *mut c_char, size: c_int, stream: *mut crate::FILE) -> *mut c_char; - - pub fn klogctl(syslog_type: c_int, bufp: *mut c_char, len: c_int) -> c_int; - - pub fn memfd_create(name: *const c_char, flags: c_uint) -> c_int; - pub fn renameat2( - olddirfd: c_int, - oldpath: *const c_char, - newdirfd: c_int, - newpath: *const c_char, - flags: c_uint, - ) -> c_int; - - pub fn if_nameindex() -> *mut if_nameindex; - pub fn if_freenameindex(ptr: *mut if_nameindex); -} - -cfg_if! { - if #[cfg(target_pointer_width = "32")] { - mod b32; - pub use self::b32::*; - } else if #[cfg(target_pointer_width = "64")] { - mod b64; - pub use self::b64::*; - } else { - // Unknown target_pointer_width - } -} - -impl siginfo_t { - pub unsafe fn si_addr(&self) -> *mut c_void { - #[repr(C)] - struct siginfo_sigfault { - _si_signo: c_int, - _si_errno: c_int, - _si_code: c_int, - si_addr: *mut c_void, - } - (*(self as *const siginfo_t as *const siginfo_sigfault)).si_addr - } - - pub unsafe fn si_value(&self) -> crate::sigval { - #[repr(C)] - struct siginfo_timer { - _si_signo: c_int, - _si_errno: c_int, - _si_code: c_int, - _si_tid: c_int, - _si_overrun: c_int, - si_sigval: crate::sigval, - } - (*(self as *const siginfo_t as *const siginfo_timer)).si_sigval - } -} - -// Internal, for casts to access union fields -#[repr(C)] -struct sifields_sigchld { - si_pid: crate::pid_t, - si_uid: crate::uid_t, - si_status: c_int, - si_utime: c_long, - si_stime: c_long, -} -impl Copy for sifields_sigchld {} -impl Clone for sifields_sigchld { - fn clone(&self) -> sifields_sigchld { - *self - } -} - -// Internal, for casts to access union fields -#[repr(C)] -union sifields { - _align_pointer: *mut c_void, - sigchld: sifields_sigchld, -} - -// Internal, for casts to access union fields. Note that some variants -// of sifields start with a pointer, which makes the alignment of -// sifields vary on 32-bit and 64-bit architectures. 
-#[repr(C)] -struct siginfo_f { - _siginfo_base: [c_int; 3], - sifields: sifields, -} - -impl siginfo_t { - unsafe fn sifields(&self) -> &sifields { - &(*(self as *const siginfo_t as *const siginfo_f)).sifields - } - - pub unsafe fn si_pid(&self) -> crate::pid_t { - self.sifields().sigchld.si_pid - } - - pub unsafe fn si_uid(&self) -> crate::uid_t { - self.sifields().sigchld.si_uid - } - - pub unsafe fn si_status(&self) -> c_int { - self.sifields().sigchld.si_status - } - - pub unsafe fn si_utime(&self) -> c_long { - self.sifields().sigchld.si_utime - } - - pub unsafe fn si_stime(&self) -> c_long { - self.sifields().sigchld.si_stime - } -} diff --git a/vendor/libc/src/unix/linux_like/emscripten/lfs64.rs b/vendor/libc/src/unix/linux_like/emscripten/lfs64.rs deleted file mode 100644 index 06be875446bb6a..00000000000000 --- a/vendor/libc/src/unix/linux_like/emscripten/lfs64.rs +++ /dev/null @@ -1,211 +0,0 @@ -use crate::off64_t; -use crate::prelude::*; - -// In-sync with ../linux/musl/lfs64.rs except for fallocate64, prlimit64 and sendfile64 - -#[inline] -pub unsafe extern "C" fn creat64(path: *const c_char, mode: crate::mode_t) -> c_int { - crate::creat(path, mode) -} - -#[inline] -pub unsafe extern "C" fn fgetpos64(stream: *mut crate::FILE, pos: *mut crate::fpos64_t) -> c_int { - crate::fgetpos(stream, pos as *mut _) -} - -#[inline] -pub unsafe extern "C" fn fopen64(pathname: *const c_char, mode: *const c_char) -> *mut crate::FILE { - crate::fopen(pathname, mode) -} - -#[inline] -pub unsafe extern "C" fn freopen64( - pathname: *const c_char, - mode: *const c_char, - stream: *mut crate::FILE, -) -> *mut crate::FILE { - crate::freopen(pathname, mode, stream) -} - -#[inline] -pub unsafe extern "C" fn fseeko64( - stream: *mut crate::FILE, - offset: off64_t, - whence: c_int, -) -> c_int { - crate::fseeko(stream, offset, whence) -} - -#[inline] -pub unsafe extern "C" fn fsetpos64(stream: *mut crate::FILE, pos: *const crate::fpos64_t) -> c_int { - crate::fsetpos(stream, pos as *mut _) -} - -#[inline] -pub unsafe extern "C" fn fstat64(fildes: c_int, buf: *mut crate::stat64) -> c_int { - crate::fstat(fildes, buf as *mut _) -} - -#[inline] -pub unsafe extern "C" fn fstatat64( - fd: c_int, - path: *const c_char, - buf: *mut crate::stat64, - flag: c_int, -) -> c_int { - crate::fstatat(fd, path, buf as *mut _, flag) -} - -#[inline] -pub unsafe extern "C" fn fstatfs64(fd: c_int, buf: *mut crate::statfs64) -> c_int { - crate::fstatfs(fd, buf as *mut _) -} - -#[inline] -pub unsafe extern "C" fn fstatvfs64(fd: c_int, buf: *mut crate::statvfs64) -> c_int { - crate::fstatvfs(fd, buf as *mut _) -} - -#[inline] -pub unsafe extern "C" fn ftello64(stream: *mut crate::FILE) -> off64_t { - crate::ftello(stream) -} - -#[inline] -pub unsafe extern "C" fn ftruncate64(fd: c_int, length: off64_t) -> c_int { - crate::ftruncate(fd, length) -} - -#[inline] -pub unsafe extern "C" fn getrlimit64(resource: c_int, rlim: *mut crate::rlimit64) -> c_int { - crate::getrlimit(resource, rlim as *mut _) -} - -#[inline] -pub unsafe extern "C" fn lseek64(fd: c_int, offset: off64_t, whence: c_int) -> off64_t { - crate::lseek(fd, offset, whence) -} - -#[inline] -pub unsafe extern "C" fn lstat64(path: *const c_char, buf: *mut crate::stat64) -> c_int { - crate::lstat(path, buf as *mut _) -} - -#[inline] -pub unsafe extern "C" fn mmap64( - addr: *mut c_void, - length: size_t, - prot: c_int, - flags: c_int, - fd: c_int, - offset: off64_t, -) -> *mut c_void { - crate::mmap(addr, length, prot, flags, fd, offset) -} - -// These functions are 
variadic in the C ABI since the `mode` argument is "optional". Variadic -// `extern "C"` functions are unstable in Rust so we cannot write a shim function for these -// entrypoints. See https://github.com/rust-lang/rust/issues/44930. -// -// These aliases are mostly fine though, neither function takes a LFS64-namespaced type as an -// argument, nor do their names clash with any declared types. -pub use crate::{open as open64, openat as openat64}; - -#[inline] -pub unsafe extern "C" fn posix_fadvise64( - fd: c_int, - offset: off64_t, - len: off64_t, - advice: c_int, -) -> c_int { - crate::posix_fadvise(fd, offset, len, advice) -} - -#[inline] -pub unsafe extern "C" fn posix_fallocate64(fd: c_int, offset: off64_t, len: off64_t) -> c_int { - crate::posix_fallocate(fd, offset, len) -} - -#[inline] -pub unsafe extern "C" fn pread64( - fd: c_int, - buf: *mut c_void, - count: size_t, - offset: off64_t, -) -> ssize_t { - crate::pread(fd, buf, count, offset) -} - -#[inline] -pub unsafe extern "C" fn preadv64( - fd: c_int, - iov: *const crate::iovec, - iovcnt: c_int, - offset: off64_t, -) -> ssize_t { - crate::preadv(fd, iov, iovcnt, offset) -} - -#[inline] -pub unsafe extern "C" fn pwrite64( - fd: c_int, - buf: *const c_void, - count: size_t, - offset: off64_t, -) -> ssize_t { - crate::pwrite(fd, buf, count, offset) -} - -#[inline] -pub unsafe extern "C" fn pwritev64( - fd: c_int, - iov: *const crate::iovec, - iovcnt: c_int, - offset: off64_t, -) -> ssize_t { - crate::pwritev(fd, iov, iovcnt, offset) -} - -#[inline] -pub unsafe extern "C" fn readdir64(dirp: *mut crate::DIR) -> *mut crate::dirent64 { - crate::readdir(dirp) as *mut _ -} - -#[inline] -pub unsafe extern "C" fn readdir64_r( - dirp: *mut crate::DIR, - entry: *mut crate::dirent64, - result: *mut *mut crate::dirent64, -) -> c_int { - crate::readdir_r(dirp, entry as *mut _, result as *mut _) -} - -#[inline] -pub unsafe extern "C" fn setrlimit64(resource: c_int, rlim: *const crate::rlimit64) -> c_int { - crate::setrlimit(resource, rlim as *mut _) -} - -#[inline] -pub unsafe extern "C" fn stat64(pathname: *const c_char, statbuf: *mut crate::stat64) -> c_int { - crate::stat(pathname, statbuf as *mut _) -} - -#[inline] -pub unsafe extern "C" fn statfs64(pathname: *const c_char, buf: *mut crate::statfs64) -> c_int { - crate::statfs(pathname, buf as *mut _) -} - -#[inline] -pub unsafe extern "C" fn statvfs64(path: *const c_char, buf: *mut crate::statvfs64) -> c_int { - crate::statvfs(path, buf as *mut _) -} - -#[inline] -pub unsafe extern "C" fn tmpfile64() -> *mut crate::FILE { - crate::tmpfile() -} - -#[inline] -pub unsafe extern "C" fn truncate64(path: *const c_char, length: off64_t) -> c_int { - crate::truncate(path, length) -} diff --git a/vendor/libc/src/unix/linux_like/emscripten/mod.rs b/vendor/libc/src/unix/linux_like/emscripten/mod.rs deleted file mode 100644 index 417e3e593bc5eb..00000000000000 --- a/vendor/libc/src/unix/linux_like/emscripten/mod.rs +++ /dev/null @@ -1,1589 +0,0 @@ -use crate::prelude::*; - -pub type wchar_t = i32; -pub type useconds_t = u32; -pub type dev_t = u32; -pub type socklen_t = u32; -pub type pthread_t = c_ulong; -pub type mode_t = u32; -pub type shmatt_t = c_ulong; -pub type mqd_t = c_int; -pub type msgqnum_t = c_ulong; -pub type msglen_t = c_ulong; -pub type nfds_t = c_ulong; -pub type nl_item = c_int; -pub type idtype_t = c_uint; -pub type loff_t = i64; -pub type pthread_key_t = c_uint; - -pub type clock_t = c_long; -pub type time_t = i64; -pub type suseconds_t = c_long; -pub type ino_t = u64; -pub type 
off_t = i64; -pub type blkcnt_t = i32; - -pub type blksize_t = c_long; -pub type fsblkcnt_t = u32; -pub type fsfilcnt_t = u32; -pub type rlim_t = u64; -pub type nlink_t = u32; - -pub type ino64_t = crate::ino_t; -pub type off64_t = off_t; -pub type blkcnt64_t = crate::blkcnt_t; -pub type rlim64_t = crate::rlim_t; - -pub type rlimit64 = crate::rlimit; -pub type flock64 = crate::flock; -pub type stat64 = crate::stat; -pub type statfs64 = crate::statfs; -pub type statvfs64 = crate::statvfs; -pub type dirent64 = crate::dirent; - -#[derive(Debug)] -pub enum fpos64_t {} // FIXME(emscripten): fill this out with a struct -impl Copy for fpos64_t {} -impl Clone for fpos64_t { - fn clone(&self) -> fpos64_t { - *self - } -} - -s! { - pub struct glob_t { - pub gl_pathc: size_t, - pub gl_pathv: *mut *mut c_char, - pub gl_offs: size_t, - pub gl_flags: c_int, - - __unused1: *mut c_void, - __unused2: *mut c_void, - __unused3: *mut c_void, - __unused4: *mut c_void, - __unused5: *mut c_void, - } - - pub struct passwd { - pub pw_name: *mut c_char, - pub pw_passwd: *mut c_char, - pub pw_uid: crate::uid_t, - pub pw_gid: crate::gid_t, - pub pw_gecos: *mut c_char, - pub pw_dir: *mut c_char, - pub pw_shell: *mut c_char, - } - - pub struct spwd { - pub sp_namp: *mut c_char, - pub sp_pwdp: *mut c_char, - pub sp_lstchg: c_long, - pub sp_min: c_long, - pub sp_max: c_long, - pub sp_warn: c_long, - pub sp_inact: c_long, - pub sp_expire: c_long, - pub sp_flag: c_ulong, - } - - pub struct statvfs { - pub f_bsize: c_ulong, - pub f_frsize: c_ulong, - pub f_blocks: crate::fsblkcnt_t, - pub f_bfree: crate::fsblkcnt_t, - pub f_bavail: crate::fsblkcnt_t, - pub f_files: crate::fsfilcnt_t, - pub f_ffree: crate::fsfilcnt_t, - pub f_favail: crate::fsfilcnt_t, - pub f_fsid: c_ulong, - __f_unused: c_int, - pub f_flag: c_ulong, - pub f_namemax: c_ulong, - __f_spare: [c_int; 6], - } - - pub struct signalfd_siginfo { - pub ssi_signo: u32, - pub ssi_errno: i32, - pub ssi_code: i32, - pub ssi_pid: u32, - pub ssi_uid: u32, - pub ssi_fd: i32, - pub ssi_tid: u32, - pub ssi_band: u32, - pub ssi_overrun: u32, - pub ssi_trapno: u32, - pub ssi_status: i32, - pub ssi_int: i32, - pub ssi_ptr: u64, - pub ssi_utime: u64, - pub ssi_stime: u64, - pub ssi_addr: u64, - pub ssi_addr_lsb: u16, - _pad2: u16, - pub ssi_syscall: i32, - pub ssi_call_addr: u64, - pub ssi_arch: u32, - _pad: [u8; 28], - } - - pub struct fsid_t { - __val: [c_int; 2], - } - - pub struct cpu_set_t { - bits: [u32; 32], - } - - pub struct if_nameindex { - pub if_index: c_uint, - pub if_name: *mut c_char, - } - - // System V IPC - pub struct msginfo { - pub msgpool: c_int, - pub msgmap: c_int, - pub msgmax: c_int, - pub msgmnb: c_int, - pub msgmni: c_int, - pub msgssz: c_int, - pub msgtql: c_int, - pub msgseg: c_ushort, - } - - pub struct sembuf { - pub sem_num: c_ushort, - pub sem_op: c_short, - pub sem_flg: c_short, - } - - // FIXME(1.0): This should not implement `PartialEq` - #[allow(unpredictable_function_pointer_comparisons)] - pub struct sigaction { - pub sa_sigaction: crate::sighandler_t, - pub sa_mask: crate::sigset_t, - pub sa_flags: c_int, - pub sa_restorer: Option, - } - - pub struct ipc_perm { - pub __ipc_perm_key: crate::key_t, - pub uid: crate::uid_t, - pub gid: crate::gid_t, - pub cuid: crate::uid_t, - pub cgid: crate::gid_t, - pub mode: mode_t, - pub __seq: c_int, - __unused1: c_long, - __unused2: c_long, - } - - pub struct termios { - pub c_iflag: crate::tcflag_t, - pub c_oflag: crate::tcflag_t, - pub c_cflag: crate::tcflag_t, - pub c_lflag: crate::tcflag_t, - pub 
c_line: crate::cc_t, - pub c_cc: [crate::cc_t; crate::NCCS], - pub __c_ispeed: crate::speed_t, - pub __c_ospeed: crate::speed_t, - } - - pub struct flock { - pub l_type: c_short, - pub l_whence: c_short, - pub l_start: off_t, - pub l_len: off_t, - pub l_pid: crate::pid_t, - } - - pub struct pthread_attr_t { - __size: [u32; 11], - } - - pub struct sigset_t { - __val: [c_ulong; 32], - } - - pub struct msghdr { - pub msg_name: *mut c_void, - pub msg_namelen: crate::socklen_t, - pub msg_iov: *mut crate::iovec, - pub msg_iovlen: c_int, - pub msg_control: *mut c_void, - pub msg_controllen: crate::socklen_t, - pub msg_flags: c_int, - } - - pub struct cmsghdr { - pub cmsg_len: crate::socklen_t, - pub cmsg_level: c_int, - pub cmsg_type: c_int, - } - - pub struct sem_t { - __val: [c_int; 4], - } - pub struct stat { - pub st_dev: crate::dev_t, - #[cfg(emscripten_old_stat_abi)] - __st_dev_padding: c_int, - #[cfg(emscripten_old_stat_abi)] - __st_ino_truncated: c_long, - pub st_mode: mode_t, - pub st_nlink: crate::nlink_t, - pub st_uid: crate::uid_t, - pub st_gid: crate::gid_t, - pub st_rdev: crate::dev_t, - #[cfg(emscripten_old_stat_abi)] - __st_rdev_padding: c_int, - pub st_size: off_t, - pub st_blksize: crate::blksize_t, - pub st_blocks: crate::blkcnt_t, - pub st_atime: crate::time_t, - pub st_atime_nsec: c_long, - pub st_mtime: crate::time_t, - pub st_mtime_nsec: c_long, - pub st_ctime: crate::time_t, - pub st_ctime_nsec: c_long, - pub st_ino: crate::ino_t, - } - - pub struct stack_t { - pub ss_sp: *mut c_void, - pub ss_flags: c_int, - pub ss_size: size_t, - } - - pub struct shmid_ds { - pub shm_perm: crate::ipc_perm, - pub shm_segsz: size_t, - pub shm_atime: crate::time_t, - pub shm_dtime: crate::time_t, - pub shm_ctime: crate::time_t, - pub shm_cpid: crate::pid_t, - pub shm_lpid: crate::pid_t, - pub shm_nattch: c_ulong, - __pad1: c_ulong, - __pad2: c_ulong, - } - - pub struct msqid_ds { - pub msg_perm: crate::ipc_perm, - pub msg_stime: crate::time_t, - pub msg_rtime: crate::time_t, - pub msg_ctime: crate::time_t, - pub __msg_cbytes: c_ulong, - pub msg_qnum: crate::msgqnum_t, - pub msg_qbytes: crate::msglen_t, - pub msg_lspid: crate::pid_t, - pub msg_lrpid: crate::pid_t, - __pad1: c_ulong, - __pad2: c_ulong, - } - - pub struct statfs { - pub f_type: c_ulong, - pub f_bsize: c_ulong, - pub f_blocks: crate::fsblkcnt_t, - pub f_bfree: crate::fsblkcnt_t, - pub f_bavail: crate::fsblkcnt_t, - pub f_files: crate::fsfilcnt_t, - pub f_ffree: crate::fsfilcnt_t, - pub f_fsid: crate::fsid_t, - pub f_namelen: c_ulong, - pub f_frsize: c_ulong, - pub f_flags: c_ulong, - pub f_spare: [c_ulong; 4], - } - - pub struct siginfo_t { - pub si_signo: c_int, - pub si_errno: c_int, - pub si_code: c_int, - pub _pad: [c_int; 29], - _align: [usize; 0], - } - - pub struct arpd_request { - pub req: c_ushort, - pub ip: u32, - pub dev: c_ulong, - pub stamp: c_ulong, - pub updated: c_ulong, - pub ha: [c_uchar; crate::MAX_ADDR_LEN], - } - - #[repr(align(4))] - pub struct pthread_mutex_t { - size: [u8; crate::__SIZEOF_PTHREAD_MUTEX_T], - } - - #[repr(align(4))] - pub struct pthread_rwlock_t { - size: [u8; crate::__SIZEOF_PTHREAD_RWLOCK_T], - } - - #[repr(align(4))] - pub struct pthread_mutexattr_t { - size: [u8; crate::__SIZEOF_PTHREAD_MUTEXATTR_T], - } - - #[repr(align(4))] - pub struct pthread_rwlockattr_t { - size: [u8; crate::__SIZEOF_PTHREAD_RWLOCKATTR_T], - } - - #[repr(align(4))] - pub struct pthread_condattr_t { - size: [u8; crate::__SIZEOF_PTHREAD_CONDATTR_T], - } -} - -s_no_extra_traits! 
{ - pub struct dirent { - pub d_ino: crate::ino_t, - pub d_off: off_t, - pub d_reclen: c_ushort, - pub d_type: c_uchar, - pub d_name: [c_char; 256], - } - - pub struct sysinfo { - pub uptime: c_ulong, - pub loads: [c_ulong; 3], - pub totalram: c_ulong, - pub freeram: c_ulong, - pub sharedram: c_ulong, - pub bufferram: c_ulong, - pub totalswap: c_ulong, - pub freeswap: c_ulong, - pub procs: c_ushort, - pub pad: c_ushort, - pub totalhigh: c_ulong, - pub freehigh: c_ulong, - pub mem_unit: c_uint, - pub __reserved: [c_char; 256], - } - - pub struct mq_attr { - pub mq_flags: c_long, - pub mq_maxmsg: c_long, - pub mq_msgsize: c_long, - pub mq_curmsgs: c_long, - pad: [c_long; 4], - } - - #[cfg_attr(target_pointer_width = "32", repr(align(4)))] - #[cfg_attr(target_pointer_width = "64", repr(align(8)))] - pub struct pthread_cond_t { - size: [u8; crate::__SIZEOF_PTHREAD_COND_T], - } - - #[repr(align(8))] - pub struct max_align_t { - priv_: [f64; 3], - } -} - -cfg_if! { - if #[cfg(feature = "extra_traits")] { - impl PartialEq for dirent { - fn eq(&self, other: &dirent) -> bool { - self.d_ino == other.d_ino - && self.d_off == other.d_off - && self.d_reclen == other.d_reclen - && self.d_type == other.d_type - && self - .d_name - .iter() - .zip(other.d_name.iter()) - .all(|(a, b)| a == b) - } - } - impl Eq for dirent {} - impl hash::Hash for dirent { - fn hash(&self, state: &mut H) { - self.d_ino.hash(state); - self.d_off.hash(state); - self.d_reclen.hash(state); - self.d_type.hash(state); - self.d_name.hash(state); - } - } - - impl PartialEq for sysinfo { - fn eq(&self, other: &sysinfo) -> bool { - self.uptime == other.uptime - && self.loads == other.loads - && self.totalram == other.totalram - && self.freeram == other.freeram - && self.sharedram == other.sharedram - && self.bufferram == other.bufferram - && self.totalswap == other.totalswap - && self.freeswap == other.freeswap - && self.procs == other.procs - && self.pad == other.pad - && self.totalhigh == other.totalhigh - && self.freehigh == other.freehigh - && self.mem_unit == other.mem_unit - && self - .__reserved - .iter() - .zip(other.__reserved.iter()) - .all(|(a, b)| a == b) - } - } - impl Eq for sysinfo {} - impl hash::Hash for sysinfo { - fn hash(&self, state: &mut H) { - self.uptime.hash(state); - self.loads.hash(state); - self.totalram.hash(state); - self.freeram.hash(state); - self.sharedram.hash(state); - self.bufferram.hash(state); - self.totalswap.hash(state); - self.freeswap.hash(state); - self.procs.hash(state); - self.pad.hash(state); - self.totalhigh.hash(state); - self.freehigh.hash(state); - self.mem_unit.hash(state); - self.__reserved.hash(state); - } - } - - impl PartialEq for mq_attr { - fn eq(&self, other: &mq_attr) -> bool { - self.mq_flags == other.mq_flags - && self.mq_maxmsg == other.mq_maxmsg - && self.mq_msgsize == other.mq_msgsize - && self.mq_curmsgs == other.mq_curmsgs - } - } - impl Eq for mq_attr {} - impl hash::Hash for mq_attr { - fn hash(&self, state: &mut H) { - self.mq_flags.hash(state); - self.mq_maxmsg.hash(state); - self.mq_msgsize.hash(state); - self.mq_curmsgs.hash(state); - } - } - - impl PartialEq for pthread_cond_t { - fn eq(&self, other: &pthread_cond_t) -> bool { - self.size.iter().zip(other.size.iter()).all(|(a, b)| a == b) - } - } - impl Eq for pthread_cond_t {} - impl hash::Hash for pthread_cond_t { - fn hash(&self, state: &mut H) { - self.size.hash(state); - } - } - } -} - -pub const MADV_SOFT_OFFLINE: c_int = 101; -pub const MS_NOUSER: c_ulong = 0x80000000; -pub const MS_RMT_MASK: c_ulong = 
0x02800051; - -pub const ABDAY_1: crate::nl_item = 0x20000; -pub const ABDAY_2: crate::nl_item = 0x20001; -pub const ABDAY_3: crate::nl_item = 0x20002; -pub const ABDAY_4: crate::nl_item = 0x20003; -pub const ABDAY_5: crate::nl_item = 0x20004; -pub const ABDAY_6: crate::nl_item = 0x20005; -pub const ABDAY_7: crate::nl_item = 0x20006; - -pub const DAY_1: crate::nl_item = 0x20007; -pub const DAY_2: crate::nl_item = 0x20008; -pub const DAY_3: crate::nl_item = 0x20009; -pub const DAY_4: crate::nl_item = 0x2000A; -pub const DAY_5: crate::nl_item = 0x2000B; -pub const DAY_6: crate::nl_item = 0x2000C; -pub const DAY_7: crate::nl_item = 0x2000D; - -pub const ABMON_1: crate::nl_item = 0x2000E; -pub const ABMON_2: crate::nl_item = 0x2000F; -pub const ABMON_3: crate::nl_item = 0x20010; -pub const ABMON_4: crate::nl_item = 0x20011; -pub const ABMON_5: crate::nl_item = 0x20012; -pub const ABMON_6: crate::nl_item = 0x20013; -pub const ABMON_7: crate::nl_item = 0x20014; -pub const ABMON_8: crate::nl_item = 0x20015; -pub const ABMON_9: crate::nl_item = 0x20016; -pub const ABMON_10: crate::nl_item = 0x20017; -pub const ABMON_11: crate::nl_item = 0x20018; -pub const ABMON_12: crate::nl_item = 0x20019; - -pub const MON_1: crate::nl_item = 0x2001A; -pub const MON_2: crate::nl_item = 0x2001B; -pub const MON_3: crate::nl_item = 0x2001C; -pub const MON_4: crate::nl_item = 0x2001D; -pub const MON_5: crate::nl_item = 0x2001E; -pub const MON_6: crate::nl_item = 0x2001F; -pub const MON_7: crate::nl_item = 0x20020; -pub const MON_8: crate::nl_item = 0x20021; -pub const MON_9: crate::nl_item = 0x20022; -pub const MON_10: crate::nl_item = 0x20023; -pub const MON_11: crate::nl_item = 0x20024; -pub const MON_12: crate::nl_item = 0x20025; - -pub const AM_STR: crate::nl_item = 0x20026; -pub const PM_STR: crate::nl_item = 0x20027; - -pub const D_T_FMT: crate::nl_item = 0x20028; -pub const D_FMT: crate::nl_item = 0x20029; -pub const T_FMT: crate::nl_item = 0x2002A; -pub const T_FMT_AMPM: crate::nl_item = 0x2002B; - -pub const ERA: crate::nl_item = 0x2002C; -pub const ERA_D_FMT: crate::nl_item = 0x2002E; -pub const ALT_DIGITS: crate::nl_item = 0x2002F; -pub const ERA_D_T_FMT: crate::nl_item = 0x20030; -pub const ERA_T_FMT: crate::nl_item = 0x20031; - -pub const CODESET: crate::nl_item = 14; - -pub const CRNCYSTR: crate::nl_item = 0x4000F; - -pub const RUSAGE_THREAD: c_int = 1; -pub const RUSAGE_CHILDREN: c_int = -1; - -pub const RADIXCHAR: crate::nl_item = 0x10000; -pub const THOUSEP: crate::nl_item = 0x10001; - -pub const YESEXPR: crate::nl_item = 0x50000; -pub const NOEXPR: crate::nl_item = 0x50001; -pub const YESSTR: crate::nl_item = 0x50002; -pub const NOSTR: crate::nl_item = 0x50003; - -pub const FILENAME_MAX: c_uint = 4096; -pub const L_tmpnam: c_uint = 20; -pub const _PC_LINK_MAX: c_int = 0; -pub const _PC_MAX_CANON: c_int = 1; -pub const _PC_MAX_INPUT: c_int = 2; -pub const _PC_NAME_MAX: c_int = 3; -pub const _PC_PATH_MAX: c_int = 4; -pub const _PC_PIPE_BUF: c_int = 5; -pub const _PC_CHOWN_RESTRICTED: c_int = 6; -pub const _PC_NO_TRUNC: c_int = 7; -pub const _PC_VDISABLE: c_int = 8; -pub const _PC_SYNC_IO: c_int = 9; -pub const _PC_ASYNC_IO: c_int = 10; -pub const _PC_PRIO_IO: c_int = 11; -pub const _PC_SOCK_MAXBUF: c_int = 12; -pub const _PC_FILESIZEBITS: c_int = 13; -pub const _PC_REC_INCR_XFER_SIZE: c_int = 14; -pub const _PC_REC_MAX_XFER_SIZE: c_int = 15; -pub const _PC_REC_MIN_XFER_SIZE: c_int = 16; -pub const _PC_REC_XFER_ALIGN: c_int = 17; -pub const _PC_ALLOC_SIZE_MIN: c_int = 18; -pub const _PC_SYMLINK_MAX: 
c_int = 19; -pub const _PC_2_SYMLINKS: c_int = 20; - -pub const _SC_ARG_MAX: c_int = 0; -pub const _SC_CHILD_MAX: c_int = 1; -pub const _SC_CLK_TCK: c_int = 2; -pub const _SC_NGROUPS_MAX: c_int = 3; -pub const _SC_OPEN_MAX: c_int = 4; -pub const _SC_STREAM_MAX: c_int = 5; -pub const _SC_TZNAME_MAX: c_int = 6; -pub const _SC_JOB_CONTROL: c_int = 7; -pub const _SC_SAVED_IDS: c_int = 8; -pub const _SC_REALTIME_SIGNALS: c_int = 9; -pub const _SC_PRIORITY_SCHEDULING: c_int = 10; -pub const _SC_TIMERS: c_int = 11; -pub const _SC_ASYNCHRONOUS_IO: c_int = 12; -pub const _SC_PRIORITIZED_IO: c_int = 13; -pub const _SC_SYNCHRONIZED_IO: c_int = 14; -pub const _SC_FSYNC: c_int = 15; -pub const _SC_MAPPED_FILES: c_int = 16; -pub const _SC_MEMLOCK: c_int = 17; -pub const _SC_MEMLOCK_RANGE: c_int = 18; -pub const _SC_MEMORY_PROTECTION: c_int = 19; -pub const _SC_MESSAGE_PASSING: c_int = 20; -pub const _SC_SEMAPHORES: c_int = 21; -pub const _SC_SHARED_MEMORY_OBJECTS: c_int = 22; -pub const _SC_AIO_LISTIO_MAX: c_int = 23; -pub const _SC_AIO_MAX: c_int = 24; -pub const _SC_AIO_PRIO_DELTA_MAX: c_int = 25; -pub const _SC_DELAYTIMER_MAX: c_int = 26; -pub const _SC_MQ_OPEN_MAX: c_int = 27; -pub const _SC_MQ_PRIO_MAX: c_int = 28; -pub const _SC_VERSION: c_int = 29; -pub const _SC_PAGESIZE: c_int = 30; -pub const _SC_PAGE_SIZE: c_int = _SC_PAGESIZE; -pub const _SC_RTSIG_MAX: c_int = 31; -pub const _SC_SEM_NSEMS_MAX: c_int = 32; -pub const _SC_SEM_VALUE_MAX: c_int = 33; -pub const _SC_SIGQUEUE_MAX: c_int = 34; -pub const _SC_TIMER_MAX: c_int = 35; -pub const _SC_BC_BASE_MAX: c_int = 36; -pub const _SC_BC_DIM_MAX: c_int = 37; -pub const _SC_BC_SCALE_MAX: c_int = 38; -pub const _SC_BC_STRING_MAX: c_int = 39; -pub const _SC_COLL_WEIGHTS_MAX: c_int = 40; -pub const _SC_EXPR_NEST_MAX: c_int = 42; -pub const _SC_LINE_MAX: c_int = 43; -pub const _SC_RE_DUP_MAX: c_int = 44; -pub const _SC_2_VERSION: c_int = 46; -pub const _SC_2_C_BIND: c_int = 47; -pub const _SC_2_C_DEV: c_int = 48; -pub const _SC_2_FORT_DEV: c_int = 49; -pub const _SC_2_FORT_RUN: c_int = 50; -pub const _SC_2_SW_DEV: c_int = 51; -pub const _SC_2_LOCALEDEF: c_int = 52; -pub const _SC_UIO_MAXIOV: c_int = 60; -pub const _SC_IOV_MAX: c_int = 60; -pub const _SC_THREADS: c_int = 67; -pub const _SC_THREAD_SAFE_FUNCTIONS: c_int = 68; -pub const _SC_GETGR_R_SIZE_MAX: c_int = 69; -pub const _SC_GETPW_R_SIZE_MAX: c_int = 70; -pub const _SC_LOGIN_NAME_MAX: c_int = 71; -pub const _SC_TTY_NAME_MAX: c_int = 72; -pub const _SC_THREAD_DESTRUCTOR_ITERATIONS: c_int = 73; -pub const _SC_THREAD_KEYS_MAX: c_int = 74; -pub const _SC_THREAD_STACK_MIN: c_int = 75; -pub const _SC_THREAD_THREADS_MAX: c_int = 76; -pub const _SC_THREAD_ATTR_STACKADDR: c_int = 77; -pub const _SC_THREAD_ATTR_STACKSIZE: c_int = 78; -pub const _SC_THREAD_PRIORITY_SCHEDULING: c_int = 79; -pub const _SC_THREAD_PRIO_INHERIT: c_int = 80; -pub const _SC_THREAD_PRIO_PROTECT: c_int = 81; -pub const _SC_THREAD_PROCESS_SHARED: c_int = 82; -pub const _SC_NPROCESSORS_CONF: c_int = 83; -pub const _SC_NPROCESSORS_ONLN: c_int = 84; -pub const _SC_PHYS_PAGES: c_int = 85; -pub const _SC_AVPHYS_PAGES: c_int = 86; -pub const _SC_ATEXIT_MAX: c_int = 87; -pub const _SC_PASS_MAX: c_int = 88; -pub const _SC_XOPEN_VERSION: c_int = 89; -pub const _SC_XOPEN_XCU_VERSION: c_int = 90; -pub const _SC_XOPEN_UNIX: c_int = 91; -pub const _SC_XOPEN_CRYPT: c_int = 92; -pub const _SC_XOPEN_ENH_I18N: c_int = 93; -pub const _SC_XOPEN_SHM: c_int = 94; -pub const _SC_2_CHAR_TERM: c_int = 95; -pub const _SC_2_UPE: c_int = 97; -pub const 
_SC_XOPEN_XPG2: c_int = 98; -pub const _SC_XOPEN_XPG3: c_int = 99; -pub const _SC_XOPEN_XPG4: c_int = 100; -pub const _SC_NZERO: c_int = 109; -pub const _SC_XBS5_ILP32_OFF32: c_int = 125; -pub const _SC_XBS5_ILP32_OFFBIG: c_int = 126; -pub const _SC_XBS5_LP64_OFF64: c_int = 127; -pub const _SC_XBS5_LPBIG_OFFBIG: c_int = 128; -pub const _SC_XOPEN_LEGACY: c_int = 129; -pub const _SC_XOPEN_REALTIME: c_int = 130; -pub const _SC_XOPEN_REALTIME_THREADS: c_int = 131; -pub const _SC_ADVISORY_INFO: c_int = 132; -pub const _SC_BARRIERS: c_int = 133; -pub const _SC_CLOCK_SELECTION: c_int = 137; -pub const _SC_CPUTIME: c_int = 138; -pub const _SC_THREAD_CPUTIME: c_int = 139; -pub const _SC_MONOTONIC_CLOCK: c_int = 149; -pub const _SC_READER_WRITER_LOCKS: c_int = 153; -pub const _SC_SPIN_LOCKS: c_int = 154; -pub const _SC_REGEXP: c_int = 155; -pub const _SC_SHELL: c_int = 157; -pub const _SC_SPAWN: c_int = 159; -pub const _SC_SPORADIC_SERVER: c_int = 160; -pub const _SC_THREAD_SPORADIC_SERVER: c_int = 161; -pub const _SC_TIMEOUTS: c_int = 164; -pub const _SC_TYPED_MEMORY_OBJECTS: c_int = 165; -pub const _SC_2_PBS: c_int = 168; -pub const _SC_2_PBS_ACCOUNTING: c_int = 169; -pub const _SC_2_PBS_LOCATE: c_int = 170; -pub const _SC_2_PBS_MESSAGE: c_int = 171; -pub const _SC_2_PBS_TRACK: c_int = 172; -pub const _SC_SYMLOOP_MAX: c_int = 173; -pub const _SC_STREAMS: c_int = 174; -pub const _SC_2_PBS_CHECKPOINT: c_int = 175; -pub const _SC_V6_ILP32_OFF32: c_int = 176; -pub const _SC_V6_ILP32_OFFBIG: c_int = 177; -pub const _SC_V6_LP64_OFF64: c_int = 178; -pub const _SC_V6_LPBIG_OFFBIG: c_int = 179; -pub const _SC_HOST_NAME_MAX: c_int = 180; -pub const _SC_TRACE: c_int = 181; -pub const _SC_TRACE_EVENT_FILTER: c_int = 182; -pub const _SC_TRACE_INHERIT: c_int = 183; -pub const _SC_TRACE_LOG: c_int = 184; -pub const _SC_IPV6: c_int = 235; -pub const _SC_RAW_SOCKETS: c_int = 236; -pub const _SC_V7_ILP32_OFF32: c_int = 237; -pub const _SC_V7_ILP32_OFFBIG: c_int = 238; -pub const _SC_V7_LP64_OFF64: c_int = 239; -pub const _SC_V7_LPBIG_OFFBIG: c_int = 240; -pub const _SC_SS_REPL_MAX: c_int = 241; -pub const _SC_TRACE_EVENT_NAME_MAX: c_int = 242; -pub const _SC_TRACE_NAME_MAX: c_int = 243; -pub const _SC_TRACE_SYS_MAX: c_int = 244; -pub const _SC_TRACE_USER_EVENT_MAX: c_int = 245; -pub const _SC_XOPEN_STREAMS: c_int = 246; -pub const _SC_THREAD_ROBUST_PRIO_INHERIT: c_int = 247; -pub const _SC_THREAD_ROBUST_PRIO_PROTECT: c_int = 248; - -pub const RLIM_SAVED_MAX: crate::rlim_t = RLIM_INFINITY; -pub const RLIM_SAVED_CUR: crate::rlim_t = RLIM_INFINITY; - -pub const GLOB_ERR: c_int = 1 << 0; -pub const GLOB_MARK: c_int = 1 << 1; -pub const GLOB_NOSORT: c_int = 1 << 2; -pub const GLOB_DOOFFS: c_int = 1 << 3; -pub const GLOB_NOCHECK: c_int = 1 << 4; -pub const GLOB_APPEND: c_int = 1 << 5; -pub const GLOB_NOESCAPE: c_int = 1 << 6; - -pub const GLOB_NOSPACE: c_int = 1; -pub const GLOB_ABORTED: c_int = 2; -pub const GLOB_NOMATCH: c_int = 3; - -pub const POSIX_MADV_NORMAL: c_int = 0; -pub const POSIX_MADV_RANDOM: c_int = 1; -pub const POSIX_MADV_SEQUENTIAL: c_int = 2; -pub const POSIX_MADV_WILLNEED: c_int = 3; - -pub const AT_EACCESS: c_int = 0x200; - -pub const S_IEXEC: mode_t = 0o0100; -pub const S_IWRITE: mode_t = 0o0200; -pub const S_IREAD: mode_t = 0o0400; - -pub const F_LOCK: c_int = 1; -pub const F_TEST: c_int = 3; -pub const F_TLOCK: c_int = 2; -pub const F_ULOCK: c_int = 0; - -pub const ST_RDONLY: c_ulong = 1; -pub const ST_NOSUID: c_ulong = 2; -pub const ST_NODEV: c_ulong = 4; -pub const ST_NOEXEC: c_ulong = 8; -pub 
const ST_SYNCHRONOUS: c_ulong = 16; -pub const ST_MANDLOCK: c_ulong = 64; -pub const ST_WRITE: c_ulong = 128; -pub const ST_APPEND: c_ulong = 256; -pub const ST_IMMUTABLE: c_ulong = 512; -pub const ST_NOATIME: c_ulong = 1024; -pub const ST_NODIRATIME: c_ulong = 2048; - -pub const RTLD_NEXT: *mut c_void = -1i64 as *mut c_void; -pub const RTLD_DEFAULT: *mut c_void = ptr::null_mut(); -pub const RTLD_NODELETE: c_int = 0x1000; -pub const RTLD_NOW: c_int = 0x2; - -pub const PTHREAD_MUTEX_INITIALIZER: pthread_mutex_t = pthread_mutex_t { - size: [0; __SIZEOF_PTHREAD_MUTEX_T], -}; -pub const PTHREAD_COND_INITIALIZER: pthread_cond_t = pthread_cond_t { - size: [0; __SIZEOF_PTHREAD_COND_T], -}; -pub const PTHREAD_RWLOCK_INITIALIZER: pthread_rwlock_t = pthread_rwlock_t { - size: [0; __SIZEOF_PTHREAD_RWLOCK_T], -}; - -pub const PTHREAD_MUTEX_NORMAL: c_int = 0; -pub const PTHREAD_MUTEX_RECURSIVE: c_int = 1; -pub const PTHREAD_MUTEX_ERRORCHECK: c_int = 2; -pub const PTHREAD_MUTEX_DEFAULT: c_int = PTHREAD_MUTEX_NORMAL; -pub const PTHREAD_PROCESS_PRIVATE: c_int = 0; -pub const PTHREAD_PROCESS_SHARED: c_int = 1; -pub const __SIZEOF_PTHREAD_COND_T: usize = 48; - -pub const SCHED_OTHER: c_int = 0; -pub const SCHED_FIFO: c_int = 1; -pub const SCHED_RR: c_int = 2; -pub const SCHED_BATCH: c_int = 3; -pub const SCHED_IDLE: c_int = 5; - -pub const AF_IB: c_int = 27; -pub const AF_MPLS: c_int = 28; -pub const AF_NFC: c_int = 39; -pub const AF_VSOCK: c_int = 40; -pub const PF_IB: c_int = AF_IB; -pub const PF_MPLS: c_int = AF_MPLS; -pub const PF_NFC: c_int = AF_NFC; -pub const PF_VSOCK: c_int = AF_VSOCK; - -// System V IPC -pub const IPC_PRIVATE: crate::key_t = 0; - -pub const IPC_CREAT: c_int = 0o1000; -pub const IPC_EXCL: c_int = 0o2000; -pub const IPC_NOWAIT: c_int = 0o4000; - -pub const IPC_RMID: c_int = 0; -pub const IPC_SET: c_int = 1; -pub const IPC_STAT: c_int = 2; -pub const IPC_INFO: c_int = 3; -pub const MSG_STAT: c_int = 11; -pub const MSG_INFO: c_int = 12; - -pub const MSG_NOERROR: c_int = 0o10000; -pub const MSG_EXCEPT: c_int = 0o20000; - -pub const SHM_R: c_int = 0o400; -pub const SHM_W: c_int = 0o200; - -pub const SHM_RDONLY: c_int = 0o10000; -pub const SHM_RND: c_int = 0o20000; -pub const SHM_REMAP: c_int = 0o40000; -pub const SHM_EXEC: c_int = 0o100000; - -pub const SHM_LOCK: c_int = 11; -pub const SHM_UNLOCK: c_int = 12; - -pub const SHM_HUGETLB: c_int = 0o4000; -pub const SHM_NORESERVE: c_int = 0o10000; - -pub const LOG_NFACILITIES: c_int = 24; - -pub const SEM_FAILED: *mut crate::sem_t = ptr::null_mut(); - -pub const AI_PASSIVE: c_int = 0x0001; -pub const AI_CANONNAME: c_int = 0x0002; -pub const AI_NUMERICHOST: c_int = 0x0004; -pub const AI_V4MAPPED: c_int = 0x0008; -pub const AI_ALL: c_int = 0x0010; -pub const AI_ADDRCONFIG: c_int = 0x0020; - -pub const AI_NUMERICSERV: c_int = 0x0400; - -pub const EAI_BADFLAGS: c_int = -1; -pub const EAI_NONAME: c_int = -2; -pub const EAI_AGAIN: c_int = -3; -pub const EAI_FAIL: c_int = -4; -pub const EAI_FAMILY: c_int = -6; -pub const EAI_SOCKTYPE: c_int = -7; -pub const EAI_SERVICE: c_int = -8; -pub const EAI_MEMORY: c_int = -10; -pub const EAI_OVERFLOW: c_int = -12; - -pub const NI_NUMERICHOST: c_int = 1; -pub const NI_NUMERICSERV: c_int = 2; -pub const NI_NOFQDN: c_int = 4; -pub const NI_NAMEREQD: c_int = 8; -pub const NI_DGRAM: c_int = 16; - -pub const SYNC_FILE_RANGE_WAIT_BEFORE: c_uint = 1; -pub const SYNC_FILE_RANGE_WRITE: c_uint = 2; -pub const SYNC_FILE_RANGE_WAIT_AFTER: c_uint = 4; - -pub const EAI_SYSTEM: c_int = -11; - -pub const MREMAP_MAYMOVE: c_int 
= 1; -pub const MREMAP_FIXED: c_int = 2; - -pub const ITIMER_REAL: c_int = 0; -pub const ITIMER_VIRTUAL: c_int = 1; -pub const ITIMER_PROF: c_int = 2; - -pub const _POSIX_VDISABLE: crate::cc_t = 0; - -pub const FALLOC_FL_KEEP_SIZE: c_int = 0x01; -pub const FALLOC_FL_PUNCH_HOLE: c_int = 0x02; - -pub const NCCS: usize = 32; - -pub const O_TRUNC: c_int = 512; -pub const O_NOATIME: c_int = 0o1000000; -pub const O_CLOEXEC: c_int = 0x80000; - -// Defined as wasi value. -pub const EPERM: c_int = 63; -pub const ENOENT: c_int = 44; -pub const ESRCH: c_int = 71; -pub const EINTR: c_int = 27; -pub const EIO: c_int = 29; -pub const ENXIO: c_int = 60; -pub const E2BIG: c_int = 1; -pub const ENOEXEC: c_int = 45; -pub const EBADF: c_int = 8; -pub const ECHILD: c_int = 12; -pub const EAGAIN: c_int = 6; -pub const ENOMEM: c_int = 48; -pub const EACCES: c_int = 2; -pub const EFAULT: c_int = 21; -pub const ENOTBLK: c_int = 105; -pub const EBUSY: c_int = 10; -pub const EEXIST: c_int = 20; -pub const EXDEV: c_int = 75; -pub const ENODEV: c_int = 43; -pub const ENOTDIR: c_int = 54; -pub const EISDIR: c_int = 31; -pub const EINVAL: c_int = 28; -pub const ENFILE: c_int = 41; -pub const EMFILE: c_int = 33; -pub const ENOTTY: c_int = 59; -pub const ETXTBSY: c_int = 74; -pub const EFBIG: c_int = 22; -pub const ENOSPC: c_int = 51; -pub const ESPIPE: c_int = 70; -pub const EROFS: c_int = 69; -pub const EMLINK: c_int = 34; -pub const EPIPE: c_int = 64; -pub const EDOM: c_int = 18; -pub const ERANGE: c_int = 68; -pub const EWOULDBLOCK: c_int = EAGAIN; -pub const ENOLINK: c_int = 47; -pub const EPROTO: c_int = 65; -pub const EDEADLK: c_int = 16; -pub const EDEADLOCK: c_int = EDEADLK; -pub const ENAMETOOLONG: c_int = 37; -pub const ENOLCK: c_int = 46; -pub const ENOSYS: c_int = 52; -pub const ENOTEMPTY: c_int = 55; -pub const ELOOP: c_int = 32; -pub const ENOMSG: c_int = 49; -pub const EIDRM: c_int = 24; -pub const EMULTIHOP: c_int = 36; -pub const EBADMSG: c_int = 9; -pub const EOVERFLOW: c_int = 61; -pub const EILSEQ: c_int = 25; -pub const ENOTSOCK: c_int = 57; -pub const EDESTADDRREQ: c_int = 17; -pub const EMSGSIZE: c_int = 35; -pub const EPROTOTYPE: c_int = 67; -pub const ENOPROTOOPT: c_int = 50; -pub const EPROTONOSUPPORT: c_int = 66; -pub const EAFNOSUPPORT: c_int = 5; -pub const EADDRINUSE: c_int = 3; -pub const EADDRNOTAVAIL: c_int = 4; -pub const ENETDOWN: c_int = 38; -pub const ENETUNREACH: c_int = 40; -pub const ENETRESET: c_int = 39; -pub const ECONNABORTED: c_int = 13; -pub const ECONNRESET: c_int = 15; -pub const ENOBUFS: c_int = 42; -pub const EISCONN: c_int = 30; -pub const ENOTCONN: c_int = 53; -pub const ETIMEDOUT: c_int = 73; -pub const ECONNREFUSED: c_int = 14; -pub const EHOSTUNREACH: c_int = 23; -pub const EALREADY: c_int = 7; -pub const EINPROGRESS: c_int = 26; -pub const ESTALE: c_int = 72; -pub const EDQUOT: c_int = 19; -pub const ECANCELED: c_int = 11; -pub const EOWNERDEAD: c_int = 62; -pub const ENOTRECOVERABLE: c_int = 56; - -pub const ENOSTR: c_int = 100; -pub const EBFONT: c_int = 101; -pub const EBADSLT: c_int = 102; -pub const EBADRQC: c_int = 103; -pub const ENOANO: c_int = 104; -pub const ECHRNG: c_int = 106; -pub const EL3HLT: c_int = 107; -pub const EL3RST: c_int = 108; -pub const ELNRNG: c_int = 109; -pub const EUNATCH: c_int = 110; -pub const ENOCSI: c_int = 111; -pub const EL2HLT: c_int = 112; -pub const EBADE: c_int = 113; -pub const EBADR: c_int = 114; -pub const EXFULL: c_int = 115; -pub const ENODATA: c_int = 116; -pub const ETIME: c_int = 117; -pub const ENOSR: c_int = 118; 
-pub const ENONET: c_int = 119; -pub const ENOPKG: c_int = 120; -pub const EREMOTE: c_int = 121; -pub const EADV: c_int = 122; -pub const ESRMNT: c_int = 123; -pub const ECOMM: c_int = 124; -pub const EDOTDOT: c_int = 125; -pub const ENOTUNIQ: c_int = 126; -pub const EBADFD: c_int = 127; -pub const EREMCHG: c_int = 128; -pub const ELIBACC: c_int = 129; -pub const ELIBBAD: c_int = 130; -pub const ELIBSCN: c_int = 131; -pub const ELIBMAX: c_int = 132; -pub const ELIBEXEC: c_int = 133; -pub const ERESTART: c_int = 134; -pub const ESTRPIPE: c_int = 135; -pub const EUSERS: c_int = 136; -pub const ESOCKTNOSUPPORT: c_int = 137; -pub const EOPNOTSUPP: c_int = 138; -pub const ENOTSUP: c_int = EOPNOTSUPP; -pub const EPFNOSUPPORT: c_int = 139; -pub const ESHUTDOWN: c_int = 140; -pub const ETOOMANYREFS: c_int = 141; -pub const EHOSTDOWN: c_int = 142; -pub const EUCLEAN: c_int = 143; -pub const ENOTNAM: c_int = 144; -pub const ENAVAIL: c_int = 145; -pub const EISNAM: c_int = 146; -pub const EREMOTEIO: c_int = 147; -pub const ENOMEDIUM: c_int = 148; -pub const EMEDIUMTYPE: c_int = 149; -pub const ENOKEY: c_int = 150; -pub const EKEYEXPIRED: c_int = 151; -pub const EKEYREVOKED: c_int = 152; -pub const EKEYREJECTED: c_int = 153; -pub const ERFKILL: c_int = 154; -pub const EHWPOISON: c_int = 155; -pub const EL2NSYNC: c_int = 156; - -pub const SA_NODEFER: c_int = 0x40000000; -pub const SA_RESETHAND: c_int = 0x80000000; -pub const SA_RESTART: c_int = 0x10000000; -pub const SA_NOCLDSTOP: c_int = 0x00000001; - -pub const BUFSIZ: c_uint = 1024; -pub const TMP_MAX: c_uint = 10000; -pub const FOPEN_MAX: c_uint = 1000; -pub const O_PATH: c_int = 0o10000000; -pub const O_EXEC: c_int = 0o10000000; -pub const O_SEARCH: c_int = 0o10000000; -pub const O_ACCMODE: c_int = 0o10000003; -pub const O_NDELAY: c_int = O_NONBLOCK; -pub const NI_MAXHOST: crate::socklen_t = 255; -pub const PTHREAD_STACK_MIN: size_t = 2048; -pub const POSIX_FADV_DONTNEED: c_int = 4; -pub const POSIX_FADV_NOREUSE: c_int = 5; - -pub const POSIX_MADV_DONTNEED: c_int = 4; - -pub const RLIM_INFINITY: crate::rlim_t = !0; -#[deprecated(since = "0.2.64", note = "Not stable across OS versions")] -pub const RLIMIT_NLIMITS: c_int = 16; -#[allow(deprecated)] -#[deprecated(since = "0.2.64", note = "Not stable across OS versions")] -pub const RLIM_NLIMITS: c_int = RLIMIT_NLIMITS; - -pub const MAP_ANONYMOUS: c_int = MAP_ANON; - -#[doc(hidden)] -#[deprecated(since = "0.2.55", note = "Use SIGSYS instead")] -pub const SIGUNUSED: c_int = crate::SIGSYS; - -pub const __SIZEOF_PTHREAD_CONDATTR_T: usize = 4; -pub const __SIZEOF_PTHREAD_MUTEXATTR_T: usize = 4; -pub const __SIZEOF_PTHREAD_RWLOCKATTR_T: usize = 8; - -pub const CPU_SETSIZE: c_int = 1024; - -pub const TCSANOW: c_int = 0; -pub const TCSADRAIN: c_int = 1; -pub const TCSAFLUSH: c_int = 2; - -pub const TIOCINQ: c_int = crate::FIONREAD; - -pub const RTLD_GLOBAL: c_int = 0x100; -pub const RTLD_NOLOAD: c_int = 0x4; - -pub const CLOCK_SGI_CYCLE: crate::clockid_t = 10; - -pub const MCL_CURRENT: c_int = 0x0001; -pub const MCL_FUTURE: c_int = 0x0002; - -pub const SIGSTKSZ: size_t = 8192; -pub const MINSIGSTKSZ: size_t = 2048; -pub const CBAUD: crate::tcflag_t = 0o0010017; -pub const TAB1: c_int = 0x00000800; -pub const TAB2: c_int = 0x00001000; -pub const TAB3: c_int = 0x00001800; -pub const CR1: c_int = 0x00000200; -pub const CR2: c_int = 0x00000400; -pub const CR3: c_int = 0x00000600; -pub const FF1: c_int = 0x00008000; -pub const BS1: c_int = 0x00002000; -pub const VT1: c_int = 0x00004000; -pub const VWERASE: usize 
= 14; -pub const VREPRINT: usize = 12; -pub const VSUSP: usize = 10; -pub const VSTART: usize = 8; -pub const VSTOP: usize = 9; -pub const VDISCARD: usize = 13; -pub const VTIME: usize = 5; -pub const IXON: crate::tcflag_t = 0x00000400; -pub const IXOFF: crate::tcflag_t = 0x00001000; -pub const ONLCR: crate::tcflag_t = 0x4; -pub const CSIZE: crate::tcflag_t = 0x00000030; -pub const CS6: crate::tcflag_t = 0x00000010; -pub const CS7: crate::tcflag_t = 0x00000020; -pub const CS8: crate::tcflag_t = 0x00000030; -pub const CSTOPB: crate::tcflag_t = 0x00000040; -pub const CREAD: crate::tcflag_t = 0x00000080; -pub const PARENB: crate::tcflag_t = 0x00000100; -pub const PARODD: crate::tcflag_t = 0x00000200; -pub const HUPCL: crate::tcflag_t = 0x00000400; -pub const CLOCAL: crate::tcflag_t = 0x00000800; -pub const ECHOKE: crate::tcflag_t = 0x00000800; -pub const ECHOE: crate::tcflag_t = 0x00000010; -pub const ECHOK: crate::tcflag_t = 0x00000020; -pub const ECHONL: crate::tcflag_t = 0x00000040; -pub const ECHOPRT: crate::tcflag_t = 0x00000400; -pub const ECHOCTL: crate::tcflag_t = 0x00000200; -pub const ISIG: crate::tcflag_t = 0x00000001; -pub const ICANON: crate::tcflag_t = 0x00000002; -pub const PENDIN: crate::tcflag_t = 0x00004000; -pub const NOFLSH: crate::tcflag_t = 0x00000080; -pub const CBAUDEX: crate::tcflag_t = 0o010000; -pub const VSWTC: usize = 7; -pub const OLCUC: crate::tcflag_t = 0o000002; -pub const NLDLY: crate::tcflag_t = 0o000400; -pub const CRDLY: crate::tcflag_t = 0o003000; -pub const TABDLY: crate::tcflag_t = 0o014000; -pub const BSDLY: crate::tcflag_t = 0o020000; -pub const FFDLY: crate::tcflag_t = 0o100000; -pub const VTDLY: crate::tcflag_t = 0o040000; -pub const XTABS: crate::tcflag_t = 0o014000; - -pub const B0: crate::speed_t = 0o000000; -pub const B50: crate::speed_t = 0o000001; -pub const B75: crate::speed_t = 0o000002; -pub const B110: crate::speed_t = 0o000003; -pub const B134: crate::speed_t = 0o000004; -pub const B150: crate::speed_t = 0o000005; -pub const B200: crate::speed_t = 0o000006; -pub const B300: crate::speed_t = 0o000007; -pub const B600: crate::speed_t = 0o000010; -pub const B1200: crate::speed_t = 0o000011; -pub const B1800: crate::speed_t = 0o000012; -pub const B2400: crate::speed_t = 0o000013; -pub const B4800: crate::speed_t = 0o000014; -pub const B9600: crate::speed_t = 0o000015; -pub const B19200: crate::speed_t = 0o000016; -pub const B38400: crate::speed_t = 0o000017; -pub const B57600: crate::speed_t = 0o010001; -pub const B115200: crate::speed_t = 0o010002; -pub const B230400: crate::speed_t = 0o010003; -pub const B460800: crate::speed_t = 0o010004; -pub const B500000: crate::speed_t = 0o010005; -pub const B576000: crate::speed_t = 0o010006; -pub const B921600: crate::speed_t = 0o010007; -pub const B1000000: crate::speed_t = 0o010010; -pub const B1152000: crate::speed_t = 0o010011; -pub const B1500000: crate::speed_t = 0o010012; -pub const B2000000: crate::speed_t = 0o010013; -pub const B2500000: crate::speed_t = 0o010014; -pub const B3000000: crate::speed_t = 0o010015; -pub const B3500000: crate::speed_t = 0o010016; -pub const B4000000: crate::speed_t = 0o010017; - -pub const SO_BINDTODEVICE: c_int = 25; -pub const SO_TIMESTAMP: c_int = 63; -pub const SO_MARK: c_int = 36; -pub const SO_RXQ_OVFL: c_int = 40; -pub const SO_PEEK_OFF: c_int = 42; -pub const SO_BUSY_POLL: c_int = 46; - -pub const __SIZEOF_PTHREAD_RWLOCK_T: usize = 32; -pub const __SIZEOF_PTHREAD_MUTEX_T: usize = 24; - -pub const O_DIRECT: c_int = 0x4000; -pub const O_DIRECTORY: c_int = 
0x10000; -pub const O_NOFOLLOW: c_int = 0x20000; -pub const O_ASYNC: c_int = 0x2000; - -pub const FIOCLEX: c_int = 0x5451; -pub const FIONBIO: c_int = 0x5421; - -pub const RLIMIT_RSS: c_int = 5; -pub const RLIMIT_NOFILE: c_int = 7; -pub const RLIMIT_AS: c_int = 9; -pub const RLIMIT_NPROC: c_int = 6; -pub const RLIMIT_MEMLOCK: c_int = 8; -pub const RLIMIT_CPU: c_int = 0; -pub const RLIMIT_FSIZE: c_int = 1; -pub const RLIMIT_DATA: c_int = 2; -pub const RLIMIT_STACK: c_int = 3; -pub const RLIMIT_CORE: c_int = 4; -pub const RLIMIT_LOCKS: c_int = 10; -pub const RLIMIT_SIGPENDING: c_int = 11; -pub const RLIMIT_MSGQUEUE: c_int = 12; -pub const RLIMIT_NICE: c_int = 13; -pub const RLIMIT_RTPRIO: c_int = 14; - -pub const O_APPEND: c_int = 1024; -pub const O_CREAT: c_int = 64; -pub const O_EXCL: c_int = 128; -pub const O_NOCTTY: c_int = 256; -pub const O_NONBLOCK: c_int = 2048; -pub const O_SYNC: c_int = 1052672; -pub const O_RSYNC: c_int = 1052672; -pub const O_DSYNC: c_int = 4096; - -pub const SOCK_NONBLOCK: c_int = 2048; - -pub const MAP_ANON: c_int = 0x0020; -pub const MAP_GROWSDOWN: c_int = 0x0100; -pub const MAP_DENYWRITE: c_int = 0x0800; -pub const MAP_EXECUTABLE: c_int = 0x01000; -pub const MAP_LOCKED: c_int = 0x02000; -pub const MAP_NORESERVE: c_int = 0x04000; -pub const MAP_POPULATE: c_int = 0x08000; -pub const MAP_NONBLOCK: c_int = 0x010000; -pub const MAP_STACK: c_int = 0x020000; - -pub const SOCK_STREAM: c_int = 1; -pub const SOCK_DGRAM: c_int = 2; -pub const SOCK_SEQPACKET: c_int = 5; - -pub const IPPROTO_MAX: c_int = 263; - -pub const SOL_SOCKET: c_int = 1; - -pub const SO_REUSEADDR: c_int = 2; -pub const SO_TYPE: c_int = 3; -pub const SO_ERROR: c_int = 4; -pub const SO_DONTROUTE: c_int = 5; -pub const SO_BROADCAST: c_int = 6; -pub const SO_SNDBUF: c_int = 7; -pub const SO_RCVBUF: c_int = 8; -pub const SO_KEEPALIVE: c_int = 9; -pub const SO_OOBINLINE: c_int = 10; -pub const SO_LINGER: c_int = 13; -pub const SO_REUSEPORT: c_int = 15; -pub const SO_RCVLOWAT: c_int = 18; -pub const SO_SNDLOWAT: c_int = 19; -pub const SO_RCVTIMEO: c_int = 66; -pub const SO_SNDTIMEO: c_int = 67; -pub const SO_ACCEPTCONN: c_int = 30; - -pub const IPV6_RTHDR_LOOSE: c_int = 0; -pub const IPV6_RTHDR_STRICT: c_int = 1; - -pub const SA_ONSTACK: c_int = 0x08000000; -pub const SA_SIGINFO: c_int = 0x00000004; -pub const SA_NOCLDWAIT: c_int = 0x00000002; - -pub const SIGCHLD: c_int = 17; -pub const SIGBUS: c_int = 7; -pub const SIGTTIN: c_int = 21; -pub const SIGTTOU: c_int = 22; -pub const SIGXCPU: c_int = 24; -pub const SIGXFSZ: c_int = 25; -pub const SIGVTALRM: c_int = 26; -pub const SIGPROF: c_int = 27; -pub const SIGWINCH: c_int = 28; -pub const SIGUSR1: c_int = 10; -pub const SIGUSR2: c_int = 12; -pub const SIGCONT: c_int = 18; -pub const SIGSTOP: c_int = 19; -pub const SIGTSTP: c_int = 20; -pub const SIGURG: c_int = 23; -pub const SIGIO: c_int = 29; -pub const SIGSYS: c_int = 31; -pub const SIGSTKFLT: c_int = 16; -pub const SIGPOLL: c_int = 29; -pub const SIGPWR: c_int = 30; -pub const SIG_SETMASK: c_int = 2; -pub const SIG_BLOCK: c_int = 0x000000; -pub const SIG_UNBLOCK: c_int = 0x01; - -pub const EXTPROC: crate::tcflag_t = 0x00010000; - -pub const MAP_HUGETLB: c_int = 0x040000; - -pub const F_GETLK: c_int = 12; -pub const F_GETOWN: c_int = 9; -pub const F_SETLK: c_int = 13; -pub const F_SETLKW: c_int = 14; -pub const F_SETOWN: c_int = 8; -pub const F_OFD_GETLK: c_int = 36; -pub const F_OFD_SETLK: c_int = 37; -pub const F_OFD_SETLKW: c_int = 38; - -pub const VEOF: usize = 4; -pub const VEOL: usize = 11; -pub 
const VEOL2: usize = 16; -pub const VMIN: usize = 6; -pub const IEXTEN: crate::tcflag_t = 0x00008000; -pub const TOSTOP: crate::tcflag_t = 0x00000100; -pub const FLUSHO: crate::tcflag_t = 0x00001000; - -pub const TCGETS: c_int = 0x5401; -pub const TCSETS: c_int = 0x5402; -pub const TCSETSW: c_int = 0x5403; -pub const TCSETSF: c_int = 0x5404; -pub const TCGETA: c_int = 0x5405; -pub const TCSETA: c_int = 0x5406; -pub const TCSETAW: c_int = 0x5407; -pub const TCSETAF: c_int = 0x5408; -pub const TCSBRK: c_int = 0x5409; -pub const TCXONC: c_int = 0x540A; -pub const TCFLSH: c_int = 0x540B; -pub const TIOCGSOFTCAR: c_int = 0x5419; -pub const TIOCSSOFTCAR: c_int = 0x541A; -pub const TIOCLINUX: c_int = 0x541C; -pub const TIOCGSERIAL: c_int = 0x541E; -pub const TIOCEXCL: c_int = 0x540C; -pub const TIOCNXCL: c_int = 0x540D; -pub const TIOCSCTTY: c_int = 0x540E; -pub const TIOCGPGRP: c_int = 0x540F; -pub const TIOCSPGRP: c_int = 0x5410; -pub const TIOCOUTQ: c_int = 0x5411; -pub const TIOCSTI: c_int = 0x5412; -pub const TIOCGWINSZ: c_int = 0x5413; -pub const TIOCSWINSZ: c_int = 0x5414; -pub const TIOCMGET: c_int = 0x5415; -pub const TIOCMBIS: c_int = 0x5416; -pub const TIOCMBIC: c_int = 0x5417; -pub const TIOCMSET: c_int = 0x5418; -pub const FIONREAD: c_int = 0x541B; -pub const TIOCCONS: c_int = 0x541D; - -pub const SYS_gettid: c_long = 224; // Valid for arm (32-bit) and x86 (32-bit) - -pub const POLLWRNORM: c_short = 0x100; -pub const POLLWRBAND: c_short = 0x200; - -pub const TIOCM_LE: c_int = 0x001; -pub const TIOCM_DTR: c_int = 0x002; -pub const TIOCM_RTS: c_int = 0x004; -pub const TIOCM_ST: c_int = 0x008; -pub const TIOCM_SR: c_int = 0x010; -pub const TIOCM_CTS: c_int = 0x020; -pub const TIOCM_CAR: c_int = 0x040; -pub const TIOCM_RNG: c_int = 0x080; -pub const TIOCM_DSR: c_int = 0x100; -pub const TIOCM_CD: c_int = TIOCM_CAR; -pub const TIOCM_RI: c_int = TIOCM_RNG; -pub const O_TMPFILE: c_int = 0x410000; - -pub const MAX_ADDR_LEN: usize = 7; -pub const ARPD_UPDATE: c_ushort = 0x01; -pub const ARPD_LOOKUP: c_ushort = 0x02; -pub const ARPD_FLUSH: c_ushort = 0x03; -pub const ATF_MAGIC: c_int = 0x80; - -pub const PRIO_PROCESS: c_int = 0; -pub const PRIO_PGRP: c_int = 1; -pub const PRIO_USER: c_int = 2; - -pub const SOMAXCONN: c_int = 128; - -f! 
{ - pub fn CMSG_NXTHDR(mhdr: *const msghdr, cmsg: *const cmsghdr) -> *mut cmsghdr { - if ((*cmsg).cmsg_len as usize) < size_of::() { - return core::ptr::null_mut::(); - } - let next = (cmsg as usize + super::CMSG_ALIGN((*cmsg).cmsg_len as usize)) as *mut cmsghdr; - let max = (*mhdr).msg_control as usize + (*mhdr).msg_controllen as usize; - if (next.offset(1)) as usize > max { - core::ptr::null_mut::() - } else { - next as *mut cmsghdr - } - } - - pub fn CPU_ZERO(cpuset: &mut cpu_set_t) -> () { - for slot in cpuset.bits.iter_mut() { - *slot = 0; - } - } - - pub fn CPU_SET(cpu: usize, cpuset: &mut cpu_set_t) -> () { - let size_in_bits = 8 * size_of_val(&cpuset.bits[0]); // 32, 64 etc - let (idx, offset) = (cpu / size_in_bits, cpu % size_in_bits); - cpuset.bits[idx] |= 1 << offset; - () - } - - pub fn CPU_CLR(cpu: usize, cpuset: &mut cpu_set_t) -> () { - let size_in_bits = 8 * size_of_val(&cpuset.bits[0]); // 32, 64 etc - let (idx, offset) = (cpu / size_in_bits, cpu % size_in_bits); - cpuset.bits[idx] &= !(1 << offset); - () - } - - pub fn CPU_ISSET(cpu: usize, cpuset: &cpu_set_t) -> bool { - let size_in_bits = 8 * size_of_val(&cpuset.bits[0]); - let (idx, offset) = (cpu / size_in_bits, cpu % size_in_bits); - 0 != (cpuset.bits[idx] & (1 << offset)) - } - - pub fn CPU_EQUAL(set1: &cpu_set_t, set2: &cpu_set_t) -> bool { - set1.bits == set2.bits - } -} - -safe_f! { - pub const fn makedev(major: c_uint, minor: c_uint) -> crate::dev_t { - let major = major as crate::dev_t; - let minor = minor as crate::dev_t; - let mut dev = 0; - dev |= (major & 0xfffff000) << 31 << 1; - dev |= (major & 0x00000fff) << 8; - dev |= (minor & 0xffffff00) << 12; - dev |= minor & 0x000000ff; - dev - } - - pub const fn major(dev: crate::dev_t) -> c_uint { - // see - // https://github.com/emscripten-core/emscripten/blob/ - // main/system/lib/libc/musl/include/sys/sysmacros.h - let mut major = 0; - major |= (dev >> 31 >> 1) & 0xfffff000; - major |= (dev >> 8) & 0x00000fff; - major as c_uint - } - - pub const fn minor(dev: crate::dev_t) -> c_uint { - // see - // https://github.com/emscripten-core/emscripten/blob/ - // main/system/lib/libc/musl/include/sys/sysmacros.h - let mut minor = 0; - minor |= (dev >> 12) & 0xffffff00; - minor |= dev & 0x000000ff; - minor as c_uint - } -} - -extern "C" { - pub fn getrlimit(resource: c_int, rlim: *mut crate::rlimit) -> c_int; - pub fn setrlimit(resource: c_int, rlim: *const crate::rlimit) -> c_int; - pub fn strerror_r(errnum: c_int, buf: *mut c_char, buflen: size_t) -> c_int; - - pub fn abs(i: c_int) -> c_int; - pub fn labs(i: c_long) -> c_long; - pub fn rand() -> c_int; - pub fn srand(seed: c_uint); - - pub fn gettimeofday(tp: *mut crate::timeval, tz: *mut c_void) -> c_int; - - pub fn setpwent(); - pub fn endpwent(); - pub fn getpwent() -> *mut passwd; - - pub fn shm_open(name: *const c_char, oflag: c_int, mode: mode_t) -> c_int; - - pub fn mprotect(addr: *mut c_void, len: size_t, prot: c_int) -> c_int; - pub fn __errno_location() -> *mut c_int; - - pub fn posix_fallocate(fd: c_int, offset: off_t, len: off_t) -> c_int; - pub fn pwritev(fd: c_int, iov: *const crate::iovec, iovcnt: c_int, offset: off_t) -> ssize_t; - pub fn preadv(fd: c_int, iov: *const crate::iovec, iovcnt: c_int, offset: off_t) -> ssize_t; - pub fn dup3(oldfd: c_int, newfd: c_int, flags: c_int) -> c_int; - pub fn nl_langinfo_l(item: crate::nl_item, locale: crate::locale_t) -> *mut c_char; - pub fn accept4( - fd: c_int, - addr: *mut crate::sockaddr, - len: *mut crate::socklen_t, - flg: c_int, - ) -> c_int; - pub fn 
getnameinfo( - sa: *const crate::sockaddr, - salen: crate::socklen_t, - host: *mut c_char, - hostlen: crate::socklen_t, - serv: *mut c_char, - servlen: crate::socklen_t, - flags: c_int, - ) -> c_int; - pub fn getloadavg(loadavg: *mut c_double, nelem: c_int) -> c_int; - - pub fn mkfifoat(dirfd: c_int, pathname: *const c_char, mode: mode_t) -> c_int; - pub fn if_nameindex() -> *mut if_nameindex; - pub fn if_freenameindex(ptr: *mut if_nameindex); - - pub fn mremap( - addr: *mut c_void, - len: size_t, - new_len: size_t, - flags: c_int, - ... - ) -> *mut c_void; - - pub fn glob( - pattern: *const c_char, - flags: c_int, - errfunc: Option c_int>, - pglob: *mut crate::glob_t, - ) -> c_int; - pub fn globfree(pglob: *mut crate::glob_t); - - pub fn posix_madvise(addr: *mut c_void, len: size_t, advice: c_int) -> c_int; - - pub fn shm_unlink(name: *const c_char) -> c_int; - - pub fn seekdir(dirp: *mut crate::DIR, loc: c_long); - - pub fn telldir(dirp: *mut crate::DIR) -> c_long; - pub fn madvise(addr: *mut c_void, len: size_t, advice: c_int) -> c_int; - - pub fn msync(addr: *mut c_void, len: size_t, flags: c_int) -> c_int; - - pub fn recvfrom( - socket: c_int, - buf: *mut c_void, - len: size_t, - flags: c_int, - addr: *mut crate::sockaddr, - addrlen: *mut crate::socklen_t, - ) -> ssize_t; - pub fn mkstemps(template: *mut c_char, suffixlen: c_int) -> c_int; - pub fn nl_langinfo(item: crate::nl_item) -> *mut c_char; - - pub fn sendmmsg( - sockfd: c_int, - msgvec: *mut crate::mmsghdr, - vlen: c_uint, - flags: c_uint, - ) -> c_int; - pub fn recvmmsg( - sockfd: c_int, - msgvec: *mut crate::mmsghdr, - vlen: c_uint, - flags: c_uint, - timeout: *mut crate::timespec, - ) -> c_int; - pub fn sync(); - pub fn ioctl(fd: c_int, request: c_int, ...) -> c_int; - pub fn getpriority(which: c_int, who: crate::id_t) -> c_int; - pub fn setpriority(which: c_int, who: crate::id_t, prio: c_int) -> c_int; - pub fn pthread_create( - native: *mut crate::pthread_t, - attr: *const crate::pthread_attr_t, - f: extern "C" fn(*mut c_void) -> *mut c_void, - value: *mut c_void, - ) -> c_int; - - pub fn getentropy(buf: *mut c_void, buflen: size_t) -> c_int; - - pub fn getpwnam_r( - name: *const c_char, - pwd: *mut passwd, - buf: *mut c_char, - buflen: size_t, - result: *mut *mut passwd, - ) -> c_int; - pub fn getpwuid_r( - uid: crate::uid_t, - pwd: *mut passwd, - buf: *mut c_char, - buflen: size_t, - result: *mut *mut passwd, - ) -> c_int; - - // grp.h - pub fn getgrgid(gid: crate::gid_t) -> *mut crate::group; - pub fn getgrnam(name: *const c_char) -> *mut crate::group; - pub fn getgrnam_r( - name: *const c_char, - grp: *mut crate::group, - buf: *mut c_char, - buflen: size_t, - result: *mut *mut crate::group, - ) -> c_int; - pub fn getgrgid_r( - gid: crate::gid_t, - grp: *mut crate::group, - buf: *mut c_char, - buflen: size_t, - result: *mut *mut crate::group, - ) -> c_int; -} - -// Alias to 64 to mimic glibc's LFS64 support -mod lfs64; -pub use self::lfs64::*; diff --git a/vendor/libc/src/unix/linux_like/linux/arch/generic/mod.rs b/vendor/libc/src/unix/linux_like/linux/arch/generic/mod.rs deleted file mode 100644 index 465ceddeab64ec..00000000000000 --- a/vendor/libc/src/unix/linux_like/linux/arch/generic/mod.rs +++ /dev/null @@ -1,334 +0,0 @@ -use crate::prelude::*; -use crate::Ioctl; - -s! 
{ - pub struct termios2 { - pub c_iflag: crate::tcflag_t, - pub c_oflag: crate::tcflag_t, - pub c_cflag: crate::tcflag_t, - pub c_lflag: crate::tcflag_t, - pub c_line: crate::cc_t, - pub c_cc: [crate::cc_t; 19], - pub c_ispeed: crate::speed_t, - pub c_ospeed: crate::speed_t, - } -} - -// include/uapi/asm-generic/socket.h -// arch/alpha/include/uapi/asm/socket.h -// tools/include/uapi/asm-generic/socket.h -// arch/mips/include/uapi/asm/socket.h -pub const SOL_SOCKET: c_int = 1; - -// Defined in unix/linux_like/mod.rs -// pub const SO_DEBUG: c_int = 1; -pub const SO_REUSEADDR: c_int = 2; -pub const SO_TYPE: c_int = 3; -pub const SO_ERROR: c_int = 4; -pub const SO_DONTROUTE: c_int = 5; -pub const SO_BROADCAST: c_int = 6; -pub const SO_SNDBUF: c_int = 7; -pub const SO_RCVBUF: c_int = 8; -pub const SO_KEEPALIVE: c_int = 9; -pub const SO_OOBINLINE: c_int = 10; -pub const SO_NO_CHECK: c_int = 11; -pub const SO_PRIORITY: c_int = 12; -pub const SO_LINGER: c_int = 13; -pub const SO_BSDCOMPAT: c_int = 14; -pub const SO_REUSEPORT: c_int = 15; -pub const SO_PASSCRED: c_int = 16; -pub const SO_PEERCRED: c_int = 17; -pub const SO_RCVLOWAT: c_int = 18; -pub const SO_SNDLOWAT: c_int = 19; -pub const SO_SECURITY_AUTHENTICATION: c_int = 22; -pub const SO_SECURITY_ENCRYPTION_TRANSPORT: c_int = 23; -pub const SO_SECURITY_ENCRYPTION_NETWORK: c_int = 24; -pub const SO_BINDTODEVICE: c_int = 25; -pub const SO_ATTACH_FILTER: c_int = 26; -pub const SO_DETACH_FILTER: c_int = 27; -pub const SO_GET_FILTER: c_int = SO_ATTACH_FILTER; -pub const SO_PEERNAME: c_int = 28; - -cfg_if! { - if #[cfg(all( - linux_time_bits64, - any(target_arch = "arm", target_arch = "x86"), - not(any(target_env = "musl", target_env = "ohos")) - ))] { - pub const SO_TIMESTAMP: c_int = SO_TIMESTAMP_NEW; - pub const SO_TIMESTAMPNS: c_int = SO_TIMESTAMPNS_NEW; - pub const SO_TIMESTAMPING: c_int = SO_TIMESTAMPING_NEW; - pub const SO_RCVTIMEO: c_int = SO_RCVTIMEO_NEW; - pub const SO_SNDTIMEO: c_int = SO_SNDTIMEO_NEW; - } else if #[cfg(all( - linux_time_bits64, - any(target_arch = "arm", target_arch = "x86"), - any(target_env = "musl", target_env = "ohos") - ))] { - pub const SO_TIMESTAMP: c_int = 63; - pub const SO_TIMESTAMPNS: c_int = 64; - pub const SO_TIMESTAMPING: c_int = 65; - pub const SO_RCVTIMEO: c_int = 66; - pub const SO_SNDTIMEO: c_int = 67; - } else { - const SO_TIMESTAMP_OLD: c_int = 29; - const SO_TIMESTAMPNS_OLD: c_int = 35; - const SO_TIMESTAMPING_OLD: c_int = 37; - const SO_RCVTIMEO_OLD: c_int = 20; - const SO_SNDTIMEO_OLD: c_int = 21; - - pub const SO_TIMESTAMP: c_int = SO_TIMESTAMP_OLD; - pub const SO_TIMESTAMPNS: c_int = SO_TIMESTAMPNS_OLD; - pub const SO_TIMESTAMPING: c_int = SO_TIMESTAMPING_OLD; - pub const SO_RCVTIMEO: c_int = SO_RCVTIMEO_OLD; - pub const SO_SNDTIMEO: c_int = SO_SNDTIMEO_OLD; - } -} - -pub const SO_ACCEPTCONN: c_int = 30; -pub const SO_PEERSEC: c_int = 31; -pub const SO_SNDBUFFORCE: c_int = 32; -pub const SO_RCVBUFFORCE: c_int = 33; -pub const SO_PASSSEC: c_int = 34; -pub const SO_MARK: c_int = 36; -pub const SO_PROTOCOL: c_int = 38; -pub const SO_DOMAIN: c_int = 39; -pub const SO_RXQ_OVFL: c_int = 40; -pub const SO_WIFI_STATUS: c_int = 41; -pub const SCM_WIFI_STATUS: c_int = SO_WIFI_STATUS; -pub const SO_PEEK_OFF: c_int = 42; -pub const SO_NOFCS: c_int = 43; -pub const SO_LOCK_FILTER: c_int = 44; -pub const SO_SELECT_ERR_QUEUE: c_int = 45; -pub const SO_BUSY_POLL: c_int = 46; -pub const SO_MAX_PACING_RATE: c_int = 47; -pub const SO_BPF_EXTENSIONS: c_int = 48; -pub const SO_INCOMING_CPU: c_int = 49; -pub const 
SO_ATTACH_BPF: c_int = 50; -pub const SO_DETACH_BPF: c_int = SO_DETACH_FILTER; -pub const SO_ATTACH_REUSEPORT_CBPF: c_int = 51; -pub const SO_ATTACH_REUSEPORT_EBPF: c_int = 52; -pub const SO_CNX_ADVICE: c_int = 53; -pub const SCM_TIMESTAMPING_OPT_STATS: c_int = 54; -pub const SO_MEMINFO: c_int = 55; -pub const SO_INCOMING_NAPI_ID: c_int = 56; -pub const SO_COOKIE: c_int = 57; -pub const SCM_TIMESTAMPING_PKTINFO: c_int = 58; -pub const SO_PEERGROUPS: c_int = 59; -pub const SO_ZEROCOPY: c_int = 60; -pub const SO_TXTIME: c_int = 61; -pub const SCM_TXTIME: c_int = SO_TXTIME; -pub const SO_BINDTOIFINDEX: c_int = 62; -cfg_if! { - // Some of these platforms in CI already have these constants. - // But they may still not have those _OLD ones. - if #[cfg(all( - any( - target_arch = "x86", - target_arch = "x86_64", - target_arch = "arm", - target_arch = "aarch64", - target_arch = "csky", - target_arch = "loongarch64" - ), - // FIXME(musl): - // Musl hardcodes the SO_* constants instead - // of inheriting them from the kernel headers. - // For new constants you might need consider updating - // musl in the CI as well. - not(any(target_env = "musl", target_env = "ohos")) - ))] { - pub const SO_TIMESTAMP_NEW: c_int = 63; - pub const SO_TIMESTAMPNS_NEW: c_int = 64; - pub const SO_TIMESTAMPING_NEW: c_int = 65; - pub const SO_RCVTIMEO_NEW: c_int = 66; - pub const SO_SNDTIMEO_NEW: c_int = 67; - pub const SO_DETACH_REUSEPORT_BPF: c_int = 68; - } -} -pub const SO_PREFER_BUSY_POLL: c_int = 69; -pub const SO_BUSY_POLL_BUDGET: c_int = 70; -pub const SO_NETNS_COOKIE: c_int = 71; -pub const SO_BUF_LOCK: c_int = 72; -pub const SO_RESERVE_MEM: c_int = 73; -pub const SO_TXREHASH: c_int = 74; -pub const SO_RCVMARK: c_int = 75; -pub const SO_PASSPIDFD: c_int = 76; -pub const SO_PEERPIDFD: c_int = 77; -pub const SO_DEVMEM_LINEAR: c_int = 78; -pub const SO_DEVMEM_DMABUF: c_int = 79; -pub const SO_DEVMEM_DONTNEED: c_int = 80; - -// Defined in unix/linux_like/mod.rs -// pub const SCM_TIMESTAMP: c_int = SO_TIMESTAMP; -pub const SCM_TIMESTAMPNS: c_int = SO_TIMESTAMPNS; -pub const SCM_TIMESTAMPING: c_int = SO_TIMESTAMPING; - -pub const SCM_DEVMEM_LINEAR: c_int = SO_DEVMEM_LINEAR; -pub const SCM_DEVMEM_DMABUF: c_int = SO_DEVMEM_DMABUF; - -// Ioctl Constants - -pub const TCGETS: Ioctl = 0x5401; -pub const TCSETS: Ioctl = 0x5402; -pub const TCSETSW: Ioctl = 0x5403; -pub const TCSETSF: Ioctl = 0x5404; -pub const TCGETA: Ioctl = 0x5405; -pub const TCSETA: Ioctl = 0x5406; -pub const TCSETAW: Ioctl = 0x5407; -pub const TCSETAF: Ioctl = 0x5408; -pub const TCSBRK: Ioctl = 0x5409; -pub const TCXONC: Ioctl = 0x540A; -pub const TCFLSH: Ioctl = 0x540B; -pub const TIOCEXCL: Ioctl = 0x540C; -pub const TIOCNXCL: Ioctl = 0x540D; -pub const TIOCSCTTY: Ioctl = 0x540E; -pub const TIOCGPGRP: Ioctl = 0x540F; -pub const TIOCSPGRP: Ioctl = 0x5410; -pub const TIOCOUTQ: Ioctl = 0x5411; -pub const TIOCSTI: Ioctl = 0x5412; -pub const TIOCGWINSZ: Ioctl = 0x5413; -pub const TIOCSWINSZ: Ioctl = 0x5414; -pub const TIOCMGET: Ioctl = 0x5415; -pub const TIOCMBIS: Ioctl = 0x5416; -pub const TIOCMBIC: Ioctl = 0x5417; -pub const TIOCMSET: Ioctl = 0x5418; -pub const TIOCGSOFTCAR: Ioctl = 0x5419; -pub const TIOCSSOFTCAR: Ioctl = 0x541A; -pub const FIONREAD: Ioctl = 0x541B; -pub const TIOCINQ: Ioctl = FIONREAD; -pub const TIOCLINUX: Ioctl = 0x541C; -pub const TIOCCONS: Ioctl = 0x541D; -pub const TIOCGSERIAL: Ioctl = 0x541E; -pub const TIOCSSERIAL: Ioctl = 0x541F; -pub const TIOCPKT: Ioctl = 0x5420; -pub const FIONBIO: Ioctl = 0x5421; -pub const TIOCNOTTY: Ioctl = 
0x5422; -pub const TIOCSETD: Ioctl = 0x5423; -pub const TIOCGETD: Ioctl = 0x5424; -pub const TCSBRKP: Ioctl = 0x5425; -pub const TIOCSBRK: Ioctl = 0x5427; -pub const TIOCCBRK: Ioctl = 0x5428; -pub const TIOCGSID: Ioctl = 0x5429; -pub const TCGETS2: Ioctl = 0x802c542a; -pub const TCSETS2: Ioctl = 0x402c542b; -pub const TCSETSW2: Ioctl = 0x402c542c; -pub const TCSETSF2: Ioctl = 0x402c542d; -pub const TIOCGRS485: Ioctl = 0x542E; -pub const TIOCSRS485: Ioctl = 0x542F; -pub const TIOCGPTN: Ioctl = 0x80045430; -pub const TIOCSPTLCK: Ioctl = 0x40045431; -pub const TIOCGDEV: Ioctl = 0x80045432; -pub const TCGETX: Ioctl = 0x5432; -pub const TCSETX: Ioctl = 0x5433; -pub const TCSETXF: Ioctl = 0x5434; -pub const TCSETXW: Ioctl = 0x5435; -pub const TIOCSIG: Ioctl = 0x40045436; -pub const TIOCVHANGUP: Ioctl = 0x5437; -pub const TIOCGPKT: Ioctl = 0x80045438; -pub const TIOCGPTLCK: Ioctl = 0x80045439; -pub const TIOCGEXCL: Ioctl = 0x80045440; -pub const TIOCGPTPEER: Ioctl = 0x5441; -// pub const TIOCGISO7816: Ioctl = 0x80285442; -// pub const TIOCSISO7816: Ioctl = 0xc0285443; -pub const FIONCLEX: Ioctl = 0x5450; -pub const FIOCLEX: Ioctl = 0x5451; -pub const FIOASYNC: Ioctl = 0x5452; -pub const TIOCSERCONFIG: Ioctl = 0x5453; -pub const TIOCSERGWILD: Ioctl = 0x5454; -pub const TIOCSERSWILD: Ioctl = 0x5455; -pub const TIOCGLCKTRMIOS: Ioctl = 0x5456; -pub const TIOCSLCKTRMIOS: Ioctl = 0x5457; -pub const TIOCSERGSTRUCT: Ioctl = 0x5458; -pub const TIOCSERGETLSR: Ioctl = 0x5459; -pub const TIOCSERGETMULTI: Ioctl = 0x545A; -pub const TIOCSERSETMULTI: Ioctl = 0x545B; -pub const TIOCMIWAIT: Ioctl = 0x545C; -pub const TIOCGICOUNT: Ioctl = 0x545D; -pub const BLKIOMIN: Ioctl = 0x1278; -pub const BLKIOOPT: Ioctl = 0x1279; -pub const BLKSSZGET: Ioctl = 0x1268; -pub const BLKPBSZGET: Ioctl = 0x127B; - -cfg_if! { - if #[cfg(any(target_arch = "arm", target_arch = "s390x"))] { - pub const FIOQSIZE: Ioctl = 0x545E; - } else { - pub const FIOQSIZE: Ioctl = 0x5460; - } -} - -pub const TIOCM_LE: c_int = 0x001; -pub const TIOCM_DTR: c_int = 0x002; -pub const TIOCM_RTS: c_int = 0x004; -pub const TIOCM_ST: c_int = 0x008; -pub const TIOCM_SR: c_int = 0x010; -pub const TIOCM_CTS: c_int = 0x020; -pub const TIOCM_CAR: c_int = 0x040; -pub const TIOCM_CD: c_int = TIOCM_CAR; -pub const TIOCM_RNG: c_int = 0x080; -pub const TIOCM_RI: c_int = TIOCM_RNG; -pub const TIOCM_DSR: c_int = 0x100; - -pub const BOTHER: crate::speed_t = 0o010000; -pub const IBSHIFT: crate::tcflag_t = 16; - -// RLIMIT Constants - -cfg_if! 
{ - if #[cfg(any(target_env = "gnu", target_env = "uclibc"))] { - pub const RLIMIT_CPU: crate::__rlimit_resource_t = 0; - pub const RLIMIT_FSIZE: crate::__rlimit_resource_t = 1; - pub const RLIMIT_DATA: crate::__rlimit_resource_t = 2; - pub const RLIMIT_STACK: crate::__rlimit_resource_t = 3; - pub const RLIMIT_CORE: crate::__rlimit_resource_t = 4; - pub const RLIMIT_RSS: crate::__rlimit_resource_t = 5; - pub const RLIMIT_NPROC: crate::__rlimit_resource_t = 6; - pub const RLIMIT_NOFILE: crate::__rlimit_resource_t = 7; - pub const RLIMIT_MEMLOCK: crate::__rlimit_resource_t = 8; - pub const RLIMIT_AS: crate::__rlimit_resource_t = 9; - pub const RLIMIT_LOCKS: crate::__rlimit_resource_t = 10; - pub const RLIMIT_SIGPENDING: crate::__rlimit_resource_t = 11; - pub const RLIMIT_MSGQUEUE: crate::__rlimit_resource_t = 12; - pub const RLIMIT_NICE: crate::__rlimit_resource_t = 13; - pub const RLIMIT_RTPRIO: crate::__rlimit_resource_t = 14; - pub const RLIMIT_RTTIME: crate::__rlimit_resource_t = 15; - #[allow(deprecated)] - #[deprecated(since = "0.2.64", note = "Not stable across OS versions")] - pub const RLIMIT_NLIMITS: crate::__rlimit_resource_t = RLIM_NLIMITS; - } else if #[cfg(any(target_env = "musl", target_env = "ohos"))] { - pub const RLIMIT_CPU: c_int = 0; - pub const RLIMIT_FSIZE: c_int = 1; - pub const RLIMIT_DATA: c_int = 2; - pub const RLIMIT_STACK: c_int = 3; - pub const RLIMIT_CORE: c_int = 4; - pub const RLIMIT_RSS: c_int = 5; - pub const RLIMIT_NPROC: c_int = 6; - pub const RLIMIT_NOFILE: c_int = 7; - pub const RLIMIT_MEMLOCK: c_int = 8; - pub const RLIMIT_AS: c_int = 9; - pub const RLIMIT_LOCKS: c_int = 10; - pub const RLIMIT_SIGPENDING: c_int = 11; - pub const RLIMIT_MSGQUEUE: c_int = 12; - pub const RLIMIT_NICE: c_int = 13; - pub const RLIMIT_RTPRIO: c_int = 14; - pub const RLIMIT_RTTIME: c_int = 15; - #[deprecated(since = "0.2.64", note = "Not stable across OS versions")] - pub const RLIM_NLIMITS: c_int = 16; - #[allow(deprecated)] - #[deprecated(since = "0.2.64", note = "Not stable across OS versions")] - pub const RLIMIT_NLIMITS: c_int = RLIM_NLIMITS; - } -} - -cfg_if! { - if #[cfg(target_env = "gnu")] { - #[deprecated(since = "0.2.64", note = "Not stable across OS versions")] - pub const RLIM_NLIMITS: crate::__rlimit_resource_t = 16; - } else if #[cfg(target_env = "uclibc")] { - #[deprecated(since = "0.2.64", note = "Not stable across OS versions")] - pub const RLIM_NLIMITS: crate::__rlimit_resource_t = 15; - } -} - -pub const RLIM_INFINITY: crate::rlim_t = !0; diff --git a/vendor/libc/src/unix/linux_like/linux/arch/mips/mod.rs b/vendor/libc/src/unix/linux_like/linux/arch/mips/mod.rs deleted file mode 100644 index ba688948a906d2..00000000000000 --- a/vendor/libc/src/unix/linux_like/linux/arch/mips/mod.rs +++ /dev/null @@ -1,333 +0,0 @@ -use crate::prelude::*; -use crate::Ioctl; - -s! 
{ - pub struct termios2 { - pub c_iflag: crate::tcflag_t, - pub c_oflag: crate::tcflag_t, - pub c_cflag: crate::tcflag_t, - pub c_lflag: crate::tcflag_t, - pub c_line: crate::cc_t, - pub c_cc: [crate::cc_t; 23], - pub c_ispeed: crate::speed_t, - pub c_ospeed: crate::speed_t, - } -} - -// arch/mips/include/uapi/asm/socket.h -pub const SOL_SOCKET: c_int = 0xffff; - -// Defined in unix/linux_like/mod.rs -// pub const SO_DEBUG: c_int = 0x0001; -pub const SO_REUSEADDR: c_int = 0x0004; -pub const SO_KEEPALIVE: c_int = 0x0008; -pub const SO_DONTROUTE: c_int = 0x0010; -pub const SO_BROADCAST: c_int = 0x0020; -pub const SO_LINGER: c_int = 0x0080; -pub const SO_OOBINLINE: c_int = 0x0100; -pub const SO_REUSEPORT: c_int = 0x0200; -pub const SO_TYPE: c_int = 0x1008; -// pub const SO_STYLE: c_int = SO_TYPE; -pub const SO_ERROR: c_int = 0x1007; -pub const SO_SNDBUF: c_int = 0x1001; -pub const SO_RCVBUF: c_int = 0x1002; -pub const SO_SNDLOWAT: c_int = 0x1003; -pub const SO_RCVLOWAT: c_int = 0x1004; -cfg_if! { - if #[cfg(linux_time_bits64)] { - const SO_RCVTIMEO_NEW: c_int = 66; - const SO_SNDTIMEO_NEW: c_int = 67; - - pub const SO_SNDTIMEO: c_int = SO_SNDTIMEO_NEW; - pub const SO_RCVTIMEO: c_int = SO_RCVTIMEO_NEW; - } else { - const SO_SNDTIMEO_OLD: c_int = 0x1005; - const SO_RCVTIMEO_OLD: c_int = 0x1006; - - pub const SO_SNDTIMEO: c_int = SO_SNDTIMEO_OLD; - pub const SO_RCVTIMEO: c_int = SO_RCVTIMEO_OLD; - } -} -pub const SO_ACCEPTCONN: c_int = 0x1009; -pub const SO_PROTOCOL: c_int = 0x1028; -pub const SO_DOMAIN: c_int = 0x1029; - -pub const SO_NO_CHECK: c_int = 11; -pub const SO_PRIORITY: c_int = 12; -pub const SO_BSDCOMPAT: c_int = 14; -pub const SO_PASSCRED: c_int = 17; -pub const SO_PEERCRED: c_int = 18; -pub const SO_SECURITY_AUTHENTICATION: c_int = 22; -pub const SO_SECURITY_ENCRYPTION_TRANSPORT: c_int = 23; -pub const SO_SECURITY_ENCRYPTION_NETWORK: c_int = 24; -pub const SO_BINDTODEVICE: c_int = 25; -pub const SO_ATTACH_FILTER: c_int = 26; -pub const SO_DETACH_FILTER: c_int = 27; -pub const SO_GET_FILTER: c_int = SO_ATTACH_FILTER; -pub const SO_PEERNAME: c_int = 28; -pub const SO_PEERSEC: c_int = 30; -pub const SO_SNDBUFFORCE: c_int = 31; -pub const SO_RCVBUFFORCE: c_int = 33; -pub const SO_PASSSEC: c_int = 34; -pub const SO_MARK: c_int = 36; -pub const SO_RXQ_OVFL: c_int = 40; -pub const SO_WIFI_STATUS: c_int = 41; -pub const SCM_WIFI_STATUS: c_int = SO_WIFI_STATUS; -pub const SO_PEEK_OFF: c_int = 42; -pub const SO_NOFCS: c_int = 43; -pub const SO_LOCK_FILTER: c_int = 44; -pub const SO_SELECT_ERR_QUEUE: c_int = 45; -pub const SO_BUSY_POLL: c_int = 46; -pub const SO_MAX_PACING_RATE: c_int = 47; -pub const SO_BPF_EXTENSIONS: c_int = 48; -pub const SO_INCOMING_CPU: c_int = 49; -pub const SO_ATTACH_BPF: c_int = 50; -pub const SO_DETACH_BPF: c_int = SO_DETACH_FILTER; -pub const SO_ATTACH_REUSEPORT_CBPF: c_int = 51; -pub const SO_ATTACH_REUSEPORT_EBPF: c_int = 52; -pub const SO_CNX_ADVICE: c_int = 53; -pub const SCM_TIMESTAMPING_OPT_STATS: c_int = 54; -pub const SO_MEMINFO: c_int = 55; -pub const SO_INCOMING_NAPI_ID: c_int = 56; -pub const SO_COOKIE: c_int = 57; -pub const SCM_TIMESTAMPING_PKTINFO: c_int = 58; -pub const SO_PEERGROUPS: c_int = 59; -pub const SO_ZEROCOPY: c_int = 60; -pub const SO_TXTIME: c_int = 61; -pub const SCM_TXTIME: c_int = SO_TXTIME; -pub const SO_BINDTOIFINDEX: c_int = 62; - -cfg_if! 
{ - if #[cfg(linux_time_bits64)] { - const SO_TIMESTAMP_NEW: c_int = 63; - const SO_TIMESTAMPNS_NEW: c_int = 64; - const SO_TIMESTAMPING_NEW: c_int = 65; - - pub const SO_TIMESTAMP: c_int = SO_TIMESTAMP_NEW; - pub const SO_TIMESTAMPNS: c_int = SO_TIMESTAMPNS_NEW; - pub const SO_TIMESTAMPING: c_int = SO_TIMESTAMPING_NEW; - } else { - const SO_TIMESTAMP_OLD: c_int = 29; - const SO_TIMESTAMPNS_OLD: c_int = 35; - const SO_TIMESTAMPING_OLD: c_int = 37; - - pub const SO_TIMESTAMP: c_int = SO_TIMESTAMP_OLD; - pub const SO_TIMESTAMPNS: c_int = SO_TIMESTAMPNS_OLD; - pub const SO_TIMESTAMPING: c_int = SO_TIMESTAMPING_OLD; - } -} - -// pub const SO_DETACH_REUSEPORT_BPF: c_int = 68; -pub const SO_PREFER_BUSY_POLL: c_int = 69; -pub const SO_BUSY_POLL_BUDGET: c_int = 70; -pub const SO_NETNS_COOKIE: c_int = 71; -pub const SO_BUF_LOCK: c_int = 72; -pub const SO_RESERVE_MEM: c_int = 73; -pub const SO_TXREHASH: c_int = 74; -pub const SO_RCVMARK: c_int = 75; -pub const SO_PASSPIDFD: c_int = 76; -pub const SO_PEERPIDFD: c_int = 77; -pub const SO_DEVMEM_LINEAR: c_int = 78; -pub const SO_DEVMEM_DMABUF: c_int = 79; -pub const SO_DEVMEM_DONTNEED: c_int = 80; - -// Defined in unix/linux_like/mod.rs -// pub const SCM_TIMESTAMP: c_int = SO_TIMESTAMP; -pub const SCM_TIMESTAMPNS: c_int = SO_TIMESTAMPNS; -pub const SCM_TIMESTAMPING: c_int = SO_TIMESTAMPING; - -pub const SCM_DEVMEM_LINEAR: c_int = SO_DEVMEM_LINEAR; -pub const SCM_DEVMEM_DMABUF: c_int = SO_DEVMEM_DMABUF; - -// Ioctl Constants - -pub const TCGETS: Ioctl = 0x540d; -pub const TCSETS: Ioctl = 0x540e; -pub const TCSETSW: Ioctl = 0x540f; -pub const TCSETSF: Ioctl = 0x5410; -pub const TCGETA: Ioctl = 0x5401; -pub const TCSETA: Ioctl = 0x5402; -pub const TCSETAW: Ioctl = 0x5403; -pub const TCSETAF: Ioctl = 0x5404; -pub const TCSBRK: Ioctl = 0x5405; -pub const TCXONC: Ioctl = 0x5406; -pub const TCFLSH: Ioctl = 0x5407; -pub const TIOCEXCL: Ioctl = 0x740d; -pub const TIOCNXCL: Ioctl = 0x740e; -pub const TIOCSCTTY: Ioctl = 0x5480; -pub const TIOCGPGRP: Ioctl = 0x40047477; -pub const TIOCSPGRP: Ioctl = 0x80047476; -pub const TIOCOUTQ: Ioctl = 0x7472; -pub const TIOCSTI: Ioctl = 0x5472; -pub const TIOCGWINSZ: Ioctl = 0x40087468; -pub const TIOCSWINSZ: Ioctl = 0x80087467; -pub const TIOCMGET: Ioctl = 0x741d; -pub const TIOCMBIS: Ioctl = 0x741b; -pub const TIOCMBIC: Ioctl = 0x741c; -pub const TIOCMSET: Ioctl = 0x741a; -pub const TIOCGSOFTCAR: Ioctl = 0x5481; -pub const TIOCSSOFTCAR: Ioctl = 0x5482; -pub const FIONREAD: Ioctl = 0x467f; -pub const TIOCINQ: Ioctl = FIONREAD; -pub const TIOCLINUX: Ioctl = 0x5483; -pub const TIOCCONS: Ioctl = 0x80047478; -pub const TIOCGSERIAL: Ioctl = 0x5484; -pub const TIOCSSERIAL: Ioctl = 0x5485; -pub const TIOCPKT: Ioctl = 0x5470; -pub const FIONBIO: Ioctl = 0x667e; -pub const TIOCNOTTY: Ioctl = 0x5471; -pub const TIOCSETD: Ioctl = 0x7401; -pub const TIOCGETD: Ioctl = 0x7400; -pub const TCSBRKP: Ioctl = 0x5486; -pub const TIOCSBRK: Ioctl = 0x5427; -pub const TIOCCBRK: Ioctl = 0x5428; -pub const TIOCGSID: Ioctl = 0x7416; -pub const TCGETS2: Ioctl = 0x4030542a; -pub const TCSETS2: Ioctl = 0x8030542b; -pub const TCSETSW2: Ioctl = 0x8030542c; -pub const TCSETSF2: Ioctl = 0x8030542d; -pub const TIOCGPTN: Ioctl = 0x40045430; -pub const TIOCSPTLCK: Ioctl = 0x80045431; -pub const TIOCGDEV: Ioctl = 0x40045432; -pub const TIOCSIG: Ioctl = 0x80045436; -pub const TIOCVHANGUP: Ioctl = 0x5437; -pub const TIOCGPKT: Ioctl = 0x40045438; -pub const TIOCGPTLCK: Ioctl = 0x40045439; -pub const TIOCGEXCL: Ioctl = 0x40045440; -pub const TIOCGPTPEER: Ioctl = 
0x20005441; -//pub const TIOCGISO7816: Ioctl = 0x40285442; -//pub const TIOCSISO7816: Ioctl = 0xc0285443; -pub const FIONCLEX: Ioctl = 0x6602; -pub const FIOCLEX: Ioctl = 0x6601; -pub const FIOASYNC: Ioctl = 0x667d; -pub const TIOCSERCONFIG: Ioctl = 0x5488; -pub const TIOCSERGWILD: Ioctl = 0x5489; -pub const TIOCSERSWILD: Ioctl = 0x548a; -pub const TIOCGLCKTRMIOS: Ioctl = 0x548b; -pub const TIOCSLCKTRMIOS: Ioctl = 0x548c; -pub const TIOCSERGSTRUCT: Ioctl = 0x548d; -pub const TIOCSERGETLSR: Ioctl = 0x548e; -pub const TIOCSERGETMULTI: Ioctl = 0x548f; -pub const TIOCSERSETMULTI: Ioctl = 0x5490; -pub const TIOCMIWAIT: Ioctl = 0x5491; -pub const TIOCGICOUNT: Ioctl = 0x5492; -pub const FIOQSIZE: Ioctl = 0x667f; -pub const TIOCSLTC: Ioctl = 0x7475; -pub const TIOCGETP: Ioctl = 0x7408; -pub const TIOCSETP: Ioctl = 0x7409; -pub const TIOCSETN: Ioctl = 0x740a; -pub const BLKIOMIN: Ioctl = 0x20001278; -pub const BLKIOOPT: Ioctl = 0x20001279; -pub const BLKSSZGET: Ioctl = 0x20001268; -pub const BLKPBSZGET: Ioctl = 0x2000127B; - -cfg_if! { - if #[cfg(target_env = "musl")] { - pub const TIOCGRS485: Ioctl = 0x4020542e; - pub const TIOCSRS485: Ioctl = 0xc020542f; - } -} - -pub const TIOCM_LE: c_int = 0x001; -pub const TIOCM_DTR: c_int = 0x002; -pub const TIOCM_RTS: c_int = 0x004; -pub const TIOCM_ST: c_int = 0x010; -pub const TIOCM_SR: c_int = 0x020; -pub const TIOCM_CTS: c_int = 0x040; -pub const TIOCM_CAR: c_int = 0x100; -pub const TIOCM_CD: c_int = TIOCM_CAR; -pub const TIOCM_RNG: c_int = 0x200; -pub const TIOCM_RI: c_int = TIOCM_RNG; -pub const TIOCM_DSR: c_int = 0x400; - -pub const BOTHER: crate::speed_t = 0o010000; -pub const IBSHIFT: crate::tcflag_t = 16; - -// RLIMIT Constants - -cfg_if! { - if #[cfg(any(target_env = "gnu", target_env = "uclibc"))] { - pub const RLIMIT_CPU: crate::__rlimit_resource_t = 0; - pub const RLIMIT_FSIZE: crate::__rlimit_resource_t = 1; - pub const RLIMIT_DATA: crate::__rlimit_resource_t = 2; - pub const RLIMIT_STACK: crate::__rlimit_resource_t = 3; - pub const RLIMIT_CORE: crate::__rlimit_resource_t = 4; - pub const RLIMIT_NOFILE: crate::__rlimit_resource_t = 5; - pub const RLIMIT_AS: crate::__rlimit_resource_t = 6; - pub const RLIMIT_RSS: crate::__rlimit_resource_t = 7; - pub const RLIMIT_NPROC: crate::__rlimit_resource_t = 8; - pub const RLIMIT_MEMLOCK: crate::__rlimit_resource_t = 9; - pub const RLIMIT_LOCKS: crate::__rlimit_resource_t = 10; - pub const RLIMIT_SIGPENDING: crate::__rlimit_resource_t = 11; - pub const RLIMIT_MSGQUEUE: crate::__rlimit_resource_t = 12; - pub const RLIMIT_NICE: crate::__rlimit_resource_t = 13; - pub const RLIMIT_RTPRIO: crate::__rlimit_resource_t = 14; - pub const RLIMIT_RTTIME: crate::__rlimit_resource_t = 15; - #[allow(deprecated)] - #[deprecated(since = "0.2.64", note = "Not stable across OS versions")] - pub const RLIMIT_NLIMITS: crate::__rlimit_resource_t = RLIM_NLIMITS; - } else if #[cfg(target_env = "musl")] { - pub const RLIMIT_CPU: c_int = 0; - pub const RLIMIT_FSIZE: c_int = 1; - pub const RLIMIT_DATA: c_int = 2; - pub const RLIMIT_STACK: c_int = 3; - pub const RLIMIT_CORE: c_int = 4; - pub const RLIMIT_NOFILE: c_int = 5; - pub const RLIMIT_AS: c_int = 6; - pub const RLIMIT_RSS: c_int = 7; - pub const RLIMIT_NPROC: c_int = 8; - pub const RLIMIT_MEMLOCK: c_int = 9; - pub const RLIMIT_LOCKS: c_int = 10; - pub const RLIMIT_SIGPENDING: c_int = 11; - pub const RLIMIT_MSGQUEUE: c_int = 12; - pub const RLIMIT_NICE: c_int = 13; - pub const RLIMIT_RTPRIO: c_int = 14; - pub const RLIMIT_RTTIME: c_int = 15; - #[deprecated(since = "0.2.64", 
note = "Not stable across OS versions")] - pub const RLIM_NLIMITS: c_int = 16; - #[allow(deprecated)] - #[deprecated(since = "0.2.64", note = "Not stable across OS versions")] - pub const RLIMIT_NLIMITS: c_int = RLIM_NLIMITS; - pub const RLIM_INFINITY: crate::rlim_t = !0; - } -} - -cfg_if! { - if #[cfg(target_env = "gnu")] { - #[deprecated(since = "0.2.64", note = "Not stable across OS versions")] - pub const RLIM_NLIMITS: crate::__rlimit_resource_t = 16; - } else if #[cfg(target_env = "uclibc")] { - #[deprecated(since = "0.2.64", note = "Not stable across OS versions")] - pub const RLIM_NLIMITS: crate::__rlimit_resource_t = 15; - } -} - -cfg_if! { - if #[cfg( - any(target_arch = "mips64", target_arch = "mips64r6"), - any(target_env = "gnu", target_env = "uclibc") - )] { - pub const RLIM_INFINITY: crate::rlim_t = !0; - } -} - -cfg_if! { - if #[cfg(all( - any(target_arch = "mips", target_arch = "mips32r6"), - any( - all(target_env = "uclibc", linux_time_bits64), - all( - target_env = "gnu", - any(linux_time_bits64, gnu_file_offset_bits64) - ) - ) - ))] { - pub const RLIM_INFINITY: crate::rlim_t = !0; - } else if #[cfg(all( - any(target_arch = "mips", target_arch = "mips32r6"), - any(target_env = "uclibc", target_env = "gnu"), - not(linux_time_bits64) - ))] { - pub const RLIM_INFINITY: crate::rlim_t = 0x7fffffff; - } -} diff --git a/vendor/libc/src/unix/linux_like/linux/arch/mod.rs b/vendor/libc/src/unix/linux_like/linux/arch/mod.rs deleted file mode 100644 index 00914a43ac1646..00000000000000 --- a/vendor/libc/src/unix/linux_like/linux/arch/mod.rs +++ /dev/null @@ -1,20 +0,0 @@ -cfg_if! { - if #[cfg(any( - target_arch = "mips", - target_arch = "mips32r6", - target_arch = "mips64", - target_arch = "mips64r6" - ))] { - mod mips; - pub use self::mips::*; - } else if #[cfg(any(target_arch = "powerpc", target_arch = "powerpc64"))] { - mod powerpc; - pub use self::powerpc::*; - } else if #[cfg(any(target_arch = "sparc", target_arch = "sparc64"))] { - mod sparc; - pub use self::sparc::*; - } else { - mod generic; - pub use self::generic::*; - } -} diff --git a/vendor/libc/src/unix/linux_like/linux/arch/powerpc/mod.rs b/vendor/libc/src/unix/linux_like/linux/arch/powerpc/mod.rs deleted file mode 100644 index 3249a9f1b6a46d..00000000000000 --- a/vendor/libc/src/unix/linux_like/linux/arch/powerpc/mod.rs +++ /dev/null @@ -1,280 +0,0 @@ -use crate::prelude::*; -use crate::Ioctl; - -// arch/powerpc/include/uapi/asm/socket.h - -pub const SOL_SOCKET: c_int = 1; - -// Defined in unix/linux_like/mod.rs -// pub const SO_DEBUG: c_int = 1; -pub const SO_REUSEADDR: c_int = 2; -pub const SO_TYPE: c_int = 3; -pub const SO_ERROR: c_int = 4; -pub const SO_DONTROUTE: c_int = 5; -pub const SO_BROADCAST: c_int = 6; -pub const SO_SNDBUF: c_int = 7; -pub const SO_RCVBUF: c_int = 8; -pub const SO_KEEPALIVE: c_int = 9; -pub const SO_OOBINLINE: c_int = 10; -pub const SO_NO_CHECK: c_int = 11; -pub const SO_PRIORITY: c_int = 12; -pub const SO_LINGER: c_int = 13; -pub const SO_BSDCOMPAT: c_int = 14; -pub const SO_REUSEPORT: c_int = 15; -// powerpc only differs in these -pub const SO_RCVLOWAT: c_int = 16; -pub const SO_SNDLOWAT: c_int = 17; - -cfg_if! 
{ - if #[cfg(linux_time_bits64)] { - const SO_RCVTIMEO_NEW: c_int = 66; - const SO_SNDTIMEO_NEW: c_int = 67; - - pub const SO_RCVTIMEO: c_int = SO_RCVTIMEO_NEW; - pub const SO_SNDTIMEO: c_int = SO_SNDTIMEO_NEW; - } else { - const SO_RCVTIMEO_OLD: c_int = 18; - const SO_SNDTIMEO_OLD: c_int = 19; - - pub const SO_RCVTIMEO: c_int = SO_RCVTIMEO_OLD; - pub const SO_SNDTIMEO: c_int = SO_SNDTIMEO_OLD; - } -} - -pub const SO_PASSCRED: c_int = 20; -pub const SO_PEERCRED: c_int = 21; -// end -pub const SO_SECURITY_AUTHENTICATION: c_int = 22; -pub const SO_SECURITY_ENCRYPTION_TRANSPORT: c_int = 23; -pub const SO_SECURITY_ENCRYPTION_NETWORK: c_int = 24; -pub const SO_BINDTODEVICE: c_int = 25; -pub const SO_ATTACH_FILTER: c_int = 26; -pub const SO_DETACH_FILTER: c_int = 27; -pub const SO_GET_FILTER: c_int = SO_ATTACH_FILTER; -pub const SO_PEERNAME: c_int = 28; -cfg_if! { - if #[cfg(linux_time_bits64)] { - const SO_TIMESTAMP_NEW: c_int = 63; - const SO_TIMESTAMPNS_NEW: c_int = 64; - const SO_TIMESTAMPING_NEW: c_int = 65; - - pub const SO_TIMESTAMP: c_int = SO_TIMESTAMP_NEW; - pub const SO_TIMESTAMPNS: c_int = SO_TIMESTAMPNS_NEW; - pub const SO_TIMESTAMPING: c_int = SO_TIMESTAMPING_NEW; - } else { - const SO_TIMESTAMP_OLD: c_int = 29; - const SO_TIMESTAMPNS_OLD: c_int = 35; - const SO_TIMESTAMPING_OLD: c_int = 37; - - pub const SO_TIMESTAMP: c_int = SO_TIMESTAMP_OLD; - pub const SO_TIMESTAMPNS: c_int = SO_TIMESTAMPNS_OLD; - pub const SO_TIMESTAMPING: c_int = SO_TIMESTAMPING_OLD; - } -} -pub const SO_ACCEPTCONN: c_int = 30; -pub const SO_PEERSEC: c_int = 31; -pub const SO_SNDBUFFORCE: c_int = 32; -pub const SO_RCVBUFFORCE: c_int = 33; -pub const SO_PASSSEC: c_int = 34; -pub const SO_MARK: c_int = 36; -pub const SO_PROTOCOL: c_int = 38; -pub const SO_DOMAIN: c_int = 39; -pub const SO_RXQ_OVFL: c_int = 40; -pub const SO_WIFI_STATUS: c_int = 41; -pub const SCM_WIFI_STATUS: c_int = SO_WIFI_STATUS; -pub const SO_PEEK_OFF: c_int = 42; -pub const SO_NOFCS: c_int = 43; -pub const SO_LOCK_FILTER: c_int = 44; -pub const SO_SELECT_ERR_QUEUE: c_int = 45; -pub const SO_BUSY_POLL: c_int = 46; -pub const SO_MAX_PACING_RATE: c_int = 47; -pub const SO_BPF_EXTENSIONS: c_int = 48; -pub const SO_INCOMING_CPU: c_int = 49; -pub const SO_ATTACH_BPF: c_int = 50; -pub const SO_DETACH_BPF: c_int = SO_DETACH_FILTER; -pub const SO_ATTACH_REUSEPORT_CBPF: c_int = 51; -pub const SO_ATTACH_REUSEPORT_EBPF: c_int = 52; -pub const SO_CNX_ADVICE: c_int = 53; -pub const SCM_TIMESTAMPING_OPT_STATS: c_int = 54; -pub const SO_MEMINFO: c_int = 55; -pub const SO_INCOMING_NAPI_ID: c_int = 56; -pub const SO_COOKIE: c_int = 57; -pub const SCM_TIMESTAMPING_PKTINFO: c_int = 58; -pub const SO_PEERGROUPS: c_int = 59; -pub const SO_ZEROCOPY: c_int = 60; -pub const SO_TXTIME: c_int = 61; -pub const SCM_TXTIME: c_int = SO_TXTIME; -pub const SO_BINDTOIFINDEX: c_int = 62; -// pub const SO_DETACH_REUSEPORT_BPF: c_int = 68; -pub const SO_PREFER_BUSY_POLL: c_int = 69; -pub const SO_BUSY_POLL_BUDGET: c_int = 70; -pub const SO_NETNS_COOKIE: c_int = 71; -pub const SO_BUF_LOCK: c_int = 72; -pub const SO_RESERVE_MEM: c_int = 73; -pub const SO_TXREHASH: c_int = 74; -pub const SO_RCVMARK: c_int = 75; -pub const SO_PASSPIDFD: c_int = 76; -pub const SO_PEERPIDFD: c_int = 77; -pub const SO_DEVMEM_LINEAR: c_int = 78; -pub const SO_DEVMEM_DMABUF: c_int = 79; -pub const SO_DEVMEM_DONTNEED: c_int = 80; - -// Defined in unix/linux_like/mod.rs -// pub const SCM_TIMESTAMP: c_int = SO_TIMESTAMP; -pub const SCM_TIMESTAMPNS: c_int = SO_TIMESTAMPNS; -pub const SCM_TIMESTAMPING: 
c_int = SO_TIMESTAMPING; - -pub const SCM_DEVMEM_LINEAR: c_int = SO_DEVMEM_LINEAR; -pub const SCM_DEVMEM_DMABUF: c_int = SO_DEVMEM_DMABUF; - -// Ioctl Constants - -cfg_if! { - if #[cfg(target_env = "gnu")] { - pub const TCGETS: Ioctl = 0x403c7413; - pub const TCSETS: Ioctl = 0x803c7414; - pub const TCSETSW: Ioctl = 0x803c7415; - pub const TCSETSF: Ioctl = 0x803c7416; - } else if #[cfg(target_env = "musl")] { - pub const TCGETS: Ioctl = 0x402c7413; - pub const TCSETS: Ioctl = 0x802c7414; - pub const TCSETSW: Ioctl = 0x802c7415; - pub const TCSETSF: Ioctl = 0x802c7416; - } -} - -pub const TCGETA: Ioctl = 0x40147417; -pub const TCSETA: Ioctl = 0x80147418; -pub const TCSETAW: Ioctl = 0x80147419; -pub const TCSETAF: Ioctl = 0x8014741C; -pub const TCSBRK: Ioctl = 0x2000741D; -pub const TCXONC: Ioctl = 0x2000741E; -pub const TCFLSH: Ioctl = 0x2000741F; -pub const TIOCEXCL: Ioctl = 0x540C; -pub const TIOCNXCL: Ioctl = 0x540D; -pub const TIOCSCTTY: Ioctl = 0x540E; -pub const TIOCGPGRP: Ioctl = 0x40047477; -pub const TIOCSPGRP: Ioctl = 0x80047476; -pub const TIOCOUTQ: Ioctl = 0x40047473; -pub const TIOCSTI: Ioctl = 0x5412; -pub const TIOCGWINSZ: Ioctl = 0x40087468; -pub const TIOCSWINSZ: Ioctl = 0x80087467; -pub const TIOCMGET: Ioctl = 0x5415; -pub const TIOCMBIS: Ioctl = 0x5416; -pub const TIOCMBIC: Ioctl = 0x5417; -pub const TIOCMSET: Ioctl = 0x5418; -pub const TIOCGSOFTCAR: Ioctl = 0x5419; -pub const TIOCSSOFTCAR: Ioctl = 0x541A; -pub const FIONREAD: Ioctl = 0x4004667F; -pub const TIOCINQ: Ioctl = FIONREAD; -pub const TIOCLINUX: Ioctl = 0x541C; -pub const TIOCCONS: Ioctl = 0x541D; -pub const TIOCGSERIAL: Ioctl = 0x541E; -pub const TIOCSSERIAL: Ioctl = 0x541F; -pub const TIOCPKT: Ioctl = 0x5420; -pub const FIONBIO: Ioctl = 0x8004667e; -pub const TIOCNOTTY: Ioctl = 0x5422; -pub const TIOCSETD: Ioctl = 0x5423; -pub const TIOCGETD: Ioctl = 0x5424; -pub const TCSBRKP: Ioctl = 0x5425; -pub const TIOCSBRK: Ioctl = 0x5427; -pub const TIOCCBRK: Ioctl = 0x5428; -pub const TIOCGSID: Ioctl = 0x5429; -pub const TIOCGRS485: Ioctl = 0x542e; -pub const TIOCSRS485: Ioctl = 0x542f; -pub const TIOCGPTN: Ioctl = 0x40045430; -pub const TIOCSPTLCK: Ioctl = 0x80045431; -pub const TIOCGDEV: Ioctl = 0x40045432; -pub const TIOCSIG: Ioctl = 0x80045436; -pub const TIOCVHANGUP: Ioctl = 0x5437; -pub const TIOCGPKT: Ioctl = 0x40045438; -pub const TIOCGPTLCK: Ioctl = 0x40045439; -pub const TIOCGEXCL: Ioctl = 0x40045440; -pub const TIOCGPTPEER: Ioctl = 0x20005441; -//pub const TIOCGISO7816: Ioctl = 0x40285442; -//pub const TIOCSISO7816: Ioctl = 0xc0285443; -pub const FIONCLEX: Ioctl = 0x20006602; -pub const FIOCLEX: Ioctl = 0x20006601; -pub const FIOASYNC: Ioctl = 0x8004667d; -pub const TIOCSERCONFIG: Ioctl = 0x5453; -pub const TIOCSERGWILD: Ioctl = 0x5454; -pub const TIOCSERSWILD: Ioctl = 0x5455; -pub const TIOCGLCKTRMIOS: Ioctl = 0x5456; -pub const TIOCSLCKTRMIOS: Ioctl = 0x5457; -pub const TIOCSERGSTRUCT: Ioctl = 0x5458; -pub const TIOCSERGETLSR: Ioctl = 0x5459; -pub const TIOCSERGETMULTI: Ioctl = 0x545A; -pub const TIOCSERSETMULTI: Ioctl = 0x545B; -pub const TIOCMIWAIT: Ioctl = 0x545C; -pub const TIOCGICOUNT: Ioctl = 0x545D; -pub const BLKIOMIN: Ioctl = 0x20001278; -pub const BLKIOOPT: Ioctl = 0x20001279; -pub const BLKSSZGET: Ioctl = 0x20001268; -pub const BLKPBSZGET: Ioctl = 0x2000127B; -//pub const FIOQSIZE: Ioctl = 0x40086680; - -pub const TIOCM_LE: c_int = 0x001; -pub const TIOCM_DTR: c_int = 0x002; -pub const TIOCM_RTS: c_int = 0x004; -pub const TIOCM_ST: c_int = 0x008; -pub const TIOCM_SR: c_int = 0x010; -pub const 
TIOCM_CTS: c_int = 0x020; -pub const TIOCM_CAR: c_int = 0x040; -pub const TIOCM_CD: c_int = TIOCM_CAR; -pub const TIOCM_RNG: c_int = 0x080; -pub const TIOCM_RI: c_int = TIOCM_RNG; -pub const TIOCM_DSR: c_int = 0x100; - -pub const BOTHER: crate::speed_t = 0o0037; -pub const IBSHIFT: crate::tcflag_t = 16; - -// RLIMIT Constants - -cfg_if! { - if #[cfg(target_env = "gnu")] { - pub const RLIMIT_CPU: crate::__rlimit_resource_t = 0; - pub const RLIMIT_FSIZE: crate::__rlimit_resource_t = 1; - pub const RLIMIT_DATA: crate::__rlimit_resource_t = 2; - pub const RLIMIT_STACK: crate::__rlimit_resource_t = 3; - pub const RLIMIT_CORE: crate::__rlimit_resource_t = 4; - pub const RLIMIT_RSS: crate::__rlimit_resource_t = 5; - pub const RLIMIT_NPROC: crate::__rlimit_resource_t = 6; - pub const RLIMIT_NOFILE: crate::__rlimit_resource_t = 7; - pub const RLIMIT_MEMLOCK: crate::__rlimit_resource_t = 8; - pub const RLIMIT_AS: crate::__rlimit_resource_t = 9; - pub const RLIMIT_LOCKS: crate::__rlimit_resource_t = 10; - pub const RLIMIT_SIGPENDING: crate::__rlimit_resource_t = 11; - pub const RLIMIT_MSGQUEUE: crate::__rlimit_resource_t = 12; - pub const RLIMIT_NICE: crate::__rlimit_resource_t = 13; - pub const RLIMIT_RTPRIO: crate::__rlimit_resource_t = 14; - pub const RLIMIT_RTTIME: crate::__rlimit_resource_t = 15; - #[deprecated(since = "0.2.64", note = "Not stable across OS versions")] - pub const RLIM_NLIMITS: crate::__rlimit_resource_t = 16; - #[allow(deprecated)] - #[deprecated(since = "0.2.64", note = "Not stable across OS versions")] - pub const RLIMIT_NLIMITS: crate::__rlimit_resource_t = RLIM_NLIMITS; - } else if #[cfg(target_env = "musl")] { - pub const RLIMIT_CPU: c_int = 0; - pub const RLIMIT_FSIZE: c_int = 1; - pub const RLIMIT_DATA: c_int = 2; - pub const RLIMIT_STACK: c_int = 3; - pub const RLIMIT_CORE: c_int = 4; - pub const RLIMIT_RSS: c_int = 5; - pub const RLIMIT_NPROC: c_int = 6; - pub const RLIMIT_NOFILE: c_int = 7; - pub const RLIMIT_MEMLOCK: c_int = 8; - pub const RLIMIT_AS: c_int = 9; - pub const RLIMIT_LOCKS: c_int = 10; - pub const RLIMIT_SIGPENDING: c_int = 11; - pub const RLIMIT_MSGQUEUE: c_int = 12; - pub const RLIMIT_NICE: c_int = 13; - pub const RLIMIT_RTPRIO: c_int = 14; - pub const RLIMIT_RTTIME: c_int = 15; - #[deprecated(since = "0.2.64", note = "Not stable across OS versions")] - pub const RLIM_NLIMITS: c_int = 16; - #[allow(deprecated)] - #[deprecated(since = "0.2.64", note = "Not stable across OS versions")] - pub const RLIMIT_NLIMITS: c_int = RLIM_NLIMITS; - } -} -pub const RLIM_INFINITY: crate::rlim_t = !0; diff --git a/vendor/libc/src/unix/linux_like/linux/arch/sparc/mod.rs b/vendor/libc/src/unix/linux_like/linux/arch/sparc/mod.rs deleted file mode 100644 index 4c108ba7b71c1a..00000000000000 --- a/vendor/libc/src/unix/linux_like/linux/arch/sparc/mod.rs +++ /dev/null @@ -1,247 +0,0 @@ -use crate::prelude::*; -use crate::Ioctl; - -s! 
{ - pub struct termios2 { - pub c_iflag: crate::tcflag_t, - pub c_oflag: crate::tcflag_t, - pub c_cflag: crate::tcflag_t, - pub c_lflag: crate::tcflag_t, - pub c_line: crate::cc_t, - pub c_cc: [crate::cc_t; 19], - pub c_ispeed: crate::speed_t, - pub c_ospeed: crate::speed_t, - } -} - -// arch/sparc/include/uapi/asm/socket.h -pub const SOL_SOCKET: c_int = 0xffff; - -// Defined in unix/linux_like/mod.rs -// pub const SO_DEBUG: c_int = 0x0001; -pub const SO_PASSCRED: c_int = 0x0002; -pub const SO_REUSEADDR: c_int = 0x0004; -pub const SO_KEEPALIVE: c_int = 0x0008; -pub const SO_DONTROUTE: c_int = 0x0010; -pub const SO_BROADCAST: c_int = 0x0020; -pub const SO_PEERCRED: c_int = 0x0040; -pub const SO_LINGER: c_int = 0x0080; -pub const SO_OOBINLINE: c_int = 0x0100; -pub const SO_REUSEPORT: c_int = 0x0200; -pub const SO_BSDCOMPAT: c_int = 0x0400; -pub const SO_RCVLOWAT: c_int = 0x0800; -pub const SO_SNDLOWAT: c_int = 0x1000; -pub const SO_RCVTIMEO: c_int = 0x2000; -pub const SO_SNDTIMEO: c_int = 0x4000; -// pub const SO_RCVTIMEO_OLD: c_int = 0x2000; -// pub const SO_SNDTIMEO_OLD: c_int = 0x4000; -pub const SO_ACCEPTCONN: c_int = 0x8000; -pub const SO_SNDBUF: c_int = 0x1001; -pub const SO_RCVBUF: c_int = 0x1002; -pub const SO_SNDBUFFORCE: c_int = 0x100a; -pub const SO_RCVBUFFORCE: c_int = 0x100b; -pub const SO_ERROR: c_int = 0x1007; -pub const SO_TYPE: c_int = 0x1008; -pub const SO_PROTOCOL: c_int = 0x1028; -pub const SO_DOMAIN: c_int = 0x1029; -pub const SO_NO_CHECK: c_int = 0x000b; -pub const SO_PRIORITY: c_int = 0x000c; -pub const SO_BINDTODEVICE: c_int = 0x000d; -pub const SO_ATTACH_FILTER: c_int = 0x001a; -pub const SO_DETACH_FILTER: c_int = 0x001b; -pub const SO_GET_FILTER: c_int = SO_ATTACH_FILTER; -pub const SO_PEERNAME: c_int = 0x001c; -pub const SO_PEERSEC: c_int = 0x001e; -pub const SO_PASSSEC: c_int = 0x001f; -pub const SO_MARK: c_int = 0x0022; -pub const SO_RXQ_OVFL: c_int = 0x0024; -pub const SO_WIFI_STATUS: c_int = 0x0025; -pub const SCM_WIFI_STATUS: c_int = SO_WIFI_STATUS; -pub const SO_PEEK_OFF: c_int = 0x0026; -pub const SO_NOFCS: c_int = 0x0027; -pub const SO_LOCK_FILTER: c_int = 0x0028; -pub const SO_SELECT_ERR_QUEUE: c_int = 0x0029; -pub const SO_BUSY_POLL: c_int = 0x0030; -pub const SO_MAX_PACING_RATE: c_int = 0x0031; -pub const SO_BPF_EXTENSIONS: c_int = 0x0032; -pub const SO_INCOMING_CPU: c_int = 0x0033; -pub const SO_ATTACH_BPF: c_int = 0x0034; -pub const SO_DETACH_BPF: c_int = SO_DETACH_FILTER; -pub const SO_ATTACH_REUSEPORT_CBPF: c_int = 0x0035; -pub const SO_ATTACH_REUSEPORT_EBPF: c_int = 0x0036; -pub const SO_CNX_ADVICE: c_int = 0x0037; -pub const SCM_TIMESTAMPING_OPT_STATS: c_int = 0x0038; -pub const SO_MEMINFO: c_int = 0x0039; -pub const SO_INCOMING_NAPI_ID: c_int = 0x003a; -pub const SO_COOKIE: c_int = 0x003b; -pub const SCM_TIMESTAMPING_PKTINFO: c_int = 0x003c; -pub const SO_PEERGROUPS: c_int = 0x003d; -pub const SO_ZEROCOPY: c_int = 0x003e; -pub const SO_TXTIME: c_int = 0x003f; -pub const SCM_TXTIME: c_int = SO_TXTIME; -pub const SO_BINDTOIFINDEX: c_int = 0x0041; -pub const SO_SECURITY_AUTHENTICATION: c_int = 0x5001; -pub const SO_SECURITY_ENCRYPTION_TRANSPORT: c_int = 0x5002; -pub const SO_SECURITY_ENCRYPTION_NETWORK: c_int = 0x5004; -pub const SO_TIMESTAMP: c_int = 0x001d; -pub const SO_TIMESTAMPNS: c_int = 0x0021; -pub const SO_TIMESTAMPING: c_int = 0x0023; -// pub const SO_TIMESTAMP_OLD: c_int = 0x001d; -// pub const SO_TIMESTAMPNS_OLD: c_int = 0x0021; -// pub const SO_TIMESTAMPING_OLD: c_int = 0x0023; -// pub const SO_TIMESTAMP_NEW: c_int = 0x0046; -// pub 
const SO_TIMESTAMPNS_NEW: c_int = 0x0042; -// pub const SO_TIMESTAMPING_NEW: c_int = 0x0043; -// pub const SO_RCVTIMEO_NEW: c_int = 0x0044; -// pub const SO_SNDTIMEO_NEW: c_int = 0x0045; -// pub const SO_DETACH_REUSEPORT_BPF: c_int = 0x0047; -pub const SO_PREFER_BUSY_POLL: c_int = 0x0048; -pub const SO_BUSY_POLL_BUDGET: c_int = 0x0049; -pub const SO_NETNS_COOKIE: c_int = 0x0050; -pub const SO_BUF_LOCK: c_int = 0x0051; -pub const SO_RESERVE_MEM: c_int = 0x0052; -pub const SO_TXREHASH: c_int = 0x0053; -pub const SO_RCVMARK: c_int = 0x0054; -pub const SO_PASSPIDFD: c_int = 0x0055; -pub const SO_PEERPIDFD: c_int = 0x0056; -pub const SO_DEVMEM_LINEAR: c_int = 0x0057; -pub const SO_DEVMEM_DMABUF: c_int = 0x0058; -pub const SO_DEVMEM_DONTNEED: c_int = 0x0059; - -// Defined in unix/linux_like/mod.rs -// pub const SCM_TIMESTAMP: c_int = SO_TIMESTAMP; -pub const SCM_TIMESTAMPNS: c_int = SO_TIMESTAMPNS; -pub const SCM_TIMESTAMPING: c_int = SO_TIMESTAMPING; - -pub const SCM_DEVMEM_LINEAR: c_int = SO_DEVMEM_LINEAR; -pub const SCM_DEVMEM_DMABUF: c_int = SO_DEVMEM_DMABUF; - -// Ioctl Constants - -pub const TCGETS: Ioctl = 0x40245408; -pub const TCSETS: Ioctl = 0x80245409; -pub const TCSETSW: Ioctl = 0x8024540a; -pub const TCSETSF: Ioctl = 0x8024540b; -pub const TCGETA: Ioctl = 0x40125401; -pub const TCSETA: Ioctl = 0x80125402; -pub const TCSETAW: Ioctl = 0x80125403; -pub const TCSETAF: Ioctl = 0x80125404; -pub const TCSBRK: Ioctl = 0x20005405; -pub const TCXONC: Ioctl = 0x20005406; -pub const TCFLSH: Ioctl = 0x20005407; -pub const TIOCEXCL: Ioctl = 0x2000740d; -pub const TIOCNXCL: Ioctl = 0x2000740e; -pub const TIOCSCTTY: Ioctl = 0x20007484; -pub const TIOCGPGRP: Ioctl = 0x40047483; -pub const TIOCSPGRP: Ioctl = 0x80047482; -pub const TIOCOUTQ: Ioctl = 0x40047473; -pub const TIOCSTI: Ioctl = 0x80017472; -pub const TIOCGWINSZ: Ioctl = 0x40087468; -pub const TIOCSWINSZ: Ioctl = 0x80087467; -pub const TIOCMGET: Ioctl = 0x4004746a; -pub const TIOCMBIS: Ioctl = 0x8004746c; -pub const TIOCMBIC: Ioctl = 0x8004746b; -pub const TIOCMSET: Ioctl = 0x8004746d; -pub const TIOCGSOFTCAR: Ioctl = 0x40047464; -pub const TIOCSSOFTCAR: Ioctl = 0x80047465; -pub const FIONREAD: Ioctl = 0x4004667f; -pub const TIOCINQ: Ioctl = FIONREAD; -pub const TIOCLINUX: Ioctl = 0x541C; -pub const TIOCCONS: Ioctl = 0x20007424; -pub const TIOCGSERIAL: Ioctl = 0x541E; -pub const TIOCSSERIAL: Ioctl = 0x541F; -pub const TIOCPKT: Ioctl = 0x80047470; -pub const FIONBIO: Ioctl = 0x8004667e; -pub const TIOCNOTTY: Ioctl = 0x20007471; -pub const TIOCSETD: Ioctl = 0x80047401; -pub const TIOCGETD: Ioctl = 0x40047400; -pub const TCSBRKP: Ioctl = 0x5425; -pub const TIOCSBRK: Ioctl = 0x2000747b; -pub const TIOCCBRK: Ioctl = 0x2000747a; -pub const TIOCGSID: Ioctl = 0x40047485; -pub const TCGETS2: Ioctl = 0x402c540c; -pub const TCSETS2: Ioctl = 0x802c540d; -pub const TCSETSW2: Ioctl = 0x802c540e; -pub const TCSETSF2: Ioctl = 0x802c540f; -pub const TIOCGPTN: Ioctl = 0x40047486; -pub const TIOCSPTLCK: Ioctl = 0x80047487; -pub const TIOCGDEV: Ioctl = 0x40045432; -pub const TIOCSIG: Ioctl = 0x80047488; -pub const TIOCVHANGUP: Ioctl = 0x20005437; -pub const TIOCGPKT: Ioctl = 0x40045438; -pub const TIOCGPTLCK: Ioctl = 0x40045439; -pub const TIOCGEXCL: Ioctl = 0x40045440; -pub const TIOCGPTPEER: Ioctl = 0x20007489; -pub const FIONCLEX: Ioctl = 0x20006602; -pub const FIOCLEX: Ioctl = 0x20006601; -pub const TIOCSERCONFIG: Ioctl = 0x5453; -pub const TIOCSERGWILD: Ioctl = 0x5454; -pub const TIOCSERSWILD: Ioctl = 0x5455; -pub const TIOCGLCKTRMIOS: Ioctl = 0x5456; -pub 
const TIOCSLCKTRMIOS: Ioctl = 0x5457; -pub const TIOCSERGSTRUCT: Ioctl = 0x5458; -pub const TIOCSERGETLSR: Ioctl = 0x5459; -pub const TIOCSERGETMULTI: Ioctl = 0x545A; -pub const TIOCSERSETMULTI: Ioctl = 0x545B; -pub const TIOCMIWAIT: Ioctl = 0x545C; -pub const TIOCGICOUNT: Ioctl = 0x545D; -pub const TIOCSTART: Ioctl = 0x2000746e; -pub const TIOCSTOP: Ioctl = 0x2000746f; -pub const BLKIOMIN: Ioctl = 0x20001278; -pub const BLKIOOPT: Ioctl = 0x20001279; -pub const BLKSSZGET: Ioctl = 0x20001268; -pub const BLKPBSZGET: Ioctl = 0x2000127B; - -//pub const FIOASYNC: Ioctl = 0x4004667d; -//pub const FIOQSIZE: Ioctl = ; -//pub const TIOCGISO7816: Ioctl = 0x40285443; -//pub const TIOCSISO7816: Ioctl = 0xc0285444; -//pub const TIOCGRS485: Ioctl = 0x40205441; -//pub const TIOCSRS485: Ioctl = 0xc0205442; - -pub const TIOCM_LE: c_int = 0x001; -pub const TIOCM_DTR: c_int = 0x002; -pub const TIOCM_RTS: c_int = 0x004; -pub const TIOCM_ST: c_int = 0x008; -pub const TIOCM_SR: c_int = 0x010; -pub const TIOCM_CTS: c_int = 0x020; -pub const TIOCM_CAR: c_int = 0x040; -pub const TIOCM_CD: c_int = TIOCM_CAR; -pub const TIOCM_RNG: c_int = 0x080; -pub const TIOCM_RI: c_int = TIOCM_RNG; -pub const TIOCM_DSR: c_int = 0x100; - -pub const BOTHER: crate::speed_t = 0x1000; -pub const IBSHIFT: crate::tcflag_t = 16; - -// RLIMIT Constants - -pub const RLIMIT_CPU: crate::__rlimit_resource_t = 0; -pub const RLIMIT_FSIZE: crate::__rlimit_resource_t = 1; -pub const RLIMIT_DATA: crate::__rlimit_resource_t = 2; -pub const RLIMIT_STACK: crate::__rlimit_resource_t = 3; -pub const RLIMIT_CORE: crate::__rlimit_resource_t = 4; -pub const RLIMIT_RSS: crate::__rlimit_resource_t = 5; -pub const RLIMIT_NOFILE: crate::__rlimit_resource_t = 6; -pub const RLIMIT_NPROC: crate::__rlimit_resource_t = 7; -pub const RLIMIT_MEMLOCK: crate::__rlimit_resource_t = 8; -pub const RLIMIT_AS: crate::__rlimit_resource_t = 9; -pub const RLIMIT_LOCKS: crate::__rlimit_resource_t = 10; -pub const RLIMIT_SIGPENDING: crate::__rlimit_resource_t = 11; -pub const RLIMIT_MSGQUEUE: crate::__rlimit_resource_t = 12; -pub const RLIMIT_NICE: crate::__rlimit_resource_t = 13; -pub const RLIMIT_RTPRIO: crate::__rlimit_resource_t = 14; -pub const RLIMIT_RTTIME: crate::__rlimit_resource_t = 15; -#[deprecated(since = "0.2.64", note = "Not stable across OS versions")] -pub const RLIM_NLIMITS: crate::__rlimit_resource_t = 16; -#[allow(deprecated)] -#[deprecated(since = "0.2.64", note = "Not stable across OS versions")] -pub const RLIMIT_NLIMITS: crate::__rlimit_resource_t = RLIM_NLIMITS; - -cfg_if! { - if #[cfg(target_arch = "sparc64")] { - pub const RLIM_INFINITY: crate::rlim_t = !0; - } else if #[cfg(target_arch = "sparc")] { - pub const RLIM_INFINITY: crate::rlim_t = 0x7fffffff; - } -} diff --git a/vendor/libc/src/unix/linux_like/linux/gnu/b32/arm/mod.rs b/vendor/libc/src/unix/linux_like/linux/gnu/b32/arm/mod.rs deleted file mode 100644 index 900851ab5f42c5..00000000000000 --- a/vendor/libc/src/unix/linux_like/linux/gnu/b32/arm/mod.rs +++ /dev/null @@ -1,928 +0,0 @@ -use crate::prelude::*; -use crate::{off64_t, off_t}; - -pub type wchar_t = u32; - -s! 
{ - // FIXME(1.0): This should not implement `PartialEq` - #[allow(unpredictable_function_pointer_comparisons)] - pub struct sigaction { - pub sa_sigaction: crate::sighandler_t, - pub sa_mask: crate::sigset_t, - pub sa_flags: c_int, - pub sa_restorer: Option, - } - - pub struct statfs { - pub f_type: crate::__fsword_t, - pub f_bsize: crate::__fsword_t, - pub f_blocks: crate::fsblkcnt_t, - pub f_bfree: crate::fsblkcnt_t, - pub f_bavail: crate::fsblkcnt_t, - - pub f_files: crate::fsfilcnt_t, - pub f_ffree: crate::fsfilcnt_t, - pub f_fsid: crate::fsid_t, - - pub f_namelen: crate::__fsword_t, - pub f_frsize: crate::__fsword_t, - pub f_flags: crate::__fsword_t, - f_spare: [crate::__fsword_t; 4], - } - - pub struct flock { - pub l_type: c_short, - pub l_whence: c_short, - pub l_start: off_t, - pub l_len: off_t, - pub l_pid: crate::pid_t, - } - - pub struct flock64 { - pub l_type: c_short, - pub l_whence: c_short, - pub l_start: off64_t, - pub l_len: off64_t, - pub l_pid: crate::pid_t, - } - - pub struct ipc_perm { - pub __key: crate::key_t, - pub uid: crate::uid_t, - pub gid: crate::gid_t, - pub cuid: crate::uid_t, - pub cgid: crate::gid_t, - pub mode: c_ushort, - __pad1: c_ushort, - pub __seq: c_ushort, - __pad2: c_ushort, - __unused1: c_ulong, - __unused2: c_ulong, - } - - pub struct stat64 { - pub st_dev: crate::dev_t, - #[cfg(not(gnu_time_bits64))] - __pad1: c_uint, - #[cfg(not(gnu_time_bits64))] - __st_ino: c_ulong, - #[cfg(gnu_time_bits64)] - pub st_ino: crate::ino_t, - pub st_mode: crate::mode_t, - pub st_nlink: crate::nlink_t, - pub st_uid: crate::uid_t, - pub st_gid: crate::gid_t, - pub st_rdev: crate::dev_t, - #[cfg(not(gnu_time_bits64))] - __pad2: c_uint, - pub st_size: off64_t, - pub st_blksize: crate::blksize_t, - pub st_blocks: crate::blkcnt64_t, - pub st_atime: crate::time_t, - pub st_atime_nsec: c_long, - #[cfg(gnu_time_bits64)] - _atime_pad: c_int, - pub st_mtime: crate::time_t, - pub st_mtime_nsec: c_long, - #[cfg(gnu_time_bits64)] - _mtime_pad: c_int, - pub st_ctime: crate::time_t, - pub st_ctime_nsec: c_long, - #[cfg(gnu_time_bits64)] - _ctime_pad: c_int, - #[cfg(not(gnu_time_bits64))] - pub st_ino: crate::ino64_t, - } - - pub struct statfs64 { - pub f_type: crate::__fsword_t, - pub f_bsize: crate::__fsword_t, - pub f_blocks: u64, - pub f_bfree: u64, - pub f_bavail: u64, - pub f_files: u64, - pub f_ffree: u64, - pub f_fsid: crate::fsid_t, - pub f_namelen: crate::__fsword_t, - pub f_frsize: crate::__fsword_t, - pub f_flags: crate::__fsword_t, - pub f_spare: [crate::__fsword_t; 4], - } - - pub struct statvfs64 { - pub f_bsize: c_ulong, - pub f_frsize: c_ulong, - pub f_blocks: u64, - pub f_bfree: u64, - pub f_bavail: u64, - pub f_files: u64, - pub f_ffree: u64, - pub f_favail: u64, - pub f_fsid: c_ulong, - __f_unused: c_int, - pub f_flag: c_ulong, - pub f_namemax: c_ulong, - __f_spare: [c_int; 6], - } - - pub struct shmid_ds { - pub shm_perm: crate::ipc_perm, - pub shm_segsz: size_t, - pub shm_atime: crate::time_t, - #[cfg(not(gnu_time_bits64))] - __unused1: c_ulong, - pub shm_dtime: crate::time_t, - #[cfg(not(gnu_time_bits64))] - __unused2: c_ulong, - pub shm_ctime: crate::time_t, - #[cfg(not(gnu_time_bits64))] - __unused3: c_ulong, - pub shm_cpid: crate::pid_t, - pub shm_lpid: crate::pid_t, - pub shm_nattch: crate::shmatt_t, - __unused4: c_ulong, - __unused5: c_ulong, - } - - pub struct msqid_ds { - pub msg_perm: crate::ipc_perm, - pub msg_stime: crate::time_t, - #[cfg(not(gnu_time_bits64))] - __glibc_reserved1: c_ulong, - pub msg_rtime: crate::time_t, - 
#[cfg(not(gnu_time_bits64))] - __glibc_reserved2: c_ulong, - pub msg_ctime: crate::time_t, - #[cfg(not(gnu_time_bits64))] - __glibc_reserved3: c_ulong, - pub __msg_cbytes: c_ulong, - pub msg_qnum: crate::msgqnum_t, - pub msg_qbytes: crate::msglen_t, - pub msg_lspid: crate::pid_t, - pub msg_lrpid: crate::pid_t, - __glibc_reserved4: c_ulong, - __glibc_reserved5: c_ulong, - } - - pub struct siginfo_t { - pub si_signo: c_int, - pub si_errno: c_int, - pub si_code: c_int, - #[doc(hidden)] - #[deprecated( - since = "0.2.54", - note = "Please leave a comment on \ - https://github.com/rust-lang/libc/pull/1316 if you're using \ - this field" - )] - pub _pad: [c_int; 29], - _align: [usize; 0], - } - - pub struct stack_t { - pub ss_sp: *mut c_void, - pub ss_flags: c_int, - pub ss_size: size_t, - } - - pub struct mcontext_t { - pub trap_no: c_ulong, - pub error_code: c_ulong, - pub oldmask: c_ulong, - pub arm_r0: c_ulong, - pub arm_r1: c_ulong, - pub arm_r2: c_ulong, - pub arm_r3: c_ulong, - pub arm_r4: c_ulong, - pub arm_r5: c_ulong, - pub arm_r6: c_ulong, - pub arm_r7: c_ulong, - pub arm_r8: c_ulong, - pub arm_r9: c_ulong, - pub arm_r10: c_ulong, - pub arm_fp: c_ulong, - pub arm_ip: c_ulong, - pub arm_sp: c_ulong, - pub arm_lr: c_ulong, - pub arm_pc: c_ulong, - pub arm_cpsr: c_ulong, - pub fault_address: c_ulong, - } - - pub struct user_regs { - pub arm_r0: c_ulong, - pub arm_r1: c_ulong, - pub arm_r2: c_ulong, - pub arm_r3: c_ulong, - pub arm_r4: c_ulong, - pub arm_r5: c_ulong, - pub arm_r6: c_ulong, - pub arm_r7: c_ulong, - pub arm_r8: c_ulong, - pub arm_r9: c_ulong, - pub arm_r10: c_ulong, - pub arm_fp: c_ulong, - pub arm_ip: c_ulong, - pub arm_sp: c_ulong, - pub arm_lr: c_ulong, - pub arm_pc: c_ulong, - pub arm_cpsr: c_ulong, - pub arm_orig_r0: c_ulong, - } -} - -s_no_extra_traits! { - #[repr(align(8))] - pub struct max_align_t { - priv_: [i64; 2], - } - - #[repr(align(8))] - pub struct ucontext_t { - pub uc_flags: c_ulong, - pub uc_link: *mut ucontext_t, - pub uc_stack: crate::stack_t, - pub uc_mcontext: crate::mcontext_t, - pub uc_sigmask: crate::sigset_t, - pub uc_regspace: [c_ulong; 128], - } -} - -cfg_if! 
{ - if #[cfg(feature = "extra_traits")] { - impl PartialEq for ucontext_t { - fn eq(&self, other: &ucontext_t) -> bool { - self.uc_flags == other.uc_flags - && self.uc_link == other.uc_link - && self.uc_stack == other.uc_stack - && self.uc_mcontext == other.uc_mcontext - && self.uc_sigmask == other.uc_sigmask - } - } - impl Eq for ucontext_t {} - impl hash::Hash for ucontext_t { - fn hash(&self, state: &mut H) { - self.uc_flags.hash(state); - self.uc_link.hash(state); - self.uc_stack.hash(state); - self.uc_mcontext.hash(state); - self.uc_sigmask.hash(state); - } - } - } -} - -pub const VEOF: usize = 4; -pub const RTLD_DEEPBIND: c_int = 0x8; -pub const RTLD_GLOBAL: c_int = 0x100; -pub const RTLD_NOLOAD: c_int = 0x4; -pub const O_DIRECT: c_int = 0x10000; -pub const O_DIRECTORY: c_int = 0x4000; -pub const O_NOFOLLOW: c_int = 0x8000; -pub const O_LARGEFILE: c_int = 0o400000; -pub const O_APPEND: c_int = 1024; -pub const O_CREAT: c_int = 64; -pub const O_EXCL: c_int = 128; -pub const O_NOCTTY: c_int = 256; -pub const O_NONBLOCK: c_int = 2048; -pub const O_SYNC: c_int = 1052672; -pub const O_RSYNC: c_int = 1052672; -pub const O_DSYNC: c_int = 4096; -pub const O_FSYNC: c_int = 0x101000; -pub const O_ASYNC: c_int = 0x2000; -pub const O_NDELAY: c_int = 0x800; - -pub const MADV_SOFT_OFFLINE: c_int = 101; -pub const MAP_LOCKED: c_int = 0x02000; -pub const MAP_NORESERVE: c_int = 0x04000; -pub const MAP_ANON: c_int = 0x0020; -pub const MAP_ANONYMOUS: c_int = 0x0020; -pub const MAP_DENYWRITE: c_int = 0x0800; -pub const MAP_EXECUTABLE: c_int = 0x01000; -pub const MAP_POPULATE: c_int = 0x08000; -pub const MAP_NONBLOCK: c_int = 0x010000; -pub const MAP_STACK: c_int = 0x020000; -pub const MAP_HUGETLB: c_int = 0x040000; -pub const MAP_GROWSDOWN: c_int = 0x0100; -pub const MAP_SYNC: c_int = 0x080000; - -pub const EDEADLOCK: c_int = 35; -pub const EUCLEAN: c_int = 117; -pub const ENOTNAM: c_int = 118; -pub const ENAVAIL: c_int = 119; -pub const EISNAM: c_int = 120; -pub const EREMOTEIO: c_int = 121; -pub const EDEADLK: c_int = 35; -pub const ENAMETOOLONG: c_int = 36; -pub const ENOLCK: c_int = 37; -pub const ENOSYS: c_int = 38; -pub const ENOTEMPTY: c_int = 39; -pub const ELOOP: c_int = 40; -pub const ENOMSG: c_int = 42; -pub const EIDRM: c_int = 43; -pub const ECHRNG: c_int = 44; -pub const EL2NSYNC: c_int = 45; -pub const EL3HLT: c_int = 46; -pub const EL3RST: c_int = 47; -pub const ELNRNG: c_int = 48; -pub const EUNATCH: c_int = 49; -pub const ENOCSI: c_int = 50; -pub const EL2HLT: c_int = 51; -pub const EBADE: c_int = 52; -pub const EBADR: c_int = 53; -pub const EXFULL: c_int = 54; -pub const ENOANO: c_int = 55; -pub const EBADRQC: c_int = 56; -pub const EBADSLT: c_int = 57; -pub const EMULTIHOP: c_int = 72; -pub const EOVERFLOW: c_int = 75; -pub const ENOTUNIQ: c_int = 76; -pub const EBADFD: c_int = 77; -pub const EBADMSG: c_int = 74; -pub const EREMCHG: c_int = 78; -pub const ELIBACC: c_int = 79; -pub const ELIBBAD: c_int = 80; -pub const ELIBSCN: c_int = 81; -pub const ELIBMAX: c_int = 82; -pub const ELIBEXEC: c_int = 83; -pub const EILSEQ: c_int = 84; -pub const ERESTART: c_int = 85; -pub const ESTRPIPE: c_int = 86; -pub const EUSERS: c_int = 87; -pub const ENOTSOCK: c_int = 88; -pub const EDESTADDRREQ: c_int = 89; -pub const EMSGSIZE: c_int = 90; -pub const EPROTOTYPE: c_int = 91; -pub const ENOPROTOOPT: c_int = 92; -pub const EPROTONOSUPPORT: c_int = 93; -pub const ESOCKTNOSUPPORT: c_int = 94; -pub const EOPNOTSUPP: c_int = 95; -pub const EPFNOSUPPORT: c_int = 96; -pub const EAFNOSUPPORT: c_int = 97; 
-pub const EADDRINUSE: c_int = 98; -pub const EADDRNOTAVAIL: c_int = 99; -pub const ENETDOWN: c_int = 100; -pub const ENETUNREACH: c_int = 101; -pub const ENETRESET: c_int = 102; -pub const ECONNABORTED: c_int = 103; -pub const ECONNRESET: c_int = 104; -pub const ENOBUFS: c_int = 105; -pub const EISCONN: c_int = 106; -pub const ENOTCONN: c_int = 107; -pub const ESHUTDOWN: c_int = 108; -pub const ETOOMANYREFS: c_int = 109; -pub const ETIMEDOUT: c_int = 110; -pub const ECONNREFUSED: c_int = 111; -pub const EHOSTDOWN: c_int = 112; -pub const EHOSTUNREACH: c_int = 113; -pub const EALREADY: c_int = 114; -pub const EINPROGRESS: c_int = 115; -pub const ESTALE: c_int = 116; -pub const EDQUOT: c_int = 122; -pub const ENOMEDIUM: c_int = 123; -pub const EMEDIUMTYPE: c_int = 124; -pub const ECANCELED: c_int = 125; -pub const ENOKEY: c_int = 126; -pub const EKEYEXPIRED: c_int = 127; -pub const EKEYREVOKED: c_int = 128; -pub const EKEYREJECTED: c_int = 129; -pub const EOWNERDEAD: c_int = 130; -pub const ENOTRECOVERABLE: c_int = 131; -pub const EHWPOISON: c_int = 133; -pub const ERFKILL: c_int = 132; - -pub const SA_SIGINFO: c_int = 0x00000004; -pub const SA_NOCLDWAIT: c_int = 0x00000002; - -pub const SOCK_STREAM: c_int = 1; -pub const SOCK_DGRAM: c_int = 2; - -pub const MCL_CURRENT: c_int = 0x0001; -pub const MCL_FUTURE: c_int = 0x0002; -pub const MCL_ONFAULT: c_int = 0x0004; - -pub const POLLWRNORM: c_short = 0x100; -pub const POLLWRBAND: c_short = 0x200; - -cfg_if! { - if #[cfg(gnu_file_offset_bits64)] { - pub const F_GETLK: c_int = 12; - } else { - pub const F_GETLK: c_int = 5; - } -} -pub const F_GETOWN: c_int = 9; -pub const F_SETOWN: c_int = 8; - -pub const EFD_NONBLOCK: c_int = 0x800; -pub const SFD_NONBLOCK: c_int = 0x0800; - -pub const SIGCHLD: c_int = 17; -pub const SIGBUS: c_int = 7; -pub const SIGUSR1: c_int = 10; -pub const SIGUSR2: c_int = 12; -pub const SIGCONT: c_int = 18; -pub const SIGSTOP: c_int = 19; -pub const SIGTSTP: c_int = 20; -pub const SIGURG: c_int = 23; -pub const SIGIO: c_int = 29; -pub const SIGSYS: c_int = 31; -pub const SIGSTKFLT: c_int = 16; -#[deprecated(since = "0.2.55", note = "Use SIGSYS instead")] -pub const SIGUNUSED: c_int = 31; -pub const SIGPOLL: c_int = 29; -pub const SIGPWR: c_int = 30; -pub const SIG_SETMASK: c_int = 2; -pub const SIG_BLOCK: c_int = 0x000000; -pub const SIG_UNBLOCK: c_int = 0x01; -pub const SIGTTIN: c_int = 21; -pub const SIGTTOU: c_int = 22; -pub const SIGXCPU: c_int = 24; -pub const SIGXFSZ: c_int = 25; -pub const SIGVTALRM: c_int = 26; -pub const SIGPROF: c_int = 27; -pub const SIGWINCH: c_int = 28; -pub const SIGSTKSZ: size_t = 8192; -pub const MINSIGSTKSZ: size_t = 2048; -pub const CBAUD: crate::tcflag_t = 0o0010017; -pub const TAB1: crate::tcflag_t = 0x00000800; -pub const TAB2: crate::tcflag_t = 0x00001000; -pub const TAB3: crate::tcflag_t = 0x00001800; -pub const CR1: crate::tcflag_t = 0x00000200; -pub const CR2: crate::tcflag_t = 0x00000400; -pub const CR3: crate::tcflag_t = 0x00000600; -pub const FF1: crate::tcflag_t = 0x00008000; -pub const BS1: crate::tcflag_t = 0x00002000; -pub const VT1: crate::tcflag_t = 0x00004000; -pub const VWERASE: usize = 14; -pub const VREPRINT: usize = 12; -pub const VSUSP: usize = 10; -pub const VSTART: usize = 8; -pub const VSTOP: usize = 9; -pub const VDISCARD: usize = 13; -pub const VTIME: usize = 5; -pub const IXON: crate::tcflag_t = 0x00000400; -pub const IXOFF: crate::tcflag_t = 0x00001000; -pub const ONLCR: crate::tcflag_t = 0x4; -pub const CSIZE: crate::tcflag_t = 0x00000030; -pub const CS6: 
crate::tcflag_t = 0x00000010; -pub const CS7: crate::tcflag_t = 0x00000020; -pub const CS8: crate::tcflag_t = 0x00000030; -pub const CSTOPB: crate::tcflag_t = 0x00000040; -pub const CREAD: crate::tcflag_t = 0x00000080; -pub const PARENB: crate::tcflag_t = 0x00000100; -pub const PARODD: crate::tcflag_t = 0x00000200; -pub const HUPCL: crate::tcflag_t = 0x00000400; -pub const CLOCAL: crate::tcflag_t = 0x00000800; -pub const ECHOKE: crate::tcflag_t = 0x00000800; -pub const ECHOE: crate::tcflag_t = 0x00000010; -pub const ECHOK: crate::tcflag_t = 0x00000020; -pub const ECHONL: crate::tcflag_t = 0x00000040; -pub const ECHOPRT: crate::tcflag_t = 0x00000400; -pub const ECHOCTL: crate::tcflag_t = 0x00000200; -pub const ISIG: crate::tcflag_t = 0x00000001; -pub const ICANON: crate::tcflag_t = 0x00000002; -pub const PENDIN: crate::tcflag_t = 0x00004000; -pub const NOFLSH: crate::tcflag_t = 0x00000080; -pub const CIBAUD: crate::tcflag_t = 0o02003600000; -pub const CBAUDEX: crate::tcflag_t = 0o010000; -pub const VSWTC: usize = 7; -pub const OLCUC: crate::tcflag_t = 0o000002; -pub const NLDLY: crate::tcflag_t = 0o000400; -pub const CRDLY: crate::tcflag_t = 0o003000; -pub const TABDLY: crate::tcflag_t = 0o014000; -pub const BSDLY: crate::tcflag_t = 0o020000; -pub const FFDLY: crate::tcflag_t = 0o100000; -pub const VTDLY: crate::tcflag_t = 0o040000; -pub const XTABS: crate::tcflag_t = 0o014000; - -pub const B0: crate::speed_t = 0o000000; -pub const B50: crate::speed_t = 0o000001; -pub const B75: crate::speed_t = 0o000002; -pub const B110: crate::speed_t = 0o000003; -pub const B134: crate::speed_t = 0o000004; -pub const B150: crate::speed_t = 0o000005; -pub const B200: crate::speed_t = 0o000006; -pub const B300: crate::speed_t = 0o000007; -pub const B600: crate::speed_t = 0o000010; -pub const B1200: crate::speed_t = 0o000011; -pub const B1800: crate::speed_t = 0o000012; -pub const B2400: crate::speed_t = 0o000013; -pub const B4800: crate::speed_t = 0o000014; -pub const B9600: crate::speed_t = 0o000015; -pub const B19200: crate::speed_t = 0o000016; -pub const B38400: crate::speed_t = 0o000017; -pub const EXTA: crate::speed_t = B19200; -pub const EXTB: crate::speed_t = B38400; -pub const B57600: crate::speed_t = 0o010001; -pub const B115200: crate::speed_t = 0o010002; -pub const B230400: crate::speed_t = 0o010003; -pub const B460800: crate::speed_t = 0o010004; -pub const B500000: crate::speed_t = 0o010005; -pub const B576000: crate::speed_t = 0o010006; -pub const B921600: crate::speed_t = 0o010007; -pub const B1000000: crate::speed_t = 0o010010; -pub const B1152000: crate::speed_t = 0o010011; -pub const B1500000: crate::speed_t = 0o010012; -pub const B2000000: crate::speed_t = 0o010013; -pub const B2500000: crate::speed_t = 0o010014; -pub const B3000000: crate::speed_t = 0o010015; -pub const B3500000: crate::speed_t = 0o010016; -pub const B4000000: crate::speed_t = 0o010017; - -pub const VEOL: usize = 11; -pub const VEOL2: usize = 16; -pub const VMIN: usize = 6; -pub const IEXTEN: crate::tcflag_t = 0x00008000; -pub const TOSTOP: crate::tcflag_t = 0x00000100; -pub const FLUSHO: crate::tcflag_t = 0x00001000; -pub const EXTPROC: crate::tcflag_t = 0x00010000; - -pub const TCSANOW: c_int = 0; -pub const TCSADRAIN: c_int = 1; -pub const TCSAFLUSH: c_int = 2; - -// Syscall table -pub const SYS_restart_syscall: c_long = 0; -pub const SYS_exit: c_long = 1; -pub const SYS_fork: c_long = 2; -pub const SYS_read: c_long = 3; -pub const SYS_write: c_long = 4; -pub const SYS_open: c_long = 5; -pub const SYS_close: c_long = 
6; -pub const SYS_creat: c_long = 8; -pub const SYS_link: c_long = 9; -pub const SYS_unlink: c_long = 10; -pub const SYS_execve: c_long = 11; -pub const SYS_chdir: c_long = 12; -pub const SYS_mknod: c_long = 14; -pub const SYS_chmod: c_long = 15; -pub const SYS_lchown: c_long = 16; -pub const SYS_lseek: c_long = 19; -pub const SYS_getpid: c_long = 20; -pub const SYS_mount: c_long = 21; -pub const SYS_setuid: c_long = 23; -pub const SYS_getuid: c_long = 24; -pub const SYS_ptrace: c_long = 26; -pub const SYS_pause: c_long = 29; -pub const SYS_access: c_long = 33; -pub const SYS_nice: c_long = 34; -pub const SYS_sync: c_long = 36; -pub const SYS_kill: c_long = 37; -pub const SYS_rename: c_long = 38; -pub const SYS_mkdir: c_long = 39; -pub const SYS_rmdir: c_long = 40; -pub const SYS_dup: c_long = 41; -pub const SYS_pipe: c_long = 42; -pub const SYS_times: c_long = 43; -pub const SYS_brk: c_long = 45; -pub const SYS_setgid: c_long = 46; -pub const SYS_getgid: c_long = 47; -pub const SYS_geteuid: c_long = 49; -pub const SYS_getegid: c_long = 50; -pub const SYS_acct: c_long = 51; -pub const SYS_umount2: c_long = 52; -pub const SYS_ioctl: c_long = 54; -pub const SYS_fcntl: c_long = 55; -pub const SYS_setpgid: c_long = 57; -pub const SYS_umask: c_long = 60; -pub const SYS_chroot: c_long = 61; -pub const SYS_ustat: c_long = 62; -pub const SYS_dup2: c_long = 63; -pub const SYS_getppid: c_long = 64; -pub const SYS_getpgrp: c_long = 65; -pub const SYS_setsid: c_long = 66; -pub const SYS_sigaction: c_long = 67; -pub const SYS_setreuid: c_long = 70; -pub const SYS_setregid: c_long = 71; -pub const SYS_sigsuspend: c_long = 72; -pub const SYS_sigpending: c_long = 73; -pub const SYS_sethostname: c_long = 74; -pub const SYS_setrlimit: c_long = 75; -pub const SYS_getrusage: c_long = 77; -pub const SYS_gettimeofday: c_long = 78; -pub const SYS_settimeofday: c_long = 79; -pub const SYS_getgroups: c_long = 80; -pub const SYS_setgroups: c_long = 81; -pub const SYS_symlink: c_long = 83; -pub const SYS_readlink: c_long = 85; -pub const SYS_uselib: c_long = 86; -pub const SYS_swapon: c_long = 87; -pub const SYS_reboot: c_long = 88; -pub const SYS_munmap: c_long = 91; -pub const SYS_truncate: c_long = 92; -pub const SYS_ftruncate: c_long = 93; -pub const SYS_fchmod: c_long = 94; -pub const SYS_fchown: c_long = 95; -pub const SYS_getpriority: c_long = 96; -pub const SYS_setpriority: c_long = 97; -pub const SYS_statfs: c_long = 99; -pub const SYS_fstatfs: c_long = 100; -pub const SYS_syslog: c_long = 103; -pub const SYS_setitimer: c_long = 104; -pub const SYS_getitimer: c_long = 105; -pub const SYS_stat: c_long = 106; -pub const SYS_lstat: c_long = 107; -pub const SYS_fstat: c_long = 108; -pub const SYS_vhangup: c_long = 111; -pub const SYS_wait4: c_long = 114; -pub const SYS_swapoff: c_long = 115; -pub const SYS_sysinfo: c_long = 116; -pub const SYS_fsync: c_long = 118; -pub const SYS_sigreturn: c_long = 119; -pub const SYS_clone: c_long = 120; -pub const SYS_setdomainname: c_long = 121; -pub const SYS_uname: c_long = 122; -pub const SYS_adjtimex: c_long = 124; -pub const SYS_mprotect: c_long = 125; -pub const SYS_sigprocmask: c_long = 126; -pub const SYS_init_module: c_long = 128; -pub const SYS_delete_module: c_long = 129; -pub const SYS_quotactl: c_long = 131; -pub const SYS_getpgid: c_long = 132; -pub const SYS_fchdir: c_long = 133; -pub const SYS_bdflush: c_long = 134; -pub const SYS_sysfs: c_long = 135; -pub const SYS_personality: c_long = 136; -pub const SYS_setfsuid: c_long = 138; -pub const SYS_setfsgid: 
c_long = 139; -pub const SYS__llseek: c_long = 140; -pub const SYS_getdents: c_long = 141; -pub const SYS__newselect: c_long = 142; -pub const SYS_flock: c_long = 143; -pub const SYS_msync: c_long = 144; -pub const SYS_readv: c_long = 145; -pub const SYS_writev: c_long = 146; -pub const SYS_getsid: c_long = 147; -pub const SYS_fdatasync: c_long = 148; -pub const SYS__sysctl: c_long = 149; -pub const SYS_mlock: c_long = 150; -pub const SYS_munlock: c_long = 151; -pub const SYS_mlockall: c_long = 152; -pub const SYS_munlockall: c_long = 153; -pub const SYS_sched_setparam: c_long = 154; -pub const SYS_sched_getparam: c_long = 155; -pub const SYS_sched_setscheduler: c_long = 156; -pub const SYS_sched_getscheduler: c_long = 157; -pub const SYS_sched_yield: c_long = 158; -pub const SYS_sched_get_priority_max: c_long = 159; -pub const SYS_sched_get_priority_min: c_long = 160; -pub const SYS_sched_rr_get_interval: c_long = 161; -pub const SYS_nanosleep: c_long = 162; -pub const SYS_mremap: c_long = 163; -pub const SYS_setresuid: c_long = 164; -pub const SYS_getresuid: c_long = 165; -pub const SYS_poll: c_long = 168; -pub const SYS_nfsservctl: c_long = 169; -pub const SYS_setresgid: c_long = 170; -pub const SYS_getresgid: c_long = 171; -pub const SYS_prctl: c_long = 172; -pub const SYS_rt_sigreturn: c_long = 173; -pub const SYS_rt_sigaction: c_long = 174; -pub const SYS_rt_sigprocmask: c_long = 175; -pub const SYS_rt_sigpending: c_long = 176; -pub const SYS_rt_sigtimedwait: c_long = 177; -pub const SYS_rt_sigqueueinfo: c_long = 178; -pub const SYS_rt_sigsuspend: c_long = 179; -pub const SYS_pread64: c_long = 180; -pub const SYS_pwrite64: c_long = 181; -pub const SYS_chown: c_long = 182; -pub const SYS_getcwd: c_long = 183; -pub const SYS_capget: c_long = 184; -pub const SYS_capset: c_long = 185; -pub const SYS_sigaltstack: c_long = 186; -pub const SYS_sendfile: c_long = 187; -pub const SYS_vfork: c_long = 190; -pub const SYS_ugetrlimit: c_long = 191; -pub const SYS_mmap2: c_long = 192; -pub const SYS_truncate64: c_long = 193; -pub const SYS_ftruncate64: c_long = 194; -pub const SYS_stat64: c_long = 195; -pub const SYS_lstat64: c_long = 196; -pub const SYS_fstat64: c_long = 197; -pub const SYS_lchown32: c_long = 198; -pub const SYS_getuid32: c_long = 199; -pub const SYS_getgid32: c_long = 200; -pub const SYS_geteuid32: c_long = 201; -pub const SYS_getegid32: c_long = 202; -pub const SYS_setreuid32: c_long = 203; -pub const SYS_setregid32: c_long = 204; -pub const SYS_getgroups32: c_long = 205; -pub const SYS_setgroups32: c_long = 206; -pub const SYS_fchown32: c_long = 207; -pub const SYS_setresuid32: c_long = 208; -pub const SYS_getresuid32: c_long = 209; -pub const SYS_setresgid32: c_long = 210; -pub const SYS_getresgid32: c_long = 211; -pub const SYS_chown32: c_long = 212; -pub const SYS_setuid32: c_long = 213; -pub const SYS_setgid32: c_long = 214; -pub const SYS_setfsuid32: c_long = 215; -pub const SYS_setfsgid32: c_long = 216; -pub const SYS_getdents64: c_long = 217; -pub const SYS_pivot_root: c_long = 218; -pub const SYS_mincore: c_long = 219; -pub const SYS_madvise: c_long = 220; -pub const SYS_fcntl64: c_long = 221; -pub const SYS_gettid: c_long = 224; -pub const SYS_readahead: c_long = 225; -pub const SYS_setxattr: c_long = 226; -pub const SYS_lsetxattr: c_long = 227; -pub const SYS_fsetxattr: c_long = 228; -pub const SYS_getxattr: c_long = 229; -pub const SYS_lgetxattr: c_long = 230; -pub const SYS_fgetxattr: c_long = 231; -pub const SYS_listxattr: c_long = 232; -pub const SYS_llistxattr: 
c_long = 233; -pub const SYS_flistxattr: c_long = 234; -pub const SYS_removexattr: c_long = 235; -pub const SYS_lremovexattr: c_long = 236; -pub const SYS_fremovexattr: c_long = 237; -pub const SYS_tkill: c_long = 238; -pub const SYS_sendfile64: c_long = 239; -pub const SYS_futex: c_long = 240; -pub const SYS_sched_setaffinity: c_long = 241; -pub const SYS_sched_getaffinity: c_long = 242; -pub const SYS_io_setup: c_long = 243; -pub const SYS_io_destroy: c_long = 244; -pub const SYS_io_getevents: c_long = 245; -pub const SYS_io_submit: c_long = 246; -pub const SYS_io_cancel: c_long = 247; -pub const SYS_exit_group: c_long = 248; -pub const SYS_lookup_dcookie: c_long = 249; -pub const SYS_epoll_create: c_long = 250; -pub const SYS_epoll_ctl: c_long = 251; -pub const SYS_epoll_wait: c_long = 252; -pub const SYS_remap_file_pages: c_long = 253; -pub const SYS_set_tid_address: c_long = 256; -pub const SYS_timer_create: c_long = 257; -pub const SYS_timer_settime: c_long = 258; -pub const SYS_timer_gettime: c_long = 259; -pub const SYS_timer_getoverrun: c_long = 260; -pub const SYS_timer_delete: c_long = 261; -pub const SYS_clock_settime: c_long = 262; -pub const SYS_clock_gettime: c_long = 263; -pub const SYS_clock_getres: c_long = 264; -pub const SYS_clock_nanosleep: c_long = 265; -pub const SYS_statfs64: c_long = 266; -pub const SYS_fstatfs64: c_long = 267; -pub const SYS_tgkill: c_long = 268; -pub const SYS_utimes: c_long = 269; -pub const SYS_arm_fadvise64_64: c_long = 270; -pub const SYS_pciconfig_iobase: c_long = 271; -pub const SYS_pciconfig_read: c_long = 272; -pub const SYS_pciconfig_write: c_long = 273; -pub const SYS_mq_open: c_long = 274; -pub const SYS_mq_unlink: c_long = 275; -pub const SYS_mq_timedsend: c_long = 276; -pub const SYS_mq_timedreceive: c_long = 277; -pub const SYS_mq_notify: c_long = 278; -pub const SYS_mq_getsetattr: c_long = 279; -pub const SYS_waitid: c_long = 280; -pub const SYS_socket: c_long = 281; -pub const SYS_bind: c_long = 282; -pub const SYS_connect: c_long = 283; -pub const SYS_listen: c_long = 284; -pub const SYS_accept: c_long = 285; -pub const SYS_getsockname: c_long = 286; -pub const SYS_getpeername: c_long = 287; -pub const SYS_socketpair: c_long = 288; -pub const SYS_send: c_long = 289; -pub const SYS_sendto: c_long = 290; -pub const SYS_recv: c_long = 291; -pub const SYS_recvfrom: c_long = 292; -pub const SYS_shutdown: c_long = 293; -pub const SYS_setsockopt: c_long = 294; -pub const SYS_getsockopt: c_long = 295; -pub const SYS_sendmsg: c_long = 296; -pub const SYS_recvmsg: c_long = 297; -pub const SYS_semop: c_long = 298; -pub const SYS_semget: c_long = 299; -pub const SYS_semctl: c_long = 300; -pub const SYS_msgsnd: c_long = 301; -pub const SYS_msgrcv: c_long = 302; -pub const SYS_msgget: c_long = 303; -pub const SYS_msgctl: c_long = 304; -pub const SYS_shmat: c_long = 305; -pub const SYS_shmdt: c_long = 306; -pub const SYS_shmget: c_long = 307; -pub const SYS_shmctl: c_long = 308; -pub const SYS_add_key: c_long = 309; -pub const SYS_request_key: c_long = 310; -pub const SYS_keyctl: c_long = 311; -pub const SYS_semtimedop: c_long = 312; -pub const SYS_vserver: c_long = 313; -pub const SYS_ioprio_set: c_long = 314; -pub const SYS_ioprio_get: c_long = 315; -pub const SYS_inotify_init: c_long = 316; -pub const SYS_inotify_add_watch: c_long = 317; -pub const SYS_inotify_rm_watch: c_long = 318; -pub const SYS_mbind: c_long = 319; -pub const SYS_get_mempolicy: c_long = 320; -pub const SYS_set_mempolicy: c_long = 321; -pub const SYS_openat: c_long = 322; 
-pub const SYS_mkdirat: c_long = 323;
-pub const SYS_mknodat: c_long = 324;
-pub const SYS_fchownat: c_long = 325;
-pub const SYS_futimesat: c_long = 326;
-pub const SYS_fstatat64: c_long = 327;
-pub const SYS_unlinkat: c_long = 328;
-pub const SYS_renameat: c_long = 329;
-pub const SYS_linkat: c_long = 330;
-pub const SYS_symlinkat: c_long = 331;
-pub const SYS_readlinkat: c_long = 332;
-pub const SYS_fchmodat: c_long = 333;
-pub const SYS_faccessat: c_long = 334;
-pub const SYS_pselect6: c_long = 335;
-pub const SYS_ppoll: c_long = 336;
-pub const SYS_unshare: c_long = 337;
-pub const SYS_set_robust_list: c_long = 338;
-pub const SYS_get_robust_list: c_long = 339;
-pub const SYS_splice: c_long = 340;
-pub const SYS_arm_sync_file_range: c_long = 341;
-pub const SYS_tee: c_long = 342;
-pub const SYS_vmsplice: c_long = 343;
-pub const SYS_move_pages: c_long = 344;
-pub const SYS_getcpu: c_long = 345;
-pub const SYS_epoll_pwait: c_long = 346;
-pub const SYS_kexec_load: c_long = 347;
-pub const SYS_utimensat: c_long = 348;
-pub const SYS_signalfd: c_long = 349;
-pub const SYS_timerfd_create: c_long = 350;
-pub const SYS_eventfd: c_long = 351;
-pub const SYS_fallocate: c_long = 352;
-pub const SYS_timerfd_settime: c_long = 353;
-pub const SYS_timerfd_gettime: c_long = 354;
-pub const SYS_signalfd4: c_long = 355;
-pub const SYS_eventfd2: c_long = 356;
-pub const SYS_epoll_create1: c_long = 357;
-pub const SYS_dup3: c_long = 358;
-pub const SYS_pipe2: c_long = 359;
-pub const SYS_inotify_init1: c_long = 360;
-pub const SYS_preadv: c_long = 361;
-pub const SYS_pwritev: c_long = 362;
-pub const SYS_rt_tgsigqueueinfo: c_long = 363;
-pub const SYS_perf_event_open: c_long = 364;
-pub const SYS_recvmmsg: c_long = 365;
-pub const SYS_accept4: c_long = 366;
-pub const SYS_fanotify_init: c_long = 367;
-pub const SYS_fanotify_mark: c_long = 368;
-pub const SYS_prlimit64: c_long = 369;
-pub const SYS_name_to_handle_at: c_long = 370;
-pub const SYS_open_by_handle_at: c_long = 371;
-pub const SYS_clock_adjtime: c_long = 372;
-pub const SYS_syncfs: c_long = 373;
-pub const SYS_sendmmsg: c_long = 374;
-pub const SYS_setns: c_long = 375;
-pub const SYS_process_vm_readv: c_long = 376;
-pub const SYS_process_vm_writev: c_long = 377;
-pub const SYS_kcmp: c_long = 378;
-pub const SYS_finit_module: c_long = 379;
-pub const SYS_sched_setattr: c_long = 380;
-pub const SYS_sched_getattr: c_long = 381;
-pub const SYS_renameat2: c_long = 382;
-pub const SYS_seccomp: c_long = 383;
-pub const SYS_getrandom: c_long = 384;
-pub const SYS_memfd_create: c_long = 385;
-pub const SYS_bpf: c_long = 386;
-pub const SYS_execveat: c_long = 387;
-pub const SYS_userfaultfd: c_long = 388;
-pub const SYS_membarrier: c_long = 389;
-pub const SYS_mlock2: c_long = 390;
-pub const SYS_copy_file_range: c_long = 391;
-pub const SYS_preadv2: c_long = 392;
-pub const SYS_pwritev2: c_long = 393;
-pub const SYS_pkey_mprotect: c_long = 394;
-pub const SYS_pkey_alloc: c_long = 395;
-pub const SYS_pkey_free: c_long = 396;
-pub const SYS_statx: c_long = 397;
-pub const SYS_rseq: c_long = 398;
-pub const SYS_kexec_file_load: c_long = 401;
-pub const SYS_pidfd_send_signal: c_long = 424;
-pub const SYS_io_uring_setup: c_long = 425;
-pub const SYS_io_uring_enter: c_long = 426;
-pub const SYS_io_uring_register: c_long = 427;
-pub const SYS_open_tree: c_long = 428;
-pub const SYS_move_mount: c_long = 429;
-pub const SYS_fsopen: c_long = 430;
-pub const SYS_fsconfig: c_long = 431;
-pub const SYS_fsmount: c_long = 432;
-pub const SYS_fspick: c_long = 433;
-pub const SYS_pidfd_open: c_long = 434; -pub const SYS_clone3: c_long = 435; -pub const SYS_close_range: c_long = 436; -pub const SYS_openat2: c_long = 437; -pub const SYS_pidfd_getfd: c_long = 438; -pub const SYS_faccessat2: c_long = 439; -pub const SYS_process_madvise: c_long = 440; -pub const SYS_epoll_pwait2: c_long = 441; -pub const SYS_mount_setattr: c_long = 442; -pub const SYS_quotactl_fd: c_long = 443; -pub const SYS_landlock_create_ruleset: c_long = 444; -pub const SYS_landlock_add_rule: c_long = 445; -pub const SYS_landlock_restrict_self: c_long = 446; -pub const SYS_memfd_secret: c_long = 447; -pub const SYS_process_mrelease: c_long = 448; -pub const SYS_futex_waitv: c_long = 449; -pub const SYS_set_mempolicy_home_node: c_long = 450; -pub const SYS_mseal: c_long = 462; diff --git a/vendor/libc/src/unix/linux_like/linux/gnu/b32/csky/mod.rs b/vendor/libc/src/unix/linux_like/linux/gnu/b32/csky/mod.rs deleted file mode 100644 index 95881894a4b943..00000000000000 --- a/vendor/libc/src/unix/linux_like/linux/gnu/b32/csky/mod.rs +++ /dev/null @@ -1,745 +0,0 @@ -use crate::prelude::*; -use crate::{off64_t, off_t}; - -pub type wchar_t = u32; - -s! { - // FIXME(1.0): This should not implement `PartialEq` - #[allow(unpredictable_function_pointer_comparisons)] - pub struct sigaction { - pub sa_sigaction: crate::sighandler_t, - pub sa_mask: crate::sigset_t, - pub sa_flags: c_int, - pub sa_restorer: Option, - } - - pub struct statfs { - pub f_type: crate::__fsword_t, - pub f_bsize: crate::__fsword_t, - pub f_blocks: crate::fsblkcnt_t, - pub f_bfree: crate::fsblkcnt_t, - pub f_bavail: crate::fsblkcnt_t, - - pub f_files: crate::fsfilcnt_t, - pub f_ffree: crate::fsfilcnt_t, - pub f_fsid: crate::fsid_t, - - pub f_namelen: crate::__fsword_t, - pub f_frsize: crate::__fsword_t, - f_spare: [crate::__fsword_t; 5], - } - - pub struct flock { - pub l_type: c_short, - pub l_whence: c_short, - pub l_start: off_t, - pub l_len: off_t, - pub l_pid: crate::pid_t, - } - - pub struct flock64 { - pub l_type: c_short, - pub l_whence: c_short, - pub l_start: off64_t, - pub l_len: off64_t, - pub l_pid: crate::pid_t, - } - - pub struct ipc_perm { - pub __key: crate::key_t, - pub uid: crate::uid_t, - pub gid: crate::gid_t, - pub cuid: crate::uid_t, - pub cgid: crate::gid_t, - pub mode: c_ushort, - __pad1: c_ushort, - pub __seq: c_ushort, - __pad2: c_ushort, - __unused1: c_ulong, - __unused2: c_ulong, - } - - pub struct stat64 { - pub st_dev: crate::dev_t, - __pad1: c_uint, - __st_ino: crate::ino_t, - pub st_mode: crate::mode_t, - pub st_nlink: crate::nlink_t, - pub st_uid: crate::uid_t, - pub st_gid: crate::gid_t, - pub st_rdev: crate::dev_t, - __pad2: c_uint, - pub st_size: off64_t, - pub st_blksize: crate::blksize_t, - pub st_blocks: crate::blkcnt64_t, - pub st_atime: crate::time_t, - pub st_atime_nsec: c_long, - pub st_mtime: crate::time_t, - pub st_mtime_nsec: c_long, - pub st_ctime: crate::time_t, - pub st_ctime_nsec: c_long, - pub st_ino: crate::ino64_t, - } - - pub struct statfs64 { - pub f_type: crate::__fsword_t, - pub f_bsize: crate::__fsword_t, - pub f_blocks: u64, - pub f_bfree: u64, - pub f_bavail: u64, - pub f_files: u64, - pub f_ffree: u64, - pub f_fsid: crate::fsid_t, - pub f_namelen: crate::__fsword_t, - pub f_frsize: crate::__fsword_t, - pub f_flags: crate::__fsword_t, - pub f_spare: [crate::__fsword_t; 4], - } - - pub struct statvfs64 { - pub f_bsize: c_ulong, - pub f_frsize: c_ulong, - pub f_blocks: u64, - pub f_bfree: u64, - pub f_bavail: u64, - pub f_files: u64, - pub f_ffree: u64, - pub 
f_favail: u64, - pub f_fsid: c_ulong, - __f_unused: c_int, - pub f_flag: c_ulong, - pub f_namemax: c_ulong, - __f_spare: [c_int; 6], - } - - pub struct shmid_ds { - pub shm_perm: crate::ipc_perm, - pub shm_segsz: size_t, - pub shm_atime: crate::time_t, - __unused1: c_ulong, - pub shm_dtime: crate::time_t, - __unused2: c_ulong, - pub shm_ctime: crate::time_t, - __unused3: c_ulong, - pub shm_cpid: crate::pid_t, - pub shm_lpid: crate::pid_t, - pub shm_nattch: crate::shmatt_t, - __unused4: c_ulong, - __unused5: c_ulong, - } - - pub struct msqid_ds { - pub msg_perm: crate::ipc_perm, - pub msg_stime: crate::time_t, - __glibc_reserved1: c_ulong, - pub msg_rtime: crate::time_t, - __glibc_reserved2: c_ulong, - pub msg_ctime: crate::time_t, - __glibc_reserved3: c_ulong, - pub __msg_cbytes: c_ulong, - pub msg_qnum: crate::msgqnum_t, - pub msg_qbytes: crate::msglen_t, - pub msg_lspid: crate::pid_t, - pub msg_lrpid: crate::pid_t, - __glibc_reserved4: c_ulong, - __glibc_reserved5: c_ulong, - } - - pub struct siginfo_t { - pub si_signo: c_int, - pub si_errno: c_int, - pub si_code: c_int, - #[doc(hidden)] - #[deprecated( - since = "0.2.54", - note = "Please leave a comment on \ - https://github.com/rust-lang/libc/pull/1316 if you're using \ - this field" - )] - pub _pad: [c_int; 29], - _align: [usize; 0], - } - - pub struct stack_t { - pub ss_sp: *mut c_void, - pub ss_flags: c_int, - pub ss_size: size_t, - } -} - -s_no_extra_traits! { - #[repr(align(8))] - pub struct max_align_t { - priv_: [i64; 2], - } -} - -pub const VEOF: usize = 4; -pub const RTLD_DEEPBIND: c_int = 0x8; -pub const RTLD_GLOBAL: c_int = 0x100; -pub const RTLD_NOLOAD: c_int = 0x4; -pub const O_DIRECT: c_int = 0x4000; -pub const O_DIRECTORY: c_int = 0x10000; -pub const O_NOFOLLOW: c_int = 0x20000; -pub const O_LARGEFILE: c_int = 0o100000; -pub const O_APPEND: c_int = 1024; -pub const O_CREAT: c_int = 64; -pub const O_EXCL: c_int = 128; -pub const O_NOCTTY: c_int = 256; -pub const O_NONBLOCK: c_int = 2048; -pub const O_SYNC: c_int = 1052672; -pub const O_RSYNC: c_int = 1052672; -pub const O_DSYNC: c_int = 4096; -pub const O_FSYNC: c_int = 0x101000; -pub const O_ASYNC: c_int = 0x2000; -pub const O_NDELAY: c_int = 0x800; - -pub const MADV_SOFT_OFFLINE: c_int = 101; -pub const MAP_LOCKED: c_int = 0x02000; -pub const MAP_NORESERVE: c_int = 0x04000; -pub const MAP_ANON: c_int = 0x0020; -pub const MAP_ANONYMOUS: c_int = 0x0020; -pub const MAP_DENYWRITE: c_int = 0x0800; -pub const MAP_EXECUTABLE: c_int = 0x01000; -pub const MAP_POPULATE: c_int = 0x08000; -pub const MAP_NONBLOCK: c_int = 0x010000; -pub const MAP_STACK: c_int = 0x020000; -pub const MAP_HUGETLB: c_int = 0x040000; -pub const MAP_GROWSDOWN: c_int = 0x0100; -pub const MAP_SYNC: c_int = 0x080000; - -pub const EDEADLOCK: c_int = 35; -pub const EUCLEAN: c_int = 117; -pub const ENOTNAM: c_int = 118; -pub const ENAVAIL: c_int = 119; -pub const EISNAM: c_int = 120; -pub const EREMOTEIO: c_int = 121; -pub const EDEADLK: c_int = 35; -pub const ENAMETOOLONG: c_int = 36; -pub const ENOLCK: c_int = 37; -pub const ENOSYS: c_int = 38; -pub const ENOTEMPTY: c_int = 39; -pub const ELOOP: c_int = 40; -pub const ENOMSG: c_int = 42; -pub const EIDRM: c_int = 43; -pub const ECHRNG: c_int = 44; -pub const EL2NSYNC: c_int = 45; -pub const EL3HLT: c_int = 46; -pub const EL3RST: c_int = 47; -pub const ELNRNG: c_int = 48; -pub const EUNATCH: c_int = 49; -pub const ENOCSI: c_int = 50; -pub const EL2HLT: c_int = 51; -pub const EBADE: c_int = 52; -pub const EBADR: c_int = 53; -pub const EXFULL: c_int = 54; -pub 
const ENOANO: c_int = 55; -pub const EBADRQC: c_int = 56; -pub const EBADSLT: c_int = 57; -pub const EMULTIHOP: c_int = 72; -pub const EOVERFLOW: c_int = 75; -pub const ENOTUNIQ: c_int = 76; -pub const EBADFD: c_int = 77; -pub const EBADMSG: c_int = 74; -pub const EREMCHG: c_int = 78; -pub const ELIBACC: c_int = 79; -pub const ELIBBAD: c_int = 80; -pub const ELIBSCN: c_int = 81; -pub const ELIBMAX: c_int = 82; -pub const ELIBEXEC: c_int = 83; -pub const EILSEQ: c_int = 84; -pub const ERESTART: c_int = 85; -pub const ESTRPIPE: c_int = 86; -pub const EUSERS: c_int = 87; -pub const ENOTSOCK: c_int = 88; -pub const EDESTADDRREQ: c_int = 89; -pub const EMSGSIZE: c_int = 90; -pub const EPROTOTYPE: c_int = 91; -pub const ENOPROTOOPT: c_int = 92; -pub const EPROTONOSUPPORT: c_int = 93; -pub const ESOCKTNOSUPPORT: c_int = 94; -pub const EOPNOTSUPP: c_int = 95; -pub const EPFNOSUPPORT: c_int = 96; -pub const EAFNOSUPPORT: c_int = 97; -pub const EADDRINUSE: c_int = 98; -pub const EADDRNOTAVAIL: c_int = 99; -pub const ENETDOWN: c_int = 100; -pub const ENETUNREACH: c_int = 101; -pub const ENETRESET: c_int = 102; -pub const ECONNABORTED: c_int = 103; -pub const ECONNRESET: c_int = 104; -pub const ENOBUFS: c_int = 105; -pub const EISCONN: c_int = 106; -pub const ENOTCONN: c_int = 107; -pub const ESHUTDOWN: c_int = 108; -pub const ETOOMANYREFS: c_int = 109; -pub const ETIMEDOUT: c_int = 110; -pub const ECONNREFUSED: c_int = 111; -pub const EHOSTDOWN: c_int = 112; -pub const EHOSTUNREACH: c_int = 113; -pub const EALREADY: c_int = 114; -pub const EINPROGRESS: c_int = 115; -pub const ESTALE: c_int = 116; -pub const EDQUOT: c_int = 122; -pub const ENOMEDIUM: c_int = 123; -pub const EMEDIUMTYPE: c_int = 124; -pub const ECANCELED: c_int = 125; -pub const ENOKEY: c_int = 126; -pub const EKEYEXPIRED: c_int = 127; -pub const EKEYREVOKED: c_int = 128; -pub const EKEYREJECTED: c_int = 129; -pub const EOWNERDEAD: c_int = 130; -pub const ENOTRECOVERABLE: c_int = 131; -pub const EHWPOISON: c_int = 133; -pub const ERFKILL: c_int = 132; - -pub const SA_SIGINFO: c_int = 0x00000004; -pub const SA_NOCLDWAIT: c_int = 0x00000002; - -pub const SOCK_STREAM: c_int = 1; -pub const SOCK_DGRAM: c_int = 2; - -pub const MCL_CURRENT: c_int = 0x0001; -pub const MCL_FUTURE: c_int = 0x0002; -pub const MCL_ONFAULT: c_int = 0x0004; - -pub const POLLWRNORM: c_short = 0x100; -pub const POLLWRBAND: c_short = 0x200; - -pub const F_GETLK: c_int = 5; -pub const F_GETOWN: c_int = 9; -pub const F_SETOWN: c_int = 8; - -pub const EFD_NONBLOCK: c_int = 0x800; -pub const SFD_NONBLOCK: c_int = 0x0800; - -pub const SIGCHLD: c_int = 17; -pub const SIGBUS: c_int = 7; -pub const SIGUSR1: c_int = 10; -pub const SIGUSR2: c_int = 12; -pub const SIGCONT: c_int = 18; -pub const SIGSTOP: c_int = 19; -pub const SIGTSTP: c_int = 20; -pub const SIGURG: c_int = 23; -pub const SIGIO: c_int = 29; -pub const SIGSYS: c_int = 31; -pub const SIGSTKFLT: c_int = 16; -#[deprecated(since = "0.2.55", note = "Use SIGSYS instead")] -pub const SIGUNUSED: c_int = 31; -pub const SIGPOLL: c_int = 29; -pub const SIGPWR: c_int = 30; -pub const SIG_SETMASK: c_int = 2; -pub const SIG_BLOCK: c_int = 0x000000; -pub const SIG_UNBLOCK: c_int = 0x01; -pub const SIGTTIN: c_int = 21; -pub const SIGTTOU: c_int = 22; -pub const SIGXCPU: c_int = 24; -pub const SIGXFSZ: c_int = 25; -pub const SIGVTALRM: c_int = 26; -pub const SIGPROF: c_int = 27; -pub const SIGWINCH: c_int = 28; -pub const SIGSTKSZ: size_t = 8192; -pub const MINSIGSTKSZ: size_t = 2048; -pub const CBAUD: crate::tcflag_t = 
0o0010017; -pub const TAB1: crate::tcflag_t = 0x00000800; -pub const TAB2: crate::tcflag_t = 0x00001000; -pub const TAB3: crate::tcflag_t = 0x00001800; -pub const CR1: crate::tcflag_t = 0x00000200; -pub const CR2: crate::tcflag_t = 0x00000400; -pub const CR3: crate::tcflag_t = 0x00000600; -pub const FF1: crate::tcflag_t = 0x00008000; -pub const BS1: crate::tcflag_t = 0x00002000; -pub const VT1: crate::tcflag_t = 0x00004000; -pub const VWERASE: usize = 14; -pub const VREPRINT: usize = 12; -pub const VSUSP: usize = 10; -pub const VSTART: usize = 8; -pub const VSTOP: usize = 9; -pub const VDISCARD: usize = 13; -pub const VTIME: usize = 5; -pub const IXON: crate::tcflag_t = 0x00000400; -pub const IXOFF: crate::tcflag_t = 0x00001000; -pub const ONLCR: crate::tcflag_t = 0x4; -pub const CSIZE: crate::tcflag_t = 0x00000030; -pub const CS6: crate::tcflag_t = 0x00000010; -pub const CS7: crate::tcflag_t = 0x00000020; -pub const CS8: crate::tcflag_t = 0x00000030; -pub const CSTOPB: crate::tcflag_t = 0x00000040; -pub const CREAD: crate::tcflag_t = 0x00000080; -pub const PARENB: crate::tcflag_t = 0x00000100; -pub const PARODD: crate::tcflag_t = 0x00000200; -pub const HUPCL: crate::tcflag_t = 0x00000400; -pub const CLOCAL: crate::tcflag_t = 0x00000800; -pub const ECHOKE: crate::tcflag_t = 0x00000800; -pub const ECHOE: crate::tcflag_t = 0x00000010; -pub const ECHOK: crate::tcflag_t = 0x00000020; -pub const ECHONL: crate::tcflag_t = 0x00000040; -pub const ECHOPRT: crate::tcflag_t = 0x00000400; -pub const ECHOCTL: crate::tcflag_t = 0x00000200; -pub const ISIG: crate::tcflag_t = 0x00000001; -pub const ICANON: crate::tcflag_t = 0x00000002; -pub const PENDIN: crate::tcflag_t = 0x00004000; -pub const NOFLSH: crate::tcflag_t = 0x00000080; -pub const CIBAUD: crate::tcflag_t = 0o02003600000; -pub const CBAUDEX: crate::tcflag_t = 0o010000; -pub const VSWTC: usize = 7; -pub const OLCUC: crate::tcflag_t = 0o000002; -pub const NLDLY: crate::tcflag_t = 0o000400; -pub const CRDLY: crate::tcflag_t = 0o003000; -pub const TABDLY: crate::tcflag_t = 0o014000; -pub const BSDLY: crate::tcflag_t = 0o020000; -pub const FFDLY: crate::tcflag_t = 0o100000; -pub const VTDLY: crate::tcflag_t = 0o040000; -pub const XTABS: crate::tcflag_t = 0o014000; - -pub const B0: crate::speed_t = 0o000000; -pub const B50: crate::speed_t = 0o000001; -pub const B75: crate::speed_t = 0o000002; -pub const B110: crate::speed_t = 0o000003; -pub const B134: crate::speed_t = 0o000004; -pub const B150: crate::speed_t = 0o000005; -pub const B200: crate::speed_t = 0o000006; -pub const B300: crate::speed_t = 0o000007; -pub const B600: crate::speed_t = 0o000010; -pub const B1200: crate::speed_t = 0o000011; -pub const B1800: crate::speed_t = 0o000012; -pub const B2400: crate::speed_t = 0o000013; -pub const B4800: crate::speed_t = 0o000014; -pub const B9600: crate::speed_t = 0o000015; -pub const B19200: crate::speed_t = 0o000016; -pub const B38400: crate::speed_t = 0o000017; -pub const EXTA: crate::speed_t = B19200; -pub const EXTB: crate::speed_t = B38400; -pub const B57600: crate::speed_t = 0o010001; -pub const B115200: crate::speed_t = 0o010002; -pub const B230400: crate::speed_t = 0o010003; -pub const B460800: crate::speed_t = 0o010004; -pub const B500000: crate::speed_t = 0o010005; -pub const B576000: crate::speed_t = 0o010006; -pub const B921600: crate::speed_t = 0o010007; -pub const B1000000: crate::speed_t = 0o010010; -pub const B1152000: crate::speed_t = 0o010011; -pub const B1500000: crate::speed_t = 0o010012; -pub const B2000000: crate::speed_t = 
0o010013; -pub const B2500000: crate::speed_t = 0o010014; -pub const B3000000: crate::speed_t = 0o010015; -pub const B3500000: crate::speed_t = 0o010016; -pub const B4000000: crate::speed_t = 0o010017; - -pub const VEOL: usize = 11; -pub const VEOL2: usize = 16; -pub const VMIN: usize = 6; -pub const IEXTEN: crate::tcflag_t = 0x00008000; -pub const TOSTOP: crate::tcflag_t = 0x00000100; -pub const FLUSHO: crate::tcflag_t = 0x00001000; -pub const EXTPROC: crate::tcflag_t = 0x00010000; - -pub const TCSANOW: c_int = 0; -pub const TCSADRAIN: c_int = 1; -pub const TCSAFLUSH: c_int = 2; - -// Syscall table -pub const SYS_read: c_long = 63; -pub const SYS_write: c_long = 64; -pub const SYS_close: c_long = 57; -pub const SYS_fstat: c_long = 80; -pub const SYS_lseek: c_long = 62; -pub const SYS_mmap: c_long = 222; -pub const SYS_mprotect: c_long = 226; -pub const SYS_munmap: c_long = 215; -pub const SYS_brk: c_long = 214; -pub const SYS_rt_sigaction: c_long = 134; -pub const SYS_rt_sigprocmask: c_long = 135; -pub const SYS_rt_sigreturn: c_long = 139; -pub const SYS_ioctl: c_long = 29; -pub const SYS_pread64: c_long = 67; -pub const SYS_pwrite64: c_long = 68; -pub const SYS_readv: c_long = 65; -pub const SYS_writev: c_long = 66; -pub const SYS_sched_yield: c_long = 124; -pub const SYS_mremap: c_long = 216; -pub const SYS_msync: c_long = 227; -pub const SYS_mincore: c_long = 232; -pub const SYS_madvise: c_long = 233; -pub const SYS_shmget: c_long = 194; -pub const SYS_shmat: c_long = 196; -pub const SYS_shmctl: c_long = 195; -pub const SYS_dup: c_long = 23; -pub const SYS_nanosleep: c_long = 101; -pub const SYS_getitimer: c_long = 102; -pub const SYS_setitimer: c_long = 103; -pub const SYS_getpid: c_long = 172; -pub const SYS_sendfile: c_long = 71; -pub const SYS_socket: c_long = 198; -pub const SYS_connect: c_long = 203; -pub const SYS_accept: c_long = 202; -pub const SYS_sendto: c_long = 206; -pub const SYS_recvfrom: c_long = 207; -pub const SYS_sendmsg: c_long = 211; -pub const SYS_recvmsg: c_long = 212; -pub const SYS_shutdown: c_long = 210; -pub const SYS_bind: c_long = 200; -pub const SYS_listen: c_long = 201; -pub const SYS_getsockname: c_long = 204; -pub const SYS_getpeername: c_long = 205; -pub const SYS_socketpair: c_long = 199; -pub const SYS_setsockopt: c_long = 208; -pub const SYS_getsockopt: c_long = 209; -pub const SYS_clone: c_long = 220; -pub const SYS_execve: c_long = 221; -pub const SYS_exit: c_long = 93; -pub const SYS_wait4: c_long = 260; -pub const SYS_kill: c_long = 129; -pub const SYS_uname: c_long = 160; -pub const SYS_semget: c_long = 190; -pub const SYS_semop: c_long = 193; -pub const SYS_semctl: c_long = 191; -pub const SYS_shmdt: c_long = 197; -pub const SYS_msgget: c_long = 186; -pub const SYS_msgsnd: c_long = 189; -pub const SYS_msgrcv: c_long = 188; -pub const SYS_msgctl: c_long = 187; -pub const SYS_fcntl: c_long = 25; -pub const SYS_flock: c_long = 32; -pub const SYS_fsync: c_long = 82; -pub const SYS_fdatasync: c_long = 83; -pub const SYS_truncate: c_long = 45; -pub const SYS_ftruncate: c_long = 46; -pub const SYS_getcwd: c_long = 17; -pub const SYS_chdir: c_long = 49; -pub const SYS_fchdir: c_long = 50; -pub const SYS_fchmod: c_long = 52; -pub const SYS_fchown: c_long = 55; -pub const SYS_umask: c_long = 166; -pub const SYS_gettimeofday: c_long = 169; -pub const SYS_getrlimit: c_long = 163; -pub const SYS_getrusage: c_long = 165; -pub const SYS_sysinfo: c_long = 179; -pub const SYS_times: c_long = 153; -pub const SYS_ptrace: c_long = 117; -pub const SYS_getuid: 
c_long = 174; -pub const SYS_syslog: c_long = 116; -pub const SYS_getgid: c_long = 176; -pub const SYS_setuid: c_long = 146; -pub const SYS_setgid: c_long = 144; -pub const SYS_geteuid: c_long = 175; -pub const SYS_getegid: c_long = 177; -pub const SYS_setpgid: c_long = 154; -pub const SYS_getppid: c_long = 173; -pub const SYS_setsid: c_long = 157; -pub const SYS_setreuid: c_long = 145; -pub const SYS_setregid: c_long = 143; -pub const SYS_getgroups: c_long = 158; -pub const SYS_setgroups: c_long = 159; -pub const SYS_setresuid: c_long = 147; -pub const SYS_getresuid: c_long = 148; -pub const SYS_setresgid: c_long = 149; -pub const SYS_getresgid: c_long = 150; -pub const SYS_getpgid: c_long = 155; -pub const SYS_setfsuid: c_long = 151; -pub const SYS_setfsgid: c_long = 152; -pub const SYS_getsid: c_long = 156; -pub const SYS_capget: c_long = 90; -pub const SYS_capset: c_long = 91; -pub const SYS_rt_sigpending: c_long = 136; -pub const SYS_rt_sigtimedwait: c_long = 137; -pub const SYS_rt_sigqueueinfo: c_long = 138; -pub const SYS_rt_sigsuspend: c_long = 133; -pub const SYS_sigaltstack: c_long = 132; -pub const SYS_personality: c_long = 92; -pub const SYS_statfs: c_long = 43; -pub const SYS_fstatfs: c_long = 44; -pub const SYS_getpriority: c_long = 141; -pub const SYS_setpriority: c_long = 140; -pub const SYS_sched_setparam: c_long = 118; -pub const SYS_sched_getparam: c_long = 121; -pub const SYS_sched_setscheduler: c_long = 119; -pub const SYS_sched_getscheduler: c_long = 120; -pub const SYS_sched_get_priority_max: c_long = 125; -pub const SYS_sched_get_priority_min: c_long = 126; -pub const SYS_sched_rr_get_interval: c_long = 127; -pub const SYS_mlock: c_long = 228; -pub const SYS_munlock: c_long = 229; -pub const SYS_mlockall: c_long = 230; -pub const SYS_munlockall: c_long = 231; -pub const SYS_vhangup: c_long = 58; -pub const SYS_pivot_root: c_long = 41; -pub const SYS_prctl: c_long = 167; -pub const SYS_adjtimex: c_long = 171; -pub const SYS_setrlimit: c_long = 164; -pub const SYS_chroot: c_long = 51; -pub const SYS_sync: c_long = 81; -pub const SYS_acct: c_long = 89; -pub const SYS_settimeofday: c_long = 170; -pub const SYS_mount: c_long = 40; -pub const SYS_umount2: c_long = 39; -pub const SYS_swapon: c_long = 224; -pub const SYS_swapoff: c_long = 225; -pub const SYS_reboot: c_long = 142; -pub const SYS_sethostname: c_long = 161; -pub const SYS_setdomainname: c_long = 162; -pub const SYS_init_module: c_long = 105; -pub const SYS_delete_module: c_long = 106; -pub const SYS_quotactl: c_long = 60; -pub const SYS_nfsservctl: c_long = 42; -pub const SYS_gettid: c_long = 178; -pub const SYS_readahead: c_long = 213; -pub const SYS_setxattr: c_long = 5; -pub const SYS_lsetxattr: c_long = 6; -pub const SYS_fsetxattr: c_long = 7; -pub const SYS_getxattr: c_long = 8; -pub const SYS_lgetxattr: c_long = 9; -pub const SYS_fgetxattr: c_long = 10; -pub const SYS_listxattr: c_long = 11; -pub const SYS_llistxattr: c_long = 12; -pub const SYS_flistxattr: c_long = 13; -pub const SYS_removexattr: c_long = 14; -pub const SYS_lremovexattr: c_long = 15; -pub const SYS_fremovexattr: c_long = 16; -pub const SYS_tkill: c_long = 130; -pub const SYS_futex: c_long = 98; -pub const SYS_sched_setaffinity: c_long = 122; -pub const SYS_sched_getaffinity: c_long = 123; -pub const SYS_io_setup: c_long = 0; -pub const SYS_io_destroy: c_long = 1; -pub const SYS_io_getevents: c_long = 4; -pub const SYS_io_submit: c_long = 2; -pub const SYS_io_cancel: c_long = 3; -pub const SYS_lookup_dcookie: c_long = 18; -pub const 
SYS_remap_file_pages: c_long = 234; -pub const SYS_getdents64: c_long = 61; -pub const SYS_set_tid_address: c_long = 96; -pub const SYS_restart_syscall: c_long = 128; -pub const SYS_semtimedop: c_long = 192; -pub const SYS_fadvise64: c_long = 223; -pub const SYS_timer_create: c_long = 107; -pub const SYS_timer_settime: c_long = 110; -pub const SYS_timer_gettime: c_long = 108; -pub const SYS_timer_getoverrun: c_long = 109; -pub const SYS_timer_delete: c_long = 111; -pub const SYS_clock_settime: c_long = 112; -pub const SYS_clock_gettime: c_long = 113; -pub const SYS_clock_getres: c_long = 114; -pub const SYS_clock_nanosleep: c_long = 115; -pub const SYS_exit_group: c_long = 94; -pub const SYS_epoll_ctl: c_long = 21; -pub const SYS_tgkill: c_long = 131; -pub const SYS_mbind: c_long = 235; -pub const SYS_set_mempolicy: c_long = 237; -pub const SYS_get_mempolicy: c_long = 236; -pub const SYS_mq_open: c_long = 180; -pub const SYS_mq_unlink: c_long = 181; -pub const SYS_mq_timedsend: c_long = 182; -pub const SYS_mq_timedreceive: c_long = 183; -pub const SYS_mq_notify: c_long = 184; -pub const SYS_mq_getsetattr: c_long = 185; -pub const SYS_kexec_load: c_long = 104; -pub const SYS_waitid: c_long = 95; -pub const SYS_add_key: c_long = 217; -pub const SYS_request_key: c_long = 218; -pub const SYS_keyctl: c_long = 219; -pub const SYS_ioprio_set: c_long = 30; -pub const SYS_ioprio_get: c_long = 31; -pub const SYS_inotify_add_watch: c_long = 27; -pub const SYS_inotify_rm_watch: c_long = 28; -pub const SYS_migrate_pages: c_long = 238; -pub const SYS_openat: c_long = 56; -pub const SYS_mkdirat: c_long = 34; -pub const SYS_mknodat: c_long = 33; -pub const SYS_fchownat: c_long = 54; -pub const SYS_newfstatat: c_long = 79; -pub const SYS_unlinkat: c_long = 35; -pub const SYS_linkat: c_long = 37; -pub const SYS_symlinkat: c_long = 36; -pub const SYS_readlinkat: c_long = 78; -pub const SYS_fchmodat: c_long = 53; -pub const SYS_faccessat: c_long = 48; -pub const SYS_pselect6: c_long = 72; -pub const SYS_ppoll: c_long = 73; -pub const SYS_unshare: c_long = 97; -pub const SYS_set_robust_list: c_long = 99; -pub const SYS_get_robust_list: c_long = 100; -pub const SYS_splice: c_long = 76; -pub const SYS_tee: c_long = 77; -pub const SYS_sync_file_range: c_long = 84; -pub const SYS_vmsplice: c_long = 75; -pub const SYS_move_pages: c_long = 239; -pub const SYS_utimensat: c_long = 88; -pub const SYS_epoll_pwait: c_long = 22; -pub const SYS_timerfd_create: c_long = 85; -pub const SYS_fallocate: c_long = 47; -pub const SYS_timerfd_settime: c_long = 86; -pub const SYS_timerfd_gettime: c_long = 87; -pub const SYS_accept4: c_long = 242; -pub const SYS_signalfd4: c_long = 74; -pub const SYS_eventfd2: c_long = 19; -pub const SYS_epoll_create1: c_long = 20; -pub const SYS_dup3: c_long = 24; -pub const SYS_pipe2: c_long = 59; -pub const SYS_inotify_init1: c_long = 26; -pub const SYS_preadv: c_long = 69; -pub const SYS_pwritev: c_long = 70; -pub const SYS_rt_tgsigqueueinfo: c_long = 240; -pub const SYS_perf_event_open: c_long = 241; -pub const SYS_recvmmsg: c_long = 243; -pub const SYS_fanotify_init: c_long = 262; -pub const SYS_fanotify_mark: c_long = 263; -pub const SYS_prlimit64: c_long = 261; -pub const SYS_name_to_handle_at: c_long = 264; -pub const SYS_open_by_handle_at: c_long = 265; -pub const SYS_clock_adjtime: c_long = 266; -pub const SYS_syncfs: c_long = 267; -pub const SYS_sendmmsg: c_long = 269; -pub const SYS_setns: c_long = 268; -pub const SYS_getcpu: c_long = 168; -pub const SYS_process_vm_readv: c_long = 270; 
-pub const SYS_process_vm_writev: c_long = 271; -pub const SYS_kcmp: c_long = 272; -pub const SYS_finit_module: c_long = 273; -pub const SYS_sched_setattr: c_long = 274; -pub const SYS_sched_getattr: c_long = 275; -pub const SYS_renameat2: c_long = 276; -pub const SYS_seccomp: c_long = 277; -pub const SYS_getrandom: c_long = 278; -pub const SYS_memfd_create: c_long = 279; -pub const SYS_bpf: c_long = 280; -pub const SYS_execveat: c_long = 281; -pub const SYS_userfaultfd: c_long = 282; -pub const SYS_membarrier: c_long = 283; -pub const SYS_mlock2: c_long = 284; -pub const SYS_copy_file_range: c_long = 285; -pub const SYS_preadv2: c_long = 286; -pub const SYS_pwritev2: c_long = 287; -pub const SYS_pkey_mprotect: c_long = 288; -pub const SYS_pkey_alloc: c_long = 289; -pub const SYS_pkey_free: c_long = 290; -pub const SYS_statx: c_long = 291; -pub const SYS_rseq: c_long = 293; -pub const SYS_syscall: c_long = 294; -pub const SYS_pidfd_send_signal: c_long = 424; -pub const SYS_io_uring_setup: c_long = 425; -pub const SYS_io_uring_enter: c_long = 426; -pub const SYS_io_uring_register: c_long = 427; -pub const SYS_open_tree: c_long = 428; -pub const SYS_move_mount: c_long = 429; -pub const SYS_fsopen: c_long = 430; -pub const SYS_fsconfig: c_long = 431; -pub const SYS_fsmount: c_long = 432; -pub const SYS_fspick: c_long = 433; -pub const SYS_pidfd_open: c_long = 434; -pub const SYS_clone3: c_long = 435; -pub const SYS_close_range: c_long = 436; -pub const SYS_openat2: c_long = 437; -pub const SYS_pidfd_getfd: c_long = 438; -pub const SYS_faccessat2: c_long = 439; -pub const SYS_process_madvise: c_long = 440; -pub const SYS_epoll_pwait2: c_long = 441; -pub const SYS_mount_setattr: c_long = 442; -pub const SYS_quotactl_fd: c_long = 443; -pub const SYS_landlock_create_ruleset: c_long = 444; -pub const SYS_landlock_add_rule: c_long = 445; -pub const SYS_landlock_restrict_self: c_long = 446; -pub const SYS_memfd_secret: c_long = 447; -pub const SYS_process_mrelease: c_long = 448; -pub const SYS_futex_waitv: c_long = 449; -pub const SYS_set_mempolicy_home_node: c_long = 450; diff --git a/vendor/libc/src/unix/linux_like/linux/gnu/b32/m68k/mod.rs b/vendor/libc/src/unix/linux_like/linux/gnu/b32/m68k/mod.rs deleted file mode 100644 index d614fddeca9d90..00000000000000 --- a/vendor/libc/src/unix/linux_like/linux/gnu/b32/m68k/mod.rs +++ /dev/null @@ -1,863 +0,0 @@ -use crate::prelude::*; -use crate::{off64_t, off_t}; - -pub type wchar_t = i32; - -s! 
{ - // FIXME(1.0): This should not implement `PartialEq` - #[allow(unpredictable_function_pointer_comparisons)] - pub struct sigaction { - pub sa_sigaction: crate::sighandler_t, - pub sa_mask: crate::sigset_t, - pub sa_flags: c_int, - pub sa_restorer: Option, - } - - pub struct statfs { - pub f_type: crate::__fsword_t, - pub f_bsize: crate::__fsword_t, - pub f_blocks: crate::fsblkcnt_t, - pub f_bfree: crate::fsblkcnt_t, - pub f_bavail: crate::fsblkcnt_t, - - pub f_files: crate::fsfilcnt_t, - pub f_ffree: crate::fsfilcnt_t, - pub f_fsid: crate::fsid_t, - - pub f_namelen: crate::__fsword_t, - pub f_frsize: crate::__fsword_t, - pub f_flags: crate::__fsword_t, - f_spare: [crate::__fsword_t; 4], - } - - pub struct flock { - pub l_type: c_short, - pub l_whence: c_short, - pub l_start: off_t, - pub l_len: off_t, - pub l_pid: crate::pid_t, - } - - pub struct flock64 { - pub l_type: c_short, - pub l_whence: c_short, - pub l_start: off64_t, - pub l_len: off64_t, - pub l_pid: crate::pid_t, - } - - pub struct ipc_perm { - __key: crate::key_t, - pub uid: crate::uid_t, - pub gid: crate::gid_t, - pub cuid: crate::uid_t, - pub cgid: crate::gid_t, - pub mode: crate::mode_t, - __seq: c_ushort, - __pad1: c_ushort, - __glibc_reserved1: c_ulong, - __glibc_reserved2: c_ulong, - } - - pub struct stat64 { - pub st_dev: crate::dev_t, - __pad1: c_ushort, - pub __st_ino: crate::ino_t, - pub st_mode: crate::mode_t, - pub st_nlink: crate::nlink_t, - pub st_uid: crate::uid_t, - pub st_gid: crate::gid_t, - pub st_rdev: crate::dev_t, - __pad2: c_ushort, - pub st_size: off64_t, - pub st_blksize: crate::blksize_t, - pub st_blocks: crate::blkcnt64_t, - pub st_atime: crate::time_t, - pub st_atime_nsec: c_ulong, - pub st_mtime: crate::time_t, - pub st_mtime_nsec: c_ulong, - pub st_ctime: crate::time_t, - pub st_ctime_nsec: c_ulong, - pub st_ino: crate::ino64_t, - } - - pub struct statfs64 { - pub f_type: crate::__fsword_t, - pub f_bsize: crate::__fsword_t, - pub f_blocks: crate::fsblkcnt64_t, - pub f_bfree: crate::fsblkcnt64_t, - pub f_bavail: crate::fsblkcnt64_t, - pub f_files: crate::fsblkcnt64_t, - pub f_ffree: crate::fsblkcnt64_t, - pub f_fsid: crate::fsid_t, - pub f_namelen: crate::__fsword_t, - pub f_frsize: crate::__fsword_t, - pub f_flags: crate::__fsword_t, - pub f_spare: [crate::__fsword_t; 4], - } - - pub struct statvfs64 { - pub f_bsize: c_ulong, - pub f_frsize: c_ulong, - pub f_blocks: crate::fsblkcnt64_t, - pub f_bfree: crate::fsblkcnt64_t, - pub f_bavail: crate::fsblkcnt64_t, - pub f_files: crate::fsblkcnt64_t, - pub f_ffree: crate::fsblkcnt64_t, - pub f_favail: crate::fsblkcnt64_t, - pub f_fsid: c_ulong, - __f_unused: c_int, - pub f_flag: c_ulong, - pub f_namemax: c_ulong, - __f_spare: [c_int; 6], - } - - pub struct shmid_ds { - pub shm_perm: crate::ipc_perm, - pub shm_segsz: size_t, - pub shm_atime: crate::time_t, - __glibc_reserved1: c_long, - pub shm_dtime: crate::time_t, - __glibc_reserved2: c_long, - pub shm_ctime: crate::time_t, - __glibc_reserved3: c_long, - pub shm_cpid: crate::pid_t, - pub shm_lpid: crate::pid_t, - pub shm_nattch: crate::shmatt_t, - __glibc_reserved5: c_ulong, - __glibc_reserved6: c_ulong, - } - - pub struct msqid_ds { - pub msg_perm: crate::ipc_perm, - pub msg_stime: crate::time_t, - __glibc_reserved1: c_uint, - pub msg_rtime: crate::time_t, - __glibc_reserved2: c_uint, - pub msg_ctime: crate::time_t, - __glibc_reserved3: c_uint, - pub __msg_cbytes: c_ulong, - pub msg_qnum: crate::msgqnum_t, - pub msg_qbytes: crate::msglen_t, - pub msg_lspid: crate::pid_t, - pub msg_lrpid: 
crate::pid_t, - __glibc_reserved4: c_ulong, - __glibc_reserved5: c_ulong, - } - - pub struct siginfo_t { - pub si_signo: c_int, - pub si_code: c_int, - pub si_errno: c_int, - _pad: [c_int; 29], - _align: [usize; 0], - } - - pub struct stack_t { - pub ss_sp: *mut c_void, - pub ss_flags: c_int, - pub ss_size: size_t, - } -} - -s_no_extra_traits! { - #[repr(align(2))] - pub struct max_align_t { - priv_: [i8; 20], - } -} - -pub const VEOF: usize = 4; -pub const RTLD_DEEPBIND: c_int = 0x8; -pub const RTLD_GLOBAL: c_int = 0x100; -pub const RTLD_NOLOAD: c_int = 0x4; -pub const O_DIRECT: c_int = 0x10000; -pub const O_DIRECTORY: c_int = 0x4000; -pub const O_NOFOLLOW: c_int = 0x8000; -pub const O_LARGEFILE: c_int = 0x20000; -pub const O_APPEND: c_int = 1024; -pub const O_CREAT: c_int = 64; -pub const O_EXCL: c_int = 128; -pub const O_NOCTTY: c_int = 256; -pub const O_NONBLOCK: c_int = 2048; -pub const O_SYNC: c_int = 1052672; -pub const O_RSYNC: c_int = 1052672; -pub const O_DSYNC: c_int = 4096; -pub const O_FSYNC: c_int = 0x101000; -pub const O_ASYNC: c_int = 0x2000; -pub const O_NDELAY: c_int = 0x800; - -pub const MADV_SOFT_OFFLINE: c_int = 101; -pub const MAP_LOCKED: c_int = 0x02000; -pub const MAP_NORESERVE: c_int = 0x04000; -pub const MAP_ANON: c_int = 0x0020; -pub const MAP_ANONYMOUS: c_int = 0x0020; -pub const MAP_DENYWRITE: c_int = 0x0800; -pub const MAP_EXECUTABLE: c_int = 0x01000; -pub const MAP_POPULATE: c_int = 0x08000; -pub const MAP_NONBLOCK: c_int = 0x010000; -pub const MAP_STACK: c_int = 0x020000; -pub const MAP_HUGETLB: c_int = 0x040000; -pub const MAP_GROWSDOWN: c_int = 0x0100; -pub const MAP_SYNC: c_int = 0x080000; - -pub const EDEADLOCK: c_int = 35; -pub const EUCLEAN: c_int = 117; -pub const ENOTNAM: c_int = 118; -pub const ENAVAIL: c_int = 119; -pub const EISNAM: c_int = 120; -pub const EREMOTEIO: c_int = 121; -pub const EDEADLK: c_int = 35; -pub const ENAMETOOLONG: c_int = 36; -pub const ENOLCK: c_int = 37; -pub const ENOSYS: c_int = 38; -pub const ENOTEMPTY: c_int = 39; -pub const ELOOP: c_int = 40; -pub const ENOMSG: c_int = 42; -pub const EIDRM: c_int = 43; -pub const ECHRNG: c_int = 44; -pub const EL2NSYNC: c_int = 45; -pub const EL3HLT: c_int = 46; -pub const EL3RST: c_int = 47; -pub const ELNRNG: c_int = 48; -pub const EUNATCH: c_int = 49; -pub const ENOCSI: c_int = 50; -pub const EL2HLT: c_int = 51; -pub const EBADE: c_int = 52; -pub const EBADR: c_int = 53; -pub const EXFULL: c_int = 54; -pub const ENOANO: c_int = 55; -pub const EBADRQC: c_int = 56; -pub const EBADSLT: c_int = 57; -pub const EMULTIHOP: c_int = 72; -pub const EOVERFLOW: c_int = 75; -pub const ENOTUNIQ: c_int = 76; -pub const EBADFD: c_int = 77; -pub const EBADMSG: c_int = 74; -pub const EREMCHG: c_int = 78; -pub const ELIBACC: c_int = 79; -pub const ELIBBAD: c_int = 80; -pub const ELIBSCN: c_int = 81; -pub const ELIBMAX: c_int = 82; -pub const ELIBEXEC: c_int = 83; -pub const EILSEQ: c_int = 84; -pub const ERESTART: c_int = 85; -pub const ESTRPIPE: c_int = 86; -pub const EUSERS: c_int = 87; -pub const ENOTSOCK: c_int = 88; -pub const EDESTADDRREQ: c_int = 89; -pub const EMSGSIZE: c_int = 90; -pub const EPROTOTYPE: c_int = 91; -pub const ENOPROTOOPT: c_int = 92; -pub const EPROTONOSUPPORT: c_int = 93; -pub const ESOCKTNOSUPPORT: c_int = 94; -pub const EOPNOTSUPP: c_int = 95; -pub const EPFNOSUPPORT: c_int = 96; -pub const EAFNOSUPPORT: c_int = 97; -pub const EADDRINUSE: c_int = 98; -pub const EADDRNOTAVAIL: c_int = 99; -pub const ENETDOWN: c_int = 100; -pub const ENETUNREACH: c_int = 101; -pub const 
ENETRESET: c_int = 102; -pub const ECONNABORTED: c_int = 103; -pub const ECONNRESET: c_int = 104; -pub const ENOBUFS: c_int = 105; -pub const EISCONN: c_int = 106; -pub const ENOTCONN: c_int = 107; -pub const ESHUTDOWN: c_int = 108; -pub const ETOOMANYREFS: c_int = 109; -pub const ETIMEDOUT: c_int = 110; -pub const ECONNREFUSED: c_int = 111; -pub const EHOSTDOWN: c_int = 112; -pub const EHOSTUNREACH: c_int = 113; -pub const EALREADY: c_int = 114; -pub const EINPROGRESS: c_int = 115; -pub const ESTALE: c_int = 116; -pub const EDQUOT: c_int = 122; -pub const ENOMEDIUM: c_int = 123; -pub const EMEDIUMTYPE: c_int = 124; -pub const ECANCELED: c_int = 125; -pub const ENOKEY: c_int = 126; -pub const EKEYEXPIRED: c_int = 127; -pub const EKEYREVOKED: c_int = 128; -pub const EKEYREJECTED: c_int = 129; -pub const EOWNERDEAD: c_int = 130; -pub const ENOTRECOVERABLE: c_int = 131; -pub const EHWPOISON: c_int = 133; -pub const ERFKILL: c_int = 132; - -pub const SA_SIGINFO: c_int = 0x00000004; -pub const SA_NOCLDWAIT: c_int = 0x00000002; - -pub const SOCK_STREAM: c_int = 1; -pub const SOCK_DGRAM: c_int = 2; - -pub const F_GETLK: c_int = 5; -pub const F_GETOWN: c_int = 9; -pub const F_SETOWN: c_int = 8; - -pub const PTRACE_GETFPXREGS: c_uint = 18; -pub const PTRACE_SETFPXREGS: c_uint = 19; -pub const PTRACE_SYSEMU: c_uint = 31; -pub const PTRACE_SYSEMU_SINGLESTEP: c_uint = 32; - -pub const MCL_CURRENT: c_int = 0x0001; -pub const MCL_FUTURE: c_int = 0x0002; -pub const MCL_ONFAULT: c_int = 0x0004; - -pub const POLLWRNORM: c_short = 0x100; -pub const POLLWRBAND: c_short = 0x200; - -pub const EFD_NONBLOCK: c_int = 0x800; -pub const SFD_NONBLOCK: c_int = 0x0800; - -pub const SIGCHLD: c_int = 17; -pub const SIGBUS: c_int = 7; -pub const SIGUSR1: c_int = 10; -pub const SIGUSR2: c_int = 12; -pub const SIGCONT: c_int = 18; -pub const SIGSTOP: c_int = 19; -pub const SIGTSTP: c_int = 20; -pub const SIGURG: c_int = 23; -pub const SIGIO: c_int = 29; -pub const SIGSYS: c_int = 31; -pub const SIGSTKFLT: c_int = 16; -#[deprecated(since = "0.2.55", note = "Use SIGSYS instead")] -pub const SIGUNUSED: c_int = 31; -pub const SIGPOLL: c_int = 29; -pub const SIGPWR: c_int = 30; -pub const SIG_SETMASK: c_int = 2; -pub const SIG_BLOCK: c_int = 0x000000; -pub const SIG_UNBLOCK: c_int = 0x01; -pub const SIGTTIN: c_int = 21; -pub const SIGTTOU: c_int = 22; -pub const SIGXCPU: c_int = 24; -pub const SIGXFSZ: c_int = 25; -pub const SIGVTALRM: c_int = 26; -pub const SIGPROF: c_int = 27; -pub const SIGWINCH: c_int = 28; -pub const SIGSTKSZ: size_t = 8192; -pub const MINSIGSTKSZ: size_t = 2048; -pub const CBAUD: crate::tcflag_t = 0o0010017; -pub const TAB1: crate::tcflag_t = 0x00000800; -pub const TAB2: crate::tcflag_t = 0x00001000; -pub const TAB3: crate::tcflag_t = 0x00001800; -pub const CR1: crate::tcflag_t = 0x00000200; -pub const CR2: crate::tcflag_t = 0x00000400; -pub const CR3: crate::tcflag_t = 0x00000600; -pub const FF1: crate::tcflag_t = 0x00008000; -pub const BS1: crate::tcflag_t = 0x00002000; -pub const VT1: crate::tcflag_t = 0x00004000; -pub const VWERASE: usize = 14; -pub const VREPRINT: usize = 12; -pub const VSUSP: usize = 10; -pub const VSTART: usize = 8; -pub const VSTOP: usize = 9; -pub const VDISCARD: usize = 13; -pub const VTIME: usize = 5; -pub const IXON: crate::tcflag_t = 0x00000400; -pub const IXOFF: crate::tcflag_t = 0x00001000; -pub const ONLCR: crate::tcflag_t = 0x4; -pub const CSIZE: crate::tcflag_t = 0x00000030; -pub const CS6: crate::tcflag_t = 0x00000010; -pub const CS7: crate::tcflag_t = 0x00000020; -pub 
const CS8: crate::tcflag_t = 0x00000030; -pub const CSTOPB: crate::tcflag_t = 0x00000040; -pub const CREAD: crate::tcflag_t = 0x00000080; -pub const PARENB: crate::tcflag_t = 0x00000100; -pub const PARODD: crate::tcflag_t = 0x00000200; -pub const HUPCL: crate::tcflag_t = 0x00000400; -pub const CLOCAL: crate::tcflag_t = 0x00000800; -pub const ECHOKE: crate::tcflag_t = 0x00000800; -pub const ECHOE: crate::tcflag_t = 0x00000010; -pub const ECHOK: crate::tcflag_t = 0x00000020; -pub const ECHONL: crate::tcflag_t = 0x00000040; -pub const ECHOPRT: crate::tcflag_t = 0x00000400; -pub const ECHOCTL: crate::tcflag_t = 0x00000200; -pub const ISIG: crate::tcflag_t = 0x00000001; -pub const ICANON: crate::tcflag_t = 0x00000002; -pub const PENDIN: crate::tcflag_t = 0x00004000; -pub const NOFLSH: crate::tcflag_t = 0x00000080; -pub const CIBAUD: crate::tcflag_t = 0o02003600000; -pub const CBAUDEX: crate::tcflag_t = 0o010000; -pub const VSWTC: usize = 7; -pub const OLCUC: crate::tcflag_t = 0o000002; -pub const NLDLY: crate::tcflag_t = 0o000400; -pub const CRDLY: crate::tcflag_t = 0o003000; -pub const TABDLY: crate::tcflag_t = 0o014000; -pub const BSDLY: crate::tcflag_t = 0o020000; -pub const FFDLY: crate::tcflag_t = 0o100000; -pub const VTDLY: crate::tcflag_t = 0o040000; -pub const XTABS: crate::tcflag_t = 0o014000; - -pub const B0: crate::speed_t = 0o000000; -pub const B50: crate::speed_t = 0o000001; -pub const B75: crate::speed_t = 0o000002; -pub const B110: crate::speed_t = 0o000003; -pub const B134: crate::speed_t = 0o000004; -pub const B150: crate::speed_t = 0o000005; -pub const B200: crate::speed_t = 0o000006; -pub const B300: crate::speed_t = 0o000007; -pub const B600: crate::speed_t = 0o000010; -pub const B1200: crate::speed_t = 0o000011; -pub const B1800: crate::speed_t = 0o000012; -pub const B2400: crate::speed_t = 0o000013; -pub const B4800: crate::speed_t = 0o000014; -pub const B9600: crate::speed_t = 0o000015; -pub const B19200: crate::speed_t = 0o000016; -pub const B38400: crate::speed_t = 0o000017; -pub const EXTA: crate::speed_t = B19200; -pub const EXTB: crate::speed_t = B38400; -pub const B57600: crate::speed_t = 0o010001; -pub const B115200: crate::speed_t = 0o010002; -pub const B230400: crate::speed_t = 0o010003; -pub const B460800: crate::speed_t = 0o010004; -pub const B500000: crate::speed_t = 0o010005; -pub const B576000: crate::speed_t = 0o010006; -pub const B921600: crate::speed_t = 0o010007; -pub const B1000000: crate::speed_t = 0o010010; -pub const B1152000: crate::speed_t = 0o010011; -pub const B1500000: crate::speed_t = 0o010012; -pub const B2000000: crate::speed_t = 0o010013; -pub const B2500000: crate::speed_t = 0o010014; -pub const B3000000: crate::speed_t = 0o010015; -pub const B3500000: crate::speed_t = 0o010016; -pub const B4000000: crate::speed_t = 0o010017; - -pub const VEOL: usize = 11; -pub const VEOL2: usize = 16; -pub const VMIN: usize = 6; -pub const IEXTEN: crate::tcflag_t = 0x00008000; -pub const TOSTOP: crate::tcflag_t = 0x00000100; -pub const FLUSHO: crate::tcflag_t = 0x00001000; -pub const EXTPROC: crate::tcflag_t = 0x00010000; - -pub const TCSANOW: c_int = 0; -pub const TCSADRAIN: c_int = 1; -pub const TCSAFLUSH: c_int = 2; - -pub const SYS_restart_syscall: c_long = 0; -pub const SYS_exit: c_long = 1; -pub const SYS_fork: c_long = 2; -pub const SYS_read: c_long = 3; -pub const SYS_write: c_long = 4; -pub const SYS_open: c_long = 5; -pub const SYS_close: c_long = 6; -pub const SYS_waitpid: c_long = 7; -pub const SYS_creat: c_long = 8; -pub const SYS_link: c_long 
= 9; -pub const SYS_unlink: c_long = 10; -pub const SYS_execve: c_long = 11; -pub const SYS_chdir: c_long = 12; -pub const SYS_time32: c_long = 13; -pub const SYS_mknod: c_long = 14; -pub const SYS_chmod: c_long = 15; -pub const SYS_chown16: c_long = 16; -pub const SYS_stat: c_long = 18; -pub const SYS_lseek: c_long = 19; -pub const SYS_getpid: c_long = 20; -pub const SYS_mount: c_long = 21; -pub const SYS_oldumount: c_long = 22; -pub const SYS_setuid16: c_long = 23; -pub const SYS_getuid16: c_long = 24; -pub const SYS_stime32: c_long = 25; -pub const SYS_ptrace: c_long = 26; -pub const SYS_alarm: c_long = 27; -pub const SYS_fstat: c_long = 28; -pub const SYS_pause: c_long = 29; -pub const SYS_utime32: c_long = 30; -pub const SYS_access: c_long = 33; -pub const SYS_nice: c_long = 34; -pub const SYS_sync: c_long = 36; -pub const SYS_kill: c_long = 37; -pub const SYS_rename: c_long = 38; -pub const SYS_mkdir: c_long = 39; -pub const SYS_rmdir: c_long = 40; -pub const SYS_dup: c_long = 41; -pub const SYS_pipe: c_long = 42; -pub const SYS_times: c_long = 43; -pub const SYS_brk: c_long = 45; -pub const SYS_setgid16: c_long = 46; -pub const SYS_getgid16: c_long = 47; -pub const SYS_signal: c_long = 48; -pub const SYS_geteuid16: c_long = 49; -pub const SYS_getegid16: c_long = 50; -pub const SYS_acct: c_long = 51; -pub const SYS_umount: c_long = 52; -pub const SYS_ioctl: c_long = 54; -pub const SYS_fcntl: c_long = 55; -pub const SYS_setpgid: c_long = 57; -pub const SYS_umask: c_long = 60; -pub const SYS_chroot: c_long = 61; -pub const SYS_ustat: c_long = 62; -pub const SYS_dup2: c_long = 63; -pub const SYS_getppid: c_long = 64; -pub const SYS_getpgrp: c_long = 65; -pub const SYS_setsid: c_long = 66; -pub const SYS_sigaction: c_long = 67; -pub const SYS_sgetmask: c_long = 68; -pub const SYS_ssetmask: c_long = 69; -pub const SYS_setreuid16: c_long = 70; -pub const SYS_setregid16: c_long = 71; -pub const SYS_sigsuspend: c_long = 72; -pub const SYS_sigpending: c_long = 73; -pub const SYS_sethostname: c_long = 74; -pub const SYS_setrlimit: c_long = 75; -pub const SYS_old_getrlimit: c_long = 76; -pub const SYS_getrusage: c_long = 77; -pub const SYS_gettimeofday: c_long = 78; -pub const SYS_settimeofday: c_long = 79; -pub const SYS_getgroups16: c_long = 80; -pub const SYS_setgroups16: c_long = 81; -pub const SYS_old_select: c_long = 82; -pub const SYS_symlink: c_long = 83; -pub const SYS_lstat: c_long = 84; -pub const SYS_readlink: c_long = 85; -pub const SYS_uselib: c_long = 86; -pub const SYS_swapon: c_long = 87; -pub const SYS_reboot: c_long = 88; -pub const SYS_old_readdir: c_long = 89; -pub const SYS_old_mmap: c_long = 90; -pub const SYS_munmap: c_long = 91; -pub const SYS_truncate: c_long = 92; -pub const SYS_ftruncate: c_long = 93; -pub const SYS_fchmod: c_long = 94; -pub const SYS_fchown16: c_long = 95; -pub const SYS_getpriority: c_long = 96; -pub const SYS_setpriority: c_long = 97; -pub const SYS_statfs: c_long = 99; -pub const SYS_fstatfs: c_long = 100; -pub const SYS_socketcall: c_long = 102; -pub const SYS_syslog: c_long = 103; -pub const SYS_setitimer: c_long = 104; -pub const SYS_getitimer: c_long = 105; -pub const SYS_newstat: c_long = 106; -pub const SYS_newlstat: c_long = 107; -pub const SYS_newfstat: c_long = 108; -pub const SYS_vhangup: c_long = 111; -pub const SYS_wait4: c_long = 114; -pub const SYS_swapoff: c_long = 115; -pub const SYS_sysinfo: c_long = 116; -pub const SYS_ipc: c_long = 117; -pub const SYS_fsync: c_long = 118; -pub const SYS_sigreturn: c_long = 119; -pub const 
SYS_clone: c_long = 120; -pub const SYS_setdomainname: c_long = 121; -pub const SYS_newuname: c_long = 122; -pub const SYS_cacheflush: c_long = 123; -pub const SYS_adjtimex_time32: c_long = 124; -pub const SYS_mprotect: c_long = 125; -pub const SYS_sigprocmask: c_long = 126; -#[deprecated(since = "0.2.70", note = "Functional up to 2.6 kernel")] -pub const SYS_create_module: c_long = 127; -pub const SYS_init_module: c_long = 128; -pub const SYS_delete_module: c_long = 129; -#[deprecated(since = "0.2.70", note = "Functional up to 2.6 kernel")] -pub const SYS_get_kernel_syms: c_long = 130; -pub const SYS_quotactl: c_long = 131; -pub const SYS_getpgid: c_long = 132; -pub const SYS_fchdir: c_long = 133; -pub const SYS_bdflush: c_long = 134; -pub const SYS_sysfs: c_long = 135; -pub const SYS_personality: c_long = 136; -pub const SYS_setfsuid16: c_long = 138; -pub const SYS_setfsgid16: c_long = 139; -pub const SYS_llseek: c_long = 140; -pub const SYS_getdents: c_long = 141; -pub const SYS_select: c_long = 142; -pub const SYS_flock: c_long = 143; -pub const SYS_msync: c_long = 144; -pub const SYS_readv: c_long = 145; -pub const SYS_writev: c_long = 146; -pub const SYS_getsid: c_long = 147; -pub const SYS_fdatasync: c_long = 148; -pub const SYS__sysctl: c_long = 149; -pub const SYS_mlock: c_long = 150; -pub const SYS_munlock: c_long = 151; -pub const SYS_mlockall: c_long = 152; -pub const SYS_munlockall: c_long = 153; -pub const SYS_sched_setparam: c_long = 154; -pub const SYS_sched_getparam: c_long = 155; -pub const SYS_sched_setscheduler: c_long = 156; -pub const SYS_sched_getscheduler: c_long = 157; -pub const SYS_sched_yield: c_long = 158; -pub const SYS_sched_get_priority_max: c_long = 159; -pub const SYS_sched_get_priority_min: c_long = 160; -pub const SYS_sched_rr_get_interval_time32: c_long = 161; -pub const SYS_nanosleep_time32: c_long = 162; -pub const SYS_mremap: c_long = 163; -pub const SYS_setresuid16: c_long = 164; -pub const SYS_getresuid16: c_long = 165; -pub const SYS_getpagesize: c_long = 166; -#[deprecated(since = "0.2.70", note = "Functional up to 2.6 kernel")] -pub const SYS_query_module: c_long = 167; -pub const SYS_poll: c_long = 168; -pub const SYS_nfsservctl: c_long = 169; -pub const SYS_setresgid16: c_long = 170; -pub const SYS_getresgid16: c_long = 171; -pub const SYS_prctl: c_long = 172; -pub const SYS_rt_sigreturn: c_long = 173; -pub const SYS_rt_sigaction: c_long = 174; -pub const SYS_rt_sigprocmask: c_long = 175; -pub const SYS_rt_sigpending: c_long = 176; -pub const SYS_rt_sigtimedwait_time32: c_long = 177; -pub const SYS_rt_sigqueueinfo: c_long = 178; -pub const SYS_rt_sigsuspend: c_long = 179; -pub const SYS_pread64: c_long = 180; -pub const SYS_pwrite64: c_long = 181; -pub const SYS_lchown16: c_long = 182; -pub const SYS_getcwd: c_long = 183; -pub const SYS_capget: c_long = 184; -pub const SYS_capset: c_long = 185; -pub const SYS_sigaltstack: c_long = 186; -pub const SYS_sendfile: c_long = 187; -pub const SYS_getpmsg: c_long = 188; -pub const SYS_putpmsg: c_long = 189; -pub const SYS_vfork: c_long = 190; -pub const SYS_getrlimit: c_long = 191; -pub const SYS_mmap2: c_long = 192; -pub const SYS_truncate64: c_long = 193; -pub const SYS_ftruncate64: c_long = 194; -pub const SYS_stat64: c_long = 195; -pub const SYS_lstat64: c_long = 196; -pub const SYS_fstat64: c_long = 197; -pub const SYS_chown: c_long = 198; -pub const SYS_getuid: c_long = 199; -pub const SYS_getgid: c_long = 200; -pub const SYS_geteuid: c_long = 201; -pub const SYS_getegid: c_long = 202; -pub const 
SYS_setreuid: c_long = 203; -pub const SYS_setregid: c_long = 204; -pub const SYS_getgroups: c_long = 205; -pub const SYS_setgroups: c_long = 206; -pub const SYS_fchown: c_long = 207; -pub const SYS_setresuid: c_long = 208; -pub const SYS_getresuid: c_long = 209; -pub const SYS_setresgid: c_long = 210; -pub const SYS_getresgid: c_long = 211; -pub const SYS_lchown: c_long = 212; -pub const SYS_setuid: c_long = 213; -pub const SYS_setgid: c_long = 214; -pub const SYS_setfsuid: c_long = 215; -pub const SYS_setfsgid: c_long = 216; -pub const SYS_pivot_root: c_long = 217; -pub const SYS_getdents64: c_long = 220; -pub const SYS_gettid: c_long = 221; -pub const SYS_tkill: c_long = 222; -pub const SYS_setxattr: c_long = 223; -pub const SYS_lsetxattr: c_long = 224; -pub const SYS_fsetxattr: c_long = 225; -pub const SYS_getxattr: c_long = 226; -pub const SYS_lgetxattr: c_long = 227; -pub const SYS_fgetxattr: c_long = 228; -pub const SYS_listxattr: c_long = 229; -pub const SYS_llistxattr: c_long = 230; -pub const SYS_flistxattr: c_long = 231; -pub const SYS_removexattr: c_long = 232; -pub const SYS_lremovexattr: c_long = 233; -pub const SYS_fremovexattr: c_long = 234; -pub const SYS_futex_time32: c_long = 235; -pub const SYS_sendfile64: c_long = 236; -pub const SYS_mincore: c_long = 237; -pub const SYS_madvise: c_long = 238; -pub const SYS_fcntl64: c_long = 239; -pub const SYS_readahead: c_long = 240; -pub const SYS_io_setup: c_long = 241; -pub const SYS_io_destroy: c_long = 242; -pub const SYS_io_getevents_time32: c_long = 243; -pub const SYS_io_submit: c_long = 244; -pub const SYS_io_cancel: c_long = 245; -pub const SYS_fadvise64: c_long = 246; -pub const SYS_exit_group: c_long = 247; -pub const SYS_lookup_dcookie: c_long = 248; -pub const SYS_epoll_create: c_long = 249; -pub const SYS_epoll_ctl: c_long = 250; -pub const SYS_epoll_wait: c_long = 251; -pub const SYS_remap_file_pages: c_long = 252; -pub const SYS_set_tid_address: c_long = 253; -pub const SYS_timer_create: c_long = 254; -pub const SYS_timer_settime32: c_long = 255; -pub const SYS_timer_gettime32: c_long = 256; -pub const SYS_timer_getoverrun: c_long = 257; -pub const SYS_timer_delete: c_long = 258; -pub const SYS_clock_settime32: c_long = 259; -pub const SYS_clock_gettime32: c_long = 260; -pub const SYS_clock_getres_time32: c_long = 261; -pub const SYS_clock_nanosleep_time32: c_long = 262; -pub const SYS_statfs64: c_long = 263; -pub const SYS_fstatfs64: c_long = 264; -pub const SYS_tgkill: c_long = 265; -pub const SYS_utimes_time32: c_long = 266; -pub const SYS_fadvise64_64: c_long = 267; -pub const SYS_mbind: c_long = 268; -pub const SYS_get_mempolicy: c_long = 269; -pub const SYS_set_mempolicy: c_long = 270; -pub const SYS_mq_open: c_long = 271; -pub const SYS_mq_unlink: c_long = 272; -pub const SYS_mq_timedsend_time32: c_long = 273; -pub const SYS_mq_timedreceive_time32: c_long = 274; -pub const SYS_mq_notify: c_long = 275; -pub const SYS_mq_getsetattr: c_long = 276; -pub const SYS_waitid: c_long = 277; -pub const SYS_add_key: c_long = 279; -pub const SYS_request_key: c_long = 280; -pub const SYS_keyctl: c_long = 281; -pub const SYS_ioprio_set: c_long = 282; -pub const SYS_ioprio_get: c_long = 283; -pub const SYS_inotify_init: c_long = 284; -pub const SYS_inotify_add_watch: c_long = 285; -pub const SYS_inotify_rm_watch: c_long = 286; -pub const SYS_migrate_pages: c_long = 287; -pub const SYS_openat: c_long = 288; -pub const SYS_mkdirat: c_long = 289; -pub const SYS_mknodat: c_long = 290; -pub const SYS_fchownat: c_long = 291; -pub 
const SYS_futimesat_time32: c_long = 292; -pub const SYS_fstatat64: c_long = 293; -pub const SYS_unlinkat: c_long = 294; -pub const SYS_renameat: c_long = 295; -pub const SYS_linkat: c_long = 296; -pub const SYS_symlinkat: c_long = 297; -pub const SYS_readlinkat: c_long = 298; -pub const SYS_fchmodat: c_long = 299; -pub const SYS_faccessat: c_long = 300; -pub const SYS_pselect6_time32: c_long = 301; -pub const SYS_ppoll_time32: c_long = 302; -pub const SYS_unshare: c_long = 303; -pub const SYS_set_robust_list: c_long = 304; -pub const SYS_get_robust_list: c_long = 305; -pub const SYS_splice: c_long = 306; -pub const SYS_sync_file_range: c_long = 307; -pub const SYS_tee: c_long = 308; -pub const SYS_vmsplice: c_long = 309; -pub const SYS_move_pages: c_long = 310; -pub const SYS_sched_setaffinity: c_long = 311; -pub const SYS_sched_getaffinity: c_long = 312; -pub const SYS_kexec_load: c_long = 313; -pub const SYS_getcpu: c_long = 314; -pub const SYS_epoll_pwait: c_long = 315; -pub const SYS_utimensat_time32: c_long = 316; -pub const SYS_signalfd: c_long = 317; -pub const SYS_timerfd_create: c_long = 318; -pub const SYS_eventfd: c_long = 319; -pub const SYS_fallocate: c_long = 320; -pub const SYS_timerfd_settime32: c_long = 321; -pub const SYS_timerfd_gettime32: c_long = 322; -pub const SYS_signalfd4: c_long = 323; -pub const SYS_eventfd2: c_long = 324; -pub const SYS_epoll_create1: c_long = 325; -pub const SYS_dup3: c_long = 326; -pub const SYS_pipe2: c_long = 327; -pub const SYS_inotify_init1: c_long = 328; -pub const SYS_preadv: c_long = 329; -pub const SYS_pwritev: c_long = 330; -pub const SYS_rt_tgsigqueueinfo: c_long = 331; -pub const SYS_perf_event_open: c_long = 332; -pub const SYS_get_thread_area: c_long = 333; -pub const SYS_set_thread_area: c_long = 334; -pub const SYS_atomic_cmpxchg_32: c_long = 335; -pub const SYS_atomic_barrier: c_long = 336; -pub const SYS_fanotify_init: c_long = 337; -pub const SYS_fanotify_mark: c_long = 338; -pub const SYS_prlimit64: c_long = 339; -pub const SYS_name_to_handle_at: c_long = 340; -pub const SYS_open_by_handle_at: c_long = 341; -pub const SYS_clock_adjtime32: c_long = 342; -pub const SYS_syncfs: c_long = 343; -pub const SYS_setns: c_long = 344; -pub const SYS_process_vm_readv: c_long = 345; -pub const SYS_process_vm_writev: c_long = 346; -pub const SYS_kcmp: c_long = 347; -pub const SYS_finit_module: c_long = 348; -pub const SYS_sched_setattr: c_long = 349; -pub const SYS_sched_getattr: c_long = 350; -pub const SYS_renameat2: c_long = 351; -pub const SYS_getrandom: c_long = 352; -pub const SYS_memfd_create: c_long = 353; -pub const SYS_bpf: c_long = 354; -pub const SYS_execveat: c_long = 355; -pub const SYS_socket: c_long = 356; -pub const SYS_socketpair: c_long = 357; -pub const SYS_bind: c_long = 358; -pub const SYS_connect: c_long = 359; -pub const SYS_listen: c_long = 360; -pub const SYS_accept4: c_long = 361; -pub const SYS_getsockopt: c_long = 362; -pub const SYS_setsockopt: c_long = 363; -pub const SYS_getsockname: c_long = 364; -pub const SYS_getpeername: c_long = 365; -pub const SYS_sendto: c_long = 366; -pub const SYS_sendmsg: c_long = 367; -pub const SYS_recvfrom: c_long = 368; -pub const SYS_recvmsg: c_long = 369; -pub const SYS_shutdown: c_long = 370; -pub const SYS_recvmmsg_time32: c_long = 371; -pub const SYS_sendmmsg: c_long = 372; -pub const SYS_userfaultfd: c_long = 373; -pub const SYS_membarrier: c_long = 374; -pub const SYS_mlock2: c_long = 375; -pub const SYS_copy_file_range: c_long = 376; -pub const SYS_preadv2: c_long = 
377; -pub const SYS_pwritev2: c_long = 378; -pub const SYS_statx: c_long = 379; -pub const SYS_seccomp: c_long = 380; -pub const SYS_pkey_mprotect: c_long = 381; -pub const SYS_pkey_alloc: c_long = 382; -pub const SYS_pkey_free: c_long = 383; -pub const SYS_rseq: c_long = 384; -pub const SYS_semget: c_long = 393; -pub const SYS_semctl: c_long = 394; -pub const SYS_shmget: c_long = 395; -pub const SYS_shmctl: c_long = 396; -pub const SYS_shmat: c_long = 397; -pub const SYS_shmdt: c_long = 398; -pub const SYS_msgget: c_long = 399; -pub const SYS_msgsnd: c_long = 400; -pub const SYS_msgrcv: c_long = 401; -pub const SYS_msgctl: c_long = 402; -pub const SYS_clock_gettime: c_long = 403; -pub const SYS_clock_settime: c_long = 404; -pub const SYS_clock_adjtime: c_long = 405; -pub const SYS_clock_getres: c_long = 406; -pub const SYS_clock_nanosleep: c_long = 407; -pub const SYS_timer_gettime: c_long = 408; -pub const SYS_timer_settime: c_long = 409; -pub const SYS_timerfd_gettime: c_long = 410; -pub const SYS_timerfd_settime: c_long = 411; -pub const SYS_utimensat: c_long = 412; -pub const SYS_pselect6: c_long = 413; -pub const SYS_ppoll: c_long = 414; -pub const SYS_io_pgetevents: c_long = 416; -pub const SYS_recvmmsg: c_long = 417; -pub const SYS_mq_timedsend: c_long = 418; -pub const SYS_mq_timedreceive: c_long = 419; -pub const SYS_semtimedop: c_long = 420; -pub const SYS_rt_sigtimedwait: c_long = 421; -pub const SYS_futex: c_long = 422; -pub const SYS_sched_rr_get_interval: c_long = 423; -pub const SYS_pidfd_send_signal: c_long = 424; -pub const SYS_io_uring_setup: c_long = 425; -pub const SYS_io_uring_enter: c_long = 426; -pub const SYS_io_uring_register: c_long = 427; -pub const SYS_open_tree: c_long = 428; -pub const SYS_move_mount: c_long = 429; -pub const SYS_fsopen: c_long = 430; -pub const SYS_fsconfig: c_long = 431; -pub const SYS_fsmount: c_long = 432; -pub const SYS_fspick: c_long = 433; -pub const SYS_pidfd_open: c_long = 434; -pub const SYS_clone3: c_long = 435; -pub const SYS_close_range: c_long = 436; -pub const SYS_openat2: c_long = 437; -pub const SYS_pidfd_getfd: c_long = 438; -pub const SYS_faccessat2: c_long = 439; -pub const SYS_process_madvise: c_long = 440; -pub const SYS_epoll_pwait2: c_long = 441; -pub const SYS_mount_setattr: c_long = 442; -pub const SYS_quotactl_fd: c_long = 443; -pub const SYS_landlock_create_ruleset: c_long = 444; -pub const SYS_landlock_add_rule: c_long = 445; -pub const SYS_landlock_restrict_self: c_long = 446; -pub const SYS_process_mrelease: c_long = 448; -pub const SYS_futex_waitv: c_long = 449; -pub const SYS_set_mempolicy_home_node: c_long = 450; diff --git a/vendor/libc/src/unix/linux_like/linux/gnu/b32/mips/mod.rs b/vendor/libc/src/unix/linux_like/linux/gnu/b32/mips/mod.rs deleted file mode 100644 index 3d2775cd800ae7..00000000000000 --- a/vendor/libc/src/unix/linux_like/linux/gnu/b32/mips/mod.rs +++ /dev/null @@ -1,925 +0,0 @@ -use crate::prelude::*; -use crate::{off64_t, off_t}; - -pub type wchar_t = i32; - -s! 
{ - pub struct stat { - #[cfg(not(gnu_time_bits64))] - pub st_dev: c_ulong, - #[cfg(gnu_time_bits64)] - pub st_dev: crate::dev_t, - - #[cfg(not(gnu_time_bits64))] - st_pad1: [c_long; 3], - - pub st_ino: crate::ino_t, - - pub st_mode: crate::mode_t, - pub st_nlink: crate::nlink_t, - pub st_uid: crate::uid_t, - pub st_gid: crate::gid_t, - - #[cfg(not(gnu_time_bits64))] - pub st_rdev: c_ulong, - #[cfg(gnu_time_bits64)] - pub st_rdev: crate::dev_t, - - #[cfg(not(gnu_file_offset_bits64))] - st_pad2: [c_long; 2], - #[cfg(all(not(gnu_time_bits64), gnu_file_offset_bits64))] - st_pad2: [c_long; 3], - - pub st_size: off_t, - - #[cfg(not(gnu_file_offset_bits64))] - st_pad3: c_long, - - #[cfg(gnu_time_bits64)] - pub st_blksize: crate::blksize_t, - #[cfg(gnu_time_bits64)] - pub st_blocks: crate::blkcnt_t, - - pub st_atime: crate::time_t, - #[cfg(gnu_time_bits64)] - _atime_pad: c_int, - pub st_atime_nsec: c_long, - pub st_mtime: crate::time_t, - #[cfg(gnu_time_bits64)] - _mtime_pad: c_int, - pub st_mtime_nsec: c_long, - pub st_ctime: crate::time_t, - #[cfg(gnu_time_bits64)] - _ctime_pad: c_int, - pub st_ctime_nsec: c_long, - - #[cfg(not(gnu_time_bits64))] - pub st_blksize: crate::blksize_t, - #[cfg(all(not(gnu_time_bits64), gnu_file_offset_bits64))] - st_pad4: c_long, - #[cfg(not(gnu_time_bits64))] - pub st_blocks: crate::blkcnt_t, - #[cfg(not(gnu_time_bits64))] - st_pad5: [c_long; 14], - } - - pub struct stat64 { - #[cfg(not(gnu_time_bits64))] - pub st_dev: c_ulong, - #[cfg(gnu_time_bits64)] - pub st_dev: crate::dev_t, - - #[cfg(not(gnu_time_bits64))] - st_pad1: [c_long; 3], - - pub st_ino: crate::ino64_t, - pub st_mode: crate::mode_t, - pub st_nlink: crate::nlink_t, - pub st_uid: crate::uid_t, - pub st_gid: crate::gid_t, - - #[cfg(not(gnu_time_bits64))] - pub st_rdev: c_ulong, - #[cfg(gnu_time_bits64)] - pub st_rdev: crate::dev_t, - - #[cfg(not(gnu_time_bits64))] - st_pad2: [c_long; 3], - - pub st_size: off64_t, - - #[cfg(gnu_time_bits64)] - pub st_blksize: crate::blksize_t, - #[cfg(gnu_time_bits64)] - pub st_blocks: crate::blkcnt_t, - - pub st_atime: crate::time_t, - #[cfg(gnu_time_bits64)] - _atime_pad: c_int, - pub st_atime_nsec: c_long, - pub st_mtime: crate::time_t, - #[cfg(gnu_time_bits64)] - _mtime_pad: c_int, - pub st_mtime_nsec: c_long, - pub st_ctime: crate::time_t, - #[cfg(gnu_time_bits64)] - _ctime_pad: c_int, - pub st_ctime_nsec: c_long, - - #[cfg(not(gnu_time_bits64))] - pub st_blksize: crate::blksize_t, - #[cfg(not(gnu_time_bits64))] - st_pad3: c_long, - #[cfg(not(gnu_time_bits64))] - pub st_blocks: crate::blkcnt64_t, - #[cfg(not(gnu_time_bits64))] - st_pad5: [c_long; 14], - } - - pub struct statfs { - pub f_type: c_long, - pub f_bsize: c_long, - pub f_frsize: c_long, - pub f_blocks: crate::fsblkcnt_t, - pub f_bfree: crate::fsblkcnt_t, - pub f_files: crate::fsblkcnt_t, - pub f_ffree: crate::fsblkcnt_t, - pub f_bavail: crate::fsblkcnt_t, - pub f_fsid: crate::fsid_t, - - pub f_namelen: c_long, - pub f_flags: c_long, - f_spare: [c_long; 5], - } - - pub struct statfs64 { - pub f_type: c_long, - pub f_bsize: c_long, - pub f_frsize: c_long, - pub f_blocks: u64, - pub f_bfree: u64, - pub f_files: u64, - pub f_ffree: u64, - pub f_bavail: u64, - pub f_fsid: crate::fsid_t, - pub f_namelen: c_long, - pub f_flags: c_long, - pub f_spare: [c_long; 5], - } - - pub struct statvfs64 { - pub f_bsize: c_ulong, - pub f_frsize: c_ulong, - pub f_blocks: u64, - pub f_bfree: u64, - pub f_bavail: u64, - pub f_files: u64, - pub f_ffree: u64, - pub f_favail: u64, - pub f_fsid: c_ulong, - __f_unused: c_int, - pub 
f_flag: c_ulong, - pub f_namemax: c_ulong, - __f_spare: [c_int; 6], - } - - // FIXME(1.0): This should not implement `PartialEq` - #[allow(unpredictable_function_pointer_comparisons)] - pub struct sigaction { - pub sa_flags: c_int, - pub sa_sigaction: crate::sighandler_t, - pub sa_mask: crate::sigset_t, - pub sa_restorer: Option, - _resv: [c_int; 1], - } - - pub struct stack_t { - pub ss_sp: *mut c_void, - pub ss_size: size_t, - pub ss_flags: c_int, - } - - pub struct siginfo_t { - pub si_signo: c_int, - pub si_code: c_int, - pub si_errno: c_int, - pub _pad: [c_int; 29], - } - - pub struct ipc_perm { - pub __key: crate::key_t, - pub uid: crate::uid_t, - pub gid: crate::gid_t, - pub cuid: crate::uid_t, - pub cgid: crate::gid_t, - pub mode: c_uint, - pub __seq: c_ushort, - __pad1: c_ushort, - __unused1: c_ulong, - __unused2: c_ulong, - } - - pub struct shmid_ds { - pub shm_perm: crate::ipc_perm, - pub shm_segsz: size_t, - pub shm_atime: crate::time_t, - pub shm_dtime: crate::time_t, - pub shm_ctime: crate::time_t, - pub shm_cpid: crate::pid_t, - pub shm_lpid: crate::pid_t, - pub shm_nattch: crate::shmatt_t, - __unused4: c_ulong, - __unused5: c_ulong, - } - - pub struct msqid_ds { - pub msg_perm: crate::ipc_perm, - #[cfg(all(not(gnu_time_bits64), target_endian = "big"))] - __glibc_reserved1: c_ulong, - pub msg_stime: crate::time_t, - #[cfg(all(not(gnu_time_bits64), target_endian = "little"))] - __glibc_reserved1: c_ulong, - #[cfg(all(not(gnu_time_bits64), target_endian = "big"))] - __glibc_reserved2: c_ulong, - pub msg_rtime: crate::time_t, - #[cfg(all(not(gnu_time_bits64), target_endian = "little"))] - __glibc_reserved2: c_ulong, - #[cfg(all(not(gnu_time_bits64), target_endian = "big"))] - __glibc_reserved3: c_ulong, - pub msg_ctime: crate::time_t, - #[cfg(target_endian = "little")] - __glibc_reserved3: c_ulong, - __msg_cbytes: c_ulong, - pub msg_qnum: crate::msgqnum_t, - pub msg_qbytes: crate::msglen_t, - pub msg_lspid: crate::pid_t, - pub msg_lrpid: crate::pid_t, - __glibc_reserved4: c_ulong, - __glibc_reserved5: c_ulong, - } - - pub struct flock { - pub l_type: c_short, - pub l_whence: c_short, - pub l_start: off_t, - pub l_len: off_t, - #[cfg(not(gnu_file_offset_bits64))] - pub l_sysid: c_long, - pub l_pid: crate::pid_t, - #[cfg(not(gnu_file_offset_bits64))] - __glibc_reserved0: [c_long; 4], - } -} - -s_no_extra_traits! 
{ - #[repr(align(8))] - pub struct max_align_t { - priv_: [f32; 4], - } -} - -pub const O_LARGEFILE: c_int = 0x2000; - -pub const SYS_syscall: c_long = 4000 + 0; -pub const SYS_exit: c_long = 4000 + 1; -pub const SYS_fork: c_long = 4000 + 2; -pub const SYS_read: c_long = 4000 + 3; -pub const SYS_write: c_long = 4000 + 4; -pub const SYS_open: c_long = 4000 + 5; -pub const SYS_close: c_long = 4000 + 6; -pub const SYS_waitpid: c_long = 4000 + 7; -pub const SYS_creat: c_long = 4000 + 8; -pub const SYS_link: c_long = 4000 + 9; -pub const SYS_unlink: c_long = 4000 + 10; -pub const SYS_execve: c_long = 4000 + 11; -pub const SYS_chdir: c_long = 4000 + 12; -pub const SYS_time: c_long = 4000 + 13; -pub const SYS_mknod: c_long = 4000 + 14; -pub const SYS_chmod: c_long = 4000 + 15; -pub const SYS_lchown: c_long = 4000 + 16; -pub const SYS_break: c_long = 4000 + 17; -pub const SYS_lseek: c_long = 4000 + 19; -pub const SYS_getpid: c_long = 4000 + 20; -pub const SYS_mount: c_long = 4000 + 21; -pub const SYS_umount: c_long = 4000 + 22; -pub const SYS_setuid: c_long = 4000 + 23; -pub const SYS_getuid: c_long = 4000 + 24; -pub const SYS_stime: c_long = 4000 + 25; -pub const SYS_ptrace: c_long = 4000 + 26; -pub const SYS_alarm: c_long = 4000 + 27; -pub const SYS_pause: c_long = 4000 + 29; -pub const SYS_utime: c_long = 4000 + 30; -pub const SYS_stty: c_long = 4000 + 31; -pub const SYS_gtty: c_long = 4000 + 32; -pub const SYS_access: c_long = 4000 + 33; -pub const SYS_nice: c_long = 4000 + 34; -pub const SYS_ftime: c_long = 4000 + 35; -pub const SYS_sync: c_long = 4000 + 36; -pub const SYS_kill: c_long = 4000 + 37; -pub const SYS_rename: c_long = 4000 + 38; -pub const SYS_mkdir: c_long = 4000 + 39; -pub const SYS_rmdir: c_long = 4000 + 40; -pub const SYS_dup: c_long = 4000 + 41; -pub const SYS_pipe: c_long = 4000 + 42; -pub const SYS_times: c_long = 4000 + 43; -pub const SYS_prof: c_long = 4000 + 44; -pub const SYS_brk: c_long = 4000 + 45; -pub const SYS_setgid: c_long = 4000 + 46; -pub const SYS_getgid: c_long = 4000 + 47; -pub const SYS_signal: c_long = 4000 + 48; -pub const SYS_geteuid: c_long = 4000 + 49; -pub const SYS_getegid: c_long = 4000 + 50; -pub const SYS_acct: c_long = 4000 + 51; -pub const SYS_umount2: c_long = 4000 + 52; -pub const SYS_lock: c_long = 4000 + 53; -pub const SYS_ioctl: c_long = 4000 + 54; -pub const SYS_fcntl: c_long = 4000 + 55; -pub const SYS_mpx: c_long = 4000 + 56; -pub const SYS_setpgid: c_long = 4000 + 57; -pub const SYS_ulimit: c_long = 4000 + 58; -pub const SYS_umask: c_long = 4000 + 60; -pub const SYS_chroot: c_long = 4000 + 61; -pub const SYS_ustat: c_long = 4000 + 62; -pub const SYS_dup2: c_long = 4000 + 63; -pub const SYS_getppid: c_long = 4000 + 64; -pub const SYS_getpgrp: c_long = 4000 + 65; -pub const SYS_setsid: c_long = 4000 + 66; -pub const SYS_sigaction: c_long = 4000 + 67; -pub const SYS_sgetmask: c_long = 4000 + 68; -pub const SYS_ssetmask: c_long = 4000 + 69; -pub const SYS_setreuid: c_long = 4000 + 70; -pub const SYS_setregid: c_long = 4000 + 71; -pub const SYS_sigsuspend: c_long = 4000 + 72; -pub const SYS_sigpending: c_long = 4000 + 73; -pub const SYS_sethostname: c_long = 4000 + 74; -pub const SYS_setrlimit: c_long = 4000 + 75; -pub const SYS_getrlimit: c_long = 4000 + 76; -pub const SYS_getrusage: c_long = 4000 + 77; -pub const SYS_gettimeofday: c_long = 4000 + 78; -pub const SYS_settimeofday: c_long = 4000 + 79; -pub const SYS_getgroups: c_long = 4000 + 80; -pub const SYS_setgroups: c_long = 4000 + 81; -pub const SYS_symlink: c_long = 4000 + 83; -pub 
const SYS_readlink: c_long = 4000 + 85; -pub const SYS_uselib: c_long = 4000 + 86; -pub const SYS_swapon: c_long = 4000 + 87; -pub const SYS_reboot: c_long = 4000 + 88; -pub const SYS_readdir: c_long = 4000 + 89; -pub const SYS_mmap: c_long = 4000 + 90; -pub const SYS_munmap: c_long = 4000 + 91; -pub const SYS_truncate: c_long = 4000 + 92; -pub const SYS_ftruncate: c_long = 4000 + 93; -pub const SYS_fchmod: c_long = 4000 + 94; -pub const SYS_fchown: c_long = 4000 + 95; -pub const SYS_getpriority: c_long = 4000 + 96; -pub const SYS_setpriority: c_long = 4000 + 97; -pub const SYS_profil: c_long = 4000 + 98; -pub const SYS_statfs: c_long = 4000 + 99; -pub const SYS_fstatfs: c_long = 4000 + 100; -pub const SYS_ioperm: c_long = 4000 + 101; -pub const SYS_socketcall: c_long = 4000 + 102; -pub const SYS_syslog: c_long = 4000 + 103; -pub const SYS_setitimer: c_long = 4000 + 104; -pub const SYS_getitimer: c_long = 4000 + 105; -pub const SYS_stat: c_long = 4000 + 106; -pub const SYS_lstat: c_long = 4000 + 107; -pub const SYS_fstat: c_long = 4000 + 108; -pub const SYS_iopl: c_long = 4000 + 110; -pub const SYS_vhangup: c_long = 4000 + 111; -pub const SYS_idle: c_long = 4000 + 112; -pub const SYS_vm86: c_long = 4000 + 113; -pub const SYS_wait4: c_long = 4000 + 114; -pub const SYS_swapoff: c_long = 4000 + 115; -pub const SYS_sysinfo: c_long = 4000 + 116; -pub const SYS_ipc: c_long = 4000 + 117; -pub const SYS_fsync: c_long = 4000 + 118; -pub const SYS_sigreturn: c_long = 4000 + 119; -pub const SYS_clone: c_long = 4000 + 120; -pub const SYS_setdomainname: c_long = 4000 + 121; -pub const SYS_uname: c_long = 4000 + 122; -pub const SYS_modify_ldt: c_long = 4000 + 123; -pub const SYS_adjtimex: c_long = 4000 + 124; -pub const SYS_mprotect: c_long = 4000 + 125; -pub const SYS_sigprocmask: c_long = 4000 + 126; -#[deprecated(since = "0.2.70", note = "Functional up to 2.6 kernel")] -pub const SYS_create_module: c_long = 4000 + 127; -pub const SYS_init_module: c_long = 4000 + 128; -pub const SYS_delete_module: c_long = 4000 + 129; -#[deprecated(since = "0.2.70", note = "Functional up to 2.6 kernel")] -pub const SYS_get_kernel_syms: c_long = 4000 + 130; -pub const SYS_quotactl: c_long = 4000 + 131; -pub const SYS_getpgid: c_long = 4000 + 132; -pub const SYS_fchdir: c_long = 4000 + 133; -pub const SYS_bdflush: c_long = 4000 + 134; -pub const SYS_sysfs: c_long = 4000 + 135; -pub const SYS_personality: c_long = 4000 + 136; -pub const SYS_afs_syscall: c_long = 4000 + 137; -pub const SYS_setfsuid: c_long = 4000 + 138; -pub const SYS_setfsgid: c_long = 4000 + 139; -pub const SYS__llseek: c_long = 4000 + 140; -pub const SYS_getdents: c_long = 4000 + 141; -pub const SYS__newselect: c_long = 4000 + 142; -pub const SYS_flock: c_long = 4000 + 143; -pub const SYS_msync: c_long = 4000 + 144; -pub const SYS_readv: c_long = 4000 + 145; -pub const SYS_writev: c_long = 4000 + 146; -pub const SYS_cacheflush: c_long = 4000 + 147; -pub const SYS_cachectl: c_long = 4000 + 148; -pub const SYS_sysmips: c_long = 4000 + 149; -pub const SYS_getsid: c_long = 4000 + 151; -pub const SYS_fdatasync: c_long = 4000 + 152; -pub const SYS__sysctl: c_long = 4000 + 153; -pub const SYS_mlock: c_long = 4000 + 154; -pub const SYS_munlock: c_long = 4000 + 155; -pub const SYS_mlockall: c_long = 4000 + 156; -pub const SYS_munlockall: c_long = 4000 + 157; -pub const SYS_sched_setparam: c_long = 4000 + 158; -pub const SYS_sched_getparam: c_long = 4000 + 159; -pub const SYS_sched_setscheduler: c_long = 4000 + 160; -pub const SYS_sched_getscheduler: c_long = 
4000 + 161; -pub const SYS_sched_yield: c_long = 4000 + 162; -pub const SYS_sched_get_priority_max: c_long = 4000 + 163; -pub const SYS_sched_get_priority_min: c_long = 4000 + 164; -pub const SYS_sched_rr_get_interval: c_long = 4000 + 165; -pub const SYS_nanosleep: c_long = 4000 + 166; -pub const SYS_mremap: c_long = 4000 + 167; -pub const SYS_accept: c_long = 4000 + 168; -pub const SYS_bind: c_long = 4000 + 169; -pub const SYS_connect: c_long = 4000 + 170; -pub const SYS_getpeername: c_long = 4000 + 171; -pub const SYS_getsockname: c_long = 4000 + 172; -pub const SYS_getsockopt: c_long = 4000 + 173; -pub const SYS_listen: c_long = 4000 + 174; -pub const SYS_recv: c_long = 4000 + 175; -pub const SYS_recvfrom: c_long = 4000 + 176; -pub const SYS_recvmsg: c_long = 4000 + 177; -pub const SYS_send: c_long = 4000 + 178; -pub const SYS_sendmsg: c_long = 4000 + 179; -pub const SYS_sendto: c_long = 4000 + 180; -pub const SYS_setsockopt: c_long = 4000 + 181; -pub const SYS_shutdown: c_long = 4000 + 182; -pub const SYS_socket: c_long = 4000 + 183; -pub const SYS_socketpair: c_long = 4000 + 184; -pub const SYS_setresuid: c_long = 4000 + 185; -pub const SYS_getresuid: c_long = 4000 + 186; -#[deprecated(since = "0.2.70", note = "Functional up to 2.6 kernel")] -pub const SYS_query_module: c_long = 4000 + 187; -pub const SYS_poll: c_long = 4000 + 188; -pub const SYS_nfsservctl: c_long = 4000 + 189; -pub const SYS_setresgid: c_long = 4000 + 190; -pub const SYS_getresgid: c_long = 4000 + 191; -pub const SYS_prctl: c_long = 4000 + 192; -pub const SYS_rt_sigreturn: c_long = 4000 + 193; -pub const SYS_rt_sigaction: c_long = 4000 + 194; -pub const SYS_rt_sigprocmask: c_long = 4000 + 195; -pub const SYS_rt_sigpending: c_long = 4000 + 196; -pub const SYS_rt_sigtimedwait: c_long = 4000 + 197; -pub const SYS_rt_sigqueueinfo: c_long = 4000 + 198; -pub const SYS_rt_sigsuspend: c_long = 4000 + 199; -pub const SYS_pread64: c_long = 4000 + 200; -pub const SYS_pwrite64: c_long = 4000 + 201; -pub const SYS_chown: c_long = 4000 + 202; -pub const SYS_getcwd: c_long = 4000 + 203; -pub const SYS_capget: c_long = 4000 + 204; -pub const SYS_capset: c_long = 4000 + 205; -pub const SYS_sigaltstack: c_long = 4000 + 206; -pub const SYS_sendfile: c_long = 4000 + 207; -pub const SYS_getpmsg: c_long = 4000 + 208; -pub const SYS_putpmsg: c_long = 4000 + 209; -pub const SYS_mmap2: c_long = 4000 + 210; -pub const SYS_truncate64: c_long = 4000 + 211; -pub const SYS_ftruncate64: c_long = 4000 + 212; -pub const SYS_stat64: c_long = 4000 + 213; -pub const SYS_lstat64: c_long = 4000 + 214; -pub const SYS_fstat64: c_long = 4000 + 215; -pub const SYS_pivot_root: c_long = 4000 + 216; -pub const SYS_mincore: c_long = 4000 + 217; -pub const SYS_madvise: c_long = 4000 + 218; -pub const SYS_getdents64: c_long = 4000 + 219; -pub const SYS_fcntl64: c_long = 4000 + 220; -pub const SYS_gettid: c_long = 4000 + 222; -pub const SYS_readahead: c_long = 4000 + 223; -pub const SYS_setxattr: c_long = 4000 + 224; -pub const SYS_lsetxattr: c_long = 4000 + 225; -pub const SYS_fsetxattr: c_long = 4000 + 226; -pub const SYS_getxattr: c_long = 4000 + 227; -pub const SYS_lgetxattr: c_long = 4000 + 228; -pub const SYS_fgetxattr: c_long = 4000 + 229; -pub const SYS_listxattr: c_long = 4000 + 230; -pub const SYS_llistxattr: c_long = 4000 + 231; -pub const SYS_flistxattr: c_long = 4000 + 232; -pub const SYS_removexattr: c_long = 4000 + 233; -pub const SYS_lremovexattr: c_long = 4000 + 234; -pub const SYS_fremovexattr: c_long = 4000 + 235; -pub const SYS_tkill: c_long = 
4000 + 236; -pub const SYS_sendfile64: c_long = 4000 + 237; -pub const SYS_futex: c_long = 4000 + 238; -pub const SYS_sched_setaffinity: c_long = 4000 + 239; -pub const SYS_sched_getaffinity: c_long = 4000 + 240; -pub const SYS_io_setup: c_long = 4000 + 241; -pub const SYS_io_destroy: c_long = 4000 + 242; -pub const SYS_io_getevents: c_long = 4000 + 243; -pub const SYS_io_submit: c_long = 4000 + 244; -pub const SYS_io_cancel: c_long = 4000 + 245; -pub const SYS_exit_group: c_long = 4000 + 246; -pub const SYS_lookup_dcookie: c_long = 4000 + 247; -pub const SYS_epoll_create: c_long = 4000 + 248; -pub const SYS_epoll_ctl: c_long = 4000 + 249; -pub const SYS_epoll_wait: c_long = 4000 + 250; -pub const SYS_remap_file_pages: c_long = 4000 + 251; -pub const SYS_set_tid_address: c_long = 4000 + 252; -pub const SYS_restart_syscall: c_long = 4000 + 253; -pub const SYS_fadvise64: c_long = 4000 + 254; -pub const SYS_statfs64: c_long = 4000 + 255; -pub const SYS_fstatfs64: c_long = 4000 + 256; -pub const SYS_timer_create: c_long = 4000 + 257; -pub const SYS_timer_settime: c_long = 4000 + 258; -pub const SYS_timer_gettime: c_long = 4000 + 259; -pub const SYS_timer_getoverrun: c_long = 4000 + 260; -pub const SYS_timer_delete: c_long = 4000 + 261; -pub const SYS_clock_settime: c_long = 4000 + 262; -pub const SYS_clock_gettime: c_long = 4000 + 263; -pub const SYS_clock_getres: c_long = 4000 + 264; -pub const SYS_clock_nanosleep: c_long = 4000 + 265; -pub const SYS_tgkill: c_long = 4000 + 266; -pub const SYS_utimes: c_long = 4000 + 267; -pub const SYS_mbind: c_long = 4000 + 268; -pub const SYS_get_mempolicy: c_long = 4000 + 269; -pub const SYS_set_mempolicy: c_long = 4000 + 270; -pub const SYS_mq_open: c_long = 4000 + 271; -pub const SYS_mq_unlink: c_long = 4000 + 272; -pub const SYS_mq_timedsend: c_long = 4000 + 273; -pub const SYS_mq_timedreceive: c_long = 4000 + 274; -pub const SYS_mq_notify: c_long = 4000 + 275; -pub const SYS_mq_getsetattr: c_long = 4000 + 276; -pub const SYS_vserver: c_long = 4000 + 277; -pub const SYS_waitid: c_long = 4000 + 278; -/* pub const SYS_sys_setaltroot: c_long = 4000 + 279; */ -pub const SYS_add_key: c_long = 4000 + 280; -pub const SYS_request_key: c_long = 4000 + 281; -pub const SYS_keyctl: c_long = 4000 + 282; -pub const SYS_set_thread_area: c_long = 4000 + 283; -pub const SYS_inotify_init: c_long = 4000 + 284; -pub const SYS_inotify_add_watch: c_long = 4000 + 285; -pub const SYS_inotify_rm_watch: c_long = 4000 + 286; -pub const SYS_migrate_pages: c_long = 4000 + 287; -pub const SYS_openat: c_long = 4000 + 288; -pub const SYS_mkdirat: c_long = 4000 + 289; -pub const SYS_mknodat: c_long = 4000 + 290; -pub const SYS_fchownat: c_long = 4000 + 291; -pub const SYS_futimesat: c_long = 4000 + 292; -pub const SYS_fstatat64: c_long = 4000 + 293; -pub const SYS_unlinkat: c_long = 4000 + 294; -pub const SYS_renameat: c_long = 4000 + 295; -pub const SYS_linkat: c_long = 4000 + 296; -pub const SYS_symlinkat: c_long = 4000 + 297; -pub const SYS_readlinkat: c_long = 4000 + 298; -pub const SYS_fchmodat: c_long = 4000 + 299; -pub const SYS_faccessat: c_long = 4000 + 300; -pub const SYS_pselect6: c_long = 4000 + 301; -pub const SYS_ppoll: c_long = 4000 + 302; -pub const SYS_unshare: c_long = 4000 + 303; -pub const SYS_splice: c_long = 4000 + 304; -pub const SYS_sync_file_range: c_long = 4000 + 305; -pub const SYS_tee: c_long = 4000 + 306; -pub const SYS_vmsplice: c_long = 4000 + 307; -pub const SYS_move_pages: c_long = 4000 + 308; -pub const SYS_set_robust_list: c_long = 4000 + 309; -pub 
const SYS_get_robust_list: c_long = 4000 + 310; -pub const SYS_kexec_load: c_long = 4000 + 311; -pub const SYS_getcpu: c_long = 4000 + 312; -pub const SYS_epoll_pwait: c_long = 4000 + 313; -pub const SYS_ioprio_set: c_long = 4000 + 314; -pub const SYS_ioprio_get: c_long = 4000 + 315; -pub const SYS_utimensat: c_long = 4000 + 316; -pub const SYS_signalfd: c_long = 4000 + 317; -pub const SYS_timerfd: c_long = 4000 + 318; -pub const SYS_eventfd: c_long = 4000 + 319; -pub const SYS_fallocate: c_long = 4000 + 320; -pub const SYS_timerfd_create: c_long = 4000 + 321; -pub const SYS_timerfd_gettime: c_long = 4000 + 322; -pub const SYS_timerfd_settime: c_long = 4000 + 323; -pub const SYS_signalfd4: c_long = 4000 + 324; -pub const SYS_eventfd2: c_long = 4000 + 325; -pub const SYS_epoll_create1: c_long = 4000 + 326; -pub const SYS_dup3: c_long = 4000 + 327; -pub const SYS_pipe2: c_long = 4000 + 328; -pub const SYS_inotify_init1: c_long = 4000 + 329; -pub const SYS_preadv: c_long = 4000 + 330; -pub const SYS_pwritev: c_long = 4000 + 331; -pub const SYS_rt_tgsigqueueinfo: c_long = 4000 + 332; -pub const SYS_perf_event_open: c_long = 4000 + 333; -pub const SYS_accept4: c_long = 4000 + 334; -pub const SYS_recvmmsg: c_long = 4000 + 335; -pub const SYS_fanotify_init: c_long = 4000 + 336; -pub const SYS_fanotify_mark: c_long = 4000 + 337; -pub const SYS_prlimit64: c_long = 4000 + 338; -pub const SYS_name_to_handle_at: c_long = 4000 + 339; -pub const SYS_open_by_handle_at: c_long = 4000 + 340; -pub const SYS_clock_adjtime: c_long = 4000 + 341; -pub const SYS_syncfs: c_long = 4000 + 342; -pub const SYS_sendmmsg: c_long = 4000 + 343; -pub const SYS_setns: c_long = 4000 + 344; -pub const SYS_process_vm_readv: c_long = 4000 + 345; -pub const SYS_process_vm_writev: c_long = 4000 + 346; -pub const SYS_kcmp: c_long = 4000 + 347; -pub const SYS_finit_module: c_long = 4000 + 348; -pub const SYS_sched_setattr: c_long = 4000 + 349; -pub const SYS_sched_getattr: c_long = 4000 + 350; -pub const SYS_renameat2: c_long = 4000 + 351; -pub const SYS_seccomp: c_long = 4000 + 352; -pub const SYS_getrandom: c_long = 4000 + 353; -pub const SYS_memfd_create: c_long = 4000 + 354; -pub const SYS_bpf: c_long = 4000 + 355; -pub const SYS_execveat: c_long = 4000 + 356; -pub const SYS_userfaultfd: c_long = 4000 + 357; -pub const SYS_membarrier: c_long = 4000 + 358; -pub const SYS_mlock2: c_long = 4000 + 359; -pub const SYS_copy_file_range: c_long = 4000 + 360; -pub const SYS_preadv2: c_long = 4000 + 361; -pub const SYS_pwritev2: c_long = 4000 + 362; -pub const SYS_pkey_mprotect: c_long = 4000 + 363; -pub const SYS_pkey_alloc: c_long = 4000 + 364; -pub const SYS_pkey_free: c_long = 4000 + 365; -pub const SYS_statx: c_long = 4000 + 366; -pub const SYS_rseq: c_long = 4000 + 367; -pub const SYS_pidfd_send_signal: c_long = 4000 + 424; -pub const SYS_io_uring_setup: c_long = 4000 + 425; -pub const SYS_io_uring_enter: c_long = 4000 + 426; -pub const SYS_io_uring_register: c_long = 4000 + 427; -pub const SYS_open_tree: c_long = 4000 + 428; -pub const SYS_move_mount: c_long = 4000 + 429; -pub const SYS_fsopen: c_long = 4000 + 430; -pub const SYS_fsconfig: c_long = 4000 + 431; -pub const SYS_fsmount: c_long = 4000 + 432; -pub const SYS_fspick: c_long = 4000 + 433; -pub const SYS_pidfd_open: c_long = 4000 + 434; -pub const SYS_clone3: c_long = 4000 + 435; -pub const SYS_close_range: c_long = 4000 + 436; -pub const SYS_openat2: c_long = 4000 + 437; -pub const SYS_pidfd_getfd: c_long = 4000 + 438; -pub const SYS_faccessat2: c_long = 4000 + 439; -pub 
const SYS_process_madvise: c_long = 4000 + 440; -pub const SYS_epoll_pwait2: c_long = 4000 + 441; -pub const SYS_mount_setattr: c_long = 4000 + 442; -pub const SYS_quotactl_fd: c_long = 4000 + 443; -pub const SYS_landlock_create_ruleset: c_long = 4000 + 444; -pub const SYS_landlock_add_rule: c_long = 4000 + 445; -pub const SYS_landlock_restrict_self: c_long = 4000 + 446; -pub const SYS_memfd_secret: c_long = 4000 + 447; -pub const SYS_process_mrelease: c_long = 4000 + 448; -pub const SYS_futex_waitv: c_long = 4000 + 449; -pub const SYS_set_mempolicy_home_node: c_long = 4000 + 450; - -pub const O_DIRECT: c_int = 0x8000; -pub const O_DIRECTORY: c_int = 0x10000; -pub const O_NOFOLLOW: c_int = 0x20000; - -pub const O_APPEND: c_int = 8; -pub const O_CREAT: c_int = 256; -pub const O_EXCL: c_int = 1024; -pub const O_NOCTTY: c_int = 2048; -pub const O_NONBLOCK: c_int = 128; -pub const O_SYNC: c_int = 0x4010; -pub const O_RSYNC: c_int = 0x4010; -pub const O_DSYNC: c_int = 0x10; -pub const O_FSYNC: c_int = 0x4010; -pub const O_ASYNC: c_int = 0x1000; -pub const O_NDELAY: c_int = 0x80; - -pub const EDEADLK: c_int = 45; -pub const ENAMETOOLONG: c_int = 78; -pub const ENOLCK: c_int = 46; -pub const ENOSYS: c_int = 89; -pub const ENOTEMPTY: c_int = 93; -pub const ELOOP: c_int = 90; -pub const ENOMSG: c_int = 35; -pub const EIDRM: c_int = 36; -pub const ECHRNG: c_int = 37; -pub const EL2NSYNC: c_int = 38; -pub const EL3HLT: c_int = 39; -pub const EL3RST: c_int = 40; -pub const ELNRNG: c_int = 41; -pub const EUNATCH: c_int = 42; -pub const ENOCSI: c_int = 43; -pub const EL2HLT: c_int = 44; -pub const EBADE: c_int = 50; -pub const EBADR: c_int = 51; -pub const EXFULL: c_int = 52; -pub const ENOANO: c_int = 53; -pub const EBADRQC: c_int = 54; -pub const EBADSLT: c_int = 55; -pub const EDEADLOCK: c_int = 56; -pub const EMULTIHOP: c_int = 74; -pub const EOVERFLOW: c_int = 79; -pub const ENOTUNIQ: c_int = 80; -pub const EBADFD: c_int = 81; -pub const EBADMSG: c_int = 77; -pub const EREMCHG: c_int = 82; -pub const ELIBACC: c_int = 83; -pub const ELIBBAD: c_int = 84; -pub const ELIBSCN: c_int = 85; -pub const ELIBMAX: c_int = 86; -pub const ELIBEXEC: c_int = 87; -pub const EILSEQ: c_int = 88; -pub const ERESTART: c_int = 91; -pub const ESTRPIPE: c_int = 92; -pub const EUSERS: c_int = 94; -pub const ENOTSOCK: c_int = 95; -pub const EDESTADDRREQ: c_int = 96; -pub const EMSGSIZE: c_int = 97; -pub const EPROTOTYPE: c_int = 98; -pub const ENOPROTOOPT: c_int = 99; -pub const EPROTONOSUPPORT: c_int = 120; -pub const ESOCKTNOSUPPORT: c_int = 121; -pub const EOPNOTSUPP: c_int = 122; -pub const EPFNOSUPPORT: c_int = 123; -pub const EAFNOSUPPORT: c_int = 124; -pub const EADDRINUSE: c_int = 125; -pub const EADDRNOTAVAIL: c_int = 126; -pub const ENETDOWN: c_int = 127; -pub const ENETUNREACH: c_int = 128; -pub const ENETRESET: c_int = 129; -pub const ECONNABORTED: c_int = 130; -pub const ECONNRESET: c_int = 131; -pub const ENOBUFS: c_int = 132; -pub const EISCONN: c_int = 133; -pub const ENOTCONN: c_int = 134; -pub const ESHUTDOWN: c_int = 143; -pub const ETOOMANYREFS: c_int = 144; -pub const ETIMEDOUT: c_int = 145; -pub const ECONNREFUSED: c_int = 146; -pub const EHOSTDOWN: c_int = 147; -pub const EHOSTUNREACH: c_int = 148; -pub const EALREADY: c_int = 149; -pub const EINPROGRESS: c_int = 150; -pub const ESTALE: c_int = 151; -pub const EUCLEAN: c_int = 135; -pub const ENOTNAM: c_int = 137; -pub const ENAVAIL: c_int = 138; -pub const EISNAM: c_int = 139; -pub const EREMOTEIO: c_int = 140; -pub const EDQUOT: c_int = 1133; -pub 
const ENOMEDIUM: c_int = 159; -pub const EMEDIUMTYPE: c_int = 160; -pub const ECANCELED: c_int = 158; -pub const ENOKEY: c_int = 161; -pub const EKEYEXPIRED: c_int = 162; -pub const EKEYREVOKED: c_int = 163; -pub const EKEYREJECTED: c_int = 164; -pub const EOWNERDEAD: c_int = 165; -pub const ENOTRECOVERABLE: c_int = 166; -pub const ERFKILL: c_int = 167; - -pub const MAP_NORESERVE: c_int = 0x400; -pub const MAP_ANON: c_int = 0x800; -pub const MAP_ANONYMOUS: c_int = 0x800; -pub const MAP_GROWSDOWN: c_int = 0x1000; -pub const MAP_DENYWRITE: c_int = 0x2000; -pub const MAP_EXECUTABLE: c_int = 0x4000; -pub const MAP_LOCKED: c_int = 0x8000; -pub const MAP_POPULATE: c_int = 0x10000; -pub const MAP_NONBLOCK: c_int = 0x20000; -pub const MAP_STACK: c_int = 0x40000; - -pub const SOCK_STREAM: c_int = 2; -pub const SOCK_DGRAM: c_int = 1; - -pub const SA_SIGINFO: c_int = 0x00000008; -pub const SA_NOCLDWAIT: c_int = 0x00010000; - -pub const SIGEMT: c_int = 7; -pub const SIGCHLD: c_int = 18; -pub const SIGBUS: c_int = 10; -pub const SIGTTIN: c_int = 26; -pub const SIGTTOU: c_int = 27; -pub const SIGXCPU: c_int = 30; -pub const SIGXFSZ: c_int = 31; -pub const SIGVTALRM: c_int = 28; -pub const SIGPROF: c_int = 29; -pub const SIGWINCH: c_int = 20; -pub const SIGUSR1: c_int = 16; -pub const SIGUSR2: c_int = 17; -pub const SIGCONT: c_int = 25; -pub const SIGSTOP: c_int = 23; -pub const SIGTSTP: c_int = 24; -pub const SIGURG: c_int = 21; -pub const SIGIO: c_int = 22; -pub const SIGSYS: c_int = 12; -pub const SIGPOLL: c_int = 22; -pub const SIGPWR: c_int = 19; -pub const SIG_SETMASK: c_int = 3; -pub const SIG_BLOCK: c_int = 0x1; -pub const SIG_UNBLOCK: c_int = 0x2; - -pub const POLLWRNORM: c_short = 0x004; -pub const POLLWRBAND: c_short = 0x100; - -pub const VEOF: usize = 16; -pub const VEOL: usize = 17; -pub const VEOL2: usize = 6; -pub const VMIN: usize = 4; -pub const IEXTEN: crate::tcflag_t = 0x00000100; -pub const TOSTOP: crate::tcflag_t = 0x00008000; -pub const FLUSHO: crate::tcflag_t = 0x00002000; -pub const EXTPROC: crate::tcflag_t = 0o200000; -pub const TCSANOW: c_int = 0x540e; -pub const TCSADRAIN: c_int = 0x540f; -pub const TCSAFLUSH: c_int = 0x5410; - -pub const PTRACE_GETFPXREGS: c_uint = 18; -pub const PTRACE_SETFPXREGS: c_uint = 19; - -pub const MAP_HUGETLB: c_int = 0x080000; - -pub const EFD_NONBLOCK: c_int = 0x80; - -cfg_if! 
{ - if #[cfg(gnu_file_offset_bits64)] { - pub const F_GETLK: c_int = 33; - } else { - pub const F_GETLK: c_int = 14; - } -} -pub const F_GETOWN: c_int = 23; -pub const F_SETOWN: c_int = 24; - -pub const SFD_NONBLOCK: c_int = 0x80; - -pub const RTLD_DEEPBIND: c_int = 0x10; -pub const RTLD_GLOBAL: c_int = 0x4; -pub const RTLD_NOLOAD: c_int = 0x8; - -pub const MCL_CURRENT: c_int = 0x0001; -pub const MCL_FUTURE: c_int = 0x0002; -pub const MCL_ONFAULT: c_int = 0x0004; - -pub const SIGSTKSZ: size_t = 8192; -pub const MINSIGSTKSZ: size_t = 2048; -pub const CBAUD: crate::tcflag_t = 0o0010017; -pub const TAB1: crate::tcflag_t = 0x00000800; -pub const TAB2: crate::tcflag_t = 0x00001000; -pub const TAB3: crate::tcflag_t = 0x00001800; -pub const CR1: crate::tcflag_t = 0x00000200; -pub const CR2: crate::tcflag_t = 0x00000400; -pub const CR3: crate::tcflag_t = 0x00000600; -pub const FF1: crate::tcflag_t = 0x00008000; -pub const BS1: crate::tcflag_t = 0x00002000; -pub const VT1: crate::tcflag_t = 0x00004000; -pub const VWERASE: usize = 14; -pub const VREPRINT: usize = 12; -pub const VSUSP: usize = 10; -pub const VSTART: usize = 8; -pub const VSTOP: usize = 9; -pub const VDISCARD: usize = 13; -pub const VTIME: usize = 5; -pub const IXON: crate::tcflag_t = 0x00000400; -pub const IXOFF: crate::tcflag_t = 0x00001000; -pub const ONLCR: crate::tcflag_t = 0x4; -pub const CSIZE: crate::tcflag_t = 0x00000030; -pub const CS6: crate::tcflag_t = 0x00000010; -pub const CS7: crate::tcflag_t = 0x00000020; -pub const CS8: crate::tcflag_t = 0x00000030; -pub const CSTOPB: crate::tcflag_t = 0x00000040; -pub const CREAD: crate::tcflag_t = 0x00000080; -pub const PARENB: crate::tcflag_t = 0x00000100; -pub const PARODD: crate::tcflag_t = 0x00000200; -pub const HUPCL: crate::tcflag_t = 0x00000400; -pub const CLOCAL: crate::tcflag_t = 0x00000800; -pub const ECHOKE: crate::tcflag_t = 0x00000800; -pub const ECHOE: crate::tcflag_t = 0x00000010; -pub const ECHOK: crate::tcflag_t = 0x00000020; -pub const ECHONL: crate::tcflag_t = 0x00000040; -pub const ECHOPRT: crate::tcflag_t = 0x00000400; -pub const ECHOCTL: crate::tcflag_t = 0x00000200; -pub const ISIG: crate::tcflag_t = 0x00000001; -pub const ICANON: crate::tcflag_t = 0x00000002; -pub const PENDIN: crate::tcflag_t = 0x00004000; -pub const NOFLSH: crate::tcflag_t = 0x00000080; -pub const CIBAUD: crate::tcflag_t = 0o02003600000; -pub const CBAUDEX: crate::tcflag_t = 0o010000; -pub const VSWTC: usize = 7; -pub const OLCUC: crate::tcflag_t = 0o000002; -pub const NLDLY: crate::tcflag_t = 0o000400; -pub const CRDLY: crate::tcflag_t = 0o003000; -pub const TABDLY: crate::tcflag_t = 0o014000; -pub const BSDLY: crate::tcflag_t = 0o020000; -pub const FFDLY: crate::tcflag_t = 0o100000; -pub const VTDLY: crate::tcflag_t = 0o040000; -pub const XTABS: crate::tcflag_t = 0o014000; - -pub const B0: crate::speed_t = 0o000000; -pub const B50: crate::speed_t = 0o000001; -pub const B75: crate::speed_t = 0o000002; -pub const B110: crate::speed_t = 0o000003; -pub const B134: crate::speed_t = 0o000004; -pub const B150: crate::speed_t = 0o000005; -pub const B200: crate::speed_t = 0o000006; -pub const B300: crate::speed_t = 0o000007; -pub const B600: crate::speed_t = 0o000010; -pub const B1200: crate::speed_t = 0o000011; -pub const B1800: crate::speed_t = 0o000012; -pub const B2400: crate::speed_t = 0o000013; -pub const B4800: crate::speed_t = 0o000014; -pub const B9600: crate::speed_t = 0o000015; -pub const B19200: crate::speed_t = 0o000016; -pub const B38400: crate::speed_t = 0o000017; -pub const EXTA: 
crate::speed_t = B19200;
-pub const EXTB: crate::speed_t = B38400;
-pub const B57600: crate::speed_t = 0o010001;
-pub const B115200: crate::speed_t = 0o010002;
-pub const B230400: crate::speed_t = 0o010003;
-pub const B460800: crate::speed_t = 0o010004;
-pub const B500000: crate::speed_t = 0o010005;
-pub const B576000: crate::speed_t = 0o010006;
-pub const B921600: crate::speed_t = 0o010007;
-pub const B1000000: crate::speed_t = 0o010010;
-pub const B1152000: crate::speed_t = 0o010011;
-pub const B1500000: crate::speed_t = 0o010012;
-pub const B2000000: crate::speed_t = 0o010013;
-pub const B2500000: crate::speed_t = 0o010014;
-pub const B3000000: crate::speed_t = 0o010015;
-pub const B3500000: crate::speed_t = 0o010016;
-pub const B4000000: crate::speed_t = 0o010017;
-
-pub const EHWPOISON: c_int = 168;
diff --git a/vendor/libc/src/unix/linux_like/linux/gnu/b32/mod.rs b/vendor/libc/src/unix/linux_like/linux/gnu/b32/mod.rs
deleted file mode 100644
index fe843a7643207d..00000000000000
--- a/vendor/libc/src/unix/linux_like/linux/gnu/b32/mod.rs
+++ /dev/null
@@ -1,491 +0,0 @@
-//! 32-bit specific definitions for linux-like values
-
-use crate::prelude::*;
-use crate::pthread_mutex_t;
-
-pub type clock_t = i32;
-
-pub type shmatt_t = c_ulong;
-pub type msgqnum_t = c_ulong;
-pub type msglen_t = c_ulong;
-pub type nlink_t = u32;
-pub type __u64 = c_ulonglong;
-pub type __s64 = c_longlong;
-pub type __fsword_t = i32;
-pub type fsblkcnt64_t = u64;
-pub type fsfilcnt64_t = u64;
-pub type __syscall_ulong_t = c_ulong;
-pub type __suseconds64_t = i64;
-
-cfg_if! {
-    if #[cfg(target_arch = "riscv32")] {
-        pub type time_t = i64;
-        pub type suseconds_t = i64;
-        type __ino_t = c_ulong;
-        type __ino64_t = u64;
-        pub type ino_t = __ino64_t;
-        pub type off_t = i64;
-        pub type blkcnt_t = i64;
-        pub type fsblkcnt_t = u64;
-        pub type fsfilcnt_t = u64;
-        pub type rlim_t = u64;
-        pub type blksize_t = i64;
-    } else if #[cfg(gnu_time_bits64)] {
-        pub type time_t = i64;
-        pub type suseconds_t = i32;
-        type __ino_t = c_ulong;
-        type __ino64_t = u64;
-        pub type ino_t = __ino64_t;
-        pub type off_t = i64;
-        pub type blkcnt_t = i64;
-        pub type fsblkcnt_t = u64;
-        pub type fsfilcnt_t = u64;
-        pub type rlim_t = u64;
-        pub type blksize_t = i32;
-    } else if #[cfg(gnu_file_offset_bits64)] {
-        pub type time_t = i32;
-        pub type suseconds_t = i32;
-        type __ino_t = c_ulong;
-        type __ino64_t = u64;
-        pub type ino_t = __ino64_t;
-        pub type off_t = i64;
-        pub type blkcnt_t = i64;
-        pub type fsblkcnt_t = u64;
-        pub type fsfilcnt_t = u64;
-        pub type rlim_t = u64;
-        pub type blksize_t = i32;
-    } else {
-        pub type time_t = i32;
-        pub type suseconds_t = i32;
-        type __ino_t = c_ulong;
-        type __ino64_t = u64;
-        pub type ino_t = __ino_t;
-        pub type off_t = i32;
-        pub type blkcnt_t = i32;
-        pub type fsblkcnt_t = c_ulong;
-        pub type fsfilcnt_t = c_ulong;
-        pub type rlim_t = c_ulong;
-        pub type blksize_t = i32;
-    }
-}
-
-cfg_if! {
-    if #[cfg(not(any(
-        target_arch = "mips",
-        target_arch = "mips32r6",
-        target_arch = "powerpc",
-        target_arch = "sparc"
-    )))] {
-        s!
{ - pub struct stat { - pub st_dev: crate::dev_t, - - #[cfg(not(gnu_time_bits64))] - __pad1: c_uint, - - #[cfg(any(gnu_time_bits64, not(gnu_file_offset_bits64)))] - pub st_ino: crate::ino_t, - #[cfg(all(not(gnu_time_bits64), gnu_file_offset_bits64))] - __st_ino: __ino_t, - - pub st_mode: crate::mode_t, - pub st_nlink: crate::nlink_t, - pub st_uid: crate::uid_t, - pub st_gid: crate::gid_t, - - pub st_rdev: crate::dev_t, - - #[cfg(not(gnu_time_bits64))] - __pad2: c_uint, - - pub st_size: off_t, - - pub st_blksize: crate::blksize_t, - pub st_blocks: crate::blkcnt_t, - - pub st_atime: crate::time_t, - pub st_atime_nsec: c_long, - #[cfg(gnu_time_bits64)] - _atime_pad: c_int, - pub st_mtime: crate::time_t, - pub st_mtime_nsec: c_long, - #[cfg(gnu_time_bits64)] - _mtime_pad: c_int, - pub st_ctime: crate::time_t, - pub st_ctime_nsec: c_long, - #[cfg(gnu_time_bits64)] - _ctime_pad: c_int, - - #[cfg(not(gnu_file_offset_bits64))] - __glibc_reserved4: c_long, - #[cfg(not(gnu_file_offset_bits64))] - __glibc_reserved5: c_long, - #[cfg(all(not(gnu_time_bits64), gnu_file_offset_bits64))] - pub st_ino: crate::ino_t, - } - } - } -} - -s! { - pub struct statvfs { - pub f_bsize: c_ulong, - pub f_frsize: c_ulong, - pub f_blocks: crate::fsblkcnt_t, - pub f_bfree: crate::fsblkcnt_t, - pub f_bavail: crate::fsblkcnt_t, - pub f_files: crate::fsfilcnt_t, - pub f_ffree: crate::fsfilcnt_t, - pub f_favail: crate::fsfilcnt_t, - pub f_fsid: c_ulong, - __f_unused: c_int, - pub f_flag: c_ulong, - pub f_namemax: c_ulong, - __f_spare: [c_int; 6], - } - - pub struct pthread_attr_t { - __size: [u32; 9], - } - - pub struct sigset_t { - __val: [c_ulong; 32], - } - - pub struct sysinfo { - pub uptime: c_long, - pub loads: [c_ulong; 3], - pub totalram: c_ulong, - pub freeram: c_ulong, - pub sharedram: c_ulong, - pub bufferram: c_ulong, - pub totalswap: c_ulong, - pub freeswap: c_ulong, - pub procs: c_ushort, - #[deprecated( - since = "0.2.58", - note = "This padding field might become private in the future" - )] - pub pad: c_ushort, - pub totalhigh: c_ulong, - pub freehigh: c_ulong, - pub mem_unit: c_uint, - pub _f: [c_char; 8], - } - - pub struct semid_ds { - pub sem_perm: ipc_perm, - #[cfg(all(not(gnu_time_bits64), target_arch = "powerpc"))] - __reserved: crate::__syscall_ulong_t, - pub sem_otime: crate::time_t, - #[cfg(not(any( - gnu_time_bits64, - target_arch = "mips", - target_arch = "mips32r6", - target_arch = "powerpc" - )))] - __reserved: crate::__syscall_ulong_t, - #[cfg(all(not(gnu_time_bits64), target_arch = "powerpc"))] - __reserved2: crate::__syscall_ulong_t, - pub sem_ctime: crate::time_t, - #[cfg(not(any( - gnu_time_bits64, - target_arch = "mips", - target_arch = "mips32r6", - target_arch = "powerpc" - )))] - __reserved2: crate::__syscall_ulong_t, - pub sem_nsems: crate::__syscall_ulong_t, - #[cfg(all( - gnu_time_bits64, - not(any( - target_arch = "mips", - target_arch = "mips32r6", - target_arch = "powerpc", - target_arch = "arm", - target_arch = "x86" - )) - ))] - __reserved2: crate::__syscall_ulong_t, - __glibc_reserved3: crate::__syscall_ulong_t, - __glibc_reserved4: crate::__syscall_ulong_t, - } - - #[cfg(gnu_time_bits64)] - pub struct timex { - pub modes: c_uint, - _pad1: c_int, - pub offset: c_longlong, - pub freq: c_longlong, - pub maxerror: c_longlong, - pub esterror: c_longlong, - pub status: c_int, - _pad2: c_int, - pub constant: c_longlong, - pub precision: c_longlong, - pub tolerance: c_longlong, - pub time: crate::timeval, - pub tick: c_longlong, - pub ppsfreq: c_longlong, - pub jitter: c_longlong, - 
pub shift: c_int, - _pad3: c_int, - pub stabil: c_longlong, - pub jitcnt: c_longlong, - pub calcnt: c_longlong, - pub errcnt: c_longlong, - pub stbcnt: c_longlong, - pub tai: c_int, - pub __unused1: i32, - pub __unused2: i32, - pub __unused3: i32, - pub __unused4: i32, - pub __unused5: i32, - pub __unused6: i32, - pub __unused7: i32, - pub __unused8: i32, - pub __unused9: i32, - pub __unused10: i32, - pub __unused11: i32, - } - - #[cfg(not(gnu_time_bits64))] - pub struct timex { - pub modes: c_uint, - pub offset: c_long, - pub freq: c_long, - pub maxerror: c_long, - pub esterror: c_long, - pub status: c_int, - pub constant: c_long, - pub precision: c_long, - pub tolerance: c_long, - pub time: crate::timeval, - pub tick: c_long, - pub ppsfreq: c_long, - pub jitter: c_long, - pub shift: c_int, - pub stabil: c_long, - pub jitcnt: c_long, - pub calcnt: c_long, - pub errcnt: c_long, - pub stbcnt: c_long, - pub tai: c_int, - pub __unused1: i32, - pub __unused2: i32, - pub __unused3: i32, - pub __unused4: i32, - pub __unused5: i32, - pub __unused6: i32, - pub __unused7: i32, - pub __unused8: i32, - pub __unused9: i32, - pub __unused10: i32, - pub __unused11: i32, - } -} - -pub const POSIX_FADV_DONTNEED: c_int = 4; -pub const POSIX_FADV_NOREUSE: c_int = 5; - -pub const F_OFD_GETLK: c_int = 36; -pub const F_OFD_SETLK: c_int = 37; -pub const F_OFD_SETLKW: c_int = 38; - -pub const __SIZEOF_PTHREAD_CONDATTR_T: usize = 4; -pub const __SIZEOF_PTHREAD_MUTEX_T: usize = 24; -pub const __SIZEOF_PTHREAD_RWLOCK_T: usize = 32; -pub const __SIZEOF_PTHREAD_BARRIER_T: usize = 20; -pub const __SIZEOF_PTHREAD_MUTEXATTR_T: usize = 4; -pub const __SIZEOF_PTHREAD_RWLOCKATTR_T: usize = 8; -pub const __SIZEOF_PTHREAD_BARRIERATTR_T: usize = 4; - -cfg_if! { - if #[cfg(target_arch = "sparc")] { - pub const O_NOATIME: c_int = 0x200000; - pub const O_PATH: c_int = 0x1000000; - pub const O_TMPFILE: c_int = 0x2000000 | O_DIRECTORY; - - pub const SA_ONSTACK: c_int = 1; - - pub const PTRACE_DETACH: c_uint = 11; - - pub const F_RDLCK: c_int = 1; - pub const F_WRLCK: c_int = 2; - pub const F_UNLCK: c_int = 3; - - pub const SFD_CLOEXEC: c_int = 0x400000; - - pub const NCCS: usize = 17; - - pub const O_TRUNC: c_int = 0x400; - pub const O_CLOEXEC: c_int = 0x400000; - - pub const EBFONT: c_int = 109; - pub const ENOSTR: c_int = 72; - pub const ENODATA: c_int = 111; - pub const ETIME: c_int = 73; - pub const ENOSR: c_int = 74; - pub const ENONET: c_int = 80; - pub const ENOPKG: c_int = 113; - pub const EREMOTE: c_int = 71; - pub const ENOLINK: c_int = 82; - pub const EADV: c_int = 83; - pub const ESRMNT: c_int = 84; - pub const ECOMM: c_int = 85; - pub const EPROTO: c_int = 86; - pub const EDOTDOT: c_int = 88; - - pub const SA_NODEFER: c_int = 0x20; - pub const SA_RESETHAND: c_int = 0x4; - pub const SA_RESTART: c_int = 0x2; - pub const SA_NOCLDSTOP: c_int = 0x00000008; - - pub const EPOLL_CLOEXEC: c_int = 0x400000; - - pub const EFD_CLOEXEC: c_int = 0x400000; - } else { - pub const O_NOATIME: c_int = 0o1000000; - pub const O_PATH: c_int = 0o10000000; - pub const O_TMPFILE: c_int = 0o20000000 | O_DIRECTORY; - - pub const SA_ONSTACK: c_int = 0x08000000; - - pub const PTRACE_DETACH: c_uint = 17; - - pub const F_RDLCK: c_int = 0; - pub const F_WRLCK: c_int = 1; - pub const F_UNLCK: c_int = 2; - - pub const SFD_CLOEXEC: c_int = 0x080000; - - pub const NCCS: usize = 32; - - pub const O_TRUNC: c_int = 512; - pub const O_CLOEXEC: c_int = 0x80000; - pub const EBFONT: c_int = 59; - pub const ENOSTR: c_int = 60; - pub const ENODATA: c_int = 61; - 
pub const ETIME: c_int = 62; - pub const ENOSR: c_int = 63; - pub const ENONET: c_int = 64; - pub const ENOPKG: c_int = 65; - pub const EREMOTE: c_int = 66; - pub const ENOLINK: c_int = 67; - pub const EADV: c_int = 68; - pub const ESRMNT: c_int = 69; - pub const ECOMM: c_int = 70; - pub const EPROTO: c_int = 71; - pub const EDOTDOT: c_int = 73; - - pub const SA_NODEFER: c_int = 0x40000000; - pub const SA_RESETHAND: c_int = 0x80000000; - pub const SA_RESTART: c_int = 0x10000000; - pub const SA_NOCLDSTOP: c_int = 0x00000001; - - pub const EPOLL_CLOEXEC: c_int = 0x80000; - - pub const EFD_CLOEXEC: c_int = 0x80000; - } -} -cfg_if! { - if #[cfg(target_arch = "sparc")] { - pub const F_SETLK: c_int = 8; - pub const F_SETLKW: c_int = 9; - } else if #[cfg(all( - gnu_file_offset_bits64, - any(target_arch = "mips", target_arch = "mips32r6") - ))] { - pub const F_SETLK: c_int = 34; - pub const F_SETLKW: c_int = 35; - } else if #[cfg(gnu_file_offset_bits64)] { - pub const F_SETLK: c_int = 13; - pub const F_SETLKW: c_int = 14; - } else { - pub const F_SETLK: c_int = 6; - pub const F_SETLKW: c_int = 7; - } -} - -#[cfg(target_endian = "little")] -pub const PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP: crate::pthread_mutex_t = pthread_mutex_t { - size: [ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ], -}; -#[cfg(target_endian = "little")] -pub const PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP: crate::pthread_mutex_t = pthread_mutex_t { - size: [ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ], -}; -#[cfg(target_endian = "little")] -pub const PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP: crate::pthread_mutex_t = pthread_mutex_t { - size: [ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ], -}; -#[cfg(target_endian = "big")] -pub const PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP: crate::pthread_mutex_t = pthread_mutex_t { - size: [ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, - ], -}; -#[cfg(target_endian = "big")] -pub const PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP: crate::pthread_mutex_t = pthread_mutex_t { - size: [ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, - ], -}; -#[cfg(target_endian = "big")] -pub const PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP: crate::pthread_mutex_t = pthread_mutex_t { - size: [ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, - ], -}; - -pub const PTRACE_GETFPREGS: c_uint = 14; -pub const PTRACE_SETFPREGS: c_uint = 15; -pub const PTRACE_GETREGS: c_uint = 12; -pub const PTRACE_SETREGS: c_uint = 13; - -extern "C" { - pub fn sysctl( - name: *mut c_int, - namelen: c_int, - oldp: *mut c_void, - oldlenp: *mut size_t, - newp: *mut c_void, - newlen: size_t, - ) -> c_int; -} - -cfg_if! 
{
-    if #[cfg(target_arch = "x86")] {
-        mod x86;
-        pub use self::x86::*;
-    } else if #[cfg(target_arch = "arm")] {
-        mod arm;
-        pub use self::arm::*;
-    } else if #[cfg(any(target_arch = "mips", target_arch = "mips32r6"))] {
-        mod mips;
-        pub use self::mips::*;
-    } else if #[cfg(target_arch = "m68k")] {
-        mod m68k;
-        pub use self::m68k::*;
-    } else if #[cfg(target_arch = "powerpc")] {
-        mod powerpc;
-        pub use self::powerpc::*;
-    } else if #[cfg(target_arch = "sparc")] {
-        mod sparc;
-        pub use self::sparc::*;
-    } else if #[cfg(target_arch = "riscv32")] {
-        mod riscv32;
-        pub use self::riscv32::*;
-    } else if #[cfg(target_arch = "csky")] {
-        mod csky;
-        pub use self::csky::*;
-    } else {
-        // Unknown target_arch
-    }
-}
diff --git a/vendor/libc/src/unix/linux_like/linux/gnu/b32/powerpc.rs b/vendor/libc/src/unix/linux_like/linux/gnu/b32/powerpc.rs
deleted file mode 100644
index 791f14956806d1..00000000000000
--- a/vendor/libc/src/unix/linux_like/linux/gnu/b32/powerpc.rs
+++ /dev/null
@@ -1,892 +0,0 @@
-use crate::prelude::*;
-use crate::{off64_t, off_t};
-
-pub type wchar_t = i32;
-
-s! {
-    // FIXME(1.0): This should not implement `PartialEq`
-    #[allow(unpredictable_function_pointer_comparisons)]
-    pub struct sigaction {
-        pub sa_sigaction: crate::sighandler_t,
-        pub sa_mask: crate::sigset_t,
-        pub sa_flags: c_int,
-        pub sa_restorer: Option,
-    }
-
-    pub struct statfs {
-        pub f_type: crate::__fsword_t,
-        pub f_bsize: crate::__fsword_t,
-        pub f_blocks: crate::fsblkcnt_t,
-        pub f_bfree: crate::fsblkcnt_t,
-        pub f_bavail: crate::fsblkcnt_t,
-
-        pub f_files: crate::fsfilcnt_t,
-        pub f_ffree: crate::fsfilcnt_t,
-        pub f_fsid: crate::fsid_t,
-
-        pub f_namelen: crate::__fsword_t,
-        pub f_frsize: crate::__fsword_t,
-        pub f_flags: crate::__fsword_t,
-        f_spare: [crate::__fsword_t; 4],
-    }
-
-    pub struct flock {
-        pub l_type: c_short,
-        pub l_whence: c_short,
-        pub l_start: off_t,
-        pub l_len: off_t,
-        pub l_pid: crate::pid_t,
-    }
-
-    pub struct flock64 {
-        pub l_type: c_short,
-        pub l_whence: c_short,
-        pub l_start: off64_t,
-        pub l_len: off64_t,
-        pub l_pid: crate::pid_t,
-    }
-
-    pub struct ipc_perm {
-        __key: crate::key_t,
-        pub uid: crate::uid_t,
-        pub gid: crate::gid_t,
-        pub cuid: crate::uid_t,
-        pub cgid: crate::gid_t,
-        pub mode: crate::mode_t,
-        __seq: u32,
-        __pad1: u32,
-        __glibc_reserved1: u64,
-        __glibc_reserved2: u64,
-    }
-
-    pub struct stat {
-        pub st_dev: crate::dev_t,
-        #[cfg(not(gnu_file_offset_bits64))]
-        __pad1: c_ushort,
-        pub st_ino: crate::ino_t,
-        pub st_mode: crate::mode_t,
-        pub st_nlink: crate::nlink_t,
-        pub st_uid: crate::uid_t,
-        pub st_gid: crate::gid_t,
-        pub st_rdev: crate::dev_t,
-        #[cfg(not(gnu_time_bits64))]
-        __pad2: c_ushort,
-        pub st_size: off_t,
-        pub st_blksize: crate::blksize_t,
-        pub st_blocks: crate::blkcnt_t,
-        pub st_atime: crate::time_t,
-        #[cfg(gnu_time_bits64)]
-        _atime_pad: c_int,
-        pub st_atime_nsec: c_long,
-        pub st_mtime: crate::time_t,
-        #[cfg(gnu_time_bits64)]
-        _mtime_pad: c_int,
-        pub st_mtime_nsec: c_long,
-        pub st_ctime: crate::time_t,
-        #[cfg(gnu_time_bits64)]
-        _ctime_pad: c_int,
-        pub st_ctime_nsec: c_long,
-        #[cfg(not(gnu_time_bits64))]
-        __glibc_reserved4: c_ulong,
-        #[cfg(not(gnu_time_bits64))]
-        __glibc_reserved5: c_ulong,
-    }
-
-    pub struct stat64 {
-        pub st_dev: crate::dev_t,
-        pub st_ino: crate::ino64_t,
-        pub st_mode: crate::mode_t,
-        pub st_nlink: crate::nlink_t,
-        pub st_uid: crate::uid_t,
-        pub st_gid: crate::gid_t,
-        pub st_rdev: crate::dev_t,
-        #[cfg(not(gnu_time_bits64))]
-        __pad2: c_ushort,
-        pub st_size: off64_t,
-        pub st_blksize:
crate::blksize_t, - pub st_blocks: crate::blkcnt64_t, - pub st_atime: crate::time_t, - #[cfg(gnu_time_bits64)] - _atime_pad: c_int, - pub st_atime_nsec: c_long, - pub st_mtime: crate::time_t, - #[cfg(gnu_time_bits64)] - _mtime_pad: c_int, - pub st_mtime_nsec: c_long, - pub st_ctime: crate::time_t, - #[cfg(gnu_time_bits64)] - _ctime_pad: c_int, - pub st_ctime_nsec: c_long, - #[cfg(not(gnu_time_bits64))] - __glibc_reserved4: c_ulong, - #[cfg(not(gnu_time_bits64))] - __glibc_reserved5: c_ulong, - } - - pub struct statfs64 { - pub f_type: crate::__fsword_t, - pub f_bsize: crate::__fsword_t, - pub f_blocks: u64, - pub f_bfree: u64, - pub f_bavail: u64, - pub f_files: u64, - pub f_ffree: u64, - pub f_fsid: crate::fsid_t, - pub f_namelen: crate::__fsword_t, - pub f_frsize: crate::__fsword_t, - pub f_flags: crate::__fsword_t, - pub f_spare: [crate::__fsword_t; 4], - } - - pub struct statvfs64 { - pub f_bsize: c_ulong, - pub f_frsize: c_ulong, - pub f_blocks: u64, - pub f_bfree: u64, - pub f_bavail: u64, - pub f_files: u64, - pub f_ffree: u64, - pub f_favail: u64, - pub f_fsid: c_ulong, - __f_unused: c_int, - pub f_flag: c_ulong, - pub f_namemax: c_ulong, - __f_spare: [c_int; 6], - } - - pub struct shmid_ds { - pub shm_perm: crate::ipc_perm, - #[cfg(gnu_time_bits64)] - pub shm_segsz: size_t, - #[cfg(not(gnu_time_bits64))] - __glibc_reserved1: c_uint, - pub shm_atime: crate::time_t, - #[cfg(not(gnu_time_bits64))] - __glibc_reserved2: c_uint, - pub shm_dtime: crate::time_t, - #[cfg(not(gnu_time_bits64))] - __glibc_reserved3: c_uint, - pub shm_ctime: crate::time_t, - #[cfg(not(gnu_time_bits64))] - __glibc_reserved4: c_uint, - #[cfg(not(gnu_time_bits64))] - pub shm_segsz: size_t, - pub shm_cpid: crate::pid_t, - pub shm_lpid: crate::pid_t, - pub shm_nattch: crate::shmatt_t, - __glibc_reserved5: c_ulong, - __glibc_reserved6: c_ulong, - } - - pub struct msqid_ds { - pub msg_perm: crate::ipc_perm, - #[cfg(not(gnu_time_bits64))] - __glibc_reserved1: c_uint, - pub msg_stime: crate::time_t, - #[cfg(not(gnu_time_bits64))] - __glibc_reserved2: c_uint, - pub msg_rtime: crate::time_t, - #[cfg(not(gnu_time_bits64))] - __glibc_reserved3: c_uint, - pub msg_ctime: crate::time_t, - pub __msg_cbytes: c_ulong, - pub msg_qnum: crate::msgqnum_t, - pub msg_qbytes: crate::msglen_t, - pub msg_lspid: crate::pid_t, - pub msg_lrpid: crate::pid_t, - __glibc_reserved4: c_ulong, - __glibc_reserved5: c_ulong, - } - - pub struct siginfo_t { - pub si_signo: c_int, - pub si_errno: c_int, - pub si_code: c_int, - #[doc(hidden)] - #[deprecated( - since = "0.2.54", - note = "Please leave a comment on \ - https://github.com/rust-lang/libc/pull/1316 if you're using \ - this field" - )] - pub _pad: [c_int; 29], - _align: [usize; 0], - } - - pub struct stack_t { - pub ss_sp: *mut c_void, - pub ss_flags: c_int, - pub ss_size: size_t, - } -} - -pub const VEOF: usize = 4; -pub const RTLD_DEEPBIND: c_int = 0x8; -pub const RTLD_GLOBAL: c_int = 0x100; -pub const RTLD_NOLOAD: c_int = 0x4; -pub const O_DIRECT: c_int = 0x20000; -pub const O_DIRECTORY: c_int = 0x4000; -pub const O_NOFOLLOW: c_int = 0x8000; -pub const O_LARGEFILE: c_int = 0o200000; -pub const O_APPEND: c_int = 1024; -pub const O_CREAT: c_int = 64; -pub const O_EXCL: c_int = 128; -pub const O_NOCTTY: c_int = 256; -pub const O_NONBLOCK: c_int = 2048; -pub const O_SYNC: c_int = 1052672; -pub const O_RSYNC: c_int = 1052672; -pub const O_DSYNC: c_int = 4096; -pub const O_FSYNC: c_int = 0x101000; -pub const O_ASYNC: c_int = 0x2000; -pub const O_NDELAY: c_int = 0x800; -pub const TCSANOW: c_int 
= 0; -pub const TCSADRAIN: c_int = 1; -pub const TCSAFLUSH: c_int = 2; - -pub const MADV_SOFT_OFFLINE: c_int = 101; -pub const MAP_LOCKED: c_int = 0x00080; -pub const MAP_NORESERVE: c_int = 0x00040; -pub const MAP_ANON: c_int = 0x0020; -pub const MAP_ANONYMOUS: c_int = 0x0020; -pub const MAP_DENYWRITE: c_int = 0x0800; -pub const MAP_EXECUTABLE: c_int = 0x01000; -pub const MAP_POPULATE: c_int = 0x08000; -pub const MAP_NONBLOCK: c_int = 0x010000; -pub const MAP_STACK: c_int = 0x020000; -pub const MAP_HUGETLB: c_int = 0x040000; -pub const MAP_GROWSDOWN: c_int = 0x0100; -pub const MAP_SYNC: c_int = 0x080000; - -pub const EDEADLOCK: c_int = 58; -pub const EUCLEAN: c_int = 117; -pub const ENOTNAM: c_int = 118; -pub const ENAVAIL: c_int = 119; -pub const EISNAM: c_int = 120; -pub const EREMOTEIO: c_int = 121; -pub const EDEADLK: c_int = 35; -pub const ENAMETOOLONG: c_int = 36; -pub const ENOLCK: c_int = 37; -pub const ENOSYS: c_int = 38; -pub const ENOTEMPTY: c_int = 39; -pub const ELOOP: c_int = 40; -pub const ENOMSG: c_int = 42; -pub const EIDRM: c_int = 43; -pub const ECHRNG: c_int = 44; -pub const EL2NSYNC: c_int = 45; -pub const EL3HLT: c_int = 46; -pub const EL3RST: c_int = 47; -pub const ELNRNG: c_int = 48; -pub const EUNATCH: c_int = 49; -pub const ENOCSI: c_int = 50; -pub const EL2HLT: c_int = 51; -pub const EBADE: c_int = 52; -pub const EBADR: c_int = 53; -pub const EXFULL: c_int = 54; -pub const ENOANO: c_int = 55; -pub const EBADRQC: c_int = 56; -pub const EBADSLT: c_int = 57; -pub const EMULTIHOP: c_int = 72; -pub const EOVERFLOW: c_int = 75; -pub const ENOTUNIQ: c_int = 76; -pub const EBADFD: c_int = 77; -pub const EBADMSG: c_int = 74; -pub const EREMCHG: c_int = 78; -pub const ELIBACC: c_int = 79; -pub const ELIBBAD: c_int = 80; -pub const ELIBSCN: c_int = 81; -pub const ELIBMAX: c_int = 82; -pub const ELIBEXEC: c_int = 83; -pub const EILSEQ: c_int = 84; -pub const ERESTART: c_int = 85; -pub const ESTRPIPE: c_int = 86; -pub const EUSERS: c_int = 87; -pub const ENOTSOCK: c_int = 88; -pub const EDESTADDRREQ: c_int = 89; -pub const EMSGSIZE: c_int = 90; -pub const EPROTOTYPE: c_int = 91; -pub const ENOPROTOOPT: c_int = 92; -pub const EPROTONOSUPPORT: c_int = 93; -pub const ESOCKTNOSUPPORT: c_int = 94; -pub const EOPNOTSUPP: c_int = 95; -pub const EPFNOSUPPORT: c_int = 96; -pub const EAFNOSUPPORT: c_int = 97; -pub const EADDRINUSE: c_int = 98; -pub const EADDRNOTAVAIL: c_int = 99; -pub const ENETDOWN: c_int = 100; -pub const ENETUNREACH: c_int = 101; -pub const ENETRESET: c_int = 102; -pub const ECONNABORTED: c_int = 103; -pub const ECONNRESET: c_int = 104; -pub const ENOBUFS: c_int = 105; -pub const EISCONN: c_int = 106; -pub const ENOTCONN: c_int = 107; -pub const ESHUTDOWN: c_int = 108; -pub const ETOOMANYREFS: c_int = 109; -pub const ETIMEDOUT: c_int = 110; -pub const ECONNREFUSED: c_int = 111; -pub const EHOSTDOWN: c_int = 112; -pub const EHOSTUNREACH: c_int = 113; -pub const EALREADY: c_int = 114; -pub const EINPROGRESS: c_int = 115; -pub const ESTALE: c_int = 116; -pub const EDQUOT: c_int = 122; -pub const ENOMEDIUM: c_int = 123; -pub const EMEDIUMTYPE: c_int = 124; -pub const ECANCELED: c_int = 125; -pub const ENOKEY: c_int = 126; -pub const EKEYEXPIRED: c_int = 127; -pub const EKEYREVOKED: c_int = 128; -pub const EKEYREJECTED: c_int = 129; -pub const EOWNERDEAD: c_int = 130; -pub const ENOTRECOVERABLE: c_int = 131; -pub const EHWPOISON: c_int = 133; -pub const ERFKILL: c_int = 132; - -pub const SA_SIGINFO: c_int = 0x00000004; -pub const SA_NOCLDWAIT: c_int = 0x00000002; - -pub 
const SOCK_STREAM: c_int = 1; -pub const SOCK_DGRAM: c_int = 2; - -pub const MCL_CURRENT: c_int = 0x2000; -pub const MCL_FUTURE: c_int = 0x4000; -pub const MCL_ONFAULT: c_int = 0x8000; - -pub const POLLWRNORM: c_short = 0x100; -pub const POLLWRBAND: c_short = 0x200; - -cfg_if! { - if #[cfg(gnu_file_offset_bits64)] { - pub const F_GETLK: c_int = 12; - } else { - pub const F_GETLK: c_int = 5; - } -} -pub const F_GETOWN: c_int = 9; -pub const F_SETOWN: c_int = 8; - -pub const EFD_NONBLOCK: c_int = 0x800; -pub const SFD_NONBLOCK: c_int = 0x0800; - -pub const SIGCHLD: c_int = 17; -pub const SIGBUS: c_int = 7; -pub const SIGUSR1: c_int = 10; -pub const SIGUSR2: c_int = 12; -pub const SIGCONT: c_int = 18; -pub const SIGSTOP: c_int = 19; -pub const SIGTSTP: c_int = 20; -pub const SIGURG: c_int = 23; -pub const SIGIO: c_int = 29; -pub const SIGSYS: c_int = 31; -pub const SIGSTKFLT: c_int = 16; -#[deprecated(since = "0.2.55", note = "Use SIGSYS instead")] -pub const SIGUNUSED: c_int = 31; -pub const SIGPOLL: c_int = 29; -pub const SIGPWR: c_int = 30; -pub const SIG_SETMASK: c_int = 2; -pub const SIG_BLOCK: c_int = 0x000000; -pub const SIG_UNBLOCK: c_int = 0x01; -pub const SIGTTIN: c_int = 21; -pub const SIGTTOU: c_int = 22; -pub const SIGXCPU: c_int = 24; -pub const SIGXFSZ: c_int = 25; -pub const SIGVTALRM: c_int = 26; -pub const SIGPROF: c_int = 27; -pub const SIGWINCH: c_int = 28; -pub const SIGSTKSZ: size_t = 0x4000; -pub const MINSIGSTKSZ: size_t = 4096; -pub const CBAUD: crate::tcflag_t = 0xff; -pub const TAB1: crate::tcflag_t = 0x400; -pub const TAB2: crate::tcflag_t = 0x800; -pub const TAB3: crate::tcflag_t = 0xc00; -pub const CR1: crate::tcflag_t = 0x1000; -pub const CR2: crate::tcflag_t = 0x2000; -pub const CR3: crate::tcflag_t = 0x3000; -pub const FF1: crate::tcflag_t = 0x4000; -pub const BS1: crate::tcflag_t = 0x8000; -pub const VT1: crate::tcflag_t = 0x10000; -pub const VWERASE: usize = 0xa; -pub const VREPRINT: usize = 0xb; -pub const VSUSP: usize = 0xc; -pub const VSTART: usize = 0xd; -pub const VSTOP: usize = 0xe; -pub const VDISCARD: usize = 0x10; -pub const VTIME: usize = 0x7; -pub const IXON: crate::tcflag_t = 0x200; -pub const IXOFF: crate::tcflag_t = 0x400; -pub const ONLCR: crate::tcflag_t = 0x2; -pub const CSIZE: crate::tcflag_t = 0x300; -pub const CS6: crate::tcflag_t = 0x100; -pub const CS7: crate::tcflag_t = 0x200; -pub const CS8: crate::tcflag_t = 0x300; -pub const CSTOPB: crate::tcflag_t = 0x400; -pub const CREAD: crate::tcflag_t = 0x800; -pub const PARENB: crate::tcflag_t = 0x1000; -pub const PARODD: crate::tcflag_t = 0x2000; -pub const HUPCL: crate::tcflag_t = 0x4000; -pub const CLOCAL: crate::tcflag_t = 0x8000; -pub const ECHOKE: crate::tcflag_t = 0x1; -pub const ECHOE: crate::tcflag_t = 0x2; -pub const ECHOK: crate::tcflag_t = 0x4; -pub const ECHONL: crate::tcflag_t = 0x10; -pub const ECHOPRT: crate::tcflag_t = 0x20; -pub const ECHOCTL: crate::tcflag_t = 0x40; -pub const ISIG: crate::tcflag_t = 0x80; -pub const ICANON: crate::tcflag_t = 0x100; -pub const PENDIN: crate::tcflag_t = 0x20000000; -pub const NOFLSH: crate::tcflag_t = 0x80000000; -pub const VSWTC: usize = 9; -pub const OLCUC: crate::tcflag_t = 0o000004; -pub const NLDLY: crate::tcflag_t = 0o001400; -pub const CRDLY: crate::tcflag_t = 0o030000; -pub const TABDLY: crate::tcflag_t = 0o006000; -pub const BSDLY: crate::tcflag_t = 0o100000; -pub const FFDLY: crate::tcflag_t = 0o040000; -pub const VTDLY: crate::tcflag_t = 0o200000; -pub const XTABS: crate::tcflag_t = 0o006000; - -pub const B0: crate::speed_t = 
0o000000; -pub const B50: crate::speed_t = 0o000001; -pub const B75: crate::speed_t = 0o000002; -pub const B110: crate::speed_t = 0o000003; -pub const B134: crate::speed_t = 0o000004; -pub const B150: crate::speed_t = 0o000005; -pub const B200: crate::speed_t = 0o000006; -pub const B300: crate::speed_t = 0o000007; -pub const B600: crate::speed_t = 0o000010; -pub const B1200: crate::speed_t = 0o000011; -pub const B1800: crate::speed_t = 0o000012; -pub const B2400: crate::speed_t = 0o000013; -pub const B4800: crate::speed_t = 0o000014; -pub const B9600: crate::speed_t = 0o000015; -pub const B19200: crate::speed_t = 0o000016; -pub const B38400: crate::speed_t = 0o000017; -pub const EXTA: crate::speed_t = B19200; -pub const EXTB: crate::speed_t = B38400; -pub const CBAUDEX: crate::speed_t = 0o000020; -pub const B57600: crate::speed_t = 0o0020; -pub const B115200: crate::speed_t = 0o0021; -pub const B230400: crate::speed_t = 0o0022; -pub const B460800: crate::speed_t = 0o0023; -pub const B500000: crate::speed_t = 0o0024; -pub const B576000: crate::speed_t = 0o0025; -pub const B921600: crate::speed_t = 0o0026; -pub const B1000000: crate::speed_t = 0o0027; -pub const B1152000: crate::speed_t = 0o0030; -pub const B1500000: crate::speed_t = 0o0031; -pub const B2000000: crate::speed_t = 0o0032; -pub const B2500000: crate::speed_t = 0o0033; -pub const B3000000: crate::speed_t = 0o0034; -pub const B3500000: crate::speed_t = 0o0035; -pub const B4000000: crate::speed_t = 0o0036; - -pub const VEOL: usize = 6; -pub const VEOL2: usize = 8; -pub const VMIN: usize = 5; -pub const IEXTEN: crate::tcflag_t = 0x400; -pub const TOSTOP: crate::tcflag_t = 0x400000; -pub const FLUSHO: crate::tcflag_t = 0x800000; -pub const EXTPROC: crate::tcflag_t = 0x10000000; - -pub const SYS_restart_syscall: c_long = 0; -pub const SYS_exit: c_long = 1; -pub const SYS_fork: c_long = 2; -pub const SYS_read: c_long = 3; -pub const SYS_write: c_long = 4; -pub const SYS_open: c_long = 5; -pub const SYS_close: c_long = 6; -pub const SYS_waitpid: c_long = 7; -pub const SYS_creat: c_long = 8; -pub const SYS_link: c_long = 9; -pub const SYS_unlink: c_long = 10; -pub const SYS_execve: c_long = 11; -pub const SYS_chdir: c_long = 12; -pub const SYS_time: c_long = 13; -pub const SYS_mknod: c_long = 14; -pub const SYS_chmod: c_long = 15; -pub const SYS_lchown: c_long = 16; -pub const SYS_break: c_long = 17; -pub const SYS_oldstat: c_long = 18; -pub const SYS_lseek: c_long = 19; -pub const SYS_getpid: c_long = 20; -pub const SYS_mount: c_long = 21; -pub const SYS_umount: c_long = 22; -pub const SYS_setuid: c_long = 23; -pub const SYS_getuid: c_long = 24; -pub const SYS_stime: c_long = 25; -pub const SYS_ptrace: c_long = 26; -pub const SYS_alarm: c_long = 27; -pub const SYS_oldfstat: c_long = 28; -pub const SYS_pause: c_long = 29; -pub const SYS_utime: c_long = 30; -pub const SYS_stty: c_long = 31; -pub const SYS_gtty: c_long = 32; -pub const SYS_access: c_long = 33; -pub const SYS_nice: c_long = 34; -pub const SYS_ftime: c_long = 35; -pub const SYS_sync: c_long = 36; -pub const SYS_kill: c_long = 37; -pub const SYS_rename: c_long = 38; -pub const SYS_mkdir: c_long = 39; -pub const SYS_rmdir: c_long = 40; -pub const SYS_dup: c_long = 41; -pub const SYS_pipe: c_long = 42; -pub const SYS_times: c_long = 43; -pub const SYS_prof: c_long = 44; -pub const SYS_brk: c_long = 45; -pub const SYS_setgid: c_long = 46; -pub const SYS_getgid: c_long = 47; -pub const SYS_signal: c_long = 48; -pub const SYS_geteuid: c_long = 49; -pub const SYS_getegid: c_long = 
50; -pub const SYS_acct: c_long = 51; -pub const SYS_umount2: c_long = 52; -pub const SYS_lock: c_long = 53; -pub const SYS_ioctl: c_long = 54; -pub const SYS_fcntl: c_long = 55; -pub const SYS_mpx: c_long = 56; -pub const SYS_setpgid: c_long = 57; -pub const SYS_ulimit: c_long = 58; -pub const SYS_oldolduname: c_long = 59; -pub const SYS_umask: c_long = 60; -pub const SYS_chroot: c_long = 61; -pub const SYS_ustat: c_long = 62; -pub const SYS_dup2: c_long = 63; -pub const SYS_getppid: c_long = 64; -pub const SYS_getpgrp: c_long = 65; -pub const SYS_setsid: c_long = 66; -pub const SYS_sigaction: c_long = 67; -pub const SYS_sgetmask: c_long = 68; -pub const SYS_ssetmask: c_long = 69; -pub const SYS_setreuid: c_long = 70; -pub const SYS_setregid: c_long = 71; -pub const SYS_sigsuspend: c_long = 72; -pub const SYS_sigpending: c_long = 73; -pub const SYS_sethostname: c_long = 74; -pub const SYS_setrlimit: c_long = 75; -pub const SYS_getrlimit: c_long = 76; -pub const SYS_getrusage: c_long = 77; -pub const SYS_gettimeofday: c_long = 78; -pub const SYS_settimeofday: c_long = 79; -pub const SYS_getgroups: c_long = 80; -pub const SYS_setgroups: c_long = 81; -pub const SYS_select: c_long = 82; -pub const SYS_symlink: c_long = 83; -pub const SYS_oldlstat: c_long = 84; -pub const SYS_readlink: c_long = 85; -pub const SYS_uselib: c_long = 86; -pub const SYS_swapon: c_long = 87; -pub const SYS_reboot: c_long = 88; -pub const SYS_readdir: c_long = 89; -pub const SYS_mmap: c_long = 90; -pub const SYS_munmap: c_long = 91; -pub const SYS_truncate: c_long = 92; -pub const SYS_ftruncate: c_long = 93; -pub const SYS_fchmod: c_long = 94; -pub const SYS_fchown: c_long = 95; -pub const SYS_getpriority: c_long = 96; -pub const SYS_setpriority: c_long = 97; -pub const SYS_profil: c_long = 98; -pub const SYS_statfs: c_long = 99; -pub const SYS_fstatfs: c_long = 100; -pub const SYS_ioperm: c_long = 101; -pub const SYS_socketcall: c_long = 102; -pub const SYS_syslog: c_long = 103; -pub const SYS_setitimer: c_long = 104; -pub const SYS_getitimer: c_long = 105; -pub const SYS_stat: c_long = 106; -pub const SYS_lstat: c_long = 107; -pub const SYS_fstat: c_long = 108; -pub const SYS_olduname: c_long = 109; -pub const SYS_iopl: c_long = 110; -pub const SYS_vhangup: c_long = 111; -pub const SYS_idle: c_long = 112; -pub const SYS_vm86: c_long = 113; -pub const SYS_wait4: c_long = 114; -pub const SYS_swapoff: c_long = 115; -pub const SYS_sysinfo: c_long = 116; -pub const SYS_ipc: c_long = 117; -pub const SYS_fsync: c_long = 118; -pub const SYS_sigreturn: c_long = 119; -pub const SYS_clone: c_long = 120; -pub const SYS_setdomainname: c_long = 121; -pub const SYS_uname: c_long = 122; -pub const SYS_modify_ldt: c_long = 123; -pub const SYS_adjtimex: c_long = 124; -pub const SYS_mprotect: c_long = 125; -pub const SYS_sigprocmask: c_long = 126; -#[deprecated(since = "0.2.70", note = "Functional up to 2.6 kernel")] -pub const SYS_create_module: c_long = 127; -pub const SYS_init_module: c_long = 128; -pub const SYS_delete_module: c_long = 129; -#[deprecated(since = "0.2.70", note = "Functional up to 2.6 kernel")] -pub const SYS_get_kernel_syms: c_long = 130; -pub const SYS_quotactl: c_long = 131; -pub const SYS_getpgid: c_long = 132; -pub const SYS_fchdir: c_long = 133; -pub const SYS_bdflush: c_long = 134; -pub const SYS_sysfs: c_long = 135; -pub const SYS_personality: c_long = 136; -pub const SYS_afs_syscall: c_long = 137; /* Syscall for Andrew File System */ -pub const SYS_setfsuid: c_long = 138; -pub const SYS_setfsgid: c_long = 
139; -pub const SYS__llseek: c_long = 140; -pub const SYS_getdents: c_long = 141; -pub const SYS__newselect: c_long = 142; -pub const SYS_flock: c_long = 143; -pub const SYS_msync: c_long = 144; -pub const SYS_readv: c_long = 145; -pub const SYS_writev: c_long = 146; -pub const SYS_getsid: c_long = 147; -pub const SYS_fdatasync: c_long = 148; -pub const SYS__sysctl: c_long = 149; -pub const SYS_mlock: c_long = 150; -pub const SYS_munlock: c_long = 151; -pub const SYS_mlockall: c_long = 152; -pub const SYS_munlockall: c_long = 153; -pub const SYS_sched_setparam: c_long = 154; -pub const SYS_sched_getparam: c_long = 155; -pub const SYS_sched_setscheduler: c_long = 156; -pub const SYS_sched_getscheduler: c_long = 157; -pub const SYS_sched_yield: c_long = 158; -pub const SYS_sched_get_priority_max: c_long = 159; -pub const SYS_sched_get_priority_min: c_long = 160; -pub const SYS_sched_rr_get_interval: c_long = 161; -pub const SYS_nanosleep: c_long = 162; -pub const SYS_mremap: c_long = 163; -pub const SYS_setresuid: c_long = 164; -pub const SYS_getresuid: c_long = 165; -#[deprecated(since = "0.2.70", note = "Functional up to 2.6 kernel")] -pub const SYS_query_module: c_long = 166; -pub const SYS_poll: c_long = 167; -pub const SYS_nfsservctl: c_long = 168; -pub const SYS_setresgid: c_long = 169; -pub const SYS_getresgid: c_long = 170; -pub const SYS_prctl: c_long = 171; -pub const SYS_rt_sigreturn: c_long = 172; -pub const SYS_rt_sigaction: c_long = 173; -pub const SYS_rt_sigprocmask: c_long = 174; -pub const SYS_rt_sigpending: c_long = 175; -pub const SYS_rt_sigtimedwait: c_long = 176; -pub const SYS_rt_sigqueueinfo: c_long = 177; -pub const SYS_rt_sigsuspend: c_long = 178; -pub const SYS_pread64: c_long = 179; -pub const SYS_pwrite64: c_long = 180; -pub const SYS_chown: c_long = 181; -pub const SYS_getcwd: c_long = 182; -pub const SYS_capget: c_long = 183; -pub const SYS_capset: c_long = 184; -pub const SYS_sigaltstack: c_long = 185; -pub const SYS_sendfile: c_long = 186; -pub const SYS_getpmsg: c_long = 187; /* some people actually want streams */ -pub const SYS_putpmsg: c_long = 188; /* some people actually want streams */ -pub const SYS_vfork: c_long = 189; -pub const SYS_ugetrlimit: c_long = 190; /* SuS compliant getrlimit */ -pub const SYS_readahead: c_long = 191; -pub const SYS_mmap2: c_long = 192; -pub const SYS_truncate64: c_long = 193; -pub const SYS_ftruncate64: c_long = 194; -pub const SYS_stat64: c_long = 195; -pub const SYS_lstat64: c_long = 196; -pub const SYS_fstat64: c_long = 197; -pub const SYS_pciconfig_read: c_long = 198; -pub const SYS_pciconfig_write: c_long = 199; -pub const SYS_pciconfig_iobase: c_long = 200; -pub const SYS_multiplexer: c_long = 201; -pub const SYS_getdents64: c_long = 202; -pub const SYS_pivot_root: c_long = 203; -pub const SYS_fcntl64: c_long = 204; -pub const SYS_madvise: c_long = 205; -pub const SYS_mincore: c_long = 206; -pub const SYS_gettid: c_long = 207; -pub const SYS_tkill: c_long = 208; -pub const SYS_setxattr: c_long = 209; -pub const SYS_lsetxattr: c_long = 210; -pub const SYS_fsetxattr: c_long = 211; -pub const SYS_getxattr: c_long = 212; -pub const SYS_lgetxattr: c_long = 213; -pub const SYS_fgetxattr: c_long = 214; -pub const SYS_listxattr: c_long = 215; -pub const SYS_llistxattr: c_long = 216; -pub const SYS_flistxattr: c_long = 217; -pub const SYS_removexattr: c_long = 218; -pub const SYS_lremovexattr: c_long = 219; -pub const SYS_fremovexattr: c_long = 220; -pub const SYS_futex: c_long = 221; -pub const SYS_sched_setaffinity: c_long = 
222; -pub const SYS_sched_getaffinity: c_long = 223; -pub const SYS_tuxcall: c_long = 225; -pub const SYS_sendfile64: c_long = 226; -pub const SYS_io_setup: c_long = 227; -pub const SYS_io_destroy: c_long = 228; -pub const SYS_io_getevents: c_long = 229; -pub const SYS_io_submit: c_long = 230; -pub const SYS_io_cancel: c_long = 231; -pub const SYS_set_tid_address: c_long = 232; -pub const SYS_fadvise64: c_long = 233; -pub const SYS_exit_group: c_long = 234; -pub const SYS_lookup_dcookie: c_long = 235; -pub const SYS_epoll_create: c_long = 236; -pub const SYS_epoll_ctl: c_long = 237; -pub const SYS_epoll_wait: c_long = 238; -pub const SYS_remap_file_pages: c_long = 239; -pub const SYS_timer_create: c_long = 240; -pub const SYS_timer_settime: c_long = 241; -pub const SYS_timer_gettime: c_long = 242; -pub const SYS_timer_getoverrun: c_long = 243; -pub const SYS_timer_delete: c_long = 244; -pub const SYS_clock_settime: c_long = 245; -pub const SYS_clock_gettime: c_long = 246; -pub const SYS_clock_getres: c_long = 247; -pub const SYS_clock_nanosleep: c_long = 248; -pub const SYS_swapcontext: c_long = 249; -pub const SYS_tgkill: c_long = 250; -pub const SYS_utimes: c_long = 251; -pub const SYS_statfs64: c_long = 252; -pub const SYS_fstatfs64: c_long = 253; -pub const SYS_fadvise64_64: c_long = 254; -pub const SYS_rtas: c_long = 255; -pub const SYS_sys_debug_setcontext: c_long = 256; -pub const SYS_migrate_pages: c_long = 258; -pub const SYS_mbind: c_long = 259; -pub const SYS_get_mempolicy: c_long = 260; -pub const SYS_set_mempolicy: c_long = 261; -pub const SYS_mq_open: c_long = 262; -pub const SYS_mq_unlink: c_long = 263; -pub const SYS_mq_timedsend: c_long = 264; -pub const SYS_mq_timedreceive: c_long = 265; -pub const SYS_mq_notify: c_long = 266; -pub const SYS_mq_getsetattr: c_long = 267; -pub const SYS_kexec_load: c_long = 268; -pub const SYS_add_key: c_long = 269; -pub const SYS_request_key: c_long = 270; -pub const SYS_keyctl: c_long = 271; -pub const SYS_waitid: c_long = 272; -pub const SYS_ioprio_set: c_long = 273; -pub const SYS_ioprio_get: c_long = 274; -pub const SYS_inotify_init: c_long = 275; -pub const SYS_inotify_add_watch: c_long = 276; -pub const SYS_inotify_rm_watch: c_long = 277; -pub const SYS_spu_run: c_long = 278; -pub const SYS_spu_create: c_long = 279; -pub const SYS_pselect6: c_long = 280; -pub const SYS_ppoll: c_long = 281; -pub const SYS_unshare: c_long = 282; -pub const SYS_splice: c_long = 283; -pub const SYS_tee: c_long = 284; -pub const SYS_vmsplice: c_long = 285; -pub const SYS_openat: c_long = 286; -pub const SYS_mkdirat: c_long = 287; -pub const SYS_mknodat: c_long = 288; -pub const SYS_fchownat: c_long = 289; -pub const SYS_futimesat: c_long = 290; -pub const SYS_fstatat64: c_long = 291; -pub const SYS_unlinkat: c_long = 292; -pub const SYS_renameat: c_long = 293; -pub const SYS_linkat: c_long = 294; -pub const SYS_symlinkat: c_long = 295; -pub const SYS_readlinkat: c_long = 296; -pub const SYS_fchmodat: c_long = 297; -pub const SYS_faccessat: c_long = 298; -pub const SYS_get_robust_list: c_long = 299; -pub const SYS_set_robust_list: c_long = 300; -pub const SYS_move_pages: c_long = 301; -pub const SYS_getcpu: c_long = 302; -pub const SYS_epoll_pwait: c_long = 303; -pub const SYS_utimensat: c_long = 304; -pub const SYS_signalfd: c_long = 305; -pub const SYS_timerfd_create: c_long = 306; -pub const SYS_eventfd: c_long = 307; -pub const SYS_sync_file_range2: c_long = 308; -pub const SYS_fallocate: c_long = 309; -pub const SYS_subpage_prot: c_long = 310; -pub 
const SYS_timerfd_settime: c_long = 311; -pub const SYS_timerfd_gettime: c_long = 312; -pub const SYS_signalfd4: c_long = 313; -pub const SYS_eventfd2: c_long = 314; -pub const SYS_epoll_create1: c_long = 315; -pub const SYS_dup3: c_long = 316; -pub const SYS_pipe2: c_long = 317; -pub const SYS_inotify_init1: c_long = 318; -pub const SYS_perf_event_open: c_long = 319; -pub const SYS_preadv: c_long = 320; -pub const SYS_pwritev: c_long = 321; -pub const SYS_rt_tgsigqueueinfo: c_long = 322; -pub const SYS_fanotify_init: c_long = 323; -pub const SYS_fanotify_mark: c_long = 324; -pub const SYS_prlimit64: c_long = 325; -pub const SYS_socket: c_long = 326; -pub const SYS_bind: c_long = 327; -pub const SYS_connect: c_long = 328; -pub const SYS_listen: c_long = 329; -pub const SYS_accept: c_long = 330; -pub const SYS_getsockname: c_long = 331; -pub const SYS_getpeername: c_long = 332; -pub const SYS_socketpair: c_long = 333; -pub const SYS_send: c_long = 334; -pub const SYS_sendto: c_long = 335; -pub const SYS_recv: c_long = 336; -pub const SYS_recvfrom: c_long = 337; -pub const SYS_shutdown: c_long = 338; -pub const SYS_setsockopt: c_long = 339; -pub const SYS_getsockopt: c_long = 340; -pub const SYS_sendmsg: c_long = 341; -pub const SYS_recvmsg: c_long = 342; -pub const SYS_recvmmsg: c_long = 343; -pub const SYS_accept4: c_long = 344; -pub const SYS_name_to_handle_at: c_long = 345; -pub const SYS_open_by_handle_at: c_long = 346; -pub const SYS_clock_adjtime: c_long = 347; -pub const SYS_syncfs: c_long = 348; -pub const SYS_sendmmsg: c_long = 349; -pub const SYS_setns: c_long = 350; -pub const SYS_process_vm_readv: c_long = 351; -pub const SYS_process_vm_writev: c_long = 352; -pub const SYS_finit_module: c_long = 353; -pub const SYS_kcmp: c_long = 354; -pub const SYS_sched_setattr: c_long = 355; -pub const SYS_sched_getattr: c_long = 356; -pub const SYS_renameat2: c_long = 357; -pub const SYS_seccomp: c_long = 358; -pub const SYS_getrandom: c_long = 359; -pub const SYS_memfd_create: c_long = 360; -pub const SYS_bpf: c_long = 361; -pub const SYS_execveat: c_long = 362; -pub const SYS_switch_endian: c_long = 363; -pub const SYS_userfaultfd: c_long = 364; -pub const SYS_membarrier: c_long = 365; -pub const SYS_mlock2: c_long = 378; -pub const SYS_copy_file_range: c_long = 379; -pub const SYS_preadv2: c_long = 380; -pub const SYS_pwritev2: c_long = 381; -pub const SYS_kexec_file_load: c_long = 382; -pub const SYS_statx: c_long = 383; -pub const SYS_rseq: c_long = 387; -pub const SYS_pidfd_send_signal: c_long = 424; -pub const SYS_io_uring_setup: c_long = 425; -pub const SYS_io_uring_enter: c_long = 426; -pub const SYS_io_uring_register: c_long = 427; -pub const SYS_open_tree: c_long = 428; -pub const SYS_move_mount: c_long = 429; -pub const SYS_fsopen: c_long = 430; -pub const SYS_fsconfig: c_long = 431; -pub const SYS_fsmount: c_long = 432; -pub const SYS_fspick: c_long = 433; -pub const SYS_pidfd_open: c_long = 434; -pub const SYS_clone3: c_long = 435; -pub const SYS_close_range: c_long = 436; -pub const SYS_openat2: c_long = 437; -pub const SYS_pidfd_getfd: c_long = 438; -pub const SYS_faccessat2: c_long = 439; -pub const SYS_process_madvise: c_long = 440; -pub const SYS_epoll_pwait2: c_long = 441; -pub const SYS_mount_setattr: c_long = 442; -pub const SYS_quotactl_fd: c_long = 443; -pub const SYS_landlock_create_ruleset: c_long = 444; -pub const SYS_landlock_add_rule: c_long = 445; -pub const SYS_landlock_restrict_self: c_long = 446; -pub const SYS_memfd_secret: c_long = 447; -pub const 
SYS_process_mrelease: c_long = 448; -pub const SYS_futex_waitv: c_long = 449; -pub const SYS_set_mempolicy_home_node: c_long = 450; -pub const SYS_mseal: c_long = 462; diff --git a/vendor/libc/src/unix/linux_like/linux/gnu/b32/riscv32/mod.rs b/vendor/libc/src/unix/linux_like/linux/gnu/b32/riscv32/mod.rs deleted file mode 100644 index b04ee50462745e..00000000000000 --- a/vendor/libc/src/unix/linux_like/linux/gnu/b32/riscv32/mod.rs +++ /dev/null @@ -1,808 +0,0 @@ -//! RISC-V-specific definitions for 32-bit linux-like values - -use crate::prelude::*; -use crate::{off64_t, off_t}; - -pub type wchar_t = c_int; - -s! { - pub struct msqid_ds { - pub msg_perm: crate::ipc_perm, - pub msg_stime: crate::time_t, - pub msg_rtime: crate::time_t, - pub msg_ctime: crate::time_t, - pub __msg_cbytes: c_ulong, - pub msg_qnum: crate::msgqnum_t, - pub msg_qbytes: crate::msglen_t, - pub msg_lspid: crate::pid_t, - pub msg_lrpid: crate::pid_t, - __glibc_reserved4: c_ulong, - __glibc_reserved5: c_ulong, - } - - pub struct stat64 { - pub st_dev: crate::dev_t, - pub st_ino: crate::ino64_t, - pub st_mode: crate::mode_t, - pub st_nlink: crate::nlink_t, - pub st_uid: crate::uid_t, - pub st_gid: crate::gid_t, - pub st_rdev: crate::dev_t, - pub __pad1: crate::dev_t, - pub st_size: off64_t, - pub st_blksize: crate::blksize_t, - pub __pad2: c_int, - pub st_blocks: crate::blkcnt64_t, - pub st_atime: crate::time_t, - pub st_atime_nsec: c_long, - pub st_mtime: crate::time_t, - pub st_mtime_nsec: c_long, - pub st_ctime: crate::time_t, - pub st_ctime_nsec: c_long, - __unused: [c_int; 2], - } - - pub struct statfs { - pub f_type: c_long, - pub f_bsize: c_long, - pub f_blocks: crate::fsblkcnt_t, - pub f_bfree: crate::fsblkcnt_t, - pub f_bavail: crate::fsblkcnt_t, - pub f_files: crate::fsfilcnt_t, - pub f_ffree: crate::fsfilcnt_t, - pub f_fsid: crate::fsid_t, - pub f_namelen: c_long, - pub f_frsize: c_long, - pub f_flags: c_long, - pub f_spare: [c_long; 4], - } - - pub struct statfs64 { - pub f_type: c_long, - pub f_bsize: c_long, - pub f_blocks: crate::fsblkcnt64_t, - pub f_bfree: crate::fsblkcnt64_t, - pub f_bavail: crate::fsblkcnt64_t, - pub f_files: crate::fsfilcnt64_t, - pub f_ffree: crate::fsfilcnt64_t, - pub f_fsid: crate::fsid_t, - pub f_namelen: c_long, - pub f_frsize: c_long, - pub f_flags: c_long, - pub f_spare: [c_long; 4], - } - - pub struct statvfs64 { - pub f_bsize: c_ulong, - pub f_frsize: c_ulong, - pub f_blocks: crate::fsblkcnt64_t, - pub f_bfree: crate::fsblkcnt64_t, - pub f_bavail: crate::fsblkcnt64_t, - pub f_files: crate::fsfilcnt64_t, - pub f_ffree: crate::fsfilcnt64_t, - pub f_favail: crate::fsfilcnt64_t, - pub f_fsid: c_ulong, - pub f_flag: c_ulong, - pub f_namemax: c_ulong, - pub __f_spare: [c_int; 6], - } - - pub struct siginfo_t { - pub si_signo: c_int, - pub si_errno: c_int, - pub si_code: c_int, - #[doc(hidden)] - #[deprecated( - since = "0.2.54", - note = "Please leave a comment on \ - https://github.com/rust-lang/libc/pull/1316 if you're using \ - this field" - )] - pub _pad: [c_int; 29], - _align: [u64; 0], - } - - pub struct stack_t { - pub ss_sp: *mut c_void, - pub ss_flags: c_int, - pub ss_size: size_t, - } - - // FIXME(1.0): This should not implement `PartialEq` - #[allow(unpredictable_function_pointer_comparisons)] - pub struct sigaction { - pub sa_sigaction: crate::sighandler_t, - pub sa_mask: crate::sigset_t, - pub sa_flags: c_int, - pub sa_restorer: Option, - } - - pub struct ipc_perm { - pub __key: crate::key_t, - pub uid: crate::uid_t, - pub gid: crate::gid_t, - pub cuid: crate::uid_t, - 
pub cgid: crate::gid_t, - pub mode: c_ushort, - __pad1: c_ushort, - pub __seq: c_ushort, - __pad2: c_ushort, - __unused1: c_ulong, - __unused2: c_ulong, - } - - pub struct shmid_ds { - pub shm_perm: crate::ipc_perm, - pub shm_segsz: size_t, - pub shm_atime: crate::time_t, - pub shm_dtime: crate::time_t, - pub shm_ctime: crate::time_t, - pub shm_cpid: crate::pid_t, - pub shm_lpid: crate::pid_t, - pub shm_nattch: crate::shmatt_t, - __unused5: c_ulong, - __unused6: c_ulong, - } - - pub struct flock { - pub l_type: c_short, - pub l_whence: c_short, - pub l_start: off_t, - pub l_len: off_t, - pub l_pid: crate::pid_t, - } - - pub struct flock64 { - pub l_type: c_short, - pub l_whence: c_short, - pub l_start: off64_t, - pub l_len: off64_t, - pub l_pid: crate::pid_t, - } - - pub struct user_regs_struct { - pub pc: c_ulong, - pub ra: c_ulong, - pub sp: c_ulong, - pub gp: c_ulong, - pub tp: c_ulong, - pub t0: c_ulong, - pub t1: c_ulong, - pub t2: c_ulong, - pub s0: c_ulong, - pub s1: c_ulong, - pub a0: c_ulong, - pub a1: c_ulong, - pub a2: c_ulong, - pub a3: c_ulong, - pub a4: c_ulong, - pub a5: c_ulong, - pub a6: c_ulong, - pub a7: c_ulong, - pub s2: c_ulong, - pub s3: c_ulong, - pub s4: c_ulong, - pub s5: c_ulong, - pub s6: c_ulong, - pub s7: c_ulong, - pub s8: c_ulong, - pub s9: c_ulong, - pub s10: c_ulong, - pub s11: c_ulong, - pub t3: c_ulong, - pub t4: c_ulong, - pub t5: c_ulong, - pub t6: c_ulong, - } -} - -s_no_extra_traits! { - pub struct ucontext_t { - pub __uc_flags: c_ulong, - pub uc_link: *mut ucontext_t, - pub uc_stack: crate::stack_t, - pub uc_sigmask: crate::sigset_t, - pub uc_mcontext: mcontext_t, - } - - #[repr(align(16))] - pub struct mcontext_t { - pub __gregs: [c_ulong; 32], - pub __fpregs: __riscv_mc_fp_state, - } - - pub union __riscv_mc_fp_state { - pub __f: __riscv_mc_f_ext_state, - pub __d: __riscv_mc_d_ext_state, - pub __q: __riscv_mc_q_ext_state, - } - - pub struct __riscv_mc_f_ext_state { - pub __f: [c_uint; 32], - pub __fcsr: c_uint, - } - - pub struct __riscv_mc_d_ext_state { - pub __f: [c_ulonglong; 32], - pub __fcsr: c_uint, - } - - #[repr(align(16))] - pub struct __riscv_mc_q_ext_state { - pub __f: [c_ulonglong; 64], - pub __fcsr: c_uint, - pub __glibc_reserved: [c_uint; 3], - } -} - -pub const O_LARGEFILE: c_int = 0; -pub const VEOF: usize = 4; -pub const RTLD_DEEPBIND: c_int = 0x8; -pub const RTLD_GLOBAL: c_int = 0x100; -pub const RTLD_NOLOAD: c_int = 0x4; -pub const O_APPEND: c_int = 1024; -pub const O_CREAT: c_int = 64; -pub const O_EXCL: c_int = 128; -pub const O_NOCTTY: c_int = 256; -pub const O_NONBLOCK: c_int = 2048; -pub const O_SYNC: c_int = 1052672; -pub const O_RSYNC: c_int = 1052672; -pub const O_DSYNC: c_int = 4096; -pub const O_FSYNC: c_int = 1052672; -pub const MADV_SOFT_OFFLINE: c_int = 101; -pub const MAP_GROWSDOWN: c_int = 256; -pub const EDEADLK: c_int = 35; -pub const ENAMETOOLONG: c_int = 36; -pub const ENOLCK: c_int = 37; -pub const ENOSYS: c_int = 38; -pub const ENOTEMPTY: c_int = 39; -pub const ELOOP: c_int = 40; -pub const ENOMSG: c_int = 42; -pub const EIDRM: c_int = 43; -pub const ECHRNG: c_int = 44; -pub const EL2NSYNC: c_int = 45; -pub const EL3HLT: c_int = 46; -pub const EL3RST: c_int = 47; -pub const ELNRNG: c_int = 48; -pub const EUNATCH: c_int = 49; -pub const ENOCSI: c_int = 50; -pub const EL2HLT: c_int = 51; -pub const EBADE: c_int = 52; -pub const EBADR: c_int = 53; -pub const EXFULL: c_int = 54; -pub const ENOANO: c_int = 55; -pub const EBADRQC: c_int = 56; -pub const EBADSLT: c_int = 57; -pub const EMULTIHOP: c_int = 72; -pub 
const EOVERFLOW: c_int = 75; -pub const ENOTUNIQ: c_int = 76; -pub const EBADFD: c_int = 77; -pub const EBADMSG: c_int = 74; -pub const EREMCHG: c_int = 78; -pub const ELIBACC: c_int = 79; -pub const ELIBBAD: c_int = 80; -pub const ELIBSCN: c_int = 81; -pub const ELIBMAX: c_int = 82; -pub const ELIBEXEC: c_int = 83; -pub const EILSEQ: c_int = 84; -pub const ERESTART: c_int = 85; -pub const ESTRPIPE: c_int = 86; -pub const EUSERS: c_int = 87; -pub const ENOTSOCK: c_int = 88; -pub const EDESTADDRREQ: c_int = 89; -pub const EMSGSIZE: c_int = 90; -pub const EPROTOTYPE: c_int = 91; -pub const ENOPROTOOPT: c_int = 92; -pub const EPROTONOSUPPORT: c_int = 93; -pub const ESOCKTNOSUPPORT: c_int = 94; -pub const EOPNOTSUPP: c_int = 95; -pub const EPFNOSUPPORT: c_int = 96; -pub const EAFNOSUPPORT: c_int = 97; -pub const EADDRINUSE: c_int = 98; -pub const EADDRNOTAVAIL: c_int = 99; -pub const ENETDOWN: c_int = 100; -pub const ENETUNREACH: c_int = 101; -pub const ENETRESET: c_int = 102; -pub const ECONNABORTED: c_int = 103; -pub const ECONNRESET: c_int = 104; -pub const ENOBUFS: c_int = 105; -pub const EISCONN: c_int = 106; -pub const ENOTCONN: c_int = 107; -pub const ESHUTDOWN: c_int = 108; -pub const ETOOMANYREFS: c_int = 109; -pub const ETIMEDOUT: c_int = 110; -pub const ECONNREFUSED: c_int = 111; -pub const EHOSTDOWN: c_int = 112; -pub const EHOSTUNREACH: c_int = 113; -pub const EALREADY: c_int = 114; -pub const EINPROGRESS: c_int = 115; -pub const ESTALE: c_int = 116; -pub const EDQUOT: c_int = 122; -pub const ENOMEDIUM: c_int = 123; -pub const EMEDIUMTYPE: c_int = 124; -pub const ECANCELED: c_int = 125; -pub const ENOKEY: c_int = 126; -pub const EKEYEXPIRED: c_int = 127; -pub const EKEYREVOKED: c_int = 128; -pub const EKEYREJECTED: c_int = 129; -pub const EOWNERDEAD: c_int = 130; -pub const ENOTRECOVERABLE: c_int = 131; -pub const EHWPOISON: c_int = 133; -pub const ERFKILL: c_int = 132; - -pub const SOCK_STREAM: c_int = 1; -pub const SOCK_DGRAM: c_int = 2; -pub const SA_SIGINFO: c_int = 4; -pub const SA_NOCLDWAIT: c_int = 2; -pub const SIGTTIN: c_int = 21; -pub const SIGTTOU: c_int = 22; -pub const SIGXCPU: c_int = 24; -pub const SIGXFSZ: c_int = 25; -pub const SIGVTALRM: c_int = 26; -pub const SIGPROF: c_int = 27; -pub const SIGWINCH: c_int = 28; -pub const SIGCHLD: c_int = 17; -pub const SIGBUS: c_int = 7; -pub const SIGUSR1: c_int = 10; -pub const SIGUSR2: c_int = 12; -pub const SIGCONT: c_int = 18; -pub const SIGSTOP: c_int = 19; -pub const SIGTSTP: c_int = 20; -pub const SIGURG: c_int = 23; -pub const SIGIO: c_int = 29; -pub const SIGSYS: c_int = 31; -pub const SIGSTKFLT: c_int = 16; -pub const SIGPOLL: c_int = 29; -pub const SIGPWR: c_int = 30; -pub const SIG_SETMASK: c_int = 2; -pub const SIG_BLOCK: c_int = 0; -pub const SIG_UNBLOCK: c_int = 1; -pub const POLLWRNORM: c_short = 256; -pub const POLLWRBAND: c_short = 512; -pub const O_ASYNC: c_int = 8192; -pub const O_NDELAY: c_int = 2048; -pub const EFD_NONBLOCK: c_int = 2048; -pub const F_GETLK: c_int = 5; -pub const F_GETOWN: c_int = 9; -pub const F_SETOWN: c_int = 8; -pub const SFD_NONBLOCK: c_int = 2048; -pub const TCSANOW: c_int = 0; -pub const TCSADRAIN: c_int = 1; -pub const TCSAFLUSH: c_int = 2; - -pub const __SIZEOF_PTHREAD_CONDATTR_T: usize = 4; -pub const __SIZEOF_PTHREAD_MUTEXATTR_T: usize = 4; -pub const __SIZEOF_PTHREAD_BARRIERATTR_T: usize = 4; -pub const O_DIRECT: c_int = 16384; -pub const O_DIRECTORY: c_int = 65536; -pub const O_NOFOLLOW: c_int = 131072; -pub const MAP_HUGETLB: c_int = 262144; -pub const MAP_LOCKED: c_int = 
8192; -pub const MAP_NORESERVE: c_int = 16384; -pub const MAP_ANON: c_int = 32; -pub const MAP_ANONYMOUS: c_int = 32; -pub const MAP_DENYWRITE: c_int = 2048; -pub const MAP_EXECUTABLE: c_int = 4096; -pub const MAP_POPULATE: c_int = 32768; -pub const MAP_NONBLOCK: c_int = 65536; -pub const MAP_STACK: c_int = 131072; -pub const MAP_SYNC: c_int = 0x080000; -pub const EDEADLOCK: c_int = 35; -pub const EUCLEAN: c_int = 117; -pub const ENOTNAM: c_int = 118; -pub const ENAVAIL: c_int = 119; -pub const EISNAM: c_int = 120; -pub const EREMOTEIO: c_int = 121; -pub const MCL_CURRENT: c_int = 1; -pub const MCL_FUTURE: c_int = 2; -pub const MCL_ONFAULT: c_int = 4; -pub const SIGSTKSZ: size_t = 8192; -pub const MINSIGSTKSZ: size_t = 2048; -pub const CBAUD: crate::tcflag_t = 4111; -pub const TAB1: crate::tcflag_t = 2048; -pub const TAB2: crate::tcflag_t = 4096; -pub const TAB3: crate::tcflag_t = 6144; -pub const CR1: crate::tcflag_t = 512; -pub const CR2: crate::tcflag_t = 1024; -pub const CR3: crate::tcflag_t = 1536; -pub const FF1: crate::tcflag_t = 32768; -pub const BS1: crate::tcflag_t = 8192; -pub const VT1: crate::tcflag_t = 16384; -pub const VWERASE: usize = 14; -pub const VREPRINT: usize = 12; -pub const VSUSP: usize = 10; -pub const VSTART: usize = 8; -pub const VSTOP: usize = 9; -pub const VDISCARD: usize = 13; -pub const VTIME: usize = 5; -pub const IXON: crate::tcflag_t = 1024; -pub const IXOFF: crate::tcflag_t = 4096; -pub const ONLCR: crate::tcflag_t = 4; -pub const CSIZE: crate::tcflag_t = 48; -pub const CS6: crate::tcflag_t = 16; -pub const CS7: crate::tcflag_t = 32; -pub const CS8: crate::tcflag_t = 48; -pub const CSTOPB: crate::tcflag_t = 64; -pub const CREAD: crate::tcflag_t = 128; -pub const PARENB: crate::tcflag_t = 256; -pub const PARODD: crate::tcflag_t = 512; -pub const HUPCL: crate::tcflag_t = 1024; -pub const CLOCAL: crate::tcflag_t = 2048; -pub const ECHOKE: crate::tcflag_t = 2048; -pub const ECHOE: crate::tcflag_t = 16; -pub const ECHOK: crate::tcflag_t = 32; -pub const ECHONL: crate::tcflag_t = 64; -pub const ECHOPRT: crate::tcflag_t = 1024; -pub const ECHOCTL: crate::tcflag_t = 512; -pub const ISIG: crate::tcflag_t = 1; -pub const ICANON: crate::tcflag_t = 2; -pub const PENDIN: crate::tcflag_t = 16384; -pub const NOFLSH: crate::tcflag_t = 128; -pub const CIBAUD: crate::tcflag_t = 269418496; -pub const CBAUDEX: crate::tcflag_t = 4096; -pub const VSWTC: usize = 7; -pub const OLCUC: crate::tcflag_t = 2; -pub const NLDLY: crate::tcflag_t = 256; -pub const CRDLY: crate::tcflag_t = 1536; -pub const TABDLY: crate::tcflag_t = 6144; -pub const BSDLY: crate::tcflag_t = 8192; -pub const FFDLY: crate::tcflag_t = 32768; -pub const VTDLY: crate::tcflag_t = 16384; -pub const XTABS: crate::tcflag_t = 6144; -pub const B0: crate::speed_t = 0; -pub const B50: crate::speed_t = 1; -pub const B75: crate::speed_t = 2; -pub const B110: crate::speed_t = 3; -pub const B134: crate::speed_t = 4; -pub const B150: crate::speed_t = 5; -pub const B200: crate::speed_t = 6; -pub const B300: crate::speed_t = 7; -pub const B600: crate::speed_t = 8; -pub const B1200: crate::speed_t = 9; -pub const B1800: crate::speed_t = 10; -pub const B2400: crate::speed_t = 11; -pub const B4800: crate::speed_t = 12; -pub const B9600: crate::speed_t = 13; -pub const B19200: crate::speed_t = 14; -pub const B38400: crate::speed_t = 15; -pub const EXTA: crate::speed_t = 14; -pub const EXTB: crate::speed_t = 15; -pub const B57600: crate::speed_t = 4097; -pub const B115200: crate::speed_t = 4098; -pub const B230400: crate::speed_t 
= 4099; -pub const B460800: crate::speed_t = 4100; -pub const B500000: crate::speed_t = 4101; -pub const B576000: crate::speed_t = 4102; -pub const B921600: crate::speed_t = 4103; -pub const B1000000: crate::speed_t = 4104; -pub const B1152000: crate::speed_t = 4105; -pub const B1500000: crate::speed_t = 4106; -pub const B2000000: crate::speed_t = 4107; -pub const B2500000: crate::speed_t = 4108; -pub const B3000000: crate::speed_t = 4109; -pub const B3500000: crate::speed_t = 4110; -pub const B4000000: crate::speed_t = 4111; -pub const VEOL: usize = 11; -pub const VEOL2: usize = 16; -pub const VMIN: usize = 6; -pub const IEXTEN: crate::tcflag_t = 32768; -pub const TOSTOP: crate::tcflag_t = 256; -pub const FLUSHO: crate::tcflag_t = 4096; -pub const EXTPROC: crate::tcflag_t = 65536; -pub const __SIZEOF_PTHREAD_MUTEX_T: usize = 40; -pub const __SIZEOF_PTHREAD_RWLOCK_T: usize = 56; -pub const __SIZEOF_PTHREAD_BARRIER_T: usize = 32; -pub const NGREG: usize = 32; -pub const REG_PC: usize = 0; -pub const REG_RA: usize = 1; -pub const REG_SP: usize = 2; -pub const REG_TP: usize = 4; -pub const REG_S0: usize = 8; -pub const REG_S1: usize = 9; -pub const REG_A0: usize = 10; -pub const REG_S2: usize = 18; -pub const REG_NARGS: usize = 8; - -pub const SYS_read: c_long = 63; -pub const SYS_write: c_long = 64; -pub const SYS_close: c_long = 57; -pub const SYS_fstat: c_long = 80; -pub const SYS_lseek: c_long = 62; -pub const SYS_mmap: c_long = 222; -pub const SYS_mprotect: c_long = 226; -pub const SYS_munmap: c_long = 215; -pub const SYS_brk: c_long = 214; -pub const SYS_rt_sigaction: c_long = 134; -pub const SYS_rt_sigprocmask: c_long = 135; -pub const SYS_rt_sigreturn: c_long = 139; -pub const SYS_ioctl: c_long = 29; -pub const SYS_pread64: c_long = 67; -pub const SYS_pwrite64: c_long = 68; -pub const SYS_readv: c_long = 65; -pub const SYS_writev: c_long = 66; -pub const SYS_sched_yield: c_long = 124; -pub const SYS_mremap: c_long = 216; -pub const SYS_msync: c_long = 227; -pub const SYS_mincore: c_long = 232; -pub const SYS_madvise: c_long = 233; -pub const SYS_shmget: c_long = 194; -pub const SYS_shmat: c_long = 196; -pub const SYS_shmctl: c_long = 195; -pub const SYS_dup: c_long = 23; -pub const SYS_nanosleep: c_long = 101; -pub const SYS_getitimer: c_long = 102; -pub const SYS_setitimer: c_long = 103; -pub const SYS_getpid: c_long = 172; -pub const SYS_sendfile: c_long = 71; -pub const SYS_socket: c_long = 198; -pub const SYS_connect: c_long = 203; -pub const SYS_accept: c_long = 202; -pub const SYS_sendto: c_long = 206; -pub const SYS_recvfrom: c_long = 207; -pub const SYS_sendmsg: c_long = 211; -pub const SYS_recvmsg: c_long = 212; -pub const SYS_shutdown: c_long = 210; -pub const SYS_bind: c_long = 200; -pub const SYS_listen: c_long = 201; -pub const SYS_getsockname: c_long = 204; -pub const SYS_getpeername: c_long = 205; -pub const SYS_socketpair: c_long = 199; -pub const SYS_setsockopt: c_long = 208; -pub const SYS_getsockopt: c_long = 209; -pub const SYS_clone: c_long = 220; -pub const SYS_execve: c_long = 221; -pub const SYS_exit: c_long = 93; -pub const SYS_wait4: c_long = 260; -pub const SYS_kill: c_long = 129; -pub const SYS_uname: c_long = 160; -pub const SYS_semget: c_long = 190; -pub const SYS_semop: c_long = 193; -pub const SYS_semctl: c_long = 191; -pub const SYS_shmdt: c_long = 197; -pub const SYS_msgget: c_long = 186; -pub const SYS_msgsnd: c_long = 189; -pub const SYS_msgrcv: c_long = 188; -pub const SYS_msgctl: c_long = 187; -pub const SYS_fcntl: c_long = 25; -pub const 
SYS_flock: c_long = 32; -pub const SYS_fsync: c_long = 82; -pub const SYS_fdatasync: c_long = 83; -pub const SYS_truncate: c_long = 45; -pub const SYS_ftruncate: c_long = 46; -pub const SYS_getcwd: c_long = 17; -pub const SYS_chdir: c_long = 49; -pub const SYS_fchdir: c_long = 50; -pub const SYS_fchmod: c_long = 52; -pub const SYS_fchown: c_long = 55; -pub const SYS_umask: c_long = 166; -pub const SYS_gettimeofday: c_long = 169; -pub const SYS_getrlimit: c_long = 163; -pub const SYS_getrusage: c_long = 165; -pub const SYS_sysinfo: c_long = 179; -pub const SYS_times: c_long = 153; -pub const SYS_ptrace: c_long = 117; -pub const SYS_getuid: c_long = 174; -pub const SYS_syslog: c_long = 116; -pub const SYS_getgid: c_long = 176; -pub const SYS_setuid: c_long = 146; -pub const SYS_setgid: c_long = 144; -pub const SYS_geteuid: c_long = 175; -pub const SYS_getegid: c_long = 177; -pub const SYS_setpgid: c_long = 154; -pub const SYS_getppid: c_long = 173; -pub const SYS_setsid: c_long = 157; -pub const SYS_setreuid: c_long = 145; -pub const SYS_setregid: c_long = 143; -pub const SYS_getgroups: c_long = 158; -pub const SYS_setgroups: c_long = 159; -pub const SYS_setresuid: c_long = 147; -pub const SYS_getresuid: c_long = 148; -pub const SYS_setresgid: c_long = 149; -pub const SYS_getresgid: c_long = 150; -pub const SYS_getpgid: c_long = 155; -pub const SYS_setfsuid: c_long = 151; -pub const SYS_setfsgid: c_long = 152; -pub const SYS_getsid: c_long = 156; -pub const SYS_capget: c_long = 90; -pub const SYS_capset: c_long = 91; -pub const SYS_rt_sigpending: c_long = 136; -pub const SYS_rt_sigtimedwait: c_long = 137; -pub const SYS_rt_sigqueueinfo: c_long = 138; -pub const SYS_rt_sigsuspend: c_long = 133; -pub const SYS_sigaltstack: c_long = 132; -pub const SYS_personality: c_long = 92; -pub const SYS_statfs: c_long = 43; -pub const SYS_fstatfs: c_long = 44; -pub const SYS_getpriority: c_long = 141; -pub const SYS_setpriority: c_long = 140; -pub const SYS_sched_setparam: c_long = 118; -pub const SYS_sched_getparam: c_long = 121; -pub const SYS_sched_setscheduler: c_long = 119; -pub const SYS_sched_getscheduler: c_long = 120; -pub const SYS_sched_get_priority_max: c_long = 125; -pub const SYS_sched_get_priority_min: c_long = 126; -pub const SYS_sched_rr_get_interval: c_long = 127; -pub const SYS_mlock: c_long = 228; -pub const SYS_munlock: c_long = 229; -pub const SYS_mlockall: c_long = 230; -pub const SYS_munlockall: c_long = 231; -pub const SYS_vhangup: c_long = 58; -pub const SYS_pivot_root: c_long = 41; -pub const SYS_prctl: c_long = 167; -pub const SYS_adjtimex: c_long = 171; -pub const SYS_setrlimit: c_long = 164; -pub const SYS_chroot: c_long = 51; -pub const SYS_sync: c_long = 81; -pub const SYS_acct: c_long = 89; -pub const SYS_settimeofday: c_long = 170; -pub const SYS_mount: c_long = 40; -pub const SYS_umount2: c_long = 39; -pub const SYS_swapon: c_long = 224; -pub const SYS_swapoff: c_long = 225; -pub const SYS_reboot: c_long = 142; -pub const SYS_sethostname: c_long = 161; -pub const SYS_setdomainname: c_long = 162; -pub const SYS_init_module: c_long = 105; -pub const SYS_delete_module: c_long = 106; -pub const SYS_quotactl: c_long = 60; -pub const SYS_nfsservctl: c_long = 42; -pub const SYS_gettid: c_long = 178; -pub const SYS_readahead: c_long = 213; -pub const SYS_setxattr: c_long = 5; -pub const SYS_lsetxattr: c_long = 6; -pub const SYS_fsetxattr: c_long = 7; -pub const SYS_getxattr: c_long = 8; -pub const SYS_lgetxattr: c_long = 9; -pub const SYS_fgetxattr: c_long = 10; -pub const 
SYS_listxattr: c_long = 11; -pub const SYS_llistxattr: c_long = 12; -pub const SYS_flistxattr: c_long = 13; -pub const SYS_removexattr: c_long = 14; -pub const SYS_lremovexattr: c_long = 15; -pub const SYS_fremovexattr: c_long = 16; -pub const SYS_tkill: c_long = 130; -pub const SYS_futex: c_long = 98; -pub const SYS_sched_setaffinity: c_long = 122; -pub const SYS_sched_getaffinity: c_long = 123; -pub const SYS_io_setup: c_long = 0; -pub const SYS_io_destroy: c_long = 1; -pub const SYS_io_getevents: c_long = 4; -pub const SYS_io_submit: c_long = 2; -pub const SYS_io_cancel: c_long = 3; -pub const SYS_lookup_dcookie: c_long = 18; -pub const SYS_remap_file_pages: c_long = 234; -pub const SYS_getdents64: c_long = 61; -pub const SYS_set_tid_address: c_long = 96; -pub const SYS_restart_syscall: c_long = 128; -pub const SYS_semtimedop: c_long = 192; -pub const SYS_fadvise64: c_long = 223; -pub const SYS_timer_create: c_long = 107; -pub const SYS_timer_settime: c_long = 110; -pub const SYS_timer_gettime: c_long = 108; -pub const SYS_timer_getoverrun: c_long = 109; -pub const SYS_timer_delete: c_long = 111; -pub const SYS_clock_settime: c_long = 112; -pub const SYS_clock_gettime: c_long = 113; -pub const SYS_clock_getres: c_long = 114; -pub const SYS_clock_nanosleep: c_long = 115; -pub const SYS_exit_group: c_long = 94; -pub const SYS_epoll_ctl: c_long = 21; -pub const SYS_tgkill: c_long = 131; -pub const SYS_mbind: c_long = 235; -pub const SYS_set_mempolicy: c_long = 237; -pub const SYS_get_mempolicy: c_long = 236; -pub const SYS_mq_open: c_long = 180; -pub const SYS_mq_unlink: c_long = 181; -pub const SYS_mq_timedsend: c_long = 182; -pub const SYS_mq_timedreceive: c_long = 183; -pub const SYS_mq_notify: c_long = 184; -pub const SYS_mq_getsetattr: c_long = 185; -pub const SYS_kexec_load: c_long = 104; -pub const SYS_waitid: c_long = 95; -pub const SYS_add_key: c_long = 217; -pub const SYS_request_key: c_long = 218; -pub const SYS_keyctl: c_long = 219; -pub const SYS_ioprio_set: c_long = 30; -pub const SYS_ioprio_get: c_long = 31; -pub const SYS_inotify_add_watch: c_long = 27; -pub const SYS_inotify_rm_watch: c_long = 28; -pub const SYS_migrate_pages: c_long = 238; -pub const SYS_openat: c_long = 56; -pub const SYS_mkdirat: c_long = 34; -pub const SYS_mknodat: c_long = 33; -pub const SYS_fchownat: c_long = 54; -pub const SYS_newfstatat: c_long = 79; -pub const SYS_unlinkat: c_long = 35; -pub const SYS_linkat: c_long = 37; -pub const SYS_symlinkat: c_long = 36; -pub const SYS_readlinkat: c_long = 78; -pub const SYS_fchmodat: c_long = 53; -pub const SYS_faccessat: c_long = 48; -pub const SYS_pselect6: c_long = 72; -pub const SYS_ppoll: c_long = 73; -pub const SYS_unshare: c_long = 97; -pub const SYS_set_robust_list: c_long = 99; -pub const SYS_get_robust_list: c_long = 100; -pub const SYS_splice: c_long = 76; -pub const SYS_tee: c_long = 77; -pub const SYS_sync_file_range: c_long = 84; -pub const SYS_vmsplice: c_long = 75; -pub const SYS_move_pages: c_long = 239; -pub const SYS_utimensat: c_long = 88; -pub const SYS_epoll_pwait: c_long = 22; -pub const SYS_timerfd_create: c_long = 85; -pub const SYS_fallocate: c_long = 47; -pub const SYS_timerfd_settime: c_long = 86; -pub const SYS_timerfd_gettime: c_long = 87; -pub const SYS_accept4: c_long = 242; -pub const SYS_signalfd4: c_long = 74; -pub const SYS_eventfd2: c_long = 19; -pub const SYS_epoll_create1: c_long = 20; -pub const SYS_dup3: c_long = 24; -pub const SYS_pipe2: c_long = 59; -pub const SYS_inotify_init1: c_long = 26; -pub const SYS_preadv: 
c_long = 69; -pub const SYS_pwritev: c_long = 70; -pub const SYS_rt_tgsigqueueinfo: c_long = 240; -pub const SYS_perf_event_open: c_long = 241; -pub const SYS_recvmmsg: c_long = 243; -pub const SYS_fanotify_init: c_long = 262; -pub const SYS_fanotify_mark: c_long = 263; -pub const SYS_prlimit64: c_long = 261; -pub const SYS_name_to_handle_at: c_long = 264; -pub const SYS_open_by_handle_at: c_long = 265; -pub const SYS_clock_adjtime: c_long = 266; -pub const SYS_syncfs: c_long = 267; -pub const SYS_sendmmsg: c_long = 269; -pub const SYS_setns: c_long = 268; -pub const SYS_getcpu: c_long = 168; -pub const SYS_process_vm_readv: c_long = 270; -pub const SYS_process_vm_writev: c_long = 271; -pub const SYS_kcmp: c_long = 272; -pub const SYS_finit_module: c_long = 273; -pub const SYS_sched_setattr: c_long = 274; -pub const SYS_sched_getattr: c_long = 275; -pub const SYS_renameat2: c_long = 276; -pub const SYS_seccomp: c_long = 277; -pub const SYS_getrandom: c_long = 278; -pub const SYS_memfd_create: c_long = 279; -pub const SYS_bpf: c_long = 280; -pub const SYS_execveat: c_long = 281; -pub const SYS_userfaultfd: c_long = 282; -pub const SYS_membarrier: c_long = 283; -pub const SYS_mlock2: c_long = 284; -pub const SYS_copy_file_range: c_long = 285; -pub const SYS_preadv2: c_long = 286; -pub const SYS_pwritev2: c_long = 287; -pub const SYS_pkey_mprotect: c_long = 288; -pub const SYS_pkey_alloc: c_long = 289; -pub const SYS_pkey_free: c_long = 290; -pub const SYS_statx: c_long = 291; -pub const SYS_rseq: c_long = 293; -pub const SYS_pidfd_send_signal: c_long = 424; -pub const SYS_io_uring_setup: c_long = 425; -pub const SYS_io_uring_enter: c_long = 426; -pub const SYS_io_uring_register: c_long = 427; -pub const SYS_open_tree: c_long = 428; -pub const SYS_move_mount: c_long = 429; -pub const SYS_fsopen: c_long = 430; -pub const SYS_fsconfig: c_long = 431; -pub const SYS_fsmount: c_long = 432; -pub const SYS_fspick: c_long = 433; -pub const SYS_pidfd_open: c_long = 434; -pub const SYS_clone3: c_long = 435; -pub const SYS_close_range: c_long = 436; -pub const SYS_openat2: c_long = 437; -pub const SYS_pidfd_getfd: c_long = 438; -pub const SYS_faccessat2: c_long = 439; -pub const SYS_process_madvise: c_long = 440; -pub const SYS_epoll_pwait2: c_long = 441; -pub const SYS_mount_setattr: c_long = 442; -pub const SYS_quotactl_fd: c_long = 443; -pub const SYS_landlock_create_ruleset: c_long = 444; -pub const SYS_landlock_add_rule: c_long = 445; -pub const SYS_landlock_restrict_self: c_long = 446; -pub const SYS_memfd_secret: c_long = 447; -pub const SYS_process_mrelease: c_long = 448; -pub const SYS_futex_waitv: c_long = 449; -pub const SYS_set_mempolicy_home_node: c_long = 450; diff --git a/vendor/libc/src/unix/linux_like/linux/gnu/b32/sparc/mod.rs b/vendor/libc/src/unix/linux_like/linux/gnu/b32/sparc/mod.rs deleted file mode 100644 index 801f31e2c0e340..00000000000000 --- a/vendor/libc/src/unix/linux_like/linux/gnu/b32/sparc/mod.rs +++ /dev/null @@ -1,865 +0,0 @@ -//! SPARC-specific definitions for 32-bit linux-like values - -use crate::prelude::*; -use crate::{off64_t, off_t}; - -pub type wchar_t = i32; - -s! 
{ - // FIXME(1.0): This should not implement `PartialEq` - #[allow(unpredictable_function_pointer_comparisons)] - pub struct sigaction { - pub sa_sigaction: crate::sighandler_t, - pub sa_mask: crate::sigset_t, - pub sa_flags: c_int, - pub sa_restorer: Option, - } - - pub struct statfs { - pub f_type: crate::__fsword_t, - pub f_bsize: crate::__fsword_t, - pub f_blocks: crate::fsblkcnt_t, - pub f_bfree: crate::fsblkcnt_t, - pub f_bavail: crate::fsblkcnt_t, - - pub f_files: crate::fsfilcnt_t, - pub f_ffree: crate::fsfilcnt_t, - pub f_fsid: crate::fsid_t, - - pub f_namelen: crate::__fsword_t, - pub f_frsize: crate::__fsword_t, - pub f_flags: crate::__fsword_t, - f_spare: [crate::__fsword_t; 4], - } - - pub struct siginfo_t { - pub si_signo: c_int, - pub si_errno: c_int, - pub si_code: c_int, - _pad: [c_int; 29], - _align: [usize; 0], - } - - pub struct flock { - pub l_type: c_short, - pub l_whence: c_short, - pub l_start: off_t, - pub l_len: off_t, - pub l_pid: crate::pid_t, - } - - pub struct flock64 { - pub l_type: c_short, - pub l_whence: c_short, - pub l_start: off64_t, - pub l_len: off64_t, - pub l_pid: crate::pid_t, - __reserved: c_short, - } - - pub struct stack_t { - pub ss_sp: *mut c_void, - pub ss_flags: c_int, - pub ss_size: size_t, - } - - pub struct stat { - pub st_dev: crate::dev_t, - #[cfg(not(gnu_file_offset_bits64))] - __pad1: c_ushort, - pub st_ino: crate::ino_t, - pub st_mode: crate::mode_t, - pub st_nlink: crate::nlink_t, - pub st_uid: crate::uid_t, - pub st_gid: crate::gid_t, - pub st_rdev: crate::dev_t, - __pad2: c_ushort, - pub st_size: off_t, - pub st_blksize: crate::blksize_t, - pub st_blocks: crate::blkcnt_t, - pub st_atime: crate::time_t, - pub st_atime_nsec: c_long, - pub st_mtime: crate::time_t, - pub st_mtime_nsec: c_long, - pub st_ctime: crate::time_t, - pub st_ctime_nsec: c_long, - __glibc_reserved4: c_ulong, - __glibc_reserved5: c_ulong, - } - - pub struct stat64 { - pub st_dev: crate::dev_t, - pub st_ino: crate::ino64_t, - pub st_mode: crate::mode_t, - pub st_nlink: crate::nlink_t, - pub st_uid: crate::uid_t, - pub st_gid: crate::gid_t, - pub st_rdev: crate::dev_t, - __pad2: c_ushort, - pub st_size: off64_t, - pub st_blksize: crate::blksize_t, - pub st_blocks: crate::blkcnt64_t, - pub st_atime: crate::time_t, - pub st_atime_nsec: c_long, - pub st_mtime: crate::time_t, - pub st_mtime_nsec: c_long, - pub st_ctime: crate::time_t, - pub st_ctime_nsec: c_long, - __glibc_reserved4: c_ulong, - __glibc_reserved5: c_ulong, - } - - pub struct statfs64 { - pub f_type: crate::__fsword_t, - pub f_bsize: crate::__fsword_t, - pub f_blocks: u64, - pub f_bfree: u64, - pub f_bavail: u64, - pub f_files: u64, - pub f_ffree: u64, - pub f_fsid: crate::fsid_t, - pub f_namelen: crate::__fsword_t, - pub f_frsize: crate::__fsword_t, - pub f_flags: crate::__fsword_t, - pub f_spare: [crate::__fsword_t; 4], - } - - pub struct statvfs64 { - pub f_bsize: c_ulong, - pub f_frsize: c_ulong, - pub f_blocks: u64, - pub f_bfree: u64, - pub f_bavail: u64, - pub f_files: u64, - pub f_ffree: u64, - pub f_favail: u64, - pub f_fsid: c_ulong, - __f_unused: c_int, - pub f_flag: c_ulong, - pub f_namemax: c_ulong, - __f_spare: [c_int; 6], - } - - pub struct ipc_perm { - pub __key: crate::key_t, - pub uid: crate::uid_t, - pub gid: crate::gid_t, - pub cuid: crate::uid_t, - pub cgid: crate::gid_t, - __pad1: c_ushort, - pub mode: c_ushort, - __pad2: c_ushort, - pub __seq: c_ushort, - __unused1: c_ulonglong, - __unused2: c_ulonglong, - } - - pub struct shmid_ds { - pub shm_perm: crate::ipc_perm, - 
#[cfg(gnu_time_bits64)] - pub shm_segsz: size_t, - #[cfg(not(gnu_time_bits64))] - __pad1: c_uint, - pub shm_atime: crate::time_t, - #[cfg(not(gnu_time_bits64))] - __pad2: c_uint, - pub shm_dtime: crate::time_t, - #[cfg(not(gnu_time_bits64))] - __pad3: c_uint, - pub shm_ctime: crate::time_t, - #[cfg(not(gnu_time_bits64))] - pub shm_segsz: size_t, - pub shm_cpid: crate::pid_t, - pub shm_lpid: crate::pid_t, - pub shm_nattch: crate::shmatt_t, - __reserved1: c_ulong, - __reserved2: c_ulong, - } - - pub struct msqid_ds { - pub msg_perm: crate::ipc_perm, - #[cfg(not(gnu_time_bits64))] - __pad1: c_uint, - pub msg_stime: crate::time_t, - #[cfg(not(gnu_time_bits64))] - __pad2: c_uint, - pub msg_rtime: crate::time_t, - #[cfg(not(gnu_time_bits64))] - __pad3: c_uint, - pub msg_ctime: crate::time_t, - pub __msg_cbytes: c_ulong, - pub msg_qnum: crate::msgqnum_t, - pub msg_qbytes: crate::msglen_t, - pub msg_lspid: crate::pid_t, - pub msg_lrpid: crate::pid_t, - __glibc_reserved4: c_ulong, - __glibc_reserved5: c_ulong, - } -} - -s_no_extra_traits! { - #[repr(align(8))] - pub struct max_align_t { - priv_: [i64; 3], - } -} - -pub const VEOF: usize = 4; -pub const RTLD_DEEPBIND: c_int = 0x8; -pub const RTLD_GLOBAL: c_int = 0x100; -pub const RTLD_NOLOAD: c_int = 0x4; - -pub const O_APPEND: c_int = 0x8; -pub const O_CREAT: c_int = 0x200; -pub const O_EXCL: c_int = 0x800; -pub const O_NOCTTY: c_int = 0x8000; -pub const O_NONBLOCK: c_int = 0x4000; -pub const O_SYNC: c_int = 0x802000; -pub const O_RSYNC: c_int = 0x802000; -pub const O_DSYNC: c_int = 0x2000; -pub const O_FSYNC: c_int = 0x802000; - -pub const MADV_SOFT_OFFLINE: c_int = 101; -pub const MAP_GROWSDOWN: c_int = 0x0200; -pub const MAP_ANON: c_int = 0x0020; -pub const MAP_ANONYMOUS: c_int = 0x0020; -pub const MAP_DENYWRITE: c_int = 0x0800; -pub const MAP_EXECUTABLE: c_int = 0x01000; -pub const MAP_POPULATE: c_int = 0x08000; -pub const MAP_NONBLOCK: c_int = 0x010000; -pub const MAP_STACK: c_int = 0x020000; -pub const MAP_HUGETLB: c_int = 0x040000; -pub const MAP_SYNC: c_int = 0x080000; - -pub const EDEADLK: c_int = 78; -pub const ENAMETOOLONG: c_int = 63; -pub const ENOLCK: c_int = 79; -pub const ENOSYS: c_int = 90; -pub const ENOTEMPTY: c_int = 66; -pub const ELOOP: c_int = 62; -pub const ENOMSG: c_int = 75; -pub const EIDRM: c_int = 77; -pub const ECHRNG: c_int = 94; -pub const EL2NSYNC: c_int = 95; -pub const EL3HLT: c_int = 96; -pub const EL3RST: c_int = 97; -pub const ELNRNG: c_int = 98; -pub const EUNATCH: c_int = 99; -pub const ENOCSI: c_int = 100; -pub const EL2HLT: c_int = 101; -pub const EBADE: c_int = 102; -pub const EBADR: c_int = 103; -pub const EXFULL: c_int = 104; -pub const ENOANO: c_int = 105; -pub const EBADRQC: c_int = 106; -pub const EBADSLT: c_int = 107; -pub const EMULTIHOP: c_int = 87; -pub const EOVERFLOW: c_int = 92; -pub const ENOTUNIQ: c_int = 115; -pub const EBADFD: c_int = 93; -pub const EBADMSG: c_int = 76; -pub const EREMCHG: c_int = 89; -pub const ELIBACC: c_int = 114; -pub const ELIBBAD: c_int = 112; -pub const ELIBSCN: c_int = 124; -pub const ELIBMAX: c_int = 123; -pub const ELIBEXEC: c_int = 110; -pub const EILSEQ: c_int = 122; -pub const ERESTART: c_int = 116; -pub const ESTRPIPE: c_int = 91; -pub const EUSERS: c_int = 68; -pub const ENOTSOCK: c_int = 38; -pub const EDESTADDRREQ: c_int = 39; -pub const EMSGSIZE: c_int = 40; -pub const EPROTOTYPE: c_int = 41; -pub const ENOPROTOOPT: c_int = 42; -pub const EPROTONOSUPPORT: c_int = 43; -pub const ESOCKTNOSUPPORT: c_int = 44; -pub const EOPNOTSUPP: c_int = 45; -pub const 
EPFNOSUPPORT: c_int = 46; -pub const EAFNOSUPPORT: c_int = 47; -pub const EADDRINUSE: c_int = 48; -pub const EADDRNOTAVAIL: c_int = 49; -pub const ENETDOWN: c_int = 50; -pub const ENETUNREACH: c_int = 51; -pub const ENETRESET: c_int = 52; -pub const ECONNABORTED: c_int = 53; -pub const ECONNRESET: c_int = 54; -pub const ENOBUFS: c_int = 55; -pub const EISCONN: c_int = 56; -pub const ENOTCONN: c_int = 57; -pub const ESHUTDOWN: c_int = 58; -pub const ETOOMANYREFS: c_int = 59; -pub const ETIMEDOUT: c_int = 60; -pub const ECONNREFUSED: c_int = 61; -pub const EHOSTDOWN: c_int = 64; -pub const EHOSTUNREACH: c_int = 65; -pub const EALREADY: c_int = 37; -pub const EINPROGRESS: c_int = 36; -pub const ESTALE: c_int = 70; -pub const EDQUOT: c_int = 69; -pub const ENOMEDIUM: c_int = 125; -pub const EMEDIUMTYPE: c_int = 126; -pub const ECANCELED: c_int = 127; -pub const ENOKEY: c_int = 128; -pub const EKEYEXPIRED: c_int = 129; -pub const EKEYREVOKED: c_int = 130; -pub const EKEYREJECTED: c_int = 131; -pub const EOWNERDEAD: c_int = 132; -pub const ENOTRECOVERABLE: c_int = 133; -pub const EHWPOISON: c_int = 135; -pub const ERFKILL: c_int = 134; - -pub const SOCK_STREAM: c_int = 1; -pub const SOCK_DGRAM: c_int = 2; - -pub const SA_SIGINFO: c_int = 0x200; -pub const SA_NOCLDWAIT: c_int = 0x100; - -pub const SIGEMT: c_int = 7; -pub const SIGTTIN: c_int = 21; -pub const SIGTTOU: c_int = 22; -pub const SIGXCPU: c_int = 24; -pub const SIGXFSZ: c_int = 25; -pub const SIGVTALRM: c_int = 26; -pub const SIGPROF: c_int = 27; -pub const SIGWINCH: c_int = 28; -pub const SIGCHLD: c_int = 20; -pub const SIGBUS: c_int = 10; -pub const SIGUSR1: c_int = 30; -pub const SIGUSR2: c_int = 31; -pub const SIGCONT: c_int = 19; -pub const SIGSTOP: c_int = 17; -pub const SIGTSTP: c_int = 18; -pub const SIGURG: c_int = 16; -pub const SIGIO: c_int = 23; -pub const SIGSYS: c_int = 12; -pub const SIGPOLL: c_int = 23; -pub const SIGPWR: c_int = 29; -pub const SIG_SETMASK: c_int = 4; -pub const SIG_BLOCK: c_int = 1; -pub const SIG_UNBLOCK: c_int = 2; - -pub const POLLWRNORM: c_short = 4; -pub const POLLWRBAND: c_short = 0x100; - -pub const O_ASYNC: c_int = 0x40; -pub const O_NDELAY: c_int = 0x4004; - -pub const EFD_NONBLOCK: c_int = 0x4000; - -pub const F_GETLK: c_int = 7; -pub const F_GETOWN: c_int = 5; -pub const F_SETOWN: c_int = 6; - -pub const SFD_NONBLOCK: c_int = 0x4000; - -pub const TCSANOW: c_int = 0; -pub const TCSADRAIN: c_int = 1; -pub const TCSAFLUSH: c_int = 2; - -pub const O_DIRECTORY: c_int = 0o200000; -pub const O_NOFOLLOW: c_int = 0o400000; -pub const O_LARGEFILE: c_int = 0x40000; -pub const O_DIRECT: c_int = 0x100000; - -pub const MAP_LOCKED: c_int = 0x0100; -pub const MAP_NORESERVE: c_int = 0x00040; - -pub const EDEADLOCK: c_int = 108; -pub const EUCLEAN: c_int = 117; -pub const ENOTNAM: c_int = 118; -pub const ENAVAIL: c_int = 119; -pub const EISNAM: c_int = 120; -pub const EREMOTEIO: c_int = 121; - -pub const MCL_CURRENT: c_int = 0x2000; -pub const MCL_FUTURE: c_int = 0x4000; -pub const MCL_ONFAULT: c_int = 0x8000; - -pub const SIGSTKSZ: size_t = 16384; -pub const MINSIGSTKSZ: size_t = 4096; -pub const CBAUD: crate::tcflag_t = 0x0000100f; -pub const TAB1: crate::tcflag_t = 0x800; -pub const TAB2: crate::tcflag_t = 0x1000; -pub const TAB3: crate::tcflag_t = 0x1800; -pub const CR1: crate::tcflag_t = 0x200; -pub const CR2: crate::tcflag_t = 0x400; -pub const CR3: crate::tcflag_t = 0x600; -pub const FF1: crate::tcflag_t = 0x8000; -pub const BS1: crate::tcflag_t = 0x2000; -pub const VT1: crate::tcflag_t = 0x4000; -pub 
const VWERASE: usize = 0xe; -pub const VREPRINT: usize = 0xc; -pub const VSUSP: usize = 0xa; -pub const VSTART: usize = 0x8; -pub const VSTOP: usize = 0x9; -pub const VDISCARD: usize = 0xd; -pub const VTIME: usize = 0x5; -pub const IXON: crate::tcflag_t = 0x400; -pub const IXOFF: crate::tcflag_t = 0x1000; -pub const ONLCR: crate::tcflag_t = 0x4; -pub const CSIZE: crate::tcflag_t = 0x30; -pub const CS6: crate::tcflag_t = 0x10; -pub const CS7: crate::tcflag_t = 0x20; -pub const CS8: crate::tcflag_t = 0x30; -pub const CSTOPB: crate::tcflag_t = 0x40; -pub const CREAD: crate::tcflag_t = 0x80; -pub const PARENB: crate::tcflag_t = 0x100; -pub const PARODD: crate::tcflag_t = 0x200; -pub const HUPCL: crate::tcflag_t = 0x400; -pub const CLOCAL: crate::tcflag_t = 0x800; -pub const ECHOKE: crate::tcflag_t = 0x800; -pub const ECHOE: crate::tcflag_t = 0x10; -pub const ECHOK: crate::tcflag_t = 0x20; -pub const ECHONL: crate::tcflag_t = 0x40; -pub const ECHOPRT: crate::tcflag_t = 0x400; -pub const ECHOCTL: crate::tcflag_t = 0x200; -pub const ISIG: crate::tcflag_t = 0x1; -pub const ICANON: crate::tcflag_t = 0x2; -pub const PENDIN: crate::tcflag_t = 0x4000; -pub const NOFLSH: crate::tcflag_t = 0x80; -pub const CIBAUD: crate::tcflag_t = 0o02003600000; -pub const CBAUDEX: crate::tcflag_t = 0x00001000; -pub const VSWTC: usize = 7; -pub const OLCUC: crate::tcflag_t = 0o000002; -pub const NLDLY: crate::tcflag_t = 0o000400; -pub const CRDLY: crate::tcflag_t = 0o003000; -pub const TABDLY: crate::tcflag_t = 0o014000; -pub const BSDLY: crate::tcflag_t = 0o020000; -pub const FFDLY: crate::tcflag_t = 0o100000; -pub const VTDLY: crate::tcflag_t = 0o040000; -pub const XTABS: crate::tcflag_t = 0o014000; - -pub const B0: crate::speed_t = 0o000000; -pub const B50: crate::speed_t = 0o000001; -pub const B75: crate::speed_t = 0o000002; -pub const B110: crate::speed_t = 0o000003; -pub const B134: crate::speed_t = 0o000004; -pub const B150: crate::speed_t = 0o000005; -pub const B200: crate::speed_t = 0o000006; -pub const B300: crate::speed_t = 0o000007; -pub const B600: crate::speed_t = 0o000010; -pub const B1200: crate::speed_t = 0o000011; -pub const B1800: crate::speed_t = 0o000012; -pub const B2400: crate::speed_t = 0o000013; -pub const B4800: crate::speed_t = 0o000014; -pub const B9600: crate::speed_t = 0o000015; -pub const B19200: crate::speed_t = 0o000016; -pub const B38400: crate::speed_t = 0o000017; -pub const EXTA: crate::speed_t = B19200; -pub const EXTB: crate::speed_t = B38400; -pub const B57600: crate::speed_t = 0x1001; -pub const B115200: crate::speed_t = 0x1002; -pub const B230400: crate::speed_t = 0x1003; -pub const B460800: crate::speed_t = 0x1004; -pub const B76800: crate::speed_t = 0x1005; -pub const B153600: crate::speed_t = 0x1006; -pub const B307200: crate::speed_t = 0x1007; -pub const B614400: crate::speed_t = 0x1008; -pub const B921600: crate::speed_t = 0x1009; -pub const B500000: crate::speed_t = 0x100a; -pub const B576000: crate::speed_t = 0x100b; -pub const B1000000: crate::speed_t = 0x100c; -pub const B1152000: crate::speed_t = 0x100d; -pub const B1500000: crate::speed_t = 0x100e; -pub const B2000000: crate::speed_t = 0x100f; - -pub const VEOL: usize = 5; -pub const VEOL2: usize = 6; -pub const VMIN: usize = 4; -pub const IEXTEN: crate::tcflag_t = 0x8000; -pub const TOSTOP: crate::tcflag_t = 0x100; -pub const FLUSHO: crate::tcflag_t = 0x1000; -pub const EXTPROC: crate::tcflag_t = 0x10000; - -pub const SYS_restart_syscall: c_long = 0; -pub const SYS_exit: c_long = 1; -pub const SYS_fork: c_long = 2; 
-pub const SYS_read: c_long = 3; -pub const SYS_write: c_long = 4; -pub const SYS_open: c_long = 5; -pub const SYS_close: c_long = 6; -pub const SYS_wait4: c_long = 7; -pub const SYS_creat: c_long = 8; -pub const SYS_link: c_long = 9; -pub const SYS_unlink: c_long = 10; -pub const SYS_execv: c_long = 11; -pub const SYS_chdir: c_long = 12; -pub const SYS_chown: c_long = 13; -pub const SYS_mknod: c_long = 14; -pub const SYS_chmod: c_long = 15; -pub const SYS_lchown: c_long = 16; -pub const SYS_brk: c_long = 17; -pub const SYS_perfctr: c_long = 18; -pub const SYS_lseek: c_long = 19; -pub const SYS_getpid: c_long = 20; -pub const SYS_capget: c_long = 21; -pub const SYS_capset: c_long = 22; -pub const SYS_setuid: c_long = 23; -pub const SYS_getuid: c_long = 24; -pub const SYS_vmsplice: c_long = 25; -pub const SYS_ptrace: c_long = 26; -pub const SYS_alarm: c_long = 27; -pub const SYS_sigaltstack: c_long = 28; -pub const SYS_pause: c_long = 29; -pub const SYS_utime: c_long = 30; -pub const SYS_lchown32: c_long = 31; -pub const SYS_fchown32: c_long = 32; -pub const SYS_access: c_long = 33; -pub const SYS_nice: c_long = 34; -pub const SYS_chown32: c_long = 35; -pub const SYS_sync: c_long = 36; -pub const SYS_kill: c_long = 37; -pub const SYS_stat: c_long = 38; -pub const SYS_sendfile: c_long = 39; -pub const SYS_lstat: c_long = 40; -pub const SYS_dup: c_long = 41; -pub const SYS_pipe: c_long = 42; -pub const SYS_times: c_long = 43; -pub const SYS_getuid32: c_long = 44; -pub const SYS_umount2: c_long = 45; -pub const SYS_setgid: c_long = 46; -pub const SYS_getgid: c_long = 47; -pub const SYS_signal: c_long = 48; -pub const SYS_geteuid: c_long = 49; -pub const SYS_getegid: c_long = 50; -pub const SYS_acct: c_long = 51; -pub const SYS_getgid32: c_long = 53; -pub const SYS_ioctl: c_long = 54; -pub const SYS_reboot: c_long = 55; -pub const SYS_mmap2: c_long = 56; -pub const SYS_symlink: c_long = 57; -pub const SYS_readlink: c_long = 58; -pub const SYS_execve: c_long = 59; -pub const SYS_umask: c_long = 60; -pub const SYS_chroot: c_long = 61; -pub const SYS_fstat: c_long = 62; -pub const SYS_fstat64: c_long = 63; -pub const SYS_getpagesize: c_long = 64; -pub const SYS_msync: c_long = 65; -pub const SYS_vfork: c_long = 66; -pub const SYS_pread64: c_long = 67; -pub const SYS_pwrite64: c_long = 68; -pub const SYS_geteuid32: c_long = 69; -pub const SYS_getegid32: c_long = 70; -pub const SYS_mmap: c_long = 71; -pub const SYS_setreuid32: c_long = 72; -pub const SYS_munmap: c_long = 73; -pub const SYS_mprotect: c_long = 74; -pub const SYS_madvise: c_long = 75; -pub const SYS_vhangup: c_long = 76; -pub const SYS_truncate64: c_long = 77; -pub const SYS_mincore: c_long = 78; -pub const SYS_getgroups: c_long = 79; -pub const SYS_setgroups: c_long = 80; -pub const SYS_getpgrp: c_long = 81; -pub const SYS_setgroups32: c_long = 82; -pub const SYS_setitimer: c_long = 83; -pub const SYS_ftruncate64: c_long = 84; -pub const SYS_swapon: c_long = 85; -pub const SYS_getitimer: c_long = 86; -pub const SYS_setuid32: c_long = 87; -pub const SYS_sethostname: c_long = 88; -pub const SYS_setgid32: c_long = 89; -pub const SYS_dup2: c_long = 90; -pub const SYS_setfsuid32: c_long = 91; -pub const SYS_fcntl: c_long = 92; -pub const SYS_select: c_long = 93; -pub const SYS_setfsgid32: c_long = 94; -pub const SYS_fsync: c_long = 95; -pub const SYS_setpriority: c_long = 96; -pub const SYS_socket: c_long = 97; -pub const SYS_connect: c_long = 98; -pub const SYS_accept: c_long = 99; -pub const SYS_getpriority: c_long = 100; -pub const 
SYS_rt_sigreturn: c_long = 101; -pub const SYS_rt_sigaction: c_long = 102; -pub const SYS_rt_sigprocmask: c_long = 103; -pub const SYS_rt_sigpending: c_long = 104; -pub const SYS_rt_sigtimedwait: c_long = 105; -pub const SYS_rt_sigqueueinfo: c_long = 106; -pub const SYS_rt_sigsuspend: c_long = 107; -pub const SYS_setresuid32: c_long = 108; -pub const SYS_getresuid32: c_long = 109; -pub const SYS_setresgid32: c_long = 110; -pub const SYS_getresgid32: c_long = 111; -pub const SYS_setregid32: c_long = 112; -pub const SYS_recvmsg: c_long = 113; -pub const SYS_sendmsg: c_long = 114; -pub const SYS_getgroups32: c_long = 115; -pub const SYS_gettimeofday: c_long = 116; -pub const SYS_getrusage: c_long = 117; -pub const SYS_getsockopt: c_long = 118; -pub const SYS_getcwd: c_long = 119; -pub const SYS_readv: c_long = 120; -pub const SYS_writev: c_long = 121; -pub const SYS_settimeofday: c_long = 122; -pub const SYS_fchown: c_long = 123; -pub const SYS_fchmod: c_long = 124; -pub const SYS_recvfrom: c_long = 125; -pub const SYS_setreuid: c_long = 126; -pub const SYS_setregid: c_long = 127; -pub const SYS_rename: c_long = 128; -pub const SYS_truncate: c_long = 129; -pub const SYS_ftruncate: c_long = 130; -pub const SYS_flock: c_long = 131; -pub const SYS_lstat64: c_long = 132; -pub const SYS_sendto: c_long = 133; -pub const SYS_shutdown: c_long = 134; -pub const SYS_socketpair: c_long = 135; -pub const SYS_mkdir: c_long = 136; -pub const SYS_rmdir: c_long = 137; -pub const SYS_utimes: c_long = 138; -pub const SYS_stat64: c_long = 139; -pub const SYS_sendfile64: c_long = 140; -pub const SYS_getpeername: c_long = 141; -pub const SYS_futex: c_long = 142; -pub const SYS_gettid: c_long = 143; -pub const SYS_getrlimit: c_long = 144; -pub const SYS_setrlimit: c_long = 145; -pub const SYS_pivot_root: c_long = 146; -pub const SYS_prctl: c_long = 147; -pub const SYS_pciconfig_read: c_long = 148; -pub const SYS_pciconfig_write: c_long = 149; -pub const SYS_getsockname: c_long = 150; -pub const SYS_inotify_init: c_long = 151; -pub const SYS_inotify_add_watch: c_long = 152; -pub const SYS_poll: c_long = 153; -pub const SYS_getdents64: c_long = 154; -pub const SYS_fcntl64: c_long = 155; -pub const SYS_inotify_rm_watch: c_long = 156; -pub const SYS_statfs: c_long = 157; -pub const SYS_fstatfs: c_long = 158; -pub const SYS_umount: c_long = 159; -pub const SYS_sched_set_affinity: c_long = 160; -pub const SYS_sched_get_affinity: c_long = 161; -pub const SYS_getdomainname: c_long = 162; -pub const SYS_setdomainname: c_long = 163; -pub const SYS_quotactl: c_long = 165; -pub const SYS_set_tid_address: c_long = 166; -pub const SYS_mount: c_long = 167; -pub const SYS_ustat: c_long = 168; -pub const SYS_setxattr: c_long = 169; -pub const SYS_lsetxattr: c_long = 170; -pub const SYS_fsetxattr: c_long = 171; -pub const SYS_getxattr: c_long = 172; -pub const SYS_lgetxattr: c_long = 173; -pub const SYS_getdents: c_long = 174; -pub const SYS_setsid: c_long = 175; -pub const SYS_fchdir: c_long = 176; -pub const SYS_fgetxattr: c_long = 177; -pub const SYS_listxattr: c_long = 178; -pub const SYS_llistxattr: c_long = 179; -pub const SYS_flistxattr: c_long = 180; -pub const SYS_removexattr: c_long = 181; -pub const SYS_lremovexattr: c_long = 182; -pub const SYS_sigpending: c_long = 183; -#[deprecated(since = "0.2.70", note = "Functional up to 2.6 kernel")] -pub const SYS_query_module: c_long = 184; -pub const SYS_setpgid: c_long = 185; -pub const SYS_fremovexattr: c_long = 186; -pub const SYS_tkill: c_long = 187; -pub const 
SYS_exit_group: c_long = 188; -pub const SYS_uname: c_long = 189; -pub const SYS_init_module: c_long = 190; -pub const SYS_personality: c_long = 191; -pub const SYS_remap_file_pages: c_long = 192; -pub const SYS_epoll_create: c_long = 193; -pub const SYS_epoll_ctl: c_long = 194; -pub const SYS_epoll_wait: c_long = 195; -pub const SYS_ioprio_set: c_long = 196; -pub const SYS_getppid: c_long = 197; -pub const SYS_sigaction: c_long = 198; -pub const SYS_sgetmask: c_long = 199; -pub const SYS_ssetmask: c_long = 200; -pub const SYS_sigsuspend: c_long = 201; -pub const SYS_oldlstat: c_long = 202; -pub const SYS_uselib: c_long = 203; -pub const SYS_readdir: c_long = 204; -pub const SYS_readahead: c_long = 205; -pub const SYS_socketcall: c_long = 206; -pub const SYS_syslog: c_long = 207; -pub const SYS_lookup_dcookie: c_long = 208; -pub const SYS_fadvise64: c_long = 209; -pub const SYS_fadvise64_64: c_long = 210; -pub const SYS_tgkill: c_long = 211; -pub const SYS_waitpid: c_long = 212; -pub const SYS_swapoff: c_long = 213; -pub const SYS_sysinfo: c_long = 214; -pub const SYS_ipc: c_long = 215; -pub const SYS_sigreturn: c_long = 216; -pub const SYS_clone: c_long = 217; -pub const SYS_ioprio_get: c_long = 218; -pub const SYS_adjtimex: c_long = 219; -pub const SYS_sigprocmask: c_long = 220; -#[deprecated(since = "0.2.70", note = "Functional up to 2.6 kernel")] -pub const SYS_create_module: c_long = 221; -pub const SYS_delete_module: c_long = 222; -#[deprecated(since = "0.2.70", note = "Functional up to 2.6 kernel")] -pub const SYS_get_kernel_syms: c_long = 223; -pub const SYS_getpgid: c_long = 224; -pub const SYS_bdflush: c_long = 225; -pub const SYS_sysfs: c_long = 226; -pub const SYS_afs_syscall: c_long = 227; -pub const SYS_setfsuid: c_long = 228; -pub const SYS_setfsgid: c_long = 229; -pub const SYS__newselect: c_long = 230; -pub const SYS_time: c_long = 231; -pub const SYS_splice: c_long = 232; -pub const SYS_stime: c_long = 233; -pub const SYS_statfs64: c_long = 234; -pub const SYS_fstatfs64: c_long = 235; -pub const SYS__llseek: c_long = 236; -pub const SYS_mlock: c_long = 237; -pub const SYS_munlock: c_long = 238; -pub const SYS_mlockall: c_long = 239; -pub const SYS_munlockall: c_long = 240; -pub const SYS_sched_setparam: c_long = 241; -pub const SYS_sched_getparam: c_long = 242; -pub const SYS_sched_setscheduler: c_long = 243; -pub const SYS_sched_getscheduler: c_long = 244; -pub const SYS_sched_yield: c_long = 245; -pub const SYS_sched_get_priority_max: c_long = 246; -pub const SYS_sched_get_priority_min: c_long = 247; -pub const SYS_sched_rr_get_interval: c_long = 248; -pub const SYS_nanosleep: c_long = 249; -pub const SYS_mremap: c_long = 250; -pub const SYS__sysctl: c_long = 251; -pub const SYS_getsid: c_long = 252; -pub const SYS_fdatasync: c_long = 253; -pub const SYS_nfsservctl: c_long = 254; -pub const SYS_sync_file_range: c_long = 255; -pub const SYS_clock_settime: c_long = 256; -pub const SYS_clock_gettime: c_long = 257; -pub const SYS_clock_getres: c_long = 258; -pub const SYS_clock_nanosleep: c_long = 259; -pub const SYS_sched_getaffinity: c_long = 260; -pub const SYS_sched_setaffinity: c_long = 261; -pub const SYS_timer_settime: c_long = 262; -pub const SYS_timer_gettime: c_long = 263; -pub const SYS_timer_getoverrun: c_long = 264; -pub const SYS_timer_delete: c_long = 265; -pub const SYS_timer_create: c_long = 266; -pub const SYS_io_setup: c_long = 268; -pub const SYS_io_destroy: c_long = 269; -pub const SYS_io_submit: c_long = 270; -pub const SYS_io_cancel: c_long = 271; 
-pub const SYS_io_getevents: c_long = 272; -pub const SYS_mq_open: c_long = 273; -pub const SYS_mq_unlink: c_long = 274; -pub const SYS_mq_timedsend: c_long = 275; -pub const SYS_mq_timedreceive: c_long = 276; -pub const SYS_mq_notify: c_long = 277; -pub const SYS_mq_getsetattr: c_long = 278; -pub const SYS_waitid: c_long = 279; -pub const SYS_tee: c_long = 280; -pub const SYS_add_key: c_long = 281; -pub const SYS_request_key: c_long = 282; -pub const SYS_keyctl: c_long = 283; -pub const SYS_openat: c_long = 284; -pub const SYS_mkdirat: c_long = 285; -pub const SYS_mknodat: c_long = 286; -pub const SYS_fchownat: c_long = 287; -pub const SYS_futimesat: c_long = 288; -pub const SYS_fstatat64: c_long = 289; -pub const SYS_unlinkat: c_long = 290; -pub const SYS_renameat: c_long = 291; -pub const SYS_linkat: c_long = 292; -pub const SYS_symlinkat: c_long = 293; -pub const SYS_readlinkat: c_long = 294; -pub const SYS_fchmodat: c_long = 295; -pub const SYS_faccessat: c_long = 296; -pub const SYS_pselect6: c_long = 297; -pub const SYS_ppoll: c_long = 298; -pub const SYS_unshare: c_long = 299; -pub const SYS_set_robust_list: c_long = 300; -pub const SYS_get_robust_list: c_long = 301; -pub const SYS_migrate_pages: c_long = 302; -pub const SYS_mbind: c_long = 303; -pub const SYS_get_mempolicy: c_long = 304; -pub const SYS_set_mempolicy: c_long = 305; -pub const SYS_kexec_load: c_long = 306; -pub const SYS_move_pages: c_long = 307; -pub const SYS_getcpu: c_long = 308; -pub const SYS_epoll_pwait: c_long = 309; -pub const SYS_utimensat: c_long = 310; -pub const SYS_signalfd: c_long = 311; -pub const SYS_timerfd_create: c_long = 312; -pub const SYS_eventfd: c_long = 313; -pub const SYS_fallocate: c_long = 314; -pub const SYS_timerfd_settime: c_long = 315; -pub const SYS_timerfd_gettime: c_long = 316; -pub const SYS_signalfd4: c_long = 317; -pub const SYS_eventfd2: c_long = 318; -pub const SYS_epoll_create1: c_long = 319; -pub const SYS_dup3: c_long = 320; -pub const SYS_pipe2: c_long = 321; -pub const SYS_inotify_init1: c_long = 322; -pub const SYS_accept4: c_long = 323; -pub const SYS_preadv: c_long = 324; -pub const SYS_pwritev: c_long = 325; -pub const SYS_rt_tgsigqueueinfo: c_long = 326; -pub const SYS_perf_event_open: c_long = 327; -pub const SYS_recvmmsg: c_long = 328; -pub const SYS_fanotify_init: c_long = 329; -pub const SYS_fanotify_mark: c_long = 330; -pub const SYS_prlimit64: c_long = 331; -pub const SYS_name_to_handle_at: c_long = 332; -pub const SYS_open_by_handle_at: c_long = 333; -pub const SYS_clock_adjtime: c_long = 334; -pub const SYS_syncfs: c_long = 335; -pub const SYS_sendmmsg: c_long = 336; -pub const SYS_setns: c_long = 337; -pub const SYS_process_vm_readv: c_long = 338; -pub const SYS_process_vm_writev: c_long = 339; -pub const SYS_kern_features: c_long = 340; -pub const SYS_kcmp: c_long = 341; -pub const SYS_finit_module: c_long = 342; -pub const SYS_sched_setattr: c_long = 343; -pub const SYS_sched_getattr: c_long = 344; -pub const SYS_renameat2: c_long = 345; -pub const SYS_seccomp: c_long = 346; -pub const SYS_getrandom: c_long = 347; -pub const SYS_memfd_create: c_long = 348; -pub const SYS_bpf: c_long = 349; -pub const SYS_execveat: c_long = 350; -pub const SYS_membarrier: c_long = 351; -pub const SYS_userfaultfd: c_long = 352; -pub const SYS_bind: c_long = 353; -pub const SYS_listen: c_long = 354; -pub const SYS_setsockopt: c_long = 355; -pub const SYS_mlock2: c_long = 356; -pub const SYS_copy_file_range: c_long = 357; -pub const SYS_preadv2: c_long = 358; -pub const 
SYS_pwritev2: c_long = 359; -pub const SYS_statx: c_long = 360; -pub const SYS_rseq: c_long = 365; -pub const SYS_pidfd_send_signal: c_long = 424; -pub const SYS_io_uring_setup: c_long = 425; -pub const SYS_io_uring_enter: c_long = 426; -pub const SYS_io_uring_register: c_long = 427; -pub const SYS_open_tree: c_long = 428; -pub const SYS_move_mount: c_long = 429; -pub const SYS_fsopen: c_long = 430; -pub const SYS_fsconfig: c_long = 431; -pub const SYS_fsmount: c_long = 432; -pub const SYS_fspick: c_long = 433; -pub const SYS_pidfd_open: c_long = 434; -// Reserved in the kernel, but not actually implemented yet -pub const SYS_clone3: c_long = 435; -pub const SYS_close_range: c_long = 436; -pub const SYS_openat2: c_long = 437; -pub const SYS_pidfd_getfd: c_long = 438; -pub const SYS_faccessat2: c_long = 439; -pub const SYS_process_madvise: c_long = 440; -pub const SYS_epoll_pwait2: c_long = 441; -pub const SYS_mount_setattr: c_long = 442; -pub const SYS_quotactl_fd: c_long = 443; -pub const SYS_landlock_create_ruleset: c_long = 444; -pub const SYS_landlock_add_rule: c_long = 445; -pub const SYS_landlock_restrict_self: c_long = 446; -pub const SYS_memfd_secret: c_long = 447; -pub const SYS_process_mrelease: c_long = 448; -pub const SYS_futex_waitv: c_long = 449; -pub const SYS_set_mempolicy_home_node: c_long = 450; diff --git a/vendor/libc/src/unix/linux_like/linux/gnu/b32/x86/mod.rs b/vendor/libc/src/unix/linux_like/linux/gnu/b32/x86/mod.rs deleted file mode 100644 index 5f0dfe90adf818..00000000000000 --- a/vendor/libc/src/unix/linux_like/linux/gnu/b32/x86/mod.rs +++ /dev/null @@ -1,1098 +0,0 @@ -use crate::prelude::*; -use crate::{off64_t, off_t}; - -pub type wchar_t = i32; -pub type greg_t = i32; - -s! { - // FIXME(1.0): This should not implement `PartialEq` - #[allow(unpredictable_function_pointer_comparisons)] - pub struct sigaction { - pub sa_sigaction: crate::sighandler_t, - pub sa_mask: crate::sigset_t, - pub sa_flags: c_int, - pub sa_restorer: Option, - } - - pub struct statfs { - pub f_type: crate::__fsword_t, - pub f_bsize: crate::__fsword_t, - pub f_blocks: crate::fsblkcnt_t, - pub f_bfree: crate::fsblkcnt_t, - pub f_bavail: crate::fsblkcnt_t, - - pub f_files: crate::fsfilcnt_t, - pub f_ffree: crate::fsfilcnt_t, - pub f_fsid: crate::fsid_t, - - pub f_namelen: crate::__fsword_t, - pub f_frsize: crate::__fsword_t, - pub f_flags: crate::__fsword_t, - f_spare: [crate::__fsword_t; 4], - } - - pub struct flock { - pub l_type: c_short, - pub l_whence: c_short, - pub l_start: off_t, - pub l_len: off_t, - pub l_pid: crate::pid_t, - } - - pub struct flock64 { - pub l_type: c_short, - pub l_whence: c_short, - pub l_start: off64_t, - pub l_len: off64_t, - pub l_pid: crate::pid_t, - } - - pub struct _libc_fpreg { - pub significand: [u16; 4], - pub exponent: u16, - } - - pub struct _libc_fpstate { - pub cw: c_ulong, - pub sw: c_ulong, - pub tag: c_ulong, - pub ipoff: c_ulong, - pub cssel: c_ulong, - pub dataoff: c_ulong, - pub datasel: c_ulong, - pub _st: [_libc_fpreg; 8], - pub status: c_ulong, - } - - pub struct user_fpregs_struct { - pub cwd: c_long, - pub swd: c_long, - pub twd: c_long, - pub fip: c_long, - pub fcs: c_long, - pub foo: c_long, - pub fos: c_long, - pub st_space: [c_long; 20], - } - - pub struct user_regs_struct { - pub ebx: c_long, - pub ecx: c_long, - pub edx: c_long, - pub esi: c_long, - pub edi: c_long, - pub ebp: c_long, - pub eax: c_long, - pub xds: c_long, - pub xes: c_long, - pub xfs: c_long, - pub xgs: c_long, - pub orig_eax: c_long, - pub eip: c_long, - pub xcs: 
c_long, - pub eflags: c_long, - pub esp: c_long, - pub xss: c_long, - } - - pub struct user { - pub regs: user_regs_struct, - pub u_fpvalid: c_int, - pub i387: user_fpregs_struct, - pub u_tsize: c_ulong, - pub u_dsize: c_ulong, - pub u_ssize: c_ulong, - pub start_code: c_ulong, - pub start_stack: c_ulong, - pub signal: c_long, - __reserved: c_int, - pub u_ar0: *mut user_regs_struct, - pub u_fpstate: *mut user_fpregs_struct, - pub magic: c_ulong, - pub u_comm: [c_char; 32], - pub u_debugreg: [c_int; 8], - } - - pub struct mcontext_t { - pub gregs: [greg_t; 19], - pub fpregs: *mut _libc_fpstate, - pub oldmask: c_ulong, - pub cr2: c_ulong, - } - - pub struct ipc_perm { - pub __key: crate::key_t, - pub uid: crate::uid_t, - pub gid: crate::gid_t, - pub cuid: crate::uid_t, - pub cgid: crate::gid_t, - pub mode: c_ushort, - __pad1: c_ushort, - pub __seq: c_ushort, - __pad2: c_ushort, - __unused1: c_ulong, - __unused2: c_ulong, - } - - pub struct stat64 { - pub st_dev: crate::dev_t, - #[cfg(not(gnu_time_bits64))] - __pad1: c_uint, - #[cfg(not(gnu_time_bits64))] - __st_ino: c_ulong, - #[cfg(gnu_time_bits64)] - pub st_ino: crate::ino_t, - pub st_mode: crate::mode_t, - pub st_nlink: crate::nlink_t, - pub st_uid: crate::uid_t, - pub st_gid: crate::gid_t, - pub st_rdev: crate::dev_t, - #[cfg(not(gnu_time_bits64))] - __pad2: c_uint, - pub st_size: off64_t, - pub st_blksize: crate::blksize_t, - pub st_blocks: crate::blkcnt64_t, - pub st_atime: crate::time_t, - pub st_atime_nsec: c_long, - #[cfg(gnu_time_bits64)] - _atime_pad: c_int, - pub st_mtime: crate::time_t, - pub st_mtime_nsec: c_long, - #[cfg(gnu_time_bits64)] - _mtime_pad: c_int, - pub st_ctime: crate::time_t, - pub st_ctime_nsec: c_long, - #[cfg(gnu_time_bits64)] - _ctime_pad: c_int, - #[cfg(not(gnu_time_bits64))] - pub st_ino: crate::ino64_t, - } - - pub struct statfs64 { - pub f_type: crate::__fsword_t, - pub f_bsize: crate::__fsword_t, - pub f_blocks: u64, - pub f_bfree: u64, - pub f_bavail: u64, - pub f_files: u64, - pub f_ffree: u64, - pub f_fsid: crate::fsid_t, - pub f_namelen: crate::__fsword_t, - pub f_frsize: crate::__fsword_t, - pub f_flags: crate::__fsword_t, - pub f_spare: [crate::__fsword_t; 4], - } - - pub struct statvfs64 { - pub f_bsize: c_ulong, - pub f_frsize: c_ulong, - pub f_blocks: u64, - pub f_bfree: u64, - pub f_bavail: u64, - pub f_files: u64, - pub f_ffree: u64, - pub f_favail: u64, - pub f_fsid: c_ulong, - __f_unused: c_int, - pub f_flag: c_ulong, - pub f_namemax: c_ulong, - __f_spare: [c_int; 6], - } - - pub struct shmid_ds { - pub shm_perm: crate::ipc_perm, - pub shm_segsz: size_t, - pub shm_atime: crate::time_t, - #[cfg(not(gnu_time_bits64))] - __unused1: c_ulong, - pub shm_dtime: crate::time_t, - #[cfg(not(gnu_time_bits64))] - __unused2: c_ulong, - pub shm_ctime: crate::time_t, - #[cfg(not(gnu_time_bits64))] - __unused3: c_ulong, - pub shm_cpid: crate::pid_t, - pub shm_lpid: crate::pid_t, - pub shm_nattch: crate::shmatt_t, - __unused4: c_ulong, - __unused5: c_ulong, - } - - pub struct msqid_ds { - pub msg_perm: crate::ipc_perm, - pub msg_stime: crate::time_t, - #[cfg(not(gnu_time_bits64))] - __glibc_reserved1: c_ulong, - pub msg_rtime: crate::time_t, - #[cfg(not(gnu_time_bits64))] - __glibc_reserved2: c_ulong, - pub msg_ctime: crate::time_t, - #[cfg(not(gnu_time_bits64))] - __glibc_reserved3: c_ulong, - pub __msg_cbytes: c_ulong, - pub msg_qnum: crate::msgqnum_t, - pub msg_qbytes: crate::msglen_t, - pub msg_lspid: crate::pid_t, - pub msg_lrpid: crate::pid_t, - __glibc_reserved4: c_ulong, - __glibc_reserved5: c_ulong, 
- } - - pub struct siginfo_t { - pub si_signo: c_int, - pub si_errno: c_int, - pub si_code: c_int, - #[doc(hidden)] - #[deprecated( - since = "0.2.54", - note = "Please leave a comment on \ - https://github.com/rust-lang/libc/pull/1316 if you're using \ - this field" - )] - pub _pad: [c_int; 29], - _align: [usize; 0], - } - - pub struct stack_t { - pub ss_sp: *mut c_void, - pub ss_flags: c_int, - pub ss_size: size_t, - } -} - -s_no_extra_traits! { - pub struct user_fpxregs_struct { - pub cwd: c_ushort, - pub swd: c_ushort, - pub twd: c_ushort, - pub fop: c_ushort, - pub fip: c_long, - pub fcs: c_long, - pub foo: c_long, - pub fos: c_long, - pub mxcsr: c_long, - __reserved: c_long, - pub st_space: [c_long; 32], - pub xmm_space: [c_long; 32], - padding: [c_long; 56], - } - - pub struct ucontext_t { - pub uc_flags: c_ulong, - pub uc_link: *mut ucontext_t, - pub uc_stack: crate::stack_t, - pub uc_mcontext: mcontext_t, - pub uc_sigmask: crate::sigset_t, - __private: [u8; 112], - __ssp: [c_ulong; 4], - } - - #[repr(align(16))] - pub struct max_align_t { - priv_: [f64; 6], - } -} - -cfg_if! { - if #[cfg(feature = "extra_traits")] { - impl PartialEq for user_fpxregs_struct { - fn eq(&self, other: &user_fpxregs_struct) -> bool { - self.cwd == other.cwd - && self.swd == other.swd - && self.twd == other.twd - && self.fop == other.fop - && self.fip == other.fip - && self.fcs == other.fcs - && self.foo == other.foo - && self.fos == other.fos - && self.mxcsr == other.mxcsr - // Ignore __reserved field - && self.st_space == other.st_space - && self.xmm_space == other.xmm_space - // Ignore padding field - } - } - - impl Eq for user_fpxregs_struct {} - - impl hash::Hash for user_fpxregs_struct { - fn hash(&self, state: &mut H) { - self.cwd.hash(state); - self.swd.hash(state); - self.twd.hash(state); - self.fop.hash(state); - self.fip.hash(state); - self.fcs.hash(state); - self.foo.hash(state); - self.fos.hash(state); - self.mxcsr.hash(state); - // Ignore __reserved field - self.st_space.hash(state); - self.xmm_space.hash(state); - // Ignore padding field - } - } - - impl PartialEq for ucontext_t { - fn eq(&self, other: &ucontext_t) -> bool { - self.uc_flags == other.uc_flags - && self.uc_link == other.uc_link - && self.uc_stack == other.uc_stack - && self.uc_mcontext == other.uc_mcontext - && self.uc_sigmask == other.uc_sigmask - // Ignore __private field - } - } - - impl Eq for ucontext_t {} - - impl hash::Hash for ucontext_t { - fn hash(&self, state: &mut H) { - self.uc_flags.hash(state); - self.uc_link.hash(state); - self.uc_stack.hash(state); - self.uc_mcontext.hash(state); - self.uc_sigmask.hash(state); - // Ignore __private field - } - } - } -} - -pub const VEOF: usize = 4; -pub const RTLD_DEEPBIND: c_int = 0x8; -pub const RTLD_GLOBAL: c_int = 0x100; -pub const RTLD_NOLOAD: c_int = 0x4; -pub const O_DIRECT: c_int = 0x4000; -pub const O_DIRECTORY: c_int = 0x10000; -pub const O_NOFOLLOW: c_int = 0x20000; -pub const O_LARGEFILE: c_int = 0o0100000; -pub const O_APPEND: c_int = 1024; -pub const O_CREAT: c_int = 64; -pub const O_EXCL: c_int = 128; -pub const O_NOCTTY: c_int = 256; -pub const O_NONBLOCK: c_int = 2048; -pub const O_SYNC: c_int = 1052672; -pub const O_RSYNC: c_int = 1052672; -pub const O_DSYNC: c_int = 4096; -pub const O_FSYNC: c_int = 0x101000; -pub const O_ASYNC: c_int = 0x2000; -pub const O_NDELAY: c_int = 0x800; - -pub const MADV_SOFT_OFFLINE: c_int = 101; -pub const MAP_LOCKED: c_int = 0x02000; -pub const MAP_NORESERVE: c_int = 0x04000; -pub const MAP_32BIT: c_int = 0x0040; -pub const 
MAP_ANON: c_int = 0x0020; -pub const MAP_ANONYMOUS: c_int = 0x0020; -pub const MAP_DENYWRITE: c_int = 0x0800; -pub const MAP_EXECUTABLE: c_int = 0x01000; -pub const MAP_POPULATE: c_int = 0x08000; -pub const MAP_NONBLOCK: c_int = 0x010000; -pub const MAP_STACK: c_int = 0x020000; -pub const MAP_HUGETLB: c_int = 0x040000; -pub const MAP_GROWSDOWN: c_int = 0x0100; -pub const MAP_SYNC: c_int = 0x080000; - -pub const EDEADLOCK: c_int = 35; -pub const EUCLEAN: c_int = 117; -pub const ENOTNAM: c_int = 118; -pub const ENAVAIL: c_int = 119; -pub const EISNAM: c_int = 120; -pub const EREMOTEIO: c_int = 121; -pub const EDEADLK: c_int = 35; -pub const ENAMETOOLONG: c_int = 36; -pub const ENOLCK: c_int = 37; -pub const ENOSYS: c_int = 38; -pub const ENOTEMPTY: c_int = 39; -pub const ELOOP: c_int = 40; -pub const ENOMSG: c_int = 42; -pub const EIDRM: c_int = 43; -pub const ECHRNG: c_int = 44; -pub const EL2NSYNC: c_int = 45; -pub const EL3HLT: c_int = 46; -pub const EL3RST: c_int = 47; -pub const ELNRNG: c_int = 48; -pub const EUNATCH: c_int = 49; -pub const ENOCSI: c_int = 50; -pub const EL2HLT: c_int = 51; -pub const EBADE: c_int = 52; -pub const EBADR: c_int = 53; -pub const EXFULL: c_int = 54; -pub const ENOANO: c_int = 55; -pub const EBADRQC: c_int = 56; -pub const EBADSLT: c_int = 57; -pub const EMULTIHOP: c_int = 72; -pub const EOVERFLOW: c_int = 75; -pub const ENOTUNIQ: c_int = 76; -pub const EBADFD: c_int = 77; -pub const EBADMSG: c_int = 74; -pub const EREMCHG: c_int = 78; -pub const ELIBACC: c_int = 79; -pub const ELIBBAD: c_int = 80; -pub const ELIBSCN: c_int = 81; -pub const ELIBMAX: c_int = 82; -pub const ELIBEXEC: c_int = 83; -pub const EILSEQ: c_int = 84; -pub const ERESTART: c_int = 85; -pub const ESTRPIPE: c_int = 86; -pub const EUSERS: c_int = 87; -pub const ENOTSOCK: c_int = 88; -pub const EDESTADDRREQ: c_int = 89; -pub const EMSGSIZE: c_int = 90; -pub const EPROTOTYPE: c_int = 91; -pub const ENOPROTOOPT: c_int = 92; -pub const EPROTONOSUPPORT: c_int = 93; -pub const ESOCKTNOSUPPORT: c_int = 94; -pub const EOPNOTSUPP: c_int = 95; -pub const EPFNOSUPPORT: c_int = 96; -pub const EAFNOSUPPORT: c_int = 97; -pub const EADDRINUSE: c_int = 98; -pub const EADDRNOTAVAIL: c_int = 99; -pub const ENETDOWN: c_int = 100; -pub const ENETUNREACH: c_int = 101; -pub const ENETRESET: c_int = 102; -pub const ECONNABORTED: c_int = 103; -pub const ECONNRESET: c_int = 104; -pub const ENOBUFS: c_int = 105; -pub const EISCONN: c_int = 106; -pub const ENOTCONN: c_int = 107; -pub const ESHUTDOWN: c_int = 108; -pub const ETOOMANYREFS: c_int = 109; -pub const ETIMEDOUT: c_int = 110; -pub const ECONNREFUSED: c_int = 111; -pub const EHOSTDOWN: c_int = 112; -pub const EHOSTUNREACH: c_int = 113; -pub const EALREADY: c_int = 114; -pub const EINPROGRESS: c_int = 115; -pub const ESTALE: c_int = 116; -pub const EDQUOT: c_int = 122; -pub const ENOMEDIUM: c_int = 123; -pub const EMEDIUMTYPE: c_int = 124; -pub const ECANCELED: c_int = 125; -pub const ENOKEY: c_int = 126; -pub const EKEYEXPIRED: c_int = 127; -pub const EKEYREVOKED: c_int = 128; -pub const EKEYREJECTED: c_int = 129; -pub const EOWNERDEAD: c_int = 130; -pub const ENOTRECOVERABLE: c_int = 131; -pub const EHWPOISON: c_int = 133; -pub const ERFKILL: c_int = 132; - -pub const SA_SIGINFO: c_int = 0x00000004; -pub const SA_NOCLDWAIT: c_int = 0x00000002; - -pub const SOCK_STREAM: c_int = 1; -pub const SOCK_DGRAM: c_int = 2; - -cfg_if! 
{ - if #[cfg(gnu_file_offset_bits64)] { - pub const F_GETLK: c_int = 12; - } else { - pub const F_GETLK: c_int = 5; - } -} -pub const F_GETOWN: c_int = 9; -pub const F_SETOWN: c_int = 8; - -pub const PTRACE_GETFPXREGS: c_uint = 18; -pub const PTRACE_SETFPXREGS: c_uint = 19; -pub const PTRACE_SYSEMU: c_uint = 31; -pub const PTRACE_SYSEMU_SINGLESTEP: c_uint = 32; - -pub const MCL_CURRENT: c_int = 0x0001; -pub const MCL_FUTURE: c_int = 0x0002; -pub const MCL_ONFAULT: c_int = 0x0004; - -pub const POLLWRNORM: c_short = 0x100; -pub const POLLWRBAND: c_short = 0x200; - -pub const EFD_NONBLOCK: c_int = 0x800; -pub const SFD_NONBLOCK: c_int = 0x0800; - -pub const SIGCHLD: c_int = 17; -pub const SIGBUS: c_int = 7; -pub const SIGUSR1: c_int = 10; -pub const SIGUSR2: c_int = 12; -pub const SIGCONT: c_int = 18; -pub const SIGSTOP: c_int = 19; -pub const SIGTSTP: c_int = 20; -pub const SIGURG: c_int = 23; -pub const SIGIO: c_int = 29; -pub const SIGSYS: c_int = 31; -pub const SIGSTKFLT: c_int = 16; -#[deprecated(since = "0.2.55", note = "Use SIGSYS instead")] -pub const SIGUNUSED: c_int = 31; -pub const SIGPOLL: c_int = 29; -pub const SIGPWR: c_int = 30; -pub const SIG_SETMASK: c_int = 2; -pub const SIG_BLOCK: c_int = 0x000000; -pub const SIG_UNBLOCK: c_int = 0x01; -pub const SIGTTIN: c_int = 21; -pub const SIGTTOU: c_int = 22; -pub const SIGXCPU: c_int = 24; -pub const SIGXFSZ: c_int = 25; -pub const SIGVTALRM: c_int = 26; -pub const SIGPROF: c_int = 27; -pub const SIGWINCH: c_int = 28; -pub const SIGSTKSZ: size_t = 8192; -pub const MINSIGSTKSZ: size_t = 2048; -pub const CBAUD: crate::tcflag_t = 0o0010017; -pub const TAB1: crate::tcflag_t = 0x00000800; -pub const TAB2: crate::tcflag_t = 0x00001000; -pub const TAB3: crate::tcflag_t = 0x00001800; -pub const CR1: crate::tcflag_t = 0x00000200; -pub const CR2: crate::tcflag_t = 0x00000400; -pub const CR3: crate::tcflag_t = 0x00000600; -pub const FF1: crate::tcflag_t = 0x00008000; -pub const BS1: crate::tcflag_t = 0x00002000; -pub const VT1: crate::tcflag_t = 0x00004000; -pub const VWERASE: usize = 14; -pub const VREPRINT: usize = 12; -pub const VSUSP: usize = 10; -pub const VSTART: usize = 8; -pub const VSTOP: usize = 9; -pub const VDISCARD: usize = 13; -pub const VTIME: usize = 5; -pub const IXON: crate::tcflag_t = 0x00000400; -pub const IXOFF: crate::tcflag_t = 0x00001000; -pub const ONLCR: crate::tcflag_t = 0x4; -pub const CSIZE: crate::tcflag_t = 0x00000030; -pub const CS6: crate::tcflag_t = 0x00000010; -pub const CS7: crate::tcflag_t = 0x00000020; -pub const CS8: crate::tcflag_t = 0x00000030; -pub const CSTOPB: crate::tcflag_t = 0x00000040; -pub const CREAD: crate::tcflag_t = 0x00000080; -pub const PARENB: crate::tcflag_t = 0x00000100; -pub const PARODD: crate::tcflag_t = 0x00000200; -pub const HUPCL: crate::tcflag_t = 0x00000400; -pub const CLOCAL: crate::tcflag_t = 0x00000800; -pub const ECHOKE: crate::tcflag_t = 0x00000800; -pub const ECHOE: crate::tcflag_t = 0x00000010; -pub const ECHOK: crate::tcflag_t = 0x00000020; -pub const ECHONL: crate::tcflag_t = 0x00000040; -pub const ECHOPRT: crate::tcflag_t = 0x00000400; -pub const ECHOCTL: crate::tcflag_t = 0x00000200; -pub const ISIG: crate::tcflag_t = 0x00000001; -pub const ICANON: crate::tcflag_t = 0x00000002; -pub const PENDIN: crate::tcflag_t = 0x00004000; -pub const NOFLSH: crate::tcflag_t = 0x00000080; -pub const CIBAUD: crate::tcflag_t = 0o02003600000; -pub const CBAUDEX: crate::tcflag_t = 0o010000; -pub const VSWTC: usize = 7; -pub const OLCUC: crate::tcflag_t = 0o000002; -pub const NLDLY: 
crate::tcflag_t = 0o000400; -pub const CRDLY: crate::tcflag_t = 0o003000; -pub const TABDLY: crate::tcflag_t = 0o014000; -pub const BSDLY: crate::tcflag_t = 0o020000; -pub const FFDLY: crate::tcflag_t = 0o100000; -pub const VTDLY: crate::tcflag_t = 0o040000; -pub const XTABS: crate::tcflag_t = 0o014000; - -pub const B0: crate::speed_t = 0o000000; -pub const B50: crate::speed_t = 0o000001; -pub const B75: crate::speed_t = 0o000002; -pub const B110: crate::speed_t = 0o000003; -pub const B134: crate::speed_t = 0o000004; -pub const B150: crate::speed_t = 0o000005; -pub const B200: crate::speed_t = 0o000006; -pub const B300: crate::speed_t = 0o000007; -pub const B600: crate::speed_t = 0o000010; -pub const B1200: crate::speed_t = 0o000011; -pub const B1800: crate::speed_t = 0o000012; -pub const B2400: crate::speed_t = 0o000013; -pub const B4800: crate::speed_t = 0o000014; -pub const B9600: crate::speed_t = 0o000015; -pub const B19200: crate::speed_t = 0o000016; -pub const B38400: crate::speed_t = 0o000017; -pub const EXTA: crate::speed_t = B19200; -pub const EXTB: crate::speed_t = B38400; -pub const B57600: crate::speed_t = 0o010001; -pub const B115200: crate::speed_t = 0o010002; -pub const B230400: crate::speed_t = 0o010003; -pub const B460800: crate::speed_t = 0o010004; -pub const B500000: crate::speed_t = 0o010005; -pub const B576000: crate::speed_t = 0o010006; -pub const B921600: crate::speed_t = 0o010007; -pub const B1000000: crate::speed_t = 0o010010; -pub const B1152000: crate::speed_t = 0o010011; -pub const B1500000: crate::speed_t = 0o010012; -pub const B2000000: crate::speed_t = 0o010013; -pub const B2500000: crate::speed_t = 0o010014; -pub const B3000000: crate::speed_t = 0o010015; -pub const B3500000: crate::speed_t = 0o010016; -pub const B4000000: crate::speed_t = 0o010017; - -pub const VEOL: usize = 11; -pub const VEOL2: usize = 16; -pub const VMIN: usize = 6; -pub const IEXTEN: crate::tcflag_t = 0x00008000; -pub const TOSTOP: crate::tcflag_t = 0x00000100; -pub const FLUSHO: crate::tcflag_t = 0x00001000; -pub const EXTPROC: crate::tcflag_t = 0x00010000; - -pub const TCSANOW: c_int = 0; -pub const TCSADRAIN: c_int = 1; -pub const TCSAFLUSH: c_int = 2; - -// Syscall table -pub const SYS_restart_syscall: c_long = 0; -pub const SYS_exit: c_long = 1; -pub const SYS_fork: c_long = 2; -pub const SYS_read: c_long = 3; -pub const SYS_write: c_long = 4; -pub const SYS_open: c_long = 5; -pub const SYS_close: c_long = 6; -pub const SYS_waitpid: c_long = 7; -pub const SYS_creat: c_long = 8; -pub const SYS_link: c_long = 9; -pub const SYS_unlink: c_long = 10; -pub const SYS_execve: c_long = 11; -pub const SYS_chdir: c_long = 12; -pub const SYS_time: c_long = 13; -pub const SYS_mknod: c_long = 14; -pub const SYS_chmod: c_long = 15; -pub const SYS_lchown: c_long = 16; -pub const SYS_break: c_long = 17; -pub const SYS_oldstat: c_long = 18; -pub const SYS_lseek: c_long = 19; -pub const SYS_getpid: c_long = 20; -pub const SYS_mount: c_long = 21; -pub const SYS_umount: c_long = 22; -pub const SYS_setuid: c_long = 23; -pub const SYS_getuid: c_long = 24; -pub const SYS_stime: c_long = 25; -pub const SYS_ptrace: c_long = 26; -pub const SYS_alarm: c_long = 27; -pub const SYS_oldfstat: c_long = 28; -pub const SYS_pause: c_long = 29; -pub const SYS_utime: c_long = 30; -pub const SYS_stty: c_long = 31; -pub const SYS_gtty: c_long = 32; -pub const SYS_access: c_long = 33; -pub const SYS_nice: c_long = 34; -pub const SYS_ftime: c_long = 35; -pub const SYS_sync: c_long = 36; -pub const SYS_kill: c_long = 37; 
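The termios flag and baud-rate constants above feed into tcgetattr/tcsetattr; a minimal sketch of putting a terminal into raw 8N1 mode at 9600 baud, assuming a Linux target and the libc crate (set_raw_9600 is an illustrative helper and fd must be an open terminal descriptor):

    use std::os::fd::RawFd;

    fn set_raw_9600(fd: RawFd) -> std::io::Result<()> {
        unsafe {
            let mut tio: libc::termios = std::mem::zeroed();
            if libc::tcgetattr(fd, &mut tio) != 0 {
                return Err(std::io::Error::last_os_error());
            }
            libc::cfmakeraw(&mut tio);               // clears ICANON, ECHO, ISIG, OPOST, ...
            libc::cfsetspeed(&mut tio, libc::B9600); // B9600 == 0o000015 in the table above
            tio.c_cflag |= libc::CS8 | libc::CREAD | libc::CLOCAL;
            if libc::tcsetattr(fd, libc::TCSANOW, &tio) != 0 {
                return Err(std::io::Error::last_os_error());
            }
        }
        Ok(())
    }
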
-pub const SYS_rename: c_long = 38; -pub const SYS_mkdir: c_long = 39; -pub const SYS_rmdir: c_long = 40; -pub const SYS_dup: c_long = 41; -pub const SYS_pipe: c_long = 42; -pub const SYS_times: c_long = 43; -pub const SYS_prof: c_long = 44; -pub const SYS_brk: c_long = 45; -pub const SYS_setgid: c_long = 46; -pub const SYS_getgid: c_long = 47; -pub const SYS_signal: c_long = 48; -pub const SYS_geteuid: c_long = 49; -pub const SYS_getegid: c_long = 50; -pub const SYS_acct: c_long = 51; -pub const SYS_umount2: c_long = 52; -pub const SYS_lock: c_long = 53; -pub const SYS_ioctl: c_long = 54; -pub const SYS_fcntl: c_long = 55; -pub const SYS_mpx: c_long = 56; -pub const SYS_setpgid: c_long = 57; -pub const SYS_ulimit: c_long = 58; -pub const SYS_oldolduname: c_long = 59; -pub const SYS_umask: c_long = 60; -pub const SYS_chroot: c_long = 61; -pub const SYS_ustat: c_long = 62; -pub const SYS_dup2: c_long = 63; -pub const SYS_getppid: c_long = 64; -pub const SYS_getpgrp: c_long = 65; -pub const SYS_setsid: c_long = 66; -pub const SYS_sigaction: c_long = 67; -pub const SYS_sgetmask: c_long = 68; -pub const SYS_ssetmask: c_long = 69; -pub const SYS_setreuid: c_long = 70; -pub const SYS_setregid: c_long = 71; -pub const SYS_sigsuspend: c_long = 72; -pub const SYS_sigpending: c_long = 73; -pub const SYS_sethostname: c_long = 74; -pub const SYS_setrlimit: c_long = 75; -pub const SYS_getrlimit: c_long = 76; -pub const SYS_getrusage: c_long = 77; -pub const SYS_gettimeofday: c_long = 78; -pub const SYS_settimeofday: c_long = 79; -pub const SYS_getgroups: c_long = 80; -pub const SYS_setgroups: c_long = 81; -pub const SYS_select: c_long = 82; -pub const SYS_symlink: c_long = 83; -pub const SYS_oldlstat: c_long = 84; -pub const SYS_readlink: c_long = 85; -pub const SYS_uselib: c_long = 86; -pub const SYS_swapon: c_long = 87; -pub const SYS_reboot: c_long = 88; -pub const SYS_readdir: c_long = 89; -pub const SYS_mmap: c_long = 90; -pub const SYS_munmap: c_long = 91; -pub const SYS_truncate: c_long = 92; -pub const SYS_ftruncate: c_long = 93; -pub const SYS_fchmod: c_long = 94; -pub const SYS_fchown: c_long = 95; -pub const SYS_getpriority: c_long = 96; -pub const SYS_setpriority: c_long = 97; -pub const SYS_profil: c_long = 98; -pub const SYS_statfs: c_long = 99; -pub const SYS_fstatfs: c_long = 100; -pub const SYS_ioperm: c_long = 101; -pub const SYS_socketcall: c_long = 102; -pub const SYS_syslog: c_long = 103; -pub const SYS_setitimer: c_long = 104; -pub const SYS_getitimer: c_long = 105; -pub const SYS_stat: c_long = 106; -pub const SYS_lstat: c_long = 107; -pub const SYS_fstat: c_long = 108; -pub const SYS_olduname: c_long = 109; -pub const SYS_iopl: c_long = 110; -pub const SYS_vhangup: c_long = 111; -pub const SYS_idle: c_long = 112; -pub const SYS_vm86old: c_long = 113; -pub const SYS_wait4: c_long = 114; -pub const SYS_swapoff: c_long = 115; -pub const SYS_sysinfo: c_long = 116; -pub const SYS_ipc: c_long = 117; -pub const SYS_fsync: c_long = 118; -pub const SYS_sigreturn: c_long = 119; -pub const SYS_clone: c_long = 120; -pub const SYS_setdomainname: c_long = 121; -pub const SYS_uname: c_long = 122; -pub const SYS_modify_ldt: c_long = 123; -pub const SYS_adjtimex: c_long = 124; -pub const SYS_mprotect: c_long = 125; -pub const SYS_sigprocmask: c_long = 126; -#[deprecated(since = "0.2.70", note = "Functional up to 2.6 kernel")] -pub const SYS_create_module: c_long = 127; -pub const SYS_init_module: c_long = 128; -pub const SYS_delete_module: c_long = 129; -#[deprecated(since = "0.2.70", note = 
"Functional up to 2.6 kernel")] -pub const SYS_get_kernel_syms: c_long = 130; -pub const SYS_quotactl: c_long = 131; -pub const SYS_getpgid: c_long = 132; -pub const SYS_fchdir: c_long = 133; -pub const SYS_bdflush: c_long = 134; -pub const SYS_sysfs: c_long = 135; -pub const SYS_personality: c_long = 136; -pub const SYS_afs_syscall: c_long = 137; -pub const SYS_setfsuid: c_long = 138; -pub const SYS_setfsgid: c_long = 139; -pub const SYS__llseek: c_long = 140; -pub const SYS_getdents: c_long = 141; -pub const SYS__newselect: c_long = 142; -pub const SYS_flock: c_long = 143; -pub const SYS_msync: c_long = 144; -pub const SYS_readv: c_long = 145; -pub const SYS_writev: c_long = 146; -pub const SYS_getsid: c_long = 147; -pub const SYS_fdatasync: c_long = 148; -pub const SYS__sysctl: c_long = 149; -pub const SYS_mlock: c_long = 150; -pub const SYS_munlock: c_long = 151; -pub const SYS_mlockall: c_long = 152; -pub const SYS_munlockall: c_long = 153; -pub const SYS_sched_setparam: c_long = 154; -pub const SYS_sched_getparam: c_long = 155; -pub const SYS_sched_setscheduler: c_long = 156; -pub const SYS_sched_getscheduler: c_long = 157; -pub const SYS_sched_yield: c_long = 158; -pub const SYS_sched_get_priority_max: c_long = 159; -pub const SYS_sched_get_priority_min: c_long = 160; -pub const SYS_sched_rr_get_interval: c_long = 161; -pub const SYS_nanosleep: c_long = 162; -pub const SYS_mremap: c_long = 163; -pub const SYS_setresuid: c_long = 164; -pub const SYS_getresuid: c_long = 165; -pub const SYS_vm86: c_long = 166; -#[deprecated(since = "0.2.70", note = "Functional up to 2.6 kernel")] -pub const SYS_query_module: c_long = 167; -pub const SYS_poll: c_long = 168; -pub const SYS_nfsservctl: c_long = 169; -pub const SYS_setresgid: c_long = 170; -pub const SYS_getresgid: c_long = 171; -pub const SYS_prctl: c_long = 172; -pub const SYS_rt_sigreturn: c_long = 173; -pub const SYS_rt_sigaction: c_long = 174; -pub const SYS_rt_sigprocmask: c_long = 175; -pub const SYS_rt_sigpending: c_long = 176; -pub const SYS_rt_sigtimedwait: c_long = 177; -pub const SYS_rt_sigqueueinfo: c_long = 178; -pub const SYS_rt_sigsuspend: c_long = 179; -pub const SYS_pread64: c_long = 180; -pub const SYS_pwrite64: c_long = 181; -pub const SYS_chown: c_long = 182; -pub const SYS_getcwd: c_long = 183; -pub const SYS_capget: c_long = 184; -pub const SYS_capset: c_long = 185; -pub const SYS_sigaltstack: c_long = 186; -pub const SYS_sendfile: c_long = 187; -pub const SYS_getpmsg: c_long = 188; -pub const SYS_putpmsg: c_long = 189; -pub const SYS_vfork: c_long = 190; -pub const SYS_ugetrlimit: c_long = 191; -pub const SYS_mmap2: c_long = 192; -pub const SYS_truncate64: c_long = 193; -pub const SYS_ftruncate64: c_long = 194; -pub const SYS_stat64: c_long = 195; -pub const SYS_lstat64: c_long = 196; -pub const SYS_fstat64: c_long = 197; -pub const SYS_lchown32: c_long = 198; -pub const SYS_getuid32: c_long = 199; -pub const SYS_getgid32: c_long = 200; -pub const SYS_geteuid32: c_long = 201; -pub const SYS_getegid32: c_long = 202; -pub const SYS_setreuid32: c_long = 203; -pub const SYS_setregid32: c_long = 204; -pub const SYS_getgroups32: c_long = 205; -pub const SYS_setgroups32: c_long = 206; -pub const SYS_fchown32: c_long = 207; -pub const SYS_setresuid32: c_long = 208; -pub const SYS_getresuid32: c_long = 209; -pub const SYS_setresgid32: c_long = 210; -pub const SYS_getresgid32: c_long = 211; -pub const SYS_chown32: c_long = 212; -pub const SYS_setuid32: c_long = 213; -pub const SYS_setgid32: c_long = 214; -pub const 
SYS_setfsuid32: c_long = 215; -pub const SYS_setfsgid32: c_long = 216; -pub const SYS_pivot_root: c_long = 217; -pub const SYS_mincore: c_long = 218; -pub const SYS_madvise: c_long = 219; -pub const SYS_getdents64: c_long = 220; -pub const SYS_fcntl64: c_long = 221; -pub const SYS_gettid: c_long = 224; -pub const SYS_readahead: c_long = 225; -pub const SYS_setxattr: c_long = 226; -pub const SYS_lsetxattr: c_long = 227; -pub const SYS_fsetxattr: c_long = 228; -pub const SYS_getxattr: c_long = 229; -pub const SYS_lgetxattr: c_long = 230; -pub const SYS_fgetxattr: c_long = 231; -pub const SYS_listxattr: c_long = 232; -pub const SYS_llistxattr: c_long = 233; -pub const SYS_flistxattr: c_long = 234; -pub const SYS_removexattr: c_long = 235; -pub const SYS_lremovexattr: c_long = 236; -pub const SYS_fremovexattr: c_long = 237; -pub const SYS_tkill: c_long = 238; -pub const SYS_sendfile64: c_long = 239; -pub const SYS_futex: c_long = 240; -pub const SYS_sched_setaffinity: c_long = 241; -pub const SYS_sched_getaffinity: c_long = 242; -pub const SYS_set_thread_area: c_long = 243; -pub const SYS_get_thread_area: c_long = 244; -pub const SYS_io_setup: c_long = 245; -pub const SYS_io_destroy: c_long = 246; -pub const SYS_io_getevents: c_long = 247; -pub const SYS_io_submit: c_long = 248; -pub const SYS_io_cancel: c_long = 249; -pub const SYS_fadvise64: c_long = 250; -pub const SYS_exit_group: c_long = 252; -pub const SYS_lookup_dcookie: c_long = 253; -pub const SYS_epoll_create: c_long = 254; -pub const SYS_epoll_ctl: c_long = 255; -pub const SYS_epoll_wait: c_long = 256; -pub const SYS_remap_file_pages: c_long = 257; -pub const SYS_set_tid_address: c_long = 258; -pub const SYS_timer_create: c_long = 259; -pub const SYS_timer_settime: c_long = 260; -pub const SYS_timer_gettime: c_long = 261; -pub const SYS_timer_getoverrun: c_long = 262; -pub const SYS_timer_delete: c_long = 263; -pub const SYS_clock_settime: c_long = 264; -pub const SYS_clock_gettime: c_long = 265; -pub const SYS_clock_getres: c_long = 266; -pub const SYS_clock_nanosleep: c_long = 267; -pub const SYS_statfs64: c_long = 268; -pub const SYS_fstatfs64: c_long = 269; -pub const SYS_tgkill: c_long = 270; -pub const SYS_utimes: c_long = 271; -pub const SYS_fadvise64_64: c_long = 272; -pub const SYS_vserver: c_long = 273; -pub const SYS_mbind: c_long = 274; -pub const SYS_get_mempolicy: c_long = 275; -pub const SYS_set_mempolicy: c_long = 276; -pub const SYS_mq_open: c_long = 277; -pub const SYS_mq_unlink: c_long = 278; -pub const SYS_mq_timedsend: c_long = 279; -pub const SYS_mq_timedreceive: c_long = 280; -pub const SYS_mq_notify: c_long = 281; -pub const SYS_mq_getsetattr: c_long = 282; -pub const SYS_kexec_load: c_long = 283; -pub const SYS_waitid: c_long = 284; -pub const SYS_add_key: c_long = 286; -pub const SYS_request_key: c_long = 287; -pub const SYS_keyctl: c_long = 288; -pub const SYS_ioprio_set: c_long = 289; -pub const SYS_ioprio_get: c_long = 290; -pub const SYS_inotify_init: c_long = 291; -pub const SYS_inotify_add_watch: c_long = 292; -pub const SYS_inotify_rm_watch: c_long = 293; -pub const SYS_migrate_pages: c_long = 294; -pub const SYS_openat: c_long = 295; -pub const SYS_mkdirat: c_long = 296; -pub const SYS_mknodat: c_long = 297; -pub const SYS_fchownat: c_long = 298; -pub const SYS_futimesat: c_long = 299; -pub const SYS_fstatat64: c_long = 300; -pub const SYS_unlinkat: c_long = 301; -pub const SYS_renameat: c_long = 302; -pub const SYS_linkat: c_long = 303; -pub const SYS_symlinkat: c_long = 304; -pub const 
SYS_readlinkat: c_long = 305; -pub const SYS_fchmodat: c_long = 306; -pub const SYS_faccessat: c_long = 307; -pub const SYS_pselect6: c_long = 308; -pub const SYS_ppoll: c_long = 309; -pub const SYS_unshare: c_long = 310; -pub const SYS_set_robust_list: c_long = 311; -pub const SYS_get_robust_list: c_long = 312; -pub const SYS_splice: c_long = 313; -pub const SYS_sync_file_range: c_long = 314; -pub const SYS_tee: c_long = 315; -pub const SYS_vmsplice: c_long = 316; -pub const SYS_move_pages: c_long = 317; -pub const SYS_getcpu: c_long = 318; -pub const SYS_epoll_pwait: c_long = 319; -pub const SYS_utimensat: c_long = 320; -pub const SYS_signalfd: c_long = 321; -pub const SYS_timerfd_create: c_long = 322; -pub const SYS_eventfd: c_long = 323; -pub const SYS_fallocate: c_long = 324; -pub const SYS_timerfd_settime: c_long = 325; -pub const SYS_timerfd_gettime: c_long = 326; -pub const SYS_signalfd4: c_long = 327; -pub const SYS_eventfd2: c_long = 328; -pub const SYS_epoll_create1: c_long = 329; -pub const SYS_dup3: c_long = 330; -pub const SYS_pipe2: c_long = 331; -pub const SYS_inotify_init1: c_long = 332; -pub const SYS_preadv: c_long = 333; -pub const SYS_pwritev: c_long = 334; -pub const SYS_rt_tgsigqueueinfo: c_long = 335; -pub const SYS_perf_event_open: c_long = 336; -pub const SYS_recvmmsg: c_long = 337; -pub const SYS_fanotify_init: c_long = 338; -pub const SYS_fanotify_mark: c_long = 339; -pub const SYS_prlimit64: c_long = 340; -pub const SYS_name_to_handle_at: c_long = 341; -pub const SYS_open_by_handle_at: c_long = 342; -pub const SYS_clock_adjtime: c_long = 343; -pub const SYS_syncfs: c_long = 344; -pub const SYS_sendmmsg: c_long = 345; -pub const SYS_setns: c_long = 346; -pub const SYS_process_vm_readv: c_long = 347; -pub const SYS_process_vm_writev: c_long = 348; -pub const SYS_kcmp: c_long = 349; -pub const SYS_finit_module: c_long = 350; -pub const SYS_sched_setattr: c_long = 351; -pub const SYS_sched_getattr: c_long = 352; -pub const SYS_renameat2: c_long = 353; -pub const SYS_seccomp: c_long = 354; -pub const SYS_getrandom: c_long = 355; -pub const SYS_memfd_create: c_long = 356; -pub const SYS_bpf: c_long = 357; -pub const SYS_execveat: c_long = 358; -pub const SYS_socket: c_long = 359; -pub const SYS_socketpair: c_long = 360; -pub const SYS_bind: c_long = 361; -pub const SYS_connect: c_long = 362; -pub const SYS_listen: c_long = 363; -pub const SYS_accept4: c_long = 364; -pub const SYS_getsockopt: c_long = 365; -pub const SYS_setsockopt: c_long = 366; -pub const SYS_getsockname: c_long = 367; -pub const SYS_getpeername: c_long = 368; -pub const SYS_sendto: c_long = 369; -pub const SYS_sendmsg: c_long = 370; -pub const SYS_recvfrom: c_long = 371; -pub const SYS_recvmsg: c_long = 372; -pub const SYS_shutdown: c_long = 373; -pub const SYS_userfaultfd: c_long = 374; -pub const SYS_membarrier: c_long = 375; -pub const SYS_mlock2: c_long = 376; -pub const SYS_copy_file_range: c_long = 377; -pub const SYS_preadv2: c_long = 378; -pub const SYS_pwritev2: c_long = 379; -pub const SYS_pkey_mprotect: c_long = 380; -pub const SYS_pkey_alloc: c_long = 381; -pub const SYS_pkey_free: c_long = 382; -pub const SYS_statx: c_long = 383; -pub const SYS_rseq: c_long = 386; -pub const SYS_pidfd_send_signal: c_long = 424; -pub const SYS_io_uring_setup: c_long = 425; -pub const SYS_io_uring_enter: c_long = 426; -pub const SYS_io_uring_register: c_long = 427; -pub const SYS_open_tree: c_long = 428; -pub const SYS_move_mount: c_long = 429; -pub const SYS_fsopen: c_long = 430; -pub const 
SYS_fsconfig: c_long = 431; -pub const SYS_fsmount: c_long = 432; -pub const SYS_fspick: c_long = 433; -pub const SYS_pidfd_open: c_long = 434; -pub const SYS_clone3: c_long = 435; -pub const SYS_close_range: c_long = 436; -pub const SYS_openat2: c_long = 437; -pub const SYS_pidfd_getfd: c_long = 438; -pub const SYS_faccessat2: c_long = 439; -pub const SYS_process_madvise: c_long = 440; -pub const SYS_epoll_pwait2: c_long = 441; -pub const SYS_mount_setattr: c_long = 442; -pub const SYS_quotactl_fd: c_long = 443; -pub const SYS_landlock_create_ruleset: c_long = 444; -pub const SYS_landlock_add_rule: c_long = 445; -pub const SYS_landlock_restrict_self: c_long = 446; -pub const SYS_memfd_secret: c_long = 447; -pub const SYS_process_mrelease: c_long = 448; -pub const SYS_futex_waitv: c_long = 449; -pub const SYS_set_mempolicy_home_node: c_long = 450; -pub const SYS_fchmodat2: c_long = 452; -pub const SYS_mseal: c_long = 462; - -// offsets in user_regs_structs, from sys/reg.h -pub const EBX: c_int = 0; -pub const ECX: c_int = 1; -pub const EDX: c_int = 2; -pub const ESI: c_int = 3; -pub const EDI: c_int = 4; -pub const EBP: c_int = 5; -pub const EAX: c_int = 6; -pub const DS: c_int = 7; -pub const ES: c_int = 8; -pub const FS: c_int = 9; -pub const GS: c_int = 10; -pub const ORIG_EAX: c_int = 11; -pub const EIP: c_int = 12; -pub const CS: c_int = 13; -pub const EFL: c_int = 14; -pub const UESP: c_int = 15; -pub const SS: c_int = 16; - -// offsets in mcontext_t.gregs from sys/ucontext.h -pub const REG_GS: c_int = 0; -pub const REG_FS: c_int = 1; -pub const REG_ES: c_int = 2; -pub const REG_DS: c_int = 3; -pub const REG_EDI: c_int = 4; -pub const REG_ESI: c_int = 5; -pub const REG_EBP: c_int = 6; -pub const REG_ESP: c_int = 7; -pub const REG_EBX: c_int = 8; -pub const REG_EDX: c_int = 9; -pub const REG_ECX: c_int = 10; -pub const REG_EAX: c_int = 11; -pub const REG_TRAPNO: c_int = 12; -pub const REG_ERR: c_int = 13; -pub const REG_EIP: c_int = 14; -pub const REG_CS: c_int = 15; -pub const REG_EFL: c_int = 16; -pub const REG_UESP: c_int = 17; -pub const REG_SS: c_int = 18; - -extern "C" { - pub fn getcontext(ucp: *mut ucontext_t) -> c_int; - pub fn setcontext(ucp: *const ucontext_t) -> c_int; - pub fn makecontext(ucp: *mut ucontext_t, func: extern "C" fn(), argc: c_int, ...); - pub fn swapcontext(uocp: *mut ucontext_t, ucp: *const ucontext_t) -> c_int; -} diff --git a/vendor/libc/src/unix/linux_like/linux/gnu/b64/aarch64/ilp32.rs b/vendor/libc/src/unix/linux_like/linux/gnu/b64/aarch64/ilp32.rs deleted file mode 100644 index f808ff31f8cca5..00000000000000 --- a/vendor/libc/src/unix/linux_like/linux/gnu/b64/aarch64/ilp32.rs +++ /dev/null @@ -1,54 +0,0 @@ -use crate::prelude::*; -use crate::pthread_mutex_t; - -pub const __SIZEOF_PTHREAD_CONDATTR_T: usize = 4; -pub const __SIZEOF_PTHREAD_MUTEX_T: usize = 32; -pub const __SIZEOF_PTHREAD_MUTEXATTR_T: usize = 4; -pub const __SIZEOF_PTHREAD_RWLOCK_T: usize = 48; -pub const __SIZEOF_PTHREAD_BARRIERATTR_T: usize = 4; -pub const __SIZEOF_PTHREAD_BARRIER_T: usize = 20; - -#[cfg(target_endian = "little")] -pub const PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP: crate::pthread_mutex_t = pthread_mutex_t { - size: [ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, - ], -}; -#[cfg(target_endian = "little")] -pub const PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP: crate::pthread_mutex_t = pthread_mutex_t { - size: [ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, - ], -}; 
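The byte arrays in these initializers encode the glibc mutex kind at the __kind offset (1 = recursive, 2 = error-checking, 3 = adaptive); a minimal sketch of using one for static initialization, assuming a linux-gnu target and the libc crate (with_lock is an illustrative helper):

    static mut LOCK: libc::pthread_mutex_t = libc::PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP;

    fn with_lock<R>(f: impl FnOnce() -> R) -> R {
        unsafe {
            let m = std::ptr::addr_of_mut!(LOCK);
            libc::pthread_mutex_lock(m);   // recursive: re-locking on the same thread is allowed
            let r = f();
            libc::pthread_mutex_unlock(m);
            r
        }
    }
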
-#[cfg(target_endian = "little")] -pub const PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP: crate::pthread_mutex_t = pthread_mutex_t { - size: [ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, - ], -}; -#[cfg(target_endian = "big")] -pub const PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP: crate::pthread_mutex_t = pthread_mutex_t { - size: [ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, - ], -}; -#[cfg(target_endian = "big")] -pub const PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP: crate::pthread_mutex_t = pthread_mutex_t { - size: [ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, - ], -}; -#[cfg(target_endian = "big")] -pub const PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP: crate::pthread_mutex_t = pthread_mutex_t { - size: [ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, - ], -}; - -pub const SYS_sync_file_range2: c_long = 84; diff --git a/vendor/libc/src/unix/linux_like/linux/gnu/b64/aarch64/lp64.rs b/vendor/libc/src/unix/linux_like/linux/gnu/b64/aarch64/lp64.rs deleted file mode 100644 index 960e5127806b34..00000000000000 --- a/vendor/libc/src/unix/linux_like/linux/gnu/b64/aarch64/lp64.rs +++ /dev/null @@ -1,57 +0,0 @@ -use crate::prelude::*; -use crate::pthread_mutex_t; - -pub const __SIZEOF_PTHREAD_CONDATTR_T: usize = 8; -pub const __SIZEOF_PTHREAD_MUTEX_T: usize = 48; -pub const __SIZEOF_PTHREAD_MUTEXATTR_T: usize = 8; -pub const __SIZEOF_PTHREAD_RWLOCK_T: usize = 56; -pub const __SIZEOF_PTHREAD_BARRIERATTR_T: usize = 8; -pub const __SIZEOF_PTHREAD_BARRIER_T: usize = 32; - -#[cfg(target_endian = "little")] -pub const PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP: crate::pthread_mutex_t = pthread_mutex_t { - size: [ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ], -}; -#[cfg(target_endian = "little")] -pub const PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP: crate::pthread_mutex_t = pthread_mutex_t { - size: [ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ], -}; -#[cfg(target_endian = "little")] -pub const PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP: crate::pthread_mutex_t = pthread_mutex_t { - size: [ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ], -}; -#[cfg(target_endian = "big")] -pub const PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP: crate::pthread_mutex_t = pthread_mutex_t { - size: [ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ], -}; -#[cfg(target_endian = "big")] -pub const PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP: crate::pthread_mutex_t = pthread_mutex_t { - size: [ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ], -}; -#[cfg(target_endian = "big")] -pub const PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP: crate::pthread_mutex_t = pthread_mutex_t { - size: [ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ], -}; - -pub const SYS_renameat: c_long = 38; -pub const SYS_sync_file_range: c_long = 84; -pub const 
SYS_getrlimit: c_long = 163; -pub const SYS_setrlimit: c_long = 164; diff --git a/vendor/libc/src/unix/linux_like/linux/gnu/b64/aarch64/mod.rs b/vendor/libc/src/unix/linux_like/linux/gnu/b64/aarch64/mod.rs deleted file mode 100644 index 28b4e40fde5438..00000000000000 --- a/vendor/libc/src/unix/linux_like/linux/gnu/b64/aarch64/mod.rs +++ /dev/null @@ -1,973 +0,0 @@ -//! AArch64-specific definitions for 64-bit linux-like values - -use crate::prelude::*; -use crate::{off64_t, off_t}; - -pub type wchar_t = u32; -pub type nlink_t = u32; -pub type blksize_t = i32; -pub type suseconds_t = i64; -pub type __u64 = c_ulonglong; -pub type __s64 = c_longlong; - -s! { - // FIXME(1.0): This should not implement `PartialEq` - #[allow(unpredictable_function_pointer_comparisons)] - pub struct sigaction { - pub sa_sigaction: crate::sighandler_t, - pub sa_mask: crate::sigset_t, - #[cfg(target_arch = "sparc64")] - __reserved0: c_int, - pub sa_flags: c_int, - pub sa_restorer: Option, - } - - pub struct statfs { - pub f_type: crate::__fsword_t, - pub f_bsize: crate::__fsword_t, - pub f_blocks: crate::fsblkcnt_t, - pub f_bfree: crate::fsblkcnt_t, - pub f_bavail: crate::fsblkcnt_t, - - pub f_files: crate::fsfilcnt_t, - pub f_ffree: crate::fsfilcnt_t, - pub f_fsid: crate::fsid_t, - - pub f_namelen: crate::__fsword_t, - pub f_frsize: crate::__fsword_t, - f_spare: [crate::__fsword_t; 5], - } - - pub struct flock { - pub l_type: c_short, - pub l_whence: c_short, - pub l_start: off_t, - pub l_len: off_t, - pub l_pid: crate::pid_t, - } - - pub struct flock64 { - pub l_type: c_short, - pub l_whence: c_short, - pub l_start: off64_t, - pub l_len: off64_t, - pub l_pid: crate::pid_t, - } - - pub struct stat { - pub st_dev: crate::dev_t, - pub st_ino: crate::ino_t, - pub st_mode: crate::mode_t, - pub st_nlink: crate::nlink_t, - pub st_uid: crate::uid_t, - pub st_gid: crate::gid_t, - pub st_rdev: crate::dev_t, - __pad1: crate::dev_t, - pub st_size: off_t, - pub st_blksize: crate::blksize_t, - __pad2: c_int, - pub st_blocks: crate::blkcnt_t, - pub st_atime: crate::time_t, - pub st_atime_nsec: c_long, - pub st_mtime: crate::time_t, - pub st_mtime_nsec: c_long, - pub st_ctime: crate::time_t, - pub st_ctime_nsec: c_long, - __unused: [c_int; 2], - } - - pub struct stat64 { - pub st_dev: crate::dev_t, - pub st_ino: crate::ino_t, - pub st_mode: crate::mode_t, - pub st_nlink: crate::nlink_t, - pub st_uid: crate::uid_t, - pub st_gid: crate::gid_t, - pub st_rdev: crate::dev_t, - __pad1: crate::dev_t, - pub st_size: off64_t, - pub st_blksize: crate::blksize_t, - __pad2: c_int, - pub st_blocks: crate::blkcnt64_t, - pub st_atime: crate::time_t, - pub st_atime_nsec: c_long, - pub st_mtime: crate::time_t, - pub st_mtime_nsec: c_long, - pub st_ctime: crate::time_t, - pub st_ctime_nsec: c_long, - __unused: [c_int; 2], - } - - pub struct statfs64 { - pub f_type: crate::__fsword_t, - pub f_bsize: crate::__fsword_t, - pub f_blocks: u64, - pub f_bfree: u64, - pub f_bavail: u64, - pub f_files: u64, - pub f_ffree: u64, - pub f_fsid: crate::fsid_t, - pub f_namelen: crate::__fsword_t, - pub f_frsize: crate::__fsword_t, - pub f_flags: crate::__fsword_t, - pub f_spare: [crate::__fsword_t; 4], - } - - pub struct statvfs { - pub f_bsize: c_ulong, - pub f_frsize: c_ulong, - pub f_blocks: crate::fsblkcnt_t, - pub f_bfree: crate::fsblkcnt_t, - pub f_bavail: crate::fsblkcnt_t, - pub f_files: crate::fsfilcnt_t, - pub f_ffree: crate::fsfilcnt_t, - pub f_favail: crate::fsfilcnt_t, - pub f_fsid: c_ulong, - pub f_flag: c_ulong, - pub f_namemax: c_ulong, - 
__f_spare: [c_int; 6], - } - - pub struct statvfs64 { - pub f_bsize: c_ulong, - pub f_frsize: c_ulong, - pub f_blocks: u64, - pub f_bfree: u64, - pub f_bavail: u64, - pub f_files: u64, - pub f_ffree: u64, - pub f_favail: u64, - pub f_fsid: c_ulong, - pub f_flag: c_ulong, - pub f_namemax: c_ulong, - __f_spare: [c_int; 6], - } - - pub struct pthread_attr_t { - __size: [usize; 8], - } - - pub struct user_regs_struct { - pub regs: [c_ulonglong; 31], - pub sp: c_ulonglong, - pub pc: c_ulonglong, - pub pstate: c_ulonglong, - } - - pub struct ipc_perm { - pub __key: crate::key_t, - pub uid: crate::uid_t, - pub gid: crate::gid_t, - pub cuid: crate::uid_t, - pub cgid: crate::gid_t, - pub mode: c_uint, - pub __seq: c_ushort, - __pad1: c_ushort, - __unused1: c_ulong, - __unused2: c_ulong, - } - - pub struct shmid_ds { - pub shm_perm: crate::ipc_perm, - pub shm_segsz: size_t, - pub shm_atime: crate::time_t, - pub shm_dtime: crate::time_t, - pub shm_ctime: crate::time_t, - pub shm_cpid: crate::pid_t, - pub shm_lpid: crate::pid_t, - pub shm_nattch: crate::shmatt_t, - __unused4: c_ulong, - __unused5: c_ulong, - } - - pub struct siginfo_t { - pub si_signo: c_int, - pub si_errno: c_int, - pub si_code: c_int, - #[doc(hidden)] - #[deprecated( - since = "0.2.54", - note = "Please leave a comment on \ - https://github.com/rust-lang/libc/pull/1316 if you're using \ - this field" - )] - pub _pad: [c_int; 29], - _align: [usize; 0], - } - - pub struct stack_t { - pub ss_sp: *mut c_void, - pub ss_flags: c_int, - pub ss_size: size_t, - } - - pub struct ucontext_t { - pub uc_flags: c_ulong, - pub uc_link: *mut ucontext_t, - pub uc_stack: crate::stack_t, - pub uc_sigmask: crate::sigset_t, - pub uc_mcontext: mcontext_t, - } - - #[repr(align(16))] - pub struct mcontext_t { - pub fault_address: c_ulonglong, - pub regs: [c_ulonglong; 31], - pub sp: c_ulonglong, - pub pc: c_ulonglong, - pub pstate: c_ulonglong, - __reserved: [u64; 512], - } - - pub struct user_fpsimd_struct { - pub vregs: [crate::__uint128_t; 32], - pub fpsr: c_uint, - pub fpcr: c_uint, - } - - #[repr(align(8))] - pub struct clone_args { - pub flags: c_ulonglong, - pub pidfd: c_ulonglong, - pub child_tid: c_ulonglong, - pub parent_tid: c_ulonglong, - pub exit_signal: c_ulonglong, - pub stack: c_ulonglong, - pub stack_size: c_ulonglong, - pub tls: c_ulonglong, - pub set_tid: c_ulonglong, - pub set_tid_size: c_ulonglong, - pub cgroup: c_ulonglong, - } -} - -s_no_extra_traits! 
{ - #[repr(align(16))] - pub struct max_align_t { - priv_: [f32; 8], - } -} - -pub const VEOF: usize = 4; - -pub const RTLD_DEEPBIND: c_int = 0x8; -pub const RTLD_GLOBAL: c_int = 0x100; -pub const RTLD_NOLOAD: c_int = 0x4; - -pub const O_APPEND: c_int = 1024; -pub const O_CREAT: c_int = 64; -pub const O_EXCL: c_int = 128; -pub const O_NOCTTY: c_int = 256; -pub const O_NONBLOCK: c_int = 2048; -pub const O_SYNC: c_int = 1052672; -pub const O_RSYNC: c_int = 1052672; -pub const O_DSYNC: c_int = 4096; -pub const O_FSYNC: c_int = 0x101000; -pub const O_NOATIME: c_int = 0o1000000; -pub const O_PATH: c_int = 0o10000000; -pub const O_TMPFILE: c_int = 0o20000000 | O_DIRECTORY; - -pub const MADV_SOFT_OFFLINE: c_int = 101; -pub const MAP_GROWSDOWN: c_int = 0x0100; - -pub const EUCLEAN: c_int = 117; -pub const ENOTNAM: c_int = 118; -pub const ENAVAIL: c_int = 119; -pub const EISNAM: c_int = 120; -pub const EREMOTEIO: c_int = 121; -pub const EDEADLK: c_int = 35; -pub const ENAMETOOLONG: c_int = 36; -pub const ENOLCK: c_int = 37; -pub const ENOSYS: c_int = 38; -pub const ENOTEMPTY: c_int = 39; -pub const ELOOP: c_int = 40; -pub const ENOMSG: c_int = 42; -pub const EIDRM: c_int = 43; -pub const ECHRNG: c_int = 44; -pub const EL2NSYNC: c_int = 45; -pub const EL3HLT: c_int = 46; -pub const EL3RST: c_int = 47; -pub const ELNRNG: c_int = 48; -pub const EUNATCH: c_int = 49; -pub const ENOCSI: c_int = 50; -pub const EL2HLT: c_int = 51; -pub const EBADE: c_int = 52; -pub const EBADR: c_int = 53; -pub const EXFULL: c_int = 54; -pub const ENOANO: c_int = 55; -pub const EBADRQC: c_int = 56; -pub const EBADSLT: c_int = 57; -pub const EMULTIHOP: c_int = 72; -pub const EOVERFLOW: c_int = 75; -pub const ENOTUNIQ: c_int = 76; -pub const EBADFD: c_int = 77; -pub const EBADMSG: c_int = 74; -pub const EREMCHG: c_int = 78; -pub const ELIBACC: c_int = 79; -pub const ELIBBAD: c_int = 80; -pub const ELIBSCN: c_int = 81; -pub const ELIBMAX: c_int = 82; -pub const ELIBEXEC: c_int = 83; -pub const EILSEQ: c_int = 84; -pub const ERESTART: c_int = 85; -pub const ESTRPIPE: c_int = 86; -pub const EUSERS: c_int = 87; -pub const ENOTSOCK: c_int = 88; -pub const EDESTADDRREQ: c_int = 89; -pub const EMSGSIZE: c_int = 90; -pub const EPROTOTYPE: c_int = 91; -pub const ENOPROTOOPT: c_int = 92; -pub const EPROTONOSUPPORT: c_int = 93; -pub const ESOCKTNOSUPPORT: c_int = 94; -pub const EOPNOTSUPP: c_int = 95; -pub const EPFNOSUPPORT: c_int = 96; -pub const EAFNOSUPPORT: c_int = 97; -pub const EADDRINUSE: c_int = 98; -pub const EADDRNOTAVAIL: c_int = 99; -pub const ENETDOWN: c_int = 100; -pub const ENETUNREACH: c_int = 101; -pub const ENETRESET: c_int = 102; -pub const ECONNABORTED: c_int = 103; -pub const ECONNRESET: c_int = 104; -pub const ENOBUFS: c_int = 105; -pub const EISCONN: c_int = 106; -pub const ENOTCONN: c_int = 107; -pub const ESHUTDOWN: c_int = 108; -pub const ETOOMANYREFS: c_int = 109; -pub const ETIMEDOUT: c_int = 110; -pub const ECONNREFUSED: c_int = 111; -pub const EHOSTDOWN: c_int = 112; -pub const EHOSTUNREACH: c_int = 113; -pub const EALREADY: c_int = 114; -pub const EINPROGRESS: c_int = 115; -pub const ESTALE: c_int = 116; -pub const EDQUOT: c_int = 122; -pub const ENOMEDIUM: c_int = 123; -pub const EMEDIUMTYPE: c_int = 124; -pub const ECANCELED: c_int = 125; -pub const ENOKEY: c_int = 126; -pub const EKEYEXPIRED: c_int = 127; -pub const EKEYREVOKED: c_int = 128; -pub const EKEYREJECTED: c_int = 129; -pub const EOWNERDEAD: c_int = 130; -pub const ENOTRECOVERABLE: c_int = 131; -pub const EHWPOISON: c_int = 133; -pub const 
ERFKILL: c_int = 132; - -pub const POSIX_FADV_DONTNEED: c_int = 4; -pub const POSIX_FADV_NOREUSE: c_int = 5; - -pub const SOCK_STREAM: c_int = 1; -pub const SOCK_DGRAM: c_int = 2; - -pub const SA_ONSTACK: c_int = 0x08000000; -pub const SA_SIGINFO: c_int = 0x00000004; -pub const SA_NOCLDWAIT: c_int = 0x00000002; - -pub const SIGTTIN: c_int = 21; -pub const SIGTTOU: c_int = 22; -pub const SIGXCPU: c_int = 24; -pub const SIGXFSZ: c_int = 25; -pub const SIGVTALRM: c_int = 26; -pub const SIGPROF: c_int = 27; -pub const SIGWINCH: c_int = 28; -pub const SIGCHLD: c_int = 17; -pub const SIGBUS: c_int = 7; -pub const SIGUSR1: c_int = 10; -pub const SIGUSR2: c_int = 12; -pub const SIGCONT: c_int = 18; -pub const SIGSTOP: c_int = 19; -pub const SIGTSTP: c_int = 20; -pub const SIGURG: c_int = 23; -pub const SIGIO: c_int = 29; -pub const SIGSYS: c_int = 31; -pub const SIGSTKFLT: c_int = 16; -#[deprecated(since = "0.2.55", note = "Use SIGSYS instead")] -pub const SIGUNUSED: c_int = 31; -pub const SIGPOLL: c_int = 29; -pub const SIGPWR: c_int = 30; -pub const SIG_SETMASK: c_int = 2; -pub const SIG_BLOCK: c_int = 0x000000; -pub const SIG_UNBLOCK: c_int = 0x01; - -pub const POLLWRNORM: c_short = 0x100; -pub const POLLWRBAND: c_short = 0x200; - -pub const O_ASYNC: c_int = 0x2000; -pub const O_NDELAY: c_int = 0x800; - -pub const PTRACE_DETACH: c_uint = 17; - -pub const EFD_NONBLOCK: c_int = 0x800; - -pub const F_GETLK: c_int = 5; -pub const F_GETOWN: c_int = 9; -pub const F_SETOWN: c_int = 8; -pub const F_SETLK: c_int = 6; -pub const F_SETLKW: c_int = 7; -pub const F_OFD_GETLK: c_int = 36; -pub const F_OFD_SETLK: c_int = 37; -pub const F_OFD_SETLKW: c_int = 38; - -pub const F_RDLCK: c_int = 0; -pub const F_WRLCK: c_int = 1; -pub const F_UNLCK: c_int = 2; - -pub const SFD_NONBLOCK: c_int = 0x0800; - -pub const SFD_CLOEXEC: c_int = 0x080000; - -pub const NCCS: usize = 32; - -pub const O_TRUNC: c_int = 512; - -pub const O_CLOEXEC: c_int = 0x80000; - -pub const EBFONT: c_int = 59; -pub const ENOSTR: c_int = 60; -pub const ENODATA: c_int = 61; -pub const ETIME: c_int = 62; -pub const ENOSR: c_int = 63; -pub const ENONET: c_int = 64; -pub const ENOPKG: c_int = 65; -pub const EREMOTE: c_int = 66; -pub const ENOLINK: c_int = 67; -pub const EADV: c_int = 68; -pub const ESRMNT: c_int = 69; -pub const ECOMM: c_int = 70; -pub const EPROTO: c_int = 71; -pub const EDOTDOT: c_int = 73; - -pub const SA_NODEFER: c_int = 0x40000000; -pub const SA_RESETHAND: c_int = 0x80000000; -pub const SA_RESTART: c_int = 0x10000000; -pub const SA_NOCLDSTOP: c_int = 0x00000001; - -pub const EPOLL_CLOEXEC: c_int = 0x80000; - -pub const EFD_CLOEXEC: c_int = 0x80000; - -pub const O_DIRECT: c_int = 0x10000; -pub const O_DIRECTORY: c_int = 0x4000; -pub const O_NOFOLLOW: c_int = 0x8000; - -pub const MAP_LOCKED: c_int = 0x02000; -pub const MAP_NORESERVE: c_int = 0x04000; -pub const MAP_ANON: c_int = 0x0020; -pub const MAP_ANONYMOUS: c_int = 0x0020; -pub const MAP_DENYWRITE: c_int = 0x0800; -pub const MAP_EXECUTABLE: c_int = 0x01000; -pub const MAP_POPULATE: c_int = 0x08000; -pub const MAP_NONBLOCK: c_int = 0x010000; -pub const MAP_STACK: c_int = 0x020000; -pub const MAP_HUGETLB: c_int = 0x040000; -pub const MAP_SYNC: c_int = 0x080000; - -pub const EDEADLOCK: c_int = 35; - -pub const MCL_CURRENT: c_int = 0x0001; -pub const MCL_FUTURE: c_int = 0x0002; -pub const MCL_ONFAULT: c_int = 0x0004; - -pub const SIGSTKSZ: size_t = 16384; -pub const MINSIGSTKSZ: size_t = 5120; -pub const CBAUD: crate::tcflag_t = 0o0010017; -pub const TAB1: crate::tcflag_t = 
0x00000800; -pub const TAB2: crate::tcflag_t = 0x00001000; -pub const TAB3: crate::tcflag_t = 0x00001800; -pub const CR1: crate::tcflag_t = 0x00000200; -pub const CR2: crate::tcflag_t = 0x00000400; -pub const CR3: crate::tcflag_t = 0x00000600; -pub const FF1: crate::tcflag_t = 0x00008000; -pub const BS1: crate::tcflag_t = 0x00002000; -pub const VT1: crate::tcflag_t = 0x00004000; -pub const VWERASE: usize = 14; -pub const VREPRINT: usize = 12; -pub const VSUSP: usize = 10; -pub const VSTART: usize = 8; -pub const VSTOP: usize = 9; -pub const VDISCARD: usize = 13; -pub const VTIME: usize = 5; -pub const IXON: crate::tcflag_t = 0x00000400; -pub const IXOFF: crate::tcflag_t = 0x00001000; -pub const ONLCR: crate::tcflag_t = 0x4; -pub const CSIZE: crate::tcflag_t = 0x00000030; -pub const CS6: crate::tcflag_t = 0x00000010; -pub const CS7: crate::tcflag_t = 0x00000020; -pub const CS8: crate::tcflag_t = 0x00000030; -pub const CSTOPB: crate::tcflag_t = 0x00000040; -pub const CREAD: crate::tcflag_t = 0x00000080; -pub const PARENB: crate::tcflag_t = 0x00000100; -pub const PARODD: crate::tcflag_t = 0x00000200; -pub const HUPCL: crate::tcflag_t = 0x00000400; -pub const CLOCAL: crate::tcflag_t = 0x00000800; -pub const ECHOKE: crate::tcflag_t = 0x00000800; -pub const ECHOE: crate::tcflag_t = 0x00000010; -pub const ECHOK: crate::tcflag_t = 0x00000020; -pub const ECHONL: crate::tcflag_t = 0x00000040; -pub const ECHOPRT: crate::tcflag_t = 0x00000400; -pub const ECHOCTL: crate::tcflag_t = 0x00000200; -pub const ISIG: crate::tcflag_t = 0x00000001; -pub const ICANON: crate::tcflag_t = 0x00000002; -pub const PENDIN: crate::tcflag_t = 0x00004000; -pub const NOFLSH: crate::tcflag_t = 0x00000080; -pub const CIBAUD: crate::tcflag_t = 0o02003600000; -pub const CBAUDEX: crate::tcflag_t = 0o010000; -pub const VSWTC: usize = 7; -pub const OLCUC: crate::tcflag_t = 0o000002; -pub const NLDLY: crate::tcflag_t = 0o000400; -pub const CRDLY: crate::tcflag_t = 0o003000; -pub const TABDLY: crate::tcflag_t = 0o014000; -pub const BSDLY: crate::tcflag_t = 0o020000; -pub const FFDLY: crate::tcflag_t = 0o100000; -pub const VTDLY: crate::tcflag_t = 0o040000; -pub const XTABS: crate::tcflag_t = 0o014000; - -pub const B0: crate::speed_t = 0o000000; -pub const B50: crate::speed_t = 0o000001; -pub const B75: crate::speed_t = 0o000002; -pub const B110: crate::speed_t = 0o000003; -pub const B134: crate::speed_t = 0o000004; -pub const B150: crate::speed_t = 0o000005; -pub const B200: crate::speed_t = 0o000006; -pub const B300: crate::speed_t = 0o000007; -pub const B600: crate::speed_t = 0o000010; -pub const B1200: crate::speed_t = 0o000011; -pub const B1800: crate::speed_t = 0o000012; -pub const B2400: crate::speed_t = 0o000013; -pub const B4800: crate::speed_t = 0o000014; -pub const B9600: crate::speed_t = 0o000015; -pub const B19200: crate::speed_t = 0o000016; -pub const B38400: crate::speed_t = 0o000017; -pub const EXTA: crate::speed_t = B19200; -pub const EXTB: crate::speed_t = B38400; -pub const B57600: crate::speed_t = 0o010001; -pub const B115200: crate::speed_t = 0o010002; -pub const B230400: crate::speed_t = 0o010003; -pub const B460800: crate::speed_t = 0o010004; -pub const B500000: crate::speed_t = 0o010005; -pub const B576000: crate::speed_t = 0o010006; -pub const B921600: crate::speed_t = 0o010007; -pub const B1000000: crate::speed_t = 0o010010; -pub const B1152000: crate::speed_t = 0o010011; -pub const B1500000: crate::speed_t = 0o010012; -pub const B2000000: crate::speed_t = 0o010013; -pub const B2500000: crate::speed_t = 
0o010014; -pub const B3000000: crate::speed_t = 0o010015; -pub const B3500000: crate::speed_t = 0o010016; -pub const B4000000: crate::speed_t = 0o010017; - -pub const VEOL: usize = 11; -pub const VEOL2: usize = 16; -pub const VMIN: usize = 6; -pub const IEXTEN: crate::tcflag_t = 0x00008000; -pub const TOSTOP: crate::tcflag_t = 0x00000100; -pub const FLUSHO: crate::tcflag_t = 0x00001000; -pub const EXTPROC: crate::tcflag_t = 0x00010000; - -pub const TCSANOW: c_int = 0; -pub const TCSADRAIN: c_int = 1; -pub const TCSAFLUSH: c_int = 2; - -// sys/auxv.h -pub const HWCAP_FP: c_ulong = 1 << 0; -pub const HWCAP_ASIMD: c_ulong = 1 << 1; -pub const HWCAP_EVTSTRM: c_ulong = 1 << 2; -pub const HWCAP_AES: c_ulong = 1 << 3; -pub const HWCAP_PMULL: c_ulong = 1 << 4; -pub const HWCAP_SHA1: c_ulong = 1 << 5; -pub const HWCAP_SHA2: c_ulong = 1 << 6; -pub const HWCAP_CRC32: c_ulong = 1 << 7; -pub const HWCAP_ATOMICS: c_ulong = 1 << 8; -pub const HWCAP_FPHP: c_ulong = 1 << 9; -pub const HWCAP_ASIMDHP: c_ulong = 1 << 10; -pub const HWCAP_CPUID: c_ulong = 1 << 11; -pub const HWCAP_ASIMDRDM: c_ulong = 1 << 12; -pub const HWCAP_JSCVT: c_ulong = 1 << 13; -pub const HWCAP_FCMA: c_ulong = 1 << 14; -pub const HWCAP_LRCPC: c_ulong = 1 << 15; -pub const HWCAP_DCPOP: c_ulong = 1 << 16; -pub const HWCAP_SHA3: c_ulong = 1 << 17; -pub const HWCAP_SM3: c_ulong = 1 << 18; -pub const HWCAP_SM4: c_ulong = 1 << 19; -pub const HWCAP_ASIMDDP: c_ulong = 1 << 20; -pub const HWCAP_SHA512: c_ulong = 1 << 21; -pub const HWCAP_SVE: c_ulong = 1 << 22; -pub const HWCAP_ASIMDFHM: c_ulong = 1 << 23; -pub const HWCAP_DIT: c_ulong = 1 << 24; -pub const HWCAP_USCAT: c_ulong = 1 << 25; -pub const HWCAP_ILRCPC: c_ulong = 1 << 26; -pub const HWCAP_FLAGM: c_ulong = 1 << 27; -pub const HWCAP_SSBS: c_ulong = 1 << 28; -pub const HWCAP_SB: c_ulong = 1 << 29; -pub const HWCAP_PACA: c_ulong = 1 << 30; -pub const HWCAP_PACG: c_ulong = 1 << 31; -// FIXME(linux): enable these again once linux-api-headers are up to date enough on CI. 
-// See discussion in https://github.com/rust-lang/libc/pull/1638 -//pub const HWCAP2_DCPODP: c_ulong = 1 << 0; -//pub const HWCAP2_SVE2: c_ulong = 1 << 1; -//pub const HWCAP2_SVEAES: c_ulong = 1 << 2; -//pub const HWCAP2_SVEPMULL: c_ulong = 1 << 3; -//pub const HWCAP2_SVEBITPERM: c_ulong = 1 << 4; -//pub const HWCAP2_SVESHA3: c_ulong = 1 << 5; -//pub const HWCAP2_SVESM4: c_ulong = 1 << 6; -//pub const HWCAP2_FLAGM2: c_ulong = 1 << 7; -//pub const HWCAP2_FRINT: c_ulong = 1 << 8; -//pub const HWCAP2_MTE: c_ulong = 1 << 18; - -// linux/prctl.h -pub const PR_PAC_RESET_KEYS: c_int = 54; -pub const PR_SET_TAGGED_ADDR_CTRL: c_int = 55; -pub const PR_GET_TAGGED_ADDR_CTRL: c_int = 56; -pub const PR_PAC_SET_ENABLED_KEYS: c_int = 60; -pub const PR_PAC_GET_ENABLED_KEYS: c_int = 61; - -pub const PR_TAGGED_ADDR_ENABLE: c_ulong = 1; - -pub const PR_PAC_APIAKEY: c_ulong = 1 << 0; -pub const PR_PAC_APIBKEY: c_ulong = 1 << 1; -pub const PR_PAC_APDAKEY: c_ulong = 1 << 2; -pub const PR_PAC_APDBKEY: c_ulong = 1 << 3; -pub const PR_PAC_APGAKEY: c_ulong = 1 << 4; - -pub const PR_SME_SET_VL: c_int = 63; -pub const PR_SME_GET_VL: c_int = 64; -pub const PR_SME_VL_LEN_MAX: c_int = 0xffff; - -pub const PR_SME_SET_VL_INHERIT: c_ulong = 1 << 17; -pub const PR_SME_SET_VL_ONE_EXEC: c_ulong = 1 << 18; - -// Syscall table -pub const SYS_io_setup: c_long = 0; -pub const SYS_io_destroy: c_long = 1; -pub const SYS_io_submit: c_long = 2; -pub const SYS_io_cancel: c_long = 3; -pub const SYS_io_getevents: c_long = 4; -pub const SYS_setxattr: c_long = 5; -pub const SYS_lsetxattr: c_long = 6; -pub const SYS_fsetxattr: c_long = 7; -pub const SYS_getxattr: c_long = 8; -pub const SYS_lgetxattr: c_long = 9; -pub const SYS_fgetxattr: c_long = 10; -pub const SYS_listxattr: c_long = 11; -pub const SYS_llistxattr: c_long = 12; -pub const SYS_flistxattr: c_long = 13; -pub const SYS_removexattr: c_long = 14; -pub const SYS_lremovexattr: c_long = 15; -pub const SYS_fremovexattr: c_long = 16; -pub const SYS_getcwd: c_long = 17; -pub const SYS_lookup_dcookie: c_long = 18; -pub const SYS_eventfd2: c_long = 19; -pub const SYS_epoll_create1: c_long = 20; -pub const SYS_epoll_ctl: c_long = 21; -pub const SYS_epoll_pwait: c_long = 22; -pub const SYS_dup: c_long = 23; -pub const SYS_dup3: c_long = 24; -pub const SYS_fcntl: c_long = 25; -pub const SYS_inotify_init1: c_long = 26; -pub const SYS_inotify_add_watch: c_long = 27; -pub const SYS_inotify_rm_watch: c_long = 28; -pub const SYS_ioctl: c_long = 29; -pub const SYS_ioprio_set: c_long = 30; -pub const SYS_ioprio_get: c_long = 31; -pub const SYS_flock: c_long = 32; -pub const SYS_mknodat: c_long = 33; -pub const SYS_mkdirat: c_long = 34; -pub const SYS_unlinkat: c_long = 35; -pub const SYS_symlinkat: c_long = 36; -pub const SYS_linkat: c_long = 37; -// 38 is renameat only on LP64 -pub const SYS_umount2: c_long = 39; -pub const SYS_mount: c_long = 40; -pub const SYS_pivot_root: c_long = 41; -pub const SYS_nfsservctl: c_long = 42; -pub const SYS_statfs: c_long = 43; -pub const SYS_fstatfs: c_long = 44; -pub const SYS_truncate: c_long = 45; -pub const SYS_ftruncate: c_long = 46; -pub const SYS_fallocate: c_long = 47; -pub const SYS_faccessat: c_long = 48; -pub const SYS_chdir: c_long = 49; -pub const SYS_fchdir: c_long = 50; -pub const SYS_chroot: c_long = 51; -pub const SYS_fchmod: c_long = 52; -pub const SYS_fchmodat: c_long = 53; -pub const SYS_fchownat: c_long = 54; -pub const SYS_fchown: c_long = 55; -pub const SYS_openat: c_long = 56; -pub const SYS_close: c_long = 57; -pub const SYS_vhangup: 
c_long = 58; -pub const SYS_pipe2: c_long = 59; -pub const SYS_quotactl: c_long = 60; -pub const SYS_getdents64: c_long = 61; -pub const SYS_lseek: c_long = 62; -pub const SYS_read: c_long = 63; -pub const SYS_write: c_long = 64; -pub const SYS_readv: c_long = 65; -pub const SYS_writev: c_long = 66; -pub const SYS_pread64: c_long = 67; -pub const SYS_pwrite64: c_long = 68; -pub const SYS_preadv: c_long = 69; -pub const SYS_pwritev: c_long = 70; -pub const SYS_pselect6: c_long = 72; -pub const SYS_ppoll: c_long = 73; -pub const SYS_signalfd4: c_long = 74; -pub const SYS_vmsplice: c_long = 75; -pub const SYS_splice: c_long = 76; -pub const SYS_tee: c_long = 77; -pub const SYS_readlinkat: c_long = 78; -pub const SYS_newfstatat: c_long = 79; -pub const SYS_fstat: c_long = 80; -pub const SYS_sync: c_long = 81; -pub const SYS_fsync: c_long = 82; -pub const SYS_fdatasync: c_long = 83; -// 84 sync_file_range on LP64 and sync_file_range2 on ILP32 -pub const SYS_timerfd_create: c_long = 85; -pub const SYS_timerfd_settime: c_long = 86; -pub const SYS_timerfd_gettime: c_long = 87; -pub const SYS_utimensat: c_long = 88; -pub const SYS_acct: c_long = 89; -pub const SYS_capget: c_long = 90; -pub const SYS_capset: c_long = 91; -pub const SYS_personality: c_long = 92; -pub const SYS_exit: c_long = 93; -pub const SYS_exit_group: c_long = 94; -pub const SYS_waitid: c_long = 95; -pub const SYS_set_tid_address: c_long = 96; -pub const SYS_unshare: c_long = 97; -pub const SYS_futex: c_long = 98; -pub const SYS_set_robust_list: c_long = 99; -pub const SYS_get_robust_list: c_long = 100; -pub const SYS_nanosleep: c_long = 101; -pub const SYS_getitimer: c_long = 102; -pub const SYS_setitimer: c_long = 103; -pub const SYS_kexec_load: c_long = 104; -pub const SYS_init_module: c_long = 105; -pub const SYS_delete_module: c_long = 106; -pub const SYS_timer_create: c_long = 107; -pub const SYS_timer_gettime: c_long = 108; -pub const SYS_timer_getoverrun: c_long = 109; -pub const SYS_timer_settime: c_long = 110; -pub const SYS_timer_delete: c_long = 111; -pub const SYS_clock_settime: c_long = 112; -pub const SYS_clock_gettime: c_long = 113; -pub const SYS_clock_getres: c_long = 114; -pub const SYS_clock_nanosleep: c_long = 115; -pub const SYS_syslog: c_long = 116; -pub const SYS_ptrace: c_long = 117; -pub const SYS_sched_setparam: c_long = 118; -pub const SYS_sched_setscheduler: c_long = 119; -pub const SYS_sched_getscheduler: c_long = 120; -pub const SYS_sched_getparam: c_long = 121; -pub const SYS_sched_setaffinity: c_long = 122; -pub const SYS_sched_getaffinity: c_long = 123; -pub const SYS_sched_yield: c_long = 124; -pub const SYS_sched_get_priority_max: c_long = 125; -pub const SYS_sched_get_priority_min: c_long = 126; -pub const SYS_sched_rr_get_interval: c_long = 127; -pub const SYS_restart_syscall: c_long = 128; -pub const SYS_kill: c_long = 129; -pub const SYS_tkill: c_long = 130; -pub const SYS_tgkill: c_long = 131; -pub const SYS_sigaltstack: c_long = 132; -pub const SYS_rt_sigsuspend: c_long = 133; -pub const SYS_rt_sigaction: c_long = 134; -pub const SYS_rt_sigprocmask: c_long = 135; -pub const SYS_rt_sigpending: c_long = 136; -pub const SYS_rt_sigtimedwait: c_long = 137; -pub const SYS_rt_sigqueueinfo: c_long = 138; -pub const SYS_rt_sigreturn: c_long = 139; -pub const SYS_setpriority: c_long = 140; -pub const SYS_getpriority: c_long = 141; -pub const SYS_reboot: c_long = 142; -pub const SYS_setregid: c_long = 143; -pub const SYS_setgid: c_long = 144; -pub const SYS_setreuid: c_long = 145; -pub const 
SYS_setuid: c_long = 146; -pub const SYS_setresuid: c_long = 147; -pub const SYS_getresuid: c_long = 148; -pub const SYS_setresgid: c_long = 149; -pub const SYS_getresgid: c_long = 150; -pub const SYS_setfsuid: c_long = 151; -pub const SYS_setfsgid: c_long = 152; -pub const SYS_times: c_long = 153; -pub const SYS_setpgid: c_long = 154; -pub const SYS_getpgid: c_long = 155; -pub const SYS_getsid: c_long = 156; -pub const SYS_setsid: c_long = 157; -pub const SYS_getgroups: c_long = 158; -pub const SYS_setgroups: c_long = 159; -pub const SYS_uname: c_long = 160; -pub const SYS_sethostname: c_long = 161; -pub const SYS_setdomainname: c_long = 162; -// 163 is getrlimit only on LP64 -// 164 is setrlimit only on LP64 -pub const SYS_getrusage: c_long = 165; -pub const SYS_umask: c_long = 166; -pub const SYS_prctl: c_long = 167; -pub const SYS_getcpu: c_long = 168; -pub const SYS_gettimeofday: c_long = 169; -pub const SYS_settimeofday: c_long = 170; -pub const SYS_adjtimex: c_long = 171; -pub const SYS_getpid: c_long = 172; -pub const SYS_getppid: c_long = 173; -pub const SYS_getuid: c_long = 174; -pub const SYS_geteuid: c_long = 175; -pub const SYS_getgid: c_long = 176; -pub const SYS_getegid: c_long = 177; -pub const SYS_gettid: c_long = 178; -pub const SYS_sysinfo: c_long = 179; -pub const SYS_mq_open: c_long = 180; -pub const SYS_mq_unlink: c_long = 181; -pub const SYS_mq_timedsend: c_long = 182; -pub const SYS_mq_timedreceive: c_long = 183; -pub const SYS_mq_notify: c_long = 184; -pub const SYS_mq_getsetattr: c_long = 185; -pub const SYS_msgget: c_long = 186; -pub const SYS_msgctl: c_long = 187; -pub const SYS_msgrcv: c_long = 188; -pub const SYS_msgsnd: c_long = 189; -pub const SYS_semget: c_long = 190; -pub const SYS_semctl: c_long = 191; -pub const SYS_semtimedop: c_long = 192; -pub const SYS_semop: c_long = 193; -pub const SYS_shmget: c_long = 194; -pub const SYS_shmctl: c_long = 195; -pub const SYS_shmat: c_long = 196; -pub const SYS_shmdt: c_long = 197; -pub const SYS_socket: c_long = 198; -pub const SYS_socketpair: c_long = 199; -pub const SYS_bind: c_long = 200; -pub const SYS_listen: c_long = 201; -pub const SYS_accept: c_long = 202; -pub const SYS_connect: c_long = 203; -pub const SYS_getsockname: c_long = 204; -pub const SYS_getpeername: c_long = 205; -pub const SYS_sendto: c_long = 206; -pub const SYS_recvfrom: c_long = 207; -pub const SYS_setsockopt: c_long = 208; -pub const SYS_getsockopt: c_long = 209; -pub const SYS_shutdown: c_long = 210; -pub const SYS_sendmsg: c_long = 211; -pub const SYS_recvmsg: c_long = 212; -pub const SYS_readahead: c_long = 213; -pub const SYS_brk: c_long = 214; -pub const SYS_munmap: c_long = 215; -pub const SYS_mremap: c_long = 216; -pub const SYS_add_key: c_long = 217; -pub const SYS_request_key: c_long = 218; -pub const SYS_keyctl: c_long = 219; -pub const SYS_clone: c_long = 220; -pub const SYS_execve: c_long = 221; -pub const SYS_mmap: c_long = 222; -pub const SYS_swapon: c_long = 224; -pub const SYS_swapoff: c_long = 225; -pub const SYS_mprotect: c_long = 226; -pub const SYS_msync: c_long = 227; -pub const SYS_mlock: c_long = 228; -pub const SYS_munlock: c_long = 229; -pub const SYS_mlockall: c_long = 230; -pub const SYS_munlockall: c_long = 231; -pub const SYS_mincore: c_long = 232; -pub const SYS_madvise: c_long = 233; -pub const SYS_remap_file_pages: c_long = 234; -pub const SYS_mbind: c_long = 235; -pub const SYS_get_mempolicy: c_long = 236; -pub const SYS_set_mempolicy: c_long = 237; -pub const SYS_migrate_pages: c_long = 238; -pub const 
SYS_move_pages: c_long = 239; -pub const SYS_rt_tgsigqueueinfo: c_long = 240; -pub const SYS_perf_event_open: c_long = 241; -pub const SYS_accept4: c_long = 242; -pub const SYS_recvmmsg: c_long = 243; -pub const SYS_wait4: c_long = 260; -pub const SYS_prlimit64: c_long = 261; -pub const SYS_fanotify_init: c_long = 262; -pub const SYS_fanotify_mark: c_long = 263; -pub const SYS_name_to_handle_at: c_long = 264; -pub const SYS_open_by_handle_at: c_long = 265; -pub const SYS_clock_adjtime: c_long = 266; -pub const SYS_syncfs: c_long = 267; -pub const SYS_setns: c_long = 268; -pub const SYS_sendmmsg: c_long = 269; -pub const SYS_process_vm_readv: c_long = 270; -pub const SYS_process_vm_writev: c_long = 271; -pub const SYS_kcmp: c_long = 272; -pub const SYS_finit_module: c_long = 273; -pub const SYS_sched_setattr: c_long = 274; -pub const SYS_sched_getattr: c_long = 275; -pub const SYS_renameat2: c_long = 276; -pub const SYS_seccomp: c_long = 277; -pub const SYS_getrandom: c_long = 278; -pub const SYS_memfd_create: c_long = 279; -pub const SYS_bpf: c_long = 280; -pub const SYS_execveat: c_long = 281; -pub const SYS_userfaultfd: c_long = 282; -pub const SYS_membarrier: c_long = 283; -pub const SYS_mlock2: c_long = 284; -pub const SYS_copy_file_range: c_long = 285; -pub const SYS_preadv2: c_long = 286; -pub const SYS_pwritev2: c_long = 287; -pub const SYS_pkey_mprotect: c_long = 288; -pub const SYS_pkey_alloc: c_long = 289; -pub const SYS_pkey_free: c_long = 290; -pub const SYS_statx: c_long = 291; -pub const SYS_rseq: c_long = 293; -pub const SYS_kexec_file_load: c_long = 294; -pub const SYS_pidfd_send_signal: c_long = 424; -pub const SYS_io_uring_setup: c_long = 425; -pub const SYS_io_uring_enter: c_long = 426; -pub const SYS_io_uring_register: c_long = 427; -pub const SYS_open_tree: c_long = 428; -pub const SYS_move_mount: c_long = 429; -pub const SYS_fsopen: c_long = 430; -pub const SYS_fsconfig: c_long = 431; -pub const SYS_fsmount: c_long = 432; -pub const SYS_fspick: c_long = 433; -pub const SYS_pidfd_open: c_long = 434; -pub const SYS_clone3: c_long = 435; -pub const SYS_close_range: c_long = 436; -pub const SYS_openat2: c_long = 437; -pub const SYS_pidfd_getfd: c_long = 438; -pub const SYS_faccessat2: c_long = 439; -pub const SYS_process_madvise: c_long = 440; -pub const SYS_epoll_pwait2: c_long = 441; -pub const SYS_mount_setattr: c_long = 442; -pub const SYS_quotactl_fd: c_long = 443; -pub const SYS_landlock_create_ruleset: c_long = 444; -pub const SYS_landlock_add_rule: c_long = 445; -pub const SYS_landlock_restrict_self: c_long = 446; -pub const SYS_memfd_secret: c_long = 447; -pub const SYS_process_mrelease: c_long = 448; -pub const SYS_futex_waitv: c_long = 449; -pub const SYS_set_mempolicy_home_node: c_long = 450; -pub const SYS_mseal: c_long = 462; - -pub const PROT_BTI: c_int = 0x10; -pub const PROT_MTE: c_int = 0x20; - -extern "C" { - pub fn sysctl( - name: *mut c_int, - namelen: c_int, - oldp: *mut c_void, - oldlenp: *mut size_t, - newp: *mut c_void, - newlen: size_t, - ) -> c_int; - - pub fn getcontext(ucp: *mut ucontext_t) -> c_int; - pub fn setcontext(ucp: *const ucontext_t) -> c_int; - pub fn makecontext(ucp: *mut ucontext_t, func: extern "C" fn(), argc: c_int, ...); - pub fn swapcontext(uocp: *mut ucontext_t, ucp: *const ucontext_t) -> c_int; -} - -cfg_if! 
{ - if #[cfg(target_pointer_width = "32")] { - mod ilp32; - pub use self::ilp32::*; - } else { - mod lp64; - pub use self::lp64::*; - } -} diff --git a/vendor/libc/src/unix/linux_like/linux/gnu/b64/loongarch64/mod.rs b/vendor/libc/src/unix/linux_like/linux/gnu/b64/loongarch64/mod.rs deleted file mode 100644 index 8f15ce4d1529a9..00000000000000 --- a/vendor/libc/src/unix/linux_like/linux/gnu/b64/loongarch64/mod.rs +++ /dev/null @@ -1,922 +0,0 @@ -use crate::prelude::*; -use crate::{off64_t, off_t, pthread_mutex_t}; - -pub type wchar_t = i32; - -pub type blksize_t = i32; -pub type nlink_t = u32; -pub type suseconds_t = i64; -pub type __u64 = c_ulonglong; -pub type __s64 = c_longlong; - -s! { - pub struct stat { - pub st_dev: crate::dev_t, - pub st_ino: crate::ino_t, - pub st_mode: crate::mode_t, - pub st_nlink: crate::nlink_t, - pub st_uid: crate::uid_t, - pub st_gid: crate::gid_t, - pub st_rdev: crate::dev_t, - __pad1: crate::dev_t, - pub st_size: off_t, - pub st_blksize: crate::blksize_t, - __pad2: c_int, - pub st_blocks: crate::blkcnt_t, - pub st_atime: crate::time_t, - pub st_atime_nsec: c_long, - pub st_mtime: crate::time_t, - pub st_mtime_nsec: c_long, - pub st_ctime: crate::time_t, - pub st_ctime_nsec: c_long, - __unused: [c_int; 2], - } - - pub struct stat64 { - pub st_dev: crate::dev_t, - pub st_ino: crate::ino64_t, - pub st_mode: crate::mode_t, - pub st_nlink: crate::nlink_t, - pub st_uid: crate::uid_t, - pub st_gid: crate::gid_t, - pub st_rdev: crate::dev_t, - pub __pad1: crate::dev_t, - pub st_size: off64_t, - pub st_blksize: crate::blksize_t, - pub __pad2: c_int, - pub st_blocks: crate::blkcnt_t, - pub st_atime: crate::time_t, - pub st_atime_nsec: c_long, - pub st_mtime: crate::time_t, - pub st_mtime_nsec: c_long, - pub st_ctime: crate::time_t, - pub st_ctime_nsec: c_long, - __unused: [c_int; 2], - } - - pub struct statfs { - pub f_type: crate::__fsword_t, - pub f_bsize: crate::__fsword_t, - pub f_blocks: crate::fsblkcnt_t, - pub f_bfree: crate::fsblkcnt_t, - pub f_bavail: crate::fsblkcnt_t, - pub f_files: crate::fsfilcnt_t, - pub f_ffree: crate::fsfilcnt_t, - pub f_fsid: crate::fsid_t, - pub f_namelen: crate::__fsword_t, - pub f_frsize: crate::__fsword_t, - pub f_flags: crate::__fsword_t, - pub f_spare: [crate::__fsword_t; 4], - } - - pub struct statfs64 { - pub f_type: crate::__fsword_t, - pub f_bsize: crate::__fsword_t, - pub f_blocks: u64, - pub f_bfree: u64, - pub f_bavail: u64, - pub f_files: u64, - pub f_ffree: u64, - pub f_fsid: crate::fsid_t, - pub f_namelen: crate::__fsword_t, - pub f_frsize: crate::__fsword_t, - pub f_flags: crate::__fsword_t, - pub f_spare: [crate::__fsword_t; 4], - } - - pub struct flock { - pub l_type: c_short, - pub l_whence: c_short, - pub l_start: off_t, - pub l_len: off_t, - pub l_pid: crate::pid_t, - } - - pub struct flock64 { - pub l_type: c_short, - pub l_whence: c_short, - pub l_start: off64_t, - pub l_len: off64_t, - pub l_pid: crate::pid_t, - } - - pub struct statvfs { - pub f_bsize: c_ulong, - pub f_frsize: c_ulong, - pub f_blocks: crate::fsblkcnt_t, - pub f_bfree: crate::fsblkcnt_t, - pub f_bavail: crate::fsblkcnt_t, - pub f_files: crate::fsfilcnt_t, - pub f_ffree: crate::fsfilcnt_t, - pub f_favail: crate::fsfilcnt_t, - pub f_fsid: c_ulong, - pub f_flag: c_ulong, - pub f_namemax: c_ulong, - __f_spare: [c_int; 6], - } - - pub struct statvfs64 { - pub f_bsize: c_ulong, - pub f_frsize: c_ulong, - pub f_blocks: u64, - pub f_bfree: u64, - pub f_bavail: u64, - pub f_files: u64, - pub f_ffree: u64, - pub f_favail: u64, - pub f_fsid: c_ulong, - 
pub f_flag: c_ulong, - pub f_namemax: c_ulong, - __f_spare: [c_int; 6], - } - - pub struct pthread_attr_t { - __size: [c_ulong; 7], - } - - // FIXME(1.0): This should not implement `PartialEq` - #[allow(unpredictable_function_pointer_comparisons)] - pub struct sigaction { - pub sa_sigaction: crate::sighandler_t, - pub sa_mask: crate::sigset_t, - pub sa_flags: c_int, - pub sa_restorer: Option, - } - - pub struct stack_t { - pub ss_sp: *mut c_void, - pub ss_flags: c_int, - pub ss_size: size_t, - } - - pub struct siginfo_t { - pub si_signo: c_int, - pub si_errno: c_int, - pub si_code: c_int, - #[doc(hidden)] - #[deprecated( - since = "0.2.54", - note = "Please leave a comment on \ - https://github.com/rust-lang/libc/pull/1316 if you're using \ - this field" - )] - pub _pad: [c_int; 29], - _align: [u64; 0], - } - - pub struct ipc_perm { - pub __key: crate::key_t, - pub uid: crate::uid_t, - pub gid: crate::gid_t, - pub cuid: crate::uid_t, - pub cgid: crate::gid_t, - pub mode: c_uint, - pub __seq: c_ushort, - __pad2: c_ushort, - __unused1: c_ulong, - __unused2: c_ulong, - } - - pub struct shmid_ds { - pub shm_perm: crate::ipc_perm, - pub shm_segsz: size_t, - pub shm_atime: crate::time_t, - pub shm_dtime: crate::time_t, - pub shm_ctime: crate::time_t, - pub shm_cpid: crate::pid_t, - pub shm_lpid: crate::pid_t, - pub shm_nattch: crate::shmatt_t, - __unused4: c_ulong, - __unused5: c_ulong, - } - - pub struct user_regs_struct { - pub regs: [u64; 32], - pub orig_a0: u64, - pub csr_era: u64, - pub csr_badv: u64, - pub reserved: [u64; 10], - } - - pub struct user_fp_struct { - pub fpr: [u64; 32], - pub fcc: u64, - pub fcsr: u32, - } - - pub struct ucontext_t { - pub uc_flags: c_ulong, - pub uc_link: *mut ucontext_t, - pub uc_stack: crate::stack_t, - pub uc_sigmask: crate::sigset_t, - pub uc_mcontext: mcontext_t, - } - - #[repr(align(16))] - pub struct mcontext_t { - pub __pc: c_ulonglong, - pub __gregs: [c_ulonglong; 32], - pub __flags: c_uint, - pub __extcontext: [c_ulonglong; 0], - } - - #[repr(align(8))] - pub struct clone_args { - pub flags: c_ulonglong, - pub pidfd: c_ulonglong, - pub child_tid: c_ulonglong, - pub parent_tid: c_ulonglong, - pub exit_signal: c_ulonglong, - pub stack: c_ulonglong, - pub stack_size: c_ulonglong, - pub tls: c_ulonglong, - pub set_tid: c_ulonglong, - pub set_tid_size: c_ulonglong, - pub cgroup: c_ulonglong, - } -} - -s_no_extra_traits! 
{ - #[repr(align(16))] - pub struct max_align_t { - priv_: [f64; 4], - } -} - -pub const __SIZEOF_PTHREAD_CONDATTR_T: usize = 4; -pub const __SIZEOF_PTHREAD_MUTEXATTR_T: usize = 4; -pub const __SIZEOF_PTHREAD_BARRIERATTR_T: usize = 4; -pub const __SIZEOF_PTHREAD_MUTEX_T: usize = 40; -pub const __SIZEOF_PTHREAD_RWLOCK_T: usize = 56; -pub const __SIZEOF_PTHREAD_BARRIER_T: usize = 32; - -#[cfg(target_endian = "little")] -pub const PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP: crate::pthread_mutex_t = pthread_mutex_t { - size: [ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ], -}; -#[cfg(target_endian = "little")] -pub const PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP: crate::pthread_mutex_t = pthread_mutex_t { - size: [ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ], -}; -#[cfg(target_endian = "little")] -pub const PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP: crate::pthread_mutex_t = pthread_mutex_t { - size: [ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ], -}; -#[cfg(target_endian = "big")] -pub const PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP: crate::pthread_mutex_t = pthread_mutex_t { - size: [ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ], -}; -#[cfg(target_endian = "big")] -pub const PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP: crate::pthread_mutex_t = pthread_mutex_t { - size: [ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ], -}; -#[cfg(target_endian = "big")] -pub const PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP: crate::pthread_mutex_t = pthread_mutex_t { - size: [ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ], -}; - -pub const HWCAP_LOONGARCH_CPUCFG: c_ulong = 1 << 0; -pub const HWCAP_LOONGARCH_LAM: c_ulong = 1 << 1; -pub const HWCAP_LOONGARCH_UAL: c_ulong = 1 << 2; -pub const HWCAP_LOONGARCH_FPU: c_ulong = 1 << 3; -pub const HWCAP_LOONGARCH_LSX: c_ulong = 1 << 4; -pub const HWCAP_LOONGARCH_LASX: c_ulong = 1 << 5; -pub const HWCAP_LOONGARCH_CRC32: c_ulong = 1 << 6; -pub const HWCAP_LOONGARCH_COMPLEX: c_ulong = 1 << 7; -pub const HWCAP_LOONGARCH_CRYPTO: c_ulong = 1 << 8; -pub const HWCAP_LOONGARCH_LVZ: c_ulong = 1 << 9; -pub const HWCAP_LOONGARCH_LBT_X86: c_ulong = 1 << 10; -pub const HWCAP_LOONGARCH_LBT_ARM: c_ulong = 1 << 11; -pub const HWCAP_LOONGARCH_LBT_MIPS: c_ulong = 1 << 12; -pub const HWCAP_LOONGARCH_PTW: c_ulong = 1 << 13; - -pub const SYS_io_setup: c_long = 0; -pub const SYS_io_destroy: c_long = 1; -pub const SYS_io_submit: c_long = 2; -pub const SYS_io_cancel: c_long = 3; -pub const SYS_io_getevents: c_long = 4; -pub const SYS_setxattr: c_long = 5; -pub const SYS_lsetxattr: c_long = 6; -pub const SYS_fsetxattr: c_long = 7; -pub const SYS_getxattr: c_long = 8; -pub const SYS_lgetxattr: c_long = 9; -pub const SYS_fgetxattr: c_long = 10; -pub const SYS_listxattr: c_long = 11; -pub const SYS_llistxattr: c_long = 12; -pub const SYS_flistxattr: c_long = 13; -pub const SYS_removexattr: c_long = 14; -pub const SYS_lremovexattr: c_long = 15; -pub const SYS_fremovexattr: c_long = 16; -pub const SYS_getcwd: c_long = 17; -pub const SYS_lookup_dcookie: c_long = 18; -pub const SYS_eventfd2: c_long = 19; -pub const SYS_epoll_create1: 
c_long = 20; -pub const SYS_epoll_ctl: c_long = 21; -pub const SYS_epoll_pwait: c_long = 22; -pub const SYS_dup: c_long = 23; -pub const SYS_dup3: c_long = 24; -pub const SYS_fcntl: c_long = 25; -pub const SYS_inotify_init1: c_long = 26; -pub const SYS_inotify_add_watch: c_long = 27; -pub const SYS_inotify_rm_watch: c_long = 28; -pub const SYS_ioctl: c_long = 29; -pub const SYS_ioprio_set: c_long = 30; -pub const SYS_ioprio_get: c_long = 31; -pub const SYS_flock: c_long = 32; -pub const SYS_mknodat: c_long = 33; -pub const SYS_mkdirat: c_long = 34; -pub const SYS_unlinkat: c_long = 35; -pub const SYS_symlinkat: c_long = 36; -pub const SYS_linkat: c_long = 37; -pub const SYS_umount2: c_long = 39; -pub const SYS_mount: c_long = 40; -pub const SYS_pivot_root: c_long = 41; -pub const SYS_nfsservctl: c_long = 42; -pub const SYS_statfs: c_long = 43; -pub const SYS_fstatfs: c_long = 44; -pub const SYS_truncate: c_long = 45; -pub const SYS_ftruncate: c_long = 46; -pub const SYS_fallocate: c_long = 47; -pub const SYS_faccessat: c_long = 48; -pub const SYS_chdir: c_long = 49; -pub const SYS_fchdir: c_long = 50; -pub const SYS_chroot: c_long = 51; -pub const SYS_fchmod: c_long = 52; -pub const SYS_fchmodat: c_long = 53; -pub const SYS_fchownat: c_long = 54; -pub const SYS_fchown: c_long = 55; -pub const SYS_openat: c_long = 56; -pub const SYS_close: c_long = 57; -pub const SYS_vhangup: c_long = 58; -pub const SYS_pipe2: c_long = 59; -pub const SYS_quotactl: c_long = 60; -pub const SYS_getdents64: c_long = 61; -pub const SYS_lseek: c_long = 62; -pub const SYS_read: c_long = 63; -pub const SYS_write: c_long = 64; -pub const SYS_readv: c_long = 65; -pub const SYS_writev: c_long = 66; -pub const SYS_pread64: c_long = 67; -pub const SYS_pwrite64: c_long = 68; -pub const SYS_preadv: c_long = 69; -pub const SYS_pwritev: c_long = 70; -pub const SYS_sendfile: c_long = 71; -pub const SYS_pselect6: c_long = 72; -pub const SYS_ppoll: c_long = 73; -pub const SYS_signalfd4: c_long = 74; -pub const SYS_vmsplice: c_long = 75; -pub const SYS_splice: c_long = 76; -pub const SYS_tee: c_long = 77; -pub const SYS_readlinkat: c_long = 78; -pub const SYS_sync: c_long = 81; -pub const SYS_fsync: c_long = 82; -pub const SYS_fdatasync: c_long = 83; -pub const SYS_sync_file_range: c_long = 84; -pub const SYS_timerfd_create: c_long = 85; -pub const SYS_timerfd_settime: c_long = 86; -pub const SYS_timerfd_gettime: c_long = 87; -pub const SYS_utimensat: c_long = 88; -pub const SYS_acct: c_long = 89; -pub const SYS_capget: c_long = 90; -pub const SYS_capset: c_long = 91; -pub const SYS_personality: c_long = 92; -pub const SYS_exit: c_long = 93; -pub const SYS_exit_group: c_long = 94; -pub const SYS_waitid: c_long = 95; -pub const SYS_set_tid_address: c_long = 96; -pub const SYS_unshare: c_long = 97; -pub const SYS_futex: c_long = 98; -pub const SYS_set_robust_list: c_long = 99; -pub const SYS_get_robust_list: c_long = 100; -pub const SYS_nanosleep: c_long = 101; -pub const SYS_getitimer: c_long = 102; -pub const SYS_setitimer: c_long = 103; -pub const SYS_kexec_load: c_long = 104; -pub const SYS_init_module: c_long = 105; -pub const SYS_delete_module: c_long = 106; -pub const SYS_timer_create: c_long = 107; -pub const SYS_timer_gettime: c_long = 108; -pub const SYS_timer_getoverrun: c_long = 109; -pub const SYS_timer_settime: c_long = 110; -pub const SYS_timer_delete: c_long = 111; -pub const SYS_clock_settime: c_long = 112; -pub const SYS_clock_gettime: c_long = 113; -pub const SYS_clock_getres: c_long = 114; -pub const 
SYS_clock_nanosleep: c_long = 115; -pub const SYS_syslog: c_long = 116; -pub const SYS_ptrace: c_long = 117; -pub const SYS_sched_setparam: c_long = 118; -pub const SYS_sched_setscheduler: c_long = 119; -pub const SYS_sched_getscheduler: c_long = 120; -pub const SYS_sched_getparam: c_long = 121; -pub const SYS_sched_setaffinity: c_long = 122; -pub const SYS_sched_getaffinity: c_long = 123; -pub const SYS_sched_yield: c_long = 124; -pub const SYS_sched_get_priority_max: c_long = 125; -pub const SYS_sched_get_priority_min: c_long = 126; -pub const SYS_sched_rr_get_interval: c_long = 127; -pub const SYS_restart_syscall: c_long = 128; -pub const SYS_kill: c_long = 129; -pub const SYS_tkill: c_long = 130; -pub const SYS_tgkill: c_long = 131; -pub const SYS_sigaltstack: c_long = 132; -pub const SYS_rt_sigsuspend: c_long = 133; -pub const SYS_rt_sigaction: c_long = 134; -pub const SYS_rt_sigprocmask: c_long = 135; -pub const SYS_rt_sigpending: c_long = 136; -pub const SYS_rt_sigtimedwait: c_long = 137; -pub const SYS_rt_sigqueueinfo: c_long = 138; -pub const SYS_rt_sigreturn: c_long = 139; -pub const SYS_setpriority: c_long = 140; -pub const SYS_getpriority: c_long = 141; -pub const SYS_reboot: c_long = 142; -pub const SYS_setregid: c_long = 143; -pub const SYS_setgid: c_long = 144; -pub const SYS_setreuid: c_long = 145; -pub const SYS_setuid: c_long = 146; -pub const SYS_setresuid: c_long = 147; -pub const SYS_getresuid: c_long = 148; -pub const SYS_setresgid: c_long = 149; -pub const SYS_getresgid: c_long = 150; -pub const SYS_setfsuid: c_long = 151; -pub const SYS_setfsgid: c_long = 152; -pub const SYS_times: c_long = 153; -pub const SYS_setpgid: c_long = 154; -pub const SYS_getpgid: c_long = 155; -pub const SYS_getsid: c_long = 156; -pub const SYS_setsid: c_long = 157; -pub const SYS_getgroups: c_long = 158; -pub const SYS_setgroups: c_long = 159; -pub const SYS_uname: c_long = 160; -pub const SYS_sethostname: c_long = 161; -pub const SYS_setdomainname: c_long = 162; -pub const SYS_getrusage: c_long = 165; -pub const SYS_umask: c_long = 166; -pub const SYS_prctl: c_long = 167; -pub const SYS_getcpu: c_long = 168; -pub const SYS_gettimeofday: c_long = 169; -pub const SYS_settimeofday: c_long = 170; -pub const SYS_adjtimex: c_long = 171; -pub const SYS_getpid: c_long = 172; -pub const SYS_getppid: c_long = 173; -pub const SYS_getuid: c_long = 174; -pub const SYS_geteuid: c_long = 175; -pub const SYS_getgid: c_long = 176; -pub const SYS_getegid: c_long = 177; -pub const SYS_gettid: c_long = 178; -pub const SYS_sysinfo: c_long = 179; -pub const SYS_mq_open: c_long = 180; -pub const SYS_mq_unlink: c_long = 181; -pub const SYS_mq_timedsend: c_long = 182; -pub const SYS_mq_timedreceive: c_long = 183; -pub const SYS_mq_notify: c_long = 184; -pub const SYS_mq_getsetattr: c_long = 185; -pub const SYS_msgget: c_long = 186; -pub const SYS_msgctl: c_long = 187; -pub const SYS_msgrcv: c_long = 188; -pub const SYS_msgsnd: c_long = 189; -pub const SYS_semget: c_long = 190; -pub const SYS_semctl: c_long = 191; -pub const SYS_semtimedop: c_long = 192; -pub const SYS_semop: c_long = 193; -pub const SYS_shmget: c_long = 194; -pub const SYS_shmctl: c_long = 195; -pub const SYS_shmat: c_long = 196; -pub const SYS_shmdt: c_long = 197; -pub const SYS_socket: c_long = 198; -pub const SYS_socketpair: c_long = 199; -pub const SYS_bind: c_long = 200; -pub const SYS_listen: c_long = 201; -pub const SYS_accept: c_long = 202; -pub const SYS_connect: c_long = 203; -pub const SYS_getsockname: c_long = 204; -pub const 
SYS_getpeername: c_long = 205; -pub const SYS_sendto: c_long = 206; -pub const SYS_recvfrom: c_long = 207; -pub const SYS_setsockopt: c_long = 208; -pub const SYS_getsockopt: c_long = 209; -pub const SYS_shutdown: c_long = 210; -pub const SYS_sendmsg: c_long = 211; -pub const SYS_recvmsg: c_long = 212; -pub const SYS_readahead: c_long = 213; -pub const SYS_brk: c_long = 214; -pub const SYS_munmap: c_long = 215; -pub const SYS_mremap: c_long = 216; -pub const SYS_add_key: c_long = 217; -pub const SYS_request_key: c_long = 218; -pub const SYS_keyctl: c_long = 219; -pub const SYS_clone: c_long = 220; -pub const SYS_execve: c_long = 221; -pub const SYS_mmap: c_long = 222; -pub const SYS_fadvise64: c_long = 223; -pub const SYS_swapon: c_long = 224; -pub const SYS_swapoff: c_long = 225; -pub const SYS_mprotect: c_long = 226; -pub const SYS_msync: c_long = 227; -pub const SYS_mlock: c_long = 228; -pub const SYS_munlock: c_long = 229; -pub const SYS_mlockall: c_long = 230; -pub const SYS_munlockall: c_long = 231; -pub const SYS_mincore: c_long = 232; -pub const SYS_madvise: c_long = 233; -pub const SYS_remap_file_pages: c_long = 234; -pub const SYS_mbind: c_long = 235; -pub const SYS_get_mempolicy: c_long = 236; -pub const SYS_set_mempolicy: c_long = 237; -pub const SYS_migrate_pages: c_long = 238; -pub const SYS_move_pages: c_long = 239; -pub const SYS_rt_tgsigqueueinfo: c_long = 240; -pub const SYS_perf_event_open: c_long = 241; -pub const SYS_accept4: c_long = 242; -pub const SYS_recvmmsg: c_long = 243; -//pub const SYS_arch_specific_syscall: c_long = 244; -pub const SYS_wait4: c_long = 260; -pub const SYS_prlimit64: c_long = 261; -pub const SYS_fanotify_init: c_long = 262; -pub const SYS_fanotify_mark: c_long = 263; -pub const SYS_name_to_handle_at: c_long = 264; -pub const SYS_open_by_handle_at: c_long = 265; -pub const SYS_clock_adjtime: c_long = 266; -pub const SYS_syncfs: c_long = 267; -pub const SYS_setns: c_long = 268; -pub const SYS_sendmmsg: c_long = 269; -pub const SYS_process_vm_readv: c_long = 270; -pub const SYS_process_vm_writev: c_long = 271; -pub const SYS_kcmp: c_long = 272; -pub const SYS_finit_module: c_long = 273; -pub const SYS_sched_setattr: c_long = 274; -pub const SYS_sched_getattr: c_long = 275; -pub const SYS_renameat2: c_long = 276; -pub const SYS_seccomp: c_long = 277; -pub const SYS_getrandom: c_long = 278; -pub const SYS_memfd_create: c_long = 279; -pub const SYS_bpf: c_long = 280; -pub const SYS_execveat: c_long = 281; -pub const SYS_userfaultfd: c_long = 282; -pub const SYS_membarrier: c_long = 283; -pub const SYS_mlock2: c_long = 284; -pub const SYS_copy_file_range: c_long = 285; -pub const SYS_preadv2: c_long = 286; -pub const SYS_pwritev2: c_long = 287; -pub const SYS_pkey_mprotect: c_long = 288; -pub const SYS_pkey_alloc: c_long = 289; -pub const SYS_pkey_free: c_long = 290; -pub const SYS_statx: c_long = 291; -pub const SYS_io_pgetevents: c_long = 292; -pub const SYS_rseq: c_long = 293; -pub const SYS_kexec_file_load: c_long = 294; -pub const SYS_pidfd_send_signal: c_long = 424; -pub const SYS_io_uring_setup: c_long = 425; -pub const SYS_io_uring_enter: c_long = 426; -pub const SYS_io_uring_register: c_long = 427; -pub const SYS_open_tree: c_long = 428; -pub const SYS_move_mount: c_long = 429; -pub const SYS_fsopen: c_long = 430; -pub const SYS_fsconfig: c_long = 431; -pub const SYS_fsmount: c_long = 432; -pub const SYS_fspick: c_long = 433; -pub const SYS_pidfd_open: c_long = 434; -pub const SYS_clone3: c_long = 435; -pub const SYS_close_range: c_long = 
436; -pub const SYS_openat2: c_long = 437; -pub const SYS_pidfd_getfd: c_long = 438; -pub const SYS_faccessat2: c_long = 439; -pub const SYS_process_madvise: c_long = 440; -pub const SYS_epoll_pwait2: c_long = 441; -pub const SYS_mount_setattr: c_long = 442; -pub const SYS_quotactl_fd: c_long = 443; -pub const SYS_landlock_create_ruleset: c_long = 444; -pub const SYS_landlock_add_rule: c_long = 445; -pub const SYS_landlock_restrict_self: c_long = 446; -pub const SYS_process_mrelease: c_long = 448; -pub const SYS_futex_waitv: c_long = 449; -pub const SYS_set_mempolicy_home_node: c_long = 450; - -pub const POSIX_FADV_DONTNEED: c_int = 4; -pub const POSIX_FADV_NOREUSE: c_int = 5; -pub const O_DIRECT: c_int = 0o00040000; -pub const O_DIRECTORY: c_int = 0o00200000; -pub const O_NOFOLLOW: c_int = 0o00400000; -pub const O_TRUNC: c_int = 0o00001000; -pub const O_NOATIME: c_int = 0o1000000; -pub const O_CLOEXEC: c_int = 0o02000000; -pub const O_PATH: c_int = 0o10000000; -pub const O_TMPFILE: c_int = 0o20000000 | O_DIRECTORY; -pub const O_APPEND: c_int = 0o00002000; -pub const O_CREAT: c_int = 0o00000100; -pub const O_EXCL: c_int = 0o00000200; -pub const O_NOCTTY: c_int = 0o00000400; -pub const O_NONBLOCK: c_int = 0o00004000; -pub const FASYNC: c_int = 0o00020000; -pub const O_SYNC: c_int = 0o04010000; -pub const O_RSYNC: c_int = 0o04010000; -pub const O_FSYNC: c_int = O_SYNC; -pub const O_ASYNC: c_int = 0o00020000; -pub const O_DSYNC: c_int = 0o00010000; -pub const O_NDELAY: c_int = O_NONBLOCK; -pub const F_RDLCK: c_int = 0; -pub const F_WRLCK: c_int = 1; -pub const F_UNLCK: c_int = 2; -pub const F_GETLK: c_int = 5; -pub const F_SETLK: c_int = 6; -pub const F_SETLKW: c_int = 7; -pub const F_SETOWN: c_int = 8; -pub const F_GETOWN: c_int = 9; -pub const F_OFD_GETLK: c_int = 36; -pub const F_OFD_SETLK: c_int = 37; -pub const F_OFD_SETLKW: c_int = 38; - -pub const EDEADLK: c_int = 35; -pub const EDEADLOCK: c_int = 35; -pub const ENAMETOOLONG: c_int = 36; -pub const ENOLCK: c_int = 37; -pub const ENOSYS: c_int = 38; -pub const ENOTEMPTY: c_int = 39; -pub const ELOOP: c_int = 40; -pub const ENOMSG: c_int = 42; -pub const EIDRM: c_int = 43; -pub const ECHRNG: c_int = 44; -pub const EL2NSYNC: c_int = 45; -pub const EL3HLT: c_int = 46; -pub const EL3RST: c_int = 47; -pub const ELNRNG: c_int = 48; -pub const EUNATCH: c_int = 49; -pub const ENOCSI: c_int = 50; -pub const EL2HLT: c_int = 51; -pub const EBADE: c_int = 52; -pub const EBADR: c_int = 53; -pub const EXFULL: c_int = 54; -pub const ENOANO: c_int = 55; -pub const EBADRQC: c_int = 56; -pub const EBADSLT: c_int = 57; -pub const EBFONT: c_int = 59; -pub const ENOSTR: c_int = 60; -pub const ENODATA: c_int = 61; -pub const ETIME: c_int = 62; -pub const ENOSR: c_int = 63; -pub const ENONET: c_int = 64; -pub const ENOPKG: c_int = 65; -pub const EREMOTE: c_int = 66; -pub const ENOLINK: c_int = 67; -pub const EADV: c_int = 68; -pub const ESRMNT: c_int = 69; -pub const ECOMM: c_int = 70; -pub const EPROTO: c_int = 71; -pub const EDOTDOT: c_int = 73; -pub const EMULTIHOP: c_int = 72; -pub const EOVERFLOW: c_int = 75; -pub const ENOTUNIQ: c_int = 76; -pub const EBADFD: c_int = 77; -pub const EBADMSG: c_int = 74; -pub const EREMCHG: c_int = 78; -pub const ELIBACC: c_int = 79; -pub const ELIBBAD: c_int = 80; -pub const ELIBSCN: c_int = 81; -pub const ELIBMAX: c_int = 82; -pub const ELIBEXEC: c_int = 83; -pub const EILSEQ: c_int = 84; -pub const ERESTART: c_int = 85; -pub const ESTRPIPE: c_int = 86; -pub const EUSERS: c_int = 87; -pub const ENOTSOCK: c_int = 88; 
-pub const EDESTADDRREQ: c_int = 89; -pub const EMSGSIZE: c_int = 90; -pub const EPROTOTYPE: c_int = 91; -pub const ENOPROTOOPT: c_int = 92; -pub const EPROTONOSUPPORT: c_int = 93; -pub const ESOCKTNOSUPPORT: c_int = 94; -pub const EOPNOTSUPP: c_int = 95; -pub const EPFNOSUPPORT: c_int = 96; -pub const EAFNOSUPPORT: c_int = 97; -pub const EADDRINUSE: c_int = 98; -pub const EADDRNOTAVAIL: c_int = 99; -pub const ENETDOWN: c_int = 100; -pub const ENETUNREACH: c_int = 101; -pub const ENETRESET: c_int = 102; -pub const ECONNABORTED: c_int = 103; -pub const ECONNRESET: c_int = 104; -pub const ENOBUFS: c_int = 105; -pub const EISCONN: c_int = 106; -pub const ENOTCONN: c_int = 107; -pub const ESHUTDOWN: c_int = 108; -pub const ETOOMANYREFS: c_int = 109; -pub const ETIMEDOUT: c_int = 110; -pub const ECONNREFUSED: c_int = 111; -pub const EHOSTDOWN: c_int = 112; -pub const EHOSTUNREACH: c_int = 113; -pub const EALREADY: c_int = 114; -pub const EINPROGRESS: c_int = 115; -pub const ESTALE: c_int = 116; -pub const EUCLEAN: c_int = 117; -pub const ENOTNAM: c_int = 118; -pub const ENAVAIL: c_int = 119; -pub const EISNAM: c_int = 120; -pub const EREMOTEIO: c_int = 121; -pub const EDQUOT: c_int = 122; -pub const ENOMEDIUM: c_int = 123; -pub const EMEDIUMTYPE: c_int = 124; -pub const ECANCELED: c_int = 125; -pub const ENOKEY: c_int = 126; -pub const EKEYEXPIRED: c_int = 127; -pub const EKEYREVOKED: c_int = 128; -pub const EKEYREJECTED: c_int = 129; -pub const EOWNERDEAD: c_int = 130; -pub const ENOTRECOVERABLE: c_int = 131; -pub const ERFKILL: c_int = 132; -pub const EHWPOISON: c_int = 133; - -pub const MADV_SOFT_OFFLINE: c_int = 101; - -pub const MAP_NORESERVE: c_int = 0x4000; -pub const MAP_ANONYMOUS: c_int = 0x0020; -pub const MAP_ANON: c_int = 0x0020; -pub const MAP_GROWSDOWN: c_int = 0x0100; -pub const MAP_DENYWRITE: c_int = 0x0800; -pub const MAP_EXECUTABLE: c_int = 0x1000; -pub const MAP_LOCKED: c_int = 0x2000; -pub const MAP_POPULATE: c_int = 0x8000; -pub const MAP_NONBLOCK: c_int = 0x10000; -pub const MAP_STACK: c_int = 0x20000; -pub const MAP_HUGETLB: c_int = 0x40000; -pub const MAP_SYNC: c_int = 0x080000; -pub const MCL_CURRENT: c_int = 0x0001; -pub const MCL_FUTURE: c_int = 0x0002; -pub const MCL_ONFAULT: c_int = 0x0004; - -pub const SOCK_STREAM: c_int = 1; -pub const SOCK_DGRAM: c_int = 2; - -pub const SFD_NONBLOCK: c_int = 0x800; -pub const SFD_CLOEXEC: c_int = 0x080000; -pub const SA_NODEFER: c_int = 0x40000000; -pub const SA_RESETHAND: c_int = 0x80000000; -pub const SA_RESTART: c_int = 0x10000000; -pub const SA_NOCLDSTOP: c_int = 0x00000001; -pub const SA_ONSTACK: c_int = 0x08000000; -pub const SA_SIGINFO: c_int = 0x00000004; -pub const SA_NOCLDWAIT: c_int = 0x00000002; -pub const SIG_BLOCK: c_int = 0; -pub const SIG_UNBLOCK: c_int = 1; -pub const SIG_SETMASK: c_int = 2; -pub const SIGBUS: c_int = 7; -pub const SIGUSR1: c_int = 10; -pub const SIGUSR2: c_int = 12; -pub const SIGSTKFLT: c_int = 16; -pub const SIGCHLD: c_int = 17; -pub const SIGCONT: c_int = 18; -pub const SIGSTOP: c_int = 19; -pub const SIGTSTP: c_int = 20; -pub const SIGTTIN: c_int = 21; -pub const SIGTTOU: c_int = 22; -pub const SIGURG: c_int = 23; -pub const SIGXCPU: c_int = 24; -pub const SIGXFSZ: c_int = 25; -pub const SIGVTALRM: c_int = 26; -pub const SIGPROF: c_int = 27; -pub const SIGWINCH: c_int = 28; -pub const SIGIO: c_int = 29; -pub const SIGPOLL: c_int = 29; -pub const SIGPWR: c_int = 30; -pub const SIGSYS: c_int = 31; -pub const SIGUNUSED: c_int = 31; - -pub const POLLWRNORM: c_short = 0x100; -pub const 
POLLWRBAND: c_short = 0x200; - -pub const PTRACE_GETFPREGS: c_uint = 14; -pub const PTRACE_SETFPREGS: c_uint = 15; -pub const PTRACE_DETACH: c_uint = 17; -pub const PTRACE_GETFPXREGS: c_uint = 18; -pub const PTRACE_SETFPXREGS: c_uint = 19; -pub const PTRACE_GETREGS: c_uint = 12; -pub const PTRACE_SETREGS: c_uint = 13; -pub const PTRACE_SYSEMU: c_uint = 31; -pub const PTRACE_SYSEMU_SINGLESTEP: c_uint = 32; - -pub const RTLD_DEEPBIND: c_int = 0x8; -pub const RTLD_GLOBAL: c_int = 0x100; -pub const RTLD_NOLOAD: c_int = 0x4; - -pub const VEOF: usize = 4; -pub const VTIME: usize = 5; -pub const VMIN: usize = 6; -pub const VSWTC: usize = 7; -pub const VSTART: usize = 8; -pub const VSTOP: usize = 9; -pub const VSUSP: usize = 10; -pub const VEOL: usize = 11; -pub const VREPRINT: usize = 12; -pub const VDISCARD: usize = 13; -pub const VWERASE: usize = 14; -pub const VEOL2: usize = 16; -pub const IEXTEN: crate::tcflag_t = 0x00008000; -pub const TOSTOP: crate::tcflag_t = 0x00000100; -pub const FLUSHO: crate::tcflag_t = 0x00001000; -pub const EXTPROC: crate::tcflag_t = 0x00010000; -pub const TCSANOW: c_int = 0; -pub const TCSADRAIN: c_int = 1; -pub const TCSAFLUSH: c_int = 2; -pub const SIGSTKSZ: size_t = 16384; -pub const MINSIGSTKSZ: size_t = 4096; -pub const CBAUD: crate::tcflag_t = 0o0010017; -pub const CSIZE: crate::tcflag_t = 0x00000030; -pub const CS6: crate::tcflag_t = 0x00000010; -pub const CS7: crate::tcflag_t = 0x00000020; -pub const CS8: crate::tcflag_t = 0x00000030; -pub const CSTOPB: crate::tcflag_t = 0x00000040; -pub const CREAD: crate::tcflag_t = 0x00000080; -pub const PARENB: crate::tcflag_t = 0x00000100; -pub const PARODD: crate::tcflag_t = 0x00000200; -pub const HUPCL: crate::tcflag_t = 0x00000400; -pub const CLOCAL: crate::tcflag_t = 0x00000800; -pub const CIBAUD: crate::tcflag_t = 0o02003600000; -pub const CBAUDEX: crate::tcflag_t = 0o010000; -pub const B0: crate::speed_t = 0o000000; -pub const B50: crate::speed_t = 0o000001; -pub const B75: crate::speed_t = 0o000002; -pub const B110: crate::speed_t = 0o000003; -pub const B134: crate::speed_t = 0o000004; -pub const B150: crate::speed_t = 0o000005; -pub const B200: crate::speed_t = 0o000006; -pub const B300: crate::speed_t = 0o000007; -pub const B600: crate::speed_t = 0o000010; -pub const B1200: crate::speed_t = 0o000011; -pub const B1800: crate::speed_t = 0o000012; -pub const B2400: crate::speed_t = 0o000013; -pub const B4800: crate::speed_t = 0o000014; -pub const B9600: crate::speed_t = 0o000015; -pub const B19200: crate::speed_t = 0o000016; -pub const B38400: crate::speed_t = 0o000017; -pub const EXTA: crate::speed_t = B19200; -pub const EXTB: crate::speed_t = B38400; -pub const B57600: crate::speed_t = 0o010001; -pub const B115200: crate::speed_t = 0o010002; -pub const B230400: crate::speed_t = 0o010003; -pub const B460800: crate::speed_t = 0o010004; -pub const B500000: crate::speed_t = 0o010005; -pub const B576000: crate::speed_t = 0o010006; -pub const B921600: crate::speed_t = 0o010007; -pub const B1000000: crate::speed_t = 0o010010; -pub const B1152000: crate::speed_t = 0o010011; -pub const B1500000: crate::speed_t = 0o010012; -pub const B2000000: crate::speed_t = 0o010013; -pub const B2500000: crate::speed_t = 0o010014; -pub const B3000000: crate::speed_t = 0o010015; -pub const B3500000: crate::speed_t = 0o010016; -pub const B4000000: crate::speed_t = 0o010017; -pub const TAB1: crate::tcflag_t = 0x00000800; -pub const TAB2: crate::tcflag_t = 0x00001000; -pub const TAB3: crate::tcflag_t = 0x00001800; -pub const CR1: 
crate::tcflag_t = 0x00000200; -pub const CR2: crate::tcflag_t = 0x00000400; -pub const CR3: crate::tcflag_t = 0x00000600; -pub const FF1: crate::tcflag_t = 0x00008000; -pub const BS1: crate::tcflag_t = 0x00002000; -pub const OLCUC: crate::tcflag_t = 0o000002; -pub const NLDLY: crate::tcflag_t = 0o000400; -pub const CRDLY: crate::tcflag_t = 0o003000; -pub const TABDLY: crate::tcflag_t = 0o014000; -pub const BSDLY: crate::tcflag_t = 0o020000; -pub const FFDLY: crate::tcflag_t = 0o100000; -pub const VTDLY: crate::tcflag_t = 0o040000; -pub const XTABS: crate::tcflag_t = 0o014000; -pub const VT1: crate::tcflag_t = 0x00004000; -pub const ONLCR: crate::tcflag_t = 0x4; -pub const IXON: crate::tcflag_t = 0x00000400; -pub const IXOFF: crate::tcflag_t = 0x00001000; -pub const ECHOKE: crate::tcflag_t = 0x00000800; -pub const ECHOE: crate::tcflag_t = 0x00000010; -pub const ECHOK: crate::tcflag_t = 0x00000020; -pub const ECHONL: crate::tcflag_t = 0x00000040; -pub const ECHOPRT: crate::tcflag_t = 0x00000400; -pub const ECHOCTL: crate::tcflag_t = 0x00000200; -pub const ISIG: crate::tcflag_t = 0x00000001; -pub const ICANON: crate::tcflag_t = 0x00000002; -pub const XCASE: crate::tcflag_t = 0x00000004; -pub const PENDIN: crate::tcflag_t = 0x00004000; -pub const NOFLSH: crate::tcflag_t = 0x00000080; - -pub const NCCS: usize = 32; - -pub const EPOLL_CLOEXEC: c_int = 0x80000; - -pub const EFD_CLOEXEC: c_int = 0x80000; -pub const EFD_NONBLOCK: c_int = 0x800; diff --git a/vendor/libc/src/unix/linux_like/linux/gnu/b64/mips64/mod.rs b/vendor/libc/src/unix/linux_like/linux/gnu/b64/mips64/mod.rs deleted file mode 100644 index 7f66330d9c7ed2..00000000000000 --- a/vendor/libc/src/unix/linux_like/linux/gnu/b64/mips64/mod.rs +++ /dev/null @@ -1,930 +0,0 @@ -use crate::prelude::*; -use crate::{off64_t, off_t, pthread_mutex_t}; - -pub type blksize_t = i64; -pub type nlink_t = u64; -pub type suseconds_t = i64; -pub type wchar_t = i32; -pub type __u64 = c_ulong; -pub type __s64 = c_long; - -s! 
{ - pub struct stat { - pub st_dev: c_ulong, - st_pad1: [c_long; 2], - pub st_ino: crate::ino_t, - pub st_mode: crate::mode_t, - pub st_nlink: crate::nlink_t, - pub st_uid: crate::uid_t, - pub st_gid: crate::gid_t, - pub st_rdev: c_ulong, - st_pad2: [c_ulong; 1], - pub st_size: off_t, - st_pad3: c_long, - pub st_atime: crate::time_t, - pub st_atime_nsec: c_long, - pub st_mtime: crate::time_t, - pub st_mtime_nsec: c_long, - pub st_ctime: crate::time_t, - pub st_ctime_nsec: c_long, - pub st_blksize: crate::blksize_t, - st_pad4: c_long, - pub st_blocks: crate::blkcnt_t, - st_pad5: [c_long; 7], - } - - pub struct statfs { - pub f_type: c_long, - pub f_bsize: c_long, - pub f_frsize: c_long, - pub f_blocks: crate::fsblkcnt_t, - pub f_bfree: crate::fsblkcnt_t, - pub f_files: crate::fsblkcnt_t, - pub f_ffree: crate::fsblkcnt_t, - pub f_bavail: crate::fsblkcnt_t, - pub f_fsid: crate::fsid_t, - - pub f_namelen: c_long, - f_spare: [c_long; 6], - } - - pub struct flock { - pub l_type: c_short, - pub l_whence: c_short, - pub l_start: off_t, - pub l_len: off_t, - pub l_pid: crate::pid_t, - } - - pub struct flock64 { - pub l_type: c_short, - pub l_whence: c_short, - pub l_start: off64_t, - pub l_len: off64_t, - pub l_pid: crate::pid_t, - } - - pub struct stat64 { - pub st_dev: c_ulong, - st_pad1: [c_long; 2], - pub st_ino: crate::ino64_t, - pub st_mode: crate::mode_t, - pub st_nlink: crate::nlink_t, - pub st_uid: crate::uid_t, - pub st_gid: crate::gid_t, - pub st_rdev: c_ulong, - st_pad2: [c_long; 2], - pub st_size: off64_t, - pub st_atime: crate::time_t, - pub st_atime_nsec: c_long, - pub st_mtime: crate::time_t, - pub st_mtime_nsec: c_long, - pub st_ctime: crate::time_t, - pub st_ctime_nsec: c_long, - pub st_blksize: crate::blksize_t, - st_pad3: c_long, - pub st_blocks: crate::blkcnt64_t, - st_pad5: [c_long; 7], - } - - pub struct statfs64 { - pub f_type: c_long, - pub f_bsize: c_long, - pub f_frsize: c_long, - pub f_blocks: u64, - pub f_bfree: u64, - pub f_files: u64, - pub f_ffree: u64, - pub f_bavail: u64, - pub f_fsid: crate::fsid_t, - pub f_namelen: c_long, - pub f_flags: c_long, - pub f_spare: [c_long; 5], - } - - pub struct statvfs { - pub f_bsize: c_ulong, - pub f_frsize: c_ulong, - pub f_blocks: crate::fsblkcnt_t, - pub f_bfree: crate::fsblkcnt_t, - pub f_bavail: crate::fsblkcnt_t, - pub f_files: crate::fsfilcnt_t, - pub f_ffree: crate::fsfilcnt_t, - pub f_favail: crate::fsfilcnt_t, - pub f_fsid: c_ulong, - pub f_flag: c_ulong, - pub f_namemax: c_ulong, - __f_spare: [c_int; 6], - } - - pub struct statvfs64 { - pub f_bsize: c_ulong, - pub f_frsize: c_ulong, - pub f_blocks: u64, - pub f_bfree: u64, - pub f_bavail: u64, - pub f_files: u64, - pub f_ffree: u64, - pub f_favail: u64, - pub f_fsid: c_ulong, - pub f_flag: c_ulong, - pub f_namemax: c_ulong, - __f_spare: [c_int; 6], - } - - pub struct pthread_attr_t { - __size: [c_ulong; 7], - } - - // FIXME(1.0): This should not implement `PartialEq` - #[allow(unpredictable_function_pointer_comparisons)] - pub struct sigaction { - pub sa_flags: c_int, - pub sa_sigaction: crate::sighandler_t, - pub sa_mask: crate::sigset_t, - pub sa_restorer: Option, - } - - pub struct stack_t { - pub ss_sp: *mut c_void, - pub ss_size: size_t, - pub ss_flags: c_int, - } - - pub struct siginfo_t { - pub si_signo: c_int, - pub si_code: c_int, - pub si_errno: c_int, - _pad: c_int, - _pad2: [c_long; 14], - } - - pub struct ipc_perm { - pub __key: crate::key_t, - pub uid: crate::uid_t, - pub gid: crate::gid_t, - pub cuid: crate::uid_t, - pub cgid: crate::gid_t, - pub mode: 
c_uint, - pub __seq: c_ushort, - __pad1: c_ushort, - __unused1: c_ulong, - __unused2: c_ulong, - } - - pub struct shmid_ds { - pub shm_perm: crate::ipc_perm, - pub shm_segsz: size_t, - pub shm_atime: crate::time_t, - pub shm_dtime: crate::time_t, - pub shm_ctime: crate::time_t, - pub shm_cpid: crate::pid_t, - pub shm_lpid: crate::pid_t, - pub shm_nattch: crate::shmatt_t, - __unused4: c_ulong, - __unused5: c_ulong, - } -} - -s_no_extra_traits! { - #[repr(align(16))] - pub struct max_align_t { - priv_: [f64; 4], - } -} - -pub const __SIZEOF_PTHREAD_CONDATTR_T: usize = 4; -pub const __SIZEOF_PTHREAD_MUTEXATTR_T: usize = 4; -pub const __SIZEOF_PTHREAD_BARRIERATTR_T: usize = 4; -pub const __SIZEOF_PTHREAD_MUTEX_T: usize = 40; -pub const __SIZEOF_PTHREAD_RWLOCK_T: usize = 56; -pub const __SIZEOF_PTHREAD_BARRIER_T: usize = 32; - -#[cfg(target_endian = "little")] -pub const PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP: crate::pthread_mutex_t = pthread_mutex_t { - size: [ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ], -}; -#[cfg(target_endian = "little")] -pub const PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP: crate::pthread_mutex_t = pthread_mutex_t { - size: [ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ], -}; -#[cfg(target_endian = "little")] -pub const PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP: crate::pthread_mutex_t = pthread_mutex_t { - size: [ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ], -}; -#[cfg(target_endian = "big")] -pub const PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP: crate::pthread_mutex_t = pthread_mutex_t { - size: [ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ], -}; -#[cfg(target_endian = "big")] -pub const PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP: crate::pthread_mutex_t = pthread_mutex_t { - size: [ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ], -}; -#[cfg(target_endian = "big")] -pub const PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP: crate::pthread_mutex_t = pthread_mutex_t { - size: [ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ], -}; - -pub const SYS_read: c_long = 5000 + 0; -pub const SYS_write: c_long = 5000 + 1; -pub const SYS_open: c_long = 5000 + 2; -pub const SYS_close: c_long = 5000 + 3; -pub const SYS_stat: c_long = 5000 + 4; -pub const SYS_fstat: c_long = 5000 + 5; -pub const SYS_lstat: c_long = 5000 + 6; -pub const SYS_poll: c_long = 5000 + 7; -pub const SYS_lseek: c_long = 5000 + 8; -pub const SYS_mmap: c_long = 5000 + 9; -pub const SYS_mprotect: c_long = 5000 + 10; -pub const SYS_munmap: c_long = 5000 + 11; -pub const SYS_brk: c_long = 5000 + 12; -pub const SYS_rt_sigaction: c_long = 5000 + 13; -pub const SYS_rt_sigprocmask: c_long = 5000 + 14; -pub const SYS_ioctl: c_long = 5000 + 15; -pub const SYS_pread64: c_long = 5000 + 16; -pub const SYS_pwrite64: c_long = 5000 + 17; -pub const SYS_readv: c_long = 5000 + 18; -pub const SYS_writev: c_long = 5000 + 19; -pub const SYS_access: c_long = 5000 + 20; -pub const SYS_pipe: c_long = 5000 + 21; -pub const SYS__newselect: c_long = 5000 + 22; -pub const SYS_sched_yield: c_long = 5000 + 23; -pub const SYS_mremap: c_long = 5000 + 24; -pub const SYS_msync: c_long = 
5000 + 25; -pub const SYS_mincore: c_long = 5000 + 26; -pub const SYS_madvise: c_long = 5000 + 27; -pub const SYS_shmget: c_long = 5000 + 28; -pub const SYS_shmat: c_long = 5000 + 29; -pub const SYS_shmctl: c_long = 5000 + 30; -pub const SYS_dup: c_long = 5000 + 31; -pub const SYS_dup2: c_long = 5000 + 32; -pub const SYS_pause: c_long = 5000 + 33; -pub const SYS_nanosleep: c_long = 5000 + 34; -pub const SYS_getitimer: c_long = 5000 + 35; -pub const SYS_setitimer: c_long = 5000 + 36; -pub const SYS_alarm: c_long = 5000 + 37; -pub const SYS_getpid: c_long = 5000 + 38; -pub const SYS_sendfile: c_long = 5000 + 39; -pub const SYS_socket: c_long = 5000 + 40; -pub const SYS_connect: c_long = 5000 + 41; -pub const SYS_accept: c_long = 5000 + 42; -pub const SYS_sendto: c_long = 5000 + 43; -pub const SYS_recvfrom: c_long = 5000 + 44; -pub const SYS_sendmsg: c_long = 5000 + 45; -pub const SYS_recvmsg: c_long = 5000 + 46; -pub const SYS_shutdown: c_long = 5000 + 47; -pub const SYS_bind: c_long = 5000 + 48; -pub const SYS_listen: c_long = 5000 + 49; -pub const SYS_getsockname: c_long = 5000 + 50; -pub const SYS_getpeername: c_long = 5000 + 51; -pub const SYS_socketpair: c_long = 5000 + 52; -pub const SYS_setsockopt: c_long = 5000 + 53; -pub const SYS_getsockopt: c_long = 5000 + 54; -pub const SYS_clone: c_long = 5000 + 55; -pub const SYS_fork: c_long = 5000 + 56; -pub const SYS_execve: c_long = 5000 + 57; -pub const SYS_exit: c_long = 5000 + 58; -pub const SYS_wait4: c_long = 5000 + 59; -pub const SYS_kill: c_long = 5000 + 60; -pub const SYS_uname: c_long = 5000 + 61; -pub const SYS_semget: c_long = 5000 + 62; -pub const SYS_semop: c_long = 5000 + 63; -pub const SYS_semctl: c_long = 5000 + 64; -pub const SYS_shmdt: c_long = 5000 + 65; -pub const SYS_msgget: c_long = 5000 + 66; -pub const SYS_msgsnd: c_long = 5000 + 67; -pub const SYS_msgrcv: c_long = 5000 + 68; -pub const SYS_msgctl: c_long = 5000 + 69; -pub const SYS_fcntl: c_long = 5000 + 70; -pub const SYS_flock: c_long = 5000 + 71; -pub const SYS_fsync: c_long = 5000 + 72; -pub const SYS_fdatasync: c_long = 5000 + 73; -pub const SYS_truncate: c_long = 5000 + 74; -pub const SYS_ftruncate: c_long = 5000 + 75; -pub const SYS_getdents: c_long = 5000 + 76; -pub const SYS_getcwd: c_long = 5000 + 77; -pub const SYS_chdir: c_long = 5000 + 78; -pub const SYS_fchdir: c_long = 5000 + 79; -pub const SYS_rename: c_long = 5000 + 80; -pub const SYS_mkdir: c_long = 5000 + 81; -pub const SYS_rmdir: c_long = 5000 + 82; -pub const SYS_creat: c_long = 5000 + 83; -pub const SYS_link: c_long = 5000 + 84; -pub const SYS_unlink: c_long = 5000 + 85; -pub const SYS_symlink: c_long = 5000 + 86; -pub const SYS_readlink: c_long = 5000 + 87; -pub const SYS_chmod: c_long = 5000 + 88; -pub const SYS_fchmod: c_long = 5000 + 89; -pub const SYS_chown: c_long = 5000 + 90; -pub const SYS_fchown: c_long = 5000 + 91; -pub const SYS_lchown: c_long = 5000 + 92; -pub const SYS_umask: c_long = 5000 + 93; -pub const SYS_gettimeofday: c_long = 5000 + 94; -pub const SYS_getrlimit: c_long = 5000 + 95; -pub const SYS_getrusage: c_long = 5000 + 96; -pub const SYS_sysinfo: c_long = 5000 + 97; -pub const SYS_times: c_long = 5000 + 98; -pub const SYS_ptrace: c_long = 5000 + 99; -pub const SYS_getuid: c_long = 5000 + 100; -pub const SYS_syslog: c_long = 5000 + 101; -pub const SYS_getgid: c_long = 5000 + 102; -pub const SYS_setuid: c_long = 5000 + 103; -pub const SYS_setgid: c_long = 5000 + 104; -pub const SYS_geteuid: c_long = 5000 + 105; -pub const SYS_getegid: c_long = 5000 + 106; -pub const 
SYS_setpgid: c_long = 5000 + 107; -pub const SYS_getppid: c_long = 5000 + 108; -pub const SYS_getpgrp: c_long = 5000 + 109; -pub const SYS_setsid: c_long = 5000 + 110; -pub const SYS_setreuid: c_long = 5000 + 111; -pub const SYS_setregid: c_long = 5000 + 112; -pub const SYS_getgroups: c_long = 5000 + 113; -pub const SYS_setgroups: c_long = 5000 + 114; -pub const SYS_setresuid: c_long = 5000 + 115; -pub const SYS_getresuid: c_long = 5000 + 116; -pub const SYS_setresgid: c_long = 5000 + 117; -pub const SYS_getresgid: c_long = 5000 + 118; -pub const SYS_getpgid: c_long = 5000 + 119; -pub const SYS_setfsuid: c_long = 5000 + 120; -pub const SYS_setfsgid: c_long = 5000 + 121; -pub const SYS_getsid: c_long = 5000 + 122; -pub const SYS_capget: c_long = 5000 + 123; -pub const SYS_capset: c_long = 5000 + 124; -pub const SYS_rt_sigpending: c_long = 5000 + 125; -pub const SYS_rt_sigtimedwait: c_long = 5000 + 126; -pub const SYS_rt_sigqueueinfo: c_long = 5000 + 127; -pub const SYS_rt_sigsuspend: c_long = 5000 + 128; -pub const SYS_sigaltstack: c_long = 5000 + 129; -pub const SYS_utime: c_long = 5000 + 130; -pub const SYS_mknod: c_long = 5000 + 131; -pub const SYS_personality: c_long = 5000 + 132; -pub const SYS_ustat: c_long = 5000 + 133; -pub const SYS_statfs: c_long = 5000 + 134; -pub const SYS_fstatfs: c_long = 5000 + 135; -pub const SYS_sysfs: c_long = 5000 + 136; -pub const SYS_getpriority: c_long = 5000 + 137; -pub const SYS_setpriority: c_long = 5000 + 138; -pub const SYS_sched_setparam: c_long = 5000 + 139; -pub const SYS_sched_getparam: c_long = 5000 + 140; -pub const SYS_sched_setscheduler: c_long = 5000 + 141; -pub const SYS_sched_getscheduler: c_long = 5000 + 142; -pub const SYS_sched_get_priority_max: c_long = 5000 + 143; -pub const SYS_sched_get_priority_min: c_long = 5000 + 144; -pub const SYS_sched_rr_get_interval: c_long = 5000 + 145; -pub const SYS_mlock: c_long = 5000 + 146; -pub const SYS_munlock: c_long = 5000 + 147; -pub const SYS_mlockall: c_long = 5000 + 148; -pub const SYS_munlockall: c_long = 5000 + 149; -pub const SYS_vhangup: c_long = 5000 + 150; -pub const SYS_pivot_root: c_long = 5000 + 151; -pub const SYS__sysctl: c_long = 5000 + 152; -pub const SYS_prctl: c_long = 5000 + 153; -pub const SYS_adjtimex: c_long = 5000 + 154; -pub const SYS_setrlimit: c_long = 5000 + 155; -pub const SYS_chroot: c_long = 5000 + 156; -pub const SYS_sync: c_long = 5000 + 157; -pub const SYS_acct: c_long = 5000 + 158; -pub const SYS_settimeofday: c_long = 5000 + 159; -pub const SYS_mount: c_long = 5000 + 160; -pub const SYS_umount2: c_long = 5000 + 161; -pub const SYS_swapon: c_long = 5000 + 162; -pub const SYS_swapoff: c_long = 5000 + 163; -pub const SYS_reboot: c_long = 5000 + 164; -pub const SYS_sethostname: c_long = 5000 + 165; -pub const SYS_setdomainname: c_long = 5000 + 166; -#[deprecated(since = "0.2.70", note = "Functional up to 2.6 kernel")] -pub const SYS_create_module: c_long = 5000 + 167; -pub const SYS_init_module: c_long = 5000 + 168; -pub const SYS_delete_module: c_long = 5000 + 169; -#[deprecated(since = "0.2.70", note = "Functional up to 2.6 kernel")] -pub const SYS_get_kernel_syms: c_long = 5000 + 170; -#[deprecated(since = "0.2.70", note = "Functional up to 2.6 kernel")] -pub const SYS_query_module: c_long = 5000 + 171; -pub const SYS_quotactl: c_long = 5000 + 172; -pub const SYS_nfsservctl: c_long = 5000 + 173; -pub const SYS_getpmsg: c_long = 5000 + 174; -pub const SYS_putpmsg: c_long = 5000 + 175; -pub const SYS_afs_syscall: c_long = 5000 + 176; -pub const SYS_gettid: 
c_long = 5000 + 178; -pub const SYS_readahead: c_long = 5000 + 179; -pub const SYS_setxattr: c_long = 5000 + 180; -pub const SYS_lsetxattr: c_long = 5000 + 181; -pub const SYS_fsetxattr: c_long = 5000 + 182; -pub const SYS_getxattr: c_long = 5000 + 183; -pub const SYS_lgetxattr: c_long = 5000 + 184; -pub const SYS_fgetxattr: c_long = 5000 + 185; -pub const SYS_listxattr: c_long = 5000 + 186; -pub const SYS_llistxattr: c_long = 5000 + 187; -pub const SYS_flistxattr: c_long = 5000 + 188; -pub const SYS_removexattr: c_long = 5000 + 189; -pub const SYS_lremovexattr: c_long = 5000 + 190; -pub const SYS_fremovexattr: c_long = 5000 + 191; -pub const SYS_tkill: c_long = 5000 + 192; -pub const SYS_futex: c_long = 5000 + 194; -pub const SYS_sched_setaffinity: c_long = 5000 + 195; -pub const SYS_sched_getaffinity: c_long = 5000 + 196; -pub const SYS_cacheflush: c_long = 5000 + 197; -pub const SYS_cachectl: c_long = 5000 + 198; -pub const SYS_sysmips: c_long = 5000 + 199; -pub const SYS_io_setup: c_long = 5000 + 200; -pub const SYS_io_destroy: c_long = 5000 + 201; -pub const SYS_io_getevents: c_long = 5000 + 202; -pub const SYS_io_submit: c_long = 5000 + 203; -pub const SYS_io_cancel: c_long = 5000 + 204; -pub const SYS_exit_group: c_long = 5000 + 205; -pub const SYS_lookup_dcookie: c_long = 5000 + 206; -pub const SYS_epoll_create: c_long = 5000 + 207; -pub const SYS_epoll_ctl: c_long = 5000 + 208; -pub const SYS_epoll_wait: c_long = 5000 + 209; -pub const SYS_remap_file_pages: c_long = 5000 + 210; -pub const SYS_rt_sigreturn: c_long = 5000 + 211; -pub const SYS_set_tid_address: c_long = 5000 + 212; -pub const SYS_restart_syscall: c_long = 5000 + 213; -pub const SYS_semtimedop: c_long = 5000 + 214; -pub const SYS_fadvise64: c_long = 5000 + 215; -pub const SYS_timer_create: c_long = 5000 + 216; -pub const SYS_timer_settime: c_long = 5000 + 217; -pub const SYS_timer_gettime: c_long = 5000 + 218; -pub const SYS_timer_getoverrun: c_long = 5000 + 219; -pub const SYS_timer_delete: c_long = 5000 + 220; -pub const SYS_clock_settime: c_long = 5000 + 221; -pub const SYS_clock_gettime: c_long = 5000 + 222; -pub const SYS_clock_getres: c_long = 5000 + 223; -pub const SYS_clock_nanosleep: c_long = 5000 + 224; -pub const SYS_tgkill: c_long = 5000 + 225; -pub const SYS_utimes: c_long = 5000 + 226; -pub const SYS_mbind: c_long = 5000 + 227; -pub const SYS_get_mempolicy: c_long = 5000 + 228; -pub const SYS_set_mempolicy: c_long = 5000 + 229; -pub const SYS_mq_open: c_long = 5000 + 230; -pub const SYS_mq_unlink: c_long = 5000 + 231; -pub const SYS_mq_timedsend: c_long = 5000 + 232; -pub const SYS_mq_timedreceive: c_long = 5000 + 233; -pub const SYS_mq_notify: c_long = 5000 + 234; -pub const SYS_mq_getsetattr: c_long = 5000 + 235; -pub const SYS_vserver: c_long = 5000 + 236; -pub const SYS_waitid: c_long = 5000 + 237; -/* pub const SYS_sys_setaltroot: c_long = 5000 + 238; */ -pub const SYS_add_key: c_long = 5000 + 239; -pub const SYS_request_key: c_long = 5000 + 240; -pub const SYS_keyctl: c_long = 5000 + 241; -pub const SYS_set_thread_area: c_long = 5000 + 242; -pub const SYS_inotify_init: c_long = 5000 + 243; -pub const SYS_inotify_add_watch: c_long = 5000 + 244; -pub const SYS_inotify_rm_watch: c_long = 5000 + 245; -pub const SYS_migrate_pages: c_long = 5000 + 246; -pub const SYS_openat: c_long = 5000 + 247; -pub const SYS_mkdirat: c_long = 5000 + 248; -pub const SYS_mknodat: c_long = 5000 + 249; -pub const SYS_fchownat: c_long = 5000 + 250; -pub const SYS_futimesat: c_long = 5000 + 251; -pub const SYS_newfstatat: 
c_long = 5000 + 252; -pub const SYS_unlinkat: c_long = 5000 + 253; -pub const SYS_renameat: c_long = 5000 + 254; -pub const SYS_linkat: c_long = 5000 + 255; -pub const SYS_symlinkat: c_long = 5000 + 256; -pub const SYS_readlinkat: c_long = 5000 + 257; -pub const SYS_fchmodat: c_long = 5000 + 258; -pub const SYS_faccessat: c_long = 5000 + 259; -pub const SYS_pselect6: c_long = 5000 + 260; -pub const SYS_ppoll: c_long = 5000 + 261; -pub const SYS_unshare: c_long = 5000 + 262; -pub const SYS_splice: c_long = 5000 + 263; -pub const SYS_sync_file_range: c_long = 5000 + 264; -pub const SYS_tee: c_long = 5000 + 265; -pub const SYS_vmsplice: c_long = 5000 + 266; -pub const SYS_move_pages: c_long = 5000 + 267; -pub const SYS_set_robust_list: c_long = 5000 + 268; -pub const SYS_get_robust_list: c_long = 5000 + 269; -pub const SYS_kexec_load: c_long = 5000 + 270; -pub const SYS_getcpu: c_long = 5000 + 271; -pub const SYS_epoll_pwait: c_long = 5000 + 272; -pub const SYS_ioprio_set: c_long = 5000 + 273; -pub const SYS_ioprio_get: c_long = 5000 + 274; -pub const SYS_utimensat: c_long = 5000 + 275; -pub const SYS_signalfd: c_long = 5000 + 276; -pub const SYS_timerfd: c_long = 5000 + 277; -pub const SYS_eventfd: c_long = 5000 + 278; -pub const SYS_fallocate: c_long = 5000 + 279; -pub const SYS_timerfd_create: c_long = 5000 + 280; -pub const SYS_timerfd_gettime: c_long = 5000 + 281; -pub const SYS_timerfd_settime: c_long = 5000 + 282; -pub const SYS_signalfd4: c_long = 5000 + 283; -pub const SYS_eventfd2: c_long = 5000 + 284; -pub const SYS_epoll_create1: c_long = 5000 + 285; -pub const SYS_dup3: c_long = 5000 + 286; -pub const SYS_pipe2: c_long = 5000 + 287; -pub const SYS_inotify_init1: c_long = 5000 + 288; -pub const SYS_preadv: c_long = 5000 + 289; -pub const SYS_pwritev: c_long = 5000 + 290; -pub const SYS_rt_tgsigqueueinfo: c_long = 5000 + 291; -pub const SYS_perf_event_open: c_long = 5000 + 292; -pub const SYS_accept4: c_long = 5000 + 293; -pub const SYS_recvmmsg: c_long = 5000 + 294; -pub const SYS_fanotify_init: c_long = 5000 + 295; -pub const SYS_fanotify_mark: c_long = 5000 + 296; -pub const SYS_prlimit64: c_long = 5000 + 297; -pub const SYS_name_to_handle_at: c_long = 5000 + 298; -pub const SYS_open_by_handle_at: c_long = 5000 + 299; -pub const SYS_clock_adjtime: c_long = 5000 + 300; -pub const SYS_syncfs: c_long = 5000 + 301; -pub const SYS_sendmmsg: c_long = 5000 + 302; -pub const SYS_setns: c_long = 5000 + 303; -pub const SYS_process_vm_readv: c_long = 5000 + 304; -pub const SYS_process_vm_writev: c_long = 5000 + 305; -pub const SYS_kcmp: c_long = 5000 + 306; -pub const SYS_finit_module: c_long = 5000 + 307; -pub const SYS_getdents64: c_long = 5000 + 308; -pub const SYS_sched_setattr: c_long = 5000 + 309; -pub const SYS_sched_getattr: c_long = 5000 + 310; -pub const SYS_renameat2: c_long = 5000 + 311; -pub const SYS_seccomp: c_long = 5000 + 312; -pub const SYS_getrandom: c_long = 5000 + 313; -pub const SYS_memfd_create: c_long = 5000 + 314; -pub const SYS_bpf: c_long = 5000 + 315; -pub const SYS_execveat: c_long = 5000 + 316; -pub const SYS_userfaultfd: c_long = 5000 + 317; -pub const SYS_membarrier: c_long = 5000 + 318; -pub const SYS_mlock2: c_long = 5000 + 319; -pub const SYS_copy_file_range: c_long = 5000 + 320; -pub const SYS_preadv2: c_long = 5000 + 321; -pub const SYS_pwritev2: c_long = 5000 + 322; -pub const SYS_pkey_mprotect: c_long = 5000 + 323; -pub const SYS_pkey_alloc: c_long = 5000 + 324; -pub const SYS_pkey_free: c_long = 5000 + 325; -pub const SYS_statx: c_long = 5000 + 326; 
-pub const SYS_rseq: c_long = 5000 + 327; -pub const SYS_pidfd_send_signal: c_long = 5000 + 424; -pub const SYS_io_uring_setup: c_long = 5000 + 425; -pub const SYS_io_uring_enter: c_long = 5000 + 426; -pub const SYS_io_uring_register: c_long = 5000 + 427; -pub const SYS_open_tree: c_long = 5000 + 428; -pub const SYS_move_mount: c_long = 5000 + 429; -pub const SYS_fsopen: c_long = 5000 + 430; -pub const SYS_fsconfig: c_long = 5000 + 431; -pub const SYS_fsmount: c_long = 5000 + 432; -pub const SYS_fspick: c_long = 5000 + 433; -pub const SYS_pidfd_open: c_long = 5000 + 434; -pub const SYS_clone3: c_long = 5000 + 435; -pub const SYS_close_range: c_long = 5000 + 436; -pub const SYS_openat2: c_long = 5000 + 437; -pub const SYS_pidfd_getfd: c_long = 5000 + 438; -pub const SYS_faccessat2: c_long = 5000 + 439; -pub const SYS_process_madvise: c_long = 5000 + 440; -pub const SYS_epoll_pwait2: c_long = 5000 + 441; -pub const SYS_mount_setattr: c_long = 5000 + 442; -pub const SYS_quotactl_fd: c_long = 5000 + 443; -pub const SYS_landlock_create_ruleset: c_long = 5000 + 444; -pub const SYS_landlock_add_rule: c_long = 5000 + 445; -pub const SYS_landlock_restrict_self: c_long = 5000 + 446; -pub const SYS_memfd_secret: c_long = 5000 + 447; -pub const SYS_process_mrelease: c_long = 5000 + 448; -pub const SYS_futex_waitv: c_long = 5000 + 449; -pub const SYS_set_mempolicy_home_node: c_long = 5000 + 450; - -pub const SFD_CLOEXEC: c_int = 0x080000; - -pub const NCCS: usize = 32; - -pub const O_TRUNC: c_int = 512; - -pub const O_NOATIME: c_int = 0o1000000; -pub const O_CLOEXEC: c_int = 0x80000; -pub const O_PATH: c_int = 0o10000000; -pub const O_TMPFILE: c_int = 0o20000000 | O_DIRECTORY; - -pub const EBFONT: c_int = 59; -pub const ENOSTR: c_int = 60; -pub const ENODATA: c_int = 61; -pub const ETIME: c_int = 62; -pub const ENOSR: c_int = 63; -pub const ENONET: c_int = 64; -pub const ENOPKG: c_int = 65; -pub const EREMOTE: c_int = 66; -pub const ENOLINK: c_int = 67; -pub const EADV: c_int = 68; -pub const ESRMNT: c_int = 69; -pub const ECOMM: c_int = 70; -pub const EPROTO: c_int = 71; -pub const EDOTDOT: c_int = 73; - -pub const SA_NODEFER: c_int = 0x40000000; -pub const SA_RESETHAND: c_int = 0x80000000; -pub const SA_RESTART: c_int = 0x10000000; -pub const SA_NOCLDSTOP: c_int = 0x00000001; - -pub const POSIX_FADV_DONTNEED: c_int = 4; -pub const POSIX_FADV_NOREUSE: c_int = 5; - -pub const EPOLL_CLOEXEC: c_int = 0x80000; - -pub const EFD_CLOEXEC: c_int = 0x80000; - -pub const O_DIRECT: c_int = 0x8000; -pub const O_DIRECTORY: c_int = 0x10000; -pub const O_NOFOLLOW: c_int = 0x20000; - -pub const O_APPEND: c_int = 8; -pub const O_CREAT: c_int = 256; -pub const O_EXCL: c_int = 1024; -pub const O_NOCTTY: c_int = 2048; -pub const O_NONBLOCK: c_int = 128; -pub const O_SYNC: c_int = 0x4010; -pub const O_RSYNC: c_int = 0x4010; -pub const O_DSYNC: c_int = 0x10; -pub const O_FSYNC: c_int = 0x4010; -pub const O_ASYNC: c_int = 0x1000; -pub const O_NDELAY: c_int = 0x80; - -pub const EDEADLK: c_int = 45; -pub const ENAMETOOLONG: c_int = 78; -pub const ENOLCK: c_int = 46; -pub const ENOSYS: c_int = 89; -pub const ENOTEMPTY: c_int = 93; -pub const ELOOP: c_int = 90; -pub const ENOMSG: c_int = 35; -pub const EIDRM: c_int = 36; -pub const ECHRNG: c_int = 37; -pub const EL2NSYNC: c_int = 38; -pub const EL3HLT: c_int = 39; -pub const EL3RST: c_int = 40; -pub const ELNRNG: c_int = 41; -pub const EUNATCH: c_int = 42; -pub const ENOCSI: c_int = 43; -pub const EL2HLT: c_int = 44; -pub const EBADE: c_int = 50; -pub const EBADR: c_int = 51; 
-pub const EXFULL: c_int = 52; -pub const ENOANO: c_int = 53; -pub const EBADRQC: c_int = 54; -pub const EBADSLT: c_int = 55; -pub const EDEADLOCK: c_int = 56; -pub const EMULTIHOP: c_int = 74; -pub const EOVERFLOW: c_int = 79; -pub const ENOTUNIQ: c_int = 80; -pub const EBADFD: c_int = 81; -pub const EBADMSG: c_int = 77; -pub const EREMCHG: c_int = 82; -pub const ELIBACC: c_int = 83; -pub const ELIBBAD: c_int = 84; -pub const ELIBSCN: c_int = 85; -pub const ELIBMAX: c_int = 86; -pub const ELIBEXEC: c_int = 87; -pub const EILSEQ: c_int = 88; -pub const ERESTART: c_int = 91; -pub const ESTRPIPE: c_int = 92; -pub const EUSERS: c_int = 94; -pub const ENOTSOCK: c_int = 95; -pub const EDESTADDRREQ: c_int = 96; -pub const EMSGSIZE: c_int = 97; -pub const EPROTOTYPE: c_int = 98; -pub const ENOPROTOOPT: c_int = 99; -pub const EPROTONOSUPPORT: c_int = 120; -pub const ESOCKTNOSUPPORT: c_int = 121; -pub const EOPNOTSUPP: c_int = 122; -pub const EPFNOSUPPORT: c_int = 123; -pub const EAFNOSUPPORT: c_int = 124; -pub const EADDRINUSE: c_int = 125; -pub const EADDRNOTAVAIL: c_int = 126; -pub const ENETDOWN: c_int = 127; -pub const ENETUNREACH: c_int = 128; -pub const ENETRESET: c_int = 129; -pub const ECONNABORTED: c_int = 130; -pub const ECONNRESET: c_int = 131; -pub const ENOBUFS: c_int = 132; -pub const EISCONN: c_int = 133; -pub const ENOTCONN: c_int = 134; -pub const ESHUTDOWN: c_int = 143; -pub const ETOOMANYREFS: c_int = 144; -pub const ETIMEDOUT: c_int = 145; -pub const ECONNREFUSED: c_int = 146; -pub const EHOSTDOWN: c_int = 147; -pub const EHOSTUNREACH: c_int = 148; -pub const EALREADY: c_int = 149; -pub const EINPROGRESS: c_int = 150; -pub const ESTALE: c_int = 151; -pub const EUCLEAN: c_int = 135; -pub const ENOTNAM: c_int = 137; -pub const ENAVAIL: c_int = 138; -pub const EISNAM: c_int = 139; -pub const EREMOTEIO: c_int = 140; -pub const EDQUOT: c_int = 1133; -pub const ENOMEDIUM: c_int = 159; -pub const EMEDIUMTYPE: c_int = 160; -pub const ECANCELED: c_int = 158; -pub const ENOKEY: c_int = 161; -pub const EKEYEXPIRED: c_int = 162; -pub const EKEYREVOKED: c_int = 163; -pub const EKEYREJECTED: c_int = 164; -pub const EOWNERDEAD: c_int = 165; -pub const ENOTRECOVERABLE: c_int = 166; -pub const ERFKILL: c_int = 167; - -pub const MAP_NORESERVE: c_int = 0x400; -pub const MAP_ANON: c_int = 0x800; -pub const MAP_ANONYMOUS: c_int = 0x800; -pub const MAP_GROWSDOWN: c_int = 0x1000; -pub const MAP_DENYWRITE: c_int = 0x2000; -pub const MAP_EXECUTABLE: c_int = 0x4000; -pub const MAP_LOCKED: c_int = 0x8000; -pub const MAP_POPULATE: c_int = 0x10000; -pub const MAP_NONBLOCK: c_int = 0x20000; -pub const MAP_STACK: c_int = 0x40000; -pub const MAP_HUGETLB: c_int = 0x080000; - -pub const SOCK_STREAM: c_int = 2; -pub const SOCK_DGRAM: c_int = 1; - -pub const SA_ONSTACK: c_int = 0x08000000; -pub const SA_SIGINFO: c_int = 0x00000008; -pub const SA_NOCLDWAIT: c_int = 0x00010000; - -pub const SIGEMT: c_int = 7; -pub const SIGCHLD: c_int = 18; -pub const SIGBUS: c_int = 10; -pub const SIGTTIN: c_int = 26; -pub const SIGTTOU: c_int = 27; -pub const SIGXCPU: c_int = 30; -pub const SIGXFSZ: c_int = 31; -pub const SIGVTALRM: c_int = 28; -pub const SIGPROF: c_int = 29; -pub const SIGWINCH: c_int = 20; -pub const SIGUSR1: c_int = 16; -pub const SIGUSR2: c_int = 17; -pub const SIGCONT: c_int = 25; -pub const SIGSTOP: c_int = 23; -pub const SIGTSTP: c_int = 24; -pub const SIGURG: c_int = 21; -pub const SIGIO: c_int = 22; -pub const SIGSYS: c_int = 12; -pub const SIGPOLL: c_int = 22; -pub const SIGPWR: c_int = 19; -pub const 
SIG_SETMASK: c_int = 3; -pub const SIG_BLOCK: c_int = 0x1; -pub const SIG_UNBLOCK: c_int = 0x2; - -pub const POLLWRNORM: c_short = 0x004; -pub const POLLWRBAND: c_short = 0x100; - -pub const VEOF: usize = 16; -pub const VEOL: usize = 17; -pub const VEOL2: usize = 6; -pub const VMIN: usize = 4; -pub const IEXTEN: crate::tcflag_t = 0x00000100; -pub const TOSTOP: crate::tcflag_t = 0x00008000; -pub const FLUSHO: crate::tcflag_t = 0x00002000; -pub const EXTPROC: crate::tcflag_t = 0o200000; -pub const TCSANOW: c_int = 0x540e; -pub const TCSADRAIN: c_int = 0x540f; -pub const TCSAFLUSH: c_int = 0x5410; - -pub const PTRACE_GETFPREGS: c_uint = 14; -pub const PTRACE_SETFPREGS: c_uint = 15; -pub const PTRACE_DETACH: c_uint = 17; -pub const PTRACE_GETFPXREGS: c_uint = 18; -pub const PTRACE_SETFPXREGS: c_uint = 19; -pub const PTRACE_GETREGS: c_uint = 12; -pub const PTRACE_SETREGS: c_uint = 13; - -pub const EFD_NONBLOCK: c_int = 0x80; - -pub const F_RDLCK: c_int = 0; -pub const F_WRLCK: c_int = 1; -pub const F_UNLCK: c_int = 2; -pub const F_GETLK: c_int = 14; -pub const F_GETOWN: c_int = 23; -pub const F_SETOWN: c_int = 24; -pub const F_SETLK: c_int = 6; -pub const F_SETLKW: c_int = 7; -pub const F_OFD_GETLK: c_int = 36; -pub const F_OFD_SETLK: c_int = 37; -pub const F_OFD_SETLKW: c_int = 38; - -pub const SFD_NONBLOCK: c_int = 0x80; - -pub const RTLD_DEEPBIND: c_int = 0x10; -pub const RTLD_GLOBAL: c_int = 0x4; -pub const RTLD_NOLOAD: c_int = 0x8; - -pub const MCL_CURRENT: c_int = 0x0001; -pub const MCL_FUTURE: c_int = 0x0002; -pub const MCL_ONFAULT: c_int = 0x0004; - -pub const SIGSTKSZ: size_t = 8192; -pub const MINSIGSTKSZ: size_t = 2048; -pub const CBAUD: crate::tcflag_t = 0o0010017; -pub const TAB1: crate::tcflag_t = 0x00000800; -pub const TAB2: crate::tcflag_t = 0x00001000; -pub const TAB3: crate::tcflag_t = 0x00001800; -pub const CR1: crate::tcflag_t = 0x00000200; -pub const CR2: crate::tcflag_t = 0x00000400; -pub const CR3: crate::tcflag_t = 0x00000600; -pub const FF1: crate::tcflag_t = 0x00008000; -pub const BS1: crate::tcflag_t = 0x00002000; -pub const VT1: crate::tcflag_t = 0x00004000; -pub const VWERASE: usize = 14; -pub const VREPRINT: usize = 12; -pub const VSUSP: usize = 10; -pub const VSTART: usize = 8; -pub const VSTOP: usize = 9; -pub const VDISCARD: usize = 13; -pub const VTIME: usize = 5; -pub const IXON: crate::tcflag_t = 0x00000400; -pub const IXOFF: crate::tcflag_t = 0x00001000; -pub const ONLCR: crate::tcflag_t = 0x4; -pub const CSIZE: crate::tcflag_t = 0x00000030; -pub const CS6: crate::tcflag_t = 0x00000010; -pub const CS7: crate::tcflag_t = 0x00000020; -pub const CS8: crate::tcflag_t = 0x00000030; -pub const CSTOPB: crate::tcflag_t = 0x00000040; -pub const CREAD: crate::tcflag_t = 0x00000080; -pub const PARENB: crate::tcflag_t = 0x00000100; -pub const PARODD: crate::tcflag_t = 0x00000200; -pub const HUPCL: crate::tcflag_t = 0x00000400; -pub const CLOCAL: crate::tcflag_t = 0x00000800; -pub const ECHOKE: crate::tcflag_t = 0x00000800; -pub const ECHOE: crate::tcflag_t = 0x00000010; -pub const ECHOK: crate::tcflag_t = 0x00000020; -pub const ECHONL: crate::tcflag_t = 0x00000040; -pub const ECHOPRT: crate::tcflag_t = 0x00000400; -pub const ECHOCTL: crate::tcflag_t = 0x00000200; -pub const ISIG: crate::tcflag_t = 0x00000001; -pub const ICANON: crate::tcflag_t = 0x00000002; -pub const PENDIN: crate::tcflag_t = 0x00004000; -pub const NOFLSH: crate::tcflag_t = 0x00000080; -pub const CIBAUD: crate::tcflag_t = 0o02003600000; -pub const CBAUDEX: crate::tcflag_t = 0o010000; -pub const VSWTC: 
usize = 7; -pub const OLCUC: crate::tcflag_t = 0o000002; -pub const NLDLY: crate::tcflag_t = 0o000400; -pub const CRDLY: crate::tcflag_t = 0o003000; -pub const TABDLY: crate::tcflag_t = 0o014000; -pub const BSDLY: crate::tcflag_t = 0o020000; -pub const FFDLY: crate::tcflag_t = 0o100000; -pub const VTDLY: crate::tcflag_t = 0o040000; -pub const XTABS: crate::tcflag_t = 0o014000; - -pub const B0: crate::speed_t = 0o000000; -pub const B50: crate::speed_t = 0o000001; -pub const B75: crate::speed_t = 0o000002; -pub const B110: crate::speed_t = 0o000003; -pub const B134: crate::speed_t = 0o000004; -pub const B150: crate::speed_t = 0o000005; -pub const B200: crate::speed_t = 0o000006; -pub const B300: crate::speed_t = 0o000007; -pub const B600: crate::speed_t = 0o000010; -pub const B1200: crate::speed_t = 0o000011; -pub const B1800: crate::speed_t = 0o000012; -pub const B2400: crate::speed_t = 0o000013; -pub const B4800: crate::speed_t = 0o000014; -pub const B9600: crate::speed_t = 0o000015; -pub const B19200: crate::speed_t = 0o000016; -pub const B38400: crate::speed_t = 0o000017; -pub const EXTA: crate::speed_t = B19200; -pub const EXTB: crate::speed_t = B38400; -pub const B57600: crate::speed_t = 0o010001; -pub const B115200: crate::speed_t = 0o010002; -pub const B230400: crate::speed_t = 0o010003; -pub const B460800: crate::speed_t = 0o010004; -pub const B500000: crate::speed_t = 0o010005; -pub const B576000: crate::speed_t = 0o010006; -pub const B921600: crate::speed_t = 0o010007; -pub const B1000000: crate::speed_t = 0o010010; -pub const B1152000: crate::speed_t = 0o010011; -pub const B1500000: crate::speed_t = 0o010012; -pub const B2000000: crate::speed_t = 0o010013; -pub const B2500000: crate::speed_t = 0o010014; -pub const B3000000: crate::speed_t = 0o010015; -pub const B3500000: crate::speed_t = 0o010016; -pub const B4000000: crate::speed_t = 0o010017; - -pub const EHWPOISON: c_int = 168; - -extern "C" { - pub fn sysctl( - name: *mut c_int, - namelen: c_int, - oldp: *mut c_void, - oldlenp: *mut size_t, - newp: *mut c_void, - newlen: size_t, - ) -> c_int; -} diff --git a/vendor/libc/src/unix/linux_like/linux/gnu/b64/mod.rs b/vendor/libc/src/unix/linux_like/linux/gnu/b64/mod.rs deleted file mode 100644 index ba5678b4597952..00000000000000 --- a/vendor/libc/src/unix/linux_like/linux/gnu/b64/mod.rs +++ /dev/null @@ -1,213 +0,0 @@ -//! 64-bit specific definitions for linux-like values - -use crate::prelude::*; - -pub type ino_t = u64; -pub type off_t = i64; -pub type blkcnt_t = i64; -pub type shmatt_t = u64; -pub type msgqnum_t = u64; -pub type msglen_t = u64; -pub type fsblkcnt_t = u64; -pub type fsfilcnt_t = u64; -pub type rlim_t = u64; -#[cfg(all(target_arch = "x86_64", target_pointer_width = "32"))] -pub type __syscall_ulong_t = c_ulonglong; -#[cfg(not(all(target_arch = "x86_64", target_pointer_width = "32")))] -pub type __syscall_ulong_t = c_ulong; - -cfg_if! { - if #[cfg(all(target_arch = "aarch64", target_pointer_width = "32"))] { - pub type clock_t = i32; - pub type time_t = i32; - pub type __fsword_t = i32; - } else { - pub type __fsword_t = i64; - pub type clock_t = i64; - pub type time_t = i64; - } -} - -s! 
{ - pub struct sigset_t { - #[cfg(target_pointer_width = "32")] - __val: [u32; 32], - #[cfg(target_pointer_width = "64")] - __val: [u64; 16], - } - - pub struct sysinfo { - pub uptime: i64, - pub loads: [u64; 3], - pub totalram: u64, - pub freeram: u64, - pub sharedram: u64, - pub bufferram: u64, - pub totalswap: u64, - pub freeswap: u64, - pub procs: c_ushort, - pub pad: c_ushort, - pub totalhigh: u64, - pub freehigh: u64, - pub mem_unit: c_uint, - pub _f: [c_char; 0], - } - - pub struct msqid_ds { - pub msg_perm: crate::ipc_perm, - pub msg_stime: crate::time_t, - pub msg_rtime: crate::time_t, - pub msg_ctime: crate::time_t, - pub __msg_cbytes: u64, - pub msg_qnum: crate::msgqnum_t, - pub msg_qbytes: crate::msglen_t, - pub msg_lspid: crate::pid_t, - pub msg_lrpid: crate::pid_t, - __glibc_reserved4: u64, - __glibc_reserved5: u64, - } - - pub struct semid_ds { - pub sem_perm: ipc_perm, - pub sem_otime: crate::time_t, - #[cfg(not(any( - target_arch = "aarch64", - target_arch = "loongarch64", - target_arch = "mips64", - target_arch = "mips64r6", - target_arch = "powerpc64", - target_arch = "riscv64", - target_arch = "sparc64", - target_arch = "s390x", - )))] - __reserved: crate::__syscall_ulong_t, - pub sem_ctime: crate::time_t, - #[cfg(not(any( - target_arch = "aarch64", - target_arch = "loongarch64", - target_arch = "mips64", - target_arch = "mips64r6", - target_arch = "powerpc64", - target_arch = "riscv64", - target_arch = "sparc64", - target_arch = "s390x", - )))] - __reserved2: crate::__syscall_ulong_t, - pub sem_nsems: crate::__syscall_ulong_t, - __glibc_reserved3: crate::__syscall_ulong_t, - __glibc_reserved4: crate::__syscall_ulong_t, - } - - pub struct timex { - pub modes: c_uint, - #[cfg(all(target_arch = "x86_64", target_pointer_width = "32"))] - pub offset: i64, - #[cfg(not(all(target_arch = "x86_64", target_pointer_width = "32")))] - pub offset: c_long, - #[cfg(all(target_arch = "x86_64", target_pointer_width = "32"))] - pub freq: i64, - #[cfg(not(all(target_arch = "x86_64", target_pointer_width = "32")))] - pub freq: c_long, - #[cfg(all(target_arch = "x86_64", target_pointer_width = "32"))] - pub maxerror: i64, - #[cfg(not(all(target_arch = "x86_64", target_pointer_width = "32")))] - pub maxerror: c_long, - #[cfg(all(target_arch = "x86_64", target_pointer_width = "32"))] - pub esterror: i64, - #[cfg(not(all(target_arch = "x86_64", target_pointer_width = "32")))] - pub esterror: c_long, - pub status: c_int, - #[cfg(all(target_arch = "x86_64", target_pointer_width = "32"))] - pub constant: i64, - #[cfg(not(all(target_arch = "x86_64", target_pointer_width = "32")))] - pub constant: c_long, - #[cfg(all(target_arch = "x86_64", target_pointer_width = "32"))] - pub precision: i64, - #[cfg(not(all(target_arch = "x86_64", target_pointer_width = "32")))] - pub precision: c_long, - #[cfg(all(target_arch = "x86_64", target_pointer_width = "32"))] - pub tolerance: i64, - #[cfg(not(all(target_arch = "x86_64", target_pointer_width = "32")))] - pub tolerance: c_long, - pub time: crate::timeval, - #[cfg(all(target_arch = "x86_64", target_pointer_width = "32"))] - pub tick: i64, - #[cfg(not(all(target_arch = "x86_64", target_pointer_width = "32")))] - pub tick: c_long, - #[cfg(all(target_arch = "x86_64", target_pointer_width = "32"))] - pub ppsfreq: i64, - #[cfg(not(all(target_arch = "x86_64", target_pointer_width = "32")))] - pub ppsfreq: c_long, - #[cfg(all(target_arch = "x86_64", target_pointer_width = "32"))] - pub jitter: i64, - #[cfg(not(all(target_arch = "x86_64", target_pointer_width = 
"32")))] - pub jitter: c_long, - pub shift: c_int, - #[cfg(all(target_arch = "x86_64", target_pointer_width = "32"))] - pub stabil: i64, - #[cfg(not(all(target_arch = "x86_64", target_pointer_width = "32")))] - pub stabil: c_long, - #[cfg(all(target_arch = "x86_64", target_pointer_width = "32"))] - pub jitcnt: i64, - #[cfg(not(all(target_arch = "x86_64", target_pointer_width = "32")))] - pub jitcnt: c_long, - #[cfg(all(target_arch = "x86_64", target_pointer_width = "32"))] - pub calcnt: i64, - #[cfg(not(all(target_arch = "x86_64", target_pointer_width = "32")))] - pub calcnt: c_long, - #[cfg(all(target_arch = "x86_64", target_pointer_width = "32"))] - pub errcnt: i64, - #[cfg(not(all(target_arch = "x86_64", target_pointer_width = "32")))] - pub errcnt: c_long, - #[cfg(all(target_arch = "x86_64", target_pointer_width = "32"))] - pub stbcnt: i64, - #[cfg(not(all(target_arch = "x86_64", target_pointer_width = "32")))] - pub stbcnt: c_long, - pub tai: c_int, - pub __unused1: i32, - pub __unused2: i32, - pub __unused3: i32, - pub __unused4: i32, - pub __unused5: i32, - pub __unused6: i32, - pub __unused7: i32, - pub __unused8: i32, - pub __unused9: i32, - pub __unused10: i32, - pub __unused11: i32, - } -} - -pub const __SIZEOF_PTHREAD_RWLOCKATTR_T: usize = 8; - -pub const O_LARGEFILE: c_int = 0; - -cfg_if! { - if #[cfg(target_arch = "aarch64")] { - mod aarch64; - pub use self::aarch64::*; - } else if #[cfg(any(target_arch = "powerpc64"))] { - mod powerpc64; - pub use self::powerpc64::*; - } else if #[cfg(any(target_arch = "sparc64"))] { - mod sparc64; - pub use self::sparc64::*; - } else if #[cfg(any(target_arch = "mips64", target_arch = "mips64r6"))] { - mod mips64; - pub use self::mips64::*; - } else if #[cfg(any(target_arch = "s390x"))] { - mod s390x; - pub use self::s390x::*; - } else if #[cfg(target_arch = "x86_64")] { - mod x86_64; - pub use self::x86_64::*; - } else if #[cfg(any(target_arch = "riscv64"))] { - mod riscv64; - pub use self::riscv64::*; - } else if #[cfg(any(target_arch = "loongarch64"))] { - mod loongarch64; - pub use self::loongarch64::*; - } else { - // Unknown target_arch - } -} diff --git a/vendor/libc/src/unix/linux_like/linux/gnu/b64/powerpc64/mod.rs b/vendor/libc/src/unix/linux_like/linux/gnu/b64/powerpc64/mod.rs deleted file mode 100644 index 047efe55b1a388..00000000000000 --- a/vendor/libc/src/unix/linux_like/linux/gnu/b64/powerpc64/mod.rs +++ /dev/null @@ -1,974 +0,0 @@ -//! PowerPC64-specific definitions for 64-bit linux-like values - -use crate::prelude::*; -use crate::{off64_t, off_t, pthread_mutex_t}; - -pub type wchar_t = i32; -pub type nlink_t = u64; -pub type blksize_t = i64; -pub type suseconds_t = i64; -pub type __u64 = c_ulong; -pub type __s64 = c_long; - -s! 
{ - // FIXME(1.0): This should not implement `PartialEq` - #[allow(unpredictable_function_pointer_comparisons)] - pub struct sigaction { - pub sa_sigaction: crate::sighandler_t, - pub sa_mask: crate::sigset_t, - #[cfg(target_arch = "sparc64")] - __reserved0: c_int, - pub sa_flags: c_int, - pub sa_restorer: Option, - } - - pub struct statfs { - pub f_type: crate::__fsword_t, - pub f_bsize: crate::__fsword_t, - pub f_blocks: crate::fsblkcnt_t, - pub f_bfree: crate::fsblkcnt_t, - pub f_bavail: crate::fsblkcnt_t, - - pub f_files: crate::fsfilcnt_t, - pub f_ffree: crate::fsfilcnt_t, - pub f_fsid: crate::fsid_t, - - pub f_namelen: crate::__fsword_t, - pub f_frsize: crate::__fsword_t, - f_spare: [crate::__fsword_t; 5], - } - - pub struct flock { - pub l_type: c_short, - pub l_whence: c_short, - pub l_start: off_t, - pub l_len: off_t, - pub l_pid: crate::pid_t, - } - - pub struct flock64 { - pub l_type: c_short, - pub l_whence: c_short, - pub l_start: off64_t, - pub l_len: off64_t, - pub l_pid: crate::pid_t, - } - - pub struct stat { - pub st_dev: crate::dev_t, - pub st_ino: crate::ino_t, - pub st_nlink: crate::nlink_t, - pub st_mode: crate::mode_t, - pub st_uid: crate::uid_t, - pub st_gid: crate::gid_t, - __pad0: c_int, - pub st_rdev: crate::dev_t, - pub st_size: off_t, - pub st_blksize: crate::blksize_t, - pub st_blocks: crate::blkcnt_t, - pub st_atime: crate::time_t, - pub st_atime_nsec: c_long, - pub st_mtime: crate::time_t, - pub st_mtime_nsec: c_long, - pub st_ctime: crate::time_t, - pub st_ctime_nsec: c_long, - __unused: [c_long; 3], - } - - pub struct stat64 { - pub st_dev: crate::dev_t, - pub st_ino: crate::ino64_t, - pub st_nlink: crate::nlink_t, - pub st_mode: crate::mode_t, - pub st_uid: crate::uid_t, - pub st_gid: crate::gid_t, - __pad0: c_int, - pub st_rdev: crate::dev_t, - pub st_size: off64_t, - pub st_blksize: crate::blksize_t, - pub st_blocks: crate::blkcnt64_t, - pub st_atime: crate::time_t, - pub st_atime_nsec: c_long, - pub st_mtime: crate::time_t, - pub st_mtime_nsec: c_long, - pub st_ctime: crate::time_t, - pub st_ctime_nsec: c_long, - __reserved: [c_long; 3], - } - - pub struct statfs64 { - pub f_type: crate::__fsword_t, - pub f_bsize: crate::__fsword_t, - pub f_blocks: u64, - pub f_bfree: u64, - pub f_bavail: u64, - pub f_files: u64, - pub f_ffree: u64, - pub f_fsid: crate::fsid_t, - pub f_namelen: crate::__fsword_t, - pub f_frsize: crate::__fsword_t, - pub f_flags: crate::__fsword_t, - pub f_spare: [crate::__fsword_t; 4], - } - - pub struct statvfs { - pub f_bsize: c_ulong, - pub f_frsize: c_ulong, - pub f_blocks: crate::fsblkcnt_t, - pub f_bfree: crate::fsblkcnt_t, - pub f_bavail: crate::fsblkcnt_t, - pub f_files: crate::fsfilcnt_t, - pub f_ffree: crate::fsfilcnt_t, - pub f_favail: crate::fsfilcnt_t, - pub f_fsid: c_ulong, - pub f_flag: c_ulong, - pub f_namemax: c_ulong, - __f_spare: [c_int; 6], - } - - pub struct statvfs64 { - pub f_bsize: c_ulong, - pub f_frsize: c_ulong, - pub f_blocks: u64, - pub f_bfree: u64, - pub f_bavail: u64, - pub f_files: u64, - pub f_ffree: u64, - pub f_favail: u64, - pub f_fsid: c_ulong, - pub f_flag: c_ulong, - pub f_namemax: c_ulong, - __f_spare: [c_int; 6], - } - - pub struct pthread_attr_t { - __size: [u64; 7], - } - - pub struct ipc_perm { - pub __key: crate::key_t, - pub uid: crate::uid_t, - pub gid: crate::gid_t, - pub cuid: crate::uid_t, - pub cgid: crate::gid_t, - pub mode: crate::mode_t, - pub __seq: u32, - __pad1: u32, - __unused1: u64, - __unused2: c_ulong, - } - - pub struct shmid_ds { - pub shm_perm: crate::ipc_perm, - pub 
shm_atime: crate::time_t, - pub shm_dtime: crate::time_t, - pub shm_ctime: crate::time_t, - pub shm_segsz: size_t, - pub shm_cpid: crate::pid_t, - pub shm_lpid: crate::pid_t, - pub shm_nattch: crate::shmatt_t, - __unused4: c_ulong, - __unused5: c_ulong, - } - - pub struct siginfo_t { - pub si_signo: c_int, - pub si_errno: c_int, - pub si_code: c_int, - #[doc(hidden)] - #[deprecated( - since = "0.2.54", - note = "Please leave a comment on \ - https://github.com/rust-lang/libc/pull/1316 if you're using \ - this field" - )] - pub _pad: [c_int; 29], - _align: [usize; 0], - } - - pub struct stack_t { - pub ss_sp: *mut c_void, - pub ss_flags: c_int, - pub ss_size: size_t, - } -} - -s_no_extra_traits! { - #[repr(align(16))] - pub struct max_align_t { - priv_: [i64; 4], - } -} - -pub const POSIX_FADV_DONTNEED: c_int = 4; -pub const POSIX_FADV_NOREUSE: c_int = 5; - -pub const RTLD_DEEPBIND: c_int = 0x8; -pub const RTLD_GLOBAL: c_int = 0x100; -pub const RTLD_NOLOAD: c_int = 0x4; -pub const VEOF: usize = 4; -pub const __SIZEOF_PTHREAD_RWLOCK_T: usize = 56; -pub const __SIZEOF_PTHREAD_BARRIER_T: usize = 32; - -pub const O_APPEND: c_int = 1024; -pub const O_CREAT: c_int = 64; -pub const O_EXCL: c_int = 128; -pub const O_NOCTTY: c_int = 256; -pub const O_NONBLOCK: c_int = 2048; -pub const O_SYNC: c_int = 1052672; -pub const O_RSYNC: c_int = 1052672; -pub const O_DSYNC: c_int = 4096; -pub const O_FSYNC: c_int = 0x101000; -pub const O_NOATIME: c_int = 0o1000000; -pub const O_PATH: c_int = 0o10000000; -pub const O_TMPFILE: c_int = 0o20000000 | O_DIRECTORY; - -pub const MADV_SOFT_OFFLINE: c_int = 101; -pub const MAP_GROWSDOWN: c_int = 0x0100; -pub const MAP_ANON: c_int = 0x0020; -pub const MAP_ANONYMOUS: c_int = 0x0020; -pub const MAP_DENYWRITE: c_int = 0x0800; -pub const MAP_EXECUTABLE: c_int = 0x01000; -pub const MAP_POPULATE: c_int = 0x08000; -pub const MAP_NONBLOCK: c_int = 0x010000; -pub const MAP_STACK: c_int = 0x020000; -pub const MAP_HUGETLB: c_int = 0x040000; - -pub const EDEADLK: c_int = 35; -pub const ENAMETOOLONG: c_int = 36; -pub const ENOLCK: c_int = 37; -pub const ENOSYS: c_int = 38; -pub const ENOTEMPTY: c_int = 39; -pub const ELOOP: c_int = 40; -pub const ENOMSG: c_int = 42; -pub const EIDRM: c_int = 43; -pub const ECHRNG: c_int = 44; -pub const EL2NSYNC: c_int = 45; -pub const EL3HLT: c_int = 46; -pub const EL3RST: c_int = 47; -pub const ELNRNG: c_int = 48; -pub const EUNATCH: c_int = 49; -pub const ENOCSI: c_int = 50; -pub const EL2HLT: c_int = 51; -pub const EBADE: c_int = 52; -pub const EBADR: c_int = 53; -pub const EXFULL: c_int = 54; -pub const ENOANO: c_int = 55; -pub const EBADRQC: c_int = 56; -pub const EBADSLT: c_int = 57; -pub const EMULTIHOP: c_int = 72; -pub const EOVERFLOW: c_int = 75; -pub const ENOTUNIQ: c_int = 76; -pub const EBADFD: c_int = 77; -pub const EBADMSG: c_int = 74; -pub const EREMCHG: c_int = 78; -pub const ELIBACC: c_int = 79; -pub const ELIBBAD: c_int = 80; -pub const ELIBSCN: c_int = 81; -pub const ELIBMAX: c_int = 82; -pub const ELIBEXEC: c_int = 83; -pub const EILSEQ: c_int = 84; -pub const ERESTART: c_int = 85; -pub const ESTRPIPE: c_int = 86; -pub const EUSERS: c_int = 87; -pub const ENOTSOCK: c_int = 88; -pub const EDESTADDRREQ: c_int = 89; -pub const EMSGSIZE: c_int = 90; -pub const EPROTOTYPE: c_int = 91; -pub const ENOPROTOOPT: c_int = 92; -pub const EPROTONOSUPPORT: c_int = 93; -pub const ESOCKTNOSUPPORT: c_int = 94; -pub const EOPNOTSUPP: c_int = 95; -pub const EPFNOSUPPORT: c_int = 96; -pub const EAFNOSUPPORT: c_int = 97; -pub const EADDRINUSE: 
c_int = 98; -pub const EADDRNOTAVAIL: c_int = 99; -pub const ENETDOWN: c_int = 100; -pub const ENETUNREACH: c_int = 101; -pub const ENETRESET: c_int = 102; -pub const ECONNABORTED: c_int = 103; -pub const ECONNRESET: c_int = 104; -pub const ENOBUFS: c_int = 105; -pub const EISCONN: c_int = 106; -pub const ENOTCONN: c_int = 107; -pub const ESHUTDOWN: c_int = 108; -pub const ETOOMANYREFS: c_int = 109; -pub const ETIMEDOUT: c_int = 110; -pub const ECONNREFUSED: c_int = 111; -pub const EHOSTDOWN: c_int = 112; -pub const EHOSTUNREACH: c_int = 113; -pub const EALREADY: c_int = 114; -pub const EINPROGRESS: c_int = 115; -pub const ESTALE: c_int = 116; -pub const EDQUOT: c_int = 122; -pub const ENOMEDIUM: c_int = 123; -pub const EMEDIUMTYPE: c_int = 124; -pub const ECANCELED: c_int = 125; -pub const ENOKEY: c_int = 126; -pub const EKEYEXPIRED: c_int = 127; -pub const EKEYREVOKED: c_int = 128; -pub const EKEYREJECTED: c_int = 129; -pub const EOWNERDEAD: c_int = 130; -pub const ENOTRECOVERABLE: c_int = 131; -pub const EHWPOISON: c_int = 133; -pub const ERFKILL: c_int = 132; - -pub const SOCK_STREAM: c_int = 1; -pub const SOCK_DGRAM: c_int = 2; - -pub const SA_ONSTACK: c_int = 0x08000000; -pub const SA_SIGINFO: c_int = 0x00000004; -pub const SA_NOCLDWAIT: c_int = 0x00000002; - -pub const SIGTTIN: c_int = 21; -pub const SIGTTOU: c_int = 22; -pub const SIGXCPU: c_int = 24; -pub const SIGXFSZ: c_int = 25; -pub const SIGVTALRM: c_int = 26; -pub const SIGPROF: c_int = 27; -pub const SIGWINCH: c_int = 28; -pub const SIGCHLD: c_int = 17; -pub const SIGBUS: c_int = 7; -pub const SIGUSR1: c_int = 10; -pub const SIGUSR2: c_int = 12; -pub const SIGCONT: c_int = 18; -pub const SIGSTOP: c_int = 19; -pub const SIGTSTP: c_int = 20; -pub const SIGURG: c_int = 23; -pub const SIGIO: c_int = 29; -pub const SIGSYS: c_int = 31; -pub const SIGSTKFLT: c_int = 16; -#[deprecated(since = "0.2.55", note = "Use SIGSYS instead")] -pub const SIGUNUSED: c_int = 31; -pub const SIGPOLL: c_int = 29; -pub const SIGPWR: c_int = 30; -pub const SIG_SETMASK: c_int = 2; -pub const SIG_BLOCK: c_int = 0x000000; -pub const SIG_UNBLOCK: c_int = 0x01; - -pub const POLLWRNORM: c_short = 0x100; -pub const POLLWRBAND: c_short = 0x200; - -pub const O_ASYNC: c_int = 0x2000; -pub const O_NDELAY: c_int = 0x800; - -pub const PTRACE_DETACH: c_uint = 17; - -pub const EFD_NONBLOCK: c_int = 0x800; - -pub const F_GETLK: c_int = 5; -pub const F_GETOWN: c_int = 9; -pub const F_SETOWN: c_int = 8; -pub const F_SETLK: c_int = 6; -pub const F_SETLKW: c_int = 7; -pub const F_OFD_GETLK: c_int = 36; -pub const F_OFD_SETLK: c_int = 37; -pub const F_OFD_SETLKW: c_int = 38; - -pub const F_RDLCK: c_int = 0; -pub const F_WRLCK: c_int = 1; -pub const F_UNLCK: c_int = 2; - -pub const SFD_NONBLOCK: c_int = 0x0800; - -pub const TCSANOW: c_int = 0; -pub const TCSADRAIN: c_int = 1; -pub const TCSAFLUSH: c_int = 2; - -pub const SFD_CLOEXEC: c_int = 0x080000; - -pub const NCCS: usize = 32; - -pub const O_TRUNC: c_int = 512; - -pub const O_CLOEXEC: c_int = 0x80000; - -pub const EBFONT: c_int = 59; -pub const ENOSTR: c_int = 60; -pub const ENODATA: c_int = 61; -pub const ETIME: c_int = 62; -pub const ENOSR: c_int = 63; -pub const ENONET: c_int = 64; -pub const ENOPKG: c_int = 65; -pub const EREMOTE: c_int = 66; -pub const ENOLINK: c_int = 67; -pub const EADV: c_int = 68; -pub const ESRMNT: c_int = 69; -pub const ECOMM: c_int = 70; -pub const EPROTO: c_int = 71; -pub const EDOTDOT: c_int = 73; - -pub const SA_NODEFER: c_int = 0x40000000; -pub const SA_RESETHAND: c_int = 0x80000000; 
-pub const SA_RESTART: c_int = 0x10000000; -pub const SA_NOCLDSTOP: c_int = 0x00000001; - -pub const EPOLL_CLOEXEC: c_int = 0x80000; - -pub const EFD_CLOEXEC: c_int = 0x80000; - -pub const __SIZEOF_PTHREAD_CONDATTR_T: usize = 4; -pub const __SIZEOF_PTHREAD_MUTEX_T: usize = 40; -pub const __SIZEOF_PTHREAD_MUTEXATTR_T: usize = 4; -pub const __SIZEOF_PTHREAD_BARRIERATTR_T: usize = 4; - -#[cfg(target_endian = "little")] -pub const PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP: crate::pthread_mutex_t = pthread_mutex_t { - size: [ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ], -}; -#[cfg(target_endian = "little")] -pub const PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP: crate::pthread_mutex_t = pthread_mutex_t { - size: [ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ], -}; -#[cfg(target_endian = "little")] -pub const PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP: crate::pthread_mutex_t = pthread_mutex_t { - size: [ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ], -}; -#[cfg(target_endian = "big")] -pub const PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP: crate::pthread_mutex_t = pthread_mutex_t { - size: [ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ], -}; -#[cfg(target_endian = "big")] -pub const PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP: crate::pthread_mutex_t = pthread_mutex_t { - size: [ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ], -}; -#[cfg(target_endian = "big")] -pub const PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP: crate::pthread_mutex_t = pthread_mutex_t { - size: [ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ], -}; - -pub const O_DIRECTORY: c_int = 0x4000; -pub const O_NOFOLLOW: c_int = 0x8000; -pub const O_DIRECT: c_int = 0x20000; - -pub const MAP_LOCKED: c_int = 0x00080; -pub const MAP_NORESERVE: c_int = 0x00040; -pub const MAP_SYNC: c_int = 0x080000; - -pub const EDEADLOCK: c_int = 58; -pub const EUCLEAN: c_int = 117; -pub const ENOTNAM: c_int = 118; -pub const ENAVAIL: c_int = 119; -pub const EISNAM: c_int = 120; -pub const EREMOTEIO: c_int = 121; - -pub const MCL_CURRENT: c_int = 0x2000; -pub const MCL_FUTURE: c_int = 0x4000; -pub const MCL_ONFAULT: c_int = 0x8000; - -pub const SIGSTKSZ: size_t = 0x4000; -pub const MINSIGSTKSZ: size_t = 4096; -pub const CBAUD: crate::tcflag_t = 0xff; -pub const TAB1: crate::tcflag_t = 0x400; -pub const TAB2: crate::tcflag_t = 0x800; -pub const TAB3: crate::tcflag_t = 0xc00; -pub const CR1: crate::tcflag_t = 0x1000; -pub const CR2: crate::tcflag_t = 0x2000; -pub const CR3: crate::tcflag_t = 0x3000; -pub const FF1: crate::tcflag_t = 0x4000; -pub const BS1: crate::tcflag_t = 0x8000; -pub const VT1: crate::tcflag_t = 0x10000; -pub const VWERASE: usize = 0xa; -pub const VREPRINT: usize = 0xb; -pub const VSUSP: usize = 0xc; -pub const VSTART: usize = 0xd; -pub const VSTOP: usize = 0xe; -pub const VDISCARD: usize = 0x10; -pub const VTIME: usize = 0x7; -pub const IXON: crate::tcflag_t = 0x200; -pub const IXOFF: crate::tcflag_t = 0x400; -pub const ONLCR: crate::tcflag_t = 0x2; -pub const CSIZE: crate::tcflag_t = 0x300; -pub const CS6: crate::tcflag_t = 0x100; -pub const CS7: crate::tcflag_t = 0x200; -pub 
const CS8: crate::tcflag_t = 0x300; -pub const CSTOPB: crate::tcflag_t = 0x400; -pub const CREAD: crate::tcflag_t = 0x800; -pub const PARENB: crate::tcflag_t = 0x1000; -pub const PARODD: crate::tcflag_t = 0x2000; -pub const HUPCL: crate::tcflag_t = 0x4000; -pub const CLOCAL: crate::tcflag_t = 0x8000; -pub const ECHOKE: crate::tcflag_t = 0x1; -pub const ECHOE: crate::tcflag_t = 0x2; -pub const ECHOK: crate::tcflag_t = 0x4; -pub const ECHONL: crate::tcflag_t = 0x10; -pub const ECHOPRT: crate::tcflag_t = 0x20; -pub const ECHOCTL: crate::tcflag_t = 0x40; -pub const ISIG: crate::tcflag_t = 0x80; -pub const ICANON: crate::tcflag_t = 0x100; -pub const PENDIN: crate::tcflag_t = 0x20000000; -pub const NOFLSH: crate::tcflag_t = 0x80000000; -pub const VSWTC: usize = 9; -pub const OLCUC: crate::tcflag_t = 0o000004; -pub const NLDLY: crate::tcflag_t = 0o001400; -pub const CRDLY: crate::tcflag_t = 0o030000; -pub const TABDLY: crate::tcflag_t = 0o006000; -pub const BSDLY: crate::tcflag_t = 0o100000; -pub const FFDLY: crate::tcflag_t = 0o040000; -pub const VTDLY: crate::tcflag_t = 0o200000; -pub const XTABS: crate::tcflag_t = 0o006000; - -pub const B0: crate::speed_t = 0o000000; -pub const B50: crate::speed_t = 0o000001; -pub const B75: crate::speed_t = 0o000002; -pub const B110: crate::speed_t = 0o000003; -pub const B134: crate::speed_t = 0o000004; -pub const B150: crate::speed_t = 0o000005; -pub const B200: crate::speed_t = 0o000006; -pub const B300: crate::speed_t = 0o000007; -pub const B600: crate::speed_t = 0o000010; -pub const B1200: crate::speed_t = 0o000011; -pub const B1800: crate::speed_t = 0o000012; -pub const B2400: crate::speed_t = 0o000013; -pub const B4800: crate::speed_t = 0o000014; -pub const B9600: crate::speed_t = 0o000015; -pub const B19200: crate::speed_t = 0o000016; -pub const B38400: crate::speed_t = 0o000017; -pub const EXTA: crate::speed_t = B19200; -pub const EXTB: crate::speed_t = B38400; -pub const CBAUDEX: crate::speed_t = 0o000020; -pub const B57600: crate::speed_t = 0o0020; -pub const B115200: crate::speed_t = 0o0021; -pub const B230400: crate::speed_t = 0o0022; -pub const B460800: crate::speed_t = 0o0023; -pub const B500000: crate::speed_t = 0o0024; -pub const B576000: crate::speed_t = 0o0025; -pub const B921600: crate::speed_t = 0o0026; -pub const B1000000: crate::speed_t = 0o0027; -pub const B1152000: crate::speed_t = 0o0030; -pub const B1500000: crate::speed_t = 0o0031; -pub const B2000000: crate::speed_t = 0o0032; -pub const B2500000: crate::speed_t = 0o0033; -pub const B3000000: crate::speed_t = 0o0034; -pub const B3500000: crate::speed_t = 0o0035; -pub const B4000000: crate::speed_t = 0o0036; - -pub const VEOL: usize = 6; -pub const VEOL2: usize = 8; -pub const VMIN: usize = 5; -pub const IEXTEN: crate::tcflag_t = 0x400; -pub const TOSTOP: crate::tcflag_t = 0x400000; -pub const FLUSHO: crate::tcflag_t = 0x800000; -pub const EXTPROC: crate::tcflag_t = 0x10000000; - -// Syscall table -pub const SYS_restart_syscall: c_long = 0; -pub const SYS_exit: c_long = 1; -pub const SYS_fork: c_long = 2; -pub const SYS_read: c_long = 3; -pub const SYS_write: c_long = 4; -pub const SYS_open: c_long = 5; -pub const SYS_close: c_long = 6; -pub const SYS_waitpid: c_long = 7; -pub const SYS_creat: c_long = 8; -pub const SYS_link: c_long = 9; -pub const SYS_unlink: c_long = 10; -pub const SYS_execve: c_long = 11; -pub const SYS_chdir: c_long = 12; -pub const SYS_time: c_long = 13; -pub const SYS_mknod: c_long = 14; -pub const SYS_chmod: c_long = 15; -pub const SYS_lchown: c_long = 16; 
-pub const SYS_break: c_long = 17;
-pub const SYS_oldstat: c_long = 18;
-pub const SYS_lseek: c_long = 19;
-pub const SYS_getpid: c_long = 20;
[... remaining deleted syscall-number constants of this vendored libc module elided (SYS_mount = 21 through SYS_futex_waitv = 449, including the deprecated SYS_create_module, SYS_get_kernel_syms, and SYS_query_module entries) ...]
-pub const SYS_set_mempolicy_home_node: c_long = 450;
-
-extern "C" {
-    pub fn sysctl(
-        name: *mut c_int,
-        namelen: c_int,
-        oldp: *mut c_void,
-        oldlenp: *mut size_t,
-        newp: *mut c_void,
-        newlen: size_t,
-    ) -> c_int;
-}
diff --git a/vendor/libc/src/unix/linux_like/linux/gnu/b64/riscv64/mod.rs b/vendor/libc/src/unix/linux_like/linux/gnu/b64/riscv64/mod.rs
deleted file mode 100644
index bfbc8ee5cf6833..00000000000000
--- a/vendor/libc/src/unix/linux_like/linux/gnu/b64/riscv64/mod.rs
+++ /dev/null
@@ -1,910 +0,0 @@
-//! RISC-V-specific definitions for 64-bit linux-like values
-
-use crate::prelude::*;
-use crate::{off64_t, off_t};
-
-pub type wchar_t = c_int;
-
-pub type nlink_t = c_uint;
-pub type blksize_t = c_int;
-pub type fsblkcnt64_t = c_ulong;
-pub type fsfilcnt64_t = c_ulong;
-pub type suseconds_t = i64;
-pub type __u64 = c_ulonglong;
-pub type __s64 = c_longlong;
-
[... remaining deleted lines of the riscv64 module elided: the s! {} struct definitions (pthread_attr_t, stat, stat64, statfs, statfs64, statvfs, statvfs64, siginfo_t, stack_t, sigaction, ipc_perm, shmid_ds, flock, flock64, user_regs_struct, clone_args), the s_no_extra_traits! {} definitions (ucontext_t, mcontext_t, __riscv_mc_fp_state and the f/d/q extension states), and the O_*, errno, signal, termios/baud-rate, MAP_*, PTRACE_*, MCL_*, __SIZEOF_PTHREAD_*, REG_*, COMPAT_HWCAP_ISA_*, and SYS_* constants (SYS_read = 63 through SYS_set_mempolicy_home_node = 450) ...]
diff --git a/vendor/libc/src/unix/linux_like/linux/gnu/b64/s390x.rs b/vendor/libc/src/unix/linux_like/linux/gnu/b64/s390x.rs
deleted file mode 100644
index 029485c5b4a328..00000000000000
--- a/vendor/libc/src/unix/linux_like/linux/gnu/b64/s390x.rs
+++ /dev/null
@@ -1,955 +0,0 @@
-//! s390x
-
-use crate::prelude::*;
-use crate::{off64_t, off_t, pthread_mutex_t};
-
-pub type blksize_t = i64;
-pub type nlink_t = u64;
-pub type suseconds_t = i64;
-pub type wchar_t = i32;
-pub type greg_t = u64;
-pub type __u64 = u64;
-pub type __s64 = i64;
-
[... deleted lines of the s390x module elided: the s! {} struct definitions (sigaction, statfs, flock, flock64, siginfo_t, stack_t, stat, stat64, pthread_attr_t, ipc_perm, shmid_ds, statvfs, __psw_t, fpregset_t, mcontext_t, ucontext_t, statfs64, statvfs64), the fpreg_t definition with its extra_traits impls, the PTHREAD_*_MUTEX_INITIALIZER_NP constants, the O_*, errno, SA_*, signal, termios/baud-rate, MAP_*, PTRACE_*, MCL_*, F_*, and __SIZEOF_PTHREAD_* constants, and the SYS_* syscall numbers from SYS_exit = 1 onward ...]
-pub const SYS_timer_getoverrun: c_long = 257;
-pub const SYS_timer_delete: c_long = 258;
-pub const SYS_clock_settime: c_long = 259;
-pub const SYS_clock_gettime:
c_long = 260; -pub const SYS_clock_getres: c_long = 261; -pub const SYS_clock_nanosleep: c_long = 262; -pub const SYS_statfs64: c_long = 265; -pub const SYS_fstatfs64: c_long = 266; -pub const SYS_remap_file_pages: c_long = 267; -pub const SYS_mbind: c_long = 268; -pub const SYS_get_mempolicy: c_long = 269; -pub const SYS_set_mempolicy: c_long = 270; -pub const SYS_mq_open: c_long = 271; -pub const SYS_mq_unlink: c_long = 272; -pub const SYS_mq_timedsend: c_long = 273; -pub const SYS_mq_timedreceive: c_long = 274; -pub const SYS_mq_notify: c_long = 275; -pub const SYS_mq_getsetattr: c_long = 276; -pub const SYS_kexec_load: c_long = 277; -pub const SYS_add_key: c_long = 278; -pub const SYS_request_key: c_long = 279; -pub const SYS_keyctl: c_long = 280; -pub const SYS_waitid: c_long = 281; -pub const SYS_ioprio_set: c_long = 282; -pub const SYS_ioprio_get: c_long = 283; -pub const SYS_inotify_init: c_long = 284; -pub const SYS_inotify_add_watch: c_long = 285; -pub const SYS_inotify_rm_watch: c_long = 286; -pub const SYS_migrate_pages: c_long = 287; -pub const SYS_openat: c_long = 288; -pub const SYS_mkdirat: c_long = 289; -pub const SYS_mknodat: c_long = 290; -pub const SYS_fchownat: c_long = 291; -pub const SYS_futimesat: c_long = 292; -pub const SYS_unlinkat: c_long = 294; -pub const SYS_renameat: c_long = 295; -pub const SYS_linkat: c_long = 296; -pub const SYS_symlinkat: c_long = 297; -pub const SYS_readlinkat: c_long = 298; -pub const SYS_fchmodat: c_long = 299; -pub const SYS_faccessat: c_long = 300; -pub const SYS_pselect6: c_long = 301; -pub const SYS_ppoll: c_long = 302; -pub const SYS_unshare: c_long = 303; -pub const SYS_set_robust_list: c_long = 304; -pub const SYS_get_robust_list: c_long = 305; -pub const SYS_splice: c_long = 306; -pub const SYS_sync_file_range: c_long = 307; -pub const SYS_tee: c_long = 308; -pub const SYS_vmsplice: c_long = 309; -pub const SYS_move_pages: c_long = 310; -pub const SYS_getcpu: c_long = 311; -pub const SYS_epoll_pwait: c_long = 312; -pub const SYS_utimes: c_long = 313; -pub const SYS_fallocate: c_long = 314; -pub const SYS_utimensat: c_long = 315; -pub const SYS_signalfd: c_long = 316; -pub const SYS_timerfd: c_long = 317; -pub const SYS_eventfd: c_long = 318; -pub const SYS_timerfd_create: c_long = 319; -pub const SYS_timerfd_settime: c_long = 320; -pub const SYS_timerfd_gettime: c_long = 321; -pub const SYS_signalfd4: c_long = 322; -pub const SYS_eventfd2: c_long = 323; -pub const SYS_inotify_init1: c_long = 324; -pub const SYS_pipe2: c_long = 325; -pub const SYS_dup3: c_long = 326; -pub const SYS_epoll_create1: c_long = 327; -pub const SYS_preadv: c_long = 328; -pub const SYS_pwritev: c_long = 329; -pub const SYS_rt_tgsigqueueinfo: c_long = 330; -pub const SYS_perf_event_open: c_long = 331; -pub const SYS_fanotify_init: c_long = 332; -pub const SYS_fanotify_mark: c_long = 333; -pub const SYS_prlimit64: c_long = 334; -pub const SYS_name_to_handle_at: c_long = 335; -pub const SYS_open_by_handle_at: c_long = 336; -pub const SYS_clock_adjtime: c_long = 337; -pub const SYS_syncfs: c_long = 338; -pub const SYS_setns: c_long = 339; -pub const SYS_process_vm_readv: c_long = 340; -pub const SYS_process_vm_writev: c_long = 341; -pub const SYS_s390_runtime_instr: c_long = 342; -pub const SYS_kcmp: c_long = 343; -pub const SYS_finit_module: c_long = 344; -pub const SYS_sched_setattr: c_long = 345; -pub const SYS_sched_getattr: c_long = 346; -pub const SYS_renameat2: c_long = 347; -pub const SYS_seccomp: c_long = 348; -pub const SYS_getrandom: c_long = 
349; -pub const SYS_memfd_create: c_long = 350; -pub const SYS_bpf: c_long = 351; -pub const SYS_s390_pci_mmio_write: c_long = 352; -pub const SYS_s390_pci_mmio_read: c_long = 353; -pub const SYS_execveat: c_long = 354; -pub const SYS_userfaultfd: c_long = 355; -pub const SYS_membarrier: c_long = 356; -pub const SYS_recvmmsg: c_long = 357; -pub const SYS_sendmmsg: c_long = 358; -pub const SYS_socket: c_long = 359; -pub const SYS_socketpair: c_long = 360; -pub const SYS_bind: c_long = 361; -pub const SYS_connect: c_long = 362; -pub const SYS_listen: c_long = 363; -pub const SYS_accept4: c_long = 364; -pub const SYS_getsockopt: c_long = 365; -pub const SYS_setsockopt: c_long = 366; -pub const SYS_getsockname: c_long = 367; -pub const SYS_getpeername: c_long = 368; -pub const SYS_sendto: c_long = 369; -pub const SYS_sendmsg: c_long = 370; -pub const SYS_recvfrom: c_long = 371; -pub const SYS_recvmsg: c_long = 372; -pub const SYS_shutdown: c_long = 373; -pub const SYS_mlock2: c_long = 374; -pub const SYS_copy_file_range: c_long = 375; -pub const SYS_preadv2: c_long = 376; -pub const SYS_pwritev2: c_long = 377; -pub const SYS_lchown: c_long = 198; -pub const SYS_setuid: c_long = 213; -pub const SYS_getuid: c_long = 199; -pub const SYS_setgid: c_long = 214; -pub const SYS_getgid: c_long = 200; -pub const SYS_geteuid: c_long = 201; -pub const SYS_setreuid: c_long = 203; -pub const SYS_setregid: c_long = 204; -pub const SYS_getrlimit: c_long = 191; -pub const SYS_getgroups: c_long = 205; -pub const SYS_fchown: c_long = 207; -pub const SYS_setresuid: c_long = 208; -pub const SYS_setresgid: c_long = 210; -pub const SYS_getresgid: c_long = 211; -pub const SYS_select: c_long = 142; -pub const SYS_getegid: c_long = 202; -pub const SYS_setgroups: c_long = 206; -pub const SYS_getresuid: c_long = 209; -pub const SYS_chown: c_long = 212; -pub const SYS_setfsuid: c_long = 215; -pub const SYS_setfsgid: c_long = 216; -pub const SYS_newfstatat: c_long = 293; -pub const SYS_statx: c_long = 379; -pub const SYS_rseq: c_long = 383; -pub const SYS_pidfd_send_signal: c_long = 424; -pub const SYS_io_uring_setup: c_long = 425; -pub const SYS_io_uring_enter: c_long = 426; -pub const SYS_io_uring_register: c_long = 427; -pub const SYS_open_tree: c_long = 428; -pub const SYS_move_mount: c_long = 429; -pub const SYS_fsopen: c_long = 430; -pub const SYS_fsconfig: c_long = 431; -pub const SYS_fsmount: c_long = 432; -pub const SYS_fspick: c_long = 433; -pub const SYS_pidfd_open: c_long = 434; -pub const SYS_clone3: c_long = 435; -pub const SYS_close_range: c_long = 436; -pub const SYS_openat2: c_long = 437; -pub const SYS_pidfd_getfd: c_long = 438; -pub const SYS_faccessat2: c_long = 439; -pub const SYS_process_madvise: c_long = 440; -pub const SYS_epoll_pwait2: c_long = 441; -pub const SYS_mount_setattr: c_long = 442; -pub const SYS_quotactl_fd: c_long = 443; -pub const SYS_landlock_create_ruleset: c_long = 444; -pub const SYS_landlock_add_rule: c_long = 445; -pub const SYS_landlock_restrict_self: c_long = 446; -pub const SYS_memfd_secret: c_long = 447; -pub const SYS_process_mrelease: c_long = 448; -pub const SYS_futex_waitv: c_long = 449; -pub const SYS_set_mempolicy_home_node: c_long = 450; -pub const SYS_mseal: c_long = 462; - -extern "C" { - - pub fn sysctl( - name: *mut c_int, - namelen: c_int, - oldp: *mut c_void, - oldlenp: *mut size_t, - newp: *mut c_void, - newlen: size_t, - ) -> c_int; - pub fn getcontext(ucp: *mut crate::ucontext_t) -> c_int; - pub fn setcontext(ucp: *const crate::ucontext_t) -> c_int; - pub fn 
makecontext(ucp: *mut crate::ucontext_t, func: extern "C" fn(), argc: c_int, ...); - pub fn swapcontext(uocp: *mut crate::ucontext_t, ucp: *const crate::ucontext_t) -> c_int; -} diff --git a/vendor/libc/src/unix/linux_like/linux/gnu/b64/sparc64/mod.rs b/vendor/libc/src/unix/linux_like/linux/gnu/b64/sparc64/mod.rs deleted file mode 100644 index f18e53a99b4661..00000000000000 --- a/vendor/libc/src/unix/linux_like/linux/gnu/b64/sparc64/mod.rs +++ /dev/null @@ -1,930 +0,0 @@ -//! SPARC64-specific definitions for 64-bit linux-like values - -use crate::prelude::*; -use crate::{off64_t, off_t, pthread_mutex_t}; - -pub type wchar_t = i32; -pub type nlink_t = u32; -pub type blksize_t = i64; -pub type suseconds_t = i32; -pub type __u64 = c_ulonglong; -pub type __s64 = c_longlong; - -s! { - // FIXME(1.0): This should not implement `PartialEq` - #[allow(unpredictable_function_pointer_comparisons)] - pub struct sigaction { - pub sa_sigaction: crate::sighandler_t, - pub sa_mask: crate::sigset_t, - #[cfg(target_arch = "sparc64")] - __reserved0: c_int, - pub sa_flags: c_int, - pub sa_restorer: Option, - } - - pub struct statfs { - pub f_type: crate::__fsword_t, - pub f_bsize: crate::__fsword_t, - pub f_blocks: crate::fsblkcnt_t, - pub f_bfree: crate::fsblkcnt_t, - pub f_bavail: crate::fsblkcnt_t, - - pub f_files: crate::fsfilcnt_t, - pub f_ffree: crate::fsfilcnt_t, - pub f_fsid: crate::fsid_t, - - pub f_namelen: crate::__fsword_t, - pub f_frsize: crate::__fsword_t, - f_spare: [crate::__fsword_t; 5], - } - - pub struct siginfo_t { - pub si_signo: c_int, - pub si_errno: c_int, - pub si_code: c_int, - #[doc(hidden)] - #[deprecated( - since = "0.2.54", - note = "Please leave a comment on \ - https://github.com/rust-lang/libc/pull/1316 if you're using \ - this field" - )] - pub _pad: [c_int; 29], - _align: [usize; 0], - } - - pub struct flock { - pub l_type: c_short, - pub l_whence: c_short, - pub l_start: off_t, - pub l_len: off_t, - pub l_pid: crate::pid_t, - } - - pub struct flock64 { - pub l_type: c_short, - pub l_whence: c_short, - pub l_start: off64_t, - pub l_len: off64_t, - pub l_pid: crate::pid_t, - __reserved: c_short, - } - - pub struct stack_t { - pub ss_sp: *mut c_void, - pub ss_flags: c_int, - pub ss_size: size_t, - } - - pub struct stat { - pub st_dev: crate::dev_t, - __pad0: u64, - pub st_ino: crate::ino_t, - pub st_mode: crate::mode_t, - pub st_nlink: crate::nlink_t, - pub st_uid: crate::uid_t, - pub st_gid: crate::gid_t, - pub st_rdev: crate::dev_t, - __pad1: u64, - pub st_size: off_t, - pub st_blksize: crate::blksize_t, - pub st_blocks: crate::blkcnt_t, - pub st_atime: crate::time_t, - pub st_atime_nsec: c_long, - pub st_mtime: crate::time_t, - pub st_mtime_nsec: c_long, - pub st_ctime: crate::time_t, - pub st_ctime_nsec: c_long, - __unused: [c_long; 2], - } - - pub struct stat64 { - pub st_dev: crate::dev_t, - __pad0: u64, - pub st_ino: crate::ino64_t, - pub st_mode: crate::mode_t, - pub st_nlink: crate::nlink_t, - pub st_uid: crate::uid_t, - pub st_gid: crate::gid_t, - pub st_rdev: crate::dev_t, - __pad2: c_int, - pub st_size: off64_t, - pub st_blksize: crate::blksize_t, - pub st_blocks: crate::blkcnt64_t, - pub st_atime: crate::time_t, - pub st_atime_nsec: c_long, - pub st_mtime: crate::time_t, - pub st_mtime_nsec: c_long, - pub st_ctime: crate::time_t, - pub st_ctime_nsec: c_long, - __reserved: [c_long; 2], - } - - pub struct statfs64 { - pub f_type: crate::__fsword_t, - pub f_bsize: crate::__fsword_t, - pub f_blocks: u64, - pub f_bfree: u64, - pub f_bavail: u64, - pub f_files: u64, - 
pub f_ffree: u64, - pub f_fsid: crate::fsid_t, - pub f_namelen: crate::__fsword_t, - pub f_frsize: crate::__fsword_t, - pub f_flags: crate::__fsword_t, - pub f_spare: [crate::__fsword_t; 4], - } - - pub struct statvfs { - pub f_bsize: c_ulong, - pub f_frsize: c_ulong, - pub f_blocks: crate::fsblkcnt_t, - pub f_bfree: crate::fsblkcnt_t, - pub f_bavail: crate::fsblkcnt_t, - pub f_files: crate::fsfilcnt_t, - pub f_ffree: crate::fsfilcnt_t, - pub f_favail: crate::fsfilcnt_t, - pub f_fsid: c_ulong, - pub f_flag: c_ulong, - pub f_namemax: c_ulong, - __f_spare: [c_int; 6], - } - - pub struct statvfs64 { - pub f_bsize: c_ulong, - pub f_frsize: c_ulong, - pub f_blocks: u64, - pub f_bfree: u64, - pub f_bavail: u64, - pub f_files: u64, - pub f_ffree: u64, - pub f_favail: u64, - pub f_fsid: c_ulong, - pub f_flag: c_ulong, - pub f_namemax: c_ulong, - __f_spare: [c_int; 6], - } - - pub struct pthread_attr_t { - __size: [u64; 7], - } - - pub struct ipc_perm { - pub __key: crate::key_t, - pub uid: crate::uid_t, - pub gid: crate::gid_t, - pub cuid: crate::uid_t, - pub cgid: crate::gid_t, - pub mode: crate::mode_t, - __pad0: u16, - pub __seq: c_ushort, - __unused1: c_ulonglong, - __unused2: c_ulonglong, - } - - pub struct shmid_ds { - pub shm_perm: crate::ipc_perm, - pub shm_atime: crate::time_t, - pub shm_dtime: crate::time_t, - pub shm_ctime: crate::time_t, - pub shm_segsz: size_t, - pub shm_cpid: crate::pid_t, - pub shm_lpid: crate::pid_t, - pub shm_nattch: crate::shmatt_t, - __reserved1: c_ulong, - __reserved2: c_ulong, - } -} - -s_no_extra_traits! { - #[repr(align(16))] - pub struct max_align_t { - priv_: [i64; 4], - } -} - -pub const POSIX_FADV_DONTNEED: c_int = 4; -pub const POSIX_FADV_NOREUSE: c_int = 5; - -pub const VEOF: usize = 4; -pub const RTLD_DEEPBIND: c_int = 0x8; -pub const RTLD_GLOBAL: c_int = 0x100; -pub const RTLD_NOLOAD: c_int = 0x4; -pub const __SIZEOF_PTHREAD_RWLOCK_T: usize = 56; -pub const __SIZEOF_PTHREAD_BARRIER_T: usize = 32; - -pub const O_APPEND: c_int = 0x8; -pub const O_CREAT: c_int = 0x200; -pub const O_EXCL: c_int = 0x800; -pub const O_NOCTTY: c_int = 0x8000; -pub const O_NONBLOCK: c_int = 0x4000; -pub const O_SYNC: c_int = 0x802000; -pub const O_RSYNC: c_int = 0x802000; -pub const O_DSYNC: c_int = 0x2000; -pub const O_FSYNC: c_int = 0x802000; -pub const O_NOATIME: c_int = 0x200000; -pub const O_PATH: c_int = 0x1000000; -pub const O_TMPFILE: c_int = 0x2000000 | O_DIRECTORY; - -pub const MADV_SOFT_OFFLINE: c_int = 101; -pub const MAP_GROWSDOWN: c_int = 0x0200; -pub const MAP_ANON: c_int = 0x0020; -pub const MAP_ANONYMOUS: c_int = 0x0020; -pub const MAP_DENYWRITE: c_int = 0x0800; -pub const MAP_EXECUTABLE: c_int = 0x01000; -pub const MAP_POPULATE: c_int = 0x08000; -pub const MAP_NONBLOCK: c_int = 0x010000; -pub const MAP_STACK: c_int = 0x020000; -pub const MAP_HUGETLB: c_int = 0x040000; -pub const MAP_SYNC: c_int = 0x080000; - -pub const EDEADLK: c_int = 78; -pub const ENAMETOOLONG: c_int = 63; -pub const ENOLCK: c_int = 79; -pub const ENOSYS: c_int = 90; -pub const ENOTEMPTY: c_int = 66; -pub const ELOOP: c_int = 62; -pub const ENOMSG: c_int = 75; -pub const EIDRM: c_int = 77; -pub const ECHRNG: c_int = 94; -pub const EL2NSYNC: c_int = 95; -pub const EL3HLT: c_int = 96; -pub const EL3RST: c_int = 97; -pub const ELNRNG: c_int = 98; -pub const EUNATCH: c_int = 99; -pub const ENOCSI: c_int = 100; -pub const EL2HLT: c_int = 101; -pub const EBADE: c_int = 102; -pub const EBADR: c_int = 103; -pub const EXFULL: c_int = 104; -pub const ENOANO: c_int = 105; -pub const EBADRQC: c_int 
= 106; -pub const EBADSLT: c_int = 107; -pub const EMULTIHOP: c_int = 87; -pub const EOVERFLOW: c_int = 92; -pub const ENOTUNIQ: c_int = 115; -pub const EBADFD: c_int = 93; -pub const EBADMSG: c_int = 76; -pub const EREMCHG: c_int = 89; -pub const ELIBACC: c_int = 114; -pub const ELIBBAD: c_int = 112; -pub const ELIBSCN: c_int = 124; -pub const ELIBMAX: c_int = 123; -pub const ELIBEXEC: c_int = 110; -pub const EILSEQ: c_int = 122; -pub const ERESTART: c_int = 116; -pub const ESTRPIPE: c_int = 91; -pub const EUSERS: c_int = 68; -pub const ENOTSOCK: c_int = 38; -pub const EDESTADDRREQ: c_int = 39; -pub const EMSGSIZE: c_int = 40; -pub const EPROTOTYPE: c_int = 41; -pub const ENOPROTOOPT: c_int = 42; -pub const EPROTONOSUPPORT: c_int = 43; -pub const ESOCKTNOSUPPORT: c_int = 44; -pub const EOPNOTSUPP: c_int = 45; -pub const EPFNOSUPPORT: c_int = 46; -pub const EAFNOSUPPORT: c_int = 47; -pub const EADDRINUSE: c_int = 48; -pub const EADDRNOTAVAIL: c_int = 49; -pub const ENETDOWN: c_int = 50; -pub const ENETUNREACH: c_int = 51; -pub const ENETRESET: c_int = 52; -pub const ECONNABORTED: c_int = 53; -pub const ECONNRESET: c_int = 54; -pub const ENOBUFS: c_int = 55; -pub const EISCONN: c_int = 56; -pub const ENOTCONN: c_int = 57; -pub const ESHUTDOWN: c_int = 58; -pub const ETOOMANYREFS: c_int = 59; -pub const ETIMEDOUT: c_int = 60; -pub const ECONNREFUSED: c_int = 61; -pub const EHOSTDOWN: c_int = 64; -pub const EHOSTUNREACH: c_int = 65; -pub const EALREADY: c_int = 37; -pub const EINPROGRESS: c_int = 36; -pub const ESTALE: c_int = 70; -pub const EDQUOT: c_int = 69; -pub const ENOMEDIUM: c_int = 125; -pub const EMEDIUMTYPE: c_int = 126; -pub const ECANCELED: c_int = 127; -pub const ENOKEY: c_int = 128; -pub const EKEYEXPIRED: c_int = 129; -pub const EKEYREVOKED: c_int = 130; -pub const EKEYREJECTED: c_int = 131; -pub const EOWNERDEAD: c_int = 132; -pub const ENOTRECOVERABLE: c_int = 133; -pub const EHWPOISON: c_int = 135; -pub const ERFKILL: c_int = 134; - -pub const SOCK_STREAM: c_int = 1; -pub const SOCK_DGRAM: c_int = 2; - -pub const SA_ONSTACK: c_int = 1; -pub const SA_SIGINFO: c_int = 0x200; -pub const SA_NOCLDWAIT: c_int = 0x100; - -pub const SIGEMT: c_int = 7; -pub const SIGTTIN: c_int = 21; -pub const SIGTTOU: c_int = 22; -pub const SIGXCPU: c_int = 24; -pub const SIGXFSZ: c_int = 25; -pub const SIGVTALRM: c_int = 26; -pub const SIGPROF: c_int = 27; -pub const SIGWINCH: c_int = 28; -pub const SIGCHLD: c_int = 20; -pub const SIGBUS: c_int = 10; -pub const SIGUSR1: c_int = 30; -pub const SIGUSR2: c_int = 31; -pub const SIGCONT: c_int = 19; -pub const SIGSTOP: c_int = 17; -pub const SIGTSTP: c_int = 18; -pub const SIGURG: c_int = 16; -pub const SIGIO: c_int = 23; -pub const SIGSYS: c_int = 12; -pub const SIGPOLL: c_int = 23; -pub const SIGPWR: c_int = 29; -pub const SIG_SETMASK: c_int = 4; -pub const SIG_BLOCK: c_int = 1; -pub const SIG_UNBLOCK: c_int = 2; - -pub const POLLWRNORM: c_short = 4; -pub const POLLWRBAND: c_short = 0x100; - -pub const O_ASYNC: c_int = 0x40; -pub const O_NDELAY: c_int = 0x4004; - -pub const PTRACE_DETACH: c_uint = 17; - -pub const EFD_NONBLOCK: c_int = 0x4000; - -pub const F_GETLK: c_int = 7; -pub const F_GETOWN: c_int = 5; -pub const F_SETOWN: c_int = 6; -pub const F_SETLK: c_int = 8; -pub const F_SETLKW: c_int = 9; -pub const F_OFD_GETLK: c_int = 36; -pub const F_OFD_SETLK: c_int = 37; -pub const F_OFD_SETLKW: c_int = 38; - -pub const F_RDLCK: c_int = 1; -pub const F_WRLCK: c_int = 2; -pub const F_UNLCK: c_int = 3; - -pub const SFD_NONBLOCK: c_int = 0x4000; - -pub 
const TCSANOW: c_int = 0; -pub const TCSADRAIN: c_int = 1; -pub const TCSAFLUSH: c_int = 2; - -pub const SFD_CLOEXEC: c_int = 0x400000; - -pub const NCCS: usize = 17; -pub const O_TRUNC: c_int = 0x400; - -pub const O_CLOEXEC: c_int = 0x400000; - -pub const EBFONT: c_int = 109; -pub const ENOSTR: c_int = 72; -pub const ENODATA: c_int = 111; -pub const ETIME: c_int = 73; -pub const ENOSR: c_int = 74; -pub const ENONET: c_int = 80; -pub const ENOPKG: c_int = 113; -pub const EREMOTE: c_int = 71; -pub const ENOLINK: c_int = 82; -pub const EADV: c_int = 83; -pub const ESRMNT: c_int = 84; -pub const ECOMM: c_int = 85; -pub const EPROTO: c_int = 86; -pub const EDOTDOT: c_int = 88; - -pub const SA_NODEFER: c_int = 0x20; -pub const SA_RESETHAND: c_int = 0x4; -pub const SA_RESTART: c_int = 0x2; -pub const SA_NOCLDSTOP: c_int = 0x00000008; - -pub const EPOLL_CLOEXEC: c_int = 0x400000; - -pub const EFD_CLOEXEC: c_int = 0x400000; -pub const __SIZEOF_PTHREAD_CONDATTR_T: usize = 4; -pub const __SIZEOF_PTHREAD_MUTEX_T: usize = 40; -pub const __SIZEOF_PTHREAD_MUTEXATTR_T: usize = 4; -pub const __SIZEOF_PTHREAD_BARRIERATTR_T: usize = 4; - -pub const PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP: crate::pthread_mutex_t = pthread_mutex_t { - size: [ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ], -}; -pub const PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP: crate::pthread_mutex_t = pthread_mutex_t { - size: [ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ], -}; -pub const PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP: crate::pthread_mutex_t = pthread_mutex_t { - size: [ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ], -}; - -pub const O_DIRECTORY: c_int = 0o200000; -pub const O_NOFOLLOW: c_int = 0o400000; -pub const O_DIRECT: c_int = 0x100000; - -pub const MAP_LOCKED: c_int = 0x0100; -pub const MAP_NORESERVE: c_int = 0x00040; - -pub const EDEADLOCK: c_int = 108; -pub const EUCLEAN: c_int = 117; -pub const ENOTNAM: c_int = 118; -pub const ENAVAIL: c_int = 119; -pub const EISNAM: c_int = 120; -pub const EREMOTEIO: c_int = 121; - -pub const MCL_CURRENT: c_int = 0x2000; -pub const MCL_FUTURE: c_int = 0x4000; -pub const MCL_ONFAULT: c_int = 0x8000; - -pub const SIGSTKSZ: size_t = 16384; -pub const MINSIGSTKSZ: size_t = 4096; -pub const CBAUD: crate::tcflag_t = 0x0000100f; -pub const TAB1: crate::tcflag_t = 0x800; -pub const TAB2: crate::tcflag_t = 0x1000; -pub const TAB3: crate::tcflag_t = 0x1800; -pub const CR1: crate::tcflag_t = 0x200; -pub const CR2: crate::tcflag_t = 0x400; -pub const CR3: crate::tcflag_t = 0x600; -pub const FF1: crate::tcflag_t = 0x8000; -pub const BS1: crate::tcflag_t = 0x2000; -pub const VT1: crate::tcflag_t = 0x4000; -pub const VWERASE: usize = 0xe; -pub const VREPRINT: usize = 0xc; -pub const VSUSP: usize = 0xa; -pub const VSTART: usize = 0x8; -pub const VSTOP: usize = 0x9; -pub const VDISCARD: usize = 0xd; -pub const VTIME: usize = 0x5; -pub const IXON: crate::tcflag_t = 0x400; -pub const IXOFF: crate::tcflag_t = 0x1000; -pub const ONLCR: crate::tcflag_t = 0x4; -pub const CSIZE: crate::tcflag_t = 0x30; -pub const CS6: crate::tcflag_t = 0x10; -pub const CS7: crate::tcflag_t = 0x20; -pub const CS8: crate::tcflag_t = 0x30; -pub const CSTOPB: crate::tcflag_t = 0x40; -pub const CREAD: crate::tcflag_t = 0x80; -pub const PARENB: crate::tcflag_t = 0x100; -pub const PARODD: 
crate::tcflag_t = 0x200; -pub const HUPCL: crate::tcflag_t = 0x400; -pub const CLOCAL: crate::tcflag_t = 0x800; -pub const ECHOKE: crate::tcflag_t = 0x800; -pub const ECHOE: crate::tcflag_t = 0x10; -pub const ECHOK: crate::tcflag_t = 0x20; -pub const ECHONL: crate::tcflag_t = 0x40; -pub const ECHOPRT: crate::tcflag_t = 0x400; -pub const ECHOCTL: crate::tcflag_t = 0x200; -pub const ISIG: crate::tcflag_t = 0x1; -pub const ICANON: crate::tcflag_t = 0x2; -pub const PENDIN: crate::tcflag_t = 0x4000; -pub const NOFLSH: crate::tcflag_t = 0x80; -pub const CIBAUD: crate::tcflag_t = 0o02003600000; -pub const CBAUDEX: crate::tcflag_t = 0x00001000; -pub const VSWTC: usize = 7; -pub const OLCUC: crate::tcflag_t = 0o000002; -pub const NLDLY: crate::tcflag_t = 0o000400; -pub const CRDLY: crate::tcflag_t = 0o003000; -pub const TABDLY: crate::tcflag_t = 0o014000; -pub const BSDLY: crate::tcflag_t = 0o020000; -pub const FFDLY: crate::tcflag_t = 0o100000; -pub const VTDLY: crate::tcflag_t = 0o040000; -pub const XTABS: crate::tcflag_t = 0o014000; - -pub const B0: crate::speed_t = 0o000000; -pub const B50: crate::speed_t = 0o000001; -pub const B75: crate::speed_t = 0o000002; -pub const B110: crate::speed_t = 0o000003; -pub const B134: crate::speed_t = 0o000004; -pub const B150: crate::speed_t = 0o000005; -pub const B200: crate::speed_t = 0o000006; -pub const B300: crate::speed_t = 0o000007; -pub const B600: crate::speed_t = 0o000010; -pub const B1200: crate::speed_t = 0o000011; -pub const B1800: crate::speed_t = 0o000012; -pub const B2400: crate::speed_t = 0o000013; -pub const B4800: crate::speed_t = 0o000014; -pub const B9600: crate::speed_t = 0o000015; -pub const B19200: crate::speed_t = 0o000016; -pub const B38400: crate::speed_t = 0o000017; -pub const EXTA: crate::speed_t = B19200; -pub const EXTB: crate::speed_t = B38400; -pub const B57600: crate::speed_t = 0x1001; -pub const B115200: crate::speed_t = 0x1002; -pub const B230400: crate::speed_t = 0x1003; -pub const B460800: crate::speed_t = 0x1004; -pub const B76800: crate::speed_t = 0x1005; -pub const B153600: crate::speed_t = 0x1006; -pub const B307200: crate::speed_t = 0x1007; -pub const B614400: crate::speed_t = 0x1008; -pub const B921600: crate::speed_t = 0x1009; -pub const B500000: crate::speed_t = 0x100a; -pub const B576000: crate::speed_t = 0x100b; -pub const B1000000: crate::speed_t = 0x100c; -pub const B1152000: crate::speed_t = 0x100d; -pub const B1500000: crate::speed_t = 0x100e; -pub const B2000000: crate::speed_t = 0x100f; - -pub const VEOL: usize = 5; -pub const VEOL2: usize = 6; -pub const VMIN: usize = 4; -pub const IEXTEN: crate::tcflag_t = 0x8000; -pub const TOSTOP: crate::tcflag_t = 0x100; -pub const FLUSHO: crate::tcflag_t = 0x1000; -pub const EXTPROC: crate::tcflag_t = 0x10000; - -pub const SYS_restart_syscall: c_long = 0; -pub const SYS_exit: c_long = 1; -pub const SYS_fork: c_long = 2; -pub const SYS_read: c_long = 3; -pub const SYS_write: c_long = 4; -pub const SYS_open: c_long = 5; -pub const SYS_close: c_long = 6; -pub const SYS_wait4: c_long = 7; -pub const SYS_creat: c_long = 8; -pub const SYS_link: c_long = 9; -pub const SYS_unlink: c_long = 10; -pub const SYS_execv: c_long = 11; -pub const SYS_chdir: c_long = 12; -pub const SYS_chown: c_long = 13; -pub const SYS_mknod: c_long = 14; -pub const SYS_chmod: c_long = 15; -pub const SYS_lchown: c_long = 16; -pub const SYS_brk: c_long = 17; -pub const SYS_perfctr: c_long = 18; -pub const SYS_lseek: c_long = 19; -pub const SYS_getpid: c_long = 20; -pub const SYS_capget: c_long = 21; 
-pub const SYS_capset: c_long = 22; -pub const SYS_setuid: c_long = 23; -pub const SYS_getuid: c_long = 24; -pub const SYS_vmsplice: c_long = 25; -pub const SYS_ptrace: c_long = 26; -pub const SYS_alarm: c_long = 27; -pub const SYS_sigaltstack: c_long = 28; -pub const SYS_pause: c_long = 29; -pub const SYS_utime: c_long = 30; -pub const SYS_access: c_long = 33; -pub const SYS_nice: c_long = 34; -pub const SYS_sync: c_long = 36; -pub const SYS_kill: c_long = 37; -pub const SYS_stat: c_long = 38; -pub const SYS_sendfile: c_long = 39; -pub const SYS_lstat: c_long = 40; -pub const SYS_dup: c_long = 41; -pub const SYS_pipe: c_long = 42; -pub const SYS_times: c_long = 43; -pub const SYS_umount2: c_long = 45; -pub const SYS_setgid: c_long = 46; -pub const SYS_getgid: c_long = 47; -pub const SYS_signal: c_long = 48; -pub const SYS_geteuid: c_long = 49; -pub const SYS_getegid: c_long = 50; -pub const SYS_acct: c_long = 51; -pub const SYS_memory_ordering: c_long = 52; -pub const SYS_ioctl: c_long = 54; -pub const SYS_reboot: c_long = 55; -pub const SYS_symlink: c_long = 57; -pub const SYS_readlink: c_long = 58; -pub const SYS_execve: c_long = 59; -pub const SYS_umask: c_long = 60; -pub const SYS_chroot: c_long = 61; -pub const SYS_fstat: c_long = 62; -pub const SYS_fstat64: c_long = 63; -pub const SYS_getpagesize: c_long = 64; -pub const SYS_msync: c_long = 65; -pub const SYS_vfork: c_long = 66; -pub const SYS_pread64: c_long = 67; -pub const SYS_pwrite64: c_long = 68; -pub const SYS_mmap: c_long = 71; -pub const SYS_munmap: c_long = 73; -pub const SYS_mprotect: c_long = 74; -pub const SYS_madvise: c_long = 75; -pub const SYS_vhangup: c_long = 76; -pub const SYS_mincore: c_long = 78; -pub const SYS_getgroups: c_long = 79; -pub const SYS_setgroups: c_long = 80; -pub const SYS_getpgrp: c_long = 81; -pub const SYS_setitimer: c_long = 83; -pub const SYS_swapon: c_long = 85; -pub const SYS_getitimer: c_long = 86; -pub const SYS_sethostname: c_long = 88; -pub const SYS_dup2: c_long = 90; -pub const SYS_fcntl: c_long = 92; -pub const SYS_select: c_long = 93; -pub const SYS_fsync: c_long = 95; -pub const SYS_setpriority: c_long = 96; -pub const SYS_socket: c_long = 97; -pub const SYS_connect: c_long = 98; -pub const SYS_accept: c_long = 99; -pub const SYS_getpriority: c_long = 100; -pub const SYS_rt_sigreturn: c_long = 101; -pub const SYS_rt_sigaction: c_long = 102; -pub const SYS_rt_sigprocmask: c_long = 103; -pub const SYS_rt_sigpending: c_long = 104; -pub const SYS_rt_sigtimedwait: c_long = 105; -pub const SYS_rt_sigqueueinfo: c_long = 106; -pub const SYS_rt_sigsuspend: c_long = 107; -pub const SYS_setresuid: c_long = 108; -pub const SYS_getresuid: c_long = 109; -pub const SYS_setresgid: c_long = 110; -pub const SYS_getresgid: c_long = 111; -pub const SYS_recvmsg: c_long = 113; -pub const SYS_sendmsg: c_long = 114; -pub const SYS_gettimeofday: c_long = 116; -pub const SYS_getrusage: c_long = 117; -pub const SYS_getsockopt: c_long = 118; -pub const SYS_getcwd: c_long = 119; -pub const SYS_readv: c_long = 120; -pub const SYS_writev: c_long = 121; -pub const SYS_settimeofday: c_long = 122; -pub const SYS_fchown: c_long = 123; -pub const SYS_fchmod: c_long = 124; -pub const SYS_recvfrom: c_long = 125; -pub const SYS_setreuid: c_long = 126; -pub const SYS_setregid: c_long = 127; -pub const SYS_rename: c_long = 128; -pub const SYS_truncate: c_long = 129; -pub const SYS_ftruncate: c_long = 130; -pub const SYS_flock: c_long = 131; -pub const SYS_lstat64: c_long = 132; -pub const SYS_sendto: c_long = 133; -pub 
const SYS_shutdown: c_long = 134; -pub const SYS_socketpair: c_long = 135; -pub const SYS_mkdir: c_long = 136; -pub const SYS_rmdir: c_long = 137; -pub const SYS_utimes: c_long = 138; -pub const SYS_stat64: c_long = 139; -pub const SYS_sendfile64: c_long = 140; -pub const SYS_getpeername: c_long = 141; -pub const SYS_futex: c_long = 142; -pub const SYS_gettid: c_long = 143; -pub const SYS_getrlimit: c_long = 144; -pub const SYS_setrlimit: c_long = 145; -pub const SYS_pivot_root: c_long = 146; -pub const SYS_prctl: c_long = 147; -pub const SYS_pciconfig_read: c_long = 148; -pub const SYS_pciconfig_write: c_long = 149; -pub const SYS_getsockname: c_long = 150; -pub const SYS_inotify_init: c_long = 151; -pub const SYS_inotify_add_watch: c_long = 152; -pub const SYS_poll: c_long = 153; -pub const SYS_getdents64: c_long = 154; -pub const SYS_inotify_rm_watch: c_long = 156; -pub const SYS_statfs: c_long = 157; -pub const SYS_fstatfs: c_long = 158; -pub const SYS_umount: c_long = 159; -pub const SYS_sched_set_affinity: c_long = 160; -pub const SYS_sched_get_affinity: c_long = 161; -pub const SYS_getdomainname: c_long = 162; -pub const SYS_setdomainname: c_long = 163; -pub const SYS_utrap_install: c_long = 164; -pub const SYS_quotactl: c_long = 165; -pub const SYS_set_tid_address: c_long = 166; -pub const SYS_mount: c_long = 167; -pub const SYS_ustat: c_long = 168; -pub const SYS_setxattr: c_long = 169; -pub const SYS_lsetxattr: c_long = 170; -pub const SYS_fsetxattr: c_long = 171; -pub const SYS_getxattr: c_long = 172; -pub const SYS_lgetxattr: c_long = 173; -pub const SYS_getdents: c_long = 174; -pub const SYS_setsid: c_long = 175; -pub const SYS_fchdir: c_long = 176; -pub const SYS_fgetxattr: c_long = 177; -pub const SYS_listxattr: c_long = 178; -pub const SYS_llistxattr: c_long = 179; -pub const SYS_flistxattr: c_long = 180; -pub const SYS_removexattr: c_long = 181; -pub const SYS_lremovexattr: c_long = 182; -pub const SYS_sigpending: c_long = 183; -#[deprecated(since = "0.2.70", note = "Functional up to 2.6 kernel")] -pub const SYS_query_module: c_long = 184; -pub const SYS_setpgid: c_long = 185; -pub const SYS_fremovexattr: c_long = 186; -pub const SYS_tkill: c_long = 187; -pub const SYS_exit_group: c_long = 188; -pub const SYS_uname: c_long = 189; -pub const SYS_init_module: c_long = 190; -pub const SYS_personality: c_long = 191; -pub const SYS_remap_file_pages: c_long = 192; -pub const SYS_epoll_create: c_long = 193; -pub const SYS_epoll_ctl: c_long = 194; -pub const SYS_epoll_wait: c_long = 195; -pub const SYS_ioprio_set: c_long = 196; -pub const SYS_getppid: c_long = 197; -pub const SYS_sigaction: c_long = 198; -pub const SYS_sgetmask: c_long = 199; -pub const SYS_ssetmask: c_long = 200; -pub const SYS_sigsuspend: c_long = 201; -pub const SYS_oldlstat: c_long = 202; -pub const SYS_uselib: c_long = 203; -pub const SYS_readdir: c_long = 204; -pub const SYS_readahead: c_long = 205; -pub const SYS_socketcall: c_long = 206; -pub const SYS_syslog: c_long = 207; -pub const SYS_lookup_dcookie: c_long = 208; -pub const SYS_fadvise64: c_long = 209; -pub const SYS_fadvise64_64: c_long = 210; -pub const SYS_tgkill: c_long = 211; -pub const SYS_waitpid: c_long = 212; -pub const SYS_swapoff: c_long = 213; -pub const SYS_sysinfo: c_long = 214; -pub const SYS_ipc: c_long = 215; -pub const SYS_sigreturn: c_long = 216; -pub const SYS_clone: c_long = 217; -pub const SYS_ioprio_get: c_long = 218; -pub const SYS_adjtimex: c_long = 219; -pub const SYS_sigprocmask: c_long = 220; -#[deprecated(since = "0.2.70", 
note = "Functional up to 2.6 kernel")] -pub const SYS_create_module: c_long = 221; -pub const SYS_delete_module: c_long = 222; -#[deprecated(since = "0.2.70", note = "Functional up to 2.6 kernel")] -pub const SYS_get_kernel_syms: c_long = 223; -pub const SYS_getpgid: c_long = 224; -pub const SYS_bdflush: c_long = 225; -pub const SYS_sysfs: c_long = 226; -pub const SYS_afs_syscall: c_long = 227; -pub const SYS_setfsuid: c_long = 228; -pub const SYS_setfsgid: c_long = 229; -pub const SYS__newselect: c_long = 230; -pub const SYS_splice: c_long = 232; -pub const SYS_stime: c_long = 233; -pub const SYS_statfs64: c_long = 234; -pub const SYS_fstatfs64: c_long = 235; -pub const SYS__llseek: c_long = 236; -pub const SYS_mlock: c_long = 237; -pub const SYS_munlock: c_long = 238; -pub const SYS_mlockall: c_long = 239; -pub const SYS_munlockall: c_long = 240; -pub const SYS_sched_setparam: c_long = 241; -pub const SYS_sched_getparam: c_long = 242; -pub const SYS_sched_setscheduler: c_long = 243; -pub const SYS_sched_getscheduler: c_long = 244; -pub const SYS_sched_yield: c_long = 245; -pub const SYS_sched_get_priority_max: c_long = 246; -pub const SYS_sched_get_priority_min: c_long = 247; -pub const SYS_sched_rr_get_interval: c_long = 248; -pub const SYS_nanosleep: c_long = 249; -pub const SYS_mremap: c_long = 250; -pub const SYS__sysctl: c_long = 251; -pub const SYS_getsid: c_long = 252; -pub const SYS_fdatasync: c_long = 253; -pub const SYS_nfsservctl: c_long = 254; -pub const SYS_sync_file_range: c_long = 255; -pub const SYS_clock_settime: c_long = 256; -pub const SYS_clock_gettime: c_long = 257; -pub const SYS_clock_getres: c_long = 258; -pub const SYS_clock_nanosleep: c_long = 259; -pub const SYS_sched_getaffinity: c_long = 260; -pub const SYS_sched_setaffinity: c_long = 261; -pub const SYS_timer_settime: c_long = 262; -pub const SYS_timer_gettime: c_long = 263; -pub const SYS_timer_getoverrun: c_long = 264; -pub const SYS_timer_delete: c_long = 265; -pub const SYS_timer_create: c_long = 266; -pub const SYS_io_setup: c_long = 268; -pub const SYS_io_destroy: c_long = 269; -pub const SYS_io_submit: c_long = 270; -pub const SYS_io_cancel: c_long = 271; -pub const SYS_io_getevents: c_long = 272; -pub const SYS_mq_open: c_long = 273; -pub const SYS_mq_unlink: c_long = 274; -pub const SYS_mq_timedsend: c_long = 275; -pub const SYS_mq_timedreceive: c_long = 276; -pub const SYS_mq_notify: c_long = 277; -pub const SYS_mq_getsetattr: c_long = 278; -pub const SYS_waitid: c_long = 279; -pub const SYS_tee: c_long = 280; -pub const SYS_add_key: c_long = 281; -pub const SYS_request_key: c_long = 282; -pub const SYS_keyctl: c_long = 283; -pub const SYS_openat: c_long = 284; -pub const SYS_mkdirat: c_long = 285; -pub const SYS_mknodat: c_long = 286; -pub const SYS_fchownat: c_long = 287; -pub const SYS_futimesat: c_long = 288; -pub const SYS_fstatat64: c_long = 289; -pub const SYS_unlinkat: c_long = 290; -pub const SYS_renameat: c_long = 291; -pub const SYS_linkat: c_long = 292; -pub const SYS_symlinkat: c_long = 293; -pub const SYS_readlinkat: c_long = 294; -pub const SYS_fchmodat: c_long = 295; -pub const SYS_faccessat: c_long = 296; -pub const SYS_pselect6: c_long = 297; -pub const SYS_ppoll: c_long = 298; -pub const SYS_unshare: c_long = 299; -pub const SYS_set_robust_list: c_long = 300; -pub const SYS_get_robust_list: c_long = 301; -pub const SYS_migrate_pages: c_long = 302; -pub const SYS_mbind: c_long = 303; -pub const SYS_get_mempolicy: c_long = 304; -pub const SYS_set_mempolicy: c_long = 305; -pub const 
SYS_kexec_load: c_long = 306; -pub const SYS_move_pages: c_long = 307; -pub const SYS_getcpu: c_long = 308; -pub const SYS_epoll_pwait: c_long = 309; -pub const SYS_utimensat: c_long = 310; -pub const SYS_signalfd: c_long = 311; -pub const SYS_timerfd_create: c_long = 312; -pub const SYS_eventfd: c_long = 313; -pub const SYS_fallocate: c_long = 314; -pub const SYS_timerfd_settime: c_long = 315; -pub const SYS_timerfd_gettime: c_long = 316; -pub const SYS_signalfd4: c_long = 317; -pub const SYS_eventfd2: c_long = 318; -pub const SYS_epoll_create1: c_long = 319; -pub const SYS_dup3: c_long = 320; -pub const SYS_pipe2: c_long = 321; -pub const SYS_inotify_init1: c_long = 322; -pub const SYS_accept4: c_long = 323; -pub const SYS_preadv: c_long = 324; -pub const SYS_pwritev: c_long = 325; -pub const SYS_rt_tgsigqueueinfo: c_long = 326; -pub const SYS_perf_event_open: c_long = 327; -pub const SYS_recvmmsg: c_long = 328; -pub const SYS_fanotify_init: c_long = 329; -pub const SYS_fanotify_mark: c_long = 330; -pub const SYS_prlimit64: c_long = 331; -pub const SYS_name_to_handle_at: c_long = 332; -pub const SYS_open_by_handle_at: c_long = 333; -pub const SYS_clock_adjtime: c_long = 334; -pub const SYS_syncfs: c_long = 335; -pub const SYS_sendmmsg: c_long = 336; -pub const SYS_setns: c_long = 337; -pub const SYS_process_vm_readv: c_long = 338; -pub const SYS_process_vm_writev: c_long = 339; -pub const SYS_kern_features: c_long = 340; -pub const SYS_kcmp: c_long = 341; -pub const SYS_finit_module: c_long = 342; -pub const SYS_sched_setattr: c_long = 343; -pub const SYS_sched_getattr: c_long = 344; -pub const SYS_renameat2: c_long = 345; -pub const SYS_seccomp: c_long = 346; -pub const SYS_getrandom: c_long = 347; -pub const SYS_memfd_create: c_long = 348; -pub const SYS_bpf: c_long = 349; -pub const SYS_execveat: c_long = 350; -pub const SYS_membarrier: c_long = 351; -pub const SYS_userfaultfd: c_long = 352; -pub const SYS_bind: c_long = 353; -pub const SYS_listen: c_long = 354; -pub const SYS_setsockopt: c_long = 355; -pub const SYS_mlock2: c_long = 356; -pub const SYS_copy_file_range: c_long = 357; -pub const SYS_preadv2: c_long = 358; -pub const SYS_pwritev2: c_long = 359; -pub const SYS_statx: c_long = 360; -pub const SYS_rseq: c_long = 365; -pub const SYS_pidfd_send_signal: c_long = 424; -pub const SYS_io_uring_setup: c_long = 425; -pub const SYS_io_uring_enter: c_long = 426; -pub const SYS_io_uring_register: c_long = 427; -pub const SYS_open_tree: c_long = 428; -pub const SYS_move_mount: c_long = 429; -pub const SYS_fsopen: c_long = 430; -pub const SYS_fsconfig: c_long = 431; -pub const SYS_fsmount: c_long = 432; -pub const SYS_fspick: c_long = 433; -pub const SYS_pidfd_open: c_long = 434; -// Reserved in the kernel, but not actually implemented yet -pub const SYS_clone3: c_long = 435; -pub const SYS_close_range: c_long = 436; -pub const SYS_openat2: c_long = 437; -pub const SYS_pidfd_getfd: c_long = 438; -pub const SYS_faccessat2: c_long = 439; -pub const SYS_process_madvise: c_long = 440; -pub const SYS_epoll_pwait2: c_long = 441; -pub const SYS_mount_setattr: c_long = 442; -pub const SYS_quotactl_fd: c_long = 443; -pub const SYS_landlock_create_ruleset: c_long = 444; -pub const SYS_landlock_add_rule: c_long = 445; -pub const SYS_landlock_restrict_self: c_long = 446; -pub const SYS_memfd_secret: c_long = 447; -pub const SYS_process_mrelease: c_long = 448; -pub const SYS_futex_waitv: c_long = 449; -pub const SYS_set_mempolicy_home_node: c_long = 450; - -extern "C" { - pub fn sysctl( - name: 
*mut c_int, - namelen: c_int, - oldp: *mut c_void, - oldlenp: *mut size_t, - newp: *mut c_void, - newlen: size_t, - ) -> c_int; -} diff --git a/vendor/libc/src/unix/linux_like/linux/gnu/b64/x86_64/mod.rs b/vendor/libc/src/unix/linux_like/linux/gnu/b64/x86_64/mod.rs deleted file mode 100644 index f4555ee4202308..00000000000000 --- a/vendor/libc/src/unix/linux_like/linux/gnu/b64/x86_64/mod.rs +++ /dev/null @@ -1,809 +0,0 @@ -//! x86_64-specific definitions for 64-bit linux-like values - -use crate::prelude::*; -use crate::{off64_t, off_t}; - -pub type wchar_t = i32; -pub type nlink_t = u64; -pub type blksize_t = i64; -pub type greg_t = i64; -pub type suseconds_t = i64; -pub type __u64 = c_ulonglong; -pub type __s64 = c_longlong; - -s! { - // FIXME(1.0): This should not implement `PartialEq` - #[allow(unpredictable_function_pointer_comparisons)] - pub struct sigaction { - pub sa_sigaction: crate::sighandler_t, - pub sa_mask: crate::sigset_t, - #[cfg(target_arch = "sparc64")] - __reserved0: c_int, - pub sa_flags: c_int, - pub sa_restorer: Option, - } - - pub struct statfs { - pub f_type: crate::__fsword_t, - pub f_bsize: crate::__fsword_t, - pub f_blocks: crate::fsblkcnt_t, - pub f_bfree: crate::fsblkcnt_t, - pub f_bavail: crate::fsblkcnt_t, - - pub f_files: crate::fsfilcnt_t, - pub f_ffree: crate::fsfilcnt_t, - pub f_fsid: crate::fsid_t, - - pub f_namelen: crate::__fsword_t, - pub f_frsize: crate::__fsword_t, - f_spare: [crate::__fsword_t; 5], - } - - pub struct flock { - pub l_type: c_short, - pub l_whence: c_short, - pub l_start: off_t, - pub l_len: off_t, - pub l_pid: crate::pid_t, - } - - pub struct flock64 { - pub l_type: c_short, - pub l_whence: c_short, - pub l_start: off64_t, - pub l_len: off64_t, - pub l_pid: crate::pid_t, - } - - pub struct siginfo_t { - pub si_signo: c_int, - pub si_errno: c_int, - pub si_code: c_int, - #[doc(hidden)] - #[deprecated( - since = "0.2.54", - note = "Please leave a comment on \ - https://github.com/rust-lang/libc/pull/1316 if you're using \ - this field" - )] - pub _pad: [c_int; 29], - _align: [u64; 0], - } - - pub struct stack_t { - pub ss_sp: *mut c_void, - pub ss_flags: c_int, - pub ss_size: size_t, - } - - pub struct stat { - pub st_dev: crate::dev_t, - pub st_ino: crate::ino_t, - pub st_nlink: crate::nlink_t, - pub st_mode: crate::mode_t, - pub st_uid: crate::uid_t, - pub st_gid: crate::gid_t, - __pad0: c_int, - pub st_rdev: crate::dev_t, - pub st_size: off_t, - pub st_blksize: crate::blksize_t, - pub st_blocks: crate::blkcnt_t, - pub st_atime: crate::time_t, - pub st_atime_nsec: i64, - pub st_mtime: crate::time_t, - pub st_mtime_nsec: i64, - pub st_ctime: crate::time_t, - pub st_ctime_nsec: i64, - __unused: [i64; 3], - } - - pub struct stat64 { - pub st_dev: crate::dev_t, - pub st_ino: crate::ino64_t, - pub st_nlink: crate::nlink_t, - pub st_mode: crate::mode_t, - pub st_uid: crate::uid_t, - pub st_gid: crate::gid_t, - __pad0: c_int, - pub st_rdev: crate::dev_t, - pub st_size: off_t, - pub st_blksize: crate::blksize_t, - pub st_blocks: crate::blkcnt64_t, - pub st_atime: crate::time_t, - pub st_atime_nsec: i64, - pub st_mtime: crate::time_t, - pub st_mtime_nsec: i64, - pub st_ctime: crate::time_t, - pub st_ctime_nsec: i64, - __reserved: [i64; 3], - } - - pub struct statfs64 { - pub f_type: crate::__fsword_t, - pub f_bsize: crate::__fsword_t, - pub f_blocks: u64, - pub f_bfree: u64, - pub f_bavail: u64, - pub f_files: u64, - pub f_ffree: u64, - pub f_fsid: crate::fsid_t, - pub f_namelen: crate::__fsword_t, - pub f_frsize: crate::__fsword_t, - pub 
f_flags: crate::__fsword_t, - pub f_spare: [crate::__fsword_t; 4], - } - - pub struct statvfs64 { - pub f_bsize: c_ulong, - pub f_frsize: c_ulong, - pub f_blocks: u64, - pub f_bfree: u64, - pub f_bavail: u64, - pub f_files: u64, - pub f_ffree: u64, - pub f_favail: u64, - pub f_fsid: c_ulong, - pub f_flag: c_ulong, - pub f_namemax: c_ulong, - __f_spare: [c_int; 6], - } - - pub struct pthread_attr_t { - #[cfg(target_pointer_width = "32")] - __size: [u32; 8], - #[cfg(target_pointer_width = "64")] - __size: [u64; 7], - } - - pub struct _libc_fpxreg { - pub significand: [u16; 4], - pub exponent: u16, - __private: [u16; 3], - } - - pub struct _libc_xmmreg { - pub element: [u32; 4], - } - - pub struct _libc_fpstate { - pub cwd: u16, - pub swd: u16, - pub ftw: u16, - pub fop: u16, - pub rip: u64, - pub rdp: u64, - pub mxcsr: u32, - pub mxcr_mask: u32, - pub _st: [_libc_fpxreg; 8], - pub _xmm: [_libc_xmmreg; 16], - __private: [u64; 12], - } - - pub struct user_regs_struct { - pub r15: c_ulonglong, - pub r14: c_ulonglong, - pub r13: c_ulonglong, - pub r12: c_ulonglong, - pub rbp: c_ulonglong, - pub rbx: c_ulonglong, - pub r11: c_ulonglong, - pub r10: c_ulonglong, - pub r9: c_ulonglong, - pub r8: c_ulonglong, - pub rax: c_ulonglong, - pub rcx: c_ulonglong, - pub rdx: c_ulonglong, - pub rsi: c_ulonglong, - pub rdi: c_ulonglong, - pub orig_rax: c_ulonglong, - pub rip: c_ulonglong, - pub cs: c_ulonglong, - pub eflags: c_ulonglong, - pub rsp: c_ulonglong, - pub ss: c_ulonglong, - pub fs_base: c_ulonglong, - pub gs_base: c_ulonglong, - pub ds: c_ulonglong, - pub es: c_ulonglong, - pub fs: c_ulonglong, - pub gs: c_ulonglong, - } - - pub struct user { - pub regs: user_regs_struct, - pub u_fpvalid: c_int, - pub i387: user_fpregs_struct, - pub u_tsize: c_ulonglong, - pub u_dsize: c_ulonglong, - pub u_ssize: c_ulonglong, - pub start_code: c_ulonglong, - pub start_stack: c_ulonglong, - pub signal: c_longlong, - __reserved: c_int, - #[cfg(target_pointer_width = "32")] - __pad1: u32, - pub u_ar0: *mut user_regs_struct, - #[cfg(target_pointer_width = "32")] - __pad2: u32, - pub u_fpstate: *mut user_fpregs_struct, - pub magic: c_ulonglong, - pub u_comm: [c_char; 32], - pub u_debugreg: [c_ulonglong; 8], - } - - pub struct mcontext_t { - pub gregs: [greg_t; 23], - pub fpregs: *mut _libc_fpstate, - __private: [u64; 8], - } - - pub struct ipc_perm { - pub __key: crate::key_t, - pub uid: crate::uid_t, - pub gid: crate::gid_t, - pub cuid: crate::uid_t, - pub cgid: crate::gid_t, - pub mode: c_ushort, - __pad1: c_ushort, - pub __seq: c_ushort, - __pad2: c_ushort, - __unused1: u64, - __unused2: u64, - } - - pub struct shmid_ds { - pub shm_perm: crate::ipc_perm, - pub shm_segsz: size_t, - pub shm_atime: crate::time_t, - pub shm_dtime: crate::time_t, - pub shm_ctime: crate::time_t, - pub shm_cpid: crate::pid_t, - pub shm_lpid: crate::pid_t, - pub shm_nattch: crate::shmatt_t, - __unused4: u64, - __unused5: u64, - } - - pub struct ptrace_rseq_configuration { - pub rseq_abi_pointer: crate::__u64, - pub rseq_abi_size: crate::__u32, - pub signature: crate::__u32, - pub flags: crate::__u32, - pub pad: crate::__u32, - } - - #[repr(align(8))] - pub struct clone_args { - pub flags: c_ulonglong, - pub pidfd: c_ulonglong, - pub child_tid: c_ulonglong, - pub parent_tid: c_ulonglong, - pub exit_signal: c_ulonglong, - pub stack: c_ulonglong, - pub stack_size: c_ulonglong, - pub tls: c_ulonglong, - pub set_tid: c_ulonglong, - pub set_tid_size: c_ulonglong, - pub cgroup: c_ulonglong, - } -} - -s_no_extra_traits! 
{ - pub struct user_fpregs_struct { - pub cwd: c_ushort, - pub swd: c_ushort, - pub ftw: c_ushort, - pub fop: c_ushort, - pub rip: c_ulonglong, - pub rdp: c_ulonglong, - pub mxcsr: c_uint, - pub mxcr_mask: c_uint, - pub st_space: [c_uint; 32], - pub xmm_space: [c_uint; 64], - padding: [c_uint; 24], - } - - pub struct ucontext_t { - pub uc_flags: c_ulong, - pub uc_link: *mut ucontext_t, - pub uc_stack: crate::stack_t, - pub uc_mcontext: mcontext_t, - pub uc_sigmask: crate::sigset_t, - __private: [u8; 512], - // FIXME(glibc): the shadow stack field requires glibc >= 2.28. - // Re-add once we drop compatibility with glibc versions older than - // 2.28. - // - // __ssp: [c_ulonglong; 4], - } - - #[repr(align(16))] - pub struct max_align_t { - priv_: [f64; 4], - } -} - -cfg_if! { - if #[cfg(feature = "extra_traits")] { - impl PartialEq for user_fpregs_struct { - fn eq(&self, other: &user_fpregs_struct) -> bool { - self.cwd == other.cwd - && self.swd == other.swd - && self.ftw == other.ftw - && self.fop == other.fop - && self.rip == other.rip - && self.rdp == other.rdp - && self.mxcsr == other.mxcsr - && self.mxcr_mask == other.mxcr_mask - && self.st_space == other.st_space - && self - .xmm_space - .iter() - .zip(other.xmm_space.iter()) - .all(|(a, b)| a == b) - // Ignore padding field - } - } - - impl Eq for user_fpregs_struct {} - - impl hash::Hash for user_fpregs_struct { - fn hash(&self, state: &mut H) { - self.cwd.hash(state); - self.ftw.hash(state); - self.fop.hash(state); - self.rip.hash(state); - self.rdp.hash(state); - self.mxcsr.hash(state); - self.mxcr_mask.hash(state); - self.st_space.hash(state); - self.xmm_space.hash(state); - // Ignore padding field - } - } - - impl PartialEq for ucontext_t { - fn eq(&self, other: &ucontext_t) -> bool { - self.uc_flags == other.uc_flags - && self.uc_link == other.uc_link - && self.uc_stack == other.uc_stack - && self.uc_mcontext == other.uc_mcontext - && self.uc_sigmask == other.uc_sigmask - // Ignore __private field - } - } - - impl Eq for ucontext_t {} - - impl hash::Hash for ucontext_t { - fn hash(&self, state: &mut H) { - self.uc_flags.hash(state); - self.uc_link.hash(state); - self.uc_stack.hash(state); - self.uc_mcontext.hash(state); - self.uc_sigmask.hash(state); - // Ignore __private field - } - } - } -} - -pub const POSIX_FADV_DONTNEED: c_int = 4; -pub const POSIX_FADV_NOREUSE: c_int = 5; - -pub const VEOF: usize = 4; -pub const RTLD_DEEPBIND: c_int = 0x8; -pub const RTLD_GLOBAL: c_int = 0x100; -pub const RTLD_NOLOAD: c_int = 0x4; - -pub const O_APPEND: c_int = 1024; -pub const O_CREAT: c_int = 64; -pub const O_EXCL: c_int = 128; -pub const O_NOCTTY: c_int = 256; -pub const O_NONBLOCK: c_int = 2048; -pub const O_SYNC: c_int = 1052672; -pub const O_RSYNC: c_int = 1052672; -pub const O_DSYNC: c_int = 4096; -pub const O_FSYNC: c_int = 0x101000; -pub const O_NOATIME: c_int = 0o1000000; -pub const O_PATH: c_int = 0o10000000; -pub const O_TMPFILE: c_int = 0o20000000 | O_DIRECTORY; - -pub const MADV_SOFT_OFFLINE: c_int = 101; -pub const MAP_GROWSDOWN: c_int = 0x0100; - -pub const EDEADLK: c_int = 35; -pub const ENAMETOOLONG: c_int = 36; -pub const ENOLCK: c_int = 37; -pub const ENOSYS: c_int = 38; -pub const ENOTEMPTY: c_int = 39; -pub const ELOOP: c_int = 40; -pub const ENOMSG: c_int = 42; -pub const EIDRM: c_int = 43; -pub const ECHRNG: c_int = 44; -pub const EL2NSYNC: c_int = 45; -pub const EL3HLT: c_int = 46; -pub const EL3RST: c_int = 47; -pub const ELNRNG: c_int = 48; -pub const EUNATCH: c_int = 49; -pub const ENOCSI: c_int = 50; -pub 
const EL2HLT: c_int = 51; -pub const EBADE: c_int = 52; -pub const EBADR: c_int = 53; -pub const EXFULL: c_int = 54; -pub const ENOANO: c_int = 55; -pub const EBADRQC: c_int = 56; -pub const EBADSLT: c_int = 57; -pub const EMULTIHOP: c_int = 72; -pub const EOVERFLOW: c_int = 75; -pub const ENOTUNIQ: c_int = 76; -pub const EBADFD: c_int = 77; -pub const EBADMSG: c_int = 74; -pub const EREMCHG: c_int = 78; -pub const ELIBACC: c_int = 79; -pub const ELIBBAD: c_int = 80; -pub const ELIBSCN: c_int = 81; -pub const ELIBMAX: c_int = 82; -pub const ELIBEXEC: c_int = 83; -pub const EILSEQ: c_int = 84; -pub const ERESTART: c_int = 85; -pub const ESTRPIPE: c_int = 86; -pub const EUSERS: c_int = 87; -pub const ENOTSOCK: c_int = 88; -pub const EDESTADDRREQ: c_int = 89; -pub const EMSGSIZE: c_int = 90; -pub const EPROTOTYPE: c_int = 91; -pub const ENOPROTOOPT: c_int = 92; -pub const EPROTONOSUPPORT: c_int = 93; -pub const ESOCKTNOSUPPORT: c_int = 94; -pub const EOPNOTSUPP: c_int = 95; -pub const EPFNOSUPPORT: c_int = 96; -pub const EAFNOSUPPORT: c_int = 97; -pub const EADDRINUSE: c_int = 98; -pub const EADDRNOTAVAIL: c_int = 99; -pub const ENETDOWN: c_int = 100; -pub const ENETUNREACH: c_int = 101; -pub const ENETRESET: c_int = 102; -pub const ECONNABORTED: c_int = 103; -pub const ECONNRESET: c_int = 104; -pub const ENOBUFS: c_int = 105; -pub const EISCONN: c_int = 106; -pub const ENOTCONN: c_int = 107; -pub const ESHUTDOWN: c_int = 108; -pub const ETOOMANYREFS: c_int = 109; -pub const ETIMEDOUT: c_int = 110; -pub const ECONNREFUSED: c_int = 111; -pub const EHOSTDOWN: c_int = 112; -pub const EHOSTUNREACH: c_int = 113; -pub const EALREADY: c_int = 114; -pub const EINPROGRESS: c_int = 115; -pub const ESTALE: c_int = 116; -pub const EDQUOT: c_int = 122; -pub const ENOMEDIUM: c_int = 123; -pub const EMEDIUMTYPE: c_int = 124; -pub const ECANCELED: c_int = 125; -pub const ENOKEY: c_int = 126; -pub const EKEYEXPIRED: c_int = 127; -pub const EKEYREVOKED: c_int = 128; -pub const EKEYREJECTED: c_int = 129; -pub const EOWNERDEAD: c_int = 130; -pub const ENOTRECOVERABLE: c_int = 131; -pub const EHWPOISON: c_int = 133; -pub const ERFKILL: c_int = 132; - -pub const SOCK_STREAM: c_int = 1; -pub const SOCK_DGRAM: c_int = 2; - -pub const SA_ONSTACK: c_int = 0x08000000; -pub const SA_SIGINFO: c_int = 0x00000004; -pub const SA_NOCLDWAIT: c_int = 0x00000002; - -pub const SIGTTIN: c_int = 21; -pub const SIGTTOU: c_int = 22; -pub const SIGXCPU: c_int = 24; -pub const SIGXFSZ: c_int = 25; -pub const SIGVTALRM: c_int = 26; -pub const SIGPROF: c_int = 27; -pub const SIGWINCH: c_int = 28; -pub const SIGCHLD: c_int = 17; -pub const SIGBUS: c_int = 7; -pub const SIGUSR1: c_int = 10; -pub const SIGUSR2: c_int = 12; -pub const SIGCONT: c_int = 18; -pub const SIGSTOP: c_int = 19; -pub const SIGTSTP: c_int = 20; -pub const SIGURG: c_int = 23; -pub const SIGIO: c_int = 29; -pub const SIGSYS: c_int = 31; -pub const SIGSTKFLT: c_int = 16; -#[deprecated(since = "0.2.55", note = "Use SIGSYS instead")] -pub const SIGUNUSED: c_int = 31; -pub const SIGPOLL: c_int = 29; -pub const SIGPWR: c_int = 30; -pub const SIG_SETMASK: c_int = 2; -pub const SIG_BLOCK: c_int = 0x000000; -pub const SIG_UNBLOCK: c_int = 0x01; - -pub const POLLWRNORM: c_short = 0x100; -pub const POLLWRBAND: c_short = 0x200; - -pub const O_ASYNC: c_int = 0x2000; -pub const O_NDELAY: c_int = 0x800; - -pub const PTRACE_DETACH: c_uint = 17; -pub const PTRACE_GET_RSEQ_CONFIGURATION: c_uint = 0x420f; - -pub const EFD_NONBLOCK: c_int = 0x800; - -pub const F_GETLK: c_int = 5; -pub 
const F_GETOWN: c_int = 9; -pub const F_SETOWN: c_int = 8; -pub const F_SETLK: c_int = 6; -pub const F_SETLKW: c_int = 7; -pub const F_OFD_GETLK: c_int = 36; -pub const F_OFD_SETLK: c_int = 37; -pub const F_OFD_SETLKW: c_int = 38; - -pub const F_RDLCK: c_int = 0; -pub const F_WRLCK: c_int = 1; -pub const F_UNLCK: c_int = 2; - -pub const SFD_NONBLOCK: c_int = 0x0800; - -pub const TCSANOW: c_int = 0; -pub const TCSADRAIN: c_int = 1; -pub const TCSAFLUSH: c_int = 2; - -pub const SFD_CLOEXEC: c_int = 0x080000; - -pub const NCCS: usize = 32; - -pub const O_TRUNC: c_int = 512; - -pub const O_CLOEXEC: c_int = 0x80000; - -pub const EBFONT: c_int = 59; -pub const ENOSTR: c_int = 60; -pub const ENODATA: c_int = 61; -pub const ETIME: c_int = 62; -pub const ENOSR: c_int = 63; -pub const ENONET: c_int = 64; -pub const ENOPKG: c_int = 65; -pub const EREMOTE: c_int = 66; -pub const ENOLINK: c_int = 67; -pub const EADV: c_int = 68; -pub const ESRMNT: c_int = 69; -pub const ECOMM: c_int = 70; -pub const EPROTO: c_int = 71; -pub const EDOTDOT: c_int = 73; - -pub const SA_NODEFER: c_int = 0x40000000; -pub const SA_RESETHAND: c_int = 0x80000000; -pub const SA_RESTART: c_int = 0x10000000; -pub const SA_NOCLDSTOP: c_int = 0x00000001; - -pub const EPOLL_CLOEXEC: c_int = 0x80000; - -pub const EFD_CLOEXEC: c_int = 0x80000; - -pub const __SIZEOF_PTHREAD_CONDATTR_T: usize = 4; -pub const __SIZEOF_PTHREAD_MUTEXATTR_T: usize = 4; -pub const __SIZEOF_PTHREAD_BARRIERATTR_T: usize = 4; - -pub const O_DIRECT: c_int = 0x4000; -pub const O_DIRECTORY: c_int = 0x10000; -pub const O_NOFOLLOW: c_int = 0x20000; - -pub const MAP_HUGETLB: c_int = 0x040000; -pub const MAP_LOCKED: c_int = 0x02000; -pub const MAP_NORESERVE: c_int = 0x04000; -pub const MAP_32BIT: c_int = 0x0040; -pub const MAP_ANON: c_int = 0x0020; -pub const MAP_ANONYMOUS: c_int = 0x0020; -pub const MAP_DENYWRITE: c_int = 0x0800; -pub const MAP_EXECUTABLE: c_int = 0x01000; -pub const MAP_POPULATE: c_int = 0x08000; -pub const MAP_NONBLOCK: c_int = 0x010000; -pub const MAP_STACK: c_int = 0x020000; -pub const MAP_SYNC: c_int = 0x080000; - -pub const EDEADLOCK: c_int = 35; -pub const EUCLEAN: c_int = 117; -pub const ENOTNAM: c_int = 118; -pub const ENAVAIL: c_int = 119; -pub const EISNAM: c_int = 120; -pub const EREMOTEIO: c_int = 121; - -pub const PTRACE_GETFPREGS: c_uint = 14; -pub const PTRACE_SETFPREGS: c_uint = 15; -pub const PTRACE_GETFPXREGS: c_uint = 18; -pub const PTRACE_SETFPXREGS: c_uint = 19; -pub const PTRACE_GETREGS: c_uint = 12; -pub const PTRACE_SETREGS: c_uint = 13; -pub const PTRACE_PEEKSIGINFO_SHARED: c_uint = 1; -pub const PTRACE_SYSEMU: c_uint = 31; -pub const PTRACE_SYSEMU_SINGLESTEP: c_uint = 32; - -pub const PR_GET_SPECULATION_CTRL: c_int = 52; -pub const PR_SET_SPECULATION_CTRL: c_int = 53; -pub const PR_SPEC_NOT_AFFECTED: c_uint = 0; -pub const PR_SPEC_PRCTL: c_uint = 1 << 0; -pub const PR_SPEC_ENABLE: c_uint = 1 << 1; -pub const PR_SPEC_DISABLE: c_uint = 1 << 2; -pub const PR_SPEC_FORCE_DISABLE: c_uint = 1 << 3; -pub const PR_SPEC_DISABLE_NOEXEC: c_uint = 1 << 4; -pub const PR_SPEC_STORE_BYPASS: c_int = 0; -pub const PR_SPEC_INDIRECT_BRANCH: c_int = 1; -// FIXME(linux): perharps for later -//pub const PR_SPEC_L1D_FLUSH: c_int = 2; - -pub const MCL_CURRENT: c_int = 0x0001; -pub const MCL_FUTURE: c_int = 0x0002; -pub const MCL_ONFAULT: c_int = 0x0004; - -pub const SIGSTKSZ: size_t = 8192; -pub const MINSIGSTKSZ: size_t = 2048; -pub const CBAUD: crate::tcflag_t = 0o0010017; -pub const TAB1: crate::tcflag_t = 0x00000800; -pub const TAB2: 
crate::tcflag_t = 0x00001000; -pub const TAB3: crate::tcflag_t = 0x00001800; -pub const CR1: crate::tcflag_t = 0x00000200; -pub const CR2: crate::tcflag_t = 0x00000400; -pub const CR3: crate::tcflag_t = 0x00000600; -pub const FF1: crate::tcflag_t = 0x00008000; -pub const BS1: crate::tcflag_t = 0x00002000; -pub const VT1: crate::tcflag_t = 0x00004000; -pub const VWERASE: usize = 14; -pub const VREPRINT: usize = 12; -pub const VSUSP: usize = 10; -pub const VSTART: usize = 8; -pub const VSTOP: usize = 9; -pub const VDISCARD: usize = 13; -pub const VTIME: usize = 5; -pub const IXON: crate::tcflag_t = 0x00000400; -pub const IXOFF: crate::tcflag_t = 0x00001000; -pub const ONLCR: crate::tcflag_t = 0x4; -pub const CSIZE: crate::tcflag_t = 0x00000030; -pub const CS6: crate::tcflag_t = 0x00000010; -pub const CS7: crate::tcflag_t = 0x00000020; -pub const CS8: crate::tcflag_t = 0x00000030; -pub const CSTOPB: crate::tcflag_t = 0x00000040; -pub const CREAD: crate::tcflag_t = 0x00000080; -pub const PARENB: crate::tcflag_t = 0x00000100; -pub const PARODD: crate::tcflag_t = 0x00000200; -pub const HUPCL: crate::tcflag_t = 0x00000400; -pub const CLOCAL: crate::tcflag_t = 0x00000800; -pub const ECHOKE: crate::tcflag_t = 0x00000800; -pub const ECHOE: crate::tcflag_t = 0x00000010; -pub const ECHOK: crate::tcflag_t = 0x00000020; -pub const ECHONL: crate::tcflag_t = 0x00000040; -pub const ECHOPRT: crate::tcflag_t = 0x00000400; -pub const ECHOCTL: crate::tcflag_t = 0x00000200; -pub const ISIG: crate::tcflag_t = 0x00000001; -pub const ICANON: crate::tcflag_t = 0x00000002; -pub const PENDIN: crate::tcflag_t = 0x00004000; -pub const NOFLSH: crate::tcflag_t = 0x00000080; -pub const CIBAUD: crate::tcflag_t = 0o02003600000; -pub const CBAUDEX: crate::tcflag_t = 0o010000; -pub const VSWTC: usize = 7; -pub const OLCUC: crate::tcflag_t = 0o000002; -pub const NLDLY: crate::tcflag_t = 0o000400; -pub const CRDLY: crate::tcflag_t = 0o003000; -pub const TABDLY: crate::tcflag_t = 0o014000; -pub const BSDLY: crate::tcflag_t = 0o020000; -pub const FFDLY: crate::tcflag_t = 0o100000; -pub const VTDLY: crate::tcflag_t = 0o040000; -pub const XTABS: crate::tcflag_t = 0o014000; - -pub const B0: crate::speed_t = 0o000000; -pub const B50: crate::speed_t = 0o000001; -pub const B75: crate::speed_t = 0o000002; -pub const B110: crate::speed_t = 0o000003; -pub const B134: crate::speed_t = 0o000004; -pub const B150: crate::speed_t = 0o000005; -pub const B200: crate::speed_t = 0o000006; -pub const B300: crate::speed_t = 0o000007; -pub const B600: crate::speed_t = 0o000010; -pub const B1200: crate::speed_t = 0o000011; -pub const B1800: crate::speed_t = 0o000012; -pub const B2400: crate::speed_t = 0o000013; -pub const B4800: crate::speed_t = 0o000014; -pub const B9600: crate::speed_t = 0o000015; -pub const B19200: crate::speed_t = 0o000016; -pub const B38400: crate::speed_t = 0o000017; -pub const EXTA: crate::speed_t = B19200; -pub const EXTB: crate::speed_t = B38400; -pub const B57600: crate::speed_t = 0o010001; -pub const B115200: crate::speed_t = 0o010002; -pub const B230400: crate::speed_t = 0o010003; -pub const B460800: crate::speed_t = 0o010004; -pub const B500000: crate::speed_t = 0o010005; -pub const B576000: crate::speed_t = 0o010006; -pub const B921600: crate::speed_t = 0o010007; -pub const B1000000: crate::speed_t = 0o010010; -pub const B1152000: crate::speed_t = 0o010011; -pub const B1500000: crate::speed_t = 0o010012; -pub const B2000000: crate::speed_t = 0o010013; -pub const B2500000: crate::speed_t = 0o010014; -pub const B3000000: 
crate::speed_t = 0o010015; -pub const B3500000: crate::speed_t = 0o010016; -pub const B4000000: crate::speed_t = 0o010017; - -pub const VEOL: usize = 11; -pub const VEOL2: usize = 16; -pub const VMIN: usize = 6; -pub const IEXTEN: crate::tcflag_t = 0x00008000; -pub const TOSTOP: crate::tcflag_t = 0x00000100; -pub const FLUSHO: crate::tcflag_t = 0x00001000; -pub const EXTPROC: crate::tcflag_t = 0x00010000; - -// offsets in user_regs_structs, from sys/reg.h -pub const R15: c_int = 0; -pub const R14: c_int = 1; -pub const R13: c_int = 2; -pub const R12: c_int = 3; -pub const RBP: c_int = 4; -pub const RBX: c_int = 5; -pub const R11: c_int = 6; -pub const R10: c_int = 7; -pub const R9: c_int = 8; -pub const R8: c_int = 9; -pub const RAX: c_int = 10; -pub const RCX: c_int = 11; -pub const RDX: c_int = 12; -pub const RSI: c_int = 13; -pub const RDI: c_int = 14; -pub const ORIG_RAX: c_int = 15; -pub const RIP: c_int = 16; -pub const CS: c_int = 17; -pub const EFLAGS: c_int = 18; -pub const RSP: c_int = 19; -pub const SS: c_int = 20; -pub const FS_BASE: c_int = 21; -pub const GS_BASE: c_int = 22; -pub const DS: c_int = 23; -pub const ES: c_int = 24; -pub const FS: c_int = 25; -pub const GS: c_int = 26; - -// offsets in mcontext_t.gregs from sys/ucontext.h -pub const REG_R8: c_int = 0; -pub const REG_R9: c_int = 1; -pub const REG_R10: c_int = 2; -pub const REG_R11: c_int = 3; -pub const REG_R12: c_int = 4; -pub const REG_R13: c_int = 5; -pub const REG_R14: c_int = 6; -pub const REG_R15: c_int = 7; -pub const REG_RDI: c_int = 8; -pub const REG_RSI: c_int = 9; -pub const REG_RBP: c_int = 10; -pub const REG_RBX: c_int = 11; -pub const REG_RDX: c_int = 12; -pub const REG_RAX: c_int = 13; -pub const REG_RCX: c_int = 14; -pub const REG_RSP: c_int = 15; -pub const REG_RIP: c_int = 16; -pub const REG_EFL: c_int = 17; -pub const REG_CSGSFS: c_int = 18; -pub const REG_ERR: c_int = 19; -pub const REG_TRAPNO: c_int = 20; -pub const REG_OLDMASK: c_int = 21; -pub const REG_CR2: c_int = 22; - -extern "C" { - pub fn getcontext(ucp: *mut ucontext_t) -> c_int; - pub fn setcontext(ucp: *const ucontext_t) -> c_int; - pub fn makecontext(ucp: *mut ucontext_t, func: extern "C" fn(), argc: c_int, ...); - pub fn swapcontext(uocp: *mut ucontext_t, ucp: *const ucontext_t) -> c_int; -} - -cfg_if! { - if #[cfg(target_pointer_width = "32")] { - mod x32; - pub use self::x32::*; - } else { - mod not_x32; - pub use self::not_x32::*; - } -} diff --git a/vendor/libc/src/unix/linux_like/linux/gnu/b64/x86_64/not_x32.rs b/vendor/libc/src/unix/linux_like/linux/gnu/b64/x86_64/not_x32.rs deleted file mode 100644 index 27b96a60aabd83..00000000000000 --- a/vendor/libc/src/unix/linux_like/linux/gnu/b64/x86_64/not_x32.rs +++ /dev/null @@ -1,446 +0,0 @@ -use crate::prelude::*; -use crate::pthread_mutex_t; - -s! 
{ - pub struct statvfs { - pub f_bsize: c_ulong, - pub f_frsize: c_ulong, - pub f_blocks: crate::fsblkcnt_t, - pub f_bfree: crate::fsblkcnt_t, - pub f_bavail: crate::fsblkcnt_t, - pub f_files: crate::fsfilcnt_t, - pub f_ffree: crate::fsfilcnt_t, - pub f_favail: crate::fsfilcnt_t, - pub f_fsid: c_ulong, - pub f_flag: c_ulong, - pub f_namemax: c_ulong, - __f_spare: [c_int; 6], - } -} - -pub const __SIZEOF_PTHREAD_MUTEX_T: usize = 40; -pub const __SIZEOF_PTHREAD_RWLOCK_T: usize = 56; -pub const __SIZEOF_PTHREAD_BARRIER_T: usize = 32; - -#[cfg(target_endian = "little")] -pub const PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP: crate::pthread_mutex_t = pthread_mutex_t { - size: [ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ], -}; -#[cfg(target_endian = "little")] -pub const PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP: crate::pthread_mutex_t = pthread_mutex_t { - size: [ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ], -}; -#[cfg(target_endian = "little")] -pub const PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP: crate::pthread_mutex_t = pthread_mutex_t { - size: [ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ], -}; -#[cfg(target_endian = "big")] -pub const PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP: crate::pthread_mutex_t = pthread_mutex_t { - size: [ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ], -}; -#[cfg(target_endian = "big")] -pub const PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP: crate::pthread_mutex_t = pthread_mutex_t { - size: [ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ], -}; -#[cfg(target_endian = "big")] -pub const PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP: crate::pthread_mutex_t = pthread_mutex_t { - size: [ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ], -}; - -// Syscall table - -pub const SYS_read: c_long = 0; -pub const SYS_write: c_long = 1; -pub const SYS_open: c_long = 2; -pub const SYS_close: c_long = 3; -pub const SYS_stat: c_long = 4; -pub const SYS_fstat: c_long = 5; -pub const SYS_lstat: c_long = 6; -pub const SYS_poll: c_long = 7; -pub const SYS_lseek: c_long = 8; -pub const SYS_mmap: c_long = 9; -pub const SYS_mprotect: c_long = 10; -pub const SYS_munmap: c_long = 11; -pub const SYS_brk: c_long = 12; -pub const SYS_rt_sigaction: c_long = 13; -pub const SYS_rt_sigprocmask: c_long = 14; -pub const SYS_rt_sigreturn: c_long = 15; -pub const SYS_ioctl: c_long = 16; -pub const SYS_pread64: c_long = 17; -pub const SYS_pwrite64: c_long = 18; -pub const SYS_readv: c_long = 19; -pub const SYS_writev: c_long = 20; -pub const SYS_access: c_long = 21; -pub const SYS_pipe: c_long = 22; -pub const SYS_select: c_long = 23; -pub const SYS_sched_yield: c_long = 24; -pub const SYS_mremap: c_long = 25; -pub const SYS_msync: c_long = 26; -pub const SYS_mincore: c_long = 27; -pub const SYS_madvise: c_long = 28; -pub const SYS_shmget: c_long = 29; -pub const SYS_shmat: c_long = 30; -pub const SYS_shmctl: c_long = 31; -pub const SYS_dup: c_long = 32; -pub const SYS_dup2: c_long = 33; -pub const SYS_pause: c_long = 34; -pub const SYS_nanosleep: c_long = 35; -pub const SYS_getitimer: c_long = 36; -pub const SYS_alarm: c_long = 37; -pub const 
SYS_setitimer: c_long = 38; -pub const SYS_getpid: c_long = 39; -pub const SYS_sendfile: c_long = 40; -pub const SYS_socket: c_long = 41; -pub const SYS_connect: c_long = 42; -pub const SYS_accept: c_long = 43; -pub const SYS_sendto: c_long = 44; -pub const SYS_recvfrom: c_long = 45; -pub const SYS_sendmsg: c_long = 46; -pub const SYS_recvmsg: c_long = 47; -pub const SYS_shutdown: c_long = 48; -pub const SYS_bind: c_long = 49; -pub const SYS_listen: c_long = 50; -pub const SYS_getsockname: c_long = 51; -pub const SYS_getpeername: c_long = 52; -pub const SYS_socketpair: c_long = 53; -pub const SYS_setsockopt: c_long = 54; -pub const SYS_getsockopt: c_long = 55; -pub const SYS_clone: c_long = 56; -pub const SYS_fork: c_long = 57; -pub const SYS_vfork: c_long = 58; -pub const SYS_execve: c_long = 59; -pub const SYS_exit: c_long = 60; -pub const SYS_wait4: c_long = 61; -pub const SYS_kill: c_long = 62; -pub const SYS_uname: c_long = 63; -pub const SYS_semget: c_long = 64; -pub const SYS_semop: c_long = 65; -pub const SYS_semctl: c_long = 66; -pub const SYS_shmdt: c_long = 67; -pub const SYS_msgget: c_long = 68; -pub const SYS_msgsnd: c_long = 69; -pub const SYS_msgrcv: c_long = 70; -pub const SYS_msgctl: c_long = 71; -pub const SYS_fcntl: c_long = 72; -pub const SYS_flock: c_long = 73; -pub const SYS_fsync: c_long = 74; -pub const SYS_fdatasync: c_long = 75; -pub const SYS_truncate: c_long = 76; -pub const SYS_ftruncate: c_long = 77; -pub const SYS_getdents: c_long = 78; -pub const SYS_getcwd: c_long = 79; -pub const SYS_chdir: c_long = 80; -pub const SYS_fchdir: c_long = 81; -pub const SYS_rename: c_long = 82; -pub const SYS_mkdir: c_long = 83; -pub const SYS_rmdir: c_long = 84; -pub const SYS_creat: c_long = 85; -pub const SYS_link: c_long = 86; -pub const SYS_unlink: c_long = 87; -pub const SYS_symlink: c_long = 88; -pub const SYS_readlink: c_long = 89; -pub const SYS_chmod: c_long = 90; -pub const SYS_fchmod: c_long = 91; -pub const SYS_chown: c_long = 92; -pub const SYS_fchown: c_long = 93; -pub const SYS_lchown: c_long = 94; -pub const SYS_umask: c_long = 95; -pub const SYS_gettimeofday: c_long = 96; -pub const SYS_getrlimit: c_long = 97; -pub const SYS_getrusage: c_long = 98; -pub const SYS_sysinfo: c_long = 99; -pub const SYS_times: c_long = 100; -pub const SYS_ptrace: c_long = 101; -pub const SYS_getuid: c_long = 102; -pub const SYS_syslog: c_long = 103; -pub const SYS_getgid: c_long = 104; -pub const SYS_setuid: c_long = 105; -pub const SYS_setgid: c_long = 106; -pub const SYS_geteuid: c_long = 107; -pub const SYS_getegid: c_long = 108; -pub const SYS_setpgid: c_long = 109; -pub const SYS_getppid: c_long = 110; -pub const SYS_getpgrp: c_long = 111; -pub const SYS_setsid: c_long = 112; -pub const SYS_setreuid: c_long = 113; -pub const SYS_setregid: c_long = 114; -pub const SYS_getgroups: c_long = 115; -pub const SYS_setgroups: c_long = 116; -pub const SYS_setresuid: c_long = 117; -pub const SYS_getresuid: c_long = 118; -pub const SYS_setresgid: c_long = 119; -pub const SYS_getresgid: c_long = 120; -pub const SYS_getpgid: c_long = 121; -pub const SYS_setfsuid: c_long = 122; -pub const SYS_setfsgid: c_long = 123; -pub const SYS_getsid: c_long = 124; -pub const SYS_capget: c_long = 125; -pub const SYS_capset: c_long = 126; -pub const SYS_rt_sigpending: c_long = 127; -pub const SYS_rt_sigtimedwait: c_long = 128; -pub const SYS_rt_sigqueueinfo: c_long = 129; -pub const SYS_rt_sigsuspend: c_long = 130; -pub const SYS_sigaltstack: c_long = 131; -pub const SYS_utime: c_long = 132; -pub const 
SYS_mknod: c_long = 133; -pub const SYS_uselib: c_long = 134; -pub const SYS_personality: c_long = 135; -pub const SYS_ustat: c_long = 136; -pub const SYS_statfs: c_long = 137; -pub const SYS_fstatfs: c_long = 138; -pub const SYS_sysfs: c_long = 139; -pub const SYS_getpriority: c_long = 140; -pub const SYS_setpriority: c_long = 141; -pub const SYS_sched_setparam: c_long = 142; -pub const SYS_sched_getparam: c_long = 143; -pub const SYS_sched_setscheduler: c_long = 144; -pub const SYS_sched_getscheduler: c_long = 145; -pub const SYS_sched_get_priority_max: c_long = 146; -pub const SYS_sched_get_priority_min: c_long = 147; -pub const SYS_sched_rr_get_interval: c_long = 148; -pub const SYS_mlock: c_long = 149; -pub const SYS_munlock: c_long = 150; -pub const SYS_mlockall: c_long = 151; -pub const SYS_munlockall: c_long = 152; -pub const SYS_vhangup: c_long = 153; -pub const SYS_modify_ldt: c_long = 154; -pub const SYS_pivot_root: c_long = 155; -pub const SYS__sysctl: c_long = 156; -pub const SYS_prctl: c_long = 157; -pub const SYS_arch_prctl: c_long = 158; -pub const SYS_adjtimex: c_long = 159; -pub const SYS_setrlimit: c_long = 160; -pub const SYS_chroot: c_long = 161; -pub const SYS_sync: c_long = 162; -pub const SYS_acct: c_long = 163; -pub const SYS_settimeofday: c_long = 164; -pub const SYS_mount: c_long = 165; -pub const SYS_umount2: c_long = 166; -pub const SYS_swapon: c_long = 167; -pub const SYS_swapoff: c_long = 168; -pub const SYS_reboot: c_long = 169; -pub const SYS_sethostname: c_long = 170; -pub const SYS_setdomainname: c_long = 171; -pub const SYS_iopl: c_long = 172; -pub const SYS_ioperm: c_long = 173; -#[deprecated(since = "0.2.70", note = "Functional up to 2.6 kernel")] -pub const SYS_create_module: c_long = 174; -pub const SYS_init_module: c_long = 175; -pub const SYS_delete_module: c_long = 176; -#[deprecated(since = "0.2.70", note = "Functional up to 2.6 kernel")] -pub const SYS_get_kernel_syms: c_long = 177; -#[deprecated(since = "0.2.70", note = "Functional up to 2.6 kernel")] -pub const SYS_query_module: c_long = 178; -pub const SYS_quotactl: c_long = 179; -pub const SYS_nfsservctl: c_long = 180; -pub const SYS_getpmsg: c_long = 181; -pub const SYS_putpmsg: c_long = 182; -pub const SYS_afs_syscall: c_long = 183; -pub const SYS_tuxcall: c_long = 184; -pub const SYS_security: c_long = 185; -pub const SYS_gettid: c_long = 186; -pub const SYS_readahead: c_long = 187; -pub const SYS_setxattr: c_long = 188; -pub const SYS_lsetxattr: c_long = 189; -pub const SYS_fsetxattr: c_long = 190; -pub const SYS_getxattr: c_long = 191; -pub const SYS_lgetxattr: c_long = 192; -pub const SYS_fgetxattr: c_long = 193; -pub const SYS_listxattr: c_long = 194; -pub const SYS_llistxattr: c_long = 195; -pub const SYS_flistxattr: c_long = 196; -pub const SYS_removexattr: c_long = 197; -pub const SYS_lremovexattr: c_long = 198; -pub const SYS_fremovexattr: c_long = 199; -pub const SYS_tkill: c_long = 200; -pub const SYS_time: c_long = 201; -pub const SYS_futex: c_long = 202; -pub const SYS_sched_setaffinity: c_long = 203; -pub const SYS_sched_getaffinity: c_long = 204; -pub const SYS_set_thread_area: c_long = 205; -pub const SYS_io_setup: c_long = 206; -pub const SYS_io_destroy: c_long = 207; -pub const SYS_io_getevents: c_long = 208; -pub const SYS_io_submit: c_long = 209; -pub const SYS_io_cancel: c_long = 210; -pub const SYS_get_thread_area: c_long = 211; -pub const SYS_lookup_dcookie: c_long = 212; -pub const SYS_epoll_create: c_long = 213; -pub const SYS_epoll_ctl_old: c_long = 214; -pub 
const SYS_epoll_wait_old: c_long = 215; -pub const SYS_remap_file_pages: c_long = 216; -pub const SYS_getdents64: c_long = 217; -pub const SYS_set_tid_address: c_long = 218; -pub const SYS_restart_syscall: c_long = 219; -pub const SYS_semtimedop: c_long = 220; -pub const SYS_fadvise64: c_long = 221; -pub const SYS_timer_create: c_long = 222; -pub const SYS_timer_settime: c_long = 223; -pub const SYS_timer_gettime: c_long = 224; -pub const SYS_timer_getoverrun: c_long = 225; -pub const SYS_timer_delete: c_long = 226; -pub const SYS_clock_settime: c_long = 227; -pub const SYS_clock_gettime: c_long = 228; -pub const SYS_clock_getres: c_long = 229; -pub const SYS_clock_nanosleep: c_long = 230; -pub const SYS_exit_group: c_long = 231; -pub const SYS_epoll_wait: c_long = 232; -pub const SYS_epoll_ctl: c_long = 233; -pub const SYS_tgkill: c_long = 234; -pub const SYS_utimes: c_long = 235; -pub const SYS_vserver: c_long = 236; -pub const SYS_mbind: c_long = 237; -pub const SYS_set_mempolicy: c_long = 238; -pub const SYS_get_mempolicy: c_long = 239; -pub const SYS_mq_open: c_long = 240; -pub const SYS_mq_unlink: c_long = 241; -pub const SYS_mq_timedsend: c_long = 242; -pub const SYS_mq_timedreceive: c_long = 243; -pub const SYS_mq_notify: c_long = 244; -pub const SYS_mq_getsetattr: c_long = 245; -pub const SYS_kexec_load: c_long = 246; -pub const SYS_waitid: c_long = 247; -pub const SYS_add_key: c_long = 248; -pub const SYS_request_key: c_long = 249; -pub const SYS_keyctl: c_long = 250; -pub const SYS_ioprio_set: c_long = 251; -pub const SYS_ioprio_get: c_long = 252; -pub const SYS_inotify_init: c_long = 253; -pub const SYS_inotify_add_watch: c_long = 254; -pub const SYS_inotify_rm_watch: c_long = 255; -pub const SYS_migrate_pages: c_long = 256; -pub const SYS_openat: c_long = 257; -pub const SYS_mkdirat: c_long = 258; -pub const SYS_mknodat: c_long = 259; -pub const SYS_fchownat: c_long = 260; -pub const SYS_futimesat: c_long = 261; -pub const SYS_newfstatat: c_long = 262; -pub const SYS_unlinkat: c_long = 263; -pub const SYS_renameat: c_long = 264; -pub const SYS_linkat: c_long = 265; -pub const SYS_symlinkat: c_long = 266; -pub const SYS_readlinkat: c_long = 267; -pub const SYS_fchmodat: c_long = 268; -pub const SYS_faccessat: c_long = 269; -pub const SYS_pselect6: c_long = 270; -pub const SYS_ppoll: c_long = 271; -pub const SYS_unshare: c_long = 272; -pub const SYS_set_robust_list: c_long = 273; -pub const SYS_get_robust_list: c_long = 274; -pub const SYS_splice: c_long = 275; -pub const SYS_tee: c_long = 276; -pub const SYS_sync_file_range: c_long = 277; -pub const SYS_vmsplice: c_long = 278; -pub const SYS_move_pages: c_long = 279; -pub const SYS_utimensat: c_long = 280; -pub const SYS_epoll_pwait: c_long = 281; -pub const SYS_signalfd: c_long = 282; -pub const SYS_timerfd_create: c_long = 283; -pub const SYS_eventfd: c_long = 284; -pub const SYS_fallocate: c_long = 285; -pub const SYS_timerfd_settime: c_long = 286; -pub const SYS_timerfd_gettime: c_long = 287; -pub const SYS_accept4: c_long = 288; -pub const SYS_signalfd4: c_long = 289; -pub const SYS_eventfd2: c_long = 290; -pub const SYS_epoll_create1: c_long = 291; -pub const SYS_dup3: c_long = 292; -pub const SYS_pipe2: c_long = 293; -pub const SYS_inotify_init1: c_long = 294; -pub const SYS_preadv: c_long = 295; -pub const SYS_pwritev: c_long = 296; -pub const SYS_rt_tgsigqueueinfo: c_long = 297; -pub const SYS_perf_event_open: c_long = 298; -pub const SYS_recvmmsg: c_long = 299; -pub const SYS_fanotify_init: c_long = 300; -pub const 
SYS_fanotify_mark: c_long = 301; -pub const SYS_prlimit64: c_long = 302; -pub const SYS_name_to_handle_at: c_long = 303; -pub const SYS_open_by_handle_at: c_long = 304; -pub const SYS_clock_adjtime: c_long = 305; -pub const SYS_syncfs: c_long = 306; -pub const SYS_sendmmsg: c_long = 307; -pub const SYS_setns: c_long = 308; -pub const SYS_getcpu: c_long = 309; -pub const SYS_process_vm_readv: c_long = 310; -pub const SYS_process_vm_writev: c_long = 311; -pub const SYS_kcmp: c_long = 312; -pub const SYS_finit_module: c_long = 313; -pub const SYS_sched_setattr: c_long = 314; -pub const SYS_sched_getattr: c_long = 315; -pub const SYS_renameat2: c_long = 316; -pub const SYS_seccomp: c_long = 317; -pub const SYS_getrandom: c_long = 318; -pub const SYS_memfd_create: c_long = 319; -pub const SYS_kexec_file_load: c_long = 320; -pub const SYS_bpf: c_long = 321; -pub const SYS_execveat: c_long = 322; -pub const SYS_userfaultfd: c_long = 323; -pub const SYS_membarrier: c_long = 324; -pub const SYS_mlock2: c_long = 325; -pub const SYS_copy_file_range: c_long = 326; -pub const SYS_preadv2: c_long = 327; -pub const SYS_pwritev2: c_long = 328; -pub const SYS_pkey_mprotect: c_long = 329; -pub const SYS_pkey_alloc: c_long = 330; -pub const SYS_pkey_free: c_long = 331; -pub const SYS_statx: c_long = 332; -pub const SYS_rseq: c_long = 334; -pub const SYS_pidfd_send_signal: c_long = 424; -pub const SYS_io_uring_setup: c_long = 425; -pub const SYS_io_uring_enter: c_long = 426; -pub const SYS_io_uring_register: c_long = 427; -pub const SYS_open_tree: c_long = 428; -pub const SYS_move_mount: c_long = 429; -pub const SYS_fsopen: c_long = 430; -pub const SYS_fsconfig: c_long = 431; -pub const SYS_fsmount: c_long = 432; -pub const SYS_fspick: c_long = 433; -pub const SYS_pidfd_open: c_long = 434; -pub const SYS_clone3: c_long = 435; -pub const SYS_close_range: c_long = 436; -pub const SYS_openat2: c_long = 437; -pub const SYS_pidfd_getfd: c_long = 438; -pub const SYS_faccessat2: c_long = 439; -pub const SYS_process_madvise: c_long = 440; -pub const SYS_epoll_pwait2: c_long = 441; -pub const SYS_mount_setattr: c_long = 442; -pub const SYS_quotactl_fd: c_long = 443; -pub const SYS_landlock_create_ruleset: c_long = 444; -pub const SYS_landlock_add_rule: c_long = 445; -pub const SYS_landlock_restrict_self: c_long = 446; -pub const SYS_memfd_secret: c_long = 447; -pub const SYS_process_mrelease: c_long = 448; -pub const SYS_futex_waitv: c_long = 449; -pub const SYS_set_mempolicy_home_node: c_long = 450; -pub const SYS_fchmodat2: c_long = 452; -pub const SYS_mseal: c_long = 462; - -extern "C" { - pub fn sysctl( - name: *mut c_int, - namelen: c_int, - oldp: *mut c_void, - oldlenp: *mut size_t, - newp: *mut c_void, - newlen: size_t, - ) -> c_int; -} diff --git a/vendor/libc/src/unix/linux_like/linux/gnu/b64/x86_64/x32.rs b/vendor/libc/src/unix/linux_like/linux/gnu/b64/x86_64/x32.rs deleted file mode 100644 index 1a1cd34be035f7..00000000000000 --- a/vendor/libc/src/unix/linux_like/linux/gnu/b64/x86_64/x32.rs +++ /dev/null @@ -1,398 +0,0 @@ -use crate::prelude::*; -use crate::pthread_mutex_t; - -s! 
{ - pub struct statvfs { - pub f_bsize: c_ulong, - pub f_frsize: c_ulong, - pub f_blocks: crate::fsblkcnt_t, - pub f_bfree: crate::fsblkcnt_t, - pub f_bavail: crate::fsblkcnt_t, - pub f_files: crate::fsfilcnt_t, - pub f_ffree: crate::fsfilcnt_t, - pub f_favail: crate::fsfilcnt_t, - pub f_fsid: c_ulong, - pub f_flag: c_ulong, - pub f_namemax: c_ulong, - __f_spare: [c_int; 6], - } -} - -pub const __SIZEOF_PTHREAD_MUTEX_T: usize = 32; -pub const __SIZEOF_PTHREAD_RWLOCK_T: usize = 44; -pub const __SIZEOF_PTHREAD_BARRIER_T: usize = 20; - -pub const PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP: crate::pthread_mutex_t = pthread_mutex_t { - size: [ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, - ], -}; -pub const PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP: crate::pthread_mutex_t = pthread_mutex_t { - size: [ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, - ], -}; -pub const PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP: crate::pthread_mutex_t = pthread_mutex_t { - size: [ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, - ], -}; - -// Syscall table - -pub const __X32_SYSCALL_BIT: c_long = 0x40000000; - -pub const SYS_read: c_long = __X32_SYSCALL_BIT + 0; -pub const SYS_write: c_long = __X32_SYSCALL_BIT + 1; -pub const SYS_open: c_long = __X32_SYSCALL_BIT + 2; -pub const SYS_close: c_long = __X32_SYSCALL_BIT + 3; -pub const SYS_stat: c_long = __X32_SYSCALL_BIT + 4; -pub const SYS_fstat: c_long = __X32_SYSCALL_BIT + 5; -pub const SYS_lstat: c_long = __X32_SYSCALL_BIT + 6; -pub const SYS_poll: c_long = __X32_SYSCALL_BIT + 7; -pub const SYS_lseek: c_long = __X32_SYSCALL_BIT + 8; -pub const SYS_mmap: c_long = __X32_SYSCALL_BIT + 9; -pub const SYS_mprotect: c_long = __X32_SYSCALL_BIT + 10; -pub const SYS_munmap: c_long = __X32_SYSCALL_BIT + 11; -pub const SYS_brk: c_long = __X32_SYSCALL_BIT + 12; -pub const SYS_rt_sigprocmask: c_long = __X32_SYSCALL_BIT + 14; -pub const SYS_pread64: c_long = __X32_SYSCALL_BIT + 17; -pub const SYS_pwrite64: c_long = __X32_SYSCALL_BIT + 18; -pub const SYS_access: c_long = __X32_SYSCALL_BIT + 21; -pub const SYS_pipe: c_long = __X32_SYSCALL_BIT + 22; -pub const SYS_select: c_long = __X32_SYSCALL_BIT + 23; -pub const SYS_sched_yield: c_long = __X32_SYSCALL_BIT + 24; -pub const SYS_mremap: c_long = __X32_SYSCALL_BIT + 25; -pub const SYS_msync: c_long = __X32_SYSCALL_BIT + 26; -pub const SYS_mincore: c_long = __X32_SYSCALL_BIT + 27; -pub const SYS_madvise: c_long = __X32_SYSCALL_BIT + 28; -pub const SYS_shmget: c_long = __X32_SYSCALL_BIT + 29; -pub const SYS_shmat: c_long = __X32_SYSCALL_BIT + 30; -pub const SYS_shmctl: c_long = __X32_SYSCALL_BIT + 31; -pub const SYS_dup: c_long = __X32_SYSCALL_BIT + 32; -pub const SYS_dup2: c_long = __X32_SYSCALL_BIT + 33; -pub const SYS_pause: c_long = __X32_SYSCALL_BIT + 34; -pub const SYS_nanosleep: c_long = __X32_SYSCALL_BIT + 35; -pub const SYS_getitimer: c_long = __X32_SYSCALL_BIT + 36; -pub const SYS_alarm: c_long = __X32_SYSCALL_BIT + 37; -pub const SYS_setitimer: c_long = __X32_SYSCALL_BIT + 38; -pub const SYS_getpid: c_long = __X32_SYSCALL_BIT + 39; -pub const SYS_sendfile: c_long = __X32_SYSCALL_BIT + 40; -pub const SYS_socket: c_long = __X32_SYSCALL_BIT + 41; -pub const SYS_connect: c_long = __X32_SYSCALL_BIT + 42; -pub const SYS_accept: c_long = __X32_SYSCALL_BIT + 43; -pub const SYS_sendto: c_long = __X32_SYSCALL_BIT + 44; -pub const SYS_shutdown: c_long = __X32_SYSCALL_BIT + 48; -pub const 
SYS_bind: c_long = __X32_SYSCALL_BIT + 49; -pub const SYS_listen: c_long = __X32_SYSCALL_BIT + 50; -pub const SYS_getsockname: c_long = __X32_SYSCALL_BIT + 51; -pub const SYS_getpeername: c_long = __X32_SYSCALL_BIT + 52; -pub const SYS_socketpair: c_long = __X32_SYSCALL_BIT + 53; -pub const SYS_clone: c_long = __X32_SYSCALL_BIT + 56; -pub const SYS_fork: c_long = __X32_SYSCALL_BIT + 57; -pub const SYS_vfork: c_long = __X32_SYSCALL_BIT + 58; -pub const SYS_exit: c_long = __X32_SYSCALL_BIT + 60; -pub const SYS_wait4: c_long = __X32_SYSCALL_BIT + 61; -pub const SYS_kill: c_long = __X32_SYSCALL_BIT + 62; -pub const SYS_uname: c_long = __X32_SYSCALL_BIT + 63; -pub const SYS_semget: c_long = __X32_SYSCALL_BIT + 64; -pub const SYS_semop: c_long = __X32_SYSCALL_BIT + 65; -pub const SYS_semctl: c_long = __X32_SYSCALL_BIT + 66; -pub const SYS_shmdt: c_long = __X32_SYSCALL_BIT + 67; -pub const SYS_msgget: c_long = __X32_SYSCALL_BIT + 68; -pub const SYS_msgsnd: c_long = __X32_SYSCALL_BIT + 69; -pub const SYS_msgrcv: c_long = __X32_SYSCALL_BIT + 70; -pub const SYS_msgctl: c_long = __X32_SYSCALL_BIT + 71; -pub const SYS_fcntl: c_long = __X32_SYSCALL_BIT + 72; -pub const SYS_flock: c_long = __X32_SYSCALL_BIT + 73; -pub const SYS_fsync: c_long = __X32_SYSCALL_BIT + 74; -pub const SYS_fdatasync: c_long = __X32_SYSCALL_BIT + 75; -pub const SYS_truncate: c_long = __X32_SYSCALL_BIT + 76; -pub const SYS_ftruncate: c_long = __X32_SYSCALL_BIT + 77; -pub const SYS_getdents: c_long = __X32_SYSCALL_BIT + 78; -pub const SYS_getcwd: c_long = __X32_SYSCALL_BIT + 79; -pub const SYS_chdir: c_long = __X32_SYSCALL_BIT + 80; -pub const SYS_fchdir: c_long = __X32_SYSCALL_BIT + 81; -pub const SYS_rename: c_long = __X32_SYSCALL_BIT + 82; -pub const SYS_mkdir: c_long = __X32_SYSCALL_BIT + 83; -pub const SYS_rmdir: c_long = __X32_SYSCALL_BIT + 84; -pub const SYS_creat: c_long = __X32_SYSCALL_BIT + 85; -pub const SYS_link: c_long = __X32_SYSCALL_BIT + 86; -pub const SYS_unlink: c_long = __X32_SYSCALL_BIT + 87; -pub const SYS_symlink: c_long = __X32_SYSCALL_BIT + 88; -pub const SYS_readlink: c_long = __X32_SYSCALL_BIT + 89; -pub const SYS_chmod: c_long = __X32_SYSCALL_BIT + 90; -pub const SYS_fchmod: c_long = __X32_SYSCALL_BIT + 91; -pub const SYS_chown: c_long = __X32_SYSCALL_BIT + 92; -pub const SYS_fchown: c_long = __X32_SYSCALL_BIT + 93; -pub const SYS_lchown: c_long = __X32_SYSCALL_BIT + 94; -pub const SYS_umask: c_long = __X32_SYSCALL_BIT + 95; -pub const SYS_gettimeofday: c_long = __X32_SYSCALL_BIT + 96; -pub const SYS_getrlimit: c_long = __X32_SYSCALL_BIT + 97; -pub const SYS_getrusage: c_long = __X32_SYSCALL_BIT + 98; -pub const SYS_sysinfo: c_long = __X32_SYSCALL_BIT + 99; -pub const SYS_times: c_long = __X32_SYSCALL_BIT + 100; -pub const SYS_getuid: c_long = __X32_SYSCALL_BIT + 102; -pub const SYS_syslog: c_long = __X32_SYSCALL_BIT + 103; -pub const SYS_getgid: c_long = __X32_SYSCALL_BIT + 104; -pub const SYS_setuid: c_long = __X32_SYSCALL_BIT + 105; -pub const SYS_setgid: c_long = __X32_SYSCALL_BIT + 106; -pub const SYS_geteuid: c_long = __X32_SYSCALL_BIT + 107; -pub const SYS_getegid: c_long = __X32_SYSCALL_BIT + 108; -pub const SYS_setpgid: c_long = __X32_SYSCALL_BIT + 109; -pub const SYS_getppid: c_long = __X32_SYSCALL_BIT + 110; -pub const SYS_getpgrp: c_long = __X32_SYSCALL_BIT + 111; -pub const SYS_setsid: c_long = __X32_SYSCALL_BIT + 112; -pub const SYS_setreuid: c_long = __X32_SYSCALL_BIT + 113; -pub const SYS_setregid: c_long = __X32_SYSCALL_BIT + 114; -pub const SYS_getgroups: c_long = __X32_SYSCALL_BIT + 
115; -pub const SYS_setgroups: c_long = __X32_SYSCALL_BIT + 116; -pub const SYS_setresuid: c_long = __X32_SYSCALL_BIT + 117; -pub const SYS_getresuid: c_long = __X32_SYSCALL_BIT + 118; -pub const SYS_setresgid: c_long = __X32_SYSCALL_BIT + 119; -pub const SYS_getresgid: c_long = __X32_SYSCALL_BIT + 120; -pub const SYS_getpgid: c_long = __X32_SYSCALL_BIT + 121; -pub const SYS_setfsuid: c_long = __X32_SYSCALL_BIT + 122; -pub const SYS_setfsgid: c_long = __X32_SYSCALL_BIT + 123; -pub const SYS_getsid: c_long = __X32_SYSCALL_BIT + 124; -pub const SYS_capget: c_long = __X32_SYSCALL_BIT + 125; -pub const SYS_capset: c_long = __X32_SYSCALL_BIT + 126; -pub const SYS_rt_sigsuspend: c_long = __X32_SYSCALL_BIT + 130; -pub const SYS_utime: c_long = __X32_SYSCALL_BIT + 132; -pub const SYS_mknod: c_long = __X32_SYSCALL_BIT + 133; -pub const SYS_personality: c_long = __X32_SYSCALL_BIT + 135; -pub const SYS_ustat: c_long = __X32_SYSCALL_BIT + 136; -pub const SYS_statfs: c_long = __X32_SYSCALL_BIT + 137; -pub const SYS_fstatfs: c_long = __X32_SYSCALL_BIT + 138; -pub const SYS_sysfs: c_long = __X32_SYSCALL_BIT + 139; -pub const SYS_getpriority: c_long = __X32_SYSCALL_BIT + 140; -pub const SYS_setpriority: c_long = __X32_SYSCALL_BIT + 141; -pub const SYS_sched_setparam: c_long = __X32_SYSCALL_BIT + 142; -pub const SYS_sched_getparam: c_long = __X32_SYSCALL_BIT + 143; -pub const SYS_sched_setscheduler: c_long = __X32_SYSCALL_BIT + 144; -pub const SYS_sched_getscheduler: c_long = __X32_SYSCALL_BIT + 145; -pub const SYS_sched_get_priority_max: c_long = __X32_SYSCALL_BIT + 146; -pub const SYS_sched_get_priority_min: c_long = __X32_SYSCALL_BIT + 147; -pub const SYS_sched_rr_get_interval: c_long = __X32_SYSCALL_BIT + 148; -pub const SYS_mlock: c_long = __X32_SYSCALL_BIT + 149; -pub const SYS_munlock: c_long = __X32_SYSCALL_BIT + 150; -pub const SYS_mlockall: c_long = __X32_SYSCALL_BIT + 151; -pub const SYS_munlockall: c_long = __X32_SYSCALL_BIT + 152; -pub const SYS_vhangup: c_long = __X32_SYSCALL_BIT + 153; -pub const SYS_modify_ldt: c_long = __X32_SYSCALL_BIT + 154; -pub const SYS_pivot_root: c_long = __X32_SYSCALL_BIT + 155; -pub const SYS_prctl: c_long = __X32_SYSCALL_BIT + 157; -pub const SYS_arch_prctl: c_long = __X32_SYSCALL_BIT + 158; -pub const SYS_adjtimex: c_long = __X32_SYSCALL_BIT + 159; -pub const SYS_setrlimit: c_long = __X32_SYSCALL_BIT + 160; -pub const SYS_chroot: c_long = __X32_SYSCALL_BIT + 161; -pub const SYS_sync: c_long = __X32_SYSCALL_BIT + 162; -pub const SYS_acct: c_long = __X32_SYSCALL_BIT + 163; -pub const SYS_settimeofday: c_long = __X32_SYSCALL_BIT + 164; -pub const SYS_mount: c_long = __X32_SYSCALL_BIT + 165; -pub const SYS_umount2: c_long = __X32_SYSCALL_BIT + 166; -pub const SYS_swapon: c_long = __X32_SYSCALL_BIT + 167; -pub const SYS_swapoff: c_long = __X32_SYSCALL_BIT + 168; -pub const SYS_reboot: c_long = __X32_SYSCALL_BIT + 169; -pub const SYS_sethostname: c_long = __X32_SYSCALL_BIT + 170; -pub const SYS_setdomainname: c_long = __X32_SYSCALL_BIT + 171; -pub const SYS_iopl: c_long = __X32_SYSCALL_BIT + 172; -pub const SYS_ioperm: c_long = __X32_SYSCALL_BIT + 173; -pub const SYS_init_module: c_long = __X32_SYSCALL_BIT + 175; -pub const SYS_delete_module: c_long = __X32_SYSCALL_BIT + 176; -pub const SYS_quotactl: c_long = __X32_SYSCALL_BIT + 179; -pub const SYS_getpmsg: c_long = __X32_SYSCALL_BIT + 181; -pub const SYS_putpmsg: c_long = __X32_SYSCALL_BIT + 182; -pub const SYS_afs_syscall: c_long = __X32_SYSCALL_BIT + 183; -pub const SYS_tuxcall: c_long = __X32_SYSCALL_BIT + 184; 
-pub const SYS_security: c_long = __X32_SYSCALL_BIT + 185; -pub const SYS_gettid: c_long = __X32_SYSCALL_BIT + 186; -pub const SYS_readahead: c_long = __X32_SYSCALL_BIT + 187; -pub const SYS_setxattr: c_long = __X32_SYSCALL_BIT + 188; -pub const SYS_lsetxattr: c_long = __X32_SYSCALL_BIT + 189; -pub const SYS_fsetxattr: c_long = __X32_SYSCALL_BIT + 190; -pub const SYS_getxattr: c_long = __X32_SYSCALL_BIT + 191; -pub const SYS_lgetxattr: c_long = __X32_SYSCALL_BIT + 192; -pub const SYS_fgetxattr: c_long = __X32_SYSCALL_BIT + 193; -pub const SYS_listxattr: c_long = __X32_SYSCALL_BIT + 194; -pub const SYS_llistxattr: c_long = __X32_SYSCALL_BIT + 195; -pub const SYS_flistxattr: c_long = __X32_SYSCALL_BIT + 196; -pub const SYS_removexattr: c_long = __X32_SYSCALL_BIT + 197; -pub const SYS_lremovexattr: c_long = __X32_SYSCALL_BIT + 198; -pub const SYS_fremovexattr: c_long = __X32_SYSCALL_BIT + 199; -pub const SYS_tkill: c_long = __X32_SYSCALL_BIT + 200; -pub const SYS_time: c_long = __X32_SYSCALL_BIT + 201; -pub const SYS_futex: c_long = __X32_SYSCALL_BIT + 202; -pub const SYS_sched_setaffinity: c_long = __X32_SYSCALL_BIT + 203; -pub const SYS_sched_getaffinity: c_long = __X32_SYSCALL_BIT + 204; -pub const SYS_io_destroy: c_long = __X32_SYSCALL_BIT + 207; -pub const SYS_io_getevents: c_long = __X32_SYSCALL_BIT + 208; -pub const SYS_io_cancel: c_long = __X32_SYSCALL_BIT + 210; -pub const SYS_lookup_dcookie: c_long = __X32_SYSCALL_BIT + 212; -pub const SYS_epoll_create: c_long = __X32_SYSCALL_BIT + 213; -pub const SYS_remap_file_pages: c_long = __X32_SYSCALL_BIT + 216; -pub const SYS_getdents64: c_long = __X32_SYSCALL_BIT + 217; -pub const SYS_set_tid_address: c_long = __X32_SYSCALL_BIT + 218; -pub const SYS_restart_syscall: c_long = __X32_SYSCALL_BIT + 219; -pub const SYS_semtimedop: c_long = __X32_SYSCALL_BIT + 220; -pub const SYS_fadvise64: c_long = __X32_SYSCALL_BIT + 221; -pub const SYS_timer_settime: c_long = __X32_SYSCALL_BIT + 223; -pub const SYS_timer_gettime: c_long = __X32_SYSCALL_BIT + 224; -pub const SYS_timer_getoverrun: c_long = __X32_SYSCALL_BIT + 225; -pub const SYS_timer_delete: c_long = __X32_SYSCALL_BIT + 226; -pub const SYS_clock_settime: c_long = __X32_SYSCALL_BIT + 227; -pub const SYS_clock_gettime: c_long = __X32_SYSCALL_BIT + 228; -pub const SYS_clock_getres: c_long = __X32_SYSCALL_BIT + 229; -pub const SYS_clock_nanosleep: c_long = __X32_SYSCALL_BIT + 230; -pub const SYS_exit_group: c_long = __X32_SYSCALL_BIT + 231; -pub const SYS_epoll_wait: c_long = __X32_SYSCALL_BIT + 232; -pub const SYS_epoll_ctl: c_long = __X32_SYSCALL_BIT + 233; -pub const SYS_tgkill: c_long = __X32_SYSCALL_BIT + 234; -pub const SYS_utimes: c_long = __X32_SYSCALL_BIT + 235; -pub const SYS_mbind: c_long = __X32_SYSCALL_BIT + 237; -pub const SYS_set_mempolicy: c_long = __X32_SYSCALL_BIT + 238; -pub const SYS_get_mempolicy: c_long = __X32_SYSCALL_BIT + 239; -pub const SYS_mq_open: c_long = __X32_SYSCALL_BIT + 240; -pub const SYS_mq_unlink: c_long = __X32_SYSCALL_BIT + 241; -pub const SYS_mq_timedsend: c_long = __X32_SYSCALL_BIT + 242; -pub const SYS_mq_timedreceive: c_long = __X32_SYSCALL_BIT + 243; -pub const SYS_mq_getsetattr: c_long = __X32_SYSCALL_BIT + 245; -pub const SYS_add_key: c_long = __X32_SYSCALL_BIT + 248; -pub const SYS_request_key: c_long = __X32_SYSCALL_BIT + 249; -pub const SYS_keyctl: c_long = __X32_SYSCALL_BIT + 250; -pub const SYS_ioprio_set: c_long = __X32_SYSCALL_BIT + 251; -pub const SYS_ioprio_get: c_long = __X32_SYSCALL_BIT + 252; -pub const SYS_inotify_init: c_long = 
__X32_SYSCALL_BIT + 253; -pub const SYS_inotify_add_watch: c_long = __X32_SYSCALL_BIT + 254; -pub const SYS_inotify_rm_watch: c_long = __X32_SYSCALL_BIT + 255; -pub const SYS_migrate_pages: c_long = __X32_SYSCALL_BIT + 256; -pub const SYS_openat: c_long = __X32_SYSCALL_BIT + 257; -pub const SYS_mkdirat: c_long = __X32_SYSCALL_BIT + 258; -pub const SYS_mknodat: c_long = __X32_SYSCALL_BIT + 259; -pub const SYS_fchownat: c_long = __X32_SYSCALL_BIT + 260; -pub const SYS_futimesat: c_long = __X32_SYSCALL_BIT + 261; -pub const SYS_newfstatat: c_long = __X32_SYSCALL_BIT + 262; -pub const SYS_unlinkat: c_long = __X32_SYSCALL_BIT + 263; -pub const SYS_renameat: c_long = __X32_SYSCALL_BIT + 264; -pub const SYS_linkat: c_long = __X32_SYSCALL_BIT + 265; -pub const SYS_symlinkat: c_long = __X32_SYSCALL_BIT + 266; -pub const SYS_readlinkat: c_long = __X32_SYSCALL_BIT + 267; -pub const SYS_fchmodat: c_long = __X32_SYSCALL_BIT + 268; -pub const SYS_faccessat: c_long = __X32_SYSCALL_BIT + 269; -pub const SYS_pselect6: c_long = __X32_SYSCALL_BIT + 270; -pub const SYS_ppoll: c_long = __X32_SYSCALL_BIT + 271; -pub const SYS_unshare: c_long = __X32_SYSCALL_BIT + 272; -pub const SYS_splice: c_long = __X32_SYSCALL_BIT + 275; -pub const SYS_tee: c_long = __X32_SYSCALL_BIT + 276; -pub const SYS_sync_file_range: c_long = __X32_SYSCALL_BIT + 277; -pub const SYS_utimensat: c_long = __X32_SYSCALL_BIT + 280; -pub const SYS_epoll_pwait: c_long = __X32_SYSCALL_BIT + 281; -pub const SYS_signalfd: c_long = __X32_SYSCALL_BIT + 282; -pub const SYS_timerfd_create: c_long = __X32_SYSCALL_BIT + 283; -pub const SYS_eventfd: c_long = __X32_SYSCALL_BIT + 284; -pub const SYS_fallocate: c_long = __X32_SYSCALL_BIT + 285; -pub const SYS_timerfd_settime: c_long = __X32_SYSCALL_BIT + 286; -pub const SYS_timerfd_gettime: c_long = __X32_SYSCALL_BIT + 287; -pub const SYS_accept4: c_long = __X32_SYSCALL_BIT + 288; -pub const SYS_signalfd4: c_long = __X32_SYSCALL_BIT + 289; -pub const SYS_eventfd2: c_long = __X32_SYSCALL_BIT + 290; -pub const SYS_epoll_create1: c_long = __X32_SYSCALL_BIT + 291; -pub const SYS_dup3: c_long = __X32_SYSCALL_BIT + 292; -pub const SYS_pipe2: c_long = __X32_SYSCALL_BIT + 293; -pub const SYS_inotify_init1: c_long = __X32_SYSCALL_BIT + 294; -pub const SYS_perf_event_open: c_long = __X32_SYSCALL_BIT + 298; -pub const SYS_fanotify_init: c_long = __X32_SYSCALL_BIT + 300; -pub const SYS_fanotify_mark: c_long = __X32_SYSCALL_BIT + 301; -pub const SYS_prlimit64: c_long = __X32_SYSCALL_BIT + 302; -pub const SYS_name_to_handle_at: c_long = __X32_SYSCALL_BIT + 303; -pub const SYS_open_by_handle_at: c_long = __X32_SYSCALL_BIT + 304; -pub const SYS_clock_adjtime: c_long = __X32_SYSCALL_BIT + 305; -pub const SYS_syncfs: c_long = __X32_SYSCALL_BIT + 306; -pub const SYS_setns: c_long = __X32_SYSCALL_BIT + 308; -pub const SYS_getcpu: c_long = __X32_SYSCALL_BIT + 309; -pub const SYS_kcmp: c_long = __X32_SYSCALL_BIT + 312; -pub const SYS_finit_module: c_long = __X32_SYSCALL_BIT + 313; -pub const SYS_sched_setattr: c_long = __X32_SYSCALL_BIT + 314; -pub const SYS_sched_getattr: c_long = __X32_SYSCALL_BIT + 315; -pub const SYS_renameat2: c_long = __X32_SYSCALL_BIT + 316; -pub const SYS_seccomp: c_long = __X32_SYSCALL_BIT + 317; -pub const SYS_getrandom: c_long = __X32_SYSCALL_BIT + 318; -pub const SYS_memfd_create: c_long = __X32_SYSCALL_BIT + 319; -pub const SYS_kexec_file_load: c_long = __X32_SYSCALL_BIT + 320; -pub const SYS_bpf: c_long = __X32_SYSCALL_BIT + 321; -pub const SYS_userfaultfd: c_long = __X32_SYSCALL_BIT + 323; -pub 
const SYS_membarrier: c_long = __X32_SYSCALL_BIT + 324; -pub const SYS_mlock2: c_long = __X32_SYSCALL_BIT + 325; -pub const SYS_copy_file_range: c_long = __X32_SYSCALL_BIT + 326; -pub const SYS_pkey_mprotect: c_long = __X32_SYSCALL_BIT + 329; -pub const SYS_pkey_alloc: c_long = __X32_SYSCALL_BIT + 330; -pub const SYS_pkey_free: c_long = __X32_SYSCALL_BIT + 331; -pub const SYS_statx: c_long = __X32_SYSCALL_BIT + 332; -pub const SYS_rseq: c_long = __X32_SYSCALL_BIT + 334; -pub const SYS_pidfd_send_signal: c_long = __X32_SYSCALL_BIT + 424; -pub const SYS_io_uring_setup: c_long = __X32_SYSCALL_BIT + 425; -pub const SYS_io_uring_enter: c_long = __X32_SYSCALL_BIT + 426; -pub const SYS_io_uring_register: c_long = __X32_SYSCALL_BIT + 427; -pub const SYS_open_tree: c_long = __X32_SYSCALL_BIT + 428; -pub const SYS_move_mount: c_long = __X32_SYSCALL_BIT + 429; -pub const SYS_fsopen: c_long = __X32_SYSCALL_BIT + 430; -pub const SYS_fsconfig: c_long = __X32_SYSCALL_BIT + 431; -pub const SYS_fsmount: c_long = __X32_SYSCALL_BIT + 432; -pub const SYS_fspick: c_long = __X32_SYSCALL_BIT + 433; -pub const SYS_pidfd_open: c_long = __X32_SYSCALL_BIT + 434; -pub const SYS_clone3: c_long = __X32_SYSCALL_BIT + 435; -pub const SYS_close_range: c_long = __X32_SYSCALL_BIT + 436; -pub const SYS_openat2: c_long = __X32_SYSCALL_BIT + 437; -pub const SYS_pidfd_getfd: c_long = __X32_SYSCALL_BIT + 438; -pub const SYS_faccessat2: c_long = __X32_SYSCALL_BIT + 439; -pub const SYS_process_madvise: c_long = __X32_SYSCALL_BIT + 440; -pub const SYS_epoll_pwait2: c_long = __X32_SYSCALL_BIT + 441; -pub const SYS_mount_setattr: c_long = __X32_SYSCALL_BIT + 442; -pub const SYS_quotactl_fd: c_long = __X32_SYSCALL_BIT + 443; -pub const SYS_landlock_create_ruleset: c_long = __X32_SYSCALL_BIT + 444; -pub const SYS_landlock_add_rule: c_long = __X32_SYSCALL_BIT + 445; -pub const SYS_landlock_restrict_self: c_long = __X32_SYSCALL_BIT + 446; -pub const SYS_memfd_secret: c_long = __X32_SYSCALL_BIT + 447; -pub const SYS_process_mrelease: c_long = __X32_SYSCALL_BIT + 448; -pub const SYS_futex_waitv: c_long = __X32_SYSCALL_BIT + 449; -pub const SYS_set_mempolicy_home_node: c_long = __X32_SYSCALL_BIT + 450; -pub const SYS_fchmodat2: c_long = __X32_SYSCALL_BIT + 452; -pub const SYS_rt_sigaction: c_long = __X32_SYSCALL_BIT + 512; -pub const SYS_rt_sigreturn: c_long = __X32_SYSCALL_BIT + 513; -pub const SYS_ioctl: c_long = __X32_SYSCALL_BIT + 514; -pub const SYS_readv: c_long = __X32_SYSCALL_BIT + 515; -pub const SYS_writev: c_long = __X32_SYSCALL_BIT + 516; -pub const SYS_recvfrom: c_long = __X32_SYSCALL_BIT + 517; -pub const SYS_sendmsg: c_long = __X32_SYSCALL_BIT + 518; -pub const SYS_recvmsg: c_long = __X32_SYSCALL_BIT + 519; -pub const SYS_execve: c_long = __X32_SYSCALL_BIT + 520; -pub const SYS_ptrace: c_long = __X32_SYSCALL_BIT + 521; -pub const SYS_rt_sigpending: c_long = __X32_SYSCALL_BIT + 522; -pub const SYS_rt_sigtimedwait: c_long = __X32_SYSCALL_BIT + 523; -pub const SYS_rt_sigqueueinfo: c_long = __X32_SYSCALL_BIT + 524; -pub const SYS_sigaltstack: c_long = __X32_SYSCALL_BIT + 525; -pub const SYS_timer_create: c_long = __X32_SYSCALL_BIT + 526; -pub const SYS_mq_notify: c_long = __X32_SYSCALL_BIT + 527; -pub const SYS_kexec_load: c_long = __X32_SYSCALL_BIT + 528; -pub const SYS_waitid: c_long = __X32_SYSCALL_BIT + 529; -pub const SYS_set_robust_list: c_long = __X32_SYSCALL_BIT + 530; -pub const SYS_get_robust_list: c_long = __X32_SYSCALL_BIT + 531; -pub const SYS_vmsplice: c_long = __X32_SYSCALL_BIT + 532; -pub const SYS_move_pages: 
c_long = __X32_SYSCALL_BIT + 533; -pub const SYS_preadv: c_long = __X32_SYSCALL_BIT + 534; -pub const SYS_pwritev: c_long = __X32_SYSCALL_BIT + 535; -pub const SYS_rt_tgsigqueueinfo: c_long = __X32_SYSCALL_BIT + 536; -pub const SYS_recvmmsg: c_long = __X32_SYSCALL_BIT + 537; -pub const SYS_sendmmsg: c_long = __X32_SYSCALL_BIT + 538; -pub const SYS_process_vm_readv: c_long = __X32_SYSCALL_BIT + 539; -pub const SYS_process_vm_writev: c_long = __X32_SYSCALL_BIT + 540; -pub const SYS_setsockopt: c_long = __X32_SYSCALL_BIT + 541; -pub const SYS_getsockopt: c_long = __X32_SYSCALL_BIT + 542; -pub const SYS_io_setup: c_long = __X32_SYSCALL_BIT + 543; -pub const SYS_io_submit: c_long = __X32_SYSCALL_BIT + 544; -pub const SYS_execveat: c_long = __X32_SYSCALL_BIT + 545; -pub const SYS_preadv2: c_long = __X32_SYSCALL_BIT + 546; -pub const SYS_pwritev2: c_long = __X32_SYSCALL_BIT + 547; diff --git a/vendor/libc/src/unix/linux_like/linux/gnu/mod.rs b/vendor/libc/src/unix/linux_like/linux/gnu/mod.rs deleted file mode 100644 index 17d11d27a4deab..00000000000000 --- a/vendor/libc/src/unix/linux_like/linux/gnu/mod.rs +++ /dev/null @@ -1,1382 +0,0 @@ -use crate::off64_t; -use crate::prelude::*; - -pub type pthread_t = c_ulong; -pub type __priority_which_t = c_uint; -pub type __rlimit_resource_t = c_uint; -pub type Lmid_t = c_long; -pub type regoff_t = c_int; -pub type __kernel_rwf_t = c_int; - -cfg_if! { - if #[cfg(doc)] { - // Used in `linux::arch` to define ioctl constants. - pub(crate) type Ioctl = c_ulong; - } else { - #[doc(hidden)] - pub type Ioctl = c_ulong; - } -} - -s! { - pub struct aiocb { - pub aio_fildes: c_int, - pub aio_lio_opcode: c_int, - pub aio_reqprio: c_int, - pub aio_buf: *mut c_void, - pub aio_nbytes: size_t, - pub aio_sigevent: crate::sigevent, - __next_prio: *mut aiocb, - __abs_prio: c_int, - __policy: c_int, - __error_code: c_int, - __return_value: ssize_t, - pub aio_offset: off_t, - #[cfg(all( - not(gnu_file_offset_bits64), - not(target_arch = "x86_64"), - target_pointer_width = "32" - ))] - __unused1: [c_char; 4], - __glibc_reserved: [c_char; 32], - } - - pub struct __exit_status { - pub e_termination: c_short, - pub e_exit: c_short, - } - - pub struct __timeval { - pub tv_sec: i32, - pub tv_usec: i32, - } - - pub struct glob64_t { - pub gl_pathc: size_t, - pub gl_pathv: *mut *mut c_char, - pub gl_offs: size_t, - pub gl_flags: c_int, - - __unused1: *mut c_void, - __unused2: *mut c_void, - __unused3: *mut c_void, - __unused4: *mut c_void, - __unused5: *mut c_void, - } - - pub struct msghdr { - pub msg_name: *mut c_void, - pub msg_namelen: crate::socklen_t, - pub msg_iov: *mut crate::iovec, - pub msg_iovlen: size_t, - pub msg_control: *mut c_void, - pub msg_controllen: size_t, - pub msg_flags: c_int, - } - - pub struct cmsghdr { - pub cmsg_len: size_t, - pub cmsg_level: c_int, - pub cmsg_type: c_int, - } - - pub struct termios { - pub c_iflag: crate::tcflag_t, - pub c_oflag: crate::tcflag_t, - pub c_cflag: crate::tcflag_t, - pub c_lflag: crate::tcflag_t, - pub c_line: crate::cc_t, - pub c_cc: [crate::cc_t; crate::NCCS], - #[cfg(not(any( - target_arch = "sparc", - target_arch = "sparc64", - target_arch = "mips", - target_arch = "mips32r6", - target_arch = "mips64", - target_arch = "mips64r6" - )))] - pub c_ispeed: crate::speed_t, - #[cfg(not(any( - target_arch = "sparc", - target_arch = "sparc64", - target_arch = "mips", - target_arch = "mips32r6", - target_arch = "mips64", - target_arch = "mips64r6" - )))] - pub c_ospeed: crate::speed_t, - } - - pub struct mallinfo { - pub arena: 
c_int, - pub ordblks: c_int, - pub smblks: c_int, - pub hblks: c_int, - pub hblkhd: c_int, - pub usmblks: c_int, - pub fsmblks: c_int, - pub uordblks: c_int, - pub fordblks: c_int, - pub keepcost: c_int, - } - - pub struct mallinfo2 { - pub arena: size_t, - pub ordblks: size_t, - pub smblks: size_t, - pub hblks: size_t, - pub hblkhd: size_t, - pub usmblks: size_t, - pub fsmblks: size_t, - pub uordblks: size_t, - pub fordblks: size_t, - pub keepcost: size_t, - } - - pub struct nl_pktinfo { - pub group: u32, - } - - pub struct nl_mmap_req { - pub nm_block_size: c_uint, - pub nm_block_nr: c_uint, - pub nm_frame_size: c_uint, - pub nm_frame_nr: c_uint, - } - - pub struct nl_mmap_hdr { - pub nm_status: c_uint, - pub nm_len: c_uint, - pub nm_group: u32, - pub nm_pid: u32, - pub nm_uid: u32, - pub nm_gid: u32, - } - - pub struct rtentry { - pub rt_pad1: c_ulong, - pub rt_dst: crate::sockaddr, - pub rt_gateway: crate::sockaddr, - pub rt_genmask: crate::sockaddr, - pub rt_flags: c_ushort, - pub rt_pad2: c_short, - pub rt_pad3: c_ulong, - pub rt_tos: c_uchar, - pub rt_class: c_uchar, - #[cfg(target_pointer_width = "64")] - pub rt_pad4: [c_short; 3usize], - #[cfg(not(target_pointer_width = "64"))] - pub rt_pad4: c_short, - pub rt_metric: c_short, - pub rt_dev: *mut c_char, - pub rt_mtu: c_ulong, - pub rt_window: c_ulong, - pub rt_irtt: c_ushort, - } - - pub struct ntptimeval { - pub time: crate::timeval, - pub maxerror: c_long, - pub esterror: c_long, - pub tai: c_long, - pub __glibc_reserved1: c_long, - pub __glibc_reserved2: c_long, - pub __glibc_reserved3: c_long, - pub __glibc_reserved4: c_long, - } - - pub struct regex_t { - __buffer: *mut c_void, - __allocated: size_t, - __used: size_t, - __syntax: c_ulong, - __fastmap: *mut c_char, - __translate: *mut c_char, - __re_nsub: size_t, - __bitfield: u8, - } - - pub struct Elf64_Chdr { - pub ch_type: crate::Elf64_Word, - pub ch_reserved: crate::Elf64_Word, - pub ch_size: crate::Elf64_Xword, - pub ch_addralign: crate::Elf64_Xword, - } - - pub struct Elf32_Chdr { - pub ch_type: crate::Elf32_Word, - pub ch_size: crate::Elf32_Word, - pub ch_addralign: crate::Elf32_Word, - } - - pub struct seminfo { - pub semmap: c_int, - pub semmni: c_int, - pub semmns: c_int, - pub semmnu: c_int, - pub semmsl: c_int, - pub semopm: c_int, - pub semume: c_int, - pub semusz: c_int, - pub semvmx: c_int, - pub semaem: c_int, - } - - pub struct ptrace_peeksiginfo_args { - pub off: crate::__u64, - pub flags: crate::__u32, - pub nr: crate::__s32, - } - - pub struct __c_anonymous_ptrace_syscall_info_entry { - pub nr: crate::__u64, - pub args: [crate::__u64; 6], - } - - pub struct __c_anonymous_ptrace_syscall_info_exit { - pub sval: crate::__s64, - pub is_error: crate::__u8, - } - - pub struct __c_anonymous_ptrace_syscall_info_seccomp { - pub nr: crate::__u64, - pub args: [crate::__u64; 6], - pub ret_data: crate::__u32, - } - - pub struct ptrace_syscall_info { - pub op: crate::__u8, - pub pad: [crate::__u8; 3], - pub arch: crate::__u32, - pub instruction_pointer: crate::__u64, - pub stack_pointer: crate::__u64, - pub u: __c_anonymous_ptrace_syscall_info_data, - } - - pub struct ptrace_sud_config { - pub mode: crate::__u64, - pub selector: crate::__u64, - pub offset: crate::__u64, - pub len: crate::__u64, - } - - pub struct iocb { - pub aio_data: crate::__u64, - #[cfg(target_endian = "little")] - pub aio_key: crate::__u32, - #[cfg(target_endian = "little")] - pub aio_rw_flags: crate::__kernel_rwf_t, - #[cfg(target_endian = "big")] - pub aio_rw_flags: crate::__kernel_rwf_t, - 
#[cfg(target_endian = "big")] - pub aio_key: crate::__u32, - pub aio_lio_opcode: crate::__u16, - pub aio_reqprio: crate::__s16, - pub aio_fildes: crate::__u32, - pub aio_buf: crate::__u64, - pub aio_nbytes: crate::__u64, - pub aio_offset: crate::__s64, - aio_reserved2: crate::__u64, - pub aio_flags: crate::__u32, - pub aio_resfd: crate::__u32, - } - - // netinet/tcp.h - - pub struct tcp_info { - pub tcpi_state: u8, - pub tcpi_ca_state: u8, - pub tcpi_retransmits: u8, - pub tcpi_probes: u8, - pub tcpi_backoff: u8, - pub tcpi_options: u8, - /// This contains the bitfields `tcpi_snd_wscale` and `tcpi_rcv_wscale`. - /// Each is 4 bits. - pub tcpi_snd_rcv_wscale: u8, - pub tcpi_rto: u32, - pub tcpi_ato: u32, - pub tcpi_snd_mss: u32, - pub tcpi_rcv_mss: u32, - pub tcpi_unacked: u32, - pub tcpi_sacked: u32, - pub tcpi_lost: u32, - pub tcpi_retrans: u32, - pub tcpi_fackets: u32, - pub tcpi_last_data_sent: u32, - pub tcpi_last_ack_sent: u32, - pub tcpi_last_data_recv: u32, - pub tcpi_last_ack_recv: u32, - pub tcpi_pmtu: u32, - pub tcpi_rcv_ssthresh: u32, - pub tcpi_rtt: u32, - pub tcpi_rttvar: u32, - pub tcpi_snd_ssthresh: u32, - pub tcpi_snd_cwnd: u32, - pub tcpi_advmss: u32, - pub tcpi_reordering: u32, - pub tcpi_rcv_rtt: u32, - pub tcpi_rcv_space: u32, - pub tcpi_total_retrans: u32, - } - - pub struct fanotify_event_info_pidfd { - pub hdr: crate::fanotify_event_info_header, - pub pidfd: crate::__s32, - } - - pub struct fanotify_event_info_error { - pub hdr: crate::fanotify_event_info_header, - pub error: crate::__s32, - pub error_count: crate::__u32, - } - - // FIXME(1.0) this is actually a union - #[cfg_attr(target_pointer_width = "32", repr(align(4)))] - #[cfg_attr(target_pointer_width = "64", repr(align(8)))] - pub struct sem_t { - #[cfg(target_pointer_width = "32")] - __size: [c_char; 16], - #[cfg(target_pointer_width = "64")] - __size: [c_char; 32], - } - - pub struct mbstate_t { - __count: c_int, - __wchb: [c_char; 4], - } - - pub struct fpos64_t { - __pos: off64_t, - __state: crate::mbstate_t, - } - - pub struct fpos_t { - #[cfg(not(gnu_file_offset_bits64))] - __pos: off_t, - #[cfg(gnu_file_offset_bits64)] - __pos: off64_t, - __state: crate::mbstate_t, - } - - // linux x32 compatibility - // See https://sourceware.org/bugzilla/show_bug.cgi?id=16437 - pub struct timespec { - pub tv_sec: time_t, - #[cfg(all(gnu_time_bits64, target_endian = "big"))] - __pad: i32, - #[cfg(not(all(target_arch = "x86_64", target_pointer_width = "32")))] - pub tv_nsec: c_long, - #[cfg(all(target_arch = "x86_64", target_pointer_width = "32"))] - pub tv_nsec: i64, - #[cfg(all(gnu_time_bits64, target_endian = "little"))] - __pad: i32, - } -} - -impl siginfo_t { - pub unsafe fn si_addr(&self) -> *mut c_void { - #[repr(C)] - struct siginfo_sigfault { - _si_signo: c_int, - _si_errno: c_int, - _si_code: c_int, - si_addr: *mut c_void, - } - (*(self as *const siginfo_t).cast::()).si_addr - } - - pub unsafe fn si_value(&self) -> crate::sigval { - #[repr(C)] - struct siginfo_timer { - _si_signo: c_int, - _si_errno: c_int, - _si_code: c_int, - _si_tid: c_int, - _si_overrun: c_int, - si_sigval: crate::sigval, - } - (*(self as *const siginfo_t).cast::()).si_sigval - } -} - -// Internal, for casts to access union fields -#[repr(C)] -struct sifields_sigchld { - si_pid: crate::pid_t, - si_uid: crate::uid_t, - si_status: c_int, - si_utime: c_long, - si_stime: c_long, -} -impl Copy for sifields_sigchld {} -impl Clone for sifields_sigchld { - fn clone(&self) -> sifields_sigchld { - *self - } -} - -// Internal, for casts to access 
union fields -#[repr(C)] -union sifields { - _align_pointer: *mut c_void, - sigchld: sifields_sigchld, -} - -// Internal, for casts to access union fields. Note that some variants -// of sifields start with a pointer, which makes the alignment of -// sifields vary on 32-bit and 64-bit architectures. -#[repr(C)] -struct siginfo_f { - _siginfo_base: [c_int; 3], - sifields: sifields, -} - -impl siginfo_t { - unsafe fn sifields(&self) -> &sifields { - &(*(self as *const siginfo_t).cast::()).sifields - } - - pub unsafe fn si_pid(&self) -> crate::pid_t { - self.sifields().sigchld.si_pid - } - - pub unsafe fn si_uid(&self) -> crate::uid_t { - self.sifields().sigchld.si_uid - } - - pub unsafe fn si_status(&self) -> c_int { - self.sifields().sigchld.si_status - } - - pub unsafe fn si_utime(&self) -> c_long { - self.sifields().sigchld.si_utime - } - - pub unsafe fn si_stime(&self) -> c_long { - self.sifields().sigchld.si_stime - } -} - -s_no_extra_traits! { - pub union __c_anonymous_ptrace_syscall_info_data { - pub entry: __c_anonymous_ptrace_syscall_info_entry, - pub exit: __c_anonymous_ptrace_syscall_info_exit, - pub seccomp: __c_anonymous_ptrace_syscall_info_seccomp, - } - - pub struct utmpx { - pub ut_type: c_short, - pub ut_pid: crate::pid_t, - pub ut_line: [c_char; __UT_LINESIZE], - pub ut_id: [c_char; 4], - - pub ut_user: [c_char; __UT_NAMESIZE], - pub ut_host: [c_char; __UT_HOSTSIZE], - pub ut_exit: __exit_status, - - #[cfg(any( - target_arch = "aarch64", - target_arch = "s390x", - target_arch = "loongarch64", - all(target_pointer_width = "32", not(target_arch = "x86_64")) - ))] - pub ut_session: c_long, - #[cfg(any( - target_arch = "aarch64", - target_arch = "s390x", - target_arch = "loongarch64", - all(target_pointer_width = "32", not(target_arch = "x86_64")) - ))] - pub ut_tv: crate::timeval, - - #[cfg(not(any( - target_arch = "aarch64", - target_arch = "s390x", - target_arch = "loongarch64", - all(target_pointer_width = "32", not(target_arch = "x86_64")) - )))] - pub ut_session: i32, - #[cfg(not(any( - target_arch = "aarch64", - target_arch = "s390x", - target_arch = "loongarch64", - all(target_pointer_width = "32", not(target_arch = "x86_64")) - )))] - pub ut_tv: __timeval, - - pub ut_addr_v6: [i32; 4], - __glibc_reserved: [c_char; 20], - } -} - -cfg_if! 
{ - if #[cfg(feature = "extra_traits")] { - impl PartialEq for utmpx { - fn eq(&self, other: &utmpx) -> bool { - self.ut_type == other.ut_type - && self.ut_pid == other.ut_pid - && self.ut_line == other.ut_line - && self.ut_id == other.ut_id - && self.ut_user == other.ut_user - && self - .ut_host - .iter() - .zip(other.ut_host.iter()) - .all(|(a, b)| a == b) - && self.ut_exit == other.ut_exit - && self.ut_session == other.ut_session - && self.ut_tv == other.ut_tv - && self.ut_addr_v6 == other.ut_addr_v6 - && self.__glibc_reserved == other.__glibc_reserved - } - } - - impl Eq for utmpx {} - - impl hash::Hash for utmpx { - fn hash(&self, state: &mut H) { - self.ut_type.hash(state); - self.ut_pid.hash(state); - self.ut_line.hash(state); - self.ut_id.hash(state); - self.ut_user.hash(state); - self.ut_host.hash(state); - self.ut_exit.hash(state); - self.ut_session.hash(state); - self.ut_tv.hash(state); - self.ut_addr_v6.hash(state); - self.__glibc_reserved.hash(state); - } - } - - impl PartialEq for __c_anonymous_ptrace_syscall_info_data { - fn eq(&self, other: &__c_anonymous_ptrace_syscall_info_data) -> bool { - unsafe { - self.entry == other.entry - || self.exit == other.exit - || self.seccomp == other.seccomp - } - } - } - - impl Eq for __c_anonymous_ptrace_syscall_info_data {} - - impl hash::Hash for __c_anonymous_ptrace_syscall_info_data { - fn hash(&self, state: &mut H) { - unsafe { - self.entry.hash(state); - self.exit.hash(state); - self.seccomp.hash(state); - } - } - } - } -} - -// include/uapi/asm-generic/hugetlb_encode.h -pub const HUGETLB_FLAG_ENCODE_SHIFT: c_int = 26; -pub const HUGETLB_FLAG_ENCODE_MASK: c_int = 0x3f; - -pub const HUGETLB_FLAG_ENCODE_64KB: c_int = 16 << HUGETLB_FLAG_ENCODE_SHIFT; -pub const HUGETLB_FLAG_ENCODE_512KB: c_int = 19 << HUGETLB_FLAG_ENCODE_SHIFT; -pub const HUGETLB_FLAG_ENCODE_1MB: c_int = 20 << HUGETLB_FLAG_ENCODE_SHIFT; -pub const HUGETLB_FLAG_ENCODE_2MB: c_int = 21 << HUGETLB_FLAG_ENCODE_SHIFT; -pub const HUGETLB_FLAG_ENCODE_8MB: c_int = 23 << HUGETLB_FLAG_ENCODE_SHIFT; -pub const HUGETLB_FLAG_ENCODE_16MB: c_int = 24 << HUGETLB_FLAG_ENCODE_SHIFT; -pub const HUGETLB_FLAG_ENCODE_32MB: c_int = 25 << HUGETLB_FLAG_ENCODE_SHIFT; -pub const HUGETLB_FLAG_ENCODE_256MB: c_int = 28 << HUGETLB_FLAG_ENCODE_SHIFT; -pub const HUGETLB_FLAG_ENCODE_512MB: c_int = 29 << HUGETLB_FLAG_ENCODE_SHIFT; -pub const HUGETLB_FLAG_ENCODE_1GB: c_int = 30 << HUGETLB_FLAG_ENCODE_SHIFT; -pub const HUGETLB_FLAG_ENCODE_2GB: c_int = 31 << HUGETLB_FLAG_ENCODE_SHIFT; -pub const HUGETLB_FLAG_ENCODE_16GB: c_int = 34 << HUGETLB_FLAG_ENCODE_SHIFT; - -// include/uapi/linux/mman.h -/* - * Huge page size encoding when MAP_HUGETLB is specified, and a huge page - * size other than the default is desired. See hugetlb_encode.h. - * All known huge page size encodings are provided here. It is the - * responsibility of the application to know which sizes are supported on - * the running system. See mmap(2) man page for details. 
- */ -pub const MAP_HUGE_SHIFT: c_int = HUGETLB_FLAG_ENCODE_SHIFT; -pub const MAP_HUGE_MASK: c_int = HUGETLB_FLAG_ENCODE_MASK; - -pub const MAP_HUGE_64KB: c_int = HUGETLB_FLAG_ENCODE_64KB; -pub const MAP_HUGE_512KB: c_int = HUGETLB_FLAG_ENCODE_512KB; -pub const MAP_HUGE_1MB: c_int = HUGETLB_FLAG_ENCODE_1MB; -pub const MAP_HUGE_2MB: c_int = HUGETLB_FLAG_ENCODE_2MB; -pub const MAP_HUGE_8MB: c_int = HUGETLB_FLAG_ENCODE_8MB; -pub const MAP_HUGE_16MB: c_int = HUGETLB_FLAG_ENCODE_16MB; -pub const MAP_HUGE_32MB: c_int = HUGETLB_FLAG_ENCODE_32MB; -pub const MAP_HUGE_256MB: c_int = HUGETLB_FLAG_ENCODE_256MB; -pub const MAP_HUGE_512MB: c_int = HUGETLB_FLAG_ENCODE_512MB; -pub const MAP_HUGE_1GB: c_int = HUGETLB_FLAG_ENCODE_1GB; -pub const MAP_HUGE_2GB: c_int = HUGETLB_FLAG_ENCODE_2GB; -pub const MAP_HUGE_16GB: c_int = HUGETLB_FLAG_ENCODE_16GB; - -pub const PRIO_PROCESS: crate::__priority_which_t = 0; -pub const PRIO_PGRP: crate::__priority_which_t = 1; -pub const PRIO_USER: crate::__priority_which_t = 2; - -pub const MS_RMT_MASK: c_ulong = 0x02800051; - -pub const __UT_LINESIZE: usize = 32; -pub const __UT_NAMESIZE: usize = 32; -pub const __UT_HOSTSIZE: usize = 256; -pub const EMPTY: c_short = 0; -pub const RUN_LVL: c_short = 1; -pub const BOOT_TIME: c_short = 2; -pub const NEW_TIME: c_short = 3; -pub const OLD_TIME: c_short = 4; -pub const INIT_PROCESS: c_short = 5; -pub const LOGIN_PROCESS: c_short = 6; -pub const USER_PROCESS: c_short = 7; -pub const DEAD_PROCESS: c_short = 8; -pub const ACCOUNTING: c_short = 9; - -// dlfcn.h -pub const LM_ID_BASE: c_long = 0; -pub const LM_ID_NEWLM: c_long = -1; - -pub const RTLD_DI_LMID: c_int = 1; -pub const RTLD_DI_LINKMAP: c_int = 2; -pub const RTLD_DI_CONFIGADDR: c_int = 3; -pub const RTLD_DI_SERINFO: c_int = 4; -pub const RTLD_DI_SERINFOSIZE: c_int = 5; -pub const RTLD_DI_ORIGIN: c_int = 6; -pub const RTLD_DI_PROFILENAME: c_int = 7; -pub const RTLD_DI_PROFILEOUT: c_int = 8; -pub const RTLD_DI_TLS_MODID: c_int = 9; -pub const RTLD_DI_TLS_DATA: c_int = 10; - -pub const SOCK_NONBLOCK: c_int = O_NONBLOCK; - -pub const SOL_RXRPC: c_int = 272; -pub const SOL_PPPOL2TP: c_int = 273; -pub const SOL_PNPIPE: c_int = 275; -pub const SOL_RDS: c_int = 276; -pub const SOL_IUCV: c_int = 277; -pub const SOL_CAIF: c_int = 278; -pub const SOL_NFC: c_int = 280; - -pub const MSG_TRYHARD: c_int = 4; - -pub const LC_PAPER: c_int = 7; -pub const LC_NAME: c_int = 8; -pub const LC_ADDRESS: c_int = 9; -pub const LC_TELEPHONE: c_int = 10; -pub const LC_MEASUREMENT: c_int = 11; -pub const LC_IDENTIFICATION: c_int = 12; -pub const LC_PAPER_MASK: c_int = 1 << LC_PAPER; -pub const LC_NAME_MASK: c_int = 1 << LC_NAME; -pub const LC_ADDRESS_MASK: c_int = 1 << LC_ADDRESS; -pub const LC_TELEPHONE_MASK: c_int = 1 << LC_TELEPHONE; -pub const LC_MEASUREMENT_MASK: c_int = 1 << LC_MEASUREMENT; -pub const LC_IDENTIFICATION_MASK: c_int = 1 << LC_IDENTIFICATION; -pub const LC_ALL_MASK: c_int = crate::LC_CTYPE_MASK - | crate::LC_NUMERIC_MASK - | crate::LC_TIME_MASK - | crate::LC_COLLATE_MASK - | crate::LC_MONETARY_MASK - | crate::LC_MESSAGES_MASK - | LC_PAPER_MASK - | LC_NAME_MASK - | LC_ADDRESS_MASK - | LC_TELEPHONE_MASK - | LC_MEASUREMENT_MASK - | LC_IDENTIFICATION_MASK; - -pub const ENOTSUP: c_int = EOPNOTSUPP; - -pub const SOCK_SEQPACKET: c_int = 5; -pub const SOCK_DCCP: c_int = 6; -#[deprecated(since = "0.2.70", note = "AF_PACKET must be used instead")] -pub const SOCK_PACKET: c_int = 10; - -pub const AF_IB: c_int = 27; -pub const AF_MPLS: c_int = 28; -pub const AF_NFC: c_int = 39; -pub const 
AF_VSOCK: c_int = 40; -pub const AF_XDP: c_int = 44; -pub const PF_IB: c_int = AF_IB; -pub const PF_MPLS: c_int = AF_MPLS; -pub const PF_NFC: c_int = AF_NFC; -pub const PF_VSOCK: c_int = AF_VSOCK; -pub const PF_XDP: c_int = AF_XDP; - -pub const SIGEV_THREAD_ID: c_int = 4; - -pub const BUFSIZ: c_uint = 8192; -pub const TMP_MAX: c_uint = 238328; -pub const FOPEN_MAX: c_uint = 16; -pub const FILENAME_MAX: c_uint = 4096; -pub const POSIX_MADV_DONTNEED: c_int = 4; -pub const _CS_GNU_LIBC_VERSION: c_int = 2; -pub const _CS_GNU_LIBPTHREAD_VERSION: c_int = 3; -pub const _CS_V6_ENV: c_int = 1148; -pub const _CS_V7_ENV: c_int = 1149; -pub const _SC_EQUIV_CLASS_MAX: c_int = 41; -pub const _SC_CHARCLASS_NAME_MAX: c_int = 45; -pub const _SC_PII: c_int = 53; -pub const _SC_PII_XTI: c_int = 54; -pub const _SC_PII_SOCKET: c_int = 55; -pub const _SC_PII_INTERNET: c_int = 56; -pub const _SC_PII_OSI: c_int = 57; -pub const _SC_POLL: c_int = 58; -pub const _SC_SELECT: c_int = 59; -pub const _SC_PII_INTERNET_STREAM: c_int = 61; -pub const _SC_PII_INTERNET_DGRAM: c_int = 62; -pub const _SC_PII_OSI_COTS: c_int = 63; -pub const _SC_PII_OSI_CLTS: c_int = 64; -pub const _SC_PII_OSI_M: c_int = 65; -pub const _SC_T_IOV_MAX: c_int = 66; -pub const _SC_2_C_VERSION: c_int = 96; -pub const _SC_CHAR_BIT: c_int = 101; -pub const _SC_CHAR_MAX: c_int = 102; -pub const _SC_CHAR_MIN: c_int = 103; -pub const _SC_INT_MAX: c_int = 104; -pub const _SC_INT_MIN: c_int = 105; -pub const _SC_LONG_BIT: c_int = 106; -pub const _SC_WORD_BIT: c_int = 107; -pub const _SC_MB_LEN_MAX: c_int = 108; -pub const _SC_SSIZE_MAX: c_int = 110; -pub const _SC_SCHAR_MAX: c_int = 111; -pub const _SC_SCHAR_MIN: c_int = 112; -pub const _SC_SHRT_MAX: c_int = 113; -pub const _SC_SHRT_MIN: c_int = 114; -pub const _SC_UCHAR_MAX: c_int = 115; -pub const _SC_UINT_MAX: c_int = 116; -pub const _SC_ULONG_MAX: c_int = 117; -pub const _SC_USHRT_MAX: c_int = 118; -pub const _SC_NL_ARGMAX: c_int = 119; -pub const _SC_NL_LANGMAX: c_int = 120; -pub const _SC_NL_MSGMAX: c_int = 121; -pub const _SC_NL_NMAX: c_int = 122; -pub const _SC_NL_SETMAX: c_int = 123; -pub const _SC_NL_TEXTMAX: c_int = 124; -pub const _SC_BASE: c_int = 134; -pub const _SC_C_LANG_SUPPORT: c_int = 135; -pub const _SC_C_LANG_SUPPORT_R: c_int = 136; -pub const _SC_DEVICE_IO: c_int = 140; -pub const _SC_DEVICE_SPECIFIC: c_int = 141; -pub const _SC_DEVICE_SPECIFIC_R: c_int = 142; -pub const _SC_FD_MGMT: c_int = 143; -pub const _SC_FIFO: c_int = 144; -pub const _SC_PIPE: c_int = 145; -pub const _SC_FILE_ATTRIBUTES: c_int = 146; -pub const _SC_FILE_LOCKING: c_int = 147; -pub const _SC_FILE_SYSTEM: c_int = 148; -pub const _SC_MULTI_PROCESS: c_int = 150; -pub const _SC_SINGLE_PROCESS: c_int = 151; -pub const _SC_NETWORKING: c_int = 152; -pub const _SC_REGEX_VERSION: c_int = 156; -pub const _SC_SIGNALS: c_int = 158; -pub const _SC_SYSTEM_DATABASE: c_int = 162; -pub const _SC_SYSTEM_DATABASE_R: c_int = 163; -pub const _SC_USER_GROUPS: c_int = 166; -pub const _SC_USER_GROUPS_R: c_int = 167; -pub const _SC_LEVEL1_ICACHE_SIZE: c_int = 185; -pub const _SC_LEVEL1_ICACHE_ASSOC: c_int = 186; -pub const _SC_LEVEL1_ICACHE_LINESIZE: c_int = 187; -pub const _SC_LEVEL1_DCACHE_SIZE: c_int = 188; -pub const _SC_LEVEL1_DCACHE_ASSOC: c_int = 189; -pub const _SC_LEVEL1_DCACHE_LINESIZE: c_int = 190; -pub const _SC_LEVEL2_CACHE_SIZE: c_int = 191; -pub const _SC_LEVEL2_CACHE_ASSOC: c_int = 192; -pub const _SC_LEVEL2_CACHE_LINESIZE: c_int = 193; -pub const _SC_LEVEL3_CACHE_SIZE: c_int = 194; -pub const _SC_LEVEL3_CACHE_ASSOC: 
c_int = 195; -pub const _SC_LEVEL3_CACHE_LINESIZE: c_int = 196; -pub const _SC_LEVEL4_CACHE_SIZE: c_int = 197; -pub const _SC_LEVEL4_CACHE_ASSOC: c_int = 198; -pub const _SC_LEVEL4_CACHE_LINESIZE: c_int = 199; -pub const O_ACCMODE: c_int = 3; -pub const ST_RELATIME: c_ulong = 4096; -pub const NI_MAXHOST: crate::socklen_t = 1025; - -// Most `*_SUPER_MAGIC` constants are defined at the `linux_like` level; the -// following are only available on newer Linux versions than the versions -// currently used in CI in some configurations, so we define them here. -cfg_if! { - if #[cfg(not(target_arch = "s390x"))] { - pub const BINDERFS_SUPER_MAGIC: c_long = 0x6c6f6f70; - pub const XFS_SUPER_MAGIC: c_long = 0x58465342; - } else if #[cfg(target_arch = "s390x")] { - pub const BINDERFS_SUPER_MAGIC: c_uint = 0x6c6f6f70; - pub const XFS_SUPER_MAGIC: c_uint = 0x58465342; - } -} - -pub const CPU_SETSIZE: c_int = 0x400; - -pub const PTRACE_TRACEME: c_uint = 0; -pub const PTRACE_PEEKTEXT: c_uint = 1; -pub const PTRACE_PEEKDATA: c_uint = 2; -pub const PTRACE_PEEKUSER: c_uint = 3; -pub const PTRACE_POKETEXT: c_uint = 4; -pub const PTRACE_POKEDATA: c_uint = 5; -pub const PTRACE_POKEUSER: c_uint = 6; -pub const PTRACE_CONT: c_uint = 7; -pub const PTRACE_KILL: c_uint = 8; -pub const PTRACE_SINGLESTEP: c_uint = 9; -pub const PTRACE_ATTACH: c_uint = 16; -pub const PTRACE_SYSCALL: c_uint = 24; -pub const PTRACE_SETOPTIONS: c_uint = 0x4200; -pub const PTRACE_GETEVENTMSG: c_uint = 0x4201; -pub const PTRACE_GETSIGINFO: c_uint = 0x4202; -pub const PTRACE_SETSIGINFO: c_uint = 0x4203; -pub const PTRACE_GETREGSET: c_uint = 0x4204; -pub const PTRACE_SETREGSET: c_uint = 0x4205; -pub const PTRACE_SEIZE: c_uint = 0x4206; -pub const PTRACE_INTERRUPT: c_uint = 0x4207; -pub const PTRACE_LISTEN: c_uint = 0x4208; -pub const PTRACE_PEEKSIGINFO: c_uint = 0x4209; -pub const PTRACE_GETSIGMASK: c_uint = 0x420a; -pub const PTRACE_SETSIGMASK: c_uint = 0x420b; -pub const PTRACE_GET_SYSCALL_INFO: c_uint = 0x420e; -pub const PTRACE_SYSCALL_INFO_NONE: crate::__u8 = 0; -pub const PTRACE_SYSCALL_INFO_ENTRY: crate::__u8 = 1; -pub const PTRACE_SYSCALL_INFO_EXIT: crate::__u8 = 2; -pub const PTRACE_SYSCALL_INFO_SECCOMP: crate::__u8 = 3; -pub const PTRACE_SET_SYSCALL_USER_DISPATCH_CONFIG: crate::__u8 = 0x4210; -pub const PTRACE_GET_SYSCALL_USER_DISPATCH_CONFIG: crate::__u8 = 0x4211; - -// linux/rtnetlink.h -pub const TCA_PAD: c_ushort = 9; -pub const TCA_DUMP_INVISIBLE: c_ushort = 10; -pub const TCA_CHAIN: c_ushort = 11; -pub const TCA_HW_OFFLOAD: c_ushort = 12; - -pub const RTM_DELNETCONF: u16 = 81; -pub const RTM_NEWSTATS: u16 = 92; -pub const RTM_GETSTATS: u16 = 94; -pub const RTM_NEWCACHEREPORT: u16 = 96; - -pub const RTM_F_LOOKUP_TABLE: c_uint = 0x1000; -pub const RTM_F_FIB_MATCH: c_uint = 0x2000; - -pub const RTA_VIA: c_ushort = 18; -pub const RTA_NEWDST: c_ushort = 19; -pub const RTA_PREF: c_ushort = 20; -pub const RTA_ENCAP_TYPE: c_ushort = 21; -pub const RTA_ENCAP: c_ushort = 22; -pub const RTA_EXPIRES: c_ushort = 23; -pub const RTA_PAD: c_ushort = 24; -pub const RTA_UID: c_ushort = 25; -pub const RTA_TTL_PROPAGATE: c_ushort = 26; - -// linux/neighbor.h -pub const NTF_EXT_LEARNED: u8 = 0x10; -pub const NTF_OFFLOADED: u8 = 0x20; - -pub const NDA_MASTER: c_ushort = 9; -pub const NDA_LINK_NETNSID: c_ushort = 10; -pub const NDA_SRC_VNI: c_ushort = 11; - -// linux/personality.h -pub const UNAME26: c_int = 0x0020000; -pub const FDPIC_FUNCPTRS: c_int = 0x0080000; - -pub const MAX_LINKS: c_int = 32; - -pub const GENL_UNS_ADMIN_PERM: c_int = 0x10; - 
-pub const GENL_ID_VFS_DQUOT: c_int = crate::NLMSG_MIN_TYPE + 1; -pub const GENL_ID_PMCRAID: c_int = crate::NLMSG_MIN_TYPE + 2; - -pub const ELFOSABI_ARM_AEABI: u8 = 64; - -// linux/sched.h -pub const CLONE_NEWTIME: c_int = 0x80; -// DIFF(main): changed to `c_ulonglong` in e9abac9ac2 -pub const CLONE_CLEAR_SIGHAND: c_int = 0x100000000; -pub const CLONE_INTO_CGROUP: c_int = 0x200000000; - -// linux/keyctl.h -pub const KEYCTL_DH_COMPUTE: u32 = 23; -pub const KEYCTL_PKEY_QUERY: u32 = 24; -pub const KEYCTL_PKEY_ENCRYPT: u32 = 25; -pub const KEYCTL_PKEY_DECRYPT: u32 = 26; -pub const KEYCTL_PKEY_SIGN: u32 = 27; -pub const KEYCTL_PKEY_VERIFY: u32 = 28; -pub const KEYCTL_RESTRICT_KEYRING: u32 = 29; - -pub const KEYCTL_SUPPORTS_ENCRYPT: u32 = 0x01; -pub const KEYCTL_SUPPORTS_DECRYPT: u32 = 0x02; -pub const KEYCTL_SUPPORTS_SIGN: u32 = 0x04; -pub const KEYCTL_SUPPORTS_VERIFY: u32 = 0x08; -cfg_if! { - if #[cfg(not(any( - target_arch = "mips", - target_arch = "mips32r6", - target_arch = "mips64", - target_arch = "mips64r6" - )))] { - pub const KEYCTL_MOVE: u32 = 30; - pub const KEYCTL_CAPABILITIES: u32 = 31; - - pub const KEYCTL_CAPS0_CAPABILITIES: u32 = 0x01; - pub const KEYCTL_CAPS0_PERSISTENT_KEYRINGS: u32 = 0x02; - pub const KEYCTL_CAPS0_DIFFIE_HELLMAN: u32 = 0x04; - pub const KEYCTL_CAPS0_PUBLIC_KEY: u32 = 0x08; - pub const KEYCTL_CAPS0_BIG_KEY: u32 = 0x10; - pub const KEYCTL_CAPS0_INVALIDATE: u32 = 0x20; - pub const KEYCTL_CAPS0_RESTRICT_KEYRING: u32 = 0x40; - pub const KEYCTL_CAPS0_MOVE: u32 = 0x80; - pub const KEYCTL_CAPS1_NS_KEYRING_NAME: u32 = 0x01; - pub const KEYCTL_CAPS1_NS_KEY_TAG: u32 = 0x02; - } -} - -pub const M_MXFAST: c_int = 1; -pub const M_NLBLKS: c_int = 2; -pub const M_GRAIN: c_int = 3; -pub const M_KEEP: c_int = 4; -pub const M_TRIM_THRESHOLD: c_int = -1; -pub const M_TOP_PAD: c_int = -2; -pub const M_MMAP_THRESHOLD: c_int = -3; -pub const M_MMAP_MAX: c_int = -4; -pub const M_CHECK_ACTION: c_int = -5; -pub const M_PERTURB: c_int = -6; -pub const M_ARENA_TEST: c_int = -7; -pub const M_ARENA_MAX: c_int = -8; - -pub const SOMAXCONN: c_int = 4096; - -// linux/mount.h -pub const MOVE_MOUNT_F_SYMLINKS: c_uint = 0x00000001; -pub const MOVE_MOUNT_F_AUTOMOUNTS: c_uint = 0x00000002; -pub const MOVE_MOUNT_F_EMPTY_PATH: c_uint = 0x00000004; -pub const MOVE_MOUNT_T_SYMLINKS: c_uint = 0x00000010; -pub const MOVE_MOUNT_T_AUTOMOUNTS: c_uint = 0x00000020; -pub const MOVE_MOUNT_T_EMPTY_PATH: c_uint = 0x00000040; -pub const MOVE_MOUNT_SET_GROUP: c_uint = 0x00000100; -pub const MOVE_MOUNT_BENEATH: c_uint = 0x00000200; - -// sys/timex.h -pub const ADJ_OFFSET: c_uint = 0x0001; -pub const ADJ_FREQUENCY: c_uint = 0x0002; -pub const ADJ_MAXERROR: c_uint = 0x0004; -pub const ADJ_ESTERROR: c_uint = 0x0008; -pub const ADJ_STATUS: c_uint = 0x0010; -pub const ADJ_TIMECONST: c_uint = 0x0020; -pub const ADJ_TAI: c_uint = 0x0080; -pub const ADJ_SETOFFSET: c_uint = 0x0100; -pub const ADJ_MICRO: c_uint = 0x1000; -pub const ADJ_NANO: c_uint = 0x2000; -pub const ADJ_TICK: c_uint = 0x4000; -pub const ADJ_OFFSET_SINGLESHOT: c_uint = 0x8001; -pub const ADJ_OFFSET_SS_READ: c_uint = 0xa001; -pub const MOD_OFFSET: c_uint = ADJ_OFFSET; -pub const MOD_FREQUENCY: c_uint = ADJ_FREQUENCY; -pub const MOD_MAXERROR: c_uint = ADJ_MAXERROR; -pub const MOD_ESTERROR: c_uint = ADJ_ESTERROR; -pub const MOD_STATUS: c_uint = ADJ_STATUS; -pub const MOD_TIMECONST: c_uint = ADJ_TIMECONST; -pub const MOD_CLKB: c_uint = ADJ_TICK; -pub const MOD_CLKA: c_uint = ADJ_OFFSET_SINGLESHOT; -pub const MOD_TAI: c_uint = ADJ_TAI; -pub const MOD_MICRO: 
c_uint = ADJ_MICRO; -pub const MOD_NANO: c_uint = ADJ_NANO; -pub const STA_PLL: c_int = 0x0001; -pub const STA_PPSFREQ: c_int = 0x0002; -pub const STA_PPSTIME: c_int = 0x0004; -pub const STA_FLL: c_int = 0x0008; -pub const STA_INS: c_int = 0x0010; -pub const STA_DEL: c_int = 0x0020; -pub const STA_UNSYNC: c_int = 0x0040; -pub const STA_FREQHOLD: c_int = 0x0080; -pub const STA_PPSSIGNAL: c_int = 0x0100; -pub const STA_PPSJITTER: c_int = 0x0200; -pub const STA_PPSWANDER: c_int = 0x0400; -pub const STA_PPSERROR: c_int = 0x0800; -pub const STA_CLOCKERR: c_int = 0x1000; -pub const STA_NANO: c_int = 0x2000; -pub const STA_MODE: c_int = 0x4000; -pub const STA_CLK: c_int = 0x8000; -pub const STA_RONLY: c_int = STA_PPSSIGNAL - | STA_PPSJITTER - | STA_PPSWANDER - | STA_PPSERROR - | STA_CLOCKERR - | STA_NANO - | STA_MODE - | STA_CLK; -pub const NTP_API: c_int = 4; -pub const TIME_OK: c_int = 0; -pub const TIME_INS: c_int = 1; -pub const TIME_DEL: c_int = 2; -pub const TIME_OOP: c_int = 3; -pub const TIME_WAIT: c_int = 4; -pub const TIME_ERROR: c_int = 5; -pub const TIME_BAD: c_int = TIME_ERROR; -pub const MAXTC: c_long = 6; - -// Portable GLOB_* flags are defined at the `linux_like` level. -// The following are GNU extensions. -pub const GLOB_PERIOD: c_int = 1 << 7; -pub const GLOB_ALTDIRFUNC: c_int = 1 << 9; -pub const GLOB_BRACE: c_int = 1 << 10; -pub const GLOB_NOMAGIC: c_int = 1 << 11; -pub const GLOB_TILDE: c_int = 1 << 12; -pub const GLOB_ONLYDIR: c_int = 1 << 13; -pub const GLOB_TILDE_CHECK: c_int = 1 << 14; - -pub const MADV_COLLAPSE: c_int = 25; - -cfg_if! { - if #[cfg(any( - target_arch = "arm", - target_arch = "x86", - target_arch = "x86_64", - target_arch = "s390x", - target_arch = "riscv64", - target_arch = "riscv32" - ))] { - pub const PTHREAD_STACK_MIN: size_t = 16384; - } else if #[cfg(any(target_arch = "sparc", target_arch = "sparc64"))] { - pub const PTHREAD_STACK_MIN: size_t = 0x6000; - } else { - pub const PTHREAD_STACK_MIN: size_t = 131072; - } -} -pub const PTHREAD_MUTEX_ADAPTIVE_NP: c_int = 3; - -pub const REG_STARTEND: c_int = 4; - -pub const REG_EEND: c_int = 14; -pub const REG_ESIZE: c_int = 15; -pub const REG_ERPAREN: c_int = 16; - -extern "C" { - pub fn fgetspent_r( - fp: *mut crate::FILE, - spbuf: *mut crate::spwd, - buf: *mut c_char, - buflen: size_t, - spbufp: *mut *mut crate::spwd, - ) -> c_int; - pub fn sgetspent_r( - s: *const c_char, - spbuf: *mut crate::spwd, - buf: *mut c_char, - buflen: size_t, - spbufp: *mut *mut crate::spwd, - ) -> c_int; - pub fn getspent_r( - spbuf: *mut crate::spwd, - buf: *mut c_char, - buflen: size_t, - spbufp: *mut *mut crate::spwd, - ) -> c_int; - pub fn qsort_r( - base: *mut c_void, - num: size_t, - size: size_t, - compar: Option c_int>, - arg: *mut c_void, - ); - #[cfg_attr(gnu_time_bits64, link_name = "__sendmmsg64")] - pub fn sendmmsg( - sockfd: c_int, - msgvec: *mut crate::mmsghdr, - vlen: c_uint, - flags: c_int, - ) -> c_int; - #[cfg_attr(gnu_time_bits64, link_name = "__recvmmsg64")] - pub fn recvmmsg( - sockfd: c_int, - msgvec: *mut crate::mmsghdr, - vlen: c_uint, - flags: c_int, - timeout: *mut crate::timespec, - ) -> c_int; - - pub fn getrlimit64(resource: crate::__rlimit_resource_t, rlim: *mut crate::rlimit64) -> c_int; - pub fn setrlimit64(resource: crate::__rlimit_resource_t, rlim: *const crate::rlimit64) - -> c_int; - #[cfg_attr(gnu_file_offset_bits64, link_name = "getrlimit64")] - pub fn getrlimit(resource: crate::__rlimit_resource_t, rlim: *mut crate::rlimit) -> c_int; - #[cfg_attr(gnu_file_offset_bits64, link_name = 
"setrlimit64")] - pub fn setrlimit(resource: crate::__rlimit_resource_t, rlim: *const crate::rlimit) -> c_int; - #[cfg_attr(gnu_file_offset_bits64, link_name = "prlimit64")] - pub fn prlimit( - pid: crate::pid_t, - resource: crate::__rlimit_resource_t, - new_limit: *const crate::rlimit, - old_limit: *mut crate::rlimit, - ) -> c_int; - pub fn prlimit64( - pid: crate::pid_t, - resource: crate::__rlimit_resource_t, - new_limit: *const crate::rlimit64, - old_limit: *mut crate::rlimit64, - ) -> c_int; - pub fn utmpname(file: *const c_char) -> c_int; - pub fn utmpxname(file: *const c_char) -> c_int; - pub fn getutxent() -> *mut utmpx; - pub fn getutxid(ut: *const utmpx) -> *mut utmpx; - pub fn getutxline(ut: *const utmpx) -> *mut utmpx; - pub fn pututxline(ut: *const utmpx) -> *mut utmpx; - pub fn setutxent(); - pub fn endutxent(); - pub fn getpt() -> c_int; - pub fn mallopt(param: c_int, value: c_int) -> c_int; - #[cfg_attr(gnu_time_bits64, link_name = "__gettimeofday64")] - pub fn gettimeofday(tp: *mut crate::timeval, tz: *mut crate::timezone) -> c_int; - pub fn getentropy(buf: *mut c_void, buflen: size_t) -> c_int; - pub fn getrandom(buf: *mut c_void, buflen: size_t, flags: c_uint) -> ssize_t; - pub fn getauxval(type_: c_ulong) -> c_ulong; - - #[cfg_attr(gnu_time_bits64, link_name = "___adjtimex64")] - pub fn adjtimex(buf: *mut timex) -> c_int; - #[cfg_attr(gnu_time_bits64, link_name = "___adjtimex64")] - pub fn ntp_adjtime(buf: *mut timex) -> c_int; - #[cfg_attr(not(gnu_time_bits64), link_name = "ntp_gettimex")] - #[cfg_attr(gnu_time_bits64, link_name = "__ntp_gettime64")] - pub fn ntp_gettime(buf: *mut ntptimeval) -> c_int; - #[cfg_attr(gnu_time_bits64, link_name = "__clock_adjtime64")] - pub fn clock_adjtime(clk_id: crate::clockid_t, buf: *mut crate::timex) -> c_int; - - pub fn fanotify_mark( - fd: c_int, - flags: c_uint, - mask: u64, - dirfd: c_int, - path: *const c_char, - ) -> c_int; - #[cfg_attr(gnu_file_offset_bits64, link_name = "preadv64v2")] - pub fn preadv2( - fd: c_int, - iov: *const crate::iovec, - iovcnt: c_int, - offset: off_t, - flags: c_int, - ) -> ssize_t; - #[cfg_attr(gnu_file_offset_bits64, link_name = "pwritev64v2")] - pub fn pwritev2( - fd: c_int, - iov: *const crate::iovec, - iovcnt: c_int, - offset: off_t, - flags: c_int, - ) -> ssize_t; - pub fn preadv64v2( - fd: c_int, - iov: *const crate::iovec, - iovcnt: c_int, - offset: off64_t, - flags: c_int, - ) -> ssize_t; - pub fn pwritev64v2( - fd: c_int, - iov: *const crate::iovec, - iovcnt: c_int, - offset: off64_t, - flags: c_int, - ) -> ssize_t; - pub fn renameat2( - olddirfd: c_int, - oldpath: *const c_char, - newdirfd: c_int, - newpath: *const c_char, - flags: c_uint, - ) -> c_int; - - // Added in `glibc` 2.25 - pub fn explicit_bzero(s: *mut c_void, len: size_t); - // Added in `glibc` 2.29 - pub fn reallocarray(ptr: *mut c_void, nmemb: size_t, size: size_t) -> *mut c_void; - - pub fn ctermid(s: *mut c_char) -> *mut c_char; - pub fn backtrace(buf: *mut *mut c_void, sz: c_int) -> c_int; - pub fn backtrace_symbols(buffer: *const *mut c_void, len: c_int) -> *mut *mut c_char; - pub fn backtrace_symbols_fd(buffer: *const *mut c_void, len: c_int, fd: c_int); - #[cfg_attr(gnu_time_bits64, link_name = "__glob64_time64")] - pub fn glob64( - pattern: *const c_char, - flags: c_int, - errfunc: Option c_int>, - pglob: *mut glob64_t, - ) -> c_int; - #[cfg_attr(gnu_time_bits64, link_name = "__globfree64_time64")] - pub fn globfree64(pglob: *mut glob64_t); - pub fn ptrace(request: c_uint, ...) 
-> c_long; - pub fn pthread_attr_getaffinity_np( - attr: *const crate::pthread_attr_t, - cpusetsize: size_t, - cpuset: *mut crate::cpu_set_t, - ) -> c_int; - pub fn pthread_attr_setaffinity_np( - attr: *mut crate::pthread_attr_t, - cpusetsize: size_t, - cpuset: *const crate::cpu_set_t, - ) -> c_int; - pub fn getpriority(which: crate::__priority_which_t, who: crate::id_t) -> c_int; - pub fn setpriority(which: crate::__priority_which_t, who: crate::id_t, prio: c_int) -> c_int; - pub fn pthread_rwlockattr_getkind_np( - attr: *const crate::pthread_rwlockattr_t, - val: *mut c_int, - ) -> c_int; - pub fn pthread_rwlockattr_setkind_np( - attr: *mut crate::pthread_rwlockattr_t, - val: c_int, - ) -> c_int; - pub fn pthread_sigqueue(thread: crate::pthread_t, sig: c_int, value: crate::sigval) -> c_int; - pub fn mallinfo() -> crate::mallinfo; - pub fn mallinfo2() -> crate::mallinfo2; - pub fn malloc_stats(); - pub fn malloc_info(options: c_int, stream: *mut crate::FILE) -> c_int; - pub fn malloc_usable_size(ptr: *mut c_void) -> size_t; - pub fn getpwent_r( - pwd: *mut crate::passwd, - buf: *mut c_char, - buflen: size_t, - result: *mut *mut crate::passwd, - ) -> c_int; - pub fn getgrent_r( - grp: *mut crate::group, - buf: *mut c_char, - buflen: size_t, - result: *mut *mut crate::group, - ) -> c_int; - pub fn fgetpwent_r( - stream: *mut crate::FILE, - pwd: *mut crate::passwd, - buf: *mut c_char, - buflen: size_t, - result: *mut *mut crate::passwd, - ) -> c_int; - pub fn fgetgrent_r( - stream: *mut crate::FILE, - grp: *mut crate::group, - buf: *mut c_char, - buflen: size_t, - result: *mut *mut crate::group, - ) -> c_int; - - pub fn putpwent(p: *const crate::passwd, stream: *mut crate::FILE) -> c_int; - pub fn putgrent(grp: *const crate::group, stream: *mut crate::FILE) -> c_int; - - pub fn sethostid(hostid: c_long) -> c_int; - - pub fn memfd_create(name: *const c_char, flags: c_uint) -> c_int; - pub fn mlock2(addr: *const c_void, len: size_t, flags: c_uint) -> c_int; - - pub fn euidaccess(pathname: *const c_char, mode: c_int) -> c_int; - pub fn eaccess(pathname: *const c_char, mode: c_int) -> c_int; - - pub fn asctime_r(tm: *const crate::tm, buf: *mut c_char) -> *mut c_char; - #[cfg_attr(gnu_time_bits64, link_name = "__ctime64_r")] - pub fn ctime_r(timep: *const time_t, buf: *mut c_char) -> *mut c_char; - - pub fn dirname(path: *mut c_char) -> *mut c_char; - /// POSIX version of `basename(3)`, defined in `libgen.h`. - #[link_name = "__xpg_basename"] - pub fn posix_basename(path: *mut c_char) -> *mut c_char; - /// GNU version of `basename(3)`, defined in `string.h`. 
- #[link_name = "basename"] - pub fn gnu_basename(path: *const c_char) -> *mut c_char; - pub fn dlmopen(lmid: Lmid_t, filename: *const c_char, flag: c_int) -> *mut c_void; - pub fn dlinfo(handle: *mut c_void, request: c_int, info: *mut c_void) -> c_int; - pub fn dladdr1( - addr: *const c_void, - info: *mut crate::Dl_info, - extra_info: *mut *mut c_void, - flags: c_int, - ) -> c_int; - pub fn dlvsym( - handle: *mut c_void, - symbol: *const c_char, - version: *const c_char, - ) -> *mut c_void; - pub fn malloc_trim(__pad: size_t) -> c_int; - pub fn gnu_get_libc_release() -> *const c_char; - pub fn gnu_get_libc_version() -> *const c_char; - - // posix/spawn.h - // Added in `glibc` 2.29 - pub fn posix_spawn_file_actions_addchdir_np( - actions: *mut crate::posix_spawn_file_actions_t, - path: *const c_char, - ) -> c_int; - // Added in `glibc` 2.29 - pub fn posix_spawn_file_actions_addfchdir_np( - actions: *mut crate::posix_spawn_file_actions_t, - fd: c_int, - ) -> c_int; - // Added in `glibc` 2.34 - pub fn posix_spawn_file_actions_addclosefrom_np( - actions: *mut crate::posix_spawn_file_actions_t, - from: c_int, - ) -> c_int; - // Added in `glibc` 2.35 - pub fn posix_spawn_file_actions_addtcsetpgrp_np( - actions: *mut crate::posix_spawn_file_actions_t, - tcfd: c_int, - ) -> c_int; - - // mntent.h - pub fn getmntent_r( - stream: *mut crate::FILE, - mntbuf: *mut crate::mntent, - buf: *mut c_char, - buflen: c_int, - ) -> *mut crate::mntent; - - pub fn execveat( - dirfd: c_int, - pathname: *const c_char, - argv: *const *mut c_char, - envp: *const *mut c_char, - flags: c_int, - ) -> c_int; - - // Added in `glibc` 2.34 - pub fn close_range(first: c_uint, last: c_uint, flags: c_int) -> c_int; - - pub fn mq_notify(mqdes: crate::mqd_t, sevp: *const crate::sigevent) -> c_int; - - #[cfg_attr(gnu_time_bits64, link_name = "__epoll_pwait2_time64")] - pub fn epoll_pwait2( - epfd: c_int, - events: *mut crate::epoll_event, - maxevents: c_int, - timeout: *const crate::timespec, - sigmask: *const crate::sigset_t, - ) -> c_int; - - pub fn mempcpy(dest: *mut c_void, src: *const c_void, n: size_t) -> *mut c_void; -} - -cfg_if! { - if #[cfg(any( - target_arch = "x86", - target_arch = "arm", - target_arch = "m68k", - target_arch = "csky", - target_arch = "mips", - target_arch = "mips32r6", - target_arch = "powerpc", - target_arch = "sparc", - target_arch = "riscv32" - ))] { - mod b32; - pub use self::b32::*; - } else if #[cfg(any( - target_arch = "x86_64", - target_arch = "aarch64", - target_arch = "powerpc64", - target_arch = "mips64", - target_arch = "mips64r6", - target_arch = "s390x", - target_arch = "sparc64", - target_arch = "riscv64", - target_arch = "loongarch64" - ))] { - mod b64; - pub use self::b64::*; - } else { - // Unknown target_arch - } -} diff --git a/vendor/libc/src/unix/linux_like/linux/mod.rs b/vendor/libc/src/unix/linux_like/linux/mod.rs deleted file mode 100644 index 14401077479ed4..00000000000000 --- a/vendor/libc/src/unix/linux_like/linux/mod.rs +++ /dev/null @@ -1,6830 +0,0 @@ -//! 
Linux-specific definitions for linux-like values - -use crate::prelude::*; -use crate::{sock_filter, _IO, _IOR, _IOW, _IOWR}; - -pub type useconds_t = u32; -pub type dev_t = u64; -pub type socklen_t = u32; -pub type mode_t = u32; -pub type ino64_t = u64; -pub type off64_t = i64; -pub type blkcnt64_t = i64; -pub type rlim64_t = u64; -pub type mqd_t = c_int; -pub type nfds_t = c_ulong; -pub type nl_item = c_int; -pub type idtype_t = c_uint; -pub type loff_t = c_longlong; -pub type pthread_key_t = c_uint; -pub type pthread_once_t = c_int; -pub type pthread_spinlock_t = c_int; -pub type __kernel_fsid_t = __c_anonymous__kernel_fsid_t; -pub type __kernel_clockid_t = c_int; - -pub type __u8 = c_uchar; -pub type __u16 = c_ushort; -pub type __s16 = c_short; -pub type __u32 = c_uint; -pub type __s32 = c_int; - -pub type Elf32_Half = u16; -pub type Elf32_Word = u32; -pub type Elf32_Off = u32; -pub type Elf32_Addr = u32; -pub type Elf32_Xword = u64; -pub type Elf32_Sword = i32; - -pub type Elf64_Half = u16; -pub type Elf64_Word = u32; -pub type Elf64_Off = u64; -pub type Elf64_Addr = u64; -pub type Elf64_Xword = u64; -pub type Elf64_Sxword = i64; -pub type Elf64_Sword = i32; - -pub type Elf32_Section = u16; -pub type Elf64_Section = u16; - -pub type Elf32_Relr = Elf32_Word; -pub type Elf64_Relr = Elf32_Xword; -pub type Elf32_Rel = __c_anonymous_elf32_rel; -pub type Elf64_Rel = __c_anonymous_elf64_rel; - -cfg_if! { - if #[cfg(not(target_arch = "sparc64"))] { - pub type Elf32_Rela = __c_anonymous_elf32_rela; - pub type Elf64_Rela = __c_anonymous_elf64_rela; - } -} - -pub type iconv_t = *mut c_void; - -// linux/sctp.h -pub type sctp_assoc_t = __s32; - -pub type eventfd_t = u64; - -cfg_if! { - if #[cfg(not(target_env = "gnu"))] { - missing! { - #[derive(Debug)] - pub enum fpos64_t {} // FIXME(linux): fill this out with a struct - } - } -} - -e! { - #[repr(u32)] - pub enum tpacket_versions { - TPACKET_V1, - TPACKET_V2, - TPACKET_V3, - } -} - -c_enum! { - pub enum pid_type { - PIDTYPE_PID, - PIDTYPE_TGID, - PIDTYPE_PGID, - PIDTYPE_SID, - PIDTYPE_MAX, - } -} - -s! 
{ - pub struct glob_t { - pub gl_pathc: size_t, - pub gl_pathv: *mut *mut c_char, - pub gl_offs: size_t, - pub gl_flags: c_int, - - __unused1: *mut c_void, - __unused2: *mut c_void, - __unused3: *mut c_void, - __unused4: *mut c_void, - __unused5: *mut c_void, - } - - pub struct passwd { - pub pw_name: *mut c_char, - pub pw_passwd: *mut c_char, - pub pw_uid: crate::uid_t, - pub pw_gid: crate::gid_t, - pub pw_gecos: *mut c_char, - pub pw_dir: *mut c_char, - pub pw_shell: *mut c_char, - } - - pub struct spwd { - pub sp_namp: *mut c_char, - pub sp_pwdp: *mut c_char, - pub sp_lstchg: c_long, - pub sp_min: c_long, - pub sp_max: c_long, - pub sp_warn: c_long, - pub sp_inact: c_long, - pub sp_expire: c_long, - pub sp_flag: c_ulong, - } - - pub struct dqblk { - pub dqb_bhardlimit: u64, - pub dqb_bsoftlimit: u64, - pub dqb_curspace: u64, - pub dqb_ihardlimit: u64, - pub dqb_isoftlimit: u64, - pub dqb_curinodes: u64, - pub dqb_btime: u64, - pub dqb_itime: u64, - pub dqb_valid: u32, - } - - pub struct signalfd_siginfo { - pub ssi_signo: u32, - pub ssi_errno: i32, - pub ssi_code: i32, - pub ssi_pid: u32, - pub ssi_uid: u32, - pub ssi_fd: i32, - pub ssi_tid: u32, - pub ssi_band: u32, - pub ssi_overrun: u32, - pub ssi_trapno: u32, - pub ssi_status: i32, - pub ssi_int: i32, - pub ssi_ptr: u64, - pub ssi_utime: u64, - pub ssi_stime: u64, - pub ssi_addr: u64, - pub ssi_addr_lsb: u16, - _pad2: u16, - pub ssi_syscall: i32, - pub ssi_call_addr: u64, - pub ssi_arch: u32, - _pad: [u8; 28], - } - - pub struct itimerspec { - pub it_interval: crate::timespec, - pub it_value: crate::timespec, - } - - pub struct fsid_t { - __val: [c_int; 2], - } - - pub struct fanout_args { - #[cfg(target_endian = "little")] - pub id: __u16, - pub type_flags: __u16, - #[cfg(target_endian = "big")] - pub id: __u16, - pub max_num_members: __u32, - } - - pub struct packet_mreq { - pub mr_ifindex: c_int, - pub mr_type: c_ushort, - pub mr_alen: c_ushort, - pub mr_address: [c_uchar; 8], - } - - #[deprecated(since = "0.2.70", note = "sockaddr_ll type must be used instead")] - pub struct sockaddr_pkt { - pub spkt_family: c_ushort, - pub spkt_device: [c_uchar; 14], - pub spkt_protocol: c_ushort, - } - - pub struct tpacket_auxdata { - pub tp_status: __u32, - pub tp_len: __u32, - pub tp_snaplen: __u32, - pub tp_mac: __u16, - pub tp_net: __u16, - pub tp_vlan_tci: __u16, - pub tp_vlan_tpid: __u16, - } - - pub struct tpacket_hdr { - pub tp_status: c_ulong, - pub tp_len: c_uint, - pub tp_snaplen: c_uint, - pub tp_mac: c_ushort, - pub tp_net: c_ushort, - pub tp_sec: c_uint, - pub tp_usec: c_uint, - } - - pub struct tpacket_hdr_variant1 { - pub tp_rxhash: __u32, - pub tp_vlan_tci: __u32, - pub tp_vlan_tpid: __u16, - pub tp_padding: __u16, - } - - pub struct tpacket2_hdr { - pub tp_status: __u32, - pub tp_len: __u32, - pub tp_snaplen: __u32, - pub tp_mac: __u16, - pub tp_net: __u16, - pub tp_sec: __u32, - pub tp_nsec: __u32, - pub tp_vlan_tci: __u16, - pub tp_vlan_tpid: __u16, - pub tp_padding: [__u8; 4], - } - - pub struct tpacket_req { - pub tp_block_size: c_uint, - pub tp_block_nr: c_uint, - pub tp_frame_size: c_uint, - pub tp_frame_nr: c_uint, - } - - pub struct tpacket_req3 { - pub tp_block_size: c_uint, - pub tp_block_nr: c_uint, - pub tp_frame_size: c_uint, - pub tp_frame_nr: c_uint, - pub tp_retire_blk_tov: c_uint, - pub tp_sizeof_priv: c_uint, - pub tp_feature_req_word: c_uint, - } - - #[repr(align(8))] - pub struct tpacket_rollover_stats { - pub tp_all: crate::__u64, - pub tp_huge: crate::__u64, - pub tp_failed: crate::__u64, - } - - pub 
struct tpacket_stats { - pub tp_packets: c_uint, - pub tp_drops: c_uint, - } - - pub struct tpacket_stats_v3 { - pub tp_packets: c_uint, - pub tp_drops: c_uint, - pub tp_freeze_q_cnt: c_uint, - } - - pub struct tpacket3_hdr { - pub tp_next_offset: __u32, - pub tp_sec: __u32, - pub tp_nsec: __u32, - pub tp_snaplen: __u32, - pub tp_len: __u32, - pub tp_status: __u32, - pub tp_mac: __u16, - pub tp_net: __u16, - pub hv1: crate::tpacket_hdr_variant1, - pub tp_padding: [__u8; 8], - } - - pub struct tpacket_bd_ts { - pub ts_sec: c_uint, - pub ts_usec: c_uint, - } - - #[repr(align(8))] - pub struct tpacket_hdr_v1 { - pub block_status: __u32, - pub num_pkts: __u32, - pub offset_to_first_pkt: __u32, - pub blk_len: __u32, - pub seq_num: crate::__u64, - pub ts_first_pkt: crate::tpacket_bd_ts, - pub ts_last_pkt: crate::tpacket_bd_ts, - } - - pub struct cpu_set_t { - #[cfg(all(target_pointer_width = "32", not(target_arch = "x86_64")))] - bits: [u32; 32], - #[cfg(not(all(target_pointer_width = "32", not(target_arch = "x86_64"))))] - bits: [u64; 16], - } - - pub struct if_nameindex { - pub if_index: c_uint, - pub if_name: *mut c_char, - } - - // System V IPC - pub struct msginfo { - pub msgpool: c_int, - pub msgmap: c_int, - pub msgmax: c_int, - pub msgmnb: c_int, - pub msgmni: c_int, - pub msgssz: c_int, - pub msgtql: c_int, - pub msgseg: c_ushort, - } - - pub struct sembuf { - pub sem_num: c_ushort, - pub sem_op: c_short, - pub sem_flg: c_short, - } - - pub struct input_event { - // FIXME(1.0): Change to the commented variant, see https://github.com/rust-lang/libc/pull/4148#discussion_r1857511742 - #[cfg(any(target_pointer_width = "64", not(linux_time_bits64)))] - pub time: crate::timeval, - // #[cfg(any(target_pointer_width = "64", not(linux_time_bits64)))] - // pub input_event_sec: time_t, - // #[cfg(any(target_pointer_width = "64", not(linux_time_bits64)))] - // pub input_event_usec: suseconds_t, - // #[cfg(target_arch = "sparc64")] - // _pad1: c_int, - #[cfg(all(target_pointer_width = "32", linux_time_bits64))] - pub input_event_sec: c_ulong, - - #[cfg(all(target_pointer_width = "32", linux_time_bits64))] - pub input_event_usec: c_ulong, - - pub type_: __u16, - pub code: __u16, - pub value: __s32, - } - - pub struct input_id { - pub bustype: __u16, - pub vendor: __u16, - pub product: __u16, - pub version: __u16, - } - - pub struct input_absinfo { - pub value: __s32, - pub minimum: __s32, - pub maximum: __s32, - pub fuzz: __s32, - pub flat: __s32, - pub resolution: __s32, - } - - pub struct input_keymap_entry { - pub flags: __u8, - pub len: __u8, - pub index: __u16, - pub keycode: __u32, - pub scancode: [__u8; 32], - } - - pub struct input_mask { - pub type_: __u32, - pub codes_size: __u32, - pub codes_ptr: crate::__u64, - } - - pub struct ff_replay { - pub length: __u16, - pub delay: __u16, - } - - pub struct ff_trigger { - pub button: __u16, - pub interval: __u16, - } - - pub struct ff_envelope { - pub attack_length: __u16, - pub attack_level: __u16, - pub fade_length: __u16, - pub fade_level: __u16, - } - - pub struct ff_constant_effect { - pub level: __s16, - pub envelope: ff_envelope, - } - - pub struct ff_ramp_effect { - pub start_level: __s16, - pub end_level: __s16, - pub envelope: ff_envelope, - } - - pub struct ff_condition_effect { - pub right_saturation: __u16, - pub left_saturation: __u16, - - pub right_coeff: __s16, - pub left_coeff: __s16, - - pub deadband: __u16, - pub center: __s16, - } - - pub struct ff_periodic_effect { - pub waveform: __u16, - pub period: __u16, - pub magnitude: 
__s16, - pub offset: __s16, - pub phase: __u16, - - pub envelope: ff_envelope, - - pub custom_len: __u32, - pub custom_data: *mut __s16, - } - - pub struct ff_rumble_effect { - pub strong_magnitude: __u16, - pub weak_magnitude: __u16, - } - - pub struct ff_effect { - pub type_: __u16, - pub id: __s16, - pub direction: __u16, - pub trigger: ff_trigger, - pub replay: ff_replay, - // FIXME(1.0): this is actually a union - #[cfg(target_pointer_width = "64")] - pub u: [u64; 4], - #[cfg(target_pointer_width = "32")] - pub u: [u32; 7], - } - - pub struct uinput_ff_upload { - pub request_id: __u32, - pub retval: __s32, - pub effect: ff_effect, - pub old: ff_effect, - } - - pub struct uinput_ff_erase { - pub request_id: __u32, - pub retval: __s32, - pub effect_id: __u32, - } - - pub struct uinput_abs_setup { - pub code: __u16, - pub absinfo: input_absinfo, - } - - pub struct dl_phdr_info { - #[cfg(target_pointer_width = "64")] - pub dlpi_addr: Elf64_Addr, - #[cfg(target_pointer_width = "32")] - pub dlpi_addr: Elf32_Addr, - - pub dlpi_name: *const c_char, - - #[cfg(target_pointer_width = "64")] - pub dlpi_phdr: *const Elf64_Phdr, - #[cfg(target_pointer_width = "32")] - pub dlpi_phdr: *const Elf32_Phdr, - - #[cfg(target_pointer_width = "64")] - pub dlpi_phnum: Elf64_Half, - #[cfg(target_pointer_width = "32")] - pub dlpi_phnum: Elf32_Half, - - // As of uClibc 1.0.36, the following fields are - // gated behind a "#if 0" block which always evaluates - // to false. So I'm just removing these, and if uClibc changes - // the #if block in the future to include the following fields, these - // will probably need including here. tsidea, skrap - // QNX (NTO) platform does not define these fields - #[cfg(not(any(target_env = "uclibc", target_os = "nto")))] - pub dlpi_adds: c_ulonglong, - #[cfg(not(any(target_env = "uclibc", target_os = "nto")))] - pub dlpi_subs: c_ulonglong, - #[cfg(not(any(target_env = "uclibc", target_os = "nto")))] - pub dlpi_tls_modid: size_t, - #[cfg(not(any(target_env = "uclibc", target_os = "nto")))] - pub dlpi_tls_data: *mut c_void, - } - - pub struct Elf32_Ehdr { - pub e_ident: [c_uchar; 16], - pub e_type: Elf32_Half, - pub e_machine: Elf32_Half, - pub e_version: Elf32_Word, - pub e_entry: Elf32_Addr, - pub e_phoff: Elf32_Off, - pub e_shoff: Elf32_Off, - pub e_flags: Elf32_Word, - pub e_ehsize: Elf32_Half, - pub e_phentsize: Elf32_Half, - pub e_phnum: Elf32_Half, - pub e_shentsize: Elf32_Half, - pub e_shnum: Elf32_Half, - pub e_shstrndx: Elf32_Half, - } - - pub struct Elf64_Ehdr { - pub e_ident: [c_uchar; 16], - pub e_type: Elf64_Half, - pub e_machine: Elf64_Half, - pub e_version: Elf64_Word, - pub e_entry: Elf64_Addr, - pub e_phoff: Elf64_Off, - pub e_shoff: Elf64_Off, - pub e_flags: Elf64_Word, - pub e_ehsize: Elf64_Half, - pub e_phentsize: Elf64_Half, - pub e_phnum: Elf64_Half, - pub e_shentsize: Elf64_Half, - pub e_shnum: Elf64_Half, - pub e_shstrndx: Elf64_Half, - } - - pub struct Elf32_Sym { - pub st_name: Elf32_Word, - pub st_value: Elf32_Addr, - pub st_size: Elf32_Word, - pub st_info: c_uchar, - pub st_other: c_uchar, - pub st_shndx: Elf32_Section, - } - - pub struct Elf64_Sym { - pub st_name: Elf64_Word, - pub st_info: c_uchar, - pub st_other: c_uchar, - pub st_shndx: Elf64_Section, - pub st_value: Elf64_Addr, - pub st_size: Elf64_Xword, - } - - pub struct Elf32_Phdr { - pub p_type: Elf32_Word, - pub p_offset: Elf32_Off, - pub p_vaddr: Elf32_Addr, - pub p_paddr: Elf32_Addr, - pub p_filesz: Elf32_Word, - pub p_memsz: Elf32_Word, - pub p_flags: Elf32_Word, - pub p_align: 
Elf32_Word, - } - - pub struct Elf64_Phdr { - pub p_type: Elf64_Word, - pub p_flags: Elf64_Word, - pub p_offset: Elf64_Off, - pub p_vaddr: Elf64_Addr, - pub p_paddr: Elf64_Addr, - pub p_filesz: Elf64_Xword, - pub p_memsz: Elf64_Xword, - pub p_align: Elf64_Xword, - } - - pub struct Elf32_Shdr { - pub sh_name: Elf32_Word, - pub sh_type: Elf32_Word, - pub sh_flags: Elf32_Word, - pub sh_addr: Elf32_Addr, - pub sh_offset: Elf32_Off, - pub sh_size: Elf32_Word, - pub sh_link: Elf32_Word, - pub sh_info: Elf32_Word, - pub sh_addralign: Elf32_Word, - pub sh_entsize: Elf32_Word, - } - - pub struct Elf64_Shdr { - pub sh_name: Elf64_Word, - pub sh_type: Elf64_Word, - pub sh_flags: Elf64_Xword, - pub sh_addr: Elf64_Addr, - pub sh_offset: Elf64_Off, - pub sh_size: Elf64_Xword, - pub sh_link: Elf64_Word, - pub sh_info: Elf64_Word, - pub sh_addralign: Elf64_Xword, - pub sh_entsize: Elf64_Xword, - } - - pub struct __c_anonymous_elf32_rel { - pub r_offset: Elf32_Addr, - pub r_info: Elf32_Word, - } - - pub struct __c_anonymous_elf64_rel { - pub r_offset: Elf64_Addr, - pub r_info: Elf64_Xword, - } - - pub struct __c_anonymous__kernel_fsid_t { - pub val: [c_int; 2], - } - - pub struct ucred { - pub pid: crate::pid_t, - pub uid: crate::uid_t, - pub gid: crate::gid_t, - } - - pub struct mntent { - pub mnt_fsname: *mut c_char, - pub mnt_dir: *mut c_char, - pub mnt_type: *mut c_char, - pub mnt_opts: *mut c_char, - pub mnt_freq: c_int, - pub mnt_passno: c_int, - } - - pub struct posix_spawn_file_actions_t { - __allocated: c_int, - __used: c_int, - __actions: *mut c_int, - __pad: [c_int; 16], - } - - pub struct posix_spawnattr_t { - __flags: c_short, - __pgrp: crate::pid_t, - __sd: crate::sigset_t, - __ss: crate::sigset_t, - #[cfg(any(target_env = "musl", target_env = "ohos"))] - __prio: c_int, - #[cfg(not(any(target_env = "musl", target_env = "ohos")))] - __sp: crate::sched_param, - __policy: c_int, - __pad: [c_int; 16], - } - - pub struct genlmsghdr { - pub cmd: u8, - pub version: u8, - pub reserved: u16, - } - - pub struct in6_pktinfo { - pub ipi6_addr: crate::in6_addr, - pub ipi6_ifindex: c_uint, - } - - pub struct arpd_request { - pub req: c_ushort, - pub ip: u32, - pub dev: c_ulong, - pub stamp: c_ulong, - pub updated: c_ulong, - pub ha: [c_uchar; crate::MAX_ADDR_LEN], - } - - pub struct inotify_event { - pub wd: c_int, - pub mask: u32, - pub cookie: u32, - pub len: u32, - } - - pub struct fanotify_response { - pub fd: c_int, - pub response: __u32, - } - - pub struct fanotify_event_info_header { - pub info_type: __u8, - pub pad: __u8, - pub len: __u16, - } - - pub struct fanotify_event_info_fid { - pub hdr: fanotify_event_info_header, - pub fsid: crate::__kernel_fsid_t, - pub handle: [c_uchar; 0], - } - - pub struct sockaddr_vm { - pub svm_family: crate::sa_family_t, - pub svm_reserved1: c_ushort, - pub svm_port: c_uint, - pub svm_cid: c_uint, - pub svm_zero: [u8; 4], - } - - pub struct regmatch_t { - pub rm_so: regoff_t, - pub rm_eo: regoff_t, - } - - pub struct sock_extended_err { - pub ee_errno: u32, - pub ee_origin: u8, - pub ee_type: u8, - pub ee_code: u8, - pub ee_pad: u8, - pub ee_info: u32, - pub ee_data: u32, - } - - // linux/seccomp.h - pub struct seccomp_data { - pub nr: c_int, - pub arch: __u32, - pub instruction_pointer: crate::__u64, - pub args: [crate::__u64; 6], - } - - pub struct seccomp_notif_sizes { - pub seccomp_notif: __u16, - pub seccomp_notif_resp: __u16, - pub seccomp_data: __u16, - } - - pub struct seccomp_notif { - pub id: crate::__u64, - pub pid: __u32, - pub flags: __u32, - pub data: 
seccomp_data, - } - - pub struct seccomp_notif_resp { - pub id: crate::__u64, - pub val: crate::__s64, - pub error: __s32, - pub flags: __u32, - } - - pub struct seccomp_notif_addfd { - pub id: crate::__u64, - pub flags: __u32, - pub srcfd: __u32, - pub newfd: __u32, - pub newfd_flags: __u32, - } - - pub struct nlmsghdr { - pub nlmsg_len: u32, - pub nlmsg_type: u16, - pub nlmsg_flags: u16, - pub nlmsg_seq: u32, - pub nlmsg_pid: u32, - } - - pub struct nlmsgerr { - pub error: c_int, - pub msg: nlmsghdr, - } - - pub struct nlattr { - pub nla_len: u16, - pub nla_type: u16, - } - - pub struct __c_anonymous_ifru_map { - pub mem_start: c_ulong, - pub mem_end: c_ulong, - pub base_addr: c_ushort, - pub irq: c_uchar, - pub dma: c_uchar, - pub port: c_uchar, - } - - pub struct in6_ifreq { - pub ifr6_addr: crate::in6_addr, - pub ifr6_prefixlen: u32, - pub ifr6_ifindex: c_int, - } - - pub struct option { - pub name: *const c_char, - pub has_arg: c_int, - pub flag: *mut c_int, - pub val: c_int, - } - - // linux/openat2.h - #[non_exhaustive] - pub struct open_how { - pub flags: crate::__u64, - pub mode: crate::__u64, - pub resolve: crate::__u64, - } - - // linux/ptp_clock.h - pub struct ptp_clock_time { - pub sec: crate::__s64, - pub nsec: __u32, - pub reserved: __u32, - } - - pub struct ptp_extts_request { - pub index: c_uint, - pub flags: c_uint, - pub rsv: [c_uint; 2], - } - - pub struct ptp_sys_offset_extended { - pub n_samples: c_uint, - pub clockid: __kernel_clockid_t, - pub rsv: [c_uint; 2], - pub ts: [[ptp_clock_time; 3]; PTP_MAX_SAMPLES as usize], - } - - pub struct ptp_sys_offset_precise { - pub device: ptp_clock_time, - pub sys_realtime: ptp_clock_time, - pub sys_monoraw: ptp_clock_time, - pub rsv: [c_uint; 4], - } - - pub struct ptp_extts_event { - pub t: ptp_clock_time, - index: c_uint, - flags: c_uint, - rsv: [c_uint; 2], - } - - // linux/sctp.h - - pub struct sctp_initmsg { - pub sinit_num_ostreams: __u16, - pub sinit_max_instreams: __u16, - pub sinit_max_attempts: __u16, - pub sinit_max_init_timeo: __u16, - } - - pub struct sctp_sndrcvinfo { - pub sinfo_stream: __u16, - pub sinfo_ssn: __u16, - pub sinfo_flags: __u16, - pub sinfo_ppid: __u32, - pub sinfo_context: __u32, - pub sinfo_timetolive: __u32, - pub sinfo_tsn: __u32, - pub sinfo_cumtsn: __u32, - pub sinfo_assoc_id: crate::sctp_assoc_t, - } - - pub struct sctp_sndinfo { - pub snd_sid: __u16, - pub snd_flags: __u16, - pub snd_ppid: __u32, - pub snd_context: __u32, - pub snd_assoc_id: crate::sctp_assoc_t, - } - - pub struct sctp_rcvinfo { - pub rcv_sid: __u16, - pub rcv_ssn: __u16, - pub rcv_flags: __u16, - pub rcv_ppid: __u32, - pub rcv_tsn: __u32, - pub rcv_cumtsn: __u32, - pub rcv_context: __u32, - pub rcv_assoc_id: crate::sctp_assoc_t, - } - - pub struct sctp_nxtinfo { - pub nxt_sid: __u16, - pub nxt_flags: __u16, - pub nxt_ppid: __u32, - pub nxt_length: __u32, - pub nxt_assoc_id: crate::sctp_assoc_t, - } - - pub struct sctp_prinfo { - pub pr_policy: __u16, - pub pr_value: __u32, - } - - pub struct sctp_authinfo { - pub auth_keynumber: __u16, - } - - pub struct rlimit64 { - pub rlim_cur: rlim64_t, - pub rlim_max: rlim64_t, - } - - // linux/tls.h - - pub struct tls_crypto_info { - pub version: __u16, - pub cipher_type: __u16, - } - - pub struct tls12_crypto_info_aes_gcm_128 { - pub info: tls_crypto_info, - pub iv: [c_uchar; TLS_CIPHER_AES_GCM_128_IV_SIZE], - pub key: [c_uchar; TLS_CIPHER_AES_GCM_128_KEY_SIZE], - pub salt: [c_uchar; TLS_CIPHER_AES_GCM_128_SALT_SIZE], - pub rec_seq: [c_uchar; TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE], - } 
- - pub struct tls12_crypto_info_aes_gcm_256 { - pub info: tls_crypto_info, - pub iv: [c_uchar; TLS_CIPHER_AES_GCM_256_IV_SIZE], - pub key: [c_uchar; TLS_CIPHER_AES_GCM_256_KEY_SIZE], - pub salt: [c_uchar; TLS_CIPHER_AES_GCM_256_SALT_SIZE], - pub rec_seq: [c_uchar; TLS_CIPHER_AES_GCM_256_REC_SEQ_SIZE], - } - - pub struct tls12_crypto_info_aes_ccm_128 { - pub info: tls_crypto_info, - pub iv: [c_uchar; TLS_CIPHER_AES_CCM_128_IV_SIZE], - pub key: [c_uchar; TLS_CIPHER_AES_CCM_128_KEY_SIZE], - pub salt: [c_uchar; TLS_CIPHER_AES_CCM_128_SALT_SIZE], - pub rec_seq: [c_uchar; TLS_CIPHER_AES_CCM_128_REC_SEQ_SIZE], - } - - pub struct tls12_crypto_info_chacha20_poly1305 { - pub info: tls_crypto_info, - pub iv: [c_uchar; TLS_CIPHER_CHACHA20_POLY1305_IV_SIZE], - pub key: [c_uchar; TLS_CIPHER_CHACHA20_POLY1305_KEY_SIZE], - pub salt: [c_uchar; TLS_CIPHER_CHACHA20_POLY1305_SALT_SIZE], - pub rec_seq: [c_uchar; TLS_CIPHER_CHACHA20_POLY1305_REC_SEQ_SIZE], - } - - pub struct tls12_crypto_info_sm4_gcm { - pub info: tls_crypto_info, - pub iv: [c_uchar; TLS_CIPHER_SM4_GCM_IV_SIZE], - pub key: [c_uchar; TLS_CIPHER_SM4_GCM_KEY_SIZE], - pub salt: [c_uchar; TLS_CIPHER_SM4_GCM_SALT_SIZE], - pub rec_seq: [c_uchar; TLS_CIPHER_SM4_GCM_REC_SEQ_SIZE], - } - - pub struct tls12_crypto_info_sm4_ccm { - pub info: tls_crypto_info, - pub iv: [c_uchar; TLS_CIPHER_SM4_CCM_IV_SIZE], - pub key: [c_uchar; TLS_CIPHER_SM4_CCM_KEY_SIZE], - pub salt: [c_uchar; TLS_CIPHER_SM4_CCM_SALT_SIZE], - pub rec_seq: [c_uchar; TLS_CIPHER_SM4_CCM_REC_SEQ_SIZE], - } - - pub struct tls12_crypto_info_aria_gcm_128 { - pub info: tls_crypto_info, - pub iv: [c_uchar; TLS_CIPHER_ARIA_GCM_128_IV_SIZE], - pub key: [c_uchar; TLS_CIPHER_ARIA_GCM_128_KEY_SIZE], - pub salt: [c_uchar; TLS_CIPHER_ARIA_GCM_128_SALT_SIZE], - pub rec_seq: [c_uchar; TLS_CIPHER_ARIA_GCM_128_REC_SEQ_SIZE], - } - - pub struct tls12_crypto_info_aria_gcm_256 { - pub info: tls_crypto_info, - pub iv: [c_uchar; TLS_CIPHER_ARIA_GCM_256_IV_SIZE], - pub key: [c_uchar; TLS_CIPHER_ARIA_GCM_256_KEY_SIZE], - pub salt: [c_uchar; TLS_CIPHER_ARIA_GCM_256_SALT_SIZE], - pub rec_seq: [c_uchar; TLS_CIPHER_ARIA_GCM_256_REC_SEQ_SIZE], - } - - // linux/wireless.h - - pub struct iw_param { - pub value: __s32, - pub fixed: __u8, - pub disabled: __u8, - pub flags: __u16, - } - - pub struct iw_point { - pub pointer: *mut c_void, - pub length: __u16, - pub flags: __u16, - } - - pub struct iw_freq { - pub m: __s32, - pub e: __s16, - pub i: __u8, - pub flags: __u8, - } - - pub struct iw_quality { - pub qual: __u8, - pub level: __u8, - pub noise: __u8, - pub updated: __u8, - } - - pub struct iw_discarded { - pub nwid: __u32, - pub code: __u32, - pub fragment: __u32, - pub retries: __u32, - pub misc: __u32, - } - - pub struct iw_missed { - pub beacon: __u32, - } - - pub struct iw_scan_req { - pub scan_type: __u8, - pub essid_len: __u8, - pub num_channels: __u8, - pub flags: __u8, - pub bssid: crate::sockaddr, - pub essid: [__u8; IW_ESSID_MAX_SIZE], - pub min_channel_time: __u32, - pub max_channel_time: __u32, - pub channel_list: [iw_freq; IW_MAX_FREQUENCIES], - } - - pub struct iw_encode_ext { - pub ext_flags: __u32, - pub tx_seq: [__u8; IW_ENCODE_SEQ_MAX_SIZE], - pub rx_seq: [__u8; IW_ENCODE_SEQ_MAX_SIZE], - pub addr: crate::sockaddr, - pub alg: __u16, - pub key_len: __u16, - pub key: [__u8; 0], - } - - pub struct iw_pmksa { - pub cmd: __u32, - pub bssid: crate::sockaddr, - pub pmkid: [__u8; IW_PMKID_LEN], - } - - pub struct iw_pmkid_cand { - pub flags: __u32, - pub index: __u32, - pub bssid: crate::sockaddr, - } - - 
pub struct iw_statistics { - pub status: __u16, - pub qual: iw_quality, - pub discard: iw_discarded, - pub miss: iw_missed, - } - - pub struct iw_range { - pub throughput: __u32, - pub min_nwid: __u32, - pub max_nwid: __u32, - pub old_num_channels: __u16, - pub old_num_frequency: __u8, - pub scan_capa: __u8, - pub event_capa: [__u32; 6], - pub sensitivity: __s32, - pub max_qual: iw_quality, - pub avg_qual: iw_quality, - pub num_bitrates: __u8, - pub bitrate: [__s32; IW_MAX_BITRATES], - pub min_rts: __s32, - pub max_rts: __s32, - pub min_frag: __s32, - pub max_frag: __s32, - pub min_pmp: __s32, - pub max_pmp: __s32, - pub min_pmt: __s32, - pub max_pmt: __s32, - pub pmp_flags: __u16, - pub pmt_flags: __u16, - pub pm_capa: __u16, - pub encoding_size: [__u16; IW_MAX_ENCODING_SIZES], - pub num_encoding_sizes: __u8, - pub max_encoding_tokens: __u8, - pub encoding_login_index: __u8, - pub txpower_capa: __u16, - pub num_txpower: __u8, - pub txpower: [__s32; IW_MAX_TXPOWER], - pub we_version_compiled: __u8, - pub we_version_source: __u8, - pub retry_capa: __u16, - pub retry_flags: __u16, - pub r_time_flags: __u16, - pub min_retry: __s32, - pub max_retry: __s32, - pub min_r_time: __s32, - pub max_r_time: __s32, - pub num_channels: __u16, - pub num_frequency: __u8, - pub freq: [iw_freq; IW_MAX_FREQUENCIES], - pub enc_capa: __u32, - } - - pub struct iw_priv_args { - pub cmd: __u32, - pub set_args: __u16, - pub get_args: __u16, - pub name: [c_char; crate::IFNAMSIZ], - } - - // #include - - pub struct epoll_params { - pub busy_poll_usecs: u32, - pub busy_poll_budget: u16, - pub prefer_busy_poll: u8, - pub __pad: u8, // Must be zero - } - - #[cfg_attr( - any( - target_pointer_width = "32", - target_arch = "x86_64", - target_arch = "powerpc64", - target_arch = "mips64", - target_arch = "mips64r6", - target_arch = "s390x", - target_arch = "sparc64", - target_arch = "aarch64", - target_arch = "riscv64", - target_arch = "riscv32", - target_arch = "loongarch64" - ), - repr(align(4)) - )] - #[cfg_attr( - not(any( - target_pointer_width = "32", - target_arch = "x86_64", - target_arch = "powerpc64", - target_arch = "mips64", - target_arch = "mips64r6", - target_arch = "s390x", - target_arch = "sparc64", - target_arch = "aarch64", - target_arch = "riscv64", - target_arch = "riscv32", - target_arch = "loongarch64" - )), - repr(align(8)) - )] - pub struct pthread_mutexattr_t { - #[doc(hidden)] - size: [u8; crate::__SIZEOF_PTHREAD_MUTEXATTR_T], - } - - #[cfg_attr( - any(target_env = "musl", target_env = "ohos", target_pointer_width = "32"), - repr(align(4)) - )] - #[cfg_attr( - all( - not(target_env = "musl"), - not(target_env = "ohos"), - target_pointer_width = "64" - ), - repr(align(8)) - )] - pub struct pthread_rwlockattr_t { - #[doc(hidden)] - size: [u8; crate::__SIZEOF_PTHREAD_RWLOCKATTR_T], - } - - #[repr(align(4))] - pub struct pthread_condattr_t { - #[doc(hidden)] - size: [u8; crate::__SIZEOF_PTHREAD_CONDATTR_T], - } - - #[repr(align(4))] - pub struct pthread_barrierattr_t { - #[doc(hidden)] - size: [u8; crate::__SIZEOF_PTHREAD_BARRIERATTR_T], - } - - #[cfg(not(target_env = "musl"))] - #[repr(align(8))] - pub struct fanotify_event_metadata { - pub event_len: __u32, - pub vers: __u8, - pub reserved: __u8, - pub metadata_len: __u16, - pub mask: __u64, - pub fd: c_int, - pub pid: c_int, - } - - // linux/ptp_clock.h - - pub struct ptp_sys_offset { - pub n_samples: c_uint, - pub rsv: [c_uint; 3], - // FIXME(garando): replace length with `2 * PTP_MAX_SAMPLES + 1` when supported - pub ts: [ptp_clock_time; 51], - } 
- - pub struct ptp_pin_desc { - pub name: [c_char; 64], - pub index: c_uint, - pub func: c_uint, - pub chan: c_uint, - pub rsv: [c_uint; 5], - } - - pub struct ptp_clock_caps { - pub max_adj: c_int, - pub n_alarm: c_int, - pub n_ext_ts: c_int, - pub n_per_out: c_int, - pub pps: c_int, - pub n_pins: c_int, - pub cross_timestamping: c_int, - pub adjust_phase: c_int, - pub max_phase_adj: c_int, - pub rsv: [c_int; 11], - } - - // linux/if_xdp.h - - pub struct sockaddr_xdp { - pub sxdp_family: crate::__u16, - pub sxdp_flags: crate::__u16, - pub sxdp_ifindex: crate::__u32, - pub sxdp_queue_id: crate::__u32, - pub sxdp_shared_umem_fd: crate::__u32, - } - - pub struct xdp_ring_offset { - pub producer: crate::__u64, - pub consumer: crate::__u64, - pub desc: crate::__u64, - pub flags: crate::__u64, - } - - pub struct xdp_mmap_offsets { - pub rx: xdp_ring_offset, - pub tx: xdp_ring_offset, - pub fr: xdp_ring_offset, - pub cr: xdp_ring_offset, - } - - pub struct xdp_ring_offset_v1 { - pub producer: crate::__u64, - pub consumer: crate::__u64, - pub desc: crate::__u64, - } - - pub struct xdp_mmap_offsets_v1 { - pub rx: xdp_ring_offset_v1, - pub tx: xdp_ring_offset_v1, - pub fr: xdp_ring_offset_v1, - pub cr: xdp_ring_offset_v1, - } - - pub struct xdp_umem_reg { - pub addr: crate::__u64, - pub len: crate::__u64, - pub chunk_size: crate::__u32, - pub headroom: crate::__u32, - pub flags: crate::__u32, - pub tx_metadata_len: crate::__u32, - } - - pub struct xdp_umem_reg_v1 { - pub addr: crate::__u64, - pub len: crate::__u64, - pub chunk_size: crate::__u32, - pub headroom: crate::__u32, - } - - pub struct xdp_statistics { - pub rx_dropped: crate::__u64, - pub rx_invalid_descs: crate::__u64, - pub tx_invalid_descs: crate::__u64, - pub rx_ring_full: crate::__u64, - pub rx_fill_ring_empty_descs: crate::__u64, - pub tx_ring_empty_descs: crate::__u64, - } - - pub struct xdp_statistics_v1 { - pub rx_dropped: crate::__u64, - pub rx_invalid_descs: crate::__u64, - pub tx_invalid_descs: crate::__u64, - } - - pub struct xdp_options { - pub flags: crate::__u32, - } - - pub struct xdp_desc { - pub addr: crate::__u64, - pub len: crate::__u32, - pub options: crate::__u32, - } - - pub struct xsk_tx_metadata_completion { - pub tx_timestamp: crate::__u64, - } - - pub struct xsk_tx_metadata_request { - pub csum_start: __u16, - pub csum_offset: __u16, - } - - // linux/mount.h - - pub struct mount_attr { - pub attr_set: crate::__u64, - pub attr_clr: crate::__u64, - pub propagation: crate::__u64, - pub userns_fd: crate::__u64, - } - - // linux/nsfs.h - pub struct mnt_ns_info { - pub size: crate::__u32, - pub nr_mounts: crate::__u32, - pub mnt_ns_id: crate::__u64, - } - - // linux/pidfd.h - - #[non_exhaustive] - pub struct pidfd_info { - pub mask: crate::__u64, - pub cgroupid: crate::__u64, - pub pid: crate::__u32, - pub tgid: crate::__u32, - pub ppid: crate::__u32, - pub ruid: crate::__u32, - pub rgid: crate::__u32, - pub euid: crate::__u32, - pub egid: crate::__u32, - pub suid: crate::__u32, - pub sgid: crate::__u32, - pub fsuid: crate::__u32, - pub fsgid: crate::__u32, - pub exit_code: crate::__s32, - } - - // linux/uio.h - - pub struct dmabuf_cmsg { - pub frag_offset: crate::__u64, - pub frag_size: crate::__u32, - pub frag_token: crate::__u32, - pub dmabuf_id: crate::__u32, - pub flags: crate::__u32, - } - - pub struct dmabuf_token { - pub token_start: crate::__u32, - pub token_count: crate::__u32, - } -} - -cfg_if! { - if #[cfg(not(target_arch = "sparc64"))] { - s! 
{ - pub struct iw_thrspy { - pub addr: crate::sockaddr, - pub qual: iw_quality, - pub low: iw_quality, - pub high: iw_quality, - } - - pub struct iw_mlme { - pub cmd: __u16, - pub reason_code: __u16, - pub addr: crate::sockaddr, - } - - pub struct iw_michaelmicfailure { - pub flags: __u32, - pub src_addr: crate::sockaddr, - pub tsc: [__u8; IW_ENCODE_SEQ_MAX_SIZE], - } - - pub struct __c_anonymous_elf32_rela { - pub r_offset: Elf32_Addr, - pub r_info: Elf32_Word, - pub r_addend: Elf32_Sword, - } - - pub struct __c_anonymous_elf64_rela { - pub r_offset: Elf64_Addr, - pub r_info: Elf64_Xword, - pub r_addend: Elf64_Sxword, - } - } - } -} - -s_no_extra_traits! { - pub struct sockaddr_nl { - pub nl_family: crate::sa_family_t, - nl_pad: c_ushort, - pub nl_pid: u32, - pub nl_groups: u32, - } - - pub struct dirent { - pub d_ino: crate::ino_t, - pub d_off: off_t, - pub d_reclen: c_ushort, - pub d_type: c_uchar, - pub d_name: [c_char; 256], - } - - pub struct sockaddr_alg { - pub salg_family: crate::sa_family_t, - pub salg_type: [c_uchar; 14], - pub salg_feat: u32, - pub salg_mask: u32, - pub salg_name: [c_uchar; 64], - } - - pub struct uinput_setup { - pub id: input_id, - pub name: [c_char; UINPUT_MAX_NAME_SIZE], - pub ff_effects_max: __u32, - } - - pub struct uinput_user_dev { - pub name: [c_char; UINPUT_MAX_NAME_SIZE], - pub id: input_id, - pub ff_effects_max: __u32, - pub absmax: [__s32; ABS_CNT], - pub absmin: [__s32; ABS_CNT], - pub absfuzz: [__s32; ABS_CNT], - pub absflat: [__s32; ABS_CNT], - } - - /// WARNING: The `PartialEq`, `Eq` and `Hash` implementations of this - /// type are unsound and will be removed in the future. - #[deprecated( - note = "this struct has unsafe trait implementations that will be \ - removed in the future", - since = "0.2.80" - )] - pub struct af_alg_iv { - pub ivlen: u32, - pub iv: [c_uchar; 0], - } - - // x32 compatibility - // See https://sourceware.org/bugzilla/show_bug.cgi?id=21279 - pub struct mq_attr { - #[cfg(all(target_arch = "x86_64", target_pointer_width = "32"))] - pub mq_flags: i64, - #[cfg(all(target_arch = "x86_64", target_pointer_width = "32"))] - pub mq_maxmsg: i64, - #[cfg(all(target_arch = "x86_64", target_pointer_width = "32"))] - pub mq_msgsize: i64, - #[cfg(all(target_arch = "x86_64", target_pointer_width = "32"))] - pub mq_curmsgs: i64, - #[cfg(all(target_arch = "x86_64", target_pointer_width = "32"))] - pad: [i64; 4], - - #[cfg(not(all(target_arch = "x86_64", target_pointer_width = "32")))] - pub mq_flags: c_long, - #[cfg(not(all(target_arch = "x86_64", target_pointer_width = "32")))] - pub mq_maxmsg: c_long, - #[cfg(not(all(target_arch = "x86_64", target_pointer_width = "32")))] - pub mq_msgsize: c_long, - #[cfg(not(all(target_arch = "x86_64", target_pointer_width = "32")))] - pub mq_curmsgs: c_long, - #[cfg(not(all(target_arch = "x86_64", target_pointer_width = "32")))] - pad: [c_long; 4], - } - - pub union __c_anonymous_ifr_ifru { - pub ifru_addr: crate::sockaddr, - pub ifru_dstaddr: crate::sockaddr, - pub ifru_broadaddr: crate::sockaddr, - pub ifru_netmask: crate::sockaddr, - pub ifru_hwaddr: crate::sockaddr, - pub ifru_flags: c_short, - pub ifru_ifindex: c_int, - pub ifru_metric: c_int, - pub ifru_mtu: c_int, - pub ifru_map: __c_anonymous_ifru_map, - pub ifru_slave: [c_char; crate::IFNAMSIZ], - pub ifru_newname: [c_char; crate::IFNAMSIZ], - pub ifru_data: *mut c_char, - } - - pub struct ifreq { - /// interface name, e.g. 
"en0" - pub ifr_name: [c_char; crate::IFNAMSIZ], - pub ifr_ifru: __c_anonymous_ifr_ifru, - } - - pub union __c_anonymous_ifc_ifcu { - pub ifcu_buf: *mut c_char, - pub ifcu_req: *mut crate::ifreq, - } - - /// Structure used in SIOCGIFCONF request. Used to retrieve interface configuration for - /// machine (useful for programs which must know all networks accessible). - pub struct ifconf { - /// Size of buffer - pub ifc_len: c_int, - pub ifc_ifcu: __c_anonymous_ifc_ifcu, - } - - pub struct hwtstamp_config { - pub flags: c_int, - pub tx_type: c_int, - pub rx_filter: c_int, - } - - pub struct dirent64 { - pub d_ino: crate::ino64_t, - pub d_off: off64_t, - pub d_reclen: c_ushort, - pub d_type: c_uchar, - pub d_name: [c_char; 256], - } - - pub struct sched_attr { - pub size: __u32, - pub sched_policy: __u32, - pub sched_flags: crate::__u64, - pub sched_nice: __s32, - pub sched_priority: __u32, - pub sched_runtime: crate::__u64, - pub sched_deadline: crate::__u64, - pub sched_period: crate::__u64, - } - - pub union tpacket_req_u { - pub req: crate::tpacket_req, - pub req3: crate::tpacket_req3, - } - - pub union tpacket_bd_header_u { - pub bh1: crate::tpacket_hdr_v1, - } - - pub struct tpacket_block_desc { - pub version: __u32, - pub offset_to_priv: __u32, - pub hdr: crate::tpacket_bd_header_u, - } - - #[cfg_attr( - all( - any(target_env = "musl", target_env = "ohos"), - target_pointer_width = "32" - ), - repr(align(4)) - )] - #[cfg_attr( - all( - any(target_env = "musl", target_env = "ohos"), - target_pointer_width = "64" - ), - repr(align(8)) - )] - #[cfg_attr( - all( - not(any(target_env = "musl", target_env = "ohos")), - target_arch = "x86" - ), - repr(align(4)) - )] - #[cfg_attr( - all( - not(any(target_env = "musl", target_env = "ohos")), - not(target_arch = "x86") - ), - repr(align(8)) - )] - pub struct pthread_cond_t { - #[doc(hidden)] - size: [u8; crate::__SIZEOF_PTHREAD_COND_T], - } - - #[cfg_attr( - all( - target_pointer_width = "32", - any( - target_arch = "mips", - target_arch = "mips32r6", - target_arch = "arm", - target_arch = "hexagon", - target_arch = "m68k", - target_arch = "csky", - target_arch = "powerpc", - target_arch = "sparc", - target_arch = "x86_64", - target_arch = "x86" - ) - ), - repr(align(4)) - )] - #[cfg_attr( - any( - target_pointer_width = "64", - not(any( - target_arch = "mips", - target_arch = "mips32r6", - target_arch = "arm", - target_arch = "hexagon", - target_arch = "m68k", - target_arch = "csky", - target_arch = "powerpc", - target_arch = "sparc", - target_arch = "x86_64", - target_arch = "x86" - )) - ), - repr(align(8)) - )] - pub struct pthread_mutex_t { - #[doc(hidden)] - size: [u8; crate::__SIZEOF_PTHREAD_MUTEX_T], - } - - #[cfg_attr( - all( - target_pointer_width = "32", - any( - target_arch = "mips", - target_arch = "mips32r6", - target_arch = "arm", - target_arch = "hexagon", - target_arch = "m68k", - target_arch = "csky", - target_arch = "powerpc", - target_arch = "sparc", - target_arch = "x86_64", - target_arch = "x86" - ) - ), - repr(align(4)) - )] - #[cfg_attr( - any( - target_pointer_width = "64", - not(any( - target_arch = "mips", - target_arch = "mips32r6", - target_arch = "arm", - target_arch = "hexagon", - target_arch = "m68k", - target_arch = "powerpc", - target_arch = "sparc", - target_arch = "x86_64", - target_arch = "x86" - )) - ), - repr(align(8)) - )] - pub struct pthread_rwlock_t { - size: [u8; crate::__SIZEOF_PTHREAD_RWLOCK_T], - } - - #[cfg_attr( - all( - target_pointer_width = "32", - any( - target_arch = "mips", - target_arch = 
"mips32r6", - target_arch = "arm", - target_arch = "hexagon", - target_arch = "m68k", - target_arch = "csky", - target_arch = "powerpc", - target_arch = "sparc", - target_arch = "x86_64", - target_arch = "x86" - ) - ), - repr(align(4)) - )] - #[cfg_attr( - any( - target_pointer_width = "64", - not(any( - target_arch = "mips", - target_arch = "mips32r6", - target_arch = "arm", - target_arch = "hexagon", - target_arch = "m68k", - target_arch = "csky", - target_arch = "powerpc", - target_arch = "sparc", - target_arch = "x86_64", - target_arch = "x86" - )) - ), - repr(align(8)) - )] - pub struct pthread_barrier_t { - size: [u8; crate::__SIZEOF_PTHREAD_BARRIER_T], - } - - // linux/net_tstamp.h - pub struct sock_txtime { - pub clockid: crate::clockid_t, - pub flags: __u32, - } - - // linux/wireless.h - pub union iwreq_data { - pub name: [c_char; crate::IFNAMSIZ], - pub essid: iw_point, - pub nwid: iw_param, - pub freq: iw_freq, - pub sens: iw_param, - pub bitrate: iw_param, - pub txpower: iw_param, - pub rts: iw_param, - pub frag: iw_param, - pub mode: __u32, - pub retry: iw_param, - pub encoding: iw_point, - pub power: iw_param, - pub qual: iw_quality, - pub ap_addr: crate::sockaddr, - pub addr: crate::sockaddr, - pub param: iw_param, - pub data: iw_point, - } - - pub struct iw_event { - pub len: __u16, - pub cmd: __u16, - pub u: iwreq_data, - } - - pub union __c_anonymous_iwreq { - pub ifrn_name: [c_char; crate::IFNAMSIZ], - } - - pub struct iwreq { - pub ifr_ifrn: __c_anonymous_iwreq, - pub u: iwreq_data, - } - - // linux/ptp_clock.h - pub union __c_anonymous_ptp_perout_request_1 { - pub start: ptp_clock_time, - pub phase: ptp_clock_time, - } - - pub union __c_anonymous_ptp_perout_request_2 { - pub on: ptp_clock_time, - pub rsv: [c_uint; 4], - } - - pub struct ptp_perout_request { - pub anonymous_1: __c_anonymous_ptp_perout_request_1, - pub period: ptp_clock_time, - pub index: c_uint, - pub flags: c_uint, - pub anonymous_2: __c_anonymous_ptp_perout_request_2, - } - - // linux/if_xdp.h - pub struct xsk_tx_metadata { - pub flags: crate::__u64, - pub xsk_tx_metadata_union: __c_anonymous_xsk_tx_metadata_union, - } - - pub union __c_anonymous_xsk_tx_metadata_union { - pub request: xsk_tx_metadata_request, - pub completion: xsk_tx_metadata_completion, - } -} - -cfg_if! 
{ - if #[cfg(feature = "extra_traits")] { - impl PartialEq for sockaddr_nl { - fn eq(&self, other: &sockaddr_nl) -> bool { - self.nl_family == other.nl_family - && self.nl_pid == other.nl_pid - && self.nl_groups == other.nl_groups - } - } - impl Eq for sockaddr_nl {} - impl hash::Hash for sockaddr_nl { - fn hash(&self, state: &mut H) { - self.nl_family.hash(state); - self.nl_pid.hash(state); - self.nl_groups.hash(state); - } - } - - impl PartialEq for dirent { - fn eq(&self, other: &dirent) -> bool { - self.d_ino == other.d_ino - && self.d_off == other.d_off - && self.d_reclen == other.d_reclen - && self.d_type == other.d_type - && self - .d_name - .iter() - .zip(other.d_name.iter()) - .all(|(a, b)| a == b) - } - } - - impl Eq for dirent {} - - impl hash::Hash for dirent { - fn hash(&self, state: &mut H) { - self.d_ino.hash(state); - self.d_off.hash(state); - self.d_reclen.hash(state); - self.d_type.hash(state); - self.d_name.hash(state); - } - } - - impl PartialEq for dirent64 { - fn eq(&self, other: &dirent64) -> bool { - self.d_ino == other.d_ino - && self.d_off == other.d_off - && self.d_reclen == other.d_reclen - && self.d_type == other.d_type - && self - .d_name - .iter() - .zip(other.d_name.iter()) - .all(|(a, b)| a == b) - } - } - - impl Eq for dirent64 {} - - impl hash::Hash for dirent64 { - fn hash(&self, state: &mut H) { - self.d_ino.hash(state); - self.d_off.hash(state); - self.d_reclen.hash(state); - self.d_type.hash(state); - self.d_name.hash(state); - } - } - - impl PartialEq for pthread_cond_t { - fn eq(&self, other: &pthread_cond_t) -> bool { - self.size.iter().zip(other.size.iter()).all(|(a, b)| a == b) - } - } - - impl Eq for pthread_cond_t {} - - impl hash::Hash for pthread_cond_t { - fn hash(&self, state: &mut H) { - self.size.hash(state); - } - } - - impl PartialEq for pthread_mutex_t { - fn eq(&self, other: &pthread_mutex_t) -> bool { - self.size.iter().zip(other.size.iter()).all(|(a, b)| a == b) - } - } - - impl Eq for pthread_mutex_t {} - - impl hash::Hash for pthread_mutex_t { - fn hash(&self, state: &mut H) { - self.size.hash(state); - } - } - - impl PartialEq for pthread_rwlock_t { - fn eq(&self, other: &pthread_rwlock_t) -> bool { - self.size.iter().zip(other.size.iter()).all(|(a, b)| a == b) - } - } - - impl Eq for pthread_rwlock_t {} - - impl hash::Hash for pthread_rwlock_t { - fn hash(&self, state: &mut H) { - self.size.hash(state); - } - } - - impl PartialEq for pthread_barrier_t { - fn eq(&self, other: &pthread_barrier_t) -> bool { - self.size.iter().zip(other.size.iter()).all(|(a, b)| a == b) - } - } - - impl Eq for pthread_barrier_t {} - - impl hash::Hash for pthread_barrier_t { - fn hash(&self, state: &mut H) { - self.size.hash(state); - } - } - - impl PartialEq for sockaddr_alg { - fn eq(&self, other: &sockaddr_alg) -> bool { - self.salg_family == other.salg_family - && self - .salg_type - .iter() - .zip(other.salg_type.iter()) - .all(|(a, b)| a == b) - && self.salg_feat == other.salg_feat - && self.salg_mask == other.salg_mask - && self - .salg_name - .iter() - .zip(other.salg_name.iter()) - .all(|(a, b)| a == b) - } - } - - impl Eq for sockaddr_alg {} - - impl hash::Hash for sockaddr_alg { - fn hash(&self, state: &mut H) { - self.salg_family.hash(state); - self.salg_type.hash(state); - self.salg_feat.hash(state); - self.salg_mask.hash(state); - self.salg_name.hash(state); - } - } - - impl PartialEq for uinput_setup { - fn eq(&self, other: &uinput_setup) -> bool { - self.id == other.id - && self.name[..] == other.name[..] 
-                    && self.ff_effects_max == other.ff_effects_max
-            }
-        }
-        impl Eq for uinput_setup {}
-
-        impl hash::Hash for uinput_setup {
-            fn hash<H: hash::Hasher>(&self, state: &mut H) {
-                self.id.hash(state);
-                self.name.hash(state);
-                self.ff_effects_max.hash(state);
-            }
-        }
-
-        impl PartialEq for uinput_user_dev {
-            fn eq(&self, other: &uinput_user_dev) -> bool {
-                self.name[..] == other.name[..]
-                    && self.id == other.id
-                    && self.ff_effects_max == other.ff_effects_max
-                    && self.absmax[..] == other.absmax[..]
-                    && self.absmin[..] == other.absmin[..]
-                    && self.absfuzz[..] == other.absfuzz[..]
-                    && self.absflat[..] == other.absflat[..]
-            }
-        }
-        impl Eq for uinput_user_dev {}
-
-        impl hash::Hash for uinput_user_dev {
-            fn hash<H: hash::Hasher>(&self, state: &mut H) {
-                self.name.hash(state);
-                self.id.hash(state);
-                self.ff_effects_max.hash(state);
-                self.absmax.hash(state);
-                self.absmin.hash(state);
-                self.absfuzz.hash(state);
-                self.absflat.hash(state);
-            }
-        }
-
-        #[allow(deprecated)]
-        impl af_alg_iv {
-            fn as_slice(&self) -> &[u8] {
-                unsafe { ::core::slice::from_raw_parts(self.iv.as_ptr(), self.ivlen as usize) }
-            }
-        }
-
-        #[allow(deprecated)]
-        impl PartialEq for af_alg_iv {
-            fn eq(&self, other: &af_alg_iv) -> bool {
-                *self.as_slice() == *other.as_slice()
-            }
-        }
-
-        #[allow(deprecated)]
-        impl Eq for af_alg_iv {}
-
-        #[allow(deprecated)]
-        impl hash::Hash for af_alg_iv {
-            fn hash<H: hash::Hasher>(&self, state: &mut H) {
-                self.as_slice().hash(state);
-            }
-        }
-
-        impl PartialEq for mq_attr {
-            fn eq(&self, other: &mq_attr) -> bool {
-                self.mq_flags == other.mq_flags
-                    && self.mq_maxmsg == other.mq_maxmsg
-                    && self.mq_msgsize == other.mq_msgsize
-                    && self.mq_curmsgs == other.mq_curmsgs
-            }
-        }
-        impl Eq for mq_attr {}
-        impl hash::Hash for mq_attr {
-            fn hash<H: hash::Hasher>(&self, state: &mut H) {
-                self.mq_flags.hash(state);
-                self.mq_maxmsg.hash(state);
-                self.mq_msgsize.hash(state);
-                self.mq_curmsgs.hash(state);
-            }
-        }
-        impl PartialEq for hwtstamp_config {
-            fn eq(&self, other: &hwtstamp_config) -> bool {
-                self.flags == other.flags
-                    && self.tx_type == other.tx_type
-                    && self.rx_filter == other.rx_filter
-            }
-        }
-        impl Eq for hwtstamp_config {}
-        impl hash::Hash for hwtstamp_config {
-            fn hash<H: hash::Hasher>(&self, state: &mut H) {
-                self.flags.hash(state);
-                self.tx_type.hash(state);
-                self.rx_filter.hash(state);
-            }
-        }
-
-        impl PartialEq for sched_attr {
-            fn eq(&self, other: &sched_attr) -> bool {
-                self.size == other.size
-                    && self.sched_policy == other.sched_policy
-                    && self.sched_flags == other.sched_flags
-                    && self.sched_nice == other.sched_nice
-                    && self.sched_priority == other.sched_priority
-                    && self.sched_runtime == other.sched_runtime
-                    && self.sched_deadline == other.sched_deadline
-                    && self.sched_period == other.sched_period
-            }
-        }
-        impl Eq for sched_attr {}
-        impl hash::Hash for sched_attr {
-            fn hash<H: hash::Hasher>(&self, state: &mut H) {
-                self.size.hash(state);
-                self.sched_policy.hash(state);
-                self.sched_flags.hash(state);
-                self.sched_nice.hash(state);
-                self.sched_priority.hash(state);
-                self.sched_runtime.hash(state);
-                self.sched_deadline.hash(state);
-                self.sched_period.hash(state);
-            }
-        }
-    }
-}
-
-cfg_if!
{ - if #[cfg(any( - target_env = "gnu", - target_env = "musl", - target_env = "ohos" - ))] { - pub const ABDAY_1: crate::nl_item = 0x20000; - pub const ABDAY_2: crate::nl_item = 0x20001; - pub const ABDAY_3: crate::nl_item = 0x20002; - pub const ABDAY_4: crate::nl_item = 0x20003; - pub const ABDAY_5: crate::nl_item = 0x20004; - pub const ABDAY_6: crate::nl_item = 0x20005; - pub const ABDAY_7: crate::nl_item = 0x20006; - - pub const DAY_1: crate::nl_item = 0x20007; - pub const DAY_2: crate::nl_item = 0x20008; - pub const DAY_3: crate::nl_item = 0x20009; - pub const DAY_4: crate::nl_item = 0x2000A; - pub const DAY_5: crate::nl_item = 0x2000B; - pub const DAY_6: crate::nl_item = 0x2000C; - pub const DAY_7: crate::nl_item = 0x2000D; - - pub const ABMON_1: crate::nl_item = 0x2000E; - pub const ABMON_2: crate::nl_item = 0x2000F; - pub const ABMON_3: crate::nl_item = 0x20010; - pub const ABMON_4: crate::nl_item = 0x20011; - pub const ABMON_5: crate::nl_item = 0x20012; - pub const ABMON_6: crate::nl_item = 0x20013; - pub const ABMON_7: crate::nl_item = 0x20014; - pub const ABMON_8: crate::nl_item = 0x20015; - pub const ABMON_9: crate::nl_item = 0x20016; - pub const ABMON_10: crate::nl_item = 0x20017; - pub const ABMON_11: crate::nl_item = 0x20018; - pub const ABMON_12: crate::nl_item = 0x20019; - - pub const MON_1: crate::nl_item = 0x2001A; - pub const MON_2: crate::nl_item = 0x2001B; - pub const MON_3: crate::nl_item = 0x2001C; - pub const MON_4: crate::nl_item = 0x2001D; - pub const MON_5: crate::nl_item = 0x2001E; - pub const MON_6: crate::nl_item = 0x2001F; - pub const MON_7: crate::nl_item = 0x20020; - pub const MON_8: crate::nl_item = 0x20021; - pub const MON_9: crate::nl_item = 0x20022; - pub const MON_10: crate::nl_item = 0x20023; - pub const MON_11: crate::nl_item = 0x20024; - pub const MON_12: crate::nl_item = 0x20025; - - pub const AM_STR: crate::nl_item = 0x20026; - pub const PM_STR: crate::nl_item = 0x20027; - - pub const D_T_FMT: crate::nl_item = 0x20028; - pub const D_FMT: crate::nl_item = 0x20029; - pub const T_FMT: crate::nl_item = 0x2002A; - pub const T_FMT_AMPM: crate::nl_item = 0x2002B; - - pub const ERA: crate::nl_item = 0x2002C; - pub const ERA_D_FMT: crate::nl_item = 0x2002E; - pub const ALT_DIGITS: crate::nl_item = 0x2002F; - pub const ERA_D_T_FMT: crate::nl_item = 0x20030; - pub const ERA_T_FMT: crate::nl_item = 0x20031; - - pub const CODESET: crate::nl_item = 14; - pub const CRNCYSTR: crate::nl_item = 0x4000F; - pub const RADIXCHAR: crate::nl_item = 0x10000; - pub const THOUSEP: crate::nl_item = 0x10001; - pub const YESEXPR: crate::nl_item = 0x50000; - pub const NOEXPR: crate::nl_item = 0x50001; - pub const YESSTR: crate::nl_item = 0x50002; - pub const NOSTR: crate::nl_item = 0x50003; - } -} - -pub const RUSAGE_CHILDREN: c_int = -1; -pub const L_tmpnam: c_uint = 20; -pub const _PC_LINK_MAX: c_int = 0; -pub const _PC_MAX_CANON: c_int = 1; -pub const _PC_MAX_INPUT: c_int = 2; -pub const _PC_NAME_MAX: c_int = 3; -pub const _PC_PATH_MAX: c_int = 4; -pub const _PC_PIPE_BUF: c_int = 5; -pub const _PC_CHOWN_RESTRICTED: c_int = 6; -pub const _PC_NO_TRUNC: c_int = 7; -pub const _PC_VDISABLE: c_int = 8; -pub const _PC_SYNC_IO: c_int = 9; -pub const _PC_ASYNC_IO: c_int = 10; -pub const _PC_PRIO_IO: c_int = 11; -pub const _PC_SOCK_MAXBUF: c_int = 12; -pub const _PC_FILESIZEBITS: c_int = 13; -pub const _PC_REC_INCR_XFER_SIZE: c_int = 14; -pub const _PC_REC_MAX_XFER_SIZE: c_int = 15; -pub const _PC_REC_MIN_XFER_SIZE: c_int = 16; -pub const _PC_REC_XFER_ALIGN: c_int = 17; -pub const 
_PC_ALLOC_SIZE_MIN: c_int = 18; -pub const _PC_SYMLINK_MAX: c_int = 19; -pub const _PC_2_SYMLINKS: c_int = 20; - -pub const MS_NOUSER: c_ulong = 0xffffffff80000000; - -pub const _SC_ARG_MAX: c_int = 0; -pub const _SC_CHILD_MAX: c_int = 1; -pub const _SC_CLK_TCK: c_int = 2; -pub const _SC_NGROUPS_MAX: c_int = 3; -pub const _SC_OPEN_MAX: c_int = 4; -pub const _SC_STREAM_MAX: c_int = 5; -pub const _SC_TZNAME_MAX: c_int = 6; -pub const _SC_JOB_CONTROL: c_int = 7; -pub const _SC_SAVED_IDS: c_int = 8; -pub const _SC_REALTIME_SIGNALS: c_int = 9; -pub const _SC_PRIORITY_SCHEDULING: c_int = 10; -pub const _SC_TIMERS: c_int = 11; -pub const _SC_ASYNCHRONOUS_IO: c_int = 12; -pub const _SC_PRIORITIZED_IO: c_int = 13; -pub const _SC_SYNCHRONIZED_IO: c_int = 14; -pub const _SC_FSYNC: c_int = 15; -pub const _SC_MAPPED_FILES: c_int = 16; -pub const _SC_MEMLOCK: c_int = 17; -pub const _SC_MEMLOCK_RANGE: c_int = 18; -pub const _SC_MEMORY_PROTECTION: c_int = 19; -pub const _SC_MESSAGE_PASSING: c_int = 20; -pub const _SC_SEMAPHORES: c_int = 21; -pub const _SC_SHARED_MEMORY_OBJECTS: c_int = 22; -pub const _SC_AIO_LISTIO_MAX: c_int = 23; -pub const _SC_AIO_MAX: c_int = 24; -pub const _SC_AIO_PRIO_DELTA_MAX: c_int = 25; -pub const _SC_DELAYTIMER_MAX: c_int = 26; -pub const _SC_MQ_OPEN_MAX: c_int = 27; -pub const _SC_MQ_PRIO_MAX: c_int = 28; -pub const _SC_VERSION: c_int = 29; -pub const _SC_PAGESIZE: c_int = 30; -pub const _SC_PAGE_SIZE: c_int = _SC_PAGESIZE; -pub const _SC_RTSIG_MAX: c_int = 31; -pub const _SC_SEM_NSEMS_MAX: c_int = 32; -pub const _SC_SEM_VALUE_MAX: c_int = 33; -pub const _SC_SIGQUEUE_MAX: c_int = 34; -pub const _SC_TIMER_MAX: c_int = 35; -pub const _SC_BC_BASE_MAX: c_int = 36; -pub const _SC_BC_DIM_MAX: c_int = 37; -pub const _SC_BC_SCALE_MAX: c_int = 38; -pub const _SC_BC_STRING_MAX: c_int = 39; -pub const _SC_COLL_WEIGHTS_MAX: c_int = 40; -pub const _SC_EXPR_NEST_MAX: c_int = 42; -pub const _SC_LINE_MAX: c_int = 43; -pub const _SC_RE_DUP_MAX: c_int = 44; -pub const _SC_2_VERSION: c_int = 46; -pub const _SC_2_C_BIND: c_int = 47; -pub const _SC_2_C_DEV: c_int = 48; -pub const _SC_2_FORT_DEV: c_int = 49; -pub const _SC_2_FORT_RUN: c_int = 50; -pub const _SC_2_SW_DEV: c_int = 51; -pub const _SC_2_LOCALEDEF: c_int = 52; -pub const _SC_UIO_MAXIOV: c_int = 60; -pub const _SC_IOV_MAX: c_int = 60; -pub const _SC_THREADS: c_int = 67; -pub const _SC_THREAD_SAFE_FUNCTIONS: c_int = 68; -pub const _SC_GETGR_R_SIZE_MAX: c_int = 69; -pub const _SC_GETPW_R_SIZE_MAX: c_int = 70; -pub const _SC_LOGIN_NAME_MAX: c_int = 71; -pub const _SC_TTY_NAME_MAX: c_int = 72; -pub const _SC_THREAD_DESTRUCTOR_ITERATIONS: c_int = 73; -pub const _SC_THREAD_KEYS_MAX: c_int = 74; -pub const _SC_THREAD_STACK_MIN: c_int = 75; -pub const _SC_THREAD_THREADS_MAX: c_int = 76; -pub const _SC_THREAD_ATTR_STACKADDR: c_int = 77; -pub const _SC_THREAD_ATTR_STACKSIZE: c_int = 78; -pub const _SC_THREAD_PRIORITY_SCHEDULING: c_int = 79; -pub const _SC_THREAD_PRIO_INHERIT: c_int = 80; -pub const _SC_THREAD_PRIO_PROTECT: c_int = 81; -pub const _SC_THREAD_PROCESS_SHARED: c_int = 82; -pub const _SC_NPROCESSORS_CONF: c_int = 83; -pub const _SC_NPROCESSORS_ONLN: c_int = 84; -pub const _SC_PHYS_PAGES: c_int = 85; -pub const _SC_AVPHYS_PAGES: c_int = 86; -pub const _SC_ATEXIT_MAX: c_int = 87; -pub const _SC_PASS_MAX: c_int = 88; -pub const _SC_XOPEN_VERSION: c_int = 89; -pub const _SC_XOPEN_XCU_VERSION: c_int = 90; -pub const _SC_XOPEN_UNIX: c_int = 91; -pub const _SC_XOPEN_CRYPT: c_int = 92; -pub const _SC_XOPEN_ENH_I18N: c_int = 93; -pub const 
_SC_XOPEN_SHM: c_int = 94; -pub const _SC_2_CHAR_TERM: c_int = 95; -pub const _SC_2_UPE: c_int = 97; -pub const _SC_XOPEN_XPG2: c_int = 98; -pub const _SC_XOPEN_XPG3: c_int = 99; -pub const _SC_XOPEN_XPG4: c_int = 100; -pub const _SC_NZERO: c_int = 109; -pub const _SC_XBS5_ILP32_OFF32: c_int = 125; -pub const _SC_XBS5_ILP32_OFFBIG: c_int = 126; -pub const _SC_XBS5_LP64_OFF64: c_int = 127; -pub const _SC_XBS5_LPBIG_OFFBIG: c_int = 128; -pub const _SC_XOPEN_LEGACY: c_int = 129; -pub const _SC_XOPEN_REALTIME: c_int = 130; -pub const _SC_XOPEN_REALTIME_THREADS: c_int = 131; -pub const _SC_ADVISORY_INFO: c_int = 132; -pub const _SC_BARRIERS: c_int = 133; -pub const _SC_CLOCK_SELECTION: c_int = 137; -pub const _SC_CPUTIME: c_int = 138; -pub const _SC_THREAD_CPUTIME: c_int = 139; -pub const _SC_MONOTONIC_CLOCK: c_int = 149; -pub const _SC_READER_WRITER_LOCKS: c_int = 153; -pub const _SC_SPIN_LOCKS: c_int = 154; -pub const _SC_REGEXP: c_int = 155; -pub const _SC_SHELL: c_int = 157; -pub const _SC_SPAWN: c_int = 159; -pub const _SC_SPORADIC_SERVER: c_int = 160; -pub const _SC_THREAD_SPORADIC_SERVER: c_int = 161; -pub const _SC_TIMEOUTS: c_int = 164; -pub const _SC_TYPED_MEMORY_OBJECTS: c_int = 165; -pub const _SC_2_PBS: c_int = 168; -pub const _SC_2_PBS_ACCOUNTING: c_int = 169; -pub const _SC_2_PBS_LOCATE: c_int = 170; -pub const _SC_2_PBS_MESSAGE: c_int = 171; -pub const _SC_2_PBS_TRACK: c_int = 172; -pub const _SC_SYMLOOP_MAX: c_int = 173; -pub const _SC_STREAMS: c_int = 174; -pub const _SC_2_PBS_CHECKPOINT: c_int = 175; -pub const _SC_V6_ILP32_OFF32: c_int = 176; -pub const _SC_V6_ILP32_OFFBIG: c_int = 177; -pub const _SC_V6_LP64_OFF64: c_int = 178; -pub const _SC_V6_LPBIG_OFFBIG: c_int = 179; -pub const _SC_HOST_NAME_MAX: c_int = 180; -pub const _SC_TRACE: c_int = 181; -pub const _SC_TRACE_EVENT_FILTER: c_int = 182; -pub const _SC_TRACE_INHERIT: c_int = 183; -pub const _SC_TRACE_LOG: c_int = 184; -pub const _SC_IPV6: c_int = 235; -pub const _SC_RAW_SOCKETS: c_int = 236; -pub const _SC_V7_ILP32_OFF32: c_int = 237; -pub const _SC_V7_ILP32_OFFBIG: c_int = 238; -pub const _SC_V7_LP64_OFF64: c_int = 239; -pub const _SC_V7_LPBIG_OFFBIG: c_int = 240; -pub const _SC_SS_REPL_MAX: c_int = 241; -pub const _SC_TRACE_EVENT_NAME_MAX: c_int = 242; -pub const _SC_TRACE_NAME_MAX: c_int = 243; -pub const _SC_TRACE_SYS_MAX: c_int = 244; -pub const _SC_TRACE_USER_EVENT_MAX: c_int = 245; -pub const _SC_XOPEN_STREAMS: c_int = 246; -pub const _SC_THREAD_ROBUST_PRIO_INHERIT: c_int = 247; -pub const _SC_THREAD_ROBUST_PRIO_PROTECT: c_int = 248; - -pub const _CS_PATH: c_int = 0; -pub const _CS_POSIX_V6_WIDTH_RESTRICTED_ENVS: c_int = 1; -pub const _CS_POSIX_V5_WIDTH_RESTRICTED_ENVS: c_int = 4; -pub const _CS_POSIX_V7_WIDTH_RESTRICTED_ENVS: c_int = 5; -pub const _CS_POSIX_V6_ILP32_OFF32_CFLAGS: c_int = 1116; -pub const _CS_POSIX_V6_ILP32_OFF32_LDFLAGS: c_int = 1117; -pub const _CS_POSIX_V6_ILP32_OFF32_LIBS: c_int = 1118; -pub const _CS_POSIX_V6_ILP32_OFF32_LINTFLAGS: c_int = 1119; -pub const _CS_POSIX_V6_ILP32_OFFBIG_CFLAGS: c_int = 1120; -pub const _CS_POSIX_V6_ILP32_OFFBIG_LDFLAGS: c_int = 1121; -pub const _CS_POSIX_V6_ILP32_OFFBIG_LIBS: c_int = 1122; -pub const _CS_POSIX_V6_ILP32_OFFBIG_LINTFLAGS: c_int = 1123; -pub const _CS_POSIX_V6_LP64_OFF64_CFLAGS: c_int = 1124; -pub const _CS_POSIX_V6_LP64_OFF64_LDFLAGS: c_int = 1125; -pub const _CS_POSIX_V6_LP64_OFF64_LIBS: c_int = 1126; -pub const _CS_POSIX_V6_LP64_OFF64_LINTFLAGS: c_int = 1127; -pub const _CS_POSIX_V6_LPBIG_OFFBIG_CFLAGS: c_int = 1128; -pub const 
_CS_POSIX_V6_LPBIG_OFFBIG_LDFLAGS: c_int = 1129; -pub const _CS_POSIX_V6_LPBIG_OFFBIG_LIBS: c_int = 1130; -pub const _CS_POSIX_V6_LPBIG_OFFBIG_LINTFLAGS: c_int = 1131; -pub const _CS_POSIX_V7_ILP32_OFF32_CFLAGS: c_int = 1132; -pub const _CS_POSIX_V7_ILP32_OFF32_LDFLAGS: c_int = 1133; -pub const _CS_POSIX_V7_ILP32_OFF32_LIBS: c_int = 1134; -pub const _CS_POSIX_V7_ILP32_OFF32_LINTFLAGS: c_int = 1135; -pub const _CS_POSIX_V7_ILP32_OFFBIG_CFLAGS: c_int = 1136; -pub const _CS_POSIX_V7_ILP32_OFFBIG_LDFLAGS: c_int = 1137; -pub const _CS_POSIX_V7_ILP32_OFFBIG_LIBS: c_int = 1138; -pub const _CS_POSIX_V7_ILP32_OFFBIG_LINTFLAGS: c_int = 1139; -pub const _CS_POSIX_V7_LP64_OFF64_CFLAGS: c_int = 1140; -pub const _CS_POSIX_V7_LP64_OFF64_LDFLAGS: c_int = 1141; -pub const _CS_POSIX_V7_LP64_OFF64_LIBS: c_int = 1142; -pub const _CS_POSIX_V7_LP64_OFF64_LINTFLAGS: c_int = 1143; -pub const _CS_POSIX_V7_LPBIG_OFFBIG_CFLAGS: c_int = 1144; -pub const _CS_POSIX_V7_LPBIG_OFFBIG_LDFLAGS: c_int = 1145; -pub const _CS_POSIX_V7_LPBIG_OFFBIG_LIBS: c_int = 1146; -pub const _CS_POSIX_V7_LPBIG_OFFBIG_LINTFLAGS: c_int = 1147; - -pub const RLIM_SAVED_MAX: crate::rlim_t = RLIM_INFINITY; -pub const RLIM_SAVED_CUR: crate::rlim_t = RLIM_INFINITY; - -// elf.h - Fields in the e_ident array. -pub const EI_NIDENT: usize = 16; - -pub const EI_MAG0: usize = 0; -pub const ELFMAG0: u8 = 0x7f; -pub const EI_MAG1: usize = 1; -pub const ELFMAG1: u8 = b'E'; -pub const EI_MAG2: usize = 2; -pub const ELFMAG2: u8 = b'L'; -pub const EI_MAG3: usize = 3; -pub const ELFMAG3: u8 = b'F'; -pub const SELFMAG: usize = 4; - -pub const EI_CLASS: usize = 4; -pub const ELFCLASSNONE: u8 = 0; -pub const ELFCLASS32: u8 = 1; -pub const ELFCLASS64: u8 = 2; -pub const ELFCLASSNUM: usize = 3; - -pub const EI_DATA: usize = 5; -pub const ELFDATANONE: u8 = 0; -pub const ELFDATA2LSB: u8 = 1; -pub const ELFDATA2MSB: u8 = 2; -pub const ELFDATANUM: usize = 3; - -pub const EI_VERSION: usize = 6; - -pub const EI_OSABI: usize = 7; -pub const ELFOSABI_NONE: u8 = 0; -pub const ELFOSABI_SYSV: u8 = 0; -pub const ELFOSABI_HPUX: u8 = 1; -pub const ELFOSABI_NETBSD: u8 = 2; -pub const ELFOSABI_GNU: u8 = 3; -pub const ELFOSABI_LINUX: u8 = ELFOSABI_GNU; -pub const ELFOSABI_SOLARIS: u8 = 6; -pub const ELFOSABI_AIX: u8 = 7; -pub const ELFOSABI_IRIX: u8 = 8; -pub const ELFOSABI_FREEBSD: u8 = 9; -pub const ELFOSABI_TRU64: u8 = 10; -pub const ELFOSABI_MODESTO: u8 = 11; -pub const ELFOSABI_OPENBSD: u8 = 12; -pub const ELFOSABI_ARM: u8 = 97; -pub const ELFOSABI_STANDALONE: u8 = 255; - -pub const EI_ABIVERSION: usize = 8; - -pub const EI_PAD: usize = 9; - -// elf.h - Legal values for e_type (object file type). -pub const ET_NONE: u16 = 0; -pub const ET_REL: u16 = 1; -pub const ET_EXEC: u16 = 2; -pub const ET_DYN: u16 = 3; -pub const ET_CORE: u16 = 4; -pub const ET_NUM: u16 = 5; -pub const ET_LOOS: u16 = 0xfe00; -pub const ET_HIOS: u16 = 0xfeff; -pub const ET_LOPROC: u16 = 0xff00; -pub const ET_HIPROC: u16 = 0xffff; - -// elf.h - Legal values for e_machine (architecture). 
-pub const EM_NONE: u16 = 0; -pub const EM_M32: u16 = 1; -pub const EM_SPARC: u16 = 2; -pub const EM_386: u16 = 3; -pub const EM_68K: u16 = 4; -pub const EM_88K: u16 = 5; -pub const EM_860: u16 = 7; -pub const EM_MIPS: u16 = 8; -pub const EM_S370: u16 = 9; -pub const EM_MIPS_RS3_LE: u16 = 10; -pub const EM_PARISC: u16 = 15; -pub const EM_VPP500: u16 = 17; -pub const EM_SPARC32PLUS: u16 = 18; -pub const EM_960: u16 = 19; -pub const EM_PPC: u16 = 20; -pub const EM_PPC64: u16 = 21; -pub const EM_S390: u16 = 22; -pub const EM_V800: u16 = 36; -pub const EM_FR20: u16 = 37; -pub const EM_RH32: u16 = 38; -pub const EM_RCE: u16 = 39; -pub const EM_ARM: u16 = 40; -pub const EM_FAKE_ALPHA: u16 = 41; -pub const EM_SH: u16 = 42; -pub const EM_SPARCV9: u16 = 43; -pub const EM_TRICORE: u16 = 44; -pub const EM_ARC: u16 = 45; -pub const EM_H8_300: u16 = 46; -pub const EM_H8_300H: u16 = 47; -pub const EM_H8S: u16 = 48; -pub const EM_H8_500: u16 = 49; -pub const EM_IA_64: u16 = 50; -pub const EM_MIPS_X: u16 = 51; -pub const EM_COLDFIRE: u16 = 52; -pub const EM_68HC12: u16 = 53; -pub const EM_MMA: u16 = 54; -pub const EM_PCP: u16 = 55; -pub const EM_NCPU: u16 = 56; -pub const EM_NDR1: u16 = 57; -pub const EM_STARCORE: u16 = 58; -pub const EM_ME16: u16 = 59; -pub const EM_ST100: u16 = 60; -pub const EM_TINYJ: u16 = 61; -pub const EM_X86_64: u16 = 62; -pub const EM_PDSP: u16 = 63; -pub const EM_FX66: u16 = 66; -pub const EM_ST9PLUS: u16 = 67; -pub const EM_ST7: u16 = 68; -pub const EM_68HC16: u16 = 69; -pub const EM_68HC11: u16 = 70; -pub const EM_68HC08: u16 = 71; -pub const EM_68HC05: u16 = 72; -pub const EM_SVX: u16 = 73; -pub const EM_ST19: u16 = 74; -pub const EM_VAX: u16 = 75; -pub const EM_CRIS: u16 = 76; -pub const EM_JAVELIN: u16 = 77; -pub const EM_FIREPATH: u16 = 78; -pub const EM_ZSP: u16 = 79; -pub const EM_MMIX: u16 = 80; -pub const EM_HUANY: u16 = 81; -pub const EM_PRISM: u16 = 82; -pub const EM_AVR: u16 = 83; -pub const EM_FR30: u16 = 84; -pub const EM_D10V: u16 = 85; -pub const EM_D30V: u16 = 86; -pub const EM_V850: u16 = 87; -pub const EM_M32R: u16 = 88; -pub const EM_MN10300: u16 = 89; -pub const EM_MN10200: u16 = 90; -pub const EM_PJ: u16 = 91; -pub const EM_OPENRISC: u16 = 92; -pub const EM_ARC_A5: u16 = 93; -pub const EM_XTENSA: u16 = 94; -pub const EM_AARCH64: u16 = 183; -pub const EM_TILEPRO: u16 = 188; -pub const EM_TILEGX: u16 = 191; -pub const EM_RISCV: u16 = 243; -pub const EM_ALPHA: u16 = 0x9026; - -// elf.h - Legal values for e_version (version). -pub const EV_NONE: u32 = 0; -pub const EV_CURRENT: u32 = 1; -pub const EV_NUM: u32 = 2; - -// elf.h - Legal values for p_type (segment type). -pub const PT_NULL: u32 = 0; -pub const PT_LOAD: u32 = 1; -pub const PT_DYNAMIC: u32 = 2; -pub const PT_INTERP: u32 = 3; -pub const PT_NOTE: u32 = 4; -pub const PT_SHLIB: u32 = 5; -pub const PT_PHDR: u32 = 6; -pub const PT_TLS: u32 = 7; -pub const PT_NUM: u32 = 8; -pub const PT_LOOS: u32 = 0x60000000; -pub const PT_GNU_EH_FRAME: u32 = 0x6474e550; -pub const PT_GNU_STACK: u32 = 0x6474e551; -pub const PT_GNU_RELRO: u32 = 0x6474e552; -pub const PT_LOSUNW: u32 = 0x6ffffffa; -pub const PT_SUNWBSS: u32 = 0x6ffffffa; -pub const PT_SUNWSTACK: u32 = 0x6ffffffb; -pub const PT_HISUNW: u32 = 0x6fffffff; -pub const PT_HIOS: u32 = 0x6fffffff; -pub const PT_LOPROC: u32 = 0x70000000; -pub const PT_HIPROC: u32 = 0x7fffffff; - -// Legal values for p_flags (segment flags). 
-pub const PF_X: u32 = 1 << 0; -pub const PF_W: u32 = 1 << 1; -pub const PF_R: u32 = 1 << 2; -pub const PF_MASKOS: u32 = 0x0ff00000; -pub const PF_MASKPROC: u32 = 0xf0000000; - -// elf.h - Legal values for a_type (entry type). -pub const AT_NULL: c_ulong = 0; -pub const AT_IGNORE: c_ulong = 1; -pub const AT_EXECFD: c_ulong = 2; -pub const AT_PHDR: c_ulong = 3; -pub const AT_PHENT: c_ulong = 4; -pub const AT_PHNUM: c_ulong = 5; -pub const AT_PAGESZ: c_ulong = 6; -pub const AT_BASE: c_ulong = 7; -pub const AT_FLAGS: c_ulong = 8; -pub const AT_ENTRY: c_ulong = 9; -pub const AT_NOTELF: c_ulong = 10; -pub const AT_UID: c_ulong = 11; -pub const AT_EUID: c_ulong = 12; -pub const AT_GID: c_ulong = 13; -pub const AT_EGID: c_ulong = 14; -pub const AT_PLATFORM: c_ulong = 15; -pub const AT_HWCAP: c_ulong = 16; -pub const AT_CLKTCK: c_ulong = 17; - -pub const AT_SECURE: c_ulong = 23; -pub const AT_BASE_PLATFORM: c_ulong = 24; -pub const AT_RANDOM: c_ulong = 25; -pub const AT_HWCAP2: c_ulong = 26; - -pub const AT_EXECFN: c_ulong = 31; - -// defined in arch//include/uapi/asm/auxvec.h but has the same value -// wherever it is defined. -pub const AT_SYSINFO_EHDR: c_ulong = 33; -pub const AT_MINSIGSTKSZ: c_ulong = 51; - -pub const GLOB_ERR: c_int = 1 << 0; -pub const GLOB_MARK: c_int = 1 << 1; -pub const GLOB_NOSORT: c_int = 1 << 2; -pub const GLOB_DOOFFS: c_int = 1 << 3; -pub const GLOB_NOCHECK: c_int = 1 << 4; -pub const GLOB_APPEND: c_int = 1 << 5; -pub const GLOB_NOESCAPE: c_int = 1 << 6; - -pub const GLOB_NOSPACE: c_int = 1; -pub const GLOB_ABORTED: c_int = 2; -pub const GLOB_NOMATCH: c_int = 3; - -pub const POSIX_MADV_NORMAL: c_int = 0; -pub const POSIX_MADV_RANDOM: c_int = 1; -pub const POSIX_MADV_SEQUENTIAL: c_int = 2; -pub const POSIX_MADV_WILLNEED: c_int = 3; -pub const POSIX_SPAWN_USEVFORK: c_int = 64; -pub const POSIX_SPAWN_SETSID: c_int = 128; - -pub const S_IEXEC: mode_t = 0o0100; -pub const S_IWRITE: mode_t = 0o0200; -pub const S_IREAD: mode_t = 0o0400; - -pub const F_LOCK: c_int = 1; -pub const F_TEST: c_int = 3; -pub const F_TLOCK: c_int = 2; -pub const F_ULOCK: c_int = 0; - -pub const F_SEAL_FUTURE_WRITE: c_int = 0x0010; -pub const F_SEAL_EXEC: c_int = 0x0020; - -pub const IFF_LOWER_UP: c_int = 0x10000; -pub const IFF_DORMANT: c_int = 0x20000; -pub const IFF_ECHO: c_int = 0x40000; - -// linux/if_addr.h -pub const IFA_UNSPEC: c_ushort = 0; -pub const IFA_ADDRESS: c_ushort = 1; -pub const IFA_LOCAL: c_ushort = 2; -pub const IFA_LABEL: c_ushort = 3; -pub const IFA_BROADCAST: c_ushort = 4; -pub const IFA_ANYCAST: c_ushort = 5; -pub const IFA_CACHEINFO: c_ushort = 6; -pub const IFA_MULTICAST: c_ushort = 7; -pub const IFA_FLAGS: c_ushort = 8; - -pub const IFA_F_SECONDARY: u32 = 0x01; -pub const IFA_F_TEMPORARY: u32 = 0x01; -pub const IFA_F_NODAD: u32 = 0x02; -pub const IFA_F_OPTIMISTIC: u32 = 0x04; -pub const IFA_F_DADFAILED: u32 = 0x08; -pub const IFA_F_HOMEADDRESS: u32 = 0x10; -pub const IFA_F_DEPRECATED: u32 = 0x20; -pub const IFA_F_TENTATIVE: u32 = 0x40; -pub const IFA_F_PERMANENT: u32 = 0x80; -pub const IFA_F_MANAGETEMPADDR: u32 = 0x100; -pub const IFA_F_NOPREFIXROUTE: u32 = 0x200; -pub const IFA_F_MCAUTOJOIN: u32 = 0x400; -pub const IFA_F_STABLE_PRIVACY: u32 = 0x800; - -// linux/fs.h - -// Flags for preadv2/pwritev2 -pub const RWF_HIPRI: c_int = 0x00000001; -pub const RWF_DSYNC: c_int = 0x00000002; -pub const RWF_SYNC: c_int = 0x00000004; -pub const RWF_NOWAIT: c_int = 0x00000008; -pub const RWF_APPEND: c_int = 0x00000010; -pub const RWF_NOAPPEND: c_int = 0x00000020; -pub const RWF_ATOMIC: 
c_int = 0x00000040; -pub const RWF_DONTCACHE: c_int = 0x00000080; - -// linux/if_link.h -pub const IFLA_UNSPEC: c_ushort = 0; -pub const IFLA_ADDRESS: c_ushort = 1; -pub const IFLA_BROADCAST: c_ushort = 2; -pub const IFLA_IFNAME: c_ushort = 3; -pub const IFLA_MTU: c_ushort = 4; -pub const IFLA_LINK: c_ushort = 5; -pub const IFLA_QDISC: c_ushort = 6; -pub const IFLA_STATS: c_ushort = 7; -pub const IFLA_COST: c_ushort = 8; -pub const IFLA_PRIORITY: c_ushort = 9; -pub const IFLA_MASTER: c_ushort = 10; -pub const IFLA_WIRELESS: c_ushort = 11; -pub const IFLA_PROTINFO: c_ushort = 12; -pub const IFLA_TXQLEN: c_ushort = 13; -pub const IFLA_MAP: c_ushort = 14; -pub const IFLA_WEIGHT: c_ushort = 15; -pub const IFLA_OPERSTATE: c_ushort = 16; -pub const IFLA_LINKMODE: c_ushort = 17; -pub const IFLA_LINKINFO: c_ushort = 18; -pub const IFLA_NET_NS_PID: c_ushort = 19; -pub const IFLA_IFALIAS: c_ushort = 20; -pub const IFLA_NUM_VF: c_ushort = 21; -pub const IFLA_VFINFO_LIST: c_ushort = 22; -pub const IFLA_STATS64: c_ushort = 23; -pub const IFLA_VF_PORTS: c_ushort = 24; -pub const IFLA_PORT_SELF: c_ushort = 25; -pub const IFLA_AF_SPEC: c_ushort = 26; -pub const IFLA_GROUP: c_ushort = 27; -pub const IFLA_NET_NS_FD: c_ushort = 28; -pub const IFLA_EXT_MASK: c_ushort = 29; -pub const IFLA_PROMISCUITY: c_ushort = 30; -pub const IFLA_NUM_TX_QUEUES: c_ushort = 31; -pub const IFLA_NUM_RX_QUEUES: c_ushort = 32; -pub const IFLA_CARRIER: c_ushort = 33; -pub const IFLA_PHYS_PORT_ID: c_ushort = 34; -pub const IFLA_CARRIER_CHANGES: c_ushort = 35; -pub const IFLA_PHYS_SWITCH_ID: c_ushort = 36; -pub const IFLA_LINK_NETNSID: c_ushort = 37; -pub const IFLA_PHYS_PORT_NAME: c_ushort = 38; -pub const IFLA_PROTO_DOWN: c_ushort = 39; -pub const IFLA_GSO_MAX_SEGS: c_ushort = 40; -pub const IFLA_GSO_MAX_SIZE: c_ushort = 41; -pub const IFLA_PAD: c_ushort = 42; -pub const IFLA_XDP: c_ushort = 43; -pub const IFLA_EVENT: c_ushort = 44; -pub const IFLA_NEW_NETNSID: c_ushort = 45; -pub const IFLA_IF_NETNSID: c_ushort = 46; -pub const IFLA_TARGET_NETNSID: c_ushort = IFLA_IF_NETNSID; -pub const IFLA_CARRIER_UP_COUNT: c_ushort = 47; -pub const IFLA_CARRIER_DOWN_COUNT: c_ushort = 48; -pub const IFLA_NEW_IFINDEX: c_ushort = 49; -pub const IFLA_MIN_MTU: c_ushort = 50; -pub const IFLA_MAX_MTU: c_ushort = 51; -pub const IFLA_PROP_LIST: c_ushort = 52; -pub const IFLA_ALT_IFNAME: c_ushort = 53; -pub const IFLA_PERM_ADDRESS: c_ushort = 54; -pub const IFLA_PROTO_DOWN_REASON: c_ushort = 55; -pub const IFLA_PARENT_DEV_NAME: c_ushort = 56; -pub const IFLA_PARENT_DEV_BUS_NAME: c_ushort = 57; -pub const IFLA_GRO_MAX_SIZE: c_ushort = 58; -pub const IFLA_TSO_MAX_SIZE: c_ushort = 59; -pub const IFLA_TSO_MAX_SEGS: c_ushort = 60; -pub const IFLA_ALLMULTI: c_ushort = 61; - -pub const IFLA_INFO_UNSPEC: c_ushort = 0; -pub const IFLA_INFO_KIND: c_ushort = 1; -pub const IFLA_INFO_DATA: c_ushort = 2; -pub const IFLA_INFO_XSTATS: c_ushort = 3; -pub const IFLA_INFO_SLAVE_KIND: c_ushort = 4; -pub const IFLA_INFO_SLAVE_DATA: c_ushort = 5; - -// Since Linux 3.1 -pub const SEEK_DATA: c_int = 3; -pub const SEEK_HOLE: c_int = 4; - -pub const ST_RDONLY: c_ulong = 1; -pub const ST_NOSUID: c_ulong = 2; -pub const ST_NODEV: c_ulong = 4; -pub const ST_NOEXEC: c_ulong = 8; -pub const ST_SYNCHRONOUS: c_ulong = 16; -pub const ST_MANDLOCK: c_ulong = 64; -pub const ST_WRITE: c_ulong = 128; -pub const ST_APPEND: c_ulong = 256; -pub const ST_IMMUTABLE: c_ulong = 512; -pub const ST_NOATIME: c_ulong = 1024; -pub const ST_NODIRATIME: c_ulong = 2048; - -pub const RTLD_NEXT: *mut c_void 
= -1i64 as *mut c_void; -pub const RTLD_DEFAULT: *mut c_void = ptr::null_mut(); -pub const RTLD_NODELETE: c_int = 0x1000; -pub const RTLD_NOW: c_int = 0x2; - -pub const AT_EACCESS: c_int = 0x200; - -// linux/mempolicy.h -pub const MPOL_DEFAULT: c_int = 0; -pub const MPOL_PREFERRED: c_int = 1; -pub const MPOL_BIND: c_int = 2; -pub const MPOL_INTERLEAVE: c_int = 3; -pub const MPOL_LOCAL: c_int = 4; -pub const MPOL_F_NUMA_BALANCING: c_int = 1 << 13; -pub const MPOL_F_RELATIVE_NODES: c_int = 1 << 14; -pub const MPOL_F_STATIC_NODES: c_int = 1 << 15; - -// linux/membarrier.h -pub const MEMBARRIER_CMD_QUERY: c_int = 0; -pub const MEMBARRIER_CMD_GLOBAL: c_int = 1 << 0; -pub const MEMBARRIER_CMD_GLOBAL_EXPEDITED: c_int = 1 << 1; -pub const MEMBARRIER_CMD_REGISTER_GLOBAL_EXPEDITED: c_int = 1 << 2; -pub const MEMBARRIER_CMD_PRIVATE_EXPEDITED: c_int = 1 << 3; -pub const MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED: c_int = 1 << 4; -pub const MEMBARRIER_CMD_PRIVATE_EXPEDITED_SYNC_CORE: c_int = 1 << 5; -pub const MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_SYNC_CORE: c_int = 1 << 6; -pub const MEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ: c_int = 1 << 7; -pub const MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_RSEQ: c_int = 1 << 8; - -pub const PTHREAD_MUTEX_INITIALIZER: pthread_mutex_t = pthread_mutex_t { - size: [0; __SIZEOF_PTHREAD_MUTEX_T], -}; -pub const PTHREAD_COND_INITIALIZER: pthread_cond_t = pthread_cond_t { - size: [0; __SIZEOF_PTHREAD_COND_T], -}; -pub const PTHREAD_RWLOCK_INITIALIZER: pthread_rwlock_t = pthread_rwlock_t { - size: [0; __SIZEOF_PTHREAD_RWLOCK_T], -}; - -pub const PTHREAD_BARRIER_SERIAL_THREAD: c_int = -1; -pub const PTHREAD_ONCE_INIT: pthread_once_t = 0; -pub const PTHREAD_MUTEX_NORMAL: c_int = 0; -pub const PTHREAD_MUTEX_RECURSIVE: c_int = 1; -pub const PTHREAD_MUTEX_ERRORCHECK: c_int = 2; -pub const PTHREAD_MUTEX_DEFAULT: c_int = PTHREAD_MUTEX_NORMAL; -pub const PTHREAD_MUTEX_STALLED: c_int = 0; -pub const PTHREAD_MUTEX_ROBUST: c_int = 1; -pub const PTHREAD_PRIO_NONE: c_int = 0; -pub const PTHREAD_PRIO_INHERIT: c_int = 1; -pub const PTHREAD_PRIO_PROTECT: c_int = 2; -pub const PTHREAD_PROCESS_PRIVATE: c_int = 0; -pub const PTHREAD_PROCESS_SHARED: c_int = 1; -pub const PTHREAD_INHERIT_SCHED: c_int = 0; -pub const PTHREAD_EXPLICIT_SCHED: c_int = 1; -pub const __SIZEOF_PTHREAD_COND_T: usize = 48; - -pub const RENAME_NOREPLACE: c_uint = 1; -pub const RENAME_EXCHANGE: c_uint = 2; -pub const RENAME_WHITEOUT: c_uint = 4; - -// netinet/in.h -// NOTE: These are in addition to the constants defined in src/unix/mod.rs - -#[deprecated( - since = "0.2.80", - note = "This value was increased in the newer kernel \ - and we'll change this following upstream in the future release. \ - See #1896 for more info." 
-)] -pub const IPPROTO_MAX: c_int = 256; - -// System V IPC -pub const IPC_PRIVATE: crate::key_t = 0; - -pub const IPC_CREAT: c_int = 0o1000; -pub const IPC_EXCL: c_int = 0o2000; -pub const IPC_NOWAIT: c_int = 0o4000; - -pub const IPC_RMID: c_int = 0; -pub const IPC_SET: c_int = 1; -pub const IPC_STAT: c_int = 2; -pub const IPC_INFO: c_int = 3; -pub const MSG_STAT: c_int = 11; -pub const MSG_INFO: c_int = 12; -pub const MSG_NOTIFICATION: c_int = 0x8000; - -pub const MSG_NOERROR: c_int = 0o10000; -pub const MSG_EXCEPT: c_int = 0o20000; -pub const MSG_ZEROCOPY: c_int = 0x4000000; - -pub const SEM_UNDO: c_int = 0x1000; - -pub const GETPID: c_int = 11; -pub const GETVAL: c_int = 12; -pub const GETALL: c_int = 13; -pub const GETNCNT: c_int = 14; -pub const GETZCNT: c_int = 15; -pub const SETVAL: c_int = 16; -pub const SETALL: c_int = 17; -pub const SEM_STAT: c_int = 18; -pub const SEM_INFO: c_int = 19; -pub const SEM_STAT_ANY: c_int = 20; - -pub const SHM_R: c_int = 0o400; -pub const SHM_W: c_int = 0o200; - -pub const SHM_RDONLY: c_int = 0o10000; -pub const SHM_RND: c_int = 0o20000; -pub const SHM_REMAP: c_int = 0o40000; - -pub const SHM_LOCK: c_int = 11; -pub const SHM_UNLOCK: c_int = 12; - -pub const SHM_HUGETLB: c_int = 0o4000; -#[cfg(not(all(target_env = "uclibc", target_arch = "mips")))] -pub const SHM_NORESERVE: c_int = 0o10000; - -pub const QFMT_VFS_OLD: c_int = 1; -pub const QFMT_VFS_V0: c_int = 2; -pub const QFMT_VFS_V1: c_int = 4; - -pub const EFD_SEMAPHORE: c_int = 0x1; - -pub const LOG_NFACILITIES: c_int = 24; - -pub const SEM_FAILED: *mut crate::sem_t = ptr::null_mut(); - -pub const RB_AUTOBOOT: c_int = 0x01234567u32 as i32; -pub const RB_HALT_SYSTEM: c_int = 0xcdef0123u32 as i32; -pub const RB_ENABLE_CAD: c_int = 0x89abcdefu32 as i32; -pub const RB_DISABLE_CAD: c_int = 0x00000000u32 as i32; -pub const RB_POWER_OFF: c_int = 0x4321fedcu32 as i32; -pub const RB_SW_SUSPEND: c_int = 0xd000fce2u32 as i32; -pub const RB_KEXEC: c_int = 0x45584543u32 as i32; - -pub const AI_PASSIVE: c_int = 0x0001; -pub const AI_CANONNAME: c_int = 0x0002; -pub const AI_NUMERICHOST: c_int = 0x0004; -pub const AI_V4MAPPED: c_int = 0x0008; -pub const AI_ALL: c_int = 0x0010; -pub const AI_ADDRCONFIG: c_int = 0x0020; - -pub const AI_NUMERICSERV: c_int = 0x0400; - -pub const EAI_BADFLAGS: c_int = -1; -pub const EAI_NONAME: c_int = -2; -pub const EAI_AGAIN: c_int = -3; -pub const EAI_FAIL: c_int = -4; -pub const EAI_NODATA: c_int = -5; -pub const EAI_FAMILY: c_int = -6; -pub const EAI_SOCKTYPE: c_int = -7; -pub const EAI_SERVICE: c_int = -8; -pub const EAI_MEMORY: c_int = -10; -pub const EAI_SYSTEM: c_int = -11; -pub const EAI_OVERFLOW: c_int = -12; - -pub const NI_NUMERICHOST: c_int = 1; -pub const NI_NUMERICSERV: c_int = 2; -pub const NI_NOFQDN: c_int = 4; -pub const NI_NAMEREQD: c_int = 8; -pub const NI_DGRAM: c_int = 16; -pub const NI_IDN: c_int = 32; - -pub const SYNC_FILE_RANGE_WAIT_BEFORE: c_uint = 1; -pub const SYNC_FILE_RANGE_WRITE: c_uint = 2; -pub const SYNC_FILE_RANGE_WAIT_AFTER: c_uint = 4; - -cfg_if! 
{ - if #[cfg(not(target_env = "uclibc"))] { - pub const AIO_CANCELED: c_int = 0; - pub const AIO_NOTCANCELED: c_int = 1; - pub const AIO_ALLDONE: c_int = 2; - pub const LIO_READ: c_int = 0; - pub const LIO_WRITE: c_int = 1; - pub const LIO_NOP: c_int = 2; - pub const LIO_WAIT: c_int = 0; - pub const LIO_NOWAIT: c_int = 1; - pub const RUSAGE_THREAD: c_int = 1; - pub const MSG_COPY: c_int = 0o40000; - pub const SHM_EXEC: c_int = 0o100000; - pub const IPV6_MULTICAST_ALL: c_int = 29; - pub const IPV6_ROUTER_ALERT_ISOLATE: c_int = 30; - pub const PACKET_MR_UNICAST: c_int = 3; - pub const PTRACE_EVENT_STOP: c_int = 128; - pub const UDP_SEGMENT: c_int = 103; - pub const UDP_GRO: c_int = 104; - } -} - -pub const MREMAP_MAYMOVE: c_int = 1; -pub const MREMAP_FIXED: c_int = 2; -pub const MREMAP_DONTUNMAP: c_int = 4; - -// linux/nsfs.h -const NSIO: c_uint = 0xb7; - -pub const NS_GET_USERNS: Ioctl = _IO(NSIO, 0x1); -pub const NS_GET_PARENT: Ioctl = _IO(NSIO, 0x2); -pub const NS_GET_NSTYPE: Ioctl = _IO(NSIO, 0x3); -pub const NS_GET_OWNER_UID: Ioctl = _IO(NSIO, 0x4); - -pub const NS_GET_MNTNS_ID: Ioctl = _IOR::<__u64>(NSIO, 0x5); - -pub const NS_GET_PID_FROM_PIDNS: Ioctl = _IOR::(NSIO, 0x6); -pub const NS_GET_TGID_FROM_PIDNS: Ioctl = _IOR::(NSIO, 0x7); -pub const NS_GET_PID_IN_PIDNS: Ioctl = _IOR::(NSIO, 0x8); -pub const NS_GET_TGID_IN_PIDNS: Ioctl = _IOR::(NSIO, 0x9); - -pub const MNT_NS_INFO_SIZE_VER0: Ioctl = 16; - -pub const NS_MNT_GET_INFO: Ioctl = _IOR::(NSIO, 10); -pub const NS_MNT_GET_NEXT: Ioctl = _IOR::(NSIO, 11); -pub const NS_MNT_GET_PREV: Ioctl = _IOR::(NSIO, 12); - -// linux/pidfd.h -pub const PIDFD_NONBLOCK: c_uint = O_NONBLOCK as c_uint; -pub const PIDFD_THREAD: c_uint = O_EXCL as c_uint; - -pub const PIDFD_SIGNAL_THREAD: c_uint = 1 << 0; -pub const PIDFD_SIGNAL_THREAD_GROUP: c_uint = 1 << 1; -pub const PIDFD_SIGNAL_PROCESS_GROUP: c_uint = 1 << 2; - -pub const PIDFD_INFO_PID: c_uint = 1 << 0; -pub const PIDFD_INFO_CREDS: c_uint = 1 << 1; -pub const PIDFD_INFO_CGROUPID: c_uint = 1 << 2; -pub const PIDFD_INFO_EXIT: c_uint = 1 << 3; - -pub const PIDFD_INFO_SIZE_VER0: c_uint = 64; - -const PIDFS_IOCTL_MAGIC: c_uint = 0xFF; -pub const PIDFD_GET_CGROUP_NAMESPACE: Ioctl = _IO(PIDFS_IOCTL_MAGIC, 1); -pub const PIDFD_GET_IPC_NAMESPACE: Ioctl = _IO(PIDFS_IOCTL_MAGIC, 2); -pub const PIDFD_GET_MNT_NAMESPACE: Ioctl = _IO(PIDFS_IOCTL_MAGIC, 3); -pub const PIDFD_GET_NET_NAMESPACE: Ioctl = _IO(PIDFS_IOCTL_MAGIC, 4); -pub const PIDFD_GET_PID_NAMESPACE: Ioctl = _IO(PIDFS_IOCTL_MAGIC, 5); -pub const PIDFD_GET_PID_FOR_CHILDREN_NAMESPACE: Ioctl = _IO(PIDFS_IOCTL_MAGIC, 6); -pub const PIDFD_GET_TIME_NAMESPACE: Ioctl = _IO(PIDFS_IOCTL_MAGIC, 7); -pub const PIDFD_GET_TIME_FOR_CHILDREN_NAMESPACE: Ioctl = _IO(PIDFS_IOCTL_MAGIC, 8); -pub const PIDFD_GET_USER_NAMESPACE: Ioctl = _IO(PIDFS_IOCTL_MAGIC, 9); -pub const PIDFD_GET_UTS_NAMESPACE: Ioctl = _IO(PIDFS_IOCTL_MAGIC, 10); -pub const PIDFD_GET_INFO: Ioctl = _IOWR::(PIDFS_IOCTL_MAGIC, 11); - -// linux/prctl.h -pub const PR_SET_PDEATHSIG: c_int = 1; -pub const PR_GET_PDEATHSIG: c_int = 2; - -pub const PR_GET_DUMPABLE: c_int = 3; -pub const PR_SET_DUMPABLE: c_int = 4; - -pub const PR_GET_UNALIGN: c_int = 5; -pub const PR_SET_UNALIGN: c_int = 6; -pub const PR_UNALIGN_NOPRINT: c_int = 1; -pub const PR_UNALIGN_SIGBUS: c_int = 2; - -pub const PR_GET_KEEPCAPS: c_int = 7; -pub const PR_SET_KEEPCAPS: c_int = 8; - -pub const PR_GET_FPEMU: c_int = 9; -pub const PR_SET_FPEMU: c_int = 10; -pub const PR_FPEMU_NOPRINT: c_int = 1; -pub const PR_FPEMU_SIGFPE: c_int = 2; - -pub 
const PR_GET_FPEXC: c_int = 11; -pub const PR_SET_FPEXC: c_int = 12; -pub const PR_FP_EXC_SW_ENABLE: c_int = 0x80; -pub const PR_FP_EXC_DIV: c_int = 0x010000; -pub const PR_FP_EXC_OVF: c_int = 0x020000; -pub const PR_FP_EXC_UND: c_int = 0x040000; -pub const PR_FP_EXC_RES: c_int = 0x080000; -pub const PR_FP_EXC_INV: c_int = 0x100000; -pub const PR_FP_EXC_DISABLED: c_int = 0; -pub const PR_FP_EXC_NONRECOV: c_int = 1; -pub const PR_FP_EXC_ASYNC: c_int = 2; -pub const PR_FP_EXC_PRECISE: c_int = 3; - -pub const PR_GET_TIMING: c_int = 13; -pub const PR_SET_TIMING: c_int = 14; -pub const PR_TIMING_STATISTICAL: c_int = 0; -pub const PR_TIMING_TIMESTAMP: c_int = 1; - -pub const PR_SET_NAME: c_int = 15; -pub const PR_GET_NAME: c_int = 16; - -pub const PR_GET_ENDIAN: c_int = 19; -pub const PR_SET_ENDIAN: c_int = 20; -pub const PR_ENDIAN_BIG: c_int = 0; -pub const PR_ENDIAN_LITTLE: c_int = 1; -pub const PR_ENDIAN_PPC_LITTLE: c_int = 2; - -pub const PR_GET_SECCOMP: c_int = 21; -pub const PR_SET_SECCOMP: c_int = 22; - -pub const PR_CAPBSET_READ: c_int = 23; -pub const PR_CAPBSET_DROP: c_int = 24; - -pub const PR_GET_TSC: c_int = 25; -pub const PR_SET_TSC: c_int = 26; -pub const PR_TSC_ENABLE: c_int = 1; -pub const PR_TSC_SIGSEGV: c_int = 2; - -pub const PR_GET_SECUREBITS: c_int = 27; -pub const PR_SET_SECUREBITS: c_int = 28; - -pub const PR_SET_TIMERSLACK: c_int = 29; -pub const PR_GET_TIMERSLACK: c_int = 30; - -pub const PR_TASK_PERF_EVENTS_DISABLE: c_int = 31; -pub const PR_TASK_PERF_EVENTS_ENABLE: c_int = 32; - -pub const PR_MCE_KILL: c_int = 33; -pub const PR_MCE_KILL_CLEAR: c_int = 0; -pub const PR_MCE_KILL_SET: c_int = 1; - -pub const PR_MCE_KILL_LATE: c_int = 0; -pub const PR_MCE_KILL_EARLY: c_int = 1; -pub const PR_MCE_KILL_DEFAULT: c_int = 2; - -pub const PR_MCE_KILL_GET: c_int = 34; - -pub const PR_SET_MM: c_int = 35; -pub const PR_SET_MM_START_CODE: c_int = 1; -pub const PR_SET_MM_END_CODE: c_int = 2; -pub const PR_SET_MM_START_DATA: c_int = 3; -pub const PR_SET_MM_END_DATA: c_int = 4; -pub const PR_SET_MM_START_STACK: c_int = 5; -pub const PR_SET_MM_START_BRK: c_int = 6; -pub const PR_SET_MM_BRK: c_int = 7; -pub const PR_SET_MM_ARG_START: c_int = 8; -pub const PR_SET_MM_ARG_END: c_int = 9; -pub const PR_SET_MM_ENV_START: c_int = 10; -pub const PR_SET_MM_ENV_END: c_int = 11; -pub const PR_SET_MM_AUXV: c_int = 12; -pub const PR_SET_MM_EXE_FILE: c_int = 13; -pub const PR_SET_MM_MAP: c_int = 14; -pub const PR_SET_MM_MAP_SIZE: c_int = 15; - -pub const PR_SET_PTRACER: c_int = 0x59616d61; -pub const PR_SET_PTRACER_ANY: c_ulong = 0xffffffffffffffff; - -pub const PR_SET_CHILD_SUBREAPER: c_int = 36; -pub const PR_GET_CHILD_SUBREAPER: c_int = 37; - -pub const PR_SET_NO_NEW_PRIVS: c_int = 38; -pub const PR_GET_NO_NEW_PRIVS: c_int = 39; - -pub const PR_SET_MDWE: c_int = 65; -pub const PR_GET_MDWE: c_int = 66; -pub const PR_MDWE_REFUSE_EXEC_GAIN: c_uint = 1 << 0; -pub const PR_MDWE_NO_INHERIT: c_uint = 1 << 1; - -pub const PR_GET_TID_ADDRESS: c_int = 40; - -pub const PR_SET_THP_DISABLE: c_int = 41; -pub const PR_GET_THP_DISABLE: c_int = 42; - -pub const PR_MPX_ENABLE_MANAGEMENT: c_int = 43; -pub const PR_MPX_DISABLE_MANAGEMENT: c_int = 44; - -pub const PR_SET_FP_MODE: c_int = 45; -pub const PR_GET_FP_MODE: c_int = 46; -pub const PR_FP_MODE_FR: c_int = 1 << 0; -pub const PR_FP_MODE_FRE: c_int = 1 << 1; - -pub const PR_CAP_AMBIENT: c_int = 47; -pub const PR_CAP_AMBIENT_IS_SET: c_int = 1; -pub const PR_CAP_AMBIENT_RAISE: c_int = 2; -pub const PR_CAP_AMBIENT_LOWER: c_int = 3; -pub const 
PR_CAP_AMBIENT_CLEAR_ALL: c_int = 4; - -pub const PR_SET_VMA: c_int = 0x53564d41; -pub const PR_SET_VMA_ANON_NAME: c_int = 0; - -pub const PR_SCHED_CORE: c_int = 62; -pub const PR_SCHED_CORE_GET: c_int = 0; -pub const PR_SCHED_CORE_CREATE: c_int = 1; -pub const PR_SCHED_CORE_SHARE_TO: c_int = 2; -pub const PR_SCHED_CORE_SHARE_FROM: c_int = 3; -pub const PR_SCHED_CORE_MAX: c_int = 4; -pub const PR_SCHED_CORE_SCOPE_THREAD: c_int = 0; -pub const PR_SCHED_CORE_SCOPE_THREAD_GROUP: c_int = 1; -pub const PR_SCHED_CORE_SCOPE_PROCESS_GROUP: c_int = 2; - -pub const GRND_NONBLOCK: c_uint = 0x0001; -pub const GRND_RANDOM: c_uint = 0x0002; -pub const GRND_INSECURE: c_uint = 0x0004; - -// -pub const SECCOMP_MODE_DISABLED: c_uint = 0; -pub const SECCOMP_MODE_STRICT: c_uint = 1; -pub const SECCOMP_MODE_FILTER: c_uint = 2; - -pub const SECCOMP_SET_MODE_STRICT: c_uint = 0; -pub const SECCOMP_SET_MODE_FILTER: c_uint = 1; -pub const SECCOMP_GET_ACTION_AVAIL: c_uint = 2; -pub const SECCOMP_GET_NOTIF_SIZES: c_uint = 3; - -pub const SECCOMP_FILTER_FLAG_TSYNC: c_ulong = 1 << 0; -pub const SECCOMP_FILTER_FLAG_LOG: c_ulong = 1 << 1; -pub const SECCOMP_FILTER_FLAG_SPEC_ALLOW: c_ulong = 1 << 2; -pub const SECCOMP_FILTER_FLAG_NEW_LISTENER: c_ulong = 1 << 3; -pub const SECCOMP_FILTER_FLAG_TSYNC_ESRCH: c_ulong = 1 << 4; -pub const SECCOMP_FILTER_FLAG_WAIT_KILLABLE_RECV: c_ulong = 1 << 5; - -pub const SECCOMP_RET_KILL_PROCESS: c_uint = 0x80000000; -pub const SECCOMP_RET_KILL_THREAD: c_uint = 0x00000000; -pub const SECCOMP_RET_KILL: c_uint = SECCOMP_RET_KILL_THREAD; -pub const SECCOMP_RET_TRAP: c_uint = 0x00030000; -pub const SECCOMP_RET_ERRNO: c_uint = 0x00050000; -pub const SECCOMP_RET_USER_NOTIF: c_uint = 0x7fc00000; -pub const SECCOMP_RET_TRACE: c_uint = 0x7ff00000; -pub const SECCOMP_RET_LOG: c_uint = 0x7ffc0000; -pub const SECCOMP_RET_ALLOW: c_uint = 0x7fff0000; - -pub const SECCOMP_RET_ACTION_FULL: c_uint = 0xffff0000; -pub const SECCOMP_RET_ACTION: c_uint = 0x7fff0000; -pub const SECCOMP_RET_DATA: c_uint = 0x0000ffff; - -pub const SECCOMP_USER_NOTIF_FLAG_CONTINUE: c_ulong = 1; - -pub const SECCOMP_ADDFD_FLAG_SETFD: c_ulong = 1; -pub const SECCOMP_ADDFD_FLAG_SEND: c_ulong = 2; - -pub const ITIMER_REAL: c_int = 0; -pub const ITIMER_VIRTUAL: c_int = 1; -pub const ITIMER_PROF: c_int = 2; - -pub const TFD_CLOEXEC: c_int = O_CLOEXEC; -pub const TFD_NONBLOCK: c_int = O_NONBLOCK; -pub const TFD_TIMER_ABSTIME: c_int = 1; -pub const TFD_TIMER_CANCEL_ON_SET: c_int = 2; - -pub const _POSIX_VDISABLE: crate::cc_t = 0; - -pub const FALLOC_FL_KEEP_SIZE: c_int = 0x01; -pub const FALLOC_FL_PUNCH_HOLE: c_int = 0x02; -pub const FALLOC_FL_COLLAPSE_RANGE: c_int = 0x08; -pub const FALLOC_FL_ZERO_RANGE: c_int = 0x10; -pub const FALLOC_FL_INSERT_RANGE: c_int = 0x20; -pub const FALLOC_FL_UNSHARE_RANGE: c_int = 0x40; - -#[deprecated( - since = "0.2.55", - note = "ENOATTR is not available on Linux; use ENODATA instead" -)] -pub const ENOATTR: c_int = crate::ENODATA; - -pub const SO_ORIGINAL_DST: c_int = 80; - -pub const IP_RECVFRAGSIZE: c_int = 25; - -pub const IPV6_FLOWINFO: c_int = 11; -pub const IPV6_FLOWLABEL_MGR: c_int = 32; -pub const IPV6_FLOWINFO_SEND: c_int = 33; -pub const IPV6_RECVFRAGSIZE: c_int = 77; -pub const IPV6_FREEBIND: c_int = 78; -pub const IPV6_FLOWINFO_FLOWLABEL: c_int = 0x000fffff; -pub const IPV6_FLOWINFO_PRIORITY: c_int = 0x0ff00000; - -pub const IPV6_RTHDR_LOOSE: c_int = 0; -pub const IPV6_RTHDR_STRICT: c_int = 1; - -// SO_MEMINFO offsets -pub const SK_MEMINFO_RMEM_ALLOC: c_int = 0; -pub const SK_MEMINFO_RCVBUF: 
c_int = 1;
-pub const SK_MEMINFO_WMEM_ALLOC: c_int = 2;
-pub const SK_MEMINFO_SNDBUF: c_int = 3;
-pub const SK_MEMINFO_FWD_ALLOC: c_int = 4;
-pub const SK_MEMINFO_WMEM_QUEUED: c_int = 5;
-pub const SK_MEMINFO_OPTMEM: c_int = 6;
-pub const SK_MEMINFO_BACKLOG: c_int = 7;
-pub const SK_MEMINFO_DROPS: c_int = 8;
-
-pub const IUTF8: crate::tcflag_t = 0x00004000;
-#[cfg(not(all(target_env = "uclibc", target_arch = "mips")))]
-pub const CMSPAR: crate::tcflag_t = 0o10000000000;
-
-pub const MFD_CLOEXEC: c_uint = 0x0001;
-pub const MFD_ALLOW_SEALING: c_uint = 0x0002;
-pub const MFD_HUGETLB: c_uint = 0x0004;
-pub const MFD_NOEXEC_SEAL: c_uint = 0x0008;
-pub const MFD_EXEC: c_uint = 0x0010;
-pub const MFD_HUGE_64KB: c_uint = 0x40000000;
-pub const MFD_HUGE_512KB: c_uint = 0x4c000000;
-pub const MFD_HUGE_1MB: c_uint = 0x50000000;
-pub const MFD_HUGE_2MB: c_uint = 0x54000000;
-pub const MFD_HUGE_8MB: c_uint = 0x5c000000;
-pub const MFD_HUGE_16MB: c_uint = 0x60000000;
-pub const MFD_HUGE_32MB: c_uint = 0x64000000;
-pub const MFD_HUGE_256MB: c_uint = 0x70000000;
-pub const MFD_HUGE_512MB: c_uint = 0x74000000;
-pub const MFD_HUGE_1GB: c_uint = 0x78000000;
-pub const MFD_HUGE_2GB: c_uint = 0x7c000000;
-pub const MFD_HUGE_16GB: c_uint = 0x88000000;
-pub const MFD_HUGE_MASK: c_uint = 63;
-pub const MFD_HUGE_SHIFT: c_uint = 26;
-
-// linux/close_range.h
-pub const CLOSE_RANGE_UNSHARE: c_uint = 1 << 1;
-pub const CLOSE_RANGE_CLOEXEC: c_uint = 1 << 2;
-
-// linux/filter.h
-pub const SKF_AD_OFF: c_int = -0x1000;
-pub const SKF_AD_PROTOCOL: c_int = 0;
-pub const SKF_AD_PKTTYPE: c_int = 4;
-pub const SKF_AD_IFINDEX: c_int = 8;
-pub const SKF_AD_NLATTR: c_int = 12;
-pub const SKF_AD_NLATTR_NEST: c_int = 16;
-pub const SKF_AD_MARK: c_int = 20;
-pub const SKF_AD_QUEUE: c_int = 24;
-pub const SKF_AD_HATYPE: c_int = 28;
-pub const SKF_AD_RXHASH: c_int = 32;
-pub const SKF_AD_CPU: c_int = 36;
-pub const SKF_AD_ALU_XOR_X: c_int = 40;
-pub const SKF_AD_VLAN_TAG: c_int = 44;
-pub const SKF_AD_VLAN_TAG_PRESENT: c_int = 48;
-pub const SKF_AD_PAY_OFFSET: c_int = 52;
-pub const SKF_AD_RANDOM: c_int = 56;
-pub const SKF_AD_VLAN_TPID: c_int = 60;
-pub const SKF_AD_MAX: c_int = 64;
-pub const SKF_NET_OFF: c_int = -0x100000;
-pub const SKF_LL_OFF: c_int = -0x200000;
-pub const BPF_NET_OFF: c_int = SKF_NET_OFF;
-pub const BPF_LL_OFF: c_int = SKF_LL_OFF;
-pub const BPF_MEMWORDS: c_int = 16;
-pub const BPF_MAXINSNS: c_int = 4096;
-
-// linux/bpf_common.h
-pub const BPF_LD: __u32 = 0x00;
-pub const BPF_LDX: __u32 = 0x01;
-pub const BPF_ST: __u32 = 0x02;
-pub const BPF_STX: __u32 = 0x03;
-pub const BPF_ALU: __u32 = 0x04;
-pub const BPF_JMP: __u32 = 0x05;
-pub const BPF_RET: __u32 = 0x06;
-pub const BPF_MISC: __u32 = 0x07;
-pub const BPF_W: __u32 = 0x00;
-pub const BPF_H: __u32 = 0x08;
-pub const BPF_B: __u32 = 0x10;
-pub const BPF_IMM: __u32 = 0x00;
-pub const BPF_ABS: __u32 = 0x20;
-pub const BPF_IND: __u32 = 0x40;
-pub const BPF_MEM: __u32 = 0x60;
-pub const BPF_LEN: __u32 = 0x80;
-pub const BPF_MSH: __u32 = 0xa0;
-pub const BPF_ADD: __u32 = 0x00;
-pub const BPF_SUB: __u32 = 0x10;
-pub const BPF_MUL: __u32 = 0x20;
-pub const BPF_DIV: __u32 = 0x30;
-pub const BPF_OR: __u32 = 0x40;
-pub const BPF_AND: __u32 = 0x50;
-pub const BPF_LSH: __u32 = 0x60;
-pub const BPF_RSH: __u32 = 0x70;
-pub const BPF_NEG: __u32 = 0x80;
-pub const BPF_MOD: __u32 = 0x90;
-pub const BPF_XOR: __u32 = 0xa0;
-pub const BPF_JA: __u32 = 0x00;
-pub const BPF_JEQ: __u32 = 0x10;
-pub const BPF_JGT: __u32 = 0x20;
-pub const BPF_JGE: __u32 = 0x30;
-pub const BPF_JSET: __u32 = 0x40;
-pub const BPF_K: __u32 = 0x00;
-pub const BPF_X: __u32 = 0x08;
-
-// linux/filter.h
-
-pub const BPF_A: __u32 = 0x10;
-pub const BPF_TAX: __u32 = 0x00;
-pub const BPF_TXA: __u32 = 0x80;
-
-// linux/openat2.h
-pub const RESOLVE_NO_XDEV: crate::__u64 = 0x01;
-pub const RESOLVE_NO_MAGICLINKS: crate::__u64 = 0x02;
-pub const RESOLVE_NO_SYMLINKS: crate::__u64 = 0x04;
-pub const RESOLVE_BENEATH: crate::__u64 = 0x08;
-pub const RESOLVE_IN_ROOT: crate::__u64 = 0x10;
-pub const RESOLVE_CACHED: crate::__u64 = 0x20;
-
-// linux/if_ether.h
-pub const ETH_ALEN: c_int = 6;
-pub const ETH_HLEN: c_int = 14;
-pub const ETH_ZLEN: c_int = 60;
-pub const ETH_DATA_LEN: c_int = 1500;
-pub const ETH_FRAME_LEN: c_int = 1514;
-pub const ETH_FCS_LEN: c_int = 4;
-
-// These are the defined Ethernet Protocol ID's.
-pub const ETH_P_LOOP: c_int = 0x0060;
-pub const ETH_P_PUP: c_int = 0x0200;
-pub const ETH_P_PUPAT: c_int = 0x0201;
-pub const ETH_P_IP: c_int = 0x0800;
-pub const ETH_P_X25: c_int = 0x0805;
-pub const ETH_P_ARP: c_int = 0x0806;
-pub const ETH_P_BPQ: c_int = 0x08FF;
-pub const ETH_P_IEEEPUP: c_int = 0x0a00;
-pub const ETH_P_IEEEPUPAT: c_int = 0x0a01;
-pub const ETH_P_BATMAN: c_int = 0x4305;
-pub const ETH_P_DEC: c_int = 0x6000;
-pub const ETH_P_DNA_DL: c_int = 0x6001;
-pub const ETH_P_DNA_RC: c_int = 0x6002;
-pub const ETH_P_DNA_RT: c_int = 0x6003;
-pub const ETH_P_LAT: c_int = 0x6004;
-pub const ETH_P_DIAG: c_int = 0x6005;
-pub const ETH_P_CUST: c_int = 0x6006;
-pub const ETH_P_SCA: c_int = 0x6007;
-pub const ETH_P_TEB: c_int = 0x6558;
-pub const ETH_P_RARP: c_int = 0x8035;
-pub const ETH_P_ATALK: c_int = 0x809B;
-pub const ETH_P_AARP: c_int = 0x80F3;
-pub const ETH_P_8021Q: c_int = 0x8100;
-pub const ETH_P_IPX: c_int = 0x8137;
-pub const ETH_P_IPV6: c_int = 0x86DD;
-pub const ETH_P_PAUSE: c_int = 0x8808;
-pub const ETH_P_SLOW: c_int = 0x8809;
-pub const ETH_P_WCCP: c_int = 0x883E;
-pub const ETH_P_MPLS_UC: c_int = 0x8847;
-pub const ETH_P_MPLS_MC: c_int = 0x8848;
-pub const ETH_P_ATMMPOA: c_int = 0x884c;
-pub const ETH_P_PPP_DISC: c_int = 0x8863;
-pub const ETH_P_PPP_SES: c_int = 0x8864;
-pub const ETH_P_LINK_CTL: c_int = 0x886c;
-pub const ETH_P_ATMFATE: c_int = 0x8884;
-pub const ETH_P_PAE: c_int = 0x888E;
-pub const ETH_P_AOE: c_int = 0x88A2;
-pub const ETH_P_8021AD: c_int = 0x88A8;
-pub const ETH_P_802_EX1: c_int = 0x88B5;
-pub const ETH_P_TIPC: c_int = 0x88CA;
-pub const ETH_P_MACSEC: c_int = 0x88E5;
-pub const ETH_P_8021AH: c_int = 0x88E7;
-pub const ETH_P_MVRP: c_int = 0x88F5;
-pub const ETH_P_1588: c_int = 0x88F7;
-pub const ETH_P_PRP: c_int = 0x88FB;
-pub const ETH_P_FCOE: c_int = 0x8906;
-pub const ETH_P_TDLS: c_int = 0x890D;
-pub const ETH_P_FIP: c_int = 0x8914;
-pub const ETH_P_80221: c_int = 0x8917;
-pub const ETH_P_LOOPBACK: c_int = 0x9000;
-pub const ETH_P_QINQ1: c_int = 0x9100;
-pub const ETH_P_QINQ2: c_int = 0x9200;
-pub const ETH_P_QINQ3: c_int = 0x9300;
-pub const ETH_P_EDSA: c_int = 0xDADA;
-pub const ETH_P_AF_IUCV: c_int = 0xFBFB;
-
-pub const ETH_P_802_3_MIN: c_int = 0x0600;
-
-// Non DIX types. Won't clash for 1500 types.
-pub const ETH_P_802_3: c_int = 0x0001; -pub const ETH_P_AX25: c_int = 0x0002; -pub const ETH_P_ALL: c_int = 0x0003; -pub const ETH_P_802_2: c_int = 0x0004; -pub const ETH_P_SNAP: c_int = 0x0005; -pub const ETH_P_DDCMP: c_int = 0x0006; -pub const ETH_P_WAN_PPP: c_int = 0x0007; -pub const ETH_P_PPP_MP: c_int = 0x0008; -pub const ETH_P_LOCALTALK: c_int = 0x0009; -pub const ETH_P_CANFD: c_int = 0x000D; -pub const ETH_P_PPPTALK: c_int = 0x0010; -pub const ETH_P_TR_802_2: c_int = 0x0011; -pub const ETH_P_MOBITEX: c_int = 0x0015; -pub const ETH_P_CONTROL: c_int = 0x0016; -pub const ETH_P_IRDA: c_int = 0x0017; -pub const ETH_P_ECONET: c_int = 0x0018; -pub const ETH_P_HDLC: c_int = 0x0019; -pub const ETH_P_ARCNET: c_int = 0x001A; -pub const ETH_P_DSA: c_int = 0x001B; -pub const ETH_P_TRAILER: c_int = 0x001C; -pub const ETH_P_PHONET: c_int = 0x00F5; -pub const ETH_P_IEEE802154: c_int = 0x00F6; -pub const ETH_P_CAIF: c_int = 0x00F7; - -// DIFF(main): changed to `c_short` in f62eb023ab -pub const POSIX_SPAWN_RESETIDS: c_int = 0x01; -pub const POSIX_SPAWN_SETPGROUP: c_int = 0x02; -pub const POSIX_SPAWN_SETSIGDEF: c_int = 0x04; -pub const POSIX_SPAWN_SETSIGMASK: c_int = 0x08; -pub const POSIX_SPAWN_SETSCHEDPARAM: c_int = 0x10; -pub const POSIX_SPAWN_SETSCHEDULER: c_int = 0x20; - -pub const NLMSG_NOOP: c_int = 0x1; -pub const NLMSG_ERROR: c_int = 0x2; -pub const NLMSG_DONE: c_int = 0x3; -pub const NLMSG_OVERRUN: c_int = 0x4; -pub const NLMSG_MIN_TYPE: c_int = 0x10; - -// linux/netfilter/nfnetlink.h -pub const NFNLGRP_NONE: c_int = 0; -pub const NFNLGRP_CONNTRACK_NEW: c_int = 1; -pub const NFNLGRP_CONNTRACK_UPDATE: c_int = 2; -pub const NFNLGRP_CONNTRACK_DESTROY: c_int = 3; -pub const NFNLGRP_CONNTRACK_EXP_NEW: c_int = 4; -pub const NFNLGRP_CONNTRACK_EXP_UPDATE: c_int = 5; -pub const NFNLGRP_CONNTRACK_EXP_DESTROY: c_int = 6; -pub const NFNLGRP_NFTABLES: c_int = 7; -pub const NFNLGRP_ACCT_QUOTA: c_int = 8; -pub const NFNLGRP_NFTRACE: c_int = 9; - -pub const NFNETLINK_V0: c_int = 0; - -pub const NFNL_SUBSYS_NONE: c_int = 0; -pub const NFNL_SUBSYS_CTNETLINK: c_int = 1; -pub const NFNL_SUBSYS_CTNETLINK_EXP: c_int = 2; -pub const NFNL_SUBSYS_QUEUE: c_int = 3; -pub const NFNL_SUBSYS_ULOG: c_int = 4; -pub const NFNL_SUBSYS_OSF: c_int = 5; -pub const NFNL_SUBSYS_IPSET: c_int = 6; -pub const NFNL_SUBSYS_ACCT: c_int = 7; -pub const NFNL_SUBSYS_CTNETLINK_TIMEOUT: c_int = 8; -pub const NFNL_SUBSYS_CTHELPER: c_int = 9; -pub const NFNL_SUBSYS_NFTABLES: c_int = 10; -pub const NFNL_SUBSYS_NFT_COMPAT: c_int = 11; -pub const NFNL_SUBSYS_HOOK: c_int = 12; -pub const NFNL_SUBSYS_COUNT: c_int = 13; - -pub const NFNL_MSG_BATCH_BEGIN: c_int = NLMSG_MIN_TYPE; -pub const NFNL_MSG_BATCH_END: c_int = NLMSG_MIN_TYPE + 1; - -pub const NFNL_BATCH_UNSPEC: c_int = 0; -pub const NFNL_BATCH_GENID: c_int = 1; - -// linux/netfilter/nfnetlink_log.h -pub const NFULNL_MSG_PACKET: c_int = 0; -pub const NFULNL_MSG_CONFIG: c_int = 1; - -pub const NFULA_VLAN_UNSPEC: c_int = 0; -pub const NFULA_VLAN_PROTO: c_int = 1; -pub const NFULA_VLAN_TCI: c_int = 2; - -pub const NFULA_UNSPEC: c_int = 0; -pub const NFULA_PACKET_HDR: c_int = 1; -pub const NFULA_MARK: c_int = 2; -pub const NFULA_TIMESTAMP: c_int = 3; -pub const NFULA_IFINDEX_INDEV: c_int = 4; -pub const NFULA_IFINDEX_OUTDEV: c_int = 5; -pub const NFULA_IFINDEX_PHYSINDEV: c_int = 6; -pub const NFULA_IFINDEX_PHYSOUTDEV: c_int = 7; -pub const NFULA_HWADDR: c_int = 8; -pub const NFULA_PAYLOAD: c_int = 9; -pub const NFULA_PREFIX: c_int = 10; -pub const NFULA_UID: c_int = 11; -pub const NFULA_SEQ: 
c_int = 12; -pub const NFULA_SEQ_GLOBAL: c_int = 13; -pub const NFULA_GID: c_int = 14; -pub const NFULA_HWTYPE: c_int = 15; -pub const NFULA_HWHEADER: c_int = 16; -pub const NFULA_HWLEN: c_int = 17; -pub const NFULA_CT: c_int = 18; -pub const NFULA_CT_INFO: c_int = 19; -pub const NFULA_VLAN: c_int = 20; -pub const NFULA_L2HDR: c_int = 21; - -pub const NFULNL_CFG_CMD_NONE: c_int = 0; -pub const NFULNL_CFG_CMD_BIND: c_int = 1; -pub const NFULNL_CFG_CMD_UNBIND: c_int = 2; -pub const NFULNL_CFG_CMD_PF_BIND: c_int = 3; -pub const NFULNL_CFG_CMD_PF_UNBIND: c_int = 4; - -pub const NFULA_CFG_UNSPEC: c_int = 0; -pub const NFULA_CFG_CMD: c_int = 1; -pub const NFULA_CFG_MODE: c_int = 2; -pub const NFULA_CFG_NLBUFSIZ: c_int = 3; -pub const NFULA_CFG_TIMEOUT: c_int = 4; -pub const NFULA_CFG_QTHRESH: c_int = 5; -pub const NFULA_CFG_FLAGS: c_int = 6; - -pub const NFULNL_COPY_NONE: c_int = 0x00; -pub const NFULNL_COPY_META: c_int = 0x01; -pub const NFULNL_COPY_PACKET: c_int = 0x02; - -pub const NFULNL_CFG_F_SEQ: c_int = 0x0001; -pub const NFULNL_CFG_F_SEQ_GLOBAL: c_int = 0x0002; -pub const NFULNL_CFG_F_CONNTRACK: c_int = 0x0004; - -// linux/netfilter/nfnetlink_queue.h -pub const NFQNL_MSG_PACKET: c_int = 0; -pub const NFQNL_MSG_VERDICT: c_int = 1; -pub const NFQNL_MSG_CONFIG: c_int = 2; -pub const NFQNL_MSG_VERDICT_BATCH: c_int = 3; - -pub const NFQA_UNSPEC: c_int = 0; -pub const NFQA_PACKET_HDR: c_int = 1; -pub const NFQA_VERDICT_HDR: c_int = 2; -pub const NFQA_MARK: c_int = 3; -pub const NFQA_TIMESTAMP: c_int = 4; -pub const NFQA_IFINDEX_INDEV: c_int = 5; -pub const NFQA_IFINDEX_OUTDEV: c_int = 6; -pub const NFQA_IFINDEX_PHYSINDEV: c_int = 7; -pub const NFQA_IFINDEX_PHYSOUTDEV: c_int = 8; -pub const NFQA_HWADDR: c_int = 9; -pub const NFQA_PAYLOAD: c_int = 10; -pub const NFQA_CT: c_int = 11; -pub const NFQA_CT_INFO: c_int = 12; -pub const NFQA_CAP_LEN: c_int = 13; -pub const NFQA_SKB_INFO: c_int = 14; -pub const NFQA_EXP: c_int = 15; -pub const NFQA_UID: c_int = 16; -pub const NFQA_GID: c_int = 17; -pub const NFQA_SECCTX: c_int = 18; -pub const NFQA_VLAN: c_int = 19; -pub const NFQA_L2HDR: c_int = 20; -pub const NFQA_PRIORITY: c_int = 21; - -pub const NFQA_VLAN_UNSPEC: c_int = 0; -pub const NFQA_VLAN_PROTO: c_int = 1; -pub const NFQA_VLAN_TCI: c_int = 2; - -pub const NFQNL_CFG_CMD_NONE: c_int = 0; -pub const NFQNL_CFG_CMD_BIND: c_int = 1; -pub const NFQNL_CFG_CMD_UNBIND: c_int = 2; -pub const NFQNL_CFG_CMD_PF_BIND: c_int = 3; -pub const NFQNL_CFG_CMD_PF_UNBIND: c_int = 4; - -pub const NFQNL_COPY_NONE: c_int = 0; -pub const NFQNL_COPY_META: c_int = 1; -pub const NFQNL_COPY_PACKET: c_int = 2; - -pub const NFQA_CFG_UNSPEC: c_int = 0; -pub const NFQA_CFG_CMD: c_int = 1; -pub const NFQA_CFG_PARAMS: c_int = 2; -pub const NFQA_CFG_QUEUE_MAXLEN: c_int = 3; -pub const NFQA_CFG_MASK: c_int = 4; -pub const NFQA_CFG_FLAGS: c_int = 5; - -pub const NFQA_CFG_F_FAIL_OPEN: c_int = 0x0001; -pub const NFQA_CFG_F_CONNTRACK: c_int = 0x0002; -pub const NFQA_CFG_F_GSO: c_int = 0x0004; -pub const NFQA_CFG_F_UID_GID: c_int = 0x0008; -pub const NFQA_CFG_F_SECCTX: c_int = 0x0010; -pub const NFQA_CFG_F_MAX: c_int = 0x0020; - -pub const NFQA_SKB_CSUMNOTREADY: c_int = 0x0001; -pub const NFQA_SKB_GSO: c_int = 0x0002; -pub const NFQA_SKB_CSUM_NOTVERIFIED: c_int = 0x0004; - -// linux/genetlink.h - -pub const GENL_NAMSIZ: c_int = 16; - -pub const GENL_MIN_ID: c_int = NLMSG_MIN_TYPE; -pub const GENL_MAX_ID: c_int = 1023; - -pub const GENL_ADMIN_PERM: c_int = 0x01; -pub const GENL_CMD_CAP_DO: c_int = 0x02; -pub const GENL_CMD_CAP_DUMP: 
c_int = 0x04; -pub const GENL_CMD_CAP_HASPOL: c_int = 0x08; - -pub const GENL_ID_CTRL: c_int = NLMSG_MIN_TYPE; - -pub const CTRL_CMD_UNSPEC: c_int = 0; -pub const CTRL_CMD_NEWFAMILY: c_int = 1; -pub const CTRL_CMD_DELFAMILY: c_int = 2; -pub const CTRL_CMD_GETFAMILY: c_int = 3; -pub const CTRL_CMD_NEWOPS: c_int = 4; -pub const CTRL_CMD_DELOPS: c_int = 5; -pub const CTRL_CMD_GETOPS: c_int = 6; -pub const CTRL_CMD_NEWMCAST_GRP: c_int = 7; -pub const CTRL_CMD_DELMCAST_GRP: c_int = 8; -pub const CTRL_CMD_GETMCAST_GRP: c_int = 9; - -pub const CTRL_ATTR_UNSPEC: c_int = 0; -pub const CTRL_ATTR_FAMILY_ID: c_int = 1; -pub const CTRL_ATTR_FAMILY_NAME: c_int = 2; -pub const CTRL_ATTR_VERSION: c_int = 3; -pub const CTRL_ATTR_HDRSIZE: c_int = 4; -pub const CTRL_ATTR_MAXATTR: c_int = 5; -pub const CTRL_ATTR_OPS: c_int = 6; -pub const CTRL_ATTR_MCAST_GROUPS: c_int = 7; - -pub const CTRL_ATTR_OP_UNSPEC: c_int = 0; -pub const CTRL_ATTR_OP_ID: c_int = 1; -pub const CTRL_ATTR_OP_FLAGS: c_int = 2; - -pub const CTRL_ATTR_MCAST_GRP_UNSPEC: c_int = 0; -pub const CTRL_ATTR_MCAST_GRP_NAME: c_int = 1; -pub const CTRL_ATTR_MCAST_GRP_ID: c_int = 2; - -// linux/if_packet.h -pub const PACKET_HOST: c_uchar = 0; -pub const PACKET_BROADCAST: c_uchar = 1; -pub const PACKET_MULTICAST: c_uchar = 2; -pub const PACKET_OTHERHOST: c_uchar = 3; -pub const PACKET_OUTGOING: c_uchar = 4; -pub const PACKET_LOOPBACK: c_uchar = 5; -pub const PACKET_USER: c_uchar = 6; -pub const PACKET_KERNEL: c_uchar = 7; - -pub const PACKET_ADD_MEMBERSHIP: c_int = 1; -pub const PACKET_DROP_MEMBERSHIP: c_int = 2; -pub const PACKET_RECV_OUTPUT: c_int = 3; -pub const PACKET_RX_RING: c_int = 5; -pub const PACKET_STATISTICS: c_int = 6; -pub const PACKET_COPY_THRESH: c_int = 7; -pub const PACKET_AUXDATA: c_int = 8; -pub const PACKET_ORIGDEV: c_int = 9; -pub const PACKET_VERSION: c_int = 10; -pub const PACKET_HDRLEN: c_int = 11; -pub const PACKET_RESERVE: c_int = 12; -pub const PACKET_TX_RING: c_int = 13; -pub const PACKET_LOSS: c_int = 14; -pub const PACKET_VNET_HDR: c_int = 15; -pub const PACKET_TX_TIMESTAMP: c_int = 16; -pub const PACKET_TIMESTAMP: c_int = 17; -pub const PACKET_FANOUT: c_int = 18; -pub const PACKET_TX_HAS_OFF: c_int = 19; -pub const PACKET_QDISC_BYPASS: c_int = 20; -pub const PACKET_ROLLOVER_STATS: c_int = 21; -pub const PACKET_FANOUT_DATA: c_int = 22; -pub const PACKET_IGNORE_OUTGOING: c_int = 23; -pub const PACKET_VNET_HDR_SZ: c_int = 24; - -pub const PACKET_FANOUT_HASH: c_uint = 0; -pub const PACKET_FANOUT_LB: c_uint = 1; -pub const PACKET_FANOUT_CPU: c_uint = 2; -pub const PACKET_FANOUT_ROLLOVER: c_uint = 3; -pub const PACKET_FANOUT_RND: c_uint = 4; -pub const PACKET_FANOUT_QM: c_uint = 5; -pub const PACKET_FANOUT_CBPF: c_uint = 6; -pub const PACKET_FANOUT_EBPF: c_uint = 7; -pub const PACKET_FANOUT_FLAG_ROLLOVER: c_uint = 0x1000; -pub const PACKET_FANOUT_FLAG_UNIQUEID: c_uint = 0x2000; -pub const PACKET_FANOUT_FLAG_IGNORE_OUTGOING: c_uint = 0x4000; -pub const PACKET_FANOUT_FLAG_DEFRAG: c_uint = 0x8000; - -pub const PACKET_MR_MULTICAST: c_int = 0; -pub const PACKET_MR_PROMISC: c_int = 1; -pub const PACKET_MR_ALLMULTI: c_int = 2; - -pub const TP_STATUS_KERNEL: __u32 = 0; -pub const TP_STATUS_USER: __u32 = 1 << 0; -pub const TP_STATUS_COPY: __u32 = 1 << 1; -pub const TP_STATUS_LOSING: __u32 = 1 << 2; -pub const TP_STATUS_CSUMNOTREADY: __u32 = 1 << 3; -pub const TP_STATUS_VLAN_VALID: __u32 = 1 << 4; -pub const TP_STATUS_BLK_TMO: __u32 = 1 << 5; -pub const TP_STATUS_VLAN_TPID_VALID: __u32 = 1 << 6; -pub const TP_STATUS_CSUM_VALID: __u32 = 1 
<< 7;
-
-pub const TP_STATUS_AVAILABLE: __u32 = 0;
-pub const TP_STATUS_SEND_REQUEST: __u32 = 1 << 0;
-pub const TP_STATUS_SENDING: __u32 = 1 << 1;
-pub const TP_STATUS_WRONG_FORMAT: __u32 = 1 << 2;
-
-pub const TP_STATUS_TS_SOFTWARE: __u32 = 1 << 29;
-pub const TP_STATUS_TS_SYS_HARDWARE: __u32 = 1 << 30;
-pub const TP_STATUS_TS_RAW_HARDWARE: __u32 = 1 << 31;
-
-pub const TP_FT_REQ_FILL_RXHASH: __u32 = 1;
-
-pub const TPACKET_ALIGNMENT: usize = 16;
-
-pub const TPACKET_HDRLEN: usize = ((size_of::() + TPACKET_ALIGNMENT - 1)
-    & !(TPACKET_ALIGNMENT - 1))
-    + size_of::();
-pub const TPACKET2_HDRLEN: usize = ((size_of::() + TPACKET_ALIGNMENT - 1)
-    & !(TPACKET_ALIGNMENT - 1))
-    + size_of::();
-pub const TPACKET3_HDRLEN: usize = ((size_of::() + TPACKET_ALIGNMENT - 1)
-    & !(TPACKET_ALIGNMENT - 1))
-    + size_of::();
-
-// linux/netfilter.h
-pub const NF_DROP: c_int = 0;
-pub const NF_ACCEPT: c_int = 1;
-pub const NF_STOLEN: c_int = 2;
-pub const NF_QUEUE: c_int = 3;
-pub const NF_REPEAT: c_int = 4;
-pub const NF_STOP: c_int = 5;
-pub const NF_MAX_VERDICT: c_int = NF_STOP;
-
-pub const NF_VERDICT_MASK: c_int = 0x000000ff;
-pub const NF_VERDICT_FLAG_QUEUE_BYPASS: c_int = 0x00008000;
-
-pub const NF_VERDICT_QMASK: c_int = 0xffff0000;
-pub const NF_VERDICT_QBITS: c_int = 16;
-
-pub const NF_VERDICT_BITS: c_int = 16;
-
-pub const NF_INET_PRE_ROUTING: c_int = 0;
-pub const NF_INET_LOCAL_IN: c_int = 1;
-pub const NF_INET_FORWARD: c_int = 2;
-pub const NF_INET_LOCAL_OUT: c_int = 3;
-pub const NF_INET_POST_ROUTING: c_int = 4;
-pub const NF_INET_NUMHOOKS: c_int = 5;
-pub const NF_INET_INGRESS: c_int = NF_INET_NUMHOOKS;
-
-pub const NF_NETDEV_INGRESS: c_int = 0;
-pub const NF_NETDEV_EGRESS: c_int = 1;
-pub const NF_NETDEV_NUMHOOKS: c_int = 2;
-
-// Some NFPROTO are not compatible with musl and are defined in submodules.
-pub const NFPROTO_UNSPEC: c_int = 0; -pub const NFPROTO_INET: c_int = 1; -pub const NFPROTO_IPV4: c_int = 2; -pub const NFPROTO_ARP: c_int = 3; -pub const NFPROTO_NETDEV: c_int = 5; -pub const NFPROTO_BRIDGE: c_int = 7; -pub const NFPROTO_IPV6: c_int = 10; -pub const NFPROTO_DECNET: c_int = 12; -pub const NFPROTO_NUMPROTO: c_int = 13; - -// linux/netfilter_arp.h -pub const NF_ARP: c_int = 0; -pub const NF_ARP_IN: c_int = 0; -pub const NF_ARP_OUT: c_int = 1; -pub const NF_ARP_FORWARD: c_int = 2; -pub const NF_ARP_NUMHOOKS: c_int = 3; - -// linux/netfilter_bridge.h -pub const NF_BR_PRE_ROUTING: c_int = 0; -pub const NF_BR_LOCAL_IN: c_int = 1; -pub const NF_BR_FORWARD: c_int = 2; -pub const NF_BR_LOCAL_OUT: c_int = 3; -pub const NF_BR_POST_ROUTING: c_int = 4; -pub const NF_BR_BROUTING: c_int = 5; -pub const NF_BR_NUMHOOKS: c_int = 6; - -pub const NF_BR_PRI_FIRST: c_int = crate::INT_MIN; -pub const NF_BR_PRI_NAT_DST_BRIDGED: c_int = -300; -pub const NF_BR_PRI_FILTER_BRIDGED: c_int = -200; -pub const NF_BR_PRI_BRNF: c_int = 0; -pub const NF_BR_PRI_NAT_DST_OTHER: c_int = 100; -pub const NF_BR_PRI_FILTER_OTHER: c_int = 200; -pub const NF_BR_PRI_NAT_SRC: c_int = 300; -pub const NF_BR_PRI_LAST: c_int = crate::INT_MAX; - -// linux/netfilter_ipv4.h -pub const NF_IP_PRE_ROUTING: c_int = 0; -pub const NF_IP_LOCAL_IN: c_int = 1; -pub const NF_IP_FORWARD: c_int = 2; -pub const NF_IP_LOCAL_OUT: c_int = 3; -pub const NF_IP_POST_ROUTING: c_int = 4; -pub const NF_IP_NUMHOOKS: c_int = 5; - -pub const NF_IP_PRI_FIRST: c_int = crate::INT_MIN; -pub const NF_IP_PRI_RAW_BEFORE_DEFRAG: c_int = -450; -pub const NF_IP_PRI_CONNTRACK_DEFRAG: c_int = -400; -pub const NF_IP_PRI_RAW: c_int = -300; -pub const NF_IP_PRI_SELINUX_FIRST: c_int = -225; -pub const NF_IP_PRI_CONNTRACK: c_int = -200; -pub const NF_IP_PRI_MANGLE: c_int = -150; -pub const NF_IP_PRI_NAT_DST: c_int = -100; -pub const NF_IP_PRI_FILTER: c_int = 0; -pub const NF_IP_PRI_SECURITY: c_int = 50; -pub const NF_IP_PRI_NAT_SRC: c_int = 100; -pub const NF_IP_PRI_SELINUX_LAST: c_int = 225; -pub const NF_IP_PRI_CONNTRACK_HELPER: c_int = 300; -pub const NF_IP_PRI_CONNTRACK_CONFIRM: c_int = crate::INT_MAX; -pub const NF_IP_PRI_LAST: c_int = crate::INT_MAX; - -// linux/netfilter_ipv6.h -pub const NF_IP6_PRE_ROUTING: c_int = 0; -pub const NF_IP6_LOCAL_IN: c_int = 1; -pub const NF_IP6_FORWARD: c_int = 2; -pub const NF_IP6_LOCAL_OUT: c_int = 3; -pub const NF_IP6_POST_ROUTING: c_int = 4; -pub const NF_IP6_NUMHOOKS: c_int = 5; - -pub const NF_IP6_PRI_FIRST: c_int = crate::INT_MIN; -pub const NF_IP6_PRI_RAW_BEFORE_DEFRAG: c_int = -450; -pub const NF_IP6_PRI_CONNTRACK_DEFRAG: c_int = -400; -pub const NF_IP6_PRI_RAW: c_int = -300; -pub const NF_IP6_PRI_SELINUX_FIRST: c_int = -225; -pub const NF_IP6_PRI_CONNTRACK: c_int = -200; -pub const NF_IP6_PRI_MANGLE: c_int = -150; -pub const NF_IP6_PRI_NAT_DST: c_int = -100; -pub const NF_IP6_PRI_FILTER: c_int = 0; -pub const NF_IP6_PRI_SECURITY: c_int = 50; -pub const NF_IP6_PRI_NAT_SRC: c_int = 100; -pub const NF_IP6_PRI_SELINUX_LAST: c_int = 225; -pub const NF_IP6_PRI_CONNTRACK_HELPER: c_int = 300; -pub const NF_IP6_PRI_LAST: c_int = crate::INT_MAX; - -// linux/netfilter_ipv6/ip6_tables.h -pub const IP6T_SO_ORIGINAL_DST: c_int = 80; - -pub const SIOCADDRT: c_ulong = 0x0000890B; -pub const SIOCDELRT: c_ulong = 0x0000890C; -pub const SIOCGIFNAME: c_ulong = 0x00008910; -pub const SIOCSIFLINK: c_ulong = 0x00008911; -pub const SIOCGIFCONF: c_ulong = 0x00008912; -pub const SIOCGIFFLAGS: c_ulong = 0x00008913; -pub const SIOCSIFFLAGS: 
c_ulong = 0x00008914; -pub const SIOCGIFADDR: c_ulong = 0x00008915; -pub const SIOCSIFADDR: c_ulong = 0x00008916; -pub const SIOCGIFDSTADDR: c_ulong = 0x00008917; -pub const SIOCSIFDSTADDR: c_ulong = 0x00008918; -pub const SIOCGIFBRDADDR: c_ulong = 0x00008919; -pub const SIOCSIFBRDADDR: c_ulong = 0x0000891A; -pub const SIOCGIFNETMASK: c_ulong = 0x0000891B; -pub const SIOCSIFNETMASK: c_ulong = 0x0000891C; -pub const SIOCGIFMETRIC: c_ulong = 0x0000891D; -pub const SIOCSIFMETRIC: c_ulong = 0x0000891E; -pub const SIOCGIFMEM: c_ulong = 0x0000891F; -pub const SIOCSIFMEM: c_ulong = 0x00008920; -pub const SIOCGIFMTU: c_ulong = 0x00008921; -pub const SIOCSIFMTU: c_ulong = 0x00008922; -pub const SIOCSIFNAME: c_ulong = 0x00008923; -pub const SIOCSIFHWADDR: c_ulong = 0x00008924; -pub const SIOCGIFENCAP: c_ulong = 0x00008925; -pub const SIOCSIFENCAP: c_ulong = 0x00008926; -pub const SIOCGIFHWADDR: c_ulong = 0x00008927; -pub const SIOCGIFSLAVE: c_ulong = 0x00008929; -pub const SIOCSIFSLAVE: c_ulong = 0x00008930; -pub const SIOCADDMULTI: c_ulong = 0x00008931; -pub const SIOCDELMULTI: c_ulong = 0x00008932; -pub const SIOCGIFINDEX: c_ulong = 0x00008933; -pub const SIOGIFINDEX: c_ulong = SIOCGIFINDEX; -pub const SIOCSIFPFLAGS: c_ulong = 0x00008934; -pub const SIOCGIFPFLAGS: c_ulong = 0x00008935; -pub const SIOCDIFADDR: c_ulong = 0x00008936; -pub const SIOCSIFHWBROADCAST: c_ulong = 0x00008937; -pub const SIOCGIFCOUNT: c_ulong = 0x00008938; -pub const SIOCGIFBR: c_ulong = 0x00008940; -pub const SIOCSIFBR: c_ulong = 0x00008941; -pub const SIOCGIFTXQLEN: c_ulong = 0x00008942; -pub const SIOCSIFTXQLEN: c_ulong = 0x00008943; -pub const SIOCETHTOOL: c_ulong = 0x00008946; -pub const SIOCGMIIPHY: c_ulong = 0x00008947; -pub const SIOCGMIIREG: c_ulong = 0x00008948; -pub const SIOCSMIIREG: c_ulong = 0x00008949; -pub const SIOCWANDEV: c_ulong = 0x0000894A; -pub const SIOCOUTQNSD: c_ulong = 0x0000894B; -pub const SIOCGSKNS: c_ulong = 0x0000894C; -pub const SIOCDARP: c_ulong = 0x00008953; -pub const SIOCGARP: c_ulong = 0x00008954; -pub const SIOCSARP: c_ulong = 0x00008955; -pub const SIOCDRARP: c_ulong = 0x00008960; -pub const SIOCGRARP: c_ulong = 0x00008961; -pub const SIOCSRARP: c_ulong = 0x00008962; -pub const SIOCGIFMAP: c_ulong = 0x00008970; -pub const SIOCSIFMAP: c_ulong = 0x00008971; -pub const SIOCSHWTSTAMP: c_ulong = 0x000089b0; -pub const SIOCGHWTSTAMP: c_ulong = 0x000089b1; - -// wireless.h -pub const WIRELESS_EXT: c_ulong = 0x16; - -pub const SIOCSIWCOMMIT: c_ulong = 0x8B00; -pub const SIOCGIWNAME: c_ulong = 0x8B01; - -pub const SIOCSIWNWID: c_ulong = 0x8B02; -pub const SIOCGIWNWID: c_ulong = 0x8B03; -pub const SIOCSIWFREQ: c_ulong = 0x8B04; -pub const SIOCGIWFREQ: c_ulong = 0x8B05; -pub const SIOCSIWMODE: c_ulong = 0x8B06; -pub const SIOCGIWMODE: c_ulong = 0x8B07; -pub const SIOCSIWSENS: c_ulong = 0x8B08; -pub const SIOCGIWSENS: c_ulong = 0x8B09; - -pub const SIOCSIWRANGE: c_ulong = 0x8B0A; -pub const SIOCGIWRANGE: c_ulong = 0x8B0B; -pub const SIOCSIWPRIV: c_ulong = 0x8B0C; -pub const SIOCGIWPRIV: c_ulong = 0x8B0D; -pub const SIOCSIWSTATS: c_ulong = 0x8B0E; -pub const SIOCGIWSTATS: c_ulong = 0x8B0F; - -pub const SIOCSIWSPY: c_ulong = 0x8B10; -pub const SIOCGIWSPY: c_ulong = 0x8B11; -pub const SIOCSIWTHRSPY: c_ulong = 0x8B12; -pub const SIOCGIWTHRSPY: c_ulong = 0x8B13; - -pub const SIOCSIWAP: c_ulong = 0x8B14; -pub const SIOCGIWAP: c_ulong = 0x8B15; -pub const SIOCGIWAPLIST: c_ulong = 0x8B17; -pub const SIOCSIWSCAN: c_ulong = 0x8B18; -pub const SIOCGIWSCAN: c_ulong = 0x8B19; - -pub const SIOCSIWESSID: c_ulong 
= 0x8B1A; -pub const SIOCGIWESSID: c_ulong = 0x8B1B; -pub const SIOCSIWNICKN: c_ulong = 0x8B1C; -pub const SIOCGIWNICKN: c_ulong = 0x8B1D; - -pub const SIOCSIWRATE: c_ulong = 0x8B20; -pub const SIOCGIWRATE: c_ulong = 0x8B21; -pub const SIOCSIWRTS: c_ulong = 0x8B22; -pub const SIOCGIWRTS: c_ulong = 0x8B23; -pub const SIOCSIWFRAG: c_ulong = 0x8B24; -pub const SIOCGIWFRAG: c_ulong = 0x8B25; -pub const SIOCSIWTXPOW: c_ulong = 0x8B26; -pub const SIOCGIWTXPOW: c_ulong = 0x8B27; -pub const SIOCSIWRETRY: c_ulong = 0x8B28; -pub const SIOCGIWRETRY: c_ulong = 0x8B29; - -pub const SIOCSIWENCODE: c_ulong = 0x8B2A; -pub const SIOCGIWENCODE: c_ulong = 0x8B2B; - -pub const SIOCSIWPOWER: c_ulong = 0x8B2C; -pub const SIOCGIWPOWER: c_ulong = 0x8B2D; - -pub const SIOCSIWGENIE: c_ulong = 0x8B30; -pub const SIOCGIWGENIE: c_ulong = 0x8B31; - -pub const SIOCSIWMLME: c_ulong = 0x8B16; - -pub const SIOCSIWAUTH: c_ulong = 0x8B32; -pub const SIOCGIWAUTH: c_ulong = 0x8B33; - -pub const SIOCSIWENCODEEXT: c_ulong = 0x8B34; -pub const SIOCGIWENCODEEXT: c_ulong = 0x8B35; - -pub const SIOCSIWPMKSA: c_ulong = 0x8B36; - -pub const SIOCIWFIRSTPRIV: c_ulong = 0x8BE0; -pub const SIOCIWLASTPRIV: c_ulong = 0x8BFF; - -pub const SIOCIWFIRST: c_ulong = 0x8B00; -pub const SIOCIWLAST: c_ulong = SIOCIWLASTPRIV; - -pub const IWEVTXDROP: c_ulong = 0x8C00; -pub const IWEVQUAL: c_ulong = 0x8C01; -pub const IWEVCUSTOM: c_ulong = 0x8C02; -pub const IWEVREGISTERED: c_ulong = 0x8C03; -pub const IWEVEXPIRED: c_ulong = 0x8C04; -pub const IWEVGENIE: c_ulong = 0x8C05; -pub const IWEVMICHAELMICFAILURE: c_ulong = 0x8C06; -pub const IWEVASSOCREQIE: c_ulong = 0x8C07; -pub const IWEVASSOCRESPIE: c_ulong = 0x8C08; -pub const IWEVPMKIDCAND: c_ulong = 0x8C09; -pub const IWEVFIRST: c_ulong = 0x8C00; - -pub const IW_PRIV_TYPE_MASK: c_ulong = 0x7000; -pub const IW_PRIV_TYPE_NONE: c_ulong = 0x0000; -pub const IW_PRIV_TYPE_BYTE: c_ulong = 0x1000; -pub const IW_PRIV_TYPE_CHAR: c_ulong = 0x2000; -pub const IW_PRIV_TYPE_INT: c_ulong = 0x4000; -pub const IW_PRIV_TYPE_FLOAT: c_ulong = 0x5000; -pub const IW_PRIV_TYPE_ADDR: c_ulong = 0x6000; - -pub const IW_PRIV_SIZE_FIXED: c_ulong = 0x0800; - -pub const IW_PRIV_SIZE_MASK: c_ulong = 0x07FF; - -pub const IW_MAX_FREQUENCIES: usize = 32; -pub const IW_MAX_BITRATES: usize = 32; -pub const IW_MAX_TXPOWER: usize = 8; -pub const IW_MAX_SPY: usize = 8; -pub const IW_MAX_AP: usize = 64; -pub const IW_ESSID_MAX_SIZE: usize = 32; - -pub const IW_MODE_AUTO: usize = 0; -pub const IW_MODE_ADHOC: usize = 1; -pub const IW_MODE_INFRA: usize = 2; -pub const IW_MODE_MASTER: usize = 3; -pub const IW_MODE_REPEAT: usize = 4; -pub const IW_MODE_SECOND: usize = 5; -pub const IW_MODE_MONITOR: usize = 6; -pub const IW_MODE_MESH: usize = 7; - -pub const IW_QUAL_QUAL_UPDATED: c_ulong = 0x01; -pub const IW_QUAL_LEVEL_UPDATED: c_ulong = 0x02; -pub const IW_QUAL_NOISE_UPDATED: c_ulong = 0x04; -pub const IW_QUAL_ALL_UPDATED: c_ulong = 0x07; -pub const IW_QUAL_DBM: c_ulong = 0x08; -pub const IW_QUAL_QUAL_INVALID: c_ulong = 0x10; -pub const IW_QUAL_LEVEL_INVALID: c_ulong = 0x20; -pub const IW_QUAL_NOISE_INVALID: c_ulong = 0x40; -pub const IW_QUAL_RCPI: c_ulong = 0x80; -pub const IW_QUAL_ALL_INVALID: c_ulong = 0x70; - -pub const IW_FREQ_AUTO: c_ulong = 0x00; -pub const IW_FREQ_FIXED: c_ulong = 0x01; - -pub const IW_MAX_ENCODING_SIZES: usize = 8; -pub const IW_ENCODING_TOKEN_MAX: usize = 64; - -pub const IW_ENCODE_INDEX: c_ulong = 0x00FF; -pub const IW_ENCODE_FLAGS: c_ulong = 0xFF00; -pub const IW_ENCODE_MODE: c_ulong = 0xF000; -pub const 
IW_ENCODE_DISABLED: c_ulong = 0x8000; -pub const IW_ENCODE_ENABLED: c_ulong = 0x0000; -pub const IW_ENCODE_RESTRICTED: c_ulong = 0x4000; -pub const IW_ENCODE_OPEN: c_ulong = 0x2000; -pub const IW_ENCODE_NOKEY: c_ulong = 0x0800; -pub const IW_ENCODE_TEMP: c_ulong = 0x0400; - -pub const IW_POWER_ON: c_ulong = 0x0000; -pub const IW_POWER_TYPE: c_ulong = 0xF000; -pub const IW_POWER_PERIOD: c_ulong = 0x1000; -pub const IW_POWER_TIMEOUT: c_ulong = 0x2000; -pub const IW_POWER_MODE: c_ulong = 0x0F00; -pub const IW_POWER_UNICAST_R: c_ulong = 0x0100; -pub const IW_POWER_MULTICAST_R: c_ulong = 0x0200; -pub const IW_POWER_ALL_R: c_ulong = 0x0300; -pub const IW_POWER_FORCE_S: c_ulong = 0x0400; -pub const IW_POWER_REPEATER: c_ulong = 0x0800; -pub const IW_POWER_MODIFIER: c_ulong = 0x000F; -pub const IW_POWER_MIN: c_ulong = 0x0001; -pub const IW_POWER_MAX: c_ulong = 0x0002; -pub const IW_POWER_RELATIVE: c_ulong = 0x0004; - -pub const IW_TXPOW_TYPE: c_ulong = 0x00FF; -pub const IW_TXPOW_DBM: c_ulong = 0x0000; -pub const IW_TXPOW_MWATT: c_ulong = 0x0001; -pub const IW_TXPOW_RELATIVE: c_ulong = 0x0002; -pub const IW_TXPOW_RANGE: c_ulong = 0x1000; - -pub const IW_RETRY_ON: c_ulong = 0x0000; -pub const IW_RETRY_TYPE: c_ulong = 0xF000; -pub const IW_RETRY_LIMIT: c_ulong = 0x1000; -pub const IW_RETRY_LIFETIME: c_ulong = 0x2000; -pub const IW_RETRY_MODIFIER: c_ulong = 0x00FF; -pub const IW_RETRY_MIN: c_ulong = 0x0001; -pub const IW_RETRY_MAX: c_ulong = 0x0002; -pub const IW_RETRY_RELATIVE: c_ulong = 0x0004; -pub const IW_RETRY_SHORT: c_ulong = 0x0010; -pub const IW_RETRY_LONG: c_ulong = 0x0020; - -pub const IW_SCAN_DEFAULT: c_ulong = 0x0000; -pub const IW_SCAN_ALL_ESSID: c_ulong = 0x0001; -pub const IW_SCAN_THIS_ESSID: c_ulong = 0x0002; -pub const IW_SCAN_ALL_FREQ: c_ulong = 0x0004; -pub const IW_SCAN_THIS_FREQ: c_ulong = 0x0008; -pub const IW_SCAN_ALL_MODE: c_ulong = 0x0010; -pub const IW_SCAN_THIS_MODE: c_ulong = 0x0020; -pub const IW_SCAN_ALL_RATE: c_ulong = 0x0040; -pub const IW_SCAN_THIS_RATE: c_ulong = 0x0080; - -pub const IW_SCAN_TYPE_ACTIVE: usize = 0; -pub const IW_SCAN_TYPE_PASSIVE: usize = 1; - -pub const IW_SCAN_MAX_DATA: usize = 4096; - -pub const IW_SCAN_CAPA_NONE: c_ulong = 0x00; -pub const IW_SCAN_CAPA_ESSID: c_ulong = 0x01; -pub const IW_SCAN_CAPA_BSSID: c_ulong = 0x02; -pub const IW_SCAN_CAPA_CHANNEL: c_ulong = 0x04; -pub const IW_SCAN_CAPA_MODE: c_ulong = 0x08; -pub const IW_SCAN_CAPA_RATE: c_ulong = 0x10; -pub const IW_SCAN_CAPA_TYPE: c_ulong = 0x20; -pub const IW_SCAN_CAPA_TIME: c_ulong = 0x40; - -pub const IW_CUSTOM_MAX: c_ulong = 256; - -pub const IW_GENERIC_IE_MAX: c_ulong = 1024; - -pub const IW_MLME_DEAUTH: c_ulong = 0; -pub const IW_MLME_DISASSOC: c_ulong = 1; -pub const IW_MLME_AUTH: c_ulong = 2; -pub const IW_MLME_ASSOC: c_ulong = 3; - -pub const IW_AUTH_INDEX: c_ulong = 0x0FFF; -pub const IW_AUTH_FLAGS: c_ulong = 0xF000; - -pub const IW_AUTH_WPA_VERSION: usize = 0; -pub const IW_AUTH_CIPHER_PAIRWISE: usize = 1; -pub const IW_AUTH_CIPHER_GROUP: usize = 2; -pub const IW_AUTH_KEY_MGMT: usize = 3; -pub const IW_AUTH_TKIP_COUNTERMEASURES: usize = 4; -pub const IW_AUTH_DROP_UNENCRYPTED: usize = 5; -pub const IW_AUTH_80211_AUTH_ALG: usize = 6; -pub const IW_AUTH_WPA_ENABLED: usize = 7; -pub const IW_AUTH_RX_UNENCRYPTED_EAPOL: usize = 8; -pub const IW_AUTH_ROAMING_CONTROL: usize = 9; -pub const IW_AUTH_PRIVACY_INVOKED: usize = 10; -pub const IW_AUTH_CIPHER_GROUP_MGMT: usize = 11; -pub const IW_AUTH_MFP: usize = 12; - -pub const IW_AUTH_WPA_VERSION_DISABLED: c_ulong = 0x00000001; -pub const 
IW_AUTH_WPA_VERSION_WPA: c_ulong = 0x00000002; -pub const IW_AUTH_WPA_VERSION_WPA2: c_ulong = 0x00000004; - -pub const IW_AUTH_CIPHER_NONE: c_ulong = 0x00000001; -pub const IW_AUTH_CIPHER_WEP40: c_ulong = 0x00000002; -pub const IW_AUTH_CIPHER_TKIP: c_ulong = 0x00000004; -pub const IW_AUTH_CIPHER_CCMP: c_ulong = 0x00000008; -pub const IW_AUTH_CIPHER_WEP104: c_ulong = 0x00000010; -pub const IW_AUTH_CIPHER_AES_CMAC: c_ulong = 0x00000020; - -pub const IW_AUTH_KEY_MGMT_802_1X: usize = 1; -pub const IW_AUTH_KEY_MGMT_PSK: usize = 2; - -pub const IW_AUTH_ALG_OPEN_SYSTEM: c_ulong = 0x00000001; -pub const IW_AUTH_ALG_SHARED_KEY: c_ulong = 0x00000002; -pub const IW_AUTH_ALG_LEAP: c_ulong = 0x00000004; - -pub const IW_AUTH_ROAMING_ENABLE: usize = 0; -pub const IW_AUTH_ROAMING_DISABLE: usize = 1; - -pub const IW_AUTH_MFP_DISABLED: usize = 0; -pub const IW_AUTH_MFP_OPTIONAL: usize = 1; -pub const IW_AUTH_MFP_REQUIRED: usize = 2; - -pub const IW_ENCODE_SEQ_MAX_SIZE: usize = 8; - -pub const IW_ENCODE_ALG_NONE: usize = 0; -pub const IW_ENCODE_ALG_WEP: usize = 1; -pub const IW_ENCODE_ALG_TKIP: usize = 2; -pub const IW_ENCODE_ALG_CCMP: usize = 3; -pub const IW_ENCODE_ALG_PMK: usize = 4; -pub const IW_ENCODE_ALG_AES_CMAC: usize = 5; - -pub const IW_ENCODE_EXT_TX_SEQ_VALID: c_ulong = 0x00000001; -pub const IW_ENCODE_EXT_RX_SEQ_VALID: c_ulong = 0x00000002; -pub const IW_ENCODE_EXT_GROUP_KEY: c_ulong = 0x00000004; -pub const IW_ENCODE_EXT_SET_TX_KEY: c_ulong = 0x00000008; - -pub const IW_MICFAILURE_KEY_ID: c_ulong = 0x00000003; -pub const IW_MICFAILURE_GROUP: c_ulong = 0x00000004; -pub const IW_MICFAILURE_PAIRWISE: c_ulong = 0x00000008; -pub const IW_MICFAILURE_STAKEY: c_ulong = 0x00000010; -pub const IW_MICFAILURE_COUNT: c_ulong = 0x00000060; - -pub const IW_ENC_CAPA_WPA: c_ulong = 0x00000001; -pub const IW_ENC_CAPA_WPA2: c_ulong = 0x00000002; -pub const IW_ENC_CAPA_CIPHER_TKIP: c_ulong = 0x00000004; -pub const IW_ENC_CAPA_CIPHER_CCMP: c_ulong = 0x00000008; -pub const IW_ENC_CAPA_4WAY_HANDSHAKE: c_ulong = 0x00000010; - -pub const IW_EVENT_CAPA_K_0: c_ulong = 0x4000050; // IW_EVENT_CAPA_MASK(0x8B04) | IW_EVENT_CAPA_MASK(0x8B06) | IW_EVENT_CAPA_MASK(0x8B1A); -pub const IW_EVENT_CAPA_K_1: c_ulong = 0x400; // W_EVENT_CAPA_MASK(0x8B2A); - -pub const IW_PMKSA_ADD: usize = 1; -pub const IW_PMKSA_REMOVE: usize = 2; -pub const IW_PMKSA_FLUSH: usize = 3; - -pub const IW_PMKID_LEN: usize = 16; - -pub const IW_PMKID_CAND_PREAUTH: c_ulong = 0x00000001; - -pub const IW_EV_LCP_PK_LEN: usize = 4; - -pub const IW_EV_CHAR_PK_LEN: usize = 20; // IW_EV_LCP_PK_LEN + crate::IFNAMSIZ; -pub const IW_EV_UINT_PK_LEN: usize = 8; // IW_EV_LCP_PK_LEN + size_of::(); -pub const IW_EV_FREQ_PK_LEN: usize = 12; // IW_EV_LCP_PK_LEN + size_of::(); -pub const IW_EV_PARAM_PK_LEN: usize = 12; // IW_EV_LCP_PK_LEN + size_of::(); -pub const IW_EV_ADDR_PK_LEN: usize = 20; // IW_EV_LCP_PK_LEN + size_of::(); -pub const IW_EV_QUAL_PK_LEN: usize = 8; // IW_EV_LCP_PK_LEN + size_of::(); -pub const IW_EV_POINT_PK_LEN: usize = 8; // IW_EV_LCP_PK_LEN + 4; - -pub const IPTOS_TOS_MASK: u8 = 0x1E; -pub const IPTOS_PREC_MASK: u8 = 0xE0; - -pub const IPTOS_ECN_NOT_ECT: u8 = 0x00; - -pub const RTF_UP: c_ushort = 0x0001; -pub const RTF_GATEWAY: c_ushort = 0x0002; - -pub const RTF_HOST: c_ushort = 0x0004; -pub const RTF_REINSTATE: c_ushort = 0x0008; -pub const RTF_DYNAMIC: c_ushort = 0x0010; -pub const RTF_MODIFIED: c_ushort = 0x0020; -pub const RTF_MTU: c_ushort = 0x0040; -pub const RTF_MSS: c_ushort = RTF_MTU; -pub const RTF_WINDOW: c_ushort = 0x0080; -pub const 
RTF_IRTT: c_ushort = 0x0100; -pub const RTF_REJECT: c_ushort = 0x0200; -pub const RTF_STATIC: c_ushort = 0x0400; -pub const RTF_XRESOLVE: c_ushort = 0x0800; -pub const RTF_NOFORWARD: c_ushort = 0x1000; -pub const RTF_THROW: c_ushort = 0x2000; -pub const RTF_NOPMTUDISC: c_ushort = 0x4000; - -pub const RTF_DEFAULT: u32 = 0x00010000; -pub const RTF_ALLONLINK: u32 = 0x00020000; -pub const RTF_ADDRCONF: u32 = 0x00040000; -pub const RTF_LINKRT: u32 = 0x00100000; -pub const RTF_NONEXTHOP: u32 = 0x00200000; -pub const RTF_CACHE: u32 = 0x01000000; -pub const RTF_FLOW: u32 = 0x02000000; -pub const RTF_POLICY: u32 = 0x04000000; - -pub const RTCF_VALVE: u32 = 0x00200000; -pub const RTCF_MASQ: u32 = 0x00400000; -pub const RTCF_NAT: u32 = 0x00800000; -pub const RTCF_DOREDIRECT: u32 = 0x01000000; -pub const RTCF_LOG: u32 = 0x02000000; -pub const RTCF_DIRECTSRC: u32 = 0x04000000; - -pub const RTF_LOCAL: u32 = 0x80000000; -pub const RTF_INTERFACE: u32 = 0x40000000; -pub const RTF_MULTICAST: u32 = 0x20000000; -pub const RTF_BROADCAST: u32 = 0x10000000; -pub const RTF_NAT: u32 = 0x08000000; -pub const RTF_ADDRCLASSMASK: u32 = 0xF8000000; - -pub const RT_CLASS_UNSPEC: u8 = 0; -pub const RT_CLASS_DEFAULT: u8 = 253; -pub const RT_CLASS_MAIN: u8 = 254; -pub const RT_CLASS_LOCAL: u8 = 255; -pub const RT_CLASS_MAX: u8 = 255; - -// linux/neighbor.h -pub const NUD_NONE: u16 = 0x00; -pub const NUD_INCOMPLETE: u16 = 0x01; -pub const NUD_REACHABLE: u16 = 0x02; -pub const NUD_STALE: u16 = 0x04; -pub const NUD_DELAY: u16 = 0x08; -pub const NUD_PROBE: u16 = 0x10; -pub const NUD_FAILED: u16 = 0x20; -pub const NUD_NOARP: u16 = 0x40; -pub const NUD_PERMANENT: u16 = 0x80; - -pub const NTF_USE: u8 = 0x01; -pub const NTF_SELF: u8 = 0x02; -pub const NTF_MASTER: u8 = 0x04; -pub const NTF_PROXY: u8 = 0x08; -pub const NTF_ROUTER: u8 = 0x80; - -pub const NDA_UNSPEC: c_ushort = 0; -pub const NDA_DST: c_ushort = 1; -pub const NDA_LLADDR: c_ushort = 2; -pub const NDA_CACHEINFO: c_ushort = 3; -pub const NDA_PROBES: c_ushort = 4; -pub const NDA_VLAN: c_ushort = 5; -pub const NDA_PORT: c_ushort = 6; -pub const NDA_VNI: c_ushort = 7; -pub const NDA_IFINDEX: c_ushort = 8; - -// linux/netlink.h -pub const NLA_ALIGNTO: c_int = 4; - -pub const NETLINK_ROUTE: c_int = 0; -pub const NETLINK_UNUSED: c_int = 1; -pub const NETLINK_USERSOCK: c_int = 2; -pub const NETLINK_FIREWALL: c_int = 3; -pub const NETLINK_SOCK_DIAG: c_int = 4; -pub const NETLINK_NFLOG: c_int = 5; -pub const NETLINK_XFRM: c_int = 6; -pub const NETLINK_SELINUX: c_int = 7; -pub const NETLINK_ISCSI: c_int = 8; -pub const NETLINK_AUDIT: c_int = 9; -pub const NETLINK_FIB_LOOKUP: c_int = 10; -pub const NETLINK_CONNECTOR: c_int = 11; -pub const NETLINK_NETFILTER: c_int = 12; -pub const NETLINK_IP6_FW: c_int = 13; -pub const NETLINK_DNRTMSG: c_int = 14; -pub const NETLINK_KOBJECT_UEVENT: c_int = 15; -pub const NETLINK_GENERIC: c_int = 16; -pub const NETLINK_SCSITRANSPORT: c_int = 18; -pub const NETLINK_ECRYPTFS: c_int = 19; -pub const NETLINK_RDMA: c_int = 20; -pub const NETLINK_CRYPTO: c_int = 21; -pub const NETLINK_INET_DIAG: c_int = NETLINK_SOCK_DIAG; - -pub const NLM_F_REQUEST: c_int = 1; -pub const NLM_F_MULTI: c_int = 2; -pub const NLM_F_ACK: c_int = 4; -pub const NLM_F_ECHO: c_int = 8; -pub const NLM_F_DUMP_INTR: c_int = 16; -pub const NLM_F_DUMP_FILTERED: c_int = 32; - -pub const NLM_F_ROOT: c_int = 0x100; -pub const NLM_F_MATCH: c_int = 0x200; -pub const NLM_F_ATOMIC: c_int = 0x400; -pub const NLM_F_DUMP: c_int = NLM_F_ROOT | NLM_F_MATCH; - -pub const NLM_F_REPLACE: c_int = 
0x100; -pub const NLM_F_EXCL: c_int = 0x200; -pub const NLM_F_CREATE: c_int = 0x400; -pub const NLM_F_APPEND: c_int = 0x800; - -pub const NLM_F_NONREC: c_int = 0x100; -pub const NLM_F_BULK: c_int = 0x200; - -pub const NLM_F_CAPPED: c_int = 0x100; -pub const NLM_F_ACK_TLVS: c_int = 0x200; - -pub const NETLINK_ADD_MEMBERSHIP: c_int = 1; -pub const NETLINK_DROP_MEMBERSHIP: c_int = 2; -pub const NETLINK_PKTINFO: c_int = 3; -pub const NETLINK_BROADCAST_ERROR: c_int = 4; -pub const NETLINK_NO_ENOBUFS: c_int = 5; -pub const NETLINK_RX_RING: c_int = 6; -pub const NETLINK_TX_RING: c_int = 7; -pub const NETLINK_LISTEN_ALL_NSID: c_int = 8; -pub const NETLINK_LIST_MEMBERSHIPS: c_int = 9; -pub const NETLINK_CAP_ACK: c_int = 10; -pub const NETLINK_EXT_ACK: c_int = 11; -pub const NETLINK_GET_STRICT_CHK: c_int = 12; - -pub const NLA_F_NESTED: c_int = 1 << 15; -pub const NLA_F_NET_BYTEORDER: c_int = 1 << 14; -pub const NLA_TYPE_MASK: c_int = !(NLA_F_NESTED | NLA_F_NET_BYTEORDER); - -// linux/rtnetlink.h -pub const TCA_UNSPEC: c_ushort = 0; -pub const TCA_KIND: c_ushort = 1; -pub const TCA_OPTIONS: c_ushort = 2; -pub const TCA_STATS: c_ushort = 3; -pub const TCA_XSTATS: c_ushort = 4; -pub const TCA_RATE: c_ushort = 5; -pub const TCA_FCNT: c_ushort = 6; -pub const TCA_STATS2: c_ushort = 7; -pub const TCA_STAB: c_ushort = 8; - -pub const RTM_NEWLINK: u16 = 16; -pub const RTM_DELLINK: u16 = 17; -pub const RTM_GETLINK: u16 = 18; -pub const RTM_SETLINK: u16 = 19; -pub const RTM_NEWADDR: u16 = 20; -pub const RTM_DELADDR: u16 = 21; -pub const RTM_GETADDR: u16 = 22; -pub const RTM_NEWROUTE: u16 = 24; -pub const RTM_DELROUTE: u16 = 25; -pub const RTM_GETROUTE: u16 = 26; -pub const RTM_NEWNEIGH: u16 = 28; -pub const RTM_DELNEIGH: u16 = 29; -pub const RTM_GETNEIGH: u16 = 30; -pub const RTM_NEWRULE: u16 = 32; -pub const RTM_DELRULE: u16 = 33; -pub const RTM_GETRULE: u16 = 34; -pub const RTM_NEWQDISC: u16 = 36; -pub const RTM_DELQDISC: u16 = 37; -pub const RTM_GETQDISC: u16 = 38; -pub const RTM_NEWTCLASS: u16 = 40; -pub const RTM_DELTCLASS: u16 = 41; -pub const RTM_GETTCLASS: u16 = 42; -pub const RTM_NEWTFILTER: u16 = 44; -pub const RTM_DELTFILTER: u16 = 45; -pub const RTM_GETTFILTER: u16 = 46; -pub const RTM_NEWACTION: u16 = 48; -pub const RTM_DELACTION: u16 = 49; -pub const RTM_GETACTION: u16 = 50; -pub const RTM_NEWPREFIX: u16 = 52; -pub const RTM_GETMULTICAST: u16 = 58; -pub const RTM_GETANYCAST: u16 = 62; -pub const RTM_NEWNEIGHTBL: u16 = 64; -pub const RTM_GETNEIGHTBL: u16 = 66; -pub const RTM_SETNEIGHTBL: u16 = 67; -pub const RTM_NEWNDUSEROPT: u16 = 68; -pub const RTM_NEWADDRLABEL: u16 = 72; -pub const RTM_DELADDRLABEL: u16 = 73; -pub const RTM_GETADDRLABEL: u16 = 74; -pub const RTM_GETDCB: u16 = 78; -pub const RTM_SETDCB: u16 = 79; -pub const RTM_NEWNETCONF: u16 = 80; -pub const RTM_GETNETCONF: u16 = 82; -pub const RTM_NEWMDB: u16 = 84; -pub const RTM_DELMDB: u16 = 85; -pub const RTM_GETMDB: u16 = 86; -pub const RTM_NEWNSID: u16 = 88; -pub const RTM_DELNSID: u16 = 89; -pub const RTM_GETNSID: u16 = 90; - -pub const RTM_F_NOTIFY: c_uint = 0x100; -pub const RTM_F_CLONED: c_uint = 0x200; -pub const RTM_F_EQUALIZE: c_uint = 0x400; -pub const RTM_F_PREFIX: c_uint = 0x800; - -pub const RTA_UNSPEC: c_ushort = 0; -pub const RTA_DST: c_ushort = 1; -pub const RTA_SRC: c_ushort = 2; -pub const RTA_IIF: c_ushort = 3; -pub const RTA_OIF: c_ushort = 4; -pub const RTA_GATEWAY: c_ushort = 5; -pub const RTA_PRIORITY: c_ushort = 6; -pub const RTA_PREFSRC: c_ushort = 7; -pub const RTA_METRICS: c_ushort = 8; -pub const 
RTA_MULTIPATH: c_ushort = 9; -pub const RTA_PROTOINFO: c_ushort = 10; // No longer used -pub const RTA_FLOW: c_ushort = 11; -pub const RTA_CACHEINFO: c_ushort = 12; -pub const RTA_SESSION: c_ushort = 13; // No longer used -pub const RTA_MP_ALGO: c_ushort = 14; // No longer used -pub const RTA_TABLE: c_ushort = 15; -pub const RTA_MARK: c_ushort = 16; -pub const RTA_MFC_STATS: c_ushort = 17; - -pub const RTN_UNSPEC: c_uchar = 0; -pub const RTN_UNICAST: c_uchar = 1; -pub const RTN_LOCAL: c_uchar = 2; -pub const RTN_BROADCAST: c_uchar = 3; -pub const RTN_ANYCAST: c_uchar = 4; -pub const RTN_MULTICAST: c_uchar = 5; -pub const RTN_BLACKHOLE: c_uchar = 6; -pub const RTN_UNREACHABLE: c_uchar = 7; -pub const RTN_PROHIBIT: c_uchar = 8; -pub const RTN_THROW: c_uchar = 9; -pub const RTN_NAT: c_uchar = 10; -pub const RTN_XRESOLVE: c_uchar = 11; - -pub const RTPROT_UNSPEC: c_uchar = 0; -pub const RTPROT_REDIRECT: c_uchar = 1; -pub const RTPROT_KERNEL: c_uchar = 2; -pub const RTPROT_BOOT: c_uchar = 3; -pub const RTPROT_STATIC: c_uchar = 4; - -pub const RT_SCOPE_UNIVERSE: c_uchar = 0; -pub const RT_SCOPE_SITE: c_uchar = 200; -pub const RT_SCOPE_LINK: c_uchar = 253; -pub const RT_SCOPE_HOST: c_uchar = 254; -pub const RT_SCOPE_NOWHERE: c_uchar = 255; - -pub const RT_TABLE_UNSPEC: c_uchar = 0; -pub const RT_TABLE_COMPAT: c_uchar = 252; -pub const RT_TABLE_DEFAULT: c_uchar = 253; -pub const RT_TABLE_MAIN: c_uchar = 254; -pub const RT_TABLE_LOCAL: c_uchar = 255; - -pub const RTMSG_OVERRUN: u32 = crate::NLMSG_OVERRUN as u32; -pub const RTMSG_NEWDEVICE: u32 = 0x11; -pub const RTMSG_DELDEVICE: u32 = 0x12; -pub const RTMSG_NEWROUTE: u32 = 0x21; -pub const RTMSG_DELROUTE: u32 = 0x22; -pub const RTMSG_NEWRULE: u32 = 0x31; -pub const RTMSG_DELRULE: u32 = 0x32; -pub const RTMSG_CONTROL: u32 = 0x40; -pub const RTMSG_AR_FAILED: u32 = 0x51; - -pub const MAX_ADDR_LEN: usize = 7; -pub const ARPD_UPDATE: c_ushort = 0x01; -pub const ARPD_LOOKUP: c_ushort = 0x02; -pub const ARPD_FLUSH: c_ushort = 0x03; -pub const ATF_MAGIC: c_int = 0x80; - -pub const RTEXT_FILTER_VF: c_int = 1 << 0; -pub const RTEXT_FILTER_BRVLAN: c_int = 1 << 1; -pub const RTEXT_FILTER_BRVLAN_COMPRESSED: c_int = 1 << 2; -pub const RTEXT_FILTER_SKIP_STATS: c_int = 1 << 3; -pub const RTEXT_FILTER_MRP: c_int = 1 << 4; -pub const RTEXT_FILTER_CFM_CONFIG: c_int = 1 << 5; -pub const RTEXT_FILTER_CFM_STATUS: c_int = 1 << 6; - -// userspace compat definitions for RTNLGRP_* -pub const RTMGRP_LINK: c_int = 0x00001; -pub const RTMGRP_NOTIFY: c_int = 0x00002; -pub const RTMGRP_NEIGH: c_int = 0x00004; -pub const RTMGRP_TC: c_int = 0x00008; -pub const RTMGRP_IPV4_IFADDR: c_int = 0x00010; -pub const RTMGRP_IPV4_MROUTE: c_int = 0x00020; -pub const RTMGRP_IPV4_ROUTE: c_int = 0x00040; -pub const RTMGRP_IPV4_RULE: c_int = 0x00080; -pub const RTMGRP_IPV6_IFADDR: c_int = 0x00100; -pub const RTMGRP_IPV6_MROUTE: c_int = 0x00200; -pub const RTMGRP_IPV6_ROUTE: c_int = 0x00400; -pub const RTMGRP_IPV6_IFINFO: c_int = 0x00800; -pub const RTMGRP_DECnet_IFADDR: c_int = 0x01000; -pub const RTMGRP_DECnet_ROUTE: c_int = 0x04000; -pub const RTMGRP_IPV6_PREFIX: c_int = 0x20000; - -// enum rtnetlink_groups -pub const RTNLGRP_NONE: c_uint = 0x00; -pub const RTNLGRP_LINK: c_uint = 0x01; -pub const RTNLGRP_NOTIFY: c_uint = 0x02; -pub const RTNLGRP_NEIGH: c_uint = 0x03; -pub const RTNLGRP_TC: c_uint = 0x04; -pub const RTNLGRP_IPV4_IFADDR: c_uint = 0x05; -pub const RTNLGRP_IPV4_MROUTE: c_uint = 0x06; -pub const RTNLGRP_IPV4_ROUTE: c_uint = 0x07; -pub const RTNLGRP_IPV4_RULE: c_uint = 0x08; -pub 
const RTNLGRP_IPV6_IFADDR: c_uint = 0x09; -pub const RTNLGRP_IPV6_MROUTE: c_uint = 0x0a; -pub const RTNLGRP_IPV6_ROUTE: c_uint = 0x0b; -pub const RTNLGRP_IPV6_IFINFO: c_uint = 0x0c; -pub const RTNLGRP_DECnet_IFADDR: c_uint = 0x0d; -pub const RTNLGRP_NOP2: c_uint = 0x0e; -pub const RTNLGRP_DECnet_ROUTE: c_uint = 0x0f; -pub const RTNLGRP_DECnet_RULE: c_uint = 0x10; -pub const RTNLGRP_NOP4: c_uint = 0x11; -pub const RTNLGRP_IPV6_PREFIX: c_uint = 0x12; -pub const RTNLGRP_IPV6_RULE: c_uint = 0x13; -pub const RTNLGRP_ND_USEROPT: c_uint = 0x14; -pub const RTNLGRP_PHONET_IFADDR: c_uint = 0x15; -pub const RTNLGRP_PHONET_ROUTE: c_uint = 0x16; -pub const RTNLGRP_DCB: c_uint = 0x17; -pub const RTNLGRP_IPV4_NETCONF: c_uint = 0x18; -pub const RTNLGRP_IPV6_NETCONF: c_uint = 0x19; -pub const RTNLGRP_MDB: c_uint = 0x1a; -pub const RTNLGRP_MPLS_ROUTE: c_uint = 0x1b; -pub const RTNLGRP_NSID: c_uint = 0x1c; -pub const RTNLGRP_MPLS_NETCONF: c_uint = 0x1d; -pub const RTNLGRP_IPV4_MROUTE_R: c_uint = 0x1e; -pub const RTNLGRP_IPV6_MROUTE_R: c_uint = 0x1f; -pub const RTNLGRP_NEXTHOP: c_uint = 0x20; -pub const RTNLGRP_BRVLAN: c_uint = 0x21; -pub const RTNLGRP_MCTP_IFADDR: c_uint = 0x22; -pub const RTNLGRP_TUNNEL: c_uint = 0x23; -pub const RTNLGRP_STATS: c_uint = 0x24; - -// linux/cn_proc.h -c_enum! { - pub enum proc_cn_mcast_op { - PROC_CN_MCAST_LISTEN = 1, - PROC_CN_MCAST_IGNORE = 2, - } - - pub enum proc_cn_event { - PROC_EVENT_NONE = 0x00000000, - PROC_EVENT_FORK = 0x00000001, - PROC_EVENT_EXEC = 0x00000002, - PROC_EVENT_UID = 0x00000004, - PROC_EVENT_GID = 0x00000040, - PROC_EVENT_SID = 0x00000080, - PROC_EVENT_PTRACE = 0x00000100, - PROC_EVENT_COMM = 0x00000200, - PROC_EVENT_NONZERO_EXIT = 0x20000000, - PROC_EVENT_COREDUMP = 0x40000000, - PROC_EVENT_EXIT = 0x80000000, - } -} - -// linux/connector.h -pub const CN_IDX_PROC: c_uint = 0x1; -pub const CN_VAL_PROC: c_uint = 0x1; -pub const CN_IDX_CIFS: c_uint = 0x2; -pub const CN_VAL_CIFS: c_uint = 0x1; -pub const CN_W1_IDX: c_uint = 0x3; -pub const CN_W1_VAL: c_uint = 0x1; -pub const CN_IDX_V86D: c_uint = 0x4; -pub const CN_VAL_V86D_UVESAFB: c_uint = 0x1; -pub const CN_IDX_BB: c_uint = 0x5; -pub const CN_DST_IDX: c_uint = 0x6; -pub const CN_DST_VAL: c_uint = 0x1; -pub const CN_IDX_DM: c_uint = 0x7; -pub const CN_VAL_DM_USERSPACE_LOG: c_uint = 0x1; -pub const CN_IDX_DRBD: c_uint = 0x8; -pub const CN_VAL_DRBD: c_uint = 0x1; -pub const CN_KVP_IDX: c_uint = 0x9; -pub const CN_KVP_VAL: c_uint = 0x1; -pub const CN_VSS_IDX: c_uint = 0xA; -pub const CN_VSS_VAL: c_uint = 0x1; - -// linux/module.h -pub const MODULE_INIT_IGNORE_MODVERSIONS: c_uint = 0x0001; -pub const MODULE_INIT_IGNORE_VERMAGIC: c_uint = 0x0002; - -// linux/net_tstamp.h -pub const SOF_TIMESTAMPING_TX_HARDWARE: c_uint = 1 << 0; -pub const SOF_TIMESTAMPING_TX_SOFTWARE: c_uint = 1 << 1; -pub const SOF_TIMESTAMPING_RX_HARDWARE: c_uint = 1 << 2; -pub const SOF_TIMESTAMPING_RX_SOFTWARE: c_uint = 1 << 3; -pub const SOF_TIMESTAMPING_SOFTWARE: c_uint = 1 << 4; -pub const SOF_TIMESTAMPING_SYS_HARDWARE: c_uint = 1 << 5; -pub const SOF_TIMESTAMPING_RAW_HARDWARE: c_uint = 1 << 6; -pub const SOF_TIMESTAMPING_OPT_ID: c_uint = 1 << 7; -pub const SOF_TIMESTAMPING_TX_SCHED: c_uint = 1 << 8; -pub const SOF_TIMESTAMPING_TX_ACK: c_uint = 1 << 9; -pub const SOF_TIMESTAMPING_OPT_CMSG: c_uint = 1 << 10; -pub const SOF_TIMESTAMPING_OPT_TSONLY: c_uint = 1 << 11; -pub const SOF_TIMESTAMPING_OPT_STATS: c_uint = 1 << 12; -pub const SOF_TIMESTAMPING_OPT_PKTINFO: c_uint = 1 << 13; -pub const SOF_TIMESTAMPING_OPT_TX_SWHW: c_uint = 1 << 14; 
-pub const SOF_TIMESTAMPING_BIND_PHC: c_uint = 1 << 15; -pub const SOF_TIMESTAMPING_OPT_ID_TCP: c_uint = 1 << 16; -pub const SOF_TIMESTAMPING_OPT_RX_FILTER: c_uint = 1 << 17; -pub const SOF_TXTIME_DEADLINE_MODE: u32 = 1 << 0; -pub const SOF_TXTIME_REPORT_ERRORS: u32 = 1 << 1; - -pub const HWTSTAMP_TX_OFF: c_uint = 0; -pub const HWTSTAMP_TX_ON: c_uint = 1; -pub const HWTSTAMP_TX_ONESTEP_SYNC: c_uint = 2; -pub const HWTSTAMP_TX_ONESTEP_P2P: c_uint = 3; - -pub const HWTSTAMP_FILTER_NONE: c_uint = 0; -pub const HWTSTAMP_FILTER_ALL: c_uint = 1; -pub const HWTSTAMP_FILTER_SOME: c_uint = 2; -pub const HWTSTAMP_FILTER_PTP_V1_L4_EVENT: c_uint = 3; -pub const HWTSTAMP_FILTER_PTP_V1_L4_SYNC: c_uint = 4; -pub const HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: c_uint = 5; -pub const HWTSTAMP_FILTER_PTP_V2_L4_EVENT: c_uint = 6; -pub const HWTSTAMP_FILTER_PTP_V2_L4_SYNC: c_uint = 7; -pub const HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: c_uint = 8; -pub const HWTSTAMP_FILTER_PTP_V2_L2_EVENT: c_uint = 9; -pub const HWTSTAMP_FILTER_PTP_V2_L2_SYNC: c_uint = 10; -pub const HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: c_uint = 11; -pub const HWTSTAMP_FILTER_PTP_V2_EVENT: c_uint = 12; -pub const HWTSTAMP_FILTER_PTP_V2_SYNC: c_uint = 13; -pub const HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: c_uint = 14; -pub const HWTSTAMP_FILTER_NTP_ALL: c_uint = 15; - -// linux/ptp_clock.h -pub const PTP_MAX_SAMPLES: c_uint = 25; // Maximum allowed offset measurement samples. - -const PTP_CLK_MAGIC: u32 = b'=' as u32; - -pub const PTP_CLOCK_GETCAPS: Ioctl = _IOR::(PTP_CLK_MAGIC, 1); -pub const PTP_EXTTS_REQUEST: Ioctl = _IOW::(PTP_CLK_MAGIC, 2); -pub const PTP_PEROUT_REQUEST: Ioctl = _IOW::(PTP_CLK_MAGIC, 3); -pub const PTP_ENABLE_PPS: Ioctl = _IOW::(PTP_CLK_MAGIC, 4); -pub const PTP_SYS_OFFSET: Ioctl = _IOW::(PTP_CLK_MAGIC, 5); -pub const PTP_PIN_GETFUNC: Ioctl = _IOWR::(PTP_CLK_MAGIC, 6); -pub const PTP_PIN_SETFUNC: Ioctl = _IOW::(PTP_CLK_MAGIC, 7); -pub const PTP_SYS_OFFSET_PRECISE: Ioctl = _IOWR::(PTP_CLK_MAGIC, 8); -pub const PTP_SYS_OFFSET_EXTENDED: Ioctl = _IOWR::(PTP_CLK_MAGIC, 9); - -pub const PTP_CLOCK_GETCAPS2: Ioctl = _IOR::(PTP_CLK_MAGIC, 10); -pub const PTP_EXTTS_REQUEST2: Ioctl = _IOW::(PTP_CLK_MAGIC, 11); -pub const PTP_PEROUT_REQUEST2: Ioctl = _IOW::(PTP_CLK_MAGIC, 12); -pub const PTP_ENABLE_PPS2: Ioctl = _IOW::(PTP_CLK_MAGIC, 13); -pub const PTP_SYS_OFFSET2: Ioctl = _IOW::(PTP_CLK_MAGIC, 14); -pub const PTP_PIN_GETFUNC2: Ioctl = _IOWR::(PTP_CLK_MAGIC, 15); -pub const PTP_PIN_SETFUNC2: Ioctl = _IOW::(PTP_CLK_MAGIC, 16); -pub const PTP_SYS_OFFSET_PRECISE2: Ioctl = _IOWR::(PTP_CLK_MAGIC, 17); -pub const PTP_SYS_OFFSET_EXTENDED2: Ioctl = _IOWR::(PTP_CLK_MAGIC, 18); - -// enum ptp_pin_function -pub const PTP_PF_NONE: c_uint = 0; -pub const PTP_PF_EXTTS: c_uint = 1; -pub const PTP_PF_PEROUT: c_uint = 2; -pub const PTP_PF_PHYSYNC: c_uint = 3; - -// linux/tls.h -pub const TLS_TX: c_int = 1; -pub const TLS_RX: c_int = 2; - -pub const TLS_TX_ZEROCOPY_RO: c_int = 3; -pub const TLS_RX_EXPECT_NO_PAD: c_int = 4; - -pub const TLS_1_2_VERSION_MAJOR: __u8 = 0x3; -pub const TLS_1_2_VERSION_MINOR: __u8 = 0x3; -pub const TLS_1_2_VERSION: __u16 = - ((TLS_1_2_VERSION_MAJOR as __u16) << 8) | (TLS_1_2_VERSION_MINOR as __u16); - -pub const TLS_1_3_VERSION_MAJOR: __u8 = 0x3; -pub const TLS_1_3_VERSION_MINOR: __u8 = 0x4; -pub const TLS_1_3_VERSION: __u16 = - ((TLS_1_3_VERSION_MAJOR as __u16) << 8) | (TLS_1_3_VERSION_MINOR as __u16); - -pub const TLS_CIPHER_AES_GCM_128: __u16 = 51; -pub const TLS_CIPHER_AES_GCM_128_IV_SIZE: usize = 8; -pub const 
TLS_CIPHER_AES_GCM_128_KEY_SIZE: usize = 16; -pub const TLS_CIPHER_AES_GCM_128_SALT_SIZE: usize = 4; -pub const TLS_CIPHER_AES_GCM_128_TAG_SIZE: usize = 16; -pub const TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE: usize = 8; - -pub const TLS_CIPHER_AES_GCM_256: __u16 = 52; -pub const TLS_CIPHER_AES_GCM_256_IV_SIZE: usize = 8; -pub const TLS_CIPHER_AES_GCM_256_KEY_SIZE: usize = 32; -pub const TLS_CIPHER_AES_GCM_256_SALT_SIZE: usize = 4; -pub const TLS_CIPHER_AES_GCM_256_TAG_SIZE: usize = 16; -pub const TLS_CIPHER_AES_GCM_256_REC_SEQ_SIZE: usize = 8; - -pub const TLS_CIPHER_AES_CCM_128: __u16 = 53; -pub const TLS_CIPHER_AES_CCM_128_IV_SIZE: usize = 8; -pub const TLS_CIPHER_AES_CCM_128_KEY_SIZE: usize = 16; -pub const TLS_CIPHER_AES_CCM_128_SALT_SIZE: usize = 4; -pub const TLS_CIPHER_AES_CCM_128_TAG_SIZE: usize = 16; -pub const TLS_CIPHER_AES_CCM_128_REC_SEQ_SIZE: usize = 8; - -pub const TLS_CIPHER_CHACHA20_POLY1305: __u16 = 54; -pub const TLS_CIPHER_CHACHA20_POLY1305_IV_SIZE: usize = 12; -pub const TLS_CIPHER_CHACHA20_POLY1305_KEY_SIZE: usize = 32; -pub const TLS_CIPHER_CHACHA20_POLY1305_SALT_SIZE: usize = 0; -pub const TLS_CIPHER_CHACHA20_POLY1305_TAG_SIZE: usize = 16; -pub const TLS_CIPHER_CHACHA20_POLY1305_REC_SEQ_SIZE: usize = 8; - -pub const TLS_CIPHER_SM4_GCM: __u16 = 55; -pub const TLS_CIPHER_SM4_GCM_IV_SIZE: usize = 8; -pub const TLS_CIPHER_SM4_GCM_KEY_SIZE: usize = 16; -pub const TLS_CIPHER_SM4_GCM_SALT_SIZE: usize = 4; -pub const TLS_CIPHER_SM4_GCM_TAG_SIZE: usize = 16; -pub const TLS_CIPHER_SM4_GCM_REC_SEQ_SIZE: usize = 8; - -pub const TLS_CIPHER_SM4_CCM: __u16 = 56; -pub const TLS_CIPHER_SM4_CCM_IV_SIZE: usize = 8; -pub const TLS_CIPHER_SM4_CCM_KEY_SIZE: usize = 16; -pub const TLS_CIPHER_SM4_CCM_SALT_SIZE: usize = 4; -pub const TLS_CIPHER_SM4_CCM_TAG_SIZE: usize = 16; -pub const TLS_CIPHER_SM4_CCM_REC_SEQ_SIZE: usize = 8; - -pub const TLS_CIPHER_ARIA_GCM_128: __u16 = 57; -pub const TLS_CIPHER_ARIA_GCM_128_IV_SIZE: usize = 8; -pub const TLS_CIPHER_ARIA_GCM_128_KEY_SIZE: usize = 16; -pub const TLS_CIPHER_ARIA_GCM_128_SALT_SIZE: usize = 4; -pub const TLS_CIPHER_ARIA_GCM_128_TAG_SIZE: usize = 16; -pub const TLS_CIPHER_ARIA_GCM_128_REC_SEQ_SIZE: usize = 8; - -pub const TLS_CIPHER_ARIA_GCM_256: __u16 = 58; -pub const TLS_CIPHER_ARIA_GCM_256_IV_SIZE: usize = 8; -pub const TLS_CIPHER_ARIA_GCM_256_KEY_SIZE: usize = 32; -pub const TLS_CIPHER_ARIA_GCM_256_SALT_SIZE: usize = 4; -pub const TLS_CIPHER_ARIA_GCM_256_TAG_SIZE: usize = 16; -pub const TLS_CIPHER_ARIA_GCM_256_REC_SEQ_SIZE: usize = 8; - -pub const TLS_SET_RECORD_TYPE: c_int = 1; -pub const TLS_GET_RECORD_TYPE: c_int = 2; - -pub const SOL_TLS: c_int = 282; - -// enum -pub const TLS_INFO_UNSPEC: c_int = 0x00; -pub const TLS_INFO_VERSION: c_int = 0x01; -pub const TLS_INFO_CIPHER: c_int = 0x02; -pub const TLS_INFO_TXCONF: c_int = 0x03; -pub const TLS_INFO_RXCONF: c_int = 0x04; -pub const TLS_INFO_ZC_RO_TX: c_int = 0x05; -pub const TLS_INFO_RX_NO_PAD: c_int = 0x06; - -pub const TLS_CONF_BASE: c_int = 1; -pub const TLS_CONF_SW: c_int = 2; -pub const TLS_CONF_HW: c_int = 3; -pub const TLS_CONF_HW_RECORD: c_int = 4; - -// linux/if_alg.h -pub const ALG_SET_KEY: c_int = 1; -pub const ALG_SET_IV: c_int = 2; -pub const ALG_SET_OP: c_int = 3; -pub const ALG_SET_AEAD_ASSOCLEN: c_int = 4; -pub const ALG_SET_AEAD_AUTHSIZE: c_int = 5; -pub const ALG_SET_DRBG_ENTROPY: c_int = 6; -pub const ALG_SET_KEY_BY_KEY_SERIAL: c_int = 7; - -pub const ALG_OP_DECRYPT: c_int = 0; -pub const ALG_OP_ENCRYPT: c_int = 1; - -// include/uapi/linux/if.h -pub const IF_OPER_UNKNOWN: 
c_int = 0; -pub const IF_OPER_NOTPRESENT: c_int = 1; -pub const IF_OPER_DOWN: c_int = 2; -pub const IF_OPER_LOWERLAYERDOWN: c_int = 3; -pub const IF_OPER_TESTING: c_int = 4; -pub const IF_OPER_DORMANT: c_int = 5; -pub const IF_OPER_UP: c_int = 6; - -pub const IF_LINK_MODE_DEFAULT: c_int = 0; -pub const IF_LINK_MODE_DORMANT: c_int = 1; -pub const IF_LINK_MODE_TESTING: c_int = 2; - -// include/uapi/linux/udp.h -pub const UDP_CORK: c_int = 1; -pub const UDP_ENCAP: c_int = 100; -pub const UDP_NO_CHECK6_TX: c_int = 101; -pub const UDP_NO_CHECK6_RX: c_int = 102; - -// include/uapi/linux/mman.h -pub const MAP_SHARED_VALIDATE: c_int = 0x3; -pub const MAP_DROPPABLE: c_int = 0x8; - -// include/uapi/asm-generic/mman-common.h -pub const MAP_FIXED_NOREPLACE: c_int = 0x100000; -pub const MLOCK_ONFAULT: c_uint = 0x01; - -// uapi/linux/vm_sockets.h -pub const VMADDR_CID_ANY: c_uint = 0xFFFFFFFF; -pub const VMADDR_CID_HYPERVISOR: c_uint = 0; -#[deprecated( - since = "0.2.74", - note = "VMADDR_CID_RESERVED is removed since Linux v5.6 and \ - replaced with VMADDR_CID_LOCAL" -)] -pub const VMADDR_CID_RESERVED: c_uint = 1; -pub const VMADDR_CID_LOCAL: c_uint = 1; -pub const VMADDR_CID_HOST: c_uint = 2; -pub const VMADDR_PORT_ANY: c_uint = 0xFFFFFFFF; - -// uapi/linux/inotify.h -pub const IN_ACCESS: u32 = 0x0000_0001; -pub const IN_MODIFY: u32 = 0x0000_0002; -pub const IN_ATTRIB: u32 = 0x0000_0004; -pub const IN_CLOSE_WRITE: u32 = 0x0000_0008; -pub const IN_CLOSE_NOWRITE: u32 = 0x0000_0010; -pub const IN_CLOSE: u32 = IN_CLOSE_WRITE | IN_CLOSE_NOWRITE; -pub const IN_OPEN: u32 = 0x0000_0020; -pub const IN_MOVED_FROM: u32 = 0x0000_0040; -pub const IN_MOVED_TO: u32 = 0x0000_0080; -pub const IN_MOVE: u32 = IN_MOVED_FROM | IN_MOVED_TO; -pub const IN_CREATE: u32 = 0x0000_0100; -pub const IN_DELETE: u32 = 0x0000_0200; -pub const IN_DELETE_SELF: u32 = 0x0000_0400; -pub const IN_MOVE_SELF: u32 = 0x0000_0800; -pub const IN_UNMOUNT: u32 = 0x0000_2000; -pub const IN_Q_OVERFLOW: u32 = 0x0000_4000; -pub const IN_IGNORED: u32 = 0x0000_8000; -pub const IN_ONLYDIR: u32 = 0x0100_0000; -pub const IN_DONT_FOLLOW: u32 = 0x0200_0000; -pub const IN_EXCL_UNLINK: u32 = 0x0400_0000; - -// uapi/linux/securebits.h -const SECURE_NOROOT: c_int = 0; -const SECURE_NOROOT_LOCKED: c_int = 1; - -pub const SECBIT_NOROOT: c_int = issecure_mask(SECURE_NOROOT); -pub const SECBIT_NOROOT_LOCKED: c_int = issecure_mask(SECURE_NOROOT_LOCKED); - -const SECURE_NO_SETUID_FIXUP: c_int = 2; -const SECURE_NO_SETUID_FIXUP_LOCKED: c_int = 3; - -pub const SECBIT_NO_SETUID_FIXUP: c_int = issecure_mask(SECURE_NO_SETUID_FIXUP); -pub const SECBIT_NO_SETUID_FIXUP_LOCKED: c_int = issecure_mask(SECURE_NO_SETUID_FIXUP_LOCKED); - -const SECURE_KEEP_CAPS: c_int = 4; -const SECURE_KEEP_CAPS_LOCKED: c_int = 5; - -pub const SECBIT_KEEP_CAPS: c_int = issecure_mask(SECURE_KEEP_CAPS); -pub const SECBIT_KEEP_CAPS_LOCKED: c_int = issecure_mask(SECURE_KEEP_CAPS_LOCKED); - -const SECURE_NO_CAP_AMBIENT_RAISE: c_int = 6; -const SECURE_NO_CAP_AMBIENT_RAISE_LOCKED: c_int = 7; - -pub const SECBIT_NO_CAP_AMBIENT_RAISE: c_int = issecure_mask(SECURE_NO_CAP_AMBIENT_RAISE); -pub const SECBIT_NO_CAP_AMBIENT_RAISE_LOCKED: c_int = - issecure_mask(SECURE_NO_CAP_AMBIENT_RAISE_LOCKED); - -const SECURE_EXEC_RESTRICT_FILE: c_int = 8; -const SECURE_EXEC_RESTRICT_FILE_LOCKED: c_int = 9; - -pub const SECBIT_EXEC_RESTRICT_FILE: c_int = issecure_mask(SECURE_EXEC_RESTRICT_FILE); -pub const SECBIT_EXEC_RESTRICT_FILE_LOCKED: c_int = issecure_mask(SECURE_EXEC_RESTRICT_FILE_LOCKED); - -const 
SECURE_EXEC_DENY_INTERACTIVE: c_int = 10; -const SECURE_EXEC_DENY_INTERACTIVE_LOCKED: c_int = 11; - -pub const SECBIT_EXEC_DENY_INTERACTIVE: c_int = issecure_mask(SECURE_EXEC_DENY_INTERACTIVE); -pub const SECBIT_EXEC_DENY_INTERACTIVE_LOCKED: c_int = - issecure_mask(SECURE_EXEC_DENY_INTERACTIVE_LOCKED); - -pub const SECUREBITS_DEFAULT: c_int = 0x00000000; -pub const SECURE_ALL_BITS: c_int = SECBIT_NOROOT - | SECBIT_NO_SETUID_FIXUP - | SECBIT_KEEP_CAPS - | SECBIT_NO_CAP_AMBIENT_RAISE - | SECBIT_EXEC_RESTRICT_FILE - | SECBIT_EXEC_DENY_INTERACTIVE; -pub const SECURE_ALL_LOCKS: c_int = SECURE_ALL_BITS << 1; - -pub const SECURE_ALL_UNPRIVILEGED: c_int = - issecure_mask(SECURE_EXEC_RESTRICT_FILE) | issecure_mask(SECURE_EXEC_DENY_INTERACTIVE); - -const fn issecure_mask(x: c_int) -> c_int { - 1 << x -} - -// linux/keyctl.h -pub const KEY_SPEC_THREAD_KEYRING: i32 = -1; -pub const KEY_SPEC_PROCESS_KEYRING: i32 = -2; -pub const KEY_SPEC_SESSION_KEYRING: i32 = -3; -pub const KEY_SPEC_USER_KEYRING: i32 = -4; -pub const KEY_SPEC_USER_SESSION_KEYRING: i32 = -5; -pub const KEY_SPEC_GROUP_KEYRING: i32 = -6; -pub const KEY_SPEC_REQKEY_AUTH_KEY: i32 = -7; -pub const KEY_SPEC_REQUESTOR_KEYRING: i32 = -8; - -pub const KEY_REQKEY_DEFL_NO_CHANGE: i32 = -1; -pub const KEY_REQKEY_DEFL_DEFAULT: i32 = 0; -pub const KEY_REQKEY_DEFL_THREAD_KEYRING: i32 = 1; -pub const KEY_REQKEY_DEFL_PROCESS_KEYRING: i32 = 2; -pub const KEY_REQKEY_DEFL_SESSION_KEYRING: i32 = 3; -pub const KEY_REQKEY_DEFL_USER_KEYRING: i32 = 4; -pub const KEY_REQKEY_DEFL_USER_SESSION_KEYRING: i32 = 5; -pub const KEY_REQKEY_DEFL_GROUP_KEYRING: i32 = 6; -pub const KEY_REQKEY_DEFL_REQUESTOR_KEYRING: i32 = 7; - -pub const KEYCTL_GET_KEYRING_ID: u32 = 0; -pub const KEYCTL_JOIN_SESSION_KEYRING: u32 = 1; -pub const KEYCTL_UPDATE: u32 = 2; -pub const KEYCTL_REVOKE: u32 = 3; -pub const KEYCTL_CHOWN: u32 = 4; -pub const KEYCTL_SETPERM: u32 = 5; -pub const KEYCTL_DESCRIBE: u32 = 6; -pub const KEYCTL_CLEAR: u32 = 7; -pub const KEYCTL_LINK: u32 = 8; -pub const KEYCTL_UNLINK: u32 = 9; -pub const KEYCTL_SEARCH: u32 = 10; -pub const KEYCTL_READ: u32 = 11; -pub const KEYCTL_INSTANTIATE: u32 = 12; -pub const KEYCTL_NEGATE: u32 = 13; -pub const KEYCTL_SET_REQKEY_KEYRING: u32 = 14; -pub const KEYCTL_SET_TIMEOUT: u32 = 15; -pub const KEYCTL_ASSUME_AUTHORITY: u32 = 16; -pub const KEYCTL_GET_SECURITY: u32 = 17; -pub const KEYCTL_SESSION_TO_PARENT: u32 = 18; -pub const KEYCTL_REJECT: u32 = 19; -pub const KEYCTL_INSTANTIATE_IOV: u32 = 20; -pub const KEYCTL_INVALIDATE: u32 = 21; -pub const KEYCTL_GET_PERSISTENT: u32 = 22; - -pub const IN_MASK_CREATE: u32 = 0x1000_0000; -pub const IN_MASK_ADD: u32 = 0x2000_0000; -pub const IN_ISDIR: u32 = 0x4000_0000; -pub const IN_ONESHOT: u32 = 0x8000_0000; - -pub const IN_ALL_EVENTS: u32 = IN_ACCESS - | IN_MODIFY - | IN_ATTRIB - | IN_CLOSE_WRITE - | IN_CLOSE_NOWRITE - | IN_OPEN - | IN_MOVED_FROM - | IN_MOVED_TO - | IN_DELETE - | IN_CREATE - | IN_DELETE_SELF - | IN_MOVE_SELF; - -pub const IN_CLOEXEC: c_int = O_CLOEXEC; -pub const IN_NONBLOCK: c_int = O_NONBLOCK; - -// uapi/linux/mount.h -pub const OPEN_TREE_CLONE: c_uint = 0x01; -pub const OPEN_TREE_CLOEXEC: c_uint = O_CLOEXEC as c_uint; - -// uapi/linux/netfilter/nf_tables.h -pub const NFT_TABLE_MAXNAMELEN: c_int = 256; -pub const NFT_CHAIN_MAXNAMELEN: c_int = 256; -pub const NFT_SET_MAXNAMELEN: c_int = 256; -pub const NFT_OBJ_MAXNAMELEN: c_int = 256; -pub const NFT_USERDATA_MAXLEN: c_int = 256; - -pub const NFT_REG_VERDICT: c_int = 0; -pub const NFT_REG_1: c_int = 1; -pub const NFT_REG_2: 
c_int = 2; -pub const NFT_REG_3: c_int = 3; -pub const NFT_REG_4: c_int = 4; -pub const __NFT_REG_MAX: c_int = 5; -pub const NFT_REG32_00: c_int = 8; -pub const NFT_REG32_01: c_int = 9; -pub const NFT_REG32_02: c_int = 10; -pub const NFT_REG32_03: c_int = 11; -pub const NFT_REG32_04: c_int = 12; -pub const NFT_REG32_05: c_int = 13; -pub const NFT_REG32_06: c_int = 14; -pub const NFT_REG32_07: c_int = 15; -pub const NFT_REG32_08: c_int = 16; -pub const NFT_REG32_09: c_int = 17; -pub const NFT_REG32_10: c_int = 18; -pub const NFT_REG32_11: c_int = 19; -pub const NFT_REG32_12: c_int = 20; -pub const NFT_REG32_13: c_int = 21; -pub const NFT_REG32_14: c_int = 22; -pub const NFT_REG32_15: c_int = 23; - -pub const NFT_REG_SIZE: c_int = 16; -pub const NFT_REG32_SIZE: c_int = 4; - -pub const NFT_CONTINUE: c_int = -1; -pub const NFT_BREAK: c_int = -2; -pub const NFT_JUMP: c_int = -3; -pub const NFT_GOTO: c_int = -4; -pub const NFT_RETURN: c_int = -5; - -pub const NFT_MSG_NEWTABLE: c_int = 0; -pub const NFT_MSG_GETTABLE: c_int = 1; -pub const NFT_MSG_DELTABLE: c_int = 2; -pub const NFT_MSG_NEWCHAIN: c_int = 3; -pub const NFT_MSG_GETCHAIN: c_int = 4; -pub const NFT_MSG_DELCHAIN: c_int = 5; -pub const NFT_MSG_NEWRULE: c_int = 6; -pub const NFT_MSG_GETRULE: c_int = 7; -pub const NFT_MSG_DELRULE: c_int = 8; -pub const NFT_MSG_NEWSET: c_int = 9; -pub const NFT_MSG_GETSET: c_int = 10; -pub const NFT_MSG_DELSET: c_int = 11; -pub const NFT_MSG_NEWSETELEM: c_int = 12; -pub const NFT_MSG_GETSETELEM: c_int = 13; -pub const NFT_MSG_DELSETELEM: c_int = 14; -pub const NFT_MSG_NEWGEN: c_int = 15; -pub const NFT_MSG_GETGEN: c_int = 16; -pub const NFT_MSG_TRACE: c_int = 17; -cfg_if! { - if #[cfg(not(target_arch = "sparc64"))] { - pub const NFT_MSG_NEWOBJ: c_int = 18; - pub const NFT_MSG_GETOBJ: c_int = 19; - pub const NFT_MSG_DELOBJ: c_int = 20; - pub const NFT_MSG_GETOBJ_RESET: c_int = 21; - } -} -pub const NFT_MSG_MAX: c_int = 25; - -pub const NFT_SET_ANONYMOUS: c_int = 0x1; -pub const NFT_SET_CONSTANT: c_int = 0x2; -pub const NFT_SET_INTERVAL: c_int = 0x4; -pub const NFT_SET_MAP: c_int = 0x8; -pub const NFT_SET_TIMEOUT: c_int = 0x10; -pub const NFT_SET_EVAL: c_int = 0x20; - -pub const NFT_SET_POL_PERFORMANCE: c_int = 0; -pub const NFT_SET_POL_MEMORY: c_int = 1; - -pub const NFT_SET_ELEM_INTERVAL_END: c_int = 0x1; - -pub const NFT_DATA_VALUE: c_uint = 0; -pub const NFT_DATA_VERDICT: c_uint = 0xffffff00; - -pub const NFT_DATA_RESERVED_MASK: c_uint = 0xffffff00; - -pub const NFT_DATA_VALUE_MAXLEN: c_int = 64; - -pub const NFT_BYTEORDER_NTOH: c_int = 0; -pub const NFT_BYTEORDER_HTON: c_int = 1; - -pub const NFT_CMP_EQ: c_int = 0; -pub const NFT_CMP_NEQ: c_int = 1; -pub const NFT_CMP_LT: c_int = 2; -pub const NFT_CMP_LTE: c_int = 3; -pub const NFT_CMP_GT: c_int = 4; -pub const NFT_CMP_GTE: c_int = 5; - -pub const NFT_RANGE_EQ: c_int = 0; -pub const NFT_RANGE_NEQ: c_int = 1; - -pub const NFT_LOOKUP_F_INV: c_int = 1 << 0; - -pub const NFT_DYNSET_OP_ADD: c_int = 0; -pub const NFT_DYNSET_OP_UPDATE: c_int = 1; - -pub const NFT_DYNSET_F_INV: c_int = 1 << 0; - -pub const NFT_PAYLOAD_LL_HEADER: c_int = 0; -pub const NFT_PAYLOAD_NETWORK_HEADER: c_int = 1; -pub const NFT_PAYLOAD_TRANSPORT_HEADER: c_int = 2; - -pub const NFT_PAYLOAD_CSUM_NONE: c_int = 0; -pub const NFT_PAYLOAD_CSUM_INET: c_int = 1; - -pub const NFT_META_LEN: c_int = 0; -pub const NFT_META_PROTOCOL: c_int = 1; -pub const NFT_META_PRIORITY: c_int = 2; -pub const NFT_META_MARK: c_int = 3; -pub const NFT_META_IIF: c_int = 4; -pub const NFT_META_OIF: c_int = 5; -pub 
const NFT_META_IIFNAME: c_int = 6; -pub const NFT_META_OIFNAME: c_int = 7; -pub const NFT_META_IIFTYPE: c_int = 8; -pub const NFT_META_OIFTYPE: c_int = 9; -pub const NFT_META_SKUID: c_int = 10; -pub const NFT_META_SKGID: c_int = 11; -pub const NFT_META_NFTRACE: c_int = 12; -pub const NFT_META_RTCLASSID: c_int = 13; -pub const NFT_META_SECMARK: c_int = 14; -pub const NFT_META_NFPROTO: c_int = 15; -pub const NFT_META_L4PROTO: c_int = 16; -pub const NFT_META_BRI_IIFNAME: c_int = 17; -pub const NFT_META_BRI_OIFNAME: c_int = 18; -pub const NFT_META_PKTTYPE: c_int = 19; -pub const NFT_META_CPU: c_int = 20; -pub const NFT_META_IIFGROUP: c_int = 21; -pub const NFT_META_OIFGROUP: c_int = 22; -pub const NFT_META_CGROUP: c_int = 23; -pub const NFT_META_PRANDOM: c_int = 24; - -pub const NFT_CT_STATE: c_int = 0; -pub const NFT_CT_DIRECTION: c_int = 1; -pub const NFT_CT_STATUS: c_int = 2; -pub const NFT_CT_MARK: c_int = 3; -pub const NFT_CT_SECMARK: c_int = 4; -pub const NFT_CT_EXPIRATION: c_int = 5; -pub const NFT_CT_HELPER: c_int = 6; -pub const NFT_CT_L3PROTOCOL: c_int = 7; -pub const NFT_CT_SRC: c_int = 8; -pub const NFT_CT_DST: c_int = 9; -pub const NFT_CT_PROTOCOL: c_int = 10; -pub const NFT_CT_PROTO_SRC: c_int = 11; -pub const NFT_CT_PROTO_DST: c_int = 12; -pub const NFT_CT_LABELS: c_int = 13; -pub const NFT_CT_PKTS: c_int = 14; -pub const NFT_CT_BYTES: c_int = 15; -pub const NFT_CT_AVGPKT: c_int = 16; -pub const NFT_CT_ZONE: c_int = 17; -pub const NFT_CT_EVENTMASK: c_int = 18; -pub const NFT_CT_SRC_IP: c_int = 19; -pub const NFT_CT_DST_IP: c_int = 20; -pub const NFT_CT_SRC_IP6: c_int = 21; -pub const NFT_CT_DST_IP6: c_int = 22; - -pub const NFT_LIMIT_PKTS: c_int = 0; -pub const NFT_LIMIT_PKT_BYTES: c_int = 1; - -pub const NFT_LIMIT_F_INV: c_int = 1 << 0; - -pub const NFT_QUEUE_FLAG_BYPASS: c_int = 0x01; -pub const NFT_QUEUE_FLAG_CPU_FANOUT: c_int = 0x02; -pub const NFT_QUEUE_FLAG_MASK: c_int = 0x03; - -pub const NFT_QUOTA_F_INV: c_int = 1 << 0; - -pub const NFT_REJECT_ICMP_UNREACH: c_int = 0; -pub const NFT_REJECT_TCP_RST: c_int = 1; -pub const NFT_REJECT_ICMPX_UNREACH: c_int = 2; - -pub const NFT_REJECT_ICMPX_NO_ROUTE: c_int = 0; -pub const NFT_REJECT_ICMPX_PORT_UNREACH: c_int = 1; -pub const NFT_REJECT_ICMPX_HOST_UNREACH: c_int = 2; -pub const NFT_REJECT_ICMPX_ADMIN_PROHIBITED: c_int = 3; - -pub const NFT_NAT_SNAT: c_int = 0; -pub const NFT_NAT_DNAT: c_int = 1; - -pub const NFT_TRACETYPE_UNSPEC: c_int = 0; -pub const NFT_TRACETYPE_POLICY: c_int = 1; -pub const NFT_TRACETYPE_RETURN: c_int = 2; -pub const NFT_TRACETYPE_RULE: c_int = 3; - -pub const NFT_NG_INCREMENTAL: c_int = 0; -pub const NFT_NG_RANDOM: c_int = 1; - -// linux/input.h -pub const FF_MAX: __u16 = 0x7f; -pub const FF_CNT: usize = FF_MAX as usize + 1; - -// linux/input-event-codes.h -pub const INPUT_PROP_POINTER: __u16 = 0x00; -pub const INPUT_PROP_DIRECT: __u16 = 0x01; -pub const INPUT_PROP_BUTTONPAD: __u16 = 0x02; -pub const INPUT_PROP_SEMI_MT: __u16 = 0x03; -pub const INPUT_PROP_TOPBUTTONPAD: __u16 = 0x04; -pub const INPUT_PROP_POINTING_STICK: __u16 = 0x05; -pub const INPUT_PROP_ACCELEROMETER: __u16 = 0x06; -pub const INPUT_PROP_MAX: __u16 = 0x1f; -pub const INPUT_PROP_CNT: usize = INPUT_PROP_MAX as usize + 1; -pub const EV_MAX: __u16 = 0x1f; -pub const EV_CNT: usize = EV_MAX as usize + 1; -pub const SYN_MAX: __u16 = 0xf; -pub const SYN_CNT: usize = SYN_MAX as usize + 1; -pub const KEY_MAX: __u16 = 0x2ff; -pub const KEY_CNT: usize = KEY_MAX as usize + 1; -pub const REL_MAX: __u16 = 0x0f; -pub const REL_CNT: usize = REL_MAX as 
usize + 1; -pub const ABS_MAX: __u16 = 0x3f; -pub const ABS_CNT: usize = ABS_MAX as usize + 1; -pub const SW_MAX: __u16 = 0x10; -pub const SW_CNT: usize = SW_MAX as usize + 1; -pub const MSC_MAX: __u16 = 0x07; -pub const MSC_CNT: usize = MSC_MAX as usize + 1; -pub const LED_MAX: __u16 = 0x0f; -pub const LED_CNT: usize = LED_MAX as usize + 1; -pub const REP_MAX: __u16 = 0x01; -pub const REP_CNT: usize = REP_MAX as usize + 1; -pub const SND_MAX: __u16 = 0x07; -pub const SND_CNT: usize = SND_MAX as usize + 1; - -// linux/uinput.h -pub const UINPUT_VERSION: c_uint = 5; -pub const UINPUT_MAX_NAME_SIZE: usize = 80; - -// uapi/linux/fanotify.h -pub const FAN_ACCESS: u64 = 0x0000_0001; -pub const FAN_MODIFY: u64 = 0x0000_0002; -pub const FAN_ATTRIB: u64 = 0x0000_0004; -pub const FAN_CLOSE_WRITE: u64 = 0x0000_0008; -pub const FAN_CLOSE_NOWRITE: u64 = 0x0000_0010; -pub const FAN_OPEN: u64 = 0x0000_0020; -pub const FAN_MOVED_FROM: u64 = 0x0000_0040; -pub const FAN_MOVED_TO: u64 = 0x0000_0080; -pub const FAN_CREATE: u64 = 0x0000_0100; -pub const FAN_DELETE: u64 = 0x0000_0200; -pub const FAN_DELETE_SELF: u64 = 0x0000_0400; -pub const FAN_MOVE_SELF: u64 = 0x0000_0800; -pub const FAN_OPEN_EXEC: u64 = 0x0000_1000; - -pub const FAN_Q_OVERFLOW: u64 = 0x0000_4000; -pub const FAN_FS_ERROR: u64 = 0x0000_8000; - -pub const FAN_OPEN_PERM: u64 = 0x0001_0000; -pub const FAN_ACCESS_PERM: u64 = 0x0002_0000; -pub const FAN_OPEN_EXEC_PERM: u64 = 0x0004_0000; - -pub const FAN_EVENT_ON_CHILD: u64 = 0x0800_0000; - -pub const FAN_RENAME: u64 = 0x1000_0000; - -pub const FAN_ONDIR: u64 = 0x4000_0000; - -pub const FAN_CLOSE: u64 = FAN_CLOSE_WRITE | FAN_CLOSE_NOWRITE; -pub const FAN_MOVE: u64 = FAN_MOVED_FROM | FAN_MOVED_TO; - -pub const FAN_CLOEXEC: c_uint = 0x0000_0001; -pub const FAN_NONBLOCK: c_uint = 0x0000_0002; - -pub const FAN_CLASS_NOTIF: c_uint = 0x0000_0000; -pub const FAN_CLASS_CONTENT: c_uint = 0x0000_0004; -pub const FAN_CLASS_PRE_CONTENT: c_uint = 0x0000_0008; - -pub const FAN_UNLIMITED_QUEUE: c_uint = 0x0000_0010; -pub const FAN_UNLIMITED_MARKS: c_uint = 0x0000_0020; -pub const FAN_ENABLE_AUDIT: c_uint = 0x0000_0040; - -pub const FAN_REPORT_PIDFD: c_uint = 0x0000_0080; -pub const FAN_REPORT_TID: c_uint = 0x0000_0100; -pub const FAN_REPORT_FID: c_uint = 0x0000_0200; -pub const FAN_REPORT_DIR_FID: c_uint = 0x0000_0400; -pub const FAN_REPORT_NAME: c_uint = 0x0000_0800; -pub const FAN_REPORT_TARGET_FID: c_uint = 0x0000_1000; - -pub const FAN_REPORT_DFID_NAME: c_uint = FAN_REPORT_DIR_FID | FAN_REPORT_NAME; -pub const FAN_REPORT_DFID_NAME_TARGET: c_uint = - FAN_REPORT_DFID_NAME | FAN_REPORT_FID | FAN_REPORT_TARGET_FID; - -pub const FAN_MARK_ADD: c_uint = 0x0000_0001; -pub const FAN_MARK_REMOVE: c_uint = 0x0000_0002; -pub const FAN_MARK_DONT_FOLLOW: c_uint = 0x0000_0004; -pub const FAN_MARK_ONLYDIR: c_uint = 0x0000_0008; -pub const FAN_MARK_IGNORED_MASK: c_uint = 0x0000_0020; -pub const FAN_MARK_IGNORED_SURV_MODIFY: c_uint = 0x0000_0040; -pub const FAN_MARK_FLUSH: c_uint = 0x0000_0080; -pub const FAN_MARK_EVICTABLE: c_uint = 0x0000_0200; -pub const FAN_MARK_IGNORE: c_uint = 0x0000_0400; - -pub const FAN_MARK_INODE: c_uint = 0x0000_0000; -pub const FAN_MARK_MOUNT: c_uint = 0x0000_0010; -pub const FAN_MARK_FILESYSTEM: c_uint = 0x0000_0100; - -pub const FAN_MARK_IGNORE_SURV: c_uint = FAN_MARK_IGNORE | FAN_MARK_IGNORED_SURV_MODIFY; - -pub const FANOTIFY_METADATA_VERSION: u8 = 3; - -pub const FAN_EVENT_INFO_TYPE_FID: u8 = 1; -pub const FAN_EVENT_INFO_TYPE_DFID_NAME: u8 = 2; -pub const FAN_EVENT_INFO_TYPE_DFID: u8 = 3; 
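(Editorial aside, not part of the diff hunk: the fanotify event masks and init/mark flags listed above are consumed through the crate's fanotify_init()/fanotify_mark() bindings. A minimal usage sketch follows, assuming the vendored bindings match upstream libc's signatures (crates.io `libc` paths are used), that the caller has CAP_SYS_ADMIN, and that "/tmp" is only a placeholder path.)

use std::ffi::CString;

fn watch_close_write() -> std::io::Result<()> {
    unsafe {
        // Plain notification group; the event fd is opened close-on-exec.
        let fd = libc::fanotify_init(
            libc::FAN_CLASS_NOTIF | libc::FAN_CLOEXEC,
            libc::O_RDONLY as libc::c_uint,
        );
        if fd < 0 {
            return Err(std::io::Error::last_os_error());
        }
        // Report files directly under /tmp that are closed after being written.
        let path = CString::new("/tmp").unwrap();
        let mask = libc::FAN_CLOSE_WRITE | libc::FAN_EVENT_ON_CHILD;
        if libc::fanotify_mark(fd, libc::FAN_MARK_ADD, mask, libc::AT_FDCWD, path.as_ptr()) < 0 {
            let err = std::io::Error::last_os_error();
            libc::close(fd);
            return Err(err);
        }
        // ... read fanotify_event_metadata records from `fd` here ...
        libc::close(fd);
    }
    Ok(())
}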
-pub const FAN_EVENT_INFO_TYPE_PIDFD: u8 = 4; -pub const FAN_EVENT_INFO_TYPE_ERROR: u8 = 5; - -pub const FAN_EVENT_INFO_TYPE_OLD_DFID_NAME: u8 = 10; -pub const FAN_EVENT_INFO_TYPE_NEW_DFID_NAME: u8 = 12; - -pub const FAN_RESPONSE_INFO_NONE: u8 = 0; -pub const FAN_RESPONSE_INFO_AUDIT_RULE: u8 = 1; - -pub const FAN_ALLOW: u32 = 0x01; -pub const FAN_DENY: u32 = 0x02; -pub const FAN_AUDIT: u32 = 0x10; -pub const FAN_INFO: u32 = 0x20; - -pub const FAN_NOFD: c_int = -1; -pub const FAN_NOPIDFD: c_int = FAN_NOFD; -pub const FAN_EPIDFD: c_int = -2; - -// linux/futex.h -pub const FUTEX_WAIT: c_int = 0; -pub const FUTEX_WAKE: c_int = 1; -pub const FUTEX_FD: c_int = 2; -pub const FUTEX_REQUEUE: c_int = 3; -pub const FUTEX_CMP_REQUEUE: c_int = 4; -pub const FUTEX_WAKE_OP: c_int = 5; -pub const FUTEX_LOCK_PI: c_int = 6; -pub const FUTEX_UNLOCK_PI: c_int = 7; -pub const FUTEX_TRYLOCK_PI: c_int = 8; -pub const FUTEX_WAIT_BITSET: c_int = 9; -pub const FUTEX_WAKE_BITSET: c_int = 10; -pub const FUTEX_WAIT_REQUEUE_PI: c_int = 11; -pub const FUTEX_CMP_REQUEUE_PI: c_int = 12; -pub const FUTEX_LOCK_PI2: c_int = 13; - -pub const FUTEX_PRIVATE_FLAG: c_int = 128; -pub const FUTEX_CLOCK_REALTIME: c_int = 256; -pub const FUTEX_CMD_MASK: c_int = !(FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME); - -pub const FUTEX_WAITERS: u32 = 0x80000000; -pub const FUTEX_OWNER_DIED: u32 = 0x40000000; -pub const FUTEX_TID_MASK: u32 = 0x3fffffff; - -pub const FUTEX_BITSET_MATCH_ANY: c_int = 0xffffffff; - -pub const FUTEX_OP_SET: c_int = 0; -pub const FUTEX_OP_ADD: c_int = 1; -pub const FUTEX_OP_OR: c_int = 2; -pub const FUTEX_OP_ANDN: c_int = 3; -pub const FUTEX_OP_XOR: c_int = 4; - -pub const FUTEX_OP_OPARG_SHIFT: c_int = 8; - -pub const FUTEX_OP_CMP_EQ: c_int = 0; -pub const FUTEX_OP_CMP_NE: c_int = 1; -pub const FUTEX_OP_CMP_LT: c_int = 2; -pub const FUTEX_OP_CMP_LE: c_int = 3; -pub const FUTEX_OP_CMP_GT: c_int = 4; -pub const FUTEX_OP_CMP_GE: c_int = 5; - -pub fn FUTEX_OP(op: c_int, oparg: c_int, cmp: c_int, cmparg: c_int) -> c_int { - ((op & 0xf) << 28) | ((cmp & 0xf) << 24) | ((oparg & 0xfff) << 12) | (cmparg & 0xfff) -} - -// linux/kexec.h -pub const KEXEC_ON_CRASH: c_int = 0x00000001; -pub const KEXEC_PRESERVE_CONTEXT: c_int = 0x00000002; -pub const KEXEC_ARCH_MASK: c_int = 0xffff0000; -pub const KEXEC_FILE_UNLOAD: c_int = 0x00000001; -pub const KEXEC_FILE_ON_CRASH: c_int = 0x00000002; -pub const KEXEC_FILE_NO_INITRAMFS: c_int = 0x00000004; - -// linux/reboot.h -pub const LINUX_REBOOT_MAGIC1: c_int = 0xfee1dead; -pub const LINUX_REBOOT_MAGIC2: c_int = 672274793; -pub const LINUX_REBOOT_MAGIC2A: c_int = 85072278; -pub const LINUX_REBOOT_MAGIC2B: c_int = 369367448; -pub const LINUX_REBOOT_MAGIC2C: c_int = 537993216; - -pub const LINUX_REBOOT_CMD_RESTART: c_int = 0x01234567; -pub const LINUX_REBOOT_CMD_HALT: c_int = 0xCDEF0123; -pub const LINUX_REBOOT_CMD_CAD_ON: c_int = 0x89ABCDEF; -pub const LINUX_REBOOT_CMD_CAD_OFF: c_int = 0x00000000; -pub const LINUX_REBOOT_CMD_POWER_OFF: c_int = 0x4321FEDC; -pub const LINUX_REBOOT_CMD_RESTART2: c_int = 0xA1B2C3D4; -pub const LINUX_REBOOT_CMD_SW_SUSPEND: c_int = 0xD000FCE2; -pub const LINUX_REBOOT_CMD_KEXEC: c_int = 0x45584543; - -pub const REG_EXTENDED: c_int = 1; -pub const REG_ICASE: c_int = 2; -pub const REG_NEWLINE: c_int = 4; -pub const REG_NOSUB: c_int = 8; - -pub const REG_NOTBOL: c_int = 1; -pub const REG_NOTEOL: c_int = 2; - -pub const REG_ENOSYS: c_int = -1; -pub const REG_NOMATCH: c_int = 1; -pub const REG_BADPAT: c_int = 2; -pub const REG_ECOLLATE: c_int = 3; -pub const REG_ECTYPE: 
c_int = 4; -pub const REG_EESCAPE: c_int = 5; -pub const REG_ESUBREG: c_int = 6; -pub const REG_EBRACK: c_int = 7; -pub const REG_EPAREN: c_int = 8; -pub const REG_EBRACE: c_int = 9; -pub const REG_BADBR: c_int = 10; -pub const REG_ERANGE: c_int = 11; -pub const REG_ESPACE: c_int = 12; -pub const REG_BADRPT: c_int = 13; - -// linux/errqueue.h -pub const SO_EE_ORIGIN_NONE: u8 = 0; -pub const SO_EE_ORIGIN_LOCAL: u8 = 1; -pub const SO_EE_ORIGIN_ICMP: u8 = 2; -pub const SO_EE_ORIGIN_ICMP6: u8 = 3; -pub const SO_EE_ORIGIN_TXSTATUS: u8 = 4; -pub const SO_EE_ORIGIN_TIMESTAMPING: u8 = SO_EE_ORIGIN_TXSTATUS; - -// errno.h -pub const EPERM: c_int = 1; -pub const ENOENT: c_int = 2; -pub const ESRCH: c_int = 3; -pub const EINTR: c_int = 4; -pub const EIO: c_int = 5; -pub const ENXIO: c_int = 6; -pub const E2BIG: c_int = 7; -pub const ENOEXEC: c_int = 8; -pub const EBADF: c_int = 9; -pub const ECHILD: c_int = 10; -pub const EAGAIN: c_int = 11; -pub const ENOMEM: c_int = 12; -pub const EACCES: c_int = 13; -pub const EFAULT: c_int = 14; -pub const ENOTBLK: c_int = 15; -pub const EBUSY: c_int = 16; -pub const EEXIST: c_int = 17; -pub const EXDEV: c_int = 18; -pub const ENODEV: c_int = 19; -pub const ENOTDIR: c_int = 20; -pub const EISDIR: c_int = 21; -pub const EINVAL: c_int = 22; -pub const ENFILE: c_int = 23; -pub const EMFILE: c_int = 24; -pub const ENOTTY: c_int = 25; -pub const ETXTBSY: c_int = 26; -pub const EFBIG: c_int = 27; -pub const ENOSPC: c_int = 28; -pub const ESPIPE: c_int = 29; -pub const EROFS: c_int = 30; -pub const EMLINK: c_int = 31; -pub const EPIPE: c_int = 32; -pub const EDOM: c_int = 33; -pub const ERANGE: c_int = 34; -pub const EWOULDBLOCK: c_int = EAGAIN; - -// linux/sctp.h -pub const SCTP_FUTURE_ASSOC: c_int = 0; -pub const SCTP_CURRENT_ASSOC: c_int = 1; -pub const SCTP_ALL_ASSOC: c_int = 2; -pub const SCTP_RTOINFO: c_int = 0; -pub const SCTP_ASSOCINFO: c_int = 1; -pub const SCTP_INITMSG: c_int = 2; -pub const SCTP_NODELAY: c_int = 3; -pub const SCTP_AUTOCLOSE: c_int = 4; -pub const SCTP_SET_PEER_PRIMARY_ADDR: c_int = 5; -pub const SCTP_PRIMARY_ADDR: c_int = 6; -pub const SCTP_ADAPTATION_LAYER: c_int = 7; -pub const SCTP_DISABLE_FRAGMENTS: c_int = 8; -pub const SCTP_PEER_ADDR_PARAMS: c_int = 9; -pub const SCTP_DEFAULT_SEND_PARAM: c_int = 10; -pub const SCTP_EVENTS: c_int = 11; -pub const SCTP_I_WANT_MAPPED_V4_ADDR: c_int = 12; -pub const SCTP_MAXSEG: c_int = 13; -pub const SCTP_STATUS: c_int = 14; -pub const SCTP_GET_PEER_ADDR_INFO: c_int = 15; -pub const SCTP_DELAYED_ACK_TIME: c_int = 16; -pub const SCTP_DELAYED_ACK: c_int = SCTP_DELAYED_ACK_TIME; -pub const SCTP_DELAYED_SACK: c_int = SCTP_DELAYED_ACK_TIME; -pub const SCTP_CONTEXT: c_int = 17; -pub const SCTP_FRAGMENT_INTERLEAVE: c_int = 18; -pub const SCTP_PARTIAL_DELIVERY_POINT: c_int = 19; -pub const SCTP_MAX_BURST: c_int = 20; -pub const SCTP_AUTH_CHUNK: c_int = 21; -pub const SCTP_HMAC_IDENT: c_int = 22; -pub const SCTP_AUTH_KEY: c_int = 23; -pub const SCTP_AUTH_ACTIVE_KEY: c_int = 24; -pub const SCTP_AUTH_DELETE_KEY: c_int = 25; -pub const SCTP_PEER_AUTH_CHUNKS: c_int = 26; -pub const SCTP_LOCAL_AUTH_CHUNKS: c_int = 27; -pub const SCTP_GET_ASSOC_NUMBER: c_int = 28; -pub const SCTP_GET_ASSOC_ID_LIST: c_int = 29; -pub const SCTP_AUTO_ASCONF: c_int = 30; -pub const SCTP_PEER_ADDR_THLDS: c_int = 31; -pub const SCTP_RECVRCVINFO: c_int = 32; -pub const SCTP_RECVNXTINFO: c_int = 33; -pub const SCTP_DEFAULT_SNDINFO: c_int = 34; -pub const SCTP_AUTH_DEACTIVATE_KEY: c_int = 35; -pub const SCTP_REUSE_PORT: c_int = 36; -pub const 
SCTP_PEER_ADDR_THLDS_V2: c_int = 37; -pub const SCTP_PR_SCTP_NONE: c_int = 0x0000; -pub const SCTP_PR_SCTP_TTL: c_int = 0x0010; -pub const SCTP_PR_SCTP_RTX: c_int = 0x0020; -pub const SCTP_PR_SCTP_PRIO: c_int = 0x0030; -pub const SCTP_PR_SCTP_MAX: c_int = SCTP_PR_SCTP_PRIO; -pub const SCTP_PR_SCTP_MASK: c_int = 0x0030; -pub const SCTP_ENABLE_RESET_STREAM_REQ: c_int = 0x01; -pub const SCTP_ENABLE_RESET_ASSOC_REQ: c_int = 0x02; -pub const SCTP_ENABLE_CHANGE_ASSOC_REQ: c_int = 0x04; -pub const SCTP_ENABLE_STRRESET_MASK: c_int = 0x07; -pub const SCTP_STREAM_RESET_INCOMING: c_int = 0x01; -pub const SCTP_STREAM_RESET_OUTGOING: c_int = 0x02; - -pub const SCTP_INIT: c_int = 0; -pub const SCTP_SNDRCV: c_int = 1; -pub const SCTP_SNDINFO: c_int = 2; -pub const SCTP_RCVINFO: c_int = 3; -pub const SCTP_NXTINFO: c_int = 4; -pub const SCTP_PRINFO: c_int = 5; -pub const SCTP_AUTHINFO: c_int = 6; -pub const SCTP_DSTADDRV4: c_int = 7; -pub const SCTP_DSTADDRV6: c_int = 8; - -pub const SCTP_UNORDERED: c_int = 1 << 0; -pub const SCTP_ADDR_OVER: c_int = 1 << 1; -pub const SCTP_ABORT: c_int = 1 << 2; -pub const SCTP_SACK_IMMEDIATELY: c_int = 1 << 3; -pub const SCTP_SENDALL: c_int = 1 << 6; -pub const SCTP_PR_SCTP_ALL: c_int = 1 << 7; -pub const SCTP_NOTIFICATION: c_int = MSG_NOTIFICATION; -pub const SCTP_EOF: c_int = crate::MSG_FIN; - -/* DCCP socket options */ -pub const DCCP_SOCKOPT_PACKET_SIZE: c_int = 1; -pub const DCCP_SOCKOPT_SERVICE: c_int = 2; -pub const DCCP_SOCKOPT_CHANGE_L: c_int = 3; -pub const DCCP_SOCKOPT_CHANGE_R: c_int = 4; -pub const DCCP_SOCKOPT_GET_CUR_MPS: c_int = 5; -pub const DCCP_SOCKOPT_SERVER_TIMEWAIT: c_int = 6; -pub const DCCP_SOCKOPT_SEND_CSCOV: c_int = 10; -pub const DCCP_SOCKOPT_RECV_CSCOV: c_int = 11; -pub const DCCP_SOCKOPT_AVAILABLE_CCIDS: c_int = 12; -pub const DCCP_SOCKOPT_CCID: c_int = 13; -pub const DCCP_SOCKOPT_TX_CCID: c_int = 14; -pub const DCCP_SOCKOPT_RX_CCID: c_int = 15; -pub const DCCP_SOCKOPT_QPOLICY_ID: c_int = 16; -pub const DCCP_SOCKOPT_QPOLICY_TXQLEN: c_int = 17; -pub const DCCP_SOCKOPT_CCID_RX_INFO: c_int = 128; -pub const DCCP_SOCKOPT_CCID_TX_INFO: c_int = 192; - -/// maximum number of services provided on the same listening port -pub const DCCP_SERVICE_LIST_MAX_LEN: c_int = 32; - -pub const CTL_KERN: c_int = 1; -pub const CTL_VM: c_int = 2; -pub const CTL_NET: c_int = 3; -pub const CTL_FS: c_int = 5; -pub const CTL_DEBUG: c_int = 6; -pub const CTL_DEV: c_int = 7; -pub const CTL_BUS: c_int = 8; -pub const CTL_ABI: c_int = 9; -pub const CTL_CPU: c_int = 10; - -pub const CTL_BUS_ISA: c_int = 1; - -pub const INOTIFY_MAX_USER_INSTANCES: c_int = 1; -pub const INOTIFY_MAX_USER_WATCHES: c_int = 2; -pub const INOTIFY_MAX_QUEUED_EVENTS: c_int = 3; - -pub const KERN_OSTYPE: c_int = 1; -pub const KERN_OSRELEASE: c_int = 2; -pub const KERN_OSREV: c_int = 3; -pub const KERN_VERSION: c_int = 4; -pub const KERN_SECUREMASK: c_int = 5; -pub const KERN_PROF: c_int = 6; -pub const KERN_NODENAME: c_int = 7; -pub const KERN_DOMAINNAME: c_int = 8; -pub const KERN_PANIC: c_int = 15; -pub const KERN_REALROOTDEV: c_int = 16; -pub const KERN_SPARC_REBOOT: c_int = 21; -pub const KERN_CTLALTDEL: c_int = 22; -pub const KERN_PRINTK: c_int = 23; -pub const KERN_NAMETRANS: c_int = 24; -pub const KERN_PPC_HTABRECLAIM: c_int = 25; -pub const KERN_PPC_ZEROPAGED: c_int = 26; -pub const KERN_PPC_POWERSAVE_NAP: c_int = 27; -pub const KERN_MODPROBE: c_int = 28; -pub const KERN_SG_BIG_BUFF: c_int = 29; -pub const KERN_ACCT: c_int = 30; -pub const KERN_PPC_L2CR: c_int = 31; -pub const KERN_RTSIGNR: 
c_int = 32; -pub const KERN_RTSIGMAX: c_int = 33; -pub const KERN_SHMMAX: c_int = 34; -pub const KERN_MSGMAX: c_int = 35; -pub const KERN_MSGMNB: c_int = 36; -pub const KERN_MSGPOOL: c_int = 37; -pub const KERN_SYSRQ: c_int = 38; -pub const KERN_MAX_THREADS: c_int = 39; -pub const KERN_RANDOM: c_int = 40; -pub const KERN_SHMALL: c_int = 41; -pub const KERN_MSGMNI: c_int = 42; -pub const KERN_SEM: c_int = 43; -pub const KERN_SPARC_STOP_A: c_int = 44; -pub const KERN_SHMMNI: c_int = 45; -pub const KERN_OVERFLOWUID: c_int = 46; -pub const KERN_OVERFLOWGID: c_int = 47; -pub const KERN_SHMPATH: c_int = 48; -pub const KERN_HOTPLUG: c_int = 49; -pub const KERN_IEEE_EMULATION_WARNINGS: c_int = 50; -pub const KERN_S390_USER_DEBUG_LOGGING: c_int = 51; -pub const KERN_CORE_USES_PID: c_int = 52; -pub const KERN_TAINTED: c_int = 53; -pub const KERN_CADPID: c_int = 54; -pub const KERN_PIDMAX: c_int = 55; -pub const KERN_CORE_PATTERN: c_int = 56; -pub const KERN_PANIC_ON_OOPS: c_int = 57; -pub const KERN_HPPA_PWRSW: c_int = 58; -pub const KERN_HPPA_UNALIGNED: c_int = 59; -pub const KERN_PRINTK_RATELIMIT: c_int = 60; -pub const KERN_PRINTK_RATELIMIT_BURST: c_int = 61; -pub const KERN_PTY: c_int = 62; -pub const KERN_NGROUPS_MAX: c_int = 63; -pub const KERN_SPARC_SCONS_PWROFF: c_int = 64; -pub const KERN_HZ_TIMER: c_int = 65; -pub const KERN_UNKNOWN_NMI_PANIC: c_int = 66; -pub const KERN_BOOTLOADER_TYPE: c_int = 67; -pub const KERN_RANDOMIZE: c_int = 68; -pub const KERN_SETUID_DUMPABLE: c_int = 69; -pub const KERN_SPIN_RETRY: c_int = 70; -pub const KERN_ACPI_VIDEO_FLAGS: c_int = 71; -pub const KERN_IA64_UNALIGNED: c_int = 72; -pub const KERN_COMPAT_LOG: c_int = 73; -pub const KERN_MAX_LOCK_DEPTH: c_int = 74; -pub const KERN_NMI_WATCHDOG: c_int = 75; -pub const KERN_PANIC_ON_NMI: c_int = 76; - -pub const VM_OVERCOMMIT_MEMORY: c_int = 5; -pub const VM_PAGE_CLUSTER: c_int = 10; -pub const VM_DIRTY_BACKGROUND: c_int = 11; -pub const VM_DIRTY_RATIO: c_int = 12; -pub const VM_DIRTY_WB_CS: c_int = 13; -pub const VM_DIRTY_EXPIRE_CS: c_int = 14; -pub const VM_NR_PDFLUSH_THREADS: c_int = 15; -pub const VM_OVERCOMMIT_RATIO: c_int = 16; -pub const VM_PAGEBUF: c_int = 17; -pub const VM_HUGETLB_PAGES: c_int = 18; -pub const VM_SWAPPINESS: c_int = 19; -pub const VM_LOWMEM_RESERVE_RATIO: c_int = 20; -pub const VM_MIN_FREE_KBYTES: c_int = 21; -pub const VM_MAX_MAP_COUNT: c_int = 22; -pub const VM_LAPTOP_MODE: c_int = 23; -pub const VM_BLOCK_DUMP: c_int = 24; -pub const VM_HUGETLB_GROUP: c_int = 25; -pub const VM_VFS_CACHE_PRESSURE: c_int = 26; -pub const VM_LEGACY_VA_LAYOUT: c_int = 27; -pub const VM_SWAP_TOKEN_TIMEOUT: c_int = 28; -pub const VM_DROP_PAGECACHE: c_int = 29; -pub const VM_PERCPU_PAGELIST_FRACTION: c_int = 30; -pub const VM_ZONE_RECLAIM_MODE: c_int = 31; -pub const VM_MIN_UNMAPPED: c_int = 32; -pub const VM_PANIC_ON_OOM: c_int = 33; -pub const VM_VDSO_ENABLED: c_int = 34; -pub const VM_MIN_SLAB: c_int = 35; - -pub const NET_CORE: c_int = 1; -pub const NET_ETHER: c_int = 2; -pub const NET_802: c_int = 3; -pub const NET_UNIX: c_int = 4; -pub const NET_IPV4: c_int = 5; -pub const NET_IPX: c_int = 6; -pub const NET_ATALK: c_int = 7; -pub const NET_NETROM: c_int = 8; -pub const NET_AX25: c_int = 9; -pub const NET_BRIDGE: c_int = 10; -pub const NET_ROSE: c_int = 11; -pub const NET_IPV6: c_int = 12; -pub const NET_X25: c_int = 13; -pub const NET_TR: c_int = 14; -pub const NET_DECNET: c_int = 15; -pub const NET_ECONET: c_int = 16; -pub const NET_SCTP: c_int = 17; -pub const NET_LLC: c_int = 18; -pub const 
NET_NETFILTER: c_int = 19; -pub const NET_DCCP: c_int = 20; -pub const NET_IRDA: c_int = 412; - -// include/linux/sched.h -/// I'm a virtual CPU. -pub const PF_VCPU: c_int = 0x00000001; -/// I am an IDLE thread. -pub const PF_IDLE: c_int = 0x00000002; -/// Getting shut down. -pub const PF_EXITING: c_int = 0x00000004; -/// Coredumps should ignore this task. -pub const PF_POSTCOREDUMP: c_int = 0x00000008; -/// Task is an IO worker. -pub const PF_IO_WORKER: c_int = 0x00000010; -/// I'm a workqueue worker. -pub const PF_WQ_WORKER: c_int = 0x00000020; -/// Forked but didn't exec. -pub const PF_FORKNOEXEC: c_int = 0x00000040; -/// Process policy on mce errors. -pub const PF_MCE_PROCESS: c_int = 0x00000080; -/// Used super-user privileges. -pub const PF_SUPERPRIV: c_int = 0x00000100; -/// Dumped core. -pub const PF_DUMPCORE: c_int = 0x00000200; -/// Killed by a signal. -pub const PF_SIGNALED: c_int = 0x00000400; -/// Allocating memory to free memory. -/// -/// See `memalloc_noreclaim_save()`. -pub const PF_MEMALLOC: c_int = 0x00000800; -/// `set_user()` noticed that `RLIMIT_NPROC` was exceeded. -pub const PF_NPROC_EXCEEDED: c_int = 0x00001000; -/// If unset the fpu must be initialized before use. -pub const PF_USED_MATH: c_int = 0x00002000; -/// Kernel thread cloned from userspace thread. -pub const PF_USER_WORKER: c_int = 0x00004000; -/// This thread should not be frozen. -pub const PF_NOFREEZE: c_int = 0x00008000; -/// I am `kswapd`. -pub const PF_KSWAPD: c_int = 0x00020000; -/// All allocations inherit `GFP_NOFS`. -/// -/// See `memalloc_nfs_save()`. -pub const PF_MEMALLOC_NOFS: c_int = 0x00040000; -/// All allocations inherit `GFP_NOIO`. -/// -/// See `memalloc_noio_save()`. -pub const PF_MEMALLOC_NOIO: c_int = 0x00080000; -/// Throttle writes only against the bdi I write to, I am cleaning -/// dirty pages from some other bdi. -pub const PF_LOCAL_THROTTLE: c_int = 0x00100000; -/// I am a kernel thread. -pub const PF_KTHREAD: c_int = 0x00200000; -/// Randomize virtual address space. -pub const PF_RANDOMIZE: c_int = 0x00400000; -/// Userland is not allowed to meddle with `cpus_mask`. -pub const PF_NO_SETAFFINITY: c_int = 0x04000000; -/// Early kill for mce process policy. -pub const PF_MCE_EARLY: c_int = 0x08000000; -/// Allocations constrained to zones which allow long term pinning. -/// -/// See `memalloc_pin_save()`. -pub const PF_MEMALLOC_PIN: c_int = 0x10000000; -/// Plug has ts that needs updating. -pub const PF_BLOCK_TS: c_int = 0x20000000; -/// This thread called `freeze_processes()` and should not be frozen. -pub const PF_SUSPEND_TASK: c_int = PF_SUSPEND_TASK_UINT as _; -// The used value is the highest possible bit fitting on 32 bits, so directly -// defining it as a signed integer causes the compiler to report an overflow. -// Use instead a private intermediary that assuringly has the correct type and -// cast it where necessary to the wanted final type, which preserves the -// desired information as-is in terms of integer representation. 
-const PF_SUSPEND_TASK_UINT: c_uint = 0x80000000; - -pub const CSIGNAL: c_int = 0x000000ff; - -pub const SCHED_NORMAL: c_int = 0; -pub const SCHED_OTHER: c_int = 0; -pub const SCHED_FIFO: c_int = 1; -pub const SCHED_RR: c_int = 2; -pub const SCHED_BATCH: c_int = 3; -pub const SCHED_IDLE: c_int = 5; -pub const SCHED_DEADLINE: c_int = 6; - -pub const SCHED_RESET_ON_FORK: c_int = 0x40000000; - -pub const CLONE_PIDFD: c_int = 0x1000; - -pub const SCHED_FLAG_RESET_ON_FORK: c_int = 0x01; -pub const SCHED_FLAG_RECLAIM: c_int = 0x02; -pub const SCHED_FLAG_DL_OVERRUN: c_int = 0x04; -pub const SCHED_FLAG_KEEP_POLICY: c_int = 0x08; -pub const SCHED_FLAG_KEEP_PARAMS: c_int = 0x10; -pub const SCHED_FLAG_UTIL_CLAMP_MIN: c_int = 0x20; -pub const SCHED_FLAG_UTIL_CLAMP_MAX: c_int = 0x40; - -// linux/if_xdp.h -pub const XDP_SHARED_UMEM: crate::__u16 = 1 << 0; -pub const XDP_COPY: crate::__u16 = 1 << 1; -pub const XDP_ZEROCOPY: crate::__u16 = 1 << 2; -pub const XDP_USE_NEED_WAKEUP: crate::__u16 = 1 << 3; -pub const XDP_USE_SG: crate::__u16 = 1 << 4; - -pub const XDP_UMEM_UNALIGNED_CHUNK_FLAG: crate::__u32 = 1 << 0; - -pub const XDP_RING_NEED_WAKEUP: crate::__u32 = 1 << 0; - -pub const XDP_MMAP_OFFSETS: c_int = 1; -pub const XDP_RX_RING: c_int = 2; -pub const XDP_TX_RING: c_int = 3; -pub const XDP_UMEM_REG: c_int = 4; -pub const XDP_UMEM_FILL_RING: c_int = 5; -pub const XDP_UMEM_COMPLETION_RING: c_int = 6; -pub const XDP_STATISTICS: c_int = 7; -pub const XDP_OPTIONS: c_int = 8; - -pub const XDP_OPTIONS_ZEROCOPY: crate::__u32 = 1 << 0; - -pub const XDP_PGOFF_RX_RING: crate::off_t = 0; -pub const XDP_PGOFF_TX_RING: crate::off_t = 0x80000000; -pub const XDP_UMEM_PGOFF_FILL_RING: crate::c_ulonglong = 0x100000000; -pub const XDP_UMEM_PGOFF_COMPLETION_RING: crate::c_ulonglong = 0x180000000; - -pub const XSK_UNALIGNED_BUF_OFFSET_SHIFT: crate::c_int = 48; -pub const XSK_UNALIGNED_BUF_ADDR_MASK: crate::c_ulonglong = - (1 << XSK_UNALIGNED_BUF_OFFSET_SHIFT) - 1; - -pub const XDP_PKT_CONTD: crate::__u32 = 1 << 0; - -pub const XDP_UMEM_TX_SW_CSUM: crate::__u32 = 1 << 1; -pub const XDP_UMEM_TX_METADATA_LEN: crate::__u32 = 1 << 2; - -pub const XDP_TXMD_FLAGS_TIMESTAMP: crate::__u32 = 1 << 0; -pub const XDP_TXMD_FLAGS_CHECKSUM: crate::__u32 = 1 << 1; - -pub const XDP_TX_METADATA: crate::__u32 = 1 << 1; - -pub const SOL_XDP: c_int = 283; - -// linux/mount.h -pub const MOUNT_ATTR_RDONLY: crate::__u64 = 0x00000001; -pub const MOUNT_ATTR_NOSUID: crate::__u64 = 0x00000002; -pub const MOUNT_ATTR_NODEV: crate::__u64 = 0x00000004; -pub const MOUNT_ATTR_NOEXEC: crate::__u64 = 0x00000008; -pub const MOUNT_ATTR__ATIME: crate::__u64 = 0x00000070; -pub const MOUNT_ATTR_RELATIME: crate::__u64 = 0x00000000; -pub const MOUNT_ATTR_NOATIME: crate::__u64 = 0x00000010; -pub const MOUNT_ATTR_STRICTATIME: crate::__u64 = 0x00000020; -pub const MOUNT_ATTR_NODIRATIME: crate::__u64 = 0x00000080; -pub const MOUNT_ATTR_IDMAP: crate::__u64 = 0x00100000; -pub const MOUNT_ATTR_NOSYMFOLLOW: crate::__u64 = 0x00200000; - -pub const MOUNT_ATTR_SIZE_VER0: c_int = 32; - -// elf.h -pub const NT_PRSTATUS: c_int = 1; -pub const NT_PRFPREG: c_int = 2; -pub const NT_FPREGSET: c_int = 2; -pub const NT_PRPSINFO: c_int = 3; -pub const NT_PRXREG: c_int = 4; -pub const NT_TASKSTRUCT: c_int = 4; -pub const NT_PLATFORM: c_int = 5; -pub const NT_AUXV: c_int = 6; -pub const NT_GWINDOWS: c_int = 7; -pub const NT_ASRS: c_int = 8; -pub const NT_PSTATUS: c_int = 10; -pub const NT_PSINFO: c_int = 13; -pub const NT_PRCRED: c_int = 14; -pub const NT_UTSNAME: c_int = 15; -pub const 
NT_LWPSTATUS: c_int = 16;
-pub const NT_LWPSINFO: c_int = 17;
-pub const NT_PRFPXREG: c_int = 20;
-
-pub const SCHED_FLAG_KEEP_ALL: c_int = SCHED_FLAG_KEEP_POLICY | SCHED_FLAG_KEEP_PARAMS;
-
-pub const SCHED_FLAG_UTIL_CLAMP: c_int = SCHED_FLAG_UTIL_CLAMP_MIN | SCHED_FLAG_UTIL_CLAMP_MAX;
-
-pub const SCHED_FLAG_ALL: c_int = SCHED_FLAG_RESET_ON_FORK
-    | SCHED_FLAG_RECLAIM
-    | SCHED_FLAG_DL_OVERRUN
-    | SCHED_FLAG_KEEP_ALL
-    | SCHED_FLAG_UTIL_CLAMP;
-
-// ioctl_eventpoll: added in Linux 6.9
-pub const EPIOCSPARAMS: Ioctl = 0x40088a01;
-pub const EPIOCGPARAMS: Ioctl = 0x80088a02;
-
-// siginfo.h
-pub const SI_DETHREAD: c_int = -7;
-pub const TRAP_PERF: c_int = 6;
-
-f! {
-    pub fn NLA_ALIGN(len: c_int) -> c_int {
-        return ((len) + NLA_ALIGNTO - 1) & !(NLA_ALIGNTO - 1);
-    }
-
-    pub fn CMSG_NXTHDR(mhdr: *const msghdr, cmsg: *const cmsghdr) -> *mut cmsghdr {
-        if ((*cmsg).cmsg_len as usize) < size_of::<cmsghdr>() {
-            return core::ptr::null_mut::<cmsghdr>();
-        }
-        let next = (cmsg as usize + super::CMSG_ALIGN((*cmsg).cmsg_len as usize)) as *mut cmsghdr;
-        let max = (*mhdr).msg_control as usize + (*mhdr).msg_controllen as usize;
-        if (next.wrapping_offset(1)) as usize > max
-            || next as usize + super::CMSG_ALIGN((*next).cmsg_len as usize) > max
-        {
-            core::ptr::null_mut::<cmsghdr>()
-        } else {
-            next
-        }
-    }
-
-    pub fn CPU_ALLOC_SIZE(count: c_int) -> size_t {
-        let _dummy: cpu_set_t = mem::zeroed();
-        let size_in_bits = 8 * size_of_val(&_dummy.bits[0]);
-        ((count as size_t + size_in_bits - 1) / 8) as size_t
-    }
-
-    pub fn CPU_ZERO(cpuset: &mut cpu_set_t) -> () {
-        for slot in &mut cpuset.bits {
-            *slot = 0;
-        }
-    }
-
-    pub fn CPU_SET(cpu: usize, cpuset: &mut cpu_set_t) -> () {
-        let size_in_bits = 8 * size_of_val(&cpuset.bits[0]); // 32, 64 etc
-        let (idx, offset) = (cpu / size_in_bits, cpu % size_in_bits);
-        cpuset.bits[idx] |= 1 << offset;
-    }
-
-    pub fn CPU_CLR(cpu: usize, cpuset: &mut cpu_set_t) -> () {
-        let size_in_bits = 8 * size_of_val(&cpuset.bits[0]); // 32, 64 etc
-        let (idx, offset) = (cpu / size_in_bits, cpu % size_in_bits);
-        cpuset.bits[idx] &= !(1 << offset);
-    }
-
-    pub fn CPU_ISSET(cpu: usize, cpuset: &cpu_set_t) -> bool {
-        let size_in_bits = 8 * size_of_val(&cpuset.bits[0]);
-        let (idx, offset) = (cpu / size_in_bits, cpu % size_in_bits);
-        0 != (cpuset.bits[idx] & (1 << offset))
-    }
-
-    pub fn CPU_COUNT_S(size: usize, cpuset: &cpu_set_t) -> c_int {
-        let mut s: u32 = 0;
-        let size_of_mask = size_of_val(&cpuset.bits[0]);
-        for i in &cpuset.bits[..(size / size_of_mask)] {
-            s += i.count_ones();
-        }
-        s as c_int
-    }
-
-    pub fn CPU_COUNT(cpuset: &cpu_set_t) -> c_int {
-        CPU_COUNT_S(size_of::<cpu_set_t>(), cpuset)
-    }
-
-    pub fn CPU_EQUAL(set1: &cpu_set_t, set2: &cpu_set_t) -> bool {
-        set1.bits == set2.bits
-    }
-
-    pub fn SCTP_PR_INDEX(policy: c_int) -> c_int {
-        policy >> (4 - 1)
-    }
-
-    pub fn SCTP_PR_POLICY(policy: c_int) -> c_int {
-        policy & SCTP_PR_SCTP_MASK
-    }
-
-    pub fn SCTP_PR_SET_POLICY(flags: &mut c_int, policy: c_int) -> () {
-        *flags &= !SCTP_PR_SCTP_MASK;
-        *flags |= policy;
-    }
-
-    pub fn IPTOS_TOS(tos: u8) -> u8 {
-        tos & IPTOS_TOS_MASK
-    }
-
-    pub fn IPTOS_PREC(tos: u8) -> u8 {
-        tos & IPTOS_PREC_MASK
-    }
-
-    pub fn RT_TOS(tos: u8) -> u8 {
-        tos & crate::IPTOS_TOS_MASK
-    }
-
-    pub fn RT_ADDRCLASS(flags: u32) -> u32 {
-        flags >> 23
-    }
-
-    pub fn RT_LOCALADDR(flags: u32) -> bool {
-        (flags & RTF_ADDRCLASSMASK) == (RTF_LOCAL | RTF_INTERFACE)
-    }
-
-    pub fn SO_EE_OFFENDER(ee: *const crate::sock_extended_err) -> *mut crate::sockaddr {
-        ee.offset(1) as *mut crate::sockaddr
-    }
-
-    pub fn TPACKET_ALIGN(x:
usize) -> usize { - (x + TPACKET_ALIGNMENT - 1) & !(TPACKET_ALIGNMENT - 1) - } - - pub fn BPF_CLASS(code: __u32) -> __u32 { - code & 0x07 - } - - pub fn BPF_SIZE(code: __u32) -> __u32 { - code & 0x18 - } - - pub fn BPF_MODE(code: __u32) -> __u32 { - code & 0xe0 - } - - pub fn BPF_OP(code: __u32) -> __u32 { - code & 0xf0 - } - - pub fn BPF_SRC(code: __u32) -> __u32 { - code & 0x08 - } - - pub fn BPF_RVAL(code: __u32) -> __u32 { - code & 0x18 - } - - pub fn BPF_MISCOP(code: __u32) -> __u32 { - code & 0xf8 - } - - pub fn BPF_STMT(code: __u16, k: __u32) -> sock_filter { - sock_filter { - code, - jt: 0, - jf: 0, - k, - } - } - - pub fn BPF_JUMP(code: __u16, k: __u32, jt: __u8, jf: __u8) -> sock_filter { - sock_filter { code, jt, jf, k } - } - - pub fn ELF32_R_SYM(val: Elf32_Word) -> Elf32_Word { - val >> 8 - } - - pub fn ELF32_R_TYPE(val: Elf32_Word) -> Elf32_Word { - val & 0xff - } - - pub fn ELF32_R_INFO(sym: Elf32_Word, t: Elf32_Word) -> Elf32_Word { - sym << (8 + t) & 0xff - } - - pub fn ELF64_R_SYM(val: Elf64_Xword) -> Elf64_Xword { - val >> 32 - } - - pub fn ELF64_R_TYPE(val: Elf64_Xword) -> Elf64_Xword { - val & 0xffffffff - } - - pub fn ELF64_R_INFO(sym: Elf64_Xword, t: Elf64_Xword) -> Elf64_Xword { - sym << (32 + t) - } -} - -safe_f! { - pub const fn makedev(major: c_uint, minor: c_uint) -> crate::dev_t { - let major = major as crate::dev_t; - let minor = minor as crate::dev_t; - let mut dev = 0; - dev |= (major & 0x00000fff) << 8; - dev |= (major & 0xfffff000) << 32; - dev |= (minor & 0x000000ff) << 0; - dev |= (minor & 0xffffff00) << 12; - dev - } - - pub const fn major(dev: crate::dev_t) -> c_uint { - let mut major = 0; - major |= (dev & 0x00000000000fff00) >> 8; - major |= (dev & 0xfffff00000000000) >> 32; - major as c_uint - } - - pub const fn minor(dev: crate::dev_t) -> c_uint { - let mut minor = 0; - minor |= (dev & 0x00000000000000ff) >> 0; - minor |= (dev & 0x00000ffffff00000) >> 12; - minor as c_uint - } - - pub const fn SCTP_PR_TTL_ENABLED(policy: c_int) -> bool { - policy == SCTP_PR_SCTP_TTL - } - - pub const fn SCTP_PR_RTX_ENABLED(policy: c_int) -> bool { - policy == SCTP_PR_SCTP_RTX - } - - pub const fn SCTP_PR_PRIO_ENABLED(policy: c_int) -> bool { - policy == SCTP_PR_SCTP_PRIO - } -} - -cfg_if! { - if #[cfg(all( - any(target_env = "gnu", target_env = "musl", target_env = "ohos"), - any(target_arch = "x86_64", target_arch = "x86") - ))] { - extern "C" { - pub fn iopl(level: c_int) -> c_int; - pub fn ioperm(from: c_ulong, num: c_ulong, turn_on: c_int) -> c_int; - } - } -} - -cfg_if! 
{ - if #[cfg(all(not(target_env = "uclibc"), not(target_env = "ohos")))] { - extern "C" { - #[cfg_attr(gnu_file_offset_bits64, link_name = "aio_read64")] - pub fn aio_read(aiocbp: *mut aiocb) -> c_int; - #[cfg_attr(gnu_file_offset_bits64, link_name = "aio_write64")] - pub fn aio_write(aiocbp: *mut aiocb) -> c_int; - pub fn aio_fsync(op: c_int, aiocbp: *mut aiocb) -> c_int; - #[cfg_attr(gnu_file_offset_bits64, link_name = "aio_error64")] - pub fn aio_error(aiocbp: *const aiocb) -> c_int; - #[cfg_attr(gnu_file_offset_bits64, link_name = "aio_return64")] - pub fn aio_return(aiocbp: *mut aiocb) -> ssize_t; - #[cfg_attr(gnu_time_bits64, link_name = "__aio_suspend_time64")] - pub fn aio_suspend( - aiocb_list: *const *const aiocb, - nitems: c_int, - timeout: *const crate::timespec, - ) -> c_int; - #[cfg_attr(gnu_file_offset_bits64, link_name = "aio_cancel64")] - pub fn aio_cancel(fd: c_int, aiocbp: *mut aiocb) -> c_int; - #[cfg_attr(gnu_file_offset_bits64, link_name = "lio_listio64")] - pub fn lio_listio( - mode: c_int, - aiocb_list: *const *mut aiocb, - nitems: c_int, - sevp: *mut crate::sigevent, - ) -> c_int; - } - } -} - -cfg_if! { - if #[cfg(not(target_env = "uclibc"))] { - extern "C" { - #[cfg_attr(gnu_file_offset_bits64, link_name = "pwritev64")] - pub fn pwritev( - fd: c_int, - iov: *const crate::iovec, - iovcnt: c_int, - offset: off_t, - ) -> ssize_t; - #[cfg_attr(gnu_file_offset_bits64, link_name = "preadv64")] - pub fn preadv( - fd: c_int, - iov: *const crate::iovec, - iovcnt: c_int, - offset: off_t, - ) -> ssize_t; - pub fn getnameinfo( - sa: *const crate::sockaddr, - salen: crate::socklen_t, - host: *mut c_char, - hostlen: crate::socklen_t, - serv: *mut c_char, - servlen: crate::socklen_t, - flags: c_int, - ) -> c_int; - pub fn getloadavg(loadavg: *mut c_double, nelem: c_int) -> c_int; - pub fn process_vm_readv( - pid: crate::pid_t, - local_iov: *const crate::iovec, - liovcnt: c_ulong, - remote_iov: *const crate::iovec, - riovcnt: c_ulong, - flags: c_ulong, - ) -> isize; - pub fn process_vm_writev( - pid: crate::pid_t, - local_iov: *const crate::iovec, - liovcnt: c_ulong, - remote_iov: *const crate::iovec, - riovcnt: c_ulong, - flags: c_ulong, - ) -> isize; - #[cfg_attr(gnu_time_bits64, link_name = "__futimes64")] - pub fn futimes(fd: c_int, times: *const crate::timeval) -> c_int; - } - } -} - -// These functions are not available on OpenHarmony -cfg_if! { - if #[cfg(not(target_env = "ohos"))] { - extern "C" { - // Only `getspnam_r` is implemented for musl, out of all of the reenterant - // functions from `shadow.h`. - // https://git.musl-libc.org/cgit/musl/tree/include/shadow.h - pub fn getspnam_r( - name: *const c_char, - spbuf: *mut spwd, - buf: *mut c_char, - buflen: size_t, - spbufp: *mut *mut spwd, - ) -> c_int; - - pub fn mq_open(name: *const c_char, oflag: c_int, ...) 
-> crate::mqd_t; - pub fn mq_close(mqd: crate::mqd_t) -> c_int; - pub fn mq_unlink(name: *const c_char) -> c_int; - pub fn mq_receive( - mqd: crate::mqd_t, - msg_ptr: *mut c_char, - msg_len: size_t, - msg_prio: *mut c_uint, - ) -> ssize_t; - #[cfg_attr(gnu_time_bits64, link_name = "__mq_timedreceive_time64")] - pub fn mq_timedreceive( - mqd: crate::mqd_t, - msg_ptr: *mut c_char, - msg_len: size_t, - msg_prio: *mut c_uint, - abs_timeout: *const crate::timespec, - ) -> ssize_t; - pub fn mq_send( - mqd: crate::mqd_t, - msg_ptr: *const c_char, - msg_len: size_t, - msg_prio: c_uint, - ) -> c_int; - #[cfg_attr(gnu_time_bits64, link_name = "__mq_timedsend_time64")] - pub fn mq_timedsend( - mqd: crate::mqd_t, - msg_ptr: *const c_char, - msg_len: size_t, - msg_prio: c_uint, - abs_timeout: *const crate::timespec, - ) -> c_int; - pub fn mq_getattr(mqd: crate::mqd_t, attr: *mut crate::mq_attr) -> c_int; - pub fn mq_setattr( - mqd: crate::mqd_t, - newattr: *const crate::mq_attr, - oldattr: *mut crate::mq_attr, - ) -> c_int; - - pub fn pthread_mutex_consistent(mutex: *mut pthread_mutex_t) -> c_int; - pub fn pthread_cancel(thread: crate::pthread_t) -> c_int; - pub fn pthread_mutexattr_getrobust( - attr: *const pthread_mutexattr_t, - robustness: *mut c_int, - ) -> c_int; - pub fn pthread_mutexattr_setrobust( - attr: *mut pthread_mutexattr_t, - robustness: c_int, - ) -> c_int; - } - } -} - -extern "C" { - #[cfg_attr( - not(any(target_env = "musl", target_env = "ohos")), - link_name = "__xpg_strerror_r" - )] - pub fn strerror_r(errnum: c_int, buf: *mut c_char, buflen: size_t) -> c_int; - - pub fn abs(i: c_int) -> c_int; - pub fn labs(i: c_long) -> c_long; - pub fn rand() -> c_int; - pub fn srand(seed: c_uint); - - pub fn drand48() -> c_double; - pub fn erand48(xseed: *mut c_ushort) -> c_double; - pub fn lrand48() -> c_long; - pub fn nrand48(xseed: *mut c_ushort) -> c_long; - pub fn mrand48() -> c_long; - pub fn jrand48(xseed: *mut c_ushort) -> c_long; - pub fn srand48(seed: c_long); - pub fn seed48(xseed: *mut c_ushort) -> *mut c_ushort; - pub fn lcong48(p: *mut c_ushort); - - #[cfg_attr(gnu_time_bits64, link_name = "__lutimes64")] - pub fn lutimes(file: *const c_char, times: *const crate::timeval) -> c_int; - - pub fn setpwent(); - pub fn endpwent(); - pub fn getpwent() -> *mut passwd; - pub fn setgrent(); - pub fn endgrent(); - pub fn getgrent() -> *mut crate::group; - pub fn setspent(); - pub fn endspent(); - pub fn getspent() -> *mut spwd; - - pub fn getspnam(name: *const c_char) -> *mut spwd; - - pub fn shm_open(name: *const c_char, oflag: c_int, mode: mode_t) -> c_int; - pub fn shm_unlink(name: *const c_char) -> c_int; - - // System V IPC - pub fn shmget(key: crate::key_t, size: size_t, shmflg: c_int) -> c_int; - pub fn shmat(shmid: c_int, shmaddr: *const c_void, shmflg: c_int) -> *mut c_void; - pub fn shmdt(shmaddr: *const c_void) -> c_int; - #[cfg_attr(gnu_time_bits64, link_name = "__shmctl64")] - pub fn shmctl(shmid: c_int, cmd: c_int, buf: *mut crate::shmid_ds) -> c_int; - pub fn ftok(pathname: *const c_char, proj_id: c_int) -> crate::key_t; - pub fn semget(key: crate::key_t, nsems: c_int, semflag: c_int) -> c_int; - pub fn semop(semid: c_int, sops: *mut crate::sembuf, nsops: size_t) -> c_int; - #[cfg_attr(gnu_time_bits64, link_name = "__semctl64")] - pub fn semctl(semid: c_int, semnum: c_int, cmd: c_int, ...) 
-> c_int; - #[cfg_attr(gnu_time_bits64, link_name = "__msgctl64")] - pub fn msgctl(msqid: c_int, cmd: c_int, buf: *mut msqid_ds) -> c_int; - pub fn msgget(key: crate::key_t, msgflg: c_int) -> c_int; - pub fn msgrcv( - msqid: c_int, - msgp: *mut c_void, - msgsz: size_t, - msgtyp: c_long, - msgflg: c_int, - ) -> ssize_t; - pub fn msgsnd(msqid: c_int, msgp: *const c_void, msgsz: size_t, msgflg: c_int) -> c_int; - - pub fn mprotect(addr: *mut c_void, len: size_t, prot: c_int) -> c_int; - pub fn __errno_location() -> *mut c_int; - - #[cfg_attr(gnu_file_offset_bits64, link_name = "fallocate64")] - pub fn fallocate(fd: c_int, mode: c_int, offset: off_t, len: off_t) -> c_int; - #[cfg_attr(gnu_file_offset_bits64, link_name = "posix_fallocate64")] - pub fn posix_fallocate(fd: c_int, offset: off_t, len: off_t) -> c_int; - pub fn readahead(fd: c_int, offset: off64_t, count: size_t) -> ssize_t; - pub fn getxattr( - path: *const c_char, - name: *const c_char, - value: *mut c_void, - size: size_t, - ) -> ssize_t; - pub fn lgetxattr( - path: *const c_char, - name: *const c_char, - value: *mut c_void, - size: size_t, - ) -> ssize_t; - pub fn fgetxattr( - filedes: c_int, - name: *const c_char, - value: *mut c_void, - size: size_t, - ) -> ssize_t; - pub fn setxattr( - path: *const c_char, - name: *const c_char, - value: *const c_void, - size: size_t, - flags: c_int, - ) -> c_int; - pub fn lsetxattr( - path: *const c_char, - name: *const c_char, - value: *const c_void, - size: size_t, - flags: c_int, - ) -> c_int; - pub fn fsetxattr( - filedes: c_int, - name: *const c_char, - value: *const c_void, - size: size_t, - flags: c_int, - ) -> c_int; - pub fn listxattr(path: *const c_char, list: *mut c_char, size: size_t) -> ssize_t; - pub fn llistxattr(path: *const c_char, list: *mut c_char, size: size_t) -> ssize_t; - pub fn flistxattr(filedes: c_int, list: *mut c_char, size: size_t) -> ssize_t; - pub fn removexattr(path: *const c_char, name: *const c_char) -> c_int; - pub fn lremovexattr(path: *const c_char, name: *const c_char) -> c_int; - pub fn fremovexattr(filedes: c_int, name: *const c_char) -> c_int; - pub fn signalfd(fd: c_int, mask: *const crate::sigset_t, flags: c_int) -> c_int; - pub fn timerfd_create(clockid: crate::clockid_t, flags: c_int) -> c_int; - #[cfg_attr(gnu_time_bits64, link_name = "__timerfd_gettime64")] - pub fn timerfd_gettime(fd: c_int, curr_value: *mut itimerspec) -> c_int; - #[cfg_attr(gnu_time_bits64, link_name = "__timerfd_settime64")] - pub fn timerfd_settime( - fd: c_int, - flags: c_int, - new_value: *const itimerspec, - old_value: *mut itimerspec, - ) -> c_int; - pub fn quotactl(cmd: c_int, special: *const c_char, id: c_int, data: *mut c_char) -> c_int; - pub fn epoll_pwait( - epfd: c_int, - events: *mut crate::epoll_event, - maxevents: c_int, - timeout: c_int, - sigmask: *const crate::sigset_t, - ) -> c_int; - pub fn dup3(oldfd: c_int, newfd: c_int, flags: c_int) -> c_int; - #[cfg_attr(gnu_time_bits64, link_name = "__sigtimedwait64")] - pub fn sigtimedwait( - set: *const sigset_t, - info: *mut siginfo_t, - timeout: *const crate::timespec, - ) -> c_int; - pub fn sigwaitinfo(set: *const sigset_t, info: *mut siginfo_t) -> c_int; - pub fn nl_langinfo_l(item: crate::nl_item, locale: crate::locale_t) -> *mut c_char; - pub fn accept4( - fd: c_int, - addr: *mut crate::sockaddr, - len: *mut crate::socklen_t, - flg: c_int, - ) -> c_int; - pub fn pthread_getaffinity_np( - thread: crate::pthread_t, - cpusetsize: size_t, - cpuset: *mut crate::cpu_set_t, - ) -> c_int; - pub fn 
pthread_setaffinity_np(
-        thread: crate::pthread_t,
-        cpusetsize: size_t,
-        cpuset: *const crate::cpu_set_t,
-    ) -> c_int;
-    pub fn pthread_setschedprio(native: crate::pthread_t, priority: c_int) -> c_int;
-    pub fn reboot(how_to: c_int) -> c_int;
-    pub fn setfsgid(gid: crate::gid_t) -> c_int;
-    pub fn setfsuid(uid: crate::uid_t) -> c_int;
-
-    // Not available now on Android
-    pub fn mkfifoat(dirfd: c_int, pathname: *const c_char, mode: mode_t) -> c_int;
-    pub fn if_nameindex() -> *mut if_nameindex;
-    pub fn if_freenameindex(ptr: *mut if_nameindex);
-    pub fn sync_file_range(fd: c_int, offset: off64_t, nbytes: off64_t, flags: c_uint) -> c_int;
-    pub fn mremap(
-        addr: *mut c_void,
-        len: size_t,
-        new_len: size_t,
-        flags: c_int,
-        ...
-    ) -> *mut c_void;
-
-    #[cfg_attr(gnu_time_bits64, link_name = "__glob64_time64")]
-    #[cfg_attr(
-        all(not(gnu_time_bits64), gnu_file_offset_bits64),
-        link_name = "glob64"
-    )]
-    pub fn glob(
-        pattern: *const c_char,
-        flags: c_int,
-        errfunc: Option<extern "C" fn(epath: *const c_char, errno: c_int) -> c_int>,
-        pglob: *mut crate::glob_t,
-    ) -> c_int;
-    #[cfg_attr(gnu_time_bits64, link_name = "__globfree64_time64")]
-    #[cfg_attr(
-        all(not(gnu_time_bits64), gnu_file_offset_bits64),
-        link_name = "globfree64"
-    )]
-    pub fn globfree(pglob: *mut crate::glob_t);
-
-    pub fn posix_madvise(addr: *mut c_void, len: size_t, advice: c_int) -> c_int;
-
-    pub fn seekdir(dirp: *mut crate::DIR, loc: c_long);
-
-    pub fn telldir(dirp: *mut crate::DIR) -> c_long;
-    pub fn madvise(addr: *mut c_void, len: size_t, advice: c_int) -> c_int;
-
-    pub fn msync(addr: *mut c_void, len: size_t, flags: c_int) -> c_int;
-    pub fn remap_file_pages(
-        addr: *mut c_void,
-        size: size_t,
-        prot: c_int,
-        pgoff: size_t,
-        flags: c_int,
-    ) -> c_int;
-    pub fn recvfrom(
-        socket: c_int,
-        buf: *mut c_void,
-        len: size_t,
-        flags: c_int,
-        addr: *mut crate::sockaddr,
-        addrlen: *mut crate::socklen_t,
-    ) -> ssize_t;
-    #[cfg_attr(gnu_file_offset_bits64, link_name = "mkstemps64")]
-    pub fn mkstemps(template: *mut c_char, suffixlen: c_int) -> c_int;
-
-    pub fn nl_langinfo(item: crate::nl_item) -> *mut c_char;
-
-    pub fn vhangup() -> c_int;
-    pub fn sync();
-    pub fn syncfs(fd: c_int) -> c_int;
-    pub fn syscall(num: c_long, ...)
-> c_long; - pub fn sched_getaffinity( - pid: crate::pid_t, - cpusetsize: size_t, - cpuset: *mut cpu_set_t, - ) -> c_int; - pub fn sched_setaffinity( - pid: crate::pid_t, - cpusetsize: size_t, - cpuset: *const cpu_set_t, - ) -> c_int; - pub fn epoll_create(size: c_int) -> c_int; - pub fn epoll_create1(flags: c_int) -> c_int; - pub fn epoll_wait( - epfd: c_int, - events: *mut crate::epoll_event, - maxevents: c_int, - timeout: c_int, - ) -> c_int; - pub fn epoll_ctl(epfd: c_int, op: c_int, fd: c_int, event: *mut crate::epoll_event) -> c_int; - pub fn pthread_getschedparam( - native: crate::pthread_t, - policy: *mut c_int, - param: *mut crate::sched_param, - ) -> c_int; - pub fn unshare(flags: c_int) -> c_int; - pub fn umount(target: *const c_char) -> c_int; - pub fn sched_get_priority_max(policy: c_int) -> c_int; - pub fn tee(fd_in: c_int, fd_out: c_int, len: size_t, flags: c_uint) -> ssize_t; - #[cfg_attr(gnu_time_bits64, link_name = "__settimeofday64")] - pub fn settimeofday(tv: *const crate::timeval, tz: *const crate::timezone) -> c_int; - pub fn splice( - fd_in: c_int, - off_in: *mut crate::loff_t, - fd_out: c_int, - off_out: *mut crate::loff_t, - len: size_t, - flags: c_uint, - ) -> ssize_t; - pub fn eventfd(init: c_uint, flags: c_int) -> c_int; - pub fn eventfd_read(fd: c_int, value: *mut eventfd_t) -> c_int; - pub fn eventfd_write(fd: c_int, value: eventfd_t) -> c_int; - - #[cfg_attr(gnu_time_bits64, link_name = "__sched_rr_get_interval64")] - pub fn sched_rr_get_interval(pid: crate::pid_t, tp: *mut crate::timespec) -> c_int; - #[cfg_attr(gnu_time_bits64, link_name = "__sem_timedwait64")] - pub fn sem_timedwait(sem: *mut sem_t, abstime: *const crate::timespec) -> c_int; - pub fn sem_getvalue(sem: *mut sem_t, sval: *mut c_int) -> c_int; - pub fn sched_setparam(pid: crate::pid_t, param: *const crate::sched_param) -> c_int; - pub fn setns(fd: c_int, nstype: c_int) -> c_int; - pub fn swapoff(path: *const c_char) -> c_int; - pub fn vmsplice(fd: c_int, iov: *const crate::iovec, nr_segs: size_t, flags: c_uint) - -> ssize_t; - pub fn mount( - src: *const c_char, - target: *const c_char, - fstype: *const c_char, - flags: c_ulong, - data: *const c_void, - ) -> c_int; - pub fn personality(persona: c_ulong) -> c_int; - #[cfg_attr(gnu_time_bits64, link_name = "__prctl_time64")] - pub fn prctl(option: c_int, ...) 
-> c_int; - pub fn sched_getparam(pid: crate::pid_t, param: *mut crate::sched_param) -> c_int; - #[cfg_attr(gnu_time_bits64, link_name = "__ppoll64")] - pub fn ppoll( - fds: *mut crate::pollfd, - nfds: nfds_t, - timeout: *const crate::timespec, - sigmask: *const sigset_t, - ) -> c_int; - pub fn pthread_mutexattr_getprotocol( - attr: *const pthread_mutexattr_t, - protocol: *mut c_int, - ) -> c_int; - pub fn pthread_mutexattr_setprotocol(attr: *mut pthread_mutexattr_t, protocol: c_int) -> c_int; - - #[cfg_attr(gnu_time_bits64, link_name = "__pthread_mutex_timedlock64")] - pub fn pthread_mutex_timedlock( - lock: *mut pthread_mutex_t, - abstime: *const crate::timespec, - ) -> c_int; - pub fn pthread_barrierattr_init(attr: *mut crate::pthread_barrierattr_t) -> c_int; - pub fn pthread_barrierattr_destroy(attr: *mut crate::pthread_barrierattr_t) -> c_int; - pub fn pthread_barrierattr_getpshared( - attr: *const crate::pthread_barrierattr_t, - shared: *mut c_int, - ) -> c_int; - pub fn pthread_barrierattr_setpshared( - attr: *mut crate::pthread_barrierattr_t, - shared: c_int, - ) -> c_int; - pub fn pthread_barrier_init( - barrier: *mut pthread_barrier_t, - attr: *const crate::pthread_barrierattr_t, - count: c_uint, - ) -> c_int; - pub fn pthread_barrier_destroy(barrier: *mut pthread_barrier_t) -> c_int; - pub fn pthread_barrier_wait(barrier: *mut pthread_barrier_t) -> c_int; - pub fn pthread_spin_init(lock: *mut crate::pthread_spinlock_t, pshared: c_int) -> c_int; - pub fn pthread_spin_destroy(lock: *mut crate::pthread_spinlock_t) -> c_int; - pub fn pthread_spin_lock(lock: *mut crate::pthread_spinlock_t) -> c_int; - pub fn pthread_spin_trylock(lock: *mut crate::pthread_spinlock_t) -> c_int; - pub fn pthread_spin_unlock(lock: *mut crate::pthread_spinlock_t) -> c_int; - pub fn clone( - cb: extern "C" fn(*mut c_void) -> c_int, - child_stack: *mut c_void, - flags: c_int, - arg: *mut c_void, - ... 
- ) -> c_int; - pub fn sched_getscheduler(pid: crate::pid_t) -> c_int; - #[cfg_attr(gnu_time_bits64, link_name = "__clock_nanosleep_time64")] - pub fn clock_nanosleep( - clk_id: crate::clockid_t, - flags: c_int, - rqtp: *const crate::timespec, - rmtp: *mut crate::timespec, - ) -> c_int; - pub fn pthread_attr_getguardsize( - attr: *const crate::pthread_attr_t, - guardsize: *mut size_t, - ) -> c_int; - pub fn pthread_attr_setguardsize(attr: *mut crate::pthread_attr_t, guardsize: size_t) -> c_int; - pub fn pthread_attr_getinheritsched( - attr: *const crate::pthread_attr_t, - inheritsched: *mut c_int, - ) -> c_int; - pub fn pthread_attr_setinheritsched( - attr: *mut crate::pthread_attr_t, - inheritsched: c_int, - ) -> c_int; - pub fn pthread_attr_getschedpolicy( - attr: *const crate::pthread_attr_t, - policy: *mut c_int, - ) -> c_int; - pub fn pthread_attr_setschedpolicy(attr: *mut crate::pthread_attr_t, policy: c_int) -> c_int; - pub fn pthread_attr_getschedparam( - attr: *const crate::pthread_attr_t, - param: *mut crate::sched_param, - ) -> c_int; - pub fn pthread_attr_setschedparam( - attr: *mut crate::pthread_attr_t, - param: *const crate::sched_param, - ) -> c_int; - pub fn sethostname(name: *const c_char, len: size_t) -> c_int; - pub fn sched_get_priority_min(policy: c_int) -> c_int; - pub fn pthread_condattr_getpshared( - attr: *const pthread_condattr_t, - pshared: *mut c_int, - ) -> c_int; - pub fn sysinfo(info: *mut crate::sysinfo) -> c_int; - pub fn umount2(target: *const c_char, flags: c_int) -> c_int; - pub fn pthread_setschedparam( - native: crate::pthread_t, - policy: c_int, - param: *const crate::sched_param, - ) -> c_int; - pub fn swapon(path: *const c_char, swapflags: c_int) -> c_int; - pub fn sched_setscheduler( - pid: crate::pid_t, - policy: c_int, - param: *const crate::sched_param, - ) -> c_int; - #[cfg_attr(gnu_file_offset_bits64, link_name = "sendfile64")] - pub fn sendfile(out_fd: c_int, in_fd: c_int, offset: *mut off_t, count: size_t) -> ssize_t; - pub fn sigsuspend(mask: *const crate::sigset_t) -> c_int; - pub fn getgrgid_r( - gid: crate::gid_t, - grp: *mut crate::group, - buf: *mut c_char, - buflen: size_t, - result: *mut *mut crate::group, - ) -> c_int; - pub fn sigaltstack(ss: *const stack_t, oss: *mut stack_t) -> c_int; - pub fn sem_close(sem: *mut sem_t) -> c_int; - pub fn getdtablesize() -> c_int; - pub fn getgrnam_r( - name: *const c_char, - grp: *mut crate::group, - buf: *mut c_char, - buflen: size_t, - result: *mut *mut crate::group, - ) -> c_int; - pub fn initgroups(user: *const c_char, group: crate::gid_t) -> c_int; - pub fn pthread_sigmask(how: c_int, set: *const sigset_t, oldset: *mut sigset_t) -> c_int; - pub fn sem_open(name: *const c_char, oflag: c_int, ...) 
-> *mut sem_t; - pub fn getgrnam(name: *const c_char) -> *mut crate::group; - pub fn pthread_kill(thread: crate::pthread_t, sig: c_int) -> c_int; - pub fn sem_unlink(name: *const c_char) -> c_int; - pub fn daemon(nochdir: c_int, noclose: c_int) -> c_int; - pub fn getpwnam_r( - name: *const c_char, - pwd: *mut passwd, - buf: *mut c_char, - buflen: size_t, - result: *mut *mut passwd, - ) -> c_int; - pub fn getpwuid_r( - uid: crate::uid_t, - pwd: *mut passwd, - buf: *mut c_char, - buflen: size_t, - result: *mut *mut passwd, - ) -> c_int; - pub fn sigwait(set: *const sigset_t, sig: *mut c_int) -> c_int; - pub fn pthread_atfork( - prepare: Option, - parent: Option, - child: Option, - ) -> c_int; - pub fn getgrgid(gid: crate::gid_t) -> *mut crate::group; - pub fn getgrouplist( - user: *const c_char, - group: crate::gid_t, - groups: *mut crate::gid_t, - ngroups: *mut c_int, - ) -> c_int; - pub fn pthread_mutexattr_getpshared( - attr: *const pthread_mutexattr_t, - pshared: *mut c_int, - ) -> c_int; - pub fn popen(command: *const c_char, mode: *const c_char) -> *mut crate::FILE; - pub fn faccessat(dirfd: c_int, pathname: *const c_char, mode: c_int, flags: c_int) -> c_int; - pub fn pthread_create( - native: *mut crate::pthread_t, - attr: *const crate::pthread_attr_t, - f: extern "C" fn(*mut c_void) -> *mut c_void, - value: *mut c_void, - ) -> c_int; - pub fn dl_iterate_phdr( - callback: Option< - unsafe extern "C" fn( - info: *mut crate::dl_phdr_info, - size: size_t, - data: *mut c_void, - ) -> c_int, - >, - data: *mut c_void, - ) -> c_int; - - pub fn setmntent(filename: *const c_char, ty: *const c_char) -> *mut crate::FILE; - pub fn getmntent(stream: *mut crate::FILE) -> *mut crate::mntent; - pub fn addmntent(stream: *mut crate::FILE, mnt: *const crate::mntent) -> c_int; - pub fn endmntent(streamp: *mut crate::FILE) -> c_int; - pub fn hasmntopt(mnt: *const crate::mntent, opt: *const c_char) -> *mut c_char; - - pub fn posix_spawn( - pid: *mut crate::pid_t, - path: *const c_char, - file_actions: *const crate::posix_spawn_file_actions_t, - attrp: *const crate::posix_spawnattr_t, - argv: *const *mut c_char, - envp: *const *mut c_char, - ) -> c_int; - pub fn posix_spawnp( - pid: *mut crate::pid_t, - file: *const c_char, - file_actions: *const crate::posix_spawn_file_actions_t, - attrp: *const crate::posix_spawnattr_t, - argv: *const *mut c_char, - envp: *const *mut c_char, - ) -> c_int; - pub fn posix_spawnattr_init(attr: *mut posix_spawnattr_t) -> c_int; - pub fn posix_spawnattr_destroy(attr: *mut posix_spawnattr_t) -> c_int; - pub fn posix_spawnattr_getsigdefault( - attr: *const posix_spawnattr_t, - default: *mut crate::sigset_t, - ) -> c_int; - pub fn posix_spawnattr_setsigdefault( - attr: *mut posix_spawnattr_t, - default: *const crate::sigset_t, - ) -> c_int; - pub fn posix_spawnattr_getsigmask( - attr: *const posix_spawnattr_t, - default: *mut crate::sigset_t, - ) -> c_int; - pub fn posix_spawnattr_setsigmask( - attr: *mut posix_spawnattr_t, - default: *const crate::sigset_t, - ) -> c_int; - pub fn posix_spawnattr_getflags(attr: *const posix_spawnattr_t, flags: *mut c_short) -> c_int; - pub fn posix_spawnattr_setflags(attr: *mut posix_spawnattr_t, flags: c_short) -> c_int; - pub fn posix_spawnattr_getpgroup( - attr: *const posix_spawnattr_t, - flags: *mut crate::pid_t, - ) -> c_int; - pub fn posix_spawnattr_setpgroup(attr: *mut posix_spawnattr_t, flags: crate::pid_t) -> c_int; - pub fn posix_spawnattr_getschedpolicy( - attr: *const posix_spawnattr_t, - flags: *mut c_int, - ) -> c_int; - pub fn 
posix_spawnattr_setschedpolicy(attr: *mut posix_spawnattr_t, flags: c_int) -> c_int; - pub fn posix_spawnattr_getschedparam( - attr: *const posix_spawnattr_t, - param: *mut crate::sched_param, - ) -> c_int; - pub fn posix_spawnattr_setschedparam( - attr: *mut posix_spawnattr_t, - param: *const crate::sched_param, - ) -> c_int; - - pub fn posix_spawn_file_actions_init(actions: *mut posix_spawn_file_actions_t) -> c_int; - pub fn posix_spawn_file_actions_destroy(actions: *mut posix_spawn_file_actions_t) -> c_int; - pub fn posix_spawn_file_actions_addopen( - actions: *mut posix_spawn_file_actions_t, - fd: c_int, - path: *const c_char, - oflag: c_int, - mode: mode_t, - ) -> c_int; - pub fn posix_spawn_file_actions_addclose( - actions: *mut posix_spawn_file_actions_t, - fd: c_int, - ) -> c_int; - pub fn posix_spawn_file_actions_adddup2( - actions: *mut posix_spawn_file_actions_t, - fd: c_int, - newfd: c_int, - ) -> c_int; - pub fn fread_unlocked( - buf: *mut c_void, - size: size_t, - nobj: size_t, - stream: *mut crate::FILE, - ) -> size_t; - pub fn inotify_rm_watch(fd: c_int, wd: c_int) -> c_int; - pub fn inotify_init() -> c_int; - pub fn inotify_init1(flags: c_int) -> c_int; - pub fn inotify_add_watch(fd: c_int, path: *const c_char, mask: u32) -> c_int; - pub fn fanotify_init(flags: c_uint, event_f_flags: c_uint) -> c_int; - - pub fn regcomp(preg: *mut crate::regex_t, pattern: *const c_char, cflags: c_int) -> c_int; - - pub fn regexec( - preg: *const crate::regex_t, - input: *const c_char, - nmatch: size_t, - pmatch: *mut regmatch_t, - eflags: c_int, - ) -> c_int; - - pub fn regerror( - errcode: c_int, - preg: *const crate::regex_t, - errbuf: *mut c_char, - errbuf_size: size_t, - ) -> size_t; - - pub fn regfree(preg: *mut crate::regex_t); - - pub fn iconv_open(tocode: *const c_char, fromcode: *const c_char) -> iconv_t; - pub fn iconv( - cd: iconv_t, - inbuf: *mut *mut c_char, - inbytesleft: *mut size_t, - outbuf: *mut *mut c_char, - outbytesleft: *mut size_t, - ) -> size_t; - pub fn iconv_close(cd: iconv_t) -> c_int; - - pub fn gettid() -> crate::pid_t; - - pub fn timer_create( - clockid: crate::clockid_t, - sevp: *mut crate::sigevent, - timerid: *mut crate::timer_t, - ) -> c_int; - pub fn timer_delete(timerid: crate::timer_t) -> c_int; - pub fn timer_getoverrun(timerid: crate::timer_t) -> c_int; - #[cfg_attr(gnu_time_bits64, link_name = "__timer_gettime64")] - pub fn timer_gettime(timerid: crate::timer_t, curr_value: *mut crate::itimerspec) -> c_int; - #[cfg_attr(gnu_time_bits64, link_name = "__timer_settime64")] - pub fn timer_settime( - timerid: crate::timer_t, - flags: c_int, - new_value: *const crate::itimerspec, - old_value: *mut crate::itimerspec, - ) -> c_int; - - pub fn gethostid() -> c_long; - - pub fn pthread_getcpuclockid(thread: crate::pthread_t, clk_id: *mut crate::clockid_t) -> c_int; - pub fn memmem( - haystack: *const c_void, - haystacklen: size_t, - needle: *const c_void, - needlelen: size_t, - ) -> *mut c_void; - pub fn sched_getcpu() -> c_int; - - pub fn pthread_getname_np(thread: crate::pthread_t, name: *mut c_char, len: size_t) -> c_int; - pub fn pthread_setname_np(thread: crate::pthread_t, name: *const c_char) -> c_int; - pub fn getopt_long( - argc: c_int, - argv: *const *mut c_char, - optstring: *const c_char, - longopts: *const option, - longindex: *mut c_int, - ) -> c_int; - - pub fn pthread_once(control: *mut pthread_once_t, routine: extern "C" fn()) -> c_int; - - pub fn copy_file_range( - fd_in: c_int, - off_in: *mut off64_t, - fd_out: c_int, - off_out: *mut off64_t, 
-        len: size_t,
-        flags: c_uint,
-    ) -> ssize_t;
-
-    pub fn klogctl(syslog_type: c_int, bufp: *mut c_char, len: c_int) -> c_int;
-}
-
-// LFS64 extensions
-//
-// * musl has 64-bit versions only so aliases the LFS64 symbols to the standard ones
-cfg_if! {
-    if #[cfg(not(target_env = "musl"))] {
-        extern "C" {
-            pub fn fallocate64(fd: c_int, mode: c_int, offset: off64_t, len: off64_t) -> c_int;
-            pub fn fgetpos64(stream: *mut crate::FILE, ptr: *mut fpos64_t) -> c_int;
-            pub fn fopen64(filename: *const c_char, mode: *const c_char) -> *mut crate::FILE;
-            pub fn freopen64(
-                filename: *const c_char,
-                mode: *const c_char,
-                file: *mut crate::FILE,
-            ) -> *mut crate::FILE;
-            pub fn fseeko64(stream: *mut crate::FILE, offset: off64_t, whence: c_int) -> c_int;
-            pub fn fsetpos64(stream: *mut crate::FILE, ptr: *const fpos64_t) -> c_int;
-            pub fn ftello64(stream: *mut crate::FILE) -> off64_t;
-            pub fn posix_fallocate64(fd: c_int, offset: off64_t, len: off64_t) -> c_int;
-            pub fn sendfile64(
-                out_fd: c_int,
-                in_fd: c_int,
-                offset: *mut off64_t,
-                count: size_t,
-            ) -> ssize_t;
-            pub fn tmpfile64() -> *mut crate::FILE;
-        }
-    }
-}
-
-cfg_if! {
-    if #[cfg(target_env = "uclibc")] {
-        mod uclibc;
-        pub use self::uclibc::*;
-    } else if #[cfg(any(target_env = "musl", target_env = "ohos"))] {
-        mod musl;
-        pub use self::musl::*;
-    } else if #[cfg(target_env = "gnu")] {
-        mod gnu;
-        pub use self::gnu::*;
-    }
-}
-
-mod arch;
-pub use self::arch::*;
diff --git a/vendor/libc/src/unix/linux_like/linux/musl/b32/arm/mod.rs b/vendor/libc/src/unix/linux_like/linux/musl/b32/arm/mod.rs
deleted file mode 100644
index a04f05ea50db8c..00000000000000
--- a/vendor/libc/src/unix/linux_like/linux/musl/b32/arm/mod.rs
+++ /dev/null
@@ -1,792 +0,0 @@
-use crate::off_t;
-use crate::prelude::*;
-
-pub type wchar_t = u32;
-
-s! {
-    pub struct stat {
-        pub st_dev: crate::dev_t,
-        __st_dev_padding: c_int,
-        __st_ino_truncated: c_long,
-        pub st_mode: crate::mode_t,
-        pub st_nlink: crate::nlink_t,
-        pub st_uid: crate::uid_t,
-        pub st_gid: crate::gid_t,
-        pub st_rdev: crate::dev_t,
-        __st_rdev_padding: c_int,
-        pub st_size: off_t,
-        pub st_blksize: crate::blksize_t,
-        pub st_blocks: crate::blkcnt_t,
-        pub st_atime: crate::time_t,
-        pub st_atime_nsec: c_long,
-        pub st_mtime: crate::time_t,
-        pub st_mtime_nsec: c_long,
-        pub st_ctime: crate::time_t,
-        pub st_ctime_nsec: c_long,
-        pub st_ino: crate::ino_t,
-    }
-
-    pub struct stat64 {
-        pub st_dev: crate::dev_t,
-        __st_dev_padding: c_int,
-        __st_ino_truncated: c_long,
-        pub st_mode: crate::mode_t,
-        pub st_nlink: crate::nlink_t,
-        pub st_uid: crate::uid_t,
-        pub st_gid: crate::gid_t,
-        pub st_rdev: crate::dev_t,
-        __st_rdev_padding: c_int,
-        pub st_size: off_t,
-        pub st_blksize: crate::blksize_t,
-        pub st_blocks: crate::blkcnt_t,
-        pub st_atime: crate::time_t,
-        pub st_atime_nsec: c_long,
-        pub st_mtime: crate::time_t,
-        pub st_mtime_nsec: c_long,
-        pub st_ctime: crate::time_t,
-        pub st_ctime_nsec: c_long,
-        pub st_ino: crate::ino_t,
-    }
-
-    pub struct stack_t {
-        pub ss_sp: *mut c_void,
-        pub ss_flags: c_int,
-        pub ss_size: size_t,
-    }
-
-    pub struct ipc_perm {
-        #[cfg(musl_v1_2_3)]
-        pub __key: crate::key_t,
-        #[cfg(not(musl_v1_2_3))]
-        #[deprecated(
-            since = "0.2.173",
-            note = "This field is incorrectly named and will be changed
-                    to __key in a future release."
- )] - pub __ipc_perm_key: crate::key_t, - pub uid: crate::uid_t, - pub gid: crate::gid_t, - pub cuid: crate::uid_t, - pub cgid: crate::gid_t, - pub mode: crate::mode_t, - pub __seq: c_int, - __unused1: c_long, - __unused2: c_long, - } - - pub struct shmid_ds { - pub shm_perm: crate::ipc_perm, - pub shm_segsz: size_t, - pub shm_atime: crate::time_t, - __unused1: c_int, - pub shm_dtime: crate::time_t, - __unused2: c_int, - pub shm_ctime: crate::time_t, - __unused3: c_int, - pub shm_cpid: crate::pid_t, - pub shm_lpid: crate::pid_t, - pub shm_nattch: c_ulong, - __pad1: c_ulong, - __pad2: c_ulong, - } - - pub struct msqid_ds { - pub msg_perm: crate::ipc_perm, - pub msg_stime: crate::time_t, - __unused1: c_int, - pub msg_rtime: crate::time_t, - __unused2: c_int, - pub msg_ctime: crate::time_t, - __unused3: c_int, - pub __msg_cbytes: c_ulong, - pub msg_qnum: crate::msgqnum_t, - pub msg_qbytes: crate::msglen_t, - pub msg_lspid: crate::pid_t, - pub msg_lrpid: crate::pid_t, - __pad1: c_ulong, - __pad2: c_ulong, - } - - pub struct mcontext_t { - pub trap_no: c_ulong, - pub error_code: c_ulong, - pub oldmask: c_ulong, - pub arm_r0: c_ulong, - pub arm_r1: c_ulong, - pub arm_r2: c_ulong, - pub arm_r3: c_ulong, - pub arm_r4: c_ulong, - pub arm_r5: c_ulong, - pub arm_r6: c_ulong, - pub arm_r7: c_ulong, - pub arm_r8: c_ulong, - pub arm_r9: c_ulong, - pub arm_r10: c_ulong, - pub arm_fp: c_ulong, - pub arm_ip: c_ulong, - pub arm_sp: c_ulong, - pub arm_lr: c_ulong, - pub arm_pc: c_ulong, - pub arm_cpsr: c_ulong, - pub fault_address: c_ulong, - } -} - -s_no_extra_traits! { - pub struct ucontext_t { - pub uc_flags: c_ulong, - pub uc_link: *mut ucontext_t, - pub uc_stack: crate::stack_t, - pub uc_mcontext: mcontext_t, - pub uc_sigmask: crate::sigset_t, - pub uc_regspace: [c_ulonglong; 64], - } - - #[repr(align(8))] - pub struct max_align_t { - priv_: (i64, i64), - } -} - -cfg_if! 
{ - if #[cfg(feature = "extra_traits")] { - impl PartialEq for ucontext_t { - fn eq(&self, other: &ucontext_t) -> bool { - self.uc_flags == other.uc_flags - && self.uc_link == other.uc_link - && self.uc_stack == other.uc_stack - && self.uc_mcontext == other.uc_mcontext - && self.uc_sigmask == other.uc_sigmask - } - } - impl Eq for ucontext_t {} - impl hash::Hash for ucontext_t { - fn hash(&self, state: &mut H) { - self.uc_flags.hash(state); - self.uc_link.hash(state); - self.uc_stack.hash(state); - self.uc_mcontext.hash(state); - self.uc_sigmask.hash(state); - } - } - } -} - -pub const SIGSTKSZ: size_t = 8192; -pub const MINSIGSTKSZ: size_t = 2048; - -pub const O_DIRECT: c_int = 0x10000; -pub const O_DIRECTORY: c_int = 0x4000; -pub const O_NOFOLLOW: c_int = 0x8000; -pub const O_ASYNC: c_int = 0x2000; -pub const O_LARGEFILE: c_int = 0o400000; - -pub const MADV_SOFT_OFFLINE: c_int = 101; -pub const MCL_CURRENT: c_int = 0x0001; -pub const MCL_FUTURE: c_int = 0x0002; -pub const MCL_ONFAULT: c_int = 0x0004; -pub const CBAUD: crate::tcflag_t = 0o0010017; -pub const TAB1: c_int = 0x00000800; -pub const TAB2: c_int = 0x00001000; -pub const TAB3: c_int = 0x00001800; -pub const CR1: c_int = 0x00000200; -pub const CR2: c_int = 0x00000400; -pub const CR3: c_int = 0x00000600; -pub const FF1: c_int = 0x00008000; -pub const BS1: c_int = 0x00002000; -pub const VT1: c_int = 0x00004000; -pub const VWERASE: usize = 14; -pub const VREPRINT: usize = 12; -pub const VSUSP: usize = 10; -pub const VSTART: usize = 8; -pub const VSTOP: usize = 9; -pub const VDISCARD: usize = 13; -pub const VTIME: usize = 5; -pub const IXON: crate::tcflag_t = 0x00000400; -pub const IXOFF: crate::tcflag_t = 0x00001000; -pub const ONLCR: crate::tcflag_t = 0x4; -pub const CSIZE: crate::tcflag_t = 0x00000030; -pub const CS6: crate::tcflag_t = 0x00000010; -pub const CS7: crate::tcflag_t = 0x00000020; -pub const CS8: crate::tcflag_t = 0x00000030; -pub const CSTOPB: crate::tcflag_t = 0x00000040; -pub const CREAD: crate::tcflag_t = 0x00000080; -pub const PARENB: crate::tcflag_t = 0x00000100; -pub const PARODD: crate::tcflag_t = 0x00000200; -pub const HUPCL: crate::tcflag_t = 0x00000400; -pub const CLOCAL: crate::tcflag_t = 0x00000800; -pub const ECHOKE: crate::tcflag_t = 0x00000800; -pub const ECHOE: crate::tcflag_t = 0x00000010; -pub const ECHOK: crate::tcflag_t = 0x00000020; -pub const ECHONL: crate::tcflag_t = 0x00000040; -pub const ECHOPRT: crate::tcflag_t = 0x00000400; -pub const ECHOCTL: crate::tcflag_t = 0x00000200; -pub const ISIG: crate::tcflag_t = 0x00000001; -pub const ICANON: crate::tcflag_t = 0x00000002; -pub const PENDIN: crate::tcflag_t = 0x00004000; -pub const NOFLSH: crate::tcflag_t = 0x00000080; -pub const CIBAUD: crate::tcflag_t = 0o02003600000; -pub const CBAUDEX: crate::tcflag_t = 0o010000; -pub const VSWTC: usize = 7; -pub const OLCUC: crate::tcflag_t = 0o000002; -pub const NLDLY: crate::tcflag_t = 0o000400; -pub const CRDLY: crate::tcflag_t = 0o003000; -pub const TABDLY: crate::tcflag_t = 0o014000; -pub const BSDLY: crate::tcflag_t = 0o020000; -pub const FFDLY: crate::tcflag_t = 0o100000; -pub const VTDLY: crate::tcflag_t = 0o040000; -pub const XTABS: crate::tcflag_t = 0o014000; -pub const B57600: crate::speed_t = 0o010001; -pub const B115200: crate::speed_t = 0o010002; -pub const B230400: crate::speed_t = 0o010003; -pub const B460800: crate::speed_t = 0o010004; -pub const B500000: crate::speed_t = 0o010005; -pub const B576000: crate::speed_t = 0o010006; -pub const B921600: crate::speed_t = 0o010007; -pub const 
B1000000: crate::speed_t = 0o010010; -pub const B1152000: crate::speed_t = 0o010011; -pub const B1500000: crate::speed_t = 0o010012; -pub const B2000000: crate::speed_t = 0o010013; -pub const B2500000: crate::speed_t = 0o010014; -pub const B3000000: crate::speed_t = 0o010015; -pub const B3500000: crate::speed_t = 0o010016; -pub const B4000000: crate::speed_t = 0o010017; - -pub const O_APPEND: c_int = 1024; -pub const O_CREAT: c_int = 64; -pub const O_EXCL: c_int = 128; -pub const O_NOCTTY: c_int = 256; -pub const O_NONBLOCK: c_int = 2048; -pub const O_SYNC: c_int = 1052672; -pub const O_RSYNC: c_int = 1052672; -pub const O_DSYNC: c_int = 4096; - -pub const MAP_ANON: c_int = 0x0020; -pub const MAP_GROWSDOWN: c_int = 0x0100; -pub const MAP_DENYWRITE: c_int = 0x0800; -pub const MAP_EXECUTABLE: c_int = 0x01000; -pub const MAP_LOCKED: c_int = 0x02000; -pub const MAP_NORESERVE: c_int = 0x04000; -pub const MAP_POPULATE: c_int = 0x08000; -pub const MAP_NONBLOCK: c_int = 0x010000; -pub const MAP_STACK: c_int = 0x020000; -pub const MAP_SYNC: c_int = 0x080000; - -pub const SOCK_STREAM: c_int = 1; -pub const SOCK_DGRAM: c_int = 2; - -pub const EDEADLK: c_int = 35; -pub const ENAMETOOLONG: c_int = 36; -pub const ENOLCK: c_int = 37; -pub const ENOSYS: c_int = 38; -pub const ENOTEMPTY: c_int = 39; -pub const ELOOP: c_int = 40; -pub const ENOMSG: c_int = 42; -pub const EIDRM: c_int = 43; -pub const ECHRNG: c_int = 44; -pub const EL2NSYNC: c_int = 45; -pub const EL3HLT: c_int = 46; -pub const EL3RST: c_int = 47; -pub const ELNRNG: c_int = 48; -pub const EUNATCH: c_int = 49; -pub const ENOCSI: c_int = 50; -pub const EL2HLT: c_int = 51; -pub const EBADE: c_int = 52; -pub const EBADR: c_int = 53; -pub const EXFULL: c_int = 54; -pub const ENOANO: c_int = 55; -pub const EBADRQC: c_int = 56; -pub const EBADSLT: c_int = 57; -pub const EDEADLOCK: c_int = EDEADLK; -pub const EMULTIHOP: c_int = 72; -pub const EBADMSG: c_int = 74; -pub const EOVERFLOW: c_int = 75; -pub const ENOTUNIQ: c_int = 76; -pub const EBADFD: c_int = 77; -pub const EREMCHG: c_int = 78; -pub const ELIBACC: c_int = 79; -pub const ELIBBAD: c_int = 80; -pub const ELIBSCN: c_int = 81; -pub const ELIBMAX: c_int = 82; -pub const ELIBEXEC: c_int = 83; -pub const EILSEQ: c_int = 84; -pub const ERESTART: c_int = 85; -pub const ESTRPIPE: c_int = 86; -pub const EUSERS: c_int = 87; -pub const ENOTSOCK: c_int = 88; -pub const EDESTADDRREQ: c_int = 89; -pub const EMSGSIZE: c_int = 90; -pub const EPROTOTYPE: c_int = 91; -pub const ENOPROTOOPT: c_int = 92; -pub const EPROTONOSUPPORT: c_int = 93; -pub const ESOCKTNOSUPPORT: c_int = 94; -pub const EOPNOTSUPP: c_int = 95; -pub const ENOTSUP: c_int = EOPNOTSUPP; -pub const EPFNOSUPPORT: c_int = 96; -pub const EAFNOSUPPORT: c_int = 97; -pub const EADDRINUSE: c_int = 98; -pub const EADDRNOTAVAIL: c_int = 99; -pub const ENETDOWN: c_int = 100; -pub const ENETUNREACH: c_int = 101; -pub const ENETRESET: c_int = 102; -pub const ECONNABORTED: c_int = 103; -pub const ECONNRESET: c_int = 104; -pub const ENOBUFS: c_int = 105; -pub const EISCONN: c_int = 106; -pub const ENOTCONN: c_int = 107; -pub const ESHUTDOWN: c_int = 108; -pub const ETOOMANYREFS: c_int = 109; -pub const ETIMEDOUT: c_int = 110; -pub const ECONNREFUSED: c_int = 111; -pub const EHOSTDOWN: c_int = 112; -pub const EHOSTUNREACH: c_int = 113; -pub const EALREADY: c_int = 114; -pub const EINPROGRESS: c_int = 115; -pub const ESTALE: c_int = 116; -pub const EUCLEAN: c_int = 117; -pub const ENOTNAM: c_int = 118; -pub const ENAVAIL: c_int = 119; -pub const EISNAM: 
c_int = 120; -pub const EREMOTEIO: c_int = 121; -pub const EDQUOT: c_int = 122; -pub const ENOMEDIUM: c_int = 123; -pub const EMEDIUMTYPE: c_int = 124; -pub const ECANCELED: c_int = 125; -pub const ENOKEY: c_int = 126; -pub const EKEYEXPIRED: c_int = 127; -pub const EKEYREVOKED: c_int = 128; -pub const EKEYREJECTED: c_int = 129; -pub const EOWNERDEAD: c_int = 130; -pub const ENOTRECOVERABLE: c_int = 131; -pub const ERFKILL: c_int = 132; -pub const EHWPOISON: c_int = 133; - -pub const SA_ONSTACK: c_int = 0x08000000; -pub const SA_SIGINFO: c_int = 0x00000004; -pub const SA_NOCLDWAIT: c_int = 0x00000002; - -pub const SIGCHLD: c_int = 17; -pub const SIGBUS: c_int = 7; -pub const SIGTTIN: c_int = 21; -pub const SIGTTOU: c_int = 22; -pub const SIGXCPU: c_int = 24; -pub const SIGXFSZ: c_int = 25; -pub const SIGVTALRM: c_int = 26; -pub const SIGPROF: c_int = 27; -pub const SIGWINCH: c_int = 28; -pub const SIGUSR1: c_int = 10; -pub const SIGUSR2: c_int = 12; -pub const SIGCONT: c_int = 18; -pub const SIGSTOP: c_int = 19; -pub const SIGTSTP: c_int = 20; -pub const SIGURG: c_int = 23; -pub const SIGIO: c_int = 29; -pub const SIGSYS: c_int = 31; -pub const SIGSTKFLT: c_int = 16; -pub const SIGPOLL: c_int = 29; -pub const SIGPWR: c_int = 30; -pub const SIG_SETMASK: c_int = 2; -pub const SIG_BLOCK: c_int = 0x000000; -pub const SIG_UNBLOCK: c_int = 0x01; - -pub const EXTPROC: crate::tcflag_t = 0x00010000; - -pub const MAP_HUGETLB: c_int = 0x040000; - -pub const F_GETLK: c_int = 12; -pub const F_GETOWN: c_int = 9; -pub const F_SETLK: c_int = 13; -pub const F_SETLKW: c_int = 14; -pub const F_SETOWN: c_int = 8; - -pub const VEOF: usize = 4; -pub const VEOL: usize = 11; -pub const VEOL2: usize = 16; -pub const VMIN: usize = 6; -pub const IEXTEN: crate::tcflag_t = 0x00008000; -pub const TOSTOP: crate::tcflag_t = 0x00000100; -pub const FLUSHO: crate::tcflag_t = 0x00001000; - -pub const POLLWRNORM: c_short = 0x100; -pub const POLLWRBAND: c_short = 0x200; - -// Syscall table -pub const SYS_restart_syscall: c_long = 0; -pub const SYS_exit: c_long = 1; -pub const SYS_fork: c_long = 2; -pub const SYS_read: c_long = 3; -pub const SYS_write: c_long = 4; -pub const SYS_open: c_long = 5; -pub const SYS_close: c_long = 6; -pub const SYS_creat: c_long = 8; -pub const SYS_link: c_long = 9; -pub const SYS_unlink: c_long = 10; -pub const SYS_execve: c_long = 11; -pub const SYS_chdir: c_long = 12; -pub const SYS_mknod: c_long = 14; -pub const SYS_chmod: c_long = 15; -pub const SYS_lchown: c_long = 16; -pub const SYS_lseek: c_long = 19; -pub const SYS_getpid: c_long = 20; -pub const SYS_mount: c_long = 21; -pub const SYS_setuid: c_long = 23; -pub const SYS_getuid: c_long = 24; -pub const SYS_ptrace: c_long = 26; -pub const SYS_pause: c_long = 29; -pub const SYS_access: c_long = 33; -pub const SYS_nice: c_long = 34; -pub const SYS_sync: c_long = 36; -pub const SYS_kill: c_long = 37; -pub const SYS_rename: c_long = 38; -pub const SYS_mkdir: c_long = 39; -pub const SYS_rmdir: c_long = 40; -pub const SYS_dup: c_long = 41; -pub const SYS_pipe: c_long = 42; -pub const SYS_times: c_long = 43; -pub const SYS_brk: c_long = 45; -pub const SYS_setgid: c_long = 46; -pub const SYS_getgid: c_long = 47; -pub const SYS_geteuid: c_long = 49; -pub const SYS_getegid: c_long = 50; -pub const SYS_acct: c_long = 51; -pub const SYS_umount2: c_long = 52; -pub const SYS_ioctl: c_long = 54; -pub const SYS_fcntl: c_long = 55; -pub const SYS_setpgid: c_long = 57; -pub const SYS_umask: c_long = 60; -pub const SYS_chroot: c_long = 61; -pub const 
SYS_ustat: c_long = 62; -pub const SYS_dup2: c_long = 63; -pub const SYS_getppid: c_long = 64; -pub const SYS_getpgrp: c_long = 65; -pub const SYS_setsid: c_long = 66; -pub const SYS_sigaction: c_long = 67; -pub const SYS_setreuid: c_long = 70; -pub const SYS_setregid: c_long = 71; -pub const SYS_sigsuspend: c_long = 72; -pub const SYS_sigpending: c_long = 73; -pub const SYS_sethostname: c_long = 74; -pub const SYS_setrlimit: c_long = 75; -pub const SYS_getrusage: c_long = 77; -pub const SYS_gettimeofday: c_long = 78; -pub const SYS_settimeofday: c_long = 79; -pub const SYS_getgroups: c_long = 80; -pub const SYS_setgroups: c_long = 81; -pub const SYS_symlink: c_long = 83; -pub const SYS_readlink: c_long = 85; -pub const SYS_uselib: c_long = 86; -pub const SYS_swapon: c_long = 87; -pub const SYS_reboot: c_long = 88; -pub const SYS_munmap: c_long = 91; -pub const SYS_truncate: c_long = 92; -pub const SYS_ftruncate: c_long = 93; -pub const SYS_fchmod: c_long = 94; -pub const SYS_fchown: c_long = 95; -pub const SYS_getpriority: c_long = 96; -pub const SYS_setpriority: c_long = 97; -pub const SYS_statfs: c_long = 99; -pub const SYS_fstatfs: c_long = 100; -pub const SYS_syslog: c_long = 103; -pub const SYS_setitimer: c_long = 104; -pub const SYS_getitimer: c_long = 105; -pub const SYS_stat: c_long = 106; -pub const SYS_lstat: c_long = 107; -pub const SYS_fstat: c_long = 108; -pub const SYS_vhangup: c_long = 111; -pub const SYS_wait4: c_long = 114; -pub const SYS_swapoff: c_long = 115; -pub const SYS_sysinfo: c_long = 116; -pub const SYS_fsync: c_long = 118; -pub const SYS_sigreturn: c_long = 119; -pub const SYS_clone: c_long = 120; -pub const SYS_setdomainname: c_long = 121; -pub const SYS_uname: c_long = 122; -pub const SYS_adjtimex: c_long = 124; -pub const SYS_mprotect: c_long = 125; -pub const SYS_sigprocmask: c_long = 126; -pub const SYS_init_module: c_long = 128; -pub const SYS_delete_module: c_long = 129; -pub const SYS_quotactl: c_long = 131; -pub const SYS_getpgid: c_long = 132; -pub const SYS_fchdir: c_long = 133; -pub const SYS_bdflush: c_long = 134; -pub const SYS_sysfs: c_long = 135; -pub const SYS_personality: c_long = 136; -pub const SYS_setfsuid: c_long = 138; -pub const SYS_setfsgid: c_long = 139; -pub const SYS__llseek: c_long = 140; -pub const SYS_getdents: c_long = 141; -pub const SYS__newselect: c_long = 142; -pub const SYS_flock: c_long = 143; -pub const SYS_msync: c_long = 144; -pub const SYS_readv: c_long = 145; -pub const SYS_writev: c_long = 146; -pub const SYS_getsid: c_long = 147; -pub const SYS_fdatasync: c_long = 148; -pub const SYS__sysctl: c_long = 149; -pub const SYS_mlock: c_long = 150; -pub const SYS_munlock: c_long = 151; -pub const SYS_mlockall: c_long = 152; -pub const SYS_munlockall: c_long = 153; -pub const SYS_sched_setparam: c_long = 154; -pub const SYS_sched_getparam: c_long = 155; -pub const SYS_sched_setscheduler: c_long = 156; -pub const SYS_sched_getscheduler: c_long = 157; -pub const SYS_sched_yield: c_long = 158; -pub const SYS_sched_get_priority_max: c_long = 159; -pub const SYS_sched_get_priority_min: c_long = 160; -pub const SYS_sched_rr_get_interval: c_long = 161; -pub const SYS_nanosleep: c_long = 162; -pub const SYS_mremap: c_long = 163; -pub const SYS_setresuid: c_long = 164; -pub const SYS_getresuid: c_long = 165; -pub const SYS_poll: c_long = 168; -pub const SYS_nfsservctl: c_long = 169; -pub const SYS_setresgid: c_long = 170; -pub const SYS_getresgid: c_long = 171; -pub const SYS_prctl: c_long = 172; -pub const SYS_rt_sigreturn: c_long = 
173; -pub const SYS_rt_sigaction: c_long = 174; -pub const SYS_rt_sigprocmask: c_long = 175; -pub const SYS_rt_sigpending: c_long = 176; -pub const SYS_rt_sigtimedwait: c_long = 177; -pub const SYS_rt_sigqueueinfo: c_long = 178; -pub const SYS_rt_sigsuspend: c_long = 179; -pub const SYS_pread64: c_long = 180; -pub const SYS_pwrite64: c_long = 181; -pub const SYS_chown: c_long = 182; -pub const SYS_getcwd: c_long = 183; -pub const SYS_capget: c_long = 184; -pub const SYS_capset: c_long = 185; -pub const SYS_sigaltstack: c_long = 186; -pub const SYS_sendfile: c_long = 187; -pub const SYS_vfork: c_long = 190; -pub const SYS_ugetrlimit: c_long = 191; -pub const SYS_mmap2: c_long = 192; -pub const SYS_truncate64: c_long = 193; -pub const SYS_ftruncate64: c_long = 194; -pub const SYS_stat64: c_long = 195; -pub const SYS_lstat64: c_long = 196; -pub const SYS_fstat64: c_long = 197; -pub const SYS_lchown32: c_long = 198; -pub const SYS_getuid32: c_long = 199; -pub const SYS_getgid32: c_long = 200; -pub const SYS_geteuid32: c_long = 201; -pub const SYS_getegid32: c_long = 202; -pub const SYS_setreuid32: c_long = 203; -pub const SYS_setregid32: c_long = 204; -pub const SYS_getgroups32: c_long = 205; -pub const SYS_setgroups32: c_long = 206; -pub const SYS_fchown32: c_long = 207; -pub const SYS_setresuid32: c_long = 208; -pub const SYS_getresuid32: c_long = 209; -pub const SYS_setresgid32: c_long = 210; -pub const SYS_getresgid32: c_long = 211; -pub const SYS_chown32: c_long = 212; -pub const SYS_setuid32: c_long = 213; -pub const SYS_setgid32: c_long = 214; -pub const SYS_setfsuid32: c_long = 215; -pub const SYS_setfsgid32: c_long = 216; -pub const SYS_getdents64: c_long = 217; -pub const SYS_pivot_root: c_long = 218; -pub const SYS_mincore: c_long = 219; -pub const SYS_madvise: c_long = 220; -pub const SYS_fcntl64: c_long = 221; -pub const SYS_gettid: c_long = 224; -pub const SYS_readahead: c_long = 225; -pub const SYS_setxattr: c_long = 226; -pub const SYS_lsetxattr: c_long = 227; -pub const SYS_fsetxattr: c_long = 228; -pub const SYS_getxattr: c_long = 229; -pub const SYS_lgetxattr: c_long = 230; -pub const SYS_fgetxattr: c_long = 231; -pub const SYS_listxattr: c_long = 232; -pub const SYS_llistxattr: c_long = 233; -pub const SYS_flistxattr: c_long = 234; -pub const SYS_removexattr: c_long = 235; -pub const SYS_lremovexattr: c_long = 236; -pub const SYS_fremovexattr: c_long = 237; -pub const SYS_tkill: c_long = 238; -pub const SYS_sendfile64: c_long = 239; -pub const SYS_futex: c_long = 240; -pub const SYS_sched_setaffinity: c_long = 241; -pub const SYS_sched_getaffinity: c_long = 242; -pub const SYS_io_setup: c_long = 243; -pub const SYS_io_destroy: c_long = 244; -pub const SYS_io_getevents: c_long = 245; -pub const SYS_io_submit: c_long = 246; -pub const SYS_io_cancel: c_long = 247; -pub const SYS_exit_group: c_long = 248; -pub const SYS_lookup_dcookie: c_long = 249; -pub const SYS_epoll_create: c_long = 250; -pub const SYS_epoll_ctl: c_long = 251; -pub const SYS_epoll_wait: c_long = 252; -pub const SYS_remap_file_pages: c_long = 253; -pub const SYS_set_tid_address: c_long = 256; -pub const SYS_timer_create: c_long = 257; -pub const SYS_timer_settime: c_long = 258; -pub const SYS_timer_gettime: c_long = 259; -pub const SYS_timer_getoverrun: c_long = 260; -pub const SYS_timer_delete: c_long = 261; -pub const SYS_clock_settime: c_long = 262; -pub const SYS_clock_gettime: c_long = 263; -pub const SYS_clock_getres: c_long = 264; -pub const SYS_clock_nanosleep: c_long = 265; -pub const SYS_statfs64: 
c_long = 266; -pub const SYS_fstatfs64: c_long = 267; -pub const SYS_tgkill: c_long = 268; -pub const SYS_utimes: c_long = 269; -pub const SYS_pciconfig_iobase: c_long = 271; -pub const SYS_pciconfig_read: c_long = 272; -pub const SYS_pciconfig_write: c_long = 273; -pub const SYS_mq_open: c_long = 274; -pub const SYS_mq_unlink: c_long = 275; -pub const SYS_mq_timedsend: c_long = 276; -pub const SYS_mq_timedreceive: c_long = 277; -pub const SYS_mq_notify: c_long = 278; -pub const SYS_mq_getsetattr: c_long = 279; -pub const SYS_waitid: c_long = 280; -pub const SYS_socket: c_long = 281; -pub const SYS_bind: c_long = 282; -pub const SYS_connect: c_long = 283; -pub const SYS_listen: c_long = 284; -pub const SYS_accept: c_long = 285; -pub const SYS_getsockname: c_long = 286; -pub const SYS_getpeername: c_long = 287; -pub const SYS_socketpair: c_long = 288; -pub const SYS_send: c_long = 289; -pub const SYS_sendto: c_long = 290; -pub const SYS_recv: c_long = 291; -pub const SYS_recvfrom: c_long = 292; -pub const SYS_shutdown: c_long = 293; -pub const SYS_setsockopt: c_long = 294; -pub const SYS_getsockopt: c_long = 295; -pub const SYS_sendmsg: c_long = 296; -pub const SYS_recvmsg: c_long = 297; -pub const SYS_semop: c_long = 298; -pub const SYS_semget: c_long = 299; -pub const SYS_semctl: c_long = 300; -pub const SYS_msgsnd: c_long = 301; -pub const SYS_msgrcv: c_long = 302; -pub const SYS_msgget: c_long = 303; -pub const SYS_msgctl: c_long = 304; -pub const SYS_shmat: c_long = 305; -pub const SYS_shmdt: c_long = 306; -pub const SYS_shmget: c_long = 307; -pub const SYS_shmctl: c_long = 308; -pub const SYS_add_key: c_long = 309; -pub const SYS_request_key: c_long = 310; -pub const SYS_keyctl: c_long = 311; -pub const SYS_semtimedop: c_long = 312; -pub const SYS_vserver: c_long = 313; -pub const SYS_ioprio_set: c_long = 314; -pub const SYS_ioprio_get: c_long = 315; -pub const SYS_inotify_init: c_long = 316; -pub const SYS_inotify_add_watch: c_long = 317; -pub const SYS_inotify_rm_watch: c_long = 318; -pub const SYS_mbind: c_long = 319; -pub const SYS_get_mempolicy: c_long = 320; -pub const SYS_set_mempolicy: c_long = 321; -pub const SYS_openat: c_long = 322; -pub const SYS_mkdirat: c_long = 323; -pub const SYS_mknodat: c_long = 324; -pub const SYS_fchownat: c_long = 325; -pub const SYS_futimesat: c_long = 326; -pub const SYS_fstatat64: c_long = 327; -pub const SYS_unlinkat: c_long = 328; -pub const SYS_renameat: c_long = 329; -pub const SYS_linkat: c_long = 330; -pub const SYS_symlinkat: c_long = 331; -pub const SYS_readlinkat: c_long = 332; -pub const SYS_fchmodat: c_long = 333; -pub const SYS_faccessat: c_long = 334; -pub const SYS_pselect6: c_long = 335; -pub const SYS_ppoll: c_long = 336; -pub const SYS_unshare: c_long = 337; -pub const SYS_set_robust_list: c_long = 338; -pub const SYS_get_robust_list: c_long = 339; -pub const SYS_splice: c_long = 340; -pub const SYS_tee: c_long = 342; -pub const SYS_vmsplice: c_long = 343; -pub const SYS_move_pages: c_long = 344; -pub const SYS_getcpu: c_long = 345; -pub const SYS_epoll_pwait: c_long = 346; -pub const SYS_kexec_load: c_long = 347; -pub const SYS_utimensat: c_long = 348; -pub const SYS_signalfd: c_long = 349; -pub const SYS_timerfd_create: c_long = 350; -pub const SYS_eventfd: c_long = 351; -pub const SYS_fallocate: c_long = 352; -pub const SYS_timerfd_settime: c_long = 353; -pub const SYS_timerfd_gettime: c_long = 354; -pub const SYS_signalfd4: c_long = 355; -pub const SYS_eventfd2: c_long = 356; -pub const SYS_epoll_create1: c_long = 357; -pub 
const SYS_dup3: c_long = 358; -pub const SYS_pipe2: c_long = 359; -pub const SYS_inotify_init1: c_long = 360; -pub const SYS_preadv: c_long = 361; -pub const SYS_pwritev: c_long = 362; -pub const SYS_rt_tgsigqueueinfo: c_long = 363; -pub const SYS_perf_event_open: c_long = 364; -pub const SYS_recvmmsg: c_long = 365; -pub const SYS_accept4: c_long = 366; -pub const SYS_fanotify_init: c_long = 367; -pub const SYS_fanotify_mark: c_long = 368; -pub const SYS_prlimit64: c_long = 369; -pub const SYS_name_to_handle_at: c_long = 370; -pub const SYS_open_by_handle_at: c_long = 371; -pub const SYS_clock_adjtime: c_long = 372; -pub const SYS_syncfs: c_long = 373; -pub const SYS_sendmmsg: c_long = 374; -pub const SYS_setns: c_long = 375; -pub const SYS_process_vm_readv: c_long = 376; -pub const SYS_process_vm_writev: c_long = 377; -pub const SYS_kcmp: c_long = 378; -pub const SYS_finit_module: c_long = 379; -pub const SYS_sched_setattr: c_long = 380; -pub const SYS_sched_getattr: c_long = 381; -pub const SYS_renameat2: c_long = 382; -pub const SYS_seccomp: c_long = 383; -pub const SYS_getrandom: c_long = 384; -pub const SYS_memfd_create: c_long = 385; -pub const SYS_bpf: c_long = 386; -pub const SYS_execveat: c_long = 387; -pub const SYS_userfaultfd: c_long = 388; -pub const SYS_membarrier: c_long = 389; -pub const SYS_mlock2: c_long = 390; -pub const SYS_copy_file_range: c_long = 391; -pub const SYS_preadv2: c_long = 392; -pub const SYS_pwritev2: c_long = 393; -pub const SYS_pkey_mprotect: c_long = 394; -pub const SYS_pkey_alloc: c_long = 395; -pub const SYS_pkey_free: c_long = 396; -pub const SYS_statx: c_long = 397; -pub const SYS_pidfd_send_signal: c_long = 424; -pub const SYS_io_uring_setup: c_long = 425; -pub const SYS_io_uring_enter: c_long = 426; -pub const SYS_io_uring_register: c_long = 427; -pub const SYS_open_tree: c_long = 428; -pub const SYS_move_mount: c_long = 429; -pub const SYS_fsopen: c_long = 430; -pub const SYS_fsconfig: c_long = 431; -pub const SYS_fsmount: c_long = 432; -pub const SYS_fspick: c_long = 433; -pub const SYS_pidfd_open: c_long = 434; -pub const SYS_clone3: c_long = 435; -pub const SYS_close_range: c_long = 436; -pub const SYS_openat2: c_long = 437; -pub const SYS_pidfd_getfd: c_long = 438; -pub const SYS_faccessat2: c_long = 439; -pub const SYS_process_madvise: c_long = 440; -pub const SYS_epoll_pwait2: c_long = 441; -pub const SYS_mount_setattr: c_long = 442; -pub const SYS_quotactl_fd: c_long = 443; -pub const SYS_landlock_create_ruleset: c_long = 444; -pub const SYS_landlock_add_rule: c_long = 445; -pub const SYS_landlock_restrict_self: c_long = 446; -pub const SYS_memfd_secret: c_long = 447; -pub const SYS_process_mrelease: c_long = 448; -pub const SYS_futex_waitv: c_long = 449; -pub const SYS_set_mempolicy_home_node: c_long = 450; -pub const SYS_mseal: c_long = 462; diff --git a/vendor/libc/src/unix/linux_like/linux/musl/b32/hexagon.rs b/vendor/libc/src/unix/linux_like/linux/musl/b32/hexagon.rs deleted file mode 100644 index b6879535541848..00000000000000 --- a/vendor/libc/src/unix/linux_like/linux/musl/b32/hexagon.rs +++ /dev/null @@ -1,621 +0,0 @@ -use crate::prelude::*; - -pub type wchar_t = u32; -pub type stat64 = crate::stat; - -s! 
{ - pub struct stat { - pub st_dev: crate::dev_t, - pub st_ino: c_ulonglong, - pub st_mode: c_uint, - pub st_nlink: c_uint, - pub st_uid: c_uint, - pub st_gid: c_uint, - pub st_rdev: c_ulonglong, - __st_rdev_padding: c_ulong, - pub st_size: c_longlong, - pub st_blksize: crate::blksize_t, - __st_blksize_padding: c_int, - pub st_blocks: crate::blkcnt_t, - pub st_atime: crate::time_t, - pub st_atime_nsec: c_long, - pub st_mtime: crate::time_t, - pub st_mtime_nsec: c_long, - pub st_ctime: crate::time_t, - pub st_ctime_nsec: c_long, - - __unused: [c_int; 2], - } - - pub struct stack_t { - pub ss_sp: *mut c_void, - pub ss_flags: c_int, - pub ss_size: size_t, - } - - pub struct ipc_perm { - #[cfg(musl_v1_2_3)] - pub __key: crate::key_t, - #[cfg(not(musl_v1_2_3))] - #[deprecated( - since = "0.2.173", - note = "This field is incorrectly named and will be changed - to __key in a future release" - )] - pub __ipc_perm_key: crate::key_t, - pub uid: crate::uid_t, - pub gid: crate::gid_t, - pub cuid: crate::uid_t, - pub cgid: crate::gid_t, - pub mode: crate::mode_t, - pub __seq: c_ushort, - } - - pub struct shmid_ds { - pub shm_perm: crate::ipc_perm, - pub shm_segsz: size_t, - pub shm_atime: crate::time_t, - __unused1: c_int, - pub shm_dtime: crate::time_t, - __unused2: c_int, - pub shm_ctime: crate::time_t, - __unused3: c_int, - pub shm_cpid: crate::pid_t, - pub shm_lpid: crate::pid_t, - pub shm_nattch: c_ulong, - __pad1: c_ulong, - __pad2: c_ulong, - } - - pub struct msqid_ds { - pub msg_perm: crate::ipc_perm, - pub msg_stime: crate::time_t, - __unused1: c_int, - pub msg_rtime: crate::time_t, - __unused2: c_int, - pub msg_ctime: crate::time_t, - __unused3: c_int, - pub __msg_cbytes: c_ulong, - pub msg_qnum: crate::msgqnum_t, - pub msg_qbytes: crate::msglen_t, - pub msg_lspid: crate::pid_t, - pub msg_lrpid: crate::pid_t, - __pad1: c_ulong, - __pad2: c_ulong, - } -} - -pub const AF_FILE: c_int = 1; -pub const AF_KCM: c_int = 41; -pub const AF_MAX: c_int = 43; -pub const AF_QIPCRTR: c_int = 42; -pub const EADDRINUSE: c_int = 98; -pub const EADDRNOTAVAIL: c_int = 99; -pub const EAFNOSUPPORT: c_int = 97; -pub const EALREADY: c_int = 114; -pub const EBADE: c_int = 52; -pub const EBADMSG: c_int = 74; -pub const EBADR: c_int = 53; -pub const EBADRQC: c_int = 56; -pub const EBADSLT: c_int = 57; -pub const ECANCELED: c_int = 125; -pub const ECHRNG: c_int = 44; -pub const ECONNABORTED: c_int = 103; -pub const ECONNREFUSED: c_int = 111; -pub const ECONNRESET: c_int = 104; -pub const EDEADLK: c_int = 35; -pub const EDEADLOCK: c_int = 35; -pub const EDESTADDRREQ: c_int = 89; -pub const EDQUOT: c_int = 122; -pub const EHOSTDOWN: c_int = 112; -pub const EHOSTUNREACH: c_int = 113; -pub const EHWPOISON: c_int = 133; -pub const EIDRM: c_int = 43; -pub const EILSEQ: c_int = 84; -pub const EINPROGRESS: c_int = 115; -pub const EISCONN: c_int = 106; -pub const EISNAM: c_int = 120; -pub const EKEYEXPIRED: c_int = 127; -pub const EKEYREJECTED: c_int = 129; -pub const EKEYREVOKED: c_int = 128; -pub const EL2HLT: c_int = 51; -pub const EL2NSYNC: c_int = 45; -pub const EL3HLT: c_int = 46; -pub const EL3RST: c_int = 47; -pub const ELIBACC: c_int = 79; -pub const ELIBBAD: c_int = 80; -pub const ELIBEXEC: c_int = 83; -pub const ELIBMAX: c_int = 82; -pub const ELIBSCN: c_int = 81; -pub const ELNRNG: c_int = 48; -pub const ELOOP: c_int = 40; -pub const EMEDIUMTYPE: c_int = 124; -pub const EMSGSIZE: c_int = 90; -pub const EMULTIHOP: c_int = 72; -pub const ENAMETOOLONG: c_int = 36; -pub const ENAVAIL: c_int = 119; -pub const ENETDOWN: 
c_int = 100; -pub const ENETRESET: c_int = 102; -pub const ENETUNREACH: c_int = 101; -pub const ENOANO: c_int = 55; -pub const ENOBUFS: c_int = 105; -pub const ENOCSI: c_int = 50; -pub const ENOKEY: c_int = 126; -pub const ENOLCK: c_int = 37; -pub const ENOMEDIUM: c_int = 123; -pub const ENOMSG: c_int = 42; -pub const ENOPROTOOPT: c_int = 92; -pub const ENOSYS: c_int = 38; -pub const ENOTCONN: c_int = 107; -pub const ENOTEMPTY: c_int = 39; -pub const ENOTNAM: c_int = 118; -pub const ENOTRECOVERABLE: c_int = 131; -pub const ENOTSOCK: c_int = 88; -pub const ENOTSUP: c_int = 95; -pub const ENOTUNIQ: c_int = 76; -pub const EOPNOTSUPP: c_int = 95; -pub const EOVERFLOW: c_int = 75; -pub const EOWNERDEAD: c_int = 130; -pub const EPFNOSUPPORT: c_int = 96; -pub const EREMCHG: c_int = 78; -pub const ERESTART: c_int = 85; -pub const ERFKILL: c_int = 132; -pub const ESHUTDOWN: c_int = 108; -pub const ESOCKTNOSUPPORT: c_int = 94; -pub const ESTALE: c_int = 116; -pub const ESTRPIPE: c_int = 86; -pub const ETOOMANYREFS: c_int = 109; -pub const ETIMEDOUT: c_int = 110; -pub const EUCLEAN: c_int = 117; -pub const EUNATCH: c_int = 49; -pub const EUSERS: c_int = 87; -pub const EXFULL: c_int = 54; -pub const EXTPROC: c_int = 65536; -pub const F_EXLCK: c_int = 4; -pub const F_GETLK: c_int = 12; -pub const F_GETOWN: c_int = 9; -pub const F_GETOWNER_UIDS: c_int = 17; -pub const F_GETOWN_EX: c_int = 16; -pub const F_GETSIG: c_int = 11; -pub const F_LINUX_SPECIFIC_BASE: c_int = 1024; -pub const FLUSHO: c_int = 4096; -pub const F_OWNER_PGRP: c_int = 2; -pub const F_OWNER_PID: c_int = 1; -pub const F_OWNER_TID: c_int = 0; -pub const F_SETLK: c_int = 13; -pub const F_SETLKW: c_int = 14; -pub const F_SETOWN: c_int = 8; -pub const F_SETOWN_EX: c_int = 15; -pub const F_SETSIG: c_int = 10; -pub const F_SHLCK: c_int = 8; -pub const IEXTEN: c_int = 32768; -pub const MAP_ANON: c_int = 32; -pub const MAP_DENYWRITE: c_int = 2048; -pub const MAP_EXECUTABLE: c_int = 4096; -pub const MAP_GROWSDOWN: c_int = 256; -pub const MAP_HUGETLB: c_int = 262144; -pub const MAP_LOCKED: c_int = 8192; -pub const MAP_NONBLOCK: c_int = 65536; -pub const MAP_NORESERVE: c_int = 16384; -pub const MAP_POPULATE: c_int = 32768; -pub const MAP_STACK: c_int = 131072; -pub const MAP_UNINITIALIZED: c_int = 0; -pub const O_APPEND: c_int = 1024; -pub const O_ASYNC: c_int = 8192; -pub const O_CREAT: c_int = 64; -pub const O_DIRECT: c_int = 16384; -pub const O_DIRECTORY: c_int = 65536; -pub const O_DSYNC: c_int = 4096; -pub const O_EXCL: c_int = 128; -pub const O_LARGEFILE: c_int = 32768; -pub const O_NOCTTY: c_int = 256; -pub const O_NOFOLLOW: c_int = 131072; -pub const O_NONBLOCK: c_int = 2048; -pub const O_SYNC: c_int = 1052672; -pub const PF_FILE: c_int = 1; -pub const PF_KCM: c_int = 41; -pub const PF_MAX: c_int = 43; -pub const PF_QIPCRTR: c_int = 42; -pub const SA_ONSTACK: c_int = 0x08000000; -pub const SA_SIGINFO: c_int = 0x00000004; -pub const SA_NOCLDWAIT: c_int = 0x00000002; -pub const SIGBUS: c_int = 7; -pub const SIGCHLD: c_int = 17; -pub const SIGCONT: c_int = 18; -pub const SIGIO: c_int = 29; -pub const SIGPOLL: c_int = 29; -pub const SIGPROF: c_int = 27; -pub const SIGPWR: c_int = 30; -pub const SIGSTKFLT: c_int = 16; -pub const SIGSTKSZ: size_t = 8192; -pub const MINSIGSTKSZ: size_t = 2048; -pub const SIGSTOP: c_int = 19; -pub const SIGSYS: c_int = 31; -pub const SIGTSTP: c_int = 20; -pub const SIGTTIN: c_int = 21; -pub const SIGTTOU: c_int = 22; -pub const SIGURG: c_int = 23; -pub const SIGUSR1: c_int = 10; -pub const SIGUSR2: c_int = 12; 
-pub const SIGVTALRM: c_int = 26; -pub const SIGWINCH: c_int = 28; -pub const SIGXCPU: c_int = 24; -pub const SIGXFSZ: c_int = 25; -pub const SIG_SETMASK: c_int = 2; // FIXME(musl) check these -pub const SIG_BLOCK: c_int = 0x000000; -pub const SIG_UNBLOCK: c_int = 0x01; -pub const SOCK_DGRAM: c_int = 2; -pub const SOCK_STREAM: c_int = 1; -pub const SOL_CAIF: c_int = 278; -pub const SOL_IUCV: c_int = 277; -pub const SOL_KCM: c_int = 281; -pub const SOL_NFC: c_int = 280; -pub const SOL_PNPIPE: c_int = 275; -pub const SOL_PPPOL2TP: c_int = 273; -pub const SOL_RDS: c_int = 276; -pub const SOL_RXRPC: c_int = 272; - -pub const SYS3264_fadvise64: c_int = 223; -pub const SYS3264_fcntl: c_int = 25; -pub const SYS3264_fstatat: c_int = 79; -pub const SYS3264_fstat: c_int = 80; -pub const SYS3264_fstatfs: c_int = 44; -pub const SYS3264_ftruncate: c_int = 46; -pub const SYS3264_lseek: c_int = 62; -pub const SYS3264_lstat: c_int = 1039; -pub const SYS3264_mmap: c_int = 222; -pub const SYS3264_sendfile: c_int = 71; -pub const SYS3264_stat: c_int = 1038; -pub const SYS3264_statfs: c_int = 43; -pub const SYS3264_truncate: c_int = 45; -pub const SYS_accept4: c_int = 242; -pub const SYS_accept: c_int = 202; -pub const SYS_access: c_int = 1033; -pub const SYS_acct: c_int = 89; -pub const SYS_add_key: c_int = 217; -pub const SYS_adjtimex: c_int = 171; -pub const SYS_alarm: c_int = 1059; -pub const SYS_arch_specific_syscall: c_int = 244; -pub const SYS_bdflush: c_int = 1075; -pub const SYS_bind: c_int = 200; -pub const SYS_bpf: c_int = 280; -pub const SYS_brk: c_int = 214; -pub const SYS_capget: c_int = 90; -pub const SYS_capset: c_int = 91; -pub const SYS_chdir: c_int = 49; -pub const SYS_chmod: c_int = 1028; -pub const SYS_chown: c_int = 1029; -pub const SYS_chroot: c_int = 51; -pub const SYS_clock_adjtime: c_int = 266; -pub const SYS_clock_getres: c_int = 114; -pub const SYS_clock_gettime: c_int = 113; -pub const SYS_clock_nanosleep: c_int = 115; -pub const SYS_clock_settime: c_int = 112; -pub const SYS_clone: c_int = 220; -pub const SYS_close: c_int = 57; -pub const SYS_connect: c_int = 203; -pub const SYS_copy_file_range: c_int = -1; // FIXME(hexagon) -pub const SYS_creat: c_int = 1064; -pub const SYS_delete_module: c_int = 106; -pub const SYS_dup2: c_int = 1041; -pub const SYS_dup3: c_int = 24; -pub const SYS_dup: c_int = 23; -pub const SYS_epoll_create1: c_int = 20; -pub const SYS_epoll_create: c_int = 1042; -pub const SYS_epoll_ctl: c_int = 21; -pub const SYS_epoll_pwait: c_int = 22; -pub const SYS_epoll_wait: c_int = 1069; -pub const SYS_eventfd2: c_int = 19; -pub const SYS_eventfd: c_int = 1044; -pub const SYS_execveat: c_int = 281; -pub const SYS_execve: c_int = 221; -pub const SYS_exit: c_int = 93; -pub const SYS_exit_group: c_int = 94; -pub const SYS_faccessat: c_int = 48; -pub const SYS_fadvise64_64: c_int = 223; -pub const SYS_fallocate: c_int = 47; -pub const SYS_fanotify_init: c_int = 262; -pub const SYS_fanotify_mark: c_int = 263; -pub const SYS_fchdir: c_int = 50; -pub const SYS_fchmodat: c_int = 53; -pub const SYS_fchmod: c_int = 52; -pub const SYS_fchownat: c_int = 54; -pub const SYS_fchown: c_int = 55; -pub const SYS_fcntl64: c_int = 25; -pub const SYS_fcntl: c_int = 25; -pub const SYS_fdatasync: c_int = 83; -pub const SYS_fgetxattr: c_int = 10; -pub const SYS_finit_module: c_int = 273; -pub const SYS_flistxattr: c_int = 13; -pub const SYS_flock: c_int = 32; -pub const SYS_fork: c_int = 1079; -pub const SYS_fremovexattr: c_int = 16; -pub const SYS_fsetxattr: c_int = 7; -pub const 
SYS_fstat64: c_int = 80; -pub const SYS_fstatat64: c_int = 79; -pub const SYS_fstatfs64: c_int = 44; -pub const SYS_fstatfs: c_int = 44; -pub const SYS_fsync: c_int = 82; -pub const SYS_ftruncate64: c_int = 46; -pub const SYS_ftruncate: c_int = 46; -pub const SYS_futex: c_int = 98; -pub const SYS_futimesat: c_int = 1066; -pub const SYS_getcpu: c_int = 168; -pub const SYS_getcwd: c_int = 17; -pub const SYS_getdents64: c_int = 61; -pub const SYS_getdents: c_int = 1065; -pub const SYS_getegid: c_int = 177; -pub const SYS_geteuid: c_int = 175; -pub const SYS_getgid: c_int = 176; -pub const SYS_getgroups: c_int = 158; -pub const SYS_getitimer: c_int = 102; -pub const SYS_get_mempolicy: c_int = 236; -pub const SYS_getpeername: c_int = 205; -pub const SYS_getpgid: c_int = 155; -pub const SYS_getpgrp: c_int = 1060; -pub const SYS_getpid: c_int = 172; -pub const SYS_getppid: c_int = 173; -pub const SYS_getpriority: c_int = 141; -pub const SYS_getrandom: c_int = 278; -pub const SYS_getresgid: c_int = 150; -pub const SYS_getresuid: c_int = 148; -pub const SYS_getrlimit: c_int = 163; -pub const SYS_get_robust_list: c_int = 100; -pub const SYS_getrusage: c_int = 165; -pub const SYS_getsid: c_int = 156; -pub const SYS_getsockname: c_int = 204; -pub const SYS_getsockopt: c_int = 209; -pub const SYS_gettid: c_int = 178; -pub const SYS_gettimeofday: c_int = 169; -pub const SYS_getuid: c_int = 174; -pub const SYS_getxattr: c_int = 8; -pub const SYS_init_module: c_int = 105; -pub const SYS_inotify_add_watch: c_int = 27; -pub const SYS_inotify_init1: c_int = 26; -pub const SYS_inotify_init: c_int = 1043; -pub const SYS_inotify_rm_watch: c_int = 28; -pub const SYS_io_cancel: c_int = 3; -pub const SYS_ioctl: c_int = 29; -pub const SYS_io_destroy: c_int = 1; -pub const SYS_io_getevents: c_int = 4; -pub const SYS_ioprio_get: c_int = 31; -pub const SYS_ioprio_set: c_int = 30; -pub const SYS_io_setup: c_int = 0; -pub const SYS_io_submit: c_int = 2; -pub const SYS_kcmp: c_int = 272; -pub const SYS_kexec_load: c_int = 104; -pub const SYS_keyctl: c_int = 219; -pub const SYS_kill: c_int = 129; -pub const SYS_lchown: c_int = 1032; -pub const SYS_lgetxattr: c_int = 9; -pub const SYS_linkat: c_int = 37; -pub const SYS_link: c_int = 1025; -pub const SYS_listen: c_int = 201; -pub const SYS_listxattr: c_int = 11; -pub const SYS_llistxattr: c_int = 12; -pub const SYS__llseek: c_int = 62; -pub const SYS_lookup_dcookie: c_int = 18; -pub const SYS_lremovexattr: c_int = 15; -pub const SYS_lseek: c_int = 62; -pub const SYS_lsetxattr: c_int = 6; -pub const SYS_lstat64: c_int = 1039; -pub const SYS_lstat: c_int = 1039; -pub const SYS_madvise: c_int = 233; -pub const SYS_mbind: c_int = 235; -pub const SYS_memfd_create: c_int = 279; -pub const SYS_migrate_pages: c_int = 238; -pub const SYS_mincore: c_int = 232; -pub const SYS_mkdirat: c_int = 34; -pub const SYS_mkdir: c_int = 1030; -pub const SYS_mknodat: c_int = 33; -pub const SYS_mknod: c_int = 1027; -pub const SYS_mlockall: c_int = 230; -pub const SYS_mlock: c_int = 228; -pub const SYS_mmap2: c_int = 222; -pub const SYS_mount: c_int = 40; -pub const SYS_move_pages: c_int = 239; -pub const SYS_mprotect: c_int = 226; -pub const SYS_mq_getsetattr: c_int = 185; -pub const SYS_mq_notify: c_int = 184; -pub const SYS_mq_open: c_int = 180; -pub const SYS_mq_timedreceive: c_int = 183; -pub const SYS_mq_timedsend: c_int = 182; -pub const SYS_mq_unlink: c_int = 181; -pub const SYS_mremap: c_int = 216; -pub const SYS_msgctl: c_int = 187; -pub const SYS_msgget: c_int = 186; -pub const 
SYS_msgrcv: c_int = 188; -pub const SYS_msgsnd: c_int = 189; -pub const SYS_msync: c_int = 227; -pub const SYS_munlockall: c_int = 231; -pub const SYS_munlock: c_int = 229; -pub const SYS_munmap: c_int = 215; -pub const SYS_name_to_handle_at: c_int = 264; -pub const SYS_nanosleep: c_int = 101; -pub const SYS_newfstatat: c_int = 79; -pub const SYS_nfsservctl: c_int = 42; -pub const SYS_oldwait4: c_int = 1072; -pub const SYS_openat: c_int = 56; -pub const SYS_open_by_handle_at: c_int = 265; -pub const SYS_open: c_int = 1024; -pub const SYS_pause: c_int = 1061; -pub const SYS_perf_event_open: c_int = 241; -pub const SYS_personality: c_int = 92; -pub const SYS_pipe2: c_int = 59; -pub const SYS_pipe: c_int = 1040; -pub const SYS_pivot_root: c_int = 41; -pub const SYS_poll: c_int = 1068; -pub const SYS_ppoll: c_int = 73; -pub const SYS_prctl: c_int = 167; -pub const SYS_pread64: c_int = 67; -pub const SYS_preadv: c_int = 69; -pub const SYS_prlimit64: c_int = 261; -pub const SYS_process_vm_readv: c_int = 270; -pub const SYS_process_vm_writev: c_int = 271; -pub const SYS_pselect6: c_int = 72; -pub const SYS_ptrace: c_int = 117; -pub const SYS_pwrite64: c_int = 68; -pub const SYS_pwritev: c_int = 70; -pub const SYS_quotactl: c_int = 60; -pub const SYS_readahead: c_int = 213; -pub const SYS_read: c_int = 63; -pub const SYS_readlinkat: c_int = 78; -pub const SYS_readlink: c_int = 1035; -pub const SYS_readv: c_int = 65; -pub const SYS_reboot: c_int = 142; -pub const SYS_recv: c_int = 1073; -pub const SYS_recvfrom: c_int = 207; -pub const SYS_recvmmsg: c_int = 243; -pub const SYS_recvmsg: c_int = 212; -pub const SYS_remap_file_pages: c_int = 234; -pub const SYS_removexattr: c_int = 14; -pub const SYS_renameat2: c_int = 276; -pub const SYS_renameat: c_int = 38; -pub const SYS_rename: c_int = 1034; -pub const SYS_request_key: c_int = 218; -pub const SYS_restart_syscall: c_int = 128; -pub const SYS_rmdir: c_int = 1031; -pub const SYS_rt_sigaction: c_int = 134; -pub const SYS_rt_sigpending: c_int = 136; -pub const SYS_rt_sigprocmask: c_int = 135; -pub const SYS_rt_sigqueueinfo: c_int = 138; -pub const SYS_rt_sigreturn: c_int = 139; -pub const SYS_rt_sigsuspend: c_int = 133; -pub const SYS_rt_sigtimedwait: c_int = 137; -pub const SYS_rt_tgsigqueueinfo: c_int = 240; -pub const SYS_sched_getaffinity: c_int = 123; -pub const SYS_sched_getattr: c_int = 275; -pub const SYS_sched_getparam: c_int = 121; -pub const SYS_sched_get_priority_max: c_int = 125; -pub const SYS_sched_get_priority_min: c_int = 126; -pub const SYS_sched_getscheduler: c_int = 120; -pub const SYS_sched_rr_get_interval: c_int = 127; -pub const SYS_sched_setaffinity: c_int = 122; -pub const SYS_sched_setattr: c_int = 274; -pub const SYS_sched_setparam: c_int = 118; -pub const SYS_sched_setscheduler: c_int = 119; -pub const SYS_sched_yield: c_int = 124; -pub const SYS_seccomp: c_int = 277; -pub const SYS_select: c_int = 1067; -pub const SYS_semctl: c_int = 191; -pub const SYS_semget: c_int = 190; -pub const SYS_semop: c_int = 193; -pub const SYS_semtimedop: c_int = 192; -pub const SYS_send: c_int = 1074; -pub const SYS_sendfile64: c_int = 71; -pub const SYS_sendfile: c_int = 71; -pub const SYS_sendmmsg: c_int = 269; -pub const SYS_sendmsg: c_int = 211; -pub const SYS_sendto: c_int = 206; -pub const SYS_setdomainname: c_int = 162; -pub const SYS_setfsgid: c_int = 152; -pub const SYS_setfsuid: c_int = 151; -pub const SYS_setgid: c_int = 144; -pub const SYS_setgroups: c_int = 159; -pub const SYS_sethostname: c_int = 161; -pub const SYS_setitimer: 
c_int = 103; -pub const SYS_set_mempolicy: c_int = 237; -pub const SYS_setns: c_int = 268; -pub const SYS_setpgid: c_int = 154; -pub const SYS_setpriority: c_int = 140; -pub const SYS_setregid: c_int = 143; -pub const SYS_setresgid: c_int = 149; -pub const SYS_setresuid: c_int = 147; -pub const SYS_setreuid: c_int = 145; -pub const SYS_setrlimit: c_int = 164; -pub const SYS_set_robust_list: c_int = 99; -pub const SYS_setsid: c_int = 157; -pub const SYS_setsockopt: c_int = 208; -pub const SYS_set_tid_address: c_int = 96; -pub const SYS_settimeofday: c_int = 170; -pub const SYS_setuid: c_int = 146; -pub const SYS_setxattr: c_int = 5; -pub const SYS_shmat: c_int = 196; -pub const SYS_shmctl: c_int = 195; -pub const SYS_shmdt: c_int = 197; -pub const SYS_shmget: c_int = 194; -pub const SYS_shutdown: c_int = 210; -pub const SYS_sigaltstack: c_int = 132; -pub const SYS_signalfd4: c_int = 74; -pub const SYS_signalfd: c_int = 1045; -pub const SYS_socket: c_int = 198; -pub const SYS_socketpair: c_int = 199; -pub const SYS_splice: c_int = 76; -pub const SYS_stat64: c_int = 1038; -pub const SYS_stat: c_int = 1038; -pub const SYS_statfs64: c_int = 43; -pub const SYS_swapoff: c_int = 225; -pub const SYS_swapon: c_int = 224; -pub const SYS_symlinkat: c_int = 36; -pub const SYS_symlink: c_int = 1036; -pub const SYS_sync: c_int = 81; -pub const SYS_sync_file_range2: c_int = 84; -pub const SYS_sync_file_range: c_int = 84; -pub const SYS_syncfs: c_int = 267; -pub const SYS_syscalls: c_int = 1080; -pub const SYS__sysctl: c_int = 1078; -pub const SYS_sysinfo: c_int = 179; -pub const SYS_syslog: c_int = 116; -pub const SYS_tee: c_int = 77; -pub const SYS_tgkill: c_int = 131; -pub const SYS_time: c_int = 1062; -pub const SYS_timer_create: c_int = 107; -pub const SYS_timer_delete: c_int = 111; -pub const SYS_timerfd_create: c_int = 85; -pub const SYS_timerfd_gettime: c_int = 87; -pub const SYS_timerfd_settime: c_int = 86; -pub const SYS_timer_getoverrun: c_int = 109; -pub const SYS_timer_gettime: c_int = 108; -pub const SYS_timer_settime: c_int = 110; -pub const SYS_times: c_int = 153; -pub const SYS_tkill: c_int = 130; -pub const SYS_truncate64: c_int = 45; -pub const SYS_truncate: c_int = 45; -pub const SYS_umask: c_int = 166; -pub const SYS_umount2: c_int = 39; -pub const SYS_umount: c_int = 1076; -pub const SYS_uname: c_int = 160; -pub const SYS_unlinkat: c_int = 35; -pub const SYS_unlink: c_int = 1026; -pub const SYS_unshare: c_int = 97; -pub const SYS_uselib: c_int = 1077; -pub const SYS_ustat: c_int = 1070; -pub const SYS_utime: c_int = 1063; -pub const SYS_utimensat: c_int = 88; -pub const SYS_utimes: c_int = 1037; -pub const SYS_vfork: c_int = 1071; -pub const SYS_vhangup: c_int = 58; -pub const SYS_vmsplice: c_int = 75; -pub const SYS_wait4: c_int = 260; -pub const SYS_waitid: c_int = 95; -pub const SYS_write: c_int = 64; -pub const SYS_writev: c_int = 66; -pub const SYS_statx: c_int = 291; -pub const SYS_pidfd_send_signal: c_long = 424; -pub const SYS_io_uring_setup: c_long = 425; -pub const SYS_io_uring_enter: c_long = 426; -pub const SYS_io_uring_register: c_long = 427; -pub const SYS_open_tree: c_long = 428; -pub const SYS_move_mount: c_long = 429; -pub const SYS_fsopen: c_long = 430; -pub const SYS_fsconfig: c_long = 431; -pub const SYS_fsmount: c_long = 432; -pub const SYS_fspick: c_long = 433; -pub const SYS_pidfd_open: c_long = 434; -pub const SYS_clone3: c_long = 435; -pub const SYS_close_range: c_long = 436; -pub const SYS_openat2: c_long = 437; -pub const SYS_pidfd_getfd: c_long = 438; -pub 
const SYS_faccessat2: c_long = 439; -pub const SYS_process_madvise: c_long = 440; -pub const SYS_epoll_pwait2: c_long = 441; -pub const SYS_mount_setattr: c_long = 442; -pub const TIOCM_LOOP: c_int = 32768; -pub const TIOCM_OUT1: c_int = 8192; -pub const TIOCM_OUT2: c_int = 16384; -pub const TIOCSER_TEMT: c_int = 1; -pub const TOSTOP: c_int = 256; -pub const VEOF: c_int = 4; -pub const VEOL2: c_int = 16; -pub const VEOL: c_int = 11; -pub const VMIN: c_int = 6; diff --git a/vendor/libc/src/unix/linux_like/linux/musl/b32/mips/mod.rs b/vendor/libc/src/unix/linux_like/linux/musl/b32/mips/mod.rs deleted file mode 100644 index a623ff9a9f7574..00000000000000 --- a/vendor/libc/src/unix/linux_like/linux/musl/b32/mips/mod.rs +++ /dev/null @@ -1,775 +0,0 @@ -use crate::off_t; -use crate::prelude::*; - -pub type wchar_t = c_int; - -s! { - pub struct stat { - pub st_dev: crate::dev_t, - __st_padding1: [c_long; 2], - pub st_ino: crate::ino_t, - pub st_mode: crate::mode_t, - pub st_nlink: crate::nlink_t, - pub st_uid: crate::uid_t, - pub st_gid: crate::gid_t, - pub st_rdev: crate::dev_t, - __st_padding2: [c_long; 2], - pub st_size: off_t, - pub st_atime: crate::time_t, - pub st_atime_nsec: c_long, - pub st_mtime: crate::time_t, - pub st_mtime_nsec: c_long, - pub st_ctime: crate::time_t, - pub st_ctime_nsec: c_long, - pub st_blksize: crate::blksize_t, - __st_padding3: c_long, - pub st_blocks: crate::blkcnt_t, - __st_padding4: [c_long; 14], - } - - pub struct stat64 { - pub st_dev: crate::dev_t, - __st_padding1: [c_long; 2], - pub st_ino: crate::ino64_t, - pub st_mode: crate::mode_t, - pub st_nlink: crate::nlink_t, - pub st_uid: crate::uid_t, - pub st_gid: crate::gid_t, - pub st_rdev: crate::dev_t, - __st_padding2: [c_long; 2], - pub st_size: off_t, - pub st_atime: crate::time_t, - pub st_atime_nsec: c_long, - pub st_mtime: crate::time_t, - pub st_mtime_nsec: c_long, - pub st_ctime: crate::time_t, - pub st_ctime_nsec: c_long, - pub st_blksize: crate::blksize_t, - __st_padding3: c_long, - pub st_blocks: crate::blkcnt64_t, - __st_padding4: [c_long; 14], - } - - pub struct stack_t { - pub ss_sp: *mut c_void, - pub ss_size: size_t, - pub ss_flags: c_int, - } - - pub struct ipc_perm { - #[cfg(musl_v1_2_3)] - pub __key: crate::key_t, - #[cfg(not(musl_v1_2_3))] - #[deprecated( - since = "0.2.173", - note = "This field is incorrectly named and will be changed - to __key in a future release." 
- )] - pub __ipc_perm_key: crate::key_t, - pub uid: crate::uid_t, - pub gid: crate::gid_t, - pub cuid: crate::uid_t, - pub cgid: crate::gid_t, - pub mode: crate::mode_t, - pub __seq: c_int, - __unused1: c_long, - __unused2: c_long, - } - - pub struct shmid_ds { - pub shm_perm: crate::ipc_perm, - pub shm_segsz: size_t, - pub shm_atime: crate::time_t, - pub shm_dtime: crate::time_t, - pub shm_ctime: crate::time_t, - pub shm_cpid: crate::pid_t, - pub shm_lpid: crate::pid_t, - pub shm_nattch: c_ulong, - __pad1: c_ulong, - __pad2: c_ulong, - } - - pub struct msqid_ds { - pub msg_perm: crate::ipc_perm, - #[cfg(target_endian = "big")] - __unused1: c_int, - pub msg_stime: crate::time_t, - #[cfg(target_endian = "little")] - __unused1: c_int, - #[cfg(target_endian = "big")] - __unused2: c_int, - pub msg_rtime: crate::time_t, - #[cfg(target_endian = "little")] - __unused2: c_int, - #[cfg(target_endian = "big")] - __unused3: c_int, - pub msg_ctime: crate::time_t, - #[cfg(target_endian = "little")] - __unused3: c_int, - pub __msg_cbytes: c_ulong, - pub msg_qnum: crate::msgqnum_t, - pub msg_qbytes: crate::msglen_t, - pub msg_lspid: crate::pid_t, - pub msg_lrpid: crate::pid_t, - __pad1: c_ulong, - __pad2: c_ulong, - } - - pub struct statfs { - pub f_type: c_ulong, - pub f_bsize: c_ulong, - pub f_frsize: c_ulong, - pub f_blocks: crate::fsblkcnt_t, - pub f_bfree: crate::fsblkcnt_t, - pub f_files: crate::fsfilcnt_t, - pub f_ffree: crate::fsfilcnt_t, - pub f_bavail: crate::fsblkcnt_t, - pub f_fsid: crate::fsid_t, - pub f_namelen: c_ulong, - pub f_flags: c_ulong, - pub f_spare: [c_ulong; 5], - } - - pub struct statfs64 { - pub f_type: c_ulong, - pub f_bsize: c_ulong, - pub f_frsize: c_ulong, - pub f_blocks: crate::fsblkcnt_t, - pub f_bfree: crate::fsblkcnt_t, - pub f_files: crate::fsfilcnt_t, - pub f_ffree: crate::fsfilcnt_t, - pub f_bavail: crate::fsblkcnt_t, - pub f_fsid: crate::fsid_t, - pub f_namelen: c_ulong, - pub f_flags: c_ulong, - pub f_spare: [c_ulong; 5], - } -} - -s_no_extra_traits! 
{ - #[repr(align(8))] - pub struct max_align_t { - priv_: [f32; 4], - } -} - -pub const SIGSTKSZ: size_t = 8192; -pub const MINSIGSTKSZ: size_t = 2048; - -pub const O_DIRECT: c_int = 0o100000; -pub const O_DIRECTORY: c_int = 0o200000; -pub const O_NOFOLLOW: c_int = 0o400000; -pub const O_ASYNC: c_int = 0o10000; -pub const O_LARGEFILE: c_int = 0x2000; - -pub const MCL_CURRENT: c_int = 0x0001; -pub const MCL_FUTURE: c_int = 0x0002; -pub const MCL_ONFAULT: c_int = 0x0004; -pub const CBAUD: crate::tcflag_t = 0o0010017; -pub const TAB1: c_int = 0x00000800; -pub const TAB2: c_int = 0x00001000; -pub const TAB3: c_int = 0x00001800; -pub const CR1: c_int = 0x00000200; -pub const CR2: c_int = 0x00000400; -pub const CR3: c_int = 0x00000600; -pub const FF1: c_int = 0x00008000; -pub const BS1: c_int = 0x00002000; -pub const VT1: c_int = 0x00004000; -pub const VWERASE: usize = 14; -pub const VREPRINT: usize = 12; -pub const VSUSP: usize = 10; -pub const VSTART: usize = 8; -pub const VSTOP: usize = 9; -pub const VDISCARD: usize = 13; -pub const VTIME: usize = 5; -pub const IXON: crate::tcflag_t = 0x00000400; -pub const IXOFF: crate::tcflag_t = 0x00001000; -pub const ONLCR: crate::tcflag_t = 0x4; -pub const CSIZE: crate::tcflag_t = 0x00000030; -pub const CS6: crate::tcflag_t = 0x00000010; -pub const CS7: crate::tcflag_t = 0x00000020; -pub const CS8: crate::tcflag_t = 0x00000030; -pub const CSTOPB: crate::tcflag_t = 0x00000040; -pub const CREAD: crate::tcflag_t = 0x00000080; -pub const PARENB: crate::tcflag_t = 0x00000100; -pub const PARODD: crate::tcflag_t = 0x00000200; -pub const HUPCL: crate::tcflag_t = 0x00000400; -pub const CLOCAL: crate::tcflag_t = 0x00000800; -pub const ECHOKE: crate::tcflag_t = 0x00000800; -pub const ECHOE: crate::tcflag_t = 0x00000010; -pub const ECHOK: crate::tcflag_t = 0x00000020; -pub const ECHONL: crate::tcflag_t = 0x00000040; -pub const ECHOPRT: crate::tcflag_t = 0x00000400; -pub const ECHOCTL: crate::tcflag_t = 0x00000200; -pub const ISIG: crate::tcflag_t = 0x00000001; -pub const ICANON: crate::tcflag_t = 0x00000002; -pub const PENDIN: crate::tcflag_t = 0x00004000; -pub const NOFLSH: crate::tcflag_t = 0x00000080; -pub const CIBAUD: crate::tcflag_t = 0o02003600000; -pub const CBAUDEX: crate::tcflag_t = 0o010000; -pub const VSWTC: usize = 7; -pub const OLCUC: crate::tcflag_t = 0o000002; -pub const NLDLY: crate::tcflag_t = 0o000400; -pub const CRDLY: crate::tcflag_t = 0o003000; -pub const TABDLY: crate::tcflag_t = 0o014000; -pub const BSDLY: crate::tcflag_t = 0o020000; -pub const FFDLY: crate::tcflag_t = 0o100000; -pub const VTDLY: crate::tcflag_t = 0o040000; -pub const XTABS: crate::tcflag_t = 0o014000; -pub const B57600: crate::speed_t = 0o010001; -pub const B115200: crate::speed_t = 0o010002; -pub const B230400: crate::speed_t = 0o010003; -pub const B460800: crate::speed_t = 0o010004; -pub const B500000: crate::speed_t = 0o010005; -pub const B576000: crate::speed_t = 0o010006; -pub const B921600: crate::speed_t = 0o010007; -pub const B1000000: crate::speed_t = 0o010010; -pub const B1152000: crate::speed_t = 0o010011; -pub const B1500000: crate::speed_t = 0o010012; -pub const B2000000: crate::speed_t = 0o010013; -pub const B2500000: crate::speed_t = 0o010014; -pub const B3000000: crate::speed_t = 0o010015; -pub const B3500000: crate::speed_t = 0o010016; -pub const B4000000: crate::speed_t = 0o010017; - -pub const O_APPEND: c_int = 0o010; -pub const O_CREAT: c_int = 0o400; -pub const O_EXCL: c_int = 0o2000; -pub const O_NOCTTY: c_int = 0o4000; -pub const O_NONBLOCK: c_int = 
0o200; -pub const O_SYNC: c_int = 0o40020; -pub const O_RSYNC: c_int = 0o40020; -pub const O_DSYNC: c_int = 0o020; - -pub const MAP_ANON: c_int = 0x800; -pub const MAP_GROWSDOWN: c_int = 0x1000; -pub const MAP_DENYWRITE: c_int = 0x2000; -pub const MAP_EXECUTABLE: c_int = 0x4000; -pub const MAP_LOCKED: c_int = 0x8000; -pub const MAP_NORESERVE: c_int = 0x0400; -pub const MAP_POPULATE: c_int = 0x10000; -pub const MAP_NONBLOCK: c_int = 0x20000; -pub const MAP_STACK: c_int = 0x40000; -pub const MAP_HUGETLB: c_int = 0x80000; - -pub const EDEADLK: c_int = 45; -pub const ENAMETOOLONG: c_int = 78; -pub const ENOLCK: c_int = 46; -pub const ENOSYS: c_int = 89; -pub const ENOTEMPTY: c_int = 93; -pub const ELOOP: c_int = 90; -pub const ENOMSG: c_int = 35; -pub const EIDRM: c_int = 36; -pub const ECHRNG: c_int = 37; -pub const EL2NSYNC: c_int = 38; -pub const EL3HLT: c_int = 39; -pub const EL3RST: c_int = 40; -pub const ELNRNG: c_int = 41; -pub const EUNATCH: c_int = 42; -pub const ENOCSI: c_int = 43; -pub const EL2HLT: c_int = 44; -pub const EBADE: c_int = 50; -pub const EBADR: c_int = 51; -pub const EXFULL: c_int = 52; -pub const ENOANO: c_int = 53; -pub const EBADRQC: c_int = 54; -pub const EBADSLT: c_int = 55; -pub const EDEADLOCK: c_int = 56; -pub const EMULTIHOP: c_int = 74; -pub const EOVERFLOW: c_int = 79; -pub const ENOTUNIQ: c_int = 80; -pub const EBADFD: c_int = 81; -pub const EBADMSG: c_int = 77; -pub const EREMCHG: c_int = 82; -pub const ELIBACC: c_int = 83; -pub const ELIBBAD: c_int = 84; -pub const ELIBSCN: c_int = 85; -pub const ELIBMAX: c_int = 86; -pub const ELIBEXEC: c_int = 87; -pub const EILSEQ: c_int = 88; -pub const ERESTART: c_int = 91; -pub const ESTRPIPE: c_int = 92; -pub const EUSERS: c_int = 94; -pub const ENOTSOCK: c_int = 95; -pub const EDESTADDRREQ: c_int = 96; -pub const EMSGSIZE: c_int = 97; -pub const EPROTOTYPE: c_int = 98; -pub const ENOPROTOOPT: c_int = 99; -pub const EPROTONOSUPPORT: c_int = 120; -pub const ESOCKTNOSUPPORT: c_int = 121; -pub const EOPNOTSUPP: c_int = 122; -pub const ENOTSUP: c_int = EOPNOTSUPP; -pub const EPFNOSUPPORT: c_int = 123; -pub const EAFNOSUPPORT: c_int = 124; -pub const EADDRINUSE: c_int = 125; -pub const EADDRNOTAVAIL: c_int = 126; -pub const ENETDOWN: c_int = 127; -pub const ENETUNREACH: c_int = 128; -pub const ENETRESET: c_int = 129; -pub const ECONNABORTED: c_int = 130; -pub const ECONNRESET: c_int = 131; -pub const ENOBUFS: c_int = 132; -pub const EISCONN: c_int = 133; -pub const ENOTCONN: c_int = 134; -pub const ESHUTDOWN: c_int = 143; -pub const ETOOMANYREFS: c_int = 144; -pub const ETIMEDOUT: c_int = 145; -pub const ECONNREFUSED: c_int = 146; -pub const EHOSTDOWN: c_int = 147; -pub const EHOSTUNREACH: c_int = 148; -pub const EALREADY: c_int = 149; -pub const EINPROGRESS: c_int = 150; -pub const ESTALE: c_int = 151; -pub const EUCLEAN: c_int = 135; -pub const ENOTNAM: c_int = 137; -pub const ENAVAIL: c_int = 138; -pub const EISNAM: c_int = 139; -pub const EREMOTEIO: c_int = 140; -pub const EDQUOT: c_int = 1133; -pub const ENOMEDIUM: c_int = 159; -pub const EMEDIUMTYPE: c_int = 160; -pub const ECANCELED: c_int = 158; -pub const ENOKEY: c_int = 161; -pub const EKEYEXPIRED: c_int = 162; -pub const EKEYREVOKED: c_int = 163; -pub const EKEYREJECTED: c_int = 164; -pub const EOWNERDEAD: c_int = 165; -pub const ENOTRECOVERABLE: c_int = 166; -pub const EHWPOISON: c_int = 168; -pub const ERFKILL: c_int = 167; - -pub const SOCK_STREAM: c_int = 2; -pub const SOCK_DGRAM: c_int = 1; - -pub const SA_ONSTACK: c_int = 0x08000000; -pub const 
SA_SIGINFO: c_int = 8; -pub const SA_NOCLDWAIT: c_int = 0x10000; - -pub const SIGEMT: c_int = 7; -pub const SIGCHLD: c_int = 18; -pub const SIGBUS: c_int = 10; -pub const SIGTTIN: c_int = 26; -pub const SIGTTOU: c_int = 27; -pub const SIGXCPU: c_int = 30; -pub const SIGXFSZ: c_int = 31; -pub const SIGVTALRM: c_int = 28; -pub const SIGPROF: c_int = 29; -pub const SIGWINCH: c_int = 20; -pub const SIGUSR1: c_int = 16; -pub const SIGUSR2: c_int = 17; -pub const SIGCONT: c_int = 25; -pub const SIGSTOP: c_int = 23; -pub const SIGTSTP: c_int = 24; -pub const SIGURG: c_int = 21; -pub const SIGIO: c_int = 22; -pub const SIGSYS: c_int = 12; -pub const SIGSTKFLT: c_int = 7; -pub const SIGPOLL: c_int = crate::SIGIO; -pub const SIGPWR: c_int = 19; -pub const SIG_SETMASK: c_int = 3; -pub const SIG_BLOCK: c_int = 1; -pub const SIG_UNBLOCK: c_int = 2; - -pub const EXTPROC: crate::tcflag_t = 0o200000; - -pub const F_GETLK: c_int = 33; -pub const F_GETOWN: c_int = 23; -pub const F_SETLK: c_int = 34; -pub const F_SETLKW: c_int = 35; -pub const F_SETOWN: c_int = 24; - -pub const VEOF: usize = 16; -pub const VEOL: usize = 17; -pub const VEOL2: usize = 6; -pub const VMIN: usize = 4; -pub const IEXTEN: crate::tcflag_t = 0o000400; -pub const TOSTOP: crate::tcflag_t = 0o100000; -pub const FLUSHO: crate::tcflag_t = 0o020000; - -pub const POLLWRNORM: c_short = 0x4; -pub const POLLWRBAND: c_short = 0x100; - -pub const SYS_syscall: c_long = 4000 + 0; -pub const SYS_exit: c_long = 4000 + 1; -pub const SYS_fork: c_long = 4000 + 2; -pub const SYS_read: c_long = 4000 + 3; -pub const SYS_write: c_long = 4000 + 4; -pub const SYS_open: c_long = 4000 + 5; -pub const SYS_close: c_long = 4000 + 6; -pub const SYS_waitpid: c_long = 4000 + 7; -pub const SYS_creat: c_long = 4000 + 8; -pub const SYS_link: c_long = 4000 + 9; -pub const SYS_unlink: c_long = 4000 + 10; -pub const SYS_execve: c_long = 4000 + 11; -pub const SYS_chdir: c_long = 4000 + 12; -pub const SYS_time: c_long = 4000 + 13; -pub const SYS_mknod: c_long = 4000 + 14; -pub const SYS_chmod: c_long = 4000 + 15; -pub const SYS_lchown: c_long = 4000 + 16; -pub const SYS_break: c_long = 4000 + 17; -pub const SYS_lseek: c_long = 4000 + 19; -pub const SYS_getpid: c_long = 4000 + 20; -pub const SYS_mount: c_long = 4000 + 21; -pub const SYS_umount: c_long = 4000 + 22; -pub const SYS_setuid: c_long = 4000 + 23; -pub const SYS_getuid: c_long = 4000 + 24; -pub const SYS_stime: c_long = 4000 + 25; -pub const SYS_ptrace: c_long = 4000 + 26; -pub const SYS_alarm: c_long = 4000 + 27; -pub const SYS_pause: c_long = 4000 + 29; -pub const SYS_utime: c_long = 4000 + 30; -pub const SYS_stty: c_long = 4000 + 31; -pub const SYS_gtty: c_long = 4000 + 32; -pub const SYS_access: c_long = 4000 + 33; -pub const SYS_nice: c_long = 4000 + 34; -pub const SYS_ftime: c_long = 4000 + 35; -pub const SYS_sync: c_long = 4000 + 36; -pub const SYS_kill: c_long = 4000 + 37; -pub const SYS_rename: c_long = 4000 + 38; -pub const SYS_mkdir: c_long = 4000 + 39; -pub const SYS_rmdir: c_long = 4000 + 40; -pub const SYS_dup: c_long = 4000 + 41; -pub const SYS_pipe: c_long = 4000 + 42; -pub const SYS_times: c_long = 4000 + 43; -pub const SYS_prof: c_long = 4000 + 44; -pub const SYS_brk: c_long = 4000 + 45; -pub const SYS_setgid: c_long = 4000 + 46; -pub const SYS_getgid: c_long = 4000 + 47; -pub const SYS_signal: c_long = 4000 + 48; -pub const SYS_geteuid: c_long = 4000 + 49; -pub const SYS_getegid: c_long = 4000 + 50; -pub const SYS_acct: c_long = 4000 + 51; -pub const SYS_umount2: c_long = 4000 + 52; -pub const 
SYS_lock: c_long = 4000 + 53; -pub const SYS_ioctl: c_long = 4000 + 54; -pub const SYS_fcntl: c_long = 4000 + 55; -pub const SYS_mpx: c_long = 4000 + 56; -pub const SYS_setpgid: c_long = 4000 + 57; -pub const SYS_ulimit: c_long = 4000 + 58; -pub const SYS_umask: c_long = 4000 + 60; -pub const SYS_chroot: c_long = 4000 + 61; -pub const SYS_ustat: c_long = 4000 + 62; -pub const SYS_dup2: c_long = 4000 + 63; -pub const SYS_getppid: c_long = 4000 + 64; -pub const SYS_getpgrp: c_long = 4000 + 65; -pub const SYS_setsid: c_long = 4000 + 66; -pub const SYS_sigaction: c_long = 4000 + 67; -pub const SYS_sgetmask: c_long = 4000 + 68; -pub const SYS_ssetmask: c_long = 4000 + 69; -pub const SYS_setreuid: c_long = 4000 + 70; -pub const SYS_setregid: c_long = 4000 + 71; -pub const SYS_sigsuspend: c_long = 4000 + 72; -pub const SYS_sigpending: c_long = 4000 + 73; -pub const SYS_sethostname: c_long = 4000 + 74; -pub const SYS_setrlimit: c_long = 4000 + 75; -pub const SYS_getrlimit: c_long = 4000 + 76; -pub const SYS_getrusage: c_long = 4000 + 77; -pub const SYS_gettimeofday: c_long = 4000 + 78; -pub const SYS_settimeofday: c_long = 4000 + 79; -pub const SYS_getgroups: c_long = 4000 + 80; -pub const SYS_setgroups: c_long = 4000 + 81; -pub const SYS_symlink: c_long = 4000 + 83; -pub const SYS_readlink: c_long = 4000 + 85; -pub const SYS_uselib: c_long = 4000 + 86; -pub const SYS_swapon: c_long = 4000 + 87; -pub const SYS_reboot: c_long = 4000 + 88; -pub const SYS_readdir: c_long = 4000 + 89; -pub const SYS_mmap: c_long = 4000 + 90; -pub const SYS_munmap: c_long = 4000 + 91; -pub const SYS_truncate: c_long = 4000 + 92; -pub const SYS_ftruncate: c_long = 4000 + 93; -pub const SYS_fchmod: c_long = 4000 + 94; -pub const SYS_fchown: c_long = 4000 + 95; -pub const SYS_getpriority: c_long = 4000 + 96; -pub const SYS_setpriority: c_long = 4000 + 97; -pub const SYS_profil: c_long = 4000 + 98; -pub const SYS_statfs: c_long = 4000 + 99; -pub const SYS_fstatfs: c_long = 4000 + 100; -pub const SYS_ioperm: c_long = 4000 + 101; -pub const SYS_socketcall: c_long = 4000 + 102; -pub const SYS_syslog: c_long = 4000 + 103; -pub const SYS_setitimer: c_long = 4000 + 104; -pub const SYS_getitimer: c_long = 4000 + 105; -pub const SYS_stat: c_long = 4000 + 106; -pub const SYS_lstat: c_long = 4000 + 107; -pub const SYS_fstat: c_long = 4000 + 108; -pub const SYS_iopl: c_long = 4000 + 110; -pub const SYS_vhangup: c_long = 4000 + 111; -pub const SYS_idle: c_long = 4000 + 112; -pub const SYS_vm86: c_long = 4000 + 113; -pub const SYS_wait4: c_long = 4000 + 114; -pub const SYS_swapoff: c_long = 4000 + 115; -pub const SYS_sysinfo: c_long = 4000 + 116; -pub const SYS_ipc: c_long = 4000 + 117; -pub const SYS_fsync: c_long = 4000 + 118; -pub const SYS_sigreturn: c_long = 4000 + 119; -pub const SYS_clone: c_long = 4000 + 120; -pub const SYS_setdomainname: c_long = 4000 + 121; -pub const SYS_uname: c_long = 4000 + 122; -pub const SYS_modify_ldt: c_long = 4000 + 123; -pub const SYS_adjtimex: c_long = 4000 + 124; -pub const SYS_mprotect: c_long = 4000 + 125; -pub const SYS_sigprocmask: c_long = 4000 + 126; -#[deprecated(since = "0.2.70", note = "Functional up to 2.6 kernel")] -pub const SYS_create_module: c_long = 4000 + 127; -pub const SYS_init_module: c_long = 4000 + 128; -pub const SYS_delete_module: c_long = 4000 + 129; -#[deprecated(since = "0.2.70", note = "Functional up to 2.6 kernel")] -pub const SYS_get_kernel_syms: c_long = 4000 + 130; -pub const SYS_quotactl: c_long = 4000 + 131; -pub const SYS_getpgid: c_long = 4000 + 132; -pub const 
SYS_fchdir: c_long = 4000 + 133; -pub const SYS_bdflush: c_long = 4000 + 134; -pub const SYS_sysfs: c_long = 4000 + 135; -pub const SYS_personality: c_long = 4000 + 136; -pub const SYS_afs_syscall: c_long = 4000 + 137; -pub const SYS_setfsuid: c_long = 4000 + 138; -pub const SYS_setfsgid: c_long = 4000 + 139; -pub const SYS__llseek: c_long = 4000 + 140; -pub const SYS_getdents: c_long = 4000 + 141; -pub const SYS_flock: c_long = 4000 + 143; -pub const SYS_msync: c_long = 4000 + 144; -pub const SYS_readv: c_long = 4000 + 145; -pub const SYS_writev: c_long = 4000 + 146; -pub const SYS_cacheflush: c_long = 4000 + 147; -pub const SYS_cachectl: c_long = 4000 + 148; -pub const SYS_sysmips: c_long = 4000 + 149; -pub const SYS_getsid: c_long = 4000 + 151; -pub const SYS_fdatasync: c_long = 4000 + 152; -pub const SYS__sysctl: c_long = 4000 + 153; -pub const SYS_mlock: c_long = 4000 + 154; -pub const SYS_munlock: c_long = 4000 + 155; -pub const SYS_mlockall: c_long = 4000 + 156; -pub const SYS_munlockall: c_long = 4000 + 157; -pub const SYS_sched_setparam: c_long = 4000 + 158; -pub const SYS_sched_getparam: c_long = 4000 + 159; -pub const SYS_sched_setscheduler: c_long = 4000 + 160; -pub const SYS_sched_getscheduler: c_long = 4000 + 161; -pub const SYS_sched_yield: c_long = 4000 + 162; -pub const SYS_sched_get_priority_max: c_long = 4000 + 163; -pub const SYS_sched_get_priority_min: c_long = 4000 + 164; -pub const SYS_sched_rr_get_interval: c_long = 4000 + 165; -pub const SYS_nanosleep: c_long = 4000 + 166; -pub const SYS_mremap: c_long = 4000 + 167; -pub const SYS_accept: c_long = 4000 + 168; -pub const SYS_bind: c_long = 4000 + 169; -pub const SYS_connect: c_long = 4000 + 170; -pub const SYS_getpeername: c_long = 4000 + 171; -pub const SYS_getsockname: c_long = 4000 + 172; -pub const SYS_getsockopt: c_long = 4000 + 173; -pub const SYS_listen: c_long = 4000 + 174; -pub const SYS_recv: c_long = 4000 + 175; -pub const SYS_recvfrom: c_long = 4000 + 176; -pub const SYS_recvmsg: c_long = 4000 + 177; -pub const SYS_send: c_long = 4000 + 178; -pub const SYS_sendmsg: c_long = 4000 + 179; -pub const SYS_sendto: c_long = 4000 + 180; -pub const SYS_setsockopt: c_long = 4000 + 181; -pub const SYS_shutdown: c_long = 4000 + 182; -pub const SYS_socket: c_long = 4000 + 183; -pub const SYS_socketpair: c_long = 4000 + 184; -pub const SYS_setresuid: c_long = 4000 + 185; -pub const SYS_getresuid: c_long = 4000 + 186; -#[deprecated(since = "0.2.70", note = "Functional up to 2.6 kernel")] -pub const SYS_query_module: c_long = 4000 + 187; -pub const SYS_poll: c_long = 4000 + 188; -pub const SYS_nfsservctl: c_long = 4000 + 189; -pub const SYS_setresgid: c_long = 4000 + 190; -pub const SYS_getresgid: c_long = 4000 + 191; -pub const SYS_prctl: c_long = 4000 + 192; -pub const SYS_rt_sigreturn: c_long = 4000 + 193; -pub const SYS_rt_sigaction: c_long = 4000 + 194; -pub const SYS_rt_sigprocmask: c_long = 4000 + 195; -pub const SYS_rt_sigpending: c_long = 4000 + 196; -pub const SYS_rt_sigtimedwait: c_long = 4000 + 197; -pub const SYS_rt_sigqueueinfo: c_long = 4000 + 198; -pub const SYS_rt_sigsuspend: c_long = 4000 + 199; -pub const SYS_chown: c_long = 4000 + 202; -pub const SYS_getcwd: c_long = 4000 + 203; -pub const SYS_capget: c_long = 4000 + 204; -pub const SYS_capset: c_long = 4000 + 205; -pub const SYS_sigaltstack: c_long = 4000 + 206; -pub const SYS_sendfile: c_long = 4000 + 207; -pub const SYS_getpmsg: c_long = 4000 + 208; -pub const SYS_putpmsg: c_long = 4000 + 209; -pub const SYS_mmap2: c_long = 4000 + 210; -pub const 
SYS_truncate64: c_long = 4000 + 211; -pub const SYS_ftruncate64: c_long = 4000 + 212; -pub const SYS_stat64: c_long = 4000 + 213; -pub const SYS_lstat64: c_long = 4000 + 214; -pub const SYS_fstat64: c_long = 4000 + 215; -pub const SYS_pivot_root: c_long = 4000 + 216; -pub const SYS_mincore: c_long = 4000 + 217; -pub const SYS_madvise: c_long = 4000 + 218; -pub const SYS_getdents64: c_long = 4000 + 219; -pub const SYS_fcntl64: c_long = 4000 + 220; -pub const SYS_gettid: c_long = 4000 + 222; -pub const SYS_readahead: c_long = 4000 + 223; -pub const SYS_setxattr: c_long = 4000 + 224; -pub const SYS_lsetxattr: c_long = 4000 + 225; -pub const SYS_fsetxattr: c_long = 4000 + 226; -pub const SYS_getxattr: c_long = 4000 + 227; -pub const SYS_lgetxattr: c_long = 4000 + 228; -pub const SYS_fgetxattr: c_long = 4000 + 229; -pub const SYS_listxattr: c_long = 4000 + 230; -pub const SYS_llistxattr: c_long = 4000 + 231; -pub const SYS_flistxattr: c_long = 4000 + 232; -pub const SYS_removexattr: c_long = 4000 + 233; -pub const SYS_lremovexattr: c_long = 4000 + 234; -pub const SYS_fremovexattr: c_long = 4000 + 235; -pub const SYS_tkill: c_long = 4000 + 236; -pub const SYS_sendfile64: c_long = 4000 + 237; -pub const SYS_futex: c_long = 4000 + 238; -pub const SYS_sched_setaffinity: c_long = 4000 + 239; -pub const SYS_sched_getaffinity: c_long = 4000 + 240; -pub const SYS_io_setup: c_long = 4000 + 241; -pub const SYS_io_destroy: c_long = 4000 + 242; -pub const SYS_io_getevents: c_long = 4000 + 243; -pub const SYS_io_submit: c_long = 4000 + 244; -pub const SYS_io_cancel: c_long = 4000 + 245; -pub const SYS_exit_group: c_long = 4000 + 246; -pub const SYS_lookup_dcookie: c_long = 4000 + 247; -pub const SYS_epoll_create: c_long = 4000 + 248; -pub const SYS_epoll_ctl: c_long = 4000 + 249; -pub const SYS_epoll_wait: c_long = 4000 + 250; -pub const SYS_remap_file_pages: c_long = 4000 + 251; -pub const SYS_set_tid_address: c_long = 4000 + 252; -pub const SYS_restart_syscall: c_long = 4000 + 253; -pub const SYS_statfs64: c_long = 4000 + 255; -pub const SYS_fstatfs64: c_long = 4000 + 256; -pub const SYS_timer_create: c_long = 4000 + 257; -pub const SYS_timer_settime: c_long = 4000 + 258; -pub const SYS_timer_gettime: c_long = 4000 + 259; -pub const SYS_timer_getoverrun: c_long = 4000 + 260; -pub const SYS_timer_delete: c_long = 4000 + 261; -pub const SYS_clock_settime: c_long = 4000 + 262; -pub const SYS_clock_gettime: c_long = 4000 + 263; -pub const SYS_clock_getres: c_long = 4000 + 264; -pub const SYS_clock_nanosleep: c_long = 4000 + 265; -pub const SYS_tgkill: c_long = 4000 + 266; -pub const SYS_utimes: c_long = 4000 + 267; -pub const SYS_mbind: c_long = 4000 + 268; -pub const SYS_get_mempolicy: c_long = 4000 + 269; -pub const SYS_set_mempolicy: c_long = 4000 + 270; -pub const SYS_mq_open: c_long = 4000 + 271; -pub const SYS_mq_unlink: c_long = 4000 + 272; -pub const SYS_mq_timedsend: c_long = 4000 + 273; -pub const SYS_mq_timedreceive: c_long = 4000 + 274; -pub const SYS_mq_notify: c_long = 4000 + 275; -pub const SYS_mq_getsetattr: c_long = 4000 + 276; -pub const SYS_vserver: c_long = 4000 + 277; -pub const SYS_waitid: c_long = 4000 + 278; -/* pub const SYS_sys_setaltroot: c_long = 4000 + 279; */ -pub const SYS_add_key: c_long = 4000 + 280; -pub const SYS_request_key: c_long = 4000 + 281; -pub const SYS_keyctl: c_long = 4000 + 282; -pub const SYS_set_thread_area: c_long = 4000 + 283; -pub const SYS_inotify_init: c_long = 4000 + 284; -pub const SYS_inotify_add_watch: c_long = 4000 + 285; -pub const 
SYS_inotify_rm_watch: c_long = 4000 + 286; -pub const SYS_migrate_pages: c_long = 4000 + 287; -pub const SYS_openat: c_long = 4000 + 288; -pub const SYS_mkdirat: c_long = 4000 + 289; -pub const SYS_mknodat: c_long = 4000 + 290; -pub const SYS_fchownat: c_long = 4000 + 291; -pub const SYS_futimesat: c_long = 4000 + 292; -pub const SYS_unlinkat: c_long = 4000 + 294; -pub const SYS_renameat: c_long = 4000 + 295; -pub const SYS_linkat: c_long = 4000 + 296; -pub const SYS_symlinkat: c_long = 4000 + 297; -pub const SYS_readlinkat: c_long = 4000 + 298; -pub const SYS_fchmodat: c_long = 4000 + 299; -pub const SYS_faccessat: c_long = 4000 + 300; -pub const SYS_pselect6: c_long = 4000 + 301; -pub const SYS_ppoll: c_long = 4000 + 302; -pub const SYS_unshare: c_long = 4000 + 303; -pub const SYS_splice: c_long = 4000 + 304; -pub const SYS_sync_file_range: c_long = 4000 + 305; -pub const SYS_tee: c_long = 4000 + 306; -pub const SYS_vmsplice: c_long = 4000 + 307; -pub const SYS_move_pages: c_long = 4000 + 308; -pub const SYS_set_robust_list: c_long = 4000 + 309; -pub const SYS_get_robust_list: c_long = 4000 + 310; -pub const SYS_kexec_load: c_long = 4000 + 311; -pub const SYS_getcpu: c_long = 4000 + 312; -pub const SYS_epoll_pwait: c_long = 4000 + 313; -pub const SYS_ioprio_set: c_long = 4000 + 314; -pub const SYS_ioprio_get: c_long = 4000 + 315; -pub const SYS_utimensat: c_long = 4000 + 316; -pub const SYS_signalfd: c_long = 4000 + 317; -pub const SYS_timerfd: c_long = 4000 + 318; -pub const SYS_eventfd: c_long = 4000 + 319; -pub const SYS_fallocate: c_long = 4000 + 320; -pub const SYS_timerfd_create: c_long = 4000 + 321; -pub const SYS_timerfd_gettime: c_long = 4000 + 322; -pub const SYS_timerfd_settime: c_long = 4000 + 323; -pub const SYS_signalfd4: c_long = 4000 + 324; -pub const SYS_eventfd2: c_long = 4000 + 325; -pub const SYS_epoll_create1: c_long = 4000 + 326; -pub const SYS_dup3: c_long = 4000 + 327; -pub const SYS_pipe2: c_long = 4000 + 328; -pub const SYS_inotify_init1: c_long = 4000 + 329; -pub const SYS_preadv: c_long = 4000 + 330; -pub const SYS_pwritev: c_long = 4000 + 331; -pub const SYS_rt_tgsigqueueinfo: c_long = 4000 + 332; -pub const SYS_perf_event_open: c_long = 4000 + 333; -pub const SYS_accept4: c_long = 4000 + 334; -pub const SYS_recvmmsg: c_long = 4000 + 335; -pub const SYS_fanotify_init: c_long = 4000 + 336; -pub const SYS_fanotify_mark: c_long = 4000 + 337; -pub const SYS_prlimit64: c_long = 4000 + 338; -pub const SYS_name_to_handle_at: c_long = 4000 + 339; -pub const SYS_open_by_handle_at: c_long = 4000 + 340; -pub const SYS_clock_adjtime: c_long = 4000 + 341; -pub const SYS_syncfs: c_long = 4000 + 342; -pub const SYS_sendmmsg: c_long = 4000 + 343; -pub const SYS_setns: c_long = 4000 + 344; -pub const SYS_process_vm_readv: c_long = 4000 + 345; -pub const SYS_process_vm_writev: c_long = 4000 + 346; -pub const SYS_kcmp: c_long = 4000 + 347; -pub const SYS_finit_module: c_long = 4000 + 348; -pub const SYS_sched_setattr: c_long = 4000 + 349; -pub const SYS_sched_getattr: c_long = 4000 + 350; -pub const SYS_renameat2: c_long = 4000 + 351; -pub const SYS_seccomp: c_long = 4000 + 352; -pub const SYS_getrandom: c_long = 4000 + 353; -pub const SYS_memfd_create: c_long = 4000 + 354; -pub const SYS_bpf: c_long = 4000 + 355; -pub const SYS_execveat: c_long = 4000 + 356; -pub const SYS_userfaultfd: c_long = 4000 + 357; -pub const SYS_membarrier: c_long = 4000 + 358; -pub const SYS_mlock2: c_long = 4000 + 359; -pub const SYS_copy_file_range: c_long = 4000 + 360; -pub const SYS_preadv2: 
c_long = 4000 + 361; -pub const SYS_pwritev2: c_long = 4000 + 362; -pub const SYS_pkey_mprotect: c_long = 4000 + 363; -pub const SYS_pkey_alloc: c_long = 4000 + 364; -pub const SYS_pkey_free: c_long = 4000 + 365; -pub const SYS_statx: c_long = 4000 + 366; -pub const SYS_pidfd_send_signal: c_long = 4000 + 424; -pub const SYS_io_uring_setup: c_long = 4000 + 425; -pub const SYS_io_uring_enter: c_long = 4000 + 426; -pub const SYS_io_uring_register: c_long = 4000 + 427; -pub const SYS_open_tree: c_long = 4000 + 428; -pub const SYS_move_mount: c_long = 4000 + 429; -pub const SYS_fsopen: c_long = 4000 + 430; -pub const SYS_fsconfig: c_long = 4000 + 431; -pub const SYS_fsmount: c_long = 4000 + 432; -pub const SYS_fspick: c_long = 4000 + 433; -pub const SYS_pidfd_open: c_long = 4000 + 434; -pub const SYS_clone3: c_long = 4000 + 435; -pub const SYS_close_range: c_long = 4000 + 436; -pub const SYS_openat2: c_long = 4000 + 437; -pub const SYS_pidfd_getfd: c_long = 4000 + 438; -pub const SYS_faccessat2: c_long = 4000 + 439; -pub const SYS_process_madvise: c_long = 4000 + 440; -pub const SYS_epoll_pwait2: c_long = 4000 + 441; -pub const SYS_mount_setattr: c_long = 4000 + 442; -pub const SYS_quotactl_fd: c_long = 4000 + 443; -pub const SYS_landlock_create_ruleset: c_long = 4000 + 444; -pub const SYS_landlock_add_rule: c_long = 4000 + 445; -pub const SYS_landlock_restrict_self: c_long = 4000 + 446; -pub const SYS_memfd_secret: c_long = 4000 + 447; -pub const SYS_process_mrelease: c_long = 4000 + 448; -pub const SYS_futex_waitv: c_long = 4000 + 449; -pub const SYS_set_mempolicy_home_node: c_long = 4000 + 450; diff --git a/vendor/libc/src/unix/linux_like/linux/musl/b32/mod.rs b/vendor/libc/src/unix/linux_like/linux/musl/b32/mod.rs deleted file mode 100644 index 00b3d7705090fb..00000000000000 --- a/vendor/libc/src/unix/linux_like/linux/musl/b32/mod.rs +++ /dev/null @@ -1,65 +0,0 @@ -use crate::prelude::*; - -pub type nlink_t = u32; -pub type blksize_t = c_long; -pub type __u64 = c_ulonglong; -pub type __s64 = c_longlong; -pub type regoff_t = c_int; - -s! { - pub struct pthread_attr_t { - __size: [u32; 9], - } - - pub struct sigset_t { - __val: [c_ulong; 32], - } - - pub struct msghdr { - pub msg_name: *mut c_void, - pub msg_namelen: crate::socklen_t, - pub msg_iov: *mut crate::iovec, - pub msg_iovlen: c_int, - pub msg_control: *mut c_void, - pub msg_controllen: crate::socklen_t, - pub msg_flags: c_int, - } - - pub struct cmsghdr { - pub cmsg_len: crate::socklen_t, - pub cmsg_level: c_int, - pub cmsg_type: c_int, - } - - pub struct sem_t { - __val: [c_int; 4], - } -} - -pub const __SIZEOF_PTHREAD_RWLOCK_T: usize = 32; -pub const __SIZEOF_PTHREAD_MUTEX_T: usize = 24; -pub const __SIZEOF_PTHREAD_BARRIER_T: usize = 20; - -cfg_if! 
{ - if #[cfg(any(target_arch = "x86"))] { - mod x86; - pub use self::x86::*; - } else if #[cfg(any(target_arch = "mips"))] { - mod mips; - pub use self::mips::*; - } else if #[cfg(any(target_arch = "arm"))] { - mod arm; - pub use self::arm::*; - } else if #[cfg(any(target_arch = "powerpc"))] { - mod powerpc; - pub use self::powerpc::*; - } else if #[cfg(any(target_arch = "hexagon"))] { - mod hexagon; - pub use self::hexagon::*; - } else if #[cfg(any(target_arch = "riscv32"))] { - mod riscv32; - pub use self::riscv32::*; - } else { - // Unknown target_arch - } -} diff --git a/vendor/libc/src/unix/linux_like/linux/musl/b32/powerpc.rs b/vendor/libc/src/unix/linux_like/linux/musl/b32/powerpc.rs deleted file mode 100644 index a07dfda17794e8..00000000000000 --- a/vendor/libc/src/unix/linux_like/linux/musl/b32/powerpc.rs +++ /dev/null @@ -1,766 +0,0 @@ -use crate::off_t; -use crate::prelude::*; - -pub type wchar_t = i32; - -s! { - pub struct termios { - pub c_iflag: crate::tcflag_t, - pub c_oflag: crate::tcflag_t, - pub c_cflag: crate::tcflag_t, - pub c_lflag: crate::tcflag_t, - pub c_cc: [crate::cc_t; crate::NCCS], - pub c_line: crate::cc_t, - pub __c_ispeed: crate::speed_t, - pub __c_ospeed: crate::speed_t, - } - - pub struct stat { - pub st_dev: crate::dev_t, - pub st_ino: crate::ino_t, - pub st_mode: crate::mode_t, - pub st_nlink: crate::nlink_t, - pub st_uid: crate::uid_t, - pub st_gid: crate::gid_t, - pub st_rdev: crate::dev_t, - __st_rdev_padding: c_short, - pub st_size: off_t, - pub st_blksize: crate::blksize_t, - pub st_blocks: crate::blkcnt_t, - pub st_atime: crate::time_t, - pub st_atime_nsec: c_long, - pub st_mtime: crate::time_t, - pub st_mtime_nsec: c_long, - pub st_ctime: crate::time_t, - pub st_ctime_nsec: c_long, - __unused: [c_long; 2], - } - - pub struct stat64 { - pub st_dev: crate::dev_t, - pub st_ino: crate::ino_t, - pub st_mode: crate::mode_t, - pub st_nlink: crate::nlink_t, - pub st_uid: crate::uid_t, - pub st_gid: crate::gid_t, - pub st_rdev: crate::dev_t, - __st_rdev_padding: c_short, - pub st_size: off_t, - pub st_blksize: crate::blksize_t, - pub st_blocks: crate::blkcnt_t, - pub st_atime: crate::time_t, - pub st_atime_nsec: c_long, - pub st_mtime: crate::time_t, - pub st_mtime_nsec: c_long, - pub st_ctime: crate::time_t, - pub st_ctime_nsec: c_long, - __unused: [c_long; 2], - } - - pub struct stack_t { - pub ss_sp: *mut c_void, - pub ss_flags: c_int, - pub ss_size: size_t, - } - - pub struct ipc_perm { - #[cfg(musl_v1_2_3)] - pub __key: crate::key_t, - #[cfg(not(musl_v1_2_3))] - #[deprecated( - since = "0.2.173", - note = "This field is incorrectly named and will be changed - to __key in a future release." 
- )] - pub __ipc_perm_key: crate::key_t, - pub uid: crate::uid_t, - pub gid: crate::gid_t, - pub cuid: crate::uid_t, - pub cgid: crate::gid_t, - pub mode: crate::mode_t, - pub __seq: c_int, - __pad1: c_int, - __pad2: c_longlong, - __pad3: c_longlong, - } - - pub struct shmid_ds { - pub shm_perm: crate::ipc_perm, - __unused1: c_int, - pub shm_atime: crate::time_t, - __unused2: c_int, - pub shm_dtime: crate::time_t, - __unused3: c_int, - pub shm_ctime: crate::time_t, - __unused4: c_int, - pub shm_segsz: size_t, - pub shm_cpid: crate::pid_t, - pub shm_lpid: crate::pid_t, - pub shm_nattch: c_ulong, - __pad1: c_ulong, - __pad2: c_ulong, - } - - pub struct msqid_ds { - pub msg_perm: crate::ipc_perm, - __unused1: c_int, - pub msg_stime: crate::time_t, - __unused2: c_int, - pub msg_rtime: crate::time_t, - __unused3: c_int, - pub msg_ctime: crate::time_t, - pub __msg_cbytes: c_ulong, - pub msg_qnum: crate::msgqnum_t, - pub msg_qbytes: crate::msglen_t, - pub msg_lspid: crate::pid_t, - pub msg_lrpid: crate::pid_t, - __pad1: c_ulong, - __pad2: c_ulong, - } -} - -pub const MADV_SOFT_OFFLINE: c_int = 101; -pub const SIGSTKSZ: size_t = 10240; -pub const MINSIGSTKSZ: size_t = 4096; - -pub const O_DIRECT: c_int = 0x20000; -pub const O_DIRECTORY: c_int = 0x4000; -pub const O_NOFOLLOW: c_int = 0x8000; -pub const O_ASYNC: c_int = 0x2000; -pub const O_LARGEFILE: c_int = 0x10000; - -pub const MCL_CURRENT: c_int = 0x2000; -pub const MCL_FUTURE: c_int = 0x4000; -pub const MCL_ONFAULT: c_int = 0x8000; -pub const CBAUD: crate::tcflag_t = 0o0000377; -pub const TAB1: c_int = 0x00000400; -pub const TAB2: c_int = 0x00000800; -pub const TAB3: c_int = 0x00000C00; -pub const CR1: c_int = 0x00001000; -pub const CR2: c_int = 0x00002000; -pub const CR3: c_int = 0x00003000; -pub const FF1: c_int = 0x00004000; -pub const BS1: c_int = 0x00008000; -pub const VT1: c_int = 0x00010000; -pub const VWERASE: usize = 10; -pub const VREPRINT: usize = 11; -pub const VSUSP: usize = 12; -pub const VSTART: usize = 13; -pub const VSTOP: usize = 14; -pub const VDISCARD: usize = 16; -pub const VTIME: usize = 7; -pub const IXON: crate::tcflag_t = 0x00000200; -pub const IXOFF: crate::tcflag_t = 0x00000400; -pub const ONLCR: crate::tcflag_t = 0x00000002; -pub const CSIZE: crate::tcflag_t = 0x00000300; -pub const CS6: crate::tcflag_t = 0x00000100; -pub const CS7: crate::tcflag_t = 0x00000200; -pub const CS8: crate::tcflag_t = 0x00000300; -pub const CSTOPB: crate::tcflag_t = 0x00000400; -pub const CREAD: crate::tcflag_t = 0x00000800; -pub const PARENB: crate::tcflag_t = 0x00001000; -pub const PARODD: crate::tcflag_t = 0x00002000; -pub const HUPCL: crate::tcflag_t = 0x00004000; -pub const CLOCAL: crate::tcflag_t = 0x00008000; -pub const ECHOKE: crate::tcflag_t = 0x00000001; -pub const ECHOE: crate::tcflag_t = 0x00000002; -pub const ECHOK: crate::tcflag_t = 0x00000004; -pub const ECHONL: crate::tcflag_t = 0x00000010; -pub const ECHOPRT: crate::tcflag_t = 0x00000020; -pub const ECHOCTL: crate::tcflag_t = 0x00000040; -pub const ISIG: crate::tcflag_t = 0x00000080; -pub const ICANON: crate::tcflag_t = 0x00000100; -pub const PENDIN: crate::tcflag_t = 0x20000000; -pub const NOFLSH: crate::tcflag_t = 0x80000000; -pub const CIBAUD: crate::tcflag_t = 0o00077600000; -pub const CBAUDEX: crate::tcflag_t = 0o000020; -pub const VSWTC: usize = 9; -pub const OLCUC: crate::tcflag_t = 0o000004; -pub const NLDLY: crate::tcflag_t = 0o001400; -pub const CRDLY: crate::tcflag_t = 0o030000; -pub const TABDLY: crate::tcflag_t = 0o006000; -pub const BSDLY: crate::tcflag_t = 
0o100000; -pub const FFDLY: crate::tcflag_t = 0o040000; -pub const VTDLY: crate::tcflag_t = 0o200000; -pub const XTABS: crate::tcflag_t = 0o006000; -pub const B57600: crate::speed_t = 0o000020; -pub const B115200: crate::speed_t = 0o000021; -pub const B230400: crate::speed_t = 0o000022; -pub const B460800: crate::speed_t = 0o000023; -pub const B500000: crate::speed_t = 0o000024; -pub const B576000: crate::speed_t = 0o000025; -pub const B921600: crate::speed_t = 0o000026; -pub const B1000000: crate::speed_t = 0o000027; -pub const B1152000: crate::speed_t = 0o000030; -pub const B1500000: crate::speed_t = 0o000031; -pub const B2000000: crate::speed_t = 0o000032; -pub const B2500000: crate::speed_t = 0o000033; -pub const B3000000: crate::speed_t = 0o000034; -pub const B3500000: crate::speed_t = 0o000035; -pub const B4000000: crate::speed_t = 0o000036; - -pub const O_APPEND: c_int = 1024; -pub const O_CREAT: c_int = 64; -pub const O_EXCL: c_int = 128; -pub const O_NOCTTY: c_int = 256; -pub const O_NONBLOCK: c_int = 2048; -pub const O_SYNC: c_int = 1052672; -pub const O_RSYNC: c_int = 1052672; -pub const O_DSYNC: c_int = 4096; - -pub const MAP_ANON: c_int = 0x0020; -pub const MAP_GROWSDOWN: c_int = 0x0100; -pub const MAP_DENYWRITE: c_int = 0x0800; -pub const MAP_EXECUTABLE: c_int = 0x01000; -pub const MAP_LOCKED: c_int = 0x00080; -pub const MAP_NORESERVE: c_int = 0x00040; -pub const MAP_POPULATE: c_int = 0x08000; -pub const MAP_NONBLOCK: c_int = 0x010000; -pub const MAP_STACK: c_int = 0x020000; -pub const MAP_HUGETLB: c_int = 0x040000; -pub const MAP_SYNC: c_int = 0x080000; - -pub const PTRACE_SYSEMU: c_int = 0x1d; -pub const PTRACE_SYSEMU_SINGLESTEP: c_int = 0x1e; - -pub const SOCK_STREAM: c_int = 1; -pub const SOCK_DGRAM: c_int = 2; - -pub const EDEADLK: c_int = 35; -pub const ENAMETOOLONG: c_int = 36; -pub const ENOLCK: c_int = 37; -pub const ENOSYS: c_int = 38; -pub const ENOTEMPTY: c_int = 39; -pub const ELOOP: c_int = 40; -pub const ENOMSG: c_int = 42; -pub const EIDRM: c_int = 43; -pub const ECHRNG: c_int = 44; -pub const EL2NSYNC: c_int = 45; -pub const EL3HLT: c_int = 46; -pub const EL3RST: c_int = 47; -pub const ELNRNG: c_int = 48; -pub const EUNATCH: c_int = 49; -pub const ENOCSI: c_int = 50; -pub const EL2HLT: c_int = 51; -pub const EBADE: c_int = 52; -pub const EBADR: c_int = 53; -pub const EXFULL: c_int = 54; -pub const ENOANO: c_int = 55; -pub const EBADRQC: c_int = 56; -pub const EBADSLT: c_int = 57; -pub const EDEADLOCK: c_int = 58; -pub const EMULTIHOP: c_int = 72; -pub const EBADMSG: c_int = 74; -pub const EOVERFLOW: c_int = 75; -pub const ENOTUNIQ: c_int = 76; -pub const EBADFD: c_int = 77; -pub const EREMCHG: c_int = 78; -pub const ELIBACC: c_int = 79; -pub const ELIBBAD: c_int = 80; -pub const ELIBSCN: c_int = 81; -pub const ELIBMAX: c_int = 82; -pub const ELIBEXEC: c_int = 83; -pub const EILSEQ: c_int = 84; -pub const ERESTART: c_int = 85; -pub const ESTRPIPE: c_int = 86; -pub const EUSERS: c_int = 87; -pub const ENOTSOCK: c_int = 88; -pub const EDESTADDRREQ: c_int = 89; -pub const EMSGSIZE: c_int = 90; -pub const EPROTOTYPE: c_int = 91; -pub const ENOPROTOOPT: c_int = 92; -pub const EPROTONOSUPPORT: c_int = 93; -pub const ESOCKTNOSUPPORT: c_int = 94; -pub const EOPNOTSUPP: c_int = 95; -pub const ENOTSUP: c_int = EOPNOTSUPP; -pub const EPFNOSUPPORT: c_int = 96; -pub const EAFNOSUPPORT: c_int = 97; -pub const EADDRINUSE: c_int = 98; -pub const EADDRNOTAVAIL: c_int = 99; -pub const ENETDOWN: c_int = 100; -pub const ENETUNREACH: c_int = 101; -pub const ENETRESET: c_int = 102; 
-pub const ECONNABORTED: c_int = 103; -pub const ECONNRESET: c_int = 104; -pub const ENOBUFS: c_int = 105; -pub const EISCONN: c_int = 106; -pub const ENOTCONN: c_int = 107; -pub const ESHUTDOWN: c_int = 108; -pub const ETOOMANYREFS: c_int = 109; -pub const ETIMEDOUT: c_int = 110; -pub const ECONNREFUSED: c_int = 111; -pub const EHOSTDOWN: c_int = 112; -pub const EHOSTUNREACH: c_int = 113; -pub const EALREADY: c_int = 114; -pub const EINPROGRESS: c_int = 115; -pub const ESTALE: c_int = 116; -pub const EUCLEAN: c_int = 117; -pub const ENOTNAM: c_int = 118; -pub const ENAVAIL: c_int = 119; -pub const EISNAM: c_int = 120; -pub const EREMOTEIO: c_int = 121; -pub const EDQUOT: c_int = 122; -pub const ENOMEDIUM: c_int = 123; -pub const EMEDIUMTYPE: c_int = 124; -pub const ECANCELED: c_int = 125; -pub const ENOKEY: c_int = 126; -pub const EKEYEXPIRED: c_int = 127; -pub const EKEYREVOKED: c_int = 128; -pub const EKEYREJECTED: c_int = 129; -pub const EOWNERDEAD: c_int = 130; -pub const ENOTRECOVERABLE: c_int = 131; -pub const ERFKILL: c_int = 132; -pub const EHWPOISON: c_int = 133; - -pub const SA_ONSTACK: c_int = 0x08000000; -pub const SA_SIGINFO: c_int = 0x00000004; -pub const SA_NOCLDWAIT: c_int = 0x00000002; - -pub const SIGCHLD: c_int = 17; -pub const SIGBUS: c_int = 7; -pub const SIGTTIN: c_int = 21; -pub const SIGTTOU: c_int = 22; -pub const SIGXCPU: c_int = 24; -pub const SIGXFSZ: c_int = 25; -pub const SIGVTALRM: c_int = 26; -pub const SIGPROF: c_int = 27; -pub const SIGWINCH: c_int = 28; -pub const SIGUSR1: c_int = 10; -pub const SIGUSR2: c_int = 12; -pub const SIGCONT: c_int = 18; -pub const SIGSTOP: c_int = 19; -pub const SIGTSTP: c_int = 20; -pub const SIGURG: c_int = 23; -pub const SIGIO: c_int = 29; -pub const SIGSYS: c_int = 31; -pub const SIGSTKFLT: c_int = 16; -pub const SIGPOLL: c_int = 29; -pub const SIGPWR: c_int = 30; -pub const SIG_SETMASK: c_int = 2; -pub const SIG_BLOCK: c_int = 0x000000; -pub const SIG_UNBLOCK: c_int = 0x01; - -pub const EXTPROC: crate::tcflag_t = 0x10000000; - -pub const F_GETLK: c_int = 12; -pub const F_GETOWN: c_int = 9; -pub const F_SETLK: c_int = 13; -pub const F_SETLKW: c_int = 14; -pub const F_SETOWN: c_int = 8; - -pub const VEOF: usize = 4; -pub const VEOL: usize = 6; -pub const VEOL2: usize = 8; -pub const VMIN: usize = 5; -pub const IEXTEN: crate::tcflag_t = 0x00000400; -pub const TOSTOP: crate::tcflag_t = 0x00400000; -pub const FLUSHO: crate::tcflag_t = 0x00800000; - -pub const POLLWRNORM: c_short = 0x100; -pub const POLLWRBAND: c_short = 0x200; - -// Syscall table -pub const SYS_restart_syscall: c_long = 0; -pub const SYS_exit: c_long = 1; -pub const SYS_fork: c_long = 2; -pub const SYS_read: c_long = 3; -pub const SYS_write: c_long = 4; -pub const SYS_open: c_long = 5; -pub const SYS_close: c_long = 6; -pub const SYS_waitpid: c_long = 7; -pub const SYS_creat: c_long = 8; -pub const SYS_link: c_long = 9; -pub const SYS_unlink: c_long = 10; -pub const SYS_execve: c_long = 11; -pub const SYS_chdir: c_long = 12; -pub const SYS_time: c_long = 13; -pub const SYS_mknod: c_long = 14; -pub const SYS_chmod: c_long = 15; -pub const SYS_lchown: c_long = 16; -pub const SYS_break: c_long = 17; -pub const SYS_oldstat: c_long = 18; -pub const SYS_lseek: c_long = 19; -pub const SYS_getpid: c_long = 20; -pub const SYS_mount: c_long = 21; -pub const SYS_umount: c_long = 22; -pub const SYS_setuid: c_long = 23; -pub const SYS_getuid: c_long = 24; -pub const SYS_stime: c_long = 25; -pub const SYS_ptrace: c_long = 26; -pub const SYS_alarm: c_long = 27; -pub const 
SYS_oldfstat: c_long = 28; -pub const SYS_pause: c_long = 29; -pub const SYS_utime: c_long = 30; -pub const SYS_stty: c_long = 31; -pub const SYS_gtty: c_long = 32; -pub const SYS_access: c_long = 33; -pub const SYS_nice: c_long = 34; -pub const SYS_ftime: c_long = 35; -pub const SYS_sync: c_long = 36; -pub const SYS_kill: c_long = 37; -pub const SYS_rename: c_long = 38; -pub const SYS_mkdir: c_long = 39; -pub const SYS_rmdir: c_long = 40; -pub const SYS_dup: c_long = 41; -pub const SYS_pipe: c_long = 42; -pub const SYS_times: c_long = 43; -pub const SYS_prof: c_long = 44; -pub const SYS_brk: c_long = 45; -pub const SYS_setgid: c_long = 46; -pub const SYS_getgid: c_long = 47; -pub const SYS_signal: c_long = 48; -pub const SYS_geteuid: c_long = 49; -pub const SYS_getegid: c_long = 50; -pub const SYS_acct: c_long = 51; -pub const SYS_umount2: c_long = 52; -pub const SYS_lock: c_long = 53; -pub const SYS_ioctl: c_long = 54; -pub const SYS_fcntl: c_long = 55; -pub const SYS_mpx: c_long = 56; -pub const SYS_setpgid: c_long = 57; -pub const SYS_ulimit: c_long = 58; -pub const SYS_oldolduname: c_long = 59; -pub const SYS_umask: c_long = 60; -pub const SYS_chroot: c_long = 61; -pub const SYS_ustat: c_long = 62; -pub const SYS_dup2: c_long = 63; -pub const SYS_getppid: c_long = 64; -pub const SYS_getpgrp: c_long = 65; -pub const SYS_setsid: c_long = 66; -pub const SYS_sigaction: c_long = 67; -pub const SYS_sgetmask: c_long = 68; -pub const SYS_ssetmask: c_long = 69; -pub const SYS_setreuid: c_long = 70; -pub const SYS_setregid: c_long = 71; -pub const SYS_sigsuspend: c_long = 72; -pub const SYS_sigpending: c_long = 73; -pub const SYS_sethostname: c_long = 74; -pub const SYS_setrlimit: c_long = 75; -pub const SYS_getrlimit: c_long = 76; -pub const SYS_getrusage: c_long = 77; -pub const SYS_gettimeofday: c_long = 78; -pub const SYS_settimeofday: c_long = 79; -pub const SYS_getgroups: c_long = 80; -pub const SYS_setgroups: c_long = 81; -pub const SYS_select: c_long = 82; -pub const SYS_symlink: c_long = 83; -pub const SYS_oldlstat: c_long = 84; -pub const SYS_readlink: c_long = 85; -pub const SYS_uselib: c_long = 86; -pub const SYS_swapon: c_long = 87; -pub const SYS_reboot: c_long = 88; -pub const SYS_readdir: c_long = 89; -pub const SYS_mmap: c_long = 90; -pub const SYS_munmap: c_long = 91; -pub const SYS_truncate: c_long = 92; -pub const SYS_ftruncate: c_long = 93; -pub const SYS_fchmod: c_long = 94; -pub const SYS_fchown: c_long = 95; -pub const SYS_getpriority: c_long = 96; -pub const SYS_setpriority: c_long = 97; -pub const SYS_profil: c_long = 98; -pub const SYS_statfs: c_long = 99; -pub const SYS_fstatfs: c_long = 100; -pub const SYS_ioperm: c_long = 101; -pub const SYS_socketcall: c_long = 102; -pub const SYS_syslog: c_long = 103; -pub const SYS_setitimer: c_long = 104; -pub const SYS_getitimer: c_long = 105; -pub const SYS_stat: c_long = 106; -pub const SYS_lstat: c_long = 107; -pub const SYS_fstat: c_long = 108; -pub const SYS_olduname: c_long = 109; -pub const SYS_iopl: c_long = 110; -pub const SYS_vhangup: c_long = 111; -pub const SYS_idle: c_long = 112; -pub const SYS_vm86: c_long = 113; -pub const SYS_wait4: c_long = 114; -pub const SYS_swapoff: c_long = 115; -pub const SYS_sysinfo: c_long = 116; -pub const SYS_ipc: c_long = 117; -pub const SYS_fsync: c_long = 118; -pub const SYS_sigreturn: c_long = 119; -pub const SYS_clone: c_long = 120; -pub const SYS_setdomainname: c_long = 121; -pub const SYS_uname: c_long = 122; -pub const SYS_modify_ldt: c_long = 123; -pub const SYS_adjtimex: 
c_long = 124; -pub const SYS_mprotect: c_long = 125; -pub const SYS_sigprocmask: c_long = 126; -#[deprecated(since = "0.2.70", note = "Functional up to 2.6 kernel")] -pub const SYS_create_module: c_long = 127; -pub const SYS_init_module: c_long = 128; -pub const SYS_delete_module: c_long = 129; -#[deprecated(since = "0.2.70", note = "Functional up to 2.6 kernel")] -pub const SYS_get_kernel_syms: c_long = 130; -pub const SYS_quotactl: c_long = 131; -pub const SYS_getpgid: c_long = 132; -pub const SYS_fchdir: c_long = 133; -pub const SYS_bdflush: c_long = 134; -pub const SYS_sysfs: c_long = 135; -pub const SYS_personality: c_long = 136; -pub const SYS_afs_syscall: c_long = 137; -pub const SYS_setfsuid: c_long = 138; -pub const SYS_setfsgid: c_long = 139; -pub const SYS__llseek: c_long = 140; -pub const SYS_getdents: c_long = 141; -pub const SYS__newselect: c_long = 142; -pub const SYS_flock: c_long = 143; -pub const SYS_msync: c_long = 144; -pub const SYS_readv: c_long = 145; -pub const SYS_writev: c_long = 146; -pub const SYS_getsid: c_long = 147; -pub const SYS_fdatasync: c_long = 148; -pub const SYS__sysctl: c_long = 149; -pub const SYS_mlock: c_long = 150; -pub const SYS_munlock: c_long = 151; -pub const SYS_mlockall: c_long = 152; -pub const SYS_munlockall: c_long = 153; -pub const SYS_sched_setparam: c_long = 154; -pub const SYS_sched_getparam: c_long = 155; -pub const SYS_sched_setscheduler: c_long = 156; -pub const SYS_sched_getscheduler: c_long = 157; -pub const SYS_sched_yield: c_long = 158; -pub const SYS_sched_get_priority_max: c_long = 159; -pub const SYS_sched_get_priority_min: c_long = 160; -pub const SYS_sched_rr_get_interval: c_long = 161; -pub const SYS_nanosleep: c_long = 162; -pub const SYS_mremap: c_long = 163; -pub const SYS_setresuid: c_long = 164; -pub const SYS_getresuid: c_long = 165; -#[deprecated(since = "0.2.70", note = "Functional up to 2.6 kernel")] -pub const SYS_query_module: c_long = 166; -pub const SYS_poll: c_long = 167; -pub const SYS_nfsservctl: c_long = 168; -pub const SYS_setresgid: c_long = 169; -pub const SYS_getresgid: c_long = 170; -pub const SYS_prctl: c_long = 171; -pub const SYS_rt_sigreturn: c_long = 172; -pub const SYS_rt_sigaction: c_long = 173; -pub const SYS_rt_sigprocmask: c_long = 174; -pub const SYS_rt_sigpending: c_long = 175; -pub const SYS_rt_sigtimedwait: c_long = 176; -pub const SYS_rt_sigqueueinfo: c_long = 177; -pub const SYS_rt_sigsuspend: c_long = 178; -pub const SYS_pread64: c_long = 179; -pub const SYS_pwrite64: c_long = 180; -pub const SYS_chown: c_long = 181; -pub const SYS_getcwd: c_long = 182; -pub const SYS_capget: c_long = 183; -pub const SYS_capset: c_long = 184; -pub const SYS_sigaltstack: c_long = 185; -pub const SYS_sendfile: c_long = 186; -pub const SYS_getpmsg: c_long = 187; -pub const SYS_putpmsg: c_long = 188; -pub const SYS_vfork: c_long = 189; -pub const SYS_ugetrlimit: c_long = 190; -pub const SYS_readahead: c_long = 191; -pub const SYS_mmap2: c_long = 192; -pub const SYS_truncate64: c_long = 193; -pub const SYS_ftruncate64: c_long = 194; -pub const SYS_stat64: c_long = 195; -pub const SYS_lstat64: c_long = 196; -pub const SYS_fstat64: c_long = 197; -pub const SYS_pciconfig_read: c_long = 198; -pub const SYS_pciconfig_write: c_long = 199; -pub const SYS_pciconfig_iobase: c_long = 200; -pub const SYS_multiplexer: c_long = 201; -pub const SYS_getdents64: c_long = 202; -pub const SYS_pivot_root: c_long = 203; -pub const SYS_fcntl64: c_long = 204; -pub const SYS_madvise: c_long = 205; -pub const SYS_mincore: 
c_long = 206; -pub const SYS_gettid: c_long = 207; -pub const SYS_tkill: c_long = 208; -pub const SYS_setxattr: c_long = 209; -pub const SYS_lsetxattr: c_long = 210; -pub const SYS_fsetxattr: c_long = 211; -pub const SYS_getxattr: c_long = 212; -pub const SYS_lgetxattr: c_long = 213; -pub const SYS_fgetxattr: c_long = 214; -pub const SYS_listxattr: c_long = 215; -pub const SYS_llistxattr: c_long = 216; -pub const SYS_flistxattr: c_long = 217; -pub const SYS_removexattr: c_long = 218; -pub const SYS_lremovexattr: c_long = 219; -pub const SYS_fremovexattr: c_long = 220; -pub const SYS_futex: c_long = 221; -pub const SYS_sched_setaffinity: c_long = 222; -pub const SYS_sched_getaffinity: c_long = 223; -pub const SYS_tuxcall: c_long = 225; -pub const SYS_sendfile64: c_long = 226; -pub const SYS_io_setup: c_long = 227; -pub const SYS_io_destroy: c_long = 228; -pub const SYS_io_getevents: c_long = 229; -pub const SYS_io_submit: c_long = 230; -pub const SYS_io_cancel: c_long = 231; -pub const SYS_set_tid_address: c_long = 232; -pub const SYS_fadvise64: c_long = 233; -pub const SYS_exit_group: c_long = 234; -pub const SYS_lookup_dcookie: c_long = 235; -pub const SYS_epoll_create: c_long = 236; -pub const SYS_epoll_ctl: c_long = 237; -pub const SYS_epoll_wait: c_long = 238; -pub const SYS_remap_file_pages: c_long = 239; -pub const SYS_timer_create: c_long = 240; -pub const SYS_timer_settime: c_long = 241; -pub const SYS_timer_gettime: c_long = 242; -pub const SYS_timer_getoverrun: c_long = 243; -pub const SYS_timer_delete: c_long = 244; -pub const SYS_clock_settime: c_long = 245; -pub const SYS_clock_gettime: c_long = 246; -pub const SYS_clock_getres: c_long = 247; -pub const SYS_clock_nanosleep: c_long = 248; -pub const SYS_swapcontext: c_long = 249; -pub const SYS_tgkill: c_long = 250; -pub const SYS_utimes: c_long = 251; -pub const SYS_statfs64: c_long = 252; -pub const SYS_fstatfs64: c_long = 253; -pub const SYS_fadvise64_64: c_long = 254; -pub const SYS_rtas: c_long = 255; -pub const SYS_sys_debug_setcontext: c_long = 256; -pub const SYS_migrate_pages: c_long = 258; -pub const SYS_mbind: c_long = 259; -pub const SYS_get_mempolicy: c_long = 260; -pub const SYS_set_mempolicy: c_long = 261; -pub const SYS_mq_open: c_long = 262; -pub const SYS_mq_unlink: c_long = 263; -pub const SYS_mq_timedsend: c_long = 264; -pub const SYS_mq_timedreceive: c_long = 265; -pub const SYS_mq_notify: c_long = 266; -pub const SYS_mq_getsetattr: c_long = 267; -pub const SYS_kexec_load: c_long = 268; -pub const SYS_add_key: c_long = 269; -pub const SYS_request_key: c_long = 270; -pub const SYS_keyctl: c_long = 271; -pub const SYS_waitid: c_long = 272; -pub const SYS_ioprio_set: c_long = 273; -pub const SYS_ioprio_get: c_long = 274; -pub const SYS_inotify_init: c_long = 275; -pub const SYS_inotify_add_watch: c_long = 276; -pub const SYS_inotify_rm_watch: c_long = 277; -pub const SYS_spu_run: c_long = 278; -pub const SYS_spu_create: c_long = 279; -pub const SYS_pselect6: c_long = 280; -pub const SYS_ppoll: c_long = 281; -pub const SYS_unshare: c_long = 282; -pub const SYS_splice: c_long = 283; -pub const SYS_tee: c_long = 284; -pub const SYS_vmsplice: c_long = 285; -pub const SYS_openat: c_long = 286; -pub const SYS_mkdirat: c_long = 287; -pub const SYS_mknodat: c_long = 288; -pub const SYS_fchownat: c_long = 289; -pub const SYS_futimesat: c_long = 290; -pub const SYS_fstatat64: c_long = 291; -pub const SYS_unlinkat: c_long = 292; -pub const SYS_renameat: c_long = 293; -pub const SYS_linkat: c_long = 294; -pub const 
SYS_symlinkat: c_long = 295; -pub const SYS_readlinkat: c_long = 296; -pub const SYS_fchmodat: c_long = 297; -pub const SYS_faccessat: c_long = 298; -pub const SYS_get_robust_list: c_long = 299; -pub const SYS_set_robust_list: c_long = 300; -pub const SYS_move_pages: c_long = 301; -pub const SYS_getcpu: c_long = 302; -pub const SYS_epoll_pwait: c_long = 303; -pub const SYS_utimensat: c_long = 304; -pub const SYS_signalfd: c_long = 305; -pub const SYS_timerfd_create: c_long = 306; -pub const SYS_eventfd: c_long = 307; -pub const SYS_sync_file_range2: c_long = 308; -pub const SYS_fallocate: c_long = 309; -pub const SYS_subpage_prot: c_long = 310; -pub const SYS_timerfd_settime: c_long = 311; -pub const SYS_timerfd_gettime: c_long = 312; -pub const SYS_signalfd4: c_long = 313; -pub const SYS_eventfd2: c_long = 314; -pub const SYS_epoll_create1: c_long = 315; -pub const SYS_dup3: c_long = 316; -pub const SYS_pipe2: c_long = 317; -pub const SYS_inotify_init1: c_long = 318; -pub const SYS_perf_event_open: c_long = 319; -pub const SYS_preadv: c_long = 320; -pub const SYS_pwritev: c_long = 321; -pub const SYS_rt_tgsigqueueinfo: c_long = 322; -pub const SYS_fanotify_init: c_long = 323; -pub const SYS_fanotify_mark: c_long = 324; -pub const SYS_prlimit64: c_long = 325; -pub const SYS_socket: c_long = 326; -pub const SYS_bind: c_long = 327; -pub const SYS_connect: c_long = 328; -pub const SYS_listen: c_long = 329; -pub const SYS_accept: c_long = 330; -pub const SYS_getsockname: c_long = 331; -pub const SYS_getpeername: c_long = 332; -pub const SYS_socketpair: c_long = 333; -pub const SYS_send: c_long = 334; -pub const SYS_sendto: c_long = 335; -pub const SYS_recv: c_long = 336; -pub const SYS_recvfrom: c_long = 337; -pub const SYS_shutdown: c_long = 338; -pub const SYS_setsockopt: c_long = 339; -pub const SYS_getsockopt: c_long = 340; -pub const SYS_sendmsg: c_long = 341; -pub const SYS_recvmsg: c_long = 342; -pub const SYS_recvmmsg: c_long = 343; -pub const SYS_accept4: c_long = 344; -pub const SYS_name_to_handle_at: c_long = 345; -pub const SYS_open_by_handle_at: c_long = 346; -pub const SYS_clock_adjtime: c_long = 347; -pub const SYS_syncfs: c_long = 348; -pub const SYS_sendmmsg: c_long = 349; -pub const SYS_setns: c_long = 350; -pub const SYS_process_vm_readv: c_long = 351; -pub const SYS_process_vm_writev: c_long = 352; -pub const SYS_finit_module: c_long = 353; -pub const SYS_kcmp: c_long = 354; -pub const SYS_sched_setattr: c_long = 355; -pub const SYS_sched_getattr: c_long = 356; -pub const SYS_renameat2: c_long = 357; -pub const SYS_seccomp: c_long = 358; -pub const SYS_getrandom: c_long = 359; -pub const SYS_memfd_create: c_long = 360; -pub const SYS_bpf: c_long = 361; -pub const SYS_execveat: c_long = 362; -pub const SYS_switch_endian: c_long = 363; -pub const SYS_userfaultfd: c_long = 364; -pub const SYS_membarrier: c_long = 365; -pub const SYS_mlock2: c_long = 378; -pub const SYS_copy_file_range: c_long = 379; -pub const SYS_preadv2: c_long = 380; -pub const SYS_pwritev2: c_long = 381; -pub const SYS_kexec_file_load: c_long = 382; -pub const SYS_statx: c_long = 383; -pub const SYS_pkey_alloc: c_long = 384; -pub const SYS_pkey_free: c_long = 385; -pub const SYS_pkey_mprotect: c_long = 386; -pub const SYS_pidfd_send_signal: c_long = 424; -pub const SYS_io_uring_setup: c_long = 425; -pub const SYS_io_uring_enter: c_long = 426; -pub const SYS_io_uring_register: c_long = 427; -pub const SYS_open_tree: c_long = 428; -pub const SYS_move_mount: c_long = 429; -pub const SYS_fsopen: c_long = 430; 
-pub const SYS_fsconfig: c_long = 431; -pub const SYS_fsmount: c_long = 432; -pub const SYS_fspick: c_long = 433; -pub const SYS_pidfd_open: c_long = 434; -pub const SYS_clone3: c_long = 435; -pub const SYS_close_range: c_long = 436; -pub const SYS_openat2: c_long = 437; -pub const SYS_pidfd_getfd: c_long = 438; -pub const SYS_faccessat2: c_long = 439; -pub const SYS_process_madvise: c_long = 440; -pub const SYS_epoll_pwait2: c_long = 441; -pub const SYS_mount_setattr: c_long = 442; -pub const SYS_quotactl_fd: c_long = 443; -pub const SYS_landlock_create_ruleset: c_long = 444; -pub const SYS_landlock_add_rule: c_long = 445; -pub const SYS_landlock_restrict_self: c_long = 446; -pub const SYS_memfd_secret: c_long = 447; -pub const SYS_process_mrelease: c_long = 448; -pub const SYS_futex_waitv: c_long = 449; -pub const SYS_set_mempolicy_home_node: c_long = 450; -pub const SYS_mseal: c_long = 462; diff --git a/vendor/libc/src/unix/linux_like/linux/musl/b32/riscv32/mod.rs b/vendor/libc/src/unix/linux_like/linux/musl/b32/riscv32/mod.rs deleted file mode 100644 index ea4b51f006f0f5..00000000000000 --- a/vendor/libc/src/unix/linux_like/linux/musl/b32/riscv32/mod.rs +++ /dev/null @@ -1,655 +0,0 @@ -//! RISC-V-specific definitions for 32-bit linux-like values - -use crate::prelude::*; -use crate::{off64_t, off_t}; - -pub type wchar_t = c_int; - -s! { - pub struct stat { - pub st_dev: crate::dev_t, - pub st_ino: crate::ino_t, - pub st_mode: crate::mode_t, - pub st_nlink: crate::nlink_t, - pub st_uid: crate::uid_t, - pub st_gid: crate::gid_t, - pub st_rdev: crate::dev_t, - pub __pad1: crate::dev_t, - pub st_size: off_t, - pub st_blksize: crate::blksize_t, - pub __pad2: c_int, - pub st_blocks: crate::blkcnt_t, - pub st_atime: crate::time_t, - pub st_atime_nsec: c_long, - pub st_mtime: crate::time_t, - pub st_mtime_nsec: c_long, - pub st_ctime: crate::time_t, - pub st_ctime_nsec: c_long, - __unused: [c_int; 2usize], - } - - pub struct stat64 { - pub st_dev: crate::dev_t, - pub st_ino: crate::ino64_t, - pub st_mode: crate::mode_t, - pub st_nlink: crate::nlink_t, - pub st_uid: crate::uid_t, - pub st_gid: crate::gid_t, - pub st_rdev: crate::dev_t, - pub __pad1: crate::dev_t, - pub st_size: off64_t, - pub st_blksize: crate::blksize_t, - pub __pad2: c_int, - pub st_blocks: crate::blkcnt64_t, - pub st_atime: crate::time_t, - pub st_atime_nsec: c_long, - pub st_mtime: crate::time_t, - pub st_mtime_nsec: c_long, - pub st_ctime: crate::time_t, - pub st_ctime_nsec: c_long, - __unused: [c_int; 2], - } - - pub struct stack_t { - pub ss_sp: *mut c_void, - pub ss_flags: c_int, - pub ss_size: size_t, - } - - pub struct ipc_perm { - pub __key: crate::key_t, - pub uid: crate::uid_t, - pub gid: crate::gid_t, - pub cuid: crate::uid_t, - pub cgid: crate::gid_t, - pub mode: c_ushort, - __pad1: c_ushort, - pub __seq: c_ushort, - __pad2: c_ushort, - __unused1: c_ulong, - __unused2: c_ulong, - } - - pub struct shmid_ds { - pub shm_perm: crate::ipc_perm, - pub shm_segsz: size_t, - pub shm_atime: crate::time_t, - pub shm_dtime: crate::time_t, - pub shm_ctime: crate::time_t, - pub shm_cpid: crate::pid_t, - pub shm_lpid: crate::pid_t, - pub shm_nattch: crate::shmatt_t, - __unused5: c_ulong, - __unused6: c_ulong, - } - - pub struct msqid_ds { - pub msg_perm: crate::ipc_perm, - pub msg_stime: crate::time_t, - __unused1: c_int, - pub msg_rtime: crate::time_t, - __unused2: c_int, - pub msg_ctime: crate::time_t, - __unused3: c_int, - pub __msg_cbytes: c_ulong, - pub msg_qnum: crate::msgqnum_t, - pub msg_qbytes: crate::msglen_t, - pub 
msg_lspid: crate::pid_t, - pub msg_lrpid: crate::pid_t, - __pad1: c_ulong, - __pad2: c_ulong, - } -} - -s_no_extra_traits! { - #[repr(align(8))] - pub struct max_align_t { - priv_: (i64, f64), - } -} - -//pub const RLIM_INFINITY: crate::rlim_t = !0; -pub const VEOF: usize = 4; -//pub const RLIMIT_RSS: crate::__rlimit_resource_t = 5; -//pub const RLIMIT_AS: crate::__rlimit_resource_t = 9; -//pub const RLIMIT_MEMLOCK: crate::__rlimit_resource_t = 8; -//pub const RLIMIT_NOFILE: crate::__rlimit_resource_t = 7; -//pub const RLIMIT_NPROC: crate::__rlimit_resource_t = 6; -pub const O_APPEND: c_int = 1024; -pub const O_CREAT: c_int = 64; -pub const O_EXCL: c_int = 128; -pub const O_NOCTTY: c_int = 256; -pub const O_NONBLOCK: c_int = 2048; -pub const O_SYNC: c_int = 1052672; -pub const O_RSYNC: c_int = 1052672; -pub const O_DSYNC: c_int = 4096; -pub const MAP_GROWSDOWN: c_int = 256; -pub const EDEADLK: c_int = 35; -pub const ENAMETOOLONG: c_int = 36; -pub const ENOLCK: c_int = 37; -pub const ENOSYS: c_int = 38; -pub const ENOTEMPTY: c_int = 39; -pub const ELOOP: c_int = 40; -pub const ENOMSG: c_int = 42; -pub const EIDRM: c_int = 43; -pub const ECHRNG: c_int = 44; -pub const EL2NSYNC: c_int = 45; -pub const EL3HLT: c_int = 46; -pub const EL3RST: c_int = 47; -pub const ELNRNG: c_int = 48; -pub const EUNATCH: c_int = 49; -pub const ENOCSI: c_int = 50; -pub const EL2HLT: c_int = 51; -pub const EBADE: c_int = 52; -pub const EBADR: c_int = 53; -pub const EXFULL: c_int = 54; -pub const ENOANO: c_int = 55; -pub const EBADRQC: c_int = 56; -pub const EBADSLT: c_int = 57; -pub const EMULTIHOP: c_int = 72; -pub const EOVERFLOW: c_int = 75; -pub const ENOTUNIQ: c_int = 76; -pub const EBADFD: c_int = 77; -pub const EBADMSG: c_int = 74; -pub const EREMCHG: c_int = 78; -pub const ELIBACC: c_int = 79; -pub const ELIBBAD: c_int = 80; -pub const ELIBSCN: c_int = 81; -pub const ELIBMAX: c_int = 82; -pub const ELIBEXEC: c_int = 83; -pub const EILSEQ: c_int = 84; -pub const ERESTART: c_int = 85; -pub const ESTRPIPE: c_int = 86; -pub const EUSERS: c_int = 87; -pub const ENOTSOCK: c_int = 88; -pub const EDESTADDRREQ: c_int = 89; -pub const EMSGSIZE: c_int = 90; -pub const EPROTOTYPE: c_int = 91; -pub const ENOPROTOOPT: c_int = 92; -pub const EPROTONOSUPPORT: c_int = 93; -pub const ESOCKTNOSUPPORT: c_int = 94; -pub const EOPNOTSUPP: c_int = 95; -pub const ENOTSUP: c_int = EOPNOTSUPP; -pub const EPFNOSUPPORT: c_int = 96; -pub const EAFNOSUPPORT: c_int = 97; -pub const EADDRINUSE: c_int = 98; -pub const EADDRNOTAVAIL: c_int = 99; -pub const ENETDOWN: c_int = 100; -pub const ENETUNREACH: c_int = 101; -pub const ENETRESET: c_int = 102; -pub const ECONNABORTED: c_int = 103; -pub const ECONNRESET: c_int = 104; -pub const ENOBUFS: c_int = 105; -pub const EISCONN: c_int = 106; -pub const ENOTCONN: c_int = 107; -pub const ESHUTDOWN: c_int = 108; -pub const ETOOMANYREFS: c_int = 109; -pub const ETIMEDOUT: c_int = 110; -pub const ECONNREFUSED: c_int = 111; -pub const EHOSTDOWN: c_int = 112; -pub const EHOSTUNREACH: c_int = 113; -pub const EALREADY: c_int = 114; -pub const EINPROGRESS: c_int = 115; -pub const ESTALE: c_int = 116; -pub const EDQUOT: c_int = 122; -pub const ENOMEDIUM: c_int = 123; -pub const EMEDIUMTYPE: c_int = 124; -pub const ECANCELED: c_int = 125; -pub const ENOKEY: c_int = 126; -pub const EKEYEXPIRED: c_int = 127; -pub const EKEYREVOKED: c_int = 128; -pub const EKEYREJECTED: c_int = 129; -pub const EOWNERDEAD: c_int = 130; -pub const ENOTRECOVERABLE: c_int = 131; -pub const EHWPOISON: c_int = 133; -pub const 
ERFKILL: c_int = 132; - -pub const SOCK_STREAM: c_int = 1; -pub const SOCK_DGRAM: c_int = 2; -pub const SA_ONSTACK: c_int = 0x08000000; -pub const SA_SIGINFO: c_int = 4; -pub const SA_NOCLDWAIT: c_int = 2; -pub const SIGTTIN: c_int = 21; -pub const SIGTTOU: c_int = 22; -pub const SIGXCPU: c_int = 24; -pub const SIGXFSZ: c_int = 25; -pub const SIGVTALRM: c_int = 26; -pub const SIGPROF: c_int = 27; -pub const SIGWINCH: c_int = 28; -pub const SIGCHLD: c_int = 17; -pub const SIGBUS: c_int = 7; -pub const SIGUSR1: c_int = 10; -pub const SIGUSR2: c_int = 12; -pub const SIGCONT: c_int = 18; -pub const SIGSTOP: c_int = 19; -pub const SIGTSTP: c_int = 20; -pub const SIGURG: c_int = 23; -pub const SIGIO: c_int = 29; -pub const SIGSYS: c_int = 31; -pub const SIGSTKFLT: c_int = 16; -pub const SIGPOLL: c_int = 29; -pub const SIGPWR: c_int = 30; -pub const SIG_SETMASK: c_int = 2; -pub const SIG_BLOCK: c_int = 0; -pub const SIG_UNBLOCK: c_int = 1; -pub const POLLWRNORM: c_short = 256; -pub const POLLWRBAND: c_short = 512; -pub const O_ASYNC: c_int = 8192; -pub const F_SETOWN: c_int = 8; -pub const F_GETOWN: c_int = 9; -pub const F_GETLK: c_int = 12; -pub const F_SETLK: c_int = 13; -pub const F_SETLKW: c_int = 14; - -pub const O_DIRECT: c_int = 16384; -pub const O_DIRECTORY: c_int = 65536; -pub const O_LARGEFILE: c_int = 0o0100000; -pub const O_NOFOLLOW: c_int = 131072; -pub const MADV_SOFT_OFFLINE: c_int = 101; -pub const MAP_HUGETLB: c_int = 262144; -pub const MAP_LOCKED: c_int = 8192; -pub const MAP_NORESERVE: c_int = 16384; -pub const MAP_ANON: c_int = 32; -pub const MAP_DENYWRITE: c_int = 2048; -pub const MAP_EXECUTABLE: c_int = 4096; -pub const MAP_POPULATE: c_int = 32768; -pub const MAP_NONBLOCK: c_int = 65536; -pub const MAP_STACK: c_int = 131072; -pub const MAP_SYNC: c_int = 0x080000; -pub const EDEADLOCK: c_int = 35; -pub const EUCLEAN: c_int = 117; -pub const ENOTNAM: c_int = 118; -pub const ENAVAIL: c_int = 119; -pub const EISNAM: c_int = 120; -pub const EREMOTEIO: c_int = 121; -pub const MCL_CURRENT: c_int = 1; -pub const MCL_FUTURE: c_int = 2; -pub const MCL_ONFAULT: c_int = 4; -pub const SIGSTKSZ: size_t = 8192; -pub const MINSIGSTKSZ: size_t = 2048; -pub const CBAUD: crate::tcflag_t = 4111; -pub const TAB1: crate::tcflag_t = 2048; -pub const TAB2: crate::tcflag_t = 4096; -pub const TAB3: crate::tcflag_t = 6144; -pub const CR1: crate::tcflag_t = 512; -pub const CR2: crate::tcflag_t = 1024; -pub const CR3: crate::tcflag_t = 1536; -pub const FF1: crate::tcflag_t = 32768; -pub const BS1: crate::tcflag_t = 8192; -pub const VT1: crate::tcflag_t = 16384; -pub const VWERASE: usize = 14; -pub const VREPRINT: usize = 12; -pub const VSUSP: usize = 10; -pub const VSTART: usize = 8; -pub const VSTOP: usize = 9; -pub const VDISCARD: usize = 13; -pub const VTIME: usize = 5; -pub const IXON: crate::tcflag_t = 1024; -pub const IXOFF: crate::tcflag_t = 4096; -pub const ONLCR: crate::tcflag_t = 4; -pub const CSIZE: crate::tcflag_t = 48; -pub const CS6: crate::tcflag_t = 16; -pub const CS7: crate::tcflag_t = 32; -pub const CS8: crate::tcflag_t = 48; -pub const CSTOPB: crate::tcflag_t = 64; -pub const CREAD: crate::tcflag_t = 128; -pub const PARENB: crate::tcflag_t = 256; -pub const PARODD: crate::tcflag_t = 512; -pub const HUPCL: crate::tcflag_t = 1024; -pub const CLOCAL: crate::tcflag_t = 2048; -pub const ECHOKE: crate::tcflag_t = 2048; -pub const ECHOE: crate::tcflag_t = 16; -pub const ECHOK: crate::tcflag_t = 32; -pub const ECHONL: crate::tcflag_t = 64; -pub const ECHOPRT: crate::tcflag_t = 1024; -pub 
const ECHOCTL: crate::tcflag_t = 512; -pub const ISIG: crate::tcflag_t = 1; -pub const ICANON: crate::tcflag_t = 2; -pub const PENDIN: crate::tcflag_t = 16384; -pub const NOFLSH: crate::tcflag_t = 128; -pub const CIBAUD: crate::tcflag_t = 269418496; -pub const CBAUDEX: crate::tcflag_t = 4096; -pub const VSWTC: usize = 7; -pub const OLCUC: crate::tcflag_t = 2; -pub const NLDLY: crate::tcflag_t = 256; -pub const CRDLY: crate::tcflag_t = 1536; -pub const TABDLY: crate::tcflag_t = 6144; -pub const BSDLY: crate::tcflag_t = 8192; -pub const FFDLY: crate::tcflag_t = 32768; -pub const VTDLY: crate::tcflag_t = 16384; -pub const XTABS: crate::tcflag_t = 6144; -pub const B57600: crate::speed_t = 4097; -pub const B115200: crate::speed_t = 4098; -pub const B230400: crate::speed_t = 4099; -pub const B460800: crate::speed_t = 4100; -pub const B500000: crate::speed_t = 4101; -pub const B576000: crate::speed_t = 4102; -pub const B921600: crate::speed_t = 4103; -pub const B1000000: crate::speed_t = 4104; -pub const B1152000: crate::speed_t = 4105; -pub const B1500000: crate::speed_t = 4106; -pub const B2000000: crate::speed_t = 4107; -pub const B2500000: crate::speed_t = 4108; -pub const B3000000: crate::speed_t = 4109; -pub const B3500000: crate::speed_t = 4110; -pub const B4000000: crate::speed_t = 4111; -pub const VEOL: usize = 11; -pub const VEOL2: usize = 16; -pub const VMIN: usize = 6; -pub const IEXTEN: crate::tcflag_t = 32768; -pub const TOSTOP: crate::tcflag_t = 256; -pub const FLUSHO: crate::tcflag_t = 4096; -pub const EXTPROC: crate::tcflag_t = 65536; - -pub const SYS_read: c_long = 63; -pub const SYS_write: c_long = 64; -pub const SYS_close: c_long = 57; -// RISC-V don't have SYS_fstat, use statx instead. -pub const SYS_lseek: c_long = 62; -pub const SYS_mmap: c_long = 222; -pub const SYS_mprotect: c_long = 226; -pub const SYS_munmap: c_long = 215; -pub const SYS_brk: c_long = 214; -pub const SYS_rt_sigaction: c_long = 134; -pub const SYS_rt_sigprocmask: c_long = 135; -pub const SYS_rt_sigreturn: c_long = 139; -pub const SYS_ioctl: c_long = 29; -pub const SYS_pread64: c_long = 67; -pub const SYS_pwrite64: c_long = 68; -pub const SYS_readv: c_long = 65; -pub const SYS_writev: c_long = 66; -pub const SYS_sched_yield: c_long = 124; -pub const SYS_mremap: c_long = 216; -pub const SYS_msync: c_long = 227; -pub const SYS_mincore: c_long = 232; -pub const SYS_madvise: c_long = 233; -pub const SYS_shmget: c_long = 194; -pub const SYS_shmat: c_long = 196; -pub const SYS_shmctl: c_long = 195; -pub const SYS_dup: c_long = 23; -pub const SYS_getitimer: c_long = 102; -pub const SYS_setitimer: c_long = 103; -pub const SYS_getpid: c_long = 172; -pub const SYS_sendfile: c_long = 71; -pub const SYS_socket: c_long = 198; -pub const SYS_connect: c_long = 203; -pub const SYS_accept: c_long = 202; -pub const SYS_sendto: c_long = 206; -pub const SYS_recvfrom: c_long = 207; -pub const SYS_sendmsg: c_long = 211; -pub const SYS_recvmsg: c_long = 212; -pub const SYS_shutdown: c_long = 210; -pub const SYS_bind: c_long = 200; -pub const SYS_listen: c_long = 201; -pub const SYS_getsockname: c_long = 204; -pub const SYS_getpeername: c_long = 205; -pub const SYS_socketpair: c_long = 199; -pub const SYS_setsockopt: c_long = 208; -pub const SYS_getsockopt: c_long = 209; -pub const SYS_clone: c_long = 220; -pub const SYS_execve: c_long = 221; -pub const SYS_exit: c_long = 93; -// RISC-V don't have wait4, use waitid instead. 
-pub const SYS_kill: c_long = 129; -pub const SYS_uname: c_long = 160; -pub const SYS_semget: c_long = 190; -pub const SYS_semop: c_long = 193; -pub const SYS_semctl: c_long = 191; -pub const SYS_shmdt: c_long = 197; -pub const SYS_msgget: c_long = 186; -pub const SYS_msgsnd: c_long = 189; -pub const SYS_msgrcv: c_long = 188; -pub const SYS_msgctl: c_long = 187; -pub const SYS_fcntl: c_long = 25; -pub const SYS_flock: c_long = 32; -pub const SYS_fsync: c_long = 82; -pub const SYS_fdatasync: c_long = 83; -pub const SYS_truncate: c_long = 45; -pub const SYS_ftruncate: c_long = 46; -pub const SYS_getcwd: c_long = 17; -pub const SYS_chdir: c_long = 49; -pub const SYS_fchdir: c_long = 50; -pub const SYS_fchmod: c_long = 52; -pub const SYS_fchown: c_long = 55; -pub const SYS_umask: c_long = 166; -// RISC-V don't have gettimeofday, use clock_gettime64 instead. -// RISC-V don't have getrlimit, use prlimit64 instead. -pub const SYS_getrusage: c_long = 165; -pub const SYS_sysinfo: c_long = 179; -pub const SYS_times: c_long = 153; -pub const SYS_ptrace: c_long = 117; -pub const SYS_getuid: c_long = 174; -pub const SYS_syslog: c_long = 116; -pub const SYS_getgid: c_long = 176; -pub const SYS_setuid: c_long = 146; -pub const SYS_setgid: c_long = 144; -pub const SYS_geteuid: c_long = 175; -pub const SYS_getegid: c_long = 177; -pub const SYS_setpgid: c_long = 154; -pub const SYS_getppid: c_long = 173; -pub const SYS_setsid: c_long = 157; -pub const SYS_setreuid: c_long = 145; -pub const SYS_setregid: c_long = 143; -pub const SYS_getgroups: c_long = 158; -pub const SYS_setgroups: c_long = 159; -pub const SYS_setresuid: c_long = 147; -pub const SYS_getresuid: c_long = 148; -pub const SYS_setresgid: c_long = 149; -pub const SYS_getresgid: c_long = 150; -pub const SYS_getpgid: c_long = 155; -pub const SYS_setfsuid: c_long = 151; -pub const SYS_setfsgid: c_long = 152; -pub const SYS_getsid: c_long = 156; -pub const SYS_capget: c_long = 90; -pub const SYS_capset: c_long = 91; -pub const SYS_rt_sigpending: c_long = 136; -pub const SYS_rt_sigtimedwait_time64: c_long = 421; -pub const SYS_rt_sigqueueinfo: c_long = 138; -pub const SYS_rt_sigsuspend: c_long = 133; -pub const SYS_sigaltstack: c_long = 132; -pub const SYS_personality: c_long = 92; -pub const SYS_statfs: c_long = 43; -pub const SYS_fstatfs: c_long = 44; -pub const SYS_getpriority: c_long = 141; -pub const SYS_setpriority: c_long = 140; -pub const SYS_sched_setparam: c_long = 118; -pub const SYS_sched_getparam: c_long = 121; -pub const SYS_sched_setscheduler: c_long = 119; -pub const SYS_sched_getscheduler: c_long = 120; -pub const SYS_sched_get_priority_max: c_long = 125; -pub const SYS_sched_get_priority_min: c_long = 126; -pub const SYS_sched_rr_get_interval_time64: c_long = 423; -pub const SYS_mlock: c_long = 228; -pub const SYS_munlock: c_long = 229; -pub const SYS_mlockall: c_long = 230; -pub const SYS_munlockall: c_long = 231; -pub const SYS_vhangup: c_long = 58; -pub const SYS_pivot_root: c_long = 41; -pub const SYS_prctl: c_long = 167; -// RISC-V don't have setrlimit, use prlimit64 instead. -pub const SYS_chroot: c_long = 51; -pub const SYS_sync: c_long = 81; -pub const SYS_acct: c_long = 89; -// RISC-V don't have settimeofday, use clock_settime64 instead. 
-pub const SYS_mount: c_long = 40; -pub const SYS_umount2: c_long = 39; -pub const SYS_swapon: c_long = 224; -pub const SYS_swapoff: c_long = 225; -pub const SYS_reboot: c_long = 142; -pub const SYS_sethostname: c_long = 161; -pub const SYS_setdomainname: c_long = 162; -pub const SYS_init_module: c_long = 105; -pub const SYS_delete_module: c_long = 106; -pub const SYS_quotactl: c_long = 60; -pub const SYS_nfsservctl: c_long = 42; -pub const SYS_gettid: c_long = 178; -pub const SYS_readahead: c_long = 213; -pub const SYS_setxattr: c_long = 5; -pub const SYS_lsetxattr: c_long = 6; -pub const SYS_fsetxattr: c_long = 7; -pub const SYS_getxattr: c_long = 8; -pub const SYS_lgetxattr: c_long = 9; -pub const SYS_fgetxattr: c_long = 10; -pub const SYS_listxattr: c_long = 11; -pub const SYS_llistxattr: c_long = 12; -pub const SYS_flistxattr: c_long = 13; -pub const SYS_removexattr: c_long = 14; -pub const SYS_lremovexattr: c_long = 15; -pub const SYS_fremovexattr: c_long = 16; -pub const SYS_tkill: c_long = 130; -pub const SYS_futex_time64: c_long = 422; -pub const SYS_sched_setaffinity: c_long = 122; -pub const SYS_sched_getaffinity: c_long = 123; -pub const SYS_io_setup: c_long = 0; -pub const SYS_io_destroy: c_long = 1; -pub const SYS_io_pgetevents_time64: c_long = 416; -pub const SYS_io_submit: c_long = 2; -pub const SYS_io_cancel: c_long = 3; -pub const SYS_lookup_dcookie: c_long = 18; -pub const SYS_remap_file_pages: c_long = 234; -pub const SYS_getdents64: c_long = 61; -pub const SYS_set_tid_address: c_long = 96; -pub const SYS_restart_syscall: c_long = 128; -pub const SYS_semtimedop_time64: c_long = 420; -pub const SYS_fadvise64: c_long = 223; -pub const SYS_timer_create: c_long = 107; -pub const SYS_timer_settime64: c_long = 409; -pub const SYS_timer_gettime64: c_long = 408; -pub const SYS_timer_getoverrun: c_long = 109; -pub const SYS_timer_delete: c_long = 111; -pub const SYS_clock_settime64: c_long = 404; -pub const SYS_clock_gettime64: c_long = 403; -pub const SYS_clock_getres_time64: c_long = 406; -pub const SYS_clock_nanosleep_time64: c_long = 407; -pub const SYS_exit_group: c_long = 94; -pub const SYS_epoll_ctl: c_long = 21; -pub const SYS_tgkill: c_long = 131; -pub const SYS_mbind: c_long = 235; -pub const SYS_set_mempolicy: c_long = 237; -pub const SYS_get_mempolicy: c_long = 236; -pub const SYS_mq_open: c_long = 180; -pub const SYS_mq_unlink: c_long = 181; -pub const SYS_mq_timedsend_time64: c_long = 418; -pub const SYS_mq_timedreceive_time64: c_long = 419; -pub const SYS_mq_notify: c_long = 184; -pub const SYS_mq_getsetattr: c_long = 185; -pub const SYS_kexec_load: c_long = 104; -pub const SYS_waitid: c_long = 95; -pub const SYS_add_key: c_long = 217; -pub const SYS_request_key: c_long = 218; -pub const SYS_keyctl: c_long = 219; -pub const SYS_ioprio_set: c_long = 30; -pub const SYS_ioprio_get: c_long = 31; -pub const SYS_inotify_add_watch: c_long = 27; -pub const SYS_inotify_rm_watch: c_long = 28; -pub const SYS_migrate_pages: c_long = 238; -pub const SYS_openat: c_long = 56; -pub const SYS_mkdirat: c_long = 34; -pub const SYS_mknodat: c_long = 33; -pub const SYS_fchownat: c_long = 54; -// RISC-V don't have newfstatat, use statx instead. 
-pub const SYS_unlinkat: c_long = 35; -pub const SYS_linkat: c_long = 37; -pub const SYS_symlinkat: c_long = 36; -pub const SYS_readlinkat: c_long = 78; -pub const SYS_fchmodat: c_long = 53; -pub const SYS_faccessat: c_long = 48; -pub const SYS_pselect6_time64: c_long = 413; -pub const SYS_ppoll_time64: c_long = 414; -pub const SYS_unshare: c_long = 97; -pub const SYS_set_robust_list: c_long = 99; -pub const SYS_get_robust_list: c_long = 100; -pub const SYS_splice: c_long = 76; -pub const SYS_tee: c_long = 77; -pub const SYS_sync_file_range: c_long = 84; -pub const SYS_vmsplice: c_long = 75; -pub const SYS_move_pages: c_long = 239; -pub const SYS_utimensat_time64: c_long = 412; -pub const SYS_epoll_pwait: c_long = 22; -pub const SYS_timerfd_create: c_long = 85; -pub const SYS_fallocate: c_long = 47; -pub const SYS_timerfd_settime64: c_long = 411; -pub const SYS_timerfd_gettime64: c_long = 410; -pub const SYS_accept4: c_long = 242; -pub const SYS_signalfd4: c_long = 74; -pub const SYS_eventfd2: c_long = 19; -pub const SYS_epoll_create1: c_long = 20; -pub const SYS_dup3: c_long = 24; -pub const SYS_pipe2: c_long = 59; -pub const SYS_inotify_init1: c_long = 26; -pub const SYS_preadv: c_long = 69; -pub const SYS_pwritev: c_long = 70; -pub const SYS_rt_tgsigqueueinfo: c_long = 240; -pub const SYS_perf_event_open: c_long = 241; -pub const SYS_recvmmsg_time64: c_long = 417; -pub const SYS_fanotify_init: c_long = 262; -pub const SYS_fanotify_mark: c_long = 263; -pub const SYS_prlimit64: c_long = 261; -pub const SYS_name_to_handle_at: c_long = 264; -pub const SYS_open_by_handle_at: c_long = 265; -pub const SYS_clock_adjtime64: c_long = 405; -pub const SYS_syncfs: c_long = 267; -pub const SYS_sendmmsg: c_long = 269; -pub const SYS_setns: c_long = 268; -pub const SYS_getcpu: c_long = 168; -pub const SYS_process_vm_readv: c_long = 270; -pub const SYS_process_vm_writev: c_long = 271; -pub const SYS_kcmp: c_long = 272; -pub const SYS_finit_module: c_long = 273; -pub const SYS_sched_setattr: c_long = 274; -pub const SYS_sched_getattr: c_long = 275; -pub const SYS_renameat2: c_long = 276; -pub const SYS_seccomp: c_long = 277; -pub const SYS_getrandom: c_long = 278; -pub const SYS_memfd_create: c_long = 279; -pub const SYS_bpf: c_long = 280; -pub const SYS_execveat: c_long = 281; -pub const SYS_userfaultfd: c_long = 282; -pub const SYS_membarrier: c_long = 283; -pub const SYS_mlock2: c_long = 284; -pub const SYS_copy_file_range: c_long = 285; -pub const SYS_preadv2: c_long = 286; -pub const SYS_pwritev2: c_long = 287; -pub const SYS_pkey_mprotect: c_long = 288; -pub const SYS_pkey_alloc: c_long = 289; -pub const SYS_pkey_free: c_long = 290; -pub const SYS_statx: c_long = 291; -pub const SYS_pidfd_send_signal: c_long = 424; -pub const SYS_io_uring_setup: c_long = 425; -pub const SYS_io_uring_enter: c_long = 426; -pub const SYS_io_uring_register: c_long = 427; -pub const SYS_open_tree: c_long = 428; -pub const SYS_move_mount: c_long = 429; -pub const SYS_fsopen: c_long = 430; -pub const SYS_fsconfig: c_long = 431; -pub const SYS_fsmount: c_long = 432; -pub const SYS_fspick: c_long = 433; -pub const SYS_pidfd_open: c_long = 434; -pub const SYS_clone3: c_long = 435; -pub const SYS_close_range: c_long = 436; -pub const SYS_openat2: c_long = 437; -pub const SYS_pidfd_getfd: c_long = 438; -pub const SYS_faccessat2: c_long = 439; -pub const SYS_process_madvise: c_long = 440; -pub const SYS_epoll_pwait2: c_long = 441; -pub const SYS_mount_setattr: c_long = 442; - -// Plain syscalls aliased to their time64 variants 
-pub const SYS_clock_gettime: c_long = SYS_clock_gettime64; -pub const SYS_clock_settime: c_long = SYS_clock_settime64; -pub const SYS_clock_adjtime: c_long = SYS_clock_adjtime64; -pub const SYS_clock_getres: c_long = SYS_clock_getres_time64; -pub const SYS_clock_nanosleep: c_long = SYS_clock_nanosleep_time64; -pub const SYS_timer_gettime: c_long = SYS_timer_gettime64; -pub const SYS_timer_settime: c_long = SYS_timer_settime64; -pub const SYS_timerfd_gettime: c_long = SYS_timerfd_gettime64; -pub const SYS_timerfd_settime: c_long = SYS_timerfd_settime64; -pub const SYS_utimensat: c_long = SYS_utimensat_time64; -pub const SYS_pselect6: c_long = SYS_pselect6_time64; -pub const SYS_ppoll: c_long = SYS_ppoll_time64; -pub const SYS_recvmmsg: c_long = SYS_recvmmsg_time64; -pub const SYS_mq_timedsend: c_long = SYS_mq_timedsend_time64; -pub const SYS_mq_timedreceive: c_long = SYS_mq_timedreceive_time64; -pub const SYS_rt_sigtimedwait: c_long = SYS_rt_sigtimedwait_time64; -pub const SYS_futex: c_long = SYS_futex_time64; -pub const SYS_sched_rr_get_interval: c_long = SYS_sched_rr_get_interval_time64; diff --git a/vendor/libc/src/unix/linux_like/linux/musl/b32/x86/mod.rs b/vendor/libc/src/unix/linux_like/linux/musl/b32/x86/mod.rs deleted file mode 100644 index ae8b7d761dd6f1..00000000000000 --- a/vendor/libc/src/unix/linux_like/linux/musl/b32/x86/mod.rs +++ /dev/null @@ -1,889 +0,0 @@ -use crate::off_t; -use crate::prelude::*; - -pub type wchar_t = i32; - -s! { - pub struct stat { - pub st_dev: crate::dev_t, - __st_dev_padding: c_int, - __st_ino_truncated: c_long, - pub st_mode: crate::mode_t, - pub st_nlink: crate::nlink_t, - pub st_uid: crate::uid_t, - pub st_gid: crate::gid_t, - pub st_rdev: crate::dev_t, - __st_rdev_padding: c_int, - pub st_size: off_t, - pub st_blksize: crate::blksize_t, - pub st_blocks: crate::blkcnt_t, - pub st_atime: crate::time_t, - pub st_atime_nsec: c_long, - pub st_mtime: crate::time_t, - pub st_mtime_nsec: c_long, - pub st_ctime: crate::time_t, - pub st_ctime_nsec: c_long, - pub st_ino: crate::ino_t, - } - - pub struct stat64 { - pub st_dev: crate::dev_t, - __st_dev_padding: c_int, - __st_ino_truncated: c_long, - pub st_mode: crate::mode_t, - pub st_nlink: crate::nlink_t, - pub st_uid: crate::uid_t, - pub st_gid: crate::gid_t, - pub st_rdev: crate::dev_t, - __st_rdev_padding: c_int, - pub st_size: off_t, - pub st_blksize: crate::blksize_t, - pub st_blocks: crate::blkcnt_t, - pub st_atime: crate::time_t, - pub st_atime_nsec: c_long, - pub st_mtime: crate::time_t, - pub st_mtime_nsec: c_long, - pub st_ctime: crate::time_t, - pub st_ctime_nsec: c_long, - pub st_ino: crate::ino_t, - } - - pub struct mcontext_t { - __private: [u32; 22], - } - - pub struct stack_t { - pub ss_sp: *mut c_void, - pub ss_flags: c_int, - pub ss_size: size_t, - } - - pub struct ipc_perm { - #[cfg(musl_v1_2_3)] - pub __key: crate::key_t, - #[cfg(not(musl_v1_2_3))] - #[deprecated( - since = "0.2.173", - note = "This field is incorrectly named and will be changed - to __key in a future release." 
- )] - pub __ipc_perm_key: crate::key_t, - pub uid: crate::uid_t, - pub gid: crate::gid_t, - pub cuid: crate::uid_t, - pub cgid: crate::gid_t, - pub mode: crate::mode_t, - pub __seq: c_int, - __unused1: c_long, - __unused2: c_long, - } - - pub struct shmid_ds { - pub shm_perm: crate::ipc_perm, - pub shm_segsz: size_t, - pub shm_atime: crate::time_t, - __unused1: c_int, - pub shm_dtime: crate::time_t, - __unused2: c_int, - pub shm_ctime: crate::time_t, - __unused3: c_int, - pub shm_cpid: crate::pid_t, - pub shm_lpid: crate::pid_t, - pub shm_nattch: c_ulong, - __pad1: c_ulong, - __pad2: c_ulong, - } - - pub struct msqid_ds { - pub msg_perm: crate::ipc_perm, - pub msg_stime: crate::time_t, - __unused1: c_int, - pub msg_rtime: crate::time_t, - __unused2: c_int, - pub msg_ctime: crate::time_t, - __unused3: c_int, - pub __msg_cbytes: c_ulong, - pub msg_qnum: crate::msgqnum_t, - pub msg_qbytes: crate::msglen_t, - pub msg_lspid: crate::pid_t, - pub msg_lrpid: crate::pid_t, - __pad1: c_ulong, - __pad2: c_ulong, - } -} - -s_no_extra_traits! { - pub struct user_fpxregs_struct { - pub cwd: c_ushort, - pub swd: c_ushort, - pub twd: c_ushort, - pub fop: c_ushort, - pub fip: c_long, - pub fcs: c_long, - pub foo: c_long, - pub fos: c_long, - pub mxcsr: c_long, - __reserved: c_long, - pub st_space: [c_long; 32], - pub xmm_space: [c_long; 32], - padding: [c_long; 56], - } - - pub struct ucontext_t { - pub uc_flags: c_ulong, - pub uc_link: *mut ucontext_t, - pub uc_stack: crate::stack_t, - pub uc_mcontext: mcontext_t, - pub uc_sigmask: crate::sigset_t, - __private: [u8; 112], - } - - #[repr(align(8))] - pub struct max_align_t { - priv_: [f64; 3], - } -} - -cfg_if! { - if #[cfg(feature = "extra_traits")] { - impl PartialEq for user_fpxregs_struct { - fn eq(&self, other: &user_fpxregs_struct) -> bool { - self.cwd == other.cwd - && self.swd == other.swd - && self.twd == other.twd - && self.fop == other.fop - && self.fip == other.fip - && self.fcs == other.fcs - && self.foo == other.foo - && self.fos == other.fos - && self.mxcsr == other.mxcsr - // Ignore __reserved field - && self.st_space == other.st_space - && self.xmm_space == other.xmm_space - // Ignore padding field - } - } - - impl Eq for user_fpxregs_struct {} - - impl hash::Hash for user_fpxregs_struct { - fn hash(&self, state: &mut H) { - self.cwd.hash(state); - self.swd.hash(state); - self.twd.hash(state); - self.fop.hash(state); - self.fip.hash(state); - self.fcs.hash(state); - self.foo.hash(state); - self.fos.hash(state); - self.mxcsr.hash(state); - // Ignore __reserved field - self.st_space.hash(state); - self.xmm_space.hash(state); - // Ignore padding field - } - } - - impl PartialEq for ucontext_t { - fn eq(&self, other: &ucontext_t) -> bool { - self.uc_flags == other.uc_flags - && self.uc_link == other.uc_link - && self.uc_stack == other.uc_stack - && self.uc_mcontext == other.uc_mcontext - && self.uc_sigmask == other.uc_sigmask - && self - .__private - .iter() - .zip(other.__private.iter()) - .all(|(a, b)| a == b) - } - } - - impl Eq for ucontext_t {} - - impl hash::Hash for ucontext_t { - fn hash(&self, state: &mut H) { - self.uc_flags.hash(state); - self.uc_link.hash(state); - self.uc_stack.hash(state); - self.uc_mcontext.hash(state); - self.uc_sigmask.hash(state); - self.__private.hash(state); - } - } - } -} - -pub const SIGSTKSZ: size_t = 8192; -pub const MINSIGSTKSZ: size_t = 2048; - -pub const O_DIRECT: c_int = 0x4000; -pub const O_DIRECTORY: c_int = 0x10000; -pub const O_NOFOLLOW: c_int = 0x20000; -pub const O_ASYNC: c_int = 0x2000; -pub 
const O_LARGEFILE: c_int = 0o0100000; - -pub const MADV_SOFT_OFFLINE: c_int = 101; -pub const MCL_CURRENT: c_int = 0x0001; -pub const MCL_FUTURE: c_int = 0x0002; -pub const MCL_ONFAULT: c_int = 0x0004; -pub const CBAUD: crate::tcflag_t = 0o0010017; -pub const TAB1: c_int = 0x00000800; -pub const TAB2: c_int = 0x00001000; -pub const TAB3: c_int = 0x00001800; -pub const CR1: c_int = 0x00000200; -pub const CR2: c_int = 0x00000400; -pub const CR3: c_int = 0x00000600; -pub const FF1: c_int = 0x00008000; -pub const BS1: c_int = 0x00002000; -pub const VT1: c_int = 0x00004000; -pub const VWERASE: usize = 14; -pub const VREPRINT: usize = 12; -pub const VSUSP: usize = 10; -pub const VSTART: usize = 8; -pub const VSTOP: usize = 9; -pub const VDISCARD: usize = 13; -pub const VTIME: usize = 5; -pub const IXON: crate::tcflag_t = 0x00000400; -pub const IXOFF: crate::tcflag_t = 0x00001000; -pub const ONLCR: crate::tcflag_t = 0x4; -pub const CSIZE: crate::tcflag_t = 0x00000030; -pub const CS6: crate::tcflag_t = 0x00000010; -pub const CS7: crate::tcflag_t = 0x00000020; -pub const CS8: crate::tcflag_t = 0x00000030; -pub const CSTOPB: crate::tcflag_t = 0x00000040; -pub const CREAD: crate::tcflag_t = 0x00000080; -pub const PARENB: crate::tcflag_t = 0x00000100; -pub const PARODD: crate::tcflag_t = 0x00000200; -pub const HUPCL: crate::tcflag_t = 0x00000400; -pub const CLOCAL: crate::tcflag_t = 0x00000800; -pub const ECHOKE: crate::tcflag_t = 0x00000800; -pub const ECHOE: crate::tcflag_t = 0x00000010; -pub const ECHOK: crate::tcflag_t = 0x00000020; -pub const ECHONL: crate::tcflag_t = 0x00000040; -pub const ECHOPRT: crate::tcflag_t = 0x00000400; -pub const ECHOCTL: crate::tcflag_t = 0x00000200; -pub const ISIG: crate::tcflag_t = 0x00000001; -pub const ICANON: crate::tcflag_t = 0x00000002; -pub const PENDIN: crate::tcflag_t = 0x00004000; -pub const NOFLSH: crate::tcflag_t = 0x00000080; -pub const CIBAUD: crate::tcflag_t = 0o02003600000; -pub const CBAUDEX: crate::tcflag_t = 0o010000; -pub const VSWTC: usize = 7; -pub const OLCUC: crate::tcflag_t = 0o000002; -pub const NLDLY: crate::tcflag_t = 0o000400; -pub const CRDLY: crate::tcflag_t = 0o003000; -pub const TABDLY: crate::tcflag_t = 0o014000; -pub const BSDLY: crate::tcflag_t = 0o020000; -pub const FFDLY: crate::tcflag_t = 0o100000; -pub const VTDLY: crate::tcflag_t = 0o040000; -pub const XTABS: crate::tcflag_t = 0o014000; -pub const B57600: crate::speed_t = 0o010001; -pub const B115200: crate::speed_t = 0o010002; -pub const B230400: crate::speed_t = 0o010003; -pub const B460800: crate::speed_t = 0o010004; -pub const B500000: crate::speed_t = 0o010005; -pub const B576000: crate::speed_t = 0o010006; -pub const B921600: crate::speed_t = 0o010007; -pub const B1000000: crate::speed_t = 0o010010; -pub const B1152000: crate::speed_t = 0o010011; -pub const B1500000: crate::speed_t = 0o010012; -pub const B2000000: crate::speed_t = 0o010013; -pub const B2500000: crate::speed_t = 0o010014; -pub const B3000000: crate::speed_t = 0o010015; -pub const B3500000: crate::speed_t = 0o010016; -pub const B4000000: crate::speed_t = 0o010017; - -pub const O_APPEND: c_int = 1024; -pub const O_CREAT: c_int = 64; -pub const O_EXCL: c_int = 128; -pub const O_NOCTTY: c_int = 256; -pub const O_NONBLOCK: c_int = 2048; -pub const O_SYNC: c_int = 1052672; -pub const O_RSYNC: c_int = 1052672; -pub const O_DSYNC: c_int = 4096; - -pub const MAP_ANON: c_int = 0x0020; -pub const MAP_GROWSDOWN: c_int = 0x0100; -pub const MAP_DENYWRITE: c_int = 0x0800; -pub const MAP_EXECUTABLE: c_int = 0x01000; -pub 
const MAP_LOCKED: c_int = 0x02000; -pub const MAP_NORESERVE: c_int = 0x04000; -pub const MAP_POPULATE: c_int = 0x08000; -pub const MAP_NONBLOCK: c_int = 0x010000; -pub const MAP_STACK: c_int = 0x020000; -pub const MAP_SYNC: c_int = 0x080000; - -pub const SOCK_STREAM: c_int = 1; -pub const SOCK_DGRAM: c_int = 2; - -pub const EDEADLK: c_int = 35; -pub const ENAMETOOLONG: c_int = 36; -pub const ENOLCK: c_int = 37; -pub const ENOSYS: c_int = 38; -pub const ENOTEMPTY: c_int = 39; -pub const ELOOP: c_int = 40; -pub const ENOMSG: c_int = 42; -pub const EIDRM: c_int = 43; -pub const ECHRNG: c_int = 44; -pub const EL2NSYNC: c_int = 45; -pub const EL3HLT: c_int = 46; -pub const EL3RST: c_int = 47; -pub const ELNRNG: c_int = 48; -pub const EUNATCH: c_int = 49; -pub const ENOCSI: c_int = 50; -pub const EL2HLT: c_int = 51; -pub const EBADE: c_int = 52; -pub const EBADR: c_int = 53; -pub const EXFULL: c_int = 54; -pub const ENOANO: c_int = 55; -pub const EBADRQC: c_int = 56; -pub const EBADSLT: c_int = 57; -pub const EDEADLOCK: c_int = EDEADLK; -pub const EMULTIHOP: c_int = 72; -pub const EBADMSG: c_int = 74; -pub const EOVERFLOW: c_int = 75; -pub const ENOTUNIQ: c_int = 76; -pub const EBADFD: c_int = 77; -pub const EREMCHG: c_int = 78; -pub const ELIBACC: c_int = 79; -pub const ELIBBAD: c_int = 80; -pub const ELIBSCN: c_int = 81; -pub const ELIBMAX: c_int = 82; -pub const ELIBEXEC: c_int = 83; -pub const EILSEQ: c_int = 84; -pub const ERESTART: c_int = 85; -pub const ESTRPIPE: c_int = 86; -pub const EUSERS: c_int = 87; -pub const ENOTSOCK: c_int = 88; -pub const EDESTADDRREQ: c_int = 89; -pub const EMSGSIZE: c_int = 90; -pub const EPROTOTYPE: c_int = 91; -pub const ENOPROTOOPT: c_int = 92; -pub const EPROTONOSUPPORT: c_int = 93; -pub const ESOCKTNOSUPPORT: c_int = 94; -pub const EOPNOTSUPP: c_int = 95; -pub const ENOTSUP: c_int = EOPNOTSUPP; -pub const EPFNOSUPPORT: c_int = 96; -pub const EAFNOSUPPORT: c_int = 97; -pub const EADDRINUSE: c_int = 98; -pub const EADDRNOTAVAIL: c_int = 99; -pub const ENETDOWN: c_int = 100; -pub const ENETUNREACH: c_int = 101; -pub const ENETRESET: c_int = 102; -pub const ECONNABORTED: c_int = 103; -pub const ECONNRESET: c_int = 104; -pub const ENOBUFS: c_int = 105; -pub const EISCONN: c_int = 106; -pub const ENOTCONN: c_int = 107; -pub const ESHUTDOWN: c_int = 108; -pub const ETOOMANYREFS: c_int = 109; -pub const ETIMEDOUT: c_int = 110; -pub const ECONNREFUSED: c_int = 111; -pub const EHOSTDOWN: c_int = 112; -pub const EHOSTUNREACH: c_int = 113; -pub const EALREADY: c_int = 114; -pub const EINPROGRESS: c_int = 115; -pub const ESTALE: c_int = 116; -pub const EUCLEAN: c_int = 117; -pub const ENOTNAM: c_int = 118; -pub const ENAVAIL: c_int = 119; -pub const EISNAM: c_int = 120; -pub const EREMOTEIO: c_int = 121; -pub const EDQUOT: c_int = 122; -pub const ENOMEDIUM: c_int = 123; -pub const EMEDIUMTYPE: c_int = 124; -pub const ECANCELED: c_int = 125; -pub const ENOKEY: c_int = 126; -pub const EKEYEXPIRED: c_int = 127; -pub const EKEYREVOKED: c_int = 128; -pub const EKEYREJECTED: c_int = 129; -pub const EOWNERDEAD: c_int = 130; -pub const ENOTRECOVERABLE: c_int = 131; -pub const ERFKILL: c_int = 132; -pub const EHWPOISON: c_int = 133; - -pub const SA_ONSTACK: c_int = 0x08000000; -pub const SA_SIGINFO: c_int = 0x00000004; -pub const SA_NOCLDWAIT: c_int = 0x00000002; - -pub const SIGCHLD: c_int = 17; -pub const SIGBUS: c_int = 7; -pub const SIGTTIN: c_int = 21; -pub const SIGTTOU: c_int = 22; -pub const SIGXCPU: c_int = 24; -pub const SIGXFSZ: c_int = 25; -pub const SIGVTALRM: 
c_int = 26; -pub const SIGPROF: c_int = 27; -pub const SIGWINCH: c_int = 28; -pub const SIGUSR1: c_int = 10; -pub const SIGUSR2: c_int = 12; -pub const SIGCONT: c_int = 18; -pub const SIGSTOP: c_int = 19; -pub const SIGTSTP: c_int = 20; -pub const SIGURG: c_int = 23; -pub const SIGIO: c_int = 29; -pub const SIGSYS: c_int = 31; -pub const SIGSTKFLT: c_int = 16; -pub const SIGPOLL: c_int = 29; -pub const SIGPWR: c_int = 30; -pub const SIG_SETMASK: c_int = 2; -pub const SIG_BLOCK: c_int = 0x000000; -pub const SIG_UNBLOCK: c_int = 0x01; - -pub const EXTPROC: crate::tcflag_t = 0x00010000; - -pub const MAP_HUGETLB: c_int = 0x040000; -pub const MAP_32BIT: c_int = 0x0040; - -pub const F_GETLK: c_int = 12; -pub const F_GETOWN: c_int = 9; -pub const F_SETLK: c_int = 13; -pub const F_SETLKW: c_int = 14; -pub const F_SETOWN: c_int = 8; - -pub const VEOF: usize = 4; -pub const VEOL: usize = 11; -pub const VEOL2: usize = 16; -pub const VMIN: usize = 6; -pub const IEXTEN: crate::tcflag_t = 0x00008000; -pub const TOSTOP: crate::tcflag_t = 0x00000100; -pub const FLUSHO: crate::tcflag_t = 0x00001000; - -pub const POLLWRNORM: c_short = 0x100; -pub const POLLWRBAND: c_short = 0x200; - -pub const PTRACE_SYSEMU: c_int = 31; -pub const PTRACE_SYSEMU_SINGLESTEP: c_int = 32; - -// Syscall table -pub const SYS_restart_syscall: c_long = 0; -pub const SYS_exit: c_long = 1; -pub const SYS_fork: c_long = 2; -pub const SYS_read: c_long = 3; -pub const SYS_write: c_long = 4; -pub const SYS_open: c_long = 5; -pub const SYS_close: c_long = 6; -pub const SYS_waitpid: c_long = 7; -pub const SYS_creat: c_long = 8; -pub const SYS_link: c_long = 9; -pub const SYS_unlink: c_long = 10; -pub const SYS_execve: c_long = 11; -pub const SYS_chdir: c_long = 12; -pub const SYS_time: c_long = 13; -pub const SYS_mknod: c_long = 14; -pub const SYS_chmod: c_long = 15; -pub const SYS_lchown: c_long = 16; -pub const SYS_break: c_long = 17; -pub const SYS_oldstat: c_long = 18; -pub const SYS_lseek: c_long = 19; -pub const SYS_getpid: c_long = 20; -pub const SYS_mount: c_long = 21; -pub const SYS_umount: c_long = 22; -pub const SYS_setuid: c_long = 23; -pub const SYS_getuid: c_long = 24; -pub const SYS_stime: c_long = 25; -pub const SYS_ptrace: c_long = 26; -pub const SYS_alarm: c_long = 27; -pub const SYS_oldfstat: c_long = 28; -pub const SYS_pause: c_long = 29; -pub const SYS_utime: c_long = 30; -pub const SYS_stty: c_long = 31; -pub const SYS_gtty: c_long = 32; -pub const SYS_access: c_long = 33; -pub const SYS_nice: c_long = 34; -pub const SYS_ftime: c_long = 35; -pub const SYS_sync: c_long = 36; -pub const SYS_kill: c_long = 37; -pub const SYS_rename: c_long = 38; -pub const SYS_mkdir: c_long = 39; -pub const SYS_rmdir: c_long = 40; -pub const SYS_dup: c_long = 41; -pub const SYS_pipe: c_long = 42; -pub const SYS_times: c_long = 43; -pub const SYS_prof: c_long = 44; -pub const SYS_brk: c_long = 45; -pub const SYS_setgid: c_long = 46; -pub const SYS_getgid: c_long = 47; -pub const SYS_signal: c_long = 48; -pub const SYS_geteuid: c_long = 49; -pub const SYS_getegid: c_long = 50; -pub const SYS_acct: c_long = 51; -pub const SYS_umount2: c_long = 52; -pub const SYS_lock: c_long = 53; -pub const SYS_ioctl: c_long = 54; -pub const SYS_fcntl: c_long = 55; -pub const SYS_mpx: c_long = 56; -pub const SYS_setpgid: c_long = 57; -pub const SYS_ulimit: c_long = 58; -pub const SYS_oldolduname: c_long = 59; -pub const SYS_umask: c_long = 60; -pub const SYS_chroot: c_long = 61; -pub const SYS_ustat: c_long = 62; -pub const SYS_dup2: c_long = 63; -pub 
const SYS_getppid: c_long = 64; -pub const SYS_getpgrp: c_long = 65; -pub const SYS_setsid: c_long = 66; -pub const SYS_sigaction: c_long = 67; -pub const SYS_sgetmask: c_long = 68; -pub const SYS_ssetmask: c_long = 69; -pub const SYS_setreuid: c_long = 70; -pub const SYS_setregid: c_long = 71; -pub const SYS_sigsuspend: c_long = 72; -pub const SYS_sigpending: c_long = 73; -pub const SYS_sethostname: c_long = 74; -pub const SYS_setrlimit: c_long = 75; -pub const SYS_getrlimit: c_long = 76; -pub const SYS_getrusage: c_long = 77; -pub const SYS_gettimeofday: c_long = 78; -pub const SYS_settimeofday: c_long = 79; -pub const SYS_getgroups: c_long = 80; -pub const SYS_setgroups: c_long = 81; -pub const SYS_select: c_long = 82; -pub const SYS_symlink: c_long = 83; -pub const SYS_oldlstat: c_long = 84; -pub const SYS_readlink: c_long = 85; -pub const SYS_uselib: c_long = 86; -pub const SYS_swapon: c_long = 87; -pub const SYS_reboot: c_long = 88; -pub const SYS_readdir: c_long = 89; -pub const SYS_mmap: c_long = 90; -pub const SYS_munmap: c_long = 91; -pub const SYS_truncate: c_long = 92; -pub const SYS_ftruncate: c_long = 93; -pub const SYS_fchmod: c_long = 94; -pub const SYS_fchown: c_long = 95; -pub const SYS_getpriority: c_long = 96; -pub const SYS_setpriority: c_long = 97; -pub const SYS_profil: c_long = 98; -pub const SYS_statfs: c_long = 99; -pub const SYS_fstatfs: c_long = 100; -pub const SYS_ioperm: c_long = 101; -pub const SYS_socketcall: c_long = 102; -pub const SYS_syslog: c_long = 103; -pub const SYS_setitimer: c_long = 104; -pub const SYS_getitimer: c_long = 105; -pub const SYS_stat: c_long = 106; -pub const SYS_lstat: c_long = 107; -pub const SYS_fstat: c_long = 108; -pub const SYS_olduname: c_long = 109; -pub const SYS_iopl: c_long = 110; -pub const SYS_vhangup: c_long = 111; -pub const SYS_idle: c_long = 112; -pub const SYS_vm86old: c_long = 113; -pub const SYS_wait4: c_long = 114; -pub const SYS_swapoff: c_long = 115; -pub const SYS_sysinfo: c_long = 116; -pub const SYS_ipc: c_long = 117; -pub const SYS_fsync: c_long = 118; -pub const SYS_sigreturn: c_long = 119; -pub const SYS_clone: c_long = 120; -pub const SYS_setdomainname: c_long = 121; -pub const SYS_uname: c_long = 122; -pub const SYS_modify_ldt: c_long = 123; -pub const SYS_adjtimex: c_long = 124; -pub const SYS_mprotect: c_long = 125; -pub const SYS_sigprocmask: c_long = 126; -#[deprecated(since = "0.2.70", note = "Functional up to 2.6 kernel")] -pub const SYS_create_module: c_long = 127; -pub const SYS_init_module: c_long = 128; -pub const SYS_delete_module: c_long = 129; -#[deprecated(since = "0.2.70", note = "Functional up to 2.6 kernel")] -pub const SYS_get_kernel_syms: c_long = 130; -pub const SYS_quotactl: c_long = 131; -pub const SYS_getpgid: c_long = 132; -pub const SYS_fchdir: c_long = 133; -pub const SYS_bdflush: c_long = 134; -pub const SYS_sysfs: c_long = 135; -pub const SYS_personality: c_long = 136; -pub const SYS_afs_syscall: c_long = 137; -pub const SYS_setfsuid: c_long = 138; -pub const SYS_setfsgid: c_long = 139; -pub const SYS__llseek: c_long = 140; -pub const SYS_getdents: c_long = 141; -pub const SYS__newselect: c_long = 142; -pub const SYS_flock: c_long = 143; -pub const SYS_msync: c_long = 144; -pub const SYS_readv: c_long = 145; -pub const SYS_writev: c_long = 146; -pub const SYS_getsid: c_long = 147; -pub const SYS_fdatasync: c_long = 148; -pub const SYS__sysctl: c_long = 149; -pub const SYS_mlock: c_long = 150; -pub const SYS_munlock: c_long = 151; -pub const SYS_mlockall: c_long = 152; -pub 
const SYS_munlockall: c_long = 153; -pub const SYS_sched_setparam: c_long = 154; -pub const SYS_sched_getparam: c_long = 155; -pub const SYS_sched_setscheduler: c_long = 156; -pub const SYS_sched_getscheduler: c_long = 157; -pub const SYS_sched_yield: c_long = 158; -pub const SYS_sched_get_priority_max: c_long = 159; -pub const SYS_sched_get_priority_min: c_long = 160; -pub const SYS_sched_rr_get_interval: c_long = 161; -pub const SYS_nanosleep: c_long = 162; -pub const SYS_mremap: c_long = 163; -pub const SYS_setresuid: c_long = 164; -pub const SYS_getresuid: c_long = 165; -pub const SYS_vm86: c_long = 166; -#[deprecated(since = "0.2.70", note = "Functional up to 2.6 kernel")] -pub const SYS_query_module: c_long = 167; -pub const SYS_poll: c_long = 168; -pub const SYS_nfsservctl: c_long = 169; -pub const SYS_setresgid: c_long = 170; -pub const SYS_getresgid: c_long = 171; -pub const SYS_prctl: c_long = 172; -pub const SYS_rt_sigreturn: c_long = 173; -pub const SYS_rt_sigaction: c_long = 174; -pub const SYS_rt_sigprocmask: c_long = 175; -pub const SYS_rt_sigpending: c_long = 176; -pub const SYS_rt_sigtimedwait: c_long = 177; -pub const SYS_rt_sigqueueinfo: c_long = 178; -pub const SYS_rt_sigsuspend: c_long = 179; -pub const SYS_pread64: c_long = 180; -pub const SYS_pwrite64: c_long = 181; -pub const SYS_chown: c_long = 182; -pub const SYS_getcwd: c_long = 183; -pub const SYS_capget: c_long = 184; -pub const SYS_capset: c_long = 185; -pub const SYS_sigaltstack: c_long = 186; -pub const SYS_sendfile: c_long = 187; -pub const SYS_getpmsg: c_long = 188; -pub const SYS_putpmsg: c_long = 189; -pub const SYS_vfork: c_long = 190; -pub const SYS_ugetrlimit: c_long = 191; -pub const SYS_mmap2: c_long = 192; -pub const SYS_truncate64: c_long = 193; -pub const SYS_ftruncate64: c_long = 194; -pub const SYS_stat64: c_long = 195; -pub const SYS_lstat64: c_long = 196; -pub const SYS_fstat64: c_long = 197; -pub const SYS_lchown32: c_long = 198; -pub const SYS_getuid32: c_long = 199; -pub const SYS_getgid32: c_long = 200; -pub const SYS_geteuid32: c_long = 201; -pub const SYS_getegid32: c_long = 202; -pub const SYS_setreuid32: c_long = 203; -pub const SYS_setregid32: c_long = 204; -pub const SYS_getgroups32: c_long = 205; -pub const SYS_setgroups32: c_long = 206; -pub const SYS_fchown32: c_long = 207; -pub const SYS_setresuid32: c_long = 208; -pub const SYS_getresuid32: c_long = 209; -pub const SYS_setresgid32: c_long = 210; -pub const SYS_getresgid32: c_long = 211; -pub const SYS_chown32: c_long = 212; -pub const SYS_setuid32: c_long = 213; -pub const SYS_setgid32: c_long = 214; -pub const SYS_setfsuid32: c_long = 215; -pub const SYS_setfsgid32: c_long = 216; -pub const SYS_pivot_root: c_long = 217; -pub const SYS_mincore: c_long = 218; -pub const SYS_madvise: c_long = 219; -pub const SYS_getdents64: c_long = 220; -pub const SYS_fcntl64: c_long = 221; -pub const SYS_gettid: c_long = 224; -pub const SYS_readahead: c_long = 225; -pub const SYS_setxattr: c_long = 226; -pub const SYS_lsetxattr: c_long = 227; -pub const SYS_fsetxattr: c_long = 228; -pub const SYS_getxattr: c_long = 229; -pub const SYS_lgetxattr: c_long = 230; -pub const SYS_fgetxattr: c_long = 231; -pub const SYS_listxattr: c_long = 232; -pub const SYS_llistxattr: c_long = 233; -pub const SYS_flistxattr: c_long = 234; -pub const SYS_removexattr: c_long = 235; -pub const SYS_lremovexattr: c_long = 236; -pub const SYS_fremovexattr: c_long = 237; -pub const SYS_tkill: c_long = 238; -pub const SYS_sendfile64: c_long = 239; -pub const SYS_futex: 
c_long = 240; -pub const SYS_sched_setaffinity: c_long = 241; -pub const SYS_sched_getaffinity: c_long = 242; -pub const SYS_set_thread_area: c_long = 243; -pub const SYS_get_thread_area: c_long = 244; -pub const SYS_io_setup: c_long = 245; -pub const SYS_io_destroy: c_long = 246; -pub const SYS_io_getevents: c_long = 247; -pub const SYS_io_submit: c_long = 248; -pub const SYS_io_cancel: c_long = 249; -pub const SYS_fadvise64: c_long = 250; -pub const SYS_exit_group: c_long = 252; -pub const SYS_lookup_dcookie: c_long = 253; -pub const SYS_epoll_create: c_long = 254; -pub const SYS_epoll_ctl: c_long = 255; -pub const SYS_epoll_wait: c_long = 256; -pub const SYS_remap_file_pages: c_long = 257; -pub const SYS_set_tid_address: c_long = 258; -pub const SYS_timer_create: c_long = 259; -pub const SYS_timer_settime: c_long = 260; -pub const SYS_timer_gettime: c_long = 261; -pub const SYS_timer_getoverrun: c_long = 262; -pub const SYS_timer_delete: c_long = 263; -pub const SYS_clock_settime: c_long = 264; -pub const SYS_clock_gettime: c_long = 265; -pub const SYS_clock_getres: c_long = 266; -pub const SYS_clock_nanosleep: c_long = 267; -pub const SYS_statfs64: c_long = 268; -pub const SYS_fstatfs64: c_long = 269; -pub const SYS_tgkill: c_long = 270; -pub const SYS_utimes: c_long = 271; -pub const SYS_fadvise64_64: c_long = 272; -pub const SYS_vserver: c_long = 273; -pub const SYS_mbind: c_long = 274; -pub const SYS_get_mempolicy: c_long = 275; -pub const SYS_set_mempolicy: c_long = 276; -pub const SYS_mq_open: c_long = 277; -pub const SYS_mq_unlink: c_long = 278; -pub const SYS_mq_timedsend: c_long = 279; -pub const SYS_mq_timedreceive: c_long = 280; -pub const SYS_mq_notify: c_long = 281; -pub const SYS_mq_getsetattr: c_long = 282; -pub const SYS_kexec_load: c_long = 283; -pub const SYS_waitid: c_long = 284; -pub const SYS_add_key: c_long = 286; -pub const SYS_request_key: c_long = 287; -pub const SYS_keyctl: c_long = 288; -pub const SYS_ioprio_set: c_long = 289; -pub const SYS_ioprio_get: c_long = 290; -pub const SYS_inotify_init: c_long = 291; -pub const SYS_inotify_add_watch: c_long = 292; -pub const SYS_inotify_rm_watch: c_long = 293; -pub const SYS_migrate_pages: c_long = 294; -pub const SYS_openat: c_long = 295; -pub const SYS_mkdirat: c_long = 296; -pub const SYS_mknodat: c_long = 297; -pub const SYS_fchownat: c_long = 298; -pub const SYS_futimesat: c_long = 299; -pub const SYS_fstatat64: c_long = 300; -pub const SYS_unlinkat: c_long = 301; -pub const SYS_renameat: c_long = 302; -pub const SYS_linkat: c_long = 303; -pub const SYS_symlinkat: c_long = 304; -pub const SYS_readlinkat: c_long = 305; -pub const SYS_fchmodat: c_long = 306; -pub const SYS_faccessat: c_long = 307; -pub const SYS_pselect6: c_long = 308; -pub const SYS_ppoll: c_long = 309; -pub const SYS_unshare: c_long = 310; -pub const SYS_set_robust_list: c_long = 311; -pub const SYS_get_robust_list: c_long = 312; -pub const SYS_splice: c_long = 313; -pub const SYS_sync_file_range: c_long = 314; -pub const SYS_tee: c_long = 315; -pub const SYS_vmsplice: c_long = 316; -pub const SYS_move_pages: c_long = 317; -pub const SYS_getcpu: c_long = 318; -pub const SYS_epoll_pwait: c_long = 319; -pub const SYS_utimensat: c_long = 320; -pub const SYS_signalfd: c_long = 321; -pub const SYS_timerfd_create: c_long = 322; -pub const SYS_eventfd: c_long = 323; -pub const SYS_fallocate: c_long = 324; -pub const SYS_timerfd_settime: c_long = 325; -pub const SYS_timerfd_gettime: c_long = 326; -pub const SYS_signalfd4: c_long = 327; -pub const 
SYS_eventfd2: c_long = 328; -pub const SYS_epoll_create1: c_long = 329; -pub const SYS_dup3: c_long = 330; -pub const SYS_pipe2: c_long = 331; -pub const SYS_inotify_init1: c_long = 332; -pub const SYS_preadv: c_long = 333; -pub const SYS_pwritev: c_long = 334; -pub const SYS_rt_tgsigqueueinfo: c_long = 335; -pub const SYS_perf_event_open: c_long = 336; -pub const SYS_recvmmsg: c_long = 337; -pub const SYS_fanotify_init: c_long = 338; -pub const SYS_fanotify_mark: c_long = 339; -pub const SYS_prlimit64: c_long = 340; -pub const SYS_name_to_handle_at: c_long = 341; -pub const SYS_open_by_handle_at: c_long = 342; -pub const SYS_clock_adjtime: c_long = 343; -pub const SYS_syncfs: c_long = 344; -pub const SYS_sendmmsg: c_long = 345; -pub const SYS_setns: c_long = 346; -pub const SYS_process_vm_readv: c_long = 347; -pub const SYS_process_vm_writev: c_long = 348; -pub const SYS_kcmp: c_long = 349; -pub const SYS_finit_module: c_long = 350; -pub const SYS_sched_setattr: c_long = 351; -pub const SYS_sched_getattr: c_long = 352; -pub const SYS_renameat2: c_long = 353; -pub const SYS_seccomp: c_long = 354; -pub const SYS_getrandom: c_long = 355; -pub const SYS_memfd_create: c_long = 356; -pub const SYS_bpf: c_long = 357; -pub const SYS_execveat: c_long = 358; -pub const SYS_socket: c_long = 359; -pub const SYS_socketpair: c_long = 360; -pub const SYS_bind: c_long = 361; -pub const SYS_connect: c_long = 362; -pub const SYS_listen: c_long = 363; -pub const SYS_accept4: c_long = 364; -pub const SYS_getsockopt: c_long = 365; -pub const SYS_setsockopt: c_long = 366; -pub const SYS_getsockname: c_long = 367; -pub const SYS_getpeername: c_long = 368; -pub const SYS_sendto: c_long = 369; -pub const SYS_sendmsg: c_long = 370; -pub const SYS_recvfrom: c_long = 371; -pub const SYS_recvmsg: c_long = 372; -pub const SYS_shutdown: c_long = 373; -pub const SYS_userfaultfd: c_long = 374; -pub const SYS_membarrier: c_long = 375; -pub const SYS_mlock2: c_long = 376; -pub const SYS_copy_file_range: c_long = 377; -pub const SYS_preadv2: c_long = 378; -pub const SYS_pwritev2: c_long = 379; -pub const SYS_pkey_mprotect: c_long = 380; -pub const SYS_pkey_alloc: c_long = 381; -pub const SYS_pkey_free: c_long = 382; -pub const SYS_statx: c_long = 383; -pub const SYS_pidfd_send_signal: c_long = 424; -pub const SYS_io_uring_setup: c_long = 425; -pub const SYS_io_uring_enter: c_long = 426; -pub const SYS_io_uring_register: c_long = 427; -pub const SYS_open_tree: c_long = 428; -pub const SYS_move_mount: c_long = 429; -pub const SYS_fsopen: c_long = 430; -pub const SYS_fsconfig: c_long = 431; -pub const SYS_fsmount: c_long = 432; -pub const SYS_fspick: c_long = 433; -pub const SYS_pidfd_open: c_long = 434; -pub const SYS_clone3: c_long = 435; -pub const SYS_close_range: c_long = 436; -pub const SYS_openat2: c_long = 437; -pub const SYS_pidfd_getfd: c_long = 438; -pub const SYS_faccessat2: c_long = 439; -pub const SYS_process_madvise: c_long = 440; -pub const SYS_epoll_pwait2: c_long = 441; -pub const SYS_mount_setattr: c_long = 442; -pub const SYS_quotactl_fd: c_long = 443; -pub const SYS_landlock_create_ruleset: c_long = 444; -pub const SYS_landlock_add_rule: c_long = 445; -pub const SYS_landlock_restrict_self: c_long = 446; -pub const SYS_memfd_secret: c_long = 447; -pub const SYS_process_mrelease: c_long = 448; -pub const SYS_futex_waitv: c_long = 449; -pub const SYS_set_mempolicy_home_node: c_long = 450; -pub const SYS_fchmodat2: c_long = 452; - -// offsets in user_regs_structs, from sys/reg.h -pub const EBX: c_int = 0; -pub 
const ECX: c_int = 1; -pub const EDX: c_int = 2; -pub const ESI: c_int = 3; -pub const EDI: c_int = 4; -pub const EBP: c_int = 5; -pub const EAX: c_int = 6; -pub const DS: c_int = 7; -pub const ES: c_int = 8; -pub const FS: c_int = 9; -pub const GS: c_int = 10; -pub const ORIG_EAX: c_int = 11; -pub const EIP: c_int = 12; -pub const CS: c_int = 13; -pub const EFL: c_int = 14; -pub const UESP: c_int = 15; -pub const SS: c_int = 16; diff --git a/vendor/libc/src/unix/linux_like/linux/musl/b64/aarch64/mod.rs b/vendor/libc/src/unix/linux_like/linux/musl/b64/aarch64/mod.rs deleted file mode 100644 index 67151a8d37116f..00000000000000 --- a/vendor/libc/src/unix/linux_like/linux/musl/b64/aarch64/mod.rs +++ /dev/null @@ -1,712 +0,0 @@ -use crate::off_t; -use crate::prelude::*; - -pub type __u64 = c_ulonglong; -pub type __s64 = c_longlong; -pub type wchar_t = u32; -pub type nlink_t = u32; -pub type blksize_t = c_int; - -s! { - pub struct stat { - pub st_dev: crate::dev_t, - pub st_ino: crate::ino_t, - pub st_mode: crate::mode_t, - pub st_nlink: crate::nlink_t, - pub st_uid: crate::uid_t, - pub st_gid: crate::gid_t, - pub st_rdev: crate::dev_t, - __pad0: c_ulong, - pub st_size: off_t, - pub st_blksize: crate::blksize_t, - __pad1: c_int, - pub st_blocks: crate::blkcnt_t, - pub st_atime: crate::time_t, - pub st_atime_nsec: c_long, - pub st_mtime: crate::time_t, - pub st_mtime_nsec: c_long, - pub st_ctime: crate::time_t, - pub st_ctime_nsec: c_long, - __unused: [c_uint; 2], - } - - pub struct stat64 { - pub st_dev: crate::dev_t, - pub st_ino: crate::ino_t, - pub st_mode: crate::mode_t, - pub st_nlink: crate::nlink_t, - pub st_uid: crate::uid_t, - pub st_gid: crate::gid_t, - pub st_rdev: crate::dev_t, - __pad0: c_ulong, - pub st_size: off_t, - pub st_blksize: crate::blksize_t, - __pad1: c_int, - pub st_blocks: crate::blkcnt_t, - pub st_atime: crate::time_t, - pub st_atime_nsec: c_long, - pub st_mtime: crate::time_t, - pub st_mtime_nsec: c_long, - pub st_ctime: crate::time_t, - pub st_ctime_nsec: c_long, - __unused: [c_uint; 2], - } - - pub struct user_regs_struct { - pub regs: [c_ulonglong; 31], - pub sp: c_ulonglong, - pub pc: c_ulonglong, - pub pstate: c_ulonglong, - } - - pub struct ipc_perm { - #[cfg(musl_v1_2_3)] - pub __key: crate::key_t, - #[cfg(not(musl_v1_2_3))] - #[deprecated( - since = "0.2.173", - note = "This field is incorrectly named and will be changed - to __key in a future release." - )] - pub __ipc_perm_key: crate::key_t, - pub uid: crate::uid_t, - pub gid: crate::gid_t, - pub cuid: crate::uid_t, - pub cgid: crate::gid_t, - pub mode: crate::mode_t, - - #[cfg(musl_v1_2_3)] - pub __seq: c_int, - #[cfg(not(musl_v1_2_3))] - #[deprecated( - since = "0.2.173", - note = "The type of this field has changed from c_ushort to c_int, - we'll follow that change in the future release." 
- )] - pub __seq: c_ushort, - __unused1: c_long, - __unused2: c_long, - } - - pub struct ucontext_t { - pub uc_flags: c_ulong, - pub uc_link: *mut ucontext_t, - pub uc_stack: crate::stack_t, - pub uc_sigmask: crate::sigset_t, - pub uc_mcontext: mcontext_t, - } - - #[repr(align(16))] - pub struct mcontext_t { - pub fault_address: c_ulong, - pub regs: [c_ulong; 31], - pub sp: c_ulong, - pub pc: c_ulong, - pub pstate: c_ulong, - __reserved: [u64; 512], - } - - #[repr(align(8))] - pub struct clone_args { - pub flags: c_ulonglong, - pub pidfd: c_ulonglong, - pub child_tid: c_ulonglong, - pub parent_tid: c_ulonglong, - pub exit_signal: c_ulonglong, - pub stack: c_ulonglong, - pub stack_size: c_ulonglong, - pub tls: c_ulonglong, - pub set_tid: c_ulonglong, - pub set_tid_size: c_ulonglong, - pub cgroup: c_ulonglong, - } - - pub struct user_fpsimd_struct { - pub vregs: [crate::__uint128_t; 32], - pub fpsr: u32, - pub fpcr: u32, - } -} - -s_no_extra_traits! { - #[repr(align(16))] - pub struct max_align_t { - priv_: [f32; 8], - } -} - -pub const O_APPEND: c_int = 1024; -pub const O_DIRECT: c_int = 0x10000; -pub const O_DIRECTORY: c_int = 0x4000; -pub const O_LARGEFILE: c_int = 0x20000; -pub const O_NOFOLLOW: c_int = 0x8000; -pub const O_CREAT: c_int = 64; -pub const O_EXCL: c_int = 128; -pub const O_NOCTTY: c_int = 256; -pub const O_NONBLOCK: c_int = 2048; -pub const O_SYNC: c_int = 1052672; -pub const O_RSYNC: c_int = 1052672; -pub const O_DSYNC: c_int = 4096; -pub const O_ASYNC: c_int = 0x2000; - -pub const ENAMETOOLONG: c_int = 36; -pub const ENOLCK: c_int = 37; -pub const ENOSYS: c_int = 38; -pub const ENOTEMPTY: c_int = 39; -pub const ELOOP: c_int = 40; -pub const ENOMSG: c_int = 42; -pub const EIDRM: c_int = 43; -pub const ECHRNG: c_int = 44; -pub const EL2NSYNC: c_int = 45; -pub const EL3HLT: c_int = 46; -pub const EL3RST: c_int = 47; -pub const ELNRNG: c_int = 48; -pub const EUNATCH: c_int = 49; -pub const ENOCSI: c_int = 50; -pub const EL2HLT: c_int = 51; -pub const EBADE: c_int = 52; -pub const EBADR: c_int = 53; -pub const EXFULL: c_int = 54; -pub const ENOANO: c_int = 55; -pub const EBADRQC: c_int = 56; -pub const EBADSLT: c_int = 57; -pub const EMULTIHOP: c_int = 72; -pub const EBADMSG: c_int = 74; -pub const EOVERFLOW: c_int = 75; -pub const ENOTUNIQ: c_int = 76; -pub const EBADFD: c_int = 77; -pub const EREMCHG: c_int = 78; -pub const ELIBACC: c_int = 79; -pub const ELIBBAD: c_int = 80; -pub const ELIBSCN: c_int = 81; -pub const ELIBMAX: c_int = 82; -pub const ELIBEXEC: c_int = 83; -pub const EILSEQ: c_int = 84; -pub const ERESTART: c_int = 85; -pub const ESTRPIPE: c_int = 86; -pub const EUSERS: c_int = 87; -pub const ENOTSOCK: c_int = 88; -pub const EDESTADDRREQ: c_int = 89; -pub const EMSGSIZE: c_int = 90; -pub const EPROTOTYPE: c_int = 91; -pub const ENOPROTOOPT: c_int = 92; -pub const EPROTONOSUPPORT: c_int = 93; -pub const ESOCKTNOSUPPORT: c_int = 94; -pub const EOPNOTSUPP: c_int = 95; -pub const ENOTSUP: c_int = EOPNOTSUPP; -pub const EPFNOSUPPORT: c_int = 96; -pub const EAFNOSUPPORT: c_int = 97; -pub const EADDRINUSE: c_int = 98; -pub const EADDRNOTAVAIL: c_int = 99; -pub const ENETDOWN: c_int = 100; -pub const ENETUNREACH: c_int = 101; -pub const ENETRESET: c_int = 102; -pub const ECONNABORTED: c_int = 103; -pub const ECONNRESET: c_int = 104; -pub const ENOBUFS: c_int = 105; -pub const EISCONN: c_int = 106; -pub const ENOTCONN: c_int = 107; -pub const ESHUTDOWN: c_int = 108; -pub const ETOOMANYREFS: c_int = 109; -pub const ETIMEDOUT: c_int = 110; -pub const ECONNREFUSED: c_int 
= 111; -pub const EHOSTDOWN: c_int = 112; -pub const EHOSTUNREACH: c_int = 113; -pub const EALREADY: c_int = 114; -pub const EINPROGRESS: c_int = 115; -pub const ESTALE: c_int = 116; -pub const EUCLEAN: c_int = 117; -pub const ENOTNAM: c_int = 118; -pub const ENAVAIL: c_int = 119; -pub const EISNAM: c_int = 120; -pub const EREMOTEIO: c_int = 121; -pub const EDQUOT: c_int = 122; -pub const ENOMEDIUM: c_int = 123; -pub const EMEDIUMTYPE: c_int = 124; -pub const ECANCELED: c_int = 125; -pub const ENOKEY: c_int = 126; -pub const EKEYEXPIRED: c_int = 127; -pub const EKEYREVOKED: c_int = 128; -pub const EKEYREJECTED: c_int = 129; -pub const EOWNERDEAD: c_int = 130; -pub const ENOTRECOVERABLE: c_int = 131; -pub const ERFKILL: c_int = 132; -pub const EHWPOISON: c_int = 133; - -// bits/hwcap.h -pub const HWCAP_FP: c_ulong = 1 << 0; -pub const HWCAP_ASIMD: c_ulong = 1 << 1; -pub const HWCAP_EVTSTRM: c_ulong = 1 << 2; -pub const HWCAP_AES: c_ulong = 1 << 3; -pub const HWCAP_PMULL: c_ulong = 1 << 4; -pub const HWCAP_SHA1: c_ulong = 1 << 5; -pub const HWCAP_SHA2: c_ulong = 1 << 6; -pub const HWCAP_CRC32: c_ulong = 1 << 7; -pub const HWCAP_ATOMICS: c_ulong = 1 << 8; -pub const HWCAP_FPHP: c_ulong = 1 << 9; -pub const HWCAP_ASIMDHP: c_ulong = 1 << 10; -pub const HWCAP_CPUID: c_ulong = 1 << 11; -pub const HWCAP_ASIMDRDM: c_ulong = 1 << 12; -pub const HWCAP_JSCVT: c_ulong = 1 << 13; -pub const HWCAP_FCMA: c_ulong = 1 << 14; -pub const HWCAP_LRCPC: c_ulong = 1 << 15; -pub const HWCAP_DCPOP: c_ulong = 1 << 16; -pub const HWCAP_SHA3: c_ulong = 1 << 17; -pub const HWCAP_SM3: c_ulong = 1 << 18; -pub const HWCAP_SM4: c_ulong = 1 << 19; -pub const HWCAP_ASIMDDP: c_ulong = 1 << 20; -pub const HWCAP_SHA512: c_ulong = 1 << 21; -pub const HWCAP_SVE: c_ulong = 1 << 22; -pub const HWCAP_ASIMDFHM: c_ulong = 1 << 23; -pub const HWCAP_DIT: c_ulong = 1 << 24; -pub const HWCAP_USCAT: c_ulong = 1 << 25; -pub const HWCAP_ILRCPC: c_ulong = 1 << 26; -pub const HWCAP_FLAGM: c_ulong = 1 << 27; -pub const HWCAP_SSBS: c_ulong = 1 << 28; -pub const HWCAP_SB: c_ulong = 1 << 29; -pub const HWCAP_PACA: c_ulong = 1 << 30; -pub const HWCAP_PACG: c_ulong = 1 << 31; - -pub const MAP_ANON: c_int = 0x0020; -pub const MAP_GROWSDOWN: c_int = 0x0100; -pub const MAP_DENYWRITE: c_int = 0x0800; -pub const MAP_EXECUTABLE: c_int = 0x01000; -pub const MAP_LOCKED: c_int = 0x02000; -pub const MAP_NORESERVE: c_int = 0x04000; -pub const MAP_POPULATE: c_int = 0x08000; -pub const MAP_NONBLOCK: c_int = 0x010000; -pub const MAP_STACK: c_int = 0x020000; -pub const MAP_HUGETLB: c_int = 0x040000; -pub const MAP_SYNC: c_int = 0x080000; - -pub const SOCK_STREAM: c_int = 1; -pub const SOCK_DGRAM: c_int = 2; - -pub const SA_ONSTACK: c_int = 0x08000000; -pub const SA_SIGINFO: c_int = 0x00000004; -pub const SA_NOCLDWAIT: c_int = 0x00000002; - -pub const SIGCHLD: c_int = 17; -pub const SIGBUS: c_int = 7; -pub const SIGTTIN: c_int = 21; -pub const SIGTTOU: c_int = 22; -pub const SIGXCPU: c_int = 24; -pub const SIGXFSZ: c_int = 25; -pub const SIGVTALRM: c_int = 26; -pub const SIGPROF: c_int = 27; -pub const SIGWINCH: c_int = 28; -pub const SIGUSR1: c_int = 10; -pub const SIGUSR2: c_int = 12; -pub const SIGCONT: c_int = 18; -pub const SIGSTOP: c_int = 19; -pub const SIGTSTP: c_int = 20; -pub const SIGURG: c_int = 23; -pub const SIGIO: c_int = 29; -pub const SIGSYS: c_int = 31; -pub const SIGSTKFLT: c_int = 16; -pub const SIGPOLL: c_int = 29; -pub const SIGPWR: c_int = 30; -pub const SIG_SETMASK: c_int = 2; -pub const SIG_BLOCK: c_int = 0x000000; -pub const SIG_UNBLOCK: 
c_int = 0x01; - -pub const F_GETLK: c_int = 5; -pub const F_GETOWN: c_int = 9; -pub const F_SETLK: c_int = 6; -pub const F_SETLKW: c_int = 7; -pub const F_SETOWN: c_int = 8; - -pub const VEOF: usize = 4; - -pub const POLLWRNORM: c_short = 0x100; -pub const POLLWRBAND: c_short = 0x200; - -pub const MINSIGSTKSZ: size_t = 6144; -pub const SIGSTKSZ: size_t = 12288; - -pub const MADV_SOFT_OFFLINE: c_int = 101; -pub const SYS_io_setup: c_long = 0; -pub const SYS_io_destroy: c_long = 1; -pub const SYS_io_submit: c_long = 2; -pub const SYS_io_cancel: c_long = 3; -pub const SYS_io_getevents: c_long = 4; -pub const SYS_setxattr: c_long = 5; -pub const SYS_lsetxattr: c_long = 6; -pub const SYS_fsetxattr: c_long = 7; -pub const SYS_getxattr: c_long = 8; -pub const SYS_lgetxattr: c_long = 9; -pub const SYS_fgetxattr: c_long = 10; -pub const SYS_listxattr: c_long = 11; -pub const SYS_llistxattr: c_long = 12; -pub const SYS_flistxattr: c_long = 13; -pub const SYS_removexattr: c_long = 14; -pub const SYS_lremovexattr: c_long = 15; -pub const SYS_fremovexattr: c_long = 16; -pub const SYS_getcwd: c_long = 17; -pub const SYS_lookup_dcookie: c_long = 18; -pub const SYS_eventfd2: c_long = 19; -pub const SYS_epoll_create1: c_long = 20; -pub const SYS_epoll_ctl: c_long = 21; -pub const SYS_epoll_pwait: c_long = 22; -pub const SYS_dup: c_long = 23; -pub const SYS_dup3: c_long = 24; -pub const SYS_fcntl: c_long = 25; -pub const SYS_inotify_init1: c_long = 26; -pub const SYS_inotify_add_watch: c_long = 27; -pub const SYS_inotify_rm_watch: c_long = 28; -pub const SYS_ioctl: c_long = 29; -pub const SYS_ioprio_set: c_long = 30; -pub const SYS_ioprio_get: c_long = 31; -pub const SYS_flock: c_long = 32; -pub const SYS_mknodat: c_long = 33; -pub const SYS_mkdirat: c_long = 34; -pub const SYS_unlinkat: c_long = 35; -pub const SYS_symlinkat: c_long = 36; -pub const SYS_linkat: c_long = 37; -pub const SYS_renameat: c_long = 38; -pub const SYS_umount2: c_long = 39; -pub const SYS_mount: c_long = 40; -pub const SYS_pivot_root: c_long = 41; -pub const SYS_nfsservctl: c_long = 42; -pub const SYS_statfs: c_long = 43; -pub const SYS_fstatfs: c_long = 44; -pub const SYS_truncate: c_long = 45; -pub const SYS_ftruncate: c_long = 46; -pub const SYS_fallocate: c_long = 47; -pub const SYS_faccessat: c_long = 48; -pub const SYS_chdir: c_long = 49; -pub const SYS_fchdir: c_long = 50; -pub const SYS_chroot: c_long = 51; -pub const SYS_fchmod: c_long = 52; -pub const SYS_fchmodat: c_long = 53; -pub const SYS_fchownat: c_long = 54; -pub const SYS_fchown: c_long = 55; -pub const SYS_openat: c_long = 56; -pub const SYS_close: c_long = 57; -pub const SYS_vhangup: c_long = 58; -pub const SYS_pipe2: c_long = 59; -pub const SYS_quotactl: c_long = 60; -pub const SYS_getdents64: c_long = 61; -pub const SYS_lseek: c_long = 62; -pub const SYS_read: c_long = 63; -pub const SYS_write: c_long = 64; -pub const SYS_readv: c_long = 65; -pub const SYS_writev: c_long = 66; -pub const SYS_pread64: c_long = 67; -pub const SYS_pwrite64: c_long = 68; -pub const SYS_preadv: c_long = 69; -pub const SYS_pwritev: c_long = 70; -pub const SYS_pselect6: c_long = 72; -pub const SYS_ppoll: c_long = 73; -pub const SYS_signalfd4: c_long = 74; -pub const SYS_vmsplice: c_long = 75; -pub const SYS_splice: c_long = 76; -pub const SYS_tee: c_long = 77; -pub const SYS_readlinkat: c_long = 78; -pub const SYS_newfstatat: c_long = 79; -pub const SYS_fstat: c_long = 80; -pub const SYS_sync: c_long = 81; -pub const SYS_fsync: c_long = 82; -pub const SYS_fdatasync: c_long = 83; -pub 
const SYS_sync_file_range: c_long = 84; -pub const SYS_timerfd_create: c_long = 85; -pub const SYS_timerfd_settime: c_long = 86; -pub const SYS_timerfd_gettime: c_long = 87; -pub const SYS_utimensat: c_long = 88; -pub const SYS_acct: c_long = 89; -pub const SYS_capget: c_long = 90; -pub const SYS_capset: c_long = 91; -pub const SYS_personality: c_long = 92; -pub const SYS_exit: c_long = 93; -pub const SYS_exit_group: c_long = 94; -pub const SYS_waitid: c_long = 95; -pub const SYS_set_tid_address: c_long = 96; -pub const SYS_unshare: c_long = 97; -pub const SYS_futex: c_long = 98; -pub const SYS_set_robust_list: c_long = 99; -pub const SYS_get_robust_list: c_long = 100; -pub const SYS_nanosleep: c_long = 101; -pub const SYS_getitimer: c_long = 102; -pub const SYS_setitimer: c_long = 103; -pub const SYS_kexec_load: c_long = 104; -pub const SYS_init_module: c_long = 105; -pub const SYS_delete_module: c_long = 106; -pub const SYS_timer_create: c_long = 107; -pub const SYS_timer_gettime: c_long = 108; -pub const SYS_timer_getoverrun: c_long = 109; -pub const SYS_timer_settime: c_long = 110; -pub const SYS_timer_delete: c_long = 111; -pub const SYS_clock_settime: c_long = 112; -pub const SYS_clock_gettime: c_long = 113; -pub const SYS_clock_getres: c_long = 114; -pub const SYS_clock_nanosleep: c_long = 115; -pub const SYS_syslog: c_long = 116; -pub const SYS_ptrace: c_long = 117; -pub const SYS_sched_setparam: c_long = 118; -pub const SYS_sched_setscheduler: c_long = 119; -pub const SYS_sched_getscheduler: c_long = 120; -pub const SYS_sched_getparam: c_long = 121; -pub const SYS_sched_setaffinity: c_long = 122; -pub const SYS_sched_getaffinity: c_long = 123; -pub const SYS_sched_yield: c_long = 124; -pub const SYS_sched_get_priority_max: c_long = 125; -pub const SYS_sched_get_priority_min: c_long = 126; -pub const SYS_sched_rr_get_interval: c_long = 127; -pub const SYS_restart_syscall: c_long = 128; -pub const SYS_kill: c_long = 129; -pub const SYS_tkill: c_long = 130; -pub const SYS_tgkill: c_long = 131; -pub const SYS_sigaltstack: c_long = 132; -pub const SYS_rt_sigsuspend: c_long = 133; -pub const SYS_rt_sigaction: c_long = 134; -pub const SYS_rt_sigprocmask: c_long = 135; -pub const SYS_rt_sigpending: c_long = 136; -pub const SYS_rt_sigtimedwait: c_long = 137; -pub const SYS_rt_sigqueueinfo: c_long = 138; -pub const SYS_rt_sigreturn: c_long = 139; -pub const SYS_setpriority: c_long = 140; -pub const SYS_getpriority: c_long = 141; -pub const SYS_reboot: c_long = 142; -pub const SYS_setregid: c_long = 143; -pub const SYS_setgid: c_long = 144; -pub const SYS_setreuid: c_long = 145; -pub const SYS_setuid: c_long = 146; -pub const SYS_setresuid: c_long = 147; -pub const SYS_getresuid: c_long = 148; -pub const SYS_setresgid: c_long = 149; -pub const SYS_getresgid: c_long = 150; -pub const SYS_setfsuid: c_long = 151; -pub const SYS_setfsgid: c_long = 152; -pub const SYS_times: c_long = 153; -pub const SYS_setpgid: c_long = 154; -pub const SYS_getpgid: c_long = 155; -pub const SYS_getsid: c_long = 156; -pub const SYS_setsid: c_long = 157; -pub const SYS_getgroups: c_long = 158; -pub const SYS_setgroups: c_long = 159; -pub const SYS_uname: c_long = 160; -pub const SYS_sethostname: c_long = 161; -pub const SYS_setdomainname: c_long = 162; -pub const SYS_getrlimit: c_long = 163; -pub const SYS_setrlimit: c_long = 164; -pub const SYS_getrusage: c_long = 165; -pub const SYS_umask: c_long = 166; -pub const SYS_prctl: c_long = 167; -pub const SYS_getcpu: c_long = 168; -pub const SYS_gettimeofday: c_long = 
169; -pub const SYS_settimeofday: c_long = 170; -pub const SYS_adjtimex: c_long = 171; -pub const SYS_getpid: c_long = 172; -pub const SYS_getppid: c_long = 173; -pub const SYS_getuid: c_long = 174; -pub const SYS_geteuid: c_long = 175; -pub const SYS_getgid: c_long = 176; -pub const SYS_getegid: c_long = 177; -pub const SYS_gettid: c_long = 178; -pub const SYS_sysinfo: c_long = 179; -pub const SYS_mq_open: c_long = 180; -pub const SYS_mq_unlink: c_long = 181; -pub const SYS_mq_timedsend: c_long = 182; -pub const SYS_mq_timedreceive: c_long = 183; -pub const SYS_mq_notify: c_long = 184; -pub const SYS_mq_getsetattr: c_long = 185; -pub const SYS_msgget: c_long = 186; -pub const SYS_msgctl: c_long = 187; -pub const SYS_msgrcv: c_long = 188; -pub const SYS_msgsnd: c_long = 189; -pub const SYS_semget: c_long = 190; -pub const SYS_semctl: c_long = 191; -pub const SYS_semtimedop: c_long = 192; -pub const SYS_semop: c_long = 193; -pub const SYS_shmget: c_long = 194; -pub const SYS_shmctl: c_long = 195; -pub const SYS_shmat: c_long = 196; -pub const SYS_shmdt: c_long = 197; -pub const SYS_socket: c_long = 198; -pub const SYS_socketpair: c_long = 199; -pub const SYS_bind: c_long = 200; -pub const SYS_listen: c_long = 201; -pub const SYS_accept: c_long = 202; -pub const SYS_connect: c_long = 203; -pub const SYS_getsockname: c_long = 204; -pub const SYS_getpeername: c_long = 205; -pub const SYS_sendto: c_long = 206; -pub const SYS_recvfrom: c_long = 207; -pub const SYS_setsockopt: c_long = 208; -pub const SYS_getsockopt: c_long = 209; -pub const SYS_shutdown: c_long = 210; -pub const SYS_sendmsg: c_long = 211; -pub const SYS_recvmsg: c_long = 212; -pub const SYS_readahead: c_long = 213; -pub const SYS_brk: c_long = 214; -pub const SYS_munmap: c_long = 215; -pub const SYS_mremap: c_long = 216; -pub const SYS_add_key: c_long = 217; -pub const SYS_request_key: c_long = 218; -pub const SYS_keyctl: c_long = 219; -pub const SYS_clone: c_long = 220; -pub const SYS_execve: c_long = 221; -pub const SYS_mmap: c_long = 222; -pub const SYS_swapon: c_long = 224; -pub const SYS_swapoff: c_long = 225; -pub const SYS_mprotect: c_long = 226; -pub const SYS_msync: c_long = 227; -pub const SYS_mlock: c_long = 228; -pub const SYS_munlock: c_long = 229; -pub const SYS_mlockall: c_long = 230; -pub const SYS_munlockall: c_long = 231; -pub const SYS_mincore: c_long = 232; -pub const SYS_madvise: c_long = 233; -pub const SYS_remap_file_pages: c_long = 234; -pub const SYS_mbind: c_long = 235; -pub const SYS_get_mempolicy: c_long = 236; -pub const SYS_set_mempolicy: c_long = 237; -pub const SYS_migrate_pages: c_long = 238; -pub const SYS_move_pages: c_long = 239; -pub const SYS_rt_tgsigqueueinfo: c_long = 240; -pub const SYS_perf_event_open: c_long = 241; -pub const SYS_accept4: c_long = 242; -pub const SYS_recvmmsg: c_long = 243; -pub const SYS_wait4: c_long = 260; -pub const SYS_prlimit64: c_long = 261; -pub const SYS_fanotify_init: c_long = 262; -pub const SYS_fanotify_mark: c_long = 263; -pub const SYS_name_to_handle_at: c_long = 264; -pub const SYS_open_by_handle_at: c_long = 265; -pub const SYS_clock_adjtime: c_long = 266; -pub const SYS_syncfs: c_long = 267; -pub const SYS_setns: c_long = 268; -pub const SYS_sendmmsg: c_long = 269; -pub const SYS_process_vm_readv: c_long = 270; -pub const SYS_process_vm_writev: c_long = 271; -pub const SYS_kcmp: c_long = 272; -pub const SYS_finit_module: c_long = 273; -pub const SYS_sched_setattr: c_long = 274; -pub const SYS_sched_getattr: c_long = 275; -pub const SYS_renameat2: c_long 
= 276; -pub const SYS_seccomp: c_long = 277; -pub const SYS_getrandom: c_long = 278; -pub const SYS_memfd_create: c_long = 279; -pub const SYS_bpf: c_long = 280; -pub const SYS_execveat: c_long = 281; -pub const SYS_userfaultfd: c_long = 282; -pub const SYS_membarrier: c_long = 283; -pub const SYS_mlock2: c_long = 284; -pub const SYS_copy_file_range: c_long = 285; -pub const SYS_preadv2: c_long = 286; -pub const SYS_pwritev2: c_long = 287; -pub const SYS_pkey_mprotect: c_long = 288; -pub const SYS_pkey_alloc: c_long = 289; -pub const SYS_pkey_free: c_long = 290; -pub const SYS_statx: c_long = 291; -pub const SYS_io_pgetevents: c_long = 292; -pub const SYS_rseq: c_long = 293; -pub const SYS_pidfd_send_signal: c_long = 424; -pub const SYS_io_uring_setup: c_long = 425; -pub const SYS_io_uring_enter: c_long = 426; -pub const SYS_io_uring_register: c_long = 427; -pub const SYS_open_tree: c_long = 428; -pub const SYS_move_mount: c_long = 429; -pub const SYS_fsopen: c_long = 430; -pub const SYS_fsconfig: c_long = 431; -pub const SYS_fsmount: c_long = 432; -pub const SYS_fspick: c_long = 433; -pub const SYS_pidfd_open: c_long = 434; -pub const SYS_clone3: c_long = 435; -pub const SYS_close_range: c_long = 436; -pub const SYS_openat2: c_long = 437; -pub const SYS_pidfd_getfd: c_long = 438; -pub const SYS_faccessat2: c_long = 439; -pub const SYS_process_madvise: c_long = 440; -pub const SYS_epoll_pwait2: c_long = 441; -pub const SYS_mount_setattr: c_long = 442; -pub const SYS_quotactl_fd: c_long = 443; -pub const SYS_landlock_create_ruleset: c_long = 444; -pub const SYS_landlock_add_rule: c_long = 445; -pub const SYS_landlock_restrict_self: c_long = 446; -pub const SYS_memfd_secret: c_long = 447; -pub const SYS_process_mrelease: c_long = 448; -pub const SYS_futex_waitv: c_long = 449; -pub const SYS_set_mempolicy_home_node: c_long = 450; -pub const SYS_mseal: c_long = 462; - -pub const MCL_CURRENT: c_int = 0x0001; -pub const MCL_FUTURE: c_int = 0x0002; -pub const MCL_ONFAULT: c_int = 0x0004; -pub const CBAUD: crate::tcflag_t = 0o0010017; -pub const TAB1: c_int = 0x00000800; -pub const TAB2: c_int = 0x00001000; -pub const TAB3: c_int = 0x00001800; -pub const CR1: c_int = 0x00000200; -pub const CR2: c_int = 0x00000400; -pub const CR3: c_int = 0x00000600; -pub const FF1: c_int = 0x00008000; -pub const BS1: c_int = 0x00002000; -pub const VT1: c_int = 0x00004000; -pub const VWERASE: usize = 14; -pub const VREPRINT: usize = 12; -pub const VSUSP: usize = 10; -pub const VSTART: usize = 8; -pub const VSTOP: usize = 9; -pub const VDISCARD: usize = 13; -pub const VTIME: usize = 5; -pub const IXON: crate::tcflag_t = 0x00000400; -pub const IXOFF: crate::tcflag_t = 0x00001000; -pub const ONLCR: crate::tcflag_t = 0x4; -pub const CSIZE: crate::tcflag_t = 0x00000030; -pub const CS6: crate::tcflag_t = 0x00000010; -pub const CS7: crate::tcflag_t = 0x00000020; -pub const CS8: crate::tcflag_t = 0x00000030; -pub const CSTOPB: crate::tcflag_t = 0x00000040; -pub const CREAD: crate::tcflag_t = 0x00000080; -pub const PARENB: crate::tcflag_t = 0x00000100; -pub const PARODD: crate::tcflag_t = 0x00000200; -pub const HUPCL: crate::tcflag_t = 0x00000400; -pub const CLOCAL: crate::tcflag_t = 0x00000800; -pub const ECHOKE: crate::tcflag_t = 0x00000800; -pub const ECHOE: crate::tcflag_t = 0x00000010; -pub const ECHOK: crate::tcflag_t = 0x00000020; -pub const ECHONL: crate::tcflag_t = 0x00000040; -pub const ECHOPRT: crate::tcflag_t = 0x00000400; -pub const ECHOCTL: crate::tcflag_t = 0x00000200; -pub const ISIG: crate::tcflag_t = 
0x00000001; -pub const ICANON: crate::tcflag_t = 0x00000002; -pub const PENDIN: crate::tcflag_t = 0x00004000; -pub const NOFLSH: crate::tcflag_t = 0x00000080; -pub const CIBAUD: crate::tcflag_t = 0o02003600000; -pub const CBAUDEX: crate::tcflag_t = 0o010000; -pub const VSWTC: usize = 7; -pub const OLCUC: crate::tcflag_t = 0o000002; -pub const NLDLY: crate::tcflag_t = 0o000400; -pub const CRDLY: crate::tcflag_t = 0o003000; -pub const TABDLY: crate::tcflag_t = 0o014000; -pub const BSDLY: crate::tcflag_t = 0o020000; -pub const FFDLY: crate::tcflag_t = 0o100000; -pub const VTDLY: crate::tcflag_t = 0o040000; -pub const XTABS: crate::tcflag_t = 0o014000; -pub const B57600: crate::speed_t = 0o010001; -pub const B115200: crate::speed_t = 0o010002; -pub const B230400: crate::speed_t = 0o010003; -pub const B460800: crate::speed_t = 0o010004; -pub const B500000: crate::speed_t = 0o010005; -pub const B576000: crate::speed_t = 0o010006; -pub const B921600: crate::speed_t = 0o010007; -pub const B1000000: crate::speed_t = 0o010010; -pub const B1152000: crate::speed_t = 0o010011; -pub const B1500000: crate::speed_t = 0o010012; -pub const B2000000: crate::speed_t = 0o010013; -pub const B2500000: crate::speed_t = 0o010014; -pub const B3000000: crate::speed_t = 0o010015; -pub const B3500000: crate::speed_t = 0o010016; -pub const B4000000: crate::speed_t = 0o010017; - -pub const EDEADLK: c_int = 35; -pub const EDEADLOCK: c_int = EDEADLK; - -pub const EXTPROC: crate::tcflag_t = 0x00010000; -pub const VEOL: usize = 11; -pub const VEOL2: usize = 16; -pub const VMIN: usize = 6; -pub const IEXTEN: crate::tcflag_t = 0x00008000; -pub const TOSTOP: crate::tcflag_t = 0x00000100; -pub const FLUSHO: crate::tcflag_t = 0x00001000; diff --git a/vendor/libc/src/unix/linux_like/linux/musl/b64/loongarch64/mod.rs b/vendor/libc/src/unix/linux_like/linux/musl/b64/loongarch64/mod.rs deleted file mode 100644 index e014fbf48c0dae..00000000000000 --- a/vendor/libc/src/unix/linux_like/linux/musl/b64/loongarch64/mod.rs +++ /dev/null @@ -1,667 +0,0 @@ -//! LoongArch-specific definitions for 64-bit linux-like values - -use crate::prelude::*; -use crate::{off64_t, off_t}; - -pub type wchar_t = c_int; - -pub type nlink_t = c_uint; -pub type blksize_t = c_int; -pub type __u64 = c_ulonglong; -pub type __s64 = c_longlong; - -s! 
{ - pub struct stat { - pub st_dev: crate::dev_t, - pub st_ino: crate::ino_t, - pub st_mode: crate::mode_t, - pub st_nlink: crate::nlink_t, - pub st_uid: crate::uid_t, - pub st_gid: crate::gid_t, - pub st_rdev: crate::dev_t, - __pad1: crate::dev_t, - pub st_size: off_t, - pub st_blksize: crate::blksize_t, - __pad2: c_int, - pub st_blocks: crate::blkcnt_t, - pub st_atime: crate::time_t, - pub st_atime_nsec: c_long, - pub st_mtime: crate::time_t, - pub st_mtime_nsec: c_long, - pub st_ctime: crate::time_t, - pub st_ctime_nsec: c_long, - __unused: [c_int; 2usize], - } - - pub struct stat64 { - pub st_dev: crate::dev_t, - pub st_ino: crate::ino64_t, - pub st_mode: crate::mode_t, - pub st_nlink: crate::nlink_t, - pub st_uid: crate::uid_t, - pub st_gid: crate::gid_t, - pub st_rdev: crate::dev_t, - pub __pad1: crate::dev_t, - pub st_size: off64_t, - pub st_blksize: crate::blksize_t, - pub __pad2: c_int, - pub st_blocks: crate::blkcnt_t, - pub st_atime: crate::time_t, - pub st_atime_nsec: c_long, - pub st_mtime: crate::time_t, - pub st_mtime_nsec: c_long, - pub st_ctime: crate::time_t, - pub st_ctime_nsec: c_long, - __unused: [c_int; 2], - } - - pub struct ipc_perm { - pub __key: crate::key_t, - pub uid: crate::uid_t, - pub gid: crate::gid_t, - pub cuid: crate::uid_t, - pub cgid: crate::gid_t, - pub mode: c_uint, - pub __seq: c_int, - __unused1: c_ulong, - __unused2: c_ulong, - } - - pub struct user_regs_struct { - pub regs: [u64; 32], - pub orig_a0: u64, - pub csr_era: u64, - pub csr_badv: u64, - pub reserved: [u64; 10], - } - - pub struct user_fp_struct { - pub fpr: [u64; 32], - pub fcc: u64, - pub fcsr: u32, - } - - pub struct ucontext_t { - pub uc_flags: c_ulong, - pub uc_link: *mut ucontext_t, - pub uc_stack: crate::stack_t, - pub uc_sigmask: crate::sigset_t, - pub uc_mcontext: mcontext_t, - } - - #[repr(align(16))] - pub struct mcontext_t { - pub __pc: c_ulong, - pub __gregs: [c_ulong; 32], - pub __flags: c_uint, - pub __extcontext: [c_ulong; 0], - } - - #[repr(align(8))] - pub struct clone_args { - pub flags: c_ulonglong, - pub pidfd: c_ulonglong, - pub child_tid: c_ulonglong, - pub parent_tid: c_ulonglong, - pub exit_signal: c_ulonglong, - pub stack: c_ulonglong, - pub stack_size: c_ulonglong, - pub tls: c_ulonglong, - pub set_tid: c_ulonglong, - pub set_tid_size: c_ulonglong, - pub cgroup: c_ulonglong, - } -} - -s_no_extra_traits! 
{ - #[repr(align(16))] - pub struct max_align_t { - priv_: [f64; 4], - } -} - -pub const SYS_io_setup: c_long = 0; -pub const SYS_io_destroy: c_long = 1; -pub const SYS_io_submit: c_long = 2; -pub const SYS_io_cancel: c_long = 3; -pub const SYS_io_getevents: c_long = 4; -pub const SYS_setxattr: c_long = 5; -pub const SYS_lsetxattr: c_long = 6; -pub const SYS_fsetxattr: c_long = 7; -pub const SYS_getxattr: c_long = 8; -pub const SYS_lgetxattr: c_long = 9; -pub const SYS_fgetxattr: c_long = 10; -pub const SYS_listxattr: c_long = 11; -pub const SYS_llistxattr: c_long = 12; -pub const SYS_flistxattr: c_long = 13; -pub const SYS_removexattr: c_long = 14; -pub const SYS_lremovexattr: c_long = 15; -pub const SYS_fremovexattr: c_long = 16; -pub const SYS_getcwd: c_long = 17; -pub const SYS_lookup_dcookie: c_long = 18; -pub const SYS_eventfd2: c_long = 19; -pub const SYS_epoll_create1: c_long = 20; -pub const SYS_epoll_ctl: c_long = 21; -pub const SYS_epoll_pwait: c_long = 22; -pub const SYS_dup: c_long = 23; -pub const SYS_dup3: c_long = 24; -pub const SYS_fcntl: c_long = 25; -pub const SYS_inotify_init1: c_long = 26; -pub const SYS_inotify_add_watch: c_long = 27; -pub const SYS_inotify_rm_watch: c_long = 28; -pub const SYS_ioctl: c_long = 29; -pub const SYS_ioprio_set: c_long = 30; -pub const SYS_ioprio_get: c_long = 31; -pub const SYS_flock: c_long = 32; -pub const SYS_mknodat: c_long = 33; -pub const SYS_mkdirat: c_long = 34; -pub const SYS_unlinkat: c_long = 35; -pub const SYS_symlinkat: c_long = 36; -pub const SYS_linkat: c_long = 37; -pub const SYS_umount2: c_long = 39; -pub const SYS_mount: c_long = 40; -pub const SYS_pivot_root: c_long = 41; -pub const SYS_nfsservctl: c_long = 42; -pub const SYS_statfs: c_long = 43; -pub const SYS_fstatfs: c_long = 44; -pub const SYS_truncate: c_long = 45; -pub const SYS_ftruncate: c_long = 46; -pub const SYS_fallocate: c_long = 47; -pub const SYS_faccessat: c_long = 48; -pub const SYS_chdir: c_long = 49; -pub const SYS_fchdir: c_long = 50; -pub const SYS_chroot: c_long = 51; -pub const SYS_fchmod: c_long = 52; -pub const SYS_fchmodat: c_long = 53; -pub const SYS_fchownat: c_long = 54; -pub const SYS_fchown: c_long = 55; -pub const SYS_openat: c_long = 56; -pub const SYS_close: c_long = 57; -pub const SYS_vhangup: c_long = 58; -pub const SYS_pipe2: c_long = 59; -pub const SYS_quotactl: c_long = 60; -pub const SYS_getdents64: c_long = 61; -pub const SYS_lseek: c_long = 62; -pub const SYS_read: c_long = 63; -pub const SYS_write: c_long = 64; -pub const SYS_readv: c_long = 65; -pub const SYS_writev: c_long = 66; -pub const SYS_pread64: c_long = 67; -pub const SYS_pwrite64: c_long = 68; -pub const SYS_preadv: c_long = 69; -pub const SYS_pwritev: c_long = 70; -pub const SYS_sendfile: c_long = 71; -pub const SYS_pselect6: c_long = 72; -pub const SYS_ppoll: c_long = 73; -pub const SYS_signalfd4: c_long = 74; -pub const SYS_vmsplice: c_long = 75; -pub const SYS_splice: c_long = 76; -pub const SYS_tee: c_long = 77; -pub const SYS_readlinkat: c_long = 78; -pub const SYS_sync: c_long = 81; -pub const SYS_fsync: c_long = 82; -pub const SYS_fdatasync: c_long = 83; -pub const SYS_sync_file_range: c_long = 84; -pub const SYS_timerfd_create: c_long = 85; -pub const SYS_timerfd_settime: c_long = 86; -pub const SYS_timerfd_gettime: c_long = 87; -pub const SYS_utimensat: c_long = 88; -pub const SYS_acct: c_long = 89; -pub const SYS_capget: c_long = 90; -pub const SYS_capset: c_long = 91; -pub const SYS_personality: c_long = 92; -pub const SYS_exit: c_long = 93; -pub const 
SYS_exit_group: c_long = 94; -pub const SYS_waitid: c_long = 95; -pub const SYS_set_tid_address: c_long = 96; -pub const SYS_unshare: c_long = 97; -pub const SYS_futex: c_long = 98; -pub const SYS_set_robust_list: c_long = 99; -pub const SYS_get_robust_list: c_long = 100; -pub const SYS_nanosleep: c_long = 101; -pub const SYS_getitimer: c_long = 102; -pub const SYS_setitimer: c_long = 103; -pub const SYS_kexec_load: c_long = 104; -pub const SYS_init_module: c_long = 105; -pub const SYS_delete_module: c_long = 106; -pub const SYS_timer_create: c_long = 107; -pub const SYS_timer_gettime: c_long = 108; -pub const SYS_timer_getoverrun: c_long = 109; -pub const SYS_timer_settime: c_long = 110; -pub const SYS_timer_delete: c_long = 111; -pub const SYS_clock_settime: c_long = 112; -pub const SYS_clock_gettime: c_long = 113; -pub const SYS_clock_getres: c_long = 114; -pub const SYS_clock_nanosleep: c_long = 115; -pub const SYS_syslog: c_long = 116; -pub const SYS_ptrace: c_long = 117; -pub const SYS_sched_setparam: c_long = 118; -pub const SYS_sched_setscheduler: c_long = 119; -pub const SYS_sched_getscheduler: c_long = 120; -pub const SYS_sched_getparam: c_long = 121; -pub const SYS_sched_setaffinity: c_long = 122; -pub const SYS_sched_getaffinity: c_long = 123; -pub const SYS_sched_yield: c_long = 124; -pub const SYS_sched_get_priority_max: c_long = 125; -pub const SYS_sched_get_priority_min: c_long = 126; -pub const SYS_sched_rr_get_interval: c_long = 127; -pub const SYS_restart_syscall: c_long = 128; -pub const SYS_kill: c_long = 129; -pub const SYS_tkill: c_long = 130; -pub const SYS_tgkill: c_long = 131; -pub const SYS_sigaltstack: c_long = 132; -pub const SYS_rt_sigsuspend: c_long = 133; -pub const SYS_rt_sigaction: c_long = 134; -pub const SYS_rt_sigprocmask: c_long = 135; -pub const SYS_rt_sigpending: c_long = 136; -pub const SYS_rt_sigtimedwait: c_long = 137; -pub const SYS_rt_sigqueueinfo: c_long = 138; -pub const SYS_rt_sigreturn: c_long = 139; -pub const SYS_setpriority: c_long = 140; -pub const SYS_getpriority: c_long = 141; -pub const SYS_reboot: c_long = 142; -pub const SYS_setregid: c_long = 143; -pub const SYS_setgid: c_long = 144; -pub const SYS_setreuid: c_long = 145; -pub const SYS_setuid: c_long = 146; -pub const SYS_setresuid: c_long = 147; -pub const SYS_getresuid: c_long = 148; -pub const SYS_setresgid: c_long = 149; -pub const SYS_getresgid: c_long = 150; -pub const SYS_setfsuid: c_long = 151; -pub const SYS_setfsgid: c_long = 152; -pub const SYS_times: c_long = 153; -pub const SYS_setpgid: c_long = 154; -pub const SYS_getpgid: c_long = 155; -pub const SYS_getsid: c_long = 156; -pub const SYS_setsid: c_long = 157; -pub const SYS_getgroups: c_long = 158; -pub const SYS_setgroups: c_long = 159; -pub const SYS_uname: c_long = 160; -pub const SYS_sethostname: c_long = 161; -pub const SYS_setdomainname: c_long = 162; -pub const SYS_getrusage: c_long = 165; -pub const SYS_umask: c_long = 166; -pub const SYS_prctl: c_long = 167; -pub const SYS_getcpu: c_long = 168; -pub const SYS_gettimeofday: c_long = 169; -pub const SYS_settimeofday: c_long = 170; -pub const SYS_adjtimex: c_long = 171; -pub const SYS_getpid: c_long = 172; -pub const SYS_getppid: c_long = 173; -pub const SYS_getuid: c_long = 174; -pub const SYS_geteuid: c_long = 175; -pub const SYS_getgid: c_long = 176; -pub const SYS_getegid: c_long = 177; -pub const SYS_gettid: c_long = 178; -pub const SYS_sysinfo: c_long = 179; -pub const SYS_mq_open: c_long = 180; -pub const SYS_mq_unlink: c_long = 181; -pub const 
SYS_mq_timedsend: c_long = 182; -pub const SYS_mq_timedreceive: c_long = 183; -pub const SYS_mq_notify: c_long = 184; -pub const SYS_mq_getsetattr: c_long = 185; -pub const SYS_msgget: c_long = 186; -pub const SYS_msgctl: c_long = 187; -pub const SYS_msgrcv: c_long = 188; -pub const SYS_msgsnd: c_long = 189; -pub const SYS_semget: c_long = 190; -pub const SYS_semctl: c_long = 191; -pub const SYS_semtimedop: c_long = 192; -pub const SYS_semop: c_long = 193; -pub const SYS_shmget: c_long = 194; -pub const SYS_shmctl: c_long = 195; -pub const SYS_shmat: c_long = 196; -pub const SYS_shmdt: c_long = 197; -pub const SYS_socket: c_long = 198; -pub const SYS_socketpair: c_long = 199; -pub const SYS_bind: c_long = 200; -pub const SYS_listen: c_long = 201; -pub const SYS_accept: c_long = 202; -pub const SYS_connect: c_long = 203; -pub const SYS_getsockname: c_long = 204; -pub const SYS_getpeername: c_long = 205; -pub const SYS_sendto: c_long = 206; -pub const SYS_recvfrom: c_long = 207; -pub const SYS_setsockopt: c_long = 208; -pub const SYS_getsockopt: c_long = 209; -pub const SYS_shutdown: c_long = 210; -pub const SYS_sendmsg: c_long = 211; -pub const SYS_recvmsg: c_long = 212; -pub const SYS_readahead: c_long = 213; -pub const SYS_brk: c_long = 214; -pub const SYS_munmap: c_long = 215; -pub const SYS_mremap: c_long = 216; -pub const SYS_add_key: c_long = 217; -pub const SYS_request_key: c_long = 218; -pub const SYS_keyctl: c_long = 219; -pub const SYS_clone: c_long = 220; -pub const SYS_execve: c_long = 221; -pub const SYS_mmap: c_long = 222; -pub const SYS_fadvise64: c_long = 223; -pub const SYS_swapon: c_long = 224; -pub const SYS_swapoff: c_long = 225; -pub const SYS_mprotect: c_long = 226; -pub const SYS_msync: c_long = 227; -pub const SYS_mlock: c_long = 228; -pub const SYS_munlock: c_long = 229; -pub const SYS_mlockall: c_long = 230; -pub const SYS_munlockall: c_long = 231; -pub const SYS_mincore: c_long = 232; -pub const SYS_madvise: c_long = 233; -pub const SYS_remap_file_pages: c_long = 234; -pub const SYS_mbind: c_long = 235; -pub const SYS_get_mempolicy: c_long = 236; -pub const SYS_set_mempolicy: c_long = 237; -pub const SYS_migrate_pages: c_long = 238; -pub const SYS_move_pages: c_long = 239; -pub const SYS_rt_tgsigqueueinfo: c_long = 240; -pub const SYS_perf_event_open: c_long = 241; -pub const SYS_accept4: c_long = 242; -pub const SYS_recvmmsg: c_long = 243; -pub const SYS_arch_specific_syscall: c_long = 244; -pub const SYS_wait4: c_long = 260; -pub const SYS_prlimit64: c_long = 261; -pub const SYS_fanotify_init: c_long = 262; -pub const SYS_fanotify_mark: c_long = 263; -pub const SYS_name_to_handle_at: c_long = 264; -pub const SYS_open_by_handle_at: c_long = 265; -pub const SYS_clock_adjtime: c_long = 266; -pub const SYS_syncfs: c_long = 267; -pub const SYS_setns: c_long = 268; -pub const SYS_sendmmsg: c_long = 269; -pub const SYS_process_vm_readv: c_long = 270; -pub const SYS_process_vm_writev: c_long = 271; -pub const SYS_kcmp: c_long = 272; -pub const SYS_finit_module: c_long = 273; -pub const SYS_sched_setattr: c_long = 274; -pub const SYS_sched_getattr: c_long = 275; -pub const SYS_renameat2: c_long = 276; -pub const SYS_seccomp: c_long = 277; -pub const SYS_getrandom: c_long = 278; -pub const SYS_memfd_create: c_long = 279; -pub const SYS_bpf: c_long = 280; -pub const SYS_execveat: c_long = 281; -pub const SYS_userfaultfd: c_long = 282; -pub const SYS_membarrier: c_long = 283; -pub const SYS_mlock2: c_long = 284; -pub const SYS_copy_file_range: c_long = 285; -pub const 
SYS_preadv2: c_long = 286; -pub const SYS_pwritev2: c_long = 287; -pub const SYS_pkey_mprotect: c_long = 288; -pub const SYS_pkey_alloc: c_long = 289; -pub const SYS_pkey_free: c_long = 290; -pub const SYS_statx: c_long = 291; -pub const SYS_io_pgetevents: c_long = 292; -pub const SYS_rseq: c_long = 293; -pub const SYS_kexec_file_load: c_long = 294; -pub const SYS_pidfd_send_signal: c_long = 424; -pub const SYS_io_uring_setup: c_long = 425; -pub const SYS_io_uring_enter: c_long = 426; -pub const SYS_io_uring_register: c_long = 427; -pub const SYS_open_tree: c_long = 428; -pub const SYS_move_mount: c_long = 429; -pub const SYS_fsopen: c_long = 430; -pub const SYS_fsconfig: c_long = 431; -pub const SYS_fsmount: c_long = 432; -pub const SYS_fspick: c_long = 433; -pub const SYS_pidfd_open: c_long = 434; -pub const SYS_clone3: c_long = 435; -pub const SYS_close_range: c_long = 436; -pub const SYS_openat2: c_long = 437; -pub const SYS_pidfd_getfd: c_long = 438; -pub const SYS_faccessat2: c_long = 439; -pub const SYS_process_madvise: c_long = 440; -pub const SYS_epoll_pwait2: c_long = 441; -pub const SYS_mount_setattr: c_long = 442; -pub const SYS_quotactl_fd: c_long = 443; -pub const SYS_landlock_create_ruleset: c_long = 444; -pub const SYS_landlock_add_rule: c_long = 445; -pub const SYS_landlock_restrict_self: c_long = 446; -pub const SYS_process_mrelease: c_long = 448; -pub const SYS_futex_waitv: c_long = 449; -pub const SYS_set_mempolicy_home_node: c_long = 450; -pub const SYS_cachestat: c_long = 451; -pub const SYS_fchmodat2: c_long = 452; -pub const SYS_map_shadow_stack: c_long = 453; -pub const SYS_futex_wake: c_long = 454; -pub const SYS_futex_wait: c_long = 455; -pub const SYS_futex_requeue: c_long = 456; - -pub const O_APPEND: c_int = 1024; -pub const O_DIRECT: c_int = 0x4000; -pub const O_DIRECTORY: c_int = 0x10000; -pub const O_LARGEFILE: c_int = 0o0100000; -pub const O_NOFOLLOW: c_int = 0x20000; -pub const O_CREAT: c_int = 64; -pub const O_EXCL: c_int = 128; -pub const O_NOCTTY: c_int = 256; -pub const O_NONBLOCK: c_int = 2048; -pub const O_SYNC: c_int = 1052672; -pub const O_RSYNC: c_int = 1052672; -pub const O_DSYNC: c_int = 4096; -pub const O_ASYNC: c_int = 0o20000; - -pub const SIGSTKSZ: size_t = 16384; -pub const MINSIGSTKSZ: size_t = 4096; - -pub const ENAMETOOLONG: c_int = 36; -pub const ENOLCK: c_int = 37; -pub const ENOSYS: c_int = 38; -pub const ENOTEMPTY: c_int = 39; -pub const ELOOP: c_int = 40; -pub const ENOMSG: c_int = 42; -pub const EIDRM: c_int = 43; -pub const ECHRNG: c_int = 44; -pub const EL2NSYNC: c_int = 45; -pub const EL3HLT: c_int = 46; -pub const EL3RST: c_int = 47; -pub const ELNRNG: c_int = 48; -pub const EUNATCH: c_int = 49; -pub const ENOCSI: c_int = 50; -pub const EL2HLT: c_int = 51; -pub const EBADE: c_int = 52; -pub const EBADR: c_int = 53; -pub const EXFULL: c_int = 54; -pub const ENOANO: c_int = 55; -pub const EBADRQC: c_int = 56; -pub const EBADSLT: c_int = 57; -pub const EMULTIHOP: c_int = 72; -pub const EOVERFLOW: c_int = 75; -pub const ENOTUNIQ: c_int = 76; -pub const EBADFD: c_int = 77; -pub const EBADMSG: c_int = 74; -pub const EREMCHG: c_int = 78; -pub const ELIBACC: c_int = 79; -pub const ELIBBAD: c_int = 80; -pub const ELIBSCN: c_int = 81; -pub const ELIBMAX: c_int = 82; -pub const ELIBEXEC: c_int = 83; -pub const EILSEQ: c_int = 84; -pub const ERESTART: c_int = 85; -pub const ESTRPIPE: c_int = 86; -pub const EUSERS: c_int = 87; -pub const ENOTSOCK: c_int = 88; -pub const EDESTADDRREQ: c_int = 89; -pub const EMSGSIZE: c_int = 90; -pub const 
EPROTOTYPE: c_int = 91; -pub const ENOPROTOOPT: c_int = 92; -pub const EPROTONOSUPPORT: c_int = 93; -pub const ESOCKTNOSUPPORT: c_int = 94; -pub const EOPNOTSUPP: c_int = 95; -pub const ENOTSUP: c_int = EOPNOTSUPP; -pub const EPFNOSUPPORT: c_int = 96; -pub const EAFNOSUPPORT: c_int = 97; -pub const EADDRINUSE: c_int = 98; -pub const EADDRNOTAVAIL: c_int = 99; -pub const ENETDOWN: c_int = 100; -pub const ENETUNREACH: c_int = 101; -pub const ENETRESET: c_int = 102; -pub const ECONNABORTED: c_int = 103; -pub const ECONNRESET: c_int = 104; -pub const ENOBUFS: c_int = 105; -pub const EISCONN: c_int = 106; -pub const ENOTCONN: c_int = 107; -pub const ESHUTDOWN: c_int = 108; -pub const ETOOMANYREFS: c_int = 109; -pub const ETIMEDOUT: c_int = 110; -pub const ECONNREFUSED: c_int = 111; -pub const EHOSTDOWN: c_int = 112; -pub const EHOSTUNREACH: c_int = 113; -pub const EALREADY: c_int = 114; -pub const EINPROGRESS: c_int = 115; -pub const ESTALE: c_int = 116; -pub const EUCLEAN: c_int = 117; -pub const ENOTNAM: c_int = 118; -pub const ENAVAIL: c_int = 119; -pub const EISNAM: c_int = 120; -pub const EREMOTEIO: c_int = 121; -pub const EDQUOT: c_int = 122; -pub const ENOMEDIUM: c_int = 123; -pub const EMEDIUMTYPE: c_int = 124; -pub const ECANCELED: c_int = 125; -pub const ENOKEY: c_int = 126; -pub const EKEYEXPIRED: c_int = 127; -pub const EKEYREVOKED: c_int = 128; -pub const EKEYREJECTED: c_int = 129; -pub const EOWNERDEAD: c_int = 130; -pub const ENOTRECOVERABLE: c_int = 131; -pub const EHWPOISON: c_int = 133; -pub const ERFKILL: c_int = 132; - -pub const MADV_SOFT_OFFLINE: c_int = 101; - -pub const SA_ONSTACK: c_int = 0x08000000; -pub const SA_SIGINFO: c_int = 0x00000004; -pub const SA_NOCLDWAIT: c_int = 0x00000002; - -pub const SIGCHLD: c_int = 17; -pub const SIGBUS: c_int = 7; -pub const SIGTTIN: c_int = 21; -pub const SIGTTOU: c_int = 22; -pub const SIGXCPU: c_int = 24; -pub const SIGXFSZ: c_int = 25; -pub const SIGVTALRM: c_int = 26; -pub const SIGPROF: c_int = 27; -pub const SIGWINCH: c_int = 28; -pub const SIGUSR1: c_int = 10; -pub const SIGUSR2: c_int = 12; -pub const SIGCONT: c_int = 18; -pub const SIGSTOP: c_int = 19; -pub const SIGTSTP: c_int = 20; -pub const SIGURG: c_int = 23; -pub const SIGIO: c_int = 29; -pub const SIGSYS: c_int = 31; -pub const SIGSTKFLT: c_int = 16; -pub const SIGPOLL: c_int = 29; -pub const SIGPWR: c_int = 30; -pub const SIG_SETMASK: c_int = 2; -pub const SIG_BLOCK: c_int = 0; -pub const SIG_UNBLOCK: c_int = 1; - -pub const F_GETLK: c_int = 5; -pub const F_GETOWN: c_int = 9; -pub const F_SETLK: c_int = 6; -pub const F_SETLKW: c_int = 7; -pub const F_SETOWN: c_int = 8; - -pub const VEOF: usize = 4; - -pub const POLLWRNORM: c_short = 0x100; -pub const POLLWRBAND: c_short = 0x200; - -pub const SOCK_STREAM: c_int = 1; -pub const SOCK_DGRAM: c_int = 2; - -pub const MAP_ANON: c_int = 0x0020; -pub const MAP_GROWSDOWN: c_int = 0x0100; -pub const MAP_DENYWRITE: c_int = 0x0800; -pub const MAP_EXECUTABLE: c_int = 0x01000; -pub const MAP_LOCKED: c_int = 0x02000; -pub const MAP_NORESERVE: c_int = 0x04000; -pub const MAP_POPULATE: c_int = 0x08000; -pub const MAP_NONBLOCK: c_int = 0x010000; -pub const MAP_STACK: c_int = 0x020000; -pub const MAP_HUGETLB: c_int = 0x040000; -pub const MAP_SYNC: c_int = 0x080000; - -pub const MCL_CURRENT: c_int = 0x0001; -pub const MCL_FUTURE: c_int = 0x0002; -pub const MCL_ONFAULT: c_int = 0x0004; -pub const CBAUD: crate::tcflag_t = 0o0010017; -pub const TAB1: c_int = 0x00000800; -pub const TAB2: c_int = 0x00001000; -pub const TAB3: c_int = 
0x00001800; -pub const CR1: c_int = 0x00000200; -pub const CR2: c_int = 0x00000400; -pub const CR3: c_int = 0x00000600; -pub const FF1: c_int = 0x00008000; -pub const BS1: c_int = 0x00002000; -pub const VT1: c_int = 0x00004000; -pub const VWERASE: usize = 14; -pub const VREPRINT: usize = 12; -pub const VSUSP: usize = 10; -pub const VSTART: usize = 8; -pub const VSTOP: usize = 9; -pub const VDISCARD: usize = 13; -pub const VTIME: usize = 5; -pub const IXON: crate::tcflag_t = 0x00000400; -pub const IXOFF: crate::tcflag_t = 0x00001000; -pub const ONLCR: crate::tcflag_t = 0x4; -pub const CSIZE: crate::tcflag_t = 0x00000030; -pub const CS6: crate::tcflag_t = 0x00000010; -pub const CS7: crate::tcflag_t = 0x00000020; -pub const CS8: crate::tcflag_t = 0x00000030; -pub const CSTOPB: crate::tcflag_t = 0x00000040; -pub const CREAD: crate::tcflag_t = 0x00000080; -pub const PARENB: crate::tcflag_t = 0x00000100; -pub const PARODD: crate::tcflag_t = 0x00000200; -pub const HUPCL: crate::tcflag_t = 0x00000400; -pub const CLOCAL: crate::tcflag_t = 0x00000800; -pub const ECHOKE: crate::tcflag_t = 0x00000800; -pub const ECHOE: crate::tcflag_t = 0x00000010; -pub const ECHOK: crate::tcflag_t = 0x00000020; -pub const ECHONL: crate::tcflag_t = 0x00000040; -pub const ECHOPRT: crate::tcflag_t = 0x00000400; -pub const ECHOCTL: crate::tcflag_t = 0x00000200; -pub const ISIG: crate::tcflag_t = 0x00000001; -pub const ICANON: crate::tcflag_t = 0x00000002; -pub const XCASE: crate::tcflag_t = 0x00000004; -pub const PENDIN: crate::tcflag_t = 0x00004000; -pub const NOFLSH: crate::tcflag_t = 0x00000080; -pub const CIBAUD: crate::tcflag_t = 0o02003600000; -pub const CBAUDEX: crate::tcflag_t = 0o010000; -pub const VSWTC: usize = 7; -pub const OLCUC: crate::tcflag_t = 0o000002; -pub const NLDLY: crate::tcflag_t = 0o000400; -pub const CRDLY: crate::tcflag_t = 0o003000; -pub const TABDLY: crate::tcflag_t = 0o014000; -pub const BSDLY: crate::tcflag_t = 0o020000; -pub const FFDLY: crate::tcflag_t = 0o100000; -pub const VTDLY: crate::tcflag_t = 0o040000; -pub const XTABS: crate::tcflag_t = 0o014000; -pub const B57600: crate::speed_t = 0o010001; -pub const B115200: crate::speed_t = 0o010002; -pub const B230400: crate::speed_t = 0o010003; -pub const B460800: crate::speed_t = 0o010004; -pub const B500000: crate::speed_t = 0o010005; -pub const B576000: crate::speed_t = 0o010006; -pub const B921600: crate::speed_t = 0o010007; -pub const B1000000: crate::speed_t = 0o010010; -pub const B1152000: crate::speed_t = 0o010011; -pub const B1500000: crate::speed_t = 0o010012; -pub const B2000000: crate::speed_t = 0o010013; -pub const B2500000: crate::speed_t = 0o010014; -pub const B3000000: crate::speed_t = 0o010015; -pub const B3500000: crate::speed_t = 0o010016; -pub const B4000000: crate::speed_t = 0o010017; - -pub const EDEADLK: c_int = 35; -pub const EDEADLOCK: c_int = EDEADLK; -pub const EXTPROC: crate::tcflag_t = 0x00010000; -pub const VEOL: usize = 11; -pub const VEOL2: usize = 16; -pub const VMIN: usize = 6; -pub const IEXTEN: crate::tcflag_t = 0x00008000; -pub const TOSTOP: crate::tcflag_t = 0x00000100; -pub const FLUSHO: crate::tcflag_t = 0x00001000; diff --git a/vendor/libc/src/unix/linux_like/linux/musl/b64/mips64.rs b/vendor/libc/src/unix/linux_like/linux/musl/b64/mips64.rs deleted file mode 100644 index 95dd37c8898042..00000000000000 --- a/vendor/libc/src/unix/linux_like/linux/musl/b64/mips64.rs +++ /dev/null @@ -1,708 +0,0 @@ -use crate::off_t; -use crate::prelude::*; - -pub type wchar_t = i32; -pub type __u64 = c_ulong; -pub type 
__s64 = c_long; -pub type nlink_t = c_uint; -pub type blksize_t = i64; - -s! { - pub struct stat { - pub st_dev: crate::dev_t, - __pad1: [c_int; 3], - pub st_ino: crate::ino_t, - pub st_mode: crate::mode_t, - pub st_nlink: crate::nlink_t, - pub st_uid: crate::uid_t, - pub st_gid: crate::gid_t, - pub st_rdev: crate::dev_t, - __pad2: [c_uint; 2], - pub st_size: off_t, - __pad3: c_int, - pub st_atime: crate::time_t, - pub st_atime_nsec: c_long, - pub st_mtime: crate::time_t, - pub st_mtime_nsec: c_long, - pub st_ctime: crate::time_t, - pub st_ctime_nsec: c_long, - pub st_blksize: crate::blksize_t, - __pad4: c_uint, - pub st_blocks: crate::blkcnt_t, - __pad5: [c_int; 14], - } - - pub struct stat64 { - pub st_dev: crate::dev_t, - __pad1: [c_int; 3], - pub st_ino: crate::ino_t, - pub st_mode: crate::mode_t, - pub st_nlink: crate::nlink_t, - pub st_uid: crate::uid_t, - pub st_gid: crate::gid_t, - pub st_rdev: crate::dev_t, - __pad2: [c_uint; 2], - pub st_size: off_t, - __pad3: c_int, - pub st_atime: crate::time_t, - pub st_atime_nsec: c_long, - pub st_mtime: crate::time_t, - pub st_mtime_nsec: c_long, - pub st_ctime: crate::time_t, - pub st_ctime_nsec: c_long, - pub st_blksize: crate::blksize_t, - __pad4: c_uint, - pub st_blocks: crate::blkcnt_t, - __pad5: [c_int; 14], - } - - pub struct stack_t { - pub ss_sp: *mut c_void, - pub ss_size: size_t, - pub ss_flags: c_int, - } - - pub struct ipc_perm { - #[cfg(musl_v1_2_3)] - pub __key: crate::key_t, - #[cfg(not(musl_v1_2_3))] - #[deprecated( - since = "0.2.173", - note = "This field is incorrectly named and will be changed - to __key in a future release." - )] - pub __ipc_perm_key: crate::key_t, - pub uid: crate::uid_t, - pub gid: crate::gid_t, - pub cuid: crate::uid_t, - pub cgid: crate::gid_t, - pub mode: crate::mode_t, - pub __seq: c_int, - __pad1: c_int, - __unused1: c_ulong, - __unused2: c_ulong, - } - - pub struct statfs { - pub f_type: c_ulong, - pub f_bsize: c_ulong, - pub f_frsize: c_ulong, - pub f_blocks: crate::fsblkcnt_t, - pub f_bfree: crate::fsblkcnt_t, - pub f_files: crate::fsfilcnt_t, - pub f_ffree: crate::fsfilcnt_t, - pub f_bavail: crate::fsblkcnt_t, - pub f_fsid: crate::fsid_t, - pub f_namelen: c_ulong, - pub f_flags: c_ulong, - pub f_spare: [c_ulong; 5], - } - - pub struct statfs64 { - pub f_type: c_ulong, - pub f_bsize: c_ulong, - pub f_frsize: c_ulong, - pub f_blocks: crate::fsblkcnt64_t, - pub f_bfree: crate::fsblkcnt64_t, - pub f_files: crate::fsfilcnt64_t, - pub f_ffree: crate::fsfilcnt64_t, - pub f_bavail: crate::fsblkcnt64_t, - pub f_fsid: crate::fsid_t, - pub f_namelen: c_ulong, - pub f_flags: c_ulong, - pub f_spare: [c_ulong; 5], - } -} - -pub const SIGSTKSZ: size_t = 8192; -pub const MINSIGSTKSZ: size_t = 2048; - -pub const SYS_read: c_long = 5000 + 0; -pub const SYS_write: c_long = 5000 + 1; -pub const SYS_open: c_long = 5000 + 2; -pub const SYS_close: c_long = 5000 + 3; -pub const SYS_stat: c_long = 5000 + 4; -pub const SYS_fstat: c_long = 5000 + 5; -pub const SYS_lstat: c_long = 5000 + 6; -pub const SYS_poll: c_long = 5000 + 7; -pub const SYS_lseek: c_long = 5000 + 8; -pub const SYS_mmap: c_long = 5000 + 9; -pub const SYS_mprotect: c_long = 5000 + 10; -pub const SYS_munmap: c_long = 5000 + 11; -pub const SYS_brk: c_long = 5000 + 12; -pub const SYS_rt_sigaction: c_long = 5000 + 13; -pub const SYS_rt_sigprocmask: c_long = 5000 + 14; -pub const SYS_ioctl: c_long = 5000 + 15; -pub const SYS_pread64: c_long = 5000 + 16; -pub const SYS_pwrite64: c_long = 5000 + 17; -pub const SYS_readv: c_long = 5000 + 18; -pub const 
SYS_writev: c_long = 5000 + 19; -pub const SYS_access: c_long = 5000 + 20; -pub const SYS_pipe: c_long = 5000 + 21; -pub const SYS__newselect: c_long = 5000 + 22; -pub const SYS_sched_yield: c_long = 5000 + 23; -pub const SYS_mremap: c_long = 5000 + 24; -pub const SYS_msync: c_long = 5000 + 25; -pub const SYS_mincore: c_long = 5000 + 26; -pub const SYS_madvise: c_long = 5000 + 27; -pub const SYS_shmget: c_long = 5000 + 28; -pub const SYS_shmat: c_long = 5000 + 29; -pub const SYS_shmctl: c_long = 5000 + 30; -pub const SYS_dup: c_long = 5000 + 31; -pub const SYS_dup2: c_long = 5000 + 32; -pub const SYS_pause: c_long = 5000 + 33; -pub const SYS_nanosleep: c_long = 5000 + 34; -pub const SYS_getitimer: c_long = 5000 + 35; -pub const SYS_setitimer: c_long = 5000 + 36; -pub const SYS_alarm: c_long = 5000 + 37; -pub const SYS_getpid: c_long = 5000 + 38; -pub const SYS_sendfile: c_long = 5000 + 39; -pub const SYS_socket: c_long = 5000 + 40; -pub const SYS_connect: c_long = 5000 + 41; -pub const SYS_accept: c_long = 5000 + 42; -pub const SYS_sendto: c_long = 5000 + 43; -pub const SYS_recvfrom: c_long = 5000 + 44; -pub const SYS_sendmsg: c_long = 5000 + 45; -pub const SYS_recvmsg: c_long = 5000 + 46; -pub const SYS_shutdown: c_long = 5000 + 47; -pub const SYS_bind: c_long = 5000 + 48; -pub const SYS_listen: c_long = 5000 + 49; -pub const SYS_getsockname: c_long = 5000 + 50; -pub const SYS_getpeername: c_long = 5000 + 51; -pub const SYS_socketpair: c_long = 5000 + 52; -pub const SYS_setsockopt: c_long = 5000 + 53; -pub const SYS_getsockopt: c_long = 5000 + 54; -pub const SYS_clone: c_long = 5000 + 55; -pub const SYS_fork: c_long = 5000 + 56; -pub const SYS_execve: c_long = 5000 + 57; -pub const SYS_exit: c_long = 5000 + 58; -pub const SYS_wait4: c_long = 5000 + 59; -pub const SYS_kill: c_long = 5000 + 60; -pub const SYS_uname: c_long = 5000 + 61; -pub const SYS_semget: c_long = 5000 + 62; -pub const SYS_semop: c_long = 5000 + 63; -pub const SYS_semctl: c_long = 5000 + 64; -pub const SYS_shmdt: c_long = 5000 + 65; -pub const SYS_msgget: c_long = 5000 + 66; -pub const SYS_msgsnd: c_long = 5000 + 67; -pub const SYS_msgrcv: c_long = 5000 + 68; -pub const SYS_msgctl: c_long = 5000 + 69; -pub const SYS_fcntl: c_long = 5000 + 70; -pub const SYS_flock: c_long = 5000 + 71; -pub const SYS_fsync: c_long = 5000 + 72; -pub const SYS_fdatasync: c_long = 5000 + 73; -pub const SYS_truncate: c_long = 5000 + 74; -pub const SYS_ftruncate: c_long = 5000 + 75; -pub const SYS_getdents: c_long = 5000 + 76; -pub const SYS_getcwd: c_long = 5000 + 77; -pub const SYS_chdir: c_long = 5000 + 78; -pub const SYS_fchdir: c_long = 5000 + 79; -pub const SYS_rename: c_long = 5000 + 80; -pub const SYS_mkdir: c_long = 5000 + 81; -pub const SYS_rmdir: c_long = 5000 + 82; -pub const SYS_creat: c_long = 5000 + 83; -pub const SYS_link: c_long = 5000 + 84; -pub const SYS_unlink: c_long = 5000 + 85; -pub const SYS_symlink: c_long = 5000 + 86; -pub const SYS_readlink: c_long = 5000 + 87; -pub const SYS_chmod: c_long = 5000 + 88; -pub const SYS_fchmod: c_long = 5000 + 89; -pub const SYS_chown: c_long = 5000 + 90; -pub const SYS_fchown: c_long = 5000 + 91; -pub const SYS_lchown: c_long = 5000 + 92; -pub const SYS_umask: c_long = 5000 + 93; -pub const SYS_gettimeofday: c_long = 5000 + 94; -pub const SYS_getrlimit: c_long = 5000 + 95; -pub const SYS_getrusage: c_long = 5000 + 96; -pub const SYS_sysinfo: c_long = 5000 + 97; -pub const SYS_times: c_long = 5000 + 98; -pub const SYS_ptrace: c_long = 5000 + 99; -pub const SYS_getuid: c_long = 5000 + 
100; -pub const SYS_syslog: c_long = 5000 + 101; -pub const SYS_getgid: c_long = 5000 + 102; -pub const SYS_setuid: c_long = 5000 + 103; -pub const SYS_setgid: c_long = 5000 + 104; -pub const SYS_geteuid: c_long = 5000 + 105; -pub const SYS_getegid: c_long = 5000 + 106; -pub const SYS_setpgid: c_long = 5000 + 107; -pub const SYS_getppid: c_long = 5000 + 108; -pub const SYS_getpgrp: c_long = 5000 + 109; -pub const SYS_setsid: c_long = 5000 + 110; -pub const SYS_setreuid: c_long = 5000 + 111; -pub const SYS_setregid: c_long = 5000 + 112; -pub const SYS_getgroups: c_long = 5000 + 113; -pub const SYS_setgroups: c_long = 5000 + 114; -pub const SYS_setresuid: c_long = 5000 + 115; -pub const SYS_getresuid: c_long = 5000 + 116; -pub const SYS_setresgid: c_long = 5000 + 117; -pub const SYS_getresgid: c_long = 5000 + 118; -pub const SYS_getpgid: c_long = 5000 + 119; -pub const SYS_setfsuid: c_long = 5000 + 120; -pub const SYS_setfsgid: c_long = 5000 + 121; -pub const SYS_getsid: c_long = 5000 + 122; -pub const SYS_capget: c_long = 5000 + 123; -pub const SYS_capset: c_long = 5000 + 124; -pub const SYS_rt_sigpending: c_long = 5000 + 125; -pub const SYS_rt_sigtimedwait: c_long = 5000 + 126; -pub const SYS_rt_sigqueueinfo: c_long = 5000 + 127; -pub const SYS_rt_sigsuspend: c_long = 5000 + 128; -pub const SYS_sigaltstack: c_long = 5000 + 129; -pub const SYS_utime: c_long = 5000 + 130; -pub const SYS_mknod: c_long = 5000 + 131; -pub const SYS_personality: c_long = 5000 + 132; -pub const SYS_ustat: c_long = 5000 + 133; -pub const SYS_statfs: c_long = 5000 + 134; -pub const SYS_fstatfs: c_long = 5000 + 135; -pub const SYS_sysfs: c_long = 5000 + 136; -pub const SYS_getpriority: c_long = 5000 + 137; -pub const SYS_setpriority: c_long = 5000 + 138; -pub const SYS_sched_setparam: c_long = 5000 + 139; -pub const SYS_sched_getparam: c_long = 5000 + 140; -pub const SYS_sched_setscheduler: c_long = 5000 + 141; -pub const SYS_sched_getscheduler: c_long = 5000 + 142; -pub const SYS_sched_get_priority_max: c_long = 5000 + 143; -pub const SYS_sched_get_priority_min: c_long = 5000 + 144; -pub const SYS_sched_rr_get_interval: c_long = 5000 + 145; -pub const SYS_mlock: c_long = 5000 + 146; -pub const SYS_munlock: c_long = 5000 + 147; -pub const SYS_mlockall: c_long = 5000 + 148; -pub const SYS_munlockall: c_long = 5000 + 149; -pub const SYS_vhangup: c_long = 5000 + 150; -pub const SYS_pivot_root: c_long = 5000 + 151; -pub const SYS__sysctl: c_long = 5000 + 152; -pub const SYS_prctl: c_long = 5000 + 153; -pub const SYS_adjtimex: c_long = 5000 + 154; -pub const SYS_setrlimit: c_long = 5000 + 155; -pub const SYS_chroot: c_long = 5000 + 156; -pub const SYS_sync: c_long = 5000 + 157; -pub const SYS_acct: c_long = 5000 + 158; -pub const SYS_settimeofday: c_long = 5000 + 159; -pub const SYS_mount: c_long = 5000 + 160; -pub const SYS_umount2: c_long = 5000 + 161; -pub const SYS_swapon: c_long = 5000 + 162; -pub const SYS_swapoff: c_long = 5000 + 163; -pub const SYS_reboot: c_long = 5000 + 164; -pub const SYS_sethostname: c_long = 5000 + 165; -pub const SYS_setdomainname: c_long = 5000 + 166; -#[deprecated(since = "0.2.70", note = "Functional up to 2.6 kernel")] -pub const SYS_create_module: c_long = 5000 + 167; -pub const SYS_init_module: c_long = 5000 + 168; -pub const SYS_delete_module: c_long = 5000 + 169; -#[deprecated(since = "0.2.70", note = "Functional up to 2.6 kernel")] -pub const SYS_get_kernel_syms: c_long = 5000 + 170; -#[deprecated(since = "0.2.70", note = "Functional up to 2.6 kernel")] -pub const SYS_query_module: 
c_long = 5000 + 171; -pub const SYS_quotactl: c_long = 5000 + 172; -pub const SYS_nfsservctl: c_long = 5000 + 173; -pub const SYS_getpmsg: c_long = 5000 + 174; -pub const SYS_putpmsg: c_long = 5000 + 175; -pub const SYS_afs_syscall: c_long = 5000 + 176; -pub const SYS_gettid: c_long = 5000 + 178; -pub const SYS_readahead: c_long = 5000 + 179; -pub const SYS_setxattr: c_long = 5000 + 180; -pub const SYS_lsetxattr: c_long = 5000 + 181; -pub const SYS_fsetxattr: c_long = 5000 + 182; -pub const SYS_getxattr: c_long = 5000 + 183; -pub const SYS_lgetxattr: c_long = 5000 + 184; -pub const SYS_fgetxattr: c_long = 5000 + 185; -pub const SYS_listxattr: c_long = 5000 + 186; -pub const SYS_llistxattr: c_long = 5000 + 187; -pub const SYS_flistxattr: c_long = 5000 + 188; -pub const SYS_removexattr: c_long = 5000 + 189; -pub const SYS_lremovexattr: c_long = 5000 + 190; -pub const SYS_fremovexattr: c_long = 5000 + 191; -pub const SYS_tkill: c_long = 5000 + 192; -pub const SYS_futex: c_long = 5000 + 194; -pub const SYS_sched_setaffinity: c_long = 5000 + 195; -pub const SYS_sched_getaffinity: c_long = 5000 + 196; -pub const SYS_cacheflush: c_long = 5000 + 197; -pub const SYS_cachectl: c_long = 5000 + 198; -pub const SYS_sysmips: c_long = 5000 + 199; -pub const SYS_io_setup: c_long = 5000 + 200; -pub const SYS_io_destroy: c_long = 5000 + 201; -pub const SYS_io_getevents: c_long = 5000 + 202; -pub const SYS_io_submit: c_long = 5000 + 203; -pub const SYS_io_cancel: c_long = 5000 + 204; -pub const SYS_exit_group: c_long = 5000 + 205; -pub const SYS_lookup_dcookie: c_long = 5000 + 206; -pub const SYS_epoll_create: c_long = 5000 + 207; -pub const SYS_epoll_ctl: c_long = 5000 + 208; -pub const SYS_epoll_wait: c_long = 5000 + 209; -pub const SYS_remap_file_pages: c_long = 5000 + 210; -pub const SYS_rt_sigreturn: c_long = 5000 + 211; -pub const SYS_set_tid_address: c_long = 5000 + 212; -pub const SYS_restart_syscall: c_long = 5000 + 213; -pub const SYS_semtimedop: c_long = 5000 + 214; -pub const SYS_fadvise64: c_long = 5000 + 215; -pub const SYS_timer_create: c_long = 5000 + 216; -pub const SYS_timer_settime: c_long = 5000 + 217; -pub const SYS_timer_gettime: c_long = 5000 + 218; -pub const SYS_timer_getoverrun: c_long = 5000 + 219; -pub const SYS_timer_delete: c_long = 5000 + 220; -pub const SYS_clock_settime: c_long = 5000 + 221; -pub const SYS_clock_gettime: c_long = 5000 + 222; -pub const SYS_clock_getres: c_long = 5000 + 223; -pub const SYS_clock_nanosleep: c_long = 5000 + 224; -pub const SYS_tgkill: c_long = 5000 + 225; -pub const SYS_utimes: c_long = 5000 + 226; -pub const SYS_mbind: c_long = 5000 + 227; -pub const SYS_get_mempolicy: c_long = 5000 + 228; -pub const SYS_set_mempolicy: c_long = 5000 + 229; -pub const SYS_mq_open: c_long = 5000 + 230; -pub const SYS_mq_unlink: c_long = 5000 + 231; -pub const SYS_mq_timedsend: c_long = 5000 + 232; -pub const SYS_mq_timedreceive: c_long = 5000 + 233; -pub const SYS_mq_notify: c_long = 5000 + 234; -pub const SYS_mq_getsetattr: c_long = 5000 + 235; -pub const SYS_vserver: c_long = 5000 + 236; -pub const SYS_waitid: c_long = 5000 + 237; -/* pub const SYS_sys_setaltroot: c_long = 5000 + 238; */ -pub const SYS_add_key: c_long = 5000 + 239; -pub const SYS_request_key: c_long = 5000 + 240; -pub const SYS_keyctl: c_long = 5000 + 241; -pub const SYS_set_thread_area: c_long = 5000 + 242; -pub const SYS_inotify_init: c_long = 5000 + 243; -pub const SYS_inotify_add_watch: c_long = 5000 + 244; -pub const SYS_inotify_rm_watch: c_long = 5000 + 245; -pub const SYS_migrate_pages: 
c_long = 5000 + 246; -pub const SYS_openat: c_long = 5000 + 247; -pub const SYS_mkdirat: c_long = 5000 + 248; -pub const SYS_mknodat: c_long = 5000 + 249; -pub const SYS_fchownat: c_long = 5000 + 250; -pub const SYS_futimesat: c_long = 5000 + 251; -pub const SYS_newfstatat: c_long = 5000 + 252; -pub const SYS_unlinkat: c_long = 5000 + 253; -pub const SYS_renameat: c_long = 5000 + 254; -pub const SYS_linkat: c_long = 5000 + 255; -pub const SYS_symlinkat: c_long = 5000 + 256; -pub const SYS_readlinkat: c_long = 5000 + 257; -pub const SYS_fchmodat: c_long = 5000 + 258; -pub const SYS_faccessat: c_long = 5000 + 259; -pub const SYS_pselect6: c_long = 5000 + 260; -pub const SYS_ppoll: c_long = 5000 + 261; -pub const SYS_unshare: c_long = 5000 + 262; -pub const SYS_splice: c_long = 5000 + 263; -pub const SYS_sync_file_range: c_long = 5000 + 264; -pub const SYS_tee: c_long = 5000 + 265; -pub const SYS_vmsplice: c_long = 5000 + 266; -pub const SYS_move_pages: c_long = 5000 + 267; -pub const SYS_set_robust_list: c_long = 5000 + 268; -pub const SYS_get_robust_list: c_long = 5000 + 269; -pub const SYS_kexec_load: c_long = 5000 + 270; -pub const SYS_getcpu: c_long = 5000 + 271; -pub const SYS_epoll_pwait: c_long = 5000 + 272; -pub const SYS_ioprio_set: c_long = 5000 + 273; -pub const SYS_ioprio_get: c_long = 5000 + 274; -pub const SYS_utimensat: c_long = 5000 + 275; -pub const SYS_signalfd: c_long = 5000 + 276; -pub const SYS_timerfd: c_long = 5000 + 277; -pub const SYS_eventfd: c_long = 5000 + 278; -pub const SYS_fallocate: c_long = 5000 + 279; -pub const SYS_timerfd_create: c_long = 5000 + 280; -pub const SYS_timerfd_gettime: c_long = 5000 + 281; -pub const SYS_timerfd_settime: c_long = 5000 + 282; -pub const SYS_signalfd4: c_long = 5000 + 283; -pub const SYS_eventfd2: c_long = 5000 + 284; -pub const SYS_epoll_create1: c_long = 5000 + 285; -pub const SYS_dup3: c_long = 5000 + 286; -pub const SYS_pipe2: c_long = 5000 + 287; -pub const SYS_inotify_init1: c_long = 5000 + 288; -pub const SYS_preadv: c_long = 5000 + 289; -pub const SYS_pwritev: c_long = 5000 + 290; -pub const SYS_rt_tgsigqueueinfo: c_long = 5000 + 291; -pub const SYS_perf_event_open: c_long = 5000 + 292; -pub const SYS_accept4: c_long = 5000 + 293; -pub const SYS_recvmmsg: c_long = 5000 + 294; -pub const SYS_fanotify_init: c_long = 5000 + 295; -pub const SYS_fanotify_mark: c_long = 5000 + 296; -pub const SYS_prlimit64: c_long = 5000 + 297; -pub const SYS_name_to_handle_at: c_long = 5000 + 298; -pub const SYS_open_by_handle_at: c_long = 5000 + 299; -pub const SYS_clock_adjtime: c_long = 5000 + 300; -pub const SYS_syncfs: c_long = 5000 + 301; -pub const SYS_sendmmsg: c_long = 5000 + 302; -pub const SYS_setns: c_long = 5000 + 303; -pub const SYS_process_vm_readv: c_long = 5000 + 304; -pub const SYS_process_vm_writev: c_long = 5000 + 305; -pub const SYS_kcmp: c_long = 5000 + 306; -pub const SYS_finit_module: c_long = 5000 + 307; -pub const SYS_getdents64: c_long = 5000 + 308; -pub const SYS_sched_setattr: c_long = 5000 + 309; -pub const SYS_sched_getattr: c_long = 5000 + 310; -pub const SYS_renameat2: c_long = 5000 + 311; -pub const SYS_seccomp: c_long = 5000 + 312; -pub const SYS_getrandom: c_long = 5000 + 313; -pub const SYS_memfd_create: c_long = 5000 + 314; -pub const SYS_bpf: c_long = 5000 + 315; -pub const SYS_execveat: c_long = 5000 + 316; -pub const SYS_userfaultfd: c_long = 5000 + 317; -pub const SYS_membarrier: c_long = 5000 + 318; -pub const SYS_mlock2: c_long = 5000 + 319; -pub const SYS_copy_file_range: c_long = 5000 + 320; -pub 
const SYS_preadv2: c_long = 5000 + 321; -pub const SYS_pwritev2: c_long = 5000 + 322; -pub const SYS_pkey_mprotect: c_long = 5000 + 323; -pub const SYS_pkey_alloc: c_long = 5000 + 324; -pub const SYS_pkey_free: c_long = 5000 + 325; -pub const SYS_statx: c_long = 5000 + 326; -pub const SYS_pidfd_send_signal: c_long = 5000 + 424; -pub const SYS_io_uring_setup: c_long = 5000 + 425; -pub const SYS_io_uring_enter: c_long = 5000 + 426; -pub const SYS_io_uring_register: c_long = 5000 + 427; -pub const SYS_open_tree: c_long = 5000 + 428; -pub const SYS_move_mount: c_long = 5000 + 429; -pub const SYS_fsopen: c_long = 5000 + 430; -pub const SYS_fsconfig: c_long = 5000 + 431; -pub const SYS_fsmount: c_long = 5000 + 432; -pub const SYS_fspick: c_long = 5000 + 433; -pub const SYS_pidfd_open: c_long = 5000 + 434; -pub const SYS_clone3: c_long = 5000 + 435; -pub const SYS_close_range: c_long = 5000 + 436; -pub const SYS_openat2: c_long = 5000 + 437; -pub const SYS_pidfd_getfd: c_long = 5000 + 438; -pub const SYS_faccessat2: c_long = 5000 + 439; -pub const SYS_process_madvise: c_long = 5000 + 440; -pub const SYS_epoll_pwait2: c_long = 5000 + 441; -pub const SYS_mount_setattr: c_long = 5000 + 442; -pub const SYS_quotactl_fd: c_long = 5000 + 443; -pub const SYS_landlock_create_ruleset: c_long = 5000 + 444; -pub const SYS_landlock_add_rule: c_long = 5000 + 445; -pub const SYS_landlock_restrict_self: c_long = 5000 + 446; -pub const SYS_memfd_secret: c_long = 5000 + 447; -pub const SYS_process_mrelease: c_long = 5000 + 448; -pub const SYS_futex_waitv: c_long = 5000 + 449; -pub const SYS_set_mempolicy_home_node: c_long = 5000 + 450; - -pub const O_DIRECT: c_int = 0x8000; -pub const O_DIRECTORY: c_int = 0x10000; -pub const O_NOFOLLOW: c_int = 0x20000; - -pub const O_APPEND: c_int = 8; -pub const O_CREAT: c_int = 256; -pub const O_EXCL: c_int = 1024; -pub const O_NOCTTY: c_int = 2048; -pub const O_NONBLOCK: c_int = 128; -pub const O_SYNC: c_int = 0x4010; -pub const O_RSYNC: c_int = 0x4010; -pub const O_DSYNC: c_int = 0x10; -pub const O_ASYNC: c_int = 0x1000; -pub const O_LARGEFILE: c_int = 0x2000; - -pub const EDEADLK: c_int = 45; -pub const ENAMETOOLONG: c_int = 78; -pub const ENOLCK: c_int = 46; -pub const ENOSYS: c_int = 89; -pub const ENOTEMPTY: c_int = 93; -pub const ELOOP: c_int = 90; -pub const ENOMSG: c_int = 35; -pub const EIDRM: c_int = 36; -pub const ECHRNG: c_int = 37; -pub const EL2NSYNC: c_int = 38; -pub const EL3HLT: c_int = 39; -pub const EL3RST: c_int = 40; -pub const ELNRNG: c_int = 41; -pub const EUNATCH: c_int = 42; -pub const ENOCSI: c_int = 43; -pub const EL2HLT: c_int = 44; -pub const EBADE: c_int = 50; -pub const EBADR: c_int = 51; -pub const EXFULL: c_int = 52; -pub const ENOANO: c_int = 53; -pub const EBADRQC: c_int = 54; -pub const EBADSLT: c_int = 55; -pub const EDEADLOCK: c_int = 56; -pub const EMULTIHOP: c_int = 74; -pub const EOVERFLOW: c_int = 79; -pub const ENOTUNIQ: c_int = 80; -pub const EBADFD: c_int = 81; -pub const EBADMSG: c_int = 77; -pub const EREMCHG: c_int = 82; -pub const ELIBACC: c_int = 83; -pub const ELIBBAD: c_int = 84; -pub const ELIBSCN: c_int = 85; -pub const ELIBMAX: c_int = 86; -pub const ELIBEXEC: c_int = 87; -pub const EILSEQ: c_int = 88; -pub const ERESTART: c_int = 91; -pub const ESTRPIPE: c_int = 92; -pub const EUSERS: c_int = 94; -pub const ENOTSOCK: c_int = 95; -pub const EDESTADDRREQ: c_int = 96; -pub const EMSGSIZE: c_int = 97; -pub const EPROTOTYPE: c_int = 98; -pub const ENOPROTOOPT: c_int = 99; -pub const EPROTONOSUPPORT: c_int = 120; -pub const 
ESOCKTNOSUPPORT: c_int = 121; -pub const EOPNOTSUPP: c_int = 122; -pub const ENOTSUP: c_int = EOPNOTSUPP; -pub const EPFNOSUPPORT: c_int = 123; -pub const EAFNOSUPPORT: c_int = 124; -pub const EADDRINUSE: c_int = 125; -pub const EADDRNOTAVAIL: c_int = 126; -pub const ENETDOWN: c_int = 127; -pub const ENETUNREACH: c_int = 128; -pub const ENETRESET: c_int = 129; -pub const ECONNABORTED: c_int = 130; -pub const ECONNRESET: c_int = 131; -pub const ENOBUFS: c_int = 132; -pub const EISCONN: c_int = 133; -pub const ENOTCONN: c_int = 134; -pub const ESHUTDOWN: c_int = 143; -pub const ETOOMANYREFS: c_int = 144; -pub const ETIMEDOUT: c_int = 145; -pub const ECONNREFUSED: c_int = 146; -pub const EHOSTDOWN: c_int = 147; -pub const EHOSTUNREACH: c_int = 148; -pub const EALREADY: c_int = 149; -pub const EINPROGRESS: c_int = 150; -pub const ESTALE: c_int = 151; -pub const EUCLEAN: c_int = 135; -pub const ENOTNAM: c_int = 137; -pub const ENAVAIL: c_int = 138; -pub const EISNAM: c_int = 139; -pub const EREMOTEIO: c_int = 140; -pub const EDQUOT: c_int = 1133; -pub const ENOMEDIUM: c_int = 159; -pub const EMEDIUMTYPE: c_int = 160; -pub const ECANCELED: c_int = 158; -pub const ENOKEY: c_int = 161; -pub const EKEYEXPIRED: c_int = 162; -pub const EKEYREVOKED: c_int = 163; -pub const EKEYREJECTED: c_int = 164; -pub const EOWNERDEAD: c_int = 165; -pub const ENOTRECOVERABLE: c_int = 166; -pub const ERFKILL: c_int = 167; - -pub const MAP_ANON: c_int = 0x800; -pub const MAP_GROWSDOWN: c_int = 0x1000; -pub const MAP_DENYWRITE: c_int = 0x2000; -pub const MAP_EXECUTABLE: c_int = 0x4000; -pub const MAP_LOCKED: c_int = 0x8000; -pub const MAP_NORESERVE: c_int = 0x400; -pub const MAP_POPULATE: c_int = 0x10000; -pub const MAP_NONBLOCK: c_int = 0x20000; -pub const MAP_STACK: c_int = 0x40000; -pub const MAP_HUGETLB: c_int = 0x080000; - -pub const SOCK_STREAM: c_int = 2; -pub const SOCK_DGRAM: c_int = 1; - -pub const SA_ONSTACK: c_int = 0x08000000; -pub const SA_SIGINFO: c_int = 0x00000008; -pub const SA_NOCLDWAIT: c_int = 0x00010000; - -pub const SIGEMT: c_int = 7; -pub const SIGCHLD: c_int = 18; -pub const SIGBUS: c_int = 10; -pub const SIGTTIN: c_int = 26; -pub const SIGTTOU: c_int = 27; -pub const SIGXCPU: c_int = 30; -pub const SIGXFSZ: c_int = 31; -pub const SIGVTALRM: c_int = 28; -pub const SIGPROF: c_int = 29; -pub const SIGWINCH: c_int = 20; -pub const SIGUSR1: c_int = 16; -pub const SIGUSR2: c_int = 17; -pub const SIGCONT: c_int = 25; -pub const SIGSTOP: c_int = 23; -pub const SIGTSTP: c_int = 24; -pub const SIGURG: c_int = 21; -pub const SIGIO: c_int = 22; -pub const SIGSYS: c_int = 12; -pub const SIGPOLL: c_int = 22; -pub const SIGPWR: c_int = 19; -pub const SIG_SETMASK: c_int = 3; -pub const SIG_BLOCK: c_int = 0x1; -pub const SIG_UNBLOCK: c_int = 0x2; - -pub const POLLWRNORM: c_short = 0x004; -pub const POLLWRBAND: c_short = 0x100; - -pub const VEOF: usize = 16; -pub const VEOL: usize = 17; -pub const VEOL2: usize = 6; -pub const VMIN: usize = 4; -pub const IEXTEN: crate::tcflag_t = 0x00000100; -pub const TOSTOP: crate::tcflag_t = 0x00008000; -pub const FLUSHO: crate::tcflag_t = 0x00002000; -pub const EXTPROC: crate::tcflag_t = 0o200000; - -pub const F_GETLK: c_int = 14; -pub const F_GETOWN: c_int = 23; -pub const F_SETOWN: c_int = 24; -pub const F_SETLK: c_int = 6; -pub const F_SETLKW: c_int = 7; - -pub const MCL_CURRENT: c_int = 0x0001; -pub const MCL_FUTURE: c_int = 0x0002; -pub const MCL_ONFAULT: c_int = 0x0004; - -pub const CBAUD: crate::tcflag_t = 0o0010017; -pub const TAB1: crate::tcflag_t = 0x00000800; 
-pub const TAB2: crate::tcflag_t = 0x00001000; -pub const TAB3: crate::tcflag_t = 0x00001800; -pub const CR1: crate::tcflag_t = 0x00000200; -pub const CR2: crate::tcflag_t = 0x00000400; -pub const CR3: crate::tcflag_t = 0x00000600; -pub const FF1: crate::tcflag_t = 0x00008000; -pub const BS1: crate::tcflag_t = 0x00002000; -pub const VT1: crate::tcflag_t = 0x00004000; -pub const VWERASE: usize = 14; -pub const VREPRINT: usize = 12; -pub const VSUSP: usize = 10; -pub const VSTART: usize = 8; -pub const VSTOP: usize = 9; -pub const VDISCARD: usize = 13; -pub const VTIME: usize = 5; -pub const IXON: crate::tcflag_t = 0x00000400; -pub const IXOFF: crate::tcflag_t = 0x00001000; -pub const ONLCR: crate::tcflag_t = 0x4; -pub const CSIZE: crate::tcflag_t = 0x00000030; -pub const CS6: crate::tcflag_t = 0x00000010; -pub const CS7: crate::tcflag_t = 0x00000020; -pub const CS8: crate::tcflag_t = 0x00000030; -pub const CSTOPB: crate::tcflag_t = 0x00000040; -pub const CREAD: crate::tcflag_t = 0x00000080; -pub const PARENB: crate::tcflag_t = 0x00000100; -pub const PARODD: crate::tcflag_t = 0x00000200; -pub const HUPCL: crate::tcflag_t = 0x00000400; -pub const CLOCAL: crate::tcflag_t = 0x00000800; -pub const ECHOKE: crate::tcflag_t = 0x00000800; -pub const ECHOE: crate::tcflag_t = 0x00000010; -pub const ECHOK: crate::tcflag_t = 0x00000020; -pub const ECHONL: crate::tcflag_t = 0x00000040; -pub const ECHOPRT: crate::tcflag_t = 0x00000400; -pub const ECHOCTL: crate::tcflag_t = 0x00000200; -pub const ISIG: crate::tcflag_t = 0x00000001; -pub const ICANON: crate::tcflag_t = 0x00000002; -pub const PENDIN: crate::tcflag_t = 0x00004000; -pub const NOFLSH: crate::tcflag_t = 0x00000080; -pub const CIBAUD: crate::tcflag_t = 0o02003600000; -pub const CBAUDEX: crate::tcflag_t = 0o010000; -pub const VSWTC: usize = 7; -pub const OLCUC: crate::tcflag_t = 0o000002; -pub const NLDLY: crate::tcflag_t = 0o000400; -pub const CRDLY: crate::tcflag_t = 0o003000; -pub const TABDLY: crate::tcflag_t = 0o014000; -pub const BSDLY: crate::tcflag_t = 0o020000; -pub const FFDLY: crate::tcflag_t = 0o100000; -pub const VTDLY: crate::tcflag_t = 0o040000; -pub const XTABS: crate::tcflag_t = 0o014000; - -pub const B57600: crate::speed_t = 0o010001; -pub const B115200: crate::speed_t = 0o010002; -pub const B230400: crate::speed_t = 0o010003; -pub const B460800: crate::speed_t = 0o010004; -pub const B500000: crate::speed_t = 0o010005; -pub const B576000: crate::speed_t = 0o010006; -pub const B921600: crate::speed_t = 0o010007; -pub const B1000000: crate::speed_t = 0o010010; -pub const B1152000: crate::speed_t = 0o010011; -pub const B1500000: crate::speed_t = 0o010012; -pub const B2000000: crate::speed_t = 0o010013; -pub const B2500000: crate::speed_t = 0o010014; -pub const B3000000: crate::speed_t = 0o010015; -pub const B3500000: crate::speed_t = 0o010016; -pub const B4000000: crate::speed_t = 0o010017; - -pub const EHWPOISON: c_int = 168; diff --git a/vendor/libc/src/unix/linux_like/linux/musl/b64/mod.rs b/vendor/libc/src/unix/linux_like/linux/musl/b64/mod.rs deleted file mode 100644 index 1bfd812ab2a344..00000000000000 --- a/vendor/libc/src/unix/linux_like/linux/musl/b64/mod.rs +++ /dev/null @@ -1,116 +0,0 @@ -use crate::prelude::*; - -pub type regoff_t = c_long; - -s! { - // MIPS implementation is special, see the subfolder. 
- #[cfg(not(target_arch = "mips64"))] - pub struct stack_t { - pub ss_sp: *mut c_void, - pub ss_flags: c_int, - pub ss_size: size_t, - } - - pub struct pthread_attr_t { - __size: [u64; 7], - } - - pub struct sigset_t { - __val: [c_ulong; 16], - } - - // PowerPC implementation is special, see the subfolder. - #[cfg(not(target_arch = "powerpc64"))] - pub struct shmid_ds { - pub shm_perm: crate::ipc_perm, - pub shm_segsz: size_t, - pub shm_atime: crate::time_t, - pub shm_dtime: crate::time_t, - pub shm_ctime: crate::time_t, - pub shm_cpid: crate::pid_t, - pub shm_lpid: crate::pid_t, - pub shm_nattch: c_ulong, - __pad1: c_ulong, - __pad2: c_ulong, - } - - pub struct msqid_ds { - pub msg_perm: crate::ipc_perm, - pub msg_stime: crate::time_t, - pub msg_rtime: crate::time_t, - pub msg_ctime: crate::time_t, - pub __msg_cbytes: c_ulong, - pub msg_qnum: crate::msgqnum_t, - pub msg_qbytes: crate::msglen_t, - pub msg_lspid: crate::pid_t, - pub msg_lrpid: crate::pid_t, - __pad1: c_ulong, - __pad2: c_ulong, - } - - pub struct msghdr { - pub msg_name: *mut c_void, - pub msg_namelen: crate::socklen_t, - pub msg_iov: *mut crate::iovec, - #[cfg(target_endian = "big")] - __pad1: c_int, - pub msg_iovlen: c_int, - #[cfg(target_endian = "little")] - __pad1: c_int, - pub msg_control: *mut c_void, - #[cfg(target_endian = "big")] - __pad2: c_int, - pub msg_controllen: crate::socklen_t, - #[cfg(target_endian = "little")] - __pad2: c_int, - pub msg_flags: c_int, - } - - pub struct cmsghdr { - #[cfg(target_endian = "big")] - pub __pad1: c_int, - pub cmsg_len: crate::socklen_t, - #[cfg(target_endian = "little")] - pub __pad1: c_int, - pub cmsg_level: c_int, - pub cmsg_type: c_int, - } - - pub struct sem_t { - __val: [c_int; 8], - } -} - -pub const __SIZEOF_PTHREAD_RWLOCK_T: usize = 56; -pub const __SIZEOF_PTHREAD_MUTEX_T: usize = 40; -pub const __SIZEOF_PTHREAD_BARRIER_T: usize = 32; - -cfg_if! { - if #[cfg(target_arch = "aarch64")] { - mod aarch64; - pub use self::aarch64::*; - } else if #[cfg(target_arch = "mips64")] { - mod mips64; - pub use self::mips64::*; - } else if #[cfg(any(target_arch = "powerpc64"))] { - mod powerpc64; - pub use self::powerpc64::*; - } else if #[cfg(any(target_arch = "s390x"))] { - mod s390x; - pub use self::s390x::*; - } else if #[cfg(any(target_arch = "x86_64"))] { - mod x86_64; - pub use self::x86_64::*; - } else if #[cfg(any(target_arch = "riscv64"))] { - mod riscv64; - pub use self::riscv64::*; - } else if #[cfg(any(target_arch = "loongarch64"))] { - mod loongarch64; - pub use self::loongarch64::*; - } else if #[cfg(any(target_arch = "wasm32"))] { - mod wasm32; - pub use self::wasm32::*; - } else { - // Unknown target_arch - } -} diff --git a/vendor/libc/src/unix/linux_like/linux/musl/b64/powerpc64.rs b/vendor/libc/src/unix/linux_like/linux/musl/b64/powerpc64.rs deleted file mode 100644 index bbcd382211dfde..00000000000000 --- a/vendor/libc/src/unix/linux_like/linux/musl/b64/powerpc64.rs +++ /dev/null @@ -1,752 +0,0 @@ -use crate::off_t; -use crate::prelude::*; - -pub type wchar_t = i32; -pub type __u64 = c_ulong; -pub type __s64 = c_long; -pub type nlink_t = u64; -pub type blksize_t = c_long; - -s! 
{ - pub struct termios { - pub c_iflag: crate::tcflag_t, - pub c_oflag: crate::tcflag_t, - pub c_cflag: crate::tcflag_t, - pub c_lflag: crate::tcflag_t, - pub c_cc: [crate::cc_t; crate::NCCS], - pub c_line: crate::cc_t, - pub __c_ispeed: crate::speed_t, - pub __c_ospeed: crate::speed_t, - } - - pub struct stat { - pub st_dev: crate::dev_t, - pub st_ino: crate::ino_t, - pub st_nlink: crate::nlink_t, - pub st_mode: crate::mode_t, - pub st_uid: crate::uid_t, - pub st_gid: crate::gid_t, - __pad0: c_int, - pub st_rdev: crate::dev_t, - pub st_size: off_t, - pub st_blksize: crate::blksize_t, - pub st_blocks: crate::blkcnt_t, - pub st_atime: crate::time_t, - pub st_atime_nsec: c_long, - pub st_mtime: crate::time_t, - pub st_mtime_nsec: c_long, - pub st_ctime: crate::time_t, - pub st_ctime_nsec: c_long, - __unused: [c_long; 3], - } - - pub struct stat64 { - pub st_dev: crate::dev_t, - pub st_ino: crate::ino64_t, - pub st_nlink: crate::nlink_t, - pub st_mode: crate::mode_t, - pub st_uid: crate::uid_t, - pub st_gid: crate::gid_t, - __pad0: c_int, - pub st_rdev: crate::dev_t, - pub st_size: off_t, - pub st_blksize: crate::blksize_t, - pub st_blocks: crate::blkcnt64_t, - pub st_atime: crate::time_t, - pub st_atime_nsec: c_long, - pub st_mtime: crate::time_t, - pub st_mtime_nsec: c_long, - pub st_ctime: crate::time_t, - pub st_ctime_nsec: c_long, - __reserved: [c_long; 3], - } - - pub struct shmid_ds { - pub shm_perm: crate::ipc_perm, - pub shm_atime: crate::time_t, - pub shm_dtime: crate::time_t, - pub shm_ctime: crate::time_t, - pub shm_segsz: size_t, - pub shm_cpid: crate::pid_t, - pub shm_lpid: crate::pid_t, - pub shm_nattch: c_ulong, - __unused: [c_ulong; 2], - } - - pub struct ipc_perm { - #[cfg(musl_v1_2_3)] - pub __key: crate::key_t, - #[cfg(not(musl_v1_2_3))] - #[deprecated( - since = "0.2.173", - note = "This field is incorrectly named and will be changed - to __key in a future release." 
- )] - pub __ipc_perm_key: crate::key_t, - pub uid: crate::uid_t, - pub gid: crate::gid_t, - pub cuid: crate::uid_t, - pub cgid: crate::gid_t, - pub mode: crate::mode_t, - pub __seq: c_int, - __unused1: c_long, - __unused2: c_long, - } -} - -pub const MADV_SOFT_OFFLINE: c_int = 101; -#[deprecated( - since = "0.2.175", - note = "Linux does not define MAP_32BIT on any architectures \ - other than x86 and x86_64, this constant will be removed in the future" -)] -pub const MAP_32BIT: c_int = 0x0040; -pub const O_APPEND: c_int = 1024; -pub const O_DIRECT: c_int = 0x20000; -pub const O_DIRECTORY: c_int = 0x4000; -pub const O_LARGEFILE: c_int = 0x10000; -pub const O_NOFOLLOW: c_int = 0x8000; -pub const O_CREAT: c_int = 64; -pub const O_EXCL: c_int = 128; -pub const O_NOCTTY: c_int = 256; -pub const O_NONBLOCK: c_int = 2048; -pub const O_SYNC: c_int = 1052672; -pub const O_RSYNC: c_int = 1052672; -pub const O_DSYNC: c_int = 4096; -pub const O_ASYNC: c_int = 0x2000; - -pub const ENAMETOOLONG: c_int = 36; -pub const ENOLCK: c_int = 37; -pub const ENOSYS: c_int = 38; -pub const ENOTEMPTY: c_int = 39; -pub const ELOOP: c_int = 40; -pub const ENOMSG: c_int = 42; -pub const EIDRM: c_int = 43; -pub const ECHRNG: c_int = 44; -pub const EL2NSYNC: c_int = 45; -pub const EL3HLT: c_int = 46; -pub const EL3RST: c_int = 47; -pub const ELNRNG: c_int = 48; -pub const EUNATCH: c_int = 49; -pub const ENOCSI: c_int = 50; -pub const EL2HLT: c_int = 51; -pub const EBADE: c_int = 52; -pub const EBADR: c_int = 53; -pub const EXFULL: c_int = 54; -pub const ENOANO: c_int = 55; -pub const EBADRQC: c_int = 56; -pub const EBADSLT: c_int = 57; -pub const EMULTIHOP: c_int = 72; -pub const EBADMSG: c_int = 74; -pub const EOVERFLOW: c_int = 75; -pub const ENOTUNIQ: c_int = 76; -pub const EBADFD: c_int = 77; -pub const EREMCHG: c_int = 78; -pub const ELIBACC: c_int = 79; -pub const ELIBBAD: c_int = 80; -pub const ELIBSCN: c_int = 81; -pub const ELIBMAX: c_int = 82; -pub const ELIBEXEC: c_int = 83; -pub const EILSEQ: c_int = 84; -pub const ERESTART: c_int = 85; -pub const ESTRPIPE: c_int = 86; -pub const EUSERS: c_int = 87; -pub const ENOTSOCK: c_int = 88; -pub const EDESTADDRREQ: c_int = 89; -pub const EMSGSIZE: c_int = 90; -pub const EPROTOTYPE: c_int = 91; -pub const ENOPROTOOPT: c_int = 92; -pub const EPROTONOSUPPORT: c_int = 93; -pub const ESOCKTNOSUPPORT: c_int = 94; -pub const EOPNOTSUPP: c_int = 95; -pub const ENOTSUP: c_int = EOPNOTSUPP; -pub const EPFNOSUPPORT: c_int = 96; -pub const EAFNOSUPPORT: c_int = 97; -pub const EADDRINUSE: c_int = 98; -pub const EADDRNOTAVAIL: c_int = 99; -pub const ENETDOWN: c_int = 100; -pub const ENETUNREACH: c_int = 101; -pub const ENETRESET: c_int = 102; -pub const ECONNABORTED: c_int = 103; -pub const ECONNRESET: c_int = 104; -pub const ENOBUFS: c_int = 105; -pub const EISCONN: c_int = 106; -pub const ENOTCONN: c_int = 107; -pub const ESHUTDOWN: c_int = 108; -pub const ETOOMANYREFS: c_int = 109; -pub const ETIMEDOUT: c_int = 110; -pub const ECONNREFUSED: c_int = 111; -pub const EHOSTDOWN: c_int = 112; -pub const EHOSTUNREACH: c_int = 113; -pub const EALREADY: c_int = 114; -pub const EINPROGRESS: c_int = 115; -pub const ESTALE: c_int = 116; -pub const EUCLEAN: c_int = 117; -pub const ENOTNAM: c_int = 118; -pub const ENAVAIL: c_int = 119; -pub const EISNAM: c_int = 120; -pub const EREMOTEIO: c_int = 121; -pub const EDQUOT: c_int = 122; -pub const ENOMEDIUM: c_int = 123; -pub const EMEDIUMTYPE: c_int = 124; -pub const ECANCELED: c_int = 125; -pub const ENOKEY: c_int = 126; -pub const 
EKEYEXPIRED: c_int = 127; -pub const EKEYREVOKED: c_int = 128; -pub const EKEYREJECTED: c_int = 129; -pub const EOWNERDEAD: c_int = 130; -pub const ENOTRECOVERABLE: c_int = 131; -pub const ERFKILL: c_int = 132; -pub const EHWPOISON: c_int = 133; - -pub const MAP_ANON: c_int = 0x0020; -pub const MAP_GROWSDOWN: c_int = 0x0100; -pub const MAP_DENYWRITE: c_int = 0x0800; -pub const MAP_EXECUTABLE: c_int = 0x01000; -pub const MAP_LOCKED: c_int = 0x80; -pub const MAP_NORESERVE: c_int = 0x40; -pub const MAP_POPULATE: c_int = 0x08000; -pub const MAP_NONBLOCK: c_int = 0x010000; -pub const MAP_STACK: c_int = 0x020000; -pub const MAP_HUGETLB: c_int = 0x040000; -pub const MAP_SYNC: c_int = 0x080000; - -pub const PTRACE_SYSEMU: c_int = 0x1d; -pub const PTRACE_SYSEMU_SINGLESTEP: c_int = 0x1e; - -pub const SOCK_STREAM: c_int = 1; -pub const SOCK_DGRAM: c_int = 2; - -pub const SA_ONSTACK: c_int = 0x08000000; -pub const SA_SIGINFO: c_int = 0x00000004; -pub const SA_NOCLDWAIT: c_int = 0x00000002; - -pub const SIGCHLD: c_int = 17; -pub const SIGBUS: c_int = 7; -pub const SIGTTIN: c_int = 21; -pub const SIGTTOU: c_int = 22; -pub const SIGXCPU: c_int = 24; -pub const SIGXFSZ: c_int = 25; -pub const SIGVTALRM: c_int = 26; -pub const SIGPROF: c_int = 27; -pub const SIGWINCH: c_int = 28; -pub const SIGUSR1: c_int = 10; -pub const SIGUSR2: c_int = 12; -pub const SIGCONT: c_int = 18; -pub const SIGSTOP: c_int = 19; -pub const SIGTSTP: c_int = 20; -pub const SIGURG: c_int = 23; -pub const SIGIO: c_int = 29; -pub const SIGSYS: c_int = 31; -pub const SIGSTKFLT: c_int = 16; -pub const SIGPOLL: c_int = 29; -pub const SIGPWR: c_int = 30; -pub const SIG_SETMASK: c_int = 2; -pub const SIG_BLOCK: c_int = 0x000000; -pub const SIG_UNBLOCK: c_int = 0x01; - -pub const F_GETLK: c_int = 5; -pub const F_GETOWN: c_int = 9; -pub const F_SETLK: c_int = 6; -pub const F_SETLKW: c_int = 7; -pub const F_SETOWN: c_int = 8; - -pub const VEOF: usize = 4; - -pub const POLLWRNORM: c_short = 0x100; -pub const POLLWRBAND: c_short = 0x200; - -pub const SIGSTKSZ: size_t = 10240; -pub const MINSIGSTKSZ: size_t = 4096; - -// Syscall table -pub const SYS_restart_syscall: c_long = 0; -pub const SYS_exit: c_long = 1; -pub const SYS_fork: c_long = 2; -pub const SYS_read: c_long = 3; -pub const SYS_write: c_long = 4; -pub const SYS_open: c_long = 5; -pub const SYS_close: c_long = 6; -pub const SYS_waitpid: c_long = 7; -pub const SYS_creat: c_long = 8; -pub const SYS_link: c_long = 9; -pub const SYS_unlink: c_long = 10; -pub const SYS_execve: c_long = 11; -pub const SYS_chdir: c_long = 12; -pub const SYS_time: c_long = 13; -pub const SYS_mknod: c_long = 14; -pub const SYS_chmod: c_long = 15; -pub const SYS_lchown: c_long = 16; -pub const SYS_break: c_long = 17; -pub const SYS_oldstat: c_long = 18; -pub const SYS_lseek: c_long = 19; -pub const SYS_getpid: c_long = 20; -pub const SYS_mount: c_long = 21; -pub const SYS_umount: c_long = 22; -pub const SYS_setuid: c_long = 23; -pub const SYS_getuid: c_long = 24; -pub const SYS_stime: c_long = 25; -pub const SYS_ptrace: c_long = 26; -pub const SYS_alarm: c_long = 27; -pub const SYS_oldfstat: c_long = 28; -pub const SYS_pause: c_long = 29; -pub const SYS_utime: c_long = 30; -pub const SYS_stty: c_long = 31; -pub const SYS_gtty: c_long = 32; -pub const SYS_access: c_long = 33; -pub const SYS_nice: c_long = 34; -pub const SYS_ftime: c_long = 35; -pub const SYS_sync: c_long = 36; -pub const SYS_kill: c_long = 37; -pub const SYS_rename: c_long = 38; -pub const SYS_mkdir: c_long = 39; -pub const SYS_rmdir: c_long = 
40; -pub const SYS_dup: c_long = 41; -pub const SYS_pipe: c_long = 42; -pub const SYS_times: c_long = 43; -pub const SYS_prof: c_long = 44; -pub const SYS_brk: c_long = 45; -pub const SYS_setgid: c_long = 46; -pub const SYS_getgid: c_long = 47; -pub const SYS_signal: c_long = 48; -pub const SYS_geteuid: c_long = 49; -pub const SYS_getegid: c_long = 50; -pub const SYS_acct: c_long = 51; -pub const SYS_umount2: c_long = 52; -pub const SYS_lock: c_long = 53; -pub const SYS_ioctl: c_long = 54; -pub const SYS_fcntl: c_long = 55; -pub const SYS_mpx: c_long = 56; -pub const SYS_setpgid: c_long = 57; -pub const SYS_ulimit: c_long = 58; -pub const SYS_oldolduname: c_long = 59; -pub const SYS_umask: c_long = 60; -pub const SYS_chroot: c_long = 61; -pub const SYS_ustat: c_long = 62; -pub const SYS_dup2: c_long = 63; -pub const SYS_getppid: c_long = 64; -pub const SYS_getpgrp: c_long = 65; -pub const SYS_setsid: c_long = 66; -pub const SYS_sigaction: c_long = 67; -pub const SYS_sgetmask: c_long = 68; -pub const SYS_ssetmask: c_long = 69; -pub const SYS_setreuid: c_long = 70; -pub const SYS_setregid: c_long = 71; -pub const SYS_sigsuspend: c_long = 72; -pub const SYS_sigpending: c_long = 73; -pub const SYS_sethostname: c_long = 74; -pub const SYS_setrlimit: c_long = 75; -pub const SYS_getrlimit: c_long = 76; -pub const SYS_getrusage: c_long = 77; -pub const SYS_gettimeofday: c_long = 78; -pub const SYS_settimeofday: c_long = 79; -pub const SYS_getgroups: c_long = 80; -pub const SYS_setgroups: c_long = 81; -pub const SYS_select: c_long = 82; -pub const SYS_symlink: c_long = 83; -pub const SYS_oldlstat: c_long = 84; -pub const SYS_readlink: c_long = 85; -pub const SYS_uselib: c_long = 86; -pub const SYS_swapon: c_long = 87; -pub const SYS_reboot: c_long = 88; -pub const SYS_readdir: c_long = 89; -pub const SYS_mmap: c_long = 90; -pub const SYS_munmap: c_long = 91; -pub const SYS_truncate: c_long = 92; -pub const SYS_ftruncate: c_long = 93; -pub const SYS_fchmod: c_long = 94; -pub const SYS_fchown: c_long = 95; -pub const SYS_getpriority: c_long = 96; -pub const SYS_setpriority: c_long = 97; -pub const SYS_profil: c_long = 98; -pub const SYS_statfs: c_long = 99; -pub const SYS_fstatfs: c_long = 100; -pub const SYS_ioperm: c_long = 101; -pub const SYS_socketcall: c_long = 102; -pub const SYS_syslog: c_long = 103; -pub const SYS_setitimer: c_long = 104; -pub const SYS_getitimer: c_long = 105; -pub const SYS_stat: c_long = 106; -pub const SYS_lstat: c_long = 107; -pub const SYS_fstat: c_long = 108; -pub const SYS_olduname: c_long = 109; -pub const SYS_iopl: c_long = 110; -pub const SYS_vhangup: c_long = 111; -pub const SYS_idle: c_long = 112; -pub const SYS_vm86: c_long = 113; -pub const SYS_wait4: c_long = 114; -pub const SYS_swapoff: c_long = 115; -pub const SYS_sysinfo: c_long = 116; -pub const SYS_ipc: c_long = 117; -pub const SYS_fsync: c_long = 118; -pub const SYS_sigreturn: c_long = 119; -pub const SYS_clone: c_long = 120; -pub const SYS_setdomainname: c_long = 121; -pub const SYS_uname: c_long = 122; -pub const SYS_modify_ldt: c_long = 123; -pub const SYS_adjtimex: c_long = 124; -pub const SYS_mprotect: c_long = 125; -pub const SYS_sigprocmask: c_long = 126; -#[deprecated(since = "0.2.70", note = "Functional up to 2.6 kernel")] -pub const SYS_create_module: c_long = 127; -pub const SYS_init_module: c_long = 128; -pub const SYS_delete_module: c_long = 129; -#[deprecated(since = "0.2.70", note = "Functional up to 2.6 kernel")] -pub const SYS_get_kernel_syms: c_long = 130; -pub const SYS_quotactl: 
c_long = 131; -pub const SYS_getpgid: c_long = 132; -pub const SYS_fchdir: c_long = 133; -pub const SYS_bdflush: c_long = 134; -pub const SYS_sysfs: c_long = 135; -pub const SYS_personality: c_long = 136; -pub const SYS_afs_syscall: c_long = 137; /* Syscall for Andrew File System */ -pub const SYS_setfsuid: c_long = 138; -pub const SYS_setfsgid: c_long = 139; -pub const SYS__llseek: c_long = 140; -pub const SYS_getdents: c_long = 141; -pub const SYS__newselect: c_long = 142; -pub const SYS_flock: c_long = 143; -pub const SYS_msync: c_long = 144; -pub const SYS_readv: c_long = 145; -pub const SYS_writev: c_long = 146; -pub const SYS_getsid: c_long = 147; -pub const SYS_fdatasync: c_long = 148; -pub const SYS__sysctl: c_long = 149; -pub const SYS_mlock: c_long = 150; -pub const SYS_munlock: c_long = 151; -pub const SYS_mlockall: c_long = 152; -pub const SYS_munlockall: c_long = 153; -pub const SYS_sched_setparam: c_long = 154; -pub const SYS_sched_getparam: c_long = 155; -pub const SYS_sched_setscheduler: c_long = 156; -pub const SYS_sched_getscheduler: c_long = 157; -pub const SYS_sched_yield: c_long = 158; -pub const SYS_sched_get_priority_max: c_long = 159; -pub const SYS_sched_get_priority_min: c_long = 160; -pub const SYS_sched_rr_get_interval: c_long = 161; -pub const SYS_nanosleep: c_long = 162; -pub const SYS_mremap: c_long = 163; -pub const SYS_setresuid: c_long = 164; -pub const SYS_getresuid: c_long = 165; -#[deprecated(since = "0.2.70", note = "Functional up to 2.6 kernel")] -pub const SYS_query_module: c_long = 166; -pub const SYS_poll: c_long = 167; -pub const SYS_nfsservctl: c_long = 168; -pub const SYS_setresgid: c_long = 169; -pub const SYS_getresgid: c_long = 170; -pub const SYS_prctl: c_long = 171; -pub const SYS_rt_sigreturn: c_long = 172; -pub const SYS_rt_sigaction: c_long = 173; -pub const SYS_rt_sigprocmask: c_long = 174; -pub const SYS_rt_sigpending: c_long = 175; -pub const SYS_rt_sigtimedwait: c_long = 176; -pub const SYS_rt_sigqueueinfo: c_long = 177; -pub const SYS_rt_sigsuspend: c_long = 178; -pub const SYS_pread64: c_long = 179; -pub const SYS_pwrite64: c_long = 180; -pub const SYS_chown: c_long = 181; -pub const SYS_getcwd: c_long = 182; -pub const SYS_capget: c_long = 183; -pub const SYS_capset: c_long = 184; -pub const SYS_sigaltstack: c_long = 185; -pub const SYS_sendfile: c_long = 186; -pub const SYS_getpmsg: c_long = 187; /* some people actually want streams */ -pub const SYS_putpmsg: c_long = 188; /* some people actually want streams */ -pub const SYS_vfork: c_long = 189; -pub const SYS_ugetrlimit: c_long = 190; /* SuS compliant getrlimit */ -pub const SYS_readahead: c_long = 191; -pub const SYS_pciconfig_read: c_long = 198; -pub const SYS_pciconfig_write: c_long = 199; -pub const SYS_pciconfig_iobase: c_long = 200; -pub const SYS_multiplexer: c_long = 201; -pub const SYS_getdents64: c_long = 202; -pub const SYS_pivot_root: c_long = 203; -pub const SYS_madvise: c_long = 205; -pub const SYS_mincore: c_long = 206; -pub const SYS_gettid: c_long = 207; -pub const SYS_tkill: c_long = 208; -pub const SYS_setxattr: c_long = 209; -pub const SYS_lsetxattr: c_long = 210; -pub const SYS_fsetxattr: c_long = 211; -pub const SYS_getxattr: c_long = 212; -pub const SYS_lgetxattr: c_long = 213; -pub const SYS_fgetxattr: c_long = 214; -pub const SYS_listxattr: c_long = 215; -pub const SYS_llistxattr: c_long = 216; -pub const SYS_flistxattr: c_long = 217; -pub const SYS_removexattr: c_long = 218; -pub const SYS_lremovexattr: c_long = 219; -pub const SYS_fremovexattr: 
c_long = 220; -pub const SYS_futex: c_long = 221; -pub const SYS_sched_setaffinity: c_long = 222; -pub const SYS_sched_getaffinity: c_long = 223; -pub const SYS_tuxcall: c_long = 225; -pub const SYS_io_setup: c_long = 227; -pub const SYS_io_destroy: c_long = 228; -pub const SYS_io_getevents: c_long = 229; -pub const SYS_io_submit: c_long = 230; -pub const SYS_io_cancel: c_long = 231; -pub const SYS_set_tid_address: c_long = 232; -pub const SYS_exit_group: c_long = 234; -pub const SYS_lookup_dcookie: c_long = 235; -pub const SYS_epoll_create: c_long = 236; -pub const SYS_epoll_ctl: c_long = 237; -pub const SYS_epoll_wait: c_long = 238; -pub const SYS_remap_file_pages: c_long = 239; -pub const SYS_timer_create: c_long = 240; -pub const SYS_timer_settime: c_long = 241; -pub const SYS_timer_gettime: c_long = 242; -pub const SYS_timer_getoverrun: c_long = 243; -pub const SYS_timer_delete: c_long = 244; -pub const SYS_clock_settime: c_long = 245; -pub const SYS_clock_gettime: c_long = 246; -pub const SYS_clock_getres: c_long = 247; -pub const SYS_clock_nanosleep: c_long = 248; -pub const SYS_swapcontext: c_long = 249; -pub const SYS_tgkill: c_long = 250; -pub const SYS_utimes: c_long = 251; -pub const SYS_statfs64: c_long = 252; -pub const SYS_fstatfs64: c_long = 253; -pub const SYS_rtas: c_long = 255; -pub const SYS_sys_debug_setcontext: c_long = 256; -pub const SYS_migrate_pages: c_long = 258; -pub const SYS_mbind: c_long = 259; -pub const SYS_get_mempolicy: c_long = 260; -pub const SYS_set_mempolicy: c_long = 261; -pub const SYS_mq_open: c_long = 262; -pub const SYS_mq_unlink: c_long = 263; -pub const SYS_mq_timedsend: c_long = 264; -pub const SYS_mq_timedreceive: c_long = 265; -pub const SYS_mq_notify: c_long = 266; -pub const SYS_mq_getsetattr: c_long = 267; -pub const SYS_kexec_load: c_long = 268; -pub const SYS_add_key: c_long = 269; -pub const SYS_request_key: c_long = 270; -pub const SYS_keyctl: c_long = 271; -pub const SYS_waitid: c_long = 272; -pub const SYS_ioprio_set: c_long = 273; -pub const SYS_ioprio_get: c_long = 274; -pub const SYS_inotify_init: c_long = 275; -pub const SYS_inotify_add_watch: c_long = 276; -pub const SYS_inotify_rm_watch: c_long = 277; -pub const SYS_spu_run: c_long = 278; -pub const SYS_spu_create: c_long = 279; -pub const SYS_pselect6: c_long = 280; -pub const SYS_ppoll: c_long = 281; -pub const SYS_unshare: c_long = 282; -pub const SYS_splice: c_long = 283; -pub const SYS_tee: c_long = 284; -pub const SYS_vmsplice: c_long = 285; -pub const SYS_openat: c_long = 286; -pub const SYS_mkdirat: c_long = 287; -pub const SYS_mknodat: c_long = 288; -pub const SYS_fchownat: c_long = 289; -pub const SYS_futimesat: c_long = 290; -pub const SYS_newfstatat: c_long = 291; -pub const SYS_unlinkat: c_long = 292; -pub const SYS_renameat: c_long = 293; -pub const SYS_linkat: c_long = 294; -pub const SYS_symlinkat: c_long = 295; -pub const SYS_readlinkat: c_long = 296; -pub const SYS_fchmodat: c_long = 297; -pub const SYS_faccessat: c_long = 298; -pub const SYS_get_robust_list: c_long = 299; -pub const SYS_set_robust_list: c_long = 300; -pub const SYS_move_pages: c_long = 301; -pub const SYS_getcpu: c_long = 302; -pub const SYS_epoll_pwait: c_long = 303; -pub const SYS_utimensat: c_long = 304; -pub const SYS_signalfd: c_long = 305; -pub const SYS_timerfd_create: c_long = 306; -pub const SYS_eventfd: c_long = 307; -pub const SYS_sync_file_range2: c_long = 308; -pub const SYS_fallocate: c_long = 309; -pub const SYS_subpage_prot: c_long = 310; -pub const SYS_timerfd_settime: 
c_long = 311; -pub const SYS_timerfd_gettime: c_long = 312; -pub const SYS_signalfd4: c_long = 313; -pub const SYS_eventfd2: c_long = 314; -pub const SYS_epoll_create1: c_long = 315; -pub const SYS_dup3: c_long = 316; -pub const SYS_pipe2: c_long = 317; -pub const SYS_inotify_init1: c_long = 318; -pub const SYS_perf_event_open: c_long = 319; -pub const SYS_preadv: c_long = 320; -pub const SYS_pwritev: c_long = 321; -pub const SYS_rt_tgsigqueueinfo: c_long = 322; -pub const SYS_fanotify_init: c_long = 323; -pub const SYS_fanotify_mark: c_long = 324; -pub const SYS_prlimit64: c_long = 325; -pub const SYS_socket: c_long = 326; -pub const SYS_bind: c_long = 327; -pub const SYS_connect: c_long = 328; -pub const SYS_listen: c_long = 329; -pub const SYS_accept: c_long = 330; -pub const SYS_getsockname: c_long = 331; -pub const SYS_getpeername: c_long = 332; -pub const SYS_socketpair: c_long = 333; -pub const SYS_send: c_long = 334; -pub const SYS_sendto: c_long = 335; -pub const SYS_recv: c_long = 336; -pub const SYS_recvfrom: c_long = 337; -pub const SYS_shutdown: c_long = 338; -pub const SYS_setsockopt: c_long = 339; -pub const SYS_getsockopt: c_long = 340; -pub const SYS_sendmsg: c_long = 341; -pub const SYS_recvmsg: c_long = 342; -pub const SYS_recvmmsg: c_long = 343; -pub const SYS_accept4: c_long = 344; -pub const SYS_name_to_handle_at: c_long = 345; -pub const SYS_open_by_handle_at: c_long = 346; -pub const SYS_clock_adjtime: c_long = 347; -pub const SYS_syncfs: c_long = 348; -pub const SYS_sendmmsg: c_long = 349; -pub const SYS_setns: c_long = 350; -pub const SYS_process_vm_readv: c_long = 351; -pub const SYS_process_vm_writev: c_long = 352; -pub const SYS_finit_module: c_long = 353; -pub const SYS_kcmp: c_long = 354; -pub const SYS_sched_setattr: c_long = 355; -pub const SYS_sched_getattr: c_long = 356; -pub const SYS_renameat2: c_long = 357; -pub const SYS_seccomp: c_long = 358; -pub const SYS_getrandom: c_long = 359; -pub const SYS_memfd_create: c_long = 360; -pub const SYS_bpf: c_long = 361; -pub const SYS_execveat: c_long = 362; -pub const SYS_switch_endian: c_long = 363; -pub const SYS_userfaultfd: c_long = 364; -pub const SYS_membarrier: c_long = 365; -pub const SYS_mlock2: c_long = 378; -pub const SYS_copy_file_range: c_long = 379; -pub const SYS_preadv2: c_long = 380; -pub const SYS_pwritev2: c_long = 381; -pub const SYS_kexec_file_load: c_long = 382; -pub const SYS_statx: c_long = 383; -pub const SYS_pkey_alloc: c_long = 384; -pub const SYS_pkey_free: c_long = 385; -pub const SYS_pkey_mprotect: c_long = 386; -pub const SYS_rseq: c_long = 387; -pub const SYS_io_pgetevents: c_long = 388; -pub const SYS_semtimedop: c_long = 392; -pub const SYS_semget: c_long = 393; -pub const SYS_semctl: c_long = 394; -pub const SYS_shmget: c_long = 395; -pub const SYS_shmctl: c_long = 396; -pub const SYS_shmat: c_long = 397; -pub const SYS_shmdt: c_long = 398; -pub const SYS_msgget: c_long = 399; -pub const SYS_msgsnd: c_long = 400; -pub const SYS_msgrcv: c_long = 401; -pub const SYS_msgctl: c_long = 402; -pub const SYS_pidfd_send_signal: c_long = 424; -pub const SYS_io_uring_setup: c_long = 425; -pub const SYS_io_uring_enter: c_long = 426; -pub const SYS_io_uring_register: c_long = 427; -pub const SYS_open_tree: c_long = 428; -pub const SYS_move_mount: c_long = 429; -pub const SYS_fsopen: c_long = 430; -pub const SYS_fsconfig: c_long = 431; -pub const SYS_fsmount: c_long = 432; -pub const SYS_fspick: c_long = 433; -pub const SYS_pidfd_open: c_long = 434; -pub const SYS_clone3: c_long = 435; -pub 
const SYS_close_range: c_long = 436; -pub const SYS_openat2: c_long = 437; -pub const SYS_pidfd_getfd: c_long = 438; -pub const SYS_faccessat2: c_long = 439; -pub const SYS_process_madvise: c_long = 440; -pub const SYS_epoll_pwait2: c_long = 441; -pub const SYS_mount_setattr: c_long = 442; -pub const SYS_quotactl_fd: c_long = 443; -pub const SYS_landlock_create_ruleset: c_long = 444; -pub const SYS_landlock_add_rule: c_long = 445; -pub const SYS_landlock_restrict_self: c_long = 446; -pub const SYS_memfd_secret: c_long = 447; -pub const SYS_process_mrelease: c_long = 448; -pub const SYS_futex_waitv: c_long = 449; -pub const SYS_set_mempolicy_home_node: c_long = 450; - -pub const EDEADLK: c_int = 35; -pub const EDEADLOCK: c_int = 58; - -pub const EXTPROC: crate::tcflag_t = 0x10000000; -pub const VEOL: usize = 6; -pub const VEOL2: usize = 8; -pub const VMIN: usize = 5; -pub const IEXTEN: crate::tcflag_t = 0x00000400; -pub const TOSTOP: crate::tcflag_t = 0x00400000; -pub const FLUSHO: crate::tcflag_t = 0x00800000; - -pub const MCL_CURRENT: c_int = 0x2000; -pub const MCL_FUTURE: c_int = 0x4000; -pub const MCL_ONFAULT: c_int = 0x8000; -pub const CBAUD: crate::tcflag_t = 0xff; -pub const TAB1: c_int = 0x400; -pub const TAB2: c_int = 0x800; -pub const TAB3: c_int = 0xc00; -pub const CR1: c_int = 0x1000; -pub const CR2: c_int = 0x2000; -pub const CR3: c_int = 0x3000; -pub const FF1: c_int = 0x4000; -pub const BS1: c_int = 0x8000; -pub const VT1: c_int = 0x10000; -pub const VWERASE: usize = 10; -pub const VREPRINT: usize = 11; -pub const VSUSP: usize = 12; -pub const VSTART: usize = 13; -pub const VSTOP: usize = 14; -pub const VDISCARD: usize = 16; -pub const VTIME: usize = 7; -pub const IXON: crate::tcflag_t = 0x00000200; -pub const IXOFF: crate::tcflag_t = 0x00000400; -pub const ONLCR: crate::tcflag_t = 0x2; -pub const CSIZE: crate::tcflag_t = 0x00000300; - -pub const CS6: crate::tcflag_t = 0x00000100; -pub const CS7: crate::tcflag_t = 0x00000200; -pub const CS8: crate::tcflag_t = 0x00000300; -pub const CSTOPB: crate::tcflag_t = 0x00000400; -pub const CREAD: crate::tcflag_t = 0x00000800; -pub const PARENB: crate::tcflag_t = 0x00001000; -pub const PARODD: crate::tcflag_t = 0x00002000; -pub const HUPCL: crate::tcflag_t = 0x00004000; -pub const CLOCAL: crate::tcflag_t = 0x00008000; -pub const ECHOKE: crate::tcflag_t = 0x00000001; -pub const ECHOE: crate::tcflag_t = 0x00000002; -pub const ECHOK: crate::tcflag_t = 0x00000004; -pub const ECHONL: crate::tcflag_t = 0x00000010; -pub const ECHOPRT: crate::tcflag_t = 0x00000020; -pub const ECHOCTL: crate::tcflag_t = 0x00000040; -pub const ISIG: crate::tcflag_t = 0x00000080; -pub const ICANON: crate::tcflag_t = 0x00000100; -pub const PENDIN: crate::tcflag_t = 0x20000000; -pub const NOFLSH: crate::tcflag_t = 0x80000000; - -pub const CIBAUD: crate::tcflag_t = 0o77600000; -pub const CBAUDEX: crate::tcflag_t = 0o0000020; -pub const VSWTC: usize = 9; -pub const OLCUC: crate::tcflag_t = 0o000004; -pub const NLDLY: crate::tcflag_t = 0o0001400; -pub const CRDLY: crate::tcflag_t = 0o0030000; -pub const TABDLY: crate::tcflag_t = 0o0006000; -pub const BSDLY: crate::tcflag_t = 0o0100000; -pub const FFDLY: crate::tcflag_t = 0o0040000; -pub const VTDLY: crate::tcflag_t = 0o0200000; -pub const XTABS: crate::tcflag_t = 0o00006000; - -pub const B57600: crate::speed_t = 0o00020; -pub const B115200: crate::speed_t = 0o00021; -pub const B230400: crate::speed_t = 0o00022; -pub const B460800: crate::speed_t = 0o00023; -pub const B500000: crate::speed_t = 0o00024; -pub const 
B576000: crate::speed_t = 0o00025; -pub const B921600: crate::speed_t = 0o00026; -pub const B1000000: crate::speed_t = 0o00027; -pub const B1152000: crate::speed_t = 0o00030; -pub const B1500000: crate::speed_t = 0o00031; -pub const B2000000: crate::speed_t = 0o00032; -pub const B2500000: crate::speed_t = 0o00033; -pub const B3000000: crate::speed_t = 0o00034; -pub const B3500000: crate::speed_t = 0o00035; -pub const B4000000: crate::speed_t = 0o00036; diff --git a/vendor/libc/src/unix/linux_like/linux/musl/b64/riscv64/mod.rs b/vendor/libc/src/unix/linux_like/linux/musl/b64/riscv64/mod.rs deleted file mode 100644 index 8389af961cf584..00000000000000 --- a/vendor/libc/src/unix/linux_like/linux/musl/b64/riscv64/mod.rs +++ /dev/null @@ -1,672 +0,0 @@ -//! RISC-V-specific definitions for 64-bit linux-like values - -use crate::prelude::*; -use crate::{off64_t, off_t}; - -pub type wchar_t = c_int; - -pub type nlink_t = c_uint; -pub type blksize_t = c_int; -pub type __u64 = c_ulonglong; -pub type __s64 = c_longlong; - -s! { - pub struct stat { - pub st_dev: crate::dev_t, - pub st_ino: crate::ino_t, - pub st_mode: crate::mode_t, - pub st_nlink: crate::nlink_t, - pub st_uid: crate::uid_t, - pub st_gid: crate::gid_t, - pub st_rdev: crate::dev_t, - pub __pad1: crate::dev_t, - pub st_size: off_t, - pub st_blksize: crate::blksize_t, - pub __pad2: c_int, - pub st_blocks: crate::blkcnt_t, - pub st_atime: crate::time_t, - pub st_atime_nsec: c_long, - pub st_mtime: crate::time_t, - pub st_mtime_nsec: c_long, - pub st_ctime: crate::time_t, - pub st_ctime_nsec: c_long, - __unused: [c_int; 2usize], - } - - pub struct stat64 { - pub st_dev: crate::dev_t, - pub st_ino: crate::ino64_t, - pub st_mode: crate::mode_t, - pub st_nlink: crate::nlink_t, - pub st_uid: crate::uid_t, - pub st_gid: crate::gid_t, - pub st_rdev: crate::dev_t, - pub __pad1: crate::dev_t, - pub st_size: off64_t, - pub st_blksize: crate::blksize_t, - pub __pad2: c_int, - pub st_blocks: crate::blkcnt_t, - pub st_atime: crate::time_t, - pub st_atime_nsec: c_long, - pub st_mtime: crate::time_t, - pub st_mtime_nsec: c_long, - pub st_ctime: crate::time_t, - pub st_ctime_nsec: c_long, - __unused: [c_int; 2], - } - - pub struct ipc_perm { - pub __key: crate::key_t, - pub uid: crate::uid_t, - pub gid: crate::gid_t, - pub cuid: crate::uid_t, - pub cgid: crate::gid_t, - pub mode: c_ushort, - __pad1: c_ushort, - pub __seq: c_ushort, - __pad2: c_ushort, - __unused1: c_ulong, - __unused2: c_ulong, - } - - #[repr(align(8))] - pub struct clone_args { - pub flags: c_ulonglong, - pub pidfd: c_ulonglong, - pub child_tid: c_ulonglong, - pub parent_tid: c_ulonglong, - pub exit_signal: c_ulonglong, - pub stack: c_ulonglong, - pub stack_size: c_ulonglong, - pub tls: c_ulonglong, - pub set_tid: c_ulonglong, - pub set_tid_size: c_ulonglong, - pub cgroup: c_ulonglong, - } -} - -s_no_extra_traits! 
{ - pub struct ucontext_t { - pub __uc_flags: c_ulong, - pub uc_link: *mut ucontext_t, - pub uc_stack: crate::stack_t, - pub uc_sigmask: crate::sigset_t, - pub uc_mcontext: mcontext_t, - } - - #[repr(align(16))] - pub struct mcontext_t { - pub __gregs: [c_ulong; 32], - pub __fpregs: __riscv_mc_fp_state, - } - - pub union __riscv_mc_fp_state { - pub __f: __riscv_mc_f_ext_state, - pub __d: __riscv_mc_d_ext_state, - pub __q: __riscv_mc_q_ext_state, - } - - pub struct __riscv_mc_f_ext_state { - pub __f: [c_uint; 32], - pub __fcsr: c_uint, - } - - pub struct __riscv_mc_d_ext_state { - pub __f: [c_ulonglong; 32], - pub __fcsr: c_uint, - } - - #[repr(align(16))] - pub struct __riscv_mc_q_ext_state { - pub __f: [c_ulonglong; 64], - pub __fcsr: c_uint, - pub __glibc_reserved: [c_uint; 3], - } -} - -pub const SYS_read: c_long = 63; -pub const SYS_write: c_long = 64; -pub const SYS_close: c_long = 57; -pub const SYS_fstat: c_long = 80; -pub const SYS_lseek: c_long = 62; -pub const SYS_mmap: c_long = 222; -pub const SYS_mprotect: c_long = 226; -pub const SYS_munmap: c_long = 215; -pub const SYS_brk: c_long = 214; -pub const SYS_rt_sigaction: c_long = 134; -pub const SYS_rt_sigprocmask: c_long = 135; -pub const SYS_rt_sigreturn: c_long = 139; -pub const SYS_ioctl: c_long = 29; -pub const SYS_pread64: c_long = 67; -pub const SYS_pwrite64: c_long = 68; -pub const SYS_readv: c_long = 65; -pub const SYS_writev: c_long = 66; -pub const SYS_sched_yield: c_long = 124; -pub const SYS_mremap: c_long = 216; -pub const SYS_msync: c_long = 227; -pub const SYS_mincore: c_long = 232; -pub const SYS_madvise: c_long = 233; -pub const SYS_shmget: c_long = 194; -pub const SYS_shmat: c_long = 196; -pub const SYS_shmctl: c_long = 195; -pub const SYS_dup: c_long = 23; -pub const SYS_nanosleep: c_long = 101; -pub const SYS_getitimer: c_long = 102; -pub const SYS_setitimer: c_long = 103; -pub const SYS_getpid: c_long = 172; -pub const SYS_sendfile: c_long = 71; -pub const SYS_socket: c_long = 198; -pub const SYS_connect: c_long = 203; -pub const SYS_accept: c_long = 202; -pub const SYS_sendto: c_long = 206; -pub const SYS_recvfrom: c_long = 207; -pub const SYS_sendmsg: c_long = 211; -pub const SYS_recvmsg: c_long = 212; -pub const SYS_shutdown: c_long = 210; -pub const SYS_bind: c_long = 200; -pub const SYS_listen: c_long = 201; -pub const SYS_getsockname: c_long = 204; -pub const SYS_getpeername: c_long = 205; -pub const SYS_socketpair: c_long = 199; -pub const SYS_setsockopt: c_long = 208; -pub const SYS_getsockopt: c_long = 209; -pub const SYS_clone: c_long = 220; -pub const SYS_execve: c_long = 221; -pub const SYS_exit: c_long = 93; -pub const SYS_wait4: c_long = 260; -pub const SYS_kill: c_long = 129; -pub const SYS_uname: c_long = 160; -pub const SYS_semget: c_long = 190; -pub const SYS_semop: c_long = 193; -pub const SYS_semctl: c_long = 191; -pub const SYS_shmdt: c_long = 197; -pub const SYS_msgget: c_long = 186; -pub const SYS_msgsnd: c_long = 189; -pub const SYS_msgrcv: c_long = 188; -pub const SYS_msgctl: c_long = 187; -pub const SYS_fcntl: c_long = 25; -pub const SYS_flock: c_long = 32; -pub const SYS_fsync: c_long = 82; -pub const SYS_fdatasync: c_long = 83; -pub const SYS_truncate: c_long = 45; -pub const SYS_ftruncate: c_long = 46; -pub const SYS_getcwd: c_long = 17; -pub const SYS_chdir: c_long = 49; -pub const SYS_fchdir: c_long = 50; -pub const SYS_fchmod: c_long = 52; -pub const SYS_fchown: c_long = 55; -pub const SYS_umask: c_long = 166; -pub const SYS_gettimeofday: c_long = 169; -pub const SYS_getrlimit: 
c_long = 163; -pub const SYS_getrusage: c_long = 165; -pub const SYS_sysinfo: c_long = 179; -pub const SYS_times: c_long = 153; -pub const SYS_ptrace: c_long = 117; -pub const SYS_getuid: c_long = 174; -pub const SYS_syslog: c_long = 116; -pub const SYS_getgid: c_long = 176; -pub const SYS_setuid: c_long = 146; -pub const SYS_setgid: c_long = 144; -pub const SYS_geteuid: c_long = 175; -pub const SYS_getegid: c_long = 177; -pub const SYS_setpgid: c_long = 154; -pub const SYS_getppid: c_long = 173; -pub const SYS_setsid: c_long = 157; -pub const SYS_setreuid: c_long = 145; -pub const SYS_setregid: c_long = 143; -pub const SYS_getgroups: c_long = 158; -pub const SYS_setgroups: c_long = 159; -pub const SYS_setresuid: c_long = 147; -pub const SYS_getresuid: c_long = 148; -pub const SYS_setresgid: c_long = 149; -pub const SYS_getresgid: c_long = 150; -pub const SYS_getpgid: c_long = 155; -pub const SYS_setfsuid: c_long = 151; -pub const SYS_setfsgid: c_long = 152; -pub const SYS_getsid: c_long = 156; -pub const SYS_capget: c_long = 90; -pub const SYS_capset: c_long = 91; -pub const SYS_rt_sigpending: c_long = 136; -pub const SYS_rt_sigtimedwait: c_long = 137; -pub const SYS_rt_sigqueueinfo: c_long = 138; -pub const SYS_rt_sigsuspend: c_long = 133; -pub const SYS_sigaltstack: c_long = 132; -pub const SYS_personality: c_long = 92; -pub const SYS_statfs: c_long = 43; -pub const SYS_fstatfs: c_long = 44; -pub const SYS_getpriority: c_long = 141; -pub const SYS_setpriority: c_long = 140; -pub const SYS_sched_setparam: c_long = 118; -pub const SYS_sched_getparam: c_long = 121; -pub const SYS_sched_setscheduler: c_long = 119; -pub const SYS_sched_getscheduler: c_long = 120; -pub const SYS_sched_get_priority_max: c_long = 125; -pub const SYS_sched_get_priority_min: c_long = 126; -pub const SYS_sched_rr_get_interval: c_long = 127; -pub const SYS_mlock: c_long = 228; -pub const SYS_munlock: c_long = 229; -pub const SYS_mlockall: c_long = 230; -pub const SYS_munlockall: c_long = 231; -pub const SYS_vhangup: c_long = 58; -pub const SYS_pivot_root: c_long = 41; -pub const SYS_prctl: c_long = 167; -pub const SYS_adjtimex: c_long = 171; -pub const SYS_setrlimit: c_long = 164; -pub const SYS_chroot: c_long = 51; -pub const SYS_sync: c_long = 81; -pub const SYS_acct: c_long = 89; -pub const SYS_settimeofday: c_long = 170; -pub const SYS_mount: c_long = 40; -pub const SYS_umount2: c_long = 39; -pub const SYS_swapon: c_long = 224; -pub const SYS_swapoff: c_long = 225; -pub const SYS_reboot: c_long = 142; -pub const SYS_sethostname: c_long = 161; -pub const SYS_setdomainname: c_long = 162; -pub const SYS_init_module: c_long = 105; -pub const SYS_delete_module: c_long = 106; -pub const SYS_quotactl: c_long = 60; -pub const SYS_nfsservctl: c_long = 42; -pub const SYS_gettid: c_long = 178; -pub const SYS_readahead: c_long = 213; -pub const SYS_setxattr: c_long = 5; -pub const SYS_lsetxattr: c_long = 6; -pub const SYS_fsetxattr: c_long = 7; -pub const SYS_getxattr: c_long = 8; -pub const SYS_lgetxattr: c_long = 9; -pub const SYS_fgetxattr: c_long = 10; -pub const SYS_listxattr: c_long = 11; -pub const SYS_llistxattr: c_long = 12; -pub const SYS_flistxattr: c_long = 13; -pub const SYS_removexattr: c_long = 14; -pub const SYS_lremovexattr: c_long = 15; -pub const SYS_fremovexattr: c_long = 16; -pub const SYS_tkill: c_long = 130; -pub const SYS_futex: c_long = 98; -pub const SYS_sched_setaffinity: c_long = 122; -pub const SYS_sched_getaffinity: c_long = 123; -pub const SYS_io_setup: c_long = 0; -pub const SYS_io_destroy: 
c_long = 1; -pub const SYS_io_getevents: c_long = 4; -pub const SYS_io_submit: c_long = 2; -pub const SYS_io_cancel: c_long = 3; -pub const SYS_lookup_dcookie: c_long = 18; -pub const SYS_remap_file_pages: c_long = 234; -pub const SYS_getdents64: c_long = 61; -pub const SYS_set_tid_address: c_long = 96; -pub const SYS_restart_syscall: c_long = 128; -pub const SYS_semtimedop: c_long = 192; -pub const SYS_fadvise64: c_long = 223; -pub const SYS_timer_create: c_long = 107; -pub const SYS_timer_settime: c_long = 110; -pub const SYS_timer_gettime: c_long = 108; -pub const SYS_timer_getoverrun: c_long = 109; -pub const SYS_timer_delete: c_long = 111; -pub const SYS_clock_settime: c_long = 112; -pub const SYS_clock_gettime: c_long = 113; -pub const SYS_clock_getres: c_long = 114; -pub const SYS_clock_nanosleep: c_long = 115; -pub const SYS_exit_group: c_long = 94; -pub const SYS_epoll_ctl: c_long = 21; -pub const SYS_tgkill: c_long = 131; -pub const SYS_mbind: c_long = 235; -pub const SYS_set_mempolicy: c_long = 237; -pub const SYS_get_mempolicy: c_long = 236; -pub const SYS_mq_open: c_long = 180; -pub const SYS_mq_unlink: c_long = 181; -pub const SYS_mq_timedsend: c_long = 182; -pub const SYS_mq_timedreceive: c_long = 183; -pub const SYS_mq_notify: c_long = 184; -pub const SYS_mq_getsetattr: c_long = 185; -pub const SYS_kexec_load: c_long = 104; -pub const SYS_waitid: c_long = 95; -pub const SYS_add_key: c_long = 217; -pub const SYS_request_key: c_long = 218; -pub const SYS_keyctl: c_long = 219; -pub const SYS_ioprio_set: c_long = 30; -pub const SYS_ioprio_get: c_long = 31; -pub const SYS_inotify_add_watch: c_long = 27; -pub const SYS_inotify_rm_watch: c_long = 28; -pub const SYS_migrate_pages: c_long = 238; -pub const SYS_openat: c_long = 56; -pub const SYS_mkdirat: c_long = 34; -pub const SYS_mknodat: c_long = 33; -pub const SYS_fchownat: c_long = 54; -pub const SYS_newfstatat: c_long = 79; -pub const SYS_unlinkat: c_long = 35; -pub const SYS_linkat: c_long = 37; -pub const SYS_symlinkat: c_long = 36; -pub const SYS_readlinkat: c_long = 78; -pub const SYS_fchmodat: c_long = 53; -pub const SYS_faccessat: c_long = 48; -pub const SYS_pselect6: c_long = 72; -pub const SYS_ppoll: c_long = 73; -pub const SYS_unshare: c_long = 97; -pub const SYS_set_robust_list: c_long = 99; -pub const SYS_get_robust_list: c_long = 100; -pub const SYS_splice: c_long = 76; -pub const SYS_tee: c_long = 77; -pub const SYS_sync_file_range: c_long = 84; -pub const SYS_vmsplice: c_long = 75; -pub const SYS_move_pages: c_long = 239; -pub const SYS_utimensat: c_long = 88; -pub const SYS_epoll_pwait: c_long = 22; -pub const SYS_timerfd_create: c_long = 85; -pub const SYS_fallocate: c_long = 47; -pub const SYS_timerfd_settime: c_long = 86; -pub const SYS_timerfd_gettime: c_long = 87; -pub const SYS_accept4: c_long = 242; -pub const SYS_signalfd4: c_long = 74; -pub const SYS_eventfd2: c_long = 19; -pub const SYS_epoll_create1: c_long = 20; -pub const SYS_dup3: c_long = 24; -pub const SYS_pipe2: c_long = 59; -pub const SYS_inotify_init1: c_long = 26; -pub const SYS_preadv: c_long = 69; -pub const SYS_pwritev: c_long = 70; -pub const SYS_rt_tgsigqueueinfo: c_long = 240; -pub const SYS_perf_event_open: c_long = 241; -pub const SYS_recvmmsg: c_long = 243; -pub const SYS_fanotify_init: c_long = 262; -pub const SYS_fanotify_mark: c_long = 263; -pub const SYS_prlimit64: c_long = 261; -pub const SYS_name_to_handle_at: c_long = 264; -pub const SYS_open_by_handle_at: c_long = 265; -pub const SYS_clock_adjtime: c_long = 266; -pub const 
SYS_syncfs: c_long = 267; -pub const SYS_sendmmsg: c_long = 269; -pub const SYS_setns: c_long = 268; -pub const SYS_getcpu: c_long = 168; -pub const SYS_process_vm_readv: c_long = 270; -pub const SYS_process_vm_writev: c_long = 271; -pub const SYS_kcmp: c_long = 272; -pub const SYS_finit_module: c_long = 273; -pub const SYS_sched_setattr: c_long = 274; -pub const SYS_sched_getattr: c_long = 275; -pub const SYS_renameat2: c_long = 276; -pub const SYS_seccomp: c_long = 277; -pub const SYS_getrandom: c_long = 278; -pub const SYS_memfd_create: c_long = 279; -pub const SYS_bpf: c_long = 280; -pub const SYS_execveat: c_long = 281; -pub const SYS_userfaultfd: c_long = 282; -pub const SYS_membarrier: c_long = 283; -pub const SYS_mlock2: c_long = 284; -pub const SYS_copy_file_range: c_long = 285; -pub const SYS_preadv2: c_long = 286; -pub const SYS_pwritev2: c_long = 287; -pub const SYS_pkey_mprotect: c_long = 288; -pub const SYS_pkey_alloc: c_long = 289; -pub const SYS_pkey_free: c_long = 290; -pub const SYS_statx: c_long = 291; -pub const SYS_io_pgetevents: c_long = 292; -pub const SYS_rseq: c_long = 293; -pub const SYS_pidfd_send_signal: c_long = 424; -pub const SYS_io_uring_setup: c_long = 425; -pub const SYS_io_uring_enter: c_long = 426; -pub const SYS_io_uring_register: c_long = 427; -pub const SYS_open_tree: c_long = 428; -pub const SYS_move_mount: c_long = 429; -pub const SYS_fsopen: c_long = 430; -pub const SYS_fsconfig: c_long = 431; -pub const SYS_fsmount: c_long = 432; -pub const SYS_fspick: c_long = 433; -pub const SYS_pidfd_open: c_long = 434; -pub const SYS_clone3: c_long = 435; -pub const SYS_close_range: c_long = 436; -pub const SYS_openat2: c_long = 437; -pub const SYS_pidfd_getfd: c_long = 438; -pub const SYS_faccessat2: c_long = 439; -pub const SYS_process_madvise: c_long = 440; -pub const SYS_epoll_pwait2: c_long = 441; -pub const SYS_mount_setattr: c_long = 442; -pub const SYS_landlock_create_ruleset: c_long = 444; -pub const SYS_landlock_add_rule: c_long = 445; -pub const SYS_landlock_restrict_self: c_long = 446; - -pub const O_APPEND: c_int = 1024; -pub const O_DIRECT: c_int = 0x4000; -pub const O_DIRECTORY: c_int = 0x10000; -pub const O_LARGEFILE: c_int = 0o100000; -pub const O_NOFOLLOW: c_int = 0x20000; -pub const O_CREAT: c_int = 64; -pub const O_EXCL: c_int = 128; -pub const O_NOCTTY: c_int = 256; -pub const O_NONBLOCK: c_int = 2048; -pub const O_SYNC: c_int = 1052672; -pub const O_RSYNC: c_int = 1052672; -pub const O_DSYNC: c_int = 4096; -pub const O_ASYNC: c_int = 0x2000; - -pub const SIGSTKSZ: size_t = 8192; -pub const MINSIGSTKSZ: size_t = 2048; - -pub const ENAMETOOLONG: c_int = 36; -pub const ENOLCK: c_int = 37; -pub const ENOSYS: c_int = 38; -pub const ENOTEMPTY: c_int = 39; -pub const ELOOP: c_int = 40; -pub const ENOMSG: c_int = 42; -pub const EIDRM: c_int = 43; -pub const ECHRNG: c_int = 44; -pub const EL2NSYNC: c_int = 45; -pub const EL3HLT: c_int = 46; -pub const EL3RST: c_int = 47; -pub const ELNRNG: c_int = 48; -pub const EUNATCH: c_int = 49; -pub const ENOCSI: c_int = 50; -pub const EL2HLT: c_int = 51; -pub const EBADE: c_int = 52; -pub const EBADR: c_int = 53; -pub const EXFULL: c_int = 54; -pub const ENOANO: c_int = 55; -pub const EBADRQC: c_int = 56; -pub const EBADSLT: c_int = 57; -pub const EMULTIHOP: c_int = 72; -pub const EOVERFLOW: c_int = 75; -pub const ENOTUNIQ: c_int = 76; -pub const EBADFD: c_int = 77; -pub const EBADMSG: c_int = 74; -pub const EREMCHG: c_int = 78; -pub const ELIBACC: c_int = 79; -pub const ELIBBAD: c_int = 80; -pub const 
ELIBSCN: c_int = 81; -pub const ELIBMAX: c_int = 82; -pub const ELIBEXEC: c_int = 83; -pub const EILSEQ: c_int = 84; -pub const ERESTART: c_int = 85; -pub const ESTRPIPE: c_int = 86; -pub const EUSERS: c_int = 87; -pub const ENOTSOCK: c_int = 88; -pub const EDESTADDRREQ: c_int = 89; -pub const EMSGSIZE: c_int = 90; -pub const EPROTOTYPE: c_int = 91; -pub const ENOPROTOOPT: c_int = 92; -pub const EPROTONOSUPPORT: c_int = 93; -pub const ESOCKTNOSUPPORT: c_int = 94; -pub const EOPNOTSUPP: c_int = 95; -pub const ENOTSUP: c_int = EOPNOTSUPP; -pub const EPFNOSUPPORT: c_int = 96; -pub const EAFNOSUPPORT: c_int = 97; -pub const EADDRINUSE: c_int = 98; -pub const EADDRNOTAVAIL: c_int = 99; -pub const ENETDOWN: c_int = 100; -pub const ENETUNREACH: c_int = 101; -pub const ENETRESET: c_int = 102; -pub const ECONNABORTED: c_int = 103; -pub const ECONNRESET: c_int = 104; -pub const ENOBUFS: c_int = 105; -pub const EISCONN: c_int = 106; -pub const ENOTCONN: c_int = 107; -pub const ESHUTDOWN: c_int = 108; -pub const ETOOMANYREFS: c_int = 109; -pub const ETIMEDOUT: c_int = 110; -pub const ECONNREFUSED: c_int = 111; -pub const EHOSTDOWN: c_int = 112; -pub const EHOSTUNREACH: c_int = 113; -pub const EALREADY: c_int = 114; -pub const EINPROGRESS: c_int = 115; -pub const ESTALE: c_int = 116; -pub const EUCLEAN: c_int = 117; -pub const ENOTNAM: c_int = 118; -pub const ENAVAIL: c_int = 119; -pub const EISNAM: c_int = 120; -pub const EREMOTEIO: c_int = 121; -pub const EDQUOT: c_int = 122; -pub const ENOMEDIUM: c_int = 123; -pub const EMEDIUMTYPE: c_int = 124; -pub const ECANCELED: c_int = 125; -pub const ENOKEY: c_int = 126; -pub const EKEYEXPIRED: c_int = 127; -pub const EKEYREVOKED: c_int = 128; -pub const EKEYREJECTED: c_int = 129; -pub const EOWNERDEAD: c_int = 130; -pub const ENOTRECOVERABLE: c_int = 131; -pub const EHWPOISON: c_int = 133; -pub const ERFKILL: c_int = 132; - -pub const SA_ONSTACK: c_int = 0x08000000; -pub const SA_SIGINFO: c_int = 0x00000004; -pub const SA_NOCLDWAIT: c_int = 0x00000002; - -pub const SIGCHLD: c_int = 17; -pub const SIGBUS: c_int = 7; -pub const SIGTTIN: c_int = 21; -pub const SIGTTOU: c_int = 22; -pub const SIGXCPU: c_int = 24; -pub const SIGXFSZ: c_int = 25; -pub const SIGVTALRM: c_int = 26; -pub const SIGPROF: c_int = 27; -pub const SIGWINCH: c_int = 28; -pub const SIGUSR1: c_int = 10; -pub const SIGUSR2: c_int = 12; -pub const SIGCONT: c_int = 18; -pub const SIGSTOP: c_int = 19; -pub const SIGTSTP: c_int = 20; -pub const SIGURG: c_int = 23; -pub const SIGIO: c_int = 29; -pub const SIGSYS: c_int = 31; -pub const SIGSTKFLT: c_int = 16; -pub const SIGPOLL: c_int = 29; -pub const SIGPWR: c_int = 30; -pub const SIG_SETMASK: c_int = 2; -pub const SIG_BLOCK: c_int = 0x000000; -pub const SIG_UNBLOCK: c_int = 0x01; - -pub const F_GETLK: c_int = 5; -pub const F_GETOWN: c_int = 9; -pub const F_SETLK: c_int = 6; -pub const F_SETLKW: c_int = 7; -pub const F_SETOWN: c_int = 8; - -pub const VEOF: usize = 4; - -pub const POLLWRNORM: c_short = 0x100; -pub const POLLWRBAND: c_short = 0x200; - -pub const SOCK_STREAM: c_int = 1; -pub const SOCK_DGRAM: c_int = 2; - -pub const MADV_SOFT_OFFLINE: c_int = 101; -pub const MAP_ANON: c_int = 0x0020; -pub const MAP_GROWSDOWN: c_int = 0x0100; -pub const MAP_DENYWRITE: c_int = 0x0800; -pub const MAP_EXECUTABLE: c_int = 0x01000; -pub const MAP_LOCKED: c_int = 0x02000; -pub const MAP_NORESERVE: c_int = 0x04000; -pub const MAP_POPULATE: c_int = 0x08000; -pub const MAP_NONBLOCK: c_int = 0x010000; -pub const MAP_STACK: c_int = 0x020000; -pub const 
MAP_HUGETLB: c_int = 0x040000; -pub const MAP_SYNC: c_int = 0x080000; - -pub const MCL_CURRENT: c_int = 0x0001; -pub const MCL_FUTURE: c_int = 0x0002; -pub const MCL_ONFAULT: c_int = 0x0004; -pub const CBAUD: crate::tcflag_t = 0o0010017; -pub const TAB1: c_int = 0x00000800; -pub const TAB2: c_int = 0x00001000; -pub const TAB3: c_int = 0x00001800; -pub const CR1: c_int = 0x00000200; -pub const CR2: c_int = 0x00000400; -pub const CR3: c_int = 0x00000600; -pub const FF1: c_int = 0x00008000; -pub const BS1: c_int = 0x00002000; -pub const VT1: c_int = 0x00004000; -pub const VWERASE: usize = 14; -pub const VREPRINT: usize = 12; -pub const VSUSP: usize = 10; -pub const VSTART: usize = 8; -pub const VSTOP: usize = 9; -pub const VDISCARD: usize = 13; -pub const VTIME: usize = 5; -pub const IXON: crate::tcflag_t = 0x00000400; -pub const IXOFF: crate::tcflag_t = 0x00001000; -pub const ONLCR: crate::tcflag_t = 0x4; -pub const CSIZE: crate::tcflag_t = 0x00000030; -pub const CS6: crate::tcflag_t = 0x00000010; -pub const CS7: crate::tcflag_t = 0x00000020; -pub const CS8: crate::tcflag_t = 0x00000030; -pub const CSTOPB: crate::tcflag_t = 0x00000040; -pub const CREAD: crate::tcflag_t = 0x00000080; -pub const PARENB: crate::tcflag_t = 0x00000100; -pub const PARODD: crate::tcflag_t = 0x00000200; -pub const HUPCL: crate::tcflag_t = 0x00000400; -pub const CLOCAL: crate::tcflag_t = 0x00000800; -pub const ECHOKE: crate::tcflag_t = 0x00000800; -pub const ECHOE: crate::tcflag_t = 0x00000010; -pub const ECHOK: crate::tcflag_t = 0x00000020; -pub const ECHONL: crate::tcflag_t = 0x00000040; -pub const ECHOPRT: crate::tcflag_t = 0x00000400; -pub const ECHOCTL: crate::tcflag_t = 0x00000200; -pub const ISIG: crate::tcflag_t = 0x00000001; -pub const ICANON: crate::tcflag_t = 0x00000002; -pub const PENDIN: crate::tcflag_t = 0x00004000; -pub const NOFLSH: crate::tcflag_t = 0x00000080; -pub const CIBAUD: crate::tcflag_t = 0o02003600000; -pub const CBAUDEX: crate::tcflag_t = 0o010000; -pub const VSWTC: usize = 7; -pub const OLCUC: crate::tcflag_t = 0o000002; -pub const NLDLY: crate::tcflag_t = 0o000400; -pub const CRDLY: crate::tcflag_t = 0o003000; -pub const TABDLY: crate::tcflag_t = 0o014000; -pub const BSDLY: crate::tcflag_t = 0o020000; -pub const FFDLY: crate::tcflag_t = 0o100000; -pub const VTDLY: crate::tcflag_t = 0o040000; -pub const XTABS: crate::tcflag_t = 0o014000; -pub const B57600: crate::speed_t = 0o010001; -pub const B115200: crate::speed_t = 0o010002; -pub const B230400: crate::speed_t = 0o010003; -pub const B460800: crate::speed_t = 0o010004; -pub const B500000: crate::speed_t = 0o010005; -pub const B576000: crate::speed_t = 0o010006; -pub const B921600: crate::speed_t = 0o010007; -pub const B1000000: crate::speed_t = 0o010010; -pub const B1152000: crate::speed_t = 0o010011; -pub const B1500000: crate::speed_t = 0o010012; -pub const B2000000: crate::speed_t = 0o010013; -pub const B2500000: crate::speed_t = 0o010014; -pub const B3000000: crate::speed_t = 0o010015; -pub const B3500000: crate::speed_t = 0o010016; -pub const B4000000: crate::speed_t = 0o010017; - -pub const EDEADLK: c_int = 35; -pub const EDEADLOCK: c_int = EDEADLK; -pub const EXTPROC: crate::tcflag_t = 0x00010000; -pub const VEOL: usize = 11; -pub const VEOL2: usize = 16; -pub const VMIN: usize = 6; -pub const IEXTEN: crate::tcflag_t = 0x00008000; -pub const TOSTOP: crate::tcflag_t = 0x00000100; -pub const FLUSHO: crate::tcflag_t = 0x00001000; - -pub const NGREG: usize = 32; -pub const REG_PC: usize = 0; -pub const REG_RA: usize = 1; -pub const 
REG_SP: usize = 2;
-pub const REG_TP: usize = 4;
-pub const REG_S0: usize = 8;
-pub const REG_S1: usize = 9;
-pub const REG_A0: usize = 10;
-pub const REG_S2: usize = 18;
-pub const REG_NARGS: usize = 8;
diff --git a/vendor/libc/src/unix/linux_like/linux/musl/b64/s390x.rs b/vendor/libc/src/unix/linux_like/linux/musl/b64/s390x.rs
deleted file mode 100644
index 06cc61685b7ac9..00000000000000
--- a/vendor/libc/src/unix/linux_like/linux/musl/b64/s390x.rs
+++ /dev/null
@@ -1,732 +0,0 @@
-use crate::off_t;
-use crate::prelude::*;
-
-pub type blksize_t = i64;
-pub type nlink_t = u64;
-pub type wchar_t = i32;
-pub type greg_t = u64;
-pub type __u64 = u64;
-pub type __s64 = i64;
-pub type statfs64 = statfs;
-
-s! {
- pub struct ipc_perm {
- #[cfg(musl_v1_2_3)]
- pub __key: crate::key_t,
- #[cfg(not(musl_v1_2_3))]
- #[deprecated(
- since = "0.2.173",
- note = "This field is incorrectly named and will be changed
- to __key in a future release."
- )]
- pub __ipc_perm_key: crate::key_t,
- pub uid: crate::uid_t,
- pub gid: crate::gid_t,
- pub cuid: crate::uid_t,
- pub cgid: crate::gid_t,
- pub mode: crate::mode_t,
- pub __seq: c_int,
- __pad1: c_long,
- __pad2: c_long,
- }
-
- pub struct stat {
- pub st_dev: crate::dev_t,
- pub st_ino: crate::ino_t,
- pub st_nlink: crate::nlink_t,
- pub st_mode: crate::mode_t,
- pub st_uid: crate::uid_t,
- pub st_gid: crate::gid_t,
- pub st_rdev: crate::dev_t,
- pub st_size: off_t,
- pub st_atime: crate::time_t,
- pub st_atime_nsec: c_long,
- pub st_mtime: crate::time_t,
- pub st_mtime_nsec: c_long,
- pub st_ctime: crate::time_t,
- pub st_ctime_nsec: c_long,
- pub st_blksize: crate::blksize_t,
- pub st_blocks: crate::blkcnt_t,
- __unused: [c_long; 3],
- }
-
- pub struct stat64 {
- pub st_dev: crate::dev_t,
- pub st_ino: crate::ino64_t,
- pub st_nlink: crate::nlink_t,
- pub st_mode: crate::mode_t,
- pub st_uid: crate::uid_t,
- pub st_gid: crate::gid_t,
- pub st_rdev: crate::dev_t,
- pub st_size: off_t,
- pub st_atime: crate::time_t,
- pub st_atime_nsec: c_long,
- pub st_mtime: crate::time_t,
- pub st_mtime_nsec: c_long,
- pub st_ctime: crate::time_t,
- pub st_ctime_nsec: c_long,
- pub st_blksize: crate::blksize_t,
- pub st_blocks: crate::blkcnt64_t,
- __unused: [c_long; 3],
- }
-
- pub struct statfs {
- pub f_type: c_uint,
- pub f_bsize: c_uint,
- pub f_blocks: crate::fsblkcnt_t,
- pub f_bfree: crate::fsblkcnt_t,
- pub f_bavail: crate::fsblkcnt_t,
- pub f_files: crate::fsfilcnt_t,
- pub f_ffree: crate::fsfilcnt_t,
- pub f_fsid: crate::fsid_t,
- pub f_namelen: c_uint,
- pub f_frsize: c_uint,
- pub f_flags: c_uint,
- pub f_spare: [c_uint; 4],
- }
-}
-
-s_no_extra_traits! {
- pub union fpreg_t {
- pub d: c_double,
- pub f: c_float,
- }
-}
-
-cfg_if!
{ - if #[cfg(feature = "extra_traits")] { - impl PartialEq for fpreg_t { - fn eq(&self, _other: &fpreg_t) -> bool { - unimplemented!("traits") - } - } - - impl Eq for fpreg_t {} - - impl hash::Hash for fpreg_t { - fn hash(&self, _state: &mut H) { - unimplemented!("traits") - } - } - } -} - -pub const VEOF: usize = 4; - -pub const EUCLEAN: c_int = 117; -pub const ENOTNAM: c_int = 118; -pub const ENAVAIL: c_int = 119; -pub const EISNAM: c_int = 120; -pub const EREMOTEIO: c_int = 121; -pub const EADDRINUSE: c_int = 98; -pub const EADDRNOTAVAIL: c_int = 99; -pub const ECONNABORTED: c_int = 103; -pub const ECONNREFUSED: c_int = 111; -pub const ECONNRESET: c_int = 104; -pub const EDEADLK: c_int = 35; -pub const ENOSYS: c_int = 38; -pub const ENOTCONN: c_int = 107; -pub const ETIMEDOUT: c_int = 110; -pub const O_APPEND: c_int = 1024; -pub const O_CREAT: c_int = 64; -pub const O_EXCL: c_int = 128; -pub const O_LARGEFILE: c_int = 0x8000; -pub const O_NONBLOCK: c_int = 2048; -pub const SA_NOCLDWAIT: c_int = 2; -pub const SA_ONSTACK: c_int = 0x08000000; -pub const SA_SIGINFO: c_int = 4; -pub const SIGBUS: c_int = 7; -pub const SIGSTKSZ: size_t = 0x2000; -pub const MINSIGSTKSZ: size_t = 2048; -pub const SIG_SETMASK: c_int = 2; - -pub const SOCK_STREAM: c_int = 1; -pub const SOCK_DGRAM: c_int = 2; - -pub const O_NOCTTY: c_int = 256; -pub const O_SYNC: c_int = 1052672; -pub const O_RSYNC: c_int = 1052672; -pub const O_DSYNC: c_int = 4096; -pub const O_DIRECT: c_int = 0x4000; -pub const O_DIRECTORY: c_int = 0x10000; -pub const O_NOFOLLOW: c_int = 0x20000; - -pub const MADV_SOFT_OFFLINE: c_int = 101; -pub const MAP_GROWSDOWN: c_int = 0x0100; -pub const MAP_LOCKED: c_int = 0x02000; -pub const MAP_NORESERVE: c_int = 0x04000; -pub const MAP_ANON: c_int = 0x0020; -pub const MAP_DENYWRITE: c_int = 0x0800; -pub const MAP_EXECUTABLE: c_int = 0x01000; -pub const MAP_POPULATE: c_int = 0x08000; -pub const MAP_NONBLOCK: c_int = 0x010000; -pub const MAP_STACK: c_int = 0x020000; -pub const MAP_HUGETLB: c_int = 0x040000; -pub const MAP_SYNC: c_int = 0x080000; - -pub const PTRACE_SYSEMU: c_int = 31; -pub const PTRACE_SYSEMU_SINGLESTEP: c_int = 32; - -pub const EDEADLOCK: c_int = 35; -pub const ENAMETOOLONG: c_int = 36; -pub const ENOLCK: c_int = 37; -pub const ENOTEMPTY: c_int = 39; -pub const ELOOP: c_int = 40; -pub const ENOMSG: c_int = 42; -pub const EIDRM: c_int = 43; -pub const ECHRNG: c_int = 44; -pub const EL2NSYNC: c_int = 45; -pub const EL3HLT: c_int = 46; -pub const EL3RST: c_int = 47; -pub const ELNRNG: c_int = 48; -pub const EUNATCH: c_int = 49; -pub const ENOCSI: c_int = 50; -pub const EL2HLT: c_int = 51; -pub const EBADE: c_int = 52; -pub const EBADR: c_int = 53; -pub const EXFULL: c_int = 54; -pub const ENOANO: c_int = 55; -pub const EBADRQC: c_int = 56; -pub const EBADSLT: c_int = 57; -pub const EMULTIHOP: c_int = 72; -pub const EOVERFLOW: c_int = 75; -pub const ENOTUNIQ: c_int = 76; -pub const EBADFD: c_int = 77; -pub const EBADMSG: c_int = 74; -pub const EREMCHG: c_int = 78; -pub const ELIBACC: c_int = 79; -pub const ELIBBAD: c_int = 80; -pub const ELIBSCN: c_int = 81; -pub const ELIBMAX: c_int = 82; -pub const ELIBEXEC: c_int = 83; -pub const EILSEQ: c_int = 84; -pub const ERESTART: c_int = 85; -pub const ESTRPIPE: c_int = 86; -pub const EUSERS: c_int = 87; -pub const ENOTSOCK: c_int = 88; -pub const EDESTADDRREQ: c_int = 89; -pub const EMSGSIZE: c_int = 90; -pub const EPROTOTYPE: c_int = 91; -pub const ENOPROTOOPT: c_int = 92; -pub const EPROTONOSUPPORT: c_int = 93; -pub const ESOCKTNOSUPPORT: c_int 
= 94; -pub const EOPNOTSUPP: c_int = 95; -pub const ENOTSUP: c_int = EOPNOTSUPP; -pub const EPFNOSUPPORT: c_int = 96; -pub const EAFNOSUPPORT: c_int = 97; -pub const ENETDOWN: c_int = 100; -pub const ENETUNREACH: c_int = 101; -pub const ENETRESET: c_int = 102; -pub const ENOBUFS: c_int = 105; -pub const EISCONN: c_int = 106; -pub const ESHUTDOWN: c_int = 108; -pub const ETOOMANYREFS: c_int = 109; -pub const EHOSTDOWN: c_int = 112; -pub const EHOSTUNREACH: c_int = 113; -pub const EALREADY: c_int = 114; -pub const EINPROGRESS: c_int = 115; -pub const ESTALE: c_int = 116; -pub const EDQUOT: c_int = 122; -pub const ENOMEDIUM: c_int = 123; -pub const EMEDIUMTYPE: c_int = 124; -pub const ECANCELED: c_int = 125; -pub const ENOKEY: c_int = 126; -pub const EKEYEXPIRED: c_int = 127; -pub const EKEYREVOKED: c_int = 128; -pub const EKEYREJECTED: c_int = 129; -pub const EOWNERDEAD: c_int = 130; -pub const ENOTRECOVERABLE: c_int = 131; -pub const EHWPOISON: c_int = 133; -pub const ERFKILL: c_int = 132; - -pub const SIGTTIN: c_int = 21; -pub const SIGTTOU: c_int = 22; -pub const SIGXCPU: c_int = 24; -pub const SIGXFSZ: c_int = 25; -pub const SIGVTALRM: c_int = 26; -pub const SIGPROF: c_int = 27; -pub const SIGWINCH: c_int = 28; -pub const SIGCHLD: c_int = 17; -pub const SIGUSR1: c_int = 10; -pub const SIGUSR2: c_int = 12; -pub const SIGCONT: c_int = 18; -pub const SIGSTOP: c_int = 19; -pub const SIGTSTP: c_int = 20; -pub const SIGURG: c_int = 23; -pub const SIGIO: c_int = 29; -pub const SIGSYS: c_int = 31; -pub const SIGSTKFLT: c_int = 16; -pub const SIGPOLL: c_int = 29; -pub const SIGPWR: c_int = 30; -pub const SIG_BLOCK: c_int = 0x000000; -pub const SIG_UNBLOCK: c_int = 0x01; - -pub const O_ASYNC: c_int = 0x2000; - -pub const VEOL: usize = 11; -pub const VEOL2: usize = 16; -pub const VMIN: usize = 6; -pub const IEXTEN: crate::tcflag_t = 0x00008000; -pub const TOSTOP: crate::tcflag_t = 0x00000100; -pub const FLUSHO: crate::tcflag_t = 0x00001000; - -pub const EXTPROC: crate::tcflag_t = 0x00010000; - -pub const MCL_CURRENT: c_int = 0x0001; -pub const MCL_FUTURE: c_int = 0x0002; -pub const MCL_ONFAULT: c_int = 0x0004; - -pub const F_GETLK: c_int = 5; -pub const F_GETOWN: c_int = 9; -pub const F_SETOWN: c_int = 8; -pub const F_SETLK: c_int = 6; -pub const F_SETLKW: c_int = 7; - -pub const VTIME: usize = 5; -pub const VSWTC: usize = 7; -pub const VSTART: usize = 8; -pub const VSTOP: usize = 9; -pub const VSUSP: usize = 10; -pub const VREPRINT: usize = 12; -pub const VDISCARD: usize = 13; -pub const VWERASE: usize = 14; -pub const OLCUC: crate::tcflag_t = 0o000002; -pub const ONLCR: crate::tcflag_t = 0o000004; -pub const NLDLY: crate::tcflag_t = 0o000400; -pub const CRDLY: crate::tcflag_t = 0o003000; -pub const CR1: crate::tcflag_t = 0x00000200; -pub const CR2: crate::tcflag_t = 0x00000400; -pub const CR3: crate::tcflag_t = 0x00000600; -pub const TABDLY: crate::tcflag_t = 0o014000; -pub const TAB1: crate::tcflag_t = 0x00000800; -pub const TAB2: crate::tcflag_t = 0x00001000; -pub const TAB3: crate::tcflag_t = 0x00001800; -pub const BSDLY: crate::tcflag_t = 0o020000; -pub const BS1: crate::tcflag_t = 0x00002000; -pub const FFDLY: crate::tcflag_t = 0o100000; -pub const FF1: crate::tcflag_t = 0x00008000; -pub const VTDLY: crate::tcflag_t = 0o040000; -pub const VT1: crate::tcflag_t = 0x00004000; -pub const XTABS: crate::tcflag_t = 0o014000; - -pub const CBAUD: crate::speed_t = 0o010017; -pub const CSIZE: crate::tcflag_t = 0o000060; -pub const CS6: crate::tcflag_t = 0o000020; -pub const CS7: crate::tcflag_t = 
0o000040; -pub const CS8: crate::tcflag_t = 0o000060; -pub const CSTOPB: crate::tcflag_t = 0o000100; -pub const CREAD: crate::tcflag_t = 0o000200; -pub const PARENB: crate::tcflag_t = 0o000400; -pub const PARODD: crate::tcflag_t = 0o001000; -pub const HUPCL: crate::tcflag_t = 0o002000; -pub const CLOCAL: crate::tcflag_t = 0o004000; -pub const CBAUDEX: crate::tcflag_t = 0o010000; -pub const B57600: crate::speed_t = 0o010001; -pub const B115200: crate::speed_t = 0o010002; -pub const B230400: crate::speed_t = 0o010003; -pub const B460800: crate::speed_t = 0o010004; -pub const B500000: crate::speed_t = 0o010005; -pub const B576000: crate::speed_t = 0o010006; -pub const B921600: crate::speed_t = 0o010007; -pub const B1000000: crate::speed_t = 0o010010; -pub const B1152000: crate::speed_t = 0o010011; -pub const B1500000: crate::speed_t = 0o010012; -pub const B2000000: crate::speed_t = 0o010013; -pub const B2500000: crate::speed_t = 0o010014; -pub const B3000000: crate::speed_t = 0o010015; -pub const B3500000: crate::speed_t = 0o010016; -pub const B4000000: crate::speed_t = 0o010017; -pub const CIBAUD: crate::tcflag_t = 0o02003600000; - -pub const ISIG: crate::tcflag_t = 0o000001; -pub const ICANON: crate::tcflag_t = 0o000002; -pub const XCASE: crate::tcflag_t = 0o000004; -pub const ECHOE: crate::tcflag_t = 0o000020; -pub const ECHOK: crate::tcflag_t = 0o000040; -pub const ECHONL: crate::tcflag_t = 0o000100; -pub const NOFLSH: crate::tcflag_t = 0o000200; -pub const ECHOCTL: crate::tcflag_t = 0o001000; -pub const ECHOPRT: crate::tcflag_t = 0o002000; -pub const ECHOKE: crate::tcflag_t = 0o004000; -pub const PENDIN: crate::tcflag_t = 0o040000; - -pub const POLLWRNORM: c_short = 0x100; -pub const POLLWRBAND: c_short = 0x200; - -pub const IXON: crate::tcflag_t = 0o002000; -pub const IXOFF: crate::tcflag_t = 0o010000; - -pub const SYS_exit: c_long = 1; -pub const SYS_fork: c_long = 2; -pub const SYS_read: c_long = 3; -pub const SYS_write: c_long = 4; -pub const SYS_open: c_long = 5; -pub const SYS_close: c_long = 6; -pub const SYS_restart_syscall: c_long = 7; -pub const SYS_creat: c_long = 8; -pub const SYS_link: c_long = 9; -pub const SYS_unlink: c_long = 10; -pub const SYS_execve: c_long = 11; -pub const SYS_chdir: c_long = 12; -pub const SYS_mknod: c_long = 14; -pub const SYS_chmod: c_long = 15; -pub const SYS_lseek: c_long = 19; -pub const SYS_getpid: c_long = 20; -pub const SYS_mount: c_long = 21; -pub const SYS_umount: c_long = 22; -pub const SYS_ptrace: c_long = 26; -pub const SYS_alarm: c_long = 27; -pub const SYS_pause: c_long = 29; -pub const SYS_utime: c_long = 30; -pub const SYS_access: c_long = 33; -pub const SYS_nice: c_long = 34; -pub const SYS_sync: c_long = 36; -pub const SYS_kill: c_long = 37; -pub const SYS_rename: c_long = 38; -pub const SYS_mkdir: c_long = 39; -pub const SYS_rmdir: c_long = 40; -pub const SYS_dup: c_long = 41; -pub const SYS_pipe: c_long = 42; -pub const SYS_times: c_long = 43; -pub const SYS_brk: c_long = 45; -pub const SYS_signal: c_long = 48; -pub const SYS_acct: c_long = 51; -pub const SYS_umount2: c_long = 52; -pub const SYS_ioctl: c_long = 54; -pub const SYS_fcntl: c_long = 55; -pub const SYS_setpgid: c_long = 57; -pub const SYS_umask: c_long = 60; -pub const SYS_chroot: c_long = 61; -pub const SYS_ustat: c_long = 62; -pub const SYS_dup2: c_long = 63; -pub const SYS_getppid: c_long = 64; -pub const SYS_getpgrp: c_long = 65; -pub const SYS_setsid: c_long = 66; -pub const SYS_sigaction: c_long = 67; -pub const SYS_sigsuspend: c_long = 72; -pub const 
SYS_sigpending: c_long = 73; -pub const SYS_sethostname: c_long = 74; -pub const SYS_setrlimit: c_long = 75; -pub const SYS_getrusage: c_long = 77; -pub const SYS_gettimeofday: c_long = 78; -pub const SYS_settimeofday: c_long = 79; -pub const SYS_symlink: c_long = 83; -pub const SYS_readlink: c_long = 85; -pub const SYS_uselib: c_long = 86; -pub const SYS_swapon: c_long = 87; -pub const SYS_reboot: c_long = 88; -pub const SYS_readdir: c_long = 89; -pub const SYS_mmap: c_long = 90; -pub const SYS_munmap: c_long = 91; -pub const SYS_truncate: c_long = 92; -pub const SYS_ftruncate: c_long = 93; -pub const SYS_fchmod: c_long = 94; -pub const SYS_getpriority: c_long = 96; -pub const SYS_setpriority: c_long = 97; -pub const SYS_statfs: c_long = 99; -pub const SYS_fstatfs: c_long = 100; -pub const SYS_socketcall: c_long = 102; -pub const SYS_syslog: c_long = 103; -pub const SYS_setitimer: c_long = 104; -pub const SYS_getitimer: c_long = 105; -pub const SYS_stat: c_long = 106; -pub const SYS_lstat: c_long = 107; -pub const SYS_fstat: c_long = 108; -pub const SYS_lookup_dcookie: c_long = 110; -pub const SYS_vhangup: c_long = 111; -pub const SYS_idle: c_long = 112; -pub const SYS_wait4: c_long = 114; -pub const SYS_swapoff: c_long = 115; -pub const SYS_sysinfo: c_long = 116; -pub const SYS_ipc: c_long = 117; -pub const SYS_fsync: c_long = 118; -pub const SYS_sigreturn: c_long = 119; -pub const SYS_clone: c_long = 120; -pub const SYS_setdomainname: c_long = 121; -pub const SYS_uname: c_long = 122; -pub const SYS_adjtimex: c_long = 124; -pub const SYS_mprotect: c_long = 125; -pub const SYS_sigprocmask: c_long = 126; -#[deprecated(since = "0.2.70", note = "Functional up to 2.6 kernel")] -pub const SYS_create_module: c_long = 127; -pub const SYS_init_module: c_long = 128; -pub const SYS_delete_module: c_long = 129; -#[deprecated(since = "0.2.70", note = "Functional up to 2.6 kernel")] -pub const SYS_get_kernel_syms: c_long = 130; -pub const SYS_quotactl: c_long = 131; -pub const SYS_getpgid: c_long = 132; -pub const SYS_fchdir: c_long = 133; -pub const SYS_bdflush: c_long = 134; -pub const SYS_sysfs: c_long = 135; -pub const SYS_personality: c_long = 136; -pub const SYS_afs_syscall: c_long = 137; /* Syscall for Andrew File System */ -pub const SYS_getdents: c_long = 141; -pub const SYS_select: c_long = 142; -pub const SYS_flock: c_long = 143; -pub const SYS_msync: c_long = 144; -pub const SYS_readv: c_long = 145; -pub const SYS_writev: c_long = 146; -pub const SYS_getsid: c_long = 147; -pub const SYS_fdatasync: c_long = 148; -pub const SYS__sysctl: c_long = 149; -pub const SYS_mlock: c_long = 150; -pub const SYS_munlock: c_long = 151; -pub const SYS_mlockall: c_long = 152; -pub const SYS_munlockall: c_long = 153; -pub const SYS_sched_setparam: c_long = 154; -pub const SYS_sched_getparam: c_long = 155; -pub const SYS_sched_setscheduler: c_long = 156; -pub const SYS_sched_getscheduler: c_long = 157; -pub const SYS_sched_yield: c_long = 158; -pub const SYS_sched_get_priority_max: c_long = 159; -pub const SYS_sched_get_priority_min: c_long = 160; -pub const SYS_sched_rr_get_interval: c_long = 161; -pub const SYS_nanosleep: c_long = 162; -pub const SYS_mremap: c_long = 163; -#[deprecated(since = "0.2.70", note = "Functional up to 2.6 kernel")] -pub const SYS_query_module: c_long = 167; -pub const SYS_poll: c_long = 168; -pub const SYS_nfsservctl: c_long = 169; -pub const SYS_prctl: c_long = 172; -pub const SYS_rt_sigreturn: c_long = 173; -pub const SYS_rt_sigaction: c_long = 174; -pub const 
SYS_rt_sigprocmask: c_long = 175; -pub const SYS_rt_sigpending: c_long = 176; -pub const SYS_rt_sigtimedwait: c_long = 177; -pub const SYS_rt_sigqueueinfo: c_long = 178; -pub const SYS_rt_sigsuspend: c_long = 179; -pub const SYS_pread64: c_long = 180; -pub const SYS_pwrite64: c_long = 181; -pub const SYS_getcwd: c_long = 183; -pub const SYS_capget: c_long = 184; -pub const SYS_capset: c_long = 185; -pub const SYS_sigaltstack: c_long = 186; -pub const SYS_sendfile: c_long = 187; -pub const SYS_getpmsg: c_long = 188; -pub const SYS_putpmsg: c_long = 189; -pub const SYS_vfork: c_long = 190; -pub const SYS_getrlimit: c_long = 191; -pub const SYS_lchown: c_long = 198; -pub const SYS_getuid: c_long = 199; -pub const SYS_getgid: c_long = 200; -pub const SYS_geteuid: c_long = 201; -pub const SYS_getegid: c_long = 202; -pub const SYS_setreuid: c_long = 203; -pub const SYS_setregid: c_long = 204; -pub const SYS_getgroups: c_long = 205; -pub const SYS_setgroups: c_long = 206; -pub const SYS_fchown: c_long = 207; -pub const SYS_setresuid: c_long = 208; -pub const SYS_getresuid: c_long = 209; -pub const SYS_setresgid: c_long = 210; -pub const SYS_getresgid: c_long = 211; -pub const SYS_chown: c_long = 212; -pub const SYS_setuid: c_long = 213; -pub const SYS_setgid: c_long = 214; -pub const SYS_setfsuid: c_long = 215; -pub const SYS_setfsgid: c_long = 216; -pub const SYS_pivot_root: c_long = 217; -pub const SYS_mincore: c_long = 218; -pub const SYS_madvise: c_long = 219; -pub const SYS_getdents64: c_long = 220; -pub const SYS_readahead: c_long = 222; -pub const SYS_setxattr: c_long = 224; -pub const SYS_lsetxattr: c_long = 225; -pub const SYS_fsetxattr: c_long = 226; -pub const SYS_getxattr: c_long = 227; -pub const SYS_lgetxattr: c_long = 228; -pub const SYS_fgetxattr: c_long = 229; -pub const SYS_listxattr: c_long = 230; -pub const SYS_llistxattr: c_long = 231; -pub const SYS_flistxattr: c_long = 232; -pub const SYS_removexattr: c_long = 233; -pub const SYS_lremovexattr: c_long = 234; -pub const SYS_fremovexattr: c_long = 235; -pub const SYS_gettid: c_long = 236; -pub const SYS_tkill: c_long = 237; -pub const SYS_futex: c_long = 238; -pub const SYS_sched_setaffinity: c_long = 239; -pub const SYS_sched_getaffinity: c_long = 240; -pub const SYS_tgkill: c_long = 241; -pub const SYS_io_setup: c_long = 243; -pub const SYS_io_destroy: c_long = 244; -pub const SYS_io_getevents: c_long = 245; -pub const SYS_io_submit: c_long = 246; -pub const SYS_io_cancel: c_long = 247; -pub const SYS_exit_group: c_long = 248; -pub const SYS_epoll_create: c_long = 249; -pub const SYS_epoll_ctl: c_long = 250; -pub const SYS_epoll_wait: c_long = 251; -pub const SYS_set_tid_address: c_long = 252; -pub const SYS_fadvise64: c_long = 253; -pub const SYS_timer_create: c_long = 254; -pub const SYS_timer_settime: c_long = 255; -pub const SYS_timer_gettime: c_long = 256; -pub const SYS_timer_getoverrun: c_long = 257; -pub const SYS_timer_delete: c_long = 258; -pub const SYS_clock_settime: c_long = 259; -pub const SYS_clock_gettime: c_long = 260; -pub const SYS_clock_getres: c_long = 261; -pub const SYS_clock_nanosleep: c_long = 262; -pub const SYS_statfs64: c_long = 265; -pub const SYS_fstatfs64: c_long = 266; -pub const SYS_remap_file_pages: c_long = 267; -pub const SYS_mbind: c_long = 268; -pub const SYS_get_mempolicy: c_long = 269; -pub const SYS_set_mempolicy: c_long = 270; -pub const SYS_mq_open: c_long = 271; -pub const SYS_mq_unlink: c_long = 272; -pub const SYS_mq_timedsend: c_long = 273; -pub const SYS_mq_timedreceive: c_long 
= 274; -pub const SYS_mq_notify: c_long = 275; -pub const SYS_mq_getsetattr: c_long = 276; -pub const SYS_kexec_load: c_long = 277; -pub const SYS_add_key: c_long = 278; -pub const SYS_request_key: c_long = 279; -pub const SYS_keyctl: c_long = 280; -pub const SYS_waitid: c_long = 281; -pub const SYS_ioprio_set: c_long = 282; -pub const SYS_ioprio_get: c_long = 283; -pub const SYS_inotify_init: c_long = 284; -pub const SYS_inotify_add_watch: c_long = 285; -pub const SYS_inotify_rm_watch: c_long = 286; -pub const SYS_migrate_pages: c_long = 287; -pub const SYS_openat: c_long = 288; -pub const SYS_mkdirat: c_long = 289; -pub const SYS_mknodat: c_long = 290; -pub const SYS_fchownat: c_long = 291; -pub const SYS_futimesat: c_long = 292; -pub const SYS_newfstatat: c_long = 293; -pub const SYS_unlinkat: c_long = 294; -pub const SYS_renameat: c_long = 295; -pub const SYS_linkat: c_long = 296; -pub const SYS_symlinkat: c_long = 297; -pub const SYS_readlinkat: c_long = 298; -pub const SYS_fchmodat: c_long = 299; -pub const SYS_faccessat: c_long = 300; -pub const SYS_pselect6: c_long = 301; -pub const SYS_ppoll: c_long = 302; -pub const SYS_unshare: c_long = 303; -pub const SYS_set_robust_list: c_long = 304; -pub const SYS_get_robust_list: c_long = 305; -pub const SYS_splice: c_long = 306; -pub const SYS_sync_file_range: c_long = 307; -pub const SYS_tee: c_long = 308; -pub const SYS_vmsplice: c_long = 309; -pub const SYS_move_pages: c_long = 310; -pub const SYS_getcpu: c_long = 311; -pub const SYS_epoll_pwait: c_long = 312; -pub const SYS_utimes: c_long = 313; -pub const SYS_fallocate: c_long = 314; -pub const SYS_utimensat: c_long = 315; -pub const SYS_signalfd: c_long = 316; -pub const SYS_timerfd: c_long = 317; -pub const SYS_eventfd: c_long = 318; -pub const SYS_timerfd_create: c_long = 319; -pub const SYS_timerfd_settime: c_long = 320; -pub const SYS_timerfd_gettime: c_long = 321; -pub const SYS_signalfd4: c_long = 322; -pub const SYS_eventfd2: c_long = 323; -pub const SYS_inotify_init1: c_long = 324; -pub const SYS_pipe2: c_long = 325; -pub const SYS_dup3: c_long = 326; -pub const SYS_epoll_create1: c_long = 327; -pub const SYS_preadv: c_long = 328; -pub const SYS_pwritev: c_long = 329; -pub const SYS_rt_tgsigqueueinfo: c_long = 330; -pub const SYS_perf_event_open: c_long = 331; -pub const SYS_fanotify_init: c_long = 332; -pub const SYS_fanotify_mark: c_long = 333; -pub const SYS_prlimit64: c_long = 334; -pub const SYS_name_to_handle_at: c_long = 335; -pub const SYS_open_by_handle_at: c_long = 336; -pub const SYS_clock_adjtime: c_long = 337; -pub const SYS_syncfs: c_long = 338; -pub const SYS_setns: c_long = 339; -pub const SYS_process_vm_readv: c_long = 340; -pub const SYS_process_vm_writev: c_long = 341; -pub const SYS_s390_runtime_instr: c_long = 342; -pub const SYS_kcmp: c_long = 343; -pub const SYS_finit_module: c_long = 344; -pub const SYS_sched_setattr: c_long = 345; -pub const SYS_sched_getattr: c_long = 346; -pub const SYS_renameat2: c_long = 347; -pub const SYS_seccomp: c_long = 348; -pub const SYS_getrandom: c_long = 349; -pub const SYS_memfd_create: c_long = 350; -pub const SYS_bpf: c_long = 351; -pub const SYS_s390_pci_mmio_write: c_long = 352; -pub const SYS_s390_pci_mmio_read: c_long = 353; -pub const SYS_execveat: c_long = 354; -pub const SYS_userfaultfd: c_long = 355; -pub const SYS_membarrier: c_long = 356; -pub const SYS_recvmmsg: c_long = 357; -pub const SYS_sendmmsg: c_long = 358; -pub const SYS_socket: c_long = 359; -pub const SYS_socketpair: c_long = 360; -pub const 
SYS_bind: c_long = 361;
-pub const SYS_connect: c_long = 362;
-pub const SYS_listen: c_long = 363;
-pub const SYS_accept4: c_long = 364;
-pub const SYS_getsockopt: c_long = 365;
-pub const SYS_setsockopt: c_long = 366;
-pub const SYS_getsockname: c_long = 367;
-pub const SYS_getpeername: c_long = 368;
-pub const SYS_sendto: c_long = 369;
-pub const SYS_sendmsg: c_long = 370;
-pub const SYS_recvfrom: c_long = 371;
-pub const SYS_recvmsg: c_long = 372;
-pub const SYS_shutdown: c_long = 373;
-pub const SYS_mlock2: c_long = 374;
-pub const SYS_copy_file_range: c_long = 375;
-pub const SYS_preadv2: c_long = 376;
-pub const SYS_pwritev2: c_long = 377;
-pub const SYS_s390_guarded_storage: c_long = 378;
-pub const SYS_statx: c_long = 379;
-pub const SYS_s390_sthyi: c_long = 380;
-pub const SYS_kexec_file_load: c_long = 381;
-pub const SYS_io_pgetevents: c_long = 382;
-pub const SYS_rseq: c_long = 383;
-pub const SYS_pkey_mprotect: c_long = 384;
-pub const SYS_pkey_alloc: c_long = 385;
-pub const SYS_pkey_free: c_long = 386;
-pub const SYS_semtimedop: c_long = 392;
-pub const SYS_semget: c_long = 393;
-pub const SYS_semctl: c_long = 394;
-pub const SYS_shmget: c_long = 395;
-pub const SYS_shmctl: c_long = 396;
-pub const SYS_shmat: c_long = 397;
-pub const SYS_shmdt: c_long = 398;
-pub const SYS_msgget: c_long = 399;
-pub const SYS_msgsnd: c_long = 400;
-pub const SYS_msgrcv: c_long = 401;
-pub const SYS_msgctl: c_long = 402;
-pub const SYS_pidfd_send_signal: c_long = 424;
-pub const SYS_io_uring_setup: c_long = 425;
-pub const SYS_io_uring_enter: c_long = 426;
-pub const SYS_io_uring_register: c_long = 427;
-pub const SYS_open_tree: c_long = 428;
-pub const SYS_move_mount: c_long = 429;
-pub const SYS_fsopen: c_long = 430;
-pub const SYS_fsconfig: c_long = 431;
-pub const SYS_fsmount: c_long = 432;
-pub const SYS_fspick: c_long = 433;
-pub const SYS_pidfd_open: c_long = 434;
-pub const SYS_clone3: c_long = 435;
-pub const SYS_close_range: c_long = 436;
-pub const SYS_openat2: c_long = 437;
-pub const SYS_pidfd_getfd: c_long = 438;
-pub const SYS_faccessat2: c_long = 439;
-pub const SYS_process_madvise: c_long = 440;
-pub const SYS_epoll_pwait2: c_long = 441;
-pub const SYS_mount_setattr: c_long = 442;
-pub const SYS_landlock_create_ruleset: c_long = 444;
-pub const SYS_landlock_add_rule: c_long = 445;
-pub const SYS_landlock_restrict_self: c_long = 446;
-pub const SYS_memfd_secret: c_long = 447;
-pub const SYS_process_mrelease: c_long = 448;
-pub const SYS_futex_waitv: c_long = 449;
-pub const SYS_set_mempolicy_home_node: c_long = 450;
-pub const SYS_cachestat: c_long = 451;
-pub const SYS_fchmodat2: c_long = 452;
-pub const SYS_mseal: c_long = 462;
diff --git a/vendor/libc/src/unix/linux_like/linux/musl/b64/wasm32/mod.rs b/vendor/libc/src/unix/linux_like/linux/musl/b64/wasm32/mod.rs
deleted file mode 100644
index 29750e79e17e65..00000000000000
--- a/vendor/libc/src/unix/linux_like/linux/musl/b64/wasm32/mod.rs
+++ /dev/null
@@ -1,688 +0,0 @@
-//! Wasm32 definitions conforming to the WALI ABI.
-//! The WALI ABI closely mirrors `x86_64` Linux and is thus implemented within the `b64` module as opposed to `b32`
-use crate::off_t;
-use crate::prelude::*;
-
-pub type wchar_t = i32;
-pub type nlink_t = u64;
-pub type blksize_t = c_long;
-pub type __u64 = c_ulonglong;
-pub type __s64 = c_longlong;
-
-s!
{ - pub struct stat { - pub st_dev: crate::dev_t, - pub st_ino: crate::ino_t, - pub st_nlink: crate::nlink_t, - pub st_mode: crate::mode_t, - pub st_uid: crate::uid_t, - pub st_gid: crate::gid_t, - __pad0: c_int, - pub st_rdev: crate::dev_t, - pub st_size: off_t, - pub st_blksize: crate::blksize_t, - pub st_blocks: crate::blkcnt_t, - pub st_atime: crate::time_t, - pub st_atime_nsec: c_long, - pub st_mtime: crate::time_t, - pub st_mtime_nsec: c_long, - pub st_ctime: crate::time_t, - pub st_ctime_nsec: c_long, - __unused: [c_long; 3], - } - - pub struct stat64 { - pub st_dev: crate::dev_t, - pub st_ino: crate::ino64_t, - pub st_nlink: crate::nlink_t, - pub st_mode: crate::mode_t, - pub st_uid: crate::uid_t, - pub st_gid: crate::gid_t, - __pad0: c_int, - pub st_rdev: crate::dev_t, - pub st_size: off_t, - pub st_blksize: crate::blksize_t, - pub st_blocks: crate::blkcnt64_t, - pub st_atime: crate::time_t, - pub st_atime_nsec: c_long, - pub st_mtime: crate::time_t, - pub st_mtime_nsec: c_long, - pub st_ctime: crate::time_t, - pub st_ctime_nsec: c_long, - __reserved: [c_long; 3], - } - - pub struct ipc_perm { - #[cfg(musl_v1_2_3)] - pub __key: crate::key_t, - #[cfg(not(musl_v1_2_3))] - #[deprecated( - since = "0.2.173", - note = "This field is incorrectly named and will be changed - to __key in a future release." - )] - pub __ipc_perm_key: crate::key_t, - pub uid: crate::uid_t, - pub gid: crate::gid_t, - pub cuid: crate::uid_t, - pub cgid: crate::gid_t, - pub mode: crate::mode_t, - pub __seq: c_int, - __unused1: c_long, - __unused2: c_long, - } -} - -// Syscall table -pub const SYS_read: c_long = 0; -pub const SYS_write: c_long = 1; -pub const SYS_open: c_long = 2; -pub const SYS_close: c_long = 3; -pub const SYS_stat: c_long = 4; -pub const SYS_fstat: c_long = 5; -pub const SYS_lstat: c_long = 6; -pub const SYS_poll: c_long = 7; -pub const SYS_lseek: c_long = 8; -pub const SYS_mmap: c_long = 9; -pub const SYS_mprotect: c_long = 10; -pub const SYS_munmap: c_long = 11; -pub const SYS_brk: c_long = 12; -pub const SYS_rt_sigaction: c_long = 13; -pub const SYS_rt_sigprocmask: c_long = 14; -pub const SYS_rt_sigreturn: c_long = 15; -pub const SYS_ioctl: c_long = 16; -pub const SYS_pread64: c_long = 17; -pub const SYS_pwrite64: c_long = 18; -pub const SYS_readv: c_long = 19; -pub const SYS_writev: c_long = 20; -pub const SYS_access: c_long = 21; -pub const SYS_pipe: c_long = 22; -pub const SYS_select: c_long = 23; -pub const SYS_sched_yield: c_long = 24; -pub const SYS_mremap: c_long = 25; -pub const SYS_msync: c_long = 26; -pub const SYS_mincore: c_long = 27; -pub const SYS_madvise: c_long = 28; -pub const SYS_shmget: c_long = 29; -pub const SYS_shmat: c_long = 30; -pub const SYS_shmctl: c_long = 31; -pub const SYS_dup: c_long = 32; -pub const SYS_dup2: c_long = 33; -pub const SYS_pause: c_long = 34; -pub const SYS_nanosleep: c_long = 35; -pub const SYS_getitimer: c_long = 36; -pub const SYS_alarm: c_long = 37; -pub const SYS_setitimer: c_long = 38; -pub const SYS_getpid: c_long = 39; -pub const SYS_sendfile: c_long = 40; -pub const SYS_socket: c_long = 41; -pub const SYS_connect: c_long = 42; -pub const SYS_accept: c_long = 43; -pub const SYS_sendto: c_long = 44; -pub const SYS_recvfrom: c_long = 45; -pub const SYS_sendmsg: c_long = 46; -pub const SYS_recvmsg: c_long = 47; -pub const SYS_shutdown: c_long = 48; -pub const SYS_bind: c_long = 49; -pub const SYS_listen: c_long = 50; -pub const SYS_getsockname: c_long = 51; -pub const SYS_getpeername: c_long = 52; -pub const SYS_socketpair: c_long = 53; 
-pub const SYS_setsockopt: c_long = 54; -pub const SYS_getsockopt: c_long = 55; -pub const SYS_clone: c_long = 56; -pub const SYS_fork: c_long = 57; -pub const SYS_vfork: c_long = 58; -pub const SYS_execve: c_long = 59; -pub const SYS_exit: c_long = 60; -pub const SYS_wait4: c_long = 61; -pub const SYS_kill: c_long = 62; -pub const SYS_uname: c_long = 63; -pub const SYS_semget: c_long = 64; -pub const SYS_semop: c_long = 65; -pub const SYS_semctl: c_long = 66; -pub const SYS_shmdt: c_long = 67; -pub const SYS_msgget: c_long = 68; -pub const SYS_msgsnd: c_long = 69; -pub const SYS_msgrcv: c_long = 70; -pub const SYS_msgctl: c_long = 71; -pub const SYS_fcntl: c_long = 72; -pub const SYS_flock: c_long = 73; -pub const SYS_fsync: c_long = 74; -pub const SYS_fdatasync: c_long = 75; -pub const SYS_truncate: c_long = 76; -pub const SYS_ftruncate: c_long = 77; -pub const SYS_getdents: c_long = 78; -pub const SYS_getcwd: c_long = 79; -pub const SYS_chdir: c_long = 80; -pub const SYS_fchdir: c_long = 81; -pub const SYS_rename: c_long = 82; -pub const SYS_mkdir: c_long = 83; -pub const SYS_rmdir: c_long = 84; -pub const SYS_creat: c_long = 85; -pub const SYS_link: c_long = 86; -pub const SYS_unlink: c_long = 87; -pub const SYS_symlink: c_long = 88; -pub const SYS_readlink: c_long = 89; -pub const SYS_chmod: c_long = 90; -pub const SYS_fchmod: c_long = 91; -pub const SYS_chown: c_long = 92; -pub const SYS_fchown: c_long = 93; -pub const SYS_lchown: c_long = 94; -pub const SYS_umask: c_long = 95; -pub const SYS_gettimeofday: c_long = 96; -pub const SYS_getrlimit: c_long = 97; -pub const SYS_getrusage: c_long = 98; -pub const SYS_sysinfo: c_long = 99; -pub const SYS_times: c_long = 100; -pub const SYS_ptrace: c_long = 101; -pub const SYS_getuid: c_long = 102; -pub const SYS_syslog: c_long = 103; -pub const SYS_getgid: c_long = 104; -pub const SYS_setuid: c_long = 105; -pub const SYS_setgid: c_long = 106; -pub const SYS_geteuid: c_long = 107; -pub const SYS_getegid: c_long = 108; -pub const SYS_setpgid: c_long = 109; -pub const SYS_getppid: c_long = 110; -pub const SYS_getpgrp: c_long = 111; -pub const SYS_setsid: c_long = 112; -pub const SYS_setreuid: c_long = 113; -pub const SYS_setregid: c_long = 114; -pub const SYS_getgroups: c_long = 115; -pub const SYS_setgroups: c_long = 116; -pub const SYS_setresuid: c_long = 117; -pub const SYS_getresuid: c_long = 118; -pub const SYS_setresgid: c_long = 119; -pub const SYS_getresgid: c_long = 120; -pub const SYS_getpgid: c_long = 121; -pub const SYS_setfsuid: c_long = 122; -pub const SYS_setfsgid: c_long = 123; -pub const SYS_getsid: c_long = 124; -pub const SYS_capget: c_long = 125; -pub const SYS_capset: c_long = 126; -pub const SYS_rt_sigpending: c_long = 127; -pub const SYS_rt_sigtimedwait: c_long = 128; -pub const SYS_rt_sigqueueinfo: c_long = 129; -pub const SYS_rt_sigsuspend: c_long = 130; -pub const SYS_sigaltstack: c_long = 131; -pub const SYS_utime: c_long = 132; -pub const SYS_mknod: c_long = 133; -pub const SYS_uselib: c_long = 134; -pub const SYS_personality: c_long = 135; -pub const SYS_ustat: c_long = 136; -pub const SYS_statfs: c_long = 137; -pub const SYS_fstatfs: c_long = 138; -pub const SYS_sysfs: c_long = 139; -pub const SYS_getpriority: c_long = 140; -pub const SYS_setpriority: c_long = 141; -pub const SYS_sched_setparam: c_long = 142; -pub const SYS_sched_getparam: c_long = 143; -pub const SYS_sched_setscheduler: c_long = 144; -pub const SYS_sched_getscheduler: c_long = 145; -pub const SYS_sched_get_priority_max: c_long = 146; -pub const 
SYS_sched_get_priority_min: c_long = 147; -pub const SYS_sched_rr_get_interval: c_long = 148; -pub const SYS_mlock: c_long = 149; -pub const SYS_munlock: c_long = 150; -pub const SYS_mlockall: c_long = 151; -pub const SYS_munlockall: c_long = 152; -pub const SYS_vhangup: c_long = 153; -pub const SYS_modify_ldt: c_long = 154; -pub const SYS_pivot_root: c_long = 155; -pub const SYS__sysctl: c_long = 156; -pub const SYS_prctl: c_long = 157; -pub const SYS_arch_prctl: c_long = 158; -pub const SYS_adjtimex: c_long = 159; -pub const SYS_setrlimit: c_long = 160; -pub const SYS_chroot: c_long = 161; -pub const SYS_sync: c_long = 162; -pub const SYS_acct: c_long = 163; -pub const SYS_settimeofday: c_long = 164; -pub const SYS_mount: c_long = 165; -pub const SYS_umount2: c_long = 166; -pub const SYS_swapon: c_long = 167; -pub const SYS_swapoff: c_long = 168; -pub const SYS_reboot: c_long = 169; -pub const SYS_sethostname: c_long = 170; -pub const SYS_setdomainname: c_long = 171; -pub const SYS_iopl: c_long = 172; -pub const SYS_ioperm: c_long = 173; -pub const SYS_create_module: c_long = 174; -pub const SYS_init_module: c_long = 175; -pub const SYS_delete_module: c_long = 176; -pub const SYS_get_kernel_syms: c_long = 177; -pub const SYS_query_module: c_long = 178; -pub const SYS_quotactl: c_long = 179; -pub const SYS_nfsservctl: c_long = 180; -pub const SYS_getpmsg: c_long = 181; -pub const SYS_putpmsg: c_long = 182; -pub const SYS_afs_syscall: c_long = 183; -pub const SYS_tuxcall: c_long = 184; -pub const SYS_security: c_long = 185; -pub const SYS_gettid: c_long = 186; -pub const SYS_readahead: c_long = 187; -pub const SYS_setxattr: c_long = 188; -pub const SYS_lsetxattr: c_long = 189; -pub const SYS_fsetxattr: c_long = 190; -pub const SYS_getxattr: c_long = 191; -pub const SYS_lgetxattr: c_long = 192; -pub const SYS_fgetxattr: c_long = 193; -pub const SYS_listxattr: c_long = 194; -pub const SYS_llistxattr: c_long = 195; -pub const SYS_flistxattr: c_long = 196; -pub const SYS_removexattr: c_long = 197; -pub const SYS_lremovexattr: c_long = 198; -pub const SYS_fremovexattr: c_long = 199; -pub const SYS_tkill: c_long = 200; -pub const SYS_time: c_long = 201; -pub const SYS_futex: c_long = 202; -pub const SYS_sched_setaffinity: c_long = 203; -pub const SYS_sched_getaffinity: c_long = 204; -pub const SYS_set_thread_area: c_long = 205; -pub const SYS_io_setup: c_long = 206; -pub const SYS_io_destroy: c_long = 207; -pub const SYS_io_getevents: c_long = 208; -pub const SYS_io_submit: c_long = 209; -pub const SYS_io_cancel: c_long = 210; -pub const SYS_get_thread_area: c_long = 211; -pub const SYS_lookup_dcookie: c_long = 212; -pub const SYS_epoll_create: c_long = 213; -pub const SYS_epoll_ctl_old: c_long = 214; -pub const SYS_epoll_wait_old: c_long = 215; -pub const SYS_remap_file_pages: c_long = 216; -pub const SYS_getdents64: c_long = 217; -pub const SYS_set_tid_address: c_long = 218; -pub const SYS_restart_syscall: c_long = 219; -pub const SYS_semtimedop: c_long = 220; -pub const SYS_fadvise64: c_long = 221; -pub const SYS_timer_create: c_long = 222; -pub const SYS_timer_settime: c_long = 223; -pub const SYS_timer_gettime: c_long = 224; -pub const SYS_timer_getoverrun: c_long = 225; -pub const SYS_timer_delete: c_long = 226; -pub const SYS_clock_settime: c_long = 227; -pub const SYS_clock_gettime: c_long = 228; -pub const SYS_clock_getres: c_long = 229; -pub const SYS_clock_nanosleep: c_long = 230; -pub const SYS_exit_group: c_long = 231; -pub const SYS_epoll_wait: c_long = 232; -pub const 
SYS_epoll_ctl: c_long = 233; -pub const SYS_tgkill: c_long = 234; -pub const SYS_utimes: c_long = 235; -pub const SYS_vserver: c_long = 236; -pub const SYS_mbind: c_long = 237; -pub const SYS_set_mempolicy: c_long = 238; -pub const SYS_get_mempolicy: c_long = 239; -pub const SYS_mq_open: c_long = 240; -pub const SYS_mq_unlink: c_long = 241; -pub const SYS_mq_timedsend: c_long = 242; -pub const SYS_mq_timedreceive: c_long = 243; -pub const SYS_mq_notify: c_long = 244; -pub const SYS_mq_getsetattr: c_long = 245; -pub const SYS_kexec_load: c_long = 246; -pub const SYS_waitid: c_long = 247; -pub const SYS_add_key: c_long = 248; -pub const SYS_request_key: c_long = 249; -pub const SYS_keyctl: c_long = 250; -pub const SYS_ioprio_set: c_long = 251; -pub const SYS_ioprio_get: c_long = 252; -pub const SYS_inotify_init: c_long = 253; -pub const SYS_inotify_add_watch: c_long = 254; -pub const SYS_inotify_rm_watch: c_long = 255; -pub const SYS_migrate_pages: c_long = 256; -pub const SYS_openat: c_long = 257; -pub const SYS_mkdirat: c_long = 258; -pub const SYS_mknodat: c_long = 259; -pub const SYS_fchownat: c_long = 260; -pub const SYS_futimesat: c_long = 261; -pub const SYS_newfstatat: c_long = 262; -pub const SYS_unlinkat: c_long = 263; -pub const SYS_renameat: c_long = 264; -pub const SYS_linkat: c_long = 265; -pub const SYS_symlinkat: c_long = 266; -pub const SYS_readlinkat: c_long = 267; -pub const SYS_fchmodat: c_long = 268; -pub const SYS_faccessat: c_long = 269; -pub const SYS_pselect6: c_long = 270; -pub const SYS_ppoll: c_long = 271; -pub const SYS_unshare: c_long = 272; -pub const SYS_set_robust_list: c_long = 273; -pub const SYS_get_robust_list: c_long = 274; -pub const SYS_splice: c_long = 275; -pub const SYS_tee: c_long = 276; -pub const SYS_sync_file_range: c_long = 277; -pub const SYS_vmsplice: c_long = 278; -pub const SYS_move_pages: c_long = 279; -pub const SYS_utimensat: c_long = 280; -pub const SYS_epoll_pwait: c_long = 281; -pub const SYS_signalfd: c_long = 282; -pub const SYS_timerfd_create: c_long = 283; -pub const SYS_eventfd: c_long = 284; -pub const SYS_fallocate: c_long = 285; -pub const SYS_timerfd_settime: c_long = 286; -pub const SYS_timerfd_gettime: c_long = 287; -pub const SYS_accept4: c_long = 288; -pub const SYS_signalfd4: c_long = 289; -pub const SYS_eventfd2: c_long = 290; -pub const SYS_epoll_create1: c_long = 291; -pub const SYS_dup3: c_long = 292; -pub const SYS_pipe2: c_long = 293; -pub const SYS_inotify_init1: c_long = 294; -pub const SYS_preadv: c_long = 295; -pub const SYS_pwritev: c_long = 296; -pub const SYS_rt_tgsigqueueinfo: c_long = 297; -pub const SYS_perf_event_open: c_long = 298; -pub const SYS_recvmmsg: c_long = 299; -pub const SYS_fanotify_init: c_long = 300; -pub const SYS_fanotify_mark: c_long = 301; -pub const SYS_prlimit64: c_long = 302; -pub const SYS_name_to_handle_at: c_long = 303; -pub const SYS_open_by_handle_at: c_long = 304; -pub const SYS_clock_adjtime: c_long = 305; -pub const SYS_syncfs: c_long = 306; -pub const SYS_sendmmsg: c_long = 307; -pub const SYS_setns: c_long = 308; -pub const SYS_getcpu: c_long = 309; -pub const SYS_process_vm_readv: c_long = 310; -pub const SYS_process_vm_writev: c_long = 311; -pub const SYS_kcmp: c_long = 312; -pub const SYS_finit_module: c_long = 313; -pub const SYS_sched_setattr: c_long = 314; -pub const SYS_sched_getattr: c_long = 315; -pub const SYS_renameat2: c_long = 316; -pub const SYS_seccomp: c_long = 317; -pub const SYS_getrandom: c_long = 318; -pub const SYS_memfd_create: c_long = 319; -pub const 
SYS_kexec_file_load: c_long = 320; -pub const SYS_bpf: c_long = 321; -pub const SYS_execveat: c_long = 322; -pub const SYS_userfaultfd: c_long = 323; -pub const SYS_membarrier: c_long = 324; -pub const SYS_mlock2: c_long = 325; -pub const SYS_copy_file_range: c_long = 326; -pub const SYS_preadv2: c_long = 327; -pub const SYS_pwritev2: c_long = 328; -pub const SYS_pkey_mprotect: c_long = 329; -pub const SYS_pkey_alloc: c_long = 330; -pub const SYS_pkey_free: c_long = 331; -pub const SYS_statx: c_long = 332; -pub const SYS_io_pgetevents: c_long = 333; -pub const SYS_rseq: c_long = 334; -pub const SYS_pidfd_send_signal: c_long = 424; -pub const SYS_io_uring_setup: c_long = 425; -pub const SYS_io_uring_enter: c_long = 426; -pub const SYS_io_uring_register: c_long = 427; -pub const SYS_open_tree: c_long = 428; -pub const SYS_move_mount: c_long = 429; -pub const SYS_fsopen: c_long = 430; -pub const SYS_fsconfig: c_long = 431; -pub const SYS_fsmount: c_long = 432; -pub const SYS_fspick: c_long = 433; -pub const SYS_pidfd_open: c_long = 434; -pub const SYS_clone3: c_long = 435; -pub const SYS_close_range: c_long = 436; -pub const SYS_openat2: c_long = 437; -pub const SYS_pidfd_getfd: c_long = 438; -pub const SYS_faccessat2: c_long = 439; -pub const SYS_process_madvise: c_long = 440; -pub const SYS_epoll_pwait2: c_long = 441; -pub const SYS_mount_setattr: c_long = 442; -pub const SYS_quotactl_fd: c_long = 443; -pub const SYS_landlock_create_ruleset: c_long = 444; -pub const SYS_landlock_add_rule: c_long = 445; -pub const SYS_landlock_restrict_self: c_long = 446; -pub const SYS_memfd_secret: c_long = 447; -pub const SYS_process_mrelease: c_long = 448; -pub const SYS_futex_waitv: c_long = 449; -pub const SYS_set_mempolicy_home_node: c_long = 450; - -// Syscall aliases for WALI -pub const SYS_fadvise: c_long = SYS_fadvise64; - -pub const MADV_SOFT_OFFLINE: c_int = 101; -pub const O_APPEND: c_int = 1024; -pub const O_DIRECT: c_int = 0x4000; -pub const O_DIRECTORY: c_int = 0x10000; -pub const O_LARGEFILE: c_int = 0; -pub const O_NOFOLLOW: c_int = 0x20000; -pub const O_CREAT: c_int = 64; -pub const O_EXCL: c_int = 128; -pub const O_NOCTTY: c_int = 256; -pub const O_NONBLOCK: c_int = 2048; -pub const O_SYNC: c_int = 1052672; -pub const O_RSYNC: c_int = 1052672; -pub const O_DSYNC: c_int = 4096; -pub const O_ASYNC: c_int = 0x2000; - -pub const PTRACE_SYSEMU: c_int = 31; -pub const PTRACE_SYSEMU_SINGLESTEP: c_int = 32; - -pub const SIGSTKSZ: size_t = 8192; -pub const MINSIGSTKSZ: size_t = 2048; - -pub const ENAMETOOLONG: c_int = 36; -pub const ENOLCK: c_int = 37; -pub const ENOSYS: c_int = 38; -pub const ENOTEMPTY: c_int = 39; -pub const ELOOP: c_int = 40; -pub const ENOMSG: c_int = 42; -pub const EIDRM: c_int = 43; -pub const ECHRNG: c_int = 44; -pub const EL2NSYNC: c_int = 45; -pub const EL3HLT: c_int = 46; -pub const EL3RST: c_int = 47; -pub const ELNRNG: c_int = 48; -pub const EUNATCH: c_int = 49; -pub const ENOCSI: c_int = 50; -pub const EL2HLT: c_int = 51; -pub const EBADE: c_int = 52; -pub const EBADR: c_int = 53; -pub const EXFULL: c_int = 54; -pub const ENOANO: c_int = 55; -pub const EBADRQC: c_int = 56; -pub const EBADSLT: c_int = 57; -pub const EMULTIHOP: c_int = 72; -pub const EBADMSG: c_int = 74; -pub const EOVERFLOW: c_int = 75; -pub const ENOTUNIQ: c_int = 76; -pub const EBADFD: c_int = 77; -pub const EREMCHG: c_int = 78; -pub const ELIBACC: c_int = 79; -pub const ELIBBAD: c_int = 80; -pub const ELIBSCN: c_int = 81; -pub const ELIBMAX: c_int = 82; -pub const ELIBEXEC: c_int = 83; -pub const 
EILSEQ: c_int = 84; -pub const ERESTART: c_int = 85; -pub const ESTRPIPE: c_int = 86; -pub const EUSERS: c_int = 87; -pub const ENOTSOCK: c_int = 88; -pub const EDESTADDRREQ: c_int = 89; -pub const EMSGSIZE: c_int = 90; -pub const EPROTOTYPE: c_int = 91; -pub const ENOPROTOOPT: c_int = 92; -pub const EPROTONOSUPPORT: c_int = 93; -pub const ESOCKTNOSUPPORT: c_int = 94; -pub const EOPNOTSUPP: c_int = 95; -pub const ENOTSUP: c_int = EOPNOTSUPP; -pub const EPFNOSUPPORT: c_int = 96; -pub const EAFNOSUPPORT: c_int = 97; -pub const EADDRINUSE: c_int = 98; -pub const EADDRNOTAVAIL: c_int = 99; -pub const ENETDOWN: c_int = 100; -pub const ENETUNREACH: c_int = 101; -pub const ENETRESET: c_int = 102; -pub const ECONNABORTED: c_int = 103; -pub const ECONNRESET: c_int = 104; -pub const ENOBUFS: c_int = 105; -pub const EISCONN: c_int = 106; -pub const ENOTCONN: c_int = 107; -pub const ESHUTDOWN: c_int = 108; -pub const ETOOMANYREFS: c_int = 109; -pub const ETIMEDOUT: c_int = 110; -pub const ECONNREFUSED: c_int = 111; -pub const EHOSTDOWN: c_int = 112; -pub const EHOSTUNREACH: c_int = 113; -pub const EALREADY: c_int = 114; -pub const EINPROGRESS: c_int = 115; -pub const ESTALE: c_int = 116; -pub const EUCLEAN: c_int = 117; -pub const ENOTNAM: c_int = 118; -pub const ENAVAIL: c_int = 119; -pub const EISNAM: c_int = 120; -pub const EREMOTEIO: c_int = 121; -pub const EDQUOT: c_int = 122; -pub const ENOMEDIUM: c_int = 123; -pub const EMEDIUMTYPE: c_int = 124; -pub const ECANCELED: c_int = 125; -pub const ENOKEY: c_int = 126; -pub const EKEYEXPIRED: c_int = 127; -pub const EKEYREVOKED: c_int = 128; -pub const EKEYREJECTED: c_int = 129; -pub const EOWNERDEAD: c_int = 130; -pub const ENOTRECOVERABLE: c_int = 131; -pub const ERFKILL: c_int = 132; -pub const EHWPOISON: c_int = 133; - -pub const SA_ONSTACK: c_int = 0x08000000; -pub const SA_SIGINFO: c_int = 0x00000004; -pub const SA_NOCLDWAIT: c_int = 0x00000002; - -pub const SIGCHLD: c_int = 17; -pub const SIGBUS: c_int = 7; -pub const SIGTTIN: c_int = 21; -pub const SIGTTOU: c_int = 22; -pub const SIGXCPU: c_int = 24; -pub const SIGXFSZ: c_int = 25; -pub const SIGVTALRM: c_int = 26; -pub const SIGPROF: c_int = 27; -pub const SIGWINCH: c_int = 28; -pub const SIGUSR1: c_int = 10; -pub const SIGUSR2: c_int = 12; -pub const SIGCONT: c_int = 18; -pub const SIGSTOP: c_int = 19; -pub const SIGTSTP: c_int = 20; -pub const SIGURG: c_int = 23; -pub const SIGIO: c_int = 29; -pub const SIGSYS: c_int = 31; -pub const SIGSTKFLT: c_int = 16; -pub const SIGPOLL: c_int = 29; -pub const SIGPWR: c_int = 30; -pub const SIG_SETMASK: c_int = 2; -pub const SIG_BLOCK: c_int = 0x000000; -pub const SIG_UNBLOCK: c_int = 0x01; - -pub const F_GETLK: c_int = 5; -pub const F_GETOWN: c_int = 9; -pub const F_SETLK: c_int = 6; -pub const F_SETLKW: c_int = 7; -pub const F_SETOWN: c_int = 8; - -pub const VEOF: usize = 4; - -pub const POLLWRNORM: c_short = 0x100; -pub const POLLWRBAND: c_short = 0x200; - -pub const SOCK_STREAM: c_int = 1; -pub const SOCK_DGRAM: c_int = 2; - -pub const MAP_ANON: c_int = 0x0020; -pub const MAP_GROWSDOWN: c_int = 0x0100; -pub const MAP_DENYWRITE: c_int = 0x0800; -pub const MAP_EXECUTABLE: c_int = 0x01000; -pub const MAP_LOCKED: c_int = 0x02000; -pub const MAP_NORESERVE: c_int = 0x04000; -pub const MAP_POPULATE: c_int = 0x08000; -pub const MAP_NONBLOCK: c_int = 0x010000; -pub const MAP_STACK: c_int = 0x020000; -pub const MAP_HUGETLB: c_int = 0x040000; -pub const MAP_SYNC: c_int = 0x080000; - -pub const MCL_CURRENT: c_int = 0x0001; -pub const MCL_FUTURE: c_int = 0x0002; 
-pub const MCL_ONFAULT: c_int = 0x0004; -pub const CBAUD: crate::tcflag_t = 0o0010017; -pub const TAB1: c_int = 0x00000800; -pub const TAB2: c_int = 0x00001000; -pub const TAB3: c_int = 0x00001800; -pub const CR1: c_int = 0x00000200; -pub const CR2: c_int = 0x00000400; -pub const CR3: c_int = 0x00000600; -pub const FF1: c_int = 0x00008000; -pub const BS1: c_int = 0x00002000; -pub const VT1: c_int = 0x00004000; -pub const VWERASE: usize = 14; -pub const VREPRINT: usize = 12; -pub const VSUSP: usize = 10; -pub const VSTART: usize = 8; -pub const VSTOP: usize = 9; -pub const VDISCARD: usize = 13; -pub const VTIME: usize = 5; -pub const IXON: crate::tcflag_t = 0x00000400; -pub const IXOFF: crate::tcflag_t = 0x00001000; -pub const ONLCR: crate::tcflag_t = 0x4; -pub const CSIZE: crate::tcflag_t = 0x00000030; -pub const CS6: crate::tcflag_t = 0x00000010; -pub const CS7: crate::tcflag_t = 0x00000020; -pub const CS8: crate::tcflag_t = 0x00000030; -pub const CSTOPB: crate::tcflag_t = 0x00000040; -pub const CREAD: crate::tcflag_t = 0x00000080; -pub const PARENB: crate::tcflag_t = 0x00000100; -pub const PARODD: crate::tcflag_t = 0x00000200; -pub const HUPCL: crate::tcflag_t = 0x00000400; -pub const CLOCAL: crate::tcflag_t = 0x00000800; -pub const ECHOKE: crate::tcflag_t = 0x00000800; -pub const ECHOE: crate::tcflag_t = 0x00000010; -pub const ECHOK: crate::tcflag_t = 0x00000020; -pub const ECHONL: crate::tcflag_t = 0x00000040; -pub const ECHOPRT: crate::tcflag_t = 0x00000400; -pub const ECHOCTL: crate::tcflag_t = 0x00000200; -pub const ISIG: crate::tcflag_t = 0x00000001; -pub const ICANON: crate::tcflag_t = 0x00000002; -pub const PENDIN: crate::tcflag_t = 0x00004000; -pub const NOFLSH: crate::tcflag_t = 0x00000080; -pub const CIBAUD: crate::tcflag_t = 0o02003600000; -pub const CBAUDEX: crate::tcflag_t = 0o010000; -pub const VSWTC: usize = 7; -pub const OLCUC: crate::tcflag_t = 0o000002; -pub const NLDLY: crate::tcflag_t = 0o000400; -pub const CRDLY: crate::tcflag_t = 0o003000; -pub const TABDLY: crate::tcflag_t = 0o014000; -pub const BSDLY: crate::tcflag_t = 0o020000; -pub const FFDLY: crate::tcflag_t = 0o100000; -pub const VTDLY: crate::tcflag_t = 0o040000; -pub const XTABS: crate::tcflag_t = 0o014000; -pub const B57600: crate::speed_t = 0o010001; -pub const B115200: crate::speed_t = 0o010002; -pub const B230400: crate::speed_t = 0o010003; -pub const B460800: crate::speed_t = 0o010004; -pub const B500000: crate::speed_t = 0o010005; -pub const B576000: crate::speed_t = 0o010006; -pub const B921600: crate::speed_t = 0o010007; -pub const B1000000: crate::speed_t = 0o010010; -pub const B1152000: crate::speed_t = 0o010011; -pub const B1500000: crate::speed_t = 0o010012; -pub const B2000000: crate::speed_t = 0o010013; -pub const B2500000: crate::speed_t = 0o010014; -pub const B3000000: crate::speed_t = 0o010015; -pub const B3500000: crate::speed_t = 0o010016; -pub const B4000000: crate::speed_t = 0o010017; - -pub const EDEADLK: c_int = 35; -pub const EDEADLOCK: c_int = EDEADLK; - -pub const EXTPROC: crate::tcflag_t = 0x00010000; -pub const VEOL: usize = 11; -pub const VEOL2: usize = 16; -pub const VMIN: usize = 6; -pub const IEXTEN: crate::tcflag_t = 0x00008000; -pub const TOSTOP: crate::tcflag_t = 0x00000100; -pub const FLUSHO: crate::tcflag_t = 0x00001000; - -cfg_if! 
{
-    if #[cfg(target_vendor = "wali")] {
-        mod wali;
-        pub use self::wali::*;
-    }
-}
diff --git a/vendor/libc/src/unix/linux_like/linux/musl/b64/wasm32/wali.rs b/vendor/libc/src/unix/linux_like/linux/musl/b64/wasm32/wali.rs
deleted file mode 100644
index bda5c241c1d2d9..00000000000000
--- a/vendor/libc/src/unix/linux_like/linux/musl/b64/wasm32/wali.rs
+++ /dev/null
@@ -1,441 +0,0 @@
-//! WebAssembly Linux Interface syscall specification
-
-// --- Autogenerated from WALI/scripts/autogen.py ---
-#[link(wasm_import_module = "wali")]
-extern "C" {
-    /* 0 */
-    #[link_name = "SYS_read"]
-    pub fn __syscall_SYS_read(a1: i32, a2: i32, a3: u32) -> ::c_long;
-    /* 1 */
-    #[link_name = "SYS_write"]
-    pub fn __syscall_SYS_write(a1: i32, a2: i32, a3: u32) -> ::c_long;
-    /* 2 */
-    #[link_name = "SYS_open"]
-    pub fn __syscall_SYS_open(a1: i32, a2: i32, a3: i32) -> ::c_long;
-    /* 3 */
-    #[link_name = "SYS_close"]
-    pub fn __syscall_SYS_close(a1: i32) -> ::c_long;
-    /* 4 */
-    #[link_name = "SYS_stat"]
-    pub fn __syscall_SYS_stat(a1: i32, a2: i32) -> ::c_long;
-    /* 5 */
-    #[link_name = "SYS_fstat"]
-    pub fn __syscall_SYS_fstat(a1: i32, a2: i32) -> ::c_long;
-    /* 6 */
-    #[link_name = "SYS_lstat"]
-    pub fn __syscall_SYS_lstat(a1: i32, a2: i32) -> ::c_long;
-    /* 7 */
-    #[link_name = "SYS_poll"]
-    pub fn __syscall_SYS_poll(a1: i32, a2: u32, a3: i32) -> ::c_long;
-    /* 8 */
-    #[link_name = "SYS_lseek"]
-    pub fn __syscall_SYS_lseek(a1: i32, a2: i64, a3: i32) -> ::c_long;
-    /* 9 */
-    #[link_name = "SYS_mmap"]
-    pub fn __syscall_SYS_mmap(a1: i32, a2: u32, a3: i32, a4: i32, a5: i32, a6: i64) -> ::c_long;
-    /* 10 */
-    #[link_name = "SYS_mprotect"]
-    pub fn __syscall_SYS_mprotect(a1: i32, a2: u32, a3: i32) -> ::c_long;
-    /* 11 */
-    #[link_name = "SYS_munmap"]
-    pub fn __syscall_SYS_munmap(a1: i32, a2: u32) -> ::c_long;
-    /* 12 */
-    #[link_name = "SYS_brk"]
-    pub fn __syscall_SYS_brk(a1: i32) -> ::c_long;
-    /* 13 */
-    #[link_name = "SYS_rt_sigaction"]
-    pub fn __syscall_SYS_rt_sigaction(a1: i32, a2: i32, a3: i32, a4: u32) -> ::c_long;
-    /* 14 */
-    #[link_name = "SYS_rt_sigprocmask"]
-    pub fn __syscall_SYS_rt_sigprocmask(a1: i32, a2: i32, a3: i32, a4: u32) -> ::c_long;
-    /* 15 */
-    #[link_name = "SYS_rt_sigreturn"]
-    pub fn __syscall_SYS_rt_sigreturn(a1: i64) -> ::c_long;
-    /* 16 */
-    #[link_name = "SYS_ioctl"]
-    pub fn __syscall_SYS_ioctl(a1: i32, a2: i32, a3: i32) -> ::c_long;
-    /* 17 */
-    #[link_name = "SYS_pread64"]
-    pub fn __syscall_SYS_pread64(a1: i32, a2: i32, a3: u32, a4: i64) -> ::c_long;
-    /* 18 */
-    #[link_name = "SYS_pwrite64"]
-    pub fn __syscall_SYS_pwrite64(a1: i32, a2: i32, a3: u32, a4: i64) -> ::c_long;
-    /* 19 */
-    #[link_name = "SYS_readv"]
-    pub fn __syscall_SYS_readv(a1: i32, a2: i32, a3: i32) -> ::c_long;
-    /* 20 */
-    #[link_name = "SYS_writev"]
-    pub fn __syscall_SYS_writev(a1: i32, a2: i32, a3: i32) -> ::c_long;
-    /* 21 */
-    #[link_name = "SYS_access"]
-    pub fn __syscall_SYS_access(a1: i32, a2: i32) -> ::c_long;
-    /* 22 */
-    #[link_name = "SYS_pipe"]
-    pub fn __syscall_SYS_pipe(a1: i32) -> ::c_long;
-    /* 23 */
-    #[link_name = "SYS_select"]
-    pub fn __syscall_SYS_select(a1: i32, a2: i32, a3: i32, a4: i32, a5: i32) -> ::c_long;
-    /* 24 */
-    #[link_name = "SYS_sched_yield"]
-    pub fn __syscall_SYS_sched_yield() -> ::c_long;
-    /* 25 */
-    #[link_name = "SYS_mremap"]
-    pub fn __syscall_SYS_mremap(a1: i32, a2: u32, a3: u32, a4: i32, a5: i32) -> ::c_long;
-    /* 26 */
-    #[link_name = "SYS_msync"]
-    pub fn __syscall_SYS_msync(a1: i32, a2: u32, a3: i32) -> ::c_long;
-    /* 28 */
-    #[link_name = "SYS_madvise"]
-    pub
fn __syscall_SYS_madvise(a1: i32, a2: u32, a3: i32) -> ::c_long; - /* 32 */ - #[link_name = "SYS_dup"] - pub fn __syscall_SYS_dup(a1: i32) -> ::c_long; - /* 33 */ - #[link_name = "SYS_dup2"] - pub fn __syscall_SYS_dup2(a1: i32, a2: i32) -> ::c_long; - /* 35 */ - #[link_name = "SYS_nanosleep"] - pub fn __syscall_SYS_nanosleep(a1: i32, a2: i32) -> ::c_long; - /* 37 */ - #[link_name = "SYS_alarm"] - pub fn __syscall_SYS_alarm(a1: i32) -> ::c_long; - /* 38 */ - #[link_name = "SYS_setitimer"] - pub fn __syscall_SYS_setitimer(a1: i32, a2: i32, a3: i32) -> ::c_long; - /* 39 */ - #[link_name = "SYS_getpid"] - pub fn __syscall_SYS_getpid() -> ::c_long; - /* 41 */ - #[link_name = "SYS_socket"] - pub fn __syscall_SYS_socket(a1: i32, a2: i32, a3: i32) -> ::c_long; - /* 42 */ - #[link_name = "SYS_connect"] - pub fn __syscall_SYS_connect(a1: i32, a2: i32, a3: u32) -> ::c_long; - /* 43 */ - #[link_name = "SYS_accept"] - pub fn __syscall_SYS_accept(a1: i32, a2: i32, a3: i32) -> ::c_long; - /* 44 */ - #[link_name = "SYS_sendto"] - pub fn __syscall_SYS_sendto(a1: i32, a2: i32, a3: u32, a4: i32, a5: i32, a6: u32) -> ::c_long; - /* 45 */ - #[link_name = "SYS_recvfrom"] - pub fn __syscall_SYS_recvfrom(a1: i32, a2: i32, a3: u32, a4: i32, a5: i32, a6: i32) - -> ::c_long; - /* 46 */ - #[link_name = "SYS_sendmsg"] - pub fn __syscall_SYS_sendmsg(a1: i32, a2: i32, a3: i32) -> ::c_long; - /* 47 */ - #[link_name = "SYS_recvmsg"] - pub fn __syscall_SYS_recvmsg(a1: i32, a2: i32, a3: i32) -> ::c_long; - /* 48 */ - #[link_name = "SYS_shutdown"] - pub fn __syscall_SYS_shutdown(a1: i32, a2: i32) -> ::c_long; - /* 49 */ - #[link_name = "SYS_bind"] - pub fn __syscall_SYS_bind(a1: i32, a2: i32, a3: u32) -> ::c_long; - /* 50 */ - #[link_name = "SYS_listen"] - pub fn __syscall_SYS_listen(a1: i32, a2: i32) -> ::c_long; - /* 51 */ - #[link_name = "SYS_getsockname"] - pub fn __syscall_SYS_getsockname(a1: i32, a2: i32, a3: i32) -> ::c_long; - /* 52 */ - #[link_name = "SYS_getpeername"] - pub fn __syscall_SYS_getpeername(a1: i32, a2: i32, a3: i32) -> ::c_long; - /* 53 */ - #[link_name = "SYS_socketpair"] - pub fn __syscall_SYS_socketpair(a1: i32, a2: i32, a3: i32, a4: i32) -> ::c_long; - /* 54 */ - #[link_name = "SYS_setsockopt"] - pub fn __syscall_SYS_setsockopt(a1: i32, a2: i32, a3: i32, a4: i32, a5: u32) -> ::c_long; - /* 55 */ - #[link_name = "SYS_getsockopt"] - pub fn __syscall_SYS_getsockopt(a1: i32, a2: i32, a3: i32, a4: i32, a5: i32) -> ::c_long; - /* 57 */ - #[link_name = "SYS_fork"] - pub fn __syscall_SYS_fork() -> ::c_long; - /* 59 */ - #[link_name = "SYS_execve"] - pub fn __syscall_SYS_execve(a1: i32, a2: i32, a3: i32) -> ::c_long; - /* 60 */ - #[link_name = "SYS_exit"] - pub fn __syscall_SYS_exit(a1: i32) -> ::c_long; - /* 61 */ - #[link_name = "SYS_wait4"] - pub fn __syscall_SYS_wait4(a1: i32, a2: i32, a3: i32, a4: i32) -> ::c_long; - /* 62 */ - #[link_name = "SYS_kill"] - pub fn __syscall_SYS_kill(a1: i32, a2: i32) -> ::c_long; - /* 63 */ - #[link_name = "SYS_uname"] - pub fn __syscall_SYS_uname(a1: i32) -> ::c_long; - /* 72 */ - #[link_name = "SYS_fcntl"] - pub fn __syscall_SYS_fcntl(a1: i32, a2: i32, a3: i32) -> ::c_long; - /* 73 */ - #[link_name = "SYS_flock"] - pub fn __syscall_SYS_flock(a1: i32, a2: i32) -> ::c_long; - /* 74 */ - #[link_name = "SYS_fsync"] - pub fn __syscall_SYS_fsync(a1: i32) -> ::c_long; - /* 75 */ - #[link_name = "SYS_fdatasync"] - pub fn __syscall_SYS_fdatasync(a1: i32) -> ::c_long; - /* 77 */ - #[link_name = "SYS_ftruncate"] - pub fn __syscall_SYS_ftruncate(a1: i32, a2: i64) -> ::c_long; - /* 
78 */ - #[link_name = "SYS_getdents"] - pub fn __syscall_SYS_getdents(a1: i32, a2: i32, a3: i32) -> ::c_long; - /* 79 */ - #[link_name = "SYS_getcwd"] - pub fn __syscall_SYS_getcwd(a1: i32, a2: u32) -> ::c_long; - /* 80 */ - #[link_name = "SYS_chdir"] - pub fn __syscall_SYS_chdir(a1: i32) -> ::c_long; - /* 81 */ - #[link_name = "SYS_fchdir"] - pub fn __syscall_SYS_fchdir(a1: i32) -> ::c_long; - /* 82 */ - #[link_name = "SYS_rename"] - pub fn __syscall_SYS_rename(a1: i32, a2: i32) -> ::c_long; - /* 83 */ - #[link_name = "SYS_mkdir"] - pub fn __syscall_SYS_mkdir(a1: i32, a2: i32) -> ::c_long; - /* 84 */ - #[link_name = "SYS_rmdir"] - pub fn __syscall_SYS_rmdir(a1: i32) -> ::c_long; - /* 86 */ - #[link_name = "SYS_link"] - pub fn __syscall_SYS_link(a1: i32, a2: i32) -> ::c_long; - /* 87 */ - #[link_name = "SYS_unlink"] - pub fn __syscall_SYS_unlink(a1: i32) -> ::c_long; - /* 88 */ - #[link_name = "SYS_symlink"] - pub fn __syscall_SYS_symlink(a1: i32, a2: i32) -> ::c_long; - /* 89 */ - #[link_name = "SYS_readlink"] - pub fn __syscall_SYS_readlink(a1: i32, a2: i32, a3: u32) -> ::c_long; - /* 90 */ - #[link_name = "SYS_chmod"] - pub fn __syscall_SYS_chmod(a1: i32, a2: i32) -> ::c_long; - /* 91 */ - #[link_name = "SYS_fchmod"] - pub fn __syscall_SYS_fchmod(a1: i32, a2: i32) -> ::c_long; - /* 92 */ - #[link_name = "SYS_chown"] - pub fn __syscall_SYS_chown(a1: i32, a2: i32, a3: i32) -> ::c_long; - /* 93 */ - #[link_name = "SYS_fchown"] - pub fn __syscall_SYS_fchown(a1: i32, a2: i32, a3: i32) -> ::c_long; - /* 95 */ - #[link_name = "SYS_umask"] - pub fn __syscall_SYS_umask(a1: i32) -> ::c_long; - /* 97 */ - #[link_name = "SYS_getrlimit"] - pub fn __syscall_SYS_getrlimit(a1: i32, a2: i32) -> ::c_long; - /* 98 */ - #[link_name = "SYS_getrusage"] - pub fn __syscall_SYS_getrusage(a1: i32, a2: i32) -> ::c_long; - /* 99 */ - #[link_name = "SYS_sysinfo"] - pub fn __syscall_SYS_sysinfo(a1: i32) -> ::c_long; - /* 102 */ - #[link_name = "SYS_getuid"] - pub fn __syscall_SYS_getuid() -> ::c_long; - /* 104 */ - #[link_name = "SYS_getgid"] - pub fn __syscall_SYS_getgid() -> ::c_long; - /* 105 */ - #[link_name = "SYS_setuid"] - pub fn __syscall_SYS_setuid(a1: i32) -> ::c_long; - /* 106 */ - #[link_name = "SYS_setgid"] - pub fn __syscall_SYS_setgid(a1: i32) -> ::c_long; - /* 107 */ - #[link_name = "SYS_geteuid"] - pub fn __syscall_SYS_geteuid() -> ::c_long; - /* 108 */ - #[link_name = "SYS_getegid"] - pub fn __syscall_SYS_getegid() -> ::c_long; - /* 109 */ - #[link_name = "SYS_setpgid"] - pub fn __syscall_SYS_setpgid(a1: i32, a2: i32) -> ::c_long; - /* 110 */ - #[link_name = "SYS_getppid"] - pub fn __syscall_SYS_getppid() -> ::c_long; - /* 112 */ - #[link_name = "SYS_setsid"] - pub fn __syscall_SYS_setsid() -> ::c_long; - /* 113 */ - #[link_name = "SYS_setreuid"] - pub fn __syscall_SYS_setreuid(a1: i32, a2: i32) -> ::c_long; - /* 114 */ - #[link_name = "SYS_setregid"] - pub fn __syscall_SYS_setregid(a1: i32, a2: i32) -> ::c_long; - /* 115 */ - #[link_name = "SYS_getgroups"] - pub fn __syscall_SYS_getgroups(a1: u32, a2: i32) -> ::c_long; - /* 116 */ - #[link_name = "SYS_setgroups"] - pub fn __syscall_SYS_setgroups(a1: u32, a2: i32) -> ::c_long; - /* 117 */ - #[link_name = "SYS_setresuid"] - pub fn __syscall_SYS_setresuid(a1: i32, a2: i32, a3: i32) -> ::c_long; - /* 119 */ - #[link_name = "SYS_setresgid"] - pub fn __syscall_SYS_setresgid(a1: i32, a2: i32, a3: i32) -> ::c_long; - /* 121 */ - #[link_name = "SYS_getpgid"] - pub fn __syscall_SYS_getpgid(a1: i32) -> ::c_long; - /* 124 */ - #[link_name = "SYS_getsid"] - 
pub fn __syscall_SYS_getsid(a1: i32) -> ::c_long; - /* 127 */ - #[link_name = "SYS_rt_sigpending"] - pub fn __syscall_SYS_rt_sigpending(a1: i32, a2: u32) -> ::c_long; - /* 130 */ - #[link_name = "SYS_rt_sigsuspend"] - pub fn __syscall_SYS_rt_sigsuspend(a1: i32, a2: u32) -> ::c_long; - /* 131 */ - #[link_name = "SYS_sigaltstack"] - pub fn __syscall_SYS_sigaltstack(a1: i32, a2: i32) -> ::c_long; - /* 132 */ - #[link_name = "SYS_utime"] - pub fn __syscall_SYS_utime(a1: i32, a2: i32) -> ::c_long; - /* 137 */ - #[link_name = "SYS_statfs"] - pub fn __syscall_SYS_statfs(a1: i32, a2: i32) -> ::c_long; - /* 138 */ - #[link_name = "SYS_fstatfs"] - pub fn __syscall_SYS_fstatfs(a1: i32, a2: i32) -> ::c_long; - /* 157 */ - #[link_name = "SYS_prctl"] - pub fn __syscall_SYS_prctl(a1: i32, a2: u64, a3: u64, a4: u64, a5: u64) -> ::c_long; - /* 160 */ - #[link_name = "SYS_setrlimit"] - pub fn __syscall_SYS_setrlimit(a1: i32, a2: i32) -> ::c_long; - /* 161 */ - #[link_name = "SYS_chroot"] - pub fn __syscall_SYS_chroot(a1: i32) -> ::c_long; - /* 186 */ - #[link_name = "SYS_gettid"] - pub fn __syscall_SYS_gettid() -> ::c_long; - /* 200 */ - #[link_name = "SYS_tkill"] - pub fn __syscall_SYS_tkill(a1: i32, a2: i32) -> ::c_long; - /* 202 */ - #[link_name = "SYS_futex"] - pub fn __syscall_SYS_futex(a1: i32, a2: i32, a3: i32, a4: i32, a5: i32, a6: i32) -> ::c_long; - /* 204 */ - #[link_name = "SYS_sched_getaffinity"] - pub fn __syscall_SYS_sched_getaffinity(a1: i32, a2: u32, a3: i32) -> ::c_long; - /* 217 */ - #[link_name = "SYS_getdents64"] - pub fn __syscall_SYS_getdents64(a1: i32, a2: i32, a3: i32) -> ::c_long; - /* 218 */ - #[link_name = "SYS_set_tid_address"] - pub fn __syscall_SYS_set_tid_address(a1: i32) -> ::c_long; - /* 221 */ - #[link_name = "SYS_fadvise"] - pub fn __syscall_SYS_fadvise(a1: i32, a2: i64, a3: i64, a4: i32) -> ::c_long; - /* 228 */ - #[link_name = "SYS_clock_gettime"] - pub fn __syscall_SYS_clock_gettime(a1: i32, a2: i32) -> ::c_long; - /* 229 */ - #[link_name = "SYS_clock_getres"] - pub fn __syscall_SYS_clock_getres(a1: i32, a2: i32) -> ::c_long; - /* 230 */ - #[link_name = "SYS_clock_nanosleep"] - pub fn __syscall_SYS_clock_nanosleep(a1: i32, a2: i32, a3: i32, a4: i32) -> ::c_long; - /* 231 */ - #[link_name = "SYS_exit_group"] - pub fn __syscall_SYS_exit_group(a1: i32) -> ::c_long; - /* 233 */ - #[link_name = "SYS_epoll_ctl"] - pub fn __syscall_SYS_epoll_ctl(a1: i32, a2: i32, a3: i32, a4: i32) -> ::c_long; - /* 257 */ - #[link_name = "SYS_openat"] - pub fn __syscall_SYS_openat(a1: i32, a2: i32, a3: i32, a4: i32) -> ::c_long; - /* 258 */ - #[link_name = "SYS_mkdirat"] - pub fn __syscall_SYS_mkdirat(a1: i32, a2: i32, a3: i32) -> ::c_long; - /* 260 */ - #[link_name = "SYS_fchownat"] - pub fn __syscall_SYS_fchownat(a1: i32, a2: i32, a3: i32, a4: i32, a5: i32) -> ::c_long; - /* 262 */ - #[link_name = "SYS_fstatat"] - pub fn __syscall_SYS_fstatat(a1: i32, a2: i32, a3: i32, a4: i32) -> ::c_long; - /* 263 */ - #[link_name = "SYS_unlinkat"] - pub fn __syscall_SYS_unlinkat(a1: i32, a2: i32, a3: i32) -> ::c_long; - /* 265 */ - #[link_name = "SYS_linkat"] - pub fn __syscall_SYS_linkat(a1: i32, a2: i32, a3: i32, a4: i32, a5: i32) -> ::c_long; - /* 266 */ - #[link_name = "SYS_symlinkat"] - pub fn __syscall_SYS_symlinkat(a1: i32, a2: i32, a3: i32) -> ::c_long; - /* 267 */ - #[link_name = "SYS_readlinkat"] - pub fn __syscall_SYS_readlinkat(a1: i32, a2: i32, a3: i32, a4: u32) -> ::c_long; - /* 268 */ - #[link_name = "SYS_fchmodat"] - pub fn __syscall_SYS_fchmodat(a1: i32, a2: i32, a3: i32, a4: i32) -> 
::c_long;
-    /* 269 */
-    #[link_name = "SYS_faccessat"]
-    pub fn __syscall_SYS_faccessat(a1: i32, a2: i32, a3: i32, a4: i32) -> ::c_long;
-    /* 270 */
-    #[link_name = "SYS_pselect6"]
-    pub fn __syscall_SYS_pselect6(a1: i32, a2: i32, a3: i32, a4: i32, a5: i32, a6: i32)
-        -> ::c_long;
-    /* 271 */
-    #[link_name = "SYS_ppoll"]
-    pub fn __syscall_SYS_ppoll(a1: i32, a2: u32, a3: i32, a4: i32, a5: u32) -> ::c_long;
-    /* 280 */
-    #[link_name = "SYS_utimensat"]
-    pub fn __syscall_SYS_utimensat(a1: i32, a2: i32, a3: i32, a4: i32) -> ::c_long;
-    /* 281 */
-    #[link_name = "SYS_epoll_pwait"]
-    pub fn __syscall_SYS_epoll_pwait(
-        a1: i32,
-        a2: i32,
-        a3: i32,
-        a4: i32,
-        a5: i32,
-        a6: u32,
-    ) -> ::c_long;
-    /* 284 */
-    #[link_name = "SYS_eventfd"]
-    pub fn __syscall_SYS_eventfd(a1: i32) -> ::c_long;
-    /* 288 */
-    #[link_name = "SYS_accept4"]
-    pub fn __syscall_SYS_accept4(a1: i32, a2: i32, a3: i32, a4: i32) -> ::c_long;
-    /* 290 */
-    #[link_name = "SYS_eventfd2"]
-    pub fn __syscall_SYS_eventfd2(a1: i32, a2: i32) -> ::c_long;
-    /* 291 */
-    #[link_name = "SYS_epoll_create1"]
-    pub fn __syscall_SYS_epoll_create1(a1: i32) -> ::c_long;
-    /* 292 */
-    #[link_name = "SYS_dup3"]
-    pub fn __syscall_SYS_dup3(a1: i32, a2: i32, a3: i32) -> ::c_long;
-    /* 293 */
-    #[link_name = "SYS_pipe2"]
-    pub fn __syscall_SYS_pipe2(a1: i32, a2: i32) -> ::c_long;
-    /* 302 */
-    #[link_name = "SYS_prlimit64"]
-    pub fn __syscall_SYS_prlimit64(a1: i32, a2: i32, a3: i32, a4: i32) -> ::c_long;
-    /* 316 */
-    #[link_name = "SYS_renameat2"]
-    pub fn __syscall_SYS_renameat2(a1: i32, a2: i32, a3: i32, a4: i32, a5: i32) -> ::c_long;
-    /* 318 */
-    #[link_name = "SYS_getrandom"]
-    pub fn __syscall_SYS_getrandom(a1: i32, a2: u32, a3: i32) -> ::c_long;
-    /* 332 */
-    #[link_name = "SYS_statx"]
-    pub fn __syscall_SYS_statx(a1: i32, a2: i32, a3: i32, a4: i32, a5: i32) -> ::c_long;
-    /* 439 */
-    #[link_name = "SYS_faccessat2"]
-    pub fn __syscall_SYS_faccessat2(a1: i32, a2: i32, a3: i32, a4: i32) -> ::c_long;
-}
diff --git a/vendor/libc/src/unix/linux_like/linux/musl/b64/x86_64/mod.rs b/vendor/libc/src/unix/linux_like/linux/musl/b64/x86_64/mod.rs
deleted file mode 100644
index ce8319f015e975..00000000000000
--- a/vendor/libc/src/unix/linux_like/linux/musl/b64/x86_64/mod.rs
+++ /dev/null
@@ -1,915 +0,0 @@
-use crate::off_t;
-use crate::prelude::*;
-
-pub type wchar_t = i32;
-pub type nlink_t = u64;
-pub type blksize_t = c_long;
-pub type __u64 = c_ulonglong;
-pub type __s64 = c_longlong;
-pub type greg_t = i64;
-
-s!
{ - pub struct stat { - pub st_dev: crate::dev_t, - pub st_ino: crate::ino_t, - pub st_nlink: crate::nlink_t, - pub st_mode: crate::mode_t, - pub st_uid: crate::uid_t, - pub st_gid: crate::gid_t, - __pad0: c_int, - pub st_rdev: crate::dev_t, - pub st_size: off_t, - pub st_blksize: crate::blksize_t, - pub st_blocks: crate::blkcnt_t, - pub st_atime: crate::time_t, - pub st_atime_nsec: c_long, - pub st_mtime: crate::time_t, - pub st_mtime_nsec: c_long, - pub st_ctime: crate::time_t, - pub st_ctime_nsec: c_long, - __unused: [c_long; 3], - } - - pub struct stat64 { - pub st_dev: crate::dev_t, - pub st_ino: crate::ino64_t, - pub st_nlink: crate::nlink_t, - pub st_mode: crate::mode_t, - pub st_uid: crate::uid_t, - pub st_gid: crate::gid_t, - __pad0: c_int, - pub st_rdev: crate::dev_t, - pub st_size: off_t, - pub st_blksize: crate::blksize_t, - pub st_blocks: crate::blkcnt64_t, - pub st_atime: crate::time_t, - pub st_atime_nsec: c_long, - pub st_mtime: crate::time_t, - pub st_mtime_nsec: c_long, - pub st_ctime: crate::time_t, - pub st_ctime_nsec: c_long, - __reserved: [c_long; 3], - } - - pub struct user_regs_struct { - pub r15: c_ulong, - pub r14: c_ulong, - pub r13: c_ulong, - pub r12: c_ulong, - pub rbp: c_ulong, - pub rbx: c_ulong, - pub r11: c_ulong, - pub r10: c_ulong, - pub r9: c_ulong, - pub r8: c_ulong, - pub rax: c_ulong, - pub rcx: c_ulong, - pub rdx: c_ulong, - pub rsi: c_ulong, - pub rdi: c_ulong, - pub orig_rax: c_ulong, - pub rip: c_ulong, - pub cs: c_ulong, - pub eflags: c_ulong, - pub rsp: c_ulong, - pub ss: c_ulong, - pub fs_base: c_ulong, - pub gs_base: c_ulong, - pub ds: c_ulong, - pub es: c_ulong, - pub fs: c_ulong, - pub gs: c_ulong, - } - - pub struct user { - pub regs: user_regs_struct, - pub u_fpvalid: c_int, - pub i387: user_fpregs_struct, - pub u_tsize: c_ulong, - pub u_dsize: c_ulong, - pub u_ssize: c_ulong, - pub start_code: c_ulong, - pub start_stack: c_ulong, - pub signal: c_long, - __reserved: c_int, - #[cfg(target_pointer_width = "32")] - __pad1: u32, - pub u_ar0: *mut user_regs_struct, - #[cfg(target_pointer_width = "32")] - __pad2: u32, - pub u_fpstate: *mut user_fpregs_struct, - pub magic: c_ulong, - pub u_comm: [c_char; 32], - pub u_debugreg: [c_ulong; 8], - } - - // GitHub repo: ifduyue/musl/ - // commit: b4b1e10364c8737a632be61582e05a8d3acf5690 - // file: arch/x86_64/bits/signal.h#L80-L84 - pub struct mcontext_t { - pub gregs: [greg_t; 23], - __private: [u64; 9], - } - - pub struct ipc_perm { - #[cfg(musl_v1_2_3)] - pub __key: crate::key_t, - #[cfg(not(musl_v1_2_3))] - #[deprecated( - since = "0.2.173", - note = "This field is incorrectly named and will be changed - to __key in a future release." - )] - pub __ipc_perm_key: crate::key_t, - pub uid: crate::uid_t, - pub gid: crate::gid_t, - pub cuid: crate::uid_t, - pub cgid: crate::gid_t, - pub mode: crate::mode_t, - pub __seq: c_int, - __unused1: c_long, - __unused2: c_long, - } - - #[repr(align(8))] - pub struct clone_args { - pub flags: c_ulonglong, - pub pidfd: c_ulonglong, - pub child_tid: c_ulonglong, - pub parent_tid: c_ulonglong, - pub exit_signal: c_ulonglong, - pub stack: c_ulonglong, - pub stack_size: c_ulonglong, - pub tls: c_ulonglong, - pub set_tid: c_ulonglong, - pub set_tid_size: c_ulonglong, - pub cgroup: c_ulonglong, - } -} - -s_no_extra_traits! 
{ - pub struct user_fpregs_struct { - pub cwd: c_ushort, - pub swd: c_ushort, - pub ftw: c_ushort, - pub fop: c_ushort, - pub rip: c_ulong, - pub rdp: c_ulong, - pub mxcsr: c_uint, - pub mxcr_mask: c_uint, - pub st_space: [c_uint; 32], - pub xmm_space: [c_uint; 64], - padding: [c_uint; 24], - } - - pub struct ucontext_t { - pub uc_flags: c_ulong, - pub uc_link: *mut ucontext_t, - pub uc_stack: crate::stack_t, - pub uc_mcontext: mcontext_t, - pub uc_sigmask: crate::sigset_t, - __private: [u8; 512], - } - - #[repr(align(16))] - pub struct max_align_t { - priv_: [f64; 4], - } -} - -cfg_if! { - if #[cfg(feature = "extra_traits")] { - impl PartialEq for user_fpregs_struct { - fn eq(&self, other: &user_fpregs_struct) -> bool { - self.cwd == other.cwd - && self.swd == other.swd - && self.ftw == other.ftw - && self.fop == other.fop - && self.rip == other.rip - && self.rdp == other.rdp - && self.mxcsr == other.mxcsr - && self.mxcr_mask == other.mxcr_mask - && self.st_space == other.st_space - && self - .xmm_space - .iter() - .zip(other.xmm_space.iter()) - .all(|(a, b)| a == b) - // Ignore padding field - } - } - - impl Eq for user_fpregs_struct {} - - impl hash::Hash for user_fpregs_struct { - fn hash(&self, state: &mut H) { - self.cwd.hash(state); - self.ftw.hash(state); - self.fop.hash(state); - self.rip.hash(state); - self.rdp.hash(state); - self.mxcsr.hash(state); - self.mxcr_mask.hash(state); - self.st_space.hash(state); - self.xmm_space.hash(state); - // Ignore padding field - } - } - - impl PartialEq for ucontext_t { - fn eq(&self, other: &ucontext_t) -> bool { - self.uc_flags == other.uc_flags - && self.uc_link == other.uc_link - && self.uc_stack == other.uc_stack - && self.uc_mcontext == other.uc_mcontext - && self.uc_sigmask == other.uc_sigmask - && self - .__private - .iter() - .zip(other.__private.iter()) - .all(|(a, b)| a == b) - } - } - - impl Eq for ucontext_t {} - - impl hash::Hash for ucontext_t { - fn hash(&self, state: &mut H) { - self.uc_flags.hash(state); - self.uc_link.hash(state); - self.uc_stack.hash(state); - self.uc_mcontext.hash(state); - self.uc_sigmask.hash(state); - self.__private.hash(state); - } - } - } -} - -// Syscall table - -pub const SYS_read: c_long = 0; -pub const SYS_write: c_long = 1; -pub const SYS_open: c_long = 2; -pub const SYS_close: c_long = 3; -pub const SYS_stat: c_long = 4; -pub const SYS_fstat: c_long = 5; -pub const SYS_lstat: c_long = 6; -pub const SYS_poll: c_long = 7; -pub const SYS_lseek: c_long = 8; -pub const SYS_mmap: c_long = 9; -pub const SYS_mprotect: c_long = 10; -pub const SYS_munmap: c_long = 11; -pub const SYS_brk: c_long = 12; -pub const SYS_rt_sigaction: c_long = 13; -pub const SYS_rt_sigprocmask: c_long = 14; -pub const SYS_rt_sigreturn: c_long = 15; -pub const SYS_ioctl: c_long = 16; -pub const SYS_pread64: c_long = 17; -pub const SYS_pwrite64: c_long = 18; -pub const SYS_readv: c_long = 19; -pub const SYS_writev: c_long = 20; -pub const SYS_access: c_long = 21; -pub const SYS_pipe: c_long = 22; -pub const SYS_select: c_long = 23; -pub const SYS_sched_yield: c_long = 24; -pub const SYS_mremap: c_long = 25; -pub const SYS_msync: c_long = 26; -pub const SYS_mincore: c_long = 27; -pub const SYS_madvise: c_long = 28; -pub const SYS_shmget: c_long = 29; -pub const SYS_shmat: c_long = 30; -pub const SYS_shmctl: c_long = 31; -pub const SYS_dup: c_long = 32; -pub const SYS_dup2: c_long = 33; -pub const SYS_pause: c_long = 34; -pub const SYS_nanosleep: c_long = 35; -pub const SYS_getitimer: c_long = 36; -pub const SYS_alarm: c_long = 37; 
-pub const SYS_setitimer: c_long = 38; -pub const SYS_getpid: c_long = 39; -pub const SYS_sendfile: c_long = 40; -pub const SYS_socket: c_long = 41; -pub const SYS_connect: c_long = 42; -pub const SYS_accept: c_long = 43; -pub const SYS_sendto: c_long = 44; -pub const SYS_recvfrom: c_long = 45; -pub const SYS_sendmsg: c_long = 46; -pub const SYS_recvmsg: c_long = 47; -pub const SYS_shutdown: c_long = 48; -pub const SYS_bind: c_long = 49; -pub const SYS_listen: c_long = 50; -pub const SYS_getsockname: c_long = 51; -pub const SYS_getpeername: c_long = 52; -pub const SYS_socketpair: c_long = 53; -pub const SYS_setsockopt: c_long = 54; -pub const SYS_getsockopt: c_long = 55; -pub const SYS_clone: c_long = 56; -pub const SYS_fork: c_long = 57; -pub const SYS_vfork: c_long = 58; -pub const SYS_execve: c_long = 59; -pub const SYS_exit: c_long = 60; -pub const SYS_wait4: c_long = 61; -pub const SYS_kill: c_long = 62; -pub const SYS_uname: c_long = 63; -pub const SYS_semget: c_long = 64; -pub const SYS_semop: c_long = 65; -pub const SYS_semctl: c_long = 66; -pub const SYS_shmdt: c_long = 67; -pub const SYS_msgget: c_long = 68; -pub const SYS_msgsnd: c_long = 69; -pub const SYS_msgrcv: c_long = 70; -pub const SYS_msgctl: c_long = 71; -pub const SYS_fcntl: c_long = 72; -pub const SYS_flock: c_long = 73; -pub const SYS_fsync: c_long = 74; -pub const SYS_fdatasync: c_long = 75; -pub const SYS_truncate: c_long = 76; -pub const SYS_ftruncate: c_long = 77; -pub const SYS_getdents: c_long = 78; -pub const SYS_getcwd: c_long = 79; -pub const SYS_chdir: c_long = 80; -pub const SYS_fchdir: c_long = 81; -pub const SYS_rename: c_long = 82; -pub const SYS_mkdir: c_long = 83; -pub const SYS_rmdir: c_long = 84; -pub const SYS_creat: c_long = 85; -pub const SYS_link: c_long = 86; -pub const SYS_unlink: c_long = 87; -pub const SYS_symlink: c_long = 88; -pub const SYS_readlink: c_long = 89; -pub const SYS_chmod: c_long = 90; -pub const SYS_fchmod: c_long = 91; -pub const SYS_chown: c_long = 92; -pub const SYS_fchown: c_long = 93; -pub const SYS_lchown: c_long = 94; -pub const SYS_umask: c_long = 95; -pub const SYS_gettimeofday: c_long = 96; -pub const SYS_getrlimit: c_long = 97; -pub const SYS_getrusage: c_long = 98; -pub const SYS_sysinfo: c_long = 99; -pub const SYS_times: c_long = 100; -pub const SYS_ptrace: c_long = 101; -pub const SYS_getuid: c_long = 102; -pub const SYS_syslog: c_long = 103; -pub const SYS_getgid: c_long = 104; -pub const SYS_setuid: c_long = 105; -pub const SYS_setgid: c_long = 106; -pub const SYS_geteuid: c_long = 107; -pub const SYS_getegid: c_long = 108; -pub const SYS_setpgid: c_long = 109; -pub const SYS_getppid: c_long = 110; -pub const SYS_getpgrp: c_long = 111; -pub const SYS_setsid: c_long = 112; -pub const SYS_setreuid: c_long = 113; -pub const SYS_setregid: c_long = 114; -pub const SYS_getgroups: c_long = 115; -pub const SYS_setgroups: c_long = 116; -pub const SYS_setresuid: c_long = 117; -pub const SYS_getresuid: c_long = 118; -pub const SYS_setresgid: c_long = 119; -pub const SYS_getresgid: c_long = 120; -pub const SYS_getpgid: c_long = 121; -pub const SYS_setfsuid: c_long = 122; -pub const SYS_setfsgid: c_long = 123; -pub const SYS_getsid: c_long = 124; -pub const SYS_capget: c_long = 125; -pub const SYS_capset: c_long = 126; -pub const SYS_rt_sigpending: c_long = 127; -pub const SYS_rt_sigtimedwait: c_long = 128; -pub const SYS_rt_sigqueueinfo: c_long = 129; -pub const SYS_rt_sigsuspend: c_long = 130; -pub const SYS_sigaltstack: c_long = 131; -pub const SYS_utime: c_long = 132; 
-pub const SYS_mknod: c_long = 133; -pub const SYS_uselib: c_long = 134; -pub const SYS_personality: c_long = 135; -pub const SYS_ustat: c_long = 136; -pub const SYS_statfs: c_long = 137; -pub const SYS_fstatfs: c_long = 138; -pub const SYS_sysfs: c_long = 139; -pub const SYS_getpriority: c_long = 140; -pub const SYS_setpriority: c_long = 141; -pub const SYS_sched_setparam: c_long = 142; -pub const SYS_sched_getparam: c_long = 143; -pub const SYS_sched_setscheduler: c_long = 144; -pub const SYS_sched_getscheduler: c_long = 145; -pub const SYS_sched_get_priority_max: c_long = 146; -pub const SYS_sched_get_priority_min: c_long = 147; -pub const SYS_sched_rr_get_interval: c_long = 148; -pub const SYS_mlock: c_long = 149; -pub const SYS_munlock: c_long = 150; -pub const SYS_mlockall: c_long = 151; -pub const SYS_munlockall: c_long = 152; -pub const SYS_vhangup: c_long = 153; -pub const SYS_modify_ldt: c_long = 154; -pub const SYS_pivot_root: c_long = 155; -pub const SYS__sysctl: c_long = 156; -pub const SYS_prctl: c_long = 157; -pub const SYS_arch_prctl: c_long = 158; -pub const SYS_adjtimex: c_long = 159; -pub const SYS_setrlimit: c_long = 160; -pub const SYS_chroot: c_long = 161; -pub const SYS_sync: c_long = 162; -pub const SYS_acct: c_long = 163; -pub const SYS_settimeofday: c_long = 164; -pub const SYS_mount: c_long = 165; -pub const SYS_umount2: c_long = 166; -pub const SYS_swapon: c_long = 167; -pub const SYS_swapoff: c_long = 168; -pub const SYS_reboot: c_long = 169; -pub const SYS_sethostname: c_long = 170; -pub const SYS_setdomainname: c_long = 171; -pub const SYS_iopl: c_long = 172; -pub const SYS_ioperm: c_long = 173; -#[deprecated(since = "0.2.70", note = "Functional up to 2.6 kernel")] -pub const SYS_create_module: c_long = 174; -pub const SYS_init_module: c_long = 175; -pub const SYS_delete_module: c_long = 176; -#[deprecated(since = "0.2.70", note = "Functional up to 2.6 kernel")] -pub const SYS_get_kernel_syms: c_long = 177; -#[deprecated(since = "0.2.70", note = "Functional up to 2.6 kernel")] -pub const SYS_query_module: c_long = 178; -pub const SYS_quotactl: c_long = 179; -pub const SYS_nfsservctl: c_long = 180; -pub const SYS_getpmsg: c_long = 181; -pub const SYS_putpmsg: c_long = 182; -pub const SYS_afs_syscall: c_long = 183; -pub const SYS_tuxcall: c_long = 184; -pub const SYS_security: c_long = 185; -pub const SYS_gettid: c_long = 186; -pub const SYS_readahead: c_long = 187; -pub const SYS_setxattr: c_long = 188; -pub const SYS_lsetxattr: c_long = 189; -pub const SYS_fsetxattr: c_long = 190; -pub const SYS_getxattr: c_long = 191; -pub const SYS_lgetxattr: c_long = 192; -pub const SYS_fgetxattr: c_long = 193; -pub const SYS_listxattr: c_long = 194; -pub const SYS_llistxattr: c_long = 195; -pub const SYS_flistxattr: c_long = 196; -pub const SYS_removexattr: c_long = 197; -pub const SYS_lremovexattr: c_long = 198; -pub const SYS_fremovexattr: c_long = 199; -pub const SYS_tkill: c_long = 200; -pub const SYS_time: c_long = 201; -pub const SYS_futex: c_long = 202; -pub const SYS_sched_setaffinity: c_long = 203; -pub const SYS_sched_getaffinity: c_long = 204; -pub const SYS_set_thread_area: c_long = 205; -pub const SYS_io_setup: c_long = 206; -pub const SYS_io_destroy: c_long = 207; -pub const SYS_io_getevents: c_long = 208; -pub const SYS_io_submit: c_long = 209; -pub const SYS_io_cancel: c_long = 210; -pub const SYS_get_thread_area: c_long = 211; -pub const SYS_lookup_dcookie: c_long = 212; -pub const SYS_epoll_create: c_long = 213; -pub const SYS_epoll_ctl_old: c_long = 
214; -pub const SYS_epoll_wait_old: c_long = 215; -pub const SYS_remap_file_pages: c_long = 216; -pub const SYS_getdents64: c_long = 217; -pub const SYS_set_tid_address: c_long = 218; -pub const SYS_restart_syscall: c_long = 219; -pub const SYS_semtimedop: c_long = 220; -pub const SYS_fadvise64: c_long = 221; -pub const SYS_timer_create: c_long = 222; -pub const SYS_timer_settime: c_long = 223; -pub const SYS_timer_gettime: c_long = 224; -pub const SYS_timer_getoverrun: c_long = 225; -pub const SYS_timer_delete: c_long = 226; -pub const SYS_clock_settime: c_long = 227; -pub const SYS_clock_gettime: c_long = 228; -pub const SYS_clock_getres: c_long = 229; -pub const SYS_clock_nanosleep: c_long = 230; -pub const SYS_exit_group: c_long = 231; -pub const SYS_epoll_wait: c_long = 232; -pub const SYS_epoll_ctl: c_long = 233; -pub const SYS_tgkill: c_long = 234; -pub const SYS_utimes: c_long = 235; -pub const SYS_vserver: c_long = 236; -pub const SYS_mbind: c_long = 237; -pub const SYS_set_mempolicy: c_long = 238; -pub const SYS_get_mempolicy: c_long = 239; -pub const SYS_mq_open: c_long = 240; -pub const SYS_mq_unlink: c_long = 241; -pub const SYS_mq_timedsend: c_long = 242; -pub const SYS_mq_timedreceive: c_long = 243; -pub const SYS_mq_notify: c_long = 244; -pub const SYS_mq_getsetattr: c_long = 245; -pub const SYS_kexec_load: c_long = 246; -pub const SYS_waitid: c_long = 247; -pub const SYS_add_key: c_long = 248; -pub const SYS_request_key: c_long = 249; -pub const SYS_keyctl: c_long = 250; -pub const SYS_ioprio_set: c_long = 251; -pub const SYS_ioprio_get: c_long = 252; -pub const SYS_inotify_init: c_long = 253; -pub const SYS_inotify_add_watch: c_long = 254; -pub const SYS_inotify_rm_watch: c_long = 255; -pub const SYS_migrate_pages: c_long = 256; -pub const SYS_openat: c_long = 257; -pub const SYS_mkdirat: c_long = 258; -pub const SYS_mknodat: c_long = 259; -pub const SYS_fchownat: c_long = 260; -pub const SYS_futimesat: c_long = 261; -pub const SYS_newfstatat: c_long = 262; -pub const SYS_unlinkat: c_long = 263; -pub const SYS_renameat: c_long = 264; -pub const SYS_linkat: c_long = 265; -pub const SYS_symlinkat: c_long = 266; -pub const SYS_readlinkat: c_long = 267; -pub const SYS_fchmodat: c_long = 268; -pub const SYS_faccessat: c_long = 269; -pub const SYS_pselect6: c_long = 270; -pub const SYS_ppoll: c_long = 271; -pub const SYS_unshare: c_long = 272; -pub const SYS_set_robust_list: c_long = 273; -pub const SYS_get_robust_list: c_long = 274; -pub const SYS_splice: c_long = 275; -pub const SYS_tee: c_long = 276; -pub const SYS_sync_file_range: c_long = 277; -pub const SYS_vmsplice: c_long = 278; -pub const SYS_move_pages: c_long = 279; -pub const SYS_utimensat: c_long = 280; -pub const SYS_epoll_pwait: c_long = 281; -pub const SYS_signalfd: c_long = 282; -pub const SYS_timerfd_create: c_long = 283; -pub const SYS_eventfd: c_long = 284; -pub const SYS_fallocate: c_long = 285; -pub const SYS_timerfd_settime: c_long = 286; -pub const SYS_timerfd_gettime: c_long = 287; -pub const SYS_accept4: c_long = 288; -pub const SYS_signalfd4: c_long = 289; -pub const SYS_eventfd2: c_long = 290; -pub const SYS_epoll_create1: c_long = 291; -pub const SYS_dup3: c_long = 292; -pub const SYS_pipe2: c_long = 293; -pub const SYS_inotify_init1: c_long = 294; -pub const SYS_preadv: c_long = 295; -pub const SYS_pwritev: c_long = 296; -pub const SYS_rt_tgsigqueueinfo: c_long = 297; -pub const SYS_perf_event_open: c_long = 298; -pub const SYS_recvmmsg: c_long = 299; -pub const SYS_fanotify_init: c_long = 300; 
-pub const SYS_fanotify_mark: c_long = 301; -pub const SYS_prlimit64: c_long = 302; -pub const SYS_name_to_handle_at: c_long = 303; -pub const SYS_open_by_handle_at: c_long = 304; -pub const SYS_clock_adjtime: c_long = 305; -pub const SYS_syncfs: c_long = 306; -pub const SYS_sendmmsg: c_long = 307; -pub const SYS_setns: c_long = 308; -pub const SYS_getcpu: c_long = 309; -pub const SYS_process_vm_readv: c_long = 310; -pub const SYS_process_vm_writev: c_long = 311; -pub const SYS_kcmp: c_long = 312; -pub const SYS_finit_module: c_long = 313; -pub const SYS_sched_setattr: c_long = 314; -pub const SYS_sched_getattr: c_long = 315; -pub const SYS_renameat2: c_long = 316; -pub const SYS_seccomp: c_long = 317; -pub const SYS_getrandom: c_long = 318; -pub const SYS_memfd_create: c_long = 319; -pub const SYS_kexec_file_load: c_long = 320; -pub const SYS_bpf: c_long = 321; -pub const SYS_execveat: c_long = 322; -pub const SYS_userfaultfd: c_long = 323; -pub const SYS_membarrier: c_long = 324; -pub const SYS_mlock2: c_long = 325; -pub const SYS_copy_file_range: c_long = 326; -pub const SYS_preadv2: c_long = 327; -pub const SYS_pwritev2: c_long = 328; -pub const SYS_pkey_mprotect: c_long = 329; -pub const SYS_pkey_alloc: c_long = 330; -pub const SYS_pkey_free: c_long = 331; -pub const SYS_statx: c_long = 332; -pub const SYS_io_pgetevents: c_long = 333; -pub const SYS_rseq: c_long = 334; -pub const SYS_pidfd_send_signal: c_long = 424; -pub const SYS_io_uring_setup: c_long = 425; -pub const SYS_io_uring_enter: c_long = 426; -pub const SYS_io_uring_register: c_long = 427; -pub const SYS_open_tree: c_long = 428; -pub const SYS_move_mount: c_long = 429; -pub const SYS_fsopen: c_long = 430; -pub const SYS_fsconfig: c_long = 431; -pub const SYS_fsmount: c_long = 432; -pub const SYS_fspick: c_long = 433; -pub const SYS_pidfd_open: c_long = 434; -pub const SYS_clone3: c_long = 435; -pub const SYS_close_range: c_long = 436; -pub const SYS_openat2: c_long = 437; -pub const SYS_pidfd_getfd: c_long = 438; -pub const SYS_faccessat2: c_long = 439; -pub const SYS_process_madvise: c_long = 440; -pub const SYS_epoll_pwait2: c_long = 441; -pub const SYS_mount_setattr: c_long = 442; -pub const SYS_quotactl_fd: c_long = 443; -pub const SYS_landlock_create_ruleset: c_long = 444; -pub const SYS_landlock_add_rule: c_long = 445; -pub const SYS_landlock_restrict_self: c_long = 446; -pub const SYS_memfd_secret: c_long = 447; -pub const SYS_process_mrelease: c_long = 448; -pub const SYS_futex_waitv: c_long = 449; -pub const SYS_set_mempolicy_home_node: c_long = 450; -pub const SYS_fchmodat2: c_long = 452; -pub const SYS_mseal: c_long = 462; - -// offsets in user_regs_structs, from sys/reg.h -pub const R15: c_int = 0; -pub const R14: c_int = 1; -pub const R13: c_int = 2; -pub const R12: c_int = 3; -pub const RBP: c_int = 4; -pub const RBX: c_int = 5; -pub const R11: c_int = 6; -pub const R10: c_int = 7; -pub const R9: c_int = 8; -pub const R8: c_int = 9; -pub const RAX: c_int = 10; -pub const RCX: c_int = 11; -pub const RDX: c_int = 12; -pub const RSI: c_int = 13; -pub const RDI: c_int = 14; -pub const ORIG_RAX: c_int = 15; -pub const RIP: c_int = 16; -pub const CS: c_int = 17; -pub const EFLAGS: c_int = 18; -pub const RSP: c_int = 19; -pub const SS: c_int = 20; -pub const FS_BASE: c_int = 21; -pub const GS_BASE: c_int = 22; -pub const DS: c_int = 23; -pub const ES: c_int = 24; -pub const FS: c_int = 25; -pub const GS: c_int = 26; - -// offsets in mcontext_t.gregs from bits/signal.h -// GitHub repo: ifduyue/musl/ -// commit: 
b4b1e10364c8737a632be61582e05a8d3acf5690 -// file: arch/x86_64/bits/signal.h#L9-L56 -pub const REG_R8: c_int = 0; -pub const REG_R9: c_int = 1; -pub const REG_R10: c_int = 2; -pub const REG_R11: c_int = 3; -pub const REG_R12: c_int = 4; -pub const REG_R13: c_int = 5; -pub const REG_R14: c_int = 6; -pub const REG_R15: c_int = 7; -pub const REG_RDI: c_int = 8; -pub const REG_RSI: c_int = 9; -pub const REG_RBP: c_int = 10; -pub const REG_RBX: c_int = 11; -pub const REG_RDX: c_int = 12; -pub const REG_RAX: c_int = 13; -pub const REG_RCX: c_int = 14; -pub const REG_RSP: c_int = 15; -pub const REG_RIP: c_int = 16; -pub const REG_EFL: c_int = 17; -pub const REG_CSGSFS: c_int = 18; -pub const REG_ERR: c_int = 19; -pub const REG_TRAPNO: c_int = 20; -pub const REG_OLDMASK: c_int = 21; -pub const REG_CR2: c_int = 22; - -pub const MADV_SOFT_OFFLINE: c_int = 101; -pub const MAP_32BIT: c_int = 0x0040; -pub const O_APPEND: c_int = 1024; -pub const O_DIRECT: c_int = 0x4000; -pub const O_DIRECTORY: c_int = 0x10000; -pub const O_LARGEFILE: c_int = 0o0100000; -pub const O_NOFOLLOW: c_int = 0x20000; -pub const O_CREAT: c_int = 64; -pub const O_EXCL: c_int = 128; -pub const O_NOCTTY: c_int = 256; -pub const O_NONBLOCK: c_int = 2048; -pub const O_SYNC: c_int = 1052672; -pub const O_RSYNC: c_int = 1052672; -pub const O_DSYNC: c_int = 4096; -pub const O_ASYNC: c_int = 0x2000; - -pub const PTRACE_SYSEMU: c_int = 31; -pub const PTRACE_SYSEMU_SINGLESTEP: c_int = 32; - -pub const SIGSTKSZ: size_t = 8192; -pub const MINSIGSTKSZ: size_t = 2048; - -pub const ENAMETOOLONG: c_int = 36; -pub const ENOLCK: c_int = 37; -pub const ENOSYS: c_int = 38; -pub const ENOTEMPTY: c_int = 39; -pub const ELOOP: c_int = 40; -pub const ENOMSG: c_int = 42; -pub const EIDRM: c_int = 43; -pub const ECHRNG: c_int = 44; -pub const EL2NSYNC: c_int = 45; -pub const EL3HLT: c_int = 46; -pub const EL3RST: c_int = 47; -pub const ELNRNG: c_int = 48; -pub const EUNATCH: c_int = 49; -pub const ENOCSI: c_int = 50; -pub const EL2HLT: c_int = 51; -pub const EBADE: c_int = 52; -pub const EBADR: c_int = 53; -pub const EXFULL: c_int = 54; -pub const ENOANO: c_int = 55; -pub const EBADRQC: c_int = 56; -pub const EBADSLT: c_int = 57; -pub const EMULTIHOP: c_int = 72; -pub const EBADMSG: c_int = 74; -pub const EOVERFLOW: c_int = 75; -pub const ENOTUNIQ: c_int = 76; -pub const EBADFD: c_int = 77; -pub const EREMCHG: c_int = 78; -pub const ELIBACC: c_int = 79; -pub const ELIBBAD: c_int = 80; -pub const ELIBSCN: c_int = 81; -pub const ELIBMAX: c_int = 82; -pub const ELIBEXEC: c_int = 83; -pub const EILSEQ: c_int = 84; -pub const ERESTART: c_int = 85; -pub const ESTRPIPE: c_int = 86; -pub const EUSERS: c_int = 87; -pub const ENOTSOCK: c_int = 88; -pub const EDESTADDRREQ: c_int = 89; -pub const EMSGSIZE: c_int = 90; -pub const EPROTOTYPE: c_int = 91; -pub const ENOPROTOOPT: c_int = 92; -pub const EPROTONOSUPPORT: c_int = 93; -pub const ESOCKTNOSUPPORT: c_int = 94; -pub const EOPNOTSUPP: c_int = 95; -pub const ENOTSUP: c_int = EOPNOTSUPP; -pub const EPFNOSUPPORT: c_int = 96; -pub const EAFNOSUPPORT: c_int = 97; -pub const EADDRINUSE: c_int = 98; -pub const EADDRNOTAVAIL: c_int = 99; -pub const ENETDOWN: c_int = 100; -pub const ENETUNREACH: c_int = 101; -pub const ENETRESET: c_int = 102; -pub const ECONNABORTED: c_int = 103; -pub const ECONNRESET: c_int = 104; -pub const ENOBUFS: c_int = 105; -pub const EISCONN: c_int = 106; -pub const ENOTCONN: c_int = 107; -pub const ESHUTDOWN: c_int = 108; -pub const ETOOMANYREFS: c_int = 109; -pub const ETIMEDOUT: c_int = 110; 
-pub const ECONNREFUSED: c_int = 111; -pub const EHOSTDOWN: c_int = 112; -pub const EHOSTUNREACH: c_int = 113; -pub const EALREADY: c_int = 114; -pub const EINPROGRESS: c_int = 115; -pub const ESTALE: c_int = 116; -pub const EUCLEAN: c_int = 117; -pub const ENOTNAM: c_int = 118; -pub const ENAVAIL: c_int = 119; -pub const EISNAM: c_int = 120; -pub const EREMOTEIO: c_int = 121; -pub const EDQUOT: c_int = 122; -pub const ENOMEDIUM: c_int = 123; -pub const EMEDIUMTYPE: c_int = 124; -pub const ECANCELED: c_int = 125; -pub const ENOKEY: c_int = 126; -pub const EKEYEXPIRED: c_int = 127; -pub const EKEYREVOKED: c_int = 128; -pub const EKEYREJECTED: c_int = 129; -pub const EOWNERDEAD: c_int = 130; -pub const ENOTRECOVERABLE: c_int = 131; -pub const ERFKILL: c_int = 132; -pub const EHWPOISON: c_int = 133; - -pub const SA_ONSTACK: c_int = 0x08000000; -pub const SA_SIGINFO: c_int = 0x00000004; -pub const SA_NOCLDWAIT: c_int = 0x00000002; - -pub const SIGCHLD: c_int = 17; -pub const SIGBUS: c_int = 7; -pub const SIGTTIN: c_int = 21; -pub const SIGTTOU: c_int = 22; -pub const SIGXCPU: c_int = 24; -pub const SIGXFSZ: c_int = 25; -pub const SIGVTALRM: c_int = 26; -pub const SIGPROF: c_int = 27; -pub const SIGWINCH: c_int = 28; -pub const SIGUSR1: c_int = 10; -pub const SIGUSR2: c_int = 12; -pub const SIGCONT: c_int = 18; -pub const SIGSTOP: c_int = 19; -pub const SIGTSTP: c_int = 20; -pub const SIGURG: c_int = 23; -pub const SIGIO: c_int = 29; -pub const SIGSYS: c_int = 31; -pub const SIGSTKFLT: c_int = 16; -pub const SIGPOLL: c_int = 29; -pub const SIGPWR: c_int = 30; -pub const SIG_SETMASK: c_int = 2; -pub const SIG_BLOCK: c_int = 0x000000; -pub const SIG_UNBLOCK: c_int = 0x01; - -pub const F_GETLK: c_int = 5; -pub const F_GETOWN: c_int = 9; -pub const F_SETLK: c_int = 6; -pub const F_SETLKW: c_int = 7; -pub const F_SETOWN: c_int = 8; - -pub const VEOF: usize = 4; - -pub const POLLWRNORM: c_short = 0x100; -pub const POLLWRBAND: c_short = 0x200; - -pub const SOCK_STREAM: c_int = 1; -pub const SOCK_DGRAM: c_int = 2; - -pub const MAP_ANON: c_int = 0x0020; -pub const MAP_GROWSDOWN: c_int = 0x0100; -pub const MAP_DENYWRITE: c_int = 0x0800; -pub const MAP_EXECUTABLE: c_int = 0x01000; -pub const MAP_LOCKED: c_int = 0x02000; -pub const MAP_NORESERVE: c_int = 0x04000; -pub const MAP_POPULATE: c_int = 0x08000; -pub const MAP_NONBLOCK: c_int = 0x010000; -pub const MAP_STACK: c_int = 0x020000; -pub const MAP_HUGETLB: c_int = 0x040000; -pub const MAP_SYNC: c_int = 0x080000; - -pub const MCL_CURRENT: c_int = 0x0001; -pub const MCL_FUTURE: c_int = 0x0002; -pub const MCL_ONFAULT: c_int = 0x0004; -pub const CBAUD: crate::tcflag_t = 0o0010017; -pub const TAB1: c_int = 0x00000800; -pub const TAB2: c_int = 0x00001000; -pub const TAB3: c_int = 0x00001800; -pub const CR1: c_int = 0x00000200; -pub const CR2: c_int = 0x00000400; -pub const CR3: c_int = 0x00000600; -pub const FF1: c_int = 0x00008000; -pub const BS1: c_int = 0x00002000; -pub const VT1: c_int = 0x00004000; -pub const VWERASE: usize = 14; -pub const VREPRINT: usize = 12; -pub const VSUSP: usize = 10; -pub const VSTART: usize = 8; -pub const VSTOP: usize = 9; -pub const VDISCARD: usize = 13; -pub const VTIME: usize = 5; -pub const IXON: crate::tcflag_t = 0x00000400; -pub const IXOFF: crate::tcflag_t = 0x00001000; -pub const ONLCR: crate::tcflag_t = 0x4; -pub const CSIZE: crate::tcflag_t = 0x00000030; -pub const CS6: crate::tcflag_t = 0x00000010; -pub const CS7: crate::tcflag_t = 0x00000020; -pub const CS8: crate::tcflag_t = 0x00000030; -pub const CSTOPB: 
crate::tcflag_t = 0x00000040;
-pub const CREAD: crate::tcflag_t = 0x00000080;
-pub const PARENB: crate::tcflag_t = 0x00000100;
-pub const PARODD: crate::tcflag_t = 0x00000200;
-pub const HUPCL: crate::tcflag_t = 0x00000400;
-pub const CLOCAL: crate::tcflag_t = 0x00000800;
-pub const ECHOKE: crate::tcflag_t = 0x00000800;
-pub const ECHOE: crate::tcflag_t = 0x00000010;
-pub const ECHOK: crate::tcflag_t = 0x00000020;
-pub const ECHONL: crate::tcflag_t = 0x00000040;
-pub const ECHOPRT: crate::tcflag_t = 0x00000400;
-pub const ECHOCTL: crate::tcflag_t = 0x00000200;
-pub const ISIG: crate::tcflag_t = 0x00000001;
-pub const ICANON: crate::tcflag_t = 0x00000002;
-pub const PENDIN: crate::tcflag_t = 0x00004000;
-pub const NOFLSH: crate::tcflag_t = 0x00000080;
-pub const CIBAUD: crate::tcflag_t = 0o02003600000;
-pub const CBAUDEX: crate::tcflag_t = 0o010000;
-pub const VSWTC: usize = 7;
-pub const OLCUC: crate::tcflag_t = 0o000002;
-pub const NLDLY: crate::tcflag_t = 0o000400;
-pub const CRDLY: crate::tcflag_t = 0o003000;
-pub const TABDLY: crate::tcflag_t = 0o014000;
-pub const BSDLY: crate::tcflag_t = 0o020000;
-pub const FFDLY: crate::tcflag_t = 0o100000;
-pub const VTDLY: crate::tcflag_t = 0o040000;
-pub const XTABS: crate::tcflag_t = 0o014000;
-pub const B57600: crate::speed_t = 0o010001;
-pub const B115200: crate::speed_t = 0o010002;
-pub const B230400: crate::speed_t = 0o010003;
-pub const B460800: crate::speed_t = 0o010004;
-pub const B500000: crate::speed_t = 0o010005;
-pub const B576000: crate::speed_t = 0o010006;
-pub const B921600: crate::speed_t = 0o010007;
-pub const B1000000: crate::speed_t = 0o010010;
-pub const B1152000: crate::speed_t = 0o010011;
-pub const B1500000: crate::speed_t = 0o010012;
-pub const B2000000: crate::speed_t = 0o010013;
-pub const B2500000: crate::speed_t = 0o010014;
-pub const B3000000: crate::speed_t = 0o010015;
-pub const B3500000: crate::speed_t = 0o010016;
-pub const B4000000: crate::speed_t = 0o010017;
-
-pub const EDEADLK: c_int = 35;
-pub const EDEADLOCK: c_int = EDEADLK;
-
-pub const EXTPROC: crate::tcflag_t = 0x00010000;
-pub const VEOL: usize = 11;
-pub const VEOL2: usize = 16;
-pub const VMIN: usize = 6;
-pub const IEXTEN: crate::tcflag_t = 0x00008000;
-pub const TOSTOP: crate::tcflag_t = 0x00000100;
-pub const FLUSHO: crate::tcflag_t = 0x00001000;
diff --git a/vendor/libc/src/unix/linux_like/linux/musl/lfs64.rs b/vendor/libc/src/unix/linux_like/linux/musl/lfs64.rs
deleted file mode 100644
index e6506fd3d385de..00000000000000
--- a/vendor/libc/src/unix/linux_like/linux/musl/lfs64.rs
+++ /dev/null
@@ -1,239 +0,0 @@
-use crate::off64_t;
-use crate::prelude::*;
-
-#[inline]
-pub unsafe extern "C" fn creat64(path: *const c_char, mode: crate::mode_t) -> c_int {
-    crate::creat(path, mode)
-}
-
-#[inline]
-pub unsafe extern "C" fn fallocate64(
-    fd: c_int,
-    mode: c_int,
-    offset: off64_t,
-    len: off64_t,
-) -> c_int {
-    crate::fallocate(fd, mode, offset, len)
-}
-
-#[inline]
-pub unsafe extern "C" fn fgetpos64(stream: *mut crate::FILE, pos: *mut crate::fpos64_t) -> c_int {
-    crate::fgetpos(stream, pos as *mut _)
-}
-
-#[inline]
-pub unsafe extern "C" fn fopen64(pathname: *const c_char, mode: *const c_char) -> *mut crate::FILE {
-    crate::fopen(pathname, mode)
-}
-
-#[inline]
-pub unsafe extern "C" fn freopen64(
-    pathname: *const c_char,
-    mode: *const c_char,
-    stream: *mut crate::FILE,
-) -> *mut crate::FILE {
-    crate::freopen(pathname, mode, stream)
-}
-
-#[inline]
-pub unsafe extern "C" fn fseeko64(
-    stream: *mut crate::FILE,
-    offset: off64_t,
-
whence: c_int, -) -> c_int { - crate::fseeko(stream, offset, whence) -} - -#[inline] -pub unsafe extern "C" fn fsetpos64(stream: *mut crate::FILE, pos: *const crate::fpos64_t) -> c_int { - crate::fsetpos(stream, pos as *mut _) -} - -#[inline] -pub unsafe extern "C" fn fstat64(fildes: c_int, buf: *mut crate::stat64) -> c_int { - crate::fstat(fildes, buf as *mut _) -} - -#[inline] -pub unsafe extern "C" fn fstatat64( - fd: c_int, - path: *const c_char, - buf: *mut crate::stat64, - flag: c_int, -) -> c_int { - crate::fstatat(fd, path, buf as *mut _, flag) -} - -#[inline] -pub unsafe extern "C" fn fstatfs64(fd: c_int, buf: *mut crate::statfs64) -> c_int { - crate::fstatfs(fd, buf as *mut _) -} - -#[inline] -pub unsafe extern "C" fn fstatvfs64(fd: c_int, buf: *mut crate::statvfs64) -> c_int { - crate::fstatvfs(fd, buf as *mut _) -} - -#[inline] -pub unsafe extern "C" fn ftello64(stream: *mut crate::FILE) -> off64_t { - crate::ftello(stream) -} - -#[inline] -pub unsafe extern "C" fn ftruncate64(fd: c_int, length: off64_t) -> c_int { - crate::ftruncate(fd, length) -} - -#[inline] -pub unsafe extern "C" fn getrlimit64(resource: c_int, rlim: *mut crate::rlimit64) -> c_int { - crate::getrlimit(resource, rlim as *mut _) -} - -#[inline] -pub unsafe extern "C" fn lseek64(fd: c_int, offset: off64_t, whence: c_int) -> off64_t { - crate::lseek(fd, offset, whence) -} - -#[inline] -pub unsafe extern "C" fn lstat64(path: *const c_char, buf: *mut crate::stat64) -> c_int { - crate::lstat(path, buf as *mut _) -} - -#[inline] -pub unsafe extern "C" fn mmap64( - addr: *mut c_void, - length: size_t, - prot: c_int, - flags: c_int, - fd: c_int, - offset: off64_t, -) -> *mut c_void { - crate::mmap(addr, length, prot, flags, fd, offset) -} - -// These functions are variadic in the C ABI since the `mode` argument is "optional". Variadic -// `extern "C"` functions are unstable in Rust so we cannot write a shim function for these -// entrypoints. See https://github.com/rust-lang/rust/issues/44930. -// -// These aliases are mostly fine though, neither function takes a LFS64-namespaced type as an -// argument, nor do their names clash with any declared types. 
-pub use crate::{open as open64, openat as openat64}; - -#[inline] -pub unsafe extern "C" fn posix_fadvise64( - fd: c_int, - offset: off64_t, - len: off64_t, - advice: c_int, -) -> c_int { - crate::posix_fadvise(fd, offset, len, advice) -} - -#[inline] -pub unsafe extern "C" fn posix_fallocate64(fd: c_int, offset: off64_t, len: off64_t) -> c_int { - crate::posix_fallocate(fd, offset, len) -} - -#[inline] -pub unsafe extern "C" fn pread64( - fd: c_int, - buf: *mut c_void, - count: size_t, - offset: off64_t, -) -> ssize_t { - crate::pread(fd, buf, count, offset) -} - -#[inline] -pub unsafe extern "C" fn preadv64( - fd: c_int, - iov: *const crate::iovec, - iovcnt: c_int, - offset: off64_t, -) -> ssize_t { - crate::preadv(fd, iov, iovcnt, offset) -} - -#[inline] -pub unsafe extern "C" fn prlimit64( - pid: crate::pid_t, - resource: c_int, - new_limit: *const crate::rlimit64, - old_limit: *mut crate::rlimit64, -) -> c_int { - crate::prlimit(pid, resource, new_limit as *mut _, old_limit as *mut _) -} - -#[inline] -pub unsafe extern "C" fn pwrite64( - fd: c_int, - buf: *const c_void, - count: size_t, - offset: off64_t, -) -> ssize_t { - crate::pwrite(fd, buf, count, offset) -} - -#[inline] -pub unsafe extern "C" fn pwritev64( - fd: c_int, - iov: *const crate::iovec, - iovcnt: c_int, - offset: off64_t, -) -> ssize_t { - crate::pwritev(fd, iov, iovcnt, offset) -} - -#[inline] -pub unsafe extern "C" fn readdir64(dirp: *mut crate::DIR) -> *mut crate::dirent64 { - crate::readdir(dirp) as *mut _ -} - -#[inline] -pub unsafe extern "C" fn readdir64_r( - dirp: *mut crate::DIR, - entry: *mut crate::dirent64, - result: *mut *mut crate::dirent64, -) -> c_int { - crate::readdir_r(dirp, entry as *mut _, result as *mut _) -} - -#[inline] -pub unsafe extern "C" fn sendfile64( - out_fd: c_int, - in_fd: c_int, - offset: *mut off64_t, - count: size_t, -) -> ssize_t { - crate::sendfile(out_fd, in_fd, offset, count) -} - -#[inline] -pub unsafe extern "C" fn setrlimit64(resource: c_int, rlim: *const crate::rlimit64) -> c_int { - crate::setrlimit(resource, rlim as *mut _) -} - -#[inline] -pub unsafe extern "C" fn stat64(pathname: *const c_char, statbuf: *mut crate::stat64) -> c_int { - crate::stat(pathname, statbuf as *mut _) -} - -#[inline] -pub unsafe extern "C" fn statfs64(pathname: *const c_char, buf: *mut crate::statfs64) -> c_int { - crate::statfs(pathname, buf as *mut _) -} - -#[inline] -pub unsafe extern "C" fn statvfs64(path: *const c_char, buf: *mut crate::statvfs64) -> c_int { - crate::statvfs(path, buf as *mut _) -} - -#[inline] -pub unsafe extern "C" fn tmpfile64() -> *mut crate::FILE { - crate::tmpfile() -} - -#[inline] -pub unsafe extern "C" fn truncate64(path: *const c_char, length: off64_t) -> c_int { - crate::truncate(path, length) -} diff --git a/vendor/libc/src/unix/linux_like/linux/musl/mod.rs b/vendor/libc/src/unix/linux_like/linux/musl/mod.rs deleted file mode 100644 index 4bc11449145c7a..00000000000000 --- a/vendor/libc/src/unix/linux_like/linux/musl/mod.rs +++ /dev/null @@ -1,1006 +0,0 @@ -use crate::off64_t; -use crate::prelude::*; - -pub type pthread_t = *mut c_void; -pub type clock_t = c_long; -#[cfg_attr( - not(feature = "rustc-dep-of-std"), - deprecated( - since = "0.2.80", - note = "This type is changed to 64-bit in musl 1.2.0, \ - we'll follow that change in the future release. \ - See #1848 for more info." 
- ) -)] -pub type time_t = c_long; -pub type suseconds_t = c_long; -pub type ino_t = u64; -pub type off_t = i64; -pub type blkcnt_t = i64; - -pub type shmatt_t = c_ulong; -pub type msgqnum_t = c_ulong; -pub type msglen_t = c_ulong; -pub type fsblkcnt_t = c_ulonglong; -pub type fsblkcnt64_t = c_ulonglong; -pub type fsfilcnt_t = c_ulonglong; -pub type fsfilcnt64_t = c_ulonglong; -pub type rlim_t = c_ulonglong; - -cfg_if! { - if #[cfg(doc)] { - // Used in `linux::arch` to define ioctl constants. - pub(crate) type Ioctl = c_int; - } else { - #[doc(hidden)] - pub type Ioctl = c_int; - } -} - -impl siginfo_t { - pub unsafe fn si_addr(&self) -> *mut c_void { - #[repr(C)] - struct siginfo_sigfault { - _si_signo: c_int, - _si_errno: c_int, - _si_code: c_int, - si_addr: *mut c_void, - } - (*(self as *const siginfo_t as *const siginfo_sigfault)).si_addr - } - - pub unsafe fn si_value(&self) -> crate::sigval { - #[repr(C)] - struct siginfo_si_value { - _si_signo: c_int, - _si_errno: c_int, - _si_code: c_int, - _si_timerid: c_int, - _si_overrun: c_int, - si_value: crate::sigval, - } - (*(self as *const siginfo_t as *const siginfo_si_value)).si_value - } -} - -// Internal, for casts to access union fields -#[repr(C)] -struct sifields_sigchld { - si_pid: crate::pid_t, - si_uid: crate::uid_t, - si_status: c_int, - si_utime: c_long, - si_stime: c_long, -} -impl Copy for sifields_sigchld {} -impl Clone for sifields_sigchld { - fn clone(&self) -> sifields_sigchld { - *self - } -} - -// Internal, for casts to access union fields -#[repr(C)] -union sifields { - _align_pointer: *mut c_void, - sigchld: sifields_sigchld, -} - -// Internal, for casts to access union fields. Note that some variants -// of sifields start with a pointer, which makes the alignment of -// sifields vary on 32-bit and 64-bit architectures. -#[repr(C)] -struct siginfo_f { - _siginfo_base: [c_int; 3], - sifields: sifields, -} - -impl siginfo_t { - unsafe fn sifields(&self) -> &sifields { - &(*(self as *const siginfo_t as *const siginfo_f)).sifields - } - - pub unsafe fn si_pid(&self) -> crate::pid_t { - self.sifields().sigchld.si_pid - } - - pub unsafe fn si_uid(&self) -> crate::uid_t { - self.sifields().sigchld.si_uid - } - - pub unsafe fn si_status(&self) -> c_int { - self.sifields().sigchld.si_status - } - - pub unsafe fn si_utime(&self) -> c_long { - self.sifields().sigchld.si_utime - } - - pub unsafe fn si_stime(&self) -> c_long { - self.sifields().sigchld.si_stime - } -} - -s! 
{ - pub struct aiocb { - pub aio_fildes: c_int, - pub aio_lio_opcode: c_int, - pub aio_reqprio: c_int, - pub aio_buf: *mut c_void, - pub aio_nbytes: size_t, - pub aio_sigevent: crate::sigevent, - __td: *mut c_void, - __lock: [c_int; 2], - __err: c_int, - __ret: ssize_t, - pub aio_offset: off_t, - __next: *mut c_void, - __prev: *mut c_void, - __dummy4: [c_char; 32 - 2 * size_of::<*const ()>()], - } - - #[repr(align(8))] - pub struct fanotify_event_metadata { - pub event_len: c_uint, - pub vers: c_uchar, - pub reserved: c_uchar, - pub metadata_len: c_ushort, - pub mask: c_ulonglong, - pub fd: c_int, - pub pid: c_int, - } - - // FIXME(1.0): This should not implement `PartialEq` - #[allow(unpredictable_function_pointer_comparisons)] - pub struct sigaction { - pub sa_sigaction: crate::sighandler_t, - pub sa_mask: crate::sigset_t, - pub sa_flags: c_int, - pub sa_restorer: Option, - } - - // `mips*` targets swap the `s_errno` and `s_code` fields otherwise this struct is - // target-agnostic (see https://www.openwall.com/lists/musl/2016/01/27/1/2) - // - // FIXME(union): C implementation uses unions - pub struct siginfo_t { - pub si_signo: c_int, - #[cfg(not(any(target_arch = "mips", target_arch = "mips64")))] - pub si_errno: c_int, - pub si_code: c_int, - #[cfg(any(target_arch = "mips", target_arch = "mips64"))] - pub si_errno: c_int, - #[doc(hidden)] - #[deprecated( - since = "0.2.54", - note = "Please leave a comment on https://github.com/rust-lang/libc/pull/1316 \ - if you're using this field" - )] - pub _pad: [c_int; 29], - _align: [usize; 0], - } - - pub struct statvfs { - pub f_bsize: c_ulong, - pub f_frsize: c_ulong, - pub f_blocks: crate::fsblkcnt_t, - pub f_bfree: crate::fsblkcnt_t, - pub f_bavail: crate::fsblkcnt_t, - pub f_files: crate::fsfilcnt_t, - pub f_ffree: crate::fsfilcnt_t, - pub f_favail: crate::fsfilcnt_t, - #[cfg(target_endian = "little")] - pub f_fsid: c_ulong, - #[cfg(target_pointer_width = "32")] - __pad: c_int, - #[cfg(target_endian = "big")] - pub f_fsid: c_ulong, - pub f_flag: c_ulong, - pub f_namemax: c_ulong, - __f_reserved: [c_int; 6], - } - - pub struct statvfs64 { - pub f_bsize: c_ulong, - pub f_frsize: c_ulong, - pub f_blocks: crate::fsblkcnt64_t, - pub f_bfree: crate::fsblkcnt64_t, - pub f_bavail: crate::fsblkcnt64_t, - pub f_files: crate::fsfilcnt64_t, - pub f_ffree: crate::fsfilcnt64_t, - pub f_favail: crate::fsfilcnt64_t, - #[cfg(target_endian = "little")] - pub f_fsid: c_ulong, - #[cfg(target_pointer_width = "32")] - __pad: c_int, - #[cfg(target_endian = "big")] - pub f_fsid: c_ulong, - pub f_flag: c_ulong, - pub f_namemax: c_ulong, - __f_reserved: [c_int; 6], - } - - // PowerPC implementations are special, see the subfolders - #[cfg(not(any(target_arch = "powerpc", target_arch = "powerpc64")))] - pub struct termios { - pub c_iflag: crate::tcflag_t, - pub c_oflag: crate::tcflag_t, - pub c_cflag: crate::tcflag_t, - pub c_lflag: crate::tcflag_t, - pub c_line: crate::cc_t, - pub c_cc: [crate::cc_t; crate::NCCS], - pub __c_ispeed: crate::speed_t, - pub __c_ospeed: crate::speed_t, - } - - pub struct flock { - pub l_type: c_short, - pub l_whence: c_short, - pub l_start: off_t, - pub l_len: off_t, - pub l_pid: crate::pid_t, - } - - pub struct flock64 { - pub l_type: c_short, - pub l_whence: c_short, - pub l_start: off64_t, - pub l_len: off64_t, - pub l_pid: crate::pid_t, - } - - pub struct regex_t { - __re_nsub: size_t, - __opaque: *mut c_void, - __padding: [*mut c_void; 4usize], - __nsub2: size_t, - __padding2: c_char, - } - - pub struct rtentry { - pub rt_pad1: 
c_ulong, - pub rt_dst: crate::sockaddr, - pub rt_gateway: crate::sockaddr, - pub rt_genmask: crate::sockaddr, - pub rt_flags: c_ushort, - pub rt_pad2: c_short, - pub rt_pad3: c_ulong, - pub rt_tos: c_uchar, - pub rt_class: c_uchar, - #[cfg(target_pointer_width = "64")] - pub rt_pad4: [c_short; 3usize], - #[cfg(not(target_pointer_width = "64"))] - pub rt_pad4: [c_short; 1usize], - pub rt_metric: c_short, - pub rt_dev: *mut c_char, - pub rt_mtu: c_ulong, - pub rt_window: c_ulong, - pub rt_irtt: c_ushort, - } - - pub struct __exit_status { - pub e_termination: c_short, - pub e_exit: c_short, - } - - pub struct Elf64_Chdr { - pub ch_type: crate::Elf64_Word, - pub ch_reserved: crate::Elf64_Word, - pub ch_size: crate::Elf64_Xword, - pub ch_addralign: crate::Elf64_Xword, - } - - pub struct Elf32_Chdr { - pub ch_type: crate::Elf32_Word, - pub ch_size: crate::Elf32_Word, - pub ch_addralign: crate::Elf32_Word, - } - - pub struct timex { - pub modes: c_uint, - pub offset: c_long, - pub freq: c_long, - pub maxerror: c_long, - pub esterror: c_long, - pub status: c_int, - pub constant: c_long, - pub precision: c_long, - pub tolerance: c_long, - pub time: crate::timeval, - pub tick: c_long, - pub ppsfreq: c_long, - pub jitter: c_long, - pub shift: c_int, - pub stabil: c_long, - pub jitcnt: c_long, - pub calcnt: c_long, - pub errcnt: c_long, - pub stbcnt: c_long, - pub tai: c_int, - pub __padding: [c_int; 11], - } - - pub struct ntptimeval { - pub time: crate::timeval, - pub maxerror: c_long, - pub esterror: c_long, - } - - // netinet/tcp.h - - pub struct tcp_info { - pub tcpi_state: u8, - pub tcpi_ca_state: u8, - pub tcpi_retransmits: u8, - pub tcpi_probes: u8, - pub tcpi_backoff: u8, - pub tcpi_options: u8, - /// This contains the bitfields `tcpi_snd_wscale` and `tcpi_rcv_wscale`. - /// Each is 4 bits. - pub tcpi_snd_rcv_wscale: u8, - /// This contains the bitfields `tcpi_delivery_rate_app_limited` (1 bit) and - /// `tcpi_fastopen_client_fail` (2 bits). 
- pub tcpi_delivery_fastopen_bitfields: u8, - pub tcpi_rto: u32, - pub tcpi_ato: u32, - pub tcpi_snd_mss: u32, - pub tcpi_rcv_mss: u32, - pub tcpi_unacked: u32, - pub tcpi_sacked: u32, - pub tcpi_lost: u32, - pub tcpi_retrans: u32, - pub tcpi_fackets: u32, - pub tcpi_last_data_sent: u32, - pub tcpi_last_ack_sent: u32, - pub tcpi_last_data_recv: u32, - pub tcpi_last_ack_recv: u32, - pub tcpi_pmtu: u32, - pub tcpi_rcv_ssthresh: u32, - pub tcpi_rtt: u32, - pub tcpi_rttvar: u32, - pub tcpi_snd_ssthresh: u32, - pub tcpi_snd_cwnd: u32, - pub tcpi_advmss: u32, - pub tcpi_reordering: u32, - pub tcpi_rcv_rtt: u32, - pub tcpi_rcv_space: u32, - pub tcpi_total_retrans: u32, - pub tcpi_pacing_rate: u64, - pub tcpi_max_pacing_rate: u64, - pub tcpi_bytes_acked: u64, - pub tcpi_bytes_received: u64, - pub tcpi_segs_out: u32, - pub tcpi_segs_in: u32, - pub tcpi_notsent_bytes: u32, - pub tcpi_min_rtt: u32, - pub tcpi_data_segs_in: u32, - pub tcpi_data_segs_out: u32, - pub tcpi_delivery_rate: u64, - pub tcpi_busy_time: u64, - pub tcpi_rwnd_limited: u64, - pub tcpi_sndbuf_limited: u64, - pub tcpi_delivered: u32, - pub tcpi_delivered_ce: u32, - pub tcpi_bytes_sent: u64, - pub tcpi_bytes_retrans: u64, - pub tcpi_dsack_dups: u32, - pub tcpi_reord_seen: u32, - pub tcpi_rcv_ooopack: u32, - pub tcpi_snd_wnd: u32, - } - - // MIPS/s390x implementation is special (see arch folders) - #[cfg(not(any(target_arch = "mips", target_arch = "mips64", target_arch = "s390x")))] - pub struct statfs { - pub f_type: c_ulong, - pub f_bsize: c_ulong, - pub f_blocks: crate::fsblkcnt_t, - pub f_bfree: crate::fsblkcnt_t, - pub f_bavail: crate::fsblkcnt_t, - pub f_files: crate::fsfilcnt_t, - pub f_ffree: crate::fsfilcnt_t, - pub f_fsid: crate::fsid_t, - pub f_namelen: c_ulong, - pub f_frsize: c_ulong, - pub f_flags: c_ulong, - pub f_spare: [c_ulong; 4], - } - - // MIPS/s390x implementation is special (see arch folders) - #[cfg(not(any(target_arch = "mips", target_arch = "mips64", target_arch = "s390x")))] - pub struct statfs64 { - pub f_type: c_ulong, - pub f_bsize: c_ulong, - pub f_blocks: crate::fsblkcnt64_t, - pub f_bfree: crate::fsblkcnt64_t, - pub f_bavail: crate::fsblkcnt64_t, - pub f_files: crate::fsfilcnt64_t, - pub f_ffree: crate::fsfilcnt64_t, - pub f_fsid: crate::fsid_t, - pub f_namelen: c_ulong, - pub f_frsize: c_ulong, - pub f_flags: c_ulong, - pub f_spare: [c_ulong; 4], - } -} - -s_no_extra_traits! { - pub struct sysinfo { - pub uptime: c_ulong, - pub loads: [c_ulong; 3], - pub totalram: c_ulong, - pub freeram: c_ulong, - pub sharedram: c_ulong, - pub bufferram: c_ulong, - pub totalswap: c_ulong, - pub freeswap: c_ulong, - pub procs: c_ushort, - pub pad: c_ushort, - pub totalhigh: c_ulong, - pub freehigh: c_ulong, - pub mem_unit: c_uint, - pub __reserved: [c_char; 256], - } - - pub struct utmpx { - pub ut_type: c_short, - __ut_pad1: c_short, - pub ut_pid: crate::pid_t, - pub ut_line: [c_char; 32], - pub ut_id: [c_char; 4], - pub ut_user: [c_char; 32], - pub ut_host: [c_char; 256], - pub ut_exit: __exit_status, - - #[cfg(not(musl_v1_2_3))] - #[deprecated( - since = "0.2.173", - note = "The ABI of this field has changed from c_long to c_int with padding, \ - we'll follow that change in the future release. See #4443 for more info." 
- )] - pub ut_session: c_long, - - #[cfg(musl_v1_2_3)] - #[cfg(not(target_endian = "little"))] - __ut_pad2: c_int, - - #[cfg(musl_v1_2_3)] - pub ut_session: c_int, - - #[cfg(musl_v1_2_3)] - #[cfg(target_endian = "little")] - __ut_pad2: c_int, - - pub ut_tv: crate::timeval, - pub ut_addr_v6: [c_uint; 4], - __unused: [c_char; 20], - } -} - -cfg_if! { - if #[cfg(feature = "extra_traits")] { - impl PartialEq for sysinfo { - fn eq(&self, other: &sysinfo) -> bool { - self.uptime == other.uptime - && self.loads == other.loads - && self.totalram == other.totalram - && self.freeram == other.freeram - && self.sharedram == other.sharedram - && self.bufferram == other.bufferram - && self.totalswap == other.totalswap - && self.freeswap == other.freeswap - && self.procs == other.procs - && self.pad == other.pad - && self.totalhigh == other.totalhigh - && self.freehigh == other.freehigh - && self.mem_unit == other.mem_unit - && self - .__reserved - .iter() - .zip(other.__reserved.iter()) - .all(|(a, b)| a == b) - } - } - - impl Eq for sysinfo {} - - impl hash::Hash for sysinfo { - fn hash(&self, state: &mut H) { - self.uptime.hash(state); - self.loads.hash(state); - self.totalram.hash(state); - self.freeram.hash(state); - self.sharedram.hash(state); - self.bufferram.hash(state); - self.totalswap.hash(state); - self.freeswap.hash(state); - self.procs.hash(state); - self.pad.hash(state); - self.totalhigh.hash(state); - self.freehigh.hash(state); - self.mem_unit.hash(state); - self.__reserved.hash(state); - } - } - - impl PartialEq for utmpx { - #[allow(deprecated)] - fn eq(&self, other: &utmpx) -> bool { - self.ut_type == other.ut_type - //&& self.__ut_pad1 == other.__ut_pad1 - && self.ut_pid == other.ut_pid - && self.ut_line == other.ut_line - && self.ut_id == other.ut_id - && self.ut_user == other.ut_user - && self - .ut_host - .iter() - .zip(other.ut_host.iter()) - .all(|(a,b)| a == b) - && self.ut_exit == other.ut_exit - && self.ut_session == other.ut_session - //&& self.__ut_pad2 == other.__ut_pad2 - && self.ut_tv == other.ut_tv - && self.ut_addr_v6 == other.ut_addr_v6 - && self.__unused == other.__unused - } - } - - impl Eq for utmpx {} - - impl hash::Hash for utmpx { - #[allow(deprecated)] - fn hash(&self, state: &mut H) { - self.ut_type.hash(state); - //self.__ut_pad1.hash(state); - self.ut_pid.hash(state); - self.ut_line.hash(state); - self.ut_id.hash(state); - self.ut_user.hash(state); - self.ut_host.hash(state); - self.ut_exit.hash(state); - self.ut_session.hash(state); - //self.__ut_pad2.hash(state); - self.ut_tv.hash(state); - self.ut_addr_v6.hash(state); - self.__unused.hash(state); - } - } - } -} - -// include/sys/mman.h -/* - * Huge page size encoding when MAP_HUGETLB is specified, and a huge page - * size other than the default is desired. See hugetlb_encode.h. - * All known huge page size encodings are provided here. It is the - * responsibility of the application to know which sizes are supported on - * the running system. See mmap(2) man page for details. 
- */ -pub const MAP_HUGE_SHIFT: c_int = 26; -pub const MAP_HUGE_MASK: c_int = 0x3f; - -pub const MAP_HUGE_64KB: c_int = 16 << MAP_HUGE_SHIFT; -pub const MAP_HUGE_512KB: c_int = 19 << MAP_HUGE_SHIFT; -pub const MAP_HUGE_1MB: c_int = 20 << MAP_HUGE_SHIFT; -pub const MAP_HUGE_2MB: c_int = 21 << MAP_HUGE_SHIFT; -pub const MAP_HUGE_8MB: c_int = 23 << MAP_HUGE_SHIFT; -pub const MAP_HUGE_16MB: c_int = 24 << MAP_HUGE_SHIFT; -pub const MAP_HUGE_32MB: c_int = 25 << MAP_HUGE_SHIFT; -pub const MAP_HUGE_256MB: c_int = 28 << MAP_HUGE_SHIFT; -pub const MAP_HUGE_512MB: c_int = 29 << MAP_HUGE_SHIFT; -pub const MAP_HUGE_1GB: c_int = 30 << MAP_HUGE_SHIFT; -pub const MAP_HUGE_2GB: c_int = 31 << MAP_HUGE_SHIFT; -pub const MAP_HUGE_16GB: c_int = 34 << MAP_HUGE_SHIFT; - -pub const MS_RMT_MASK: c_ulong = 0x02800051; - -// include/utmpx.h -pub const EMPTY: c_short = 0; -pub const RUN_LVL: c_short = 1; -pub const BOOT_TIME: c_short = 2; -pub const NEW_TIME: c_short = 3; -pub const OLD_TIME: c_short = 4; -pub const INIT_PROCESS: c_short = 5; -pub const LOGIN_PROCESS: c_short = 6; -pub const USER_PROCESS: c_short = 7; -pub const DEAD_PROCESS: c_short = 8; -pub const ACCOUNTING: c_short = 9; - -pub const SFD_CLOEXEC: c_int = 0x080000; - -#[cfg(not(any(target_arch = "powerpc", target_arch = "powerpc64")))] -pub const NCCS: usize = 32; -#[cfg(any(target_arch = "powerpc", target_arch = "powerpc64"))] -pub const NCCS: usize = 19; - -pub const O_TRUNC: c_int = 512; -pub const O_NOATIME: c_int = 0o1000000; -pub const O_CLOEXEC: c_int = 0x80000; -pub const O_TMPFILE: c_int = 0o20000000 | O_DIRECTORY; - -pub const EBFONT: c_int = 59; -pub const ENOSTR: c_int = 60; -pub const ENODATA: c_int = 61; -pub const ETIME: c_int = 62; -pub const ENOSR: c_int = 63; -pub const ENONET: c_int = 64; -pub const ENOPKG: c_int = 65; -pub const EREMOTE: c_int = 66; -pub const ENOLINK: c_int = 67; -pub const EADV: c_int = 68; -pub const ESRMNT: c_int = 69; -pub const ECOMM: c_int = 70; -pub const EPROTO: c_int = 71; -pub const EDOTDOT: c_int = 73; - -pub const F_OFD_GETLK: c_int = 36; -pub const F_OFD_SETLK: c_int = 37; -pub const F_OFD_SETLKW: c_int = 38; - -pub const F_RDLCK: c_int = 0; -pub const F_WRLCK: c_int = 1; -pub const F_UNLCK: c_int = 2; - -pub const SA_NODEFER: c_int = 0x40000000; -pub const SA_RESETHAND: c_int = 0x80000000; -pub const SA_RESTART: c_int = 0x10000000; -pub const SA_NOCLDSTOP: c_int = 0x00000001; - -pub const EPOLL_CLOEXEC: c_int = 0x80000; - -pub const EFD_CLOEXEC: c_int = 0x80000; - -pub const BUFSIZ: c_uint = 1024; -pub const TMP_MAX: c_uint = 10000; -pub const FOPEN_MAX: c_uint = 1000; -pub const FILENAME_MAX: c_uint = 4096; -pub const O_PATH: c_int = 0o10000000; -pub const O_EXEC: c_int = 0o10000000; -pub const O_SEARCH: c_int = 0o10000000; -pub const O_ACCMODE: c_int = 0o10000003; -pub const O_NDELAY: c_int = O_NONBLOCK; -pub const NI_MAXHOST: crate::socklen_t = 255; -pub const PTHREAD_STACK_MIN: size_t = 2048; - -pub const POSIX_MADV_DONTNEED: c_int = 4; - -pub const MAP_ANONYMOUS: c_int = MAP_ANON; - -pub const SOCK_SEQPACKET: c_int = 5; -pub const SOCK_DCCP: c_int = 6; -pub const SOCK_NONBLOCK: c_int = O_NONBLOCK; -#[deprecated(since = "0.2.70", note = "AF_PACKET must be used instead")] -pub const SOCK_PACKET: c_int = 10; - -pub const SOMAXCONN: c_int = 128; - -#[deprecated(since = "0.2.55", note = "Use SIGSYS instead")] -pub const SIGUNUSED: c_int = crate::SIGSYS; - -pub const __SIZEOF_PTHREAD_CONDATTR_T: usize = 4; -pub const __SIZEOF_PTHREAD_MUTEXATTR_T: usize = 4; -pub const __SIZEOF_PTHREAD_RWLOCKATTR_T: 
usize = 8; -pub const __SIZEOF_PTHREAD_BARRIERATTR_T: usize = 4; - -// FIXME(musl): Value is 1024 for all architectures since 1.2.4 -#[cfg(not(target_arch = "loongarch64"))] -pub const CPU_SETSIZE: c_int = 128; -#[cfg(target_arch = "loongarch64")] -pub const CPU_SETSIZE: c_int = 1024; - -pub const PTRACE_TRACEME: c_int = 0; -pub const PTRACE_PEEKTEXT: c_int = 1; -pub const PTRACE_PEEKDATA: c_int = 2; -pub const PTRACE_PEEKUSER: c_int = 3; -pub const PTRACE_POKETEXT: c_int = 4; -pub const PTRACE_POKEDATA: c_int = 5; -pub const PTRACE_POKEUSER: c_int = 6; -pub const PTRACE_CONT: c_int = 7; -pub const PTRACE_KILL: c_int = 8; -pub const PTRACE_SINGLESTEP: c_int = 9; -pub const PTRACE_GETREGS: c_int = 12; -pub const PTRACE_SETREGS: c_int = 13; -pub const PTRACE_GETFPREGS: c_int = 14; -pub const PTRACE_SETFPREGS: c_int = 15; -pub const PTRACE_ATTACH: c_int = 16; -pub const PTRACE_DETACH: c_int = 17; -pub const PTRACE_GETFPXREGS: c_int = 18; -pub const PTRACE_SETFPXREGS: c_int = 19; -pub const PTRACE_SYSCALL: c_int = 24; -pub const PTRACE_SETOPTIONS: c_int = 0x4200; -pub const PTRACE_GETEVENTMSG: c_int = 0x4201; -pub const PTRACE_GETSIGINFO: c_int = 0x4202; -pub const PTRACE_SETSIGINFO: c_int = 0x4203; -pub const PTRACE_GETREGSET: c_int = 0x4204; -pub const PTRACE_SETREGSET: c_int = 0x4205; -pub const PTRACE_SEIZE: c_int = 0x4206; -pub const PTRACE_INTERRUPT: c_int = 0x4207; -pub const PTRACE_LISTEN: c_int = 0x4208; -pub const PTRACE_PEEKSIGINFO: c_int = 0x4209; -pub const PTRACE_GETSIGMASK: c_uint = 0x420a; -pub const PTRACE_SETSIGMASK: c_uint = 0x420b; - -pub const AF_IB: c_int = 27; -pub const AF_MPLS: c_int = 28; -pub const AF_NFC: c_int = 39; -pub const AF_VSOCK: c_int = 40; -pub const AF_XDP: c_int = 44; -pub const PF_IB: c_int = AF_IB; -pub const PF_MPLS: c_int = AF_MPLS; -pub const PF_NFC: c_int = AF_NFC; -pub const PF_VSOCK: c_int = AF_VSOCK; -pub const PF_XDP: c_int = AF_XDP; - -pub const EFD_NONBLOCK: c_int = crate::O_NONBLOCK; - -pub const SFD_NONBLOCK: c_int = crate::O_NONBLOCK; - -pub const TCSANOW: c_int = 0; -pub const TCSADRAIN: c_int = 1; -pub const TCSAFLUSH: c_int = 2; - -pub const RTLD_GLOBAL: c_int = 0x100; -pub const RTLD_NOLOAD: c_int = 0x4; - -pub const CLOCK_SGI_CYCLE: crate::clockid_t = 10; - -pub const B0: crate::speed_t = 0o000000; -pub const B50: crate::speed_t = 0o000001; -pub const B75: crate::speed_t = 0o000002; -pub const B110: crate::speed_t = 0o000003; -pub const B134: crate::speed_t = 0o000004; -pub const B150: crate::speed_t = 0o000005; -pub const B200: crate::speed_t = 0o000006; -pub const B300: crate::speed_t = 0o000007; -pub const B600: crate::speed_t = 0o000010; -pub const B1200: crate::speed_t = 0o000011; -pub const B1800: crate::speed_t = 0o000012; -pub const B2400: crate::speed_t = 0o000013; -pub const B4800: crate::speed_t = 0o000014; -pub const B9600: crate::speed_t = 0o000015; -pub const B19200: crate::speed_t = 0o000016; -pub const B38400: crate::speed_t = 0o000017; -pub const EXTA: crate::speed_t = B19200; -pub const EXTB: crate::speed_t = B38400; - -pub const REG_OK: c_int = 0; - -pub const PRIO_PROCESS: c_int = 0; -pub const PRIO_PGRP: c_int = 1; -pub const PRIO_USER: c_int = 2; - -pub const ADJ_OFFSET: c_uint = 0x0001; -pub const ADJ_FREQUENCY: c_uint = 0x0002; -pub const ADJ_MAXERROR: c_uint = 0x0004; -pub const ADJ_ESTERROR: c_uint = 0x0008; -pub const ADJ_STATUS: c_uint = 0x0010; -pub const ADJ_TIMECONST: c_uint = 0x0020; -pub const ADJ_TAI: c_uint = 0x0080; -pub const ADJ_SETOFFSET: c_uint = 0x0100; -pub const ADJ_MICRO: c_uint = 0x1000; 
-pub const ADJ_NANO: c_uint = 0x2000; -pub const ADJ_TICK: c_uint = 0x4000; -pub const ADJ_OFFSET_SINGLESHOT: c_uint = 0x8001; -pub const ADJ_OFFSET_SS_READ: c_uint = 0xa001; -pub const MOD_OFFSET: c_uint = ADJ_OFFSET; -pub const MOD_FREQUENCY: c_uint = ADJ_FREQUENCY; -pub const MOD_MAXERROR: c_uint = ADJ_MAXERROR; -pub const MOD_ESTERROR: c_uint = ADJ_ESTERROR; -pub const MOD_STATUS: c_uint = ADJ_STATUS; -pub const MOD_TIMECONST: c_uint = ADJ_TIMECONST; -pub const MOD_CLKB: c_uint = ADJ_TICK; -pub const MOD_CLKA: c_uint = ADJ_OFFSET_SINGLESHOT; -pub const MOD_TAI: c_uint = ADJ_TAI; -pub const MOD_MICRO: c_uint = ADJ_MICRO; -pub const MOD_NANO: c_uint = ADJ_NANO; -pub const STA_PLL: c_int = 0x0001; -pub const STA_PPSFREQ: c_int = 0x0002; -pub const STA_PPSTIME: c_int = 0x0004; -pub const STA_FLL: c_int = 0x0008; -pub const STA_INS: c_int = 0x0010; -pub const STA_DEL: c_int = 0x0020; -pub const STA_UNSYNC: c_int = 0x0040; -pub const STA_FREQHOLD: c_int = 0x0080; -pub const STA_PPSSIGNAL: c_int = 0x0100; -pub const STA_PPSJITTER: c_int = 0x0200; -pub const STA_PPSWANDER: c_int = 0x0400; -pub const STA_PPSERROR: c_int = 0x0800; -pub const STA_CLOCKERR: c_int = 0x1000; -pub const STA_NANO: c_int = 0x2000; -pub const STA_MODE: c_int = 0x4000; -pub const STA_CLK: c_int = 0x8000; -pub const STA_RONLY: c_int = STA_PPSSIGNAL - | STA_PPSJITTER - | STA_PPSWANDER - | STA_PPSERROR - | STA_CLOCKERR - | STA_NANO - | STA_MODE - | STA_CLK; - -pub const TIME_OK: c_int = 0; -pub const TIME_INS: c_int = 1; -pub const TIME_DEL: c_int = 2; -pub const TIME_OOP: c_int = 3; -pub const TIME_WAIT: c_int = 4; -pub const TIME_ERROR: c_int = 5; -pub const TIME_BAD: c_int = TIME_ERROR; -pub const MAXTC: c_long = 6; - -pub const _CS_V6_ENV: c_int = 1148; -pub const _CS_V7_ENV: c_int = 1149; - -pub const CLONE_NEWTIME: c_int = 0x80; - -pub const UT_HOSTSIZE: usize = 256; -pub const UT_LINESIZE: usize = 32; -pub const UT_NAMESIZE: usize = 32; - -cfg_if! { - if #[cfg(target_arch = "s390x")] { - pub const POSIX_FADV_DONTNEED: c_int = 6; - pub const POSIX_FADV_NOREUSE: c_int = 7; - } else { - pub const POSIX_FADV_DONTNEED: c_int = 4; - pub const POSIX_FADV_NOREUSE: c_int = 5; - } -} - -extern "C" { - pub fn sendmmsg( - sockfd: c_int, - msgvec: *mut crate::mmsghdr, - vlen: c_uint, - flags: c_uint, - ) -> c_int; - pub fn recvmmsg( - sockfd: c_int, - msgvec: *mut crate::mmsghdr, - vlen: c_uint, - flags: c_uint, - timeout: *mut crate::timespec, - ) -> c_int; - - pub fn getrlimit(resource: c_int, rlim: *mut crate::rlimit) -> c_int; - pub fn setrlimit(resource: c_int, rlim: *const crate::rlimit) -> c_int; - pub fn prlimit( - pid: crate::pid_t, - resource: c_int, - new_limit: *const crate::rlimit, - old_limit: *mut crate::rlimit, - ) -> c_int; - pub fn gettimeofday(tp: *mut crate::timeval, tz: *mut c_void) -> c_int; - pub fn ptrace(request: c_int, ...) -> c_long; - pub fn getpriority(which: c_int, who: crate::id_t) -> c_int; - pub fn setpriority(which: c_int, who: crate::id_t, prio: c_int) -> c_int; - // Musl targets need the `mask` argument of `fanotify_mark` be specified - // `c_ulonglong` instead of `u64` or there will be a type mismatch between - // `long long unsigned int` and the expected `uint64_t`. 
- pub fn fanotify_mark( - fd: c_int, - flags: c_uint, - mask: c_ulonglong, - dirfd: c_int, - path: *const c_char, - ) -> c_int; - pub fn preadv2( - fd: c_int, - iov: *const crate::iovec, - iovcnt: c_int, - offset: off_t, - flags: c_int, - ) -> ssize_t; - pub fn pwritev2( - fd: c_int, - iov: *const crate::iovec, - iovcnt: c_int, - offset: off_t, - flags: c_int, - ) -> ssize_t; - pub fn getauxval(type_: c_ulong) -> c_ulong; - - // Added in `musl` 1.1.20 - pub fn explicit_bzero(s: *mut c_void, len: size_t); - // Added in `musl` 1.2.2 - pub fn reallocarray(ptr: *mut c_void, nmemb: size_t, size: size_t) -> *mut c_void; - - pub fn adjtimex(buf: *mut crate::timex) -> c_int; - pub fn clock_adjtime(clk_id: crate::clockid_t, buf: *mut crate::timex) -> c_int; - - pub fn ctermid(s: *mut c_char) -> *mut c_char; - - pub fn memfd_create(name: *const c_char, flags: c_uint) -> c_int; - pub fn mlock2(addr: *const c_void, len: size_t, flags: c_uint) -> c_int; - pub fn malloc_usable_size(ptr: *mut c_void) -> size_t; - - pub fn euidaccess(pathname: *const c_char, mode: c_int) -> c_int; - pub fn eaccess(pathname: *const c_char, mode: c_int) -> c_int; - - pub fn asctime_r(tm: *const crate::tm, buf: *mut c_char) -> *mut c_char; - - pub fn dirname(path: *mut c_char) -> *mut c_char; - pub fn basename(path: *mut c_char) -> *mut c_char; - - // Added in `musl` 1.1.20 - pub fn getrandom(buf: *mut c_void, buflen: size_t, flags: c_uint) -> ssize_t; - - // Added in `musl` 1.1.24 - pub fn posix_spawn_file_actions_addchdir_np( - actions: *mut crate::posix_spawn_file_actions_t, - path: *const c_char, - ) -> c_int; - // Added in `musl` 1.1.24 - pub fn posix_spawn_file_actions_addfchdir_np( - actions: *mut crate::posix_spawn_file_actions_t, - fd: c_int, - ) -> c_int; - - #[deprecated( - since = "0.2.172", - note = "musl provides `utmp` as stubs and an alternative should be preferred; see https://wiki.musl-libc.org/faq.html" - )] - pub fn getutxent() -> *mut utmpx; - #[deprecated( - since = "0.2.172", - note = "musl provides `utmp` as stubs and an alternative should be preferred; see https://wiki.musl-libc.org/faq.html" - )] - pub fn getutxid(ut: *const utmpx) -> *mut utmpx; - #[deprecated( - since = "0.2.172", - note = "musl provides `utmp` as stubs and an alternative should be preferred; see https://wiki.musl-libc.org/faq.html" - )] - pub fn getutxline(ut: *const utmpx) -> *mut utmpx; - #[deprecated( - since = "0.2.172", - note = "musl provides `utmp` as stubs and an alternative should be preferred; see https://wiki.musl-libc.org/faq.html" - )] - pub fn pututxline(ut: *const utmpx) -> *mut utmpx; - #[deprecated( - since = "0.2.172", - note = "musl provides `utmp` as stubs and an alternative should be preferred; see https://wiki.musl-libc.org/faq.html" - )] - pub fn setutxent(); - #[deprecated( - since = "0.2.172", - note = "musl provides `utmp` as stubs and an alternative should be preferred; see https://wiki.musl-libc.org/faq.html" - )] - pub fn endutxent(); - #[deprecated( - since = "0.2.172", - note = "musl provides `utmp` as stubs and an alternative should be preferred; see https://wiki.musl-libc.org/faq.html" - )] - pub fn utmpxname(file: *const c_char) -> c_int; -} - -// Alias to 64 to mimic glibc's LFS64 support -mod lfs64; -pub use self::lfs64::*; - -cfg_if! 
{ - if #[cfg(any( - target_arch = "x86_64", - target_arch = "aarch64", - target_arch = "mips64", - target_arch = "powerpc64", - target_arch = "s390x", - target_arch = "riscv64", - target_arch = "loongarch64", - // musl-linux ABI for wasm32 follows b64 convention - target_arch = "wasm32", - ))] { - mod b64; - pub use self::b64::*; - } else if #[cfg(any( - target_arch = "x86", - target_arch = "mips", - target_arch = "powerpc", - target_arch = "hexagon", - target_arch = "riscv32", - target_arch = "arm" - ))] { - mod b32; - pub use self::b32::*; - } else { - } -} diff --git a/vendor/libc/src/unix/linux_like/linux/uclibc/arm/mod.rs b/vendor/libc/src/unix/linux_like/linux/uclibc/arm/mod.rs deleted file mode 100644 index c54d77b194c48f..00000000000000 --- a/vendor/libc/src/unix/linux_like/linux/uclibc/arm/mod.rs +++ /dev/null @@ -1,925 +0,0 @@ -use crate::off64_t; -use crate::prelude::*; - -pub type wchar_t = c_uint; -pub type time_t = c_long; - -pub type clock_t = c_long; -pub type fsblkcnt_t = c_ulong; -pub type fsfilcnt_t = c_ulong; -pub type ino_t = c_ulong; -pub type off_t = c_long; -pub type pthread_t = c_ulong; -pub type suseconds_t = c_long; - -pub type nlink_t = c_uint; -pub type blksize_t = c_long; -pub type blkcnt_t = c_long; - -pub type fsblkcnt64_t = u64; -pub type fsfilcnt64_t = u64; -pub type __u64 = c_ulonglong; -pub type __s64 = c_longlong; - -s! { - pub struct cmsghdr { - pub cmsg_len: size_t, - pub cmsg_level: c_int, - pub cmsg_type: c_int, - } - - pub struct msghdr { - pub msg_name: *mut c_void, - pub msg_namelen: crate::socklen_t, - pub msg_iov: *mut crate::iovec, - pub msg_iovlen: c_int, - pub msg_control: *mut c_void, - pub msg_controllen: crate::socklen_t, - pub msg_flags: c_int, - } - - pub struct pthread_attr_t { - __size: [c_long; 9], - } - - pub struct stat { - pub st_dev: c_ulonglong, - __pad1: c_ushort, - pub st_ino: crate::ino_t, - pub st_mode: crate::mode_t, - pub st_nlink: crate::nlink_t, - pub st_uid: crate::uid_t, - pub st_gid: crate::gid_t, - pub st_rdev: c_ulonglong, - __pad2: c_ushort, - pub st_size: off_t, - pub st_blksize: crate::blksize_t, - pub st_blocks: crate::blkcnt_t, - pub st_atime: crate::time_t, - pub st_atime_nsec: c_long, - pub st_mtime: crate::time_t, - pub st_mtime_nsec: c_long, - pub st_ctime: crate::time_t, - pub st_ctime_nsec: c_long, - __unused4: c_ulong, - __unused5: c_ulong, - } - - pub struct stat64 { - pub st_dev: c_ulonglong, - pub __pad1: c_uint, - pub __st_ino: crate::ino_t, - pub st_mode: crate::mode_t, - pub st_nlink: crate::nlink_t, - pub st_uid: crate::uid_t, - pub st_gid: crate::gid_t, - pub st_rdev: c_ulonglong, - pub __pad2: c_uint, - pub st_size: off64_t, - pub st_blksize: crate::blksize_t, - pub st_blocks: crate::blkcnt64_t, - pub st_atime: crate::time_t, - pub st_atime_nsec: c_long, - pub st_mtime: crate::time_t, - pub st_mtime_nsec: c_long, - pub st_ctime: crate::time_t, - pub st_ctime_nsec: c_long, - pub st_ino: crate::ino64_t, - } - - pub struct flock { - pub l_type: c_short, - pub l_whence: c_short, - pub l_start: off_t, - pub l_len: off_t, - pub l_pid: crate::pid_t, - } - - pub struct sysinfo { - pub uptime: c_long, - pub loads: [c_ulong; 3], - pub totalram: c_ulong, - pub freeram: c_ulong, - pub sharedram: c_ulong, - pub bufferram: c_ulong, - pub totalswap: c_ulong, - pub freeswap: c_ulong, - pub procs: c_ushort, - pub pad: c_ushort, - pub totalhigh: c_ulong, - pub freehigh: c_ulong, - pub mem_unit: c_uint, - pub _f: [c_char; 8], - } - - pub struct statfs { - pub f_type: c_int, - pub f_bsize: c_int, - pub f_blocks: 
crate::fsblkcnt_t, - pub f_bfree: crate::fsblkcnt_t, - pub f_bavail: crate::fsblkcnt_t, - pub f_files: crate::fsfilcnt_t, - pub f_ffree: crate::fsfilcnt_t, - - pub f_fsid: crate::fsid_t, - pub f_namelen: c_int, - pub f_frsize: c_int, - pub f_flags: c_int, - pub f_spare: [c_int; 4], - } - - pub struct statfs64 { - pub f_type: c_int, - pub f_bsize: c_int, - pub f_blocks: crate::fsblkcnt64_t, - pub f_bfree: crate::fsblkcnt64_t, - pub f_bavail: crate::fsblkcnt64_t, - pub f_files: crate::fsfilcnt64_t, - pub f_ffree: crate::fsfilcnt64_t, - pub f_fsid: crate::fsid_t, - pub f_namelen: c_int, - pub f_frsize: c_int, - pub f_flags: c_int, - pub f_spare: [c_int; 4], - } - - pub struct statvfs64 { - pub f_bsize: c_ulong, - pub f_frsize: c_ulong, - pub f_blocks: u64, - pub f_bfree: u64, - pub f_bavail: u64, - pub f_files: u64, - pub f_ffree: u64, - pub f_favail: u64, - pub f_fsid: c_ulong, - __f_unused: c_int, - pub f_flag: c_ulong, - pub f_namemax: c_ulong, - __f_spare: [c_int; 6], - } - - pub struct sigset_t { - __val: [c_ulong; 2], - } - - // FIXME(1.0): This should not implement `PartialEq` - #[allow(unpredictable_function_pointer_comparisons)] - pub struct sigaction { - pub sa_sigaction: crate::sighandler_t, - pub sa_flags: c_ulong, - pub sa_restorer: Option, - pub sa_mask: sigset_t, - } - - pub struct termios { - pub c_iflag: crate::tcflag_t, - pub c_oflag: crate::tcflag_t, - pub c_cflag: crate::tcflag_t, - pub c_lflag: crate::tcflag_t, - pub c_line: crate::cc_t, - pub c_cc: [crate::cc_t; crate::NCCS], - pub c_ispeed: crate::speed_t, - pub c_ospeed: crate::speed_t, - } - - pub struct siginfo_t { - pub si_signo: c_int, - pub si_errno: c_int, - pub si_code: c_int, - pub _pad: [c_int; 29], - } - - pub struct stack_t { - pub ss_sp: *mut c_void, - pub ss_flags: c_int, - pub ss_size: size_t, - } - - pub struct ipc_perm { - pub __key: crate::key_t, - pub uid: crate::uid_t, - pub gid: crate::gid_t, - pub cuid: crate::uid_t, - pub cgid: crate::gid_t, - pub mode: c_ushort, - __pad1: c_ushort, - pub __seq: c_ushort, - __pad2: c_ushort, - __unused1: c_ulong, - __unused2: c_ulong, - } - - pub struct msqid_ds { - pub msg_perm: crate::ipc_perm, - pub msg_stime: crate::time_t, - __unused1: c_ulong, - pub msg_rtime: crate::time_t, - __unused2: c_ulong, - pub msg_ctime: crate::time_t, - __unused3: c_ulong, - pub __msg_cbytes: c_ulong, - pub msg_qnum: crate::msgqnum_t, - pub msg_qbytes: crate::msglen_t, - pub msg_lspid: crate::pid_t, - pub msg_lrpid: crate::pid_t, - __unused4: c_ulong, - __unused5: c_ulong, - } - - pub struct shmid_ds { - pub shm_perm: crate::ipc_perm, - pub shm_segsz: size_t, - pub shm_atime: crate::time_t, - __unused1: c_ulong, - pub shm_dtime: crate::time_t, - __unused2: c_ulong, - pub shm_ctime: crate::time_t, - __unused3: c_ulong, - pub shm_cpid: crate::pid_t, - pub shm_lpid: crate::pid_t, - pub shm_nattch: crate::shmatt_t, - __unused4: c_ulong, - __unused5: c_ulong, - } - - // FIXME(1.0) this is actually a union - #[cfg_attr(target_pointer_width = "32", repr(align(4)))] - #[cfg_attr(target_pointer_width = "64", repr(align(8)))] - pub struct sem_t { - #[cfg(target_pointer_width = "32")] - __size: [c_char; 16], - #[cfg(target_pointer_width = "64")] - __size: [c_char; 32], - } -} - -pub const O_CLOEXEC: c_int = 0o2000000; -pub const __SIZEOF_PTHREAD_ATTR_T: usize = 36; -pub const __SIZEOF_PTHREAD_MUTEX_T: usize = 24; -pub const __SIZEOF_PTHREAD_MUTEXATTR_T: usize = 4; -pub const __SIZEOF_PTHREAD_COND_COMPAT_T: usize = 12; -pub const __SIZEOF_PTHREAD_CONDATTR_T: usize = 4; -pub const 
__SIZEOF_PTHREAD_RWLOCK_T: usize = 32; -pub const __SIZEOF_PTHREAD_RWLOCKATTR_T: usize = 8; -pub const __SIZEOF_PTHREAD_BARRIER_T: usize = 20; -pub const __SIZEOF_PTHREAD_BARRIERATTR_T: usize = 4; -pub const NCCS: usize = 32; - -// I wasn't able to find those constants -// in uclibc build environment for armv7 -pub const MAP_HUGETLB: c_int = 0x040000; // from linux/other/mod.rs - -// autogenerated constants with hand tuned types -pub const B0: crate::speed_t = 0; -pub const B1000000: crate::speed_t = 0x1008; -pub const B110: crate::speed_t = 0x3; -pub const B115200: crate::speed_t = 0x1002; -pub const B1152000: crate::speed_t = 0x1009; -pub const B1200: crate::speed_t = 0x9; -pub const B134: crate::speed_t = 0x4; -pub const B150: crate::speed_t = 0x5; -pub const B1500000: crate::speed_t = 0x100a; -pub const B1800: crate::speed_t = 0xa; -pub const B19200: crate::speed_t = 0xe; -pub const B200: crate::speed_t = 0x6; -pub const B2000000: crate::speed_t = 0x100b; -pub const B230400: crate::speed_t = 0x1003; -pub const B2400: crate::speed_t = 0xb; -pub const B2500000: crate::speed_t = 0x100c; -pub const B300: crate::speed_t = 0x7; -pub const B3000000: crate::speed_t = 0x100d; -pub const B3500000: crate::speed_t = 0x100e; -pub const B38400: crate::speed_t = 0xf; -pub const B4000000: crate::speed_t = 0x100f; -pub const B460800: crate::speed_t = 0x1004; -pub const B4800: crate::speed_t = 0xc; -pub const B50: crate::speed_t = 0x1; -pub const B500000: crate::speed_t = 0x1005; -pub const B57600: crate::speed_t = 0x1001; -pub const B576000: crate::speed_t = 0x1006; -pub const B600: crate::speed_t = 0x8; -pub const B75: crate::speed_t = 0x2; -pub const B921600: crate::speed_t = 0x1007; -pub const B9600: crate::speed_t = 0xd; -pub const BS1: c_int = 0x2000; -pub const BSDLY: c_int = 0x2000; -pub const CBAUD: crate::tcflag_t = 0x100f; -pub const CBAUDEX: crate::tcflag_t = 0x1000; -pub const CIBAUD: crate::tcflag_t = 0x100f0000; -pub const CLOCAL: crate::tcflag_t = 0x800; -pub const CPU_SETSIZE: c_int = 0x400; -pub const CR1: c_int = 0x200; -pub const CR2: c_int = 0x400; -pub const CR3: c_int = 0x600; -pub const CRDLY: c_int = 0x600; -pub const CREAD: crate::tcflag_t = 0x80; -pub const CS6: crate::tcflag_t = 0x10; -pub const CS7: crate::tcflag_t = 0x20; -pub const CS8: crate::tcflag_t = 0x30; -pub const CSIZE: crate::tcflag_t = 0x30; -pub const CSTOPB: crate::tcflag_t = 0x40; -pub const EADDRINUSE: c_int = 0x62; -pub const EADDRNOTAVAIL: c_int = 0x63; -pub const EADV: c_int = 0x44; -pub const EAFNOSUPPORT: c_int = 0x61; -pub const EALREADY: c_int = 0x72; -pub const EBADE: c_int = 0x34; -pub const EBADFD: c_int = 0x4d; -pub const EBADMSG: c_int = 0x4a; -pub const EBADR: c_int = 0x35; -pub const EBADRQC: c_int = 0x38; -pub const EBADSLT: c_int = 0x39; -pub const EBFONT: c_int = 0x3b; -pub const ECANCELED: c_int = 0x7d; -pub const ECHOCTL: crate::tcflag_t = 0x200; -pub const ECHOE: crate::tcflag_t = 0x10; -pub const ECHOK: crate::tcflag_t = 0x20; -pub const ECHOKE: crate::tcflag_t = 0x800; -pub const ECHONL: crate::tcflag_t = 0x40; -pub const ECHOPRT: crate::tcflag_t = 0x400; -pub const ECHRNG: c_int = 0x2c; -pub const ECOMM: c_int = 0x46; -pub const ECONNABORTED: c_int = 0x67; -pub const ECONNREFUSED: c_int = 0x6f; -pub const ECONNRESET: c_int = 0x68; -pub const EDEADLK: c_int = 0x23; -pub const EDESTADDRREQ: c_int = 0x59; -pub const EDOTDOT: c_int = 0x49; -pub const EDQUOT: c_int = 0x7a; -pub const EFD_CLOEXEC: c_int = 0x80000; -pub const EFD_NONBLOCK: c_int = 0x800; -pub const EHOSTDOWN: c_int = 0x70; 
-pub const EHOSTUNREACH: c_int = 0x71; -pub const EHWPOISON: c_int = 0x85; -pub const EIDRM: c_int = 0x2b; -pub const EILSEQ: c_int = 0x54; -pub const EINPROGRESS: c_int = 0x73; -pub const EISCONN: c_int = 0x6a; -pub const EISNAM: c_int = 0x78; -pub const EKEYEXPIRED: c_int = 0x7f; -pub const EKEYREJECTED: c_int = 0x81; -pub const EKEYREVOKED: c_int = 0x80; -pub const EL2HLT: c_int = 0x33; -pub const EL2NSYNC: c_int = 0x2d; -pub const EL3HLT: c_int = 0x2e; -pub const EL3RST: c_int = 0x2f; -pub const ELIBACC: c_int = 0x4f; -pub const ELIBBAD: c_int = 0x50; -pub const ELIBEXEC: c_int = 0x53; -pub const ELIBMAX: c_int = 0x52; -pub const ELIBSCN: c_int = 0x51; -pub const ELNRNG: c_int = 0x30; -pub const ELOOP: c_int = 0x28; -pub const EMEDIUMTYPE: c_int = 0x7c; -pub const EMSGSIZE: c_int = 0x5a; -pub const EMULTIHOP: c_int = 0x48; -pub const ENAMETOOLONG: c_int = 0x24; -pub const ENAVAIL: c_int = 0x77; -pub const ENETDOWN: c_int = 0x64; -pub const ENETRESET: c_int = 0x66; -pub const ENETUNREACH: c_int = 0x65; -pub const ENOANO: c_int = 0x37; -pub const ENOBUFS: c_int = 0x69; -pub const ENOCSI: c_int = 0x32; -pub const ENODATA: c_int = 0x3d; -pub const ENOKEY: c_int = 0x7e; -pub const ENOLCK: c_int = 0x25; -pub const ENOLINK: c_int = 0x43; -pub const ENOMEDIUM: c_int = 0x7b; -pub const ENOMSG: c_int = 0x2a; -pub const ENONET: c_int = 0x40; -pub const ENOPKG: c_int = 0x41; -pub const ENOPROTOOPT: c_int = 0x5c; -pub const ENOSR: c_int = 0x3f; -pub const ENOSTR: c_int = 0x3c; -pub const ENOSYS: c_int = 0x26; -pub const ENOTCONN: c_int = 0x6b; -pub const ENOTEMPTY: c_int = 0x27; -pub const ENOTNAM: c_int = 0x76; -pub const ENOTRECOVERABLE: c_int = 0x83; -pub const ENOTSOCK: c_int = 0x58; -pub const ENOTUNIQ: c_int = 0x4c; -pub const EOPNOTSUPP: c_int = 0x5f; -pub const EOVERFLOW: c_int = 0x4b; -pub const EOWNERDEAD: c_int = 0x82; -pub const EPFNOSUPPORT: c_int = 0x60; -pub const EPOLL_CLOEXEC: c_int = 0x80000; -pub const EPROTO: c_int = 0x47; -pub const EPROTONOSUPPORT: c_int = 0x5d; -pub const EPROTOTYPE: c_int = 0x5b; -pub const EREMCHG: c_int = 0x4e; -pub const EREMOTE: c_int = 0x42; -pub const EREMOTEIO: c_int = 0x79; -pub const ERESTART: c_int = 0x55; -pub const ERFKILL: c_int = 0x84; -pub const ESHUTDOWN: c_int = 0x6c; -pub const ESOCKTNOSUPPORT: c_int = 0x5e; -pub const ESRMNT: c_int = 0x45; -pub const ESTALE: c_int = 0x74; -pub const ESTRPIPE: c_int = 0x56; -pub const ETIME: c_int = 0x3e; -pub const ETIMEDOUT: c_int = 0x6e; -pub const ETOOMANYREFS: c_int = 0x6d; -pub const EUCLEAN: c_int = 0x75; -pub const EUNATCH: c_int = 0x31; -pub const EUSERS: c_int = 0x57; -pub const EXFULL: c_int = 0x36; -pub const FF1: c_int = 0x8000; -pub const FFDLY: c_int = 0x8000; -pub const FLUSHO: crate::tcflag_t = 0x1000; -pub const F_GETLK: c_int = 0x5; -pub const F_SETLK: c_int = 0x6; -pub const F_SETLKW: c_int = 0x7; -pub const HUPCL: crate::tcflag_t = 0x400; -pub const ICANON: crate::tcflag_t = 0x2; -pub const IEXTEN: crate::tcflag_t = 0x8000; -pub const ISIG: crate::tcflag_t = 0x1; -pub const IXOFF: crate::tcflag_t = 0x1000; -pub const IXON: crate::tcflag_t = 0x400; -pub const MAP_ANON: c_int = 0x20; -pub const MAP_ANONYMOUS: c_int = 0x20; -pub const MAP_DENYWRITE: c_int = 0x800; -pub const MAP_EXECUTABLE: c_int = 0x1000; -pub const MAP_GROWSDOWN: c_int = 0x100; -pub const MAP_LOCKED: c_int = 0x2000; -pub const MAP_NONBLOCK: c_int = 0x10000; -pub const MAP_NORESERVE: c_int = 0x4000; -pub const MAP_POPULATE: c_int = 0x8000; -pub const MAP_STACK: c_int = 0x20000; -pub const NLDLY: crate::tcflag_t = 0x100; 
-pub const NOFLSH: crate::tcflag_t = 0x80; -pub const OLCUC: crate::tcflag_t = 0x2; -pub const ONLCR: crate::tcflag_t = 0x4; -pub const O_ACCMODE: c_int = 0x3; -pub const O_APPEND: c_int = 0x400; -pub const O_ASYNC: c_int = 0o20000; -pub const O_CREAT: c_int = 0x40; -pub const O_DIRECT: c_int = 0x10000; -pub const O_DIRECTORY: c_int = 0x4000; -pub const O_DSYNC: c_int = O_SYNC; -pub const O_EXCL: c_int = 0x80; -pub const O_FSYNC: c_int = O_SYNC; -pub const O_LARGEFILE: c_int = 0o400000; -pub const O_NDELAY: c_int = O_NONBLOCK; -pub const O_NOATIME: c_int = 0o1000000; -pub const O_NOCTTY: c_int = 0x100; -pub const O_NOFOLLOW: c_int = 0x8000; -pub const O_NONBLOCK: c_int = 0x800; -pub const O_PATH: c_int = 0o10000000; -pub const O_RSYNC: c_int = O_SYNC; -pub const O_SYNC: c_int = 0o10000; -pub const O_TRUNC: c_int = 0x200; -pub const PARENB: crate::tcflag_t = 0x100; -pub const PARODD: crate::tcflag_t = 0x200; -pub const PENDIN: crate::tcflag_t = 0x4000; -pub const POLLWRBAND: c_short = 0x200; -pub const POLLWRNORM: c_short = 0x100; -pub const PTHREAD_STACK_MIN: size_t = 16384; -pub const RTLD_GLOBAL: c_int = 0x00100; - -// These are typed unsigned to match sigaction -pub const SA_NOCLDSTOP: c_ulong = 0x1; -pub const SA_NOCLDWAIT: c_ulong = 0x2; -pub const SA_SIGINFO: c_ulong = 0x4; -pub const SA_NODEFER: c_ulong = 0x40000000; -pub const SA_ONSTACK: c_ulong = 0x8000000; -pub const SA_RESETHAND: c_ulong = 0x80000000; -pub const SA_RESTART: c_ulong = 0x10000000; - -pub const SFD_CLOEXEC: c_int = 0x80000; -pub const SFD_NONBLOCK: c_int = 0x800; -pub const SIGBUS: c_int = 0x7; -pub const SIGCHLD: c_int = 0x11; -pub const SIGCONT: c_int = 0x12; -pub const SIGIO: c_int = 0x1d; -pub const SIGPROF: c_int = 0x1b; -pub const SIGPWR: c_int = 0x1e; -pub const SIGSTKFLT: c_int = 0x10; -pub const SIGSTKSZ: size_t = 8192; -pub const SIGSTOP: c_int = 0x13; -pub const SIGSYS: c_int = 0x1f; -pub const SIGTSTP: c_int = 0x14; -pub const SIGTTIN: c_int = 0x15; -pub const SIGTTOU: c_int = 0x16; -pub const SIGURG: c_int = 0x17; -pub const SIGUSR1: c_int = 0xa; -pub const SIGUSR2: c_int = 0xc; -pub const SIGVTALRM: c_int = 0x1a; -pub const SIGWINCH: c_int = 0x1c; -pub const SIGXCPU: c_int = 0x18; -pub const SIGXFSZ: c_int = 0x19; -pub const SIG_BLOCK: c_int = 0; -pub const SIG_SETMASK: c_int = 0x2; -pub const SIG_UNBLOCK: c_int = 0x1; -pub const SOCK_DGRAM: c_int = 0x2; -pub const SOCK_NONBLOCK: c_int = 0o0004000; -pub const SOCK_SEQPACKET: c_int = 0x5; -pub const SOCK_STREAM: c_int = 0x1; - -pub const TAB1: c_int = 0x800; -pub const TAB2: c_int = 0x1000; -pub const TAB3: c_int = 0x1800; -pub const TABDLY: c_int = 0x1800; -pub const TCSADRAIN: c_int = 0x1; -pub const TCSAFLUSH: c_int = 0x2; -pub const TCSANOW: c_int = 0; -pub const TOSTOP: crate::tcflag_t = 0x100; -pub const VDISCARD: usize = 0xd; -pub const VEOF: usize = 0x4; -pub const VEOL: usize = 0xb; -pub const VEOL2: usize = 0x10; -pub const VMIN: usize = 0x6; -pub const VREPRINT: usize = 0xc; -pub const VSTART: usize = 0x8; -pub const VSTOP: usize = 0x9; -pub const VSUSP: usize = 0xa; -pub const VSWTC: usize = 0x7; -pub const VT1: c_int = 0x4000; -pub const VTDLY: c_int = 0x4000; -pub const VTIME: usize = 0x5; -pub const VWERASE: usize = 0xe; -pub const XTABS: crate::tcflag_t = 0x1800; - -pub const MADV_SOFT_OFFLINE: c_int = 101; - -// Syscall table is copied from src/unix/notbsd/linux/musl/b32/arm.rs -pub const SYS_restart_syscall: c_long = 0; -pub const SYS_exit: c_long = 1; -pub const SYS_fork: c_long = 2; -pub const SYS_read: c_long = 3; -pub const 
SYS_write: c_long = 4; -pub const SYS_open: c_long = 5; -pub const SYS_close: c_long = 6; -pub const SYS_creat: c_long = 8; -pub const SYS_link: c_long = 9; -pub const SYS_unlink: c_long = 10; -pub const SYS_execve: c_long = 11; -pub const SYS_chdir: c_long = 12; -pub const SYS_mknod: c_long = 14; -pub const SYS_chmod: c_long = 15; -pub const SYS_lchown: c_long = 16; -pub const SYS_lseek: c_long = 19; -pub const SYS_getpid: c_long = 20; -pub const SYS_mount: c_long = 21; -pub const SYS_setuid: c_long = 23; -pub const SYS_getuid: c_long = 24; -pub const SYS_ptrace: c_long = 26; -pub const SYS_pause: c_long = 29; -pub const SYS_access: c_long = 33; -pub const SYS_nice: c_long = 34; -pub const SYS_sync: c_long = 36; -pub const SYS_kill: c_long = 37; -pub const SYS_rename: c_long = 38; -pub const SYS_mkdir: c_long = 39; -pub const SYS_rmdir: c_long = 40; -pub const SYS_dup: c_long = 41; -pub const SYS_pipe: c_long = 42; -pub const SYS_times: c_long = 43; -pub const SYS_brk: c_long = 45; -pub const SYS_setgid: c_long = 46; -pub const SYS_getgid: c_long = 47; -pub const SYS_geteuid: c_long = 49; -pub const SYS_getegid: c_long = 50; -pub const SYS_acct: c_long = 51; -pub const SYS_umount2: c_long = 52; -pub const SYS_ioctl: c_long = 54; -pub const SYS_fcntl: c_long = 55; -pub const SYS_setpgid: c_long = 57; -pub const SYS_umask: c_long = 60; -pub const SYS_chroot: c_long = 61; -pub const SYS_ustat: c_long = 62; -pub const SYS_dup2: c_long = 63; -pub const SYS_getppid: c_long = 64; -pub const SYS_getpgrp: c_long = 65; -pub const SYS_setsid: c_long = 66; -pub const SYS_sigaction: c_long = 67; -pub const SYS_setreuid: c_long = 70; -pub const SYS_setregid: c_long = 71; -pub const SYS_sigsuspend: c_long = 72; -pub const SYS_sigpending: c_long = 73; -pub const SYS_sethostname: c_long = 74; -pub const SYS_setrlimit: c_long = 75; -pub const SYS_getrusage: c_long = 77; -pub const SYS_gettimeofday: c_long = 78; -pub const SYS_settimeofday: c_long = 79; -pub const SYS_getgroups: c_long = 80; -pub const SYS_setgroups: c_long = 81; -pub const SYS_symlink: c_long = 83; -pub const SYS_readlink: c_long = 85; -pub const SYS_uselib: c_long = 86; -pub const SYS_swapon: c_long = 87; -pub const SYS_reboot: c_long = 88; -pub const SYS_munmap: c_long = 91; -pub const SYS_truncate: c_long = 92; -pub const SYS_ftruncate: c_long = 93; -pub const SYS_fchmod: c_long = 94; -pub const SYS_fchown: c_long = 95; -pub const SYS_getpriority: c_long = 96; -pub const SYS_setpriority: c_long = 97; -pub const SYS_statfs: c_long = 99; -pub const SYS_fstatfs: c_long = 100; -pub const SYS_syslog: c_long = 103; -pub const SYS_setitimer: c_long = 104; -pub const SYS_getitimer: c_long = 105; -pub const SYS_stat: c_long = 106; -pub const SYS_lstat: c_long = 107; -pub const SYS_fstat: c_long = 108; -pub const SYS_vhangup: c_long = 111; -pub const SYS_wait4: c_long = 114; -pub const SYS_swapoff: c_long = 115; -pub const SYS_sysinfo: c_long = 116; -pub const SYS_fsync: c_long = 118; -pub const SYS_sigreturn: c_long = 119; -pub const SYS_clone: c_long = 120; -pub const SYS_setdomainname: c_long = 121; -pub const SYS_uname: c_long = 122; -pub const SYS_adjtimex: c_long = 124; -pub const SYS_mprotect: c_long = 125; -pub const SYS_sigprocmask: c_long = 126; -pub const SYS_init_module: c_long = 128; -pub const SYS_delete_module: c_long = 129; -pub const SYS_quotactl: c_long = 131; -pub const SYS_getpgid: c_long = 132; -pub const SYS_fchdir: c_long = 133; -pub const SYS_bdflush: c_long = 134; -pub const SYS_sysfs: c_long = 135; -pub const 
SYS_personality: c_long = 136; -pub const SYS_setfsuid: c_long = 138; -pub const SYS_setfsgid: c_long = 139; -pub const SYS__llseek: c_long = 140; -pub const SYS_getdents: c_long = 141; -pub const SYS__newselect: c_long = 142; -pub const SYS_flock: c_long = 143; -pub const SYS_msync: c_long = 144; -pub const SYS_readv: c_long = 145; -pub const SYS_writev: c_long = 146; -pub const SYS_getsid: c_long = 147; -pub const SYS_fdatasync: c_long = 148; -pub const SYS__sysctl: c_long = 149; -pub const SYS_mlock: c_long = 150; -pub const SYS_munlock: c_long = 151; -pub const SYS_mlockall: c_long = 152; -pub const SYS_munlockall: c_long = 153; -pub const SYS_sched_setparam: c_long = 154; -pub const SYS_sched_getparam: c_long = 155; -pub const SYS_sched_setscheduler: c_long = 156; -pub const SYS_sched_getscheduler: c_long = 157; -pub const SYS_sched_yield: c_long = 158; -pub const SYS_sched_get_priority_max: c_long = 159; -pub const SYS_sched_get_priority_min: c_long = 160; -pub const SYS_sched_rr_get_interval: c_long = 161; -pub const SYS_nanosleep: c_long = 162; -pub const SYS_mremap: c_long = 163; -pub const SYS_setresuid: c_long = 164; -pub const SYS_getresuid: c_long = 165; -pub const SYS_poll: c_long = 168; -pub const SYS_nfsservctl: c_long = 169; -pub const SYS_setresgid: c_long = 170; -pub const SYS_getresgid: c_long = 171; -pub const SYS_prctl: c_long = 172; -pub const SYS_rt_sigreturn: c_long = 173; -pub const SYS_rt_sigaction: c_long = 174; -pub const SYS_rt_sigprocmask: c_long = 175; -pub const SYS_rt_sigpending: c_long = 176; -pub const SYS_rt_sigtimedwait: c_long = 177; -pub const SYS_rt_sigqueueinfo: c_long = 178; -pub const SYS_rt_sigsuspend: c_long = 179; -pub const SYS_pread64: c_long = 180; -pub const SYS_pwrite64: c_long = 181; -pub const SYS_chown: c_long = 182; -pub const SYS_getcwd: c_long = 183; -pub const SYS_capget: c_long = 184; -pub const SYS_capset: c_long = 185; -pub const SYS_sigaltstack: c_long = 186; -pub const SYS_sendfile: c_long = 187; -pub const SYS_vfork: c_long = 190; -pub const SYS_ugetrlimit: c_long = 191; -pub const SYS_mmap2: c_long = 192; -pub const SYS_truncate64: c_long = 193; -pub const SYS_ftruncate64: c_long = 194; -pub const SYS_stat64: c_long = 195; -pub const SYS_lstat64: c_long = 196; -pub const SYS_fstat64: c_long = 197; -pub const SYS_lchown32: c_long = 198; -pub const SYS_getuid32: c_long = 199; -pub const SYS_getgid32: c_long = 200; -pub const SYS_geteuid32: c_long = 201; -pub const SYS_getegid32: c_long = 202; -pub const SYS_setreuid32: c_long = 203; -pub const SYS_setregid32: c_long = 204; -pub const SYS_getgroups32: c_long = 205; -pub const SYS_setgroups32: c_long = 206; -pub const SYS_fchown32: c_long = 207; -pub const SYS_setresuid32: c_long = 208; -pub const SYS_getresuid32: c_long = 209; -pub const SYS_setresgid32: c_long = 210; -pub const SYS_getresgid32: c_long = 211; -pub const SYS_chown32: c_long = 212; -pub const SYS_setuid32: c_long = 213; -pub const SYS_setgid32: c_long = 214; -pub const SYS_setfsuid32: c_long = 215; -pub const SYS_setfsgid32: c_long = 216; -pub const SYS_getdents64: c_long = 217; -pub const SYS_pivot_root: c_long = 218; -pub const SYS_mincore: c_long = 219; -pub const SYS_madvise: c_long = 220; -pub const SYS_fcntl64: c_long = 221; -pub const SYS_gettid: c_long = 224; -pub const SYS_readahead: c_long = 225; -pub const SYS_setxattr: c_long = 226; -pub const SYS_lsetxattr: c_long = 227; -pub const SYS_fsetxattr: c_long = 228; -pub const SYS_getxattr: c_long = 229; -pub const SYS_lgetxattr: c_long = 230; -pub const 
SYS_fgetxattr: c_long = 231; -pub const SYS_listxattr: c_long = 232; -pub const SYS_llistxattr: c_long = 233; -pub const SYS_flistxattr: c_long = 234; -pub const SYS_removexattr: c_long = 235; -pub const SYS_lremovexattr: c_long = 236; -pub const SYS_fremovexattr: c_long = 237; -pub const SYS_tkill: c_long = 238; -pub const SYS_sendfile64: c_long = 239; -pub const SYS_futex: c_long = 240; -pub const SYS_sched_setaffinity: c_long = 241; -pub const SYS_sched_getaffinity: c_long = 242; -pub const SYS_io_setup: c_long = 243; -pub const SYS_io_destroy: c_long = 244; -pub const SYS_io_getevents: c_long = 245; -pub const SYS_io_submit: c_long = 246; -pub const SYS_io_cancel: c_long = 247; -pub const SYS_exit_group: c_long = 248; -pub const SYS_lookup_dcookie: c_long = 249; -pub const SYS_epoll_create: c_long = 250; -pub const SYS_epoll_ctl: c_long = 251; -pub const SYS_epoll_wait: c_long = 252; -pub const SYS_remap_file_pages: c_long = 253; -pub const SYS_set_tid_address: c_long = 256; -pub const SYS_timer_create: c_long = 257; -pub const SYS_timer_settime: c_long = 258; -pub const SYS_timer_gettime: c_long = 259; -pub const SYS_timer_getoverrun: c_long = 260; -pub const SYS_timer_delete: c_long = 261; -pub const SYS_clock_settime: c_long = 262; -pub const SYS_clock_gettime: c_long = 263; -pub const SYS_clock_getres: c_long = 264; -pub const SYS_clock_nanosleep: c_long = 265; -pub const SYS_statfs64: c_long = 266; -pub const SYS_fstatfs64: c_long = 267; -pub const SYS_tgkill: c_long = 268; -pub const SYS_utimes: c_long = 269; -pub const SYS_pciconfig_iobase: c_long = 271; -pub const SYS_pciconfig_read: c_long = 272; -pub const SYS_pciconfig_write: c_long = 273; -pub const SYS_mq_open: c_long = 274; -pub const SYS_mq_unlink: c_long = 275; -pub const SYS_mq_timedsend: c_long = 276; -pub const SYS_mq_timedreceive: c_long = 277; -pub const SYS_mq_notify: c_long = 278; -pub const SYS_mq_getsetattr: c_long = 279; -pub const SYS_waitid: c_long = 280; -pub const SYS_socket: c_long = 281; -pub const SYS_bind: c_long = 282; -pub const SYS_connect: c_long = 283; -pub const SYS_listen: c_long = 284; -pub const SYS_accept: c_long = 285; -pub const SYS_getsockname: c_long = 286; -pub const SYS_getpeername: c_long = 287; -pub const SYS_socketpair: c_long = 288; -pub const SYS_send: c_long = 289; -pub const SYS_sendto: c_long = 290; -pub const SYS_recv: c_long = 291; -pub const SYS_recvfrom: c_long = 292; -pub const SYS_shutdown: c_long = 293; -pub const SYS_setsockopt: c_long = 294; -pub const SYS_getsockopt: c_long = 295; -pub const SYS_sendmsg: c_long = 296; -pub const SYS_recvmsg: c_long = 297; -pub const SYS_semop: c_long = 298; -pub const SYS_semget: c_long = 299; -pub const SYS_semctl: c_long = 300; -pub const SYS_msgsnd: c_long = 301; -pub const SYS_msgrcv: c_long = 302; -pub const SYS_msgget: c_long = 303; -pub const SYS_msgctl: c_long = 304; -pub const SYS_shmat: c_long = 305; -pub const SYS_shmdt: c_long = 306; -pub const SYS_shmget: c_long = 307; -pub const SYS_shmctl: c_long = 308; -pub const SYS_add_key: c_long = 309; -pub const SYS_request_key: c_long = 310; -pub const SYS_keyctl: c_long = 311; -pub const SYS_semtimedop: c_long = 312; -pub const SYS_vserver: c_long = 313; -pub const SYS_ioprio_set: c_long = 314; -pub const SYS_ioprio_get: c_long = 315; -pub const SYS_inotify_init: c_long = 316; -pub const SYS_inotify_add_watch: c_long = 317; -pub const SYS_inotify_rm_watch: c_long = 318; -pub const SYS_mbind: c_long = 319; -pub const SYS_get_mempolicy: c_long = 320; -pub const SYS_set_mempolicy: 
c_long = 321; -pub const SYS_openat: c_long = 322; -pub const SYS_mkdirat: c_long = 323; -pub const SYS_mknodat: c_long = 324; -pub const SYS_fchownat: c_long = 325; -pub const SYS_futimesat: c_long = 326; -pub const SYS_fstatat64: c_long = 327; -pub const SYS_unlinkat: c_long = 328; -pub const SYS_renameat: c_long = 329; -pub const SYS_linkat: c_long = 330; -pub const SYS_symlinkat: c_long = 331; -pub const SYS_readlinkat: c_long = 332; -pub const SYS_fchmodat: c_long = 333; -pub const SYS_faccessat: c_long = 334; -pub const SYS_pselect6: c_long = 335; -pub const SYS_ppoll: c_long = 336; -pub const SYS_unshare: c_long = 337; -pub const SYS_set_robust_list: c_long = 338; -pub const SYS_get_robust_list: c_long = 339; -pub const SYS_splice: c_long = 340; -pub const SYS_tee: c_long = 342; -pub const SYS_vmsplice: c_long = 343; -pub const SYS_move_pages: c_long = 344; -pub const SYS_getcpu: c_long = 345; -pub const SYS_epoll_pwait: c_long = 346; -pub const SYS_kexec_load: c_long = 347; -pub const SYS_utimensat: c_long = 348; -pub const SYS_signalfd: c_long = 349; -pub const SYS_timerfd_create: c_long = 350; -pub const SYS_eventfd: c_long = 351; -pub const SYS_fallocate: c_long = 352; -pub const SYS_timerfd_settime: c_long = 353; -pub const SYS_timerfd_gettime: c_long = 354; -pub const SYS_signalfd4: c_long = 355; -pub const SYS_eventfd2: c_long = 356; -pub const SYS_epoll_create1: c_long = 357; -pub const SYS_dup3: c_long = 358; -pub const SYS_pipe2: c_long = 359; -pub const SYS_inotify_init1: c_long = 360; -pub const SYS_preadv: c_long = 361; -pub const SYS_pwritev: c_long = 362; -pub const SYS_rt_tgsigqueueinfo: c_long = 363; -pub const SYS_perf_event_open: c_long = 364; -pub const SYS_recvmmsg: c_long = 365; -pub const SYS_accept4: c_long = 366; -pub const SYS_fanotify_init: c_long = 367; -pub const SYS_fanotify_mark: c_long = 368; -pub const SYS_prlimit64: c_long = 369; -pub const SYS_name_to_handle_at: c_long = 370; -pub const SYS_open_by_handle_at: c_long = 371; -pub const SYS_clock_adjtime: c_long = 372; -pub const SYS_syncfs: c_long = 373; -pub const SYS_sendmmsg: c_long = 374; -pub const SYS_setns: c_long = 375; -pub const SYS_process_vm_readv: c_long = 376; -pub const SYS_process_vm_writev: c_long = 377; -pub const SYS_kcmp: c_long = 378; -pub const SYS_finit_module: c_long = 379; -pub const SYS_sched_setattr: c_long = 380; -pub const SYS_sched_getattr: c_long = 381; -pub const SYS_renameat2: c_long = 382; -pub const SYS_seccomp: c_long = 383; -pub const SYS_getrandom: c_long = 384; -pub const SYS_memfd_create: c_long = 385; -pub const SYS_bpf: c_long = 386; -pub const SYS_execveat: c_long = 387; -pub const SYS_userfaultfd: c_long = 388; -pub const SYS_membarrier: c_long = 389; -pub const SYS_mlock2: c_long = 390; -pub const SYS_copy_file_range: c_long = 391; -pub const SYS_preadv2: c_long = 392; -pub const SYS_pwritev2: c_long = 393; -pub const SYS_pkey_mprotect: c_long = 394; -pub const SYS_pkey_alloc: c_long = 395; -pub const SYS_pkey_free: c_long = 396; -// FIXME(linux): should be a `c_long` too, but a bug slipped in. 
-pub const SYS_statx: c_int = 397; -pub const SYS_pidfd_send_signal: c_long = 424; -pub const SYS_io_uring_setup: c_long = 425; -pub const SYS_io_uring_enter: c_long = 426; -pub const SYS_io_uring_register: c_long = 427; -pub const SYS_open_tree: c_long = 428; -pub const SYS_move_mount: c_long = 429; -pub const SYS_fsopen: c_long = 430; -pub const SYS_fsconfig: c_long = 431; -pub const SYS_fsmount: c_long = 432; -pub const SYS_fspick: c_long = 433; -pub const SYS_pidfd_open: c_long = 434; -pub const SYS_clone3: c_long = 435; -pub const SYS_close_range: c_long = 436; -pub const SYS_openat2: c_long = 437; -pub const SYS_pidfd_getfd: c_long = 438; -pub const SYS_faccessat2: c_long = 439; -pub const SYS_process_madvise: c_long = 440; -pub const SYS_epoll_pwait2: c_long = 441; -pub const SYS_mount_setattr: c_long = 442; -pub const SYS_quotactl_fd: c_long = 443; -pub const SYS_landlock_create_ruleset: c_long = 444; -pub const SYS_landlock_add_rule: c_long = 445; -pub const SYS_landlock_restrict_self: c_long = 446; -pub const SYS_memfd_secret: c_long = 447; -pub const SYS_process_mrelease: c_long = 448; -pub const SYS_futex_waitv: c_long = 449; -pub const SYS_set_mempolicy_home_node: c_long = 450; diff --git a/vendor/libc/src/unix/linux_like/linux/uclibc/mips/mips32/mod.rs b/vendor/libc/src/unix/linux_like/linux/uclibc/mips/mips32/mod.rs deleted file mode 100644 index 7dd04409078555..00000000000000 --- a/vendor/libc/src/unix/linux_like/linux/uclibc/mips/mips32/mod.rs +++ /dev/null @@ -1,695 +0,0 @@ -use crate::off64_t; -use crate::prelude::*; - -pub type clock_t = i32; -pub type time_t = i32; -pub type suseconds_t = i32; -pub type wchar_t = i32; -pub type off_t = i32; -pub type ino_t = u32; -pub type blkcnt_t = i32; -pub type blksize_t = i32; -pub type nlink_t = u32; -pub type fsblkcnt_t = c_ulong; -pub type fsfilcnt_t = c_ulong; -pub type __u64 = c_ulonglong; -pub type __s64 = c_longlong; -pub type fsblkcnt64_t = u64; -pub type fsfilcnt64_t = u64; - -s! 
{ - pub struct stat { - pub st_dev: crate::dev_t, - st_pad1: [c_long; 2], - pub st_ino: crate::ino_t, - pub st_mode: crate::mode_t, - pub st_nlink: crate::nlink_t, - pub st_uid: crate::uid_t, - pub st_gid: crate::gid_t, - pub st_rdev: crate::dev_t, - pub st_pad2: [c_long; 1], - pub st_size: off_t, - st_pad3: c_long, - pub st_atime: crate::time_t, - pub st_atime_nsec: c_long, - pub st_mtime: crate::time_t, - pub st_mtime_nsec: c_long, - pub st_ctime: crate::time_t, - pub st_ctime_nsec: c_long, - pub st_blksize: crate::blksize_t, - pub st_blocks: crate::blkcnt_t, - st_pad5: [c_long; 14], - } - - pub struct stat64 { - pub st_dev: crate::dev_t, - st_pad1: [c_long; 2], - pub st_ino: crate::ino64_t, - pub st_mode: crate::mode_t, - pub st_nlink: crate::nlink_t, - pub st_uid: crate::uid_t, - pub st_gid: crate::gid_t, - pub st_rdev: crate::dev_t, - st_pad2: [c_long; 2], - pub st_size: off64_t, - pub st_atime: crate::time_t, - pub st_atime_nsec: c_long, - pub st_mtime: crate::time_t, - pub st_mtime_nsec: c_long, - pub st_ctime: crate::time_t, - pub st_ctime_nsec: c_long, - pub st_blksize: crate::blksize_t, - st_pad3: c_long, - pub st_blocks: crate::blkcnt64_t, - st_pad5: [c_long; 14], - } - - pub struct statvfs64 { - pub f_bsize: c_ulong, - pub f_frsize: c_ulong, - pub f_blocks: crate::fsblkcnt64_t, - pub f_bfree: crate::fsblkcnt64_t, - pub f_bavail: crate::fsblkcnt64_t, - pub f_files: crate::fsfilcnt64_t, - pub f_ffree: crate::fsfilcnt64_t, - pub f_favail: crate::fsfilcnt64_t, - pub f_fsid: c_ulong, - pub __f_unused: c_int, - pub f_flag: c_ulong, - pub f_namemax: c_ulong, - pub __f_spare: [c_int; 6], - } - - pub struct pthread_attr_t { - __size: [u32; 9], - } - - pub struct sigaction { - pub sa_flags: c_uint, - pub sa_sigaction: crate::sighandler_t, - pub sa_mask: sigset_t, - _restorer: *mut c_void, - } - - pub struct stack_t { - pub ss_sp: *mut c_void, - pub ss_size: size_t, - pub ss_flags: c_int, - } - - pub struct sigset_t { - __val: [c_ulong; 4], - } - - pub struct siginfo_t { - pub si_signo: c_int, - pub si_code: c_int, - pub si_errno: c_int, - pub _pad: [c_int; 29], - } - - pub struct glob64_t { - pub gl_pathc: size_t, - pub gl_pathv: *mut *mut c_char, - pub gl_offs: size_t, - pub gl_flags: c_int, - - __unused1: *mut c_void, - __unused2: *mut c_void, - __unused3: *mut c_void, - __unused4: *mut c_void, - __unused5: *mut c_void, - } - - pub struct ipc_perm { - pub __key: crate::key_t, - pub uid: crate::uid_t, - pub gid: crate::gid_t, - pub cuid: crate::uid_t, - pub cgid: crate::gid_t, - pub mode: c_uint, - pub __seq: c_ushort, - __pad1: c_ushort, - __unused1: c_ulong, - __unused2: c_ulong, - } - - pub struct shmid_ds { - pub shm_perm: crate::ipc_perm, - pub shm_segsz: size_t, - pub shm_atime: crate::time_t, - pub shm_dtime: crate::time_t, - pub shm_ctime: crate::time_t, - pub shm_cpid: crate::pid_t, - pub shm_lpid: crate::pid_t, - pub shm_nattch: crate::shmatt_t, - __unused4: c_ulong, - __unused5: c_ulong, - } - - pub struct msqid_ds { - pub msg_perm: crate::ipc_perm, - #[cfg(target_endian = "big")] - __glibc_reserved1: c_ulong, - pub msg_stime: crate::time_t, - #[cfg(target_endian = "little")] - __glibc_reserved1: c_ulong, - #[cfg(target_endian = "big")] - __glibc_reserved2: c_ulong, - pub msg_rtime: crate::time_t, - #[cfg(target_endian = "little")] - __glibc_reserved2: c_ulong, - #[cfg(target_endian = "big")] - __glibc_reserved3: c_ulong, - pub msg_ctime: crate::time_t, - #[cfg(target_endian = "little")] - __glibc_reserved3: c_ulong, - pub __msg_cbytes: c_ulong, - pub msg_qnum: 
crate::msgqnum_t, - pub msg_qbytes: crate::msglen_t, - pub msg_lspid: crate::pid_t, - pub msg_lrpid: crate::pid_t, - __glibc_reserved4: c_ulong, - __glibc_reserved5: c_ulong, - } - - pub struct statfs { - pub f_type: c_long, - pub f_bsize: c_long, - pub f_frsize: c_long, - pub f_blocks: crate::fsblkcnt_t, - pub f_bfree: crate::fsblkcnt_t, - pub f_files: crate::fsblkcnt_t, - pub f_ffree: crate::fsblkcnt_t, - pub f_bavail: crate::fsblkcnt_t, - pub f_fsid: crate::fsid_t, - - pub f_namelen: c_long, - f_spare: [c_long; 6], - } - - pub struct statfs64 { - pub f_type: c_long, - pub f_bsize: c_long, - pub f_frsize: c_long, - pub f_blocks: crate::fsblkcnt64_t, - pub f_bfree: crate::fsblkcnt64_t, - pub f_files: crate::fsblkcnt64_t, - pub f_ffree: crate::fsblkcnt64_t, - pub f_bavail: crate::fsblkcnt64_t, - pub f_fsid: crate::fsid_t, - pub f_namelen: c_long, - pub f_flags: c_long, - pub f_spare: [c_long; 5], - } - - pub struct msghdr { - pub msg_name: *mut c_void, - pub msg_namelen: crate::socklen_t, - pub msg_iov: *mut crate::iovec, - pub msg_iovlen: c_int, - pub msg_control: *mut c_void, - pub msg_controllen: size_t, - pub msg_flags: c_int, - } - - pub struct cmsghdr { - pub cmsg_len: size_t, - pub cmsg_level: c_int, - pub cmsg_type: c_int, - } - - pub struct termios { - pub c_iflag: crate::tcflag_t, - pub c_oflag: crate::tcflag_t, - pub c_cflag: crate::tcflag_t, - pub c_lflag: crate::tcflag_t, - pub c_line: crate::cc_t, - pub c_cc: [crate::cc_t; crate::NCCS], - } - - pub struct flock { - pub l_type: c_short, - pub l_whence: c_short, - pub l_start: off_t, - pub l_len: off_t, - pub l_sysid: c_long, - pub l_pid: crate::pid_t, - pad: [c_long; 4], - } - - pub struct sysinfo { - pub uptime: c_long, - pub loads: [c_ulong; 3], - pub totalram: c_ulong, - pub freeram: c_ulong, - pub sharedram: c_ulong, - pub bufferram: c_ulong, - pub totalswap: c_ulong, - pub freeswap: c_ulong, - pub procs: c_ushort, - pub pad: c_ushort, - pub totalhigh: c_ulong, - pub freehigh: c_ulong, - pub mem_unit: c_uint, - pub _f: [c_char; 8], - } - - // FIXME(1.0): this is actually a union - #[cfg_attr(target_pointer_width = "32", repr(align(4)))] - #[cfg_attr(target_pointer_width = "64", repr(align(8)))] - pub struct sem_t { - #[cfg(target_pointer_width = "32")] - __size: [c_char; 16], - #[cfg(target_pointer_width = "64")] - __size: [c_char; 32], - } -} - -pub const __SIZEOF_PTHREAD_ATTR_T: usize = 36; -pub const __SIZEOF_PTHREAD_MUTEX_T: usize = 24; -pub const __SIZEOF_PTHREAD_MUTEXATTR_T: usize = 4; -pub const __SIZEOF_PTHREAD_CONDATTR_T: usize = 4; -pub const __SIZEOF_PTHREAD_RWLOCK_T: usize = 32; -pub const __SIZEOF_PTHREAD_RWLOCKATTR_T: usize = 8; -pub const __SIZEOF_PTHREAD_BARRIER_T: usize = 20; -pub const __SIZEOF_PTHREAD_BARRIERATTR_T: usize = 4; - -pub const SYS_syscall: c_long = 4000 + 0; -pub const SYS_exit: c_long = 4000 + 1; -pub const SYS_fork: c_long = 4000 + 2; -pub const SYS_read: c_long = 4000 + 3; -pub const SYS_write: c_long = 4000 + 4; -pub const SYS_open: c_long = 4000 + 5; -pub const SYS_close: c_long = 4000 + 6; -pub const SYS_waitpid: c_long = 4000 + 7; -pub const SYS_creat: c_long = 4000 + 8; -pub const SYS_link: c_long = 4000 + 9; -pub const SYS_unlink: c_long = 4000 + 10; -pub const SYS_execve: c_long = 4000 + 11; -pub const SYS_chdir: c_long = 4000 + 12; -pub const SYS_time: c_long = 4000 + 13; -pub const SYS_mknod: c_long = 4000 + 14; -pub const SYS_chmod: c_long = 4000 + 15; -pub const SYS_lchown: c_long = 4000 + 16; -pub const SYS_break: c_long = 4000 + 17; -pub const SYS_lseek: c_long = 4000 + 19; 
-pub const SYS_getpid: c_long = 4000 + 20; -pub const SYS_mount: c_long = 4000 + 21; -pub const SYS_umount: c_long = 4000 + 22; -pub const SYS_setuid: c_long = 4000 + 23; -pub const SYS_getuid: c_long = 4000 + 24; -pub const SYS_stime: c_long = 4000 + 25; -pub const SYS_ptrace: c_long = 4000 + 26; -pub const SYS_alarm: c_long = 4000 + 27; -pub const SYS_pause: c_long = 4000 + 29; -pub const SYS_utime: c_long = 4000 + 30; -pub const SYS_stty: c_long = 4000 + 31; -pub const SYS_gtty: c_long = 4000 + 32; -pub const SYS_access: c_long = 4000 + 33; -pub const SYS_nice: c_long = 4000 + 34; -pub const SYS_ftime: c_long = 4000 + 35; -pub const SYS_sync: c_long = 4000 + 36; -pub const SYS_kill: c_long = 4000 + 37; -pub const SYS_rename: c_long = 4000 + 38; -pub const SYS_mkdir: c_long = 4000 + 39; -pub const SYS_rmdir: c_long = 4000 + 40; -pub const SYS_dup: c_long = 4000 + 41; -pub const SYS_pipe: c_long = 4000 + 42; -pub const SYS_times: c_long = 4000 + 43; -pub const SYS_prof: c_long = 4000 + 44; -pub const SYS_brk: c_long = 4000 + 45; -pub const SYS_setgid: c_long = 4000 + 46; -pub const SYS_getgid: c_long = 4000 + 47; -pub const SYS_signal: c_long = 4000 + 48; -pub const SYS_geteuid: c_long = 4000 + 49; -pub const SYS_getegid: c_long = 4000 + 50; -pub const SYS_acct: c_long = 4000 + 51; -pub const SYS_umount2: c_long = 4000 + 52; -pub const SYS_lock: c_long = 4000 + 53; -pub const SYS_ioctl: c_long = 4000 + 54; -pub const SYS_fcntl: c_long = 4000 + 55; -pub const SYS_mpx: c_long = 4000 + 56; -pub const SYS_setpgid: c_long = 4000 + 57; -pub const SYS_ulimit: c_long = 4000 + 58; -pub const SYS_umask: c_long = 4000 + 60; -pub const SYS_chroot: c_long = 4000 + 61; -pub const SYS_ustat: c_long = 4000 + 62; -pub const SYS_dup2: c_long = 4000 + 63; -pub const SYS_getppid: c_long = 4000 + 64; -pub const SYS_getpgrp: c_long = 4000 + 65; -pub const SYS_setsid: c_long = 4000 + 66; -pub const SYS_sigaction: c_long = 4000 + 67; -pub const SYS_sgetmask: c_long = 4000 + 68; -pub const SYS_ssetmask: c_long = 4000 + 69; -pub const SYS_setreuid: c_long = 4000 + 70; -pub const SYS_setregid: c_long = 4000 + 71; -pub const SYS_sigsuspend: c_long = 4000 + 72; -pub const SYS_sigpending: c_long = 4000 + 73; -pub const SYS_sethostname: c_long = 4000 + 74; -pub const SYS_setrlimit: c_long = 4000 + 75; -pub const SYS_getrlimit: c_long = 4000 + 76; -pub const SYS_getrusage: c_long = 4000 + 77; -pub const SYS_gettimeofday: c_long = 4000 + 78; -pub const SYS_settimeofday: c_long = 4000 + 79; -pub const SYS_getgroups: c_long = 4000 + 80; -pub const SYS_setgroups: c_long = 4000 + 81; -pub const SYS_symlink: c_long = 4000 + 83; -pub const SYS_readlink: c_long = 4000 + 85; -pub const SYS_uselib: c_long = 4000 + 86; -pub const SYS_swapon: c_long = 4000 + 87; -pub const SYS_reboot: c_long = 4000 + 88; -pub const SYS_readdir: c_long = 4000 + 89; -pub const SYS_mmap: c_long = 4000 + 90; -pub const SYS_munmap: c_long = 4000 + 91; -pub const SYS_truncate: c_long = 4000 + 92; -pub const SYS_ftruncate: c_long = 4000 + 93; -pub const SYS_fchmod: c_long = 4000 + 94; -pub const SYS_fchown: c_long = 4000 + 95; -pub const SYS_getpriority: c_long = 4000 + 96; -pub const SYS_setpriority: c_long = 4000 + 97; -pub const SYS_profil: c_long = 4000 + 98; -pub const SYS_statfs: c_long = 4000 + 99; -pub const SYS_fstatfs: c_long = 4000 + 100; -pub const SYS_ioperm: c_long = 4000 + 101; -pub const SYS_socketcall: c_long = 4000 + 102; -pub const SYS_syslog: c_long = 4000 + 103; -pub const SYS_setitimer: c_long = 4000 + 104; -pub const SYS_getitimer: 
c_long = 4000 + 105; -pub const SYS_stat: c_long = 4000 + 106; -pub const SYS_lstat: c_long = 4000 + 107; -pub const SYS_fstat: c_long = 4000 + 108; -pub const SYS_iopl: c_long = 4000 + 110; -pub const SYS_vhangup: c_long = 4000 + 111; -pub const SYS_idle: c_long = 4000 + 112; -pub const SYS_vm86: c_long = 4000 + 113; -pub const SYS_wait4: c_long = 4000 + 114; -pub const SYS_swapoff: c_long = 4000 + 115; -pub const SYS_sysinfo: c_long = 4000 + 116; -pub const SYS_ipc: c_long = 4000 + 117; -pub const SYS_fsync: c_long = 4000 + 118; -pub const SYS_sigreturn: c_long = 4000 + 119; -pub const SYS_clone: c_long = 4000 + 120; -pub const SYS_setdomainname: c_long = 4000 + 121; -pub const SYS_uname: c_long = 4000 + 122; -pub const SYS_modify_ldt: c_long = 4000 + 123; -pub const SYS_adjtimex: c_long = 4000 + 124; -pub const SYS_mprotect: c_long = 4000 + 125; -pub const SYS_sigprocmask: c_long = 4000 + 126; -#[deprecated(since = "0.2.70", note = "Functional up to 2.6 kernel")] -pub const SYS_create_module: c_long = 4000 + 127; -pub const SYS_init_module: c_long = 4000 + 128; -pub const SYS_delete_module: c_long = 4000 + 129; -#[deprecated(since = "0.2.70", note = "Functional up to 2.6 kernel")] -pub const SYS_get_kernel_syms: c_long = 4000 + 130; -pub const SYS_quotactl: c_long = 4000 + 131; -pub const SYS_getpgid: c_long = 4000 + 132; -pub const SYS_fchdir: c_long = 4000 + 133; -pub const SYS_bdflush: c_long = 4000 + 134; -pub const SYS_sysfs: c_long = 4000 + 135; -pub const SYS_personality: c_long = 4000 + 136; -pub const SYS_afs_syscall: c_long = 4000 + 137; -pub const SYS_setfsuid: c_long = 4000 + 138; -pub const SYS_setfsgid: c_long = 4000 + 139; -pub const SYS__llseek: c_long = 4000 + 140; -pub const SYS_getdents: c_long = 4000 + 141; -pub const SYS__newselect: c_long = 4000 + 142; -pub const SYS_flock: c_long = 4000 + 143; -pub const SYS_msync: c_long = 4000 + 144; -pub const SYS_readv: c_long = 4000 + 145; -pub const SYS_writev: c_long = 4000 + 146; -pub const SYS_cacheflush: c_long = 4000 + 147; -pub const SYS_cachectl: c_long = 4000 + 148; -pub const SYS_sysmips: c_long = 4000 + 149; -pub const SYS_getsid: c_long = 4000 + 151; -pub const SYS_fdatasync: c_long = 4000 + 152; -pub const SYS__sysctl: c_long = 4000 + 153; -pub const SYS_mlock: c_long = 4000 + 154; -pub const SYS_munlock: c_long = 4000 + 155; -pub const SYS_mlockall: c_long = 4000 + 156; -pub const SYS_munlockall: c_long = 4000 + 157; -pub const SYS_sched_setparam: c_long = 4000 + 158; -pub const SYS_sched_getparam: c_long = 4000 + 159; -pub const SYS_sched_setscheduler: c_long = 4000 + 160; -pub const SYS_sched_getscheduler: c_long = 4000 + 161; -pub const SYS_sched_yield: c_long = 4000 + 162; -pub const SYS_sched_get_priority_max: c_long = 4000 + 163; -pub const SYS_sched_get_priority_min: c_long = 4000 + 164; -pub const SYS_sched_rr_get_interval: c_long = 4000 + 165; -pub const SYS_nanosleep: c_long = 4000 + 166; -pub const SYS_mremap: c_long = 4000 + 167; -pub const SYS_accept: c_long = 4000 + 168; -pub const SYS_bind: c_long = 4000 + 169; -pub const SYS_connect: c_long = 4000 + 170; -pub const SYS_getpeername: c_long = 4000 + 171; -pub const SYS_getsockname: c_long = 4000 + 172; -pub const SYS_getsockopt: c_long = 4000 + 173; -pub const SYS_listen: c_long = 4000 + 174; -pub const SYS_recv: c_long = 4000 + 175; -pub const SYS_recvfrom: c_long = 4000 + 176; -pub const SYS_recvmsg: c_long = 4000 + 177; -pub const SYS_send: c_long = 4000 + 178; -pub const SYS_sendmsg: c_long = 4000 + 179; -pub const SYS_sendto: c_long = 4000 + 
180; -pub const SYS_setsockopt: c_long = 4000 + 181; -pub const SYS_shutdown: c_long = 4000 + 182; -pub const SYS_socket: c_long = 4000 + 183; -pub const SYS_socketpair: c_long = 4000 + 184; -pub const SYS_setresuid: c_long = 4000 + 185; -pub const SYS_getresuid: c_long = 4000 + 186; -#[deprecated(since = "0.2.70", note = "Functional up to 2.6 kernel")] -pub const SYS_query_module: c_long = 4000 + 187; -pub const SYS_poll: c_long = 4000 + 188; -pub const SYS_nfsservctl: c_long = 4000 + 189; -pub const SYS_setresgid: c_long = 4000 + 190; -pub const SYS_getresgid: c_long = 4000 + 191; -pub const SYS_prctl: c_long = 4000 + 192; -pub const SYS_rt_sigreturn: c_long = 4000 + 193; -pub const SYS_rt_sigaction: c_long = 4000 + 194; -pub const SYS_rt_sigprocmask: c_long = 4000 + 195; -pub const SYS_rt_sigpending: c_long = 4000 + 196; -pub const SYS_rt_sigtimedwait: c_long = 4000 + 197; -pub const SYS_rt_sigqueueinfo: c_long = 4000 + 198; -pub const SYS_rt_sigsuspend: c_long = 4000 + 199; -pub const SYS_pread64: c_long = 4000 + 200; -pub const SYS_pwrite64: c_long = 4000 + 201; -pub const SYS_chown: c_long = 4000 + 202; -pub const SYS_getcwd: c_long = 4000 + 203; -pub const SYS_capget: c_long = 4000 + 204; -pub const SYS_capset: c_long = 4000 + 205; -pub const SYS_sigaltstack: c_long = 4000 + 206; -pub const SYS_sendfile: c_long = 4000 + 207; -pub const SYS_getpmsg: c_long = 4000 + 208; -pub const SYS_putpmsg: c_long = 4000 + 209; -pub const SYS_mmap2: c_long = 4000 + 210; -pub const SYS_truncate64: c_long = 4000 + 211; -pub const SYS_ftruncate64: c_long = 4000 + 212; -pub const SYS_stat64: c_long = 4000 + 213; -pub const SYS_lstat64: c_long = 4000 + 214; -pub const SYS_fstat64: c_long = 4000 + 215; -pub const SYS_pivot_root: c_long = 4000 + 216; -pub const SYS_mincore: c_long = 4000 + 217; -pub const SYS_madvise: c_long = 4000 + 218; -pub const SYS_getdents64: c_long = 4000 + 219; -pub const SYS_fcntl64: c_long = 4000 + 220; -pub const SYS_gettid: c_long = 4000 + 222; -pub const SYS_readahead: c_long = 4000 + 223; -pub const SYS_setxattr: c_long = 4000 + 224; -pub const SYS_lsetxattr: c_long = 4000 + 225; -pub const SYS_fsetxattr: c_long = 4000 + 226; -pub const SYS_getxattr: c_long = 4000 + 227; -pub const SYS_lgetxattr: c_long = 4000 + 228; -pub const SYS_fgetxattr: c_long = 4000 + 229; -pub const SYS_listxattr: c_long = 4000 + 230; -pub const SYS_llistxattr: c_long = 4000 + 231; -pub const SYS_flistxattr: c_long = 4000 + 232; -pub const SYS_removexattr: c_long = 4000 + 233; -pub const SYS_lremovexattr: c_long = 4000 + 234; -pub const SYS_fremovexattr: c_long = 4000 + 235; -pub const SYS_tkill: c_long = 4000 + 236; -pub const SYS_sendfile64: c_long = 4000 + 237; -pub const SYS_futex: c_long = 4000 + 238; -pub const SYS_sched_setaffinity: c_long = 4000 + 239; -pub const SYS_sched_getaffinity: c_long = 4000 + 240; -pub const SYS_io_setup: c_long = 4000 + 241; -pub const SYS_io_destroy: c_long = 4000 + 242; -pub const SYS_io_getevents: c_long = 4000 + 243; -pub const SYS_io_submit: c_long = 4000 + 244; -pub const SYS_io_cancel: c_long = 4000 + 245; -pub const SYS_exit_group: c_long = 4000 + 246; -pub const SYS_lookup_dcookie: c_long = 4000 + 247; -pub const SYS_epoll_create: c_long = 4000 + 248; -pub const SYS_epoll_ctl: c_long = 4000 + 249; -pub const SYS_epoll_wait: c_long = 4000 + 250; -pub const SYS_remap_file_pages: c_long = 4000 + 251; -pub const SYS_set_tid_address: c_long = 4000 + 252; -pub const SYS_restart_syscall: c_long = 4000 + 253; -pub const SYS_fadvise64: c_long = 4000 + 254; -pub 
const SYS_statfs64: c_long = 4000 + 255; -pub const SYS_fstatfs64: c_long = 4000 + 256; -pub const SYS_timer_create: c_long = 4000 + 257; -pub const SYS_timer_settime: c_long = 4000 + 258; -pub const SYS_timer_gettime: c_long = 4000 + 259; -pub const SYS_timer_getoverrun: c_long = 4000 + 260; -pub const SYS_timer_delete: c_long = 4000 + 261; -pub const SYS_clock_settime: c_long = 4000 + 262; -pub const SYS_clock_gettime: c_long = 4000 + 263; -pub const SYS_clock_getres: c_long = 4000 + 264; -pub const SYS_clock_nanosleep: c_long = 4000 + 265; -pub const SYS_tgkill: c_long = 4000 + 266; -pub const SYS_utimes: c_long = 4000 + 267; -pub const SYS_mbind: c_long = 4000 + 268; -pub const SYS_get_mempolicy: c_long = 4000 + 269; -pub const SYS_set_mempolicy: c_long = 4000 + 270; -pub const SYS_mq_open: c_long = 4000 + 271; -pub const SYS_mq_unlink: c_long = 4000 + 272; -pub const SYS_mq_timedsend: c_long = 4000 + 273; -pub const SYS_mq_timedreceive: c_long = 4000 + 274; -pub const SYS_mq_notify: c_long = 4000 + 275; -pub const SYS_mq_getsetattr: c_long = 4000 + 276; -pub const SYS_vserver: c_long = 4000 + 277; -pub const SYS_waitid: c_long = 4000 + 278; -/* pub const SYS_sys_setaltroot: c_long = 4000 + 279; */ -pub const SYS_add_key: c_long = 4000 + 280; -pub const SYS_request_key: c_long = 4000 + 281; -pub const SYS_keyctl: c_long = 4000 + 282; -pub const SYS_set_thread_area: c_long = 4000 + 283; -pub const SYS_inotify_init: c_long = 4000 + 284; -pub const SYS_inotify_add_watch: c_long = 4000 + 285; -pub const SYS_inotify_rm_watch: c_long = 4000 + 286; -pub const SYS_migrate_pages: c_long = 4000 + 287; -pub const SYS_openat: c_long = 4000 + 288; -pub const SYS_mkdirat: c_long = 4000 + 289; -pub const SYS_mknodat: c_long = 4000 + 290; -pub const SYS_fchownat: c_long = 4000 + 291; -pub const SYS_futimesat: c_long = 4000 + 292; -pub const SYS_fstatat64: c_long = 4000 + 293; -pub const SYS_unlinkat: c_long = 4000 + 294; -pub const SYS_renameat: c_long = 4000 + 295; -pub const SYS_linkat: c_long = 4000 + 296; -pub const SYS_symlinkat: c_long = 4000 + 297; -pub const SYS_readlinkat: c_long = 4000 + 298; -pub const SYS_fchmodat: c_long = 4000 + 299; -pub const SYS_faccessat: c_long = 4000 + 300; -pub const SYS_pselect6: c_long = 4000 + 301; -pub const SYS_ppoll: c_long = 4000 + 302; -pub const SYS_unshare: c_long = 4000 + 303; -pub const SYS_splice: c_long = 4000 + 304; -pub const SYS_sync_file_range: c_long = 4000 + 305; -pub const SYS_tee: c_long = 4000 + 306; -pub const SYS_vmsplice: c_long = 4000 + 307; -pub const SYS_move_pages: c_long = 4000 + 308; -pub const SYS_set_robust_list: c_long = 4000 + 309; -pub const SYS_get_robust_list: c_long = 4000 + 310; -pub const SYS_kexec_load: c_long = 4000 + 311; -pub const SYS_getcpu: c_long = 4000 + 312; -pub const SYS_epoll_pwait: c_long = 4000 + 313; -pub const SYS_ioprio_set: c_long = 4000 + 314; -pub const SYS_ioprio_get: c_long = 4000 + 315; -pub const SYS_utimensat: c_long = 4000 + 316; -pub const SYS_signalfd: c_long = 4000 + 317; -pub const SYS_timerfd: c_long = 4000 + 318; -pub const SYS_eventfd: c_long = 4000 + 319; -pub const SYS_fallocate: c_long = 4000 + 320; -pub const SYS_timerfd_create: c_long = 4000 + 321; -pub const SYS_timerfd_gettime: c_long = 4000 + 322; -pub const SYS_timerfd_settime: c_long = 4000 + 323; -pub const SYS_signalfd4: c_long = 4000 + 324; -pub const SYS_eventfd2: c_long = 4000 + 325; -pub const SYS_epoll_create1: c_long = 4000 + 326; -pub const SYS_dup3: c_long = 4000 + 327; -pub const SYS_pipe2: c_long = 4000 + 328; -pub 
const SYS_inotify_init1: c_long = 4000 + 329; -pub const SYS_preadv: c_long = 4000 + 330; -pub const SYS_pwritev: c_long = 4000 + 331; -pub const SYS_rt_tgsigqueueinfo: c_long = 4000 + 332; -pub const SYS_perf_event_open: c_long = 4000 + 333; -pub const SYS_accept4: c_long = 4000 + 334; -pub const SYS_recvmmsg: c_long = 4000 + 335; -pub const SYS_fanotify_init: c_long = 4000 + 336; -pub const SYS_fanotify_mark: c_long = 4000 + 337; -pub const SYS_prlimit64: c_long = 4000 + 338; -pub const SYS_name_to_handle_at: c_long = 4000 + 339; -pub const SYS_open_by_handle_at: c_long = 4000 + 340; -pub const SYS_clock_adjtime: c_long = 4000 + 341; -pub const SYS_syncfs: c_long = 4000 + 342; -pub const SYS_sendmmsg: c_long = 4000 + 343; -pub const SYS_setns: c_long = 4000 + 344; -pub const SYS_process_vm_readv: c_long = 4000 + 345; -pub const SYS_process_vm_writev: c_long = 4000 + 346; -pub const SYS_kcmp: c_long = 4000 + 347; -pub const SYS_finit_module: c_long = 4000 + 348; -pub const SYS_sched_setattr: c_long = 4000 + 349; -pub const SYS_sched_getattr: c_long = 4000 + 350; -pub const SYS_renameat2: c_long = 4000 + 351; -pub const SYS_seccomp: c_long = 4000 + 352; -pub const SYS_getrandom: c_long = 4000 + 353; -pub const SYS_memfd_create: c_long = 4000 + 354; -pub const SYS_bpf: c_long = 4000 + 355; -pub const SYS_execveat: c_long = 4000 + 356; -pub const SYS_userfaultfd: c_long = 4000 + 357; -pub const SYS_membarrier: c_long = 4000 + 358; -pub const SYS_mlock2: c_long = 4000 + 359; -pub const SYS_copy_file_range: c_long = 4000 + 360; -pub const SYS_preadv2: c_long = 4000 + 361; -pub const SYS_pwritev2: c_long = 4000 + 362; -pub const SYS_pkey_mprotect: c_long = 4000 + 363; -pub const SYS_pkey_alloc: c_long = 4000 + 364; -pub const SYS_pkey_free: c_long = 4000 + 365; -pub const SYS_statx: c_long = 4000 + 366; -pub const SYS_pidfd_send_signal: c_long = 4000 + 424; -pub const SYS_io_uring_setup: c_long = 4000 + 425; -pub const SYS_io_uring_enter: c_long = 4000 + 426; -pub const SYS_io_uring_register: c_long = 4000 + 427; -pub const SYS_open_tree: c_long = 4000 + 428; -pub const SYS_move_mount: c_long = 4000 + 429; -pub const SYS_fsopen: c_long = 4000 + 430; -pub const SYS_fsconfig: c_long = 4000 + 431; -pub const SYS_fsmount: c_long = 4000 + 432; -pub const SYS_fspick: c_long = 4000 + 433; -pub const SYS_pidfd_open: c_long = 4000 + 434; -pub const SYS_clone3: c_long = 4000 + 435; -pub const SYS_close_range: c_long = 4000 + 436; -pub const SYS_openat2: c_long = 4000 + 437; -pub const SYS_pidfd_getfd: c_long = 4000 + 438; -pub const SYS_faccessat2: c_long = 4000 + 439; -pub const SYS_process_madvise: c_long = 4000 + 440; -pub const SYS_epoll_pwait2: c_long = 4000 + 441; -pub const SYS_mount_setattr: c_long = 4000 + 442; -pub const SYS_quotactl_fd: c_long = 4000 + 443; -pub const SYS_landlock_create_ruleset: c_long = 4000 + 444; -pub const SYS_landlock_add_rule: c_long = 4000 + 445; -pub const SYS_landlock_restrict_self: c_long = 4000 + 446; -pub const SYS_memfd_secret: c_long = 4000 + 447; -pub const SYS_process_mrelease: c_long = 4000 + 448; -pub const SYS_futex_waitv: c_long = 4000 + 449; -pub const SYS_set_mempolicy_home_node: c_long = 4000 + 450; - -#[link(name = "util")] -extern "C" { - pub fn sysctl( - name: *mut c_int, - namelen: c_int, - oldp: *mut c_void, - oldlenp: *mut size_t, - newp: *mut c_void, - newlen: size_t, - ) -> c_int; - pub fn glob64( - pattern: *const c_char, - flags: c_int, - errfunc: Option<extern "C" fn(epath: *const c_char, errno: c_int) -> c_int>, - pglob: *mut glob64_t, - ) -> c_int; - pub fn globfree64(pglob: *mut glob64_t); - 
pub fn pthread_attr_getaffinity_np( - attr: *const crate::pthread_attr_t, - cpusetsize: size_t, - cpuset: *mut crate::cpu_set_t, - ) -> c_int; - pub fn pthread_attr_setaffinity_np( - attr: *mut crate::pthread_attr_t, - cpusetsize: size_t, - cpuset: *const crate::cpu_set_t, - ) -> c_int; -} diff --git a/vendor/libc/src/unix/linux_like/linux/uclibc/mips/mips64/mod.rs b/vendor/libc/src/unix/linux_like/linux/uclibc/mips/mips64/mod.rs deleted file mode 100644 index 39eb0242730d84..00000000000000 --- a/vendor/libc/src/unix/linux_like/linux/uclibc/mips/mips64/mod.rs +++ /dev/null @@ -1,204 +0,0 @@ -use crate::off64_t; -use crate::prelude::*; - -pub type blkcnt_t = i64; -pub type blksize_t = i64; -pub type fsblkcnt_t = c_ulong; -pub type fsfilcnt_t = c_ulong; -pub type ino_t = u64; -pub type nlink_t = u64; -pub type off_t = i64; -pub type suseconds_t = i64; -pub type time_t = i64; -pub type wchar_t = i32; - -s! { - pub struct stat { - pub st_dev: c_ulong, - st_pad1: [c_long; 2], - pub st_ino: crate::ino_t, - pub st_mode: crate::mode_t, - pub st_nlink: crate::nlink_t, - pub st_uid: crate::uid_t, - pub st_gid: crate::gid_t, - pub st_rdev: c_ulong, - st_pad2: [c_ulong; 1], - pub st_size: off_t, - st_pad3: c_long, - pub st_atime: crate::time_t, - pub st_atime_nsec: c_long, - pub st_mtime: crate::time_t, - pub st_mtime_nsec: c_long, - pub st_ctime: crate::time_t, - pub st_ctime_nsec: c_long, - pub st_blksize: crate::blksize_t, - st_pad4: c_long, - pub st_blocks: crate::blkcnt_t, - st_pad5: [c_long; 7], - } - - pub struct stat64 { - pub st_dev: c_ulong, - st_pad1: [c_long; 2], - pub st_ino: crate::ino64_t, - pub st_mode: crate::mode_t, - pub st_nlink: crate::nlink_t, - pub st_uid: crate::uid_t, - pub st_gid: crate::gid_t, - pub st_rdev: c_ulong, - st_pad2: [c_long; 2], - pub st_size: off64_t, - pub st_atime: crate::time_t, - pub st_atime_nsec: c_long, - pub st_mtime: crate::time_t, - pub st_mtime_nsec: c_long, - pub st_ctime: crate::time_t, - pub st_ctime_nsec: c_long, - pub st_blksize: crate::blksize_t, - st_pad3: c_long, - pub st_blocks: crate::blkcnt64_t, - st_pad5: [c_long; 7], - } - - pub struct pthread_attr_t { - __size: [c_ulong; 7], - } - - pub struct sigaction { - pub sa_flags: c_int, - pub sa_sigaction: crate::sighandler_t, - pub sa_mask: sigset_t, - _restorer: *mut c_void, - } - - pub struct stack_t { - pub ss_sp: *mut c_void, - pub ss_size: size_t, - pub ss_flags: c_int, - } - - pub struct sigset_t { - __size: [c_ulong; 16], - } - - pub struct siginfo_t { - pub si_signo: c_int, - pub si_code: c_int, - pub si_errno: c_int, - _pad: c_int, - _pad2: [c_long; 14], - } - - pub struct ipc_perm { - pub __key: crate::key_t, - pub uid: crate::uid_t, - pub gid: crate::gid_t, - pub cuid: crate::uid_t, - pub cgid: crate::gid_t, - pub mode: c_uint, - pub __seq: c_ushort, - __pad1: c_ushort, - __unused1: c_ulong, - __unused2: c_ulong, - } - - pub struct shmid_ds { - pub shm_perm: crate::ipc_perm, - pub shm_segsz: size_t, - pub shm_atime: crate::time_t, - pub shm_dtime: crate::time_t, - pub shm_ctime: crate::time_t, - pub shm_cpid: crate::pid_t, - pub shm_lpid: crate::pid_t, - pub shm_nattch: crate::shmatt_t, - __unused4: c_ulong, - __unused5: c_ulong, - } - - pub struct msqid_ds { - pub msg_perm: crate::ipc_perm, - pub msg_stime: crate::time_t, - pub msg_rtime: crate::time_t, - pub msg_ctime: crate::time_t, - pub __msg_cbytes: c_ulong, - pub msg_qnum: crate::msgqnum_t, - pub msg_qbytes: crate::msglen_t, - pub msg_lspid: crate::pid_t, - pub msg_lrpid: crate::pid_t, - __glibc_reserved4: c_ulong, - 
__glibc_reserved5: c_ulong, - } - - pub struct statfs { - pub f_type: c_long, - pub f_bsize: c_long, - pub f_frsize: c_long, - pub f_blocks: crate::fsblkcnt_t, - pub f_bfree: crate::fsblkcnt_t, - pub f_files: crate::fsblkcnt_t, - pub f_ffree: crate::fsblkcnt_t, - pub f_bavail: crate::fsblkcnt_t, - pub f_fsid: crate::fsid_t, - - pub f_namelen: c_long, - f_spare: [c_long; 6], - } - - pub struct msghdr { - pub msg_name: *mut c_void, - pub msg_namelen: crate::socklen_t, - pub msg_iov: *mut crate::iovec, - pub msg_iovlen: size_t, - pub msg_control: *mut c_void, - pub msg_controllen: size_t, - pub msg_flags: c_int, - } - - pub struct cmsghdr { - pub cmsg_len: size_t, - pub cmsg_level: c_int, - pub cmsg_type: c_int, - } - - pub struct termios { - pub c_iflag: crate::tcflag_t, - pub c_oflag: crate::tcflag_t, - pub c_cflag: crate::tcflag_t, - pub c_lflag: crate::tcflag_t, - pub c_line: crate::cc_t, - pub c_cc: [crate::cc_t; crate::NCCS], - } - - pub struct sysinfo { - pub uptime: c_long, - pub loads: [c_ulong; 3], - pub totalram: c_ulong, - pub freeram: c_ulong, - pub sharedram: c_ulong, - pub bufferram: c_ulong, - pub totalswap: c_ulong, - pub freeswap: c_ulong, - pub procs: c_ushort, - pub pad: c_ushort, - pub totalhigh: c_ulong, - pub freehigh: c_ulong, - pub mem_unit: c_uint, - pub _f: [c_char; 0], - } - - // FIXME(1.0): this is actually a union - #[cfg_attr(target_pointer_width = "32", repr(align(4)))] - #[cfg_attr(target_pointer_width = "64", repr(align(8)))] - pub struct sem_t { - __size: [c_char; 32], - } -} - -pub const __SIZEOF_PTHREAD_CONDATTR_T: usize = 4; -pub const __SIZEOF_PTHREAD_MUTEXATTR_T: usize = 4; -pub const __SIZEOF_PTHREAD_BARRIERATTR_T: usize = 4; -pub const __SIZEOF_PTHREAD_MUTEX_T: usize = 40; -pub const __SIZEOF_PTHREAD_RWLOCK_T: usize = 56; -pub const __SIZEOF_PTHREAD_BARRIER_T: usize = 32; - -pub const SYS_gettid: c_long = 5178; // Valid for n64 diff --git a/vendor/libc/src/unix/linux_like/linux/uclibc/mips/mod.rs b/vendor/libc/src/unix/linux_like/linux/uclibc/mips/mod.rs deleted file mode 100644 index 8d17aa8e98e9aa..00000000000000 --- a/vendor/libc/src/unix/linux_like/linux/uclibc/mips/mod.rs +++ /dev/null @@ -1,312 +0,0 @@ -use crate::prelude::*; - -pub type pthread_t = c_ulong; - -pub const SFD_CLOEXEC: c_int = 0x080000; - -pub const NCCS: usize = 32; - -pub const O_TRUNC: c_int = 512; - -pub const O_CLOEXEC: c_int = 0x80000; - -pub const EBFONT: c_int = 59; -pub const ENOSTR: c_int = 60; -pub const ENODATA: c_int = 61; -pub const ETIME: c_int = 62; -pub const ENOSR: c_int = 63; -pub const ENONET: c_int = 64; -pub const ENOPKG: c_int = 65; -pub const EREMOTE: c_int = 66; -pub const ENOLINK: c_int = 67; -pub const EADV: c_int = 68; -pub const ESRMNT: c_int = 69; -pub const ECOMM: c_int = 70; -pub const EPROTO: c_int = 71; -pub const EDOTDOT: c_int = 73; - -pub const SA_NODEFER: c_uint = 0x40000000; -pub const SA_RESETHAND: c_uint = 0x80000000; -pub const SA_RESTART: c_uint = 0x10000000; -pub const SA_NOCLDSTOP: c_uint = 0x00000001; - -pub const EPOLL_CLOEXEC: c_int = 0x80000; - -pub const EFD_CLOEXEC: c_int = 0x80000; - -pub const TMP_MAX: c_uint = 238328; -pub const _SC_2_C_VERSION: c_int = 96; -pub const O_ACCMODE: c_int = 3; -pub const O_DIRECT: c_int = 0x8000; -pub const O_DIRECTORY: c_int = 0x10000; -pub const O_NOFOLLOW: c_int = 0x20000; -pub const O_NOATIME: c_int = 0x40000; -pub const O_PATH: c_int = 0o010000000; - -pub const O_APPEND: c_int = 8; -pub const O_CREAT: c_int = 256; -pub const O_EXCL: c_int = 1024; -pub const O_NOCTTY: c_int = 2048; -pub const 
O_NONBLOCK: c_int = 128; -pub const O_SYNC: c_int = 0x10; -pub const O_RSYNC: c_int = 0x10; -pub const O_DSYNC: c_int = 0x10; -pub const O_FSYNC: c_int = 0x10; -pub const O_ASYNC: c_int = 0x1000; -pub const O_LARGEFILE: c_int = 0x2000; -pub const O_NDELAY: c_int = 0x80; - -pub const SOCK_NONBLOCK: c_int = 128; - -pub const EDEADLK: c_int = 45; -pub const ENAMETOOLONG: c_int = 78; -pub const ENOLCK: c_int = 46; -pub const ENOSYS: c_int = 89; -pub const ENOTEMPTY: c_int = 93; -pub const ELOOP: c_int = 90; -pub const ENOMSG: c_int = 35; -pub const EIDRM: c_int = 36; -pub const ECHRNG: c_int = 37; -pub const EL2NSYNC: c_int = 38; -pub const EL3HLT: c_int = 39; -pub const EL3RST: c_int = 40; -pub const ELNRNG: c_int = 41; -pub const EUNATCH: c_int = 42; -pub const ENOCSI: c_int = 43; -pub const EL2HLT: c_int = 44; -pub const EBADE: c_int = 50; -pub const EBADR: c_int = 51; -pub const EXFULL: c_int = 52; -pub const FFDLY: c_int = 0o0100000; -pub const ENOANO: c_int = 53; -pub const EBADRQC: c_int = 54; -pub const EBADSLT: c_int = 55; -pub const EMULTIHOP: c_int = 74; -pub const EOVERFLOW: c_int = 79; -pub const ENOTUNIQ: c_int = 80; -pub const EBADFD: c_int = 81; -pub const EBADMSG: c_int = 77; -pub const EREMCHG: c_int = 82; -pub const ELIBACC: c_int = 83; -pub const ELIBBAD: c_int = 84; -pub const ELIBSCN: c_int = 85; -pub const ELIBMAX: c_int = 86; -pub const ELIBEXEC: c_int = 87; -pub const EILSEQ: c_int = 88; -pub const ERESTART: c_int = 91; -pub const ESTRPIPE: c_int = 92; -pub const EUSERS: c_int = 94; -pub const ENOTSOCK: c_int = 95; -pub const EDESTADDRREQ: c_int = 96; -pub const EMSGSIZE: c_int = 97; -pub const EPROTOTYPE: c_int = 98; -pub const ENOPROTOOPT: c_int = 99; -pub const EPROTONOSUPPORT: c_int = 120; -pub const ESOCKTNOSUPPORT: c_int = 121; -pub const EOPNOTSUPP: c_int = 122; -pub const EPFNOSUPPORT: c_int = 123; -pub const EAFNOSUPPORT: c_int = 124; -pub const EADDRINUSE: c_int = 125; -pub const EADDRNOTAVAIL: c_int = 126; -pub const ENETDOWN: c_int = 127; -pub const ENETUNREACH: c_int = 128; -pub const ENETRESET: c_int = 129; -pub const ECONNABORTED: c_int = 130; -pub const ECONNRESET: c_int = 131; -pub const ENOBUFS: c_int = 132; -pub const EISCONN: c_int = 133; -pub const ENOTCONN: c_int = 134; -pub const ESHUTDOWN: c_int = 143; -pub const ETOOMANYREFS: c_int = 144; -pub const ETIMEDOUT: c_int = 145; -pub const ECONNREFUSED: c_int = 146; -pub const EHOSTDOWN: c_int = 147; -pub const EHOSTUNREACH: c_int = 148; -pub const EALREADY: c_int = 149; -pub const EINPROGRESS: c_int = 150; -pub const ESTALE: c_int = 151; -pub const EUCLEAN: c_int = 135; -pub const ENOTNAM: c_int = 137; -pub const ENAVAIL: c_int = 138; -pub const EISNAM: c_int = 139; -pub const EREMOTEIO: c_int = 140; -pub const EDQUOT: c_int = 1133; -pub const ENOMEDIUM: c_int = 159; -pub const EMEDIUMTYPE: c_int = 160; -pub const ECANCELED: c_int = 158; -pub const ENOKEY: c_int = 161; -pub const EKEYEXPIRED: c_int = 162; -pub const EKEYREVOKED: c_int = 163; -pub const EKEYREJECTED: c_int = 164; -pub const EOWNERDEAD: c_int = 165; -pub const ENOTRECOVERABLE: c_int = 166; -pub const ERFKILL: c_int = 167; - -pub const MAP_NORESERVE: c_int = 0x400; -pub const MAP_ANON: c_int = 0x800; -pub const MAP_ANONYMOUS: c_int = 0x800; -pub const MAP_GROWSDOWN: c_int = 0x1000; -pub const MAP_DENYWRITE: c_int = 0x2000; -pub const MAP_EXECUTABLE: c_int = 0x4000; -pub const MAP_LOCKED: c_int = 0x8000; -pub const MAP_POPULATE: c_int = 0x10000; -pub const MAP_NONBLOCK: c_int = 0x20000; -pub const MAP_STACK: c_int = 0x40000; - -pub 
const NLDLY: crate::tcflag_t = 0o0000400; - -pub const SOCK_STREAM: c_int = 2; -pub const SOCK_DGRAM: c_int = 1; -pub const SOCK_SEQPACKET: c_int = 5; - -pub const SA_ONSTACK: c_uint = 0x08000000; -pub const SA_SIGINFO: c_uint = 0x00000008; -pub const SA_NOCLDWAIT: c_int = 0x00010000; - -pub const SIGEMT: c_int = 7; -pub const SIGCHLD: c_int = 18; -pub const SIGBUS: c_int = 10; -pub const SIGTTIN: c_int = 26; -pub const SIGTTOU: c_int = 27; -pub const SIGXCPU: c_int = 30; -pub const SIGXFSZ: c_int = 31; -pub const SIGVTALRM: c_int = 28; -pub const SIGPROF: c_int = 29; -pub const SIGWINCH: c_int = 20; -pub const SIGUSR1: c_int = 16; -pub const SIGUSR2: c_int = 17; -pub const SIGCONT: c_int = 25; -pub const SIGSTOP: c_int = 23; -pub const SIGTSTP: c_int = 24; -pub const SIGURG: c_int = 21; -pub const SIGIO: c_int = 22; -pub const SIGSYS: c_int = 12; -pub const SIGPWR: c_int = 19; -pub const SIG_SETMASK: c_int = 3; -pub const SIG_BLOCK: c_int = 0x1; -pub const SIG_UNBLOCK: c_int = 0x2; - -pub const POLLWRNORM: c_short = 0x004; -pub const POLLWRBAND: c_short = 0x100; - -pub const PTHREAD_STACK_MIN: size_t = 16384; - -pub const VEOF: usize = 16; -pub const VEOL: usize = 17; -pub const VEOL2: usize = 6; -pub const VMIN: usize = 4; -pub const IEXTEN: crate::tcflag_t = 0x00000100; -pub const TOSTOP: crate::tcflag_t = 0x00008000; -pub const FLUSHO: crate::tcflag_t = 0x00002000; -pub const TCSANOW: c_int = 0x540e; -pub const TCSADRAIN: c_int = 0x540f; -pub const TCSAFLUSH: c_int = 0x5410; - -pub const CPU_SETSIZE: c_int = 0x400; - -pub const EFD_NONBLOCK: c_int = 0x80; - -pub const F_GETLK: c_int = 14; -pub const F_SETLK: c_int = 6; -pub const F_SETLKW: c_int = 7; - -pub const SFD_NONBLOCK: c_int = 0x80; - -pub const RTLD_GLOBAL: c_int = 0x4; - -pub const SIGSTKSZ: size_t = 8192; -pub const CBAUD: crate::tcflag_t = 0o0010017; -pub const CBAUDEX: crate::tcflag_t = 0o0010000; -pub const CIBAUD: crate::tcflag_t = 0o002003600000; -pub const TAB1: crate::tcflag_t = 0x00000800; -pub const TAB2: crate::tcflag_t = 0x00001000; -pub const TAB3: crate::tcflag_t = 0x00001800; -pub const TABDLY: crate::tcflag_t = 0o0014000; -pub const CR1: crate::tcflag_t = 0x00000200; -pub const CR2: crate::tcflag_t = 0x00000400; -pub const CR3: crate::tcflag_t = 0x00000600; -pub const FF1: crate::tcflag_t = 0x00008000; -pub const BS1: crate::tcflag_t = 0x00002000; -pub const BSDLY: crate::tcflag_t = 0o0020000; -pub const VT1: crate::tcflag_t = 0x00004000; -pub const VWERASE: usize = 14; -pub const XTABS: crate::tcflag_t = 0o0014000; -pub const VREPRINT: usize = 12; -pub const VSUSP: usize = 10; -pub const VSWTC: usize = 7; -pub const VTDLY: c_int = 0o0040000; -pub const VSTART: usize = 8; -pub const VSTOP: usize = 9; -pub const VDISCARD: usize = 13; -pub const VTIME: usize = 5; -pub const IXON: crate::tcflag_t = 0x00000400; -pub const IXOFF: crate::tcflag_t = 0x00001000; -pub const OLCUC: crate::tcflag_t = 0o0000002; -pub const ONLCR: crate::tcflag_t = 0x4; -pub const CSIZE: crate::tcflag_t = 0x00000030; -pub const CS6: crate::tcflag_t = 0x00000010; -pub const CS7: crate::tcflag_t = 0x00000020; -pub const CS8: crate::tcflag_t = 0x00000030; -pub const CSTOPB: crate::tcflag_t = 0x00000040; -pub const CRDLY: c_int = 0o0003000; -pub const CREAD: crate::tcflag_t = 0x00000080; -pub const PARENB: crate::tcflag_t = 0x00000100; -pub const PARODD: crate::tcflag_t = 0x00000200; -pub const HUPCL: crate::tcflag_t = 0x00000400; -pub const CLOCAL: crate::tcflag_t = 0x00000800; -pub const ECHOKE: crate::tcflag_t = 0x00000800; -pub const 
ECHOE: crate::tcflag_t = 0x00000010; -pub const ECHOK: crate::tcflag_t = 0x00000020; -pub const ECHONL: crate::tcflag_t = 0x00000040; -pub const ECHOPRT: crate::tcflag_t = 0x00000400; -pub const ECHOCTL: crate::tcflag_t = 0x00000200; -pub const ISIG: crate::tcflag_t = 0x00000001; -pub const ICANON: crate::tcflag_t = 0x00000002; -pub const PENDIN: crate::tcflag_t = 0x00004000; -pub const NOFLSH: crate::tcflag_t = 0x00000080; - -pub const MAP_HUGETLB: c_int = 0x80000; - -pub const B0: crate::speed_t = 0o000000; -pub const B50: crate::speed_t = 0o000001; -pub const B75: crate::speed_t = 0o000002; -pub const B110: crate::speed_t = 0o000003; -pub const B134: crate::speed_t = 0o000004; -pub const B150: crate::speed_t = 0o000005; -pub const B200: crate::speed_t = 0o000006; -pub const B300: crate::speed_t = 0o000007; -pub const B600: crate::speed_t = 0o000010; -pub const B1200: crate::speed_t = 0o000011; -pub const B1800: crate::speed_t = 0o000012; -pub const B2400: crate::speed_t = 0o000013; -pub const B4800: crate::speed_t = 0o000014; -pub const B9600: crate::speed_t = 0o000015; -pub const B19200: crate::speed_t = 0o000016; -pub const B38400: crate::speed_t = 0o000017; -pub const B57600: crate::speed_t = 0o010001; -pub const B115200: crate::speed_t = 0o010002; -pub const B230400: crate::speed_t = 0o010003; -pub const B460800: crate::speed_t = 0o010004; -pub const B500000: crate::speed_t = 0o010005; -pub const B576000: crate::speed_t = 0o010006; -pub const B921600: crate::speed_t = 0o010007; -pub const B1000000: crate::speed_t = 0o010010; -pub const B1152000: crate::speed_t = 0o010011; -pub const B1500000: crate::speed_t = 0o010012; -pub const B2000000: crate::speed_t = 0o010013; -pub const B2500000: crate::speed_t = 0o010014; -pub const B3000000: crate::speed_t = 0o010015; -pub const B3500000: crate::speed_t = 0o010016; -pub const B4000000: crate::speed_t = 0o010017; - -cfg_if! { - if #[cfg(target_arch = "mips")] { - mod mips32; - pub use self::mips32::*; - } else if #[cfg(target_arch = "mips64")] { - mod mips64; - pub use self::mips64::*; - } else { - // Unknown target_arch - } -} diff --git a/vendor/libc/src/unix/linux_like/linux/uclibc/mod.rs b/vendor/libc/src/unix/linux_like/linux/uclibc/mod.rs deleted file mode 100644 index 4fef82ed8e1671..00000000000000 --- a/vendor/libc/src/unix/linux_like/linux/uclibc/mod.rs +++ /dev/null @@ -1,517 +0,0 @@ -// FIXME(ulibc): this module has definitions that are redundant with the parent -#![allow(dead_code)] - -use crate::off64_t; -use crate::prelude::*; - -pub type shmatt_t = c_ulong; -pub type msgqnum_t = c_ulong; -pub type msglen_t = c_ulong; -pub type regoff_t = c_int; -pub type rlim_t = c_ulong; -pub type __rlimit_resource_t = c_ulong; -pub type __priority_which_t = c_uint; - -cfg_if! { - if #[cfg(doc)] { - // Used in `linux::arch` to define ioctl constants. - pub(crate) type Ioctl = c_ulong; - } else { - #[doc(hidden)] - pub type Ioctl = c_ulong; - } -} - -s! { - pub struct statvfs { - // Different than GNU! 
- pub f_bsize: c_ulong, - pub f_frsize: c_ulong, - pub f_blocks: crate::fsblkcnt_t, - pub f_bfree: crate::fsblkcnt_t, - pub f_bavail: crate::fsblkcnt_t, - pub f_files: crate::fsfilcnt_t, - pub f_ffree: crate::fsfilcnt_t, - pub f_favail: crate::fsfilcnt_t, - #[cfg(target_endian = "little")] - pub f_fsid: c_ulong, - #[cfg(target_pointer_width = "32")] - __f_unused: c_int, - #[cfg(target_endian = "big")] - pub f_fsid: c_ulong, - pub f_flag: c_ulong, - pub f_namemax: c_ulong, - __f_spare: [c_int; 6], - } - - pub struct regex_t { - __buffer: *mut c_void, - __allocated: size_t, - __used: size_t, - __syntax: c_ulong, - __fastmap: *mut c_char, - __translate: *mut c_char, - __re_nsub: size_t, - __bitfield: u8, - } - - pub struct rtentry { - pub rt_pad1: c_ulong, - pub rt_dst: crate::sockaddr, - pub rt_gateway: crate::sockaddr, - pub rt_genmask: crate::sockaddr, - pub rt_flags: c_ushort, - pub rt_pad2: c_short, - pub rt_pad3: c_ulong, - pub rt_tos: c_uchar, - pub rt_class: c_uchar, - #[cfg(target_pointer_width = "64")] - pub rt_pad4: [c_short; 3usize], - #[cfg(not(target_pointer_width = "64"))] - pub rt_pad4: c_short, - pub rt_metric: c_short, - pub rt_dev: *mut c_char, - pub rt_mtu: c_ulong, - pub rt_window: c_ulong, - pub rt_irtt: c_ushort, - } - - pub struct __exit_status { - pub e_termination: c_short, - pub e_exit: c_short, - } - - pub struct ptrace_peeksiginfo_args { - pub off: crate::__u64, - pub flags: crate::__u32, - pub nr: crate::__s32, - } - - #[cfg_attr( - any( - target_pointer_width = "32", - target_arch = "x86_64", - target_arch = "powerpc64", - target_arch = "mips64", - target_arch = "s390x", - target_arch = "sparc64" - ), - repr(align(4)) - )] - #[cfg_attr( - not(any( - target_pointer_width = "32", - target_arch = "x86_64", - target_arch = "powerpc64", - target_arch = "mips64", - target_arch = "s390x", - target_arch = "sparc64" - )), - repr(align(8)) - )] - pub struct pthread_mutexattr_t { - size: [u8; crate::__SIZEOF_PTHREAD_MUTEXATTR_T], - } - - #[repr(align(4))] - pub struct pthread_condattr_t { - size: [u8; crate::__SIZEOF_PTHREAD_CONDATTR_T], - } - - pub struct tcp_info { - pub tcpi_state: u8, - pub tcpi_ca_state: u8, - pub tcpi_retransmits: u8, - pub tcpi_probes: u8, - pub tcpi_backoff: u8, - pub tcpi_options: u8, - /// This contains the bitfields `tcpi_snd_wscale` and `tcpi_rcv_wscale`. - /// Each is 4 bits. 
- pub tcpi_snd_rcv_wscale: u8, - pub tcpi_rto: u32, - pub tcpi_ato: u32, - pub tcpi_snd_mss: u32, - pub tcpi_rcv_mss: u32, - pub tcpi_unacked: u32, - pub tcpi_sacked: u32, - pub tcpi_lost: u32, - pub tcpi_retrans: u32, - pub tcpi_fackets: u32, - pub tcpi_last_data_sent: u32, - pub tcpi_last_ack_sent: u32, - pub tcpi_last_data_recv: u32, - pub tcpi_last_ack_recv: u32, - pub tcpi_pmtu: u32, - pub tcpi_rcv_ssthresh: u32, - pub tcpi_rtt: u32, - pub tcpi_rttvar: u32, - pub tcpi_snd_ssthresh: u32, - pub tcpi_snd_cwnd: u32, - pub tcpi_advmss: u32, - pub tcpi_reordering: u32, - pub tcpi_rcv_rtt: u32, - pub tcpi_rcv_space: u32, - pub tcpi_total_retrans: u32, - } -} - -impl siginfo_t { - pub unsafe fn si_addr(&self) -> *mut c_void { - #[repr(C)] - struct siginfo_sigfault { - _si_signo: c_int, - _si_errno: c_int, - _si_code: c_int, - si_addr: *mut c_void, - } - (*(self as *const siginfo_t as *const siginfo_sigfault)).si_addr - } - - pub unsafe fn si_value(&self) -> crate::sigval { - #[repr(C)] - struct siginfo_si_value { - _si_signo: c_int, - _si_errno: c_int, - _si_code: c_int, - _si_timerid: c_int, - _si_overrun: c_int, - si_value: crate::sigval, - } - (*(self as *const siginfo_t as *const siginfo_si_value)).si_value - } -} - -// Internal, for casts to access union fields -#[repr(C)] -struct sifields_sigchld { - si_pid: crate::pid_t, - si_uid: crate::uid_t, - si_status: c_int, - si_utime: c_long, - si_stime: c_long, -} -impl Copy for sifields_sigchld {} -impl Clone for sifields_sigchld { - fn clone(&self) -> sifields_sigchld { - *self - } -} - -// Internal, for casts to access union fields -#[repr(C)] -union sifields { - _align_pointer: *mut c_void, - sigchld: sifields_sigchld, -} - -// Internal, for casts to access union fields. Note that some variants -// of sifields start with a pointer, which makes the alignment of -// sifields vary on 32-bit and 64-bit architectures. -#[repr(C)] -struct siginfo_f { - _siginfo_base: [c_int; 3], - sifields: sifields, -} - -impl siginfo_t { - unsafe fn sifields(&self) -> &sifields { - &(*(self as *const siginfo_t as *const siginfo_f)).sifields - } - - pub unsafe fn si_pid(&self) -> crate::pid_t { - self.sifields().sigchld.si_pid - } - - pub unsafe fn si_uid(&self) -> crate::uid_t { - self.sifields().sigchld.si_uid - } - - pub unsafe fn si_status(&self) -> c_int { - self.sifields().sigchld.si_status - } - - pub unsafe fn si_utime(&self) -> c_long { - self.sifields().sigchld.si_utime - } - - pub unsafe fn si_stime(&self) -> c_long { - self.sifields().sigchld.si_stime - } -} - -pub const MCL_CURRENT: c_int = 0x0001; -pub const MCL_FUTURE: c_int = 0x0002; -pub const MCL_ONFAULT: c_int = 0x0004; - -pub const SIGEV_THREAD_ID: c_int = 4; - -pub const AF_VSOCK: c_int = 40; - -// Most `*_SUPER_MAGIC` constants are defined at the `linux_like` level; the -// following are only available on newer Linux versions than the versions -// currently used in CI in some configurations, so we define them here. 
-pub const BINDERFS_SUPER_MAGIC: c_long = 0x6c6f6f70; -pub const XFS_SUPER_MAGIC: c_long = 0x58465342; - -pub const PTRACE_TRACEME: c_int = 0; -pub const PTRACE_PEEKTEXT: c_int = 1; -pub const PTRACE_PEEKDATA: c_int = 2; -pub const PTRACE_PEEKUSER: c_int = 3; -pub const PTRACE_POKETEXT: c_int = 4; -pub const PTRACE_POKEDATA: c_int = 5; -pub const PTRACE_POKEUSER: c_int = 6; -pub const PTRACE_CONT: c_int = 7; -pub const PTRACE_KILL: c_int = 8; -pub const PTRACE_SINGLESTEP: c_int = 9; -pub const PTRACE_GETREGS: c_int = 12; -pub const PTRACE_SETREGS: c_int = 13; -pub const PTRACE_GETFPREGS: c_int = 14; -pub const PTRACE_SETFPREGS: c_int = 15; -pub const PTRACE_ATTACH: c_int = 16; -pub const PTRACE_DETACH: c_int = 17; -pub const PTRACE_GETFPXREGS: c_int = 18; -pub const PTRACE_SETFPXREGS: c_int = 19; -pub const PTRACE_SYSCALL: c_int = 24; -pub const PTRACE_SETOPTIONS: c_int = 0x4200; -pub const PTRACE_GETEVENTMSG: c_int = 0x4201; -pub const PTRACE_GETSIGINFO: c_int = 0x4202; -pub const PTRACE_SETSIGINFO: c_int = 0x4203; -pub const PTRACE_GETREGSET: c_int = 0x4204; -pub const PTRACE_SETREGSET: c_int = 0x4205; -pub const PTRACE_SEIZE: c_int = 0x4206; -pub const PTRACE_INTERRUPT: c_int = 0x4207; -pub const PTRACE_LISTEN: c_int = 0x4208; - -pub const POSIX_FADV_DONTNEED: c_int = 4; -pub const POSIX_FADV_NOREUSE: c_int = 5; - -// These are different than GNU! -pub const LC_CTYPE: c_int = 0; -pub const LC_NUMERIC: c_int = 1; -pub const LC_TIME: c_int = 3; -pub const LC_COLLATE: c_int = 4; -pub const LC_MONETARY: c_int = 2; -pub const LC_MESSAGES: c_int = 5; -pub const LC_ALL: c_int = 6; -// end different section - -// MS_ flags for mount(2) -pub const MS_RMT_MASK: c_ulong = - crate::MS_RDONLY | crate::MS_SYNCHRONOUS | crate::MS_MANDLOCK | crate::MS_I_VERSION; - -pub const ENOTSUP: c_int = EOPNOTSUPP; - -pub const IPV6_JOIN_GROUP: c_int = 20; -pub const IPV6_LEAVE_GROUP: c_int = 21; - -// These are different from GNU -pub const ABDAY_1: crate::nl_item = 0x300; -pub const ABDAY_2: crate::nl_item = 0x301; -pub const ABDAY_3: crate::nl_item = 0x302; -pub const ABDAY_4: crate::nl_item = 0x303; -pub const ABDAY_5: crate::nl_item = 0x304; -pub const ABDAY_6: crate::nl_item = 0x305; -pub const ABDAY_7: crate::nl_item = 0x306; -pub const DAY_1: crate::nl_item = 0x307; -pub const DAY_2: crate::nl_item = 0x308; -pub const DAY_3: crate::nl_item = 0x309; -pub const DAY_4: crate::nl_item = 0x30A; -pub const DAY_5: crate::nl_item = 0x30B; -pub const DAY_6: crate::nl_item = 0x30C; -pub const DAY_7: crate::nl_item = 0x30D; -pub const ABMON_1: crate::nl_item = 0x30E; -pub const ABMON_2: crate::nl_item = 0x30F; -pub const ABMON_3: crate::nl_item = 0x310; -pub const ABMON_4: crate::nl_item = 0x311; -pub const ABMON_5: crate::nl_item = 0x312; -pub const ABMON_6: crate::nl_item = 0x313; -pub const ABMON_7: crate::nl_item = 0x314; -pub const ABMON_8: crate::nl_item = 0x315; -pub const ABMON_9: crate::nl_item = 0x316; -pub const ABMON_10: crate::nl_item = 0x317; -pub const ABMON_11: crate::nl_item = 0x318; -pub const ABMON_12: crate::nl_item = 0x319; -pub const MON_1: crate::nl_item = 0x31A; -pub const MON_2: crate::nl_item = 0x31B; -pub const MON_3: crate::nl_item = 0x31C; -pub const MON_4: crate::nl_item = 0x31D; -pub const MON_5: crate::nl_item = 0x31E; -pub const MON_6: crate::nl_item = 0x31F; -pub const MON_7: crate::nl_item = 0x320; -pub const MON_8: crate::nl_item = 0x321; -pub const MON_9: crate::nl_item = 0x322; -pub const MON_10: crate::nl_item = 0x323; -pub const MON_11: crate::nl_item = 0x324; -pub const 
MON_12: crate::nl_item = 0x325; -pub const AM_STR: crate::nl_item = 0x326; -pub const PM_STR: crate::nl_item = 0x327; -pub const D_T_FMT: crate::nl_item = 0x328; -pub const D_FMT: crate::nl_item = 0x329; -pub const T_FMT: crate::nl_item = 0x32A; -pub const T_FMT_AMPM: crate::nl_item = 0x32B; -pub const ERA: crate::nl_item = 0x32C; -pub const ERA_D_FMT: crate::nl_item = 0x32E; -pub const ALT_DIGITS: crate::nl_item = 0x32F; -pub const ERA_D_T_FMT: crate::nl_item = 0x330; -pub const ERA_T_FMT: crate::nl_item = 0x331; -pub const CODESET: crate::nl_item = 10; -pub const CRNCYSTR: crate::nl_item = 0x215; -pub const RADIXCHAR: crate::nl_item = 0x100; -pub const THOUSEP: crate::nl_item = 0x101; -pub const NOEXPR: crate::nl_item = 0x501; -pub const YESSTR: crate::nl_item = 0x502; -pub const NOSTR: crate::nl_item = 0x503; - -// Different than Gnu. -pub const FILENAME_MAX: c_uint = 4095; - -pub const PRIO_PROCESS: c_int = 0; -pub const PRIO_PGRP: c_int = 1; -pub const PRIO_USER: c_int = 2; - -pub const SOMAXCONN: c_int = 128; - -pub const ST_RELATIME: c_ulong = 4096; - -pub const AF_NFC: c_int = PF_NFC; -pub const BUFSIZ: c_int = 4096; -pub const EDEADLOCK: c_int = EDEADLK; -pub const EXTA: c_uint = B19200; -pub const EXTB: c_uint = B38400; -pub const EXTPROC: crate::tcflag_t = 0o200000; -pub const FOPEN_MAX: c_int = 16; -pub const F_GETOWN: c_int = 9; -pub const F_OFD_GETLK: c_int = 36; -pub const F_OFD_SETLK: c_int = 37; -pub const F_OFD_SETLKW: c_int = 38; -pub const F_RDLCK: c_int = 0; -pub const F_SETOWN: c_int = 8; -pub const F_UNLCK: c_int = 2; -pub const F_WRLCK: c_int = 1; -pub const IPV6_MULTICAST_ALL: c_int = 29; -pub const IPV6_ROUTER_ALERT_ISOLATE: c_int = 30; -pub const MAP_HUGE_SHIFT: c_int = 26; -pub const MAP_HUGE_MASK: c_int = 0x3f; -pub const MAP_HUGE_64KB: c_int = 16 << MAP_HUGE_SHIFT; -pub const MAP_HUGE_512KB: c_int = 19 << MAP_HUGE_SHIFT; -pub const MAP_HUGE_1MB: c_int = 20 << MAP_HUGE_SHIFT; -pub const MAP_HUGE_2MB: c_int = 21 << MAP_HUGE_SHIFT; -pub const MAP_HUGE_8MB: c_int = 23 << MAP_HUGE_SHIFT; -pub const MAP_HUGE_16MB: c_int = 24 << MAP_HUGE_SHIFT; -pub const MAP_HUGE_32MB: c_int = 25 << MAP_HUGE_SHIFT; -pub const MAP_HUGE_256MB: c_int = 28 << MAP_HUGE_SHIFT; -pub const MAP_HUGE_512MB: c_int = 29 << MAP_HUGE_SHIFT; -pub const MAP_HUGE_1GB: c_int = 30 << MAP_HUGE_SHIFT; -pub const MAP_HUGE_2GB: c_int = 31 << MAP_HUGE_SHIFT; -pub const MAP_HUGE_16GB: c_int = 34 << MAP_HUGE_SHIFT; -pub const MINSIGSTKSZ: c_int = 2048; -pub const MSG_COPY: c_int = 0o40000; -pub const NI_MAXHOST: crate::socklen_t = 1025; -pub const O_TMPFILE: c_int = 0o20000000 | O_DIRECTORY; -pub const PACKET_MR_UNICAST: c_int = 3; -pub const PF_NFC: c_int = 39; -pub const PF_VSOCK: c_int = 40; -pub const POSIX_MADV_DONTNEED: c_int = 4; -pub const PTRACE_EVENT_STOP: c_int = 128; -pub const PTRACE_GETSIGMASK: c_uint = 0x420a; -pub const PTRACE_PEEKSIGINFO: c_int = 0x4209; -pub const PTRACE_SETSIGMASK: c_uint = 0x420b; -pub const RTLD_NOLOAD: c_int = 0x00004; -pub const RUSAGE_THREAD: c_int = 1; -pub const SHM_EXEC: c_int = 0o100000; -pub const SIGPOLL: c_int = SIGIO; -pub const SOCK_DCCP: c_int = 6; -#[deprecated(since = "0.2.70", note = "AF_PACKET must be used instead")] -pub const SOCK_PACKET: c_int = 10; -pub const TCP_COOKIE_TRANSACTIONS: c_int = 15; -pub const UDP_GRO: c_int = 104; -pub const UDP_SEGMENT: c_int = 103; -pub const YESEXPR: c_int = ((5) << 8) | (0); - -extern "C" { - pub fn gettimeofday(tp: *mut crate::timeval, tz: *mut crate::timezone) -> c_int; - - pub fn pthread_rwlockattr_getkind_np( - 
attr: *const crate::pthread_rwlockattr_t, - val: *mut c_int, - ) -> c_int; - pub fn pthread_rwlockattr_setkind_np( - attr: *mut crate::pthread_rwlockattr_t, - val: c_int, - ) -> c_int; - - pub fn ptrace(request: c_uint, ...) -> c_long; - - pub fn sendmmsg( - sockfd: c_int, - msgvec: *mut crate::mmsghdr, - vlen: c_uint, - flags: c_int, - ) -> c_int; - pub fn recvmmsg( - sockfd: c_int, - msgvec: *mut crate::mmsghdr, - vlen: c_uint, - flags: c_int, - timeout: *mut crate::timespec, - ) -> c_int; - - pub fn openpty( - amaster: *mut c_int, - aslave: *mut c_int, - name: *mut c_char, - termp: *mut termios, - winp: *mut crate::winsize, - ) -> c_int; - pub fn forkpty( - amaster: *mut c_int, - name: *mut c_char, - termp: *mut termios, - winp: *mut crate::winsize, - ) -> crate::pid_t; - - pub fn getnameinfo( - sa: *const crate::sockaddr, - salen: crate::socklen_t, - host: *mut c_char, - hostlen: crate::socklen_t, - serv: *mut c_char, - servlen: crate::socklen_t, - flags: c_int, - ) -> c_int; - - pub fn pwritev(fd: c_int, iov: *const crate::iovec, iovcnt: c_int, offset: off64_t) -> ssize_t; - pub fn preadv(fd: c_int, iov: *const crate::iovec, iovcnt: c_int, offset: off64_t) -> ssize_t; - - pub fn sethostid(hostid: c_long) -> c_int; - pub fn fanotify_mark( - fd: c_int, - flags: c_uint, - mask: u64, - dirfd: c_int, - path: *const c_char, - ) -> c_int; - pub fn getrlimit64(resource: crate::__rlimit_resource_t, rlim: *mut crate::rlimit64) -> c_int; - pub fn setrlimit64(resource: crate::__rlimit_resource_t, rlim: *const crate::rlimit64) - -> c_int; - pub fn getrlimit(resource: crate::__rlimit_resource_t, rlim: *mut crate::rlimit) -> c_int; - pub fn setrlimit(resource: crate::__rlimit_resource_t, rlim: *const crate::rlimit) -> c_int; - pub fn getpriority(which: crate::__priority_which_t, who: crate::id_t) -> c_int; - pub fn setpriority(which: crate::__priority_which_t, who: crate::id_t, prio: c_int) -> c_int; - pub fn getauxval(type_: c_ulong) -> c_ulong; -} - -cfg_if! { - if #[cfg(any(target_arch = "mips", target_arch = "mips64"))] { - mod mips; - pub use self::mips::*; - } else if #[cfg(target_arch = "x86_64")] { - mod x86_64; - pub use self::x86_64::*; - } else if #[cfg(target_arch = "arm")] { - mod arm; - pub use self::arm::*; - } else { - pub use unsupported_target; - } -} diff --git a/vendor/libc/src/unix/linux_like/linux/uclibc/x86_64/l4re.rs b/vendor/libc/src/unix/linux_like/linux/uclibc/x86_64/l4re.rs deleted file mode 100644 index 536c716ca48682..00000000000000 --- a/vendor/libc/src/unix/linux_like/linux/uclibc/x86_64/l4re.rs +++ /dev/null @@ -1,53 +0,0 @@ -use crate::prelude::*; - -/// L4Re specifics -/// This module contains definitions required by various L4Re libc backends. -/// Some of them are formally not part of the libc, but are a dependency of the -/// libc and hence we should provide them here. - -pub type l4_umword_t = c_ulong; // Unsigned machine word. -pub type pthread_t = *mut c_void; - -s! { - /// CPU sets. - pub struct l4_sched_cpu_set_t { - // from the L4Re docs - /// Combination of granularity and offset. - /// - /// The granularity defines how many CPUs each bit in map describes. - /// The offset is the number of the first CPU described by the first - /// bit in the bitmap. - /// offset must be a multiple of 2^graularity. - /// - /// | MSB | LSB | - /// | ---------------- | ------------------- | - /// | 8bit granularity | 24bit offset .. | - gran_offset: l4_umword_t, - /// Bitmap of CPUs. 
- map: l4_umword_t, - } - - pub struct pthread_attr_t { - pub __detachstate: c_int, - pub __schedpolicy: c_int, - pub __schedparam: super::__sched_param, - pub __inheritsched: c_int, - pub __scope: c_int, - pub __guardsize: size_t, - pub __stackaddr_set: c_int, - pub __stackaddr: *mut c_void, // better don't use it - pub __stacksize: size_t, - // L4Re specifics - pub affinity: l4_sched_cpu_set_t, - pub create_flags: c_uint, - } -} - -// L4Re requires a min stack size of 64k; that isn't defined in uClibc, but -// somewhere in the core libraries. uClibc wants 16k, but that's not enough. -pub const PTHREAD_STACK_MIN: usize = 65536; - -// Misc other constants required for building. -pub const SIGIO: c_int = 29; -pub const B19200: crate::speed_t = 0o000016; -pub const B38400: crate::speed_t = 0o000017; diff --git a/vendor/libc/src/unix/linux_like/linux/uclibc/x86_64/mod.rs b/vendor/libc/src/unix/linux_like/linux/uclibc/x86_64/mod.rs deleted file mode 100644 index 1a2e4bcc1a897f..00000000000000 --- a/vendor/libc/src/unix/linux_like/linux/uclibc/x86_64/mod.rs +++ /dev/null @@ -1,355 +0,0 @@ -//! Definitions for uclibc on 64bit systems - -use crate::off64_t; -use crate::prelude::*; - -pub type blkcnt_t = i64; -pub type blksize_t = i64; -pub type clock_t = i64; -pub type fsblkcnt_t = c_ulong; -pub type fsfilcnt_t = c_ulong; -pub type fsword_t = c_long; -pub type ino_t = c_ulong; -pub type nlink_t = c_uint; -pub type off_t = c_long; -// [uClibc docs] Note stat64 has the same shape as stat for x86-64. -pub type stat64 = stat; -pub type suseconds_t = c_long; -pub type time_t = c_int; -pub type wchar_t = c_int; - -pub type fsblkcnt64_t = u64; -pub type fsfilcnt64_t = u64; -pub type __u64 = c_ulong; -pub type __s64 = c_long; - -s! { - pub struct ipc_perm { - pub __key: crate::key_t, - pub uid: crate::uid_t, - pub gid: crate::gid_t, - pub cuid: crate::uid_t, - pub cgid: crate::gid_t, - pub mode: c_ushort, // read / write - __pad1: c_ushort, - pub __seq: c_ushort, - __pad2: c_ushort, - __unused1: c_ulong, - __unused2: c_ulong, - } - - #[cfg(not(target_os = "l4re"))] - pub struct pthread_attr_t { - __detachstate: c_int, - __schedpolicy: c_int, - __schedparam: __sched_param, - __inheritsched: c_int, - __scope: c_int, - __guardsize: size_t, - __stackaddr_set: c_int, - __stackaddr: *mut c_void, // better don't use it - __stacksize: size_t, - } - - pub struct __sched_param { - __sched_priority: c_int, - } - - pub struct siginfo_t { - si_signo: c_int, // signal number - si_errno: c_int, // if not zero: error value of signal, see errno.h - si_code: c_int, // signal code - pub _pad: [c_int; 28], // unported union - _align: [usize; 0], - } - - pub struct shmid_ds { - pub shm_perm: crate::ipc_perm, - pub shm_segsz: size_t, // segment size in bytes - pub shm_atime: crate::time_t, // time of last shmat() - pub shm_dtime: crate::time_t, - pub shm_ctime: crate::time_t, - pub shm_cpid: crate::pid_t, - pub shm_lpid: crate::pid_t, - pub shm_nattch: crate::shmatt_t, - __unused1: c_ulong, - __unused2: c_ulong, - } - - pub struct msqid_ds { - pub msg_perm: crate::ipc_perm, - pub msg_stime: crate::time_t, - pub msg_rtime: crate::time_t, - pub msg_ctime: crate::time_t, - pub __msg_cbytes: c_ulong, - pub msg_qnum: crate::msgqnum_t, - pub msg_qbytes: crate::msglen_t, - pub msg_lspid: crate::pid_t, - pub msg_lrpid: crate::pid_t, - __ignored1: c_ulong, - __ignored2: c_ulong, - } - - pub struct sockaddr { - pub sa_family: crate::sa_family_t, - pub sa_data: [c_char; 14], - } - - pub struct sockaddr_in { - pub sin_family: 
crate::sa_family_t, - pub sin_port: crate::in_port_t, - pub sin_addr: crate::in_addr, - pub sin_zero: [u8; 8], - } - - pub struct sockaddr_in6 { - pub sin6_family: crate::sa_family_t, - pub sin6_port: crate::in_port_t, - pub sin6_flowinfo: u32, - pub sin6_addr: crate::in6_addr, - pub sin6_scope_id: u32, - } - - /* ------------------------------------------------------------ - * definitions below are *unverified* and might **break** the software - */ - - // pub struct in_addr { - // pub s_addr: in_addr_t, - // } - // - // pub struct in6_addr { - // pub s6_addr: [u8; 16], - // } - - pub struct stat { - pub st_dev: c_ulong, - pub st_ino: crate::ino_t, - // According to uclibc/libc/sysdeps/linux/x86_64/bits/stat.h, order of - // nlink and mode are swapped on 64 bit systems. - pub st_nlink: crate::nlink_t, - pub st_mode: crate::mode_t, - pub st_uid: crate::uid_t, - pub st_gid: crate::gid_t, - pub st_rdev: c_ulong, // dev_t - pub st_size: off_t, // file size - pub st_blksize: crate::blksize_t, - pub st_blocks: crate::blkcnt_t, - pub st_atime: crate::time_t, - pub st_atime_nsec: c_ulong, - pub st_mtime: crate::time_t, - pub st_mtime_nsec: c_ulong, - pub st_ctime: crate::time_t, - pub st_ctime_nsec: c_ulong, - st_pad4: [c_long; 3], - } - - // FIXME(1.0): This should not implement `PartialEq` - #[allow(unpredictable_function_pointer_comparisons)] - pub struct sigaction { - pub sa_handler: crate::sighandler_t, - pub sa_flags: c_ulong, - pub sa_restorer: Option, - pub sa_mask: crate::sigset_t, - } - - pub struct stack_t { - // FIXME(ulibc) - pub ss_sp: *mut c_void, - pub ss_flags: c_int, - pub ss_size: size_t, - } - - pub struct statfs { - // FIXME(ulibc) - pub f_type: fsword_t, - pub f_bsize: fsword_t, - pub f_blocks: crate::fsblkcnt_t, - pub f_bfree: crate::fsblkcnt_t, - pub f_bavail: crate::fsblkcnt_t, - pub f_files: crate::fsfilcnt_t, - pub f_ffree: crate::fsfilcnt_t, - pub f_fsid: crate::fsid_t, - pub f_namelen: fsword_t, - pub f_frsize: fsword_t, - f_spare: [fsword_t; 5], - } - - pub struct statfs64 { - pub f_type: c_int, - pub f_bsize: c_int, - pub f_blocks: crate::fsblkcnt64_t, - pub f_bfree: crate::fsblkcnt64_t, - pub f_bavail: crate::fsblkcnt64_t, - pub f_files: crate::fsfilcnt64_t, - pub f_ffree: crate::fsfilcnt64_t, - pub f_fsid: crate::fsid_t, - pub f_namelen: c_int, - pub f_frsize: c_int, - pub f_flags: c_int, - pub f_spare: [c_int; 4], - } - - pub struct statvfs64 { - pub f_bsize: c_ulong, - pub f_frsize: c_ulong, - pub f_blocks: u64, - pub f_bfree: u64, - pub f_bavail: u64, - pub f_files: u64, - pub f_ffree: u64, - pub f_favail: u64, - pub f_fsid: c_ulong, - __f_unused: c_int, - pub f_flag: c_ulong, - pub f_namemax: c_ulong, - __f_spare: [c_int; 6], - } - - pub struct msghdr { - // FIXME(ulibc) - pub msg_name: *mut c_void, - pub msg_namelen: crate::socklen_t, - pub msg_iov: *mut crate::iovec, - pub msg_iovlen: size_t, - pub msg_control: *mut c_void, - pub msg_controllen: size_t, - pub msg_flags: c_int, - } - - pub struct termios { - // FIXME(ulibc) - pub c_iflag: crate::tcflag_t, - pub c_oflag: crate::tcflag_t, - pub c_cflag: crate::tcflag_t, - pub c_lflag: crate::tcflag_t, - pub c_line: crate::cc_t, - pub c_cc: [crate::cc_t; crate::NCCS], - } - - pub struct sigset_t { - // FIXME(ulibc) - __val: [c_ulong; 16], - } - - pub struct sysinfo { - // FIXME(ulibc) - pub uptime: c_long, - pub loads: [c_ulong; 3], - pub totalram: c_ulong, - pub freeram: c_ulong, - pub sharedram: c_ulong, - pub bufferram: c_ulong, - pub totalswap: c_ulong, - pub freeswap: c_ulong, - pub procs: c_ushort, - pub 
pad: c_ushort, - pub totalhigh: c_ulong, - pub freehigh: c_ulong, - pub mem_unit: c_uint, - pub _f: [c_char; 0], - } - - pub struct glob_t { - // FIXME(ulibc) - pub gl_pathc: size_t, - pub gl_pathv: *mut *mut c_char, - pub gl_offs: size_t, - pub gl_flags: c_int, - __unused1: *mut c_void, - __unused2: *mut c_void, - __unused3: *mut c_void, - __unused4: *mut c_void, - __unused5: *mut c_void, - } - - pub struct cpu_set_t { - // FIXME(ulibc) - #[cfg(target_pointer_width = "32")] - bits: [u32; 32], - #[cfg(target_pointer_width = "64")] - bits: [u64; 16], - } - - pub struct fsid_t { - // FIXME(ulibc) - __val: [c_int; 2], - } - - // FIXME(1.0): this is actually a union - pub struct sem_t { - #[cfg(target_pointer_width = "32")] - __size: [c_char; 16], - #[cfg(target_pointer_width = "64")] - __size: [c_char; 32], - __align: [c_long; 0], - } - - pub struct cmsghdr { - pub cmsg_len: size_t, - pub cmsg_level: c_int, - pub cmsg_type: c_int, - } -} - -s_no_extra_traits! { - pub struct dirent { - pub d_ino: crate::ino64_t, - pub d_off: off64_t, - pub d_reclen: u16, - pub d_type: u8, - pub d_name: [c_char; 256], - } -} - -// constants -pub const ENAMETOOLONG: c_int = 36; // File name too long -pub const ENOTEMPTY: c_int = 39; // Directory not empty -pub const ELOOP: c_int = 40; // Too many symbolic links encountered -pub const EADDRINUSE: c_int = 98; // Address already in use -pub const EADDRNOTAVAIL: c_int = 99; // Cannot assign requested address -pub const ENETDOWN: c_int = 100; // Network is down -pub const ENETUNREACH: c_int = 101; // Network is unreachable -pub const ECONNABORTED: c_int = 103; // Software caused connection abort -pub const ECONNREFUSED: c_int = 111; // Connection refused -pub const ECONNRESET: c_int = 104; // Connection reset by peer -pub const EDEADLK: c_int = 35; // Resource deadlock would occur -pub const ENOSYS: c_int = 38; // Function not implemented -pub const ENOTCONN: c_int = 107; // Transport endpoint is not connected -pub const ETIMEDOUT: c_int = 110; // connection timed out -pub const ESTALE: c_int = 116; // Stale file handle -pub const EHOSTUNREACH: c_int = 113; // No route to host -pub const EDQUOT: c_int = 122; // Quota exceeded -pub const EOPNOTSUPP: c_int = 0x5f; -pub const ENODATA: c_int = 0x3d; -pub const O_APPEND: c_int = 0o2000; -pub const O_ACCMODE: c_int = 0o003; -pub const O_CLOEXEC: c_int = 0x80000; -pub const O_CREAT: c_int = 0100; -pub const O_DIRECTORY: c_int = 0o200000; -pub const O_EXCL: c_int = 0o200; -pub const O_NOFOLLOW: c_int = 0x20000; -pub const O_NONBLOCK: c_int = 0o4000; -pub const O_TRUNC: c_int = 0o1000; -pub const NCCS: usize = 32; -pub const SIG_SETMASK: c_int = 2; // Set the set of blocked signals -pub const __SIZEOF_PTHREAD_MUTEX_T: usize = 40; -pub const __SIZEOF_PTHREAD_MUTEXATTR_T: usize = 4; -pub const SOCK_DGRAM: c_int = 2; // connectionless, unreliable datagrams -pub const SOCK_STREAM: c_int = 1; // …/common/bits/socket_type.h -pub const __SIZEOF_PTHREAD_COND_T: usize = 48; -pub const __SIZEOF_PTHREAD_CONDATTR_T: usize = 4; -pub const __SIZEOF_PTHREAD_RWLOCK_T: usize = 56; -pub const __SIZEOF_PTHREAD_RWLOCKATTR_T: usize = 8; -pub const __SIZEOF_PTHREAD_BARRIER_T: usize = 32; -pub const __SIZEOF_PTHREAD_BARRIERATTR_T: usize = 4; - -cfg_if! 
{ - if #[cfg(target_os = "l4re")] { - mod l4re; - pub use self::l4re::*; - } else { - mod other; - pub use other::*; - } -} diff --git a/vendor/libc/src/unix/linux_like/linux/uclibc/x86_64/other.rs b/vendor/libc/src/unix/linux_like/linux/uclibc/x86_64/other.rs deleted file mode 100644 index dc16d02c87977a..00000000000000 --- a/vendor/libc/src/unix/linux_like/linux/uclibc/x86_64/other.rs +++ /dev/null @@ -1,7 +0,0 @@ -use crate::prelude::*; - -// Thestyle checker discourages the use of #[cfg], so this has to go into a -// separate module -pub type pthread_t = c_ulong; - -pub const PTHREAD_STACK_MIN: usize = 16384; diff --git a/vendor/libc/src/unix/linux_like/mod.rs b/vendor/libc/src/unix/linux_like/mod.rs deleted file mode 100644 index fd3fa996caad4b..00000000000000 --- a/vendor/libc/src/unix/linux_like/mod.rs +++ /dev/null @@ -1,2214 +0,0 @@ -use crate::prelude::*; - -pub type sa_family_t = u16; -pub type speed_t = c_uint; -pub type tcflag_t = c_uint; -pub type clockid_t = c_int; -pub type timer_t = *mut c_void; -pub type key_t = c_int; -pub type id_t = c_uint; - -missing! { - #[derive(Debug)] - pub enum timezone {} -} - -s! { - pub struct in_addr { - pub s_addr: crate::in_addr_t, - } - - pub struct ip_mreq { - pub imr_multiaddr: in_addr, - pub imr_interface: in_addr, - } - - pub struct ip_mreqn { - pub imr_multiaddr: in_addr, - pub imr_address: in_addr, - pub imr_ifindex: c_int, - } - - pub struct ip_mreq_source { - pub imr_multiaddr: in_addr, - pub imr_interface: in_addr, - pub imr_sourceaddr: in_addr, - } - - pub struct sockaddr { - pub sa_family: sa_family_t, - pub sa_data: [c_char; 14], - } - - pub struct sockaddr_in { - pub sin_family: sa_family_t, - pub sin_port: crate::in_port_t, - pub sin_addr: crate::in_addr, - pub sin_zero: [u8; 8], - } - - pub struct sockaddr_in6 { - pub sin6_family: sa_family_t, - pub sin6_port: crate::in_port_t, - pub sin6_flowinfo: u32, - pub sin6_addr: crate::in6_addr, - pub sin6_scope_id: u32, - } - - // The order of the `ai_addr` field in this struct is crucial - // for converting between the Rust and C types. 
- pub struct addrinfo { - pub ai_flags: c_int, - pub ai_family: c_int, - pub ai_socktype: c_int, - pub ai_protocol: c_int, - pub ai_addrlen: socklen_t, - - #[cfg(any(target_os = "linux", target_os = "emscripten"))] - pub ai_addr: *mut crate::sockaddr, - - pub ai_canonname: *mut c_char, - - #[cfg(target_os = "android")] - pub ai_addr: *mut crate::sockaddr, - - pub ai_next: *mut addrinfo, - } - - pub struct sockaddr_ll { - pub sll_family: c_ushort, - pub sll_protocol: c_ushort, - pub sll_ifindex: c_int, - pub sll_hatype: c_ushort, - pub sll_pkttype: c_uchar, - pub sll_halen: c_uchar, - pub sll_addr: [c_uchar; 8], - } - - pub struct fd_set { - fds_bits: [c_ulong; FD_SETSIZE as usize / ULONG_SIZE], - } - - pub struct tm { - pub tm_sec: c_int, - pub tm_min: c_int, - pub tm_hour: c_int, - pub tm_mday: c_int, - pub tm_mon: c_int, - pub tm_year: c_int, - pub tm_wday: c_int, - pub tm_yday: c_int, - pub tm_isdst: c_int, - pub tm_gmtoff: c_long, - pub tm_zone: *const c_char, - } - - pub struct sched_param { - pub sched_priority: c_int, - #[cfg(any(target_env = "musl", target_os = "emscripten", target_env = "ohos"))] - pub sched_ss_low_priority: c_int, - #[cfg(any(target_env = "musl", target_os = "emscripten", target_env = "ohos"))] - pub sched_ss_repl_period: crate::timespec, - #[cfg(any(target_env = "musl", target_os = "emscripten", target_env = "ohos"))] - pub sched_ss_init_budget: crate::timespec, - #[cfg(any(target_env = "musl", target_os = "emscripten", target_env = "ohos"))] - pub sched_ss_max_repl: c_int, - } - - pub struct Dl_info { - pub dli_fname: *const c_char, - pub dli_fbase: *mut c_void, - pub dli_sname: *const c_char, - pub dli_saddr: *mut c_void, - } - - pub struct lconv { - pub decimal_point: *mut c_char, - pub thousands_sep: *mut c_char, - pub grouping: *mut c_char, - pub int_curr_symbol: *mut c_char, - pub currency_symbol: *mut c_char, - pub mon_decimal_point: *mut c_char, - pub mon_thousands_sep: *mut c_char, - pub mon_grouping: *mut c_char, - pub positive_sign: *mut c_char, - pub negative_sign: *mut c_char, - pub int_frac_digits: c_char, - pub frac_digits: c_char, - pub p_cs_precedes: c_char, - pub p_sep_by_space: c_char, - pub n_cs_precedes: c_char, - pub n_sep_by_space: c_char, - pub p_sign_posn: c_char, - pub n_sign_posn: c_char, - pub int_p_cs_precedes: c_char, - pub int_p_sep_by_space: c_char, - pub int_n_cs_precedes: c_char, - pub int_n_sep_by_space: c_char, - pub int_p_sign_posn: c_char, - pub int_n_sign_posn: c_char, - } - - pub struct in_pktinfo { - pub ipi_ifindex: c_int, - pub ipi_spec_dst: crate::in_addr, - pub ipi_addr: crate::in_addr, - } - - pub struct ifaddrs { - pub ifa_next: *mut ifaddrs, - pub ifa_name: *mut c_char, - pub ifa_flags: c_uint, - pub ifa_addr: *mut crate::sockaddr, - pub ifa_netmask: *mut crate::sockaddr, - pub ifa_ifu: *mut crate::sockaddr, // FIXME(union) This should be a union - pub ifa_data: *mut c_void, - } - - pub struct in6_rtmsg { - rtmsg_dst: crate::in6_addr, - rtmsg_src: crate::in6_addr, - rtmsg_gateway: crate::in6_addr, - rtmsg_type: u32, - rtmsg_dst_len: u16, - rtmsg_src_len: u16, - rtmsg_metric: u32, - rtmsg_info: c_ulong, - rtmsg_flags: u32, - rtmsg_ifindex: c_int, - } - - pub struct arpreq { - pub arp_pa: crate::sockaddr, - pub arp_ha: crate::sockaddr, - pub arp_flags: c_int, - pub arp_netmask: crate::sockaddr, - pub arp_dev: [c_char; 16], - } - - pub struct arpreq_old { - pub arp_pa: crate::sockaddr, - pub arp_ha: crate::sockaddr, - pub arp_flags: c_int, - pub arp_netmask: crate::sockaddr, - } - - pub struct arphdr { - pub ar_hrd: 
u16, - pub ar_pro: u16, - pub ar_hln: u8, - pub ar_pln: u8, - pub ar_op: u16, - } - - pub struct mmsghdr { - pub msg_hdr: crate::msghdr, - pub msg_len: c_uint, - } -} - -cfg_if! { - if #[cfg(not(target_os = "emscripten"))] { - s! { - pub struct file_clone_range { - pub src_fd: crate::__s64, - pub src_offset: crate::__u64, - pub src_length: crate::__u64, - pub dest_offset: crate::__u64, - } - - // linux/filter.h - pub struct sock_filter { - pub code: __u16, - pub jt: __u8, - pub jf: __u8, - pub k: __u32, - } - - pub struct sock_fprog { - pub len: c_ushort, - pub filter: *mut sock_filter, - } - } - } -} - -cfg_if! { - if #[cfg(any( - target_env = "gnu", - target_os = "android", - all(target_env = "musl", musl_v1_2_3) - ))] { - s! { - pub struct statx { - pub stx_mask: crate::__u32, - pub stx_blksize: crate::__u32, - pub stx_attributes: crate::__u64, - pub stx_nlink: crate::__u32, - pub stx_uid: crate::__u32, - pub stx_gid: crate::__u32, - pub stx_mode: crate::__u16, - __statx_pad1: [crate::__u16; 1], - pub stx_ino: crate::__u64, - pub stx_size: crate::__u64, - pub stx_blocks: crate::__u64, - pub stx_attributes_mask: crate::__u64, - pub stx_atime: statx_timestamp, - pub stx_btime: statx_timestamp, - pub stx_ctime: statx_timestamp, - pub stx_mtime: statx_timestamp, - pub stx_rdev_major: crate::__u32, - pub stx_rdev_minor: crate::__u32, - pub stx_dev_major: crate::__u32, - pub stx_dev_minor: crate::__u32, - pub stx_mnt_id: crate::__u64, - pub stx_dio_mem_align: crate::__u32, - pub stx_dio_offset_align: crate::__u32, - __statx_pad3: [crate::__u64; 12], - } - - pub struct statx_timestamp { - pub tv_sec: crate::__s64, - pub tv_nsec: crate::__u32, - __statx_timestamp_pad1: [crate::__s32; 1], - } - } - } -} - -s_no_extra_traits! { - #[cfg_attr( - any(target_arch = "x86_64", all(target_arch = "x86", target_env = "gnu")), - repr(packed) - )] - pub struct epoll_event { - pub events: u32, - pub u64: u64, - } - - pub struct sockaddr_un { - pub sun_family: sa_family_t, - pub sun_path: [c_char; 108], - } - - pub struct sockaddr_storage { - pub ss_family: sa_family_t, - #[cfg(target_pointer_width = "32")] - __ss_pad2: [u8; 128 - 2 - 4], - #[cfg(target_pointer_width = "64")] - __ss_pad2: [u8; 128 - 2 - 8], - __ss_align: size_t, - } - - pub struct utsname { - pub sysname: [c_char; 65], - pub nodename: [c_char; 65], - pub release: [c_char; 65], - pub version: [c_char; 65], - pub machine: [c_char; 65], - pub domainname: [c_char; 65], - } - - pub struct sigevent { - pub sigev_value: crate::sigval, - pub sigev_signo: c_int, - pub sigev_notify: c_int, - // Actually a union. We only expose sigev_notify_thread_id because it's - // the most useful member - pub sigev_notify_thread_id: c_int, - #[cfg(target_pointer_width = "64")] - __unused1: [c_int; 11], - #[cfg(target_pointer_width = "32")] - __unused1: [c_int; 12], - } -} - -cfg_if! 
{ - if #[cfg(feature = "extra_traits")] { - impl PartialEq for epoll_event { - fn eq(&self, other: &epoll_event) -> bool { - self.events == other.events && self.u64 == other.u64 - } - } - impl Eq for epoll_event {} - impl hash::Hash for epoll_event { - fn hash(&self, state: &mut H) { - let events = self.events; - let u64 = self.u64; - events.hash(state); - u64.hash(state); - } - } - - impl PartialEq for sockaddr_un { - fn eq(&self, other: &sockaddr_un) -> bool { - self.sun_family == other.sun_family - && self - .sun_path - .iter() - .zip(other.sun_path.iter()) - .all(|(a, b)| a == b) - } - } - impl Eq for sockaddr_un {} - impl hash::Hash for sockaddr_un { - fn hash(&self, state: &mut H) { - self.sun_family.hash(state); - self.sun_path.hash(state); - } - } - - impl PartialEq for sockaddr_storage { - fn eq(&self, other: &sockaddr_storage) -> bool { - self.ss_family == other.ss_family - && self - .__ss_pad2 - .iter() - .zip(other.__ss_pad2.iter()) - .all(|(a, b)| a == b) - } - } - - impl Eq for sockaddr_storage {} - - impl hash::Hash for sockaddr_storage { - fn hash(&self, state: &mut H) { - self.ss_family.hash(state); - self.__ss_pad2.hash(state); - } - } - - impl PartialEq for utsname { - fn eq(&self, other: &utsname) -> bool { - self.sysname - .iter() - .zip(other.sysname.iter()) - .all(|(a, b)| a == b) - && self - .nodename - .iter() - .zip(other.nodename.iter()) - .all(|(a, b)| a == b) - && self - .release - .iter() - .zip(other.release.iter()) - .all(|(a, b)| a == b) - && self - .version - .iter() - .zip(other.version.iter()) - .all(|(a, b)| a == b) - && self - .machine - .iter() - .zip(other.machine.iter()) - .all(|(a, b)| a == b) - && self - .domainname - .iter() - .zip(other.domainname.iter()) - .all(|(a, b)| a == b) - } - } - - impl Eq for utsname {} - - impl hash::Hash for utsname { - fn hash(&self, state: &mut H) { - self.sysname.hash(state); - self.nodename.hash(state); - self.release.hash(state); - self.version.hash(state); - self.machine.hash(state); - self.domainname.hash(state); - } - } - - impl PartialEq for sigevent { - fn eq(&self, other: &sigevent) -> bool { - self.sigev_value == other.sigev_value - && self.sigev_signo == other.sigev_signo - && self.sigev_notify == other.sigev_notify - && self.sigev_notify_thread_id == other.sigev_notify_thread_id - } - } - impl Eq for sigevent {} - impl hash::Hash for sigevent { - fn hash(&self, state: &mut H) { - self.sigev_value.hash(state); - self.sigev_signo.hash(state); - self.sigev_notify.hash(state); - self.sigev_notify_thread_id.hash(state); - } - } - } -} - -// intentionally not public, only used for fd_set -cfg_if! 
{ - if #[cfg(target_pointer_width = "32")] { - const ULONG_SIZE: usize = 32; - } else if #[cfg(target_pointer_width = "64")] { - const ULONG_SIZE: usize = 64; - } else { - // Unknown target_pointer_width - } -} - -pub const EXIT_FAILURE: c_int = 1; -pub const EXIT_SUCCESS: c_int = 0; -pub const RAND_MAX: c_int = 2147483647; -pub const EOF: c_int = -1; -pub const SEEK_SET: c_int = 0; -pub const SEEK_CUR: c_int = 1; -pub const SEEK_END: c_int = 2; -pub const _IOFBF: c_int = 0; -pub const _IONBF: c_int = 2; -pub const _IOLBF: c_int = 1; - -pub const F_DUPFD: c_int = 0; -pub const F_GETFD: c_int = 1; -pub const F_SETFD: c_int = 2; -pub const F_GETFL: c_int = 3; -pub const F_SETFL: c_int = 4; - -// Linux-specific fcntls -pub const F_SETLEASE: c_int = 1024; -pub const F_GETLEASE: c_int = 1025; -pub const F_NOTIFY: c_int = 1026; -pub const F_CANCELLK: c_int = 1029; -pub const F_DUPFD_CLOEXEC: c_int = 1030; -pub const F_SETPIPE_SZ: c_int = 1031; -pub const F_GETPIPE_SZ: c_int = 1032; -pub const F_ADD_SEALS: c_int = 1033; -pub const F_GET_SEALS: c_int = 1034; - -pub const F_SEAL_SEAL: c_int = 0x0001; -pub const F_SEAL_SHRINK: c_int = 0x0002; -pub const F_SEAL_GROW: c_int = 0x0004; -pub const F_SEAL_WRITE: c_int = 0x0008; - -// FIXME(#235): Include file sealing fcntls once we have a way to verify them. - -pub const SIGTRAP: c_int = 5; - -pub const PTHREAD_CREATE_JOINABLE: c_int = 0; -pub const PTHREAD_CREATE_DETACHED: c_int = 1; - -pub const CLOCK_REALTIME: crate::clockid_t = 0; -pub const CLOCK_MONOTONIC: crate::clockid_t = 1; -pub const CLOCK_PROCESS_CPUTIME_ID: crate::clockid_t = 2; -pub const CLOCK_THREAD_CPUTIME_ID: crate::clockid_t = 3; -pub const CLOCK_MONOTONIC_RAW: crate::clockid_t = 4; -pub const CLOCK_REALTIME_COARSE: crate::clockid_t = 5; -pub const CLOCK_MONOTONIC_COARSE: crate::clockid_t = 6; -pub const CLOCK_BOOTTIME: crate::clockid_t = 7; -pub const CLOCK_REALTIME_ALARM: crate::clockid_t = 8; -pub const CLOCK_BOOTTIME_ALARM: crate::clockid_t = 9; -pub const CLOCK_TAI: crate::clockid_t = 11; -pub const TIMER_ABSTIME: c_int = 1; - -pub const RUSAGE_SELF: c_int = 0; - -pub const O_RDONLY: c_int = 0; -pub const O_WRONLY: c_int = 1; -pub const O_RDWR: c_int = 2; - -pub const SOCK_CLOEXEC: c_int = O_CLOEXEC; - -pub const S_IFIFO: mode_t = 0o1_0000; -pub const S_IFCHR: mode_t = 0o2_0000; -pub const S_IFBLK: mode_t = 0o6_0000; -pub const S_IFDIR: mode_t = 0o4_0000; -pub const S_IFREG: mode_t = 0o10_0000; -pub const S_IFLNK: mode_t = 0o12_0000; -pub const S_IFSOCK: mode_t = 0o14_0000; -pub const S_IFMT: mode_t = 0o17_0000; -pub const S_IRWXU: mode_t = 0o0700; -pub const S_IXUSR: mode_t = 0o0100; -pub const S_IWUSR: mode_t = 0o0200; -pub const S_IRUSR: mode_t = 0o0400; -pub const S_IRWXG: mode_t = 0o0070; -pub const S_IXGRP: mode_t = 0o0010; -pub const S_IWGRP: mode_t = 0o0020; -pub const S_IRGRP: mode_t = 0o0040; -pub const S_IRWXO: mode_t = 0o0007; -pub const S_IXOTH: mode_t = 0o0001; -pub const S_IWOTH: mode_t = 0o0002; -pub const S_IROTH: mode_t = 0o0004; -pub const F_OK: c_int = 0; -pub const R_OK: c_int = 4; -pub const W_OK: c_int = 2; -pub const X_OK: c_int = 1; -pub const STDIN_FILENO: c_int = 0; -pub const STDOUT_FILENO: c_int = 1; -pub const STDERR_FILENO: c_int = 2; -pub const SIGHUP: c_int = 1; -pub const SIGINT: c_int = 2; -pub const SIGQUIT: c_int = 3; -pub const SIGILL: c_int = 4; -pub const SIGABRT: c_int = 6; -pub const SIGFPE: c_int = 8; -pub const SIGKILL: c_int = 9; -pub const SIGSEGV: c_int = 11; -pub const SIGPIPE: c_int = 13; -pub const SIGALRM: c_int = 14; -pub const 
SIGTERM: c_int = 15; - -pub const PROT_NONE: c_int = 0; -pub const PROT_READ: c_int = 1; -pub const PROT_WRITE: c_int = 2; -pub const PROT_EXEC: c_int = 4; - -pub const XATTR_CREATE: c_int = 0x1; -pub const XATTR_REPLACE: c_int = 0x2; - -cfg_if! { - if #[cfg(target_os = "android")] { - pub const RLIM64_INFINITY: c_ulonglong = !0; - } else { - pub const RLIM64_INFINITY: crate::rlim64_t = !0; - } -} - -cfg_if! { - if #[cfg(target_env = "ohos")] { - pub const LC_CTYPE: c_int = 0; - pub const LC_NUMERIC: c_int = 1; - pub const LC_TIME: c_int = 2; - pub const LC_COLLATE: c_int = 3; - pub const LC_MONETARY: c_int = 4; - pub const LC_MESSAGES: c_int = 5; - pub const LC_PAPER: c_int = 6; - pub const LC_NAME: c_int = 7; - pub const LC_ADDRESS: c_int = 8; - pub const LC_TELEPHONE: c_int = 9; - pub const LC_MEASUREMENT: c_int = 10; - pub const LC_IDENTIFICATION: c_int = 11; - pub const LC_ALL: c_int = 12; - } else if #[cfg(not(target_env = "uclibc"))] { - pub const LC_CTYPE: c_int = 0; - pub const LC_NUMERIC: c_int = 1; - pub const LC_TIME: c_int = 2; - pub const LC_COLLATE: c_int = 3; - pub const LC_MONETARY: c_int = 4; - pub const LC_MESSAGES: c_int = 5; - pub const LC_ALL: c_int = 6; - } -} - -pub const LC_CTYPE_MASK: c_int = 1 << LC_CTYPE; -pub const LC_NUMERIC_MASK: c_int = 1 << LC_NUMERIC; -pub const LC_TIME_MASK: c_int = 1 << LC_TIME; -pub const LC_COLLATE_MASK: c_int = 1 << LC_COLLATE; -pub const LC_MONETARY_MASK: c_int = 1 << LC_MONETARY; -pub const LC_MESSAGES_MASK: c_int = 1 << LC_MESSAGES; -// LC_ALL_MASK defined per platform - -pub const MAP_FILE: c_int = 0x0000; -pub const MAP_SHARED: c_int = 0x0001; -pub const MAP_PRIVATE: c_int = 0x0002; -pub const MAP_FIXED: c_int = 0x0010; - -pub const MAP_FAILED: *mut c_void = !0 as *mut c_void; - -// MS_ flags for msync(2) -pub const MS_ASYNC: c_int = 0x0001; -pub const MS_INVALIDATE: c_int = 0x0002; -pub const MS_SYNC: c_int = 0x0004; - -// MS_ flags for mount(2) -pub const MS_RDONLY: c_ulong = 0x01; -pub const MS_NOSUID: c_ulong = 0x02; -pub const MS_NODEV: c_ulong = 0x04; -pub const MS_NOEXEC: c_ulong = 0x08; -pub const MS_SYNCHRONOUS: c_ulong = 0x10; -pub const MS_REMOUNT: c_ulong = 0x20; -pub const MS_MANDLOCK: c_ulong = 0x40; -pub const MS_DIRSYNC: c_ulong = 0x80; -pub const MS_NOSYMFOLLOW: c_ulong = 0x100; -pub const MS_NOATIME: c_ulong = 0x0400; -pub const MS_NODIRATIME: c_ulong = 0x0800; -pub const MS_BIND: c_ulong = 0x1000; -pub const MS_MOVE: c_ulong = 0x2000; -pub const MS_REC: c_ulong = 0x4000; -pub const MS_SILENT: c_ulong = 0x8000; -pub const MS_POSIXACL: c_ulong = 0x010000; -pub const MS_UNBINDABLE: c_ulong = 0x020000; -pub const MS_PRIVATE: c_ulong = 0x040000; -pub const MS_SLAVE: c_ulong = 0x080000; -pub const MS_SHARED: c_ulong = 0x100000; -pub const MS_RELATIME: c_ulong = 0x200000; -pub const MS_KERNMOUNT: c_ulong = 0x400000; -pub const MS_I_VERSION: c_ulong = 0x800000; -pub const MS_STRICTATIME: c_ulong = 0x1000000; -pub const MS_LAZYTIME: c_ulong = 0x2000000; -pub const MS_ACTIVE: c_ulong = 0x40000000; -pub const MS_MGC_VAL: c_ulong = 0xc0ed0000; -pub const MS_MGC_MSK: c_ulong = 0xffff0000; - -pub const SCM_RIGHTS: c_int = 0x01; -pub const SCM_CREDENTIALS: c_int = 0x02; - -pub const PROT_GROWSDOWN: c_int = 0x1000000; -pub const PROT_GROWSUP: c_int = 0x2000000; - -pub const MAP_TYPE: c_int = 0x000f; - -pub const MADV_NORMAL: c_int = 0; -pub const MADV_RANDOM: c_int = 1; -pub const MADV_SEQUENTIAL: c_int = 2; -pub const MADV_WILLNEED: c_int = 3; -pub const MADV_DONTNEED: c_int = 4; -pub const MADV_FREE: c_int = 8; -pub const 
MADV_REMOVE: c_int = 9; -pub const MADV_DONTFORK: c_int = 10; -pub const MADV_DOFORK: c_int = 11; -pub const MADV_MERGEABLE: c_int = 12; -pub const MADV_UNMERGEABLE: c_int = 13; -pub const MADV_HUGEPAGE: c_int = 14; -pub const MADV_NOHUGEPAGE: c_int = 15; -pub const MADV_DONTDUMP: c_int = 16; -pub const MADV_DODUMP: c_int = 17; -pub const MADV_WIPEONFORK: c_int = 18; -pub const MADV_KEEPONFORK: c_int = 19; -pub const MADV_COLD: c_int = 20; -pub const MADV_PAGEOUT: c_int = 21; -pub const MADV_HWPOISON: c_int = 100; -cfg_if! { - if #[cfg(not(target_os = "emscripten"))] { - pub const MADV_POPULATE_READ: c_int = 22; - pub const MADV_POPULATE_WRITE: c_int = 23; - pub const MADV_DONTNEED_LOCKED: c_int = 24; - } -} - -pub const IFF_UP: c_int = 0x1; -pub const IFF_BROADCAST: c_int = 0x2; -pub const IFF_DEBUG: c_int = 0x4; -pub const IFF_LOOPBACK: c_int = 0x8; -pub const IFF_POINTOPOINT: c_int = 0x10; -pub const IFF_NOTRAILERS: c_int = 0x20; -pub const IFF_RUNNING: c_int = 0x40; -pub const IFF_NOARP: c_int = 0x80; -pub const IFF_PROMISC: c_int = 0x100; -pub const IFF_ALLMULTI: c_int = 0x200; -pub const IFF_MASTER: c_int = 0x400; -pub const IFF_SLAVE: c_int = 0x800; -pub const IFF_MULTICAST: c_int = 0x1000; -pub const IFF_PORTSEL: c_int = 0x2000; -pub const IFF_AUTOMEDIA: c_int = 0x4000; -pub const IFF_DYNAMIC: c_int = 0x8000; - -pub const SOL_IP: c_int = 0; -pub const SOL_TCP: c_int = 6; -pub const SOL_UDP: c_int = 17; -pub const SOL_IPV6: c_int = 41; -pub const SOL_ICMPV6: c_int = 58; -pub const SOL_RAW: c_int = 255; -pub const SOL_DECNET: c_int = 261; -pub const SOL_X25: c_int = 262; -pub const SOL_PACKET: c_int = 263; -pub const SOL_ATM: c_int = 264; -pub const SOL_AAL: c_int = 265; -pub const SOL_IRDA: c_int = 266; -pub const SOL_NETBEUI: c_int = 267; -pub const SOL_LLC: c_int = 268; -pub const SOL_DCCP: c_int = 269; -pub const SOL_NETLINK: c_int = 270; -pub const SOL_TIPC: c_int = 271; -pub const SOL_BLUETOOTH: c_int = 274; -pub const SOL_ALG: c_int = 279; - -pub const AF_UNSPEC: c_int = 0; -pub const AF_UNIX: c_int = 1; -pub const AF_LOCAL: c_int = 1; -pub const AF_INET: c_int = 2; -pub const AF_AX25: c_int = 3; -pub const AF_IPX: c_int = 4; -pub const AF_APPLETALK: c_int = 5; -pub const AF_NETROM: c_int = 6; -pub const AF_BRIDGE: c_int = 7; -pub const AF_ATMPVC: c_int = 8; -pub const AF_X25: c_int = 9; -pub const AF_INET6: c_int = 10; -pub const AF_ROSE: c_int = 11; -pub const AF_DECnet: c_int = 12; -pub const AF_NETBEUI: c_int = 13; -pub const AF_SECURITY: c_int = 14; -pub const AF_KEY: c_int = 15; -pub const AF_NETLINK: c_int = 16; -pub const AF_ROUTE: c_int = AF_NETLINK; -pub const AF_PACKET: c_int = 17; -pub const AF_ASH: c_int = 18; -pub const AF_ECONET: c_int = 19; -pub const AF_ATMSVC: c_int = 20; -pub const AF_RDS: c_int = 21; -pub const AF_SNA: c_int = 22; -pub const AF_IRDA: c_int = 23; -pub const AF_PPPOX: c_int = 24; -pub const AF_WANPIPE: c_int = 25; -pub const AF_LLC: c_int = 26; -pub const AF_CAN: c_int = 29; -pub const AF_TIPC: c_int = 30; -pub const AF_BLUETOOTH: c_int = 31; -pub const AF_IUCV: c_int = 32; -pub const AF_RXRPC: c_int = 33; -pub const AF_ISDN: c_int = 34; -pub const AF_PHONET: c_int = 35; -pub const AF_IEEE802154: c_int = 36; -pub const AF_CAIF: c_int = 37; -pub const AF_ALG: c_int = 38; - -pub const PF_UNSPEC: c_int = AF_UNSPEC; -pub const PF_UNIX: c_int = AF_UNIX; -pub const PF_LOCAL: c_int = AF_LOCAL; -pub const PF_INET: c_int = AF_INET; -pub const PF_AX25: c_int = AF_AX25; -pub const PF_IPX: c_int = AF_IPX; -pub const PF_APPLETALK: c_int = AF_APPLETALK; 
-pub const PF_NETROM: c_int = AF_NETROM; -pub const PF_BRIDGE: c_int = AF_BRIDGE; -pub const PF_ATMPVC: c_int = AF_ATMPVC; -pub const PF_X25: c_int = AF_X25; -pub const PF_INET6: c_int = AF_INET6; -pub const PF_ROSE: c_int = AF_ROSE; -pub const PF_DECnet: c_int = AF_DECnet; -pub const PF_NETBEUI: c_int = AF_NETBEUI; -pub const PF_SECURITY: c_int = AF_SECURITY; -pub const PF_KEY: c_int = AF_KEY; -pub const PF_NETLINK: c_int = AF_NETLINK; -pub const PF_ROUTE: c_int = AF_ROUTE; -pub const PF_PACKET: c_int = AF_PACKET; -pub const PF_ASH: c_int = AF_ASH; -pub const PF_ECONET: c_int = AF_ECONET; -pub const PF_ATMSVC: c_int = AF_ATMSVC; -pub const PF_RDS: c_int = AF_RDS; -pub const PF_SNA: c_int = AF_SNA; -pub const PF_IRDA: c_int = AF_IRDA; -pub const PF_PPPOX: c_int = AF_PPPOX; -pub const PF_WANPIPE: c_int = AF_WANPIPE; -pub const PF_LLC: c_int = AF_LLC; -pub const PF_CAN: c_int = AF_CAN; -pub const PF_TIPC: c_int = AF_TIPC; -pub const PF_BLUETOOTH: c_int = AF_BLUETOOTH; -pub const PF_IUCV: c_int = AF_IUCV; -pub const PF_RXRPC: c_int = AF_RXRPC; -pub const PF_ISDN: c_int = AF_ISDN; -pub const PF_PHONET: c_int = AF_PHONET; -pub const PF_IEEE802154: c_int = AF_IEEE802154; -pub const PF_CAIF: c_int = AF_CAIF; -pub const PF_ALG: c_int = AF_ALG; - -pub const MSG_OOB: c_int = 1; -pub const MSG_PEEK: c_int = 2; -pub const MSG_DONTROUTE: c_int = 4; -pub const MSG_CTRUNC: c_int = 8; -pub const MSG_TRUNC: c_int = 0x20; -pub const MSG_DONTWAIT: c_int = 0x40; -pub const MSG_EOR: c_int = 0x80; -pub const MSG_WAITALL: c_int = 0x100; -pub const MSG_FIN: c_int = 0x200; -pub const MSG_SYN: c_int = 0x400; -pub const MSG_CONFIRM: c_int = 0x800; -pub const MSG_RST: c_int = 0x1000; -pub const MSG_ERRQUEUE: c_int = 0x2000; -pub const MSG_NOSIGNAL: c_int = 0x4000; -pub const MSG_MORE: c_int = 0x8000; -pub const MSG_WAITFORONE: c_int = 0x10000; -pub const MSG_FASTOPEN: c_int = 0x20000000; -pub const MSG_CMSG_CLOEXEC: c_int = 0x40000000; - -pub const SCM_TIMESTAMP: c_int = SO_TIMESTAMP; - -pub const SOCK_RAW: c_int = 3; -pub const SOCK_RDM: c_int = 4; -pub const IP_TOS: c_int = 1; -pub const IP_TTL: c_int = 2; -pub const IP_HDRINCL: c_int = 3; -pub const IP_OPTIONS: c_int = 4; -pub const IP_ROUTER_ALERT: c_int = 5; -pub const IP_RECVOPTS: c_int = 6; -pub const IP_RETOPTS: c_int = 7; -pub const IP_PKTINFO: c_int = 8; -pub const IP_PKTOPTIONS: c_int = 9; -pub const IP_MTU_DISCOVER: c_int = 10; -pub const IP_RECVERR: c_int = 11; -pub const IP_RECVTTL: c_int = 12; -pub const IP_RECVTOS: c_int = 13; -pub const IP_MTU: c_int = 14; -pub const IP_FREEBIND: c_int = 15; -pub const IP_IPSEC_POLICY: c_int = 16; -pub const IP_XFRM_POLICY: c_int = 17; -pub const IP_PASSSEC: c_int = 18; -pub const IP_TRANSPARENT: c_int = 19; -pub const IP_ORIGDSTADDR: c_int = 20; -pub const IP_RECVORIGDSTADDR: c_int = IP_ORIGDSTADDR; -pub const IP_MINTTL: c_int = 21; -pub const IP_NODEFRAG: c_int = 22; -pub const IP_CHECKSUM: c_int = 23; -pub const IP_BIND_ADDRESS_NO_PORT: c_int = 24; -pub const IP_MULTICAST_IF: c_int = 32; -pub const IP_MULTICAST_TTL: c_int = 33; -pub const IP_MULTICAST_LOOP: c_int = 34; -pub const IP_ADD_MEMBERSHIP: c_int = 35; -pub const IP_DROP_MEMBERSHIP: c_int = 36; -pub const IP_UNBLOCK_SOURCE: c_int = 37; -pub const IP_BLOCK_SOURCE: c_int = 38; -pub const IP_ADD_SOURCE_MEMBERSHIP: c_int = 39; -pub const IP_DROP_SOURCE_MEMBERSHIP: c_int = 40; -pub const IP_MSFILTER: c_int = 41; -pub const IP_MULTICAST_ALL: c_int = 49; -pub const IP_UNICAST_IF: c_int = 50; - -pub const IP_DEFAULT_MULTICAST_TTL: c_int = 1; -pub const 
IP_DEFAULT_MULTICAST_LOOP: c_int = 1; - -pub const IP_PMTUDISC_DONT: c_int = 0; -pub const IP_PMTUDISC_WANT: c_int = 1; -pub const IP_PMTUDISC_DO: c_int = 2; -pub const IP_PMTUDISC_PROBE: c_int = 3; -pub const IP_PMTUDISC_INTERFACE: c_int = 4; -pub const IP_PMTUDISC_OMIT: c_int = 5; - -// IPPROTO_IP defined in src/unix/mod.rs -/// Hop-by-hop option header -pub const IPPROTO_HOPOPTS: c_int = 0; -// IPPROTO_ICMP defined in src/unix/mod.rs -/// group mgmt protocol -pub const IPPROTO_IGMP: c_int = 2; -/// for compatibility -pub const IPPROTO_IPIP: c_int = 4; -// IPPROTO_TCP defined in src/unix/mod.rs -/// exterior gateway protocol -pub const IPPROTO_EGP: c_int = 8; -/// pup -pub const IPPROTO_PUP: c_int = 12; -// IPPROTO_UDP defined in src/unix/mod.rs -/// xns idp -pub const IPPROTO_IDP: c_int = 22; -/// tp-4 w/ class negotiation -pub const IPPROTO_TP: c_int = 29; -/// DCCP -pub const IPPROTO_DCCP: c_int = 33; -// IPPROTO_IPV6 defined in src/unix/mod.rs -/// IP6 routing header -pub const IPPROTO_ROUTING: c_int = 43; -/// IP6 fragmentation header -pub const IPPROTO_FRAGMENT: c_int = 44; -/// resource reservation -pub const IPPROTO_RSVP: c_int = 46; -/// General Routing Encap. -pub const IPPROTO_GRE: c_int = 47; -/// IP6 Encap Sec. Payload -pub const IPPROTO_ESP: c_int = 50; -/// IP6 Auth Header -pub const IPPROTO_AH: c_int = 51; -// IPPROTO_ICMPV6 defined in src/unix/mod.rs -/// IP6 no next header -pub const IPPROTO_NONE: c_int = 59; -/// IP6 destination option -pub const IPPROTO_DSTOPTS: c_int = 60; -pub const IPPROTO_MTP: c_int = 92; -/// encapsulation header -pub const IPPROTO_ENCAP: c_int = 98; -/// Protocol indep. multicast -pub const IPPROTO_PIM: c_int = 103; -/// IP Payload Comp. Protocol -pub const IPPROTO_COMP: c_int = 108; -/// SCTP -pub const IPPROTO_SCTP: c_int = 132; -pub const IPPROTO_MH: c_int = 135; -pub const IPPROTO_UDPLITE: c_int = 136; -/// raw IP packet -pub const IPPROTO_RAW: c_int = 255; -pub const IPPROTO_BEETPH: c_int = 94; -pub const IPPROTO_MPLS: c_int = 137; -/// Multipath TCP -pub const IPPROTO_MPTCP: c_int = 262; -/// Ethernet-within-IPv6 encapsulation. 
-pub const IPPROTO_ETHERNET: c_int = 143; - -pub const MCAST_EXCLUDE: c_int = 0; -pub const MCAST_INCLUDE: c_int = 1; -pub const MCAST_JOIN_GROUP: c_int = 42; -pub const MCAST_BLOCK_SOURCE: c_int = 43; -pub const MCAST_UNBLOCK_SOURCE: c_int = 44; -pub const MCAST_LEAVE_GROUP: c_int = 45; -pub const MCAST_JOIN_SOURCE_GROUP: c_int = 46; -pub const MCAST_LEAVE_SOURCE_GROUP: c_int = 47; -pub const MCAST_MSFILTER: c_int = 48; - -pub const IPV6_ADDRFORM: c_int = 1; -pub const IPV6_2292PKTINFO: c_int = 2; -pub const IPV6_2292HOPOPTS: c_int = 3; -pub const IPV6_2292DSTOPTS: c_int = 4; -pub const IPV6_2292RTHDR: c_int = 5; -pub const IPV6_2292PKTOPTIONS: c_int = 6; -pub const IPV6_CHECKSUM: c_int = 7; -pub const IPV6_2292HOPLIMIT: c_int = 8; -pub const IPV6_NEXTHOP: c_int = 9; -pub const IPV6_AUTHHDR: c_int = 10; -pub const IPV6_UNICAST_HOPS: c_int = 16; -pub const IPV6_MULTICAST_IF: c_int = 17; -pub const IPV6_MULTICAST_HOPS: c_int = 18; -pub const IPV6_MULTICAST_LOOP: c_int = 19; -pub const IPV6_ADD_MEMBERSHIP: c_int = 20; -pub const IPV6_DROP_MEMBERSHIP: c_int = 21; -pub const IPV6_ROUTER_ALERT: c_int = 22; -pub const IPV6_MTU_DISCOVER: c_int = 23; -pub const IPV6_MTU: c_int = 24; -pub const IPV6_RECVERR: c_int = 25; -pub const IPV6_V6ONLY: c_int = 26; -pub const IPV6_JOIN_ANYCAST: c_int = 27; -pub const IPV6_LEAVE_ANYCAST: c_int = 28; -pub const IPV6_IPSEC_POLICY: c_int = 34; -pub const IPV6_XFRM_POLICY: c_int = 35; -pub const IPV6_HDRINCL: c_int = 36; -pub const IPV6_RECVPKTINFO: c_int = 49; -pub const IPV6_PKTINFO: c_int = 50; -pub const IPV6_RECVHOPLIMIT: c_int = 51; -pub const IPV6_HOPLIMIT: c_int = 52; -pub const IPV6_RECVHOPOPTS: c_int = 53; -pub const IPV6_HOPOPTS: c_int = 54; -pub const IPV6_RTHDRDSTOPTS: c_int = 55; -pub const IPV6_RECVRTHDR: c_int = 56; -pub const IPV6_RTHDR: c_int = 57; -pub const IPV6_RECVDSTOPTS: c_int = 58; -pub const IPV6_DSTOPTS: c_int = 59; -pub const IPV6_RECVPATHMTU: c_int = 60; -pub const IPV6_PATHMTU: c_int = 61; -pub const IPV6_DONTFRAG: c_int = 62; -pub const IPV6_RECVTCLASS: c_int = 66; -pub const IPV6_TCLASS: c_int = 67; -pub const IPV6_AUTOFLOWLABEL: c_int = 70; -pub const IPV6_ADDR_PREFERENCES: c_int = 72; -pub const IPV6_MINHOPCOUNT: c_int = 73; -pub const IPV6_ORIGDSTADDR: c_int = 74; -pub const IPV6_RECVORIGDSTADDR: c_int = IPV6_ORIGDSTADDR; -pub const IPV6_TRANSPARENT: c_int = 75; -pub const IPV6_UNICAST_IF: c_int = 76; -pub const IPV6_PREFER_SRC_TMP: c_int = 0x0001; -pub const IPV6_PREFER_SRC_PUBLIC: c_int = 0x0002; -pub const IPV6_PREFER_SRC_PUBTMP_DEFAULT: c_int = 0x0100; -pub const IPV6_PREFER_SRC_COA: c_int = 0x0004; -pub const IPV6_PREFER_SRC_HOME: c_int = 0x0400; -pub const IPV6_PREFER_SRC_CGA: c_int = 0x0008; -pub const IPV6_PREFER_SRC_NONCGA: c_int = 0x0800; - -pub const IPV6_PMTUDISC_DONT: c_int = 0; -pub const IPV6_PMTUDISC_WANT: c_int = 1; -pub const IPV6_PMTUDISC_DO: c_int = 2; -pub const IPV6_PMTUDISC_PROBE: c_int = 3; -pub const IPV6_PMTUDISC_INTERFACE: c_int = 4; -pub const IPV6_PMTUDISC_OMIT: c_int = 5; - -pub const TCP_NODELAY: c_int = 1; -pub const TCP_MAXSEG: c_int = 2; -pub const TCP_CORK: c_int = 3; -pub const TCP_KEEPIDLE: c_int = 4; -pub const TCP_KEEPINTVL: c_int = 5; -pub const TCP_KEEPCNT: c_int = 6; -pub const TCP_SYNCNT: c_int = 7; -pub const TCP_LINGER2: c_int = 8; -pub const TCP_DEFER_ACCEPT: c_int = 9; -pub const TCP_WINDOW_CLAMP: c_int = 10; -pub const TCP_INFO: c_int = 11; -pub const TCP_QUICKACK: c_int = 12; -pub const TCP_CONGESTION: c_int = 13; -pub const TCP_MD5SIG: c_int = 14; -cfg_if! 
{ - if #[cfg(all( - target_os = "linux", - any(target_env = "gnu", target_env = "musl", target_env = "ohos") - ))] { - // WARN: deprecated - pub const TCP_COOKIE_TRANSACTIONS: c_int = 15; - } -} -pub const TCP_THIN_LINEAR_TIMEOUTS: c_int = 16; -pub const TCP_THIN_DUPACK: c_int = 17; -pub const TCP_USER_TIMEOUT: c_int = 18; -pub const TCP_REPAIR: c_int = 19; -pub const TCP_REPAIR_QUEUE: c_int = 20; -pub const TCP_QUEUE_SEQ: c_int = 21; -pub const TCP_REPAIR_OPTIONS: c_int = 22; -pub const TCP_FASTOPEN: c_int = 23; -pub const TCP_TIMESTAMP: c_int = 24; -pub const TCP_NOTSENT_LOWAT: c_int = 25; -pub const TCP_CC_INFO: c_int = 26; -pub const TCP_SAVE_SYN: c_int = 27; -pub const TCP_SAVED_SYN: c_int = 28; -cfg_if! { - if #[cfg(not(target_os = "emscripten"))] { - // NOTE: emscripten doesn't support these options yet. - - pub const TCP_REPAIR_WINDOW: c_int = 29; - pub const TCP_FASTOPEN_CONNECT: c_int = 30; - pub const TCP_ULP: c_int = 31; - pub const TCP_MD5SIG_EXT: c_int = 32; - pub const TCP_FASTOPEN_KEY: c_int = 33; - pub const TCP_FASTOPEN_NO_COOKIE: c_int = 34; - pub const TCP_ZEROCOPY_RECEIVE: c_int = 35; - pub const TCP_INQ: c_int = 36; - pub const TCP_CM_INQ: c_int = TCP_INQ; - // NOTE: Some CI images doesn't have this option yet. - // pub const TCP_TX_DELAY: c_int = 37; - pub const TCP_MD5SIG_MAXKEYLEN: usize = 80; - } -} - -pub const SO_DEBUG: c_int = 1; - -pub const SHUT_RD: c_int = 0; -pub const SHUT_WR: c_int = 1; -pub const SHUT_RDWR: c_int = 2; - -pub const LOCK_SH: c_int = 1; -pub const LOCK_EX: c_int = 2; -pub const LOCK_NB: c_int = 4; -pub const LOCK_UN: c_int = 8; - -pub const SS_ONSTACK: c_int = 1; -pub const SS_DISABLE: c_int = 2; - -pub const PATH_MAX: c_int = 4096; - -pub const UIO_MAXIOV: c_int = 1024; - -pub const FD_SETSIZE: usize = 1024; - -pub const EPOLLIN: c_int = 0x1; -pub const EPOLLPRI: c_int = 0x2; -pub const EPOLLOUT: c_int = 0x4; -pub const EPOLLERR: c_int = 0x8; -pub const EPOLLHUP: c_int = 0x10; -pub const EPOLLRDNORM: c_int = 0x40; -pub const EPOLLRDBAND: c_int = 0x80; -pub const EPOLLWRNORM: c_int = 0x100; -pub const EPOLLWRBAND: c_int = 0x200; -pub const EPOLLMSG: c_int = 0x400; -pub const EPOLLRDHUP: c_int = 0x2000; -pub const EPOLLEXCLUSIVE: c_int = 0x10000000; -pub const EPOLLWAKEUP: c_int = 0x20000000; -pub const EPOLLONESHOT: c_int = 0x40000000; -pub const EPOLLET: c_int = 0x80000000; - -pub const EPOLL_CTL_ADD: c_int = 1; -pub const EPOLL_CTL_MOD: c_int = 3; -pub const EPOLL_CTL_DEL: c_int = 2; - -pub const MNT_FORCE: c_int = 0x1; -pub const MNT_DETACH: c_int = 0x2; -pub const MNT_EXPIRE: c_int = 0x4; -pub const UMOUNT_NOFOLLOW: c_int = 0x8; - -pub const Q_GETFMT: c_int = 0x800004; -pub const Q_GETINFO: c_int = 0x800005; -pub const Q_SETINFO: c_int = 0x800006; -pub const QIF_BLIMITS: u32 = 1; -pub const QIF_SPACE: u32 = 2; -pub const QIF_ILIMITS: u32 = 4; -pub const QIF_INODES: u32 = 8; -pub const QIF_BTIME: u32 = 16; -pub const QIF_ITIME: u32 = 32; -pub const QIF_LIMITS: u32 = 5; -pub const QIF_USAGE: u32 = 10; -pub const QIF_TIMES: u32 = 48; -pub const QIF_ALL: u32 = 63; - -pub const Q_SYNC: c_int = 0x800001; -pub const Q_QUOTAON: c_int = 0x800002; -pub const Q_QUOTAOFF: c_int = 0x800003; -pub const Q_GETQUOTA: c_int = 0x800007; -pub const Q_SETQUOTA: c_int = 0x800008; - -pub const TCIOFF: c_int = 2; -pub const TCION: c_int = 3; -pub const TCOOFF: c_int = 0; -pub const TCOON: c_int = 1; -pub const TCIFLUSH: c_int = 0; -pub const TCOFLUSH: c_int = 1; -pub const TCIOFLUSH: c_int = 2; -pub const NL0: crate::tcflag_t = 0x00000000; -pub const NL1: 
crate::tcflag_t = 0x00000100; -pub const TAB0: crate::tcflag_t = 0x00000000; -pub const CR0: crate::tcflag_t = 0x00000000; -pub const FF0: crate::tcflag_t = 0x00000000; -pub const BS0: crate::tcflag_t = 0x00000000; -pub const VT0: crate::tcflag_t = 0x00000000; -pub const VERASE: usize = 2; -pub const VKILL: usize = 3; -pub const VINTR: usize = 0; -pub const VQUIT: usize = 1; -pub const VLNEXT: usize = 15; -pub const IGNBRK: crate::tcflag_t = 0x00000001; -pub const BRKINT: crate::tcflag_t = 0x00000002; -pub const IGNPAR: crate::tcflag_t = 0x00000004; -pub const PARMRK: crate::tcflag_t = 0x00000008; -pub const INPCK: crate::tcflag_t = 0x00000010; -pub const ISTRIP: crate::tcflag_t = 0x00000020; -pub const INLCR: crate::tcflag_t = 0x00000040; -pub const IGNCR: crate::tcflag_t = 0x00000080; -pub const ICRNL: crate::tcflag_t = 0x00000100; -pub const IXANY: crate::tcflag_t = 0x00000800; -pub const IMAXBEL: crate::tcflag_t = 0x00002000; -pub const OPOST: crate::tcflag_t = 0x1; -pub const CS5: crate::tcflag_t = 0x00000000; -pub const CRTSCTS: crate::tcflag_t = 0x80000000; -pub const ECHO: crate::tcflag_t = 0x00000008; -pub const OCRNL: crate::tcflag_t = 0o000010; -pub const ONOCR: crate::tcflag_t = 0o000020; -pub const ONLRET: crate::tcflag_t = 0o000040; -pub const OFILL: crate::tcflag_t = 0o000100; -pub const OFDEL: crate::tcflag_t = 0o000200; - -pub const CLONE_VM: c_int = 0x100; -pub const CLONE_FS: c_int = 0x200; -pub const CLONE_FILES: c_int = 0x400; -pub const CLONE_SIGHAND: c_int = 0x800; -pub const CLONE_PTRACE: c_int = 0x2000; -pub const CLONE_VFORK: c_int = 0x4000; -pub const CLONE_PARENT: c_int = 0x8000; -pub const CLONE_THREAD: c_int = 0x10000; -pub const CLONE_NEWNS: c_int = 0x20000; -pub const CLONE_SYSVSEM: c_int = 0x40000; -pub const CLONE_SETTLS: c_int = 0x80000; -pub const CLONE_PARENT_SETTID: c_int = 0x100000; -pub const CLONE_CHILD_CLEARTID: c_int = 0x200000; -pub const CLONE_DETACHED: c_int = 0x400000; -pub const CLONE_UNTRACED: c_int = 0x800000; -pub const CLONE_CHILD_SETTID: c_int = 0x01000000; -pub const CLONE_NEWCGROUP: c_int = 0x02000000; -pub const CLONE_NEWUTS: c_int = 0x04000000; -pub const CLONE_NEWIPC: c_int = 0x08000000; -pub const CLONE_NEWUSER: c_int = 0x10000000; -pub const CLONE_NEWPID: c_int = 0x20000000; -pub const CLONE_NEWNET: c_int = 0x40000000; -pub const CLONE_IO: c_int = 0x80000000; - -pub const WNOHANG: c_int = 0x00000001; -pub const WUNTRACED: c_int = 0x00000002; -pub const WSTOPPED: c_int = WUNTRACED; -pub const WEXITED: c_int = 0x00000004; -pub const WCONTINUED: c_int = 0x00000008; -pub const WNOWAIT: c_int = 0x01000000; - -// Options for personality(2). -pub const ADDR_NO_RANDOMIZE: c_int = 0x0040000; -pub const MMAP_PAGE_ZERO: c_int = 0x0100000; -pub const ADDR_COMPAT_LAYOUT: c_int = 0x0200000; -pub const READ_IMPLIES_EXEC: c_int = 0x0400000; -pub const ADDR_LIMIT_32BIT: c_int = 0x0800000; -pub const SHORT_INODE: c_int = 0x1000000; -pub const WHOLE_SECONDS: c_int = 0x2000000; -pub const STICKY_TIMEOUTS: c_int = 0x4000000; -pub const ADDR_LIMIT_3GB: c_int = 0x8000000; - -// Options set using PTRACE_SETOPTIONS. 
-pub const PTRACE_O_TRACESYSGOOD: c_int = 0x00000001; -pub const PTRACE_O_TRACEFORK: c_int = 0x00000002; -pub const PTRACE_O_TRACEVFORK: c_int = 0x00000004; -pub const PTRACE_O_TRACECLONE: c_int = 0x00000008; -pub const PTRACE_O_TRACEEXEC: c_int = 0x00000010; -pub const PTRACE_O_TRACEVFORKDONE: c_int = 0x00000020; -pub const PTRACE_O_TRACEEXIT: c_int = 0x00000040; -pub const PTRACE_O_TRACESECCOMP: c_int = 0x00000080; -pub const PTRACE_O_SUSPEND_SECCOMP: c_int = 0x00200000; -pub const PTRACE_O_EXITKILL: c_int = 0x00100000; -pub const PTRACE_O_MASK: c_int = 0x003000ff; - -// Wait extended result codes for the above trace options. -pub const PTRACE_EVENT_FORK: c_int = 1; -pub const PTRACE_EVENT_VFORK: c_int = 2; -pub const PTRACE_EVENT_CLONE: c_int = 3; -pub const PTRACE_EVENT_EXEC: c_int = 4; -pub const PTRACE_EVENT_VFORK_DONE: c_int = 5; -pub const PTRACE_EVENT_EXIT: c_int = 6; -pub const PTRACE_EVENT_SECCOMP: c_int = 7; - -pub const __WNOTHREAD: c_int = 0x20000000; -pub const __WALL: c_int = 0x40000000; -pub const __WCLONE: c_int = 0x80000000; - -pub const SPLICE_F_MOVE: c_uint = 0x01; -pub const SPLICE_F_NONBLOCK: c_uint = 0x02; -pub const SPLICE_F_MORE: c_uint = 0x04; -pub const SPLICE_F_GIFT: c_uint = 0x08; - -pub const RTLD_LOCAL: c_int = 0; -pub const RTLD_LAZY: c_int = 1; - -pub const POSIX_FADV_NORMAL: c_int = 0; -pub const POSIX_FADV_RANDOM: c_int = 1; -pub const POSIX_FADV_SEQUENTIAL: c_int = 2; -pub const POSIX_FADV_WILLNEED: c_int = 3; - -pub const AT_FDCWD: c_int = -100; -pub const AT_SYMLINK_NOFOLLOW: c_int = 0x100; -pub const AT_REMOVEDIR: c_int = 0x200; -pub const AT_SYMLINK_FOLLOW: c_int = 0x400; -pub const AT_NO_AUTOMOUNT: c_int = 0x800; -pub const AT_EMPTY_PATH: c_int = 0x1000; -pub const AT_RECURSIVE: c_int = 0x8000; - -pub const LOG_CRON: c_int = 9 << 3; -pub const LOG_AUTHPRIV: c_int = 10 << 3; -pub const LOG_FTP: c_int = 11 << 3; -pub const LOG_PERROR: c_int = 0x20; - -pub const PIPE_BUF: usize = 4096; - -pub const SI_LOAD_SHIFT: c_uint = 16; - -// si_code values -pub const SI_USER: c_int = 0; -pub const SI_KERNEL: c_int = 0x80; -pub const SI_QUEUE: c_int = -1; -cfg_if! { - if #[cfg(not(any( - target_arch = "mips", - target_arch = "mips32r6", - target_arch = "mips64" - )))] { - pub const SI_TIMER: c_int = -2; - pub const SI_MESGQ: c_int = -3; - pub const SI_ASYNCIO: c_int = -4; - } else { - pub const SI_TIMER: c_int = -3; - pub const SI_MESGQ: c_int = -4; - pub const SI_ASYNCIO: c_int = -2; - } -} -pub const SI_SIGIO: c_int = -5; -pub const SI_TKILL: c_int = -6; -pub const SI_ASYNCNL: c_int = -60; - -// si_code values for SIGBUS signal -pub const BUS_ADRALN: c_int = 1; -pub const BUS_ADRERR: c_int = 2; -pub const BUS_OBJERR: c_int = 3; -// Linux-specific si_code values for SIGBUS signal -pub const BUS_MCEERR_AR: c_int = 4; -pub const BUS_MCEERR_AO: c_int = 5; - -// si_code values for SIGTRAP -pub const TRAP_BRKPT: c_int = 1; -pub const TRAP_TRACE: c_int = 2; -pub const TRAP_BRANCH: c_int = 3; -pub const TRAP_HWBKPT: c_int = 4; -pub const TRAP_UNK: c_int = 5; - -// si_code values for SIGCHLD signal -pub const CLD_EXITED: c_int = 1; -pub const CLD_KILLED: c_int = 2; -pub const CLD_DUMPED: c_int = 3; -pub const CLD_TRAPPED: c_int = 4; -pub const CLD_STOPPED: c_int = 5; -pub const CLD_CONTINUED: c_int = 6; - -pub const SIGEV_SIGNAL: c_int = 0; -pub const SIGEV_NONE: c_int = 1; -pub const SIGEV_THREAD: c_int = 2; - -pub const P_ALL: idtype_t = 0; -pub const P_PID: idtype_t = 1; -pub const P_PGID: idtype_t = 2; -cfg_if! 
{ - if #[cfg(not(target_os = "emscripten"))] { - pub const P_PIDFD: idtype_t = 3; - } -} - -pub const UTIME_OMIT: c_long = 1073741822; -pub const UTIME_NOW: c_long = 1073741823; - -pub const POLLIN: c_short = 0x1; -pub const POLLPRI: c_short = 0x2; -pub const POLLOUT: c_short = 0x4; -pub const POLLERR: c_short = 0x8; -pub const POLLHUP: c_short = 0x10; -pub const POLLNVAL: c_short = 0x20; -pub const POLLRDNORM: c_short = 0x040; -pub const POLLRDBAND: c_short = 0x080; -#[cfg(not(any(target_arch = "sparc", target_arch = "sparc64")))] -pub const POLLRDHUP: c_short = 0x2000; -#[cfg(any(target_arch = "sparc", target_arch = "sparc64"))] -pub const POLLRDHUP: c_short = 0x800; - -pub const IPTOS_LOWDELAY: u8 = 0x10; -pub const IPTOS_THROUGHPUT: u8 = 0x08; -pub const IPTOS_RELIABILITY: u8 = 0x04; -pub const IPTOS_MINCOST: u8 = 0x02; - -pub const IPTOS_PREC_NETCONTROL: u8 = 0xe0; -pub const IPTOS_PREC_INTERNETCONTROL: u8 = 0xc0; -pub const IPTOS_PREC_CRITIC_ECP: u8 = 0xa0; -pub const IPTOS_PREC_FLASHOVERRIDE: u8 = 0x80; -pub const IPTOS_PREC_FLASH: u8 = 0x60; -pub const IPTOS_PREC_IMMEDIATE: u8 = 0x40; -pub const IPTOS_PREC_PRIORITY: u8 = 0x20; -pub const IPTOS_PREC_ROUTINE: u8 = 0x00; - -pub const IPTOS_ECN_MASK: u8 = 0x03; -pub const IPTOS_ECN_ECT1: u8 = 0x01; -pub const IPTOS_ECN_ECT0: u8 = 0x02; -pub const IPTOS_ECN_CE: u8 = 0x03; - -pub const IPOPT_COPY: u8 = 0x80; -pub const IPOPT_CLASS_MASK: u8 = 0x60; -pub const IPOPT_NUMBER_MASK: u8 = 0x1f; - -pub const IPOPT_CONTROL: u8 = 0x00; -pub const IPOPT_RESERVED1: u8 = 0x20; -pub const IPOPT_MEASUREMENT: u8 = 0x40; -pub const IPOPT_RESERVED2: u8 = 0x60; -pub const IPOPT_END: u8 = 0 | IPOPT_CONTROL; -pub const IPOPT_NOOP: u8 = 1 | IPOPT_CONTROL; -pub const IPOPT_SEC: u8 = 2 | IPOPT_CONTROL | IPOPT_COPY; -pub const IPOPT_LSRR: u8 = 3 | IPOPT_CONTROL | IPOPT_COPY; -pub const IPOPT_TIMESTAMP: u8 = 4 | IPOPT_MEASUREMENT; -pub const IPOPT_RR: u8 = 7 | IPOPT_CONTROL; -pub const IPOPT_SID: u8 = 8 | IPOPT_CONTROL | IPOPT_COPY; -pub const IPOPT_SSRR: u8 = 9 | IPOPT_CONTROL | IPOPT_COPY; -pub const IPOPT_RA: u8 = 20 | IPOPT_CONTROL | IPOPT_COPY; -pub const IPVERSION: u8 = 4; -pub const MAXTTL: u8 = 255; -pub const IPDEFTTL: u8 = 64; -pub const IPOPT_OPTVAL: u8 = 0; -pub const IPOPT_OLEN: u8 = 1; -pub const IPOPT_OFFSET: u8 = 2; -pub const IPOPT_MINOFF: u8 = 4; -pub const MAX_IPOPTLEN: u8 = 40; -pub const IPOPT_NOP: u8 = IPOPT_NOOP; -pub const IPOPT_EOL: u8 = IPOPT_END; -pub const IPOPT_TS: u8 = IPOPT_TIMESTAMP; -pub const IPOPT_TS_TSONLY: u8 = 0; -pub const IPOPT_TS_TSANDADDR: u8 = 1; -pub const IPOPT_TS_PRESPEC: u8 = 3; - -pub const ARPOP_RREQUEST: u16 = 3; -pub const ARPOP_RREPLY: u16 = 4; -pub const ARPOP_InREQUEST: u16 = 8; -pub const ARPOP_InREPLY: u16 = 9; -pub const ARPOP_NAK: u16 = 10; - -pub const ATF_NETMASK: c_int = 0x20; -pub const ATF_DONTPUB: c_int = 0x40; - -pub const ARPHRD_NETROM: u16 = 0; -pub const ARPHRD_ETHER: u16 = 1; -pub const ARPHRD_EETHER: u16 = 2; -pub const ARPHRD_AX25: u16 = 3; -pub const ARPHRD_PRONET: u16 = 4; -pub const ARPHRD_CHAOS: u16 = 5; -pub const ARPHRD_IEEE802: u16 = 6; -pub const ARPHRD_ARCNET: u16 = 7; -pub const ARPHRD_APPLETLK: u16 = 8; -pub const ARPHRD_DLCI: u16 = 15; -pub const ARPHRD_ATM: u16 = 19; -pub const ARPHRD_METRICOM: u16 = 23; -pub const ARPHRD_IEEE1394: u16 = 24; -pub const ARPHRD_EUI64: u16 = 27; -pub const ARPHRD_INFINIBAND: u16 = 32; - -pub const ARPHRD_SLIP: u16 = 256; -pub const ARPHRD_CSLIP: u16 = 257; -pub const ARPHRD_SLIP6: u16 = 258; -pub const ARPHRD_CSLIP6: u16 = 259; -pub const 
ARPHRD_RSRVD: u16 = 260; -pub const ARPHRD_ADAPT: u16 = 264; -pub const ARPHRD_ROSE: u16 = 270; -pub const ARPHRD_X25: u16 = 271; -pub const ARPHRD_HWX25: u16 = 272; -pub const ARPHRD_CAN: u16 = 280; -pub const ARPHRD_PPP: u16 = 512; -pub const ARPHRD_CISCO: u16 = 513; -pub const ARPHRD_HDLC: u16 = ARPHRD_CISCO; -pub const ARPHRD_LAPB: u16 = 516; -pub const ARPHRD_DDCMP: u16 = 517; -pub const ARPHRD_RAWHDLC: u16 = 518; - -pub const ARPHRD_TUNNEL: u16 = 768; -pub const ARPHRD_TUNNEL6: u16 = 769; -pub const ARPHRD_FRAD: u16 = 770; -pub const ARPHRD_SKIP: u16 = 771; -pub const ARPHRD_LOOPBACK: u16 = 772; -pub const ARPHRD_LOCALTLK: u16 = 773; -pub const ARPHRD_FDDI: u16 = 774; -pub const ARPHRD_BIF: u16 = 775; -pub const ARPHRD_SIT: u16 = 776; -pub const ARPHRD_IPDDP: u16 = 777; -pub const ARPHRD_IPGRE: u16 = 778; -pub const ARPHRD_PIMREG: u16 = 779; -pub const ARPHRD_HIPPI: u16 = 780; -pub const ARPHRD_ASH: u16 = 781; -pub const ARPHRD_ECONET: u16 = 782; -pub const ARPHRD_IRDA: u16 = 783; -pub const ARPHRD_FCPP: u16 = 784; -pub const ARPHRD_FCAL: u16 = 785; -pub const ARPHRD_FCPL: u16 = 786; -pub const ARPHRD_FCFABRIC: u16 = 787; -pub const ARPHRD_IEEE802_TR: u16 = 800; -pub const ARPHRD_IEEE80211: u16 = 801; -pub const ARPHRD_IEEE80211_PRISM: u16 = 802; -pub const ARPHRD_IEEE80211_RADIOTAP: u16 = 803; -pub const ARPHRD_IEEE802154: u16 = 804; - -pub const ARPHRD_VOID: u16 = 0xFFFF; -pub const ARPHRD_NONE: u16 = 0xFFFE; - -cfg_if! { - if #[cfg(not(target_os = "emscripten"))] { - // linux/if_tun.h - /* TUNSETIFF ifr flags */ - pub const IFF_TUN: c_int = 0x0001; - pub const IFF_TAP: c_int = 0x0002; - pub const IFF_NAPI: c_int = 0x0010; - pub const IFF_NAPI_FRAGS: c_int = 0x0020; - // Used in TUNSETIFF to bring up tun/tap without carrier - pub const IFF_NO_CARRIER: c_int = 0x0040; - pub const IFF_NO_PI: c_int = 0x1000; - // Read queue size - pub const TUN_READQ_SIZE: c_short = 500; - // TUN device type flags: deprecated. Use IFF_TUN/IFF_TAP instead. 
- pub const TUN_TUN_DEV: c_short = crate::IFF_TUN as c_short; - pub const TUN_TAP_DEV: c_short = crate::IFF_TAP as c_short; - pub const TUN_TYPE_MASK: c_short = 0x000f; - // This flag has no real effect - pub const IFF_ONE_QUEUE: c_int = 0x2000; - pub const IFF_VNET_HDR: c_int = 0x4000; - pub const IFF_TUN_EXCL: c_int = 0x8000; - pub const IFF_MULTI_QUEUE: c_int = 0x0100; - pub const IFF_ATTACH_QUEUE: c_int = 0x0200; - pub const IFF_DETACH_QUEUE: c_int = 0x0400; - // read-only flag - pub const IFF_PERSIST: c_int = 0x0800; - pub const IFF_NOFILTER: c_int = 0x1000; - // Socket options - pub const TUN_TX_TIMESTAMP: c_int = 1; - // Features for GSO (TUNSETOFFLOAD) - pub const TUN_F_CSUM: c_uint = 0x01; - pub const TUN_F_TSO4: c_uint = 0x02; - pub const TUN_F_TSO6: c_uint = 0x04; - pub const TUN_F_TSO_ECN: c_uint = 0x08; - pub const TUN_F_UFO: c_uint = 0x10; - pub const TUN_F_USO4: c_uint = 0x20; - pub const TUN_F_USO6: c_uint = 0x40; - // Protocol info prepended to the packets (when IFF_NO_PI is not set) - pub const TUN_PKT_STRIP: c_int = 0x0001; - // Accept all multicast packets - pub const TUN_FLT_ALLMULTI: c_int = 0x0001; - // Ioctl operation codes - const T_TYPE: u32 = b'T' as u32; - pub const TUNSETNOCSUM: Ioctl = _IOW::(T_TYPE, 200); - pub const TUNSETDEBUG: Ioctl = _IOW::(T_TYPE, 201); - pub const TUNSETIFF: Ioctl = _IOW::(T_TYPE, 202); - pub const TUNSETPERSIST: Ioctl = _IOW::(T_TYPE, 203); - pub const TUNSETOWNER: Ioctl = _IOW::(T_TYPE, 204); - pub const TUNSETLINK: Ioctl = _IOW::(T_TYPE, 205); - pub const TUNSETGROUP: Ioctl = _IOW::(T_TYPE, 206); - pub const TUNGETFEATURES: Ioctl = _IOR::(T_TYPE, 207); - pub const TUNSETOFFLOAD: Ioctl = _IOW::(T_TYPE, 208); - pub const TUNSETTXFILTER: Ioctl = _IOW::(T_TYPE, 209); - pub const TUNGETIFF: Ioctl = _IOR::(T_TYPE, 210); - pub const TUNGETSNDBUF: Ioctl = _IOR::(T_TYPE, 211); - pub const TUNSETSNDBUF: Ioctl = _IOW::(T_TYPE, 212); - pub const TUNATTACHFILTER: Ioctl = _IOW::(T_TYPE, 213); - pub const TUNDETACHFILTER: Ioctl = _IOW::(T_TYPE, 214); - pub const TUNGETVNETHDRSZ: Ioctl = _IOR::(T_TYPE, 215); - pub const TUNSETVNETHDRSZ: Ioctl = _IOW::(T_TYPE, 216); - pub const TUNSETQUEUE: Ioctl = _IOW::(T_TYPE, 217); - pub const TUNSETIFINDEX: Ioctl = _IOW::(T_TYPE, 218); - pub const TUNGETFILTER: Ioctl = _IOR::(T_TYPE, 219); - pub const TUNSETVNETLE: Ioctl = _IOW::(T_TYPE, 220); - pub const TUNGETVNETLE: Ioctl = _IOR::(T_TYPE, 221); - pub const TUNSETVNETBE: Ioctl = _IOW::(T_TYPE, 222); - pub const TUNGETVNETBE: Ioctl = _IOR::(T_TYPE, 223); - pub const TUNSETSTEERINGEBPF: Ioctl = _IOR::(T_TYPE, 224); - pub const TUNSETFILTEREBPF: Ioctl = _IOR::(T_TYPE, 225); - pub const TUNSETCARRIER: Ioctl = _IOW::(T_TYPE, 226); - pub const TUNGETDEVNETNS: Ioctl = _IO(T_TYPE, 227); - - // linux/fs.h - pub const FS_IOC_GETFLAGS: Ioctl = _IOR::('f' as u32, 1); - pub const FS_IOC_SETFLAGS: Ioctl = _IOW::('f' as u32, 2); - pub const FS_IOC_GETVERSION: Ioctl = _IOR::('v' as u32, 1); - pub const FS_IOC_SETVERSION: Ioctl = _IOW::('v' as u32, 2); - pub const FS_IOC32_GETFLAGS: Ioctl = _IOR::('f' as u32, 1); - pub const FS_IOC32_SETFLAGS: Ioctl = _IOW::('f' as u32, 2); - pub const FS_IOC32_GETVERSION: Ioctl = _IOR::('v' as u32, 1); - pub const FS_IOC32_SETVERSION: Ioctl = _IOW::('v' as u32, 2); - - pub const FICLONE: Ioctl = _IOW::(0x94, 9); - pub const FICLONERANGE: Ioctl = _IOW::(0x94, 13); - } -} - -cfg_if! { - if #[cfg(target_os = "emscripten")] { - // Emscripten does not define any `*_SUPER_MAGIC` constants. 
- } else if #[cfg(not(target_arch = "s390x"))] { - pub const ADFS_SUPER_MAGIC: c_long = 0x0000adf5; - pub const AFFS_SUPER_MAGIC: c_long = 0x0000adff; - pub const AFS_SUPER_MAGIC: c_long = 0x5346414f; - pub const AUTOFS_SUPER_MAGIC: c_long = 0x0187; - pub const BPF_FS_MAGIC: c_long = 0xcafe4a11; - pub const BTRFS_SUPER_MAGIC: c_long = 0x9123683e; - pub const CGROUP2_SUPER_MAGIC: c_long = 0x63677270; - pub const CGROUP_SUPER_MAGIC: c_long = 0x27e0eb; - pub const CODA_SUPER_MAGIC: c_long = 0x73757245; - pub const CRAMFS_MAGIC: c_long = 0x28cd3d45; - pub const DEBUGFS_MAGIC: c_long = 0x64626720; - pub const DEVPTS_SUPER_MAGIC: c_long = 0x1cd1; - pub const ECRYPTFS_SUPER_MAGIC: c_long = 0xf15f; - pub const EFS_SUPER_MAGIC: c_long = 0x00414a53; - pub const EXT2_SUPER_MAGIC: c_long = 0x0000ef53; - pub const EXT3_SUPER_MAGIC: c_long = 0x0000ef53; - pub const EXT4_SUPER_MAGIC: c_long = 0x0000ef53; - pub const F2FS_SUPER_MAGIC: c_long = 0xf2f52010; - pub const FUSE_SUPER_MAGIC: c_long = 0x65735546; - pub const FUTEXFS_SUPER_MAGIC: c_long = 0xbad1dea; - pub const HOSTFS_SUPER_MAGIC: c_long = 0x00c0ffee; - pub const HPFS_SUPER_MAGIC: c_long = 0xf995e849; - pub const HUGETLBFS_MAGIC: c_long = 0x958458f6; - pub const ISOFS_SUPER_MAGIC: c_long = 0x00009660; - pub const JFFS2_SUPER_MAGIC: c_long = 0x000072b6; - pub const MINIX2_SUPER_MAGIC2: c_long = 0x00002478; - pub const MINIX2_SUPER_MAGIC: c_long = 0x00002468; - pub const MINIX3_SUPER_MAGIC: c_long = 0x4d5a; - pub const MINIX_SUPER_MAGIC2: c_long = 0x0000138f; - pub const MINIX_SUPER_MAGIC: c_long = 0x0000137f; - pub const MSDOS_SUPER_MAGIC: c_long = 0x00004d44; - pub const NCP_SUPER_MAGIC: c_long = 0x0000564c; - pub const NFS_SUPER_MAGIC: c_long = 0x00006969; - pub const NILFS_SUPER_MAGIC: c_long = 0x3434; - pub const OCFS2_SUPER_MAGIC: c_long = 0x7461636f; - pub const OPENPROM_SUPER_MAGIC: c_long = 0x00009fa1; - pub const OVERLAYFS_SUPER_MAGIC: c_long = 0x794c7630; - pub const PROC_SUPER_MAGIC: c_long = 0x00009fa0; - pub const QNX4_SUPER_MAGIC: c_long = 0x0000002f; - pub const QNX6_SUPER_MAGIC: c_long = 0x68191122; - pub const RDTGROUP_SUPER_MAGIC: c_long = 0x7655821; - pub const REISERFS_SUPER_MAGIC: c_long = 0x52654973; - pub const SECURITYFS_MAGIC: c_long = 0x73636673; - pub const SELINUX_MAGIC: c_long = 0xf97cff8c; - pub const SMACK_MAGIC: c_long = 0x43415d53; - pub const SMB_SUPER_MAGIC: c_long = 0x0000517b; - pub const SYSFS_MAGIC: c_long = 0x62656572; - pub const TMPFS_MAGIC: c_long = 0x01021994; - pub const TRACEFS_MAGIC: c_long = 0x74726163; - pub const UDF_SUPER_MAGIC: c_long = 0x15013346; - pub const USBDEVICE_SUPER_MAGIC: c_long = 0x00009fa2; - pub const XENFS_SUPER_MAGIC: c_long = 0xabba1974; - pub const NSFS_MAGIC: c_long = 0x6e736673; - } else if #[cfg(target_arch = "s390x")] { - pub const ADFS_SUPER_MAGIC: c_uint = 0x0000adf5; - pub const AFFS_SUPER_MAGIC: c_uint = 0x0000adff; - pub const AFS_SUPER_MAGIC: c_uint = 0x5346414f; - pub const AUTOFS_SUPER_MAGIC: c_uint = 0x0187; - pub const BPF_FS_MAGIC: c_uint = 0xcafe4a11; - pub const BTRFS_SUPER_MAGIC: c_uint = 0x9123683e; - pub const CGROUP2_SUPER_MAGIC: c_uint = 0x63677270; - pub const CGROUP_SUPER_MAGIC: c_uint = 0x27e0eb; - pub const CODA_SUPER_MAGIC: c_uint = 0x73757245; - pub const CRAMFS_MAGIC: c_uint = 0x28cd3d45; - pub const DEBUGFS_MAGIC: c_uint = 0x64626720; - pub const DEVPTS_SUPER_MAGIC: c_uint = 0x1cd1; - pub const ECRYPTFS_SUPER_MAGIC: c_uint = 0xf15f; - pub const EFS_SUPER_MAGIC: c_uint = 0x00414a53; - pub const EXT2_SUPER_MAGIC: c_uint = 0x0000ef53; - pub const 
EXT3_SUPER_MAGIC: c_uint = 0x0000ef53; - pub const EXT4_SUPER_MAGIC: c_uint = 0x0000ef53; - pub const F2FS_SUPER_MAGIC: c_uint = 0xf2f52010; - pub const FUSE_SUPER_MAGIC: c_uint = 0x65735546; - pub const FUTEXFS_SUPER_MAGIC: c_uint = 0xbad1dea; - pub const HOSTFS_SUPER_MAGIC: c_uint = 0x00c0ffee; - pub const HPFS_SUPER_MAGIC: c_uint = 0xf995e849; - pub const HUGETLBFS_MAGIC: c_uint = 0x958458f6; - pub const ISOFS_SUPER_MAGIC: c_uint = 0x00009660; - pub const JFFS2_SUPER_MAGIC: c_uint = 0x000072b6; - pub const MINIX2_SUPER_MAGIC2: c_uint = 0x00002478; - pub const MINIX2_SUPER_MAGIC: c_uint = 0x00002468; - pub const MINIX3_SUPER_MAGIC: c_uint = 0x4d5a; - pub const MINIX_SUPER_MAGIC2: c_uint = 0x0000138f; - pub const MINIX_SUPER_MAGIC: c_uint = 0x0000137f; - pub const MSDOS_SUPER_MAGIC: c_uint = 0x00004d44; - pub const NCP_SUPER_MAGIC: c_uint = 0x0000564c; - pub const NFS_SUPER_MAGIC: c_uint = 0x00006969; - pub const NILFS_SUPER_MAGIC: c_uint = 0x3434; - pub const OCFS2_SUPER_MAGIC: c_uint = 0x7461636f; - pub const OPENPROM_SUPER_MAGIC: c_uint = 0x00009fa1; - pub const OVERLAYFS_SUPER_MAGIC: c_uint = 0x794c7630; - pub const PROC_SUPER_MAGIC: c_uint = 0x00009fa0; - pub const QNX4_SUPER_MAGIC: c_uint = 0x0000002f; - pub const QNX6_SUPER_MAGIC: c_uint = 0x68191122; - pub const RDTGROUP_SUPER_MAGIC: c_uint = 0x7655821; - pub const REISERFS_SUPER_MAGIC: c_uint = 0x52654973; - pub const SECURITYFS_MAGIC: c_uint = 0x73636673; - pub const SELINUX_MAGIC: c_uint = 0xf97cff8c; - pub const SMACK_MAGIC: c_uint = 0x43415d53; - pub const SMB_SUPER_MAGIC: c_uint = 0x0000517b; - pub const SYSFS_MAGIC: c_uint = 0x62656572; - pub const TMPFS_MAGIC: c_uint = 0x01021994; - pub const TRACEFS_MAGIC: c_uint = 0x74726163; - pub const UDF_SUPER_MAGIC: c_uint = 0x15013346; - pub const USBDEVICE_SUPER_MAGIC: c_uint = 0x00009fa2; - pub const XENFS_SUPER_MAGIC: c_uint = 0xabba1974; - pub const NSFS_MAGIC: c_uint = 0x6e736673; - } -} - -cfg_if! { - if #[cfg(any( - target_env = "gnu", - target_os = "android", - all(target_env = "musl", musl_v1_2_3) - ))] { - pub const AT_STATX_SYNC_TYPE: c_int = 0x6000; - pub const AT_STATX_SYNC_AS_STAT: c_int = 0x0000; - pub const AT_STATX_FORCE_SYNC: c_int = 0x2000; - pub const AT_STATX_DONT_SYNC: c_int = 0x4000; - pub const STATX_TYPE: c_uint = 0x0001; - pub const STATX_MODE: c_uint = 0x0002; - pub const STATX_NLINK: c_uint = 0x0004; - pub const STATX_UID: c_uint = 0x0008; - pub const STATX_GID: c_uint = 0x0010; - pub const STATX_ATIME: c_uint = 0x0020; - pub const STATX_MTIME: c_uint = 0x0040; - pub const STATX_CTIME: c_uint = 0x0080; - pub const STATX_INO: c_uint = 0x0100; - pub const STATX_SIZE: c_uint = 0x0200; - pub const STATX_BLOCKS: c_uint = 0x0400; - pub const STATX_BASIC_STATS: c_uint = 0x07ff; - pub const STATX_BTIME: c_uint = 0x0800; - pub const STATX_ALL: c_uint = 0x0fff; - pub const STATX_MNT_ID: c_uint = 0x1000; - pub const STATX_DIOALIGN: c_uint = 0x2000; - pub const STATX__RESERVED: c_int = 0x80000000; - pub const STATX_ATTR_COMPRESSED: c_int = 0x0004; - pub const STATX_ATTR_IMMUTABLE: c_int = 0x0010; - pub const STATX_ATTR_APPEND: c_int = 0x0020; - pub const STATX_ATTR_NODUMP: c_int = 0x0040; - pub const STATX_ATTR_ENCRYPTED: c_int = 0x0800; - pub const STATX_ATTR_AUTOMOUNT: c_int = 0x1000; - pub const STATX_ATTR_MOUNT_ROOT: c_int = 0x2000; - pub const STATX_ATTR_VERITY: c_int = 0x100000; - pub const STATX_ATTR_DAX: c_int = 0x200000; - } -} - -// https://github.com/search?q=repo%3Atorvalds%2Flinux+%22%23define+_IOC_NONE%22&type=code -cfg_if! 
{ - if #[cfg(not(target_os = "emscripten"))] { - const _IOC_NRBITS: u32 = 8; - const _IOC_TYPEBITS: u32 = 8; - - cfg_if! { - if #[cfg(any( - any(target_arch = "powerpc", target_arch = "powerpc64"), - any(target_arch = "sparc", target_arch = "sparc64"), - any(target_arch = "mips", target_arch = "mips64"), - ))] { - // https://github.com/torvalds/linux/blob/b311c1b497e51a628aa89e7cb954481e5f9dced2/arch/powerpc/include/uapi/asm/ioctl.h - // https://github.com/torvalds/linux/blob/b311c1b497e51a628aa89e7cb954481e5f9dced2/arch/sparc/include/uapi/asm/ioctl.h - // https://github.com/torvalds/linux/blob/b311c1b497e51a628aa89e7cb954481e5f9dced2/arch/mips/include/uapi/asm/ioctl.h - - const _IOC_SIZEBITS: u32 = 13; - const _IOC_DIRBITS: u32 = 3; - - const _IOC_NONE: u32 = 1; - const _IOC_READ: u32 = 2; - const _IOC_WRITE: u32 = 4; - } else { - // https://github.com/torvalds/linux/blob/b311c1b497e51a628aa89e7cb954481e5f9dced2/include/uapi/asm-generic/ioctl.h - - const _IOC_SIZEBITS: u32 = 14; - const _IOC_DIRBITS: u32 = 2; - - const _IOC_NONE: u32 = 0; - const _IOC_WRITE: u32 = 1; - const _IOC_READ: u32 = 2; - } - } - const _IOC_NRMASK: u32 = (1 << _IOC_NRBITS) - 1; - const _IOC_TYPEMASK: u32 = (1 << _IOC_TYPEBITS) - 1; - const _IOC_SIZEMASK: u32 = (1 << _IOC_SIZEBITS) - 1; - const _IOC_DIRMASK: u32 = (1 << _IOC_DIRBITS) - 1; - - const _IOC_NRSHIFT: u32 = 0; - const _IOC_TYPESHIFT: u32 = _IOC_NRSHIFT + _IOC_NRBITS; - const _IOC_SIZESHIFT: u32 = _IOC_TYPESHIFT + _IOC_TYPEBITS; - const _IOC_DIRSHIFT: u32 = _IOC_SIZESHIFT + _IOC_SIZEBITS; - - // adapted from https://github.com/torvalds/linux/blob/8a696a29c6905594e4abf78eaafcb62165ac61f1/rust/kernel/ioctl.rs - - /// Build an ioctl number, analogous to the C macro of the same name. - const fn _IOC(dir: u32, ty: u32, nr: u32, size: usize) -> Ioctl { - core::debug_assert!(dir <= _IOC_DIRMASK); - core::debug_assert!(ty <= _IOC_TYPEMASK); - core::debug_assert!(nr <= _IOC_NRMASK); - core::debug_assert!(size <= (_IOC_SIZEMASK as usize)); - - ((dir << _IOC_DIRSHIFT) - | (ty << _IOC_TYPESHIFT) - | (nr << _IOC_NRSHIFT) - | ((size as u32) << _IOC_SIZESHIFT)) as Ioctl - } - - /// Build an ioctl number for an argumentless ioctl. - pub const fn _IO(ty: u32, nr: u32) -> Ioctl { - _IOC(_IOC_NONE, ty, nr, 0) - } - - /// Build an ioctl number for an read-only ioctl. - pub const fn _IOR(ty: u32, nr: u32) -> Ioctl { - _IOC(_IOC_READ, ty, nr, size_of::()) - } - - /// Build an ioctl number for an write-only ioctl. - pub const fn _IOW(ty: u32, nr: u32) -> Ioctl { - _IOC(_IOC_WRITE, ty, nr, size_of::()) - } - - /// Build an ioctl number for a read-write ioctl. - pub const fn _IOWR(ty: u32, nr: u32) -> Ioctl { - _IOC(_IOC_READ | _IOC_WRITE, ty, nr, size_of::()) - } - - extern "C" { - #[cfg_attr(gnu_time_bits64, link_name = "__ioctl_time64")] - pub fn ioctl(fd: c_int, request: Ioctl, ...) -> c_int; - } - } -} - -const fn CMSG_ALIGN(len: usize) -> usize { - (len + size_of::() - 1) & !(size_of::() - 1) -} - -f! 
{ - pub fn CMSG_FIRSTHDR(mhdr: *const crate::msghdr) -> *mut crate::cmsghdr { - if (*mhdr).msg_controllen as usize >= size_of::() { - (*mhdr).msg_control.cast::() - } else { - core::ptr::null_mut::() - } - } - - pub fn CMSG_DATA(cmsg: *const crate::cmsghdr) -> *mut c_uchar { - cmsg.offset(1) as *mut c_uchar - } - - pub const fn CMSG_SPACE(length: c_uint) -> c_uint { - (CMSG_ALIGN(length as usize) + CMSG_ALIGN(size_of::())) as c_uint - } - - pub const fn CMSG_LEN(length: c_uint) -> c_uint { - CMSG_ALIGN(size_of::()) as c_uint + length - } - - pub fn FD_CLR(fd: c_int, set: *mut fd_set) -> () { - let fd = fd as usize; - let size = size_of_val(&(*set).fds_bits[0]) * 8; - (*set).fds_bits[fd / size] &= !(1 << (fd % size)); - return; - } - - pub fn FD_ISSET(fd: c_int, set: *const fd_set) -> bool { - let fd = fd as usize; - let size = size_of_val(&(*set).fds_bits[0]) * 8; - return ((*set).fds_bits[fd / size] & (1 << (fd % size))) != 0; - } - - pub fn FD_SET(fd: c_int, set: *mut fd_set) -> () { - let fd = fd as usize; - let size = size_of_val(&(*set).fds_bits[0]) * 8; - (*set).fds_bits[fd / size] |= 1 << (fd % size); - return; - } - - pub fn FD_ZERO(set: *mut fd_set) -> () { - for slot in &mut (*set).fds_bits { - *slot = 0; - } - } -} - -safe_f! { - pub fn SIGRTMAX() -> c_int { - unsafe { __libc_current_sigrtmax() } - } - - pub fn SIGRTMIN() -> c_int { - unsafe { __libc_current_sigrtmin() } - } - - pub const fn WIFSTOPPED(status: c_int) -> bool { - (status & 0xff) == 0x7f - } - - pub const fn WSTOPSIG(status: c_int) -> c_int { - (status >> 8) & 0xff - } - - pub const fn WIFCONTINUED(status: c_int) -> bool { - status == 0xffff - } - - pub const fn WIFSIGNALED(status: c_int) -> bool { - ((status & 0x7f) + 1) as i8 >= 2 - } - - pub const fn WTERMSIG(status: c_int) -> c_int { - status & 0x7f - } - - pub const fn WIFEXITED(status: c_int) -> bool { - (status & 0x7f) == 0 - } - - pub const fn WEXITSTATUS(status: c_int) -> c_int { - (status >> 8) & 0xff - } - - pub const fn WCOREDUMP(status: c_int) -> bool { - (status & 0x80) != 0 - } - - pub const fn W_EXITCODE(ret: c_int, sig: c_int) -> c_int { - (ret << 8) | sig - } - - pub const fn W_STOPCODE(sig: c_int) -> c_int { - (sig << 8) | 0x7f - } - - pub const fn QCMD(cmd: c_int, type_: c_int) -> c_int { - (cmd << 8) | (type_ & 0x00ff) - } - - pub const fn IPOPT_COPIED(o: u8) -> u8 { - o & IPOPT_COPY - } - - pub const fn IPOPT_CLASS(o: u8) -> u8 { - o & IPOPT_CLASS_MASK - } - - pub const fn IPOPT_NUMBER(o: u8) -> u8 { - o & IPOPT_NUMBER_MASK - } - - pub const fn IPTOS_ECN(x: u8) -> u8 { - x & crate::IPTOS_ECN_MASK - } - - #[allow(ellipsis_inclusive_range_patterns)] - pub const fn KERNEL_VERSION(a: u32, b: u32, c: u32) -> u32 { - ((a << 16) + (b << 8)) + if c > 255 { 255 } else { c } - } -} - -extern "C" { - #[doc(hidden)] - pub fn __libc_current_sigrtmax() -> c_int; - #[doc(hidden)] - pub fn __libc_current_sigrtmin() -> c_int; - - pub fn sem_destroy(sem: *mut sem_t) -> c_int; - pub fn sem_init(sem: *mut sem_t, pshared: c_int, value: c_uint) -> c_int; - pub fn fdatasync(fd: c_int) -> c_int; - pub fn mincore(addr: *mut c_void, len: size_t, vec: *mut c_uchar) -> c_int; - - #[cfg_attr(gnu_time_bits64, link_name = "__clock_getres64")] - pub fn clock_getres(clk_id: crate::clockid_t, tp: *mut crate::timespec) -> c_int; - #[cfg_attr(gnu_time_bits64, link_name = "__clock_gettime64")] - pub fn clock_gettime(clk_id: crate::clockid_t, tp: *mut crate::timespec) -> c_int; - #[cfg_attr(gnu_time_bits64, link_name = "__clock_settime64")] - pub fn clock_settime(clk_id: 
crate::clockid_t, tp: *const crate::timespec) -> c_int; - pub fn clock_getcpuclockid(pid: crate::pid_t, clk_id: *mut crate::clockid_t) -> c_int; - - pub fn dirfd(dirp: *mut crate::DIR) -> c_int; - - pub fn pthread_getattr_np(native: crate::pthread_t, attr: *mut crate::pthread_attr_t) -> c_int; - pub fn pthread_attr_getstack( - attr: *const crate::pthread_attr_t, - stackaddr: *mut *mut c_void, - stacksize: *mut size_t, - ) -> c_int; - pub fn pthread_attr_setstack( - attr: *mut crate::pthread_attr_t, - stackaddr: *mut c_void, - stacksize: size_t, - ) -> c_int; - pub fn memalign(align: size_t, size: size_t) -> *mut c_void; - pub fn setgroups(ngroups: size_t, ptr: *const crate::gid_t) -> c_int; - pub fn pipe2(fds: *mut c_int, flags: c_int) -> c_int; - #[cfg_attr(gnu_file_offset_bits64, link_name = "statfs64")] - pub fn statfs(path: *const c_char, buf: *mut statfs) -> c_int; - #[cfg_attr(gnu_file_offset_bits64, link_name = "fstatfs64")] - pub fn fstatfs(fd: c_int, buf: *mut statfs) -> c_int; - pub fn memrchr(cx: *const c_void, c: c_int, n: size_t) -> *mut c_void; - #[cfg_attr(gnu_file_offset_bits64, link_name = "posix_fadvise64")] - pub fn posix_fadvise(fd: c_int, offset: off_t, len: off_t, advise: c_int) -> c_int; - #[cfg_attr(gnu_time_bits64, link_name = "__futimens64")] - pub fn futimens(fd: c_int, times: *const crate::timespec) -> c_int; - #[cfg_attr(gnu_time_bits64, link_name = "__utimensat64")] - pub fn utimensat( - dirfd: c_int, - path: *const c_char, - times: *const crate::timespec, - flag: c_int, - ) -> c_int; - pub fn duplocale(base: crate::locale_t) -> crate::locale_t; - pub fn freelocale(loc: crate::locale_t); - pub fn newlocale(mask: c_int, locale: *const c_char, base: crate::locale_t) -> crate::locale_t; - pub fn uselocale(loc: crate::locale_t) -> crate::locale_t; - pub fn mknodat(dirfd: c_int, pathname: *const c_char, mode: mode_t, dev: dev_t) -> c_int; - pub fn pthread_condattr_getclock( - attr: *const pthread_condattr_t, - clock_id: *mut clockid_t, - ) -> c_int; - pub fn pthread_condattr_setclock( - attr: *mut pthread_condattr_t, - clock_id: crate::clockid_t, - ) -> c_int; - pub fn pthread_condattr_setpshared(attr: *mut pthread_condattr_t, pshared: c_int) -> c_int; - pub fn pthread_mutexattr_setpshared(attr: *mut pthread_mutexattr_t, pshared: c_int) -> c_int; - pub fn pthread_rwlockattr_getpshared( - attr: *const pthread_rwlockattr_t, - val: *mut c_int, - ) -> c_int; - pub fn pthread_rwlockattr_setpshared(attr: *mut pthread_rwlockattr_t, val: c_int) -> c_int; - pub fn ptsname_r(fd: c_int, buf: *mut c_char, buflen: size_t) -> c_int; - pub fn clearenv() -> c_int; - pub fn waitid( - idtype: idtype_t, - id: id_t, - infop: *mut crate::siginfo_t, - options: c_int, - ) -> c_int; - pub fn getresuid( - ruid: *mut crate::uid_t, - euid: *mut crate::uid_t, - suid: *mut crate::uid_t, - ) -> c_int; - pub fn getresgid( - rgid: *mut crate::gid_t, - egid: *mut crate::gid_t, - sgid: *mut crate::gid_t, - ) -> c_int; - pub fn acct(filename: *const c_char) -> c_int; - pub fn brk(addr: *mut c_void) -> c_int; - pub fn sbrk(increment: intptr_t) -> *mut c_void; - #[deprecated( - since = "0.2.66", - note = "causes memory corruption, see rust-lang/libc#1596" - )] - pub fn vfork() -> crate::pid_t; - pub fn setresgid(rgid: crate::gid_t, egid: crate::gid_t, sgid: crate::gid_t) -> c_int; - pub fn setresuid(ruid: crate::uid_t, euid: crate::uid_t, suid: crate::uid_t) -> c_int; - #[cfg_attr(gnu_time_bits64, link_name = "__wait4_time64")] - pub fn wait4( - pid: crate::pid_t, - status: *mut c_int, - options: 
c_int, - rusage: *mut crate::rusage, - ) -> crate::pid_t; - pub fn login_tty(fd: c_int) -> c_int; - - // DIFF(main): changed to `*const *mut` in e77f551de9 - pub fn execvpe( - file: *const c_char, - argv: *const *const c_char, - envp: *const *const c_char, - ) -> c_int; - pub fn fexecve(fd: c_int, argv: *const *const c_char, envp: *const *const c_char) -> c_int; - - pub fn getifaddrs(ifap: *mut *mut crate::ifaddrs) -> c_int; - pub fn freeifaddrs(ifa: *mut crate::ifaddrs); - pub fn bind( - socket: c_int, - address: *const crate::sockaddr, - address_len: crate::socklen_t, - ) -> c_int; - - pub fn writev(fd: c_int, iov: *const crate::iovec, iovcnt: c_int) -> ssize_t; - pub fn readv(fd: c_int, iov: *const crate::iovec, iovcnt: c_int) -> ssize_t; - - #[cfg_attr(gnu_time_bits64, link_name = "__sendmsg64")] - pub fn sendmsg(fd: c_int, msg: *const crate::msghdr, flags: c_int) -> ssize_t; - #[cfg_attr(gnu_time_bits64, link_name = "__recvmsg64")] - pub fn recvmsg(fd: c_int, msg: *mut crate::msghdr, flags: c_int) -> ssize_t; - pub fn uname(buf: *mut crate::utsname) -> c_int; - - pub fn strchrnul(s: *const c_char, c: c_int) -> *mut c_char; - - pub fn strftime( - s: *mut c_char, - max: size_t, - format: *const c_char, - tm: *const crate::tm, - ) -> size_t; - pub fn strftime_l( - s: *mut c_char, - max: size_t, - format: *const c_char, - tm: *const crate::tm, - locale: crate::locale_t, - ) -> size_t; - pub fn strptime(s: *const c_char, format: *const c_char, tm: *mut crate::tm) -> *mut c_char; - - #[cfg_attr(gnu_file_offset_bits64, link_name = "mkostemp64")] - pub fn mkostemp(template: *mut c_char, flags: c_int) -> c_int; - #[cfg_attr(gnu_file_offset_bits64, link_name = "mkostemps64")] - pub fn mkostemps(template: *mut c_char, suffixlen: c_int, flags: c_int) -> c_int; - - pub fn getdomainname(name: *mut c_char, len: size_t) -> c_int; - pub fn setdomainname(name: *const c_char, len: size_t) -> c_int; -} - -// LFS64 extensions -// -// * musl and Emscripten has 64-bit versions only so aliases the LFS64 symbols to the standard ones -// * ulibc doesn't have preadv64/pwritev64 -cfg_if! { - if #[cfg(not(any(target_env = "musl", target_os = "emscripten")))] { - extern "C" { - pub fn fstatfs64(fd: c_int, buf: *mut statfs64) -> c_int; - pub fn statvfs64(path: *const c_char, buf: *mut statvfs64) -> c_int; - pub fn fstatvfs64(fd: c_int, buf: *mut statvfs64) -> c_int; - pub fn statfs64(path: *const c_char, buf: *mut statfs64) -> c_int; - pub fn creat64(path: *const c_char, mode: mode_t) -> c_int; - #[cfg_attr(gnu_time_bits64, link_name = "__fstat64_time64")] - pub fn fstat64(fildes: c_int, buf: *mut stat64) -> c_int; - #[cfg_attr(gnu_time_bits64, link_name = "__fstatat64_time64")] - pub fn fstatat64( - dirfd: c_int, - pathname: *const c_char, - buf: *mut stat64, - flags: c_int, - ) -> c_int; - pub fn ftruncate64(fd: c_int, length: off64_t) -> c_int; - pub fn lseek64(fd: c_int, offset: off64_t, whence: c_int) -> off64_t; - #[cfg_attr(gnu_time_bits64, link_name = "__lstat64_time64")] - pub fn lstat64(path: *const c_char, buf: *mut stat64) -> c_int; - pub fn mmap64( - addr: *mut c_void, - len: size_t, - prot: c_int, - flags: c_int, - fd: c_int, - offset: off64_t, - ) -> *mut c_void; - pub fn open64(path: *const c_char, oflag: c_int, ...) -> c_int; - pub fn openat64(fd: c_int, path: *const c_char, oflag: c_int, ...) 
-> c_int; - pub fn posix_fadvise64( - fd: c_int, - offset: off64_t, - len: off64_t, - advise: c_int, - ) -> c_int; - pub fn pread64(fd: c_int, buf: *mut c_void, count: size_t, offset: off64_t) -> ssize_t; - pub fn pwrite64( - fd: c_int, - buf: *const c_void, - count: size_t, - offset: off64_t, - ) -> ssize_t; - pub fn readdir64(dirp: *mut crate::DIR) -> *mut crate::dirent64; - pub fn readdir64_r( - dirp: *mut crate::DIR, - entry: *mut crate::dirent64, - result: *mut *mut crate::dirent64, - ) -> c_int; - #[cfg_attr(gnu_time_bits64, link_name = "__stat64_time64")] - pub fn stat64(path: *const c_char, buf: *mut stat64) -> c_int; - pub fn truncate64(path: *const c_char, length: off64_t) -> c_int; - } - } -} - -cfg_if! { - if #[cfg(not(any( - target_env = "uclibc", - target_env = "musl", - target_os = "emscripten" - )))] { - extern "C" { - pub fn preadv64( - fd: c_int, - iov: *const crate::iovec, - iovcnt: c_int, - offset: off64_t, - ) -> ssize_t; - pub fn pwritev64( - fd: c_int, - iov: *const crate::iovec, - iovcnt: c_int, - offset: off64_t, - ) -> ssize_t; - } - } -} - -cfg_if! { - if #[cfg(not(target_env = "uclibc"))] { - extern "C" { - // uclibc has separate non-const version of this function - pub fn forkpty( - amaster: *mut c_int, - name: *mut c_char, - termp: *const termios, - winp: *const crate::winsize, - ) -> crate::pid_t; - // uclibc has separate non-const version of this function - pub fn openpty( - amaster: *mut c_int, - aslave: *mut c_int, - name: *mut c_char, - termp: *const termios, - winp: *const crate::winsize, - ) -> c_int; - } - } -} - -// The statx syscall, available on some libcs. -cfg_if! { - if #[cfg(any( - target_env = "gnu", - target_os = "android", - all(target_env = "musl", musl_v1_2_3) - ))] { - extern "C" { - pub fn statx( - dirfd: c_int, - pathname: *const c_char, - flags: c_int, - mask: c_uint, - statxbuf: *mut statx, - ) -> c_int; - } - } -} - -cfg_if! { - if #[cfg(target_os = "emscripten")] { - mod emscripten; - pub use self::emscripten::*; - } else if #[cfg(target_os = "linux")] { - mod linux; - pub use self::linux::*; - } else if #[cfg(target_os = "l4re")] { - mod linux; - pub use self::linux::*; - } else if #[cfg(target_os = "android")] { - mod android; - pub use self::android::*; - } else { - // Unknown target_os - } -} diff --git a/vendor/libc/src/unix/mod.rs b/vendor/libc/src/unix/mod.rs deleted file mode 100644 index 6ba5d87de7ca09..00000000000000 --- a/vendor/libc/src/unix/mod.rs +++ /dev/null @@ -1,1901 +0,0 @@ -//! Definitions found commonly among almost all Unix derivatives -//! -//! More functions and definitions can be found in the more specific modules -//! according to the platform in question. - -use crate::prelude::*; - -pub type intmax_t = i64; -pub type uintmax_t = u64; - -pub type size_t = usize; -pub type ptrdiff_t = isize; -pub type intptr_t = isize; -pub type uintptr_t = usize; -pub type ssize_t = isize; - -pub type pid_t = i32; -pub type in_addr_t = u32; -pub type in_port_t = u16; -pub type sighandler_t = size_t; -pub type cc_t = c_uchar; - -cfg_if! { - if #[cfg(any( - target_os = "espidf", - target_os = "horizon", - target_os = "vita" - ))] { - pub type uid_t = c_ushort; - pub type gid_t = c_ushort; - } else if #[cfg(target_os = "nto")] { - pub type uid_t = i32; - pub type gid_t = i32; - } else { - pub type uid_t = u32; - pub type gid_t = u32; - } -} - -missing! { - #[derive(Debug)] - pub enum DIR {} -} -pub type locale_t = *mut c_void; - -s! 
{ - pub struct group { - pub gr_name: *mut c_char, - pub gr_passwd: *mut c_char, - pub gr_gid: crate::gid_t, - pub gr_mem: *mut *mut c_char, - } - - pub struct utimbuf { - pub actime: time_t, - pub modtime: time_t, - } - - pub struct timeval { - pub tv_sec: time_t, - #[cfg(not(gnu_time_bits64))] - pub tv_usec: suseconds_t, - // For 64 bit time on 32 bit linux glibc, suseconds_t is still - // a 32 bit type. Use __suseconds64_t instead - #[cfg(gnu_time_bits64)] - pub tv_usec: __suseconds64_t, - } - - // linux x32 compatibility - // See https://sourceware.org/bugzilla/show_bug.cgi?id=16437 - #[cfg(not(target_env = "gnu"))] - pub struct timespec { - pub tv_sec: time_t, - #[cfg(all(target_arch = "x86_64", target_pointer_width = "32"))] - pub tv_nsec: i64, - #[cfg(not(all(target_arch = "x86_64", target_pointer_width = "32")))] - pub tv_nsec: c_long, - } - - pub struct rlimit { - pub rlim_cur: rlim_t, - pub rlim_max: rlim_t, - } - - pub struct rusage { - pub ru_utime: timeval, - pub ru_stime: timeval, - pub ru_maxrss: c_long, - #[cfg(all(target_arch = "x86_64", target_pointer_width = "32"))] - __pad1: u32, - pub ru_ixrss: c_long, - #[cfg(all(target_arch = "x86_64", target_pointer_width = "32"))] - __pad2: u32, - pub ru_idrss: c_long, - #[cfg(all(target_arch = "x86_64", target_pointer_width = "32"))] - __pad3: u32, - pub ru_isrss: c_long, - #[cfg(all(target_arch = "x86_64", target_pointer_width = "32"))] - __pad4: u32, - pub ru_minflt: c_long, - #[cfg(all(target_arch = "x86_64", target_pointer_width = "32"))] - __pad5: u32, - pub ru_majflt: c_long, - #[cfg(all(target_arch = "x86_64", target_pointer_width = "32"))] - __pad6: u32, - pub ru_nswap: c_long, - #[cfg(all(target_arch = "x86_64", target_pointer_width = "32"))] - __pad7: u32, - pub ru_inblock: c_long, - #[cfg(all(target_arch = "x86_64", target_pointer_width = "32"))] - __pad8: u32, - pub ru_oublock: c_long, - #[cfg(all(target_arch = "x86_64", target_pointer_width = "32"))] - __pad9: u32, - pub ru_msgsnd: c_long, - #[cfg(all(target_arch = "x86_64", target_pointer_width = "32"))] - __pad10: u32, - pub ru_msgrcv: c_long, - #[cfg(all(target_arch = "x86_64", target_pointer_width = "32"))] - __pad11: u32, - pub ru_nsignals: c_long, - #[cfg(all(target_arch = "x86_64", target_pointer_width = "32"))] - __pad12: u32, - pub ru_nvcsw: c_long, - #[cfg(all(target_arch = "x86_64", target_pointer_width = "32"))] - __pad13: u32, - pub ru_nivcsw: c_long, - #[cfg(all(target_arch = "x86_64", target_pointer_width = "32"))] - __pad14: u32, - - #[cfg(any(target_env = "musl", target_env = "ohos", target_os = "emscripten"))] - __reserved: [c_long; 16], - } - - pub struct ipv6_mreq { - pub ipv6mr_multiaddr: in6_addr, - #[cfg(target_os = "android")] - pub ipv6mr_interface: c_int, - #[cfg(not(target_os = "android"))] - pub ipv6mr_interface: c_uint, - } - - #[cfg(not(target_os = "cygwin"))] - pub struct hostent { - pub h_name: *mut c_char, - pub h_aliases: *mut *mut c_char, - pub h_addrtype: c_int, - pub h_length: c_int, - pub h_addr_list: *mut *mut c_char, - } - - pub struct iovec { - pub iov_base: *mut c_void, - pub iov_len: size_t, - } - - pub struct pollfd { - pub fd: c_int, - pub events: c_short, - pub revents: c_short, - } - - pub struct winsize { - pub ws_row: c_ushort, - pub ws_col: c_ushort, - pub ws_xpixel: c_ushort, - pub ws_ypixel: c_ushort, - } - - #[cfg(not(target_os = "cygwin"))] - pub struct linger { - pub l_onoff: c_int, - pub l_linger: c_int, - } - - pub struct sigval { - // Actually a union of an int and a void* - pub sival_ptr: *mut c_void, - } - - 
// - pub struct itimerval { - pub it_interval: crate::timeval, - pub it_value: crate::timeval, - } - - // - pub struct tms { - pub tms_utime: crate::clock_t, - pub tms_stime: crate::clock_t, - pub tms_cutime: crate::clock_t, - pub tms_cstime: crate::clock_t, - } - - pub struct servent { - pub s_name: *mut c_char, - pub s_aliases: *mut *mut c_char, - #[cfg(target_os = "cygwin")] - pub s_port: c_short, - #[cfg(not(target_os = "cygwin"))] - pub s_port: c_int, - pub s_proto: *mut c_char, - } - - pub struct protoent { - pub p_name: *mut c_char, - pub p_aliases: *mut *mut c_char, - #[cfg(not(target_os = "cygwin"))] - pub p_proto: c_int, - #[cfg(target_os = "cygwin")] - pub p_proto: c_short, - } - - #[repr(align(4))] - pub struct in6_addr { - pub s6_addr: [u8; 16], - } -} - -pub const INT_MIN: c_int = -2147483648; -pub const INT_MAX: c_int = 2147483647; - -pub const SIG_DFL: sighandler_t = 0 as sighandler_t; -pub const SIG_IGN: sighandler_t = 1 as sighandler_t; -pub const SIG_ERR: sighandler_t = !0 as sighandler_t; - -cfg_if! { - if #[cfg(all(not(target_os = "nto"), not(target_os = "aix")))] { - pub const DT_UNKNOWN: u8 = 0; - pub const DT_FIFO: u8 = 1; - pub const DT_CHR: u8 = 2; - pub const DT_DIR: u8 = 4; - pub const DT_BLK: u8 = 6; - pub const DT_REG: u8 = 8; - pub const DT_LNK: u8 = 10; - pub const DT_SOCK: u8 = 12; - } -} -cfg_if! { - if #[cfg(not(target_os = "redox"))] { - pub const FD_CLOEXEC: c_int = 0x1; - } -} - -cfg_if! { - if #[cfg(not(target_os = "nto"))] { - pub const USRQUOTA: c_int = 0; - pub const GRPQUOTA: c_int = 1; - } -} -pub const SIGIOT: c_int = 6; - -pub const S_ISUID: mode_t = 0o4000; -pub const S_ISGID: mode_t = 0o2000; -pub const S_ISVTX: mode_t = 0o1000; - -cfg_if! { - if #[cfg(not(any( - target_os = "haiku", - target_os = "illumos", - target_os = "solaris", - target_os = "cygwin" - )))] { - pub const IF_NAMESIZE: size_t = 16; - pub const IFNAMSIZ: size_t = IF_NAMESIZE; - } -} - -pub const LOG_EMERG: c_int = 0; -pub const LOG_ALERT: c_int = 1; -pub const LOG_CRIT: c_int = 2; -pub const LOG_ERR: c_int = 3; -pub const LOG_WARNING: c_int = 4; -pub const LOG_NOTICE: c_int = 5; -pub const LOG_INFO: c_int = 6; -pub const LOG_DEBUG: c_int = 7; - -pub const LOG_KERN: c_int = 0; -pub const LOG_USER: c_int = 1 << 3; -pub const LOG_MAIL: c_int = 2 << 3; -pub const LOG_DAEMON: c_int = 3 << 3; -pub const LOG_AUTH: c_int = 4 << 3; -pub const LOG_SYSLOG: c_int = 5 << 3; -pub const LOG_LPR: c_int = 6 << 3; -pub const LOG_NEWS: c_int = 7 << 3; -pub const LOG_UUCP: c_int = 8 << 3; -pub const LOG_LOCAL0: c_int = 16 << 3; -pub const LOG_LOCAL1: c_int = 17 << 3; -pub const LOG_LOCAL2: c_int = 18 << 3; -pub const LOG_LOCAL3: c_int = 19 << 3; -pub const LOG_LOCAL4: c_int = 20 << 3; -pub const LOG_LOCAL5: c_int = 21 << 3; -pub const LOG_LOCAL6: c_int = 22 << 3; -pub const LOG_LOCAL7: c_int = 23 << 3; - -cfg_if! { - if #[cfg(not(target_os = "haiku"))] { - pub const LOG_PID: c_int = 0x01; - pub const LOG_CONS: c_int = 0x02; - pub const LOG_ODELAY: c_int = 0x04; - pub const LOG_NDELAY: c_int = 0x08; - pub const LOG_NOWAIT: c_int = 0x10; - } -} -pub const LOG_PRIMASK: c_int = 7; -pub const LOG_FACMASK: c_int = 0x3f8; - -cfg_if! 
{ - if #[cfg(not(target_os = "nto"))] { - pub const PRIO_MIN: c_int = -20; - pub const PRIO_MAX: c_int = 20; - } -} -pub const IPPROTO_ICMP: c_int = 1; -pub const IPPROTO_ICMPV6: c_int = 58; -pub const IPPROTO_TCP: c_int = 6; -pub const IPPROTO_UDP: c_int = 17; -pub const IPPROTO_IP: c_int = 0; -pub const IPPROTO_IPV6: c_int = 41; - -pub const INADDR_LOOPBACK: in_addr_t = 2130706433; -pub const INADDR_ANY: in_addr_t = 0; -pub const INADDR_BROADCAST: in_addr_t = 4294967295; -pub const INADDR_NONE: in_addr_t = 4294967295; - -pub const IN6ADDR_LOOPBACK_INIT: in6_addr = in6_addr { - s6_addr: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1], -}; - -pub const IN6ADDR_ANY_INIT: in6_addr = in6_addr { - s6_addr: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], -}; - -pub const ARPOP_REQUEST: u16 = 1; -pub const ARPOP_REPLY: u16 = 2; - -pub const ATF_COM: c_int = 0x02; -pub const ATF_PERM: c_int = 0x04; -pub const ATF_PUBL: c_int = 0x08; -pub const ATF_USETRAILERS: c_int = 0x10; - -cfg_if! { - if #[cfg(any(target_os = "nto", target_os = "aix"))] { - pub const FNM_PERIOD: c_int = 1 << 1; - } else { - pub const FNM_PERIOD: c_int = 1 << 2; - } -} -pub const FNM_NOMATCH: c_int = 1; - -cfg_if! { - if #[cfg(any(target_os = "illumos", target_os = "solaris",))] { - pub const FNM_CASEFOLD: c_int = 1 << 3; - } else if #[cfg(not(target_os = "aix"))] { - pub const FNM_CASEFOLD: c_int = 1 << 4; - } -} - -cfg_if! { - if #[cfg(any( - target_os = "macos", - target_os = "freebsd", - target_os = "android", - target_os = "openbsd", - target_os = "cygwin", - ))] { - pub const FNM_PATHNAME: c_int = 1 << 1; - } else { - pub const FNM_PATHNAME: c_int = 1 << 0; - } -} - -cfg_if! { - if #[cfg(any( - target_os = "macos", - target_os = "freebsd", - target_os = "android", - target_os = "openbsd", - ))] { - pub const FNM_NOESCAPE: c_int = 1 << 0; - } else if #[cfg(target_os = "nto")] { - pub const FNM_NOESCAPE: c_int = 1 << 2; - } else if #[cfg(target_os = "aix")] { - pub const FNM_NOESCAPE: c_int = 1 << 3; - } else { - pub const FNM_NOESCAPE: c_int = 1 << 1; - } -} - -extern "C" { - pub static in6addr_loopback: in6_addr; - pub static in6addr_any: in6_addr; -} - -cfg_if! { - if #[cfg(any( - target_os = "l4re", - target_os = "espidf", - target_os = "nuttx" - ))] { - // required libraries are linked externally for these platforms: - // * L4Re - // * ESP-IDF - // * NuttX - } else if #[cfg(feature = "std")] { - // cargo build, don't pull in anything extra as the std dep - // already pulls in all libs. 
- } else if #[cfg(all( - any( - all( - target_os = "linux", - any(target_env = "gnu", target_env = "uclibc") - ), - target_os = "cygwin" - ), - feature = "rustc-dep-of-std" - ))] { - #[link( - name = "util", - kind = "static", - modifiers = "-bundle", - cfg(target_feature = "crt-static") - )] - #[link( - name = "rt", - kind = "static", - modifiers = "-bundle", - cfg(target_feature = "crt-static") - )] - #[link( - name = "pthread", - kind = "static", - modifiers = "-bundle", - cfg(target_feature = "crt-static") - )] - #[link( - name = "m", - kind = "static", - modifiers = "-bundle", - cfg(target_feature = "crt-static") - )] - #[link( - name = "dl", - kind = "static", - modifiers = "-bundle", - cfg(target_feature = "crt-static") - )] - #[link( - name = "c", - kind = "static", - modifiers = "-bundle", - cfg(target_feature = "crt-static") - )] - #[link( - name = "gcc_eh", - kind = "static", - modifiers = "-bundle", - cfg(target_feature = "crt-static") - )] - #[link( - name = "gcc", - kind = "static", - modifiers = "-bundle", - cfg(target_feature = "crt-static") - )] - #[link( - name = "c", - kind = "static", - modifiers = "-bundle", - cfg(target_feature = "crt-static") - )] - #[link(name = "util", cfg(not(target_feature = "crt-static")))] - #[link(name = "rt", cfg(not(target_feature = "crt-static")))] - #[link(name = "pthread", cfg(not(target_feature = "crt-static")))] - #[link(name = "m", cfg(not(target_feature = "crt-static")))] - #[link(name = "dl", cfg(not(target_feature = "crt-static")))] - #[link(name = "c", cfg(not(target_feature = "crt-static")))] - extern "C" {} - } else if #[cfg(any(target_env = "musl", target_env = "ohos"))] { - #[cfg_attr( - feature = "rustc-dep-of-std", - link( - name = "c", - kind = "static", - modifiers = "-bundle", - cfg(target_feature = "crt-static") - ) - )] - #[cfg_attr( - feature = "rustc-dep-of-std", - link(name = "c", cfg(not(target_feature = "crt-static"))) - )] - extern "C" {} - } else if #[cfg(target_os = "emscripten")] { - // Don't pass -lc to Emscripten, it breaks. 
See: - // https://github.com/emscripten-core/emscripten/issues/22758 - } else if #[cfg(all(target_os = "android", feature = "rustc-dep-of-std"))] { - #[link( - name = "c", - kind = "static", - modifiers = "-bundle", - cfg(target_feature = "crt-static") - )] - #[link( - name = "m", - kind = "static", - modifiers = "-bundle", - cfg(target_feature = "crt-static") - )] - #[link(name = "m", cfg(not(target_feature = "crt-static")))] - #[link(name = "c", cfg(not(target_feature = "crt-static")))] - extern "C" {} - } else if #[cfg(any( - target_os = "macos", - target_os = "ios", - target_os = "tvos", - target_os = "watchos", - target_os = "visionos", - target_os = "android", - target_os = "openbsd", - target_os = "nto", - ))] { - #[link(name = "c")] - #[link(name = "m")] - extern "C" {} - } else if #[cfg(target_os = "haiku")] { - #[link(name = "root")] - #[link(name = "network")] - extern "C" {} - } else if #[cfg(target_env = "newlib")] { - #[link(name = "c")] - #[link(name = "m")] - extern "C" {} - } else if #[cfg(target_env = "illumos")] { - #[link(name = "c")] - #[link(name = "m")] - extern "C" {} - } else if #[cfg(target_os = "redox")] { - #[cfg_attr( - feature = "rustc-dep-of-std", - link( - name = "c", - kind = "static", - modifiers = "-bundle", - cfg(target_feature = "crt-static") - ) - )] - #[cfg_attr( - feature = "rustc-dep-of-std", - link(name = "c", cfg(not(target_feature = "crt-static"))) - )] - extern "C" {} - } else if #[cfg(target_os = "aix")] { - #[link(name = "c")] - #[link(name = "m")] - #[link(name = "bsd")] - #[link(name = "pthread")] - extern "C" {} - } else { - #[link(name = "c")] - #[link(name = "m")] - #[link(name = "rt")] - #[link(name = "pthread")] - extern "C" {} - } -} - -cfg_if! { - if #[cfg(not(all(target_os = "linux", target_env = "gnu")))] { - missing! { - #[derive(Debug)] - pub enum fpos_t {} // FIXME(unix): fill this out with a struct - } - } -} - -missing! 
{ - #[derive(Debug)] - pub enum FILE {} -} - -extern "C" { - pub fn isalnum(c: c_int) -> c_int; - pub fn isalpha(c: c_int) -> c_int; - pub fn iscntrl(c: c_int) -> c_int; - pub fn isdigit(c: c_int) -> c_int; - pub fn isgraph(c: c_int) -> c_int; - pub fn islower(c: c_int) -> c_int; - pub fn isprint(c: c_int) -> c_int; - pub fn ispunct(c: c_int) -> c_int; - pub fn isspace(c: c_int) -> c_int; - pub fn isupper(c: c_int) -> c_int; - pub fn isxdigit(c: c_int) -> c_int; - pub fn isblank(c: c_int) -> c_int; - pub fn tolower(c: c_int) -> c_int; - pub fn toupper(c: c_int) -> c_int; - pub fn qsort( - base: *mut c_void, - num: size_t, - size: size_t, - compar: Option c_int>, - ); - pub fn bsearch( - key: *const c_void, - base: *const c_void, - num: size_t, - size: size_t, - compar: Option c_int>, - ) -> *mut c_void; - #[cfg_attr( - all(target_os = "macos", target_arch = "x86"), - link_name = "fopen$UNIX2003" - )] - #[cfg_attr(gnu_file_offset_bits64, link_name = "fopen64")] - pub fn fopen(filename: *const c_char, mode: *const c_char) -> *mut FILE; - #[cfg_attr( - all(target_os = "macos", target_arch = "x86"), - link_name = "freopen$UNIX2003" - )] - #[cfg_attr(gnu_file_offset_bits64, link_name = "freopen64")] - pub fn freopen(filename: *const c_char, mode: *const c_char, file: *mut FILE) -> *mut FILE; - - pub fn fflush(file: *mut FILE) -> c_int; - pub fn fclose(file: *mut FILE) -> c_int; - pub fn remove(filename: *const c_char) -> c_int; - pub fn rename(oldname: *const c_char, newname: *const c_char) -> c_int; - #[cfg_attr(gnu_file_offset_bits64, link_name = "tmpfile64")] - pub fn tmpfile() -> *mut FILE; - pub fn setvbuf(stream: *mut FILE, buffer: *mut c_char, mode: c_int, size: size_t) -> c_int; - pub fn setbuf(stream: *mut FILE, buf: *mut c_char); - pub fn getchar() -> c_int; - pub fn putchar(c: c_int) -> c_int; - pub fn fgetc(stream: *mut FILE) -> c_int; - pub fn fgets(buf: *mut c_char, n: c_int, stream: *mut FILE) -> *mut c_char; - pub fn fputc(c: c_int, stream: *mut FILE) -> c_int; - #[cfg_attr( - all(target_os = "macos", target_arch = "x86"), - link_name = "fputs$UNIX2003" - )] - pub fn fputs(s: *const c_char, stream: *mut FILE) -> c_int; - pub fn puts(s: *const c_char) -> c_int; - pub fn ungetc(c: c_int, stream: *mut FILE) -> c_int; - pub fn fread(ptr: *mut c_void, size: size_t, nobj: size_t, stream: *mut FILE) -> size_t; - #[cfg_attr( - all(target_os = "macos", target_arch = "x86"), - link_name = "fwrite$UNIX2003" - )] - pub fn fwrite(ptr: *const c_void, size: size_t, nobj: size_t, stream: *mut FILE) -> size_t; - pub fn fseek(stream: *mut FILE, offset: c_long, whence: c_int) -> c_int; - pub fn ftell(stream: *mut FILE) -> c_long; - pub fn rewind(stream: *mut FILE); - #[cfg_attr(target_os = "netbsd", link_name = "__fgetpos50")] - #[cfg_attr(gnu_file_offset_bits64, link_name = "fgetpos64")] - pub fn fgetpos(stream: *mut FILE, ptr: *mut fpos_t) -> c_int; - #[cfg_attr(target_os = "netbsd", link_name = "__fsetpos50")] - #[cfg_attr(gnu_file_offset_bits64, link_name = "fsetpos64")] - pub fn fsetpos(stream: *mut FILE, ptr: *const fpos_t) -> c_int; - pub fn feof(stream: *mut FILE) -> c_int; - pub fn ferror(stream: *mut FILE) -> c_int; - pub fn clearerr(stream: *mut FILE); - pub fn perror(s: *const c_char); - pub fn atof(s: *const c_char) -> c_double; - pub fn atoi(s: *const c_char) -> c_int; - pub fn atol(s: *const c_char) -> c_long; - pub fn atoll(s: *const c_char) -> c_longlong; - #[cfg_attr( - all(target_os = "macos", target_arch = "x86"), - link_name = "strtod$UNIX2003" - )] - pub fn strtod(s: *const 
c_char, endp: *mut *mut c_char) -> c_double; - pub fn strtof(s: *const c_char, endp: *mut *mut c_char) -> c_float; - pub fn strtol(s: *const c_char, endp: *mut *mut c_char, base: c_int) -> c_long; - pub fn strtoll(s: *const c_char, endp: *mut *mut c_char, base: c_int) -> c_longlong; - pub fn strtoul(s: *const c_char, endp: *mut *mut c_char, base: c_int) -> c_ulong; - pub fn strtoull(s: *const c_char, endp: *mut *mut c_char, base: c_int) -> c_ulonglong; - #[cfg_attr(target_os = "aix", link_name = "vec_calloc")] - pub fn calloc(nobj: size_t, size: size_t) -> *mut c_void; - #[cfg_attr(target_os = "aix", link_name = "vec_malloc")] - pub fn malloc(size: size_t) -> *mut c_void; - pub fn realloc(p: *mut c_void, size: size_t) -> *mut c_void; - pub fn free(p: *mut c_void); - pub fn abort() -> !; - pub fn exit(status: c_int) -> !; - pub fn _exit(status: c_int) -> !; - #[cfg_attr( - all(target_os = "macos", target_arch = "x86"), - link_name = "system$UNIX2003" - )] - pub fn system(s: *const c_char) -> c_int; - pub fn getenv(s: *const c_char) -> *mut c_char; - - pub fn strcpy(dst: *mut c_char, src: *const c_char) -> *mut c_char; - pub fn strncpy(dst: *mut c_char, src: *const c_char, n: size_t) -> *mut c_char; - pub fn stpcpy(dst: *mut c_char, src: *const c_char) -> *mut c_char; - pub fn strcat(s: *mut c_char, ct: *const c_char) -> *mut c_char; - pub fn strncat(s: *mut c_char, ct: *const c_char, n: size_t) -> *mut c_char; - pub fn strcmp(cs: *const c_char, ct: *const c_char) -> c_int; - pub fn strncmp(cs: *const c_char, ct: *const c_char, n: size_t) -> c_int; - pub fn strcoll(cs: *const c_char, ct: *const c_char) -> c_int; - pub fn strchr(cs: *const c_char, c: c_int) -> *mut c_char; - pub fn strrchr(cs: *const c_char, c: c_int) -> *mut c_char; - pub fn strspn(cs: *const c_char, ct: *const c_char) -> size_t; - pub fn strcspn(cs: *const c_char, ct: *const c_char) -> size_t; - pub fn strdup(cs: *const c_char) -> *mut c_char; - pub fn strndup(cs: *const c_char, n: size_t) -> *mut c_char; - pub fn strpbrk(cs: *const c_char, ct: *const c_char) -> *mut c_char; - pub fn strstr(cs: *const c_char, ct: *const c_char) -> *mut c_char; - pub fn strcasecmp(s1: *const c_char, s2: *const c_char) -> c_int; - pub fn strncasecmp(s1: *const c_char, s2: *const c_char, n: size_t) -> c_int; - pub fn strlen(cs: *const c_char) -> size_t; - pub fn strnlen(cs: *const c_char, maxlen: size_t) -> size_t; - #[cfg_attr( - all(target_os = "macos", target_arch = "x86"), - link_name = "strerror$UNIX2003" - )] - pub fn strerror(n: c_int) -> *mut c_char; - pub fn strtok(s: *mut c_char, t: *const c_char) -> *mut c_char; - pub fn strtok_r(s: *mut c_char, t: *const c_char, p: *mut *mut c_char) -> *mut c_char; - pub fn strxfrm(s: *mut c_char, ct: *const c_char, n: size_t) -> size_t; - pub fn strsignal(sig: c_int) -> *mut c_char; - pub fn wcslen(buf: *const wchar_t) -> size_t; - pub fn wcstombs(dest: *mut c_char, src: *const wchar_t, n: size_t) -> size_t; - - pub fn memchr(cx: *const c_void, c: c_int, n: size_t) -> *mut c_void; - pub fn wmemchr(cx: *const wchar_t, c: wchar_t, n: size_t) -> *mut wchar_t; - pub fn memcmp(cx: *const c_void, ct: *const c_void, n: size_t) -> c_int; - pub fn memcpy(dest: *mut c_void, src: *const c_void, n: size_t) -> *mut c_void; - pub fn memmove(dest: *mut c_void, src: *const c_void, n: size_t) -> *mut c_void; - pub fn memset(dest: *mut c_void, c: c_int, n: size_t) -> *mut c_void; - pub fn memccpy(dest: *mut c_void, src: *const c_void, c: c_int, n: size_t) -> *mut c_void; -} - -extern "C" { - #[cfg_attr(target_os = 
"netbsd", link_name = "__getpwnam50")] - pub fn getpwnam(name: *const c_char) -> *mut passwd; - #[cfg_attr(target_os = "netbsd", link_name = "__getpwuid50")] - pub fn getpwuid(uid: crate::uid_t) -> *mut passwd; - - pub fn fprintf(stream: *mut crate::FILE, format: *const c_char, ...) -> c_int; - pub fn printf(format: *const c_char, ...) -> c_int; - pub fn snprintf(s: *mut c_char, n: size_t, format: *const c_char, ...) -> c_int; - pub fn sprintf(s: *mut c_char, format: *const c_char, ...) -> c_int; - #[cfg_attr( - all(target_os = "linux", not(target_env = "uclibc")), - link_name = "__isoc99_fscanf" - )] - pub fn fscanf(stream: *mut crate::FILE, format: *const c_char, ...) -> c_int; - #[cfg_attr( - all(target_os = "linux", not(target_env = "uclibc")), - link_name = "__isoc99_scanf" - )] - pub fn scanf(format: *const c_char, ...) -> c_int; - #[cfg_attr( - all(target_os = "linux", not(target_env = "uclibc")), - link_name = "__isoc99_sscanf" - )] - pub fn sscanf(s: *const c_char, format: *const c_char, ...) -> c_int; - pub fn getchar_unlocked() -> c_int; - pub fn putchar_unlocked(c: c_int) -> c_int; - - #[cfg(not(all(target_arch = "powerpc", target_vendor = "nintendo")))] - #[cfg_attr(target_os = "netbsd", link_name = "__socket30")] - #[cfg_attr(target_os = "illumos", link_name = "__xnet_socket")] - #[cfg_attr(target_os = "solaris", link_name = "__xnet7_socket")] - #[cfg_attr(target_os = "espidf", link_name = "lwip_socket")] - pub fn socket(domain: c_int, ty: c_int, protocol: c_int) -> c_int; - #[cfg(not(all(target_arch = "powerpc", target_vendor = "nintendo")))] - #[cfg_attr( - all(target_os = "macos", target_arch = "x86"), - link_name = "connect$UNIX2003" - )] - #[cfg_attr( - any(target_os = "illumos", target_os = "solaris"), - link_name = "__xnet_connect" - )] - #[cfg_attr(target_os = "espidf", link_name = "lwip_connect")] - pub fn connect(socket: c_int, address: *const sockaddr, len: socklen_t) -> c_int; - #[cfg_attr( - all(target_os = "macos", target_arch = "x86"), - link_name = "listen$UNIX2003" - )] - #[cfg_attr(target_os = "espidf", link_name = "lwip_listen")] - pub fn listen(socket: c_int, backlog: c_int) -> c_int; - #[cfg(not(all(target_arch = "powerpc", target_vendor = "nintendo")))] - #[cfg_attr( - all(target_os = "macos", target_arch = "x86"), - link_name = "accept$UNIX2003" - )] - #[cfg_attr(target_os = "espidf", link_name = "lwip_accept")] - #[cfg_attr(target_os = "aix", link_name = "naccept")] - pub fn accept(socket: c_int, address: *mut sockaddr, address_len: *mut socklen_t) -> c_int; - #[cfg(not(all(target_arch = "powerpc", target_vendor = "nintendo")))] - #[cfg_attr( - all(target_os = "macos", target_arch = "x86"), - link_name = "getpeername$UNIX2003" - )] - #[cfg_attr(target_os = "espidf", link_name = "lwip_getpeername")] - #[cfg_attr(target_os = "aix", link_name = "ngetpeername")] - pub fn getpeername(socket: c_int, address: *mut sockaddr, address_len: *mut socklen_t) - -> c_int; - #[cfg(not(all(target_arch = "powerpc", target_vendor = "nintendo")))] - #[cfg_attr( - all(target_os = "macos", target_arch = "x86"), - link_name = "getsockname$UNIX2003" - )] - #[cfg_attr(target_os = "espidf", link_name = "lwip_getsockname")] - #[cfg_attr(target_os = "aix", link_name = "ngetsockname")] - pub fn getsockname(socket: c_int, address: *mut sockaddr, address_len: *mut socklen_t) - -> c_int; - #[cfg_attr(target_os = "espidf", link_name = "lwip_setsockopt")] - #[cfg_attr(gnu_time_bits64, link_name = "__setsockopt64")] - pub fn setsockopt( - socket: c_int, - level: c_int, - name: c_int, - 
value: *const c_void, - option_len: socklen_t, - ) -> c_int; - #[cfg_attr( - all(target_os = "macos", target_arch = "x86"), - link_name = "socketpair$UNIX2003" - )] - #[cfg_attr( - any(target_os = "illumos", target_os = "solaris"), - link_name = "__xnet_socketpair" - )] - pub fn socketpair( - domain: c_int, - type_: c_int, - protocol: c_int, - socket_vector: *mut c_int, - ) -> c_int; - #[cfg(not(all(target_arch = "powerpc", target_vendor = "nintendo")))] - #[cfg_attr( - all(target_os = "macos", target_arch = "x86"), - link_name = "sendto$UNIX2003" - )] - #[cfg_attr( - any(target_os = "illumos", target_os = "solaris"), - link_name = "__xnet_sendto" - )] - #[cfg_attr(target_os = "espidf", link_name = "lwip_sendto")] - pub fn sendto( - socket: c_int, - buf: *const c_void, - len: size_t, - flags: c_int, - addr: *const sockaddr, - addrlen: socklen_t, - ) -> ssize_t; - #[cfg_attr(target_os = "espidf", link_name = "lwip_shutdown")] - pub fn shutdown(socket: c_int, how: c_int) -> c_int; - - #[cfg_attr( - all(target_os = "macos", target_arch = "x86"), - link_name = "chmod$UNIX2003" - )] - pub fn chmod(path: *const c_char, mode: mode_t) -> c_int; - #[cfg_attr( - all(target_os = "macos", target_arch = "x86"), - link_name = "fchmod$UNIX2003" - )] - pub fn fchmod(fd: c_int, mode: mode_t) -> c_int; - - #[cfg_attr( - all(target_os = "macos", not(target_arch = "aarch64")), - link_name = "fstat$INODE64" - )] - #[cfg_attr(target_os = "netbsd", link_name = "__fstat50")] - #[cfg_attr( - all(target_os = "freebsd", any(freebsd11, freebsd10)), - link_name = "fstat@FBSD_1.0" - )] - #[cfg_attr(gnu_time_bits64, link_name = "__fstat64_time64")] - #[cfg_attr( - all(not(gnu_time_bits64), gnu_file_offset_bits64), - link_name = "fstat64" - )] - pub fn fstat(fildes: c_int, buf: *mut stat) -> c_int; - - pub fn mkdir(path: *const c_char, mode: mode_t) -> c_int; - - #[cfg_attr( - all(target_os = "macos", not(target_arch = "aarch64")), - link_name = "stat$INODE64" - )] - #[cfg_attr(target_os = "netbsd", link_name = "__stat50")] - #[cfg_attr( - all(target_os = "freebsd", any(freebsd11, freebsd10)), - link_name = "stat@FBSD_1.0" - )] - #[cfg_attr(gnu_time_bits64, link_name = "__stat64_time64")] - #[cfg_attr( - all(not(gnu_time_bits64), gnu_file_offset_bits64), - link_name = "stat64" - )] - pub fn stat(path: *const c_char, buf: *mut stat) -> c_int; - - pub fn pclose(stream: *mut crate::FILE) -> c_int; - #[cfg_attr( - all(target_os = "macos", target_arch = "x86"), - link_name = "fdopen$UNIX2003" - )] - pub fn fdopen(fd: c_int, mode: *const c_char) -> *mut crate::FILE; - pub fn fileno(stream: *mut crate::FILE) -> c_int; - - #[cfg_attr( - all(target_os = "macos", target_arch = "x86"), - link_name = "open$UNIX2003" - )] - #[cfg_attr(gnu_file_offset_bits64, link_name = "open64")] - pub fn open(path: *const c_char, oflag: c_int, ...) -> c_int; - #[cfg_attr( - all(target_os = "macos", target_arch = "x86"), - link_name = "creat$UNIX2003" - )] - #[cfg_attr(gnu_file_offset_bits64, link_name = "creat64")] - pub fn creat(path: *const c_char, mode: mode_t) -> c_int; - #[cfg_attr( - all(target_os = "macos", target_arch = "x86"), - link_name = "fcntl$UNIX2003" - )] - #[cfg_attr(gnu_time_bits64, link_name = "__fcntl_time64")] - #[cfg_attr( - all(not(gnu_time_bits64), gnu_file_offset_bits64), - link_name = "__fcntl_time64" - )] - pub fn fcntl(fd: c_int, cmd: c_int, ...) 
-> c_int; - - #[cfg_attr( - all(target_os = "macos", target_arch = "x86_64"), - link_name = "opendir$INODE64" - )] - #[cfg_attr( - all(target_os = "macos", target_arch = "x86"), - link_name = "opendir$INODE64$UNIX2003" - )] - #[cfg_attr(target_os = "netbsd", link_name = "__opendir30")] - pub fn opendir(dirname: *const c_char) -> *mut crate::DIR; - - #[cfg_attr( - all(target_os = "macos", not(target_arch = "aarch64")), - link_name = "readdir$INODE64" - )] - #[cfg_attr(target_os = "netbsd", link_name = "__readdir30")] - #[cfg_attr( - all(target_os = "freebsd", any(freebsd11, freebsd10)), - link_name = "readdir@FBSD_1.0" - )] - #[cfg_attr(gnu_file_offset_bits64, link_name = "readdir64")] - pub fn readdir(dirp: *mut crate::DIR) -> *mut crate::dirent; - #[cfg_attr( - all(target_os = "macos", target_arch = "x86"), - link_name = "closedir$UNIX2003" - )] - pub fn closedir(dirp: *mut crate::DIR) -> c_int; - #[cfg_attr( - all(target_os = "macos", target_arch = "x86_64"), - link_name = "rewinddir$INODE64" - )] - #[cfg_attr( - all(target_os = "macos", target_arch = "x86"), - link_name = "rewinddir$INODE64$UNIX2003" - )] - pub fn rewinddir(dirp: *mut crate::DIR); - - pub fn fchmodat(dirfd: c_int, pathname: *const c_char, mode: mode_t, flags: c_int) -> c_int; - pub fn fchown(fd: c_int, owner: crate::uid_t, group: crate::gid_t) -> c_int; - pub fn fchownat( - dirfd: c_int, - pathname: *const c_char, - owner: crate::uid_t, - group: crate::gid_t, - flags: c_int, - ) -> c_int; - #[cfg_attr( - all(target_os = "macos", not(target_arch = "aarch64")), - link_name = "fstatat$INODE64" - )] - #[cfg_attr( - all(target_os = "freebsd", any(freebsd11, freebsd10)), - link_name = "fstatat@FBSD_1.1" - )] - #[cfg_attr(gnu_time_bits64, link_name = "__fstatat64_time64")] - #[cfg_attr( - all(not(gnu_time_bits64), gnu_file_offset_bits64), - link_name = "fstatat64" - )] - pub fn fstatat(dirfd: c_int, pathname: *const c_char, buf: *mut stat, flags: c_int) -> c_int; - pub fn linkat( - olddirfd: c_int, - oldpath: *const c_char, - newdirfd: c_int, - newpath: *const c_char, - flags: c_int, - ) -> c_int; - pub fn renameat( - olddirfd: c_int, - oldpath: *const c_char, - newdirfd: c_int, - newpath: *const c_char, - ) -> c_int; - pub fn symlinkat(target: *const c_char, newdirfd: c_int, linkpath: *const c_char) -> c_int; - pub fn unlinkat(dirfd: c_int, pathname: *const c_char, flags: c_int) -> c_int; - - pub fn access(path: *const c_char, amode: c_int) -> c_int; - pub fn alarm(seconds: c_uint) -> c_uint; - pub fn chdir(dir: *const c_char) -> c_int; - pub fn fchdir(dirfd: c_int) -> c_int; - pub fn chown(path: *const c_char, uid: uid_t, gid: gid_t) -> c_int; - #[cfg_attr( - all(target_os = "macos", target_arch = "x86"), - link_name = "lchown$UNIX2003" - )] - pub fn lchown(path: *const c_char, uid: uid_t, gid: gid_t) -> c_int; - #[cfg_attr( - all(target_os = "macos", target_arch = "x86"), - link_name = "close$NOCANCEL$UNIX2003" - )] - #[cfg_attr( - all(target_os = "macos", target_arch = "x86_64"), - link_name = "close$NOCANCEL" - )] - pub fn close(fd: c_int) -> c_int; - pub fn dup(fd: c_int) -> c_int; - pub fn dup2(src: c_int, dst: c_int) -> c_int; - - pub fn execl(path: *const c_char, arg0: *const c_char, ...) -> c_int; - pub fn execle(path: *const c_char, arg0: *const c_char, ...) -> c_int; - pub fn execlp(file: *const c_char, arg0: *const c_char, ...) 
-> c_int; - - // DIFF(main): changed to `*const *mut` in e77f551de9 - pub fn execv(prog: *const c_char, argv: *const *const c_char) -> c_int; - pub fn execve( - prog: *const c_char, - argv: *const *const c_char, - envp: *const *const c_char, - ) -> c_int; - pub fn execvp(c: *const c_char, argv: *const *const c_char) -> c_int; - - pub fn fork() -> pid_t; - pub fn fpathconf(filedes: c_int, name: c_int) -> c_long; - pub fn getcwd(buf: *mut c_char, size: size_t) -> *mut c_char; - pub fn getegid() -> gid_t; - pub fn geteuid() -> uid_t; - pub fn getgid() -> gid_t; - pub fn getgroups(ngroups_max: c_int, groups: *mut gid_t) -> c_int; - #[cfg_attr(target_os = "illumos", link_name = "getloginx")] - pub fn getlogin() -> *mut c_char; - #[cfg_attr( - all(target_os = "macos", target_arch = "x86"), - link_name = "getopt$UNIX2003" - )] - pub fn getopt(argc: c_int, argv: *const *mut c_char, optstr: *const c_char) -> c_int; - pub fn getpgid(pid: pid_t) -> pid_t; - pub fn getpgrp() -> pid_t; - pub fn getpid() -> pid_t; - pub fn getppid() -> pid_t; - pub fn getuid() -> uid_t; - pub fn isatty(fd: c_int) -> c_int; - #[cfg_attr(target_os = "solaris", link_name = "__link_xpg4")] - pub fn link(src: *const c_char, dst: *const c_char) -> c_int; - #[cfg_attr(gnu_file_offset_bits64, link_name = "lseek64")] - pub fn lseek(fd: c_int, offset: off_t, whence: c_int) -> off_t; - pub fn pathconf(path: *const c_char, name: c_int) -> c_long; - pub fn pipe(fds: *mut c_int) -> c_int; - pub fn posix_memalign(memptr: *mut *mut c_void, align: size_t, size: size_t) -> c_int; - pub fn aligned_alloc(alignment: size_t, size: size_t) -> *mut c_void; - #[cfg_attr( - all(target_os = "macos", target_arch = "x86"), - link_name = "read$UNIX2003" - )] - pub fn read(fd: c_int, buf: *mut c_void, count: size_t) -> ssize_t; - pub fn rmdir(path: *const c_char) -> c_int; - pub fn seteuid(uid: uid_t) -> c_int; - pub fn setegid(gid: gid_t) -> c_int; - pub fn setgid(gid: gid_t) -> c_int; - pub fn setpgid(pid: pid_t, pgid: pid_t) -> c_int; - pub fn setsid() -> pid_t; - pub fn setuid(uid: uid_t) -> c_int; - pub fn setreuid(ruid: uid_t, euid: uid_t) -> c_int; - pub fn setregid(rgid: gid_t, egid: gid_t) -> c_int; - #[cfg_attr( - all(target_os = "macos", target_arch = "x86"), - link_name = "sleep$UNIX2003" - )] - pub fn sleep(secs: c_uint) -> c_uint; - #[cfg_attr( - all(target_os = "macos", target_arch = "x86"), - link_name = "nanosleep$UNIX2003" - )] - #[cfg_attr(target_os = "netbsd", link_name = "__nanosleep50")] - #[cfg_attr(gnu_time_bits64, link_name = "__nanosleep64")] - pub fn nanosleep(rqtp: *const timespec, rmtp: *mut timespec) -> c_int; - pub fn tcgetpgrp(fd: c_int) -> pid_t; - pub fn tcsetpgrp(fd: c_int, pgrp: crate::pid_t) -> c_int; - pub fn ttyname(fd: c_int) -> *mut c_char; - #[cfg_attr( - all(target_os = "macos", target_arch = "x86"), - link_name = "ttyname_r$UNIX2003" - )] - #[cfg_attr( - any(target_os = "illumos", target_os = "solaris"), - link_name = "__posix_ttyname_r" - )] - pub fn ttyname_r(fd: c_int, buf: *mut c_char, buflen: size_t) -> c_int; - pub fn unlink(c: *const c_char) -> c_int; - #[cfg_attr( - all(target_os = "macos", target_arch = "x86"), - link_name = "wait$UNIX2003" - )] - pub fn wait(status: *mut c_int) -> pid_t; - #[cfg_attr( - all(target_os = "macos", target_arch = "x86"), - link_name = "waitpid$UNIX2003" - )] - pub fn waitpid(pid: pid_t, status: *mut c_int, options: c_int) -> pid_t; - #[cfg_attr( - all(target_os = "macos", target_arch = "x86"), - link_name = "write$UNIX2003" - )] - pub fn write(fd: c_int, buf: *const 
c_void, count: size_t) -> ssize_t; - #[cfg_attr( - all(target_os = "macos", target_arch = "x86"), - link_name = "pread$UNIX2003" - )] - #[cfg_attr(gnu_file_offset_bits64, link_name = "pread64")] - pub fn pread(fd: c_int, buf: *mut c_void, count: size_t, offset: off_t) -> ssize_t; - #[cfg_attr( - all(target_os = "macos", target_arch = "x86"), - link_name = "pwrite$UNIX2003" - )] - #[cfg_attr(gnu_file_offset_bits64, link_name = "pwrite64")] - pub fn pwrite(fd: c_int, buf: *const c_void, count: size_t, offset: off_t) -> ssize_t; - pub fn umask(mask: mode_t) -> mode_t; - - #[cfg_attr(target_os = "netbsd", link_name = "__utime50")] - #[cfg_attr(gnu_time_bits64, link_name = "__utime64")] - pub fn utime(file: *const c_char, buf: *const utimbuf) -> c_int; - - #[cfg_attr( - all(target_os = "macos", target_arch = "x86"), - link_name = "kill$UNIX2003" - )] - pub fn kill(pid: pid_t, sig: c_int) -> c_int; - #[cfg_attr( - all(target_os = "macos", target_arch = "x86"), - link_name = "killpg$UNIX2003" - )] - pub fn killpg(pgrp: pid_t, sig: c_int) -> c_int; - - pub fn mlock(addr: *const c_void, len: size_t) -> c_int; - pub fn munlock(addr: *const c_void, len: size_t) -> c_int; - pub fn mlockall(flags: c_int) -> c_int; - pub fn munlockall() -> c_int; - - #[cfg_attr( - all(target_os = "macos", target_arch = "x86"), - link_name = "mmap$UNIX2003" - )] - #[cfg_attr(gnu_file_offset_bits64, link_name = "mmap64")] - pub fn mmap( - addr: *mut c_void, - len: size_t, - prot: c_int, - flags: c_int, - fd: c_int, - offset: off_t, - ) -> *mut c_void; - #[cfg_attr( - all(target_os = "macos", target_arch = "x86"), - link_name = "munmap$UNIX2003" - )] - pub fn munmap(addr: *mut c_void, len: size_t) -> c_int; - - pub fn if_nametoindex(ifname: *const c_char) -> c_uint; - pub fn if_indextoname(ifindex: c_uint, ifname: *mut c_char) -> *mut c_char; - - #[cfg_attr( - all(target_os = "macos", not(target_arch = "aarch64")), - link_name = "lstat$INODE64" - )] - #[cfg_attr(target_os = "netbsd", link_name = "__lstat50")] - #[cfg_attr( - all(target_os = "freebsd", any(freebsd11, freebsd10)), - link_name = "lstat@FBSD_1.0" - )] - #[cfg_attr(gnu_time_bits64, link_name = "__lstat64_time64")] - #[cfg_attr( - all(not(gnu_time_bits64), gnu_file_offset_bits64), - link_name = "lstat64" - )] - pub fn lstat(path: *const c_char, buf: *mut stat) -> c_int; - - #[cfg_attr( - all(target_os = "macos", target_arch = "x86"), - link_name = "fsync$UNIX2003" - )] - pub fn fsync(fd: c_int) -> c_int; - - #[cfg_attr( - all(target_os = "macos", target_arch = "x86"), - link_name = "setenv$UNIX2003" - )] - pub fn setenv(name: *const c_char, val: *const c_char, overwrite: c_int) -> c_int; - #[cfg_attr( - all(target_os = "macos", target_arch = "x86"), - link_name = "unsetenv$UNIX2003" - )] - #[cfg_attr(target_os = "netbsd", link_name = "__unsetenv13")] - pub fn unsetenv(name: *const c_char) -> c_int; - - pub fn symlink(path1: *const c_char, path2: *const c_char) -> c_int; - - #[cfg_attr(gnu_file_offset_bits64, link_name = "truncate64")] - pub fn truncate(path: *const c_char, length: off_t) -> c_int; - #[cfg_attr(gnu_file_offset_bits64, link_name = "ftruncate64")] - pub fn ftruncate(fd: c_int, length: off_t) -> c_int; - - pub fn signal(signum: c_int, handler: sighandler_t) -> sighandler_t; - - #[cfg_attr(target_os = "netbsd", link_name = "__getrusage50")] - #[cfg_attr(gnu_time_bits64, link_name = "__getrusage64")] - pub fn getrusage(resource: c_int, usage: *mut rusage) -> c_int; - - #[cfg_attr( - any( - target_os = "macos", - target_os = "ios", - target_os = 
"tvos", - target_os = "watchos", - target_os = "visionos" - ), - link_name = "realpath$DARWIN_EXTSN" - )] - pub fn realpath(pathname: *const c_char, resolved: *mut c_char) -> *mut c_char; - - #[cfg_attr(target_os = "netbsd", link_name = "__times13")] - pub fn times(buf: *mut crate::tms) -> crate::clock_t; - - pub fn pthread_self() -> crate::pthread_t; - pub fn pthread_equal(t1: crate::pthread_t, t2: crate::pthread_t) -> c_int; - #[cfg_attr( - all(target_os = "macos", target_arch = "x86"), - link_name = "pthread_join$UNIX2003" - )] - pub fn pthread_join(native: crate::pthread_t, value: *mut *mut c_void) -> c_int; - pub fn pthread_exit(value: *mut c_void) -> !; - pub fn pthread_attr_init(attr: *mut crate::pthread_attr_t) -> c_int; - pub fn pthread_attr_destroy(attr: *mut crate::pthread_attr_t) -> c_int; - pub fn pthread_attr_getstacksize( - attr: *const crate::pthread_attr_t, - stacksize: *mut size_t, - ) -> c_int; - pub fn pthread_attr_setstacksize(attr: *mut crate::pthread_attr_t, stack_size: size_t) - -> c_int; - pub fn pthread_attr_setdetachstate(attr: *mut crate::pthread_attr_t, state: c_int) -> c_int; - pub fn pthread_detach(thread: crate::pthread_t) -> c_int; - #[cfg_attr(target_os = "netbsd", link_name = "__libc_thr_yield")] - pub fn sched_yield() -> c_int; - pub fn pthread_key_create( - key: *mut pthread_key_t, - dtor: Option, - ) -> c_int; - pub fn pthread_key_delete(key: pthread_key_t) -> c_int; - pub fn pthread_getspecific(key: pthread_key_t) -> *mut c_void; - pub fn pthread_setspecific(key: pthread_key_t, value: *const c_void) -> c_int; - pub fn pthread_mutex_init( - lock: *mut pthread_mutex_t, - attr: *const pthread_mutexattr_t, - ) -> c_int; - pub fn pthread_mutex_destroy(lock: *mut pthread_mutex_t) -> c_int; - pub fn pthread_mutex_lock(lock: *mut pthread_mutex_t) -> c_int; - pub fn pthread_mutex_trylock(lock: *mut pthread_mutex_t) -> c_int; - pub fn pthread_mutex_unlock(lock: *mut pthread_mutex_t) -> c_int; - - pub fn pthread_mutexattr_init(attr: *mut pthread_mutexattr_t) -> c_int; - #[cfg_attr( - all(target_os = "macos", target_arch = "x86"), - link_name = "pthread_mutexattr_destroy$UNIX2003" - )] - pub fn pthread_mutexattr_destroy(attr: *mut pthread_mutexattr_t) -> c_int; - pub fn pthread_mutexattr_settype(attr: *mut pthread_mutexattr_t, _type: c_int) -> c_int; - - #[cfg_attr( - all(target_os = "macos", target_arch = "x86"), - link_name = "pthread_cond_init$UNIX2003" - )] - pub fn pthread_cond_init(cond: *mut pthread_cond_t, attr: *const pthread_condattr_t) -> c_int; - #[cfg_attr( - all(target_os = "macos", target_arch = "x86"), - link_name = "pthread_cond_wait$UNIX2003" - )] - pub fn pthread_cond_wait(cond: *mut pthread_cond_t, lock: *mut pthread_mutex_t) -> c_int; - #[cfg_attr( - all(target_os = "macos", target_arch = "x86"), - link_name = "pthread_cond_timedwait$UNIX2003" - )] - #[cfg_attr(gnu_time_bits64, link_name = "__pthread_cond_timedwait64")] - pub fn pthread_cond_timedwait( - cond: *mut pthread_cond_t, - lock: *mut pthread_mutex_t, - abstime: *const crate::timespec, - ) -> c_int; - pub fn pthread_cond_signal(cond: *mut pthread_cond_t) -> c_int; - pub fn pthread_cond_broadcast(cond: *mut pthread_cond_t) -> c_int; - pub fn pthread_cond_destroy(cond: *mut pthread_cond_t) -> c_int; - pub fn pthread_condattr_init(attr: *mut pthread_condattr_t) -> c_int; - pub fn pthread_condattr_destroy(attr: *mut pthread_condattr_t) -> c_int; - #[cfg_attr( - all(target_os = "macos", target_arch = "x86"), - link_name = "pthread_rwlock_init$UNIX2003" - )] - pub fn pthread_rwlock_init( - 
lock: *mut pthread_rwlock_t, - attr: *const pthread_rwlockattr_t, - ) -> c_int; - #[cfg_attr( - all(target_os = "macos", target_arch = "x86"), - link_name = "pthread_rwlock_destroy$UNIX2003" - )] - pub fn pthread_rwlock_destroy(lock: *mut pthread_rwlock_t) -> c_int; - #[cfg_attr( - all(target_os = "macos", target_arch = "x86"), - link_name = "pthread_rwlock_rdlock$UNIX2003" - )] - pub fn pthread_rwlock_rdlock(lock: *mut pthread_rwlock_t) -> c_int; - #[cfg_attr( - all(target_os = "macos", target_arch = "x86"), - link_name = "pthread_rwlock_tryrdlock$UNIX2003" - )] - pub fn pthread_rwlock_tryrdlock(lock: *mut pthread_rwlock_t) -> c_int; - #[cfg_attr( - all(target_os = "macos", target_arch = "x86"), - link_name = "pthread_rwlock_wrlock$UNIX2003" - )] - pub fn pthread_rwlock_wrlock(lock: *mut pthread_rwlock_t) -> c_int; - #[cfg_attr( - all(target_os = "macos", target_arch = "x86"), - link_name = "pthread_rwlock_trywrlock$UNIX2003" - )] - pub fn pthread_rwlock_trywrlock(lock: *mut pthread_rwlock_t) -> c_int; - #[cfg_attr( - all(target_os = "macos", target_arch = "x86"), - link_name = "pthread_rwlock_unlock$UNIX2003" - )] - pub fn pthread_rwlock_unlock(lock: *mut pthread_rwlock_t) -> c_int; - pub fn pthread_rwlockattr_init(attr: *mut pthread_rwlockattr_t) -> c_int; - pub fn pthread_rwlockattr_destroy(attr: *mut pthread_rwlockattr_t) -> c_int; - - #[cfg_attr( - any(target_os = "illumos", target_os = "solaris"), - link_name = "__xnet_getsockopt" - )] - #[cfg_attr(target_os = "espidf", link_name = "lwip_getsockopt")] - #[cfg_attr(gnu_time_bits64, link_name = "__getsockopt64")] - pub fn getsockopt( - sockfd: c_int, - level: c_int, - optname: c_int, - optval: *mut c_void, - optlen: *mut crate::socklen_t, - ) -> c_int; - pub fn raise(signum: c_int) -> c_int; - - #[cfg_attr(target_os = "netbsd", link_name = "__utimes50")] - #[cfg_attr(gnu_time_bits64, link_name = "__utimes64")] - pub fn utimes(filename: *const c_char, times: *const crate::timeval) -> c_int; - pub fn dlopen(filename: *const c_char, flag: c_int) -> *mut c_void; - pub fn dlerror() -> *mut c_char; - pub fn dlsym(handle: *mut c_void, symbol: *const c_char) -> *mut c_void; - pub fn dlclose(handle: *mut c_void) -> c_int; - - #[cfg(not(all(target_arch = "powerpc", target_vendor = "nintendo")))] - #[cfg_attr( - any(target_os = "illumos", target_os = "solaris"), - link_name = "__xnet_getaddrinfo" - )] - #[cfg_attr(target_os = "espidf", link_name = "lwip_getaddrinfo")] - pub fn getaddrinfo( - node: *const c_char, - service: *const c_char, - hints: *const addrinfo, - res: *mut *mut addrinfo, - ) -> c_int; - #[cfg(not(all(target_arch = "powerpc", target_vendor = "nintendo")))] - #[cfg_attr(target_os = "espidf", link_name = "lwip_freeaddrinfo")] - pub fn freeaddrinfo(res: *mut addrinfo); - pub fn hstrerror(errcode: c_int) -> *const c_char; - pub fn gai_strerror(errcode: c_int) -> *const c_char; - #[cfg_attr( - any( - all( - target_os = "linux", - not(any(target_env = "musl", target_env = "ohos")) - ), - target_os = "freebsd", - target_os = "cygwin", - target_os = "dragonfly", - target_os = "haiku" - ), - link_name = "__res_init" - )] - #[cfg_attr( - any( - target_os = "macos", - target_os = "ios", - target_os = "tvos", - target_os = "watchos", - target_os = "visionos" - ), - link_name = "res_9_init" - )] - #[cfg_attr(target_os = "aix", link_name = "_res_init")] - pub fn res_init() -> c_int; - - #[cfg_attr(target_os = "netbsd", link_name = "__gmtime_r50")] - #[cfg_attr(any(target_env = "musl", target_env = "ohos"), allow(deprecated))] - // 
FIXME(time): for `time_t` - #[cfg_attr(gnu_time_bits64, link_name = "__gmtime64_r")] - pub fn gmtime_r(time_p: *const time_t, result: *mut tm) -> *mut tm; - #[cfg_attr(target_os = "netbsd", link_name = "__localtime_r50")] - #[cfg_attr(any(target_env = "musl", target_env = "ohos"), allow(deprecated))] - // FIXME(time): for `time_t` - #[cfg_attr(gnu_time_bits64, link_name = "__localtime64_r")] - pub fn localtime_r(time_p: *const time_t, result: *mut tm) -> *mut tm; - #[cfg_attr( - all(target_os = "macos", target_arch = "x86"), - link_name = "mktime$UNIX2003" - )] - #[cfg_attr(target_os = "netbsd", link_name = "__mktime50")] - #[cfg_attr(any(target_env = "musl", target_env = "ohos"), allow(deprecated))] - // FIXME: for `time_t` - #[cfg_attr(gnu_time_bits64, link_name = "__mktime64")] - pub fn mktime(tm: *mut tm) -> time_t; - #[cfg_attr(target_os = "netbsd", link_name = "__time50")] - #[cfg_attr(any(target_env = "musl", target_env = "ohos"), allow(deprecated))] - // FIXME: for `time_t` - #[cfg_attr(gnu_time_bits64, link_name = "__time64")] - pub fn time(time: *mut time_t) -> time_t; - #[cfg_attr(target_os = "netbsd", link_name = "__gmtime50")] - #[cfg_attr(any(target_env = "musl", target_env = "ohos"), allow(deprecated))] - // FIXME(time): for `time_t` - #[cfg_attr(gnu_time_bits64, link_name = "__gmtime64")] - pub fn gmtime(time_p: *const time_t) -> *mut tm; - #[cfg_attr(target_os = "netbsd", link_name = "__locatime50")] - #[cfg_attr(any(target_env = "musl", target_env = "ohos"), allow(deprecated))] - // FIXME(time): for `time_t` - #[cfg_attr(gnu_time_bits64, link_name = "__localtime64")] - pub fn localtime(time_p: *const time_t) -> *mut tm; - #[cfg_attr(target_os = "netbsd", link_name = "__difftime50")] - #[cfg_attr(any(target_env = "musl", target_env = "ohos"), allow(deprecated))] - // FIXME(time): for `time_t` - #[cfg_attr(gnu_time_bits64, link_name = "__difftime64")] - pub fn difftime(time1: time_t, time0: time_t) -> c_double; - #[cfg(not(target_os = "aix"))] - #[cfg_attr(target_os = "netbsd", link_name = "__timegm50")] - #[cfg_attr(any(target_env = "musl", target_env = "ohos"), allow(deprecated))] - // FIXME(time): for `time_t` - #[cfg_attr(gnu_time_bits64, link_name = "__timegm64")] - pub fn timegm(tm: *mut crate::tm) -> time_t; - - #[cfg_attr(target_os = "netbsd", link_name = "__mknod50")] - #[cfg_attr( - all(target_os = "freebsd", any(freebsd11, freebsd10)), - link_name = "mknod@FBSD_1.0" - )] - pub fn mknod(pathname: *const c_char, mode: mode_t, dev: crate::dev_t) -> c_int; - pub fn gethostname(name: *mut c_char, len: size_t) -> c_int; - pub fn endservent(); - pub fn getservbyname(name: *const c_char, proto: *const c_char) -> *mut servent; - pub fn getservbyport(port: c_int, proto: *const c_char) -> *mut servent; - pub fn getservent() -> *mut servent; - pub fn setservent(stayopen: c_int); - pub fn getprotobyname(name: *const c_char) -> *mut protoent; - pub fn getprotobynumber(proto: c_int) -> *mut protoent; - pub fn chroot(name: *const c_char) -> c_int; - #[cfg(target_os = "cygwin")] - pub fn usleep(secs: useconds_t) -> c_int; - #[cfg_attr( - all(target_os = "macos", target_arch = "x86"), - link_name = "usleep$UNIX2003" - )] - #[cfg(not(target_os = "cygwin"))] - pub fn usleep(secs: c_uint) -> c_int; - #[cfg_attr( - all(target_os = "macos", target_arch = "x86"), - link_name = "send$UNIX2003" - )] - #[cfg_attr(target_os = "espidf", link_name = "lwip_send")] - pub fn send(socket: c_int, buf: *const c_void, len: size_t, flags: c_int) -> ssize_t; - #[cfg_attr( - all(target_os = "macos", 
target_arch = "x86"), - link_name = "recv$UNIX2003" - )] - #[cfg_attr(target_os = "espidf", link_name = "lwip_recv")] - pub fn recv(socket: c_int, buf: *mut c_void, len: size_t, flags: c_int) -> ssize_t; - #[cfg_attr( - all(target_os = "macos", target_arch = "x86"), - link_name = "putenv$UNIX2003" - )] - #[cfg_attr(target_os = "netbsd", link_name = "__putenv50")] - pub fn putenv(string: *mut c_char) -> c_int; - #[cfg_attr( - all(target_os = "macos", target_arch = "x86"), - link_name = "poll$UNIX2003" - )] - pub fn poll(fds: *mut pollfd, nfds: nfds_t, timeout: c_int) -> c_int; - #[cfg_attr( - all(target_os = "macos", target_arch = "x86_64"), - link_name = "select$1050" - )] - #[cfg_attr( - all(target_os = "macos", target_arch = "x86"), - link_name = "select$UNIX2003" - )] - #[cfg_attr(target_os = "netbsd", link_name = "__select50")] - #[cfg_attr(target_os = "aix", link_name = "__fd_select")] - #[cfg_attr(gnu_time_bits64, link_name = "__select64")] - pub fn select( - nfds: c_int, - readfds: *mut fd_set, - writefds: *mut fd_set, - errorfds: *mut fd_set, - timeout: *mut timeval, - ) -> c_int; - #[cfg_attr(target_os = "netbsd", link_name = "__setlocale50")] - pub fn setlocale(category: c_int, locale: *const c_char) -> *mut c_char; - pub fn localeconv() -> *mut lconv; - - #[cfg_attr( - all(target_os = "macos", target_arch = "x86"), - link_name = "sem_wait$UNIX2003" - )] - pub fn sem_wait(sem: *mut sem_t) -> c_int; - pub fn sem_trywait(sem: *mut sem_t) -> c_int; - pub fn sem_post(sem: *mut sem_t) -> c_int; - #[cfg_attr(gnu_file_offset_bits64, link_name = "statvfs64")] - pub fn statvfs(path: *const c_char, buf: *mut statvfs) -> c_int; - #[cfg_attr(gnu_file_offset_bits64, link_name = "fstatvfs64")] - pub fn fstatvfs(fd: c_int, buf: *mut statvfs) -> c_int; - - #[cfg_attr(target_os = "netbsd", link_name = "__sigemptyset14")] - pub fn sigemptyset(set: *mut sigset_t) -> c_int; - #[cfg_attr(target_os = "netbsd", link_name = "__sigaddset14")] - pub fn sigaddset(set: *mut sigset_t, signum: c_int) -> c_int; - #[cfg_attr(target_os = "netbsd", link_name = "__sigfillset14")] - pub fn sigfillset(set: *mut sigset_t) -> c_int; - #[cfg_attr(target_os = "netbsd", link_name = "__sigdelset14")] - pub fn sigdelset(set: *mut sigset_t, signum: c_int) -> c_int; - #[cfg_attr(target_os = "netbsd", link_name = "__sigismember14")] - pub fn sigismember(set: *const sigset_t, signum: c_int) -> c_int; - - #[cfg_attr(target_os = "netbsd", link_name = "__sigprocmask14")] - pub fn sigprocmask(how: c_int, set: *const sigset_t, oldset: *mut sigset_t) -> c_int; - #[cfg_attr(target_os = "netbsd", link_name = "__sigpending14")] - pub fn sigpending(set: *mut sigset_t) -> c_int; - - #[cfg_attr(target_os = "solaris", link_name = "__sysconf_xpg7")] - pub fn sysconf(name: c_int) -> c_long; - - pub fn mkfifo(path: *const c_char, mode: mode_t) -> c_int; - - #[cfg_attr(gnu_file_offset_bits64, link_name = "fseeko64")] - pub fn fseeko(stream: *mut crate::FILE, offset: off_t, whence: c_int) -> c_int; - #[cfg_attr(gnu_file_offset_bits64, link_name = "ftello64")] - pub fn ftello(stream: *mut crate::FILE) -> off_t; - #[cfg_attr( - all(target_os = "macos", target_arch = "x86"), - link_name = "tcdrain$UNIX2003" - )] - pub fn tcdrain(fd: c_int) -> c_int; - pub fn cfgetispeed(termios: *const crate::termios) -> crate::speed_t; - pub fn cfgetospeed(termios: *const crate::termios) -> crate::speed_t; - pub fn cfsetispeed(termios: *mut crate::termios, speed: crate::speed_t) -> c_int; - pub fn cfsetospeed(termios: *mut crate::termios, speed: crate::speed_t) 
-> c_int; - pub fn tcgetattr(fd: c_int, termios: *mut crate::termios) -> c_int; - pub fn tcsetattr(fd: c_int, optional_actions: c_int, termios: *const crate::termios) -> c_int; - pub fn tcflow(fd: c_int, action: c_int) -> c_int; - pub fn tcflush(fd: c_int, action: c_int) -> c_int; - pub fn tcgetsid(fd: c_int) -> crate::pid_t; - pub fn tcsendbreak(fd: c_int, duration: c_int) -> c_int; - #[cfg_attr(gnu_file_offset_bits64, link_name = "mkstemp64")] - pub fn mkstemp(template: *mut c_char) -> c_int; - pub fn mkdtemp(template: *mut c_char) -> *mut c_char; - - pub fn tmpnam(ptr: *mut c_char) -> *mut c_char; - - pub fn openlog(ident: *const c_char, logopt: c_int, facility: c_int); - pub fn closelog(); - pub fn setlogmask(maskpri: c_int) -> c_int; - #[cfg_attr(target_os = "macos", link_name = "syslog$DARWIN_EXTSN")] - pub fn syslog(priority: c_int, message: *const c_char, ...); - #[cfg_attr( - all(target_os = "macos", target_arch = "x86"), - link_name = "nice$UNIX2003" - )] - pub fn nice(incr: c_int) -> c_int; - - pub fn grantpt(fd: c_int) -> c_int; - pub fn posix_openpt(flags: c_int) -> c_int; - pub fn ptsname(fd: c_int) -> *mut c_char; - pub fn unlockpt(fd: c_int) -> c_int; - - #[cfg(not(target_os = "aix"))] - pub fn strcasestr(cs: *const c_char, ct: *const c_char) -> *mut c_char; - pub fn getline(lineptr: *mut *mut c_char, n: *mut size_t, stream: *mut FILE) -> ssize_t; - - #[cfg_attr(gnu_file_offset_bits64, link_name = "lockf64")] - pub fn lockf(fd: c_int, cmd: c_int, len: off_t) -> c_int; - -} - -safe_f! { - // It seems htonl, etc are macros on macOS. So we have to reimplement them. So let's - // reimplement them for all UNIX platforms - pub const fn htonl(hostlong: u32) -> u32 { - u32::to_be(hostlong) - } - pub const fn htons(hostshort: u16) -> u16 { - u16::to_be(hostshort) - } - pub const fn ntohl(netlong: u32) -> u32 { - u32::from_be(netlong) - } - pub const fn ntohs(netshort: u16) -> u16 { - u16::from_be(netshort) - } -} - -cfg_if! { - if #[cfg(not(any( - target_os = "emscripten", - target_os = "android", - target_os = "haiku", - target_os = "nto", - target_os = "solaris", - target_os = "cygwin", - target_os = "aix", - )))] { - extern "C" { - #[cfg_attr(gnu_time_bits64, link_name = "__adjtime64")] - pub fn adjtime(delta: *const timeval, olddelta: *mut timeval) -> c_int; - } - } else if #[cfg(target_os = "solaris")] { - extern "C" { - pub fn adjtime(delta: *mut timeval, olddelta: *mut timeval) -> c_int; - } - } -} - -cfg_if! { - if #[cfg(not(any( - target_os = "emscripten", - target_os = "android", - target_os = "nto" - )))] { - extern "C" { - pub fn stpncpy(dst: *mut c_char, src: *const c_char, n: size_t) -> *mut c_char; - } - } -} - -cfg_if! { - if #[cfg(not(any( - target_os = "dragonfly", - target_os = "emscripten", - target_os = "hurd", - target_os = "macos", - target_os = "openbsd", - )))] { - extern "C" { - pub fn sigqueue(pid: pid_t, sig: c_int, value: crate::sigval) -> c_int; - } - } -} - -cfg_if! { - if #[cfg(not(target_os = "android"))] { - extern "C" { - #[cfg_attr( - all(target_os = "macos", target_arch = "x86"), - link_name = "confstr$UNIX2003" - )] - #[cfg_attr(target_os = "solaris", link_name = "__confstr_xpg7")] - pub fn confstr(name: c_int, buf: *mut c_char, len: size_t) -> size_t; - } - } -} - -cfg_if! { - if #[cfg(not(target_os = "aix"))] { - extern "C" { - pub fn dladdr(addr: *const c_void, info: *mut Dl_info) -> c_int; - } - } -} - -cfg_if! 
{ - if #[cfg(not(target_os = "solaris"))] { - extern "C" { - pub fn flock(fd: c_int, operation: c_int) -> c_int; - } - } -} - -cfg_if! { - if #[cfg(not(any(target_env = "uclibc", target_os = "nto")))] { - extern "C" { - pub fn open_wmemstream(ptr: *mut *mut wchar_t, sizeloc: *mut size_t) -> *mut FILE; - } - } -} - -cfg_if! { - if #[cfg(not(target_os = "redox"))] { - extern "C" { - pub fn getsid(pid: pid_t) -> pid_t; - #[cfg_attr( - all(target_os = "macos", target_arch = "x86"), - link_name = "pause$UNIX2003" - )] - pub fn pause() -> c_int; - - pub fn mkdirat(dirfd: c_int, pathname: *const c_char, mode: mode_t) -> c_int; - #[cfg_attr(gnu_file_offset_bits64, link_name = "openat64")] - pub fn openat(dirfd: c_int, pathname: *const c_char, flags: c_int, ...) -> c_int; - - #[cfg_attr( - all(target_os = "macos", target_arch = "x86_64"), - link_name = "fdopendir$INODE64" - )] - #[cfg_attr( - all(target_os = "macos", target_arch = "x86"), - link_name = "fdopendir$INODE64$UNIX2003" - )] - pub fn fdopendir(fd: c_int) -> *mut crate::DIR; - - #[cfg_attr( - all(target_os = "macos", not(target_arch = "aarch64")), - link_name = "readdir_r$INODE64" - )] - #[cfg_attr(target_os = "netbsd", link_name = "__readdir_r30")] - #[cfg_attr( - all(target_os = "freebsd", any(freebsd11, freebsd10)), - link_name = "readdir_r@FBSD_1.0" - )] - #[cfg_attr( - all(target_os = "freebsd", not(any(freebsd11, freebsd10))), - link_name = "readdir_r@FBSD_1.5" - )] - #[allow(non_autolinks)] // FIXME(docs): `<>` breaks line length limit. - /// The 64-bit libc on Solaris and illumos only has readdir_r. If a - /// 32-bit Solaris or illumos target is ever created, it should use - /// __posix_readdir_r. See libc(3LIB) on Solaris or illumos: - /// https://illumos.org/man/3lib/libc - /// https://docs.oracle.com/cd/E36784_01/html/E36873/libc-3lib.html - /// https://www.unix.com/man-page/opensolaris/3LIB/libc/ - #[cfg_attr(gnu_file_offset_bits64, link_name = "readdir64_r")] - pub fn readdir_r( - dirp: *mut crate::DIR, - entry: *mut crate::dirent, - result: *mut *mut crate::dirent, - ) -> c_int; - } - } -} - -cfg_if! 
{ - if #[cfg(target_os = "nto")] { - extern "C" { - pub fn readlinkat( - dirfd: c_int, - pathname: *const c_char, - buf: *mut c_char, - bufsiz: size_t, - ) -> c_int; - pub fn readlink(path: *const c_char, buf: *mut c_char, bufsz: size_t) -> c_int; - pub fn pselect( - nfds: c_int, - readfds: *mut fd_set, - writefds: *mut fd_set, - errorfds: *mut fd_set, - timeout: *mut timespec, - sigmask: *const sigset_t, - ) -> c_int; - pub fn sigaction(signum: c_int, act: *const sigaction, oldact: *mut sigaction) - -> c_int; - } - } else { - extern "C" { - pub fn readlinkat( - dirfd: c_int, - pathname: *const c_char, - buf: *mut c_char, - bufsiz: size_t, - ) -> ssize_t; - pub fn fmemopen(buf: *mut c_void, size: size_t, mode: *const c_char) -> *mut FILE; - pub fn open_memstream(ptr: *mut *mut c_char, sizeloc: *mut size_t) -> *mut FILE; - pub fn atexit(cb: extern "C" fn()) -> c_int; - #[cfg_attr(target_os = "netbsd", link_name = "__sigaction14")] - pub fn sigaction(signum: c_int, act: *const sigaction, oldact: *mut sigaction) - -> c_int; - pub fn readlink(path: *const c_char, buf: *mut c_char, bufsz: size_t) -> ssize_t; - #[cfg_attr( - all(target_os = "macos", target_arch = "x86_64"), - link_name = "pselect$1050" - )] - #[cfg_attr( - all(target_os = "macos", target_arch = "x86"), - link_name = "pselect$UNIX2003" - )] - #[cfg_attr(target_os = "netbsd", link_name = "__pselect50")] - #[cfg_attr(gnu_time_bits64, link_name = "__pselect64")] - pub fn pselect( - nfds: c_int, - readfds: *mut fd_set, - writefds: *mut fd_set, - errorfds: *mut fd_set, - timeout: *const timespec, - sigmask: *const sigset_t, - ) -> c_int; - } - } -} - -cfg_if! { - if #[cfg(target_os = "aix")] { - extern "C" { - pub fn cfmakeraw(termios: *mut crate::termios) -> c_int; - pub fn cfsetspeed(termios: *mut crate::termios, speed: crate::speed_t) -> c_int; - } - } else if #[cfg(not(any( - target_os = "solaris", - target_os = "illumos", - target_os = "nto", - )))] { - extern "C" { - pub fn cfmakeraw(termios: *mut crate::termios); - pub fn cfsetspeed(termios: *mut crate::termios, speed: crate::speed_t) -> c_int; - } - } -} - -extern "C" { - pub fn fnmatch(pattern: *const c_char, name: *const c_char, flags: c_int) -> c_int; -} - -cfg_if! 
{ - if #[cfg(target_env = "newlib")] { - mod newlib; - pub use self::newlib::*; - } else if #[cfg(any( - target_os = "linux", - target_os = "l4re", - target_os = "android", - target_os = "emscripten" - ))] { - mod linux_like; - pub use self::linux_like::*; - } else if #[cfg(any( - target_os = "macos", - target_os = "ios", - target_os = "tvos", - target_os = "watchos", - target_os = "visionos", - target_os = "freebsd", - target_os = "dragonfly", - target_os = "openbsd", - target_os = "netbsd" - ))] { - mod bsd; - pub use self::bsd::*; - } else if #[cfg(any(target_os = "solaris", target_os = "illumos"))] { - mod solarish; - pub use self::solarish::*; - } else if #[cfg(target_os = "haiku")] { - mod haiku; - pub use self::haiku::*; - } else if #[cfg(target_os = "redox")] { - mod redox; - pub use self::redox::*; - } else if #[cfg(target_os = "cygwin")] { - mod cygwin; - pub use self::cygwin::*; - } else if #[cfg(target_os = "nto")] { - mod nto; - pub use self::nto::*; - } else if #[cfg(target_os = "aix")] { - mod aix; - pub use self::aix::*; - } else if #[cfg(target_os = "hurd")] { - mod hurd; - pub use self::hurd::*; - } else if #[cfg(target_os = "nuttx")] { - mod nuttx; - pub use self::nuttx::*; - } else { - // Unknown target_os - } -} diff --git a/vendor/libc/src/unix/newlib/aarch64/mod.rs b/vendor/libc/src/unix/newlib/aarch64/mod.rs deleted file mode 100644 index e4640580e2478b..00000000000000 --- a/vendor/libc/src/unix/newlib/aarch64/mod.rs +++ /dev/null @@ -1,52 +0,0 @@ -use crate::prelude::*; - -pub type clock_t = c_long; -pub type wchar_t = u32; - -s! { - pub struct sockaddr { - pub sa_len: u8, - pub sa_family: crate::sa_family_t, - pub sa_data: [c_char; 14], - } - - pub struct sockaddr_in6 { - pub sin6_len: u8, - pub sin6_family: crate::sa_family_t, - pub sin6_port: crate::in_port_t, - pub sin6_flowinfo: u32, - pub sin6_addr: crate::in6_addr, - pub sin6_scope_id: u32, - } - - pub struct sockaddr_in { - pub sin_len: u8, - pub sin_family: crate::sa_family_t, - pub sin_port: crate::in_port_t, - pub sin_addr: crate::in_addr, - pub sin_zero: [c_char; 8], - } -} - -pub const AF_INET6: c_int = 23; - -pub const FIONBIO: c_ulong = 1; - -pub const POLLIN: c_short = 0x1; -pub const POLLPRI: c_short = 0x2; -pub const POLLOUT: c_short = 0x4; -pub const POLLERR: c_short = 0x8; -pub const POLLHUP: c_short = 0x10; -pub const POLLNVAL: c_short = 0x20; - -pub const SOL_SOCKET: c_int = 65535; - -pub const MSG_OOB: c_int = 1; -pub const MSG_PEEK: c_int = 2; -pub const MSG_DONTWAIT: c_int = 4; -pub const MSG_DONTROUTE: c_int = 0; -pub const MSG_WAITALL: c_int = 0; -pub const MSG_MORE: c_int = 0; -pub const MSG_NOSIGNAL: c_int = 0; - -pub use crate::unix::newlib::generic::{dirent, sigset_t, stat}; diff --git a/vendor/libc/src/unix/newlib/arm/mod.rs b/vendor/libc/src/unix/newlib/arm/mod.rs deleted file mode 100644 index aea4ed764b03c0..00000000000000 --- a/vendor/libc/src/unix/newlib/arm/mod.rs +++ /dev/null @@ -1,54 +0,0 @@ -use crate::prelude::*; - -pub type clock_t = c_long; -pub type wchar_t = u32; - -s! 
{ - pub struct sockaddr { - pub sa_family: crate::sa_family_t, - pub sa_data: [c_char; 14], - } - - pub struct sockaddr_in6 { - pub sin6_family: crate::sa_family_t, - pub sin6_port: crate::in_port_t, - pub sin6_flowinfo: u32, - pub sin6_addr: crate::in6_addr, - pub sin6_scope_id: u32, - } - - pub struct sockaddr_in { - pub sin_family: crate::sa_family_t, - pub sin_port: crate::in_port_t, - pub sin_addr: crate::in_addr, - pub sin_zero: [u8; 8], - } - - pub struct sockaddr_storage { - pub ss_family: crate::sa_family_t, - pub __ss_padding: [u8; 26], - } -} - -pub const AF_INET6: c_int = 23; - -pub const FIONBIO: c_ulong = 1; - -pub const POLLIN: c_short = 0x1; -pub const POLLPRI: c_short = 0x2; -pub const POLLHUP: c_short = 0x4; -pub const POLLERR: c_short = 0x8; -pub const POLLOUT: c_short = 0x10; -pub const POLLNVAL: c_short = 0x20; - -pub const SOL_SOCKET: c_int = 65535; - -pub const MSG_OOB: c_int = 1; -pub const MSG_PEEK: c_int = 2; -pub const MSG_DONTWAIT: c_int = 4; -pub const MSG_DONTROUTE: c_int = 0; -pub const MSG_WAITALL: c_int = 0; -pub const MSG_MORE: c_int = 0; -pub const MSG_NOSIGNAL: c_int = 0; - -pub use crate::unix::newlib::generic::{dirent, sigset_t, stat}; diff --git a/vendor/libc/src/unix/newlib/espidf/mod.rs b/vendor/libc/src/unix/newlib/espidf/mod.rs deleted file mode 100644 index 57a033fcaf2637..00000000000000 --- a/vendor/libc/src/unix/newlib/espidf/mod.rs +++ /dev/null @@ -1,120 +0,0 @@ -use crate::prelude::*; - -pub type clock_t = c_ulong; -pub type wchar_t = u32; - -s! { - pub struct cmsghdr { - pub cmsg_len: crate::socklen_t, - pub cmsg_level: c_int, - pub cmsg_type: c_int, - } - - pub struct msghdr { - pub msg_name: *mut c_void, - pub msg_namelen: crate::socklen_t, - pub msg_iov: *mut crate::iovec, - pub msg_iovlen: c_int, - pub msg_control: *mut c_void, - pub msg_controllen: crate::socklen_t, - pub msg_flags: c_int, - } - - pub struct sockaddr_un { - pub sun_family: crate::sa_family_t, - pub sun_path: [c_char; 108], - } - - pub struct sockaddr { - pub sa_len: u8, - pub sa_family: crate::sa_family_t, - pub sa_data: [c_char; 14], - } - - pub struct sockaddr_in6 { - pub sin6_len: u8, - pub sin6_family: crate::sa_family_t, - pub sin6_port: crate::in_port_t, - pub sin6_flowinfo: u32, - pub sin6_addr: crate::in6_addr, - pub sin6_scope_id: u32, - } - - pub struct sockaddr_in { - pub sin_len: u8, - pub sin_family: crate::sa_family_t, - pub sin_port: crate::in_port_t, - pub sin_addr: crate::in_addr, - pub sin_zero: [c_char; 8], - } - - pub struct sockaddr_storage { - pub s2_len: u8, - pub ss_family: crate::sa_family_t, - pub s2_data1: [c_char; 2], - pub s2_data2: [u32; 3], - pub s2_data3: [u32; 3], - } -} - -pub const AF_UNIX: c_int = 1; -pub const AF_INET6: c_int = 10; - -pub const FIONBIO: c_ulong = 2147772030; - -pub const POLLIN: c_short = 1 << 0; -pub const POLLRDNORM: c_short = 1 << 1; -pub const POLLRDBAND: c_short = 1 << 2; -pub const POLLPRI: c_short = POLLRDBAND; -pub const POLLOUT: c_short = 1 << 3; -pub const POLLWRNORM: c_short = POLLOUT; -pub const POLLWRBAND: c_short = 1 << 4; -pub const POLLERR: c_short = 1 << 5; -pub const POLLHUP: c_short = 1 << 6; - -pub const SOL_SOCKET: c_int = 0xfff; - -pub const MSG_OOB: c_int = 0x04; -pub const MSG_PEEK: c_int = 0x01; -pub const MSG_DONTWAIT: c_int = 0x08; -pub const MSG_DONTROUTE: c_int = 0x4; -pub const MSG_WAITALL: c_int = 0x02; -pub const MSG_MORE: c_int = 0x10; -pub const MSG_NOSIGNAL: c_int = 0x20; -pub const MSG_TRUNC: c_int = 0x04; -pub const MSG_CTRUNC: c_int = 0x08; -pub const MSG_EOR: c_int = 0x08; - 
-pub const PTHREAD_STACK_MIN: size_t = 768; - -pub const SIGABRT: c_int = 6; -pub const SIGFPE: c_int = 8; -pub const SIGILL: c_int = 4; -pub const SIGINT: c_int = 2; -pub const SIGSEGV: c_int = 11; -pub const SIGTERM: c_int = 15; -pub const SIGHUP: c_int = 1; -pub const SIGQUIT: c_int = 3; -pub const NSIG: size_t = 32; - -extern "C" { - pub fn pthread_create( - native: *mut crate::pthread_t, - attr: *const crate::pthread_attr_t, - f: extern "C" fn(_: *mut c_void) -> *mut c_void, - value: *mut c_void, - ) -> c_int; - - pub fn getrandom(buf: *mut c_void, buflen: size_t, flags: c_uint) -> ssize_t; - - pub fn gethostname(name: *mut c_char, namelen: ssize_t); - - #[link_name = "lwip_sendmsg"] - pub fn sendmsg(s: c_int, msg: *const crate::msghdr, flags: c_int) -> ssize_t; - #[link_name = "lwip_recvmsg"] - pub fn recvmsg(s: c_int, msg: *mut crate::msghdr, flags: c_int) -> ssize_t; - - pub fn eventfd(initval: c_uint, flags: c_int) -> c_int; -} - -pub use crate::unix::newlib::generic::{dirent, sigset_t, stat}; diff --git a/vendor/libc/src/unix/newlib/generic.rs b/vendor/libc/src/unix/newlib/generic.rs deleted file mode 100644 index ba4dfbe528b69d..00000000000000 --- a/vendor/libc/src/unix/newlib/generic.rs +++ /dev/null @@ -1,39 +0,0 @@ -//! Common types used by most newlib platforms - -use crate::off_t; -use crate::prelude::*; - -s! { - pub struct sigset_t { - #[cfg(target_os = "horizon")] - __val: [c_ulong; 16], - #[cfg(not(target_os = "horizon"))] - __val: u32, - } - - pub struct stat { - pub st_dev: crate::dev_t, - pub st_ino: crate::ino_t, - pub st_mode: crate::mode_t, - pub st_nlink: crate::nlink_t, - pub st_uid: crate::uid_t, - pub st_gid: crate::gid_t, - pub st_rdev: crate::dev_t, - pub st_size: off_t, - pub st_atime: crate::time_t, - pub st_spare1: c_long, - pub st_mtime: crate::time_t, - pub st_spare2: c_long, - pub st_ctime: crate::time_t, - pub st_spare3: c_long, - pub st_blksize: crate::blksize_t, - pub st_blocks: crate::blkcnt_t, - pub st_spare4: [c_long; 2usize], - } - - pub struct dirent { - pub d_ino: crate::ino_t, - pub d_type: c_uchar, - pub d_name: [c_char; 256usize], - } -} diff --git a/vendor/libc/src/unix/newlib/horizon/mod.rs b/vendor/libc/src/unix/newlib/horizon/mod.rs deleted file mode 100644 index 3958e02734adaf..00000000000000 --- a/vendor/libc/src/unix/newlib/horizon/mod.rs +++ /dev/null @@ -1,278 +0,0 @@ -//! ARMv6K Nintendo 3DS C Newlib definitions - -use crate::off_t; -use crate::prelude::*; - -pub type wchar_t = c_uint; - -pub type u_register_t = c_uint; -pub type u_char = c_uchar; -pub type u_short = c_ushort; -pub type u_int = c_uint; -pub type u_long = c_ulong; -pub type ushort = c_ushort; -pub type uint = c_uint; -pub type ulong = c_ulong; -pub type clock_t = c_ulong; -pub type daddr_t = c_long; -pub type caddr_t = *mut c_char; -pub type sbintime_t = c_longlong; -pub type sigset_t = c_ulong; - -s! 
{ - pub struct hostent { - pub h_name: *mut c_char, - pub h_aliases: *mut *mut c_char, - pub h_addrtype: u16, - pub h_length: u16, - pub h_addr_list: *mut *mut c_char, - } - - pub struct sockaddr { - pub sa_family: crate::sa_family_t, - pub sa_data: [c_char; 26usize], - } - - pub struct sockaddr_storage { - pub ss_family: crate::sa_family_t, - pub __ss_padding: [c_char; 26usize], - } - - pub struct sockaddr_in { - pub sin_family: crate::sa_family_t, - pub sin_port: crate::in_port_t, - pub sin_addr: crate::in_addr, - pub sin_zero: [c_char; 8], - } - - pub struct sockaddr_in6 { - pub sin6_family: crate::sa_family_t, - pub sin6_port: crate::in_port_t, - pub sin6_flowinfo: u32, - pub sin6_addr: crate::in6_addr, - pub sin6_scope_id: u32, - } - - pub struct sockaddr_un { - pub sun_len: c_uchar, - pub sun_family: crate::sa_family_t, - pub sun_path: [c_char; 104usize], - } - - pub struct sched_param { - pub sched_priority: c_int, - } - - pub struct stat { - pub st_dev: crate::dev_t, - pub st_ino: crate::ino_t, - pub st_mode: crate::mode_t, - pub st_nlink: crate::nlink_t, - pub st_uid: crate::uid_t, - pub st_gid: crate::gid_t, - pub st_rdev: crate::dev_t, - pub st_size: off_t, - pub st_atim: crate::timespec, - pub st_mtim: crate::timespec, - pub st_ctim: crate::timespec, - pub st_blksize: crate::blksize_t, - pub st_blocks: crate::blkcnt_t, - pub st_spare4: [c_long; 2usize], - } -} - -pub const SIGEV_NONE: c_int = 1; -pub const SIGEV_SIGNAL: c_int = 2; -pub const SIGEV_THREAD: c_int = 3; -pub const SA_NOCLDSTOP: c_int = 1; -pub const MINSIGSTKSZ: c_int = 2048; -pub const SIGSTKSZ: c_int = 8192; -pub const SS_ONSTACK: c_int = 1; -pub const SS_DISABLE: c_int = 2; -pub const SIG_SETMASK: c_int = 0; -pub const SIG_BLOCK: c_int = 1; -pub const SIG_UNBLOCK: c_int = 2; -pub const SIGHUP: c_int = 1; -pub const SIGINT: c_int = 2; -pub const SIGQUIT: c_int = 3; -pub const SIGILL: c_int = 4; -pub const SIGTRAP: c_int = 5; -pub const SIGABRT: c_int = 6; -pub const SIGEMT: c_int = 7; -pub const SIGFPE: c_int = 8; -pub const SIGKILL: c_int = 9; -pub const SIGBUS: c_int = 10; -pub const SIGSEGV: c_int = 11; -pub const SIGSYS: c_int = 12; -pub const SIGPIPE: c_int = 13; -pub const SIGALRM: c_int = 14; -pub const SIGTERM: c_int = 15; -pub const SIGURG: c_int = 16; -pub const SIGSTOP: c_int = 17; -pub const SIGTSTP: c_int = 18; -pub const SIGCONT: c_int = 19; -pub const SIGCHLD: c_int = 20; -pub const SIGCLD: c_int = 20; -pub const SIGTTIN: c_int = 21; -pub const SIGTTOU: c_int = 22; -pub const SIGIO: c_int = 23; -pub const SIGPOLL: c_int = 23; -pub const SIGXCPU: c_int = 24; -pub const SIGXFSZ: c_int = 25; -pub const SIGVTALRM: c_int = 26; -pub const SIGPROF: c_int = 27; -pub const SIGWINCH: c_int = 28; -pub const SIGLOST: c_int = 29; -pub const SIGUSR1: c_int = 30; -pub const SIGUSR2: c_int = 31; -pub const NSIG: c_int = 32; -pub const CLOCK_ENABLED: c_uint = 1; -pub const CLOCK_DISABLED: c_uint = 0; -pub const CLOCK_ALLOWED: c_uint = 1; -pub const CLOCK_DISALLOWED: c_uint = 0; -pub const TIMER_ABSTIME: c_uint = 4; -pub const SOL_SOCKET: c_int = 65535; -pub const MSG_OOB: c_int = 1; -pub const MSG_PEEK: c_int = 2; -pub const MSG_DONTWAIT: c_int = 4; -pub const MSG_DONTROUTE: c_int = 0; -pub const MSG_WAITALL: c_int = 0; -pub const MSG_MORE: c_int = 0; -pub const MSG_NOSIGNAL: c_int = 0; -pub const SOL_CONFIG: c_uint = 65534; - -pub const _SC_PAGESIZE: c_int = 8; -pub const _SC_GETPW_R_SIZE_MAX: c_int = 51; - -pub const PTHREAD_STACK_MIN: size_t = 4096; -pub const WNOHANG: c_int = 1; - -pub const POLLIN: c_short = 
0x0001; -pub const POLLPRI: c_short = 0x0002; -pub const POLLOUT: c_short = 0x0004; -pub const POLLRDNORM: c_short = 0x0040; -pub const POLLWRNORM: c_short = POLLOUT; -pub const POLLRDBAND: c_short = 0x0080; -pub const POLLWRBAND: c_short = 0x0100; -pub const POLLERR: c_short = 0x0008; -pub const POLLHUP: c_short = 0x0010; -pub const POLLNVAL: c_short = 0x0020; - -pub const EAI_AGAIN: c_int = 2; -pub const EAI_BADFLAGS: c_int = 3; -pub const EAI_FAIL: c_int = 4; -pub const EAI_SERVICE: c_int = 9; -pub const EAI_SYSTEM: c_int = 11; -pub const EAI_BADHINTS: c_int = 12; -pub const EAI_PROTOCOL: c_int = 13; -pub const EAI_OVERFLOW: c_int = 14; -pub const EAI_MAX: c_int = 15; - -pub const AF_UNIX: c_int = 1; -pub const AF_INET6: c_int = 23; - -pub const FIONBIO: c_ulong = 1; - -pub const RTLD_DEFAULT: *mut c_void = ptr::null_mut(); - -// For pthread get/setschedparam -pub const SCHED_FIFO: c_int = 1; -pub const SCHED_RR: c_int = 2; - -// For getrandom() -pub const GRND_NONBLOCK: c_uint = 0x1; -pub const GRND_RANDOM: c_uint = 0x2; - -// Horizon OS works doesn't or can't hold any of this information -safe_f! { - pub const fn WIFSTOPPED(_status: c_int) -> bool { - false - } - - pub const fn WSTOPSIG(_status: c_int) -> c_int { - 0 - } - - pub const fn WIFCONTINUED(_status: c_int) -> bool { - true - } - - pub const fn WIFSIGNALED(_status: c_int) -> bool { - false - } - - pub const fn WTERMSIG(_status: c_int) -> c_int { - 0 - } - - pub const fn WIFEXITED(_status: c_int) -> bool { - true - } - - pub const fn WEXITSTATUS(_status: c_int) -> c_int { - 0 - } - - pub const fn WCOREDUMP(_status: c_int) -> bool { - false - } -} - -extern "C" { - pub fn pthread_create( - native: *mut crate::pthread_t, - attr: *const crate::pthread_attr_t, - f: extern "C" fn(_: *mut c_void) -> *mut c_void, - value: *mut c_void, - ) -> c_int; - - pub fn pthread_attr_getschedparam( - attr: *const crate::pthread_attr_t, - param: *mut sched_param, - ) -> c_int; - - pub fn pthread_attr_setschedparam( - attr: *mut crate::pthread_attr_t, - param: *const sched_param, - ) -> c_int; - - pub fn pthread_attr_getprocessorid_np( - attr: *const crate::pthread_attr_t, - processor_id: *mut c_int, - ) -> c_int; - - pub fn pthread_attr_setprocessorid_np( - attr: *mut crate::pthread_attr_t, - processor_id: c_int, - ) -> c_int; - - pub fn pthread_getschedparam( - native: crate::pthread_t, - policy: *mut c_int, - param: *mut crate::sched_param, - ) -> c_int; - - pub fn pthread_setschedparam( - native: crate::pthread_t, - policy: c_int, - param: *const crate::sched_param, - ) -> c_int; - - pub fn pthread_condattr_getclock( - attr: *const crate::pthread_condattr_t, - clock_id: *mut crate::clockid_t, - ) -> c_int; - - pub fn pthread_condattr_setclock( - attr: *mut crate::pthread_condattr_t, - clock_id: crate::clockid_t, - ) -> c_int; - - pub fn pthread_getprocessorid_np() -> c_int; - - pub fn getrandom(buf: *mut c_void, buflen: size_t, flags: c_uint) -> ssize_t; - - pub fn gethostid() -> c_long; -} - -pub use crate::unix::newlib::generic::dirent; diff --git a/vendor/libc/src/unix/newlib/mod.rs b/vendor/libc/src/unix/newlib/mod.rs deleted file mode 100644 index 0193083f4e63b5..00000000000000 --- a/vendor/libc/src/unix/newlib/mod.rs +++ /dev/null @@ -1,997 +0,0 @@ -use crate::prelude::*; - -pub type blkcnt_t = i32; -pub type blksize_t = i32; - -pub type clockid_t = c_ulong; - -cfg_if! 
{ - if #[cfg(any(target_os = "espidf"))] { - pub type dev_t = c_short; - pub type ino_t = c_ushort; - pub type off_t = c_long; - } else if #[cfg(any(target_os = "vita"))] { - pub type dev_t = c_short; - pub type ino_t = c_ushort; - pub type off_t = c_int; - } else { - pub type dev_t = u32; - pub type ino_t = u32; - pub type off_t = i64; - } -} - -pub type fsblkcnt_t = u64; -pub type fsfilcnt_t = u32; -pub type id_t = u32; -pub type key_t = c_int; -pub type loff_t = c_longlong; -pub type mode_t = c_uint; -pub type nfds_t = u32; -pub type nlink_t = c_ushort; -pub type pthread_t = c_ulong; -pub type pthread_key_t = c_uint; -pub type rlim_t = u32; - -cfg_if! { - if #[cfg(target_os = "horizon")] { - pub type sa_family_t = u16; - } else { - pub type sa_family_t = u8; - } -} - -pub type socklen_t = u32; -pub type speed_t = u32; -pub type suseconds_t = i32; -cfg_if! { - if #[cfg(target_os = "espidf")] { - pub type tcflag_t = u16; - } else { - pub type tcflag_t = c_uint; - } -} -pub type useconds_t = u32; - -cfg_if! { - if #[cfg(any( - target_os = "horizon", - all(target_os = "espidf", not(espidf_time32)) - ))] { - pub type time_t = c_longlong; - } else { - pub type time_t = i32; - } -} - -cfg_if! { - if #[cfg(not(target_os = "horizon"))] { - s! { - pub struct hostent { - pub h_name: *mut c_char, - pub h_aliases: *mut *mut c_char, - pub h_addrtype: c_int, - pub h_length: c_int, - pub h_addr_list: *mut *mut c_char, - pub h_addr: *mut c_char, - } - } - } -} - -s! { - // The order of the `ai_addr` field in this struct is crucial - // for converting between the Rust and C types. - pub struct addrinfo { - pub ai_flags: c_int, - pub ai_family: c_int, - pub ai_socktype: c_int, - pub ai_protocol: c_int, - pub ai_addrlen: socklen_t, - - #[cfg(target_os = "espidf")] - pub ai_addr: *mut sockaddr, - - pub ai_canonname: *mut c_char, - - #[cfg(not(any( - target_os = "espidf", - all(target_arch = "powerpc", target_vendor = "nintendo") - )))] - pub ai_addr: *mut sockaddr, - - pub ai_next: *mut addrinfo, - } - - pub struct ip_mreq { - pub imr_multiaddr: in_addr, - pub imr_interface: in_addr, - } - - pub struct linger { - pub l_onoff: c_int, - pub l_linger: c_int, - } - - pub struct in_addr { - pub s_addr: crate::in_addr_t, - } - - pub struct pollfd { - pub fd: c_int, - pub events: c_int, - pub revents: c_int, - } - - pub struct lconv { - pub decimal_point: *mut c_char, - pub thousands_sep: *mut c_char, - pub grouping: *mut c_char, - pub int_curr_symbol: *mut c_char, - pub currency_symbol: *mut c_char, - pub mon_decimal_point: *mut c_char, - pub mon_thousands_sep: *mut c_char, - pub mon_grouping: *mut c_char, - pub positive_sign: *mut c_char, - pub negative_sign: *mut c_char, - pub int_frac_digits: c_char, - pub frac_digits: c_char, - pub p_cs_precedes: c_char, - pub p_sep_by_space: c_char, - pub n_cs_precedes: c_char, - pub n_sep_by_space: c_char, - pub p_sign_posn: c_char, - pub n_sign_posn: c_char, - pub int_n_cs_precedes: c_char, - pub int_n_sep_by_space: c_char, - pub int_n_sign_posn: c_char, - pub int_p_cs_precedes: c_char, - pub int_p_sep_by_space: c_char, - pub int_p_sign_posn: c_char, - } - - pub struct tm { - pub tm_sec: c_int, - pub tm_min: c_int, - pub tm_hour: c_int, - pub tm_mday: c_int, - pub tm_mon: c_int, - pub tm_year: c_int, - pub tm_wday: c_int, - pub tm_yday: c_int, - pub tm_isdst: c_int, - } - - pub struct statvfs { - pub f_bsize: c_ulong, - pub f_frsize: c_ulong, - pub f_blocks: fsblkcnt_t, - pub f_bfree: fsblkcnt_t, - pub f_bavail: fsblkcnt_t, - pub f_files: fsfilcnt_t, - pub f_ffree: 
fsfilcnt_t, - pub f_favail: fsfilcnt_t, - pub f_fsid: c_ulong, - pub f_flag: c_ulong, - pub f_namemax: c_ulong, - } - - pub struct sigaction { - pub sa_handler: extern "C" fn(arg1: c_int), - pub sa_mask: sigset_t, - pub sa_flags: c_int, - } - - pub struct stack_t { - pub ss_sp: *mut c_void, - pub ss_flags: c_int, - pub ss_size: usize, - } - - pub struct fd_set { - // Unverified - fds_bits: [c_ulong; FD_SETSIZE as usize / ULONG_SIZE], - } - - pub struct passwd { - // Unverified - pub pw_name: *mut c_char, - pub pw_passwd: *mut c_char, - pub pw_uid: crate::uid_t, - pub pw_gid: crate::gid_t, - pub pw_gecos: *mut c_char, - pub pw_dir: *mut c_char, - pub pw_shell: *mut c_char, - } - - pub struct termios { - // Unverified - pub c_iflag: crate::tcflag_t, - pub c_oflag: crate::tcflag_t, - pub c_cflag: crate::tcflag_t, - pub c_lflag: crate::tcflag_t, - pub c_line: crate::cc_t, - pub c_cc: [crate::cc_t; crate::NCCS], - #[cfg(target_os = "espidf")] - pub c_ispeed: u32, - #[cfg(target_os = "espidf")] - pub c_ospeed: u32, - } - - pub struct sem_t { - // Unverified - __size: [c_char; 16], - } - - pub struct Dl_info { - // Unverified - pub dli_fname: *const c_char, - pub dli_fbase: *mut c_void, - pub dli_sname: *const c_char, - pub dli_saddr: *mut c_void, - } - - pub struct utsname { - // Unverified - pub sysname: [c_char; 65], - pub nodename: [c_char; 65], - pub release: [c_char; 65], - pub version: [c_char; 65], - pub machine: [c_char; 65], - pub domainname: [c_char; 65], - } - - pub struct cpu_set_t { - // Unverified - bits: [u32; 32], - } - - pub struct pthread_attr_t { - // Unverified - #[cfg(not(target_os = "espidf"))] - __size: [u8; __SIZEOF_PTHREAD_ATTR_T], - #[cfg(target_os = "espidf")] - pub is_initialized: i32, - #[cfg(target_os = "espidf")] - pub stackaddr: *mut c_void, - #[cfg(target_os = "espidf")] - pub stacksize: i32, - #[cfg(target_os = "espidf")] - pub contentionscope: i32, - #[cfg(target_os = "espidf")] - pub inheritsched: i32, - #[cfg(target_os = "espidf")] - pub schedpolicy: i32, - #[cfg(target_os = "espidf")] - pub schedparam: i32, - #[cfg(target_os = "espidf")] - pub detachstate: i32, - } - - pub struct pthread_rwlockattr_t { - // Unverified - __size: [u8; __SIZEOF_PTHREAD_RWLOCKATTR_T], - } - - #[cfg_attr( - all( - target_pointer_width = "32", - any(target_arch = "mips", target_arch = "arm", target_arch = "powerpc") - ), - repr(align(4)) - )] - #[cfg_attr( - any( - target_pointer_width = "64", - not(any(target_arch = "mips", target_arch = "arm", target_arch = "powerpc")) - ), - repr(align(8)) - )] - pub struct pthread_mutex_t { - // Unverified - size: [u8; crate::__SIZEOF_PTHREAD_MUTEX_T], - } - - #[cfg_attr( - all( - target_pointer_width = "32", - any(target_arch = "mips", target_arch = "arm", target_arch = "powerpc") - ), - repr(align(4)) - )] - #[cfg_attr( - any( - target_pointer_width = "64", - not(any(target_arch = "mips", target_arch = "arm", target_arch = "powerpc")) - ), - repr(align(8)) - )] - pub struct pthread_rwlock_t { - // Unverified - size: [u8; crate::__SIZEOF_PTHREAD_RWLOCK_T], - } - - #[cfg_attr( - any( - target_pointer_width = "32", - target_arch = "x86_64", - target_arch = "powerpc64", - target_arch = "mips64", - target_arch = "s390x", - target_arch = "sparc64" - ), - repr(align(4)) - )] - #[cfg_attr( - not(any( - target_pointer_width = "32", - target_arch = "x86_64", - target_arch = "powerpc64", - target_arch = "mips64", - target_arch = "s390x", - target_arch = "sparc64" - )), - repr(align(8)) - )] - pub struct pthread_mutexattr_t { - // Unverified - size: 
[u8; crate::__SIZEOF_PTHREAD_MUTEXATTR_T], - } - - #[repr(align(8))] - pub struct pthread_cond_t { - // Unverified - size: [u8; crate::__SIZEOF_PTHREAD_COND_T], - } - - #[repr(align(4))] - pub struct pthread_condattr_t { - // Unverified - size: [u8; crate::__SIZEOF_PTHREAD_CONDATTR_T], - } -} - -// unverified constants -pub const PTHREAD_MUTEX_INITIALIZER: pthread_mutex_t = pthread_mutex_t { - size: [__PTHREAD_INITIALIZER_BYTE; __SIZEOF_PTHREAD_MUTEX_T], -}; -pub const PTHREAD_COND_INITIALIZER: pthread_cond_t = pthread_cond_t { - size: [__PTHREAD_INITIALIZER_BYTE; __SIZEOF_PTHREAD_COND_T], -}; -pub const PTHREAD_RWLOCK_INITIALIZER: pthread_rwlock_t = pthread_rwlock_t { - size: [__PTHREAD_INITIALIZER_BYTE; __SIZEOF_PTHREAD_RWLOCK_T], -}; - -cfg_if! { - if #[cfg(target_os = "espidf")] { - pub const NCCS: usize = 11; - } else { - pub const NCCS: usize = 32; - } -} - -cfg_if! { - if #[cfg(target_os = "espidf")] { - const __PTHREAD_INITIALIZER_BYTE: u8 = 0xff; - pub const __SIZEOF_PTHREAD_ATTR_T: usize = 32; - pub const __SIZEOF_PTHREAD_MUTEX_T: usize = 4; - pub const __SIZEOF_PTHREAD_MUTEXATTR_T: usize = 12; - pub const __SIZEOF_PTHREAD_COND_T: usize = 4; - pub const __SIZEOF_PTHREAD_CONDATTR_T: usize = 8; - pub const __SIZEOF_PTHREAD_RWLOCK_T: usize = 4; - pub const __SIZEOF_PTHREAD_RWLOCKATTR_T: usize = 12; - pub const __SIZEOF_PTHREAD_BARRIER_T: usize = 32; - } else if #[cfg(target_os = "vita")] { - const __PTHREAD_INITIALIZER_BYTE: u8 = 0xff; - pub const __SIZEOF_PTHREAD_ATTR_T: usize = 4; - pub const __SIZEOF_PTHREAD_MUTEX_T: usize = 4; - pub const __SIZEOF_PTHREAD_MUTEXATTR_T: usize = 4; - pub const __SIZEOF_PTHREAD_COND_T: usize = 4; - pub const __SIZEOF_PTHREAD_CONDATTR_T: usize = 4; - pub const __SIZEOF_PTHREAD_RWLOCK_T: usize = 4; - pub const __SIZEOF_PTHREAD_RWLOCKATTR_T: usize = 4; - pub const __SIZEOF_PTHREAD_BARRIER_T: usize = 4; - } else if #[cfg(target_os = "rtems")] { - const __PTHREAD_INITIALIZER_BYTE: u8 = 0x00; - pub const __SIZEOF_PTHREAD_ATTR_T: usize = 96; - pub const __SIZEOF_PTHREAD_MUTEX_T: usize = 64; - pub const __SIZEOF_PTHREAD_MUTEXATTR_T: usize = 24; - pub const __SIZEOF_PTHREAD_COND_T: usize = 28; - pub const __SIZEOF_PTHREAD_CONDATTR_T: usize = 24; - pub const __SIZEOF_PTHREAD_RWLOCK_T: usize = 32; - pub const __SIZEOF_PTHREAD_RWLOCKATTR_T: usize = 8; - pub const __SIZEOF_PTHREAD_BARRIER_T: usize = 32; - } else { - const __PTHREAD_INITIALIZER_BYTE: u8 = 0; - pub const __SIZEOF_PTHREAD_ATTR_T: usize = 56; - pub const __SIZEOF_PTHREAD_MUTEX_T: usize = 40; - pub const __SIZEOF_PTHREAD_MUTEXATTR_T: usize = 4; - pub const __SIZEOF_PTHREAD_COND_T: usize = 48; - pub const __SIZEOF_PTHREAD_CONDATTR_T: usize = 4; - pub const __SIZEOF_PTHREAD_RWLOCK_T: usize = 56; - pub const __SIZEOF_PTHREAD_RWLOCKATTR_T: usize = 8; - pub const __SIZEOF_PTHREAD_BARRIER_T: usize = 32; - } -} - -pub const __SIZEOF_PTHREAD_BARRIERATTR_T: usize = 4; -pub const __PTHREAD_MUTEX_HAVE_PREV: usize = 1; -pub const __PTHREAD_RWLOCK_INT_FLAGS_SHARED: usize = 1; -pub const PTHREAD_MUTEX_NORMAL: c_int = 0; -pub const PTHREAD_MUTEX_RECURSIVE: c_int = 1; -pub const PTHREAD_MUTEX_ERRORCHECK: c_int = 2; - -cfg_if! 
{ - if #[cfg(any(target_os = "horizon", target_os = "espidf"))] { - pub const FD_SETSIZE: usize = 64; - } else if #[cfg(target_os = "vita")] { - pub const FD_SETSIZE: usize = 256; - } else { - pub const FD_SETSIZE: usize = 1024; - } -} -// intentionally not public, only used for fd_set -const ULONG_SIZE: usize = 32; - -// Other constants -pub const EPERM: c_int = 1; -pub const ENOENT: c_int = 2; -pub const ESRCH: c_int = 3; -pub const EINTR: c_int = 4; -pub const EIO: c_int = 5; -pub const ENXIO: c_int = 6; -pub const E2BIG: c_int = 7; -pub const ENOEXEC: c_int = 8; -pub const EBADF: c_int = 9; -pub const ECHILD: c_int = 10; -pub const EAGAIN: c_int = 11; -pub const ENOMEM: c_int = 12; -pub const EACCES: c_int = 13; -pub const EFAULT: c_int = 14; -pub const EBUSY: c_int = 16; -pub const EEXIST: c_int = 17; -pub const EXDEV: c_int = 18; -pub const ENODEV: c_int = 19; -pub const ENOTDIR: c_int = 20; -pub const EISDIR: c_int = 21; -pub const EINVAL: c_int = 22; -pub const ENFILE: c_int = 23; -pub const EMFILE: c_int = 24; -pub const ENOTTY: c_int = 25; -pub const ETXTBSY: c_int = 26; -pub const EFBIG: c_int = 27; -pub const ENOSPC: c_int = 28; -pub const ESPIPE: c_int = 29; -pub const EROFS: c_int = 30; -pub const EMLINK: c_int = 31; -pub const EPIPE: c_int = 32; -pub const EDOM: c_int = 33; -pub const ERANGE: c_int = 34; -pub const ENOMSG: c_int = 35; -pub const EIDRM: c_int = 36; -pub const EDEADLK: c_int = 45; -pub const ENOLCK: c_int = 46; -pub const ENOSTR: c_int = 60; -pub const ENODATA: c_int = 61; -pub const ETIME: c_int = 62; -pub const ENOSR: c_int = 63; -pub const ENOLINK: c_int = 67; -pub const EPROTO: c_int = 71; -pub const EMULTIHOP: c_int = 74; -pub const EBADMSG: c_int = 77; -pub const EFTYPE: c_int = 79; -pub const ENOSYS: c_int = 88; -pub const ENOTEMPTY: c_int = 90; -pub const ENAMETOOLONG: c_int = 91; -pub const ELOOP: c_int = 92; -pub const EOPNOTSUPP: c_int = 95; -pub const EPFNOSUPPORT: c_int = 96; -pub const ECONNRESET: c_int = 104; -pub const ENOBUFS: c_int = 105; -pub const EAFNOSUPPORT: c_int = 106; -pub const EPROTOTYPE: c_int = 107; -pub const ENOTSOCK: c_int = 108; -pub const ENOPROTOOPT: c_int = 109; -pub const ECONNREFUSED: c_int = 111; -pub const EADDRINUSE: c_int = 112; -pub const ECONNABORTED: c_int = 113; -pub const ENETUNREACH: c_int = 114; -pub const ENETDOWN: c_int = 115; -pub const ETIMEDOUT: c_int = 116; -pub const EHOSTDOWN: c_int = 117; -pub const EHOSTUNREACH: c_int = 118; -pub const EINPROGRESS: c_int = 119; -pub const EALREADY: c_int = 120; -pub const EDESTADDRREQ: c_int = 121; -pub const EMSGSIZE: c_int = 122; -pub const EPROTONOSUPPORT: c_int = 123; -pub const EADDRNOTAVAIL: c_int = 125; -pub const ENETRESET: c_int = 126; -pub const EISCONN: c_int = 127; -pub const ENOTCONN: c_int = 128; -pub const ETOOMANYREFS: c_int = 129; -pub const EDQUOT: c_int = 132; -pub const ESTALE: c_int = 133; -pub const ENOTSUP: c_int = 134; -pub const EILSEQ: c_int = 138; -pub const EOVERFLOW: c_int = 139; -pub const ECANCELED: c_int = 140; -pub const ENOTRECOVERABLE: c_int = 141; -pub const EOWNERDEAD: c_int = 142; -pub const EWOULDBLOCK: c_int = 11; - -pub const F_DUPFD: c_int = 0; -pub const F_GETFD: c_int = 1; -pub const F_SETFD: c_int = 2; -pub const F_GETFL: c_int = 3; -pub const F_SETFL: c_int = 4; -pub const F_GETOWN: c_int = 5; -pub const F_SETOWN: c_int = 6; -pub const F_GETLK: c_int = 7; -pub const F_SETLK: c_int = 8; -pub const F_SETLKW: c_int = 9; -pub const F_RGETLK: c_int = 10; -pub const F_RSETLK: c_int = 11; -pub const F_CNVT: c_int = 12; -pub const 
F_RSETLKW: c_int = 13; -pub const F_DUPFD_CLOEXEC: c_int = 14; - -pub const O_RDONLY: c_int = 0; -pub const O_WRONLY: c_int = 1; -pub const O_RDWR: c_int = 2; -pub const O_APPEND: c_int = 8; -pub const O_CREAT: c_int = 512; -pub const O_TRUNC: c_int = 1024; -pub const O_EXCL: c_int = 2048; -pub const O_SYNC: c_int = 8192; -pub const O_NONBLOCK: c_int = 16384; - -pub const O_ACCMODE: c_int = 3; -cfg_if! { - if #[cfg(target_os = "espidf")] { - pub const O_CLOEXEC: c_int = 0x40000; - } else { - pub const O_CLOEXEC: c_int = 0x80000; - } -} - -pub const RTLD_LAZY: c_int = 0x1; - -pub const STDIN_FILENO: c_int = 0; -pub const STDOUT_FILENO: c_int = 1; -pub const STDERR_FILENO: c_int = 2; - -pub const SEEK_SET: c_int = 0; -pub const SEEK_CUR: c_int = 1; -pub const SEEK_END: c_int = 2; - -pub const FIOCLEX: c_ulong = 0x20006601; -pub const FIONCLEX: c_ulong = 0x20006602; - -pub const S_BLKSIZE: mode_t = 1024; -pub const S_IREAD: mode_t = 0o0400; -pub const S_IWRITE: mode_t = 0o0200; -pub const S_IEXEC: mode_t = 0o0100; -pub const S_ENFMT: mode_t = 0o2000; -pub const S_IFMT: mode_t = 0o17_0000; -pub const S_IFDIR: mode_t = 0o4_0000; -pub const S_IFCHR: mode_t = 0o2_0000; -pub const S_IFBLK: mode_t = 0o6_0000; -pub const S_IFREG: mode_t = 0o10_0000; -pub const S_IFLNK: mode_t = 0o12_0000; -pub const S_IFSOCK: mode_t = 0o14_0000; -pub const S_IFIFO: mode_t = 0o1_0000; -pub const S_IRUSR: mode_t = 0o0400; -pub const S_IWUSR: mode_t = 0o0200; -pub const S_IXUSR: mode_t = 0o0100; -pub const S_IRGRP: mode_t = 0o0040; -pub const S_IWGRP: mode_t = 0o0020; -pub const S_IXGRP: mode_t = 0o0010; -pub const S_IROTH: mode_t = 0o0004; -pub const S_IWOTH: mode_t = 0o0002; -pub const S_IXOTH: mode_t = 0o0001; - -pub const SOL_TCP: c_int = 6; - -pub const PF_UNSPEC: c_int = 0; -pub const PF_INET: c_int = 2; -cfg_if! 
{
-    if #[cfg(target_os = "espidf")] {
-        pub const PF_INET6: c_int = 10;
-    } else {
-        pub const PF_INET6: c_int = 23;
-    }
-}
-
-pub const AF_UNSPEC: c_int = 0;
-pub const AF_INET: c_int = 2;
-
-pub const CLOCK_REALTIME: crate::clockid_t = 1;
-pub const CLOCK_MONOTONIC: crate::clockid_t = 4;
-pub const CLOCK_BOOTTIME: crate::clockid_t = 4;
-
-pub const SOCK_STREAM: c_int = 1;
-pub const SOCK_DGRAM: c_int = 2;
-
-pub const SHUT_RD: c_int = 0;
-pub const SHUT_WR: c_int = 1;
-pub const SHUT_RDWR: c_int = 2;
-
-pub const SO_BINTIME: c_int = 0x2000;
-pub const SO_NO_OFFLOAD: c_int = 0x4000;
-pub const SO_NO_DDP: c_int = 0x8000;
-pub const SO_REUSEPORT_LB: c_int = 0x10000;
-pub const SO_LABEL: c_int = 0x1009;
-pub const SO_PEERLABEL: c_int = 0x1010;
-pub const SO_LISTENQLIMIT: c_int = 0x1011;
-pub const SO_LISTENQLEN: c_int = 0x1012;
-pub const SO_LISTENINCQLEN: c_int = 0x1013;
-pub const SO_SETFIB: c_int = 0x1014;
-pub const SO_USER_COOKIE: c_int = 0x1015;
-pub const SO_PROTOCOL: c_int = 0x1016;
-pub const SO_PROTOTYPE: c_int = SO_PROTOCOL;
-pub const SO_VENDOR: c_int = 0x80000000;
-pub const SO_DEBUG: c_int = 0x01;
-pub const SO_ACCEPTCONN: c_int = 0x0002;
-pub const SO_REUSEADDR: c_int = 0x0004;
-pub const SO_KEEPALIVE: c_int = 0x0008;
-pub const SO_DONTROUTE: c_int = 0x0010;
-pub const SO_BROADCAST: c_int = 0x0020;
-pub const SO_USELOOPBACK: c_int = 0x0040;
-pub const SO_LINGER: c_int = 0x0080;
-pub const SO_OOBINLINE: c_int = 0x0100;
-pub const SO_REUSEPORT: c_int = 0x0200;
-pub const SO_TIMESTAMP: c_int = 0x0400;
-pub const SO_NOSIGPIPE: c_int = 0x0800;
-pub const SO_ACCEPTFILTER: c_int = 0x1000;
-pub const SO_SNDBUF: c_int = 0x1001;
-pub const SO_RCVBUF: c_int = 0x1002;
-pub const SO_SNDLOWAT: c_int = 0x1003;
-pub const SO_RCVLOWAT: c_int = 0x1004;
-pub const SO_SNDTIMEO: c_int = 0x1005;
-pub const SO_RCVTIMEO: c_int = 0x1006;
-cfg_if! {
-    if #[cfg(target_os = "horizon")] {
-        pub const SO_ERROR: c_int = 0x1009;
-    } else {
-        pub const SO_ERROR: c_int = 0x1007;
-    }
-}
-pub const SO_TYPE: c_int = 0x1008;
-
-pub const SOCK_CLOEXEC: c_int = O_CLOEXEC;
-
-pub const INET_ADDRSTRLEN: c_int = 16;
-
-// https://github.com/bminor/newlib/blob/HEAD/newlib/libc/sys/linux/include/net/if.h#L121
-pub const IFF_UP: c_int = 0x1; // interface is up
-pub const IFF_BROADCAST: c_int = 0x2; // broadcast address valid
-pub const IFF_DEBUG: c_int = 0x4; // turn on debugging
-pub const IFF_LOOPBACK: c_int = 0x8; // is a loopback net
-pub const IFF_POINTOPOINT: c_int = 0x10; // interface is point-to-point link
-pub const IFF_NOTRAILERS: c_int = 0x20; // avoid use of trailers
-pub const IFF_RUNNING: c_int = 0x40; // resources allocated
-pub const IFF_NOARP: c_int = 0x80; // no address resolution protocol
-pub const IFF_PROMISC: c_int = 0x100; // receive all packets
-pub const IFF_ALLMULTI: c_int = 0x200; // receive all multicast packets
-pub const IFF_OACTIVE: c_int = 0x400; // transmission in progress
-pub const IFF_SIMPLEX: c_int = 0x800; // can't hear own transmissions
-pub const IFF_LINK0: c_int = 0x1000; // per link layer defined bit
-pub const IFF_LINK1: c_int = 0x2000; // per link layer defined bit
-pub const IFF_LINK2: c_int = 0x4000; // per link layer defined bit
-pub const IFF_ALTPHYS: c_int = IFF_LINK2; // use alternate physical connection
-pub const IFF_MULTICAST: c_int = 0x8000; // supports multicast
-
-cfg_if!
{ - if #[cfg(target_os = "vita")] { - pub const TCP_NODELAY: c_int = 1; - pub const TCP_MAXSEG: c_int = 2; - } else if #[cfg(target_os = "espidf")] { - pub const TCP_NODELAY: c_int = 1; - pub const TCP_MAXSEG: c_int = 8194; - } else { - pub const TCP_NODELAY: c_int = 8193; - pub const TCP_MAXSEG: c_int = 8194; - } -} - -pub const TCP_NOPUSH: c_int = 4; -pub const TCP_NOOPT: c_int = 8; -cfg_if! { - if #[cfg(target_os = "espidf")] { - pub const TCP_KEEPIDLE: c_int = 3; - pub const TCP_KEEPINTVL: c_int = 4; - pub const TCP_KEEPCNT: c_int = 5; - } else { - pub const TCP_KEEPIDLE: c_int = 256; - pub const TCP_KEEPINTVL: c_int = 512; - pub const TCP_KEEPCNT: c_int = 1024; - } -} - -cfg_if! { - if #[cfg(target_os = "horizon")] { - pub const IP_TOS: c_int = 7; - } else if #[cfg(target_os = "espidf")] { - pub const IP_TOS: c_int = 1; - } else { - pub const IP_TOS: c_int = 3; - } -} -cfg_if! { - if #[cfg(target_os = "vita")] { - pub const IP_TTL: c_int = 4; - } else if #[cfg(target_os = "espidf")] { - pub const IP_TTL: c_int = 2; - } else { - pub const IP_TTL: c_int = 8; - } -} - -cfg_if! { - if #[cfg(target_os = "espidf")] { - pub const IP_MULTICAST_IF: c_int = 6; - pub const IP_MULTICAST_TTL: c_int = 5; - pub const IP_MULTICAST_LOOP: c_int = 7; - } else { - pub const IP_MULTICAST_IF: c_int = 9; - pub const IP_MULTICAST_TTL: c_int = 10; - pub const IP_MULTICAST_LOOP: c_int = 11; - } -} - -cfg_if! { - if #[cfg(target_os = "vita")] { - pub const IP_ADD_MEMBERSHIP: c_int = 12; - pub const IP_DROP_MEMBERSHIP: c_int = 13; - } else if #[cfg(target_os = "espidf")] { - pub const IP_ADD_MEMBERSHIP: c_int = 3; - pub const IP_DROP_MEMBERSHIP: c_int = 4; - } else { - pub const IP_ADD_MEMBERSHIP: c_int = 11; - pub const IP_DROP_MEMBERSHIP: c_int = 12; - } -} -pub const IPV6_UNICAST_HOPS: c_int = 4; -cfg_if! { - if #[cfg(target_os = "espidf")] { - pub const IPV6_MULTICAST_IF: c_int = 768; - pub const IPV6_MULTICAST_HOPS: c_int = 769; - pub const IPV6_MULTICAST_LOOP: c_int = 770; - } else { - pub const IPV6_MULTICAST_IF: c_int = 9; - pub const IPV6_MULTICAST_HOPS: c_int = 10; - pub const IPV6_MULTICAST_LOOP: c_int = 11; - } -} -pub const IPV6_V6ONLY: c_int = 27; -pub const IPV6_JOIN_GROUP: c_int = 12; -pub const IPV6_LEAVE_GROUP: c_int = 13; -pub const IPV6_ADD_MEMBERSHIP: c_int = 12; -pub const IPV6_DROP_MEMBERSHIP: c_int = 13; - -cfg_if! { - if #[cfg(target_os = "espidf")] { - pub const HOST_NOT_FOUND: c_int = 210; - pub const NO_DATA: c_int = 211; - pub const NO_RECOVERY: c_int = 212; - pub const TRY_AGAIN: c_int = 213; - } else { - pub const HOST_NOT_FOUND: c_int = 1; - pub const NO_DATA: c_int = 2; - pub const NO_RECOVERY: c_int = 3; - pub const TRY_AGAIN: c_int = 4; - } -} -pub const NO_ADDRESS: c_int = 2; - -pub const AI_PASSIVE: c_int = 1; -pub const AI_CANONNAME: c_int = 2; -pub const AI_NUMERICHOST: c_int = 4; -cfg_if! { - if #[cfg(target_os = "espidf")] { - pub const AI_NUMERICSERV: c_int = 8; - pub const AI_ADDRCONFIG: c_int = 64; - } else { - pub const AI_NUMERICSERV: c_int = 0; - pub const AI_ADDRCONFIG: c_int = 0; - } -} - -pub const NI_MAXHOST: c_int = 1025; -pub const NI_MAXSERV: c_int = 32; -pub const NI_NOFQDN: c_int = 1; -pub const NI_NUMERICHOST: c_int = 2; -pub const NI_NAMEREQD: c_int = 4; -cfg_if! { - if #[cfg(target_os = "espidf")] { - pub const NI_NUMERICSERV: c_int = 8; - pub const NI_DGRAM: c_int = 16; - } else { - pub const NI_NUMERICSERV: c_int = 0; - pub const NI_DGRAM: c_int = 0; - } -} - -cfg_if! 
{ - // Defined in vita/mod.rs for "vita" - if #[cfg(target_os = "espidf")] { - pub const EAI_FAMILY: c_int = 204; - pub const EAI_MEMORY: c_int = 203; - pub const EAI_NONAME: c_int = 200; - pub const EAI_SOCKTYPE: c_int = 10; - } else if #[cfg(not(target_os = "vita"))] { - pub const EAI_FAMILY: c_int = -303; - pub const EAI_MEMORY: c_int = -304; - pub const EAI_NONAME: c_int = -305; - pub const EAI_SOCKTYPE: c_int = -307; - } -} - -pub const EXIT_SUCCESS: c_int = 0; -pub const EXIT_FAILURE: c_int = 1; - -pub const PRIO_PROCESS: c_int = 0; -pub const PRIO_PGRP: c_int = 1; -pub const PRIO_USER: c_int = 2; - -f! { - pub fn FD_CLR(fd: c_int, set: *mut fd_set) -> () { - let bits = size_of_val(&(*set).fds_bits[0]) * 8; - let fd = fd as usize; - (*set).fds_bits[fd / bits] &= !(1 << (fd % bits)); - return; - } - - pub fn FD_ISSET(fd: c_int, set: *const fd_set) -> bool { - let bits = size_of_val(&(*set).fds_bits[0]) * 8; - let fd = fd as usize; - return ((*set).fds_bits[fd / bits] & (1 << (fd % bits))) != 0; - } - - pub fn FD_SET(fd: c_int, set: *mut fd_set) -> () { - let bits = size_of_val(&(*set).fds_bits[0]) * 8; - let fd = fd as usize; - (*set).fds_bits[fd / bits] |= 1 << (fd % bits); - return; - } - - pub fn FD_ZERO(set: *mut fd_set) -> () { - for slot in (*set).fds_bits.iter_mut() { - *slot = 0; - } - } -} - -extern "C" { - pub fn getrlimit(resource: c_int, rlim: *mut crate::rlimit) -> c_int; - pub fn setrlimit(resource: c_int, rlim: *const crate::rlimit) -> c_int; - - #[cfg_attr(target_os = "linux", link_name = "__xpg_strerror_r")] - pub fn strerror_r(errnum: c_int, buf: *mut c_char, buflen: size_t) -> c_int; - - pub fn sem_destroy(sem: *mut sem_t) -> c_int; - pub fn sem_init(sem: *mut sem_t, pshared: c_int, value: c_uint) -> c_int; - - pub fn abs(i: c_int) -> c_int; - pub fn labs(i: c_long) -> c_long; - pub fn rand() -> c_int; - pub fn srand(seed: c_uint); - - #[cfg(not(all(target_arch = "powerpc", target_vendor = "nintendo")))] - #[cfg_attr(target_os = "espidf", link_name = "lwip_bind")] - pub fn bind(fd: c_int, addr: *const sockaddr, len: socklen_t) -> c_int; - pub fn clock_settime(clock_id: crate::clockid_t, tp: *const crate::timespec) -> c_int; - pub fn clock_gettime(clock_id: crate::clockid_t, tp: *mut crate::timespec) -> c_int; - pub fn clock_getres(clock_id: crate::clockid_t, res: *mut crate::timespec) -> c_int; - #[cfg_attr(target_os = "espidf", link_name = "lwip_close")] - pub fn closesocket(sockfd: c_int) -> c_int; - pub fn ioctl(fd: c_int, request: c_ulong, ...) 
-> c_int;
-    #[cfg(not(all(target_arch = "powerpc", target_vendor = "nintendo")))]
-    #[cfg_attr(target_os = "espidf", link_name = "lwip_recvfrom")]
-    pub fn recvfrom(
-        fd: c_int,
-        buf: *mut c_void,
-        n: usize,
-        flags: c_int,
-        addr: *mut sockaddr,
-        addr_len: *mut socklen_t,
-    ) -> isize;
-    #[cfg(not(all(target_arch = "powerpc", target_vendor = "nintendo")))]
-    pub fn getnameinfo(
-        sa: *const sockaddr,
-        salen: socklen_t,
-        host: *mut c_char,
-        hostlen: socklen_t,
-        serv: *mut c_char,
-        servlen: socklen_t,
-        flags: c_int,
-    ) -> c_int;
-    pub fn memalign(align: size_t, size: size_t) -> *mut c_void;
-
-    // DIFF(main): changed to `*const *mut` in e77f551de9
-    pub fn fexecve(fd: c_int, argv: *const *const c_char, envp: *const *const c_char) -> c_int;
-
-    pub fn gettimeofday(tp: *mut crate::timeval, tz: *mut c_void) -> c_int;
-    pub fn getgrgid_r(
-        gid: crate::gid_t,
-        grp: *mut crate::group,
-        buf: *mut c_char,
-        buflen: size_t,
-        result: *mut *mut crate::group,
-    ) -> c_int;
-    pub fn sigaltstack(ss: *const stack_t, oss: *mut stack_t) -> c_int;
-    pub fn sem_close(sem: *mut sem_t) -> c_int;
-    pub fn getdtablesize() -> c_int;
-    pub fn getgrnam_r(
-        name: *const c_char,
-        grp: *mut crate::group,
-        buf: *mut c_char,
-        buflen: size_t,
-        result: *mut *mut crate::group,
-    ) -> c_int;
-    pub fn pthread_sigmask(how: c_int, set: *const sigset_t, oldset: *mut sigset_t) -> c_int;
-    pub fn sem_open(name: *const c_char, oflag: c_int, ...) -> *mut sem_t;
-    pub fn getgrnam(name: *const c_char) -> *mut crate::group;
-    pub fn pthread_kill(thread: crate::pthread_t, sig: c_int) -> c_int;
-    pub fn sem_unlink(name: *const c_char) -> c_int;
-    pub fn daemon(nochdir: c_int, noclose: c_int) -> c_int;
-    pub fn getpwnam_r(
-        name: *const c_char,
-        pwd: *mut passwd,
-        buf: *mut c_char,
-        buflen: size_t,
-        result: *mut *mut passwd,
-    ) -> c_int;
-    pub fn getpwuid_r(
-        uid: crate::uid_t,
-        pwd: *mut passwd,
-        buf: *mut c_char,
-        buflen: size_t,
-        result: *mut *mut passwd,
-    ) -> c_int;
-    pub fn sigwait(set: *const sigset_t, sig: *mut c_int) -> c_int;
-    pub fn pthread_atfork(
-        prepare: Option<unsafe extern "C" fn()>,
-        parent: Option<unsafe extern "C" fn()>,
-        child: Option<unsafe extern "C" fn()>,
-    ) -> c_int;
-    pub fn getgrgid(gid: crate::gid_t) -> *mut crate::group;
-    pub fn popen(command: *const c_char, mode: *const c_char) -> *mut crate::FILE;
-    pub fn uname(buf: *mut crate::utsname) -> c_int;
-}
-
-mod generic;
-
-cfg_if! {
-    if #[cfg(target_os = "espidf")] {
-        mod espidf;
-        pub use self::espidf::*;
-    } else if #[cfg(target_os = "horizon")] {
-        mod horizon;
-        pub use self::horizon::*;
-    } else if #[cfg(target_os = "vita")] {
-        mod vita;
-        pub use self::vita::*;
-    } else if #[cfg(target_arch = "arm")] {
-        mod arm;
-        pub use self::arm::*;
-    } else if #[cfg(target_arch = "aarch64")] {
-        mod aarch64;
-        pub use self::aarch64::*;
-    } else if #[cfg(target_arch = "powerpc")] {
-        mod powerpc;
-        pub use self::powerpc::*;
-    } else {
-        // Only tested on ARM so far. Other platforms might have different
-        // definitions for types and constants.
-        pub use target_arch_not_implemented;
-    }
-}
-
-cfg_if!
{ - if #[cfg(target_os = "rtems")] { - mod rtems; - pub use self::rtems::*; - } -} diff --git a/vendor/libc/src/unix/newlib/powerpc/mod.rs b/vendor/libc/src/unix/newlib/powerpc/mod.rs deleted file mode 100644 index c4d4a2ed07c5ee..00000000000000 --- a/vendor/libc/src/unix/newlib/powerpc/mod.rs +++ /dev/null @@ -1,14 +0,0 @@ -use crate::prelude::*; - -pub type clock_t = c_ulong; -pub type wchar_t = c_int; - -pub use crate::unix::newlib::generic::{dirent, sigset_t, stat}; - -// the newlib shipped with devkitPPC does not support the following components: -// - sockaddr -// - AF_INET6 -// - FIONBIO -// - POLL* -// - SOL_SOCKET -// - MSG_* diff --git a/vendor/libc/src/unix/newlib/rtems/mod.rs b/vendor/libc/src/unix/newlib/rtems/mod.rs deleted file mode 100644 index 0e23352744149b..00000000000000 --- a/vendor/libc/src/unix/newlib/rtems/mod.rs +++ /dev/null @@ -1,146 +0,0 @@ -// defined in architecture specific module - -use crate::prelude::*; - -s! { - pub struct sockaddr_un { - pub sun_family: crate::sa_family_t, - pub sun_path: [c_char; 108usize], - } -} - -pub const AF_UNIX: c_int = 1; - -pub const RTLD_DEFAULT: *mut c_void = -2isize as *mut c_void; - -pub const UTIME_OMIT: c_long = -1; -pub const AT_FDCWD: c_int = -2; - -pub const O_DIRECTORY: c_int = 0x200000; -pub const O_NOFOLLOW: c_int = 0x100000; - -pub const AT_EACCESS: c_int = 1; -pub const AT_SYMLINK_NOFOLLOW: c_int = 2; -pub const AT_SYMLINK_FOLLOW: c_int = 4; -pub const AT_REMOVEDIR: c_int = 8; - -// signal.h -pub const SIG_BLOCK: c_int = 1; -pub const SIG_UNBLOCK: c_int = 2; -pub const SIG_SETMASK: c_int = 0; -pub const SIGHUP: c_int = 1; -pub const SIGINT: c_int = 2; -pub const SIGQUIT: c_int = 3; -pub const SIGILL: c_int = 4; -pub const SIGTRAP: c_int = 5; -pub const SIGABRT: c_int = 6; -pub const SIGEMT: c_int = 7; -pub const SIGFPE: c_int = 8; -pub const SIGKILL: c_int = 9; -pub const SIGBUS: c_int = 10; -pub const SIGSEGV: c_int = 11; -pub const SIGSYS: c_int = 12; -pub const SIGPIPE: c_int = 13; -pub const SIGALRM: c_int = 14; -pub const SIGTERM: c_int = 15; -pub const SIGURG: c_int = 16; -pub const SIGSTOP: c_int = 17; -pub const SIGTSTP: c_int = 18; -pub const SIGCONT: c_int = 19; -pub const SIGCHLD: c_int = 20; -pub const SIGCLD: c_int = 20; -pub const SIGTTIN: c_int = 21; -pub const SIGTTOU: c_int = 22; -pub const SIGIO: c_int = 23; -pub const SIGWINCH: c_int = 24; -pub const SIGUSR1: c_int = 25; -pub const SIGUSR2: c_int = 26; -pub const SIGRTMIN: c_int = 27; -pub const SIGRTMAX: c_int = 31; -pub const SIGXCPU: c_int = 24; -pub const SIGXFSZ: c_int = 25; -pub const SIGVTALRM: c_int = 26; -pub const SIGPROF: c_int = 27; - -pub const SA_NOCLDSTOP: c_ulong = 0x00000001; -pub const SA_SIGINFO: c_ulong = 0x00000002; -pub const SA_ONSTACK: c_ulong = 0x00000004; - -pub const EAI_AGAIN: c_int = 2; -pub const EAI_BADFLAGS: c_int = 3; -pub const EAI_FAIL: c_int = 4; -pub const EAI_SERVICE: c_int = 9; -pub const EAI_SYSTEM: c_int = 11; -pub const EAI_OVERFLOW: c_int = 14; - -pub const _SC_PAGESIZE: c_int = 8; -pub const _SC_GETPW_R_SIZE_MAX: c_int = 51; -pub const PTHREAD_STACK_MIN: size_t = 0; - -// sys/wait.h -pub const WNOHANG: c_int = 1; -pub const WUNTRACED: c_int = 2; - -// sys/socket.h -pub const SOMAXCONN: c_int = 128; - -safe_f! 
{ - pub const fn WIFSTOPPED(status: c_int) -> bool { - (status & 0xff) == 0x7f - } - - pub const fn WSTOPSIG(status: c_int) -> c_int { - // (status >> 8) & 0xff - WEXITSTATUS(status) - } - - pub const fn WIFSIGNALED(status: c_int) -> bool { - ((status & 0x7f) > 0) && ((status & 0x7f) < 0x7f) - } - - pub const fn WTERMSIG(status: c_int) -> c_int { - status & 0x7f - } - - pub const fn WIFEXITED(status: c_int) -> bool { - (status & 0xff) == 0 - } - - pub const fn WEXITSTATUS(status: c_int) -> c_int { - (status >> 8) & 0xff - } - - // RTEMS doesn't have native WIFCONTINUED. - pub const fn WIFCONTINUED(_status: c_int) -> bool { - true - } - - // RTEMS doesn't have native WCOREDUMP. - pub const fn WCOREDUMP(_status: c_int) -> bool { - false - } -} - -extern "C" { - pub fn futimens(fd: c_int, times: *const crate::timespec) -> c_int; - pub fn writev(fd: c_int, iov: *const crate::iovec, iovcnt: c_int) -> ssize_t; - pub fn readv(fd: c_int, iov: *const crate::iovec, iovcnt: c_int) -> ssize_t; - - pub fn pthread_create( - native: *mut crate::pthread_t, - attr: *const crate::pthread_attr_t, - f: extern "C" fn(_: *mut c_void) -> *mut c_void, - value: *mut c_void, - ) -> c_int; - - pub fn pthread_condattr_setclock( - attr: *mut crate::pthread_condattr_t, - clock_id: crate::clockid_t, - ) -> c_int; - - pub fn getentropy(buf: *mut c_void, buflen: size_t) -> c_int; - - pub fn arc4random_buf(buf: *mut core::ffi::c_void, nbytes: size_t); - - pub fn setgroups(ngroups: c_int, grouplist: *const crate::gid_t) -> c_int; -} diff --git a/vendor/libc/src/unix/newlib/vita/mod.rs b/vendor/libc/src/unix/newlib/vita/mod.rs deleted file mode 100644 index 62cd300e1d6f0f..00000000000000 --- a/vendor/libc/src/unix/newlib/vita/mod.rs +++ /dev/null @@ -1,235 +0,0 @@ -use crate::off_t; -use crate::prelude::*; - -pub type clock_t = c_long; - -pub type wchar_t = u32; - -pub type sigset_t = c_ulong; - -s! 
{ - pub struct msghdr { - pub msg_name: *mut c_void, - pub msg_namelen: crate::socklen_t, - pub msg_iov: *mut crate::iovec, - pub msg_iovlen: c_int, - pub msg_control: *mut c_void, - pub msg_controllen: crate::socklen_t, - pub msg_flags: c_int, - } - - pub struct sockaddr { - pub sa_len: u8, - pub sa_family: crate::sa_family_t, - pub sa_data: [c_char; 14], - } - - pub struct sockaddr_in6 { - pub sin6_len: u8, - pub sin6_family: crate::sa_family_t, - pub sin6_port: crate::in_port_t, - pub sin6_flowinfo: u32, - pub sin6_addr: crate::in6_addr, - pub sin6_vport: crate::in_port_t, - pub sin6_scope_id: u32, - } - - pub struct sockaddr_in { - pub sin_len: u8, - pub sin_family: crate::sa_family_t, - pub sin_port: crate::in_port_t, - pub sin_addr: crate::in_addr, - pub sin_vport: crate::in_port_t, - pub sin_zero: [u8; 6], - } - - pub struct sockaddr_un { - pub ss_len: u8, - pub sun_family: crate::sa_family_t, - pub sun_path: [c_char; 108usize], - } - - pub struct sockaddr_storage { - pub ss_len: u8, - pub ss_family: crate::sa_family_t, - pub __ss_pad1: [u8; 2], - pub __ss_align: i64, - pub __ss_pad2: [u8; 116], - } - - pub struct sched_param { - pub sched_priority: c_int, - } - - pub struct stat { - pub st_dev: crate::dev_t, - pub st_ino: crate::ino_t, - pub st_mode: crate::mode_t, - pub st_nlink: crate::nlink_t, - pub st_uid: crate::uid_t, - pub st_gid: crate::gid_t, - pub st_rdev: crate::dev_t, - pub st_size: off_t, - pub st_atime: crate::time_t, - pub st_mtime: crate::time_t, - pub st_ctime: crate::time_t, - pub st_blksize: crate::blksize_t, - pub st_blocks: crate::blkcnt_t, - pub st_spare4: [c_long; 2usize], - } - - #[repr(align(8))] - pub struct dirent { - __offset: [u8; 88], - pub d_name: [c_char; 256usize], - __pad: [u8; 8], - } -} - -pub const AF_UNIX: c_int = 1; -pub const AF_INET6: c_int = 24; - -pub const SOCK_RAW: c_int = 3; -pub const SOCK_RDM: c_int = 4; -pub const SOCK_SEQPACKET: c_int = 5; - -pub const SOMAXCONN: c_int = 128; - -pub const FIONBIO: c_ulong = 1; - -pub const POLLIN: c_short = 0x0001; -pub const POLLPRI: c_short = POLLIN; -pub const POLLOUT: c_short = 0x0004; -pub const POLLRDNORM: c_short = POLLIN; -pub const POLLRDBAND: c_short = POLLIN; -pub const POLLWRNORM: c_short = POLLOUT; -pub const POLLWRBAND: c_short = POLLOUT; -pub const POLLERR: c_short = 0x0008; -pub const POLLHUP: c_short = 0x0010; -pub const POLLNVAL: c_short = 0x0020; - -pub const RTLD_DEFAULT: *mut c_void = ptr::null_mut(); - -pub const SOL_SOCKET: c_int = 0xffff; -pub const SO_NONBLOCK: c_int = 0x1100; - -pub const MSG_OOB: c_int = 0x1; -pub const MSG_PEEK: c_int = 0x2; -pub const MSG_DONTROUTE: c_int = 0x4; -pub const MSG_EOR: c_int = 0x8; -pub const MSG_TRUNC: c_int = 0x10; -pub const MSG_CTRUNC: c_int = 0x20; -pub const MSG_WAITALL: c_int = 0x40; -pub const MSG_DONTWAIT: c_int = 0x80; -pub const MSG_BCAST: c_int = 0x100; -pub const MSG_MCAST: c_int = 0x200; - -pub const UTIME_OMIT: c_long = -1; -pub const AT_FDCWD: c_int = -2; - -pub const O_DIRECTORY: c_int = 0x200000; -pub const O_NOFOLLOW: c_int = 0x100000; - -pub const AT_EACCESS: c_int = 1; -pub const AT_SYMLINK_NOFOLLOW: c_int = 2; -pub const AT_SYMLINK_FOLLOW: c_int = 4; -pub const AT_REMOVEDIR: c_int = 8; - -pub const SIGHUP: c_int = 1; -pub const SIGINT: c_int = 2; -pub const SIGQUIT: c_int = 3; -pub const SIGILL: c_int = 4; -pub const SIGTRAP: c_int = 5; -pub const SIGABRT: c_int = 6; -pub const SIGEMT: c_int = 7; -pub const SIGFPE: c_int = 8; -pub const SIGKILL: c_int = 9; -pub const SIGBUS: c_int = 10; -pub const SIGSEGV: c_int = 11; 
-pub const SIGSYS: c_int = 12; -pub const SIGPIPE: c_int = 13; -pub const SIGALRM: c_int = 14; -pub const SIGTERM: c_int = 15; - -pub const EAI_BADFLAGS: c_int = -1; -pub const EAI_NONAME: c_int = -2; -pub const EAI_AGAIN: c_int = -3; -pub const EAI_FAIL: c_int = -4; -pub const EAI_NODATA: c_int = -5; -pub const EAI_FAMILY: c_int = -6; -pub const EAI_SOCKTYPE: c_int = -7; -pub const EAI_SERVICE: c_int = -8; -pub const EAI_ADDRFAMILY: c_int = -9; -pub const EAI_MEMORY: c_int = -10; -pub const EAI_SYSTEM: c_int = -11; -pub const EAI_OVERFLOW: c_int = -12; - -pub const _SC_PAGESIZE: c_int = 8; -pub const _SC_GETPW_R_SIZE_MAX: c_int = 51; -pub const PTHREAD_STACK_MIN: size_t = 32 * 1024; - -pub const IP_HDRINCL: c_int = 2; - -extern "C" { - pub fn futimens(fd: c_int, times: *const crate::timespec) -> c_int; - pub fn writev(fd: c_int, iov: *const crate::iovec, iovcnt: c_int) -> ssize_t; - pub fn readv(fd: c_int, iov: *const crate::iovec, iovcnt: c_int) -> ssize_t; - - pub fn sendmsg(s: c_int, msg: *const crate::msghdr, flags: c_int) -> ssize_t; - pub fn recvmsg(s: c_int, msg: *mut crate::msghdr, flags: c_int) -> ssize_t; - - pub fn pthread_create( - native: *mut crate::pthread_t, - attr: *const crate::pthread_attr_t, - f: extern "C" fn(_: *mut c_void) -> *mut c_void, - value: *mut c_void, - ) -> c_int; - - pub fn pthread_attr_getschedparam( - attr: *const crate::pthread_attr_t, - param: *mut sched_param, - ) -> c_int; - - pub fn pthread_attr_setschedparam( - attr: *mut crate::pthread_attr_t, - param: *const sched_param, - ) -> c_int; - - pub fn pthread_attr_getprocessorid_np( - attr: *const crate::pthread_attr_t, - processor_id: *mut c_int, - ) -> c_int; - - pub fn pthread_attr_setprocessorid_np( - attr: *mut crate::pthread_attr_t, - processor_id: c_int, - ) -> c_int; - - pub fn pthread_getschedparam( - native: crate::pthread_t, - policy: *mut c_int, - param: *mut crate::sched_param, - ) -> c_int; - - pub fn pthread_setschedparam( - native: crate::pthread_t, - policy: c_int, - param: *const crate::sched_param, - ) -> c_int; - - pub fn pthread_condattr_getclock( - attr: *const crate::pthread_condattr_t, - clock_id: *mut crate::clockid_t, - ) -> c_int; - - pub fn pthread_condattr_setclock( - attr: *mut crate::pthread_condattr_t, - clock_id: crate::clockid_t, - ) -> c_int; - - pub fn pthread_getprocessorid_np() -> c_int; - - pub fn getentropy(buf: *mut c_void, buflen: size_t) -> c_int; - - pub fn pipe2(fds: *mut c_int, flags: c_int) -> c_int; -} diff --git a/vendor/libc/src/unix/nto/aarch64.rs b/vendor/libc/src/unix/nto/aarch64.rs deleted file mode 100644 index 559ab6e49a45dc..00000000000000 --- a/vendor/libc/src/unix/nto/aarch64.rs +++ /dev/null @@ -1,35 +0,0 @@ -use crate::prelude::*; - -pub type wchar_t = u32; -pub type time_t = i64; - -s! 
{ - pub struct aarch64_qreg_t { - pub qlo: u64, - pub qhi: u64, - } - - pub struct aarch64_fpu_registers { - pub reg: [crate::aarch64_qreg_t; 32], - pub fpsr: u32, - pub fpcr: u32, - } - - pub struct aarch64_cpu_registers { - pub gpr: [u64; 32], - pub elr: u64, - pub pstate: u64, - } - - #[repr(align(16))] - pub struct mcontext_t { - pub cpu: crate::aarch64_cpu_registers, - pub fpu: crate::aarch64_fpu_registers, - } - - pub struct stack_t { - pub ss_sp: *mut c_void, - pub ss_size: size_t, - pub ss_flags: c_int, - } -} diff --git a/vendor/libc/src/unix/nto/mod.rs b/vendor/libc/src/unix/nto/mod.rs deleted file mode 100644 index 75f5b56902f7f2..00000000000000 --- a/vendor/libc/src/unix/nto/mod.rs +++ /dev/null @@ -1,3406 +0,0 @@ -use crate::prelude::*; - -pub type clock_t = u32; - -pub type sa_family_t = u8; -pub type speed_t = c_uint; -pub type tcflag_t = c_uint; -pub type clockid_t = c_int; -pub type timer_t = c_int; -pub type key_t = c_uint; -pub type id_t = c_int; - -pub type useconds_t = u32; -pub type dev_t = u32; -pub type socklen_t = u32; -pub type mode_t = u32; -pub type rlim64_t = u64; -pub type mqd_t = c_int; -pub type nfds_t = c_uint; -pub type idtype_t = c_uint; -pub type errno_t = c_int; -pub type rsize_t = c_ulong; - -pub type Elf32_Half = u16; -pub type Elf32_Word = u32; -pub type Elf32_Off = u32; -pub type Elf32_Addr = u32; -pub type Elf32_Lword = u64; -pub type Elf32_Sword = i32; - -pub type Elf64_Half = u16; -pub type Elf64_Word = u32; -pub type Elf64_Off = u64; -pub type Elf64_Addr = u64; -pub type Elf64_Xword = u64; -pub type Elf64_Sxword = i64; -pub type Elf64_Lword = u64; -pub type Elf64_Sword = i32; - -pub type Elf32_Section = u16; -pub type Elf64_Section = u16; - -pub type _Time32t = u32; - -pub type pthread_t = c_int; -pub type regoff_t = ssize_t; - -pub type nlink_t = u32; -pub type blksize_t = u32; -pub type suseconds_t = i32; - -pub type ino_t = u64; -pub type off_t = i64; -pub type blkcnt_t = u64; -pub type msgqnum_t = u64; -pub type msglen_t = u64; -pub type fsblkcnt_t = u64; -pub type fsfilcnt_t = u64; -pub type rlim_t = u64; -pub type posix_spawn_file_actions_t = *mut c_void; -pub type posix_spawnattr_t = crate::uintptr_t; - -pub type pthread_mutex_t = crate::sync_t; -pub type pthread_mutexattr_t = crate::_sync_attr; -pub type pthread_cond_t = crate::sync_t; -pub type pthread_condattr_t = crate::_sync_attr; -pub type pthread_rwlockattr_t = crate::_sync_attr; -pub type pthread_key_t = c_int; -pub type pthread_spinlock_t = sync_t; -pub type pthread_barrierattr_t = _sync_attr; -pub type sem_t = sync_t; - -pub type nl_item = c_int; - -#[derive(Debug)] -pub enum timezone {} -impl Copy for timezone {} -impl Clone for timezone { - fn clone(&self) -> timezone { - *self - } -} - -s! 
{ - pub struct dirent_extra { - pub d_datalen: u16, - pub d_type: u16, - pub d_reserved: u32, - } - - pub struct stat { - pub st_ino: crate::ino_t, - pub st_size: off_t, - pub st_dev: crate::dev_t, - pub st_rdev: crate::dev_t, - pub st_uid: crate::uid_t, - pub st_gid: crate::gid_t, - pub __old_st_mtime: crate::_Time32t, - pub __old_st_atime: crate::_Time32t, - pub __old_st_ctime: crate::_Time32t, - pub st_mode: mode_t, - pub st_nlink: crate::nlink_t, - pub st_blocksize: crate::blksize_t, - pub st_nblocks: i32, - pub st_blksize: crate::blksize_t, - pub st_blocks: crate::blkcnt_t, - pub st_mtim: crate::timespec, - pub st_atim: crate::timespec, - pub st_ctim: crate::timespec, - } - - pub struct ip_mreq { - pub imr_multiaddr: in_addr, - pub imr_interface: in_addr, - } - - #[cfg_attr(any(target_env = "nto71", target_env = "nto70"), repr(packed))] - pub struct in_addr { - pub s_addr: crate::in_addr_t, - } - - pub struct sockaddr { - pub sa_len: u8, - pub sa_family: sa_family_t, - pub sa_data: [c_char; 14], - } - - #[cfg(not(target_env = "nto71_iosock"))] - pub struct sockaddr_in { - pub sin_len: u8, - pub sin_family: sa_family_t, - pub sin_port: crate::in_port_t, - pub sin_addr: crate::in_addr, - pub sin_zero: [i8; 8], - } - - #[cfg(target_env = "nto71_iosock")] - pub struct sockaddr_in { - pub sin_len: u8, - pub sin_family: sa_family_t, - pub sin_port: crate::in_port_t, - pub sin_addr: crate::in_addr, - pub sin_zero: [c_char; 8], - } - - pub struct sockaddr_in6 { - pub sin6_len: u8, - pub sin6_family: sa_family_t, - pub sin6_port: crate::in_port_t, - pub sin6_flowinfo: u32, - pub sin6_addr: crate::in6_addr, - pub sin6_scope_id: u32, - } - - // The order of the `ai_addr` field in this struct is crucial - // for converting between the Rust and C types. - pub struct addrinfo { - pub ai_flags: c_int, - pub ai_family: c_int, - pub ai_socktype: c_int, - pub ai_protocol: c_int, - pub ai_addrlen: socklen_t, - pub ai_canonname: *mut c_char, - pub ai_addr: *mut crate::sockaddr, - pub ai_next: *mut addrinfo, - } - - pub struct fd_set { - fds_bits: [c_uint; 2 * FD_SETSIZE as usize / ULONG_SIZE], - } - - pub struct tm { - pub tm_sec: c_int, - pub tm_min: c_int, - pub tm_hour: c_int, - pub tm_mday: c_int, - pub tm_mon: c_int, - pub tm_year: c_int, - pub tm_wday: c_int, - pub tm_yday: c_int, - pub tm_isdst: c_int, - pub tm_gmtoff: c_long, - pub tm_zone: *const c_char, - } - - #[repr(align(8))] - pub struct sched_param { - pub sched_priority: c_int, - pub sched_curpriority: c_int, - pub reserved: [c_int; 10], - } - - #[repr(align(8))] - pub struct __sched_param { - pub __sched_priority: c_int, - pub __sched_curpriority: c_int, - pub reserved: [c_int; 10], - } - - pub struct Dl_info { - pub dli_fname: *const c_char, - pub dli_fbase: *mut c_void, - pub dli_sname: *const c_char, - pub dli_saddr: *mut c_void, - } - - pub struct lconv { - pub currency_symbol: *mut c_char, - pub int_curr_symbol: *mut c_char, - pub mon_decimal_point: *mut c_char, - pub mon_grouping: *mut c_char, - pub mon_thousands_sep: *mut c_char, - pub negative_sign: *mut c_char, - pub positive_sign: *mut c_char, - pub frac_digits: c_char, - pub int_frac_digits: c_char, - pub n_cs_precedes: c_char, - pub n_sep_by_space: c_char, - pub n_sign_posn: c_char, - pub p_cs_precedes: c_char, - pub p_sep_by_space: c_char, - pub p_sign_posn: c_char, - - pub int_n_cs_precedes: c_char, - pub int_n_sep_by_space: c_char, - pub int_n_sign_posn: c_char, - pub int_p_cs_precedes: c_char, - pub int_p_sep_by_space: c_char, - pub int_p_sign_posn: c_char, - - pub 
decimal_point: *mut c_char, - pub grouping: *mut c_char, - pub thousands_sep: *mut c_char, - - pub _Frac_grouping: *mut c_char, - pub _Frac_sep: *mut c_char, - pub _False: *mut c_char, - pub _True: *mut c_char, - - pub _No: *mut c_char, - pub _Yes: *mut c_char, - pub _Nostr: *mut c_char, - pub _Yesstr: *mut c_char, - pub _Reserved: [*mut c_char; 8], - } - - // Does not exist in io-sock - #[cfg(not(target_env = "nto71_iosock"))] - pub struct in_pktinfo { - pub ipi_addr: crate::in_addr, - pub ipi_ifindex: c_uint, - } - - pub struct ifaddrs { - pub ifa_next: *mut ifaddrs, - pub ifa_name: *mut c_char, - pub ifa_flags: c_uint, - pub ifa_addr: *mut crate::sockaddr, - pub ifa_netmask: *mut crate::sockaddr, - pub ifa_dstaddr: *mut crate::sockaddr, - pub ifa_data: *mut c_void, - } - - pub struct arpreq { - pub arp_pa: crate::sockaddr, - pub arp_ha: crate::sockaddr, - pub arp_flags: c_int, - } - - #[cfg_attr(any(target_env = "nto71", target_env = "nto70"), repr(packed))] - pub struct arphdr { - pub ar_hrd: u16, - pub ar_pro: u16, - pub ar_hln: u8, - pub ar_pln: u8, - pub ar_op: u16, - } - - #[cfg(not(target_env = "nto71_iosock"))] - pub struct mmsghdr { - pub msg_hdr: crate::msghdr, - pub msg_len: c_uint, - } - - #[cfg(target_env = "nto71_iosock")] - pub struct mmsghdr { - pub msg_hdr: crate::msghdr, - pub msg_len: ssize_t, - } - - #[repr(align(8))] - pub struct siginfo_t { - pub si_signo: c_int, - pub si_code: c_int, - pub si_errno: c_int, - __data: [u8; 36], // union - } - - pub struct sigaction { - pub sa_sigaction: crate::sighandler_t, - pub sa_flags: c_int, - pub sa_mask: crate::sigset_t, - } - - pub struct _sync { - _union: c_uint, - __owner: c_uint, - } - pub struct rlimit64 { - pub rlim_cur: rlim64_t, - pub rlim_max: rlim64_t, - } - - pub struct glob_t { - pub gl_pathc: size_t, - pub gl_matchc: c_int, - pub gl_pathv: *mut *mut c_char, - pub gl_offs: size_t, - pub gl_flags: c_int, - pub gl_errfunc: extern "C" fn(*const c_char, c_int) -> c_int, - - __unused1: *mut c_void, - __unused2: *mut c_void, - __unused3: *mut c_void, - __unused4: *mut c_void, - __unused5: *mut c_void, - } - - pub struct passwd { - pub pw_name: *mut c_char, - pub pw_passwd: *mut c_char, - pub pw_uid: crate::uid_t, - pub pw_gid: crate::gid_t, - pub pw_age: *mut c_char, - pub pw_comment: *mut c_char, - pub pw_gecos: *mut c_char, - pub pw_dir: *mut c_char, - pub pw_shell: *mut c_char, - } - - pub struct if_nameindex { - pub if_index: c_uint, - pub if_name: *mut c_char, - } - - pub struct sembuf { - pub sem_num: c_ushort, - pub sem_op: c_short, - pub sem_flg: c_short, - } - - pub struct Elf32_Ehdr { - pub e_ident: [c_uchar; 16], - pub e_type: Elf32_Half, - pub e_machine: Elf32_Half, - pub e_version: Elf32_Word, - pub e_entry: Elf32_Addr, - pub e_phoff: Elf32_Off, - pub e_shoff: Elf32_Off, - pub e_flags: Elf32_Word, - pub e_ehsize: Elf32_Half, - pub e_phentsize: Elf32_Half, - pub e_phnum: Elf32_Half, - pub e_shentsize: Elf32_Half, - pub e_shnum: Elf32_Half, - pub e_shstrndx: Elf32_Half, - } - - pub struct Elf64_Ehdr { - pub e_ident: [c_uchar; 16], - pub e_type: Elf64_Half, - pub e_machine: Elf64_Half, - pub e_version: Elf64_Word, - pub e_entry: Elf64_Addr, - pub e_phoff: Elf64_Off, - pub e_shoff: Elf64_Off, - pub e_flags: Elf64_Word, - pub e_ehsize: Elf64_Half, - pub e_phentsize: Elf64_Half, - pub e_phnum: Elf64_Half, - pub e_shentsize: Elf64_Half, - pub e_shnum: Elf64_Half, - pub e_shstrndx: Elf64_Half, - } - - pub struct Elf32_Sym { - pub st_name: Elf32_Word, - pub st_value: Elf32_Addr, - pub st_size: Elf32_Word, - pub 
st_info: c_uchar, - pub st_other: c_uchar, - pub st_shndx: Elf32_Section, - } - - pub struct Elf64_Sym { - pub st_name: Elf64_Word, - pub st_info: c_uchar, - pub st_other: c_uchar, - pub st_shndx: Elf64_Section, - pub st_value: Elf64_Addr, - pub st_size: Elf64_Xword, - } - - pub struct Elf32_Phdr { - pub p_type: Elf32_Word, - pub p_offset: Elf32_Off, - pub p_vaddr: Elf32_Addr, - pub p_paddr: Elf32_Addr, - pub p_filesz: Elf32_Word, - pub p_memsz: Elf32_Word, - pub p_flags: Elf32_Word, - pub p_align: Elf32_Word, - } - - pub struct Elf64_Phdr { - pub p_type: Elf64_Word, - pub p_flags: Elf64_Word, - pub p_offset: Elf64_Off, - pub p_vaddr: Elf64_Addr, - pub p_paddr: Elf64_Addr, - pub p_filesz: Elf64_Xword, - pub p_memsz: Elf64_Xword, - pub p_align: Elf64_Xword, - } - - pub struct Elf32_Shdr { - pub sh_name: Elf32_Word, - pub sh_type: Elf32_Word, - pub sh_flags: Elf32_Word, - pub sh_addr: Elf32_Addr, - pub sh_offset: Elf32_Off, - pub sh_size: Elf32_Word, - pub sh_link: Elf32_Word, - pub sh_info: Elf32_Word, - pub sh_addralign: Elf32_Word, - pub sh_entsize: Elf32_Word, - } - - pub struct Elf64_Shdr { - pub sh_name: Elf64_Word, - pub sh_type: Elf64_Word, - pub sh_flags: Elf64_Xword, - pub sh_addr: Elf64_Addr, - pub sh_offset: Elf64_Off, - pub sh_size: Elf64_Xword, - pub sh_link: Elf64_Word, - pub sh_info: Elf64_Word, - pub sh_addralign: Elf64_Xword, - pub sh_entsize: Elf64_Xword, - } - - pub struct in6_pktinfo { - pub ipi6_addr: crate::in6_addr, - pub ipi6_ifindex: c_uint, - } - - pub struct inotify_event { - pub wd: c_int, - pub mask: u32, - pub cookie: u32, - pub len: u32, - } - - pub struct regmatch_t { - pub rm_so: regoff_t, - pub rm_eo: regoff_t, - } - - pub struct msghdr { - pub msg_name: *mut c_void, - pub msg_namelen: crate::socklen_t, - pub msg_iov: *mut crate::iovec, - pub msg_iovlen: c_int, - pub msg_control: *mut c_void, - pub msg_controllen: crate::socklen_t, - pub msg_flags: c_int, - } - - pub struct cmsghdr { - pub cmsg_len: crate::socklen_t, - pub cmsg_level: c_int, - pub cmsg_type: c_int, - } - - pub struct termios { - pub c_iflag: crate::tcflag_t, - pub c_oflag: crate::tcflag_t, - pub c_cflag: crate::tcflag_t, - pub c_lflag: crate::tcflag_t, - pub c_cc: [crate::cc_t; crate::NCCS], - __reserved: [c_uint; 3], - pub c_ispeed: crate::speed_t, - pub c_ospeed: crate::speed_t, - } - - pub struct mallinfo { - pub arena: c_int, - pub ordblks: c_int, - pub smblks: c_int, - pub hblks: c_int, - pub hblkhd: c_int, - pub usmblks: c_int, - pub fsmblks: c_int, - pub uordblks: c_int, - pub fordblks: c_int, - pub keepcost: c_int, - } - - pub struct flock { - pub l_type: i16, - pub l_whence: i16, - pub l_zero1: i32, - pub l_start: off_t, - pub l_len: off_t, - pub l_pid: crate::pid_t, - pub l_sysid: u32, - } - - pub struct statvfs { - pub f_bsize: c_ulong, - pub f_frsize: c_ulong, - pub f_blocks: crate::fsblkcnt_t, - pub f_bfree: crate::fsblkcnt_t, - pub f_bavail: crate::fsblkcnt_t, - pub f_files: crate::fsfilcnt_t, - pub f_ffree: crate::fsfilcnt_t, - pub f_favail: crate::fsfilcnt_t, - pub f_fsid: c_ulong, - pub f_basetype: [c_char; 16], - pub f_flag: c_ulong, - pub f_namemax: c_ulong, - f_filler: [c_uint; 21], - } - - pub struct aiocb { - pub aio_fildes: c_int, - pub aio_reqprio: c_int, - pub aio_offset: off_t, - pub aio_buf: *mut c_void, - pub aio_nbytes: size_t, - pub aio_sigevent: crate::sigevent, - pub aio_lio_opcode: c_int, - pub _aio_lio_state: *mut c_void, - _aio_pad: [c_int; 3], - pub _aio_next: *mut crate::aiocb, - pub _aio_flag: c_uint, - pub _aio_iotype: c_uint, - pub _aio_result: 
ssize_t, - pub _aio_error: c_uint, - pub _aio_suspend: *mut c_void, - pub _aio_plist: *mut c_void, - pub _aio_policy: c_int, - pub _aio_param: crate::__sched_param, - } - - pub struct pthread_attr_t { - __data1: c_long, - __data2: [u8; 96], - } - - pub struct ipc_perm { - pub uid: crate::uid_t, - pub gid: crate::gid_t, - pub cuid: crate::uid_t, - pub cgid: crate::gid_t, - pub mode: mode_t, - pub seq: c_uint, - pub key: crate::key_t, - _reserved: [c_int; 4], - } - - pub struct regex_t { - re_magic: c_int, - re_nsub: size_t, - re_endp: *const c_char, - re_g: *mut c_void, - } - - // FIXME(1.0): This should not implement `PartialEq` - #[allow(unpredictable_function_pointer_comparisons)] - pub struct _thread_attr { - pub __flags: c_int, - pub __stacksize: size_t, - pub __stackaddr: *mut c_void, - pub __exitfunc: Option, - pub __policy: c_int, - pub __param: crate::__sched_param, - pub __guardsize: c_uint, - pub __prealloc: c_uint, - __spare: [c_int; 2], - } - - pub struct _sync_attr { - pub __protocol: c_int, - pub __flags: c_int, - pub __prioceiling: c_int, - pub __clockid: c_int, - pub __count: c_int, - __reserved: [c_int; 3], - } - - pub struct sockcred { - pub sc_uid: crate::uid_t, - pub sc_euid: crate::uid_t, - pub sc_gid: crate::gid_t, - pub sc_egid: crate::gid_t, - pub sc_ngroups: c_int, - pub sc_groups: [crate::gid_t; 1], - } - - pub struct bpf_program { - pub bf_len: c_uint, - pub bf_insns: *mut crate::bpf_insn, - } - - #[cfg(not(target_env = "nto71_iosock"))] - pub struct bpf_stat { - pub bs_recv: u64, - pub bs_drop: u64, - pub bs_capt: u64, - bs_padding: [u64; 13], - } - - #[cfg(target_env = "nto71_iosock")] - pub struct bpf_stat { - pub bs_recv: c_uint, - pub bs_drop: c_uint, - } - - pub struct bpf_version { - pub bv_major: c_ushort, - pub bv_minor: c_ushort, - } - - pub struct bpf_hdr { - pub bh_tstamp: crate::timeval, - pub bh_caplen: u32, - pub bh_datalen: u32, - pub bh_hdrlen: u16, - } - - pub struct bpf_insn { - pub code: u16, - pub jt: c_uchar, - pub jf: c_uchar, - pub k: u32, - } - - pub struct bpf_dltlist { - pub bfl_len: c_uint, - pub bfl_list: *mut c_uint, - } - - // Does not exist in io-sock - #[cfg(not(target_env = "nto71_iosock"))] - pub struct unpcbid { - pub unp_pid: crate::pid_t, - pub unp_euid: crate::uid_t, - pub unp_egid: crate::gid_t, - } - - pub struct dl_phdr_info { - pub dlpi_addr: crate::Elf64_Addr, - pub dlpi_name: *const c_char, - pub dlpi_phdr: *const crate::Elf64_Phdr, - pub dlpi_phnum: crate::Elf64_Half, - } - - #[repr(align(8))] - pub struct ucontext_t { - pub uc_link: *mut ucontext_t, - pub uc_sigmask: crate::sigset_t, - pub uc_stack: stack_t, - pub uc_mcontext: mcontext_t, - } -} - -s_no_extra_traits! 
{ - pub struct sockaddr_un { - pub sun_len: u8, - pub sun_family: sa_family_t, - pub sun_path: [c_char; 104], - } - - pub struct sockaddr_storage { - pub ss_len: u8, - pub ss_family: sa_family_t, - __ss_pad1: [c_char; 6], - __ss_align: i64, - __ss_pad2: [c_char; 112], - } - - pub struct utsname { - pub sysname: [c_char; _SYSNAME_SIZE], - pub nodename: [c_char; _SYSNAME_SIZE], - pub release: [c_char; _SYSNAME_SIZE], - pub version: [c_char; _SYSNAME_SIZE], - pub machine: [c_char; _SYSNAME_SIZE], - } - - pub struct sigevent { - pub sigev_notify: c_int, - pub __padding1: c_int, - pub sigev_signo: c_int, // union - pub __padding2: c_int, - pub sigev_value: crate::sigval, - __sigev_un2: usize, // union - } - pub struct dirent { - pub d_ino: crate::ino_t, - pub d_offset: off_t, - pub d_reclen: c_short, - pub d_namelen: c_short, - pub d_name: [c_char; 1], // flex array - } - - pub struct sigset_t { - __val: [u32; 2], - } - - pub struct mq_attr { - pub mq_maxmsg: c_long, - pub mq_msgsize: c_long, - pub mq_flags: c_long, - pub mq_curmsgs: c_long, - pub mq_sendwait: c_long, - pub mq_recvwait: c_long, - } - - pub struct msg { - pub msg_next: *mut crate::msg, - pub msg_type: c_long, - pub msg_ts: c_ushort, - pub msg_spot: c_short, - _pad: [u8; 4], - } - - pub struct msqid_ds { - pub msg_perm: crate::ipc_perm, - pub msg_first: *mut crate::msg, - pub msg_last: *mut crate::msg, - pub msg_cbytes: crate::msglen_t, - pub msg_qnum: crate::msgqnum_t, - pub msg_qbytes: crate::msglen_t, - pub msg_lspid: crate::pid_t, - pub msg_lrpid: crate::pid_t, - pub msg_stime: crate::time_t, - msg_pad1: c_long, - pub msg_rtime: crate::time_t, - msg_pad2: c_long, - pub msg_ctime: crate::time_t, - msg_pad3: c_long, - msg_pad4: [c_long; 4], - } - - #[cfg(not(target_env = "nto71_iosock"))] - pub struct sockaddr_dl { - pub sdl_len: c_uchar, - pub sdl_family: crate::sa_family_t, - pub sdl_index: u16, - pub sdl_type: c_uchar, - pub sdl_nlen: c_uchar, - pub sdl_alen: c_uchar, - pub sdl_slen: c_uchar, - pub sdl_data: [c_char; 12], - } - - #[cfg(target_env = "nto71_iosock")] - pub struct sockaddr_dl { - pub sdl_len: c_uchar, - pub sdl_family: c_uchar, - pub sdl_index: c_ushort, - pub sdl_type: c_uchar, - pub sdl_nlen: c_uchar, - pub sdl_alen: c_uchar, - pub sdl_slen: c_uchar, - pub sdl_data: [c_char; 46], - } - - pub struct sync_t { - __u: c_uint, // union - pub __owner: c_uint, - } - - #[repr(align(4))] - pub struct pthread_barrier_t { - // union - __pad: [u8; 28], // union - } - - pub struct pthread_rwlock_t { - pub __active: c_int, - pub __blockedwriters: c_int, - pub __blockedreaders: c_int, - pub __heavy: c_int, - pub __lock: crate::pthread_mutex_t, // union - pub __rcond: crate::pthread_cond_t, // union - pub __wcond: crate::pthread_cond_t, // union - pub __owner: c_uint, - pub __spare: c_uint, - } -} - -cfg_if! 
{ - if #[cfg(feature = "extra_traits")] { - // sigevent - impl PartialEq for sigevent { - fn eq(&self, other: &sigevent) -> bool { - self.sigev_notify == other.sigev_notify - && self.sigev_signo == other.sigev_signo - && self.sigev_value == other.sigev_value - && self.__sigev_un2 == other.__sigev_un2 - } - } - impl Eq for sigevent {} - impl hash::Hash for sigevent { - fn hash(&self, state: &mut H) { - self.sigev_notify.hash(state); - self.sigev_signo.hash(state); - self.sigev_value.hash(state); - self.__sigev_un2.hash(state); - } - } - - impl PartialEq for sockaddr_un { - fn eq(&self, other: &sockaddr_un) -> bool { - self.sun_len == other.sun_len - && self.sun_family == other.sun_family - && self - .sun_path - .iter() - .zip(other.sun_path.iter()) - .all(|(a, b)| a == b) - } - } - impl Eq for sockaddr_un {} - - impl hash::Hash for sockaddr_un { - fn hash(&self, state: &mut H) { - self.sun_len.hash(state); - self.sun_family.hash(state); - self.sun_path.hash(state); - } - } - - // sigset_t - impl PartialEq for sigset_t { - fn eq(&self, other: &sigset_t) -> bool { - self.__val == other.__val - } - } - impl Eq for sigset_t {} - impl hash::Hash for sigset_t { - fn hash(&self, state: &mut H) { - self.__val.hash(state); - } - } - - // msg - - // msqid_ds - - // sockaddr_dl - impl PartialEq for sockaddr_dl { - fn eq(&self, other: &sockaddr_dl) -> bool { - self.sdl_len == other.sdl_len - && self.sdl_family == other.sdl_family - && self.sdl_index == other.sdl_index - && self.sdl_type == other.sdl_type - && self.sdl_nlen == other.sdl_nlen - && self.sdl_alen == other.sdl_alen - && self.sdl_slen == other.sdl_slen - && self - .sdl_data - .iter() - .zip(other.sdl_data.iter()) - .all(|(a, b)| a == b) - } - } - impl Eq for sockaddr_dl {} - impl hash::Hash for sockaddr_dl { - fn hash(&self, state: &mut H) { - self.sdl_len.hash(state); - self.sdl_family.hash(state); - self.sdl_index.hash(state); - self.sdl_type.hash(state); - self.sdl_nlen.hash(state); - self.sdl_alen.hash(state); - self.sdl_slen.hash(state); - self.sdl_data.hash(state); - } - } - - impl PartialEq for utsname { - fn eq(&self, other: &utsname) -> bool { - self.sysname - .iter() - .zip(other.sysname.iter()) - .all(|(a, b)| a == b) - && self - .nodename - .iter() - .zip(other.nodename.iter()) - .all(|(a, b)| a == b) - && self - .release - .iter() - .zip(other.release.iter()) - .all(|(a, b)| a == b) - && self - .version - .iter() - .zip(other.version.iter()) - .all(|(a, b)| a == b) - && self - .machine - .iter() - .zip(other.machine.iter()) - .all(|(a, b)| a == b) - } - } - - impl Eq for utsname {} - - impl hash::Hash for utsname { - fn hash(&self, state: &mut H) { - self.sysname.hash(state); - self.nodename.hash(state); - self.release.hash(state); - self.version.hash(state); - self.machine.hash(state); - } - } - - impl PartialEq for mq_attr { - fn eq(&self, other: &mq_attr) -> bool { - self.mq_maxmsg == other.mq_maxmsg - && self.mq_msgsize == other.mq_msgsize - && self.mq_flags == other.mq_flags - && self.mq_curmsgs == other.mq_curmsgs - && self.mq_msgsize == other.mq_msgsize - && self.mq_sendwait == other.mq_sendwait - && self.mq_recvwait == other.mq_recvwait - } - } - - impl Eq for mq_attr {} - - impl hash::Hash for mq_attr { - fn hash(&self, state: &mut H) { - self.mq_maxmsg.hash(state); - self.mq_msgsize.hash(state); - self.mq_flags.hash(state); - self.mq_curmsgs.hash(state); - self.mq_sendwait.hash(state); - self.mq_recvwait.hash(state); - } - } - - impl PartialEq for sockaddr_storage { - fn eq(&self, other: &sockaddr_storage) -> bool { - 
self.ss_len == other.ss_len - && self.ss_family == other.ss_family - && self.__ss_pad1 == other.__ss_pad1 - && self.__ss_align == other.__ss_align - && self - .__ss_pad2 - .iter() - .zip(other.__ss_pad2.iter()) - .all(|(a, b)| a == b) - } - } - - impl Eq for sockaddr_storage {} - - impl hash::Hash for sockaddr_storage { - fn hash(&self, state: &mut H) { - self.ss_len.hash(state); - self.ss_family.hash(state); - self.__ss_pad1.hash(state); - self.__ss_align.hash(state); - self.__ss_pad2.hash(state); - } - } - - impl PartialEq for dirent { - fn eq(&self, other: &dirent) -> bool { - self.d_ino == other.d_ino - && self.d_offset == other.d_offset - && self.d_reclen == other.d_reclen - && self.d_namelen == other.d_namelen - && self.d_name[..self.d_namelen as _] - .iter() - .zip(other.d_name.iter()) - .all(|(a, b)| a == b) - } - } - - impl Eq for dirent {} - - impl hash::Hash for dirent { - fn hash(&self, state: &mut H) { - self.d_ino.hash(state); - self.d_offset.hash(state); - self.d_reclen.hash(state); - self.d_namelen.hash(state); - self.d_name[..self.d_namelen as _].hash(state); - } - } - } -} - -pub const _SYSNAME_SIZE: usize = 256 + 1; -pub const RLIM_INFINITY: crate::rlim_t = 0xfffffffffffffffd; -pub const O_LARGEFILE: c_int = 0o0100000; - -// intentionally not public, only used for fd_set -cfg_if! { - if #[cfg(target_pointer_width = "32")] { - const ULONG_SIZE: usize = 32; - } else if #[cfg(target_pointer_width = "64")] { - const ULONG_SIZE: usize = 64; - } else { - // Unknown target_pointer_width - } -} - -pub const EXIT_FAILURE: c_int = 1; -pub const EXIT_SUCCESS: c_int = 0; -pub const RAND_MAX: c_int = 32767; -pub const EOF: c_int = -1; -pub const SEEK_SET: c_int = 0; -pub const SEEK_CUR: c_int = 1; -pub const SEEK_END: c_int = 2; -pub const _IOFBF: c_int = 0; -pub const _IONBF: c_int = 2; -pub const _IOLBF: c_int = 1; - -pub const F_DUPFD: c_int = 0; -pub const F_GETFD: c_int = 1; -pub const F_SETFD: c_int = 2; -pub const F_GETFL: c_int = 3; -pub const F_SETFL: c_int = 4; - -pub const F_DUPFD_CLOEXEC: c_int = 5; - -pub const SIGTRAP: c_int = 5; - -pub const CLOCK_REALTIME: crate::clockid_t = 0; -pub const CLOCK_MONOTONIC: crate::clockid_t = 2; -pub const CLOCK_PROCESS_CPUTIME_ID: crate::clockid_t = 3; -pub const CLOCK_THREAD_CPUTIME_ID: crate::clockid_t = 4; -pub const TIMER_ABSTIME: c_uint = 0x80000000; - -pub const RUSAGE_SELF: c_int = 0; - -pub const F_OK: c_int = 0; -pub const X_OK: c_int = 1; -pub const W_OK: c_int = 2; -pub const R_OK: c_int = 4; - -pub const STDIN_FILENO: c_int = 0; -pub const STDOUT_FILENO: c_int = 1; -pub const STDERR_FILENO: c_int = 2; - -pub const SIGHUP: c_int = 1; -pub const SIGINT: c_int = 2; -pub const SIGQUIT: c_int = 3; -pub const SIGILL: c_int = 4; -pub const SIGABRT: c_int = 6; -pub const SIGFPE: c_int = 8; -pub const SIGKILL: c_int = 9; -pub const SIGSEGV: c_int = 11; -pub const SIGPIPE: c_int = 13; -pub const SIGALRM: c_int = 14; -pub const SIGTERM: c_int = 15; - -pub const PROT_NONE: c_int = 0x00000000; -pub const PROT_READ: c_int = 0x00000100; -pub const PROT_WRITE: c_int = 0x00000200; -pub const PROT_EXEC: c_int = 0x00000400; - -pub const MAP_FILE: c_int = 0; -pub const MAP_SHARED: c_int = 1; -pub const MAP_PRIVATE: c_int = 2; -pub const MAP_FIXED: c_int = 0x10; - -pub const MAP_FAILED: *mut c_void = !0 as *mut c_void; - -pub const MS_ASYNC: c_int = 1; -pub const MS_INVALIDATE: c_int = 4; -pub const MS_SYNC: c_int = 2; - -pub const SCM_RIGHTS: c_int = 0x01; -pub const SCM_TIMESTAMP: c_int = 0x02; -cfg_if! 
{ - if #[cfg(not(target_env = "nto71_iosock"))] { - pub const SCM_CREDS: c_int = 0x04; - pub const IFF_NOTRAILERS: c_int = 0x00000020; - pub const AF_INET6: c_int = 24; - pub const AF_BLUETOOTH: c_int = 31; - pub const pseudo_AF_KEY: c_int = 29; - pub const MSG_NOSIGNAL: c_int = 0x0800; - pub const MSG_WAITFORONE: c_int = 0x2000; - pub const IP_IPSEC_POLICY_COMPAT: c_int = 22; - pub const IP_PKTINFO: c_int = 25; - pub const IPPROTO_DIVERT: c_int = 259; - pub const IPV6_IPSEC_POLICY_COMPAT: c_int = 28; - pub const TCP_KEEPALIVE: c_int = 0x04; - pub const ARPHRD_ARCNET: u16 = 7; - pub const SO_BINDTODEVICE: c_int = 0x0800; - pub const EAI_NODATA: c_int = 7; - pub const IPTOS_ECN_NOT_ECT: u8 = 0x00; - pub const RTF_BROADCAST: u32 = 0x80000; - pub const UDP_ENCAP: c_int = 100; - pub const HW_IOSTATS: c_int = 9; - pub const HW_MACHINE_ARCH: c_int = 10; - pub const HW_ALIGNBYTES: c_int = 11; - pub const HW_CNMAGIC: c_int = 12; - pub const HW_PHYSMEM64: c_int = 13; - pub const HW_USERMEM64: c_int = 14; - pub const HW_IOSTATNAMES: c_int = 15; - pub const HW_MAXID: c_int = 15; - pub const CTL_UNSPEC: c_int = 0; - pub const CTL_QNX: c_int = 9; - pub const CTL_PROC: c_int = 10; - pub const CTL_VENDOR: c_int = 11; - pub const CTL_EMUL: c_int = 12; - pub const CTL_SECURITY: c_int = 13; - pub const CTL_MAXID: c_int = 14; - pub const AF_ARP: c_int = 28; - pub const AF_IEEE80211: c_int = 32; - pub const AF_NATM: c_int = 27; - pub const AF_NS: c_int = 6; - pub const BIOCGDLTLIST: c_int = -1072676233; - pub const BIOCGETIF: c_int = 1083196011; - pub const BIOCGSEESENT: c_int = 1074020984; - pub const BIOCGSTATS: c_int = 1082147439; - pub const BIOCSDLT: c_int = -2147204490; - pub const BIOCSETIF: c_int = -2138029460; - pub const BIOCSSEESENT: c_int = -2147204487; - pub const FIONSPACE: c_int = 1074030200; - pub const FIONWRITE: c_int = 1074030201; - pub const IFF_ACCEPTRTADV: c_int = 0x40000000; - pub const IFF_IP6FORWARDING: c_int = 0x20000000; - pub const IFF_SHIM: c_int = 0x80000000; - pub const KERN_ARND: c_int = 81; - pub const KERN_IOV_MAX: c_int = 38; - pub const KERN_LOGSIGEXIT: c_int = 46; - pub const KERN_MAXID: c_int = 83; - pub const KERN_PROC_ARGS: c_int = 48; - pub const KERN_PROC_ENV: c_int = 3; - pub const KERN_PROC_GID: c_int = 7; - pub const KERN_PROC_RGID: c_int = 8; - pub const LOCAL_CONNWAIT: c_int = 0x0002; - pub const LOCAL_CREDS: c_int = 0x0001; - pub const LOCAL_PEEREID: c_int = 0x0003; - pub const MSG_NOTIFICATION: c_int = 0x0400; - pub const NET_RT_IFLIST: c_int = 4; - pub const NI_NUMERICSCOPE: c_int = 0x00000040; - pub const PF_ARP: c_int = 28; - pub const PF_NATM: c_int = 27; - pub const pseudo_AF_HDRCMPLT: c_int = 30; - pub const SIOCGIFADDR: c_int = -1064277727; - pub const SO_FIB: c_int = 0x100a; - pub const SO_TXPRIO: c_int = 0x100b; - pub const SO_SETFIB: c_int = 0x100a; - pub const SO_VLANPRIO: c_int = 0x100c; - pub const USER_ATEXIT_MAX: c_int = 21; - pub const USER_MAXID: c_int = 22; - pub const SO_OVERFLOWED: c_int = 0x1009; - } else { - pub const SCM_CREDS: c_int = 0x03; - pub const AF_INET6: c_int = 28; - pub const AF_BLUETOOTH: c_int = 36; - pub const pseudo_AF_KEY: c_int = 27; - pub const MSG_NOSIGNAL: c_int = 0x20000; - pub const MSG_WAITFORONE: c_int = 0x00080000; - pub const IPPROTO_DIVERT: c_int = 258; - pub const RTF_BROADCAST: u32 = 0x400000; - pub const UDP_ENCAP: c_int = 1; - pub const HW_MACHINE_ARCH: c_int = 11; - pub const AF_ARP: c_int = 35; - pub const AF_IEEE80211: c_int = 37; - pub const AF_NATM: c_int = 29; - pub const BIOCGDLTLIST: c_ulong = 
0xffffffffc0104279; - pub const BIOCGETIF: c_int = 0x4020426b; - pub const BIOCGSEESENT: c_int = 0x40044276; - pub const BIOCGSTATS: c_int = 0x4008426f; - pub const BIOCSDLT: c_int = 0x80044278; - pub const BIOCSETIF: c_int = 0x8020426c; - pub const BIOCSSEESENT: c_int = 0x80044277; - pub const KERN_ARND: c_int = 37; - pub const KERN_IOV_MAX: c_int = 35; - pub const KERN_LOGSIGEXIT: c_int = 34; - pub const KERN_PROC_ARGS: c_int = 7; - pub const KERN_PROC_ENV: c_int = 35; - pub const KERN_PROC_GID: c_int = 11; - pub const KERN_PROC_RGID: c_int = 10; - pub const LOCAL_CONNWAIT: c_int = 4; - pub const LOCAL_CREDS: c_int = 2; - pub const MSG_NOTIFICATION: c_int = 0x00002000; - pub const NET_RT_IFLIST: c_int = 3; - pub const NI_NUMERICSCOPE: c_int = 0x00000020; - pub const PF_ARP: c_int = AF_ARP; - pub const PF_NATM: c_int = AF_NATM; - pub const pseudo_AF_HDRCMPLT: c_int = 31; - pub const SIOCGIFADDR: c_int = 0xc0206921; - pub const SO_SETFIB: c_int = 0x1014; - } -} - -pub const MAP_TYPE: c_int = 0x3; - -pub const IFF_UP: c_int = 0x00000001; -pub const IFF_BROADCAST: c_int = 0x00000002; -pub const IFF_DEBUG: c_int = 0x00000004; -pub const IFF_LOOPBACK: c_int = 0x00000008; -pub const IFF_POINTOPOINT: c_int = 0x00000010; -pub const IFF_RUNNING: c_int = 0x00000040; -pub const IFF_NOARP: c_int = 0x00000080; -pub const IFF_PROMISC: c_int = 0x00000100; -pub const IFF_ALLMULTI: c_int = 0x00000200; -pub const IFF_MULTICAST: c_int = 0x00008000; - -pub const AF_UNSPEC: c_int = 0; -pub const AF_UNIX: c_int = AF_LOCAL; -pub const AF_LOCAL: c_int = 1; -pub const AF_INET: c_int = 2; -pub const AF_IPX: c_int = 23; -pub const AF_APPLETALK: c_int = 16; -pub const AF_ROUTE: c_int = 17; -pub const AF_SNA: c_int = 11; - -pub const AF_ISDN: c_int = 26; - -pub const PF_UNSPEC: c_int = AF_UNSPEC; -pub const PF_UNIX: c_int = PF_LOCAL; -pub const PF_LOCAL: c_int = AF_LOCAL; -pub const PF_INET: c_int = AF_INET; -pub const PF_IPX: c_int = AF_IPX; -pub const PF_APPLETALK: c_int = AF_APPLETALK; -pub const PF_INET6: c_int = AF_INET6; -pub const PF_KEY: c_int = pseudo_AF_KEY; -pub const PF_ROUTE: c_int = AF_ROUTE; -pub const PF_SNA: c_int = AF_SNA; - -pub const PF_BLUETOOTH: c_int = AF_BLUETOOTH; -pub const PF_ISDN: c_int = AF_ISDN; - -pub const SOMAXCONN: c_int = 128; - -pub const MSG_OOB: c_int = 0x0001; -pub const MSG_PEEK: c_int = 0x0002; -pub const MSG_DONTROUTE: c_int = 0x0004; -pub const MSG_CTRUNC: c_int = 0x0020; -pub const MSG_TRUNC: c_int = 0x0010; -pub const MSG_DONTWAIT: c_int = 0x0080; -pub const MSG_EOR: c_int = 0x0008; -pub const MSG_WAITALL: c_int = 0x0040; - -pub const IP_TOS: c_int = 3; -pub const IP_TTL: c_int = 4; -pub const IP_HDRINCL: c_int = 2; -pub const IP_OPTIONS: c_int = 1; -pub const IP_RECVOPTS: c_int = 5; -pub const IP_RETOPTS: c_int = 8; -pub const IP_MULTICAST_IF: c_int = 9; -pub const IP_MULTICAST_TTL: c_int = 10; -pub const IP_MULTICAST_LOOP: c_int = 11; -pub const IP_ADD_MEMBERSHIP: c_int = 12; -pub const IP_DROP_MEMBERSHIP: c_int = 13; -pub const IP_DEFAULT_MULTICAST_TTL: c_int = 1; -pub const IP_DEFAULT_MULTICAST_LOOP: c_int = 1; - -pub const IPPROTO_HOPOPTS: c_int = 0; -pub const IPPROTO_IGMP: c_int = 2; -pub const IPPROTO_IPIP: c_int = 4; -pub const IPPROTO_EGP: c_int = 8; -pub const IPPROTO_PUP: c_int = 12; -pub const IPPROTO_IDP: c_int = 22; -pub const IPPROTO_TP: c_int = 29; -pub const IPPROTO_ROUTING: c_int = 43; -pub const IPPROTO_FRAGMENT: c_int = 44; -pub const IPPROTO_RSVP: c_int = 46; -pub const IPPROTO_GRE: c_int = 47; -pub const IPPROTO_ESP: c_int = 50; -pub const 
IPPROTO_AH: c_int = 51; -pub const IPPROTO_NONE: c_int = 59; -pub const IPPROTO_DSTOPTS: c_int = 60; -pub const IPPROTO_ENCAP: c_int = 98; -pub const IPPROTO_PIM: c_int = 103; -pub const IPPROTO_SCTP: c_int = 132; -pub const IPPROTO_RAW: c_int = 255; -pub const IPPROTO_MAX: c_int = 256; -pub const IPPROTO_CARP: c_int = 112; -pub const IPPROTO_DONE: c_int = 257; -pub const IPPROTO_EON: c_int = 80; -pub const IPPROTO_ETHERIP: c_int = 97; -pub const IPPROTO_GGP: c_int = 3; -pub const IPPROTO_IPCOMP: c_int = 108; -pub const IPPROTO_MOBILE: c_int = 55; - -pub const IPV6_RTHDR_LOOSE: c_int = 0; -pub const IPV6_RTHDR_STRICT: c_int = 1; -pub const IPV6_UNICAST_HOPS: c_int = 4; -pub const IPV6_MULTICAST_IF: c_int = 9; -pub const IPV6_MULTICAST_HOPS: c_int = 10; -pub const IPV6_MULTICAST_LOOP: c_int = 11; -pub const IPV6_JOIN_GROUP: c_int = 12; -pub const IPV6_LEAVE_GROUP: c_int = 13; -pub const IPV6_CHECKSUM: c_int = 26; -pub const IPV6_V6ONLY: c_int = 27; -pub const IPV6_RTHDRDSTOPTS: c_int = 35; -pub const IPV6_RECVPKTINFO: c_int = 36; -pub const IPV6_RECVHOPLIMIT: c_int = 37; -pub const IPV6_RECVRTHDR: c_int = 38; -pub const IPV6_RECVHOPOPTS: c_int = 39; -pub const IPV6_RECVDSTOPTS: c_int = 40; -pub const IPV6_RECVPATHMTU: c_int = 43; -pub const IPV6_PATHMTU: c_int = 44; -pub const IPV6_PKTINFO: c_int = 46; -pub const IPV6_HOPLIMIT: c_int = 47; -pub const IPV6_NEXTHOP: c_int = 48; -pub const IPV6_HOPOPTS: c_int = 49; -pub const IPV6_DSTOPTS: c_int = 50; -pub const IPV6_RECVTCLASS: c_int = 57; -pub const IPV6_TCLASS: c_int = 61; -pub const IPV6_DONTFRAG: c_int = 62; - -pub const TCP_NODELAY: c_int = 0x01; -pub const TCP_MAXSEG: c_int = 0x02; -pub const TCP_MD5SIG: c_int = 0x10; - -pub const SHUT_RD: c_int = 0; -pub const SHUT_WR: c_int = 1; -pub const SHUT_RDWR: c_int = 2; - -pub const LOCK_SH: c_int = 0x1; -pub const LOCK_EX: c_int = 0x2; -pub const LOCK_NB: c_int = 0x4; -pub const LOCK_UN: c_int = 0x8; - -pub const SS_ONSTACK: c_int = 1; -pub const SS_DISABLE: c_int = 2; - -pub const PATH_MAX: c_int = 1024; - -pub const UIO_MAXIOV: c_int = 1024; - -pub const FD_SETSIZE: usize = 256; - -pub const TCIOFF: c_int = 0x0002; -pub const TCION: c_int = 0x0003; -pub const TCOOFF: c_int = 0x0000; -pub const TCOON: c_int = 0x0001; -pub const TCIFLUSH: c_int = 0; -pub const TCOFLUSH: c_int = 1; -pub const TCIOFLUSH: c_int = 2; -pub const NL0: crate::tcflag_t = 0x000; -pub const NL1: crate::tcflag_t = 0x100; -pub const TAB0: crate::tcflag_t = 0x0000; -pub const CR0: crate::tcflag_t = 0x000; -pub const FF0: crate::tcflag_t = 0x0000; -pub const BS0: crate::tcflag_t = 0x0000; -pub const VT0: crate::tcflag_t = 0x0000; -pub const VERASE: usize = 2; -pub const VKILL: usize = 3; -pub const VINTR: usize = 0; -pub const VQUIT: usize = 1; -pub const VLNEXT: usize = 15; -pub const IGNBRK: crate::tcflag_t = 0x00000001; -pub const BRKINT: crate::tcflag_t = 0x00000002; -pub const IGNPAR: crate::tcflag_t = 0x00000004; -pub const PARMRK: crate::tcflag_t = 0x00000008; -pub const INPCK: crate::tcflag_t = 0x00000010; -pub const ISTRIP: crate::tcflag_t = 0x00000020; -pub const INLCR: crate::tcflag_t = 0x00000040; -pub const IGNCR: crate::tcflag_t = 0x00000080; -pub const ICRNL: crate::tcflag_t = 0x00000100; -pub const IXANY: crate::tcflag_t = 0x00000800; -pub const IMAXBEL: crate::tcflag_t = 0x00002000; -pub const OPOST: crate::tcflag_t = 0x00000001; -pub const CS5: crate::tcflag_t = 0x00; -pub const ECHO: crate::tcflag_t = 0x00000008; -pub const OCRNL: crate::tcflag_t = 0x00000008; -pub const ONOCR: crate::tcflag_t = 
0x00000010; -pub const ONLRET: crate::tcflag_t = 0x00000020; -pub const OFILL: crate::tcflag_t = 0x00000040; -pub const OFDEL: crate::tcflag_t = 0x00000080; - -pub const WNOHANG: c_int = 0x0040; -pub const WUNTRACED: c_int = 0x0004; -pub const WSTOPPED: c_int = WUNTRACED; -pub const WEXITED: c_int = 0x0001; -pub const WCONTINUED: c_int = 0x0008; -pub const WNOWAIT: c_int = 0x0080; -pub const WTRAPPED: c_int = 0x0002; - -pub const RTLD_LOCAL: c_int = 0x0200; -pub const RTLD_LAZY: c_int = 0x0001; - -pub const POSIX_FADV_NORMAL: c_int = 0; -pub const POSIX_FADV_RANDOM: c_int = 2; -pub const POSIX_FADV_SEQUENTIAL: c_int = 1; -pub const POSIX_FADV_WILLNEED: c_int = 3; - -pub const AT_FDCWD: c_int = -100; -pub const AT_EACCESS: c_int = 0x0001; -pub const AT_SYMLINK_NOFOLLOW: c_int = 0x0002; -pub const AT_SYMLINK_FOLLOW: c_int = 0x0004; -pub const AT_REMOVEDIR: c_int = 0x0008; - -pub const LOG_CRON: c_int = 9 << 3; -pub const LOG_AUTHPRIV: c_int = 10 << 3; -pub const LOG_FTP: c_int = 11 << 3; -pub const LOG_PERROR: c_int = 0x20; - -pub const PIPE_BUF: usize = 5120; - -pub const CLD_EXITED: c_int = 1; -pub const CLD_KILLED: c_int = 2; -pub const CLD_DUMPED: c_int = 3; -pub const CLD_TRAPPED: c_int = 4; -pub const CLD_STOPPED: c_int = 5; -pub const CLD_CONTINUED: c_int = 6; - -pub const UTIME_OMIT: c_long = 0x40000002; -pub const UTIME_NOW: c_long = 0x40000001; - -pub const POLLIN: c_short = POLLRDNORM | POLLRDBAND; -pub const POLLPRI: c_short = 0x0008; -pub const POLLOUT: c_short = 0x0002; -pub const POLLERR: c_short = 0x0020; -pub const POLLHUP: c_short = 0x0040; -pub const POLLNVAL: c_short = 0x1000; -pub const POLLRDNORM: c_short = 0x0001; -pub const POLLRDBAND: c_short = 0x0004; - -pub const IPTOS_LOWDELAY: u8 = 0x10; -pub const IPTOS_THROUGHPUT: u8 = 0x08; -pub const IPTOS_RELIABILITY: u8 = 0x04; -pub const IPTOS_MINCOST: u8 = 0x02; - -pub const IPTOS_PREC_NETCONTROL: u8 = 0xe0; -pub const IPTOS_PREC_INTERNETCONTROL: u8 = 0xc0; -pub const IPTOS_PREC_CRITIC_ECP: u8 = 0xa0; -pub const IPTOS_PREC_FLASHOVERRIDE: u8 = 0x80; -pub const IPTOS_PREC_FLASH: u8 = 0x60; -pub const IPTOS_PREC_IMMEDIATE: u8 = 0x40; -pub const IPTOS_PREC_PRIORITY: u8 = 0x20; -pub const IPTOS_PREC_ROUTINE: u8 = 0x00; - -pub const IPTOS_ECN_MASK: u8 = 0x03; -pub const IPTOS_ECN_ECT1: u8 = 0x01; -pub const IPTOS_ECN_ECT0: u8 = 0x02; -pub const IPTOS_ECN_CE: u8 = 0x03; - -pub const IPOPT_CONTROL: u8 = 0x00; -pub const IPOPT_RESERVED1: u8 = 0x20; -pub const IPOPT_RESERVED2: u8 = 0x60; -pub const IPOPT_LSRR: u8 = 131; -pub const IPOPT_RR: u8 = 7; -pub const IPOPT_SSRR: u8 = 137; -pub const IPDEFTTL: u8 = 64; -pub const IPOPT_OPTVAL: u8 = 0; -pub const IPOPT_OLEN: u8 = 1; -pub const IPOPT_OFFSET: u8 = 2; -pub const IPOPT_MINOFF: u8 = 4; -pub const IPOPT_NOP: u8 = 1; -pub const IPOPT_EOL: u8 = 0; -pub const IPOPT_TS: u8 = 68; -pub const IPOPT_TS_TSONLY: u8 = 0; -pub const IPOPT_TS_TSANDADDR: u8 = 1; -pub const IPOPT_TS_PRESPEC: u8 = 3; - -pub const MAX_IPOPTLEN: u8 = 40; -pub const IPVERSION: u8 = 4; -pub const MAXTTL: u8 = 255; - -pub const ARPHRD_ETHER: u16 = 1; -pub const ARPHRD_IEEE802: u16 = 6; -pub const ARPHRD_IEEE1394: u16 = 24; - -pub const SOL_SOCKET: c_int = 0xffff; - -pub const SO_DEBUG: c_int = 0x0001; -pub const SO_REUSEADDR: c_int = 0x0004; -pub const SO_TYPE: c_int = 0x1008; -pub const SO_ERROR: c_int = 0x1007; -pub const SO_DONTROUTE: c_int = 0x0010; -pub const SO_BROADCAST: c_int = 0x0020; -pub const SO_SNDBUF: c_int = 0x1001; -pub const SO_RCVBUF: c_int = 0x1002; -pub const SO_KEEPALIVE: c_int = 0x0008; -pub 
const SO_OOBINLINE: c_int = 0x0100; -pub const SO_LINGER: c_int = 0x0080; -pub const SO_REUSEPORT: c_int = 0x0200; -pub const SO_RCVLOWAT: c_int = 0x1004; -pub const SO_SNDLOWAT: c_int = 0x1003; -pub const SO_RCVTIMEO: c_int = 0x1006; -pub const SO_SNDTIMEO: c_int = 0x1005; -pub const SO_TIMESTAMP: c_int = 0x0400; -pub const SO_ACCEPTCONN: c_int = 0x0002; - -pub const TIOCM_LE: c_int = 0x0100; -pub const TIOCM_DTR: c_int = 0x0001; -pub const TIOCM_RTS: c_int = 0x0002; -pub const TIOCM_ST: c_int = 0x0200; -pub const TIOCM_SR: c_int = 0x0400; -pub const TIOCM_CTS: c_int = 0x1000; -pub const TIOCM_CAR: c_int = TIOCM_CD; -pub const TIOCM_CD: c_int = 0x8000; -pub const TIOCM_RNG: c_int = TIOCM_RI; -pub const TIOCM_RI: c_int = 0x4000; -pub const TIOCM_DSR: c_int = 0x2000; - -pub const SCHED_OTHER: c_int = 3; -pub const SCHED_FIFO: c_int = 1; -pub const SCHED_RR: c_int = 2; - -pub const IPC_PRIVATE: crate::key_t = 0; - -pub const IPC_CREAT: c_int = 0o001000; -pub const IPC_EXCL: c_int = 0o002000; -pub const IPC_NOWAIT: c_int = 0o004000; - -pub const IPC_RMID: c_int = 0; -pub const IPC_SET: c_int = 1; -pub const IPC_STAT: c_int = 2; - -pub const MSG_NOERROR: c_int = 0o010000; - -pub const LOG_NFACILITIES: c_int = 24; - -pub const SEM_FAILED: *mut crate::sem_t = 0xFFFFFFFFFFFFFFFF as *mut sem_t; - -pub const AI_PASSIVE: c_int = 0x00000001; -pub const AI_CANONNAME: c_int = 0x00000002; -pub const AI_NUMERICHOST: c_int = 0x00000004; - -pub const AI_NUMERICSERV: c_int = 0x00000008; - -pub const EAI_BADFLAGS: c_int = 3; -pub const EAI_NONAME: c_int = 8; -pub const EAI_AGAIN: c_int = 2; -pub const EAI_FAIL: c_int = 4; -pub const EAI_FAMILY: c_int = 5; -pub const EAI_SOCKTYPE: c_int = 10; -pub const EAI_SERVICE: c_int = 9; -pub const EAI_MEMORY: c_int = 6; -pub const EAI_SYSTEM: c_int = 11; -pub const EAI_OVERFLOW: c_int = 14; - -pub const NI_NUMERICHOST: c_int = 0x00000002; -pub const NI_NUMERICSERV: c_int = 0x00000008; -pub const NI_NOFQDN: c_int = 0x00000001; -pub const NI_NAMEREQD: c_int = 0x00000004; -pub const NI_DGRAM: c_int = 0x00000010; - -pub const AIO_CANCELED: c_int = 0; -pub const AIO_NOTCANCELED: c_int = 2; -pub const AIO_ALLDONE: c_int = 1; -pub const LIO_READ: c_int = 1; -pub const LIO_WRITE: c_int = 2; -pub const LIO_NOP: c_int = 0; -pub const LIO_WAIT: c_int = 1; -pub const LIO_NOWAIT: c_int = 0; - -pub const ITIMER_REAL: c_int = 0; -pub const ITIMER_VIRTUAL: c_int = 1; -pub const ITIMER_PROF: c_int = 2; - -// DIFF(main): changed to `c_short` in f62eb023ab -pub const POSIX_SPAWN_RESETIDS: c_int = 0x00000010; -pub const POSIX_SPAWN_SETPGROUP: c_int = 0x00000001; -pub const POSIX_SPAWN_SETSIGDEF: c_int = 0x00000004; -pub const POSIX_SPAWN_SETSIGMASK: c_int = 0x00000002; -pub const POSIX_SPAWN_SETSCHEDPARAM: c_int = 0x00000400; -pub const POSIX_SPAWN_SETSCHEDULER: c_int = 0x00000040; - -pub const RTF_UP: c_ushort = 0x0001; -pub const RTF_GATEWAY: c_ushort = 0x0002; - -pub const RTF_HOST: c_ushort = 0x0004; -pub const RTF_DYNAMIC: c_ushort = 0x0010; -pub const RTF_MODIFIED: c_ushort = 0x0020; -pub const RTF_REJECT: c_ushort = 0x0008; -pub const RTF_STATIC: c_ushort = 0x0800; -pub const RTF_XRESOLVE: c_ushort = 0x0200; -pub const RTM_NEWADDR: u16 = 0xc; -pub const RTM_DELADDR: u16 = 0xd; -pub const RTA_DST: c_ushort = 0x1; -pub const RTA_GATEWAY: c_ushort = 0x2; - -pub const IN_ACCESS: u32 = 0x00000001; -pub const IN_MODIFY: u32 = 0x00000002; -pub const IN_ATTRIB: u32 = 0x00000004; -pub const IN_CLOSE_WRITE: u32 = 0x00000008; -pub const IN_CLOSE_NOWRITE: u32 = 0x00000010; -pub const IN_CLOSE: 
u32 = IN_CLOSE_WRITE | IN_CLOSE_NOWRITE; -pub const IN_OPEN: u32 = 0x00000020; -pub const IN_MOVED_FROM: u32 = 0x00000040; -pub const IN_MOVED_TO: u32 = 0x00000080; -pub const IN_MOVE: u32 = IN_MOVED_FROM | IN_MOVED_TO; -pub const IN_CREATE: u32 = 0x00000100; -pub const IN_DELETE: u32 = 0x00000200; -pub const IN_DELETE_SELF: u32 = 0x00000400; -pub const IN_MOVE_SELF: u32 = 0x00000800; -pub const IN_UNMOUNT: u32 = 0x00002000; -pub const IN_Q_OVERFLOW: u32 = 0x00004000; -pub const IN_IGNORED: u32 = 0x00008000; -pub const IN_ONLYDIR: u32 = 0x01000000; -pub const IN_DONT_FOLLOW: u32 = 0x02000000; - -pub const IN_ISDIR: u32 = 0x40000000; -pub const IN_ONESHOT: u32 = 0x80000000; - -pub const REG_EXTENDED: c_int = 0o0001; -pub const REG_ICASE: c_int = 0o0002; -pub const REG_NEWLINE: c_int = 0o0010; -pub const REG_NOSUB: c_int = 0o0004; - -pub const REG_NOTBOL: c_int = 0o00001; -pub const REG_NOTEOL: c_int = 0o00002; - -pub const REG_ENOSYS: c_int = 17; -pub const REG_NOMATCH: c_int = 1; -pub const REG_BADPAT: c_int = 2; -pub const REG_ECOLLATE: c_int = 3; -pub const REG_ECTYPE: c_int = 4; -pub const REG_EESCAPE: c_int = 5; -pub const REG_ESUBREG: c_int = 6; -pub const REG_EBRACK: c_int = 7; -pub const REG_EPAREN: c_int = 8; -pub const REG_EBRACE: c_int = 9; -pub const REG_BADBR: c_int = 10; -pub const REG_ERANGE: c_int = 11; -pub const REG_ESPACE: c_int = 12; -pub const REG_BADRPT: c_int = 13; - -// errno.h -pub const EOK: c_int = 0; -pub const EWOULDBLOCK: c_int = EAGAIN; -pub const EPERM: c_int = 1; -pub const ENOENT: c_int = 2; -pub const ESRCH: c_int = 3; -pub const EINTR: c_int = 4; -pub const EIO: c_int = 5; -pub const ENXIO: c_int = 6; -pub const E2BIG: c_int = 7; -pub const ENOEXEC: c_int = 8; -pub const EBADF: c_int = 9; -pub const ECHILD: c_int = 10; -pub const EAGAIN: c_int = 11; -pub const ENOMEM: c_int = 12; -pub const EACCES: c_int = 13; -pub const EFAULT: c_int = 14; -pub const ENOTBLK: c_int = 15; -pub const EBUSY: c_int = 16; -pub const EEXIST: c_int = 17; -pub const EXDEV: c_int = 18; -pub const ENODEV: c_int = 19; -pub const ENOTDIR: c_int = 20; -pub const EISDIR: c_int = 21; -pub const EINVAL: c_int = 22; -pub const ENFILE: c_int = 23; -pub const EMFILE: c_int = 24; -pub const ENOTTY: c_int = 25; -pub const ETXTBSY: c_int = 26; -pub const EFBIG: c_int = 27; -pub const ENOSPC: c_int = 28; -pub const ESPIPE: c_int = 29; -pub const EROFS: c_int = 30; -pub const EMLINK: c_int = 31; -pub const EPIPE: c_int = 32; -pub const EDOM: c_int = 33; -pub const ERANGE: c_int = 34; -pub const ENOMSG: c_int = 35; -pub const EIDRM: c_int = 36; -pub const ECHRNG: c_int = 37; -pub const EL2NSYNC: c_int = 38; -pub const EL3HLT: c_int = 39; -pub const EL3RST: c_int = 40; -pub const ELNRNG: c_int = 41; -pub const EUNATCH: c_int = 42; -pub const ENOCSI: c_int = 43; -pub const EL2HLT: c_int = 44; -pub const EDEADLK: c_int = 45; -pub const ENOLCK: c_int = 46; -pub const ECANCELED: c_int = 47; -pub const EDQUOT: c_int = 49; -pub const EBADE: c_int = 50; -pub const EBADR: c_int = 51; -pub const EXFULL: c_int = 52; -pub const ENOANO: c_int = 53; -pub const EBADRQC: c_int = 54; -pub const EBADSLT: c_int = 55; -pub const EDEADLOCK: c_int = 56; -pub const EBFONT: c_int = 57; -pub const EOWNERDEAD: c_int = 58; -pub const ENOSTR: c_int = 60; -pub const ENODATA: c_int = 61; -pub const ETIME: c_int = 62; -pub const ENOSR: c_int = 63; -pub const ENONET: c_int = 64; -pub const ENOPKG: c_int = 65; -pub const EREMOTE: c_int = 66; -pub const ENOLINK: c_int = 67; -pub const EADV: c_int = 68; -pub const ESRMNT: c_int = 
69; -pub const ECOMM: c_int = 70; -pub const EPROTO: c_int = 71; -pub const EMULTIHOP: c_int = 74; -pub const EBADMSG: c_int = 77; -pub const ENAMETOOLONG: c_int = 78; -pub const EOVERFLOW: c_int = 79; -pub const ENOTUNIQ: c_int = 80; -pub const EBADFD: c_int = 81; -pub const EREMCHG: c_int = 82; -pub const ELIBACC: c_int = 83; -pub const ELIBBAD: c_int = 84; -pub const ELIBSCN: c_int = 85; -pub const ELIBMAX: c_int = 86; -pub const ELIBEXEC: c_int = 87; -pub const EILSEQ: c_int = 88; -pub const ENOSYS: c_int = 89; -pub const ELOOP: c_int = 90; -pub const ERESTART: c_int = 91; -pub const ESTRPIPE: c_int = 92; -pub const ENOTEMPTY: c_int = 93; -pub const EUSERS: c_int = 94; -pub const ENOTRECOVERABLE: c_int = 95; -pub const EOPNOTSUPP: c_int = 103; -pub const EFPOS: c_int = 110; -pub const ESTALE: c_int = 122; -pub const EINPROGRESS: c_int = 236; -pub const EALREADY: c_int = 237; -pub const ENOTSOCK: c_int = 238; -pub const EDESTADDRREQ: c_int = 239; -pub const EMSGSIZE: c_int = 240; -pub const EPROTOTYPE: c_int = 241; -pub const ENOPROTOOPT: c_int = 242; -pub const EPROTONOSUPPORT: c_int = 243; -pub const ESOCKTNOSUPPORT: c_int = 244; -pub const EPFNOSUPPORT: c_int = 246; -pub const EAFNOSUPPORT: c_int = 247; -pub const EADDRINUSE: c_int = 248; -pub const EADDRNOTAVAIL: c_int = 249; -pub const ENETDOWN: c_int = 250; -pub const ENETUNREACH: c_int = 251; -pub const ENETRESET: c_int = 252; -pub const ECONNABORTED: c_int = 253; -pub const ECONNRESET: c_int = 254; -pub const ENOBUFS: c_int = 255; -pub const EISCONN: c_int = 256; -pub const ENOTCONN: c_int = 257; -pub const ESHUTDOWN: c_int = 258; -pub const ETOOMANYREFS: c_int = 259; -pub const ETIMEDOUT: c_int = 260; -pub const ECONNREFUSED: c_int = 261; -pub const EHOSTDOWN: c_int = 264; -pub const EHOSTUNREACH: c_int = 265; -pub const EBADRPC: c_int = 272; -pub const ERPCMISMATCH: c_int = 273; -pub const EPROGUNAVAIL: c_int = 274; -pub const EPROGMISMATCH: c_int = 275; -pub const EPROCUNAVAIL: c_int = 276; -pub const ENOREMOTE: c_int = 300; -pub const ENONDP: c_int = 301; -pub const EBADFSYS: c_int = 302; -pub const EMORE: c_int = 309; -pub const ECTRLTERM: c_int = 310; -pub const ENOLIC: c_int = 311; -pub const ESRVRFAULT: c_int = 312; -pub const EENDIAN: c_int = 313; -pub const ESECTYPEINVAL: c_int = 314; - -pub const RUSAGE_CHILDREN: c_int = -1; -pub const L_tmpnam: c_uint = 255; - -pub const _PC_LINK_MAX: c_int = 1; -pub const _PC_MAX_CANON: c_int = 2; -pub const _PC_MAX_INPUT: c_int = 3; -pub const _PC_NAME_MAX: c_int = 4; -pub const _PC_PATH_MAX: c_int = 5; -pub const _PC_PIPE_BUF: c_int = 6; -pub const _PC_CHOWN_RESTRICTED: c_int = 9; -pub const _PC_NO_TRUNC: c_int = 7; -pub const _PC_VDISABLE: c_int = 8; -pub const _PC_SYNC_IO: c_int = 14; -pub const _PC_ASYNC_IO: c_int = 12; -pub const _PC_PRIO_IO: c_int = 13; -pub const _PC_SOCK_MAXBUF: c_int = 15; -pub const _PC_FILESIZEBITS: c_int = 16; -pub const _PC_REC_INCR_XFER_SIZE: c_int = 22; -pub const _PC_REC_MAX_XFER_SIZE: c_int = 23; -pub const _PC_REC_MIN_XFER_SIZE: c_int = 24; -pub const _PC_REC_XFER_ALIGN: c_int = 25; -pub const _PC_ALLOC_SIZE_MIN: c_int = 21; -pub const _PC_SYMLINK_MAX: c_int = 17; -pub const _PC_2_SYMLINKS: c_int = 20; - -pub const _SC_PAGE_SIZE: c_int = _SC_PAGESIZE; -pub const _SC_ARG_MAX: c_int = 1; -pub const _SC_CHILD_MAX: c_int = 2; -pub const _SC_CLK_TCK: c_int = 3; -pub const _SC_NGROUPS_MAX: c_int = 4; -pub const _SC_OPEN_MAX: c_int = 5; -pub const _SC_JOB_CONTROL: c_int = 6; -pub const _SC_SAVED_IDS: c_int = 7; -pub const _SC_VERSION: c_int = 8; -pub 
const _SC_PASS_MAX: c_int = 9; -pub const _SC_PAGESIZE: c_int = 11; -pub const _SC_XOPEN_VERSION: c_int = 12; -pub const _SC_STREAM_MAX: c_int = 13; -pub const _SC_TZNAME_MAX: c_int = 14; -pub const _SC_AIO_LISTIO_MAX: c_int = 15; -pub const _SC_AIO_MAX: c_int = 16; -pub const _SC_AIO_PRIO_DELTA_MAX: c_int = 17; -pub const _SC_DELAYTIMER_MAX: c_int = 18; -pub const _SC_MQ_OPEN_MAX: c_int = 19; -pub const _SC_MQ_PRIO_MAX: c_int = 20; -pub const _SC_RTSIG_MAX: c_int = 21; -pub const _SC_SEM_NSEMS_MAX: c_int = 22; -pub const _SC_SEM_VALUE_MAX: c_int = 23; -pub const _SC_SIGQUEUE_MAX: c_int = 24; -pub const _SC_TIMER_MAX: c_int = 25; -pub const _SC_ASYNCHRONOUS_IO: c_int = 26; -pub const _SC_FSYNC: c_int = 27; -pub const _SC_MAPPED_FILES: c_int = 28; -pub const _SC_MEMLOCK: c_int = 29; -pub const _SC_MEMLOCK_RANGE: c_int = 30; -pub const _SC_MEMORY_PROTECTION: c_int = 31; -pub const _SC_MESSAGE_PASSING: c_int = 32; -pub const _SC_PRIORITIZED_IO: c_int = 33; -pub const _SC_PRIORITY_SCHEDULING: c_int = 34; -pub const _SC_REALTIME_SIGNALS: c_int = 35; -pub const _SC_SEMAPHORES: c_int = 36; -pub const _SC_SHARED_MEMORY_OBJECTS: c_int = 37; -pub const _SC_SYNCHRONIZED_IO: c_int = 38; -pub const _SC_TIMERS: c_int = 39; -pub const _SC_GETGR_R_SIZE_MAX: c_int = 40; -pub const _SC_GETPW_R_SIZE_MAX: c_int = 41; -pub const _SC_LOGIN_NAME_MAX: c_int = 42; -pub const _SC_THREAD_DESTRUCTOR_ITERATIONS: c_int = 43; -pub const _SC_THREAD_KEYS_MAX: c_int = 44; -pub const _SC_THREAD_STACK_MIN: c_int = 45; -pub const _SC_THREAD_THREADS_MAX: c_int = 46; -pub const _SC_TTY_NAME_MAX: c_int = 47; -pub const _SC_THREADS: c_int = 48; -pub const _SC_THREAD_ATTR_STACKADDR: c_int = 49; -pub const _SC_THREAD_ATTR_STACKSIZE: c_int = 50; -pub const _SC_THREAD_PRIORITY_SCHEDULING: c_int = 51; -pub const _SC_THREAD_PRIO_INHERIT: c_int = 52; -pub const _SC_THREAD_PRIO_PROTECT: c_int = 53; -pub const _SC_THREAD_PROCESS_SHARED: c_int = 54; -pub const _SC_THREAD_SAFE_FUNCTIONS: c_int = 55; -pub const _SC_2_CHAR_TERM: c_int = 56; -pub const _SC_2_C_BIND: c_int = 57; -pub const _SC_2_C_DEV: c_int = 58; -pub const _SC_2_C_VERSION: c_int = 59; -pub const _SC_2_FORT_DEV: c_int = 60; -pub const _SC_2_FORT_RUN: c_int = 61; -pub const _SC_2_LOCALEDEF: c_int = 62; -pub const _SC_2_SW_DEV: c_int = 63; -pub const _SC_2_UPE: c_int = 64; -pub const _SC_2_VERSION: c_int = 65; -pub const _SC_ATEXIT_MAX: c_int = 66; -pub const _SC_AVPHYS_PAGES: c_int = 67; -pub const _SC_BC_BASE_MAX: c_int = 68; -pub const _SC_BC_DIM_MAX: c_int = 69; -pub const _SC_BC_SCALE_MAX: c_int = 70; -pub const _SC_BC_STRING_MAX: c_int = 71; -pub const _SC_CHARCLASS_NAME_MAX: c_int = 72; -pub const _SC_CHAR_BIT: c_int = 73; -pub const _SC_CHAR_MAX: c_int = 74; -pub const _SC_CHAR_MIN: c_int = 75; -pub const _SC_COLL_WEIGHTS_MAX: c_int = 76; -pub const _SC_EQUIV_CLASS_MAX: c_int = 77; -pub const _SC_EXPR_NEST_MAX: c_int = 78; -pub const _SC_INT_MAX: c_int = 79; -pub const _SC_INT_MIN: c_int = 80; -pub const _SC_LINE_MAX: c_int = 81; -pub const _SC_LONG_BIT: c_int = 82; -pub const _SC_MB_LEN_MAX: c_int = 83; -pub const _SC_NL_ARGMAX: c_int = 84; -pub const _SC_NL_LANGMAX: c_int = 85; -pub const _SC_NL_MSGMAX: c_int = 86; -pub const _SC_NL_NMAX: c_int = 87; -pub const _SC_NL_SETMAX: c_int = 88; -pub const _SC_NL_TEXTMAX: c_int = 89; -pub const _SC_NPROCESSORS_CONF: c_int = 90; -pub const _SC_NPROCESSORS_ONLN: c_int = 91; -pub const _SC_NZERO: c_int = 92; -pub const _SC_PHYS_PAGES: c_int = 93; -pub const _SC_PII: c_int = 94; -pub const _SC_PII_INTERNET: c_int = 95; -pub const 
_SC_PII_INTERNET_DGRAM: c_int = 96; -pub const _SC_PII_INTERNET_STREAM: c_int = 97; -pub const _SC_PII_OSI: c_int = 98; -pub const _SC_PII_OSI_CLTS: c_int = 99; -pub const _SC_PII_OSI_COTS: c_int = 100; -pub const _SC_PII_OSI_M: c_int = 101; -pub const _SC_PII_SOCKET: c_int = 102; -pub const _SC_PII_XTI: c_int = 103; -pub const _SC_POLL: c_int = 104; -pub const _SC_RE_DUP_MAX: c_int = 105; -pub const _SC_SCHAR_MAX: c_int = 106; -pub const _SC_SCHAR_MIN: c_int = 107; -pub const _SC_SELECT: c_int = 108; -pub const _SC_SHRT_MAX: c_int = 109; -pub const _SC_SHRT_MIN: c_int = 110; -pub const _SC_SSIZE_MAX: c_int = 111; -pub const _SC_T_IOV_MAX: c_int = 112; -pub const _SC_UCHAR_MAX: c_int = 113; -pub const _SC_UINT_MAX: c_int = 114; -pub const _SC_UIO_MAXIOV: c_int = 115; -pub const _SC_ULONG_MAX: c_int = 116; -pub const _SC_USHRT_MAX: c_int = 117; -pub const _SC_WORD_BIT: c_int = 118; -pub const _SC_XOPEN_CRYPT: c_int = 119; -pub const _SC_XOPEN_ENH_I18N: c_int = 120; -pub const _SC_XOPEN_SHM: c_int = 121; -pub const _SC_XOPEN_UNIX: c_int = 122; -pub const _SC_XOPEN_XCU_VERSION: c_int = 123; -pub const _SC_XOPEN_XPG2: c_int = 124; -pub const _SC_XOPEN_XPG3: c_int = 125; -pub const _SC_XOPEN_XPG4: c_int = 126; -pub const _SC_XBS5_ILP32_OFF32: c_int = 127; -pub const _SC_XBS5_ILP32_OFFBIG: c_int = 128; -pub const _SC_XBS5_LP64_OFF64: c_int = 129; -pub const _SC_XBS5_LPBIG_OFFBIG: c_int = 130; -pub const _SC_ADVISORY_INFO: c_int = 131; -pub const _SC_CPUTIME: c_int = 132; -pub const _SC_SPAWN: c_int = 133; -pub const _SC_SPORADIC_SERVER: c_int = 134; -pub const _SC_THREAD_CPUTIME: c_int = 135; -pub const _SC_THREAD_SPORADIC_SERVER: c_int = 136; -pub const _SC_TIMEOUTS: c_int = 137; -pub const _SC_BARRIERS: c_int = 138; -pub const _SC_CLOCK_SELECTION: c_int = 139; -pub const _SC_MONOTONIC_CLOCK: c_int = 140; -pub const _SC_READER_WRITER_LOCKS: c_int = 141; -pub const _SC_SPIN_LOCKS: c_int = 142; -pub const _SC_TYPED_MEMORY_OBJECTS: c_int = 143; -pub const _SC_TRACE_EVENT_FILTER: c_int = 144; -pub const _SC_TRACE: c_int = 145; -pub const _SC_TRACE_INHERIT: c_int = 146; -pub const _SC_TRACE_LOG: c_int = 147; -pub const _SC_2_PBS: c_int = 148; -pub const _SC_2_PBS_ACCOUNTING: c_int = 149; -pub const _SC_2_PBS_CHECKPOINT: c_int = 150; -pub const _SC_2_PBS_LOCATE: c_int = 151; -pub const _SC_2_PBS_MESSAGE: c_int = 152; -pub const _SC_2_PBS_TRACK: c_int = 153; -pub const _SC_HOST_NAME_MAX: c_int = 154; -pub const _SC_IOV_MAX: c_int = 155; -pub const _SC_IPV6: c_int = 156; -pub const _SC_RAW_SOCKETS: c_int = 157; -pub const _SC_REGEXP: c_int = 158; -pub const _SC_SHELL: c_int = 159; -pub const _SC_SS_REPL_MAX: c_int = 160; -pub const _SC_SYMLOOP_MAX: c_int = 161; -pub const _SC_TRACE_EVENT_NAME_MAX: c_int = 162; -pub const _SC_TRACE_NAME_MAX: c_int = 163; -pub const _SC_TRACE_SYS_MAX: c_int = 164; -pub const _SC_TRACE_USER_EVENT_MAX: c_int = 165; -pub const _SC_V6_ILP32_OFF32: c_int = 166; -pub const _SC_V6_ILP32_OFFBIG: c_int = 167; -pub const _SC_V6_LP64_OFF64: c_int = 168; -pub const _SC_V6_LPBIG_OFFBIG: c_int = 169; -pub const _SC_XOPEN_REALTIME: c_int = 170; -pub const _SC_XOPEN_REALTIME_THREADS: c_int = 171; -pub const _SC_XOPEN_LEGACY: c_int = 172; -pub const _SC_XOPEN_STREAMS: c_int = 173; -pub const _SC_V7_ILP32_OFF32: c_int = 176; -pub const _SC_V7_ILP32_OFFBIG: c_int = 177; -pub const _SC_V7_LP64_OFF64: c_int = 178; -pub const _SC_V7_LPBIG_OFFBIG: c_int = 179; - -pub const GLOB_ERR: c_int = 0x0001; -pub const GLOB_MARK: c_int = 0x0002; -pub const GLOB_NOSORT: c_int = 0x0004; -pub const 
GLOB_DOOFFS: c_int = 0x0008; -pub const GLOB_NOCHECK: c_int = 0x0010; -pub const GLOB_APPEND: c_int = 0x0020; -pub const GLOB_NOESCAPE: c_int = 0x0040; - -pub const GLOB_NOSPACE: c_int = 1; -pub const GLOB_ABORTED: c_int = 2; -pub const GLOB_NOMATCH: c_int = 3; - -pub const S_IEXEC: mode_t = crate::S_IXUSR; -pub const S_IWRITE: mode_t = crate::S_IWUSR; -pub const S_IREAD: mode_t = crate::S_IRUSR; - -pub const S_IFIFO: mode_t = 0o1_0000; -pub const S_IFCHR: mode_t = 0o2_0000; -pub const S_IFDIR: mode_t = 0o4_0000; -pub const S_IFBLK: mode_t = 0o6_0000; -pub const S_IFREG: mode_t = 0o10_0000; -pub const S_IFLNK: mode_t = 0o12_0000; -pub const S_IFSOCK: mode_t = 0o14_0000; -pub const S_IFMT: mode_t = 0o17_0000; - -pub const S_IXOTH: mode_t = 0o0001; -pub const S_IWOTH: mode_t = 0o0002; -pub const S_IROTH: mode_t = 0o0004; -pub const S_IRWXO: mode_t = 0o0007; -pub const S_IXGRP: mode_t = 0o0010; -pub const S_IWGRP: mode_t = 0o0020; -pub const S_IRGRP: mode_t = 0o0040; -pub const S_IRWXG: mode_t = 0o0070; -pub const S_IXUSR: mode_t = 0o0100; -pub const S_IWUSR: mode_t = 0o0200; -pub const S_IRUSR: mode_t = 0o0400; -pub const S_IRWXU: mode_t = 0o0700; - -pub const F_LOCK: c_int = 1; -pub const F_TEST: c_int = 3; -pub const F_TLOCK: c_int = 2; -pub const F_ULOCK: c_int = 0; - -pub const ST_RDONLY: c_ulong = 0x01; -pub const ST_NOSUID: c_ulong = 0x04; -pub const ST_NOEXEC: c_ulong = 0x02; -pub const ST_NOATIME: c_ulong = 0x20; - -pub const RTLD_NEXT: *mut c_void = -3i64 as *mut c_void; -pub const RTLD_DEFAULT: *mut c_void = -2i64 as *mut c_void; -pub const RTLD_NODELETE: c_int = 0x1000; -pub const RTLD_NOW: c_int = 0x0002; - -pub const EMPTY: c_short = 0; -pub const RUN_LVL: c_short = 1; -pub const BOOT_TIME: c_short = 2; -pub const NEW_TIME: c_short = 4; -pub const OLD_TIME: c_short = 3; -pub const INIT_PROCESS: c_short = 5; -pub const LOGIN_PROCESS: c_short = 6; -pub const USER_PROCESS: c_short = 7; -pub const DEAD_PROCESS: c_short = 8; -pub const ACCOUNTING: c_short = 9; - -pub const ENOTSUP: c_int = 48; - -pub const BUFSIZ: c_uint = 1024; -pub const TMP_MAX: c_uint = 26 * 26 * 26; -pub const FOPEN_MAX: c_uint = 16; -pub const FILENAME_MAX: c_uint = 255; - -pub const NI_MAXHOST: crate::socklen_t = 1025; -pub const M_KEEP: c_int = 4; -pub const REG_STARTEND: c_int = 0o00004; -pub const VEOF: usize = 4; - -pub const RTLD_GLOBAL: c_int = 0x0100; -pub const RTLD_NOLOAD: c_int = 0x0004; - -pub const O_RDONLY: c_int = 0o000000; -pub const O_WRONLY: c_int = 0o000001; -pub const O_RDWR: c_int = 0o000002; - -pub const O_EXEC: c_int = 0o00003; -pub const O_ASYNC: c_int = 0o0200000; -pub const O_NDELAY: c_int = O_NONBLOCK; -pub const O_TRUNC: c_int = 0o001000; -pub const O_CLOEXEC: c_int = 0o020000; -pub const O_DIRECTORY: c_int = 0o4000000; -pub const O_ACCMODE: c_int = 0o000007; -pub const O_APPEND: c_int = 0o000010; -pub const O_CREAT: c_int = 0o000400; -pub const O_EXCL: c_int = 0o002000; -pub const O_NOCTTY: c_int = 0o004000; -pub const O_NONBLOCK: c_int = 0o000200; -pub const O_SYNC: c_int = 0o000040; -pub const O_RSYNC: c_int = 0o000100; -pub const O_DSYNC: c_int = 0o000020; -pub const O_NOFOLLOW: c_int = 0o010000; - -pub const POSIX_FADV_DONTNEED: c_int = 4; -pub const POSIX_FADV_NOREUSE: c_int = 5; - -pub const SOCK_SEQPACKET: c_int = 5; -pub const SOCK_STREAM: c_int = 1; -pub const SOCK_DGRAM: c_int = 2; -pub const SOCK_RAW: c_int = 3; -pub const SOCK_RDM: c_int = 4; -pub const SOCK_CLOEXEC: c_int = 0x10000000; - -pub const SA_SIGINFO: c_int = 0x0002; -pub const SA_NOCLDWAIT: c_int = 0x0020; 
-pub const SA_NODEFER: c_int = 0x0010; -pub const SA_RESETHAND: c_int = 0x0004; -pub const SA_NOCLDSTOP: c_int = 0x0001; - -pub const SIGTTIN: c_int = 26; -pub const SIGTTOU: c_int = 27; -pub const SIGXCPU: c_int = 30; -pub const SIGXFSZ: c_int = 31; -pub const SIGVTALRM: c_int = 28; -pub const SIGPROF: c_int = 29; -pub const SIGWINCH: c_int = 20; -pub const SIGCHLD: c_int = 18; -pub const SIGBUS: c_int = 10; -pub const SIGUSR1: c_int = 16; -pub const SIGUSR2: c_int = 17; -pub const SIGCONT: c_int = 25; -pub const SIGSTOP: c_int = 23; -pub const SIGTSTP: c_int = 24; -pub const SIGURG: c_int = 21; -pub const SIGIO: c_int = SIGPOLL; -pub const SIGSYS: c_int = 12; -pub const SIGPOLL: c_int = 22; -pub const SIGPWR: c_int = 19; -pub const SIG_SETMASK: c_int = 2; -pub const SIG_BLOCK: c_int = 0; -pub const SIG_UNBLOCK: c_int = 1; - -pub const POLLWRNORM: c_short = crate::POLLOUT; -pub const POLLWRBAND: c_short = 0x0010; - -pub const F_SETLK: c_int = 106; -pub const F_SETLKW: c_int = 107; -pub const F_ALLOCSP: c_int = 110; -pub const F_FREESP: c_int = 111; -pub const F_GETLK: c_int = 114; - -pub const F_RDLCK: c_int = 1; -pub const F_WRLCK: c_int = 2; -pub const F_UNLCK: c_int = 3; - -pub const NCCS: usize = 40; - -pub const MAP_ANON: c_int = MAP_ANONYMOUS; -pub const MAP_ANONYMOUS: c_int = 0x00080000; - -pub const MCL_CURRENT: c_int = 0x000000001; -pub const MCL_FUTURE: c_int = 0x000000002; - -pub const _TIO_CBAUD: crate::tcflag_t = 15; -pub const CBAUD: crate::tcflag_t = _TIO_CBAUD; -pub const TAB1: crate::tcflag_t = 0x0800; -pub const TAB2: crate::tcflag_t = 0x1000; -pub const TAB3: crate::tcflag_t = 0x1800; -pub const CR1: crate::tcflag_t = 0x200; -pub const CR2: crate::tcflag_t = 0x400; -pub const CR3: crate::tcflag_t = 0x600; -pub const FF1: crate::tcflag_t = 0x8000; -pub const BS1: crate::tcflag_t = 0x2000; -pub const VT1: crate::tcflag_t = 0x4000; -pub const VWERASE: usize = 14; -pub const VREPRINT: usize = 12; -pub const VSUSP: usize = 10; -pub const VSTART: usize = 8; -pub const VSTOP: usize = 9; -pub const VDISCARD: usize = 13; -pub const VTIME: usize = 17; -pub const IXON: crate::tcflag_t = 0x00000400; -pub const IXOFF: crate::tcflag_t = 0x00001000; -pub const ONLCR: crate::tcflag_t = 0x00000004; -pub const CSIZE: crate::tcflag_t = 0x00000030; -pub const CS6: crate::tcflag_t = 0x10; -pub const CS7: crate::tcflag_t = 0x20; -pub const CS8: crate::tcflag_t = 0x30; -pub const CSTOPB: crate::tcflag_t = 0x00000040; -pub const CREAD: crate::tcflag_t = 0x00000080; -pub const PARENB: crate::tcflag_t = 0x00000100; -pub const PARODD: crate::tcflag_t = 0x00000200; -pub const HUPCL: crate::tcflag_t = 0x00000400; -pub const CLOCAL: crate::tcflag_t = 0x00000800; -pub const ECHOKE: crate::tcflag_t = 0x00000800; -pub const ECHOE: crate::tcflag_t = 0x00000010; -pub const ECHOK: crate::tcflag_t = 0x00000020; -pub const ECHONL: crate::tcflag_t = 0x00000040; -pub const ECHOCTL: crate::tcflag_t = 0x00000200; -pub const ISIG: crate::tcflag_t = 0x00000001; -pub const ICANON: crate::tcflag_t = 0x00000002; -pub const NOFLSH: crate::tcflag_t = 0x00000080; -pub const OLCUC: crate::tcflag_t = 0x00000002; -pub const NLDLY: crate::tcflag_t = 0x00000100; -pub const CRDLY: crate::tcflag_t = 0x00000600; -pub const TABDLY: crate::tcflag_t = 0x00001800; -pub const BSDLY: crate::tcflag_t = 0x00002000; -pub const FFDLY: crate::tcflag_t = 0x00008000; -pub const VTDLY: crate::tcflag_t = 0x00004000; -pub const XTABS: crate::tcflag_t = 0x1800; - -pub const B0: crate::speed_t = 0; -pub const B50: crate::speed_t = 1; -pub const 
B75: crate::speed_t = 2; -pub const B110: crate::speed_t = 3; -pub const B134: crate::speed_t = 4; -pub const B150: crate::speed_t = 5; -pub const B200: crate::speed_t = 6; -pub const B300: crate::speed_t = 7; -pub const B600: crate::speed_t = 8; -pub const B1200: crate::speed_t = 9; -pub const B1800: crate::speed_t = 10; -pub const B2400: crate::speed_t = 11; -pub const B4800: crate::speed_t = 12; -pub const B9600: crate::speed_t = 13; -pub const B19200: crate::speed_t = 14; -pub const B38400: crate::speed_t = 15; -pub const EXTA: crate::speed_t = 14; -pub const EXTB: crate::speed_t = 15; -pub const B57600: crate::speed_t = 57600; -pub const B115200: crate::speed_t = 115200; - -pub const VEOL: usize = 5; -pub const VEOL2: usize = 6; -pub const VMIN: usize = 16; -pub const IEXTEN: crate::tcflag_t = 0x00008000; -pub const TOSTOP: crate::tcflag_t = 0x00000100; - -pub const TCSANOW: c_int = 0x0001; -pub const TCSADRAIN: c_int = 0x0002; -pub const TCSAFLUSH: c_int = 0x0004; - -pub const HW_MACHINE: c_int = 1; -pub const HW_MODEL: c_int = 2; -pub const HW_NCPU: c_int = 3; -pub const HW_BYTEORDER: c_int = 4; -pub const HW_PHYSMEM: c_int = 5; -pub const HW_USERMEM: c_int = 6; -pub const HW_PAGESIZE: c_int = 7; -pub const HW_DISKNAMES: c_int = 8; -pub const CTL_KERN: c_int = 1; -pub const CTL_VM: c_int = 2; -pub const CTL_VFS: c_int = 3; -pub const CTL_NET: c_int = 4; -pub const CTL_DEBUG: c_int = 5; -pub const CTL_HW: c_int = 6; -pub const CTL_MACHDEP: c_int = 7; -pub const CTL_USER: c_int = 8; - -pub const DAY_1: crate::nl_item = 8; -pub const DAY_2: crate::nl_item = 9; -pub const DAY_3: crate::nl_item = 10; -pub const DAY_4: crate::nl_item = 11; -pub const DAY_5: crate::nl_item = 12; -pub const DAY_6: crate::nl_item = 13; -pub const DAY_7: crate::nl_item = 14; - -pub const MON_1: crate::nl_item = 22; -pub const MON_2: crate::nl_item = 23; -pub const MON_3: crate::nl_item = 24; -pub const MON_4: crate::nl_item = 25; -pub const MON_5: crate::nl_item = 26; -pub const MON_6: crate::nl_item = 27; -pub const MON_7: crate::nl_item = 28; -pub const MON_8: crate::nl_item = 29; -pub const MON_9: crate::nl_item = 30; -pub const MON_10: crate::nl_item = 31; -pub const MON_11: crate::nl_item = 32; -pub const MON_12: crate::nl_item = 33; - -pub const ABDAY_1: crate::nl_item = 15; -pub const ABDAY_2: crate::nl_item = 16; -pub const ABDAY_3: crate::nl_item = 17; -pub const ABDAY_4: crate::nl_item = 18; -pub const ABDAY_5: crate::nl_item = 19; -pub const ABDAY_6: crate::nl_item = 20; -pub const ABDAY_7: crate::nl_item = 21; - -pub const ABMON_1: crate::nl_item = 34; -pub const ABMON_2: crate::nl_item = 35; -pub const ABMON_3: crate::nl_item = 36; -pub const ABMON_4: crate::nl_item = 37; -pub const ABMON_5: crate::nl_item = 38; -pub const ABMON_6: crate::nl_item = 39; -pub const ABMON_7: crate::nl_item = 40; -pub const ABMON_8: crate::nl_item = 41; -pub const ABMON_9: crate::nl_item = 42; -pub const ABMON_10: crate::nl_item = 43; -pub const ABMON_11: crate::nl_item = 44; -pub const ABMON_12: crate::nl_item = 45; - -pub const AF_CCITT: c_int = 10; -pub const AF_CHAOS: c_int = 5; -pub const AF_CNT: c_int = 21; -pub const AF_COIP: c_int = 20; -pub const AF_DATAKIT: c_int = 9; -pub const AF_DECnet: c_int = 12; -pub const AF_DLI: c_int = 13; -pub const AF_E164: c_int = 26; -pub const AF_ECMA: c_int = 8; -pub const AF_HYLINK: c_int = 15; -pub const AF_IMPLINK: c_int = 3; -pub const AF_ISO: c_int = 7; -pub const AF_LAT: c_int = 14; -pub const AF_LINK: c_int = 18; -pub const AF_OSI: c_int = 7; -pub const AF_PUP: c_int = 
4; -pub const ALT_DIGITS: crate::nl_item = 50; -pub const AM_STR: crate::nl_item = 6; -pub const B76800: crate::speed_t = 76800; - -pub const BIOCFLUSH: c_int = 17000; -pub const BIOCGBLEN: c_int = 1074020966; -pub const BIOCGDLT: c_int = 1074020970; -pub const BIOCGHDRCMPLT: c_int = 1074020980; -pub const BIOCGRTIMEOUT: c_int = 1074807406; -pub const BIOCIMMEDIATE: c_int = -2147204496; -pub const BIOCPROMISC: c_int = 17001; -pub const BIOCSBLEN: c_int = -1073462682; -pub const BIOCSETF: c_int = -2146418073; -pub const BIOCSHDRCMPLT: c_int = -2147204491; -pub const BIOCSRTIMEOUT: c_int = -2146418067; -pub const BIOCVERSION: c_int = 1074020977; - -pub const BPF_ALIGNMENT: usize = size_of::(); -pub const CHAR_BIT: usize = 8; -pub const CODESET: crate::nl_item = 1; -pub const CRNCYSTR: crate::nl_item = 55; - -pub const D_FLAG_FILTER: c_int = 0x00000001; -pub const D_FLAG_STAT: c_int = 0x00000002; -pub const D_FLAG_STAT_FORM_MASK: c_int = 0x000000f0; -pub const D_FLAG_STAT_FORM_T32_2001: c_int = 0x00000010; -pub const D_FLAG_STAT_FORM_T32_2008: c_int = 0x00000020; -pub const D_FLAG_STAT_FORM_T64_2008: c_int = 0x00000030; -pub const D_FLAG_STAT_FORM_UNSET: c_int = 0x00000000; - -pub const D_FMT: crate::nl_item = 3; -pub const D_GETFLAG: c_int = 1; -pub const D_SETFLAG: c_int = 2; -pub const D_T_FMT: crate::nl_item = 2; -pub const ERA: crate::nl_item = 46; -pub const ERA_D_FMT: crate::nl_item = 47; -pub const ERA_D_T_FMT: crate::nl_item = 48; -pub const ERA_T_FMT: crate::nl_item = 49; -pub const RADIXCHAR: crate::nl_item = 51; -pub const THOUSEP: crate::nl_item = 52; -pub const YESEXPR: crate::nl_item = 53; -pub const NOEXPR: crate::nl_item = 54; -pub const F_GETOWN: c_int = 35; - -pub const FIONBIO: c_int = -2147195266; -pub const FIOASYNC: c_int = -2147195267; -pub const FIOCLEX: c_int = 26113; -pub const FIOGETOWN: c_int = 1074030203; -pub const FIONCLEX: c_int = 26114; -pub const FIONREAD: c_int = 1074030207; -pub const FIOSETOWN: c_int = -2147195268; - -pub const F_SETOWN: c_int = 36; -pub const IFF_LINK0: c_int = 0x00001000; -pub const IFF_LINK1: c_int = 0x00002000; -pub const IFF_LINK2: c_int = 0x00004000; -pub const IFF_OACTIVE: c_int = 0x00000400; -pub const IFF_SIMPLEX: c_int = 0x00000800; -pub const IHFLOW: tcflag_t = 0x00000001; -pub const IIDLE: tcflag_t = 0x00000008; -pub const IP_RECVDSTADDR: c_int = 7; -pub const IP_RECVIF: c_int = 20; -pub const IPTOS_ECN_NOTECT: u8 = 0x00; -pub const IUCLC: tcflag_t = 0x00000200; -pub const IUTF8: tcflag_t = 0x0004000; - -pub const KERN_ARGMAX: c_int = 8; -pub const KERN_BOOTTIME: c_int = 21; -pub const KERN_CLOCKRATE: c_int = 12; -pub const KERN_FILE: c_int = 15; -pub const KERN_HOSTID: c_int = 11; -pub const KERN_HOSTNAME: c_int = 10; -pub const KERN_JOB_CONTROL: c_int = 19; -pub const KERN_MAXFILES: c_int = 7; -pub const KERN_MAXPROC: c_int = 6; -pub const KERN_MAXVNODES: c_int = 5; -pub const KERN_NGROUPS: c_int = 18; -pub const KERN_OSRELEASE: c_int = 2; -pub const KERN_OSREV: c_int = 3; -pub const KERN_OSTYPE: c_int = 1; -pub const KERN_POSIX1: c_int = 17; -pub const KERN_PROC: c_int = 14; -pub const KERN_PROC_ALL: c_int = 0; -pub const KERN_PROC_PGRP: c_int = 2; -pub const KERN_PROC_PID: c_int = 1; -pub const KERN_PROC_RUID: c_int = 6; -pub const KERN_PROC_SESSION: c_int = 3; -pub const KERN_PROC_TTY: c_int = 4; -pub const KERN_PROC_UID: c_int = 5; -pub const KERN_PROF: c_int = 16; -pub const KERN_SAVED_IDS: c_int = 20; -pub const KERN_SECURELVL: c_int = 9; -pub const KERN_VERSION: c_int = 4; -pub const KERN_VNODE: c_int = 13; - -pub 
const LC_ALL: c_int = 63; -pub const LC_COLLATE: c_int = 1; -pub const LC_CTYPE: c_int = 2; -pub const LC_MESSAGES: c_int = 32; -pub const LC_MONETARY: c_int = 4; -pub const LC_NUMERIC: c_int = 8; -pub const LC_TIME: c_int = 16; - -pub const MAP_STACK: c_int = 0x00001000; -pub const MNT_NOEXEC: c_int = 0x02; -pub const MNT_NOSUID: c_int = 0x04; -pub const MNT_RDONLY: c_int = 0x01; - -pub const NET_RT_DUMP: c_int = 1; -pub const NET_RT_FLAGS: c_int = 2; -pub const OHFLOW: tcflag_t = 0x00000002; -pub const P_ALL: idtype_t = 0; -pub const PARSTK: tcflag_t = 0x00000004; -pub const PF_CCITT: c_int = 10; -pub const PF_CHAOS: c_int = 5; -pub const PF_CNT: c_int = 21; -pub const PF_COIP: c_int = 20; -pub const PF_DATAKIT: c_int = 9; -pub const PF_DECnet: c_int = 12; -pub const PF_DLI: c_int = 13; -pub const PF_ECMA: c_int = 8; -pub const PF_HYLINK: c_int = 15; -pub const PF_IMPLINK: c_int = 3; -pub const PF_ISO: c_int = 7; -pub const PF_LAT: c_int = 14; -pub const PF_LINK: c_int = 18; -pub const PF_OSI: c_int = 7; -pub const PF_PIP: c_int = 25; -pub const PF_PUP: c_int = 4; -pub const PF_RTIP: c_int = 22; -pub const PF_XTP: c_int = 19; -pub const PM_STR: crate::nl_item = 7; -pub const POSIX_MADV_DONTNEED: c_int = 4; -pub const POSIX_MADV_NORMAL: c_int = 0; -pub const POSIX_MADV_RANDOM: c_int = 2; -pub const POSIX_MADV_SEQUENTIAL: c_int = 1; -pub const POSIX_MADV_WILLNEED: c_int = 3; -pub const _POSIX_VDISABLE: c_int = 0; -pub const P_PGID: idtype_t = 2; -pub const P_PID: idtype_t = 1; -pub const PRIO_PGRP: c_int = 1; -pub const PRIO_PROCESS: c_int = 0; -pub const PRIO_USER: c_int = 2; -pub const pseudo_AF_PIP: c_int = 25; -pub const pseudo_AF_RTIP: c_int = 22; -pub const pseudo_AF_XTP: c_int = 19; -pub const REG_ASSERT: c_int = 15; -pub const REG_ATOI: c_int = 255; -pub const REG_BACKR: c_int = 0x400; -pub const REG_BASIC: c_int = 0x00; -pub const REG_DUMP: c_int = 0x80; -pub const REG_EMPTY: c_int = 14; -pub const REG_INVARG: c_int = 16; -pub const REG_ITOA: c_int = 0o400; -pub const REG_LARGE: c_int = 0x200; -pub const REG_NOSPEC: c_int = 0x10; -pub const REG_OK: c_int = 0; -pub const REG_PEND: c_int = 0x20; -pub const REG_TRACE: c_int = 0x100; - -pub const RLIMIT_AS: c_int = 6; -pub const RLIMIT_CORE: c_int = 4; -pub const RLIMIT_CPU: c_int = 0; -pub const RLIMIT_DATA: c_int = 2; -pub const RLIMIT_FSIZE: c_int = 1; -pub const RLIMIT_MEMLOCK: c_int = 7; -pub const RLIMIT_NOFILE: c_int = 5; -pub const RLIMIT_NPROC: c_int = 8; -pub const RLIMIT_RSS: c_int = 6; -pub const RLIMIT_STACK: c_int = 3; -pub const RLIMIT_VMEM: c_int = 6; -#[deprecated(since = "0.2.64", note = "Not stable across OS versions")] -pub const RLIM_NLIMITS: c_int = 14; - -pub const SCHED_ADJTOHEAD: c_int = 5; -pub const SCHED_ADJTOTAIL: c_int = 6; -pub const SCHED_MAXPOLICY: c_int = 7; -pub const SCHED_SETPRIO: c_int = 7; -pub const SCHED_SPORADIC: c_int = 4; - -pub const SHM_ANON: *mut c_char = -1isize as *mut c_char; -pub const SIGCLD: c_int = SIGCHLD; -pub const SIGDEADLK: c_int = 7; -pub const SIGEMT: c_int = 7; -pub const SIGEV_NONE: c_int = 0; -pub const SIGEV_SIGNAL: c_int = 129; -pub const SIGEV_THREAD: c_int = 135; -pub const SO_USELOOPBACK: c_int = 0x0040; -pub const _SS_ALIGNSIZE: usize = size_of::(); -pub const _SS_MAXSIZE: usize = 128; -pub const _SS_PAD1SIZE: usize = _SS_ALIGNSIZE - 2; -pub const _SS_PAD2SIZE: usize = _SS_MAXSIZE - 2 - _SS_PAD1SIZE - _SS_ALIGNSIZE; -pub const TC_CPOSIX: tcflag_t = CLOCAL | CREAD | CSIZE | CSTOPB | HUPCL | PARENB | PARODD; -pub const TCGETS: c_int = 0x404c540d; -pub const TC_IPOSIX: 
tcflag_t = - BRKINT | ICRNL | IGNBRK | IGNPAR | INLCR | INPCK | ISTRIP | IXOFF | IXON | PARMRK; -pub const TC_LPOSIX: tcflag_t = - ECHO | ECHOE | ECHOK | ECHONL | ICANON | IEXTEN | ISIG | NOFLSH | TOSTOP; -pub const TC_OPOSIX: tcflag_t = OPOST; -pub const T_FMT_AMPM: crate::nl_item = 5; - -pub const TIOCCBRK: c_int = 29818; -pub const TIOCCDTR: c_int = 29816; -pub const TIOCDRAIN: c_int = 29790; -pub const TIOCEXCL: c_int = 29709; -pub const TIOCFLUSH: c_int = -2147191792; -pub const TIOCGETA: c_int = 1078752275; -pub const TIOCGPGRP: c_int = 1074033783; -pub const TIOCGWINSZ: c_int = 1074295912; -pub const TIOCMBIC: c_int = -2147191701; -pub const TIOCMBIS: c_int = -2147191700; -pub const TIOCMGET: c_int = 1074033770; -pub const TIOCMSET: c_int = -2147191699; -pub const TIOCNOTTY: c_int = 29809; -pub const TIOCNXCL: c_int = 29710; -pub const TIOCOUTQ: c_int = 1074033779; -pub const TIOCPKT: c_int = -2147191696; -pub const TIOCPKT_DATA: c_int = 0x00; -pub const TIOCPKT_DOSTOP: c_int = 0x20; -pub const TIOCPKT_FLUSHREAD: c_int = 0x01; -pub const TIOCPKT_FLUSHWRITE: c_int = 0x02; -pub const TIOCPKT_IOCTL: c_int = 0x40; -pub const TIOCPKT_NOSTOP: c_int = 0x10; -pub const TIOCPKT_START: c_int = 0x08; -pub const TIOCPKT_STOP: c_int = 0x04; -pub const TIOCSBRK: c_int = 29819; -pub const TIOCSCTTY: c_int = 29793; -pub const TIOCSDTR: c_int = 29817; -pub const TIOCSETA: c_int = -2142473196; -pub const TIOCSETAF: c_int = -2142473194; -pub const TIOCSETAW: c_int = -2142473195; -pub const TIOCSPGRP: c_int = -2147191690; -pub const TIOCSTART: c_int = 29806; -pub const TIOCSTI: c_int = -2147388302; -pub const TIOCSTOP: c_int = 29807; -pub const TIOCSWINSZ: c_int = -2146929561; - -pub const USER_CS_PATH: c_int = 1; -pub const USER_BC_BASE_MAX: c_int = 2; -pub const USER_BC_DIM_MAX: c_int = 3; -pub const USER_BC_SCALE_MAX: c_int = 4; -pub const USER_BC_STRING_MAX: c_int = 5; -pub const USER_COLL_WEIGHTS_MAX: c_int = 6; -pub const USER_EXPR_NEST_MAX: c_int = 7; -pub const USER_LINE_MAX: c_int = 8; -pub const USER_RE_DUP_MAX: c_int = 9; -pub const USER_POSIX2_VERSION: c_int = 10; -pub const USER_POSIX2_C_BIND: c_int = 11; -pub const USER_POSIX2_C_DEV: c_int = 12; -pub const USER_POSIX2_CHAR_TERM: c_int = 13; -pub const USER_POSIX2_FORT_DEV: c_int = 14; -pub const USER_POSIX2_FORT_RUN: c_int = 15; -pub const USER_POSIX2_LOCALEDEF: c_int = 16; -pub const USER_POSIX2_SW_DEV: c_int = 17; -pub const USER_POSIX2_UPE: c_int = 18; -pub const USER_STREAM_MAX: c_int = 19; -pub const USER_TZNAME_MAX: c_int = 20; - -pub const VDOWN: usize = 31; -pub const VINS: usize = 32; -pub const VDEL: usize = 33; -pub const VRUB: usize = 34; -pub const VCAN: usize = 35; -pub const VHOME: usize = 36; -pub const VEND: usize = 37; -pub const VSPARE3: usize = 38; -pub const VSPARE4: usize = 39; -pub const VSWTCH: usize = 7; -pub const VDSUSP: usize = 11; -pub const VFWD: usize = 18; -pub const VLOGIN: usize = 19; -pub const VPREFIX: usize = 20; -pub const VSUFFIX: usize = 24; -pub const VLEFT: usize = 28; -pub const VRIGHT: usize = 29; -pub const VUP: usize = 30; -pub const XCASE: tcflag_t = 0x00000004; - -pub const PTHREAD_BARRIER_SERIAL_THREAD: c_int = -1; -pub const PTHREAD_CREATE_JOINABLE: c_int = 0x00; -pub const PTHREAD_CREATE_DETACHED: c_int = 0x01; - -pub const PTHREAD_MUTEX_ERRORCHECK: c_int = 1; -pub const PTHREAD_MUTEX_RECURSIVE: c_int = 2; -pub const PTHREAD_MUTEX_NORMAL: c_int = 3; -pub const PTHREAD_STACK_MIN: size_t = 256; -pub const PTHREAD_MUTEX_DEFAULT: c_int = 0; -pub const PTHREAD_MUTEX_STALLED: c_int = 0x00; -pub 
const PTHREAD_MUTEX_ROBUST: c_int = 0x10;
-pub const PTHREAD_PROCESS_PRIVATE: c_int = 0x00;
-pub const PTHREAD_PROCESS_SHARED: c_int = 0x01;
-
-pub const PTHREAD_KEYS_MAX: usize = 128;
-
-pub const PTHREAD_MUTEX_INITIALIZER: pthread_mutex_t = pthread_mutex_t {
-    __u: 0x80000000,
-    __owner: 0xffffffff,
-};
-pub const PTHREAD_COND_INITIALIZER: pthread_cond_t = pthread_cond_t {
-    __u: CLOCK_REALTIME as u32,
-    __owner: 0xfffffffb,
-};
-pub const PTHREAD_RWLOCK_INITIALIZER: pthread_rwlock_t = pthread_rwlock_t {
-    __active: 0,
-    __blockedwriters: 0,
-    __blockedreaders: 0,
-    __heavy: 0,
-    __lock: PTHREAD_MUTEX_INITIALIZER,
-    __rcond: PTHREAD_COND_INITIALIZER,
-    __wcond: PTHREAD_COND_INITIALIZER,
-    __owner: -2i32 as c_uint,
-    __spare: 0,
-};
-
-const fn _CMSG_ALIGN(len: usize) -> usize {
-    len + size_of::<usize>() - 1 & !(size_of::<usize>() - 1)
-}
-
-const fn _ALIGN(p: usize, b: usize) -> usize {
-    (p + b - 1) & !(b - 1)
-}
-
-f! {
-    pub fn CMSG_FIRSTHDR(mhdr: *const msghdr) -> *mut cmsghdr {
-        if (*mhdr).msg_controllen as usize >= size_of::<cmsghdr>() {
-            (*mhdr).msg_control as *mut cmsghdr
-        } else {
-            core::ptr::null_mut::<cmsghdr>()
-        }
-    }
-
-    pub fn CMSG_NXTHDR(mhdr: *const crate::msghdr, cmsg: *const cmsghdr) -> *mut cmsghdr {
-        let msg = _CMSG_ALIGN((*cmsg).cmsg_len as usize);
-        let next = cmsg as usize + msg + _CMSG_ALIGN(size_of::<cmsghdr>());
-        if next > (*mhdr).msg_control as usize + (*mhdr).msg_controllen as usize {
-            core::ptr::null_mut::<cmsghdr>()
-        } else {
-            (cmsg as usize + msg) as *mut cmsghdr
-        }
-    }
-
-    pub fn CMSG_DATA(cmsg: *const cmsghdr) -> *mut c_uchar {
-        (cmsg as *mut c_uchar).offset(_CMSG_ALIGN(size_of::<cmsghdr>()) as isize)
-    }
-
-    pub const fn CMSG_LEN(length: c_uint) -> c_uint {
-        _CMSG_ALIGN(size_of::<cmsghdr>()) as c_uint + length
-    }
-
-    pub const fn CMSG_SPACE(length: c_uint) -> c_uint {
-        (_CMSG_ALIGN(size_of::<cmsghdr>()) + _CMSG_ALIGN(length as usize)) as c_uint
-    }
-
-    pub fn FD_CLR(fd: c_int, set: *mut fd_set) -> () {
-        let fd = fd as usize;
-        let size = size_of_val(&(*set).fds_bits[0]) * 8;
-        (*set).fds_bits[fd / size] &= !(1 << (fd % size));
-        return;
-    }
-
-    pub fn FD_ISSET(fd: c_int, set: *const fd_set) -> bool {
-        let fd = fd as usize;
-        let size = size_of_val(&(*set).fds_bits[0]) * 8;
-        return ((*set).fds_bits[fd / size] & (1 << (fd % size))) != 0;
-    }
-
-    pub fn FD_SET(fd: c_int, set: *mut fd_set) -> () {
-        let fd = fd as usize;
-        let size = size_of_val(&(*set).fds_bits[0]) * 8;
-        (*set).fds_bits[fd / size] |= 1 << (fd % size);
-        return;
-    }
-
-    pub fn FD_ZERO(set: *mut fd_set) -> () {
-        for slot in (*set).fds_bits.iter_mut() {
-            *slot = 0;
-        }
-    }
-
-    pub fn _DEXTRA_FIRST(_d: *const dirent) -> *mut crate::dirent_extra {
-        let _f = &((*(_d)).d_name) as *const _;
-        let _s = _d as usize;
-
-        _ALIGN(_s + _f as usize - _s + (*_d).d_namelen as usize + 1, 8) as *mut crate::dirent_extra
-    }
-
-    pub fn _DEXTRA_VALID(_x: *const crate::dirent_extra, _d: *const dirent) -> bool {
-        let sz = _x as usize - _d as usize + size_of::<crate::dirent_extra>();
-        let rsz = (*_d).d_reclen as usize;
-
-        if sz > rsz || sz + (*_x).d_datalen as usize > rsz {
-            false
-        } else {
-            true
-        }
-    }
-
-    pub fn _DEXTRA_NEXT(_x: *const crate::dirent_extra) -> *mut crate::dirent_extra {
-        _ALIGN(
-            _x as usize + size_of::<crate::dirent_extra>() + (*_x).d_datalen as usize,
-            8,
-        ) as *mut crate::dirent_extra
-    }
-
-    pub fn SOCKCREDSIZE(ngrps: usize) -> usize {
-        let ngrps = if ngrps > 0 { ngrps - 1 } else { 0 };
-        size_of::<sockcred>() + size_of::<crate::gid_t>() * ngrps
-    }
-}
-
-safe_f!
{ - pub const fn WIFSTOPPED(status: c_int) -> bool { - (status & 0xff) == 0x7f - } - - pub const fn WSTOPSIG(status: c_int) -> c_int { - (status >> 8) & 0xff - } - - pub const fn WIFCONTINUED(status: c_int) -> bool { - status == 0xffff - } - - pub const fn WIFSIGNALED(status: c_int) -> bool { - ((status & 0x7f) + 1) as i8 >= 2 - } - - pub const fn WTERMSIG(status: c_int) -> c_int { - status & 0x7f - } - - pub const fn WIFEXITED(status: c_int) -> bool { - (status & 0x7f) == 0 - } - - pub const fn WEXITSTATUS(status: c_int) -> c_int { - (status >> 8) & 0xff - } - - pub const fn WCOREDUMP(status: c_int) -> bool { - (status & 0x80) != 0 - } - - pub const fn IPTOS_ECN(x: u8) -> u8 { - x & crate::IPTOS_ECN_MASK - } - - pub const fn makedev(major: c_uint, minor: c_uint) -> crate::dev_t { - ((major << 10) | (minor)) as crate::dev_t - } - - pub const fn major(dev: crate::dev_t) -> c_uint { - ((dev as c_uint) >> 10) & 0x3f - } - - pub const fn minor(dev: crate::dev_t) -> c_uint { - (dev as c_uint) & 0x3ff - } -} - -cfg_if! { - if #[cfg(not(target_env = "nto71_iosock"))] { - extern "C" { - pub fn sendmmsg( - sockfd: c_int, - msgvec: *mut crate::mmsghdr, - vlen: c_uint, - flags: c_uint, - ) -> c_int; - pub fn recvmmsg( - sockfd: c_int, - msgvec: *mut crate::mmsghdr, - vlen: c_uint, - flags: c_uint, - timeout: *mut crate::timespec, - ) -> c_int; - } - } else { - extern "C" { - pub fn sendmmsg( - sockfd: c_int, - msgvec: *mut crate::mmsghdr, - vlen: size_t, - flags: c_int, - ) -> ssize_t; - pub fn recvmmsg( - sockfd: c_int, - msgvec: *mut crate::mmsghdr, - vlen: size_t, - flags: c_int, - timeout: *const crate::timespec, - ) -> ssize_t; - } - } -} - -// Network related functions are provided by libsocket and regex -// functions are provided by libregex. -// In QNX <=7.0, libregex functions were included in libc itself. 
-#[link(name = "socket")] -#[cfg_attr(not(target_env = "nto70"), link(name = "regex"))] -extern "C" { - pub fn sem_destroy(sem: *mut sem_t) -> c_int; - pub fn sem_init(sem: *mut sem_t, pshared: c_int, value: c_uint) -> c_int; - pub fn fdatasync(fd: c_int) -> c_int; - pub fn getpriority(which: c_int, who: crate::id_t) -> c_int; - pub fn setpriority(which: c_int, who: crate::id_t, prio: c_int) -> c_int; - pub fn mkfifoat(dirfd: c_int, pathname: *const c_char, mode: mode_t) -> c_int; - pub fn mknodat(__fd: c_int, pathname: *const c_char, mode: mode_t, dev: crate::dev_t) -> c_int; - - pub fn clock_getres(clk_id: crate::clockid_t, tp: *mut crate::timespec) -> c_int; - pub fn clock_gettime(clk_id: crate::clockid_t, tp: *mut crate::timespec) -> c_int; - pub fn clock_settime(clk_id: crate::clockid_t, tp: *const crate::timespec) -> c_int; - pub fn clock_getcpuclockid(pid: crate::pid_t, clk_id: *mut crate::clockid_t) -> c_int; - - pub fn pthread_attr_getstack( - attr: *const crate::pthread_attr_t, - stackaddr: *mut *mut c_void, - stacksize: *mut size_t, - ) -> c_int; - pub fn memalign(align: size_t, size: size_t) -> *mut c_void; - pub fn setgroups(ngroups: c_int, ptr: *const crate::gid_t) -> c_int; - - pub fn posix_fadvise(fd: c_int, offset: off_t, len: off_t, advise: c_int) -> c_int; - pub fn futimens(fd: c_int, times: *const crate::timespec) -> c_int; - pub fn nl_langinfo(item: crate::nl_item) -> *mut c_char; - - pub fn utimensat( - dirfd: c_int, - path: *const c_char, - times: *const crate::timespec, - flag: c_int, - ) -> c_int; - - pub fn pthread_condattr_getclock( - attr: *const pthread_condattr_t, - clock_id: *mut clockid_t, - ) -> c_int; - pub fn pthread_condattr_setclock( - attr: *mut pthread_condattr_t, - clock_id: crate::clockid_t, - ) -> c_int; - pub fn pthread_condattr_setpshared(attr: *mut pthread_condattr_t, pshared: c_int) -> c_int; - pub fn pthread_mutexattr_setpshared(attr: *mut pthread_mutexattr_t, pshared: c_int) -> c_int; - pub fn pthread_rwlockattr_getpshared( - attr: *const pthread_rwlockattr_t, - val: *mut c_int, - ) -> c_int; - pub fn pthread_rwlockattr_setpshared(attr: *mut pthread_rwlockattr_t, val: c_int) -> c_int; - pub fn ptsname_r(fd: c_int, buf: *mut c_char, buflen: size_t) -> *mut c_char; - pub fn clearenv() -> c_int; - pub fn waitid( - idtype: idtype_t, - id: id_t, - infop: *mut crate::siginfo_t, - options: c_int, - ) -> c_int; - pub fn wait4( - pid: crate::pid_t, - status: *mut c_int, - options: c_int, - rusage: *mut crate::rusage, - ) -> crate::pid_t; - - // DIFF(main): changed to `*const *mut` in e77f551de9 - pub fn execvpe( - file: *const c_char, - argv: *const *const c_char, - envp: *const *const c_char, - ) -> c_int; - - pub fn getifaddrs(ifap: *mut *mut crate::ifaddrs) -> c_int; - pub fn freeifaddrs(ifa: *mut crate::ifaddrs); - pub fn bind( - socket: c_int, - address: *const crate::sockaddr, - address_len: crate::socklen_t, - ) -> c_int; - - pub fn writev(fd: c_int, iov: *const crate::iovec, iovcnt: c_int) -> ssize_t; - pub fn readv(fd: c_int, iov: *const crate::iovec, iovcnt: c_int) -> ssize_t; - - pub fn sendmsg(fd: c_int, msg: *const crate::msghdr, flags: c_int) -> ssize_t; - pub fn recvmsg(fd: c_int, msg: *mut crate::msghdr, flags: c_int) -> ssize_t; - pub fn openpty( - amaster: *mut c_int, - aslave: *mut c_int, - name: *mut c_char, - termp: *mut termios, - winp: *mut crate::winsize, - ) -> c_int; - pub fn forkpty( - amaster: *mut c_int, - name: *mut c_char, - termp: *mut termios, - winp: *mut crate::winsize, - ) -> crate::pid_t; - pub fn login_tty(fd: 
c_int) -> c_int; - - pub fn uname(buf: *mut crate::utsname) -> c_int; - - pub fn getpeereid(socket: c_int, euid: *mut crate::uid_t, egid: *mut crate::gid_t) -> c_int; - - pub fn strerror_r(errnum: c_int, buf: *mut c_char, buflen: size_t) -> c_int; - - pub fn abs(i: c_int) -> c_int; - pub fn labs(i: c_long) -> c_long; - pub fn rand() -> c_int; - pub fn srand(seed: c_uint); - - pub fn setpwent(); - pub fn endpwent(); - pub fn getpwent() -> *mut passwd; - pub fn setgrent(); - pub fn endgrent(); - pub fn getgrent() -> *mut crate::group; - pub fn setspent(); - pub fn endspent(); - - pub fn shm_open(name: *const c_char, oflag: c_int, mode: mode_t) -> c_int; - - pub fn ftok(pathname: *const c_char, proj_id: c_int) -> crate::key_t; - pub fn mprotect(addr: *mut c_void, len: size_t, prot: c_int) -> c_int; - - pub fn posix_fallocate(fd: c_int, offset: off_t, len: off_t) -> c_int; - pub fn mkostemp(template: *mut c_char, flags: c_int) -> c_int; - pub fn mkostemps(template: *mut c_char, suffixlen: c_int, flags: c_int) -> c_int; - pub fn sigtimedwait( - set: *const sigset_t, - info: *mut siginfo_t, - timeout: *const crate::timespec, - ) -> c_int; - pub fn sigwaitinfo(set: *const sigset_t, info: *mut siginfo_t) -> c_int; - pub fn pthread_setschedprio(native: crate::pthread_t, priority: c_int) -> c_int; - - pub fn if_nameindex() -> *mut if_nameindex; - pub fn if_freenameindex(ptr: *mut if_nameindex); - - pub fn glob( - pattern: *const c_char, - flags: c_int, - errfunc: Option<extern "C" fn(epath: *const c_char, errno: c_int) -> c_int>, - pglob: *mut crate::glob_t, - ) -> c_int; - pub fn globfree(pglob: *mut crate::glob_t); - - pub fn posix_madvise(addr: *mut c_void, len: size_t, advice: c_int) -> c_int; - - pub fn shm_unlink(name: *const c_char) -> c_int; - - pub fn seekdir(dirp: *mut crate::DIR, loc: c_long); - - pub fn telldir(dirp: *mut crate::DIR) -> c_long; - - pub fn msync(addr: *mut c_void, len: size_t, flags: c_int) -> c_int; - - pub fn recvfrom( - socket: c_int, - buf: *mut c_void, - len: size_t, - flags: c_int, - addr: *mut crate::sockaddr, - addrlen: *mut crate::socklen_t, - ) -> ssize_t; - pub fn mkstemps(template: *mut c_char, suffixlen: c_int) -> c_int; - - pub fn getdomainname(name: *mut c_char, len: size_t) -> c_int; - pub fn setdomainname(name: *const c_char, len: size_t) -> c_int; - pub fn sync(); - pub fn pthread_getschedparam( - native: crate::pthread_t, - policy: *mut c_int, - param: *mut crate::sched_param, - ) -> c_int; - pub fn umount(target: *const c_char, flags: c_int) -> c_int; - pub fn sched_get_priority_max(policy: c_int) -> c_int; - pub fn settimeofday(tv: *const crate::timeval, tz: *const c_void) -> c_int; - pub fn sched_rr_get_interval(pid: crate::pid_t, tp: *mut crate::timespec) -> c_int; - pub fn sem_timedwait(sem: *mut sem_t, abstime: *const crate::timespec) -> c_int; - pub fn sem_getvalue(sem: *mut sem_t, sval: *mut c_int) -> c_int; - pub fn sched_setparam(pid: crate::pid_t, param: *const crate::sched_param) -> c_int; - pub fn mount( - special_device: *const c_char, - mount_directory: *const c_char, - flags: c_int, - mount_type: *const c_char, - mount_data: *const c_void, - mount_datalen: c_int, - ) -> c_int; - pub fn sched_getparam(pid: crate::pid_t, param: *mut crate::sched_param) -> c_int; - pub fn pthread_mutex_consistent(mutex: *mut pthread_mutex_t) -> c_int; - pub fn pthread_mutex_timedlock( - lock: *mut pthread_mutex_t, - abstime: *const crate::timespec, - ) -> c_int; - pub fn pthread_spin_init(lock: *mut crate::pthread_spinlock_t, pshared: c_int) -> c_int; - pub fn pthread_spin_destroy(lock: *mut
crate::pthread_spinlock_t) -> c_int; - pub fn pthread_spin_lock(lock: *mut crate::pthread_spinlock_t) -> c_int; - pub fn pthread_spin_trylock(lock: *mut crate::pthread_spinlock_t) -> c_int; - pub fn pthread_spin_unlock(lock: *mut crate::pthread_spinlock_t) -> c_int; - pub fn pthread_barrierattr_init(__attr: *mut crate::pthread_barrierattr_t) -> c_int; - pub fn pthread_barrierattr_destroy(__attr: *mut crate::pthread_barrierattr_t) -> c_int; - pub fn pthread_barrierattr_getpshared( - __attr: *const crate::pthread_barrierattr_t, - __pshared: *mut c_int, - ) -> c_int; - pub fn pthread_barrierattr_setpshared( - __attr: *mut crate::pthread_barrierattr_t, - __pshared: c_int, - ) -> c_int; - pub fn pthread_barrier_init( - __barrier: *mut crate::pthread_barrier_t, - __attr: *const crate::pthread_barrierattr_t, - __count: c_uint, - ) -> c_int; - pub fn pthread_barrier_destroy(__barrier: *mut crate::pthread_barrier_t) -> c_int; - pub fn pthread_barrier_wait(__barrier: *mut crate::pthread_barrier_t) -> c_int; - - pub fn sched_getscheduler(pid: crate::pid_t) -> c_int; - pub fn clock_nanosleep( - clk_id: crate::clockid_t, - flags: c_int, - rqtp: *const crate::timespec, - rmtp: *mut crate::timespec, - ) -> c_int; - pub fn pthread_attr_getguardsize( - attr: *const crate::pthread_attr_t, - guardsize: *mut size_t, - ) -> c_int; - pub fn pthread_attr_setguardsize(attr: *mut crate::pthread_attr_t, guardsize: size_t) -> c_int; - pub fn sethostname(name: *const c_char, len: size_t) -> c_int; - pub fn sched_get_priority_min(policy: c_int) -> c_int; - pub fn pthread_condattr_getpshared( - attr: *const pthread_condattr_t, - pshared: *mut c_int, - ) -> c_int; - pub fn pthread_setschedparam( - native: crate::pthread_t, - policy: c_int, - param: *const crate::sched_param, - ) -> c_int; - pub fn sched_setscheduler( - pid: crate::pid_t, - policy: c_int, - param: *const crate::sched_param, - ) -> c_int; - pub fn sigsuspend(mask: *const crate::sigset_t) -> c_int; - pub fn getgrgid_r( - gid: crate::gid_t, - grp: *mut crate::group, - buf: *mut c_char, - buflen: size_t, - result: *mut *mut crate::group, - ) -> c_int; - pub fn sem_close(sem: *mut sem_t) -> c_int; - pub fn getdtablesize() -> c_int; - pub fn getgrnam_r( - name: *const c_char, - grp: *mut crate::group, - buf: *mut c_char, - buflen: size_t, - result: *mut *mut crate::group, - ) -> c_int; - pub fn initgroups(user: *const c_char, group: crate::gid_t) -> c_int; - pub fn pthread_sigmask(how: c_int, set: *const sigset_t, oldset: *mut sigset_t) -> c_int; - pub fn sem_open(name: *const c_char, oflag: c_int, ...) 
-> *mut sem_t; - pub fn getgrnam(name: *const c_char) -> *mut crate::group; - pub fn pthread_cancel(thread: crate::pthread_t) -> c_int; - pub fn pthread_kill(thread: crate::pthread_t, sig: c_int) -> c_int; - pub fn sem_unlink(name: *const c_char) -> c_int; - pub fn daemon(nochdir: c_int, noclose: c_int) -> c_int; - pub fn getpwnam_r( - name: *const c_char, - pwd: *mut passwd, - buf: *mut c_char, - buflen: size_t, - result: *mut *mut passwd, - ) -> c_int; - pub fn getpwuid_r( - uid: crate::uid_t, - pwd: *mut passwd, - buf: *mut c_char, - buflen: size_t, - result: *mut *mut passwd, - ) -> c_int; - pub fn sigwait(set: *const sigset_t, sig: *mut c_int) -> c_int; - pub fn pthread_atfork( - prepare: Option<unsafe extern "C" fn()>, - parent: Option<unsafe extern "C" fn()>, - child: Option<unsafe extern "C" fn()>, - ) -> c_int; - pub fn getgrgid(gid: crate::gid_t) -> *mut crate::group; - pub fn getgrouplist( - user: *const c_char, - group: crate::gid_t, - groups: *mut crate::gid_t, - ngroups: *mut c_int, - ) -> c_int; - pub fn pthread_mutexattr_getpshared( - attr: *const pthread_mutexattr_t, - pshared: *mut c_int, - ) -> c_int; - pub fn pthread_mutexattr_getrobust( - attr: *const pthread_mutexattr_t, - robustness: *mut c_int, - ) -> c_int; - pub fn pthread_mutexattr_setrobust(attr: *mut pthread_mutexattr_t, robustness: c_int) -> c_int; - pub fn pthread_create( - native: *mut crate::pthread_t, - attr: *const crate::pthread_attr_t, - f: extern "C" fn(*mut c_void) -> *mut c_void, - value: *mut c_void, - ) -> c_int; - pub fn getitimer(which: c_int, curr_value: *mut crate::itimerval) -> c_int; - pub fn setitimer( - which: c_int, - value: *const crate::itimerval, - ovalue: *mut crate::itimerval, - ) -> c_int; - pub fn posix_spawn( - pid: *mut crate::pid_t, - path: *const c_char, - file_actions: *const crate::posix_spawn_file_actions_t, - attrp: *const crate::posix_spawnattr_t, - argv: *const *mut c_char, - envp: *const *mut c_char, - ) -> c_int; - pub fn posix_spawnp( - pid: *mut crate::pid_t, - file: *const c_char, - file_actions: *const crate::posix_spawn_file_actions_t, - attrp: *const crate::posix_spawnattr_t, - argv: *const *mut c_char, - envp: *const *mut c_char, - ) -> c_int; - pub fn posix_spawnattr_init(attr: *mut posix_spawnattr_t) -> c_int; - pub fn posix_spawnattr_destroy(attr: *mut posix_spawnattr_t) -> c_int; - pub fn posix_spawnattr_getsigdefault( - attr: *const posix_spawnattr_t, - default: *mut crate::sigset_t, - ) -> c_int; - pub fn posix_spawnattr_setsigdefault( - attr: *mut posix_spawnattr_t, - default: *const crate::sigset_t, - ) -> c_int; - pub fn posix_spawnattr_getsigmask( - attr: *const posix_spawnattr_t, - default: *mut crate::sigset_t, - ) -> c_int; - pub fn posix_spawnattr_setsigmask( - attr: *mut posix_spawnattr_t, - default: *const crate::sigset_t, - ) -> c_int; - pub fn posix_spawnattr_getflags(attr: *const posix_spawnattr_t, flags: *mut c_short) -> c_int; - pub fn posix_spawnattr_setflags(attr: *mut posix_spawnattr_t, flags: c_short) -> c_int; - pub fn posix_spawnattr_getpgroup( - attr: *const posix_spawnattr_t, - flags: *mut crate::pid_t, - ) -> c_int; - pub fn posix_spawnattr_setpgroup(attr: *mut posix_spawnattr_t, flags: crate::pid_t) -> c_int; - pub fn posix_spawnattr_getschedpolicy( - attr: *const posix_spawnattr_t, - flags: *mut c_int, - ) -> c_int; - pub fn posix_spawnattr_setschedpolicy(attr: *mut posix_spawnattr_t, flags: c_int) -> c_int; - pub fn posix_spawnattr_getschedparam( - attr: *const posix_spawnattr_t, - param: *mut crate::sched_param, - ) -> c_int; - pub fn posix_spawnattr_setschedparam( - attr: *mut posix_spawnattr_t, -
param: *const crate::sched_param, - ) -> c_int; - - pub fn posix_spawn_file_actions_init(actions: *mut posix_spawn_file_actions_t) -> c_int; - pub fn posix_spawn_file_actions_destroy(actions: *mut posix_spawn_file_actions_t) -> c_int; - pub fn posix_spawn_file_actions_addopen( - actions: *mut posix_spawn_file_actions_t, - fd: c_int, - path: *const c_char, - oflag: c_int, - mode: mode_t, - ) -> c_int; - pub fn posix_spawn_file_actions_addclose( - actions: *mut posix_spawn_file_actions_t, - fd: c_int, - ) -> c_int; - pub fn posix_spawn_file_actions_adddup2( - actions: *mut posix_spawn_file_actions_t, - fd: c_int, - newfd: c_int, - ) -> c_int; - pub fn popen(command: *const c_char, mode: *const c_char) -> *mut crate::FILE; - pub fn faccessat(dirfd: c_int, pathname: *const c_char, mode: c_int, flags: c_int) -> c_int; - pub fn inotify_rm_watch(fd: c_int, wd: c_int) -> c_int; - pub fn inotify_init() -> c_int; - pub fn inotify_add_watch(fd: c_int, path: *const c_char, mask: u32) -> c_int; - - pub fn gettid() -> crate::pid_t; - - pub fn pthread_getcpuclockid(thread: crate::pthread_t, clk_id: *mut crate::clockid_t) -> c_int; - - pub fn getnameinfo( - sa: *const crate::sockaddr, - salen: crate::socklen_t, - host: *mut c_char, - hostlen: crate::socklen_t, - serv: *mut c_char, - servlen: crate::socklen_t, - flags: c_int, - ) -> c_int; - - pub fn mallopt(param: c_int, value: i64) -> c_int; - pub fn gettimeofday(tp: *mut crate::timeval, tz: *mut c_void) -> c_int; - - pub fn ctermid(s: *mut c_char) -> *mut c_char; - pub fn ioctl(fd: c_int, request: c_int, ...) -> c_int; - - pub fn mallinfo() -> crate::mallinfo; - pub fn getpwent_r( - pwd: *mut crate::passwd, - buf: *mut c_char, - __bufsize: c_int, - __result: *mut *mut crate::passwd, - ) -> c_int; - pub fn pthread_getname_np(thread: crate::pthread_t, name: *mut c_char, len: c_int) -> c_int; - pub fn pthread_setname_np(thread: crate::pthread_t, name: *const c_char) -> c_int; - - pub fn sysctl( - _: *const c_int, - _: c_uint, - _: *mut c_void, - _: *mut size_t, - _: *const c_void, - _: size_t, - ) -> c_int; - - pub fn getrlimit(resource: c_int, rlim: *mut crate::rlimit) -> c_int; - pub fn setrlimit(resource: c_int, rlp: *const crate::rlimit) -> c_int; - - pub fn lio_listio( - __mode: c_int, - __list: *const *mut aiocb, - __nent: c_int, - __sig: *mut sigevent, - ) -> c_int; - - pub fn dl_iterate_phdr( - callback: Option< - unsafe extern "C" fn( - // The original .h file declares this as *const, but for consistency with other platforms, - // changing this to *mut to make it easier to use. - // Maybe in v0.3 all platforms should use this as a *const. - info: *mut dl_phdr_info, - size: size_t, - data: *mut c_void, - ) -> c_int, - >, - data: *mut c_void, - ) -> c_int; - - pub fn memset_s(s: *mut c_void, smax: size_t, c: c_int, n: size_t) -> c_int; - - pub fn regcomp(__preg: *mut crate::regex_t, __pattern: *const c_char, __cflags: c_int) - -> c_int; - pub fn regexec( - __preg: *const crate::regex_t, - __str: *const c_char, - __nmatch: size_t, - __pmatch: *mut crate::regmatch_t, - __eflags: c_int, - ) -> c_int; - pub fn regerror( - __errcode: c_int, - __preg: *const crate::regex_t, - __errbuf: *mut c_char, - __errbuf_size: size_t, - ) -> size_t; - pub fn regfree(__preg: *mut crate::regex_t); - pub fn dirfd(__dirp: *mut crate::DIR) -> c_int; - pub fn dircntl(dir: *mut crate::DIR, cmd: c_int, ...) 
-> c_int; - - pub fn aio_cancel(__fd: c_int, __aiocbp: *mut crate::aiocb) -> c_int; - pub fn aio_error(__aiocbp: *const crate::aiocb) -> c_int; - pub fn aio_fsync(__operation: c_int, __aiocbp: *mut crate::aiocb) -> c_int; - pub fn aio_read(__aiocbp: *mut crate::aiocb) -> c_int; - pub fn aio_return(__aiocpb: *mut crate::aiocb) -> ssize_t; - pub fn aio_suspend( - __list: *const *const crate::aiocb, - __nent: c_int, - __timeout: *const crate::timespec, - ) -> c_int; - pub fn aio_write(__aiocpb: *mut crate::aiocb) -> c_int; - - pub fn mq_close(__mqdes: crate::mqd_t) -> c_int; - pub fn mq_getattr(__mqdes: crate::mqd_t, __mqstat: *mut crate::mq_attr) -> c_int; - pub fn mq_notify(__mqdes: crate::mqd_t, __notification: *const crate::sigevent) -> c_int; - pub fn mq_open(__name: *const c_char, __oflag: c_int, ...) -> crate::mqd_t; - pub fn mq_receive( - __mqdes: crate::mqd_t, - __msg_ptr: *mut c_char, - __msg_len: size_t, - __msg_prio: *mut c_uint, - ) -> ssize_t; - pub fn mq_send( - __mqdes: crate::mqd_t, - __msg_ptr: *const c_char, - __msg_len: size_t, - __msg_prio: c_uint, - ) -> c_int; - pub fn mq_setattr( - __mqdes: crate::mqd_t, - __mqstat: *const mq_attr, - __omqstat: *mut mq_attr, - ) -> c_int; - pub fn mq_timedreceive( - __mqdes: crate::mqd_t, - __msg_ptr: *mut c_char, - __msg_len: size_t, - __msg_prio: *mut c_uint, - __abs_timeout: *const crate::timespec, - ) -> ssize_t; - pub fn mq_timedsend( - __mqdes: crate::mqd_t, - __msg_ptr: *const c_char, - __msg_len: size_t, - __msg_prio: c_uint, - __abs_timeout: *const crate::timespec, - ) -> c_int; - pub fn mq_unlink(__name: *const c_char) -> c_int; - pub fn __get_errno_ptr() -> *mut c_int; - - // System page, see https://www.qnx.com/developers/docs/7.1#com.qnx.doc.neutrino.building/topic/syspage/syspage_about.html - pub static mut _syspage_ptr: *mut syspage_entry; - - // Function on the stack after a call to pthread_create(). This is used - // as a sentinel to work around an infitnite loop in the unwinding code. - pub fn __my_thread_exit(value_ptr: *mut *const c_void); -} - -// Models the implementation in stdlib.h. Ctest will fail if trying to use the -// default symbol from libc -pub unsafe fn atexit(cb: extern "C" fn()) -> c_int { - extern "C" { - static __dso_handle: *mut c_void; - pub fn __cxa_atexit(cb: extern "C" fn(), __arg: *mut c_void, __dso: *mut c_void) -> c_int; - } - __cxa_atexit(cb, 0 as *mut c_void, __dso_handle) -} - -impl siginfo_t { - pub unsafe fn si_addr(&self) -> *mut c_void { - #[repr(C)] - struct siginfo_si_addr { - _pad: [u8; 32], - si_addr: *mut c_void, - } - (*(self as *const siginfo_t as *const siginfo_si_addr)).si_addr - } - - pub unsafe fn si_value(&self) -> crate::sigval { - #[repr(C)] - struct siginfo_si_value { - _pad: [u8; 32], - si_value: crate::sigval, - } - (*(self as *const siginfo_t as *const siginfo_si_value)).si_value - } - - pub unsafe fn si_pid(&self) -> crate::pid_t { - #[repr(C)] - struct siginfo_si_pid { - _pad: [u8; 16], - si_pid: crate::pid_t, - } - (*(self as *const siginfo_t as *const siginfo_si_pid)).si_pid - } - - pub unsafe fn si_uid(&self) -> crate::uid_t { - #[repr(C)] - struct siginfo_si_uid { - _pad: [u8; 24], - si_uid: crate::uid_t, - } - (*(self as *const siginfo_t as *const siginfo_si_uid)).si_uid - } - - pub unsafe fn si_status(&self) -> c_int { - #[repr(C)] - struct siginfo_si_status { - _pad: [u8; 28], - si_status: c_int, - } - (*(self as *const siginfo_t as *const siginfo_si_status)).si_status - } -} - -cfg_if! 
{ - if #[cfg(target_arch = "x86_64")] { - mod x86_64; - pub use self::x86_64::*; - } else if #[cfg(target_arch = "aarch64")] { - mod aarch64; - pub use self::aarch64::*; - } else { - panic!("Unsupported arch"); - } -} - -mod neutrino; -pub use self::neutrino::*; diff --git a/vendor/libc/src/unix/nto/neutrino.rs b/vendor/libc/src/unix/nto/neutrino.rs deleted file mode 100644 index 8aac4680097850..00000000000000 --- a/vendor/libc/src/unix/nto/neutrino.rs +++ /dev/null @@ -1,1270 +0,0 @@ -use crate::prelude::*; - -pub type nto_job_t = crate::sync_t; - -s! { - pub struct syspage_entry_info { - pub entry_off: u16, - pub entry_size: u16, - } - pub struct syspage_array_info { - entry_off: u16, - entry_size: u16, - element_size: u16, - } - - pub struct intrspin { - pub value: c_uint, // volatile - } - - pub struct iov_t { - pub iov_base: *mut c_void, // union - pub iov_len: size_t, - } - - pub struct _itimer { - pub nsec: u64, - pub interval_nsec: u64, - } - - pub struct _msg_info64 { - pub nd: u32, - pub srcnd: u32, - pub pid: crate::pid_t, - pub tid: i32, - pub chid: i32, - pub scoid: i32, - pub coid: i32, - pub priority: i16, - pub flags: i16, - pub msglen: isize, - pub srcmsglen: isize, - pub dstmsglen: isize, - pub type_id: u32, - reserved: u32, - } - - pub struct _cred_info { - pub ruid: crate::uid_t, - pub euid: crate::uid_t, - pub suid: crate::uid_t, - pub rgid: crate::gid_t, - pub egid: crate::gid_t, - pub sgid: crate::gid_t, - pub ngroups: u32, - pub grouplist: [crate::gid_t; 8], - } - - pub struct _client_info { - pub nd: u32, - pub pid: crate::pid_t, - pub sid: crate::pid_t, - pub flags: u32, - pub cred: crate::_cred_info, - } - - pub struct _client_able { - pub ability: u32, - pub flags: u32, - pub range_lo: u64, - pub range_hi: u64, - } - - pub struct nto_channel_config { - pub event: crate::sigevent, - pub num_pulses: c_uint, - pub rearm_threshold: c_uint, - pub options: c_uint, - reserved: [c_uint; 3], - } - - // TODO: The following structures are defined in a header file which doesn't - // appear as part of the default headers found in a standard installation - // of Neutrino 7.1 SDP. Commented out for now. 
- //pub struct _asyncmsg_put_header { - // pub err: c_int, - // pub iov: *mut crate::iov_t, - // pub parts: c_int, - // pub handle: c_uint, - // pub cb: Option< - // unsafe extern "C" fn( - // err: c_int, - // buf: *mut c_void, - // handle: c_uint, - // ) -> c_int>, - // pub put_hdr_flags: c_uint, - //} - - //pub struct _asyncmsg_connection_attr { - // pub call_back: Option< - // unsafe extern "C" fn( - // err: c_int, - // buff: *mut c_void, - // handle: c_uint, - // ) -> c_int>, - // pub buffer_size: size_t, - // pub max_num_buffer: c_uint, - // pub trigger_num_msg: c_uint, - // pub trigger_time: crate::_itimer, - // reserve: c_uint, - //} - - //pub struct _asyncmsg_connection_descriptor { - // pub flags: c_uint, - // pub sendq_size: c_uint, - // pub sendq_head: c_uint, - // pub sendq_tail: c_uint, - // pub sendq_free: c_uint, - // pub err: c_int, - // pub ev: crate::sigevent, - // pub num_curmsg: c_uint, - // pub ttimer: crate::timer_t, - // pub block_con: crate::pthread_cond_t, - // pub mu: crate::pthread_mutex_t, - // reserved: c_uint, - // pub attr: crate::_asyncmsg_connection_attr, - // pub reserves: [c_uint; 3], - // pub sendq: [crate::_asyncmsg_put_header; 1], // flexarray - //} - - pub struct __c_anonymous_struct_ev { - pub event: crate::sigevent, - pub coid: c_int, - } - - pub struct _channel_connect_attr { - // union - pub ev: crate::__c_anonymous_struct_ev, - } - - // FIXME(1.0): This should not implement `PartialEq` - #[allow(unpredictable_function_pointer_comparisons)] - pub struct _sighandler_info { - pub siginfo: crate::siginfo_t, - pub handler: Option, - pub context: *mut c_void, - } - - pub struct __c_anonymous_struct_time { - pub length: c_uint, - pub scale: c_uint, - } - - pub struct _idle_hook { - pub hook_size: c_uint, - pub cmd: c_uint, - pub mode: c_uint, - pub latency: c_uint, - pub next_fire: u64, - pub curr_time: u64, - pub tod_adjust: u64, - pub resp: c_uint, - pub time: __c_anonymous_struct_time, - pub trigger: crate::sigevent, - pub intrs: *mut c_uint, - pub block_stack_size: c_uint, - } - - pub struct _clockadjust { - pub tick_count: u32, - pub tick_nsec_inc: i32, - } - - pub struct qtime_entry { - pub cycles_per_sec: u64, - pub nsec_tod_adjust: u64, // volatile - pub nsec: u64, // volatile - pub nsec_inc: u32, - pub boot_time: u32, - pub adjust: _clockadjust, - pub timer_rate: u32, - pub timer_scale: i32, - pub timer_load: u32, - pub intr: i32, - pub epoch: u32, - pub flags: u32, - pub rr_interval_mul: u32, - pub timer_load_hi: u32, - pub nsec_stable: u64, // volatile - pub timer_load_max: u64, - pub timer_prog_time: u32, - spare: [u32; 7], - } - - pub struct _sched_info { - pub priority_min: c_int, - pub priority_max: c_int, - pub interval: u64, - pub priority_priv: c_int, - reserved: [c_int; 11], - } - - pub struct _timer_info { - pub itime: crate::_itimer, - pub otime: crate::_itimer, - pub flags: u32, - pub tid: i32, - pub notify: i32, - pub clockid: crate::clockid_t, - pub overruns: u32, - pub event: crate::sigevent, // union - } - - pub struct _clockperiod { - pub nsec: u32, - pub fract: i32, - } -} - -s_no_extra_traits! 
{ - #[repr(align(8))] - pub struct syspage_entry { - pub size: u16, - pub total_size: u16, - pub type_: u16, - pub num_cpu: u16, - pub system_private: syspage_entry_info, - pub old_asinfo: syspage_entry_info, - pub __mangle_name_to_cause_compilation_errs_meminfo: syspage_entry_info, - pub hwinfo: syspage_entry_info, - pub old_cpuinfo: syspage_entry_info, - pub old_cacheattr: syspage_entry_info, - pub qtime: syspage_entry_info, - pub callout: syspage_entry_info, - pub callin: syspage_entry_info, - pub typed_strings: syspage_entry_info, - pub strings: syspage_entry_info, - pub old_intrinfo: syspage_entry_info, - pub smp: syspage_entry_info, - pub pminfo: syspage_entry_info, - pub old_mdriver: syspage_entry_info, - spare0: [u32; 1], - __reserved: [u8; 160], // anonymous union with architecture dependent structs - pub new_asinfo: syspage_array_info, - pub new_cpuinfo: syspage_array_info, - pub new_cacheattr: syspage_array_info, - pub new_intrinfo: syspage_array_info, - pub new_mdriver: syspage_array_info, - } -} - -pub const SYSMGR_PID: u32 = 1; -pub const SYSMGR_CHID: u32 = 1; -pub const SYSMGR_COID: u32 = _NTO_SIDE_CHANNEL; -pub const SYSMGR_HANDLE: u32 = 0; - -pub const STATE_DEAD: c_int = 0x00; -pub const STATE_RUNNING: c_int = 0x01; -pub const STATE_READY: c_int = 0x02; -pub const STATE_STOPPED: c_int = 0x03; -pub const STATE_SEND: c_int = 0x04; -pub const STATE_RECEIVE: c_int = 0x05; -pub const STATE_REPLY: c_int = 0x06; -pub const STATE_STACK: c_int = 0x07; -pub const STATE_WAITTHREAD: c_int = 0x08; -pub const STATE_WAITPAGE: c_int = 0x09; -pub const STATE_SIGSUSPEND: c_int = 0x0a; -pub const STATE_SIGWAITINFO: c_int = 0x0b; -pub const STATE_NANOSLEEP: c_int = 0x0c; -pub const STATE_MUTEX: c_int = 0x0d; -pub const STATE_CONDVAR: c_int = 0x0e; -pub const STATE_JOIN: c_int = 0x0f; -pub const STATE_INTR: c_int = 0x10; -pub const STATE_SEM: c_int = 0x11; -pub const STATE_WAITCTX: c_int = 0x12; -pub const STATE_NET_SEND: c_int = 0x13; -pub const STATE_NET_REPLY: c_int = 0x14; -pub const STATE_MAX: c_int = 0x18; - -pub const _NTO_TIMEOUT_RECEIVE: i32 = 1 << STATE_RECEIVE; -pub const _NTO_TIMEOUT_SEND: i32 = 1 << STATE_SEND; -pub const _NTO_TIMEOUT_REPLY: i32 = 1 << STATE_REPLY; -pub const _NTO_TIMEOUT_SIGSUSPEND: i32 = 1 << STATE_SIGSUSPEND; -pub const _NTO_TIMEOUT_SIGWAITINFO: i32 = 1 << STATE_SIGWAITINFO; -pub const _NTO_TIMEOUT_NANOSLEEP: i32 = 1 << STATE_NANOSLEEP; -pub const _NTO_TIMEOUT_MUTEX: i32 = 1 << STATE_MUTEX; -pub const _NTO_TIMEOUT_CONDVAR: i32 = 1 << STATE_CONDVAR; -pub const _NTO_TIMEOUT_JOIN: i32 = 1 << STATE_JOIN; -pub const _NTO_TIMEOUT_INTR: i32 = 1 << STATE_INTR; -pub const _NTO_TIMEOUT_SEM: i32 = 1 << STATE_SEM; - -pub const _NTO_MI_ENDIAN_BIG: u32 = 1; -pub const _NTO_MI_ENDIAN_DIFF: u32 = 2; -pub const _NTO_MI_UNBLOCK_REQ: u32 = 256; -pub const _NTO_MI_NET_CRED_DIRTY: u32 = 512; -pub const _NTO_MI_CONSTRAINED: u32 = 1024; -pub const _NTO_MI_CHROOT: u32 = 2048; -pub const _NTO_MI_BITS_64: u32 = 4096; -pub const _NTO_MI_BITS_DIFF: u32 = 8192; -pub const _NTO_MI_SANDBOX: u32 = 16384; - -pub const _NTO_CI_ENDIAN_BIG: u32 = 1; -pub const _NTO_CI_BKGND_PGRP: u32 = 4; -pub const _NTO_CI_ORPHAN_PGRP: u32 = 8; -pub const _NTO_CI_STOPPED: u32 = 128; -pub const _NTO_CI_UNABLE: u32 = 256; -pub const _NTO_CI_TYPE_ID: u32 = 512; -pub const _NTO_CI_CHROOT: u32 = 2048; -pub const _NTO_CI_BITS_64: u32 = 4096; -pub const _NTO_CI_SANDBOX: u32 = 16384; -pub const _NTO_CI_LOADER: u32 = 32768; -pub const _NTO_CI_FULL_GROUPS: u32 = 2147483648; - -pub const _NTO_TI_ACTIVE: u32 = 1; -pub 
const _NTO_TI_ABSOLUTE: u32 = 2; -pub const _NTO_TI_EXPIRED: u32 = 4; -pub const _NTO_TI_TOD_BASED: u32 = 8; -pub const _NTO_TI_TARGET_PROCESS: u32 = 16; -pub const _NTO_TI_REPORT_TOLERANCE: u32 = 32; -pub const _NTO_TI_PRECISE: u32 = 64; -pub const _NTO_TI_TOLERANT: u32 = 128; -pub const _NTO_TI_WAKEUP: u32 = 256; -pub const _NTO_TI_PROCESS_TOLERANT: u32 = 512; -pub const _NTO_TI_HIGH_RESOLUTION: u32 = 1024; - -pub const _PULSE_TYPE: u32 = 0; -pub const _PULSE_SUBTYPE: u32 = 0; -pub const _PULSE_CODE_UNBLOCK: i32 = -32; -pub const _PULSE_CODE_DISCONNECT: i32 = -33; -pub const _PULSE_CODE_THREADDEATH: i32 = -34; -pub const _PULSE_CODE_COIDDEATH: i32 = -35; -pub const _PULSE_CODE_NET_ACK: i32 = -36; -pub const _PULSE_CODE_NET_UNBLOCK: i32 = -37; -pub const _PULSE_CODE_NET_DETACH: i32 = -38; -pub const _PULSE_CODE_RESTART: i32 = -39; -pub const _PULSE_CODE_NORESTART: i32 = -40; -pub const _PULSE_CODE_UNBLOCK_RESTART: i32 = -41; -pub const _PULSE_CODE_UNBLOCK_TIMER: i32 = -42; -pub const _PULSE_CODE_MINAVAIL: u32 = 0; -pub const _PULSE_CODE_MAXAVAIL: u32 = 127; - -pub const _NTO_HARD_FLAGS_END: u32 = 1; - -pub const _NTO_PULSE_IF_UNIQUE: u32 = 4096; -pub const _NTO_PULSE_REPLACE: u32 = 8192; - -pub const _NTO_PF_NOCLDSTOP: u32 = 1; -pub const _NTO_PF_LOADING: u32 = 2; -pub const _NTO_PF_TERMING: u32 = 4; -pub const _NTO_PF_ZOMBIE: u32 = 8; -pub const _NTO_PF_NOZOMBIE: u32 = 16; -pub const _NTO_PF_FORKED: u32 = 32; -pub const _NTO_PF_ORPHAN_PGRP: u32 = 64; -pub const _NTO_PF_STOPPED: u32 = 128; -pub const _NTO_PF_DEBUG_STOPPED: u32 = 256; -pub const _NTO_PF_BKGND_PGRP: u32 = 512; -pub const _NTO_PF_NOISYNC: u32 = 1024; -pub const _NTO_PF_CONTINUED: u32 = 2048; -pub const _NTO_PF_CHECK_INTR: u32 = 4096; -pub const _NTO_PF_COREDUMP: u32 = 8192; -pub const _NTO_PF_RING0: u32 = 32768; -pub const _NTO_PF_SLEADER: u32 = 65536; -pub const _NTO_PF_WAITINFO: u32 = 131072; -pub const _NTO_PF_DESTROYALL: u32 = 524288; -pub const _NTO_PF_NOCOREDUMP: u32 = 1048576; -pub const _NTO_PF_WAITDONE: u32 = 4194304; -pub const _NTO_PF_TERM_WAITING: u32 = 8388608; -pub const _NTO_PF_ASLR: u32 = 16777216; -pub const _NTO_PF_EXECED: u32 = 33554432; -pub const _NTO_PF_APP_STOPPED: u32 = 67108864; -pub const _NTO_PF_64BIT: u32 = 134217728; -pub const _NTO_PF_NET: u32 = 268435456; -pub const _NTO_PF_NOLAZYSTACK: u32 = 536870912; -pub const _NTO_PF_NOEXEC_STACK: u32 = 1073741824; -pub const _NTO_PF_LOADER_PERMS: u32 = 2147483648; - -pub const _NTO_TF_INTR_PENDING: u32 = 65536; -pub const _NTO_TF_DETACHED: u32 = 131072; -pub const _NTO_TF_SHR_MUTEX: u32 = 262144; -pub const _NTO_TF_SHR_MUTEX_EUID: u32 = 524288; -pub const _NTO_TF_THREADS_HOLD: u32 = 1048576; -pub const _NTO_TF_UNBLOCK_REQ: u32 = 4194304; -pub const _NTO_TF_ALIGN_FAULT: u32 = 16777216; -pub const _NTO_TF_SSTEP: u32 = 33554432; -pub const _NTO_TF_ALLOCED_STACK: u32 = 67108864; -pub const _NTO_TF_NOMULTISIG: u32 = 134217728; -pub const _NTO_TF_LOW_LATENCY: u32 = 268435456; -pub const _NTO_TF_IOPRIV: u32 = 2147483648; - -pub const _NTO_TCTL_IO_PRIV: u32 = 1; -pub const _NTO_TCTL_THREADS_HOLD: u32 = 2; -pub const _NTO_TCTL_THREADS_CONT: u32 = 3; -pub const _NTO_TCTL_RUNMASK: u32 = 4; -pub const _NTO_TCTL_ALIGN_FAULT: u32 = 5; -pub const _NTO_TCTL_RUNMASK_GET_AND_SET: u32 = 6; -pub const _NTO_TCTL_PERFCOUNT: u32 = 7; -pub const _NTO_TCTL_ONE_THREAD_HOLD: u32 = 8; -pub const _NTO_TCTL_ONE_THREAD_CONT: u32 = 9; -pub const _NTO_TCTL_RUNMASK_GET_AND_SET_INHERIT: u32 = 10; -pub const _NTO_TCTL_NAME: u32 = 11; -pub const _NTO_TCTL_RCM_GET_AND_SET: u32 = 12; -pub 
const _NTO_TCTL_SHR_MUTEX: u32 = 13; -pub const _NTO_TCTL_IO: u32 = 14; -pub const _NTO_TCTL_NET_KIF_GET_AND_SET: u32 = 15; -pub const _NTO_TCTL_LOW_LATENCY: u32 = 16; -pub const _NTO_TCTL_ADD_EXIT_EVENT: u32 = 17; -pub const _NTO_TCTL_DEL_EXIT_EVENT: u32 = 18; -pub const _NTO_TCTL_IO_LEVEL: u32 = 19; -pub const _NTO_TCTL_RESERVED: u32 = 2147483648; -pub const _NTO_TCTL_IO_LEVEL_INHERIT: u32 = 1073741824; -pub const _NTO_IO_LEVEL_NONE: u32 = 1; -pub const _NTO_IO_LEVEL_1: u32 = 2; -pub const _NTO_IO_LEVEL_2: u32 = 3; - -pub const _NTO_THREAD_NAME_MAX: u32 = 100; - -pub const _NTO_CHF_FIXED_PRIORITY: u32 = 1; -pub const _NTO_CHF_UNBLOCK: u32 = 2; -pub const _NTO_CHF_THREAD_DEATH: u32 = 4; -pub const _NTO_CHF_DISCONNECT: u32 = 8; -pub const _NTO_CHF_NET_MSG: u32 = 16; -pub const _NTO_CHF_SENDER_LEN: u32 = 32; -pub const _NTO_CHF_COID_DISCONNECT: u32 = 64; -pub const _NTO_CHF_REPLY_LEN: u32 = 128; -pub const _NTO_CHF_PULSE_POOL: u32 = 256; -pub const _NTO_CHF_ASYNC_NONBLOCK: u32 = 512; -pub const _NTO_CHF_ASYNC: u32 = 1024; -pub const _NTO_CHF_GLOBAL: u32 = 2048; -pub const _NTO_CHF_PRIVATE: u32 = 4096; -pub const _NTO_CHF_MSG_PAUSING: u32 = 8192; -pub const _NTO_CHF_INHERIT_RUNMASK: u32 = 16384; -pub const _NTO_CHF_UNBLOCK_TIMER: u32 = 32768; - -pub const _NTO_CHO_CUSTOM_EVENT: u32 = 1; - -pub const _NTO_COF_CLOEXEC: u32 = 1; -pub const _NTO_COF_DEAD: u32 = 2; -pub const _NTO_COF_NOSHARE: u32 = 64; -pub const _NTO_COF_NETCON: u32 = 128; -pub const _NTO_COF_NONBLOCK: u32 = 256; -pub const _NTO_COF_ASYNC: u32 = 512; -pub const _NTO_COF_GLOBAL: u32 = 1024; -pub const _NTO_COF_NOEVENT: u32 = 2048; -pub const _NTO_COF_INSECURE: u32 = 4096; -pub const _NTO_COF_REG_EVENTS: u32 = 8192; -pub const _NTO_COF_UNREG_EVENTS: u32 = 16384; -pub const _NTO_COF_MASK: u32 = 65535; - -pub const _NTO_SIDE_CHANNEL: u32 = 1073741824; - -pub const _NTO_CONNECTION_SCOID: u32 = 65536; -pub const _NTO_GLOBAL_CHANNEL: u32 = 1073741824; - -pub const _NTO_TIMEOUT_MASK: u32 = (1 << STATE_MAX) - 1; -pub const _NTO_TIMEOUT_ACTIVE: u32 = 1 << STATE_MAX; -pub const _NTO_TIMEOUT_IMMEDIATE: u32 = 1 << (STATE_MAX + 1); - -pub const _NTO_IC_LATENCY: u32 = 0; - -pub const _NTO_INTR_FLAGS_END: u32 = 1; -pub const _NTO_INTR_FLAGS_NO_UNMASK: u32 = 2; -pub const _NTO_INTR_FLAGS_PROCESS: u32 = 4; -pub const _NTO_INTR_FLAGS_TRK_MSK: u32 = 8; -pub const _NTO_INTR_FLAGS_ARRAY: u32 = 16; -pub const _NTO_INTR_FLAGS_EXCLUSIVE: u32 = 32; -pub const _NTO_INTR_FLAGS_FPU: u32 = 64; - -pub const _NTO_INTR_CLASS_EXTERNAL: u32 = 0; -pub const _NTO_INTR_CLASS_SYNTHETIC: u32 = 2147418112; - -pub const _NTO_INTR_SPARE: u32 = 2147483647; - -pub const _NTO_HOOK_IDLE: u32 = 2147418113; -pub const _NTO_HOOK_OVERDRIVE: u32 = 2147418114; -pub const _NTO_HOOK_LAST: u32 = 2147418114; -pub const _NTO_HOOK_IDLE2_FLAG: u32 = 32768; - -pub const _NTO_IH_CMD_SLEEP_SETUP: u32 = 1; -pub const _NTO_IH_CMD_SLEEP_BLOCK: u32 = 2; -pub const _NTO_IH_CMD_SLEEP_WAKEUP: u32 = 4; -pub const _NTO_IH_CMD_SLEEP_ONLINE: u32 = 8; -pub const _NTO_IH_RESP_NEEDS_BLOCK: u32 = 1; -pub const _NTO_IH_RESP_NEEDS_WAKEUP: u32 = 2; -pub const _NTO_IH_RESP_NEEDS_ONLINE: u32 = 4; -pub const _NTO_IH_RESP_SYNC_TIME: u32 = 16; -pub const _NTO_IH_RESP_SYNC_TLB: u32 = 32; -pub const _NTO_IH_RESP_SUGGEST_OFFLINE: u32 = 256; -pub const _NTO_IH_RESP_SLEEP_MODE_REACHED: u32 = 512; -pub const _NTO_IH_RESP_DELIVER_INTRS: u32 = 1024; - -pub const _NTO_READIOV_SEND: u32 = 0; -pub const _NTO_READIOV_REPLY: u32 = 1; - -pub const _NTO_KEYDATA_VTID: u32 = 2147483648; - -pub const _NTO_KEYDATA_PATHSIGN: u32 = 
32768; -pub const _NTO_KEYDATA_OP_MASK: u32 = 255; -pub const _NTO_KEYDATA_VERIFY: u32 = 0; -pub const _NTO_KEYDATA_CALCULATE: u32 = 1; -pub const _NTO_KEYDATA_CALCULATE_REUSE: u32 = 2; -pub const _NTO_KEYDATA_PATHSIGN_VERIFY: u32 = 32768; -pub const _NTO_KEYDATA_PATHSIGN_CALCULATE: u32 = 32769; -pub const _NTO_KEYDATA_PATHSIGN_CALCULATE_REUSE: u32 = 32770; - -pub const _NTO_SCTL_SETPRIOCEILING: u32 = 1; -pub const _NTO_SCTL_GETPRIOCEILING: u32 = 2; -pub const _NTO_SCTL_SETEVENT: u32 = 3; -pub const _NTO_SCTL_MUTEX_WAKEUP: u32 = 4; -pub const _NTO_SCTL_MUTEX_CONSISTENT: u32 = 5; -pub const _NTO_SCTL_SEM_VALUE: u32 = 6; - -pub const _NTO_CLIENTINFO_GETGROUPS: u32 = 1; -pub const _NTO_CLIENTINFO_GETTYPEID: u32 = 2; - -extern "C" { - pub fn ChannelCreate(__flags: c_uint) -> c_int; - pub fn ChannelCreate_r(__flags: c_uint) -> c_int; - pub fn ChannelCreatePulsePool(__flags: c_uint, __config: *const nto_channel_config) -> c_int; - pub fn ChannelCreateExt( - __flags: c_uint, - __mode: crate::mode_t, - __bufsize: usize, - __maxnumbuf: c_uint, - __ev: *const crate::sigevent, - __cred: *mut _cred_info, - ) -> c_int; - pub fn ChannelDestroy(__chid: c_int) -> c_int; - pub fn ChannelDestroy_r(__chid: c_int) -> c_int; - pub fn ConnectAttach( - __nd: u32, - __pid: crate::pid_t, - __chid: c_int, - __index: c_uint, - __flags: c_int, - ) -> c_int; - pub fn ConnectAttach_r( - __nd: u32, - __pid: crate::pid_t, - __chid: c_int, - __index: c_uint, - __flags: c_int, - ) -> c_int; - - // TODO: The following function uses a structure defined in a header file - // which doesn't appear as part of the default headers found in a - // standard installation of Neutrino 7.1 SDP. Commented out for now. - //pub fn ConnectAttachExt( - // __nd: u32, - // __pid: crate::pid_t, - // __chid: c_int, - // __index: c_uint, - // __flags: c_int, - // __cd: *mut _asyncmsg_connection_descriptor, - //) -> c_int; - pub fn ConnectDetach(__coid: c_int) -> c_int; - pub fn ConnectDetach_r(__coid: c_int) -> c_int; - pub fn ConnectServerInfo(__pid: crate::pid_t, __coid: c_int, __info: *mut _msg_info64) - -> c_int; - pub fn ConnectServerInfo_r( - __pid: crate::pid_t, - __coid: c_int, - __info: *mut _msg_info64, - ) -> c_int; - pub fn ConnectClientInfoExtraArgs( - __scoid: c_int, - __info_pp: *mut _client_info, - __ngroups: c_int, - __abilities: *mut _client_able, - __nable: c_int, - __type_id: *mut c_uint, - ) -> c_int; - pub fn ConnectClientInfoExtraArgs_r( - __scoid: c_int, - __info_pp: *mut _client_info, - __ngroups: c_int, - __abilities: *mut _client_able, - __nable: c_int, - __type_id: *mut c_uint, - ) -> c_int; - pub fn ConnectClientInfo(__scoid: c_int, __info: *mut _client_info, __ngroups: c_int) -> c_int; - pub fn ConnectClientInfo_r( - __scoid: c_int, - __info: *mut _client_info, - __ngroups: c_int, - ) -> c_int; - pub fn ConnectClientInfoExt( - __scoid: c_int, - __info_pp: *mut *mut _client_info, - flags: c_int, - ) -> c_int; - pub fn ClientInfoExtFree(__info_pp: *mut *mut _client_info) -> c_int; - pub fn ConnectClientInfoAble( - __scoid: c_int, - __info_pp: *mut *mut _client_info, - flags: c_int, - abilities: *mut _client_able, - nable: c_int, - ) -> c_int; - pub fn ConnectFlags( - __pid: crate::pid_t, - __coid: c_int, - __mask: c_uint, - __bits: c_uint, - ) -> c_int; - pub fn ConnectFlags_r( - __pid: crate::pid_t, - __coid: c_int, - __mask: c_uint, - __bits: c_uint, - ) -> c_int; - pub fn ChannelConnectAttr( - __id: c_uint, - __old_attr: *mut _channel_connect_attr, - __new_attr: *mut _channel_connect_attr, - __flags: c_uint, - ) -> 
c_int; - pub fn MsgSend( - __coid: c_int, - __smsg: *const c_void, - __sbytes: usize, - __rmsg: *mut c_void, - __rbytes: usize, - ) -> c_long; - pub fn MsgSend_r( - __coid: c_int, - __smsg: *const c_void, - __sbytes: usize, - __rmsg: *mut c_void, - __rbytes: usize, - ) -> c_long; - pub fn MsgSendnc( - __coid: c_int, - __smsg: *const c_void, - __sbytes: usize, - __rmsg: *mut c_void, - __rbytes: usize, - ) -> c_long; - pub fn MsgSendnc_r( - __coid: c_int, - __smsg: *const c_void, - __sbytes: usize, - __rmsg: *mut c_void, - __rbytes: usize, - ) -> c_long; - pub fn MsgSendsv( - __coid: c_int, - __smsg: *const c_void, - __sbytes: usize, - __riov: *const crate::iovec, - __rparts: usize, - ) -> c_long; - pub fn MsgSendsv_r( - __coid: c_int, - __smsg: *const c_void, - __sbytes: usize, - __riov: *const crate::iovec, - __rparts: usize, - ) -> c_long; - pub fn MsgSendsvnc( - __coid: c_int, - __smsg: *const c_void, - __sbytes: usize, - __riov: *const crate::iovec, - __rparts: usize, - ) -> c_long; - pub fn MsgSendsvnc_r( - __coid: c_int, - __smsg: *const c_void, - __sbytes: usize, - __riov: *const crate::iovec, - __rparts: usize, - ) -> c_long; - pub fn MsgSendvs( - __coid: c_int, - __siov: *const crate::iovec, - __sparts: usize, - __rmsg: *mut c_void, - __rbytes: usize, - ) -> c_long; - pub fn MsgSendvs_r( - __coid: c_int, - __siov: *const crate::iovec, - __sparts: usize, - __rmsg: *mut c_void, - __rbytes: usize, - ) -> c_long; - pub fn MsgSendvsnc( - __coid: c_int, - __siov: *const crate::iovec, - __sparts: usize, - __rmsg: *mut c_void, - __rbytes: usize, - ) -> c_long; - pub fn MsgSendvsnc_r( - __coid: c_int, - __siov: *const crate::iovec, - __sparts: usize, - __rmsg: *mut c_void, - __rbytes: usize, - ) -> c_long; - pub fn MsgSendv( - __coid: c_int, - __siov: *const crate::iovec, - __sparts: usize, - __riov: *const crate::iovec, - __rparts: usize, - ) -> c_long; - pub fn MsgSendv_r( - __coid: c_int, - __siov: *const crate::iovec, - __sparts: usize, - __riov: *const crate::iovec, - __rparts: usize, - ) -> c_long; - pub fn MsgSendvnc( - __coid: c_int, - __siov: *const crate::iovec, - __sparts: usize, - __riov: *const crate::iovec, - __rparts: usize, - ) -> c_long; - pub fn MsgSendvnc_r( - __coid: c_int, - __siov: *const crate::iovec, - __sparts: usize, - __riov: *const crate::iovec, - __rparts: usize, - ) -> c_long; - pub fn MsgReceive( - __chid: c_int, - __msg: *mut c_void, - __bytes: usize, - __info: *mut _msg_info64, - ) -> c_int; - pub fn MsgReceive_r( - __chid: c_int, - __msg: *mut c_void, - __bytes: usize, - __info: *mut _msg_info64, - ) -> c_int; - pub fn MsgReceivev( - __chid: c_int, - __iov: *const crate::iovec, - __parts: usize, - __info: *mut _msg_info64, - ) -> c_int; - pub fn MsgReceivev_r( - __chid: c_int, - __iov: *const crate::iovec, - __parts: usize, - __info: *mut _msg_info64, - ) -> c_int; - pub fn MsgReceivePulse( - __chid: c_int, - __pulse: *mut c_void, - __bytes: usize, - __info: *mut _msg_info64, - ) -> c_int; - pub fn MsgReceivePulse_r( - __chid: c_int, - __pulse: *mut c_void, - __bytes: usize, - __info: *mut _msg_info64, - ) -> c_int; - pub fn MsgReceivePulsev( - __chid: c_int, - __iov: *const crate::iovec, - __parts: usize, - __info: *mut _msg_info64, - ) -> c_int; - pub fn MsgReceivePulsev_r( - __chid: c_int, - __iov: *const crate::iovec, - __parts: usize, - __info: *mut _msg_info64, - ) -> c_int; - pub fn MsgReply( - __rcvid: c_int, - __status: c_long, - __msg: *const c_void, - __bytes: usize, - ) -> c_int; - pub fn MsgReply_r( - __rcvid: c_int, - __status: c_long, - 
__msg: *const c_void, - __bytes: usize, - ) -> c_int; - pub fn MsgReplyv( - __rcvid: c_int, - __status: c_long, - __iov: *const crate::iovec, - __parts: usize, - ) -> c_int; - pub fn MsgReplyv_r( - __rcvid: c_int, - __status: c_long, - __iov: *const crate::iovec, - __parts: usize, - ) -> c_int; - pub fn MsgReadiov( - __rcvid: c_int, - __iov: *const crate::iovec, - __parts: usize, - __offset: usize, - __flags: c_int, - ) -> isize; - pub fn MsgReadiov_r( - __rcvid: c_int, - __iov: *const crate::iovec, - __parts: usize, - __offset: usize, - __flags: c_int, - ) -> isize; - pub fn MsgRead(__rcvid: c_int, __msg: *mut c_void, __bytes: usize, __offset: usize) -> isize; - pub fn MsgRead_r(__rcvid: c_int, __msg: *mut c_void, __bytes: usize, __offset: usize) -> isize; - pub fn MsgReadv( - __rcvid: c_int, - __iov: *const crate::iovec, - __parts: usize, - __offset: usize, - ) -> isize; - pub fn MsgReadv_r( - __rcvid: c_int, - __iov: *const crate::iovec, - __parts: usize, - __offset: usize, - ) -> isize; - pub fn MsgWrite(__rcvid: c_int, __msg: *const c_void, __bytes: usize, __offset: usize) - -> isize; - pub fn MsgWrite_r( - __rcvid: c_int, - __msg: *const c_void, - __bytes: usize, - __offset: usize, - ) -> isize; - pub fn MsgWritev( - __rcvid: c_int, - __iov: *const crate::iovec, - __parts: usize, - __offset: usize, - ) -> isize; - pub fn MsgWritev_r( - __rcvid: c_int, - __iov: *const crate::iovec, - __parts: usize, - __offset: usize, - ) -> isize; - pub fn MsgSendPulse(__coid: c_int, __priority: c_int, __code: c_int, __value: c_int) -> c_int; - pub fn MsgSendPulse_r(__coid: c_int, __priority: c_int, __code: c_int, __value: c_int) - -> c_int; - pub fn MsgSendPulsePtr( - __coid: c_int, - __priority: c_int, - __code: c_int, - __value: *mut c_void, - ) -> c_int; - pub fn MsgSendPulsePtr_r( - __coid: c_int, - __priority: c_int, - __code: c_int, - __value: *mut c_void, - ) -> c_int; - pub fn MsgDeliverEvent(__rcvid: c_int, __event: *const crate::sigevent) -> c_int; - pub fn MsgDeliverEvent_r(__rcvid: c_int, __event: *const crate::sigevent) -> c_int; - pub fn MsgVerifyEvent(__rcvid: c_int, __event: *const crate::sigevent) -> c_int; - pub fn MsgVerifyEvent_r(__rcvid: c_int, __event: *const crate::sigevent) -> c_int; - pub fn MsgRegisterEvent(__event: *mut crate::sigevent, __coid: c_int) -> c_int; - pub fn MsgRegisterEvent_r(__event: *mut crate::sigevent, __coid: c_int) -> c_int; - pub fn MsgUnregisterEvent(__event: *const crate::sigevent) -> c_int; - pub fn MsgUnregisterEvent_r(__event: *const crate::sigevent) -> c_int; - pub fn MsgInfo(__rcvid: c_int, __info: *mut _msg_info64) -> c_int; - pub fn MsgInfo_r(__rcvid: c_int, __info: *mut _msg_info64) -> c_int; - pub fn MsgKeyData( - __rcvid: c_int, - __oper: c_int, - __key: u32, - __newkey: *mut u32, - __iov: *const crate::iovec, - __parts: c_int, - ) -> c_int; - pub fn MsgKeyData_r( - __rcvid: c_int, - __oper: c_int, - __key: u32, - __newkey: *mut u32, - __iov: *const crate::iovec, - __parts: c_int, - ) -> c_int; - pub fn MsgError(__rcvid: c_int, __err: c_int) -> c_int; - pub fn MsgError_r(__rcvid: c_int, __err: c_int) -> c_int; - pub fn MsgCurrent(__rcvid: c_int) -> c_int; - pub fn MsgCurrent_r(__rcvid: c_int) -> c_int; - pub fn MsgSendAsyncGbl( - __coid: c_int, - __smsg: *const c_void, - __sbytes: usize, - __msg_prio: c_uint, - ) -> c_int; - pub fn MsgSendAsync(__coid: c_int) -> c_int; - pub fn MsgReceiveAsyncGbl( - __chid: c_int, - __rmsg: *mut c_void, - __rbytes: usize, - __info: *mut _msg_info64, - __coid: c_int, - ) -> c_int; - pub fn 
MsgReceiveAsync(__chid: c_int, __iov: *const crate::iovec, __parts: c_uint) -> c_int; - pub fn MsgPause(__rcvid: c_int, __cookie: c_uint) -> c_int; - pub fn MsgPause_r(__rcvid: c_int, __cookie: c_uint) -> c_int; - - pub fn SignalKill( - __nd: u32, - __pid: crate::pid_t, - __tid: c_int, - __signo: c_int, - __code: c_int, - __value: c_int, - ) -> c_int; - pub fn SignalKill_r( - __nd: u32, - __pid: crate::pid_t, - __tid: c_int, - __signo: c_int, - __code: c_int, - __value: c_int, - ) -> c_int; - pub fn SignalKillSigval( - __nd: u32, - __pid: crate::pid_t, - __tid: c_int, - __signo: c_int, - __code: c_int, - __value: *const crate::sigval, - ) -> c_int; - pub fn SignalKillSigval_r( - __nd: u32, - __pid: crate::pid_t, - __tid: c_int, - __signo: c_int, - __code: c_int, - __value: *const crate::sigval, - ) -> c_int; - pub fn SignalReturn(__info: *mut _sighandler_info) -> c_int; - pub fn SignalFault(__sigcode: c_uint, __regs: *mut c_void, __refaddr: usize) -> c_int; - pub fn SignalAction( - __pid: crate::pid_t, - __sigstub: unsafe extern "C" fn(), - __signo: c_int, - __act: *const crate::sigaction, - __oact: *mut crate::sigaction, - ) -> c_int; - pub fn SignalAction_r( - __pid: crate::pid_t, - __sigstub: unsafe extern "C" fn(), - __signo: c_int, - __act: *const crate::sigaction, - __oact: *mut crate::sigaction, - ) -> c_int; - pub fn SignalProcmask( - __pid: crate::pid_t, - __tid: c_int, - __how: c_int, - __set: *const crate::sigset_t, - __oldset: *mut crate::sigset_t, - ) -> c_int; - pub fn SignalProcmask_r( - __pid: crate::pid_t, - __tid: c_int, - __how: c_int, - __set: *const crate::sigset_t, - __oldset: *mut crate::sigset_t, - ) -> c_int; - pub fn SignalSuspend(__set: *const crate::sigset_t) -> c_int; - pub fn SignalSuspend_r(__set: *const crate::sigset_t) -> c_int; - pub fn SignalWaitinfo(__set: *const crate::sigset_t, __info: *mut crate::siginfo_t) -> c_int; - pub fn SignalWaitinfo_r(__set: *const crate::sigset_t, __info: *mut crate::siginfo_t) -> c_int; - pub fn SignalWaitinfoMask( - __set: *const crate::sigset_t, - __info: *mut crate::siginfo_t, - __mask: *const crate::sigset_t, - ) -> c_int; - pub fn SignalWaitinfoMask_r( - __set: *const crate::sigset_t, - __info: *mut crate::siginfo_t, - __mask: *const crate::sigset_t, - ) -> c_int; - pub fn ThreadCreate( - __pid: crate::pid_t, - __func: unsafe extern "C" fn(__arg: *mut c_void) -> *mut c_void, - __arg: *mut c_void, - __attr: *const crate::_thread_attr, - ) -> c_int; - pub fn ThreadCreate_r( - __pid: crate::pid_t, - __func: unsafe extern "C" fn(__arg: *mut c_void) -> *mut c_void, - __arg: *mut c_void, - __attr: *const crate::_thread_attr, - ) -> c_int; - - pub fn ThreadDestroy(__tid: c_int, __priority: c_int, __status: *mut c_void) -> c_int; - pub fn ThreadDestroy_r(__tid: c_int, __priority: c_int, __status: *mut c_void) -> c_int; - pub fn ThreadDetach(__tid: c_int) -> c_int; - pub fn ThreadDetach_r(__tid: c_int) -> c_int; - pub fn ThreadJoin(__tid: c_int, __status: *mut *mut c_void) -> c_int; - pub fn ThreadJoin_r(__tid: c_int, __status: *mut *mut c_void) -> c_int; - pub fn ThreadCancel(__tid: c_int, __canstub: unsafe extern "C" fn()) -> c_int; - pub fn ThreadCancel_r(__tid: c_int, __canstub: unsafe extern "C" fn()) -> c_int; - pub fn ThreadCtl(__cmd: c_int, __data: *mut c_void) -> c_int; - pub fn ThreadCtl_r(__cmd: c_int, __data: *mut c_void) -> c_int; - pub fn ThreadCtlExt( - __pid: crate::pid_t, - __tid: c_int, - __cmd: c_int, - __data: *mut c_void, - ) -> c_int; - pub fn ThreadCtlExt_r( - __pid: crate::pid_t, - __tid: c_int, - __cmd: 
c_int, - __data: *mut c_void, - ) -> c_int; - - pub fn InterruptHookTrace( - __handler: Option *const crate::sigevent>, - __flags: c_uint, - ) -> c_int; - pub fn InterruptHookIdle( - __handler: Option, - __flags: c_uint, - ) -> c_int; - pub fn InterruptHookIdle2( - __handler: Option< - unsafe extern "C" fn(arg1: c_uint, arg2: *mut syspage_entry, arg3: *mut _idle_hook), - >, - __flags: c_uint, - ) -> c_int; - pub fn InterruptHookOverdriveEvent(__event: *const crate::sigevent, __flags: c_uint) -> c_int; - pub fn InterruptAttachEvent( - __intr: c_int, - __event: *const crate::sigevent, - __flags: c_uint, - ) -> c_int; - pub fn InterruptAttachEvent_r( - __intr: c_int, - __event: *const crate::sigevent, - __flags: c_uint, - ) -> c_int; - pub fn InterruptAttach( - __intr: c_int, - __handler: Option< - unsafe extern "C" fn(__area: *mut c_void, __id: c_int) -> *const crate::sigevent, - >, - __area: *const c_void, - __size: c_int, - __flags: c_uint, - ) -> c_int; - pub fn InterruptAttach_r( - __intr: c_int, - __handler: Option< - unsafe extern "C" fn(__area: *mut c_void, __id: c_int) -> *const crate::sigevent, - >, - __area: *const c_void, - __size: c_int, - __flags: c_uint, - ) -> c_int; - pub fn InterruptAttachArray( - __intr: c_int, - __handler: Option< - unsafe extern "C" fn(__area: *mut c_void, __id: c_int) -> *const *const crate::sigevent, - >, - __area: *const c_void, - __size: c_int, - __flags: c_uint, - ) -> c_int; - pub fn InterruptAttachArray_r( - __intr: c_int, - __handler: Option< - unsafe extern "C" fn(__area: *mut c_void, __id: c_int) -> *const *const crate::sigevent, - >, - __area: *const c_void, - __size: c_int, - __flags: c_uint, - ) -> c_int; - pub fn InterruptDetach(__id: c_int) -> c_int; - pub fn InterruptDetach_r(__id: c_int) -> c_int; - pub fn InterruptWait(__flags: c_int, __timeout: *const u64) -> c_int; - pub fn InterruptWait_r(__flags: c_int, __timeout: *const u64) -> c_int; - pub fn InterruptCharacteristic( - __type: c_int, - __id: c_int, - __new: *mut c_uint, - __old: *mut c_uint, - ) -> c_int; - pub fn InterruptCharacteristic_r( - __type: c_int, - __id: c_int, - __new: *mut c_uint, - __old: *mut c_uint, - ) -> c_int; - - pub fn SchedGet(__pid: crate::pid_t, __tid: c_int, __param: *mut crate::sched_param) -> c_int; - pub fn SchedGet_r(__pid: crate::pid_t, __tid: c_int, __param: *mut crate::sched_param) - -> c_int; - pub fn SchedGetCpuNum() -> c_uint; - pub fn SchedSet( - __pid: crate::pid_t, - __tid: c_int, - __algorithm: c_int, - __param: *const crate::sched_param, - ) -> c_int; - pub fn SchedSet_r( - __pid: crate::pid_t, - __tid: c_int, - __algorithm: c_int, - __param: *const crate::sched_param, - ) -> c_int; - pub fn SchedInfo( - __pid: crate::pid_t, - __algorithm: c_int, - __info: *mut crate::_sched_info, - ) -> c_int; - pub fn SchedInfo_r( - __pid: crate::pid_t, - __algorithm: c_int, - __info: *mut crate::_sched_info, - ) -> c_int; - pub fn SchedYield() -> c_int; - pub fn SchedYield_r() -> c_int; - pub fn SchedCtl(__cmd: c_int, __data: *mut c_void, __length: usize) -> c_int; - pub fn SchedCtl_r(__cmd: c_int, __data: *mut c_void, __length: usize) -> c_int; - pub fn SchedJobCreate(__job: *mut nto_job_t) -> c_int; - pub fn SchedJobCreate_r(__job: *mut nto_job_t) -> c_int; - pub fn SchedJobDestroy(__job: *mut nto_job_t) -> c_int; - pub fn SchedJobDestroy_r(__job: *mut nto_job_t) -> c_int; - pub fn SchedWaypoint( - __job: *mut nto_job_t, - __new: *const i64, - __max: *const i64, - __old: *mut i64, - ) -> c_int; - pub fn SchedWaypoint_r( - __job: *mut nto_job_t, - __new: 
*const i64, - __max: *const i64, - __old: *mut i64, - ) -> c_int; - - pub fn TimerCreate(__id: crate::clockid_t, __notify: *const crate::sigevent) -> c_int; - pub fn TimerCreate_r(__id: crate::clockid_t, __notify: *const crate::sigevent) -> c_int; - pub fn TimerDestroy(__id: crate::timer_t) -> c_int; - pub fn TimerDestroy_r(__id: crate::timer_t) -> c_int; - pub fn TimerSettime( - __id: crate::timer_t, - __flags: c_int, - __itime: *const crate::_itimer, - __oitime: *mut crate::_itimer, - ) -> c_int; - pub fn TimerSettime_r( - __id: crate::timer_t, - __flags: c_int, - __itime: *const crate::_itimer, - __oitime: *mut crate::_itimer, - ) -> c_int; - pub fn TimerInfo( - __pid: crate::pid_t, - __id: crate::timer_t, - __flags: c_int, - __info: *mut crate::_timer_info, - ) -> c_int; - pub fn TimerInfo_r( - __pid: crate::pid_t, - __id: crate::timer_t, - __flags: c_int, - __info: *mut crate::_timer_info, - ) -> c_int; - pub fn TimerAlarm( - __id: crate::clockid_t, - __itime: *const crate::_itimer, - __otime: *mut crate::_itimer, - ) -> c_int; - pub fn TimerAlarm_r( - __id: crate::clockid_t, - __itime: *const crate::_itimer, - __otime: *mut crate::_itimer, - ) -> c_int; - pub fn TimerTimeout( - __id: crate::clockid_t, - __flags: c_int, - __notify: *const crate::sigevent, - __ntime: *const u64, - __otime: *mut u64, - ) -> c_int; - pub fn TimerTimeout_r( - __id: crate::clockid_t, - __flags: c_int, - __notify: *const crate::sigevent, - __ntime: *const u64, - __otime: *mut u64, - ) -> c_int; - - pub fn SyncTypeCreate( - __type: c_uint, - __sync: *mut crate::sync_t, - __attr: *const crate::_sync_attr, - ) -> c_int; - pub fn SyncTypeCreate_r( - __type: c_uint, - __sync: *mut crate::sync_t, - __attr: *const crate::_sync_attr, - ) -> c_int; - pub fn SyncDestroy(__sync: *mut crate::sync_t) -> c_int; - pub fn SyncDestroy_r(__sync: *mut crate::sync_t) -> c_int; - pub fn SyncCtl(__cmd: c_int, __sync: *mut crate::sync_t, __data: *mut c_void) -> c_int; - pub fn SyncCtl_r(__cmd: c_int, __sync: *mut crate::sync_t, __data: *mut c_void) -> c_int; - pub fn SyncMutexEvent(__sync: *mut crate::sync_t, event: *const crate::sigevent) -> c_int; - pub fn SyncMutexEvent_r(__sync: *mut crate::sync_t, event: *const crate::sigevent) -> c_int; - pub fn SyncMutexLock(__sync: *mut crate::sync_t) -> c_int; - pub fn SyncMutexLock_r(__sync: *mut crate::sync_t) -> c_int; - pub fn SyncMutexUnlock(__sync: *mut crate::sync_t) -> c_int; - pub fn SyncMutexUnlock_r(__sync: *mut crate::sync_t) -> c_int; - pub fn SyncMutexRevive(__sync: *mut crate::sync_t) -> c_int; - pub fn SyncMutexRevive_r(__sync: *mut crate::sync_t) -> c_int; - pub fn SyncCondvarWait(__sync: *mut crate::sync_t, __mutex: *mut crate::sync_t) -> c_int; - pub fn SyncCondvarWait_r(__sync: *mut crate::sync_t, __mutex: *mut crate::sync_t) -> c_int; - pub fn SyncCondvarSignal(__sync: *mut crate::sync_t, __all: c_int) -> c_int; - pub fn SyncCondvarSignal_r(__sync: *mut crate::sync_t, __all: c_int) -> c_int; - pub fn SyncSemPost(__sync: *mut crate::sync_t) -> c_int; - pub fn SyncSemPost_r(__sync: *mut crate::sync_t) -> c_int; - pub fn SyncSemWait(__sync: *mut crate::sync_t, __tryto: c_int) -> c_int; - pub fn SyncSemWait_r(__sync: *mut crate::sync_t, __tryto: c_int) -> c_int; - - pub fn ClockTime(__id: crate::clockid_t, _new: *const u64, __old: *mut u64) -> c_int; - pub fn ClockTime_r(__id: crate::clockid_t, _new: *const u64, __old: *mut u64) -> c_int; - pub fn ClockAdjust( - __id: crate::clockid_t, - _new: *const crate::_clockadjust, - __old: *mut crate::_clockadjust, - ) -> c_int; - 
pub fn ClockAdjust_r( - __id: crate::clockid_t, - _new: *const crate::_clockadjust, - __old: *mut crate::_clockadjust, - ) -> c_int; - pub fn ClockPeriod( - __id: crate::clockid_t, - _new: *const crate::_clockperiod, - __old: *mut crate::_clockperiod, - __reserved: c_int, - ) -> c_int; - pub fn ClockPeriod_r( - __id: crate::clockid_t, - _new: *const crate::_clockperiod, - __old: *mut crate::_clockperiod, - __reserved: c_int, - ) -> c_int; - pub fn ClockId(__pid: crate::pid_t, __tid: c_int) -> c_int; - pub fn ClockId_r(__pid: crate::pid_t, __tid: c_int) -> c_int; - - // - //TODO: The following commented out functions are implemented in assembly. - // We can implmement them either via a C stub or rust's inline assembly. - // - //pub fn InterruptEnable(); - //pub fn InterruptDisable(); - pub fn InterruptMask(__intr: c_int, __id: c_int) -> c_int; - pub fn InterruptUnmask(__intr: c_int, __id: c_int) -> c_int; - //pub fn InterruptLock(__spin: *mut intrspin); - //pub fn InterruptUnlock(__spin: *mut intrspin); - //pub fn InterruptStatus() -> c_uint; -} diff --git a/vendor/libc/src/unix/nto/x86_64.rs b/vendor/libc/src/unix/nto/x86_64.rs deleted file mode 100644 index 521b5d4ab78796..00000000000000 --- a/vendor/libc/src/unix/nto/x86_64.rs +++ /dev/null @@ -1,111 +0,0 @@ -use crate::prelude::*; - -pub type wchar_t = u32; -pub type time_t = i64; - -s! { - #[repr(align(8))] - pub struct x86_64_cpu_registers { - pub rdi: u64, - pub rsi: u64, - pub rdx: u64, - pub r10: u64, - pub r8: u64, - pub r9: u64, - pub rax: u64, - pub rbx: u64, - pub rbp: u64, - pub rcx: u64, - pub r11: u64, - pub r12: u64, - pub r13: u64, - pub r14: u64, - pub r15: u64, - pub rip: u64, - pub cs: u32, - rsvd1: u32, - pub rflags: u64, - pub rsp: u64, - pub ss: u32, - rsvd2: u32, - } - - #[repr(align(8))] - pub struct mcontext_t { - pub cpu: x86_64_cpu_registers, - pub fpu: x86_64_fpu_registers, - } - - pub struct stack_t { - pub ss_sp: *mut c_void, - pub ss_size: size_t, - pub ss_flags: c_int, - } - - pub struct fsave_area_64 { - pub fpu_control_word: u32, - pub fpu_status_word: u32, - pub fpu_tag_word: u32, - pub fpu_ip: u32, - pub fpu_cs: u32, - pub fpu_op: u32, - pub fpu_ds: u32, - pub st_regs: [u8; 80], - } - - pub struct fxsave_area_64 { - pub fpu_control_word: u16, - pub fpu_status_word: u16, - pub fpu_tag_word: u16, - pub fpu_operand: u16, - pub fpu_rip: u64, - pub fpu_rdp: u64, - pub mxcsr: u32, - pub mxcsr_mask: u32, - pub st_regs: [u8; 128], - pub xmm_regs: [u8; 128], - reserved2: [u8; 224], - } - - pub struct fpu_extention_savearea_64 { - pub other: [u8; 512], - pub xstate_bv: u64, - pub xstate_undef: [u64; 7], - pub xstate_info: [u8; 224], - } -} - -s_no_extra_traits! { - pub union x86_64_fpu_registers { - pub fsave_area: fsave_area_64, - pub fxsave_area: fxsave_area_64, - pub xsave_area: fpu_extention_savearea_64, - pub data: [u8; 1024], - } -} - -cfg_if! 
{ - if #[cfg(feature = "extra_traits")] { - impl Eq for x86_64_fpu_registers {} - - impl PartialEq for x86_64_fpu_registers { - fn eq(&self, other: &x86_64_fpu_registers) -> bool { - unsafe { - self.fsave_area == other.fsave_area - || self.fxsave_area == other.fxsave_area - || self.xsave_area == other.xsave_area - } - } - } - - impl hash::Hash for x86_64_fpu_registers { - fn hash<H: hash::Hasher>(&self, state: &mut H) { - unsafe { - self.fsave_area.hash(state); - self.fxsave_area.hash(state); - self.xsave_area.hash(state); - } - } - } - } -} diff --git a/vendor/libc/src/unix/nuttx/mod.rs b/vendor/libc/src/unix/nuttx/mod.rs deleted file mode 100644 index 3d3e2c3448841d..00000000000000 --- a/vendor/libc/src/unix/nuttx/mod.rs +++ /dev/null @@ -1,597 +0,0 @@ -use crate::prelude::*; -use crate::{in6_addr, in_addr_t, timespec, DIR}; - -pub type nlink_t = u16; -pub type ino_t = u16; -pub type blkcnt_t = u64; -pub type blksize_t = i16; -pub type cc_t = u8; -pub type clock_t = i64; -pub type dev_t = i32; -pub type fsblkcnt_t = u64; -pub type locale_t = *mut i8; -pub type mode_t = u32; -pub type nfds_t = u32; -pub type off_t = i64; -pub type pthread_key_t = i32; -pub type pthread_mutexattr_t = u8; -pub type pthread_rwlockattr_t = i32; -pub type pthread_t = i32; -pub type rlim_t = i64; -pub type sa_family_t = u16; -pub type socklen_t = u32; -pub type speed_t = usize; -pub type suseconds_t = i32; -pub type tcflag_t = u32; -pub type clockid_t = i32; -pub type time_t = i64; -pub type wchar_t = i32; - -s! { - pub struct stat { - pub st_dev: dev_t, - pub st_ino: ino_t, - pub st_mode: mode_t, - pub st_nlink: nlink_t, - pub st_uid: u32, - pub st_gid: u32, - pub st_rdev: dev_t, - pub st_size: off_t, - pub st_atim: timespec, - pub st_mtim: timespec, - pub st_ctim: timespec, - pub st_blksize: blksize_t, - pub st_blocks: i64, - __reserved: [usize; __DEFAULT_RESERVED_SIZE__], - } - - pub struct sockaddr { - pub sa_family: sa_family_t, - pub sa_data: [u8; 14], - } - - pub struct passwd { - pub pw_name: *const c_char, - pub pw_passwd: *const c_char, - pub pw_uid: u32, - pub pw_gid: u32, - pub pw_gecos: *const c_char, - pub pw_dir: *const c_char, - pub pw_shell: *const c_char, - __reserved: [usize; __DEFAULT_RESERVED_SIZE__], - } - - pub struct sem_t { - __val: [usize; __SEM_SIZE__], - } - - pub struct pthread_attr_t { - __val: [usize; __PTHREAD_ATTR_SIZE__], - } - - pub struct pthread_mutex_t { - __val: [usize; __PTHREAD_MUTEX_SIZE__], - } - - pub struct pthread_cond_t { - __val: [usize; __PTHREAD_COND_SIZE__], - } - - pub struct pthread_condattr_t { - __val: [usize; __PTHREAD_CONDATTR_SIZE__], - } - - pub struct Dl_info { - pub dli_fname: *const c_char, - pub dli_fbase: *mut c_void, - pub dli_sname: *const c_char, - pub dli_saddr: *mut c_void, - } - - pub struct lconv { - pub decimal_point: *const c_char, - pub thousands_sep: *const c_char, - pub grouping: *const c_char, - pub int_curr_symbol: *const c_char, - pub currency_symbol: *const c_char, - pub mon_decimal_point: *const c_char, - pub mon_thousands_sep: *const c_char, - pub mon_grouping: *const c_char, - pub positive_sign: *const c_char, - pub negative_sign: *const c_char, - pub int_frac_digits: i8, - pub frac_digits: i8, - pub p_cs_precedes: i8, - pub p_sep_by_space: i8, - pub n_cs_precedes: i8, - pub n_sep_by_space: i8, - pub p_sign_posn: i8, - pub n_sign_posn: i8, - pub int_n_cs_precedes: i8, - pub int_n_sep_by_space: i8, - pub int_n_sign_posn: i8, - pub int_p_cs_precedes: i8, - pub int_p_sep_by_space: i8, - pub int_p_sign_posn: i8, - __reserved: [usize;
__DEFAULT_RESERVED_SIZE__], - } - - pub struct tm { - pub tm_sec: i32, - pub tm_min: i32, - pub tm_hour: i32, - pub tm_mday: i32, - pub tm_mon: i32, - pub tm_year: i32, - pub tm_wday: i32, - pub tm_yday: i32, - pub tm_isdst: i32, - pub tm_gmtoff: isize, - pub tm_zone: *const c_char, - __reserved: [usize; __DEFAULT_RESERVED_SIZE__], - } - - pub struct addrinfo { - pub ai_flags: i32, - pub ai_family: i32, - pub ai_socktype: i32, - pub ai_protocol: i32, - pub ai_addrlen: socklen_t, - pub ai_addr: *mut sockaddr, - pub ai_canonname: *mut c_char, - pub ai_next: *mut addrinfo, - __reserved: [usize; __DEFAULT_RESERVED_SIZE__], - } - - pub struct pthread_rwlock_t { - __val: [usize; __PTHREAD_RWLOCK_SIZE__], - } - - pub struct statvfs { - pub f_bsize: usize, - pub f_frsize: usize, - pub f_blocks: fsblkcnt_t, - pub f_bfree: fsblkcnt_t, - pub f_bavail: fsblkcnt_t, - pub f_files: fsblkcnt_t, - pub f_ffree: fsblkcnt_t, - pub f_favail: fsblkcnt_t, - pub f_fsid: usize, - pub f_flag: usize, - pub f_namemax: usize, - __reserved: [usize; __DEFAULT_RESERVED_SIZE__], - } - - pub struct dirent { - pub d_type: u8, - pub d_name: [c_char; __NAME_MAX__ + 1], - } - - pub struct fd_set { - __val: [u32; __FDSET_SIZE__], - } - - pub struct sigset_t { - __val: [u32; __SIGSET_SIZE__], - } - - pub struct sigaction { - pub sa_handler: usize, - pub sa_mask: sigset_t, - pub sa_flags: i32, - pub sa_user: usize, - __reserved: [usize; __DEFAULT_RESERVED_SIZE__], - } - - pub struct termios { - pub c_iflag: tcflag_t, - pub c_oflag: tcflag_t, - pub c_cflag: tcflag_t, - pub c_lflag: tcflag_t, - pub c_cc: [cc_t; 12], - pub c_speed: speed_t, - __reserved: [usize; __DEFAULT_RESERVED_SIZE__], - } - - pub struct in_addr { - pub s_addr: in_addr_t, - } - - pub struct sockaddr_in { - pub sin_family: sa_family_t, - pub sin_port: crate::in_port_t, - pub sin_addr: crate::in_addr, - pub sin_zero: [u8; 8], - } - - pub struct sockaddr_in6 { - pub sin6_family: sa_family_t, - pub sin6_port: crate::in_port_t, - pub sin6_flowinfo: u32, - pub sin6_addr: crate::in6_addr, - pub sin6_scope_id: u32, - } - - pub struct sockaddr_un { - pub sun_family: sa_family_t, - pub sun_path: [c_char; 108], - } - - pub struct sockaddr_storage { - pub ss_family: sa_family_t, - ss_data: [u32; __SOCKADDR_STORAGE_SIZE__], - } - - pub struct ip_mreq { - pub imr_multiaddr: in_addr, - pub imr_interface: in_addr, - } - - pub struct ipv6_mreq { - pub ipv6mr_multiaddr: in6_addr, - pub ipv6mr_interface: u32, - } - - pub struct timeval { - pub tv_sec: time_t, - pub tv_usec: suseconds_t, - } -} - -// Reserved two pointer size for reserved area for some structures. -// This ensures that the size of these structures is large enough -// if more fields are added in the NuttX side. -// -// These structures are that defined by POSIX but only necessary fields are included, -// for example, struct passwd, https://pubs.opengroup.org/onlinepubs/009695399/basedefs/pwd.h.html, -// POSIX only defines following fields in struct passwd: -// char *pw_name User's login name. -// char *pw_passwd Encrypted password. -// uid_t pw_uid Numerical user ID. -// gid_t pw_gid Numerical group ID. -// char *pw_dir Initial working directory. -// char *pw_shell Program to use as shell. -// Other fields can be different depending on the implementation. 
- -const __DEFAULT_RESERVED_SIZE__: usize = 2; - -const __SOCKADDR_STORAGE_SIZE__: usize = 36; -const __PTHREAD_ATTR_SIZE__: usize = 5; -const __PTHREAD_MUTEX_SIZE__: usize = 9; -const __PTHREAD_COND_SIZE__: usize = 7; -const __PTHREAD_CONDATTR_SIZE__: usize = 5; -const __PTHREAD_RWLOCK_SIZE__: usize = 17; -const __SEM_SIZE__: usize = 6; -const __NAME_MAX__: usize = 64; -const __FDSET_SIZE__: usize = 10; -const __SIGSET_SIZE__: usize = 8; - -pub const PTHREAD_COND_INITIALIZER: pthread_cond_t = pthread_cond_t { - __val: [0; __PTHREAD_COND_SIZE__], -}; -pub const PTHREAD_MUTEX_INITIALIZER: pthread_mutex_t = pthread_mutex_t { - __val: [0; __PTHREAD_MUTEX_SIZE__], -}; - -// dlfcn.h -pub const RTLD_DEFAULT: *mut c_void = ptr::null_mut(); - -// stdlib.h -pub const EXIT_SUCCESS: i32 = 0; -pub const EXIT_FAILURE: i32 = 1; - -// time.h -pub const CLOCK_REALTIME: i32 = 0; -pub const CLOCK_MONOTONIC: i32 = 1; - -// errno.h -pub const EPERM: i32 = 1; -pub const ENOENT: i32 = 2; -pub const ESRCH: i32 = 3; -pub const EINTR: i32 = 4; -pub const EIO: i32 = 5; -pub const ENXIO: i32 = 6; -pub const E2BIG: i32 = 7; -pub const ENOEXEC: i32 = 8; -pub const EBADF: i32 = 9; -pub const ECHILD: i32 = 10; -pub const EAGAIN: i32 = 11; -pub const ENOMEM: i32 = 12; -pub const EACCES: i32 = 13; -pub const EFAULT: i32 = 14; -pub const ENOTBLK: i32 = 15; -pub const EBUSY: i32 = 16; -pub const EEXIST: i32 = 17; -pub const EXDEV: i32 = 18; -pub const ENODEV: i32 = 19; -pub const ENOTDIR: i32 = 20; -pub const EISDIR: i32 = 21; -pub const EINVAL: i32 = 22; -pub const ENFILE: i32 = 23; -pub const EMFILE: i32 = 24; -pub const ENOTTY: i32 = 25; -pub const ETXTBSY: i32 = 26; -pub const EFBIG: i32 = 27; -pub const ENOSPC: i32 = 28; -pub const ESPIPE: i32 = 29; -pub const EROFS: i32 = 30; -pub const EMLINK: i32 = 31; -pub const EPIPE: i32 = 32; -pub const EDOM: i32 = 33; -pub const ERANGE: i32 = 34; -pub const EDEADLK: i32 = 35; -pub const ENAMETOOLONG: i32 = 36; -pub const ENOLCK: i32 = 37; -pub const ENOSYS: i32 = 38; -pub const ENOTEMPTY: i32 = 39; -pub const ELOOP: i32 = 40; -pub const EWOULDBLOCK: i32 = EAGAIN; -pub const ENOMSG: i32 = 42; -pub const EIDRM: i32 = 43; -pub const ECHRNG: i32 = 44; -pub const EL2NSYNC: i32 = 45; -pub const EL3HLT: i32 = 46; -pub const EL3RST: i32 = 47; -pub const ELNRNG: i32 = 48; -pub const EUNATCH: i32 = 49; -pub const ENOCSI: i32 = 50; -pub const EL2HLT: i32 = 51; -pub const EBADE: i32 = 52; -pub const EBADR: i32 = 53; -pub const EXFULL: i32 = 54; -pub const ENOANO: i32 = 55; -pub const EBADRQC: i32 = 56; -pub const EBADSLT: i32 = 57; -pub const EDEADLOCK: i32 = EDEADLK; -pub const EBFONT: i32 = 59; -pub const ENOSTR: i32 = 60; -pub const ENODATA: i32 = 61; -pub const ETIME: i32 = 62; -pub const ENOSR: i32 = 63; -pub const ENONET: i32 = 64; -pub const ENOPKG: i32 = 65; -pub const EREMOTE: i32 = 66; -pub const ENOLINK: i32 = 67; -pub const EADV: i32 = 68; -pub const ESRMNT: i32 = 69; -pub const ECOMM: i32 = 70; -pub const EPROTO: i32 = 71; -pub const EMULTIHOP: i32 = 72; -pub const EDOTDOT: i32 = 73; -pub const EBADMSG: i32 = 74; -pub const EOVERFLOW: i32 = 75; -pub const ENOTUNIQ: i32 = 76; -pub const EBADFD: i32 = 77; -pub const EREMCHG: i32 = 78; -pub const ELIBACC: i32 = 79; -pub const ELIBBAD: i32 = 80; -pub const ELIBSCN: i32 = 81; -pub const ELIBMAX: i32 = 82; -pub const ELIBEXEC: i32 = 83; -pub const EILSEQ: i32 = 84; -pub const ERESTART: i32 = 85; -pub const ESTRPIPE: i32 = 86; -pub const EUSERS: i32 = 87; -pub const ENOTSOCK: i32 = 88; -pub const EDESTADDRREQ: i32 = 89; -pub const 
EMSGSIZE: i32 = 90; -pub const EPROTOTYPE: i32 = 91; -pub const ENOPROTOOPT: i32 = 92; -pub const EPROTONOSUPPORT: i32 = 93; -pub const ESOCKTNOSUPPORT: i32 = 94; -pub const EOPNOTSUPP: i32 = 95; -pub const EPFNOSUPPORT: i32 = 96; -pub const EAFNOSUPPORT: i32 = 97; -pub const EADDRINUSE: i32 = 98; -pub const EADDRNOTAVAIL: i32 = 99; -pub const ENETDOWN: i32 = 100; -pub const ENETUNREACH: i32 = 101; -pub const ENETRESET: i32 = 102; -pub const ECONNABORTED: i32 = 103; -pub const ECONNRESET: i32 = 104; -pub const ENOBUFS: i32 = 105; -pub const EISCONN: i32 = 106; -pub const ENOTCONN: i32 = 107; -pub const ESHUTDOWN: i32 = 108; -pub const ETOOMANYREFS: i32 = 109; -pub const ETIMEDOUT: i32 = 110; -pub const ECONNREFUSED: i32 = 111; -pub const EHOSTDOWN: i32 = 112; -pub const EHOSTUNREACH: i32 = 113; -pub const EALREADY: i32 = 114; -pub const EINPROGRESS: i32 = 115; -pub const ESTALE: i32 = 116; -pub const EUCLEAN: i32 = 117; -pub const ENOTNAM: i32 = 118; -pub const ENAVAIL: i32 = 119; -pub const EISNAM: i32 = 120; -pub const EREMOTEIO: i32 = 121; -pub const EDQUOT: i32 = 122; -pub const ENOMEDIUM: i32 = 123; -pub const EMEDIUMTYPE: i32 = 124; -pub const ECANCELED: i32 = 125; -pub const ENOKEY: i32 = 126; -pub const EKEYEXPIRED: i32 = 127; -pub const EKEYREVOKED: i32 = 128; -pub const EKEYREJECTED: i32 = 129; -pub const EOWNERDEAD: i32 = 130; -pub const ENOTRECOVERABLE: i32 = 131; -pub const ERFKILL: i32 = 132; -pub const EHWPOISON: i32 = 133; -pub const ELBIN: i32 = 134; -pub const EFTYPE: i32 = 135; -pub const ENMFILE: i32 = 136; -pub const EPROCLIM: i32 = 137; -pub const ENOTSUP: i32 = 138; -pub const ENOSHARE: i32 = 139; -pub const ECASECLASH: i32 = 140; - -// fcntl.h -pub const FIOCLEX: i32 = 0x30b; -pub const F_SETFL: i32 = 0x9; -pub const F_DUPFD_CLOEXEC: i32 = 0x12; -pub const F_GETFD: i32 = 0x1; -pub const F_GETFL: i32 = 0x2; -pub const O_RDONLY: i32 = 0x1; -pub const O_WRONLY: i32 = 0x2; -pub const O_RDWR: i32 = 0x3; -pub const O_CREAT: i32 = 0x4; -pub const O_EXCL: i32 = 0x8; -pub const O_NOCTTY: i32 = 0x0; -pub const O_TRUNC: i32 = 0x20; -pub const O_APPEND: i32 = 0x10; -pub const O_NONBLOCK: i32 = 0x40; -pub const O_DSYNC: i32 = 0x80; -pub const O_DIRECT: i32 = 0x200; -pub const O_LARGEFILE: i32 = 0x2000; -pub const O_DIRECTORY: i32 = 0x800; -pub const O_NOFOLLOW: i32 = 0x1000; -pub const O_NOATIME: i32 = 0x40000; -pub const O_CLOEXEC: i32 = 0x400; -pub const O_ACCMODE: i32 = 0x0003; -pub const AT_FDCWD: i32 = -100; -pub const AT_REMOVEDIR: i32 = 0x200; - -// sys/types.h -pub const SEEK_SET: i32 = 0; -pub const SEEK_CUR: i32 = 1; -pub const SEEK_END: i32 = 2; - -// sys/stat.h -pub const S_IFDIR: u32 = 0x4000; -pub const S_IFLNK: u32 = 0xA000; -pub const S_IFREG: u32 = 0x8000; -pub const S_IFMT: u32 = 0xF000; -pub const S_IFIFO: u32 = 0x1000; -pub const S_IFSOCK: u32 = 0xc000; -pub const S_IFBLK: u32 = 0x6000; -pub const S_IFCHR: u32 = 0x2000; -pub const S_IRUSR: u32 = 0x100; -pub const S_IWUSR: u32 = 0x80; -pub const S_IXUSR: u32 = 0x40; -pub const S_IRGRP: u32 = 0x20; -pub const S_IWGRP: u32 = 0x10; -pub const S_IXGRP: u32 = 0x8; -pub const S_IROTH: u32 = 0x004; -pub const S_IWOTH: u32 = 0x002; -pub const S_IXOTH: u32 = 0x001; - -// sys/poll.h -pub const POLLIN: i16 = 0x01; -pub const POLLOUT: i16 = 0x04; -pub const POLLHUP: i16 = 0x10; -pub const POLLERR: i16 = 0x08; -pub const POLLNVAL: i16 = 0x20; - -// sys/socket.h -pub const AF_UNIX: i32 = 1; -pub const SOCK_DGRAM: i32 = 2; -pub const SOCK_STREAM: i32 = 1; -pub const AF_INET: i32 = 2; -pub const AF_INET6: i32 = 10; -pub const 
MSG_PEEK: i32 = 0x02; -pub const SOL_SOCKET: i32 = 1; -pub const SHUT_WR: i32 = 2; -pub const SHUT_RD: i32 = 1; -pub const SHUT_RDWR: i32 = 3; -pub const SO_ERROR: i32 = 4; -pub const SO_REUSEADDR: i32 = 11; -pub const SOMAXCONN: i32 = 8; -pub const SO_LINGER: i32 = 6; -pub const SO_RCVTIMEO: i32 = 0xa; -pub const SO_SNDTIMEO: i32 = 0xe; -pub const SO_BROADCAST: i32 = 1; - -// netinet/tcp.h -pub const TCP_NODELAY: i32 = 0x10; - -// nuttx/fs/ioctl.h -pub const FIONBIO: i32 = 0x30a; - -// unistd.h -pub const STDIN_FILENO: i32 = 0; -pub const STDOUT_FILENO: i32 = 1; -pub const STDERR_FILENO: i32 = 2; -pub const _SC_PAGESIZE: i32 = 0x36; -pub const _SC_THREAD_STACK_MIN: i32 = 0x58; -pub const _SC_GETPW_R_SIZE_MAX: i32 = 0x25; - -// signal.h -pub const SIGHUP: c_int = 1; -pub const SIGINT: c_int = 2; -pub const SIGQUIT: c_int = 3; -pub const SIGILL: c_int = 4; -pub const SIGTRAP: c_int = 5; -pub const SIGABRT: c_int = 6; -pub const SIGIOT: c_int = 6; -pub const SIGBUS: c_int = 7; -pub const SIGFPE: c_int = 8; -pub const SIGKILL: c_int = 9; -pub const SIGUSR1: c_int = 10; -pub const SIGSEGV: c_int = 11; -pub const SIGUSR2: c_int = 12; -pub const SIGPIPE: c_int = 13; -pub const SIGALRM: c_int = 14; -pub const SIGTERM: c_int = 15; -pub const SIGSTKFLT: c_int = 16; -pub const SIGCHLD: c_int = 17; -pub const SIGCONT: c_int = 18; -pub const SIGSTOP: c_int = 19; -pub const SIGTSTP: c_int = 20; -pub const SIGTTIN: c_int = 21; -pub const SIGTTOU: c_int = 22; -pub const SIGURG: c_int = 23; -pub const SIGXCPU: c_int = 24; -pub const SIGXFSZ: c_int = 25; -pub const SIGVTALRM: c_int = 26; -pub const SIGPROF: c_int = 27; -pub const SIGWINCH: c_int = 28; -pub const SIGIO: c_int = 29; -pub const SIGPOLL: c_int = SIGIO; -pub const SIGPWR: c_int = 30; -pub const SIGSYS: c_int = 31; - -// pthread.h -pub const PTHREAD_MUTEX_NORMAL: i32 = 0; - -// netinet/in.h -pub const IP_TTL: i32 = 0x1e; -pub const IPV6_V6ONLY: i32 = 0x17; -pub const IPV6_JOIN_GROUP: i32 = 0x11; -pub const IPV6_LEAVE_GROUP: i32 = 0x12; -pub const IP_MULTICAST_LOOP: i32 = 0x13; -pub const IPV6_MULTICAST_LOOP: i32 = 0x15; -pub const IP_MULTICAST_TTL: i32 = 0x12; -pub const IP_ADD_MEMBERSHIP: i32 = 0x14; -pub const IP_DROP_MEMBERSHIP: i32 = 0x15; - -extern "C" { - pub fn __errno() -> *mut c_int; - pub fn bind(sockfd: i32, addr: *const sockaddr, addrlen: socklen_t) -> i32; - pub fn ioctl(fd: i32, request: i32, ...) 
-> i32; - pub fn dirfd(dirp: *mut DIR) -> i32; - pub fn recvfrom( - sockfd: i32, - buf: *mut c_void, - len: usize, - flags: i32, - src_addr: *mut sockaddr, - addrlen: *mut socklen_t, - ) -> i32; - - pub fn pthread_create( - thread: *mut pthread_t, - attr: *const pthread_attr_t, - start_routine: extern "C" fn(*mut c_void) -> *mut c_void, - arg: *mut c_void, - ) -> i32; - - pub fn clock_gettime(clockid: clockid_t, tp: *mut timespec) -> i32; - pub fn futimens(fd: i32, times: *const timespec) -> i32; - pub fn pthread_condattr_setclock(attr: *mut pthread_condattr_t, clock_id: clockid_t) -> i32; - pub fn pthread_setname_np(thread: pthread_t, name: *const c_char) -> i32; - pub fn pthread_getname_np(thread: pthread_t, name: *mut c_char, len: usize) -> i32; - pub fn getrandom(buf: *mut c_void, buflen: usize, flags: u32) -> isize; - pub fn arc4random() -> u32; - pub fn arc4random_buf(bytes: *mut c_void, nbytes: usize); -} diff --git a/vendor/libc/src/unix/redox/mod.rs b/vendor/libc/src/unix/redox/mod.rs deleted file mode 100644 index 50bdaf4d4f06ba..00000000000000 --- a/vendor/libc/src/unix/redox/mod.rs +++ /dev/null @@ -1,1496 +0,0 @@ -use crate::prelude::*; - -pub type wchar_t = i32; - -pub type blkcnt_t = c_ulong; -pub type blksize_t = c_long; -pub type clock_t = c_long; -pub type clockid_t = c_int; -pub type dev_t = c_long; -pub type fsblkcnt_t = c_ulong; -pub type fsfilcnt_t = c_ulong; -pub type ino_t = c_ulonglong; -pub type mode_t = c_int; -pub type nfds_t = c_ulong; -pub type nlink_t = c_ulong; -pub type off_t = c_longlong; -pub type pthread_t = *mut c_void; -// Must be usize due to library/std/sys_common/thread_local.rs, -// should technically be *mut c_void -pub type pthread_key_t = usize; -pub type rlim_t = c_ulonglong; -pub type sa_family_t = u16; -pub type sem_t = *mut c_void; -pub type sigset_t = c_ulonglong; -pub type socklen_t = u32; -pub type speed_t = u32; -pub type suseconds_t = c_int; -pub type tcflag_t = u32; -pub type time_t = c_longlong; -pub type id_t = c_uint; -pub type pid_t = usize; -pub type uid_t = c_int; -pub type gid_t = c_int; - -#[derive(Debug)] -pub enum timezone {} -impl Copy for timezone {} -impl Clone for timezone { - fn clone(&self) -> timezone { - *self - } -} - -s_no_extra_traits! { - #[repr(C)] - pub struct utsname { - pub sysname: [c_char; UTSLENGTH], - pub nodename: [c_char; UTSLENGTH], - pub release: [c_char; UTSLENGTH], - pub version: [c_char; UTSLENGTH], - pub machine: [c_char; UTSLENGTH], - pub domainname: [c_char; UTSLENGTH], - } - - pub struct dirent { - pub d_ino: crate::ino_t, - pub d_off: off_t, - pub d_reclen: c_ushort, - pub d_type: c_uchar, - pub d_name: [c_char; 256], - } - - pub struct sockaddr_un { - pub sun_family: crate::sa_family_t, - pub sun_path: [c_char; 108], - } - - pub struct sockaddr_storage { - pub ss_family: crate::sa_family_t, - __ss_padding: [u8; 128 - size_of::() - size_of::()], - __ss_align: c_ulong, - } -} - -s! 
{ - pub struct addrinfo { - pub ai_flags: c_int, - pub ai_family: c_int, - pub ai_socktype: c_int, - pub ai_protocol: c_int, - pub ai_addrlen: size_t, - pub ai_canonname: *mut c_char, - pub ai_addr: *mut crate::sockaddr, - pub ai_next: *mut crate::addrinfo, - } - - pub struct Dl_info { - pub dli_fname: *const c_char, - pub dli_fbase: *mut c_void, - pub dli_sname: *const c_char, - pub dli_saddr: *mut c_void, - } - - pub struct epoll_event { - pub events: u32, - pub u64: u64, - pub _pad: u64, - } - - pub struct fd_set { - fds_bits: [c_ulong; crate::FD_SETSIZE as usize / ULONG_SIZE], - } - - pub struct in_addr { - pub s_addr: crate::in_addr_t, - } - - pub struct ip_mreq { - pub imr_multiaddr: crate::in_addr, - pub imr_interface: crate::in_addr, - } - - pub struct lconv { - pub currency_symbol: *const c_char, - pub decimal_point: *const c_char, - pub frac_digits: c_char, - pub grouping: *const c_char, - pub int_curr_symbol: *const c_char, - pub int_frac_digits: c_char, - pub mon_decimal_point: *const c_char, - pub mon_grouping: *const c_char, - pub mon_thousands_sep: *const c_char, - pub negative_sign: *const c_char, - pub n_cs_precedes: c_char, - pub n_sep_by_space: c_char, - pub n_sign_posn: c_char, - pub positive_sign: *const c_char, - pub p_cs_precedes: c_char, - pub p_sep_by_space: c_char, - pub p_sign_posn: c_char, - pub thousands_sep: *const c_char, - } - - pub struct msghdr { - pub msg_name: *mut c_void, - pub msg_namelen: crate::socklen_t, - pub msg_iov: *mut crate::iovec, - pub msg_iovlen: size_t, - pub msg_control: *mut c_void, - pub msg_controllen: size_t, - pub msg_flags: c_int, - } - - pub struct cmsghdr { - pub cmsg_len: size_t, - pub cmsg_level: c_int, - pub cmsg_type: c_int, - } - - pub struct passwd { - pub pw_name: *mut c_char, - pub pw_passwd: *mut c_char, - pub pw_uid: crate::uid_t, - pub pw_gid: crate::gid_t, - pub pw_gecos: *mut c_char, - pub pw_dir: *mut c_char, - pub pw_shell: *mut c_char, - } - - // FIXME(1.0): This should not implement `PartialEq` - #[allow(unpredictable_function_pointer_comparisons)] - pub struct sigaction { - pub sa_sigaction: crate::sighandler_t, - pub sa_flags: c_ulong, - pub sa_restorer: Option, - pub sa_mask: crate::sigset_t, - } - - pub struct siginfo_t { - pub si_signo: c_int, - pub si_errno: c_int, - pub si_code: c_int, - _pad: [c_int; 29], - _align: [usize; 0], - } - - pub struct sockaddr { - pub sa_family: crate::sa_family_t, - pub sa_data: [c_char; 14], - } - - pub struct sockaddr_in { - pub sin_family: crate::sa_family_t, - pub sin_port: crate::in_port_t, - pub sin_addr: crate::in_addr, - pub sin_zero: [c_char; 8], - } - - pub struct sockaddr_in6 { - pub sin6_family: crate::sa_family_t, - pub sin6_port: crate::in_port_t, - pub sin6_flowinfo: u32, - pub sin6_addr: crate::in6_addr, - pub sin6_scope_id: u32, - } - - pub struct stat { - pub st_dev: crate::dev_t, - pub st_ino: crate::ino_t, - pub st_nlink: crate::nlink_t, - pub st_mode: mode_t, - pub st_uid: crate::uid_t, - pub st_gid: crate::gid_t, - pub st_rdev: crate::dev_t, - pub st_size: off_t, - pub st_blksize: crate::blksize_t, - pub st_blocks: crate::blkcnt_t, - pub st_atime: crate::time_t, - pub st_atime_nsec: c_long, - pub st_mtime: crate::time_t, - pub st_mtime_nsec: c_long, - pub st_ctime: crate::time_t, - pub st_ctime_nsec: c_long, - _pad: [c_char; 24], - } - - pub struct statvfs { - pub f_bsize: c_ulong, - pub f_frsize: c_ulong, - pub f_blocks: crate::fsblkcnt_t, - pub f_bfree: crate::fsblkcnt_t, - pub f_bavail: crate::fsblkcnt_t, - pub f_files: crate::fsfilcnt_t, - pub f_ffree: 
crate::fsfilcnt_t, - pub f_favail: crate::fsfilcnt_t, - pub f_fsid: c_ulong, - pub f_flag: c_ulong, - pub f_namemax: c_ulong, - } - - pub struct termios { - pub c_iflag: crate::tcflag_t, - pub c_oflag: crate::tcflag_t, - pub c_cflag: crate::tcflag_t, - pub c_lflag: crate::tcflag_t, - pub c_line: crate::cc_t, - pub c_cc: [crate::cc_t; crate::NCCS], - pub c_ispeed: crate::speed_t, - pub c_ospeed: crate::speed_t, - } - - pub struct tm { - pub tm_sec: c_int, - pub tm_min: c_int, - pub tm_hour: c_int, - pub tm_mday: c_int, - pub tm_mon: c_int, - pub tm_year: c_int, - pub tm_wday: c_int, - pub tm_yday: c_int, - pub tm_isdst: c_int, - pub tm_gmtoff: c_long, - pub tm_zone: *const c_char, - } - - pub struct ucred { - pub pid: pid_t, - pub uid: uid_t, - pub gid: gid_t, - } - - #[cfg_attr(target_pointer_width = "32", repr(C, align(4)))] - #[cfg_attr(target_pointer_width = "64", repr(C, align(8)))] - pub struct pthread_attr_t { - bytes: [u8; _PTHREAD_ATTR_SIZE], - } - #[repr(C)] - #[repr(align(4))] - pub struct pthread_barrier_t { - bytes: [u8; _PTHREAD_BARRIER_SIZE], - } - #[repr(C)] - #[repr(align(4))] - pub struct pthread_barrierattr_t { - bytes: [u8; _PTHREAD_BARRIERATTR_SIZE], - } - #[repr(C)] - #[repr(align(4))] - pub struct pthread_mutex_t { - bytes: [u8; _PTHREAD_MUTEX_SIZE], - } - #[repr(C)] - #[repr(align(4))] - pub struct pthread_rwlock_t { - bytes: [u8; _PTHREAD_RWLOCK_SIZE], - } - #[repr(C)] - #[repr(align(4))] - pub struct pthread_mutexattr_t { - bytes: [u8; _PTHREAD_MUTEXATTR_SIZE], - } - #[repr(C)] - #[repr(align(1))] - pub struct pthread_rwlockattr_t { - bytes: [u8; _PTHREAD_RWLOCKATTR_SIZE], - } - #[repr(C)] - #[repr(align(4))] - pub struct pthread_cond_t { - bytes: [u8; _PTHREAD_COND_SIZE], - } - #[repr(C)] - #[repr(align(4))] - pub struct pthread_condattr_t { - bytes: [u8; _PTHREAD_CONDATTR_SIZE], - } - #[repr(C)] - #[repr(align(4))] - pub struct pthread_once_t { - bytes: [u8; _PTHREAD_ONCE_SIZE], - } - #[repr(C)] - #[repr(align(4))] - pub struct pthread_spinlock_t { - bytes: [u8; _PTHREAD_SPINLOCK_SIZE], - } -} -const _PTHREAD_ATTR_SIZE: usize = 32; -const _PTHREAD_RWLOCKATTR_SIZE: usize = 1; -const _PTHREAD_RWLOCK_SIZE: usize = 4; -const _PTHREAD_BARRIER_SIZE: usize = 24; -const _PTHREAD_BARRIERATTR_SIZE: usize = 4; -const _PTHREAD_CONDATTR_SIZE: usize = 8; -const _PTHREAD_COND_SIZE: usize = 8; -const _PTHREAD_MUTEX_SIZE: usize = 12; -const _PTHREAD_MUTEXATTR_SIZE: usize = 20; -const _PTHREAD_ONCE_SIZE: usize = 4; -const _PTHREAD_SPINLOCK_SIZE: usize = 4; - -pub const UTSLENGTH: usize = 65; - -// intentionally not public, only used for fd_set -cfg_if! 
{ - if #[cfg(target_pointer_width = "32")] { - const ULONG_SIZE: usize = 32; - } else if #[cfg(target_pointer_width = "64")] { - const ULONG_SIZE: usize = 64; - } else { - // Unknown target_pointer_width - } -} - -// limits.h -pub const PATH_MAX: c_int = 4096; - -// fcntl.h -pub const F_GETLK: c_int = 5; -pub const F_SETLK: c_int = 6; -pub const F_SETLKW: c_int = 7; -pub const F_ULOCK: c_int = 0; -pub const F_LOCK: c_int = 1; -pub const F_TLOCK: c_int = 2; -pub const F_TEST: c_int = 3; - -pub const AT_FDCWD: c_int = -100; - -// FIXME(redox): relibc { -pub const RTLD_DEFAULT: *mut c_void = ptr::null_mut(); -// } - -// dlfcn.h -pub const RTLD_LAZY: c_int = 0x0001; -pub const RTLD_NOW: c_int = 0x0002; -pub const RTLD_GLOBAL: c_int = 0x0100; -pub const RTLD_LOCAL: c_int = 0x0000; - -// errno.h -pub const EPERM: c_int = 1; /* Operation not permitted */ -pub const ENOENT: c_int = 2; /* No such file or directory */ -pub const ESRCH: c_int = 3; /* No such process */ -pub const EINTR: c_int = 4; /* Interrupted system call */ -pub const EIO: c_int = 5; /* I/O error */ -pub const ENXIO: c_int = 6; /* No such device or address */ -pub const E2BIG: c_int = 7; /* Argument list too long */ -pub const ENOEXEC: c_int = 8; /* Exec format error */ -pub const EBADF: c_int = 9; /* Bad file number */ -pub const ECHILD: c_int = 10; /* No child processes */ -pub const EAGAIN: c_int = 11; /* Try again */ -pub const ENOMEM: c_int = 12; /* Out of memory */ -pub const EACCES: c_int = 13; /* Permission denied */ -pub const EFAULT: c_int = 14; /* Bad address */ -pub const ENOTBLK: c_int = 15; /* Block device required */ -pub const EBUSY: c_int = 16; /* Device or resource busy */ -pub const EEXIST: c_int = 17; /* File exists */ -pub const EXDEV: c_int = 18; /* Cross-device link */ -pub const ENODEV: c_int = 19; /* No such device */ -pub const ENOTDIR: c_int = 20; /* Not a directory */ -pub const EISDIR: c_int = 21; /* Is a directory */ -pub const EINVAL: c_int = 22; /* Invalid argument */ -pub const ENFILE: c_int = 23; /* File table overflow */ -pub const EMFILE: c_int = 24; /* Too many open files */ -pub const ENOTTY: c_int = 25; /* Not a typewriter */ -pub const ETXTBSY: c_int = 26; /* Text file busy */ -pub const EFBIG: c_int = 27; /* File too large */ -pub const ENOSPC: c_int = 28; /* No space left on device */ -pub const ESPIPE: c_int = 29; /* Illegal seek */ -pub const EROFS: c_int = 30; /* Read-only file system */ -pub const EMLINK: c_int = 31; /* Too many links */ -pub const EPIPE: c_int = 32; /* Broken pipe */ -pub const EDOM: c_int = 33; /* Math argument out of domain of func */ -pub const ERANGE: c_int = 34; /* Math result not representable */ -pub const EDEADLK: c_int = 35; /* Resource deadlock would occur */ -pub const ENAMETOOLONG: c_int = 36; /* File name too long */ -pub const ENOLCK: c_int = 37; /* No record locks available */ -pub const ENOSYS: c_int = 38; /* Function not implemented */ -pub const ENOTEMPTY: c_int = 39; /* Directory not empty */ -pub const ELOOP: c_int = 40; /* Too many symbolic links encountered */ -pub const EWOULDBLOCK: c_int = 41; /* Operation would block */ -pub const ENOMSG: c_int = 42; /* No message of desired type */ -pub const EIDRM: c_int = 43; /* Identifier removed */ -pub const ECHRNG: c_int = 44; /* Channel number out of range */ -pub const EL2NSYNC: c_int = 45; /* Level 2 not synchronized */ -pub const EL3HLT: c_int = 46; /* Level 3 halted */ -pub const EL3RST: c_int = 47; /* Level 3 reset */ -pub const ELNRNG: c_int = 48; /* Link number out of range */ -pub const 
EUNATCH: c_int = 49; /* Protocol driver not attached */ -pub const ENOCSI: c_int = 50; /* No CSI structure available */ -pub const EL2HLT: c_int = 51; /* Level 2 halted */ -pub const EBADE: c_int = 52; /* Invalid exchange */ -pub const EBADR: c_int = 53; /* Invalid request descriptor */ -pub const EXFULL: c_int = 54; /* Exchange full */ -pub const ENOANO: c_int = 55; /* No anode */ -pub const EBADRQC: c_int = 56; /* Invalid request code */ -pub const EBADSLT: c_int = 57; /* Invalid slot */ -pub const EDEADLOCK: c_int = 58; /* Resource deadlock would occur */ -pub const EBFONT: c_int = 59; /* Bad font file format */ -pub const ENOSTR: c_int = 60; /* Device not a stream */ -pub const ENODATA: c_int = 61; /* No data available */ -pub const ETIME: c_int = 62; /* Timer expired */ -pub const ENOSR: c_int = 63; /* Out of streams resources */ -pub const ENONET: c_int = 64; /* Machine is not on the network */ -pub const ENOPKG: c_int = 65; /* Package not installed */ -pub const EREMOTE: c_int = 66; /* Object is remote */ -pub const ENOLINK: c_int = 67; /* Link has been severed */ -pub const EADV: c_int = 68; /* Advertise error */ -pub const ESRMNT: c_int = 69; /* Srmount error */ -pub const ECOMM: c_int = 70; /* Communication error on send */ -pub const EPROTO: c_int = 71; /* Protocol error */ -pub const EMULTIHOP: c_int = 72; /* Multihop attempted */ -pub const EDOTDOT: c_int = 73; /* RFS specific error */ -pub const EBADMSG: c_int = 74; /* Not a data message */ -pub const EOVERFLOW: c_int = 75; /* Value too large for defined data type */ -pub const ENOTUNIQ: c_int = 76; /* Name not unique on network */ -pub const EBADFD: c_int = 77; /* File descriptor in bad state */ -pub const EREMCHG: c_int = 78; /* Remote address changed */ -pub const ELIBACC: c_int = 79; /* Can not access a needed shared library */ -pub const ELIBBAD: c_int = 80; /* Accessing a corrupted shared library */ -pub const ELIBSCN: c_int = 81; /* .lib section in a.out corrupted */ -/* Attempting to link in too many shared libraries */ -pub const ELIBMAX: c_int = 82; -pub const ELIBEXEC: c_int = 83; /* Cannot exec a shared library directly */ -pub const EILSEQ: c_int = 84; /* Illegal byte sequence */ -/* Interrupted system call should be restarted */ -pub const ERESTART: c_int = 85; -pub const ESTRPIPE: c_int = 86; /* Streams pipe error */ -pub const EUSERS: c_int = 87; /* Too many users */ -pub const ENOTSOCK: c_int = 88; /* Socket operation on non-socket */ -pub const EDESTADDRREQ: c_int = 89; /* Destination address required */ -pub const EMSGSIZE: c_int = 90; /* Message too long */ -pub const EPROTOTYPE: c_int = 91; /* Protocol wrong type for socket */ -pub const ENOPROTOOPT: c_int = 92; /* Protocol not available */ -pub const EPROTONOSUPPORT: c_int = 93; /* Protocol not supported */ -pub const ESOCKTNOSUPPORT: c_int = 94; /* Socket type not supported */ -/* Operation not supported on transport endpoint */ -pub const EOPNOTSUPP: c_int = 95; -pub const ENOTSUP: c_int = EOPNOTSUPP; -pub const EPFNOSUPPORT: c_int = 96; /* Protocol family not supported */ -/* Address family not supported by protocol */ -pub const EAFNOSUPPORT: c_int = 97; -pub const EADDRINUSE: c_int = 98; /* Address already in use */ -pub const EADDRNOTAVAIL: c_int = 99; /* Cannot assign requested address */ -pub const ENETDOWN: c_int = 100; /* Network is down */ -pub const ENETUNREACH: c_int = 101; /* Network is unreachable */ -/* Network dropped connection because of reset */ -pub const ENETRESET: c_int = 102; -pub const ECONNABORTED: c_int = 103; /* Software 
caused connection abort */ -pub const ECONNRESET: c_int = 104; /* Connection reset by peer */ -pub const ENOBUFS: c_int = 105; /* No buffer space available */ -pub const EISCONN: c_int = 106; /* Transport endpoint is already connected */ -pub const ENOTCONN: c_int = 107; /* Transport endpoint is not connected */ -/* Cannot send after transport endpoint shutdown */ -pub const ESHUTDOWN: c_int = 108; -pub const ETOOMANYREFS: c_int = 109; /* Too many references: cannot splice */ -pub const ETIMEDOUT: c_int = 110; /* Connection timed out */ -pub const ECONNREFUSED: c_int = 111; /* Connection refused */ -pub const EHOSTDOWN: c_int = 112; /* Host is down */ -pub const EHOSTUNREACH: c_int = 113; /* No route to host */ -pub const EALREADY: c_int = 114; /* Operation already in progress */ -pub const EINPROGRESS: c_int = 115; /* Operation now in progress */ -pub const ESTALE: c_int = 116; /* Stale NFS file handle */ -pub const EUCLEAN: c_int = 117; /* Structure needs cleaning */ -pub const ENOTNAM: c_int = 118; /* Not a XENIX named type file */ -pub const ENAVAIL: c_int = 119; /* No XENIX semaphores available */ -pub const EISNAM: c_int = 120; /* Is a named type file */ -pub const EREMOTEIO: c_int = 121; /* Remote I/O error */ -pub const EDQUOT: c_int = 122; /* Quota exceeded */ -pub const ENOMEDIUM: c_int = 123; /* No medium found */ -pub const EMEDIUMTYPE: c_int = 124; /* Wrong medium type */ -pub const ECANCELED: c_int = 125; /* Operation Canceled */ -pub const ENOKEY: c_int = 126; /* Required key not available */ -pub const EKEYEXPIRED: c_int = 127; /* Key has expired */ -pub const EKEYREVOKED: c_int = 128; /* Key has been revoked */ -pub const EKEYREJECTED: c_int = 129; /* Key was rejected by service */ -pub const EOWNERDEAD: c_int = 130; /* Owner died */ -pub const ENOTRECOVERABLE: c_int = 131; /* State not recoverable */ - -// fcntl.h -pub const F_DUPFD: c_int = 0; -pub const F_GETFD: c_int = 1; -pub const F_SETFD: c_int = 2; -pub const F_GETFL: c_int = 3; -pub const F_SETFL: c_int = 4; -// FIXME(redox): relibc { -pub const F_DUPFD_CLOEXEC: c_int = crate::F_DUPFD; -// } -pub const FD_CLOEXEC: c_int = 0x0100_0000; -pub const O_RDONLY: c_int = 0x0001_0000; -pub const O_WRONLY: c_int = 0x0002_0000; -pub const O_RDWR: c_int = 0x0003_0000; -pub const O_ACCMODE: c_int = 0x0003_0000; -pub const O_NONBLOCK: c_int = 0x0004_0000; -pub const O_NDELAY: c_int = O_NONBLOCK; -pub const O_APPEND: c_int = 0x0008_0000; -pub const O_SHLOCK: c_int = 0x0010_0000; -pub const O_EXLOCK: c_int = 0x0020_0000; -pub const O_ASYNC: c_int = 0x0040_0000; -pub const O_FSYNC: c_int = 0x0080_0000; -pub const O_CLOEXEC: c_int = 0x0100_0000; -pub const O_CREAT: c_int = 0x0200_0000; -pub const O_TRUNC: c_int = 0x0400_0000; -pub const O_EXCL: c_int = 0x0800_0000; -pub const O_DIRECTORY: c_int = 0x1000_0000; -pub const O_PATH: c_int = 0x2000_0000; -pub const O_SYMLINK: c_int = 0x4000_0000; -// Negative to allow it to be used as int -// FIXME(redox): Fix negative values missing from includes -pub const O_NOFOLLOW: c_int = -0x8000_0000; -pub const O_NOCTTY: c_int = 0x00000200; - -// locale.h -pub const LC_ALL: c_int = 0; -pub const LC_COLLATE: c_int = 1; -pub const LC_CTYPE: c_int = 2; -pub const LC_MESSAGES: c_int = 3; -pub const LC_MONETARY: c_int = 4; -pub const LC_NUMERIC: c_int = 5; -pub const LC_TIME: c_int = 6; - -// netdb.h -pub const AI_PASSIVE: c_int = 0x0001; -pub const AI_CANONNAME: c_int = 0x0002; -pub const AI_NUMERICHOST: c_int = 0x0004; -pub const AI_V4MAPPED: c_int = 0x0008; -pub const AI_ALL: c_int = 0x0010; -pub 
const AI_ADDRCONFIG: c_int = 0x0020; -pub const AI_NUMERICSERV: c_int = 0x0400; -pub const EAI_BADFLAGS: c_int = -1; -pub const EAI_NONAME: c_int = -2; -pub const EAI_AGAIN: c_int = -3; -pub const EAI_FAIL: c_int = -4; -pub const EAI_NODATA: c_int = -5; -pub const EAI_FAMILY: c_int = -6; -pub const EAI_SOCKTYPE: c_int = -7; -pub const EAI_SERVICE: c_int = -8; -pub const EAI_ADDRFAMILY: c_int = -9; -pub const EAI_MEMORY: c_int = -10; -pub const EAI_SYSTEM: c_int = -11; -pub const EAI_OVERFLOW: c_int = -12; -pub const NI_MAXHOST: c_int = 1025; -pub const NI_MAXSERV: c_int = 32; -pub const NI_NUMERICHOST: c_int = 0x0001; -pub const NI_NUMERICSERV: c_int = 0x0002; -pub const NI_NOFQDN: c_int = 0x0004; -pub const NI_NAMEREQD: c_int = 0x0008; -pub const NI_DGRAM: c_int = 0x0010; - -// netinet/in.h -// FIXME(redox): relibc { -pub const IP_TTL: c_int = 2; -pub const IPV6_UNICAST_HOPS: c_int = 16; -pub const IPV6_MULTICAST_IF: c_int = 17; -pub const IPV6_MULTICAST_HOPS: c_int = 18; -pub const IPV6_MULTICAST_LOOP: c_int = 19; -pub const IPV6_ADD_MEMBERSHIP: c_int = 20; -pub const IPV6_DROP_MEMBERSHIP: c_int = 21; -pub const IPV6_V6ONLY: c_int = 26; -pub const IP_MULTICAST_IF: c_int = 32; -pub const IP_MULTICAST_TTL: c_int = 33; -pub const IP_MULTICAST_LOOP: c_int = 34; -pub const IP_ADD_MEMBERSHIP: c_int = 35; -pub const IP_DROP_MEMBERSHIP: c_int = 36; -pub const IP_TOS: c_int = 1; -pub const IP_RECVTOS: c_int = 2; -pub const IPPROTO_IGMP: c_int = 2; -pub const IPPROTO_PUP: c_int = 12; -pub const IPPROTO_IDP: c_int = 22; -pub const IPPROTO_RAW: c_int = 255; -pub const IPPROTO_MAX: c_int = 255; -// } - -// netinet/tcp.h -pub const TCP_NODELAY: c_int = 1; -// FIXME(redox): relibc { -pub const TCP_KEEPIDLE: c_int = 1; -// } - -// poll.h -pub const POLLIN: c_short = 0x001; -pub const POLLPRI: c_short = 0x002; -pub const POLLOUT: c_short = 0x004; -pub const POLLERR: c_short = 0x008; -pub const POLLHUP: c_short = 0x010; -pub const POLLNVAL: c_short = 0x020; -pub const POLLRDNORM: c_short = 0x040; -pub const POLLRDBAND: c_short = 0x080; -pub const POLLWRNORM: c_short = 0x100; -pub const POLLWRBAND: c_short = 0x200; - -// pthread.h -pub const PTHREAD_MUTEX_NORMAL: c_int = 0; -pub const PTHREAD_MUTEX_RECURSIVE: c_int = 1; -pub const PTHREAD_MUTEX_INITIALIZER: crate::pthread_mutex_t = crate::pthread_mutex_t { - bytes: [0; _PTHREAD_MUTEX_SIZE], -}; -pub const PTHREAD_COND_INITIALIZER: crate::pthread_cond_t = crate::pthread_cond_t { - bytes: [0; _PTHREAD_COND_SIZE], -}; -pub const PTHREAD_RWLOCK_INITIALIZER: crate::pthread_rwlock_t = crate::pthread_rwlock_t { - bytes: [0; _PTHREAD_RWLOCK_SIZE], -}; -pub const PTHREAD_STACK_MIN: size_t = 4096; - -// signal.h -pub const SIG_BLOCK: c_int = 0; -pub const SIG_UNBLOCK: c_int = 1; -pub const SIG_SETMASK: c_int = 2; -pub const SIGHUP: c_int = 1; -pub const SIGINT: c_int = 2; -pub const SIGQUIT: c_int = 3; -pub const SIGILL: c_int = 4; -pub const SIGTRAP: c_int = 5; -pub const SIGABRT: c_int = 6; -pub const SIGBUS: c_int = 7; -pub const SIGFPE: c_int = 8; -pub const SIGKILL: c_int = 9; -pub const SIGUSR1: c_int = 10; -pub const SIGSEGV: c_int = 11; -pub const SIGUSR2: c_int = 12; -pub const SIGPIPE: c_int = 13; -pub const SIGALRM: c_int = 14; -pub const SIGTERM: c_int = 15; -pub const SIGSTKFLT: c_int = 16; -pub const SIGCHLD: c_int = 17; -pub const SIGCONT: c_int = 18; -pub const SIGSTOP: c_int = 19; -pub const SIGTSTP: c_int = 20; -pub const SIGTTIN: c_int = 21; -pub const SIGTTOU: c_int = 22; -pub const SIGURG: c_int = 23; -pub const SIGXCPU: c_int = 24; -pub const 
SIGXFSZ: c_int = 25; -pub const SIGVTALRM: c_int = 26; -pub const SIGPROF: c_int = 27; -pub const SIGWINCH: c_int = 28; -pub const SIGIO: c_int = 29; -pub const SIGPWR: c_int = 30; -pub const SIGSYS: c_int = 31; -pub const NSIG: c_int = 32; - -pub const SA_NOCLDWAIT: c_ulong = 0x0000_0002; -pub const SA_RESTORER: c_ulong = 0x0000_0004; // FIXME(redox): remove after relibc removes it -pub const SA_SIGINFO: c_ulong = 0x0200_0000; -pub const SA_ONSTACK: c_ulong = 0x0400_0000; -pub const SA_RESTART: c_ulong = 0x0800_0000; -pub const SA_NODEFER: c_ulong = 0x1000_0000; -pub const SA_RESETHAND: c_ulong = 0x2000_0000; -pub const SA_NOCLDSTOP: c_ulong = 0x4000_0000; - -// sys/file.h -pub const LOCK_SH: c_int = 1; -pub const LOCK_EX: c_int = 2; -pub const LOCK_NB: c_int = 4; -pub const LOCK_UN: c_int = 8; - -// sys/epoll.h -pub const EPOLL_CLOEXEC: c_int = 0x0100_0000; -pub const EPOLL_CTL_ADD: c_int = 1; -pub const EPOLL_CTL_DEL: c_int = 2; -pub const EPOLL_CTL_MOD: c_int = 3; -pub const EPOLLIN: c_int = 0x001; -pub const EPOLLPRI: c_int = 0x002; -pub const EPOLLOUT: c_int = 0x004; -pub const EPOLLERR: c_int = 0x008; -pub const EPOLLHUP: c_int = 0x010; -pub const EPOLLNVAL: c_int = 0x020; -pub const EPOLLRDNORM: c_int = 0x040; -pub const EPOLLRDBAND: c_int = 0x080; -pub const EPOLLWRNORM: c_int = 0x100; -pub const EPOLLWRBAND: c_int = 0x200; -pub const EPOLLMSG: c_int = 0x400; -pub const EPOLLRDHUP: c_int = 0x2000; -pub const EPOLLEXCLUSIVE: c_int = 1 << 28; -pub const EPOLLWAKEUP: c_int = 1 << 29; -pub const EPOLLONESHOT: c_int = 1 << 30; -pub const EPOLLET: c_int = 1 << 31; - -// sys/stat.h -pub const S_IFMT: c_int = 0o17_0000; -pub const S_IFDIR: c_int = 0o4_0000; -pub const S_IFCHR: c_int = 0o2_0000; -pub const S_IFBLK: c_int = 0o6_0000; -pub const S_IFREG: c_int = 0o10_0000; -pub const S_IFIFO: c_int = 0o1_0000; -pub const S_IFLNK: c_int = 0o12_0000; -pub const S_IFSOCK: c_int = 0o14_0000; -pub const S_IRWXU: c_int = 0o0700; -pub const S_IRUSR: c_int = 0o0400; -pub const S_IWUSR: c_int = 0o0200; -pub const S_IXUSR: c_int = 0o0100; -pub const S_IRWXG: c_int = 0o0070; -pub const S_IRGRP: c_int = 0o0040; -pub const S_IWGRP: c_int = 0o0020; -pub const S_IXGRP: c_int = 0o0010; -pub const S_IRWXO: c_int = 0o0007; -pub const S_IROTH: c_int = 0o0004; -pub const S_IWOTH: c_int = 0o0002; -pub const S_IXOTH: c_int = 0o0001; - -// stdlib.h -pub const EXIT_SUCCESS: c_int = 0; -pub const EXIT_FAILURE: c_int = 1; - -// sys/ioctl.h -// FIXME(redox): relibc { -pub const FIONREAD: c_ulong = 0x541B; -pub const FIONBIO: c_ulong = 0x5421; -pub const FIOCLEX: c_ulong = 0x5451; -// } -pub const TCGETS: c_ulong = 0x5401; -pub const TCSETS: c_ulong = 0x5402; -pub const TCFLSH: c_ulong = 0x540B; -pub const TIOCSCTTY: c_ulong = 0x540E; -pub const TIOCGPGRP: c_ulong = 0x540F; -pub const TIOCSPGRP: c_ulong = 0x5410; -pub const TIOCGWINSZ: c_ulong = 0x5413; -pub const TIOCSWINSZ: c_ulong = 0x5414; - -// sys/mman.h -pub const PROT_NONE: c_int = 0x0000; -pub const PROT_READ: c_int = 0x0004; -pub const PROT_WRITE: c_int = 0x0002; -pub const PROT_EXEC: c_int = 0x0001; - -pub const MADV_NORMAL: c_int = 0; -pub const MADV_RANDOM: c_int = 1; -pub const MADV_SEQUENTIAL: c_int = 2; -pub const MADV_WILLNEED: c_int = 3; -pub const MADV_DONTNEED: c_int = 4; - -pub const MAP_SHARED: c_int = 0x0001; -pub const MAP_PRIVATE: c_int = 0x0002; -pub const MAP_ANON: c_int = 0x0020; -pub const MAP_ANONYMOUS: c_int = MAP_ANON; -pub const MAP_FIXED: c_int = 0x0004; -pub const MAP_FAILED: *mut c_void = !0 as _; - -pub const MS_ASYNC: c_int = 
0x0001; -pub const MS_INVALIDATE: c_int = 0x0002; -pub const MS_SYNC: c_int = 0x0004; - -// sys/resource.h -pub const RLIM_INFINITY: rlim_t = !0; -pub const RLIM_SAVED_CUR: rlim_t = RLIM_INFINITY; -pub const RLIM_SAVED_MAX: rlim_t = RLIM_INFINITY; -pub const RLIMIT_CPU: c_int = 0; -pub const RLIMIT_FSIZE: c_int = 1; -pub const RLIMIT_DATA: c_int = 2; -pub const RLIMIT_STACK: c_int = 3; -pub const RLIMIT_CORE: c_int = 4; -pub const RLIMIT_RSS: c_int = 5; -pub const RLIMIT_NPROC: c_int = 6; -pub const RLIMIT_NOFILE: c_int = 7; -pub const RLIMIT_MEMLOCK: c_int = 8; -pub const RLIMIT_AS: c_int = 9; -pub const RLIMIT_LOCKS: c_int = 10; -pub const RLIMIT_SIGPENDING: c_int = 11; -pub const RLIMIT_MSGQUEUE: c_int = 12; -pub const RLIMIT_NICE: c_int = 13; -pub const RLIMIT_RTPRIO: c_int = 14; -pub const RLIMIT_NLIMITS: c_int = 15; - -pub const RUSAGE_SELF: c_int = 0; -pub const RUSAGE_CHILDREN: c_int = -1; -pub const RUSAGE_BOTH: c_int = -2; -pub const RUSAGE_THREAD: c_int = 1; - -// sys/select.h -pub const FD_SETSIZE: usize = 1024; - -// sys/socket.h -pub const AF_INET: c_int = 2; -pub const AF_INET6: c_int = 10; -pub const AF_UNIX: c_int = 1; -pub const AF_UNSPEC: c_int = 0; -pub const PF_INET: c_int = 2; -pub const PF_INET6: c_int = 10; -pub const PF_UNIX: c_int = 1; -pub const PF_UNSPEC: c_int = 0; -pub const MSG_CTRUNC: c_int = 8; -pub const MSG_DONTROUTE: c_int = 4; -pub const MSG_EOR: c_int = 128; -pub const MSG_OOB: c_int = 1; -pub const MSG_PEEK: c_int = 2; -pub const MSG_TRUNC: c_int = 32; -pub const MSG_DONTWAIT: c_int = 64; -pub const MSG_WAITALL: c_int = 256; -pub const SCM_RIGHTS: c_int = 1; -pub const SHUT_RD: c_int = 0; -pub const SHUT_WR: c_int = 1; -pub const SHUT_RDWR: c_int = 2; -pub const SO_DEBUG: c_int = 1; -pub const SO_REUSEADDR: c_int = 2; -pub const SO_TYPE: c_int = 3; -pub const SO_ERROR: c_int = 4; -pub const SO_DONTROUTE: c_int = 5; -pub const SO_BROADCAST: c_int = 6; -pub const SO_SNDBUF: c_int = 7; -pub const SO_RCVBUF: c_int = 8; -pub const SO_KEEPALIVE: c_int = 9; -pub const SO_OOBINLINE: c_int = 10; -pub const SO_NO_CHECK: c_int = 11; -pub const SO_PRIORITY: c_int = 12; -pub const SO_LINGER: c_int = 13; -pub const SO_BSDCOMPAT: c_int = 14; -pub const SO_REUSEPORT: c_int = 15; -pub const SO_PASSCRED: c_int = 16; -pub const SO_PEERCRED: c_int = 17; -pub const SO_RCVLOWAT: c_int = 18; -pub const SO_SNDLOWAT: c_int = 19; -pub const SO_RCVTIMEO: c_int = 20; -pub const SO_SNDTIMEO: c_int = 21; -pub const SO_ACCEPTCONN: c_int = 30; -pub const SO_PEERSEC: c_int = 31; -pub const SO_SNDBUFFORCE: c_int = 32; -pub const SO_RCVBUFFORCE: c_int = 33; -pub const SO_PROTOCOL: c_int = 38; -pub const SO_DOMAIN: c_int = 39; -pub const SOCK_STREAM: c_int = 1; -pub const SOCK_DGRAM: c_int = 2; -pub const SOCK_RAW: c_int = 3; -pub const SOCK_NONBLOCK: c_int = 0o4_000; -pub const SOCK_CLOEXEC: c_int = 0o2_000_000; -pub const SOCK_SEQPACKET: c_int = 5; -pub const SOL_SOCKET: c_int = 1; -pub const SOMAXCONN: c_int = 128; - -// sys/termios.h -pub const VEOF: usize = 0; -pub const VEOL: usize = 1; -pub const VEOL2: usize = 2; -pub const VERASE: usize = 3; -pub const VWERASE: usize = 4; -pub const VKILL: usize = 5; -pub const VREPRINT: usize = 6; -pub const VSWTC: usize = 7; -pub const VINTR: usize = 8; -pub const VQUIT: usize = 9; -pub const VSUSP: usize = 10; -pub const VSTART: usize = 12; -pub const VSTOP: usize = 13; -pub const VLNEXT: usize = 14; -pub const VDISCARD: usize = 15; -pub const VMIN: usize = 16; -pub const VTIME: usize = 17; -pub const NCCS: usize = 32; - -pub const IGNBRK: 
crate::tcflag_t = 0o000_001; -pub const BRKINT: crate::tcflag_t = 0o000_002; -pub const IGNPAR: crate::tcflag_t = 0o000_004; -pub const PARMRK: crate::tcflag_t = 0o000_010; -pub const INPCK: crate::tcflag_t = 0o000_020; -pub const ISTRIP: crate::tcflag_t = 0o000_040; -pub const INLCR: crate::tcflag_t = 0o000_100; -pub const IGNCR: crate::tcflag_t = 0o000_200; -pub const ICRNL: crate::tcflag_t = 0o000_400; -pub const IXON: crate::tcflag_t = 0o001_000; -pub const IXOFF: crate::tcflag_t = 0o002_000; - -pub const OPOST: crate::tcflag_t = 0o000_001; -pub const ONLCR: crate::tcflag_t = 0o000_002; -pub const OLCUC: crate::tcflag_t = 0o000_004; -pub const OCRNL: crate::tcflag_t = 0o000_010; -pub const ONOCR: crate::tcflag_t = 0o000_020; -pub const ONLRET: crate::tcflag_t = 0o000_040; -pub const OFILL: crate::tcflag_t = 0o0000_100; -pub const OFDEL: crate::tcflag_t = 0o0000_200; - -pub const B0: speed_t = 0o000_000; -pub const B50: speed_t = 0o000_001; -pub const B75: speed_t = 0o000_002; -pub const B110: speed_t = 0o000_003; -pub const B134: speed_t = 0o000_004; -pub const B150: speed_t = 0o000_005; -pub const B200: speed_t = 0o000_006; -pub const B300: speed_t = 0o000_007; -pub const B600: speed_t = 0o000_010; -pub const B1200: speed_t = 0o000_011; -pub const B1800: speed_t = 0o000_012; -pub const B2400: speed_t = 0o000_013; -pub const B4800: speed_t = 0o000_014; -pub const B9600: speed_t = 0o000_015; -pub const B19200: speed_t = 0o000_016; -pub const B38400: speed_t = 0o000_017; - -pub const B57600: speed_t = 0o0_020; -pub const B115200: speed_t = 0o0_021; -pub const B230400: speed_t = 0o0_022; -pub const B460800: speed_t = 0o0_023; -pub const B500000: speed_t = 0o0_024; -pub const B576000: speed_t = 0o0_025; -pub const B921600: speed_t = 0o0_026; -pub const B1000000: speed_t = 0o0_027; -pub const B1152000: speed_t = 0o0_030; -pub const B1500000: speed_t = 0o0_031; -pub const B2000000: speed_t = 0o0_032; -pub const B2500000: speed_t = 0o0_033; -pub const B3000000: speed_t = 0o0_034; -pub const B3500000: speed_t = 0o0_035; -pub const B4000000: speed_t = 0o0_036; - -pub const CSIZE: crate::tcflag_t = 0o001_400; -pub const CS5: crate::tcflag_t = 0o000_000; -pub const CS6: crate::tcflag_t = 0o000_400; -pub const CS7: crate::tcflag_t = 0o001_000; -pub const CS8: crate::tcflag_t = 0o001_400; - -pub const CSTOPB: crate::tcflag_t = 0o002_000; -pub const CREAD: crate::tcflag_t = 0o004_000; -pub const PARENB: crate::tcflag_t = 0o010_000; -pub const PARODD: crate::tcflag_t = 0o020_000; -pub const HUPCL: crate::tcflag_t = 0o040_000; - -pub const CLOCAL: crate::tcflag_t = 0o0100000; - -pub const ISIG: crate::tcflag_t = 0x0000_0080; -pub const ICANON: crate::tcflag_t = 0x0000_0100; -pub const ECHO: crate::tcflag_t = 0x0000_0008; -pub const ECHOE: crate::tcflag_t = 0x0000_0002; -pub const ECHOK: crate::tcflag_t = 0x0000_0004; -pub const ECHONL: crate::tcflag_t = 0x0000_0010; -pub const NOFLSH: crate::tcflag_t = 0x8000_0000; -pub const TOSTOP: crate::tcflag_t = 0x0040_0000; -pub const IEXTEN: crate::tcflag_t = 0x0000_0400; - -pub const TCOOFF: c_int = 0; -pub const TCOON: c_int = 1; -pub const TCIOFF: c_int = 2; -pub const TCION: c_int = 3; - -pub const TCIFLUSH: c_int = 0; -pub const TCOFLUSH: c_int = 1; -pub const TCIOFLUSH: c_int = 2; - -pub const TCSANOW: c_int = 0; -pub const TCSADRAIN: c_int = 1; -pub const TCSAFLUSH: c_int = 2; - -pub const _POSIX_VDISABLE: crate::cc_t = 0; - -// sys/wait.h -pub const WNOHANG: c_int = 1; -pub const WUNTRACED: c_int = 2; - -pub const WSTOPPED: c_int = 2; -pub const 
WEXITED: c_int = 4; -pub const WCONTINUED: c_int = 8; -pub const WNOWAIT: c_int = 0x0100_0000; - -pub const __WNOTHREAD: c_int = 0x2000_0000; -pub const __WALL: c_int = 0x4000_0000; -#[allow(overflowing_literals)] -pub const __WCLONE: c_int = 0x8000_0000; - -// time.h -pub const CLOCK_REALTIME: c_int = 1; -pub const CLOCK_MONOTONIC: c_int = 4; -pub const CLOCK_PROCESS_CPUTIME_ID: crate::clockid_t = 2; -pub const CLOCKS_PER_SEC: crate::clock_t = 1_000_000; - -// unistd.h -// POSIX.1 { -pub const _SC_ARG_MAX: c_int = 0; -pub const _SC_CHILD_MAX: c_int = 1; -pub const _SC_CLK_TCK: c_int = 2; -pub const _SC_NGROUPS_MAX: c_int = 3; -pub const _SC_OPEN_MAX: c_int = 4; -pub const _SC_STREAM_MAX: c_int = 5; -pub const _SC_TZNAME_MAX: c_int = 6; -// ... -pub const _SC_VERSION: c_int = 29; -pub const _SC_PAGESIZE: c_int = 30; -pub const _SC_PAGE_SIZE: c_int = 30; -// ... -pub const _SC_RE_DUP_MAX: c_int = 44; - -pub const _SC_NPROCESSORS_CONF: c_int = 57; -pub const _SC_NPROCESSORS_ONLN: c_int = 58; - -// ... -pub const _SC_GETGR_R_SIZE_MAX: c_int = 69; -pub const _SC_GETPW_R_SIZE_MAX: c_int = 70; -pub const _SC_LOGIN_NAME_MAX: c_int = 71; -pub const _SC_TTY_NAME_MAX: c_int = 72; -// ... -pub const _SC_SYMLOOP_MAX: c_int = 173; -// ... -pub const _SC_HOST_NAME_MAX: c_int = 180; -// ... -pub const _SC_SIGQUEUE_MAX: c_int = 190; -pub const _SC_REALTIME_SIGNALS: c_int = 191; -// } POSIX.1 - -// confstr -pub const _CS_PATH: c_int = 0; -pub const _CS_POSIX_V6_WIDTH_RESTRICTED_ENVS: c_int = 1; -pub const _CS_POSIX_V5_WIDTH_RESTRICTED_ENVS: c_int = 4; -pub const _CS_POSIX_V7_WIDTH_RESTRICTED_ENVS: c_int = 5; -pub const _CS_POSIX_V6_ILP32_OFF32_CFLAGS: c_int = 1116; -pub const _CS_POSIX_V6_ILP32_OFF32_LDFLAGS: c_int = 1117; -pub const _CS_POSIX_V6_ILP32_OFF32_LIBS: c_int = 1118; -pub const _CS_POSIX_V6_ILP32_OFF32_LINTFLAGS: c_int = 1119; -pub const _CS_POSIX_V6_ILP32_OFFBIG_CFLAGS: c_int = 1120; -pub const _CS_POSIX_V6_ILP32_OFFBIG_LDFLAGS: c_int = 1121; -pub const _CS_POSIX_V6_ILP32_OFFBIG_LIBS: c_int = 1122; -pub const _CS_POSIX_V6_ILP32_OFFBIG_LINTFLAGS: c_int = 1123; -pub const _CS_POSIX_V6_LP64_OFF64_CFLAGS: c_int = 1124; -pub const _CS_POSIX_V6_LP64_OFF64_LDFLAGS: c_int = 1125; -pub const _CS_POSIX_V6_LP64_OFF64_LIBS: c_int = 1126; -pub const _CS_POSIX_V6_LP64_OFF64_LINTFLAGS: c_int = 1127; -pub const _CS_POSIX_V6_LPBIG_OFFBIG_CFLAGS: c_int = 1128; -pub const _CS_POSIX_V6_LPBIG_OFFBIG_LDFLAGS: c_int = 1129; -pub const _CS_POSIX_V6_LPBIG_OFFBIG_LIBS: c_int = 1130; -pub const _CS_POSIX_V6_LPBIG_OFFBIG_LINTFLAGS: c_int = 1131; -pub const _CS_POSIX_V7_ILP32_OFF32_CFLAGS: c_int = 1132; -pub const _CS_POSIX_V7_ILP32_OFF32_LDFLAGS: c_int = 1133; -pub const _CS_POSIX_V7_ILP32_OFF32_LIBS: c_int = 1134; -pub const _CS_POSIX_V7_ILP32_OFF32_LINTFLAGS: c_int = 1135; -pub const _CS_POSIX_V7_ILP32_OFFBIG_CFLAGS: c_int = 1136; -pub const _CS_POSIX_V7_ILP32_OFFBIG_LDFLAGS: c_int = 1137; -pub const _CS_POSIX_V7_ILP32_OFFBIG_LIBS: c_int = 1138; -pub const _CS_POSIX_V7_ILP32_OFFBIG_LINTFLAGS: c_int = 1139; -pub const _CS_POSIX_V7_LP64_OFF64_CFLAGS: c_int = 1140; -pub const _CS_POSIX_V7_LP64_OFF64_LDFLAGS: c_int = 1141; -pub const _CS_POSIX_V7_LP64_OFF64_LIBS: c_int = 1142; -pub const _CS_POSIX_V7_LP64_OFF64_LINTFLAGS: c_int = 1143; -pub const _CS_POSIX_V7_LPBIG_OFFBIG_CFLAGS: c_int = 1144; -pub const _CS_POSIX_V7_LPBIG_OFFBIG_LDFLAGS: c_int = 1145; -pub const _CS_POSIX_V7_LPBIG_OFFBIG_LIBS: c_int = 1146; -pub const _CS_POSIX_V7_LPBIG_OFFBIG_LINTFLAGS: c_int = 1147; - -pub const F_OK: c_int = 0; -pub const R_OK: c_int = 
4; -pub const W_OK: c_int = 2; -pub const X_OK: c_int = 1; - -// stdio.h -pub const BUFSIZ: c_uint = 1024; -pub const _IOFBF: c_int = 0; -pub const _IOLBF: c_int = 1; -pub const _IONBF: c_int = 2; -pub const SEEK_SET: c_int = 0; -pub const SEEK_CUR: c_int = 1; -pub const SEEK_END: c_int = 2; -pub const STDIN_FILENO: c_int = 0; -pub const STDOUT_FILENO: c_int = 1; -pub const STDERR_FILENO: c_int = 2; - -pub const _PC_LINK_MAX: c_int = 0; -pub const _PC_MAX_CANON: c_int = 1; -pub const _PC_MAX_INPUT: c_int = 2; -pub const _PC_NAME_MAX: c_int = 3; -pub const _PC_PATH_MAX: c_int = 4; -pub const _PC_PIPE_BUF: c_int = 5; -pub const _PC_CHOWN_RESTRICTED: c_int = 6; -pub const _PC_NO_TRUNC: c_int = 7; -pub const _PC_VDISABLE: c_int = 8; -pub const _PC_SYNC_IO: c_int = 9; -pub const _PC_ASYNC_IO: c_int = 10; -pub const _PC_PRIO_IO: c_int = 11; -pub const _PC_SOCK_MAXBUF: c_int = 12; -pub const _PC_FILESIZEBITS: c_int = 13; -pub const _PC_REC_INCR_XFER_SIZE: c_int = 14; -pub const _PC_REC_MAX_XFER_SIZE: c_int = 15; -pub const _PC_REC_MIN_XFER_SIZE: c_int = 16; -pub const _PC_REC_XFER_ALIGN: c_int = 17; -pub const _PC_ALLOC_SIZE_MIN: c_int = 18; -pub const _PC_SYMLINK_MAX: c_int = 19; -pub const _PC_2_SYMLINKS: c_int = 20; - -pub const PRIO_PROCESS: c_int = 0; -pub const PRIO_PGRP: c_int = 1; -pub const PRIO_USER: c_int = 2; - -f! { - //sys/socket.h - pub const fn CMSG_ALIGN(len: size_t) -> size_t { - (len + size_of::() - 1) & !(size_of::() - 1) - } - pub const fn CMSG_LEN(length: c_uint) -> c_uint { - (CMSG_ALIGN(size_of::()) + length as usize) as c_uint - } - pub const fn CMSG_SPACE(len: c_uint) -> c_uint { - (CMSG_ALIGN(len as size_t) + CMSG_ALIGN(size_of::())) as c_uint - } - - // wait.h - pub fn FD_CLR(fd: c_int, set: *mut fd_set) -> () { - let fd = fd as usize; - let size = size_of_val(&(*set).fds_bits[0]) * 8; - (*set).fds_bits[fd / size] &= !(1 << (fd % size)); - return; - } - - pub fn FD_ISSET(fd: c_int, set: *const fd_set) -> bool { - let fd = fd as usize; - let size = size_of_val(&(*set).fds_bits[0]) * 8; - return ((*set).fds_bits[fd / size] & (1 << (fd % size))) != 0; - } - - pub fn FD_SET(fd: c_int, set: *mut fd_set) -> () { - let fd = fd as usize; - let size = size_of_val(&(*set).fds_bits[0]) * 8; - (*set).fds_bits[fd / size] |= 1 << (fd % size); - return; - } - - pub fn FD_ZERO(set: *mut fd_set) -> () { - for slot in (*set).fds_bits.iter_mut() { - *slot = 0; - } - } -} - -safe_f! 
{ - pub const fn WIFSTOPPED(status: c_int) -> bool { - (status & 0xff) == 0x7f - } - - pub const fn WSTOPSIG(status: c_int) -> c_int { - (status >> 8) & 0xff - } - - pub const fn WIFCONTINUED(status: c_int) -> bool { - status == 0xffff - } - - pub const fn WIFSIGNALED(status: c_int) -> bool { - ((status & 0x7f) + 1) as i8 >= 2 - } - - pub const fn WTERMSIG(status: c_int) -> c_int { - status & 0x7f - } - - pub const fn WIFEXITED(status: c_int) -> bool { - (status & 0x7f) == 0 - } - - pub const fn WEXITSTATUS(status: c_int) -> c_int { - (status >> 8) & 0xff - } - - pub const fn WCOREDUMP(status: c_int) -> bool { - (status & 0x80) != 0 - } -} - -extern "C" { - // errno.h - pub fn __errno_location() -> *mut c_int; - pub fn strerror_r(errnum: c_int, buf: *mut c_char, buflen: size_t) -> c_int; - - // dirent.h - pub fn dirfd(dirp: *mut crate::DIR) -> c_int; - - // unistd.h - pub fn pipe2(fds: *mut c_int, flags: c_int) -> c_int; - pub fn getdtablesize() -> c_int; - - // grp.h - pub fn getgrent() -> *mut crate::group; - pub fn setgrent(); - pub fn endgrent(); - pub fn getgrgid(gid: crate::gid_t) -> *mut crate::group; - pub fn getgrgid_r( - gid: crate::gid_t, - grp: *mut crate::group, - buf: *mut c_char, - buflen: size_t, - result: *mut *mut crate::group, - ) -> c_int; - pub fn getgrnam(name: *const c_char) -> *mut crate::group; - pub fn getgrnam_r( - name: *const c_char, - grp: *mut crate::group, - buf: *mut c_char, - buflen: size_t, - result: *mut *mut crate::group, - ) -> c_int; - pub fn getgrouplist( - user: *const c_char, - group: crate::gid_t, - groups: *mut crate::gid_t, - ngroups: *mut c_int, - ) -> c_int; - - // malloc.h - pub fn memalign(align: size_t, size: size_t) -> *mut c_void; - - // netdb.h - pub fn getnameinfo( - addr: *const crate::sockaddr, - addrlen: crate::socklen_t, - host: *mut c_char, - hostlen: crate::socklen_t, - serv: *mut c_char, - servlen: crate::socklen_t, - flags: c_int, - ) -> c_int; - - // pthread.h - pub fn pthread_atfork( - prepare: Option, - parent: Option, - child: Option, - ) -> c_int; - pub fn pthread_create( - tid: *mut crate::pthread_t, - attr: *const crate::pthread_attr_t, - start: extern "C" fn(*mut c_void) -> *mut c_void, - arg: *mut c_void, - ) -> c_int; - pub fn pthread_condattr_setclock( - attr: *mut pthread_condattr_t, - clock_id: crate::clockid_t, - ) -> c_int; - - //pty.h - pub fn openpty( - amaster: *mut c_int, - aslave: *mut c_int, - name: *mut c_char, - termp: *const termios, - winp: *const crate::winsize, - ) -> c_int; - - // pwd.h - pub fn getpwent() -> *mut passwd; - pub fn setpwent(); - pub fn endpwent(); - pub fn getpwnam_r( - name: *const c_char, - pwd: *mut passwd, - buf: *mut c_char, - buflen: size_t, - result: *mut *mut passwd, - ) -> c_int; - pub fn getpwuid_r( - uid: crate::uid_t, - pwd: *mut passwd, - buf: *mut c_char, - buflen: size_t, - result: *mut *mut passwd, - ) -> c_int; - - // signal.h - pub fn pthread_sigmask( - how: c_int, - set: *const crate::sigset_t, - oldset: *mut crate::sigset_t, - ) -> c_int; - pub fn pthread_cancel(thread: crate::pthread_t) -> c_int; - pub fn pthread_kill(thread: crate::pthread_t, sig: c_int) -> c_int; - pub fn sigtimedwait( - set: *const sigset_t, - sig: *mut siginfo_t, - timeout: *const crate::timespec, - ) -> c_int; - pub fn sigwait(set: *const sigset_t, sig: *mut c_int) -> c_int; - - // stdlib.h - pub fn getsubopt( - optionp: *mut *mut c_char, - tokens: *const *mut c_char, - valuep: *mut *mut c_char, - ) -> c_int; - pub fn mkostemp(template: *mut c_char, flags: c_int) -> c_int; - pub fn 
mkostemps(template: *mut c_char, suffixlen: c_int, flags: c_int) -> c_int; - pub fn reallocarray(ptr: *mut c_void, nmemb: size_t, size: size_t) -> *mut c_void; - - // string.h - pub fn explicit_bzero(p: *mut c_void, len: size_t); - pub fn strlcat(dst: *mut c_char, src: *const c_char, siz: size_t) -> size_t; - pub fn strlcpy(dst: *mut c_char, src: *const c_char, siz: size_t) -> size_t; - - // sys/epoll.h - pub fn epoll_create(size: c_int) -> c_int; - pub fn epoll_create1(flags: c_int) -> c_int; - pub fn epoll_wait( - epfd: c_int, - events: *mut crate::epoll_event, - maxevents: c_int, - timeout: c_int, - ) -> c_int; - pub fn epoll_ctl(epfd: c_int, op: c_int, fd: c_int, event: *mut crate::epoll_event) -> c_int; - - // sys/ioctl.h - pub fn ioctl(fd: c_int, request: c_ulong, ...) -> c_int; - - // sys/mman.h - pub fn madvise(addr: *mut c_void, len: size_t, advice: c_int) -> c_int; - pub fn msync(addr: *mut c_void, len: size_t, flags: c_int) -> c_int; - pub fn mprotect(addr: *mut c_void, len: size_t, prot: c_int) -> c_int; - pub fn shm_open(name: *const c_char, oflag: c_int, mode: mode_t) -> c_int; - pub fn shm_unlink(name: *const c_char) -> c_int; - - // sys/resource.h - pub fn getpriority(which: c_int, who: crate::id_t) -> c_int; - pub fn setpriority(which: c_int, who: crate::id_t, prio: c_int) -> c_int; - pub fn getrlimit(resource: c_int, rlim: *mut crate::rlimit) -> c_int; - pub fn setrlimit(resource: c_int, rlim: *const crate::rlimit) -> c_int; - - // sys/socket.h - pub fn CMSG_DATA(cmsg: *const cmsghdr) -> *mut c_uchar; - pub fn CMSG_FIRSTHDR(mhdr: *const msghdr) -> *mut cmsghdr; - pub fn CMSG_NXTHDR(mhdr: *const msghdr, cmsg: *const cmsghdr) -> *mut cmsghdr; - pub fn bind( - socket: c_int, - address: *const crate::sockaddr, - address_len: crate::socklen_t, - ) -> c_int; - pub fn recvfrom( - socket: c_int, - buf: *mut c_void, - len: size_t, - flags: c_int, - addr: *mut crate::sockaddr, - addrlen: *mut crate::socklen_t, - ) -> ssize_t; - pub fn recvmsg(socket: c_int, msg: *mut msghdr, flags: c_int) -> ssize_t; - pub fn sendmsg(socket: c_int, msg: *const msghdr, flags: c_int) -> ssize_t; - - // sys/stat.h - pub fn futimens(fd: c_int, times: *const crate::timespec) -> c_int; - - // sys/uio.h - pub fn preadv(fd: c_int, iov: *const crate::iovec, iovcnt: c_int, offset: off_t) -> ssize_t; - pub fn pwritev(fd: c_int, iov: *const crate::iovec, iovcnt: c_int, offset: off_t) -> ssize_t; - pub fn readv(fd: c_int, iov: *const crate::iovec, iovcnt: c_int) -> ssize_t; - pub fn writev(fd: c_int, iov: *const crate::iovec, iovcnt: c_int) -> ssize_t; - - // sys/utsname.h - pub fn uname(utsname: *mut utsname) -> c_int; - - // time.h - pub fn gettimeofday(tp: *mut crate::timeval, tz: *mut crate::timezone) -> c_int; - pub fn clock_gettime(clk_id: crate::clockid_t, tp: *mut crate::timespec) -> c_int; - pub fn strftime( - s: *mut c_char, - max: size_t, - format: *const c_char, - tm: *const crate::tm, - ) -> size_t; - - // utmp.h - pub fn login_tty(fd: c_int) -> c_int; -} - -cfg_if! 
{ - if #[cfg(feature = "extra_traits")] { - impl PartialEq for dirent { - fn eq(&self, other: &dirent) -> bool { - self.d_ino == other.d_ino - && self.d_off == other.d_off - && self.d_reclen == other.d_reclen - && self.d_type == other.d_type - && self - .d_name - .iter() - .zip(other.d_name.iter()) - .all(|(a, b)| a == b) - } - } - - impl Eq for dirent {} - - impl hash::Hash for dirent { - fn hash(&self, state: &mut H) { - self.d_ino.hash(state); - self.d_off.hash(state); - self.d_reclen.hash(state); - self.d_type.hash(state); - self.d_name.hash(state); - } - } - - impl PartialEq for sockaddr_un { - fn eq(&self, other: &sockaddr_un) -> bool { - self.sun_family == other.sun_family - && self - .sun_path - .iter() - .zip(other.sun_path.iter()) - .all(|(a, b)| a == b) - } - } - - impl Eq for sockaddr_un {} - - impl hash::Hash for sockaddr_un { - fn hash(&self, state: &mut H) { - self.sun_family.hash(state); - self.sun_path.hash(state); - } - } - - impl PartialEq for sockaddr_storage { - fn eq(&self, other: &sockaddr_storage) -> bool { - self.ss_family == other.ss_family - && self.__ss_align == self.__ss_align - && self - .__ss_padding - .iter() - .zip(other.__ss_padding.iter()) - .all(|(a, b)| a == b) - } - } - - impl Eq for sockaddr_storage {} - - impl hash::Hash for sockaddr_storage { - fn hash(&self, state: &mut H) { - self.ss_family.hash(state); - self.__ss_padding.hash(state); - self.__ss_align.hash(state); - } - } - - impl PartialEq for utsname { - fn eq(&self, other: &utsname) -> bool { - self.sysname - .iter() - .zip(other.sysname.iter()) - .all(|(a, b)| a == b) - && self - .nodename - .iter() - .zip(other.nodename.iter()) - .all(|(a, b)| a == b) - && self - .release - .iter() - .zip(other.release.iter()) - .all(|(a, b)| a == b) - && self - .version - .iter() - .zip(other.version.iter()) - .all(|(a, b)| a == b) - && self - .machine - .iter() - .zip(other.machine.iter()) - .all(|(a, b)| a == b) - && self - .domainname - .iter() - .zip(other.domainname.iter()) - .all(|(a, b)| a == b) - } - } - - impl Eq for utsname {} - - impl hash::Hash for utsname { - fn hash(&self, state: &mut H) { - self.sysname.hash(state); - self.nodename.hash(state); - self.release.hash(state); - self.version.hash(state); - self.machine.hash(state); - self.domainname.hash(state); - } - } - } -} diff --git a/vendor/libc/src/unix/solarish/compat.rs b/vendor/libc/src/unix/solarish/compat.rs deleted file mode 100644 index 22bcf12edcc822..00000000000000 --- a/vendor/libc/src/unix/solarish/compat.rs +++ /dev/null @@ -1,218 +0,0 @@ -// Common functions that are unfortunately missing on illumos and -// Solaris, but often needed by other crates. -use core::cmp::min; - -use crate::unix::solarish::*; -use crate::{c_char, c_int, size_t}; - -pub unsafe fn cfmakeraw(termios: *mut crate::termios) { - (*termios).c_iflag &= - !(IMAXBEL | IGNBRK | BRKINT | PARMRK | ISTRIP | INLCR | IGNCR | ICRNL | IXON); - (*termios).c_oflag &= !OPOST; - (*termios).c_lflag &= !(ECHO | ECHONL | ICANON | ISIG | IEXTEN); - (*termios).c_cflag &= !(CSIZE | PARENB); - (*termios).c_cflag |= CS8; - - // By default, most software expects a pending read to block until at - // least one byte becomes available. As per termio(7I), this requires - // setting the MIN and TIME parameters appropriately. - // - // As a somewhat unfortunate artefact of history, the MIN and TIME slots - // in the control character array overlap with the EOF and EOL slots used - // for canonical mode processing. 
Because the EOF character needs to be - // the ASCII EOT value (aka Control-D), it has the byte value 4. When - // switching to raw mode, this is interpreted as a MIN value of 4; i.e., - // reads will block until at least four bytes have been input. - // - // Other platforms with a distinct MIN slot like Linux and FreeBSD appear - // to default to a MIN value of 1, so we'll force that value here: - (*termios).c_cc[VMIN] = 1; - (*termios).c_cc[VTIME] = 0; -} - -pub unsafe fn cfsetspeed(termios: *mut crate::termios, speed: crate::speed_t) -> c_int { - // Neither of these functions on illumos or Solaris actually ever - // return an error - crate::cfsetispeed(termios, speed); - crate::cfsetospeed(termios, speed); - 0 -} - -#[cfg(target_os = "illumos")] -unsafe fn bail(fdm: c_int, fds: c_int) -> c_int { - let e = *___errno(); - if fds >= 0 { - crate::close(fds); - } - if fdm >= 0 { - crate::close(fdm); - } - *___errno() = e; - -1 -} - -#[cfg(target_os = "illumos")] -pub unsafe fn openpty( - amain: *mut c_int, - asubord: *mut c_int, - name: *mut c_char, - termp: *const termios, - winp: *const crate::winsize, -) -> c_int { - const PTEM: &[u8] = b"ptem\0"; - const LDTERM: &[u8] = b"ldterm\0"; - - // Open the main pseudo-terminal device, making sure not to set it as the - // controlling terminal for this process: - let fdm = crate::posix_openpt(O_RDWR | O_NOCTTY); - if fdm < 0 { - return -1; - } - - // Set permissions and ownership on the subordinate device and unlock it: - if crate::grantpt(fdm) < 0 || crate::unlockpt(fdm) < 0 { - return bail(fdm, -1); - } - - // Get the path name of the subordinate device: - let subordpath = crate::ptsname(fdm); - if subordpath.is_null() { - return bail(fdm, -1); - } - - // Open the subordinate device without setting it as the controlling - // terminal for this process: - let fds = crate::open(subordpath, O_RDWR | O_NOCTTY); - if fds < 0 { - return bail(fdm, -1); - } - - // Check if the STREAMS modules are already pushed: - let setup = crate::ioctl(fds, I_FIND, LDTERM.as_ptr()); - if setup < 0 { - return bail(fdm, fds); - } else if setup == 0 { - // The line discipline is not present, so push the appropriate STREAMS - // modules for the subordinate device: - if crate::ioctl(fds, I_PUSH, PTEM.as_ptr()) < 0 - || crate::ioctl(fds, I_PUSH, LDTERM.as_ptr()) < 0 - { - return bail(fdm, fds); - } - } - - // If provided, set the terminal parameters: - if !termp.is_null() && crate::tcsetattr(fds, TCSAFLUSH, termp) != 0 { - return bail(fdm, fds); - } - - // If provided, set the window size: - if !winp.is_null() && crate::ioctl(fds, TIOCSWINSZ, winp) < 0 { - return bail(fdm, fds); - } - - // If the caller wants the name of the subordinate device, copy it out. - // - // Note that this is a terrible interface: there appears to be no standard - // upper bound on the copy length for this pointer. Nobody should pass - // anything but NULL here, preferring instead to use ptsname(3C) directly. 
- if !name.is_null() { - crate::strcpy(name, subordpath); - } - - *amain = fdm; - *asubord = fds; - 0 -} - -#[cfg(target_os = "illumos")] -pub unsafe fn forkpty( - amain: *mut c_int, - name: *mut c_char, - termp: *const termios, - winp: *const crate::winsize, -) -> crate::pid_t { - let mut fds = -1; - - if openpty(amain, &mut fds, name, termp, winp) != 0 { - return -1; - } - - let pid = crate::fork(); - if pid < 0 { - return bail(*amain, fds); - } else if pid > 0 { - // In the parent process, we close the subordinate device and return the - // process ID of the new child: - crate::close(fds); - return pid; - } - - // The rest of this function executes in the child process. - - // Close the main side of the pseudo-terminal pair: - crate::close(*amain); - - // Use TIOCSCTTY to set the subordinate device as our controlling - // terminal. This will fail (with ENOTTY) if we are not the leader in - // our own session, so we call setsid() first. Finally, arrange for - // the pseudo-terminal to occupy the standard I/O descriptors. - if crate::setsid() < 0 - || crate::ioctl(fds, TIOCSCTTY, 0) < 0 - || crate::dup2(fds, 0) < 0 - || crate::dup2(fds, 1) < 0 - || crate::dup2(fds, 2) < 0 - { - // At this stage there are no particularly good ways to handle failure. - // Exit as abruptly as possible, using _exit() to avoid messing with any - // state still shared with the parent process. - crate::_exit(EXIT_FAILURE); - } - // Close the inherited descriptor, taking care to avoid closing the standard - // descriptors by mistake: - if fds > 2 { - crate::close(fds); - } - - 0 -} - -pub unsafe fn getpwent_r( - pwd: *mut passwd, - buf: *mut c_char, - buflen: size_t, - result: *mut *mut passwd, -) -> c_int { - let old_errno = *crate::___errno(); - *crate::___errno() = 0; - *result = native_getpwent_r(pwd, buf, min(buflen, c_int::MAX as size_t) as c_int); - - let ret = if (*result).is_null() { - *crate::___errno() - } else { - 0 - }; - *crate::___errno() = old_errno; - - ret -} - -pub unsafe fn getgrent_r( - grp: *mut crate::group, - buf: *mut c_char, - buflen: size_t, - result: *mut *mut crate::group, -) -> c_int { - let old_errno = *crate::___errno(); - *crate::___errno() = 0; - *result = native_getgrent_r(grp, buf, min(buflen, c_int::MAX as size_t) as c_int); - - let ret = if (*result).is_null() { - *crate::___errno() - } else { - 0 - }; - *crate::___errno() = old_errno; - - ret -} diff --git a/vendor/libc/src/unix/solarish/illumos.rs b/vendor/libc/src/unix/solarish/illumos.rs deleted file mode 100644 index fbeadaf344fa0c..00000000000000 --- a/vendor/libc/src/unix/solarish/illumos.rs +++ /dev/null @@ -1,343 +0,0 @@ -use crate::prelude::*; -use crate::{ - exit_status, off_t, NET_MAC_AWARE, NET_MAC_AWARE_INHERIT, PRIV_AWARE_RESET, PRIV_DEBUG, - PRIV_PFEXEC, PRIV_XPOLICY, -}; - -pub type lgrp_rsrc_t = c_int; -pub type lgrp_affinity_t = c_int; - -s! 
{ - pub struct aiocb { - pub aio_fildes: c_int, - pub aio_buf: *mut c_void, - pub aio_nbytes: size_t, - pub aio_offset: off_t, - pub aio_reqprio: c_int, - pub aio_sigevent: crate::sigevent, - pub aio_lio_opcode: c_int, - pub aio_resultp: crate::aio_result_t, - pub aio_state: c_int, - pub aio__pad: [c_int; 1], - } - - pub struct shmid_ds { - pub shm_perm: crate::ipc_perm, - pub shm_segsz: size_t, - pub shm_amp: *mut c_void, - pub shm_lkcnt: c_ushort, - pub shm_lpid: crate::pid_t, - pub shm_cpid: crate::pid_t, - pub shm_nattch: crate::shmatt_t, - pub shm_cnattch: c_ulong, - pub shm_atime: crate::time_t, - pub shm_dtime: crate::time_t, - pub shm_ctime: crate::time_t, - pub shm_pad4: [i64; 4], - } - - pub struct fil_info { - pub fi_flags: c_int, - pub fi_pos: c_int, - pub fi_name: [c_char; crate::FILNAME_MAX as usize], - } -} - -s_no_extra_traits! { - #[cfg_attr(any(target_arch = "x86", target_arch = "x86_64"), repr(packed(4)))] - pub struct epoll_event { - pub events: u32, - pub u64: u64, - } - - pub struct utmpx { - pub ut_user: [c_char; _UTX_USERSIZE], - pub ut_id: [c_char; _UTX_IDSIZE], - pub ut_line: [c_char; _UTX_LINESIZE], - pub ut_pid: crate::pid_t, - pub ut_type: c_short, - pub ut_exit: exit_status, - pub ut_tv: crate::timeval, - pub ut_session: c_int, - pub ut_pad: [c_int; _UTX_PADSIZE], - pub ut_syslen: c_short, - pub ut_host: [c_char; _UTX_HOSTSIZE], - } -} - -cfg_if! { - if #[cfg(feature = "extra_traits")] { - impl PartialEq for utmpx { - fn eq(&self, other: &utmpx) -> bool { - self.ut_type == other.ut_type - && self.ut_pid == other.ut_pid - && self.ut_user == other.ut_user - && self.ut_line == other.ut_line - && self.ut_id == other.ut_id - && self.ut_exit == other.ut_exit - && self.ut_session == other.ut_session - && self.ut_tv == other.ut_tv - && self.ut_syslen == other.ut_syslen - && self.ut_pad == other.ut_pad - && self - .ut_host - .iter() - .zip(other.ut_host.iter()) - .all(|(a, b)| a == b) - } - } - - impl Eq for utmpx {} - - impl hash::Hash for utmpx { - fn hash(&self, state: &mut H) { - self.ut_user.hash(state); - self.ut_type.hash(state); - self.ut_pid.hash(state); - self.ut_line.hash(state); - self.ut_id.hash(state); - self.ut_host.hash(state); - self.ut_exit.hash(state); - self.ut_session.hash(state); - self.ut_tv.hash(state); - self.ut_syslen.hash(state); - self.ut_pad.hash(state); - } - } - - impl PartialEq for epoll_event { - fn eq(&self, other: &epoll_event) -> bool { - self.events == other.events && self.u64 == other.u64 - } - } - impl Eq for epoll_event {} - impl hash::Hash for epoll_event { - fn hash(&self, state: &mut H) { - let events = self.events; - let u64 = self.u64; - events.hash(state); - u64.hash(state); - } - } - } -} - -pub const _UTX_USERSIZE: usize = 32; -pub const _UTX_LINESIZE: usize = 32; -pub const _UTX_PADSIZE: usize = 5; -pub const _UTX_IDSIZE: usize = 4; -pub const _UTX_HOSTSIZE: usize = 257; - -pub const AF_LOCAL: c_int = 1; // AF_UNIX -pub const AF_FILE: c_int = 1; // AF_UNIX - -pub const EFD_SEMAPHORE: c_int = 0x1; -pub const EFD_NONBLOCK: c_int = 0x800; -pub const EFD_CLOEXEC: c_int = 0x80000; - -pub const POLLRDHUP: c_short = 0x4000; - -pub const TCP_KEEPIDLE: c_int = 34; -pub const TCP_KEEPCNT: c_int = 35; -pub const TCP_KEEPINTVL: c_int = 36; -pub const TCP_CONGESTION: c_int = 37; - -// These constants are correct for 64-bit programs or 32-bit programs that are -// not using large-file mode. 
If Rust ever supports anything other than 64-bit -// compilation on illumos, this may require adjustment: -pub const F_OFD_GETLK: c_int = 47; -pub const F_OFD_SETLK: c_int = 48; -pub const F_OFD_SETLKW: c_int = 49; -pub const F_FLOCK: c_int = 53; -pub const F_FLOCKW: c_int = 54; - -pub const F_DUPFD_CLOEXEC: c_int = 37; -pub const F_DUPFD_CLOFORK: c_int = 58; -pub const F_DUP2FD_CLOEXEC: c_int = 36; -pub const F_DUP2FD_CLOFORK: c_int = 57; -pub const F_DUP3FD: c_int = 59; - -pub const FD_CLOFORK: c_int = 2; - -pub const FIL_ATTACH: c_int = 0x1; -pub const FIL_DETACH: c_int = 0x2; -pub const FIL_LIST: c_int = 0x3; -pub const FILNAME_MAX: c_int = 32; -pub const FILF_PROG: c_int = 0x1; -pub const FILF_AUTO: c_int = 0x2; -pub const FILF_BYPASS: c_int = 0x4; -pub const SOL_FILTER: c_int = 0xfffc; - -pub const MADV_PURGE: c_int = 9; - -pub const POSIX_FADV_NORMAL: c_int = 0; -pub const POSIX_FADV_RANDOM: c_int = 1; -pub const POSIX_FADV_SEQUENTIAL: c_int = 2; -pub const POSIX_FADV_WILLNEED: c_int = 3; -pub const POSIX_FADV_DONTNEED: c_int = 4; -pub const POSIX_FADV_NOREUSE: c_int = 5; - -pub const POSIX_SPAWN_SETSID: c_short = 0x40; - -pub const SIGINFO: c_int = 41; - -pub const O_DIRECT: c_int = 0x2000000; -pub const O_CLOFORK: c_int = 0x4000000; - -pub const MSG_CMSG_CLOEXEC: c_int = 0x1000; -pub const MSG_CMSG_CLOFORK: c_int = 0x2000; - -pub const PBIND_HARD: crate::processorid_t = -3; -pub const PBIND_SOFT: crate::processorid_t = -4; - -pub const PS_SYSTEM: c_int = 1; - -pub const MAP_FILE: c_int = 0; - -pub const MAP_32BIT: c_int = 0x80; - -pub const AF_NCA: c_int = 28; - -pub const PF_NCA: c_int = AF_NCA; - -pub const LOCK_SH: c_int = 1; -pub const LOCK_EX: c_int = 2; -pub const LOCK_NB: c_int = 4; -pub const LOCK_UN: c_int = 8; - -pub const _PC_LAST: c_int = 101; - -pub const VSTATUS: usize = 16; -pub const VERASE2: usize = 17; - -pub const EPOLLIN: c_int = 0x1; -pub const EPOLLPRI: c_int = 0x2; -pub const EPOLLOUT: c_int = 0x4; -pub const EPOLLRDNORM: c_int = 0x40; -pub const EPOLLRDBAND: c_int = 0x80; -pub const EPOLLWRNORM: c_int = 0x100; -pub const EPOLLWRBAND: c_int = 0x200; -pub const EPOLLMSG: c_int = 0x400; -pub const EPOLLERR: c_int = 0x8; -pub const EPOLLHUP: c_int = 0x10; -pub const EPOLLET: c_int = 0x80000000; -pub const EPOLLRDHUP: c_int = 0x2000; -pub const EPOLLONESHOT: c_int = 0x40000000; -pub const EPOLLWAKEUP: c_int = 0x20000000; -pub const EPOLLEXCLUSIVE: c_int = 0x10000000; -pub const EPOLL_CLOEXEC: c_int = 0x80000; -pub const EPOLL_CTL_ADD: c_int = 1; -pub const EPOLL_CTL_MOD: c_int = 3; -pub const EPOLL_CTL_DEL: c_int = 2; - -pub const PRIV_USER: c_uint = PRIV_DEBUG - | NET_MAC_AWARE - | NET_MAC_AWARE_INHERIT - | PRIV_XPOLICY - | PRIV_AWARE_RESET - | PRIV_PFEXEC; - -pub const LGRP_RSRC_COUNT: crate::lgrp_rsrc_t = 2; -pub const LGRP_RSRC_CPU: crate::lgrp_rsrc_t = 0; -pub const LGRP_RSRC_MEM: crate::lgrp_rsrc_t = 1; - -pub const P_DISABLED: c_int = 0x008; - -pub const AT_SUN_HWCAP2: c_uint = 2023; -pub const AT_SUN_FPTYPE: c_uint = 2027; - -pub const B1000000: crate::speed_t = 24; -pub const B1152000: crate::speed_t = 25; -pub const B1500000: crate::speed_t = 26; -pub const B2000000: crate::speed_t = 27; -pub const B2500000: crate::speed_t = 28; -pub const B3000000: crate::speed_t = 29; -pub const B3500000: crate::speed_t = 30; -pub const B4000000: crate::speed_t = 31; - -// sys/systeminfo.h -pub const SI_ADDRESS_WIDTH: c_int = 520; - -// sys/timerfd.h -pub const TFD_CLOEXEC: i32 = 0o2000000; -pub const TFD_NONBLOCK: i32 = 0o4000; -pub const TFD_TIMER_ABSTIME: i32 = 1 
<< 0; -pub const TFD_TIMER_CANCEL_ON_SET: i32 = 1 << 1; - -extern "C" { - pub fn eventfd(init: c_uint, flags: c_int) -> c_int; - - pub fn epoll_pwait( - epfd: c_int, - events: *mut crate::epoll_event, - maxevents: c_int, - timeout: c_int, - sigmask: *const crate::sigset_t, - ) -> c_int; - pub fn epoll_create(size: c_int) -> c_int; - pub fn epoll_create1(flags: c_int) -> c_int; - pub fn epoll_wait( - epfd: c_int, - events: *mut crate::epoll_event, - maxevents: c_int, - timeout: c_int, - ) -> c_int; - pub fn epoll_ctl(epfd: c_int, op: c_int, fd: c_int, event: *mut crate::epoll_event) -> c_int; - - pub fn mincore(addr: crate::caddr_t, len: size_t, vec: *mut c_char) -> c_int; - - pub fn pset_bind_lwp( - pset: crate::psetid_t, - id: crate::id_t, - pid: crate::pid_t, - opset: *mut crate::psetid_t, - ) -> c_int; - pub fn pset_getloadavg(pset: crate::psetid_t, load: *mut c_double, num: c_int) -> c_int; - - pub fn pthread_attr_get_np(thread: crate::pthread_t, attr: *mut crate::pthread_attr_t) - -> c_int; - pub fn pthread_attr_getstackaddr( - attr: *const crate::pthread_attr_t, - stackaddr: *mut *mut c_void, - ) -> c_int; - pub fn pthread_attr_setstack( - attr: *mut crate::pthread_attr_t, - stackaddr: *mut c_void, - stacksize: size_t, - ) -> c_int; - pub fn pthread_attr_setstackaddr( - attr: *mut crate::pthread_attr_t, - stackaddr: *mut c_void, - ) -> c_int; - - pub fn posix_fadvise(fd: c_int, offset: off_t, len: off_t, advice: c_int) -> c_int; - pub fn preadv(fd: c_int, iov: *const crate::iovec, iovcnt: c_int, offset: off_t) -> ssize_t; - pub fn pwritev(fd: c_int, iov: *const crate::iovec, iovcnt: c_int, offset: off_t) -> ssize_t; - pub fn getpagesizes2(pagesize: *mut size_t, nelem: c_int) -> c_int; - - pub fn posix_spawn_file_actions_addfchdir_np( - file_actions: *mut crate::posix_spawn_file_actions_t, - fd: c_int, - ) -> c_int; - - pub fn ptsname_r(fildes: c_int, name: *mut c_char, namelen: size_t) -> c_int; - - pub fn syncfs(fd: c_int) -> c_int; - - pub fn strcasecmp_l(s1: *const c_char, s2: *const c_char, loc: crate::locale_t) -> c_int; - pub fn strncasecmp_l( - s1: *const c_char, - s2: *const c_char, - n: size_t, - loc: crate::locale_t, - ) -> c_int; - - pub fn timerfd_create(clockid: c_int, flags: c_int) -> c_int; - pub fn timerfd_gettime(fd: c_int, curr_value: *mut crate::itimerspec) -> c_int; - pub fn timerfd_settime( - fd: c_int, - flags: c_int, - new_value: *const crate::itimerspec, - old_value: *mut crate::itimerspec, - ) -> c_int; -} diff --git a/vendor/libc/src/unix/solarish/mod.rs b/vendor/libc/src/unix/solarish/mod.rs deleted file mode 100644 index d8b32dfc0aae9c..00000000000000 --- a/vendor/libc/src/unix/solarish/mod.rs +++ /dev/null @@ -1,3240 +0,0 @@ -use crate::prelude::*; - -pub type caddr_t = *mut c_char; - -pub type clockid_t = c_int; -pub type blkcnt_t = c_long; -pub type clock_t = c_long; -pub type daddr_t = c_long; -pub type dev_t = c_ulong; -pub type fsblkcnt_t = c_ulong; -pub type fsfilcnt_t = c_ulong; -pub type ino_t = c_ulong; -pub type key_t = c_int; -pub type major_t = c_uint; -pub type minor_t = c_uint; -pub type mode_t = c_uint; -pub type nlink_t = c_uint; -pub type rlim_t = c_ulong; -pub type speed_t = c_uint; -pub type tcflag_t = c_uint; -pub type time_t = c_long; -pub type timer_t = c_int; -pub type wchar_t = c_int; -pub type nfds_t = c_ulong; -pub type projid_t = c_int; -pub type zoneid_t = c_int; -pub type psetid_t = c_int; -pub type processorid_t = c_int; -pub type chipid_t = c_int; -pub type ctid_t = crate::id_t; - -pub type suseconds_t = c_long; -pub type 
off_t = c_long; -pub type useconds_t = c_uint; -pub type socklen_t = c_uint; -pub type sa_family_t = u16; -pub type pthread_t = c_uint; -pub type pthread_key_t = c_uint; -pub type thread_t = c_uint; -pub type blksize_t = c_int; -pub type nl_item = c_int; -pub type mqd_t = *mut c_void; -pub type id_t = c_int; -pub type idtype_t = c_uint; -pub type shmatt_t = c_ulong; - -pub type lgrp_id_t = crate::id_t; -pub type lgrp_mem_size_t = c_longlong; -pub type lgrp_cookie_t = crate::uintptr_t; -pub type lgrp_content_t = c_uint; -pub type lgrp_lat_between_t = c_uint; -pub type lgrp_mem_size_flag_t = c_uint; -pub type lgrp_view_t = c_uint; - -pub type posix_spawnattr_t = *mut c_void; -pub type posix_spawn_file_actions_t = *mut c_void; - -#[derive(Debug)] -pub enum timezone {} -impl Copy for timezone {} -impl Clone for timezone { - fn clone(&self) -> timezone { - *self - } -} - -#[derive(Debug)] -pub enum ucred_t {} -impl Copy for ucred_t {} -impl Clone for ucred_t { - fn clone(&self) -> ucred_t { - *self - } -} - -s! { - pub struct in_addr { - pub s_addr: crate::in_addr_t, - } - - pub struct ip_mreq { - pub imr_multiaddr: in_addr, - pub imr_interface: in_addr, - } - - pub struct ip_mreq_source { - pub imr_multiaddr: in_addr, - pub imr_sourceaddr: in_addr, - pub imr_interface: in_addr, - } - - pub struct ipc_perm { - pub uid: crate::uid_t, - pub gid: crate::gid_t, - pub cuid: crate::uid_t, - pub cgid: crate::gid_t, - pub mode: mode_t, - pub seq: c_uint, - pub key: crate::key_t, - } - - pub struct sockaddr { - pub sa_family: sa_family_t, - pub sa_data: [c_char; 14], - } - - pub struct sockaddr_in { - pub sin_family: sa_family_t, - pub sin_port: crate::in_port_t, - pub sin_addr: crate::in_addr, - pub sin_zero: [c_char; 8], - } - - pub struct sockaddr_in6 { - pub sin6_family: sa_family_t, - pub sin6_port: crate::in_port_t, - pub sin6_flowinfo: u32, - pub sin6_addr: crate::in6_addr, - pub sin6_scope_id: u32, - pub __sin6_src_id: u32, - } - - pub struct in_pktinfo { - pub ipi_ifindex: c_uint, - pub ipi_spec_dst: crate::in_addr, - pub ipi_addr: crate::in_addr, - } - - pub struct in6_pktinfo { - pub ipi6_addr: crate::in6_addr, - pub ipi6_ifindex: c_uint, - } - - pub struct passwd { - pub pw_name: *mut c_char, - pub pw_passwd: *mut c_char, - pub pw_uid: crate::uid_t, - pub pw_gid: crate::gid_t, - pub pw_age: *mut c_char, - pub pw_comment: *mut c_char, - pub pw_gecos: *mut c_char, - pub pw_dir: *mut c_char, - pub pw_shell: *mut c_char, - } - - pub struct ifaddrs { - pub ifa_next: *mut ifaddrs, - pub ifa_name: *mut c_char, - pub ifa_flags: u64, - pub ifa_addr: *mut crate::sockaddr, - pub ifa_netmask: *mut crate::sockaddr, - pub ifa_dstaddr: *mut crate::sockaddr, - pub ifa_data: *mut c_void, - } - - pub struct itimerspec { - pub it_interval: crate::timespec, - pub it_value: crate::timespec, - } - - pub struct tm { - pub tm_sec: c_int, - pub tm_min: c_int, - pub tm_hour: c_int, - pub tm_mday: c_int, - pub tm_mon: c_int, - pub tm_year: c_int, - pub tm_wday: c_int, - pub tm_yday: c_int, - pub tm_isdst: c_int, - } - - pub struct msghdr { - pub msg_name: *mut c_void, - pub msg_namelen: crate::socklen_t, - pub msg_iov: *mut crate::iovec, - pub msg_iovlen: c_int, - pub msg_control: *mut c_void, - pub msg_controllen: crate::socklen_t, - pub msg_flags: c_int, - } - - pub struct cmsghdr { - pub cmsg_len: crate::socklen_t, - pub cmsg_level: c_int, - pub cmsg_type: c_int, - } - - pub struct pthread_attr_t { - __pthread_attrp: *mut c_void, - } - - pub struct pthread_mutex_t { - __pthread_mutex_flag1: u16, - 
__pthread_mutex_flag2: u8, - __pthread_mutex_ceiling: u8, - __pthread_mutex_type: u16, - __pthread_mutex_magic: u16, - __pthread_mutex_lock: u64, - __pthread_mutex_data: u64, - } - - pub struct pthread_mutexattr_t { - __pthread_mutexattrp: *mut c_void, - } - - pub struct pthread_cond_t { - __pthread_cond_flag: [u8; 4], - __pthread_cond_type: u16, - __pthread_cond_magic: u16, - __pthread_cond_data: u64, - } - - pub struct pthread_condattr_t { - __pthread_condattrp: *mut c_void, - } - - pub struct pthread_rwlock_t { - __pthread_rwlock_readers: i32, - __pthread_rwlock_type: u16, - __pthread_rwlock_magic: u16, - __pthread_rwlock_mutex: crate::pthread_mutex_t, - __pthread_rwlock_readercv: crate::pthread_cond_t, - __pthread_rwlock_writercv: crate::pthread_cond_t, - } - - pub struct pthread_rwlockattr_t { - __pthread_rwlockattrp: *mut c_void, - } - - pub struct dirent { - pub d_ino: crate::ino_t, - pub d_off: off_t, - pub d_reclen: u16, - pub d_name: [c_char; 3], - } - - pub struct glob_t { - pub gl_pathc: size_t, - pub gl_pathv: *mut *mut c_char, - pub gl_offs: size_t, - __unused1: *mut c_void, - __unused2: c_int, - #[cfg(target_os = "illumos")] - __unused3: c_int, - #[cfg(target_os = "illumos")] - __unused4: c_int, - #[cfg(target_os = "illumos")] - __unused5: *mut c_void, - #[cfg(target_os = "illumos")] - __unused6: *mut c_void, - #[cfg(target_os = "illumos")] - __unused7: *mut c_void, - #[cfg(target_os = "illumos")] - __unused8: *mut c_void, - #[cfg(target_os = "illumos")] - __unused9: *mut c_void, - #[cfg(target_os = "illumos")] - __unused10: *mut c_void, - } - - pub struct addrinfo { - pub ai_flags: c_int, - pub ai_family: c_int, - pub ai_socktype: c_int, - pub ai_protocol: c_int, - #[cfg(target_arch = "sparc64")] - __sparcv9_pad: c_int, - pub ai_addrlen: crate::socklen_t, - pub ai_canonname: *mut c_char, - pub ai_addr: *mut crate::sockaddr, - pub ai_next: *mut addrinfo, - } - - pub struct sigset_t { - bits: [u32; 4], - } - - pub struct sigaction { - pub sa_flags: c_int, - pub sa_sigaction: crate::sighandler_t, - pub sa_mask: sigset_t, - } - - pub struct stack_t { - pub ss_sp: *mut c_void, - pub ss_size: size_t, - pub ss_flags: c_int, - } - - pub struct statvfs { - pub f_bsize: c_ulong, - pub f_frsize: c_ulong, - pub f_blocks: crate::fsblkcnt_t, - pub f_bfree: crate::fsblkcnt_t, - pub f_bavail: crate::fsblkcnt_t, - pub f_files: crate::fsfilcnt_t, - pub f_ffree: crate::fsfilcnt_t, - pub f_favail: crate::fsfilcnt_t, - pub f_fsid: c_ulong, - pub f_basetype: [c_char; 16], - pub f_flag: c_ulong, - pub f_namemax: c_ulong, - pub f_fstr: [c_char; 32], - } - - pub struct sendfilevec_t { - pub sfv_fd: c_int, - pub sfv_flag: c_uint, - pub sfv_off: off_t, - pub sfv_len: size_t, - } - - pub struct sched_param { - pub sched_priority: c_int, - sched_pad: [c_int; 8], - } - - pub struct Dl_info { - pub dli_fname: *const c_char, - pub dli_fbase: *mut c_void, - pub dli_sname: *const c_char, - pub dli_saddr: *mut c_void, - } - - pub struct stat { - pub st_dev: crate::dev_t, - pub st_ino: crate::ino_t, - pub st_mode: mode_t, - pub st_nlink: crate::nlink_t, - pub st_uid: crate::uid_t, - pub st_gid: crate::gid_t, - pub st_rdev: crate::dev_t, - pub st_size: off_t, - pub st_atime: crate::time_t, - pub st_atime_nsec: c_long, - pub st_mtime: crate::time_t, - pub st_mtime_nsec: c_long, - pub st_ctime: crate::time_t, - pub st_ctime_nsec: c_long, - pub st_blksize: crate::blksize_t, - pub st_blocks: crate::blkcnt_t, - pub st_fstype: [c_char; _ST_FSTYPSZ as usize], - } - - pub struct termios { - pub c_iflag: 
crate::tcflag_t, - pub c_oflag: crate::tcflag_t, - pub c_cflag: crate::tcflag_t, - pub c_lflag: crate::tcflag_t, - pub c_cc: [crate::cc_t; crate::NCCS], - } - - pub struct lconv { - pub decimal_point: *mut c_char, - pub thousands_sep: *mut c_char, - pub grouping: *mut c_char, - pub int_curr_symbol: *mut c_char, - pub currency_symbol: *mut c_char, - pub mon_decimal_point: *mut c_char, - pub mon_thousands_sep: *mut c_char, - pub mon_grouping: *mut c_char, - pub positive_sign: *mut c_char, - pub negative_sign: *mut c_char, - pub int_frac_digits: c_char, - pub frac_digits: c_char, - pub p_cs_precedes: c_char, - pub p_sep_by_space: c_char, - pub n_cs_precedes: c_char, - pub n_sep_by_space: c_char, - pub p_sign_posn: c_char, - pub n_sign_posn: c_char, - pub int_p_cs_precedes: c_char, - pub int_p_sep_by_space: c_char, - pub int_n_cs_precedes: c_char, - pub int_n_sep_by_space: c_char, - pub int_p_sign_posn: c_char, - pub int_n_sign_posn: c_char, - } - - pub struct sem_t { - pub sem_count: u32, - pub sem_type: u16, - pub sem_magic: u16, - pub sem_pad1: [u64; 3], - pub sem_pad2: [u64; 2], - } - - pub struct flock { - pub l_type: c_short, - pub l_whence: c_short, - pub l_start: off_t, - pub l_len: off_t, - pub l_sysid: c_int, - pub l_pid: crate::pid_t, - pub l_pad: [c_long; 4], - } - - pub struct if_nameindex { - pub if_index: c_uint, - pub if_name: *mut c_char, - } - - pub struct mq_attr { - pub mq_flags: c_long, - pub mq_maxmsg: c_long, - pub mq_msgsize: c_long, - pub mq_curmsgs: c_long, - _pad: [c_int; 12], - } - - pub struct port_event { - pub portev_events: c_int, - pub portev_source: c_ushort, - pub portev_pad: c_ushort, - pub portev_object: crate::uintptr_t, - pub portev_user: *mut c_void, - } - - pub struct port_notify { - pub portnfy_port: c_int, - pub portnfy_user: *mut c_void, - } - - pub struct aio_result_t { - pub aio_return: ssize_t, - pub aio_errno: c_int, - } - - pub struct exit_status { - e_termination: c_short, - e_exit: c_short, - } - - pub struct utmp { - pub ut_user: [c_char; 8], - pub ut_id: [c_char; 4], - pub ut_line: [c_char; 12], - pub ut_pid: c_short, - pub ut_type: c_short, - pub ut_exit: exit_status, - pub ut_time: crate::time_t, - } - - pub struct timex { - pub modes: u32, - pub offset: i32, - pub freq: i32, - pub maxerror: i32, - pub esterror: i32, - pub status: i32, - pub constant: i32, - pub precision: i32, - pub tolerance: i32, - pub ppsfreq: i32, - pub jitter: i32, - pub shift: i32, - pub stabil: i32, - pub jitcnt: i32, - pub calcnt: i32, - pub errcnt: i32, - pub stbcnt: i32, - } - - pub struct ntptimeval { - pub time: crate::timeval, - pub maxerror: i32, - pub esterror: i32, - } - - pub struct mmapobj_result_t { - pub mr_addr: crate::caddr_t, - pub mr_msize: size_t, - pub mr_fsize: size_t, - pub mr_offset: size_t, - pub mr_prot: c_uint, - pub mr_flags: c_uint, - } - - pub struct lgrp_affinity_args_t { - pub idtype: crate::idtype_t, - pub id: crate::id_t, - pub lgrp: crate::lgrp_id_t, - pub aff: crate::lgrp_affinity_t, - } - - pub struct processor_info_t { - pub pi_state: c_int, - pub pi_processor_type: [c_char; PI_TYPELEN as usize], - pub pi_fputypes: [c_char; PI_FPUTYPE as usize], - pub pi_clock: c_int, - } - - pub struct option { - pub name: *const c_char, - pub has_arg: c_int, - pub flag: *mut c_int, - pub val: c_int, - } -} - -s_no_extra_traits! 
{ - pub struct sockaddr_un { - pub sun_family: sa_family_t, - pub sun_path: [c_char; 108], - } - - pub struct utsname { - pub sysname: [c_char; 257], - pub nodename: [c_char; 257], - pub release: [c_char; 257], - pub version: [c_char; 257], - pub machine: [c_char; 257], - } - - pub struct fd_set { - #[cfg(target_pointer_width = "64")] - fds_bits: [i64; FD_SETSIZE as usize / 64], - #[cfg(target_pointer_width = "32")] - fds_bits: [i32; FD_SETSIZE as usize / 32], - } - - pub struct sockaddr_storage { - pub ss_family: crate::sa_family_t, - __ss_pad1: [u8; 6], - __ss_align: i64, - __ss_pad2: [u8; 240], - } - - #[cfg_attr(target_pointer_width = "64", repr(align(8)))] - pub struct siginfo_t { - pub si_signo: c_int, - pub si_code: c_int, - pub si_errno: c_int, - #[cfg(target_pointer_width = "64")] - pub si_pad: c_int, - - __data_pad: [c_int; SIGINFO_DATA_SIZE], - } - - pub struct sockaddr_dl { - pub sdl_family: c_ushort, - pub sdl_index: c_ushort, - pub sdl_type: c_uchar, - pub sdl_nlen: c_uchar, - pub sdl_alen: c_uchar, - pub sdl_slen: c_uchar, - pub sdl_data: [c_char; 244], - } - - pub struct sigevent { - pub sigev_notify: c_int, - pub sigev_signo: c_int, - pub sigev_value: crate::sigval, - pub ss_sp: *mut c_void, - pub sigev_notify_attributes: *const crate::pthread_attr_t, - __sigev_pad2: c_int, - } - - #[repr(align(16))] - pub union pad128_t { - // pub _q in this structure would be a "long double", of 16 bytes - pub _l: [i32; 4], - } - - #[repr(align(16))] - pub union upad128_t { - // pub _q in this structure would be a "long double", of 16 bytes - pub _l: [u32; 4], - } -} - -cfg_if! { - if #[cfg(feature = "extra_traits")] { - impl PartialEq for sockaddr_un { - fn eq(&self, other: &sockaddr_un) -> bool { - self.sun_family == other.sun_family - && self - .sun_path - .iter() - .zip(other.sun_path.iter()) - .all(|(a, b)| a == b) - } - } - impl Eq for sockaddr_un {} - impl hash::Hash for sockaddr_un { - fn hash(&self, state: &mut H) { - self.sun_family.hash(state); - self.sun_path.hash(state); - } - } - - impl PartialEq for utsname { - fn eq(&self, other: &utsname) -> bool { - self.sysname - .iter() - .zip(other.sysname.iter()) - .all(|(a, b)| a == b) - && self - .nodename - .iter() - .zip(other.nodename.iter()) - .all(|(a, b)| a == b) - && self - .release - .iter() - .zip(other.release.iter()) - .all(|(a, b)| a == b) - && self - .version - .iter() - .zip(other.version.iter()) - .all(|(a, b)| a == b) - && self - .machine - .iter() - .zip(other.machine.iter()) - .all(|(a, b)| a == b) - } - } - impl Eq for utsname {} - impl hash::Hash for utsname { - fn hash(&self, state: &mut H) { - self.sysname.hash(state); - self.nodename.hash(state); - self.release.hash(state); - self.version.hash(state); - self.machine.hash(state); - } - } - - impl PartialEq for fd_set { - fn eq(&self, other: &fd_set) -> bool { - self.fds_bits - .iter() - .zip(other.fds_bits.iter()) - .all(|(a, b)| a == b) - } - } - impl Eq for fd_set {} - impl hash::Hash for fd_set { - fn hash(&self, state: &mut H) { - self.fds_bits.hash(state); - } - } - - impl PartialEq for sockaddr_storage { - fn eq(&self, other: &sockaddr_storage) -> bool { - self.ss_family == other.ss_family - && self.__ss_pad1 == other.__ss_pad1 - && self.__ss_align == other.__ss_align - && self - .__ss_pad2 - .iter() - .zip(other.__ss_pad2.iter()) - .all(|(a, b)| a == b) - } - } - impl Eq for sockaddr_storage {} - impl hash::Hash for sockaddr_storage { - fn hash(&self, state: &mut H) { - self.ss_family.hash(state); - self.__ss_pad1.hash(state); - 
self.__ss_align.hash(state); - self.__ss_pad2.hash(state); - } - } - - impl siginfo_t { - /// The siginfo_t will have differing contents based on the delivered signal. Based on - /// `si_signo`, this determines how many of the `c_int` pad fields contain valid data - /// exposed by the C unions. - /// - /// It is not yet exhausitive for the OS-defined types, and defaults to assuming the - /// entire data pad area is "valid" for otherwise unrecognized signal numbers. - fn data_field_count(&self) -> usize { - match self.si_signo { - SIGSEGV | SIGBUS | SIGILL | SIGTRAP | SIGFPE => { - size_of::() / size_of::() - } - SIGCLD => size_of::() / size_of::(), - SIGHUP - | SIGINT - | SIGQUIT - | SIGABRT - | SIGSYS - | SIGPIPE - | SIGALRM - | SIGTERM - | crate::SIGUSR1 - | crate::SIGUSR2 - | SIGPWR - | SIGWINCH - | SIGURG => size_of::() / size_of::(), - _ => SIGINFO_DATA_SIZE, - } - } - } - impl PartialEq for siginfo_t { - fn eq(&self, other: &siginfo_t) -> bool { - if self.si_signo == other.si_signo - && self.si_code == other.si_code - && self.si_errno == other.si_errno - { - // FIXME(solarish): The `si_pad` field in the 64-bit version of the struct is ignored - // (for now) when doing comparisons. - - let field_count = self.data_field_count(); - self.__data_pad[..field_count] - .iter() - .zip(other.__data_pad[..field_count].iter()) - .all(|(a, b)| a == b) - } else { - false - } - } - } - impl Eq for siginfo_t {} - impl hash::Hash for siginfo_t { - fn hash(&self, state: &mut H) { - self.si_signo.hash(state); - self.si_code.hash(state); - self.si_errno.hash(state); - - // FIXME(solarish): The `si_pad` field in the 64-bit version of the struct is ignored - // (for now) when doing hashing. - - let field_count = self.data_field_count(); - self.__data_pad[..field_count].hash(state) - } - } - - impl PartialEq for sockaddr_dl { - fn eq(&self, other: &sockaddr_dl) -> bool { - self.sdl_family == other.sdl_family - && self.sdl_index == other.sdl_index - && self.sdl_type == other.sdl_type - && self.sdl_nlen == other.sdl_nlen - && self.sdl_alen == other.sdl_alen - && self.sdl_slen == other.sdl_slen - && self - .sdl_data - .iter() - .zip(other.sdl_data.iter()) - .all(|(a, b)| a == b) - } - } - impl Eq for sockaddr_dl {} - impl hash::Hash for sockaddr_dl { - fn hash(&self, state: &mut H) { - self.sdl_family.hash(state); - self.sdl_index.hash(state); - self.sdl_type.hash(state); - self.sdl_nlen.hash(state); - self.sdl_alen.hash(state); - self.sdl_slen.hash(state); - self.sdl_data.hash(state); - } - } - - impl PartialEq for sigevent { - fn eq(&self, other: &sigevent) -> bool { - self.sigev_notify == other.sigev_notify - && self.sigev_signo == other.sigev_signo - && self.sigev_value == other.sigev_value - && self.ss_sp == other.ss_sp - && self.sigev_notify_attributes == other.sigev_notify_attributes - } - } - impl Eq for sigevent {} - impl hash::Hash for sigevent { - fn hash(&self, state: &mut H) { - self.sigev_notify.hash(state); - self.sigev_signo.hash(state); - self.sigev_value.hash(state); - self.ss_sp.hash(state); - self.sigev_notify_attributes.hash(state); - } - } - - impl PartialEq for pad128_t { - fn eq(&self, other: &pad128_t) -> bool { - unsafe { - // FIXME(solarish): self._q == other._q || - self._l == other._l - } - } - } - impl Eq for pad128_t {} - impl hash::Hash for pad128_t { - fn hash(&self, state: &mut H) { - unsafe { - // FIXME(solarish): state.write_i64(self._q as i64); - self._l.hash(state); - } - } - } - impl PartialEq for upad128_t { - fn eq(&self, other: &upad128_t) -> bool { - unsafe { - // 
FIXME(solarish): self._q == other._q || - self._l == other._l - } - } - } - impl Eq for upad128_t {} - impl hash::Hash for upad128_t { - fn hash(&self, state: &mut H) { - unsafe { - // FIXME(solarish): state.write_i64(self._q as i64); - self._l.hash(state); - } - } - } - } -} - -cfg_if! { - if #[cfg(target_pointer_width = "64")] { - const SIGINFO_DATA_SIZE: usize = 60; - } else { - const SIGINFO_DATA_SIZE: usize = 29; - } -} - -#[repr(C)] -struct siginfo_fault { - addr: *mut c_void, - trapno: c_int, - pc: *mut crate::caddr_t, -} -impl Copy for siginfo_fault {} -impl Clone for siginfo_fault { - fn clone(&self) -> Self { - *self - } -} - -#[repr(C)] -struct siginfo_cldval { - utime: crate::clock_t, - status: c_int, - stime: crate::clock_t, -} -impl Copy for siginfo_cldval {} -impl Clone for siginfo_cldval { - fn clone(&self) -> Self { - *self - } -} - -#[repr(C)] -struct siginfo_killval { - uid: crate::uid_t, - value: crate::sigval, - // Pad out to match the SIGCLD value size - _pad: *mut c_void, -} -impl Copy for siginfo_killval {} -impl Clone for siginfo_killval { - fn clone(&self) -> Self { - *self - } -} - -#[repr(C)] -struct siginfo_sigcld { - pid: crate::pid_t, - val: siginfo_cldval, - ctid: crate::ctid_t, - zoneid: crate::zoneid_t, -} -impl Copy for siginfo_sigcld {} -impl Clone for siginfo_sigcld { - fn clone(&self) -> Self { - *self - } -} - -#[repr(C)] -struct siginfo_kill { - pid: crate::pid_t, - val: siginfo_killval, - ctid: crate::ctid_t, - zoneid: crate::zoneid_t, -} -impl Copy for siginfo_kill {} -impl Clone for siginfo_kill { - fn clone(&self) -> Self { - *self - } -} - -impl siginfo_t { - unsafe fn sidata(&self) -> T { - *((&self.__data_pad) as *const c_int as *const T) - } - pub unsafe fn si_addr(&self) -> *mut c_void { - let sifault: siginfo_fault = self.sidata(); - sifault.addr - } - pub unsafe fn si_uid(&self) -> crate::uid_t { - let kill: siginfo_kill = self.sidata(); - kill.val.uid - } - pub unsafe fn si_value(&self) -> crate::sigval { - let kill: siginfo_kill = self.sidata(); - kill.val.value - } - pub unsafe fn si_pid(&self) -> crate::pid_t { - let sigcld: siginfo_sigcld = self.sidata(); - sigcld.pid - } - pub unsafe fn si_status(&self) -> c_int { - let sigcld: siginfo_sigcld = self.sidata(); - sigcld.val.status - } - pub unsafe fn si_utime(&self) -> c_long { - let sigcld: siginfo_sigcld = self.sidata(); - sigcld.val.utime - } - pub unsafe fn si_stime(&self) -> c_long { - let sigcld: siginfo_sigcld = self.sidata(); - sigcld.val.stime - } -} - -pub const LC_CTYPE: c_int = 0; -pub const LC_NUMERIC: c_int = 1; -pub const LC_TIME: c_int = 2; -pub const LC_COLLATE: c_int = 3; -pub const LC_MONETARY: c_int = 4; -pub const LC_MESSAGES: c_int = 5; -pub const LC_ALL: c_int = 6; -pub const LC_CTYPE_MASK: c_int = 1 << LC_CTYPE; -pub const LC_NUMERIC_MASK: c_int = 1 << LC_NUMERIC; -pub const LC_TIME_MASK: c_int = 1 << LC_TIME; -pub const LC_COLLATE_MASK: c_int = 1 << LC_COLLATE; -pub const LC_MONETARY_MASK: c_int = 1 << LC_MONETARY; -pub const LC_MESSAGES_MASK: c_int = 1 << LC_MESSAGES; -pub const LC_ALL_MASK: c_int = LC_CTYPE_MASK - | LC_NUMERIC_MASK - | LC_TIME_MASK - | LC_COLLATE_MASK - | LC_MONETARY_MASK - | LC_MESSAGES_MASK; - -pub const DAY_1: crate::nl_item = 1; -pub const DAY_2: crate::nl_item = 2; -pub const DAY_3: crate::nl_item = 3; -pub const DAY_4: crate::nl_item = 4; -pub const DAY_5: crate::nl_item = 5; -pub const DAY_6: crate::nl_item = 6; -pub const DAY_7: crate::nl_item = 7; - -pub const ABDAY_1: crate::nl_item = 8; -pub const ABDAY_2: crate::nl_item = 9; -pub 
const ABDAY_3: crate::nl_item = 10; -pub const ABDAY_4: crate::nl_item = 11; -pub const ABDAY_5: crate::nl_item = 12; -pub const ABDAY_6: crate::nl_item = 13; -pub const ABDAY_7: crate::nl_item = 14; - -pub const MON_1: crate::nl_item = 15; -pub const MON_2: crate::nl_item = 16; -pub const MON_3: crate::nl_item = 17; -pub const MON_4: crate::nl_item = 18; -pub const MON_5: crate::nl_item = 19; -pub const MON_6: crate::nl_item = 20; -pub const MON_7: crate::nl_item = 21; -pub const MON_8: crate::nl_item = 22; -pub const MON_9: crate::nl_item = 23; -pub const MON_10: crate::nl_item = 24; -pub const MON_11: crate::nl_item = 25; -pub const MON_12: crate::nl_item = 26; - -pub const ABMON_1: crate::nl_item = 27; -pub const ABMON_2: crate::nl_item = 28; -pub const ABMON_3: crate::nl_item = 29; -pub const ABMON_4: crate::nl_item = 30; -pub const ABMON_5: crate::nl_item = 31; -pub const ABMON_6: crate::nl_item = 32; -pub const ABMON_7: crate::nl_item = 33; -pub const ABMON_8: crate::nl_item = 34; -pub const ABMON_9: crate::nl_item = 35; -pub const ABMON_10: crate::nl_item = 36; -pub const ABMON_11: crate::nl_item = 37; -pub const ABMON_12: crate::nl_item = 38; - -pub const RADIXCHAR: crate::nl_item = 39; -pub const THOUSEP: crate::nl_item = 40; -pub const YESSTR: crate::nl_item = 41; -pub const NOSTR: crate::nl_item = 42; -pub const CRNCYSTR: crate::nl_item = 43; - -pub const D_T_FMT: crate::nl_item = 44; -pub const D_FMT: crate::nl_item = 45; -pub const T_FMT: crate::nl_item = 46; -pub const AM_STR: crate::nl_item = 47; -pub const PM_STR: crate::nl_item = 48; - -pub const CODESET: crate::nl_item = 49; -pub const T_FMT_AMPM: crate::nl_item = 50; -pub const ERA: crate::nl_item = 51; -pub const ERA_D_FMT: crate::nl_item = 52; -pub const ERA_D_T_FMT: crate::nl_item = 53; -pub const ERA_T_FMT: crate::nl_item = 54; -pub const ALT_DIGITS: crate::nl_item = 55; -pub const YESEXPR: crate::nl_item = 56; -pub const NOEXPR: crate::nl_item = 57; -pub const _DATE_FMT: crate::nl_item = 58; -pub const MAXSTRMSG: crate::nl_item = 58; - -pub const PATH_MAX: c_int = 1024; - -pub const SA_ONSTACK: c_int = 0x00000001; -pub const SA_RESETHAND: c_int = 0x00000002; -pub const SA_RESTART: c_int = 0x00000004; -pub const SA_SIGINFO: c_int = 0x00000008; -pub const SA_NODEFER: c_int = 0x00000010; -pub const SA_NOCLDWAIT: c_int = 0x00010000; -pub const SA_NOCLDSTOP: c_int = 0x00020000; - -pub const SS_ONSTACK: c_int = 1; -pub const SS_DISABLE: c_int = 2; - -pub const FIOCLEX: c_int = 0x20006601; -pub const FIONCLEX: c_int = 0x20006602; -pub const FIONREAD: c_int = 0x4004667f; -pub const FIONBIO: c_int = 0x8004667e; -pub const FIOASYNC: c_int = 0x8004667d; -pub const FIOSETOWN: c_int = 0x8004667c; -pub const FIOGETOWN: c_int = 0x4004667b; - -pub const SIGCHLD: c_int = 18; -pub const SIGCLD: c_int = SIGCHLD; -pub const SIGBUS: c_int = 10; -pub const SIG_BLOCK: c_int = 1; -pub const SIG_UNBLOCK: c_int = 2; -pub const SIG_SETMASK: c_int = 3; - -pub const AIO_CANCELED: c_int = 0; -pub const AIO_ALLDONE: c_int = 1; -pub const AIO_NOTCANCELED: c_int = 2; -pub const LIO_NOP: c_int = 0; -pub const LIO_READ: c_int = 1; -pub const LIO_WRITE: c_int = 2; -pub const LIO_NOWAIT: c_int = 0; -pub const LIO_WAIT: c_int = 1; - -pub const SIGEV_NONE: c_int = 1; -pub const SIGEV_SIGNAL: c_int = 2; -pub const SIGEV_THREAD: c_int = 3; -pub const SIGEV_PORT: c_int = 4; - -pub const CLD_EXITED: c_int = 1; -pub const CLD_KILLED: c_int = 2; -pub const CLD_DUMPED: c_int = 3; -pub const CLD_TRAPPED: c_int = 4; -pub const CLD_STOPPED: c_int = 5; -pub const 
CLD_CONTINUED: c_int = 6; - -pub const IP_RECVDSTADDR: c_int = 0x7; -pub const IP_PKTINFO: c_int = 0x1a; -pub const IP_DONTFRAG: c_int = 0x1b; -pub const IP_SEC_OPT: c_int = 0x22; - -pub const IPV6_UNICAST_HOPS: c_int = 0x5; -pub const IPV6_MULTICAST_IF: c_int = 0x6; -pub const IPV6_MULTICAST_HOPS: c_int = 0x7; -pub const IPV6_MULTICAST_LOOP: c_int = 0x8; -pub const IPV6_PKTINFO: c_int = 0xb; -pub const IPV6_RECVPKTINFO: c_int = 0x12; -pub const IPV6_RECVTCLASS: c_int = 0x19; -pub const IPV6_DONTFRAG: c_int = 0x21; -pub const IPV6_SEC_OPT: c_int = 0x22; -pub const IPV6_TCLASS: c_int = 0x26; -pub const IPV6_V6ONLY: c_int = 0x27; -pub const IPV6_BOUND_IF: c_int = 0x41; - -cfg_if! { - if #[cfg(target_pointer_width = "64")] { - pub const FD_SETSIZE: usize = 65536; - } else { - pub const FD_SETSIZE: usize = 1024; - } -} - -pub const ST_RDONLY: c_ulong = 1; -pub const ST_NOSUID: c_ulong = 2; - -pub const NI_MAXHOST: crate::socklen_t = 1025; -pub const NI_MAXSERV: crate::socklen_t = 32; - -pub const EXIT_FAILURE: c_int = 1; -pub const EXIT_SUCCESS: c_int = 0; -pub const RAND_MAX: c_int = 32767; -pub const EOF: c_int = -1; -pub const SEEK_SET: c_int = 0; -pub const SEEK_CUR: c_int = 1; -pub const SEEK_END: c_int = 2; -pub const SEEK_DATA: c_int = 3; -pub const SEEK_HOLE: c_int = 4; -pub const _IOFBF: c_int = 0; -pub const _IONBF: c_int = 4; -pub const _IOLBF: c_int = 64; -pub const BUFSIZ: c_uint = 1024; -pub const FOPEN_MAX: c_uint = 20; -pub const FILENAME_MAX: c_uint = 1024; -pub const L_tmpnam: c_uint = 25; -pub const TMP_MAX: c_uint = 17576; -pub const PIPE_BUF: c_int = 5120; - -pub const GRND_NONBLOCK: c_uint = 0x0001; -pub const GRND_RANDOM: c_uint = 0x0002; - -pub const O_RDONLY: c_int = 0; -pub const O_WRONLY: c_int = 1; -pub const O_RDWR: c_int = 2; -pub const O_NDELAY: c_int = 0x04; -pub const O_APPEND: c_int = 8; -pub const O_DSYNC: c_int = 0x40; -pub const O_RSYNC: c_int = 0x8000; -pub const O_CREAT: c_int = 256; -pub const O_EXCL: c_int = 1024; -pub const O_NOCTTY: c_int = 2048; -pub const O_TRUNC: c_int = 512; -pub const O_NOFOLLOW: c_int = 0x20000; -pub const O_SEARCH: c_int = 0x200000; -pub const O_EXEC: c_int = 0x400000; -pub const O_CLOEXEC: c_int = 0x800000; -pub const O_ACCMODE: c_int = 0x600003; -pub const O_XATTR: c_int = 0x4000; -pub const O_DIRECTORY: c_int = 0x1000000; -pub const S_IFIFO: mode_t = 0o1_0000; -pub const S_IFCHR: mode_t = 0o2_0000; -pub const S_IFBLK: mode_t = 0o6_0000; -pub const S_IFDIR: mode_t = 0o4_0000; -pub const S_IFREG: mode_t = 0o10_0000; -pub const S_IFLNK: mode_t = 0o12_0000; -pub const S_IFSOCK: mode_t = 0o14_0000; -pub const S_IFMT: mode_t = 0o17_0000; -pub const S_IEXEC: mode_t = 0o0100; -pub const S_IWRITE: mode_t = 0o0200; -pub const S_IREAD: mode_t = 0o0400; -pub const S_IRWXU: mode_t = 0o0700; -pub const S_IXUSR: mode_t = 0o0100; -pub const S_IWUSR: mode_t = 0o0200; -pub const S_IRUSR: mode_t = 0o0400; -pub const S_IRWXG: mode_t = 0o0070; -pub const S_IXGRP: mode_t = 0o0010; -pub const S_IWGRP: mode_t = 0o0020; -pub const S_IRGRP: mode_t = 0o0040; -pub const S_IRWXO: mode_t = 0o0007; -pub const S_IXOTH: mode_t = 0o0001; -pub const S_IWOTH: mode_t = 0o0002; -pub const S_IROTH: mode_t = 0o0004; -pub const F_OK: c_int = 0; -pub const R_OK: c_int = 4; -pub const W_OK: c_int = 2; -pub const X_OK: c_int = 1; -pub const STDIN_FILENO: c_int = 0; -pub const STDOUT_FILENO: c_int = 1; -pub const STDERR_FILENO: c_int = 2; -pub const F_LOCK: c_int = 1; -pub const F_TEST: c_int = 3; -pub const F_TLOCK: c_int = 2; -pub const F_ULOCK: c_int = 0; -pub const 
F_SETLK: c_int = 6; -pub const F_SETLKW: c_int = 7; -pub const F_GETLK: c_int = 14; -pub const F_ALLOCSP: c_int = 10; -pub const F_FREESP: c_int = 11; -pub const F_BLOCKS: c_int = 18; -pub const F_BLKSIZE: c_int = 19; -pub const F_SHARE: c_int = 40; -pub const F_UNSHARE: c_int = 41; -pub const F_ISSTREAM: c_int = 13; -pub const F_PRIV: c_int = 15; -pub const F_NPRIV: c_int = 16; -pub const F_QUOTACTL: c_int = 17; -pub const F_GETOWN: c_int = 23; -pub const F_SETOWN: c_int = 24; -pub const F_REVOKE: c_int = 25; -pub const F_HASREMOTELOCKS: c_int = 26; -pub const SIGHUP: c_int = 1; -pub const SIGINT: c_int = 2; -pub const SIGQUIT: c_int = 3; -pub const SIGILL: c_int = 4; -pub const SIGABRT: c_int = 6; -pub const SIGEMT: c_int = 7; -pub const SIGFPE: c_int = 8; -pub const SIGKILL: c_int = 9; -pub const SIGSEGV: c_int = 11; -pub const SIGSYS: c_int = 12; -pub const SIGPIPE: c_int = 13; -pub const SIGALRM: c_int = 14; -pub const SIGTERM: c_int = 15; -pub const SIGUSR1: c_int = 16; -pub const SIGUSR2: c_int = 17; -pub const SIGPWR: c_int = 19; -pub const SIGWINCH: c_int = 20; -pub const SIGURG: c_int = 21; -pub const SIGPOLL: c_int = 22; -pub const SIGIO: c_int = SIGPOLL; -pub const SIGSTOP: c_int = 23; -pub const SIGTSTP: c_int = 24; -pub const SIGCONT: c_int = 25; -pub const SIGTTIN: c_int = 26; -pub const SIGTTOU: c_int = 27; -pub const SIGVTALRM: c_int = 28; -pub const SIGPROF: c_int = 29; -pub const SIGXCPU: c_int = 30; -pub const SIGXFSZ: c_int = 31; - -pub const WNOHANG: c_int = 0x40; -pub const WUNTRACED: c_int = 0x04; - -pub const WEXITED: c_int = 0x01; -pub const WTRAPPED: c_int = 0x02; -pub const WSTOPPED: c_int = WUNTRACED; -pub const WCONTINUED: c_int = 0x08; -pub const WNOWAIT: c_int = 0x80; - -pub const AT_FDCWD: c_int = 0xffd19553; -pub const AT_SYMLINK_NOFOLLOW: c_int = 0x1000; -pub const AT_SYMLINK_FOLLOW: c_int = 0x2000; -pub const AT_REMOVEDIR: c_int = 0x1; -pub const _AT_TRIGGER: c_int = 0x2; -pub const AT_EACCESS: c_int = 0x4; - -pub const P_PID: idtype_t = 0; -pub const P_PPID: idtype_t = 1; -pub const P_PGID: idtype_t = 2; -pub const P_SID: idtype_t = 3; -pub const P_CID: idtype_t = 4; -pub const P_UID: idtype_t = 5; -pub const P_GID: idtype_t = 6; -pub const P_ALL: idtype_t = 7; -pub const P_LWPID: idtype_t = 8; -pub const P_TASKID: idtype_t = 9; -pub const P_PROJID: idtype_t = 10; -pub const P_POOLID: idtype_t = 11; -pub const P_ZONEID: idtype_t = 12; -pub const P_CTID: idtype_t = 13; -pub const P_CPUID: idtype_t = 14; -pub const P_PSETID: idtype_t = 15; - -pub const PBIND_NONE: crate::processorid_t = -1; -pub const PBIND_QUERY: crate::processorid_t = -2; - -pub const PS_NONE: c_int = -1; -pub const PS_QUERY: c_int = -2; -pub const PS_MYID: c_int = -3; -pub const PS_SOFT: c_int = -4; -pub const PS_HARD: c_int = -5; -pub const PS_QUERY_TYPE: c_int = -6; -pub const PS_PRIVATE: c_int = 2; - -pub const UTIME_OMIT: c_long = -2; -pub const UTIME_NOW: c_long = -1; - -pub const PROT_NONE: c_int = 0; -pub const PROT_READ: c_int = 1; -pub const PROT_WRITE: c_int = 2; -pub const PROT_EXEC: c_int = 4; - -pub const MAP_SHARED: c_int = 0x0001; -pub const MAP_PRIVATE: c_int = 0x0002; -pub const MAP_FIXED: c_int = 0x0010; -pub const MAP_NORESERVE: c_int = 0x40; -pub const MAP_ANON: c_int = 0x0100; -pub const MAP_ANONYMOUS: c_int = 0x0100; -pub const MAP_RENAME: c_int = 0x20; -pub const MAP_ALIGN: c_int = 0x200; -pub const MAP_TEXT: c_int = 0x400; -pub const MAP_INITDATA: c_int = 0x800; -pub const MAP_FAILED: *mut c_void = !0 as *mut c_void; - -pub const MCL_CURRENT: c_int = 0x0001; 
-pub const MCL_FUTURE: c_int = 0x0002; - -pub const MS_SYNC: c_int = 0x0004; -pub const MS_ASYNC: c_int = 0x0001; -pub const MS_INVALIDATE: c_int = 0x0002; - -pub const MMOBJ_PADDING: c_uint = 0x10000; -pub const MMOBJ_INTERPRET: c_uint = 0x20000; -pub const MR_PADDING: c_uint = 0x1; -pub const MR_HDR_ELF: c_uint = 0x2; - -pub const EPERM: c_int = 1; -pub const ENOENT: c_int = 2; -pub const ESRCH: c_int = 3; -pub const EINTR: c_int = 4; -pub const EIO: c_int = 5; -pub const ENXIO: c_int = 6; -pub const E2BIG: c_int = 7; -pub const ENOEXEC: c_int = 8; -pub const EBADF: c_int = 9; -pub const ECHILD: c_int = 10; -pub const EAGAIN: c_int = 11; -pub const ENOMEM: c_int = 12; -pub const EACCES: c_int = 13; -pub const EFAULT: c_int = 14; -pub const ENOTBLK: c_int = 15; -pub const EBUSY: c_int = 16; -pub const EEXIST: c_int = 17; -pub const EXDEV: c_int = 18; -pub const ENODEV: c_int = 19; -pub const ENOTDIR: c_int = 20; -pub const EISDIR: c_int = 21; -pub const EINVAL: c_int = 22; -pub const ENFILE: c_int = 23; -pub const EMFILE: c_int = 24; -pub const ENOTTY: c_int = 25; -pub const ETXTBSY: c_int = 26; -pub const EFBIG: c_int = 27; -pub const ENOSPC: c_int = 28; -pub const ESPIPE: c_int = 29; -pub const EROFS: c_int = 30; -pub const EMLINK: c_int = 31; -pub const EPIPE: c_int = 32; -pub const EDOM: c_int = 33; -pub const ERANGE: c_int = 34; -pub const ENOMSG: c_int = 35; -pub const EIDRM: c_int = 36; -pub const ECHRNG: c_int = 37; -pub const EL2NSYNC: c_int = 38; -pub const EL3HLT: c_int = 39; -pub const EL3RST: c_int = 40; -pub const ELNRNG: c_int = 41; -pub const EUNATCH: c_int = 42; -pub const ENOCSI: c_int = 43; -pub const EL2HLT: c_int = 44; -pub const EDEADLK: c_int = 45; -pub const ENOLCK: c_int = 46; -pub const ECANCELED: c_int = 47; -pub const ENOTSUP: c_int = 48; -pub const EDQUOT: c_int = 49; -pub const EBADE: c_int = 50; -pub const EBADR: c_int = 51; -pub const EXFULL: c_int = 52; -pub const ENOANO: c_int = 53; -pub const EBADRQC: c_int = 54; -pub const EBADSLT: c_int = 55; -pub const EDEADLOCK: c_int = 56; -pub const EBFONT: c_int = 57; -pub const EOWNERDEAD: c_int = 58; -pub const ENOTRECOVERABLE: c_int = 59; -pub const ENOSTR: c_int = 60; -pub const ENODATA: c_int = 61; -pub const ETIME: c_int = 62; -pub const ENOSR: c_int = 63; -pub const ENONET: c_int = 64; -pub const ENOPKG: c_int = 65; -pub const EREMOTE: c_int = 66; -pub const ENOLINK: c_int = 67; -pub const EADV: c_int = 68; -pub const ESRMNT: c_int = 69; -pub const ECOMM: c_int = 70; -pub const EPROTO: c_int = 71; -pub const ELOCKUNMAPPED: c_int = 72; -pub const ENOTACTIVE: c_int = 73; -pub const EMULTIHOP: c_int = 74; -pub const EADI: c_int = 75; -pub const EBADMSG: c_int = 77; -pub const ENAMETOOLONG: c_int = 78; -pub const EOVERFLOW: c_int = 79; -pub const ENOTUNIQ: c_int = 80; -pub const EBADFD: c_int = 81; -pub const EREMCHG: c_int = 82; -pub const ELIBACC: c_int = 83; -pub const ELIBBAD: c_int = 84; -pub const ELIBSCN: c_int = 85; -pub const ELIBMAX: c_int = 86; -pub const ELIBEXEC: c_int = 87; -pub const EILSEQ: c_int = 88; -pub const ENOSYS: c_int = 89; -pub const ELOOP: c_int = 90; -pub const ERESTART: c_int = 91; -pub const ESTRPIPE: c_int = 92; -pub const ENOTEMPTY: c_int = 93; -pub const EUSERS: c_int = 94; -pub const ENOTSOCK: c_int = 95; -pub const EDESTADDRREQ: c_int = 96; -pub const EMSGSIZE: c_int = 97; -pub const EPROTOTYPE: c_int = 98; -pub const ENOPROTOOPT: c_int = 99; -pub const EPROTONOSUPPORT: c_int = 120; -pub const ESOCKTNOSUPPORT: c_int = 121; -pub const EOPNOTSUPP: c_int = 122; -pub const 
EPFNOSUPPORT: c_int = 123; -pub const EAFNOSUPPORT: c_int = 124; -pub const EADDRINUSE: c_int = 125; -pub const EADDRNOTAVAIL: c_int = 126; -pub const ENETDOWN: c_int = 127; -pub const ENETUNREACH: c_int = 128; -pub const ENETRESET: c_int = 129; -pub const ECONNABORTED: c_int = 130; -pub const ECONNRESET: c_int = 131; -pub const ENOBUFS: c_int = 132; -pub const EISCONN: c_int = 133; -pub const ENOTCONN: c_int = 134; -pub const ESHUTDOWN: c_int = 143; -pub const ETOOMANYREFS: c_int = 144; -pub const ETIMEDOUT: c_int = 145; -pub const ECONNREFUSED: c_int = 146; -pub const EHOSTDOWN: c_int = 147; -pub const EHOSTUNREACH: c_int = 148; -pub const EWOULDBLOCK: c_int = EAGAIN; -pub const EALREADY: c_int = 149; -pub const EINPROGRESS: c_int = 150; -pub const ESTALE: c_int = 151; - -pub const EAI_AGAIN: c_int = 2; -pub const EAI_BADFLAGS: c_int = 3; -pub const EAI_FAIL: c_int = 4; -pub const EAI_FAMILY: c_int = 5; -pub const EAI_MEMORY: c_int = 6; -pub const EAI_NODATA: c_int = 7; -pub const EAI_NONAME: c_int = 8; -pub const EAI_SERVICE: c_int = 9; -pub const EAI_SOCKTYPE: c_int = 10; -pub const EAI_SYSTEM: c_int = 11; -pub const EAI_OVERFLOW: c_int = 12; - -pub const NI_NOFQDN: c_uint = 0x0001; -pub const NI_NUMERICHOST: c_uint = 0x0002; -pub const NI_NAMEREQD: c_uint = 0x0004; -pub const NI_NUMERICSERV: c_uint = 0x0008; -pub const NI_DGRAM: c_uint = 0x0010; -pub const NI_WITHSCOPEID: c_uint = 0x0020; -pub const NI_NUMERICSCOPE: c_uint = 0x0040; - -pub const F_DUPFD: c_int = 0; -pub const F_DUP2FD: c_int = 9; -pub const F_GETFD: c_int = 1; -pub const F_SETFD: c_int = 2; -pub const F_GETFL: c_int = 3; -pub const F_SETFL: c_int = 4; -pub const F_GETXFL: c_int = 45; - -pub const SIGTRAP: c_int = 5; - -pub const GLOB_APPEND: c_int = 32; -pub const GLOB_DOOFFS: c_int = 16; -pub const GLOB_ERR: c_int = 1; -pub const GLOB_MARK: c_int = 2; -pub const GLOB_NOCHECK: c_int = 8; -pub const GLOB_NOSORT: c_int = 4; -pub const GLOB_NOESCAPE: c_int = 64; - -pub const GLOB_NOSPACE: c_int = -2; -pub const GLOB_ABORTED: c_int = -1; -pub const GLOB_NOMATCH: c_int = -3; - -pub const POLLIN: c_short = 0x1; -pub const POLLPRI: c_short = 0x2; -pub const POLLOUT: c_short = 0x4; -pub const POLLERR: c_short = 0x8; -pub const POLLHUP: c_short = 0x10; -pub const POLLNVAL: c_short = 0x20; -pub const POLLNORM: c_short = 0x0040; -pub const POLLRDNORM: c_short = 0x0040; -pub const POLLWRNORM: c_short = 0x4; /* POLLOUT */ -pub const POLLRDBAND: c_short = 0x0080; -pub const POLLWRBAND: c_short = 0x0100; - -pub const POSIX_MADV_NORMAL: c_int = 0; -pub const POSIX_MADV_RANDOM: c_int = 1; -pub const POSIX_MADV_SEQUENTIAL: c_int = 2; -pub const POSIX_MADV_WILLNEED: c_int = 3; -pub const POSIX_MADV_DONTNEED: c_int = 4; - -pub const POSIX_SPAWN_RESETIDS: c_short = 0x1; -pub const POSIX_SPAWN_SETPGROUP: c_short = 0x2; -pub const POSIX_SPAWN_SETSIGDEF: c_short = 0x4; -pub const POSIX_SPAWN_SETSIGMASK: c_short = 0x8; -pub const POSIX_SPAWN_SETSCHEDPARAM: c_short = 0x10; -pub const POSIX_SPAWN_SETSCHEDULER: c_short = 0x20; -pub const POSIX_SPAWN_SETSIGIGN_NP: c_short = 0x800; -pub const POSIX_SPAWN_NOSIGCHLD_NP: c_short = 0x1000; -pub const POSIX_SPAWN_WAITPID_NP: c_short = 0x2000; -pub const POSIX_SPAWN_NOEXECERR_NP: c_short = 0x4000; - -pub const PTHREAD_CREATE_JOINABLE: c_int = 0; -pub const PTHREAD_CREATE_DETACHED: c_int = 0x40; -pub const PTHREAD_PROCESS_SHARED: c_int = 1; -pub const PTHREAD_PROCESS_PRIVATE: c_ushort = 0; -pub const PTHREAD_STACK_MIN: size_t = 4096; - -pub const SIGSTKSZ: size_t = 8192; - -// 
https://illumos.org/man/3c/clock_gettime -// https://github.com/illumos/illumos-gate/ -// blob/HEAD/usr/src/lib/libc/amd64/sys/__clock_gettime.s -// clock_gettime(3c) doesn't seem to accept anything other than CLOCK_REALTIME -// or __CLOCK_REALTIME0 -// -// https://github.com/illumos/illumos-gate/ -// blob/HEAD/usr/src/uts/common/sys/time_impl.h -// Confusing! CLOCK_HIGHRES==CLOCK_MONOTONIC==4 -// __CLOCK_REALTIME0==0 is an obsoleted version of CLOCK_REALTIME==3 -pub const CLOCK_REALTIME: crate::clockid_t = 3; -pub const CLOCK_MONOTONIC: crate::clockid_t = 4; -pub const TIMER_RELTIME: c_int = 0; -pub const TIMER_ABSTIME: c_int = 1; - -pub const RLIMIT_CPU: c_int = 0; -pub const RLIMIT_FSIZE: c_int = 1; -pub const RLIMIT_DATA: c_int = 2; -pub const RLIMIT_STACK: c_int = 3; -pub const RLIMIT_CORE: c_int = 4; -pub const RLIMIT_NOFILE: c_int = 5; -pub const RLIMIT_VMEM: c_int = 6; -pub const RLIMIT_AS: c_int = RLIMIT_VMEM; - -#[deprecated(since = "0.2.64", note = "Not stable across OS versions")] -pub const RLIM_NLIMITS: rlim_t = 7; -pub const RLIM_INFINITY: rlim_t = 0xfffffffffffffffd; - -pub const RUSAGE_SELF: c_int = 0; -pub const RUSAGE_CHILDREN: c_int = -1; - -pub const MADV_NORMAL: c_int = 0; -pub const MADV_RANDOM: c_int = 1; -pub const MADV_SEQUENTIAL: c_int = 2; -pub const MADV_WILLNEED: c_int = 3; -pub const MADV_DONTNEED: c_int = 4; -pub const MADV_FREE: c_int = 5; -pub const MADV_ACCESS_DEFAULT: c_int = 6; -pub const MADV_ACCESS_LWP: c_int = 7; -pub const MADV_ACCESS_MANY: c_int = 8; - -pub const AF_UNSPEC: c_int = 0; -pub const AF_UNIX: c_int = 1; -pub const AF_INET: c_int = 2; -pub const AF_IMPLINK: c_int = 3; -pub const AF_PUP: c_int = 4; -pub const AF_CHAOS: c_int = 5; -pub const AF_NS: c_int = 6; -pub const AF_NBS: c_int = 7; -pub const AF_ECMA: c_int = 8; -pub const AF_DATAKIT: c_int = 9; -pub const AF_CCITT: c_int = 10; -pub const AF_SNA: c_int = 11; -pub const AF_DECnet: c_int = 12; -pub const AF_DLI: c_int = 13; -pub const AF_LAT: c_int = 14; -pub const AF_HYLINK: c_int = 15; -pub const AF_APPLETALK: c_int = 16; -pub const AF_NIT: c_int = 17; -pub const AF_802: c_int = 18; -pub const AF_OSI: c_int = 19; -pub const AF_X25: c_int = 20; -pub const AF_OSINET: c_int = 21; -pub const AF_GOSIP: c_int = 22; -pub const AF_IPX: c_int = 23; -pub const AF_ROUTE: c_int = 24; -pub const AF_LINK: c_int = 25; -pub const AF_INET6: c_int = 26; -pub const AF_KEY: c_int = 27; -pub const AF_POLICY: c_int = 29; -pub const AF_INET_OFFLOAD: c_int = 30; -pub const AF_TRILL: c_int = 31; -pub const AF_PACKET: c_int = 32; - -pub const PF_UNSPEC: c_int = AF_UNSPEC; -pub const PF_UNIX: c_int = AF_UNIX; -pub const PF_LOCAL: c_int = PF_UNIX; -pub const PF_FILE: c_int = PF_UNIX; -pub const PF_INET: c_int = AF_INET; -pub const PF_IMPLINK: c_int = AF_IMPLINK; -pub const PF_PUP: c_int = AF_PUP; -pub const PF_CHAOS: c_int = AF_CHAOS; -pub const PF_NS: c_int = AF_NS; -pub const PF_NBS: c_int = AF_NBS; -pub const PF_ECMA: c_int = AF_ECMA; -pub const PF_DATAKIT: c_int = AF_DATAKIT; -pub const PF_CCITT: c_int = AF_CCITT; -pub const PF_SNA: c_int = AF_SNA; -pub const PF_DECnet: c_int = AF_DECnet; -pub const PF_DLI: c_int = AF_DLI; -pub const PF_LAT: c_int = AF_LAT; -pub const PF_HYLINK: c_int = AF_HYLINK; -pub const PF_APPLETALK: c_int = AF_APPLETALK; -pub const PF_NIT: c_int = AF_NIT; -pub const PF_802: c_int = AF_802; -pub const PF_OSI: c_int = AF_OSI; -pub const PF_X25: c_int = AF_X25; -pub const PF_OSINET: c_int = AF_OSINET; -pub const PF_GOSIP: c_int = AF_GOSIP; -pub const PF_IPX: c_int = AF_IPX; -pub const 
PF_ROUTE: c_int = AF_ROUTE; -pub const PF_LINK: c_int = AF_LINK; -pub const PF_INET6: c_int = AF_INET6; -pub const PF_KEY: c_int = AF_KEY; -pub const PF_POLICY: c_int = AF_POLICY; -pub const PF_INET_OFFLOAD: c_int = AF_INET_OFFLOAD; -pub const PF_TRILL: c_int = AF_TRILL; -pub const PF_PACKET: c_int = AF_PACKET; - -pub const SOCK_DGRAM: c_int = 1; -pub const SOCK_STREAM: c_int = 2; -pub const SOCK_RAW: c_int = 4; -pub const SOCK_RDM: c_int = 5; -pub const SOCK_SEQPACKET: c_int = 6; -pub const IP_MULTICAST_IF: c_int = 16; -pub const IP_MULTICAST_TTL: c_int = 17; -pub const IP_MULTICAST_LOOP: c_int = 18; -pub const IP_HDRINCL: c_int = 2; -pub const IP_TOS: c_int = 3; -pub const IP_TTL: c_int = 4; -pub const IP_ADD_MEMBERSHIP: c_int = 19; -pub const IP_DROP_MEMBERSHIP: c_int = 20; -pub const IPV6_JOIN_GROUP: c_int = 9; -pub const IPV6_LEAVE_GROUP: c_int = 10; -pub const IP_ADD_SOURCE_MEMBERSHIP: c_int = 23; -pub const IP_DROP_SOURCE_MEMBERSHIP: c_int = 24; -pub const IP_BLOCK_SOURCE: c_int = 21; -pub const IP_UNBLOCK_SOURCE: c_int = 22; -pub const IP_BOUND_IF: c_int = 0x41; - -// These TCP socket options are common between illumos and Solaris, while higher -// numbers have generally diverged: -pub const TCP_NODELAY: c_int = 0x1; -pub const TCP_MAXSEG: c_int = 0x2; -pub const TCP_KEEPALIVE: c_int = 0x8; -pub const TCP_NOTIFY_THRESHOLD: c_int = 0x10; -pub const TCP_ABORT_THRESHOLD: c_int = 0x11; -pub const TCP_CONN_NOTIFY_THRESHOLD: c_int = 0x12; -pub const TCP_CONN_ABORT_THRESHOLD: c_int = 0x13; -pub const TCP_RECVDSTADDR: c_int = 0x14; -pub const TCP_INIT_CWND: c_int = 0x15; -pub const TCP_KEEPALIVE_THRESHOLD: c_int = 0x16; -pub const TCP_KEEPALIVE_ABORT_THRESHOLD: c_int = 0x17; -pub const TCP_CORK: c_int = 0x18; -pub const TCP_RTO_INITIAL: c_int = 0x19; -pub const TCP_RTO_MIN: c_int = 0x1a; -pub const TCP_RTO_MAX: c_int = 0x1b; -pub const TCP_LINGER2: c_int = 0x1c; - -pub const UDP_NAT_T_ENDPOINT: c_int = 0x0103; - -pub const SOMAXCONN: c_int = 128; - -pub const SOL_SOCKET: c_int = 0xffff; -pub const SO_DEBUG: c_int = 0x01; -pub const SO_ACCEPTCONN: c_int = 0x0002; -pub const SO_REUSEADDR: c_int = 0x0004; -pub const SO_KEEPALIVE: c_int = 0x0008; -pub const SO_DONTROUTE: c_int = 0x0010; -pub const SO_BROADCAST: c_int = 0x0020; -pub const SO_USELOOPBACK: c_int = 0x0040; -pub const SO_LINGER: c_int = 0x0080; -pub const SO_OOBINLINE: c_int = 0x0100; -pub const SO_SNDBUF: c_int = 0x1001; -pub const SO_RCVBUF: c_int = 0x1002; -pub const SO_SNDLOWAT: c_int = 0x1003; -pub const SO_RCVLOWAT: c_int = 0x1004; -pub const SO_SNDTIMEO: c_int = 0x1005; -pub const SO_RCVTIMEO: c_int = 0x1006; -pub const SO_ERROR: c_int = 0x1007; -pub const SO_TYPE: c_int = 0x1008; -pub const SO_PROTOTYPE: c_int = 0x1009; -pub const SO_DOMAIN: c_int = 0x100c; -pub const SO_TIMESTAMP: c_int = 0x1013; -pub const SO_EXCLBIND: c_int = 0x1015; - -pub const SCM_RIGHTS: c_int = 0x1010; -pub const SCM_UCRED: c_int = 0x1012; -pub const SCM_TIMESTAMP: c_int = SO_TIMESTAMP; - -pub const MSG_OOB: c_int = 0x1; -pub const MSG_PEEK: c_int = 0x2; -pub const MSG_DONTROUTE: c_int = 0x4; -pub const MSG_EOR: c_int = 0x8; -pub const MSG_CTRUNC: c_int = 0x10; -pub const MSG_TRUNC: c_int = 0x20; -pub const MSG_WAITALL: c_int = 0x40; -pub const MSG_DONTWAIT: c_int = 0x80; -pub const MSG_NOTIFICATION: c_int = 0x100; -pub const MSG_NOSIGNAL: c_int = 0x200; -pub const MSG_DUPCTRL: c_int = 0x800; -pub const MSG_XPG4_2: c_int = 0x8000; -pub const MSG_MAXIOVLEN: c_int = 16; - -pub const IF_NAMESIZE: size_t = 32; -pub const IFNAMSIZ: size_t = 16; - -// 
https://docs.oracle.com/cd/E23824_01/html/821-1475/if-7p.html -pub const IFF_UP: c_int = 0x0000000001; // Address is up -pub const IFF_BROADCAST: c_int = 0x0000000002; // Broadcast address valid -pub const IFF_DEBUG: c_int = 0x0000000004; // Turn on debugging -pub const IFF_LOOPBACK: c_int = 0x0000000008; // Loopback net -pub const IFF_POINTOPOINT: c_int = 0x0000000010; // Interface is p-to-p -pub const IFF_NOTRAILERS: c_int = 0x0000000020; // Avoid use of trailers -pub const IFF_RUNNING: c_int = 0x0000000040; // Resources allocated -pub const IFF_NOARP: c_int = 0x0000000080; // No address res. protocol -pub const IFF_PROMISC: c_int = 0x0000000100; // Receive all packets -pub const IFF_ALLMULTI: c_int = 0x0000000200; // Receive all multicast pkts -pub const IFF_INTELLIGENT: c_int = 0x0000000400; // Protocol code on board -pub const IFF_MULTICAST: c_int = 0x0000000800; // Supports multicast - -// Multicast using broadcst. add. -pub const IFF_MULTI_BCAST: c_int = 0x0000001000; -pub const IFF_UNNUMBERED: c_int = 0x0000002000; // Non-unique address -pub const IFF_DHCPRUNNING: c_int = 0x0000004000; // DHCP controls interface -pub const IFF_PRIVATE: c_int = 0x0000008000; // Do not advertise -pub const IFF_NOXMIT: c_int = 0x0000010000; // Do not transmit pkts - -// No address - just on-link subnet -pub const IFF_NOLOCAL: c_int = 0x0000020000; -pub const IFF_DEPRECATED: c_int = 0x0000040000; // Address is deprecated -pub const IFF_ADDRCONF: c_int = 0x0000080000; // Addr. from stateless addrconf -pub const IFF_ROUTER: c_int = 0x0000100000; // Router on interface -pub const IFF_NONUD: c_int = 0x0000200000; // No NUD on interface -pub const IFF_ANYCAST: c_int = 0x0000400000; // Anycast address -pub const IFF_NORTEXCH: c_int = 0x0000800000; // Don't xchange rout. info -pub const IFF_IPV4: c_int = 0x0001000000; // IPv4 interface -pub const IFF_IPV6: c_int = 0x0002000000; // IPv6 interface -pub const IFF_NOFAILOVER: c_int = 0x0008000000; // in.mpathd test address -pub const IFF_FAILED: c_int = 0x0010000000; // Interface has failed -pub const IFF_STANDBY: c_int = 0x0020000000; // Interface is a hot-spare -pub const IFF_INACTIVE: c_int = 0x0040000000; // Functioning but not used -pub const IFF_OFFLINE: c_int = 0x0080000000; // Interface is offline - // If CoS marking is supported -pub const IFF_COS_ENABLED: c_longlong = 0x0200000000; -pub const IFF_PREFERRED: c_longlong = 0x0400000000; // Prefer as source addr. 
-pub const IFF_TEMPORARY: c_longlong = 0x0800000000; // RFC3041 -pub const IFF_FIXEDMTU: c_longlong = 0x1000000000; // MTU set with SIOCSLIFMTU -pub const IFF_VIRTUAL: c_longlong = 0x2000000000; // Cannot send/receive pkts -pub const IFF_DUPLICATE: c_longlong = 0x4000000000; // Local address in use -pub const IFF_IPMP: c_longlong = 0x8000000000; // IPMP IP interface - -// sys/ipc.h: -pub const IPC_ALLOC: c_int = 0x8000; -pub const IPC_CREAT: c_int = 0x200; -pub const IPC_EXCL: c_int = 0x400; -pub const IPC_NOWAIT: c_int = 0x800; -pub const IPC_PRIVATE: key_t = 0; -pub const IPC_RMID: c_int = 10; -pub const IPC_SET: c_int = 11; -pub const IPC_SEAT: c_int = 12; - -// sys/shm.h -pub const SHM_R: c_int = 0o400; -pub const SHM_W: c_int = 0o200; -pub const SHM_RDONLY: c_int = 0o10000; -pub const SHM_RND: c_int = 0o20000; -pub const SHM_SHARE_MMU: c_int = 0o40000; -pub const SHM_PAGEABLE: c_int = 0o100000; - -pub const SHUT_RD: c_int = 0; -pub const SHUT_WR: c_int = 1; -pub const SHUT_RDWR: c_int = 2; - -pub const F_RDLCK: c_short = 1; -pub const F_WRLCK: c_short = 2; -pub const F_UNLCK: c_short = 3; - -pub const O_SYNC: c_int = 16; -pub const O_NONBLOCK: c_int = 128; - -pub const IPPROTO_RAW: c_int = 255; - -pub const _PC_LINK_MAX: c_int = 1; -pub const _PC_MAX_CANON: c_int = 2; -pub const _PC_MAX_INPUT: c_int = 3; -pub const _PC_NAME_MAX: c_int = 4; -pub const _PC_PATH_MAX: c_int = 5; -pub const _PC_PIPE_BUF: c_int = 6; -pub const _PC_NO_TRUNC: c_int = 7; -pub const _PC_VDISABLE: c_int = 8; -pub const _PC_CHOWN_RESTRICTED: c_int = 9; -pub const _PC_ASYNC_IO: c_int = 10; -pub const _PC_PRIO_IO: c_int = 11; -pub const _PC_SYNC_IO: c_int = 12; -pub const _PC_ALLOC_SIZE_MIN: c_int = 13; -pub const _PC_REC_INCR_XFER_SIZE: c_int = 14; -pub const _PC_REC_MAX_XFER_SIZE: c_int = 15; -pub const _PC_REC_MIN_XFER_SIZE: c_int = 16; -pub const _PC_REC_XFER_ALIGN: c_int = 17; -pub const _PC_SYMLINK_MAX: c_int = 18; -pub const _PC_2_SYMLINKS: c_int = 19; -pub const _PC_ACL_ENABLED: c_int = 20; -pub const _PC_MIN_HOLE_SIZE: c_int = 21; -pub const _PC_CASE_BEHAVIOR: c_int = 22; -pub const _PC_SATTR_ENABLED: c_int = 23; -pub const _PC_SATTR_EXISTS: c_int = 24; -pub const _PC_ACCESS_FILTERING: c_int = 25; -pub const _PC_TIMESTAMP_RESOLUTION: c_int = 26; -pub const _PC_FILESIZEBITS: c_int = 67; -pub const _PC_XATTR_ENABLED: c_int = 100; -pub const _PC_XATTR_EXISTS: c_int = 101; - -pub const _POSIX_VDISABLE: crate::cc_t = 0; - -pub const _SC_ARG_MAX: c_int = 1; -pub const _SC_CHILD_MAX: c_int = 2; -pub const _SC_CLK_TCK: c_int = 3; -pub const _SC_NGROUPS_MAX: c_int = 4; -pub const _SC_OPEN_MAX: c_int = 5; -pub const _SC_JOB_CONTROL: c_int = 6; -pub const _SC_SAVED_IDS: c_int = 7; -pub const _SC_VERSION: c_int = 8; -pub const _SC_PASS_MAX: c_int = 9; -pub const _SC_LOGNAME_MAX: c_int = 10; -pub const _SC_PAGESIZE: c_int = 11; -pub const _SC_PAGE_SIZE: c_int = _SC_PAGESIZE; -pub const _SC_XOPEN_VERSION: c_int = 12; -pub const _SC_NPROCESSORS_CONF: c_int = 14; -pub const _SC_NPROCESSORS_ONLN: c_int = 15; -pub const _SC_STREAM_MAX: c_int = 16; -pub const _SC_TZNAME_MAX: c_int = 17; -pub const _SC_AIO_LISTIO_MAX: c_int = 18; -pub const _SC_AIO_MAX: c_int = 19; -pub const _SC_AIO_PRIO_DELTA_MAX: c_int = 20; -pub const _SC_ASYNCHRONOUS_IO: c_int = 21; -pub const _SC_DELAYTIMER_MAX: c_int = 22; -pub const _SC_FSYNC: c_int = 23; -pub const _SC_MAPPED_FILES: c_int = 24; -pub const _SC_MEMLOCK: c_int = 25; -pub const _SC_MEMLOCK_RANGE: c_int = 26; -pub const _SC_MEMORY_PROTECTION: c_int = 27; -pub const _SC_MESSAGE_PASSING: 
c_int = 28; -pub const _SC_MQ_OPEN_MAX: c_int = 29; -pub const _SC_MQ_PRIO_MAX: c_int = 30; -pub const _SC_PRIORITIZED_IO: c_int = 31; -pub const _SC_PRIORITY_SCHEDULING: c_int = 32; -pub const _SC_REALTIME_SIGNALS: c_int = 33; -pub const _SC_RTSIG_MAX: c_int = 34; -pub const _SC_SEMAPHORES: c_int = 35; -pub const _SC_SEM_NSEMS_MAX: c_int = 36; -pub const _SC_SEM_VALUE_MAX: c_int = 37; -pub const _SC_SHARED_MEMORY_OBJECTS: c_int = 38; -pub const _SC_SIGQUEUE_MAX: c_int = 39; -pub const _SC_SIGRT_MIN: c_int = 40; -pub const _SC_SIGRT_MAX: c_int = 41; -pub const _SC_SYNCHRONIZED_IO: c_int = 42; -pub const _SC_TIMERS: c_int = 43; -pub const _SC_TIMER_MAX: c_int = 44; -pub const _SC_2_C_BIND: c_int = 45; -pub const _SC_2_C_DEV: c_int = 46; -pub const _SC_2_C_VERSION: c_int = 47; -pub const _SC_2_FORT_DEV: c_int = 48; -pub const _SC_2_FORT_RUN: c_int = 49; -pub const _SC_2_LOCALEDEF: c_int = 50; -pub const _SC_2_SW_DEV: c_int = 51; -pub const _SC_2_UPE: c_int = 52; -pub const _SC_2_VERSION: c_int = 53; -pub const _SC_BC_BASE_MAX: c_int = 54; -pub const _SC_BC_DIM_MAX: c_int = 55; -pub const _SC_BC_SCALE_MAX: c_int = 56; -pub const _SC_BC_STRING_MAX: c_int = 57; -pub const _SC_COLL_WEIGHTS_MAX: c_int = 58; -pub const _SC_EXPR_NEST_MAX: c_int = 59; -pub const _SC_LINE_MAX: c_int = 60; -pub const _SC_RE_DUP_MAX: c_int = 61; -pub const _SC_XOPEN_CRYPT: c_int = 62; -pub const _SC_XOPEN_ENH_I18N: c_int = 63; -pub const _SC_XOPEN_SHM: c_int = 64; -pub const _SC_2_CHAR_TERM: c_int = 66; -pub const _SC_XOPEN_XCU_VERSION: c_int = 67; -pub const _SC_ATEXIT_MAX: c_int = 76; -pub const _SC_IOV_MAX: c_int = 77; -pub const _SC_XOPEN_UNIX: c_int = 78; -pub const _SC_T_IOV_MAX: c_int = 79; -pub const _SC_PHYS_PAGES: c_int = 500; -pub const _SC_AVPHYS_PAGES: c_int = 501; -pub const _SC_COHER_BLKSZ: c_int = 503; -pub const _SC_SPLIT_CACHE: c_int = 504; -pub const _SC_ICACHE_SZ: c_int = 505; -pub const _SC_DCACHE_SZ: c_int = 506; -pub const _SC_ICACHE_LINESZ: c_int = 507; -pub const _SC_DCACHE_LINESZ: c_int = 508; -pub const _SC_ICACHE_BLKSZ: c_int = 509; -pub const _SC_DCACHE_BLKSZ: c_int = 510; -pub const _SC_DCACHE_TBLKSZ: c_int = 511; -pub const _SC_ICACHE_ASSOC: c_int = 512; -pub const _SC_DCACHE_ASSOC: c_int = 513; -pub const _SC_MAXPID: c_int = 514; -pub const _SC_STACK_PROT: c_int = 515; -pub const _SC_NPROCESSORS_MAX: c_int = 516; -pub const _SC_CPUID_MAX: c_int = 517; -pub const _SC_EPHID_MAX: c_int = 518; -pub const _SC_THREAD_DESTRUCTOR_ITERATIONS: c_int = 568; -pub const _SC_GETGR_R_SIZE_MAX: c_int = 569; -pub const _SC_GETPW_R_SIZE_MAX: c_int = 570; -pub const _SC_LOGIN_NAME_MAX: c_int = 571; -pub const _SC_THREAD_KEYS_MAX: c_int = 572; -pub const _SC_THREAD_STACK_MIN: c_int = 573; -pub const _SC_THREAD_THREADS_MAX: c_int = 574; -pub const _SC_TTY_NAME_MAX: c_int = 575; -pub const _SC_THREADS: c_int = 576; -pub const _SC_THREAD_ATTR_STACKADDR: c_int = 577; -pub const _SC_THREAD_ATTR_STACKSIZE: c_int = 578; -pub const _SC_THREAD_PRIORITY_SCHEDULING: c_int = 579; -pub const _SC_THREAD_PRIO_INHERIT: c_int = 580; -pub const _SC_THREAD_PRIO_PROTECT: c_int = 581; -pub const _SC_THREAD_PROCESS_SHARED: c_int = 582; -pub const _SC_THREAD_SAFE_FUNCTIONS: c_int = 583; -pub const _SC_XOPEN_LEGACY: c_int = 717; -pub const _SC_XOPEN_REALTIME: c_int = 718; -pub const _SC_XOPEN_REALTIME_THREADS: c_int = 719; -pub const _SC_XBS5_ILP32_OFF32: c_int = 720; -pub const _SC_XBS5_ILP32_OFFBIG: c_int = 721; -pub const _SC_XBS5_LP64_OFF64: c_int = 722; -pub const _SC_XBS5_LPBIG_OFFBIG: c_int = 723; -pub const _SC_2_PBS: 
c_int = 724; -pub const _SC_2_PBS_ACCOUNTING: c_int = 725; -pub const _SC_2_PBS_CHECKPOINT: c_int = 726; -pub const _SC_2_PBS_LOCATE: c_int = 728; -pub const _SC_2_PBS_MESSAGE: c_int = 729; -pub const _SC_2_PBS_TRACK: c_int = 730; -pub const _SC_ADVISORY_INFO: c_int = 731; -pub const _SC_BARRIERS: c_int = 732; -pub const _SC_CLOCK_SELECTION: c_int = 733; -pub const _SC_CPUTIME: c_int = 734; -pub const _SC_HOST_NAME_MAX: c_int = 735; -pub const _SC_MONOTONIC_CLOCK: c_int = 736; -pub const _SC_READER_WRITER_LOCKS: c_int = 737; -pub const _SC_REGEXP: c_int = 738; -pub const _SC_SHELL: c_int = 739; -pub const _SC_SPAWN: c_int = 740; -pub const _SC_SPIN_LOCKS: c_int = 741; -pub const _SC_SPORADIC_SERVER: c_int = 742; -pub const _SC_SS_REPL_MAX: c_int = 743; -pub const _SC_SYMLOOP_MAX: c_int = 744; -pub const _SC_THREAD_CPUTIME: c_int = 745; -pub const _SC_THREAD_SPORADIC_SERVER: c_int = 746; -pub const _SC_TIMEOUTS: c_int = 747; -pub const _SC_TRACE: c_int = 748; -pub const _SC_TRACE_EVENT_FILTER: c_int = 749; -pub const _SC_TRACE_EVENT_NAME_MAX: c_int = 750; -pub const _SC_TRACE_INHERIT: c_int = 751; -pub const _SC_TRACE_LOG: c_int = 752; -pub const _SC_TRACE_NAME_MAX: c_int = 753; -pub const _SC_TRACE_SYS_MAX: c_int = 754; -pub const _SC_TRACE_USER_EVENT_MAX: c_int = 755; -pub const _SC_TYPED_MEMORY_OBJECTS: c_int = 756; -pub const _SC_V6_ILP32_OFF32: c_int = 757; -pub const _SC_V6_ILP32_OFFBIG: c_int = 758; -pub const _SC_V6_LP64_OFF64: c_int = 759; -pub const _SC_V6_LPBIG_OFFBIG: c_int = 760; -pub const _SC_XOPEN_STREAMS: c_int = 761; -pub const _SC_IPV6: c_int = 762; -pub const _SC_RAW_SOCKETS: c_int = 763; - -pub const _ST_FSTYPSZ: c_int = 16; - -pub const _MUTEX_MAGIC: u16 = 0x4d58; // MX -pub const _COND_MAGIC: u16 = 0x4356; // CV -pub const _RWL_MAGIC: u16 = 0x5257; // RW - -pub const NCCS: usize = 19; - -pub const LOG_CRON: c_int = 15 << 3; - -pub const PTHREAD_MUTEX_INITIALIZER: pthread_mutex_t = pthread_mutex_t { - __pthread_mutex_flag1: 0, - __pthread_mutex_flag2: 0, - __pthread_mutex_ceiling: 0, - __pthread_mutex_type: PTHREAD_PROCESS_PRIVATE, - __pthread_mutex_magic: _MUTEX_MAGIC, - __pthread_mutex_lock: 0, - __pthread_mutex_data: 0, -}; -pub const PTHREAD_COND_INITIALIZER: pthread_cond_t = pthread_cond_t { - __pthread_cond_flag: [0; 4], - __pthread_cond_type: PTHREAD_PROCESS_PRIVATE, - __pthread_cond_magic: _COND_MAGIC, - __pthread_cond_data: 0, -}; -pub const PTHREAD_RWLOCK_INITIALIZER: pthread_rwlock_t = pthread_rwlock_t { - __pthread_rwlock_readers: 0, - __pthread_rwlock_type: PTHREAD_PROCESS_PRIVATE, - __pthread_rwlock_magic: _RWL_MAGIC, - __pthread_rwlock_mutex: PTHREAD_MUTEX_INITIALIZER, - __pthread_rwlock_readercv: PTHREAD_COND_INITIALIZER, - __pthread_rwlock_writercv: PTHREAD_COND_INITIALIZER, -}; -pub const PTHREAD_MUTEX_NORMAL: c_int = 0; -pub const PTHREAD_MUTEX_ERRORCHECK: c_int = 2; -pub const PTHREAD_MUTEX_RECURSIVE: c_int = 4; -pub const PTHREAD_MUTEX_DEFAULT: c_int = crate::PTHREAD_MUTEX_NORMAL; - -pub const RTLD_NEXT: *mut c_void = -1isize as *mut c_void; -pub const RTLD_DEFAULT: *mut c_void = -2isize as *mut c_void; -pub const RTLD_SELF: *mut c_void = -3isize as *mut c_void; -pub const RTLD_PROBE: *mut c_void = -4isize as *mut c_void; - -pub const RTLD_LAZY: c_int = 0x1; -pub const RTLD_NOW: c_int = 0x2; -pub const RTLD_NOLOAD: c_int = 0x4; -pub const RTLD_GLOBAL: c_int = 0x100; -pub const RTLD_LOCAL: c_int = 0x0; -pub const RTLD_PARENT: c_int = 0x200; -pub const RTLD_GROUP: c_int = 0x400; -pub const RTLD_WORLD: c_int = 0x800; -pub const RTLD_NODELETE: c_int = 
0x1000; -pub const RTLD_FIRST: c_int = 0x2000; -pub const RTLD_CONFGEN: c_int = 0x10000; - -pub const PORT_SOURCE_AIO: c_int = 1; -pub const PORT_SOURCE_TIMER: c_int = 2; -pub const PORT_SOURCE_USER: c_int = 3; -pub const PORT_SOURCE_FD: c_int = 4; -pub const PORT_SOURCE_ALERT: c_int = 5; -pub const PORT_SOURCE_MQ: c_int = 6; -pub const PORT_SOURCE_FILE: c_int = 7; - -pub const NONROOT_USR: c_short = 2; - -pub const EMPTY: c_short = 0; -pub const RUN_LVL: c_short = 1; -pub const BOOT_TIME: c_short = 2; -pub const OLD_TIME: c_short = 3; -pub const NEW_TIME: c_short = 4; -pub const INIT_PROCESS: c_short = 5; -pub const LOGIN_PROCESS: c_short = 6; -pub const USER_PROCESS: c_short = 7; -pub const DEAD_PROCESS: c_short = 8; -pub const ACCOUNTING: c_short = 9; -pub const DOWN_TIME: c_short = 10; - -const _TIOC: c_int = ('T' as i32) << 8; -const tIOC: c_int = ('t' as i32) << 8; -pub const TCGETA: c_int = _TIOC | 1; -pub const TCSETA: c_int = _TIOC | 2; -pub const TCSETAW: c_int = _TIOC | 3; -pub const TCSETAF: c_int = _TIOC | 4; -pub const TCSBRK: c_int = _TIOC | 5; -pub const TCXONC: c_int = _TIOC | 6; -pub const TCFLSH: c_int = _TIOC | 7; -pub const TCDSET: c_int = _TIOC | 32; -pub const TCGETS: c_int = _TIOC | 13; -pub const TCSETS: c_int = _TIOC | 14; -pub const TCSANOW: c_int = _TIOC | 14; -pub const TCSETSW: c_int = _TIOC | 15; -pub const TCSADRAIN: c_int = _TIOC | 15; -pub const TCSETSF: c_int = _TIOC | 16; -pub const TCSAFLUSH: c_int = _TIOC | 16; -pub const TCIFLUSH: c_int = 0; -pub const TCOFLUSH: c_int = 1; -pub const TCIOFLUSH: c_int = 2; -pub const TCOOFF: c_int = 0; -pub const TCOON: c_int = 1; -pub const TCIOFF: c_int = 2; -pub const TCION: c_int = 3; -pub const TIOC: c_int = _TIOC; -pub const TIOCKBON: c_int = _TIOC | 8; -pub const TIOCKBOF: c_int = _TIOC | 9; -pub const TIOCGWINSZ: c_int = _TIOC | 104; -pub const TIOCSWINSZ: c_int = _TIOC | 103; -pub const TIOCGSOFTCAR: c_int = _TIOC | 105; -pub const TIOCSSOFTCAR: c_int = _TIOC | 106; -pub const TIOCGPPS: c_int = _TIOC | 125; -pub const TIOCSPPS: c_int = _TIOC | 126; -pub const TIOCGPPSEV: c_int = _TIOC | 127; -pub const TIOCGETD: c_int = tIOC | 0; -pub const TIOCSETD: c_int = tIOC | 1; -pub const TIOCHPCL: c_int = tIOC | 2; -pub const TIOCGETP: c_int = tIOC | 8; -pub const TIOCSETP: c_int = tIOC | 9; -pub const TIOCSETN: c_int = tIOC | 10; -pub const TIOCEXCL: c_int = tIOC | 13; -pub const TIOCNXCL: c_int = tIOC | 14; -pub const TIOCFLUSH: c_int = tIOC | 16; -pub const TIOCSETC: c_int = tIOC | 17; -pub const TIOCGETC: c_int = tIOC | 18; -pub const TIOCLBIS: c_int = tIOC | 127; -pub const TIOCLBIC: c_int = tIOC | 126; -pub const TIOCLSET: c_int = tIOC | 125; -pub const TIOCLGET: c_int = tIOC | 124; -pub const TIOCSBRK: c_int = tIOC | 123; -pub const TIOCCBRK: c_int = tIOC | 122; -pub const TIOCSDTR: c_int = tIOC | 121; -pub const TIOCCDTR: c_int = tIOC | 120; -pub const TIOCSLTC: c_int = tIOC | 117; -pub const TIOCGLTC: c_int = tIOC | 116; -pub const TIOCOUTQ: c_int = tIOC | 115; -pub const TIOCNOTTY: c_int = tIOC | 113; -pub const TIOCSCTTY: c_int = tIOC | 132; -pub const TIOCSTOP: c_int = tIOC | 111; -pub const TIOCSTART: c_int = tIOC | 110; -pub const TIOCSILOOP: c_int = tIOC | 109; -pub const TIOCCILOOP: c_int = tIOC | 108; -pub const TIOCGPGRP: c_int = tIOC | 20; -pub const TIOCSPGRP: c_int = tIOC | 21; -pub const TIOCGSID: c_int = tIOC | 22; -pub const TIOCSTI: c_int = tIOC | 23; -pub const TIOCMSET: c_int = tIOC | 26; -pub const TIOCMBIS: c_int = tIOC | 27; -pub const TIOCMBIC: c_int = tIOC | 28; -pub const TIOCMGET: 
c_int = tIOC | 29; -pub const TIOCREMOTE: c_int = tIOC | 30; -pub const TIOCSIGNAL: c_int = tIOC | 31; - -pub const TIOCM_LE: c_int = 0o0001; -pub const TIOCM_DTR: c_int = 0o0002; -pub const TIOCM_RTS: c_int = 0o0004; -pub const TIOCM_ST: c_int = 0o0010; -pub const TIOCM_SR: c_int = 0o0020; -pub const TIOCM_CTS: c_int = 0o0040; -pub const TIOCM_CAR: c_int = 0o0100; -pub const TIOCM_CD: c_int = TIOCM_CAR; -pub const TIOCM_RNG: c_int = 0o0200; -pub const TIOCM_RI: c_int = TIOCM_RNG; -pub const TIOCM_DSR: c_int = 0o0400; - -/* termios */ -pub const B0: speed_t = 0; -pub const B50: speed_t = 1; -pub const B75: speed_t = 2; -pub const B110: speed_t = 3; -pub const B134: speed_t = 4; -pub const B150: speed_t = 5; -pub const B200: speed_t = 6; -pub const B300: speed_t = 7; -pub const B600: speed_t = 8; -pub const B1200: speed_t = 9; -pub const B1800: speed_t = 10; -pub const B2400: speed_t = 11; -pub const B4800: speed_t = 12; -pub const B9600: speed_t = 13; -pub const B19200: speed_t = 14; -pub const B38400: speed_t = 15; -pub const B57600: speed_t = 16; -pub const B76800: speed_t = 17; -pub const B115200: speed_t = 18; -pub const B153600: speed_t = 19; -pub const B230400: speed_t = 20; -pub const B307200: speed_t = 21; -pub const B460800: speed_t = 22; -pub const B921600: speed_t = 23; -pub const CSTART: crate::tcflag_t = 0o21; -pub const CSTOP: crate::tcflag_t = 0o23; -pub const CSWTCH: crate::tcflag_t = 0o32; -pub const CBAUD: crate::tcflag_t = 0o17; -pub const CIBAUD: crate::tcflag_t = 0o3600000; -pub const CBAUDEXT: crate::tcflag_t = 0o10000000; -pub const CIBAUDEXT: crate::tcflag_t = 0o20000000; -pub const CSIZE: crate::tcflag_t = 0o000060; -pub const CS5: crate::tcflag_t = 0; -pub const CS6: crate::tcflag_t = 0o000020; -pub const CS7: crate::tcflag_t = 0o000040; -pub const CS8: crate::tcflag_t = 0o000060; -pub const CSTOPB: crate::tcflag_t = 0o000100; -pub const ECHO: crate::tcflag_t = 0o000010; -pub const ECHOE: crate::tcflag_t = 0o000020; -pub const ECHOK: crate::tcflag_t = 0o000040; -pub const ECHONL: crate::tcflag_t = 0o000100; -pub const ECHOCTL: crate::tcflag_t = 0o001000; -pub const ECHOPRT: crate::tcflag_t = 0o002000; -pub const ECHOKE: crate::tcflag_t = 0o004000; -pub const EXTPROC: crate::tcflag_t = 0o200000; -pub const IGNBRK: crate::tcflag_t = 0o000001; -pub const BRKINT: crate::tcflag_t = 0o000002; -pub const IGNPAR: crate::tcflag_t = 0o000004; -pub const PARMRK: crate::tcflag_t = 0o000010; -pub const INPCK: crate::tcflag_t = 0o000020; -pub const ISTRIP: crate::tcflag_t = 0o000040; -pub const INLCR: crate::tcflag_t = 0o000100; -pub const IGNCR: crate::tcflag_t = 0o000200; -pub const ICRNL: crate::tcflag_t = 0o000400; -pub const IUCLC: crate::tcflag_t = 0o001000; -pub const IXON: crate::tcflag_t = 0o002000; -pub const IXOFF: crate::tcflag_t = 0o010000; -pub const IXANY: crate::tcflag_t = 0o004000; -pub const IMAXBEL: crate::tcflag_t = 0o020000; -pub const DOSMODE: crate::tcflag_t = 0o100000; -pub const OPOST: crate::tcflag_t = 0o000001; -pub const OLCUC: crate::tcflag_t = 0o000002; -pub const ONLCR: crate::tcflag_t = 0o000004; -pub const OCRNL: crate::tcflag_t = 0o000010; -pub const ONOCR: crate::tcflag_t = 0o000020; -pub const ONLRET: crate::tcflag_t = 0o000040; -pub const OFILL: crate::tcflag_t = 0o0000100; -pub const OFDEL: crate::tcflag_t = 0o0000200; -pub const CREAD: crate::tcflag_t = 0o000200; -pub const PARENB: crate::tcflag_t = 0o000400; -pub const PARODD: crate::tcflag_t = 0o001000; -pub const HUPCL: crate::tcflag_t = 0o002000; -pub const CLOCAL: crate::tcflag_t = 
0o004000; -pub const CRTSXOFF: crate::tcflag_t = 0o10000000000; -pub const CRTSCTS: crate::tcflag_t = 0o20000000000; -pub const ISIG: crate::tcflag_t = 0o000001; -pub const ICANON: crate::tcflag_t = 0o000002; -pub const IEXTEN: crate::tcflag_t = 0o100000; -pub const TOSTOP: crate::tcflag_t = 0o000400; -pub const FLUSHO: crate::tcflag_t = 0o020000; -pub const PENDIN: crate::tcflag_t = 0o040000; -pub const NOFLSH: crate::tcflag_t = 0o000200; -pub const VINTR: usize = 0; -pub const VQUIT: usize = 1; -pub const VERASE: usize = 2; -pub const VKILL: usize = 3; -pub const VEOF: usize = 4; -pub const VEOL: usize = 5; -pub const VEOL2: usize = 6; -pub const VMIN: usize = 4; -pub const VTIME: usize = 5; -pub const VSWTCH: usize = 7; -pub const VSTART: usize = 8; -pub const VSTOP: usize = 9; -pub const VSUSP: usize = 10; -pub const VDSUSP: usize = 11; -pub const VREPRINT: usize = 12; -pub const VDISCARD: usize = 13; -pub const VWERASE: usize = 14; -pub const VLNEXT: usize = 15; - -// -const STR: c_int = (b'S' as c_int) << 8; -pub const I_NREAD: c_int = STR | 0o1; -pub const I_PUSH: c_int = STR | 0o2; -pub const I_POP: c_int = STR | 0o3; -pub const I_LOOK: c_int = STR | 0o4; -pub const I_FLUSH: c_int = STR | 0o5; -pub const I_SRDOPT: c_int = STR | 0o6; -pub const I_GRDOPT: c_int = STR | 0o7; -pub const I_STR: c_int = STR | 0o10; -pub const I_SETSIG: c_int = STR | 0o11; -pub const I_GETSIG: c_int = STR | 0o12; -pub const I_FIND: c_int = STR | 0o13; -pub const I_LINK: c_int = STR | 0o14; -pub const I_UNLINK: c_int = STR | 0o15; -pub const I_PEEK: c_int = STR | 0o17; -pub const I_FDINSERT: c_int = STR | 0o20; -pub const I_SENDFD: c_int = STR | 0o21; -pub const I_RECVFD: c_int = STR | 0o16; -pub const I_SWROPT: c_int = STR | 0o23; -pub const I_GWROPT: c_int = STR | 0o24; -pub const I_LIST: c_int = STR | 0o25; -pub const I_PLINK: c_int = STR | 0o26; -pub const I_PUNLINK: c_int = STR | 0o27; -pub const I_ANCHOR: c_int = STR | 0o30; -pub const I_FLUSHBAND: c_int = STR | 0o34; -pub const I_CKBAND: c_int = STR | 0o35; -pub const I_GETBAND: c_int = STR | 0o36; -pub const I_ATMARK: c_int = STR | 0o37; -pub const I_SETCLTIME: c_int = STR | 0o40; -pub const I_GETCLTIME: c_int = STR | 0o41; -pub const I_CANPUT: c_int = STR | 0o42; -pub const I_SERROPT: c_int = STR | 0o43; -pub const I_GERROPT: c_int = STR | 0o44; -pub const I_ESETSIG: c_int = STR | 0o45; -pub const I_EGETSIG: c_int = STR | 0o46; -pub const __I_PUSH_NOCTTY: c_int = STR | 0o47; - -// 3SOCKET flags -pub const SOCK_CLOEXEC: c_int = 0x080000; -pub const SOCK_NONBLOCK: c_int = 0x100000; -pub const SOCK_NDELAY: c_int = 0x200000; - -// -pub const SCALE_KG: c_int = 1 << 6; -pub const SCALE_KF: c_int = 1 << 16; -pub const SCALE_KH: c_int = 1 << 2; -pub const MAXTC: c_int = 1 << 6; -pub const SCALE_PHASE: c_int = 1 << 22; -pub const SCALE_USEC: c_int = 1 << 16; -pub const SCALE_UPDATE: c_int = SCALE_KG * MAXTC; -pub const FINEUSEC: c_int = 1 << 22; -pub const MAXPHASE: c_int = 512000; -pub const MAXFREQ: c_int = 512 * SCALE_USEC; -pub const MAXTIME: c_int = 200 << PPS_AVG; -pub const MINSEC: c_int = 16; -pub const MAXSEC: c_int = 1200; -pub const PPS_AVG: c_int = 2; -pub const PPS_SHIFT: c_int = 2; -pub const PPS_SHIFTMAX: c_int = 8; -pub const PPS_VALID: c_int = 120; -pub const MAXGLITCH: c_int = 30; -pub const MOD_OFFSET: u32 = 0x0001; -pub const MOD_FREQUENCY: u32 = 0x0002; -pub const MOD_MAXERROR: u32 = 0x0004; -pub const MOD_ESTERROR: u32 = 0x0008; -pub const MOD_STATUS: u32 = 0x0010; -pub const MOD_TIMECONST: u32 = 0x0020; -pub const MOD_CLKB: u32 = 
0x4000; -pub const MOD_CLKA: u32 = 0x8000; -pub const STA_PLL: u32 = 0x0001; -pub const STA_PPSFREQ: i32 = 0x0002; -pub const STA_PPSTIME: i32 = 0x0004; -pub const STA_FLL: i32 = 0x0008; -pub const STA_INS: i32 = 0x0010; -pub const STA_DEL: i32 = 0x0020; -pub const STA_UNSYNC: i32 = 0x0040; -pub const STA_FREQHOLD: i32 = 0x0080; -pub const STA_PPSSIGNAL: i32 = 0x0100; -pub const STA_PPSJITTER: i32 = 0x0200; -pub const STA_PPSWANDER: i32 = 0x0400; -pub const STA_PPSERROR: i32 = 0x0800; -pub const STA_CLOCKERR: i32 = 0x1000; -pub const STA_RONLY: i32 = - STA_PPSSIGNAL | STA_PPSJITTER | STA_PPSWANDER | STA_PPSERROR | STA_CLOCKERR; -pub const TIME_OK: i32 = 0; -pub const TIME_INS: i32 = 1; -pub const TIME_DEL: i32 = 2; -pub const TIME_OOP: i32 = 3; -pub const TIME_WAIT: i32 = 4; -pub const TIME_ERROR: i32 = 5; - -pub const PRIO_PROCESS: c_int = 0; -pub const PRIO_PGRP: c_int = 1; -pub const PRIO_USER: c_int = 2; - -pub const SCHED_OTHER: c_int = 0; -pub const SCHED_FIFO: c_int = 1; -pub const SCHED_RR: c_int = 2; -pub const SCHED_SYS: c_int = 3; -pub const SCHED_IA: c_int = 4; -pub const SCHED_FSS: c_int = 5; -pub const SCHED_FX: c_int = 6; - -// sys/priv.h -pub const PRIV_DEBUG: c_uint = 0x0001; -pub const PRIV_AWARE: c_uint = 0x0002; -pub const PRIV_AWARE_INHERIT: c_uint = 0x0004; -pub const __PROC_PROTECT: c_uint = 0x0008; -pub const NET_MAC_AWARE: c_uint = 0x0010; -pub const NET_MAC_AWARE_INHERIT: c_uint = 0x0020; -pub const PRIV_AWARE_RESET: c_uint = 0x0040; -pub const PRIV_XPOLICY: c_uint = 0x0080; -pub const PRIV_PFEXEC: c_uint = 0x0100; - -// sys/systeminfo.h -pub const SI_SYSNAME: c_int = 1; -pub const SI_HOSTNAME: c_int = 2; -pub const SI_RELEASE: c_int = 3; -pub const SI_VERSION: c_int = 4; -pub const SI_MACHINE: c_int = 5; -pub const SI_ARCHITECTURE: c_int = 6; -pub const SI_HW_SERIAL: c_int = 7; -pub const SI_HW_PROVIDER: c_int = 8; -pub const SI_SET_HOSTNAME: c_int = 258; -pub const SI_SET_SRPC_DOMAIN: c_int = 265; -pub const SI_PLATFORM: c_int = 513; -pub const SI_ISALIST: c_int = 514; -pub const SI_DHCP_CACHE: c_int = 515; -pub const SI_ARCHITECTURE_32: c_int = 516; -pub const SI_ARCHITECTURE_64: c_int = 517; -pub const SI_ARCHITECTURE_K: c_int = 518; -pub const SI_ARCHITECTURE_NATIVE: c_int = 519; - -// sys/lgrp_user.h -pub const LGRP_COOKIE_NONE: crate::lgrp_cookie_t = 0; -pub const LGRP_AFF_NONE: crate::lgrp_affinity_t = 0x0; -pub const LGRP_AFF_WEAK: crate::lgrp_affinity_t = 0x10; -pub const LGRP_AFF_STRONG: crate::lgrp_affinity_t = 0x100; -pub const LGRP_CONTENT_ALL: crate::lgrp_content_t = 0; -pub const LGRP_CONTENT_HIERARCHY: crate::lgrp_content_t = LGRP_CONTENT_ALL; -pub const LGRP_CONTENT_DIRECT: crate::lgrp_content_t = 1; -pub const LGRP_LAT_CPU_TO_MEM: crate::lgrp_lat_between_t = 0; -pub const LGRP_MEM_SZ_FREE: crate::lgrp_mem_size_flag_t = 0; -pub const LGRP_MEM_SZ_INSTALLED: crate::lgrp_mem_size_flag_t = 1; -pub const LGRP_VIEW_CALLER: crate::lgrp_view_t = 0; -pub const LGRP_VIEW_OS: crate::lgrp_view_t = 1; - -// sys/processor.h - -pub const P_OFFLINE: c_int = 0x001; -pub const P_ONLINE: c_int = 0x002; -pub const P_STATUS: c_int = 0x003; -pub const P_FAULTED: c_int = 0x004; -pub const P_POWEROFF: c_int = 0x005; -pub const P_NOINTR: c_int = 0x006; -pub const P_SPARE: c_int = 0x007; -pub const P_FORCED: c_int = 0x10000000; -pub const PI_TYPELEN: c_int = 16; -pub const PI_FPUTYPE: c_int = 32; - -// sys/auxv.h -pub const AT_SUN_HWCAP: c_uint = 2009; - -// As per sys/socket.h, header alignment must be 8 bytes on SPARC -// and 4 bytes everywhere else: -#[cfg(target_arch 
= "sparc64")] -const _CMSG_HDR_ALIGNMENT: usize = 8; -#[cfg(not(target_arch = "sparc64"))] -const _CMSG_HDR_ALIGNMENT: usize = 4; - -const _CMSG_DATA_ALIGNMENT: usize = size_of::(); - -const NEWDEV: c_int = 1; - -// sys/sendfile.h -pub const SFV_FD_SELF: c_int = -2; - -const fn _CMSG_HDR_ALIGN(p: usize) -> usize { - (p + _CMSG_HDR_ALIGNMENT - 1) & !(_CMSG_HDR_ALIGNMENT - 1) -} - -const fn _CMSG_DATA_ALIGN(p: usize) -> usize { - (p + _CMSG_DATA_ALIGNMENT - 1) & !(_CMSG_DATA_ALIGNMENT - 1) -} - -f! { - pub fn CMSG_DATA(cmsg: *const cmsghdr) -> *mut c_uchar { - _CMSG_DATA_ALIGN(cmsg.offset(1) as usize) as *mut c_uchar - } - - pub const fn CMSG_LEN(length: c_uint) -> c_uint { - _CMSG_DATA_ALIGN(size_of::()) as c_uint + length - } - - pub fn CMSG_FIRSTHDR(mhdr: *const crate::msghdr) -> *mut cmsghdr { - if ((*mhdr).msg_controllen as usize) < size_of::() { - core::ptr::null_mut::() - } else { - (*mhdr).msg_control as *mut cmsghdr - } - } - - pub fn CMSG_NXTHDR(mhdr: *const crate::msghdr, cmsg: *const cmsghdr) -> *mut cmsghdr { - if cmsg.is_null() { - return crate::CMSG_FIRSTHDR(mhdr); - } - let next = - _CMSG_HDR_ALIGN(cmsg as usize + (*cmsg).cmsg_len as usize + size_of::()); - let max = (*mhdr).msg_control as usize + (*mhdr).msg_controllen as usize; - if next > max { - core::ptr::null_mut::() - } else { - _CMSG_HDR_ALIGN(cmsg as usize + (*cmsg).cmsg_len as usize) as *mut cmsghdr - } - } - - pub const fn CMSG_SPACE(length: c_uint) -> c_uint { - _CMSG_HDR_ALIGN(size_of::() as usize + length as usize) as c_uint - } - - pub fn FD_CLR(fd: c_int, set: *mut fd_set) -> () { - let bits = size_of_val(&(*set).fds_bits[0]) * 8; - let fd = fd as usize; - (*set).fds_bits[fd / bits] &= !(1 << (fd % bits)); - return; - } - - pub fn FD_ISSET(fd: c_int, set: *const fd_set) -> bool { - let bits = size_of_val(&(*set).fds_bits[0]) * 8; - let fd = fd as usize; - return ((*set).fds_bits[fd / bits] & (1 << (fd % bits))) != 0; - } - - pub fn FD_SET(fd: c_int, set: *mut fd_set) -> () { - let bits = size_of_val(&(*set).fds_bits[0]) * 8; - let fd = fd as usize; - (*set).fds_bits[fd / bits] |= 1 << (fd % bits); - return; - } - - pub fn FD_ZERO(set: *mut fd_set) -> () { - for slot in (*set).fds_bits.iter_mut() { - *slot = 0; - } - } -} - -safe_f! 
{ - pub fn SIGRTMAX() -> c_int { - unsafe { crate::sysconf(_SC_SIGRT_MAX) as c_int } - } - - pub fn SIGRTMIN() -> c_int { - unsafe { crate::sysconf(_SC_SIGRT_MIN) as c_int } - } - - pub const fn WIFEXITED(status: c_int) -> bool { - (status & 0xFF) == 0 - } - - pub const fn WEXITSTATUS(status: c_int) -> c_int { - (status >> 8) & 0xFF - } - - pub const fn WTERMSIG(status: c_int) -> c_int { - status & 0x7F - } - - pub const fn WIFCONTINUED(status: c_int) -> bool { - (status & 0xffff) == 0xffff - } - - pub const fn WSTOPSIG(status: c_int) -> c_int { - (status & 0xff00) >> 8 - } - - pub const fn WIFSIGNALED(status: c_int) -> bool { - ((status & 0xff) > 0) && (status & 0xff00 == 0) - } - - pub const fn WIFSTOPPED(status: c_int) -> bool { - ((status & 0xff) == 0x7f) && ((status & 0xff00) != 0) - } - - pub const fn WCOREDUMP(status: c_int) -> bool { - (status & 0x80) != 0 - } - - pub const fn MR_GET_TYPE(flags: c_uint) -> c_uint { - flags & 0x0000ffff - } -} - -extern "C" { - pub fn getrlimit(resource: c_int, rlim: *mut crate::rlimit) -> c_int; - pub fn setrlimit(resource: c_int, rlim: *const crate::rlimit) -> c_int; - - pub fn strerror_r(errnum: c_int, buf: *mut c_char, buflen: size_t) -> c_int; - - pub fn sem_destroy(sem: *mut sem_t) -> c_int; - pub fn sem_init(sem: *mut sem_t, pshared: c_int, value: c_uint) -> c_int; - - pub fn abs(i: c_int) -> c_int; - pub fn acct(filename: *const c_char) -> c_int; - pub fn dirfd(dirp: *mut crate::DIR) -> c_int; - pub fn labs(i: c_long) -> c_long; - pub fn rand() -> c_int; - pub fn srand(seed: c_uint); - pub fn getentropy(buf: *mut c_void, buflen: size_t) -> c_int; - pub fn getrandom(bbuf: *mut c_void, buflen: size_t, flags: c_uint) -> ssize_t; - - pub fn gettimeofday(tp: *mut crate::timeval, tz: *mut c_void) -> c_int; - pub fn settimeofday(tp: *const crate::timeval, tz: *const c_void) -> c_int; - pub fn getifaddrs(ifap: *mut *mut crate::ifaddrs) -> c_int; - pub fn freeifaddrs(ifa: *mut crate::ifaddrs); - - pub fn stack_getbounds(sp: *mut crate::stack_t) -> c_int; - pub fn getgrouplist( - name: *const c_char, - basegid: crate::gid_t, - groups: *mut crate::gid_t, - ngroups: *mut c_int, - ) -> c_int; - pub fn initgroups(name: *const c_char, basegid: crate::gid_t) -> c_int; - pub fn setgroups(ngroups: c_int, ptr: *const crate::gid_t) -> c_int; - pub fn ioctl(fildes: c_int, request: c_int, ...) 
-> c_int; - pub fn mprotect(addr: *mut c_void, len: size_t, prot: c_int) -> c_int; - pub fn ___errno() -> *mut c_int; - pub fn clock_getres(clk_id: crate::clockid_t, tp: *mut crate::timespec) -> c_int; - pub fn clock_gettime(clk_id: crate::clockid_t, tp: *mut crate::timespec) -> c_int; - pub fn clock_nanosleep( - clk_id: crate::clockid_t, - flags: c_int, - rqtp: *const crate::timespec, - rmtp: *mut crate::timespec, - ) -> c_int; - pub fn clock_settime(clk_id: crate::clockid_t, tp: *const crate::timespec) -> c_int; - pub fn getnameinfo( - sa: *const crate::sockaddr, - salen: crate::socklen_t, - host: *mut c_char, - hostlen: crate::socklen_t, - serv: *mut c_char, - servlen: crate::socklen_t, - flags: c_int, - ) -> c_int; - pub fn setpwent(); - pub fn endpwent(); - pub fn getpwent() -> *mut passwd; - pub fn fdatasync(fd: c_int) -> c_int; - pub fn nl_langinfo_l(item: crate::nl_item, locale: crate::locale_t) -> *mut c_char; - pub fn duplocale(base: crate::locale_t) -> crate::locale_t; - pub fn freelocale(loc: crate::locale_t); - pub fn newlocale(mask: c_int, locale: *const c_char, base: crate::locale_t) -> crate::locale_t; - pub fn uselocale(loc: crate::locale_t) -> crate::locale_t; - pub fn getprogname() -> *const c_char; - pub fn setprogname(name: *const c_char); - pub fn getloadavg(loadavg: *mut c_double, nelem: c_int) -> c_int; - pub fn getpriority(which: c_int, who: c_int) -> c_int; - pub fn setpriority(which: c_int, who: c_int, prio: c_int) -> c_int; - - pub fn mknodat(dirfd: c_int, pathname: *const c_char, mode: mode_t, dev: dev_t) -> c_int; - pub fn mkfifoat(dirfd: c_int, pathname: *const c_char, mode: mode_t) -> c_int; - pub fn sethostname(name: *const c_char, len: c_int) -> c_int; - pub fn if_nameindex() -> *mut if_nameindex; - pub fn if_freenameindex(ptr: *mut if_nameindex); - pub fn pthread_create( - native: *mut crate::pthread_t, - attr: *const crate::pthread_attr_t, - f: extern "C" fn(*mut c_void) -> *mut c_void, - value: *mut c_void, - ) -> c_int; - pub fn pthread_attr_getstack( - attr: *const crate::pthread_attr_t, - stackaddr: *mut *mut c_void, - stacksize: *mut size_t, - ) -> c_int; - pub fn pthread_condattr_getclock( - attr: *const pthread_condattr_t, - clock_id: *mut clockid_t, - ) -> c_int; - pub fn pthread_condattr_setclock( - attr: *mut pthread_condattr_t, - clock_id: crate::clockid_t, - ) -> c_int; - pub fn sem_timedwait(sem: *mut sem_t, abstime: *const crate::timespec) -> c_int; - pub fn sem_getvalue(sem: *mut sem_t, sval: *mut c_int) -> c_int; - pub fn pthread_mutex_timedlock( - lock: *mut pthread_mutex_t, - abstime: *const crate::timespec, - ) -> c_int; - pub fn pthread_getname_np(tid: crate::pthread_t, name: *mut c_char, len: size_t) -> c_int; - pub fn pthread_setname_np(tid: crate::pthread_t, name: *const c_char) -> c_int; - pub fn waitid( - idtype: idtype_t, - id: id_t, - infop: *mut crate::siginfo_t, - options: c_int, - ) -> c_int; - - #[cfg_attr(target_os = "illumos", link_name = "_glob_ext")] - pub fn glob( - pattern: *const c_char, - flags: c_int, - errfunc: Option c_int>, - pglob: *mut crate::glob_t, - ) -> c_int; - - #[cfg_attr(target_os = "illumos", link_name = "_globfree_ext")] - pub fn globfree(pglob: *mut crate::glob_t); - - pub fn posix_fallocate(fd: c_int, offset: off_t, len: off_t) -> c_int; - pub fn posix_madvise(addr: *mut c_void, len: size_t, advice: c_int) -> c_int; - - pub fn posix_spawn( - pid: *mut crate::pid_t, - path: *const c_char, - file_actions: *const posix_spawn_file_actions_t, - attrp: *const posix_spawnattr_t, - argv: *const *mut 
c_char, - envp: *const *mut c_char, - ) -> c_int; - pub fn posix_spawnp( - pid: *mut crate::pid_t, - file: *const c_char, - file_actions: *const posix_spawn_file_actions_t, - attrp: *const posix_spawnattr_t, - argv: *const *mut c_char, - envp: *const *mut c_char, - ) -> c_int; - - pub fn posix_spawn_file_actions_init(file_actions: *mut posix_spawn_file_actions_t) -> c_int; - pub fn posix_spawn_file_actions_destroy(file_actions: *mut posix_spawn_file_actions_t) - -> c_int; - pub fn posix_spawn_file_actions_addopen( - file_actions: *mut posix_spawn_file_actions_t, - fildes: c_int, - path: *const c_char, - oflag: c_int, - mode: mode_t, - ) -> c_int; - pub fn posix_spawn_file_actions_addclose( - file_actions: *mut posix_spawn_file_actions_t, - fildes: c_int, - ) -> c_int; - pub fn posix_spawn_file_actions_adddup2( - file_actions: *mut posix_spawn_file_actions_t, - fildes: c_int, - newfildes: c_int, - ) -> c_int; - pub fn posix_spawn_file_actions_addclosefrom_np( - file_actions: *mut posix_spawn_file_actions_t, - lowfiledes: c_int, - ) -> c_int; - pub fn posix_spawn_file_actions_addchdir( - file_actions: *mut posix_spawn_file_actions_t, - path: *const c_char, - ) -> c_int; - pub fn posix_spawn_file_actions_addchdir_np( - file_actions: *mut posix_spawn_file_actions_t, - path: *const c_char, - ) -> c_int; - pub fn posix_spawn_file_actions_addfchdir( - file_actions: *mut posix_spawn_file_actions_t, - fd: c_int, - ) -> c_int; - - pub fn posix_spawnattr_init(attr: *mut posix_spawnattr_t) -> c_int; - pub fn posix_spawnattr_destroy(attr: *mut posix_spawnattr_t) -> c_int; - pub fn posix_spawnattr_setflags(attr: *mut posix_spawnattr_t, flags: c_short) -> c_int; - pub fn posix_spawnattr_getflags(attr: *const posix_spawnattr_t, flags: *mut c_short) -> c_int; - pub fn posix_spawnattr_setpgroup(attr: *mut posix_spawnattr_t, pgroup: crate::pid_t) -> c_int; - pub fn posix_spawnattr_getpgroup( - attr: *const posix_spawnattr_t, - _pgroup: *mut crate::pid_t, - ) -> c_int; - pub fn posix_spawnattr_setschedparam( - attr: *mut posix_spawnattr_t, - param: *const crate::sched_param, - ) -> c_int; - pub fn posix_spawnattr_getschedparam( - attr: *const posix_spawnattr_t, - param: *mut crate::sched_param, - ) -> c_int; - pub fn posix_spawnattr_setschedpolicy(attr: *mut posix_spawnattr_t, policy: c_int) -> c_int; - pub fn posix_spawnattr_getschedpolicy( - attr: *const posix_spawnattr_t, - _policy: *mut c_int, - ) -> c_int; - pub fn posix_spawnattr_setsigdefault( - attr: *mut posix_spawnattr_t, - sigdefault: *const sigset_t, - ) -> c_int; - pub fn posix_spawnattr_getsigdefault( - attr: *const posix_spawnattr_t, - sigdefault: *mut sigset_t, - ) -> c_int; - pub fn posix_spawnattr_setsigignore_np( - attr: *mut posix_spawnattr_t, - sigignore: *const sigset_t, - ) -> c_int; - pub fn posix_spawnattr_getsigignore_np( - attr: *const posix_spawnattr_t, - sigignore: *mut sigset_t, - ) -> c_int; - pub fn posix_spawnattr_setsigmask( - attr: *mut posix_spawnattr_t, - sigmask: *const sigset_t, - ) -> c_int; - pub fn posix_spawnattr_getsigmask( - attr: *const posix_spawnattr_t, - sigmask: *mut sigset_t, - ) -> c_int; - - pub fn shmat(shmid: c_int, shmaddr: *const c_void, shmflg: c_int) -> *mut c_void; - - pub fn shmctl(shmid: c_int, cmd: c_int, buf: *mut crate::shmid_ds) -> c_int; - - pub fn shmdt(shmaddr: *const c_void) -> c_int; - - pub fn shmget(key: key_t, size: size_t, shmflg: c_int) -> c_int; - - pub fn shm_open(name: *const c_char, oflag: c_int, mode: mode_t) -> c_int; - pub fn shm_unlink(name: *const c_char) -> c_int; - - pub fn 
seekdir(dirp: *mut crate::DIR, loc: c_long); - - pub fn telldir(dirp: *mut crate::DIR) -> c_long; - pub fn madvise(addr: *mut c_void, len: size_t, advice: c_int) -> c_int; - - pub fn msync(addr: *mut c_void, len: size_t, flags: c_int) -> c_int; - - pub fn memalign(align: size_t, size: size_t) -> *mut c_void; - - pub fn recvfrom( - socket: c_int, - buf: *mut c_void, - len: size_t, - flags: c_int, - addr: *mut crate::sockaddr, - addrlen: *mut crate::socklen_t, - ) -> ssize_t; - pub fn mkstemps(template: *mut c_char, suffixlen: c_int) -> c_int; - pub fn futimesat(fd: c_int, path: *const c_char, times: *const crate::timeval) -> c_int; - pub fn futimens(dirfd: c_int, times: *const crate::timespec) -> c_int; - pub fn utimensat( - dirfd: c_int, - path: *const c_char, - times: *const crate::timespec, - flag: c_int, - ) -> c_int; - pub fn nl_langinfo(item: crate::nl_item) -> *mut c_char; - - #[link_name = "__xnet_bind"] - pub fn bind( - socket: c_int, - address: *const crate::sockaddr, - address_len: crate::socklen_t, - ) -> c_int; - - pub fn writev(fd: c_int, iov: *const crate::iovec, iovcnt: c_int) -> ssize_t; - pub fn readv(fd: c_int, iov: *const crate::iovec, iovcnt: c_int) -> ssize_t; - - #[link_name = "__xnet_sendmsg"] - pub fn sendmsg(fd: c_int, msg: *const crate::msghdr, flags: c_int) -> ssize_t; - #[link_name = "__xnet_recvmsg"] - pub fn recvmsg(fd: c_int, msg: *mut crate::msghdr, flags: c_int) -> ssize_t; - pub fn accept4( - fd: c_int, - address: *mut sockaddr, - address_len: *mut socklen_t, - flags: c_int, - ) -> c_int; - - pub fn mq_open(name: *const c_char, oflag: c_int, ...) -> crate::mqd_t; - pub fn mq_close(mqd: crate::mqd_t) -> c_int; - pub fn mq_unlink(name: *const c_char) -> c_int; - pub fn mq_receive( - mqd: crate::mqd_t, - msg_ptr: *mut c_char, - msg_len: size_t, - msg_prio: *mut c_uint, - ) -> ssize_t; - pub fn mq_timedreceive( - mqd: crate::mqd_t, - msg_ptr: *mut c_char, - msg_len: size_t, - msg_prio: *mut c_uint, - abs_timeout: *const crate::timespec, - ) -> ssize_t; - pub fn mq_send( - mqd: crate::mqd_t, - msg_ptr: *const c_char, - msg_len: size_t, - msg_prio: c_uint, - ) -> c_int; - pub fn mq_timedsend( - mqd: crate::mqd_t, - msg_ptr: *const c_char, - msg_len: size_t, - msg_prio: c_uint, - abs_timeout: *const crate::timespec, - ) -> c_int; - pub fn mq_getattr(mqd: crate::mqd_t, attr: *mut crate::mq_attr) -> c_int; - pub fn mq_setattr( - mqd: crate::mqd_t, - newattr: *const crate::mq_attr, - oldattr: *mut crate::mq_attr, - ) -> c_int; - pub fn port_create() -> c_int; - pub fn port_associate( - port: c_int, - source: c_int, - object: crate::uintptr_t, - events: c_int, - user: *mut c_void, - ) -> c_int; - pub fn port_dissociate(port: c_int, source: c_int, object: crate::uintptr_t) -> c_int; - pub fn port_get(port: c_int, pe: *mut port_event, timeout: *mut crate::timespec) -> c_int; - pub fn port_getn( - port: c_int, - pe_list: *mut port_event, - max: c_uint, - nget: *mut c_uint, - timeout: *mut crate::timespec, - ) -> c_int; - pub fn port_send(port: c_int, events: c_int, user: *mut c_void) -> c_int; - pub fn port_sendn( - port_list: *mut c_int, - error_list: *mut c_int, - nent: c_uint, - events: c_int, - user: *mut c_void, - ) -> c_int; - #[cfg_attr( - any(target_os = "solaris", target_os = "illumos"), - link_name = "__posix_getgrgid_r" - )] - pub fn getgrgid_r( - gid: crate::gid_t, - grp: *mut crate::group, - buf: *mut c_char, - buflen: size_t, - result: *mut *mut crate::group, - ) -> c_int; - pub fn sigaltstack(ss: *const stack_t, oss: *mut stack_t) -> c_int; - pub fn 
sigsuspend(mask: *const crate::sigset_t) -> c_int; - pub fn sem_close(sem: *mut sem_t) -> c_int; - pub fn getdtablesize() -> c_int; - - #[cfg_attr( - any(target_os = "solaris", target_os = "illumos"), - link_name = "__posix_getgrnam_r" - )] - pub fn getgrnam_r( - name: *const c_char, - grp: *mut crate::group, - buf: *mut c_char, - buflen: size_t, - result: *mut *mut crate::group, - ) -> c_int; - pub fn thr_self() -> crate::thread_t; - pub fn pthread_sigmask(how: c_int, set: *const sigset_t, oldset: *mut sigset_t) -> c_int; - pub fn sem_open(name: *const c_char, oflag: c_int, ...) -> *mut sem_t; - pub fn getgrnam(name: *const c_char) -> *mut crate::group; - #[cfg_attr(target_os = "solaris", link_name = "__pthread_kill_xpg7")] - pub fn pthread_kill(thread: crate::pthread_t, sig: c_int) -> c_int; - pub fn sched_get_priority_min(policy: c_int) -> c_int; - pub fn sched_get_priority_max(policy: c_int) -> c_int; - pub fn sched_getparam(pid: crate::pid_t, param: *mut sched_param) -> c_int; - pub fn sched_setparam(pid: crate::pid_t, param: *const sched_param) -> c_int; - pub fn sched_getscheduler(pid: crate::pid_t) -> c_int; - pub fn sched_setscheduler( - pid: crate::pid_t, - policy: c_int, - param: *const crate::sched_param, - ) -> c_int; - pub fn sem_unlink(name: *const c_char) -> c_int; - pub fn daemon(nochdir: c_int, noclose: c_int) -> c_int; - #[cfg_attr( - any(target_os = "solaris", target_os = "illumos"), - link_name = "__posix_getpwnam_r" - )] - pub fn getpwnam_r( - name: *const c_char, - pwd: *mut passwd, - buf: *mut c_char, - buflen: size_t, - result: *mut *mut passwd, - ) -> c_int; - #[cfg_attr( - any(target_os = "solaris", target_os = "illumos"), - link_name = "__posix_getpwuid_r" - )] - pub fn getpwuid_r( - uid: crate::uid_t, - pwd: *mut passwd, - buf: *mut c_char, - buflen: size_t, - result: *mut *mut passwd, - ) -> c_int; - #[cfg_attr( - any(target_os = "solaris", target_os = "illumos"), - link_name = "getpwent_r" - )] - fn native_getpwent_r(pwd: *mut passwd, buf: *mut c_char, buflen: c_int) -> *mut passwd; - #[cfg_attr( - any(target_os = "solaris", target_os = "illumos"), - link_name = "getgrent_r" - )] - fn native_getgrent_r( - grp: *mut crate::group, - buf: *mut c_char, - buflen: c_int, - ) -> *mut crate::group; - #[cfg_attr( - any(target_os = "solaris", target_os = "illumos"), - link_name = "__posix_sigwait" - )] - pub fn sigwait(set: *const sigset_t, sig: *mut c_int) -> c_int; - pub fn pthread_atfork( - prepare: Option, - parent: Option, - child: Option, - ) -> c_int; - pub fn getgrgid(gid: crate::gid_t) -> *mut crate::group; - pub fn setgrent(); - pub fn endgrent(); - pub fn getgrent() -> *mut crate::group; - pub fn popen(command: *const c_char, mode: *const c_char) -> *mut crate::FILE; - - pub fn dup3(src: c_int, dst: c_int, flags: c_int) -> c_int; - pub fn uname(buf: *mut crate::utsname) -> c_int; - pub fn pipe2(fds: *mut c_int, flags: c_int) -> c_int; - - pub fn makeutx(ux: *const utmpx) -> *mut utmpx; - pub fn modutx(ux: *const utmpx) -> *mut utmpx; - pub fn updwtmpx(file: *const c_char, ut: *mut utmpx); - pub fn utmpxname(file: *const c_char) -> c_int; - pub fn getutxent() -> *mut utmpx; - pub fn getutxid(ut: *const utmpx) -> *mut utmpx; - pub fn getutxline(ut: *const utmpx) -> *mut utmpx; - pub fn pututxline(ut: *const utmpx) -> *mut utmpx; - pub fn setutxent(); - pub fn endutxent(); - - pub fn endutent(); - pub fn getutent() -> *mut utmp; - pub fn getutid(u: *const utmp) -> *mut utmp; - pub fn getutline(u: *const utmp) -> *mut utmp; - pub fn pututline(u: *const utmp) -> 
*mut utmp; - pub fn setutent(); - pub fn utmpname(file: *const c_char) -> c_int; - - pub fn getutmp(ux: *const utmpx, u: *mut utmp); - pub fn getutmpx(u: *const utmp, ux: *mut utmpx); - pub fn updwtmp(file: *const c_char, u: *mut utmp); - - pub fn ntp_adjtime(buf: *mut timex) -> c_int; - pub fn ntp_gettime(buf: *mut ntptimeval) -> c_int; - - pub fn timer_create(clock_id: clockid_t, evp: *mut sigevent, timerid: *mut timer_t) -> c_int; - pub fn timer_delete(timerid: timer_t) -> c_int; - pub fn timer_getoverrun(timerid: timer_t) -> c_int; - pub fn timer_gettime(timerid: timer_t, value: *mut itimerspec) -> c_int; - pub fn timer_settime( - timerid: timer_t, - flags: c_int, - value: *const itimerspec, - ovalue: *mut itimerspec, - ) -> c_int; - - pub fn ucred_get(pid: crate::pid_t) -> *mut ucred_t; - pub fn getpeerucred(fd: c_int, ucred: *mut *mut ucred_t) -> c_int; - - pub fn ucred_free(ucred: *mut ucred_t); - - pub fn ucred_geteuid(ucred: *const ucred_t) -> crate::uid_t; - pub fn ucred_getruid(ucred: *const ucred_t) -> crate::uid_t; - pub fn ucred_getsuid(ucred: *const ucred_t) -> crate::uid_t; - pub fn ucred_getegid(ucred: *const ucred_t) -> crate::gid_t; - pub fn ucred_getrgid(ucred: *const ucred_t) -> crate::gid_t; - pub fn ucred_getsgid(ucred: *const ucred_t) -> crate::gid_t; - pub fn ucred_getgroups(ucred: *const ucred_t, groups: *mut *const crate::gid_t) -> c_int; - pub fn ucred_getpid(ucred: *const ucred_t) -> crate::pid_t; - pub fn ucred_getprojid(ucred: *const ucred_t) -> projid_t; - pub fn ucred_getzoneid(ucred: *const ucred_t) -> zoneid_t; - pub fn ucred_getpflags(ucred: *const ucred_t, flags: c_uint) -> c_uint; - - pub fn ucred_size() -> size_t; - - pub fn pset_create(newpset: *mut crate::psetid_t) -> c_int; - pub fn pset_destroy(pset: crate::psetid_t) -> c_int; - pub fn pset_assign( - pset: crate::psetid_t, - cpu: crate::processorid_t, - opset: *mut psetid_t, - ) -> c_int; - pub fn pset_info( - pset: crate::psetid_t, - tpe: *mut c_int, - numcpus: *mut c_uint, - cpulist: *mut processorid_t, - ) -> c_int; - pub fn pset_bind( - pset: crate::psetid_t, - idtype: crate::idtype_t, - id: crate::id_t, - opset: *mut psetid_t, - ) -> c_int; - pub fn pset_list(pset: *mut psetid_t, numpsets: *mut c_uint) -> c_int; - pub fn pset_setattr(pset: psetid_t, attr: c_uint) -> c_int; - pub fn pset_getattr(pset: psetid_t, attr: *mut c_uint) -> c_int; - pub fn processor_bind( - idtype: crate::idtype_t, - id: crate::id_t, - new_binding: crate::processorid_t, - old_binding: *mut processorid_t, - ) -> c_int; - pub fn p_online(processorid: crate::processorid_t, flag: c_int) -> c_int; - pub fn processor_info(processorid: crate::processorid_t, infop: *mut processor_info_t) - -> c_int; - - pub fn getexecname() -> *const c_char; - - pub fn gethostid() -> c_long; - - pub fn getpflags(flags: c_uint) -> c_uint; - pub fn setpflags(flags: c_uint, value: c_uint) -> c_int; - - pub fn sysinfo(command: c_int, buf: *mut c_char, count: c_long) -> c_int; - - pub fn faccessat(fd: c_int, path: *const c_char, amode: c_int, flag: c_int) -> c_int; - - // #include - #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] - pub fn dl_iterate_phdr( - callback: Option< - unsafe extern "C" fn(info: *mut dl_phdr_info, size: usize, data: *mut c_void) -> c_int, - >, - data: *mut c_void, - ) -> c_int; - pub fn getpagesize() -> c_int; - pub fn getpagesizes(pagesize: *mut size_t, nelem: c_int) -> c_int; - pub fn mmapobj( - fd: c_int, - flags: c_uint, - storage: *mut mmapobj_result_t, - elements: *mut c_uint, - arg: *mut c_void, - ) -> 
c_int; - pub fn meminfo( - inaddr: *const u64, - addr_count: c_int, - info_req: *const c_uint, - info_count: c_int, - outdata: *mut u64, - validity: *mut c_uint, - ) -> c_int; - - pub fn strsep(string: *mut *mut c_char, delim: *const c_char) -> *mut c_char; - - pub fn getisax(array: *mut u32, n: c_uint) -> c_uint; - - pub fn backtrace(buffer: *mut *mut c_void, size: c_int) -> c_int; - pub fn backtrace_symbols(buffer: *const *mut c_void, size: c_int) -> *mut *mut c_char; - pub fn backtrace_symbols_fd(buffer: *const *mut c_void, size: c_int, fd: c_int); - - pub fn getopt_long( - argc: c_int, - argv: *const *mut c_char, - optstring: *const c_char, - longopts: *const option, - longindex: *mut c_int, - ) -> c_int; - - pub fn sync(); - - pub fn aio_cancel(fd: c_int, aiocbp: *mut aiocb) -> c_int; - pub fn aio_error(aiocbp: *const aiocb) -> c_int; - pub fn aio_fsync(op: c_int, aiocbp: *mut aiocb) -> c_int; - pub fn aio_read(aiocbp: *mut aiocb) -> c_int; - pub fn aio_return(aiocbp: *mut aiocb) -> ssize_t; - pub fn aio_suspend( - aiocb_list: *const *const aiocb, - nitems: c_int, - timeout: *const crate::timespec, - ) -> c_int; - pub fn aio_waitn( - aiocb_list: *mut *mut aiocb, - nent: c_uint, - nwait: *mut c_uint, - timeout: *const crate::timespec, - ) -> c_int; - pub fn aio_write(aiocbp: *mut aiocb) -> c_int; - pub fn lio_listio( - mode: c_int, - aiocb_list: *const *mut aiocb, - nitems: c_int, - sevp: *mut sigevent, - ) -> c_int; - - pub fn __major(version: c_int, devnum: crate::dev_t) -> crate::major_t; - pub fn __minor(version: c_int, devnum: crate::dev_t) -> crate::minor_t; - pub fn __makedev( - version: c_int, - majdev: crate::major_t, - mindev: crate::minor_t, - ) -> crate::dev_t; - - pub fn arc4random() -> u32; - pub fn arc4random_buf(buf: *mut c_void, nbytes: size_t); - pub fn arc4random_uniform(upper_bound: u32) -> u32; - - pub fn secure_getenv(name: *const c_char) -> *mut c_char; - - #[cfg_attr(target_os = "solaris", link_name = "__strftime_xpg7")] - pub fn strftime( - s: *mut c_char, - maxsize: size_t, - format: *const c_char, - timeptr: *const crate::tm, - ) -> size_t; - pub fn strftime_l( - s: *mut c_char, - maxsize: size_t, - format: *const c_char, - timeptr: *const crate::tm, - loc: crate::locale_t, - ) -> size_t; -} - -#[link(name = "sendfile")] -extern "C" { - pub fn sendfile(out_fd: c_int, in_fd: c_int, off: *mut off_t, len: size_t) -> ssize_t; - pub fn sendfilev( - fildes: c_int, - vec: *const sendfilevec_t, - sfvcnt: c_int, - xferred: *mut size_t, - ) -> ssize_t; -} - -#[link(name = "lgrp")] -extern "C" { - pub fn lgrp_init(view: lgrp_view_t) -> lgrp_cookie_t; - pub fn lgrp_fini(cookie: lgrp_cookie_t) -> c_int; - pub fn lgrp_affinity_get( - idtype: crate::idtype_t, - id: crate::id_t, - lgrp: crate::lgrp_id_t, - ) -> crate::lgrp_affinity_t; - pub fn lgrp_affinity_set( - idtype: crate::idtype_t, - id: crate::id_t, - lgrp: crate::lgrp_id_t, - aff: lgrp_affinity_t, - ) -> c_int; - pub fn lgrp_cpus( - cookie: crate::lgrp_cookie_t, - lgrp: crate::lgrp_id_t, - cpuids: *mut crate::processorid_t, - count: c_uint, - content: crate::lgrp_content_t, - ) -> c_int; - pub fn lgrp_mem_size( - cookie: crate::lgrp_cookie_t, - lgrp: crate::lgrp_id_t, - tpe: crate::lgrp_mem_size_flag_t, - content: crate::lgrp_content_t, - ) -> crate::lgrp_mem_size_t; - pub fn lgrp_nlgrps(cookie: crate::lgrp_cookie_t) -> c_int; - pub fn lgrp_view(cookie: crate::lgrp_cookie_t) -> crate::lgrp_view_t; - pub fn lgrp_home(idtype: crate::idtype_t, id: crate::id_t) -> crate::lgrp_id_t; - pub fn lgrp_version(version: c_int) 
-> c_int; - pub fn lgrp_resources( - cookie: crate::lgrp_cookie_t, - lgrp: crate::lgrp_id_t, - lgrps: *mut crate::lgrp_id_t, - count: c_uint, - tpe: crate::lgrp_rsrc_t, - ) -> c_int; - pub fn lgrp_root(cookie: crate::lgrp_cookie_t) -> crate::lgrp_id_t; -} - -pub unsafe fn major(device: crate::dev_t) -> crate::major_t { - __major(NEWDEV, device) -} - -pub unsafe fn minor(device: crate::dev_t) -> crate::minor_t { - __minor(NEWDEV, device) -} - -pub unsafe fn makedev(maj: crate::major_t, min: crate::minor_t) -> crate::dev_t { - __makedev(NEWDEV, maj, min) -} - -mod compat; -pub use self::compat::*; - -cfg_if! { - if #[cfg(target_os = "illumos")] { - mod illumos; - pub use self::illumos::*; - } else if #[cfg(target_os = "solaris")] { - mod solaris; - pub use self::solaris::*; - } else { - // Unknown target_os - } -} - -cfg_if! { - if #[cfg(target_arch = "x86_64")] { - mod x86_64; - mod x86_common; - pub use self::x86_64::*; - pub use self::x86_common::*; - } else if #[cfg(target_arch = "x86")] { - mod x86; - mod x86_common; - pub use self::x86::*; - pub use self::x86_common::*; - } -} diff --git a/vendor/libc/src/unix/solarish/solaris.rs b/vendor/libc/src/unix/solarish/solaris.rs deleted file mode 100644 index 58b097a16269b9..00000000000000 --- a/vendor/libc/src/unix/solarish/solaris.rs +++ /dev/null @@ -1,239 +0,0 @@ -use crate::prelude::*; -use crate::{ - exit_status, off_t, termios, NET_MAC_AWARE, NET_MAC_AWARE_INHERIT, PRIV_AWARE_RESET, - PRIV_DEBUG, PRIV_PFEXEC, PRIV_XPOLICY, -}; - -pub type door_attr_t = c_uint; -pub type door_id_t = c_ulonglong; -pub type lgrp_affinity_t = c_uint; - -e! { - #[repr(u32)] - pub enum lgrp_rsrc_t { - LGRP_RSRC_CPU = 0, - LGRP_RSRC_MEM = 1, - LGRP_RSRC_TYPES = 2, - } -} - -s! { - pub struct aiocb { - pub aio_fildes: c_int, - pub aio_buf: *mut c_void, - pub aio_nbytes: size_t, - pub aio_offset: off_t, - pub aio_reqprio: c_int, - pub aio_sigevent: crate::sigevent, - pub aio_lio_opcode: c_int, - pub aio_resultp: crate::aio_result_t, - pub aio_state: c_char, - pub aio_returned: c_char, - pub aio__pad1: [c_char; 2], - pub aio_flags: c_int, - } - - pub struct shmid_ds { - pub shm_perm: crate::ipc_perm, - pub shm_segsz: size_t, - pub shm_flags: crate::uintptr_t, - pub shm_lkcnt: c_ushort, - pub shm_lpid: crate::pid_t, - pub shm_cpid: crate::pid_t, - pub shm_nattch: crate::shmatt_t, - pub shm_cnattch: c_ulong, - pub shm_atime: crate::time_t, - pub shm_dtime: crate::time_t, - pub shm_ctime: crate::time_t, - pub shm_amp: *mut c_void, - pub shm_gransize: u64, - pub shm_allocated: u64, - pub shm_pad4: [i64; 1], - } - - pub struct xrs_t { - pub xrs_id: c_ulong, - pub xrs_ptr: *mut c_char, - } -} - -s_no_extra_traits! 
{ - #[repr(packed)] - pub struct door_desc_t__d_data__d_desc { - pub d_descriptor: c_int, - pub d_id: crate::door_id_t, - } - - pub union door_desc_t__d_data { - pub d_desc: door_desc_t__d_data__d_desc, - d_resv: [c_int; 5], /* Check out /usr/include/sys/door.h */ - } - - pub struct door_desc_t { - pub d_attributes: door_attr_t, - pub d_data: door_desc_t__d_data, - } - - pub struct door_arg_t { - pub data_ptr: *const c_char, - pub data_size: size_t, - pub desc_ptr: *const door_desc_t, - pub dec_num: c_uint, - pub rbuf: *const c_char, - pub rsize: size_t, - } - - pub struct utmpx { - pub ut_user: [c_char; _UTMP_USER_LEN], - pub ut_id: [c_char; _UTMP_ID_LEN], - pub ut_line: [c_char; _UTMP_LINE_LEN], - pub ut_pid: crate::pid_t, - pub ut_type: c_short, - pub ut_exit: exit_status, - pub ut_tv: crate::timeval, - pub ut_session: c_int, - pub pad: [c_int; 5], - pub ut_syslen: c_short, - pub ut_host: [c_char; 257], - } -} - -cfg_if! { - if #[cfg(feature = "extra_traits")] { - impl PartialEq for utmpx { - fn eq(&self, other: &utmpx) -> bool { - self.ut_type == other.ut_type - && self.ut_pid == other.ut_pid - && self.ut_user == other.ut_user - && self.ut_line == other.ut_line - && self.ut_id == other.ut_id - && self.ut_exit == other.ut_exit - && self.ut_session == other.ut_session - && self.ut_tv == other.ut_tv - && self.ut_syslen == other.ut_syslen - && self.pad == other.pad - && self - .ut_host - .iter() - .zip(other.ut_host.iter()) - .all(|(a, b)| a == b) - } - } - - impl Eq for utmpx {} - - impl hash::Hash for utmpx { - fn hash(&self, state: &mut H) { - self.ut_user.hash(state); - self.ut_type.hash(state); - self.ut_pid.hash(state); - self.ut_line.hash(state); - self.ut_id.hash(state); - self.ut_host.hash(state); - self.ut_exit.hash(state); - self.ut_session.hash(state); - self.ut_tv.hash(state); - self.ut_syslen.hash(state); - self.pad.hash(state); - } - } - } -} - -// FIXME(solaris): O_DIRECT and SIGINFO are NOT available on Solaris. -// But in past they were defined here and thus other crates expected them. -// Latest version v0.29.0 of Nix crate still expects this. Since last -// version of Nix crate is almost one year ago let's define these two -// temporarily before new Nix version is released. 
-pub const O_DIRECT: c_int = 0x2000000; -pub const SIGINFO: c_int = 41; - -pub const _UTMP_USER_LEN: usize = 32; -pub const _UTMP_LINE_LEN: usize = 32; -pub const _UTMP_ID_LEN: usize = 4; - -pub const PORT_SOURCE_POSTWAIT: c_int = 8; -pub const PORT_SOURCE_SIGNAL: c_int = 9; - -pub const AF_LOCAL: c_int = 1; // AF_UNIX -pub const AF_FILE: c_int = 1; // AF_UNIX - -pub const TCP_KEEPIDLE: c_int = 0x1d; -pub const TCP_KEEPINTVL: c_int = 0x1e; -pub const TCP_KEEPCNT: c_int = 0x1f; - -pub const F_DUPFD_CLOEXEC: c_int = 47; -pub const F_DUPFD_CLOFORK: c_int = 49; -pub const F_DUP2FD_CLOEXEC: c_int = 48; -pub const F_DUP2FD_CLOFORK: c_int = 50; - -pub const _PC_LAST: c_int = 102; - -pub const PRIV_PROC_SENSITIVE: c_uint = 0x0008; -pub const PRIV_PFEXEC_AUTH: c_uint = 0x0200; -pub const PRIV_PROC_TPD: c_uint = 0x0400; -pub const PRIV_TPD_UNSAFE: c_uint = 0x0800; -pub const PRIV_PROC_TPD_RESET: c_uint = 0x1000; -pub const PRIV_TPD_KILLABLE: c_uint = 0x2000; - -pub const POSIX_SPAWN_SETSID: c_short = 0x400; - -pub const PRIV_USER: c_uint = PRIV_DEBUG - | PRIV_PROC_SENSITIVE - | NET_MAC_AWARE - | NET_MAC_AWARE_INHERIT - | PRIV_XPOLICY - | PRIV_AWARE_RESET - | PRIV_PFEXEC - | PRIV_PFEXEC_AUTH - | PRIV_PROC_TPD - | PRIV_TPD_UNSAFE - | PRIV_TPD_KILLABLE - | PRIV_PROC_TPD_RESET; - -extern "C" { - // DIFF(main): changed to `*const *mut` in e77f551de9 - pub fn fexecve(fd: c_int, argv: *const *const c_char, envp: *const *const c_char) -> c_int; - - pub fn mincore(addr: *mut c_void, len: size_t, vec: *mut c_char) -> c_int; - - pub fn door_call(d: c_int, params: *mut door_arg_t) -> c_int; - pub fn door_return( - data_ptr: *mut c_char, - data_size: size_t, - desc_ptr: *mut door_desc_t, - num_desc: c_uint, - ) -> c_int; - pub fn door_create( - server_procedure: extern "C" fn( - cookie: *mut c_void, - argp: *mut c_char, - arg_size: size_t, - dp: *mut door_desc_t, - n_desc: c_uint, - ), - cookie: *mut c_void, - attributes: door_attr_t, - ) -> c_int; - - pub fn fattach(fildes: c_int, path: *const c_char) -> c_int; - - pub fn pthread_getattr_np(thread: crate::pthread_t, attr: *mut crate::pthread_attr_t) -> c_int; - - pub fn euidaccess(path: *const c_char, amode: c_int) -> c_int; - - pub fn openpty( - amain: *mut c_int, - asubord: *mut c_int, - name: *mut c_char, - termp: *mut termios, - winp: *mut crate::winsize, - ) -> c_int; - - pub fn forkpty( - amain: *mut c_int, - name: *mut c_char, - termp: *mut termios, - winp: *mut crate::winsize, - ) -> crate::pid_t; -} diff --git a/vendor/libc/src/unix/solarish/x86.rs b/vendor/libc/src/unix/solarish/x86.rs deleted file mode 100644 index a37ed3d74e978c..00000000000000 --- a/vendor/libc/src/unix/solarish/x86.rs +++ /dev/null @@ -1,31 +0,0 @@ -use crate::prelude::*; - -pub type Elf32_Addr = c_ulong; -pub type Elf32_Half = c_ushort; -pub type Elf32_Off = c_ulong; -pub type Elf32_Sword = c_long; -pub type Elf32_Word = c_ulong; -pub type Elf32_Lword = c_ulonglong; -pub type Elf32_Phdr = __c_anonymous_Elf32_Phdr; - -s! 
{ - pub struct __c_anonymous_Elf32_Phdr { - pub p_type: Elf32_Word, - pub p_offset: Elf32_Off, - pub p_vaddr: Elf32_Addr, - pub p_paddr: Elf32_Addr, - pub p_filesz: Elf32_Word, - pub p_memsz: Elf32_Word, - pub p_flags: Elf32_Word, - pub p_align: Elf32_Word, - } - - pub struct dl_phdr_info { - pub dlpi_addr: Elf32_Addr, - pub dlpi_name: *const c_char, - pub dlpi_phdr: *const Elf32_Phdr, - pub dlpi_phnum: Elf32_Half, - pub dlpi_adds: c_ulonglong, - pub dlpi_subs: c_ulonglong, - } -} diff --git a/vendor/libc/src/unix/solarish/x86_64.rs b/vendor/libc/src/unix/solarish/x86_64.rs deleted file mode 100644 index a45ca4b7d09761..00000000000000 --- a/vendor/libc/src/unix/solarish/x86_64.rs +++ /dev/null @@ -1,170 +0,0 @@ -use crate::prelude::*; - -cfg_if! { - if #[cfg(target_os = "solaris")] { - use crate::unix::solarish::solaris; - } -} - -pub type greg_t = c_long; - -pub type Elf64_Addr = c_ulong; -pub type Elf64_Half = c_ushort; -pub type Elf64_Off = c_ulong; -pub type Elf64_Sword = c_int; -pub type Elf64_Sxword = c_long; -pub type Elf64_Word = c_uint; -pub type Elf64_Xword = c_ulong; -pub type Elf64_Lword = c_ulong; -pub type Elf64_Phdr = __c_anonymous_Elf64_Phdr; - -s! { - pub struct __c_anonymous_fpchip_state { - pub cw: u16, - pub sw: u16, - pub fctw: u8, - pub __fx_rsvd: u8, - pub fop: u16, - pub rip: u64, - pub rdp: u64, - pub mxcsr: u32, - pub mxcsr_mask: u32, - pub st: [crate::upad128_t; 8], - pub xmm: [crate::upad128_t; 16], - pub __fx_ign: [crate::upad128_t; 6], - pub status: u32, - pub xstatus: u32, - } - - pub struct __c_anonymous_Elf64_Phdr { - pub p_type: crate::Elf64_Word, - pub p_flags: crate::Elf64_Word, - pub p_offset: crate::Elf64_Off, - pub p_vaddr: crate::Elf64_Addr, - pub p_paddr: crate::Elf64_Addr, - pub p_filesz: crate::Elf64_Xword, - pub p_memsz: crate::Elf64_Xword, - pub p_align: crate::Elf64_Xword, - } - - pub struct dl_phdr_info { - pub dlpi_addr: crate::Elf64_Addr, - pub dlpi_name: *const c_char, - pub dlpi_phdr: *const crate::Elf64_Phdr, - pub dlpi_phnum: crate::Elf64_Half, - pub dlpi_adds: c_ulonglong, - pub dlpi_subs: c_ulonglong, - #[cfg(target_os = "solaris")] - pub dlpi_tls_modid: c_ulong, - #[cfg(target_os = "solaris")] - pub dlpi_tls_data: *mut c_void, - } -} - -s_no_extra_traits! { - pub union __c_anonymous_fp_reg_set { - pub fpchip_state: __c_anonymous_fpchip_state, - pub f_fpregs: [[u32; 13]; 10], - } - - pub struct fpregset_t { - pub fp_reg_set: __c_anonymous_fp_reg_set, - } - - pub struct mcontext_t { - pub gregs: [crate::greg_t; 28], - pub fpregs: fpregset_t, - } - - pub struct ucontext_t { - pub uc_flags: c_ulong, - pub uc_link: *mut ucontext_t, - pub uc_sigmask: crate::sigset_t, - pub uc_stack: crate::stack_t, - pub uc_mcontext: mcontext_t, - #[cfg(target_os = "illumos")] - pub uc_brand_data: [*mut c_void; 3], - #[cfg(target_os = "illumos")] - pub uc_xsave: c_long, - #[cfg(target_os = "illumos")] - pub uc_filler: c_long, - #[cfg(target_os = "solaris")] - pub uc_xrs: solaris::xrs_t, - #[cfg(target_os = "solaris")] - pub uc_lwpid: c_uint, - #[cfg(target_os = "solaris")] - pub uc_filler: [c_long; 2], - } -} - -cfg_if! 
{ - if #[cfg(feature = "extra_traits")] { - impl PartialEq for __c_anonymous_fp_reg_set { - fn eq(&self, other: &__c_anonymous_fp_reg_set) -> bool { - unsafe { - self.fpchip_state == other.fpchip_state - || self - .f_fpregs - .iter() - .zip(other.f_fpregs.iter()) - .all(|(a, b)| a == b) - } - } - } - impl Eq for __c_anonymous_fp_reg_set {} - impl PartialEq for fpregset_t { - fn eq(&self, other: &fpregset_t) -> bool { - self.fp_reg_set == other.fp_reg_set - } - } - impl Eq for fpregset_t {} - impl PartialEq for mcontext_t { - fn eq(&self, other: &mcontext_t) -> bool { - self.gregs == other.gregs && self.fpregs == other.fpregs - } - } - impl Eq for mcontext_t {} - impl PartialEq for ucontext_t { - fn eq(&self, other: &ucontext_t) -> bool { - self.uc_flags == other.uc_flags - && self.uc_link == other.uc_link - && self.uc_sigmask == other.uc_sigmask - && self.uc_stack == other.uc_stack - && self.uc_mcontext == other.uc_mcontext - && self.uc_filler == other.uc_filler - } - } - impl Eq for ucontext_t {} - } -} - -// sys/regset.h - -pub const REG_GSBASE: c_int = 27; -pub const REG_FSBASE: c_int = 26; -pub const REG_DS: c_int = 25; -pub const REG_ES: c_int = 24; -pub const REG_GS: c_int = 23; -pub const REG_FS: c_int = 22; -pub const REG_SS: c_int = 21; -pub const REG_RSP: c_int = 20; -pub const REG_RFL: c_int = 19; -pub const REG_CS: c_int = 18; -pub const REG_RIP: c_int = 17; -pub const REG_ERR: c_int = 16; -pub const REG_TRAPNO: c_int = 15; -pub const REG_RAX: c_int = 14; -pub const REG_RCX: c_int = 13; -pub const REG_RDX: c_int = 12; -pub const REG_RBX: c_int = 11; -pub const REG_RBP: c_int = 10; -pub const REG_RSI: c_int = 9; -pub const REG_RDI: c_int = 8; -pub const REG_R8: c_int = 7; -pub const REG_R9: c_int = 6; -pub const REG_R10: c_int = 5; -pub const REG_R11: c_int = 4; -pub const REG_R12: c_int = 3; -pub const REG_R13: c_int = 2; -pub const REG_R14: c_int = 1; -pub const REG_R15: c_int = 0; diff --git a/vendor/libc/src/unix/solarish/x86_common.rs b/vendor/libc/src/unix/solarish/x86_common.rs deleted file mode 100644 index e72a22a83b4178..00000000000000 --- a/vendor/libc/src/unix/solarish/x86_common.rs +++ /dev/null @@ -1,69 +0,0 @@ -// AT_SUN_HWCAP -pub const AV_386_FPU: u32 = 0x00001; -pub const AV_386_TSC: u32 = 0x00002; -pub const AV_386_CX8: u32 = 0x00004; -pub const AV_386_SEP: u32 = 0x00008; -pub const AV_386_AMD_SYSC: u32 = 0x00010; -pub const AV_386_CMOV: u32 = 0x00020; -pub const AV_386_MMX: u32 = 0x00040; -pub const AV_386_AMD_MMX: u32 = 0x00080; -pub const AV_386_AMD_3DNow: u32 = 0x00100; -pub const AV_386_AMD_3DNowx: u32 = 0x00200; -pub const AV_386_FXSR: u32 = 0x00400; -pub const AV_386_SSE: u32 = 0x00800; -pub const AV_386_SSE2: u32 = 0x01000; -pub const AV_386_CX16: u32 = 0x10000; -pub const AV_386_AHF: u32 = 0x20000; -pub const AV_386_TSCP: u32 = 0x40000; -pub const AV_386_AMD_SSE4A: u32 = 0x80000; -pub const AV_386_POPCNT: u32 = 0x100000; -pub const AV_386_AMD_LZCNT: u32 = 0x200000; -pub const AV_386_SSSE3: u32 = 0x400000; -pub const AV_386_SSE4_1: u32 = 0x800000; -pub const AV_386_SSE4_2: u32 = 0x1000000; -pub const AV_386_MOVBE: u32 = 0x2000000; -pub const AV_386_AES: u32 = 0x4000000; -pub const AV_386_PCLMULQDQ: u32 = 0x8000000; -pub const AV_386_XSAVE: u32 = 0x10000000; -pub const AV_386_AVX: u32 = 0x20000000; -cfg_if! 
{ - if #[cfg(target_os = "illumos")] { - pub const AV_386_VMX: u32 = 0x40000000; - pub const AV_386_AMD_SVM: u32 = 0x80000000; - // AT_SUN_HWCAP2 - pub const AV_386_2_F16C: u32 = 0x00000001; - pub const AV_386_2_RDRAND: u32 = 0x00000002; - pub const AV_386_2_BMI1: u32 = 0x00000004; - pub const AV_386_2_BMI2: u32 = 0x00000008; - pub const AV_386_2_FMA: u32 = 0x00000010; - pub const AV_386_2_AVX2: u32 = 0x00000020; - pub const AV_386_2_ADX: u32 = 0x00000040; - pub const AV_386_2_RDSEED: u32 = 0x00000080; - pub const AV_386_2_AVX512F: u32 = 0x00000100; - pub const AV_386_2_AVX512DQ: u32 = 0x00000200; - pub const AV_386_2_AVX512IFMA: u32 = 0x00000400; - pub const AV_386_2_AVX512PF: u32 = 0x00000800; - pub const AV_386_2_AVX512ER: u32 = 0x00001000; - pub const AV_386_2_AVX512CD: u32 = 0x00002000; - pub const AV_386_2_AVX512BW: u32 = 0x00004000; - pub const AV_386_2_AVX512VL: u32 = 0x00008000; - pub const AV_386_2_AVX512VBMI: u32 = 0x00010000; - pub const AV_386_2_AVX512VPOPCDQ: u32 = 0x00020000; - pub const AV_386_2_AVX512_4NNIW: u32 = 0x00040000; - pub const AV_386_2_AVX512_4FMAPS: u32 = 0x00080000; - pub const AV_386_2_SHA: u32 = 0x00100000; - pub const AV_386_2_FSGSBASE: u32 = 0x00200000; - pub const AV_386_2_CLFLUSHOPT: u32 = 0x00400000; - pub const AV_386_2_CLWB: u32 = 0x00800000; - pub const AV_386_2_MONITORX: u32 = 0x01000000; - pub const AV_386_2_CLZERO: u32 = 0x02000000; - pub const AV_386_2_AVX512_VNNI: u32 = 0x04000000; - pub const AV_386_2_VPCLMULQDQ: u32 = 0x08000000; - pub const AV_386_2_VAES: u32 = 0x10000000; - // AT_SUN_FPTYPE - pub const AT_386_FPINFO_NONE: u32 = 0; - pub const AT_386_FPINFO_FXSAVE: u32 = 1; - pub const AT_386_FPINFO_XSAVE: u32 = 2; - pub const AT_386_FPINFO_XSAVE_AMD: u32 = 3; - } -} diff --git a/vendor/libc/src/vxworks/aarch64.rs b/vendor/libc/src/vxworks/aarch64.rs deleted file mode 100644 index 376783c8234baf..00000000000000 --- a/vendor/libc/src/vxworks/aarch64.rs +++ /dev/null @@ -1 +0,0 @@ -pub type wchar_t = u32; diff --git a/vendor/libc/src/vxworks/arm.rs b/vendor/libc/src/vxworks/arm.rs deleted file mode 100644 index 376783c8234baf..00000000000000 --- a/vendor/libc/src/vxworks/arm.rs +++ /dev/null @@ -1 +0,0 @@ -pub type wchar_t = u32; diff --git a/vendor/libc/src/vxworks/mod.rs b/vendor/libc/src/vxworks/mod.rs deleted file mode 100644 index 809640d1122216..00000000000000 --- a/vendor/libc/src/vxworks/mod.rs +++ /dev/null @@ -1,2018 +0,0 @@ -//! 
Interface to VxWorks C library - -use core::ptr::null_mut; - -use crate::prelude::*; - -#[derive(Debug)] -pub enum DIR {} -impl Copy for DIR {} -impl Clone for DIR { - fn clone(&self) -> DIR { - *self - } -} - -pub type intmax_t = i64; -pub type uintmax_t = u64; - -pub type uintptr_t = usize; -pub type intptr_t = isize; -pub type ptrdiff_t = isize; -pub type size_t = crate::uintptr_t; -pub type ssize_t = intptr_t; - -pub type pid_t = c_int; -pub type in_addr_t = u32; -pub type sighandler_t = size_t; -pub type cpuset_t = u32; - -pub type blkcnt_t = c_long; -pub type blksize_t = c_long; -pub type ino_t = c_ulong; - -pub type rlim_t = c_ulong; -pub type suseconds_t = c_long; -pub type time_t = c_longlong; - -pub type errno_t = c_int; - -pub type useconds_t = c_ulong; - -pub type socklen_t = c_uint; - -pub type pthread_t = c_ulong; - -pub type clockid_t = c_int; - -//defined for the structs -pub type dev_t = c_ulong; -pub type mode_t = c_int; -pub type nlink_t = c_ulong; -pub type uid_t = c_ushort; -pub type gid_t = c_ushort; -pub type sigset_t = c_ulonglong; -pub type key_t = c_long; - -pub type nfds_t = c_uint; -pub type stat64 = crate::stat; - -pub type pthread_key_t = c_ulong; - -// From b_off_t.h -pub type off_t = c_longlong; -pub type off64_t = off_t; - -// From b_BOOL.h -pub type BOOL = c_int; - -// From vxWind.h .. -pub type _Vx_OBJ_HANDLE = c_int; -pub type _Vx_TASK_ID = crate::_Vx_OBJ_HANDLE; -pub type _Vx_MSG_Q_ID = crate::_Vx_OBJ_HANDLE; -pub type _Vx_SEM_ID_KERNEL = crate::_Vx_OBJ_HANDLE; -pub type _Vx_RTP_ID = crate::_Vx_OBJ_HANDLE; -pub type _Vx_SD_ID = crate::_Vx_OBJ_HANDLE; -pub type _Vx_CONDVAR_ID = crate::_Vx_OBJ_HANDLE; -pub type _Vx_SEM_ID = *mut crate::_Vx_semaphore; -pub type OBJ_HANDLE = crate::_Vx_OBJ_HANDLE; -pub type TASK_ID = crate::OBJ_HANDLE; -pub type MSG_Q_ID = crate::OBJ_HANDLE; -pub type SEM_ID_KERNEL = crate::OBJ_HANDLE; -pub type RTP_ID = crate::OBJ_HANDLE; -pub type SD_ID = crate::OBJ_HANDLE; -pub type CONDVAR_ID = crate::OBJ_HANDLE; -pub type STATUS = crate::OBJ_HANDLE; - -// From vxTypes.h -pub type _Vx_usr_arg_t = isize; -pub type _Vx_exit_code_t = isize; -pub type _Vx_ticks_t = c_uint; -pub type _Vx_ticks64_t = c_ulonglong; - -pub type sa_family_t = c_uchar; - -// mqueue.h -pub type mqd_t = c_int; - -#[derive(Debug)] -pub enum _Vx_semaphore {} -impl Copy for _Vx_semaphore {} -impl Clone for _Vx_semaphore { - fn clone(&self) -> _Vx_semaphore { - *self - } -} - -impl siginfo_t { - pub unsafe fn si_addr(&self) -> *mut c_void { - self.si_addr - } - - pub unsafe fn si_value(&self) -> crate::sigval { - self.si_value - } - - pub unsafe fn si_pid(&self) -> crate::pid_t { - self.si_pid - } - - pub unsafe fn si_uid(&self) -> crate::uid_t { - self.si_uid - } - - pub unsafe fn si_status(&self) -> c_int { - self.si_status - } -} - -s! 
{ - // b_pthread_condattr_t.h - pub struct pthread_condattr_t { - pub condAttrStatus: c_int, - pub condAttrPshared: c_int, - pub condAttrClockId: crate::clockid_t, - } - - // b_pthread_cond_t.h - pub struct pthread_cond_t { - pub condSemId: crate::_Vx_SEM_ID, - pub condValid: c_int, - pub condInitted: c_int, - pub condRefCount: c_int, - pub condMutex: *mut crate::pthread_mutex_t, - pub condAttr: crate::pthread_condattr_t, - pub condSemName: [c_char; _PTHREAD_SHARED_SEM_NAME_MAX], - } - - // b_pthread_rwlockattr_t.h - pub struct pthread_rwlockattr_t { - pub rwlockAttrStatus: c_int, - pub rwlockAttrPshared: c_int, - pub rwlockAttrMaxReaders: c_uint, - pub rwlockAttrConformOpt: c_uint, - } - - // b_pthread_rwlock_t.h - pub struct pthread_rwlock_t { - pub rwlockSemId: crate::_Vx_SEM_ID, - pub rwlockReadersRefCount: c_uint, - pub rwlockValid: c_int, - pub rwlockInitted: c_int, - pub rwlockAttr: crate::pthread_rwlockattr_t, - pub rwlockSemName: [c_char; _PTHREAD_SHARED_SEM_NAME_MAX], - } - - // b_struct_timeval.h - pub struct timeval { - pub tv_sec: crate::time_t, - pub tv_usec: crate::suseconds_t, - } - - // socket.h - pub struct linger { - pub l_onoff: c_int, - pub l_linger: c_int, - } - - pub struct sockaddr { - pub sa_len: c_uchar, - pub sa_family: sa_family_t, - pub sa_data: [c_char; 14], - } - - pub struct iovec { - pub iov_base: *mut c_void, - pub iov_len: size_t, - } - - pub struct msghdr { - pub msg_name: *mut c_void, - pub msg_namelen: socklen_t, - pub msg_iov: *mut iovec, - pub msg_iovlen: c_int, - pub msg_control: *mut c_void, - pub msg_controllen: socklen_t, - pub msg_flags: c_int, - } - - pub struct cmsghdr { - pub cmsg_len: socklen_t, - pub cmsg_level: c_int, - pub cmsg_type: c_int, - } - - // poll.h - pub struct pollfd { - pub fd: c_int, - pub events: c_short, - pub revents: c_short, - } - - // resource.h - pub struct rlimit { - pub rlim_cur: crate::rlim_t, - pub rlim_max: crate::rlim_t, - } - - // stat.h - pub struct stat { - pub st_dev: crate::dev_t, - pub st_ino: crate::ino_t, - pub st_mode: mode_t, - pub st_nlink: crate::nlink_t, - pub st_uid: crate::uid_t, - pub st_gid: crate::gid_t, - pub st_rdev: crate::dev_t, - pub st_size: off_t, - pub st_atime: crate::time_t, - pub st_mtime: crate::time_t, - pub st_ctime: crate::time_t, - pub st_blksize: crate::blksize_t, - pub st_blocks: crate::blkcnt_t, - pub st_attrib: c_uchar, - pub st_reserved1: c_int, - pub st_reserved2: c_int, - pub st_reserved3: c_int, - pub st_reserved4: c_int, - } - - //b_struct__Timespec.h - pub struct _Timespec { - pub tv_sec: crate::time_t, - pub tv_nsec: c_long, - } - - // b_struct__Sched_param.h - pub struct sched_param { - pub sched_priority: c_int, /* scheduling priority */ - pub sched_ss_low_priority: c_int, /* low scheduling priority */ - pub sched_ss_repl_period: crate::_Timespec, /* replenishment period */ - pub sched_ss_init_budget: crate::_Timespec, /* initial budget */ - pub sched_ss_max_repl: c_int, /* max pending replenishment */ - } - - // b_pthread_attr_t.h - pub struct pthread_attr_t { - pub threadAttrStatus: c_int, - pub threadAttrStacksize: size_t, - pub threadAttrStackaddr: *mut c_void, - pub threadAttrGuardsize: size_t, - pub threadAttrDetachstate: c_int, - pub threadAttrContentionscope: c_int, - pub threadAttrInheritsched: c_int, - pub threadAttrSchedpolicy: c_int, - pub threadAttrName: *mut c_char, - pub threadAttrOptions: c_int, - pub threadAttrSchedparam: crate::sched_param, - } - - // signal.h - - pub struct sigaction { - pub sa_u: crate::sa_u_t, - pub sa_mask: crate::sigset_t, - 
pub sa_flags: c_int, - } - - // b_stack_t.h - pub struct stack_t { - pub ss_sp: *mut c_void, - pub ss_size: size_t, - pub ss_flags: c_int, - } - - // signal.h - pub struct siginfo_t { - pub si_signo: c_int, - pub si_code: c_int, - pub si_value: crate::sigval, - pub si_errno: c_int, - pub si_status: c_int, - pub si_addr: *mut c_void, - pub si_uid: crate::uid_t, - pub si_pid: crate::pid_t, - } - - // pthread.h (krnl) - // b_pthread_mutexattr_t.h (usr) - pub struct pthread_mutexattr_t { - mutexAttrStatus: c_int, - mutexAttrPshared: c_int, - mutexAttrProtocol: c_int, - mutexAttrPrioceiling: c_int, - mutexAttrType: c_int, - } - - // pthread.h (krnl) - // b_pthread_mutex_t.h (usr) - pub struct pthread_mutex_t { - pub mutexSemId: crate::_Vx_SEM_ID, /*_Vx_SEM_ID ..*/ - pub mutexValid: c_int, - pub mutexInitted: c_int, - pub mutexCondRefCount: c_int, - pub mutexSavPriority: c_int, - pub mutexAttr: crate::pthread_mutexattr_t, - pub mutexSemName: [c_char; _PTHREAD_SHARED_SEM_NAME_MAX], - } - - // b_struct_timespec.h - pub struct timespec { - pub tv_sec: crate::time_t, - pub tv_nsec: c_long, - } - - // time.h - pub struct tm { - pub tm_sec: c_int, - pub tm_min: c_int, - pub tm_hour: c_int, - pub tm_mday: c_int, - pub tm_mon: c_int, - pub tm_year: c_int, - pub tm_wday: c_int, - pub tm_yday: c_int, - pub tm_isdst: c_int, - } - - // in.h - pub struct in_addr { - pub s_addr: in_addr_t, - } - - // in.h - pub struct ip_mreq { - pub imr_multiaddr: in_addr, - pub imr_interface: in_addr, - } - - // in6.h - #[repr(align(4))] - pub struct in6_addr { - pub s6_addr: [u8; 16], - } - - // in6.h - pub struct ipv6_mreq { - pub ipv6mr_multiaddr: in6_addr, - pub ipv6mr_interface: c_uint, - } - - // netdb.h - pub struct addrinfo { - pub ai_flags: c_int, - pub ai_family: c_int, - pub ai_socktype: c_int, - pub ai_protocol: c_int, - pub ai_addrlen: size_t, - pub ai_canonname: *mut c_char, - pub ai_addr: *mut crate::sockaddr, - pub ai_next: *mut crate::addrinfo, - } - - // in.h - pub struct sockaddr_in { - pub sin_len: u8, - pub sin_family: u8, - pub sin_port: u16, - pub sin_addr: crate::in_addr, - pub sin_zero: [c_char; 8], - } - - // in6.h - pub struct sockaddr_in6 { - pub sin6_len: u8, - pub sin6_family: u8, - pub sin6_port: u16, - pub sin6_flowinfo: u32, - pub sin6_addr: crate::in6_addr, - pub sin6_scope_id: u32, - } - - pub struct Dl_info { - pub dli_fname: *const c_char, - pub dli_fbase: *mut c_void, - pub dli_sname: *const c_char, - pub dli_saddr: *mut c_void, - } - - pub struct mq_attr { - pub mq_maxmsg: c_long, - pub mq_msgsize: c_long, - pub mq_flags: c_long, - pub mq_curmsgs: c_long, - } -} - -s_no_extra_traits! 
{ - // dirent.h - pub struct dirent { - pub d_ino: crate::ino_t, - pub d_name: [c_char; _PARM_NAME_MAX as usize + 1], - pub d_type: c_uchar, - } - - pub struct sockaddr_un { - pub sun_len: u8, - pub sun_family: sa_family_t, - pub sun_path: [c_char; 104], - } - - // rtpLibCommon.h - pub struct RTP_DESC { - pub status: c_int, - pub options: u32, - pub entrAddr: *mut c_void, - pub initTaskId: crate::TASK_ID, - pub parentId: crate::RTP_ID, - pub pathName: [c_char; VX_RTP_NAME_LENGTH as usize + 1], - pub taskCnt: c_int, - pub textStart: *mut c_void, - pub textEnd: *mut c_void, - } - // socket.h - pub struct sockaddr_storage { - pub ss_len: c_uchar, - pub ss_family: crate::sa_family_t, - pub __ss_pad1: [c_char; _SS_PAD1SIZE], - pub __ss_align: i32, - pub __ss_pad2: [c_char; _SS_PAD2SIZE], - } - - pub union sa_u_t { - pub sa_handler: Option !>, - pub sa_sigaction: - Option !>, - } - - pub union sigval { - pub sival_int: c_int, - pub sival_ptr: *mut c_void, - } -} - -cfg_if! { - if #[cfg(feature = "extra_traits")] { - impl PartialEq for sa_u_t { - fn eq(&self, other: &sa_u_t) -> bool { - unsafe { - let h1 = match self.sa_handler { - Some(handler) => handler as usize, - None => 0 as usize, - }; - let h2 = match other.sa_handler { - Some(handler) => handler as usize, - None => 0 as usize, - }; - h1 == h2 - } - } - } - impl Eq for sa_u_t {} - impl hash::Hash for sa_u_t { - fn hash(&self, state: &mut H) { - unsafe { - let h = match self.sa_handler { - Some(handler) => handler as usize, - None => 0 as usize, - }; - h.hash(state) - } - } - } - - impl PartialEq for sigval { - fn eq(&self, other: &sigval) -> bool { - unsafe { self.sival_ptr as usize == other.sival_ptr as usize } - } - } - impl Eq for sigval {} - impl hash::Hash for sigval { - fn hash(&self, state: &mut H) { - unsafe { (self.sival_ptr as usize).hash(state) }; - } - } - } -} - -pub const STDIN_FILENO: c_int = 0; -pub const STDOUT_FILENO: c_int = 1; -pub const STDERR_FILENO: c_int = 2; - -pub const EXIT_SUCCESS: c_int = 0; -pub const EXIT_FAILURE: c_int = 1; - -pub const EAI_SERVICE: c_int = 9; -pub const EAI_SOCKTYPE: c_int = 10; -pub const EAI_SYSTEM: c_int = 11; - -// FIXME(vxworks): This is not defined in vxWorks, but we have to define it here -// to make the building pass for getrandom and std -pub const RTLD_DEFAULT: *mut c_void = ptr::null_mut(); - -//Clock Lib Stuff -pub const CLOCK_REALTIME: c_int = 0x0; -pub const CLOCK_MONOTONIC: c_int = 0x1; -pub const CLOCK_PROCESS_CPUTIME_ID: c_int = 0x2; -pub const CLOCK_THREAD_CPUTIME_ID: c_int = 0x3; -pub const TIMER_ABSTIME: c_int = 0x1; -pub const TIMER_RELTIME: c_int = 0x0; - -// PTHREAD STUFF -pub const PTHREAD_INITIALIZED_OBJ: c_int = 0xF70990EF; -pub const PTHREAD_DESTROYED_OBJ: c_int = -1; -pub const PTHREAD_VALID_OBJ: c_int = 0xEC542A37; -pub const PTHREAD_INVALID_OBJ: c_int = -1; -pub const PTHREAD_UNUSED_YET_OBJ: c_int = -1; - -pub const PTHREAD_PRIO_NONE: c_int = 0; -pub const PTHREAD_PRIO_INHERIT: c_int = 1; -pub const PTHREAD_PRIO_PROTECT: c_int = 2; - -pub const PTHREAD_MUTEX_NORMAL: c_int = 0; -pub const PTHREAD_MUTEX_ERRORCHECK: c_int = 1; -pub const PTHREAD_MUTEX_RECURSIVE: c_int = 2; -pub const PTHREAD_MUTEX_DEFAULT: c_int = PTHREAD_MUTEX_NORMAL; -pub const PTHREAD_STACK_MIN: usize = 4096; -pub const _PTHREAD_SHARED_SEM_NAME_MAX: usize = 30; - -//sched.h -pub const SCHED_FIFO: c_int = 0x01; -pub const SCHED_RR: c_int = 0x02; -pub const SCHED_OTHER: c_int = 0x04; -pub const SCHED_SPORADIC: c_int = 0x08; -pub const PRIO_PROCESS: c_uint = 0; -pub const SCHED_FIFO_HIGH_PRI: 
c_int = 255; -pub const SCHED_FIFO_LOW_PRI: c_int = 0; -pub const SCHED_RR_HIGH_PRI: c_int = 255; -pub const SCHED_RR_LOW_PRI: c_int = 0; -pub const SCHED_SPORADIC_HIGH_PRI: c_int = 255; -pub const SCHED_SPORADIC_LOW_PRI: c_int = 0; - -// ERRNO STUFF -pub const ERROR: c_int = -1; -pub const OK: c_int = 0; -pub const EPERM: c_int = 1; /* Not owner */ -pub const ENOENT: c_int = 2; /* No such file or directory */ -pub const ESRCH: c_int = 3; /* No such process */ -pub const EINTR: c_int = 4; /* Interrupted system call */ -pub const EIO: c_int = 5; /* I/O error */ -pub const ENXIO: c_int = 6; /* No such device or address */ -pub const E2BIG: c_int = 7; /* Arg list too long */ -pub const ENOEXEC: c_int = 8; /* Exec format error */ -pub const EBADF: c_int = 9; /* Bad file number */ -pub const ECHILD: c_int = 10; /* No children */ -pub const EAGAIN: c_int = 11; /* No more processes */ -pub const ENOMEM: c_int = 12; /* Not enough core */ -pub const EACCES: c_int = 13; /* Permission denied */ -pub const EFAULT: c_int = 14; -pub const ENOTEMPTY: c_int = 15; -pub const EBUSY: c_int = 16; -pub const EEXIST: c_int = 17; -pub const EXDEV: c_int = 18; -pub const ENODEV: c_int = 19; -pub const ENOTDIR: c_int = 20; -pub const EISDIR: c_int = 21; -pub const EINVAL: c_int = 22; -pub const ENFILE: c_int = 23; -pub const EMFILE: c_int = 24; -pub const ENOTTY: c_int = 25; -pub const ENAMETOOLONG: c_int = 26; -pub const EFBIG: c_int = 27; -pub const ENOSPC: c_int = 28; -pub const ESPIPE: c_int = 29; -pub const EROFS: c_int = 30; -pub const EMLINK: c_int = 31; -pub const EPIPE: c_int = 32; -pub const EDEADLK: c_int = 33; -pub const ENOLCK: c_int = 34; -pub const ENOTSUP: c_int = 35; -pub const EMSGSIZE: c_int = 36; -pub const EDOM: c_int = 37; -pub const ERANGE: c_int = 38; -pub const EDOOM: c_int = 39; -pub const EDESTADDRREQ: c_int = 40; -pub const EPROTOTYPE: c_int = 41; -pub const ENOPROTOOPT: c_int = 42; -pub const EPROTONOSUPPORT: c_int = 43; -pub const ESOCKTNOSUPPORT: c_int = 44; -pub const EOPNOTSUPP: c_int = 45; -pub const EPFNOSUPPORT: c_int = 46; -pub const EAFNOSUPPORT: c_int = 47; -pub const EADDRINUSE: c_int = 48; -pub const EADDRNOTAVAIL: c_int = 49; -pub const ENOTSOCK: c_int = 50; -pub const ENETUNREACH: c_int = 51; -pub const ENETRESET: c_int = 52; -pub const ECONNABORTED: c_int = 53; -pub const ECONNRESET: c_int = 54; -pub const ENOBUFS: c_int = 55; -pub const EISCONN: c_int = 56; -pub const ENOTCONN: c_int = 57; -pub const ESHUTDOWN: c_int = 58; -pub const ETOOMANYREFS: c_int = 59; -pub const ETIMEDOUT: c_int = 60; -pub const ECONNREFUSED: c_int = 61; -pub const ENETDOWN: c_int = 62; -pub const ETXTBSY: c_int = 63; -pub const ELOOP: c_int = 64; -pub const EHOSTUNREACH: c_int = 65; -pub const ENOTBLK: c_int = 66; -pub const EHOSTDOWN: c_int = 67; -pub const EINPROGRESS: c_int = 68; -pub const EALREADY: c_int = 69; -pub const EWOULDBLOCK: c_int = 70; -pub const ENOSYS: c_int = 71; -pub const ECANCELED: c_int = 72; -pub const ENOSR: c_int = 74; -pub const ENOSTR: c_int = 75; -pub const EPROTO: c_int = 76; -pub const EBADMSG: c_int = 77; -pub const ENODATA: c_int = 78; -pub const ETIME: c_int = 79; -pub const ENOMSG: c_int = 80; -pub const EFPOS: c_int = 81; -pub const EILSEQ: c_int = 82; -pub const EDQUOT: c_int = 83; -pub const EIDRM: c_int = 84; -pub const EOVERFLOW: c_int = 85; -pub const EMULTIHOP: c_int = 86; -pub const ENOLINK: c_int = 87; -pub const ESTALE: c_int = 88; -pub const EOWNERDEAD: c_int = 89; -pub const ENOTRECOVERABLE: c_int = 90; - -// NFS errnos: Refer to 
pkgs_v2/storage/fs/nfs/h/nfs/nfsCommon.h -const M_nfsStat: c_int = 48 << 16; -enum nfsstat { - NFSERR_REMOTE = 71, - NFSERR_WFLUSH = 99, - NFSERR_BADHANDLE = 10001, - NFSERR_NOT_SYNC = 10002, - NFSERR_BAD_COOKIE = 10003, - NFSERR_TOOSMALL = 10005, - NFSERR_BADTYPE = 10007, - NFSERR_JUKEBOX = 10008, -} - -pub const S_nfsLib_NFS_OK: c_int = OK; -pub const S_nfsLib_NFSERR_PERM: c_int = EPERM; -pub const S_nfsLib_NFSERR_NOENT: c_int = ENOENT; -pub const S_nfsLib_NFSERR_IO: c_int = EIO; -pub const S_nfsLib_NFSERR_NXIO: c_int = ENXIO; -pub const S_nfsLib_NFSERR_ACCESS: c_int = EACCES; -pub const S_nfsLib_NFSERR_EXIST: c_int = EEXIST; -pub const S_nfsLib_NFSERR_ENODEV: c_int = ENODEV; -pub const S_nfsLib_NFSERR_NOTDIR: c_int = ENOTDIR; -pub const S_nfsLib_NFSERR_ISDIR: c_int = EISDIR; -pub const S_nfsLib_NFSERR_INVAL: c_int = EINVAL; -pub const S_nfsLib_NFSERR_FBIG: c_int = EFBIG; -pub const S_nfsLib_NFSERR_NOSPC: c_int = ENOSPC; -pub const S_nfsLib_NFSERR_ROFS: c_int = EROFS; -pub const S_nfsLib_NFSERR_NAMETOOLONG: c_int = ENAMETOOLONG; -pub const S_nfsLib_NFSERR_NOTEMPTY: c_int = ENOTEMPTY; -pub const S_nfsLib_NFSERR_DQUOT: c_int = EDQUOT; -pub const S_nfsLib_NFSERR_STALE: c_int = ESTALE; -pub const S_nfsLib_NFSERR_WFLUSH: c_int = M_nfsStat | nfsstat::NFSERR_WFLUSH as c_int; -pub const S_nfsLib_NFSERR_REMOTE: c_int = M_nfsStat | nfsstat::NFSERR_REMOTE as c_int; -pub const S_nfsLib_NFSERR_BADHANDLE: c_int = M_nfsStat | nfsstat::NFSERR_BADHANDLE as c_int; -pub const S_nfsLib_NFSERR_NOT_SYNC: c_int = M_nfsStat | nfsstat::NFSERR_NOT_SYNC as c_int; -pub const S_nfsLib_NFSERR_BAD_COOKIE: c_int = M_nfsStat | nfsstat::NFSERR_BAD_COOKIE as c_int; -pub const S_nfsLib_NFSERR_NOTSUPP: c_int = EOPNOTSUPP; -pub const S_nfsLib_NFSERR_TOOSMALL: c_int = M_nfsStat | nfsstat::NFSERR_TOOSMALL as c_int; -pub const S_nfsLib_NFSERR_SERVERFAULT: c_int = EIO; -pub const S_nfsLib_NFSERR_BADTYPE: c_int = M_nfsStat | nfsstat::NFSERR_BADTYPE as c_int; -pub const S_nfsLib_NFSERR_JUKEBOX: c_int = M_nfsStat | nfsstat::NFSERR_JUKEBOX as c_int; - -// internal offset values for below constants -const taskErrorBase: c_int = 0x00030000; -const semErrorBase: c_int = 0x00160000; -const objErrorBase: c_int = 0x003d0000; - -// taskLibCommon.h -pub const S_taskLib_NAME_NOT_FOUND: c_int = taskErrorBase + 0x0065; -pub const S_taskLib_TASK_HOOK_TABLE_FULL: c_int = taskErrorBase + 0x0066; -pub const S_taskLib_TASK_HOOK_NOT_FOUND: c_int = taskErrorBase + 0x0067; -pub const S_taskLib_ILLEGAL_PRIORITY: c_int = taskErrorBase + 0x0068; - -// FIXME(vxworks): could also be useful for TASK_DESC type -pub const VX_TASK_NAME_LENGTH: c_int = 31; -pub const VX_TASK_RENAME_LENGTH: c_int = 16; - -// semLibCommon.h -pub const S_semLib_INVALID_STATE: c_int = semErrorBase + 0x0065; -pub const S_semLib_INVALID_OPTION: c_int = semErrorBase + 0x0066; -pub const S_semLib_INVALID_QUEUE_TYPE: c_int = semErrorBase + 0x0067; -pub const S_semLib_INVALID_OPERATION: c_int = semErrorBase + 0x0068; - -// objLibCommon.h -pub const S_objLib_OBJ_ID_ERROR: c_int = objErrorBase + 0x0001; -pub const S_objLib_OBJ_UNAVAILABLE: c_int = objErrorBase + 0x0002; -pub const S_objLib_OBJ_DELETED: c_int = objErrorBase + 0x0003; -pub const S_objLib_OBJ_TIMEOUT: c_int = objErrorBase + 0x0004; -pub const S_objLib_OBJ_NO_METHOD: c_int = objErrorBase + 0x0005; - -// in.h -pub const IPPROTO_IP: c_int = 0; -pub const IPPROTO_IPV6: c_int = 41; - -pub const IP_TTL: c_int = 4; -pub const IP_MULTICAST_IF: c_int = 9; -pub const IP_MULTICAST_TTL: c_int = 10; -pub const IP_MULTICAST_LOOP: c_int = 
11; -pub const IP_ADD_MEMBERSHIP: c_int = 12; -pub const IP_DROP_MEMBERSHIP: c_int = 13; - -// in6.h -pub const IPV6_V6ONLY: c_int = 1; -pub const IPV6_UNICAST_HOPS: c_int = 4; -pub const IPV6_MULTICAST_IF: c_int = 9; -pub const IPV6_MULTICAST_HOPS: c_int = 10; -pub const IPV6_MULTICAST_LOOP: c_int = 11; -pub const IPV6_ADD_MEMBERSHIP: c_int = 12; -pub const IPV6_DROP_MEMBERSHIP: c_int = 13; - -// STAT Stuff -pub const S_IFMT: c_int = 0o17_0000; -pub const S_IFIFO: c_int = 0o1_0000; -pub const S_IFCHR: c_int = 0o2_0000; -pub const S_IFDIR: c_int = 0o4_0000; -pub const S_IFBLK: c_int = 0o6_0000; -pub const S_IFREG: c_int = 0o10_0000; -pub const S_IFLNK: c_int = 0o12_0000; -pub const S_IFSHM: c_int = 0o13_0000; -pub const S_IFSOCK: c_int = 0o14_0000; -pub const S_ISUID: c_int = 0o4000; -pub const S_ISGID: c_int = 0o2000; -pub const S_ISTXT: c_int = 0o1000; -pub const S_ISVTX: c_int = 0o1000; -pub const S_IRUSR: c_int = 0o0400; -pub const S_IWUSR: c_int = 0o0200; -pub const S_IXUSR: c_int = 0o0100; -pub const S_IRWXU: c_int = 0o0700; -pub const S_IRGRP: c_int = 0o0040; -pub const S_IWGRP: c_int = 0o0020; -pub const S_IXGRP: c_int = 0o0010; -pub const S_IRWXG: c_int = 0o0070; -pub const S_IROTH: c_int = 0o0004; -pub const S_IWOTH: c_int = 0o0002; -pub const S_IXOTH: c_int = 0o0001; -pub const S_IRWXO: c_int = 0o0007; - -pub const UTIME_OMIT: c_long = 0x3ffffffe; -pub const UTIME_NOW: c_long = 0x3fffffff; - -// socket.h -pub const SOL_SOCKET: c_int = 0xffff; -pub const SOMAXCONN: c_int = 128; - -pub const SO_DEBUG: c_int = 0x0001; -pub const SO_REUSEADDR: c_int = 0x0004; -pub const SO_KEEPALIVE: c_int = 0x0008; -pub const SO_DONTROUTE: c_int = 0x0010; -pub const SO_RCVLOWAT: c_int = 0x0012; -pub const SO_SNDLOWAT: c_int = 0x0013; -pub const SO_SNDTIMEO: c_int = 0x1005; -pub const SO_ACCEPTCONN: c_int = 0x001e; -pub const SO_BROADCAST: c_int = 0x0020; -pub const SO_USELOOPBACK: c_int = 0x0040; -pub const SO_LINGER: c_int = 0x0080; -pub const SO_REUSEPORT: c_int = 0x0200; - -pub const SO_VLAN: c_int = 0x8000; - -pub const SO_SNDBUF: c_int = 0x1001; -pub const SO_RCVBUF: c_int = 0x1002; -pub const SO_RCVTIMEO: c_int = 0x1006; -pub const SO_ERROR: c_int = 0x1007; -pub const SO_TYPE: c_int = 0x1008; -pub const SO_BINDTODEVICE: c_int = 0x1010; -pub const SO_OOBINLINE: c_int = 0x1011; -pub const SO_CONNTIMEO: c_int = 0x100a; - -pub const SOCK_STREAM: c_int = 1; -pub const SOCK_DGRAM: c_int = 2; -pub const SOCK_RAW: c_int = 3; -pub const SOCK_RDM: c_int = 4; -pub const SOCK_SEQPACKET: c_int = 5; -pub const SOCK_PACKET: c_int = 10; - -pub const _SS_MAXSIZE: usize = 128; -pub const _SS_ALIGNSIZE: usize = size_of::(); -pub const _SS_PAD1SIZE: usize = - _SS_ALIGNSIZE - size_of::() - size_of::(); -pub const _SS_PAD2SIZE: usize = _SS_MAXSIZE - - size_of::() - - size_of::() - - _SS_PAD1SIZE - - _SS_ALIGNSIZE; - -pub const MSG_OOB: c_int = 0x0001; -pub const MSG_PEEK: c_int = 0x0002; -pub const MSG_DONTROUTE: c_int = 0x0004; -pub const MSG_EOR: c_int = 0x0008; -pub const MSG_TRUNC: c_int = 0x0010; -pub const MSG_CTRUNC: c_int = 0x0020; -pub const MSG_WAITALL: c_int = 0x0040; -pub const MSG_DONTWAIT: c_int = 0x0080; -pub const MSG_EOF: c_int = 0x0100; -pub const MSG_EXP: c_int = 0x0200; -pub const MSG_MBUF: c_int = 0x0400; -pub const MSG_NOTIFICATION: c_int = 0x0800; -pub const MSG_COMPAT: c_int = 0x8000; - -pub const AF_UNSPEC: c_int = 0; -pub const AF_LOCAL: c_int = 1; -pub const AF_UNIX: c_int = AF_LOCAL; -pub const AF_INET: c_int = 2; -pub const AF_NETLINK: c_int = 16; -pub const AF_ROUTE: c_int = 17; -pub 
const AF_LINK: c_int = 18; -pub const AF_PACKET: c_int = 19; -pub const pseudo_AF_KEY: c_int = 27; -pub const AF_KEY: c_int = pseudo_AF_KEY; -pub const AF_INET6: c_int = 28; -pub const AF_SOCKDEV: c_int = 31; -pub const AF_TIPC: c_int = 33; -pub const AF_MIPC: c_int = 34; -pub const AF_MIPC_SAFE: c_int = 35; -pub const AF_MAX: c_int = 37; - -pub const SHUT_RD: c_int = 0; -pub const SHUT_WR: c_int = 1; -pub const SHUT_RDWR: c_int = 2; - -pub const IPPROTO_TCP: c_int = 6; -pub const TCP_NODELAY: c_int = 1; -pub const TCP_MAXSEG: c_int = 2; -pub const TCP_NOPUSH: c_int = 3; -pub const TCP_KEEPIDLE: c_int = 4; -pub const TCP_KEEPINTVL: c_int = 5; -pub const TCP_KEEPCNT: c_int = 6; - -// ioLib.h -pub const FIONREAD: c_int = 0x40040001; -pub const FIOFLUSH: c_int = 2; -pub const FIOOPTIONS: c_int = 3; -pub const FIOBAUDRATE: c_int = 4; -pub const FIODISKFORMAT: c_int = 5; -pub const FIODISKINIT: c_int = 6; -pub const FIOSEEK: c_int = 7; -pub const FIOWHERE: c_int = 8; -pub const FIODIRENTRY: c_int = 9; -pub const FIORENAME: c_int = 10; -pub const FIOREADYCHANGE: c_int = 11; -pub const FIODISKCHANGE: c_int = 13; -pub const FIOCANCEL: c_int = 14; -pub const FIOSQUEEZE: c_int = 15; -pub const FIOGETNAME: c_int = 18; -pub const FIONBIO: c_int = 0x90040010; - -// limits.h -pub const PATH_MAX: c_int = _PARM_PATH_MAX; -pub const _POSIX_PATH_MAX: c_int = 256; - -// Some poll stuff -pub const POLLIN: c_short = 0x0001; -pub const POLLPRI: c_short = 0x0002; -pub const POLLOUT: c_short = 0x0004; -pub const POLLRDNORM: c_short = 0x0040; -pub const POLLWRNORM: c_short = POLLOUT; -pub const POLLRDBAND: c_short = 0x0080; -pub const POLLWRBAND: c_short = 0x0100; -pub const POLLERR: c_short = 0x0008; -pub const POLLHUP: c_short = 0x0010; -pub const POLLNVAL: c_short = 0x0020; - -// fnctlcom.h -pub const FD_CLOEXEC: c_int = 1; -pub const F_DUPFD: c_int = 0; -pub const F_GETFD: c_int = 1; -pub const F_SETFD: c_int = 2; -pub const F_GETFL: c_int = 3; -pub const F_SETFL: c_int = 4; -pub const F_GETOWN: c_int = 5; -pub const F_SETOWN: c_int = 6; -pub const F_GETLK: c_int = 7; -pub const F_SETLK: c_int = 8; -pub const F_SETLKW: c_int = 9; -pub const F_DUPFD_CLOEXEC: c_int = 14; - -// signal.h -pub const SIG_DFL: sighandler_t = 0 as sighandler_t; -pub const SIG_IGN: sighandler_t = 1 as sighandler_t; -pub const SIG_ERR: sighandler_t = -1 as isize as sighandler_t; - -pub const SIGHUP: c_int = 1; -pub const SIGINT: c_int = 2; -pub const SIGQUIT: c_int = 3; -pub const SIGILL: c_int = 4; -pub const SIGTRAP: c_int = 5; -pub const SIGABRT: c_int = 6; -pub const SIGEMT: c_int = 7; -pub const SIGFPE: c_int = 8; -pub const SIGKILL: c_int = 9; -pub const SIGBUS: c_int = 10; -pub const SIGSEGV: c_int = 11; -pub const SIGFMT: c_int = 12; -pub const SIGPIPE: c_int = 13; -pub const SIGALRM: c_int = 14; -pub const SIGTERM: c_int = 15; -pub const SIGCNCL: c_int = 16; -pub const SIGSTOP: c_int = 17; -pub const SIGTSTP: c_int = 18; -pub const SIGCONT: c_int = 19; -pub const SIGCHLD: c_int = 20; -pub const SIGTTIN: c_int = 21; -pub const SIGTTOU: c_int = 22; -pub const SIGUSR1: c_int = 30; -pub const SIGUSR2: c_int = 31; -pub const SIGPOLL: c_int = 32; -pub const SIGPROF: c_int = 33; -pub const SIGSYS: c_int = 34; -pub const SIGURG: c_int = 35; -pub const SIGVTALRM: c_int = 36; -pub const SIGXCPU: c_int = 37; -pub const SIGXFSZ: c_int = 38; -pub const SIGRTMIN: c_int = 48; - -pub const SIGIO: c_int = SIGRTMIN; -pub const SIGWINCH: c_int = SIGRTMIN + 5; -pub const SIGLOST: c_int = SIGRTMIN + 6; - -pub const SIG_BLOCK: c_int = 1; -pub const 
SIG_UNBLOCK: c_int = 2; -pub const SIG_SETMASK: c_int = 3; - -pub const SA_NOCLDSTOP: c_int = 0x0001; -pub const SA_SIGINFO: c_int = 0x0002; -pub const SA_ONSTACK: c_int = 0x0004; -pub const SA_INTERRUPT: c_int = 0x0008; -pub const SA_RESETHAND: c_int = 0x0010; -pub const SA_RESTART: c_int = 0x0020; -pub const SA_NODEFER: c_int = 0x0040; -pub const SA_NOCLDWAIT: c_int = 0x0080; - -pub const SI_SYNC: c_int = 0; -pub const SI_USER: c_int = -1; -pub const SI_QUEUE: c_int = -2; -pub const SI_TIMER: c_int = -3; -pub const SI_ASYNCIO: c_int = -4; -pub const SI_MESGQ: c_int = -5; -pub const SI_CHILD: c_int = -6; -pub const SI_KILL: c_int = SI_USER; - -// vxParams.h definitions -pub const _PARM_NAME_MAX: c_int = 255; -pub const _PARM_PATH_MAX: c_int = 1024; - -// WAIT STUFF -pub const WNOHANG: c_int = 0x01; -pub const WUNTRACED: c_int = 0x02; - -const PTHREAD_MUTEXATTR_INITIALIZER: pthread_mutexattr_t = pthread_mutexattr_t { - mutexAttrStatus: PTHREAD_INITIALIZED_OBJ, - mutexAttrProtocol: PTHREAD_PRIO_NONE, - mutexAttrPrioceiling: 0, - mutexAttrType: PTHREAD_MUTEX_DEFAULT, - mutexAttrPshared: 1, -}; -pub const PTHREAD_MUTEX_INITIALIZER: pthread_mutex_t = pthread_mutex_t { - mutexSemId: null_mut(), - mutexValid: PTHREAD_VALID_OBJ, - mutexInitted: PTHREAD_UNUSED_YET_OBJ, - mutexCondRefCount: 0, - mutexSavPriority: -1, - mutexAttr: PTHREAD_MUTEXATTR_INITIALIZER, - mutexSemName: [0; _PTHREAD_SHARED_SEM_NAME_MAX], -}; - -const PTHREAD_CONDATTR_INITIALIZER: pthread_condattr_t = pthread_condattr_t { - condAttrStatus: 0xf70990ef, - condAttrPshared: 1, - condAttrClockId: CLOCK_REALTIME, -}; -pub const PTHREAD_COND_INITIALIZER: pthread_cond_t = pthread_cond_t { - condSemId: null_mut(), - condValid: PTHREAD_VALID_OBJ, - condInitted: PTHREAD_UNUSED_YET_OBJ, - condRefCount: 0, - condMutex: null_mut(), - condAttr: PTHREAD_CONDATTR_INITIALIZER, - condSemName: [0; _PTHREAD_SHARED_SEM_NAME_MAX], -}; - -const PTHREAD_RWLOCKATTR_INITIALIZER: pthread_rwlockattr_t = pthread_rwlockattr_t { - rwlockAttrStatus: PTHREAD_INITIALIZED_OBJ, - rwlockAttrPshared: 1, - rwlockAttrMaxReaders: 0, - rwlockAttrConformOpt: 1, -}; -pub const PTHREAD_RWLOCK_INITIALIZER: pthread_rwlock_t = pthread_rwlock_t { - rwlockSemId: null_mut(), - rwlockReadersRefCount: 0, - rwlockValid: PTHREAD_VALID_OBJ, - rwlockInitted: PTHREAD_UNUSED_YET_OBJ, - rwlockAttr: PTHREAD_RWLOCKATTR_INITIALIZER, - rwlockSemName: [0; _PTHREAD_SHARED_SEM_NAME_MAX], -}; - -pub const SEEK_SET: c_int = 0; -pub const SEEK_CUR: c_int = 1; -pub const SEEK_END: c_int = 2; - -// rtpLibCommon.h -pub const VX_RTP_NAME_LENGTH: c_int = 255; -pub const RTP_ID_ERROR: crate::RTP_ID = -1; - -// h/public/unistd.h -pub const _SC_GETPW_R_SIZE_MAX: c_int = 21; // Via unistd.h -pub const _SC_PAGESIZE: c_int = 39; -pub const O_ACCMODE: c_int = 3; -pub const O_CLOEXEC: c_int = 0x100000; // fcntlcom -pub const O_EXCL: c_int = 0x0800; -pub const O_CREAT: c_int = 0x0200; -pub const O_TRUNC: c_int = 0x0400; -pub const O_APPEND: c_int = 0x0008; -pub const O_RDWR: c_int = 0x0002; -pub const O_WRONLY: c_int = 0x0001; -pub const O_RDONLY: c_int = 0; -pub const O_NONBLOCK: c_int = 0x4000; - -// mman.h -pub const PROT_NONE: c_int = 0x0000; -pub const PROT_READ: c_int = 0x0001; -pub const PROT_WRITE: c_int = 0x0002; -pub const PROT_EXEC: c_int = 0x0004; - -pub const MAP_SHARED: c_int = 0x0001; -pub const MAP_PRIVATE: c_int = 0x0002; -pub const MAP_ANON: c_int = 0x0004; -pub const MAP_ANONYMOUS: c_int = MAP_ANON; -pub const MAP_FIXED: c_int = 0x0010; -pub const MAP_CONTIG: c_int = 0x0020; - -pub const 
MAP_FAILED: *mut c_void = !0 as *mut c_void; - -#[derive(Debug)] -pub enum FILE {} -impl Copy for FILE {} -impl Clone for FILE { - fn clone(&self) -> FILE { - *self - } -} -#[derive(Debug)] -pub enum fpos_t {} // FIXME(vxworks): fill this out with a struct -impl Copy for fpos_t {} -impl Clone for fpos_t { - fn clone(&self) -> fpos_t { - *self - } -} - -f! { - pub const fn CMSG_ALIGN(len: usize) -> usize { - len + size_of::() - 1 & !(size_of::() - 1) - } - - pub fn CMSG_NXTHDR(mhdr: *const msghdr, cmsg: *const cmsghdr) -> *mut cmsghdr { - let next = cmsg as usize - + CMSG_ALIGN((*cmsg).cmsg_len as usize) - + CMSG_ALIGN(size_of::()); - let max = (*mhdr).msg_control as usize + (*mhdr).msg_controllen as usize; - if next <= max { - (cmsg as usize + CMSG_ALIGN((*cmsg).cmsg_len as usize)) as *mut cmsghdr - } else { - core::ptr::null_mut::() - } - } - - pub fn CMSG_FIRSTHDR(mhdr: *const msghdr) -> *mut cmsghdr { - if (*mhdr).msg_controllen as usize > 0 { - (*mhdr).msg_control as *mut cmsghdr - } else { - core::ptr::null_mut::() - } - } - - pub fn CMSG_DATA(cmsg: *const cmsghdr) -> *mut c_uchar { - (cmsg as *mut c_uchar).offset(CMSG_ALIGN(size_of::()) as isize) - } - - pub const fn CMSG_SPACE(length: c_uint) -> c_uint { - (CMSG_ALIGN(length as usize) + CMSG_ALIGN(size_of::())) as c_uint - } - - pub const fn CMSG_LEN(length: c_uint) -> c_uint { - CMSG_ALIGN(size_of::()) as c_uint + length - } -} - -extern "C" { - pub fn isalnum(c: c_int) -> c_int; - pub fn isalpha(c: c_int) -> c_int; - pub fn iscntrl(c: c_int) -> c_int; - pub fn isdigit(c: c_int) -> c_int; - pub fn isgraph(c: c_int) -> c_int; - pub fn islower(c: c_int) -> c_int; - pub fn isprint(c: c_int) -> c_int; - pub fn ispunct(c: c_int) -> c_int; - pub fn isspace(c: c_int) -> c_int; - pub fn isupper(c: c_int) -> c_int; - pub fn isxdigit(c: c_int) -> c_int; - pub fn isblank(c: c_int) -> c_int; - pub fn tolower(c: c_int) -> c_int; - pub fn toupper(c: c_int) -> c_int; - pub fn fopen(filename: *const c_char, mode: *const c_char) -> *mut FILE; - pub fn freopen(filename: *const c_char, mode: *const c_char, file: *mut FILE) -> *mut FILE; - pub fn fflush(file: *mut FILE) -> c_int; - pub fn fclose(file: *mut FILE) -> c_int; - pub fn remove(filename: *const c_char) -> c_int; - pub fn rename(oldname: *const c_char, newname: *const c_char) -> c_int; - pub fn tmpfile() -> *mut FILE; - pub fn setvbuf(stream: *mut FILE, buffer: *mut c_char, mode: c_int, size: size_t) -> c_int; - pub fn setbuf(stream: *mut FILE, buf: *mut c_char); - pub fn getchar() -> c_int; - pub fn putchar(c: c_int) -> c_int; - pub fn fgetc(stream: *mut FILE) -> c_int; - pub fn fgets(buf: *mut c_char, n: c_int, stream: *mut FILE) -> *mut c_char; - pub fn fputc(c: c_int, stream: *mut FILE) -> c_int; - pub fn fputs(s: *const c_char, stream: *mut FILE) -> c_int; - pub fn puts(s: *const c_char) -> c_int; - pub fn ungetc(c: c_int, stream: *mut FILE) -> c_int; - pub fn fread(ptr: *mut c_void, size: size_t, nobj: size_t, stream: *mut FILE) -> size_t; - pub fn fwrite(ptr: *const c_void, size: size_t, nobj: size_t, stream: *mut FILE) -> size_t; - pub fn fseek(stream: *mut FILE, offset: c_long, whence: c_int) -> c_int; - pub fn ftell(stream: *mut FILE) -> c_long; - pub fn rewind(stream: *mut FILE); - pub fn fgetpos(stream: *mut FILE, ptr: *mut fpos_t) -> c_int; - pub fn fsetpos(stream: *mut FILE, ptr: *const fpos_t) -> c_int; - pub fn feof(stream: *mut FILE) -> c_int; - pub fn ferror(stream: *mut FILE) -> c_int; - pub fn perror(s: *const c_char); - pub fn atof(s: *const c_char) -> c_double; - pub fn 
atoi(s: *const c_char) -> c_int; - pub fn atol(s: *const c_char) -> c_long; - pub fn atoll(s: *const c_char) -> c_longlong; - pub fn strtod(s: *const c_char, endp: *mut *mut c_char) -> c_double; - pub fn strtof(s: *const c_char, endp: *mut *mut c_char) -> c_float; - pub fn strtol(s: *const c_char, endp: *mut *mut c_char, base: c_int) -> c_long; - pub fn strtoll(s: *const c_char, endp: *mut *mut c_char, base: c_int) -> c_longlong; - pub fn strtoul(s: *const c_char, endp: *mut *mut c_char, base: c_int) -> c_ulong; - pub fn strtoull(s: *const c_char, endp: *mut *mut c_char, base: c_int) -> c_ulonglong; - pub fn calloc(nobj: size_t, size: size_t) -> *mut c_void; - pub fn malloc(size: size_t) -> *mut c_void; - pub fn realloc(p: *mut c_void, size: size_t) -> *mut c_void; - pub fn free(p: *mut c_void); - pub fn abort() -> !; - pub fn exit(status: c_int) -> !; - pub fn atexit(cb: extern "C" fn()) -> c_int; - pub fn system(s: *const c_char) -> c_int; - pub fn getenv(s: *const c_char) -> *mut c_char; - - pub fn strcpy(dst: *mut c_char, src: *const c_char) -> *mut c_char; - pub fn strncpy(dst: *mut c_char, src: *const c_char, n: size_t) -> *mut c_char; - pub fn strcat(s: *mut c_char, ct: *const c_char) -> *mut c_char; - pub fn strncat(s: *mut c_char, ct: *const c_char, n: size_t) -> *mut c_char; - pub fn strcmp(cs: *const c_char, ct: *const c_char) -> c_int; - pub fn strncmp(cs: *const c_char, ct: *const c_char, n: size_t) -> c_int; - pub fn strcoll(cs: *const c_char, ct: *const c_char) -> c_int; - pub fn strchr(cs: *const c_char, c: c_int) -> *mut c_char; - pub fn strrchr(cs: *const c_char, c: c_int) -> *mut c_char; - pub fn strspn(cs: *const c_char, ct: *const c_char) -> size_t; - pub fn strcspn(cs: *const c_char, ct: *const c_char) -> size_t; - pub fn strdup(cs: *const c_char) -> *mut c_char; - pub fn strpbrk(cs: *const c_char, ct: *const c_char) -> *mut c_char; - pub fn strstr(cs: *const c_char, ct: *const c_char) -> *mut c_char; - pub fn strcasecmp(s1: *const c_char, s2: *const c_char) -> c_int; - pub fn strncasecmp(s1: *const c_char, s2: *const c_char, n: size_t) -> c_int; - pub fn strlen(cs: *const c_char) -> size_t; - pub fn strerror(n: c_int) -> *mut c_char; - pub fn strtok(s: *mut c_char, t: *const c_char) -> *mut c_char; - pub fn strxfrm(s: *mut c_char, ct: *const c_char, n: size_t) -> size_t; - pub fn wcslen(buf: *const wchar_t) -> size_t; - pub fn wcstombs(dest: *mut c_char, src: *const wchar_t, n: size_t) -> size_t; - - pub fn memchr(cx: *const c_void, c: c_int, n: size_t) -> *mut c_void; - pub fn wmemchr(cx: *const wchar_t, c: wchar_t, n: size_t) -> *mut wchar_t; - pub fn memcmp(cx: *const c_void, ct: *const c_void, n: size_t) -> c_int; - pub fn memcpy(dest: *mut c_void, src: *const c_void, n: size_t) -> *mut c_void; - pub fn memmove(dest: *mut c_void, src: *const c_void, n: size_t) -> *mut c_void; - pub fn memset(dest: *mut c_void, c: c_int, n: size_t) -> *mut c_void; -} - -extern "C" { - pub fn fprintf(stream: *mut crate::FILE, format: *const c_char, ...) -> c_int; - pub fn printf(format: *const c_char, ...) -> c_int; - pub fn snprintf(s: *mut c_char, n: size_t, format: *const c_char, ...) -> c_int; - pub fn sprintf(s: *mut c_char, format: *const c_char, ...) -> c_int; - pub fn fscanf(stream: *mut crate::FILE, format: *const c_char, ...) -> c_int; - pub fn scanf(format: *const c_char, ...) -> c_int; - pub fn sscanf(s: *const c_char, format: *const c_char, ...) 
-> c_int; - pub fn getchar_unlocked() -> c_int; - pub fn putchar_unlocked(c: c_int) -> c_int; - pub fn stat(path: *const c_char, buf: *mut stat) -> c_int; - pub fn fdopen(fd: c_int, mode: *const c_char) -> *mut crate::FILE; - pub fn fileno(stream: *mut crate::FILE) -> c_int; - pub fn creat(path: *const c_char, mode: mode_t) -> c_int; - pub fn rewinddir(dirp: *mut crate::DIR); - pub fn fchown(fd: c_int, owner: crate::uid_t, group: crate::gid_t) -> c_int; - pub fn access(path: *const c_char, amode: c_int) -> c_int; - pub fn alarm(seconds: c_uint) -> c_uint; - pub fn fchdir(dirfd: c_int) -> c_int; - pub fn chown(path: *const c_char, uid: uid_t, gid: gid_t) -> c_int; - pub fn fpathconf(filedes: c_int, name: c_int) -> c_long; - pub fn getegid() -> gid_t; - pub fn geteuid() -> uid_t; - pub fn getgroups(ngroups_max: c_int, groups: *mut gid_t) -> c_int; - pub fn getlogin() -> *mut c_char; - pub fn getopt(argc: c_int, argv: *const *mut c_char, optstr: *const c_char) -> c_int; - pub fn pathconf(path: *const c_char, name: c_int) -> c_long; - pub fn pause() -> c_int; - pub fn seteuid(uid: uid_t) -> c_int; - pub fn setegid(gid: gid_t) -> c_int; - pub fn sleep(secs: c_uint) -> c_uint; - pub fn ttyname(fd: c_int) -> *mut c_char; - pub fn wait(status: *mut c_int) -> pid_t; - pub fn umask(mask: mode_t) -> mode_t; - pub fn mlock(addr: *const c_void, len: size_t) -> c_int; - pub fn mlockall(flags: c_int) -> c_int; - pub fn munlock(addr: *const c_void, len: size_t) -> c_int; - pub fn munlockall() -> c_int; - - pub fn mmap( - addr: *mut c_void, - len: size_t, - prot: c_int, - flags: c_int, - fd: c_int, - offset: off_t, - ) -> *mut c_void; - pub fn munmap(addr: *mut c_void, len: size_t) -> c_int; - - pub fn mprotect(addr: *mut c_void, len: size_t, prot: c_int) -> c_int; - pub fn msync(addr: *mut c_void, len: size_t, flags: c_int) -> c_int; - - pub fn truncate(path: *const c_char, length: off_t) -> c_int; - pub fn shm_open(name: *const c_char, oflag: c_int, mode: mode_t) -> c_int; - pub fn shm_unlink(name: *const c_char) -> c_int; - - pub fn gettimeofday(tp: *mut crate::timeval, tz: *mut c_void) -> c_int; - pub fn pthread_exit(value: *mut c_void) -> !; - pub fn pthread_attr_setdetachstate(attr: *mut crate::pthread_attr_t, state: c_int) -> c_int; - - pub fn strerror_r(errnum: c_int, buf: *mut c_char, buflen: size_t) -> c_int; - - pub fn sigaddset(set: *mut sigset_t, signum: c_int) -> c_int; - - pub fn sigaction(signum: c_int, act: *const sigaction, oldact: *mut sigaction) -> c_int; - - pub fn utimes(filename: *const c_char, times: *const crate::timeval) -> c_int; - - pub fn futimens(fd: c_int, times: *const crate::timespec) -> c_int; - - #[link_name = "_rtld_dlopen"] - pub fn dlopen(filename: *const c_char, flag: c_int) -> *mut c_void; - - #[link_name = "_rtld_dlerror"] - pub fn dlerror() -> *mut c_char; - - #[link_name = "_rtld_dlsym"] - pub fn dlsym(handle: *mut c_void, symbol: *const c_char) -> *mut c_void; - - #[link_name = "_rtld_dlclose"] - pub fn dlclose(handle: *mut c_void) -> c_int; - - #[link_name = "_rtld_dladdr"] - pub fn dladdr(addr: *mut c_void, info: *mut Dl_info) -> c_int; - - // time.h - pub fn gmtime_r(time_p: *const time_t, result: *mut tm) -> *mut tm; - pub fn localtime_r(time_p: *const time_t, result: *mut tm) -> *mut tm; - pub fn mktime(tm: *mut tm) -> time_t; - pub fn time(time: *mut time_t) -> time_t; - pub fn gmtime(time_p: *const time_t) -> *mut tm; - pub fn localtime(time_p: *const time_t) -> *mut tm; - pub fn timegm(tm: *mut tm) -> time_t; - pub fn difftime(time1: time_t, time0: 
time_t) -> c_double; - pub fn gethostname(name: *mut c_char, len: size_t) -> c_int; - pub fn usleep(secs: crate::useconds_t) -> c_int; - pub fn putenv(string: *mut c_char) -> c_int; - pub fn setlocale(category: c_int, locale: *const c_char) -> *mut c_char; - - pub fn sigprocmask(how: c_int, set: *const sigset_t, oldset: *mut sigset_t) -> c_int; - pub fn sigpending(set: *mut sigset_t) -> c_int; - - pub fn mkfifo(path: *const c_char, mode: mode_t) -> c_int; - - pub fn fseeko(stream: *mut crate::FILE, offset: off_t, whence: c_int) -> c_int; - pub fn ftello(stream: *mut crate::FILE) -> off_t; - pub fn mkstemp(template: *mut c_char) -> c_int; - - pub fn tmpnam(ptr: *mut c_char) -> *mut c_char; - - pub fn openlog(ident: *const c_char, logopt: c_int, facility: c_int); - pub fn closelog(); - pub fn setlogmask(maskpri: c_int) -> c_int; - pub fn syslog(priority: c_int, message: *const c_char, ...); - pub fn getline(lineptr: *mut *mut c_char, n: *mut size_t, stream: *mut FILE) -> ssize_t; - -} - -extern "C" { - // stdlib.h - pub fn memalign(block_size: size_t, size_arg: size_t) -> *mut c_void; - - // ioLib.h - pub fn getcwd(buf: *mut c_char, size: size_t) -> *mut c_char; - - // ioLib.h - pub fn chdir(attr: *const c_char) -> c_int; - - // pthread.h - pub fn pthread_mutexattr_init(attr: *mut pthread_mutexattr_t) -> c_int; - - // pthread.h - pub fn pthread_mutexattr_destroy(attr: *mut pthread_mutexattr_t) -> c_int; - - // pthread.h - pub fn pthread_mutexattr_settype(pAttr: *mut crate::pthread_mutexattr_t, pType: c_int) - -> c_int; - - // pthread.h - pub fn pthread_mutex_init( - mutex: *mut pthread_mutex_t, - attr: *const pthread_mutexattr_t, - ) -> c_int; - - // pthread.h - pub fn pthread_mutex_destroy(mutex: *mut pthread_mutex_t) -> c_int; - - // pthread.h - pub fn pthread_mutex_lock(mutex: *mut pthread_mutex_t) -> c_int; - - // pthread.h - pub fn pthread_mutex_trylock(mutex: *mut pthread_mutex_t) -> c_int; - - // pthread.h - pub fn pthread_mutex_timedlock(attr: *mut pthread_mutex_t, spec: *const timespec) -> c_int; - - // pthread.h - pub fn pthread_mutex_unlock(mutex: *mut pthread_mutex_t) -> c_int; - - // pthread.h - pub fn pthread_attr_setname(pAttr: *mut crate::pthread_attr_t, name: *mut c_char) -> c_int; - - // pthread.h - pub fn pthread_attr_setstacksize(attr: *mut crate::pthread_attr_t, stacksize: size_t) -> c_int; - - // pthread.h - pub fn pthread_attr_getstacksize( - attr: *const crate::pthread_attr_t, - size: *mut size_t, - ) -> c_int; - - // pthread.h - pub fn pthread_attr_init(attr: *mut crate::pthread_attr_t) -> c_int; - - // pthread.h - pub fn pthread_create( - pThread: *mut crate::pthread_t, - pAttr: *const crate::pthread_attr_t, - start_routine: extern "C" fn(*mut c_void) -> *mut c_void, - value: *mut c_void, - ) -> c_int; - - //pthread.h - pub fn pthread_setschedparam( - native: crate::pthread_t, - policy: c_int, - param: *const crate::sched_param, - ) -> c_int; - - //pthread.h - pub fn pthread_getschedparam( - native: crate::pthread_t, - policy: *mut c_int, - param: *mut crate::sched_param, - ) -> c_int; - - //pthread.h - pub fn pthread_attr_setinheritsched( - attr: *mut crate::pthread_attr_t, - inheritsched: c_int, - ) -> c_int; - - //pthread.h - pub fn pthread_attr_setschedpolicy(attr: *mut crate::pthread_attr_t, policy: c_int) -> c_int; - - // pthread.h - pub fn pthread_attr_destroy(thread: *mut crate::pthread_attr_t) -> c_int; - - // pthread.h - pub fn pthread_detach(thread: crate::pthread_t) -> c_int; - - // int pthread_atfork (void (*)(void), void (*)(void), void (*)(void)); - 
pub fn pthread_atfork( - prepare: Option, - parent: Option, - child: Option, - ) -> c_int; - - // stat.h - pub fn fstat(fildes: c_int, buf: *mut stat) -> c_int; - - // stat.h - pub fn lstat(path: *const c_char, buf: *mut stat) -> c_int; - - // unistd.h - pub fn ftruncate(fd: c_int, length: off_t) -> c_int; - - // dirent.h - pub fn readdir_r( - pDir: *mut crate::DIR, - entry: *mut crate::dirent, - result: *mut *mut crate::dirent, - ) -> c_int; - - // dirent.h - pub fn readdir(pDir: *mut crate::DIR) -> *mut crate::dirent; - - // fcntl.h or - // ioLib.h - pub fn open(path: *const c_char, oflag: c_int, ...) -> c_int; - - // poll.h - pub fn poll(fds: *mut pollfd, nfds: nfds_t, timeout: c_int) -> c_int; - - // pthread.h - pub fn pthread_condattr_init(attr: *mut crate::pthread_condattr_t) -> c_int; - - // pthread.h - pub fn pthread_condattr_destroy(attr: *mut crate::pthread_condattr_t) -> c_int; - - // pthread.h - pub fn pthread_condattr_getclock( - pAttr: *const crate::pthread_condattr_t, - pClockId: *mut crate::clockid_t, - ) -> c_int; - - // pthread.h - pub fn pthread_condattr_setclock( - pAttr: *mut crate::pthread_condattr_t, - clockId: crate::clockid_t, - ) -> c_int; - - // pthread.h - pub fn pthread_cond_init( - cond: *mut crate::pthread_cond_t, - attr: *const crate::pthread_condattr_t, - ) -> c_int; - - // pthread.h - pub fn pthread_cond_destroy(cond: *mut pthread_cond_t) -> c_int; - - // pthread.h - pub fn pthread_cond_signal(cond: *mut crate::pthread_cond_t) -> c_int; - - // pthread.h - pub fn pthread_cond_broadcast(cond: *mut crate::pthread_cond_t) -> c_int; - - // pthread.h - pub fn pthread_cond_wait( - cond: *mut crate::pthread_cond_t, - mutex: *mut crate::pthread_mutex_t, - ) -> c_int; - - // pthread.h - pub fn pthread_rwlockattr_init(attr: *mut crate::pthread_rwlockattr_t) -> c_int; - - // pthread.h - pub fn pthread_rwlockattr_destroy(attr: *mut crate::pthread_rwlockattr_t) -> c_int; - - // pthread.h - pub fn pthread_rwlockattr_setmaxreaders( - attr: *mut crate::pthread_rwlockattr_t, - attr2: c_uint, - ) -> c_int; - - // pthread.h - pub fn pthread_rwlock_init( - attr: *mut crate::pthread_rwlock_t, - host: *const crate::pthread_rwlockattr_t, - ) -> c_int; - - // pthread.h - pub fn pthread_rwlock_destroy(attr: *mut crate::pthread_rwlock_t) -> c_int; - - // pthread.h - pub fn pthread_rwlock_rdlock(attr: *mut crate::pthread_rwlock_t) -> c_int; - - // pthread.h - pub fn pthread_rwlock_tryrdlock(attr: *mut crate::pthread_rwlock_t) -> c_int; - - // pthread.h - pub fn pthread_rwlock_timedrdlock( - attr: *mut crate::pthread_rwlock_t, - host: *const crate::timespec, - ) -> c_int; - - // pthread.h - pub fn pthread_rwlock_wrlock(attr: *mut crate::pthread_rwlock_t) -> c_int; - - // pthread.h - pub fn pthread_rwlock_trywrlock(attr: *mut crate::pthread_rwlock_t) -> c_int; - - // pthread.h - pub fn pthread_rwlock_timedwrlock( - attr: *mut crate::pthread_rwlock_t, - host: *const crate::timespec, - ) -> c_int; - - // pthread.h - pub fn pthread_rwlock_unlock(attr: *mut crate::pthread_rwlock_t) -> c_int; - - // pthread.h - pub fn pthread_key_create( - key: *mut crate::pthread_key_t, - dtor: Option, - ) -> c_int; - - // pthread.h - pub fn pthread_key_delete(key: crate::pthread_key_t) -> c_int; - - // pthread.h - pub fn pthread_setspecific(key: crate::pthread_key_t, value: *const c_void) -> c_int; - - // pthread.h - pub fn pthread_getspecific(key: crate::pthread_key_t) -> *mut c_void; - - // pthread.h - pub fn pthread_cond_timedwait( - cond: *mut crate::pthread_cond_t, - mutex: *mut 
crate::pthread_mutex_t, - abstime: *const crate::timespec, - ) -> c_int; - - // pthread.h - pub fn pthread_attr_getname(attr: *mut crate::pthread_attr_t, name: *mut *mut c_char) -> c_int; - - // pthread.h - pub fn pthread_join(thread: crate::pthread_t, status: *mut *mut c_void) -> c_int; - - // pthread.h - pub fn pthread_self() -> crate::pthread_t; - - // clockLib.h - pub fn clock_gettime(clock_id: crate::clockid_t, tp: *mut crate::timespec) -> c_int; - - // clockLib.h - pub fn clock_settime(clock_id: crate::clockid_t, tp: *const crate::timespec) -> c_int; - - // clockLib.h - pub fn clock_getres(clock_id: crate::clockid_t, res: *mut crate::timespec) -> c_int; - - // clockLib.h - pub fn clock_nanosleep( - clock_id: crate::clockid_t, - flags: c_int, - rqtp: *const crate::timespec, - rmtp: *mut crate::timespec, - ) -> c_int; - - // timerLib.h - pub fn nanosleep(rqtp: *const crate::timespec, rmtp: *mut crate::timespec) -> c_int; - - // socket.h - pub fn accept(s: c_int, addr: *mut crate::sockaddr, addrlen: *mut crate::socklen_t) -> c_int; - - // socket.h - pub fn bind(fd: c_int, addr: *const sockaddr, len: socklen_t) -> c_int; - - // socket.h - pub fn connect(s: c_int, name: *const crate::sockaddr, namelen: crate::socklen_t) -> c_int; - - // socket.h - pub fn getpeername( - s: c_int, - name: *mut crate::sockaddr, - namelen: *mut crate::socklen_t, - ) -> c_int; - - // socket.h - pub fn getsockname(socket: c_int, address: *mut sockaddr, address_len: *mut socklen_t) - -> c_int; - - // socket.h - pub fn getsockopt( - sockfd: c_int, - level: c_int, - optname: c_int, - optval: *mut c_void, - optlen: *mut crate::socklen_t, - ) -> c_int; - - // socket.h - pub fn listen(socket: c_int, backlog: c_int) -> c_int; - - // socket.h - pub fn recv(s: c_int, buf: *mut c_void, bufLen: size_t, flags: c_int) -> ssize_t; - - // socket.h - pub fn recvfrom( - s: c_int, - buf: *mut c_void, - bufLen: size_t, - flags: c_int, - from: *mut crate::sockaddr, - pFromLen: *mut crate::socklen_t, - ) -> ssize_t; - - pub fn recvmsg(socket: c_int, mp: *mut crate::msghdr, flags: c_int) -> ssize_t; - - // socket.h - pub fn send(socket: c_int, buf: *const c_void, len: size_t, flags: c_int) -> ssize_t; - - pub fn sendmsg(socket: c_int, mp: *const crate::msghdr, flags: c_int) -> ssize_t; - - // socket.h - pub fn sendto( - socket: c_int, - buf: *const c_void, - len: size_t, - flags: c_int, - addr: *const sockaddr, - addrlen: socklen_t, - ) -> ssize_t; - - // socket.h - pub fn setsockopt( - socket: c_int, - level: c_int, - name: c_int, - value: *const c_void, - option_len: socklen_t, - ) -> c_int; - - // socket.h - pub fn shutdown(s: c_int, how: c_int) -> c_int; - - // socket.h - pub fn socket(domain: c_int, _type: c_int, protocol: c_int) -> c_int; - - // icotl.h - pub fn ioctl(fd: c_int, request: c_int, ...) -> c_int; - - // fcntl.h - pub fn fcntl(fd: c_int, cmd: c_int, ...) 
-> c_int; - - // ntp_rfc2553.h for kernel - // netdb.h for user - pub fn gai_strerror(errcode: c_int) -> *mut c_char; - - // ioLib.h or - // unistd.h - pub fn close(fd: c_int) -> c_int; - - // ioLib.h or - // unistd.h - pub fn read(fd: c_int, buf: *mut c_void, count: size_t) -> ssize_t; - - // ioLib.h or - // unistd.h - pub fn write(fd: c_int, buf: *const c_void, count: size_t) -> ssize_t; - - // ioLib.h or - // unistd.h - pub fn isatty(fd: c_int) -> c_int; - - // ioLib.h or - // unistd.h - pub fn dup(src: c_int) -> c_int; - - // ioLib.h or - // unistd.h - pub fn dup2(src: c_int, dst: c_int) -> c_int; - - // ioLib.h or - // unistd.h - pub fn pipe(fds: *mut c_int) -> c_int; - - // ioLib.h or - // unistd.h - pub fn unlink(pathname: *const c_char) -> c_int; - - // unistd.h and - // ioLib.h - pub fn lseek(fd: c_int, offset: off_t, whence: c_int) -> off_t; - - // netdb.h - pub fn getaddrinfo( - node: *const c_char, - service: *const c_char, - hints: *const addrinfo, - res: *mut *mut addrinfo, - ) -> c_int; - - // netdb.h - pub fn freeaddrinfo(res: *mut addrinfo); - - // signal.h - pub fn signal(signum: c_int, handler: sighandler_t) -> sighandler_t; - - // unistd.h - pub fn getpid() -> pid_t; - - // unistd.h - pub fn getppid() -> pid_t; - - // wait.h - pub fn waitpid(pid: pid_t, status: *mut c_int, options: c_int) -> pid_t; - - // unistd.h - pub fn sysconf(attr: c_int) -> c_long; - - // stdlib.h - pub fn setenv( - // setenv.c - envVarName: *const c_char, - envVarValue: *const c_char, - overwrite: c_int, - ) -> c_int; - - // stdlib.h - pub fn unsetenv( - // setenv.c - envVarName: *const c_char, - ) -> c_int; - - // stdlib.h - pub fn realpath(fileName: *const c_char, resolvedName: *mut c_char) -> *mut c_char; - - // unistd.h - pub fn link(src: *const c_char, dst: *const c_char) -> c_int; - - // unistd.h - pub fn readlink(path: *const c_char, buf: *mut c_char, bufsize: size_t) -> ssize_t; - - // unistd.h - pub fn symlink(path1: *const c_char, path2: *const c_char) -> c_int; - - // dirent.h - pub fn opendir(name: *const c_char) -> *mut crate::DIR; - - // unistd.h - pub fn rmdir(path: *const c_char) -> c_int; - - // stat.h - pub fn mkdir(dirName: *const c_char, mode: mode_t) -> c_int; - - // stat.h - pub fn chmod(path: *const c_char, mode: mode_t) -> c_int; - - // stat.h - pub fn fchmod(attr1: c_int, attr2: mode_t) -> c_int; - - // unistd.h - pub fn fsync(fd: c_int) -> c_int; - - // dirent.h - pub fn closedir(ptr: *mut crate::DIR) -> c_int; - - //sched.h - pub fn sched_get_priority_max(policy: c_int) -> c_int; - - //sched.h - pub fn sched_get_priority_min(policy: c_int) -> c_int; - - //sched.h - pub fn sched_setparam(pid: crate::pid_t, param: *const crate::sched_param) -> c_int; - - //sched.h - pub fn sched_getparam(pid: crate::pid_t, param: *mut crate::sched_param) -> c_int; - - //sched.h - pub fn sched_setscheduler( - pid: crate::pid_t, - policy: c_int, - param: *const crate::sched_param, - ) -> c_int; - - //sched.h - pub fn sched_getscheduler(pid: crate::pid_t) -> c_int; - - //sched.h - pub fn sched_rr_get_interval(pid: crate::pid_t, tp: *mut crate::timespec) -> c_int; - - // sched.h - pub fn sched_yield() -> c_int; - - // errnoLib.h - pub fn errnoSet(err: c_int) -> c_int; - - // errnoLib.h - pub fn errnoGet() -> c_int; - - // unistd.h - pub fn _exit(status: c_int) -> !; - - // unistd.h - pub fn setgid(gid: crate::gid_t) -> c_int; - - // unistd.h - pub fn getgid() -> crate::gid_t; - - // unistd.h - pub fn setuid(uid: crate::uid_t) -> c_int; - - // unistd.h - pub fn getuid() -> crate::uid_t; - - // 
signal.h - pub fn sigemptyset(__set: *mut sigset_t) -> c_int; - - // pthread.h for kernel - // signal.h for user - pub fn pthread_sigmask(__how: c_int, __set: *const sigset_t, __oset: *mut sigset_t) -> c_int; - - // signal.h for user - pub fn kill(__pid: pid_t, __signo: c_int) -> c_int; - - // signal.h for user - pub fn sigqueue(__pid: pid_t, __signo: c_int, __value: crate::sigval) -> c_int; - - // signal.h for user - pub fn _sigqueue( - rtpId: crate::RTP_ID, - signo: c_int, - pValue: *const crate::sigval, - sigCode: c_int, - ) -> c_int; - - // signal.h - pub fn taskKill(taskId: crate::TASK_ID, signo: c_int) -> c_int; - - // signal.h - pub fn raise(__signo: c_int) -> c_int; - - // taskLibCommon.h - pub fn taskIdSelf() -> crate::TASK_ID; - pub fn taskDelay(ticks: crate::_Vx_ticks_t) -> c_int; - - // taskLib.h - pub fn taskNameSet(task_id: crate::TASK_ID, task_name: *mut c_char) -> c_int; - pub fn taskNameGet(task_id: crate::TASK_ID, buf_name: *mut c_char, bufsize: size_t) -> c_int; - - // rtpLibCommon.h - pub fn rtpInfoGet(rtpId: crate::RTP_ID, rtpStruct: *mut crate::RTP_DESC) -> c_int; - pub fn rtpSpawn( - pubrtpFileName: *const c_char, - argv: *mut *const c_char, - envp: *mut *const c_char, - priority: c_int, - uStackSize: size_t, - options: c_int, - taskOptions: c_int, - ) -> RTP_ID; - - // ioLib.h - pub fn _realpath(fileName: *const c_char, resolvedName: *mut c_char) -> *mut c_char; - - // pathLib.h - pub fn _pathIsAbsolute(filepath: *const c_char, pNameTail: *mut *const c_char) -> BOOL; - - pub fn writev(fd: c_int, iov: *const crate::iovec, iovcnt: c_int) -> ssize_t; - pub fn readv(fd: c_int, iov: *const crate::iovec, iovcnt: c_int) -> ssize_t; - - // randomNumGen.h - pub fn randBytes(buf: *mut c_uchar, length: c_int) -> c_int; - pub fn randABytes(buf: *mut c_uchar, length: c_int) -> c_int; - pub fn randUBytes(buf: *mut c_uchar, length: c_int) -> c_int; - pub fn randSecure() -> c_int; - - // mqueue.h - pub fn mq_open(name: *const c_char, oflag: c_int, ...) -> crate::mqd_t; - pub fn mq_close(mqd: crate::mqd_t) -> c_int; - pub fn mq_unlink(name: *const c_char) -> c_int; - pub fn mq_receive( - mqd: crate::mqd_t, - msg_ptr: *mut c_char, - msg_len: size_t, - msg_prio: *mut c_uint, - ) -> ssize_t; - pub fn mq_timedreceive( - mqd: crate::mqd_t, - msg_ptr: *mut c_char, - msg_len: size_t, - msg_prio: *mut c_uint, - abs_timeout: *const crate::timespec, - ) -> ssize_t; - pub fn mq_send( - mqd: crate::mqd_t, - msg_ptr: *const c_char, - msg_len: size_t, - msg_prio: c_uint, - ) -> c_int; - pub fn mq_timedsend( - mqd: crate::mqd_t, - msg_ptr: *const c_char, - msg_len: size_t, - msg_prio: c_uint, - abs_timeout: *const crate::timespec, - ) -> c_int; - pub fn mq_getattr(mqd: crate::mqd_t, attr: *mut crate::mq_attr) -> c_int; - pub fn mq_setattr( - mqd: crate::mqd_t, - newattr: *const crate::mq_attr, - oldattr: *mut crate::mq_attr, - ) -> c_int; - - // vxCpuLib.h - pub fn vxCpuEnabledGet() -> crate::cpuset_t; // Get set of running CPU's in the system - pub fn vxCpuConfiguredGet() -> crate::cpuset_t; // Get set of Configured CPU's in the system -} - -//Dummy functions, these don't really exist in VxWorks. - -// wait.h macros -safe_f! 
{
-    pub const fn WIFEXITED(status: c_int) -> bool {
-        (status & 0xFF00) == 0
-    }
-    pub const fn WIFSIGNALED(status: c_int) -> bool {
-        (status & 0xFF00) != 0
-    }
-    pub const fn WIFSTOPPED(status: c_int) -> bool {
-        (status & 0xFF0000) != 0
-    }
-    pub const fn WEXITSTATUS(status: c_int) -> c_int {
-        status & 0xFF
-    }
-    pub const fn WTERMSIG(status: c_int) -> c_int {
-        (status >> 8) & 0xFF
-    }
-    pub const fn WSTOPSIG(status: c_int) -> c_int {
-        (status >> 16) & 0xFF
-    }
-}
-
-pub fn pread(_fd: c_int, _buf: *mut c_void, _count: size_t, _offset: off64_t) -> ssize_t {
-    -1
-}
-
-pub fn pwrite(_fd: c_int, _buf: *const c_void, _count: size_t, _offset: off64_t) -> ssize_t {
-    -1
-}
-pub fn posix_memalign(memptr: *mut *mut c_void, align: size_t, size: size_t) -> c_int {
-    // check to see if align is a power of 2 and if align is a multiple
-    // of sizeof(void *)
-    if (align & align - 1 != 0) || (align as usize % size_of::() != 0) {
-        return crate::EINVAL;
-    }
-
-    unsafe {
-        // posix_memalign should not set errno
-        let e = crate::errnoGet();
-
-        let temp = memalign(align, size);
-        crate::errnoSet(e as c_int);
-
-        if temp.is_null() {
-            crate::ENOMEM
-        } else {
-            *memptr = temp;
-            0
-        }
-    }
-}
-
-cfg_if! {
-    if #[cfg(target_arch = "aarch64")] {
-        mod aarch64;
-        pub use self::aarch64::*;
-    } else if #[cfg(target_arch = "arm")] {
-        mod arm;
-        pub use self::arm::*;
-    } else if #[cfg(target_arch = "x86")] {
-        mod x86;
-        pub use self::x86::*;
-    } else if #[cfg(target_arch = "x86_64")] {
-        mod x86_64;
-        pub use self::x86_64::*;
-    } else if #[cfg(target_arch = "powerpc")] {
-        mod powerpc;
-        pub use self::powerpc::*;
-    } else if #[cfg(target_arch = "powerpc64")] {
-        mod powerpc64;
-        pub use self::powerpc64::*;
-    } else if #[cfg(target_arch = "riscv32")] {
-        mod riscv32;
-        pub use self::riscv32::*;
-    } else if #[cfg(target_arch = "riscv64")] {
-        mod riscv64;
-        pub use self::riscv64::*;
-    } else {
-        // Unknown target_arch
-    }
-}
diff --git a/vendor/libc/src/vxworks/powerpc.rs b/vendor/libc/src/vxworks/powerpc.rs
deleted file mode 100644
index 376783c8234baf..00000000000000
--- a/vendor/libc/src/vxworks/powerpc.rs
+++ /dev/null
@@ -1 +0,0 @@
-pub type wchar_t = u32;
diff --git a/vendor/libc/src/vxworks/powerpc64.rs b/vendor/libc/src/vxworks/powerpc64.rs
deleted file mode 100644
index 376783c8234baf..00000000000000
--- a/vendor/libc/src/vxworks/powerpc64.rs
+++ /dev/null
@@ -1 +0,0 @@
-pub type wchar_t = u32;
diff --git a/vendor/libc/src/vxworks/riscv32.rs b/vendor/libc/src/vxworks/riscv32.rs
deleted file mode 100644
index f562626f7fb2b7..00000000000000
--- a/vendor/libc/src/vxworks/riscv32.rs
+++ /dev/null
@@ -1 +0,0 @@
-pub type wchar_t = i32;
diff --git a/vendor/libc/src/vxworks/riscv64.rs b/vendor/libc/src/vxworks/riscv64.rs
deleted file mode 100644
index f562626f7fb2b7..00000000000000
--- a/vendor/libc/src/vxworks/riscv64.rs
+++ /dev/null
@@ -1 +0,0 @@
-pub type wchar_t = i32;
diff --git a/vendor/libc/src/vxworks/x86.rs b/vendor/libc/src/vxworks/x86.rs
deleted file mode 100644
index f562626f7fb2b7..00000000000000
--- a/vendor/libc/src/vxworks/x86.rs
+++ /dev/null
@@ -1 +0,0 @@
-pub type wchar_t = i32;
diff --git a/vendor/libc/src/vxworks/x86_64.rs b/vendor/libc/src/vxworks/x86_64.rs
deleted file mode 100644
index f562626f7fb2b7..00000000000000
--- a/vendor/libc/src/vxworks/x86_64.rs
+++ /dev/null
@@ -1 +0,0 @@
-pub type wchar_t = i32;
diff --git a/vendor/libc/src/wasi/mod.rs b/vendor/libc/src/wasi/mod.rs
deleted file mode 100644
index bb3b7295487801..00000000000000
--- a/vendor/libc/src/wasi/mod.rs
+++
/dev/null @@ -1,853 +0,0 @@ -//! [wasi-libc](https://github.com/WebAssembly/wasi-libc) definitions. -//! -//! `wasi-libc` project provides multiple libraries including emulated features, but we list only -//! basic features with `libc.a` here. - -use core::iter::Iterator; - -use crate::prelude::*; - -pub type intmax_t = i64; -pub type uintmax_t = u64; -pub type size_t = usize; -pub type ssize_t = isize; -pub type ptrdiff_t = isize; -pub type intptr_t = isize; -pub type uintptr_t = usize; -pub type off_t = i64; -pub type pid_t = i32; -pub type clock_t = c_longlong; -pub type time_t = c_longlong; -pub type ino_t = u64; -pub type sigset_t = c_uchar; -pub type suseconds_t = c_longlong; -pub type mode_t = u32; -pub type dev_t = u64; -pub type uid_t = u32; -pub type gid_t = u32; -pub type nlink_t = u64; -pub type blksize_t = c_long; -pub type blkcnt_t = i64; -pub type nfds_t = c_ulong; -pub type wchar_t = i32; -pub type nl_item = c_int; -pub type __wasi_rights_t = u64; -pub type locale_t = *mut __locale_struct; - -s_no_extra_traits! { - #[repr(align(16))] - pub struct max_align_t { - priv_: [f64; 4], - } -} - -#[allow(missing_copy_implementations)] -#[derive(Debug)] -pub enum FILE {} -#[allow(missing_copy_implementations)] -#[derive(Debug)] -pub enum DIR {} -#[allow(missing_copy_implementations)] -#[derive(Debug)] -pub enum __locale_struct {} - -s_paren! { - // in wasi-libc clockid_t is const struct __clockid* (where __clockid is an opaque struct), - // but that's an implementation detail that we don't want to have to deal with - #[repr(transparent)] - #[allow(dead_code)] - pub struct clockid_t(*const u8); -} - -unsafe impl Send for clockid_t {} -unsafe impl Sync for clockid_t {} - -s! { - #[repr(align(8))] - pub struct fpos_t { - data: [u8; 16], - } - - pub struct tm { - pub tm_sec: c_int, - pub tm_min: c_int, - pub tm_hour: c_int, - pub tm_mday: c_int, - pub tm_mon: c_int, - pub tm_year: c_int, - pub tm_wday: c_int, - pub tm_yday: c_int, - pub tm_isdst: c_int, - pub __tm_gmtoff: c_int, - pub __tm_zone: *const c_char, - pub __tm_nsec: c_int, - } - - pub struct timeval { - pub tv_sec: time_t, - pub tv_usec: suseconds_t, - } - - pub struct timespec { - pub tv_sec: time_t, - pub tv_nsec: c_long, - } - - pub struct tms { - pub tms_utime: clock_t, - pub tms_stime: clock_t, - pub tms_cutime: clock_t, - pub tms_cstime: clock_t, - } - - pub struct itimerspec { - pub it_interval: timespec, - pub it_value: timespec, - } - - pub struct iovec { - pub iov_base: *mut c_void, - pub iov_len: size_t, - } - - pub struct lconv { - pub decimal_point: *mut c_char, - pub thousands_sep: *mut c_char, - pub grouping: *mut c_char, - pub int_curr_symbol: *mut c_char, - pub currency_symbol: *mut c_char, - pub mon_decimal_point: *mut c_char, - pub mon_thousands_sep: *mut c_char, - pub mon_grouping: *mut c_char, - pub positive_sign: *mut c_char, - pub negative_sign: *mut c_char, - pub int_frac_digits: c_char, - pub frac_digits: c_char, - pub p_cs_precedes: c_char, - pub p_sep_by_space: c_char, - pub n_cs_precedes: c_char, - pub n_sep_by_space: c_char, - pub p_sign_posn: c_char, - pub n_sign_posn: c_char, - pub int_p_cs_precedes: c_char, - pub int_p_sep_by_space: c_char, - pub int_n_cs_precedes: c_char, - pub int_n_sep_by_space: c_char, - pub int_p_sign_posn: c_char, - pub int_n_sign_posn: c_char, - } - - pub struct pollfd { - pub fd: c_int, - pub events: c_short, - pub revents: c_short, - } - - pub struct rusage { - pub ru_utime: timeval, - pub ru_stime: timeval, - } - - pub struct stat { - pub st_dev: dev_t, - pub st_ino: 
ino_t, - pub st_nlink: nlink_t, - pub st_mode: mode_t, - pub st_uid: uid_t, - pub st_gid: gid_t, - __pad0: c_uint, - pub st_rdev: dev_t, - pub st_size: off_t, - pub st_blksize: blksize_t, - pub st_blocks: blkcnt_t, - pub st_atim: timespec, - pub st_mtim: timespec, - pub st_ctim: timespec, - __reserved: [c_longlong; 3], - } - - pub struct fd_set { - __nfds: usize, - __fds: [c_int; FD_SETSIZE as usize], - } -} - -// Declare dirent outside of s! so that it doesn't implement Copy, Eq, Hash, -// etc., since it contains a flexible array member with a dynamic size. -#[repr(C)] -#[allow(missing_copy_implementations)] -#[derive(Debug)] -pub struct dirent { - pub d_ino: ino_t, - pub d_type: c_uchar, - /// d_name is declared in WASI libc as a flexible array member, which - /// can't be directly expressed in Rust. As an imperfect workaround, - /// declare it as a zero-length array instead. - pub d_name: [c_char; 0], -} - -pub const EXIT_SUCCESS: c_int = 0; -pub const EXIT_FAILURE: c_int = 1; -pub const STDIN_FILENO: c_int = 0; -pub const STDOUT_FILENO: c_int = 1; -pub const STDERR_FILENO: c_int = 2; -pub const SEEK_SET: c_int = 0; -pub const SEEK_CUR: c_int = 1; -pub const SEEK_END: c_int = 2; -pub const _IOFBF: c_int = 0; -pub const _IONBF: c_int = 2; -pub const _IOLBF: c_int = 1; -pub const F_GETFD: c_int = 1; -pub const F_SETFD: c_int = 2; -pub const F_GETFL: c_int = 3; -pub const F_SETFL: c_int = 4; -pub const FD_CLOEXEC: c_int = 1; -pub const FD_SETSIZE: size_t = 1024; -pub const O_APPEND: c_int = 0x0001; -pub const O_DSYNC: c_int = 0x0002; -pub const O_NONBLOCK: c_int = 0x0004; -pub const O_RSYNC: c_int = 0x0008; -pub const O_SYNC: c_int = 0x0010; -pub const O_CREAT: c_int = 0x0001 << 12; -pub const O_DIRECTORY: c_int = 0x0002 << 12; -pub const O_EXCL: c_int = 0x0004 << 12; -pub const O_TRUNC: c_int = 0x0008 << 12; -pub const O_NOFOLLOW: c_int = 0x01000000; -pub const O_EXEC: c_int = 0x02000000; -pub const O_RDONLY: c_int = 0x04000000; -pub const O_SEARCH: c_int = 0x08000000; -pub const O_WRONLY: c_int = 0x10000000; -pub const O_CLOEXEC: c_int = 0x0; -pub const O_RDWR: c_int = O_WRONLY | O_RDONLY; -pub const O_ACCMODE: c_int = O_EXEC | O_RDWR | O_SEARCH; -pub const O_NOCTTY: c_int = 0x0; -pub const POSIX_FADV_DONTNEED: c_int = 4; -pub const POSIX_FADV_NOREUSE: c_int = 5; -pub const POSIX_FADV_NORMAL: c_int = 0; -pub const POSIX_FADV_RANDOM: c_int = 2; -pub const POSIX_FADV_SEQUENTIAL: c_int = 1; -pub const POSIX_FADV_WILLNEED: c_int = 3; -pub const AT_FDCWD: c_int = -2; -pub const AT_EACCESS: c_int = 0x0; -pub const AT_SYMLINK_NOFOLLOW: c_int = 0x1; -pub const AT_SYMLINK_FOLLOW: c_int = 0x2; -pub const AT_REMOVEDIR: c_int = 0x4; -pub const UTIME_OMIT: c_long = 0xfffffffe; -pub const UTIME_NOW: c_long = 0xffffffff; -pub const S_IFIFO: mode_t = 0o1_0000; -pub const S_IFCHR: mode_t = 0o2_0000; -pub const S_IFBLK: mode_t = 0o6_0000; -pub const S_IFDIR: mode_t = 0o4_0000; -pub const S_IFREG: mode_t = 0o10_0000; -pub const S_IFLNK: mode_t = 0o12_0000; -pub const S_IFSOCK: mode_t = 0o14_0000; -pub const S_IFMT: mode_t = 0o17_0000; -pub const S_IRWXO: mode_t = 0o0007; -pub const S_IXOTH: mode_t = 0o0001; -pub const S_IWOTH: mode_t = 0o0002; -pub const S_IROTH: mode_t = 0o0004; -pub const S_IRWXG: mode_t = 0o0070; -pub const S_IXGRP: mode_t = 0o0010; -pub const S_IWGRP: mode_t = 0o0020; -pub const S_IRGRP: mode_t = 0o0040; -pub const S_IRWXU: mode_t = 0o0700; -pub const S_IXUSR: mode_t = 0o0100; -pub const S_IWUSR: mode_t = 0o0200; -pub const S_IRUSR: mode_t = 0o0400; -pub const S_ISVTX: mode_t = 0o1000; 
-pub const S_ISGID: mode_t = 0o2000; -pub const S_ISUID: mode_t = 0o4000; -pub const DT_UNKNOWN: u8 = 0; -pub const DT_BLK: u8 = 1; -pub const DT_CHR: u8 = 2; -pub const DT_DIR: u8 = 3; -pub const DT_REG: u8 = 4; -pub const DT_LNK: u8 = 7; -pub const FIONREAD: c_int = 1; -pub const FIONBIO: c_int = 2; -pub const F_OK: c_int = 0; -pub const R_OK: c_int = 4; -pub const W_OK: c_int = 2; -pub const X_OK: c_int = 1; -pub const POLLIN: c_short = 0x1; -pub const POLLOUT: c_short = 0x2; -pub const POLLERR: c_short = 0x1000; -pub const POLLHUP: c_short = 0x2000; -pub const POLLNVAL: c_short = 0x4000; -pub const POLLRDNORM: c_short = 0x1; -pub const POLLWRNORM: c_short = 0x2; - -pub const E2BIG: c_int = 1; -pub const EACCES: c_int = 2; -pub const EADDRINUSE: c_int = 3; -pub const EADDRNOTAVAIL: c_int = 4; -pub const EAFNOSUPPORT: c_int = 5; -pub const EAGAIN: c_int = 6; -pub const EALREADY: c_int = 7; -pub const EBADF: c_int = 8; -pub const EBADMSG: c_int = 9; -pub const EBUSY: c_int = 10; -pub const ECANCELED: c_int = 11; -pub const ECHILD: c_int = 12; -pub const ECONNABORTED: c_int = 13; -pub const ECONNREFUSED: c_int = 14; -pub const ECONNRESET: c_int = 15; -pub const EDEADLK: c_int = 16; -pub const EDESTADDRREQ: c_int = 17; -pub const EDOM: c_int = 18; -pub const EDQUOT: c_int = 19; -pub const EEXIST: c_int = 20; -pub const EFAULT: c_int = 21; -pub const EFBIG: c_int = 22; -pub const EHOSTUNREACH: c_int = 23; -pub const EIDRM: c_int = 24; -pub const EILSEQ: c_int = 25; -pub const EINPROGRESS: c_int = 26; -pub const EINTR: c_int = 27; -pub const EINVAL: c_int = 28; -pub const EIO: c_int = 29; -pub const EISCONN: c_int = 30; -pub const EISDIR: c_int = 31; -pub const ELOOP: c_int = 32; -pub const EMFILE: c_int = 33; -pub const EMLINK: c_int = 34; -pub const EMSGSIZE: c_int = 35; -pub const EMULTIHOP: c_int = 36; -pub const ENAMETOOLONG: c_int = 37; -pub const ENETDOWN: c_int = 38; -pub const ENETRESET: c_int = 39; -pub const ENETUNREACH: c_int = 40; -pub const ENFILE: c_int = 41; -pub const ENOBUFS: c_int = 42; -pub const ENODEV: c_int = 43; -pub const ENOENT: c_int = 44; -pub const ENOEXEC: c_int = 45; -pub const ENOLCK: c_int = 46; -pub const ENOLINK: c_int = 47; -pub const ENOMEM: c_int = 48; -pub const ENOMSG: c_int = 49; -pub const ENOPROTOOPT: c_int = 50; -pub const ENOSPC: c_int = 51; -pub const ENOSYS: c_int = 52; -pub const ENOTCONN: c_int = 53; -pub const ENOTDIR: c_int = 54; -pub const ENOTEMPTY: c_int = 55; -pub const ENOTRECOVERABLE: c_int = 56; -pub const ENOTSOCK: c_int = 57; -pub const ENOTSUP: c_int = 58; -pub const ENOTTY: c_int = 59; -pub const ENXIO: c_int = 60; -pub const EOVERFLOW: c_int = 61; -pub const EOWNERDEAD: c_int = 62; -pub const EPERM: c_int = 63; -pub const EPIPE: c_int = 64; -pub const EPROTO: c_int = 65; -pub const EPROTONOSUPPORT: c_int = 66; -pub const EPROTOTYPE: c_int = 67; -pub const ERANGE: c_int = 68; -pub const EROFS: c_int = 69; -pub const ESPIPE: c_int = 70; -pub const ESRCH: c_int = 71; -pub const ESTALE: c_int = 72; -pub const ETIMEDOUT: c_int = 73; -pub const ETXTBSY: c_int = 74; -pub const EXDEV: c_int = 75; -pub const ENOTCAPABLE: c_int = 76; -pub const EOPNOTSUPP: c_int = ENOTSUP; -pub const EWOULDBLOCK: c_int = EAGAIN; - -pub const _SC_PAGESIZE: c_int = 30; -pub const _SC_PAGE_SIZE: c_int = _SC_PAGESIZE; -pub const _SC_IOV_MAX: c_int = 60; -pub const _SC_SYMLOOP_MAX: c_int = 173; - -// FIXME(msrv): `addr_of!(EXTERN_STATIC)` is now safe; remove `unsafe` when MSRV >= 1.82 -#[allow(unused_unsafe)] -pub static CLOCK_MONOTONIC: clockid_t = unsafe { 
clockid_t(core::ptr::addr_of!(_CLOCK_MONOTONIC)) }; -#[allow(unused_unsafe)] -pub static CLOCK_PROCESS_CPUTIME_ID: clockid_t = - unsafe { clockid_t(core::ptr::addr_of!(_CLOCK_PROCESS_CPUTIME_ID)) }; -#[allow(unused_unsafe)] -pub static CLOCK_REALTIME: clockid_t = unsafe { clockid_t(core::ptr::addr_of!(_CLOCK_REALTIME)) }; -#[allow(unused_unsafe)] -pub static CLOCK_THREAD_CPUTIME_ID: clockid_t = - unsafe { clockid_t(core::ptr::addr_of!(_CLOCK_THREAD_CPUTIME_ID)) }; - -pub const ABDAY_1: crate::nl_item = 0x20000; -pub const ABDAY_2: crate::nl_item = 0x20001; -pub const ABDAY_3: crate::nl_item = 0x20002; -pub const ABDAY_4: crate::nl_item = 0x20003; -pub const ABDAY_5: crate::nl_item = 0x20004; -pub const ABDAY_6: crate::nl_item = 0x20005; -pub const ABDAY_7: crate::nl_item = 0x20006; - -pub const DAY_1: crate::nl_item = 0x20007; -pub const DAY_2: crate::nl_item = 0x20008; -pub const DAY_3: crate::nl_item = 0x20009; -pub const DAY_4: crate::nl_item = 0x2000A; -pub const DAY_5: crate::nl_item = 0x2000B; -pub const DAY_6: crate::nl_item = 0x2000C; -pub const DAY_7: crate::nl_item = 0x2000D; - -pub const ABMON_1: crate::nl_item = 0x2000E; -pub const ABMON_2: crate::nl_item = 0x2000F; -pub const ABMON_3: crate::nl_item = 0x20010; -pub const ABMON_4: crate::nl_item = 0x20011; -pub const ABMON_5: crate::nl_item = 0x20012; -pub const ABMON_6: crate::nl_item = 0x20013; -pub const ABMON_7: crate::nl_item = 0x20014; -pub const ABMON_8: crate::nl_item = 0x20015; -pub const ABMON_9: crate::nl_item = 0x20016; -pub const ABMON_10: crate::nl_item = 0x20017; -pub const ABMON_11: crate::nl_item = 0x20018; -pub const ABMON_12: crate::nl_item = 0x20019; - -pub const MON_1: crate::nl_item = 0x2001A; -pub const MON_2: crate::nl_item = 0x2001B; -pub const MON_3: crate::nl_item = 0x2001C; -pub const MON_4: crate::nl_item = 0x2001D; -pub const MON_5: crate::nl_item = 0x2001E; -pub const MON_6: crate::nl_item = 0x2001F; -pub const MON_7: crate::nl_item = 0x20020; -pub const MON_8: crate::nl_item = 0x20021; -pub const MON_9: crate::nl_item = 0x20022; -pub const MON_10: crate::nl_item = 0x20023; -pub const MON_11: crate::nl_item = 0x20024; -pub const MON_12: crate::nl_item = 0x20025; - -pub const AM_STR: crate::nl_item = 0x20026; -pub const PM_STR: crate::nl_item = 0x20027; - -pub const D_T_FMT: crate::nl_item = 0x20028; -pub const D_FMT: crate::nl_item = 0x20029; -pub const T_FMT: crate::nl_item = 0x2002A; -pub const T_FMT_AMPM: crate::nl_item = 0x2002B; - -pub const ERA: crate::nl_item = 0x2002C; -pub const ERA_D_FMT: crate::nl_item = 0x2002E; -pub const ALT_DIGITS: crate::nl_item = 0x2002F; -pub const ERA_D_T_FMT: crate::nl_item = 0x20030; -pub const ERA_T_FMT: crate::nl_item = 0x20031; - -pub const CODESET: crate::nl_item = 14; -pub const CRNCYSTR: crate::nl_item = 0x4000F; -pub const RADIXCHAR: crate::nl_item = 0x10000; -pub const THOUSEP: crate::nl_item = 0x10001; -pub const YESEXPR: crate::nl_item = 0x50000; -pub const NOEXPR: crate::nl_item = 0x50001; -pub const YESSTR: crate::nl_item = 0x50002; -pub const NOSTR: crate::nl_item = 0x50003; - -f! 
{ - pub fn FD_ISSET(fd: c_int, set: *const fd_set) -> bool { - let set = &*set; - let n = set.__nfds; - return set.__fds[..n].iter().any(|p| *p == fd); - } - - pub fn FD_SET(fd: c_int, set: *mut fd_set) -> () { - let set = &mut *set; - let n = set.__nfds; - if !set.__fds[..n].iter().any(|p| *p == fd) { - set.__nfds = n + 1; - set.__fds[n] = fd; - } - } - - pub fn FD_ZERO(set: *mut fd_set) -> () { - (*set).__nfds = 0; - return; - } -} - -#[cfg_attr( - feature = "rustc-dep-of-std", - link( - name = "c", - kind = "static", - modifiers = "-bundle", - cfg(target_feature = "crt-static") - ) -)] -#[cfg_attr( - feature = "rustc-dep-of-std", - link(name = "c", cfg(not(target_feature = "crt-static"))) -)] -extern "C" { - pub fn _Exit(code: c_int) -> !; - pub fn _exit(code: c_int) -> !; - pub fn abort() -> !; - pub fn aligned_alloc(a: size_t, b: size_t) -> *mut c_void; - pub fn calloc(amt: size_t, amt2: size_t) -> *mut c_void; - pub fn exit(code: c_int) -> !; - pub fn free(ptr: *mut c_void); - pub fn getenv(s: *const c_char) -> *mut c_char; - pub fn malloc(amt: size_t) -> *mut c_void; - pub fn malloc_usable_size(ptr: *mut c_void) -> size_t; - pub fn sbrk(increment: intptr_t) -> *mut c_void; - pub fn rand() -> c_int; - pub fn read(fd: c_int, ptr: *mut c_void, size: size_t) -> ssize_t; - pub fn realloc(ptr: *mut c_void, amt: size_t) -> *mut c_void; - pub fn setenv(k: *const c_char, v: *const c_char, a: c_int) -> c_int; - pub fn unsetenv(k: *const c_char) -> c_int; - pub fn clearenv() -> c_int; - pub fn write(fd: c_int, ptr: *const c_void, size: size_t) -> ssize_t; - pub static mut environ: *mut *mut c_char; - pub fn fopen(a: *const c_char, b: *const c_char) -> *mut FILE; - pub fn freopen(a: *const c_char, b: *const c_char, f: *mut FILE) -> *mut FILE; - pub fn fclose(f: *mut FILE) -> c_int; - pub fn remove(a: *const c_char) -> c_int; - pub fn rename(a: *const c_char, b: *const c_char) -> c_int; - pub fn feof(f: *mut FILE) -> c_int; - pub fn ferror(f: *mut FILE) -> c_int; - pub fn fflush(f: *mut FILE) -> c_int; - pub fn clearerr(f: *mut FILE); - pub fn fseek(f: *mut FILE, b: c_long, c: c_int) -> c_int; - pub fn ftell(f: *mut FILE) -> c_long; - pub fn rewind(f: *mut FILE); - pub fn fgetpos(f: *mut FILE, pos: *mut fpos_t) -> c_int; - pub fn fsetpos(f: *mut FILE, pos: *const fpos_t) -> c_int; - pub fn fread(buf: *mut c_void, a: size_t, b: size_t, f: *mut FILE) -> size_t; - pub fn fwrite(buf: *const c_void, a: size_t, b: size_t, f: *mut FILE) -> size_t; - pub fn fgetc(f: *mut FILE) -> c_int; - pub fn getc(f: *mut FILE) -> c_int; - pub fn getchar() -> c_int; - pub fn ungetc(a: c_int, f: *mut FILE) -> c_int; - pub fn fputc(a: c_int, f: *mut FILE) -> c_int; - pub fn putc(a: c_int, f: *mut FILE) -> c_int; - pub fn putchar(a: c_int) -> c_int; - pub fn fputs(a: *const c_char, f: *mut FILE) -> c_int; - pub fn puts(a: *const c_char) -> c_int; - pub fn perror(a: *const c_char); - pub fn srand(a: c_uint); - pub fn atexit(a: extern "C" fn()) -> c_int; - pub fn at_quick_exit(a: extern "C" fn()) -> c_int; - pub fn quick_exit(a: c_int) -> !; - pub fn posix_memalign(a: *mut *mut c_void, b: size_t, c: size_t) -> c_int; - pub fn rand_r(a: *mut c_uint) -> c_int; - pub fn random() -> c_long; - pub fn srandom(a: c_uint); - pub fn putenv(a: *mut c_char) -> c_int; - pub fn clock() -> clock_t; - pub fn time(a: *mut time_t) -> time_t; - pub fn difftime(a: time_t, b: time_t) -> c_double; - pub fn mktime(a: *mut tm) -> time_t; - pub fn strftime(a: *mut c_char, b: size_t, c: *const c_char, d: *const tm) -> size_t; - pub fn gmtime(a: 
*const time_t) -> *mut tm; - pub fn gmtime_r(a: *const time_t, b: *mut tm) -> *mut tm; - pub fn localtime(a: *const time_t) -> *mut tm; - pub fn localtime_r(a: *const time_t, b: *mut tm) -> *mut tm; - pub fn asctime_r(a: *const tm, b: *mut c_char) -> *mut c_char; - pub fn ctime_r(a: *const time_t, b: *mut c_char) -> *mut c_char; - - static _CLOCK_MONOTONIC: u8; - static _CLOCK_PROCESS_CPUTIME_ID: u8; - static _CLOCK_REALTIME: u8; - static _CLOCK_THREAD_CPUTIME_ID: u8; - pub fn nanosleep(a: *const timespec, b: *mut timespec) -> c_int; - pub fn clock_getres(a: clockid_t, b: *mut timespec) -> c_int; - pub fn clock_gettime(a: clockid_t, b: *mut timespec) -> c_int; - pub fn clock_nanosleep(a: clockid_t, a2: c_int, b: *const timespec, c: *mut timespec) -> c_int; - - pub fn isalnum(c: c_int) -> c_int; - pub fn isalpha(c: c_int) -> c_int; - pub fn iscntrl(c: c_int) -> c_int; - pub fn isdigit(c: c_int) -> c_int; - pub fn isgraph(c: c_int) -> c_int; - pub fn islower(c: c_int) -> c_int; - pub fn isprint(c: c_int) -> c_int; - pub fn ispunct(c: c_int) -> c_int; - pub fn isspace(c: c_int) -> c_int; - pub fn isupper(c: c_int) -> c_int; - pub fn isxdigit(c: c_int) -> c_int; - pub fn isblank(c: c_int) -> c_int; - pub fn tolower(c: c_int) -> c_int; - pub fn toupper(c: c_int) -> c_int; - pub fn setvbuf(stream: *mut FILE, buffer: *mut c_char, mode: c_int, size: size_t) -> c_int; - pub fn setbuf(stream: *mut FILE, buf: *mut c_char); - pub fn fgets(buf: *mut c_char, n: c_int, stream: *mut FILE) -> *mut c_char; - pub fn atof(s: *const c_char) -> c_double; - pub fn atoi(s: *const c_char) -> c_int; - pub fn atol(s: *const c_char) -> c_long; - pub fn atoll(s: *const c_char) -> c_longlong; - pub fn strtod(s: *const c_char, endp: *mut *mut c_char) -> c_double; - pub fn strtof(s: *const c_char, endp: *mut *mut c_char) -> c_float; - pub fn strtol(s: *const c_char, endp: *mut *mut c_char, base: c_int) -> c_long; - pub fn strtoll(s: *const c_char, endp: *mut *mut c_char, base: c_int) -> c_longlong; - pub fn strtoul(s: *const c_char, endp: *mut *mut c_char, base: c_int) -> c_ulong; - pub fn strtoull(s: *const c_char, endp: *mut *mut c_char, base: c_int) -> c_ulonglong; - - pub fn strcpy(dst: *mut c_char, src: *const c_char) -> *mut c_char; - pub fn strncpy(dst: *mut c_char, src: *const c_char, n: size_t) -> *mut c_char; - pub fn strcat(s: *mut c_char, ct: *const c_char) -> *mut c_char; - pub fn strncat(s: *mut c_char, ct: *const c_char, n: size_t) -> *mut c_char; - pub fn strcmp(cs: *const c_char, ct: *const c_char) -> c_int; - pub fn strncmp(cs: *const c_char, ct: *const c_char, n: size_t) -> c_int; - pub fn strcoll(cs: *const c_char, ct: *const c_char) -> c_int; - pub fn strchr(cs: *const c_char, c: c_int) -> *mut c_char; - pub fn strrchr(cs: *const c_char, c: c_int) -> *mut c_char; - pub fn strspn(cs: *const c_char, ct: *const c_char) -> size_t; - pub fn strcspn(cs: *const c_char, ct: *const c_char) -> size_t; - pub fn strdup(cs: *const c_char) -> *mut c_char; - pub fn strndup(cs: *const c_char, n: size_t) -> *mut c_char; - pub fn strpbrk(cs: *const c_char, ct: *const c_char) -> *mut c_char; - pub fn strstr(cs: *const c_char, ct: *const c_char) -> *mut c_char; - pub fn strcasecmp(s1: *const c_char, s2: *const c_char) -> c_int; - pub fn strncasecmp(s1: *const c_char, s2: *const c_char, n: size_t) -> c_int; - pub fn strlen(cs: *const c_char) -> size_t; - pub fn strnlen(cs: *const c_char, maxlen: size_t) -> size_t; - pub fn strerror(n: c_int) -> *mut c_char; - pub fn strtok(s: *mut c_char, t: *const c_char) -> *mut 
c_char; - pub fn strxfrm(s: *mut c_char, ct: *const c_char, n: size_t) -> size_t; - - pub fn memchr(cx: *const c_void, c: c_int, n: size_t) -> *mut c_void; - pub fn memcmp(cx: *const c_void, ct: *const c_void, n: size_t) -> c_int; - pub fn memcpy(dest: *mut c_void, src: *const c_void, n: size_t) -> *mut c_void; - pub fn memmove(dest: *mut c_void, src: *const c_void, n: size_t) -> *mut c_void; - pub fn memset(dest: *mut c_void, c: c_int, n: size_t) -> *mut c_void; - - pub fn fprintf(stream: *mut crate::FILE, format: *const c_char, ...) -> c_int; - pub fn printf(format: *const c_char, ...) -> c_int; - pub fn snprintf(s: *mut c_char, n: size_t, format: *const c_char, ...) -> c_int; - pub fn sprintf(s: *mut c_char, format: *const c_char, ...) -> c_int; - pub fn fscanf(stream: *mut crate::FILE, format: *const c_char, ...) -> c_int; - pub fn scanf(format: *const c_char, ...) -> c_int; - pub fn sscanf(s: *const c_char, format: *const c_char, ...) -> c_int; - pub fn getchar_unlocked() -> c_int; - pub fn putchar_unlocked(c: c_int) -> c_int; - - pub fn shutdown(socket: c_int, how: c_int) -> c_int; - pub fn fstat(fildes: c_int, buf: *mut stat) -> c_int; - pub fn mkdir(path: *const c_char, mode: mode_t) -> c_int; - pub fn stat(path: *const c_char, buf: *mut stat) -> c_int; - pub fn fdopen(fd: c_int, mode: *const c_char) -> *mut crate::FILE; - pub fn fileno(stream: *mut crate::FILE) -> c_int; - pub fn open(path: *const c_char, oflag: c_int, ...) -> c_int; - pub fn creat(path: *const c_char, mode: mode_t) -> c_int; - pub fn fcntl(fd: c_int, cmd: c_int, ...) -> c_int; - pub fn opendir(dirname: *const c_char) -> *mut crate::DIR; - pub fn fdopendir(fd: c_int) -> *mut crate::DIR; - pub fn readdir(dirp: *mut crate::DIR) -> *mut crate::dirent; - pub fn closedir(dirp: *mut crate::DIR) -> c_int; - pub fn rewinddir(dirp: *mut crate::DIR); - pub fn dirfd(dirp: *mut crate::DIR) -> c_int; - pub fn seekdir(dirp: *mut crate::DIR, loc: c_long); - pub fn telldir(dirp: *mut crate::DIR) -> c_long; - - pub fn openat(dirfd: c_int, pathname: *const c_char, flags: c_int, ...) 
-> c_int; - pub fn fstatat(dirfd: c_int, pathname: *const c_char, buf: *mut stat, flags: c_int) -> c_int; - pub fn linkat( - olddirfd: c_int, - oldpath: *const c_char, - newdirfd: c_int, - newpath: *const c_char, - flags: c_int, - ) -> c_int; - pub fn mkdirat(dirfd: c_int, pathname: *const c_char, mode: mode_t) -> c_int; - pub fn readlinkat( - dirfd: c_int, - pathname: *const c_char, - buf: *mut c_char, - bufsiz: size_t, - ) -> ssize_t; - pub fn renameat( - olddirfd: c_int, - oldpath: *const c_char, - newdirfd: c_int, - newpath: *const c_char, - ) -> c_int; - pub fn symlinkat(target: *const c_char, newdirfd: c_int, linkpath: *const c_char) -> c_int; - pub fn unlinkat(dirfd: c_int, pathname: *const c_char, flags: c_int) -> c_int; - - pub fn access(path: *const c_char, amode: c_int) -> c_int; - pub fn close(fd: c_int) -> c_int; - pub fn fpathconf(filedes: c_int, name: c_int) -> c_long; - pub fn getopt(argc: c_int, argv: *const *mut c_char, optstr: *const c_char) -> c_int; - pub fn isatty(fd: c_int) -> c_int; - pub fn link(src: *const c_char, dst: *const c_char) -> c_int; - pub fn lseek(fd: c_int, offset: off_t, whence: c_int) -> off_t; - pub fn pathconf(path: *const c_char, name: c_int) -> c_long; - pub fn rmdir(path: *const c_char) -> c_int; - pub fn sleep(secs: c_uint) -> c_uint; - pub fn unlink(c: *const c_char) -> c_int; - pub fn pread(fd: c_int, buf: *mut c_void, count: size_t, offset: off_t) -> ssize_t; - pub fn pwrite(fd: c_int, buf: *const c_void, count: size_t, offset: off_t) -> ssize_t; - - pub fn lstat(path: *const c_char, buf: *mut stat) -> c_int; - - pub fn fsync(fd: c_int) -> c_int; - pub fn fdatasync(fd: c_int) -> c_int; - - pub fn symlink(path1: *const c_char, path2: *const c_char) -> c_int; - - pub fn truncate(path: *const c_char, length: off_t) -> c_int; - pub fn ftruncate(fd: c_int, length: off_t) -> c_int; - - pub fn getrusage(resource: c_int, usage: *mut rusage) -> c_int; - - pub fn gettimeofday(tp: *mut crate::timeval, tz: *mut c_void) -> c_int; - pub fn times(buf: *mut crate::tms) -> crate::clock_t; - - pub fn strerror_r(errnum: c_int, buf: *mut c_char, buflen: size_t) -> c_int; - - pub fn usleep(secs: c_uint) -> c_int; - pub fn send(socket: c_int, buf: *const c_void, len: size_t, flags: c_int) -> ssize_t; - pub fn recv(socket: c_int, buf: *mut c_void, len: size_t, flags: c_int) -> ssize_t; - pub fn poll(fds: *mut pollfd, nfds: nfds_t, timeout: c_int) -> c_int; - pub fn setlocale(category: c_int, locale: *const c_char) -> *mut c_char; - pub fn localeconv() -> *mut lconv; - - pub fn readlink(path: *const c_char, buf: *mut c_char, bufsz: size_t) -> ssize_t; - - pub fn timegm(tm: *mut crate::tm) -> time_t; - - pub fn sysconf(name: c_int) -> c_long; - - pub fn ioctl(fd: c_int, request: c_int, ...) 
-> c_int; - - pub fn fseeko(stream: *mut crate::FILE, offset: off_t, whence: c_int) -> c_int; - pub fn ftello(stream: *mut crate::FILE) -> off_t; - pub fn posix_fallocate(fd: c_int, offset: off_t, len: off_t) -> c_int; - - pub fn strcasestr(cs: *const c_char, ct: *const c_char) -> *mut c_char; - pub fn getline(lineptr: *mut *mut c_char, n: *mut size_t, stream: *mut FILE) -> ssize_t; - - pub fn faccessat(dirfd: c_int, pathname: *const c_char, mode: c_int, flags: c_int) -> c_int; - pub fn writev(fd: c_int, iov: *const crate::iovec, iovcnt: c_int) -> ssize_t; - pub fn readv(fd: c_int, iov: *const crate::iovec, iovcnt: c_int) -> ssize_t; - pub fn pwritev(fd: c_int, iov: *const crate::iovec, iovcnt: c_int, offset: off_t) -> ssize_t; - pub fn preadv(fd: c_int, iov: *const crate::iovec, iovcnt: c_int, offset: off_t) -> ssize_t; - pub fn posix_fadvise(fd: c_int, offset: off_t, len: off_t, advise: c_int) -> c_int; - pub fn futimens(fd: c_int, times: *const crate::timespec) -> c_int; - pub fn utimensat( - dirfd: c_int, - path: *const c_char, - times: *const crate::timespec, - flag: c_int, - ) -> c_int; - pub fn getentropy(buf: *mut c_void, buflen: size_t) -> c_int; - pub fn memrchr(cx: *const c_void, c: c_int, n: size_t) -> *mut c_void; - pub fn abs(i: c_int) -> c_int; - pub fn labs(i: c_long) -> c_long; - pub fn duplocale(base: crate::locale_t) -> crate::locale_t; - pub fn freelocale(loc: crate::locale_t); - pub fn newlocale(mask: c_int, locale: *const c_char, base: crate::locale_t) -> crate::locale_t; - pub fn uselocale(loc: crate::locale_t) -> crate::locale_t; - pub fn sched_yield() -> c_int; - pub fn getcwd(buf: *mut c_char, size: size_t) -> *mut c_char; - pub fn chdir(dir: *const c_char) -> c_int; - - pub fn nl_langinfo(item: crate::nl_item) -> *mut c_char; - pub fn nl_langinfo_l(item: crate::nl_item, loc: crate::locale_t) -> *mut c_char; - - pub fn select( - nfds: c_int, - readfds: *mut fd_set, - writefds: *mut fd_set, - errorfds: *mut fd_set, - timeout: *const timeval, - ) -> c_int; - - pub fn __wasilibc_register_preopened_fd(fd: c_int, path: *const c_char) -> c_int; - pub fn __wasilibc_fd_renumber(fd: c_int, newfd: c_int) -> c_int; - pub fn __wasilibc_unlinkat(fd: c_int, path: *const c_char) -> c_int; - pub fn __wasilibc_rmdirat(fd: c_int, path: *const c_char) -> c_int; - pub fn __wasilibc_find_relpath( - path: *const c_char, - abs_prefix: *mut *const c_char, - relative_path: *mut *mut c_char, - relative_path_len: usize, - ) -> c_int; - pub fn __wasilibc_tell(fd: c_int) -> off_t; - pub fn __wasilibc_nocwd___wasilibc_unlinkat(dirfd: c_int, path: *const c_char) -> c_int; - pub fn __wasilibc_nocwd___wasilibc_rmdirat(dirfd: c_int, path: *const c_char) -> c_int; - pub fn __wasilibc_nocwd_linkat( - olddirfd: c_int, - oldpath: *const c_char, - newdirfd: c_int, - newpath: *const c_char, - flags: c_int, - ) -> c_int; - pub fn __wasilibc_nocwd_symlinkat( - target: *const c_char, - dirfd: c_int, - path: *const c_char, - ) -> c_int; - pub fn __wasilibc_nocwd_readlinkat( - dirfd: c_int, - path: *const c_char, - buf: *mut c_char, - bufsize: usize, - ) -> isize; - pub fn __wasilibc_nocwd_faccessat( - dirfd: c_int, - path: *const c_char, - mode: c_int, - flags: c_int, - ) -> c_int; - pub fn __wasilibc_nocwd_renameat( - olddirfd: c_int, - oldpath: *const c_char, - newdirfd: c_int, - newpath: *const c_char, - ) -> c_int; - pub fn __wasilibc_nocwd_openat_nomode(dirfd: c_int, path: *const c_char, flags: c_int) - -> c_int; - pub fn __wasilibc_nocwd_fstatat( - dirfd: c_int, - path: *const c_char, - buf: *mut 
stat, - flags: c_int, - ) -> c_int; - pub fn __wasilibc_nocwd_mkdirat_nomode(dirfd: c_int, path: *const c_char) -> c_int; - pub fn __wasilibc_nocwd_utimensat( - dirfd: c_int, - path: *const c_char, - times: *const crate::timespec, - flags: c_int, - ) -> c_int; - pub fn __wasilibc_nocwd_opendirat(dirfd: c_int, path: *const c_char) -> *mut crate::DIR; - pub fn __wasilibc_access(pathname: *const c_char, mode: c_int, flags: c_int) -> c_int; - pub fn __wasilibc_stat(pathname: *const c_char, buf: *mut stat, flags: c_int) -> c_int; - pub fn __wasilibc_utimens( - pathname: *const c_char, - times: *const crate::timespec, - flags: c_int, - ) -> c_int; - pub fn __wasilibc_link(oldpath: *const c_char, newpath: *const c_char, flags: c_int) -> c_int; - pub fn __wasilibc_link_oldat( - olddirfd: c_int, - oldpath: *const c_char, - newpath: *const c_char, - flags: c_int, - ) -> c_int; - pub fn __wasilibc_link_newat( - oldpath: *const c_char, - newdirfd: c_int, - newpath: *const c_char, - flags: c_int, - ) -> c_int; - pub fn __wasilibc_rename_oldat( - olddirfd: c_int, - oldpath: *const c_char, - newpath: *const c_char, - ) -> c_int; - pub fn __wasilibc_rename_newat( - oldpath: *const c_char, - newdirfd: c_int, - newpath: *const c_char, - ) -> c_int; - - pub fn arc4random() -> u32; - pub fn arc4random_buf(a: *mut c_void, b: size_t); - pub fn arc4random_uniform(a: u32) -> u32; - - pub fn __errno_location() -> *mut c_int; -} - -cfg_if! { - if #[cfg(not(target_env = "p1"))] { - mod p2; - pub use self::p2::*; - } -} diff --git a/vendor/libc/src/wasi/p2.rs b/vendor/libc/src/wasi/p2.rs deleted file mode 100644 index 7332a779396d37..00000000000000 --- a/vendor/libc/src/wasi/p2.rs +++ /dev/null @@ -1,188 +0,0 @@ -use crate::prelude::*; - -pub type sa_family_t = c_ushort; -pub type in_port_t = c_ushort; -pub type in_addr_t = c_uint; - -pub type socklen_t = c_uint; - -s! 
{ - #[repr(align(16))] - pub struct sockaddr { - pub sa_family: sa_family_t, - pub sa_data: [c_char; 0], - } - - pub struct in_addr { - pub s_addr: in_addr_t, - } - - #[repr(align(16))] - pub struct sockaddr_in { - pub sin_family: sa_family_t, - pub sin_port: in_port_t, - pub sin_addr: in_addr, - } - - #[repr(align(4))] - pub struct in6_addr { - pub s6_addr: [c_uchar; 16], - } - - #[repr(align(16))] - pub struct sockaddr_in6 { - pub sin6_family: sa_family_t, - pub sin6_port: in_port_t, - pub sin6_flowinfo: c_uint, - pub sin6_addr: in6_addr, - pub sin6_scope_id: c_uint, - } - - #[repr(align(16))] - pub struct sockaddr_storage { - pub ss_family: sa_family_t, - pub __ss_data: [c_char; 32], - } - - pub struct addrinfo { - pub ai_flags: c_int, - pub ai_family: c_int, - pub ai_socktype: c_int, - pub ai_protocol: c_int, - pub ai_addrlen: socklen_t, - pub ai_addr: *mut sockaddr, - pub ai_canonname: *mut c_char, - pub ai_next: *mut addrinfo, - } - - pub struct ip_mreq { - pub imr_multiaddr: in_addr, - pub imr_interface: in_addr, - } - - pub struct ipv6_mreq { - pub ipv6mr_multiaddr: in6_addr, - pub ipv6mr_interface: c_uint, - } - - pub struct linger { - pub l_onoff: c_int, - pub l_linger: c_int, - } -} - -pub const SHUT_RD: c_int = 1 << 0; -pub const SHUT_WR: c_int = 1 << 1; -pub const SHUT_RDWR: c_int = SHUT_RD | SHUT_WR; - -pub const MSG_NOSIGNAL: c_int = 0x4000; -pub const MSG_PEEK: c_int = 0x0002; - -pub const SO_REUSEADDR: c_int = 2; -pub const SO_TYPE: c_int = 3; -pub const SO_ERROR: c_int = 4; -pub const SO_BROADCAST: c_int = 6; -pub const SO_SNDBUF: c_int = 7; -pub const SO_RCVBUF: c_int = 8; -pub const SO_KEEPALIVE: c_int = 9; -pub const SO_LINGER: c_int = 13; -pub const SO_ACCEPTCONN: c_int = 30; -pub const SO_PROTOCOL: c_int = 38; -pub const SO_DOMAIN: c_int = 39; -pub const SO_RCVTIMEO: c_int = 66; -pub const SO_SNDTIMEO: c_int = 67; - -pub const SOCK_DGRAM: c_int = 5; -pub const SOCK_STREAM: c_int = 6; -pub const SOCK_NONBLOCK: c_int = 0x00004000; - -pub const SOL_SOCKET: c_int = 0x7fffffff; - -pub const AF_UNSPEC: c_int = 0; -pub const AF_INET: c_int = 1; -pub const AF_INET6: c_int = 2; - -pub const IPPROTO_IP: c_int = 0; -pub const IPPROTO_TCP: c_int = 6; -pub const IPPROTO_UDP: c_int = 17; -pub const IPPROTO_IPV6: c_int = 41; - -pub const IP_TTL: c_int = 2; -pub const IP_MULTICAST_TTL: c_int = 33; -pub const IP_MULTICAST_LOOP: c_int = 34; -pub const IP_ADD_MEMBERSHIP: c_int = 35; -pub const IP_DROP_MEMBERSHIP: c_int = 36; - -pub const IPV6_UNICAST_HOPS: c_int = 16; -pub const IPV6_MULTICAST_LOOP: c_int = 19; -pub const IPV6_JOIN_GROUP: c_int = 20; -pub const IPV6_LEAVE_GROUP: c_int = 21; -pub const IPV6_V6ONLY: c_int = 26; - -pub const IPV6_ADD_MEMBERSHIP: c_int = IPV6_JOIN_GROUP; -pub const IPV6_DROP_MEMBERSHIP: c_int = IPV6_LEAVE_GROUP; - -pub const TCP_NODELAY: c_int = 1; -pub const TCP_KEEPIDLE: c_int = 4; -pub const TCP_KEEPINTVL: c_int = 5; -pub const TCP_KEEPCNT: c_int = 6; - -pub const EAI_SYSTEM: c_int = -11; - -extern "C" { - pub fn socket(domain: c_int, type_: c_int, protocol: c_int) -> c_int; - pub fn connect(fd: c_int, name: *const sockaddr, addrlen: socklen_t) -> c_int; - pub fn bind(socket: c_int, addr: *const sockaddr, addrlen: socklen_t) -> c_int; - pub fn listen(socket: c_int, backlog: c_int) -> c_int; - pub fn accept(socket: c_int, addr: *mut sockaddr, addrlen: *mut socklen_t) -> c_int; - pub fn accept4( - socket: c_int, - addr: *mut sockaddr, - addrlen: *mut socklen_t, - flags: c_int, - ) -> c_int; - - pub fn getsockname(socket: c_int, addr: *mut sockaddr, 
addrlen: *mut socklen_t) -> c_int; - pub fn getpeername(socket: c_int, addr: *mut sockaddr, addrlen: *mut socklen_t) -> c_int; - - pub fn sendto( - socket: c_int, - buffer: *const c_void, - length: size_t, - flags: c_int, - addr: *const sockaddr, - addrlen: socklen_t, - ) -> ssize_t; - pub fn recvfrom( - socket: c_int, - buffer: *mut c_void, - length: size_t, - flags: c_int, - addr: *mut sockaddr, - addrlen: *mut socklen_t, - ) -> ssize_t; - - pub fn getsockopt( - sockfd: c_int, - level: c_int, - optname: c_int, - optval: *mut c_void, - optlen: *mut socklen_t, - ) -> c_int; - pub fn setsockopt( - sockfd: c_int, - level: c_int, - optname: c_int, - optval: *const c_void, - optlen: socklen_t, - ) -> c_int; - - pub fn getaddrinfo( - host: *const c_char, - serv: *const c_char, - hint: *const addrinfo, - res: *mut *mut addrinfo, - ) -> c_int; - pub fn freeaddrinfo(p: *mut addrinfo); - pub fn gai_strerror(ecode: c_int) -> *const c_char; -} diff --git a/vendor/libc/src/windows/gnu/mod.rs b/vendor/libc/src/windows/gnu/mod.rs deleted file mode 100644 index aee2c1efed1081..00000000000000 --- a/vendor/libc/src/windows/gnu/mod.rs +++ /dev/null @@ -1,36 +0,0 @@ -use crate::prelude::*; - -cfg_if! { - if #[cfg(target_pointer_width = "64")] { - s_no_extra_traits! { - #[repr(align(16))] - pub struct max_align_t { - priv_: [f64; 4], - } - } - } else if #[cfg(target_pointer_width = "32")] { - s_no_extra_traits! { - #[repr(align(16))] - pub struct max_align_t { - priv_: [i64; 6], - } - } - } -} - -pub const L_tmpnam: c_uint = 14; -pub const TMP_MAX: c_uint = 0x7fff; - -// stdio file descriptor numbers -pub const STDIN_FILENO: c_int = 0; -pub const STDOUT_FILENO: c_int = 1; -pub const STDERR_FILENO: c_int = 2; - -extern "C" { - pub fn strcasecmp(s1: *const c_char, s2: *const c_char) -> c_int; - pub fn strncasecmp(s1: *const c_char, s2: *const c_char, n: size_t) -> c_int; - - // NOTE: For MSVC target, `wmemchr` is only a inline function in `` - // header file. We cannot find a way to link to that symbol from Rust. - pub fn wmemchr(cx: *const crate::wchar_t, c: crate::wchar_t, n: size_t) -> *mut crate::wchar_t; -} diff --git a/vendor/libc/src/windows/mod.rs b/vendor/libc/src/windows/mod.rs deleted file mode 100644 index 2f35af84c7493d..00000000000000 --- a/vendor/libc/src/windows/mod.rs +++ /dev/null @@ -1,611 +0,0 @@ -//! Windows CRT definitions - -use crate::prelude::*; - -pub type intmax_t = i64; -pub type uintmax_t = u64; - -pub type size_t = usize; -pub type ptrdiff_t = isize; -pub type intptr_t = isize; -pub type uintptr_t = usize; -pub type ssize_t = isize; -pub type sighandler_t = usize; - -pub type wchar_t = u16; - -pub type clock_t = i32; - -pub type errno_t = c_int; - -cfg_if! { - if #[cfg(all(target_arch = "x86", target_env = "gnu"))] { - pub type time_t = i32; - } else { - pub type time_t = i64; - } -} - -pub type off_t = i32; -pub type dev_t = u32; -pub type ino_t = u16; -#[derive(Debug)] -pub enum timezone {} -impl Copy for timezone {} -impl Clone for timezone { - fn clone(&self) -> timezone { - *self - } -} -pub type time64_t = i64; - -pub type SOCKET = crate::uintptr_t; - -s! { - // note this is the struct called stat64 in Windows. Not stat, nor stati64. 
- pub struct stat { - pub st_dev: dev_t, - pub st_ino: ino_t, - pub st_mode: u16, - pub st_nlink: c_short, - pub st_uid: c_short, - pub st_gid: c_short, - pub st_rdev: dev_t, - pub st_size: i64, - pub st_atime: time64_t, - pub st_mtime: time64_t, - pub st_ctime: time64_t, - } - - // note that this is called utimbuf64 in Windows - pub struct utimbuf { - pub actime: time64_t, - pub modtime: time64_t, - } - - pub struct tm { - pub tm_sec: c_int, - pub tm_min: c_int, - pub tm_hour: c_int, - pub tm_mday: c_int, - pub tm_mon: c_int, - pub tm_year: c_int, - pub tm_wday: c_int, - pub tm_yday: c_int, - pub tm_isdst: c_int, - } - - pub struct timeval { - pub tv_sec: c_long, - pub tv_usec: c_long, - } - - pub struct timespec { - pub tv_sec: time_t, - pub tv_nsec: c_long, - } - - pub struct sockaddr { - pub sa_family: c_ushort, - pub sa_data: [c_char; 14], - } -} - -pub const INT_MIN: c_int = -2147483648; -pub const INT_MAX: c_int = 2147483647; - -pub const EXIT_FAILURE: c_int = 1; -pub const EXIT_SUCCESS: c_int = 0; -pub const RAND_MAX: c_int = 32767; -pub const EOF: c_int = -1; -pub const SEEK_SET: c_int = 0; -pub const SEEK_CUR: c_int = 1; -pub const SEEK_END: c_int = 2; -pub const _IOFBF: c_int = 0; -pub const _IONBF: c_int = 4; -pub const _IOLBF: c_int = 64; -pub const BUFSIZ: c_uint = 512; -pub const FOPEN_MAX: c_uint = 20; -pub const FILENAME_MAX: c_uint = 260; - -// fcntl.h -pub const O_RDONLY: c_int = 0x0000; -pub const O_WRONLY: c_int = 0x0001; -pub const O_RDWR: c_int = 0x0002; -pub const O_APPEND: c_int = 0x0008; -pub const O_CREAT: c_int = 0x0100; -pub const O_TRUNC: c_int = 0x0200; -pub const O_EXCL: c_int = 0x0400; -pub const O_TEXT: c_int = 0x4000; -pub const O_BINARY: c_int = 0x8000; -pub const _O_WTEXT: c_int = 0x10000; -pub const _O_U16TEXT: c_int = 0x20000; -pub const _O_U8TEXT: c_int = 0x40000; -pub const O_RAW: c_int = O_BINARY; -pub const O_NOINHERIT: c_int = 0x0080; -pub const O_TEMPORARY: c_int = 0x0040; -pub const _O_SHORT_LIVED: c_int = 0x1000; -pub const _O_OBTAIN_DIR: c_int = 0x2000; -pub const O_SEQUENTIAL: c_int = 0x0020; -pub const O_RANDOM: c_int = 0x0010; - -pub const S_IFCHR: c_int = 0o2_0000; -pub const S_IFDIR: c_int = 0o4_0000; -pub const S_IFREG: c_int = 0o10_0000; -pub const S_IFMT: c_int = 0o17_0000; -pub const S_IEXEC: c_int = 0o0100; -pub const S_IWRITE: c_int = 0o0200; -pub const S_IREAD: c_int = 0o0400; - -pub const LC_ALL: c_int = 0; -pub const LC_COLLATE: c_int = 1; -pub const LC_CTYPE: c_int = 2; -pub const LC_MONETARY: c_int = 3; -pub const LC_NUMERIC: c_int = 4; -pub const LC_TIME: c_int = 5; - -pub const EPERM: c_int = 1; -pub const ENOENT: c_int = 2; -pub const ESRCH: c_int = 3; -pub const EINTR: c_int = 4; -pub const EIO: c_int = 5; -pub const ENXIO: c_int = 6; -pub const E2BIG: c_int = 7; -pub const ENOEXEC: c_int = 8; -pub const EBADF: c_int = 9; -pub const ECHILD: c_int = 10; -pub const EAGAIN: c_int = 11; -pub const ENOMEM: c_int = 12; -pub const EACCES: c_int = 13; -pub const EFAULT: c_int = 14; -pub const EBUSY: c_int = 16; -pub const EEXIST: c_int = 17; -pub const EXDEV: c_int = 18; -pub const ENODEV: c_int = 19; -pub const ENOTDIR: c_int = 20; -pub const EISDIR: c_int = 21; -pub const EINVAL: c_int = 22; -pub const ENFILE: c_int = 23; -pub const EMFILE: c_int = 24; -pub const ENOTTY: c_int = 25; -pub const EFBIG: c_int = 27; -pub const ENOSPC: c_int = 28; -pub const ESPIPE: c_int = 29; -pub const EROFS: c_int = 30; -pub const EMLINK: c_int = 31; -pub const EPIPE: c_int = 32; -pub const EDOM: c_int = 33; -pub const ERANGE: c_int = 34; -pub 
const EDEADLK: c_int = 36; -pub const EDEADLOCK: c_int = 36; -pub const ENAMETOOLONG: c_int = 38; -pub const ENOLCK: c_int = 39; -pub const ENOSYS: c_int = 40; -pub const ENOTEMPTY: c_int = 41; -pub const EILSEQ: c_int = 42; -pub const STRUNCATE: c_int = 80; - -// POSIX Supplement (from errno.h) -pub const EADDRINUSE: c_int = 100; -pub const EADDRNOTAVAIL: c_int = 101; -pub const EAFNOSUPPORT: c_int = 102; -pub const EALREADY: c_int = 103; -pub const EBADMSG: c_int = 104; -pub const ECANCELED: c_int = 105; -pub const ECONNABORTED: c_int = 106; -pub const ECONNREFUSED: c_int = 107; -pub const ECONNRESET: c_int = 108; -pub const EDESTADDRREQ: c_int = 109; -pub const EHOSTUNREACH: c_int = 110; -pub const EIDRM: c_int = 111; -pub const EINPROGRESS: c_int = 112; -pub const EISCONN: c_int = 113; -pub const ELOOP: c_int = 114; -pub const EMSGSIZE: c_int = 115; -pub const ENETDOWN: c_int = 116; -pub const ENETRESET: c_int = 117; -pub const ENETUNREACH: c_int = 118; -pub const ENOBUFS: c_int = 119; -pub const ENODATA: c_int = 120; -pub const ENOLINK: c_int = 121; -pub const ENOMSG: c_int = 122; -pub const ENOPROTOOPT: c_int = 123; -pub const ENOSR: c_int = 124; -pub const ENOSTR: c_int = 125; -pub const ENOTCONN: c_int = 126; -pub const ENOTRECOVERABLE: c_int = 127; -pub const ENOTSOCK: c_int = 128; -pub const ENOTSUP: c_int = 129; -pub const EOPNOTSUPP: c_int = 130; -pub const EOVERFLOW: c_int = 132; -pub const EOWNERDEAD: c_int = 133; -pub const EPROTO: c_int = 134; -pub const EPROTONOSUPPORT: c_int = 135; -pub const EPROTOTYPE: c_int = 136; -pub const ETIME: c_int = 137; -pub const ETIMEDOUT: c_int = 138; -pub const ETXTBSY: c_int = 139; -pub const EWOULDBLOCK: c_int = 140; - -// signal codes -pub const SIGINT: c_int = 2; -pub const SIGILL: c_int = 4; -pub const SIGFPE: c_int = 8; -pub const SIGSEGV: c_int = 11; -pub const SIGTERM: c_int = 15; -pub const SIGABRT: c_int = 22; -pub const NSIG: c_int = 23; - -pub const SIG_ERR: c_int = -1; -pub const SIG_DFL: crate::sighandler_t = 0; -pub const SIG_IGN: crate::sighandler_t = 1; -pub const SIG_GET: crate::sighandler_t = 2; -pub const SIG_SGE: crate::sighandler_t = 3; -pub const SIG_ACK: crate::sighandler_t = 4; - -// DIFF(main): removed in 458c58f409 -// FIXME(msrv): done by `std` starting in 1.79.0 -// inline comment below appeases style checker -#[cfg(all(target_env = "msvc", feature = "rustc-dep-of-std"))] // " if " -#[link(name = "msvcrt", cfg(not(target_feature = "crt-static")))] -#[link(name = "libcmt", cfg(target_feature = "crt-static"))] -extern "C" {} - -#[derive(Debug)] -pub enum FILE {} -impl Copy for FILE {} -impl Clone for FILE { - fn clone(&self) -> FILE { - *self - } -} -#[derive(Debug)] -pub enum fpos_t {} // FIXME(windows): fill this out with a struct -impl Copy for fpos_t {} -impl Clone for fpos_t { - fn clone(&self) -> fpos_t { - *self - } -} - -// Special handling for all print and scan type functions because of https://github.com/rust-lang/libc/issues/2860 -cfg_if! { - if #[cfg(not(feature = "rustc-dep-of-std"))] { - #[cfg_attr( - all(windows, target_env = "msvc"), - link(name = "legacy_stdio_definitions") - )] - extern "C" { - pub fn printf(format: *const c_char, ...) -> c_int; - pub fn fprintf(stream: *mut FILE, format: *const c_char, ...) 
-> c_int; - } - } -} - -extern "C" { - pub fn isalnum(c: c_int) -> c_int; - pub fn isalpha(c: c_int) -> c_int; - pub fn iscntrl(c: c_int) -> c_int; - pub fn isdigit(c: c_int) -> c_int; - pub fn isgraph(c: c_int) -> c_int; - pub fn islower(c: c_int) -> c_int; - pub fn isprint(c: c_int) -> c_int; - pub fn ispunct(c: c_int) -> c_int; - pub fn isspace(c: c_int) -> c_int; - pub fn isupper(c: c_int) -> c_int; - pub fn isxdigit(c: c_int) -> c_int; - pub fn isblank(c: c_int) -> c_int; - pub fn tolower(c: c_int) -> c_int; - pub fn toupper(c: c_int) -> c_int; - pub fn qsort( - base: *mut c_void, - num: size_t, - size: size_t, - compar: Option c_int>, - ); - pub fn qsort_s( - base: *mut c_void, - num: size_t, - size: size_t, - compar: Option c_int>, - arg: *mut c_void, - ); - pub fn fopen(filename: *const c_char, mode: *const c_char) -> *mut FILE; - pub fn freopen(filename: *const c_char, mode: *const c_char, file: *mut FILE) -> *mut FILE; - pub fn fflush(file: *mut FILE) -> c_int; - pub fn fclose(file: *mut FILE) -> c_int; - pub fn remove(filename: *const c_char) -> c_int; - pub fn rename(oldname: *const c_char, newname: *const c_char) -> c_int; - pub fn tmpfile() -> *mut FILE; - pub fn setvbuf(stream: *mut FILE, buffer: *mut c_char, mode: c_int, size: size_t) -> c_int; - pub fn setbuf(stream: *mut FILE, buf: *mut c_char); - pub fn getchar() -> c_int; - pub fn putchar(c: c_int) -> c_int; - pub fn fgetc(stream: *mut FILE) -> c_int; - pub fn fgets(buf: *mut c_char, n: c_int, stream: *mut FILE) -> *mut c_char; - pub fn fputc(c: c_int, stream: *mut FILE) -> c_int; - pub fn fputs(s: *const c_char, stream: *mut FILE) -> c_int; - pub fn puts(s: *const c_char) -> c_int; - pub fn ungetc(c: c_int, stream: *mut FILE) -> c_int; - pub fn fread(ptr: *mut c_void, size: size_t, nobj: size_t, stream: *mut FILE) -> size_t; - pub fn fwrite(ptr: *const c_void, size: size_t, nobj: size_t, stream: *mut FILE) -> size_t; - pub fn fseek(stream: *mut FILE, offset: c_long, whence: c_int) -> c_int; - pub fn ftell(stream: *mut FILE) -> c_long; - pub fn rewind(stream: *mut FILE); - pub fn fgetpos(stream: *mut FILE, ptr: *mut fpos_t) -> c_int; - pub fn fsetpos(stream: *mut FILE, ptr: *const fpos_t) -> c_int; - pub fn feof(stream: *mut FILE) -> c_int; - pub fn ferror(stream: *mut FILE) -> c_int; - pub fn perror(s: *const c_char); - pub fn atof(s: *const c_char) -> c_double; - pub fn atoi(s: *const c_char) -> c_int; - pub fn atol(s: *const c_char) -> c_long; - pub fn atoll(s: *const c_char) -> c_longlong; - pub fn strtod(s: *const c_char, endp: *mut *mut c_char) -> c_double; - pub fn strtof(s: *const c_char, endp: *mut *mut c_char) -> c_float; - pub fn strtol(s: *const c_char, endp: *mut *mut c_char, base: c_int) -> c_long; - pub fn strtoll(s: *const c_char, endp: *mut *mut c_char, base: c_int) -> c_longlong; - pub fn strtoul(s: *const c_char, endp: *mut *mut c_char, base: c_int) -> c_ulong; - pub fn strtoull(s: *const c_char, endp: *mut *mut c_char, base: c_int) -> c_ulonglong; - pub fn calloc(nobj: size_t, size: size_t) -> *mut c_void; - pub fn malloc(size: size_t) -> *mut c_void; - pub fn _msize(p: *mut c_void) -> size_t; - pub fn realloc(p: *mut c_void, size: size_t) -> *mut c_void; - pub fn free(p: *mut c_void); - pub fn abort() -> !; - pub fn exit(status: c_int) -> !; - pub fn _exit(status: c_int) -> !; - pub fn atexit(cb: extern "C" fn()) -> c_int; - pub fn system(s: *const c_char) -> c_int; - pub fn getenv(s: *const c_char) -> *mut c_char; - - pub fn strcpy(dst: *mut c_char, src: *const c_char) -> *mut c_char; - pub fn 
strncpy(dst: *mut c_char, src: *const c_char, n: size_t) -> *mut c_char; - pub fn strcat(s: *mut c_char, ct: *const c_char) -> *mut c_char; - pub fn strncat(s: *mut c_char, ct: *const c_char, n: size_t) -> *mut c_char; - pub fn strcmp(cs: *const c_char, ct: *const c_char) -> c_int; - pub fn strncmp(cs: *const c_char, ct: *const c_char, n: size_t) -> c_int; - pub fn strcoll(cs: *const c_char, ct: *const c_char) -> c_int; - pub fn strchr(cs: *const c_char, c: c_int) -> *mut c_char; - pub fn strrchr(cs: *const c_char, c: c_int) -> *mut c_char; - pub fn strspn(cs: *const c_char, ct: *const c_char) -> size_t; - pub fn strcspn(cs: *const c_char, ct: *const c_char) -> size_t; - pub fn strdup(cs: *const c_char) -> *mut c_char; - pub fn strpbrk(cs: *const c_char, ct: *const c_char) -> *mut c_char; - pub fn strstr(cs: *const c_char, ct: *const c_char) -> *mut c_char; - pub fn strlen(cs: *const c_char) -> size_t; - pub fn strnlen(cs: *const c_char, maxlen: size_t) -> size_t; - pub fn strerror(n: c_int) -> *mut c_char; - pub fn strtok(s: *mut c_char, t: *const c_char) -> *mut c_char; - pub fn strxfrm(s: *mut c_char, ct: *const c_char, n: size_t) -> size_t; - pub fn wcslen(buf: *const wchar_t) -> size_t; - pub fn wcsnlen(str: *const wchar_t, numberOfElements: size_t) -> size_t; - pub fn wcstombs(dest: *mut c_char, src: *const wchar_t, n: size_t) -> size_t; - - pub fn memchr(cx: *const c_void, c: c_int, n: size_t) -> *mut c_void; - pub fn memcmp(cx: *const c_void, ct: *const c_void, n: size_t) -> c_int; - pub fn memcpy(dest: *mut c_void, src: *const c_void, n: size_t) -> *mut c_void; - pub fn memmove(dest: *mut c_void, src: *const c_void, n: size_t) -> *mut c_void; - pub fn memset(dest: *mut c_void, c: c_int, n: size_t) -> *mut c_void; - - pub fn abs(i: c_int) -> c_int; - pub fn labs(i: c_long) -> c_long; - pub fn rand() -> c_int; - pub fn srand(seed: c_uint); - - pub fn signal(signum: c_int, handler: sighandler_t) -> sighandler_t; - pub fn raise(signum: c_int) -> c_int; - - pub fn clock() -> clock_t; - pub fn ctime(sourceTime: *const time_t) -> *mut c_char; - pub fn difftime(timeEnd: time_t, timeStart: time_t) -> c_double; - #[link_name = "_gmtime64_s"] - pub fn gmtime_s(destTime: *mut tm, srcTime: *const time_t) -> c_int; - #[link_name = "_get_daylight"] - pub fn get_daylight(hours: *mut c_int) -> errno_t; - #[link_name = "_get_dstbias"] - pub fn get_dstbias(seconds: *mut c_long) -> errno_t; - #[link_name = "_get_timezone"] - pub fn get_timezone(seconds: *mut c_long) -> errno_t; - #[link_name = "_get_tzname"] - pub fn get_tzname( - p_return_value: *mut size_t, - time_zone_name: *mut c_char, - size_in_bytes: size_t, - index: c_int, - ) -> errno_t; - #[link_name = "_localtime64_s"] - pub fn localtime_s(tmDest: *mut tm, sourceTime: *const time_t) -> crate::errno_t; - #[link_name = "_time64"] - pub fn time(destTime: *mut time_t) -> time_t; - #[link_name = "_tzset"] - pub fn tzset(); - #[link_name = "_chmod"] - pub fn chmod(path: *const c_char, mode: c_int) -> c_int; - #[link_name = "_wchmod"] - pub fn wchmod(path: *const wchar_t, mode: c_int) -> c_int; - #[link_name = "_mkdir"] - pub fn mkdir(path: *const c_char) -> c_int; - #[link_name = "_wrmdir"] - pub fn wrmdir(path: *const wchar_t) -> c_int; - #[link_name = "_fstat64"] - pub fn fstat(fildes: c_int, buf: *mut stat) -> c_int; - #[link_name = "_stat64"] - pub fn stat(path: *const c_char, buf: *mut stat) -> c_int; - #[link_name = "_wstat64"] - pub fn wstat(path: *const wchar_t, buf: *mut stat) -> c_int; - #[link_name = "_wutime64"] - pub fn wutime(file: 
*const wchar_t, buf: *mut utimbuf) -> c_int; - #[link_name = "_popen"] - pub fn popen(command: *const c_char, mode: *const c_char) -> *mut crate::FILE; - #[link_name = "_pclose"] - pub fn pclose(stream: *mut crate::FILE) -> c_int; - #[link_name = "_fdopen"] - pub fn fdopen(fd: c_int, mode: *const c_char) -> *mut crate::FILE; - #[link_name = "_fileno"] - pub fn fileno(stream: *mut crate::FILE) -> c_int; - #[link_name = "_open"] - pub fn open(path: *const c_char, oflag: c_int, ...) -> c_int; - #[link_name = "_wopen"] - pub fn wopen(path: *const wchar_t, oflag: c_int, ...) -> c_int; - #[link_name = "_creat"] - pub fn creat(path: *const c_char, mode: c_int) -> c_int; - #[link_name = "_access"] - pub fn access(path: *const c_char, amode: c_int) -> c_int; - #[link_name = "_chdir"] - pub fn chdir(dir: *const c_char) -> c_int; - #[link_name = "_close"] - pub fn close(fd: c_int) -> c_int; - #[link_name = "_dup"] - pub fn dup(fd: c_int) -> c_int; - #[link_name = "_dup2"] - pub fn dup2(src: c_int, dst: c_int) -> c_int; - #[link_name = "_execl"] - pub fn execl(path: *const c_char, arg0: *const c_char, ...) -> intptr_t; - #[link_name = "_wexecl"] - pub fn wexecl(path: *const wchar_t, arg0: *const wchar_t, ...) -> intptr_t; - #[link_name = "_execle"] - pub fn execle(path: *const c_char, arg0: *const c_char, ...) -> intptr_t; - #[link_name = "_wexecle"] - pub fn wexecle(path: *const wchar_t, arg0: *const wchar_t, ...) -> intptr_t; - #[link_name = "_execlp"] - pub fn execlp(path: *const c_char, arg0: *const c_char, ...) -> intptr_t; - #[link_name = "_wexeclp"] - pub fn wexeclp(path: *const wchar_t, arg0: *const wchar_t, ...) -> intptr_t; - #[link_name = "_execlpe"] - pub fn execlpe(path: *const c_char, arg0: *const c_char, ...) -> intptr_t; - #[link_name = "_wexeclpe"] - pub fn wexeclpe(path: *const wchar_t, arg0: *const wchar_t, ...) 
-> intptr_t; - #[link_name = "_execv"] - // DIFF(main): changed to `intptr_t` in e77f551de9 - pub fn execv(prog: *const c_char, argv: *const *const c_char) -> intptr_t; - #[link_name = "_execve"] - pub fn execve( - prog: *const c_char, - argv: *const *const c_char, - envp: *const *const c_char, - ) -> c_int; - #[link_name = "_execvp"] - pub fn execvp(c: *const c_char, argv: *const *const c_char) -> c_int; - #[link_name = "_execvpe"] - pub fn execvpe( - c: *const c_char, - argv: *const *const c_char, - envp: *const *const c_char, - ) -> c_int; - - #[link_name = "_wexecv"] - pub fn wexecv(prog: *const wchar_t, argv: *const *const wchar_t) -> intptr_t; - #[link_name = "_wexecve"] - pub fn wexecve( - prog: *const wchar_t, - argv: *const *const wchar_t, - envp: *const *const wchar_t, - ) -> intptr_t; - #[link_name = "_wexecvp"] - pub fn wexecvp(c: *const wchar_t, argv: *const *const wchar_t) -> intptr_t; - #[link_name = "_wexecvpe"] - pub fn wexecvpe( - c: *const wchar_t, - argv: *const *const wchar_t, - envp: *const *const wchar_t, - ) -> intptr_t; - #[link_name = "_getcwd"] - pub fn getcwd(buf: *mut c_char, size: c_int) -> *mut c_char; - #[link_name = "_getpid"] - pub fn getpid() -> c_int; - #[link_name = "_isatty"] - pub fn isatty(fd: c_int) -> c_int; - #[link_name = "_lseek"] - pub fn lseek(fd: c_int, offset: c_long, origin: c_int) -> c_long; - #[link_name = "_lseeki64"] - pub fn lseek64(fd: c_int, offset: c_longlong, origin: c_int) -> c_longlong; - #[link_name = "_pipe"] - pub fn pipe(fds: *mut c_int, psize: c_uint, textmode: c_int) -> c_int; - #[link_name = "_read"] - pub fn read(fd: c_int, buf: *mut c_void, count: c_uint) -> c_int; - #[link_name = "_rmdir"] - pub fn rmdir(path: *const c_char) -> c_int; - #[link_name = "_unlink"] - pub fn unlink(c: *const c_char) -> c_int; - #[link_name = "_write"] - pub fn write(fd: c_int, buf: *const c_void, count: c_uint) -> c_int; - #[link_name = "_commit"] - pub fn commit(fd: c_int) -> c_int; - #[link_name = "_get_osfhandle"] - pub fn get_osfhandle(fd: c_int) -> intptr_t; - #[link_name = "_open_osfhandle"] - pub fn open_osfhandle(osfhandle: intptr_t, flags: c_int) -> c_int; - pub fn setlocale(category: c_int, locale: *const c_char) -> *mut c_char; - #[link_name = "_wsetlocale"] - pub fn wsetlocale(category: c_int, locale: *const wchar_t) -> *mut wchar_t; - #[link_name = "_aligned_malloc"] - pub fn aligned_malloc(size: size_t, alignment: size_t) -> *mut c_void; - #[link_name = "_aligned_free"] - pub fn aligned_free(ptr: *mut c_void); - #[link_name = "_aligned_realloc"] - pub fn aligned_realloc(memblock: *mut c_void, size: size_t, alignment: size_t) -> *mut c_void; - #[link_name = "_putenv"] - pub fn putenv(envstring: *const c_char) -> c_int; - #[link_name = "_wputenv"] - pub fn wputenv(envstring: *const crate::wchar_t) -> c_int; - #[link_name = "_putenv_s"] - pub fn putenv_s(envstring: *const c_char, value_string: *const c_char) -> crate::errno_t; - #[link_name = "_wputenv_s"] - pub fn wputenv_s( - envstring: *const crate::wchar_t, - value_string: *const crate::wchar_t, - ) -> crate::errno_t; -} - -extern "system" { - pub fn listen(s: SOCKET, backlog: c_int) -> c_int; - pub fn accept(s: SOCKET, addr: *mut crate::sockaddr, addrlen: *mut c_int) -> SOCKET; - pub fn bind(s: SOCKET, name: *const crate::sockaddr, namelen: c_int) -> c_int; - pub fn connect(s: SOCKET, name: *const crate::sockaddr, namelen: c_int) -> c_int; - pub fn getpeername(s: SOCKET, name: *mut crate::sockaddr, nameln: *mut c_int) -> c_int; - pub fn getsockname(s: SOCKET, name: *mut 
crate::sockaddr, nameln: *mut c_int) -> c_int; - pub fn getsockopt( - s: SOCKET, - level: c_int, - optname: c_int, - optval: *mut c_char, - optlen: *mut c_int, - ) -> c_int; - pub fn recvfrom( - s: SOCKET, - buf: *mut c_char, - len: c_int, - flags: c_int, - from: *mut crate::sockaddr, - fromlen: *mut c_int, - ) -> c_int; - pub fn sendto( - s: SOCKET, - buf: *const c_char, - len: c_int, - flags: c_int, - to: *const crate::sockaddr, - tolen: c_int, - ) -> c_int; - pub fn setsockopt( - s: SOCKET, - level: c_int, - optname: c_int, - optval: *const c_char, - optlen: c_int, - ) -> c_int; - pub fn socket(af: c_int, socket_type: c_int, protocol: c_int) -> SOCKET; -} - -cfg_if! { - if #[cfg(all(target_env = "gnu"))] { - mod gnu; - pub use self::gnu::*; - } else if #[cfg(all(target_env = "msvc"))] { - mod msvc; - pub use self::msvc::*; - } else { - // Unknown target_env - } -} diff --git a/vendor/libc/src/windows/msvc/mod.rs b/vendor/libc/src/windows/msvc/mod.rs deleted file mode 100644 index 5b620bc6c1afa1..00000000000000 --- a/vendor/libc/src/windows/msvc/mod.rs +++ /dev/null @@ -1,17 +0,0 @@ -use crate::prelude::*; - -pub const L_tmpnam: c_uint = 260; -pub const TMP_MAX: c_uint = 0x7fff_ffff; - -// POSIX Supplement (from errno.h) -// This particular error code is only currently available in msvc toolchain -pub const EOTHER: c_int = 131; - -extern "C" { - #[link_name = "_stricmp"] - pub fn stricmp(s1: *const c_char, s2: *const c_char) -> c_int; - #[link_name = "_strnicmp"] - pub fn strnicmp(s1: *const c_char, s2: *const c_char, n: size_t) -> c_int; - #[link_name = "_memccpy"] - pub fn memccpy(dest: *mut c_void, src: *const c_void, c: c_int, count: size_t) -> *mut c_void; -} diff --git a/vendor/libc/src/xous.rs b/vendor/libc/src/xous.rs deleted file mode 100644 index 2415fd42824e1c..00000000000000 --- a/vendor/libc/src/xous.rs +++ /dev/null @@ -1,18 +0,0 @@ -//! 
Xous C type definitions - -use crate::prelude::*; - -pub type intmax_t = i64; -pub type uintmax_t = u64; - -pub type size_t = usize; -pub type ptrdiff_t = isize; -pub type intptr_t = isize; -pub type uintptr_t = usize; -pub type ssize_t = isize; - -pub type off_t = i64; -pub type wchar_t = u32; - -pub const INT_MIN: c_int = -2147483648; -pub const INT_MAX: c_int = 2147483647; diff --git a/vendor/libc/tests/const_fn.rs b/vendor/libc/tests/const_fn.rs deleted file mode 100644 index d9b41b8073c70d..00000000000000 --- a/vendor/libc/tests/const_fn.rs +++ /dev/null @@ -1,3 +0,0 @@ -#[cfg(target_os = "linux")] -const _FOO: libc::c_uint = unsafe { libc::CMSG_SPACE(1) }; -//^ if CMSG_SPACE is not const, this will fail to compile diff --git a/vendor/libloading/.cargo-checksum.json b/vendor/libloading/.cargo-checksum.json deleted file mode 100644 index 49d35f1ffa9e92..00000000000000 --- a/vendor/libloading/.cargo-checksum.json +++ /dev/null @@ -1 +0,0 @@ -{"files":{".cargo_vcs_info.json":"2c2a09896cf605d801de7c91dc9c99904e92c46aa4ec8d7963f67a12bd33f93b",".github/workflows/libloading.yml":"46df13bc0dc7c70e42bd818084fd4a3f5b8ecb74cb9b45cc585542952f96ac54","Cargo.lock":"516b49980abf75390492d18d669219adfce3b87dbabf5186b68d260ae2225c8c","Cargo.toml":"c70a979f3e8987bcf85b377d310b895b4bbeae29f6b0a28735cf48230087d5e2","Cargo.toml.orig":"a3ba7155546bbf086806c8afe512a63af09288d2afd3b26e781d4f999534eb03","LICENSE":"b29f8b01452350c20dd1af16ef83b598fea3053578ccc1c7a0ef40e57be2620f","README.mkd":"707e1cae9fa4b691ce5cb8a3976573158fc60b67cb89948f8f5d51c5908bd0a8","src/changelog.rs":"e3683b87d485ac6369349b9bbd7b04957664b8fdbc2fee845a4c5e56ce226036","src/error.rs":"24dbe0edbe6e0c3635168cc8548a32ef8c9eb939a3f6b976d48e7b7c29d752de","src/lib.rs":"f54281f105189a23f88464b1ad02c5d5073a873e5b5736d59a03d94cb485a861","src/os/mod.rs":"6c59ef8c1120953ae6b6c32f27766c643ca90d85075c49c3545d2fe1ed82cedd","src/os/unix/consts.rs":"61a73d876c19ec0542c1ca32d43eddb3b9991761d05d79351ac831dc88900b2e","src/os/unix/mod.rs":"d080b693c0a235917d6fb462ff7ef39c344883b00d8f741b18dee184538e3530","src/os/windows/mod.rs":"bce75443921d24734fe6ebc38f4b0c5ffb6303db643c88dd54779c93014f2b38","src/safe.rs":"c91c743162488495b28a9735c20c5b9fb6ea3f06fe936cd3d19ba4d1ddb2707c","src/test_helpers.rs":"201403e143e5b3204864124cd38067cf8813d5273dc1a9099288a9dc4bdd15b6","src/util.rs":"0b6dcfb9eafff2d87966460ef6b1b99980f888813037e787ed92deee602f8c2b","tests/constants.rs":"4778c062605ed22238c1bed16de4c076d0857282f090f36e6d985dafb7b4544d","tests/functions.rs":"bfe07fc286693235b12e9e04d8d079722b320871722baf3867bcf20dfb69cc43","tests/library_filename.rs":"5f43ce556e7631a63fd5c1466c82afa8bc3fbc5613210ce185227b40431b81db","tests/markers.rs":"0ebc8f807b92e39452d35732988012cdca7ce96231c57eaac9c3f4217225ad39","tests/nagisa32.dll":"5c69b2bd9c8a6ad04165c221075fc9fade1dd66ca697399ace528a5a62328e36","tests/nagisa64.dll":"e20b95e3036f3289421abd100760874d4f455afd33c3b5b64fec56b191f7d477","tests/windows.rs":"d47752de5ce18b304697a957f430351a91b0188a80a881d6558d601df69f4036"},"package":"d7c4b02199fee7c5d21a5ae7d8cfa79a6ef5bb2fc834d6e9058e89c825efdc55"} \ No newline at end of file diff --git a/vendor/libloading/.cargo_vcs_info.json b/vendor/libloading/.cargo_vcs_info.json deleted file mode 100644 index a5ed37147c280d..00000000000000 --- a/vendor/libloading/.cargo_vcs_info.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "git": { - "sha1": "f4ec9e702de2d0778bccff8525dc44e4cacac2d1" - }, - "path_in_vcs": "" -} \ No newline at end of file diff --git 
a/vendor/libloading/.github/workflows/libloading.yml b/vendor/libloading/.github/workflows/libloading.yml deleted file mode 100644 index 817d1df19e6eba..00000000000000 --- a/vendor/libloading/.github/workflows/libloading.yml +++ /dev/null @@ -1,126 +0,0 @@ -concurrency: - group: ${{ github.workflow }}-${{ github.ref }} - cancel-in-progress: true - -on: - push: - branches: [master] - paths-ignore: ['*.mkd', 'LICENSE'] - pull_request: - types: [opened, reopened, synchronize] - -jobs: - native-test: - runs-on: ${{ matrix.os }} - strategy: - fail-fast: false - matrix: - rust_toolchain: [nightly, stable, 1.71.0] - os: [ubuntu-latest, windows-latest, macOS-latest] - timeout-minutes: 20 - steps: - - uses: actions/checkout@v2 - - run: rustup install ${{ matrix.rust_toolchain }} --profile=minimal - - run: rustup default ${{ matrix.rust_toolchain }} - - run: rustup component add clippy - - run: cargo update -p libc --precise 0.2.155 - if: ${{ matrix.rust_toolchain == '1.71.0' }} - - run: cargo clippy - - run: cargo test -- --nocapture - - run: cargo test --release -- --nocapture - - run: cargo rustdoc -Zunstable-options --config 'build.rustdocflags=["--cfg", "libloading_docs", "-D", "rustdoc::broken_intra_doc_links"]' - if: ${{ matrix.rust_toolchain == 'nightly' }} - # pwsh.exe drops quotes kekw. https://stackoverflow.com/a/59036879 - shell: bash - - windows-test: - runs-on: windows-latest - strategy: - fail-fast: false - matrix: - rust_toolchain: [nightly, stable] - rust_target: - - x86_64-pc-windows-gnullvm - - i686-pc-windows-gnu - include: - - rust_target: x86_64-pc-windows-gnullvm - mingw_path: C:/msys64/clang64/bin - package: mingw-w64-clang-x86_64-clang - - rust_target: i686-pc-windows-gnu - mingw_path: C:/msys64/mingw32/bin - package: mingw-w64-i686-gcc - steps: - - uses: actions/checkout@v2 - - run: rustup install ${{ matrix.rust_toolchain }} --profile=minimal - - run: rustup default ${{ matrix.rust_toolchain }} - - run: rustup target add ${{ matrix.rust_target }} - - uses: msys2/setup-msys2@v2 - with: - release: false - install: ${{ matrix.package }} - - run: echo "${{ matrix.mingw_path }}" | Out-File -FilePath $env:GITHUB_PATH -Append - if: ${{ matrix.mingw_path }}" - - run: cargo test --target ${{ matrix.rust_target }} - env: - TARGET: ${{ matrix.rust_target}} - - msys2-test: - runs-on: windows-latest - strategy: - fail-fast: false - steps: - - uses: actions/checkout@v2 - - run: rustup install nightly --profile=minimal - - run: rustup default nightly - - run: rustup component add rust-src - - uses: msys2/setup-msys2@v2 - with: - release: false - install: gcc - - run: echo "INPUT(libmsys-2.0.a)" | Out-File -FilePath "C:\msys64\usr\lib\libcygwin.a" - - run: | - $env:PATH = "C:\msys64\usr\bin\;$env:PATH" - cargo test --target x86_64-pc-cygwin -Zbuild-std - env: - CARGO_TARGET_X86_64_PC_CYGWIN_LINKER: x86_64-pc-cygwin-gcc.exe - - bare-cross-build: - runs-on: ubuntu-latest - strategy: - fail-fast: false - matrix: - rust_toolchain: [nightly] - rust_target: - # BSDs: could be tested with full system emulation - # - x86_64-unknown-dragonfly - # - x86_64-unknown-freebsd - - x86_64-unknown-haiku - # - x86_64-unknown-netbsd - - x86_64-unknown-openbsd - - x86_64-unknown-redox - - x86_64-unknown-fuchsia - - wasm32-unknown-unknown - timeout-minutes: 20 - steps: - - uses: actions/checkout@v2 - - run: rustup install ${{ matrix.rust_toolchain }} --profile=minimal - - run: rustup default ${{ matrix.rust_toolchain }} - - run: rustup component add rust-src --toolchain nightly --target ${{ matrix.rust_target 
}} - - run: cargo build --target ${{ matrix.rust_target }} -Zbuild-std - - cross-ios-build: - runs-on: macos-latest - strategy: - fail-fast: false - matrix: - rust_toolchain: [nightly, stable] - rust_target: - - aarch64-apple-ios - - x86_64-apple-ios - timeout-minutes: 20 - steps: - - uses: actions/checkout@v2 - - run: rustup install ${{ matrix.rust_toolchain }} --profile=minimal - - run: rustup default ${{ matrix.rust_toolchain }} - - run: rustup target add ${{ matrix.rust_target }} - - run: cargo build --target=${{ matrix.rust_target }} diff --git a/vendor/libloading/Cargo.lock b/vendor/libloading/Cargo.lock deleted file mode 100644 index 935af143d5477f..00000000000000 --- a/vendor/libloading/Cargo.lock +++ /dev/null @@ -1,47 +0,0 @@ -# This file is automatically @generated by Cargo. -# It is not intended for manual editing. -version = 3 - -[[package]] -name = "cfg-if" -version = "1.0.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2fd1289c04a9ea8cb22300a459a72a385d7c73d3259e2ed7dcb2af674838cfa9" - -[[package]] -name = "libc" -version = "0.2.175" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a82ae493e598baaea5209805c49bbf2ea7de956d50d7da0da1164f9c6d28543" - -[[package]] -name = "libloading" -version = "0.8.9" -dependencies = [ - "cfg-if", - "libc", - "static_assertions", - "windows-link", - "windows-sys", -] - -[[package]] -name = "static_assertions" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" - -[[package]] -name = "windows-link" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "45e46c0661abb7180e7b9c281db115305d49ca1709ab8242adf09666d2173c65" - -[[package]] -name = "windows-sys" -version = "0.61.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e201184e40b2ede64bc2ea34968b28e33622acdbbf37104f0e4a33f7abe657aa" -dependencies = [ - "windows-link", -] diff --git a/vendor/libloading/Cargo.toml b/vendor/libloading/Cargo.toml deleted file mode 100644 index 2c8eb67f592a94..00000000000000 --- a/vendor/libloading/Cargo.toml +++ /dev/null @@ -1,90 +0,0 @@ -# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO -# -# When uploading crates to the registry Cargo will automatically -# "normalize" Cargo.toml files for maximal compatibility -# with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g., crates.io) dependencies. -# -# If you are reading this file be aware that the original Cargo.toml -# will likely look very different (and much more reasonable). -# See Cargo.toml.orig for the original contents. - -[package] -edition = "2015" -rust-version = "1.71.0" -name = "libloading" -version = "0.8.9" -authors = ["Simonas Kazlauskas "] -build = false -autolib = false -autobins = false -autoexamples = false -autotests = false -autobenches = false -description = "Bindings around the platform's dynamic library loading primitives with greatly improved memory safety." 
-documentation = "https://docs.rs/libloading/" -readme = "README.mkd" -keywords = [ - "dlopen", - "load", - "shared", - "dylib", -] -categories = ["api-bindings"] -license = "ISC" -repository = "https://github.com/nagisa/rust_libloading/" - -[package.metadata.docs.rs] -all-features = true -rustdoc-args = [ - "--cfg", - "libloading_docs", -] - -[lib] -name = "libloading" -path = "src/lib.rs" - -[[test]] -name = "constants" -path = "tests/constants.rs" - -[[test]] -name = "functions" -path = "tests/functions.rs" - -[[test]] -name = "library_filename" -path = "tests/library_filename.rs" - -[[test]] -name = "markers" -path = "tests/markers.rs" - -[[test]] -name = "windows" -path = "tests/windows.rs" - -[dev-dependencies.libc] -version = "0.2" - -[dev-dependencies.static_assertions] -version = "1.1" - -[target."cfg(unix)".dependencies.cfg-if] -version = "1" - -[target."cfg(windows)".dependencies.windows-link] -version = "0.2" - -[target."cfg(windows)".dev-dependencies.windows-sys] -version = "0.61" -features = ["Win32_Foundation"] - -[lints.rust.unexpected_cfgs] -level = "warn" -priority = 0 -check-cfg = [ - "cfg(libloading_docs)", - 'cfg(target_os, values("cygwin"))', -] diff --git a/vendor/libloading/LICENSE b/vendor/libloading/LICENSE deleted file mode 100644 index 9137d5607a4284..00000000000000 --- a/vendor/libloading/LICENSE +++ /dev/null @@ -1,12 +0,0 @@ -Copyright © 2015, Simonas Kazlauskas - -Permission to use, copy, modify, and/or distribute this software for any purpose with or without -fee is hereby granted, provided that the above copyright notice and this permission notice appear -in all copies. - -THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS -SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE -AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, -NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF -THIS SOFTWARE. diff --git a/vendor/libloading/README.mkd b/vendor/libloading/README.mkd deleted file mode 100644 index 7ff55b04b2dfa4..00000000000000 --- a/vendor/libloading/README.mkd +++ /dev/null @@ -1,16 +0,0 @@ -# libloading - -Bindings around the platform's dynamic library loading primitives with greatly improved memory -safety. The most important safety guarantee of this library is the prevention of dangling `Symbol`s -that may occur after a `Library` is unloaded. - -Using this library allows the loading of dynamic libraries, also known as shared libraries, as well -as the use of the functions and static variables that these libraries may contain. - -* [Documentation][docs] -* [Changelog][changelog] - -[docs]: https://docs.rs/libloading/ -[changelog]: https://docs.rs/libloading/*/libloading/changelog/index.html - -libloading is available to use under ISC (MIT-like) license. diff --git a/vendor/libloading/src/changelog.rs b/vendor/libloading/src/changelog.rs deleted file mode 100644 index 181915e985dceb..00000000000000 --- a/vendor/libloading/src/changelog.rs +++ /dev/null @@ -1,405 +0,0 @@ -//! The change log. - -/// Release 0.8.9 (2025-09-17) -/// -/// ## Non-breaking changes -/// -/// Migrate from windows-targets to windows-link for linking Windows API functions. -pub mod r0_8_9 {} - -/// Release 0.8.8 (2025-05-27) -/// -/// ## Non-breaking changes -/// -/// Add `os::window::Library::pin`. 
-pub mod r0_8_8 {} - -/// Release 0.8.7 (2025-04-26) -/// -/// ## Non-breaking changes -/// -/// Add support for the `*-pc-cygwin` target. -pub mod r0_8_7 {} - -/// Release 0.8.4 (2024-06-23) -/// -/// ## Non-breaking changes -/// -/// Compilation when targeting Apple's visionos, watchos and tvos targets has been fixed. -pub mod r0_8_4 {} - -/// Release 0.8.3 (2024-03-05) -/// -/// ## Non-breaking changes -/// -/// A `dev-dependency` on `windows-sys` that was unconditionally introduced in -/// [0.8.2](r0_8_2) has been made conditional. -pub mod r0_8_3 {} - -/// Release 0.8.2 (2024-03-01) -/// -/// ## (Potentially) breaking changes -/// -/// MSRV has been increased to 1.56.0. Since both rustc versions are ancient, this has been deemed -/// to not be breaking enough to warrant a semver-breaking release of libloading. If you're stick -/// with a version of rustc older than 1.56.0, lock `libloading` dependency to `0.8.1`. -/// -/// ## Non-breaking changes -/// -/// * The crate switches the dependency on `windows-sys` to a `windows-target` one for Windows -/// bindings. In order to enable this `libloading` defines any bindings necessary for its operation -/// internally, just like has been done for `unix` targets. This should result in leaner dependency -/// trees. -/// * `os::unix::with_dlerror` has been exposed for the users who need to invoke `dl*` family of -/// functions manually. -pub mod r0_8_2 {} - -/// Release 0.8.1 (2023-09-30) -/// -/// ## Non-breaking changes -/// -/// * Support for GNU Hurd. -pub mod r0_8_1 {} - -/// Release 0.8.0 (2023-04-11) -/// -/// ## (Potentially) breaking changes -/// -/// * `winapi` dependency has been replaced with `windows-sys`. -/// * As a result the MSRV has been increased to 1.48. -/// -/// ## Non-breaking changes -/// -/// * Support for the QNX Neutrino target has been added. -pub mod r0_8_0 {} - -/// Release 0.7.4 (2022-11-07) -/// -/// This release has no functional changes. -/// -/// `RTLD_LAZY`, `RTLD_GLOBAL` and `RTLD_LOCAL` constants have been implemented for AIX platforms. -pub mod r0_7_4 {} - -/// Release 0.7.3 (2022-01-15) -/// -/// This release has no functional changes. -/// -/// In this release the `docsrs` `cfg` has been renamed to `libloading_docs` to better reflect that -/// this `cfg` is intended to be only used by `libloading` and only specifically for the invocation -/// of `rustdoc` when documenting `libloading`. Setting this `cfg` in any other situation is -/// unsupported and will not work. -pub mod r0_7_3 {} - -/// Release 0.7.2 (2021-11-14) -/// -/// Cargo.toml now specifies the MSRV bounds, which enables tooling to report an early failure when -/// the version of the toolchain is insufficient. Refer to the [min-rust-version RFC] and its -/// [tracking issue]. -/// -/// [min-rust-version RFC]: https://rust-lang.github.io/rfcs/2495-min-rust-version.html -/// [tracking issue]: https://github.com/rust-lang/rust/issues/65262 -/// -/// Additionally, on platforms `libloading` has no support (today: `not(any(unix, windows))`), we -/// will no longer attempt to implement the cross-platform `Library` and `Symbol` types. This makes -/// `libloading` compile on targets such as `wasm32-unknown-unknown` and gives ability to the -/// downstream consumers of this library to decide how they want to handle the absence of the -/// library loading implementation in their code. 
One of such approaches could be depending on
-/// `libloading` itself optionally as such:
-///
-/// ```toml
-/// [target.'cfg(any(unix, windows))'.dependencies.libloading]
-/// version = "0.7"
-/// ```
-pub mod r0_7_2 {}
-
-/// Release 0.7.1 (2021-10-09)
-///
-/// Significantly improved the consistency and style of the documentation.
-pub mod r0_7_1 {}
-
-/// Release 0.7.0 (2021-02-06)
-///
-/// ## Breaking changes
-///
-/// ### Loading functions are now `unsafe`
-///
-/// A number of associated methods involved in loading a library were changed to
-/// be `unsafe`. The affected functions are: [`Library::new`], [`os::unix::Library::new`],
-/// [`os::unix::Library::open`], [`os::windows::Library::new`],
-/// [`os::windows::Library::load_with_flags`]. This is the most prominent breaking change in this
-/// release and affects majority of the users of `libloading`.
-///
-/// In order to see why it was necessary, consider the following snippet of C++ code:
-///
-/// ```c++
-/// #include <iostream>
-/// #include <vector>
-///
-/// static std::vector<int> UNSHUU = { 1, 2, 3 };
-///
-/// int main() {
-///     std::cout << UNSHUU[0] << UNSHUU[1] << UNSHUU[2] << std::endl; // Prints 123
-///     return 0;
-/// }
-/// ```
-///
-/// The `std::vector` type, much like in Rust's `Vec`, stores its contents in a buffer allocated on
-/// the heap. In this example the vector object itself is stored and initialized as a static
-/// variable – a compile time construct. The heap, on the other hand, is a runtime construct. And
-/// yet the code works exactly as you'd expect – the vector contains numbers 1, 2 and 3 stored in
-/// a buffer on heap. So, _what_ makes it work out, exactly?
-///
-/// Various executable and shared library formats define conventions and machinery to execute
-/// arbitrary code when a program or a shared library is loaded. On systems using the PE format
-/// (e.g. Windows) this is available via the optional `DllMain` initializer. Various systems
-/// utilizing the ELF format take a slightly different approach of maintaining an array of function
-/// pointers in the `.init_array` section. A very similar mechanism exists on systems that utilize
-/// the Mach-O format.
-///
-/// For the C++ program above, the object stored in the `UNSHUU` global variable is constructed
-/// by code run as part of such an initializer routine. This initializer is run before the entry
-/// point (the `main` function) is executed, allowing for this magical behaviour to be possible.
-/// Were the C++ code built as a shared library instead, the initialization routines would run as
-/// the resulting shared library is loaded. In case of `libloading` – during the call to
-/// `Library::new` and other methods affected by this change.
-///
-/// These initialization (and very closely related termination) routines can be utilized outside of
-/// C++ too. Anybody can build a shared library in variety of different programming languages and
-/// set up the initializers to execute arbitrary code. Potentially code that does all sorts of
-/// wildly unsound stuff.
-///
-/// The routines are executed by components that are an integral part of the operating system.
-/// Changing or controlling the operation of these components is infeasible. With that in
-/// mind, the initializer and termination routines are something anybody loading a library must
-/// carefully evaluate the libraries loaded for soundness.
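The contract described above is worth seeing from the caller's side. A minimal sketch, not part of the vendored sources and assuming a hypothetical plugin exporting `plugin_entry`, of what the post-0.7.0 `unsafe` constructors ask of their callers:

```rust
// Minimal sketch (not vendored code): the post-0.7.0 loading contract in practice.
// `Library::new` is `unsafe` because the loaded object's initializer and termination
// routines run arbitrary code; the unsafe blocks record that the caller vouches for them.
use libloading::{Library, Symbol};

fn load_vetted_plugin(path: &str) -> Result<u32, Box<dyn std::error::Error>> {
    // SAFETY: `path` is assumed to name a plugin we built and audited, so its
    // initializer routines are trusted not to violate memory safety.
    let lib = unsafe { Library::new(path)? };
    // SAFETY: the plugin is assumed to export `plugin_entry` with exactly this signature.
    let entry: Symbol<unsafe extern "C" fn() -> u32> = unsafe { lib.get(b"plugin_entry\0")? };
    Ok(unsafe { entry() })
}
```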
-/// -/// In practice, a vast majority of the libraries can be considered a good citizen and their -/// initialization and termination routines, if they have any at all, can be trusted to be sound. -/// -/// Also see: [issue #86]. -/// -/// ### Better & more consistent default behaviour on UNIX systems -/// -/// On UNIX systems the [`Library::new`], [`os::unix::Library::new`] and -/// [`os::unix::Library::this`] methods have been changed to use -/// [RTLD_LAZY] | [RTLD_LOCAL] as the default set of loader options (previously: -/// [`RTLD_NOW`]). This has a couple benefits. Namely: -/// -/// * Lazy binding is generally quicker to execute when only a subset of symbols from a library are -/// used and is typically the default when neither `RTLD_LAZY` nor `RTLD_NOW` are specified when -/// calling the underlying `dlopen` API; -/// * On most UNIX systems (macOS being a notable exception) `RTLD_LOCAL` is the default when -/// neither `RTLD_LOCAL` nor [`RTLD_GLOBAL`] are specified. The explicit setting of the -/// `RTLD_LOCAL` flag makes this behaviour consistent across platforms. -/// -/// ### Dropped support for Windows XP/Vista -/// -/// The (broken) support for Windows XP and Windows Vista environments was removed. This was -/// prompted primarily by a similar policy change in the [Rust -/// project](https://github.com/rust-lang/compiler-team/issues/378) but also as an acknowledgement -/// to the fact that `libloading` never worked in these environments anyway. -/// -/// ### More accurate error variant names -/// -/// Finally, the `Error::LoadLibraryW` renamed to [`Error::LoadLibraryExW`] to more accurately -/// represent the underlying API that's failing. No functional changes as part of this rename -/// intended. -/// -/// [issue #86]: https://github.com/nagisa/rust_libloading/issues/86 -/// [`Library::new`]: crate::Library::new -/// [`Error::LoadLibraryExW`]: crate::Error::LoadLibraryExW -/// [`os::unix::Library::this`]: crate::os::unix::Library::this -/// [`os::unix::Library::new`]: crate::os::unix::Library::new -/// [`os::unix::Library::open`]: crate::os::unix::Library::new -/// [`os::windows::Library::new`]: crate::os::windows::Library::new -/// [`os::windows::Library::load_with_flags`]: crate::os::windows::Library::load_with_flags -/// [`RTLD_NOW`]: crate::os::unix::RTLD_NOW -/// [RTLD_LAZY]: crate::os::unix::RTLD_LAZY -/// [RTLD_LOCAL]: crate::os::unix::RTLD_LOCAL -/// [`RTLD_GLOBAL`]: crate::os::unix::RTLD_GLOBAL -pub mod r0_7_0 {} - -/// Release 0.6.7 (2021-01-14) -/// -/// * Added a [`os::windows::Library::open_already_loaded`] to obtain a handle to a library that -/// must already be loaded. There is no portable equivalent for all UNIX targets. Users who do -/// not care about portability across UNIX platforms may use [`os::unix::Library::open`] with -/// `libc::RTLD_NOLOAD`; -/// -/// [`os::windows::Library::open_already_loaded`]: crate::os::windows::Library::open_already_loaded -/// [`os::unix::Library::open`]: crate::os::unix::Library::open -pub mod r0_6_7 {} - -/// Release 0.6.6 (2020-12-03) -/// -/// * Fix a double-release of resources when [`Library::close`] or [`os::windows::Library::close`] -/// is used on Windows. -/// -/// [`Library::close`]: crate::Library::close -/// [`os::windows::Library::close`]: crate::os::windows::Library::close -pub mod r0_6_6 {} - -/// Release 0.6.5 (2020-10-23) -/// -/// * Upgrade cfg-if 0.1 to 1.0 -pub mod r0_6_5 {} - -/// Release 0.6.4 (2020-10-10) -/// -/// * Remove use of `build.rs` making it easier to build `libloading` without cargo. 
It also -/// almost halves the build time of this crate. -pub mod r0_6_4 {} - -/// Release 0.6.3 (2020-08-22) -/// -/// * Improve documentation, allowing to view all of the os-specific functionality from -/// documentation generated for any target; -/// * Add [`os::windows::Library::this`]; -/// * Added constants to use with OS-specific `Library::open`; -/// * Add [`library_filename`]. -/// -/// [`os::windows::Library::this`]: crate::os::windows::Library::this -/// [`library_filename`]: crate::library_filename -pub mod r0_6_3 {} - -/// Release 0.6.2 (2020-05-06) -/// -/// * Fixed building of this library on Illumos. -pub mod r0_6_2 {} - -/// Release 0.6.1 (2020-04-15) -/// -/// * Introduced a new method [`os::windows::Library::load_with_flags`]; -/// * Added support for the Illumos triple. -/// -/// [`os::windows::Library::load_with_flags`]: crate::os::windows::Library::load_with_flags -pub mod r0_6_1 {} - -/// Release 0.6.0 (2020-04-05) -/// -/// * Introduced a new method [`os::unix::Library::get_singlethreaded`]; -/// * Added (untested) support for building when targeting Redox and Fuchsia; -/// * The APIs exposed by this library no longer panic and instead return an `Err` when it used -/// to panic. -/// -/// ## Breaking changes -/// -/// * Minimum required (stable) version of Rust to build this library is now 1.40.0; -/// * This crate now implements a custom [`Error`] type and all APIs now return this type rather -/// than returning the `std::io::Error`; -/// * `libloading::Result` has been removed; -/// * Removed the dependency on the C compiler to build this library on UNIX-like platforms. -/// `libloading` used to utilize a snippet written in C to work-around the unlikely possibility -/// of the target having a thread-unsafe implementation of the `dlerror` function. The effect of -/// the work-around was very opportunistic: it would not work if the function was called by -/// forgoing `libloading`. -/// -/// Starting with 0.6.0, [`Library::get`] on platforms where `dlerror` is not MT-safe (such as -/// FreeBSD, DragonflyBSD or NetBSD) will unconditionally return an error when the underlying -/// `dlsym` returns a null pointer. For the use-cases where loading null pointers is necessary -/// consider using [`os::unix::Library::get_singlethreaded`] instead. -/// -/// [`Library::get`]: crate::Library::get -/// [`os::unix::Library::get_singlethreaded`]: crate::os::unix::Library::get_singlethreaded -/// [`Error`]: crate::Error -pub mod r0_6_0 {} - -/// Release 0.5.2 (2019-07-07) -/// -/// * Added API to convert OS-specific `Library` and `Symbol` conversion to underlying resources. -pub mod r0_5_2 {} - -/// Release 0.5.1 (2019-06-01) -/// -/// * Build on Haiku targets. -pub mod r0_5_1 {} - -/// Release 0.5.0 (2018-01-11) -/// -/// * Update to `winapi = ^0.3`; -/// -/// ## Breaking changes -/// -/// * libloading now requires a C compiler to build on UNIX; -/// * This is a temporary measure until the [`linkage`] attribute is stabilised; -/// * Necessary to resolve [#32]. -/// -/// [`linkage`]: https://github.com/rust-lang/rust/issues/29603 -/// [#32]: https://github.com/nagisa/rust_libloading/issues/32 -pub mod r0_5_0 {} - -/// Release 0.4.3 (2017-12-07) -/// -/// * Bump lazy-static dependency to `^1.0`; -/// * `cargo test --release` now works when testing libloading. 
-pub mod r0_4_3 {} - -/// Release 0.4.2 (2017-09-24) -/// -/// * Improved error and race-condition handling on Windows; -/// * Improved documentation about thread-safety of Library; -/// * Added `Symbol::::lift_option() -> Option>` convenience method. -pub mod r0_4_2 {} - -/// Release 0.4.1 (2017-08-29) -/// -/// * Solaris support -pub mod r0_4_1 {} - -/// Release 0.4.0 (2017-05-01) -/// -/// * Remove build-time dependency on target_build_utils (and by extension serde/phf); -/// * Require at least version 1.14.0 of rustc to build; -/// * Actually, it is cargo which has to be more recent here. The one shipped with rustc 1.14.0 -/// is what’s being required from now on. -pub mod r0_4_0 {} - -/// Release 0.3.4 (2017-03-25) -/// -/// * Remove rogue println! -pub mod r0_3_4 {} - -/// Release 0.3.3 (2017-03-25) -/// -/// * Panics when `Library::get` is called for incompatibly sized type such as named function -/// types (which are zero-sized). -pub mod r0_3_3 {} - -/// Release 0.3.2 (2017-02-10) -/// -/// * Minimum version required is now rustc 1.12.0; -/// * Updated dependency versions (most notably target_build_utils to 0.3.0) -pub mod r0_3_2 {} - -/// Release 0.3.1 (2016-10-01) -/// -/// * `Symbol` and `os::*::Symbol` now implement `Send` where `T: Send`; -/// * `Symbol` and `os::*::Symbol` now implement `Sync` where `T: Sync`; -/// * `Library` and `os::*::Library` now implement `Sync` (they were `Send` in 0.3.0 already). -pub mod r0_3_1 {} - -/// Release 0.3.0 (2016-07-27) -/// -/// * Greatly improved documentation, especially around platform-specific behaviours; -/// * Improved test suite by building our own library to test against; -/// * All `Library`-ies now implement `Send`. -/// * Added `impl From for Library` and `impl From for -/// os::platform::Library` allowing wrapping and extracting the platform-specific library handle; -/// * Added methods to wrap (`Symbol::from_raw`) and unwrap (`Symbol::into_raw`) the safe `Symbol` -/// wrapper into unsafe `os::platform::Symbol`. -/// -/// The last two additions focus on not restricting potential usecases of this library, allowing -/// users of the library to circumvent safety checks if need be. -/// -/// ## Breaking Changes -/// -/// `Library::new` defaults to `RTLD_NOW` instead of `RTLD_LAZY` on UNIX for more consistent -/// cross-platform behaviour. If a library loaded with `Library::new` had any linking errors, but -/// unresolved references weren’t forced to be resolved, the library would’ve “just worked”, -/// whereas now the call to `Library::new` will return an error signifying presence of such error. -/// -/// ## os::platform -/// * Added `os::unix::Library::open` which allows specifying arbitrary flags (e.g. `RTLD_LAZY`); -/// * Added `os::windows::Library::get_ordinal` which allows finding a function or variable by its -/// ordinal number; -pub mod r0_3_0 {} diff --git a/vendor/libloading/src/error.rs b/vendor/libloading/src/error.rs deleted file mode 100644 index 43cf320b1bcc63..00000000000000 --- a/vendor/libloading/src/error.rs +++ /dev/null @@ -1,146 +0,0 @@ -use std::ffi::{CStr, CString}; - -/// A `dlerror` error. -pub struct DlDescription(pub(crate) CString); - -impl std::fmt::Debug for DlDescription { - fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - std::fmt::Debug::fmt(&self.0, f) - } -} - -impl From<&CStr> for DlDescription { - fn from(value: &CStr) -> Self { - Self(value.into()) - } -} - -/// A Windows API error. 
-pub struct WindowsError(pub(crate) std::io::Error); - -impl std::fmt::Debug for WindowsError { - fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - std::fmt::Debug::fmt(&self.0, f) - } -} - -/// Errors. -#[derive(Debug)] -#[non_exhaustive] -pub enum Error { - /// The `dlopen` call failed. - DlOpen { - /// The source error. - desc: DlDescription, - }, - /// The `dlopen` call failed and system did not report an error. - DlOpenUnknown, - /// The `dlsym` call failed. - DlSym { - /// The source error. - desc: DlDescription, - }, - /// The `dlsym` call failed and system did not report an error. - DlSymUnknown, - /// The `dlclose` call failed. - DlClose { - /// The source error. - desc: DlDescription, - }, - /// The `dlclose` call failed and system did not report an error. - DlCloseUnknown, - /// The `LoadLibraryW` call failed. - LoadLibraryExW { - /// The source error. - source: WindowsError, - }, - /// The `LoadLibraryW` call failed and system did not report an error. - LoadLibraryExWUnknown, - /// The `GetModuleHandleExW` call failed. - GetModuleHandleExW { - /// The source error. - source: WindowsError, - }, - /// The `GetModuleHandleExW` call failed and system did not report an error. - GetModuleHandleExWUnknown, - /// The `GetProcAddress` call failed. - GetProcAddress { - /// The source error. - source: WindowsError, - }, - /// The `GetProcAddressUnknown` call failed and system did not report an error. - GetProcAddressUnknown, - /// The `FreeLibrary` call failed. - FreeLibrary { - /// The source error. - source: WindowsError, - }, - /// The `FreeLibrary` call failed and system did not report an error. - FreeLibraryUnknown, - /// The requested type cannot possibly work. - IncompatibleSize, - /// Could not create a new CString. - CreateCString { - /// The source error. - source: std::ffi::NulError, - }, - /// Could not create a new CString from bytes with trailing null. - CreateCStringWithTrailing { - /// The source error. - source: std::ffi::FromBytesWithNulError, - }, -} - -impl std::error::Error for Error { - fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { - use Error::*; - match *self { - CreateCString { ref source } => Some(source), - CreateCStringWithTrailing { ref source } => Some(source), - LoadLibraryExW { ref source } => Some(&source.0), - GetModuleHandleExW { ref source } => Some(&source.0), - GetProcAddress { ref source } => Some(&source.0), - FreeLibrary { ref source } => Some(&source.0), - _ => None, - } - } -} - -impl std::fmt::Display for Error { - fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - use Error::*; - match *self { - DlOpen { ref desc } => write!(f, "{}", desc.0.to_string_lossy()), - DlOpenUnknown => write!(f, "dlopen failed, but system did not report the error"), - DlSym { ref desc } => write!(f, "{}", desc.0.to_string_lossy()), - DlSymUnknown => write!(f, "dlsym failed, but system did not report the error"), - DlClose { ref desc } => write!(f, "{}", desc.0.to_string_lossy()), - DlCloseUnknown => write!(f, "dlclose failed, but system did not report the error"), - LoadLibraryExW { .. } => write!(f, "LoadLibraryExW failed"), - LoadLibraryExWUnknown => write!( - f, - "LoadLibraryExW failed, but system did not report the error" - ), - GetModuleHandleExW { .. } => write!(f, "GetModuleHandleExW failed"), - GetModuleHandleExWUnknown => write!( - f, - "GetModuleHandleExWUnknown failed, but system did not report the error" - ), - GetProcAddress { .. 
} => write!(f, "GetProcAddress failed"),
-            GetProcAddressUnknown => write!(
-                f,
-                "GetProcAddress failed, but system did not report the error"
-            ),
-            FreeLibrary { .. } => write!(f, "FreeLibrary failed"),
-            FreeLibraryUnknown => {
-                write!(f, "FreeLibrary failed, but system did not report the error")
-            }
-            CreateCString { .. } => write!(f, "could not create a C string from bytes"),
-            CreateCStringWithTrailing { .. } => write!(
-                f,
-                "could not create a C string from bytes with trailing null"
-            ),
-            IncompatibleSize => write!(f, "requested type cannot possibly work"),
-        }
-    }
-}
diff --git a/vendor/libloading/src/lib.rs b/vendor/libloading/src/lib.rs
deleted file mode 100644
index d1e2ced62f180d..00000000000000
--- a/vendor/libloading/src/lib.rs
+++ /dev/null
@@ -1,81 +0,0 @@
-//! Bindings around the platform's dynamic library loading primitives with greatly improved memory safety.
-//!
-//! Using this library allows the loading of [dynamic libraries](struct.Library.html), also known as
-//! shared libraries, and the use of the functions and static variables they contain.
-//!
-//! The `libloading` crate exposes a cross-platform interface to load a library and make use of its
-//! contents, but little is done to hide the differences in behaviour between platforms.
-//! The API documentation strives to document such differences as much as possible.
-//!
-//! Platform-specific APIs are also available in the [`os`](crate::os) module. These APIs are more
-//! flexible, but less safe.
-//!
-//! # Installation
-//!
-//! Add the `libloading` library to your dependencies in `Cargo.toml`:
-//!
-//! ```toml
-//! [dependencies]
-//! libloading = "0.8"
-//! ```
-//!
-//! # Usage
-//!
-//! In your code, run the following:
-//!
-//! ```no_run
-//! fn call_dynamic() -> Result<u32, Box<dyn std::error::Error>> {
-//!     unsafe {
-//!         let lib = libloading::Library::new("/path/to/liblibrary.so")?;
-//!         let func: libloading::Symbol<unsafe extern fn() -> u32> = lib.get(b"my_func")?;
-//!         Ok(func())
-//!     }
-//! }
-//! ```
-//!
-//! The compiler will ensure that the loaded function will not outlive the `Library` from which it comes,
-//! preventing the most common memory-safety issues.
-#![cfg_attr(
-    any(unix, windows),
-    deny(missing_docs, clippy::all, unreachable_pub, unused)
-)]
-#![cfg_attr(libloading_docs, feature(doc_cfg))]
-
-pub mod changelog;
-mod error;
-pub mod os;
-#[cfg(any(unix, windows, libloading_docs))]
-mod safe;
-mod util;
-
-pub use self::error::Error;
-#[cfg(any(unix, windows, libloading_docs))]
-pub use self::safe::{Library, Symbol};
-use std::env::consts::{DLL_PREFIX, DLL_SUFFIX};
-use std::ffi::{OsStr, OsString};
-
-/// Converts a library name to a filename generally appropriate for use on the system.
-///
-/// This function will prepend prefixes (such as `lib`) and suffixes (such as `.so`) to the library
-/// `name` to construct the filename.
-///
-/// # Examples
-///
-/// It can be used to load global libraries in a platform independent manner:
-///
-/// ```
-/// use libloading::{Library, library_filename};
-/// // Will attempt to load `libLLVM.so` on Linux, `libLLVM.dylib` on macOS and `LLVM.dll` on
-/// // Windows.
-/// let library = unsafe { -/// Library::new(library_filename("LLVM")) -/// }; -/// ``` -pub fn library_filename>(name: S) -> OsString { - let name = name.as_ref(); - let mut string = OsString::with_capacity(name.len() + DLL_PREFIX.len() + DLL_SUFFIX.len()); - string.push(DLL_PREFIX); - string.push(name); - string.push(DLL_SUFFIX); - string -} diff --git a/vendor/libloading/src/os/mod.rs b/vendor/libloading/src/os/mod.rs deleted file mode 100644 index 710353f5ef3852..00000000000000 --- a/vendor/libloading/src/os/mod.rs +++ /dev/null @@ -1,27 +0,0 @@ -//! Unsafe but flexible platform-specific bindings to dynamic library loading facilities. -//! -//! These modules expose more extensive and powerful bindings to the dynamic -//! library loading facilities. Use of these bindings come at the cost of less (in most cases, -//! none at all) safety guarantees, which are provided by the top-level bindings. -//! -//! # Examples -//! -//! Using these modules will likely involve conditional compilation: -//! -//! ```ignore -//! # extern crate libloading; -//! #[cfg(unix)] -//! use libloading::os::unix::*; -//! #[cfg(windows)] -//! use libloading::os::windows::*; -//! ``` - -/// UNIX implementation of dynamic library loading. -#[cfg(any(unix, libloading_docs))] -#[cfg_attr(libloading_docs, doc(cfg(unix)))] -pub mod unix; - -/// Windows implementation of dynamic library loading. -#[cfg(any(windows, libloading_docs))] -#[cfg_attr(libloading_docs, doc(cfg(windows)))] -pub mod windows; diff --git a/vendor/libloading/src/os/unix/consts.rs b/vendor/libloading/src/os/unix/consts.rs deleted file mode 100644 index 4ae00592dad5a1..00000000000000 --- a/vendor/libloading/src/os/unix/consts.rs +++ /dev/null @@ -1,265 +0,0 @@ -use std::os::raw::c_int; - -/// Perform lazy binding. -/// -/// Relocations shall be performed at an implementation-defined time, ranging from the time -/// of the [`Library::open`] call until the first reference to a given symbol occurs. -/// Specifying `RTLD_LAZY` should improve performance on implementations supporting dynamic -/// symbol binding since a process might not reference all of the symbols in an executable -/// object file. And, for systems supporting dynamic symbol resolution for normal process -/// execution, this behaviour mimics the normal handling of process execution. -/// -/// Conflicts with [`RTLD_NOW`]. -/// -/// [`Library::open`]: crate::os::unix::Library::open -pub const RTLD_LAZY: c_int = posix::RTLD_LAZY; - -/// Perform eager binding. -/// -/// All necessary relocations shall be performed when the executable object file is first -/// loaded. This may waste some processing if relocations are performed for symbols -/// that are never referenced. This behaviour may be useful for applications that need to -/// know that all symbols referenced during execution will be available before -/// [`Library::open`] returns. -/// -/// Conflicts with [`RTLD_LAZY`]. -/// -/// [`Library::open`]: crate::os::unix::Library::open -pub const RTLD_NOW: c_int = posix::RTLD_NOW; - -/// Make loaded symbols available for resolution globally. -/// -/// The executable object file's symbols shall be made available for relocation processing of any -/// other executable object file. In addition, calls to [`Library::get`] on `Library` obtained from -/// [`Library::this`] allows executable object files loaded with this mode to be searched. 
-/// -/// [`Library::this`]: crate::os::unix::Library::this -/// [`Library::get`]: crate::os::unix::Library::get -pub const RTLD_GLOBAL: c_int = posix::RTLD_GLOBAL; - -/// Load symbols into an isolated namespace. -/// -/// The executable object file's symbols shall not be made available for relocation processing of -/// any other executable object file. This mode of operation is most appropriate for e.g. plugins. -pub const RTLD_LOCAL: c_int = posix::RTLD_LOCAL; - -#[cfg(all(libloading_docs, not(unix)))] -mod posix { - use super::c_int; - pub(super) const RTLD_LAZY: c_int = !0; - pub(super) const RTLD_NOW: c_int = !0; - pub(super) const RTLD_GLOBAL: c_int = !0; - pub(super) const RTLD_LOCAL: c_int = !0; -} - -#[cfg(any(not(libloading_docs), unix))] -mod posix { - extern crate cfg_if; - use self::cfg_if::cfg_if; - use super::c_int; - cfg_if! { - if #[cfg(target_os = "haiku")] { - pub(super) const RTLD_LAZY: c_int = 0; - } else if #[cfg(target_os = "aix")] { - pub(super) const RTLD_LAZY: c_int = 4; - } else if #[cfg(any( - target_os = "linux", - target_os = "android", - target_os = "emscripten", - - target_os = "macos", - target_os = "ios", - target_os = "tvos", - target_os = "visionos", - target_os = "watchos", - - target_os = "freebsd", - target_os = "dragonfly", - target_os = "openbsd", - target_os = "netbsd", - - target_os = "solaris", - target_os = "illumos", - - target_env = "uclibc", - target_env = "newlib", - - target_os = "fuchsia", - target_os = "redox", - target_os = "nto", - target_os = "hurd", - target_os = "cygwin", - ))] { - pub(super) const RTLD_LAZY: c_int = 1; - } else { - compile_error!( - "Target has no known `RTLD_LAZY` value. Please submit an issue or PR adding it." - ); - } - } - - cfg_if! { - if #[cfg(target_os = "haiku")] { - pub(super) const RTLD_NOW: c_int = 1; - } else if #[cfg(any( - target_os = "linux", - all(target_os = "android", target_pointer_width = "64"), - target_os = "emscripten", - - target_os = "macos", - target_os = "ios", - target_os = "tvos", - target_os = "visionos", - target_os = "watchos", - - target_os = "freebsd", - target_os = "dragonfly", - target_os = "openbsd", - target_os = "netbsd", - - target_os = "aix", - target_os = "solaris", - target_os = "illumos", - - target_env = "uclibc", - target_env = "newlib", - - target_os = "fuchsia", - target_os = "redox", - target_os = "nto", - target_os = "hurd", - target_os = "cygwin", - ))] { - pub(super) const RTLD_NOW: c_int = 2; - } else if #[cfg(all(target_os = "android",target_pointer_width = "32"))] { - pub(super) const RTLD_NOW: c_int = 0; - } else { - compile_error!( - "Target has no known `RTLD_NOW` value. Please submit an issue or PR adding it." - ); - } - } - - cfg_if! 
{ - if #[cfg(any( - target_os = "haiku", - all(target_os = "android",target_pointer_width = "32"), - ))] { - pub(super) const RTLD_GLOBAL: c_int = 2; - } else if #[cfg(target_os = "aix")] { - pub(super) const RTLD_GLOBAL: c_int = 0x10000; - } else if #[cfg(any( - target_env = "uclibc", - all(target_os = "linux", target_arch = "mips"), - all(target_os = "linux", target_arch = "mips64"), - target_os = "cygwin", - ))] { - pub(super) const RTLD_GLOBAL: c_int = 4; - } else if #[cfg(any( - target_os = "macos", - target_os = "ios", - target_os = "tvos", - target_os = "visionos", - target_os = "watchos", - ))] { - pub(super) const RTLD_GLOBAL: c_int = 8; - } else if #[cfg(any( - target_os = "linux", - all(target_os = "android", target_pointer_width = "64"), - target_os = "emscripten", - - target_os = "freebsd", - target_os = "dragonfly", - target_os = "openbsd", - target_os = "netbsd", - - target_os = "solaris", - target_os = "illumos", - - target_env = "newlib", - - target_os = "fuchsia", - target_os = "redox", - target_os = "nto", - target_os = "hurd", - ))] { - pub(super) const RTLD_GLOBAL: c_int = 0x100; - } else { - compile_error!( - "Target has no known `RTLD_GLOBAL` value. Please submit an issue or PR adding it." - ); - } - } - - cfg_if! { - if #[cfg(any( - target_os = "netbsd", - target_os = "nto", - ))] { - pub(super) const RTLD_LOCAL: c_int = 0x200; - } else if #[cfg(target_os = "aix")] { - pub(super) const RTLD_LOCAL: c_int = 0x80000; - } else if #[cfg(any( - target_os = "macos", - target_os = "ios", - target_os = "tvos", - target_os = "visionos", - target_os = "watchos", - ))] { - pub(super) const RTLD_LOCAL: c_int = 4; - } else if #[cfg(any( - target_os = "linux", - target_os = "android", - target_os = "emscripten", - - target_os = "freebsd", - target_os = "dragonfly", - target_os = "openbsd", - - target_os = "haiku", - - target_os = "solaris", - target_os = "illumos", - - target_env = "uclibc", - target_env = "newlib", - - target_os = "fuchsia", - target_os = "redox", - target_os = "hurd", - target_os = "cygwin", - ))] { - pub(super) const RTLD_LOCAL: c_int = 0; - } else { - compile_error!( - "Target has no known `RTLD_LOCAL` value. Please submit an issue or PR adding it." - ); - } - } -} - -// Other constants that exist but are not bound because they are platform-specific (non-posix) -// extensions. Some of these constants are only relevant to `dlsym` or `dlmopen` calls. -// -// RTLD_CONFGEN -// RTLD_DEFAULT -// RTLD_DI_CONFIGADDR -// RTLD_DI_LINKMAP -// RTLD_DI_LMID -// RTLD_DI_ORIGIN -// RTLD_DI_PROFILENAME -// RTLD_DI_PROFILEOUT -// RTLD_DI_SERINFO -// RTLD_DI_SERINFOSIZE -// RTLD_DI_TLS_DATA -// RTLD_DI_TLS_MODID -// RTLD_FIRST -// RTLD_GROUP -// RTLD_NEXT -// RTLD_PARENT -// RTLD_PROBE -// RTLD_SELF -// RTLD_WORLD -// RTLD_NODELETE -// RTLD_NOLOAD -// RTLD_DEEPBIND diff --git a/vendor/libloading/src/os/unix/mod.rs b/vendor/libloading/src/os/unix/mod.rs deleted file mode 100644 index 0e42c50d9b19dd..00000000000000 --- a/vendor/libloading/src/os/unix/mod.rs +++ /dev/null @@ -1,485 +0,0 @@ -// A hack for docs.rs to build documentation that has both windows and linux documentation in the -// same rustdoc build visible. 
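For orientation, a minimal hedged sketch of how the flag constants above combine with the platform-specific `open` described further down in this module (assuming the vendored libloading 0.8 `os::unix` API; the library path is a placeholder):

```rust
// Eagerly bind and make symbols globally visible; assumes libloading 0.8's
// os::unix API. The plugin path below is a placeholder, not a real artifact.
#[cfg(unix)]
fn open_plugin() -> Result<libloading::os::unix::Library, libloading::Error> {
    use libloading::os::unix::{Library, RTLD_GLOBAL, RTLD_NOW};
    // Safety: loading a library runs its initialiser routines, exactly as the
    // documentation above warns; the caller must be prepared for that.
    unsafe { Library::open(Some("/path/to/libplugin.so"), RTLD_NOW | RTLD_GLOBAL) }
}
```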
-#[cfg(all(libloading_docs, not(unix)))] -mod unix_imports {} -#[cfg(any(not(libloading_docs), unix))] -mod unix_imports { - pub(super) use std::os::unix::ffi::OsStrExt; -} - -pub use self::consts::*; -use self::unix_imports::*; -use std::ffi::{CStr, OsStr}; -use std::os::raw; -use std::{fmt, marker, mem, ptr}; -use util::{cstr_cow_from_bytes, ensure_compatible_types}; - -mod consts; - -/// Run code and handle errors reported by `dlerror`. -/// -/// This function first executes the `closure` function containing calls to the functions that -/// report their errors via `dlerror`. This closure may return either `None` or `Some(*)` to -/// further affect operation of this function. -/// -/// In case the `closure` returns `None`, `with_dlerror` inspects the `dlerror`. `dlerror` may -/// decide to not provide any error description, in which case `Err(None)` is returned to the -/// caller. Otherwise the `error` callback is invoked to allow inspection and conversion of the -/// error message. The conversion result is returned as `Err(Some(Error))`. -/// -/// If the operations that report their errors via `dlerror` were all successful, `closure` should -/// return `Some(T)` instead. In this case `dlerror` is not inspected at all. -/// -/// # Notes -/// -/// The whole `dlerror` handling scheme is done via setting and querying some global state. For -/// that reason it is not safe to use dynamic library loading in MT-capable environment at all. -/// Only in POSIX 2008+TC1 a thread-local state was allowed for `dlerror`, making the dl* family of -/// functions possibly MT-safe, depending on the implementation of `dlerror`. -/// -/// In practice (as of 2020-04-01) most of the widely used targets use a thread-local for error -/// state and have been doing so for a long time. -pub fn with_dlerror(closure: F, error: fn(&CStr) -> Error) -> Result> -where - F: FnOnce() -> Option, -{ - // We used to guard all uses of dl* functions with our own mutex. This made them safe to use in - // MT programs provided the only way a program used dl* was via this library. However, it also - // had a number of downsides or cases where it failed to handle the problems. For instance, - // if any other library called `dlerror` internally concurrently with `libloading` things would - // still go awry. - // - // On platforms where `dlerror` is still MT-unsafe, `dlsym` (`Library::get`) can spuriously - // succeed and return a null pointer for a symbol when the actual symbol look-up operation - // fails. Instances where the actual symbol _could_ be `NULL` are platform specific. For - // instance on GNU glibc based-systems (an excerpt from dlsym(3)): - // - // > The value of a symbol returned by dlsym() will never be NULL if the shared object is the - // > result of normal compilation, since a global symbol is never placed at the NULL - // > address. There are nevertheless cases where a lookup using dlsym() may return NULL as the - // > value of a symbol. For example, the symbol value may be the result of a GNU indirect - // > function (IFUNC) resolver function that returns NULL as the resolved value. - - // While we could could call `dlerror` here to clear the previous error value, only the `dlsym` - // call depends on it being cleared beforehand and only in some cases too. We will instead - // clear the error inside the dlsym binding instead. - // - // In all the other cases, clearing the error here will only be hiding misuse of these bindings - // or a bug in implementation of dl* family of functions. 
- closure().ok_or_else(|| unsafe { - // This code will only get executed if the `closure` returns `None`. - let dlerror_str = dlerror(); - if dlerror_str.is_null() { - // In non-dlsym case this may happen when there’re bugs in our bindings or there’s - // non-libloading user of libdl; possibly in another thread. - None - } else { - // You can’t even rely on error string being static here; call to subsequent dlerror - // may invalidate or overwrite the error message. Why couldn’t they simply give up the - // ownership over the message? - // TODO: should do locale-aware conversion here. OTOH Rust doesn’t seem to work well in - // any system that uses non-utf8 locale, so I doubt there’s a problem here. - Some(error(CStr::from_ptr(dlerror_str))) - // Since we do a copy of the error string above, maybe we should call dlerror again to - // let libdl know it may free its copy of the string now? - } - }) -} - -/// A platform-specific counterpart of the cross-platform [`Library`](crate::Library). -pub struct Library { - handle: *mut raw::c_void, -} - -unsafe impl Send for Library {} - -// That being said... this section in the volume 2 of POSIX.1-2008 states: -// -// > All functions defined by this volume of POSIX.1-2008 shall be thread-safe, except that the -// > following functions need not be thread-safe. -// -// With notable absence of any dl* function other than dlerror in the list. By “this volume” -// I suppose they refer precisely to the “volume 2”. dl* family of functions are specified -// by this same volume, so the conclusion is indeed that dl* functions are required by POSIX -// to be thread-safe. Great! -// -// See for more details: -// -// * https://github.com/nagisa/rust_libloading/pull/17 -// * http://pubs.opengroup.org/onlinepubs/9699919799/functions/V2_chap02.html#tag_15_09_01 -unsafe impl Sync for Library {} - -impl Library { - /// Find and eagerly load a shared library (module). - /// - /// If the `filename` contains a [path separator], the `filename` is interpreted as a `path` to - /// a file. Otherwise, platform-specific algorithms are employed to find a library with a - /// matching file name. - /// - /// This is equivalent to [Library::open](filename, [RTLD_LAZY] | [RTLD_LOCAL]). - /// - /// [path separator]: std::path::MAIN_SEPARATOR - /// - /// # Safety - /// - /// When a library is loaded, initialisation routines contained within the library are executed. - /// For the purposes of safety, the execution of these routines is conceptually the same calling an - /// unknown foreign function and may impose arbitrary requirements on the caller for the call - /// to be sound. - /// - /// Additionally, the callers of this function must also ensure that execution of the - /// termination routines contained within the library is safe as well. These routines may be - /// executed when the library is unloaded. - #[inline] - pub unsafe fn new>(filename: P) -> Result { - Library::open(Some(filename), RTLD_LAZY | RTLD_LOCAL) - } - - /// Load the `Library` representing the current executable. - /// - /// [`Library::get`] calls of the returned `Library` will look for symbols in following - /// locations in order: - /// - /// 1. The original program image; - /// 2. Any executable object files (e.g. shared libraries) loaded at program startup; - /// 3. Any executable object files loaded at runtime (e.g. via other `Library::new` calls or via - /// calls to the `dlopen` function). 
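As an illustration of the `Library::new` / `get` pattern these docs describe, a small sketch with the failure surfaced through the crate's `Error` enum shown earlier (the path, the symbol name, and the `fn() -> u32` signature are placeholder assumptions):

```rust
// Load a library, resolve one symbol, and report failures via the Error
// enum's Display impl. Path, symbol name, and signature are placeholders.
fn call_dynamic(path: &str) -> Result<u32, libloading::Error> {
    unsafe {
        let lib = libloading::Library::new(path)?;
        let func: libloading::Symbol<unsafe extern "C" fn() -> u32> = lib.get(b"my_func")?;
        Ok(func())
    }
}

fn demo() {
    match call_dynamic("/path/to/liblibrary.so") {
        Ok(v) => println!("my_func() returned {v}"),
        Err(e) => eprintln!("loading failed: {e}"), // Display impl of Error above
    }
}
```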
- /// - /// Note that the behaviour of a `Library` loaded with this method is different from that of - /// Libraries loaded with [`os::windows::Library::this`]. - /// - /// This is equivalent to [Library::open](None, [RTLD_LAZY] | [RTLD_LOCAL]). - /// - /// [`os::windows::Library::this`]: crate::os::windows::Library::this - #[inline] - pub fn this() -> Library { - unsafe { - // SAFE: this does not load any new shared library images, no danger in it executing - // initialiser routines. - Library::open(None::<&OsStr>, RTLD_LAZY | RTLD_LOCAL).expect("this should never fail") - } - } - - /// Find and load an executable object file (shared library). - /// - /// See documentation for [`Library::this`] for further description of the behaviour - /// when the `filename` is `None`. Otherwise see [`Library::new`]. - /// - /// Corresponds to `dlopen(filename, flags)`. - /// - /// # Safety - /// - /// When a library is loaded, initialisation routines contained within the library are executed. - /// For the purposes of safety, the execution of these routines is conceptually the same calling an - /// unknown foreign function and may impose arbitrary requirements on the caller for the call - /// to be sound. - /// - /// Additionally, the callers of this function must also ensure that execution of the - /// termination routines contained within the library is safe as well. These routines may be - /// executed when the library is unloaded. - pub unsafe fn open

(filename: Option

(&self, predicate: P) -> Option - where - P: Fn(Self::Item) -> bool; - /// Get the byte offset from the element's position in the stream - fn slice_index(&self, count: usize) -> Result; -} - -/// Abstracts slicing operations -pub trait InputTake: Sized { - /// Returns a slice of `count` bytes. panics if count > length - fn take(&self, count: usize) -> Self; - /// Split the stream at the `count` byte offset. panics if count > length - fn take_split(&self, count: usize) -> (Self, Self); -} - -impl<'a> InputIter for &'a [u8] { - type Item = u8; - type Iter = Enumerate; - type IterElem = Copied>; - - #[inline] - fn iter_indices(&self) -> Self::Iter { - self.iter_elements().enumerate() - } - #[inline] - fn iter_elements(&self) -> Self::IterElem { - self.iter().copied() - } - #[inline] - fn position

(&self, predicate: P) -> Option - where - P: Fn(Self::Item) -> bool, - { - self.iter().position(|b| predicate(*b)) - } - #[inline] - fn slice_index(&self, count: usize) -> Result { - if self.len() >= count { - Ok(count) - } else { - Err(Needed::new(count - self.len())) - } - } -} - -impl<'a> InputTake for &'a [u8] { - #[inline] - fn take(&self, count: usize) -> Self { - &self[0..count] - } - #[inline] - fn take_split(&self, count: usize) -> (Self, Self) { - let (prefix, suffix) = self.split_at(count); - (suffix, prefix) - } -} - -impl<'a> InputIter for &'a str { - type Item = char; - type Iter = CharIndices<'a>; - type IterElem = Chars<'a>; - #[inline] - fn iter_indices(&self) -> Self::Iter { - self.char_indices() - } - #[inline] - fn iter_elements(&self) -> Self::IterElem { - self.chars() - } - fn position

(&self, predicate: P) -> Option - where - P: Fn(Self::Item) -> bool, - { - for (o, c) in self.char_indices() { - if predicate(c) { - return Some(o); - } - } - None - } - #[inline] - fn slice_index(&self, count: usize) -> Result { - let mut cnt = 0; - for (index, _) in self.char_indices() { - if cnt == count { - return Ok(index); - } - cnt += 1; - } - if cnt == count { - return Ok(self.len()); - } - Err(Needed::Unknown) - } -} - -impl<'a> InputTake for &'a str { - #[inline] - fn take(&self, count: usize) -> Self { - &self[..count] - } - - // return byte index - #[inline] - fn take_split(&self, count: usize) -> (Self, Self) { - let (prefix, suffix) = self.split_at(count); - (suffix, prefix) - } -} - -/// Dummy trait used for default implementations (currently only used for `InputTakeAtPosition` and `Compare`). -/// -/// When implementing a custom input type, it is possible to use directly the -/// default implementation: If the input type implements `InputLength`, `InputIter`, -/// `InputTake` and `Clone`, you can implement `UnspecializedInput` and get -/// a default version of `InputTakeAtPosition` and `Compare`. -/// -/// For performance reasons, you might want to write a custom implementation of -/// `InputTakeAtPosition` (like the one for `&[u8]`). -pub trait UnspecializedInput {} - -/// Methods to take as much input as possible until the provided function returns true for the current element. -/// -/// A large part of nom's basic parsers are built using this trait. -pub trait InputTakeAtPosition: Sized { - /// The current input type is a sequence of that `Item` type. - /// - /// Example: `u8` for `&[u8]` or `char` for `&str` - type Item; - - /// Looks for the first element of the input type for which the condition returns true, - /// and returns the input up to this position. - /// - /// *streaming version*: If no element is found matching the condition, this will return `Incomplete` - fn split_at_position>(&self, predicate: P) -> IResult - where - P: Fn(Self::Item) -> bool; - - /// Looks for the first element of the input type for which the condition returns true - /// and returns the input up to this position. - /// - /// Fails if the produced slice is empty. - /// - /// *streaming version*: If no element is found matching the condition, this will return `Incomplete` - fn split_at_position1>( - &self, - predicate: P, - e: ErrorKind, - ) -> IResult - where - P: Fn(Self::Item) -> bool; - - /// Looks for the first element of the input type for which the condition returns true, - /// and returns the input up to this position. - /// - /// *complete version*: If no element is found matching the condition, this will return the whole input - fn split_at_position_complete>( - &self, - predicate: P, - ) -> IResult - where - P: Fn(Self::Item) -> bool; - - /// Looks for the first element of the input type for which the condition returns true - /// and returns the input up to this position. - /// - /// Fails if the produced slice is empty. 
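As a hedged illustration (assuming nom 7's public API), the `complete` character parsers are thin wrappers over these methods; `alpha1` and `digit1` below end up calling the `*_complete` variants on the `&str` input:

```rust
// Assumes nom 7. alpha1/digit1 are built on the InputTakeAtPosition methods
// declared above (the *_complete variants for the `complete` submodule).
use nom::character::complete::{alpha1, digit1};
use nom::sequence::pair;
use nom::IResult;

fn ident_then_number(i: &str) -> IResult<&str, (&str, &str)> {
    pair(alpha1, digit1)(i)
}

// ident_then_number("abc123;") == Ok((";", ("abc", "123")))
```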
- /// - /// *complete version*: If no element is found matching the condition, this will return the whole input - fn split_at_position1_complete>( - &self, - predicate: P, - e: ErrorKind, - ) -> IResult - where - P: Fn(Self::Item) -> bool; -} - -impl InputTakeAtPosition - for T -{ - type Item = ::Item; - - fn split_at_position>(&self, predicate: P) -> IResult - where - P: Fn(Self::Item) -> bool, - { - match self.position(predicate) { - Some(n) => Ok(self.take_split(n)), - None => Err(Err::Incomplete(Needed::new(1))), - } - } - - fn split_at_position1>( - &self, - predicate: P, - e: ErrorKind, - ) -> IResult - where - P: Fn(Self::Item) -> bool, - { - match self.position(predicate) { - Some(0) => Err(Err::Error(E::from_error_kind(self.clone(), e))), - Some(n) => Ok(self.take_split(n)), - None => Err(Err::Incomplete(Needed::new(1))), - } - } - - fn split_at_position_complete>( - &self, - predicate: P, - ) -> IResult - where - P: Fn(Self::Item) -> bool, - { - match self.split_at_position(predicate) { - Err(Err::Incomplete(_)) => Ok(self.take_split(self.input_len())), - res => res, - } - } - - fn split_at_position1_complete>( - &self, - predicate: P, - e: ErrorKind, - ) -> IResult - where - P: Fn(Self::Item) -> bool, - { - match self.split_at_position1(predicate, e) { - Err(Err::Incomplete(_)) => { - if self.input_len() == 0 { - Err(Err::Error(E::from_error_kind(self.clone(), e))) - } else { - Ok(self.take_split(self.input_len())) - } - } - res => res, - } - } -} - -impl<'a> InputTakeAtPosition for &'a [u8] { - type Item = u8; - - fn split_at_position>(&self, predicate: P) -> IResult - where - P: Fn(Self::Item) -> bool, - { - match self.iter().position(|c| predicate(*c)) { - Some(i) => Ok(self.take_split(i)), - None => Err(Err::Incomplete(Needed::new(1))), - } - } - - fn split_at_position1>( - &self, - predicate: P, - e: ErrorKind, - ) -> IResult - where - P: Fn(Self::Item) -> bool, - { - match self.iter().position(|c| predicate(*c)) { - Some(0) => Err(Err::Error(E::from_error_kind(self, e))), - Some(i) => Ok(self.take_split(i)), - None => Err(Err::Incomplete(Needed::new(1))), - } - } - - fn split_at_position_complete>( - &self, - predicate: P, - ) -> IResult - where - P: Fn(Self::Item) -> bool, - { - match self.iter().position(|c| predicate(*c)) { - Some(i) => Ok(self.take_split(i)), - None => Ok(self.take_split(self.input_len())), - } - } - - fn split_at_position1_complete>( - &self, - predicate: P, - e: ErrorKind, - ) -> IResult - where - P: Fn(Self::Item) -> bool, - { - match self.iter().position(|c| predicate(*c)) { - Some(0) => Err(Err::Error(E::from_error_kind(self, e))), - Some(i) => Ok(self.take_split(i)), - None => { - if self.is_empty() { - Err(Err::Error(E::from_error_kind(self, e))) - } else { - Ok(self.take_split(self.input_len())) - } - } - } - } -} - -impl<'a> InputTakeAtPosition for &'a str { - type Item = char; - - fn split_at_position>(&self, predicate: P) -> IResult - where - P: Fn(Self::Item) -> bool, - { - match self.find(predicate) { - // find() returns a byte index that is already in the slice at a char boundary - Some(i) => unsafe { Ok((self.get_unchecked(i..), self.get_unchecked(..i))) }, - None => Err(Err::Incomplete(Needed::new(1))), - } - } - - fn split_at_position1>( - &self, - predicate: P, - e: ErrorKind, - ) -> IResult - where - P: Fn(Self::Item) -> bool, - { - match self.find(predicate) { - Some(0) => Err(Err::Error(E::from_error_kind(self, e))), - // find() returns a byte index that is already in the slice at a char boundary - Some(i) => unsafe { 
Ok((self.get_unchecked(i..), self.get_unchecked(..i))) }, - None => Err(Err::Incomplete(Needed::new(1))), - } - } - - fn split_at_position_complete>( - &self, - predicate: P, - ) -> IResult - where - P: Fn(Self::Item) -> bool, - { - match self.find(predicate) { - // find() returns a byte index that is already in the slice at a char boundary - Some(i) => unsafe { Ok((self.get_unchecked(i..), self.get_unchecked(..i))) }, - // the end of slice is a char boundary - None => unsafe { - Ok(( - self.get_unchecked(self.len()..), - self.get_unchecked(..self.len()), - )) - }, - } - } - - fn split_at_position1_complete>( - &self, - predicate: P, - e: ErrorKind, - ) -> IResult - where - P: Fn(Self::Item) -> bool, - { - match self.find(predicate) { - Some(0) => Err(Err::Error(E::from_error_kind(self, e))), - // find() returns a byte index that is already in the slice at a char boundary - Some(i) => unsafe { Ok((self.get_unchecked(i..), self.get_unchecked(..i))) }, - None => { - if self.is_empty() { - Err(Err::Error(E::from_error_kind(self, e))) - } else { - // the end of slice is a char boundary - unsafe { - Ok(( - self.get_unchecked(self.len()..), - self.get_unchecked(..self.len()), - )) - } - } - } - } - } -} - -/// Indicates whether a comparison was successful, an error, or -/// if more data was needed -#[derive(Debug, PartialEq)] -pub enum CompareResult { - /// Comparison was successful - Ok, - /// We need more data to be sure - Incomplete, - /// Comparison failed - Error, -} - -/// Abstracts comparison operations -pub trait Compare { - /// Compares self to another value for equality - fn compare(&self, t: T) -> CompareResult; - /// Compares self to another value for equality - /// independently of the case. - /// - /// Warning: for `&str`, the comparison is done - /// by lowercasing both strings and comparing - /// the result. 
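A small sketch (again assuming nom 7) of what `CompareResult::Incomplete` means in practice: a streaming `tag`, which is driven by this `Compare` trait, reports `Incomplete` when the input is a strict prefix of the expected token:

```rust
// Assumes nom 7: streaming `tag` is driven by the Compare trait above and
// returns Incomplete when the input is a strict prefix of the token.
use nom::bytes::streaming::tag;
use nom::{Err, IResult};

fn hello(i: &str) -> IResult<&str, &str> {
    tag("hello")(i)
}

fn demo() {
    assert_eq!(hello("hello world"), Ok((" world", "hello")));
    assert!(matches!(hello("hel"), Err(Err::Incomplete(_))));
}
```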
This is a temporary solution until - /// a better one appears - fn compare_no_case(&self, t: T) -> CompareResult; -} - -fn lowercase_byte(c: u8) -> u8 { - match c { - b'A'..=b'Z' => c - b'A' + b'a', - _ => c, - } -} - -impl<'a, 'b> Compare<&'b [u8]> for &'a [u8] { - #[inline(always)] - fn compare(&self, t: &'b [u8]) -> CompareResult { - let pos = self.iter().zip(t.iter()).position(|(a, b)| a != b); - - match pos { - Some(_) => CompareResult::Error, - None => { - if self.len() >= t.len() { - CompareResult::Ok - } else { - CompareResult::Incomplete - } - } - } - - /* - let len = self.len(); - let blen = t.len(); - let m = if len < blen { len } else { blen }; - let reduced = &self[..m]; - let b = &t[..m]; - - if reduced != b { - CompareResult::Error - } else if m < blen { - CompareResult::Incomplete - } else { - CompareResult::Ok - } - */ - } - - #[inline(always)] - fn compare_no_case(&self, t: &'b [u8]) -> CompareResult { - if self - .iter() - .zip(t) - .any(|(a, b)| lowercase_byte(*a) != lowercase_byte(*b)) - { - CompareResult::Error - } else if self.len() < t.len() { - CompareResult::Incomplete - } else { - CompareResult::Ok - } - } -} - -impl< - T: InputLength + InputIter + InputTake + UnspecializedInput, - O: InputLength + InputIter + InputTake, - > Compare for T -{ - #[inline(always)] - fn compare(&self, t: O) -> CompareResult { - let pos = self - .iter_elements() - .zip(t.iter_elements()) - .position(|(a, b)| a != b); - - match pos { - Some(_) => CompareResult::Error, - None => { - if self.input_len() >= t.input_len() { - CompareResult::Ok - } else { - CompareResult::Incomplete - } - } - } - } - - #[inline(always)] - fn compare_no_case(&self, t: O) -> CompareResult { - if self - .iter_elements() - .zip(t.iter_elements()) - .any(|(a, b)| lowercase_byte(a) != lowercase_byte(b)) - { - CompareResult::Error - } else if self.input_len() < t.input_len() { - CompareResult::Incomplete - } else { - CompareResult::Ok - } - } -} - -impl<'a, 'b> Compare<&'b str> for &'a [u8] { - #[inline(always)] - fn compare(&self, t: &'b str) -> CompareResult { - self.compare(AsBytes::as_bytes(t)) - } - #[inline(always)] - fn compare_no_case(&self, t: &'b str) -> CompareResult { - self.compare_no_case(AsBytes::as_bytes(t)) - } -} - -impl<'a, 'b> Compare<&'b str> for &'a str { - #[inline(always)] - fn compare(&self, t: &'b str) -> CompareResult { - self.as_bytes().compare(t.as_bytes()) - } - - //FIXME: this version is too simple and does not use the current locale - #[inline(always)] - fn compare_no_case(&self, t: &'b str) -> CompareResult { - let pos = self - .chars() - .zip(t.chars()) - .position(|(a, b)| a.to_lowercase().ne(b.to_lowercase())); - - match pos { - Some(_) => CompareResult::Error, - None => { - if self.len() >= t.len() { - CompareResult::Ok - } else { - CompareResult::Incomplete - } - } - } - } -} - -impl<'a, 'b> Compare<&'b [u8]> for &'a str { - #[inline(always)] - fn compare(&self, t: &'b [u8]) -> CompareResult { - AsBytes::as_bytes(self).compare(t) - } - #[inline(always)] - fn compare_no_case(&self, t: &'b [u8]) -> CompareResult { - AsBytes::as_bytes(self).compare_no_case(t) - } -} - -/// Look for a token in self -pub trait FindToken { - /// Returns true if self contains the token - fn find_token(&self, token: T) -> bool; -} - -impl<'a> FindToken for &'a [u8] { - fn find_token(&self, token: u8) -> bool { - memchr::memchr(token, self).is_some() - } -} - -impl<'a> FindToken for &'a str { - fn find_token(&self, token: u8) -> bool { - self.as_bytes().find_token(token) - } -} - -impl<'a, 'b> 
FindToken<&'a u8> for &'b [u8] { - fn find_token(&self, token: &u8) -> bool { - self.find_token(*token) - } -} - -impl<'a, 'b> FindToken<&'a u8> for &'b str { - fn find_token(&self, token: &u8) -> bool { - self.as_bytes().find_token(token) - } -} - -impl<'a> FindToken for &'a [u8] { - fn find_token(&self, token: char) -> bool { - self.iter().any(|i| *i == token as u8) - } -} - -impl<'a> FindToken for &'a str { - fn find_token(&self, token: char) -> bool { - self.chars().any(|i| i == token) - } -} - -impl<'a> FindToken for &'a [char] { - fn find_token(&self, token: char) -> bool { - self.iter().any(|i| *i == token) - } -} - -impl<'a, 'b> FindToken<&'a char> for &'b [char] { - fn find_token(&self, token: &char) -> bool { - self.find_token(*token) - } -} - -/// Look for a substring in self -pub trait FindSubstring { - /// Returns the byte position of the substring if it is found - fn find_substring(&self, substr: T) -> Option; -} - -impl<'a, 'b> FindSubstring<&'b [u8]> for &'a [u8] { - fn find_substring(&self, substr: &'b [u8]) -> Option { - if substr.len() > self.len() { - return None; - } - - let (&substr_first, substr_rest) = match substr.split_first() { - Some(split) => split, - // an empty substring is found at position 0 - // This matches the behavior of str.find(""). - None => return Some(0), - }; - - if substr_rest.is_empty() { - return memchr::memchr(substr_first, self); - } - - let mut offset = 0; - let haystack = &self[..self.len() - substr_rest.len()]; - - while let Some(position) = memchr::memchr(substr_first, &haystack[offset..]) { - offset += position; - let next_offset = offset + 1; - if &self[next_offset..][..substr_rest.len()] == substr_rest { - return Some(offset); - } - - offset = next_offset; - } - - None - } -} - -impl<'a, 'b> FindSubstring<&'b str> for &'a [u8] { - fn find_substring(&self, substr: &'b str) -> Option { - self.find_substring(AsBytes::as_bytes(substr)) - } -} - -impl<'a, 'b> FindSubstring<&'b str> for &'a str { - //returns byte index - fn find_substring(&self, substr: &'b str) -> Option { - self.find(substr) - } -} - -/// Used to integrate `str`'s `parse()` method -pub trait ParseTo { - /// Succeeds if `parse()` succeeded. The byte slice implementation - /// will first convert it to a `&str`, then apply the `parse()` function - fn parse_to(&self) -> Option; -} - -impl<'a, R: FromStr> ParseTo for &'a [u8] { - fn parse_to(&self) -> Option { - from_utf8(self).ok().and_then(|s| s.parse().ok()) - } -} - -impl<'a, R: FromStr> ParseTo for &'a str { - fn parse_to(&self) -> Option { - self.parse().ok() - } -} - -/// Slicing operations using ranges. -/// -/// This trait is loosely based on -/// `Index`, but can actually return -/// something else than a `&[T]` or `&str` -pub trait Slice { - /// Slices self according to the range argument - fn slice(&self, range: R) -> Self; -} - -macro_rules! impl_fn_slice { - ( $ty:ty ) => { - fn slice(&self, range: $ty) -> Self { - &self[range] - } - }; -} - -macro_rules! slice_range_impl { - ( [ $for_type:ident ], $ty:ty ) => { - impl<'a, $for_type> Slice<$ty> for &'a [$for_type] { - impl_fn_slice!($ty); - } - }; - ( $for_type:ty, $ty:ty ) => { - impl<'a> Slice<$ty> for &'a $for_type { - impl_fn_slice!($ty); - } - }; -} - -macro_rules! slice_ranges_impl { - ( [ $for_type:ident ] ) => { - slice_range_impl! {[$for_type], Range} - slice_range_impl! {[$for_type], RangeTo} - slice_range_impl! {[$for_type], RangeFrom} - slice_range_impl! {[$for_type], RangeFull} - }; - ( $for_type:ty ) => { - slice_range_impl! 
{$for_type, Range} - slice_range_impl! {$for_type, RangeTo} - slice_range_impl! {$for_type, RangeFrom} - slice_range_impl! {$for_type, RangeFull} - }; -} - -slice_ranges_impl! {str} -slice_ranges_impl! {[T]} - -macro_rules! array_impls { - ($($N:expr)+) => { - $( - impl InputLength for [u8; $N] { - #[inline] - fn input_len(&self) -> usize { - self.len() - } - } - - impl<'a> InputLength for &'a [u8; $N] { - #[inline] - fn input_len(&self) -> usize { - self.len() - } - } - - impl<'a> InputIter for &'a [u8; $N] { - type Item = u8; - type Iter = Enumerate; - type IterElem = Copied>; - - fn iter_indices(&self) -> Self::Iter { - (&self[..]).iter_indices() - } - - fn iter_elements(&self) -> Self::IterElem { - (&self[..]).iter_elements() - } - - fn position

(&self, predicate: P) -> Option - where P: Fn(Self::Item) -> bool { - (&self[..]).position(predicate) - } - - fn slice_index(&self, count: usize) -> Result { - (&self[..]).slice_index(count) - } - } - - impl<'a> Compare<[u8; $N]> for &'a [u8] { - #[inline(always)] - fn compare(&self, t: [u8; $N]) -> CompareResult { - self.compare(&t[..]) - } - - #[inline(always)] - fn compare_no_case(&self, t: [u8;$N]) -> CompareResult { - self.compare_no_case(&t[..]) - } - } - - impl<'a,'b> Compare<&'b [u8; $N]> for &'a [u8] { - #[inline(always)] - fn compare(&self, t: &'b [u8; $N]) -> CompareResult { - self.compare(&t[..]) - } - - #[inline(always)] - fn compare_no_case(&self, t: &'b [u8;$N]) -> CompareResult { - self.compare_no_case(&t[..]) - } - } - - impl FindToken for [u8; $N] { - fn find_token(&self, token: u8) -> bool { - memchr::memchr(token, &self[..]).is_some() - } - } - - impl<'a> FindToken<&'a u8> for [u8; $N] { - fn find_token(&self, token: &u8) -> bool { - self.find_token(*token) - } - } - )+ - }; -} - -array_impls! { - 0 1 2 3 4 5 6 7 8 9 - 10 11 12 13 14 15 16 17 18 19 - 20 21 22 23 24 25 26 27 28 29 - 30 31 32 -} - -/// Abstracts something which can extend an `Extend`. -/// Used to build modified input slices in `escaped_transform` -pub trait ExtendInto { - /// The current input type is a sequence of that `Item` type. - /// - /// Example: `u8` for `&[u8]` or `char` for `&str` - type Item; - - /// The type that will be produced - type Extender; - - /// Create a new `Extend` of the correct type - fn new_builder(&self) -> Self::Extender; - /// Accumulate the input into an accumulator - fn extend_into(&self, acc: &mut Self::Extender); -} - -#[cfg(feature = "alloc")] -impl ExtendInto for [u8] { - type Item = u8; - type Extender = Vec; - - #[inline] - fn new_builder(&self) -> Vec { - Vec::new() - } - #[inline] - fn extend_into(&self, acc: &mut Vec) { - acc.extend(self.iter().cloned()); - } -} - -#[cfg(feature = "alloc")] -impl ExtendInto for &[u8] { - type Item = u8; - type Extender = Vec; - - #[inline] - fn new_builder(&self) -> Vec { - Vec::new() - } - #[inline] - fn extend_into(&self, acc: &mut Vec) { - acc.extend_from_slice(self); - } -} - -#[cfg(feature = "alloc")] -impl ExtendInto for str { - type Item = char; - type Extender = String; - - #[inline] - fn new_builder(&self) -> String { - String::new() - } - #[inline] - fn extend_into(&self, acc: &mut String) { - acc.push_str(self); - } -} - -#[cfg(feature = "alloc")] -impl ExtendInto for &str { - type Item = char; - type Extender = String; - - #[inline] - fn new_builder(&self) -> String { - String::new() - } - #[inline] - fn extend_into(&self, acc: &mut String) { - acc.push_str(self); - } -} - -#[cfg(feature = "alloc")] -impl ExtendInto for char { - type Item = char; - type Extender = String; - - #[inline] - fn new_builder(&self) -> String { - String::new() - } - #[inline] - fn extend_into(&self, acc: &mut String) { - acc.push(*self); - } -} - -/// Helper trait to convert numbers to usize. -/// -/// By default, usize implements `From` and `From` but not -/// `From` and `From` because that would be invalid on some -/// platforms. 
This trait implements the conversion for platforms -/// with 32 and 64 bits pointer platforms -pub trait ToUsize { - /// converts self to usize - fn to_usize(&self) -> usize; -} - -impl ToUsize for u8 { - #[inline] - fn to_usize(&self) -> usize { - *self as usize - } -} - -impl ToUsize for u16 { - #[inline] - fn to_usize(&self) -> usize { - *self as usize - } -} - -impl ToUsize for usize { - #[inline] - fn to_usize(&self) -> usize { - *self - } -} - -#[cfg(any(target_pointer_width = "32", target_pointer_width = "64"))] -impl ToUsize for u32 { - #[inline] - fn to_usize(&self) -> usize { - *self as usize - } -} - -#[cfg(target_pointer_width = "64")] -impl ToUsize for u64 { - #[inline] - fn to_usize(&self) -> usize { - *self as usize - } -} - -/// Equivalent From implementation to avoid orphan rules in bits parsers -pub trait ErrorConvert { - /// Transform to another error type - fn convert(self) -> E; -} - -impl ErrorConvert<(I, ErrorKind)> for ((I, usize), ErrorKind) { - fn convert(self) -> (I, ErrorKind) { - ((self.0).0, self.1) - } -} - -impl ErrorConvert<((I, usize), ErrorKind)> for (I, ErrorKind) { - fn convert(self) -> ((I, usize), ErrorKind) { - ((self.0, 0), self.1) - } -} - -use crate::error; -impl ErrorConvert> for error::Error<(I, usize)> { - fn convert(self) -> error::Error { - error::Error { - input: self.input.0, - code: self.code, - } - } -} - -impl ErrorConvert> for error::Error { - fn convert(self) -> error::Error<(I, usize)> { - error::Error { - input: (self.input, 0), - code: self.code, - } - } -} - -#[cfg(feature = "alloc")] -#[cfg_attr(feature = "docsrs", doc(cfg(feature = "alloc")))] -impl ErrorConvert> for error::VerboseError<(I, usize)> { - fn convert(self) -> error::VerboseError { - error::VerboseError { - errors: self.errors.into_iter().map(|(i, e)| (i.0, e)).collect(), - } - } -} - -#[cfg(feature = "alloc")] -#[cfg_attr(feature = "docsrs", doc(cfg(feature = "alloc")))] -impl ErrorConvert> for error::VerboseError { - fn convert(self) -> error::VerboseError<(I, usize)> { - error::VerboseError { - errors: self.errors.into_iter().map(|(i, e)| ((i, 0), e)).collect(), - } - } -} - -impl ErrorConvert<()> for () { - fn convert(self) {} -} - -#[cfg(feature = "std")] -#[cfg_attr(feature = "docsrs", doc(cfg(feature = "std")))] -/// Helper trait to show a byte slice as a hex dump -pub trait HexDisplay { - /// Converts the value of `self` to a hex dump, returning the owned - /// `String`. - fn to_hex(&self, chunk_size: usize) -> String; - - /// Converts the value of `self` to a hex dump beginning at `from` address, returning the owned - /// `String`. 
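One more hedged sketch (assuming nom 7): `take` accepts any count type implementing the `ToUsize` trait above, and `take_until` performs its scan through `FindSubstring`; the "HEAD" magic and ':' delimiter are purely illustrative:

```rust
// Assumes nom 7. The u8 count is widened via ToUsize; take_until searches
// via FindSubstring. The input format here is made up for illustration.
use nom::bytes::complete::{take, take_until};
use nom::IResult;

fn magic_and_name(i: &str) -> IResult<&str, (&str, &str)> {
    let (i, magic) = take(4u8)(i)?;
    let (i, name) = take_until(":")(i)?;
    Ok((i, (magic, name)))
}

// magic_and_name("HEADfoo:rest") == Ok((":rest", ("HEAD", "foo")))
```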
- fn to_hex_from(&self, chunk_size: usize, from: usize) -> String; -} - -#[cfg(feature = "std")] -static CHARS: &[u8] = b"0123456789abcdef"; - -#[cfg(feature = "std")] -impl HexDisplay for [u8] { - #[allow(unused_variables)] - fn to_hex(&self, chunk_size: usize) -> String { - self.to_hex_from(chunk_size, 0) - } - - #[allow(unused_variables)] - fn to_hex_from(&self, chunk_size: usize, from: usize) -> String { - let mut v = Vec::with_capacity(self.len() * 3); - let mut i = from; - for chunk in self.chunks(chunk_size) { - let s = format!("{:08x}", i); - for &ch in s.as_bytes().iter() { - v.push(ch); - } - v.push(b'\t'); - - i += chunk_size; - - for &byte in chunk { - v.push(CHARS[(byte >> 4) as usize]); - v.push(CHARS[(byte & 0xf) as usize]); - v.push(b' '); - } - if chunk_size > chunk.len() { - for j in 0..(chunk_size - chunk.len()) { - v.push(b' '); - v.push(b' '); - v.push(b' '); - } - } - v.push(b'\t'); - - for &byte in chunk { - if (byte >= 32 && byte <= 126) || byte >= 128 { - v.push(byte); - } else { - v.push(b'.'); - } - } - v.push(b'\n'); - } - - String::from_utf8_lossy(&v[..]).into_owned() - } -} - -#[cfg(feature = "std")] -impl HexDisplay for str { - #[allow(unused_variables)] - fn to_hex(&self, chunk_size: usize) -> String { - self.to_hex_from(chunk_size, 0) - } - - #[allow(unused_variables)] - fn to_hex_from(&self, chunk_size: usize, from: usize) -> String { - self.as_bytes().to_hex_from(chunk_size, from) - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn test_offset_u8() { - let s = b"abcd123"; - let a = &s[..]; - let b = &a[2..]; - let c = &a[..4]; - let d = &a[3..5]; - assert_eq!(a.offset(b), 2); - assert_eq!(a.offset(c), 0); - assert_eq!(a.offset(d), 3); - } - - #[test] - fn test_offset_str() { - let s = "abcřèÂßÇd123"; - let a = &s[..]; - let b = &a[7..]; - let c = &a[..5]; - let d = &a[5..9]; - assert_eq!(a.offset(b), 7); - assert_eq!(a.offset(c), 0); - assert_eq!(a.offset(d), 5); - } -} diff --git a/vendor/nom/tests/arithmetic.rs b/vendor/nom/tests/arithmetic.rs deleted file mode 100644 index 5b627a97a5c47c..00000000000000 --- a/vendor/nom/tests/arithmetic.rs +++ /dev/null @@ -1,94 +0,0 @@ -use nom::{ - branch::alt, - bytes::complete::tag, - character::complete::char, - character::complete::{digit1 as digit, space0 as space}, - combinator::map_res, - multi::fold_many0, - sequence::{delimited, pair}, - IResult, -}; - -// Parser definition - -use std::str::FromStr; - -// We parse any expr surrounded by parens, ignoring all whitespaces around those -fn parens(i: &str) -> IResult<&str, i64> { - delimited(space, delimited(tag("("), expr, tag(")")), space)(i) -} - -// We transform an integer string into a i64, ignoring surrounding whitespaces -// We look for a digit suite, and try to convert it. 
-// If either str::from_utf8 or FromStr::from_str fail, -// we fallback to the parens parser defined above -fn factor(i: &str) -> IResult<&str, i64> { - alt(( - map_res(delimited(space, digit, space), FromStr::from_str), - parens, - ))(i) -} - -// We read an initial factor and for each time we find -// a * or / operator followed by another factor, we do -// the math by folding everything -fn term(i: &str) -> IResult<&str, i64> { - let (i, init) = factor(i)?; - - fold_many0( - pair(alt((char('*'), char('/'))), factor), - move || init, - |acc, (op, val): (char, i64)| { - if op == '*' { - acc * val - } else { - acc / val - } - }, - )(i) -} - -fn expr(i: &str) -> IResult<&str, i64> { - let (i, init) = term(i)?; - - fold_many0( - pair(alt((char('+'), char('-'))), term), - move || init, - |acc, (op, val): (char, i64)| { - if op == '+' { - acc + val - } else { - acc - val - } - }, - )(i) -} - -#[test] -fn factor_test() { - assert_eq!(factor("3"), Ok(("", 3))); - assert_eq!(factor(" 12"), Ok(("", 12))); - assert_eq!(factor("537 "), Ok(("", 537))); - assert_eq!(factor(" 24 "), Ok(("", 24))); -} - -#[test] -fn term_test() { - assert_eq!(term(" 12 *2 / 3"), Ok(("", 8))); - assert_eq!(term(" 2* 3 *2 *2 / 3"), Ok(("", 8))); - assert_eq!(term(" 48 / 3/2"), Ok(("", 8))); -} - -#[test] -fn expr_test() { - assert_eq!(expr(" 1 + 2 "), Ok(("", 3))); - assert_eq!(expr(" 12 + 6 - 4+ 3"), Ok(("", 17))); - assert_eq!(expr(" 1 + 2*3 + 4"), Ok(("", 11))); -} - -#[test] -fn parens_test() { - assert_eq!(expr(" ( 2 )"), Ok(("", 2))); - assert_eq!(expr(" 2* ( 3 + 4 ) "), Ok(("", 14))); - assert_eq!(expr(" 2*2 / ( 5 - 1) + 3"), Ok(("", 4))); -} diff --git a/vendor/nom/tests/arithmetic_ast.rs b/vendor/nom/tests/arithmetic_ast.rs deleted file mode 100644 index ca1511096099a7..00000000000000 --- a/vendor/nom/tests/arithmetic_ast.rs +++ /dev/null @@ -1,161 +0,0 @@ -use std::fmt; -use std::fmt::{Debug, Display, Formatter}; - -use std::str::FromStr; - -use nom::{ - branch::alt, - bytes::complete::tag, - character::complete::{digit1 as digit, multispace0 as multispace}, - combinator::{map, map_res}, - multi::many0, - sequence::{delimited, preceded}, - IResult, -}; - -pub enum Expr { - Value(i64), - Add(Box, Box), - Sub(Box, Box), - Mul(Box, Box), - Div(Box, Box), - Paren(Box), -} - -#[derive(Debug)] -pub enum Oper { - Add, - Sub, - Mul, - Div, -} - -impl Display for Expr { - fn fmt(&self, format: &mut Formatter<'_>) -> fmt::Result { - use self::Expr::*; - match *self { - Value(val) => write!(format, "{}", val), - Add(ref left, ref right) => write!(format, "{} + {}", left, right), - Sub(ref left, ref right) => write!(format, "{} - {}", left, right), - Mul(ref left, ref right) => write!(format, "{} * {}", left, right), - Div(ref left, ref right) => write!(format, "{} / {}", left, right), - Paren(ref expr) => write!(format, "({})", expr), - } - } -} - -impl Debug for Expr { - fn fmt(&self, format: &mut Formatter<'_>) -> fmt::Result { - use self::Expr::*; - match *self { - Value(val) => write!(format, "{}", val), - Add(ref left, ref right) => write!(format, "({:?} + {:?})", left, right), - Sub(ref left, ref right) => write!(format, "({:?} - {:?})", left, right), - Mul(ref left, ref right) => write!(format, "({:?} * {:?})", left, right), - Div(ref left, ref right) => write!(format, "({:?} / {:?})", left, right), - Paren(ref expr) => write!(format, "[{:?}]", expr), - } - } -} - -fn parens(i: &str) -> IResult<&str, Expr> { - delimited( - multispace, - delimited(tag("("), map(expr, |e| Expr::Paren(Box::new(e))), tag(")")), - 
multispace, - )(i) -} - -fn factor(i: &str) -> IResult<&str, Expr> { - alt(( - map( - map_res(delimited(multispace, digit, multispace), FromStr::from_str), - Expr::Value, - ), - parens, - ))(i) -} - -fn fold_exprs(initial: Expr, remainder: Vec<(Oper, Expr)>) -> Expr { - remainder.into_iter().fold(initial, |acc, pair| { - let (oper, expr) = pair; - match oper { - Oper::Add => Expr::Add(Box::new(acc), Box::new(expr)), - Oper::Sub => Expr::Sub(Box::new(acc), Box::new(expr)), - Oper::Mul => Expr::Mul(Box::new(acc), Box::new(expr)), - Oper::Div => Expr::Div(Box::new(acc), Box::new(expr)), - } - }) -} - -fn term(i: &str) -> IResult<&str, Expr> { - let (i, initial) = factor(i)?; - let (i, remainder) = many0(alt(( - |i| { - let (i, mul) = preceded(tag("*"), factor)(i)?; - Ok((i, (Oper::Mul, mul))) - }, - |i| { - let (i, div) = preceded(tag("/"), factor)(i)?; - Ok((i, (Oper::Div, div))) - }, - )))(i)?; - - Ok((i, fold_exprs(initial, remainder))) -} - -fn expr(i: &str) -> IResult<&str, Expr> { - let (i, initial) = term(i)?; - let (i, remainder) = many0(alt(( - |i| { - let (i, add) = preceded(tag("+"), term)(i)?; - Ok((i, (Oper::Add, add))) - }, - |i| { - let (i, sub) = preceded(tag("-"), term)(i)?; - Ok((i, (Oper::Sub, sub))) - }, - )))(i)?; - - Ok((i, fold_exprs(initial, remainder))) -} - -#[test] -fn factor_test() { - assert_eq!( - factor(" 3 ").map(|(i, x)| (i, format!("{:?}", x))), - Ok(("", String::from("3"))) - ); -} - -#[test] -fn term_test() { - assert_eq!( - term(" 3 * 5 ").map(|(i, x)| (i, format!("{:?}", x))), - Ok(("", String::from("(3 * 5)"))) - ); -} - -#[test] -fn expr_test() { - assert_eq!( - expr(" 1 + 2 * 3 ").map(|(i, x)| (i, format!("{:?}", x))), - Ok(("", String::from("(1 + (2 * 3))"))) - ); - assert_eq!( - expr(" 1 + 2 * 3 / 4 - 5 ").map(|(i, x)| (i, format!("{:?}", x))), - Ok(("", String::from("((1 + ((2 * 3) / 4)) - 5)"))) - ); - assert_eq!( - expr(" 72 / 2 / 3 ").map(|(i, x)| (i, format!("{:?}", x))), - Ok(("", String::from("((72 / 2) / 3)"))) - ); -} - -#[test] -fn parens_test() { - assert_eq!( - expr(" ( 1 + 2 ) * 3 ").map(|(i, x)| (i, format!("{:?}", x))), - Ok(("", String::from("([(1 + 2)] * 3)"))) - ); -} diff --git a/vendor/nom/tests/css.rs b/vendor/nom/tests/css.rs deleted file mode 100644 index ad3d72b8fae22a..00000000000000 --- a/vendor/nom/tests/css.rs +++ /dev/null @@ -1,45 +0,0 @@ -use nom::bytes::complete::{tag, take_while_m_n}; -use nom::combinator::map_res; -use nom::sequence::tuple; -use nom::IResult; - -#[derive(Debug, PartialEq)] -pub struct Color { - pub red: u8, - pub green: u8, - pub blue: u8, -} - -fn from_hex(input: &str) -> Result { - u8::from_str_radix(input, 16) -} - -fn is_hex_digit(c: char) -> bool { - c.is_digit(16) -} - -fn hex_primary(input: &str) -> IResult<&str, u8> { - map_res(take_while_m_n(2, 2, is_hex_digit), from_hex)(input) -} - -fn hex_color(input: &str) -> IResult<&str, Color> { - let (input, _) = tag("#")(input)?; - let (input, (red, green, blue)) = tuple((hex_primary, hex_primary, hex_primary))(input)?; - - Ok((input, Color { red, green, blue })) -} - -#[test] -fn parse_color() { - assert_eq!( - hex_color("#2F14DF"), - Ok(( - "", - Color { - red: 47, - green: 20, - blue: 223, - } - )) - ); -} diff --git a/vendor/nom/tests/custom_errors.rs b/vendor/nom/tests/custom_errors.rs deleted file mode 100644 index 2021713341a205..00000000000000 --- a/vendor/nom/tests/custom_errors.rs +++ /dev/null @@ -1,48 +0,0 @@ -#![allow(dead_code)] - -use nom::bytes::streaming::tag; -use nom::character::streaming::digit1 as digit; -use 
nom::combinator::verify; -use nom::error::{ErrorKind, ParseError}; -#[cfg(feature = "alloc")] -use nom::multi::count; -use nom::sequence::terminated; -use nom::IResult; - -#[derive(Debug)] -pub struct CustomError(String); - -impl<'a> From<(&'a str, ErrorKind)> for CustomError { - fn from(error: (&'a str, ErrorKind)) -> Self { - CustomError(format!("error code was: {:?}", error)) - } -} - -impl<'a> ParseError<&'a str> for CustomError { - fn from_error_kind(_: &'a str, kind: ErrorKind) -> Self { - CustomError(format!("error code was: {:?}", kind)) - } - - fn append(_: &'a str, kind: ErrorKind, other: CustomError) -> Self { - CustomError(format!("{:?}\nerror code was: {:?}", other, kind)) - } -} - -fn test1(input: &str) -> IResult<&str, &str, CustomError> { - //fix_error!(input, CustomError, tag!("abcd")) - tag("abcd")(input) -} - -fn test2(input: &str) -> IResult<&str, &str, CustomError> { - //terminated!(input, test1, fix_error!(CustomError, digit)) - terminated(test1, digit)(input) -} - -fn test3(input: &str) -> IResult<&str, &str, CustomError> { - verify(test1, |s: &str| s.starts_with("abcd"))(input) -} - -#[cfg(feature = "alloc")] -fn test4(input: &str) -> IResult<&str, Vec<&str>, CustomError> { - count(test1, 4)(input) -} diff --git a/vendor/nom/tests/escaped.rs b/vendor/nom/tests/escaped.rs deleted file mode 100644 index 47c6a71e52613b..00000000000000 --- a/vendor/nom/tests/escaped.rs +++ /dev/null @@ -1,28 +0,0 @@ -use nom::bytes::complete::escaped; -use nom::character::complete::digit1; -use nom::character::complete::one_of; -use nom::{error::ErrorKind, Err, IResult}; - -fn esc(s: &str) -> IResult<&str, &str, (&str, ErrorKind)> { - escaped(digit1, '\\', one_of("\"n\\"))(s) -} - -#[cfg(feature = "alloc")] -fn esc_trans(s: &str) -> IResult<&str, String, (&str, ErrorKind)> { - use nom::bytes::complete::{escaped_transform, tag}; - escaped_transform(digit1, '\\', tag("n"))(s) -} - -#[test] -fn test_escaped() { - assert_eq!(esc("abcd"), Err(Err::Error(("abcd", ErrorKind::Escaped)))); -} - -#[test] -#[cfg(feature = "alloc")] -fn test_escaped_transform() { - assert_eq!( - esc_trans("abcd"), - Err(Err::Error(("abcd", ErrorKind::EscapedTransform))) - ); -} diff --git a/vendor/nom/tests/float.rs b/vendor/nom/tests/float.rs deleted file mode 100644 index 634b189899bfba..00000000000000 --- a/vendor/nom/tests/float.rs +++ /dev/null @@ -1,46 +0,0 @@ -use nom::branch::alt; -use nom::bytes::complete::tag; -use nom::character::streaming::digit1 as digit; -use nom::combinator::{map, map_res, opt, recognize}; -use nom::sequence::{delimited, pair}; -use nom::IResult; - -use std::str; -use std::str::FromStr; - -fn unsigned_float(i: &[u8]) -> IResult<&[u8], f32> { - let float_bytes = recognize(alt(( - delimited(digit, tag("."), opt(digit)), - delimited(opt(digit), tag("."), digit), - ))); - let float_str = map_res(float_bytes, str::from_utf8); - map_res(float_str, FromStr::from_str)(i) -} - -fn float(i: &[u8]) -> IResult<&[u8], f32> { - map( - pair(opt(alt((tag("+"), tag("-")))), unsigned_float), - |(sign, value)| { - sign - .and_then(|s| if s[0] == b'-' { Some(-1f32) } else { None }) - .unwrap_or(1f32) - * value - }, - )(i) -} - -#[test] -fn unsigned_float_test() { - assert_eq!(unsigned_float(&b"123.456;"[..]), Ok((&b";"[..], 123.456))); - assert_eq!(unsigned_float(&b"0.123;"[..]), Ok((&b";"[..], 0.123))); - assert_eq!(unsigned_float(&b"123.0;"[..]), Ok((&b";"[..], 123.0))); - assert_eq!(unsigned_float(&b"123.;"[..]), Ok((&b";"[..], 123.0))); - assert_eq!(unsigned_float(&b".123;"[..]), Ok((&b";"[..], 
0.123))); -} - -#[test] -fn float_test() { - assert_eq!(float(&b"123.456;"[..]), Ok((&b";"[..], 123.456))); - assert_eq!(float(&b"+123.456;"[..]), Ok((&b";"[..], 123.456))); - assert_eq!(float(&b"-123.456;"[..]), Ok((&b";"[..], -123.456))); -} diff --git a/vendor/nom/tests/fnmut.rs b/vendor/nom/tests/fnmut.rs deleted file mode 100644 index b1486cbe636c62..00000000000000 --- a/vendor/nom/tests/fnmut.rs +++ /dev/null @@ -1,39 +0,0 @@ -use nom::{ - bytes::complete::tag, - multi::{many0, many0_count}, -}; - -#[test] -fn parse() { - let mut counter = 0; - - let res = { - let mut parser = many0::<_, _, (), _>(|i| { - counter += 1; - tag("abc")(i) - }); - - parser("abcabcabcabc").unwrap() - }; - - println!("res: {:?}", res); - assert_eq!(counter, 5); -} - -#[test] -fn accumulate() { - let mut v = Vec::new(); - - let (_, count) = { - let mut parser = many0_count::<_, _, (), _>(|i| { - let (i, o) = tag("abc")(i)?; - v.push(o); - Ok((i, ())) - }); - parser("abcabcabcabc").unwrap() - }; - - println!("v: {:?}", v); - assert_eq!(count, 4); - assert_eq!(v.len(), 4); -} diff --git a/vendor/nom/tests/ini.rs b/vendor/nom/tests/ini.rs deleted file mode 100644 index e556f44a3c073d..00000000000000 --- a/vendor/nom/tests/ini.rs +++ /dev/null @@ -1,207 +0,0 @@ -use nom::{ - bytes::complete::take_while, - character::complete::{ - alphanumeric1 as alphanumeric, char, multispace0 as multispace, space0 as space, - }, - combinator::{map, map_res, opt}, - multi::many0, - sequence::{delimited, pair, separated_pair, terminated, tuple}, - IResult, -}; - -use std::collections::HashMap; -use std::str; - -fn category(i: &[u8]) -> IResult<&[u8], &str> { - map_res( - delimited(char('['), take_while(|c| c != b']'), char(']')), - str::from_utf8, - )(i) -} - -fn key_value(i: &[u8]) -> IResult<&[u8], (&str, &str)> { - let (i, key) = map_res(alphanumeric, str::from_utf8)(i)?; - let (i, _) = tuple((opt(space), char('='), opt(space)))(i)?; - let (i, val) = map_res(take_while(|c| c != b'\n' && c != b';'), str::from_utf8)(i)?; - let (i, _) = opt(pair(char(';'), take_while(|c| c != b'\n')))(i)?; - Ok((i, (key, val))) -} - -fn keys_and_values(i: &[u8]) -> IResult<&[u8], HashMap<&str, &str>> { - map(many0(terminated(key_value, opt(multispace))), |vec| { - vec.into_iter().collect() - })(i) -} - -fn category_and_keys(i: &[u8]) -> IResult<&[u8], (&str, HashMap<&str, &str>)> { - let (i, category) = terminated(category, opt(multispace))(i)?; - let (i, keys) = keys_and_values(i)?; - Ok((i, (category, keys))) -} - -fn categories(i: &[u8]) -> IResult<&[u8], HashMap<&str, HashMap<&str, &str>>> { - map( - many0(separated_pair( - category, - opt(multispace), - map( - many0(terminated(key_value, opt(multispace))), - |vec: Vec<_>| vec.into_iter().collect(), - ), - )), - |vec: Vec<_>| vec.into_iter().collect(), - )(i) -} - -#[test] -fn parse_category_test() { - let ini_file = &b"[category] - -parameter=value -key = value2"[..]; - - let ini_without_category = &b"\n\nparameter=value -key = value2"[..]; - - let res = category(ini_file); - println!("{:?}", res); - match res { - Ok((i, o)) => println!("i: {:?} | o: {:?}", str::from_utf8(i), o), - _ => println!("error"), - } - - assert_eq!(res, Ok((ini_without_category, "category"))); -} - -#[test] -fn parse_key_value_test() { - let ini_file = &b"parameter=value -key = value2"[..]; - - let ini_without_key_value = &b"\nkey = value2"[..]; - - let res = key_value(ini_file); - println!("{:?}", res); - match res { - Ok((i, (o1, o2))) => println!("i: {:?} | o: ({:?},{:?})", str::from_utf8(i), o1, o2), - _ => 
println!("error"), - } - - assert_eq!(res, Ok((ini_without_key_value, ("parameter", "value")))); -} - -#[test] -fn parse_key_value_with_space_test() { - let ini_file = &b"parameter = value -key = value2"[..]; - - let ini_without_key_value = &b"\nkey = value2"[..]; - - let res = key_value(ini_file); - println!("{:?}", res); - match res { - Ok((i, (o1, o2))) => println!("i: {:?} | o: ({:?},{:?})", str::from_utf8(i), o1, o2), - _ => println!("error"), - } - - assert_eq!(res, Ok((ini_without_key_value, ("parameter", "value")))); -} - -#[test] -fn parse_key_value_with_comment_test() { - let ini_file = &b"parameter=value;abc -key = value2"[..]; - - let ini_without_key_value = &b"\nkey = value2"[..]; - - let res = key_value(ini_file); - println!("{:?}", res); - match res { - Ok((i, (o1, o2))) => println!("i: {:?} | o: ({:?},{:?})", str::from_utf8(i), o1, o2), - _ => println!("error"), - } - - assert_eq!(res, Ok((ini_without_key_value, ("parameter", "value")))); -} - -#[test] -fn parse_multiple_keys_and_values_test() { - let ini_file = &b"parameter=value;abc - -key = value2 - -[category]"[..]; - - let ini_without_key_value = &b"[category]"[..]; - - let res = keys_and_values(ini_file); - println!("{:?}", res); - match res { - Ok((i, ref o)) => println!("i: {:?} | o: {:?}", str::from_utf8(i), o), - _ => println!("error"), - } - - let mut expected: HashMap<&str, &str> = HashMap::new(); - expected.insert("parameter", "value"); - expected.insert("key", "value2"); - assert_eq!(res, Ok((ini_without_key_value, expected))); -} - -#[test] -fn parse_category_then_multiple_keys_and_values_test() { - //FIXME: there can be an empty line or a comment line after a category - let ini_file = &b"[abcd] -parameter=value;abc - -key = value2 - -[category]"[..]; - - let ini_after_parser = &b"[category]"[..]; - - let res = category_and_keys(ini_file); - println!("{:?}", res); - match res { - Ok((i, ref o)) => println!("i: {:?} | o: {:?}", str::from_utf8(i), o), - _ => println!("error"), - } - - let mut expected_h: HashMap<&str, &str> = HashMap::new(); - expected_h.insert("parameter", "value"); - expected_h.insert("key", "value2"); - assert_eq!(res, Ok((ini_after_parser, ("abcd", expected_h)))); -} - -#[test] -fn parse_multiple_categories_test() { - let ini_file = &b"[abcd] - -parameter=value;abc - -key = value2 - -[category] -parameter3=value3 -key4 = value4 -"[..]; - - let ini_after_parser = &b""[..]; - - let res = categories(ini_file); - //println!("{:?}", res); - match res { - Ok((i, ref o)) => println!("i: {:?} | o: {:?}", str::from_utf8(i), o), - _ => println!("error"), - } - - let mut expected_1: HashMap<&str, &str> = HashMap::new(); - expected_1.insert("parameter", "value"); - expected_1.insert("key", "value2"); - let mut expected_2: HashMap<&str, &str> = HashMap::new(); - expected_2.insert("parameter3", "value3"); - expected_2.insert("key4", "value4"); - let mut expected_h: HashMap<&str, HashMap<&str, &str>> = HashMap::new(); - expected_h.insert("abcd", expected_1); - expected_h.insert("category", expected_2); - assert_eq!(res, Ok((ini_after_parser, expected_h))); -} diff --git a/vendor/nom/tests/ini_str.rs b/vendor/nom/tests/ini_str.rs deleted file mode 100644 index 3702303527615e..00000000000000 --- a/vendor/nom/tests/ini_str.rs +++ /dev/null @@ -1,217 +0,0 @@ -use nom::{ - bytes::complete::{is_a, tag, take_till, take_while}, - character::complete::{alphanumeric1 as alphanumeric, char, space0 as space}, - combinator::opt, - multi::many0, - sequence::{delimited, pair, terminated, tuple}, - IResult, -}; - -use 
std::collections::HashMap; - -fn is_line_ending_or_comment(chr: char) -> bool { - chr == ';' || chr == '\n' -} - -fn not_line_ending(i: &str) -> IResult<&str, &str> { - take_while(|c| c != '\r' && c != '\n')(i) -} - -fn space_or_line_ending(i: &str) -> IResult<&str, &str> { - is_a(" \r\n")(i) -} - -fn category(i: &str) -> IResult<&str, &str> { - terminated( - delimited(char('['), take_while(|c| c != ']'), char(']')), - opt(is_a(" \r\n")), - )(i) -} - -fn key_value(i: &str) -> IResult<&str, (&str, &str)> { - let (i, key) = alphanumeric(i)?; - let (i, _) = tuple((opt(space), tag("="), opt(space)))(i)?; - let (i, val) = take_till(is_line_ending_or_comment)(i)?; - let (i, _) = opt(space)(i)?; - let (i, _) = opt(pair(tag(";"), not_line_ending))(i)?; - let (i, _) = opt(space_or_line_ending)(i)?; - - Ok((i, (key, val))) -} - -fn keys_and_values_aggregator(i: &str) -> IResult<&str, Vec<(&str, &str)>> { - many0(key_value)(i) -} - -fn keys_and_values(input: &str) -> IResult<&str, HashMap<&str, &str>> { - match keys_and_values_aggregator(input) { - Ok((i, tuple_vec)) => Ok((i, tuple_vec.into_iter().collect())), - Err(e) => Err(e), - } -} - -fn category_and_keys(i: &str) -> IResult<&str, (&str, HashMap<&str, &str>)> { - pair(category, keys_and_values)(i) -} - -fn categories_aggregator(i: &str) -> IResult<&str, Vec<(&str, HashMap<&str, &str>)>> { - many0(category_and_keys)(i) -} - -fn categories(input: &str) -> IResult<&str, HashMap<&str, HashMap<&str, &str>>> { - match categories_aggregator(input) { - Ok((i, tuple_vec)) => Ok((i, tuple_vec.into_iter().collect())), - Err(e) => Err(e), - } -} - -#[test] -fn parse_category_test() { - let ini_file = "[category] - -parameter=value -key = value2"; - - let ini_without_category = "parameter=value -key = value2"; - - let res = category(ini_file); - println!("{:?}", res); - match res { - Ok((i, o)) => println!("i: {} | o: {:?}", i, o), - _ => println!("error"), - } - - assert_eq!(res, Ok((ini_without_category, "category"))); -} - -#[test] -fn parse_key_value_test() { - let ini_file = "parameter=value -key = value2"; - - let ini_without_key_value = "key = value2"; - - let res = key_value(ini_file); - println!("{:?}", res); - match res { - Ok((i, (o1, o2))) => println!("i: {} | o: ({:?},{:?})", i, o1, o2), - _ => println!("error"), - } - - assert_eq!(res, Ok((ini_without_key_value, ("parameter", "value")))); -} - -#[test] -fn parse_key_value_with_space_test() { - let ini_file = "parameter = value -key = value2"; - - let ini_without_key_value = "key = value2"; - - let res = key_value(ini_file); - println!("{:?}", res); - match res { - Ok((i, (o1, o2))) => println!("i: {} | o: ({:?},{:?})", i, o1, o2), - _ => println!("error"), - } - - assert_eq!(res, Ok((ini_without_key_value, ("parameter", "value")))); -} - -#[test] -fn parse_key_value_with_comment_test() { - let ini_file = "parameter=value;abc -key = value2"; - - let ini_without_key_value = "key = value2"; - - let res = key_value(ini_file); - println!("{:?}", res); - match res { - Ok((i, (o1, o2))) => println!("i: {} | o: ({:?},{:?})", i, o1, o2), - _ => println!("error"), - } - - assert_eq!(res, Ok((ini_without_key_value, ("parameter", "value")))); -} - -#[test] -fn parse_multiple_keys_and_values_test() { - let ini_file = "parameter=value;abc - -key = value2 - -[category]"; - - let ini_without_key_value = "[category]"; - - let res = keys_and_values(ini_file); - println!("{:?}", res); - match res { - Ok((i, ref o)) => println!("i: {} | o: {:?}", i, o), - _ => println!("error"), - } - - let mut expected: 
HashMap<&str, &str> = HashMap::new(); - expected.insert("parameter", "value"); - expected.insert("key", "value2"); - assert_eq!(res, Ok((ini_without_key_value, expected))); -} - -#[test] -fn parse_category_then_multiple_keys_and_values_test() { - //FIXME: there can be an empty line or a comment line after a category - let ini_file = "[abcd] -parameter=value;abc - -key = value2 - -[category]"; - - let ini_after_parser = "[category]"; - - let res = category_and_keys(ini_file); - println!("{:?}", res); - match res { - Ok((i, ref o)) => println!("i: {} | o: {:?}", i, o), - _ => println!("error"), - } - - let mut expected_h: HashMap<&str, &str> = HashMap::new(); - expected_h.insert("parameter", "value"); - expected_h.insert("key", "value2"); - assert_eq!(res, Ok((ini_after_parser, ("abcd", expected_h)))); -} - -#[test] -fn parse_multiple_categories_test() { - let ini_file = "[abcd] - -parameter=value;abc - -key = value2 - -[category] -parameter3=value3 -key4 = value4 -"; - - let res = categories(ini_file); - //println!("{:?}", res); - match res { - Ok((i, ref o)) => println!("i: {} | o: {:?}", i, o), - _ => println!("error"), - } - - let mut expected_1: HashMap<&str, &str> = HashMap::new(); - expected_1.insert("parameter", "value"); - expected_1.insert("key", "value2"); - let mut expected_2: HashMap<&str, &str> = HashMap::new(); - expected_2.insert("parameter3", "value3"); - expected_2.insert("key4", "value4"); - let mut expected_h: HashMap<&str, HashMap<&str, &str>> = HashMap::new(); - expected_h.insert("abcd", expected_1); - expected_h.insert("category", expected_2); - assert_eq!(res, Ok(("", expected_h))); -} diff --git a/vendor/nom/tests/issues.rs b/vendor/nom/tests/issues.rs deleted file mode 100644 index 7985702f678530..00000000000000 --- a/vendor/nom/tests/issues.rs +++ /dev/null @@ -1,242 +0,0 @@ -//#![feature(trace_macros)] -#![allow(dead_code)] -#![cfg_attr(feature = "cargo-clippy", allow(redundant_closure))] - -use nom::{error::ErrorKind, Err, IResult, Needed}; - -#[allow(dead_code)] -struct Range { - start: char, - end: char, -} - -pub fn take_char(input: &[u8]) -> IResult<&[u8], char> { - if !input.is_empty() { - Ok((&input[1..], input[0] as char)) - } else { - Err(Err::Incomplete(Needed::new(1))) - } -} - -#[cfg(feature = "std")] -mod parse_int { - use nom::HexDisplay; - use nom::{ - character::streaming::{digit1 as digit, space1 as space}, - combinator::{complete, map, opt}, - multi::many0, - IResult, - }; - use std::str; - - fn parse_ints(input: &[u8]) -> IResult<&[u8], Vec> { - many0(spaces_or_int)(input) - } - - fn spaces_or_int(input: &[u8]) -> IResult<&[u8], i32> { - println!("{}", input.to_hex(8)); - let (i, _) = opt(complete(space))(input)?; - let (i, res) = map(complete(digit), |x| { - println!("x: {:?}", x); - let result = str::from_utf8(x).unwrap(); - println!("Result: {}", result); - println!("int is empty?: {}", x.is_empty()); - match result.parse() { - Ok(i) => i, - Err(e) => panic!("UH OH! NOT A DIGIT! 
{:?}", e), - } - })(i)?; - - Ok((i, res)) - } - - #[test] - fn issue_142() { - let subject = parse_ints(&b"12 34 5689a"[..]); - let expected = Ok((&b"a"[..], vec![12, 34, 5689])); - assert_eq!(subject, expected); - - let subject = parse_ints(&b"12 34 5689 "[..]); - let expected = Ok((&b" "[..], vec![12, 34, 5689])); - assert_eq!(subject, expected) - } -} - -#[test] -fn usize_length_bytes_issue() { - use nom::multi::length_data; - use nom::number::streaming::be_u16; - let _: IResult<&[u8], &[u8], (&[u8], ErrorKind)> = length_data(be_u16)(b"012346"); -} - -#[test] -fn take_till_issue() { - use nom::bytes::streaming::take_till; - - fn nothing(i: &[u8]) -> IResult<&[u8], &[u8]> { - take_till(|_| true)(i) - } - - assert_eq!(nothing(b""), Err(Err::Incomplete(Needed::new(1)))); - assert_eq!(nothing(b"abc"), Ok((&b"abc"[..], &b""[..]))); -} - -#[test] -fn issue_655() { - use nom::character::streaming::{line_ending, not_line_ending}; - fn twolines(i: &str) -> IResult<&str, (&str, &str)> { - let (i, l1) = not_line_ending(i)?; - let (i, _) = line_ending(i)?; - let (i, l2) = not_line_ending(i)?; - let (i, _) = line_ending(i)?; - - Ok((i, (l1, l2))) - } - - assert_eq!(twolines("foo\nbar\n"), Ok(("", ("foo", "bar")))); - assert_eq!(twolines("féo\nbar\n"), Ok(("", ("féo", "bar")))); - assert_eq!(twolines("foé\nbar\n"), Ok(("", ("foé", "bar")))); - assert_eq!(twolines("foé\r\nbar\n"), Ok(("", ("foé", "bar")))); -} - -#[cfg(feature = "alloc")] -fn issue_717(i: &[u8]) -> IResult<&[u8], Vec<&[u8]>> { - use nom::bytes::complete::{is_not, tag}; - use nom::multi::separated_list0; - - separated_list0(tag([0x0]), is_not([0x0u8]))(i) -} - -mod issue_647 { - use nom::bytes::streaming::tag; - use nom::combinator::complete; - use nom::multi::separated_list0; - use nom::{error::Error, number::streaming::be_f64, Err, IResult}; - pub type Input<'a> = &'a [u8]; - - #[derive(PartialEq, Debug, Clone)] - struct Data { - c: f64, - v: Vec, - } - - fn list<'a, 'b>( - input: Input<'a>, - _cs: &'b f64, - ) -> Result<(Input<'a>, Vec), Err>> { - separated_list0(complete(tag(",")), complete(be_f64))(input) - } - - fn data(input: Input<'_>) -> IResult, Data> { - let (i, c) = be_f64(input)?; - let (i, _) = tag("\n")(i)?; - let (i, v) = list(i, &c)?; - Ok((i, Data { c, v })) - } -} - -#[test] -fn issue_848_overflow_incomplete_bits_to_bytes() { - fn take(i: &[u8]) -> IResult<&[u8], &[u8]> { - use nom::bytes::streaming::take; - take(0x2000000000000000_usize)(i) - } - fn parser(i: &[u8]) -> IResult<&[u8], &[u8]> { - use nom::bits::{bits, bytes}; - - bits(bytes(take))(i) - } - assert_eq!( - parser(&b""[..]), - Err(Err::Failure(nom::error_position!( - &b""[..], - ErrorKind::TooLarge - ))) - ); -} - -#[test] -fn issue_942() { - use nom::error::{ContextError, ParseError}; - pub fn parser<'a, E: ParseError<&'a str> + ContextError<&'a str>>( - i: &'a str, - ) -> IResult<&'a str, usize, E> { - use nom::{character::complete::char, error::context, multi::many0_count}; - many0_count(context("char_a", char('a')))(i) - } - assert_eq!(parser::<()>("aaa"), Ok(("", 3))); -} - -#[test] -fn issue_many_m_n_with_zeros() { - use nom::character::complete::char; - use nom::multi::many_m_n; - let mut parser = many_m_n::<_, _, (), _>(0, 0, char('a')); - assert_eq!(parser("aaa"), Ok(("aaa", vec![]))); -} - -#[test] -fn issue_1027_convert_error_panic_nonempty() { - use nom::character::complete::char; - use nom::error::{convert_error, VerboseError}; - use nom::sequence::pair; - - let input = "a"; - - let result: IResult<_, _, VerboseError<&str>> = pair(char('a'), 
char('b'))(input); - let err = match result.unwrap_err() { - Err::Error(e) => e, - _ => unreachable!(), - }; - - let msg = convert_error(input, err); - assert_eq!( - msg, - "0: at line 1:\na\n ^\nexpected \'b\', got end of input\n\n" - ); -} - -#[test] -fn issue_1231_bits_expect_fn_closure() { - use nom::bits::{bits, complete::take}; - use nom::error::Error; - use nom::sequence::tuple; - pub fn example(input: &[u8]) -> IResult<&[u8], (u8, u8)> { - bits::<_, _, Error<_>, _, _>(tuple((take(1usize), take(1usize))))(input) - } - assert_eq!(example(&[0xff]), Ok((&b""[..], (1, 1)))); -} - -#[test] -fn issue_1282_findtoken_char() { - use nom::character::complete::one_of; - use nom::error::Error; - let parser = one_of::<_, _, Error<_>>(&['a', 'b', 'c'][..]); - assert_eq!(parser("aaa"), Ok(("aa", 'a'))); -} - -#[test] -fn issue_1459_clamp_capacity() { - use nom::character::complete::char; - - // shouldn't panic - use nom::multi::many_m_n; - let mut parser = many_m_n::<_, _, (), _>(usize::MAX, usize::MAX, char('a')); - assert_eq!(parser("a"), Err(nom::Err::Error(()))); - - // shouldn't panic - use nom::multi::count; - let mut parser = count::<_, _, (), _>(char('a'), usize::MAX); - assert_eq!(parser("a"), Err(nom::Err::Error(()))); -} - -#[test] -fn issue_1617_count_parser_returning_zero_size() { - use nom::{bytes::complete::tag, combinator::map, error::Error, multi::count}; - - // previously, `count()` panicked if the parser had type `O = ()` - let parser = map(tag::<_, _, Error<&str>>("abc"), |_| ()); - // shouldn't panic - let result = count(parser, 3)("abcabcabcdef").expect("parsing should succeed"); - assert_eq!(result, ("def", vec![(), (), ()])); -} diff --git a/vendor/nom/tests/json.rs b/vendor/nom/tests/json.rs deleted file mode 100644 index e8a06fd778a137..00000000000000 --- a/vendor/nom/tests/json.rs +++ /dev/null @@ -1,236 +0,0 @@ -#![cfg(feature = "alloc")] - -use nom::{ - branch::alt, - bytes::complete::{tag, take}, - character::complete::{anychar, char, multispace0, none_of}, - combinator::{map, map_opt, map_res, value, verify}, - error::ParseError, - multi::{fold_many0, separated_list0}, - number::complete::double, - sequence::{delimited, preceded, separated_pair}, - IResult, Parser, -}; - -use std::collections::HashMap; - -#[derive(Debug, PartialEq, Clone)] -pub enum JsonValue { - Null, - Bool(bool), - Str(String), - Num(f64), - Array(Vec), - Object(HashMap), -} - -fn boolean(input: &str) -> IResult<&str, bool> { - alt((value(false, tag("false")), value(true, tag("true"))))(input) -} - -fn u16_hex(input: &str) -> IResult<&str, u16> { - map_res(take(4usize), |s| u16::from_str_radix(s, 16))(input) -} - -fn unicode_escape(input: &str) -> IResult<&str, char> { - map_opt( - alt(( - // Not a surrogate - map(verify(u16_hex, |cp| !(0xD800..0xE000).contains(cp)), |cp| { - cp as u32 - }), - // See https://en.wikipedia.org/wiki/UTF-16#Code_points_from_U+010000_to_U+10FFFF for details - map( - verify( - separated_pair(u16_hex, tag("\\u"), u16_hex), - |(high, low)| (0xD800..0xDC00).contains(high) && (0xDC00..0xE000).contains(low), - ), - |(high, low)| { - let high_ten = (high as u32) - 0xD800; - let low_ten = (low as u32) - 0xDC00; - (high_ten << 10) + low_ten + 0x10000 - }, - ), - )), - // Could be probably replaced with .unwrap() or _unchecked due to the verify checks - std::char::from_u32, - )(input) -} - -fn character(input: &str) -> IResult<&str, char> { - let (input, c) = none_of("\"")(input)?; - if c == '\\' { - alt(( - map_res(anychar, |c| { - Ok(match c { - '"' | '\\' | '/' => c, - 'b' => 
'\x08', - 'f' => '\x0C', - 'n' => '\n', - 'r' => '\r', - 't' => '\t', - _ => return Err(()), - }) - }), - preceded(char('u'), unicode_escape), - ))(input) - } else { - Ok((input, c)) - } -} - -fn string(input: &str) -> IResult<&str, String> { - delimited( - char('"'), - fold_many0(character, String::new, |mut string, c| { - string.push(c); - string - }), - char('"'), - )(input) -} - -fn ws<'a, O, E: ParseError<&'a str>, F: Parser<&'a str, O, E>>(f: F) -> impl Parser<&'a str, O, E> { - delimited(multispace0, f, multispace0) -} - -fn array(input: &str) -> IResult<&str, Vec> { - delimited( - char('['), - ws(separated_list0(ws(char(',')), json_value)), - char(']'), - )(input) -} - -fn object(input: &str) -> IResult<&str, HashMap> { - map( - delimited( - char('{'), - ws(separated_list0( - ws(char(',')), - separated_pair(string, ws(char(':')), json_value), - )), - char('}'), - ), - |key_values| key_values.into_iter().collect(), - )(input) -} - -fn json_value(input: &str) -> IResult<&str, JsonValue> { - use JsonValue::*; - - alt(( - value(Null, tag("null")), - map(boolean, Bool), - map(string, Str), - map(double, Num), - map(array, Array), - map(object, Object), - ))(input) -} - -fn json(input: &str) -> IResult<&str, JsonValue> { - ws(json_value).parse(input) -} - -#[test] -fn json_string() { - assert_eq!(string("\"\""), Ok(("", "".to_string()))); - assert_eq!(string("\"abc\""), Ok(("", "abc".to_string()))); - assert_eq!( - string("\"abc\\\"\\\\\\/\\b\\f\\n\\r\\t\\u0001\\u2014\u{2014}def\""), - Ok(("", "abc\"\\/\x08\x0C\n\r\t\x01——def".to_string())), - ); - assert_eq!(string("\"\\uD83D\\uDE10\""), Ok(("", "😐".to_string()))); - - assert!(string("\"").is_err()); - assert!(string("\"abc").is_err()); - assert!(string("\"\\\"").is_err()); - assert!(string("\"\\u123\"").is_err()); - assert!(string("\"\\uD800\"").is_err()); - assert!(string("\"\\uD800\\uD800\"").is_err()); - assert!(string("\"\\uDC00\"").is_err()); -} - -#[test] -fn json_object() { - use JsonValue::*; - - let input = r#"{"a":42,"b":"x"}"#; - - let expected = Object( - vec![ - ("a".to_string(), Num(42.0)), - ("b".to_string(), Str("x".to_string())), - ] - .into_iter() - .collect(), - ); - - assert_eq!(json(input), Ok(("", expected))); -} - -#[test] -fn json_array() { - use JsonValue::*; - - let input = r#"[42,"x"]"#; - - let expected = Array(vec![Num(42.0), Str("x".to_string())]); - - assert_eq!(json(input), Ok(("", expected))); -} - -#[test] -fn json_whitespace() { - use JsonValue::*; - - let input = r#" - { - "null" : null, - "true" :true , - "false": false , - "number" : 123e4 , - "string" : " abc 123 " , - "array" : [ false , 1 , "two" ] , - "object" : { "a" : 1.0 , "b" : "c" } , - "empty_array" : [ ] , - "empty_object" : { } - } - "#; - - assert_eq!( - json(input), - Ok(( - "", - Object( - vec![ - ("null".to_string(), Null), - ("true".to_string(), Bool(true)), - ("false".to_string(), Bool(false)), - ("number".to_string(), Num(123e4)), - ("string".to_string(), Str(" abc 123 ".to_string())), - ( - "array".to_string(), - Array(vec![Bool(false), Num(1.0), Str("two".to_string())]) - ), - ( - "object".to_string(), - Object( - vec![ - ("a".to_string(), Num(1.0)), - ("b".to_string(), Str("c".to_string())), - ] - .into_iter() - .collect() - ) - ), - ("empty_array".to_string(), Array(vec![]),), - ("empty_object".to_string(), Object(HashMap::new()),), - ] - .into_iter() - .collect() - ) - )) - ); -} diff --git a/vendor/nom/tests/mp4.rs b/vendor/nom/tests/mp4.rs deleted file mode 100644 index 852bf29555f6ea..00000000000000 --- 
a/vendor/nom/tests/mp4.rs +++ /dev/null @@ -1,320 +0,0 @@ -#![allow(dead_code)] - -use nom::{ - branch::alt, - bytes::streaming::{tag, take}, - combinator::{map, map_res}, - error::ErrorKind, - multi::many0, - number::streaming::{be_f32, be_u16, be_u32, be_u64}, - Err, IResult, Needed, -}; - -use std::str; - -fn mp4_box(input: &[u8]) -> IResult<&[u8], &[u8]> { - match be_u32(input) { - Ok((i, offset)) => { - let sz: usize = offset as usize; - if i.len() >= sz - 4 { - Ok((&i[(sz - 4)..], &i[0..(sz - 4)])) - } else { - Err(Err::Incomplete(Needed::new(offset as usize + 4))) - } - } - Err(e) => Err(e), - } -} - -#[cfg_attr(rustfmt, rustfmt_skip)] -#[derive(PartialEq,Eq,Debug)] -struct FileType<'a> { - major_brand: &'a str, - major_brand_version: &'a [u8], - compatible_brands: Vec<&'a str> -} - -#[cfg_attr(rustfmt, rustfmt_skip)] -#[allow(non_snake_case)] -#[derive(Debug,Clone)] -pub struct Mvhd32 { - version_flags: u32, // actually: - // version: u8, - // flags: u24 // 3 bytes - created_date: u32, - modified_date: u32, - scale: u32, - duration: u32, - speed: f32, - volume: u16, // actually a 2 bytes decimal - /* 10 bytes reserved */ - scaleA: f32, - rotateB: f32, - angleU: f32, - rotateC: f32, - scaleD: f32, - angleV: f32, - positionX: f32, - positionY: f32, - scaleW: f32, - preview: u64, - poster: u32, - selection: u64, - current_time: u32, - track_id: u32 -} - -#[cfg_attr(rustfmt, rustfmt_skip)] -#[allow(non_snake_case)] -#[derive(Debug,Clone)] -pub struct Mvhd64 { - version_flags: u32, // actually: - // version: u8, - // flags: u24 // 3 bytes - created_date: u64, - modified_date: u64, - scale: u32, - duration: u64, - speed: f32, - volume: u16, // actually a 2 bytes decimal - /* 10 bytes reserved */ - scaleA: f32, - rotateB: f32, - angleU: f32, - rotateC: f32, - scaleD: f32, - angleV: f32, - positionX: f32, - positionY: f32, - scaleW: f32, - preview: u64, - poster: u32, - selection: u64, - current_time: u32, - track_id: u32 -} - -#[cfg_attr(rustfmt, rustfmt_skip)] -fn mvhd32(i: &[u8]) -> IResult<&[u8], MvhdBox> { - let (i, version_flags) = be_u32(i)?; - let (i, created_date) = be_u32(i)?; - let (i, modified_date) = be_u32(i)?; - let (i, scale) = be_u32(i)?; - let (i, duration) = be_u32(i)?; - let (i, speed) = be_f32(i)?; - let (i, volume) = be_u16(i)?; // actually a 2 bytes decimal - let (i, _) = take(10_usize)(i)?; - let (i, scale_a) = be_f32(i)?; - let (i, rotate_b) = be_f32(i)?; - let (i, angle_u) = be_f32(i)?; - let (i, rotate_c) = be_f32(i)?; - let (i, scale_d) = be_f32(i)?; - let (i, angle_v) = be_f32(i)?; - let (i, position_x) = be_f32(i)?; - let (i, position_y) = be_f32(i)?; - let (i, scale_w) = be_f32(i)?; - let (i, preview) = be_u64(i)?; - let (i, poster) = be_u32(i)?; - let (i, selection) = be_u64(i)?; - let (i, current_time) = be_u32(i)?; - let (i, track_id) = be_u32(i)?; - - let mvhd_box = MvhdBox::M32(Mvhd32 { - version_flags, - created_date, - modified_date, - scale, - duration, - speed, - volume, - scaleA: scale_a, - rotateB: rotate_b, - angleU: angle_u, - rotateC: rotate_c, - scaleD: scale_d, - angleV: angle_v, - positionX: position_x, - positionY: position_y, - scaleW: scale_w, - preview, - poster, - selection, - current_time, - track_id, - }); - - Ok((i, mvhd_box)) -} - -#[cfg_attr(rustfmt, rustfmt_skip)] -fn mvhd64(i: &[u8]) -> IResult<&[u8], MvhdBox> { - let (i, version_flags) = be_u32(i)?; - let (i, created_date) = be_u64(i)?; - let (i, modified_date) = be_u64(i)?; - let (i, scale) = be_u32(i)?; - let (i, duration) = be_u64(i)?; - let (i, speed) = be_f32(i)?; - let (i, 
volume) = be_u16(i)?; // actually a 2 bytes decimal - let (i, _) = take(10_usize)(i)?; - let (i, scale_a) = be_f32(i)?; - let (i, rotate_b) = be_f32(i)?; - let (i, angle_u) = be_f32(i)?; - let (i, rotate_c) = be_f32(i)?; - let (i, scale_d) = be_f32(i)?; - let (i, angle_v) = be_f32(i)?; - let (i, position_x) = be_f32(i)?; - let (i, position_y) = be_f32(i)?; - let (i, scale_w) = be_f32(i)?; - let (i, preview) = be_u64(i)?; - let (i, poster) = be_u32(i)?; - let (i, selection) = be_u64(i)?; - let (i, current_time) = be_u32(i)?; - let (i, track_id) = be_u32(i)?; - - let mvhd_box = MvhdBox::M64(Mvhd64 { - version_flags, - created_date, - modified_date, - scale, - duration, - speed, - volume, - scaleA: scale_a, - rotateB: rotate_b, - angleU: angle_u, - rotateC: rotate_c, - scaleD: scale_d, - angleV: angle_v, - positionX: position_x, - positionY: position_y, - scaleW: scale_w, - preview, - poster, - selection, - current_time, - track_id, - }); - - Ok((i, mvhd_box)) -} - -#[derive(Debug, Clone)] -pub enum MvhdBox { - M32(Mvhd32), - M64(Mvhd64), -} - -#[derive(Debug, Clone)] -pub enum MoovBox { - Mdra, - Dref, - Cmov, - Rmra, - Iods, - Mvhd(MvhdBox), - Clip, - Trak, - Udta, -} - -#[derive(Debug)] -enum MP4BoxType { - Ftyp, - Moov, - Mdat, - Free, - Skip, - Wide, - Mdra, - Dref, - Cmov, - Rmra, - Iods, - Mvhd, - Clip, - Trak, - Udta, - Unknown, -} - -#[derive(Debug)] -struct MP4BoxHeader { - length: u32, - tag: MP4BoxType, -} - -fn brand_name(input: &[u8]) -> IResult<&[u8], &str> { - map_res(take(4_usize), str::from_utf8)(input) -} - -fn filetype_parser(input: &[u8]) -> IResult<&[u8], FileType<'_>> { - let (i, name) = brand_name(input)?; - let (i, version) = take(4_usize)(i)?; - let (i, brands) = many0(brand_name)(i)?; - - let ft = FileType { - major_brand: name, - major_brand_version: version, - compatible_brands: brands, - }; - Ok((i, ft)) -} - -fn mvhd_box(input: &[u8]) -> IResult<&[u8], MvhdBox> { - let res = if input.len() < 100 { - Err(Err::Incomplete(Needed::new(100))) - } else if input.len() == 100 { - mvhd32(input) - } else if input.len() == 112 { - mvhd64(input) - } else { - Err(Err::Error(nom::error_position!(input, ErrorKind::TooLarge))) - }; - println!("res: {:?}", res); - res -} - -fn unknown_box_type(input: &[u8]) -> IResult<&[u8], MP4BoxType> { - Ok((input, MP4BoxType::Unknown)) -} - -fn box_type(input: &[u8]) -> IResult<&[u8], MP4BoxType> { - alt(( - map(tag("ftyp"), |_| MP4BoxType::Ftyp), - map(tag("moov"), |_| MP4BoxType::Moov), - map(tag("mdat"), |_| MP4BoxType::Mdat), - map(tag("free"), |_| MP4BoxType::Free), - map(tag("skip"), |_| MP4BoxType::Skip), - map(tag("wide"), |_| MP4BoxType::Wide), - unknown_box_type, - ))(input) -} - -// warning, an alt combinator with 9 branches containing a tag combinator -// can make the compilation very slow. 
Use functions as sub parsers, -// or split into multiple alt parsers if it gets slow -fn moov_type(input: &[u8]) -> IResult<&[u8], MP4BoxType> { - alt(( - map(tag("mdra"), |_| MP4BoxType::Mdra), - map(tag("dref"), |_| MP4BoxType::Dref), - map(tag("cmov"), |_| MP4BoxType::Cmov), - map(tag("rmra"), |_| MP4BoxType::Rmra), - map(tag("iods"), |_| MP4BoxType::Iods), - map(tag("mvhd"), |_| MP4BoxType::Mvhd), - map(tag("clip"), |_| MP4BoxType::Clip), - map(tag("trak"), |_| MP4BoxType::Trak), - map(tag("udta"), |_| MP4BoxType::Udta), - ))(input) -} - -fn box_header(input: &[u8]) -> IResult<&[u8], MP4BoxHeader> { - let (i, length) = be_u32(input)?; - let (i, tag) = box_type(i)?; - Ok((i, MP4BoxHeader { length, tag })) -} - -fn moov_header(input: &[u8]) -> IResult<&[u8], MP4BoxHeader> { - let (i, length) = be_u32(input)?; - let (i, tag) = moov_type(i)?; - Ok((i, MP4BoxHeader { length, tag })) -} diff --git a/vendor/nom/tests/multiline.rs b/vendor/nom/tests/multiline.rs deleted file mode 100644 index 7378b9e3b4ddf4..00000000000000 --- a/vendor/nom/tests/multiline.rs +++ /dev/null @@ -1,31 +0,0 @@ -use nom::{ - character::complete::{alphanumeric1 as alphanumeric, line_ending as eol}, - multi::many0, - sequence::terminated, - IResult, -}; - -pub fn end_of_line(input: &str) -> IResult<&str, &str> { - if input.is_empty() { - Ok((input, input)) - } else { - eol(input) - } -} - -pub fn read_line(input: &str) -> IResult<&str, &str> { - terminated(alphanumeric, end_of_line)(input) -} - -pub fn read_lines(input: &str) -> IResult<&str, Vec<&str>> { - many0(read_line)(input) -} - -#[cfg(feature = "alloc")] -#[test] -fn read_lines_test() { - let res = Ok(("", vec!["Duck", "Dog", "Cow"])); - - assert_eq!(read_lines("Duck\nDog\nCow\n"), res); - assert_eq!(read_lines("Duck\nDog\nCow"), res); -} diff --git a/vendor/nom/tests/overflow.rs b/vendor/nom/tests/overflow.rs deleted file mode 100644 index ea513bb395bffe..00000000000000 --- a/vendor/nom/tests/overflow.rs +++ /dev/null @@ -1,145 +0,0 @@ -#![cfg_attr(feature = "cargo-clippy", allow(unreadable_literal))] -#![cfg(target_pointer_width = "64")] - -use nom::bytes::streaming::take; -#[cfg(feature = "alloc")] -use nom::multi::{length_data, many0}; -#[cfg(feature = "alloc")] -use nom::number::streaming::be_u64; -use nom::sequence::tuple; -use nom::{Err, IResult, Needed}; - -// Parser definition - -// We request a length that would trigger an overflow if computing consumed + requested -fn parser02(i: &[u8]) -> IResult<&[u8], (&[u8], &[u8])> { - tuple((take(1_usize), take(18446744073709551615_usize)))(i) -} - -#[test] -fn overflow_incomplete_tuple() { - assert_eq!( - parser02(&b"3"[..]), - Err(Err::Incomplete(Needed::new(18446744073709551615))) - ); -} - -#[test] -#[cfg(feature = "alloc")] -fn overflow_incomplete_length_bytes() { - fn multi(i: &[u8]) -> IResult<&[u8], Vec<&[u8]>> { - many0(length_data(be_u64))(i) - } - - // Trigger an overflow in length_data - assert_eq!( - multi(&b"\x00\x00\x00\x00\x00\x00\x00\x01\xaa\xff\xff\xff\xff\xff\xff\xff\xff"[..]), - Err(Err::Incomplete(Needed::new(18446744073709551615))) - ); -} - -#[test] -#[cfg(feature = "alloc")] -fn overflow_incomplete_many0() { - fn multi(i: &[u8]) -> IResult<&[u8], Vec<&[u8]>> { - many0(length_data(be_u64))(i) - } - - // Trigger an overflow in many0 - assert_eq!( - multi(&b"\x00\x00\x00\x00\x00\x00\x00\x01\xaa\xff\xff\xff\xff\xff\xff\xff\xef"[..]), - Err(Err::Incomplete(Needed::new(18446744073709551599))) - ); -} - -#[test] -#[cfg(feature = "alloc")] -fn overflow_incomplete_many1() { - use 
nom::multi::many1; - - fn multi(i: &[u8]) -> IResult<&[u8], Vec<&[u8]>> { - many1(length_data(be_u64))(i) - } - - // Trigger an overflow in many1 - assert_eq!( - multi(&b"\x00\x00\x00\x00\x00\x00\x00\x01\xaa\xff\xff\xff\xff\xff\xff\xff\xef"[..]), - Err(Err::Incomplete(Needed::new(18446744073709551599))) - ); -} - -#[test] -#[cfg(feature = "alloc")] -fn overflow_incomplete_many_till() { - use nom::{bytes::complete::tag, multi::many_till}; - - fn multi(i: &[u8]) -> IResult<&[u8], (Vec<&[u8]>, &[u8])> { - many_till(length_data(be_u64), tag("abc"))(i) - } - - // Trigger an overflow in many_till - assert_eq!( - multi(&b"\x00\x00\x00\x00\x00\x00\x00\x01\xaa\xff\xff\xff\xff\xff\xff\xff\xef"[..]), - Err(Err::Incomplete(Needed::new(18446744073709551599))) - ); -} - -#[test] -#[cfg(feature = "alloc")] -fn overflow_incomplete_many_m_n() { - use nom::multi::many_m_n; - - fn multi(i: &[u8]) -> IResult<&[u8], Vec<&[u8]>> { - many_m_n(2, 4, length_data(be_u64))(i) - } - - // Trigger an overflow in many_m_n - assert_eq!( - multi(&b"\x00\x00\x00\x00\x00\x00\x00\x01\xaa\xff\xff\xff\xff\xff\xff\xff\xef"[..]), - Err(Err::Incomplete(Needed::new(18446744073709551599))) - ); -} - -#[test] -#[cfg(feature = "alloc")] -fn overflow_incomplete_count() { - use nom::multi::count; - - fn counter(i: &[u8]) -> IResult<&[u8], Vec<&[u8]>> { - count(length_data(be_u64), 2)(i) - } - - assert_eq!( - counter(&b"\x00\x00\x00\x00\x00\x00\x00\x01\xaa\xff\xff\xff\xff\xff\xff\xff\xef"[..]), - Err(Err::Incomplete(Needed::new(18446744073709551599))) - ); -} - -#[test] -#[cfg(feature = "alloc")] -fn overflow_incomplete_length_count() { - use nom::multi::length_count; - use nom::number::streaming::be_u8; - - fn multi(i: &[u8]) -> IResult<&[u8], Vec<&[u8]>> { - length_count(be_u8, length_data(be_u64))(i) - } - - assert_eq!( - multi(&b"\x04\x00\x00\x00\x00\x00\x00\x00\x01\xaa\xff\xff\xff\xff\xff\xff\xff\xee"[..]), - Err(Err::Incomplete(Needed::new(18446744073709551598))) - ); -} - -#[test] -#[cfg(feature = "alloc")] -fn overflow_incomplete_length_data() { - fn multi(i: &[u8]) -> IResult<&[u8], Vec<&[u8]>> { - many0(length_data(be_u64))(i) - } - - assert_eq!( - multi(&b"\x00\x00\x00\x00\x00\x00\x00\x01\xaa\xff\xff\xff\xff\xff\xff\xff\xff"[..]), - Err(Err::Incomplete(Needed::new(18446744073709551615))) - ); -} diff --git a/vendor/nom/tests/reborrow_fold.rs b/vendor/nom/tests/reborrow_fold.rs deleted file mode 100644 index 486617e427268d..00000000000000 --- a/vendor/nom/tests/reborrow_fold.rs +++ /dev/null @@ -1,31 +0,0 @@ -#![allow(dead_code)] -// #![allow(unused_variables)] - -use std::str; - -use nom::bytes::complete::is_not; -use nom::character::complete::char; -use nom::combinator::{map, map_res}; -use nom::multi::fold_many0; -use nom::sequence::delimited; -use nom::IResult; - -fn atom<'a>(_tomb: &'a mut ()) -> impl FnMut(&'a [u8]) -> IResult<&'a [u8], String> { - move |input| { - map( - map_res(is_not(" \t\r\n"), str::from_utf8), - ToString::to_string, - )(input) - } -} - -// FIXME: should we support the use case of borrowing data mutably in a parser? 
-fn list<'a>(i: &'a [u8], tomb: &'a mut ()) -> IResult<&'a [u8], String> { - delimited( - char('('), - fold_many0(atom(tomb), String::new, |acc: String, next: String| { - acc + next.as_str() - }), - char(')'), - )(i) -} diff --git a/vendor/prettyplease/.cargo-checksum.json b/vendor/prettyplease/.cargo-checksum.json deleted file mode 100644 index 16a3962905f02a..00000000000000 --- a/vendor/prettyplease/.cargo-checksum.json +++ /dev/null @@ -1 +0,0 @@ -{"files":{".cargo_vcs_info.json":"a42290dfff809a03b196211cde4dccabf08a93b4957620c610ba293bfed0f910",".github/FUNDING.yml":"b017158736b3c9751a2d21edfce7fe61c8954e2fced8da8dd3013c2f3e295bd9",".github/workflows/ci.yml":"d39fda7f894021a5e8f3906e835be0d0320afd3e458f3756c2e1587cbbf051ee","Cargo.lock":"9a073091a7cdc7f92ae7002d1ad6fc6c0ca919aac280322318096f269abfe629","Cargo.toml":"fadc7182205c8cebaea0545496490a549f3d102d17d7b13ae26b2f6c24828a0b","Cargo.toml.orig":"1163b5012ba2567947bc3f662645c7769428f57f8ed033352579d46acfbe7b3f","LICENSE-APACHE":"62c7a1e35f56406896d7aa7ca52d0cc0d272ac022b5d2796e7d6905db8a3636a","LICENSE-MIT":"23f18e03dc49df91622fe2a76176497404e46ced8a715d9d2b67a7446571cca3","README.md":"a7e6d152cdc6ea603077e50b8d55af374d9d21fd9f62d08a008588b17d785e6e","build.rs":"79a5b2d260aa97aeac7105fbfa00774982f825cd708c100ea96d01c39974bb88","examples/.tokeignore":"23ab7a9f335a33747ebbc74f39c81b145cb7928e7fefe8055ca4db0a4fac7557","examples/input.rs":"53350088f12a346a99034af41ef432dedcc9e5d581c5592d9aae3807c42656c1","examples/output.prettyplease.rs":"fa63c118daadb64c456ec5b8d5e46e5d7fabbbeb6a6e61a08eabc23360a18fbd","examples/output.rustc.rs":"04647e9b01f2aa85982f849c2d897acf3b6931121c1ef953de5fb6a67b80e05a","examples/output.rustfmt.rs":"914a9aea1c51e097bfd80c9af4011811e6126c9df5fb0eac3d40b1203fba7c58","src/algorithm.rs":"901c91416d7526038bfad30e0066295a03d2bb995830016ace49a41540079010","src/attr.rs":"0a5c64b1c1f6fe4944c1d805c528ee4a9a8c6223e875afe9f48371cba66732ee","src/classify.rs":"2ce2d63ad9071aac10b1037e6382703736e0147d96b3ccf32a53182d12883f1b","src/convenience.rs":"dd392b009b691d3587c7d8e3caeaacf450303c4223792b5f89c336358e371c39","src/data.rs":"5bc2dce1cfa1aa5c1324ccdc2d76a6bd5df2382530c7e863d2bb50dea60cc4bc","src/expr.rs":"c73157238a80b0fb9a1949c6250cbb01f2df9217770f263dffcf5e5fbb570296","src/file.rs":"5689efa3c5959a6a0d8cfc2c13bf8a37ab0669e2b81dbded3f3c28884a88fca0","src/fixup.rs":"ecf87543c342fffc79bae54e4fa174cbfd5c341817315caa3b95cce0d49ebf7c","src/generics.rs":"1d33884399edf9ebb26afb998c5257b6d5238a77956b646f8e9fd728a6decee8","src/item.rs":"731732e0084c29ed77aa52ccc5e9cb970ccbfb3652035e329e4590f9b9274e8d","src/iter.rs":"38b2cd3b38719c6024fb6b3aa739f6f8736c83193fd21e2365d4f6c27bc41666","src/lib.rs":"ce24c5d146c17b70241b2b14a0b47a745af943fa22753f40975abd0e608dc01a","src/lifetime.rs":"6d420430168185b2da3409bc38a45f63cced9443915f04e6aec71367fc070dcf","src/lit.rs":"9ea6d25533e64df4ff01c084fa1c31ddf64fb3b159409eec7d80dbf281e5171e","src/mac.rs":"c1f8f9d60a6d116a63a7aa86d3dafdc5279c030b7f6a3e9bf119df109a913c8e","src/pat.rs":"8e53fd1b5382bb068210162bfab9921246093cfdd80dd93cd8627fcfdae39940","src/path.rs":"e73d83dc38f5c6c0c82f824da7eb090a16027f32fc40446b185580ee5e99be58","src/precedence.rs":"a8ce97ba0a25f442b5f238c64f078d70f4114b4b0f9df82764d533dd39a47abb","src/ring.rs":"517b1a02f8e0a9c1316830117daad1e30d17e1fcf6428c6b438c626aa43286ae","src/stmt.rs":"e17ab9647fed9daa4f5b2fbd007015128f2a7fc65686a988593444a37242f885","src/token.rs":"c288b1d81f2a35673d4ca1dd10d3386670b067460121df3038303e1ed73b41a7","src/ty.rs":"6fdae0aeb40d3cfb67a98f8806d0be29ad7517bf
91fb2387cda0bcdbf3075ffe","tests/test.rs":"c6f8c7830b7491fca1d56e41aa4acc6256b683a3556a48982f57ae62d38aaaa2","tests/test_precedence.rs":"de0c770b9a72e5eba8a52dcac0614d6db8ff5041ba601e1e67f113d68c9afd50"},"package":"479ca8adacdd7ce8f1fb39ce9ecccbfe93a3f1344b3d0d97f20bc0196208f62b"} \ No newline at end of file diff --git a/vendor/prettyplease/.cargo_vcs_info.json b/vendor/prettyplease/.cargo_vcs_info.json deleted file mode 100644 index f99293c21f941e..00000000000000 --- a/vendor/prettyplease/.cargo_vcs_info.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "git": { - "sha1": "c971184fa8c5ef5a2828196e35bd99469455b46b" - }, - "path_in_vcs": "" -} \ No newline at end of file diff --git a/vendor/prettyplease/.github/FUNDING.yml b/vendor/prettyplease/.github/FUNDING.yml deleted file mode 100644 index 750707701cdae9..00000000000000 --- a/vendor/prettyplease/.github/FUNDING.yml +++ /dev/null @@ -1 +0,0 @@ -github: dtolnay diff --git a/vendor/prettyplease/.github/workflows/ci.yml b/vendor/prettyplease/.github/workflows/ci.yml deleted file mode 100644 index e45d52e90582e7..00000000000000 --- a/vendor/prettyplease/.github/workflows/ci.yml +++ /dev/null @@ -1,123 +0,0 @@ -name: CI - -on: - push: - pull_request: - workflow_dispatch: - schedule: [cron: "40 1 * * *"] - -permissions: - contents: read - -env: - RUSTFLAGS: -Dwarnings - -jobs: - pre_ci: - uses: dtolnay/.github/.github/workflows/pre_ci.yml@master - - test: - name: Rust ${{matrix.rust}} - needs: pre_ci - if: needs.pre_ci.outputs.continue - runs-on: ubuntu-latest - strategy: - fail-fast: false - matrix: - rust: [nightly, beta, stable, 1.62.0] - timeout-minutes: 45 - steps: - - uses: actions/checkout@v4 - - uses: dtolnay/rust-toolchain@master - with: - toolchain: ${{matrix.rust}} - - run: cargo check - - run: cargo check --features verbatim - - run: cargo test - env: - RUSTFLAGS: ${{env.RUSTFLAGS}} ${{matrix.rust == 'nightly' && '--cfg exhaustive' || ''}} - - run: cargo test --release --test test_precedence - env: - RUSTFLAGS: ${{env.RUSTFLAGS}} ${{matrix.rust == 'nightly' && '--cfg exhaustive' || ''}} - if: matrix.rust != '1.62.0' - - uses: actions/upload-artifact@v4 - if: matrix.rust == 'nightly' && always() - with: - name: Cargo.lock - path: Cargo.lock - continue-on-error: true - - examples: - name: Examples - needs: pre_ci - if: needs.pre_ci.outputs.continue - runs-on: ubuntu-latest - timeout-minutes: 45 - steps: - - uses: actions/checkout@v4 - - uses: dtolnay/rust-toolchain@nightly - with: - components: llvm-tools, rustc-dev, rustfmt - - run: cargo run --manifest-path examples/update/Cargo.toml - - run: git diff --exit-code - - run: cargo run --manifest-path cargo-expand/update/Cargo.toml - - run: git diff --exit-code - - fuzz: - name: Fuzz - needs: pre_ci - if: needs.pre_ci.outputs.continue - runs-on: ubuntu-latest - timeout-minutes: 45 - steps: - - uses: actions/checkout@v4 - - uses: dtolnay/rust-toolchain@nightly - - uses: dtolnay/install@cargo-fuzz - - run: cargo fuzz check - - minimal: - name: Minimal versions - needs: pre_ci - if: needs.pre_ci.outputs.continue - runs-on: ubuntu-latest - timeout-minutes: 45 - steps: - - uses: actions/checkout@v4 - - uses: dtolnay/rust-toolchain@nightly - - run: cargo generate-lockfile -Z minimal-versions - - run: cargo check --locked - - doc: - name: Documentation - needs: pre_ci - if: needs.pre_ci.outputs.continue - runs-on: ubuntu-latest - timeout-minutes: 45 - env: - RUSTDOCFLAGS: -Dwarnings - steps: - - uses: actions/checkout@v4 - - uses: dtolnay/rust-toolchain@nightly - - uses: dtolnay/install@cargo-docs-rs 
- - run: cargo docs-rs - - clippy: - name: Clippy - runs-on: ubuntu-latest - if: github.event_name != 'pull_request' - timeout-minutes: 45 - steps: - - uses: actions/checkout@v4 - - uses: dtolnay/rust-toolchain@clippy - - run: cargo clippy --features verbatim -- -Dclippy::all -Dclippy::pedantic - - outdated: - name: Outdated - runs-on: ubuntu-latest - if: github.event_name != 'pull_request' - timeout-minutes: 45 - steps: - - uses: actions/checkout@v4 - - uses: dtolnay/rust-toolchain@stable - - uses: dtolnay/install@cargo-outdated - - run: cargo outdated --workspace --exit-code 1 diff --git a/vendor/prettyplease/Cargo.lock b/vendor/prettyplease/Cargo.lock deleted file mode 100644 index 8eb36c84d7020e..00000000000000 --- a/vendor/prettyplease/Cargo.lock +++ /dev/null @@ -1,54 +0,0 @@ -# This file is automatically @generated by Cargo. -# It is not intended for manual editing. -version = 3 - -[[package]] -name = "indoc" -version = "2.0.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4c7245a08504955605670dbf141fceab975f15ca21570696aebe9d2e71576bd" - -[[package]] -name = "prettyplease" -version = "0.2.37" -dependencies = [ - "indoc", - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "proc-macro2" -version = "1.0.101" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89ae43fd86e4158d6db51ad8e2b80f313af9cc74f5c0e03ccb87de09998732de" -dependencies = [ - "unicode-ident", -] - -[[package]] -name = "quote" -version = "1.0.40" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1885c039570dc00dcb4ff087a89e185fd56bae234ddc7f056a945bf36467248d" -dependencies = [ - "proc-macro2", -] - -[[package]] -name = "syn" -version = "2.0.106" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ede7c438028d4436d71104916910f5bb611972c5cfd7f89b8300a8186e6fada6" -dependencies = [ - "proc-macro2", - "quote", - "unicode-ident", -] - -[[package]] -name = "unicode-ident" -version = "1.0.18" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a5f39404a5da50712a4c1eecf25e90dd62b613502b7e925fd4e4d19b5c96512" diff --git a/vendor/prettyplease/Cargo.toml b/vendor/prettyplease/Cargo.toml deleted file mode 100644 index ae2438cf4d2655..00000000000000 --- a/vendor/prettyplease/Cargo.toml +++ /dev/null @@ -1,90 +0,0 @@ -# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO -# -# When uploading crates to the registry Cargo will automatically -# "normalize" Cargo.toml files for maximal compatibility -# with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g., crates.io) dependencies. -# -# If you are reading this file be aware that the original Cargo.toml -# will likely look very different (and much more reasonable). -# See Cargo.toml.orig for the original contents. 
- -[package] -edition = "2021" -rust-version = "1.62" -name = "prettyplease" -version = "0.2.37" -authors = ["David Tolnay "] -build = "build.rs" -links = "prettyplease02" -exclude = ["cargo-expand"] -autolib = false -autobins = false -autoexamples = false -autotests = false -autobenches = false -description = "A minimal `syn` syntax tree pretty-printer" -documentation = "https://docs.rs/prettyplease" -readme = "README.md" -keywords = ["rustfmt"] -categories = ["development-tools"] -license = "MIT OR Apache-2.0" -repository = "https://github.com/dtolnay/prettyplease" - -[package.metadata.docs.rs] -targets = ["x86_64-unknown-linux-gnu"] -rustdoc-args = [ - "--generate-link-to-definition", - "--extern-html-root-url=core=https://doc.rust-lang.org", - "--extern-html-root-url=alloc=https://doc.rust-lang.org", - "--extern-html-root-url=std=https://doc.rust-lang.org", -] - -[package.metadata.playground] -features = ["verbatim"] - -[features] -verbatim = ["syn/parsing"] - -[lib] -name = "prettyplease" -path = "src/lib.rs" - -[[test]] -name = "test" -path = "tests/test.rs" - -[[test]] -name = "test_precedence" -path = "tests/test_precedence.rs" - -[dependencies.proc-macro2] -version = "1.0.80" -default-features = false - -[dependencies.syn] -version = "2.0.105" -features = ["full"] -default-features = false - -[dev-dependencies.indoc] -version = "2" - -[dev-dependencies.proc-macro2] -version = "1.0.80" -default-features = false - -[dev-dependencies.quote] -version = "1.0.35" -default-features = false - -[dev-dependencies.syn] -version = "2.0.105" -features = [ - "clone-impls", - "extra-traits", - "parsing", - "printing", - "visit-mut", -] -default-features = false diff --git a/vendor/prettyplease/LICENSE-APACHE b/vendor/prettyplease/LICENSE-APACHE deleted file mode 100644 index 1b5ec8b78e237b..00000000000000 --- a/vendor/prettyplease/LICENSE-APACHE +++ /dev/null @@ -1,176 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. 
- - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - -2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - -3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - -4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - -5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - -6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - -8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - -END OF TERMS AND CONDITIONS diff --git a/vendor/prettyplease/LICENSE-MIT b/vendor/prettyplease/LICENSE-MIT deleted file mode 100644 index 31aa79387f27e7..00000000000000 --- a/vendor/prettyplease/LICENSE-MIT +++ /dev/null @@ -1,23 +0,0 @@ -Permission is hereby granted, free of charge, to any -person obtaining a copy of this software and associated -documentation files (the "Software"), to deal in the -Software without restriction, including without -limitation the rights to use, copy, modify, merge, -publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software -is furnished to do so, subject to the following -conditions: - -The above copyright notice and this permission notice -shall be included in all copies or substantial portions -of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -DEALINGS IN THE SOFTWARE. diff --git a/vendor/prettyplease/README.md b/vendor/prettyplease/README.md deleted file mode 100644 index 4584c48587644b..00000000000000 --- a/vendor/prettyplease/README.md +++ /dev/null @@ -1,312 +0,0 @@ -prettyplease::unparse -===================== - -[github](https://github.com/dtolnay/prettyplease) -[crates.io](https://crates.io/crates/prettyplease) -[docs.rs](https://docs.rs/prettyplease) -[build status](https://github.com/dtolnay/prettyplease/actions?query=branch%3Amaster) - -A minimal `syn` syntax tree pretty-printer. - -
-
-## Overview
-
-This is a pretty-printer to turn a `syn` syntax tree into a `String` of
-well-formatted source code. In contrast to rustfmt, this library is intended to
-be suitable for arbitrary generated code.
-
-Rustfmt prioritizes high-quality output that is impeccable enough that you'd be
-comfortable spending your career staring at its output — but that means
-some heavyweight algorithms, and it has a tendency to bail out on code that is
-hard to format (for example [rustfmt#3697], and there are dozens more issues
-like it). That's not necessarily a big deal for human-generated code because
-when code gets highly nested, the human will naturally be inclined to refactor
-into more easily formattable code. But for generated code, having the formatter
-just give up leaves it totally unreadable.
-
-[rustfmt#3697]: https://github.com/rust-lang/rustfmt/issues/3697
-
-This library is designed using the simplest possible algorithm and data
-structures that can deliver about 95% of the quality of rustfmt-formatted
-output. In my experience testing real-world code, approximately 97-98% of output
-lines come out identical between rustfmt's formatting and this crate's. The rest
-have slightly different linebreak decisions, but still clearly follow the
-dominant modern Rust style.
-
-The tradeoffs made by this crate are a good fit for generated code that you will
-*not* spend your career staring at. For example, the output of `bindgen`, or the
-output of `cargo-expand`. In those cases it's more important that the whole
-thing be formattable without the formatter giving up, than that it be flawless.
-
-
- -## Feature matrix - -Here are a few superficial comparisons of this crate against the AST -pretty-printer built into rustc, and rustfmt. The sections below go into more -detail comparing the output of each of these libraries. - -| | prettyplease | rustc | rustfmt | -|:---|:---:|:---:|:---:| -| non-pathological behavior on big or generated code | 💚 | ❌ | ❌ | -| idiomatic modern formatting ("locally indistinguishable from rustfmt") | 💚 | ❌ | 💚 | -| throughput | 60 MB/s | 39 MB/s | 2.8 MB/s | -| number of dependencies | 3 | 72 | 66 | -| compile time including dependencies | 2.4 sec | 23.1 sec | 29.8 sec | -| buildable using a stable Rust compiler | 💚 | ❌ | ❌ | -| published to crates.io | 💚 | ❌ | ❌ | -| extensively configurable output | ❌ | ❌ | 💚 | -| intended to accommodate hand-maintained source code | ❌ | ❌ | 💚 | - -
- -## Comparison to rustfmt - -- [input.rs](https://github.com/dtolnay/prettyplease/blob/0.1.0/examples/input.rs) -- [output.prettyplease.rs](https://github.com/dtolnay/prettyplease/blob/0.1.0/examples/output.prettyplease.rs) -- [output.rustfmt.rs](https://github.com/dtolnay/prettyplease/blob/0.1.0/examples/output.rustfmt.rs) - -If you weren't told which output file is which, it would be practically -impossible to tell — **except** for line 435 in the rustfmt output, which -is more than 1000 characters long because rustfmt just gave up formatting that -part of the file: - -```rust - match segments[5] { - 0 => write!(f, "::{}", ipv4), - 0xffff => write!(f, "::ffff:{}", ipv4), - _ => unreachable!(), - } - } else { # [derive (Copy , Clone , Default)] struct Span { start : usize , len : usize , } let zeroes = { let mut longest = Span :: default () ; let mut current = Span :: default () ; for (i , & segment) in segments . iter () . enumerate () { if segment == 0 { if current . len == 0 { current . start = i ; } current . len += 1 ; if current . len > longest . len { longest = current ; } } else { current = Span :: default () ; } } longest } ; # [doc = " Write a colon-separated part of the address"] # [inline] fn fmt_subslice (f : & mut fmt :: Formatter < '_ > , chunk : & [u16]) -> fmt :: Result { if let Some ((first , tail)) = chunk . split_first () { write ! (f , "{:x}" , first) ? ; for segment in tail { f . write_char (':') ? ; write ! (f , "{:x}" , segment) ? ; } } Ok (()) } if zeroes . len > 1 { fmt_subslice (f , & segments [.. zeroes . start]) ? ; f . write_str ("::") ? ; fmt_subslice (f , & segments [zeroes . start + zeroes . len ..]) } else { fmt_subslice (f , & segments) } } - } else { - const IPV6_BUF_LEN: usize = (4 * 8) + 7; - let mut buf = [0u8; IPV6_BUF_LEN]; - let mut buf_slice = &mut buf[..]; -``` - -This is a pretty typical manifestation of rustfmt bailing out in generated code -— a chunk of the input ends up on one line. The other manifestation is -that you're working on some code, running rustfmt on save like a conscientious -developer, but after a while notice it isn't doing anything. You introduce an -intentional formatting issue, like a stray indent or semicolon, and run rustfmt -to check your suspicion. Nope, it doesn't get cleaned up — rustfmt is just -not formatting the part of the file you are working on. - -The prettyplease library is designed to have no pathological cases that force a -bail out; the entire input you give it will get formatted in some "good enough" -form. - -Separately, rustfmt can be problematic to integrate into projects. It's written -using rustc's internal syntax tree, so it can't be built by a stable compiler. -Its releases are not regularly published to crates.io, so in Cargo builds you'd -need to depend on it as a git dependency, which precludes publishing your crate -to crates.io also. You can shell out to a `rustfmt` binary, but that'll be -whatever rustfmt version is installed on each developer's system (if any), which -can lead to spurious diffs in checked-in generated code formatted by different -versions. In contrast prettyplease is designed to be easy to pull in as a -library, and compiles fast. - -
- -## Comparison to rustc_ast_pretty - -- [input.rs](https://github.com/dtolnay/prettyplease/blob/0.1.0/examples/input.rs) -- [output.prettyplease.rs](https://github.com/dtolnay/prettyplease/blob/0.1.0/examples/output.prettyplease.rs) -- [output.rustc.rs](https://github.com/dtolnay/prettyplease/blob/0.1.0/examples/output.rustc.rs) - -This is the pretty-printer that gets used when rustc prints source code, such as -`rustc -Zunpretty=expanded`. It's used also by the standard library's -`stringify!` when stringifying an interpolated macro_rules AST fragment, like an -$:expr, and transitively by `dbg!` and many macros in the ecosystem. - -Rustc's formatting is mostly okay, but does not hew closely to the dominant -contemporary style of Rust formatting. Some things wouldn't ever be written on -one line, like this `match` expression, and certainly not with a comma in front -of the closing brace: - -```rust -fn eq(&self, other: &IpAddr) -> bool { - match other { IpAddr::V4(v4) => self == v4, IpAddr::V6(_) => false, } -} -``` - -Some places use non-multiple-of-4 indentation, which is definitely not the norm: - -```rust -pub const fn to_ipv6_mapped(&self) -> Ipv6Addr { - let [a, b, c, d] = self.octets(); - Ipv6Addr{inner: - c::in6_addr{s6_addr: - [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xFF, - 0xFF, a, b, c, d],},} -} -``` - -And although there isn't an egregious example of it in the link because the -input code is pretty tame, in general rustc_ast_pretty has pathological behavior -on generated code. It has a tendency to use excessive horizontal indentation and -rapidly run out of width: - -```rust -::std::io::_print(::core::fmt::Arguments::new_v1(&[""], - &match (&msg,) { - _args => - [::core::fmt::ArgumentV1::new(_args.0, - ::core::fmt::Display::fmt)], - })); -``` - -The snippets above are clearly different from modern rustfmt style. In contrast, -prettyplease is designed to have output that is practically indistinguishable -from rustfmt-formatted code. - -
- -## Example - -```rust -// [dependencies] -// prettyplease = "0.2" -// syn = { version = "2", default-features = false, features = ["full", "parsing"] } - -const INPUT: &str = stringify! { - use crate::{ - lazy::{Lazy, SyncLazy, SyncOnceCell}, panic, - sync::{ atomic::{AtomicUsize, Ordering::SeqCst}, - mpsc::channel, Mutex, }, - thread, - }; - impl<T, U> Into<U> for T where U: From<T> { - fn into(self) -> U { U::from(self) } - } -}; - -fn main() { - let syntax_tree = syn::parse_file(INPUT).unwrap(); - let formatted = prettyplease::unparse(&syntax_tree); - print!("{}", formatted); -} -``` - -
- -## Algorithm notes - -The approach and terminology used in the implementation are derived from [*Derek -C. Oppen, "Pretty Printing" (1979)*][paper], on which rustc_ast_pretty is also -based, and from rustc_ast_pretty's implementation written by Graydon Hoare in -2011 (and modernized over the years by dozens of volunteer maintainers). - -[paper]: http://i.stanford.edu/pub/cstr/reports/cs/tr/79/770/CS-TR-79-770.pdf - -The paper describes two language-agnostic interacting procedures `Scan()` and -`Print()`. Language-specific code decomposes an input data structure into a -stream of `string` and `break` tokens, and `begin` and `end` tokens for -grouping. Each `begin`–`end` range may be identified as either "consistent -breaking" or "inconsistent breaking". If a group is consistently breaking, then -if the whole contents do not fit on the line, *every* `break` token in the group -will receive a linebreak. This is appropriate, for example, for Rust struct -literals, or arguments of a function call. If a group is inconsistently -breaking, then the `string` tokens in the group are greedily placed on the line -until out of space, and linebroken only at those `break` tokens for which the -next string would not fit. For example, this is appropriate for the contents of -a braced `use` statement in Rust. - -Scan's job is to efficiently accumulate sizing information about groups and -breaks. For every `begin` token we compute the distance to the matched `end` -token, and for every `break` we compute the distance to the next `break`. The -algorithm uses a ringbuffer to hold tokens whose size is not yet ascertained. -The maximum size of the ringbuffer is bounded by the target line length and does -not grow indefinitely, regardless of deep nesting in the input stream. That's -because once a group is sufficiently big, the precise size can no longer make a -difference to linebreak decisions and we can effectively treat it as "infinity". - -Print's job is to use the sizing information to efficiently assign a "broken" or -"not broken" status to every `begin` token. At that point the output is easily -constructed by concatenating `string` tokens and breaking at `break` tokens -contained within a broken group. - -Leveraging these primitives (i.e. cleverly placing the all-or-nothing consistent -breaks and greedy inconsistent breaks) to yield rustfmt-compatible formatting -for all of Rust's syntax tree nodes is a fun challenge. - -Here is a visualization of some Rust tokens fed into the pretty printing -algorithm. Consistently breaking `begin`—`end` pairs are represented by -`«`⁠`»`, inconsistently breaking by `‹`⁠`›`, `break` by `·`, and the -rest of the non-whitespace are `string`. - -```text -use crate::«{· -‹ lazy::«{·‹Lazy,· SyncLazy,· SyncOnceCell›·}»,· - panic,· - sync::«{· -‹ atomic::«{·‹AtomicUsize,· Ordering::SeqCst›·}»,· - mpsc::channel,· Mutex›,· - }»,· - thread›,· -}»;· -«‹«impl<«·T‹›,· U‹›·»>» Into<«·U·»>· for T›· -where· - U:‹ From<«·T·»>›,· -{· -« fn into(·«·self·») -> U {· -‹ U::from(«·self·»)›· -» }· -»}· -``` - -The algorithm described in the paper is not quite sufficient for producing -well-formatted Rust code that is locally indistinguishable from rustfmt's style. -The reason is that in the paper, the complete non-whitespace contents are -assumed to be independent of linebreak decisions, with Scan and Print being only -in control of the whitespace (spaces and line breaks). In Rust as idiomatically -formatted by rustfmt, that is not the case. 
Trailing commas are one example; the -punctuation is only known *after* the broken vs non-broken status of the -surrounding group is known: - -```rust -let _ = Struct { x: 0, y: true }; - -let _ = Struct { - x: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx, - y: yyyyyyyyyyyyyyyyyyyyyyyyyyyyyy, //<- trailing comma if the expression wrapped -}; -``` - -The formatting of `match` expressions is another case; we want small arms on the -same line as the pattern, and big arms wrapped in a brace. The presence of the -brace punctuation, comma, and semicolon are all dependent on whether the arm -fits on the line: - -```rust -match total_nanos.checked_add(entry.nanos as u64) { - Some(n) => tmp = n, //<- small arm, inline with comma - None => { - total_secs = total_secs - .checked_add(total_nanos / NANOS_PER_SEC as u64) - .expect("overflow in iter::sum over durations"); - } //<- big arm, needs brace added, and also semicolon^ -} -``` - -The printing algorithm implementation in this crate accommodates all of these -situations with conditional punctuation tokens whose selection can be deferred -and populated after it's known that the group is or is not broken. - -
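To make the consistent versus inconsistent breaking distinction concrete, here is a minimal, self-contained Rust sketch. It is not taken from prettyplease; the `Token` and `BreakMode` types, the flat-width test, and the greedy column check are illustrative assumptions that model a single group of `string` and `break` tokens under each policy.

```rust
// Illustrative sketch only: a toy printer for one flat group of tokens,
// showing consistent vs. inconsistent breaking as described above.

#[derive(Clone, Copy)]
enum BreakMode {
    Consistent,   // if the group does not fit, break at *every* Break token
    Inconsistent, // break only where the next string would overflow the line
}

enum Token {
    Str(&'static str),
    Break, // a candidate line break; renders as a single space if not taken
}

fn print_group(tokens: &[Token], mode: BreakMode, max_width: usize) -> String {
    // Width of the group if everything were placed on one line.
    let flat_width: usize = tokens
        .iter()
        .map(|t| match t {
            Token::Str(s) => s.len(),
            Token::Break => 1,
        })
        .sum();
    let group_fits = flat_width <= max_width;

    let mut out = String::new();
    let mut col = 0;
    for (i, tok) in tokens.iter().enumerate() {
        match tok {
            Token::Str(s) => {
                out.push_str(s);
                col += s.len();
            }
            Token::Break => {
                // Width of the string following this break, if any.
                let next_len = match tokens.get(i + 1) {
                    Some(Token::Str(s)) => s.len(),
                    _ => 0,
                };
                let must_break = match mode {
                    BreakMode::Consistent => !group_fits,
                    BreakMode::Inconsistent => col + 1 + next_len > max_width,
                };
                if must_break {
                    out.push('\n');
                    col = 0;
                } else {
                    out.push(' ');
                    col += 1;
                }
            }
        }
    }
    out
}

fn main() {
    let tokens = [
        Token::Str("alpha,"),
        Token::Break,
        Token::Str("beta,"),
        Token::Break,
        Token::Str("gamma,"),
        Token::Break,
        Token::Str("delta"),
    ];
    println!("{}\n---", print_group(&tokens, BreakMode::Consistent, 16));
    println!("{}", print_group(&tokens, BreakMode::Inconsistent, 16));
}
```

With a 16-column limit, the consistent group breaks at every `break` token and puts each item on its own line, while the inconsistent group packs strings greedily and wraps only where the next string would overflow, mirroring the struct-literal versus braced `use` behavior described above.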
- -#### License - - -Licensed under either of Apache License, Version -2.0 or MIT license at your option. - - -
- - -Unless you explicitly state otherwise, any contribution intentionally submitted -for inclusion in this crate by you, as defined in the Apache-2.0 license, shall -be dual licensed as above, without any additional terms or conditions. - diff --git a/vendor/prettyplease/build.rs b/vendor/prettyplease/build.rs deleted file mode 100644 index 4fc36f7468cc27..00000000000000 --- a/vendor/prettyplease/build.rs +++ /dev/null @@ -1,21 +0,0 @@ -use std::env; -use std::ffi::OsString; -use std::process; - -fn main() { - println!("cargo:rerun-if-changed=build.rs"); - - println!("cargo:rustc-check-cfg=cfg(exhaustive)"); - println!("cargo:rustc-check-cfg=cfg(prettyplease_debug)"); - println!("cargo:rustc-check-cfg=cfg(prettyplease_debug_indent)"); - - let pkg_version = cargo_env_var("CARGO_PKG_VERSION"); - println!("cargo:VERSION={}", pkg_version.to_str().unwrap()); -} - -fn cargo_env_var(key: &str) -> OsString { - env::var_os(key).unwrap_or_else(|| { - eprintln!("Environment variable ${key} is not set during execution of build script"); - process::exit(1); - }) -} diff --git a/vendor/prettyplease/examples/.tokeignore b/vendor/prettyplease/examples/.tokeignore deleted file mode 100644 index 6f5f3d11d3ed50..00000000000000 --- a/vendor/prettyplease/examples/.tokeignore +++ /dev/null @@ -1 +0,0 @@ -*.rs diff --git a/vendor/prettyplease/examples/input.rs b/vendor/prettyplease/examples/input.rs deleted file mode 100644 index ca3d9803a82aa6..00000000000000 --- a/vendor/prettyplease/examples/input.rs +++ /dev/null @@ -1 +0,0 @@ -use crate :: cmp :: Ordering ; use crate :: fmt :: { self , Write as FmtWrite } ; use crate :: hash ; use crate :: io :: Write as IoWrite ; use crate :: mem :: transmute ; use crate :: sys :: net :: netc as c ; use crate :: sys_common :: { AsInner , FromInner , IntoInner } ; # [derive (Copy , Clone , Eq , PartialEq , Hash , PartialOrd , Ord)] pub enum IpAddr { V4 (Ipv4Addr) , V6 (Ipv6Addr) , } # [derive (Copy)] pub struct Ipv4Addr { inner : c :: in_addr , } # [derive (Copy)] pub struct Ipv6Addr { inner : c :: in6_addr , } # [derive (Copy , PartialEq , Eq , Clone , Hash , Debug)] # [non_exhaustive] pub enum Ipv6MulticastScope { InterfaceLocal , LinkLocal , RealmLocal , AdminLocal , SiteLocal , OrganizationLocal , Global , } impl IpAddr { pub const fn is_unspecified (& self) -> bool { match self { IpAddr :: V4 (ip) => ip . is_unspecified () , IpAddr :: V6 (ip) => ip . is_unspecified () , } } pub const fn is_loopback (& self) -> bool { match self { IpAddr :: V4 (ip) => ip . is_loopback () , IpAddr :: V6 (ip) => ip . is_loopback () , } } pub const fn is_global (& self) -> bool { match self { IpAddr :: V4 (ip) => ip . is_global () , IpAddr :: V6 (ip) => ip . is_global () , } } pub const fn is_multicast (& self) -> bool { match self { IpAddr :: V4 (ip) => ip . is_multicast () , IpAddr :: V6 (ip) => ip . is_multicast () , } } pub const fn is_documentation (& self) -> bool { match self { IpAddr :: V4 (ip) => ip . is_documentation () , IpAddr :: V6 (ip) => ip . is_documentation () , } } pub const fn is_benchmarking (& self) -> bool { match self { IpAddr :: V4 (ip) => ip . is_benchmarking () , IpAddr :: V6 (ip) => ip . is_benchmarking () , } } pub const fn is_ipv4 (& self) -> bool { matches ! (self , IpAddr :: V4 (_)) } pub const fn is_ipv6 (& self) -> bool { matches ! (self , IpAddr :: V6 (_)) } pub const fn to_canonical (& self) -> IpAddr { match self { & v4 @ IpAddr :: V4 (_) => v4 , IpAddr :: V6 (v6) => v6 . 
to_canonical () , } } } impl Ipv4Addr { pub const fn new (a : u8 , b : u8 , c : u8 , d : u8) -> Ipv4Addr { Ipv4Addr { inner : c :: in_addr { s_addr : u32 :: from_ne_bytes ([a , b , c , d]) } } } pub const LOCALHOST : Self = Ipv4Addr :: new (127 , 0 , 0 , 1) ; # [doc (alias = "INADDR_ANY")] pub const UNSPECIFIED : Self = Ipv4Addr :: new (0 , 0 , 0 , 0) ; pub const BROADCAST : Self = Ipv4Addr :: new (255 , 255 , 255 , 255) ; pub const fn octets (& self) -> [u8 ; 4] { self . inner . s_addr . to_ne_bytes () } pub const fn is_unspecified (& self) -> bool { self . inner . s_addr == 0 } pub const fn is_loopback (& self) -> bool { self . octets () [0] == 127 } pub const fn is_private (& self) -> bool { match self . octets () { [10 , ..] => true , [172 , b , ..] if b >= 16 && b <= 31 => true , [192 , 168 , ..] => true , _ => false , } } pub const fn is_link_local (& self) -> bool { matches ! (self . octets () , [169 , 254 , ..]) } pub const fn is_global (& self) -> bool { if u32 :: from_be_bytes (self . octets ()) == 0xc0000009 || u32 :: from_be_bytes (self . octets ()) == 0xc000000a { return true ; } ! self . is_private () && ! self . is_loopback () && ! self . is_link_local () && ! self . is_broadcast () && ! self . is_documentation () && ! self . is_shared () && ! (self . octets () [0] == 192 && self . octets () [1] == 0 && self . octets () [2] == 0) && ! self . is_reserved () && ! self . is_benchmarking () && self . octets () [0] != 0 } pub const fn is_shared (& self) -> bool { self . octets () [0] == 100 && (self . octets () [1] & 0b1100_0000 == 0b0100_0000) } pub const fn is_benchmarking (& self) -> bool { self . octets () [0] == 198 && (self . octets () [1] & 0xfe) == 18 } pub const fn is_reserved (& self) -> bool { self . octets () [0] & 240 == 240 && ! self . is_broadcast () } pub const fn is_multicast (& self) -> bool { self . octets () [0] >= 224 && self . octets () [0] <= 239 } pub const fn is_broadcast (& self) -> bool { u32 :: from_be_bytes (self . octets ()) == u32 :: from_be_bytes (Self :: BROADCAST . octets ()) } pub const fn is_documentation (& self) -> bool { matches ! (self . octets () , [192 , 0 , 2 , _] | [198 , 51 , 100 , _] | [203 , 0 , 113 , _]) } pub const fn to_ipv6_compatible (& self) -> Ipv6Addr { let [a , b , c , d] = self . octets () ; Ipv6Addr { inner : c :: in6_addr { s6_addr : [0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , a , b , c , d] } , } } pub const fn to_ipv6_mapped (& self) -> Ipv6Addr { let [a , b , c , d] = self . octets () ; Ipv6Addr { inner : c :: in6_addr { s6_addr : [0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0xFF , 0xFF , a , b , c , d] } , } } } impl fmt :: Display for IpAddr { fn fmt (& self , fmt : & mut fmt :: Formatter < '_ >) -> fmt :: Result { match self { IpAddr :: V4 (ip) => ip . fmt (fmt) , IpAddr :: V6 (ip) => ip . fmt (fmt) , } } } impl fmt :: Debug for IpAddr { fn fmt (& self , fmt : & mut fmt :: Formatter < '_ >) -> fmt :: Result { fmt :: Display :: fmt (self , fmt) } } impl From < Ipv4Addr > for IpAddr { fn from (ipv4 : Ipv4Addr) -> IpAddr { IpAddr :: V4 (ipv4) } } impl From < Ipv6Addr > for IpAddr { fn from (ipv6 : Ipv6Addr) -> IpAddr { IpAddr :: V6 (ipv6) } } impl fmt :: Display for Ipv4Addr { fn fmt (& self , fmt : & mut fmt :: Formatter < '_ >) -> fmt :: Result { let octets = self . octets () ; if fmt . precision () . is_none () && fmt . width () . is_none () { write ! 
(fmt , "{}.{}.{}.{}" , octets [0] , octets [1] , octets [2] , octets [3]) } else { const IPV4_BUF_LEN : usize = 15 ; let mut buf = [0u8 ; IPV4_BUF_LEN] ; let mut buf_slice = & mut buf [..] ; write ! (buf_slice , "{}.{}.{}.{}" , octets [0] , octets [1] , octets [2] , octets [3]) . unwrap () ; let len = IPV4_BUF_LEN - buf_slice . len () ; let buf = unsafe { crate :: str :: from_utf8_unchecked (& buf [.. len]) } ; fmt . pad (buf) } } } impl fmt :: Debug for Ipv4Addr { fn fmt (& self , fmt : & mut fmt :: Formatter < '_ >) -> fmt :: Result { fmt :: Display :: fmt (self , fmt) } } impl Clone for Ipv4Addr { fn clone (& self) -> Ipv4Addr { * self } } impl PartialEq for Ipv4Addr { fn eq (& self , other : & Ipv4Addr) -> bool { self . inner . s_addr == other . inner . s_addr } } impl PartialEq < Ipv4Addr > for IpAddr { fn eq (& self , other : & Ipv4Addr) -> bool { match self { IpAddr :: V4 (v4) => v4 == other , IpAddr :: V6 (_) => false , } } } impl PartialEq < IpAddr > for Ipv4Addr { fn eq (& self , other : & IpAddr) -> bool { match other { IpAddr :: V4 (v4) => self == v4 , IpAddr :: V6 (_) => false , } } } impl Eq for Ipv4Addr { } impl hash :: Hash for Ipv4Addr { fn hash < H : hash :: Hasher > (& self , s : & mut H) { { self . inner . s_addr } . hash (s) } } impl PartialOrd for Ipv4Addr { fn partial_cmp (& self , other : & Ipv4Addr) -> Option < Ordering > { Some (self . cmp (other)) } } impl PartialOrd < Ipv4Addr > for IpAddr { fn partial_cmp (& self , other : & Ipv4Addr) -> Option < Ordering > { match self { IpAddr :: V4 (v4) => v4 . partial_cmp (other) , IpAddr :: V6 (_) => Some (Ordering :: Greater) , } } } impl PartialOrd < IpAddr > for Ipv4Addr { fn partial_cmp (& self , other : & IpAddr) -> Option < Ordering > { match other { IpAddr :: V4 (v4) => self . partial_cmp (v4) , IpAddr :: V6 (_) => Some (Ordering :: Less) , } } } impl Ord for Ipv4Addr { fn cmp (& self , other : & Ipv4Addr) -> Ordering { u32 :: from_be (self . inner . s_addr) . cmp (& u32 :: from_be (other . inner . s_addr)) } } impl IntoInner < c :: in_addr > for Ipv4Addr { fn into_inner (self) -> c :: in_addr { self . inner } } impl From < Ipv4Addr > for u32 { fn from (ip : Ipv4Addr) -> u32 { let ip = ip . octets () ; u32 :: from_be_bytes (ip) } } impl From < u32 > for Ipv4Addr { fn from (ip : u32) -> Ipv4Addr { Ipv4Addr :: from (ip . to_be_bytes ()) } } impl From < [u8 ; 4] > for Ipv4Addr { fn from (octets : [u8 ; 4]) -> Ipv4Addr { Ipv4Addr :: new (octets [0] , octets [1] , octets [2] , octets [3]) } } impl From < [u8 ; 4] > for IpAddr { fn from (octets : [u8 ; 4]) -> IpAddr { IpAddr :: V4 (Ipv4Addr :: from (octets)) } } impl Ipv6Addr { pub const fn new (a : u16 , b : u16 , c : u16 , d : u16 , e : u16 , f : u16 , g : u16 , h : u16) -> Ipv6Addr { let addr16 = [a . to_be () , b . to_be () , c . to_be () , d . to_be () , e . to_be () , f . to_be () , g . to_be () , h . to_be () ,] ; Ipv6Addr { inner : c :: in6_addr { s6_addr : unsafe { transmute :: < _ , [u8 ; 16] > (addr16) } , } , } } pub const LOCALHOST : Self = Ipv6Addr :: new (0 , 0 , 0 , 0 , 0 , 0 , 0 , 1) ; pub const UNSPECIFIED : Self = Ipv6Addr :: new (0 , 0 , 0 , 0 , 0 , 0 , 0 , 0) ; pub const fn segments (& self) -> [u16 ; 8] { let [a , b , c , d , e , f , g , h] = unsafe { transmute :: < _ , [u16 ; 8] > (self . inner . 
s6_addr) } ; [u16 :: from_be (a) , u16 :: from_be (b) , u16 :: from_be (c) , u16 :: from_be (d) , u16 :: from_be (e) , u16 :: from_be (f) , u16 :: from_be (g) , u16 :: from_be (h) ,] } pub const fn is_unspecified (& self) -> bool { u128 :: from_be_bytes (self . octets ()) == u128 :: from_be_bytes (Ipv6Addr :: UNSPECIFIED . octets ()) } pub const fn is_loopback (& self) -> bool { u128 :: from_be_bytes (self . octets ()) == u128 :: from_be_bytes (Ipv6Addr :: LOCALHOST . octets ()) } pub const fn is_global (& self) -> bool { match self . multicast_scope () { Some (Ipv6MulticastScope :: Global) => true , None => self . is_unicast_global () , _ => false , } } pub const fn is_unique_local (& self) -> bool { (self . segments () [0] & 0xfe00) == 0xfc00 } pub const fn is_unicast (& self) -> bool { ! self . is_multicast () } pub const fn is_unicast_link_local (& self) -> bool { (self . segments () [0] & 0xffc0) == 0xfe80 } pub const fn is_documentation (& self) -> bool { (self . segments () [0] == 0x2001) && (self . segments () [1] == 0xdb8) } pub const fn is_benchmarking (& self) -> bool { (self . segments () [0] == 0x2001) && (self . segments () [1] == 0x2) && (self . segments () [2] == 0) } pub const fn is_unicast_global (& self) -> bool { self . is_unicast () && ! self . is_loopback () && ! self . is_unicast_link_local () && ! self . is_unique_local () && ! self . is_unspecified () && ! self . is_documentation () } pub const fn multicast_scope (& self) -> Option < Ipv6MulticastScope > { if self . is_multicast () { match self . segments () [0] & 0x000f { 1 => Some (Ipv6MulticastScope :: InterfaceLocal) , 2 => Some (Ipv6MulticastScope :: LinkLocal) , 3 => Some (Ipv6MulticastScope :: RealmLocal) , 4 => Some (Ipv6MulticastScope :: AdminLocal) , 5 => Some (Ipv6MulticastScope :: SiteLocal) , 8 => Some (Ipv6MulticastScope :: OrganizationLocal) , 14 => Some (Ipv6MulticastScope :: Global) , _ => None , } } else { None } } pub const fn is_multicast (& self) -> bool { (self . segments () [0] & 0xff00) == 0xff00 } pub const fn to_ipv4_mapped (& self) -> Option < Ipv4Addr > { match self . octets () { [0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0xff , 0xff , a , b , c , d] => { Some (Ipv4Addr :: new (a , b , c , d)) } _ => None , } } pub const fn to_ipv4 (& self) -> Option < Ipv4Addr > { if let [0 , 0 , 0 , 0 , 0 , 0 | 0xffff , ab , cd] = self . segments () { let [a , b] = ab . to_be_bytes () ; let [c , d] = cd . to_be_bytes () ; Some (Ipv4Addr :: new (a , b , c , d)) } else { None } } pub const fn to_canonical (& self) -> IpAddr { if let Some (mapped) = self . to_ipv4_mapped () { return IpAddr :: V4 (mapped) ; } IpAddr :: V6 (* self) } pub const fn octets (& self) -> [u8 ; 16] { self . inner . s6_addr } } impl fmt :: Display for Ipv6Addr { fn fmt (& self , f : & mut fmt :: Formatter < '_ >) -> fmt :: Result { if f . precision () . is_none () && f . width () . is_none () { let segments = self . segments () ; if self . is_unspecified () { f . write_str ("::") } else if self . is_loopback () { f . write_str ("::1") } else if let Some (ipv4) = self . to_ipv4 () { match segments [5] { 0 => write ! (f , "::{}" , ipv4) , 0xffff => write ! (f , "::ffff:{}" , ipv4) , _ => unreachable ! () , } } else { # [derive (Copy , Clone , Default)] struct Span { start : usize , len : usize , } let zeroes = { let mut longest = Span :: default () ; let mut current = Span :: default () ; for (i , & segment) in segments . iter () . enumerate () { if segment == 0 { if current . len == 0 { current . start = i ; } current . 
len += 1 ; if current . len > longest . len { longest = current ; } } else { current = Span :: default () ; } } longest } ; # [doc = " Write a colon-separated part of the address"] # [inline] fn fmt_subslice (f : & mut fmt :: Formatter < '_ > , chunk : & [u16]) -> fmt :: Result { if let Some ((first , tail)) = chunk . split_first () { write ! (f , "{:x}" , first) ? ; for segment in tail { f . write_char (':') ? ; write ! (f , "{:x}" , segment) ? ; } } Ok (()) } if zeroes . len > 1 { fmt_subslice (f , & segments [.. zeroes . start]) ? ; f . write_str ("::") ? ; fmt_subslice (f , & segments [zeroes . start + zeroes . len ..]) } else { fmt_subslice (f , & segments) } } } else { const IPV6_BUF_LEN : usize = (4 * 8) + 7 ; let mut buf = [0u8 ; IPV6_BUF_LEN] ; let mut buf_slice = & mut buf [..] ; write ! (buf_slice , "{}" , self) . unwrap () ; let len = IPV6_BUF_LEN - buf_slice . len () ; let buf = unsafe { crate :: str :: from_utf8_unchecked (& buf [.. len]) } ; f . pad (buf) } } } impl fmt :: Debug for Ipv6Addr { fn fmt (& self , fmt : & mut fmt :: Formatter < '_ >) -> fmt :: Result { fmt :: Display :: fmt (self , fmt) } } impl Clone for Ipv6Addr { fn clone (& self) -> Ipv6Addr { * self } } impl PartialEq for Ipv6Addr { fn eq (& self , other : & Ipv6Addr) -> bool { self . inner . s6_addr == other . inner . s6_addr } } impl PartialEq < IpAddr > for Ipv6Addr { fn eq (& self , other : & IpAddr) -> bool { match other { IpAddr :: V4 (_) => false , IpAddr :: V6 (v6) => self == v6 , } } } impl PartialEq < Ipv6Addr > for IpAddr { fn eq (& self , other : & Ipv6Addr) -> bool { match self { IpAddr :: V4 (_) => false , IpAddr :: V6 (v6) => v6 == other , } } } impl Eq for Ipv6Addr { } impl hash :: Hash for Ipv6Addr { fn hash < H : hash :: Hasher > (& self , s : & mut H) { self . inner . s6_addr . hash (s) } } impl PartialOrd for Ipv6Addr { fn partial_cmp (& self , other : & Ipv6Addr) -> Option < Ordering > { Some (self . cmp (other)) } } impl PartialOrd < Ipv6Addr > for IpAddr { fn partial_cmp (& self , other : & Ipv6Addr) -> Option < Ordering > { match self { IpAddr :: V4 (_) => Some (Ordering :: Less) , IpAddr :: V6 (v6) => v6 . partial_cmp (other) , } } } impl PartialOrd < IpAddr > for Ipv6Addr { fn partial_cmp (& self , other : & IpAddr) -> Option < Ordering > { match other { IpAddr :: V4 (_) => Some (Ordering :: Greater) , IpAddr :: V6 (v6) => self . partial_cmp (v6) , } } } impl Ord for Ipv6Addr { fn cmp (& self , other : & Ipv6Addr) -> Ordering { self . segments () . cmp (& other . segments ()) } } impl AsInner < c :: in6_addr > for Ipv6Addr { fn as_inner (& self) -> & c :: in6_addr { & self . inner } } impl FromInner < c :: in6_addr > for Ipv6Addr { fn from_inner (addr : c :: in6_addr) -> Ipv6Addr { Ipv6Addr { inner : addr } } } impl From < Ipv6Addr > for u128 { fn from (ip : Ipv6Addr) -> u128 { let ip = ip . octets () ; u128 :: from_be_bytes (ip) } } impl From < u128 > for Ipv6Addr { fn from (ip : u128) -> Ipv6Addr { Ipv6Addr :: from (ip . 
to_be_bytes ()) } } impl From < [u8 ; 16] > for Ipv6Addr { fn from (octets : [u8 ; 16]) -> Ipv6Addr { let inner = c :: in6_addr { s6_addr : octets } ; Ipv6Addr :: from_inner (inner) } } impl From < [u16 ; 8] > for Ipv6Addr { fn from (segments : [u16 ; 8]) -> Ipv6Addr { let [a , b , c , d , e , f , g , h] = segments ; Ipv6Addr :: new (a , b , c , d , e , f , g , h) } } impl From < [u8 ; 16] > for IpAddr { fn from (octets : [u8 ; 16]) -> IpAddr { IpAddr :: V6 (Ipv6Addr :: from (octets)) } } impl From < [u16 ; 8] > for IpAddr { fn from (segments : [u16 ; 8]) -> IpAddr { IpAddr :: V6 (Ipv6Addr :: from (segments)) } } diff --git a/vendor/prettyplease/examples/output.prettyplease.rs b/vendor/prettyplease/examples/output.prettyplease.rs deleted file mode 100644 index 45b65d00f103c7..00000000000000 --- a/vendor/prettyplease/examples/output.prettyplease.rs +++ /dev/null @@ -1,593 +0,0 @@ -use crate::cmp::Ordering; -use crate::fmt::{self, Write as FmtWrite}; -use crate::hash; -use crate::io::Write as IoWrite; -use crate::mem::transmute; -use crate::sys::net::netc as c; -use crate::sys_common::{AsInner, FromInner, IntoInner}; -#[derive(Copy, Clone, Eq, PartialEq, Hash, PartialOrd, Ord)] -pub enum IpAddr { - V4(Ipv4Addr), - V6(Ipv6Addr), -} -#[derive(Copy)] -pub struct Ipv4Addr { - inner: c::in_addr, -} -#[derive(Copy)] -pub struct Ipv6Addr { - inner: c::in6_addr, -} -#[derive(Copy, PartialEq, Eq, Clone, Hash, Debug)] -#[non_exhaustive] -pub enum Ipv6MulticastScope { - InterfaceLocal, - LinkLocal, - RealmLocal, - AdminLocal, - SiteLocal, - OrganizationLocal, - Global, -} -impl IpAddr { - pub const fn is_unspecified(&self) -> bool { - match self { - IpAddr::V4(ip) => ip.is_unspecified(), - IpAddr::V6(ip) => ip.is_unspecified(), - } - } - pub const fn is_loopback(&self) -> bool { - match self { - IpAddr::V4(ip) => ip.is_loopback(), - IpAddr::V6(ip) => ip.is_loopback(), - } - } - pub const fn is_global(&self) -> bool { - match self { - IpAddr::V4(ip) => ip.is_global(), - IpAddr::V6(ip) => ip.is_global(), - } - } - pub const fn is_multicast(&self) -> bool { - match self { - IpAddr::V4(ip) => ip.is_multicast(), - IpAddr::V6(ip) => ip.is_multicast(), - } - } - pub const fn is_documentation(&self) -> bool { - match self { - IpAddr::V4(ip) => ip.is_documentation(), - IpAddr::V6(ip) => ip.is_documentation(), - } - } - pub const fn is_benchmarking(&self) -> bool { - match self { - IpAddr::V4(ip) => ip.is_benchmarking(), - IpAddr::V6(ip) => ip.is_benchmarking(), - } - } - pub const fn is_ipv4(&self) -> bool { - matches!(self, IpAddr::V4(_)) - } - pub const fn is_ipv6(&self) -> bool { - matches!(self, IpAddr::V6(_)) - } - pub const fn to_canonical(&self) -> IpAddr { - match self { - &v4 @ IpAddr::V4(_) => v4, - IpAddr::V6(v6) => v6.to_canonical(), - } - } -} -impl Ipv4Addr { - pub const fn new(a: u8, b: u8, c: u8, d: u8) -> Ipv4Addr { - Ipv4Addr { - inner: c::in_addr { - s_addr: u32::from_ne_bytes([a, b, c, d]), - }, - } - } - pub const LOCALHOST: Self = Ipv4Addr::new(127, 0, 0, 1); - #[doc(alias = "INADDR_ANY")] - pub const UNSPECIFIED: Self = Ipv4Addr::new(0, 0, 0, 0); - pub const BROADCAST: Self = Ipv4Addr::new(255, 255, 255, 255); - pub const fn octets(&self) -> [u8; 4] { - self.inner.s_addr.to_ne_bytes() - } - pub const fn is_unspecified(&self) -> bool { - self.inner.s_addr == 0 - } - pub const fn is_loopback(&self) -> bool { - self.octets()[0] == 127 - } - pub const fn is_private(&self) -> bool { - match self.octets() { - [10, ..] => true, - [172, b, ..] if b >= 16 && b <= 31 => true, - [192, 168, ..] 
=> true, - _ => false, - } - } - pub const fn is_link_local(&self) -> bool { - matches!(self.octets(), [169, 254, ..]) - } - pub const fn is_global(&self) -> bool { - if u32::from_be_bytes(self.octets()) == 0xc0000009 - || u32::from_be_bytes(self.octets()) == 0xc000000a - { - return true; - } - !self.is_private() && !self.is_loopback() && !self.is_link_local() - && !self.is_broadcast() && !self.is_documentation() && !self.is_shared() - && !(self.octets()[0] == 192 && self.octets()[1] == 0 - && self.octets()[2] == 0) && !self.is_reserved() - && !self.is_benchmarking() && self.octets()[0] != 0 - } - pub const fn is_shared(&self) -> bool { - self.octets()[0] == 100 && (self.octets()[1] & 0b1100_0000 == 0b0100_0000) - } - pub const fn is_benchmarking(&self) -> bool { - self.octets()[0] == 198 && (self.octets()[1] & 0xfe) == 18 - } - pub const fn is_reserved(&self) -> bool { - self.octets()[0] & 240 == 240 && !self.is_broadcast() - } - pub const fn is_multicast(&self) -> bool { - self.octets()[0] >= 224 && self.octets()[0] <= 239 - } - pub const fn is_broadcast(&self) -> bool { - u32::from_be_bytes(self.octets()) == u32::from_be_bytes(Self::BROADCAST.octets()) - } - pub const fn is_documentation(&self) -> bool { - matches!(self.octets(), [192, 0, 2, _] | [198, 51, 100, _] | [203, 0, 113, _]) - } - pub const fn to_ipv6_compatible(&self) -> Ipv6Addr { - let [a, b, c, d] = self.octets(); - Ipv6Addr { - inner: c::in6_addr { - s6_addr: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, a, b, c, d], - }, - } - } - pub const fn to_ipv6_mapped(&self) -> Ipv6Addr { - let [a, b, c, d] = self.octets(); - Ipv6Addr { - inner: c::in6_addr { - s6_addr: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xFF, 0xFF, a, b, c, d], - }, - } - } -} -impl fmt::Display for IpAddr { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - match self { - IpAddr::V4(ip) => ip.fmt(fmt), - IpAddr::V6(ip) => ip.fmt(fmt), - } - } -} -impl fmt::Debug for IpAddr { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt::Display::fmt(self, fmt) - } -} -impl From for IpAddr { - fn from(ipv4: Ipv4Addr) -> IpAddr { - IpAddr::V4(ipv4) - } -} -impl From for IpAddr { - fn from(ipv6: Ipv6Addr) -> IpAddr { - IpAddr::V6(ipv6) - } -} -impl fmt::Display for Ipv4Addr { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - let octets = self.octets(); - if fmt.precision().is_none() && fmt.width().is_none() { - write!(fmt, "{}.{}.{}.{}", octets[0], octets[1], octets[2], octets[3]) - } else { - const IPV4_BUF_LEN: usize = 15; - let mut buf = [0u8; IPV4_BUF_LEN]; - let mut buf_slice = &mut buf[..]; - write!(buf_slice, "{}.{}.{}.{}", octets[0], octets[1], octets[2], octets[3]) - .unwrap(); - let len = IPV4_BUF_LEN - buf_slice.len(); - let buf = unsafe { crate::str::from_utf8_unchecked(&buf[..len]) }; - fmt.pad(buf) - } - } -} -impl fmt::Debug for Ipv4Addr { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt::Display::fmt(self, fmt) - } -} -impl Clone for Ipv4Addr { - fn clone(&self) -> Ipv4Addr { - *self - } -} -impl PartialEq for Ipv4Addr { - fn eq(&self, other: &Ipv4Addr) -> bool { - self.inner.s_addr == other.inner.s_addr - } -} -impl PartialEq for IpAddr { - fn eq(&self, other: &Ipv4Addr) -> bool { - match self { - IpAddr::V4(v4) => v4 == other, - IpAddr::V6(_) => false, - } - } -} -impl PartialEq for Ipv4Addr { - fn eq(&self, other: &IpAddr) -> bool { - match other { - IpAddr::V4(v4) => self == v4, - IpAddr::V6(_) => false, - } - } -} -impl Eq for Ipv4Addr {} -impl hash::Hash for Ipv4Addr { - fn hash(&self, s: &mut H) { - { 
self.inner.s_addr }.hash(s) - } -} -impl PartialOrd for Ipv4Addr { - fn partial_cmp(&self, other: &Ipv4Addr) -> Option { - Some(self.cmp(other)) - } -} -impl PartialOrd for IpAddr { - fn partial_cmp(&self, other: &Ipv4Addr) -> Option { - match self { - IpAddr::V4(v4) => v4.partial_cmp(other), - IpAddr::V6(_) => Some(Ordering::Greater), - } - } -} -impl PartialOrd for Ipv4Addr { - fn partial_cmp(&self, other: &IpAddr) -> Option { - match other { - IpAddr::V4(v4) => self.partial_cmp(v4), - IpAddr::V6(_) => Some(Ordering::Less), - } - } -} -impl Ord for Ipv4Addr { - fn cmp(&self, other: &Ipv4Addr) -> Ordering { - u32::from_be(self.inner.s_addr).cmp(&u32::from_be(other.inner.s_addr)) - } -} -impl IntoInner for Ipv4Addr { - fn into_inner(self) -> c::in_addr { - self.inner - } -} -impl From for u32 { - fn from(ip: Ipv4Addr) -> u32 { - let ip = ip.octets(); - u32::from_be_bytes(ip) - } -} -impl From for Ipv4Addr { - fn from(ip: u32) -> Ipv4Addr { - Ipv4Addr::from(ip.to_be_bytes()) - } -} -impl From<[u8; 4]> for Ipv4Addr { - fn from(octets: [u8; 4]) -> Ipv4Addr { - Ipv4Addr::new(octets[0], octets[1], octets[2], octets[3]) - } -} -impl From<[u8; 4]> for IpAddr { - fn from(octets: [u8; 4]) -> IpAddr { - IpAddr::V4(Ipv4Addr::from(octets)) - } -} -impl Ipv6Addr { - pub const fn new( - a: u16, - b: u16, - c: u16, - d: u16, - e: u16, - f: u16, - g: u16, - h: u16, - ) -> Ipv6Addr { - let addr16 = [ - a.to_be(), - b.to_be(), - c.to_be(), - d.to_be(), - e.to_be(), - f.to_be(), - g.to_be(), - h.to_be(), - ]; - Ipv6Addr { - inner: c::in6_addr { - s6_addr: unsafe { transmute::<_, [u8; 16]>(addr16) }, - }, - } - } - pub const LOCALHOST: Self = Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1); - pub const UNSPECIFIED: Self = Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 0); - pub const fn segments(&self) -> [u16; 8] { - let [a, b, c, d, e, f, g, h] = unsafe { - transmute::<_, [u16; 8]>(self.inner.s6_addr) - }; - [ - u16::from_be(a), - u16::from_be(b), - u16::from_be(c), - u16::from_be(d), - u16::from_be(e), - u16::from_be(f), - u16::from_be(g), - u16::from_be(h), - ] - } - pub const fn is_unspecified(&self) -> bool { - u128::from_be_bytes(self.octets()) - == u128::from_be_bytes(Ipv6Addr::UNSPECIFIED.octets()) - } - pub const fn is_loopback(&self) -> bool { - u128::from_be_bytes(self.octets()) - == u128::from_be_bytes(Ipv6Addr::LOCALHOST.octets()) - } - pub const fn is_global(&self) -> bool { - match self.multicast_scope() { - Some(Ipv6MulticastScope::Global) => true, - None => self.is_unicast_global(), - _ => false, - } - } - pub const fn is_unique_local(&self) -> bool { - (self.segments()[0] & 0xfe00) == 0xfc00 - } - pub const fn is_unicast(&self) -> bool { - !self.is_multicast() - } - pub const fn is_unicast_link_local(&self) -> bool { - (self.segments()[0] & 0xffc0) == 0xfe80 - } - pub const fn is_documentation(&self) -> bool { - (self.segments()[0] == 0x2001) && (self.segments()[1] == 0xdb8) - } - pub const fn is_benchmarking(&self) -> bool { - (self.segments()[0] == 0x2001) && (self.segments()[1] == 0x2) - && (self.segments()[2] == 0) - } - pub const fn is_unicast_global(&self) -> bool { - self.is_unicast() && !self.is_loopback() && !self.is_unicast_link_local() - && !self.is_unique_local() && !self.is_unspecified() - && !self.is_documentation() - } - pub const fn multicast_scope(&self) -> Option { - if self.is_multicast() { - match self.segments()[0] & 0x000f { - 1 => Some(Ipv6MulticastScope::InterfaceLocal), - 2 => Some(Ipv6MulticastScope::LinkLocal), - 3 => Some(Ipv6MulticastScope::RealmLocal), - 4 => 
Some(Ipv6MulticastScope::AdminLocal), - 5 => Some(Ipv6MulticastScope::SiteLocal), - 8 => Some(Ipv6MulticastScope::OrganizationLocal), - 14 => Some(Ipv6MulticastScope::Global), - _ => None, - } - } else { - None - } - } - pub const fn is_multicast(&self) -> bool { - (self.segments()[0] & 0xff00) == 0xff00 - } - pub const fn to_ipv4_mapped(&self) -> Option { - match self.octets() { - [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, a, b, c, d] => { - Some(Ipv4Addr::new(a, b, c, d)) - } - _ => None, - } - } - pub const fn to_ipv4(&self) -> Option { - if let [0, 0, 0, 0, 0, 0 | 0xffff, ab, cd] = self.segments() { - let [a, b] = ab.to_be_bytes(); - let [c, d] = cd.to_be_bytes(); - Some(Ipv4Addr::new(a, b, c, d)) - } else { - None - } - } - pub const fn to_canonical(&self) -> IpAddr { - if let Some(mapped) = self.to_ipv4_mapped() { - return IpAddr::V4(mapped); - } - IpAddr::V6(*self) - } - pub const fn octets(&self) -> [u8; 16] { - self.inner.s6_addr - } -} -impl fmt::Display for Ipv6Addr { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - if f.precision().is_none() && f.width().is_none() { - let segments = self.segments(); - if self.is_unspecified() { - f.write_str("::") - } else if self.is_loopback() { - f.write_str("::1") - } else if let Some(ipv4) = self.to_ipv4() { - match segments[5] { - 0 => write!(f, "::{}", ipv4), - 0xffff => write!(f, "::ffff:{}", ipv4), - _ => unreachable!(), - } - } else { - #[derive(Copy, Clone, Default)] - struct Span { - start: usize, - len: usize, - } - let zeroes = { - let mut longest = Span::default(); - let mut current = Span::default(); - for (i, &segment) in segments.iter().enumerate() { - if segment == 0 { - if current.len == 0 { - current.start = i; - } - current.len += 1; - if current.len > longest.len { - longest = current; - } - } else { - current = Span::default(); - } - } - longest - }; - /// Write a colon-separated part of the address - #[inline] - fn fmt_subslice( - f: &mut fmt::Formatter<'_>, - chunk: &[u16], - ) -> fmt::Result { - if let Some((first, tail)) = chunk.split_first() { - write!(f, "{:x}", first)?; - for segment in tail { - f.write_char(':')?; - write!(f, "{:x}", segment)?; - } - } - Ok(()) - } - if zeroes.len > 1 { - fmt_subslice(f, &segments[..zeroes.start])?; - f.write_str("::")?; - fmt_subslice(f, &segments[zeroes.start + zeroes.len..]) - } else { - fmt_subslice(f, &segments) - } - } - } else { - const IPV6_BUF_LEN: usize = (4 * 8) + 7; - let mut buf = [0u8; IPV6_BUF_LEN]; - let mut buf_slice = &mut buf[..]; - write!(buf_slice, "{}", self).unwrap(); - let len = IPV6_BUF_LEN - buf_slice.len(); - let buf = unsafe { crate::str::from_utf8_unchecked(&buf[..len]) }; - f.pad(buf) - } - } -} -impl fmt::Debug for Ipv6Addr { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt::Display::fmt(self, fmt) - } -} -impl Clone for Ipv6Addr { - fn clone(&self) -> Ipv6Addr { - *self - } -} -impl PartialEq for Ipv6Addr { - fn eq(&self, other: &Ipv6Addr) -> bool { - self.inner.s6_addr == other.inner.s6_addr - } -} -impl PartialEq for Ipv6Addr { - fn eq(&self, other: &IpAddr) -> bool { - match other { - IpAddr::V4(_) => false, - IpAddr::V6(v6) => self == v6, - } - } -} -impl PartialEq for IpAddr { - fn eq(&self, other: &Ipv6Addr) -> bool { - match self { - IpAddr::V4(_) => false, - IpAddr::V6(v6) => v6 == other, - } - } -} -impl Eq for Ipv6Addr {} -impl hash::Hash for Ipv6Addr { - fn hash(&self, s: &mut H) { - self.inner.s6_addr.hash(s) - } -} -impl PartialOrd for Ipv6Addr { - fn partial_cmp(&self, other: &Ipv6Addr) -> Option { - 
Some(self.cmp(other)) - } -} -impl PartialOrd for IpAddr { - fn partial_cmp(&self, other: &Ipv6Addr) -> Option { - match self { - IpAddr::V4(_) => Some(Ordering::Less), - IpAddr::V6(v6) => v6.partial_cmp(other), - } - } -} -impl PartialOrd for Ipv6Addr { - fn partial_cmp(&self, other: &IpAddr) -> Option { - match other { - IpAddr::V4(_) => Some(Ordering::Greater), - IpAddr::V6(v6) => self.partial_cmp(v6), - } - } -} -impl Ord for Ipv6Addr { - fn cmp(&self, other: &Ipv6Addr) -> Ordering { - self.segments().cmp(&other.segments()) - } -} -impl AsInner for Ipv6Addr { - fn as_inner(&self) -> &c::in6_addr { - &self.inner - } -} -impl FromInner for Ipv6Addr { - fn from_inner(addr: c::in6_addr) -> Ipv6Addr { - Ipv6Addr { inner: addr } - } -} -impl From for u128 { - fn from(ip: Ipv6Addr) -> u128 { - let ip = ip.octets(); - u128::from_be_bytes(ip) - } -} -impl From for Ipv6Addr { - fn from(ip: u128) -> Ipv6Addr { - Ipv6Addr::from(ip.to_be_bytes()) - } -} -impl From<[u8; 16]> for Ipv6Addr { - fn from(octets: [u8; 16]) -> Ipv6Addr { - let inner = c::in6_addr { s6_addr: octets }; - Ipv6Addr::from_inner(inner) - } -} -impl From<[u16; 8]> for Ipv6Addr { - fn from(segments: [u16; 8]) -> Ipv6Addr { - let [a, b, c, d, e, f, g, h] = segments; - Ipv6Addr::new(a, b, c, d, e, f, g, h) - } -} -impl From<[u8; 16]> for IpAddr { - fn from(octets: [u8; 16]) -> IpAddr { - IpAddr::V6(Ipv6Addr::from(octets)) - } -} -impl From<[u16; 8]> for IpAddr { - fn from(segments: [u16; 8]) -> IpAddr { - IpAddr::V6(Ipv6Addr::from(segments)) - } -} diff --git a/vendor/prettyplease/examples/output.rustc.rs b/vendor/prettyplease/examples/output.rustc.rs deleted file mode 100644 index a77a14a8ec077c..00000000000000 --- a/vendor/prettyplease/examples/output.rustc.rs +++ /dev/null @@ -1,506 +0,0 @@ -use crate::cmp::Ordering;use crate::fmt::{self, Write as FmtWrite}; -use crate::hash; -use crate::io::Write as IoWrite; -use crate::mem::transmute; -use crate::sys::net::netc as c; -use crate::sys_common::{AsInner, FromInner, IntoInner}; -#[derive(Copy, Clone, Eq, PartialEq, Hash, PartialOrd, Ord)] -pub enum IpAddr { V4(Ipv4Addr), V6(Ipv6Addr), } -#[derive(Copy)] -pub struct Ipv4Addr { - inner: c::in_addr, -} -#[derive(Copy)] -pub struct Ipv6Addr { - inner: c::in6_addr, -} -#[derive(Copy, PartialEq, Eq, Clone, Hash, Debug)] -#[non_exhaustive] -pub enum Ipv6MulticastScope { - InterfaceLocal, - LinkLocal, - RealmLocal, - AdminLocal, - SiteLocal, - OrganizationLocal, - Global, -} -impl IpAddr { - pub const fn is_unspecified(&self) -> bool { - match self { - IpAddr::V4(ip) => ip.is_unspecified(), - IpAddr::V6(ip) => ip.is_unspecified(), - } - } - pub const fn is_loopback(&self) -> bool { - match self { - IpAddr::V4(ip) => ip.is_loopback(), - IpAddr::V6(ip) => ip.is_loopback(), - } - } - pub const fn is_global(&self) -> bool { - match self { - IpAddr::V4(ip) => ip.is_global(), - IpAddr::V6(ip) => ip.is_global(), - } - } - pub const fn is_multicast(&self) -> bool { - match self { - IpAddr::V4(ip) => ip.is_multicast(), - IpAddr::V6(ip) => ip.is_multicast(), - } - } - pub const fn is_documentation(&self) -> bool { - match self { - IpAddr::V4(ip) => ip.is_documentation(), - IpAddr::V6(ip) => ip.is_documentation(), - } - } - pub const fn is_benchmarking(&self) -> bool { - match self { - IpAddr::V4(ip) => ip.is_benchmarking(), - IpAddr::V6(ip) => ip.is_benchmarking(), - } - } - pub const fn is_ipv4(&self) -> bool { matches!(self, IpAddr :: V4(_)) } - pub const fn is_ipv6(&self) -> bool { matches!(self, IpAddr :: V6(_)) } - pub const fn 
to_canonical(&self) -> IpAddr { - match self { - &v4 @ IpAddr::V4(_) => v4, - IpAddr::V6(v6) => v6.to_canonical(), - } - } -} -impl Ipv4Addr { - pub const fn new(a: u8, b: u8, c: u8, d: u8) -> Ipv4Addr { - Ipv4Addr { - inner: c::in_addr { s_addr: u32::from_ne_bytes([a, b, c, d]) }, - } - } - pub const LOCALHOST: Self = Ipv4Addr::new(127, 0, 0, 1); - #[doc(alias = "INADDR_ANY")] - pub const UNSPECIFIED: Self = Ipv4Addr::new(0, 0, 0, 0); - pub const BROADCAST: Self = Ipv4Addr::new(255, 255, 255, 255); - pub const fn octets(&self) -> [u8; 4] { self.inner.s_addr.to_ne_bytes() } - pub const fn is_unspecified(&self) -> bool { self.inner.s_addr == 0 } - pub const fn is_loopback(&self) -> bool { self.octets()[0] == 127 } - pub const fn is_private(&self) -> bool { - match self.octets() { - [10, ..] => true, - [172, b, ..] if b >= 16 && b <= 31 => true, - [192, 168, ..] => true, - _ => false, - } - } - pub const fn is_link_local(&self) -> bool { - matches!(self.octets(), [169, 254, ..]) - } - pub const fn is_global(&self) -> bool { - if u32::from_be_bytes(self.octets()) == 0xc0000009 || - u32::from_be_bytes(self.octets()) == 0xc000000a { - return true; - } - !self.is_private() && !self.is_loopback() && !self.is_link_local() && - !self.is_broadcast() && !self.is_documentation() && - !self.is_shared() && - !(self.octets()[0] == 192 && self.octets()[1] == 0 && - self.octets()[2] == 0) && !self.is_reserved() && - !self.is_benchmarking() && self.octets()[0] != 0 - } - pub const fn is_shared(&self) -> bool { - self.octets()[0] == 100 && - (self.octets()[1] & 0b1100_0000 == 0b0100_0000) - } - pub const fn is_benchmarking(&self) -> bool { - self.octets()[0] == 198 && (self.octets()[1] & 0xfe) == 18 - } - pub const fn is_reserved(&self) -> bool { - self.octets()[0] & 240 == 240 && !self.is_broadcast() - } - pub const fn is_multicast(&self) -> bool { - self.octets()[0] >= 224 && self.octets()[0] <= 239 - } - pub const fn is_broadcast(&self) -> bool { - u32::from_be_bytes(self.octets()) == - u32::from_be_bytes(Self::BROADCAST.octets()) - } - pub const fn is_documentation(&self) -> bool { - matches!(self.octets(), [192, 0, 2, _] | [198, 51, 100, _] | - [203, 0, 113, _]) - } - pub const fn to_ipv6_compatible(&self) -> Ipv6Addr { - let [a, b, c, d] = self.octets(); - Ipv6Addr { - inner: c::in6_addr { - s6_addr: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, a, b, c, d], - }, - } - } - pub const fn to_ipv6_mapped(&self) -> Ipv6Addr { - let [a, b, c, d] = self.octets(); - Ipv6Addr { - inner: c::in6_addr { - s6_addr: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xFF, 0xFF, a, b, c, - d], - }, - } - } -} -impl fmt::Display for IpAddr { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - match self { - IpAddr::V4(ip) => ip.fmt(fmt), - IpAddr::V6(ip) => ip.fmt(fmt), - } - } -} -impl fmt::Debug for IpAddr { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt::Display::fmt(self, fmt) - } -} -impl From for IpAddr { - fn from(ipv4: Ipv4Addr) -> IpAddr { IpAddr::V4(ipv4) } -} -impl From for IpAddr { - fn from(ipv6: Ipv6Addr) -> IpAddr { IpAddr::V6(ipv6) } -} -impl fmt::Display for Ipv4Addr { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - let octets = self.octets(); - if fmt.precision().is_none() && fmt.width().is_none() { - write!(fmt, "{}.{}.{}.{}", octets [0], octets [1], octets [2], - octets [3]) - } else { - const IPV4_BUF_LEN: usize = 15; - let mut buf = [0u8; IPV4_BUF_LEN]; - let mut buf_slice = &mut buf[..]; - write!(buf_slice, "{}.{}.{}.{}", octets [0], octets [1], octets - [2], octets 
[3]).unwrap(); - let len = IPV4_BUF_LEN - buf_slice.len(); - let buf = unsafe { crate::str::from_utf8_unchecked(&buf[..len]) }; - fmt.pad(buf) - } - } -} -impl fmt::Debug for Ipv4Addr { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt::Display::fmt(self, fmt) - } -} -impl Clone for Ipv4Addr { - fn clone(&self) -> Ipv4Addr { *self } -} -impl PartialEq for Ipv4Addr { - fn eq(&self, other: &Ipv4Addr) -> bool { - self.inner.s_addr == other.inner.s_addr - } -} -impl PartialEq for IpAddr { - fn eq(&self, other: &Ipv4Addr) -> bool { - match self { IpAddr::V4(v4) => v4 == other, IpAddr::V6(_) => false, } - } -} -impl PartialEq for Ipv4Addr { - fn eq(&self, other: &IpAddr) -> bool { - match other { IpAddr::V4(v4) => self == v4, IpAddr::V6(_) => false, } - } -} -impl Eq for Ipv4Addr {} -impl hash::Hash for Ipv4Addr { - fn hash(&self, s: &mut H) { - { self.inner.s_addr }.hash(s) - } -} -impl PartialOrd for Ipv4Addr { - fn partial_cmp(&self, other: &Ipv4Addr) -> Option { - Some(self.cmp(other)) - } -} -impl PartialOrd for IpAddr { - fn partial_cmp(&self, other: &Ipv4Addr) -> Option { - match self { - IpAddr::V4(v4) => v4.partial_cmp(other), - IpAddr::V6(_) => Some(Ordering::Greater), - } - } -} -impl PartialOrd for Ipv4Addr { - fn partial_cmp(&self, other: &IpAddr) -> Option { - match other { - IpAddr::V4(v4) => self.partial_cmp(v4), - IpAddr::V6(_) => Some(Ordering::Less), - } - } -} -impl Ord for Ipv4Addr { - fn cmp(&self, other: &Ipv4Addr) -> Ordering { - u32::from_be(self.inner.s_addr).cmp(&u32::from_be(other.inner.s_addr)) - } -} -impl IntoInner for Ipv4Addr { - fn into_inner(self) -> c::in_addr { self.inner } -} -impl From for u32 { - fn from(ip: Ipv4Addr) -> u32 { - let ip = ip.octets(); - u32::from_be_bytes(ip) - } -} -impl From for Ipv4Addr { - fn from(ip: u32) -> Ipv4Addr { Ipv4Addr::from(ip.to_be_bytes()) } -} -impl From<[u8; 4]> for Ipv4Addr { - fn from(octets: [u8; 4]) -> Ipv4Addr { - Ipv4Addr::new(octets[0], octets[1], octets[2], octets[3]) - } -} -impl From<[u8; 4]> for IpAddr { - fn from(octets: [u8; 4]) -> IpAddr { IpAddr::V4(Ipv4Addr::from(octets)) } -} -impl Ipv6Addr { - pub const fn new(a: u16, b: u16, c: u16, d: u16, e: u16, f: u16, g: u16, - h: u16) -> Ipv6Addr { - let addr16 = - [a.to_be(), b.to_be(), c.to_be(), d.to_be(), e.to_be(), f.to_be(), - g.to_be(), h.to_be()]; - Ipv6Addr { - inner: c::in6_addr { - s6_addr: unsafe { transmute::<_, [u8; 16]>(addr16) }, - }, - } - } - pub const LOCALHOST: Self = Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1); - pub const UNSPECIFIED: Self = Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 0); - pub const fn segments(&self) -> [u16; 8] { - let [a, b, c, d, e, f, g, h] = - unsafe { transmute::<_, [u16; 8]>(self.inner.s6_addr) }; - [u16::from_be(a), u16::from_be(b), u16::from_be(c), u16::from_be(d), - u16::from_be(e), u16::from_be(f), u16::from_be(g), - u16::from_be(h)] - } - pub const fn is_unspecified(&self) -> bool { - u128::from_be_bytes(self.octets()) == - u128::from_be_bytes(Ipv6Addr::UNSPECIFIED.octets()) - } - pub const fn is_loopback(&self) -> bool { - u128::from_be_bytes(self.octets()) == - u128::from_be_bytes(Ipv6Addr::LOCALHOST.octets()) - } - pub const fn is_global(&self) -> bool { - match self.multicast_scope() { - Some(Ipv6MulticastScope::Global) => true, - None => self.is_unicast_global(), - _ => false, - } - } - pub const fn is_unique_local(&self) -> bool { - (self.segments()[0] & 0xfe00) == 0xfc00 - } - pub const fn is_unicast(&self) -> bool { !self.is_multicast() } - pub const fn is_unicast_link_local(&self) -> bool { - 
(self.segments()[0] & 0xffc0) == 0xfe80 - } - pub const fn is_documentation(&self) -> bool { - (self.segments()[0] == 0x2001) && (self.segments()[1] == 0xdb8) - } - pub const fn is_benchmarking(&self) -> bool { - (self.segments()[0] == 0x2001) && (self.segments()[1] == 0x2) && - (self.segments()[2] == 0) - } - pub const fn is_unicast_global(&self) -> bool { - self.is_unicast() && !self.is_loopback() && - !self.is_unicast_link_local() && !self.is_unique_local() && - !self.is_unspecified() && !self.is_documentation() - } - pub const fn multicast_scope(&self) -> Option { - if self.is_multicast() { - match self.segments()[0] & 0x000f { - 1 => Some(Ipv6MulticastScope::InterfaceLocal), - 2 => Some(Ipv6MulticastScope::LinkLocal), - 3 => Some(Ipv6MulticastScope::RealmLocal), - 4 => Some(Ipv6MulticastScope::AdminLocal), - 5 => Some(Ipv6MulticastScope::SiteLocal), - 8 => Some(Ipv6MulticastScope::OrganizationLocal), - 14 => Some(Ipv6MulticastScope::Global), - _ => None, - } - } else { None } - } - pub const fn is_multicast(&self) -> bool { - (self.segments()[0] & 0xff00) == 0xff00 - } - pub const fn to_ipv4_mapped(&self) -> Option { - match self.octets() { - [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, a, b, c, d] => { - Some(Ipv4Addr::new(a, b, c, d)) - } - _ => None, - } - } - pub const fn to_ipv4(&self) -> Option { - if let [0, 0, 0, 0, 0, 0 | 0xffff, ab, cd] = self.segments() { - let [a, b] = ab.to_be_bytes(); - let [c, d] = cd.to_be_bytes(); - Some(Ipv4Addr::new(a, b, c, d)) - } else { None } - } - pub const fn to_canonical(&self) -> IpAddr { - if let Some(mapped) = self.to_ipv4_mapped() { - return IpAddr::V4(mapped); - } - IpAddr::V6(*self) - } - pub const fn octets(&self) -> [u8; 16] { self.inner.s6_addr } -} -impl fmt::Display for Ipv6Addr { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - if f.precision().is_none() && f.width().is_none() { - let segments = self.segments(); - if self.is_unspecified() { - f.write_str("::") - } else if self.is_loopback() { - f.write_str("::1") - } else if let Some(ipv4) = self.to_ipv4() { - match segments[5] { - 0 => write!(f, "::{}", ipv4), - 0xffff => write!(f, "::ffff:{}", ipv4), - _ => unreachable!(), - } - } else { - #[derive(Copy, Clone, Default)] - struct Span { - start: usize, - len: usize, - } - let zeroes = - { - let mut longest = Span::default(); - let mut current = Span::default(); - for (i, &segment) in segments.iter().enumerate() { - if segment == 0 { - if current.len == 0 { current.start = i; } - current.len += 1; - if current.len > longest.len { longest = current; } - } else { current = Span::default(); } - } - longest - }; - #[doc = " Write a colon-separated part of the address"] - #[inline] - fn fmt_subslice(f: &mut fmt::Formatter<'_>, chunk: &[u16]) - -> fmt::Result { - if let Some((first, tail)) = chunk.split_first() { - write!(f, "{:x}", first)?; - for segment in tail { - f.write_char(':')?; - write!(f, "{:x}", segment)?; - } - } - Ok(()) - } - if zeroes.len > 1 { - fmt_subslice(f, &segments[..zeroes.start])?; - f.write_str("::")?; - fmt_subslice(f, &segments[zeroes.start + zeroes.len..]) - } else { fmt_subslice(f, &segments) } - } - } else { - const IPV6_BUF_LEN: usize = (4 * 8) + 7; - let mut buf = [0u8; IPV6_BUF_LEN]; - let mut buf_slice = &mut buf[..]; - write!(buf_slice, "{}", self).unwrap(); - let len = IPV6_BUF_LEN - buf_slice.len(); - let buf = unsafe { crate::str::from_utf8_unchecked(&buf[..len]) }; - f.pad(buf) - } - } -} -impl fmt::Debug for Ipv6Addr { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - 
fmt::Display::fmt(self, fmt) - } -} -impl Clone for Ipv6Addr { - fn clone(&self) -> Ipv6Addr { *self } -} -impl PartialEq for Ipv6Addr { - fn eq(&self, other: &Ipv6Addr) -> bool { - self.inner.s6_addr == other.inner.s6_addr - } -} -impl PartialEq for Ipv6Addr { - fn eq(&self, other: &IpAddr) -> bool { - match other { IpAddr::V4(_) => false, IpAddr::V6(v6) => self == v6, } - } -} -impl PartialEq for IpAddr { - fn eq(&self, other: &Ipv6Addr) -> bool { - match self { IpAddr::V4(_) => false, IpAddr::V6(v6) => v6 == other, } - } -} -impl Eq for Ipv6Addr {} -impl hash::Hash for Ipv6Addr { - fn hash(&self, s: &mut H) { self.inner.s6_addr.hash(s) } -} -impl PartialOrd for Ipv6Addr { - fn partial_cmp(&self, other: &Ipv6Addr) -> Option { - Some(self.cmp(other)) - } -} -impl PartialOrd for IpAddr { - fn partial_cmp(&self, other: &Ipv6Addr) -> Option { - match self { - IpAddr::V4(_) => Some(Ordering::Less), - IpAddr::V6(v6) => v6.partial_cmp(other), - } - } -} -impl PartialOrd for Ipv6Addr { - fn partial_cmp(&self, other: &IpAddr) -> Option { - match other { - IpAddr::V4(_) => Some(Ordering::Greater), - IpAddr::V6(v6) => self.partial_cmp(v6), - } - } -} -impl Ord for Ipv6Addr { - fn cmp(&self, other: &Ipv6Addr) -> Ordering { - self.segments().cmp(&other.segments()) - } -} -impl AsInner for Ipv6Addr { - fn as_inner(&self) -> &c::in6_addr { &self.inner } -} -impl FromInner for Ipv6Addr { - fn from_inner(addr: c::in6_addr) -> Ipv6Addr { Ipv6Addr { inner: addr } } -} -impl From for u128 { - fn from(ip: Ipv6Addr) -> u128 { - let ip = ip.octets(); - u128::from_be_bytes(ip) - } -} -impl From for Ipv6Addr { - fn from(ip: u128) -> Ipv6Addr { Ipv6Addr::from(ip.to_be_bytes()) } -} -impl From<[u8; 16]> for Ipv6Addr { - fn from(octets: [u8; 16]) -> Ipv6Addr { - let inner = c::in6_addr { s6_addr: octets }; - Ipv6Addr::from_inner(inner) - } -} -impl From<[u16; 8]> for Ipv6Addr { - fn from(segments: [u16; 8]) -> Ipv6Addr { - let [a, b, c, d, e, f, g, h] = segments; - Ipv6Addr::new(a, b, c, d, e, f, g, h) - } -} -impl From<[u8; 16]> for IpAddr { - fn from(octets: [u8; 16]) -> IpAddr { IpAddr::V6(Ipv6Addr::from(octets)) } -} -impl From<[u16; 8]> for IpAddr { - fn from(segments: [u16; 8]) -> IpAddr { - IpAddr::V6(Ipv6Addr::from(segments)) - } -} diff --git a/vendor/prettyplease/examples/output.rustfmt.rs b/vendor/prettyplease/examples/output.rustfmt.rs deleted file mode 100644 index 3c7181d8efda6f..00000000000000 --- a/vendor/prettyplease/examples/output.rustfmt.rs +++ /dev/null @@ -1,552 +0,0 @@ -use crate::cmp::Ordering; -use crate::fmt::{self, Write as FmtWrite}; -use crate::hash; -use crate::io::Write as IoWrite; -use crate::mem::transmute; -use crate::sys::net::netc as c; -use crate::sys_common::{AsInner, FromInner, IntoInner}; -#[derive(Copy, Clone, Eq, PartialEq, Hash, PartialOrd, Ord)] -pub enum IpAddr { - V4(Ipv4Addr), - V6(Ipv6Addr), -} -#[derive(Copy)] -pub struct Ipv4Addr { - inner: c::in_addr, -} -#[derive(Copy)] -pub struct Ipv6Addr { - inner: c::in6_addr, -} -#[derive(Copy, PartialEq, Eq, Clone, Hash, Debug)] -#[non_exhaustive] -pub enum Ipv6MulticastScope { - InterfaceLocal, - LinkLocal, - RealmLocal, - AdminLocal, - SiteLocal, - OrganizationLocal, - Global, -} -impl IpAddr { - pub const fn is_unspecified(&self) -> bool { - match self { - IpAddr::V4(ip) => ip.is_unspecified(), - IpAddr::V6(ip) => ip.is_unspecified(), - } - } - pub const fn is_loopback(&self) -> bool { - match self { - IpAddr::V4(ip) => ip.is_loopback(), - IpAddr::V6(ip) => ip.is_loopback(), - } - } - pub const fn is_global(&self) -> bool 
{ - match self { - IpAddr::V4(ip) => ip.is_global(), - IpAddr::V6(ip) => ip.is_global(), - } - } - pub const fn is_multicast(&self) -> bool { - match self { - IpAddr::V4(ip) => ip.is_multicast(), - IpAddr::V6(ip) => ip.is_multicast(), - } - } - pub const fn is_documentation(&self) -> bool { - match self { - IpAddr::V4(ip) => ip.is_documentation(), - IpAddr::V6(ip) => ip.is_documentation(), - } - } - pub const fn is_benchmarking(&self) -> bool { - match self { - IpAddr::V4(ip) => ip.is_benchmarking(), - IpAddr::V6(ip) => ip.is_benchmarking(), - } - } - pub const fn is_ipv4(&self) -> bool { - matches!(self, IpAddr::V4(_)) - } - pub const fn is_ipv6(&self) -> bool { - matches!(self, IpAddr::V6(_)) - } - pub const fn to_canonical(&self) -> IpAddr { - match self { - &v4 @ IpAddr::V4(_) => v4, - IpAddr::V6(v6) => v6.to_canonical(), - } - } -} -impl Ipv4Addr { - pub const fn new(a: u8, b: u8, c: u8, d: u8) -> Ipv4Addr { - Ipv4Addr { - inner: c::in_addr { - s_addr: u32::from_ne_bytes([a, b, c, d]), - }, - } - } - pub const LOCALHOST: Self = Ipv4Addr::new(127, 0, 0, 1); - #[doc(alias = "INADDR_ANY")] - pub const UNSPECIFIED: Self = Ipv4Addr::new(0, 0, 0, 0); - pub const BROADCAST: Self = Ipv4Addr::new(255, 255, 255, 255); - pub const fn octets(&self) -> [u8; 4] { - self.inner.s_addr.to_ne_bytes() - } - pub const fn is_unspecified(&self) -> bool { - self.inner.s_addr == 0 - } - pub const fn is_loopback(&self) -> bool { - self.octets()[0] == 127 - } - pub const fn is_private(&self) -> bool { - match self.octets() { - [10, ..] => true, - [172, b, ..] if b >= 16 && b <= 31 => true, - [192, 168, ..] => true, - _ => false, - } - } - pub const fn is_link_local(&self) -> bool { - matches!(self.octets(), [169, 254, ..]) - } - pub const fn is_global(&self) -> bool { - if u32::from_be_bytes(self.octets()) == 0xc0000009 - || u32::from_be_bytes(self.octets()) == 0xc000000a - { - return true; - } - !self.is_private() - && !self.is_loopback() - && !self.is_link_local() - && !self.is_broadcast() - && !self.is_documentation() - && !self.is_shared() - && !(self.octets()[0] == 192 && self.octets()[1] == 0 && self.octets()[2] == 0) - && !self.is_reserved() - && !self.is_benchmarking() - && self.octets()[0] != 0 - } - pub const fn is_shared(&self) -> bool { - self.octets()[0] == 100 && (self.octets()[1] & 0b1100_0000 == 0b0100_0000) - } - pub const fn is_benchmarking(&self) -> bool { - self.octets()[0] == 198 && (self.octets()[1] & 0xfe) == 18 - } - pub const fn is_reserved(&self) -> bool { - self.octets()[0] & 240 == 240 && !self.is_broadcast() - } - pub const fn is_multicast(&self) -> bool { - self.octets()[0] >= 224 && self.octets()[0] <= 239 - } - pub const fn is_broadcast(&self) -> bool { - u32::from_be_bytes(self.octets()) == u32::from_be_bytes(Self::BROADCAST.octets()) - } - pub const fn is_documentation(&self) -> bool { - matches!( - self.octets(), - [192, 0, 2, _] | [198, 51, 100, _] | [203, 0, 113, _] - ) - } - pub const fn to_ipv6_compatible(&self) -> Ipv6Addr { - let [a, b, c, d] = self.octets(); - Ipv6Addr { - inner: c::in6_addr { - s6_addr: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, a, b, c, d], - }, - } - } - pub const fn to_ipv6_mapped(&self) -> Ipv6Addr { - let [a, b, c, d] = self.octets(); - Ipv6Addr { - inner: c::in6_addr { - s6_addr: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xFF, 0xFF, a, b, c, d], - }, - } - } -} -impl fmt::Display for IpAddr { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - match self { - IpAddr::V4(ip) => ip.fmt(fmt), - IpAddr::V6(ip) => ip.fmt(fmt), - } - } -} -impl fmt::Debug 
for IpAddr { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt::Display::fmt(self, fmt) - } -} -impl From for IpAddr { - fn from(ipv4: Ipv4Addr) -> IpAddr { - IpAddr::V4(ipv4) - } -} -impl From for IpAddr { - fn from(ipv6: Ipv6Addr) -> IpAddr { - IpAddr::V6(ipv6) - } -} -impl fmt::Display for Ipv4Addr { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - let octets = self.octets(); - if fmt.precision().is_none() && fmt.width().is_none() { - write!( - fmt, - "{}.{}.{}.{}", - octets[0], octets[1], octets[2], octets[3] - ) - } else { - const IPV4_BUF_LEN: usize = 15; - let mut buf = [0u8; IPV4_BUF_LEN]; - let mut buf_slice = &mut buf[..]; - write!( - buf_slice, - "{}.{}.{}.{}", - octets[0], octets[1], octets[2], octets[3] - ) - .unwrap(); - let len = IPV4_BUF_LEN - buf_slice.len(); - let buf = unsafe { crate::str::from_utf8_unchecked(&buf[..len]) }; - fmt.pad(buf) - } - } -} -impl fmt::Debug for Ipv4Addr { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt::Display::fmt(self, fmt) - } -} -impl Clone for Ipv4Addr { - fn clone(&self) -> Ipv4Addr { - *self - } -} -impl PartialEq for Ipv4Addr { - fn eq(&self, other: &Ipv4Addr) -> bool { - self.inner.s_addr == other.inner.s_addr - } -} -impl PartialEq for IpAddr { - fn eq(&self, other: &Ipv4Addr) -> bool { - match self { - IpAddr::V4(v4) => v4 == other, - IpAddr::V6(_) => false, - } - } -} -impl PartialEq for Ipv4Addr { - fn eq(&self, other: &IpAddr) -> bool { - match other { - IpAddr::V4(v4) => self == v4, - IpAddr::V6(_) => false, - } - } -} -impl Eq for Ipv4Addr {} -impl hash::Hash for Ipv4Addr { - fn hash(&self, s: &mut H) { - { self.inner.s_addr }.hash(s) - } -} -impl PartialOrd for Ipv4Addr { - fn partial_cmp(&self, other: &Ipv4Addr) -> Option { - Some(self.cmp(other)) - } -} -impl PartialOrd for IpAddr { - fn partial_cmp(&self, other: &Ipv4Addr) -> Option { - match self { - IpAddr::V4(v4) => v4.partial_cmp(other), - IpAddr::V6(_) => Some(Ordering::Greater), - } - } -} -impl PartialOrd for Ipv4Addr { - fn partial_cmp(&self, other: &IpAddr) -> Option { - match other { - IpAddr::V4(v4) => self.partial_cmp(v4), - IpAddr::V6(_) => Some(Ordering::Less), - } - } -} -impl Ord for Ipv4Addr { - fn cmp(&self, other: &Ipv4Addr) -> Ordering { - u32::from_be(self.inner.s_addr).cmp(&u32::from_be(other.inner.s_addr)) - } -} -impl IntoInner for Ipv4Addr { - fn into_inner(self) -> c::in_addr { - self.inner - } -} -impl From for u32 { - fn from(ip: Ipv4Addr) -> u32 { - let ip = ip.octets(); - u32::from_be_bytes(ip) - } -} -impl From for Ipv4Addr { - fn from(ip: u32) -> Ipv4Addr { - Ipv4Addr::from(ip.to_be_bytes()) - } -} -impl From<[u8; 4]> for Ipv4Addr { - fn from(octets: [u8; 4]) -> Ipv4Addr { - Ipv4Addr::new(octets[0], octets[1], octets[2], octets[3]) - } -} -impl From<[u8; 4]> for IpAddr { - fn from(octets: [u8; 4]) -> IpAddr { - IpAddr::V4(Ipv4Addr::from(octets)) - } -} -impl Ipv6Addr { - pub const fn new(a: u16, b: u16, c: u16, d: u16, e: u16, f: u16, g: u16, h: u16) -> Ipv6Addr { - let addr16 = [ - a.to_be(), - b.to_be(), - c.to_be(), - d.to_be(), - e.to_be(), - f.to_be(), - g.to_be(), - h.to_be(), - ]; - Ipv6Addr { - inner: c::in6_addr { - s6_addr: unsafe { transmute::<_, [u8; 16]>(addr16) }, - }, - } - } - pub const LOCALHOST: Self = Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1); - pub const UNSPECIFIED: Self = Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 0); - pub const fn segments(&self) -> [u16; 8] { - let [a, b, c, d, e, f, g, h] = unsafe { transmute::<_, [u16; 8]>(self.inner.s6_addr) }; - [ - u16::from_be(a), - 
u16::from_be(b), - u16::from_be(c), - u16::from_be(d), - u16::from_be(e), - u16::from_be(f), - u16::from_be(g), - u16::from_be(h), - ] - } - pub const fn is_unspecified(&self) -> bool { - u128::from_be_bytes(self.octets()) == u128::from_be_bytes(Ipv6Addr::UNSPECIFIED.octets()) - } - pub const fn is_loopback(&self) -> bool { - u128::from_be_bytes(self.octets()) == u128::from_be_bytes(Ipv6Addr::LOCALHOST.octets()) - } - pub const fn is_global(&self) -> bool { - match self.multicast_scope() { - Some(Ipv6MulticastScope::Global) => true, - None => self.is_unicast_global(), - _ => false, - } - } - pub const fn is_unique_local(&self) -> bool { - (self.segments()[0] & 0xfe00) == 0xfc00 - } - pub const fn is_unicast(&self) -> bool { - !self.is_multicast() - } - pub const fn is_unicast_link_local(&self) -> bool { - (self.segments()[0] & 0xffc0) == 0xfe80 - } - pub const fn is_documentation(&self) -> bool { - (self.segments()[0] == 0x2001) && (self.segments()[1] == 0xdb8) - } - pub const fn is_benchmarking(&self) -> bool { - (self.segments()[0] == 0x2001) && (self.segments()[1] == 0x2) && (self.segments()[2] == 0) - } - pub const fn is_unicast_global(&self) -> bool { - self.is_unicast() - && !self.is_loopback() - && !self.is_unicast_link_local() - && !self.is_unique_local() - && !self.is_unspecified() - && !self.is_documentation() - } - pub const fn multicast_scope(&self) -> Option { - if self.is_multicast() { - match self.segments()[0] & 0x000f { - 1 => Some(Ipv6MulticastScope::InterfaceLocal), - 2 => Some(Ipv6MulticastScope::LinkLocal), - 3 => Some(Ipv6MulticastScope::RealmLocal), - 4 => Some(Ipv6MulticastScope::AdminLocal), - 5 => Some(Ipv6MulticastScope::SiteLocal), - 8 => Some(Ipv6MulticastScope::OrganizationLocal), - 14 => Some(Ipv6MulticastScope::Global), - _ => None, - } - } else { - None - } - } - pub const fn is_multicast(&self) -> bool { - (self.segments()[0] & 0xff00) == 0xff00 - } - pub const fn to_ipv4_mapped(&self) -> Option { - match self.octets() { - [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, a, b, c, d] => { - Some(Ipv4Addr::new(a, b, c, d)) - } - _ => None, - } - } - pub const fn to_ipv4(&self) -> Option { - if let [0, 0, 0, 0, 0, 0 | 0xffff, ab, cd] = self.segments() { - let [a, b] = ab.to_be_bytes(); - let [c, d] = cd.to_be_bytes(); - Some(Ipv4Addr::new(a, b, c, d)) - } else { - None - } - } - pub const fn to_canonical(&self) -> IpAddr { - if let Some(mapped) = self.to_ipv4_mapped() { - return IpAddr::V4(mapped); - } - IpAddr::V6(*self) - } - pub const fn octets(&self) -> [u8; 16] { - self.inner.s6_addr - } -} -impl fmt::Display for Ipv6Addr { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - if f.precision().is_none() && f.width().is_none() { - let segments = self.segments(); - if self.is_unspecified() { - f.write_str("::") - } else if self.is_loopback() { - f.write_str("::1") - } else if let Some(ipv4) = self.to_ipv4() { - match segments[5] { - 0 => write!(f, "::{}", ipv4), - 0xffff => write!(f, "::ffff:{}", ipv4), - _ => unreachable!(), - } - } else { # [derive (Copy , Clone , Default)] struct Span { start : usize , len : usize , } let zeroes = { let mut longest = Span :: default () ; let mut current = Span :: default () ; for (i , & segment) in segments . iter () . enumerate () { if segment == 0 { if current . len == 0 { current . start = i ; } current . len += 1 ; if current . len > longest . 
len { longest = current ; } } else { current = Span :: default () ; } } longest } ; # [doc = " Write a colon-separated part of the address"] # [inline] fn fmt_subslice (f : & mut fmt :: Formatter < '_ > , chunk : & [u16]) -> fmt :: Result { if let Some ((first , tail)) = chunk . split_first () { write ! (f , "{:x}" , first) ? ; for segment in tail { f . write_char (':') ? ; write ! (f , "{:x}" , segment) ? ; } } Ok (()) } if zeroes . len > 1 { fmt_subslice (f , & segments [.. zeroes . start]) ? ; f . write_str ("::") ? ; fmt_subslice (f , & segments [zeroes . start + zeroes . len ..]) } else { fmt_subslice (f , & segments) } } - } else { - const IPV6_BUF_LEN: usize = (4 * 8) + 7; - let mut buf = [0u8; IPV6_BUF_LEN]; - let mut buf_slice = &mut buf[..]; - write!(buf_slice, "{}", self).unwrap(); - let len = IPV6_BUF_LEN - buf_slice.len(); - let buf = unsafe { crate::str::from_utf8_unchecked(&buf[..len]) }; - f.pad(buf) - } - } -} -impl fmt::Debug for Ipv6Addr { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt::Display::fmt(self, fmt) - } -} -impl Clone for Ipv6Addr { - fn clone(&self) -> Ipv6Addr { - *self - } -} -impl PartialEq for Ipv6Addr { - fn eq(&self, other: &Ipv6Addr) -> bool { - self.inner.s6_addr == other.inner.s6_addr - } -} -impl PartialEq for Ipv6Addr { - fn eq(&self, other: &IpAddr) -> bool { - match other { - IpAddr::V4(_) => false, - IpAddr::V6(v6) => self == v6, - } - } -} -impl PartialEq for IpAddr { - fn eq(&self, other: &Ipv6Addr) -> bool { - match self { - IpAddr::V4(_) => false, - IpAddr::V6(v6) => v6 == other, - } - } -} -impl Eq for Ipv6Addr {} -impl hash::Hash for Ipv6Addr { - fn hash(&self, s: &mut H) { - self.inner.s6_addr.hash(s) - } -} -impl PartialOrd for Ipv6Addr { - fn partial_cmp(&self, other: &Ipv6Addr) -> Option { - Some(self.cmp(other)) - } -} -impl PartialOrd for IpAddr { - fn partial_cmp(&self, other: &Ipv6Addr) -> Option { - match self { - IpAddr::V4(_) => Some(Ordering::Less), - IpAddr::V6(v6) => v6.partial_cmp(other), - } - } -} -impl PartialOrd for Ipv6Addr { - fn partial_cmp(&self, other: &IpAddr) -> Option { - match other { - IpAddr::V4(_) => Some(Ordering::Greater), - IpAddr::V6(v6) => self.partial_cmp(v6), - } - } -} -impl Ord for Ipv6Addr { - fn cmp(&self, other: &Ipv6Addr) -> Ordering { - self.segments().cmp(&other.segments()) - } -} -impl AsInner for Ipv6Addr { - fn as_inner(&self) -> &c::in6_addr { - &self.inner - } -} -impl FromInner for Ipv6Addr { - fn from_inner(addr: c::in6_addr) -> Ipv6Addr { - Ipv6Addr { inner: addr } - } -} -impl From for u128 { - fn from(ip: Ipv6Addr) -> u128 { - let ip = ip.octets(); - u128::from_be_bytes(ip) - } -} -impl From for Ipv6Addr { - fn from(ip: u128) -> Ipv6Addr { - Ipv6Addr::from(ip.to_be_bytes()) - } -} -impl From<[u8; 16]> for Ipv6Addr { - fn from(octets: [u8; 16]) -> Ipv6Addr { - let inner = c::in6_addr { s6_addr: octets }; - Ipv6Addr::from_inner(inner) - } -} -impl From<[u16; 8]> for Ipv6Addr { - fn from(segments: [u16; 8]) -> Ipv6Addr { - let [a, b, c, d, e, f, g, h] = segments; - Ipv6Addr::new(a, b, c, d, e, f, g, h) - } -} -impl From<[u8; 16]> for IpAddr { - fn from(octets: [u8; 16]) -> IpAddr { - IpAddr::V6(Ipv6Addr::from(octets)) - } -} -impl From<[u16; 8]> for IpAddr { - fn from(segments: [u16; 8]) -> IpAddr { - IpAddr::V6(Ipv6Addr::from(segments)) - } -} diff --git a/vendor/prettyplease/src/algorithm.rs b/vendor/prettyplease/src/algorithm.rs deleted file mode 100644 index 482b3ad7d1e21c..00000000000000 --- a/vendor/prettyplease/src/algorithm.rs +++ /dev/null @@ -1,386 +0,0 @@ 
-// Adapted from https://github.com/rust-lang/rust/blob/1.57.0/compiler/rustc_ast_pretty/src/pp.rs. -// See "Algorithm notes" in the crate-level rustdoc. - -use crate::ring::RingBuffer; -use crate::{MARGIN, MIN_SPACE}; -use std::borrow::Cow; -use std::cmp; -use std::collections::VecDeque; -use std::iter; - -#[derive(Clone, Copy, PartialEq)] -pub enum Breaks { - Consistent, - Inconsistent, -} - -#[derive(Clone, Copy, Default)] -pub struct BreakToken { - pub offset: isize, - pub blank_space: usize, - pub pre_break: Option, - pub post_break: &'static str, - pub no_break: Option, - pub if_nonempty: bool, - pub never_break: bool, -} - -#[derive(Clone, Copy)] -pub struct BeginToken { - pub offset: isize, - pub breaks: Breaks, -} - -#[derive(Clone)] -pub enum Token { - String(Cow<'static, str>), - Break(BreakToken), - Begin(BeginToken), - End, -} - -#[derive(Copy, Clone)] -enum PrintFrame { - Fits(Breaks), - Broken(usize, Breaks), -} - -pub const SIZE_INFINITY: isize = 0xffff; - -pub struct Printer { - out: String, - // Number of spaces left on line - space: isize, - // Ring-buffer of tokens and calculated sizes - buf: RingBuffer, - // Total size of tokens already printed - left_total: isize, - // Total size of tokens enqueued, including printed and not yet printed - right_total: isize, - // Holds the ring-buffer index of the Begin that started the current block, - // possibly with the most recent Break after that Begin (if there is any) on - // top of it. Values are pushed and popped on the back of the queue using it - // like stack, and elsewhere old values are popped from the front of the - // queue as they become irrelevant due to the primary ring-buffer advancing. - scan_stack: VecDeque, - // Stack of blocks-in-progress being flushed by print - print_stack: Vec, - // Level of indentation of current line - indent: usize, - // Buffered indentation to avoid writing trailing whitespace - pending_indentation: usize, -} - -#[derive(Clone)] -struct BufEntry { - token: Token, - size: isize, -} - -impl Printer { - pub fn new() -> Self { - Printer { - out: String::new(), - space: MARGIN, - buf: RingBuffer::new(), - left_total: 0, - right_total: 0, - scan_stack: VecDeque::new(), - print_stack: Vec::new(), - indent: 0, - pending_indentation: 0, - } - } - - pub fn eof(mut self) -> String { - if !self.scan_stack.is_empty() { - self.check_stack(0); - self.advance_left(); - } - self.out - } - - pub fn scan_begin(&mut self, token: BeginToken) { - if self.scan_stack.is_empty() { - self.left_total = 1; - self.right_total = 1; - self.buf.clear(); - } - let right = self.buf.push(BufEntry { - token: Token::Begin(token), - size: -self.right_total, - }); - self.scan_stack.push_back(right); - } - - pub fn scan_end(&mut self) { - if self.scan_stack.is_empty() { - self.print_end(); - } else { - if !self.buf.is_empty() { - if let Token::Break(break_token) = self.buf.last().token { - if self.buf.len() >= 2 { - if let Token::Begin(_) = self.buf.second_last().token { - self.buf.pop_last(); - self.buf.pop_last(); - self.scan_stack.pop_back(); - self.scan_stack.pop_back(); - self.right_total -= break_token.blank_space as isize; - return; - } - } - if break_token.if_nonempty { - self.buf.pop_last(); - self.scan_stack.pop_back(); - self.right_total -= break_token.blank_space as isize; - } - } - } - let right = self.buf.push(BufEntry { - token: Token::End, - size: -1, - }); - self.scan_stack.push_back(right); - } - } - - pub fn scan_break(&mut self, token: BreakToken) { - if self.scan_stack.is_empty() { - self.left_total = 1; 
- self.right_total = 1; - self.buf.clear(); - } else { - self.check_stack(0); - } - let right = self.buf.push(BufEntry { - token: Token::Break(token), - size: -self.right_total, - }); - self.scan_stack.push_back(right); - self.right_total += token.blank_space as isize; - } - - pub fn scan_string(&mut self, string: Cow<'static, str>) { - if self.scan_stack.is_empty() { - self.print_string(string); - } else { - let len = string.len() as isize; - self.buf.push(BufEntry { - token: Token::String(string), - size: len, - }); - self.right_total += len; - self.check_stream(); - } - } - - #[track_caller] - pub fn offset(&mut self, offset: isize) { - match &mut self.buf.last_mut().token { - Token::Break(token) => token.offset += offset, - Token::Begin(_) => {} - Token::String(_) | Token::End => unreachable!(), - } - } - - pub fn end_with_max_width(&mut self, max: isize) { - let mut depth = 1; - for &index in self.scan_stack.iter().rev() { - let entry = &self.buf[index]; - match entry.token { - Token::Begin(_) => { - depth -= 1; - if depth == 0 { - if entry.size < 0 { - let actual_width = entry.size + self.right_total; - if actual_width > max { - self.buf.push(BufEntry { - token: Token::String(Cow::Borrowed("")), - size: SIZE_INFINITY, - }); - self.right_total += SIZE_INFINITY; - } - } - break; - } - } - Token::End => depth += 1, - Token::Break(_) => {} - Token::String(_) => unreachable!(), - } - } - self.scan_end(); - } - - pub fn ends_with(&self, ch: char) -> bool { - for i in self.buf.index_range().rev() { - if let Token::String(token) = &self.buf[i].token { - return token.ends_with(ch); - } - } - self.out.ends_with(ch) - } - - fn check_stream(&mut self) { - while self.right_total - self.left_total > self.space { - if *self.scan_stack.front().unwrap() == self.buf.index_range().start { - self.scan_stack.pop_front().unwrap(); - self.buf.first_mut().size = SIZE_INFINITY; - } - - self.advance_left(); - - if self.buf.is_empty() { - break; - } - } - } - - fn advance_left(&mut self) { - while self.buf.first().size >= 0 { - let left = self.buf.pop_first(); - - match left.token { - Token::String(string) => { - self.left_total += left.size; - self.print_string(string); - } - Token::Break(token) => { - self.left_total += token.blank_space as isize; - self.print_break(token, left.size); - } - Token::Begin(token) => self.print_begin(token, left.size), - Token::End => self.print_end(), - } - - if self.buf.is_empty() { - break; - } - } - } - - fn check_stack(&mut self, mut depth: usize) { - while let Some(&index) = self.scan_stack.back() { - let entry = &mut self.buf[index]; - match entry.token { - Token::Begin(_) => { - if depth == 0 { - break; - } - self.scan_stack.pop_back().unwrap(); - entry.size += self.right_total; - depth -= 1; - } - Token::End => { - self.scan_stack.pop_back().unwrap(); - entry.size = 1; - depth += 1; - } - Token::Break(_) => { - self.scan_stack.pop_back().unwrap(); - entry.size += self.right_total; - if depth == 0 { - break; - } - } - Token::String(_) => unreachable!(), - } - } - } - - fn get_top(&self) -> PrintFrame { - const OUTER: PrintFrame = PrintFrame::Broken(0, Breaks::Inconsistent); - self.print_stack.last().map_or(OUTER, PrintFrame::clone) - } - - fn print_begin(&mut self, token: BeginToken, size: isize) { - if cfg!(prettyplease_debug) { - self.out.push(match token.breaks { - Breaks::Consistent => '«', - Breaks::Inconsistent => '‹', - }); - if cfg!(prettyplease_debug_indent) { - self.out - .extend(token.offset.to_string().chars().map(|ch| match ch { - '0'..='9' => ['₀', '₁', 
'₂', '₃', '₄', '₅', '₆', '₇', '₈', '₉'] - [(ch as u8 - b'0') as usize], - '-' => '₋', - _ => unreachable!(), - })); - } - } - if size > self.space { - self.print_stack - .push(PrintFrame::Broken(self.indent, token.breaks)); - self.indent = usize::try_from(self.indent as isize + token.offset).unwrap(); - } else { - self.print_stack.push(PrintFrame::Fits(token.breaks)); - } - } - - fn print_end(&mut self) { - let breaks = match self.print_stack.pop().unwrap() { - PrintFrame::Broken(indent, breaks) => { - self.indent = indent; - breaks - } - PrintFrame::Fits(breaks) => breaks, - }; - if cfg!(prettyplease_debug) { - self.out.push(match breaks { - Breaks::Consistent => '»', - Breaks::Inconsistent => '›', - }); - } - } - - fn print_break(&mut self, token: BreakToken, size: isize) { - let fits = token.never_break - || match self.get_top() { - PrintFrame::Fits(..) => true, - PrintFrame::Broken(.., Breaks::Consistent) => false, - PrintFrame::Broken(.., Breaks::Inconsistent) => size <= self.space, - }; - if fits { - self.pending_indentation += token.blank_space; - self.space -= token.blank_space as isize; - if let Some(no_break) = token.no_break { - self.out.push(no_break); - self.space -= no_break.len_utf8() as isize; - } - if cfg!(prettyplease_debug) { - self.out.push('·'); - } - } else { - if let Some(pre_break) = token.pre_break { - self.print_indent(); - self.out.push(pre_break); - } - if cfg!(prettyplease_debug) { - self.out.push('·'); - } - self.out.push('\n'); - let indent = self.indent as isize + token.offset; - self.pending_indentation = usize::try_from(indent).unwrap(); - self.space = cmp::max(MARGIN - indent, MIN_SPACE); - if !token.post_break.is_empty() { - self.print_indent(); - self.out.push_str(token.post_break); - self.space -= token.post_break.len() as isize; - } - } - } - - fn print_string(&mut self, string: Cow<'static, str>) { - self.print_indent(); - self.out.push_str(&string); - self.space -= string.len() as isize; - } - - fn print_indent(&mut self) { - self.out.reserve(self.pending_indentation); - self.out - .extend(iter::repeat(' ').take(self.pending_indentation)); - self.pending_indentation = 0; - } -} diff --git a/vendor/prettyplease/src/attr.rs b/vendor/prettyplease/src/attr.rs deleted file mode 100644 index b436283f3c6969..00000000000000 --- a/vendor/prettyplease/src/attr.rs +++ /dev/null @@ -1,288 +0,0 @@ -use crate::algorithm::Printer; -use crate::fixup::FixupContext; -use crate::path::PathKind; -use crate::INDENT; -use proc_macro2::{Delimiter, Group, TokenStream, TokenTree}; -use syn::{AttrStyle, Attribute, Expr, Lit, MacroDelimiter, Meta, MetaList, MetaNameValue}; - -impl Printer { - pub fn outer_attrs(&mut self, attrs: &[Attribute]) { - for attr in attrs { - if let AttrStyle::Outer = attr.style { - self.attr(attr); - } - } - } - - pub fn inner_attrs(&mut self, attrs: &[Attribute]) { - for attr in attrs { - if let AttrStyle::Inner(_) = attr.style { - self.attr(attr); - } - } - } - - fn attr(&mut self, attr: &Attribute) { - if let Some(mut doc) = value_of_attribute("doc", attr) { - if !doc.contains('\n') - && match attr.style { - AttrStyle::Outer => !doc.starts_with('/'), - AttrStyle::Inner(_) => true, - } - { - trim_trailing_spaces(&mut doc); - self.word(match attr.style { - AttrStyle::Outer => "///", - AttrStyle::Inner(_) => "//!", - }); - self.word(doc); - self.hardbreak(); - return; - } else if can_be_block_comment(&doc) - && match attr.style { - AttrStyle::Outer => !doc.starts_with(&['*', '/'][..]), - AttrStyle::Inner(_) => true, - } - { - 
trim_interior_trailing_spaces(&mut doc); - self.word(match attr.style { - AttrStyle::Outer => "/**", - AttrStyle::Inner(_) => "/*!", - }); - self.word(doc); - self.word("*/"); - self.hardbreak(); - return; - } - } else if let Some(mut comment) = value_of_attribute("comment", attr) { - if !comment.contains('\n') { - trim_trailing_spaces(&mut comment); - self.word("//"); - self.word(comment); - self.hardbreak(); - return; - } else if can_be_block_comment(&comment) && !comment.starts_with(&['*', '!'][..]) { - trim_interior_trailing_spaces(&mut comment); - self.word("/*"); - self.word(comment); - self.word("*/"); - self.hardbreak(); - return; - } - } - - self.word(match attr.style { - AttrStyle::Outer => "#", - AttrStyle::Inner(_) => "#!", - }); - self.word("["); - self.meta(&attr.meta); - self.word("]"); - self.space(); - } - - fn meta(&mut self, meta: &Meta) { - match meta { - Meta::Path(path) => self.path(path, PathKind::Simple), - Meta::List(meta) => self.meta_list(meta), - Meta::NameValue(meta) => self.meta_name_value(meta), - } - } - - fn meta_list(&mut self, meta: &MetaList) { - self.path(&meta.path, PathKind::Simple); - let delimiter = match meta.delimiter { - MacroDelimiter::Paren(_) => Delimiter::Parenthesis, - MacroDelimiter::Brace(_) => Delimiter::Brace, - MacroDelimiter::Bracket(_) => Delimiter::Bracket, - }; - let group = Group::new(delimiter, meta.tokens.clone()); - self.attr_tokens(TokenStream::from(TokenTree::Group(group))); - } - - fn meta_name_value(&mut self, meta: &MetaNameValue) { - self.path(&meta.path, PathKind::Simple); - self.word(" = "); - self.expr(&meta.value, FixupContext::NONE); - } - - fn attr_tokens(&mut self, tokens: TokenStream) { - let mut stack = Vec::new(); - stack.push((tokens.into_iter().peekable(), Delimiter::None)); - let mut space = Self::nbsp as fn(&mut Self); - - #[derive(PartialEq)] - enum State { - Word, - Punct, - TrailingComma, - } - - use State::*; - let mut state = Word; - - while let Some((tokens, delimiter)) = stack.last_mut() { - match tokens.next() { - Some(TokenTree::Ident(ident)) => { - if let Word = state { - space(self); - } - self.ident(&ident); - state = Word; - } - Some(TokenTree::Punct(punct)) => { - let ch = punct.as_char(); - if let (Word, '=') = (state, ch) { - self.nbsp(); - } - if ch == ',' && tokens.peek().is_none() { - self.trailing_comma(true); - state = TrailingComma; - } else { - self.token_punct(ch); - if ch == '=' { - self.nbsp(); - } else if ch == ',' { - space(self); - } - state = Punct; - } - } - Some(TokenTree::Literal(literal)) => { - if let Word = state { - space(self); - } - self.token_literal(&literal); - state = Word; - } - Some(TokenTree::Group(group)) => { - let delimiter = group.delimiter(); - let stream = group.stream(); - match delimiter { - Delimiter::Parenthesis => { - self.word("("); - self.cbox(INDENT); - self.zerobreak(); - state = Punct; - } - Delimiter::Brace => { - self.word("{"); - state = Punct; - } - Delimiter::Bracket => { - self.word("["); - state = Punct; - } - Delimiter::None => {} - } - stack.push((stream.into_iter().peekable(), delimiter)); - space = Self::space; - } - None => { - match delimiter { - Delimiter::Parenthesis => { - if state != TrailingComma { - self.zerobreak(); - } - self.offset(-INDENT); - self.end(); - self.word(")"); - state = Punct; - } - Delimiter::Brace => { - self.word("}"); - state = Punct; - } - Delimiter::Bracket => { - self.word("]"); - state = Punct; - } - Delimiter::None => {} - } - stack.pop(); - if stack.is_empty() { - space = Self::nbsp; - } - } - } - } - } 
-} - -fn value_of_attribute(requested: &str, attr: &Attribute) -> Option { - let value = match &attr.meta { - Meta::NameValue(meta) if meta.path.is_ident(requested) => &meta.value, - _ => return None, - }; - let lit = match value { - Expr::Lit(expr) if expr.attrs.is_empty() => &expr.lit, - _ => return None, - }; - match lit { - Lit::Str(string) => Some(string.value()), - _ => None, - } -} - -pub fn has_outer(attrs: &[Attribute]) -> bool { - for attr in attrs { - if let AttrStyle::Outer = attr.style { - return true; - } - } - false -} - -pub fn has_inner(attrs: &[Attribute]) -> bool { - for attr in attrs { - if let AttrStyle::Inner(_) = attr.style { - return true; - } - } - false -} - -fn trim_trailing_spaces(doc: &mut String) { - doc.truncate(doc.trim_end_matches(' ').len()); -} - -fn trim_interior_trailing_spaces(doc: &mut String) { - if !doc.contains(" \n") { - return; - } - let mut trimmed = String::with_capacity(doc.len()); - let mut lines = doc.split('\n').peekable(); - while let Some(line) = lines.next() { - if lines.peek().is_some() { - trimmed.push_str(line.trim_end_matches(' ')); - trimmed.push('\n'); - } else { - trimmed.push_str(line); - } - } - *doc = trimmed; -} - -fn can_be_block_comment(value: &str) -> bool { - let mut depth = 0usize; - let bytes = value.as_bytes(); - let mut i = 0usize; - let upper = bytes.len() - 1; - - while i < upper { - if bytes[i] == b'/' && bytes[i + 1] == b'*' { - depth += 1; - i += 2; - } else if bytes[i] == b'*' && bytes[i + 1] == b'/' { - if depth == 0 { - return false; - } - depth -= 1; - i += 2; - } else { - i += 1; - } - } - - depth == 0 && !value.ends_with('/') -} diff --git a/vendor/prettyplease/src/classify.rs b/vendor/prettyplease/src/classify.rs deleted file mode 100644 index 17648f6c8b7af8..00000000000000 --- a/vendor/prettyplease/src/classify.rs +++ /dev/null @@ -1,324 +0,0 @@ -use proc_macro2::{Delimiter, TokenStream, TokenTree}; -use std::ops::ControlFlow; -use syn::punctuated::Punctuated; -use syn::{Expr, MacroDelimiter, Path, PathArguments, ReturnType, Token, Type, TypeParamBound}; - -pub(crate) fn requires_semi_to_be_stmt(expr: &Expr) -> bool { - match expr { - Expr::Macro(expr) => !matches!(expr.mac.delimiter, MacroDelimiter::Brace(_)), - _ => requires_comma_to_be_match_arm(expr), - } -} - -pub(crate) fn requires_comma_to_be_match_arm(mut expr: &Expr) -> bool { - loop { - match expr { - #![cfg_attr(all(test, exhaustive), deny(non_exhaustive_omitted_patterns))] - Expr::If(_) - | Expr::Match(_) - | Expr::Block(_) | Expr::Unsafe(_) // both under ExprKind::Block in rustc - | Expr::While(_) - | Expr::Loop(_) - | Expr::ForLoop(_) - | Expr::TryBlock(_) - | Expr::Const(_) => return false, - - Expr::Array(_) - | Expr::Assign(_) - | Expr::Async(_) - | Expr::Await(_) - | Expr::Binary(_) - | Expr::Break(_) - | Expr::Call(_) - | Expr::Cast(_) - | Expr::Closure(_) - | Expr::Continue(_) - | Expr::Field(_) - | Expr::Index(_) - | Expr::Infer(_) - | Expr::Let(_) - | Expr::Lit(_) - | Expr::Macro(_) - | Expr::MethodCall(_) - | Expr::Paren(_) - | Expr::Path(_) - | Expr::Range(_) - | Expr::RawAddr(_) - | Expr::Reference(_) - | Expr::Repeat(_) - | Expr::Return(_) - | Expr::Struct(_) - | Expr::Try(_) - | Expr::Tuple(_) - | Expr::Unary(_) - | Expr::Yield(_) - | Expr::Verbatim(_) => return true, - - Expr::Group(group) => expr = &group.expr, - - _ => return true, - } - } -} - -pub(crate) fn trailing_unparameterized_path(mut ty: &Type) -> bool { - loop { - match ty { - #![cfg_attr(all(test, exhaustive), deny(non_exhaustive_omitted_patterns))] - 
Type::BareFn(t) => match &t.output { - ReturnType::Default => return false, - ReturnType::Type(_, ret) => ty = ret, - }, - Type::ImplTrait(t) => match last_type_in_bounds(&t.bounds) { - ControlFlow::Break(trailing_path) => return trailing_path, - ControlFlow::Continue(t) => ty = t, - }, - Type::Path(t) => match last_type_in_path(&t.path) { - ControlFlow::Break(trailing_path) => return trailing_path, - ControlFlow::Continue(t) => ty = t, - }, - Type::Ptr(t) => ty = &t.elem, - Type::Reference(t) => ty = &t.elem, - Type::TraitObject(t) => match last_type_in_bounds(&t.bounds) { - ControlFlow::Break(trailing_path) => return trailing_path, - ControlFlow::Continue(t) => ty = t, - }, - - Type::Array(_) - | Type::Group(_) - | Type::Infer(_) - | Type::Macro(_) - | Type::Never(_) - | Type::Paren(_) - | Type::Slice(_) - | Type::Tuple(_) - | Type::Verbatim(_) => return false, - - _ => return false, - } - } - - fn last_type_in_path(path: &Path) -> ControlFlow { - match &path.segments.last().unwrap().arguments { - PathArguments::None => ControlFlow::Break(true), - PathArguments::AngleBracketed(_) => ControlFlow::Break(false), - PathArguments::Parenthesized(arg) => match &arg.output { - ReturnType::Default => ControlFlow::Break(false), - ReturnType::Type(_, ret) => ControlFlow::Continue(ret), - }, - } - } - - fn last_type_in_bounds( - bounds: &Punctuated, - ) -> ControlFlow { - match bounds.last().unwrap() { - #![cfg_attr(all(test, exhaustive), deny(non_exhaustive_omitted_patterns))] - TypeParamBound::Trait(t) => last_type_in_path(&t.path), - TypeParamBound::Lifetime(_) - | TypeParamBound::PreciseCapture(_) - | TypeParamBound::Verbatim(_) => ControlFlow::Break(false), - _ => ControlFlow::Break(false), - } - } -} - -/// Whether the expression's first token is the label of a loop/block. -pub(crate) fn expr_leading_label(mut expr: &Expr) -> bool { - loop { - match expr { - #![cfg_attr(all(test, exhaustive), deny(non_exhaustive_omitted_patterns))] - Expr::Block(e) => return e.label.is_some(), - Expr::ForLoop(e) => return e.label.is_some(), - Expr::Loop(e) => return e.label.is_some(), - Expr::While(e) => return e.label.is_some(), - - Expr::Assign(e) => expr = &e.left, - Expr::Await(e) => expr = &e.base, - Expr::Binary(e) => expr = &e.left, - Expr::Call(e) => expr = &e.func, - Expr::Cast(e) => expr = &e.expr, - Expr::Field(e) => expr = &e.base, - Expr::Index(e) => expr = &e.expr, - Expr::MethodCall(e) => expr = &e.receiver, - Expr::Range(e) => match &e.start { - Some(start) => expr = start, - None => return false, - }, - Expr::Try(e) => expr = &e.expr, - - Expr::Array(_) - | Expr::Async(_) - | Expr::Break(_) - | Expr::Closure(_) - | Expr::Const(_) - | Expr::Continue(_) - | Expr::If(_) - | Expr::Infer(_) - | Expr::Let(_) - | Expr::Lit(_) - | Expr::Macro(_) - | Expr::Match(_) - | Expr::Paren(_) - | Expr::Path(_) - | Expr::RawAddr(_) - | Expr::Reference(_) - | Expr::Repeat(_) - | Expr::Return(_) - | Expr::Struct(_) - | Expr::TryBlock(_) - | Expr::Tuple(_) - | Expr::Unary(_) - | Expr::Unsafe(_) - | Expr::Verbatim(_) - | Expr::Yield(_) => return false, - - Expr::Group(e) => { - if !e.attrs.is_empty() { - return false; - } - expr = &e.expr; - } - - _ => return false, - } - } -} - -/// Whether the expression's last token is `}`. 
-pub(crate) fn expr_trailing_brace(mut expr: &Expr) -> bool { - loop { - match expr { - #![cfg_attr(all(test, exhaustive), deny(non_exhaustive_omitted_patterns))] - Expr::Async(_) - | Expr::Block(_) - | Expr::Const(_) - | Expr::ForLoop(_) - | Expr::If(_) - | Expr::Loop(_) - | Expr::Match(_) - | Expr::Struct(_) - | Expr::TryBlock(_) - | Expr::Unsafe(_) - | Expr::While(_) => return true, - - Expr::Assign(e) => expr = &e.right, - Expr::Binary(e) => expr = &e.right, - Expr::Break(e) => match &e.expr { - Some(e) => expr = e, - None => return false, - }, - Expr::Cast(e) => return type_trailing_brace(&e.ty), - Expr::Closure(e) => expr = &e.body, - Expr::Group(e) => expr = &e.expr, - Expr::Let(e) => expr = &e.expr, - Expr::Macro(e) => return matches!(e.mac.delimiter, MacroDelimiter::Brace(_)), - Expr::Range(e) => match &e.end { - Some(end) => expr = end, - None => return false, - }, - Expr::RawAddr(e) => expr = &e.expr, - Expr::Reference(e) => expr = &e.expr, - Expr::Return(e) => match &e.expr { - Some(e) => expr = e, - None => return false, - }, - Expr::Unary(e) => expr = &e.expr, - Expr::Verbatim(e) => return tokens_trailing_brace(e), - Expr::Yield(e) => match &e.expr { - Some(e) => expr = e, - None => return false, - }, - - Expr::Array(_) - | Expr::Await(_) - | Expr::Call(_) - | Expr::Continue(_) - | Expr::Field(_) - | Expr::Index(_) - | Expr::Infer(_) - | Expr::Lit(_) - | Expr::MethodCall(_) - | Expr::Paren(_) - | Expr::Path(_) - | Expr::Repeat(_) - | Expr::Try(_) - | Expr::Tuple(_) => return false, - - _ => return false, - } - } - - fn type_trailing_brace(mut ty: &Type) -> bool { - loop { - match ty { - #![cfg_attr(all(test, exhaustive), deny(non_exhaustive_omitted_patterns))] - Type::BareFn(t) => match &t.output { - ReturnType::Default => return false, - ReturnType::Type(_, ret) => ty = ret, - }, - Type::ImplTrait(t) => match last_type_in_bounds(&t.bounds) { - ControlFlow::Break(trailing_brace) => return trailing_brace, - ControlFlow::Continue(t) => ty = t, - }, - Type::Macro(t) => return matches!(t.mac.delimiter, MacroDelimiter::Brace(_)), - Type::Path(t) => match last_type_in_path(&t.path) { - Some(t) => ty = t, - None => return false, - }, - Type::Ptr(t) => ty = &t.elem, - Type::Reference(t) => ty = &t.elem, - Type::TraitObject(t) => match last_type_in_bounds(&t.bounds) { - ControlFlow::Break(trailing_brace) => return trailing_brace, - ControlFlow::Continue(t) => ty = t, - }, - Type::Verbatim(t) => return tokens_trailing_brace(t), - - Type::Array(_) - | Type::Group(_) - | Type::Infer(_) - | Type::Never(_) - | Type::Paren(_) - | Type::Slice(_) - | Type::Tuple(_) => return false, - - _ => return false, - } - } - } - - fn last_type_in_path(path: &Path) -> Option<&Type> { - match &path.segments.last().unwrap().arguments { - PathArguments::None | PathArguments::AngleBracketed(_) => None, - PathArguments::Parenthesized(arg) => match &arg.output { - ReturnType::Default => None, - ReturnType::Type(_, ret) => Some(ret), - }, - } - } - - fn last_type_in_bounds( - bounds: &Punctuated, - ) -> ControlFlow { - match bounds.last().unwrap() { - #![cfg_attr(all(test, exhaustive), deny(non_exhaustive_omitted_patterns))] - TypeParamBound::Trait(t) => match last_type_in_path(&t.path) { - Some(t) => ControlFlow::Continue(t), - None => ControlFlow::Break(false), - }, - TypeParamBound::Lifetime(_) | TypeParamBound::PreciseCapture(_) => { - ControlFlow::Break(false) - } - TypeParamBound::Verbatim(t) => ControlFlow::Break(tokens_trailing_brace(t)), - _ => ControlFlow::Break(false), - } - } - - fn 
tokens_trailing_brace(tokens: &TokenStream) -> bool { - if let Some(TokenTree::Group(last)) = tokens.clone().into_iter().last() { - last.delimiter() == Delimiter::Brace - } else { - false - } - } -} diff --git a/vendor/prettyplease/src/convenience.rs b/vendor/prettyplease/src/convenience.rs deleted file mode 100644 index bc4add6e08be24..00000000000000 --- a/vendor/prettyplease/src/convenience.rs +++ /dev/null @@ -1,98 +0,0 @@ -use crate::algorithm::{self, BeginToken, BreakToken, Breaks, Printer}; -use std::borrow::Cow; - -impl Printer { - pub fn ibox(&mut self, indent: isize) { - self.scan_begin(BeginToken { - offset: indent, - breaks: Breaks::Inconsistent, - }); - } - - pub fn cbox(&mut self, indent: isize) { - self.scan_begin(BeginToken { - offset: indent, - breaks: Breaks::Consistent, - }); - } - - pub fn end(&mut self) { - self.scan_end(); - } - - pub fn word>>(&mut self, wrd: S) { - let s = wrd.into(); - self.scan_string(s); - } - - fn spaces(&mut self, n: usize) { - self.scan_break(BreakToken { - blank_space: n, - ..BreakToken::default() - }); - } - - pub fn zerobreak(&mut self) { - self.spaces(0); - } - - pub fn space(&mut self) { - self.spaces(1); - } - - pub fn nbsp(&mut self) { - self.word(" "); - } - - pub fn hardbreak(&mut self) { - self.spaces(algorithm::SIZE_INFINITY as usize); - } - - pub fn space_if_nonempty(&mut self) { - self.scan_break(BreakToken { - blank_space: 1, - if_nonempty: true, - ..BreakToken::default() - }); - } - - pub fn hardbreak_if_nonempty(&mut self) { - self.scan_break(BreakToken { - blank_space: algorithm::SIZE_INFINITY as usize, - if_nonempty: true, - ..BreakToken::default() - }); - } - - pub fn trailing_comma(&mut self, is_last: bool) { - if is_last { - self.scan_break(BreakToken { - pre_break: Some(','), - ..BreakToken::default() - }); - } else { - self.word(","); - self.space(); - } - } - - pub fn trailing_comma_or_space(&mut self, is_last: bool) { - if is_last { - self.scan_break(BreakToken { - blank_space: 1, - pre_break: Some(','), - ..BreakToken::default() - }); - } else { - self.word(","); - self.space(); - } - } - - pub fn neverbreak(&mut self) { - self.scan_break(BreakToken { - never_break: true, - ..BreakToken::default() - }); - } -} diff --git a/vendor/prettyplease/src/data.rs b/vendor/prettyplease/src/data.rs deleted file mode 100644 index 3561a49b4a1cdc..00000000000000 --- a/vendor/prettyplease/src/data.rs +++ /dev/null @@ -1,79 +0,0 @@ -use crate::algorithm::Printer; -use crate::fixup::FixupContext; -use crate::iter::IterDelimited; -use crate::path::PathKind; -use crate::INDENT; -use syn::{Field, Fields, FieldsUnnamed, Variant, VisRestricted, Visibility}; - -impl Printer { - pub fn variant(&mut self, variant: &Variant) { - self.outer_attrs(&variant.attrs); - self.ident(&variant.ident); - match &variant.fields { - Fields::Named(fields) => { - self.nbsp(); - self.word("{"); - self.cbox(INDENT); - self.space(); - for field in fields.named.iter().delimited() { - self.field(&field); - self.trailing_comma_or_space(field.is_last); - } - self.offset(-INDENT); - self.end(); - self.word("}"); - } - Fields::Unnamed(fields) => { - self.cbox(INDENT); - self.fields_unnamed(fields); - self.end(); - } - Fields::Unit => {} - } - if let Some((_eq_token, discriminant)) = &variant.discriminant { - self.word(" = "); - self.expr(discriminant, FixupContext::NONE); - } - } - - pub fn fields_unnamed(&mut self, fields: &FieldsUnnamed) { - self.word("("); - self.zerobreak(); - for field in fields.unnamed.iter().delimited() { - self.field(&field); - 
self.trailing_comma(field.is_last); - } - self.offset(-INDENT); - self.word(")"); - } - - pub fn field(&mut self, field: &Field) { - self.outer_attrs(&field.attrs); - self.visibility(&field.vis); - if let Some(ident) = &field.ident { - self.ident(ident); - self.word(": "); - } - self.ty(&field.ty); - } - - pub fn visibility(&mut self, vis: &Visibility) { - match vis { - Visibility::Public(_) => self.word("pub "), - Visibility::Restricted(vis) => self.vis_restricted(vis), - Visibility::Inherited => {} - } - } - - fn vis_restricted(&mut self, vis: &VisRestricted) { - self.word("pub("); - let omit_in = vis.path.get_ident().map_or(false, |ident| { - matches!(ident.to_string().as_str(), "self" | "super" | "crate") - }); - if !omit_in { - self.word("in "); - } - self.path(&vis.path, PathKind::Simple); - self.word(") "); - } -} diff --git a/vendor/prettyplease/src/expr.rs b/vendor/prettyplease/src/expr.rs deleted file mode 100644 index 55b1b605531123..00000000000000 --- a/vendor/prettyplease/src/expr.rs +++ /dev/null @@ -1,1533 +0,0 @@ -use crate::algorithm::{BreakToken, Printer}; -use crate::attr; -use crate::classify; -use crate::fixup::FixupContext; -use crate::iter::IterDelimited; -use crate::path::PathKind; -use crate::precedence::Precedence; -use crate::stmt; -use crate::INDENT; -use proc_macro2::TokenStream; -use syn::punctuated::Punctuated; -use syn::{ - token, Arm, Attribute, BinOp, Block, Expr, ExprArray, ExprAssign, ExprAsync, ExprAwait, - ExprBinary, ExprBlock, ExprBreak, ExprCall, ExprCast, ExprClosure, ExprConst, ExprContinue, - ExprField, ExprForLoop, ExprGroup, ExprIf, ExprIndex, ExprInfer, ExprLet, ExprLit, ExprLoop, - ExprMacro, ExprMatch, ExprMethodCall, ExprParen, ExprPath, ExprRange, ExprRawAddr, - ExprReference, ExprRepeat, ExprReturn, ExprStruct, ExprTry, ExprTryBlock, ExprTuple, ExprUnary, - ExprUnsafe, ExprWhile, ExprYield, FieldValue, Index, Label, Lit, Member, PointerMutability, - RangeLimits, ReturnType, Stmt, Token, UnOp, -}; - -impl Printer { - pub fn expr(&mut self, expr: &Expr, mut fixup: FixupContext) { - let needs_paren = fixup.parenthesize(expr); - if needs_paren { - self.word("("); - fixup = FixupContext::NONE; - } - - let beginning_of_line = false; - - match expr { - #![cfg_attr(all(test, exhaustive), deny(non_exhaustive_omitted_patterns))] - Expr::Array(expr) => self.expr_array(expr), - Expr::Assign(expr) => self.expr_assign(expr, fixup), - Expr::Async(expr) => self.expr_async(expr), - Expr::Await(expr) => self.expr_await(expr, beginning_of_line, fixup), - Expr::Binary(expr) => self.expr_binary(expr, fixup), - Expr::Block(expr) => self.expr_block(expr), - Expr::Break(expr) => self.expr_break(expr, fixup), - Expr::Call(expr) => self.expr_call(expr, beginning_of_line, fixup), - Expr::Cast(expr) => self.expr_cast(expr, fixup), - Expr::Closure(expr) => self.expr_closure(expr, fixup), - Expr::Const(expr) => self.expr_const(expr), - Expr::Continue(expr) => self.expr_continue(expr), - Expr::Field(expr) => self.expr_field(expr, beginning_of_line, fixup), - Expr::ForLoop(expr) => self.expr_for_loop(expr), - Expr::Group(expr) => self.expr_group(expr, fixup), - Expr::If(expr) => self.expr_if(expr), - Expr::Index(expr) => self.expr_index(expr, beginning_of_line, fixup), - Expr::Infer(expr) => self.expr_infer(expr), - Expr::Let(expr) => self.expr_let(expr, fixup), - Expr::Lit(expr) => self.expr_lit(expr), - Expr::Loop(expr) => self.expr_loop(expr), - Expr::Macro(expr) => self.expr_macro(expr), - Expr::Match(expr) => self.expr_match(expr), - Expr::MethodCall(expr) => 
self.expr_method_call(expr, beginning_of_line, fixup), - Expr::Paren(expr) => self.expr_paren(expr), - Expr::Path(expr) => self.expr_path(expr), - Expr::Range(expr) => self.expr_range(expr, fixup), - Expr::RawAddr(expr) => self.expr_raw_addr(expr, fixup), - Expr::Reference(expr) => self.expr_reference(expr, fixup), - Expr::Repeat(expr) => self.expr_repeat(expr), - Expr::Return(expr) => self.expr_return(expr, fixup), - Expr::Struct(expr) => self.expr_struct(expr), - Expr::Try(expr) => self.expr_try(expr, beginning_of_line, fixup), - Expr::TryBlock(expr) => self.expr_try_block(expr), - Expr::Tuple(expr) => self.expr_tuple(expr), - Expr::Unary(expr) => self.expr_unary(expr, fixup), - Expr::Unsafe(expr) => self.expr_unsafe(expr), - Expr::Verbatim(expr) => self.expr_verbatim(expr, fixup), - Expr::While(expr) => self.expr_while(expr), - Expr::Yield(expr) => self.expr_yield(expr, fixup), - _ => unimplemented!("unknown Expr"), - } - - if needs_paren { - self.word(")"); - } - } - - pub fn expr_beginning_of_line( - &mut self, - expr: &Expr, - mut needs_paren: bool, - beginning_of_line: bool, - mut fixup: FixupContext, - ) { - needs_paren |= fixup.parenthesize(expr); - if needs_paren { - self.word("("); - fixup = FixupContext::NONE; - } - - match expr { - Expr::Await(expr) => self.expr_await(expr, beginning_of_line, fixup), - Expr::Field(expr) => self.expr_field(expr, beginning_of_line, fixup), - Expr::Index(expr) => self.expr_index(expr, beginning_of_line, fixup), - Expr::MethodCall(expr) => self.expr_method_call(expr, beginning_of_line, fixup), - Expr::Try(expr) => self.expr_try(expr, beginning_of_line, fixup), - _ => self.expr(expr, fixup), - } - - if needs_paren { - self.word(")"); - } - } - - fn prefix_subexpr( - &mut self, - expr: &Expr, - mut needs_paren: bool, - beginning_of_line: bool, - mut fixup: FixupContext, - ) { - needs_paren |= fixup.parenthesize(expr); - if needs_paren { - self.word("("); - fixup = FixupContext::NONE; - } - - match expr { - Expr::Await(expr) => self.prefix_subexpr_await(expr, beginning_of_line, fixup), - Expr::Call(expr) => self.prefix_subexpr_call(expr, fixup), - Expr::Field(expr) => self.prefix_subexpr_field(expr, beginning_of_line, fixup), - Expr::Index(expr) => self.prefix_subexpr_index(expr, beginning_of_line, fixup), - Expr::MethodCall(expr) => { - let unindent_call_args = false; - self.prefix_subexpr_method_call(expr, beginning_of_line, unindent_call_args, fixup); - } - Expr::Try(expr) => self.prefix_subexpr_try(expr, beginning_of_line, fixup), - _ => { - self.cbox(-INDENT); - self.expr(expr, fixup); - self.end(); - } - } - - if needs_paren { - self.word(")"); - } - } - - fn expr_condition(&mut self, expr: &Expr) { - self.cbox(0); - self.expr(expr, FixupContext::new_condition()); - if needs_newline_if_wrap(expr) { - self.space(); - } else { - self.nbsp(); - } - self.end(); - } - - pub fn subexpr(&mut self, expr: &Expr, needs_paren: bool, mut fixup: FixupContext) { - if needs_paren { - self.word("("); - fixup = FixupContext::NONE; - } - - self.expr(expr, fixup); - - if needs_paren { - self.word(")"); - } - } - - fn expr_array(&mut self, expr: &ExprArray) { - self.outer_attrs(&expr.attrs); - if expr.elems.is_empty() { - self.word("[]"); - } else if simple_array(&expr.elems) { - self.cbox(INDENT); - self.word("["); - self.zerobreak(); - self.ibox(0); - for elem in expr.elems.iter().delimited() { - self.expr(&elem, FixupContext::NONE); - if !elem.is_last { - self.word(","); - self.space(); - } - } - self.end(); - self.trailing_comma(true); - self.offset(-INDENT); 
- self.word("]"); - self.end(); - } else { - self.word("["); - self.cbox(INDENT); - self.zerobreak(); - for elem in expr.elems.iter().delimited() { - self.expr(&elem, FixupContext::NONE); - self.trailing_comma(elem.is_last); - } - self.offset(-INDENT); - self.end(); - self.word("]"); - } - } - - fn expr_assign(&mut self, expr: &ExprAssign, fixup: FixupContext) { - let (left_prec, left_fixup) = fixup.leftmost_subexpression_with_operator( - &expr.left, - false, - false, - Precedence::Assign, - ); - let right_fixup = fixup.rightmost_subexpression_fixup(false, false, Precedence::Assign); - - self.outer_attrs(&expr.attrs); - self.ibox(0); - if !expr.attrs.is_empty() { - self.word("("); - } - self.subexpr(&expr.left, left_prec <= Precedence::Range, left_fixup); - self.word(" = "); - self.neverbreak(); - self.expr(&expr.right, right_fixup); - if !expr.attrs.is_empty() { - self.word(")"); - } - self.end(); - } - - fn expr_async(&mut self, expr: &ExprAsync) { - self.outer_attrs(&expr.attrs); - self.word("async "); - if expr.capture.is_some() { - self.word("move "); - } - self.cbox(INDENT); - self.small_block(&expr.block, &expr.attrs); - self.end(); - } - - fn expr_await(&mut self, expr: &ExprAwait, beginning_of_line: bool, fixup: FixupContext) { - self.outer_attrs(&expr.attrs); - self.cbox(INDENT); - self.prefix_subexpr_await(expr, beginning_of_line, fixup); - self.end(); - } - - fn prefix_subexpr_await( - &mut self, - expr: &ExprAwait, - beginning_of_line: bool, - fixup: FixupContext, - ) { - let (left_prec, left_fixup) = fixup.leftmost_subexpression_with_dot(&expr.base); - - self.prefix_subexpr( - &expr.base, - left_prec < Precedence::Unambiguous, - beginning_of_line, - left_fixup, - ); - if !(beginning_of_line && is_short_ident(&expr.base)) { - self.scan_break(BreakToken { - no_break: self.ends_with('.').then_some(' '), - ..BreakToken::default() - }); - } - self.word(".await"); - } - - fn expr_binary(&mut self, expr: &ExprBinary, fixup: FixupContext) { - let binop_prec = Precedence::of_binop(&expr.op); - let (left_prec, left_fixup) = fixup.leftmost_subexpression_with_operator( - &expr.left, - match &expr.op { - BinOp::Sub(_) - | BinOp::Mul(_) - | BinOp::And(_) - | BinOp::Or(_) - | BinOp::BitAnd(_) - | BinOp::BitOr(_) - | BinOp::Shl(_) - | BinOp::Lt(_) => true, - _ => false, - }, - match &expr.op { - BinOp::Shl(_) | BinOp::Lt(_) => true, - _ => false, - }, - binop_prec, - ); - let left_needs_group = match binop_prec { - Precedence::Assign => left_prec <= Precedence::Range, - Precedence::Compare => left_prec <= binop_prec, - _ => left_prec < binop_prec, - }; - let right_fixup = fixup.rightmost_subexpression_fixup(false, false, binop_prec); - let right_needs_group = binop_prec != Precedence::Assign - && right_fixup.rightmost_subexpression_precedence(&expr.right) <= binop_prec; - - self.outer_attrs(&expr.attrs); - self.ibox(INDENT); - self.ibox(-INDENT); - if !expr.attrs.is_empty() { - self.word("("); - } - self.subexpr(&expr.left, left_needs_group, left_fixup); - self.end(); - self.space(); - self.binary_operator(&expr.op); - self.nbsp(); - self.subexpr(&expr.right, right_needs_group, right_fixup); - if !expr.attrs.is_empty() { - self.word(")"); - } - self.end(); - } - - pub fn expr_block(&mut self, expr: &ExprBlock) { - self.outer_attrs(&expr.attrs); - if let Some(label) = &expr.label { - self.label(label); - } - self.cbox(INDENT); - self.small_block(&expr.block, &expr.attrs); - self.end(); - } - - fn expr_break(&mut self, expr: &ExprBreak, fixup: FixupContext) { - self.outer_attrs(&expr.attrs); - 
self.word("break"); - if let Some(lifetime) = &expr.label { - self.nbsp(); - self.lifetime(lifetime); - } - if let Some(value) = &expr.expr { - self.nbsp(); - self.subexpr( - value, - expr.label.is_none() && classify::expr_leading_label(value), - fixup.rightmost_subexpression_fixup(true, true, Precedence::Jump), - ); - } - } - - fn expr_call(&mut self, expr: &ExprCall, beginning_of_line: bool, fixup: FixupContext) { - let (left_prec, left_fixup) = fixup.leftmost_subexpression_with_operator( - &expr.func, - true, - false, - Precedence::Unambiguous, - ); - let needs_paren = if let Expr::Field(func) = &*expr.func { - matches!(func.member, Member::Named(_)) - } else { - left_prec < Precedence::Unambiguous - }; - - self.outer_attrs(&expr.attrs); - self.expr_beginning_of_line(&expr.func, needs_paren, beginning_of_line, left_fixup); - self.word("("); - self.call_args(&expr.args); - self.word(")"); - } - - fn prefix_subexpr_call(&mut self, expr: &ExprCall, fixup: FixupContext) { - let (left_prec, left_fixup) = fixup.leftmost_subexpression_with_operator( - &expr.func, - true, - false, - Precedence::Unambiguous, - ); - let needs_paren = if let Expr::Field(func) = &*expr.func { - matches!(func.member, Member::Named(_)) - } else { - left_prec < Precedence::Unambiguous - }; - - let beginning_of_line = false; - self.prefix_subexpr(&expr.func, needs_paren, beginning_of_line, left_fixup); - self.word("("); - self.call_args(&expr.args); - self.word(")"); - } - - fn expr_cast(&mut self, expr: &ExprCast, fixup: FixupContext) { - let (left_prec, left_fixup) = - fixup.leftmost_subexpression_with_operator(&expr.expr, false, false, Precedence::Cast); - - self.outer_attrs(&expr.attrs); - self.ibox(INDENT); - self.ibox(-INDENT); - if !expr.attrs.is_empty() { - self.word("("); - } - self.subexpr(&expr.expr, left_prec < Precedence::Cast, left_fixup); - self.end(); - self.space(); - self.word("as "); - self.ty(&expr.ty); - if !expr.attrs.is_empty() { - self.word(")"); - } - self.end(); - } - - fn expr_closure(&mut self, expr: &ExprClosure, fixup: FixupContext) { - self.outer_attrs(&expr.attrs); - self.ibox(0); - if let Some(bound_lifetimes) = &expr.lifetimes { - self.bound_lifetimes(bound_lifetimes); - } - if expr.constness.is_some() { - self.word("const "); - } - if expr.movability.is_some() { - self.word("static "); - } - if expr.asyncness.is_some() { - self.word("async "); - } - if expr.capture.is_some() { - self.word("move "); - } - self.cbox(INDENT); - self.word("|"); - for pat in expr.inputs.iter().delimited() { - if pat.is_first { - self.zerobreak(); - } - self.pat(&pat); - if !pat.is_last { - self.word(","); - self.space(); - } - } - match &expr.output { - ReturnType::Default => { - self.word("|"); - self.space(); - self.offset(-INDENT); - self.end(); - self.neverbreak(); - let wrap_in_brace = match &*expr.body { - Expr::Match(ExprMatch { attrs, .. }) | Expr::Call(ExprCall { attrs, .. 
}) => { - attr::has_outer(attrs) - } - body => !is_blocklike(body), - }; - if wrap_in_brace { - self.cbox(INDENT); - let okay_to_brace = parseable_as_stmt(&expr.body); - self.scan_break(BreakToken { - pre_break: Some(if okay_to_brace { '{' } else { '(' }), - ..BreakToken::default() - }); - self.expr( - &expr.body, - fixup.rightmost_subexpression_fixup(false, false, Precedence::Jump), - ); - self.scan_break(BreakToken { - offset: -INDENT, - pre_break: (okay_to_brace && stmt::add_semi(&expr.body)).then_some(';'), - post_break: if okay_to_brace { "}" } else { ")" }, - ..BreakToken::default() - }); - self.end(); - } else { - self.expr( - &expr.body, - fixup.rightmost_subexpression_fixup(false, false, Precedence::Jump), - ); - } - } - ReturnType::Type(_arrow, ty) => { - if !expr.inputs.is_empty() { - self.trailing_comma(true); - self.offset(-INDENT); - } - self.word("|"); - self.end(); - self.word(" -> "); - self.ty(ty); - self.nbsp(); - self.neverbreak(); - if matches!(&*expr.body, Expr::Block(body) if body.attrs.is_empty() && body.label.is_none()) - { - self.expr( - &expr.body, - fixup.rightmost_subexpression_fixup(false, false, Precedence::Jump), - ); - } else { - self.cbox(INDENT); - self.expr_as_small_block(&expr.body, 0); - self.end(); - } - } - } - self.end(); - } - - pub fn expr_const(&mut self, expr: &ExprConst) { - self.outer_attrs(&expr.attrs); - self.word("const "); - self.cbox(INDENT); - self.small_block(&expr.block, &expr.attrs); - self.end(); - } - - fn expr_continue(&mut self, expr: &ExprContinue) { - self.outer_attrs(&expr.attrs); - self.word("continue"); - if let Some(lifetime) = &expr.label { - self.nbsp(); - self.lifetime(lifetime); - } - } - - fn expr_field(&mut self, expr: &ExprField, beginning_of_line: bool, fixup: FixupContext) { - self.outer_attrs(&expr.attrs); - self.cbox(INDENT); - self.prefix_subexpr_field(expr, beginning_of_line, fixup); - self.end(); - } - - fn prefix_subexpr_field( - &mut self, - expr: &ExprField, - beginning_of_line: bool, - fixup: FixupContext, - ) { - let (left_prec, left_fixup) = fixup.leftmost_subexpression_with_dot(&expr.base); - - self.prefix_subexpr( - &expr.base, - left_prec < Precedence::Unambiguous, - beginning_of_line, - left_fixup, - ); - if !(beginning_of_line && is_short_ident(&expr.base)) { - self.scan_break(BreakToken { - no_break: self.ends_with('.').then_some(' '), - ..BreakToken::default() - }); - } - self.word("."); - self.member(&expr.member); - } - - fn expr_for_loop(&mut self, expr: &ExprForLoop) { - self.outer_attrs(&expr.attrs); - self.ibox(0); - if let Some(label) = &expr.label { - self.label(label); - } - self.word("for "); - self.pat(&expr.pat); - self.word(" in "); - self.neverbreak(); - self.expr_condition(&expr.expr); - self.word("{"); - self.neverbreak(); - self.cbox(INDENT); - self.hardbreak_if_nonempty(); - self.inner_attrs(&expr.attrs); - for stmt in expr.body.stmts.iter().delimited() { - self.stmt(&stmt, stmt.is_last); - } - self.offset(-INDENT); - self.end(); - self.word("}"); - self.end(); - } - - fn expr_group(&mut self, expr: &ExprGroup, fixup: FixupContext) { - self.outer_attrs(&expr.attrs); - self.expr(&expr.expr, fixup); - } - - fn expr_if(&mut self, expr: &ExprIf) { - self.outer_attrs(&expr.attrs); - self.cbox(INDENT); - self.word("if "); - self.cbox(-INDENT); - self.expr_condition(&expr.cond); - self.end(); - if let Some((_else_token, else_branch)) = &expr.else_branch { - let mut else_branch = &**else_branch; - self.small_block(&expr.then_branch, &[]); - loop { - self.word(" else "); - match 
else_branch { - Expr::If(expr) => { - self.word("if "); - self.cbox(-INDENT); - self.expr_condition(&expr.cond); - self.end(); - self.small_block(&expr.then_branch, &[]); - if let Some((_else_token, next)) = &expr.else_branch { - else_branch = next; - continue; - } - } - Expr::Block(expr) => { - self.small_block(&expr.block, &[]); - } - // If not one of the valid expressions to exist in an else - // clause, wrap in a block. - other => self.expr_as_small_block(other, INDENT), - } - break; - } - } else if expr.then_branch.stmts.is_empty() { - self.word("{}"); - } else { - self.word("{"); - self.hardbreak(); - for stmt in expr.then_branch.stmts.iter().delimited() { - self.stmt(&stmt, stmt.is_last); - } - self.offset(-INDENT); - self.word("}"); - } - self.end(); - } - - fn expr_index(&mut self, expr: &ExprIndex, beginning_of_line: bool, fixup: FixupContext) { - let (left_prec, left_fixup) = fixup.leftmost_subexpression_with_operator( - &expr.expr, - true, - false, - Precedence::Unambiguous, - ); - - self.outer_attrs(&expr.attrs); - self.expr_beginning_of_line( - &expr.expr, - left_prec < Precedence::Unambiguous, - beginning_of_line, - left_fixup, - ); - self.word("["); - self.expr(&expr.index, FixupContext::NONE); - self.word("]"); - } - - fn prefix_subexpr_index( - &mut self, - expr: &ExprIndex, - beginning_of_line: bool, - fixup: FixupContext, - ) { - let (left_prec, left_fixup) = fixup.leftmost_subexpression_with_operator( - &expr.expr, - true, - false, - Precedence::Unambiguous, - ); - - self.prefix_subexpr( - &expr.expr, - left_prec < Precedence::Unambiguous, - beginning_of_line, - left_fixup, - ); - self.word("["); - self.expr(&expr.index, FixupContext::NONE); - self.word("]"); - } - - fn expr_infer(&mut self, expr: &ExprInfer) { - self.outer_attrs(&expr.attrs); - self.word("_"); - } - - fn expr_let(&mut self, expr: &ExprLet, fixup: FixupContext) { - let (right_prec, right_fixup) = fixup.rightmost_subexpression(&expr.expr, Precedence::Let); - - self.outer_attrs(&expr.attrs); - self.ibox(0); - self.word("let "); - self.ibox(0); - self.pat(&expr.pat); - self.end(); - self.word(" = "); - self.neverbreak(); - self.ibox(0); - self.subexpr(&expr.expr, right_prec < Precedence::Let, right_fixup); - self.end(); - self.end(); - } - - pub fn expr_lit(&mut self, expr: &ExprLit) { - self.outer_attrs(&expr.attrs); - self.lit(&expr.lit); - } - - fn expr_loop(&mut self, expr: &ExprLoop) { - self.outer_attrs(&expr.attrs); - if let Some(label) = &expr.label { - self.label(label); - } - self.word("loop {"); - self.cbox(INDENT); - self.hardbreak_if_nonempty(); - self.inner_attrs(&expr.attrs); - for stmt in expr.body.stmts.iter().delimited() { - self.stmt(&stmt, stmt.is_last); - } - self.offset(-INDENT); - self.end(); - self.word("}"); - } - - pub fn expr_macro(&mut self, expr: &ExprMacro) { - self.outer_attrs(&expr.attrs); - let semicolon = false; - self.mac(&expr.mac, None, semicolon); - } - - fn expr_match(&mut self, expr: &ExprMatch) { - self.outer_attrs(&expr.attrs); - self.ibox(0); - self.word("match "); - self.expr_condition(&expr.expr); - self.word("{"); - self.neverbreak(); - self.cbox(INDENT); - self.hardbreak_if_nonempty(); - self.inner_attrs(&expr.attrs); - for arm in &expr.arms { - self.arm(arm); - self.hardbreak(); - } - self.offset(-INDENT); - self.end(); - self.word("}"); - self.end(); - } - - fn expr_method_call( - &mut self, - expr: &ExprMethodCall, - beginning_of_line: bool, - fixup: FixupContext, - ) { - self.outer_attrs(&expr.attrs); - self.cbox(INDENT); - let unindent_call_args = 
beginning_of_line && is_short_ident(&expr.receiver); - self.prefix_subexpr_method_call(expr, beginning_of_line, unindent_call_args, fixup); - self.end(); - } - - fn prefix_subexpr_method_call( - &mut self, - expr: &ExprMethodCall, - beginning_of_line: bool, - unindent_call_args: bool, - fixup: FixupContext, - ) { - let (left_prec, left_fixup) = fixup.leftmost_subexpression_with_dot(&expr.receiver); - - self.prefix_subexpr( - &expr.receiver, - left_prec < Precedence::Unambiguous, - beginning_of_line, - left_fixup, - ); - if !(beginning_of_line && is_short_ident(&expr.receiver)) { - self.scan_break(BreakToken { - no_break: self.ends_with('.').then_some(' '), - ..BreakToken::default() - }); - } - self.word("."); - self.ident(&expr.method); - if let Some(turbofish) = &expr.turbofish { - self.angle_bracketed_generic_arguments(turbofish, PathKind::Expr); - } - self.cbox(if unindent_call_args { -INDENT } else { 0 }); - self.word("("); - self.call_args(&expr.args); - self.word(")"); - self.end(); - } - - fn expr_paren(&mut self, expr: &ExprParen) { - self.outer_attrs(&expr.attrs); - self.word("("); - self.expr(&expr.expr, FixupContext::NONE); - self.word(")"); - } - - pub fn expr_path(&mut self, expr: &ExprPath) { - self.outer_attrs(&expr.attrs); - self.qpath(&expr.qself, &expr.path, PathKind::Expr); - } - - pub fn expr_range(&mut self, expr: &ExprRange, fixup: FixupContext) { - self.outer_attrs(&expr.attrs); - if !expr.attrs.is_empty() { - self.word("("); - } - if let Some(start) = &expr.start { - let (left_prec, left_fixup) = - fixup.leftmost_subexpression_with_operator(start, true, false, Precedence::Range); - self.subexpr(start, left_prec <= Precedence::Range, left_fixup); - } else if self.ends_with('.') { - self.nbsp(); - } - self.word(match expr.limits { - RangeLimits::HalfOpen(_) => "..", - RangeLimits::Closed(_) => "..=", - }); - if let Some(end) = &expr.end { - let right_fixup = fixup.rightmost_subexpression_fixup(false, true, Precedence::Range); - let right_prec = right_fixup.rightmost_subexpression_precedence(end); - self.subexpr(end, right_prec <= Precedence::Range, right_fixup); - } - if !expr.attrs.is_empty() { - self.word(")"); - } - } - - fn expr_raw_addr(&mut self, expr: &ExprRawAddr, fixup: FixupContext) { - let (right_prec, right_fixup) = - fixup.rightmost_subexpression(&expr.expr, Precedence::Prefix); - - self.outer_attrs(&expr.attrs); - self.word("&raw "); - self.pointer_mutability(&expr.mutability); - self.nbsp(); - self.subexpr(&expr.expr, right_prec < Precedence::Prefix, right_fixup); - } - - fn expr_reference(&mut self, expr: &ExprReference, fixup: FixupContext) { - let (right_prec, right_fixup) = - fixup.rightmost_subexpression(&expr.expr, Precedence::Prefix); - - self.outer_attrs(&expr.attrs); - self.word("&"); - if expr.mutability.is_some() { - self.word("mut "); - } - self.subexpr(&expr.expr, right_prec < Precedence::Prefix, right_fixup); - } - - fn expr_repeat(&mut self, expr: &ExprRepeat) { - self.outer_attrs(&expr.attrs); - self.word("["); - self.expr(&expr.expr, FixupContext::NONE); - self.word("; "); - self.expr(&expr.len, FixupContext::NONE); - self.word("]"); - } - - fn expr_return(&mut self, expr: &ExprReturn, fixup: FixupContext) { - self.outer_attrs(&expr.attrs); - self.word("return"); - if let Some(value) = &expr.expr { - self.nbsp(); - self.expr( - value, - fixup.rightmost_subexpression_fixup(true, false, Precedence::Jump), - ); - } - } - - fn expr_struct(&mut self, expr: &ExprStruct) { - self.outer_attrs(&expr.attrs); - self.cbox(INDENT); - 
self.ibox(-INDENT); - self.qpath(&expr.qself, &expr.path, PathKind::Expr); - self.end(); - self.word(" {"); - self.space_if_nonempty(); - for field_value in expr.fields.iter().delimited() { - self.field_value(&field_value); - self.trailing_comma_or_space(field_value.is_last && expr.rest.is_none()); - } - if let Some(rest) = &expr.rest { - self.word(".."); - self.expr(rest, FixupContext::NONE); - self.space(); - } - self.offset(-INDENT); - self.end_with_max_width(34); - self.word("}"); - } - - fn expr_try(&mut self, expr: &ExprTry, beginning_of_line: bool, fixup: FixupContext) { - let (left_prec, left_fixup) = fixup.leftmost_subexpression_with_dot(&expr.expr); - - self.outer_attrs(&expr.attrs); - self.expr_beginning_of_line( - &expr.expr, - left_prec < Precedence::Unambiguous, - beginning_of_line, - left_fixup, - ); - self.word("?"); - } - - fn prefix_subexpr_try(&mut self, expr: &ExprTry, beginning_of_line: bool, fixup: FixupContext) { - let (left_prec, left_fixup) = fixup.leftmost_subexpression_with_dot(&expr.expr); - - self.prefix_subexpr( - &expr.expr, - left_prec < Precedence::Unambiguous, - beginning_of_line, - left_fixup, - ); - self.word("?"); - } - - fn expr_try_block(&mut self, expr: &ExprTryBlock) { - self.outer_attrs(&expr.attrs); - self.word("try "); - self.cbox(INDENT); - self.small_block(&expr.block, &expr.attrs); - self.end(); - } - - fn expr_tuple(&mut self, expr: &ExprTuple) { - self.outer_attrs(&expr.attrs); - self.word("("); - self.cbox(INDENT); - self.zerobreak(); - for elem in expr.elems.iter().delimited() { - self.expr(&elem, FixupContext::NONE); - if expr.elems.len() == 1 { - self.word(","); - self.zerobreak(); - } else { - self.trailing_comma(elem.is_last); - } - } - self.offset(-INDENT); - self.end(); - self.word(")"); - } - - fn expr_unary(&mut self, expr: &ExprUnary, fixup: FixupContext) { - let (right_prec, right_fixup) = - fixup.rightmost_subexpression(&expr.expr, Precedence::Prefix); - - self.outer_attrs(&expr.attrs); - self.unary_operator(&expr.op); - self.subexpr(&expr.expr, right_prec < Precedence::Prefix, right_fixup); - } - - fn expr_unsafe(&mut self, expr: &ExprUnsafe) { - self.outer_attrs(&expr.attrs); - self.word("unsafe "); - self.cbox(INDENT); - self.small_block(&expr.block, &expr.attrs); - self.end(); - } - - #[cfg(not(feature = "verbatim"))] - fn expr_verbatim(&mut self, expr: &TokenStream, _fixup: FixupContext) { - if !expr.is_empty() { - unimplemented!("Expr::Verbatim `{}`", expr); - } - } - - #[cfg(feature = "verbatim")] - fn expr_verbatim(&mut self, tokens: &TokenStream, fixup: FixupContext) { - use syn::parse::discouraged::Speculative; - use syn::parse::{Parse, ParseStream, Result}; - use syn::{parenthesized, Ident}; - - enum ExprVerbatim { - Empty, - Ellipsis, - Become(Become), - Builtin(Builtin), - } - - struct Become { - attrs: Vec, - tail_call: Expr, - } - - struct Builtin { - attrs: Vec, - name: Ident, - args: TokenStream, - } - - mod kw { - syn::custom_keyword!(builtin); - syn::custom_keyword!(raw); - } - - impl Parse for ExprVerbatim { - fn parse(input: ParseStream) -> Result { - let ahead = input.fork(); - let attrs = ahead.call(Attribute::parse_outer)?; - let lookahead = ahead.lookahead1(); - if input.is_empty() { - Ok(ExprVerbatim::Empty) - } else if lookahead.peek(Token![become]) { - input.advance_to(&ahead); - input.parse::()?; - let tail_call: Expr = input.parse()?; - Ok(ExprVerbatim::Become(Become { attrs, tail_call })) - } else if lookahead.peek(kw::builtin) { - input.advance_to(&ahead); - input.parse::()?; - input.parse::()?; - 
let name: Ident = input.parse()?; - let args; - parenthesized!(args in input); - let args: TokenStream = args.parse()?; - Ok(ExprVerbatim::Builtin(Builtin { attrs, name, args })) - } else if lookahead.peek(Token![...]) { - input.parse::()?; - Ok(ExprVerbatim::Ellipsis) - } else { - Err(lookahead.error()) - } - } - } - - let expr: ExprVerbatim = match syn::parse2(tokens.clone()) { - Ok(expr) => expr, - Err(_) => unimplemented!("Expr::Verbatim `{}`", tokens), - }; - - match expr { - ExprVerbatim::Empty => {} - ExprVerbatim::Ellipsis => { - self.word("..."); - } - ExprVerbatim::Become(expr) => { - self.outer_attrs(&expr.attrs); - self.word("become"); - self.nbsp(); - self.expr( - &expr.tail_call, - fixup.rightmost_subexpression_fixup(true, false, Precedence::Jump), - ); - } - ExprVerbatim::Builtin(expr) => { - self.outer_attrs(&expr.attrs); - self.word("builtin # "); - self.ident(&expr.name); - self.word("("); - if !expr.args.is_empty() { - self.cbox(INDENT); - self.zerobreak(); - self.ibox(0); - self.macro_rules_tokens(expr.args, false); - self.end(); - self.zerobreak(); - self.offset(-INDENT); - self.end(); - } - self.word(")"); - } - } - } - - fn expr_while(&mut self, expr: &ExprWhile) { - self.outer_attrs(&expr.attrs); - if let Some(label) = &expr.label { - self.label(label); - } - self.word("while "); - self.expr_condition(&expr.cond); - self.word("{"); - self.neverbreak(); - self.cbox(INDENT); - self.hardbreak_if_nonempty(); - self.inner_attrs(&expr.attrs); - for stmt in expr.body.stmts.iter().delimited() { - self.stmt(&stmt, stmt.is_last); - } - self.offset(-INDENT); - self.end(); - self.word("}"); - } - - fn expr_yield(&mut self, expr: &ExprYield, fixup: FixupContext) { - self.outer_attrs(&expr.attrs); - self.word("yield"); - if let Some(value) = &expr.expr { - self.nbsp(); - self.expr( - value, - fixup.rightmost_subexpression_fixup(true, false, Precedence::Jump), - ); - } - } - - fn label(&mut self, label: &Label) { - self.lifetime(&label.name); - self.word(": "); - } - - fn field_value(&mut self, field_value: &FieldValue) { - self.outer_attrs(&field_value.attrs); - self.member(&field_value.member); - if field_value.colon_token.is_some() { - self.word(": "); - self.ibox(0); - self.expr(&field_value.expr, FixupContext::NONE); - self.end(); - } - } - - fn arm(&mut self, arm: &Arm) { - self.outer_attrs(&arm.attrs); - self.ibox(0); - self.pat(&arm.pat); - if let Some((_if_token, guard)) = &arm.guard { - self.word(" if "); - self.expr(guard, FixupContext::NONE); - } - self.word(" => "); - let empty_block; - let mut body = &*arm.body; - while let Expr::Block(expr) = body { - if expr.attrs.is_empty() && expr.label.is_none() { - let mut stmts = expr.block.stmts.iter(); - if let (Some(Stmt::Expr(inner, None)), None) = (stmts.next(), stmts.next()) { - body = inner; - continue; - } - } - break; - } - if let Expr::Tuple(expr) = body { - if expr.elems.is_empty() && expr.attrs.is_empty() { - empty_block = Expr::Block(ExprBlock { - attrs: Vec::new(), - label: None, - block: Block { - brace_token: token::Brace::default(), - stmts: Vec::new(), - }, - }); - body = &empty_block; - } - } - if let Expr::Block(body) = body { - if let Some(label) = &body.label { - self.label(label); - } - self.word("{"); - self.neverbreak(); - self.cbox(INDENT); - self.hardbreak_if_nonempty(); - self.inner_attrs(&body.attrs); - for stmt in body.block.stmts.iter().delimited() { - self.stmt(&stmt, stmt.is_last); - } - self.offset(-INDENT); - self.end(); - self.word("}"); - } else { - self.neverbreak(); - self.cbox(INDENT); - 
let okay_to_brace = parseable_as_stmt(body); - self.scan_break(BreakToken { - pre_break: Some(if okay_to_brace { '{' } else { '(' }), - ..BreakToken::default() - }); - self.expr_beginning_of_line(body, false, true, FixupContext::new_match_arm()); - self.scan_break(BreakToken { - offset: -INDENT, - pre_break: (okay_to_brace && stmt::add_semi(body)).then_some(';'), - post_break: if okay_to_brace { "}" } else { ")," }, - no_break: classify::requires_comma_to_be_match_arm(body).then_some(','), - ..BreakToken::default() - }); - self.end(); - } - self.end(); - } - - fn call_args(&mut self, args: &Punctuated) { - let mut iter = args.iter(); - match (iter.next(), iter.next()) { - (Some(expr), None) if is_blocklike(expr) => { - self.expr(expr, FixupContext::NONE); - } - _ => { - self.cbox(INDENT); - self.zerobreak(); - for arg in args.iter().delimited() { - self.expr(&arg, FixupContext::NONE); - self.trailing_comma(arg.is_last); - } - self.offset(-INDENT); - self.end(); - } - } - } - - pub fn small_block(&mut self, block: &Block, attrs: &[Attribute]) { - self.word("{"); - if attr::has_inner(attrs) || !block.stmts.is_empty() { - self.space(); - self.inner_attrs(attrs); - match block.stmts.as_slice() { - [Stmt::Expr(expr, None)] if stmt::break_after(expr) => { - self.ibox(0); - self.expr_beginning_of_line(expr, false, true, FixupContext::new_stmt()); - self.end(); - self.space(); - } - _ => { - for stmt in block.stmts.iter().delimited() { - self.stmt(&stmt, stmt.is_last); - } - } - } - self.offset(-INDENT); - } - self.word("}"); - } - - pub fn expr_as_small_block(&mut self, expr: &Expr, indent: isize) { - self.word("{"); - self.space(); - self.ibox(indent); - self.expr_beginning_of_line(expr, false, true, FixupContext::new_stmt()); - self.end(); - self.space(); - self.offset(-INDENT); - self.word("}"); - } - - pub fn member(&mut self, member: &Member) { - match member { - Member::Named(ident) => self.ident(ident), - Member::Unnamed(index) => self.index(index), - } - } - - fn index(&mut self, member: &Index) { - self.word(member.index.to_string()); - } - - fn binary_operator(&mut self, op: &BinOp) { - self.word( - match op { - #![cfg_attr(all(test, exhaustive), deny(non_exhaustive_omitted_patterns))] - BinOp::Add(_) => "+", - BinOp::Sub(_) => "-", - BinOp::Mul(_) => "*", - BinOp::Div(_) => "/", - BinOp::Rem(_) => "%", - BinOp::And(_) => "&&", - BinOp::Or(_) => "||", - BinOp::BitXor(_) => "^", - BinOp::BitAnd(_) => "&", - BinOp::BitOr(_) => "|", - BinOp::Shl(_) => "<<", - BinOp::Shr(_) => ">>", - BinOp::Eq(_) => "==", - BinOp::Lt(_) => "<", - BinOp::Le(_) => "<=", - BinOp::Ne(_) => "!=", - BinOp::Ge(_) => ">=", - BinOp::Gt(_) => ">", - BinOp::AddAssign(_) => "+=", - BinOp::SubAssign(_) => "-=", - BinOp::MulAssign(_) => "*=", - BinOp::DivAssign(_) => "/=", - BinOp::RemAssign(_) => "%=", - BinOp::BitXorAssign(_) => "^=", - BinOp::BitAndAssign(_) => "&=", - BinOp::BitOrAssign(_) => "|=", - BinOp::ShlAssign(_) => "<<=", - BinOp::ShrAssign(_) => ">>=", - _ => unimplemented!("unknown BinOp"), - }, - ); - } - - fn unary_operator(&mut self, op: &UnOp) { - self.word( - match op { - #![cfg_attr(all(test, exhaustive), deny(non_exhaustive_omitted_patterns))] - UnOp::Deref(_) => "*", - UnOp::Not(_) => "!", - UnOp::Neg(_) => "-", - _ => unimplemented!("unknown UnOp"), - }, - ); - } - - fn pointer_mutability(&mut self, mutability: &PointerMutability) { - match mutability { - PointerMutability::Const(_) => self.word("const"), - PointerMutability::Mut(_) => self.word("mut"), - } - } -} - -fn needs_newline_if_wrap(expr: 
&Expr) -> bool { - match expr { - #![cfg_attr(all(test, exhaustive), deny(non_exhaustive_omitted_patterns))] - Expr::Array(_) - | Expr::Async(_) - | Expr::Block(_) - | Expr::Break(ExprBreak { expr: None, .. }) - | Expr::Closure(_) - | Expr::Const(_) - | Expr::Continue(_) - | Expr::ForLoop(_) - | Expr::If(_) - | Expr::Infer(_) - | Expr::Lit(_) - | Expr::Loop(_) - | Expr::Macro(_) - | Expr::Match(_) - | Expr::Path(_) - | Expr::Range(ExprRange { end: None, .. }) - | Expr::Repeat(_) - | Expr::Return(ExprReturn { expr: None, .. }) - | Expr::Struct(_) - | Expr::TryBlock(_) - | Expr::Tuple(_) - | Expr::Unsafe(_) - | Expr::Verbatim(_) - | Expr::While(_) - | Expr::Yield(ExprYield { expr: None, .. }) => false, - - Expr::Assign(_) - | Expr::Await(_) - | Expr::Binary(_) - | Expr::Cast(_) - | Expr::Field(_) - | Expr::Index(_) - | Expr::MethodCall(_) => true, - - Expr::Break(ExprBreak { expr: Some(e), .. }) - | Expr::Call(ExprCall { func: e, .. }) - | Expr::Group(ExprGroup { expr: e, .. }) - | Expr::Let(ExprLet { expr: e, .. }) - | Expr::Paren(ExprParen { expr: e, .. }) - | Expr::Range(ExprRange { end: Some(e), .. }) - | Expr::RawAddr(ExprRawAddr { expr: e, .. }) - | Expr::Reference(ExprReference { expr: e, .. }) - | Expr::Return(ExprReturn { expr: Some(e), .. }) - | Expr::Try(ExprTry { expr: e, .. }) - | Expr::Unary(ExprUnary { expr: e, .. }) - | Expr::Yield(ExprYield { expr: Some(e), .. }) => needs_newline_if_wrap(e), - - _ => false, - } -} - -fn is_short_ident(expr: &Expr) -> bool { - if let Expr::Path(expr) = expr { - return expr.attrs.is_empty() - && expr.qself.is_none() - && expr - .path - .get_ident() - .map_or(false, |ident| ident.to_string().len() as isize <= INDENT); - } - false -} - -fn is_blocklike(expr: &Expr) -> bool { - match expr { - #![cfg_attr(all(test, exhaustive), deny(non_exhaustive_omitted_patterns))] - Expr::Array(ExprArray { attrs, .. }) - | Expr::Async(ExprAsync { attrs, .. }) - | Expr::Block(ExprBlock { attrs, .. }) - | Expr::Closure(ExprClosure { attrs, .. }) - | Expr::Const(ExprConst { attrs, .. }) - | Expr::Struct(ExprStruct { attrs, .. }) - | Expr::TryBlock(ExprTryBlock { attrs, .. }) - | Expr::Tuple(ExprTuple { attrs, .. }) - | Expr::Unsafe(ExprUnsafe { attrs, .. 
}) => !attr::has_outer(attrs), - - Expr::Assign(_) - | Expr::Await(_) - | Expr::Binary(_) - | Expr::Break(_) - | Expr::Call(_) - | Expr::Cast(_) - | Expr::Continue(_) - | Expr::Field(_) - | Expr::ForLoop(_) - | Expr::If(_) - | Expr::Index(_) - | Expr::Infer(_) - | Expr::Let(_) - | Expr::Lit(_) - | Expr::Loop(_) - | Expr::Macro(_) - | Expr::Match(_) - | Expr::MethodCall(_) - | Expr::Paren(_) - | Expr::Path(_) - | Expr::Range(_) - | Expr::RawAddr(_) - | Expr::Reference(_) - | Expr::Repeat(_) - | Expr::Return(_) - | Expr::Try(_) - | Expr::Unary(_) - | Expr::Verbatim(_) - | Expr::While(_) - | Expr::Yield(_) => false, - - Expr::Group(e) => is_blocklike(&e.expr), - - _ => false, - } -} - -pub fn simple_block(expr: &Expr) -> Option<&ExprBlock> { - if let Expr::Block(expr) = expr { - if expr.attrs.is_empty() && expr.label.is_none() { - return Some(expr); - } - } - None -} - -pub fn simple_array(elements: &Punctuated) -> bool { - for expr in elements { - if let Expr::Lit(expr) = expr { - match expr.lit { - #![cfg_attr(all(test, exhaustive), deny(non_exhaustive_omitted_patterns))] - Lit::Byte(_) | Lit::Char(_) | Lit::Int(_) | Lit::Bool(_) => {} - - Lit::Str(_) | Lit::ByteStr(_) | Lit::CStr(_) | Lit::Float(_) | Lit::Verbatim(_) => { - return false; - } - - _ => return false, - } - } else { - return false; - } - } - true -} - -// Expressions for which `$expr` and `{ $expr }` mean the same thing. -// -// This is not the case for all expressions. For example `{} | x | x` has some -// bitwise OR operators while `{ {} |x| x }` has a block followed by a closure. -fn parseable_as_stmt(mut expr: &Expr) -> bool { - loop { - match expr { - #![cfg_attr(all(test, exhaustive), deny(non_exhaustive_omitted_patterns))] - Expr::Array(_) - | Expr::Async(_) - | Expr::Block(_) - | Expr::Break(_) - | Expr::Closure(_) - | Expr::Const(_) - | Expr::Continue(_) - | Expr::ForLoop(_) - | Expr::If(_) - | Expr::Infer(_) - | Expr::Lit(_) - | Expr::Loop(_) - | Expr::Macro(_) - | Expr::Match(_) - | Expr::Paren(_) - | Expr::Path(_) - | Expr::RawAddr(_) - | Expr::Reference(_) - | Expr::Repeat(_) - | Expr::Return(_) - | Expr::Struct(_) - | Expr::TryBlock(_) - | Expr::Tuple(_) - | Expr::Unary(_) - | Expr::Unsafe(_) - | Expr::Verbatim(_) - | Expr::While(_) - | Expr::Yield(_) => return true, - - Expr::Let(_) => return false, - - Expr::Assign(e) => { - if !classify::requires_semi_to_be_stmt(&e.left) { - return false; - } - expr = &e.left; - } - Expr::Await(e) => expr = &e.base, - Expr::Binary(e) => { - if !classify::requires_semi_to_be_stmt(&e.left) { - return false; - } - expr = &e.left; - } - Expr::Call(e) => { - if !classify::requires_semi_to_be_stmt(&e.func) { - return false; - } - expr = &e.func; - } - Expr::Cast(e) => { - if !classify::requires_semi_to_be_stmt(&e.expr) { - return false; - } - expr = &e.expr; - } - Expr::Field(e) => expr = &e.base, - Expr::Group(e) => expr = &e.expr, - Expr::Index(e) => { - if !classify::requires_semi_to_be_stmt(&e.expr) { - return false; - } - expr = &e.expr; - } - Expr::MethodCall(e) => expr = &e.receiver, - Expr::Range(e) => match &e.start { - None => return true, - Some(start) => { - if !classify::requires_semi_to_be_stmt(start) { - return false; - } - expr = start; - } - }, - Expr::Try(e) => expr = &e.expr, - - _ => return false, - } - } -} diff --git a/vendor/prettyplease/src/file.rs b/vendor/prettyplease/src/file.rs deleted file mode 100644 index e23bd120feba58..00000000000000 --- a/vendor/prettyplease/src/file.rs +++ /dev/null @@ -1,17 +0,0 @@ -use crate::algorithm::Printer; -use syn::File; - 
-impl Printer {
-    pub fn file(&mut self, file: &File) {
-        self.cbox(0);
-        if let Some(shebang) = &file.shebang {
-            self.word(shebang.clone());
-            self.hardbreak();
-        }
-        self.inner_attrs(&file.attrs);
-        for item in &file.items {
-            self.item(item);
-        }
-        self.end();
-    }
-}
diff --git a/vendor/prettyplease/src/fixup.rs b/vendor/prettyplease/src/fixup.rs
deleted file mode 100644
index 2355d4905f9567..00000000000000
--- a/vendor/prettyplease/src/fixup.rs
+++ /dev/null
@@ -1,676 +0,0 @@
-use crate::classify;
-use crate::precedence::Precedence;
-use syn::{
-    Expr, ExprBreak, ExprRange, ExprRawAddr, ExprReference, ExprReturn, ExprUnary, ExprYield,
-};
-
-#[derive(Copy, Clone)]
-pub struct FixupContext {
-    previous_operator: Precedence,
-    next_operator: Precedence,
-
-    // Print expression such that it can be parsed back as a statement
-    // consisting of the original expression.
-    //
-    // The effect of this is for binary operators in statement position to set
-    // `leftmost_subexpression_in_stmt` when printing their left-hand operand.
-    //
-    //     (match x {}) - 1; // match needs parens when LHS of binary operator
-    //
-    //     match x {}; // not when its own statement
-    //
-    stmt: bool,
-
-    // This is the difference between:
-    //
-    //     (match x {}) - 1; // subexpression needs parens
-    //
-    //     let _ = match x {} - 1; // no parens
-    //
-    // There are 3 distinguishable contexts in which `print_expr` might be
-    // called with the expression `$match` as its argument, where `$match`
-    // represents an expression of kind `ExprKind::Match`:
-    //
-    //   - stmt=false leftmost_subexpression_in_stmt=false
-    //
-    //     Example: `let _ = $match - 1;`
-    //
-    //     No parentheses required.
-    //
-    //   - stmt=false leftmost_subexpression_in_stmt=true
-    //
-    //     Example: `$match - 1;`
-    //
-    //     Must parenthesize `($match)`, otherwise parsing back the output as a
-    //     statement would terminate the statement after the closing brace of
-    //     the match, parsing `-1;` as a separate statement.
-    //
-    //   - stmt=true leftmost_subexpression_in_stmt=false
-    //
-    //     Example: `$match;`
-    //
-    //     No parentheses required.
-    leftmost_subexpression_in_stmt: bool,
-
-    // Print expression such that it can be parsed as a match arm.
-    //
-    // This is almost equivalent to `stmt`, but the grammar diverges a tiny bit
-    // between statements and match arms when it comes to braced macro calls.
-    // Macro calls with brace delimiter terminate a statement without a
-    // semicolon, but do not terminate a match-arm without comma.
-    //
-    //     m! {} - 1; // two statements: a macro call followed by -1 literal
-    //
-    //     match () {
-    //         _ => m! {} - 1, // binary subtraction operator
-    //     }
-    //
-    match_arm: bool,
-
-    // This is almost equivalent to `leftmost_subexpression_in_stmt`, other than
-    // for braced macro calls.
-    //
-    // If we have `m! {} - 1` as an expression, the leftmost subexpression
-    // `m! {}` will need to be parenthesized in the statement case but not the
-    // match-arm case.
-    //
-    //     (m! {}) - 1; // subexpression needs parens
-    //
-    //     match () {
-    //         _ => m!
{} - 1, // no parens - // } - // - leftmost_subexpression_in_match_arm: bool, - - // This is the difference between: - // - // if let _ = (Struct {}) {} // needs parens - // - // match () { - // () if let _ = Struct {} => {} // no parens - // } - // - condition: bool, - - // This is the difference between: - // - // if break Struct {} == (break) {} // needs parens - // - // if break break == Struct {} {} // no parens - // - rightmost_subexpression_in_condition: bool, - - // This is the difference between: - // - // if break ({ x }).field + 1 {} needs parens - // - // if break 1 + { x }.field {} // no parens - // - leftmost_subexpression_in_optional_operand: bool, - - // This is the difference between: - // - // let _ = (return) - 1; // without paren, this would return -1 - // - // let _ = return + 1; // no paren because '+' cannot begin expr - // - next_operator_can_begin_expr: bool, - - // This is the difference between: - // - // let _ = 1 + return 1; // no parens if rightmost subexpression - // - // let _ = 1 + (return 1) + 1; // needs parens - // - next_operator_can_continue_expr: bool, - - // This is the difference between: - // - // let _ = x as u8 + T; - // - // let _ = (x as u8) < T; - // - // Without parens, the latter would want to parse `u8 Self { - FixupContext { - stmt: true, - ..FixupContext::NONE - } - } - - /// Create the initial fixup for printing an expression as the right-hand - /// side of a match arm. - pub fn new_match_arm() -> Self { - FixupContext { - match_arm: true, - ..FixupContext::NONE - } - } - - /// Create the initial fixup for printing an expression as the "condition" - /// of an `if` or `while`. There are a few other positions which are - /// grammatically equivalent and also use this, such as the iterator - /// expression in `for` and the scrutinee in `match`. - pub fn new_condition() -> Self { - FixupContext { - condition: true, - rightmost_subexpression_in_condition: true, - ..FixupContext::NONE - } - } - - /// Transform this fixup into the one that should apply when printing the - /// leftmost subexpression of the current expression. - /// - /// The leftmost subexpression is any subexpression that has the same first - /// token as the current expression, but has a different last token. - /// - /// For example in `$a + $b` and `$a.method()`, the subexpression `$a` is a - /// leftmost subexpression. - /// - /// Not every expression has a leftmost subexpression. For example neither - /// `-$a` nor `[$a]` have one. - pub fn leftmost_subexpression_with_operator( - self, - expr: &Expr, - next_operator_can_begin_expr: bool, - next_operator_can_begin_generics: bool, - precedence: Precedence, - ) -> (Precedence, Self) { - let fixup = FixupContext { - next_operator: precedence, - stmt: false, - leftmost_subexpression_in_stmt: self.stmt || self.leftmost_subexpression_in_stmt, - match_arm: false, - leftmost_subexpression_in_match_arm: self.match_arm - || self.leftmost_subexpression_in_match_arm, - rightmost_subexpression_in_condition: false, - next_operator_can_begin_expr, - next_operator_can_continue_expr: true, - next_operator_can_begin_generics, - ..self - }; - - (fixup.leftmost_subexpression_precedence(expr), fixup) - } - - /// Transform this fixup into the one that should apply when printing a - /// leftmost subexpression followed by a `.` or `?` token, which confer - /// different statement boundary rules compared to other leftmost - /// subexpressions. 
-    pub fn leftmost_subexpression_with_dot(self, expr: &Expr) -> (Precedence, Self) {
-        let fixup = FixupContext {
-            next_operator: Precedence::Unambiguous,
-            stmt: self.stmt || self.leftmost_subexpression_in_stmt,
-            leftmost_subexpression_in_stmt: false,
-            match_arm: self.match_arm || self.leftmost_subexpression_in_match_arm,
-            leftmost_subexpression_in_match_arm: false,
-            rightmost_subexpression_in_condition: false,
-            next_operator_can_begin_expr: false,
-            next_operator_can_continue_expr: true,
-            next_operator_can_begin_generics: false,
-            ..self
-        };
-
-        (fixup.leftmost_subexpression_precedence(expr), fixup)
-    }
-
-    fn leftmost_subexpression_precedence(self, expr: &Expr) -> Precedence {
-        if !self.next_operator_can_begin_expr || self.next_operator == Precedence::Range {
-            if let Scan::Bailout = scan_right(expr, self, Precedence::MIN, 0, 0) {
-                if scan_left(expr, self) {
-                    return Precedence::Unambiguous;
-                }
-            }
-        }
-
-        self.precedence(expr)
-    }
-
-    /// Transform this fixup into the one that should apply when printing the
-    /// rightmost subexpression of the current expression.
-    ///
-    /// The rightmost subexpression is any subexpression that has a different
-    /// first token than the current expression, but has the same last token.
-    ///
-    /// For example in `$a + $b` and `-$b`, the subexpression `$b` is a
-    /// rightmost subexpression.
-    ///
-    /// Not every expression has a rightmost subexpression. For example neither
-    /// `[$b]` nor `$a.f($b)` have one.
-    pub fn rightmost_subexpression(
-        self,
-        expr: &Expr,
-        precedence: Precedence,
-    ) -> (Precedence, Self) {
-        let fixup = self.rightmost_subexpression_fixup(false, false, precedence);
-        (fixup.rightmost_subexpression_precedence(expr), fixup)
-    }
-
-    pub fn rightmost_subexpression_fixup(
-        self,
-        reset_allow_struct: bool,
-        optional_operand: bool,
-        precedence: Precedence,
-    ) -> Self {
-        FixupContext {
-            previous_operator: precedence,
-            stmt: false,
-            leftmost_subexpression_in_stmt: false,
-            match_arm: false,
-            leftmost_subexpression_in_match_arm: false,
-            condition: self.condition && !reset_allow_struct,
-            leftmost_subexpression_in_optional_operand: self.condition && optional_operand,
-            ..self
-        }
-    }
-
-    pub fn rightmost_subexpression_precedence(self, expr: &Expr) -> Precedence {
-        let default_prec = self.precedence(expr);
-
-        if match self.previous_operator {
-            Precedence::Assign | Precedence::Let | Precedence::Prefix => {
-                default_prec < self.previous_operator
-            }
-            _ => default_prec <= self.previous_operator,
-        } && match self.next_operator {
-            Precedence::Range | Precedence::Or | Precedence::And => true,
-            _ => !self.next_operator_can_begin_expr,
-        } {
-            if let Scan::Bailout | Scan::Fail = scan_right(expr, self, self.previous_operator, 1, 0)
-            {
-                if scan_left(expr, self) {
-                    return Precedence::Prefix;
-                }
-            }
-        }
-
-        default_prec
-    }
-
-    /// Determine whether parentheses are needed around the given expression to
-    /// head off the early termination of a statement or condition.
-    pub fn parenthesize(self, expr: &Expr) -> bool {
-        (self.leftmost_subexpression_in_stmt && !classify::requires_semi_to_be_stmt(expr))
-            || ((self.stmt || self.leftmost_subexpression_in_stmt) && matches!(expr, Expr::Let(_)))
-            || (self.leftmost_subexpression_in_match_arm
-                && !classify::requires_comma_to_be_match_arm(expr))
-            || (self.condition && matches!(expr, Expr::Struct(_)))
-            || (self.rightmost_subexpression_in_condition
-                && matches!(
-                    expr,
-                    Expr::Return(ExprReturn { expr: None, .. })
-                        | Expr::Yield(ExprYield { expr: None, ..
}) - )) - || (self.rightmost_subexpression_in_condition - && !self.condition - && matches!( - expr, - Expr::Break(ExprBreak { expr: None, .. }) - | Expr::Path(_) - | Expr::Range(ExprRange { end: None, .. }) - )) - || (self.leftmost_subexpression_in_optional_operand - && matches!(expr, Expr::Block(expr) if expr.attrs.is_empty() && expr.label.is_none())) - } - - /// Determines the effective precedence of a subexpression. Some expressions - /// have higher or lower precedence when adjacent to particular operators. - fn precedence(self, expr: &Expr) -> Precedence { - if self.next_operator_can_begin_expr { - // Decrease precedence of value-less jumps when followed by an - // operator that would otherwise get interpreted as beginning a - // value for the jump. - if let Expr::Break(ExprBreak { expr: None, .. }) - | Expr::Return(ExprReturn { expr: None, .. }) - | Expr::Yield(ExprYield { expr: None, .. }) = expr - { - return Precedence::Jump; - } - } - - if !self.next_operator_can_continue_expr { - match expr { - // Increase precedence of expressions that extend to the end of - // current statement or group. - Expr::Break(_) - | Expr::Closure(_) - | Expr::Let(_) - | Expr::Return(_) - | Expr::Yield(_) => { - return Precedence::Prefix; - } - Expr::Range(e) if e.start.is_none() => return Precedence::Prefix, - _ => {} - } - } - - if self.next_operator_can_begin_generics { - if let Expr::Cast(cast) = expr { - if classify::trailing_unparameterized_path(&cast.ty) { - return Precedence::MIN; - } - } - } - - Precedence::of(expr) - } -} - -#[derive(Copy, Clone, PartialEq)] -enum Scan { - Fail, - Bailout, - Consume, -} - -fn scan_left(expr: &Expr, fixup: FixupContext) -> bool { - match expr { - Expr::Assign(_) => fixup.previous_operator <= Precedence::Assign, - Expr::Binary(e) => match Precedence::of_binop(&e.op) { - Precedence::Assign => fixup.previous_operator <= Precedence::Assign, - binop_prec => fixup.previous_operator < binop_prec, - }, - Expr::Cast(_) => fixup.previous_operator < Precedence::Cast, - Expr::Range(e) => e.start.is_none() || fixup.previous_operator < Precedence::Assign, - _ => true, - } -} - -fn scan_right( - expr: &Expr, - fixup: FixupContext, - precedence: Precedence, - fail_offset: u8, - bailout_offset: u8, -) -> Scan { - let consume_by_precedence = if match precedence { - Precedence::Assign | Precedence::Compare => precedence <= fixup.next_operator, - _ => precedence < fixup.next_operator, - } || fixup.next_operator == Precedence::MIN - { - Scan::Consume - } else { - Scan::Bailout - }; - if fixup.parenthesize(expr) { - return consume_by_precedence; - } - match expr { - #![cfg_attr(all(test, exhaustive), deny(non_exhaustive_omitted_patterns))] - Expr::Assign(e) if e.attrs.is_empty() => { - if match fixup.next_operator { - Precedence::Unambiguous => fail_offset >= 2, - _ => bailout_offset >= 1, - } { - return Scan::Consume; - } - let right_fixup = fixup.rightmost_subexpression_fixup(false, false, Precedence::Assign); - let scan = scan_right( - &e.right, - right_fixup, - Precedence::Assign, - match fixup.next_operator { - Precedence::Unambiguous => fail_offset, - _ => 1, - }, - 1, - ); - if let Scan::Bailout | Scan::Consume = scan { - Scan::Consume - } else if let Precedence::Unambiguous = fixup.next_operator { - Scan::Fail - } else { - Scan::Bailout - } - } - Expr::Binary(e) if e.attrs.is_empty() => { - if match fixup.next_operator { - Precedence::Unambiguous => { - fail_offset >= 2 - && (consume_by_precedence == Scan::Consume || bailout_offset >= 1) - } - _ => bailout_offset >= 1, - } { 
- return Scan::Consume; - } - let binop_prec = Precedence::of_binop(&e.op); - if binop_prec == Precedence::Compare && fixup.next_operator == Precedence::Compare { - return Scan::Consume; - } - let right_fixup = fixup.rightmost_subexpression_fixup(false, false, binop_prec); - let scan = scan_right( - &e.right, - right_fixup, - binop_prec, - match fixup.next_operator { - Precedence::Unambiguous => fail_offset, - _ => 1, - }, - consume_by_precedence as u8 - Scan::Bailout as u8, - ); - match scan { - Scan::Fail => {} - Scan::Bailout => return consume_by_precedence, - Scan::Consume => return Scan::Consume, - } - let right_needs_group = binop_prec != Precedence::Assign - && right_fixup.rightmost_subexpression_precedence(&e.right) <= binop_prec; - if right_needs_group { - consume_by_precedence - } else if let (Scan::Fail, Precedence::Unambiguous) = (scan, fixup.next_operator) { - Scan::Fail - } else { - Scan::Bailout - } - } - Expr::RawAddr(ExprRawAddr { expr, .. }) - | Expr::Reference(ExprReference { expr, .. }) - | Expr::Unary(ExprUnary { expr, .. }) => { - if match fixup.next_operator { - Precedence::Unambiguous => { - fail_offset >= 2 - && (consume_by_precedence == Scan::Consume || bailout_offset >= 1) - } - _ => bailout_offset >= 1, - } { - return Scan::Consume; - } - let right_fixup = fixup.rightmost_subexpression_fixup(false, false, Precedence::Prefix); - let scan = scan_right( - expr, - right_fixup, - precedence, - match fixup.next_operator { - Precedence::Unambiguous => fail_offset, - _ => 1, - }, - consume_by_precedence as u8 - Scan::Bailout as u8, - ); - match scan { - Scan::Fail => {} - Scan::Bailout => return consume_by_precedence, - Scan::Consume => return Scan::Consume, - } - if right_fixup.rightmost_subexpression_precedence(expr) < Precedence::Prefix { - consume_by_precedence - } else if let (Scan::Fail, Precedence::Unambiguous) = (scan, fixup.next_operator) { - Scan::Fail - } else { - Scan::Bailout - } - } - Expr::Range(e) if e.attrs.is_empty() => match &e.end { - Some(end) => { - if fail_offset >= 2 { - return Scan::Consume; - } - let right_fixup = - fixup.rightmost_subexpression_fixup(false, true, Precedence::Range); - let scan = scan_right( - end, - right_fixup, - Precedence::Range, - fail_offset, - match fixup.next_operator { - Precedence::Assign | Precedence::Range => 0, - _ => 1, - }, - ); - if match (scan, fixup.next_operator) { - (Scan::Fail, _) => false, - (Scan::Bailout, Precedence::Assign | Precedence::Range) => false, - (Scan::Bailout | Scan::Consume, _) => true, - } { - return Scan::Consume; - } - if right_fixup.rightmost_subexpression_precedence(end) <= Precedence::Range { - Scan::Consume - } else { - Scan::Fail - } - } - None => { - if fixup.next_operator_can_begin_expr { - Scan::Consume - } else { - Scan::Fail - } - } - }, - Expr::Break(e) => match &e.expr { - Some(value) => { - if bailout_offset >= 1 || e.label.is_none() && classify::expr_leading_label(value) { - return Scan::Consume; - } - let right_fixup = fixup.rightmost_subexpression_fixup(true, true, Precedence::Jump); - match scan_right(value, right_fixup, Precedence::Jump, 1, 1) { - Scan::Fail => Scan::Bailout, - Scan::Bailout | Scan::Consume => Scan::Consume, - } - } - None => match fixup.next_operator { - Precedence::Assign if precedence > Precedence::Assign => Scan::Fail, - _ => Scan::Consume, - }, - }, - Expr::Return(ExprReturn { expr, .. }) | Expr::Yield(ExprYield { expr, .. 
}) => match expr { - Some(e) => { - if bailout_offset >= 1 { - return Scan::Consume; - } - let right_fixup = - fixup.rightmost_subexpression_fixup(true, false, Precedence::Jump); - match scan_right(e, right_fixup, Precedence::Jump, 1, 1) { - Scan::Fail => Scan::Bailout, - Scan::Bailout | Scan::Consume => Scan::Consume, - } - } - None => match fixup.next_operator { - Precedence::Assign if precedence > Precedence::Assign => Scan::Fail, - _ => Scan::Consume, - }, - }, - Expr::Closure(_) => Scan::Consume, - Expr::Let(e) => { - if bailout_offset >= 1 { - return Scan::Consume; - } - let right_fixup = fixup.rightmost_subexpression_fixup(false, false, Precedence::Let); - let scan = scan_right( - &e.expr, - right_fixup, - Precedence::Let, - 1, - if fixup.next_operator < Precedence::Let { - 0 - } else { - 1 - }, - ); - match scan { - Scan::Fail | Scan::Bailout if fixup.next_operator < Precedence::Let => { - return Scan::Bailout; - } - Scan::Consume => return Scan::Consume, - _ => {} - } - if right_fixup.rightmost_subexpression_precedence(&e.expr) < Precedence::Let { - Scan::Consume - } else if let Scan::Fail = scan { - Scan::Bailout - } else { - Scan::Consume - } - } - Expr::Group(e) => scan_right(&e.expr, fixup, precedence, fail_offset, bailout_offset), - Expr::Array(_) - | Expr::Assign(_) - | Expr::Async(_) - | Expr::Await(_) - | Expr::Binary(_) - | Expr::Block(_) - | Expr::Call(_) - | Expr::Cast(_) - | Expr::Const(_) - | Expr::Continue(_) - | Expr::Field(_) - | Expr::ForLoop(_) - | Expr::If(_) - | Expr::Index(_) - | Expr::Infer(_) - | Expr::Lit(_) - | Expr::Loop(_) - | Expr::Macro(_) - | Expr::Match(_) - | Expr::MethodCall(_) - | Expr::Paren(_) - | Expr::Path(_) - | Expr::Range(_) - | Expr::Repeat(_) - | Expr::Struct(_) - | Expr::Try(_) - | Expr::TryBlock(_) - | Expr::Tuple(_) - | Expr::Unsafe(_) - | Expr::Verbatim(_) - | Expr::While(_) => match fixup.next_operator { - Precedence::Assign | Precedence::Range if precedence == Precedence::Range => Scan::Fail, - _ if precedence == Precedence::Let && fixup.next_operator < Precedence::Let => { - Scan::Fail - } - _ => consume_by_precedence, - }, - - _ => match fixup.next_operator { - Precedence::Assign | Precedence::Range if precedence == Precedence::Range => Scan::Fail, - _ if precedence == Precedence::Let && fixup.next_operator < Precedence::Let => { - Scan::Fail - } - _ => consume_by_precedence, - }, - } -} diff --git a/vendor/prettyplease/src/generics.rs b/vendor/prettyplease/src/generics.rs deleted file mode 100644 index 6c9688b147064d..00000000000000 --- a/vendor/prettyplease/src/generics.rs +++ /dev/null @@ -1,426 +0,0 @@ -use crate::algorithm::Printer; -use crate::iter::IterDelimited; -use crate::path::PathKind; -use crate::INDENT; -use proc_macro2::TokenStream; -use std::ptr; -use syn::{ - BoundLifetimes, CapturedParam, ConstParam, Expr, GenericParam, Generics, LifetimeParam, - PreciseCapture, PredicateLifetime, PredicateType, TraitBound, TraitBoundModifier, TypeParam, - TypeParamBound, WhereClause, WherePredicate, -}; - -impl Printer { - pub fn generics(&mut self, generics: &Generics) { - if generics.params.is_empty() { - return; - } - - self.word("<"); - self.cbox(0); - self.zerobreak(); - - // Print lifetimes before types and consts, regardless of their - // order in self.params. 
-        #[derive(Ord, PartialOrd, Eq, PartialEq)]
-        enum Group {
-            First,
-            Second,
-        }
-        fn group(param: &GenericParam) -> Group {
-            match param {
-                GenericParam::Lifetime(_) => Group::First,
-                GenericParam::Type(_) | GenericParam::Const(_) => Group::Second,
-            }
-        }
-        let last = generics.params.iter().max_by_key(|param| group(param));
-        for current_group in [Group::First, Group::Second] {
-            for param in &generics.params {
-                if group(param) == current_group {
-                    self.generic_param(param);
-                    self.trailing_comma(ptr::eq(param, last.unwrap()));
-                }
-            }
-        }
-
-        self.offset(-INDENT);
-        self.end();
-        self.word(">");
-    }
-
-    fn generic_param(&mut self, generic_param: &GenericParam) {
-        match generic_param {
-            GenericParam::Type(type_param) => self.type_param(type_param),
-            GenericParam::Lifetime(lifetime_param) => self.lifetime_param(lifetime_param),
-            GenericParam::Const(const_param) => self.const_param(const_param),
-        }
-    }
-
-    pub fn bound_lifetimes(&mut self, bound_lifetimes: &BoundLifetimes) {
-        self.word("for<");
-        for param in bound_lifetimes.lifetimes.iter().delimited() {
-            self.generic_param(&param);
-            if !param.is_last {
-                self.word(", ");
-            }
-        }
-        self.word("> ");
-    }
-
-    fn lifetime_param(&mut self, lifetime_param: &LifetimeParam) {
-        self.outer_attrs(&lifetime_param.attrs);
-        self.lifetime(&lifetime_param.lifetime);
-        for lifetime in lifetime_param.bounds.iter().delimited() {
-            if lifetime.is_first {
-                self.word(": ");
-            } else {
-                self.word(" + ");
-            }
-            self.lifetime(&lifetime);
-        }
-    }
-
-    fn type_param(&mut self, type_param: &TypeParam) {
-        self.outer_attrs(&type_param.attrs);
-        self.ident(&type_param.ident);
-        self.ibox(INDENT);
-        for type_param_bound in type_param.bounds.iter().delimited() {
-            if type_param_bound.is_first {
-                self.word(": ");
-            } else {
-                self.space();
-                self.word("+ ");
-            }
-            self.type_param_bound(&type_param_bound);
-        }
-        if let Some(default) = &type_param.default {
-            self.space();
-            self.word("= ");
-            self.ty(default);
-        }
-        self.end();
-    }
-
-    pub fn type_param_bound(&mut self, type_param_bound: &TypeParamBound) {
-        match type_param_bound {
-            #![cfg_attr(all(test, exhaustive), deny(non_exhaustive_omitted_patterns))]
-            TypeParamBound::Trait(trait_bound) => {
-                self.trait_bound(trait_bound, TraitBoundConst::None);
-            }
-            TypeParamBound::Lifetime(lifetime) => self.lifetime(lifetime),
-            TypeParamBound::PreciseCapture(precise_capture) => {
-                self.precise_capture(precise_capture);
-            }
-            TypeParamBound::Verbatim(bound) => self.type_param_bound_verbatim(bound),
-            _ => unimplemented!("unknown TypeParamBound"),
-        }
-    }
-
-    fn trait_bound(&mut self, trait_bound: &TraitBound, constness: TraitBoundConst) {
-        if trait_bound.paren_token.is_some() {
-            self.word("(");
-        }
-        if let Some(bound_lifetimes) = &trait_bound.lifetimes {
-            self.bound_lifetimes(bound_lifetimes);
-        }
-        match constness {
-            TraitBoundConst::None => {}
-            #[cfg(feature = "verbatim")]
-            TraitBoundConst::Conditional => self.word("[const] "),
-            #[cfg(feature = "verbatim")]
-            TraitBoundConst::Unconditional => self.word("const "),
-        }
-        self.trait_bound_modifier(&trait_bound.modifier);
-        for segment in trait_bound.path.segments.iter().delimited() {
-            if !segment.is_first || trait_bound.path.leading_colon.is_some() {
-                self.word("::");
-            }
-            self.path_segment(&segment, PathKind::Type);
-        }
-        if trait_bound.paren_token.is_some() {
-            self.word(")");
-        }
-    }
-
-    fn trait_bound_modifier(&mut self, trait_bound_modifier: &TraitBoundModifier) {
-        match trait_bound_modifier {
-            TraitBoundModifier::None => {}
-
TraitBoundModifier::Maybe(_question_mark) => self.word("?"), - } - } - - #[cfg(not(feature = "verbatim"))] - fn type_param_bound_verbatim(&mut self, bound: &TokenStream) { - unimplemented!("TypeParamBound::Verbatim `{}`", bound); - } - - #[cfg(feature = "verbatim")] - fn type_param_bound_verbatim(&mut self, tokens: &TokenStream) { - use syn::parse::{Parse, ParseStream, Result}; - use syn::{ - bracketed, parenthesized, token, ParenthesizedGenericArguments, Path, PathArguments, - Token, - }; - - enum TypeParamBoundVerbatim { - Ellipsis, - Const(TraitBound, TraitBoundConst), - } - - impl Parse for TypeParamBoundVerbatim { - fn parse(input: ParseStream) -> Result { - if input.peek(Token![...]) { - input.parse::()?; - return Ok(TypeParamBoundVerbatim::Ellipsis); - } - - let content; - let content = if input.peek(token::Paren) { - parenthesized!(content in input); - &content - } else { - input - }; - - let lifetimes: Option = content.parse()?; - - let constness = if content.peek(token::Bracket) { - let conditionally_const; - bracketed!(conditionally_const in content); - conditionally_const.parse::()?; - TraitBoundConst::Conditional - } else if content.peek(Token![const]) { - content.parse::()?; - TraitBoundConst::Unconditional - } else { - TraitBoundConst::None - }; - - let modifier: TraitBoundModifier = content.parse()?; - - let mut path: Path = content.parse()?; - if path.segments.last().unwrap().arguments.is_empty() - && (content.peek(token::Paren) - || content.peek(Token![::]) && content.peek3(token::Paren)) - { - content.parse::>()?; - let args: ParenthesizedGenericArguments = content.parse()?; - let parenthesized = PathArguments::Parenthesized(args); - path.segments.last_mut().unwrap().arguments = parenthesized; - } - - Ok(TypeParamBoundVerbatim::Const( - TraitBound { - paren_token: None, - modifier, - lifetimes, - path, - }, - constness, - )) - } - } - - let bound: TypeParamBoundVerbatim = match syn::parse2(tokens.clone()) { - Ok(bound) => bound, - Err(_) => unimplemented!("TypeParamBound::Verbatim `{}`", tokens), - }; - - match bound { - TypeParamBoundVerbatim::Ellipsis => { - self.word("..."); - } - TypeParamBoundVerbatim::Const(trait_bound, constness) => { - self.trait_bound(&trait_bound, constness); - } - } - } - - fn const_param(&mut self, const_param: &ConstParam) { - self.outer_attrs(&const_param.attrs); - self.word("const "); - self.ident(&const_param.ident); - self.word(": "); - self.ty(&const_param.ty); - if let Some(default) = &const_param.default { - self.word(" = "); - self.const_argument(default); - } - } - - pub fn where_clause_for_body(&mut self, where_clause: &Option) { - let hardbreaks = true; - let semi = false; - self.where_clause_impl(where_clause, hardbreaks, semi); - } - - pub fn where_clause_semi(&mut self, where_clause: &Option) { - let hardbreaks = true; - let semi = true; - self.where_clause_impl(where_clause, hardbreaks, semi); - } - - pub fn where_clause_oneline(&mut self, where_clause: &Option) { - let hardbreaks = false; - let semi = false; - self.where_clause_impl(where_clause, hardbreaks, semi); - } - - pub fn where_clause_oneline_semi(&mut self, where_clause: &Option) { - let hardbreaks = false; - let semi = true; - self.where_clause_impl(where_clause, hardbreaks, semi); - } - - fn where_clause_impl( - &mut self, - where_clause: &Option, - hardbreaks: bool, - semi: bool, - ) { - let where_clause = match where_clause { - Some(where_clause) if !where_clause.predicates.is_empty() => where_clause, - _ => { - if semi { - self.word(";"); - } else { - 
self.nbsp(); - } - return; - } - }; - if hardbreaks { - self.hardbreak(); - self.offset(-INDENT); - self.word("where"); - self.hardbreak(); - for predicate in where_clause.predicates.iter().delimited() { - self.where_predicate(&predicate); - if predicate.is_last && semi { - self.word(";"); - } else { - self.word(","); - self.hardbreak(); - } - } - if !semi { - self.offset(-INDENT); - } - } else { - self.space(); - self.offset(-INDENT); - self.word("where"); - self.space(); - for predicate in where_clause.predicates.iter().delimited() { - self.where_predicate(&predicate); - if predicate.is_last && semi { - self.word(";"); - } else { - self.trailing_comma_or_space(predicate.is_last); - } - } - if !semi { - self.offset(-INDENT); - } - } - } - - fn where_predicate(&mut self, predicate: &WherePredicate) { - match predicate { - #![cfg_attr(all(test, exhaustive), deny(non_exhaustive_omitted_patterns))] - WherePredicate::Type(predicate) => self.predicate_type(predicate), - WherePredicate::Lifetime(predicate) => self.predicate_lifetime(predicate), - _ => unimplemented!("unknown WherePredicate"), - } - } - - fn predicate_type(&mut self, predicate: &PredicateType) { - if let Some(bound_lifetimes) = &predicate.lifetimes { - self.bound_lifetimes(bound_lifetimes); - } - self.ty(&predicate.bounded_ty); - self.word(":"); - if predicate.bounds.len() == 1 { - self.ibox(0); - } else { - self.ibox(INDENT); - } - for type_param_bound in predicate.bounds.iter().delimited() { - if type_param_bound.is_first { - self.nbsp(); - } else { - self.space(); - self.word("+ "); - } - self.type_param_bound(&type_param_bound); - } - self.end(); - } - - fn predicate_lifetime(&mut self, predicate: &PredicateLifetime) { - self.lifetime(&predicate.lifetime); - self.word(":"); - self.ibox(INDENT); - for lifetime in predicate.bounds.iter().delimited() { - if lifetime.is_first { - self.nbsp(); - } else { - self.space(); - self.word("+ "); - } - self.lifetime(&lifetime); - } - self.end(); - } - - fn precise_capture(&mut self, precise_capture: &PreciseCapture) { - self.word("use<"); - for capture in precise_capture.params.iter().delimited() { - self.captured_param(&capture); - if !capture.is_last { - self.word(", "); - } - } - self.word(">"); - } - - fn captured_param(&mut self, capture: &CapturedParam) { - match capture { - #![cfg_attr(all(test, exhaustive), deny(non_exhaustive_omitted_patterns))] - CapturedParam::Lifetime(lifetime) => self.lifetime(lifetime), - CapturedParam::Ident(ident) => self.ident(ident), - _ => unimplemented!("unknown CapturedParam"), - } - } - - pub fn const_argument(&mut self, expr: &Expr) { - match expr { - #![cfg_attr(all(test, exhaustive), allow(non_exhaustive_omitted_patterns))] - Expr::Lit(expr) => self.expr_lit(expr), - - Expr::Path(expr) - if expr.attrs.is_empty() - && expr.qself.is_none() - && expr.path.get_ident().is_some() => - { - self.expr_path(expr); - } - - Expr::Block(expr) => self.expr_block(expr), - - _ => { - self.cbox(INDENT); - self.expr_as_small_block(expr, 0); - self.end(); - } - } - } -} - -enum TraitBoundConst { - None, - #[cfg(feature = "verbatim")] - Conditional, - #[cfg(feature = "verbatim")] - Unconditional, -} diff --git a/vendor/prettyplease/src/item.rs b/vendor/prettyplease/src/item.rs deleted file mode 100644 index 40623479ab23dd..00000000000000 --- a/vendor/prettyplease/src/item.rs +++ /dev/null @@ -1,1813 +0,0 @@ -use crate::algorithm::Printer; -use crate::fixup::FixupContext; -use crate::iter::IterDelimited; -use crate::mac; -use crate::path::PathKind; -use crate::INDENT; 
-use proc_macro2::TokenStream; -use syn::{ - Fields, FnArg, ForeignItem, ForeignItemFn, ForeignItemMacro, ForeignItemStatic, - ForeignItemType, ImplItem, ImplItemConst, ImplItemFn, ImplItemMacro, ImplItemType, Item, - ItemConst, ItemEnum, ItemExternCrate, ItemFn, ItemForeignMod, ItemImpl, ItemMacro, ItemMod, - ItemStatic, ItemStruct, ItemTrait, ItemTraitAlias, ItemType, ItemUnion, ItemUse, Receiver, - Signature, StaticMutability, TraitItem, TraitItemConst, TraitItemFn, TraitItemMacro, - TraitItemType, Type, UseGlob, UseGroup, UseName, UsePath, UseRename, UseTree, Variadic, -}; - -impl Printer { - pub fn item(&mut self, item: &Item) { - match item { - #![cfg_attr(all(test, exhaustive), deny(non_exhaustive_omitted_patterns))] - Item::Const(item) => self.item_const(item), - Item::Enum(item) => self.item_enum(item), - Item::ExternCrate(item) => self.item_extern_crate(item), - Item::Fn(item) => self.item_fn(item), - Item::ForeignMod(item) => self.item_foreign_mod(item), - Item::Impl(item) => self.item_impl(item), - Item::Macro(item) => self.item_macro(item), - Item::Mod(item) => self.item_mod(item), - Item::Static(item) => self.item_static(item), - Item::Struct(item) => self.item_struct(item), - Item::Trait(item) => self.item_trait(item), - Item::TraitAlias(item) => self.item_trait_alias(item), - Item::Type(item) => self.item_type(item), - Item::Union(item) => self.item_union(item), - Item::Use(item) => self.item_use(item), - Item::Verbatim(item) => self.item_verbatim(item), - _ => unimplemented!("unknown Item"), - } - } - - fn item_const(&mut self, item: &ItemConst) { - self.outer_attrs(&item.attrs); - self.cbox(0); - self.visibility(&item.vis); - self.word("const "); - self.ident(&item.ident); - self.generics(&item.generics); - self.word(": "); - self.ty(&item.ty); - self.word(" = "); - self.neverbreak(); - self.expr(&item.expr, FixupContext::NONE); - self.word(";"); - self.end(); - self.hardbreak(); - } - - fn item_enum(&mut self, item: &ItemEnum) { - self.outer_attrs(&item.attrs); - self.cbox(INDENT); - self.visibility(&item.vis); - self.word("enum "); - self.ident(&item.ident); - self.generics(&item.generics); - self.where_clause_for_body(&item.generics.where_clause); - self.word("{"); - self.hardbreak_if_nonempty(); - for variant in &item.variants { - self.variant(variant); - self.word(","); - self.hardbreak(); - } - self.offset(-INDENT); - self.end(); - self.word("}"); - self.hardbreak(); - } - - fn item_extern_crate(&mut self, item: &ItemExternCrate) { - self.outer_attrs(&item.attrs); - self.visibility(&item.vis); - self.word("extern crate "); - self.ident(&item.ident); - if let Some((_as_token, rename)) = &item.rename { - self.word(" as "); - self.ident(rename); - } - self.word(";"); - self.hardbreak(); - } - - fn item_fn(&mut self, item: &ItemFn) { - self.outer_attrs(&item.attrs); - self.cbox(INDENT); - self.visibility(&item.vis); - self.signature( - &item.sig, - #[cfg(feature = "verbatim")] - &verbatim::Safety::Disallowed, - ); - self.where_clause_for_body(&item.sig.generics.where_clause); - self.word("{"); - self.hardbreak_if_nonempty(); - self.inner_attrs(&item.attrs); - for stmt in item.block.stmts.iter().delimited() { - self.stmt(&stmt, stmt.is_last); - } - self.offset(-INDENT); - self.end(); - self.word("}"); - self.hardbreak(); - } - - fn item_foreign_mod(&mut self, item: &ItemForeignMod) { - self.outer_attrs(&item.attrs); - self.cbox(INDENT); - if item.unsafety.is_some() { - self.word("unsafe "); - } - self.abi(&item.abi); - self.word("{"); - self.hardbreak_if_nonempty(); - 
self.inner_attrs(&item.attrs); - for foreign_item in &item.items { - self.foreign_item(foreign_item); - } - self.offset(-INDENT); - self.end(); - self.word("}"); - self.hardbreak(); - } - - fn item_impl(&mut self, item: &ItemImpl) { - self.outer_attrs(&item.attrs); - self.cbox(INDENT); - self.ibox(-INDENT); - self.cbox(INDENT); - if item.defaultness.is_some() { - self.word("default "); - } - if item.unsafety.is_some() { - self.word("unsafe "); - } - self.word("impl"); - self.generics(&item.generics); - self.end(); - self.nbsp(); - if let Some((negative_polarity, path, _for_token)) = &item.trait_ { - if negative_polarity.is_some() { - self.word("!"); - } - self.path(path, PathKind::Type); - self.space(); - self.word("for "); - } - self.ty(&item.self_ty); - self.end(); - self.where_clause_for_body(&item.generics.where_clause); - self.word("{"); - self.hardbreak_if_nonempty(); - self.inner_attrs(&item.attrs); - for impl_item in &item.items { - self.impl_item(impl_item); - } - self.offset(-INDENT); - self.end(); - self.word("}"); - self.hardbreak(); - } - - fn item_macro(&mut self, item: &ItemMacro) { - self.outer_attrs(&item.attrs); - let semicolon = mac::requires_semi(&item.mac.delimiter); - self.mac(&item.mac, item.ident.as_ref(), semicolon); - self.hardbreak(); - } - - fn item_mod(&mut self, item: &ItemMod) { - self.outer_attrs(&item.attrs); - self.cbox(INDENT); - self.visibility(&item.vis); - if item.unsafety.is_some() { - self.word("unsafe "); - } - self.word("mod "); - self.ident(&item.ident); - if let Some((_brace, items)) = &item.content { - self.word(" {"); - self.hardbreak_if_nonempty(); - self.inner_attrs(&item.attrs); - for item in items { - self.item(item); - } - self.offset(-INDENT); - self.end(); - self.word("}"); - } else { - self.word(";"); - self.end(); - } - self.hardbreak(); - } - - fn item_static(&mut self, item: &ItemStatic) { - self.outer_attrs(&item.attrs); - self.cbox(0); - self.visibility(&item.vis); - self.word("static "); - self.static_mutability(&item.mutability); - self.ident(&item.ident); - self.word(": "); - self.ty(&item.ty); - self.word(" = "); - self.neverbreak(); - self.expr(&item.expr, FixupContext::NONE); - self.word(";"); - self.end(); - self.hardbreak(); - } - - fn item_struct(&mut self, item: &ItemStruct) { - self.outer_attrs(&item.attrs); - self.cbox(INDENT); - self.visibility(&item.vis); - self.word("struct "); - self.ident(&item.ident); - self.generics(&item.generics); - match &item.fields { - Fields::Named(fields) => { - self.where_clause_for_body(&item.generics.where_clause); - self.word("{"); - self.hardbreak_if_nonempty(); - for field in &fields.named { - self.field(field); - self.word(","); - self.hardbreak(); - } - self.offset(-INDENT); - self.end(); - self.word("}"); - } - Fields::Unnamed(fields) => { - self.fields_unnamed(fields); - self.where_clause_semi(&item.generics.where_clause); - self.end(); - } - Fields::Unit => { - self.where_clause_semi(&item.generics.where_clause); - self.end(); - } - } - self.hardbreak(); - } - - fn item_trait(&mut self, item: &ItemTrait) { - self.outer_attrs(&item.attrs); - self.cbox(INDENT); - self.visibility(&item.vis); - if item.unsafety.is_some() { - self.word("unsafe "); - } - if item.auto_token.is_some() { - self.word("auto "); - } - self.word("trait "); - self.ident(&item.ident); - self.generics(&item.generics); - for supertrait in item.supertraits.iter().delimited() { - if supertrait.is_first { - self.word(": "); - } else { - self.word(" + "); - } - self.type_param_bound(&supertrait); - } - 
self.where_clause_for_body(&item.generics.where_clause); - self.word("{"); - self.hardbreak_if_nonempty(); - self.inner_attrs(&item.attrs); - for trait_item in &item.items { - self.trait_item(trait_item); - } - self.offset(-INDENT); - self.end(); - self.word("}"); - self.hardbreak(); - } - - fn item_trait_alias(&mut self, item: &ItemTraitAlias) { - self.outer_attrs(&item.attrs); - self.cbox(INDENT); - self.visibility(&item.vis); - self.word("trait "); - self.ident(&item.ident); - self.generics(&item.generics); - self.word(" = "); - self.neverbreak(); - for bound in item.bounds.iter().delimited() { - if !bound.is_first { - self.space(); - self.word("+ "); - } - self.type_param_bound(&bound); - } - self.where_clause_semi(&item.generics.where_clause); - self.end(); - self.hardbreak(); - } - - fn item_type(&mut self, item: &ItemType) { - self.outer_attrs(&item.attrs); - self.cbox(INDENT); - self.visibility(&item.vis); - self.word("type "); - self.ident(&item.ident); - self.generics(&item.generics); - self.where_clause_oneline(&item.generics.where_clause); - self.word("= "); - self.neverbreak(); - self.ibox(-INDENT); - self.ty(&item.ty); - self.end(); - self.word(";"); - self.end(); - self.hardbreak(); - } - - fn item_union(&mut self, item: &ItemUnion) { - self.outer_attrs(&item.attrs); - self.cbox(INDENT); - self.visibility(&item.vis); - self.word("union "); - self.ident(&item.ident); - self.generics(&item.generics); - self.where_clause_for_body(&item.generics.where_clause); - self.word("{"); - self.hardbreak_if_nonempty(); - for field in &item.fields.named { - self.field(field); - self.word(","); - self.hardbreak(); - } - self.offset(-INDENT); - self.end(); - self.word("}"); - self.hardbreak(); - } - - fn item_use(&mut self, item: &ItemUse) { - self.outer_attrs(&item.attrs); - self.visibility(&item.vis); - self.word("use "); - if item.leading_colon.is_some() { - self.word("::"); - } - self.use_tree(&item.tree); - self.word(";"); - self.hardbreak(); - } - - #[cfg(not(feature = "verbatim"))] - fn item_verbatim(&mut self, item: &TokenStream) { - if !item.is_empty() { - unimplemented!("Item::Verbatim `{}`", item); - } - self.hardbreak(); - } - - #[cfg(feature = "verbatim")] - fn item_verbatim(&mut self, tokens: &TokenStream) { - use syn::parse::{Parse, ParseStream, Result}; - use syn::punctuated::Punctuated; - use syn::{ - braced, parenthesized, token, Attribute, Generics, Ident, Lifetime, Token, Visibility, - }; - use verbatim::{ - FlexibleItemConst, FlexibleItemFn, FlexibleItemStatic, FlexibleItemType, - WhereClauseLocation, - }; - - enum ItemVerbatim { - Empty, - Ellipsis, - ConstFlexible(FlexibleItemConst), - FnFlexible(FlexibleItemFn), - ImplFlexible(ImplFlexible), - Macro2(Macro2), - StaticFlexible(FlexibleItemStatic), - TypeFlexible(FlexibleItemType), - UseBrace(UseBrace), - } - - struct ImplFlexible { - attrs: Vec, - vis: Visibility, - defaultness: bool, - unsafety: bool, - generics: Generics, - constness: ImplConstness, - negative_impl: bool, - trait_: Option, - self_ty: Type, - items: Vec, - } - - enum ImplConstness { - None, - MaybeConst, - Const, - } - - struct Macro2 { - attrs: Vec, - vis: Visibility, - ident: Ident, - args: Option, - body: TokenStream, - } - - struct UseBrace { - attrs: Vec, - vis: Visibility, - trees: Punctuated, - } - - struct RootUseTree { - leading_colon: Option, - inner: UseTree, - } - - impl Parse for ImplConstness { - fn parse(input: ParseStream) -> Result { - if input.parse::>()?.is_some() { - input.parse::()?; - Ok(ImplConstness::MaybeConst) - } else if 
input.parse::>()?.is_some() { - Ok(ImplConstness::Const) - } else { - Ok(ImplConstness::None) - } - } - } - - impl Parse for RootUseTree { - fn parse(input: ParseStream) -> Result { - Ok(RootUseTree { - leading_colon: input.parse()?, - inner: input.parse()?, - }) - } - } - - impl Parse for ItemVerbatim { - fn parse(input: ParseStream) -> Result { - if input.is_empty() { - return Ok(ItemVerbatim::Empty); - } else if input.peek(Token![...]) { - input.parse::()?; - return Ok(ItemVerbatim::Ellipsis); - } - - let mut attrs = input.call(Attribute::parse_outer)?; - let vis: Visibility = input.parse()?; - - let lookahead = input.lookahead1(); - if lookahead.peek(Token![const]) && (input.peek2(Ident) || input.peek2(Token![_])) { - let defaultness = false; - let flexible_item = FlexibleItemConst::parse(attrs, vis, defaultness, input)?; - Ok(ItemVerbatim::ConstFlexible(flexible_item)) - } else if input.peek(Token![const]) - || lookahead.peek(Token![async]) - || lookahead.peek(Token![unsafe]) && !input.peek2(Token![impl]) - || lookahead.peek(Token![extern]) - || lookahead.peek(Token![fn]) - { - let defaultness = false; - let flexible_item = FlexibleItemFn::parse(attrs, vis, defaultness, input)?; - Ok(ItemVerbatim::FnFlexible(flexible_item)) - } else if lookahead.peek(Token![default]) - || input.peek(Token![unsafe]) - || lookahead.peek(Token![impl]) - { - let defaultness = input.parse::>()?.is_some(); - let unsafety = input.parse::>()?.is_some(); - input.parse::()?; - let has_generics = input.peek(Token![<]) - && (input.peek2(Token![>]) - || input.peek2(Token![#]) - || (input.peek2(Ident) || input.peek2(Lifetime)) - && (input.peek3(Token![:]) - || input.peek3(Token![,]) - || input.peek3(Token![>]) - || input.peek3(Token![=])) - || input.peek2(Token![const])); - let mut generics: Generics = if has_generics { - input.parse()? - } else { - Generics::default() - }; - let constness: ImplConstness = input.parse()?; - let negative_impl = - !input.peek2(token::Brace) && input.parse::>()?.is_some(); - let first_ty: Type = input.parse()?; - let (trait_, self_ty) = if input.parse::>()?.is_some() { - (Some(first_ty), input.parse()?) - } else { - (None, first_ty) - }; - generics.where_clause = input.parse()?; - let content; - braced!(content in input); - let inner_attrs = content.call(Attribute::parse_inner)?; - attrs.extend(inner_attrs); - let mut items = Vec::new(); - while !content.is_empty() { - items.push(content.parse()?); - } - Ok(ItemVerbatim::ImplFlexible(ImplFlexible { - attrs, - vis, - defaultness, - unsafety, - generics, - constness, - negative_impl, - trait_, - self_ty, - items, - })) - } else if lookahead.peek(Token![macro]) { - input.parse::()?; - let ident: Ident = input.parse()?; - let args = if input.peek(token::Paren) { - let paren_content; - parenthesized!(paren_content in input); - Some(paren_content.parse::()?) 
- } else { - None - }; - let brace_content; - braced!(brace_content in input); - let body: TokenStream = brace_content.parse()?; - Ok(ItemVerbatim::Macro2(Macro2 { - attrs, - vis, - ident, - args, - body, - })) - } else if lookahead.peek(Token![static]) { - let flexible_item = FlexibleItemStatic::parse(attrs, vis, input)?; - Ok(ItemVerbatim::StaticFlexible(flexible_item)) - } else if lookahead.peek(Token![type]) { - let defaultness = false; - let flexible_item = FlexibleItemType::parse( - attrs, - vis, - defaultness, - input, - WhereClauseLocation::BeforeEq, - )?; - Ok(ItemVerbatim::TypeFlexible(flexible_item)) - } else if lookahead.peek(Token![use]) { - input.parse::()?; - let content; - braced!(content in input); - let trees = content.parse_terminated(RootUseTree::parse, Token![,])?; - input.parse::()?; - Ok(ItemVerbatim::UseBrace(UseBrace { attrs, vis, trees })) - } else { - Err(lookahead.error()) - } - } - } - - let item: ItemVerbatim = match syn::parse2(tokens.clone()) { - Ok(item) => item, - Err(_) => unimplemented!("Item::Verbatim `{}`", tokens), - }; - - match item { - ItemVerbatim::Empty => { - self.hardbreak(); - } - ItemVerbatim::Ellipsis => { - self.word("..."); - self.hardbreak(); - } - ItemVerbatim::ConstFlexible(item) => { - self.flexible_item_const(&item); - } - ItemVerbatim::FnFlexible(item) => { - self.flexible_item_fn(&item); - } - ItemVerbatim::ImplFlexible(item) => { - self.outer_attrs(&item.attrs); - self.cbox(INDENT); - self.ibox(-INDENT); - self.cbox(INDENT); - self.visibility(&item.vis); - if item.defaultness { - self.word("default "); - } - if item.unsafety { - self.word("unsafe "); - } - self.word("impl"); - self.generics(&item.generics); - self.end(); - self.nbsp(); - match item.constness { - ImplConstness::None => {} - ImplConstness::MaybeConst => self.word("?const "), - ImplConstness::Const => self.word("const "), - } - if item.negative_impl { - self.word("!"); - } - if let Some(trait_) = &item.trait_ { - self.ty(trait_); - self.space(); - self.word("for "); - } - self.ty(&item.self_ty); - self.end(); - self.where_clause_for_body(&item.generics.where_clause); - self.word("{"); - self.hardbreak_if_nonempty(); - self.inner_attrs(&item.attrs); - for impl_item in &item.items { - self.impl_item(impl_item); - } - self.offset(-INDENT); - self.end(); - self.word("}"); - self.hardbreak(); - } - ItemVerbatim::Macro2(item) => { - self.outer_attrs(&item.attrs); - self.visibility(&item.vis); - self.word("macro "); - self.ident(&item.ident); - if let Some(args) = &item.args { - self.word("("); - self.cbox(INDENT); - self.zerobreak(); - self.ibox(0); - self.macro_rules_tokens(args.clone(), true); - self.end(); - self.zerobreak(); - self.offset(-INDENT); - self.end(); - self.word(")"); - } - self.word(" {"); - if !item.body.is_empty() { - self.neverbreak(); - self.cbox(INDENT); - self.hardbreak(); - self.ibox(0); - self.macro_rules_tokens(item.body.clone(), false); - self.end(); - self.hardbreak(); - self.offset(-INDENT); - self.end(); - } - self.word("}"); - self.hardbreak(); - } - ItemVerbatim::StaticFlexible(item) => { - self.flexible_item_static(&item); - } - ItemVerbatim::TypeFlexible(item) => { - self.flexible_item_type(&item); - } - ItemVerbatim::UseBrace(item) => { - self.outer_attrs(&item.attrs); - self.visibility(&item.vis); - self.word("use "); - if item.trees.len() == 1 { - self.word("::"); - self.use_tree(&item.trees[0].inner); - } else { - self.cbox(INDENT); - self.word("{"); - self.zerobreak(); - self.ibox(0); - for use_tree in item.trees.iter().delimited() { - 
if use_tree.leading_colon.is_some() { - self.word("::"); - } - self.use_tree(&use_tree.inner); - if !use_tree.is_last { - self.word(","); - let mut use_tree = &use_tree.inner; - while let UseTree::Path(use_path) = use_tree { - use_tree = &use_path.tree; - } - if let UseTree::Group(_) = use_tree { - self.hardbreak(); - } else { - self.space(); - } - } - } - self.end(); - self.trailing_comma(true); - self.offset(-INDENT); - self.word("}"); - self.end(); - } - self.word(";"); - self.hardbreak(); - } - } - } - - fn use_tree(&mut self, use_tree: &UseTree) { - match use_tree { - UseTree::Path(use_path) => self.use_path(use_path), - UseTree::Name(use_name) => self.use_name(use_name), - UseTree::Rename(use_rename) => self.use_rename(use_rename), - UseTree::Glob(use_glob) => self.use_glob(use_glob), - UseTree::Group(use_group) => self.use_group(use_group), - } - } - - fn use_path(&mut self, use_path: &UsePath) { - self.ident(&use_path.ident); - self.word("::"); - self.use_tree(&use_path.tree); - } - - fn use_name(&mut self, use_name: &UseName) { - self.ident(&use_name.ident); - } - - fn use_rename(&mut self, use_rename: &UseRename) { - self.ident(&use_rename.ident); - self.word(" as "); - self.ident(&use_rename.rename); - } - - fn use_glob(&mut self, use_glob: &UseGlob) { - let _ = use_glob; - self.word("*"); - } - - fn use_group(&mut self, use_group: &UseGroup) { - if use_group.items.is_empty() { - self.word("{}"); - } else if use_group.items.len() == 1 - && match &use_group.items[0] { - UseTree::Name(use_name) => use_name.ident != "self", - UseTree::Rename(use_rename) => use_rename.ident != "self", - _ => true, - } - { - self.use_tree(&use_group.items[0]); - } else { - self.cbox(INDENT); - self.word("{"); - self.zerobreak(); - self.ibox(0); - for use_tree in use_group.items.iter().delimited() { - self.use_tree(&use_tree); - if !use_tree.is_last { - self.word(","); - let mut use_tree = *use_tree; - while let UseTree::Path(use_path) = use_tree { - use_tree = &use_path.tree; - } - if let UseTree::Group(_) = use_tree { - self.hardbreak(); - } else { - self.space(); - } - } - } - self.end(); - self.trailing_comma(true); - self.offset(-INDENT); - self.word("}"); - self.end(); - } - } - - fn foreign_item(&mut self, foreign_item: &ForeignItem) { - match foreign_item { - #![cfg_attr(all(test, exhaustive), deny(non_exhaustive_omitted_patterns))] - ForeignItem::Fn(item) => self.foreign_item_fn(item), - ForeignItem::Static(item) => self.foreign_item_static(item), - ForeignItem::Type(item) => self.foreign_item_type(item), - ForeignItem::Macro(item) => self.foreign_item_macro(item), - ForeignItem::Verbatim(item) => self.foreign_item_verbatim(item), - _ => unimplemented!("unknown ForeignItem"), - } - } - - fn foreign_item_fn(&mut self, foreign_item: &ForeignItemFn) { - self.outer_attrs(&foreign_item.attrs); - self.cbox(INDENT); - self.visibility(&foreign_item.vis); - self.signature( - &foreign_item.sig, - #[cfg(feature = "verbatim")] - &verbatim::Safety::Disallowed, - ); - self.where_clause_semi(&foreign_item.sig.generics.where_clause); - self.end(); - self.hardbreak(); - } - - fn foreign_item_static(&mut self, foreign_item: &ForeignItemStatic) { - self.outer_attrs(&foreign_item.attrs); - self.cbox(0); - self.visibility(&foreign_item.vis); - self.word("static "); - self.static_mutability(&foreign_item.mutability); - self.ident(&foreign_item.ident); - self.word(": "); - self.ty(&foreign_item.ty); - self.word(";"); - self.end(); - self.hardbreak(); - } - - fn foreign_item_type(&mut self, foreign_item: 
&ForeignItemType) { - self.outer_attrs(&foreign_item.attrs); - self.cbox(0); - self.visibility(&foreign_item.vis); - self.word("type "); - self.ident(&foreign_item.ident); - self.generics(&foreign_item.generics); - self.word(";"); - self.end(); - self.hardbreak(); - } - - fn foreign_item_macro(&mut self, foreign_item: &ForeignItemMacro) { - self.outer_attrs(&foreign_item.attrs); - let semicolon = mac::requires_semi(&foreign_item.mac.delimiter); - self.mac(&foreign_item.mac, None, semicolon); - self.hardbreak(); - } - - #[cfg(not(feature = "verbatim"))] - fn foreign_item_verbatim(&mut self, foreign_item: &TokenStream) { - if !foreign_item.is_empty() { - unimplemented!("ForeignItem::Verbatim `{}`", foreign_item); - } - self.hardbreak(); - } - - #[cfg(feature = "verbatim")] - fn foreign_item_verbatim(&mut self, tokens: &TokenStream) { - use syn::parse::{Parse, ParseStream, Result}; - use syn::{Abi, Attribute, Token, Visibility}; - use verbatim::{ - kw, FlexibleItemFn, FlexibleItemStatic, FlexibleItemType, WhereClauseLocation, - }; - - enum ForeignItemVerbatim { - Empty, - Ellipsis, - FnFlexible(FlexibleItemFn), - StaticFlexible(FlexibleItemStatic), - TypeFlexible(FlexibleItemType), - } - - fn peek_signature(input: ParseStream) -> bool { - let fork = input.fork(); - fork.parse::>().is_ok() - && fork.parse::>().is_ok() - && ((fork.peek(kw::safe) && fork.parse::().is_ok()) - || fork.parse::>().is_ok()) - && fork.parse::>().is_ok() - && fork.peek(Token![fn]) - } - - impl Parse for ForeignItemVerbatim { - fn parse(input: ParseStream) -> Result { - if input.is_empty() { - return Ok(ForeignItemVerbatim::Empty); - } else if input.peek(Token![...]) { - input.parse::()?; - return Ok(ForeignItemVerbatim::Ellipsis); - } - - let attrs = input.call(Attribute::parse_outer)?; - let vis: Visibility = input.parse()?; - let defaultness = false; - - let lookahead = input.lookahead1(); - if lookahead.peek(Token![fn]) || peek_signature(input) { - let flexible_item = FlexibleItemFn::parse(attrs, vis, defaultness, input)?; - Ok(ForeignItemVerbatim::FnFlexible(flexible_item)) - } else if lookahead.peek(Token![static]) - || ((input.peek(Token![unsafe]) || input.peek(kw::safe)) - && input.peek2(Token![static])) - { - let flexible_item = FlexibleItemStatic::parse(attrs, vis, input)?; - Ok(ForeignItemVerbatim::StaticFlexible(flexible_item)) - } else if lookahead.peek(Token![type]) { - let flexible_item = FlexibleItemType::parse( - attrs, - vis, - defaultness, - input, - WhereClauseLocation::Both, - )?; - Ok(ForeignItemVerbatim::TypeFlexible(flexible_item)) - } else { - Err(lookahead.error()) - } - } - } - - let foreign_item: ForeignItemVerbatim = match syn::parse2(tokens.clone()) { - Ok(foreign_item) => foreign_item, - Err(_) => unimplemented!("ForeignItem::Verbatim `{}`", tokens), - }; - - match foreign_item { - ForeignItemVerbatim::Empty => { - self.hardbreak(); - } - ForeignItemVerbatim::Ellipsis => { - self.word("..."); - self.hardbreak(); - } - ForeignItemVerbatim::FnFlexible(foreign_item) => { - self.flexible_item_fn(&foreign_item); - } - ForeignItemVerbatim::StaticFlexible(foreign_item) => { - self.flexible_item_static(&foreign_item); - } - ForeignItemVerbatim::TypeFlexible(foreign_item) => { - self.flexible_item_type(&foreign_item); - } - } - } - - fn trait_item(&mut self, trait_item: &TraitItem) { - match trait_item { - #![cfg_attr(all(test, exhaustive), deny(non_exhaustive_omitted_patterns))] - TraitItem::Const(item) => self.trait_item_const(item), - TraitItem::Fn(item) => self.trait_item_fn(item), - 
TraitItem::Type(item) => self.trait_item_type(item), - TraitItem::Macro(item) => self.trait_item_macro(item), - TraitItem::Verbatim(item) => self.trait_item_verbatim(item), - _ => unimplemented!("unknown TraitItem"), - } - } - - fn trait_item_const(&mut self, trait_item: &TraitItemConst) { - self.outer_attrs(&trait_item.attrs); - self.cbox(0); - self.word("const "); - self.ident(&trait_item.ident); - self.generics(&trait_item.generics); - self.word(": "); - self.ty(&trait_item.ty); - if let Some((_eq_token, default)) = &trait_item.default { - self.word(" = "); - self.neverbreak(); - self.expr(default, FixupContext::NONE); - } - self.word(";"); - self.end(); - self.hardbreak(); - } - - fn trait_item_fn(&mut self, trait_item: &TraitItemFn) { - self.outer_attrs(&trait_item.attrs); - self.cbox(INDENT); - self.signature( - &trait_item.sig, - #[cfg(feature = "verbatim")] - &verbatim::Safety::Disallowed, - ); - if let Some(block) = &trait_item.default { - self.where_clause_for_body(&trait_item.sig.generics.where_clause); - self.word("{"); - self.hardbreak_if_nonempty(); - self.inner_attrs(&trait_item.attrs); - for stmt in block.stmts.iter().delimited() { - self.stmt(&stmt, stmt.is_last); - } - self.offset(-INDENT); - self.end(); - self.word("}"); - } else { - self.where_clause_semi(&trait_item.sig.generics.where_clause); - self.end(); - } - self.hardbreak(); - } - - fn trait_item_type(&mut self, trait_item: &TraitItemType) { - self.outer_attrs(&trait_item.attrs); - self.cbox(INDENT); - self.word("type "); - self.ident(&trait_item.ident); - self.generics(&trait_item.generics); - for bound in trait_item.bounds.iter().delimited() { - if bound.is_first { - self.word(": "); - } else { - self.space(); - self.word("+ "); - } - self.type_param_bound(&bound); - } - if let Some((_eq_token, default)) = &trait_item.default { - self.word(" = "); - self.neverbreak(); - self.ibox(-INDENT); - self.ty(default); - self.end(); - } - self.where_clause_oneline_semi(&trait_item.generics.where_clause); - self.end(); - self.hardbreak(); - } - - fn trait_item_macro(&mut self, trait_item: &TraitItemMacro) { - self.outer_attrs(&trait_item.attrs); - let semicolon = mac::requires_semi(&trait_item.mac.delimiter); - self.mac(&trait_item.mac, None, semicolon); - self.hardbreak(); - } - - #[cfg(not(feature = "verbatim"))] - fn trait_item_verbatim(&mut self, trait_item: &TokenStream) { - if !trait_item.is_empty() { - unimplemented!("TraitItem::Verbatim `{}`", trait_item); - } - self.hardbreak(); - } - - #[cfg(feature = "verbatim")] - fn trait_item_verbatim(&mut self, tokens: &TokenStream) { - use syn::parse::{Parse, ParseStream, Result}; - use syn::{Attribute, Ident, Token, Visibility}; - use verbatim::{FlexibleItemConst, FlexibleItemType, WhereClauseLocation}; - - enum TraitItemVerbatim { - Empty, - Ellipsis, - ConstFlexible(FlexibleItemConst), - TypeFlexible(FlexibleItemType), - PubOrDefault(PubOrDefaultTraitItem), - } - - struct PubOrDefaultTraitItem { - attrs: Vec, - vis: Visibility, - defaultness: bool, - trait_item: TraitItem, - } - - impl Parse for TraitItemVerbatim { - fn parse(input: ParseStream) -> Result { - if input.is_empty() { - return Ok(TraitItemVerbatim::Empty); - } else if input.peek(Token![...]) { - input.parse::()?; - return Ok(TraitItemVerbatim::Ellipsis); - } - - let attrs = input.call(Attribute::parse_outer)?; - let vis: Visibility = input.parse()?; - let defaultness = input.parse::>()?.is_some(); - - let lookahead = input.lookahead1(); - if lookahead.peek(Token![const]) && (input.peek2(Ident) || 
input.peek2(Token![_])) { - let flexible_item = FlexibleItemConst::parse(attrs, vis, defaultness, input)?; - Ok(TraitItemVerbatim::ConstFlexible(flexible_item)) - } else if lookahead.peek(Token![type]) { - let flexible_item = FlexibleItemType::parse( - attrs, - vis, - defaultness, - input, - WhereClauseLocation::AfterEq, - )?; - Ok(TraitItemVerbatim::TypeFlexible(flexible_item)) - } else if (input.peek(Token![const]) - || lookahead.peek(Token![async]) - || lookahead.peek(Token![unsafe]) - || lookahead.peek(Token![extern]) - || lookahead.peek(Token![fn])) - && (!matches!(vis, Visibility::Inherited) || defaultness) - { - Ok(TraitItemVerbatim::PubOrDefault(PubOrDefaultTraitItem { - attrs, - vis, - defaultness, - trait_item: input.parse()?, - })) - } else { - Err(lookahead.error()) - } - } - } - - let impl_item: TraitItemVerbatim = match syn::parse2(tokens.clone()) { - Ok(impl_item) => impl_item, - Err(_) => unimplemented!("TraitItem::Verbatim `{}`", tokens), - }; - - match impl_item { - TraitItemVerbatim::Empty => { - self.hardbreak(); - } - TraitItemVerbatim::Ellipsis => { - self.word("..."); - self.hardbreak(); - } - TraitItemVerbatim::ConstFlexible(trait_item) => { - self.flexible_item_const(&trait_item); - } - TraitItemVerbatim::TypeFlexible(trait_item) => { - self.flexible_item_type(&trait_item); - } - TraitItemVerbatim::PubOrDefault(trait_item) => { - self.outer_attrs(&trait_item.attrs); - self.visibility(&trait_item.vis); - if trait_item.defaultness { - self.word("default "); - } - self.trait_item(&trait_item.trait_item); - } - } - } - - fn impl_item(&mut self, impl_item: &ImplItem) { - match impl_item { - #![cfg_attr(all(test, exhaustive), deny(non_exhaustive_omitted_patterns))] - ImplItem::Const(item) => self.impl_item_const(item), - ImplItem::Fn(item) => self.impl_item_fn(item), - ImplItem::Type(item) => self.impl_item_type(item), - ImplItem::Macro(item) => self.impl_item_macro(item), - ImplItem::Verbatim(item) => self.impl_item_verbatim(item), - _ => unimplemented!("unknown ImplItem"), - } - } - - fn impl_item_const(&mut self, impl_item: &ImplItemConst) { - self.outer_attrs(&impl_item.attrs); - self.cbox(0); - self.visibility(&impl_item.vis); - if impl_item.defaultness.is_some() { - self.word("default "); - } - self.word("const "); - self.ident(&impl_item.ident); - self.generics(&impl_item.generics); - self.word(": "); - self.ty(&impl_item.ty); - self.word(" = "); - self.neverbreak(); - self.expr(&impl_item.expr, FixupContext::NONE); - self.word(";"); - self.end(); - self.hardbreak(); - } - - fn impl_item_fn(&mut self, impl_item: &ImplItemFn) { - self.outer_attrs(&impl_item.attrs); - self.cbox(INDENT); - self.visibility(&impl_item.vis); - if impl_item.defaultness.is_some() { - self.word("default "); - } - self.signature( - &impl_item.sig, - #[cfg(feature = "verbatim")] - &verbatim::Safety::Disallowed, - ); - self.where_clause_for_body(&impl_item.sig.generics.where_clause); - self.word("{"); - self.hardbreak_if_nonempty(); - self.inner_attrs(&impl_item.attrs); - for stmt in impl_item.block.stmts.iter().delimited() { - self.stmt(&stmt, stmt.is_last); - } - self.offset(-INDENT); - self.end(); - self.word("}"); - self.hardbreak(); - } - - fn impl_item_type(&mut self, impl_item: &ImplItemType) { - self.outer_attrs(&impl_item.attrs); - self.cbox(INDENT); - self.visibility(&impl_item.vis); - if impl_item.defaultness.is_some() { - self.word("default "); - } - self.word("type "); - self.ident(&impl_item.ident); - self.generics(&impl_item.generics); - self.word(" = "); - self.neverbreak(); - 
self.ibox(-INDENT); - self.ty(&impl_item.ty); - self.end(); - self.where_clause_oneline_semi(&impl_item.generics.where_clause); - self.end(); - self.hardbreak(); - } - - fn impl_item_macro(&mut self, impl_item: &ImplItemMacro) { - self.outer_attrs(&impl_item.attrs); - let semicolon = mac::requires_semi(&impl_item.mac.delimiter); - self.mac(&impl_item.mac, None, semicolon); - self.hardbreak(); - } - - #[cfg(not(feature = "verbatim"))] - fn impl_item_verbatim(&mut self, impl_item: &TokenStream) { - if !impl_item.is_empty() { - unimplemented!("ImplItem::Verbatim `{}`", impl_item); - } - self.hardbreak(); - } - - #[cfg(feature = "verbatim")] - fn impl_item_verbatim(&mut self, tokens: &TokenStream) { - use syn::parse::{Parse, ParseStream, Result}; - use syn::{Attribute, Ident, Token, Visibility}; - use verbatim::{FlexibleItemConst, FlexibleItemFn, FlexibleItemType, WhereClauseLocation}; - - enum ImplItemVerbatim { - Empty, - Ellipsis, - ConstFlexible(FlexibleItemConst), - FnFlexible(FlexibleItemFn), - TypeFlexible(FlexibleItemType), - } - - impl Parse for ImplItemVerbatim { - fn parse(input: ParseStream) -> Result { - if input.is_empty() { - return Ok(ImplItemVerbatim::Empty); - } else if input.peek(Token![...]) { - input.parse::()?; - return Ok(ImplItemVerbatim::Ellipsis); - } - - let attrs = input.call(Attribute::parse_outer)?; - let vis: Visibility = input.parse()?; - let defaultness = input.parse::>()?.is_some(); - - let lookahead = input.lookahead1(); - if lookahead.peek(Token![const]) && (input.peek2(Ident) || input.peek2(Token![_])) { - let flexible_item = FlexibleItemConst::parse(attrs, vis, defaultness, input)?; - Ok(ImplItemVerbatim::ConstFlexible(flexible_item)) - } else if input.peek(Token![const]) - || lookahead.peek(Token![async]) - || lookahead.peek(Token![unsafe]) - || lookahead.peek(Token![extern]) - || lookahead.peek(Token![fn]) - { - let flexible_item = FlexibleItemFn::parse(attrs, vis, defaultness, input)?; - Ok(ImplItemVerbatim::FnFlexible(flexible_item)) - } else if lookahead.peek(Token![type]) { - let flexible_item = FlexibleItemType::parse( - attrs, - vis, - defaultness, - input, - WhereClauseLocation::AfterEq, - )?; - Ok(ImplItemVerbatim::TypeFlexible(flexible_item)) - } else { - Err(lookahead.error()) - } - } - } - - let impl_item: ImplItemVerbatim = match syn::parse2(tokens.clone()) { - Ok(impl_item) => impl_item, - Err(_) => unimplemented!("ImplItem::Verbatim `{}`", tokens), - }; - - match impl_item { - ImplItemVerbatim::Empty => { - self.hardbreak(); - } - ImplItemVerbatim::Ellipsis => { - self.word("..."); - self.hardbreak(); - } - ImplItemVerbatim::ConstFlexible(impl_item) => { - self.flexible_item_const(&impl_item); - } - ImplItemVerbatim::FnFlexible(impl_item) => { - self.flexible_item_fn(&impl_item); - } - ImplItemVerbatim::TypeFlexible(impl_item) => { - self.flexible_item_type(&impl_item); - } - } - } - - fn signature( - &mut self, - signature: &Signature, - #[cfg(feature = "verbatim")] safety: &verbatim::Safety, - ) { - if signature.constness.is_some() { - self.word("const "); - } - if signature.asyncness.is_some() { - self.word("async "); - } - #[cfg(feature = "verbatim")] - { - if let verbatim::Safety::Disallowed = safety { - if signature.unsafety.is_some() { - self.word("unsafe "); - } - } else { - self.safety(safety); - } - } - #[cfg(not(feature = "verbatim"))] - { - if signature.unsafety.is_some() { - self.word("unsafe "); - } - } - if let Some(abi) = &signature.abi { - self.abi(abi); - } - self.word("fn "); - self.ident(&signature.ident); - 
self.generics(&signature.generics); - self.word("("); - self.neverbreak(); - self.cbox(0); - self.zerobreak(); - for input in signature.inputs.iter().delimited() { - self.fn_arg(&input); - let is_last = input.is_last && signature.variadic.is_none(); - self.trailing_comma(is_last); - } - if let Some(variadic) = &signature.variadic { - self.variadic(variadic); - self.zerobreak(); - } - self.offset(-INDENT); - self.end(); - self.word(")"); - self.cbox(-INDENT); - self.return_type(&signature.output); - self.end(); - } - - fn fn_arg(&mut self, fn_arg: &FnArg) { - match fn_arg { - FnArg::Receiver(receiver) => self.receiver(receiver), - FnArg::Typed(pat_type) => self.pat_type(pat_type), - } - } - - fn receiver(&mut self, receiver: &Receiver) { - self.outer_attrs(&receiver.attrs); - if let Some((_ampersand, lifetime)) = &receiver.reference { - self.word("&"); - if let Some(lifetime) = lifetime { - self.lifetime(lifetime); - self.nbsp(); - } - } - if receiver.mutability.is_some() { - self.word("mut "); - } - self.word("self"); - if receiver.colon_token.is_some() { - self.word(": "); - self.ty(&receiver.ty); - } else { - let consistent = match (&receiver.reference, &receiver.mutability, &*receiver.ty) { - (Some(_), mutability, Type::Reference(ty)) => { - mutability.is_some() == ty.mutability.is_some() - && match &*ty.elem { - Type::Path(ty) => ty.qself.is_none() && ty.path.is_ident("Self"), - _ => false, - } - } - (None, _, Type::Path(ty)) => ty.qself.is_none() && ty.path.is_ident("Self"), - _ => false, - }; - if !consistent { - self.word(": "); - self.ty(&receiver.ty); - } - } - } - - fn variadic(&mut self, variadic: &Variadic) { - self.outer_attrs(&variadic.attrs); - if let Some((pat, _colon)) = &variadic.pat { - self.pat(pat); - self.word(": "); - } - self.word("..."); - } - - fn static_mutability(&mut self, mutability: &StaticMutability) { - match mutability { - #![cfg_attr(all(test, exhaustive), deny(non_exhaustive_omitted_patterns))] - StaticMutability::Mut(_) => self.word("mut "), - StaticMutability::None => {} - _ => unimplemented!("unknown StaticMutability"), - } - } -} - -#[cfg(feature = "verbatim")] -mod verbatim { - use crate::algorithm::Printer; - use crate::fixup::FixupContext; - use crate::iter::IterDelimited; - use crate::INDENT; - use syn::ext::IdentExt; - use syn::parse::{Parse, ParseStream, Result}; - use syn::{ - braced, token, Attribute, Block, Expr, Generics, Ident, Signature, StaticMutability, Stmt, - Token, Type, TypeParamBound, Visibility, WhereClause, - }; - - pub mod kw { - syn::custom_keyword!(safe); - } - - pub struct FlexibleItemConst { - pub attrs: Vec, - pub vis: Visibility, - pub defaultness: bool, - pub ident: Ident, - pub generics: Generics, - pub ty: Type, - pub value: Option, - } - - pub struct FlexibleItemFn { - pub attrs: Vec, - pub vis: Visibility, - pub defaultness: bool, - pub safety: Safety, - pub sig: Signature, - pub body: Option>, - } - - pub struct FlexibleItemStatic { - pub attrs: Vec, - pub vis: Visibility, - pub safety: Safety, - pub mutability: StaticMutability, - pub ident: Ident, - pub ty: Option, - pub expr: Option, - } - - pub struct FlexibleItemType { - pub attrs: Vec, - pub vis: Visibility, - pub defaultness: bool, - pub ident: Ident, - pub generics: Generics, - pub bounds: Vec, - pub definition: Option, - pub where_clause_after_eq: Option, - } - - pub enum Safety { - Unsafe, - Safe, - Default, - Disallowed, - } - - pub enum WhereClauseLocation { - // type Ty where T: 'static = T; - BeforeEq, - // type Ty = T where T: 'static; - AfterEq, - // 
TODO: goes away once the migration period on rust-lang/rust#89122 is over - Both, - } - - impl FlexibleItemConst { - pub fn parse( - attrs: Vec, - vis: Visibility, - defaultness: bool, - input: ParseStream, - ) -> Result { - input.parse::()?; - let ident = input.call(Ident::parse_any)?; - let mut generics: Generics = input.parse()?; - input.parse::()?; - let ty: Type = input.parse()?; - let value = if input.parse::>()?.is_some() { - let expr: Expr = input.parse()?; - Some(expr) - } else { - None - }; - generics.where_clause = input.parse()?; - input.parse::()?; - - Ok(FlexibleItemConst { - attrs, - vis, - defaultness, - ident, - generics, - ty, - value, - }) - } - } - - impl FlexibleItemFn { - pub fn parse( - mut attrs: Vec, - vis: Visibility, - defaultness: bool, - input: ParseStream, - ) -> Result { - let constness: Option = input.parse()?; - let asyncness: Option = input.parse()?; - let safety: Safety = input.parse()?; - - let lookahead = input.lookahead1(); - let sig: Signature = if lookahead.peek(Token![extern]) || lookahead.peek(Token![fn]) { - input.parse()? - } else { - return Err(lookahead.error()); - }; - - let lookahead = input.lookahead1(); - let body = if lookahead.peek(Token![;]) { - input.parse::()?; - None - } else if lookahead.peek(token::Brace) { - let content; - braced!(content in input); - attrs.extend(content.call(Attribute::parse_inner)?); - Some(content.call(Block::parse_within)?) - } else { - return Err(lookahead.error()); - }; - - Ok(FlexibleItemFn { - attrs, - vis, - defaultness, - safety, - sig: Signature { - constness, - asyncness, - unsafety: None, - ..sig - }, - body, - }) - } - } - - impl FlexibleItemStatic { - pub fn parse(attrs: Vec, vis: Visibility, input: ParseStream) -> Result { - let safety: Safety = input.parse()?; - input.parse::()?; - let mutability: StaticMutability = input.parse()?; - let ident = input.parse()?; - - let lookahead = input.lookahead1(); - let has_type = lookahead.peek(Token![:]); - let has_expr = lookahead.peek(Token![=]); - if !has_type && !has_expr { - return Err(lookahead.error()); - } - - let ty: Option = if has_type { - input.parse::()?; - input.parse().map(Some)? - } else { - None - }; - - let expr: Option = if input.parse::>()?.is_some() { - input.parse().map(Some)? - } else { - None - }; - - input.parse::()?; - - Ok(FlexibleItemStatic { - attrs, - vis, - safety, - mutability, - ident, - ty, - expr, - }) - } - } - - impl FlexibleItemType { - pub fn parse( - attrs: Vec, - vis: Visibility, - defaultness: bool, - input: ParseStream, - where_clause_location: WhereClauseLocation, - ) -> Result { - input.parse::()?; - let ident: Ident = input.parse()?; - let mut generics: Generics = input.parse()?; - - let mut bounds = Vec::new(); - if input.parse::>()?.is_some() { - loop { - if input.peek(Token![where]) || input.peek(Token![=]) || input.peek(Token![;]) { - break; - } - bounds.push(input.parse::()?); - if input.peek(Token![where]) || input.peek(Token![=]) || input.peek(Token![;]) { - break; - } - input.parse::()?; - } - } - - match where_clause_location { - WhereClauseLocation::BeforeEq | WhereClauseLocation::Both => { - generics.where_clause = input.parse()?; - } - WhereClauseLocation::AfterEq => {} - } - - let definition = if input.parse::>()?.is_some() { - Some(input.parse()?) - } else { - None - }; - - let where_clause_after_eq = match where_clause_location { - WhereClauseLocation::AfterEq | WhereClauseLocation::Both - if generics.where_clause.is_none() => - { - input.parse()? 
- } - _ => None, - }; - - input.parse::()?; - - Ok(FlexibleItemType { - attrs, - vis, - defaultness, - ident, - generics, - bounds, - definition, - where_clause_after_eq, - }) - } - } - - impl Parse for Safety { - fn parse(input: ParseStream) -> Result { - if input.peek(Token![unsafe]) { - input.parse::()?; - Ok(Safety::Unsafe) - } else if input.peek(kw::safe) { - input.parse::()?; - Ok(Safety::Safe) - } else { - Ok(Safety::Default) - } - } - } - - impl Printer { - pub fn flexible_item_const(&mut self, item: &FlexibleItemConst) { - self.outer_attrs(&item.attrs); - self.cbox(INDENT); - self.visibility(&item.vis); - if item.defaultness { - self.word("default "); - } - self.word("const "); - self.ident(&item.ident); - self.generics(&item.generics); - self.word(": "); - self.cbox(-INDENT); - self.ty(&item.ty); - self.end(); - if let Some(value) = &item.value { - self.word(" = "); - self.neverbreak(); - self.ibox(-INDENT); - self.expr(value, FixupContext::NONE); - self.end(); - } - self.where_clause_oneline_semi(&item.generics.where_clause); - self.end(); - self.hardbreak(); - } - - pub fn flexible_item_fn(&mut self, item: &FlexibleItemFn) { - self.outer_attrs(&item.attrs); - self.cbox(INDENT); - self.visibility(&item.vis); - if item.defaultness { - self.word("default "); - } - self.signature(&item.sig, &item.safety); - if let Some(body) = &item.body { - self.where_clause_for_body(&item.sig.generics.where_clause); - self.word("{"); - self.hardbreak_if_nonempty(); - self.inner_attrs(&item.attrs); - for stmt in body.iter().delimited() { - self.stmt(&stmt, stmt.is_last); - } - self.offset(-INDENT); - self.end(); - self.word("}"); - } else { - self.where_clause_semi(&item.sig.generics.where_clause); - self.end(); - } - self.hardbreak(); - } - - pub fn flexible_item_static(&mut self, item: &FlexibleItemStatic) { - self.outer_attrs(&item.attrs); - self.cbox(0); - self.visibility(&item.vis); - self.safety(&item.safety); - self.word("static "); - self.static_mutability(&item.mutability); - self.ident(&item.ident); - if let Some(ty) = &item.ty { - self.word(": "); - self.ty(ty); - } - if let Some(expr) = &item.expr { - self.word(" = "); - self.neverbreak(); - self.expr(expr, FixupContext::NONE); - } - self.word(";"); - self.end(); - self.hardbreak(); - } - - pub fn flexible_item_type(&mut self, item: &FlexibleItemType) { - self.outer_attrs(&item.attrs); - self.cbox(INDENT); - self.visibility(&item.vis); - if item.defaultness { - self.word("default "); - } - self.word("type "); - self.ident(&item.ident); - self.generics(&item.generics); - for bound in item.bounds.iter().delimited() { - if bound.is_first { - self.word(": "); - } else { - self.space(); - self.word("+ "); - } - self.type_param_bound(&bound); - } - if let Some(definition) = &item.definition { - self.where_clause_oneline(&item.generics.where_clause); - self.word("= "); - self.neverbreak(); - self.ibox(-INDENT); - self.ty(definition); - self.end(); - self.where_clause_oneline_semi(&item.where_clause_after_eq); - } else { - self.where_clause_oneline_semi(&item.generics.where_clause); - } - self.end(); - self.hardbreak(); - } - - pub fn safety(&mut self, safety: &Safety) { - match safety { - Safety::Unsafe => self.word("unsafe "), - Safety::Safe => self.word("safe "), - Safety::Default => {} - Safety::Disallowed => unreachable!(), - } - } - } -} diff --git a/vendor/prettyplease/src/iter.rs b/vendor/prettyplease/src/iter.rs deleted file mode 100644 index 702c653f52364d..00000000000000 --- a/vendor/prettyplease/src/iter.rs +++ /dev/null @@ -1,46 
+0,0 @@
-use std::iter::Peekable;
-use std::ops::Deref;
-
-pub struct Delimited<I: Iterator> {
-    is_first: bool,
-    iter: Peekable<I>,
-}
-
-pub trait IterDelimited: Iterator + Sized {
-    fn delimited(self) -> Delimited<Self> {
-        Delimited {
-            is_first: true,
-            iter: self.peekable(),
-        }
-    }
-}
-
-impl<I: Iterator> IterDelimited for I {}
-
-pub struct IteratorItem<T> {
-    value: T,
-    pub is_first: bool,
-    pub is_last: bool,
-}
-
-impl<I: Iterator> Iterator for Delimited<I> {
-    type Item = IteratorItem<I::Item>;
-
-    fn next(&mut self) -> Option<Self::Item> {
-        let item = IteratorItem {
-            value: self.iter.next()?,
-            is_first: self.is_first,
-            is_last: self.iter.peek().is_none(),
-        };
-        self.is_first = false;
-        Some(item)
-    }
-}
-
-impl<T> Deref for IteratorItem<T> {
-    type Target = T;
-
-    fn deref(&self) -> &Self::Target {
-        &self.value
-    }
-}
diff --git a/vendor/prettyplease/src/lib.rs b/vendor/prettyplease/src/lib.rs
deleted file mode 100644
index 2fc8846ecfa849..00000000000000
--- a/vendor/prettyplease/src/lib.rs
+++ /dev/null
@@ -1,385 +0,0 @@
-//! [![github]](https://github.com/dtolnay/prettyplease) [![crates-io]](https://crates.io/crates/prettyplease) [![docs-rs]](https://docs.rs/prettyplease)
-//!
-//! [github]: https://img.shields.io/badge/github-8da0cb?style=for-the-badge&labelColor=555555&logo=github
-//! [crates-io]: https://img.shields.io/badge/crates.io-fc8d62?style=for-the-badge&labelColor=555555&logo=rust
-//! [docs-rs]: https://img.shields.io/badge/docs.rs-66c2a5?style=for-the-badge&labelColor=555555&logo=docs.rs
-//!
-//! <br>
-//!
-//! **prettyplease::unparse** — a minimal `syn` syntax tree pretty-printer
-//!
-//! <br>
-//! -//! # Overview -//! -//! This is a pretty-printer to turn a `syn` syntax tree into a `String` of -//! well-formatted source code. In contrast to rustfmt, this library is intended -//! to be suitable for arbitrary generated code. -//! -//! Rustfmt prioritizes high-quality output that is impeccable enough that you'd -//! be comfortable spending your career staring at its output — but that -//! means some heavyweight algorithms, and it has a tendency to bail out on code -//! that is hard to format (for example [rustfmt#3697], and there are dozens -//! more issues like it). That's not necessarily a big deal for human-generated -//! code because when code gets highly nested, the human will naturally be -//! inclined to refactor into more easily formattable code. But for generated -//! code, having the formatter just give up leaves it totally unreadable. -//! -//! [rustfmt#3697]: https://github.com/rust-lang/rustfmt/issues/3697 -//! -//! This library is designed using the simplest possible algorithm and data -//! structures that can deliver about 95% of the quality of rustfmt-formatted -//! output. In my experience testing real-world code, approximately 97-98% of -//! output lines come out identical between rustfmt's formatting and this -//! crate's. The rest have slightly different linebreak decisions, but still -//! clearly follow the dominant modern Rust style. -//! -//! The tradeoffs made by this crate are a good fit for generated code that you -//! will *not* spend your career staring at. For example, the output of -//! `bindgen`, or the output of `cargo-expand`. In those cases it's more -//! important that the whole thing be formattable without the formatter giving -//! up, than that it be flawless. -//! -//!
-//! -//! # Feature matrix -//! -//! Here are a few superficial comparisons of this crate against the AST -//! pretty-printer built into rustc, and rustfmt. The sections below go into -//! more detail comparing the output of each of these libraries. -//! -//! | | prettyplease | rustc | rustfmt | -//! |:---|:---:|:---:|:---:| -//! | non-pathological behavior on big or generated code | 💚 | ❌ | ❌ | -//! | idiomatic modern formatting ("locally indistinguishable from rustfmt") | 💚 | ❌ | 💚 | -//! | throughput | 60 MB/s | 39 MB/s | 2.8 MB/s | -//! | number of dependencies | 3 | 72 | 66 | -//! | compile time including dependencies | 2.4 sec | 23.1 sec | 29.8 sec | -//! | buildable using a stable Rust compiler | 💚 | ❌ | ❌ | -//! | published to crates.io | 💚 | ❌ | ❌ | -//! | extensively configurable output | ❌ | ❌ | 💚 | -//! | intended to accommodate hand-maintained source code | ❌ | ❌ | 💚 | -//! -//!
-//! -//! # Comparison to rustfmt -//! -//! - [input.rs](https://github.com/dtolnay/prettyplease/blob/0.1.0/examples/input.rs) -//! - [output.prettyplease.rs](https://github.com/dtolnay/prettyplease/blob/0.1.0/examples/output.prettyplease.rs) -//! - [output.rustfmt.rs](https://github.com/dtolnay/prettyplease/blob/0.1.0/examples/output.rustfmt.rs) -//! -//! If you weren't told which output file is which, it would be practically -//! impossible to tell — **except** for line 435 in the rustfmt output, -//! which is more than 1000 characters long because rustfmt just gave up -//! formatting that part of the file: -//! -//! ``` -//! # const _: &str = stringify! {{{ -//! match segments[5] { -//! 0 => write!(f, "::{}", ipv4), -//! 0xffff => write!(f, "::ffff:{}", ipv4), -//! _ => unreachable!(), -//! } -//! } else { # [derive (Copy , Clone , Default)] struct Span { start : usize , len : usize , } let zeroes = { let mut longest = Span :: default () ; let mut current = Span :: default () ; for (i , & segment) in segments . iter () . enumerate () { if segment == 0 { if current . len == 0 { current . start = i ; } current . len += 1 ; if current . len > longest . len { longest = current ; } } else { current = Span :: default () ; } } longest } ; # [doc = " Write a colon-separated part of the address"] # [inline] fn fmt_subslice (f : & mut fmt :: Formatter < '_ > , chunk : & [u16]) -> fmt :: Result { if let Some ((first , tail)) = chunk . split_first () { write ! (f , "{:x}" , first) ? ; for segment in tail { f . write_char (':') ? ; write ! (f , "{:x}" , segment) ? ; } } Ok (()) } if zeroes . len > 1 { fmt_subslice (f , & segments [.. zeroes . start]) ? ; f . write_str ("::") ? ; fmt_subslice (f , & segments [zeroes . start + zeroes . len ..]) } else { fmt_subslice (f , & segments) } } -//! } else { -//! const IPV6_BUF_LEN: usize = (4 * 8) + 7; -//! let mut buf = [0u8; IPV6_BUF_LEN]; -//! let mut buf_slice = &mut buf[..]; -//! # }}; -//! ``` -//! -//! This is a pretty typical manifestation of rustfmt bailing out in generated -//! code — a chunk of the input ends up on one line. The other -//! manifestation is that you're working on some code, running rustfmt on save -//! like a conscientious developer, but after a while notice it isn't doing -//! anything. You introduce an intentional formatting issue, like a stray indent -//! or semicolon, and run rustfmt to check your suspicion. Nope, it doesn't get -//! cleaned up — rustfmt is just not formatting the part of the file you -//! are working on. -//! -//! The prettyplease library is designed to have no pathological cases that -//! force a bail out; the entire input you give it will get formatted in some -//! "good enough" form. -//! -//! Separately, rustfmt can be problematic to integrate into projects. It's -//! written using rustc's internal syntax tree, so it can't be built by a stable -//! compiler. Its releases are not regularly published to crates.io, so in Cargo -//! builds you'd need to depend on it as a git dependency, which precludes -//! publishing your crate to crates.io also. You can shell out to a `rustfmt` -//! binary, but that'll be whatever rustfmt version is installed on each -//! developer's system (if any), which can lead to spurious diffs in checked-in -//! generated code formatted by different versions. In contrast prettyplease is -//! designed to be easy to pull in as a library, and compiles fast. -//! -//!
-//! -//! # Comparison to rustc_ast_pretty -//! -//! - [input.rs](https://github.com/dtolnay/prettyplease/blob/0.1.0/examples/input.rs) -//! - [output.prettyplease.rs](https://github.com/dtolnay/prettyplease/blob/0.1.0/examples/output.prettyplease.rs) -//! - [output.rustc.rs](https://github.com/dtolnay/prettyplease/blob/0.1.0/examples/output.rustc.rs) -//! -//! This is the pretty-printer that gets used when rustc prints source code, -//! such as `rustc -Zunpretty=expanded`. It's used also by the standard -//! library's `stringify!` when stringifying an interpolated macro_rules AST -//! fragment, like an $:expr, and transitively by `dbg!` and many macros in the -//! ecosystem. -//! -//! Rustc's formatting is mostly okay, but does not hew closely to the dominant -//! contemporary style of Rust formatting. Some things wouldn't ever be written -//! on one line, like this `match` expression, and certainly not with a comma in -//! front of the closing brace: -//! -//! ``` -//! # const _: &str = stringify! { -//! fn eq(&self, other: &IpAddr) -> bool { -//! match other { IpAddr::V4(v4) => self == v4, IpAddr::V6(_) => false, } -//! } -//! # }; -//! ``` -//! -//! Some places use non-multiple-of-4 indentation, which is definitely not the -//! norm: -//! -//! ``` -//! # const _: &str = stringify! { -//! pub const fn to_ipv6_mapped(&self) -> Ipv6Addr { -//! let [a, b, c, d] = self.octets(); -//! Ipv6Addr{inner: -//! c::in6_addr{s6_addr: -//! [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xFF, -//! 0xFF, a, b, c, d],},} -//! } -//! # }; -//! ``` -//! -//! And although there isn't an egregious example of it in the link because the -//! input code is pretty tame, in general rustc_ast_pretty has pathological -//! behavior on generated code. It has a tendency to use excessive horizontal -//! indentation and rapidly run out of width: -//! -//! ``` -//! # const _: &str = stringify! { -//! ::std::io::_print(::core::fmt::Arguments::new_v1(&[""], -//! &match (&msg,) { -//! _args => -//! [::core::fmt::ArgumentV1::new(_args.0, -//! ::core::fmt::Display::fmt)], -//! })); -//! # }; -//! ``` -//! -//! The snippets above are clearly different from modern rustfmt style. In -//! contrast, prettyplease is designed to have output that is practically -//! indistinguishable from rustfmt-formatted code. -//! -//!
-//! -//! # Example -//! -//! ``` -//! // [dependencies] -//! // prettyplease = "0.2" -//! // syn = { version = "2", default-features = false, features = ["full", "parsing"] } -//! -//! const INPUT: &str = stringify! { -//! use crate::{ -//! lazy::{Lazy, SyncLazy, SyncOnceCell}, panic, -//! sync::{ atomic::{AtomicUsize, Ordering::SeqCst}, -//! mpsc::channel, Mutex, }, -//! thread, -//! }; -//! impl Into for T where U: From { -//! fn into(self) -> U { U::from(self) } -//! } -//! }; -//! -//! fn main() { -//! let syntax_tree = syn::parse_file(INPUT).unwrap(); -//! let formatted = prettyplease::unparse(&syntax_tree); -//! print!("{}", formatted); -//! } -//! ``` -//! -//!
-//! -//! # Algorithm notes -//! -//! The approach and terminology used in the implementation are derived from -//! [*Derek C. Oppen, "Pretty Printing" (1979)*][paper], on which -//! rustc_ast_pretty is also based, and from rustc_ast_pretty's implementation -//! written by Graydon Hoare in 2011 (and modernized over the years by dozens of -//! volunteer maintainers). -//! -//! [paper]: http://i.stanford.edu/pub/cstr/reports/cs/tr/79/770/CS-TR-79-770.pdf -//! -//! The paper describes two language-agnostic interacting procedures `Scan()` -//! and `Print()`. Language-specific code decomposes an input data structure -//! into a stream of `string` and `break` tokens, and `begin` and `end` tokens -//! for grouping. Each `begin`–`end` range may be identified as either -//! "consistent breaking" or "inconsistent breaking". If a group is consistently -//! breaking, then if the whole contents do not fit on the line, *every* `break` -//! token in the group will receive a linebreak. This is appropriate, for -//! example, for Rust struct literals, or arguments of a function call. If a -//! group is inconsistently breaking, then the `string` tokens in the group are -//! greedily placed on the line until out of space, and linebroken only at those -//! `break` tokens for which the next string would not fit. For example, this is -//! appropriate for the contents of a braced `use` statement in Rust. -//! -//! Scan's job is to efficiently accumulate sizing information about groups and -//! breaks. For every `begin` token we compute the distance to the matched `end` -//! token, and for every `break` we compute the distance to the next `break`. -//! The algorithm uses a ringbuffer to hold tokens whose size is not yet -//! ascertained. The maximum size of the ringbuffer is bounded by the target -//! line length and does not grow indefinitely, regardless of deep nesting in -//! the input stream. That's because once a group is sufficiently big, the -//! precise size can no longer make a difference to linebreak decisions and we -//! can effectively treat it as "infinity". -//! -//! Print's job is to use the sizing information to efficiently assign a -//! "broken" or "not broken" status to every `begin` token. At that point the -//! output is easily constructed by concatenating `string` tokens and breaking -//! at `break` tokens contained within a broken group. -//! -//! Leveraging these primitives (i.e. cleverly placing the all-or-nothing -//! consistent breaks and greedy inconsistent breaks) to yield -//! rustfmt-compatible formatting for all of Rust's syntax tree nodes is a fun -//! challenge. -//! -//! Here is a visualization of some Rust tokens fed into the pretty printing -//! algorithm. Consistently breaking `begin`—`end` pairs are represented -//! by `«`⁠`»`, inconsistently breaking by `‹`⁠`›`, `break` by `·`, -//! and the rest of the non-whitespace are `string`. -//! -//! ```text -//! use crate::«{· -//! ‹ lazy::«{·‹Lazy,· SyncLazy,· SyncOnceCell›·}»,· -//! panic,· -//! sync::«{· -//! ‹ atomic::«{·‹AtomicUsize,· Ordering::SeqCst›·}»,· -//! mpsc::channel,· Mutex›,· -//! }»,· -//! thread›,· -//! }»;· -//! «‹«impl<«·T‹›,· U‹›·»>» Into<«·U·»>· for T›· -//! where· -//! U:‹ From<«·T·»>›,· -//! {· -//! « fn into(·«·self·») -> U {· -//! ‹ U::from(«·self·»)›· -//! » }· -//! »}· -//! ``` -//! -//! The algorithm described in the paper is not quite sufficient for producing -//! well-formatted Rust code that is locally indistinguishable from rustfmt's -//! style. 
The reason is that in the paper, the complete non-whitespace contents -//! are assumed to be independent of linebreak decisions, with Scan and Print -//! being only in control of the whitespace (spaces and line breaks). In Rust as -//! idiomatically formatted by rustfmt, that is not the case. Trailing commas -//! are one example; the punctuation is only known *after* the broken vs -//! non-broken status of the surrounding group is known: -//! -//! ``` -//! # struct Struct { x: u64, y: bool } -//! # let xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx = 0; -//! # let yyyyyyyyyyyyyyyyyyyyyyyyyyyyyy = true; -//! # -//! let _ = Struct { x: 0, y: true }; -//! -//! let _ = Struct { -//! x: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx, -//! y: yyyyyyyyyyyyyyyyyyyyyyyyyyyyyy, //<- trailing comma if the expression wrapped -//! }; -//! ``` -//! -//! The formatting of `match` expressions is another case; we want small arms on -//! the same line as the pattern, and big arms wrapped in a brace. The presence -//! of the brace punctuation, comma, and semicolon are all dependent on whether -//! the arm fits on the line: -//! -//! ``` -//! # struct Entry { nanos: u32 } -//! # let total_nanos = 0u64; -//! # let mut total_secs = 0u64; -//! # let tmp; -//! # let entry = Entry { nanos: 0 }; -//! # const NANOS_PER_SEC: u32 = 1_000_000_000; -//! # -//! match total_nanos.checked_add(entry.nanos as u64) { -//! Some(n) => tmp = n, //<- small arm, inline with comma -//! None => { -//! total_secs = total_secs -//! .checked_add(total_nanos / NANOS_PER_SEC as u64) -//! .expect("overflow in iter::sum over durations"); -//! } //<- big arm, needs brace added, and also semicolon^ -//! } -//! ``` -//! -//! The printing algorithm implementation in this crate accommodates all of -//! these situations with conditional punctuation tokens whose selection can be -//! deferred and populated after it's known that the group is or is not broken. - -#![doc(html_root_url = "https://docs.rs/prettyplease/0.2.37")] -#![allow( - clippy::bool_to_int_with_if, - clippy::cast_possible_wrap, - clippy::cast_sign_loss, - clippy::derive_partial_eq_without_eq, - clippy::doc_markdown, - clippy::enum_glob_use, - clippy::items_after_statements, - clippy::let_underscore_untyped, - clippy::match_like_matches_macro, - clippy::match_same_arms, - clippy::module_name_repetitions, - clippy::must_use_candidate, - clippy::needless_pass_by_value, - clippy::ref_option, - clippy::similar_names, - clippy::struct_excessive_bools, - clippy::too_many_lines, - clippy::unused_self, - clippy::vec_init_then_push -)] -#![cfg_attr(all(test, exhaustive), feature(non_exhaustive_omitted_patterns_lint))] - -mod algorithm; -mod attr; -mod classify; -mod convenience; -mod data; -mod expr; -mod file; -mod fixup; -mod generics; -mod item; -mod iter; -mod lifetime; -mod lit; -mod mac; -mod pat; -mod path; -mod precedence; -mod ring; -mod stmt; -mod token; -mod ty; - -use crate::algorithm::Printer; -use syn::File; - -// Target line width. -const MARGIN: isize = 89; - -// Number of spaces increment at each level of block indentation. -const INDENT: isize = 4; - -// Every line is allowed at least this much space, even if highly indented. 
-const MIN_SPACE: isize = 60; - -pub fn unparse(file: &File) -> String { - let mut p = Printer::new(); - p.file(file); - p.eof() -} diff --git a/vendor/prettyplease/src/lifetime.rs b/vendor/prettyplease/src/lifetime.rs deleted file mode 100644 index 665caa324c6e1f..00000000000000 --- a/vendor/prettyplease/src/lifetime.rs +++ /dev/null @@ -1,9 +0,0 @@ -use crate::algorithm::Printer; -use syn::Lifetime; - -impl Printer { - pub fn lifetime(&mut self, lifetime: &Lifetime) { - self.word("'"); - self.ident(&lifetime.ident); - } -} diff --git a/vendor/prettyplease/src/lit.rs b/vendor/prettyplease/src/lit.rs deleted file mode 100644 index 10a86e4b03e1fe..00000000000000 --- a/vendor/prettyplease/src/lit.rs +++ /dev/null @@ -1,57 +0,0 @@ -use crate::algorithm::Printer; -use proc_macro2::Literal; -use syn::{Lit, LitBool, LitByte, LitByteStr, LitCStr, LitChar, LitFloat, LitInt, LitStr}; - -impl Printer { - pub fn lit(&mut self, lit: &Lit) { - match lit { - #![cfg_attr(all(test, exhaustive), deny(non_exhaustive_omitted_patterns))] - Lit::Str(lit) => self.lit_str(lit), - Lit::ByteStr(lit) => self.lit_byte_str(lit), - Lit::CStr(lit) => self.lit_c_str(lit), - Lit::Byte(lit) => self.lit_byte(lit), - Lit::Char(lit) => self.lit_char(lit), - Lit::Int(lit) => self.lit_int(lit), - Lit::Float(lit) => self.lit_float(lit), - Lit::Bool(lit) => self.lit_bool(lit), - Lit::Verbatim(lit) => self.lit_verbatim(lit), - _ => unimplemented!("unknown Lit"), - } - } - - pub fn lit_str(&mut self, lit: &LitStr) { - self.word(lit.token().to_string()); - } - - fn lit_byte_str(&mut self, lit: &LitByteStr) { - self.word(lit.token().to_string()); - } - - fn lit_c_str(&mut self, lit: &LitCStr) { - self.word(lit.token().to_string()); - } - - fn lit_byte(&mut self, lit: &LitByte) { - self.word(lit.token().to_string()); - } - - fn lit_char(&mut self, lit: &LitChar) { - self.word(lit.token().to_string()); - } - - fn lit_int(&mut self, lit: &LitInt) { - self.word(lit.token().to_string()); - } - - fn lit_float(&mut self, lit: &LitFloat) { - self.word(lit.token().to_string()); - } - - fn lit_bool(&mut self, lit: &LitBool) { - self.word(if lit.value { "true" } else { "false" }); - } - - fn lit_verbatim(&mut self, token: &Literal) { - self.word(token.to_string()); - } -} diff --git a/vendor/prettyplease/src/mac.rs b/vendor/prettyplease/src/mac.rs deleted file mode 100644 index ddb2b5feebaa6e..00000000000000 --- a/vendor/prettyplease/src/mac.rs +++ /dev/null @@ -1,706 +0,0 @@ -use crate::algorithm::Printer; -use crate::path::PathKind; -use crate::token::Token; -use crate::INDENT; -use proc_macro2::{Delimiter, Spacing, TokenStream}; -use syn::{Ident, Macro, MacroDelimiter}; - -impl Printer { - pub fn mac(&mut self, mac: &Macro, ident: Option<&Ident>, semicolon: bool) { - if mac.path.is_ident("macro_rules") { - if let Some(ident) = ident { - self.macro_rules(ident, &mac.tokens); - return; - } - } - #[cfg(feature = "verbatim")] - if ident.is_none() && self.standard_library_macro(mac, semicolon) { - return; - } - self.path(&mac.path, PathKind::Simple); - self.word("!"); - if let Some(ident) = ident { - self.nbsp(); - self.ident(ident); - } - let (open, close, delimiter_break) = match mac.delimiter { - MacroDelimiter::Paren(_) => ("(", ")", Self::zerobreak as fn(&mut Self)), - MacroDelimiter::Brace(_) => (" {", "}", Self::hardbreak as fn(&mut Self)), - MacroDelimiter::Bracket(_) => ("[", "]", Self::zerobreak as fn(&mut Self)), - }; - self.word(open); - if !mac.tokens.is_empty() { - self.cbox(INDENT); - delimiter_break(self); - self.ibox(0); - 
self.macro_rules_tokens(mac.tokens.clone(), false); - self.end(); - delimiter_break(self); - self.offset(-INDENT); - self.end(); - } - self.word(close); - if semicolon { - self.word(";"); - } - } - - fn macro_rules(&mut self, name: &Ident, rules: &TokenStream) { - enum State { - Start, - Matcher, - Equal, - Greater, - Expander, - } - - use State::*; - - self.word("macro_rules! "); - self.ident(name); - self.word(" {"); - self.cbox(INDENT); - self.hardbreak_if_nonempty(); - let mut state = State::Start; - for tt in rules.clone() { - let token = Token::from(tt); - match (state, token) { - (Start, Token::Group(delimiter, stream)) => { - self.delimiter_open(delimiter); - if !stream.is_empty() { - self.cbox(INDENT); - self.zerobreak(); - self.ibox(0); - self.macro_rules_tokens(stream, true); - self.end(); - self.zerobreak(); - self.offset(-INDENT); - self.end(); - } - self.delimiter_close(delimiter); - state = Matcher; - } - (Matcher, Token::Punct('=', Spacing::Joint)) => { - self.word(" ="); - state = Equal; - } - (Equal, Token::Punct('>', Spacing::Alone)) => { - self.word(">"); - state = Greater; - } - (Greater, Token::Group(_delimiter, stream)) => { - self.word(" {"); - self.neverbreak(); - if !stream.is_empty() { - self.cbox(INDENT); - self.hardbreak(); - self.ibox(0); - self.macro_rules_tokens(stream, false); - self.end(); - self.hardbreak(); - self.offset(-INDENT); - self.end(); - } - self.word("}"); - state = Expander; - } - (Expander, Token::Punct(';', Spacing::Alone)) => { - self.word(";"); - self.hardbreak(); - state = Start; - } - _ => unimplemented!("bad macro_rules syntax"), - } - } - match state { - Start => {} - Expander => { - self.word(";"); - self.hardbreak(); - } - _ => self.hardbreak(), - } - self.offset(-INDENT); - self.end(); - self.word("}"); - } - - pub fn macro_rules_tokens(&mut self, stream: TokenStream, matcher: bool) { - #[derive(PartialEq)] - enum State { - Start, - Dollar, - DollarCrate, - DollarIdent, - DollarIdentColon, - DollarParen, - DollarParenSep, - Pound, - PoundBang, - Dot, - Colon, - Colon2, - Ident, - IdentBang, - Delim, - Other, - } - - use State::*; - - let mut state = Start; - let mut previous_is_joint = true; - for tt in stream { - let token = Token::from(tt); - let (needs_space, next_state) = match (&state, &token) { - (Dollar, Token::Ident(_)) if matcher => (false, DollarIdent), - (Dollar, Token::Ident(ident)) if ident == "crate" => (false, DollarCrate), - (Dollar, Token::Ident(_)) => (false, Other), - (DollarIdent, Token::Punct(':', Spacing::Alone)) => (false, DollarIdentColon), - (DollarIdentColon, Token::Ident(_)) => (false, Other), - (DollarParen, Token::Punct('+' | '*' | '?', Spacing::Alone)) => (false, Other), - (DollarParen, Token::Ident(_) | Token::Literal(_)) => (false, DollarParenSep), - (DollarParen, Token::Punct(_, Spacing::Joint)) => (false, DollarParen), - (DollarParen, Token::Punct(_, Spacing::Alone)) => (false, DollarParenSep), - (DollarParenSep, Token::Punct('+' | '*', _)) => (false, Other), - (Pound, Token::Punct('!', _)) => (false, PoundBang), - (Dollar, Token::Group(Delimiter::Parenthesis, _)) => (false, DollarParen), - (Pound | PoundBang, Token::Group(Delimiter::Bracket, _)) => (false, Other), - (Ident, Token::Group(Delimiter::Parenthesis | Delimiter::Bracket, _)) => { - (false, Delim) - } - (Ident, Token::Punct('!', Spacing::Alone)) => (false, IdentBang), - (IdentBang, Token::Group(Delimiter::Parenthesis | Delimiter::Bracket, _)) => { - (false, Other) - } - (Colon, Token::Punct(':', _)) => (false, Colon2), - (_, 
Token::Group(Delimiter::Parenthesis | Delimiter::Bracket, _)) => (true, Delim), - (_, Token::Group(Delimiter::Brace | Delimiter::None, _)) => (true, Other), - (_, Token::Ident(ident)) if !is_keyword(ident) => { - (state != Dot && state != Colon2, Ident) - } - (_, Token::Literal(lit)) if lit.to_string().ends_with('.') => (state != Dot, Other), - (_, Token::Literal(_)) => (state != Dot, Ident), - (_, Token::Punct(',' | ';', _)) => (false, Other), - (_, Token::Punct('.', _)) if !matcher => (state != Ident && state != Delim, Dot), - (_, Token::Punct(':', Spacing::Joint)) => { - (state != Ident && state != DollarCrate, Colon) - } - (_, Token::Punct('$', _)) => (true, Dollar), - (_, Token::Punct('#', _)) => (true, Pound), - (_, _) => (true, Other), - }; - if !previous_is_joint { - if needs_space { - self.space(); - } else if let Token::Punct('.', _) = token { - self.zerobreak(); - } - } - previous_is_joint = match token { - Token::Punct(_, Spacing::Joint) | Token::Punct('$', _) => true, - _ => false, - }; - self.single_token( - token, - if matcher { - |printer, stream| printer.macro_rules_tokens(stream, true) - } else { - |printer, stream| printer.macro_rules_tokens(stream, false) - }, - ); - state = next_state; - } - } -} - -pub(crate) fn requires_semi(delimiter: &MacroDelimiter) -> bool { - match delimiter { - MacroDelimiter::Paren(_) | MacroDelimiter::Bracket(_) => true, - MacroDelimiter::Brace(_) => false, - } -} - -fn is_keyword(ident: &Ident) -> bool { - match ident.to_string().as_str() { - "as" | "async" | "await" | "box" | "break" | "const" | "continue" | "crate" | "dyn" - | "else" | "enum" | "extern" | "fn" | "for" | "if" | "impl" | "in" | "let" | "loop" - | "macro" | "match" | "mod" | "move" | "mut" | "pub" | "ref" | "return" | "static" - | "struct" | "trait" | "type" | "unsafe" | "use" | "where" | "while" | "yield" => true, - _ => false, - } -} - -#[cfg(feature = "verbatim")] -mod standard_library { - use crate::algorithm::Printer; - use crate::expr; - use crate::fixup::FixupContext; - use crate::iter::IterDelimited; - use crate::path::PathKind; - use crate::INDENT; - use syn::ext::IdentExt; - use syn::parse::{Parse, ParseStream, Parser, Result}; - use syn::punctuated::Punctuated; - use syn::{ - parenthesized, token, Attribute, Expr, ExprAssign, ExprPath, Ident, Lit, Macro, Pat, Path, - Token, Type, Visibility, - }; - - enum KnownMacro { - Expr(Expr), - Exprs(Vec), - Cfg(Cfg), - Matches(Matches), - ThreadLocal(Vec), - VecArray(Punctuated), - VecRepeat { elem: Expr, n: Expr }, - } - - enum Cfg { - Eq(Ident, Option), - Call(Ident, Vec), - } - - struct Matches { - expression: Expr, - pattern: Pat, - guard: Option, - } - - struct ThreadLocal { - attrs: Vec, - vis: Visibility, - name: Ident, - ty: Type, - init: Expr, - } - - struct FormatArgs { - format_string: Expr, - args: Vec, - } - - impl Parse for FormatArgs { - fn parse(input: ParseStream) -> Result { - let format_string: Expr = input.parse()?; - - let mut args = Vec::new(); - while !input.is_empty() { - input.parse::()?; - if input.is_empty() { - break; - } - let arg = if input.peek(Ident::peek_any) - && input.peek2(Token![=]) - && !input.peek2(Token![==]) - { - let key = input.call(Ident::parse_any)?; - let eq_token: Token![=] = input.parse()?; - let value: Expr = input.parse()?; - Expr::Assign(ExprAssign { - attrs: Vec::new(), - left: Box::new(Expr::Path(ExprPath { - attrs: Vec::new(), - qself: None, - path: Path::from(key), - })), - eq_token, - right: Box::new(value), - }) - } else { - input.parse()? 
- }; - args.push(arg); - } - - Ok(FormatArgs { - format_string, - args, - }) - } - } - - impl KnownMacro { - fn parse_expr(input: ParseStream) -> Result { - let expr: Expr = input.parse()?; - Ok(KnownMacro::Expr(expr)) - } - - fn parse_expr_comma(input: ParseStream) -> Result { - let expr: Expr = input.parse()?; - input.parse::>()?; - Ok(KnownMacro::Exprs(vec![expr])) - } - - fn parse_exprs(input: ParseStream) -> Result { - let exprs = input.parse_terminated(Expr::parse, Token![,])?; - Ok(KnownMacro::Exprs(Vec::from_iter(exprs))) - } - - fn parse_assert(input: ParseStream) -> Result { - let mut exprs = Vec::new(); - let cond: Expr = input.parse()?; - exprs.push(cond); - if input.parse::>()?.is_some() && !input.is_empty() { - let format_args: FormatArgs = input.parse()?; - exprs.push(format_args.format_string); - exprs.extend(format_args.args); - } - Ok(KnownMacro::Exprs(exprs)) - } - - fn parse_assert_cmp(input: ParseStream) -> Result { - let mut exprs = Vec::new(); - let left: Expr = input.parse()?; - exprs.push(left); - input.parse::()?; - let right: Expr = input.parse()?; - exprs.push(right); - if input.parse::>()?.is_some() && !input.is_empty() { - let format_args: FormatArgs = input.parse()?; - exprs.push(format_args.format_string); - exprs.extend(format_args.args); - } - Ok(KnownMacro::Exprs(exprs)) - } - - fn parse_cfg(input: ParseStream) -> Result { - fn parse_single(input: ParseStream) -> Result { - let ident: Ident = input.parse()?; - if input.peek(token::Paren) && (ident == "all" || ident == "any") { - let content; - parenthesized!(content in input); - let list = content.call(parse_multiple)?; - Ok(Cfg::Call(ident, list)) - } else if input.peek(token::Paren) && ident == "not" { - let content; - parenthesized!(content in input); - let cfg = content.call(parse_single)?; - content.parse::>()?; - Ok(Cfg::Call(ident, vec![cfg])) - } else if input.peek(Token![=]) { - input.parse::()?; - let string: Lit = input.parse()?; - Ok(Cfg::Eq(ident, Some(string))) - } else { - Ok(Cfg::Eq(ident, None)) - } - } - - fn parse_multiple(input: ParseStream) -> Result> { - let mut vec = Vec::new(); - while !input.is_empty() { - let cfg = input.call(parse_single)?; - vec.push(cfg); - if input.is_empty() { - break; - } - input.parse::()?; - } - Ok(vec) - } - - let cfg = input.call(parse_single)?; - input.parse::>()?; - Ok(KnownMacro::Cfg(cfg)) - } - - fn parse_env(input: ParseStream) -> Result { - let mut exprs = Vec::new(); - let name: Expr = input.parse()?; - exprs.push(name); - if input.parse::>()?.is_some() && !input.is_empty() { - let error_msg: Expr = input.parse()?; - exprs.push(error_msg); - input.parse::>()?; - } - Ok(KnownMacro::Exprs(exprs)) - } - - fn parse_format_args(input: ParseStream) -> Result { - let format_args: FormatArgs = input.parse()?; - let mut exprs = format_args.args; - exprs.insert(0, format_args.format_string); - Ok(KnownMacro::Exprs(exprs)) - } - - fn parse_matches(input: ParseStream) -> Result { - let expression: Expr = input.parse()?; - input.parse::()?; - let pattern = input.call(Pat::parse_multi_with_leading_vert)?; - let guard = if input.parse::>()?.is_some() { - Some(input.parse()?) 
- } else { - None - }; - input.parse::>()?; - Ok(KnownMacro::Matches(Matches { - expression, - pattern, - guard, - })) - } - - fn parse_thread_local(input: ParseStream) -> Result { - let mut items = Vec::new(); - while !input.is_empty() { - let attrs = input.call(Attribute::parse_outer)?; - let vis: Visibility = input.parse()?; - input.parse::()?; - let name: Ident = input.parse()?; - input.parse::()?; - let ty: Type = input.parse()?; - input.parse::()?; - let init: Expr = input.parse()?; - if input.is_empty() { - break; - } - input.parse::()?; - items.push(ThreadLocal { - attrs, - vis, - name, - ty, - init, - }); - } - Ok(KnownMacro::ThreadLocal(items)) - } - - fn parse_vec(input: ParseStream) -> Result { - if input.is_empty() { - return Ok(KnownMacro::VecArray(Punctuated::new())); - } - let first: Expr = input.parse()?; - if input.parse::>()?.is_some() { - let len: Expr = input.parse()?; - Ok(KnownMacro::VecRepeat { - elem: first, - n: len, - }) - } else { - let mut vec = Punctuated::new(); - vec.push_value(first); - while !input.is_empty() { - let comma: Token![,] = input.parse()?; - vec.push_punct(comma); - if input.is_empty() { - break; - } - let next: Expr = input.parse()?; - vec.push_value(next); - } - Ok(KnownMacro::VecArray(vec)) - } - } - - fn parse_write(input: ParseStream) -> Result { - let mut exprs = Vec::new(); - let dst: Expr = input.parse()?; - exprs.push(dst); - input.parse::()?; - let format_args: FormatArgs = input.parse()?; - exprs.push(format_args.format_string); - exprs.extend(format_args.args); - Ok(KnownMacro::Exprs(exprs)) - } - - fn parse_writeln(input: ParseStream) -> Result { - let mut exprs = Vec::new(); - let dst: Expr = input.parse()?; - exprs.push(dst); - if input.parse::>()?.is_some() && !input.is_empty() { - let format_args: FormatArgs = input.parse()?; - exprs.push(format_args.format_string); - exprs.extend(format_args.args); - } - Ok(KnownMacro::Exprs(exprs)) - } - } - - impl Printer { - pub fn standard_library_macro(&mut self, mac: &Macro, mut semicolon: bool) -> bool { - let name = mac.path.segments.last().unwrap().ident.to_string(); - let parser = match name.as_str() { - "addr_of" | "addr_of_mut" => KnownMacro::parse_expr, - "assert" | "debug_assert" => KnownMacro::parse_assert, - "assert_eq" | "assert_ne" | "debug_assert_eq" | "debug_assert_ne" => { - KnownMacro::parse_assert_cmp - } - "cfg" => KnownMacro::parse_cfg, - "compile_error" | "include" | "include_bytes" | "include_str" | "option_env" => { - KnownMacro::parse_expr_comma - } - "concat" | "concat_bytes" | "dbg" => KnownMacro::parse_exprs, - "const_format_args" | "eprint" | "eprintln" | "format" | "format_args" - | "format_args_nl" | "panic" | "print" | "println" | "todo" | "unimplemented" - | "unreachable" => KnownMacro::parse_format_args, - "env" => KnownMacro::parse_env, - "matches" => KnownMacro::parse_matches, - "thread_local" => KnownMacro::parse_thread_local, - "vec" => KnownMacro::parse_vec, - "write" => KnownMacro::parse_write, - "writeln" => KnownMacro::parse_writeln, - _ => return false, - }; - - let known_macro = match parser.parse2(mac.tokens.clone()) { - Ok(known_macro) => known_macro, - Err(_) => return false, - }; - - self.path(&mac.path, PathKind::Simple); - self.word("!"); - - match &known_macro { - KnownMacro::Expr(expr) => { - self.word("("); - self.cbox(INDENT); - self.zerobreak(); - self.expr(expr, FixupContext::NONE); - self.zerobreak(); - self.offset(-INDENT); - self.end(); - self.word(")"); - } - KnownMacro::Exprs(exprs) => { - self.word("("); - self.cbox(INDENT); - 
self.zerobreak(); - for elem in exprs.iter().delimited() { - self.expr(&elem, FixupContext::NONE); - self.trailing_comma(elem.is_last); - } - self.offset(-INDENT); - self.end(); - self.word(")"); - } - KnownMacro::Cfg(cfg) => { - self.word("("); - self.cfg(cfg); - self.word(")"); - } - KnownMacro::Matches(matches) => { - self.word("("); - self.cbox(INDENT); - self.zerobreak(); - self.expr(&matches.expression, FixupContext::NONE); - self.word(","); - self.space(); - self.pat(&matches.pattern); - if let Some(guard) = &matches.guard { - self.space(); - self.word("if "); - self.expr(guard, FixupContext::NONE); - } - self.zerobreak(); - self.offset(-INDENT); - self.end(); - self.word(")"); - } - KnownMacro::ThreadLocal(items) => { - self.word(" {"); - self.cbox(INDENT); - self.hardbreak_if_nonempty(); - for item in items { - self.outer_attrs(&item.attrs); - self.cbox(0); - self.visibility(&item.vis); - self.word("static "); - self.ident(&item.name); - self.word(": "); - self.ty(&item.ty); - self.word(" = "); - self.neverbreak(); - self.expr(&item.init, FixupContext::NONE); - self.word(";"); - self.end(); - self.hardbreak(); - } - self.offset(-INDENT); - self.end(); - self.word("}"); - semicolon = false; - } - KnownMacro::VecArray(vec) => { - if vec.is_empty() { - self.word("[]"); - } else if expr::simple_array(vec) { - self.cbox(INDENT); - self.word("["); - self.zerobreak(); - self.ibox(0); - for elem in vec.iter().delimited() { - self.expr(&elem, FixupContext::NONE); - if !elem.is_last { - self.word(","); - self.space(); - } - } - self.end(); - self.trailing_comma(true); - self.offset(-INDENT); - self.word("]"); - self.end(); - } else { - self.word("["); - self.cbox(INDENT); - self.zerobreak(); - for elem in vec.iter().delimited() { - self.expr(&elem, FixupContext::NONE); - self.trailing_comma(elem.is_last); - } - self.offset(-INDENT); - self.end(); - self.word("]"); - } - } - KnownMacro::VecRepeat { elem, n } => { - self.word("["); - self.cbox(INDENT); - self.zerobreak(); - self.expr(elem, FixupContext::NONE); - self.word(";"); - self.space(); - self.expr(n, FixupContext::NONE); - self.zerobreak(); - self.offset(-INDENT); - self.end(); - self.word("]"); - } - } - - if semicolon { - self.word(";"); - } - - true - } - - fn cfg(&mut self, cfg: &Cfg) { - match cfg { - Cfg::Eq(ident, value) => { - self.ident(ident); - if let Some(value) = value { - self.word(" = "); - self.lit(value); - } - } - Cfg::Call(ident, args) => { - self.ident(ident); - self.word("("); - self.cbox(INDENT); - self.zerobreak(); - for arg in args.iter().delimited() { - self.cfg(&arg); - self.trailing_comma(arg.is_last); - } - self.offset(-INDENT); - self.end(); - self.word(")"); - } - } - } - } -} diff --git a/vendor/prettyplease/src/pat.rs b/vendor/prettyplease/src/pat.rs deleted file mode 100644 index 23a38cbb3396f1..00000000000000 --- a/vendor/prettyplease/src/pat.rs +++ /dev/null @@ -1,254 +0,0 @@ -use crate::algorithm::Printer; -use crate::fixup::FixupContext; -use crate::iter::IterDelimited; -use crate::path::PathKind; -use crate::INDENT; -use proc_macro2::TokenStream; -use syn::{ - FieldPat, Pat, PatIdent, PatOr, PatParen, PatReference, PatRest, PatSlice, PatStruct, PatTuple, - PatTupleStruct, PatType, PatWild, -}; - -impl Printer { - pub fn pat(&mut self, pat: &Pat) { - match pat { - #![cfg_attr(all(test, exhaustive), deny(non_exhaustive_omitted_patterns))] - Pat::Const(pat) => self.expr_const(pat), - Pat::Ident(pat) => self.pat_ident(pat), - Pat::Lit(pat) => self.expr_lit(pat), - Pat::Macro(pat) => 
self.expr_macro(pat), - Pat::Or(pat) => self.pat_or(pat), - Pat::Paren(pat) => self.pat_paren(pat), - Pat::Path(pat) => self.expr_path(pat), - Pat::Range(pat) => self.expr_range(pat, FixupContext::NONE), - Pat::Reference(pat) => self.pat_reference(pat), - Pat::Rest(pat) => self.pat_rest(pat), - Pat::Slice(pat) => self.pat_slice(pat), - Pat::Struct(pat) => self.pat_struct(pat), - Pat::Tuple(pat) => self.pat_tuple(pat), - Pat::TupleStruct(pat) => self.pat_tuple_struct(pat), - Pat::Type(pat) => self.pat_type(pat), - Pat::Verbatim(pat) => self.pat_verbatim(pat), - Pat::Wild(pat) => self.pat_wild(pat), - _ => unimplemented!("unknown Pat"), - } - } - - fn pat_ident(&mut self, pat: &PatIdent) { - self.outer_attrs(&pat.attrs); - if pat.by_ref.is_some() { - self.word("ref "); - } - if pat.mutability.is_some() { - self.word("mut "); - } - self.ident(&pat.ident); - if let Some((_at_token, subpat)) = &pat.subpat { - self.word(" @ "); - self.pat(subpat); - } - } - - fn pat_or(&mut self, pat: &PatOr) { - self.outer_attrs(&pat.attrs); - let mut consistent_break = false; - for case in &pat.cases { - match case { - Pat::Lit(_) | Pat::Wild(_) => {} - _ => { - consistent_break = true; - break; - } - } - } - if consistent_break { - self.cbox(0); - } else { - self.ibox(0); - } - for case in pat.cases.iter().delimited() { - if !case.is_first { - self.space(); - self.word("| "); - } - self.pat(&case); - } - self.end(); - } - - fn pat_paren(&mut self, pat: &PatParen) { - self.outer_attrs(&pat.attrs); - self.word("("); - self.pat(&pat.pat); - self.word(")"); - } - - fn pat_reference(&mut self, pat: &PatReference) { - self.outer_attrs(&pat.attrs); - self.word("&"); - if pat.mutability.is_some() { - self.word("mut "); - } - self.pat(&pat.pat); - } - - fn pat_rest(&mut self, pat: &PatRest) { - self.outer_attrs(&pat.attrs); - self.word(".."); - } - - fn pat_slice(&mut self, pat: &PatSlice) { - self.outer_attrs(&pat.attrs); - self.word("["); - for elem in pat.elems.iter().delimited() { - self.pat(&elem); - self.trailing_comma(elem.is_last); - } - self.word("]"); - } - - fn pat_struct(&mut self, pat: &PatStruct) { - self.outer_attrs(&pat.attrs); - self.cbox(INDENT); - self.path(&pat.path, PathKind::Expr); - self.word(" {"); - self.space_if_nonempty(); - for field in pat.fields.iter().delimited() { - self.field_pat(&field); - self.trailing_comma_or_space(field.is_last && pat.rest.is_none()); - } - if let Some(rest) = &pat.rest { - self.pat_rest(rest); - self.space(); - } - self.offset(-INDENT); - self.end(); - self.word("}"); - } - - fn pat_tuple(&mut self, pat: &PatTuple) { - self.outer_attrs(&pat.attrs); - self.word("("); - self.cbox(INDENT); - self.zerobreak(); - for elem in pat.elems.iter().delimited() { - self.pat(&elem); - if pat.elems.len() == 1 { - if pat.elems.trailing_punct() { - self.word(","); - } - self.zerobreak(); - } else { - self.trailing_comma(elem.is_last); - } - } - self.offset(-INDENT); - self.end(); - self.word(")"); - } - - fn pat_tuple_struct(&mut self, pat: &PatTupleStruct) { - self.outer_attrs(&pat.attrs); - self.path(&pat.path, PathKind::Expr); - self.word("("); - self.cbox(INDENT); - self.zerobreak(); - for elem in pat.elems.iter().delimited() { - self.pat(&elem); - self.trailing_comma(elem.is_last); - } - self.offset(-INDENT); - self.end(); - self.word(")"); - } - - pub fn pat_type(&mut self, pat: &PatType) { - self.outer_attrs(&pat.attrs); - self.pat(&pat.pat); - self.word(": "); - self.ty(&pat.ty); - } - - #[cfg(not(feature = "verbatim"))] - fn pat_verbatim(&mut self, pat: &TokenStream) { - 
unimplemented!("Pat::Verbatim `{}`", pat); - } - - #[cfg(feature = "verbatim")] - fn pat_verbatim(&mut self, tokens: &TokenStream) { - use syn::parse::{Parse, ParseStream, Result}; - use syn::{braced, Attribute, Block, Token}; - - enum PatVerbatim { - Ellipsis, - Box(Pat), - Const(PatConst), - } - - struct PatConst { - attrs: Vec, - block: Block, - } - - impl Parse for PatVerbatim { - fn parse(input: ParseStream) -> Result { - let lookahead = input.lookahead1(); - if lookahead.peek(Token![box]) { - input.parse::()?; - let inner = Pat::parse_single(input)?; - Ok(PatVerbatim::Box(inner)) - } else if lookahead.peek(Token![const]) { - input.parse::()?; - let content; - let brace_token = braced!(content in input); - let attrs = content.call(Attribute::parse_inner)?; - let stmts = content.call(Block::parse_within)?; - Ok(PatVerbatim::Const(PatConst { - attrs, - block: Block { brace_token, stmts }, - })) - } else if lookahead.peek(Token![...]) { - input.parse::()?; - Ok(PatVerbatim::Ellipsis) - } else { - Err(lookahead.error()) - } - } - } - - let pat: PatVerbatim = match syn::parse2(tokens.clone()) { - Ok(pat) => pat, - Err(_) => unimplemented!("Pat::Verbatim `{}`", tokens), - }; - - match pat { - PatVerbatim::Ellipsis => { - self.word("..."); - } - PatVerbatim::Box(pat) => { - self.word("box "); - self.pat(&pat); - } - PatVerbatim::Const(pat) => { - self.word("const "); - self.cbox(INDENT); - self.small_block(&pat.block, &pat.attrs); - self.end(); - } - } - } - - fn pat_wild(&mut self, pat: &PatWild) { - self.outer_attrs(&pat.attrs); - self.word("_"); - } - - fn field_pat(&mut self, field_pat: &FieldPat) { - self.outer_attrs(&field_pat.attrs); - if field_pat.colon_token.is_some() { - self.member(&field_pat.member); - self.word(": "); - } - self.pat(&field_pat.pat); - } -} diff --git a/vendor/prettyplease/src/path.rs b/vendor/prettyplease/src/path.rs deleted file mode 100644 index 44428cc60f78ab..00000000000000 --- a/vendor/prettyplease/src/path.rs +++ /dev/null @@ -1,194 +0,0 @@ -use crate::algorithm::Printer; -use crate::iter::IterDelimited; -use crate::INDENT; -use std::ptr; -use syn::{ - AngleBracketedGenericArguments, AssocConst, AssocType, Constraint, GenericArgument, - ParenthesizedGenericArguments, Path, PathArguments, PathSegment, QSelf, -}; - -#[derive(Copy, Clone, PartialEq)] -pub enum PathKind { - // a::B - Simple, - // a::B - Type, - // a::B:: - Expr, -} - -impl Printer { - pub fn path(&mut self, path: &Path, kind: PathKind) { - assert!(!path.segments.is_empty()); - for segment in path.segments.iter().delimited() { - if !segment.is_first || path.leading_colon.is_some() { - self.word("::"); - } - self.path_segment(&segment, kind); - } - } - - pub fn path_segment(&mut self, segment: &PathSegment, kind: PathKind) { - self.ident(&segment.ident); - self.path_arguments(&segment.arguments, kind); - } - - fn path_arguments(&mut self, arguments: &PathArguments, kind: PathKind) { - match arguments { - PathArguments::None => {} - PathArguments::AngleBracketed(arguments) => { - self.angle_bracketed_generic_arguments(arguments, kind); - } - PathArguments::Parenthesized(arguments) => { - self.parenthesized_generic_arguments(arguments); - } - } - } - - fn generic_argument(&mut self, arg: &GenericArgument) { - match arg { - #![cfg_attr(all(test, exhaustive), deny(non_exhaustive_omitted_patterns))] - GenericArgument::Lifetime(lifetime) => self.lifetime(lifetime), - GenericArgument::Type(ty) => self.ty(ty), - GenericArgument::Const(expr) => self.const_argument(expr), - 
GenericArgument::AssocType(assoc) => self.assoc_type(assoc), - GenericArgument::AssocConst(assoc) => self.assoc_const(assoc), - GenericArgument::Constraint(constraint) => self.constraint(constraint), - _ => unimplemented!("unknown GenericArgument"), - } - } - - pub fn angle_bracketed_generic_arguments( - &mut self, - generic: &AngleBracketedGenericArguments, - path_kind: PathKind, - ) { - if generic.args.is_empty() || path_kind == PathKind::Simple { - return; - } - - if path_kind == PathKind::Expr { - self.word("::"); - } - self.word("<"); - self.cbox(INDENT); - self.zerobreak(); - - // Print lifetimes before types/consts/bindings, regardless of their - // order in self.args. - #[derive(Ord, PartialOrd, Eq, PartialEq)] - enum Group { - First, - Second, - } - fn group(arg: &GenericArgument) -> Group { - match arg { - #![cfg_attr(all(test, exhaustive), deny(non_exhaustive_omitted_patterns))] - GenericArgument::Lifetime(_) => Group::First, - GenericArgument::Type(_) - | GenericArgument::Const(_) - | GenericArgument::AssocType(_) - | GenericArgument::AssocConst(_) - | GenericArgument::Constraint(_) => Group::Second, - _ => Group::Second, - } - } - let last = generic.args.iter().max_by_key(|param| group(param)); - for current_group in [Group::First, Group::Second] { - for arg in &generic.args { - if group(arg) == current_group { - self.generic_argument(arg); - self.trailing_comma(ptr::eq(arg, last.unwrap())); - } - } - } - - self.offset(-INDENT); - self.end(); - self.word(">"); - } - - fn assoc_type(&mut self, assoc: &AssocType) { - self.ident(&assoc.ident); - if let Some(generics) = &assoc.generics { - self.angle_bracketed_generic_arguments(generics, PathKind::Type); - } - self.word(" = "); - self.ty(&assoc.ty); - } - - fn assoc_const(&mut self, assoc: &AssocConst) { - self.ident(&assoc.ident); - if let Some(generics) = &assoc.generics { - self.angle_bracketed_generic_arguments(generics, PathKind::Type); - } - self.word(" = "); - self.const_argument(&assoc.value); - } - - fn constraint(&mut self, constraint: &Constraint) { - self.ident(&constraint.ident); - if let Some(generics) = &constraint.generics { - self.angle_bracketed_generic_arguments(generics, PathKind::Type); - } - self.ibox(INDENT); - for bound in constraint.bounds.iter().delimited() { - if bound.is_first { - self.word(": "); - } else { - self.space(); - self.word("+ "); - } - self.type_param_bound(&bound); - } - self.end(); - } - - fn parenthesized_generic_arguments(&mut self, arguments: &ParenthesizedGenericArguments) { - self.cbox(INDENT); - self.word("("); - self.zerobreak(); - for ty in arguments.inputs.iter().delimited() { - self.ty(&ty); - self.trailing_comma(ty.is_last); - } - self.offset(-INDENT); - self.word(")"); - self.return_type(&arguments.output); - self.end(); - } - - pub fn qpath(&mut self, qself: &Option, path: &Path, kind: PathKind) { - let qself = if let Some(qself) = qself { - qself - } else { - self.path(path, kind); - return; - }; - - assert!(qself.position < path.segments.len()); - - self.word("<"); - self.ty(&qself.ty); - - let mut segments = path.segments.iter(); - if qself.position > 0 { - self.word(" as "); - for segment in segments.by_ref().take(qself.position).delimited() { - if !segment.is_first || path.leading_colon.is_some() { - self.word("::"); - } - self.path_segment(&segment, PathKind::Type); - if segment.is_last { - self.word(">"); - } - } - } else { - self.word(">"); - } - for segment in segments { - self.word("::"); - self.path_segment(segment, kind); - } - } -} diff --git 
a/vendor/prettyplease/src/precedence.rs b/vendor/prettyplease/src/precedence.rs deleted file mode 100644 index 03117d56de7537..00000000000000 --- a/vendor/prettyplease/src/precedence.rs +++ /dev/null @@ -1,148 +0,0 @@ -use syn::{ - AttrStyle, Attribute, BinOp, Expr, ExprArray, ExprAsync, ExprAwait, ExprBlock, ExprBreak, - ExprCall, ExprConst, ExprContinue, ExprField, ExprForLoop, ExprIf, ExprIndex, ExprInfer, - ExprLit, ExprLoop, ExprMacro, ExprMatch, ExprMethodCall, ExprParen, ExprPath, ExprRepeat, - ExprReturn, ExprStruct, ExprTry, ExprTryBlock, ExprTuple, ExprUnsafe, ExprWhile, ExprYield, - ReturnType, -}; - -// Reference: https://doc.rust-lang.org/reference/expressions.html#expression-precedence -#[derive(Copy, Clone, PartialEq, PartialOrd)] -pub enum Precedence { - // return, break, closures - Jump, - // = += -= *= /= %= &= |= ^= <<= >>= - Assign, - // .. ..= - Range, - // || - Or, - // && - And, - // let - Let, - // == != < > <= >= - Compare, - // | - BitOr, - // ^ - BitXor, - // & - BitAnd, - // << >> - Shift, - // + - - Sum, - // * / % - Product, - // as - Cast, - // unary - * ! & &mut - Prefix, - // paths, loops, function calls, array indexing, field expressions, method calls - Unambiguous, -} - -impl Precedence { - pub(crate) const MIN: Self = Precedence::Jump; - - pub(crate) fn of_binop(op: &BinOp) -> Self { - match op { - #![cfg_attr(all(test, exhaustive), deny(non_exhaustive_omitted_patterns))] - BinOp::Add(_) | BinOp::Sub(_) => Precedence::Sum, - BinOp::Mul(_) | BinOp::Div(_) | BinOp::Rem(_) => Precedence::Product, - BinOp::And(_) => Precedence::And, - BinOp::Or(_) => Precedence::Or, - BinOp::BitXor(_) => Precedence::BitXor, - BinOp::BitAnd(_) => Precedence::BitAnd, - BinOp::BitOr(_) => Precedence::BitOr, - BinOp::Shl(_) | BinOp::Shr(_) => Precedence::Shift, - - BinOp::Eq(_) - | BinOp::Lt(_) - | BinOp::Le(_) - | BinOp::Ne(_) - | BinOp::Ge(_) - | BinOp::Gt(_) => Precedence::Compare, - - BinOp::AddAssign(_) - | BinOp::SubAssign(_) - | BinOp::MulAssign(_) - | BinOp::DivAssign(_) - | BinOp::RemAssign(_) - | BinOp::BitXorAssign(_) - | BinOp::BitAndAssign(_) - | BinOp::BitOrAssign(_) - | BinOp::ShlAssign(_) - | BinOp::ShrAssign(_) => Precedence::Assign, - - _ => Precedence::MIN, - } - } - - pub(crate) fn of(e: &Expr) -> Self { - fn prefix_attrs(attrs: &[Attribute]) -> Precedence { - for attr in attrs { - if let AttrStyle::Outer = attr.style { - return Precedence::Prefix; - } - } - Precedence::Unambiguous - } - - match e { - #![cfg_attr(all(test, exhaustive), deny(non_exhaustive_omitted_patterns))] - Expr::Closure(e) => match e.output { - ReturnType::Default => Precedence::Jump, - ReturnType::Type(..) => prefix_attrs(&e.attrs), - }, - - Expr::Break(ExprBreak { expr, .. }) - | Expr::Return(ExprReturn { expr, .. }) - | Expr::Yield(ExprYield { expr, .. }) => match expr { - Some(_) => Precedence::Jump, - None => Precedence::Unambiguous, - }, - - Expr::Assign(_) => Precedence::Assign, - Expr::Range(_) => Precedence::Range, - Expr::Binary(e) => Precedence::of_binop(&e.op), - Expr::Let(_) => Precedence::Let, - Expr::Cast(_) => Precedence::Cast, - Expr::RawAddr(_) | Expr::Reference(_) | Expr::Unary(_) => Precedence::Prefix, - - Expr::Array(ExprArray { attrs, .. }) - | Expr::Async(ExprAsync { attrs, .. }) - | Expr::Await(ExprAwait { attrs, .. }) - | Expr::Block(ExprBlock { attrs, .. }) - | Expr::Call(ExprCall { attrs, .. }) - | Expr::Const(ExprConst { attrs, .. }) - | Expr::Continue(ExprContinue { attrs, .. }) - | Expr::Field(ExprField { attrs, .. }) - | Expr::ForLoop(ExprForLoop { attrs, .. 
}) - | Expr::If(ExprIf { attrs, .. }) - | Expr::Index(ExprIndex { attrs, .. }) - | Expr::Infer(ExprInfer { attrs, .. }) - | Expr::Lit(ExprLit { attrs, .. }) - | Expr::Loop(ExprLoop { attrs, .. }) - | Expr::Macro(ExprMacro { attrs, .. }) - | Expr::Match(ExprMatch { attrs, .. }) - | Expr::MethodCall(ExprMethodCall { attrs, .. }) - | Expr::Paren(ExprParen { attrs, .. }) - | Expr::Path(ExprPath { attrs, .. }) - | Expr::Repeat(ExprRepeat { attrs, .. }) - | Expr::Struct(ExprStruct { attrs, .. }) - | Expr::Try(ExprTry { attrs, .. }) - | Expr::TryBlock(ExprTryBlock { attrs, .. }) - | Expr::Tuple(ExprTuple { attrs, .. }) - | Expr::Unsafe(ExprUnsafe { attrs, .. }) - | Expr::While(ExprWhile { attrs, .. }) => prefix_attrs(attrs), - - Expr::Group(e) => Precedence::of(&e.expr), - - Expr::Verbatim(_) => Precedence::Unambiguous, - - _ => Precedence::Unambiguous, - } - } -} diff --git a/vendor/prettyplease/src/ring.rs b/vendor/prettyplease/src/ring.rs deleted file mode 100644 index 882a988ecd3ad7..00000000000000 --- a/vendor/prettyplease/src/ring.rs +++ /dev/null @@ -1,81 +0,0 @@ -use std::collections::VecDeque; -use std::ops::{Index, IndexMut, Range}; - -pub struct RingBuffer { - data: VecDeque, - // Abstract index of data[0] in infinitely sized queue - offset: usize, -} - -impl RingBuffer { - pub fn new() -> Self { - RingBuffer { - data: VecDeque::new(), - offset: 0, - } - } - - pub fn is_empty(&self) -> bool { - self.data.is_empty() - } - - pub fn len(&self) -> usize { - self.data.len() - } - - pub fn push(&mut self, value: T) -> usize { - let index = self.offset + self.data.len(); - self.data.push_back(value); - index - } - - pub fn clear(&mut self) { - self.data.clear(); - } - - pub fn index_range(&self) -> Range { - self.offset..self.offset + self.data.len() - } - - pub fn first(&self) -> &T { - &self.data[0] - } - - pub fn first_mut(&mut self) -> &mut T { - &mut self.data[0] - } - - pub fn pop_first(&mut self) -> T { - self.offset += 1; - self.data.pop_front().unwrap() - } - - pub fn last(&self) -> &T { - self.data.back().unwrap() - } - - pub fn last_mut(&mut self) -> &mut T { - self.data.back_mut().unwrap() - } - - pub fn second_last(&self) -> &T { - &self.data[self.data.len() - 2] - } - - pub fn pop_last(&mut self) { - self.data.pop_back().unwrap(); - } -} - -impl Index for RingBuffer { - type Output = T; - fn index(&self, index: usize) -> &Self::Output { - &self.data[index.checked_sub(self.offset).unwrap()] - } -} - -impl IndexMut for RingBuffer { - fn index_mut(&mut self, index: usize) -> &mut Self::Output { - &mut self.data[index.checked_sub(self.offset).unwrap()] - } -} diff --git a/vendor/prettyplease/src/stmt.rs b/vendor/prettyplease/src/stmt.rs deleted file mode 100644 index ce58200e1977e8..00000000000000 --- a/vendor/prettyplease/src/stmt.rs +++ /dev/null @@ -1,221 +0,0 @@ -use crate::algorithm::Printer; -use crate::classify; -use crate::expr; -use crate::fixup::FixupContext; -use crate::mac; -use crate::INDENT; -use syn::{BinOp, Expr, Stmt}; - -impl Printer { - pub fn stmt(&mut self, stmt: &Stmt, is_last: bool) { - match stmt { - Stmt::Local(local) => { - self.outer_attrs(&local.attrs); - self.ibox(0); - self.word("let "); - self.pat(&local.pat); - if let Some(local_init) = &local.init { - self.word(" = "); - self.neverbreak(); - self.subexpr( - &local_init.expr, - local_init.diverge.is_some() - && classify::expr_trailing_brace(&local_init.expr), - FixupContext::NONE, - ); - if let Some((_else, diverge)) = &local_init.diverge { - self.space(); - self.word("else "); - self.end(); - 
self.neverbreak(); - self.cbox(INDENT); - if let Some(expr) = expr::simple_block(diverge) { - self.small_block(&expr.block, &[]); - } else { - self.expr_as_small_block(diverge, INDENT); - } - } - } - self.end(); - self.word(";"); - self.hardbreak(); - } - Stmt::Item(item) => self.item(item), - Stmt::Expr(expr, None) => { - if break_after(expr) { - self.ibox(0); - self.expr_beginning_of_line(expr, false, true, FixupContext::new_stmt()); - if add_semi(expr) { - self.word(";"); - } - self.end(); - self.hardbreak(); - } else { - self.expr_beginning_of_line(expr, false, true, FixupContext::new_stmt()); - } - } - Stmt::Expr(expr, Some(_semi)) => { - if let Expr::Verbatim(tokens) = expr { - if tokens.is_empty() { - return; - } - } - self.ibox(0); - self.expr_beginning_of_line(expr, false, true, FixupContext::new_stmt()); - if !remove_semi(expr) { - self.word(";"); - } - self.end(); - self.hardbreak(); - } - Stmt::Macro(stmt) => { - self.outer_attrs(&stmt.attrs); - let semicolon = stmt.semi_token.is_some() - || !is_last && mac::requires_semi(&stmt.mac.delimiter); - self.mac(&stmt.mac, None, semicolon); - self.hardbreak(); - } - } - } -} - -pub fn add_semi(expr: &Expr) -> bool { - match expr { - #![cfg_attr(all(test, exhaustive), deny(non_exhaustive_omitted_patterns))] - Expr::Assign(_) | Expr::Break(_) | Expr::Continue(_) | Expr::Return(_) | Expr::Yield(_) => { - true - } - Expr::Binary(expr) => - { - match expr.op { - #![cfg_attr(all(test, exhaustive), deny(non_exhaustive_omitted_patterns))] - BinOp::AddAssign(_) - | BinOp::SubAssign(_) - | BinOp::MulAssign(_) - | BinOp::DivAssign(_) - | BinOp::RemAssign(_) - | BinOp::BitXorAssign(_) - | BinOp::BitAndAssign(_) - | BinOp::BitOrAssign(_) - | BinOp::ShlAssign(_) - | BinOp::ShrAssign(_) => true, - BinOp::Add(_) - | BinOp::Sub(_) - | BinOp::Mul(_) - | BinOp::Div(_) - | BinOp::Rem(_) - | BinOp::And(_) - | BinOp::Or(_) - | BinOp::BitXor(_) - | BinOp::BitAnd(_) - | BinOp::BitOr(_) - | BinOp::Shl(_) - | BinOp::Shr(_) - | BinOp::Eq(_) - | BinOp::Lt(_) - | BinOp::Le(_) - | BinOp::Ne(_) - | BinOp::Ge(_) - | BinOp::Gt(_) => false, - _ => unimplemented!("unknown BinOp"), - } - } - Expr::Group(group) => add_semi(&group.expr), - - Expr::Array(_) - | Expr::Async(_) - | Expr::Await(_) - | Expr::Block(_) - | Expr::Call(_) - | Expr::Cast(_) - | Expr::Closure(_) - | Expr::Const(_) - | Expr::Field(_) - | Expr::ForLoop(_) - | Expr::If(_) - | Expr::Index(_) - | Expr::Infer(_) - | Expr::Let(_) - | Expr::Lit(_) - | Expr::Loop(_) - | Expr::Macro(_) - | Expr::Match(_) - | Expr::MethodCall(_) - | Expr::Paren(_) - | Expr::Path(_) - | Expr::Range(_) - | Expr::RawAddr(_) - | Expr::Reference(_) - | Expr::Repeat(_) - | Expr::Struct(_) - | Expr::Try(_) - | Expr::TryBlock(_) - | Expr::Tuple(_) - | Expr::Unary(_) - | Expr::Unsafe(_) - | Expr::Verbatim(_) - | Expr::While(_) => false, - - _ => false, - } -} - -pub fn break_after(expr: &Expr) -> bool { - if let Expr::Group(group) = expr { - if let Expr::Verbatim(verbatim) = group.expr.as_ref() { - return !verbatim.is_empty(); - } - } - true -} - -fn remove_semi(expr: &Expr) -> bool { - match expr { - #![cfg_attr(all(test, exhaustive), deny(non_exhaustive_omitted_patterns))] - Expr::ForLoop(_) | Expr::While(_) => true, - Expr::Group(group) => remove_semi(&group.expr), - Expr::If(expr) => match &expr.else_branch { - Some((_else_token, else_branch)) => remove_semi(else_branch), - None => true, - }, - - Expr::Array(_) - | Expr::Assign(_) - | Expr::Async(_) - | Expr::Await(_) - | Expr::Binary(_) - | Expr::Block(_) - | Expr::Break(_) - | 
Expr::Call(_) - | Expr::Cast(_) - | Expr::Closure(_) - | Expr::Continue(_) - | Expr::Const(_) - | Expr::Field(_) - | Expr::Index(_) - | Expr::Infer(_) - | Expr::Let(_) - | Expr::Lit(_) - | Expr::Loop(_) - | Expr::Macro(_) - | Expr::Match(_) - | Expr::MethodCall(_) - | Expr::Paren(_) - | Expr::Path(_) - | Expr::Range(_) - | Expr::RawAddr(_) - | Expr::Reference(_) - | Expr::Repeat(_) - | Expr::Return(_) - | Expr::Struct(_) - | Expr::Try(_) - | Expr::TryBlock(_) - | Expr::Tuple(_) - | Expr::Unary(_) - | Expr::Unsafe(_) - | Expr::Verbatim(_) - | Expr::Yield(_) => false, - - _ => false, - } -} diff --git a/vendor/prettyplease/src/token.rs b/vendor/prettyplease/src/token.rs deleted file mode 100644 index e41fd728a6f536..00000000000000 --- a/vendor/prettyplease/src/token.rs +++ /dev/null @@ -1,80 +0,0 @@ -use crate::algorithm::Printer; -use proc_macro2::{Delimiter, Ident, Literal, Spacing, TokenStream, TokenTree}; - -impl Printer { - pub fn single_token(&mut self, token: Token, group_contents: fn(&mut Self, TokenStream)) { - match token { - Token::Group(delimiter, stream) => self.token_group(delimiter, stream, group_contents), - Token::Ident(ident) => self.ident(&ident), - Token::Punct(ch, _spacing) => self.token_punct(ch), - Token::Literal(literal) => self.token_literal(&literal), - } - } - - fn token_group( - &mut self, - delimiter: Delimiter, - stream: TokenStream, - group_contents: fn(&mut Self, TokenStream), - ) { - self.delimiter_open(delimiter); - if !stream.is_empty() { - if delimiter == Delimiter::Brace { - self.space(); - } - group_contents(self, stream); - if delimiter == Delimiter::Brace { - self.space(); - } - } - self.delimiter_close(delimiter); - } - - pub fn ident(&mut self, ident: &Ident) { - self.word(ident.to_string()); - } - - pub fn token_punct(&mut self, ch: char) { - self.word(ch.to_string()); - } - - pub fn token_literal(&mut self, literal: &Literal) { - self.word(literal.to_string()); - } - - pub fn delimiter_open(&mut self, delimiter: Delimiter) { - self.word(match delimiter { - Delimiter::Parenthesis => "(", - Delimiter::Brace => "{", - Delimiter::Bracket => "[", - Delimiter::None => return, - }); - } - - pub fn delimiter_close(&mut self, delimiter: Delimiter) { - self.word(match delimiter { - Delimiter::Parenthesis => ")", - Delimiter::Brace => "}", - Delimiter::Bracket => "]", - Delimiter::None => return, - }); - } -} - -pub enum Token { - Group(Delimiter, TokenStream), - Ident(Ident), - Punct(char, Spacing), - Literal(Literal), -} - -impl From for Token { - fn from(tt: TokenTree) -> Self { - match tt { - TokenTree::Group(group) => Token::Group(group.delimiter(), group.stream()), - TokenTree::Ident(ident) => Token::Ident(ident), - TokenTree::Punct(punct) => Token::Punct(punct.as_char(), punct.spacing()), - TokenTree::Literal(literal) => Token::Literal(literal), - } - } -} diff --git a/vendor/prettyplease/src/ty.rs b/vendor/prettyplease/src/ty.rs deleted file mode 100644 index 36cd56879def61..00000000000000 --- a/vendor/prettyplease/src/ty.rs +++ /dev/null @@ -1,326 +0,0 @@ -use crate::algorithm::Printer; -use crate::fixup::FixupContext; -use crate::iter::IterDelimited; -use crate::path::PathKind; -use crate::INDENT; -use proc_macro2::TokenStream; -use syn::{ - Abi, BareFnArg, BareVariadic, ReturnType, Type, TypeArray, TypeBareFn, TypeGroup, - TypeImplTrait, TypeInfer, TypeMacro, TypeNever, TypeParen, TypePath, TypePtr, TypeReference, - TypeSlice, TypeTraitObject, TypeTuple, -}; - -impl Printer { - pub fn ty(&mut self, ty: &Type) { - match ty { - #![cfg_attr(all(test, 
exhaustive), deny(non_exhaustive_omitted_patterns))] - Type::Array(ty) => self.type_array(ty), - Type::BareFn(ty) => self.type_bare_fn(ty), - Type::Group(ty) => self.type_group(ty), - Type::ImplTrait(ty) => self.type_impl_trait(ty), - Type::Infer(ty) => self.type_infer(ty), - Type::Macro(ty) => self.type_macro(ty), - Type::Never(ty) => self.type_never(ty), - Type::Paren(ty) => self.type_paren(ty), - Type::Path(ty) => self.type_path(ty), - Type::Ptr(ty) => self.type_ptr(ty), - Type::Reference(ty) => self.type_reference(ty), - Type::Slice(ty) => self.type_slice(ty), - Type::TraitObject(ty) => self.type_trait_object(ty), - Type::Tuple(ty) => self.type_tuple(ty), - Type::Verbatim(ty) => self.type_verbatim(ty), - _ => unimplemented!("unknown Type"), - } - } - - fn type_array(&mut self, ty: &TypeArray) { - self.word("["); - self.ty(&ty.elem); - self.word("; "); - self.expr(&ty.len, FixupContext::NONE); - self.word("]"); - } - - fn type_bare_fn(&mut self, ty: &TypeBareFn) { - if let Some(bound_lifetimes) = &ty.lifetimes { - self.bound_lifetimes(bound_lifetimes); - } - if ty.unsafety.is_some() { - self.word("unsafe "); - } - if let Some(abi) = &ty.abi { - self.abi(abi); - } - self.word("fn("); - self.cbox(INDENT); - self.zerobreak(); - for bare_fn_arg in ty.inputs.iter().delimited() { - self.bare_fn_arg(&bare_fn_arg); - self.trailing_comma(bare_fn_arg.is_last && ty.variadic.is_none()); - } - if let Some(variadic) = &ty.variadic { - self.bare_variadic(variadic); - self.zerobreak(); - } - self.offset(-INDENT); - self.end(); - self.word(")"); - self.return_type(&ty.output); - } - - fn type_group(&mut self, ty: &TypeGroup) { - self.ty(&ty.elem); - } - - fn type_impl_trait(&mut self, ty: &TypeImplTrait) { - self.word("impl "); - for type_param_bound in ty.bounds.iter().delimited() { - if !type_param_bound.is_first { - self.word(" + "); - } - self.type_param_bound(&type_param_bound); - } - } - - fn type_infer(&mut self, ty: &TypeInfer) { - let _ = ty; - self.word("_"); - } - - fn type_macro(&mut self, ty: &TypeMacro) { - let semicolon = false; - self.mac(&ty.mac, None, semicolon); - } - - fn type_never(&mut self, ty: &TypeNever) { - let _ = ty; - self.word("!"); - } - - fn type_paren(&mut self, ty: &TypeParen) { - self.word("("); - self.ty(&ty.elem); - self.word(")"); - } - - fn type_path(&mut self, ty: &TypePath) { - self.qpath(&ty.qself, &ty.path, PathKind::Type); - } - - fn type_ptr(&mut self, ty: &TypePtr) { - self.word("*"); - if ty.mutability.is_some() { - self.word("mut "); - } else { - self.word("const "); - } - self.ty(&ty.elem); - } - - fn type_reference(&mut self, ty: &TypeReference) { - self.word("&"); - if let Some(lifetime) = &ty.lifetime { - self.lifetime(lifetime); - self.nbsp(); - } - if ty.mutability.is_some() { - self.word("mut "); - } - self.ty(&ty.elem); - } - - fn type_slice(&mut self, ty: &TypeSlice) { - self.word("["); - self.ty(&ty.elem); - self.word("]"); - } - - fn type_trait_object(&mut self, ty: &TypeTraitObject) { - self.word("dyn "); - for type_param_bound in ty.bounds.iter().delimited() { - if !type_param_bound.is_first { - self.word(" + "); - } - self.type_param_bound(&type_param_bound); - } - } - - fn type_tuple(&mut self, ty: &TypeTuple) { - self.word("("); - self.cbox(INDENT); - self.zerobreak(); - for elem in ty.elems.iter().delimited() { - self.ty(&elem); - if ty.elems.len() == 1 { - self.word(","); - self.zerobreak(); - } else { - self.trailing_comma(elem.is_last); - } - } - self.offset(-INDENT); - self.end(); - self.word(")"); - } - - #[cfg(not(feature = 
"verbatim"))] - fn type_verbatim(&mut self, ty: &TokenStream) { - unimplemented!("Type::Verbatim `{}`", ty); - } - - #[cfg(feature = "verbatim")] - fn type_verbatim(&mut self, tokens: &TokenStream) { - use syn::parse::{Parse, ParseStream, Result}; - use syn::punctuated::Punctuated; - use syn::{token, FieldsNamed, Token, TypeParamBound}; - - enum TypeVerbatim { - Ellipsis, - AnonStruct(AnonStruct), - AnonUnion(AnonUnion), - DynStar(DynStar), - MutSelf(MutSelf), - } - - struct AnonStruct { - fields: FieldsNamed, - } - - struct AnonUnion { - fields: FieldsNamed, - } - - struct DynStar { - bounds: Punctuated, - } - - struct MutSelf { - ty: Option, - } - - impl Parse for TypeVerbatim { - fn parse(input: ParseStream) -> Result { - let lookahead = input.lookahead1(); - if lookahead.peek(Token![struct]) { - input.parse::()?; - let fields: FieldsNamed = input.parse()?; - Ok(TypeVerbatim::AnonStruct(AnonStruct { fields })) - } else if lookahead.peek(Token![union]) && input.peek2(token::Brace) { - input.parse::()?; - let fields: FieldsNamed = input.parse()?; - Ok(TypeVerbatim::AnonUnion(AnonUnion { fields })) - } else if lookahead.peek(Token![dyn]) { - input.parse::()?; - input.parse::()?; - let bounds = input.parse_terminated(TypeParamBound::parse, Token![+])?; - Ok(TypeVerbatim::DynStar(DynStar { bounds })) - } else if lookahead.peek(Token![mut]) { - input.parse::()?; - input.parse::()?; - let ty = if input.is_empty() { - None - } else { - input.parse::()?; - let ty: Type = input.parse()?; - Some(ty) - }; - Ok(TypeVerbatim::MutSelf(MutSelf { ty })) - } else if lookahead.peek(Token![...]) { - input.parse::()?; - Ok(TypeVerbatim::Ellipsis) - } else { - Err(lookahead.error()) - } - } - } - - let ty: TypeVerbatim = match syn::parse2(tokens.clone()) { - Ok(ty) => ty, - Err(_) => unimplemented!("Type::Verbatim `{}`", tokens), - }; - - match ty { - TypeVerbatim::Ellipsis => { - self.word("..."); - } - TypeVerbatim::AnonStruct(ty) => { - self.cbox(INDENT); - self.word("struct {"); - self.hardbreak_if_nonempty(); - for field in &ty.fields.named { - self.field(field); - self.word(","); - self.hardbreak(); - } - self.offset(-INDENT); - self.end(); - self.word("}"); - } - TypeVerbatim::AnonUnion(ty) => { - self.cbox(INDENT); - self.word("union {"); - self.hardbreak_if_nonempty(); - for field in &ty.fields.named { - self.field(field); - self.word(","); - self.hardbreak(); - } - self.offset(-INDENT); - self.end(); - self.word("}"); - } - TypeVerbatim::DynStar(ty) => { - self.word("dyn* "); - for type_param_bound in ty.bounds.iter().delimited() { - if !type_param_bound.is_first { - self.word(" + "); - } - self.type_param_bound(&type_param_bound); - } - } - TypeVerbatim::MutSelf(bare_fn_arg) => { - self.word("mut self"); - if let Some(ty) = &bare_fn_arg.ty { - self.word(": "); - self.ty(ty); - } - } - } - } - - pub fn return_type(&mut self, ty: &ReturnType) { - match ty { - ReturnType::Default => {} - ReturnType::Type(_arrow, ty) => { - self.word(" -> "); - self.ty(ty); - } - } - } - - fn bare_fn_arg(&mut self, bare_fn_arg: &BareFnArg) { - self.outer_attrs(&bare_fn_arg.attrs); - if let Some((name, _colon)) = &bare_fn_arg.name { - self.ident(name); - self.word(": "); - } - self.ty(&bare_fn_arg.ty); - } - - fn bare_variadic(&mut self, variadic: &BareVariadic) { - self.outer_attrs(&variadic.attrs); - if let Some((name, _colon)) = &variadic.name { - self.ident(name); - self.word(": "); - } - self.word("..."); - } - - pub fn abi(&mut self, abi: &Abi) { - self.word("extern "); - if let Some(name) = &abi.name { - 
self.lit_str(name); - self.nbsp(); - } - } -} diff --git a/vendor/prettyplease/tests/test.rs b/vendor/prettyplease/tests/test.rs deleted file mode 100644 index aa6b849fcfdc15..00000000000000 --- a/vendor/prettyplease/tests/test.rs +++ /dev/null @@ -1,51 +0,0 @@ -use indoc::indoc; -use proc_macro2::{Delimiter, Group, TokenStream}; -use quote::quote; - -#[track_caller] -fn test(tokens: TokenStream, expected: &str) { - let syntax_tree: syn::File = syn::parse2(tokens).unwrap(); - let pretty = prettyplease::unparse(&syntax_tree); - assert_eq!(pretty, expected); -} - -#[test] -fn test_parenthesize_cond() { - let s = Group::new(Delimiter::None, quote!(Struct {})); - test( - quote! { - fn main() { - if #s == #s {} - } - }, - indoc! {" - fn main() { - if (Struct {}) == (Struct {}) {} - } - "}, - ); -} - -#[test] -fn test_parenthesize_match_guard() { - let expr_struct = Group::new(Delimiter::None, quote!(Struct {})); - let expr_binary = Group::new(Delimiter::None, quote!(true && false)); - test( - quote! { - fn main() { - match () { - () if let _ = #expr_struct => {} - () if let _ = #expr_binary => {} - } - } - }, - indoc! {" - fn main() { - match () { - () if let _ = Struct {} => {} - () if let _ = (true && false) => {} - } - } - "}, - ); -} diff --git a/vendor/prettyplease/tests/test_precedence.rs b/vendor/prettyplease/tests/test_precedence.rs deleted file mode 100644 index f1eec232ccbe09..00000000000000 --- a/vendor/prettyplease/tests/test_precedence.rs +++ /dev/null @@ -1,900 +0,0 @@ -use proc_macro2::{Ident, Span, TokenStream}; -use quote::ToTokens as _; -use std::mem; -use std::process::ExitCode; -use syn::punctuated::Punctuated; -use syn::visit_mut::{self, VisitMut}; -use syn::{ - token, AngleBracketedGenericArguments, Arm, BinOp, Block, Expr, ExprArray, ExprAssign, - ExprAsync, ExprAwait, ExprBinary, ExprBlock, ExprBreak, ExprCall, ExprCast, ExprClosure, - ExprConst, ExprContinue, ExprField, ExprForLoop, ExprIf, ExprIndex, ExprLet, ExprLit, ExprLoop, - ExprMacro, ExprMatch, ExprMethodCall, ExprPath, ExprRange, ExprRawAddr, ExprReference, - ExprReturn, ExprStruct, ExprTry, ExprTryBlock, ExprUnary, ExprUnsafe, ExprWhile, ExprYield, - File, GenericArgument, Generics, Item, ItemConst, Label, Lifetime, LifetimeParam, Lit, LitInt, - Macro, MacroDelimiter, Member, Pat, PatWild, Path, PathArguments, PathSegment, - PointerMutability, QSelf, RangeLimits, ReturnType, Stmt, StmtMacro, Token, Type, TypeInfer, - TypeParam, TypePath, UnOp, Visibility, -}; - -struct FlattenParens; - -impl VisitMut for FlattenParens { - fn visit_expr_mut(&mut self, e: &mut Expr) { - while let Expr::Paren(paren) = e { - *e = mem::replace(&mut *paren.expr, Expr::PLACEHOLDER); - } - visit_mut::visit_expr_mut(self, e); - } -} - -struct AsIfPrinted; - -impl VisitMut for AsIfPrinted { - fn visit_generics_mut(&mut self, generics: &mut Generics) { - if generics.params.is_empty() { - generics.lt_token = None; - generics.gt_token = None; - } - if let Some(where_clause) = &generics.where_clause { - if where_clause.predicates.is_empty() { - generics.where_clause = None; - } - } - visit_mut::visit_generics_mut(self, generics); - } - - fn visit_lifetime_param_mut(&mut self, param: &mut LifetimeParam) { - if param.bounds.is_empty() { - param.colon_token = None; - } - visit_mut::visit_lifetime_param_mut(self, param); - } - - fn visit_stmt_mut(&mut self, stmt: &mut Stmt) { - if let Stmt::Expr(expr, semi) = stmt { - if let Expr::Macro(e) = expr { - if match e.mac.delimiter { - MacroDelimiter::Brace(_) => true, - MacroDelimiter::Paren(_) | 
MacroDelimiter::Bracket(_) => semi.is_some(), - } { - let expr = match mem::replace(expr, Expr::PLACEHOLDER) { - Expr::Macro(expr) => expr, - _ => unreachable!(), - }; - *stmt = Stmt::Macro(StmtMacro { - attrs: expr.attrs, - mac: expr.mac, - semi_token: *semi, - }); - } - } - } - visit_mut::visit_stmt_mut(self, stmt); - } - - fn visit_type_param_mut(&mut self, param: &mut TypeParam) { - if param.bounds.is_empty() { - param.colon_token = None; - } - visit_mut::visit_type_param_mut(self, param); - } -} - -#[test] -fn test_permutations() -> ExitCode { - fn iter(depth: usize, f: &mut dyn FnMut(Expr)) { - let span = Span::call_site(); - - // Expr::Path - f(Expr::Path(ExprPath { - // `x` - attrs: Vec::new(), - qself: None, - path: Path::from(Ident::new("x", span)), - })); - if false { - f(Expr::Path(ExprPath { - // `x::` - attrs: Vec::new(), - qself: None, - path: Path { - leading_colon: None, - segments: Punctuated::from_iter([PathSegment { - ident: Ident::new("x", span), - arguments: PathArguments::AngleBracketed(AngleBracketedGenericArguments { - colon2_token: Some(Token![::](span)), - lt_token: Token![<](span), - args: Punctuated::from_iter([GenericArgument::Type(Type::Path( - TypePath { - qself: None, - path: Path::from(Ident::new("T", span)), - }, - ))]), - gt_token: Token![>](span), - }), - }]), - }, - })); - f(Expr::Path(ExprPath { - // `::CONST` - attrs: Vec::new(), - qself: Some(QSelf { - lt_token: Token![<](span), - ty: Box::new(Type::Path(TypePath { - qself: None, - path: Path::from(Ident::new("T", span)), - })), - position: 1, - as_token: Some(Token![as](span)), - gt_token: Token![>](span), - }), - path: Path { - leading_colon: None, - segments: Punctuated::from_iter([ - PathSegment::from(Ident::new("Trait", span)), - PathSegment::from(Ident::new("CONST", span)), - ]), - }, - })); - } - - let depth = match depth.checked_sub(1) { - Some(depth) => depth, - None => return, - }; - - // Expr::Assign - iter(depth, &mut |expr| { - iter(0, &mut |simple| { - f(Expr::Assign(ExprAssign { - // `x = $expr` - attrs: Vec::new(), - left: Box::new(simple.clone()), - eq_token: Token![=](span), - right: Box::new(expr.clone()), - })); - f(Expr::Assign(ExprAssign { - // `$expr = x` - attrs: Vec::new(), - left: Box::new(expr.clone()), - eq_token: Token![=](span), - right: Box::new(simple), - })); - }); - }); - - // Expr::Binary - iter(depth, &mut |expr| { - iter(0, &mut |simple| { - for op in [ - BinOp::Add(Token![+](span)), - //BinOp::Sub(Token![-](span)), - //BinOp::Mul(Token![*](span)), - //BinOp::Div(Token![/](span)), - //BinOp::Rem(Token![%](span)), - //BinOp::And(Token![&&](span)), - //BinOp::Or(Token![||](span)), - //BinOp::BitXor(Token![^](span)), - //BinOp::BitAnd(Token![&](span)), - //BinOp::BitOr(Token![|](span)), - //BinOp::Shl(Token![<<](span)), - //BinOp::Shr(Token![>>](span)), - //BinOp::Eq(Token![==](span)), - BinOp::Lt(Token![<](span)), - //BinOp::Le(Token![<=](span)), - //BinOp::Ne(Token![!=](span)), - //BinOp::Ge(Token![>=](span)), - //BinOp::Gt(Token![>](span)), - BinOp::ShlAssign(Token![<<=](span)), - ] { - f(Expr::Binary(ExprBinary { - // `x + $expr` - attrs: Vec::new(), - left: Box::new(simple.clone()), - op, - right: Box::new(expr.clone()), - })); - f(Expr::Binary(ExprBinary { - // `$expr + x` - attrs: Vec::new(), - left: Box::new(expr.clone()), - op, - right: Box::new(simple.clone()), - })); - } - }); - }); - - // Expr::Block - f(Expr::Block(ExprBlock { - // `{}` - attrs: Vec::new(), - label: None, - block: Block { - brace_token: token::Brace(span), - stmts: Vec::new(), - }, - 
})); - - // Expr::Break - f(Expr::Break(ExprBreak { - // `break` - attrs: Vec::new(), - break_token: Token![break](span), - label: None, - expr: None, - })); - iter(depth, &mut |expr| { - f(Expr::Break(ExprBreak { - // `break $expr` - attrs: Vec::new(), - break_token: Token![break](span), - label: None, - expr: Some(Box::new(expr)), - })); - }); - - // Expr::Call - iter(depth, &mut |expr| { - f(Expr::Call(ExprCall { - // `$expr()` - attrs: Vec::new(), - func: Box::new(expr), - paren_token: token::Paren(span), - args: Punctuated::new(), - })); - }); - - // Expr::Cast - iter(depth, &mut |expr| { - f(Expr::Cast(ExprCast { - // `$expr as T` - attrs: Vec::new(), - expr: Box::new(expr), - as_token: Token![as](span), - ty: Box::new(Type::Path(TypePath { - qself: None, - path: Path::from(Ident::new("T", span)), - })), - })); - }); - - // Expr::Closure - iter(depth, &mut |expr| { - f(Expr::Closure(ExprClosure { - // `|| $expr` - attrs: Vec::new(), - lifetimes: None, - constness: None, - movability: None, - asyncness: None, - capture: None, - or1_token: Token![|](span), - inputs: Punctuated::new(), - or2_token: Token![|](span), - output: ReturnType::Default, - body: Box::new(expr), - })); - }); - - // Expr::Field - iter(depth, &mut |expr| { - f(Expr::Field(ExprField { - // `$expr.field` - attrs: Vec::new(), - base: Box::new(expr), - dot_token: Token![.](span), - member: Member::Named(Ident::new("field", span)), - })); - }); - - // Expr::If - iter(depth, &mut |expr| { - f(Expr::If(ExprIf { - // `if $expr {}` - attrs: Vec::new(), - if_token: Token![if](span), - cond: Box::new(expr), - then_branch: Block { - brace_token: token::Brace(span), - stmts: Vec::new(), - }, - else_branch: None, - })); - }); - - // Expr::Let - iter(depth, &mut |expr| { - f(Expr::Let(ExprLet { - attrs: Vec::new(), - let_token: Token![let](span), - pat: Box::new(Pat::Wild(PatWild { - attrs: Vec::new(), - underscore_token: Token![_](span), - })), - eq_token: Token![=](span), - expr: Box::new(expr), - })); - }); - - // Expr::Range - f(Expr::Range(ExprRange { - // `..` - attrs: Vec::new(), - start: None, - limits: RangeLimits::HalfOpen(Token![..](span)), - end: None, - })); - iter(depth, &mut |expr| { - f(Expr::Range(ExprRange { - // `..$expr` - attrs: Vec::new(), - start: None, - limits: RangeLimits::HalfOpen(Token![..](span)), - end: Some(Box::new(expr.clone())), - })); - f(Expr::Range(ExprRange { - // `$expr..` - attrs: Vec::new(), - start: Some(Box::new(expr)), - limits: RangeLimits::HalfOpen(Token![..](span)), - end: None, - })); - }); - - // Expr::Reference - iter(depth, &mut |expr| { - f(Expr::Reference(ExprReference { - // `&$expr` - attrs: Vec::new(), - and_token: Token![&](span), - mutability: None, - expr: Box::new(expr), - })); - }); - - // Expr::Return - f(Expr::Return(ExprReturn { - // `return` - attrs: Vec::new(), - return_token: Token![return](span), - expr: None, - })); - iter(depth, &mut |expr| { - f(Expr::Return(ExprReturn { - // `return $expr` - attrs: Vec::new(), - return_token: Token![return](span), - expr: Some(Box::new(expr)), - })); - }); - - // Expr::Try - iter(depth, &mut |expr| { - f(Expr::Try(ExprTry { - // `$expr?` - attrs: Vec::new(), - expr: Box::new(expr), - question_token: Token![?](span), - })); - }); - - // Expr::Unary - iter(depth, &mut |expr| { - for op in [ - UnOp::Deref(Token![*](span)), - //UnOp::Not(Token![!](span)), - //UnOp::Neg(Token![-](span)), - ] { - f(Expr::Unary(ExprUnary { - // `*$expr` - attrs: Vec::new(), - op, - expr: Box::new(expr.clone()), - })); - } - }); - - if false { - // 
Expr::Array - f(Expr::Array(ExprArray { - // `[]` - attrs: Vec::new(), - bracket_token: token::Bracket(span), - elems: Punctuated::new(), - })); - - // Expr::Async - f(Expr::Async(ExprAsync { - // `async {}` - attrs: Vec::new(), - async_token: Token![async](span), - capture: None, - block: Block { - brace_token: token::Brace(span), - stmts: Vec::new(), - }, - })); - - // Expr::Await - iter(depth, &mut |expr| { - f(Expr::Await(ExprAwait { - // `$expr.await` - attrs: Vec::new(), - base: Box::new(expr), - dot_token: Token![.](span), - await_token: Token![await](span), - })); - }); - - // Expr::Block - f(Expr::Block(ExprBlock { - // `'a: {}` - attrs: Vec::new(), - label: Some(Label { - name: Lifetime::new("'a", span), - colon_token: Token![:](span), - }), - block: Block { - brace_token: token::Brace(span), - stmts: Vec::new(), - }, - })); - iter(depth, &mut |expr| { - f(Expr::Block(ExprBlock { - // `{ $expr }` - attrs: Vec::new(), - label: None, - block: Block { - brace_token: token::Brace(span), - stmts: Vec::from([Stmt::Expr(expr.clone(), None)]), - }, - })); - f(Expr::Block(ExprBlock { - // `{ $expr; }` - attrs: Vec::new(), - label: None, - block: Block { - brace_token: token::Brace(span), - stmts: Vec::from([Stmt::Expr(expr, Some(Token![;](span)))]), - }, - })); - }); - - // Expr::Break - f(Expr::Break(ExprBreak { - // `break 'a` - attrs: Vec::new(), - break_token: Token![break](span), - label: Some(Lifetime::new("'a", span)), - expr: None, - })); - iter(depth, &mut |expr| { - f(Expr::Break(ExprBreak { - // `break 'a $expr` - attrs: Vec::new(), - break_token: Token![break](span), - label: Some(Lifetime::new("'a", span)), - expr: Some(Box::new(expr)), - })); - }); - - // Expr::Closure - f(Expr::Closure(ExprClosure { - // `|| -> T {}` - attrs: Vec::new(), - lifetimes: None, - constness: None, - movability: None, - asyncness: None, - capture: None, - or1_token: Token![|](span), - inputs: Punctuated::new(), - or2_token: Token![|](span), - output: ReturnType::Type( - Token![->](span), - Box::new(Type::Path(TypePath { - qself: None, - path: Path::from(Ident::new("T", span)), - })), - ), - body: Box::new(Expr::Block(ExprBlock { - attrs: Vec::new(), - label: None, - block: Block { - brace_token: token::Brace(span), - stmts: Vec::new(), - }, - })), - })); - - // Expr::Const - f(Expr::Const(ExprConst { - // `const {}` - attrs: Vec::new(), - const_token: Token![const](span), - block: Block { - brace_token: token::Brace(span), - stmts: Vec::new(), - }, - })); - - // Expr::Continue - f(Expr::Continue(ExprContinue { - // `continue` - attrs: Vec::new(), - continue_token: Token![continue](span), - label: None, - })); - f(Expr::Continue(ExprContinue { - // `continue 'a` - attrs: Vec::new(), - continue_token: Token![continue](span), - label: Some(Lifetime::new("'a", span)), - })); - - // Expr::ForLoop - iter(depth, &mut |expr| { - f(Expr::ForLoop(ExprForLoop { - // `for _ in $expr {}` - attrs: Vec::new(), - label: None, - for_token: Token![for](span), - pat: Box::new(Pat::Wild(PatWild { - attrs: Vec::new(), - underscore_token: Token![_](span), - })), - in_token: Token![in](span), - expr: Box::new(expr.clone()), - body: Block { - brace_token: token::Brace(span), - stmts: Vec::new(), - }, - })); - f(Expr::ForLoop(ExprForLoop { - // `'a: for _ in $expr {}` - attrs: Vec::new(), - label: Some(Label { - name: Lifetime::new("'a", span), - colon_token: Token![:](span), - }), - for_token: Token![for](span), - pat: Box::new(Pat::Wild(PatWild { - attrs: Vec::new(), - underscore_token: Token![_](span), - })), - 
in_token: Token![in](span), - expr: Box::new(expr), - body: Block { - brace_token: token::Brace(span), - stmts: Vec::new(), - }, - })); - }); - - // Expr::Index - iter(depth, &mut |expr| { - f(Expr::Index(ExprIndex { - // `$expr[0]` - attrs: Vec::new(), - expr: Box::new(expr), - bracket_token: token::Bracket(span), - index: Box::new(Expr::Lit(ExprLit { - attrs: Vec::new(), - lit: Lit::Int(LitInt::new("0", span)), - })), - })); - }); - - // Expr::Loop - f(Expr::Loop(ExprLoop { - // `loop {}` - attrs: Vec::new(), - label: None, - loop_token: Token![loop](span), - body: Block { - brace_token: token::Brace(span), - stmts: Vec::new(), - }, - })); - f(Expr::Loop(ExprLoop { - // `'a: loop {}` - attrs: Vec::new(), - label: Some(Label { - name: Lifetime::new("'a", span), - colon_token: Token![:](span), - }), - loop_token: Token![loop](span), - body: Block { - brace_token: token::Brace(span), - stmts: Vec::new(), - }, - })); - - // Expr::Macro - f(Expr::Macro(ExprMacro { - // `m!()` - attrs: Vec::new(), - mac: Macro { - path: Path::from(Ident::new("m", span)), - bang_token: Token![!](span), - delimiter: MacroDelimiter::Paren(token::Paren(span)), - tokens: TokenStream::new(), - }, - })); - f(Expr::Macro(ExprMacro { - // `m! {}` - attrs: Vec::new(), - mac: Macro { - path: Path::from(Ident::new("m", span)), - bang_token: Token![!](span), - delimiter: MacroDelimiter::Brace(token::Brace(span)), - tokens: TokenStream::new(), - }, - })); - - // Expr::Match - iter(depth, &mut |expr| { - f(Expr::Match(ExprMatch { - // `match $expr {}` - attrs: Vec::new(), - match_token: Token![match](span), - expr: Box::new(expr.clone()), - brace_token: token::Brace(span), - arms: Vec::new(), - })); - f(Expr::Match(ExprMatch { - // `match x { _ => $expr }` - attrs: Vec::new(), - match_token: Token![match](span), - expr: Box::new(Expr::Path(ExprPath { - attrs: Vec::new(), - qself: None, - path: Path::from(Ident::new("x", span)), - })), - brace_token: token::Brace(span), - arms: Vec::from([Arm { - attrs: Vec::new(), - pat: Pat::Wild(PatWild { - attrs: Vec::new(), - underscore_token: Token![_](span), - }), - guard: None, - fat_arrow_token: Token![=>](span), - body: Box::new(expr.clone()), - comma: None, - }]), - })); - f(Expr::Match(ExprMatch { - // `match x { _ if $expr => {} }` - attrs: Vec::new(), - match_token: Token![match](span), - expr: Box::new(Expr::Path(ExprPath { - attrs: Vec::new(), - qself: None, - path: Path::from(Ident::new("x", span)), - })), - brace_token: token::Brace(span), - arms: Vec::from([Arm { - attrs: Vec::new(), - pat: Pat::Wild(PatWild { - attrs: Vec::new(), - underscore_token: Token![_](span), - }), - guard: Some((Token![if](span), Box::new(expr))), - fat_arrow_token: Token![=>](span), - body: Box::new(Expr::Block(ExprBlock { - attrs: Vec::new(), - label: None, - block: Block { - brace_token: token::Brace(span), - stmts: Vec::new(), - }, - })), - comma: None, - }]), - })); - }); - - // Expr::MethodCall - iter(depth, &mut |expr| { - f(Expr::MethodCall(ExprMethodCall { - // `$expr.method()` - attrs: Vec::new(), - receiver: Box::new(expr.clone()), - dot_token: Token![.](span), - method: Ident::new("method", span), - turbofish: None, - paren_token: token::Paren(span), - args: Punctuated::new(), - })); - f(Expr::MethodCall(ExprMethodCall { - // `$expr.method::()` - attrs: Vec::new(), - receiver: Box::new(expr), - dot_token: Token![.](span), - method: Ident::new("method", span), - turbofish: Some(AngleBracketedGenericArguments { - colon2_token: Some(Token![::](span)), - lt_token: Token![<](span), - args: 
Punctuated::from_iter([GenericArgument::Type(Type::Path( - TypePath { - qself: None, - path: Path::from(Ident::new("T", span)), - }, - ))]), - gt_token: Token![>](span), - }), - paren_token: token::Paren(span), - args: Punctuated::new(), - })); - }); - - // Expr::RawAddr - iter(depth, &mut |expr| { - f(Expr::RawAddr(ExprRawAddr { - // `&raw const $expr` - attrs: Vec::new(), - and_token: Token![&](span), - raw: Token![raw](span), - mutability: PointerMutability::Const(Token![const](span)), - expr: Box::new(expr), - })); - }); - - // Expr::Struct - f(Expr::Struct(ExprStruct { - // `Struct {}` - attrs: Vec::new(), - qself: None, - path: Path::from(Ident::new("Struct", span)), - brace_token: token::Brace(span), - fields: Punctuated::new(), - dot2_token: None, - rest: None, - })); - - // Expr::TryBlock - f(Expr::TryBlock(ExprTryBlock { - // `try {}` - attrs: Vec::new(), - try_token: Token![try](span), - block: Block { - brace_token: token::Brace(span), - stmts: Vec::new(), - }, - })); - - // Expr::Unsafe - f(Expr::Unsafe(ExprUnsafe { - // `unsafe {}` - attrs: Vec::new(), - unsafe_token: Token![unsafe](span), - block: Block { - brace_token: token::Brace(span), - stmts: Vec::new(), - }, - })); - - // Expr::While - iter(depth, &mut |expr| { - f(Expr::While(ExprWhile { - // `while $expr {}` - attrs: Vec::new(), - label: None, - while_token: Token![while](span), - cond: Box::new(expr.clone()), - body: Block { - brace_token: token::Brace(span), - stmts: Vec::new(), - }, - })); - f(Expr::While(ExprWhile { - // `'a: while $expr {}` - attrs: Vec::new(), - label: Some(Label { - name: Lifetime::new("'a", span), - colon_token: Token![:](span), - }), - while_token: Token![while](span), - cond: Box::new(expr), - body: Block { - brace_token: token::Brace(span), - stmts: Vec::new(), - }, - })); - }); - - // Expr::Yield - f(Expr::Yield(ExprYield { - // `yield` - attrs: Vec::new(), - yield_token: Token![yield](span), - expr: None, - })); - iter(depth, &mut |expr| { - f(Expr::Yield(ExprYield { - // `yield $expr` - attrs: Vec::new(), - yield_token: Token![yield](span), - expr: Some(Box::new(expr)), - })); - }); - } - } - - let mut failures = 0; - macro_rules! 
fail { - ($($message:tt)*) => {{ - eprintln!($($message)*); - failures += 1; - return; - }}; - } - let mut assert = |mut original: Expr| { - let span = Span::call_site(); - // `const _: () = $expr;` - let pretty = prettyplease::unparse(&File { - shebang: None, - attrs: Vec::new(), - items: Vec::from([Item::Const(ItemConst { - attrs: Vec::new(), - vis: Visibility::Inherited, - const_token: Token![const](span), - ident: Ident::from(Token![_](span)), - generics: Generics::default(), - colon_token: Token![:](span), - ty: Box::new(Type::Infer(TypeInfer { - underscore_token: Token![_](span), - })), - eq_token: Token![=](span), - expr: Box::new(original.clone()), - semi_token: Token![;](span), - })]), - }); - let mut parsed = match syn::parse_file(&pretty) { - Ok(parsed) => parsed, - _ => fail!("failed to parse: {pretty}{original:#?}"), - }; - let item = match parsed.items.as_mut_slice() { - [Item::Const(item)] => item, - _ => unreachable!(), - }; - let mut parsed = mem::replace(&mut *item.expr, Expr::PLACEHOLDER); - AsIfPrinted.visit_expr_mut(&mut original); - FlattenParens.visit_expr_mut(&mut parsed); - if original != parsed { - fail!( - "before: {}\n{:#?}\nafter: {}\n{:#?}", - original.to_token_stream(), - original, - parsed.to_token_stream(), - parsed, - ); - } - if pretty.contains("(||") { - // https://github.com/dtolnay/prettyplease/issues/99 - return; - } - let no_paren = pretty.replace(['(', ')'], ""); - if pretty != no_paren { - if let Ok(mut parsed2) = syn::parse_file(&no_paren) { - let item = match parsed2.items.as_mut_slice() { - [Item::Const(item)] => item, - _ => unreachable!(), - }; - if original == *item.expr { - fail!("redundant parens: {}", pretty); - } - } - } - }; - - iter(if cfg!(debug_assertions) { 3 } else { 4 }, &mut assert); - if failures > 0 { - eprintln!("FAILURES: {failures}"); - ExitCode::FAILURE - } else { - ExitCode::SUCCESS - } -} diff --git a/vendor/proc-macro2/.cargo-checksum.json b/vendor/proc-macro2/.cargo-checksum.json deleted file mode 100644 index 9d997fa6ddedf1..00000000000000 --- a/vendor/proc-macro2/.cargo-checksum.json +++ /dev/null @@ -1 +0,0 @@ 
-{"files":{".cargo_vcs_info.json":"0c0e9279875a3f985b09df73eefab42d9e6f5566f26485c9e3a057e887d219b7",".github/FUNDING.yml":"b017158736b3c9751a2d21edfce7fe61c8954e2fced8da8dd3013c2f3e295bd9",".github/workflows/ci.yml":"b2004c92e8985c58c1338202b2ebef0f25fa50de01e9101fe46a17000ca59962","Cargo.lock":"4afb839b0f3299f791ccdda8213faddff1ee208d64a14e883b4e24ee48957aea","Cargo.toml":"8c059fba2000e51a2d88025b8ebdc7a0b0e26b3f67cb3baa96c222dafb9c31e4","Cargo.toml.orig":"42bf3a4709d2fcc1027a9c68f525054ea542683cedf626ef2c76b6b2ac63a5dc","LICENSE-APACHE":"62c7a1e35f56406896d7aa7ca52d0cc0d272ac022b5d2796e7d6905db8a3636a","LICENSE-MIT":"23f18e03dc49df91622fe2a76176497404e46ced8a715d9d2b67a7446571cca3","README.md":"e60d0a33eb3bfc8583866bb84ca53fbae5e5cb39b67acfbb3c1f35dae41e19a9","build.rs":"baeb20b52f6b536be8657a566591a507bb2e34a45cf8baa42b135510a0c3c729","rust-toolchain.toml":"6bbb61302978c736b2da03e4fb40e3beab908f85d533ab46fd541e637b5f3e0f","src/detection.rs":"ed9a5f9a979ab01247d7a68eeb1afa3c13209334c5bfff0f9289cb07e5bb4e8b","src/extra.rs":"29f094473279a29b71c3cc9f5fa27c2e2c30c670390cf7e4b7cf451486cc857e","src/fallback.rs":"962e1897fefb138101ae3f6fda9c46cecff787550cdfb9133066326379464d90","src/lib.rs":"c07a2ad1ccbda629d0f2018d6d7762f4dcb955e8d0714ffcf9c7f3d5cd0020f2","src/location.rs":"9225c5a55f03b56cce42bc55ceb509e8216a5e0b24c94aa1cd071b04e3d6c15f","src/marker.rs":"c11c5a1be8bdf18be3fcd224393f350a9aae7ce282e19ce583c84910c6903a8f","src/num.rs":"82d625cbcd255965e46231ac3af1b74ab8bff9787c799e8ed1f978de146cb0b5","src/parse.rs":"0c380fdbe8795d41e08a40e3a1e67e505e9aa9398277a6a794af7d96fab06ac6","src/probe.rs":"2b57e8ebf46a7c60ee2762f23f16d24ee9ddb8f1acd0a7faf7a99cf2e4187151","src/probe/proc_macro_span.rs":"f3f9c728438060c9450d4568621beca9125f559eb65faab9574d2e43e9b49643","src/probe/proc_macro_span_file.rs":"a20a1920d121b153ce33c8e2ea203b9370606744847b62e8ffd0c657d2545778","src/probe/proc_macro_span_location.rs":"71a4768f65f8a87e5a3c2bc2e05fb84d2562a0f4733780e9f919563f25ee07dc","src/rcvec.rs":"a159d246cac59aae2d51b899471ce34766f51f3c11c376ac36ee501ee3f12a7a","src/rustc_literal_escaper.rs":"188cbe8fffe7af3899977530cbb1b6c0b1dff51623db0ec115db1e082159e7b6","src/wrapper.rs":"057b7baa778e8205c0af47405c1af077d4fd19318ed4b79bd195ddceb4da0b15","tests/comments.rs":"11520f6baee23b9258db904f4c256fd3877493f754e2b99041f73a330e74a911","tests/features.rs":"7e52c0c801019b271bf11a994c2e1799a1429b0c1a3a34e551a23971797fe412","tests/marker.rs":"f16299460587d6c65603ed809f1a3b81853e4b99d6cb44d0b68bb07259d7e9f8","tests/test.rs":"c590a13e38c2b5d92a6181433652925dd9d19dd404c6839290abc7acbc3cb5a3","tests/test_fmt.rs":"b7743b612af65f2c88cbe109d50a093db7aa7e87f9e37bf45b7bbaeb240aa020","tests/test_size.rs":"62d8373ea46b669b87bc90a9c49b6d02f90ff4c21f9a25acebf60c9926e01fb7"},"package":"5ee95bc4ef87b8d5ba32e8b7714ccc834865276eab0aed5c9958d00ec45f49e8"} \ No newline at end of file diff --git a/vendor/proc-macro2/.cargo_vcs_info.json b/vendor/proc-macro2/.cargo_vcs_info.json deleted file mode 100644 index bdeb94e109b454..00000000000000 --- a/vendor/proc-macro2/.cargo_vcs_info.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "git": { - "sha1": "d1bf13ac1d90c3b65c1b7fc131a26f37a8e2d0db" - }, - "path_in_vcs": "" -} \ No newline at end of file diff --git a/vendor/proc-macro2/.github/FUNDING.yml b/vendor/proc-macro2/.github/FUNDING.yml deleted file mode 100644 index 750707701cdae9..00000000000000 --- a/vendor/proc-macro2/.github/FUNDING.yml +++ /dev/null @@ -1 +0,0 @@ -github: dtolnay diff --git a/vendor/proc-macro2/.github/workflows/ci.yml 
b/vendor/proc-macro2/.github/workflows/ci.yml deleted file mode 100644 index 669a88c9c4f747..00000000000000 --- a/vendor/proc-macro2/.github/workflows/ci.yml +++ /dev/null @@ -1,232 +0,0 @@ -name: CI - -on: - push: - pull_request: - workflow_dispatch: - schedule: [cron: "40 1 * * *"] - -permissions: - contents: read - -env: - RUSTFLAGS: -Dwarnings - -jobs: - pre_ci: - uses: dtolnay/.github/.github/workflows/pre_ci.yml@master - - test: - name: Rust ${{matrix.rust}} - needs: pre_ci - if: needs.pre_ci.outputs.continue - runs-on: ubuntu-latest - strategy: - fail-fast: false - matrix: - rust: [1.80.0, stable, beta] - timeout-minutes: 45 - steps: - - uses: actions/checkout@v5 - - uses: dtolnay/rust-toolchain@master - with: - toolchain: ${{matrix.rust}} - components: rust-src - - run: cargo test - - run: cargo test --no-default-features - - run: cargo test --features span-locations - - name: RUSTFLAGS='--cfg procmacro2_semver_exempt' cargo test - run: cargo test - env: - RUSTFLAGS: --cfg procmacro2_semver_exempt ${{env.RUSTFLAGS}} - - name: RUSTFLAGS='--cfg procmacro2_semver_exempt' cargo test --no-default-features - run: cargo test --no-default-features - env: - RUSTFLAGS: --cfg procmacro2_semver_exempt ${{env.RUSTFLAGS}} - - nightly: - name: Rust nightly - needs: pre_ci - if: needs.pre_ci.outputs.continue - runs-on: ubuntu-latest - timeout-minutes: 45 - steps: - - uses: actions/checkout@v5 - - uses: dtolnay/rust-toolchain@nightly - with: - components: rust-src - - name: Enable type layout randomization - run: echo RUSTFLAGS=${RUSTFLAGS}\ -Zrandomize-layout\ --cfg=randomize_layout >> $GITHUB_ENV - - run: cargo check - env: - RUSTFLAGS: --cfg procmacro2_nightly_testing ${{env.RUSTFLAGS}} - - run: cargo test - - run: cargo test --no-default-features - - run: cargo test --no-default-features --test features -- --ignored make_sure_no_proc_macro # run the ignored test to make sure the `proc-macro` feature is disabled - - run: cargo test --features span-locations - - run: cargo test --manifest-path tests/ui/Cargo.toml - - name: RUSTFLAGS='--cfg procmacro2_semver_exempt' cargo test - run: cargo test - env: - RUSTFLAGS: --cfg procmacro2_semver_exempt ${{env.RUSTFLAGS}} - - name: RUSTFLAGS='--cfg procmacro2_semver_exempt' cargo test --no-default-features - run: cargo test --no-default-features - env: - RUSTFLAGS: --cfg procmacro2_semver_exempt ${{env.RUSTFLAGS}} - - name: RUSTFLAGS='-Z allow-features=' cargo test - run: cargo test - env: - RUSTFLAGS: -Z allow-features= --cfg procmacro2_backtrace ${{env.RUSTFLAGS}} - - uses: actions/upload-artifact@v4 - if: always() - with: - name: Cargo.lock - path: Cargo.lock - continue-on-error: true - - layout: - name: Layout - needs: pre_ci - if: needs.pre_ci.outputs.continue - runs-on: ubuntu-latest - timeout-minutes: 45 - steps: - - uses: actions/checkout@v5 - - uses: dtolnay/rust-toolchain@nightly - with: - components: rust-src - - run: cargo test --test test_size - - run: cargo test --test test_size --features span-locations - - run: cargo test --test test_size --no-default-features - - run: cargo test --test test_size --no-default-features --features span-locations - - msrv: - name: Rust 1.60.0 - needs: pre_ci - if: needs.pre_ci.outputs.continue - runs-on: ubuntu-latest - timeout-minutes: 45 - steps: - - uses: actions/checkout@v5 - - uses: dtolnay/rust-toolchain@1.60.0 - with: - components: rust-src - - run: cargo check - - run: cargo check --no-default-features - - run: cargo check --features span-locations - - name: RUSTFLAGS='--cfg procmacro2_semver_exempt' 
cargo check - run: cargo check - env: - RUSTFLAGS: --cfg procmacro2_semver_exempt ${{env.RUSTFLAGS}} - - name: RUSTFLAGS='--cfg procmacro2_semver_exempt' cargo check --no-default-features - run: cargo check --no-default-features - env: - RUSTFLAGS: --cfg procmacro2_semver_exempt ${{env.RUSTFLAGS}} - - minimal: - name: Minimal versions - needs: pre_ci - if: needs.pre_ci.outputs.continue - runs-on: ubuntu-latest - timeout-minutes: 45 - steps: - - uses: actions/checkout@v5 - - uses: dtolnay/rust-toolchain@nightly - - run: cargo generate-lockfile -Z minimal-versions - - run: cargo check --locked - - webassembly: - name: WebAssembly - needs: pre_ci - if: needs.pre_ci.outputs.continue - runs-on: ubuntu-latest - timeout-minutes: 45 - steps: - - uses: actions/checkout@v5 - - uses: dtolnay/rust-toolchain@nightly - with: - target: wasm32-unknown-unknown - components: rust-src - - name: Ignore WebAssembly linker warning - run: echo RUSTFLAGS=${RUSTFLAGS}\ -Alinker_messages >> $GITHUB_ENV - - run: cargo test --target wasm32-unknown-unknown --no-run - - fuzz: - name: Fuzz - needs: pre_ci - if: needs.pre_ci.outputs.continue - runs-on: ubuntu-latest - timeout-minutes: 45 - steps: - - uses: actions/checkout@v5 - - uses: dtolnay/rust-toolchain@nightly - with: - components: rust-src - - uses: dtolnay/install@cargo-fuzz - - run: cargo fuzz check - - run: cargo check --no-default-features --features afl - working-directory: fuzz - - uses: dtolnay/install@honggfuzz - - name: Run apt install binutils-dev libunwind-dev - run: | - sudo sed -i 's/^update_initramfs=yes$/update_initramfs=no/' /etc/initramfs-tools/update-initramfs.conf - sudo rm -f /var/lib/man-db/auto-update - sudo apt-get update - sudo apt-get install binutils-dev libunwind-dev - - run: cargo hfuzz build --no-default-features --features honggfuzz - working-directory: fuzz - - doc: - name: Documentation - needs: pre_ci - if: needs.pre_ci.outputs.continue - runs-on: ubuntu-latest - timeout-minutes: 45 - env: - RUSTDOCFLAGS: -Dwarnings - steps: - - uses: actions/checkout@v5 - - uses: dtolnay/rust-toolchain@nightly - with: - components: rust-src - - uses: dtolnay/install@cargo-docs-rs - - run: cargo docs-rs - - clippy: - name: Clippy - runs-on: ubuntu-latest - if: github.event_name != 'pull_request' - timeout-minutes: 45 - steps: - - uses: actions/checkout@v5 - - uses: dtolnay/rust-toolchain@nightly - with: - components: clippy, rust-src - - run: cargo clippy --tests -- -Dclippy::all -Dclippy::pedantic - - run: cargo clippy --tests --all-features -- -Dclippy::all -Dclippy::pedantic - - miri: - name: Miri - needs: pre_ci - if: needs.pre_ci.outputs.continue - runs-on: ubuntu-latest - timeout-minutes: 45 - steps: - - uses: actions/checkout@v5 - - uses: dtolnay/rust-toolchain@miri - - run: cargo miri setup - - run: cargo miri test - env: - MIRIFLAGS: -Zmiri-strict-provenance - - outdated: - name: Outdated - runs-on: ubuntu-latest - if: github.event_name != 'pull_request' - timeout-minutes: 45 - steps: - - uses: actions/checkout@v5 - - uses: dtolnay/rust-toolchain@stable - - uses: dtolnay/install@cargo-outdated - - run: cargo outdated --workspace --exit-code 1 - - run: cargo outdated --manifest-path fuzz/Cargo.toml --exit-code 1 diff --git a/vendor/proc-macro2/Cargo.lock b/vendor/proc-macro2/Cargo.lock deleted file mode 100644 index e37ffdd48fc7d3..00000000000000 --- a/vendor/proc-macro2/Cargo.lock +++ /dev/null @@ -1,326 +0,0 @@ -# This file is automatically @generated by Cargo. -# It is not intended for manual editing. 
-version = 3 - -[[package]] -name = "adler2" -version = "2.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "320119579fcad9c21884f5c4861d16174d0e06250625266f50fe6898340abefa" - -[[package]] -name = "bitflags" -version = "2.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "812e12b5285cc515a9c72a5c1d3b6d46a19dac5acfef5265968c166106e31dd3" - -[[package]] -name = "cfg-if" -version = "1.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9330f8b2ff13f34540b44e946ef35111825727b38d33286ef986142615121801" - -[[package]] -name = "crc32fast" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9481c1c90cbf2ac953f07c8d4a58aa3945c425b7185c9154d67a65e4230da511" -dependencies = [ - "cfg-if", -] - -[[package]] -name = "crossbeam-deque" -version = "0.8.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9dd111b7b7f7d55b72c0a6ae361660ee5853c9af73f70c3c2ef6858b950e2e51" -dependencies = [ - "crossbeam-epoch", - "crossbeam-utils", -] - -[[package]] -name = "crossbeam-epoch" -version = "0.9.18" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b82ac4a3c2ca9c3460964f020e1402edd5753411d7737aa39c3714ad1b5420e" -dependencies = [ - "crossbeam-utils", -] - -[[package]] -name = "crossbeam-utils" -version = "0.8.21" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d0a5c400df2834b80a4c3327b3aad3a4c4cd4de0629063962b03235697506a28" - -[[package]] -name = "either" -version = "1.15.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "48c757948c5ede0e46177b7add2e67155f70e33c07fea8284df6576da70b3719" - -[[package]] -name = "errno" -version = "0.3.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "39cab71617ae0d63f51a36d69f866391735b51691dbda63cf6f96d042b63efeb" -dependencies = [ - "libc", - "windows-sys 0.61.2", -] - -[[package]] -name = "filetime" -version = "0.2.26" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc0505cd1b6fa6580283f6bdf70a73fcf4aba1184038c90902b92b3dd0df63ed" -dependencies = [ - "cfg-if", - "libc", - "libredox", - "windows-sys 0.60.2", -] - -[[package]] -name = "flate2" -version = "1.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc5a4e564e38c699f2880d3fda590bedc2e69f3f84cd48b457bd892ce61d0aa9" -dependencies = [ - "crc32fast", - "miniz_oxide", -] - -[[package]] -name = "libc" -version = "0.2.177" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2874a2af47a2325c2001a6e6fad9b16a53b802102b528163885171cf92b15976" - -[[package]] -name = "libredox" -version = "0.1.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "416f7e718bdb06000964960ffa43b4335ad4012ae8b99060261aa4a8088d5ccb" -dependencies = [ - "bitflags", - "libc", - "redox_syscall", -] - -[[package]] -name = "linux-raw-sys" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df1d3c3b53da64cf5760482273a98e575c651a67eec7f77df96b5b642de8f039" - -[[package]] -name = "miniz_oxide" -version = "0.8.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1fa76a2c86f704bdb222d66965fb3d63269ce38518b83cb0575fca855ebb6316" -dependencies = [ - "adler2", - "simd-adler32", -] - -[[package]] -name = "proc-macro2" -version = "1.0.102" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e0f6df8eaa422d97d72edcd152e1451618fed47fabbdbd5a8864167b1d4aff7" -dependencies = [ - "unicode-ident", -] - -[[package]] -name = "proc-macro2" -version = "1.0.103" -dependencies = [ - "flate2", - "quote", - "rayon", - "rustversion", - "tar", - "unicode-ident", -] - -[[package]] -name = "quote" -version = "1.0.41" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce25767e7b499d1b604768e7cde645d14cc8584231ea6b295e9c9eb22c02e1d1" -dependencies = [ - "proc-macro2 1.0.102", -] - -[[package]] -name = "rayon" -version = "1.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "368f01d005bf8fd9b1206fb6fa653e6c4a81ceb1466406b81792d87c5677a58f" -dependencies = [ - "either", - "rayon-core", -] - -[[package]] -name = "rayon-core" -version = "1.13.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22e18b0f0062d30d4230b2e85ff77fdfe4326feb054b9783a3460d8435c8ab91" -dependencies = [ - "crossbeam-deque", - "crossbeam-utils", -] - -[[package]] -name = "redox_syscall" -version = "0.5.18" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed2bf2547551a7053d6fdfafda3f938979645c44812fbfcda098faae3f1a362d" -dependencies = [ - "bitflags", -] - -[[package]] -name = "rustix" -version = "1.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd15f8a2c5551a84d56efdc1cd049089e409ac19a3072d5037a17fd70719ff3e" -dependencies = [ - "bitflags", - "errno", - "libc", - "linux-raw-sys", - "windows-sys 0.61.2", -] - -[[package]] -name = "rustversion" -version = "1.0.22" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b39cdef0fa800fc44525c84ccb54a029961a8215f9619753635a9c0d2538d46d" - -[[package]] -name = "simd-adler32" -version = "0.3.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d66dc143e6b11c1eddc06d5c423cfc97062865baf299914ab64caa38182078fe" - -[[package]] -name = "tar" -version = "0.4.44" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d863878d212c87a19c1a610eb53bb01fe12951c0501cf5a0d65f724914a667a" -dependencies = [ - "filetime", - "libc", - "xattr", -] - -[[package]] -name = "unicode-ident" -version = "1.0.20" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "462eeb75aeb73aea900253ce739c8e18a67423fadf006037cd3ff27e82748a06" - -[[package]] -name = "windows-link" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0805222e57f7521d6a62e36fa9163bc891acd422f971defe97d64e70d0a4fe5" - -[[package]] -name = "windows-sys" -version = "0.60.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2f500e4d28234f72040990ec9d39e3a6b950f9f22d3dba18416c35882612bcb" -dependencies = [ - "windows-targets", -] - -[[package]] -name = "windows-sys" -version = "0.61.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae137229bcbd6cdf0f7b80a31df61766145077ddf49416a728b02cb3921ff3fc" -dependencies = [ - "windows-link", -] - -[[package]] -name = "windows-targets" -version = "0.53.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4945f9f551b88e0d65f3db0bc25c33b8acea4d9e41163edf90dcd0b19f9069f3" -dependencies = [ - "windows-link", - "windows_aarch64_gnullvm", - "windows_aarch64_msvc", - "windows_i686_gnu", - "windows_i686_gnullvm", - "windows_i686_msvc", - "windows_x86_64_gnu", 
- "windows_x86_64_gnullvm", - "windows_x86_64_msvc", -] - -[[package]] -name = "windows_aarch64_gnullvm" -version = "0.53.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9d8416fa8b42f5c947f8482c43e7d89e73a173cead56d044f6a56104a6d1b53" - -[[package]] -name = "windows_aarch64_msvc" -version = "0.53.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9d782e804c2f632e395708e99a94275910eb9100b2114651e04744e9b125006" - -[[package]] -name = "windows_i686_gnu" -version = "0.53.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "960e6da069d81e09becb0ca57a65220ddff016ff2d6af6a223cf372a506593a3" - -[[package]] -name = "windows_i686_gnullvm" -version = "0.53.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa7359d10048f68ab8b09fa71c3daccfb0e9b559aed648a8f95469c27057180c" - -[[package]] -name = "windows_i686_msvc" -version = "0.53.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e7ac75179f18232fe9c285163565a57ef8d3c89254a30685b57d83a38d326c2" - -[[package]] -name = "windows_x86_64_gnu" -version = "0.53.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c3842cdd74a865a8066ab39c8a7a473c0778a3f29370b5fd6b4b9aa7df4a499" - -[[package]] -name = "windows_x86_64_gnullvm" -version = "0.53.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ffa179e2d07eee8ad8f57493436566c7cc30ac536a3379fdf008f47f6bb7ae1" - -[[package]] -name = "windows_x86_64_msvc" -version = "0.53.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d6bbff5f0aada427a1e5a6da5f1f98158182f26556f345ac9e04d36d0ebed650" - -[[package]] -name = "xattr" -version = "1.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32e45ad4206f6d2479085147f02bc2ef834ac85886624a23575ae137c8aa8156" -dependencies = [ - "libc", - "rustix", -] diff --git a/vendor/proc-macro2/Cargo.toml b/vendor/proc-macro2/Cargo.toml deleted file mode 100644 index 3f0173c7d1b4aa..00000000000000 --- a/vendor/proc-macro2/Cargo.toml +++ /dev/null @@ -1,105 +0,0 @@ -# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO -# -# When uploading crates to the registry Cargo will automatically -# "normalize" Cargo.toml files for maximal compatibility -# with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g., crates.io) dependencies. -# -# If you are reading this file be aware that the original Cargo.toml -# will likely look very different (and much more reasonable). -# See Cargo.toml.orig for the original contents. - -[package] -edition = "2021" -rust-version = "1.60" -name = "proc-macro2" -version = "1.0.103" -authors = [ - "David Tolnay ", - "Alex Crichton ", -] -build = "build.rs" -autolib = false -autobins = false -autoexamples = false -autotests = false -autobenches = false -description = "A substitute implementation of the compiler's `proc_macro` API to decouple token-based libraries from the procedural macro use case." 
-documentation = "https://docs.rs/proc-macro2" -readme = "README.md" -keywords = [ - "macros", - "syn", -] -categories = ["development-tools::procedural-macro-helpers"] -license = "MIT OR Apache-2.0" -repository = "https://github.com/dtolnay/proc-macro2" - -[package.metadata.docs.rs] -rustc-args = ["--cfg=procmacro2_semver_exempt"] -targets = ["x86_64-unknown-linux-gnu"] -rustdoc-args = [ - "--cfg=procmacro2_semver_exempt", - "--generate-link-to-definition", - "--generate-macro-expansion", - "--extern-html-root-url=core=https://doc.rust-lang.org", - "--extern-html-root-url=alloc=https://doc.rust-lang.org", - "--extern-html-root-url=std=https://doc.rust-lang.org", - "--extern-html-root-url=proc_macro=https://doc.rust-lang.org", -] - -[package.metadata.playground] -features = ["span-locations"] - -[features] -default = ["proc-macro"] -nightly = [] -proc-macro = [] -span-locations = [] - -[lib] -name = "proc_macro2" -path = "src/lib.rs" - -[[test]] -name = "comments" -path = "tests/comments.rs" - -[[test]] -name = "features" -path = "tests/features.rs" - -[[test]] -name = "marker" -path = "tests/marker.rs" - -[[test]] -name = "test" -path = "tests/test.rs" - -[[test]] -name = "test_fmt" -path = "tests/test_fmt.rs" - -[[test]] -name = "test_size" -path = "tests/test_size.rs" - -[dependencies.unicode-ident] -version = "1.0" - -[dev-dependencies.flate2] -version = "1.0" - -[dev-dependencies.quote] -version = "1.0" -default-features = false - -[dev-dependencies.rayon] -version = "1.0" - -[dev-dependencies.rustversion] -version = "1" - -[dev-dependencies.tar] -version = "0.4" diff --git a/vendor/proc-macro2/LICENSE-APACHE b/vendor/proc-macro2/LICENSE-APACHE deleted file mode 100644 index 1b5ec8b78e237b..00000000000000 --- a/vendor/proc-macro2/LICENSE-APACHE +++ /dev/null @@ -1,176 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). 
- - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - -2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - -3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - -4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - -5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - -6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - -8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - -END OF TERMS AND CONDITIONS diff --git a/vendor/proc-macro2/LICENSE-MIT b/vendor/proc-macro2/LICENSE-MIT deleted file mode 100644 index 31aa79387f27e7..00000000000000 --- a/vendor/proc-macro2/LICENSE-MIT +++ /dev/null @@ -1,23 +0,0 @@ -Permission is hereby granted, free of charge, to any -person obtaining a copy of this software and associated -documentation files (the "Software"), to deal in the -Software without restriction, including without -limitation the rights to use, copy, modify, merge, -publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software -is furnished to do so, subject to the following -conditions: - -The above copyright notice and this permission notice -shall be included in all copies or substantial portions -of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -DEALINGS IN THE SOFTWARE. diff --git a/vendor/proc-macro2/README.md b/vendor/proc-macro2/README.md deleted file mode 100644 index 0b6b490fa9cc6f..00000000000000 --- a/vendor/proc-macro2/README.md +++ /dev/null @@ -1,94 +0,0 @@ -# proc-macro2 - -[github](https://github.com/dtolnay/proc-macro2) -[crates.io](https://crates.io/crates/proc-macro2) -[docs.rs](https://docs.rs/proc-macro2) -[build status](https://github.com/dtolnay/proc-macro2/actions?query=branch%3Amaster) - -A wrapper around the procedural macro API of the compiler's `proc_macro` crate. -This library serves two purposes: - -- **Bring proc-macro-like functionality to other contexts like build.rs and - main.rs.** Types from `proc_macro` are entirely specific to procedural macros - and cannot ever exist in code outside of a procedural macro. Meanwhile - `proc_macro2` types may exist anywhere including non-macro code. 
By developing - foundational libraries like [syn] and [quote] against `proc_macro2` rather - than `proc_macro`, the procedural macro ecosystem becomes easily applicable to - many other use cases and we avoid reimplementing non-macro equivalents of - those libraries. - -- **Make procedural macros unit testable.** As a consequence of being specific - to procedural macros, nothing that uses `proc_macro` can be executed from a - unit test. In order for helper libraries or components of a macro to be - testable in isolation, they must be implemented using `proc_macro2`. - -[syn]: https://github.com/dtolnay/syn -[quote]: https://github.com/dtolnay/quote - -## Usage - -```toml -[dependencies] -proc-macro2 = "1.0" -``` - -The skeleton of a typical procedural macro typically looks like this: - -```rust -extern crate proc_macro; - -#[proc_macro_derive(MyDerive)] -pub fn my_derive(input: proc_macro::TokenStream) -> proc_macro::TokenStream { - let input = proc_macro2::TokenStream::from(input); - - let output: proc_macro2::TokenStream = { - /* transform input */ - }; - - proc_macro::TokenStream::from(output) -} -``` - -If parsing with [Syn], you'll use [`parse_macro_input!`] instead to propagate -parse errors correctly back to the compiler when parsing fails. - -[`parse_macro_input!`]: https://docs.rs/syn/2.0/syn/macro.parse_macro_input.html - -## Unstable features - -The default feature set of proc-macro2 tracks the most recent stable compiler -API. Functionality in `proc_macro` that is not yet stable is not exposed by -proc-macro2 by default. - -To opt into the additional APIs available in the most recent nightly compiler, -the `procmacro2_semver_exempt` config flag must be passed to rustc. We will -polyfill those nightly-only APIs back to Rust 1.60.0. As these are unstable APIs -that track the nightly compiler, minor versions of proc-macro2 may make breaking -changes to them at any time. - -``` -RUSTFLAGS='--cfg procmacro2_semver_exempt' cargo build -``` - -Note that this must not only be done for your crate, but for any crate that -depends on your crate. This infectious nature is intentional, as it serves as a -reminder that you are outside of the normal semver guarantees. - -Semver exempt methods are marked as such in the proc-macro2 documentation. - -
- -#### License - - -Licensed under either of Apache License, Version -2.0 or MIT license at your option. - - -
- - -Unless you explicitly state otherwise, any contribution intentionally submitted -for inclusion in this crate by you, as defined in the Apache-2.0 license, shall -be dual licensed as above, without any additional terms or conditions. - diff --git a/vendor/proc-macro2/build.rs b/vendor/proc-macro2/build.rs deleted file mode 100644 index 26c3ed1bdcc519..00000000000000 --- a/vendor/proc-macro2/build.rs +++ /dev/null @@ -1,267 +0,0 @@ -#![allow(unknown_lints)] -#![allow(unexpected_cfgs)] -#![allow(clippy::uninlined_format_args)] - -use std::env; -use std::ffi::OsString; -use std::fs; -use std::io::ErrorKind; -use std::iter; -use std::path::Path; -use std::process::{self, Command, Stdio}; -use std::str; - -fn main() { - let rustc = rustc_minor_version().unwrap_or(u32::MAX); - - if rustc >= 80 { - println!("cargo:rustc-check-cfg=cfg(fuzzing)"); - println!("cargo:rustc-check-cfg=cfg(no_is_available)"); - println!("cargo:rustc-check-cfg=cfg(no_literal_byte_character)"); - println!("cargo:rustc-check-cfg=cfg(no_literal_c_string)"); - println!("cargo:rustc-check-cfg=cfg(no_source_text)"); - println!("cargo:rustc-check-cfg=cfg(proc_macro_span)"); - println!("cargo:rustc-check-cfg=cfg(proc_macro_span_file)"); - println!("cargo:rustc-check-cfg=cfg(proc_macro_span_location)"); - println!("cargo:rustc-check-cfg=cfg(procmacro2_backtrace)"); - println!("cargo:rustc-check-cfg=cfg(procmacro2_build_probe)"); - println!("cargo:rustc-check-cfg=cfg(procmacro2_nightly_testing)"); - println!("cargo:rustc-check-cfg=cfg(procmacro2_semver_exempt)"); - println!("cargo:rustc-check-cfg=cfg(randomize_layout)"); - println!("cargo:rustc-check-cfg=cfg(span_locations)"); - println!("cargo:rustc-check-cfg=cfg(super_unstable)"); - println!("cargo:rustc-check-cfg=cfg(wrap_proc_macro)"); - } - - let semver_exempt = cfg!(procmacro2_semver_exempt); - if semver_exempt { - // https://github.com/dtolnay/proc-macro2/issues/147 - println!("cargo:rustc-cfg=procmacro2_semver_exempt"); - } - - if semver_exempt || cfg!(feature = "span-locations") { - // Provide methods Span::start and Span::end which give the line/column - // location of a token. This is behind a cfg because tracking location - // inside spans is a performance hit. - println!("cargo:rustc-cfg=span_locations"); - } - - if rustc < 57 { - // Do not use proc_macro::is_available() to detect whether the proc - // macro API is available vs needs to be polyfilled. Instead, use the - // proc macro API unconditionally and catch the panic that occurs if it - // isn't available. - println!("cargo:rustc-cfg=no_is_available"); - } - - if rustc < 66 { - // Do not call libproc_macro's Span::source_text. Always return None. - println!("cargo:rustc-cfg=no_source_text"); - } - - if rustc < 79 { - // Do not call Literal::byte_character nor Literal::c_string. They can - // be emulated by way of Literal::from_str. - println!("cargo:rustc-cfg=no_literal_byte_character"); - println!("cargo:rustc-cfg=no_literal_c_string"); - } - - if !cfg!(feature = "proc-macro") { - println!("cargo:rerun-if-changed=build.rs"); - return; - } - - let proc_macro_span; - let consider_rustc_bootstrap; - if compile_probe_unstable("proc_macro_span", false) { - // This is a nightly or dev compiler, so it supports unstable features - // regardless of RUSTC_BOOTSTRAP. No need to rerun build script if - // RUSTC_BOOTSTRAP is changed. 
- proc_macro_span = true; - consider_rustc_bootstrap = false; - } else if let Some(rustc_bootstrap) = env::var_os("RUSTC_BOOTSTRAP") { - if compile_probe_unstable("proc_macro_span", true) { - // This is a stable or beta compiler for which the user has set - // RUSTC_BOOTSTRAP to turn on unstable features. Rerun build script - // if they change it. - proc_macro_span = true; - consider_rustc_bootstrap = true; - } else if rustc_bootstrap == "1" { - // This compiler does not support the proc macro Span API in the - // form that proc-macro2 expects. No need to pay attention to - // RUSTC_BOOTSTRAP. - proc_macro_span = false; - consider_rustc_bootstrap = false; - } else { - // This is a stable or beta compiler for which RUSTC_BOOTSTRAP is - // set to restrict the use of unstable features by this crate. - proc_macro_span = false; - consider_rustc_bootstrap = true; - } - } else { - // Without RUSTC_BOOTSTRAP, this compiler does not support the proc - // macro Span API in the form that proc-macro2 expects, but try again if - // the user turns on unstable features. - proc_macro_span = false; - consider_rustc_bootstrap = true; - } - - if proc_macro_span || !semver_exempt { - // Wrap types from libproc_macro rather than polyfilling the whole API. - // Enabled as long as procmacro2_semver_exempt is not set, because we - // can't emulate the unstable API without emulating everything else. - // Also enabled unconditionally on nightly, in which case the - // procmacro2_semver_exempt surface area is implemented by using the - // nightly-only proc_macro API. - println!("cargo:rustc-cfg=wrap_proc_macro"); - } - - if proc_macro_span { - // Enable non-dummy behavior of Span::byte_range and Span::join methods - // which requires an unstable compiler feature. Enabled when building - // with nightly, unless `-Z allow-feature` in RUSTFLAGS disallows - // unstable features. - println!("cargo:rustc-cfg=proc_macro_span"); - } - - if proc_macro_span || (rustc >= 88 && compile_probe_stable("proc_macro_span_location")) { - // Enable non-dummy behavior of Span::start and Span::end methods on - // Rust 1.88+. - println!("cargo:rustc-cfg=proc_macro_span_location"); - } - - if proc_macro_span || (rustc >= 88 && compile_probe_stable("proc_macro_span_file")) { - // Enable non-dummy behavior of Span::file and Span::local_file methods - // on Rust 1.88+. - println!("cargo:rustc-cfg=proc_macro_span_file"); - } - - if semver_exempt && proc_macro_span { - // Implement the semver exempt API in terms of the nightly-only - // proc_macro API. - println!("cargo:rustc-cfg=super_unstable"); - } - - if consider_rustc_bootstrap { - println!("cargo:rerun-if-env-changed=RUSTC_BOOTSTRAP"); - } -} - -fn compile_probe_unstable(feature: &str, rustc_bootstrap: bool) -> bool { - // RUSTC_STAGE indicates that this crate is being compiled as a dependency - // of a multistage rustc bootstrap. 
This environment uses Cargo in a highly - // non-standard way with issues such as: - // - // https://github.com/rust-lang/cargo/issues/11138 - // https://github.com/rust-lang/rust/issues/114839 - // - env::var_os("RUSTC_STAGE").is_none() && do_compile_probe(feature, rustc_bootstrap) -} - -fn compile_probe_stable(feature: &str) -> bool { - env::var_os("RUSTC_STAGE").is_some() || do_compile_probe(feature, true) -} - -fn do_compile_probe(feature: &str, rustc_bootstrap: bool) -> bool { - println!("cargo:rerun-if-changed=src/probe/{}.rs", feature); - - let rustc = cargo_env_var("RUSTC"); - let out_dir = cargo_env_var("OUT_DIR"); - let out_subdir = Path::new(&out_dir).join("probe"); - let probefile = Path::new("src") - .join("probe") - .join(feature) - .with_extension("rs"); - - if let Err(err) = fs::create_dir(&out_subdir) { - if err.kind() != ErrorKind::AlreadyExists { - eprintln!("Failed to create {}: {}", out_subdir.display(), err); - process::exit(1); - } - } - - let rustc_wrapper = env::var_os("RUSTC_WRAPPER").filter(|wrapper| !wrapper.is_empty()); - let rustc_workspace_wrapper = - env::var_os("RUSTC_WORKSPACE_WRAPPER").filter(|wrapper| !wrapper.is_empty()); - let mut rustc = rustc_wrapper - .into_iter() - .chain(rustc_workspace_wrapper) - .chain(iter::once(rustc)); - let mut cmd = Command::new(rustc.next().unwrap()); - cmd.args(rustc); - - if !rustc_bootstrap { - cmd.env_remove("RUSTC_BOOTSTRAP"); - } - - cmd.stderr(Stdio::null()) - .arg("--cfg=procmacro2_build_probe") - .arg("--edition=2021") - .arg("--crate-name=proc_macro2") - .arg("--crate-type=lib") - .arg("--cap-lints=allow") - .arg("--emit=dep-info,metadata") - .arg("--out-dir") - .arg(&out_subdir) - .arg(probefile); - - if let Some(target) = env::var_os("TARGET") { - cmd.arg("--target").arg(target); - } - - // If Cargo wants to set RUSTFLAGS, use that. - if let Ok(rustflags) = env::var("CARGO_ENCODED_RUSTFLAGS") { - if !rustflags.is_empty() { - for arg in rustflags.split('\x1f') { - cmd.arg(arg); - } - } - } - - let success = match cmd.status() { - Ok(status) => status.success(), - Err(_) => false, - }; - - // Clean up to avoid leaving nondeterministic absolute paths in the dep-info - // file in OUT_DIR, which causes nonreproducible builds in build systems - // that treat the entire OUT_DIR as an artifact. - if let Err(err) = fs::remove_dir_all(&out_subdir) { - // libc::ENOTEMPTY - // Some filesystems (NFSv3) have timing issues under load where '.nfs*' - // dummy files can continue to get created for a short period after the - // probe command completes, breaking remove_dir_all. - // To be replaced with ErrorKind::DirectoryNotEmpty (Rust 1.83+). 
- const ENOTEMPTY: i32 = 39; - - if !(err.kind() == ErrorKind::NotFound - || (cfg!(target_os = "linux") && err.raw_os_error() == Some(ENOTEMPTY))) - { - eprintln!("Failed to clean up {}: {}", out_subdir.display(), err); - process::exit(1); - } - } - - success -} - -fn rustc_minor_version() -> Option { - let rustc = cargo_env_var("RUSTC"); - let output = Command::new(rustc).arg("--version").output().ok()?; - let version = str::from_utf8(&output.stdout).ok()?; - let mut pieces = version.split('.'); - if pieces.next() != Some("rustc 1") { - return None; - } - pieces.next()?.parse().ok() -} - -fn cargo_env_var(key: &str) -> OsString { - env::var_os(key).unwrap_or_else(|| { - eprintln!( - "Environment variable ${} is not set during execution of build script", - key, - ); - process::exit(1); - }) -} diff --git a/vendor/proc-macro2/rust-toolchain.toml b/vendor/proc-macro2/rust-toolchain.toml deleted file mode 100644 index 20fe888c30ab44..00000000000000 --- a/vendor/proc-macro2/rust-toolchain.toml +++ /dev/null @@ -1,2 +0,0 @@ -[toolchain] -components = ["rust-src"] diff --git a/vendor/proc-macro2/src/detection.rs b/vendor/proc-macro2/src/detection.rs deleted file mode 100644 index beba7b23739569..00000000000000 --- a/vendor/proc-macro2/src/detection.rs +++ /dev/null @@ -1,75 +0,0 @@ -use core::sync::atomic::{AtomicUsize, Ordering}; -use std::sync::Once; - -static WORKS: AtomicUsize = AtomicUsize::new(0); -static INIT: Once = Once::new(); - -pub(crate) fn inside_proc_macro() -> bool { - match WORKS.load(Ordering::Relaxed) { - 1 => return false, - 2 => return true, - _ => {} - } - - INIT.call_once(initialize); - inside_proc_macro() -} - -pub(crate) fn force_fallback() { - WORKS.store(1, Ordering::Relaxed); -} - -pub(crate) fn unforce_fallback() { - initialize(); -} - -#[cfg(not(no_is_available))] -fn initialize() { - let available = proc_macro::is_available(); - WORKS.store(available as usize + 1, Ordering::Relaxed); -} - -// Swap in a null panic hook to avoid printing "thread panicked" to stderr, -// then use catch_unwind to determine whether the compiler's proc_macro is -// working. When proc-macro2 is used from outside of a procedural macro all -// of the proc_macro crate's APIs currently panic. -// -// The Once is to prevent the possibility of this ordering: -// -// thread 1 calls take_hook, gets the user's original hook -// thread 1 calls set_hook with the null hook -// thread 2 calls take_hook, thinks null hook is the original hook -// thread 2 calls set_hook with the null hook -// thread 1 calls set_hook with the actual original hook -// thread 2 calls set_hook with what it thinks is the original hook -// -// in which the user's hook has been lost. -// -// There is still a race condition where a panic in a different thread can -// happen during the interval that the user's original panic hook is -// unregistered such that their hook is incorrectly not called. This is -// sufficiently unlikely and less bad than printing panic messages to stderr -// on correct use of this crate. Maybe there is a libstd feature request -// here. For now, if a user needs to guarantee that this failure mode does -// not occur, they need to call e.g. `proc_macro2::Span::call_site()` from -// the main thread before launching any other threads. 
-#[cfg(no_is_available)] -fn initialize() { - use std::panic::{self, PanicInfo}; - - type PanicHook = dyn Fn(&PanicInfo) + Sync + Send + 'static; - - let null_hook: Box = Box::new(|_panic_info| { /* ignore */ }); - let sanity_check = &*null_hook as *const PanicHook; - let original_hook = panic::take_hook(); - panic::set_hook(null_hook); - - let works = panic::catch_unwind(proc_macro::Span::call_site).is_ok(); - WORKS.store(works as usize + 1, Ordering::Relaxed); - - let hopefully_null_hook = panic::take_hook(); - panic::set_hook(original_hook); - if sanity_check != &*hopefully_null_hook { - panic!("observed race condition in proc_macro2::inside_proc_macro"); - } -} diff --git a/vendor/proc-macro2/src/extra.rs b/vendor/proc-macro2/src/extra.rs deleted file mode 100644 index 522a90e136bea4..00000000000000 --- a/vendor/proc-macro2/src/extra.rs +++ /dev/null @@ -1,151 +0,0 @@ -//! Items which do not have a correspondence to any API in the proc_macro crate, -//! but are necessary to include in proc-macro2. - -use crate::fallback; -use crate::imp; -use crate::marker::{ProcMacroAutoTraits, MARKER}; -use crate::Span; -use core::fmt::{self, Debug}; - -/// Invalidate any `proc_macro2::Span` that exist on the current thread. -/// -/// The implementation of `Span` uses thread-local data structures and this -/// function clears them. Calling any method on a `Span` on the current thread -/// created prior to the invalidation will return incorrect values or crash. -/// -/// This function is useful for programs that process more than 232 -/// bytes of Rust source code on the same thread. Just like rustc, proc-macro2 -/// uses 32-bit source locations, and these wrap around when the total source -/// code processed by the same thread exceeds 232 bytes (4 -/// gigabytes). After a wraparound, `Span` methods such as `source_text()` can -/// return wrong data. -/// -/// # Example -/// -/// As of late 2023, there is 200 GB of Rust code published on crates.io. -/// Looking at just the newest version of every crate, it is 16 GB of code. So a -/// workload that involves parsing it all would overflow a 32-bit source -/// location unless spans are being invalidated. -/// -/// ``` -/// use flate2::read::GzDecoder; -/// use std::ffi::OsStr; -/// use std::io::{BufReader, Read}; -/// use std::str::FromStr; -/// use tar::Archive; -/// -/// rayon::scope(|s| { -/// for krate in every_version_of_every_crate() { -/// s.spawn(move |_| { -/// proc_macro2::extra::invalidate_current_thread_spans(); -/// -/// let reader = BufReader::new(krate); -/// let tar = GzDecoder::new(reader); -/// let mut archive = Archive::new(tar); -/// for entry in archive.entries().unwrap() { -/// let mut entry = entry.unwrap(); -/// let path = entry.path().unwrap(); -/// if path.extension() != Some(OsStr::new("rs")) { -/// continue; -/// } -/// let mut content = String::new(); -/// entry.read_to_string(&mut content).unwrap(); -/// match proc_macro2::TokenStream::from_str(&content) { -/// Ok(tokens) => {/* ... */}, -/// Err(_) => continue, -/// } -/// } -/// }); -/// } -/// }); -/// # -/// # fn every_version_of_every_crate() -> Vec { -/// # Vec::new() -/// # } -/// ``` -/// -/// # Panics -/// -/// This function is not applicable to and will panic if called from a -/// procedural macro. 
-#[cfg(span_locations)] -#[cfg_attr(docsrs, doc(cfg(feature = "span-locations")))] -pub fn invalidate_current_thread_spans() { - crate::imp::invalidate_current_thread_spans(); -} - -/// An object that holds a [`Group`]'s `span_open()` and `span_close()` together -/// in a more compact representation than holding those 2 spans individually. -/// -/// [`Group`]: crate::Group -#[derive(Copy, Clone)] -pub struct DelimSpan { - inner: DelimSpanEnum, - _marker: ProcMacroAutoTraits, -} - -#[derive(Copy, Clone)] -enum DelimSpanEnum { - #[cfg(wrap_proc_macro)] - Compiler { - join: proc_macro::Span, - open: proc_macro::Span, - close: proc_macro::Span, - }, - Fallback(fallback::Span), -} - -impl DelimSpan { - pub(crate) fn new(group: &imp::Group) -> Self { - #[cfg(wrap_proc_macro)] - let inner = match group { - imp::Group::Compiler(group) => DelimSpanEnum::Compiler { - join: group.span(), - open: group.span_open(), - close: group.span_close(), - }, - imp::Group::Fallback(group) => DelimSpanEnum::Fallback(group.span()), - }; - - #[cfg(not(wrap_proc_macro))] - let inner = DelimSpanEnum::Fallback(group.span()); - - DelimSpan { - inner, - _marker: MARKER, - } - } - - /// Returns a span covering the entire delimited group. - pub fn join(&self) -> Span { - match &self.inner { - #[cfg(wrap_proc_macro)] - DelimSpanEnum::Compiler { join, .. } => Span::_new(imp::Span::Compiler(*join)), - DelimSpanEnum::Fallback(span) => Span::_new_fallback(*span), - } - } - - /// Returns a span for the opening punctuation of the group only. - pub fn open(&self) -> Span { - match &self.inner { - #[cfg(wrap_proc_macro)] - DelimSpanEnum::Compiler { open, .. } => Span::_new(imp::Span::Compiler(*open)), - DelimSpanEnum::Fallback(span) => Span::_new_fallback(span.first_byte()), - } - } - - /// Returns a span for the closing punctuation of the group only. - pub fn close(&self) -> Span { - match &self.inner { - #[cfg(wrap_proc_macro)] - DelimSpanEnum::Compiler { close, .. } => Span::_new(imp::Span::Compiler(*close)), - DelimSpanEnum::Fallback(span) => Span::_new_fallback(span.last_byte()), - } - } -} - -impl Debug for DelimSpan { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - Debug::fmt(&self.join(), f) - } -} diff --git a/vendor/proc-macro2/src/fallback.rs b/vendor/proc-macro2/src/fallback.rs deleted file mode 100644 index 61b7b91b08883e..00000000000000 --- a/vendor/proc-macro2/src/fallback.rs +++ /dev/null @@ -1,1256 +0,0 @@ -#[cfg(wrap_proc_macro)] -use crate::imp; -#[cfg(span_locations)] -use crate::location::LineColumn; -use crate::parse::{self, Cursor}; -use crate::rcvec::{RcVec, RcVecBuilder, RcVecIntoIter, RcVecMut}; -use crate::{Delimiter, Spacing, TokenTree}; -#[cfg(all(span_locations, not(fuzzing)))] -use alloc::collections::BTreeMap; -#[cfg(all(span_locations, not(fuzzing)))] -use core::cell::RefCell; -#[cfg(span_locations)] -use core::cmp; -#[cfg(all(span_locations, not(fuzzing)))] -use core::cmp::Ordering; -use core::fmt::{self, Debug, Display, Write}; -use core::mem::ManuallyDrop; -#[cfg(span_locations)] -use core::ops::Range; -use core::ops::RangeBounds; -use core::ptr; -use core::str; -#[cfg(feature = "proc-macro")] -use core::str::FromStr; -use std::ffi::CStr; -#[cfg(wrap_proc_macro)] -use std::panic; -#[cfg(span_locations)] -use std::path::PathBuf; - -/// Force use of proc-macro2's fallback implementation of the API for now, even -/// if the compiler's implementation is available. 
-pub fn force() { - #[cfg(wrap_proc_macro)] - crate::detection::force_fallback(); -} - -/// Resume using the compiler's implementation of the proc macro API if it is -/// available. -pub fn unforce() { - #[cfg(wrap_proc_macro)] - crate::detection::unforce_fallback(); -} - -#[derive(Clone)] -pub(crate) struct TokenStream { - inner: RcVec, -} - -#[derive(Debug)] -pub(crate) struct LexError { - pub(crate) span: Span, -} - -impl LexError { - pub(crate) fn span(&self) -> Span { - self.span - } - - pub(crate) fn call_site() -> Self { - LexError { - span: Span::call_site(), - } - } -} - -impl TokenStream { - pub(crate) fn new() -> Self { - TokenStream { - inner: RcVecBuilder::new().build(), - } - } - - pub(crate) fn from_str_checked(src: &str) -> Result { - // Create a dummy file & add it to the source map - let mut cursor = get_cursor(src); - - // Strip a byte order mark if present - const BYTE_ORDER_MARK: &str = "\u{feff}"; - if cursor.starts_with(BYTE_ORDER_MARK) { - cursor = cursor.advance(BYTE_ORDER_MARK.len()); - } - - parse::token_stream(cursor) - } - - #[cfg(feature = "proc-macro")] - pub(crate) fn from_str_unchecked(src: &str) -> Self { - Self::from_str_checked(src).unwrap() - } - - pub(crate) fn is_empty(&self) -> bool { - self.inner.len() == 0 - } - - fn take_inner(self) -> RcVecBuilder { - let nodrop = ManuallyDrop::new(self); - unsafe { ptr::read(&nodrop.inner) }.make_owned() - } -} - -fn push_token_from_proc_macro(mut vec: RcVecMut, token: TokenTree) { - // https://github.com/dtolnay/proc-macro2/issues/235 - match token { - TokenTree::Literal(crate::Literal { - #[cfg(wrap_proc_macro)] - inner: crate::imp::Literal::Fallback(literal), - #[cfg(not(wrap_proc_macro))] - inner: literal, - .. - }) if literal.repr.starts_with('-') => { - push_negative_literal(vec, literal); - } - _ => vec.push(token), - } - - #[cold] - fn push_negative_literal(mut vec: RcVecMut, mut literal: Literal) { - literal.repr.remove(0); - let mut punct = crate::Punct::new('-', Spacing::Alone); - punct.set_span(crate::Span::_new_fallback(literal.span)); - vec.push(TokenTree::Punct(punct)); - vec.push(TokenTree::Literal(crate::Literal::_new_fallback(literal))); - } -} - -// Nonrecursive to prevent stack overflow. 
-impl Drop for TokenStream { - fn drop(&mut self) { - let mut stack = Vec::new(); - let mut current = match self.inner.get_mut() { - Some(inner) => inner.take().into_iter(), - None => return, - }; - loop { - while let Some(token) = current.next() { - let group = match token { - TokenTree::Group(group) => group.inner, - _ => continue, - }; - #[cfg(wrap_proc_macro)] - let group = match group { - crate::imp::Group::Fallback(group) => group, - crate::imp::Group::Compiler(_) => continue, - }; - let mut group = group; - if let Some(inner) = group.stream.inner.get_mut() { - stack.push(current); - current = inner.take().into_iter(); - } - } - match stack.pop() { - Some(next) => current = next, - None => return, - } - } - } -} - -pub(crate) struct TokenStreamBuilder { - inner: RcVecBuilder, -} - -impl TokenStreamBuilder { - pub(crate) fn new() -> Self { - TokenStreamBuilder { - inner: RcVecBuilder::new(), - } - } - - pub(crate) fn with_capacity(cap: usize) -> Self { - TokenStreamBuilder { - inner: RcVecBuilder::with_capacity(cap), - } - } - - pub(crate) fn push_token_from_parser(&mut self, tt: TokenTree) { - self.inner.push(tt); - } - - pub(crate) fn build(self) -> TokenStream { - TokenStream { - inner: self.inner.build(), - } - } -} - -#[cfg(span_locations)] -fn get_cursor(src: &str) -> Cursor { - #[cfg(fuzzing)] - return Cursor { rest: src, off: 1 }; - - // Create a dummy file & add it to the source map - #[cfg(not(fuzzing))] - SOURCE_MAP.with(|sm| { - let mut sm = sm.borrow_mut(); - let span = sm.add_file(src); - Cursor { - rest: src, - off: span.lo, - } - }) -} - -#[cfg(not(span_locations))] -fn get_cursor(src: &str) -> Cursor { - Cursor { rest: src } -} - -impl Display for LexError { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.write_str("cannot parse string into token stream") - } -} - -impl Display for TokenStream { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - let mut joint = false; - for (i, tt) in self.inner.iter().enumerate() { - if i != 0 && !joint { - write!(f, " ")?; - } - joint = false; - match tt { - TokenTree::Group(tt) => write!(f, "{}", tt), - TokenTree::Ident(tt) => write!(f, "{}", tt), - TokenTree::Punct(tt) => { - joint = tt.spacing() == Spacing::Joint; - write!(f, "{}", tt) - } - TokenTree::Literal(tt) => write!(f, "{}", tt), - }?; - } - - Ok(()) - } -} - -impl Debug for TokenStream { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.write_str("TokenStream ")?; - f.debug_list().entries(self.clone()).finish() - } -} - -#[cfg(feature = "proc-macro")] -impl From for TokenStream { - fn from(inner: proc_macro::TokenStream) -> Self { - TokenStream::from_str_unchecked(&inner.to_string()) - } -} - -#[cfg(feature = "proc-macro")] -impl From for proc_macro::TokenStream { - fn from(inner: TokenStream) -> Self { - proc_macro::TokenStream::from_str_unchecked(&inner.to_string()) - } -} - -impl From for TokenStream { - fn from(tree: TokenTree) -> Self { - let mut stream = RcVecBuilder::new(); - push_token_from_proc_macro(stream.as_mut(), tree); - TokenStream { - inner: stream.build(), - } - } -} - -impl FromIterator for TokenStream { - fn from_iter>(tokens: I) -> Self { - let mut stream = TokenStream::new(); - stream.extend(tokens); - stream - } -} - -impl FromIterator for TokenStream { - fn from_iter>(streams: I) -> Self { - let mut v = RcVecBuilder::new(); - - for stream in streams { - v.extend(stream.take_inner()); - } - - TokenStream { inner: v.build() } - } -} - -impl Extend for TokenStream { - fn extend>(&mut self, tokens: I) { - let mut vec = 
self.inner.make_mut(); - tokens - .into_iter() - .for_each(|token| push_token_from_proc_macro(vec.as_mut(), token)); - } -} - -impl Extend for TokenStream { - fn extend>(&mut self, streams: I) { - self.inner.make_mut().extend(streams.into_iter().flatten()); - } -} - -pub(crate) type TokenTreeIter = RcVecIntoIter; - -impl IntoIterator for TokenStream { - type Item = TokenTree; - type IntoIter = TokenTreeIter; - - fn into_iter(self) -> TokenTreeIter { - self.take_inner().into_iter() - } -} - -#[cfg(all(span_locations, not(fuzzing)))] -thread_local! { - static SOURCE_MAP: RefCell = RefCell::new(SourceMap { - // Start with a single dummy file which all call_site() and def_site() - // spans reference. - files: vec![FileInfo { - source_text: String::new(), - span: Span { lo: 0, hi: 0 }, - lines: vec![0], - char_index_to_byte_offset: BTreeMap::new(), - }], - }); -} - -#[cfg(span_locations)] -pub(crate) fn invalidate_current_thread_spans() { - #[cfg(not(fuzzing))] - SOURCE_MAP.with(|sm| sm.borrow_mut().files.truncate(1)); -} - -#[cfg(all(span_locations, not(fuzzing)))] -struct FileInfo { - source_text: String, - span: Span, - lines: Vec, - char_index_to_byte_offset: BTreeMap, -} - -#[cfg(all(span_locations, not(fuzzing)))] -impl FileInfo { - fn offset_line_column(&self, offset: usize) -> LineColumn { - assert!(self.span_within(Span { - lo: offset as u32, - hi: offset as u32, - })); - let offset = offset - self.span.lo as usize; - match self.lines.binary_search(&offset) { - Ok(found) => LineColumn { - line: found + 1, - column: 0, - }, - Err(idx) => LineColumn { - line: idx, - column: offset - self.lines[idx - 1], - }, - } - } - - fn span_within(&self, span: Span) -> bool { - span.lo >= self.span.lo && span.hi <= self.span.hi - } - - fn byte_range(&mut self, span: Span) -> Range { - let lo_char = (span.lo - self.span.lo) as usize; - - // Look up offset of the largest already-computed char index that is - // less than or equal to the current requested one. We resume counting - // chars from that point. - let (&last_char_index, &last_byte_offset) = self - .char_index_to_byte_offset - .range(..=lo_char) - .next_back() - .unwrap_or((&0, &0)); - - let lo_byte = if last_char_index == lo_char { - last_byte_offset - } else { - let total_byte_offset = match self.source_text[last_byte_offset..] - .char_indices() - .nth(lo_char - last_char_index) - { - Some((additional_offset, _ch)) => last_byte_offset + additional_offset, - None => self.source_text.len(), - }; - self.char_index_to_byte_offset - .insert(lo_char, total_byte_offset); - total_byte_offset - }; - - let trunc_lo = &self.source_text[lo_byte..]; - let char_len = (span.hi - span.lo) as usize; - lo_byte..match trunc_lo.char_indices().nth(char_len) { - Some((offset, _ch)) => lo_byte + offset, - None => self.source_text.len(), - } - } - - fn source_text(&mut self, span: Span) -> String { - let byte_range = self.byte_range(span); - self.source_text[byte_range].to_owned() - } -} - -/// Computes the offsets of each line in the given source string -/// and the total number of characters -#[cfg(all(span_locations, not(fuzzing)))] -fn lines_offsets(s: &str) -> (usize, Vec) { - let mut lines = vec![0]; - let mut total = 0; - - for ch in s.chars() { - total += 1; - if ch == '\n' { - lines.push(total); - } - } - - (total, lines) -} - -#[cfg(all(span_locations, not(fuzzing)))] -struct SourceMap { - files: Vec, -} - -#[cfg(all(span_locations, not(fuzzing)))] -impl SourceMap { - fn next_start_pos(&self) -> u32 { - // Add 1 so there's always space between files. 
- // - // We'll always have at least 1 file, as we initialize our files list - // with a dummy file. - self.files.last().unwrap().span.hi + 1 - } - - fn add_file(&mut self, src: &str) -> Span { - let (len, lines) = lines_offsets(src); - let lo = self.next_start_pos(); - let span = Span { - lo, - hi: lo + (len as u32), - }; - - self.files.push(FileInfo { - source_text: src.to_owned(), - span, - lines, - // Populated lazily by source_text(). - char_index_to_byte_offset: BTreeMap::new(), - }); - - span - } - - fn find(&self, span: Span) -> usize { - match self.files.binary_search_by(|file| { - if file.span.hi < span.lo { - Ordering::Less - } else if file.span.lo > span.hi { - Ordering::Greater - } else { - assert!(file.span_within(span)); - Ordering::Equal - } - }) { - Ok(i) => i, - Err(_) => unreachable!("Invalid span with no related FileInfo!"), - } - } - - fn filepath(&self, span: Span) -> String { - let i = self.find(span); - if i == 0 { - "".to_owned() - } else { - format!("", i) - } - } - - fn fileinfo(&self, span: Span) -> &FileInfo { - let i = self.find(span); - &self.files[i] - } - - fn fileinfo_mut(&mut self, span: Span) -> &mut FileInfo { - let i = self.find(span); - &mut self.files[i] - } -} - -#[derive(Clone, Copy, PartialEq, Eq)] -pub(crate) struct Span { - #[cfg(span_locations)] - pub(crate) lo: u32, - #[cfg(span_locations)] - pub(crate) hi: u32, -} - -impl Span { - #[cfg(not(span_locations))] - pub(crate) fn call_site() -> Self { - Span {} - } - - #[cfg(span_locations)] - pub(crate) fn call_site() -> Self { - Span { lo: 0, hi: 0 } - } - - pub(crate) fn mixed_site() -> Self { - Span::call_site() - } - - #[cfg(procmacro2_semver_exempt)] - pub(crate) fn def_site() -> Self { - Span::call_site() - } - - pub(crate) fn resolved_at(&self, _other: Span) -> Span { - // Stable spans consist only of line/column information, so - // `resolved_at` and `located_at` only select which span the - // caller wants line/column information from. 
- *self - } - - pub(crate) fn located_at(&self, other: Span) -> Span { - other - } - - #[cfg(span_locations)] - pub(crate) fn byte_range(&self) -> Range { - #[cfg(fuzzing)] - return 0..0; - - #[cfg(not(fuzzing))] - { - if self.is_call_site() { - 0..0 - } else { - SOURCE_MAP.with(|sm| sm.borrow_mut().fileinfo_mut(*self).byte_range(*self)) - } - } - } - - #[cfg(span_locations)] - pub(crate) fn start(&self) -> LineColumn { - #[cfg(fuzzing)] - return LineColumn { line: 0, column: 0 }; - - #[cfg(not(fuzzing))] - SOURCE_MAP.with(|sm| { - let sm = sm.borrow(); - let fi = sm.fileinfo(*self); - fi.offset_line_column(self.lo as usize) - }) - } - - #[cfg(span_locations)] - pub(crate) fn end(&self) -> LineColumn { - #[cfg(fuzzing)] - return LineColumn { line: 0, column: 0 }; - - #[cfg(not(fuzzing))] - SOURCE_MAP.with(|sm| { - let sm = sm.borrow(); - let fi = sm.fileinfo(*self); - fi.offset_line_column(self.hi as usize) - }) - } - - #[cfg(span_locations)] - pub(crate) fn file(&self) -> String { - #[cfg(fuzzing)] - return "".to_owned(); - - #[cfg(not(fuzzing))] - SOURCE_MAP.with(|sm| { - let sm = sm.borrow(); - sm.filepath(*self) - }) - } - - #[cfg(span_locations)] - pub(crate) fn local_file(&self) -> Option { - None - } - - #[cfg(not(span_locations))] - pub(crate) fn join(&self, _other: Span) -> Option { - Some(Span {}) - } - - #[cfg(span_locations)] - pub(crate) fn join(&self, other: Span) -> Option { - #[cfg(fuzzing)] - return { - let _ = other; - None - }; - - #[cfg(not(fuzzing))] - SOURCE_MAP.with(|sm| { - let sm = sm.borrow(); - // If `other` is not within the same FileInfo as us, return None. - if !sm.fileinfo(*self).span_within(other) { - return None; - } - Some(Span { - lo: cmp::min(self.lo, other.lo), - hi: cmp::max(self.hi, other.hi), - }) - }) - } - - #[cfg(not(span_locations))] - pub(crate) fn source_text(&self) -> Option { - None - } - - #[cfg(span_locations)] - pub(crate) fn source_text(&self) -> Option { - #[cfg(fuzzing)] - return None; - - #[cfg(not(fuzzing))] - { - if self.is_call_site() { - None - } else { - Some(SOURCE_MAP.with(|sm| sm.borrow_mut().fileinfo_mut(*self).source_text(*self))) - } - } - } - - #[cfg(not(span_locations))] - pub(crate) fn first_byte(self) -> Self { - self - } - - #[cfg(span_locations)] - pub(crate) fn first_byte(self) -> Self { - Span { - lo: self.lo, - hi: cmp::min(self.lo.saturating_add(1), self.hi), - } - } - - #[cfg(not(span_locations))] - pub(crate) fn last_byte(self) -> Self { - self - } - - #[cfg(span_locations)] - pub(crate) fn last_byte(self) -> Self { - Span { - lo: cmp::max(self.hi.saturating_sub(1), self.lo), - hi: self.hi, - } - } - - #[cfg(span_locations)] - fn is_call_site(&self) -> bool { - self.lo == 0 && self.hi == 0 - } -} - -impl Debug for Span { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - #[cfg(span_locations)] - return write!(f, "bytes({}..{})", self.lo, self.hi); - - #[cfg(not(span_locations))] - write!(f, "Span") - } -} - -pub(crate) fn debug_span_field_if_nontrivial(debug: &mut fmt::DebugStruct, span: Span) { - #[cfg(span_locations)] - { - if span.is_call_site() { - return; - } - } - - if cfg!(span_locations) { - debug.field("span", &span); - } -} - -#[derive(Clone)] -pub(crate) struct Group { - delimiter: Delimiter, - stream: TokenStream, - span: Span, -} - -impl Group { - pub(crate) fn new(delimiter: Delimiter, stream: TokenStream) -> Self { - Group { - delimiter, - stream, - span: Span::call_site(), - } - } - - pub(crate) fn delimiter(&self) -> Delimiter { - self.delimiter - } - - pub(crate) fn stream(&self) -> 
TokenStream { - self.stream.clone() - } - - pub(crate) fn span(&self) -> Span { - self.span - } - - pub(crate) fn span_open(&self) -> Span { - self.span.first_byte() - } - - pub(crate) fn span_close(&self) -> Span { - self.span.last_byte() - } - - pub(crate) fn set_span(&mut self, span: Span) { - self.span = span; - } -} - -impl Display for Group { - // We attempt to match libproc_macro's formatting. - // Empty parens: () - // Nonempty parens: (...) - // Empty brackets: [] - // Nonempty brackets: [...] - // Empty braces: { } - // Nonempty braces: { ... } - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - let (open, close) = match self.delimiter { - Delimiter::Parenthesis => ("(", ")"), - Delimiter::Brace => ("{ ", "}"), - Delimiter::Bracket => ("[", "]"), - Delimiter::None => ("", ""), - }; - - f.write_str(open)?; - Display::fmt(&self.stream, f)?; - if self.delimiter == Delimiter::Brace && !self.stream.inner.is_empty() { - f.write_str(" ")?; - } - f.write_str(close)?; - - Ok(()) - } -} - -impl Debug for Group { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - let mut debug = fmt.debug_struct("Group"); - debug.field("delimiter", &self.delimiter); - debug.field("stream", &self.stream); - debug_span_field_if_nontrivial(&mut debug, self.span); - debug.finish() - } -} - -#[derive(Clone)] -pub(crate) struct Ident { - sym: Box, - span: Span, - raw: bool, -} - -impl Ident { - #[track_caller] - pub(crate) fn new_checked(string: &str, span: Span) -> Self { - validate_ident(string); - Ident::new_unchecked(string, span) - } - - pub(crate) fn new_unchecked(string: &str, span: Span) -> Self { - Ident { - sym: Box::from(string), - span, - raw: false, - } - } - - #[track_caller] - pub(crate) fn new_raw_checked(string: &str, span: Span) -> Self { - validate_ident_raw(string); - Ident::new_raw_unchecked(string, span) - } - - pub(crate) fn new_raw_unchecked(string: &str, span: Span) -> Self { - Ident { - sym: Box::from(string), - span, - raw: true, - } - } - - pub(crate) fn span(&self) -> Span { - self.span - } - - pub(crate) fn set_span(&mut self, span: Span) { - self.span = span; - } -} - -pub(crate) fn is_ident_start(c: char) -> bool { - c == '_' || unicode_ident::is_xid_start(c) -} - -pub(crate) fn is_ident_continue(c: char) -> bool { - unicode_ident::is_xid_continue(c) -} - -#[track_caller] -fn validate_ident(string: &str) { - if string.is_empty() { - panic!("Ident is not allowed to be empty; use Option"); - } - - if string.bytes().all(|digit| b'0' <= digit && digit <= b'9') { - panic!("Ident cannot be a number; use Literal instead"); - } - - fn ident_ok(string: &str) -> bool { - let mut chars = string.chars(); - let first = chars.next().unwrap(); - if !is_ident_start(first) { - return false; - } - for ch in chars { - if !is_ident_continue(ch) { - return false; - } - } - true - } - - if !ident_ok(string) { - panic!("{:?} is not a valid Ident", string); - } -} - -#[track_caller] -fn validate_ident_raw(string: &str) { - validate_ident(string); - - match string { - "_" | "super" | "self" | "Self" | "crate" => { - panic!("`r#{}` cannot be a raw identifier", string); - } - _ => {} - } -} - -impl PartialEq for Ident { - fn eq(&self, other: &Ident) -> bool { - self.sym == other.sym && self.raw == other.raw - } -} - -impl PartialEq for Ident -where - T: ?Sized + AsRef, -{ - fn eq(&self, other: &T) -> bool { - let other = other.as_ref(); - if self.raw { - other.starts_with("r#") && *self.sym == other[2..] 
- } else { - *self.sym == *other - } - } -} - -impl Display for Ident { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - if self.raw { - f.write_str("r#")?; - } - f.write_str(&self.sym) - } -} - -#[allow(clippy::missing_fields_in_debug)] -impl Debug for Ident { - // Ident(proc_macro), Ident(r#union) - #[cfg(not(span_locations))] - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - let mut debug = f.debug_tuple("Ident"); - debug.field(&format_args!("{}", self)); - debug.finish() - } - - // Ident { - // sym: proc_macro, - // span: bytes(128..138) - // } - #[cfg(span_locations)] - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - let mut debug = f.debug_struct("Ident"); - debug.field("sym", &format_args!("{}", self)); - debug_span_field_if_nontrivial(&mut debug, self.span); - debug.finish() - } -} - -#[derive(Clone)] -pub(crate) struct Literal { - pub(crate) repr: String, - span: Span, -} - -macro_rules! suffixed_numbers { - ($($name:ident => $kind:ident,)*) => ($( - pub(crate) fn $name(n: $kind) -> Literal { - Literal::_new(format!(concat!("{}", stringify!($kind)), n)) - } - )*) -} - -macro_rules! unsuffixed_numbers { - ($($name:ident => $kind:ident,)*) => ($( - pub(crate) fn $name(n: $kind) -> Literal { - Literal::_new(n.to_string()) - } - )*) -} - -impl Literal { - pub(crate) fn _new(repr: String) -> Self { - Literal { - repr, - span: Span::call_site(), - } - } - - pub(crate) fn from_str_checked(repr: &str) -> Result { - let mut cursor = get_cursor(repr); - #[cfg(span_locations)] - let lo = cursor.off; - - let negative = cursor.starts_with_char('-'); - if negative { - cursor = cursor.advance(1); - if !cursor.starts_with_fn(|ch| ch.is_ascii_digit()) { - return Err(LexError::call_site()); - } - } - - if let Ok((rest, mut literal)) = parse::literal(cursor) { - if rest.is_empty() { - if negative { - literal.repr.insert(0, '-'); - } - literal.span = Span { - #[cfg(span_locations)] - lo, - #[cfg(span_locations)] - hi: rest.off, - }; - return Ok(literal); - } - } - Err(LexError::call_site()) - } - - pub(crate) unsafe fn from_str_unchecked(repr: &str) -> Self { - Literal::_new(repr.to_owned()) - } - - suffixed_numbers! { - u8_suffixed => u8, - u16_suffixed => u16, - u32_suffixed => u32, - u64_suffixed => u64, - u128_suffixed => u128, - usize_suffixed => usize, - i8_suffixed => i8, - i16_suffixed => i16, - i32_suffixed => i32, - i64_suffixed => i64, - i128_suffixed => i128, - isize_suffixed => isize, - - f32_suffixed => f32, - f64_suffixed => f64, - } - - unsuffixed_numbers! { - u8_unsuffixed => u8, - u16_unsuffixed => u16, - u32_unsuffixed => u32, - u64_unsuffixed => u64, - u128_unsuffixed => u128, - usize_unsuffixed => usize, - i8_unsuffixed => i8, - i16_unsuffixed => i16, - i32_unsuffixed => i32, - i64_unsuffixed => i64, - i128_unsuffixed => i128, - isize_unsuffixed => isize, - } - - pub(crate) fn f32_unsuffixed(f: f32) -> Literal { - let mut s = f.to_string(); - if !s.contains('.') { - s.push_str(".0"); - } - Literal::_new(s) - } - - pub(crate) fn f64_unsuffixed(f: f64) -> Literal { - let mut s = f.to_string(); - if !s.contains('.') { - s.push_str(".0"); - } - Literal::_new(s) - } - - pub(crate) fn string(string: &str) -> Literal { - let mut repr = String::with_capacity(string.len() + 2); - repr.push('"'); - escape_utf8(string, &mut repr); - repr.push('"'); - Literal::_new(repr) - } - - pub(crate) fn character(ch: char) -> Literal { - let mut repr = String::new(); - repr.push('\''); - if ch == '"' { - // escape_debug turns this into '\"' which is unnecessary. 
- repr.push(ch); - } else { - repr.extend(ch.escape_debug()); - } - repr.push('\''); - Literal::_new(repr) - } - - pub(crate) fn byte_character(byte: u8) -> Literal { - let mut repr = "b'".to_string(); - #[allow(clippy::match_overlapping_arm)] - match byte { - b'\0' => repr.push_str(r"\0"), - b'\t' => repr.push_str(r"\t"), - b'\n' => repr.push_str(r"\n"), - b'\r' => repr.push_str(r"\r"), - b'\'' => repr.push_str(r"\'"), - b'\\' => repr.push_str(r"\\"), - b'\x20'..=b'\x7E' => repr.push(byte as char), - _ => { - let _ = write!(repr, r"\x{:02X}", byte); - } - } - repr.push('\''); - Literal::_new(repr) - } - - pub(crate) fn byte_string(bytes: &[u8]) -> Literal { - let mut repr = "b\"".to_string(); - let mut bytes = bytes.iter(); - while let Some(&b) = bytes.next() { - #[allow(clippy::match_overlapping_arm)] - match b { - b'\0' => repr.push_str(match bytes.as_slice().first() { - // circumvent clippy::octal_escapes lint - Some(b'0'..=b'7') => r"\x00", - _ => r"\0", - }), - b'\t' => repr.push_str(r"\t"), - b'\n' => repr.push_str(r"\n"), - b'\r' => repr.push_str(r"\r"), - b'"' => repr.push_str("\\\""), - b'\\' => repr.push_str(r"\\"), - b'\x20'..=b'\x7E' => repr.push(b as char), - _ => { - let _ = write!(repr, r"\x{:02X}", b); - } - } - } - repr.push('"'); - Literal::_new(repr) - } - - pub(crate) fn c_string(string: &CStr) -> Literal { - let mut repr = "c\"".to_string(); - let mut bytes = string.to_bytes(); - while !bytes.is_empty() { - let (valid, invalid) = match str::from_utf8(bytes) { - Ok(all_valid) => { - bytes = b""; - (all_valid, bytes) - } - Err(utf8_error) => { - let (valid, rest) = bytes.split_at(utf8_error.valid_up_to()); - let valid = str::from_utf8(valid).unwrap(); - let invalid = utf8_error - .error_len() - .map_or(rest, |error_len| &rest[..error_len]); - bytes = &bytes[valid.len() + invalid.len()..]; - (valid, invalid) - } - }; - escape_utf8(valid, &mut repr); - for &byte in invalid { - let _ = write!(repr, r"\x{:02X}", byte); - } - } - repr.push('"'); - Literal::_new(repr) - } - - pub(crate) fn span(&self) -> Span { - self.span - } - - pub(crate) fn set_span(&mut self, span: Span) { - self.span = span; - } - - pub(crate) fn subspan>(&self, range: R) -> Option { - #[cfg(not(span_locations))] - { - let _ = range; - None - } - - #[cfg(span_locations)] - { - use core::ops::Bound; - - let lo = match range.start_bound() { - Bound::Included(start) => { - let start = u32::try_from(*start).ok()?; - self.span.lo.checked_add(start)? - } - Bound::Excluded(start) => { - let start = u32::try_from(*start).ok()?; - self.span.lo.checked_add(start)?.checked_add(1)? - } - Bound::Unbounded => self.span.lo, - }; - let hi = match range.end_bound() { - Bound::Included(end) => { - let end = u32::try_from(*end).ok()?; - self.span.lo.checked_add(end)?.checked_add(1)? - } - Bound::Excluded(end) => { - let end = u32::try_from(*end).ok()?; - self.span.lo.checked_add(end)? 
- } - Bound::Unbounded => self.span.hi, - }; - if lo <= hi && hi <= self.span.hi { - Some(Span { lo, hi }) - } else { - None - } - } - } -} - -impl Display for Literal { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - Display::fmt(&self.repr, f) - } -} - -impl Debug for Literal { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - let mut debug = fmt.debug_struct("Literal"); - debug.field("lit", &format_args!("{}", self.repr)); - debug_span_field_if_nontrivial(&mut debug, self.span); - debug.finish() - } -} - -fn escape_utf8(string: &str, repr: &mut String) { - let mut chars = string.chars(); - while let Some(ch) = chars.next() { - if ch == '\0' { - repr.push_str( - if chars - .as_str() - .starts_with(|next| '0' <= next && next <= '7') - { - // circumvent clippy::octal_escapes lint - r"\x00" - } else { - r"\0" - }, - ); - } else if ch == '\'' { - // escape_debug turns this into "\'" which is unnecessary. - repr.push(ch); - } else { - repr.extend(ch.escape_debug()); - } - } -} - -#[cfg(feature = "proc-macro")] -pub(crate) trait FromStr2: FromStr { - #[cfg(wrap_proc_macro)] - fn valid(src: &str) -> bool; - - #[cfg(wrap_proc_macro)] - fn from_str_checked(src: &str) -> Result { - // Validate using fallback parser, because rustc is incapable of - // returning a recoverable Err for certain invalid token streams, and - // will instead permanently poison the compilation. - if !Self::valid(src) { - return Err(imp::LexError::CompilerPanic); - } - - // Catch panic to work around https://github.com/rust-lang/rust/issues/58736. - match panic::catch_unwind(|| Self::from_str(src)) { - Ok(Ok(ok)) => Ok(ok), - Ok(Err(lex)) => Err(imp::LexError::Compiler(lex)), - Err(_panic) => Err(imp::LexError::CompilerPanic), - } - } - - fn from_str_unchecked(src: &str) -> Self { - Self::from_str(src).unwrap() - } -} - -#[cfg(feature = "proc-macro")] -impl FromStr2 for proc_macro::TokenStream { - #[cfg(wrap_proc_macro)] - fn valid(src: &str) -> bool { - TokenStream::from_str_checked(src).is_ok() - } -} - -#[cfg(feature = "proc-macro")] -impl FromStr2 for proc_macro::Literal { - #[cfg(wrap_proc_macro)] - fn valid(src: &str) -> bool { - Literal::from_str_checked(src).is_ok() - } -} diff --git a/vendor/proc-macro2/src/lib.rs b/vendor/proc-macro2/src/lib.rs deleted file mode 100644 index 7952afaa491bdc..00000000000000 --- a/vendor/proc-macro2/src/lib.rs +++ /dev/null @@ -1,1495 +0,0 @@ -//! [![github]](https://github.com/dtolnay/proc-macro2) [![crates-io]](https://crates.io/crates/proc-macro2) [![docs-rs]](crate) -//! -//! [github]: https://img.shields.io/badge/github-8da0cb?style=for-the-badge&labelColor=555555&logo=github -//! [crates-io]: https://img.shields.io/badge/crates.io-fc8d62?style=for-the-badge&labelColor=555555&logo=rust -//! [docs-rs]: https://img.shields.io/badge/docs.rs-66c2a5?style=for-the-badge&labelColor=555555&logo=docs.rs -//! -//!
-//! -//! A wrapper around the procedural macro API of the compiler's [`proc_macro`] -//! crate. This library serves two purposes: -//! -//! - **Bring proc-macro-like functionality to other contexts like build.rs and -//! main.rs.** Types from `proc_macro` are entirely specific to procedural -//! macros and cannot ever exist in code outside of a procedural macro. -//! Meanwhile `proc_macro2` types may exist anywhere including non-macro code. -//! By developing foundational libraries like [syn] and [quote] against -//! `proc_macro2` rather than `proc_macro`, the procedural macro ecosystem -//! becomes easily applicable to many other use cases and we avoid -//! reimplementing non-macro equivalents of those libraries. -//! -//! - **Make procedural macros unit testable.** As a consequence of being -//! specific to procedural macros, nothing that uses `proc_macro` can be -//! executed from a unit test. In order for helper libraries or components of -//! a macro to be testable in isolation, they must be implemented using -//! `proc_macro2`. -//! -//! [syn]: https://github.com/dtolnay/syn -//! [quote]: https://github.com/dtolnay/quote -//! -//! # Usage -//! -//! The skeleton of a typical procedural macro typically looks like this: -//! -//! ``` -//! extern crate proc_macro; -//! -//! # const IGNORE: &str = stringify! { -//! #[proc_macro_derive(MyDerive)] -//! # }; -//! # #[cfg(wrap_proc_macro)] -//! pub fn my_derive(input: proc_macro::TokenStream) -> proc_macro::TokenStream { -//! let input = proc_macro2::TokenStream::from(input); -//! -//! let output: proc_macro2::TokenStream = { -//! /* transform input */ -//! # input -//! }; -//! -//! proc_macro::TokenStream::from(output) -//! } -//! ``` -//! -//! If parsing with [Syn], you'll use [`parse_macro_input!`] instead to -//! propagate parse errors correctly back to the compiler when parsing fails. -//! -//! [`parse_macro_input!`]: https://docs.rs/syn/2.0/syn/macro.parse_macro_input.html -//! -//! # Unstable features -//! -//! The default feature set of proc-macro2 tracks the most recent stable -//! compiler API. Functionality in `proc_macro` that is not yet stable is not -//! exposed by proc-macro2 by default. -//! -//! To opt into the additional APIs available in the most recent nightly -//! compiler, the `procmacro2_semver_exempt` config flag must be passed to -//! rustc. We will polyfill those nightly-only APIs back to Rust 1.60.0. As -//! these are unstable APIs that track the nightly compiler, minor versions of -//! proc-macro2 may make breaking changes to them at any time. -//! -//! ```sh -//! RUSTFLAGS='--cfg procmacro2_semver_exempt' cargo build -//! ``` -//! -//! Note that this must not only be done for your crate, but for any crate that -//! depends on your crate. This infectious nature is intentional, as it serves -//! as a reminder that you are outside of the normal semver guarantees. -//! -//! Semver exempt methods are marked as such in the proc-macro2 documentation. -//! -//! # Thread-Safety -//! -//! Most types in this crate are `!Sync` because the underlying compiler -//! types make use of thread-local memory, meaning they cannot be accessed from -//! a different thread. - -// Proc-macro2 types in rustdoc of other crates get linked to here. 
-#![doc(html_root_url = "https://docs.rs/proc-macro2/1.0.103")] -#![cfg_attr(any(proc_macro_span, super_unstable), feature(proc_macro_span))] -#![cfg_attr(super_unstable, feature(proc_macro_def_site))] -#![cfg_attr(docsrs, feature(doc_cfg))] -#![deny(unsafe_op_in_unsafe_fn)] -#![allow( - clippy::cast_lossless, - clippy::cast_possible_truncation, - clippy::checked_conversions, - clippy::doc_markdown, - clippy::elidable_lifetime_names, - clippy::incompatible_msrv, - clippy::items_after_statements, - clippy::iter_without_into_iter, - clippy::let_underscore_untyped, - clippy::manual_assert, - clippy::manual_range_contains, - clippy::missing_panics_doc, - clippy::missing_safety_doc, - clippy::must_use_candidate, - clippy::needless_doctest_main, - clippy::needless_lifetimes, - clippy::new_without_default, - clippy::return_self_not_must_use, - clippy::shadow_unrelated, - clippy::trivially_copy_pass_by_ref, - clippy::uninlined_format_args, - clippy::unnecessary_wraps, - clippy::unused_self, - clippy::used_underscore_binding, - clippy::vec_init_then_push -)] -#![allow(unknown_lints, mismatched_lifetime_syntaxes)] - -#[cfg(all(procmacro2_semver_exempt, wrap_proc_macro, not(super_unstable)))] -compile_error! {"\ - Something is not right. If you've tried to turn on \ - procmacro2_semver_exempt, you need to ensure that it \ - is turned on for the compilation of the proc-macro2 \ - build script as well. -"} - -#[cfg(all( - procmacro2_nightly_testing, - feature = "proc-macro", - not(proc_macro_span) -))] -compile_error! {"\ - Build script probe failed to compile. -"} - -extern crate alloc; - -#[cfg(feature = "proc-macro")] -extern crate proc_macro; - -mod marker; -mod parse; -mod probe; -mod rcvec; - -#[cfg(wrap_proc_macro)] -mod detection; - -// Public for proc_macro2::fallback::force() and unforce(), but those are quite -// a niche use case so we omit it from rustdoc. -#[doc(hidden)] -pub mod fallback; - -pub mod extra; - -#[cfg(not(wrap_proc_macro))] -use crate::fallback as imp; -#[path = "wrapper.rs"] -#[cfg(wrap_proc_macro)] -mod imp; - -#[cfg(span_locations)] -mod location; - -#[cfg(procmacro2_semver_exempt)] -mod num; -#[cfg(procmacro2_semver_exempt)] -#[allow(dead_code)] -mod rustc_literal_escaper; - -use crate::extra::DelimSpan; -use crate::marker::{ProcMacroAutoTraits, MARKER}; -#[cfg(procmacro2_semver_exempt)] -use crate::rustc_literal_escaper::MixedUnit; -use core::cmp::Ordering; -use core::fmt::{self, Debug, Display}; -use core::hash::{Hash, Hasher}; -#[cfg(span_locations)] -use core::ops::Range; -use core::ops::RangeBounds; -use core::str::FromStr; -use std::error::Error; -use std::ffi::CStr; -#[cfg(span_locations)] -use std::path::PathBuf; - -#[cfg(span_locations)] -#[cfg_attr(docsrs, doc(cfg(feature = "span-locations")))] -pub use crate::location::LineColumn; - -#[cfg(procmacro2_semver_exempt)] -#[cfg_attr(docsrs, doc(cfg(procmacro2_semver_exempt)))] -pub use crate::rustc_literal_escaper::EscapeError; - -/// An abstract stream of tokens, or more concretely a sequence of token trees. -/// -/// This type provides interfaces for iterating over token trees and for -/// collecting token trees into one stream. -/// -/// Token stream is both the input and output of `#[proc_macro]`, -/// `#[proc_macro_attribute]` and `#[proc_macro_derive]` definitions. -#[derive(Clone)] -pub struct TokenStream { - inner: imp::TokenStream, - _marker: ProcMacroAutoTraits, -} - -/// Error returned from `TokenStream::from_str`. 
-pub struct LexError { - inner: imp::LexError, - _marker: ProcMacroAutoTraits, -} - -impl TokenStream { - fn _new(inner: imp::TokenStream) -> Self { - TokenStream { - inner, - _marker: MARKER, - } - } - - fn _new_fallback(inner: fallback::TokenStream) -> Self { - TokenStream { - inner: imp::TokenStream::from(inner), - _marker: MARKER, - } - } - - /// Returns an empty `TokenStream` containing no token trees. - pub fn new() -> Self { - TokenStream::_new(imp::TokenStream::new()) - } - - /// Checks if this `TokenStream` is empty. - pub fn is_empty(&self) -> bool { - self.inner.is_empty() - } -} - -/// `TokenStream::default()` returns an empty stream, -/// i.e. this is equivalent with `TokenStream::new()`. -impl Default for TokenStream { - fn default() -> Self { - TokenStream::new() - } -} - -/// Attempts to break the string into tokens and parse those tokens into a token -/// stream. -/// -/// May fail for a number of reasons, for example, if the string contains -/// unbalanced delimiters or characters not existing in the language. -/// -/// NOTE: Some errors may cause panics instead of returning `LexError`. We -/// reserve the right to change these errors into `LexError`s later. -impl FromStr for TokenStream { - type Err = LexError; - - fn from_str(src: &str) -> Result { - match imp::TokenStream::from_str_checked(src) { - Ok(tokens) => Ok(TokenStream::_new(tokens)), - Err(lex) => Err(LexError { - inner: lex, - _marker: MARKER, - }), - } - } -} - -#[cfg(feature = "proc-macro")] -#[cfg_attr(docsrs, doc(cfg(feature = "proc-macro")))] -impl From for TokenStream { - fn from(inner: proc_macro::TokenStream) -> Self { - TokenStream::_new(imp::TokenStream::from(inner)) - } -} - -#[cfg(feature = "proc-macro")] -#[cfg_attr(docsrs, doc(cfg(feature = "proc-macro")))] -impl From for proc_macro::TokenStream { - fn from(inner: TokenStream) -> Self { - proc_macro::TokenStream::from(inner.inner) - } -} - -impl From for TokenStream { - fn from(token: TokenTree) -> Self { - TokenStream::_new(imp::TokenStream::from(token)) - } -} - -impl Extend for TokenStream { - fn extend>(&mut self, streams: I) { - self.inner.extend(streams); - } -} - -impl Extend for TokenStream { - fn extend>(&mut self, streams: I) { - self.inner - .extend(streams.into_iter().map(|stream| stream.inner)); - } -} - -/// Collects a number of token trees into a single stream. -impl FromIterator for TokenStream { - fn from_iter>(streams: I) -> Self { - TokenStream::_new(streams.into_iter().collect()) - } -} -impl FromIterator for TokenStream { - fn from_iter>(streams: I) -> Self { - TokenStream::_new(streams.into_iter().map(|i| i.inner).collect()) - } -} - -/// Prints the token stream as a string that is supposed to be losslessly -/// convertible back into the same token stream (modulo spans), except for -/// possibly `TokenTree::Group`s with `Delimiter::None` delimiters and negative -/// numeric literals. -impl Display for TokenStream { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - Display::fmt(&self.inner, f) - } -} - -/// Prints token in a form convenient for debugging. 
-impl Debug for TokenStream { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - Debug::fmt(&self.inner, f) - } -} - -impl LexError { - pub fn span(&self) -> Span { - Span::_new(self.inner.span()) - } -} - -impl Debug for LexError { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - Debug::fmt(&self.inner, f) - } -} - -impl Display for LexError { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - Display::fmt(&self.inner, f) - } -} - -impl Error for LexError {} - -/// A region of source code, along with macro expansion information. -#[derive(Copy, Clone)] -pub struct Span { - inner: imp::Span, - _marker: ProcMacroAutoTraits, -} - -impl Span { - fn _new(inner: imp::Span) -> Self { - Span { - inner, - _marker: MARKER, - } - } - - fn _new_fallback(inner: fallback::Span) -> Self { - Span { - inner: imp::Span::from(inner), - _marker: MARKER, - } - } - - /// The span of the invocation of the current procedural macro. - /// - /// Identifiers created with this span will be resolved as if they were - /// written directly at the macro call location (call-site hygiene) and - /// other code at the macro call site will be able to refer to them as well. - pub fn call_site() -> Self { - Span::_new(imp::Span::call_site()) - } - - /// The span located at the invocation of the procedural macro, but with - /// local variables, labels, and `$crate` resolved at the definition site - /// of the macro. This is the same hygiene behavior as `macro_rules`. - pub fn mixed_site() -> Self { - Span::_new(imp::Span::mixed_site()) - } - - /// A span that resolves at the macro definition site. - /// - /// This method is semver exempt and not exposed by default. - #[cfg(procmacro2_semver_exempt)] - #[cfg_attr(docsrs, doc(cfg(procmacro2_semver_exempt)))] - pub fn def_site() -> Self { - Span::_new(imp::Span::def_site()) - } - - /// Creates a new span with the same line/column information as `self` but - /// that resolves symbols as though it were at `other`. - pub fn resolved_at(&self, other: Span) -> Span { - Span::_new(self.inner.resolved_at(other.inner)) - } - - /// Creates a new span with the same name resolution behavior as `self` but - /// with the line/column information of `other`. - pub fn located_at(&self, other: Span) -> Span { - Span::_new(self.inner.located_at(other.inner)) - } - - /// Convert `proc_macro2::Span` to `proc_macro::Span`. - /// - /// This method is available when building with a nightly compiler, or when - /// building with rustc 1.29+ *without* semver exempt features. - /// - /// # Panics - /// - /// Panics if called from outside of a procedural macro. Unlike - /// `proc_macro2::Span`, the `proc_macro::Span` type can only exist within - /// the context of a procedural macro invocation. - #[cfg(wrap_proc_macro)] - pub fn unwrap(self) -> proc_macro::Span { - self.inner.unwrap() - } - - // Soft deprecated. Please use Span::unwrap. - #[cfg(wrap_proc_macro)] - #[doc(hidden)] - pub fn unstable(self) -> proc_macro::Span { - self.unwrap() - } - - /// Returns the span's byte position range in the source file. - /// - /// This method requires the `"span-locations"` feature to be enabled. - /// - /// When executing in a procedural macro context, the returned range is only - /// accurate if compiled with a nightly toolchain. The stable toolchain does - /// not have this information available. When executing outside of a - /// procedural macro, such as main.rs or build.rs, the byte range is always - /// accurate regardless of toolchain. 
- #[cfg(span_locations)] - #[cfg_attr(docsrs, doc(cfg(feature = "span-locations")))] - pub fn byte_range(&self) -> Range { - self.inner.byte_range() - } - - /// Get the starting line/column in the source file for this span. - /// - /// This method requires the `"span-locations"` feature to be enabled. - /// - /// When executing in a procedural macro context, the returned line/column - /// are only meaningful if compiled with a nightly toolchain. The stable - /// toolchain does not have this information available. When executing - /// outside of a procedural macro, such as main.rs or build.rs, the - /// line/column are always meaningful regardless of toolchain. - #[cfg(span_locations)] - #[cfg_attr(docsrs, doc(cfg(feature = "span-locations")))] - pub fn start(&self) -> LineColumn { - self.inner.start() - } - - /// Get the ending line/column in the source file for this span. - /// - /// This method requires the `"span-locations"` feature to be enabled. - /// - /// When executing in a procedural macro context, the returned line/column - /// are only meaningful if compiled with a nightly toolchain. The stable - /// toolchain does not have this information available. When executing - /// outside of a procedural macro, such as main.rs or build.rs, the - /// line/column are always meaningful regardless of toolchain. - #[cfg(span_locations)] - #[cfg_attr(docsrs, doc(cfg(feature = "span-locations")))] - pub fn end(&self) -> LineColumn { - self.inner.end() - } - - /// The path to the source file in which this span occurs, for display - /// purposes. - /// - /// This might not correspond to a valid file system path. It might be - /// remapped, or might be an artificial path such as `""`. - #[cfg(span_locations)] - #[cfg_attr(docsrs, doc(cfg(feature = "span-locations")))] - pub fn file(&self) -> String { - self.inner.file() - } - - /// The path to the source file in which this span occurs on disk. - /// - /// This is the actual path on disk. It is unaffected by path remapping. - /// - /// This path should not be embedded in the output of the macro; prefer - /// `file()` instead. - #[cfg(span_locations)] - #[cfg_attr(docsrs, doc(cfg(feature = "span-locations")))] - pub fn local_file(&self) -> Option { - self.inner.local_file() - } - - /// Create a new span encompassing `self` and `other`. - /// - /// Returns `None` if `self` and `other` are from different files. - /// - /// Warning: the underlying [`proc_macro::Span::join`] method is - /// nightly-only. When called from within a procedural macro not using a - /// nightly compiler, this method will always return `None`. - pub fn join(&self, other: Span) -> Option { - self.inner.join(other.inner).map(Span::_new) - } - - /// Compares two spans to see if they're equal. - /// - /// This method is semver exempt and not exposed by default. - #[cfg(procmacro2_semver_exempt)] - #[cfg_attr(docsrs, doc(cfg(procmacro2_semver_exempt)))] - pub fn eq(&self, other: &Span) -> bool { - self.inner.eq(&other.inner) - } - - /// Returns the source text behind a span. This preserves the original - /// source code, including spaces and comments. It only returns a result if - /// the span corresponds to real source code. - /// - /// Note: The observable result of a macro should only rely on the tokens - /// and not on this source text. The result of this function is a best - /// effort to be used for diagnostics only. - pub fn source_text(&self) -> Option { - self.inner.source_text() - } -} - -/// Prints a span in a form convenient for debugging. 
-impl Debug for Span { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - Debug::fmt(&self.inner, f) - } -} - -/// A single token or a delimited sequence of token trees (e.g. `[1, (), ..]`). -#[derive(Clone)] -pub enum TokenTree { - /// A token stream surrounded by bracket delimiters. - Group(Group), - /// An identifier. - Ident(Ident), - /// A single punctuation character (`+`, `,`, `$`, etc.). - Punct(Punct), - /// A literal character (`'a'`), string (`"hello"`), number (`2.3`), etc. - Literal(Literal), -} - -impl TokenTree { - /// Returns the span of this tree, delegating to the `span` method of - /// the contained token or a delimited stream. - pub fn span(&self) -> Span { - match self { - TokenTree::Group(t) => t.span(), - TokenTree::Ident(t) => t.span(), - TokenTree::Punct(t) => t.span(), - TokenTree::Literal(t) => t.span(), - } - } - - /// Configures the span for *only this token*. - /// - /// Note that if this token is a `Group` then this method will not configure - /// the span of each of the internal tokens, this will simply delegate to - /// the `set_span` method of each variant. - pub fn set_span(&mut self, span: Span) { - match self { - TokenTree::Group(t) => t.set_span(span), - TokenTree::Ident(t) => t.set_span(span), - TokenTree::Punct(t) => t.set_span(span), - TokenTree::Literal(t) => t.set_span(span), - } - } -} - -impl From for TokenTree { - fn from(g: Group) -> Self { - TokenTree::Group(g) - } -} - -impl From for TokenTree { - fn from(g: Ident) -> Self { - TokenTree::Ident(g) - } -} - -impl From for TokenTree { - fn from(g: Punct) -> Self { - TokenTree::Punct(g) - } -} - -impl From for TokenTree { - fn from(g: Literal) -> Self { - TokenTree::Literal(g) - } -} - -/// Prints the token tree as a string that is supposed to be losslessly -/// convertible back into the same token tree (modulo spans), except for -/// possibly `TokenTree::Group`s with `Delimiter::None` delimiters and negative -/// numeric literals. -impl Display for TokenTree { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match self { - TokenTree::Group(t) => Display::fmt(t, f), - TokenTree::Ident(t) => Display::fmt(t, f), - TokenTree::Punct(t) => Display::fmt(t, f), - TokenTree::Literal(t) => Display::fmt(t, f), - } - } -} - -/// Prints token tree in a form convenient for debugging. -impl Debug for TokenTree { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - // Each of these has the name in the struct type in the derived debug, - // so don't bother with an extra layer of indirection - match self { - TokenTree::Group(t) => Debug::fmt(t, f), - TokenTree::Ident(t) => { - let mut debug = f.debug_struct("Ident"); - debug.field("sym", &format_args!("{}", t)); - imp::debug_span_field_if_nontrivial(&mut debug, t.span().inner); - debug.finish() - } - TokenTree::Punct(t) => Debug::fmt(t, f), - TokenTree::Literal(t) => Debug::fmt(t, f), - } - } -} - -/// A delimited token stream. -/// -/// A `Group` internally contains a `TokenStream` which is surrounded by -/// `Delimiter`s. -#[derive(Clone)] -pub struct Group { - inner: imp::Group, -} - -/// Describes how a sequence of token trees is delimited. -#[derive(Copy, Clone, Debug, Eq, PartialEq)] -pub enum Delimiter { - /// `( ... )` - Parenthesis, - /// `{ ... }` - Brace, - /// `[ ... ]` - Bracket, - /// `∅ ... ∅` - /// - /// An invisible delimiter, that may, for example, appear around tokens - /// coming from a "macro variable" `$var`. It is important to preserve - /// operator priorities in cases like `$var * 3` where `$var` is `1 + 2`. 
- /// Invisible delimiters may not survive roundtrip of a token stream through - /// a string. - /// - /// <div class="warning"> - /// - /// Note: rustc currently can ignore the grouping of tokens delimited by `None` in the output - /// of a proc_macro. Only `None`-delimited groups created by a macro_rules macro in the input - /// of a proc_macro macro are preserved, and only in very specific circumstances. - /// Any `None`-delimited groups (re)created by a proc_macro will therefore not preserve - /// operator priorities as indicated above. The other `Delimiter` variants should be used - /// instead in this context. This is a rustc bug. For details, see - /// [rust-lang/rust#67062](https://github.com/rust-lang/rust/issues/67062). - /// - /// </div>
- None, -} - -impl Group { - fn _new(inner: imp::Group) -> Self { - Group { inner } - } - - fn _new_fallback(inner: fallback::Group) -> Self { - Group { - inner: imp::Group::from(inner), - } - } - - /// Creates a new `Group` with the given delimiter and token stream. - /// - /// This constructor will set the span for this group to - /// `Span::call_site()`. To change the span you can use the `set_span` - /// method below. - pub fn new(delimiter: Delimiter, stream: TokenStream) -> Self { - Group { - inner: imp::Group::new(delimiter, stream.inner), - } - } - - /// Returns the punctuation used as the delimiter for this group: a set of - /// parentheses, square brackets, or curly braces. - pub fn delimiter(&self) -> Delimiter { - self.inner.delimiter() - } - - /// Returns the `TokenStream` of tokens that are delimited in this `Group`. - /// - /// Note that the returned token stream does not include the delimiter - /// returned above. - pub fn stream(&self) -> TokenStream { - TokenStream::_new(self.inner.stream()) - } - - /// Returns the span for the delimiters of this token stream, spanning the - /// entire `Group`. - /// - /// ```text - /// pub fn span(&self) -> Span { - /// ^^^^^^^ - /// ``` - pub fn span(&self) -> Span { - Span::_new(self.inner.span()) - } - - /// Returns the span pointing to the opening delimiter of this group. - /// - /// ```text - /// pub fn span_open(&self) -> Span { - /// ^ - /// ``` - pub fn span_open(&self) -> Span { - Span::_new(self.inner.span_open()) - } - - /// Returns the span pointing to the closing delimiter of this group. - /// - /// ```text - /// pub fn span_close(&self) -> Span { - /// ^ - /// ``` - pub fn span_close(&self) -> Span { - Span::_new(self.inner.span_close()) - } - - /// Returns an object that holds this group's `span_open()` and - /// `span_close()` together (in a more compact representation than holding - /// those 2 spans individually). - pub fn delim_span(&self) -> DelimSpan { - DelimSpan::new(&self.inner) - } - - /// Configures the span for this `Group`'s delimiters, but not its internal - /// tokens. - /// - /// This method will **not** set the span of all the internal tokens spanned - /// by this group, but rather it will only set the span of the delimiter - /// tokens at the level of the `Group`. - pub fn set_span(&mut self, span: Span) { - self.inner.set_span(span.inner); - } -} - -/// Prints the group as a string that should be losslessly convertible back -/// into the same group (modulo spans), except for possibly `TokenTree::Group`s -/// with `Delimiter::None` delimiters. -impl Display for Group { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - Display::fmt(&self.inner, formatter) - } -} - -impl Debug for Group { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - Debug::fmt(&self.inner, formatter) - } -} - -/// A `Punct` is a single punctuation character like `+`, `-` or `#`. -/// -/// Multicharacter operators like `+=` are represented as two instances of -/// `Punct` with different forms of `Spacing` returned. -#[derive(Clone)] -pub struct Punct { - ch: char, - spacing: Spacing, - span: Span, -} - -/// Whether a `Punct` is followed immediately by another `Punct` or followed by -/// another token or whitespace. -#[derive(Copy, Clone, Debug, Eq, PartialEq)] -pub enum Spacing { - /// E.g. `+` is `Alone` in `+ =`, `+ident` or `+()`. - Alone, - /// E.g. `+` is `Joint` in `+=` or `'` is `Joint` in `'#`. - /// - /// Additionally, single quote `'` can join with identifiers to form - /// lifetimes `'ident`. 
- Joint, -} - -impl Punct { - /// Creates a new `Punct` from the given character and spacing. - /// - /// The `ch` argument must be a valid punctuation character permitted by the - /// language, otherwise the function will panic. - /// - /// The returned `Punct` will have the default span of `Span::call_site()` - /// which can be further configured with the `set_span` method below. - pub fn new(ch: char, spacing: Spacing) -> Self { - if let '!' | '#' | '$' | '%' | '&' | '\'' | '*' | '+' | ',' | '-' | '.' | '/' | ':' | ';' - | '<' | '=' | '>' | '?' | '@' | '^' | '|' | '~' = ch - { - Punct { - ch, - spacing, - span: Span::call_site(), - } - } else { - panic!("unsupported proc macro punctuation character {:?}", ch); - } - } - - /// Returns the value of this punctuation character as `char`. - pub fn as_char(&self) -> char { - self.ch - } - - /// Returns the spacing of this punctuation character, indicating whether - /// it's immediately followed by another `Punct` in the token stream, so - /// they can potentially be combined into a multicharacter operator - /// (`Joint`), or it's followed by some other token or whitespace (`Alone`) - /// so the operator has certainly ended. - pub fn spacing(&self) -> Spacing { - self.spacing - } - - /// Returns the span for this punctuation character. - pub fn span(&self) -> Span { - self.span - } - - /// Configure the span for this punctuation character. - pub fn set_span(&mut self, span: Span) { - self.span = span; - } -} - -/// Prints the punctuation character as a string that should be losslessly -/// convertible back into the same character. -impl Display for Punct { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - Display::fmt(&self.ch, f) - } -} - -impl Debug for Punct { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - let mut debug = fmt.debug_struct("Punct"); - debug.field("char", &self.ch); - debug.field("spacing", &self.spacing); - imp::debug_span_field_if_nontrivial(&mut debug, self.span.inner); - debug.finish() - } -} - -/// A word of Rust code, which may be a keyword or legal variable name. -/// -/// An identifier consists of at least one Unicode code point, the first of -/// which has the XID_Start property and the rest of which have the XID_Continue -/// property. -/// -/// - The empty string is not an identifier. Use `Option`. -/// - A lifetime is not an identifier. Use `syn::Lifetime` instead. -/// -/// An identifier constructed with `Ident::new` is permitted to be a Rust -/// keyword, though parsing one through its [`Parse`] implementation rejects -/// Rust keywords. Use `input.call(Ident::parse_any)` when parsing to match the -/// behaviour of `Ident::new`. -/// -/// [`Parse`]: https://docs.rs/syn/2.0/syn/parse/trait.Parse.html -/// -/// # Examples -/// -/// A new ident can be created from a string using the `Ident::new` function. -/// A span must be provided explicitly which governs the name resolution -/// behavior of the resulting identifier. -/// -/// ``` -/// use proc_macro2::{Ident, Span}; -/// -/// fn main() { -/// let call_ident = Ident::new("calligraphy", Span::call_site()); -/// -/// println!("{}", call_ident); -/// } -/// ``` -/// -/// An ident can be interpolated into a token stream using the `quote!` macro. -/// -/// ``` -/// use proc_macro2::{Ident, Span}; -/// use quote::quote; -/// -/// fn main() { -/// let ident = Ident::new("demo", Span::call_site()); -/// -/// // Create a variable binding whose name is this ident. -/// let expanded = quote! 
{ let #ident = 10; }; -/// -/// // Create a variable binding with a slightly different name. -/// let temp_ident = Ident::new(&format!("new_{}", ident), Span::call_site()); -/// let expanded = quote! { let #temp_ident = 10; }; -/// } -/// ``` -/// -/// A string representation of the ident is available through the `to_string()` -/// method. -/// -/// ``` -/// # use proc_macro2::{Ident, Span}; -/// # -/// # let ident = Ident::new("another_identifier", Span::call_site()); -/// # -/// // Examine the ident as a string. -/// let ident_string = ident.to_string(); -/// if ident_string.len() > 60 { -/// println!("Very long identifier: {}", ident_string) -/// } -/// ``` -#[derive(Clone)] -pub struct Ident { - inner: imp::Ident, - _marker: ProcMacroAutoTraits, -} - -impl Ident { - fn _new(inner: imp::Ident) -> Self { - Ident { - inner, - _marker: MARKER, - } - } - - fn _new_fallback(inner: fallback::Ident) -> Self { - Ident { - inner: imp::Ident::from(inner), - _marker: MARKER, - } - } - - /// Creates a new `Ident` with the given `string` as well as the specified - /// `span`. - /// - /// The `string` argument must be a valid identifier permitted by the - /// language, otherwise the function will panic. - /// - /// Note that `span`, currently in rustc, configures the hygiene information - /// for this identifier. - /// - /// As of this time `Span::call_site()` explicitly opts-in to "call-site" - /// hygiene meaning that identifiers created with this span will be resolved - /// as if they were written directly at the location of the macro call, and - /// other code at the macro call site will be able to refer to them as well. - /// - /// Later spans like `Span::def_site()` will allow to opt-in to - /// "definition-site" hygiene meaning that identifiers created with this - /// span will be resolved at the location of the macro definition and other - /// code at the macro call site will not be able to refer to them. - /// - /// Due to the current importance of hygiene this constructor, unlike other - /// tokens, requires a `Span` to be specified at construction. - /// - /// # Panics - /// - /// Panics if the input string is neither a keyword nor a legal variable - /// name. If you are not sure whether the string contains an identifier and - /// need to handle an error case, use - /// syn::parse_str::<Ident> - /// rather than `Ident::new`. - #[track_caller] - pub fn new(string: &str, span: Span) -> Self { - Ident::_new(imp::Ident::new_checked(string, span.inner)) - } - - /// Same as `Ident::new`, but creates a raw identifier (`r#ident`). The - /// `string` argument must be a valid identifier permitted by the language - /// (including keywords, e.g. `fn`). Keywords which are usable in path - /// segments (e.g. `self`, `super`) are not supported, and will cause a - /// panic. - #[track_caller] - pub fn new_raw(string: &str, span: Span) -> Self { - Ident::_new(imp::Ident::new_raw_checked(string, span.inner)) - } - - /// Returns the span of this `Ident`. - pub fn span(&self) -> Span { - Span::_new(self.inner.span()) - } - - /// Configures the span of this `Ident`, possibly changing its hygiene - /// context. 
- pub fn set_span(&mut self, span: Span) { - self.inner.set_span(span.inner); - } -} - -impl PartialEq for Ident { - fn eq(&self, other: &Ident) -> bool { - self.inner == other.inner - } -} - -impl PartialEq for Ident -where - T: ?Sized + AsRef, -{ - fn eq(&self, other: &T) -> bool { - self.inner == other - } -} - -impl Eq for Ident {} - -impl PartialOrd for Ident { - fn partial_cmp(&self, other: &Ident) -> Option { - Some(self.cmp(other)) - } -} - -impl Ord for Ident { - fn cmp(&self, other: &Ident) -> Ordering { - self.to_string().cmp(&other.to_string()) - } -} - -impl Hash for Ident { - fn hash(&self, hasher: &mut H) { - self.to_string().hash(hasher); - } -} - -/// Prints the identifier as a string that should be losslessly convertible back -/// into the same identifier. -impl Display for Ident { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - Display::fmt(&self.inner, f) - } -} - -impl Debug for Ident { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - Debug::fmt(&self.inner, f) - } -} - -/// A literal string (`"hello"`), byte string (`b"hello"`), character (`'a'`), -/// byte character (`b'a'`), an integer or floating point number with or without -/// a suffix (`1`, `1u8`, `2.3`, `2.3f32`). -/// -/// Boolean literals like `true` and `false` do not belong here, they are -/// `Ident`s. -#[derive(Clone)] -pub struct Literal { - inner: imp::Literal, - _marker: ProcMacroAutoTraits, -} - -macro_rules! suffixed_int_literals { - ($($name:ident => $kind:ident,)*) => ($( - /// Creates a new suffixed integer literal with the specified value. - /// - /// This function will create an integer like `1u32` where the integer - /// value specified is the first part of the token and the integral is - /// also suffixed at the end. Literals created from negative numbers may - /// not survive roundtrips through `TokenStream` or strings and may be - /// broken into two tokens (`-` and positive literal). - /// - /// Literals created through this method have the `Span::call_site()` - /// span by default, which can be configured with the `set_span` method - /// below. - pub fn $name(n: $kind) -> Literal { - Literal::_new(imp::Literal::$name(n)) - } - )*) -} - -macro_rules! unsuffixed_int_literals { - ($($name:ident => $kind:ident,)*) => ($( - /// Creates a new unsuffixed integer literal with the specified value. - /// - /// This function will create an integer like `1` where the integer - /// value specified is the first part of the token. No suffix is - /// specified on this token, meaning that invocations like - /// `Literal::i8_unsuffixed(1)` are equivalent to - /// `Literal::u32_unsuffixed(1)`. Literals created from negative numbers - /// may not survive roundtrips through `TokenStream` or strings and may - /// be broken into two tokens (`-` and positive literal). - /// - /// Literals created through this method have the `Span::call_site()` - /// span by default, which can be configured with the `set_span` method - /// below. - pub fn $name(n: $kind) -> Literal { - Literal::_new(imp::Literal::$name(n)) - } - )*) -} - -impl Literal { - fn _new(inner: imp::Literal) -> Self { - Literal { - inner, - _marker: MARKER, - } - } - - fn _new_fallback(inner: fallback::Literal) -> Self { - Literal { - inner: imp::Literal::from(inner), - _marker: MARKER, - } - } - - suffixed_int_literals! 
{ - u8_suffixed => u8, - u16_suffixed => u16, - u32_suffixed => u32, - u64_suffixed => u64, - u128_suffixed => u128, - usize_suffixed => usize, - i8_suffixed => i8, - i16_suffixed => i16, - i32_suffixed => i32, - i64_suffixed => i64, - i128_suffixed => i128, - isize_suffixed => isize, - } - - unsuffixed_int_literals! { - u8_unsuffixed => u8, - u16_unsuffixed => u16, - u32_unsuffixed => u32, - u64_unsuffixed => u64, - u128_unsuffixed => u128, - usize_unsuffixed => usize, - i8_unsuffixed => i8, - i16_unsuffixed => i16, - i32_unsuffixed => i32, - i64_unsuffixed => i64, - i128_unsuffixed => i128, - isize_unsuffixed => isize, - } - - /// Creates a new unsuffixed floating-point literal. - /// - /// This constructor is similar to those like `Literal::i8_unsuffixed` where - /// the float's value is emitted directly into the token but no suffix is - /// used, so it may be inferred to be a `f64` later in the compiler. - /// Literals created from negative numbers may not survive round-trips - /// through `TokenStream` or strings and may be broken into two tokens (`-` - /// and positive literal). - /// - /// # Panics - /// - /// This function requires that the specified float is finite, for example - /// if it is infinity or NaN this function will panic. - pub fn f64_unsuffixed(f: f64) -> Literal { - assert!(f.is_finite()); - Literal::_new(imp::Literal::f64_unsuffixed(f)) - } - - /// Creates a new suffixed floating-point literal. - /// - /// This constructor will create a literal like `1.0f64` where the value - /// specified is the preceding part of the token and `f64` is the suffix of - /// the token. This token will always be inferred to be an `f64` in the - /// compiler. Literals created from negative numbers may not survive - /// round-trips through `TokenStream` or strings and may be broken into two - /// tokens (`-` and positive literal). - /// - /// # Panics - /// - /// This function requires that the specified float is finite, for example - /// if it is infinity or NaN this function will panic. - pub fn f64_suffixed(f: f64) -> Literal { - assert!(f.is_finite()); - Literal::_new(imp::Literal::f64_suffixed(f)) - } - - /// Creates a new unsuffixed floating-point literal. - /// - /// This constructor is similar to those like `Literal::i8_unsuffixed` where - /// the float's value is emitted directly into the token but no suffix is - /// used, so it may be inferred to be a `f64` later in the compiler. - /// Literals created from negative numbers may not survive round-trips - /// through `TokenStream` or strings and may be broken into two tokens (`-` - /// and positive literal). - /// - /// # Panics - /// - /// This function requires that the specified float is finite, for example - /// if it is infinity or NaN this function will panic. - pub fn f32_unsuffixed(f: f32) -> Literal { - assert!(f.is_finite()); - Literal::_new(imp::Literal::f32_unsuffixed(f)) - } - - /// Creates a new suffixed floating-point literal. - /// - /// This constructor will create a literal like `1.0f32` where the value - /// specified is the preceding part of the token and `f32` is the suffix of - /// the token. This token will always be inferred to be an `f32` in the - /// compiler. Literals created from negative numbers may not survive - /// round-trips through `TokenStream` or strings and may be broken into two - /// tokens (`-` and positive literal). - /// - /// # Panics - /// - /// This function requires that the specified float is finite, for example - /// if it is infinity or NaN this function will panic. 
- pub fn f32_suffixed(f: f32) -> Literal { - assert!(f.is_finite()); - Literal::_new(imp::Literal::f32_suffixed(f)) - } - - /// String literal. - pub fn string(string: &str) -> Literal { - Literal::_new(imp::Literal::string(string)) - } - - /// Character literal. - pub fn character(ch: char) -> Literal { - Literal::_new(imp::Literal::character(ch)) - } - - /// Byte character literal. - pub fn byte_character(byte: u8) -> Literal { - Literal::_new(imp::Literal::byte_character(byte)) - } - - /// Byte string literal. - pub fn byte_string(bytes: &[u8]) -> Literal { - Literal::_new(imp::Literal::byte_string(bytes)) - } - - /// C string literal. - pub fn c_string(string: &CStr) -> Literal { - Literal::_new(imp::Literal::c_string(string)) - } - - /// Returns the span encompassing this literal. - pub fn span(&self) -> Span { - Span::_new(self.inner.span()) - } - - /// Configures the span associated for this literal. - pub fn set_span(&mut self, span: Span) { - self.inner.set_span(span.inner); - } - - /// Returns a `Span` that is a subset of `self.span()` containing only - /// the source bytes in range `range`. Returns `None` if the would-be - /// trimmed span is outside the bounds of `self`. - /// - /// Warning: the underlying [`proc_macro::Literal::subspan`] method is - /// nightly-only. When called from within a procedural macro not using a - /// nightly compiler, this method will always return `None`. - pub fn subspan>(&self, range: R) -> Option { - self.inner.subspan(range).map(Span::_new) - } - - /// Returns the unescaped string value if this is a string literal. - #[cfg(procmacro2_semver_exempt)] - pub fn str_value(&self) -> Result { - let repr = self.to_string(); - - if repr.starts_with('"') && repr[1..].ends_with('"') { - let quoted = &repr[1..repr.len() - 1]; - let mut value = String::with_capacity(quoted.len()); - let mut error = None; - rustc_literal_escaper::unescape_str(quoted, |_range, res| match res { - Ok(ch) => value.push(ch), - Err(err) => { - if err.is_fatal() { - error = Some(ConversionErrorKind::FailedToUnescape(err)); - } - } - }); - return match error { - Some(error) => Err(error), - None => Ok(value), - }; - } - - if repr.starts_with('r') { - if let Some(raw) = get_raw(&repr[1..]) { - return Ok(raw.to_owned()); - } - } - - Err(ConversionErrorKind::InvalidLiteralKind) - } - - /// Returns the unescaped string value (including nul terminator) if this is - /// a c-string literal. - #[cfg(procmacro2_semver_exempt)] - pub fn cstr_value(&self) -> Result, ConversionErrorKind> { - let repr = self.to_string(); - - if repr.starts_with("c\"") && repr[2..].ends_with('"') { - let quoted = &repr[2..repr.len() - 1]; - let mut value = Vec::with_capacity(quoted.len()); - let mut error = None; - rustc_literal_escaper::unescape_c_str(quoted, |_range, res| match res { - Ok(MixedUnit::Char(ch)) => { - value.extend_from_slice(ch.get().encode_utf8(&mut [0; 4]).as_bytes()); - } - Ok(MixedUnit::HighByte(byte)) => value.push(byte.get()), - Err(err) => { - if err.is_fatal() { - error = Some(ConversionErrorKind::FailedToUnescape(err)); - } - } - }); - return match error { - Some(error) => Err(error), - None => { - value.push(b'\0'); - Ok(value) - } - }; - } - - if repr.starts_with("cr") { - if let Some(raw) = get_raw(&repr[2..]) { - let mut value = Vec::with_capacity(raw.len() + 1); - value.extend_from_slice(raw.as_bytes()); - value.push(b'\0'); - return Ok(value); - } - } - - Err(ConversionErrorKind::InvalidLiteralKind) - } - - /// Returns the unescaped string value if this is a byte string literal. 
- #[cfg(procmacro2_semver_exempt)] - pub fn byte_str_value(&self) -> Result, ConversionErrorKind> { - let repr = self.to_string(); - - if repr.starts_with("b\"") && repr[2..].ends_with('"') { - let quoted = &repr[2..repr.len() - 1]; - let mut value = Vec::with_capacity(quoted.len()); - let mut error = None; - rustc_literal_escaper::unescape_byte_str(quoted, |_range, res| match res { - Ok(byte) => value.push(byte), - Err(err) => { - if err.is_fatal() { - error = Some(ConversionErrorKind::FailedToUnescape(err)); - } - } - }); - return match error { - Some(error) => Err(error), - None => Ok(value), - }; - } - - if repr.starts_with("br") { - if let Some(raw) = get_raw(&repr[2..]) { - return Ok(raw.as_bytes().to_owned()); - } - } - - Err(ConversionErrorKind::InvalidLiteralKind) - } - - // Intended for the `quote!` macro to use when constructing a proc-macro2 - // token out of a macro_rules $:literal token, which is already known to be - // a valid literal. This avoids reparsing/validating the literal's string - // representation. This is not public API other than for quote. - #[doc(hidden)] - pub unsafe fn from_str_unchecked(repr: &str) -> Self { - Literal::_new(unsafe { imp::Literal::from_str_unchecked(repr) }) - } -} - -impl FromStr for Literal { - type Err = LexError; - - fn from_str(repr: &str) -> Result { - match imp::Literal::from_str_checked(repr) { - Ok(lit) => Ok(Literal::_new(lit)), - Err(lex) => Err(LexError { - inner: lex, - _marker: MARKER, - }), - } - } -} - -impl Debug for Literal { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - Debug::fmt(&self.inner, f) - } -} - -impl Display for Literal { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - Display::fmt(&self.inner, f) - } -} - -/// Error when retrieving a string literal's unescaped value. -#[cfg(procmacro2_semver_exempt)] -#[derive(Debug, PartialEq, Eq)] -pub enum ConversionErrorKind { - /// The literal is of the right string kind, but its contents are malformed - /// in a way that cannot be unescaped to a value. - FailedToUnescape(EscapeError), - /// The literal is not of the string kind whose value was requested, for - /// example byte string vs UTF-8 string. - InvalidLiteralKind, -} - -// ###"..."### -> ... -#[cfg(procmacro2_semver_exempt)] -fn get_raw(repr: &str) -> Option<&str> { - let pounds = repr.len() - repr.trim_start_matches('#').len(); - if repr.len() >= pounds + 1 + 1 + pounds - && repr[pounds..].starts_with('"') - && repr.trim_end_matches('#').len() + pounds == repr.len() - && repr[..repr.len() - pounds].ends_with('"') - { - Some(&repr[pounds + 1..repr.len() - pounds - 1]) - } else { - None - } -} - -/// Public implementation details for the `TokenStream` type, such as iterators. -pub mod token_stream { - use crate::marker::{ProcMacroAutoTraits, MARKER}; - use crate::{imp, TokenTree}; - use core::fmt::{self, Debug}; - - pub use crate::TokenStream; - - /// An iterator over `TokenStream`'s `TokenTree`s. - /// - /// The iteration is "shallow", e.g. the iterator doesn't recurse into - /// delimited groups, and returns whole groups as token trees. 
- #[derive(Clone)] - pub struct IntoIter { - inner: imp::TokenTreeIter, - _marker: ProcMacroAutoTraits, - } - - impl Iterator for IntoIter { - type Item = TokenTree; - - fn next(&mut self) -> Option { - self.inner.next() - } - - fn size_hint(&self) -> (usize, Option) { - self.inner.size_hint() - } - } - - impl Debug for IntoIter { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.write_str("TokenStream ")?; - f.debug_list().entries(self.clone()).finish() - } - } - - impl IntoIterator for TokenStream { - type Item = TokenTree; - type IntoIter = IntoIter; - - fn into_iter(self) -> IntoIter { - IntoIter { - inner: self.inner.into_iter(), - _marker: MARKER, - } - } - } -} diff --git a/vendor/proc-macro2/src/location.rs b/vendor/proc-macro2/src/location.rs deleted file mode 100644 index 7190e2d05255e0..00000000000000 --- a/vendor/proc-macro2/src/location.rs +++ /dev/null @@ -1,29 +0,0 @@ -use core::cmp::Ordering; - -/// A line-column pair representing the start or end of a `Span`. -/// -/// This type is semver exempt and not exposed by default. -#[cfg_attr(docsrs, doc(cfg(feature = "span-locations")))] -#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)] -pub struct LineColumn { - /// The 1-indexed line in the source file on which the span starts or ends - /// (inclusive). - pub line: usize, - /// The 0-indexed column (in UTF-8 characters) in the source file on which - /// the span starts or ends (inclusive). - pub column: usize, -} - -impl Ord for LineColumn { - fn cmp(&self, other: &Self) -> Ordering { - self.line - .cmp(&other.line) - .then(self.column.cmp(&other.column)) - } -} - -impl PartialOrd for LineColumn { - fn partial_cmp(&self, other: &Self) -> Option { - Some(self.cmp(other)) - } -} diff --git a/vendor/proc-macro2/src/marker.rs b/vendor/proc-macro2/src/marker.rs deleted file mode 100644 index 23b94ce6fa853e..00000000000000 --- a/vendor/proc-macro2/src/marker.rs +++ /dev/null @@ -1,17 +0,0 @@ -use alloc::rc::Rc; -use core::marker::PhantomData; -use core::panic::{RefUnwindSafe, UnwindSafe}; - -// Zero sized marker with the correct set of autotrait impls we want all proc -// macro types to have. 
-#[derive(Copy, Clone)] -#[cfg_attr( - all(procmacro2_semver_exempt, any(not(wrap_proc_macro), super_unstable)), - derive(PartialEq, Eq) -)] -pub(crate) struct ProcMacroAutoTraits(PhantomData>); - -pub(crate) const MARKER: ProcMacroAutoTraits = ProcMacroAutoTraits(PhantomData); - -impl UnwindSafe for ProcMacroAutoTraits {} -impl RefUnwindSafe for ProcMacroAutoTraits {} diff --git a/vendor/proc-macro2/src/num.rs b/vendor/proc-macro2/src/num.rs deleted file mode 100644 index 3ac82c8608df9a..00000000000000 --- a/vendor/proc-macro2/src/num.rs +++ /dev/null @@ -1,17 +0,0 @@ -// TODO: use NonZero in Rust 1.89+ -#[derive(Copy, Clone, Debug, PartialEq, Eq)] -pub struct NonZeroChar(char); - -impl NonZeroChar { - pub fn new(ch: char) -> Option { - if ch == '\0' { - None - } else { - Some(NonZeroChar(ch)) - } - } - - pub fn get(self) -> char { - self.0 - } -} diff --git a/vendor/proc-macro2/src/parse.rs b/vendor/proc-macro2/src/parse.rs deleted file mode 100644 index b8be403f842f2c..00000000000000 --- a/vendor/proc-macro2/src/parse.rs +++ /dev/null @@ -1,995 +0,0 @@ -use crate::fallback::{ - self, is_ident_continue, is_ident_start, Group, Ident, LexError, Literal, Span, TokenStream, - TokenStreamBuilder, -}; -use crate::{Delimiter, Punct, Spacing, TokenTree}; -use core::char; -use core::str::{Bytes, CharIndices, Chars}; - -#[derive(Copy, Clone, Eq, PartialEq)] -pub(crate) struct Cursor<'a> { - pub(crate) rest: &'a str, - #[cfg(span_locations)] - pub(crate) off: u32, -} - -impl<'a> Cursor<'a> { - pub(crate) fn advance(&self, bytes: usize) -> Cursor<'a> { - let (_front, rest) = self.rest.split_at(bytes); - Cursor { - rest, - #[cfg(span_locations)] - off: self.off + _front.chars().count() as u32, - } - } - - pub(crate) fn starts_with(&self, s: &str) -> bool { - self.rest.starts_with(s) - } - - pub(crate) fn starts_with_char(&self, ch: char) -> bool { - self.rest.starts_with(ch) - } - - pub(crate) fn starts_with_fn(&self, f: Pattern) -> bool - where - Pattern: FnMut(char) -> bool, - { - self.rest.starts_with(f) - } - - pub(crate) fn is_empty(&self) -> bool { - self.rest.is_empty() - } - - fn len(&self) -> usize { - self.rest.len() - } - - fn as_bytes(&self) -> &'a [u8] { - self.rest.as_bytes() - } - - fn bytes(&self) -> Bytes<'a> { - self.rest.bytes() - } - - fn chars(&self) -> Chars<'a> { - self.rest.chars() - } - - fn char_indices(&self) -> CharIndices<'a> { - self.rest.char_indices() - } - - fn parse(&self, tag: &str) -> Result, Reject> { - if self.starts_with(tag) { - Ok(self.advance(tag.len())) - } else { - Err(Reject) - } - } -} - -pub(crate) struct Reject; -type PResult<'a, O> = Result<(Cursor<'a>, O), Reject>; - -fn skip_whitespace(input: Cursor) -> Cursor { - let mut s = input; - - while !s.is_empty() { - let byte = s.as_bytes()[0]; - if byte == b'/' { - if s.starts_with("//") - && (!s.starts_with("///") || s.starts_with("////")) - && !s.starts_with("//!") - { - let (cursor, _) = take_until_newline_or_eof(s); - s = cursor; - continue; - } else if s.starts_with("/**/") { - s = s.advance(4); - continue; - } else if s.starts_with("/*") - && (!s.starts_with("/**") || s.starts_with("/***")) - && !s.starts_with("/*!") - { - match block_comment(s) { - Ok((rest, _)) => { - s = rest; - continue; - } - Err(Reject) => return s, - } - } - } - match byte { - b' ' | 0x09..=0x0d => { - s = s.advance(1); - continue; - } - b if b.is_ascii() => {} - _ => { - let ch = s.chars().next().unwrap(); - if is_whitespace(ch) { - s = s.advance(ch.len_utf8()); - continue; - } - } - } - return s; - } - s -} - -fn 
block_comment(input: Cursor) -> PResult<&str> { - if !input.starts_with("/*") { - return Err(Reject); - } - - let mut depth = 0usize; - let bytes = input.as_bytes(); - let mut i = 0usize; - let upper = bytes.len() - 1; - - while i < upper { - if bytes[i] == b'/' && bytes[i + 1] == b'*' { - depth += 1; - i += 1; // eat '*' - } else if bytes[i] == b'*' && bytes[i + 1] == b'/' { - depth -= 1; - if depth == 0 { - return Ok((input.advance(i + 2), &input.rest[..i + 2])); - } - i += 1; // eat '/' - } - i += 1; - } - - Err(Reject) -} - -fn is_whitespace(ch: char) -> bool { - // Rust treats left-to-right mark and right-to-left mark as whitespace - ch.is_whitespace() || ch == '\u{200e}' || ch == '\u{200f}' -} - -fn word_break(input: Cursor) -> Result { - match input.chars().next() { - Some(ch) if is_ident_continue(ch) => Err(Reject), - Some(_) | None => Ok(input), - } -} - -// Rustc's representation of a macro expansion error in expression position or -// type position. -const ERROR: &str = "(/*ERROR*/)"; - -pub(crate) fn token_stream(mut input: Cursor) -> Result { - let mut trees = TokenStreamBuilder::new(); - let mut stack = Vec::new(); - - loop { - input = skip_whitespace(input); - - if let Ok((rest, ())) = doc_comment(input, &mut trees) { - input = rest; - continue; - } - - #[cfg(span_locations)] - let lo = input.off; - - let first = match input.bytes().next() { - Some(first) => first, - None => match stack.last() { - None => return Ok(trees.build()), - #[cfg(span_locations)] - Some((lo, _frame)) => { - return Err(LexError { - span: Span { lo: *lo, hi: *lo }, - }) - } - #[cfg(not(span_locations))] - Some(_frame) => return Err(LexError { span: Span {} }), - }, - }; - - if let Some(open_delimiter) = match first { - b'(' if !input.starts_with(ERROR) => Some(Delimiter::Parenthesis), - b'[' => Some(Delimiter::Bracket), - b'{' => Some(Delimiter::Brace), - _ => None, - } { - input = input.advance(1); - let frame = (open_delimiter, trees); - #[cfg(span_locations)] - let frame = (lo, frame); - stack.push(frame); - trees = TokenStreamBuilder::new(); - } else if let Some(close_delimiter) = match first { - b')' => Some(Delimiter::Parenthesis), - b']' => Some(Delimiter::Bracket), - b'}' => Some(Delimiter::Brace), - _ => None, - } { - let frame = match stack.pop() { - Some(frame) => frame, - None => return Err(lex_error(input)), - }; - #[cfg(span_locations)] - let (lo, frame) = frame; - let (open_delimiter, outer) = frame; - if open_delimiter != close_delimiter { - return Err(lex_error(input)); - } - input = input.advance(1); - let mut g = Group::new(open_delimiter, trees.build()); - g.set_span(Span { - #[cfg(span_locations)] - lo, - #[cfg(span_locations)] - hi: input.off, - }); - trees = outer; - trees.push_token_from_parser(TokenTree::Group(crate::Group::_new_fallback(g))); - } else { - let (rest, mut tt) = match leaf_token(input) { - Ok((rest, tt)) => (rest, tt), - Err(Reject) => return Err(lex_error(input)), - }; - tt.set_span(crate::Span::_new_fallback(Span { - #[cfg(span_locations)] - lo, - #[cfg(span_locations)] - hi: rest.off, - })); - trees.push_token_from_parser(tt); - input = rest; - } - } -} - -fn lex_error(cursor: Cursor) -> LexError { - #[cfg(not(span_locations))] - let _ = cursor; - LexError { - span: Span { - #[cfg(span_locations)] - lo: cursor.off, - #[cfg(span_locations)] - hi: cursor.off, - }, - } -} - -fn leaf_token(input: Cursor) -> PResult { - if let Ok((input, l)) = literal(input) { - // must be parsed before ident - Ok((input, TokenTree::Literal(crate::Literal::_new_fallback(l)))) - } 
else if let Ok((input, p)) = punct(input) { - Ok((input, TokenTree::Punct(p))) - } else if let Ok((input, i)) = ident(input) { - Ok((input, TokenTree::Ident(i))) - } else if input.starts_with(ERROR) { - let rest = input.advance(ERROR.len()); - let repr = crate::Literal::_new_fallback(Literal::_new(ERROR.to_owned())); - Ok((rest, TokenTree::Literal(repr))) - } else { - Err(Reject) - } -} - -fn ident(input: Cursor) -> PResult { - if [ - "r\"", "r#\"", "r##", "b\"", "b\'", "br\"", "br#", "c\"", "cr\"", "cr#", - ] - .iter() - .any(|prefix| input.starts_with(prefix)) - { - Err(Reject) - } else { - ident_any(input) - } -} - -fn ident_any(input: Cursor) -> PResult { - let raw = input.starts_with("r#"); - let rest = input.advance((raw as usize) << 1); - - let (rest, sym) = ident_not_raw(rest)?; - - if !raw { - let ident = - crate::Ident::_new_fallback(Ident::new_unchecked(sym, fallback::Span::call_site())); - return Ok((rest, ident)); - } - - match sym { - "_" | "super" | "self" | "Self" | "crate" => return Err(Reject), - _ => {} - } - - let ident = - crate::Ident::_new_fallback(Ident::new_raw_unchecked(sym, fallback::Span::call_site())); - Ok((rest, ident)) -} - -fn ident_not_raw(input: Cursor) -> PResult<&str> { - let mut chars = input.char_indices(); - - match chars.next() { - Some((_, ch)) if is_ident_start(ch) => {} - _ => return Err(Reject), - } - - let mut end = input.len(); - for (i, ch) in chars { - if !is_ident_continue(ch) { - end = i; - break; - } - } - - Ok((input.advance(end), &input.rest[..end])) -} - -pub(crate) fn literal(input: Cursor) -> PResult { - let rest = literal_nocapture(input)?; - let end = input.len() - rest.len(); - Ok((rest, Literal::_new(input.rest[..end].to_string()))) -} - -fn literal_nocapture(input: Cursor) -> Result { - if let Ok(ok) = string(input) { - Ok(ok) - } else if let Ok(ok) = byte_string(input) { - Ok(ok) - } else if let Ok(ok) = c_string(input) { - Ok(ok) - } else if let Ok(ok) = byte(input) { - Ok(ok) - } else if let Ok(ok) = character(input) { - Ok(ok) - } else if let Ok(ok) = float(input) { - Ok(ok) - } else if let Ok(ok) = int(input) { - Ok(ok) - } else { - Err(Reject) - } -} - -fn literal_suffix(input: Cursor) -> Cursor { - match ident_not_raw(input) { - Ok((input, _)) => input, - Err(Reject) => input, - } -} - -fn string(input: Cursor) -> Result { - if let Ok(input) = input.parse("\"") { - cooked_string(input) - } else if let Ok(input) = input.parse("r") { - raw_string(input) - } else { - Err(Reject) - } -} - -fn cooked_string(mut input: Cursor) -> Result { - let mut chars = input.char_indices(); - - while let Some((i, ch)) = chars.next() { - match ch { - '"' => { - let input = input.advance(i + 1); - return Ok(literal_suffix(input)); - } - '\r' => match chars.next() { - Some((_, '\n')) => {} - _ => break, - }, - '\\' => match chars.next() { - Some((_, 'x')) => { - backslash_x_char(&mut chars)?; - } - Some((_, 'n' | 'r' | 't' | '\\' | '\'' | '"' | '0')) => {} - Some((_, 'u')) => { - backslash_u(&mut chars)?; - } - Some((newline, ch @ ('\n' | '\r'))) => { - input = input.advance(newline + 1); - trailing_backslash(&mut input, ch as u8)?; - chars = input.char_indices(); - } - _ => break, - }, - _ch => {} - } - } - Err(Reject) -} - -fn raw_string(input: Cursor) -> Result { - let (input, delimiter) = delimiter_of_raw_string(input)?; - let mut bytes = input.bytes().enumerate(); - while let Some((i, byte)) = bytes.next() { - match byte { - b'"' if input.rest[i + 1..].starts_with(delimiter) => { - let rest = input.advance(i + 1 + delimiter.len()); - 
return Ok(literal_suffix(rest)); - } - b'\r' => match bytes.next() { - Some((_, b'\n')) => {} - _ => break, - }, - _ => {} - } - } - Err(Reject) -} - -fn byte_string(input: Cursor) -> Result { - if let Ok(input) = input.parse("b\"") { - cooked_byte_string(input) - } else if let Ok(input) = input.parse("br") { - raw_byte_string(input) - } else { - Err(Reject) - } -} - -fn cooked_byte_string(mut input: Cursor) -> Result { - let mut bytes = input.bytes().enumerate(); - while let Some((offset, b)) = bytes.next() { - match b { - b'"' => { - let input = input.advance(offset + 1); - return Ok(literal_suffix(input)); - } - b'\r' => match bytes.next() { - Some((_, b'\n')) => {} - _ => break, - }, - b'\\' => match bytes.next() { - Some((_, b'x')) => { - backslash_x_byte(&mut bytes)?; - } - Some((_, b'n' | b'r' | b't' | b'\\' | b'0' | b'\'' | b'"')) => {} - Some((newline, b @ (b'\n' | b'\r'))) => { - input = input.advance(newline + 1); - trailing_backslash(&mut input, b)?; - bytes = input.bytes().enumerate(); - } - _ => break, - }, - b if b.is_ascii() => {} - _ => break, - } - } - Err(Reject) -} - -fn delimiter_of_raw_string(input: Cursor) -> PResult<&str> { - for (i, byte) in input.bytes().enumerate() { - match byte { - b'"' => { - if i > 255 { - // https://github.com/rust-lang/rust/pull/95251 - return Err(Reject); - } - return Ok((input.advance(i + 1), &input.rest[..i])); - } - b'#' => {} - _ => break, - } - } - Err(Reject) -} - -fn raw_byte_string(input: Cursor) -> Result { - let (input, delimiter) = delimiter_of_raw_string(input)?; - let mut bytes = input.bytes().enumerate(); - while let Some((i, byte)) = bytes.next() { - match byte { - b'"' if input.rest[i + 1..].starts_with(delimiter) => { - let rest = input.advance(i + 1 + delimiter.len()); - return Ok(literal_suffix(rest)); - } - b'\r' => match bytes.next() { - Some((_, b'\n')) => {} - _ => break, - }, - other => { - if !other.is_ascii() { - break; - } - } - } - } - Err(Reject) -} - -fn c_string(input: Cursor) -> Result { - if let Ok(input) = input.parse("c\"") { - cooked_c_string(input) - } else if let Ok(input) = input.parse("cr") { - raw_c_string(input) - } else { - Err(Reject) - } -} - -fn raw_c_string(input: Cursor) -> Result { - let (input, delimiter) = delimiter_of_raw_string(input)?; - let mut bytes = input.bytes().enumerate(); - while let Some((i, byte)) = bytes.next() { - match byte { - b'"' if input.rest[i + 1..].starts_with(delimiter) => { - let rest = input.advance(i + 1 + delimiter.len()); - return Ok(literal_suffix(rest)); - } - b'\r' => match bytes.next() { - Some((_, b'\n')) => {} - _ => break, - }, - b'\0' => break, - _ => {} - } - } - Err(Reject) -} - -fn cooked_c_string(mut input: Cursor) -> Result { - let mut chars = input.char_indices(); - - while let Some((i, ch)) = chars.next() { - match ch { - '"' => { - let input = input.advance(i + 1); - return Ok(literal_suffix(input)); - } - '\r' => match chars.next() { - Some((_, '\n')) => {} - _ => break, - }, - '\\' => match chars.next() { - Some((_, 'x')) => { - backslash_x_nonzero(&mut chars)?; - } - Some((_, 'n' | 'r' | 't' | '\\' | '\'' | '"')) => {} - Some((_, 'u')) => { - if backslash_u(&mut chars)? 
== '\0' { - break; - } - } - Some((newline, ch @ ('\n' | '\r'))) => { - input = input.advance(newline + 1); - trailing_backslash(&mut input, ch as u8)?; - chars = input.char_indices(); - } - _ => break, - }, - '\0' => break, - _ch => {} - } - } - Err(Reject) -} - -fn byte(input: Cursor) -> Result { - let input = input.parse("b'")?; - let mut bytes = input.bytes().enumerate(); - let ok = match bytes.next().map(|(_, b)| b) { - Some(b'\\') => match bytes.next().map(|(_, b)| b) { - Some(b'x') => backslash_x_byte(&mut bytes).is_ok(), - Some(b'n' | b'r' | b't' | b'\\' | b'0' | b'\'' | b'"') => true, - _ => false, - }, - b => b.is_some(), - }; - if !ok { - return Err(Reject); - } - let (offset, _) = bytes.next().ok_or(Reject)?; - if !input.chars().as_str().is_char_boundary(offset) { - return Err(Reject); - } - let input = input.advance(offset).parse("'")?; - Ok(literal_suffix(input)) -} - -fn character(input: Cursor) -> Result { - let input = input.parse("'")?; - let mut chars = input.char_indices(); - let ok = match chars.next().map(|(_, ch)| ch) { - Some('\\') => match chars.next().map(|(_, ch)| ch) { - Some('x') => backslash_x_char(&mut chars).is_ok(), - Some('u') => backslash_u(&mut chars).is_ok(), - Some('n' | 'r' | 't' | '\\' | '0' | '\'' | '"') => true, - _ => false, - }, - ch => ch.is_some(), - }; - if !ok { - return Err(Reject); - } - let (idx, _) = chars.next().ok_or(Reject)?; - let input = input.advance(idx).parse("'")?; - Ok(literal_suffix(input)) -} - -macro_rules! next_ch { - ($chars:ident @ $pat:pat) => { - match $chars.next() { - Some((_, ch)) => match ch { - $pat => ch, - _ => return Err(Reject), - }, - None => return Err(Reject), - } - }; -} - -fn backslash_x_char(chars: &mut I) -> Result<(), Reject> -where - I: Iterator, -{ - next_ch!(chars @ '0'..='7'); - next_ch!(chars @ '0'..='9' | 'a'..='f' | 'A'..='F'); - Ok(()) -} - -fn backslash_x_byte(chars: &mut I) -> Result<(), Reject> -where - I: Iterator, -{ - next_ch!(chars @ b'0'..=b'9' | b'a'..=b'f' | b'A'..=b'F'); - next_ch!(chars @ b'0'..=b'9' | b'a'..=b'f' | b'A'..=b'F'); - Ok(()) -} - -fn backslash_x_nonzero(chars: &mut I) -> Result<(), Reject> -where - I: Iterator, -{ - let first = next_ch!(chars @ '0'..='9' | 'a'..='f' | 'A'..='F'); - let second = next_ch!(chars @ '0'..='9' | 'a'..='f' | 'A'..='F'); - if first == '0' && second == '0' { - Err(Reject) - } else { - Ok(()) - } -} - -fn backslash_u(chars: &mut I) -> Result -where - I: Iterator, -{ - next_ch!(chars @ '{'); - let mut value = 0; - let mut len = 0; - for (_, ch) in chars { - let digit = match ch { - '0'..='9' => ch as u8 - b'0', - 'a'..='f' => 10 + ch as u8 - b'a', - 'A'..='F' => 10 + ch as u8 - b'A', - '_' if len > 0 => continue, - '}' if len > 0 => return char::from_u32(value).ok_or(Reject), - _ => break, - }; - if len == 6 { - break; - } - value *= 0x10; - value += u32::from(digit); - len += 1; - } - Err(Reject) -} - -fn trailing_backslash(input: &mut Cursor, mut last: u8) -> Result<(), Reject> { - let mut whitespace = input.bytes().enumerate(); - loop { - if last == b'\r' && whitespace.next().map_or(true, |(_, b)| b != b'\n') { - return Err(Reject); - } - match whitespace.next() { - Some((_, b @ (b' ' | b'\t' | b'\n' | b'\r'))) => { - last = b; - } - Some((offset, _)) => { - *input = input.advance(offset); - return Ok(()); - } - None => return Err(Reject), - } - } -} - -fn float(input: Cursor) -> Result { - let mut rest = float_digits(input)?; - if let Some(ch) = rest.chars().next() { - if is_ident_start(ch) { - rest = ident_not_raw(rest)?.0; - } - } - 
word_break(rest) -} - -fn float_digits(input: Cursor) -> Result { - let mut chars = input.chars().peekable(); - match chars.next() { - Some(ch) if '0' <= ch && ch <= '9' => {} - _ => return Err(Reject), - } - - let mut len = 1; - let mut has_dot = false; - let mut has_exp = false; - while let Some(&ch) = chars.peek() { - match ch { - '0'..='9' | '_' => { - chars.next(); - len += 1; - } - '.' => { - if has_dot { - break; - } - chars.next(); - if chars - .peek() - .map_or(false, |&ch| ch == '.' || is_ident_start(ch)) - { - return Err(Reject); - } - len += 1; - has_dot = true; - } - 'e' | 'E' => { - chars.next(); - len += 1; - has_exp = true; - break; - } - _ => break, - } - } - - if !(has_dot || has_exp) { - return Err(Reject); - } - - if has_exp { - let token_before_exp = if has_dot { - Ok(input.advance(len - 1)) - } else { - Err(Reject) - }; - let mut has_sign = false; - let mut has_exp_value = false; - while let Some(&ch) = chars.peek() { - match ch { - '+' | '-' => { - if has_exp_value { - break; - } - if has_sign { - return token_before_exp; - } - chars.next(); - len += 1; - has_sign = true; - } - '0'..='9' => { - chars.next(); - len += 1; - has_exp_value = true; - } - '_' => { - chars.next(); - len += 1; - } - _ => break, - } - } - if !has_exp_value { - return token_before_exp; - } - } - - Ok(input.advance(len)) -} - -fn int(input: Cursor) -> Result { - let mut rest = digits(input)?; - if let Some(ch) = rest.chars().next() { - if is_ident_start(ch) { - rest = ident_not_raw(rest)?.0; - } - } - word_break(rest) -} - -fn digits(mut input: Cursor) -> Result { - let base = if input.starts_with("0x") { - input = input.advance(2); - 16 - } else if input.starts_with("0o") { - input = input.advance(2); - 8 - } else if input.starts_with("0b") { - input = input.advance(2); - 2 - } else { - 10 - }; - - let mut len = 0; - let mut empty = true; - for b in input.bytes() { - match b { - b'0'..=b'9' => { - let digit = (b - b'0') as u64; - if digit >= base { - return Err(Reject); - } - } - b'a'..=b'f' => { - let digit = 10 + (b - b'a') as u64; - if digit >= base { - break; - } - } - b'A'..=b'F' => { - let digit = 10 + (b - b'A') as u64; - if digit >= base { - break; - } - } - b'_' => { - if empty && base == 10 { - return Err(Reject); - } - len += 1; - continue; - } - _ => break, - } - len += 1; - empty = false; - } - if empty { - Err(Reject) - } else { - Ok(input.advance(len)) - } -} - -fn punct(input: Cursor) -> PResult { - let (rest, ch) = punct_char(input)?; - if ch == '\'' { - let (after_lifetime, _ident) = ident_any(rest)?; - if after_lifetime.starts_with_char('\'') - || (after_lifetime.starts_with_char('#') && !rest.starts_with("r#")) - { - Err(Reject) - } else { - Ok((rest, Punct::new('\'', Spacing::Joint))) - } - } else { - let kind = match punct_char(rest) { - Ok(_) => Spacing::Joint, - Err(Reject) => Spacing::Alone, - }; - Ok((rest, Punct::new(ch, kind))) - } -} - -fn punct_char(input: Cursor) -> PResult { - if input.starts_with("//") || input.starts_with("/*") { - // Do not accept `/` of a comment as a punct. 
- return Err(Reject); - } - - let mut chars = input.chars(); - let first = match chars.next() { - Some(ch) => ch, - None => { - return Err(Reject); - } - }; - let recognized = "~!@#$%^&*-=+|;:,<.>/?'"; - if recognized.contains(first) { - Ok((input.advance(first.len_utf8()), first)) - } else { - Err(Reject) - } -} - -fn doc_comment<'a>(input: Cursor<'a>, trees: &mut TokenStreamBuilder) -> PResult<'a, ()> { - #[cfg(span_locations)] - let lo = input.off; - let (rest, (comment, inner)) = doc_comment_contents(input)?; - let fallback_span = Span { - #[cfg(span_locations)] - lo, - #[cfg(span_locations)] - hi: rest.off, - }; - let span = crate::Span::_new_fallback(fallback_span); - - let mut scan_for_bare_cr = comment; - while let Some(cr) = scan_for_bare_cr.find('\r') { - let rest = &scan_for_bare_cr[cr + 1..]; - if !rest.starts_with('\n') { - return Err(Reject); - } - scan_for_bare_cr = rest; - } - - let mut pound = Punct::new('#', Spacing::Alone); - pound.set_span(span); - trees.push_token_from_parser(TokenTree::Punct(pound)); - - if inner { - let mut bang = Punct::new('!', Spacing::Alone); - bang.set_span(span); - trees.push_token_from_parser(TokenTree::Punct(bang)); - } - - let doc_ident = crate::Ident::_new_fallback(Ident::new_unchecked("doc", fallback_span)); - let mut equal = Punct::new('=', Spacing::Alone); - equal.set_span(span); - let mut literal = crate::Literal::_new_fallback(Literal::string(comment)); - literal.set_span(span); - let mut bracketed = TokenStreamBuilder::with_capacity(3); - bracketed.push_token_from_parser(TokenTree::Ident(doc_ident)); - bracketed.push_token_from_parser(TokenTree::Punct(equal)); - bracketed.push_token_from_parser(TokenTree::Literal(literal)); - let group = Group::new(Delimiter::Bracket, bracketed.build()); - let mut group = crate::Group::_new_fallback(group); - group.set_span(span); - trees.push_token_from_parser(TokenTree::Group(group)); - - Ok((rest, ())) -} - -fn doc_comment_contents(input: Cursor) -> PResult<(&str, bool)> { - if input.starts_with("//!") { - let input = input.advance(3); - let (input, s) = take_until_newline_or_eof(input); - Ok((input, (s, true))) - } else if input.starts_with("/*!") { - let (input, s) = block_comment(input)?; - Ok((input, (&s[3..s.len() - 2], true))) - } else if input.starts_with("///") { - let input = input.advance(3); - if input.starts_with_char('/') { - return Err(Reject); - } - let (input, s) = take_until_newline_or_eof(input); - Ok((input, (s, false))) - } else if input.starts_with("/**") && !input.rest[3..].starts_with('*') { - let (input, s) = block_comment(input)?; - Ok((input, (&s[3..s.len() - 2], false))) - } else { - Err(Reject) - } -} - -fn take_until_newline_or_eof(input: Cursor) -> (Cursor, &str) { - let chars = input.char_indices(); - - for (i, ch) in chars { - if ch == '\n' { - return (input.advance(i), &input.rest[..i]); - } else if ch == '\r' && input.rest[i + 1..].starts_with('\n') { - return (input.advance(i + 1), &input.rest[..i]); - } - } - - (input.advance(input.len()), input.rest) -} diff --git a/vendor/proc-macro2/src/probe.rs b/vendor/proc-macro2/src/probe.rs deleted file mode 100644 index b67f52036218de..00000000000000 --- a/vendor/proc-macro2/src/probe.rs +++ /dev/null @@ -1,10 +0,0 @@ -#![allow(dead_code)] - -#[cfg(proc_macro_span)] -pub(crate) mod proc_macro_span; - -#[cfg(proc_macro_span_file)] -pub(crate) mod proc_macro_span_file; - -#[cfg(proc_macro_span_location)] -pub(crate) mod proc_macro_span_location; diff --git a/vendor/proc-macro2/src/probe/proc_macro_span.rs 
b/vendor/proc-macro2/src/probe/proc_macro_span.rs deleted file mode 100644 index 2d7d44e07708b7..00000000000000 --- a/vendor/proc-macro2/src/probe/proc_macro_span.rs +++ /dev/null @@ -1,51 +0,0 @@ -// This code exercises the surface area that we expect of Span's unstable API. -// If the current toolchain is able to compile it, then proc-macro2 is able to -// offer these APIs too. - -#![cfg_attr(procmacro2_build_probe, feature(proc_macro_span))] - -extern crate proc_macro; - -use core::ops::{Range, RangeBounds}; -use proc_macro::{Literal, Span}; -use std::path::PathBuf; - -pub fn byte_range(this: &Span) -> Range { - this.byte_range() -} - -pub fn start(this: &Span) -> Span { - this.start() -} - -pub fn end(this: &Span) -> Span { - this.end() -} - -pub fn line(this: &Span) -> usize { - this.line() -} - -pub fn column(this: &Span) -> usize { - this.column() -} - -pub fn file(this: &Span) -> String { - this.file() -} - -pub fn local_file(this: &Span) -> Option { - this.local_file() -} - -pub fn join(this: &Span, other: Span) -> Option { - this.join(other) -} - -pub fn subspan>(this: &Literal, range: R) -> Option { - this.subspan(range) -} - -// Include in sccache cache key. -#[cfg(procmacro2_build_probe)] -const _: Option<&str> = option_env!("RUSTC_BOOTSTRAP"); diff --git a/vendor/proc-macro2/src/probe/proc_macro_span_file.rs b/vendor/proc-macro2/src/probe/proc_macro_span_file.rs deleted file mode 100644 index 8b76bdf5007b91..00000000000000 --- a/vendor/proc-macro2/src/probe/proc_macro_span_file.rs +++ /dev/null @@ -1,14 +0,0 @@ -// The subset of Span's API stabilized in Rust 1.88. - -extern crate proc_macro; - -use proc_macro::Span; -use std::path::PathBuf; - -pub fn file(this: &Span) -> String { - this.file() -} - -pub fn local_file(this: &Span) -> Option { - this.local_file() -} diff --git a/vendor/proc-macro2/src/probe/proc_macro_span_location.rs b/vendor/proc-macro2/src/probe/proc_macro_span_location.rs deleted file mode 100644 index 79da34af54afea..00000000000000 --- a/vendor/proc-macro2/src/probe/proc_macro_span_location.rs +++ /dev/null @@ -1,21 +0,0 @@ -// The subset of Span's API stabilized in Rust 1.88. 
- -extern crate proc_macro; - -use proc_macro::Span; - -pub fn start(this: &Span) -> Span { - this.start() -} - -pub fn end(this: &Span) -> Span { - this.end() -} - -pub fn line(this: &Span) -> usize { - this.line() -} - -pub fn column(this: &Span) -> usize { - this.column() -} diff --git a/vendor/proc-macro2/src/rcvec.rs b/vendor/proc-macro2/src/rcvec.rs deleted file mode 100644 index 23edc77d597f91..00000000000000 --- a/vendor/proc-macro2/src/rcvec.rs +++ /dev/null @@ -1,146 +0,0 @@ -use alloc::rc::Rc; -use alloc::vec; -use core::mem; -use core::panic::RefUnwindSafe; -use core::slice; - -pub(crate) struct RcVec { - inner: Rc>, -} - -pub(crate) struct RcVecBuilder { - inner: Vec, -} - -pub(crate) struct RcVecMut<'a, T> { - inner: &'a mut Vec, -} - -#[derive(Clone)] -pub(crate) struct RcVecIntoIter { - inner: vec::IntoIter, -} - -impl RcVec { - pub(crate) fn is_empty(&self) -> bool { - self.inner.is_empty() - } - - pub(crate) fn len(&self) -> usize { - self.inner.len() - } - - pub(crate) fn iter(&self) -> slice::Iter { - self.inner.iter() - } - - pub(crate) fn make_mut(&mut self) -> RcVecMut - where - T: Clone, - { - RcVecMut { - inner: Rc::make_mut(&mut self.inner), - } - } - - pub(crate) fn get_mut(&mut self) -> Option> { - let inner = Rc::get_mut(&mut self.inner)?; - Some(RcVecMut { inner }) - } - - pub(crate) fn make_owned(mut self) -> RcVecBuilder - where - T: Clone, - { - let vec = if let Some(owned) = Rc::get_mut(&mut self.inner) { - mem::take(owned) - } else { - Vec::clone(&self.inner) - }; - RcVecBuilder { inner: vec } - } -} - -impl RcVecBuilder { - pub(crate) fn new() -> Self { - RcVecBuilder { inner: Vec::new() } - } - - pub(crate) fn with_capacity(cap: usize) -> Self { - RcVecBuilder { - inner: Vec::with_capacity(cap), - } - } - - pub(crate) fn push(&mut self, element: T) { - self.inner.push(element); - } - - pub(crate) fn extend(&mut self, iter: impl IntoIterator) { - self.inner.extend(iter); - } - - pub(crate) fn as_mut(&mut self) -> RcVecMut { - RcVecMut { - inner: &mut self.inner, - } - } - - pub(crate) fn build(self) -> RcVec { - RcVec { - inner: Rc::new(self.inner), - } - } -} - -impl<'a, T> RcVecMut<'a, T> { - pub(crate) fn push(&mut self, element: T) { - self.inner.push(element); - } - - pub(crate) fn extend(&mut self, iter: impl IntoIterator) { - self.inner.extend(iter); - } - - pub(crate) fn as_mut(&mut self) -> RcVecMut { - RcVecMut { inner: self.inner } - } - - pub(crate) fn take(self) -> RcVecBuilder { - let vec = mem::take(self.inner); - RcVecBuilder { inner: vec } - } -} - -impl Clone for RcVec { - fn clone(&self) -> Self { - RcVec { - inner: Rc::clone(&self.inner), - } - } -} - -impl IntoIterator for RcVecBuilder { - type Item = T; - type IntoIter = RcVecIntoIter; - - fn into_iter(self) -> Self::IntoIter { - RcVecIntoIter { - inner: self.inner.into_iter(), - } - } -} - -impl Iterator for RcVecIntoIter { - type Item = T; - - fn next(&mut self) -> Option { - self.inner.next() - } - - fn size_hint(&self) -> (usize, Option) { - self.inner.size_hint() - } -} - -impl RefUnwindSafe for RcVec where T: RefUnwindSafe {} diff --git a/vendor/proc-macro2/src/rustc_literal_escaper.rs b/vendor/proc-macro2/src/rustc_literal_escaper.rs deleted file mode 100644 index 8233e5d6565db8..00000000000000 --- a/vendor/proc-macro2/src/rustc_literal_escaper.rs +++ /dev/null @@ -1,701 +0,0 @@ -// Vendored from rustc-literal-escaper v0.0.5 -// https://github.com/rust-lang/literal-escaper/tree/v0.0.5 - -//! Utilities for validating (raw) string, char, and byte literals and -//! 
turning escape sequences into the values they represent. - -use crate::num::NonZeroChar; -use std::ffi::CStr; -use std::num::NonZeroU8; -use std::ops::Range; -use std::str::Chars; - -/// Errors and warnings that can occur during string, char, and byte unescaping. -/// -/// Mostly relating to malformed escape sequences, but also a few other problems. -#[derive(Debug, PartialEq, Eq)] -pub enum EscapeError { - /// Expected 1 char, but 0 were found. - ZeroChars, - /// Expected 1 char, but more than 1 were found. - MoreThanOneChar, - - /// Escaped '\' character without continuation. - LoneSlash, - /// Invalid escape character (e.g. '\z'). - InvalidEscape, - /// Raw '\r' encountered. - BareCarriageReturn, - /// Raw '\r' encountered in raw string. - BareCarriageReturnInRawString, - /// Unescaped character that was expected to be escaped (e.g. raw '\t'). - EscapeOnlyChar, - - /// Numeric character escape is too short (e.g. '\x1'). - TooShortHexEscape, - /// Invalid character in numeric escape (e.g. '\xz') - InvalidCharInHexEscape, - /// Character code in numeric escape is non-ascii (e.g. '\xFF'). - OutOfRangeHexEscape, - - /// '\u' not followed by '{'. - NoBraceInUnicodeEscape, - /// Non-hexadecimal value in '\u{..}'. - InvalidCharInUnicodeEscape, - /// '\u{}' - EmptyUnicodeEscape, - /// No closing brace in '\u{..}', e.g. '\u{12'. - UnclosedUnicodeEscape, - /// '\u{_12}' - LeadingUnderscoreUnicodeEscape, - /// More than 6 characters in '\u{..}', e.g. '\u{10FFFF_FF}' - OverlongUnicodeEscape, - /// Invalid in-bound unicode character code, e.g. '\u{DFFF}'. - LoneSurrogateUnicodeEscape, - /// Out of bounds unicode character code, e.g. '\u{FFFFFF}'. - OutOfRangeUnicodeEscape, - - /// Unicode escape code in byte literal. - UnicodeEscapeInByte, - /// Non-ascii character in byte literal, byte string literal, or raw byte string literal. - NonAsciiCharInByte, - - /// `\0` in a C string literal. - NulInCStr, - - /// After a line ending with '\', the next line contains whitespace - /// characters that are not skipped. - UnskippedWhitespaceWarning, - - /// After a line ending with '\', multiple lines are skipped. - MultipleSkippedLinesWarning, -} - -impl EscapeError { - /// Returns true for actual errors, as opposed to warnings. - pub fn is_fatal(&self) -> bool { - !matches!( - self, - EscapeError::UnskippedWhitespaceWarning | EscapeError::MultipleSkippedLinesWarning - ) - } -} - -/// Check a raw string literal for validity -/// -/// Takes the contents of a raw string literal (without quotes) -/// and produces a sequence of characters or errors, -/// which are returned by invoking `callback`. -/// NOTE: Does no escaping, but produces errors for bare carriage return ('\r'). -pub fn check_raw_str(src: &str, callback: impl FnMut(Range, Result)) { - str::check_raw(src, callback); -} - -/// Check a raw byte string literal for validity -/// -/// Takes the contents of a raw byte string literal (without quotes) -/// and produces a sequence of bytes or errors, -/// which are returned by invoking `callback`. -/// NOTE: Does no escaping, but produces errors for bare carriage return ('\r'). -pub fn check_raw_byte_str(src: &str, callback: impl FnMut(Range, Result)) { - <[u8]>::check_raw(src, callback); -} - -/// Check a raw C string literal for validity -/// -/// Takes the contents of a raw C string literal (without quotes) -/// and produces a sequence of characters or errors, -/// which are returned by invoking `callback`. -/// NOTE: Does no escaping, but produces errors for bare carriage return ('\r'). 
-pub fn check_raw_c_str( - src: &str, - callback: impl FnMut(Range, Result), -) { - CStr::check_raw(src, callback); -} - -/// Trait for checking raw string literals for validity -trait CheckRaw { - /// Unit type of the implementing string type (`char` for string, `u8` for byte string) - type RawUnit; - - /// Converts chars to the unit type of the literal type - fn char2raw_unit(c: char) -> Result; - - /// Takes the contents of a raw literal (without quotes) - /// and produces a sequence of `Result` - /// which are returned via `callback`. - /// - /// NOTE: Does no escaping, but produces errors for bare carriage return ('\r'). - fn check_raw( - src: &str, - mut callback: impl FnMut(Range, Result), - ) { - let mut chars = src.chars(); - while let Some(c) = chars.next() { - let start = src.len() - chars.as_str().len() - c.len_utf8(); - let res = match c { - '\r' => Err(EscapeError::BareCarriageReturnInRawString), - _ => Self::char2raw_unit(c), - }; - let end = src.len() - chars.as_str().len(); - callback(start..end, res); - } - - // Unfortunately, it is a bit unclear whether the following equivalent code is slower or faster: bug 141855 - // src.char_indices().for_each(|(pos, c)| { - // callback( - // pos..pos + c.len_utf8(), - // if c == '\r' { - // Err(EscapeError::BareCarriageReturnInRawString) - // } else { - // Self::char2raw_unit(c) - // }, - // ); - // }); - } -} - -impl CheckRaw for str { - type RawUnit = char; - - #[inline] - fn char2raw_unit(c: char) -> Result { - Ok(c) - } -} - -impl CheckRaw for [u8] { - type RawUnit = u8; - - #[inline] - fn char2raw_unit(c: char) -> Result { - char2byte(c) - } -} - -/// Turn an ascii char into a byte -#[inline] -fn char2byte(c: char) -> Result { - // do NOT do: c.try_into().ok_or(EscapeError::NonAsciiCharInByte) - if c.is_ascii() { - Ok(c as u8) - } else { - Err(EscapeError::NonAsciiCharInByte) - } -} - -impl CheckRaw for CStr { - type RawUnit = NonZeroChar; - - #[inline] - fn char2raw_unit(c: char) -> Result { - NonZeroChar::new(c).ok_or(EscapeError::NulInCStr) - } -} - -/// Unescape a char literal -/// -/// Takes the contents of a char literal (without quotes), -/// and returns an unescaped char or an error. -#[inline] -pub fn unescape_char(src: &str) -> Result { - str::unescape_single(&mut src.chars()) -} - -/// Unescape a byte literal -/// -/// Takes the contents of a byte literal (without quotes), -/// and returns an unescaped byte or an error. -#[inline] -pub fn unescape_byte(src: &str) -> Result { - <[u8]>::unescape_single(&mut src.chars()) -} - -/// Unescape a string literal -/// -/// Takes the contents of a string literal (without quotes) -/// and produces a sequence of escaped characters or errors, -/// which are returned by invoking `callback`. -pub fn unescape_str(src: &str, callback: impl FnMut(Range, Result)) { - str::unescape(src, callback) -} - -/// Unescape a byte string literal -/// -/// Takes the contents of a byte string literal (without quotes) -/// and produces a sequence of escaped bytes or errors, -/// which are returned by invoking `callback`. -pub fn unescape_byte_str(src: &str, callback: impl FnMut(Range, Result)) { - <[u8]>::unescape(src, callback) -} - -/// Unescape a C string literal -/// -/// Takes the contents of a C string literal (without quotes) -/// and produces a sequence of escaped MixedUnits or errors, -/// which are returned by invoking `callback`. 
-pub fn unescape_c_str( - src: &str, - callback: impl FnMut(Range, Result), -) { - CStr::unescape(src, callback) -} - -/// Enum representing either a char or a byte -/// -/// Used for mixed utf8 string literals, i.e. those that allow both unicode -/// chars and high bytes. -#[derive(Copy, Clone, Debug, PartialEq, Eq)] -pub enum MixedUnit { - /// Used for ASCII chars (written directly or via `\x00`..`\x7f` escapes) - /// and Unicode chars (written directly or via `\u` escapes). - /// - /// For example, if '¥' appears in a string it is represented here as - /// `MixedUnit::Char('¥')`, and it will be appended to the relevant byte - /// string as the two-byte UTF-8 sequence `[0xc2, 0xa5]` - Char(NonZeroChar), - - /// Used for high bytes (`\x80`..`\xff`). - /// - /// For example, if `\xa5` appears in a string it is represented here as - /// `MixedUnit::HighByte(0xa5)`, and it will be appended to the relevant - /// byte string as the single byte `0xa5`. - HighByte(NonZeroU8), -} - -impl From for MixedUnit { - #[inline] - fn from(c: NonZeroChar) -> Self { - MixedUnit::Char(c) - } -} - -impl From for MixedUnit { - #[inline] - fn from(byte: NonZeroU8) -> Self { - if byte.get().is_ascii() { - MixedUnit::Char(NonZeroChar::new(byte.get() as char).unwrap()) - } else { - MixedUnit::HighByte(byte) - } - } -} - -impl TryFrom for MixedUnit { - type Error = EscapeError; - - #[inline] - fn try_from(c: char) -> Result { - NonZeroChar::new(c) - .map(MixedUnit::Char) - .ok_or(EscapeError::NulInCStr) - } -} - -impl TryFrom for MixedUnit { - type Error = EscapeError; - - #[inline] - fn try_from(byte: u8) -> Result { - NonZeroU8::new(byte) - .map(From::from) - .ok_or(EscapeError::NulInCStr) - } -} - -/// Trait for unescaping escape sequences in strings -trait Unescape { - /// Unit type of the implementing string type (`char` for string, `u8` for byte string) - type Unit; - - /// Result of unescaping the zero char ('\0') - const ZERO_RESULT: Result; - - /// Converts non-zero bytes to the unit type - fn nonzero_byte2unit(b: NonZeroU8) -> Self::Unit; - - /// Converts chars to the unit type - fn char2unit(c: char) -> Result; - - /// Converts the byte of a hex escape to the unit type - fn hex2unit(b: u8) -> Result; - - /// Converts the result of a unicode escape to the unit type - fn unicode2unit(r: Result) -> Result; - - /// Unescape a single unit (single quote syntax) - fn unescape_single(chars: &mut Chars<'_>) -> Result { - let res = match chars.next().ok_or(EscapeError::ZeroChars)? { - '\\' => Self::unescape_1(chars), - '\n' | '\t' | '\'' => Err(EscapeError::EscapeOnlyChar), - '\r' => Err(EscapeError::BareCarriageReturn), - c => Self::char2unit(c), - }?; - if chars.next().is_some() { - return Err(EscapeError::MoreThanOneChar); - } - Ok(res) - } - - /// Unescape the first unit of a string (double quoted syntax) - fn unescape_1(chars: &mut Chars<'_>) -> Result { - // Previous character was '\\', unescape what follows. 
- let c = chars.next().ok_or(EscapeError::LoneSlash)?; - if c == '0' { - Self::ZERO_RESULT - } else { - simple_escape(c) - .map(|b| Self::nonzero_byte2unit(b)) - .or_else(|c| match c { - 'x' => Self::hex2unit(hex_escape(chars)?), - 'u' => Self::unicode2unit({ - let value = unicode_escape(chars)?; - if value > char::MAX as u32 { - Err(EscapeError::OutOfRangeUnicodeEscape) - } else { - char::from_u32(value).ok_or(EscapeError::LoneSurrogateUnicodeEscape) - } - }), - _ => Err(EscapeError::InvalidEscape), - }) - } - } - - /// Unescape a string literal - /// - /// Takes the contents of a raw string literal (without quotes) - /// and produces a sequence of `Result` - /// which are returned via `callback`. - fn unescape( - src: &str, - mut callback: impl FnMut(Range, Result), - ) { - let mut chars = src.chars(); - while let Some(c) = chars.next() { - let start = src.len() - chars.as_str().len() - c.len_utf8(); - let res = match c { - '\\' => { - if let Some(b'\n') = chars.as_str().as_bytes().first() { - let _ = chars.next(); - // skip whitespace for backslash newline, see [Rust language reference] - // (https://doc.rust-lang.org/reference/tokens.html#string-literals). - let callback_err = |range, err| callback(range, Err(err)); - skip_ascii_whitespace(&mut chars, start, callback_err); - continue; - } else { - Self::unescape_1(&mut chars) - } - } - '"' => Err(EscapeError::EscapeOnlyChar), - '\r' => Err(EscapeError::BareCarriageReturn), - c => Self::char2unit(c), - }; - let end = src.len() - chars.as_str().len(); - callback(start..end, res); - } - } -} - -/// Interpret a non-nul ASCII escape -/// -/// Parses the character of an ASCII escape (except nul) without the leading backslash. -#[inline] // single use in Unescape::unescape_1 -fn simple_escape(c: char) -> Result { - // Previous character was '\\', unescape what follows. - Ok(NonZeroU8::new(match c { - '"' => b'"', - 'n' => b'\n', - 'r' => b'\r', - 't' => b'\t', - '\\' => b'\\', - '\'' => b'\'', - _ => Err(c)?, - }) - .unwrap()) -} - -/// Interpret a hexadecimal escape -/// -/// Parses the two hexadecimal characters of a hexadecimal escape without the leading r"\x". -#[inline] // single use in Unescape::unescape_1 -fn hex_escape(chars: &mut impl Iterator) -> Result { - let hi = chars.next().ok_or(EscapeError::TooShortHexEscape)?; - let hi = hi.to_digit(16).ok_or(EscapeError::InvalidCharInHexEscape)?; - - let lo = chars.next().ok_or(EscapeError::TooShortHexEscape)?; - let lo = lo.to_digit(16).ok_or(EscapeError::InvalidCharInHexEscape)?; - - Ok((hi * 16 + lo) as u8) -} - -/// Interpret a unicode escape -/// -/// Parse the braces with hexadecimal characters (and underscores) part of a unicode escape. -/// This r"{...}" normally comes after r"\u" and cannot start with an underscore. -#[inline] // single use in Unescape::unescape_1 -fn unicode_escape(chars: &mut impl Iterator) -> Result { - if chars.next() != Some('{') { - return Err(EscapeError::NoBraceInUnicodeEscape); - } - - // First character must be a hexadecimal digit. - let mut value: u32 = match chars.next().ok_or(EscapeError::UnclosedUnicodeEscape)? { - '_' => return Err(EscapeError::LeadingUnderscoreUnicodeEscape), - '}' => return Err(EscapeError::EmptyUnicodeEscape), - c => c - .to_digit(16) - .ok_or(EscapeError::InvalidCharInUnicodeEscape)?, - }; - - // First character is valid, now parse the rest of the number - // and closing brace. 
- let mut n_digits = 1; - loop { - match chars.next() { - None => return Err(EscapeError::UnclosedUnicodeEscape), - Some('_') => continue, - Some('}') => { - // Incorrect syntax has higher priority for error reporting - // than unallowed value for a literal. - return if n_digits > 6 { - Err(EscapeError::OverlongUnicodeEscape) - } else { - Ok(value) - }; - } - Some(c) => { - let digit: u32 = c - .to_digit(16) - .ok_or(EscapeError::InvalidCharInUnicodeEscape)?; - n_digits += 1; - if n_digits > 6 { - // Stop updating value since we're sure that it's incorrect already. - continue; - } - value = value * 16 + digit; - } - }; - } -} - -/// Interpret a string continuation escape (https://doc.rust-lang.org/reference/expressions/literal-expr.html#string-continuation-escapes) -/// -/// Skip ASCII whitespace, except for the formfeed character -/// (see [this issue](https://github.com/rust-lang/rust/issues/136600)). -/// Warns on unescaped newline and following non-ASCII whitespace. -#[inline] // single use in Unescape::unescape -fn skip_ascii_whitespace( - chars: &mut Chars<'_>, - start: usize, - mut callback: impl FnMut(Range, EscapeError), -) { - let rest = chars.as_str(); - let first_non_space = rest - .bytes() - .position(|b| b != b' ' && b != b'\t' && b != b'\n' && b != b'\r') - .unwrap_or(rest.len()); - let (space, rest) = rest.split_at(first_non_space); - // backslash newline adds 2 bytes - let end = start + 2 + first_non_space; - if space.contains('\n') { - callback(start..end, EscapeError::MultipleSkippedLinesWarning); - } - *chars = rest.chars(); - if let Some(c) = chars.clone().next() { - if c.is_whitespace() { - // for error reporting, include the character that was not skipped in the span - callback( - start..end + c.len_utf8(), - EscapeError::UnskippedWhitespaceWarning, - ); - } - } -} - -impl Unescape for str { - type Unit = char; - - const ZERO_RESULT: Result = Ok('\0'); - - #[inline] - fn nonzero_byte2unit(b: NonZeroU8) -> Self::Unit { - b.get().into() - } - - #[inline] - fn char2unit(c: char) -> Result { - Ok(c) - } - - #[inline] - fn hex2unit(b: u8) -> Result { - if b.is_ascii() { - Ok(b as char) - } else { - Err(EscapeError::OutOfRangeHexEscape) - } - } - - #[inline] - fn unicode2unit(r: Result) -> Result { - r - } -} - -impl Unescape for [u8] { - type Unit = u8; - - const ZERO_RESULT: Result = Ok(b'\0'); - - #[inline] - fn nonzero_byte2unit(b: NonZeroU8) -> Self::Unit { - b.get() - } - - #[inline] - fn char2unit(c: char) -> Result { - char2byte(c) - } - - #[inline] - fn hex2unit(b: u8) -> Result { - Ok(b) - } - - #[inline] - fn unicode2unit(_r: Result) -> Result { - Err(EscapeError::UnicodeEscapeInByte) - } -} - -impl Unescape for CStr { - type Unit = MixedUnit; - - const ZERO_RESULT: Result = Err(EscapeError::NulInCStr); - - #[inline] - fn nonzero_byte2unit(b: NonZeroU8) -> Self::Unit { - b.into() - } - - #[inline] - fn char2unit(c: char) -> Result { - c.try_into() - } - - #[inline] - fn hex2unit(byte: u8) -> Result { - byte.try_into() - } - - #[inline] - fn unicode2unit(r: Result) -> Result { - Self::char2unit(r?) 
- } -} - -/// Enum of the different kinds of literal -#[derive(Debug, Clone, Copy, PartialEq)] -pub enum Mode { - /// `'a'` - Char, - - /// `b'a'` - Byte, - - /// `"hello"` - Str, - /// `r"hello"` - RawStr, - - /// `b"hello"` - ByteStr, - /// `br"hello"` - RawByteStr, - - /// `c"hello"` - CStr, - /// `cr"hello"` - RawCStr, -} - -impl Mode { - pub fn in_double_quotes(self) -> bool { - match self { - Mode::Str - | Mode::RawStr - | Mode::ByteStr - | Mode::RawByteStr - | Mode::CStr - | Mode::RawCStr => true, - Mode::Char | Mode::Byte => false, - } - } - - pub fn prefix_noraw(self) -> &'static str { - match self { - Mode::Char | Mode::Str | Mode::RawStr => "", - Mode::Byte | Mode::ByteStr | Mode::RawByteStr => "b", - Mode::CStr | Mode::RawCStr => "c", - } - } -} - -/// Check a literal only for errors -/// -/// Takes the contents of a literal (without quotes) -/// and produces a sequence of only errors, -/// which are returned by invoking `error_callback`. -/// -/// NB Does not produce any output other than errors -pub fn check_for_errors( - src: &str, - mode: Mode, - mut error_callback: impl FnMut(Range, EscapeError), -) { - match mode { - Mode::Char => { - let mut chars = src.chars(); - if let Err(e) = str::unescape_single(&mut chars) { - error_callback(0..(src.len() - chars.as_str().len()), e); - } - } - Mode::Byte => { - let mut chars = src.chars(); - if let Err(e) = <[u8]>::unescape_single(&mut chars) { - error_callback(0..(src.len() - chars.as_str().len()), e); - } - } - Mode::Str => unescape_str(src, |range, res| { - if let Err(e) = res { - error_callback(range, e); - } - }), - Mode::ByteStr => unescape_byte_str(src, |range, res| { - if let Err(e) = res { - error_callback(range, e); - } - }), - Mode::CStr => unescape_c_str(src, |range, res| { - if let Err(e) = res { - error_callback(range, e); - } - }), - Mode::RawStr => check_raw_str(src, |range, res| { - if let Err(e) = res { - error_callback(range, e); - } - }), - Mode::RawByteStr => check_raw_byte_str(src, |range, res| { - if let Err(e) = res { - error_callback(range, e); - } - }), - Mode::RawCStr => check_raw_c_str(src, |range, res| { - if let Err(e) = res { - error_callback(range, e); - } - }), - } -} diff --git a/vendor/proc-macro2/src/wrapper.rs b/vendor/proc-macro2/src/wrapper.rs deleted file mode 100644 index 2e3eb5b4d04e28..00000000000000 --- a/vendor/proc-macro2/src/wrapper.rs +++ /dev/null @@ -1,984 +0,0 @@ -use crate::detection::inside_proc_macro; -use crate::fallback::{self, FromStr2 as _}; -#[cfg(span_locations)] -use crate::location::LineColumn; -#[cfg(proc_macro_span)] -use crate::probe::proc_macro_span; -#[cfg(all(span_locations, proc_macro_span_file))] -use crate::probe::proc_macro_span_file; -#[cfg(all(span_locations, proc_macro_span_location))] -use crate::probe::proc_macro_span_location; -use crate::{Delimiter, Punct, Spacing, TokenTree}; -use core::fmt::{self, Debug, Display}; -#[cfg(span_locations)] -use core::ops::Range; -use core::ops::RangeBounds; -use std::ffi::CStr; -#[cfg(span_locations)] -use std::path::PathBuf; - -#[derive(Clone)] -pub(crate) enum TokenStream { - Compiler(DeferredTokenStream), - Fallback(fallback::TokenStream), -} - -// Work around https://github.com/rust-lang/rust/issues/65080. -// In `impl Extend for TokenStream` which is used heavily by quote, -// we hold on to the appended tokens and do proc_macro::TokenStream::extend as -// late as possible to batch together consecutive uses of the Extend impl. 
-#[derive(Clone)] -pub(crate) struct DeferredTokenStream { - stream: proc_macro::TokenStream, - extra: Vec, -} - -pub(crate) enum LexError { - Compiler(proc_macro::LexError), - Fallback(fallback::LexError), - - // Rustc was supposed to return a LexError, but it panicked instead. - // https://github.com/rust-lang/rust/issues/58736 - CompilerPanic, -} - -#[cold] -fn mismatch(line: u32) -> ! { - #[cfg(procmacro2_backtrace)] - { - let backtrace = std::backtrace::Backtrace::force_capture(); - panic!("compiler/fallback mismatch L{}\n\n{}", line, backtrace) - } - #[cfg(not(procmacro2_backtrace))] - { - panic!("compiler/fallback mismatch L{}", line) - } -} - -impl DeferredTokenStream { - fn new(stream: proc_macro::TokenStream) -> Self { - DeferredTokenStream { - stream, - extra: Vec::new(), - } - } - - fn is_empty(&self) -> bool { - self.stream.is_empty() && self.extra.is_empty() - } - - fn evaluate_now(&mut self) { - // If-check provides a fast short circuit for the common case of `extra` - // being empty, which saves a round trip over the proc macro bridge. - // Improves macro expansion time in winrt by 6% in debug mode. - if !self.extra.is_empty() { - self.stream.extend(self.extra.drain(..)); - } - } - - fn into_token_stream(mut self) -> proc_macro::TokenStream { - self.evaluate_now(); - self.stream - } -} - -impl TokenStream { - pub(crate) fn new() -> Self { - if inside_proc_macro() { - TokenStream::Compiler(DeferredTokenStream::new(proc_macro::TokenStream::new())) - } else { - TokenStream::Fallback(fallback::TokenStream::new()) - } - } - - pub(crate) fn from_str_checked(src: &str) -> Result { - if inside_proc_macro() { - Ok(TokenStream::Compiler(DeferredTokenStream::new( - proc_macro::TokenStream::from_str_checked(src)?, - ))) - } else { - Ok(TokenStream::Fallback( - fallback::TokenStream::from_str_checked(src)?, - )) - } - } - - pub(crate) fn is_empty(&self) -> bool { - match self { - TokenStream::Compiler(tts) => tts.is_empty(), - TokenStream::Fallback(tts) => tts.is_empty(), - } - } - - fn unwrap_nightly(self) -> proc_macro::TokenStream { - match self { - TokenStream::Compiler(s) => s.into_token_stream(), - TokenStream::Fallback(_) => mismatch(line!()), - } - } - - fn unwrap_stable(self) -> fallback::TokenStream { - match self { - TokenStream::Compiler(_) => mismatch(line!()), - TokenStream::Fallback(s) => s, - } - } -} - -impl Display for TokenStream { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match self { - TokenStream::Compiler(tts) => Display::fmt(&tts.clone().into_token_stream(), f), - TokenStream::Fallback(tts) => Display::fmt(tts, f), - } - } -} - -impl From for TokenStream { - fn from(inner: proc_macro::TokenStream) -> Self { - TokenStream::Compiler(DeferredTokenStream::new(inner)) - } -} - -impl From for proc_macro::TokenStream { - fn from(inner: TokenStream) -> Self { - match inner { - TokenStream::Compiler(inner) => inner.into_token_stream(), - TokenStream::Fallback(inner) => { - proc_macro::TokenStream::from_str_unchecked(&inner.to_string()) - } - } - } -} - -impl From for TokenStream { - fn from(inner: fallback::TokenStream) -> Self { - TokenStream::Fallback(inner) - } -} - -// Assumes inside_proc_macro(). 
-fn into_compiler_token(token: TokenTree) -> proc_macro::TokenTree { - match token { - TokenTree::Group(tt) => proc_macro::TokenTree::Group(tt.inner.unwrap_nightly()), - TokenTree::Punct(tt) => { - let spacing = match tt.spacing() { - Spacing::Joint => proc_macro::Spacing::Joint, - Spacing::Alone => proc_macro::Spacing::Alone, - }; - let mut punct = proc_macro::Punct::new(tt.as_char(), spacing); - punct.set_span(tt.span().inner.unwrap_nightly()); - proc_macro::TokenTree::Punct(punct) - } - TokenTree::Ident(tt) => proc_macro::TokenTree::Ident(tt.inner.unwrap_nightly()), - TokenTree::Literal(tt) => proc_macro::TokenTree::Literal(tt.inner.unwrap_nightly()), - } -} - -impl From for TokenStream { - fn from(token: TokenTree) -> Self { - if inside_proc_macro() { - TokenStream::Compiler(DeferredTokenStream::new(proc_macro::TokenStream::from( - into_compiler_token(token), - ))) - } else { - TokenStream::Fallback(fallback::TokenStream::from(token)) - } - } -} - -impl FromIterator for TokenStream { - fn from_iter>(trees: I) -> Self { - if inside_proc_macro() { - TokenStream::Compiler(DeferredTokenStream::new( - trees.into_iter().map(into_compiler_token).collect(), - )) - } else { - TokenStream::Fallback(trees.into_iter().collect()) - } - } -} - -impl FromIterator for TokenStream { - fn from_iter>(streams: I) -> Self { - let mut streams = streams.into_iter(); - match streams.next() { - Some(TokenStream::Compiler(mut first)) => { - first.evaluate_now(); - first.stream.extend(streams.map(|s| match s { - TokenStream::Compiler(s) => s.into_token_stream(), - TokenStream::Fallback(_) => mismatch(line!()), - })); - TokenStream::Compiler(first) - } - Some(TokenStream::Fallback(mut first)) => { - first.extend(streams.map(|s| match s { - TokenStream::Fallback(s) => s, - TokenStream::Compiler(_) => mismatch(line!()), - })); - TokenStream::Fallback(first) - } - None => TokenStream::new(), - } - } -} - -impl Extend for TokenStream { - fn extend>(&mut self, stream: I) { - match self { - TokenStream::Compiler(tts) => { - // Here is the reason for DeferredTokenStream. 
- for token in stream { - tts.extra.push(into_compiler_token(token)); - } - } - TokenStream::Fallback(tts) => tts.extend(stream), - } - } -} - -impl Extend for TokenStream { - fn extend>(&mut self, streams: I) { - match self { - TokenStream::Compiler(tts) => { - tts.evaluate_now(); - tts.stream - .extend(streams.into_iter().map(TokenStream::unwrap_nightly)); - } - TokenStream::Fallback(tts) => { - tts.extend(streams.into_iter().map(TokenStream::unwrap_stable)); - } - } - } -} - -impl Debug for TokenStream { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match self { - TokenStream::Compiler(tts) => Debug::fmt(&tts.clone().into_token_stream(), f), - TokenStream::Fallback(tts) => Debug::fmt(tts, f), - } - } -} - -impl LexError { - pub(crate) fn span(&self) -> Span { - match self { - LexError::Compiler(_) | LexError::CompilerPanic => Span::call_site(), - LexError::Fallback(e) => Span::Fallback(e.span()), - } - } -} - -impl From for LexError { - fn from(e: proc_macro::LexError) -> Self { - LexError::Compiler(e) - } -} - -impl From for LexError { - fn from(e: fallback::LexError) -> Self { - LexError::Fallback(e) - } -} - -impl Debug for LexError { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match self { - LexError::Compiler(e) => Debug::fmt(e, f), - LexError::Fallback(e) => Debug::fmt(e, f), - LexError::CompilerPanic => { - let fallback = fallback::LexError::call_site(); - Debug::fmt(&fallback, f) - } - } - } -} - -impl Display for LexError { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match self { - LexError::Compiler(e) => Display::fmt(e, f), - LexError::Fallback(e) => Display::fmt(e, f), - LexError::CompilerPanic => { - let fallback = fallback::LexError::call_site(); - Display::fmt(&fallback, f) - } - } - } -} - -#[derive(Clone)] -pub(crate) enum TokenTreeIter { - Compiler(proc_macro::token_stream::IntoIter), - Fallback(fallback::TokenTreeIter), -} - -impl IntoIterator for TokenStream { - type Item = TokenTree; - type IntoIter = TokenTreeIter; - - fn into_iter(self) -> TokenTreeIter { - match self { - TokenStream::Compiler(tts) => { - TokenTreeIter::Compiler(tts.into_token_stream().into_iter()) - } - TokenStream::Fallback(tts) => TokenTreeIter::Fallback(tts.into_iter()), - } - } -} - -impl Iterator for TokenTreeIter { - type Item = TokenTree; - - fn next(&mut self) -> Option { - let token = match self { - TokenTreeIter::Compiler(iter) => iter.next()?, - TokenTreeIter::Fallback(iter) => return iter.next(), - }; - Some(match token { - proc_macro::TokenTree::Group(tt) => { - TokenTree::Group(crate::Group::_new(Group::Compiler(tt))) - } - proc_macro::TokenTree::Punct(tt) => { - let spacing = match tt.spacing() { - proc_macro::Spacing::Joint => Spacing::Joint, - proc_macro::Spacing::Alone => Spacing::Alone, - }; - let mut o = Punct::new(tt.as_char(), spacing); - o.set_span(crate::Span::_new(Span::Compiler(tt.span()))); - TokenTree::Punct(o) - } - proc_macro::TokenTree::Ident(s) => { - TokenTree::Ident(crate::Ident::_new(Ident::Compiler(s))) - } - proc_macro::TokenTree::Literal(l) => { - TokenTree::Literal(crate::Literal::_new(Literal::Compiler(l))) - } - }) - } - - fn size_hint(&self) -> (usize, Option) { - match self { - TokenTreeIter::Compiler(tts) => tts.size_hint(), - TokenTreeIter::Fallback(tts) => tts.size_hint(), - } - } -} - -#[derive(Copy, Clone)] -pub(crate) enum Span { - Compiler(proc_macro::Span), - Fallback(fallback::Span), -} - -impl Span { - pub(crate) fn call_site() -> Self { - if inside_proc_macro() { - 
Span::Compiler(proc_macro::Span::call_site()) - } else { - Span::Fallback(fallback::Span::call_site()) - } - } - - pub(crate) fn mixed_site() -> Self { - if inside_proc_macro() { - Span::Compiler(proc_macro::Span::mixed_site()) - } else { - Span::Fallback(fallback::Span::mixed_site()) - } - } - - #[cfg(super_unstable)] - pub(crate) fn def_site() -> Self { - if inside_proc_macro() { - Span::Compiler(proc_macro::Span::def_site()) - } else { - Span::Fallback(fallback::Span::def_site()) - } - } - - pub(crate) fn resolved_at(&self, other: Span) -> Span { - match (self, other) { - (Span::Compiler(a), Span::Compiler(b)) => Span::Compiler(a.resolved_at(b)), - (Span::Fallback(a), Span::Fallback(b)) => Span::Fallback(a.resolved_at(b)), - (Span::Compiler(_), Span::Fallback(_)) => mismatch(line!()), - (Span::Fallback(_), Span::Compiler(_)) => mismatch(line!()), - } - } - - pub(crate) fn located_at(&self, other: Span) -> Span { - match (self, other) { - (Span::Compiler(a), Span::Compiler(b)) => Span::Compiler(a.located_at(b)), - (Span::Fallback(a), Span::Fallback(b)) => Span::Fallback(a.located_at(b)), - (Span::Compiler(_), Span::Fallback(_)) => mismatch(line!()), - (Span::Fallback(_), Span::Compiler(_)) => mismatch(line!()), - } - } - - pub(crate) fn unwrap(self) -> proc_macro::Span { - match self { - Span::Compiler(s) => s, - Span::Fallback(_) => panic!("proc_macro::Span is only available in procedural macros"), - } - } - - #[cfg(span_locations)] - pub(crate) fn byte_range(&self) -> Range { - match self { - #[cfg(proc_macro_span)] - Span::Compiler(s) => proc_macro_span::byte_range(s), - #[cfg(not(proc_macro_span))] - Span::Compiler(_) => 0..0, - Span::Fallback(s) => s.byte_range(), - } - } - - #[cfg(span_locations)] - pub(crate) fn start(&self) -> LineColumn { - match self { - #[cfg(proc_macro_span_location)] - Span::Compiler(s) => LineColumn { - line: proc_macro_span_location::line(s), - column: proc_macro_span_location::column(s).saturating_sub(1), - }, - #[cfg(not(proc_macro_span_location))] - Span::Compiler(_) => LineColumn { line: 0, column: 0 }, - Span::Fallback(s) => s.start(), - } - } - - #[cfg(span_locations)] - pub(crate) fn end(&self) -> LineColumn { - match self { - #[cfg(proc_macro_span_location)] - Span::Compiler(s) => { - let end = proc_macro_span_location::end(s); - LineColumn { - line: proc_macro_span_location::line(&end), - column: proc_macro_span_location::column(&end).saturating_sub(1), - } - } - #[cfg(not(proc_macro_span_location))] - Span::Compiler(_) => LineColumn { line: 0, column: 0 }, - Span::Fallback(s) => s.end(), - } - } - - #[cfg(span_locations)] - pub(crate) fn file(&self) -> String { - match self { - #[cfg(proc_macro_span_file)] - Span::Compiler(s) => proc_macro_span_file::file(s), - #[cfg(not(proc_macro_span_file))] - Span::Compiler(_) => "".to_owned(), - Span::Fallback(s) => s.file(), - } - } - - #[cfg(span_locations)] - pub(crate) fn local_file(&self) -> Option { - match self { - #[cfg(proc_macro_span_file)] - Span::Compiler(s) => proc_macro_span_file::local_file(s), - #[cfg(not(proc_macro_span_file))] - Span::Compiler(_) => None, - Span::Fallback(s) => s.local_file(), - } - } - - pub(crate) fn join(&self, other: Span) -> Option { - let ret = match (self, other) { - #[cfg(proc_macro_span)] - (Span::Compiler(a), Span::Compiler(b)) => Span::Compiler(proc_macro_span::join(a, b)?), - (Span::Fallback(a), Span::Fallback(b)) => Span::Fallback(a.join(b)?), - _ => return None, - }; - Some(ret) - } - - #[cfg(super_unstable)] - pub(crate) fn eq(&self, other: &Span) -> bool { - 
match (self, other) { - (Span::Compiler(a), Span::Compiler(b)) => a.eq(b), - (Span::Fallback(a), Span::Fallback(b)) => a.eq(b), - _ => false, - } - } - - pub(crate) fn source_text(&self) -> Option { - match self { - #[cfg(not(no_source_text))] - Span::Compiler(s) => s.source_text(), - #[cfg(no_source_text)] - Span::Compiler(_) => None, - Span::Fallback(s) => s.source_text(), - } - } - - fn unwrap_nightly(self) -> proc_macro::Span { - match self { - Span::Compiler(s) => s, - Span::Fallback(_) => mismatch(line!()), - } - } -} - -impl From for crate::Span { - fn from(proc_span: proc_macro::Span) -> Self { - crate::Span::_new(Span::Compiler(proc_span)) - } -} - -impl From for Span { - fn from(inner: fallback::Span) -> Self { - Span::Fallback(inner) - } -} - -impl Debug for Span { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match self { - Span::Compiler(s) => Debug::fmt(s, f), - Span::Fallback(s) => Debug::fmt(s, f), - } - } -} - -pub(crate) fn debug_span_field_if_nontrivial(debug: &mut fmt::DebugStruct, span: Span) { - match span { - Span::Compiler(s) => { - debug.field("span", &s); - } - Span::Fallback(s) => fallback::debug_span_field_if_nontrivial(debug, s), - } -} - -#[derive(Clone)] -pub(crate) enum Group { - Compiler(proc_macro::Group), - Fallback(fallback::Group), -} - -impl Group { - pub(crate) fn new(delimiter: Delimiter, stream: TokenStream) -> Self { - match stream { - TokenStream::Compiler(tts) => { - let delimiter = match delimiter { - Delimiter::Parenthesis => proc_macro::Delimiter::Parenthesis, - Delimiter::Bracket => proc_macro::Delimiter::Bracket, - Delimiter::Brace => proc_macro::Delimiter::Brace, - Delimiter::None => proc_macro::Delimiter::None, - }; - Group::Compiler(proc_macro::Group::new(delimiter, tts.into_token_stream())) - } - TokenStream::Fallback(stream) => { - Group::Fallback(fallback::Group::new(delimiter, stream)) - } - } - } - - pub(crate) fn delimiter(&self) -> Delimiter { - match self { - Group::Compiler(g) => match g.delimiter() { - proc_macro::Delimiter::Parenthesis => Delimiter::Parenthesis, - proc_macro::Delimiter::Bracket => Delimiter::Bracket, - proc_macro::Delimiter::Brace => Delimiter::Brace, - proc_macro::Delimiter::None => Delimiter::None, - }, - Group::Fallback(g) => g.delimiter(), - } - } - - pub(crate) fn stream(&self) -> TokenStream { - match self { - Group::Compiler(g) => TokenStream::Compiler(DeferredTokenStream::new(g.stream())), - Group::Fallback(g) => TokenStream::Fallback(g.stream()), - } - } - - pub(crate) fn span(&self) -> Span { - match self { - Group::Compiler(g) => Span::Compiler(g.span()), - Group::Fallback(g) => Span::Fallback(g.span()), - } - } - - pub(crate) fn span_open(&self) -> Span { - match self { - Group::Compiler(g) => Span::Compiler(g.span_open()), - Group::Fallback(g) => Span::Fallback(g.span_open()), - } - } - - pub(crate) fn span_close(&self) -> Span { - match self { - Group::Compiler(g) => Span::Compiler(g.span_close()), - Group::Fallback(g) => Span::Fallback(g.span_close()), - } - } - - pub(crate) fn set_span(&mut self, span: Span) { - match (self, span) { - (Group::Compiler(g), Span::Compiler(s)) => g.set_span(s), - (Group::Fallback(g), Span::Fallback(s)) => g.set_span(s), - (Group::Compiler(_), Span::Fallback(_)) => mismatch(line!()), - (Group::Fallback(_), Span::Compiler(_)) => mismatch(line!()), - } - } - - fn unwrap_nightly(self) -> proc_macro::Group { - match self { - Group::Compiler(g) => g, - Group::Fallback(_) => mismatch(line!()), - } - } -} - -impl From for Group { - fn from(g: fallback::Group) 
-> Self { - Group::Fallback(g) - } -} - -impl Display for Group { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - match self { - Group::Compiler(group) => Display::fmt(group, formatter), - Group::Fallback(group) => Display::fmt(group, formatter), - } - } -} - -impl Debug for Group { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - match self { - Group::Compiler(group) => Debug::fmt(group, formatter), - Group::Fallback(group) => Debug::fmt(group, formatter), - } - } -} - -#[derive(Clone)] -pub(crate) enum Ident { - Compiler(proc_macro::Ident), - Fallback(fallback::Ident), -} - -impl Ident { - #[track_caller] - pub(crate) fn new_checked(string: &str, span: Span) -> Self { - match span { - Span::Compiler(s) => Ident::Compiler(proc_macro::Ident::new(string, s)), - Span::Fallback(s) => Ident::Fallback(fallback::Ident::new_checked(string, s)), - } - } - - #[track_caller] - pub(crate) fn new_raw_checked(string: &str, span: Span) -> Self { - match span { - Span::Compiler(s) => Ident::Compiler(proc_macro::Ident::new_raw(string, s)), - Span::Fallback(s) => Ident::Fallback(fallback::Ident::new_raw_checked(string, s)), - } - } - - pub(crate) fn span(&self) -> Span { - match self { - Ident::Compiler(t) => Span::Compiler(t.span()), - Ident::Fallback(t) => Span::Fallback(t.span()), - } - } - - pub(crate) fn set_span(&mut self, span: Span) { - match (self, span) { - (Ident::Compiler(t), Span::Compiler(s)) => t.set_span(s), - (Ident::Fallback(t), Span::Fallback(s)) => t.set_span(s), - (Ident::Compiler(_), Span::Fallback(_)) => mismatch(line!()), - (Ident::Fallback(_), Span::Compiler(_)) => mismatch(line!()), - } - } - - fn unwrap_nightly(self) -> proc_macro::Ident { - match self { - Ident::Compiler(s) => s, - Ident::Fallback(_) => mismatch(line!()), - } - } -} - -impl From for Ident { - fn from(inner: fallback::Ident) -> Self { - Ident::Fallback(inner) - } -} - -impl PartialEq for Ident { - fn eq(&self, other: &Ident) -> bool { - match (self, other) { - (Ident::Compiler(t), Ident::Compiler(o)) => t.to_string() == o.to_string(), - (Ident::Fallback(t), Ident::Fallback(o)) => t == o, - (Ident::Compiler(_), Ident::Fallback(_)) => mismatch(line!()), - (Ident::Fallback(_), Ident::Compiler(_)) => mismatch(line!()), - } - } -} - -impl PartialEq for Ident -where - T: ?Sized + AsRef, -{ - fn eq(&self, other: &T) -> bool { - let other = other.as_ref(); - match self { - Ident::Compiler(t) => t.to_string() == other, - Ident::Fallback(t) => t == other, - } - } -} - -impl Display for Ident { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match self { - Ident::Compiler(t) => Display::fmt(t, f), - Ident::Fallback(t) => Display::fmt(t, f), - } - } -} - -impl Debug for Ident { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match self { - Ident::Compiler(t) => Debug::fmt(t, f), - Ident::Fallback(t) => Debug::fmt(t, f), - } - } -} - -#[derive(Clone)] -pub(crate) enum Literal { - Compiler(proc_macro::Literal), - Fallback(fallback::Literal), -} - -macro_rules! suffixed_numbers { - ($($name:ident => $kind:ident,)*) => ($( - pub(crate) fn $name(n: $kind) -> Literal { - if inside_proc_macro() { - Literal::Compiler(proc_macro::Literal::$name(n)) - } else { - Literal::Fallback(fallback::Literal::$name(n)) - } - } - )*) -} - -macro_rules! 
unsuffixed_integers { - ($($name:ident => $kind:ident,)*) => ($( - pub(crate) fn $name(n: $kind) -> Literal { - if inside_proc_macro() { - Literal::Compiler(proc_macro::Literal::$name(n)) - } else { - Literal::Fallback(fallback::Literal::$name(n)) - } - } - )*) -} - -impl Literal { - pub(crate) fn from_str_checked(repr: &str) -> Result { - if inside_proc_macro() { - let literal = proc_macro::Literal::from_str_checked(repr)?; - Ok(Literal::Compiler(literal)) - } else { - let literal = fallback::Literal::from_str_checked(repr)?; - Ok(Literal::Fallback(literal)) - } - } - - pub(crate) unsafe fn from_str_unchecked(repr: &str) -> Self { - if inside_proc_macro() { - Literal::Compiler(proc_macro::Literal::from_str_unchecked(repr)) - } else { - Literal::Fallback(unsafe { fallback::Literal::from_str_unchecked(repr) }) - } - } - - suffixed_numbers! { - u8_suffixed => u8, - u16_suffixed => u16, - u32_suffixed => u32, - u64_suffixed => u64, - u128_suffixed => u128, - usize_suffixed => usize, - i8_suffixed => i8, - i16_suffixed => i16, - i32_suffixed => i32, - i64_suffixed => i64, - i128_suffixed => i128, - isize_suffixed => isize, - - f32_suffixed => f32, - f64_suffixed => f64, - } - - unsuffixed_integers! { - u8_unsuffixed => u8, - u16_unsuffixed => u16, - u32_unsuffixed => u32, - u64_unsuffixed => u64, - u128_unsuffixed => u128, - usize_unsuffixed => usize, - i8_unsuffixed => i8, - i16_unsuffixed => i16, - i32_unsuffixed => i32, - i64_unsuffixed => i64, - i128_unsuffixed => i128, - isize_unsuffixed => isize, - } - - pub(crate) fn f32_unsuffixed(f: f32) -> Literal { - if inside_proc_macro() { - Literal::Compiler(proc_macro::Literal::f32_unsuffixed(f)) - } else { - Literal::Fallback(fallback::Literal::f32_unsuffixed(f)) - } - } - - pub(crate) fn f64_unsuffixed(f: f64) -> Literal { - if inside_proc_macro() { - Literal::Compiler(proc_macro::Literal::f64_unsuffixed(f)) - } else { - Literal::Fallback(fallback::Literal::f64_unsuffixed(f)) - } - } - - pub(crate) fn string(string: &str) -> Literal { - if inside_proc_macro() { - Literal::Compiler(proc_macro::Literal::string(string)) - } else { - Literal::Fallback(fallback::Literal::string(string)) - } - } - - pub(crate) fn character(ch: char) -> Literal { - if inside_proc_macro() { - Literal::Compiler(proc_macro::Literal::character(ch)) - } else { - Literal::Fallback(fallback::Literal::character(ch)) - } - } - - pub(crate) fn byte_character(byte: u8) -> Literal { - if inside_proc_macro() { - Literal::Compiler({ - #[cfg(not(no_literal_byte_character))] - { - proc_macro::Literal::byte_character(byte) - } - - #[cfg(no_literal_byte_character)] - { - let fallback = fallback::Literal::byte_character(byte); - proc_macro::Literal::from_str_unchecked(&fallback.repr) - } - }) - } else { - Literal::Fallback(fallback::Literal::byte_character(byte)) - } - } - - pub(crate) fn byte_string(bytes: &[u8]) -> Literal { - if inside_proc_macro() { - Literal::Compiler(proc_macro::Literal::byte_string(bytes)) - } else { - Literal::Fallback(fallback::Literal::byte_string(bytes)) - } - } - - pub(crate) fn c_string(string: &CStr) -> Literal { - if inside_proc_macro() { - Literal::Compiler({ - #[cfg(not(no_literal_c_string))] - { - proc_macro::Literal::c_string(string) - } - - #[cfg(no_literal_c_string)] - { - let fallback = fallback::Literal::c_string(string); - proc_macro::Literal::from_str_unchecked(&fallback.repr) - } - }) - } else { - Literal::Fallback(fallback::Literal::c_string(string)) - } - } - - pub(crate) fn span(&self) -> Span { - match self { - Literal::Compiler(lit) => 
Span::Compiler(lit.span()), - Literal::Fallback(lit) => Span::Fallback(lit.span()), - } - } - - pub(crate) fn set_span(&mut self, span: Span) { - match (self, span) { - (Literal::Compiler(lit), Span::Compiler(s)) => lit.set_span(s), - (Literal::Fallback(lit), Span::Fallback(s)) => lit.set_span(s), - (Literal::Compiler(_), Span::Fallback(_)) => mismatch(line!()), - (Literal::Fallback(_), Span::Compiler(_)) => mismatch(line!()), - } - } - - pub(crate) fn subspan>(&self, range: R) -> Option { - match self { - #[cfg(proc_macro_span)] - Literal::Compiler(lit) => proc_macro_span::subspan(lit, range).map(Span::Compiler), - #[cfg(not(proc_macro_span))] - Literal::Compiler(_lit) => None, - Literal::Fallback(lit) => lit.subspan(range).map(Span::Fallback), - } - } - - fn unwrap_nightly(self) -> proc_macro::Literal { - match self { - Literal::Compiler(s) => s, - Literal::Fallback(_) => mismatch(line!()), - } - } -} - -impl From for Literal { - fn from(s: fallback::Literal) -> Self { - Literal::Fallback(s) - } -} - -impl Display for Literal { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match self { - Literal::Compiler(t) => Display::fmt(t, f), - Literal::Fallback(t) => Display::fmt(t, f), - } - } -} - -impl Debug for Literal { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match self { - Literal::Compiler(t) => Debug::fmt(t, f), - Literal::Fallback(t) => Debug::fmt(t, f), - } - } -} - -#[cfg(span_locations)] -pub(crate) fn invalidate_current_thread_spans() { - if inside_proc_macro() { - panic!( - "proc_macro2::extra::invalidate_current_thread_spans is not available in procedural macros" - ); - } else { - crate::fallback::invalidate_current_thread_spans(); - } -} diff --git a/vendor/proc-macro2/tests/comments.rs b/vendor/proc-macro2/tests/comments.rs deleted file mode 100644 index 34951f7f396ef7..00000000000000 --- a/vendor/proc-macro2/tests/comments.rs +++ /dev/null @@ -1,105 +0,0 @@ -#![allow(clippy::assertions_on_result_states, clippy::uninlined_format_args)] - -use proc_macro2::{Delimiter, Literal, Spacing, TokenStream, TokenTree}; - -// #[doc = "..."] -> "..." -fn lit_of_outer_doc_comment(tokens: &TokenStream) -> Literal { - lit_of_doc_comment(tokens, false) -} - -// #![doc = "..."] -> "..." 
-fn lit_of_inner_doc_comment(tokens: &TokenStream) -> Literal { - lit_of_doc_comment(tokens, true) -} - -fn lit_of_doc_comment(tokens: &TokenStream, inner: bool) -> Literal { - let mut iter = tokens.clone().into_iter(); - match iter.next().unwrap() { - TokenTree::Punct(punct) => { - assert_eq!(punct.as_char(), '#'); - assert_eq!(punct.spacing(), Spacing::Alone); - } - _ => panic!("wrong token {:?}", tokens), - } - if inner { - match iter.next().unwrap() { - TokenTree::Punct(punct) => { - assert_eq!(punct.as_char(), '!'); - assert_eq!(punct.spacing(), Spacing::Alone); - } - _ => panic!("wrong token {:?}", tokens), - } - } - iter = match iter.next().unwrap() { - TokenTree::Group(group) => { - assert_eq!(group.delimiter(), Delimiter::Bracket); - assert!(iter.next().is_none(), "unexpected token {:?}", tokens); - group.stream().into_iter() - } - _ => panic!("wrong token {:?}", tokens), - }; - match iter.next().unwrap() { - TokenTree::Ident(ident) => assert_eq!(ident.to_string(), "doc"), - _ => panic!("wrong token {:?}", tokens), - } - match iter.next().unwrap() { - TokenTree::Punct(punct) => { - assert_eq!(punct.as_char(), '='); - assert_eq!(punct.spacing(), Spacing::Alone); - } - _ => panic!("wrong token {:?}", tokens), - } - match iter.next().unwrap() { - TokenTree::Literal(literal) => { - assert!(iter.next().is_none(), "unexpected token {:?}", tokens); - literal - } - _ => panic!("wrong token {:?}", tokens), - } -} - -#[test] -fn closed_immediately() { - let stream = "/**/".parse::().unwrap(); - let tokens = stream.into_iter().collect::>(); - assert!(tokens.is_empty(), "not empty -- {:?}", tokens); -} - -#[test] -fn incomplete() { - assert!("/*/".parse::().is_err()); -} - -#[test] -fn lit() { - let stream = "/// doc".parse::().unwrap(); - let lit = lit_of_outer_doc_comment(&stream); - assert_eq!(lit.to_string(), "\" doc\""); - - let stream = "//! doc".parse::().unwrap(); - let lit = lit_of_inner_doc_comment(&stream); - assert_eq!(lit.to_string(), "\" doc\""); - - let stream = "/** doc */".parse::().unwrap(); - let lit = lit_of_outer_doc_comment(&stream); - assert_eq!(lit.to_string(), "\" doc \""); - - let stream = "/*! doc */".parse::().unwrap(); - let lit = lit_of_inner_doc_comment(&stream); - assert_eq!(lit.to_string(), "\" doc \""); -} - -#[test] -fn carriage_return() { - let stream = "///\r\n".parse::().unwrap(); - let lit = lit_of_outer_doc_comment(&stream); - assert_eq!(lit.to_string(), "\"\""); - - let stream = "/**\r\n*/".parse::().unwrap(); - let lit = lit_of_outer_doc_comment(&stream); - assert_eq!(lit.to_string(), "\"\\r\\n\""); - - "///\r".parse::().unwrap_err(); - "///\r \n".parse::().unwrap_err(); - "/**\r \n*/".parse::().unwrap_err(); -} diff --git a/vendor/proc-macro2/tests/features.rs b/vendor/proc-macro2/tests/features.rs deleted file mode 100644 index ea1704d992f3be..00000000000000 --- a/vendor/proc-macro2/tests/features.rs +++ /dev/null @@ -1,10 +0,0 @@ -#![allow(clippy::assertions_on_constants, clippy::ignore_without_reason)] - -#[test] -#[ignore] -fn make_sure_no_proc_macro() { - assert!( - !cfg!(feature = "proc-macro"), - "still compiled with proc_macro?" - ); -} diff --git a/vendor/proc-macro2/tests/marker.rs b/vendor/proc-macro2/tests/marker.rs deleted file mode 100644 index af8932a1fef523..00000000000000 --- a/vendor/proc-macro2/tests/marker.rs +++ /dev/null @@ -1,97 +0,0 @@ -#![allow(clippy::extra_unused_type_parameters)] - -use proc_macro2::{ - Delimiter, Group, Ident, LexError, Literal, Punct, Spacing, Span, TokenStream, TokenTree, -}; - -macro_rules! 
assert_impl { - ($ty:ident is $($marker:ident) and +) => { - #[test] - #[allow(non_snake_case)] - fn $ty() { - fn assert_implemented() {} - assert_implemented::<$ty>(); - } - }; - - ($ty:ident is not $($marker:ident) or +) => { - #[test] - #[allow(non_snake_case)] - fn $ty() { - $( - { - // Implemented for types that implement $marker. - #[allow(dead_code)] - trait IsNotImplemented { - fn assert_not_implemented() {} - } - impl IsNotImplemented for T {} - - // Implemented for the type being tested. - trait IsImplemented { - fn assert_not_implemented() {} - } - impl IsImplemented for $ty {} - - // If $ty does not implement $marker, there is no ambiguity - // in the following trait method call. - <$ty>::assert_not_implemented(); - } - )+ - } - }; -} - -assert_impl!(Delimiter is Send and Sync); -assert_impl!(Spacing is Send and Sync); - -assert_impl!(Group is not Send or Sync); -assert_impl!(Ident is not Send or Sync); -assert_impl!(LexError is not Send or Sync); -assert_impl!(Literal is not Send or Sync); -assert_impl!(Punct is not Send or Sync); -assert_impl!(Span is not Send or Sync); -assert_impl!(TokenStream is not Send or Sync); -assert_impl!(TokenTree is not Send or Sync); - -#[cfg(procmacro2_semver_exempt)] -mod semver_exempt { - use proc_macro2::LineColumn; - - assert_impl!(LineColumn is Send and Sync); -} - -mod unwind_safe { - #[cfg(procmacro2_semver_exempt)] - use proc_macro2::LineColumn; - use proc_macro2::{ - Delimiter, Group, Ident, LexError, Literal, Punct, Spacing, Span, TokenStream, TokenTree, - }; - use std::panic::{RefUnwindSafe, UnwindSafe}; - - macro_rules! assert_unwind_safe { - ($($types:ident)*) => { - $( - assert_impl!($types is UnwindSafe and RefUnwindSafe); - )* - }; - } - - assert_unwind_safe! { - Delimiter - Group - Ident - LexError - Literal - Punct - Spacing - Span - TokenStream - TokenTree - } - - #[cfg(procmacro2_semver_exempt)] - assert_unwind_safe! 
{ - LineColumn - } -} diff --git a/vendor/proc-macro2/tests/test.rs b/vendor/proc-macro2/tests/test.rs deleted file mode 100644 index a9272716647014..00000000000000 --- a/vendor/proc-macro2/tests/test.rs +++ /dev/null @@ -1,1094 +0,0 @@ -#![allow( - clippy::assertions_on_result_states, - clippy::items_after_statements, - clippy::needless_pass_by_value, - clippy::needless_raw_string_hashes, - clippy::non_ascii_literal, - clippy::octal_escapes, - clippy::uninlined_format_args -)] - -use proc_macro2::{Ident, Literal, Punct, Spacing, Span, TokenStream, TokenTree}; -use std::ffi::CStr; -use std::iter; -use std::str::{self, FromStr}; - -#[test] -fn idents() { - assert_eq!( - Ident::new("String", Span::call_site()).to_string(), - "String" - ); - assert_eq!(Ident::new("fn", Span::call_site()).to_string(), "fn"); - assert_eq!(Ident::new("_", Span::call_site()).to_string(), "_"); -} - -#[test] -fn raw_idents() { - assert_eq!( - Ident::new_raw("String", Span::call_site()).to_string(), - "r#String" - ); - assert_eq!(Ident::new_raw("fn", Span::call_site()).to_string(), "r#fn"); -} - -#[test] -#[should_panic(expected = "`r#_` cannot be a raw identifier")] -fn ident_raw_underscore() { - Ident::new_raw("_", Span::call_site()); -} - -#[test] -#[should_panic(expected = "`r#super` cannot be a raw identifier")] -fn ident_raw_reserved() { - Ident::new_raw("super", Span::call_site()); -} - -#[test] -#[should_panic(expected = "Ident is not allowed to be empty; use Option")] -fn ident_empty() { - Ident::new("", Span::call_site()); -} - -#[test] -#[should_panic(expected = "Ident cannot be a number; use Literal instead")] -fn ident_number() { - Ident::new("255", Span::call_site()); -} - -#[test] -#[should_panic(expected = "\"a#\" is not a valid Ident")] -fn ident_invalid() { - Ident::new("a#", Span::call_site()); -} - -#[test] -#[should_panic(expected = "not a valid Ident")] -fn raw_ident_empty() { - Ident::new("r#", Span::call_site()); -} - -#[test] -#[should_panic(expected = "not a valid Ident")] -fn raw_ident_number() { - Ident::new("r#255", Span::call_site()); -} - -#[test] -#[should_panic(expected = "\"r#a#\" is not a valid Ident")] -fn raw_ident_invalid() { - Ident::new("r#a#", Span::call_site()); -} - -#[test] -#[should_panic(expected = "not a valid Ident")] -fn lifetime_empty() { - Ident::new("'", Span::call_site()); -} - -#[test] -#[should_panic(expected = "not a valid Ident")] -fn lifetime_number() { - Ident::new("'255", Span::call_site()); -} - -#[test] -#[should_panic(expected = r#""'a#" is not a valid Ident"#)] -fn lifetime_invalid() { - Ident::new("'a#", Span::call_site()); -} - -#[test] -fn literal_string() { - #[track_caller] - fn assert(literal: Literal, expected: &str) { - assert_eq!(literal.to_string(), expected.trim()); - } - - assert(Literal::string(""), r#" "" "#); - assert(Literal::string("aA"), r#" "aA" "#); - assert(Literal::string("\t"), r#" "\t" "#); - assert(Literal::string("❤"), r#" "❤" "#); - assert(Literal::string("'"), r#" "'" "#); - assert(Literal::string("\""), r#" "\"" "#); - assert(Literal::string("\0"), r#" "\0" "#); - assert(Literal::string("\u{1}"), r#" "\u{1}" "#); - assert( - Literal::string("a\00b\07c\08d\0e\0"), - r#" "a\x000b\x007c\08d\0e\0" "#, - ); - - "\"\\\r\n x\"".parse::().unwrap(); - "\"\\\r\n \rx\"".parse::().unwrap_err(); -} - -#[test] -fn literal_raw_string() { - "r\"\r\n\"".parse::().unwrap(); - - fn raw_string_literal_with_hashes(n: usize) -> String { - let mut literal = String::new(); - literal.push('r'); - literal.extend(iter::repeat('#').take(n)); - 
literal.push('"'); - literal.push('"'); - literal.extend(iter::repeat('#').take(n)); - literal - } - - raw_string_literal_with_hashes(255) - .parse::() - .unwrap(); - - // https://github.com/rust-lang/rust/pull/95251 - raw_string_literal_with_hashes(256) - .parse::() - .unwrap_err(); -} - -#[cfg(procmacro2_semver_exempt)] -#[test] -fn literal_string_value() { - for string in ["", "...", "...\t...", "...\\...", "...\0...", "...\u{1}..."] { - assert_eq!(string, Literal::string(string).str_value().unwrap()); - assert_eq!( - string, - format!("r\"{string}\"") - .parse::() - .unwrap() - .str_value() - .unwrap(), - ); - assert_eq!( - string, - format!("r##\"{string}\"##") - .parse::() - .unwrap() - .str_value() - .unwrap(), - ); - } -} - -#[test] -fn literal_byte_character() { - #[track_caller] - fn assert(literal: Literal, expected: &str) { - assert_eq!(literal.to_string(), expected.trim()); - } - - assert(Literal::byte_character(b'a'), r#" b'a' "#); - assert(Literal::byte_character(b'\0'), r#" b'\0' "#); - assert(Literal::byte_character(b'\t'), r#" b'\t' "#); - assert(Literal::byte_character(b'\n'), r#" b'\n' "#); - assert(Literal::byte_character(b'\r'), r#" b'\r' "#); - assert(Literal::byte_character(b'\''), r#" b'\'' "#); - assert(Literal::byte_character(b'\\'), r#" b'\\' "#); - assert(Literal::byte_character(b'\x1f'), r#" b'\x1F' "#); - assert(Literal::byte_character(b'"'), r#" b'"' "#); -} - -#[test] -fn literal_byte_string() { - #[track_caller] - fn assert(literal: Literal, expected: &str) { - assert_eq!(literal.to_string(), expected.trim()); - } - - assert(Literal::byte_string(b""), r#" b"" "#); - assert(Literal::byte_string(b"\0"), r#" b"\0" "#); - assert(Literal::byte_string(b"\t"), r#" b"\t" "#); - assert(Literal::byte_string(b"\n"), r#" b"\n" "#); - assert(Literal::byte_string(b"\r"), r#" b"\r" "#); - assert(Literal::byte_string(b"\""), r#" b"\"" "#); - assert(Literal::byte_string(b"\\"), r#" b"\\" "#); - assert(Literal::byte_string(b"\x1f"), r#" b"\x1F" "#); - assert(Literal::byte_string(b"'"), r#" b"'" "#); - assert( - Literal::byte_string(b"a\00b\07c\08d\0e\0"), - r#" b"a\x000b\x007c\08d\0e\0" "#, - ); - - "b\"\\\r\n x\"".parse::().unwrap(); - "b\"\\\r\n \rx\"".parse::().unwrap_err(); - "b\"\\\r\n \u{a0}x\"".parse::().unwrap_err(); - "br\"\u{a0}\"".parse::().unwrap_err(); -} - -#[cfg(procmacro2_semver_exempt)] -#[test] -fn literal_byte_string_value() { - for bytestr in [ - &b""[..], - b"...", - b"...\t...", - b"...\\...", - b"...\0...", - b"...\xF0...", - ] { - assert_eq!( - bytestr, - Literal::byte_string(bytestr).byte_str_value().unwrap(), - ); - if let Ok(string) = str::from_utf8(bytestr) { - assert_eq!( - bytestr, - format!("br\"{string}\"") - .parse::() - .unwrap() - .byte_str_value() - .unwrap(), - ); - assert_eq!( - bytestr, - format!("br##\"{string}\"##") - .parse::() - .unwrap() - .byte_str_value() - .unwrap(), - ); - } - } -} - -#[test] -fn literal_c_string() { - #[track_caller] - fn assert(literal: Literal, expected: &str) { - assert_eq!(literal.to_string(), expected.trim()); - } - - assert(Literal::c_string(<&CStr>::default()), r#" c"" "#); - assert( - Literal::c_string(CStr::from_bytes_with_nul(b"aA\0").unwrap()), - r#" c"aA" "#, - ); - assert( - Literal::c_string(CStr::from_bytes_with_nul(b"aA\0").unwrap()), - r#" c"aA" "#, - ); - assert( - Literal::c_string(CStr::from_bytes_with_nul(b"\t\0").unwrap()), - r#" c"\t" "#, - ); - assert( - Literal::c_string(CStr::from_bytes_with_nul(b"\xE2\x9D\xA4\0").unwrap()), - r#" c"❤" "#, - ); - assert( - 
Literal::c_string(CStr::from_bytes_with_nul(b"'\0").unwrap()), - r#" c"'" "#, - ); - assert( - Literal::c_string(CStr::from_bytes_with_nul(b"\"\0").unwrap()), - r#" c"\"" "#, - ); - assert( - Literal::c_string(CStr::from_bytes_with_nul(b"\x7F\xFF\xFE\xCC\xB3\0").unwrap()), - r#" c"\u{7f}\xFF\xFE\u{333}" "#, - ); - - let strings = r###" - c"hello\x80我叫\u{1F980}" // from the RFC - cr"\" - cr##"Hello "world"!"## - c"\t\n\r\"\\" - "###; - - let mut tokens = strings.parse::().unwrap().into_iter(); - - for expected in &[ - r#"c"hello\x80我叫\u{1F980}""#, - r#"cr"\""#, - r###"cr##"Hello "world"!"##"###, - r#"c"\t\n\r\"\\""#, - ] { - match tokens.next().unwrap() { - TokenTree::Literal(literal) => { - assert_eq!(literal.to_string(), *expected); - } - unexpected => panic!("unexpected token: {:?}", unexpected), - } - } - - if let Some(unexpected) = tokens.next() { - panic!("unexpected token: {:?}", unexpected); - } - - for invalid in &[r#"c"\0""#, r#"c"\x00""#, r#"c"\u{0}""#, "c\"\0\""] { - if let Ok(unexpected) = invalid.parse::() { - panic!("unexpected token: {:?}", unexpected); - } - } -} - -#[cfg(procmacro2_semver_exempt)] -#[test] -fn literal_c_string_value() { - for cstr in [ - c"", - c"...", - c"...\t...", - c"...\\...", - c"...\u{1}...", - c"...\xF0...", - ] { - assert_eq!( - cstr.to_bytes_with_nul(), - Literal::c_string(cstr).cstr_value().unwrap(), - ); - if let Ok(string) = cstr.to_str() { - assert_eq!( - cstr.to_bytes_with_nul(), - format!("cr\"{string}\"") - .parse::() - .unwrap() - .cstr_value() - .unwrap(), - ); - assert_eq!( - cstr.to_bytes_with_nul(), - format!("cr##\"{string}\"##") - .parse::() - .unwrap() - .cstr_value() - .unwrap(), - ); - } - } -} - -#[test] -fn literal_character() { - #[track_caller] - fn assert(literal: Literal, expected: &str) { - assert_eq!(literal.to_string(), expected.trim()); - } - - assert(Literal::character('a'), r#" 'a' "#); - assert(Literal::character('\t'), r#" '\t' "#); - assert(Literal::character('❤'), r#" '❤' "#); - assert(Literal::character('\''), r#" '\'' "#); - assert(Literal::character('"'), r#" '"' "#); - assert(Literal::character('\0'), r#" '\0' "#); - assert(Literal::character('\u{1}'), r#" '\u{1}' "#); -} - -#[test] -fn literal_integer() { - #[track_caller] - fn assert(literal: Literal, expected: &str) { - assert_eq!(literal.to_string(), expected); - } - - assert(Literal::u8_suffixed(10), "10u8"); - assert(Literal::u16_suffixed(10), "10u16"); - assert(Literal::u32_suffixed(10), "10u32"); - assert(Literal::u64_suffixed(10), "10u64"); - assert(Literal::u128_suffixed(10), "10u128"); - assert(Literal::usize_suffixed(10), "10usize"); - - assert(Literal::i8_suffixed(10), "10i8"); - assert(Literal::i16_suffixed(10), "10i16"); - assert(Literal::i32_suffixed(10), "10i32"); - assert(Literal::i64_suffixed(10), "10i64"); - assert(Literal::i128_suffixed(10), "10i128"); - assert(Literal::isize_suffixed(10), "10isize"); - - assert(Literal::u8_unsuffixed(10), "10"); - assert(Literal::u16_unsuffixed(10), "10"); - assert(Literal::u32_unsuffixed(10), "10"); - assert(Literal::u64_unsuffixed(10), "10"); - assert(Literal::u128_unsuffixed(10), "10"); - assert(Literal::usize_unsuffixed(10), "10"); - - assert(Literal::i8_unsuffixed(10), "10"); - assert(Literal::i16_unsuffixed(10), "10"); - assert(Literal::i32_unsuffixed(10), "10"); - assert(Literal::i64_unsuffixed(10), "10"); - assert(Literal::i128_unsuffixed(10), "10"); - assert(Literal::isize_unsuffixed(10), "10"); - - assert(Literal::i32_suffixed(-10), "-10i32"); - assert(Literal::i32_unsuffixed(-10), "-10"); -} - 
-#[test] -fn literal_float() { - #[track_caller] - fn assert(literal: Literal, expected: &str) { - assert_eq!(literal.to_string(), expected); - } - - assert(Literal::f32_suffixed(10.0), "10f32"); - assert(Literal::f32_suffixed(-10.0), "-10f32"); - assert(Literal::f64_suffixed(10.0), "10f64"); - assert(Literal::f64_suffixed(-10.0), "-10f64"); - - assert(Literal::f32_unsuffixed(10.0), "10.0"); - assert(Literal::f32_unsuffixed(-10.0), "-10.0"); - assert(Literal::f64_unsuffixed(10.0), "10.0"); - assert(Literal::f64_unsuffixed(-10.0), "-10.0"); - - assert( - Literal::f64_unsuffixed(1e100), - "10000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000.0", - ); -} - -#[test] -fn literal_suffix() { - fn token_count(p: &str) -> usize { - p.parse::().unwrap().into_iter().count() - } - - assert_eq!(token_count("999u256"), 1); - assert_eq!(token_count("999r#u256"), 3); - assert_eq!(token_count("1."), 1); - assert_eq!(token_count("1.f32"), 3); - assert_eq!(token_count("1.0_0"), 1); - assert_eq!(token_count("1._0"), 3); - assert_eq!(token_count("1._m"), 3); - assert_eq!(token_count("\"\"s"), 1); - assert_eq!(token_count("r\"\"r"), 1); - assert_eq!(token_count("r#\"\"#r"), 1); - assert_eq!(token_count("b\"\"b"), 1); - assert_eq!(token_count("br\"\"br"), 1); - assert_eq!(token_count("br#\"\"#br"), 1); - assert_eq!(token_count("c\"\"c"), 1); - assert_eq!(token_count("cr\"\"cr"), 1); - assert_eq!(token_count("cr#\"\"#cr"), 1); - assert_eq!(token_count("'c'c"), 1); - assert_eq!(token_count("b'b'b"), 1); - assert_eq!(token_count("0E"), 1); - assert_eq!(token_count("0o0A"), 1); - assert_eq!(token_count("0E--0"), 4); - assert_eq!(token_count("0.0ECMA"), 1); -} - -#[test] -fn literal_iter_negative() { - let negative_literal = Literal::i32_suffixed(-3); - let tokens = TokenStream::from(TokenTree::Literal(negative_literal)); - let mut iter = tokens.into_iter(); - match iter.next().unwrap() { - TokenTree::Punct(punct) => { - assert_eq!(punct.as_char(), '-'); - assert_eq!(punct.spacing(), Spacing::Alone); - } - unexpected => panic!("unexpected token {:?}", unexpected), - } - match iter.next().unwrap() { - TokenTree::Literal(literal) => { - assert_eq!(literal.to_string(), "3i32"); - } - unexpected => panic!("unexpected token {:?}", unexpected), - } - assert!(iter.next().is_none()); -} - -#[test] -fn literal_parse() { - assert!("1".parse::().is_ok()); - assert!("-1".parse::().is_ok()); - assert!("-1u12".parse::().is_ok()); - assert!("1.0".parse::().is_ok()); - assert!("-1.0".parse::().is_ok()); - assert!("-1.0f12".parse::().is_ok()); - assert!("'a'".parse::().is_ok()); - assert!("\"\n\"".parse::().is_ok()); - assert!("0 1".parse::().is_err()); - assert!(" 0".parse::().is_err()); - assert!("0 ".parse::().is_err()); - assert!("/* comment */0".parse::().is_err()); - assert!("0/* comment */".parse::().is_err()); - assert!("0// comment".parse::().is_err()); - assert!("- 1".parse::().is_err()); - assert!("- 1.0".parse::().is_err()); - assert!("-\"\"".parse::().is_err()); -} - -#[test] -fn literal_span() { - let positive = "0.1".parse::().unwrap(); - let negative = "-0.1".parse::().unwrap(); - let subspan = positive.subspan(1..2); - - #[cfg(not(span_locations))] - { - let _ = negative; - assert!(subspan.is_none()); - } - - #[cfg(span_locations)] - { - assert_eq!(positive.span().start().column, 0); - assert_eq!(positive.span().end().column, 3); - assert_eq!(negative.span().start().column, 0); - assert_eq!(negative.span().end().column, 4); - 
assert_eq!(subspan.unwrap().source_text().unwrap(), "."); - } - - assert!(positive.subspan(1..4).is_none()); -} - -#[cfg(span_locations)] -#[test] -fn source_text() { - let input = " 𓀕 a z "; - let mut tokens = input - .parse::() - .unwrap() - .into_iter(); - - let first = tokens.next().unwrap(); - assert_eq!("𓀕", first.span().source_text().unwrap()); - - let second = tokens.next().unwrap(); - let third = tokens.next().unwrap(); - assert_eq!("z", third.span().source_text().unwrap()); - assert_eq!("a", second.span().source_text().unwrap()); -} - -#[test] -fn lifetimes() { - let mut tokens = "'a 'static 'struct 'r#gen 'r#prefix#lifetime" - .parse::() - .unwrap() - .into_iter(); - assert!(match tokens.next() { - Some(TokenTree::Punct(punct)) => { - punct.as_char() == '\'' && punct.spacing() == Spacing::Joint - } - _ => false, - }); - assert!(match tokens.next() { - Some(TokenTree::Ident(ident)) => ident == "a", - _ => false, - }); - assert!(match tokens.next() { - Some(TokenTree::Punct(punct)) => { - punct.as_char() == '\'' && punct.spacing() == Spacing::Joint - } - _ => false, - }); - assert!(match tokens.next() { - Some(TokenTree::Ident(ident)) => ident == "static", - _ => false, - }); - assert!(match tokens.next() { - Some(TokenTree::Punct(punct)) => { - punct.as_char() == '\'' && punct.spacing() == Spacing::Joint - } - _ => false, - }); - assert!(match tokens.next() { - Some(TokenTree::Ident(ident)) => ident == "struct", - _ => false, - }); - assert!(match tokens.next() { - Some(TokenTree::Punct(punct)) => { - punct.as_char() == '\'' && punct.spacing() == Spacing::Joint - } - _ => false, - }); - assert!(match tokens.next() { - Some(TokenTree::Ident(ident)) => ident == "r#gen", - _ => false, - }); - assert!(match tokens.next() { - Some(TokenTree::Punct(punct)) => { - punct.as_char() == '\'' && punct.spacing() == Spacing::Joint - } - _ => false, - }); - assert!(match tokens.next() { - Some(TokenTree::Ident(ident)) => ident == "r#prefix", - _ => false, - }); - assert!(match tokens.next() { - Some(TokenTree::Punct(punct)) => { - punct.as_char() == '#' && punct.spacing() == Spacing::Alone - } - _ => false, - }); - assert!(match tokens.next() { - Some(TokenTree::Ident(ident)) => ident == "lifetime", - _ => false, - }); - - "' a".parse::().unwrap_err(); - "' r#gen".parse::().unwrap_err(); - "' prefix#lifetime".parse::().unwrap_err(); - "'prefix#lifetime".parse::().unwrap_err(); - "'aa'bb".parse::().unwrap_err(); - "'r#gen'a".parse::().unwrap_err(); -} - -#[test] -fn roundtrip() { - fn roundtrip(p: &str) { - println!("parse: {}", p); - let s = p.parse::().unwrap().to_string(); - println!("first: {}", s); - let s2 = s.parse::().unwrap().to_string(); - assert_eq!(s, s2); - } - roundtrip("a"); - roundtrip("<<"); - roundtrip("<<="); - roundtrip( - " - 1 - 1.0 - 1f32 - 2f64 - 1usize - 4isize - 4e10 - 1_000 - 1_0i32 - 8u8 - 9 - 0 - 0xffffffffffffffffffffffffffffffff - 1x - 1u80 - 1f320 - ", - ); - roundtrip("'a"); - roundtrip("'_"); - roundtrip("'static"); - roundtrip(r"'\u{10__FFFF}'"); - roundtrip("\"\\u{10_F0FF__}foo\\u{1_0_0_0__}\""); -} - -#[test] -fn fail() { - fn fail(p: &str) { - if let Ok(s) = p.parse::() { - panic!("should have failed to parse: {}\n{:#?}", p, s); - } - } - fail("' static"); - fail("r#1"); - fail("r#_"); - fail("\"\\u{0000000}\""); // overlong unicode escape (rust allows at most 6 hex digits) - fail("\"\\u{999999}\""); // outside of valid range of char - fail("\"\\u{_0}\""); // leading underscore - fail("\"\\u{}\""); // empty - fail("b\"\r\""); // bare carriage return in byte 
string - fail("r\"\r\""); // bare carriage return in raw string - fail("\"\\\r \""); // backslash carriage return - fail("'aa'aa"); - fail("br##\"\"#"); - fail("cr##\"\"#"); - fail("\"\\\n\u{85}\r\""); -} - -#[cfg(span_locations)] -#[test] -fn span_test() { - check_spans( - "\ -/// This is a document comment -testing 123 -{ - testing 234 -}", - &[ - (1, 0, 1, 30), // # - (1, 0, 1, 30), // [ ... ] - (1, 0, 1, 30), // doc - (1, 0, 1, 30), // = - (1, 0, 1, 30), // "This is..." - (2, 0, 2, 7), // testing - (2, 8, 2, 11), // 123 - (3, 0, 5, 1), // { ... } - (4, 2, 4, 9), // testing - (4, 10, 4, 13), // 234 - ], - ); -} - -#[cfg(procmacro2_semver_exempt)] -#[test] -fn default_span() { - let start = Span::call_site().start(); - assert_eq!(start.line, 1); - assert_eq!(start.column, 0); - let end = Span::call_site().end(); - assert_eq!(end.line, 1); - assert_eq!(end.column, 0); - assert_eq!(Span::call_site().file(), ""); - assert!(Span::call_site().local_file().is_none()); -} - -#[cfg(procmacro2_semver_exempt)] -#[test] -fn span_join() { - let source1 = "aaa\nbbb" - .parse::() - .unwrap() - .into_iter() - .collect::>(); - let source2 = "ccc\nddd" - .parse::() - .unwrap() - .into_iter() - .collect::>(); - - assert!(source1[0].span().file() != source2[0].span().file()); - assert_eq!(source1[0].span().file(), source1[1].span().file()); - - let joined1 = source1[0].span().join(source1[1].span()); - let joined2 = source1[0].span().join(source2[0].span()); - assert!(joined1.is_some()); - assert!(joined2.is_none()); - - let start = joined1.unwrap().start(); - let end = joined1.unwrap().end(); - assert_eq!(start.line, 1); - assert_eq!(start.column, 0); - assert_eq!(end.line, 2); - assert_eq!(end.column, 3); - - assert_eq!(joined1.unwrap().file(), source1[0].span().file()); -} - -#[test] -fn no_panic() { - let s = str::from_utf8(b"b\'\xc2\x86 \x00\x00\x00^\"").unwrap(); - assert!(s.parse::().is_err()); -} - -#[test] -fn punct_before_comment() { - let mut tts = TokenStream::from_str("~// comment").unwrap().into_iter(); - match tts.next().unwrap() { - TokenTree::Punct(tt) => { - assert_eq!(tt.as_char(), '~'); - assert_eq!(tt.spacing(), Spacing::Alone); - } - wrong => panic!("wrong token {:?}", wrong), - } -} - -#[test] -fn joint_last_token() { - // This test verifies that we match the behavior of libproc_macro *not* in - // the range nightly-2020-09-06 through nightly-2020-09-10, in which this - // behavior was temporarily broken. 
- // See https://github.com/rust-lang/rust/issues/76399 - - let joint_punct = Punct::new(':', Spacing::Joint); - let stream = TokenStream::from(TokenTree::Punct(joint_punct)); - let punct = match stream.into_iter().next().unwrap() { - TokenTree::Punct(punct) => punct, - _ => unreachable!(), - }; - assert_eq!(punct.spacing(), Spacing::Joint); -} - -#[test] -fn raw_identifier() { - let mut tts = TokenStream::from_str("r#dyn").unwrap().into_iter(); - match tts.next().unwrap() { - TokenTree::Ident(raw) => assert_eq!("r#dyn", raw.to_string()), - wrong => panic!("wrong token {:?}", wrong), - } - assert!(tts.next().is_none()); -} - -#[test] -fn test_display_ident() { - let ident = Ident::new("proc_macro", Span::call_site()); - assert_eq!(format!("{ident}"), "proc_macro"); - assert_eq!(format!("{ident:-^14}"), "proc_macro"); - - let ident = Ident::new_raw("proc_macro", Span::call_site()); - assert_eq!(format!("{ident}"), "r#proc_macro"); - assert_eq!(format!("{ident:-^14}"), "r#proc_macro"); -} - -#[test] -fn test_debug_ident() { - let ident = Ident::new("proc_macro", Span::call_site()); - let expected = if cfg!(span_locations) { - "Ident { sym: proc_macro }" - } else { - "Ident(proc_macro)" - }; - assert_eq!(expected, format!("{:?}", ident)); - - let ident = Ident::new_raw("proc_macro", Span::call_site()); - let expected = if cfg!(span_locations) { - "Ident { sym: r#proc_macro }" - } else { - "Ident(r#proc_macro)" - }; - assert_eq!(expected, format!("{:?}", ident)); -} - -#[test] -fn test_display_tokenstream() { - let tts = TokenStream::from_str("[a + 1]").unwrap(); - assert_eq!(format!("{tts}"), "[a + 1]"); - assert_eq!(format!("{tts:-^5}"), "[a + 1]"); -} - -#[test] -fn test_debug_tokenstream() { - let tts = TokenStream::from_str("[a + 1]").unwrap(); - - #[cfg(not(span_locations))] - let expected = "\ -TokenStream [ - Group { - delimiter: Bracket, - stream: TokenStream [ - Ident { - sym: a, - }, - Punct { - char: '+', - spacing: Alone, - }, - Literal { - lit: 1, - }, - ], - }, -]\ - "; - - #[cfg(not(span_locations))] - let expected_before_trailing_commas = "\ -TokenStream [ - Group { - delimiter: Bracket, - stream: TokenStream [ - Ident { - sym: a - }, - Punct { - char: '+', - spacing: Alone - }, - Literal { - lit: 1 - } - ] - } -]\ - "; - - #[cfg(span_locations)] - let expected = "\ -TokenStream [ - Group { - delimiter: Bracket, - stream: TokenStream [ - Ident { - sym: a, - span: bytes(2..3), - }, - Punct { - char: '+', - spacing: Alone, - span: bytes(4..5), - }, - Literal { - lit: 1, - span: bytes(6..7), - }, - ], - span: bytes(1..8), - }, -]\ - "; - - #[cfg(span_locations)] - let expected_before_trailing_commas = "\ -TokenStream [ - Group { - delimiter: Bracket, - stream: TokenStream [ - Ident { - sym: a, - span: bytes(2..3) - }, - Punct { - char: '+', - spacing: Alone, - span: bytes(4..5) - }, - Literal { - lit: 1, - span: bytes(6..7) - } - ], - span: bytes(1..8) - } -]\ - "; - - let actual = format!("{:#?}", tts); - if actual.ends_with(",\n]") { - assert_eq!(expected, actual); - } else { - assert_eq!(expected_before_trailing_commas, actual); - } -} - -#[test] -fn default_tokenstream_is_empty() { - let default_token_stream = ::default(); - - assert!(default_token_stream.is_empty()); -} - -#[test] -fn tokenstream_size_hint() { - let tokens = "a b (c d) e".parse::().unwrap(); - - assert_eq!(tokens.into_iter().size_hint(), (4, Some(4))); -} - -#[test] -fn tuple_indexing() { - // This behavior may change depending on https://github.com/rust-lang/rust/pull/71322 - let mut tokens = 
"tuple.0.0".parse::().unwrap().into_iter(); - assert_eq!("tuple", tokens.next().unwrap().to_string()); - assert_eq!(".", tokens.next().unwrap().to_string()); - assert_eq!("0.0", tokens.next().unwrap().to_string()); - assert!(tokens.next().is_none()); -} - -#[cfg(span_locations)] -#[test] -fn non_ascii_tokens() { - check_spans("// abc", &[]); - check_spans("// ábc", &[]); - check_spans("// abc x", &[]); - check_spans("// ábc x", &[]); - check_spans("/* abc */ x", &[(1, 10, 1, 11)]); - check_spans("/* ábc */ x", &[(1, 10, 1, 11)]); - check_spans("/* ab\nc */ x", &[(2, 5, 2, 6)]); - check_spans("/* áb\nc */ x", &[(2, 5, 2, 6)]); - check_spans("/*** abc */ x", &[(1, 12, 1, 13)]); - check_spans("/*** ábc */ x", &[(1, 12, 1, 13)]); - check_spans(r#""abc""#, &[(1, 0, 1, 5)]); - check_spans(r#""ábc""#, &[(1, 0, 1, 5)]); - check_spans(r##"r#"abc"#"##, &[(1, 0, 1, 8)]); - check_spans(r##"r#"ábc"#"##, &[(1, 0, 1, 8)]); - check_spans("r#\"a\nc\"#", &[(1, 0, 2, 3)]); - check_spans("r#\"á\nc\"#", &[(1, 0, 2, 3)]); - check_spans("'a'", &[(1, 0, 1, 3)]); - check_spans("'á'", &[(1, 0, 1, 3)]); - check_spans("//! abc", &[(1, 0, 1, 7), (1, 0, 1, 7), (1, 0, 1, 7)]); - check_spans("//! ábc", &[(1, 0, 1, 7), (1, 0, 1, 7), (1, 0, 1, 7)]); - check_spans("//! abc\n", &[(1, 0, 1, 7), (1, 0, 1, 7), (1, 0, 1, 7)]); - check_spans("//! ábc\n", &[(1, 0, 1, 7), (1, 0, 1, 7), (1, 0, 1, 7)]); - check_spans("/*! abc */", &[(1, 0, 1, 10), (1, 0, 1, 10), (1, 0, 1, 10)]); - check_spans("/*! ábc */", &[(1, 0, 1, 10), (1, 0, 1, 10), (1, 0, 1, 10)]); - check_spans("/*! a\nc */", &[(1, 0, 2, 4), (1, 0, 2, 4), (1, 0, 2, 4)]); - check_spans("/*! á\nc */", &[(1, 0, 2, 4), (1, 0, 2, 4), (1, 0, 2, 4)]); - check_spans("abc", &[(1, 0, 1, 3)]); - check_spans("ábc", &[(1, 0, 1, 3)]); - check_spans("ábć", &[(1, 0, 1, 3)]); - check_spans("abc// foo", &[(1, 0, 1, 3)]); - check_spans("ábc// foo", &[(1, 0, 1, 3)]); - check_spans("ábć// foo", &[(1, 0, 1, 3)]); - check_spans("b\"a\\\n c\"", &[(1, 0, 2, 3)]); -} - -#[cfg(span_locations)] -fn check_spans(p: &str, mut lines: &[(usize, usize, usize, usize)]) { - let ts = p.parse::().unwrap(); - check_spans_internal(ts, &mut lines); - assert!(lines.is_empty(), "leftover ranges: {:?}", lines); -} - -#[cfg(span_locations)] -fn check_spans_internal(ts: TokenStream, lines: &mut &[(usize, usize, usize, usize)]) { - for i in ts { - if let Some((&(sline, scol, eline, ecol), rest)) = lines.split_first() { - *lines = rest; - - let start = i.span().start(); - assert_eq!(start.line, sline, "sline did not match for {}", i); - assert_eq!(start.column, scol, "scol did not match for {}", i); - - let end = i.span().end(); - assert_eq!(end.line, eline, "eline did not match for {}", i); - assert_eq!(end.column, ecol, "ecol did not match for {}", i); - - if let TokenTree::Group(g) = i { - check_spans_internal(g.stream().clone(), lines); - } - } - } -} - -#[test] -fn whitespace() { - // space, horizontal tab, vertical tab, form feed, carriage return, line - // feed, non-breaking space, left-to-right mark, right-to-left mark - let various_spaces = " \t\u{b}\u{c}\r\n\u{a0}\u{200e}\u{200f}"; - let tokens = various_spaces.parse::().unwrap(); - assert_eq!(tokens.into_iter().count(), 0); - - let lone_carriage_returns = " \r \r\r\n "; - lone_carriage_returns.parse::().unwrap(); -} - -#[test] -fn byte_order_mark() { - let string = "\u{feff}foo"; - let tokens = string.parse::().unwrap(); - match tokens.into_iter().next().unwrap() { - TokenTree::Ident(ident) => assert_eq!(ident, "foo"), - _ => unreachable!(), - } - - let string = 
"foo\u{feff}"; - string.parse::().unwrap_err(); -} - -#[cfg(span_locations)] -fn create_span() -> proc_macro2::Span { - let tts: TokenStream = "1".parse().unwrap(); - match tts.into_iter().next().unwrap() { - TokenTree::Literal(literal) => literal.span(), - _ => unreachable!(), - } -} - -#[cfg(span_locations)] -#[test] -fn test_invalidate_current_thread_spans() { - let actual = format!("{:#?}", create_span()); - assert_eq!(actual, "bytes(1..2)"); - let actual = format!("{:#?}", create_span()); - assert_eq!(actual, "bytes(3..4)"); - - proc_macro2::extra::invalidate_current_thread_spans(); - - let actual = format!("{:#?}", create_span()); - // Test that span offsets have been reset after the call - // to invalidate_current_thread_spans() - assert_eq!(actual, "bytes(1..2)"); -} - -#[cfg(span_locations)] -#[test] -#[should_panic(expected = "Invalid span with no related FileInfo!")] -fn test_use_span_after_invalidation() { - let span = create_span(); - - proc_macro2::extra::invalidate_current_thread_spans(); - - span.source_text(); -} diff --git a/vendor/proc-macro2/tests/test_fmt.rs b/vendor/proc-macro2/tests/test_fmt.rs deleted file mode 100644 index 86a4c387634b33..00000000000000 --- a/vendor/proc-macro2/tests/test_fmt.rs +++ /dev/null @@ -1,28 +0,0 @@ -#![allow(clippy::from_iter_instead_of_collect)] - -use proc_macro2::{Delimiter, Group, Ident, Span, TokenStream, TokenTree}; -use std::iter; - -#[test] -fn test_fmt_group() { - let ident = Ident::new("x", Span::call_site()); - let inner = TokenStream::from_iter(iter::once(TokenTree::Ident(ident))); - let parens_empty = Group::new(Delimiter::Parenthesis, TokenStream::new()); - let parens_nonempty = Group::new(Delimiter::Parenthesis, inner.clone()); - let brackets_empty = Group::new(Delimiter::Bracket, TokenStream::new()); - let brackets_nonempty = Group::new(Delimiter::Bracket, inner.clone()); - let braces_empty = Group::new(Delimiter::Brace, TokenStream::new()); - let braces_nonempty = Group::new(Delimiter::Brace, inner.clone()); - let none_empty = Group::new(Delimiter::None, TokenStream::new()); - let none_nonempty = Group::new(Delimiter::None, inner); - - // Matches libproc_macro. 
- assert_eq!("()", parens_empty.to_string()); - assert_eq!("(x)", parens_nonempty.to_string()); - assert_eq!("[]", brackets_empty.to_string()); - assert_eq!("[x]", brackets_nonempty.to_string()); - assert_eq!("{ }", braces_empty.to_string()); - assert_eq!("{ x }", braces_nonempty.to_string()); - assert_eq!("", none_empty.to_string()); - assert_eq!("x", none_nonempty.to_string()); -} diff --git a/vendor/proc-macro2/tests/test_size.rs b/vendor/proc-macro2/tests/test_size.rs deleted file mode 100644 index 8b6791518fe854..00000000000000 --- a/vendor/proc-macro2/tests/test_size.rs +++ /dev/null @@ -1,81 +0,0 @@ -#![allow(unused_attributes)] - -extern crate proc_macro; - -use std::mem; - -#[rustversion::attr(before(1.64), ignore = "requires Rust 1.64+")] -#[cfg_attr(not(target_pointer_width = "64"), ignore = "only applicable to 64-bit")] -#[cfg_attr(randomize_layout, ignore = "disabled due to randomized layout")] -#[test] -fn test_proc_macro_size() { - assert_eq!(mem::size_of::(), 4); - assert_eq!(mem::size_of::>(), 4); - assert_eq!(mem::size_of::(), 20); - assert_eq!(mem::size_of::(), 12); - assert_eq!(mem::size_of::(), 8); - assert_eq!(mem::size_of::(), 16); - assert_eq!(mem::size_of::(), 4); -} - -#[cfg_attr(not(target_pointer_width = "64"), ignore = "only applicable to 64-bit")] -#[cfg_attr(randomize_layout, ignore = "disabled due to randomized layout")] -#[cfg_attr(wrap_proc_macro, ignore = "wrapper mode")] -#[cfg_attr(span_locations, ignore = "span locations are on")] -#[test] -fn test_proc_macro2_fallback_size_without_locations() { - assert_eq!(mem::size_of::(), 0); - assert_eq!(mem::size_of::>(), 1); - assert_eq!(mem::size_of::(), 16); - assert_eq!(mem::size_of::(), 24); - assert_eq!(mem::size_of::(), 8); - assert_eq!(mem::size_of::(), 24); - assert_eq!(mem::size_of::(), 8); -} - -#[cfg_attr(not(target_pointer_width = "64"), ignore = "only applicable to 64-bit")] -#[cfg_attr(randomize_layout, ignore = "disabled due to randomized layout")] -#[cfg_attr(wrap_proc_macro, ignore = "wrapper mode")] -#[cfg_attr(not(span_locations), ignore = "span locations are off")] -#[test] -fn test_proc_macro2_fallback_size_with_locations() { - assert_eq!(mem::size_of::(), 8); - assert_eq!(mem::size_of::>(), 12); - assert_eq!(mem::size_of::(), 24); - assert_eq!(mem::size_of::(), 32); - assert_eq!(mem::size_of::(), 16); - assert_eq!(mem::size_of::(), 32); - assert_eq!(mem::size_of::(), 8); -} - -#[rustversion::attr(before(1.71), ignore = "requires Rust 1.71+")] -#[cfg_attr(not(target_pointer_width = "64"), ignore = "only applicable to 64-bit")] -#[cfg_attr(randomize_layout, ignore = "disabled due to randomized layout")] -#[cfg_attr(not(wrap_proc_macro), ignore = "fallback mode")] -#[cfg_attr(span_locations, ignore = "span locations are on")] -#[test] -fn test_proc_macro2_wrapper_size_without_locations() { - assert_eq!(mem::size_of::(), 4); - assert_eq!(mem::size_of::>(), 8); - assert_eq!(mem::size_of::(), 24); - assert_eq!(mem::size_of::(), 24); - assert_eq!(mem::size_of::(), 12); - assert_eq!(mem::size_of::(), 24); - assert_eq!(mem::size_of::(), 32); -} - -#[rustversion::attr(before(1.65), ignore = "requires Rust 1.65+")] -#[cfg_attr(not(target_pointer_width = "64"), ignore = "only applicable to 64-bit")] -#[cfg_attr(randomize_layout, ignore = "disabled due to randomized layout")] -#[cfg_attr(not(wrap_proc_macro), ignore = "fallback mode")] -#[cfg_attr(not(span_locations), ignore = "span locations are off")] -#[test] -fn test_proc_macro2_wrapper_size_with_locations() { - assert_eq!(mem::size_of::(), 12); - 
assert_eq!(mem::size_of::>(), 12); - assert_eq!(mem::size_of::(), 32); - assert_eq!(mem::size_of::(), 32); - assert_eq!(mem::size_of::(), 20); - assert_eq!(mem::size_of::(), 32); - assert_eq!(mem::size_of::(), 32); -} diff --git a/vendor/quote/.cargo-checksum.json b/vendor/quote/.cargo-checksum.json deleted file mode 100644 index 0559d43caf7594..00000000000000 --- a/vendor/quote/.cargo-checksum.json +++ /dev/null @@ -1 +0,0 @@ -{"files":{".cargo_vcs_info.json":"31f077cccc677667ae9dbd3ca2a97807c645307199ec9dd6c2620fbf1b80015e",".github/FUNDING.yml":"b017158736b3c9751a2d21edfce7fe61c8954e2fced8da8dd3013c2f3e295bd9",".github/workflows/ci.yml":"a74a11b884e49e64e0af70d7b66a497dfe19f61d1e7375798fb7dcf46d074e30","Cargo.lock":"7f9f3eb56475b19bf94e20384421c6485c217ef1ab136867aa678b2dec7922b3","Cargo.toml":"f98585795e8fb0a2798c24fd5bc39d6de078f96cbe1c4be6532dee2f10ade5ae","Cargo.toml.orig":"8e7c7edea1aa52e0854b58bc77d5da20fb01a76138675757b162f03d2243c1c3","LICENSE-APACHE":"62c7a1e35f56406896d7aa7ca52d0cc0d272ac022b5d2796e7d6905db8a3636a","LICENSE-MIT":"23f18e03dc49df91622fe2a76176497404e46ced8a715d9d2b67a7446571cca3","README.md":"5bc59a97099fbdc7f9f8b69d3f9910e27629184647412b5009b274b5b8bfb6d1","build.rs":"cd6808c02e476b09a520105e2c6f6d325cccb1ecd542cbbcc836a0ae6f6fb0f1","rust-toolchain.toml":"6bbb61302978c736b2da03e4fb40e3beab908f85d533ab46fd541e637b5f3e0f","src/ext.rs":"33e41c8a11743de714c1cab1db37b242ce6df9cdb1dda43927c1f015b33701b3","src/format.rs":"141ee1049cfbe363f0d6e9210996dabc997bd3d1c67eb9695fab1c2a0b100e80","src/ident_fragment.rs":"0b3e6c2129e55910fd2d240e1e7efba6f1796801d24352d1c0bfbceb0e8b678f","src/lib.rs":"1f852ff55a08bc73e37ec76faf862bdd8769a8b825c2f49e5ca97e9b905b28c7","src/runtime.rs":"905008e29cb70a13845c2b334e531569121699b2a23be2acc7ab6070c45221e4","src/spanned.rs":"713678bf5cb3b4bf2f119dcf64d188a63dc59455a724c3d2567ceab83b734d73","src/to_tokens.rs":"5bd52437ed5764ae2b5d84843b23f29497ad0361f3ee3cfda621a4b91c70ef1c","tests/compiletest.rs":"4e381aa8ca3eabb7ac14d1e0c3700b3223e47640547a6988cfa13ad68255f60f","tests/test.rs":"c746974d738a6922b9a25eacb55416d0ef513cc418de3aa5ce5e12cacb7ee94d","tests/ui/does-not-have-iter-interpolated-dup.rs":"ad13eea21d4cdd2ab6c082f633392e1ff20fb0d1af5f2177041e0bf7f30da695","tests/ui/does-not-have-iter-interpolated-dup.stderr":"e5966b716290266591f97f1ab04107a47748d493e10ca99f19675fa76524f205","tests/ui/does-not-have-iter-interpolated.rs":"83a5b3f240651adcbe4b6e51076d76d653ad439b37442cf4054f1fd3c073f3b7","tests/ui/does-not-have-iter-interpolated.stderr":"a20403a06f36b54d45a195e455a11543cca7259e1c9f1bc78f0ce65cc0226347","tests/ui/does-not-have-iter-separated.rs":"fe413c48331d5e3a7ae5fef6a5892a90c72f610d54595879eb49d0a94154ba3f","tests/ui/does-not-have-iter-separated.stderr":"29718da7187e2da98c98bea9bfa405305a6df60af6c2f3c70cc27b7e13deead7","tests/ui/does-not-have-iter.rs":"09dc9499d861b63cebb0848b855b78e2dc9497bfde37ba6339f3625ae009a62f","tests/ui/does-not-have-iter.stderr":"691c985934330d5ba063fd4b172f89702673c710e610e8381e39ab78d729b0f1","tests/ui/not-quotable.rs":"5759d0884943417609f28faadc70254a3e2fd3d9bd6ff7297a3fb70a77fafd8a","tests/ui/not-quotable.stderr":"433a290bd53070d5cce6d623f9ef6f991756a78de109d3e486b46b699c2ce764","tests/ui/not-repeatable.rs":"a4b115c04e4e41049a05f5b69450503fbffeba031218b4189cb931839f7f9a9c","tests/ui/not-repeatable.stderr":"501ea5e47492b55bea457b02e991e0c624cd0c12601e0b759fff54a731370caf","tests/ui/wrong-type-span.rs":"6195e35ea844c0c52ba1cff5d790c3a371af6915d137d377834ad984229ef9ea","tests/ui/wrong-type-span.stderr":"cad072e
40e0ecc04f375122ae41aede2f0da2a9244492b3fcf70249e59d1b128"},"package":"a338cc41d27e6cc6dce6cefc13a0729dfbb81c262b1f519331575dd80ef3067f"} \ No newline at end of file diff --git a/vendor/quote/.cargo_vcs_info.json b/vendor/quote/.cargo_vcs_info.json deleted file mode 100644 index 43e8425a26858b..00000000000000 --- a/vendor/quote/.cargo_vcs_info.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "git": { - "sha1": "bb9e7a46b3105e11c73416bd59b4455a71068949" - }, - "path_in_vcs": "" -} \ No newline at end of file diff --git a/vendor/quote/.github/FUNDING.yml b/vendor/quote/.github/FUNDING.yml deleted file mode 100644 index 750707701cdae9..00000000000000 --- a/vendor/quote/.github/FUNDING.yml +++ /dev/null @@ -1 +0,0 @@ -github: dtolnay diff --git a/vendor/quote/.github/workflows/ci.yml b/vendor/quote/.github/workflows/ci.yml deleted file mode 100644 index 9e25479aa14023..00000000000000 --- a/vendor/quote/.github/workflows/ci.yml +++ /dev/null @@ -1,112 +0,0 @@ -name: CI - -on: - push: - pull_request: - workflow_dispatch: - schedule: [cron: "40 1 * * *"] - -permissions: - contents: read - -env: - RUSTFLAGS: -Dwarnings - -jobs: - pre_ci: - uses: dtolnay/.github/.github/workflows/pre_ci.yml@master - - test: - name: Rust ${{matrix.rust}} - needs: pre_ci - if: needs.pre_ci.outputs.continue - runs-on: ubuntu-latest - strategy: - fail-fast: false - matrix: - rust: [nightly, stable, beta, 1.76.0, 1.68.0] - timeout-minutes: 45 - steps: - - uses: actions/checkout@v5 - - uses: dtolnay/rust-toolchain@master - with: - toolchain: ${{matrix.rust}} - components: rust-src - - name: Enable type layout randomization - run: echo RUSTFLAGS=${RUSTFLAGS}\ -Zrandomize-layout >> $GITHUB_ENV - if: matrix.rust == 'nightly' - - run: cargo check - - run: cargo test - if: matrix.rust != '1.68.0' - - run: cargo run --manifest-path benches/Cargo.toml - - uses: actions/upload-artifact@v4 - if: matrix.rust == 'nightly' && always() - with: - name: Cargo.lock - path: Cargo.lock - continue-on-error: true - - minimal: - name: Minimal versions - needs: pre_ci - if: needs.pre_ci.outputs.continue - runs-on: ubuntu-latest - timeout-minutes: 45 - steps: - - uses: actions/checkout@v5 - - uses: dtolnay/rust-toolchain@nightly - - run: cargo generate-lockfile -Z minimal-versions - - run: cargo check --locked - - doc: - name: Documentation - needs: pre_ci - if: needs.pre_ci.outputs.continue - runs-on: ubuntu-latest - timeout-minutes: 45 - env: - RUSTDOCFLAGS: -Dwarnings - steps: - - uses: actions/checkout@v5 - - uses: dtolnay/rust-toolchain@nightly - with: - components: rust-src - - uses: dtolnay/install@cargo-docs-rs - - run: cargo docs-rs - - clippy: - name: Clippy - runs-on: ubuntu-latest - if: github.event_name != 'pull_request' - timeout-minutes: 45 - steps: - - uses: actions/checkout@v5 - - uses: dtolnay/rust-toolchain@nightly - with: - components: clippy, rust-src - - run: cargo clippy --tests --workspace -- -Dclippy::all -Dclippy::pedantic - - miri: - name: Miri - needs: pre_ci - if: needs.pre_ci.outputs.continue - runs-on: ubuntu-latest - timeout-minutes: 45 - steps: - - uses: actions/checkout@v5 - - uses: dtolnay/rust-toolchain@miri - - run: cargo miri setup - - run: cargo miri test - env: - MIRIFLAGS: -Zmiri-strict-provenance - - outdated: - name: Outdated - runs-on: ubuntu-latest - if: github.event_name != 'pull_request' - timeout-minutes: 45 - steps: - - uses: actions/checkout@v5 - - uses: dtolnay/rust-toolchain@stable - - uses: dtolnay/install@cargo-outdated - - run: cargo outdated --workspace --exit-code 1 diff --git 
a/vendor/quote/Cargo.lock b/vendor/quote/Cargo.lock deleted file mode 100644 index 038bc1b211a1e5..00000000000000 --- a/vendor/quote/Cargo.lock +++ /dev/null @@ -1,256 +0,0 @@ -# This file is automatically @generated by Cargo. -# It is not intended for manual editing. -version = 3 - -[[package]] -name = "dissimilar" -version = "1.0.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8975ffdaa0ef3661bfe02dbdcc06c9f829dfafe6a3c474de366a8d5e44276921" - -[[package]] -name = "equivalent" -version = "1.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "877a4ace8713b0bcf2a4e7eec82529c029f1d0619886d18145fea96c3ffe5c0f" - -[[package]] -name = "glob" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0cc23270f6e1808e30a928bdc84dea0b9b4136a8bc82338574f23baf47bbd280" - -[[package]] -name = "hashbrown" -version = "0.16.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5419bdc4f6a9207fbeba6d11b604d481addf78ecd10c11ad51e76c2f6482748d" - -[[package]] -name = "indexmap" -version = "2.12.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6717a8d2a5a929a1a2eb43a12812498ed141a0bcfb7e8f7844fbdbe4303bba9f" -dependencies = [ - "equivalent", - "hashbrown", -] - -[[package]] -name = "itoa" -version = "1.0.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c" - -[[package]] -name = "memchr" -version = "2.7.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f52b00d39961fc5b2736ea853c9cc86238e165017a493d1d5c8eac6bdc4cc273" - -[[package]] -name = "proc-macro2" -version = "1.0.103" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ee95bc4ef87b8d5ba32e8b7714ccc834865276eab0aed5c9958d00ec45f49e8" -dependencies = [ - "unicode-ident", -] - -[[package]] -name = "quote" -version = "1.0.41" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce25767e7b499d1b604768e7cde645d14cc8584231ea6b295e9c9eb22c02e1d1" -dependencies = [ - "proc-macro2", -] - -[[package]] -name = "quote" -version = "1.0.42" -dependencies = [ - "proc-macro2", - "rustversion", - "trybuild", -] - -[[package]] -name = "rustversion" -version = "1.0.22" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b39cdef0fa800fc44525c84ccb54a029961a8215f9619753635a9c0d2538d46d" - -[[package]] -name = "ryu" -version = "1.0.20" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28d3b2b1366ec20994f1fd18c3c594f05c5dd4bc44d8bb0c1c632c8d6829481f" - -[[package]] -name = "serde" -version = "1.0.228" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a8e94ea7f378bd32cbbd37198a4a91436180c5bb472411e48b5ec2e2124ae9e" -dependencies = [ - "serde_core", -] - -[[package]] -name = "serde_core" -version = "1.0.228" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41d385c7d4ca58e59fc732af25c3983b67ac852c1a25000afe1175de458b67ad" -dependencies = [ - "serde_derive", -] - -[[package]] -name = "serde_derive" -version = "1.0.228" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d540f220d3187173da220f885ab66608367b6574e925011a9353e4badda91d79" -dependencies = [ - "proc-macro2", - "quote 1.0.41", - "syn", -] - -[[package]] -name = "serde_json" -version = "1.0.145" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "402a6f66d8c709116cf22f558eab210f5a50187f702eb4d7e5ef38d9a7f1c79c" -dependencies = [ - "itoa", - "memchr", - "ryu", - "serde", - "serde_core", -] - -[[package]] -name = "serde_spanned" -version = "1.0.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e24345aa0fe688594e73770a5f6d1b216508b4f93484c0026d521acd30134392" -dependencies = [ - "serde_core", -] - -[[package]] -name = "syn" -version = "2.0.109" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f17c7e013e88258aa9543dcbe81aca68a667a9ac37cd69c9fbc07858bfe0e2f" -dependencies = [ - "proc-macro2", - "quote 1.0.41", - "unicode-ident", -] - -[[package]] -name = "target-triple" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "591ef38edfb78ca4771ee32cf494cb8771944bee237a9b91fc9c1424ac4b777b" - -[[package]] -name = "termcolor" -version = "1.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06794f8f6c5c898b3275aebefa6b8a1cb24cd2c6c79397ab15774837a0bc5755" -dependencies = [ - "winapi-util", -] - -[[package]] -name = "toml" -version = "0.9.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0dc8b1fb61449e27716ec0e1bdf0f6b8f3e8f6b05391e8497b8b6d7804ea6d8" -dependencies = [ - "indexmap", - "serde_core", - "serde_spanned", - "toml_datetime", - "toml_parser", - "toml_writer", - "winnow", -] - -[[package]] -name = "toml_datetime" -version = "0.7.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2cdb639ebbc97961c51720f858597f7f24c4fc295327923af55b74c3c724533" -dependencies = [ - "serde_core", -] - -[[package]] -name = "toml_parser" -version = "1.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0cbe268d35bdb4bb5a56a2de88d0ad0eb70af5384a99d648cd4b3d04039800e" -dependencies = [ - "winnow", -] - -[[package]] -name = "toml_writer" -version = "1.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df8b2b54733674ad286d16267dcfc7a71ed5c776e4ac7aa3c3e2561f7c637bf2" - -[[package]] -name = "trybuild" -version = "1.0.113" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "559b6a626c0815c942ac98d434746138b4f89ddd6a1b8cbb168c6845fb3376c5" -dependencies = [ - "dissimilar", - "glob", - "serde", - "serde_derive", - "serde_json", - "target-triple", - "termcolor", - "toml", -] - -[[package]] -name = "unicode-ident" -version = "1.0.22" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9312f7c4f6ff9069b165498234ce8be658059c6728633667c526e27dc2cf1df5" - -[[package]] -name = "winapi-util" -version = "0.1.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c2a7b1c03c876122aa43f3020e6c3c3ee5c05081c9a00739faf7503aeba10d22" -dependencies = [ - "windows-sys", -] - -[[package]] -name = "windows-link" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0805222e57f7521d6a62e36fa9163bc891acd422f971defe97d64e70d0a4fe5" - -[[package]] -name = "windows-sys" -version = "0.61.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae137229bcbd6cdf0f7b80a31df61766145077ddf49416a728b02cb3921ff3fc" -dependencies = [ - "windows-link", -] - -[[package]] -name = "winnow" -version = "0.7.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"21a0236b59786fed61e2a80582dd500fe61f18b5dca67a4a067d0bc9039339cf" diff --git a/vendor/quote/Cargo.toml b/vendor/quote/Cargo.toml deleted file mode 100644 index 1b6fed14eafe7b..00000000000000 --- a/vendor/quote/Cargo.toml +++ /dev/null @@ -1,70 +0,0 @@ -# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO -# -# When uploading crates to the registry Cargo will automatically -# "normalize" Cargo.toml files for maximal compatibility -# with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g., crates.io) dependencies. -# -# If you are reading this file be aware that the original Cargo.toml -# will likely look very different (and much more reasonable). -# See Cargo.toml.orig for the original contents. - -[package] -edition = "2018" -rust-version = "1.68" -name = "quote" -version = "1.0.42" -authors = ["David Tolnay "] -build = "build.rs" -autolib = false -autobins = false -autoexamples = false -autotests = false -autobenches = false -description = "Quasi-quoting macro quote!(...)" -documentation = "https://docs.rs/quote/" -readme = "README.md" -keywords = [ - "macros", - "syn", -] -categories = ["development-tools::procedural-macro-helpers"] -license = "MIT OR Apache-2.0" -repository = "https://github.com/dtolnay/quote" - -[package.metadata.docs.rs] -targets = ["x86_64-unknown-linux-gnu"] -rustdoc-args = [ - "--generate-link-to-definition", - "--generate-macro-expansion", - "--extern-html-root-url=core=https://doc.rust-lang.org", - "--extern-html-root-url=alloc=https://doc.rust-lang.org", - "--extern-html-root-url=std=https://doc.rust-lang.org", -] - -[features] -default = ["proc-macro"] -proc-macro = ["proc-macro2/proc-macro"] - -[lib] -name = "quote" -path = "src/lib.rs" - -[[test]] -name = "compiletest" -path = "tests/compiletest.rs" - -[[test]] -name = "test" -path = "tests/test.rs" - -[dependencies.proc-macro2] -version = "1.0.80" -default-features = false - -[dev-dependencies.rustversion] -version = "1.0" - -[dev-dependencies.trybuild] -version = "1.0.108" -features = ["diff"] diff --git a/vendor/quote/LICENSE-APACHE b/vendor/quote/LICENSE-APACHE deleted file mode 100644 index 1b5ec8b78e237b..00000000000000 --- a/vendor/quote/LICENSE-APACHE +++ /dev/null @@ -1,176 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. 
- - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - -2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - -3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - -4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - -5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - -6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - -8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - -END OF TERMS AND CONDITIONS diff --git a/vendor/quote/LICENSE-MIT b/vendor/quote/LICENSE-MIT deleted file mode 100644 index 31aa79387f27e7..00000000000000 --- a/vendor/quote/LICENSE-MIT +++ /dev/null @@ -1,23 +0,0 @@ -Permission is hereby granted, free of charge, to any -person obtaining a copy of this software and associated -documentation files (the "Software"), to deal in the -Software without restriction, including without -limitation the rights to use, copy, modify, merge, -publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software -is furnished to do so, subject to the following -conditions: - -The above copyright notice and this permission notice -shall be included in all copies or substantial portions -of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -DEALINGS IN THE SOFTWARE. diff --git a/vendor/quote/README.md b/vendor/quote/README.md deleted file mode 100644 index c4316be3b48219..00000000000000 --- a/vendor/quote/README.md +++ /dev/null @@ -1,271 +0,0 @@ -Rust Quasi-Quoting -================== - -[github](https://github.com/dtolnay/quote) -[crates.io](https://crates.io/crates/quote) -[docs.rs](https://docs.rs/quote) -[build status](https://github.com/dtolnay/quote/actions?query=branch%3Amaster) - -This crate provides the [`quote!`] macro for turning Rust syntax tree data -structures into tokens of source code. - -[`quote!`]: https://docs.rs/quote/1.0/quote/macro.quote.html - -Procedural macros in Rust receive a stream of tokens as input, execute arbitrary -Rust code to determine how to manipulate those tokens, and produce a stream of -tokens to hand back to the compiler to compile into the caller's crate. 
-Quasi-quoting is a solution to one piece of that — producing tokens to -return to the compiler. - -The idea of quasi-quoting is that we write *code* that we treat as *data*. -Within the `quote!` macro, we can write what looks like code to our text editor -or IDE. We get all the benefits of the editor's brace matching, syntax -highlighting, indentation, and maybe autocompletion. But rather than compiling -that as code into the current crate, we can treat it as data, pass it around, -mutate it, and eventually hand it back to the compiler as tokens to compile into -the macro caller's crate. - -This crate is motivated by the procedural macro use case, but is a -general-purpose Rust quasi-quoting library and is not specific to procedural -macros. - -```toml -[dependencies] -quote = "1.0" -``` - -*Version requirement: Quote supports rustc 1.68 and up.*
-[*Release notes*](https://github.com/dtolnay/quote/releases) - -<br>
- -## Syntax - -The quote crate provides a [`quote!`] macro within which you can write Rust code -that gets packaged into a [`TokenStream`] and can be treated as data. You should -think of `TokenStream` as representing a fragment of Rust source code. - -[`TokenStream`]: https://docs.rs/proc-macro2/1.0/proc_macro2/struct.TokenStream.html - -Within the `quote!` macro, interpolation is done with `#var`. Any type -implementing the [`quote::ToTokens`] trait can be interpolated. This includes -most Rust primitive types as well as most of the syntax tree types from [`syn`]. - -[`quote::ToTokens`]: https://docs.rs/quote/1.0/quote/trait.ToTokens.html -[`syn`]: https://github.com/dtolnay/syn - -```rust -let tokens = quote! { - struct SerializeWith #generics #where_clause { - value: &'a #field_ty, - phantom: core::marker::PhantomData<#item_ty>, - } - - impl #generics serde::Serialize for SerializeWith #generics #where_clause { - fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> - where - S: serde::Serializer, - { - #path(self.value, serializer) - } - } - - SerializeWith { - value: #value, - phantom: core::marker::PhantomData::<#item_ty>, - } -}; -``` - -<br>
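For reference, a self-contained sketch of the `#var` interpolation described above, assuming only the `quote` crate as a dependency; `name` and `fields` are made-up token fragments, not part of the patch:

```rust
use quote::quote;

fn main() {
    // Any `ToTokens` value can be interpolated; a `TokenStream` produced by an
    // earlier `quote!` invocation is the simplest case.
    let name = quote!(Point);
    let fields = quote!(x: f64, y: f64);

    let tokens = quote! {
        struct #name {
            #fields
        }
    };

    // `tokens` is data (a proc_macro2::TokenStream), not compiled code.
    println!("{}", tokens);
}
```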
- -## Repetition - -Repetition is done using `#(...)*` or `#(...),*` similar to `macro_rules!`. This -iterates through the elements of any variable interpolated within the repetition -and inserts a copy of the repetition body for each one. The variables in an -interpolation may be a `Vec`, slice, `BTreeSet`, or any `Iterator`. - -- `#(#var)*` — no separators -- `#(#var),*` — the character before the asterisk is used as a separator -- `#( struct #var; )*` — the repetition can contain other things -- `#( #k => println!("{}", #v), )*` — even multiple interpolations - -Note that there is a difference between `#(#var ,)*` and `#(#var),*`—the latter -does not produce a trailing comma. This matches the behavior of delimiters in -`macro_rules!`. - -
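A runnable sketch of the repetition forms listed above, assuming the `quote` crate; the field names and types are invented for illustration:

```rust
use quote::{format_ident, quote};

fn main() {
    let fields: Vec<_> = (0..3).map(|i| format_ident!("field_{}", i)).collect();
    let types = ["u8", "u16", "u32"].iter().map(|t| format_ident!("{}", t));

    let tokens = quote! {
        struct Generated {
            // One copy of the body per element; the comma before `*` separates them.
            // Both interpolated variables must yield the same number of items.
            #( #fields: #types ),*
        }
    };

    // Prints roughly: struct Generated { field_0 : u8 , field_1 : u16 , field_2 : u32 }
    println!("{}", tokens);
}
```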
- -## Returning tokens to the compiler - -The `quote!` macro evaluates to an expression of type -`proc_macro2::TokenStream`. Meanwhile Rust procedural macros are expected to -return the type `proc_macro::TokenStream`. - -The difference between the two types is that `proc_macro` types are entirely -specific to procedural macros and cannot ever exist in code outside of a -procedural macro, while `proc_macro2` types may exist anywhere including tests -and non-macro code like main.rs and build.rs. This is why even the procedural -macro ecosystem is largely built around `proc_macro2`, because that ensures the -libraries are unit testable and accessible in non-macro contexts. - -There is a [`From`]-conversion in both directions so returning the output of -`quote!` from a procedural macro usually looks like `tokens.into()` or -`proc_macro::TokenStream::from(tokens)`. - -[`From`]: https://doc.rust-lang.org/std/convert/trait.From.html - -
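A minimal sketch of what that boundary can look like in a derive macro, assuming a proc-macro crate with `proc-macro2`, `quote`, and `syn` as dependencies; the `HelloWorld` derive and its generated method are made up for illustration:

```rust
use proc_macro::TokenStream;
use quote::quote;
use syn::{parse_macro_input, DeriveInput};

#[proc_macro_derive(HelloWorld)]
pub fn hello_world(input: TokenStream) -> TokenStream {
    let input = parse_macro_input!(input as DeriveInput);
    let name = input.ident;

    // Everything up to here works on proc_macro2 types, so it is unit-testable
    // outside of a macro expansion.
    let expanded: proc_macro2::TokenStream = quote! {
        impl #name {
            pub fn hello_world() -> &'static str {
                concat!("Hello from ", stringify!(#name))
            }
        }
    };

    // The `From` conversion back into the compiler's own TokenStream type.
    TokenStream::from(expanded)
}
```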
-
-## Examples
-
-### Combining quoted fragments
-
-Usually you don't end up constructing an entire final `TokenStream` in one
-piece. Different parts may come from different helper functions. The tokens
-produced by `quote!` themselves implement `ToTokens` and so can be interpolated
-into later `quote!` invocations to build up a final result.
-
-```rust
-let type_definition = quote! {...};
-let methods = quote! {...};
-
-let tokens = quote! {
-    #type_definition
-    #methods
-};
-```
-
-### Constructing identifiers
-
-Suppose we have an identifier `ident` which came from somewhere in a macro
-input and we need to modify it in some way for the macro output. Let's consider
-prepending the identifier with an underscore.
-
-Simply interpolating the identifier next to an underscore will not have the
-behavior of concatenating them. The underscore and the identifier will continue
-to be two separate tokens as if you had written `_ x`.
-
-```rust
-// incorrect
-quote! {
-    let mut _#ident = 0;
-}
-```
-
-The solution is to build a new identifier token with the correct value. As this
-is such a common case, the `format_ident!` macro provides a convenient utility
-for doing so correctly.
-
-```rust
-let varname = format_ident!("_{}", ident);
-quote! {
-    let mut #varname = 0;
-}
-```
-
-Alternatively, the APIs provided by Syn and proc-macro2 can be used to directly
-build the identifier. This is roughly equivalent to the above, but will not
-handle `ident` being a raw identifier.
-
-```rust
-let concatenated = format!("_{}", ident);
-let varname = syn::Ident::new(&concatenated, ident.span());
-quote! {
-    let mut #varname = 0;
-}
-```
-
-### Making method calls
-
-Let's say our macro requires some type specified in the macro input to have a
-constructor called `new`. We have the type in a variable called `field_type` of
-type `syn::Type` and want to invoke the constructor.
-
-```rust
-// incorrect
-quote! {
-    let value = #field_type::new();
-}
-```
-
-This works only sometimes. If `field_type` is `String`, the expanded code
-contains `String::new()` which is fine. But if `field_type` is something like
-`Vec<u8>` then the expanded code is `Vec<u8>::new()` which is invalid syntax.
-Ordinarily in handwritten Rust we would write `Vec::<u8>::new()` but for macros
-often the following is more convenient.
-
-```rust
-quote! {
-    let value = <#field_type>::new();
-}
-```
-
-This expands to `<Vec<u8>>::new()` which behaves correctly.
-
-A similar pattern is appropriate for trait methods.
-
-```rust
-quote! {
-    let value = <#field_type as core::default::Default>::default();
-}
-```
-
-
-## Hygiene
-
-Any interpolated tokens preserve the `Span` information provided by their
-`ToTokens` implementation. Tokens that originate within a `quote!` invocation
-are spanned with [`Span::call_site()`].
-
-[`Span::call_site()`]: https://docs.rs/proc-macro2/1.0/proc_macro2/struct.Span.html#method.call_site
-
-A different span can be provided explicitly through the [`quote_spanned!`]
-macro.
-
-[`quote_spanned!`]: https://docs.rs/quote/1.0/quote/macro.quote_spanned.html
-
-
-## Non-macro code generators
-
-When using `quote` in a build.rs or main.rs and writing the output out to a
-file, consider having the code generator pass the tokens through [prettyplease]
-before writing. This way if an error occurs in the generated code it is
-convenient for a human to read and debug.
-
-Be aware that no kind of hygiene or span information is retained when tokens are
-written to a file; the conversion from tokens to source code is lossy.
-
-Example usage in build.rs:
-
-```rust
-let output = quote! { ... };
-let syntax_tree = syn::parse2(output).unwrap();
-let formatted = prettyplease::unparse(&syntax_tree);
-
-let out_dir = env::var_os("OUT_DIR").unwrap();
-let dest_path = Path::new(&out_dir).join("out.rs");
-fs::write(dest_path, formatted).unwrap();
-```
-
-[prettyplease]: https://github.com/dtolnay/prettyplease
-
-
-#### License
-
-Licensed under either of Apache License, Version
-2.0 or MIT license at your option.
-
- - -Unless you explicitly state otherwise, any contribution intentionally submitted -for inclusion in this crate by you, as defined in the Apache-2.0 license, shall -be dual licensed as above, without any additional terms or conditions. - diff --git a/vendor/quote/build.rs b/vendor/quote/build.rs deleted file mode 100644 index 50f98cb3bda604..00000000000000 --- a/vendor/quote/build.rs +++ /dev/null @@ -1,32 +0,0 @@ -use std::env; -use std::process::Command; -use std::str; - -fn main() { - println!("cargo:rerun-if-changed=build.rs"); - - let Some(minor) = rustc_minor_version() else { - return; - }; - - if minor >= 77 { - println!("cargo:rustc-check-cfg=cfg(no_diagnostic_namespace)"); - } - - // Support for the `#[diagnostic]` tool attribute namespace - // https://blog.rust-lang.org/2024/05/02/Rust-1.78.0.html#diagnostic-attributes - if minor < 78 { - println!("cargo:rustc-cfg=no_diagnostic_namespace"); - } -} - -fn rustc_minor_version() -> Option { - let rustc = env::var_os("RUSTC")?; - let output = Command::new(rustc).arg("--version").output().ok()?; - let version = str::from_utf8(&output.stdout).ok()?; - let mut pieces = version.split('.'); - if pieces.next() != Some("rustc 1") { - return None; - } - pieces.next()?.parse().ok() -} diff --git a/vendor/quote/rust-toolchain.toml b/vendor/quote/rust-toolchain.toml deleted file mode 100644 index 20fe888c30ab44..00000000000000 --- a/vendor/quote/rust-toolchain.toml +++ /dev/null @@ -1,2 +0,0 @@ -[toolchain] -components = ["rust-src"] diff --git a/vendor/quote/src/ext.rs b/vendor/quote/src/ext.rs deleted file mode 100644 index bc983a5d7d9166..00000000000000 --- a/vendor/quote/src/ext.rs +++ /dev/null @@ -1,136 +0,0 @@ -use super::ToTokens; -use core::iter; -use proc_macro2::{TokenStream, TokenTree}; - -/// TokenStream extension trait with methods for appending tokens. -/// -/// This trait is sealed and cannot be implemented outside of the `quote` crate. -pub trait TokenStreamExt: private::Sealed { - /// For use by `ToTokens` implementations. - /// - /// Appends the token specified to this list of tokens. - fn append(&mut self, token: U) - where - U: Into; - - /// For use by `ToTokens` implementations. - /// - /// ``` - /// # use quote::{quote, TokenStreamExt, ToTokens}; - /// # use proc_macro2::TokenStream; - /// # - /// struct X; - /// - /// impl ToTokens for X { - /// fn to_tokens(&self, tokens: &mut TokenStream) { - /// tokens.append_all(&[true, false]); - /// } - /// } - /// - /// let tokens = quote!(#X); - /// assert_eq!(tokens.to_string(), "true false"); - /// ``` - fn append_all(&mut self, iter: I) - where - I: IntoIterator, - I::Item: ToTokens; - - /// For use by `ToTokens` implementations. - /// - /// Appends all of the items in the iterator `I`, separated by the tokens - /// `U`. - fn append_separated(&mut self, iter: I, op: U) - where - I: IntoIterator, - I::Item: ToTokens, - U: ToTokens; - - /// For use by `ToTokens` implementations. - /// - /// Appends all tokens in the iterator `I`, appending `U` after each - /// element, including after the last element of the iterator. 
- fn append_terminated(&mut self, iter: I, term: U) - where - I: IntoIterator, - I::Item: ToTokens, - U: ToTokens; -} - -impl TokenStreamExt for TokenStream { - fn append(&mut self, token: U) - where - U: Into, - { - self.extend(iter::once(token.into())); - } - - fn append_all(&mut self, iter: I) - where - I: IntoIterator, - I::Item: ToTokens, - { - do_append_all(self, iter.into_iter()); - - fn do_append_all(stream: &mut TokenStream, iter: I) - where - I: Iterator, - I::Item: ToTokens, - { - for token in iter { - token.to_tokens(stream); - } - } - } - - fn append_separated(&mut self, iter: I, op: U) - where - I: IntoIterator, - I::Item: ToTokens, - U: ToTokens, - { - do_append_separated(self, iter.into_iter(), op); - - fn do_append_separated(stream: &mut TokenStream, iter: I, op: U) - where - I: Iterator, - I::Item: ToTokens, - U: ToTokens, - { - for (i, token) in iter.into_iter().enumerate() { - if i > 0 { - op.to_tokens(stream); - } - token.to_tokens(stream); - } - } - } - - fn append_terminated(&mut self, iter: I, term: U) - where - I: IntoIterator, - I::Item: ToTokens, - U: ToTokens, - { - do_append_terminated(self, iter.into_iter(), term); - - fn do_append_terminated(stream: &mut TokenStream, iter: I, term: U) - where - I: Iterator, - I::Item: ToTokens, - U: ToTokens, - { - for token in iter { - token.to_tokens(stream); - term.to_tokens(stream); - } - } - } -} - -mod private { - use proc_macro2::TokenStream; - - pub trait Sealed {} - - impl Sealed for TokenStream {} -} diff --git a/vendor/quote/src/format.rs b/vendor/quote/src/format.rs deleted file mode 100644 index ec0bbf38ba3776..00000000000000 --- a/vendor/quote/src/format.rs +++ /dev/null @@ -1,168 +0,0 @@ -/// Formatting macro for constructing `Ident`s. -/// -///
-/// -/// # Syntax -/// -/// Syntax is copied from the [`format!`] macro, supporting both positional and -/// named arguments. -/// -/// Only a limited set of formatting traits are supported. The current mapping -/// of format types to traits is: -/// -/// * `{}` ⇒ [`IdentFragment`] -/// * `{:o}` ⇒ [`Octal`](std::fmt::Octal) -/// * `{:x}` ⇒ [`LowerHex`](std::fmt::LowerHex) -/// * `{:X}` ⇒ [`UpperHex`](std::fmt::UpperHex) -/// * `{:b}` ⇒ [`Binary`](std::fmt::Binary) -/// -/// See [`std::fmt`] for more information. -/// -///
-/// -/// # IdentFragment -/// -/// Unlike `format!`, this macro uses the [`IdentFragment`] formatting trait by -/// default. This trait is like `Display`, with a few differences: -/// -/// * `IdentFragment` is only implemented for a limited set of types, such as -/// unsigned integers and strings. -/// * [`Ident`] arguments will have their `r#` prefixes stripped, if present. -/// -/// [`IdentFragment`]: crate::IdentFragment -/// [`Ident`]: proc_macro2::Ident -/// -///
-/// -/// # Hygiene -/// -/// The [`Span`] of the first `Ident` argument is used as the span of the final -/// identifier, falling back to [`Span::call_site`] when no identifiers are -/// provided. -/// -/// ``` -/// # use quote::format_ident; -/// # let ident = format_ident!("Ident"); -/// // If `ident` is an Ident, the span of `my_ident` will be inherited from it. -/// let my_ident = format_ident!("My{}{}", ident, "IsCool"); -/// assert_eq!(my_ident, "MyIdentIsCool"); -/// ``` -/// -/// Alternatively, the span can be overridden by passing the `span` named -/// argument. -/// -/// ``` -/// # use quote::format_ident; -/// # const IGNORE_TOKENS: &'static str = stringify! { -/// let my_span = /* ... */; -/// # }; -/// # let my_span = proc_macro2::Span::call_site(); -/// format_ident!("MyIdent", span = my_span); -/// ``` -/// -/// [`Span`]: proc_macro2::Span -/// [`Span::call_site`]: proc_macro2::Span::call_site -/// -///


-/// -/// # Panics -/// -/// This method will panic if the resulting formatted string is not a valid -/// identifier. -/// -///
-/// -/// # Examples -/// -/// Composing raw and non-raw identifiers: -/// ``` -/// # use quote::format_ident; -/// let my_ident = format_ident!("My{}", "Ident"); -/// assert_eq!(my_ident, "MyIdent"); -/// -/// let raw = format_ident!("r#Raw"); -/// assert_eq!(raw, "r#Raw"); -/// -/// let my_ident_raw = format_ident!("{}Is{}", my_ident, raw); -/// assert_eq!(my_ident_raw, "MyIdentIsRaw"); -/// ``` -/// -/// Integer formatting options: -/// ``` -/// # use quote::format_ident; -/// let num: u32 = 10; -/// -/// let decimal = format_ident!("Id_{}", num); -/// assert_eq!(decimal, "Id_10"); -/// -/// let octal = format_ident!("Id_{:o}", num); -/// assert_eq!(octal, "Id_12"); -/// -/// let binary = format_ident!("Id_{:b}", num); -/// assert_eq!(binary, "Id_1010"); -/// -/// let lower_hex = format_ident!("Id_{:x}", num); -/// assert_eq!(lower_hex, "Id_a"); -/// -/// let upper_hex = format_ident!("Id_{:X}", num); -/// assert_eq!(upper_hex, "Id_A"); -/// ``` -#[macro_export] -macro_rules! format_ident { - ($fmt:expr) => { - $crate::format_ident_impl!([ - $crate::__private::Option::None, - $fmt - ]) - }; - - ($fmt:expr, $($rest:tt)*) => { - $crate::format_ident_impl!([ - $crate::__private::Option::None, - $fmt - ] $($rest)*) - }; -} - -#[macro_export] -#[doc(hidden)] -macro_rules! format_ident_impl { - // Final state - ([$span:expr, $($fmt:tt)*]) => { - $crate::__private::mk_ident( - &$crate::__private::format!($($fmt)*), - $span, - ) - }; - - // Span argument - ([$old:expr, $($fmt:tt)*] span = $span:expr) => { - $crate::format_ident_impl!([$old, $($fmt)*] span = $span,) - }; - ([$old:expr, $($fmt:tt)*] span = $span:expr, $($rest:tt)*) => { - $crate::format_ident_impl!([ - $crate::__private::Option::Some::<$crate::__private::Span>($span), - $($fmt)* - ] $($rest)*) - }; - - // Named argument - ([$span:expr, $($fmt:tt)*] $name:ident = $arg:expr) => { - $crate::format_ident_impl!([$span, $($fmt)*] $name = $arg,) - }; - ([$span:expr, $($fmt:tt)*] $name:ident = $arg:expr, $($rest:tt)*) => { - match $crate::__private::IdentFragmentAdapter(&$arg) { - arg => $crate::format_ident_impl!([$span.or(arg.span()), $($fmt)*, $name = arg] $($rest)*), - } - }; - - // Positional argument - ([$span:expr, $($fmt:tt)*] $arg:expr) => { - $crate::format_ident_impl!([$span, $($fmt)*] $arg,) - }; - ([$span:expr, $($fmt:tt)*] $arg:expr, $($rest:tt)*) => { - match $crate::__private::IdentFragmentAdapter(&$arg) { - arg => $crate::format_ident_impl!([$span.or(arg.span()), $($fmt)*, arg] $($rest)*), - } - }; -} diff --git a/vendor/quote/src/ident_fragment.rs b/vendor/quote/src/ident_fragment.rs deleted file mode 100644 index 6c2a9a87acb411..00000000000000 --- a/vendor/quote/src/ident_fragment.rs +++ /dev/null @@ -1,88 +0,0 @@ -use alloc::borrow::Cow; -use core::fmt; -use proc_macro2::{Ident, Span}; - -/// Specialized formatting trait used by `format_ident!`. -/// -/// [`Ident`] arguments formatted using this trait will have their `r#` prefix -/// stripped, if present. -/// -/// See [`format_ident!`] for more information. -/// -/// [`format_ident!`]: crate::format_ident -pub trait IdentFragment { - /// Format this value as an identifier fragment. - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result; - - /// Span associated with this `IdentFragment`. - /// - /// If non-`None`, may be inherited by formatted identifiers. 
- fn span(&self) -> Option { - None - } -} - -impl IdentFragment for &T { - fn span(&self) -> Option { - ::span(*self) - } - - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - IdentFragment::fmt(*self, f) - } -} - -impl IdentFragment for &mut T { - fn span(&self) -> Option { - ::span(*self) - } - - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - IdentFragment::fmt(*self, f) - } -} - -impl IdentFragment for Ident { - fn span(&self) -> Option { - Some(self.span()) - } - - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - let id = self.to_string(); - if let Some(id) = id.strip_prefix("r#") { - fmt::Display::fmt(id, f) - } else { - fmt::Display::fmt(&id[..], f) - } - } -} - -impl IdentFragment for Cow<'_, T> -where - T: IdentFragment + ToOwned + ?Sized, -{ - fn span(&self) -> Option { - T::span(self) - } - - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - T::fmt(self, f) - } -} - -// Limited set of types which this is implemented for, as we want to avoid types -// which will often include non-identifier characters in their `Display` impl. -macro_rules! ident_fragment_display { - ($($T:ty),*) => { - $( - impl IdentFragment for $T { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - fmt::Display::fmt(self, f) - } - } - )* - }; -} - -ident_fragment_display!(bool, str, String, char); -ident_fragment_display!(u8, u16, u32, u64, u128, usize); diff --git a/vendor/quote/src/lib.rs b/vendor/quote/src/lib.rs deleted file mode 100644 index dd2f5b7cab62ed..00000000000000 --- a/vendor/quote/src/lib.rs +++ /dev/null @@ -1,1455 +0,0 @@ -//! [![github]](https://github.com/dtolnay/quote) [![crates-io]](https://crates.io/crates/quote) [![docs-rs]](https://docs.rs/quote) -//! -//! [github]: https://img.shields.io/badge/github-8da0cb?style=for-the-badge&labelColor=555555&logo=github -//! [crates-io]: https://img.shields.io/badge/crates.io-fc8d62?style=for-the-badge&labelColor=555555&logo=rust -//! [docs-rs]: https://img.shields.io/badge/docs.rs-66c2a5?style=for-the-badge&labelColor=555555&logo=docs.rs -//! -//!
-//! -//! This crate provides the [`quote!`] macro for turning Rust syntax tree data -//! structures into tokens of source code. -//! -//! Procedural macros in Rust receive a stream of tokens as input, execute -//! arbitrary Rust code to determine how to manipulate those tokens, and produce -//! a stream of tokens to hand back to the compiler to compile into the caller's -//! crate. Quasi-quoting is a solution to one piece of that — producing -//! tokens to return to the compiler. -//! -//! The idea of quasi-quoting is that we write *code* that we treat as *data*. -//! Within the `quote!` macro, we can write what looks like code to our text -//! editor or IDE. We get all the benefits of the editor's brace matching, -//! syntax highlighting, indentation, and maybe autocompletion. But rather than -//! compiling that as code into the current crate, we can treat it as data, pass -//! it around, mutate it, and eventually hand it back to the compiler as tokens -//! to compile into the macro caller's crate. -//! -//! This crate is motivated by the procedural macro use case, but is a -//! general-purpose Rust quasi-quoting library and is not specific to procedural -//! macros. -//! -//! ```toml -//! [dependencies] -//! quote = "1.0" -//! ``` -//! -//!
-//! -//! # Example -//! -//! The following quasi-quoted block of code is something you might find in [a] -//! procedural macro having to do with data structure serialization. The `#var` -//! syntax performs interpolation of runtime variables into the quoted tokens. -//! Check out the documentation of the [`quote!`] macro for more detail about -//! the syntax. See also the [`quote_spanned!`] macro which is important for -//! implementing hygienic procedural macros. -//! -//! [a]: https://serde.rs/ -//! -//! ``` -//! # use quote::quote; -//! # -//! # let generics = ""; -//! # let where_clause = ""; -//! # let field_ty = ""; -//! # let item_ty = ""; -//! # let path = ""; -//! # let value = ""; -//! # -//! let tokens = quote! { -//! struct SerializeWith #generics #where_clause { -//! value: &'a #field_ty, -//! phantom: core::marker::PhantomData<#item_ty>, -//! } -//! -//! impl #generics serde::Serialize for SerializeWith #generics #where_clause { -//! fn serialize(&self, serializer: S) -> Result -//! where -//! S: serde::Serializer, -//! { -//! #path(self.value, serializer) -//! } -//! } -//! -//! SerializeWith { -//! value: #value, -//! phantom: core::marker::PhantomData::<#item_ty>, -//! } -//! }; -//! ``` -//! -//!
-//! -//! # Non-macro code generators -//! -//! When using `quote` in a build.rs or main.rs and writing the output out to a -//! file, consider having the code generator pass the tokens through -//! [prettyplease] before writing. This way if an error occurs in the generated -//! code it is convenient for a human to read and debug. -//! -//! [prettyplease]: https://github.com/dtolnay/prettyplease - -// Quote types in rustdoc of other crates get linked to here. -#![doc(html_root_url = "https://docs.rs/quote/1.0.42")] -#![allow( - clippy::doc_markdown, - clippy::elidable_lifetime_names, - clippy::items_after_statements, - clippy::missing_errors_doc, - clippy::missing_panics_doc, - clippy::module_name_repetitions, - clippy::needless_lifetimes, - // false positive https://github.com/rust-lang/rust-clippy/issues/6983 - clippy::wrong_self_convention, -)] - -extern crate alloc; - -#[cfg(feature = "proc-macro")] -extern crate proc_macro; - -mod ext; -mod format; -mod ident_fragment; -mod to_tokens; - -// Not public API. -#[doc(hidden)] -#[path = "runtime.rs"] -pub mod __private; - -pub use crate::ext::TokenStreamExt; -pub use crate::ident_fragment::IdentFragment; -pub use crate::to_tokens::ToTokens; - -// Not public API. -#[doc(hidden)] -pub mod spanned; - -macro_rules! __quote { - ($quote:item) => { - /// The whole point. - /// - /// Performs variable interpolation against the input and produces it as - /// [`proc_macro2::TokenStream`]. - /// - /// Note: for returning tokens to the compiler in a procedural macro, use - /// `.into()` on the result to convert to [`proc_macro::TokenStream`]. - /// - ///
- /// - /// # Interpolation - /// - /// Variable interpolation is done with `#var` (similar to `$var` in - /// `macro_rules!` macros). This grabs the `var` variable that is currently in - /// scope and inserts it in that location in the output tokens. Any type - /// implementing the [`ToTokens`] trait can be interpolated. This includes most - /// Rust primitive types as well as most of the syntax tree types from the [Syn] - /// crate. - /// - /// [Syn]: https://github.com/dtolnay/syn - /// - /// Repetition is done using `#(...)*` or `#(...),*` again similar to - /// `macro_rules!`. This iterates through the elements of any variable - /// interpolated within the repetition and inserts a copy of the repetition body - /// for each one. The variables in an interpolation may be a `Vec`, slice, - /// `BTreeSet`, or any `Iterator`. - /// - /// - `#(#var)*` — no separators - /// - `#(#var),*` — the character before the asterisk is used as a separator - /// - `#( struct #var; )*` — the repetition can contain other tokens - /// - `#( #k => println!("{}", #v), )*` — even multiple interpolations - /// - ///
- /// - /// # Hygiene - /// - /// Any interpolated tokens preserve the `Span` information provided by their - /// `ToTokens` implementation. Tokens that originate within the `quote!` - /// invocation are spanned with [`Span::call_site()`]. - /// - /// [`Span::call_site()`]: proc_macro2::Span::call_site - /// - /// A different span can be provided through the [`quote_spanned!`] macro. - /// - ///
- /// - /// # Return type - /// - /// The macro evaluates to an expression of type `proc_macro2::TokenStream`. - /// Meanwhile Rust procedural macros are expected to return the type - /// `proc_macro::TokenStream`. - /// - /// The difference between the two types is that `proc_macro` types are entirely - /// specific to procedural macros and cannot ever exist in code outside of a - /// procedural macro, while `proc_macro2` types may exist anywhere including - /// tests and non-macro code like main.rs and build.rs. This is why even the - /// procedural macro ecosystem is largely built around `proc_macro2`, because - /// that ensures the libraries are unit testable and accessible in non-macro - /// contexts. - /// - /// There is a [`From`]-conversion in both directions so returning the output of - /// `quote!` from a procedural macro usually looks like `tokens.into()` or - /// `proc_macro::TokenStream::from(tokens)`. - /// - ///
- /// - /// # Examples - /// - /// ### Procedural macro - /// - /// The structure of a basic procedural macro is as follows. Refer to the [Syn] - /// crate for further useful guidance on using `quote!` as part of a procedural - /// macro. - /// - /// [Syn]: https://github.com/dtolnay/syn - /// - /// ``` - /// # #[cfg(any())] - /// extern crate proc_macro; - /// # extern crate proc_macro2; - /// - /// # #[cfg(any())] - /// use proc_macro::TokenStream; - /// # use proc_macro2::TokenStream; - /// use quote::quote; - /// - /// # const IGNORE_TOKENS: &'static str = stringify! { - /// #[proc_macro_derive(HeapSize)] - /// # }; - /// pub fn derive_heap_size(input: TokenStream) -> TokenStream { - /// // Parse the input and figure out what implementation to generate... - /// # const IGNORE_TOKENS: &'static str = stringify! { - /// let name = /* ... */; - /// let expr = /* ... */; - /// # }; - /// # - /// # let name = 0; - /// # let expr = 0; - /// - /// let expanded = quote! { - /// // The generated impl. - /// impl heapsize::HeapSize for #name { - /// fn heap_size_of_children(&self) -> usize { - /// #expr - /// } - /// } - /// }; - /// - /// // Hand the output tokens back to the compiler. - /// TokenStream::from(expanded) - /// } - /// ``` - /// - ///


- /// - /// ### Combining quoted fragments - /// - /// Usually you don't end up constructing an entire final `TokenStream` in one - /// piece. Different parts may come from different helper functions. The tokens - /// produced by `quote!` themselves implement `ToTokens` and so can be - /// interpolated into later `quote!` invocations to build up a final result. - /// - /// ``` - /// # use quote::quote; - /// # - /// let type_definition = quote! {...}; - /// let methods = quote! {...}; - /// - /// let tokens = quote! { - /// #type_definition - /// #methods - /// }; - /// ``` - /// - ///


- /// - /// ### Constructing identifiers - /// - /// Suppose we have an identifier `ident` which came from somewhere in a macro - /// input and we need to modify it in some way for the macro output. Let's - /// consider prepending the identifier with an underscore. - /// - /// Simply interpolating the identifier next to an underscore will not have the - /// behavior of concatenating them. The underscore and the identifier will - /// continue to be two separate tokens as if you had written `_ x`. - /// - /// ``` - /// # use proc_macro2::{self as syn, Span}; - /// # use quote::quote; - /// # - /// # let ident = syn::Ident::new("i", Span::call_site()); - /// # - /// // incorrect - /// quote! { - /// let mut _#ident = 0; - /// } - /// # ; - /// ``` - /// - /// The solution is to build a new identifier token with the correct value. As - /// this is such a common case, the [`format_ident!`] macro provides a - /// convenient utility for doing so correctly. - /// - /// ``` - /// # use proc_macro2::{Ident, Span}; - /// # use quote::{format_ident, quote}; - /// # - /// # let ident = Ident::new("i", Span::call_site()); - /// # - /// let varname = format_ident!("_{}", ident); - /// quote! { - /// let mut #varname = 0; - /// } - /// # ; - /// ``` - /// - /// Alternatively, the APIs provided by Syn and proc-macro2 can be used to - /// directly build the identifier. This is roughly equivalent to the above, but - /// will not handle `ident` being a raw identifier. - /// - /// ``` - /// # use proc_macro2::{self as syn, Span}; - /// # use quote::quote; - /// # - /// # let ident = syn::Ident::new("i", Span::call_site()); - /// # - /// let concatenated = format!("_{}", ident); - /// let varname = syn::Ident::new(&concatenated, ident.span()); - /// quote! { - /// let mut #varname = 0; - /// } - /// # ; - /// ``` - /// - ///


- /// - /// ### Making method calls - /// - /// Let's say our macro requires some type specified in the macro input to have - /// a constructor called `new`. We have the type in a variable called - /// `field_type` of type `syn::Type` and want to invoke the constructor. - /// - /// ``` - /// # use quote::quote; - /// # - /// # let field_type = quote!(...); - /// # - /// // incorrect - /// quote! { - /// let value = #field_type::new(); - /// } - /// # ; - /// ``` - /// - /// This works only sometimes. If `field_type` is `String`, the expanded code - /// contains `String::new()` which is fine. But if `field_type` is something - /// like `Vec` then the expanded code is `Vec::new()` which is invalid - /// syntax. Ordinarily in handwritten Rust we would write `Vec::::new()` - /// but for macros often the following is more convenient. - /// - /// ``` - /// # use quote::quote; - /// # - /// # let field_type = quote!(...); - /// # - /// quote! { - /// let value = <#field_type>::new(); - /// } - /// # ; - /// ``` - /// - /// This expands to `>::new()` which behaves correctly. - /// - /// A similar pattern is appropriate for trait methods. - /// - /// ``` - /// # use quote::quote; - /// # - /// # let field_type = quote!(...); - /// # - /// quote! { - /// let value = <#field_type as core::default::Default>::default(); - /// } - /// # ; - /// ``` - /// - ///


- /// - /// ### Interpolating text inside of doc comments - /// - /// Neither doc comments nor string literals get interpolation behavior in - /// quote: - /// - /// ```compile_fail - /// quote! { - /// /// try to interpolate: #ident - /// /// - /// /// ... - /// } - /// ``` - /// - /// ```compile_fail - /// quote! { - /// #[doc = "try to interpolate: #ident"] - /// } - /// ``` - /// - /// Instead the best way to build doc comments that involve variables is by - /// formatting the doc string literal outside of quote. - /// - /// ```rust - /// # use proc_macro2::{Ident, Span}; - /// # use quote::quote; - /// # - /// # const IGNORE: &str = stringify! { - /// let msg = format!(...); - /// # }; - /// # - /// # let ident = Ident::new("var", Span::call_site()); - /// # let msg = format!("try to interpolate: {}", ident); - /// quote! { - /// #[doc = #msg] - /// /// - /// /// ... - /// } - /// # ; - /// ``` - /// - ///


- /// - /// ### Indexing into a tuple struct - /// - /// When interpolating indices of a tuple or tuple struct, we need them not to - /// appears suffixed as integer literals by interpolating them as [`syn::Index`] - /// instead. - /// - /// [`syn::Index`]: https://docs.rs/syn/2.0/syn/struct.Index.html - /// - /// ```compile_fail - /// let i = 0usize..self.fields.len(); - /// - /// // expands to 0 + self.0usize.heap_size() + self.1usize.heap_size() + ... - /// // which is not valid syntax - /// quote! { - /// 0 #( + self.#i.heap_size() )* - /// } - /// ``` - /// - /// ``` - /// # use proc_macro2::{Ident, TokenStream}; - /// # use quote::quote; - /// # - /// # mod syn { - /// # use proc_macro2::{Literal, TokenStream}; - /// # use quote::{ToTokens, TokenStreamExt}; - /// # - /// # pub struct Index(usize); - /// # - /// # impl From for Index { - /// # fn from(i: usize) -> Self { - /// # Index(i) - /// # } - /// # } - /// # - /// # impl ToTokens for Index { - /// # fn to_tokens(&self, tokens: &mut TokenStream) { - /// # tokens.append(Literal::usize_unsuffixed(self.0)); - /// # } - /// # } - /// # } - /// # - /// # struct Struct { - /// # fields: Vec, - /// # } - /// # - /// # impl Struct { - /// # fn example(&self) -> TokenStream { - /// let i = (0..self.fields.len()).map(syn::Index::from); - /// - /// // expands to 0 + self.0.heap_size() + self.1.heap_size() + ... - /// quote! { - /// 0 #( + self.#i.heap_size() )* - /// } - /// # } - /// # } - /// ``` - $quote - }; -} - -#[cfg(doc)] -__quote![ - #[macro_export] - macro_rules! quote { - ($($tt:tt)*) => { - ... - }; - } -]; - -#[cfg(not(doc))] -__quote![ - #[macro_export] - macro_rules! quote { - () => { - $crate::__private::TokenStream::new() - }; - - // Special case rule for a single tt, for performance. - ($tt:tt) => {{ - let mut _s = $crate::__private::TokenStream::new(); - $crate::quote_token!{$tt _s} - _s - }}; - - // Special case rules for two tts, for performance. - (# $var:ident) => {{ - let mut _s = $crate::__private::TokenStream::new(); - $crate::ToTokens::to_tokens(&$var, &mut _s); - _s - }}; - ($tt1:tt $tt2:tt) => {{ - let mut _s = $crate::__private::TokenStream::new(); - $crate::quote_token!{$tt1 _s} - $crate::quote_token!{$tt2 _s} - _s - }}; - - // Rule for any other number of tokens. - ($($tt:tt)*) => {{ - let mut _s = $crate::__private::TokenStream::new(); - $crate::quote_each_token!{_s $($tt)*} - _s - }}; - } -]; - -macro_rules! __quote_spanned { - ($quote_spanned:item) => { - /// Same as `quote!`, but applies a given span to all tokens originating within - /// the macro invocation. - /// - ///
- /// - /// # Syntax - /// - /// A span expression of type [`Span`], followed by `=>`, followed by the tokens - /// to quote. The span expression should be brief — use a variable for - /// anything more than a few characters. There should be no space before the - /// `=>` token. - /// - /// [`Span`]: proc_macro2::Span - /// - /// ``` - /// # use proc_macro2::Span; - /// # use quote::quote_spanned; - /// # - /// # const IGNORE_TOKENS: &'static str = stringify! { - /// let span = /* ... */; - /// # }; - /// # let span = Span::call_site(); - /// # let init = 0; - /// - /// // On one line, use parentheses. - /// let tokens = quote_spanned!(span=> Box::into_raw(Box::new(#init))); - /// - /// // On multiple lines, place the span at the top and use braces. - /// let tokens = quote_spanned! {span=> - /// Box::into_raw(Box::new(#init)) - /// }; - /// ``` - /// - /// The lack of space before the `=>` should look jarring to Rust programmers - /// and this is intentional. The formatting is designed to be visibly - /// off-balance and draw the eye a particular way, due to the span expression - /// being evaluated in the context of the procedural macro and the remaining - /// tokens being evaluated in the generated code. - /// - ///
- /// - /// # Hygiene - /// - /// Any interpolated tokens preserve the `Span` information provided by their - /// `ToTokens` implementation. Tokens that originate within the `quote_spanned!` - /// invocation are spanned with the given span argument. - /// - ///
- /// - /// # Example - /// - /// The following procedural macro code uses `quote_spanned!` to assert that a - /// particular Rust type implements the [`Sync`] trait so that references can be - /// safely shared between threads. - /// - /// ``` - /// # use quote::{quote_spanned, TokenStreamExt, ToTokens}; - /// # use proc_macro2::{Span, TokenStream}; - /// # - /// # struct Type; - /// # - /// # impl Type { - /// # fn span(&self) -> Span { - /// # Span::call_site() - /// # } - /// # } - /// # - /// # impl ToTokens for Type { - /// # fn to_tokens(&self, _tokens: &mut TokenStream) {} - /// # } - /// # - /// # let ty = Type; - /// # let call_site = Span::call_site(); - /// # - /// let ty_span = ty.span(); - /// let assert_sync = quote_spanned! {ty_span=> - /// struct _AssertSync where #ty: Sync; - /// }; - /// ``` - /// - /// If the assertion fails, the user will see an error like the following. The - /// input span of their type is highlighted in the error. - /// - /// ```text - /// error[E0277]: the trait bound `*const (): std::marker::Sync` is not satisfied - /// --> src/main.rs:10:21 - /// | - /// 10 | static ref PTR: *const () = &(); - /// | ^^^^^^^^^ `*const ()` cannot be shared between threads safely - /// ``` - /// - /// In this example it is important for the where-clause to be spanned with the - /// line/column information of the user's input type so that error messages are - /// placed appropriately by the compiler. - $quote_spanned - }; -} - -#[cfg(doc)] -__quote_spanned![ - #[macro_export] - macro_rules! quote_spanned { - ($span:expr=> $($tt:tt)*) => { - ... - }; - } -]; - -#[cfg(not(doc))] -__quote_spanned![ - #[macro_export] - macro_rules! quote_spanned { - ($span:expr=>) => {{ - let _: $crate::__private::Span = $crate::__private::get_span($span).__into_span(); - $crate::__private::TokenStream::new() - }}; - - // Special case rule for a single tt, for performance. - ($span:expr=> $tt:tt) => {{ - let mut _s = $crate::__private::TokenStream::new(); - let _span: $crate::__private::Span = $crate::__private::get_span($span).__into_span(); - $crate::quote_token_spanned!{$tt _s _span} - _s - }}; - - // Special case rules for two tts, for performance. - ($span:expr=> # $var:ident) => {{ - let mut _s = $crate::__private::TokenStream::new(); - let _: $crate::__private::Span = $crate::__private::get_span($span).__into_span(); - $crate::ToTokens::to_tokens(&$var, &mut _s); - _s - }}; - ($span:expr=> $tt1:tt $tt2:tt) => {{ - let mut _s = $crate::__private::TokenStream::new(); - let _span: $crate::__private::Span = $crate::__private::get_span($span).__into_span(); - $crate::quote_token_spanned!{$tt1 _s _span} - $crate::quote_token_spanned!{$tt2 _s _span} - _s - }}; - - // Rule for any other number of tokens. - ($span:expr=> $($tt:tt)*) => {{ - let mut _s = $crate::__private::TokenStream::new(); - let _span: $crate::__private::Span = $crate::__private::get_span($span).__into_span(); - $crate::quote_each_token_spanned!{_s _span $($tt)*} - _s - }}; - } -]; - -// Extract the names of all #metavariables and pass them to the $call macro. -// -// in: pounded_var_names!(then!(...) a #b c #( #d )* #e) -// out: then!(... b); -// then!(... d); -// then!(... e); -#[macro_export] -#[doc(hidden)] -macro_rules! pounded_var_names { - ($call:ident! $extra:tt $($tts:tt)*) => { - $crate::pounded_var_names_with_context!{$call! $extra - (@ $($tts)*) - ($($tts)* @) - } - }; -} - -#[macro_export] -#[doc(hidden)] -macro_rules! pounded_var_names_with_context { - ($call:ident! 
$extra:tt ($($b1:tt)*) ($($curr:tt)*)) => { - $( - $crate::pounded_var_with_context!{$call! $extra $b1 $curr} - )* - }; -} - -#[macro_export] -#[doc(hidden)] -macro_rules! pounded_var_with_context { - ($call:ident! $extra:tt $b1:tt ( $($inner:tt)* )) => { - $crate::pounded_var_names!{$call! $extra $($inner)*} - }; - - ($call:ident! $extra:tt $b1:tt [ $($inner:tt)* ]) => { - $crate::pounded_var_names!{$call! $extra $($inner)*} - }; - - ($call:ident! $extra:tt $b1:tt { $($inner:tt)* }) => { - $crate::pounded_var_names!{$call! $extra $($inner)*} - }; - - ($call:ident!($($extra:tt)*) # $var:ident) => { - $crate::$call!($($extra)* $var); - }; - - ($call:ident! $extra:tt $b1:tt $curr:tt) => {}; -} - -#[macro_export] -#[doc(hidden)] -macro_rules! quote_bind_into_iter { - ($has_iter:ident $var:ident) => { - // `mut` may be unused if $var occurs multiple times in the list. - #[allow(unused_mut)] - let (mut $var, i) = $var.quote_into_iter(); - let $has_iter = $has_iter | i; - }; -} - -#[macro_export] -#[doc(hidden)] -macro_rules! quote_bind_next_or_break { - ($var:ident) => { - let $var = match $var.next() { - Some(_x) => $crate::__private::RepInterp(_x), - None => break, - }; - }; -} - -// The obvious way to write this macro is as a tt muncher. This implementation -// does something more complex for two reasons. -// -// - With a tt muncher it's easy to hit Rust's built-in recursion_limit, which -// this implementation avoids because it isn't tail recursive. -// -// - Compile times for a tt muncher are quadratic relative to the length of -// the input. This implementation is linear, so it will be faster -// (potentially much faster) for big inputs. However, the constant factors -// of this implementation are higher than that of a tt muncher, so it is -// somewhat slower than a tt muncher if there are many invocations with -// short inputs. -// -// An invocation like this: -// -// quote_each_token!(_s a b c d e f g h i j); -// -// expands to this: -// -// quote_tokens_with_context!(_s -// (@ @ @ @ @ @ a b c d e f g h i j) -// (@ @ @ @ @ a b c d e f g h i j @) -// (@ @ @ @ a b c d e f g h i j @ @) -// (@ @ @ (a) (b) (c) (d) (e) (f) (g) (h) (i) (j) @ @ @) -// (@ @ a b c d e f g h i j @ @ @ @) -// (@ a b c d e f g h i j @ @ @ @ @) -// (a b c d e f g h i j @ @ @ @ @ @) -// ); -// -// which gets transposed and expanded to this: -// -// quote_token_with_context!(_s @ @ @ @ @ @ a); -// quote_token_with_context!(_s @ @ @ @ @ a b); -// quote_token_with_context!(_s @ @ @ @ a b c); -// quote_token_with_context!(_s @ @ @ (a) b c d); -// quote_token_with_context!(_s @ @ a (b) c d e); -// quote_token_with_context!(_s @ a b (c) d e f); -// quote_token_with_context!(_s a b c (d) e f g); -// quote_token_with_context!(_s b c d (e) f g h); -// quote_token_with_context!(_s c d e (f) g h i); -// quote_token_with_context!(_s d e f (g) h i j); -// quote_token_with_context!(_s e f g (h) i j @); -// quote_token_with_context!(_s f g h (i) j @ @); -// quote_token_with_context!(_s g h i (j) @ @ @); -// quote_token_with_context!(_s h i j @ @ @ @); -// quote_token_with_context!(_s i j @ @ @ @ @); -// quote_token_with_context!(_s j @ @ @ @ @ @); -// -// Without having used muncher-style recursion, we get one invocation of -// quote_token_with_context for each original tt, with three tts of context on -// either side. This is enough for the longest possible interpolation form (a -// repetition with separator, as in `# (#var) , *`) to be fully represented with -// the first or last tt in the middle. 
-// -// The middle tt (surrounded by parentheses) is the tt being processed. -// -// - When it is a `#`, quote_token_with_context can do an interpolation. The -// interpolation kind will depend on the three subsequent tts. -// -// - When it is within a later part of an interpolation, it can be ignored -// because the interpolation has already been done. -// -// - When it is not part of an interpolation it can be pushed as a single -// token into the output. -// -// - When the middle token is an unparenthesized `@`, that call is one of the -// first 3 or last 3 calls of quote_token_with_context and does not -// correspond to one of the original input tokens, so turns into nothing. -#[macro_export] -#[doc(hidden)] -macro_rules! quote_each_token { - ($tokens:ident $($tts:tt)*) => { - $crate::quote_tokens_with_context!{$tokens - (@ @ @ @ @ @ $($tts)*) - (@ @ @ @ @ $($tts)* @) - (@ @ @ @ $($tts)* @ @) - (@ @ @ $(($tts))* @ @ @) - (@ @ $($tts)* @ @ @ @) - (@ $($tts)* @ @ @ @ @) - ($($tts)* @ @ @ @ @ @) - } - }; -} - -// See the explanation on quote_each_token. -#[macro_export] -#[doc(hidden)] -macro_rules! quote_each_token_spanned { - ($tokens:ident $span:ident $($tts:tt)*) => { - $crate::quote_tokens_with_context_spanned!{$tokens $span - (@ @ @ @ @ @ $($tts)*) - (@ @ @ @ @ $($tts)* @) - (@ @ @ @ $($tts)* @ @) - (@ @ @ $(($tts))* @ @ @) - (@ @ $($tts)* @ @ @ @) - (@ $($tts)* @ @ @ @ @) - ($($tts)* @ @ @ @ @ @) - } - }; -} - -// See the explanation on quote_each_token. -#[macro_export] -#[doc(hidden)] -macro_rules! quote_tokens_with_context { - ($tokens:ident - ($($b3:tt)*) ($($b2:tt)*) ($($b1:tt)*) - ($($curr:tt)*) - ($($a1:tt)*) ($($a2:tt)*) ($($a3:tt)*) - ) => { - $( - $crate::quote_token_with_context!{$tokens $b3 $b2 $b1 $curr $a1 $a2 $a3} - )* - }; -} - -// See the explanation on quote_each_token. -#[macro_export] -#[doc(hidden)] -macro_rules! quote_tokens_with_context_spanned { - ($tokens:ident $span:ident - ($($b3:tt)*) ($($b2:tt)*) ($($b1:tt)*) - ($($curr:tt)*) - ($($a1:tt)*) ($($a2:tt)*) ($($a3:tt)*) - ) => { - $( - $crate::quote_token_with_context_spanned!{$tokens $span $b3 $b2 $b1 $curr $a1 $a2 $a3} - )* - }; -} - -// See the explanation on quote_each_token. -#[macro_export] -#[doc(hidden)] -macro_rules! quote_token_with_context { - // Unparenthesized `@` indicates this call does not correspond to one of the - // original input tokens. Ignore it. - ($tokens:ident $b3:tt $b2:tt $b1:tt @ $a1:tt $a2:tt $a3:tt) => {}; - - // A repetition with no separator. - ($tokens:ident $b3:tt $b2:tt $b1:tt (#) ( $($inner:tt)* ) * $a3:tt) => {{ - use $crate::__private::ext::*; - let has_iter = $crate::__private::HasIterator::; - $crate::pounded_var_names!{quote_bind_into_iter!(has_iter) () $($inner)*} - <_ as $crate::__private::CheckHasIterator>::check(has_iter); - // This is `while true` instead of `loop` because if there are no - // iterators used inside of this repetition then the body would not - // contain any `break`, so the compiler would emit unreachable code - // warnings on anything below the loop. We use has_iter to detect and - // fail to compile when there are no iterators, so here we just work - // around the unneeded extra warning. - while true { - $crate::pounded_var_names!{quote_bind_next_or_break!() () $($inner)*} - $crate::quote_each_token!{$tokens $($inner)*} - } - }}; - // ... and one step later. - ($tokens:ident $b3:tt $b2:tt # (( $($inner:tt)* )) * $a2:tt $a3:tt) => {}; - // ... and one step later. 
- ($tokens:ident $b3:tt # ( $($inner:tt)* ) (*) $a1:tt $a2:tt $a3:tt) => {}; - - // A repetition with separator. - ($tokens:ident $b3:tt $b2:tt $b1:tt (#) ( $($inner:tt)* ) $sep:tt *) => {{ - use $crate::__private::ext::*; - let mut _i = 0usize; - let has_iter = $crate::__private::HasIterator::; - $crate::pounded_var_names!{quote_bind_into_iter!(has_iter) () $($inner)*} - <_ as $crate::__private::CheckHasIterator>::check(has_iter); - while true { - $crate::pounded_var_names!{quote_bind_next_or_break!() () $($inner)*} - if _i > 0 { - $crate::quote_token!{$sep $tokens} - } - _i += 1; - $crate::quote_each_token!{$tokens $($inner)*} - } - }}; - // ... and one step later. - ($tokens:ident $b3:tt $b2:tt # (( $($inner:tt)* )) $sep:tt * $a3:tt) => {}; - // ... and one step later. - ($tokens:ident $b3:tt # ( $($inner:tt)* ) ($sep:tt) * $a2:tt $a3:tt) => {}; - // (A special case for `#(var)**`, where the first `*` is treated as the - // repetition symbol and the second `*` is treated as an ordinary token.) - ($tokens:ident # ( $($inner:tt)* ) * (*) $a1:tt $a2:tt $a3:tt) => { - // https://github.com/dtolnay/quote/issues/130 - $crate::quote_token!{* $tokens} - }; - // ... and one step later. - ($tokens:ident # ( $($inner:tt)* ) $sep:tt (*) $a1:tt $a2:tt $a3:tt) => {}; - - // A non-repetition interpolation. - ($tokens:ident $b3:tt $b2:tt $b1:tt (#) $var:ident $a2:tt $a3:tt) => { - $crate::ToTokens::to_tokens(&$var, &mut $tokens); - }; - // ... and one step later. - ($tokens:ident $b3:tt $b2:tt # ($var:ident) $a1:tt $a2:tt $a3:tt) => {}; - - // An ordinary token, not part of any interpolation. - ($tokens:ident $b3:tt $b2:tt $b1:tt ($curr:tt) $a1:tt $a2:tt $a3:tt) => { - $crate::quote_token!{$curr $tokens} - }; -} - -// See the explanation on quote_each_token, and on the individual rules of -// quote_token_with_context. -#[macro_export] -#[doc(hidden)] -macro_rules! 
quote_token_with_context_spanned { - ($tokens:ident $span:ident $b3:tt $b2:tt $b1:tt @ $a1:tt $a2:tt $a3:tt) => {}; - - ($tokens:ident $span:ident $b3:tt $b2:tt $b1:tt (#) ( $($inner:tt)* ) * $a3:tt) => {{ - use $crate::__private::ext::*; - let has_iter = $crate::__private::HasIterator::; - $crate::pounded_var_names!{quote_bind_into_iter!(has_iter) () $($inner)*} - <_ as $crate::__private::CheckHasIterator>::check(has_iter); - while true { - $crate::pounded_var_names!{quote_bind_next_or_break!() () $($inner)*} - $crate::quote_each_token_spanned!{$tokens $span $($inner)*} - } - }}; - ($tokens:ident $span:ident $b3:tt $b2:tt # (( $($inner:tt)* )) * $a2:tt $a3:tt) => {}; - ($tokens:ident $span:ident $b3:tt # ( $($inner:tt)* ) (*) $a1:tt $a2:tt $a3:tt) => {}; - - ($tokens:ident $span:ident $b3:tt $b2:tt $b1:tt (#) ( $($inner:tt)* ) $sep:tt *) => {{ - use $crate::__private::ext::*; - let mut _i = 0usize; - let has_iter = $crate::__private::HasIterator::; - $crate::pounded_var_names!{quote_bind_into_iter!(has_iter) () $($inner)*} - <_ as $crate::__private::CheckHasIterator>::check(has_iter); - while true { - $crate::pounded_var_names!{quote_bind_next_or_break!() () $($inner)*} - if _i > 0 { - $crate::quote_token_spanned!{$sep $tokens $span} - } - _i += 1; - $crate::quote_each_token_spanned!{$tokens $span $($inner)*} - } - }}; - ($tokens:ident $span:ident $b3:tt $b2:tt # (( $($inner:tt)* )) $sep:tt * $a3:tt) => {}; - ($tokens:ident $span:ident $b3:tt # ( $($inner:tt)* ) ($sep:tt) * $a2:tt $a3:tt) => {}; - ($tokens:ident $span:ident # ( $($inner:tt)* ) * (*) $a1:tt $a2:tt $a3:tt) => { - // https://github.com/dtolnay/quote/issues/130 - $crate::quote_token_spanned!{* $tokens $span} - }; - ($tokens:ident $span:ident # ( $($inner:tt)* ) $sep:tt (*) $a1:tt $a2:tt $a3:tt) => {}; - - ($tokens:ident $span:ident $b3:tt $b2:tt $b1:tt (#) $var:ident $a2:tt $a3:tt) => { - $crate::ToTokens::to_tokens(&$var, &mut $tokens); - }; - ($tokens:ident $span:ident $b3:tt $b2:tt # ($var:ident) $a1:tt $a2:tt $a3:tt) => {}; - - ($tokens:ident $span:ident $b3:tt $b2:tt $b1:tt ($curr:tt) $a1:tt $a2:tt $a3:tt) => { - $crate::quote_token_spanned!{$curr $tokens $span} - }; -} - -// These rules are ordered by approximate token frequency, at least for the -// first 10 or so, to improve compile times. Having `ident` first is by far the -// most important because it's typically 2-3x more common than the next most -// common token. -// -// Separately, we put the token being matched in the very front so that failing -// rules may fail to match as quickly as possible. -#[macro_export] -#[doc(hidden)] -macro_rules! quote_token { - ($ident:ident $tokens:ident) => { - $crate::__private::push_ident(&mut $tokens, stringify!($ident)); - }; - - (:: $tokens:ident) => { - $crate::__private::push_colon2(&mut $tokens); - }; - - (( $($inner:tt)* ) $tokens:ident) => { - $crate::__private::push_group( - &mut $tokens, - $crate::__private::Delimiter::Parenthesis, - $crate::quote!($($inner)*), - ); - }; - - ([ $($inner:tt)* ] $tokens:ident) => { - $crate::__private::push_group( - &mut $tokens, - $crate::__private::Delimiter::Bracket, - $crate::quote!($($inner)*), - ); - }; - - ({ $($inner:tt)* } $tokens:ident) => { - $crate::__private::push_group( - &mut $tokens, - $crate::__private::Delimiter::Brace, - $crate::quote!($($inner)*), - ); - }; - - (# $tokens:ident) => { - $crate::__private::push_pound(&mut $tokens); - }; - - (, $tokens:ident) => { - $crate::__private::push_comma(&mut $tokens); - }; - - (. 
$tokens:ident) => { - $crate::__private::push_dot(&mut $tokens); - }; - - (; $tokens:ident) => { - $crate::__private::push_semi(&mut $tokens); - }; - - (: $tokens:ident) => { - $crate::__private::push_colon(&mut $tokens); - }; - - (+ $tokens:ident) => { - $crate::__private::push_add(&mut $tokens); - }; - - (+= $tokens:ident) => { - $crate::__private::push_add_eq(&mut $tokens); - }; - - (& $tokens:ident) => { - $crate::__private::push_and(&mut $tokens); - }; - - (&& $tokens:ident) => { - $crate::__private::push_and_and(&mut $tokens); - }; - - (&= $tokens:ident) => { - $crate::__private::push_and_eq(&mut $tokens); - }; - - (@ $tokens:ident) => { - $crate::__private::push_at(&mut $tokens); - }; - - (! $tokens:ident) => { - $crate::__private::push_bang(&mut $tokens); - }; - - (^ $tokens:ident) => { - $crate::__private::push_caret(&mut $tokens); - }; - - (^= $tokens:ident) => { - $crate::__private::push_caret_eq(&mut $tokens); - }; - - (/ $tokens:ident) => { - $crate::__private::push_div(&mut $tokens); - }; - - (/= $tokens:ident) => { - $crate::__private::push_div_eq(&mut $tokens); - }; - - (.. $tokens:ident) => { - $crate::__private::push_dot2(&mut $tokens); - }; - - (... $tokens:ident) => { - $crate::__private::push_dot3(&mut $tokens); - }; - - (..= $tokens:ident) => { - $crate::__private::push_dot_dot_eq(&mut $tokens); - }; - - (= $tokens:ident) => { - $crate::__private::push_eq(&mut $tokens); - }; - - (== $tokens:ident) => { - $crate::__private::push_eq_eq(&mut $tokens); - }; - - (>= $tokens:ident) => { - $crate::__private::push_ge(&mut $tokens); - }; - - (> $tokens:ident) => { - $crate::__private::push_gt(&mut $tokens); - }; - - (<= $tokens:ident) => { - $crate::__private::push_le(&mut $tokens); - }; - - (< $tokens:ident) => { - $crate::__private::push_lt(&mut $tokens); - }; - - (*= $tokens:ident) => { - $crate::__private::push_mul_eq(&mut $tokens); - }; - - (!= $tokens:ident) => { - $crate::__private::push_ne(&mut $tokens); - }; - - (| $tokens:ident) => { - $crate::__private::push_or(&mut $tokens); - }; - - (|= $tokens:ident) => { - $crate::__private::push_or_eq(&mut $tokens); - }; - - (|| $tokens:ident) => { - $crate::__private::push_or_or(&mut $tokens); - }; - - (? 
$tokens:ident) => { - $crate::__private::push_question(&mut $tokens); - }; - - (-> $tokens:ident) => { - $crate::__private::push_rarrow(&mut $tokens); - }; - - (<- $tokens:ident) => { - $crate::__private::push_larrow(&mut $tokens); - }; - - (% $tokens:ident) => { - $crate::__private::push_rem(&mut $tokens); - }; - - (%= $tokens:ident) => { - $crate::__private::push_rem_eq(&mut $tokens); - }; - - (=> $tokens:ident) => { - $crate::__private::push_fat_arrow(&mut $tokens); - }; - - (<< $tokens:ident) => { - $crate::__private::push_shl(&mut $tokens); - }; - - (<<= $tokens:ident) => { - $crate::__private::push_shl_eq(&mut $tokens); - }; - - (>> $tokens:ident) => { - $crate::__private::push_shr(&mut $tokens); - }; - - (>>= $tokens:ident) => { - $crate::__private::push_shr_eq(&mut $tokens); - }; - - (* $tokens:ident) => { - $crate::__private::push_star(&mut $tokens); - }; - - (- $tokens:ident) => { - $crate::__private::push_sub(&mut $tokens); - }; - - (-= $tokens:ident) => { - $crate::__private::push_sub_eq(&mut $tokens); - }; - - ($lifetime:lifetime $tokens:ident) => { - $crate::__private::push_lifetime(&mut $tokens, stringify!($lifetime)); - }; - - (_ $tokens:ident) => { - $crate::__private::push_underscore(&mut $tokens); - }; - - ($other:tt $tokens:ident) => { - $crate::__private::parse(&mut $tokens, stringify!($other)); - }; -} - -// See the comment above `quote_token!` about the rule ordering. -#[macro_export] -#[doc(hidden)] -macro_rules! quote_token_spanned { - ($ident:ident $tokens:ident $span:ident) => { - $crate::__private::push_ident_spanned(&mut $tokens, $span, stringify!($ident)); - }; - - (:: $tokens:ident $span:ident) => { - $crate::__private::push_colon2_spanned(&mut $tokens, $span); - }; - - (( $($inner:tt)* ) $tokens:ident $span:ident) => { - $crate::__private::push_group_spanned( - &mut $tokens, - $span, - $crate::__private::Delimiter::Parenthesis, - $crate::quote_spanned!($span=> $($inner)*), - ); - }; - - ([ $($inner:tt)* ] $tokens:ident $span:ident) => { - $crate::__private::push_group_spanned( - &mut $tokens, - $span, - $crate::__private::Delimiter::Bracket, - $crate::quote_spanned!($span=> $($inner)*), - ); - }; - - ({ $($inner:tt)* } $tokens:ident $span:ident) => { - $crate::__private::push_group_spanned( - &mut $tokens, - $span, - $crate::__private::Delimiter::Brace, - $crate::quote_spanned!($span=> $($inner)*), - ); - }; - - (# $tokens:ident $span:ident) => { - $crate::__private::push_pound_spanned(&mut $tokens, $span); - }; - - (, $tokens:ident $span:ident) => { - $crate::__private::push_comma_spanned(&mut $tokens, $span); - }; - - (. $tokens:ident $span:ident) => { - $crate::__private::push_dot_spanned(&mut $tokens, $span); - }; - - (; $tokens:ident $span:ident) => { - $crate::__private::push_semi_spanned(&mut $tokens, $span); - }; - - (: $tokens:ident $span:ident) => { - $crate::__private::push_colon_spanned(&mut $tokens, $span); - }; - - (+ $tokens:ident $span:ident) => { - $crate::__private::push_add_spanned(&mut $tokens, $span); - }; - - (+= $tokens:ident $span:ident) => { - $crate::__private::push_add_eq_spanned(&mut $tokens, $span); - }; - - (& $tokens:ident $span:ident) => { - $crate::__private::push_and_spanned(&mut $tokens, $span); - }; - - (&& $tokens:ident $span:ident) => { - $crate::__private::push_and_and_spanned(&mut $tokens, $span); - }; - - (&= $tokens:ident $span:ident) => { - $crate::__private::push_and_eq_spanned(&mut $tokens, $span); - }; - - (@ $tokens:ident $span:ident) => { - $crate::__private::push_at_spanned(&mut $tokens, $span); - }; - - (! 
$tokens:ident $span:ident) => { - $crate::__private::push_bang_spanned(&mut $tokens, $span); - }; - - (^ $tokens:ident $span:ident) => { - $crate::__private::push_caret_spanned(&mut $tokens, $span); - }; - - (^= $tokens:ident $span:ident) => { - $crate::__private::push_caret_eq_spanned(&mut $tokens, $span); - }; - - (/ $tokens:ident $span:ident) => { - $crate::__private::push_div_spanned(&mut $tokens, $span); - }; - - (/= $tokens:ident $span:ident) => { - $crate::__private::push_div_eq_spanned(&mut $tokens, $span); - }; - - (.. $tokens:ident $span:ident) => { - $crate::__private::push_dot2_spanned(&mut $tokens, $span); - }; - - (... $tokens:ident $span:ident) => { - $crate::__private::push_dot3_spanned(&mut $tokens, $span); - }; - - (..= $tokens:ident $span:ident) => { - $crate::__private::push_dot_dot_eq_spanned(&mut $tokens, $span); - }; - - (= $tokens:ident $span:ident) => { - $crate::__private::push_eq_spanned(&mut $tokens, $span); - }; - - (== $tokens:ident $span:ident) => { - $crate::__private::push_eq_eq_spanned(&mut $tokens, $span); - }; - - (>= $tokens:ident $span:ident) => { - $crate::__private::push_ge_spanned(&mut $tokens, $span); - }; - - (> $tokens:ident $span:ident) => { - $crate::__private::push_gt_spanned(&mut $tokens, $span); - }; - - (<= $tokens:ident $span:ident) => { - $crate::__private::push_le_spanned(&mut $tokens, $span); - }; - - (< $tokens:ident $span:ident) => { - $crate::__private::push_lt_spanned(&mut $tokens, $span); - }; - - (*= $tokens:ident $span:ident) => { - $crate::__private::push_mul_eq_spanned(&mut $tokens, $span); - }; - - (!= $tokens:ident $span:ident) => { - $crate::__private::push_ne_spanned(&mut $tokens, $span); - }; - - (| $tokens:ident $span:ident) => { - $crate::__private::push_or_spanned(&mut $tokens, $span); - }; - - (|= $tokens:ident $span:ident) => { - $crate::__private::push_or_eq_spanned(&mut $tokens, $span); - }; - - (|| $tokens:ident $span:ident) => { - $crate::__private::push_or_or_spanned(&mut $tokens, $span); - }; - - (? 
$tokens:ident $span:ident) => { - $crate::__private::push_question_spanned(&mut $tokens, $span); - }; - - (-> $tokens:ident $span:ident) => { - $crate::__private::push_rarrow_spanned(&mut $tokens, $span); - }; - - (<- $tokens:ident $span:ident) => { - $crate::__private::push_larrow_spanned(&mut $tokens, $span); - }; - - (% $tokens:ident $span:ident) => { - $crate::__private::push_rem_spanned(&mut $tokens, $span); - }; - - (%= $tokens:ident $span:ident) => { - $crate::__private::push_rem_eq_spanned(&mut $tokens, $span); - }; - - (=> $tokens:ident $span:ident) => { - $crate::__private::push_fat_arrow_spanned(&mut $tokens, $span); - }; - - (<< $tokens:ident $span:ident) => { - $crate::__private::push_shl_spanned(&mut $tokens, $span); - }; - - (<<= $tokens:ident $span:ident) => { - $crate::__private::push_shl_eq_spanned(&mut $tokens, $span); - }; - - (>> $tokens:ident $span:ident) => { - $crate::__private::push_shr_spanned(&mut $tokens, $span); - }; - - (>>= $tokens:ident $span:ident) => { - $crate::__private::push_shr_eq_spanned(&mut $tokens, $span); - }; - - (* $tokens:ident $span:ident) => { - $crate::__private::push_star_spanned(&mut $tokens, $span); - }; - - (- $tokens:ident $span:ident) => { - $crate::__private::push_sub_spanned(&mut $tokens, $span); - }; - - (-= $tokens:ident $span:ident) => { - $crate::__private::push_sub_eq_spanned(&mut $tokens, $span); - }; - - ($lifetime:lifetime $tokens:ident $span:ident) => { - $crate::__private::push_lifetime_spanned(&mut $tokens, $span, stringify!($lifetime)); - }; - - (_ $tokens:ident $span:ident) => { - $crate::__private::push_underscore_spanned(&mut $tokens, $span); - }; - - ($other:tt $tokens:ident $span:ident) => { - $crate::__private::parse_spanned(&mut $tokens, $span, stringify!($other)); - }; -} diff --git a/vendor/quote/src/runtime.rs b/vendor/quote/src/runtime.rs deleted file mode 100644 index 28fb60c7a5fca9..00000000000000 --- a/vendor/quote/src/runtime.rs +++ /dev/null @@ -1,503 +0,0 @@ -use self::get_span::{GetSpan, GetSpanBase, GetSpanInner}; -use crate::{IdentFragment, ToTokens, TokenStreamExt}; -use core::fmt; -use core::iter; -use core::ops::BitOr; -use proc_macro2::{Group, Ident, Punct, Spacing, TokenTree}; - -#[doc(hidden)] -pub use alloc::format; -#[doc(hidden)] -pub use core::option::Option; - -#[doc(hidden)] -pub type Delimiter = proc_macro2::Delimiter; -#[doc(hidden)] -pub type Span = proc_macro2::Span; -#[doc(hidden)] -pub type TokenStream = proc_macro2::TokenStream; - -#[doc(hidden)] -pub struct HasIterator; - -impl BitOr> for HasIterator { - type Output = HasIterator; - fn bitor(self, _rhs: HasIterator) -> HasIterator { - HasIterator:: - } -} - -impl BitOr> for HasIterator { - type Output = HasIterator; - fn bitor(self, _rhs: HasIterator) -> HasIterator { - HasIterator:: - } -} - -impl BitOr> for HasIterator { - type Output = HasIterator; - fn bitor(self, _rhs: HasIterator) -> HasIterator { - HasIterator:: - } -} - -impl BitOr> for HasIterator { - type Output = HasIterator; - fn bitor(self, _rhs: HasIterator) -> HasIterator { - HasIterator:: - } -} - -#[doc(hidden)] -#[cfg_attr( - not(no_diagnostic_namespace), - diagnostic::on_unimplemented( - message = "repetition contains no interpolated value that is an iterator", - label = "none of the values interpolated inside this repetition are iterable" - ) -)] -pub trait CheckHasIterator: Sized { - fn check(self) {} -} - -impl CheckHasIterator for HasIterator {} - -/// Extension traits used by the implementation of `quote!`. 
These are defined -/// in separate traits, rather than as a single trait due to ambiguity issues. -/// -/// These traits expose a `quote_into_iter` method which should allow calling -/// whichever impl happens to be applicable. Calling that method repeatedly on -/// the returned value should be idempotent. -#[doc(hidden)] -pub mod ext { - use super::{HasIterator, RepInterp}; - use crate::ToTokens; - use alloc::collections::btree_set::{self, BTreeSet}; - use core::slice; - - /// Extension trait providing the `quote_into_iter` method on iterators. - #[doc(hidden)] - pub trait RepIteratorExt: Iterator + Sized { - fn quote_into_iter(self) -> (Self, HasIterator) { - (self, HasIterator::) - } - } - - impl RepIteratorExt for T {} - - /// Extension trait providing the `quote_into_iter` method for - /// non-iterable types. These types interpolate the same value in each - /// iteration of the repetition. - #[doc(hidden)] - pub trait RepToTokensExt { - /// Pretend to be an iterator for the purposes of `quote_into_iter`. - /// This allows repeated calls to `quote_into_iter` to continue - /// correctly returning HasIterator. - fn next(&self) -> Option<&Self> { - Some(self) - } - - fn quote_into_iter(&self) -> (&Self, HasIterator) { - (self, HasIterator::) - } - } - - impl RepToTokensExt for T {} - - /// Extension trait providing the `quote_into_iter` method for types that - /// can be referenced as an iterator. - #[doc(hidden)] - pub trait RepAsIteratorExt<'q> { - type Iter: Iterator; - - fn quote_into_iter(&'q self) -> (Self::Iter, HasIterator); - } - - impl<'q, T: RepAsIteratorExt<'q> + ?Sized> RepAsIteratorExt<'q> for &T { - type Iter = T::Iter; - - fn quote_into_iter(&'q self) -> (Self::Iter, HasIterator) { - ::quote_into_iter(*self) - } - } - - impl<'q, T: RepAsIteratorExt<'q> + ?Sized> RepAsIteratorExt<'q> for &mut T { - type Iter = T::Iter; - - fn quote_into_iter(&'q self) -> (Self::Iter, HasIterator) { - ::quote_into_iter(*self) - } - } - - impl<'q, T: 'q> RepAsIteratorExt<'q> for [T] { - type Iter = slice::Iter<'q, T>; - - fn quote_into_iter(&'q self) -> (Self::Iter, HasIterator) { - (self.iter(), HasIterator::) - } - } - - impl<'q, T: 'q, const N: usize> RepAsIteratorExt<'q> for [T; N] { - type Iter = slice::Iter<'q, T>; - - fn quote_into_iter(&'q self) -> (Self::Iter, HasIterator) { - (self.iter(), HasIterator::) - } - } - - impl<'q, T: 'q> RepAsIteratorExt<'q> for Vec { - type Iter = slice::Iter<'q, T>; - - fn quote_into_iter(&'q self) -> (Self::Iter, HasIterator) { - (self.iter(), HasIterator::) - } - } - - impl<'q, T: 'q> RepAsIteratorExt<'q> for BTreeSet { - type Iter = btree_set::Iter<'q, T>; - - fn quote_into_iter(&'q self) -> (Self::Iter, HasIterator) { - (self.iter(), HasIterator::) - } - } - - impl<'q, T: RepAsIteratorExt<'q>> RepAsIteratorExt<'q> for RepInterp { - type Iter = T::Iter; - - fn quote_into_iter(&'q self) -> (Self::Iter, HasIterator) { - self.0.quote_into_iter() - } - } -} - -// Helper type used within interpolations to allow for repeated binding names. -// Implements the relevant traits, and exports a dummy `next()` method. -#[derive(Copy, Clone)] -#[doc(hidden)] -pub struct RepInterp(pub T); - -impl RepInterp { - // This method is intended to look like `Iterator::next`, and is called when - // a name is bound multiple times, as the previous binding will shadow the - // original `Iterator` object. This allows us to avoid advancing the - // iterator multiple times per iteration. 
- pub fn next(self) -> Option { - Some(self.0) - } -} - -impl Iterator for RepInterp { - type Item = T::Item; - - fn next(&mut self) -> Option { - self.0.next() - } -} - -impl ToTokens for RepInterp { - fn to_tokens(&self, tokens: &mut TokenStream) { - self.0.to_tokens(tokens); - } -} - -#[doc(hidden)] -#[inline] -pub fn get_span(span: T) -> GetSpan { - GetSpan(GetSpanInner(GetSpanBase(span))) -} - -mod get_span { - use core::ops::Deref; - use proc_macro2::extra::DelimSpan; - use proc_macro2::Span; - - pub struct GetSpan(pub(crate) GetSpanInner); - - pub struct GetSpanInner(pub(crate) GetSpanBase); - - pub struct GetSpanBase(pub(crate) T); - - impl GetSpan { - #[inline] - pub fn __into_span(self) -> Span { - ((self.0).0).0 - } - } - - impl GetSpanInner { - #[inline] - pub fn __into_span(&self) -> Span { - (self.0).0.join() - } - } - - impl GetSpanBase { - #[allow(clippy::unused_self)] - pub fn __into_span(&self) -> T { - unreachable!() - } - } - - impl Deref for GetSpan { - type Target = GetSpanInner; - - #[inline] - fn deref(&self) -> &Self::Target { - &self.0 - } - } - - impl Deref for GetSpanInner { - type Target = GetSpanBase; - - #[inline] - fn deref(&self) -> &Self::Target { - &self.0 - } - } -} - -#[doc(hidden)] -pub fn push_group(tokens: &mut TokenStream, delimiter: Delimiter, inner: TokenStream) { - tokens.append(Group::new(delimiter, inner)); -} - -#[doc(hidden)] -pub fn push_group_spanned( - tokens: &mut TokenStream, - span: Span, - delimiter: Delimiter, - inner: TokenStream, -) { - let mut g = Group::new(delimiter, inner); - g.set_span(span); - tokens.append(g); -} - -#[doc(hidden)] -pub fn parse(tokens: &mut TokenStream, s: &str) { - let s: TokenStream = s.parse().expect("invalid token stream"); - tokens.extend(iter::once(s)); -} - -#[doc(hidden)] -pub fn parse_spanned(tokens: &mut TokenStream, span: Span, s: &str) { - let s: TokenStream = s.parse().expect("invalid token stream"); - for token in s { - tokens.append(respan_token_tree(token, span)); - } -} - -// Token tree with every span replaced by the given one. -fn respan_token_tree(mut token: TokenTree, span: Span) -> TokenTree { - match &mut token { - TokenTree::Group(g) => { - let mut tokens = TokenStream::new(); - for token in g.stream() { - tokens.append(respan_token_tree(token, span)); - } - *g = Group::new(g.delimiter(), tokens); - g.set_span(span); - } - other => other.set_span(span), - } - token -} - -#[doc(hidden)] -pub fn push_ident(tokens: &mut TokenStream, s: &str) { - let span = Span::call_site(); - push_ident_spanned(tokens, span, s); -} - -#[doc(hidden)] -pub fn push_ident_spanned(tokens: &mut TokenStream, span: Span, s: &str) { - tokens.append(ident_maybe_raw(s, span)); -} - -#[doc(hidden)] -pub fn push_lifetime(tokens: &mut TokenStream, lifetime: &str) { - tokens.append(TokenTree::Punct(Punct::new('\'', Spacing::Joint))); - tokens.append(TokenTree::Ident(Ident::new( - &lifetime[1..], - Span::call_site(), - ))); -} - -#[doc(hidden)] -pub fn push_lifetime_spanned(tokens: &mut TokenStream, span: Span, lifetime: &str) { - tokens.append(TokenTree::Punct({ - let mut apostrophe = Punct::new('\'', Spacing::Joint); - apostrophe.set_span(span); - apostrophe - })); - tokens.append(TokenTree::Ident(Ident::new(&lifetime[1..], span))); -} - -macro_rules! 
push_punct { - ($name:ident $spanned:ident $char1:tt) => { - #[doc(hidden)] - pub fn $name(tokens: &mut TokenStream) { - tokens.append(Punct::new($char1, Spacing::Alone)); - } - #[doc(hidden)] - pub fn $spanned(tokens: &mut TokenStream, span: Span) { - let mut punct = Punct::new($char1, Spacing::Alone); - punct.set_span(span); - tokens.append(punct); - } - }; - ($name:ident $spanned:ident $char1:tt $char2:tt) => { - #[doc(hidden)] - pub fn $name(tokens: &mut TokenStream) { - tokens.append(Punct::new($char1, Spacing::Joint)); - tokens.append(Punct::new($char2, Spacing::Alone)); - } - #[doc(hidden)] - pub fn $spanned(tokens: &mut TokenStream, span: Span) { - let mut punct = Punct::new($char1, Spacing::Joint); - punct.set_span(span); - tokens.append(punct); - let mut punct = Punct::new($char2, Spacing::Alone); - punct.set_span(span); - tokens.append(punct); - } - }; - ($name:ident $spanned:ident $char1:tt $char2:tt $char3:tt) => { - #[doc(hidden)] - pub fn $name(tokens: &mut TokenStream) { - tokens.append(Punct::new($char1, Spacing::Joint)); - tokens.append(Punct::new($char2, Spacing::Joint)); - tokens.append(Punct::new($char3, Spacing::Alone)); - } - #[doc(hidden)] - pub fn $spanned(tokens: &mut TokenStream, span: Span) { - let mut punct = Punct::new($char1, Spacing::Joint); - punct.set_span(span); - tokens.append(punct); - let mut punct = Punct::new($char2, Spacing::Joint); - punct.set_span(span); - tokens.append(punct); - let mut punct = Punct::new($char3, Spacing::Alone); - punct.set_span(span); - tokens.append(punct); - } - }; -} - -push_punct!(push_add push_add_spanned '+'); -push_punct!(push_add_eq push_add_eq_spanned '+' '='); -push_punct!(push_and push_and_spanned '&'); -push_punct!(push_and_and push_and_and_spanned '&' '&'); -push_punct!(push_and_eq push_and_eq_spanned '&' '='); -push_punct!(push_at push_at_spanned '@'); -push_punct!(push_bang push_bang_spanned '!'); -push_punct!(push_caret push_caret_spanned '^'); -push_punct!(push_caret_eq push_caret_eq_spanned '^' '='); -push_punct!(push_colon push_colon_spanned ':'); -push_punct!(push_colon2 push_colon2_spanned ':' ':'); -push_punct!(push_comma push_comma_spanned ','); -push_punct!(push_div push_div_spanned '/'); -push_punct!(push_div_eq push_div_eq_spanned '/' '='); -push_punct!(push_dot push_dot_spanned '.'); -push_punct!(push_dot2 push_dot2_spanned '.' '.'); -push_punct!(push_dot3 push_dot3_spanned '.' '.' '.'); -push_punct!(push_dot_dot_eq push_dot_dot_eq_spanned '.' '.' '='); -push_punct!(push_eq push_eq_spanned '='); -push_punct!(push_eq_eq push_eq_eq_spanned '=' '='); -push_punct!(push_ge push_ge_spanned '>' '='); -push_punct!(push_gt push_gt_spanned '>'); -push_punct!(push_le push_le_spanned '<' '='); -push_punct!(push_lt push_lt_spanned '<'); -push_punct!(push_mul_eq push_mul_eq_spanned '*' '='); -push_punct!(push_ne push_ne_spanned '!' 
'='); -push_punct!(push_or push_or_spanned '|'); -push_punct!(push_or_eq push_or_eq_spanned '|' '='); -push_punct!(push_or_or push_or_or_spanned '|' '|'); -push_punct!(push_pound push_pound_spanned '#'); -push_punct!(push_question push_question_spanned '?'); -push_punct!(push_rarrow push_rarrow_spanned '-' '>'); -push_punct!(push_larrow push_larrow_spanned '<' '-'); -push_punct!(push_rem push_rem_spanned '%'); -push_punct!(push_rem_eq push_rem_eq_spanned '%' '='); -push_punct!(push_fat_arrow push_fat_arrow_spanned '=' '>'); -push_punct!(push_semi push_semi_spanned ';'); -push_punct!(push_shl push_shl_spanned '<' '<'); -push_punct!(push_shl_eq push_shl_eq_spanned '<' '<' '='); -push_punct!(push_shr push_shr_spanned '>' '>'); -push_punct!(push_shr_eq push_shr_eq_spanned '>' '>' '='); -push_punct!(push_star push_star_spanned '*'); -push_punct!(push_sub push_sub_spanned '-'); -push_punct!(push_sub_eq push_sub_eq_spanned '-' '='); - -#[doc(hidden)] -pub fn push_underscore(tokens: &mut TokenStream) { - push_underscore_spanned(tokens, Span::call_site()); -} - -#[doc(hidden)] -pub fn push_underscore_spanned(tokens: &mut TokenStream, span: Span) { - tokens.append(Ident::new("_", span)); -} - -// Helper method for constructing identifiers from the `format_ident!` macro, -// handling `r#` prefixes. -#[doc(hidden)] -pub fn mk_ident(id: &str, span: Option) -> Ident { - let span = span.unwrap_or_else(Span::call_site); - ident_maybe_raw(id, span) -} - -fn ident_maybe_raw(id: &str, span: Span) -> Ident { - if let Some(id) = id.strip_prefix("r#") { - Ident::new_raw(id, span) - } else { - Ident::new(id, span) - } -} - -// Adapts from `IdentFragment` to `fmt::Display` for use by the `format_ident!` -// macro, and exposes span information from these fragments. -// -// This struct also has forwarding implementations of the formatting traits -// `Octal`, `LowerHex`, `UpperHex`, and `Binary` to allow for their use within -// `format_ident!`. -#[derive(Copy, Clone)] -#[doc(hidden)] -pub struct IdentFragmentAdapter(pub T); - -impl IdentFragmentAdapter { - pub fn span(&self) -> Option { - self.0.span() - } -} - -impl fmt::Display for IdentFragmentAdapter { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - IdentFragment::fmt(&self.0, f) - } -} - -impl fmt::Octal for IdentFragmentAdapter { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - fmt::Octal::fmt(&self.0, f) - } -} - -impl fmt::LowerHex for IdentFragmentAdapter { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - fmt::LowerHex::fmt(&self.0, f) - } -} - -impl fmt::UpperHex for IdentFragmentAdapter { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - fmt::UpperHex::fmt(&self.0, f) - } -} - -impl fmt::Binary for IdentFragmentAdapter { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - fmt::Binary::fmt(&self.0, f) - } -} diff --git a/vendor/quote/src/spanned.rs b/vendor/quote/src/spanned.rs deleted file mode 100644 index 6afc6b30355977..00000000000000 --- a/vendor/quote/src/spanned.rs +++ /dev/null @@ -1,49 +0,0 @@ -use crate::ToTokens; -use proc_macro2::extra::DelimSpan; -use proc_macro2::{Span, TokenStream}; - -// Not public API other than via the syn crate. Use syn::spanned::Spanned. 
-pub trait Spanned: private::Sealed { - fn __span(&self) -> Span; -} - -impl Spanned for Span { - fn __span(&self) -> Span { - *self - } -} - -impl Spanned for DelimSpan { - fn __span(&self) -> Span { - self.join() - } -} - -impl Spanned for T { - fn __span(&self) -> Span { - join_spans(self.into_token_stream()) - } -} - -fn join_spans(tokens: TokenStream) -> Span { - let mut iter = tokens.into_iter().map(|tt| tt.span()); - - let Some(first) = iter.next() else { - return Span::call_site(); - }; - - iter.fold(None, |_prev, next| Some(next)) - .and_then(|last| first.join(last)) - .unwrap_or(first) -} - -mod private { - use crate::ToTokens; - use proc_macro2::extra::DelimSpan; - use proc_macro2::Span; - - pub trait Sealed {} - impl Sealed for Span {} - impl Sealed for DelimSpan {} - impl Sealed for T {} -} diff --git a/vendor/quote/src/to_tokens.rs b/vendor/quote/src/to_tokens.rs deleted file mode 100644 index f373092b650fcb..00000000000000 --- a/vendor/quote/src/to_tokens.rs +++ /dev/null @@ -1,271 +0,0 @@ -use super::TokenStreamExt; -use alloc::borrow::Cow; -use alloc::rc::Rc; -use core::iter; -use proc_macro2::{Group, Ident, Literal, Punct, Span, TokenStream, TokenTree}; -use std::ffi::{CStr, CString}; - -/// Types that can be interpolated inside a `quote!` invocation. -pub trait ToTokens { - /// Write `self` to the given `TokenStream`. - /// - /// The token append methods provided by the [`TokenStreamExt`] extension - /// trait may be useful for implementing `ToTokens`. - /// - /// # Example - /// - /// Example implementation for a struct representing Rust paths like - /// `std::cmp::PartialEq`: - /// - /// ``` - /// use proc_macro2::{TokenTree, Spacing, Span, Punct, TokenStream}; - /// use quote::{TokenStreamExt, ToTokens}; - /// - /// pub struct Path { - /// pub global: bool, - /// pub segments: Vec, - /// } - /// - /// impl ToTokens for Path { - /// fn to_tokens(&self, tokens: &mut TokenStream) { - /// for (i, segment) in self.segments.iter().enumerate() { - /// if i > 0 || self.global { - /// // Double colon `::` - /// tokens.append(Punct::new(':', Spacing::Joint)); - /// tokens.append(Punct::new(':', Spacing::Alone)); - /// } - /// segment.to_tokens(tokens); - /// } - /// } - /// } - /// # - /// # pub struct PathSegment; - /// # - /// # impl ToTokens for PathSegment { - /// # fn to_tokens(&self, tokens: &mut TokenStream) { - /// # unimplemented!() - /// # } - /// # } - /// ``` - fn to_tokens(&self, tokens: &mut TokenStream); - - /// Convert `self` directly into a `TokenStream` object. - /// - /// This method is implicitly implemented using `to_tokens`, and acts as a - /// convenience method for consumers of the `ToTokens` trait. - fn to_token_stream(&self) -> TokenStream { - let mut tokens = TokenStream::new(); - self.to_tokens(&mut tokens); - tokens - } - - /// Convert `self` directly into a `TokenStream` object. - /// - /// This method is implicitly implemented using `to_tokens`, and acts as a - /// convenience method for consumers of the `ToTokens` trait. 
- fn into_token_stream(self) -> TokenStream - where - Self: Sized, - { - self.to_token_stream() - } -} - -impl ToTokens for &T { - fn to_tokens(&self, tokens: &mut TokenStream) { - (**self).to_tokens(tokens); - } -} - -impl ToTokens for &mut T { - fn to_tokens(&self, tokens: &mut TokenStream) { - (**self).to_tokens(tokens); - } -} - -impl<'a, T: ?Sized + ToOwned + ToTokens> ToTokens for Cow<'a, T> { - fn to_tokens(&self, tokens: &mut TokenStream) { - (**self).to_tokens(tokens); - } -} - -impl ToTokens for Box { - fn to_tokens(&self, tokens: &mut TokenStream) { - (**self).to_tokens(tokens); - } -} - -impl ToTokens for Rc { - fn to_tokens(&self, tokens: &mut TokenStream) { - (**self).to_tokens(tokens); - } -} - -impl ToTokens for Option { - fn to_tokens(&self, tokens: &mut TokenStream) { - if let Some(t) = self { - t.to_tokens(tokens); - } - } -} - -impl ToTokens for str { - fn to_tokens(&self, tokens: &mut TokenStream) { - tokens.append(Literal::string(self)); - } -} - -impl ToTokens for String { - fn to_tokens(&self, tokens: &mut TokenStream) { - self.as_str().to_tokens(tokens); - } -} - -impl ToTokens for i8 { - fn to_tokens(&self, tokens: &mut TokenStream) { - tokens.append(Literal::i8_suffixed(*self)); - } -} - -impl ToTokens for i16 { - fn to_tokens(&self, tokens: &mut TokenStream) { - tokens.append(Literal::i16_suffixed(*self)); - } -} - -impl ToTokens for i32 { - fn to_tokens(&self, tokens: &mut TokenStream) { - tokens.append(Literal::i32_suffixed(*self)); - } -} - -impl ToTokens for i64 { - fn to_tokens(&self, tokens: &mut TokenStream) { - tokens.append(Literal::i64_suffixed(*self)); - } -} - -impl ToTokens for i128 { - fn to_tokens(&self, tokens: &mut TokenStream) { - tokens.append(Literal::i128_suffixed(*self)); - } -} - -impl ToTokens for isize { - fn to_tokens(&self, tokens: &mut TokenStream) { - tokens.append(Literal::isize_suffixed(*self)); - } -} - -impl ToTokens for u8 { - fn to_tokens(&self, tokens: &mut TokenStream) { - tokens.append(Literal::u8_suffixed(*self)); - } -} - -impl ToTokens for u16 { - fn to_tokens(&self, tokens: &mut TokenStream) { - tokens.append(Literal::u16_suffixed(*self)); - } -} - -impl ToTokens for u32 { - fn to_tokens(&self, tokens: &mut TokenStream) { - tokens.append(Literal::u32_suffixed(*self)); - } -} - -impl ToTokens for u64 { - fn to_tokens(&self, tokens: &mut TokenStream) { - tokens.append(Literal::u64_suffixed(*self)); - } -} - -impl ToTokens for u128 { - fn to_tokens(&self, tokens: &mut TokenStream) { - tokens.append(Literal::u128_suffixed(*self)); - } -} - -impl ToTokens for usize { - fn to_tokens(&self, tokens: &mut TokenStream) { - tokens.append(Literal::usize_suffixed(*self)); - } -} - -impl ToTokens for f32 { - fn to_tokens(&self, tokens: &mut TokenStream) { - tokens.append(Literal::f32_suffixed(*self)); - } -} - -impl ToTokens for f64 { - fn to_tokens(&self, tokens: &mut TokenStream) { - tokens.append(Literal::f64_suffixed(*self)); - } -} - -impl ToTokens for char { - fn to_tokens(&self, tokens: &mut TokenStream) { - tokens.append(Literal::character(*self)); - } -} - -impl ToTokens for bool { - fn to_tokens(&self, tokens: &mut TokenStream) { - let word = if *self { "true" } else { "false" }; - tokens.append(Ident::new(word, Span::call_site())); - } -} - -impl ToTokens for CStr { - fn to_tokens(&self, tokens: &mut TokenStream) { - tokens.append(Literal::c_string(self)); - } -} - -impl ToTokens for CString { - fn to_tokens(&self, tokens: &mut TokenStream) { - tokens.append(Literal::c_string(self)); - } -} - -impl ToTokens for Group { - fn 
to_tokens(&self, tokens: &mut TokenStream) { - tokens.append(self.clone()); - } -} - -impl ToTokens for Ident { - fn to_tokens(&self, tokens: &mut TokenStream) { - tokens.append(self.clone()); - } -} - -impl ToTokens for Punct { - fn to_tokens(&self, tokens: &mut TokenStream) { - tokens.append(self.clone()); - } -} - -impl ToTokens for Literal { - fn to_tokens(&self, tokens: &mut TokenStream) { - tokens.append(self.clone()); - } -} - -impl ToTokens for TokenTree { - fn to_tokens(&self, tokens: &mut TokenStream) { - tokens.append(self.clone()); - } -} - -impl ToTokens for TokenStream { - fn to_tokens(&self, tokens: &mut TokenStream) { - tokens.extend(iter::once(self.clone())); - } - - fn into_token_stream(self) -> TokenStream { - self - } -} diff --git a/vendor/quote/tests/compiletest.rs b/vendor/quote/tests/compiletest.rs deleted file mode 100644 index 23a6a065ec960a..00000000000000 --- a/vendor/quote/tests/compiletest.rs +++ /dev/null @@ -1,7 +0,0 @@ -#[rustversion::attr(not(nightly), ignore = "requires nightly")] -#[cfg_attr(miri, ignore = "incompatible with miri")] -#[test] -fn ui() { - let t = trybuild::TestCases::new(); - t.compile_fail("tests/ui/*.rs"); -} diff --git a/vendor/quote/tests/test.rs b/vendor/quote/tests/test.rs deleted file mode 100644 index e096780e1fee05..00000000000000 --- a/vendor/quote/tests/test.rs +++ /dev/null @@ -1,568 +0,0 @@ -#![allow( - clippy::disallowed_names, - clippy::let_underscore_untyped, - clippy::shadow_unrelated, - clippy::unseparated_literal_suffix, - clippy::used_underscore_binding -)] - -extern crate proc_macro; - -use proc_macro2::{Delimiter, Group, Ident, Span, TokenStream}; -use quote::{format_ident, quote, quote_spanned, TokenStreamExt}; -use std::borrow::Cow; -use std::collections::BTreeSet; -use std::ffi::{CStr, CString}; - -struct X; - -impl quote::ToTokens for X { - fn to_tokens(&self, tokens: &mut TokenStream) { - tokens.append(Ident::new("X", Span::call_site())); - } -} - -#[test] -fn test_quote_impl() { - let tokens = quote! { - impl<'a, T: ToTokens> ToTokens for &'a T { - fn to_tokens(&self, tokens: &mut TokenStream) { - (**self).to_tokens(tokens) - } - } - }; - - let expected = concat!( - "impl < 'a , T : ToTokens > ToTokens for & 'a T { ", - "fn to_tokens (& self , tokens : & mut TokenStream) { ", - "(* * self) . to_tokens (tokens) ", - "} ", - "}" - ); - - assert_eq!(expected, tokens.to_string()); -} - -#[test] -fn test_quote_spanned_impl() { - let span = Span::call_site(); - let tokens = quote_spanned! {span=> - impl<'a, T: ToTokens> ToTokens for &'a T { - fn to_tokens(&self, tokens: &mut TokenStream) { - (**self).to_tokens(tokens) - } - } - }; - - let expected = concat!( - "impl < 'a , T : ToTokens > ToTokens for & 'a T { ", - "fn to_tokens (& self , tokens : & mut TokenStream) { ", - "(* * self) . 
to_tokens (tokens) ", - "} ", - "}" - ); - - assert_eq!(expected, tokens.to_string()); -} - -#[test] -fn test_substitution() { - let x = X; - let tokens = quote!(#x <#x> (#x) [#x] {#x}); - - let expected = "X < X > (X) [X] { X }"; - - assert_eq!(expected, tokens.to_string()); -} - -#[test] -fn test_iter() { - let primes = &[X, X, X, X]; - - assert_eq!("X X X X", quote!(#(#primes)*).to_string()); - - assert_eq!("X , X , X , X ,", quote!(#(#primes,)*).to_string()); - - assert_eq!("X , X , X , X", quote!(#(#primes),*).to_string()); -} - -#[test] -fn test_array() { - let array: [u8; 40] = [0; 40]; - let _ = quote!(#(#array #array)*); - - let ref_array: &[u8; 40] = &[0; 40]; - let _ = quote!(#(#ref_array #ref_array)*); - - let ref_slice: &[u8] = &[0; 40]; - let _ = quote!(#(#ref_slice #ref_slice)*); - - let array: [X; 2] = [X, X]; // !Copy - let _ = quote!(#(#array #array)*); - - let ref_array: &[X; 2] = &[X, X]; - let _ = quote!(#(#ref_array #ref_array)*); - - let ref_slice: &[X] = &[X, X]; - let _ = quote!(#(#ref_slice #ref_slice)*); - - let array_of_array: [[u8; 2]; 2] = [[0; 2]; 2]; - let _ = quote!(#(#(#array_of_array)*)*); -} - -#[test] -fn test_advanced() { - let generics = quote!( <'a, T> ); - - let where_clause = quote!( where T: Serialize ); - - let field_ty = quote!(String); - - let item_ty = quote!(Cow<'a, str>); - - let path = quote!(SomeTrait::serialize_with); - - let value = quote!(self.x); - - let tokens = quote! { - struct SerializeWith #generics #where_clause { - value: &'a #field_ty, - phantom: ::std::marker::PhantomData<#item_ty>, - } - - impl #generics ::serde::Serialize for SerializeWith #generics #where_clause { - fn serialize(&self, s: &mut S) -> Result<(), S::Error> - where S: ::serde::Serializer - { - #path(self.value, s) - } - } - - SerializeWith { - value: #value, - phantom: ::std::marker::PhantomData::<#item_ty>, - } - }; - - let expected = concat!( - "struct SerializeWith < 'a , T > where T : Serialize { ", - "value : & 'a String , ", - "phantom : :: std :: marker :: PhantomData < Cow < 'a , str > > , ", - "} ", - "impl < 'a , T > :: serde :: Serialize for SerializeWith < 'a , T > where T : Serialize { ", - "fn serialize < S > (& self , s : & mut S) -> Result < () , S :: Error > ", - "where S : :: serde :: Serializer ", - "{ ", - "SomeTrait :: serialize_with (self . value , s) ", - "} ", - "} ", - "SerializeWith { ", - "value : self . x , ", - "phantom : :: std :: marker :: PhantomData :: < Cow < 'a , str > > , ", - "}" - ); - - assert_eq!(expected, tokens.to_string()); -} - -#[test] -fn test_integer() { - let ii8 = -1i8; - let ii16 = -1i16; - let ii32 = -1i32; - let ii64 = -1i64; - let ii128 = -1i128; - let iisize = -1isize; - let uu8 = 1u8; - let uu16 = 1u16; - let uu32 = 1u32; - let uu64 = 1u64; - let uu128 = 1u128; - let uusize = 1usize; - - let tokens = quote! { - 1 1i32 1u256 - #ii8 #ii16 #ii32 #ii64 #ii128 #iisize - #uu8 #uu16 #uu32 #uu64 #uu128 #uusize - }; - let expected = - "1 1i32 1u256 - 1i8 - 1i16 - 1i32 - 1i64 - 1i128 - 1isize 1u8 1u16 1u32 1u64 1u128 1usize"; - assert_eq!(expected, tokens.to_string()); -} - -#[test] -fn test_floating() { - let e32 = 2.345f32; - - let e64 = 2.345f64; - - let tokens = quote! { - #e32 - #e64 - }; - let expected = "2.345f32 2.345f64"; - assert_eq!(expected, tokens.to_string()); -} - -#[test] -fn test_char() { - let zero = '\u{1}'; - let pound = '#'; - let quote = '"'; - let apost = '\''; - let newline = '\n'; - let heart = '\u{2764}'; - - let tokens = quote! 
{ - #zero #pound #quote #apost #newline #heart - }; - let expected = "'\\u{1}' '#' '\"' '\\'' '\\n' '\u{2764}'"; - assert_eq!(expected, tokens.to_string()); -} - -#[test] -fn test_str() { - let s = "\u{1} a 'b \" c"; - let tokens = quote!(#s); - let expected = "\"\\u{1} a 'b \\\" c\""; - assert_eq!(expected, tokens.to_string()); -} - -#[test] -fn test_string() { - let s = "\u{1} a 'b \" c".to_string(); - let tokens = quote!(#s); - let expected = "\"\\u{1} a 'b \\\" c\""; - assert_eq!(expected, tokens.to_string()); -} - -#[test] -fn test_c_str() { - let s = CStr::from_bytes_with_nul(b"\x01 a 'b \" c\0").unwrap(); - let tokens = quote!(#s); - let expected = "c\"\\u{1} a 'b \\\" c\""; - assert_eq!(expected, tokens.to_string()); -} - -#[test] -fn test_c_string() { - let s = CString::new(&b"\x01 a 'b \" c"[..]).unwrap(); - let tokens = quote!(#s); - let expected = "c\"\\u{1} a 'b \\\" c\""; - assert_eq!(expected, tokens.to_string()); -} - -#[test] -fn test_interpolated_literal() { - macro_rules! m { - ($literal:literal) => { - quote!($literal) - }; - } - - let tokens = m!(1); - let expected = "1"; - assert_eq!(expected, tokens.to_string()); - - let tokens = m!(-1); - let expected = "- 1"; - assert_eq!(expected, tokens.to_string()); - - let tokens = m!(true); - let expected = "true"; - assert_eq!(expected, tokens.to_string()); - - let tokens = m!(-true); - let expected = "- true"; - assert_eq!(expected, tokens.to_string()); -} - -#[test] -fn test_ident() { - let foo = Ident::new("Foo", Span::call_site()); - let bar = Ident::new(&format!("Bar{}", 7), Span::call_site()); - let tokens = quote!(struct #foo; enum #bar {}); - let expected = "struct Foo ; enum Bar7 { }"; - assert_eq!(expected, tokens.to_string()); -} - -#[test] -fn test_underscore() { - let tokens = quote!(let _;); - let expected = "let _ ;"; - assert_eq!(expected, tokens.to_string()); -} - -#[test] -fn test_duplicate() { - let ch = 'x'; - - let tokens = quote!(#ch #ch); - - let expected = "'x' 'x'"; - assert_eq!(expected, tokens.to_string()); -} - -#[test] -fn test_fancy_repetition() { - let foo = vec!["a", "b"]; - let bar = vec![true, false]; - - let tokens = quote! { - #(#foo: #bar),* - }; - - let expected = r#""a" : true , "b" : false"#; - assert_eq!(expected, tokens.to_string()); -} - -#[test] -fn test_nested_fancy_repetition() { - let nested = vec![vec!['a', 'b', 'c'], vec!['x', 'y', 'z']]; - - let tokens = quote! { - #( - #(#nested)* - ),* - }; - - let expected = "'a' 'b' 'c' , 'x' 'y' 'z'"; - assert_eq!(expected, tokens.to_string()); -} - -#[test] -fn test_duplicate_name_repetition() { - let foo = &["a", "b"]; - - let tokens = quote! { - #(#foo: #foo),* - #(#foo: #foo),* - }; - - let expected = r#""a" : "a" , "b" : "b" "a" : "a" , "b" : "b""#; - assert_eq!(expected, tokens.to_string()); -} - -#[test] -fn test_duplicate_name_repetition_no_copy() { - let foo = vec!["a".to_owned(), "b".to_owned()]; - - let tokens = quote! { - #(#foo: #foo),* - }; - - let expected = r#""a" : "a" , "b" : "b""#; - assert_eq!(expected, tokens.to_string()); -} - -#[test] -fn test_btreeset_repetition() { - let mut set = BTreeSet::new(); - set.insert("a".to_owned()); - set.insert("b".to_owned()); - - let tokens = quote! { - #(#set: #set),* - }; - - let expected = r#""a" : "a" , "b" : "b""#; - assert_eq!(expected, tokens.to_string()); -} - -#[test] -fn test_variable_name_conflict() { - // The implementation of `#(...),*` uses the variable `_i` but it should be - // fine, if a little confusing when debugging. 
- let _i = vec!['a', 'b']; - let tokens = quote! { #(#_i),* }; - let expected = "'a' , 'b'"; - assert_eq!(expected, tokens.to_string()); -} - -#[test] -fn test_nonrep_in_repetition() { - let rep = vec!["a", "b"]; - let nonrep = "c"; - - let tokens = quote! { - #(#rep #rep : #nonrep #nonrep),* - }; - - let expected = r#""a" "a" : "c" "c" , "b" "b" : "c" "c""#; - assert_eq!(expected, tokens.to_string()); -} - -#[test] -fn test_empty_quote() { - let tokens = quote!(); - assert_eq!("", tokens.to_string()); -} - -#[test] -fn test_box_str() { - let b = "str".to_owned().into_boxed_str(); - let tokens = quote! { #b }; - assert_eq!("\"str\"", tokens.to_string()); -} - -#[test] -fn test_cow() { - let owned: Cow = Cow::Owned(Ident::new("owned", Span::call_site())); - - let ident = Ident::new("borrowed", Span::call_site()); - let borrowed = Cow::Borrowed(&ident); - - let tokens = quote! { #owned #borrowed }; - assert_eq!("owned borrowed", tokens.to_string()); -} - -#[test] -fn test_closure() { - fn field_i(i: usize) -> Ident { - format_ident!("__field{}", i) - } - - let fields = (0usize..3) - .map(field_i as fn(_) -> _) - .map(|var| quote! { #var }); - - let tokens = quote! { #(#fields)* }; - assert_eq!("__field0 __field1 __field2", tokens.to_string()); -} - -#[test] -fn test_append_tokens() { - let mut a = quote!(a); - let b = quote!(b); - a.append_all(b); - assert_eq!("a b", a.to_string()); -} - -#[test] -fn test_format_ident() { - let id0 = format_ident!("Aa"); - let id1 = format_ident!("Hello{x}", x = id0); - let id2 = format_ident!("Hello{x}", x = 5usize); - let id3 = format_ident!("Hello{}_{x}", id0, x = 10usize); - let id4 = format_ident!("Aa", span = Span::call_site()); - let id5 = format_ident!("Hello{}", Cow::Borrowed("World")); - - assert_eq!(id0, "Aa"); - assert_eq!(id1, "HelloAa"); - assert_eq!(id2, "Hello5"); - assert_eq!(id3, "HelloAa_10"); - assert_eq!(id4, "Aa"); - assert_eq!(id5, "HelloWorld"); -} - -#[test] -fn test_format_ident_strip_raw() { - let id = format_ident!("r#struct"); - let my_id = format_ident!("MyId{}", id); - let raw_my_id = format_ident!("r#MyId{}", id); - - assert_eq!(id, "r#struct"); - assert_eq!(my_id, "MyIdstruct"); - assert_eq!(raw_my_id, "r#MyIdstruct"); -} - -#[test] -fn test_outer_line_comment() { - let tokens = quote! { - /// doc - }; - let expected = "# [doc = r\" doc\"]"; - assert_eq!(expected, tokens.to_string()); -} - -#[test] -fn test_inner_line_comment() { - let tokens = quote! { - //! doc - }; - let expected = "# ! [doc = r\" doc\"]"; - assert_eq!(expected, tokens.to_string()); -} - -#[test] -fn test_outer_block_comment() { - let tokens = quote! { - /** doc */ - }; - let expected = "# [doc = r\" doc \"]"; - assert_eq!(expected, tokens.to_string()); -} - -#[test] -fn test_inner_block_comment() { - let tokens = quote! { - /*! doc */ - }; - let expected = "# ! [doc = r\" doc \"]"; - assert_eq!(expected, tokens.to_string()); -} - -#[test] -fn test_outer_attr() { - let tokens = quote! { - #[inline] - }; - let expected = "# [inline]"; - assert_eq!(expected, tokens.to_string()); -} - -#[test] -fn test_inner_attr() { - let tokens = quote! { - #![no_std] - }; - let expected = "# ! [no_std]"; - assert_eq!(expected, tokens.to_string()); -} - -// https://github.com/dtolnay/quote/issues/130 -#[test] -fn test_star_after_repetition() { - let c = vec!['0', '1']; - let tokens = quote! 
{ - #( - f(#c); - )* - *out = None; - }; - let expected = "f ('0') ; f ('1') ; * out = None ;"; - assert_eq!(expected, tokens.to_string()); -} - -#[test] -fn test_quote_raw_id() { - let id = quote!(r#raw_id); - assert_eq!(id.to_string(), "r#raw_id"); -} - -#[test] -fn test_type_inference_for_span() { - trait CallSite { - fn get() -> Self; - } - - impl CallSite for Span { - fn get() -> Self { - Span::call_site() - } - } - - let span = Span::call_site(); - let _ = quote_spanned!(span=> ...); - - let delim_span = Group::new(Delimiter::Parenthesis, TokenStream::new()).delim_span(); - let _ = quote_spanned!(delim_span=> ...); - - let inferred = CallSite::get(); - let _ = quote_spanned!(inferred=> ...); - - if false { - let proc_macro_span = proc_macro::Span::call_site(); - let _ = quote_spanned!(proc_macro_span.into()=> ...); - } -} diff --git a/vendor/quote/tests/ui/does-not-have-iter-interpolated-dup.rs b/vendor/quote/tests/ui/does-not-have-iter-interpolated-dup.rs deleted file mode 100644 index 0a39f4150704fb..00000000000000 --- a/vendor/quote/tests/ui/does-not-have-iter-interpolated-dup.rs +++ /dev/null @@ -1,9 +0,0 @@ -use quote::quote; - -fn main() { - let nonrep = ""; - - // Without some protection against repetitions with no iterator somewhere - // inside, this would loop infinitely. - quote!(#(#nonrep #nonrep)*); -} diff --git a/vendor/quote/tests/ui/does-not-have-iter-interpolated-dup.stderr b/vendor/quote/tests/ui/does-not-have-iter-interpolated-dup.stderr deleted file mode 100644 index 96af816336d04e..00000000000000 --- a/vendor/quote/tests/ui/does-not-have-iter-interpolated-dup.stderr +++ /dev/null @@ -1,13 +0,0 @@ -error[E0277]: repetition contains no interpolated value that is an iterator - --> tests/ui/does-not-have-iter-interpolated-dup.rs:8:5 - | -8 | quote!(#(#nonrep #nonrep)*); - | ^^^^^^^^^^^^^^^^^^^^^^^^^^^ none of the values interpolated inside this repetition are iterable - | -help: the trait `CheckHasIterator` is not implemented for `HasIterator` - but it is implemented for `HasIterator` - --> src/runtime.rs - | - | impl CheckHasIterator for HasIterator {} - | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - = note: this error originates in the macro `$crate::quote_token_with_context` which comes from the expansion of the macro `quote` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/vendor/quote/tests/ui/does-not-have-iter-interpolated.rs b/vendor/quote/tests/ui/does-not-have-iter-interpolated.rs deleted file mode 100644 index 2c740cc0830fd8..00000000000000 --- a/vendor/quote/tests/ui/does-not-have-iter-interpolated.rs +++ /dev/null @@ -1,9 +0,0 @@ -use quote::quote; - -fn main() { - let nonrep = ""; - - // Without some protection against repetitions with no iterator somewhere - // inside, this would loop infinitely. 
- quote!(#(#nonrep)*); -} diff --git a/vendor/quote/tests/ui/does-not-have-iter-interpolated.stderr b/vendor/quote/tests/ui/does-not-have-iter-interpolated.stderr deleted file mode 100644 index 0c0572c90887a0..00000000000000 --- a/vendor/quote/tests/ui/does-not-have-iter-interpolated.stderr +++ /dev/null @@ -1,13 +0,0 @@ -error[E0277]: repetition contains no interpolated value that is an iterator - --> tests/ui/does-not-have-iter-interpolated.rs:8:5 - | -8 | quote!(#(#nonrep)*); - | ^^^^^^^^^^^^^^^^^^^ none of the values interpolated inside this repetition are iterable - | -help: the trait `CheckHasIterator` is not implemented for `HasIterator` - but it is implemented for `HasIterator` - --> src/runtime.rs - | - | impl CheckHasIterator for HasIterator {} - | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - = note: this error originates in the macro `$crate::quote_token_with_context` which comes from the expansion of the macro `quote` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/vendor/quote/tests/ui/does-not-have-iter-separated.rs b/vendor/quote/tests/ui/does-not-have-iter-separated.rs deleted file mode 100644 index c027243ddac68f..00000000000000 --- a/vendor/quote/tests/ui/does-not-have-iter-separated.rs +++ /dev/null @@ -1,5 +0,0 @@ -use quote::quote; - -fn main() { - quote!(#(a b),*); -} diff --git a/vendor/quote/tests/ui/does-not-have-iter-separated.stderr b/vendor/quote/tests/ui/does-not-have-iter-separated.stderr deleted file mode 100644 index e899fb483052aa..00000000000000 --- a/vendor/quote/tests/ui/does-not-have-iter-separated.stderr +++ /dev/null @@ -1,13 +0,0 @@ -error[E0277]: repetition contains no interpolated value that is an iterator - --> tests/ui/does-not-have-iter-separated.rs:4:5 - | -4 | quote!(#(a b),*); - | ^^^^^^^^^^^^^^^^ none of the values interpolated inside this repetition are iterable - | -help: the trait `CheckHasIterator` is not implemented for `HasIterator` - but it is implemented for `HasIterator` - --> src/runtime.rs - | - | impl CheckHasIterator for HasIterator {} - | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - = note: this error originates in the macro `$crate::quote_token_with_context` which comes from the expansion of the macro `quote` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/vendor/quote/tests/ui/does-not-have-iter.rs b/vendor/quote/tests/ui/does-not-have-iter.rs deleted file mode 100644 index 8908353b57d738..00000000000000 --- a/vendor/quote/tests/ui/does-not-have-iter.rs +++ /dev/null @@ -1,5 +0,0 @@ -use quote::quote; - -fn main() { - quote!(#(a b)*); -} diff --git a/vendor/quote/tests/ui/does-not-have-iter.stderr b/vendor/quote/tests/ui/does-not-have-iter.stderr deleted file mode 100644 index 348071cc42f263..00000000000000 --- a/vendor/quote/tests/ui/does-not-have-iter.stderr +++ /dev/null @@ -1,13 +0,0 @@ -error[E0277]: repetition contains no interpolated value that is an iterator - --> tests/ui/does-not-have-iter.rs:4:5 - | -4 | quote!(#(a b)*); - | ^^^^^^^^^^^^^^^ none of the values interpolated inside this repetition are iterable - | -help: the trait `CheckHasIterator` is not implemented for `HasIterator` - but it is implemented for `HasIterator` - --> src/runtime.rs - | - | impl CheckHasIterator for HasIterator {} - | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - = note: this error originates in the macro `$crate::quote_token_with_context` which comes from the expansion of the macro `quote` (in Nightly builds, run with -Z macro-backtrace for more info) diff 
--git a/vendor/quote/tests/ui/not-quotable.rs b/vendor/quote/tests/ui/not-quotable.rs deleted file mode 100644 index f991c1883d6d34..00000000000000 --- a/vendor/quote/tests/ui/not-quotable.rs +++ /dev/null @@ -1,7 +0,0 @@ -use quote::quote; -use std::net::Ipv4Addr; - -fn main() { - let ip = Ipv4Addr::LOCALHOST; - let _ = quote! { #ip }; -} diff --git a/vendor/quote/tests/ui/not-quotable.stderr b/vendor/quote/tests/ui/not-quotable.stderr deleted file mode 100644 index 15492463b6de6e..00000000000000 --- a/vendor/quote/tests/ui/not-quotable.stderr +++ /dev/null @@ -1,20 +0,0 @@ -error[E0277]: the trait bound `Ipv4Addr: ToTokens` is not satisfied - --> tests/ui/not-quotable.rs:6:13 - | -6 | let _ = quote! { #ip }; - | ^^^^^^^^^^^^^^ - | | - | the trait `ToTokens` is not implemented for `Ipv4Addr` - | required by a bound introduced by this call - | - = help: the following other types implement trait `ToTokens`: - &T - &mut T - Box - CStr - CString - Cow<'a, T> - Option - Rc - and $N others - = note: this error originates in the macro `quote` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/vendor/quote/tests/ui/not-repeatable.rs b/vendor/quote/tests/ui/not-repeatable.rs deleted file mode 100644 index a8f0fe773c5d17..00000000000000 --- a/vendor/quote/tests/ui/not-repeatable.rs +++ /dev/null @@ -1,8 +0,0 @@ -use quote::quote; - -struct Ipv4Addr; - -fn main() { - let ip = Ipv4Addr; - let _ = quote! { #(#ip)* }; -} diff --git a/vendor/quote/tests/ui/not-repeatable.stderr b/vendor/quote/tests/ui/not-repeatable.stderr deleted file mode 100644 index d5e13b040b483e..00000000000000 --- a/vendor/quote/tests/ui/not-repeatable.stderr +++ /dev/null @@ -1,42 +0,0 @@ -error[E0599]: the method `quote_into_iter` exists for struct `Ipv4Addr`, but its trait bounds were not satisfied - --> tests/ui/not-repeatable.rs:7:13 - | -3 | struct Ipv4Addr; - | --------------- method `quote_into_iter` not found for this struct because it doesn't satisfy `Ipv4Addr: Iterator`, `Ipv4Addr: ToTokens`, `Ipv4Addr: ext::RepIteratorExt` or `Ipv4Addr: ext::RepToTokensExt` -... -7 | let _ = quote! { #(#ip)* }; - | ^^^^^^^^^^^^^^^^^^ method cannot be called on `Ipv4Addr` due to unsatisfied trait bounds - | - = note: the following trait bounds were not satisfied: - `Ipv4Addr: Iterator` - which is required by `Ipv4Addr: ext::RepIteratorExt` - `&Ipv4Addr: Iterator` - which is required by `&Ipv4Addr: ext::RepIteratorExt` - `Ipv4Addr: ToTokens` - which is required by `Ipv4Addr: ext::RepToTokensExt` - `&mut Ipv4Addr: Iterator` - which is required by `&mut Ipv4Addr: ext::RepIteratorExt` -note: the traits `Iterator` and `ToTokens` must be implemented - --> $RUST/core/src/iter/traits/iterator.rs - | - | pub trait Iterator { - | ^^^^^^^^^^^^^^^^^^ - | - ::: src/to_tokens.rs - | - | pub trait ToTokens { - | ^^^^^^^^^^^^^^^^^^ - = help: items from traits can only be used if the trait is implemented and in scope - = note: the following traits define an item `quote_into_iter`, perhaps you need to implement one of them: - candidate #1: `ext::RepAsIteratorExt` - candidate #2: `ext::RepIteratorExt` - candidate #3: `ext::RepToTokensExt` - = note: this error originates in the macro `$crate::quote_bind_into_iter` which comes from the expansion of the macro `quote` (in Nightly builds, run with -Z macro-backtrace for more info) - -error[E0282]: type annotations needed - --> tests/ui/not-repeatable.rs:7:13 - | -7 | let _ = quote! 
{ #(#ip)* }; - | ^^^^^^^^^^^^^^^^^^ cannot infer type - | - = note: this error originates in the macro `$crate::quote_bind_next_or_break` which comes from the expansion of the macro `quote` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/vendor/quote/tests/ui/wrong-type-span.rs b/vendor/quote/tests/ui/wrong-type-span.rs deleted file mode 100644 index d5601c8a06f278..00000000000000 --- a/vendor/quote/tests/ui/wrong-type-span.rs +++ /dev/null @@ -1,7 +0,0 @@ -use quote::quote_spanned; - -fn main() { - let span = ""; - let x = 0i32; - quote_spanned!(span=> #x); -} diff --git a/vendor/quote/tests/ui/wrong-type-span.stderr b/vendor/quote/tests/ui/wrong-type-span.stderr deleted file mode 100644 index 12ad3077036572..00000000000000 --- a/vendor/quote/tests/ui/wrong-type-span.stderr +++ /dev/null @@ -1,10 +0,0 @@ -error[E0308]: mismatched types - --> tests/ui/wrong-type-span.rs:6:5 - | -6 | quote_spanned!(span=> #x); - | ^^^^^^^^^^^^^^^^^^^^^^^^^ - | | - | expected `Span`, found `&str` - | expected due to this - | - = note: this error originates in the macro `quote_spanned` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/vendor/regex-automata/.cargo-checksum.json b/vendor/regex-automata/.cargo-checksum.json deleted file mode 100644 index 84c43cc7be1227..00000000000000 --- a/vendor/regex-automata/.cargo-checksum.json +++ /dev/null @@ -1 +0,0 @@ -{"files":{".cargo_vcs_info.json":"813e732fc5802cefc31ee0cc60fea807d4f208a6f21997ab4352e0d9bd6cfbc6","Cargo.lock":"ec00a0a78cc268058c0df851b46025cee60832179b5cbd7f81479611ada4485a","Cargo.toml":"01dd259ddf18d6b99e84f799b4709fdaca8fbcbd30cb2ac2fbabc6309e2db06f","Cargo.toml.orig":"62d643cae321c8f8b42ac9a05fcad92609900ef6974f504ccdca54e287e915c6","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"6485b8ed310d3f0340bf1ad1f47645069ce4069dcc6bb46c7d5c6faf41de1fdb","README.md":"a7cfb89cd6d4de6b6e8b18e872227e5f1e47d91345e377aa1a75affc18c53aa1","src/dfa/accel.rs":"73f8e0c492a6c669fafbb872020091d6bfa5236503c9c0922aa94fd939fb2b1e","src/dfa/automaton.rs":"a2af61cdfb7f16a8419a25ccb3ae250afe736ff397c7a3101c8a77781d096a9b","src/dfa/dense.rs":"ec34a23a36464fa1b57cac01411ab2bdf6f2df5e1a497c7779ab10fd55d2515b","src/dfa/determinize.rs":"d72dc41a7e93b9289370d2a4e7d8524612be1870283504c703f9c08f9f3b316c","src/dfa/minimize.rs":"b5cadb462b9f24cd4aa7a665e75fb813cd06858a92b8986c9c5ae7fd9a60dfab","src/dfa/mod.rs":"530a1025d516a6df949eee46009acc5ef58c9e6788ec3d76702811734d76212f","src/dfa/onepass.rs":"b59ef139772cd2378f112ebaf3f88d75cf62f1592246449be02e47dd5300eb70","src/dfa/regex.rs":"567c7a59ca194117986f1818c092b31f825e860fb1b2c55c7de87de97eebb787","src/dfa/remapper.rs":"ca096abc0f8e45c43a2adf3a7743b8857714ae7411a623edea41cc3ce906a169","src/dfa/search.rs":"79b9ab2b0636177bc26d1ad6f0059ca033decf74824cb5a36f1ac19f020d2713","src/dfa/sparse.rs":"a5fde187faaf88f4f5b5dfeb69a1387e7dc7803f00b7cb0335dc30b7331c2f7e","src/dfa/special.rs":"1b939ad4addf2efb87fcd1ae67d7818b72540c017d895846ab7968cec267aee1","src/dfa/start.rs":"46b1dbaf8e4518ddddda6bbe596621aae36f8ba694390483a22355d9d799be8e","src/hybrid/dfa.rs":"cd0e71ad86161c9a49c6023d9dde8e07895ad03a9586723c3fb1f1c14bdb7faa","src/hybrid/error.rs":"2bca7eb9ff3859d2b2f5afcf00e618f5671c43e32f5ce8e7ab9de44b906a9422","src/hybrid/id.rs":"a529d45c5a7dd5ed64a471d3fab5a8c6a7aa2bd64bb3a81d5d6f1fcca424d41d","src/hybrid/mod.rs":"ca21e89062bdb5a0998d5cd1bc78609af1f6b795533e5982be969c383ac0463a","src/hybrid/regex.rs":"47815d025526330291f4cd749b4dd79b1122ef208
fe6f0a49715c70fc1ea47c8","src/hybrid/search.rs":"76067f3f8675013dcdf7e9c9cc4d9d33d1107fb2cbcd7adcc05cfd42177d90cc","src/lib.rs":"47d562a98f5f50f7cbbffb1f103e277871c7419da05dbb2b0db78dee6e7b4c2e","src/macros.rs":"3e4b39252bfa471fad384160a43f113ebfec7bec46a85d16f006622881dd2081","src/meta/error.rs":"729ec5e2474ed2449fb47f3e0eeb65586ceeed0a6a67f00678f09eb5a46da931","src/meta/limited.rs":"182fb1b012a539cd091f0fa2f9c7806308c04293edcd4bae91a2a65904ea0f3e","src/meta/literal.rs":"2a4e71c5ffdd7b31f7f624a6a8bba3be0cddac1883ddbba6a01a48034a077978","src/meta/mod.rs":"f3b10b96fa08efaba3e4c9b81883cf40aac6e4c1f6ae55a497a534cf5805b46d","src/meta/regex.rs":"92295ff6a6b1e0e6d19fc1fe29679fa5681973160ee61e043d29bf29f44a65b5","src/meta/reverse_inner.rs":"945d6c2d4c7538e1609dbd430a096784d22abd33db58b1ba65c9c9af45a7d3c0","src/meta/stopat.rs":"acb6122e17d10a9b1b5e72d6030e6d95748227975bad0ff5cbbcc2587edfa6df","src/meta/strategy.rs":"3a59ea004755e34e3aeafd7e8c357e643b08554f5fb89e5cb3411a1de3637f26","src/meta/wrappers.rs":"d169ad27f3e5294fb4b2dcd6b179f72f741837ed6cb96d9d5cc654f40b9f43ae","src/nfa/mod.rs":"32a0ed46f4a0a9b4b81b53abf4aa7138e2fd456435897495fce74820b980d4d2","src/nfa/thompson/backtrack.rs":"041015ea153c1e485e9cf39ec60d1e51c7ab9e400ecd77cad2078af45775339b","src/nfa/thompson/builder.rs":"7adf6aba69171f6acd47fea0fec85ba589154fead83f2042a1c6fe9486aa4dbd","src/nfa/thompson/compiler.rs":"cc1fbc44f0106049f6c0020ee8beb879415bc2951bb53c9efdf76c8b6c2af816","src/nfa/thompson/error.rs":"12208c44486575f3ac505754e6559e0b93cac09351a9720ff63cd6fd548ba63d","src/nfa/thompson/literal_trie.rs":"3b5cf36842a31f8b50d820835e3959f878c0dedce0f17932bca449e1b6198651","src/nfa/thompson/map.rs":"fcd17ce7359b5179ef2e809fc9152dfa0b6c61d3d849d8c502497e1d0d8b0fa9","src/nfa/thompson/mod.rs":"dddbbd6f0e7076f369dd12a21aea4eb7e81e9c037d115201871e278cceed0a76","src/nfa/thompson/nfa.rs":"3ab46b912ece5218ba95e29e0c169b23a869796c7bcb138385c562a57c62a2a3","src/nfa/thompson/pikevm.rs":"230f879f05d7d9f868344407064d1e7a05131a13c0c90ab59d5a20f15af0ec56","src/nfa/thompson/range_trie.rs":"2304cab5cd580ca10961fbb14c75c163c2b7fcd29040622190d36f7935b446d3","src/util/alphabet.rs":"4f94d317459b43c7748e7a963935d61632fb70bd7c09dd9e536e354586d21df6","src/util/captures.rs":"81e48d060fc9bea41f98157676a0b262da54f6b1358be41e413c9e3e960f0155","src/util/determinize/mod.rs":"82f34f4e408aaf06f8e04a38a6c9ce0abdcc20b61581e29283ef099f84bc67a1","src/util/determinize/state.rs":"a850af545b7d0bd706f0bf72fdba504b6efdeea181763657109e10fef53aa88d","src/util/empty.rs":"13ec7d6cbd1520db5b4c1dae294f4419fa88d39d2bfc16f4ef258473d609f91c","src/util/escape.rs":"2c8275c56b75018a0d8f8363b5827eb620f2cb52e2e919d8dace2846e7e0cf3c","src/util/int.rs":"b863a62f8ba1edf24858416fc01f15b38bee7af2494ebeb037e1acbf0319415e","src/util/interpolate.rs":"1d716d26ed80beb0ba6526e1fb75fdb009b95122bf0907045237c7e9e4bfbe88","src/util/iter.rs":"d61335dc6b99b134d75c1b75e01f88e2dfe1174d48a36ac5a5e5efbc4c6114e2","src/util/lazy.rs":"116ff2eed0bb6d2aa574c812c74f732fb06c91beb1667e0e5d2a3210023d7db5","src/util/look.rs":"fca6dac7bf7b3b975f177db91e122af89e1510b3664d04210ca8b84738a08305","src/util/memchr.rs":"573109ce4983907083ae0b29a084a324b9b53da369b4d96f7f3a21fd5c8eb5c9","src/util/mod.rs":"6c828a493f0f88c8b515aee4f8faf91ba653eb07e8fc3c23c0524553410803f9","src/util/pool.rs":"22cd6f1a6fcabe6e1cb2759f6f7b87e64dfab8245fcf97b2ab2d3a6424015126","src/util/prefilter/aho_corasick.rs":"b5a56f0709ce718125256706234e1ff1bfa1c3bae2a7ccb72f679ca3d301bab6","src/util/prefilter/byteset.rs":"1c80fa432acc23223a75a5181e37c40034764dffe424
10e4b77af6f24f48bd5c","src/util/prefilter/memchr.rs":"36c6fe6354b2e729db6830166dd4862e439bc48c9e59258d88e4b6c5654e20ef","src/util/prefilter/memmem.rs":"6f6ed9450b14abf3e4a33d395337e51fbaa9743a0a16aac0009f7680aa60c500","src/util/prefilter/mod.rs":"345787d5329a1712697700385979e6ee87925dd3447b1d5a0127c4fc222f0417","src/util/prefilter/teddy.rs":"ed54d26858b56e1c8c87e44afae5f63d81ab930787d79e671f3a3513f576e9cd","src/util/primitives.rs":"8a9cc19ef2e1ab183943cdc2d2f095b02252476e32b7e9fff4a06a251749b068","src/util/search.rs":"432720d85ede0fd4eac84069268bd4bb9c52c9680ddafa710ad01ee423e9d7fc","src/util/sparse_set.rs":"acbe7197f5e5fc95b45f54ba1e4e24f21226714af44a391e507d52b7b23cbaf6","src/util/start.rs":"1ab2dec7c452ae943118cd1c3b6becc84afba1fbb8b6894d81ef7d65141d95ab","src/util/syntax.rs":"cff4712c95fc5f94063e6e11a51b42d7678d5f5b82b492f11fcbb3928c3d6c8d","src/util/unicode_data/mod.rs":"54c3e10bbc393e9881bfac3295815b160f59e69e2056bc29ee7cf0addd8e3cf7","src/util/unicode_data/perl_word.rs":"30f073baae28ea34c373c7778c00f20c1621c3e644404eff031f7d1cc8e9c9e2","src/util/utf8.rs":"a02a559f0ec4013aa8bbc1a2d8717cfa0c82ed81577a4366fbbd8ef8660264fc","src/util/wire.rs":"f4eb8517d6d7ff165b39e829ce7ec7075ee40550878334456887abde486c5885","test":"39d79ce3532c31a51c0be89a2939816fad0e4868d2b03992c202cbe64dce9f6c","tests/dfa/api.rs":"c6ddbca1177c377a42bac1e19e79dc8c840a7a0af2042e6c3c08e46ba1a288fe","tests/dfa/mod.rs":"924d8fff500b9b7b140082623023e78007058a87323151cd8e361462945e4f16","tests/dfa/onepass/mod.rs":"d08f4ecb8ec243be584944c9602af1ed3a48a8732dd11cd573b0d1d182171303","tests/dfa/onepass/suite.rs":"f6a9cba40773db81fcd82ab605ba18ca415908f9857845e7621d47888cb67c91","tests/dfa/regression.rs":"ebcf2645290286aa7531eb2b7951385e5ed8167532437aeca2ad2049768fd796","tests/dfa/suite.rs":"26cfc5a89a2ceda338d15e9cde0aeb6a050ec4f751fb29b017eac54a9d9a0074","tests/fuzz/dense.rs":"3e1099a0cce61e85abc0ad81bc592e85f497f159ef0e5d1d32bac1936aa6f20c","tests/fuzz/mod.rs":"043773510e02f51def43ee0c2b8b867c53ecc8638c8a9233b2ac098de9c3ac1e","tests/fuzz/sparse.rs":"ba61db4927ab28953037a4b20317399c86d01b4d774e46c020ade19029215e25","tests/fuzz/testdata/deserialize_dense_crash-9486fb7c8a93b12c12a62166b43d31640c0208a9":"8961279a8237c3e318452024dd971b1d5a26b058260c297382a74daca1b7f0d1","tests/fuzz/testdata/deserialize_dense_minimized-from-9486fb7c8a93b12c12a62166b43d31640c0208a9":"c2d52e3dea78d3f159b5b521d433358a7fee45ce20ed1545067d461f45ef66b8","tests/fuzz/testdata/deserialize_sparse_crash-0da59c0434eaf35e5a6b470fa9244bb79c72b000":"5b2d273023de3fb04037eaf2e6b4f51cced4c5a08d2e6b44e4be540774f939b9","tests/fuzz/testdata/deserialize_sparse_crash-18cfc246f2ddfc3dfc92b0c7893178c7cf65efa9":"e2e22e2f46a9a75b5c876476442276cf675fe244c5cf918789e4f6b14078fbd9","tests/fuzz/testdata/deserialize_sparse_crash-61fd8e3003bf9d99f6c1e5a8488727eefd234b98":"24a12712e1f2ba0a40b5782707908a74dd19941dc372ef525d65a7134f91988c","tests/fuzz/testdata/deserialize_sparse_crash-a1b839d899ced76d5d7d0f78f9edb7a421505838":"a97f39b2febf9c73535681f7a86201e4b06d5a1ffcf135299c96c1cabfa9f6c4","tests/fuzz/testdata/deserialize_sparse_crash-c383ae07ec5e191422eadc492117439011816570":"44fe3ef878d35e2d51c2c17ff89bbbe3a4650e09d0cbbd48625c0f5e4dd0848b","tests/fuzz/testdata/deserialize_sparse_crash-d07703ceb94b10dcd9e4acb809f2051420449e2b":"d5534be36653b4af6cb94a7c63be58869bb8c204c5c63d67a4d6c986b44bb2e1","tests/fuzz/testdata/deserialize_sparse_crash-dbb8172d3984e7e7d03f4b5f8bb86ecd1460eff9":"77b844898610560afa09f2b8de73a85a0ba9a3b8cee4ff1bbf26b8c97ad4e8a2","tests/gen/README.md":"c36d7a7a0b830123
4f861b6a94c68b4c6a8a8a5ac2a7a762acc241a96c9a8d46","tests/gen/dense/mod.rs":"5ae1cfb46212a674118ada2f66f37b25188e84643d406b95eb4665d722344262","tests/gen/dense/multi_pattern_v2.rs":"29b1e9a799adecbdbe7cd05e9748f664c2b915b10b1d2f5d36cfb6453826d1d2","tests/gen/dense/multi_pattern_v2_fwd.bigendian.dfa":"8421d5a1bfc0b6c3bdc8fc90dff591a046b0aaf8e06ef7de7cc293004a35d061","tests/gen/dense/multi_pattern_v2_fwd.littleendian.dfa":"dcf2fd5fd49f5f53cf1ec66f61623402f39401cb3aea30d6677b98bb1e9541bf","tests/gen/dense/multi_pattern_v2_rev.bigendian.dfa":"73c4f20d984e544dfa4cf05f3009d0a9b52fa84bc97b501ea0ccd179e2def4bc","tests/gen/dense/multi_pattern_v2_rev.littleendian.dfa":"74471209f05754e8e20c8a0222a5877b1b15b8b8f33cd8cac89ea65f708b4aff","tests/gen/mod.rs":"043773510e02f51def43ee0c2b8b867c53ecc8638c8a9233b2ac098de9c3ac1e","tests/gen/sparse/mod.rs":"5ae1cfb46212a674118ada2f66f37b25188e84643d406b95eb4665d722344262","tests/gen/sparse/multi_pattern_v2.rs":"e00fb2a510a215460aab84573196b1f51bb65884ff494c2382534c04f6fdbfe9","tests/gen/sparse/multi_pattern_v2_fwd.bigendian.dfa":"3287956bd2003cd69653b125f82aade95d99adbb20229bfdbb4958b8877c0a0b","tests/gen/sparse/multi_pattern_v2_fwd.littleendian.dfa":"bdf285901eaaac4596380115c5bbb20ab2f42f593d8d9e9238a00ed69863f9c9","tests/gen/sparse/multi_pattern_v2_rev.bigendian.dfa":"e466dc085dd68b2d2220932a0e4d28759edd161c1fdad652240aa3825fd85268","tests/gen/sparse/multi_pattern_v2_rev.littleendian.dfa":"80358d0c26c1cc7284065b0075f5b8804d83e673a8a8c8327f93a1c1ff455399","tests/hybrid/api.rs":"bd4862275c52f94c6f6737bf174c97e3de30f8075ca23f43c129c72a0d0afed7","tests/hybrid/mod.rs":"4856a49a4d9b5e9e079c2719a5e75c32408b37e9b76cbdea057b388a3537af6d","tests/hybrid/suite.rs":"d49081a07b13e923c9d31c211942439c015b970b2b9d2f38fd49935803e22bb1","tests/lib.rs":"9775b3c62fb338ea5c1bd3513a6589eff4b5c8d35c599439d9363dbf98c6f8d4","tests/meta/mod.rs":"d08f4ecb8ec243be584944c9602af1ed3a48a8732dd11cd573b0d1d182171303","tests/meta/suite.rs":"7cafd709c61481f2267de671768f880f8bbd4740f4cb523a449481abc80aa08a","tests/nfa/mod.rs":"49055c358e38d97e42acb1602c671f97dddf24cafe089490f0e79ed208d74d9b","tests/nfa/thompson/backtrack/mod.rs":"d08f4ecb8ec243be584944c9602af1ed3a48a8732dd11cd573b0d1d182171303","tests/nfa/thompson/backtrack/suite.rs":"c14b12ad3292b103d7f5be69c297c737adbeea65379cee12a75596601312c430","tests/nfa/thompson/mod.rs":"de9f5bcea1a8d1f03c85c55ad8c0747877d69e344fcd6c6886b0a402f0661291","tests/nfa/thompson/pikevm/mod.rs":"d08f4ecb8ec243be584944c9602af1ed3a48a8732dd11cd573b0d1d182171303","tests/nfa/thompson/pikevm/suite.rs":"cf21a58532f3dc8fd76df715093d1a9333b0c4072261b63c48ac8c86ca31fe25"},"package":"5276caf25ac86c8d810222b3dbb938e512c55c6831a10f3e6ed1c93b84041f1c"} \ No newline at end of file diff --git a/vendor/regex-automata/.cargo_vcs_info.json b/vendor/regex-automata/.cargo_vcs_info.json deleted file mode 100644 index a8433855ae044e..00000000000000 --- a/vendor/regex-automata/.cargo_vcs_info.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "git": { - "sha1": "ab0b07171b82d1d4fdc8359505d12b2e818514d4" - }, - "path_in_vcs": "regex-automata" -} \ No newline at end of file diff --git a/vendor/regex-automata/Cargo.lock b/vendor/regex-automata/Cargo.lock deleted file mode 100644 index 36522cdf2ff0d1..00000000000000 --- a/vendor/regex-automata/Cargo.lock +++ /dev/null @@ -1,372 +0,0 @@ -# This file is automatically @generated by Cargo. -# It is not intended for manual editing. 
-version = 3 - -[[package]] -name = "aho-corasick" -version = "1.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916" -dependencies = [ - "log", - "memchr", -] - -[[package]] -name = "anyhow" -version = "1.0.100" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a23eb6b1614318a8071c9b2521f36b424b2c83db5eb3a0fead4a6c0809af6e61" - -[[package]] -name = "atty" -version = "0.2.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8" -dependencies = [ - "hermit-abi", - "libc", - "winapi", -] - -[[package]] -name = "bstr" -version = "1.12.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "234113d19d0d7d613b40e86fb654acf958910802bcceab913a4f9e7cda03b1a4" -dependencies = [ - "memchr", - "serde", -] - -[[package]] -name = "cfg-if" -version = "1.0.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2fd1289c04a9ea8cb22300a459a72a385d7c73d3259e2ed7dcb2af674838cfa9" - -[[package]] -name = "doc-comment" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fea41bba32d969b513997752735605054bc0dfa92b4c56bf1189f2e174be7a10" - -[[package]] -name = "env_logger" -version = "0.9.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a12e6657c4c97ebab115a42dcee77225f7f482cdd841cf7088c657a42e9e00e7" -dependencies = [ - "atty", - "humantime", - "log", - "termcolor", -] - -[[package]] -name = "equivalent" -version = "1.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "877a4ace8713b0bcf2a4e7eec82529c029f1d0619886d18145fea96c3ffe5c0f" - -[[package]] -name = "getrandom" -version = "0.2.16" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "335ff9f135e4384c8150d6f27c6daed433577f86b4750418338c01a1a2528592" -dependencies = [ - "cfg-if", - "libc", - "wasi", -] - -[[package]] -name = "hashbrown" -version = "0.16.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5419bdc4f6a9207fbeba6d11b604d481addf78ecd10c11ad51e76c2f6482748d" - -[[package]] -name = "hermit-abi" -version = "0.1.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33" -dependencies = [ - "libc", -] - -[[package]] -name = "humantime" -version = "2.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "135b12329e5e3ce057a9f972339ea52bc954fe1e9358ef27f95e89716fbc5424" - -[[package]] -name = "indexmap" -version = "2.11.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4b0f83760fb341a774ed326568e19f5a863af4a952def8c39f9ab92fd95b88e5" -dependencies = [ - "equivalent", - "hashbrown", -] - -[[package]] -name = "libc" -version = "0.2.177" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2874a2af47a2325c2001a6e6fad9b16a53b802102b528163885171cf92b15976" - -[[package]] -name = "log" -version = "0.4.28" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34080505efa8e45a4b816c349525ebe327ceaa8559756f0356cba97ef3bf7432" - -[[package]] -name = "memchr" -version = "2.7.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f52b00d39961fc5b2736ea853c9cc86238e165017a493d1d5c8eac6bdc4cc273" -dependencies = [ 
- "log", -] - -[[package]] -name = "proc-macro2" -version = "1.0.101" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89ae43fd86e4158d6db51ad8e2b80f313af9cc74f5c0e03ccb87de09998732de" -dependencies = [ - "unicode-ident", -] - -[[package]] -name = "quickcheck" -version = "1.0.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "588f6378e4dd99458b60ec275b4477add41ce4fa9f64dcba6f15adccb19b50d6" -dependencies = [ - "rand", -] - -[[package]] -name = "quote" -version = "1.0.41" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce25767e7b499d1b604768e7cde645d14cc8584231ea6b295e9c9eb22c02e1d1" -dependencies = [ - "proc-macro2", -] - -[[package]] -name = "rand" -version = "0.8.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" -dependencies = [ - "rand_core", -] - -[[package]] -name = "rand_core" -version = "0.6.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" -dependencies = [ - "getrandom", -] - -[[package]] -name = "regex-automata" -version = "0.4.13" -dependencies = [ - "aho-corasick", - "anyhow", - "bstr", - "doc-comment", - "env_logger", - "log", - "memchr", - "quickcheck", - "regex-syntax", - "regex-test", -] - -[[package]] -name = "regex-syntax" -version = "0.8.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a2d987857b319362043e95f5353c0535c1f58eec5336fdfcf626430af7def58" - -[[package]] -name = "regex-test" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da40f0939bc4c598b4326abdbb363a8987aa43d0526e5624aefcf3ed90344e62" -dependencies = [ - "anyhow", - "bstr", - "serde", - "toml", -] - -[[package]] -name = "serde" -version = "1.0.228" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a8e94ea7f378bd32cbbd37198a4a91436180c5bb472411e48b5ec2e2124ae9e" -dependencies = [ - "serde_core", - "serde_derive", -] - -[[package]] -name = "serde_core" -version = "1.0.228" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41d385c7d4ca58e59fc732af25c3983b67ac852c1a25000afe1175de458b67ad" -dependencies = [ - "serde_derive", -] - -[[package]] -name = "serde_derive" -version = "1.0.228" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d540f220d3187173da220f885ab66608367b6574e925011a9353e4badda91d79" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "serde_spanned" -version = "0.6.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf41e0cfaf7226dca15e8197172c295a782857fcb97fad1808a166870dee75a3" -dependencies = [ - "serde", -] - -[[package]] -name = "syn" -version = "2.0.106" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ede7c438028d4436d71104916910f5bb611972c5cfd7f89b8300a8186e6fada6" -dependencies = [ - "proc-macro2", - "quote", - "unicode-ident", -] - -[[package]] -name = "termcolor" -version = "1.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06794f8f6c5c898b3275aebefa6b8a1cb24cd2c6c79397ab15774837a0bc5755" -dependencies = [ - "winapi-util", -] - -[[package]] -name = "toml" -version = "0.8.23" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"dc1beb996b9d83529a9e75c17a1686767d148d70663143c7854d8b4a09ced362" -dependencies = [ - "serde", - "serde_spanned", - "toml_datetime", - "toml_edit", -] - -[[package]] -name = "toml_datetime" -version = "0.6.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22cddaf88f4fbc13c51aebbf5f8eceb5c7c5a9da2ac40a13519eb5b0a0e8f11c" -dependencies = [ - "serde", -] - -[[package]] -name = "toml_edit" -version = "0.22.27" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41fe8c660ae4257887cf66394862d21dbca4a6ddd26f04a3560410406a2f819a" -dependencies = [ - "indexmap", - "serde", - "serde_spanned", - "toml_datetime", - "winnow", -] - -[[package]] -name = "unicode-ident" -version = "1.0.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f63a545481291138910575129486daeaf8ac54aee4387fe7906919f7830c7d9d" - -[[package]] -name = "wasi" -version = "0.11.1+wasi-snapshot-preview1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ccf3ec651a847eb01de73ccad15eb7d99f80485de043efb2f370cd654f4ea44b" - -[[package]] -name = "winapi" -version = "0.3.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" -dependencies = [ - "winapi-i686-pc-windows-gnu", - "winapi-x86_64-pc-windows-gnu", -] - -[[package]] -name = "winapi-i686-pc-windows-gnu" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" - -[[package]] -name = "winapi-util" -version = "0.1.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c2a7b1c03c876122aa43f3020e6c3c3ee5c05081c9a00739faf7503aeba10d22" -dependencies = [ - "windows-sys", -] - -[[package]] -name = "winapi-x86_64-pc-windows-gnu" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" - -[[package]] -name = "windows-link" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0805222e57f7521d6a62e36fa9163bc891acd422f971defe97d64e70d0a4fe5" - -[[package]] -name = "windows-sys" -version = "0.61.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae137229bcbd6cdf0f7b80a31df61766145077ddf49416a728b02cb3921ff3fc" -dependencies = [ - "windows-link", -] - -[[package]] -name = "winnow" -version = "0.7.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21a0236b59786fed61e2a80582dd500fe61f18b5dca67a4a067d0bc9039339cf" -dependencies = [ - "memchr", -] diff --git a/vendor/regex-automata/Cargo.toml b/vendor/regex-automata/Cargo.toml deleted file mode 100644 index ac58e53a2dde98..00000000000000 --- a/vendor/regex-automata/Cargo.toml +++ /dev/null @@ -1,200 +0,0 @@ -# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO -# -# When uploading crates to the registry Cargo will automatically -# "normalize" Cargo.toml files for maximal compatibility -# with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g., crates.io) dependencies. -# -# If you are reading this file be aware that the original Cargo.toml -# will likely look very different (and much more reasonable). -# See Cargo.toml.orig for the original contents. 
- -[package] -edition = "2021" -rust-version = "1.65" -name = "regex-automata" -version = "0.4.13" -authors = [ - "The Rust Project Developers", - "Andrew Gallant ", -] -build = false -autolib = false -autobins = false -autoexamples = false -autotests = false -autobenches = false -description = "Automata construction and matching using regular expressions." -homepage = "https://github.com/rust-lang/regex/tree/master/regex-automata" -documentation = "https://docs.rs/regex-automata" -readme = "README.md" -keywords = [ - "regex", - "dfa", - "automata", - "automaton", - "nfa", -] -categories = ["text-processing"] -license = "MIT OR Apache-2.0" -repository = "https://github.com/rust-lang/regex" - -[package.metadata.docs.rs] -all-features = true -rustdoc-args = [ - "--cfg", - "docsrs_regex", -] - -[features] -alloc = [] -default = [ - "std", - "syntax", - "perf", - "unicode", - "meta", - "nfa", - "dfa", - "hybrid", -] -dfa = [ - "dfa-build", - "dfa-search", - "dfa-onepass", -] -dfa-build = [ - "nfa-thompson", - "dfa-search", -] -dfa-onepass = ["nfa-thompson"] -dfa-search = [] -hybrid = [ - "alloc", - "nfa-thompson", -] -internal-instrument = ["internal-instrument-pikevm"] -internal-instrument-pikevm = [ - "logging", - "std", -] -logging = [ - "dep:log", - "aho-corasick?/logging", - "memchr?/logging", -] -meta = [ - "syntax", - "nfa-pikevm", -] -nfa = [ - "nfa-thompson", - "nfa-pikevm", - "nfa-backtrack", -] -nfa-backtrack = ["nfa-thompson"] -nfa-pikevm = ["nfa-thompson"] -nfa-thompson = ["alloc"] -perf = [ - "perf-inline", - "perf-literal", -] -perf-inline = [] -perf-literal = [ - "perf-literal-substring", - "perf-literal-multisubstring", -] -perf-literal-multisubstring = ["dep:aho-corasick"] -perf-literal-substring = [ - "aho-corasick?/perf-literal", - "dep:memchr", -] -std = [ - "regex-syntax?/std", - "memchr?/std", - "aho-corasick?/std", - "alloc", -] -syntax = [ - "dep:regex-syntax", - "alloc", -] -unicode = [ - "unicode-age", - "unicode-bool", - "unicode-case", - "unicode-gencat", - "unicode-perl", - "unicode-script", - "unicode-segment", - "unicode-word-boundary", - "regex-syntax?/unicode", -] -unicode-age = ["regex-syntax?/unicode-age"] -unicode-bool = ["regex-syntax?/unicode-bool"] -unicode-case = ["regex-syntax?/unicode-case"] -unicode-gencat = ["regex-syntax?/unicode-gencat"] -unicode-perl = ["regex-syntax?/unicode-perl"] -unicode-script = ["regex-syntax?/unicode-script"] -unicode-segment = ["regex-syntax?/unicode-segment"] -unicode-word-boundary = [] - -[lib] -name = "regex_automata" -path = "src/lib.rs" -bench = false - -[[test]] -name = "integration" -path = "tests/lib.rs" - -[dependencies.aho-corasick] -version = "1.0.0" -optional = true -default-features = false - -[dependencies.log] -version = "0.4.14" -optional = true - -[dependencies.memchr] -version = "2.6.0" -optional = true -default-features = false - -[dependencies.regex-syntax] -version = "0.8.5" -optional = true -default-features = false - -[dev-dependencies.anyhow] -version = "1.0.69" - -[dev-dependencies.bstr] -version = "1.3.0" -features = ["std"] -default-features = false - -[dev-dependencies.doc-comment] -version = "0.3.3" - -[dev-dependencies.env_logger] -version = "0.9.3" -features = [ - "atty", - "humantime", - "termcolor", -] -default-features = false - -[dev-dependencies.quickcheck] -version = "1.0.3" -default-features = false - -[dev-dependencies.regex-test] -version = "0.1.0" - -[lints.rust.unexpected_cfgs] -level = "allow" -priority = 0 -check-cfg = ["cfg(docsrs_regex)"] diff --git 
a/vendor/regex-automata/LICENSE-APACHE b/vendor/regex-automata/LICENSE-APACHE deleted file mode 100644 index 16fe87b06e802f..00000000000000 --- a/vendor/regex-automata/LICENSE-APACHE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - -2. Grant of Copyright License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - -3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - -4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - -5. Submission of Contributions. 
Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - -6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - -8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - -Copyright [yyyy] [name of copyright owner] - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. diff --git a/vendor/regex-automata/LICENSE-MIT b/vendor/regex-automata/LICENSE-MIT deleted file mode 100644 index 39d4bdb5acd313..00000000000000 --- a/vendor/regex-automata/LICENSE-MIT +++ /dev/null @@ -1,25 +0,0 @@ -Copyright (c) 2014 The Rust Project Developers - -Permission is hereby granted, free of charge, to any -person obtaining a copy of this software and associated -documentation files (the "Software"), to deal in the -Software without restriction, including without -limitation the rights to use, copy, modify, merge, -publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software -is furnished to do so, subject to the following -conditions: - -The above copyright notice and this permission notice -shall be included in all copies or substantial portions -of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -DEALINGS IN THE SOFTWARE. diff --git a/vendor/regex-automata/README.md b/vendor/regex-automata/README.md deleted file mode 100644 index cb6e86c9f97cb5..00000000000000 --- a/vendor/regex-automata/README.md +++ /dev/null @@ -1,117 +0,0 @@ -regex-automata -============== -This crate exposes a variety of regex engines used by the `regex` crate. -It provides a vast, sprawling and "expert" level API to each regex engine. -The regex engines provided by this crate focus heavily on finite automata -implementations and specifically guarantee worst case `O(m * n)` time -complexity for all searches. (Where `m ~ len(regex)` and `n ~ len(haystack)`.) - -[![Build status](https://github.com/rust-lang/regex/workflows/ci/badge.svg)](https://github.com/rust-lang/regex/actions) -[![Crates.io](https://img.shields.io/crates/v/regex-automata.svg)](https://crates.io/crates/regex-automata) - - -### Documentation - -https://docs.rs/regex-automata - - -### Example - -This example shows how to search for matches of multiple regexes, where each -regex uses the same capture group names to parse different key-value formats. - -```rust -use regex_automata::{meta::Regex, PatternID}; - -let re = Regex::new_many(&[ - r#"(?m)^(?[[:word:]]+)=(?[[:word:]]+)$"#, - r#"(?m)^(?[[:word:]]+)="(?[^"]+)"$"#, - r#"(?m)^(?[[:word:]]+)='(?[^']+)'$"#, - r#"(?m)^(?[[:word:]]+):\s*(?[[:word:]]+)$"#, -]).unwrap(); -let hay = r#" -best_album="Blow Your Face Out" -best_quote='"then as it was, then again it will be"' -best_year=1973 -best_simpsons_episode: HOMR -"#; -let mut kvs = vec![]; -for caps in re.captures_iter(hay) { - // N.B. One could use capture indices '1' and '2' here - // as well. Capture indices are local to each pattern. - // (Just like names are.) 
- let key = &hay[caps.get_group_by_name("key").unwrap()]; - let val = &hay[caps.get_group_by_name("val").unwrap()]; - kvs.push((key, val)); -} -assert_eq!(kvs, vec![ - ("best_album", "Blow Your Face Out"), - ("best_quote", "\"then as it was, then again it will be\""), - ("best_year", "1973"), - ("best_simpsons_episode", "HOMR"), -]); -``` - - -### Safety - -**I welcome audits of `unsafe` code.** - -This crate tries to be extremely conservative in its use of `unsafe`, but does -use it in a few spots. In general, I am very open to removing uses of `unsafe` -if it doesn't result in measurable performance regressions and doesn't result -in significantly more complex code. - -Below is an outline of how `unsafe` is used in this crate. - -* `util::pool::Pool` makes use of `unsafe` to implement a fast path for -accessing an element of the pool. The fast path applies to the first thread -that uses the pool. In effect, the fast path is fast because it avoids a mutex -lock. `unsafe` is also used in the no-std version of `Pool` to implement a spin -lock for synchronization. -* `util::lazy::Lazy` uses `unsafe` to implement a variant of -`once_cell::sync::Lazy` that works in no-std environments. A no-std no-alloc -implementation is also provided that requires use of `unsafe`. -* The `dfa` module makes extensive use of `unsafe` to support zero-copy -deserialization of DFAs. The high level problem is that you need to get from -`&[u8]` to the internal representation of a DFA without doing any copies. -This is required for support in no-std no-alloc environments. It also makes -deserialization extremely cheap. -* The `dfa` and `hybrid` modules use `unsafe` to explicitly elide bounds checks -in the core search loops. This makes the codegen tighter and typically leads to -consistent 5-10% performance improvements on some workloads. - -In general, the above reflect the only uses of `unsafe` throughout the entire -`regex` crate. At present, there are no plans to meaningfully expand the use -of `unsafe`. With that said, one thing folks have been asking for is cheap -deserialization of a `regex::Regex`. My sense is that this feature will require -a lot more `unsafe` in places to support zero-copy deserialization. It is -unclear at this point whether this will be pursued. - - -### Motivation - -I started out building this crate because I wanted to re-work the `regex` -crate internals to make it more amenable to optimizations. It turns out that -there are a lot of different ways to build regex engines and even more ways to -compose them. Moreover, heuristic literal optimizations are often tricky to -get correct, but the fruit they bear is attractive. All of these things were -difficult to expand upon without risking the introduction of more bugs. So I -decided to tear things down and start fresh. - -In the course of doing so, I ended up designing strong boundaries between each -component so that each component could be reasoned and tested independently. -This also made it somewhat natural to expose the components as a library unto -itself. Namely, folks have been asking for more capabilities in the regex -crate for a long time, but these capabilities usually come with additional API -complexity that I didn't want to introduce in the `regex` crate proper. But -exposing them in an "expert" level crate like `regex-automata` seemed quite -fine. - -In the end, I do still somewhat consider this crate an experiment. 
It is -unclear whether the strong boundaries between components will be an impediment -to ongoing development or not. De-coupling tends to lead to slower development -in my experience, and when you mix in the added cost of not introducing -breaking changes all the time, things can get quite complicated. But, I -don't think anyone has ever release the internals of a regex engine as a -library before. So it will be interesting to see how it plays out! diff --git a/vendor/regex-automata/src/dfa/accel.rs b/vendor/regex-automata/src/dfa/accel.rs deleted file mode 100644 index 47c84604808913..00000000000000 --- a/vendor/regex-automata/src/dfa/accel.rs +++ /dev/null @@ -1,517 +0,0 @@ -// This module defines some core types for dealing with accelerated DFA states. -// Briefly, a DFA state can be "accelerated" if all of its transitions except -// for a few loop back to itself. This directly implies that the only way out -// of such a state is if a byte corresponding to one of those non-loopback -// transitions is found. Such states are often found in simple repetitions in -// non-Unicode regexes. For example, consider '(?-u)[^a]+a'. We can look at its -// DFA with regex-cli: -// -// $ regex-cli debug dense dfa -p '(?-u)[^a]+a' -BbC --no-table -// D 000000: -// Q 000001: -// *000002: -// A 000003: \x00-` => 3, a => 8, b-\xFF => 3 -// A 000004: \x00-` => 4, a => 7, b-\xFF => 4 -// 000005: \x00-` => 4, b-\xFF => 4 -// 000006: \x00-` => 3, a => 6, b-\xFF => 3 -// 000007: \x00-\xFF => 2, EOI => 2 -// 000008: \x00-\xFF => 2, EOI => 2 -// -// In particular, state 3 is accelerated (shown via the 'A' indicator) since -// the only way to leave that state once entered is to see an 'a' byte. If -// there is a long run of non-'a' bytes, then using something like 'memchr' -// to find the next 'a' byte can be significantly faster than just using the -// standard byte-at-a-time state machine. -// -// Unfortunately, this optimization rarely applies when Unicode is enabled. -// For example, patterns like '[^a]' don't actually match any byte that isn't -// 'a', but rather, any UTF-8 encoding of a Unicode scalar value that isn't -// 'a'. This makes the state machine much more complex---far beyond a single -// state---and removes the ability to easily accelerate it. (Because if the -// machine sees a non-UTF-8 sequence, then the machine won't match through it.) -// -// In practice, we only consider accelerating states that have 3 or fewer -// non-loop transitions. At a certain point, you get diminishing returns, but -// also because that's what the memchr crate supports. The structures below -// hard-code this assumption and provide (de)serialization APIs for use inside -// a DFA. -// -// And finally, note that there is some trickery involved in making it very -// fast to not only check whether a state is accelerated at search time, but -// also to access the bytes to search for to implement the acceleration itself. -// dfa/special.rs provides more detail, but the short story is that all -// accelerated states appear contiguously in a DFA. This means we can represent -// the ID space of all accelerated DFA states with a single range. 
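As a rough illustration of the payoff described above (a hypothetical helper, not code from this crate; it assumes the memchr crate is available): from an accelerated state such as state 3 in the '(?-u)[^a]+a' dump, which only leaves on b'a', the search can jump straight to the next b'a' instead of feeding every byte through the transition table.

```rust
// Hypothetical sketch of the "skip a long run" idea, assuming the memchr crate.
// Returns the offset of the next byte that can leave the accelerated state,
// or None if the state loops for the rest of the haystack.
fn next_leave_offset(haystack: &[u8], at: usize) -> Option<usize> {
    memchr::memchr(b'a', &haystack[at..]).map(|i| at + i)
}
```

If no such byte exists, the state simply consumes the remainder of the haystack and the search continues as usual.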
So given -// a state ID, we can determine whether it's accelerated via -// -// min_accel_id <= id <= max_accel_id -// -// And find its corresponding accelerator with: -// -// accels.get((id - min_accel_id) / dfa_stride) - -#[cfg(feature = "dfa-build")] -use alloc::{vec, vec::Vec}; - -use crate::util::{ - int::Pointer, - memchr, - wire::{self, DeserializeError, Endian, SerializeError}, -}; - -/// The base type used to represent a collection of accelerators. -/// -/// While an `Accel` is represented as a fixed size array of bytes, a -/// *collection* of `Accel`s (called `Accels`) is represented internally as a -/// slice of u32. While it's a bit unnatural to do this and costs us a bit of -/// fairly low-risk not-safe code, it lets us remove the need for a second type -/// parameter in the definition of dense::DFA. (Which really wants everything -/// to be a slice of u32.) -type AccelTy = u32; - -/// The size of the unit of representation for accelerators. -/// -/// ACCEL_CAP *must* be a multiple of this size. -const ACCEL_TY_SIZE: usize = core::mem::size_of::<AccelTy>(); - -/// The maximum length in bytes that a single Accel can be. This is distinct -/// from the capacity of an accelerator in that the length represents only the -/// bytes that should be read. -const ACCEL_LEN: usize = 4; - -/// The capacity of each accelerator, in bytes. We set this to 8 since it's a -/// multiple of 4 (our ID size) and because it gives us a little wiggle room -/// if we want to support more accel bytes in the future without a breaking -/// change. -/// -/// This MUST be a multiple of ACCEL_TY_SIZE. -const ACCEL_CAP: usize = 8; - -/// Search for between 1 and 3 needle bytes in the given haystack, starting the -/// search at the given position. If `needles` has a length other than 1-3, -/// then this panics. -#[cfg_attr(feature = "perf-inline", inline(always))] -pub(crate) fn find_fwd( - needles: &[u8], - haystack: &[u8], - at: usize, -) -> Option<usize> { - let bs = needles; - let i = match needles.len() { - 1 => memchr::memchr(bs[0], &haystack[at..])?, - 2 => memchr::memchr2(bs[0], bs[1], &haystack[at..])?, - 3 => memchr::memchr3(bs[0], bs[1], bs[2], &haystack[at..])?, - 0 => panic!("cannot find with empty needles"), - n => panic!("invalid needles length: {n}"), - }; - Some(at + i) -} - -/// Search for between 1 and 3 needle bytes in the given haystack in reverse, -/// starting the search at the given position. If `needles` has a length other -/// than 1-3, then this panics. -#[cfg_attr(feature = "perf-inline", inline(always))] -pub(crate) fn find_rev( - needles: &[u8], - haystack: &[u8], - at: usize, -) -> Option<usize> { - let bs = needles; - match needles.len() { - 1 => memchr::memrchr(bs[0], &haystack[..at]), - 2 => memchr::memrchr2(bs[0], bs[1], &haystack[..at]), - 3 => memchr::memrchr3(bs[0], bs[1], bs[2], &haystack[..at]), - 0 => panic!("cannot find with empty needles"), - n => panic!("invalid needles length: {n}"), - } -} - -/// Represents the accelerators for all accelerated states in a dense DFA. -/// -/// The `A` type parameter represents the type of the underlying bytes. -/// Generally, this is either `&[AccelTy]` or `Vec<AccelTy>`. -#[derive(Clone)] -pub(crate) struct Accels<A> { - /// A length prefixed slice of contiguous accelerators. See the top comment - /// in this module for more details on how we can jump from a DFA's state - /// ID to an accelerator in this list. - /// - /// The first 4 bytes always correspond to the number of accelerators - /// that follow. - accels: A, -}
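To make the ID-range scheme from the top-of-module comment concrete, here is a minimal hypothetical sketch; `min_accel_id`, `max_accel_id` and `dfa_stride` are stand-ins for values the real dense DFA keeps alongside its transition table, not items defined in this module.

```rust
/// Hypothetical helper: map a DFA state ID to an index into the accelerator
/// list, using the "all accelerated states are contiguous" property.
fn accel_index(
    id: usize,
    min_accel_id: usize, // smallest accelerated state ID (assumed known)
    max_accel_id: usize, // largest accelerated state ID (assumed known)
    dfa_stride: usize,   // size of one state's transition row, in IDs
) -> Option<usize> {
    if min_accel_id <= id && id <= max_accel_id {
        // The offset from the first accelerated state, divided by the
        // per-state stride, gives the accelerator's index.
        Some((id - min_accel_id) / dfa_stride)
    } else {
        None
    }
}
```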
- -#[cfg(feature = "dfa-build")] -impl Accels<Vec<AccelTy>> { - /// Create an empty sequence of accelerators for a DFA. - pub fn empty() -> Accels<Vec<AccelTy>> { - Accels { accels: vec![0] } - } - - /// Add an accelerator to this sequence. - /// - /// This adds the accelerator to the end of the sequence and therefore - /// should be done in correspondence with its state in the DFA. - /// - /// This panics if this results in more accelerators than AccelTy::MAX. - pub fn add(&mut self, accel: Accel) { - self.accels.extend_from_slice(&accel.as_accel_tys()); - let len = self.len(); - self.set_len(len + 1); - } - - /// Set the number of accelerators in this sequence, which is encoded in - /// the first 4 bytes of the underlying bytes. - fn set_len(&mut self, new_len: usize) { - // The only way an accelerator gets added is if a state exists for - // it, and if a state exists, then its index is guaranteed to be - // representable by an AccelTy by virtue of the guarantees provided by - // StateID. - let new_len = AccelTy::try_from(new_len).unwrap(); - self.accels[0] = new_len; - } -} - -impl<'a> Accels<&'a [AccelTy]> { - /// Deserialize a sequence of accelerators from the given bytes. If there - /// was a problem deserializing, then an error is returned. - /// - /// This is guaranteed to run in constant time. This does not guarantee - /// that every accelerator in the returned collection is valid. Thus, - /// accessing one may panic, or not-safe code that relies on accelerators - /// being correct may result in UB. - /// - /// Callers may check the validity of every accelerator with the `validate` - /// method. - pub fn from_bytes_unchecked( - mut slice: &'a [u8], - ) -> Result<(Accels<&'a [AccelTy]>, usize), DeserializeError> { - let slice_start = slice.as_ptr().as_usize(); - - let (accel_len, _) = - wire::try_read_u32_as_usize(slice, "accelerators length")?; - // The accelerator length is part of the accel_tys slice that - // we deserialize. This is perhaps a bit idiosyncratic. It would - // probably be better to split out the length into a real field. - - let accel_tys_len = wire::add( - wire::mul(accel_len, 2, "total number of accelerator accel_tys")?, - 1, - "total number of accel_tys", - )?; - let accel_tys_bytes_len = wire::mul( - ACCEL_TY_SIZE, - accel_tys_len, - "total number of bytes in accelerators", - )?; - wire::check_slice_len(slice, accel_tys_bytes_len, "accelerators")?; - wire::check_alignment::<AccelTy>(slice)?; - let accel_tys = &slice[..accel_tys_bytes_len]; - slice = &slice[accel_tys_bytes_len..]; - // SAFETY: We've checked the length and alignment above, and since - // slice is just bytes and AccelTy is just a u32, we can safely cast to - // a slice of &[AccelTy]. - let accels = unsafe { - core::slice::from_raw_parts( - accel_tys.as_ptr().cast::<AccelTy>(), - accel_tys_len, - ) - }; - Ok((Accels { accels }, slice.as_ptr().as_usize() - slice_start)) - } -} - -impl<A: AsRef<[AccelTy]>> Accels<A> { - /// Return an owned version of the accelerators. - #[cfg(feature = "alloc")] - pub fn to_owned(&self) -> Accels<Vec<AccelTy>> { - Accels { accels: self.accels.as_ref().to_vec() } - } - - /// Return a borrowed version of the accelerators. - pub fn as_ref(&self) -> Accels<&[AccelTy]> { - Accels { accels: self.accels.as_ref() } - } - - /// Return the bytes representing the serialization of the accelerators. - pub fn as_bytes(&self) -> &[u8] { - let accels = self.accels.as_ref(); - // SAFETY: This is safe because accels is just a slice of AccelTy, - // and u8 always has a smaller alignment.
- unsafe { - core::slice::from_raw_parts( - accels.as_ptr().cast::(), - accels.len() * ACCEL_TY_SIZE, - ) - } - } - - /// Returns the memory usage, in bytes, of these accelerators. - /// - /// The memory usage is computed based on the number of bytes used to - /// represent all of the accelerators. - /// - /// This does **not** include the stack size used by this value. - pub fn memory_usage(&self) -> usize { - self.as_bytes().len() - } - - /// Return the bytes to search for corresponding to the accelerator in this - /// sequence at index `i`. If no such accelerator exists, then this panics. - /// - /// The significance of the index is that it should be in correspondence - /// with the index of the corresponding DFA. That is, accelerated DFA - /// states are stored contiguously in the DFA and have an ordering implied - /// by their respective state IDs. The state's index in that sequence - /// corresponds to the index of its corresponding accelerator. - #[cfg_attr(feature = "perf-inline", inline(always))] - pub fn needles(&self, i: usize) -> &[u8] { - if i >= self.len() { - panic!("invalid accelerator index {i}"); - } - let bytes = self.as_bytes(); - let offset = ACCEL_TY_SIZE + i * ACCEL_CAP; - let len = usize::from(bytes[offset]); - &bytes[offset + 1..offset + 1 + len] - } - - /// Return the total number of accelerators in this sequence. - pub fn len(&self) -> usize { - // This should never panic since deserialization checks that the - // length can fit into a usize. - usize::try_from(self.accels.as_ref()[0]).unwrap() - } - - /// Return the accelerator in this sequence at index `i`. If no such - /// accelerator exists, then this returns None. - /// - /// See the docs for `needles` on the significance of the index. - fn get(&self, i: usize) -> Option { - if i >= self.len() { - return None; - } - let offset = ACCEL_TY_SIZE + i * ACCEL_CAP; - let accel = Accel::from_slice(&self.as_bytes()[offset..]) - .expect("Accels must contain valid accelerators"); - Some(accel) - } - - /// Returns an iterator of accelerators in this sequence. - fn iter(&self) -> IterAccels<'_, A> { - IterAccels { accels: self, i: 0 } - } - - /// Writes these accelerators to the given byte buffer using the indicated - /// endianness. If the given buffer is too small, then an error is - /// returned. Upon success, the total number of bytes written is returned. - /// The number of bytes written is guaranteed to be a multiple of 8. - pub fn write_to( - &self, - dst: &mut [u8], - ) -> Result { - let nwrite = self.write_to_len(); - assert_eq!( - nwrite % ACCEL_TY_SIZE, - 0, - "expected accelerator bytes written to be a multiple \ - of {ACCEL_TY_SIZE}", - ); - if dst.len() < nwrite { - return Err(SerializeError::buffer_too_small("accelerators")); - } - - // The number of accelerators can never exceed AccelTy::MAX. - E::write_u32(AccelTy::try_from(self.len()).unwrap(), dst); - // The actual accelerators are just raw bytes and thus their endianness - // is irrelevant. So we can copy them as bytes. - dst[ACCEL_TY_SIZE..nwrite] - .copy_from_slice(&self.as_bytes()[ACCEL_TY_SIZE..nwrite]); - Ok(nwrite) - } - - /// Validates that every accelerator in this collection can be successfully - /// deserialized as a valid accelerator. - pub fn validate(&self) -> Result<(), DeserializeError> { - for chunk in self.as_bytes()[ACCEL_TY_SIZE..].chunks(ACCEL_CAP) { - let _ = Accel::from_slice(chunk)?; - } - Ok(()) - } - - /// Returns the total number of bytes written by `write_to`. 
- pub fn write_to_len(&self) -> usize { - self.as_bytes().len() - } -} - -impl> core::fmt::Debug for Accels { - fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { - write!(f, "Accels(")?; - let mut list = f.debug_list(); - for a in self.iter() { - list.entry(&a); - } - list.finish()?; - write!(f, ")") - } -} - -#[derive(Debug)] -struct IterAccels<'a, A: AsRef<[AccelTy]>> { - accels: &'a Accels, - i: usize, -} - -impl<'a, A: AsRef<[AccelTy]>> Iterator for IterAccels<'a, A> { - type Item = Accel; - - fn next(&mut self) -> Option { - let accel = self.accels.get(self.i)?; - self.i += 1; - Some(accel) - } -} - -/// Accel represents a structure for determining how to "accelerate" a DFA -/// state. -/// -/// Namely, it contains zero or more bytes that must be seen in order for the -/// DFA to leave the state it is associated with. In practice, the actual range -/// is 1 to 3 bytes. -/// -/// The purpose of acceleration is to identify states whose vast majority -/// of transitions are just loops back to the same state. For example, -/// in the regex `(?-u)^[^a]+b`, the corresponding DFA will have a state -/// (corresponding to `[^a]+`) where all transitions *except* for `a` and -/// `b` loop back to itself. Thus, this state can be "accelerated" by simply -/// looking for the next occurrence of either `a` or `b` instead of explicitly -/// following transitions. (In this case, `b` transitions to the next state -/// where as `a` would transition to the dead state.) -#[derive(Clone)] -pub(crate) struct Accel { - /// The first byte is the length. Subsequent bytes are the accelerated - /// bytes. - /// - /// Note that we make every accelerator 8 bytes as a slightly wasteful - /// way of making sure alignment is always correct for state ID sizes of - /// 1, 2, 4 and 8. This should be okay since accelerated states aren't - /// particularly common, especially when Unicode is enabled. - bytes: [u8; ACCEL_CAP], -} - -impl Accel { - /// Returns an empty accel, where no bytes are accelerated. - #[cfg(feature = "dfa-build")] - pub fn new() -> Accel { - Accel { bytes: [0; ACCEL_CAP] } - } - - /// Returns a verified accelerator derived from the beginning of the given - /// slice. - /// - /// If the slice is not long enough or contains invalid bytes for an - /// accelerator, then this returns an error. - pub fn from_slice(mut slice: &[u8]) -> Result { - slice = &slice[..core::cmp::min(ACCEL_LEN, slice.len())]; - let bytes = slice - .try_into() - .map_err(|_| DeserializeError::buffer_too_small("accelerator"))?; - Accel::from_bytes(bytes) - } - - /// Returns a verified accelerator derived from raw bytes. - /// - /// If the given bytes are invalid, then this returns an error. - fn from_bytes(bytes: [u8; 4]) -> Result { - if usize::from(bytes[0]) >= ACCEL_LEN { - return Err(DeserializeError::generic( - "accelerator bytes cannot have length more than 3", - )); - } - Ok(Accel::from_bytes_unchecked(bytes)) - } - - /// Returns an accelerator derived from raw bytes. - /// - /// This does not check whether the given bytes are valid. Invalid bytes - /// cannot sacrifice memory safety, but may result in panics or silent - /// logic bugs. - fn from_bytes_unchecked(bytes: [u8; 4]) -> Accel { - Accel { bytes: [bytes[0], bytes[1], bytes[2], bytes[3], 0, 0, 0, 0] } - } - - /// Attempts to add the given byte to this accelerator. If the accelerator - /// is already full or thinks the byte is a poor accelerator, then this - /// returns false. Otherwise, returns true. 
- /// - /// If the given byte is already in this accelerator, then it panics. - #[cfg(feature = "dfa-build")] - pub fn add(&mut self, byte: u8) -> bool { - if self.len() >= 3 { - return false; - } - // As a special case, we totally reject trying to accelerate a state - // with an ASCII space. In most cases, it occurs very frequently, and - // tends to result in worse overall performance. - if byte == b' ' { - return false; - } - assert!( - !self.contains(byte), - "accelerator already contains {:?}", - crate::util::escape::DebugByte(byte) - ); - self.bytes[self.len() + 1] = byte; - self.bytes[0] += 1; - true - } - - /// Return the number of bytes in this accelerator. - pub fn len(&self) -> usize { - usize::from(self.bytes[0]) - } - - /// Returns true if and only if there are no bytes in this accelerator. - #[cfg(feature = "dfa-build")] - pub fn is_empty(&self) -> bool { - self.len() == 0 - } - - /// Returns the slice of bytes to accelerate. - /// - /// If this accelerator is empty, then this returns an empty slice. - fn needles(&self) -> &[u8] { - &self.bytes[1..1 + self.len()] - } - - /// Returns true if and only if this accelerator will accelerate the given - /// byte. - #[cfg(feature = "dfa-build")] - fn contains(&self, byte: u8) -> bool { - self.needles().iter().position(|&b| b == byte).is_some() - } - - /// Returns the accelerator bytes as an array of AccelTys. - #[cfg(feature = "dfa-build")] - fn as_accel_tys(&self) -> [AccelTy; 2] { - assert_eq!(ACCEL_CAP, 8); - // These unwraps are OK since ACCEL_CAP is set to 8. - let first = - AccelTy::from_ne_bytes(self.bytes[0..4].try_into().unwrap()); - let second = - AccelTy::from_ne_bytes(self.bytes[4..8].try_into().unwrap()); - [first, second] - } -} - -impl core::fmt::Debug for Accel { - fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { - write!(f, "Accel(")?; - let mut set = f.debug_set(); - for &b in self.needles() { - set.entry(&crate::util::escape::DebugByte(b)); - } - set.finish()?; - write!(f, ")") - } -} diff --git a/vendor/regex-automata/src/dfa/automaton.rs b/vendor/regex-automata/src/dfa/automaton.rs deleted file mode 100644 index 189700d83f05b9..00000000000000 --- a/vendor/regex-automata/src/dfa/automaton.rs +++ /dev/null @@ -1,2260 +0,0 @@ -#[cfg(feature = "alloc")] -use crate::util::search::PatternSet; -use crate::{ - dfa::search, - util::{ - empty, - prefilter::Prefilter, - primitives::{PatternID, StateID}, - search::{Anchored, HalfMatch, Input, MatchError}, - start, - }, -}; - -/// A trait describing the interface of a deterministic finite automaton (DFA). -/// -/// The complexity of this trait probably means that it's unlikely for others -/// to implement it. The primary purpose of the trait is to provide for a way -/// of abstracting over different types of DFAs. In this crate, that means -/// dense DFAs and sparse DFAs. (Dense DFAs are fast but memory hungry, where -/// as sparse DFAs are slower but come with a smaller memory footprint. But -/// they otherwise provide exactly equivalent expressive power.) For example, a -/// [`dfa::regex::Regex`](crate::dfa::regex::Regex) is generic over this trait. -/// -/// Normally, a DFA's execution model is very simple. You might have a single -/// start state, zero or more final or "match" states and a function that -/// transitions from one state to the next given the next byte of input. -/// Unfortunately, the interface described by this trait is significantly -/// more complicated than this. 
The complexity has a number of different -/// reasons, mostly motivated by performance, functionality or space savings: -/// -/// * A DFA can search for multiple patterns simultaneously. This -/// means extra information is returned when a match occurs. Namely, -/// a match is not just an offset, but an offset plus a pattern ID. -/// [`Automaton::pattern_len`] returns the number of patterns compiled into -/// the DFA, [`Automaton::match_len`] returns the total number of patterns -/// that match in a particular state and [`Automaton::match_pattern`] permits -/// iterating over the patterns that match in a particular state. -/// * A DFA can have multiple start states, and the choice of which start -/// state to use depends on the content of the string being searched and -/// position of the search, as well as whether the search is an anchored -/// search for a specific pattern in the DFA. Moreover, computing the start -/// state also depends on whether you're doing a forward or a reverse search. -/// [`Automaton::start_state_forward`] and [`Automaton::start_state_reverse`] -/// are used to compute the start state for forward and reverse searches, -/// respectively. -/// * All matches are delayed by one byte to support things like `$` and `\b` -/// at the end of a pattern. Therefore, every use of a DFA is required to use -/// [`Automaton::next_eoi_state`] -/// at the end of the search to compute the final transition. -/// * For optimization reasons, some states are treated specially. Every -/// state is either special or not, which can be determined via the -/// [`Automaton::is_special_state`] method. If it's special, then the state -/// must be at least one of a few possible types of states. (Note that some -/// types can overlap, for example, a match state can also be an accel state. -/// But some types can't. If a state is a dead state, then it can never be any -/// other type of state.) Those types are: -/// * A dead state. A dead state means the DFA will never enter a match -/// state. This can be queried via the [`Automaton::is_dead_state`] method. -/// * A quit state. A quit state occurs if the DFA had to stop the search -/// prematurely for some reason. This can be queried via the -/// [`Automaton::is_quit_state`] method. -/// * A match state. A match state occurs when a match is found. When a DFA -/// enters a match state, the search may stop immediately (when looking -/// for the earliest match), or it may continue to find the leftmost-first -/// match. This can be queried via the [`Automaton::is_match_state`] -/// method. -/// * A start state. A start state is where a search begins. For every -/// search, there is exactly one start state that is used, however, a -/// DFA may contain many start states. When the search is in a start -/// state, it may use a prefilter to quickly skip to candidate matches -/// without executing the DFA on every byte. This can be queried via the -/// [`Automaton::is_start_state`] method. -/// * An accel state. An accel state is a state that is accelerated. -/// That is, it is a state where _most_ of its transitions loop back to -/// itself and only a small number of transitions lead to other states. -/// This kind of state is said to be accelerated because a search routine -/// can quickly look for the bytes leading out of the state instead of -/// continuing to execute the DFA on each byte. This can be queried via the -/// [`Automaton::is_accel_state`] method. 
And the bytes that lead out of -/// the state can be queried via the [`Automaton::accelerator`] method. -/// -/// There are a number of provided methods on this trait that implement -/// efficient searching (for forwards and backwards) with a DFA using -/// all of the above features of this trait. In particular, given the -/// complexity of all these features, implementing a search routine in -/// this trait can be a little subtle. With that said, it is possible to -/// somewhat simplify the search routine. For example, handling accelerated -/// states is strictly optional, since it is always correct to assume that -/// `Automaton::is_accel_state` returns false. However, one complex part of -/// writing a search routine using this trait is handling the 1-byte delay of a -/// match. That is not optional. -/// -/// # Safety -/// -/// This trait is not safe to implement so that code may rely on the -/// correctness of implementations of this trait to avoid undefined behavior. -/// The primary correctness guarantees are: -/// -/// * `Automaton::start_state` always returns a valid state ID or an error or -/// panics. -/// * `Automaton::next_state`, when given a valid state ID, always returns -/// a valid state ID for all values of `anchored` and `byte`, or otherwise -/// panics. -/// -/// In general, the rest of the methods on `Automaton` need to uphold their -/// contracts as well. For example, `Automaton::is_dead` should only returns -/// true if the given state ID is actually a dead state. -pub unsafe trait Automaton { - /// Transitions from the current state to the next state, given the next - /// byte of input. - /// - /// Implementations must guarantee that the returned ID is always a valid - /// ID when `current` refers to a valid ID. Moreover, the transition - /// function must be defined for all possible values of `input`. - /// - /// # Panics - /// - /// If the given ID does not refer to a valid state, then this routine - /// may panic but it also may not panic and instead return an invalid ID. - /// However, if the caller provides an invalid ID then this must never - /// sacrifice memory safety. - /// - /// # Example - /// - /// This shows a simplistic example for walking a DFA for a given haystack - /// by using the `next_state` method. - /// - /// ``` - /// use regex_automata::{dfa::{Automaton, dense}, Input}; - /// - /// let dfa = dense::DFA::new(r"[a-z]+r")?; - /// let haystack = "bar".as_bytes(); - /// - /// // The start state is determined by inspecting the position and the - /// // initial bytes of the haystack. - /// let mut state = dfa.start_state_forward(&Input::new(haystack))?; - /// // Walk all the bytes in the haystack. - /// for &b in haystack { - /// state = dfa.next_state(state, b); - /// } - /// // Matches are always delayed by 1 byte, so we must explicitly walk the - /// // special "EOI" transition at the end of the search. - /// state = dfa.next_eoi_state(state); - /// assert!(dfa.is_match_state(state)); - /// - /// # Ok::<(), Box>(()) - /// ``` - fn next_state(&self, current: StateID, input: u8) -> StateID; - - /// Transitions from the current state to the next state, given the next - /// byte of input. - /// - /// Unlike [`Automaton::next_state`], implementations may implement this - /// more efficiently by assuming that the `current` state ID is valid. - /// Typically, this manifests by eliding bounds checks. - /// - /// # Safety - /// - /// Callers of this method must guarantee that `current` refers to a valid - /// state ID. 
If `current` is not a valid state ID for this automaton, then - /// calling this routine may result in undefined behavior. - /// - /// If `current` is valid, then implementations must guarantee that the ID - /// returned is valid for all possible values of `input`. - unsafe fn next_state_unchecked( - &self, - current: StateID, - input: u8, - ) -> StateID; - - /// Transitions from the current state to the next state for the special - /// EOI symbol. - /// - /// Implementations must guarantee that the returned ID is always a valid - /// ID when `current` refers to a valid ID. - /// - /// This routine must be called at the end of every search in a correct - /// implementation of search. Namely, DFAs in this crate delay matches - /// by one byte in order to support look-around operators. Thus, after - /// reaching the end of a haystack, a search implementation must follow one - /// last EOI transition. - /// - /// It is best to think of EOI as an additional symbol in the alphabet of - /// a DFA that is distinct from every other symbol. That is, the alphabet - /// of DFAs in this crate has a logical size of 257 instead of 256, where - /// 256 corresponds to every possible inhabitant of `u8`. (In practice, the - /// physical alphabet size may be smaller because of alphabet compression - /// via equivalence classes, but EOI is always represented somehow in the - /// alphabet.) - /// - /// # Panics - /// - /// If the given ID does not refer to a valid state, then this routine - /// may panic but it also may not panic and instead return an invalid ID. - /// However, if the caller provides an invalid ID then this must never - /// sacrifice memory safety. - /// - /// # Example - /// - /// This shows a simplistic example for walking a DFA for a given haystack, - /// and then finishing the search with the final EOI transition. - /// - /// ``` - /// use regex_automata::{dfa::{Automaton, dense}, Input}; - /// - /// let dfa = dense::DFA::new(r"[a-z]+r")?; - /// let haystack = "bar".as_bytes(); - /// - /// // The start state is determined by inspecting the position and the - /// // initial bytes of the haystack. - /// // - /// // The unwrap is OK because we aren't requesting a start state for a - /// // specific pattern. - /// let mut state = dfa.start_state_forward(&Input::new(haystack))?; - /// // Walk all the bytes in the haystack. - /// for &b in haystack { - /// state = dfa.next_state(state, b); - /// } - /// // Matches are always delayed by 1 byte, so we must explicitly walk - /// // the special "EOI" transition at the end of the search. Without this - /// // final transition, the assert below will fail since the DFA will not - /// // have entered a match state yet! - /// state = dfa.next_eoi_state(state); - /// assert!(dfa.is_match_state(state)); - /// - /// # Ok::<(), Box>(()) - /// ``` - fn next_eoi_state(&self, current: StateID) -> StateID; - - /// Return the ID of the start state for this DFA for the given starting - /// configuration. - /// - /// Unlike typical DFA implementations, the start state for DFAs in this - /// crate is dependent on a few different factors: - /// - /// * The [`Anchored`] mode of the search. Unanchored, anchored and - /// anchored searches for a specific [`PatternID`] all use different start - /// states. - /// * Whether a "look-behind" byte exists. For example, the `^` anchor - /// matches if and only if there is no look-behind byte. - /// * The specific value of that look-behind byte. 
For example, a `(?m:^)` - /// assertion only matches when there is either no look-behind byte, or - /// when the look-behind byte is a line terminator. - /// - /// The [starting configuration](start::Config) provides the above - /// information. - /// - /// This routine can be used for either forward or reverse searches. - /// Although, as a convenience, if you have an [`Input`], then it may - /// be more succinct to use [`Automaton::start_state_forward`] or - /// [`Automaton::start_state_reverse`]. Note, for example, that the - /// convenience routines return a [`MatchError`] on failure where as this - /// routine returns a [`StartError`]. - /// - /// # Errors - /// - /// This may return a [`StartError`] if the search needs to give up when - /// determining the start state (for example, if it sees a "quit" byte). - /// This can also return an error if the given configuration contains an - /// unsupported [`Anchored`] configuration. - fn start_state( - &self, - config: &start::Config, - ) -> Result; - - /// Return the ID of the start state for this DFA when executing a forward - /// search. - /// - /// This is a convenience routine for calling [`Automaton::start_state`] - /// that converts the given [`Input`] to a [start - /// configuration](start::Config). Additionally, if an error occurs, it is - /// converted from a [`StartError`] to a [`MatchError`] using the offset - /// information in the given [`Input`]. - /// - /// # Errors - /// - /// This may return a [`MatchError`] if the search needs to give up - /// when determining the start state (for example, if it sees a "quit" - /// byte). This can also return an error if the given `Input` contains an - /// unsupported [`Anchored`] configuration. - fn start_state_forward( - &self, - input: &Input<'_>, - ) -> Result { - let config = start::Config::from_input_forward(input); - self.start_state(&config).map_err(|err| match err { - StartError::Quit { byte } => { - let offset = input - .start() - .checked_sub(1) - .expect("no quit in start without look-behind"); - MatchError::quit(byte, offset) - } - StartError::UnsupportedAnchored { mode } => { - MatchError::unsupported_anchored(mode) - } - }) - } - - /// Return the ID of the start state for this DFA when executing a reverse - /// search. - /// - /// This is a convenience routine for calling [`Automaton::start_state`] - /// that converts the given [`Input`] to a [start - /// configuration](start::Config). Additionally, if an error occurs, it is - /// converted from a [`StartError`] to a [`MatchError`] using the offset - /// information in the given [`Input`]. - /// - /// # Errors - /// - /// This may return a [`MatchError`] if the search needs to give up - /// when determining the start state (for example, if it sees a "quit" - /// byte). This can also return an error if the given `Input` contains an - /// unsupported [`Anchored`] configuration. - fn start_state_reverse( - &self, - input: &Input<'_>, - ) -> Result { - let config = start::Config::from_input_reverse(input); - self.start_state(&config).map_err(|err| match err { - StartError::Quit { byte } => { - let offset = input.end(); - MatchError::quit(byte, offset) - } - StartError::UnsupportedAnchored { mode } => { - MatchError::unsupported_anchored(mode) - } - }) - } - - /// If this DFA has a universal starting state for the given anchor mode - /// and the DFA supports universal starting states, then this returns that - /// state's identifier. 
- /// - /// A DFA is said to have a universal starting state when the starting - /// state is invariant with respect to the haystack. Usually, the starting - /// state is chosen depending on the bytes immediately surrounding the - /// starting position of a search. However, the starting state only differs - /// when one or more of the patterns in the DFA have look-around assertions - /// in its prefix. - /// - /// Stated differently, if none of the patterns in a DFA have look-around - /// assertions in their prefix, then the DFA has a universal starting state - /// and _may_ be returned by this method. - /// - /// It is always correct for implementations to return `None`, and indeed, - /// this is what the default implementation does. When this returns `None`, - /// callers must use either `start_state_forward` or `start_state_reverse` - /// to get the starting state. - /// - /// # Use case - /// - /// There are a few reasons why one might want to use this: - /// - /// * If you know your regex patterns have no look-around assertions in - /// their prefix, then calling this routine is likely cheaper and perhaps - /// more semantically meaningful. - /// * When implementing prefilter support in a DFA regex implementation, - /// it is necessary to re-compute the start state after a candidate - /// is returned from the prefilter. However, this is only needed when - /// there isn't a universal start state. When one exists, one can avoid - /// re-computing the start state. - /// - /// # Example - /// - /// ``` - /// use regex_automata::{ - /// dfa::{Automaton, dense::DFA}, - /// Anchored, - /// }; - /// - /// // There are no look-around assertions in the prefixes of any of the - /// // patterns, so we get a universal start state. - /// let dfa = DFA::new_many(&["[0-9]+", "[a-z]+$", "[A-Z]+"])?; - /// assert!(dfa.universal_start_state(Anchored::No).is_some()); - /// assert!(dfa.universal_start_state(Anchored::Yes).is_some()); - /// - /// // One of the patterns has a look-around assertion in its prefix, - /// // so this means there is no longer a universal start state. - /// let dfa = DFA::new_many(&["[0-9]+", "^[a-z]+$", "[A-Z]+"])?; - /// assert!(!dfa.universal_start_state(Anchored::No).is_some()); - /// assert!(!dfa.universal_start_state(Anchored::Yes).is_some()); - /// # Ok::<(), Box>(()) - /// ``` - #[inline] - fn universal_start_state(&self, _mode: Anchored) -> Option { - None - } - - /// Returns true if and only if the given identifier corresponds to a - /// "special" state. A special state is one or more of the following: - /// a dead state, a quit state, a match state, a start state or an - /// accelerated state. - /// - /// A correct implementation _may_ always return false for states that - /// are either start states or accelerated states, since that information - /// is only intended to be used for optimization purposes. Correct - /// implementations must return true if the state is a dead, quit or match - /// state. This is because search routines using this trait must be able - /// to rely on `is_special_state` as an indicator that a state may need - /// special treatment. (For example, when a search routine sees a dead - /// state, it must terminate.) - /// - /// This routine permits search implementations to use a single branch to - /// check whether a state needs special attention before executing the next - /// transition. The example below shows how to do this. 
- /// - /// # Example - /// - /// This example shows how `is_special_state` can be used to implement a - /// correct search routine with minimal branching. In particular, this - /// search routine implements "leftmost" matching, which means that it - /// doesn't immediately stop once a match is found. Instead, it continues - /// until it reaches a dead state. - /// - /// ``` - /// use regex_automata::{ - /// dfa::{Automaton, dense}, - /// HalfMatch, MatchError, Input, - /// }; - /// - /// fn find( - /// dfa: &A, - /// haystack: &[u8], - /// ) -> Result, MatchError> { - /// // The start state is determined by inspecting the position and the - /// // initial bytes of the haystack. Note that start states can never - /// // be match states (since DFAs in this crate delay matches by 1 - /// // byte), so we don't need to check if the start state is a match. - /// let mut state = dfa.start_state_forward(&Input::new(haystack))?; - /// let mut last_match = None; - /// // Walk all the bytes in the haystack. We can quit early if we see - /// // a dead or a quit state. The former means the automaton will - /// // never transition to any other state. The latter means that the - /// // automaton entered a condition in which its search failed. - /// for (i, &b) in haystack.iter().enumerate() { - /// state = dfa.next_state(state, b); - /// if dfa.is_special_state(state) { - /// if dfa.is_match_state(state) { - /// last_match = Some(HalfMatch::new( - /// dfa.match_pattern(state, 0), - /// i, - /// )); - /// } else if dfa.is_dead_state(state) { - /// return Ok(last_match); - /// } else if dfa.is_quit_state(state) { - /// // It is possible to enter into a quit state after - /// // observing a match has occurred. In that case, we - /// // should return the match instead of an error. - /// if last_match.is_some() { - /// return Ok(last_match); - /// } - /// return Err(MatchError::quit(b, i)); - /// } - /// // Implementors may also want to check for start or accel - /// // states and handle them differently for performance - /// // reasons. But it is not necessary for correctness. - /// } - /// } - /// // Matches are always delayed by 1 byte, so we must explicitly walk - /// // the special "EOI" transition at the end of the search. - /// state = dfa.next_eoi_state(state); - /// if dfa.is_match_state(state) { - /// last_match = Some(HalfMatch::new( - /// dfa.match_pattern(state, 0), - /// haystack.len(), - /// )); - /// } - /// Ok(last_match) - /// } - /// - /// // We use a greedy '+' operator to show how the search doesn't just - /// // stop once a match is detected. It continues extending the match. - /// // Using '[a-z]+?' would also work as expected and stop the search - /// // early. Greediness is built into the automaton. - /// let dfa = dense::DFA::new(r"[a-z]+")?; - /// let haystack = "123 foobar 4567".as_bytes(); - /// let mat = find(&dfa, haystack)?.unwrap(); - /// assert_eq!(mat.pattern().as_usize(), 0); - /// assert_eq!(mat.offset(), 10); - /// - /// // Here's another example that tests our handling of the special EOI - /// // transition. This will fail to find a match if we don't call - /// // 'next_eoi_state' at the end of the search since the match isn't - /// // found until the final byte in the haystack. 
- /// let dfa = dense::DFA::new(r"[0-9]{4}")?; - /// let haystack = "123 foobar 4567".as_bytes(); - /// let mat = find(&dfa, haystack)?.unwrap(); - /// assert_eq!(mat.pattern().as_usize(), 0); - /// assert_eq!(mat.offset(), 15); - /// - /// // And note that our search implementation above automatically works - /// // with multi-DFAs. Namely, `dfa.match_pattern(match_state, 0)` selects - /// // the appropriate pattern ID for us. - /// let dfa = dense::DFA::new_many(&[r"[a-z]+", r"[0-9]+"])?; - /// let haystack = "123 foobar 4567".as_bytes(); - /// let mat = find(&dfa, haystack)?.unwrap(); - /// assert_eq!(mat.pattern().as_usize(), 1); - /// assert_eq!(mat.offset(), 3); - /// let mat = find(&dfa, &haystack[3..])?.unwrap(); - /// assert_eq!(mat.pattern().as_usize(), 0); - /// assert_eq!(mat.offset(), 7); - /// let mat = find(&dfa, &haystack[10..])?.unwrap(); - /// assert_eq!(mat.pattern().as_usize(), 1); - /// assert_eq!(mat.offset(), 5); - /// - /// # Ok::<(), Box>(()) - /// ``` - fn is_special_state(&self, id: StateID) -> bool; - - /// Returns true if and only if the given identifier corresponds to a dead - /// state. When a DFA enters a dead state, it is impossible to leave. That - /// is, every transition on a dead state by definition leads back to the - /// same dead state. - /// - /// In practice, the dead state always corresponds to the identifier `0`. - /// Moreover, in practice, there is only one dead state. - /// - /// The existence of a dead state is not strictly required in the classical - /// model of finite state machines, where one generally only cares about - /// the question of whether an input sequence matches or not. Dead states - /// are not needed to answer that question, since one can immediately quit - /// as soon as one enters a final or "match" state. However, we don't just - /// care about matches but also care about the location of matches, and - /// more specifically, care about semantics like "greedy" matching. - /// - /// For example, given the pattern `a+` and the input `aaaz`, the dead - /// state won't be entered until the state machine reaches `z` in the - /// input, at which point, the search routine can quit. But without the - /// dead state, the search routine wouldn't know when to quit. In a - /// classical representation, the search routine would stop after seeing - /// the first `a` (which is when the search would enter a match state). But - /// this wouldn't implement "greedy" matching where `a+` matches as many - /// `a`'s as possible. - /// - /// # Example - /// - /// See the example for [`Automaton::is_special_state`] for how to use this - /// method correctly. - fn is_dead_state(&self, id: StateID) -> bool; - - /// Returns true if and only if the given identifier corresponds to a quit - /// state. A quit state is like a dead state (it has no transitions other - /// than to itself), except it indicates that the DFA failed to complete - /// the search. When this occurs, callers can neither accept or reject that - /// a match occurred. - /// - /// In practice, the quit state always corresponds to the state immediately - /// following the dead state. (Which is not usually represented by `1`, - /// since state identifiers are pre-multiplied by the state machine's - /// alphabet stride, and the alphabet stride varies between DFAs.) 
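A small sketch of how that premultiplication plays out for a dense DFA, assuming the documented layout in which the dead state occupies state index `0`; the arithmetic below is illustrative rather than an API guarantee.

```rust
use regex_automata::{dfa::{dense, Automaton}, util::primitives::StateID};

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let dfa = dense::DFA::new(r"[a-z]+")?;
    // State index `i` corresponds to identifier `i * dfa.stride()`. Index 0
    // is the dead state in practice, and, per the docs above, the quit state
    // is the state immediately after it, i.e. identifier `1 * stride`.
    let stride = dfa.stride();
    let dead = StateID::new(0 * stride).unwrap();
    assert!(dfa.is_dead_state(dead));
    // Dividing a premultiplied identifier by the stride recovers the index.
    assert_eq!(dead.as_usize() / stride, 0);
    Ok(())
}
```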
- /// - /// The typical way in which a quit state can occur is when heuristic - /// support for Unicode word boundaries is enabled via the - /// [`dense::Config::unicode_word_boundary`](crate::dfa::dense::Config::unicode_word_boundary) - /// option. But other options, like the lower level - /// [`dense::Config::quit`](crate::dfa::dense::Config::quit) - /// configuration, can also result in a quit state being entered. The - /// purpose of the quit state is to provide a way to execute a fast DFA - /// in common cases while delegating to slower routines when the DFA quits. - /// - /// The default search implementations provided by this crate will return a - /// [`MatchError::quit`] error when a quit state is entered. - /// - /// # Example - /// - /// See the example for [`Automaton::is_special_state`] for how to use this - /// method correctly. - fn is_quit_state(&self, id: StateID) -> bool; - - /// Returns true if and only if the given identifier corresponds to a - /// match state. A match state is also referred to as a "final" state and - /// indicates that a match has been found. - /// - /// If all you care about is whether a particular pattern matches in the - /// input sequence, then a search routine can quit early as soon as the - /// machine enters a match state. However, if you're looking for the - /// standard "leftmost-first" match location, then search _must_ continue - /// until either the end of the input or until the machine enters a dead - /// state. (Since either condition implies that no other useful work can - /// be done.) Namely, when looking for the location of a match, then - /// search implementations should record the most recent location in - /// which a match state was entered, but otherwise continue executing the - /// search as normal. (The search may even leave the match state.) Once - /// the termination condition is reached, the most recently recorded match - /// location should be returned. - /// - /// Finally, one additional power given to match states in this crate - /// is that they are always associated with a specific pattern in order - /// to support multi-DFAs. See [`Automaton::match_pattern`] for more - /// details and an example for how to query the pattern associated with a - /// particular match state. - /// - /// # Example - /// - /// See the example for [`Automaton::is_special_state`] for how to use this - /// method correctly. - fn is_match_state(&self, id: StateID) -> bool; - - /// Returns true only if the given identifier corresponds to a start - /// state - /// - /// A start state is a state in which a DFA begins a search. - /// All searches begin in a start state. Moreover, since all matches are - /// delayed by one byte, a start state can never be a match state. - /// - /// The main role of a start state is, as mentioned, to be a starting - /// point for a DFA. This starting point is determined via one of - /// [`Automaton::start_state_forward`] or - /// [`Automaton::start_state_reverse`], depending on whether one is doing - /// a forward or a reverse search, respectively. - /// - /// A secondary use of start states is for prefix acceleration. Namely, - /// while executing a search, if one detects that you're in a start state, - /// then it may be faster to look for the next match of a prefix of the - /// pattern, if one exists. If a prefix exists and since all matches must - /// begin with that prefix, then skipping ahead to occurrences of that - /// prefix may be much faster than executing the DFA. 
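For comparison with writing the skip-ahead by hand (as the example further below does), here is a hedged sketch of leaning on the crate's built-in prefilter support instead; the exact `Prefilter::new` signature used here (a match kind plus a slice of literal needles) is an assumption, not something established by the surrounding docs.

```rust
use regex_automata::{
    dfa::{dense, Automaton},
    util::prefilter::Prefilter,
    HalfMatch, Input, MatchKind,
};

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Build a prefilter from the literal prefix shared by every match of the
    // pattern. `Prefilter::new` is assumed to return an `Option`, which is
    // what the dense config expects.
    let pre = Prefilter::new(MatchKind::LeftmostFirst, &["Z"]);
    let dfa = dense::DFA::builder()
        .configure(dense::DFA::config().prefilter(pre))
        .build(r"Z[a-z]+")?;
    // The provided search routines consult the prefilter internally, so no
    // custom search loop is needed.
    let input = Input::new("123 foobar Zbaz quux");
    assert_eq!(Some(HalfMatch::must(0, 15)), dfa.try_search_fwd(&input)?);
    Ok(())
}
```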
- /// - /// As mentioned in the documentation for - /// [`is_special_state`](Automaton::is_special_state) implementations - /// _may_ always return false, even if the given identifier is a start - /// state. This is because knowing whether a state is a start state or not - /// is not necessary for correctness and is only treated as a potential - /// performance optimization. (For example, the implementations of this - /// trait in this crate will only return true when the given identifier - /// corresponds to a start state and when [specialization of start - /// states](crate::dfa::dense::Config::specialize_start_states) was enabled - /// during DFA construction. If start state specialization is disabled - /// (which is the default), then this method will always return false.) - /// - /// # Example - /// - /// This example shows how to implement your own search routine that does - /// a prefix search whenever the search enters a start state. - /// - /// Note that you do not need to implement your own search routine - /// to make use of prefilters like this. The search routines - /// provided by this crate already implement prefilter support via - /// the [`Prefilter`](crate::util::prefilter::Prefilter) trait. - /// A prefilter can be added to your search configuration with - /// [`dense::Config::prefilter`](crate::dfa::dense::Config::prefilter) for - /// dense and sparse DFAs in this crate. - /// - /// This example is meant to show how you might deal with prefilters in a - /// simplified case if you are implementing your own search routine. - /// - /// ``` - /// use regex_automata::{ - /// dfa::{Automaton, dense}, - /// HalfMatch, MatchError, Input, - /// }; - /// - /// fn find_byte(slice: &[u8], at: usize, byte: u8) -> Option { - /// // Would be faster to use the memchr crate, but this is still - /// // faster than running through the DFA. - /// slice[at..].iter().position(|&b| b == byte).map(|i| at + i) - /// } - /// - /// fn find( - /// dfa: &A, - /// haystack: &[u8], - /// prefix_byte: Option, - /// ) -> Result, MatchError> { - /// // See the Automaton::is_special_state example for similar code - /// // with more comments. - /// - /// let mut state = dfa.start_state_forward(&Input::new(haystack))?; - /// let mut last_match = None; - /// let mut pos = 0; - /// while pos < haystack.len() { - /// let b = haystack[pos]; - /// state = dfa.next_state(state, b); - /// pos += 1; - /// if dfa.is_special_state(state) { - /// if dfa.is_match_state(state) { - /// last_match = Some(HalfMatch::new( - /// dfa.match_pattern(state, 0), - /// pos - 1, - /// )); - /// } else if dfa.is_dead_state(state) { - /// return Ok(last_match); - /// } else if dfa.is_quit_state(state) { - /// // It is possible to enter into a quit state after - /// // observing a match has occurred. In that case, we - /// // should return the match instead of an error. - /// if last_match.is_some() { - /// return Ok(last_match); - /// } - /// return Err(MatchError::quit(b, pos - 1)); - /// } else if dfa.is_start_state(state) { - /// // If we're in a start state and know all matches begin - /// // with a particular byte, then we can quickly skip to - /// // candidate matches without running the DFA through - /// // every byte inbetween. 
- /// if let Some(prefix_byte) = prefix_byte { - /// pos = match find_byte(haystack, pos, prefix_byte) { - /// Some(pos) => pos, - /// None => break, - /// }; - /// } - /// } - /// } - /// } - /// // Matches are always delayed by 1 byte, so we must explicitly walk - /// // the special "EOI" transition at the end of the search. - /// state = dfa.next_eoi_state(state); - /// if dfa.is_match_state(state) { - /// last_match = Some(HalfMatch::new( - /// dfa.match_pattern(state, 0), - /// haystack.len(), - /// )); - /// } - /// Ok(last_match) - /// } - /// - /// // In this example, it's obvious that all occurrences of our pattern - /// // begin with 'Z', so we pass in 'Z'. Note also that we need to - /// // enable start state specialization, or else it won't be possible to - /// // detect start states during a search. ('is_start_state' would always - /// // return false.) - /// let dfa = dense::DFA::builder() - /// .configure(dense::DFA::config().specialize_start_states(true)) - /// .build(r"Z[a-z]+")?; - /// let haystack = "123 foobar Zbaz quux".as_bytes(); - /// let mat = find(&dfa, haystack, Some(b'Z'))?.unwrap(); - /// assert_eq!(mat.pattern().as_usize(), 0); - /// assert_eq!(mat.offset(), 15); - /// - /// // But note that we don't need to pass in a prefix byte. If we don't, - /// // then the search routine does no acceleration. - /// let mat = find(&dfa, haystack, None)?.unwrap(); - /// assert_eq!(mat.pattern().as_usize(), 0); - /// assert_eq!(mat.offset(), 15); - /// - /// // However, if we pass an incorrect byte, then the prefix search will - /// // result in incorrect results. - /// assert_eq!(find(&dfa, haystack, Some(b'X'))?, None); - /// - /// # Ok::<(), Box>(()) - /// ``` - fn is_start_state(&self, id: StateID) -> bool; - - /// Returns true if and only if the given identifier corresponds to an - /// accelerated state. - /// - /// An accelerated state is a special optimization - /// trick implemented by this crate. Namely, if - /// [`dense::Config::accelerate`](crate::dfa::dense::Config::accelerate) is - /// enabled (and it is by default), then DFAs generated by this crate will - /// tag states meeting certain characteristics as accelerated. States meet - /// this criteria whenever most of their transitions are self-transitions. - /// That is, transitions that loop back to the same state. When a small - /// number of transitions aren't self-transitions, then it follows that - /// there are only a small number of bytes that can cause the DFA to leave - /// that state. Thus, there is an opportunity to look for those bytes - /// using more optimized routines rather than continuing to run through - /// the DFA. This trick is similar to the prefilter idea described in - /// the documentation of [`Automaton::is_start_state`] with two main - /// differences: - /// - /// 1. It is more limited since acceleration only applies to single bytes. - /// This means states are rarely accelerated when Unicode mode is enabled - /// (which is enabled by default). - /// 2. It can occur anywhere in the DFA, which increases optimization - /// opportunities. - /// - /// Like the prefilter idea, the main downside (and a possible reason to - /// disable it) is that it can lead to worse performance in some cases. - /// Namely, if a state is accelerated for very common bytes, then the - /// overhead of checking for acceleration and using the more optimized - /// routines to look for those bytes can cause overall performance to be - /// worse than if acceleration wasn't enabled at all. 
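Since acceleration is purely an optimization, it can simply be switched off when profiling shows it hurting. A brief sketch, reusing the syntax settings from the accelerator example further below and assuming `dense::Config::accelerate` takes a boolean like the other config knobs:

```rust
use regex_automata::{
    dfa::{dense, Automaton},
    util::syntax,
    HalfMatch, Input,
};

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Same pattern as the accelerator discussion below, with Unicode disabled
    // through the syntax config instead of an inline `(?-u)` flag, but with
    // the acceleration optimization switched off. The match is unchanged;
    // only the execution strategy differs.
    let dfa = dense::Builder::new()
        .syntax(syntax::Config::new().unicode(false).utf8(false))
        .configure(dense::Config::new().accelerate(false))
        .build(r"[^a]+a")?;
    let got = dfa.try_search_fwd(&Input::new(b"bcd a"))?;
    assert_eq!(Some(HalfMatch::must(0, 5)), got);
    Ok(())
}
```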
- /// - /// A simple example of a regex that has an accelerated state is - /// `(?-u)[^a]+a`. Namely, the `[^a]+` sub-expression gets compiled down - /// into a single state where all transitions except for `a` loop back to - /// itself, and where `a` is the only transition (other than the special - /// EOI transition) that goes to some other state. Thus, this state can - /// be accelerated and implemented more efficiently by calling an - /// optimized routine like `memchr` with `a` as the needle. Notice that - /// the `(?-u)` to disable Unicode is necessary here, as without it, - /// `[^a]` will match any UTF-8 encoding of any Unicode scalar value other - /// than `a`. This more complicated expression compiles down to many DFA - /// states and the simple acceleration optimization is no longer available. - /// - /// Typically, this routine is used to guard calls to - /// [`Automaton::accelerator`], which returns the accelerated bytes for - /// the specified state. - fn is_accel_state(&self, id: StateID) -> bool; - - /// Returns the total number of patterns compiled into this DFA. - /// - /// In the case of a DFA that contains no patterns, this must return `0`. - /// - /// # Example - /// - /// This example shows the pattern length for a DFA that never matches: - /// - /// ``` - /// use regex_automata::dfa::{Automaton, dense::DFA}; - /// - /// let dfa: DFA> = DFA::never_match()?; - /// assert_eq!(dfa.pattern_len(), 0); - /// # Ok::<(), Box>(()) - /// ``` - /// - /// And another example for a DFA that matches at every position: - /// - /// ``` - /// use regex_automata::dfa::{Automaton, dense::DFA}; - /// - /// let dfa: DFA> = DFA::always_match()?; - /// assert_eq!(dfa.pattern_len(), 1); - /// # Ok::<(), Box>(()) - /// ``` - /// - /// And finally, a DFA that was constructed from multiple patterns: - /// - /// ``` - /// use regex_automata::dfa::{Automaton, dense::DFA}; - /// - /// let dfa = DFA::new_many(&["[0-9]+", "[a-z]+", "[A-Z]+"])?; - /// assert_eq!(dfa.pattern_len(), 3); - /// # Ok::<(), Box>(()) - /// ``` - fn pattern_len(&self) -> usize; - - /// Returns the total number of patterns that match in this state. - /// - /// If the given state is not a match state, then implementations may - /// panic. - /// - /// If the DFA was compiled with one pattern, then this must necessarily - /// always return `1` for all match states. - /// - /// Implementations must guarantee that [`Automaton::match_pattern`] can be - /// called with indices up to (but not including) the length returned by - /// this routine without panicking. - /// - /// # Panics - /// - /// Implementations are permitted to panic if the provided state ID does - /// not correspond to a match state. - /// - /// # Example - /// - /// This example shows a simple instance of implementing overlapping - /// matches. In particular, it shows not only how to determine how many - /// patterns have matched in a particular state, but also how to access - /// which specific patterns have matched. - /// - /// Notice that we must use - /// [`MatchKind::All`](crate::MatchKind::All) - /// when building the DFA. If we used - /// [`MatchKind::LeftmostFirst`](crate::MatchKind::LeftmostFirst) - /// instead, then the DFA would not be constructed in a way that - /// supports overlapping matches. (It would only report a single pattern - /// that matches at any particular point in time.) - /// - /// Another thing to take note of is the patterns used and the order in - /// which the pattern IDs are reported. 
In the example below, pattern `3` - /// is yielded first. Why? Because it corresponds to the match that - /// appears first. Namely, the `@` symbol is part of `\S+` but not part - /// of any of the other patterns. Since the `\S+` pattern has a match that - /// starts to the left of any other pattern, its ID is returned before any - /// other. - /// - /// ``` - /// # if cfg!(miri) { return Ok(()); } // miri takes too long - /// use regex_automata::{dfa::{Automaton, dense}, Input, MatchKind}; - /// - /// let dfa = dense::Builder::new() - /// .configure(dense::Config::new().match_kind(MatchKind::All)) - /// .build_many(&[ - /// r"[[:word:]]+", r"[a-z]+", r"[A-Z]+", r"[[:^space:]]+", - /// ])?; - /// let haystack = "@bar".as_bytes(); - /// - /// // The start state is determined by inspecting the position and the - /// // initial bytes of the haystack. - /// let mut state = dfa.start_state_forward(&Input::new(haystack))?; - /// // Walk all the bytes in the haystack. - /// for &b in haystack { - /// state = dfa.next_state(state, b); - /// } - /// state = dfa.next_eoi_state(state); - /// - /// assert!(dfa.is_match_state(state)); - /// assert_eq!(dfa.match_len(state), 3); - /// // The following calls are guaranteed to not panic since `match_len` - /// // returned `3` above. - /// assert_eq!(dfa.match_pattern(state, 0).as_usize(), 3); - /// assert_eq!(dfa.match_pattern(state, 1).as_usize(), 0); - /// assert_eq!(dfa.match_pattern(state, 2).as_usize(), 1); - /// - /// # Ok::<(), Box>(()) - /// ``` - fn match_len(&self, id: StateID) -> usize; - - /// Returns the pattern ID corresponding to the given match index in the - /// given state. - /// - /// See [`Automaton::match_len`] for an example of how to use this - /// method correctly. Note that if you know your DFA is compiled with a - /// single pattern, then this routine is never necessary since it will - /// always return a pattern ID of `0` for an index of `0` when `id` - /// corresponds to a match state. - /// - /// Typically, this routine is used when implementing an overlapping - /// search, as the example for `Automaton::match_len` does. - /// - /// # Panics - /// - /// If the state ID is not a match state or if the match index is out - /// of bounds for the given state, then this routine may either panic - /// or produce an incorrect result. If the state ID is correct and the - /// match index is correct, then this routine must always produce a valid - /// `PatternID`. - fn match_pattern(&self, id: StateID, index: usize) -> PatternID; - - /// Returns true if and only if this automaton can match the empty string. - /// When it returns false, all possible matches are guaranteed to have a - /// non-zero length. - /// - /// This is useful as cheap way to know whether code needs to handle the - /// case of a zero length match. This is particularly important when UTF-8 - /// modes are enabled, as when UTF-8 mode is enabled, empty matches that - /// split a codepoint must never be reported. This extra handling can - /// sometimes be costly, and since regexes matching an empty string are - /// somewhat rare, it can be beneficial to treat such regexes specially. - /// - /// # Example - /// - /// This example shows a few different DFAs and whether they match the - /// empty string or not. Notice the empty string isn't merely a matter - /// of a string of length literally `0`, but rather, whether a match can - /// occur between specific pairs of bytes. 
- /// - /// ``` - /// use regex_automata::{dfa::{dense::DFA, Automaton}, util::syntax}; - /// - /// // The empty regex matches the empty string. - /// let dfa = DFA::new("")?; - /// assert!(dfa.has_empty(), "empty matches empty"); - /// // The '+' repetition operator requires at least one match, and so - /// // does not match the empty string. - /// let dfa = DFA::new("a+")?; - /// assert!(!dfa.has_empty(), "+ does not match empty"); - /// // But the '*' repetition operator does. - /// let dfa = DFA::new("a*")?; - /// assert!(dfa.has_empty(), "* does match empty"); - /// // And wrapping '+' in an operator that can match an empty string also - /// // causes it to match the empty string too. - /// let dfa = DFA::new("(a+)*")?; - /// assert!(dfa.has_empty(), "+ inside of * matches empty"); - /// - /// // If a regex is just made of a look-around assertion, even if the - /// // assertion requires some kind of non-empty string around it (such as - /// // \b), then it is still treated as if it matches the empty string. - /// // Namely, if a match occurs of just a look-around assertion, then the - /// // match returned is empty. - /// let dfa = DFA::builder() - /// .configure(DFA::config().unicode_word_boundary(true)) - /// .syntax(syntax::Config::new().utf8(false)) - /// .build(r"^$\A\z\b\B(?-u:\b\B)")?; - /// assert!(dfa.has_empty(), "assertions match empty"); - /// // Even when an assertion is wrapped in a '+', it still matches the - /// // empty string. - /// let dfa = DFA::new(r"^+")?; - /// assert!(dfa.has_empty(), "+ of an assertion matches empty"); - /// - /// // An alternation with even one branch that can match the empty string - /// // is also said to match the empty string overall. - /// let dfa = DFA::new("foo|(bar)?|quux")?; - /// assert!(dfa.has_empty(), "alternations can match empty"); - /// - /// // An NFA that matches nothing does not match the empty string. - /// let dfa = DFA::new("[a&&b]")?; - /// assert!(!dfa.has_empty(), "never matching means not matching empty"); - /// // But if it's wrapped in something that doesn't require a match at - /// // all, then it can match the empty string! - /// let dfa = DFA::new("[a&&b]*")?; - /// assert!(dfa.has_empty(), "* on never-match still matches empty"); - /// // Since a '+' requires a match, using it on something that can never - /// // match will itself produce a regex that can never match anything, - /// // and thus does not match the empty string. - /// let dfa = DFA::new("[a&&b]+")?; - /// assert!(!dfa.has_empty(), "+ on never-match still matches nothing"); - /// - /// # Ok::<(), Box>(()) - /// ``` - fn has_empty(&self) -> bool; - - /// Whether UTF-8 mode is enabled for this DFA or not. - /// - /// When UTF-8 mode is enabled, all matches reported by a DFA are - /// guaranteed to correspond to spans of valid UTF-8. This includes - /// zero-width matches. For example, the DFA must guarantee that the empty - /// regex will not match at the positions between code units in the UTF-8 - /// encoding of a single codepoint. - /// - /// See [`thompson::Config::utf8`](crate::nfa::thompson::Config::utf8) for - /// more information. - /// - /// # Example - /// - /// This example shows how UTF-8 mode can impact the match spans that may - /// be reported in certain cases. - /// - /// ``` - /// use regex_automata::{ - /// dfa::{dense::DFA, Automaton}, - /// nfa::thompson, - /// HalfMatch, Input, - /// }; - /// - /// // UTF-8 mode is enabled by default. 
- /// let re = DFA::new("")?; - /// assert!(re.is_utf8()); - /// let mut input = Input::new("☃"); - /// let got = re.try_search_fwd(&input)?; - /// assert_eq!(Some(HalfMatch::must(0, 0)), got); - /// - /// // Even though an empty regex matches at 1..1, our next match is - /// // 3..3 because 1..1 and 2..2 split the snowman codepoint (which is - /// // three bytes long). - /// input.set_start(1); - /// let got = re.try_search_fwd(&input)?; - /// assert_eq!(Some(HalfMatch::must(0, 3)), got); - /// - /// // But if we disable UTF-8, then we'll get matches at 1..1 and 2..2: - /// let re = DFA::builder() - /// .thompson(thompson::Config::new().utf8(false)) - /// .build("")?; - /// assert!(!re.is_utf8()); - /// let got = re.try_search_fwd(&input)?; - /// assert_eq!(Some(HalfMatch::must(0, 1)), got); - /// - /// input.set_start(2); - /// let got = re.try_search_fwd(&input)?; - /// assert_eq!(Some(HalfMatch::must(0, 2)), got); - /// - /// input.set_start(3); - /// let got = re.try_search_fwd(&input)?; - /// assert_eq!(Some(HalfMatch::must(0, 3)), got); - /// - /// input.set_start(4); - /// let got = re.try_search_fwd(&input)?; - /// assert_eq!(None, got); - /// - /// # Ok::<(), Box>(()) - /// ``` - fn is_utf8(&self) -> bool; - - /// Returns true if and only if this DFA is limited to returning matches - /// whose start position is `0`. - /// - /// Note that if you're using DFAs provided by - /// this crate, then this is _orthogonal_ to - /// [`Config::start_kind`](crate::dfa::dense::Config::start_kind). - /// - /// This is useful in some cases because if a DFA is limited to producing - /// matches that start at offset `0`, then a reverse search is never - /// required for finding the start of a match. - /// - /// # Example - /// - /// ``` - /// use regex_automata::dfa::{dense::DFA, Automaton}; - /// - /// // The empty regex matches anywhere - /// let dfa = DFA::new("")?; - /// assert!(!dfa.is_always_start_anchored(), "empty matches anywhere"); - /// // 'a' matches anywhere. - /// let dfa = DFA::new("a")?; - /// assert!(!dfa.is_always_start_anchored(), "'a' matches anywhere"); - /// // '^' only matches at offset 0! - /// let dfa = DFA::new("^a")?; - /// assert!(dfa.is_always_start_anchored(), "'^a' matches only at 0"); - /// // But '(?m:^)' matches at 0 but at other offsets too. - /// let dfa = DFA::new("(?m:^)a")?; - /// assert!(!dfa.is_always_start_anchored(), "'(?m:^)a' matches anywhere"); - /// - /// # Ok::<(), Box>(()) - /// ``` - fn is_always_start_anchored(&self) -> bool; - - /// Return a slice of bytes to accelerate for the given state, if possible. - /// - /// If the given state has no accelerator, then an empty slice must be - /// returned. If `Automaton::is_accel_state` returns true for the given ID, - /// then this routine _must_ return a non-empty slice. But note that it is - /// not required for an implementation of this trait to ever return `true` - /// for `is_accel_state`, even if the state _could_ be accelerated. That - /// is, acceleration is an optional optimization. But the return values of - /// `is_accel_state` and `accelerator` must be in sync. - /// - /// If the given ID is not a valid state ID for this automaton, then - /// implementations may panic or produce incorrect results. - /// - /// See [`Automaton::is_accel_state`] for more details on state - /// acceleration. - /// - /// By default, this method will always return an empty slice. 
- /// - /// # Example - /// - /// This example shows a contrived case in which we build a regex that we - /// know is accelerated and extract the accelerator from a state. - /// - /// ``` - /// use regex_automata::{ - /// dfa::{Automaton, dense}, - /// util::{primitives::StateID, syntax}, - /// }; - /// - /// let dfa = dense::Builder::new() - /// // We disable Unicode everywhere and permit the regex to match - /// // invalid UTF-8. e.g., [^abc] matches \xFF, which is not valid - /// // UTF-8. If we left Unicode enabled, [^abc] would match any UTF-8 - /// // encoding of any Unicode scalar value except for 'a', 'b' or 'c'. - /// // That translates to a much more complicated DFA, and also - /// // inhibits the 'accelerator' optimization that we are trying to - /// // demonstrate in this example. - /// .syntax(syntax::Config::new().unicode(false).utf8(false)) - /// .build("[^abc]+a")?; - /// - /// // Here we just pluck out the state that we know is accelerated. - /// // While the stride calculations are something that can be relied - /// // on by callers, the specific position of the accelerated state is - /// // implementation defined. - /// // - /// // N.B. We get '3' by inspecting the state machine using 'regex-cli'. - /// // e.g., try `regex-cli debug dense dfa -p '[^abc]+a' -BbUC`. - /// let id = StateID::new(3 * dfa.stride()).unwrap(); - /// let accelerator = dfa.accelerator(id); - /// // The `[^abc]+` sub-expression permits [a, b, c] to be accelerated. - /// assert_eq!(accelerator, &[b'a', b'b', b'c']); - /// # Ok::<(), Box>(()) - /// ``` - #[inline] - fn accelerator(&self, _id: StateID) -> &[u8] { - &[] - } - - /// Returns the prefilter associated with a DFA, if one exists. - /// - /// The default implementation of this trait always returns `None`. And - /// indeed, it is always correct to return `None`. - /// - /// For DFAs in this crate, a prefilter can be attached to a DFA via - /// [`dense::Config::prefilter`](crate::dfa::dense::Config::prefilter). - /// - /// Do note that prefilters are not serialized by DFAs in this crate. - /// So if you deserialize a DFA that had a prefilter attached to it - /// at serialization time, then it will not have a prefilter after - /// deserialization. - #[inline] - fn get_prefilter(&self) -> Option<&Prefilter> { - None - } - - /// Executes a forward search and returns the end position of the leftmost - /// match that is found. If no match exists, then `None` is returned. - /// - /// In particular, this method continues searching even after it enters - /// a match state. The search only terminates once it has reached the - /// end of the input or when it has entered a dead or quit state. Upon - /// termination, the position of the last byte seen while still in a match - /// state is returned. - /// - /// # Errors - /// - /// This routine errors if the search could not complete. This can occur - /// in a number of circumstances: - /// - /// * The configuration of the DFA may permit it to "quit" the search. - /// For example, setting quit bytes or enabling heuristic support for - /// Unicode word boundaries. The default configuration does not enable any - /// option that could result in the DFA quitting. - /// * When the provided `Input` configuration is not supported. For - /// example, by providing an unsupported anchor mode. - /// - /// When a search returns an error, callers cannot know whether a match - /// exists or not. 
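As a concrete illustration of the first failure mode listed above, here is a hedged sketch with a quit byte configured explicitly; the exact reported offset (the position of the offending byte) is treated as an assumption rather than a documented guarantee.

```rust
use regex_automata::{dfa::{dense, Automaton}, Input, MatchError};

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Treat '\n' as a quit byte: reaching it sends the DFA into its quit
    // state, and the provided search gives up with an error rather than
    // reporting a match or a non-match.
    let dfa = dense::Builder::new()
        .configure(dense::Config::new().quit(b'\n', true))
        .build(r"foo[0-9]+")?;
    let err = dfa.try_search_fwd(&Input::new(b"fo\no123")).unwrap_err();
    // Assumption: the error carries the quit byte and the position at which
    // it was seen (index 2 here).
    assert_eq!(err, MatchError::quit(b'\n', 2));
    Ok(())
}
```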
- /// - /// # Notes for implementors - /// - /// Implementors of this trait are not required to implement any particular - /// match semantics (such as leftmost-first), which are instead manifest in - /// the DFA's transitions. But this search routine should behave as a - /// general "leftmost" search. - /// - /// In particular, this method must continue searching even after it enters - /// a match state. The search should only terminate once it has reached - /// the end of the input or when it has entered a dead or quit state. Upon - /// termination, the position of the last byte seen while still in a match - /// state is returned. - /// - /// Since this trait provides an implementation for this method by default, - /// it's unlikely that one will need to implement this. - /// - /// # Example - /// - /// This example shows how to use this method with a - /// [`dense::DFA`](crate::dfa::dense::DFA). - /// - /// ``` - /// use regex_automata::{dfa::{Automaton, dense}, HalfMatch, Input}; - /// - /// let dfa = dense::DFA::new("foo[0-9]+")?; - /// let expected = Some(HalfMatch::must(0, 8)); - /// assert_eq!(expected, dfa.try_search_fwd(&Input::new(b"foo12345"))?); - /// - /// // Even though a match is found after reading the first byte (`a`), - /// // the leftmost first match semantics demand that we find the earliest - /// // match that prefers earlier parts of the pattern over latter parts. - /// let dfa = dense::DFA::new("abc|a")?; - /// let expected = Some(HalfMatch::must(0, 3)); - /// assert_eq!(expected, dfa.try_search_fwd(&Input::new(b"abc"))?); - /// - /// # Ok::<(), Box>(()) - /// ``` - /// - /// # Example: specific pattern search - /// - /// This example shows how to build a multi-DFA that permits searching for - /// specific patterns. - /// - /// ``` - /// # if cfg!(miri) { return Ok(()); } // miri takes too long - /// use regex_automata::{ - /// dfa::{Automaton, dense}, - /// Anchored, HalfMatch, PatternID, Input, - /// }; - /// - /// let dfa = dense::Builder::new() - /// .configure(dense::Config::new().starts_for_each_pattern(true)) - /// .build_many(&["[a-z0-9]{6}", "[a-z][a-z0-9]{5}"])?; - /// let haystack = "foo123".as_bytes(); - /// - /// // Since we are using the default leftmost-first match and both - /// // patterns match at the same starting position, only the first pattern - /// // will be returned in this case when doing a search for any of the - /// // patterns. - /// let expected = Some(HalfMatch::must(0, 6)); - /// let got = dfa.try_search_fwd(&Input::new(haystack))?; - /// assert_eq!(expected, got); - /// - /// // But if we want to check whether some other pattern matches, then we - /// // can provide its pattern ID. - /// let input = Input::new(haystack) - /// .anchored(Anchored::Pattern(PatternID::must(1))); - /// let expected = Some(HalfMatch::must(1, 6)); - /// let got = dfa.try_search_fwd(&input)?; - /// assert_eq!(expected, got); - /// - /// # Ok::<(), Box>(()) - /// ``` - /// - /// # Example: specifying the bounds of a search - /// - /// This example shows how providing the bounds of a search can produce - /// different results than simply sub-slicing the haystack. - /// - /// ``` - /// use regex_automata::{dfa::{Automaton, dense}, HalfMatch, Input}; - /// - /// // N.B. We disable Unicode here so that we use a simple ASCII word - /// // boundary. Alternatively, we could enable heuristic support for - /// // Unicode word boundaries. 
- /// let dfa = dense::DFA::new(r"(?-u)\b[0-9]{3}\b")?; - /// let haystack = "foo123bar".as_bytes(); - /// - /// // Since we sub-slice the haystack, the search doesn't know about the - /// // larger context and assumes that `123` is surrounded by word - /// // boundaries. And of course, the match position is reported relative - /// // to the sub-slice as well, which means we get `3` instead of `6`. - /// let input = Input::new(&haystack[3..6]); - /// let expected = Some(HalfMatch::must(0, 3)); - /// let got = dfa.try_search_fwd(&input)?; - /// assert_eq!(expected, got); - /// - /// // But if we provide the bounds of the search within the context of the - /// // entire haystack, then the search can take the surrounding context - /// // into account. (And if we did find a match, it would be reported - /// // as a valid offset into `haystack` instead of its sub-slice.) - /// let input = Input::new(haystack).range(3..6); - /// let expected = None; - /// let got = dfa.try_search_fwd(&input)?; - /// assert_eq!(expected, got); - /// - /// # Ok::<(), Box>(()) - /// ``` - #[inline] - fn try_search_fwd( - &self, - input: &Input<'_>, - ) -> Result, MatchError> { - let utf8empty = self.has_empty() && self.is_utf8(); - let hm = match search::find_fwd(&self, input)? { - None => return Ok(None), - Some(hm) if !utf8empty => return Ok(Some(hm)), - Some(hm) => hm, - }; - // We get to this point when we know our DFA can match the empty string - // AND when UTF-8 mode is enabled. In this case, we skip any matches - // whose offset splits a codepoint. Such a match is necessarily a - // zero-width match, because UTF-8 mode requires the underlying NFA - // to be built such that all non-empty matches span valid UTF-8. - // Therefore, any match that ends in the middle of a codepoint cannot - // be part of a span of valid UTF-8 and thus must be an empty match. - // In such cases, we skip it, so as not to report matches that split a - // codepoint. - // - // Note that this is not a checked assumption. Callers *can* provide an - // NFA with UTF-8 mode enabled but produces non-empty matches that span - // invalid UTF-8. But doing so is documented to result in unspecified - // behavior. - empty::skip_splits_fwd(input, hm, hm.offset(), |input| { - let got = search::find_fwd(&self, input)?; - Ok(got.map(|hm| (hm, hm.offset()))) - }) - } - - /// Executes a reverse search and returns the start of the position of the - /// leftmost match that is found. If no match exists, then `None` is - /// returned. - /// - /// # Errors - /// - /// This routine errors if the search could not complete. This can occur - /// in a number of circumstances: - /// - /// * The configuration of the DFA may permit it to "quit" the search. - /// For example, setting quit bytes or enabling heuristic support for - /// Unicode word boundaries. The default configuration does not enable any - /// option that could result in the DFA quitting. - /// * When the provided `Input` configuration is not supported. For - /// example, by providing an unsupported anchor mode. - /// - /// When a search returns an error, callers cannot know whether a match - /// exists or not. - /// - /// # Example - /// - /// This example shows how to use this method with a - /// [`dense::DFA`](crate::dfa::dense::DFA). In particular, this - /// routine is principally useful when used in conjunction with the - /// [`nfa::thompson::Config::reverse`](crate::nfa::thompson::Config::reverse) - /// configuration. 
In general, it's unlikely to be correct to use - /// both `try_search_fwd` and `try_search_rev` with the same DFA since - /// any particular DFA will only support searching in one direction with - /// respect to the pattern. - /// - /// ``` - /// use regex_automata::{ - /// nfa::thompson, - /// dfa::{Automaton, dense}, - /// HalfMatch, Input, - /// }; - /// - /// let dfa = dense::Builder::new() - /// .thompson(thompson::Config::new().reverse(true)) - /// .build("foo[0-9]+")?; - /// let expected = Some(HalfMatch::must(0, 0)); - /// assert_eq!(expected, dfa.try_search_rev(&Input::new(b"foo12345"))?); - /// - /// // Even though a match is found after reading the last byte (`c`), - /// // the leftmost first match semantics demand that we find the earliest - /// // match that prefers earlier parts of the pattern over latter parts. - /// let dfa = dense::Builder::new() - /// .thompson(thompson::Config::new().reverse(true)) - /// .build("abc|c")?; - /// let expected = Some(HalfMatch::must(0, 0)); - /// assert_eq!(expected, dfa.try_search_rev(&Input::new(b"abc"))?); - /// - /// # Ok::<(), Box>(()) - /// ``` - /// - /// # Example: UTF-8 mode - /// - /// This examples demonstrates that UTF-8 mode applies to reverse - /// DFAs. When UTF-8 mode is enabled in the underlying NFA, then all - /// matches reported must correspond to valid UTF-8 spans. This includes - /// prohibiting zero-width matches that split a codepoint. - /// - /// UTF-8 mode is enabled by default. Notice below how the only zero-width - /// matches reported are those at UTF-8 boundaries: - /// - /// ``` - /// use regex_automata::{ - /// dfa::{dense::DFA, Automaton}, - /// nfa::thompson, - /// HalfMatch, Input, MatchKind, - /// }; - /// - /// let dfa = DFA::builder() - /// .thompson(thompson::Config::new().reverse(true)) - /// .build(r"")?; - /// - /// // Run the reverse DFA to collect all matches. - /// let mut input = Input::new("☃"); - /// let mut matches = vec![]; - /// loop { - /// match dfa.try_search_rev(&input)? { - /// None => break, - /// Some(hm) => { - /// matches.push(hm); - /// if hm.offset() == 0 || input.end() == 0 { - /// break; - /// } else if hm.offset() < input.end() { - /// input.set_end(hm.offset()); - /// } else { - /// // This is only necessary to handle zero-width - /// // matches, which of course occur in this example. - /// // Without this, the search would never advance - /// // backwards beyond the initial match. - /// input.set_end(input.end() - 1); - /// } - /// } - /// } - /// } - /// - /// // No matches split a codepoint. - /// let expected = vec![ - /// HalfMatch::must(0, 3), - /// HalfMatch::must(0, 0), - /// ]; - /// assert_eq!(expected, matches); - /// - /// # Ok::<(), Box>(()) - /// ``` - /// - /// Now let's look at the same example, but with UTF-8 mode on the - /// original NFA disabled (which results in disabling UTF-8 mode on the - /// DFA): - /// - /// ``` - /// use regex_automata::{ - /// dfa::{dense::DFA, Automaton}, - /// nfa::thompson, - /// HalfMatch, Input, MatchKind, - /// }; - /// - /// let dfa = DFA::builder() - /// .thompson(thompson::Config::new().reverse(true).utf8(false)) - /// .build(r"")?; - /// - /// // Run the reverse DFA to collect all matches. - /// let mut input = Input::new("☃"); - /// let mut matches = vec![]; - /// loop { - /// match dfa.try_search_rev(&input)? 
{ - /// None => break, - /// Some(hm) => { - /// matches.push(hm); - /// if hm.offset() == 0 || input.end() == 0 { - /// break; - /// } else if hm.offset() < input.end() { - /// input.set_end(hm.offset()); - /// } else { - /// // This is only necessary to handle zero-width - /// // matches, which of course occur in this example. - /// // Without this, the search would never advance - /// // backwards beyond the initial match. - /// input.set_end(input.end() - 1); - /// } - /// } - /// } - /// } - /// - /// // No matches split a codepoint. - /// let expected = vec![ - /// HalfMatch::must(0, 3), - /// HalfMatch::must(0, 2), - /// HalfMatch::must(0, 1), - /// HalfMatch::must(0, 0), - /// ]; - /// assert_eq!(expected, matches); - /// - /// # Ok::<(), Box>(()) - /// ``` - #[inline] - fn try_search_rev( - &self, - input: &Input<'_>, - ) -> Result, MatchError> { - let utf8empty = self.has_empty() && self.is_utf8(); - let hm = match search::find_rev(self, input)? { - None => return Ok(None), - Some(hm) if !utf8empty => return Ok(Some(hm)), - Some(hm) => hm, - }; - empty::skip_splits_rev(input, hm, hm.offset(), |input| { - let got = search::find_rev(self, input)?; - Ok(got.map(|hm| (hm, hm.offset()))) - }) - } - - /// Executes an overlapping forward search. Matches, if one exists, can be - /// obtained via the [`OverlappingState::get_match`] method. - /// - /// This routine is principally only useful when searching for multiple - /// patterns on inputs where multiple patterns may match the same regions - /// of text. In particular, callers must preserve the automaton's search - /// state from prior calls so that the implementation knows where the last - /// match occurred. - /// - /// When using this routine to implement an iterator of overlapping - /// matches, the `start` of the search should always be set to the end - /// of the last match. If more patterns match at the previous location, - /// then they will be immediately returned. (This is tracked by the given - /// overlapping state.) Otherwise, the search continues at the starting - /// position given. - /// - /// If for some reason you want the search to forget about its previous - /// state and restart the search at a particular position, then setting the - /// state to [`OverlappingState::start`] will accomplish that. - /// - /// # Errors - /// - /// This routine errors if the search could not complete. This can occur - /// in a number of circumstances: - /// - /// * The configuration of the DFA may permit it to "quit" the search. - /// For example, setting quit bytes or enabling heuristic support for - /// Unicode word boundaries. The default configuration does not enable any - /// option that could result in the DFA quitting. - /// * When the provided `Input` configuration is not supported. For - /// example, by providing an unsupported anchor mode. - /// - /// When a search returns an error, callers cannot know whether a match - /// exists or not. - /// - /// # Example - /// - /// This example shows how to run a basic overlapping search with a - /// [`dense::DFA`](crate::dfa::dense::DFA). Notice that we build the - /// automaton with a `MatchKind::All` configuration. Overlapping searches - /// are unlikely to work as one would expect when using the default - /// `MatchKind::LeftmostFirst` match semantics, since leftmost-first - /// matching is fundamentally incompatible with overlapping searches. 
- /// Namely, overlapping searches need to report matches as they are seen, - /// where as leftmost-first searches will continue searching even after a - /// match has been observed in order to find the conventional end position - /// of the match. More concretely, leftmost-first searches use dead states - /// to terminate a search after a specific match can no longer be extended. - /// Overlapping searches instead do the opposite by continuing the search - /// to find totally new matches (potentially of other patterns). - /// - /// ``` - /// # if cfg!(miri) { return Ok(()); } // miri takes too long - /// use regex_automata::{ - /// dfa::{Automaton, OverlappingState, dense}, - /// HalfMatch, Input, MatchKind, - /// }; - /// - /// let dfa = dense::Builder::new() - /// .configure(dense::Config::new().match_kind(MatchKind::All)) - /// .build_many(&[r"[[:word:]]+$", r"[[:^space:]]+$"])?; - /// let haystack = "@foo"; - /// let mut state = OverlappingState::start(); - /// - /// let expected = Some(HalfMatch::must(1, 4)); - /// dfa.try_search_overlapping_fwd(&Input::new(haystack), &mut state)?; - /// assert_eq!(expected, state.get_match()); - /// - /// // The first pattern also matches at the same position, so re-running - /// // the search will yield another match. Notice also that the first - /// // pattern is returned after the second. This is because the second - /// // pattern begins its match before the first, is therefore an earlier - /// // match and is thus reported first. - /// let expected = Some(HalfMatch::must(0, 4)); - /// dfa.try_search_overlapping_fwd(&Input::new(haystack), &mut state)?; - /// assert_eq!(expected, state.get_match()); - /// - /// # Ok::<(), Box>(()) - /// ``` - #[inline] - fn try_search_overlapping_fwd( - &self, - input: &Input<'_>, - state: &mut OverlappingState, - ) -> Result<(), MatchError> { - let utf8empty = self.has_empty() && self.is_utf8(); - search::find_overlapping_fwd(self, input, state)?; - match state.get_match() { - None => Ok(()), - Some(_) if !utf8empty => Ok(()), - Some(_) => skip_empty_utf8_splits_overlapping( - input, - state, - |input, state| { - search::find_overlapping_fwd(self, input, state) - }, - ), - } - } - - /// Executes a reverse overlapping forward search. Matches, if one exists, - /// can be obtained via the [`OverlappingState::get_match`] method. - /// - /// When using this routine to implement an iterator of overlapping - /// matches, the `start` of the search should remain invariant throughout - /// iteration. The `OverlappingState` given to the search will keep track - /// of the current position of the search. (This is because multiple - /// matches may be reported at the same position, so only the search - /// implementation itself knows when to advance the position.) - /// - /// If for some reason you want the search to forget about its previous - /// state and restart the search at a particular position, then setting the - /// state to [`OverlappingState::start`] will accomplish that. - /// - /// # Errors - /// - /// This routine errors if the search could not complete. This can occur - /// in a number of circumstances: - /// - /// * The configuration of the DFA may permit it to "quit" the search. - /// For example, setting quit bytes or enabling heuristic support for - /// Unicode word boundaries. The default configuration does not enable any - /// option that could result in the DFA quitting. - /// * When the provided `Input` configuration is not supported. For - /// example, by providing an unsupported anchor mode. 
- /// - /// When a search returns an error, callers cannot know whether a match - /// exists or not. - /// - /// # Example: UTF-8 mode - /// - /// This examples demonstrates that UTF-8 mode applies to reverse - /// DFAs. When UTF-8 mode is enabled in the underlying NFA, then all - /// matches reported must correspond to valid UTF-8 spans. This includes - /// prohibiting zero-width matches that split a codepoint. - /// - /// UTF-8 mode is enabled by default. Notice below how the only zero-width - /// matches reported are those at UTF-8 boundaries: - /// - /// ``` - /// use regex_automata::{ - /// dfa::{dense::DFA, Automaton, OverlappingState}, - /// nfa::thompson, - /// HalfMatch, Input, MatchKind, - /// }; - /// - /// let dfa = DFA::builder() - /// .configure(DFA::config().match_kind(MatchKind::All)) - /// .thompson(thompson::Config::new().reverse(true)) - /// .build_many(&[r"", r"☃"])?; - /// - /// // Run the reverse DFA to collect all matches. - /// let input = Input::new("☃"); - /// let mut state = OverlappingState::start(); - /// let mut matches = vec![]; - /// loop { - /// dfa.try_search_overlapping_rev(&input, &mut state)?; - /// match state.get_match() { - /// None => break, - /// Some(hm) => matches.push(hm), - /// } - /// } - /// - /// // No matches split a codepoint. - /// let expected = vec![ - /// HalfMatch::must(0, 3), - /// HalfMatch::must(1, 0), - /// HalfMatch::must(0, 0), - /// ]; - /// assert_eq!(expected, matches); - /// - /// # Ok::<(), Box>(()) - /// ``` - /// - /// Now let's look at the same example, but with UTF-8 mode on the - /// original NFA disabled (which results in disabling UTF-8 mode on the - /// DFA): - /// - /// ``` - /// use regex_automata::{ - /// dfa::{dense::DFA, Automaton, OverlappingState}, - /// nfa::thompson, - /// HalfMatch, Input, MatchKind, - /// }; - /// - /// let dfa = DFA::builder() - /// .configure(DFA::config().match_kind(MatchKind::All)) - /// .thompson(thompson::Config::new().reverse(true).utf8(false)) - /// .build_many(&[r"", r"☃"])?; - /// - /// // Run the reverse DFA to collect all matches. - /// let input = Input::new("☃"); - /// let mut state = OverlappingState::start(); - /// let mut matches = vec![]; - /// loop { - /// dfa.try_search_overlapping_rev(&input, &mut state)?; - /// match state.get_match() { - /// None => break, - /// Some(hm) => matches.push(hm), - /// } - /// } - /// - /// // Now *all* positions match, even within a codepoint, - /// // because we lifted the requirement that matches - /// // correspond to valid UTF-8 spans. - /// let expected = vec![ - /// HalfMatch::must(0, 3), - /// HalfMatch::must(0, 2), - /// HalfMatch::must(0, 1), - /// HalfMatch::must(1, 0), - /// HalfMatch::must(0, 0), - /// ]; - /// assert_eq!(expected, matches); - /// - /// # Ok::<(), Box>(()) - /// ``` - #[inline] - fn try_search_overlapping_rev( - &self, - input: &Input<'_>, - state: &mut OverlappingState, - ) -> Result<(), MatchError> { - let utf8empty = self.has_empty() && self.is_utf8(); - search::find_overlapping_rev(self, input, state)?; - match state.get_match() { - None => Ok(()), - Some(_) if !utf8empty => Ok(()), - Some(_) => skip_empty_utf8_splits_overlapping( - input, - state, - |input, state| { - search::find_overlapping_rev(self, input, state) - }, - ), - } - } - - /// Writes the set of patterns that match anywhere in the given search - /// configuration to `patset`. 
If multiple patterns match at the same - /// position and the underlying DFA supports overlapping matches, then all - /// matching patterns are written to the given set. - /// - /// Unless all of the patterns in this DFA are anchored, then generally - /// speaking, this will visit every byte in the haystack. - /// - /// This search routine *does not* clear the pattern set. This gives some - /// flexibility to the caller (e.g., running multiple searches with the - /// same pattern set), but does make the API bug-prone if you're reusing - /// the same pattern set for multiple searches but intended them to be - /// independent. - /// - /// If a pattern ID matched but the given `PatternSet` does not have - /// sufficient capacity to store it, then it is not inserted and silently - /// dropped. - /// - /// # Errors - /// - /// This routine errors if the search could not complete. This can occur - /// in a number of circumstances: - /// - /// * The configuration of the DFA may permit it to "quit" the search. - /// For example, setting quit bytes or enabling heuristic support for - /// Unicode word boundaries. The default configuration does not enable any - /// option that could result in the DFA quitting. - /// * When the provided `Input` configuration is not supported. For - /// example, by providing an unsupported anchor mode. - /// - /// When a search returns an error, callers cannot know whether a match - /// exists or not. - /// - /// # Example - /// - /// This example shows how to find all matching patterns in a haystack, - /// even when some patterns match at the same position as other patterns. - /// - /// ``` - /// # if cfg!(miri) { return Ok(()); } // miri takes too long - /// use regex_automata::{ - /// dfa::{Automaton, dense::DFA}, - /// Input, MatchKind, PatternSet, - /// }; - /// - /// let patterns = &[ - /// r"[[:word:]]+", - /// r"[0-9]+", - /// r"[[:alpha:]]+", - /// r"foo", - /// r"bar", - /// r"barfoo", - /// r"foobar", - /// ]; - /// let dfa = DFA::builder() - /// .configure(DFA::config().match_kind(MatchKind::All)) - /// .build_many(patterns)?; - /// - /// let input = Input::new("foobar"); - /// let mut patset = PatternSet::new(dfa.pattern_len()); - /// dfa.try_which_overlapping_matches(&input, &mut patset)?; - /// let expected = vec![0, 2, 3, 4, 6]; - /// let got: Vec = patset.iter().map(|p| p.as_usize()).collect(); - /// assert_eq!(expected, got); - /// - /// # Ok::<(), Box>(()) - /// ``` - #[cfg(feature = "alloc")] - #[inline] - fn try_which_overlapping_matches( - &self, - input: &Input<'_>, - patset: &mut PatternSet, - ) -> Result<(), MatchError> { - let mut state = OverlappingState::start(); - while let Some(m) = { - self.try_search_overlapping_fwd(input, &mut state)?; - state.get_match() - } { - let _ = patset.insert(m.pattern()); - // There's nothing left to find, so we can stop. Or the caller - // asked us to. 
- if patset.is_full() || input.get_earliest() { - break; - } - } - Ok(()) - } -} - -unsafe impl<'a, A: Automaton + ?Sized> Automaton for &'a A { - #[inline] - fn next_state(&self, current: StateID, input: u8) -> StateID { - (**self).next_state(current, input) - } - - #[inline] - unsafe fn next_state_unchecked( - &self, - current: StateID, - input: u8, - ) -> StateID { - (**self).next_state_unchecked(current, input) - } - - #[inline] - fn next_eoi_state(&self, current: StateID) -> StateID { - (**self).next_eoi_state(current) - } - - #[inline] - fn start_state( - &self, - config: &start::Config, - ) -> Result { - (**self).start_state(config) - } - - #[inline] - fn start_state_forward( - &self, - input: &Input<'_>, - ) -> Result { - (**self).start_state_forward(input) - } - - #[inline] - fn start_state_reverse( - &self, - input: &Input<'_>, - ) -> Result { - (**self).start_state_reverse(input) - } - - #[inline] - fn universal_start_state(&self, mode: Anchored) -> Option { - (**self).universal_start_state(mode) - } - - #[inline] - fn is_special_state(&self, id: StateID) -> bool { - (**self).is_special_state(id) - } - - #[inline] - fn is_dead_state(&self, id: StateID) -> bool { - (**self).is_dead_state(id) - } - - #[inline] - fn is_quit_state(&self, id: StateID) -> bool { - (**self).is_quit_state(id) - } - - #[inline] - fn is_match_state(&self, id: StateID) -> bool { - (**self).is_match_state(id) - } - - #[inline] - fn is_start_state(&self, id: StateID) -> bool { - (**self).is_start_state(id) - } - - #[inline] - fn is_accel_state(&self, id: StateID) -> bool { - (**self).is_accel_state(id) - } - - #[inline] - fn pattern_len(&self) -> usize { - (**self).pattern_len() - } - - #[inline] - fn match_len(&self, id: StateID) -> usize { - (**self).match_len(id) - } - - #[inline] - fn match_pattern(&self, id: StateID, index: usize) -> PatternID { - (**self).match_pattern(id, index) - } - - #[inline] - fn has_empty(&self) -> bool { - (**self).has_empty() - } - - #[inline] - fn is_utf8(&self) -> bool { - (**self).is_utf8() - } - - #[inline] - fn is_always_start_anchored(&self) -> bool { - (**self).is_always_start_anchored() - } - - #[inline] - fn accelerator(&self, id: StateID) -> &[u8] { - (**self).accelerator(id) - } - - #[inline] - fn get_prefilter(&self) -> Option<&Prefilter> { - (**self).get_prefilter() - } - - #[inline] - fn try_search_fwd( - &self, - input: &Input<'_>, - ) -> Result, MatchError> { - (**self).try_search_fwd(input) - } - - #[inline] - fn try_search_rev( - &self, - input: &Input<'_>, - ) -> Result, MatchError> { - (**self).try_search_rev(input) - } - - #[inline] - fn try_search_overlapping_fwd( - &self, - input: &Input<'_>, - state: &mut OverlappingState, - ) -> Result<(), MatchError> { - (**self).try_search_overlapping_fwd(input, state) - } - - #[inline] - fn try_search_overlapping_rev( - &self, - input: &Input<'_>, - state: &mut OverlappingState, - ) -> Result<(), MatchError> { - (**self).try_search_overlapping_rev(input, state) - } - - #[cfg(feature = "alloc")] - #[inline] - fn try_which_overlapping_matches( - &self, - input: &Input<'_>, - patset: &mut PatternSet, - ) -> Result<(), MatchError> { - (**self).try_which_overlapping_matches(input, patset) - } -} - -/// Represents the current state of an overlapping search. -/// -/// This is used for overlapping searches since they need to know something -/// about the previous search. 
For example, when multiple patterns match at the -/// same position, this state tracks the last reported pattern so that the next -/// search knows whether to report another matching pattern or continue with -/// the search at the next position. Additionally, it also tracks which state -/// the last search call terminated in. -/// -/// This type provides little introspection capabilities. The only thing a -/// caller can do is construct it and pass it around to permit search routines -/// to use it to track state, and also ask whether a match has been found. -/// -/// Callers should always provide a fresh state constructed via -/// [`OverlappingState::start`] when starting a new search. Reusing state from -/// a previous search may result in incorrect results. -#[derive(Clone, Debug, Eq, PartialEq)] -pub struct OverlappingState { - /// The match reported by the most recent overlapping search to use this - /// state. - /// - /// If a search does not find any matches, then it is expected to clear - /// this value. - pub(crate) mat: Option, - /// The state ID of the state at which the search was in when the call - /// terminated. When this is a match state, `last_match` must be set to a - /// non-None value. - /// - /// A `None` value indicates the start state of the corresponding - /// automaton. We cannot use the actual ID, since any one automaton may - /// have many start states, and which one is in use depends on several - /// search-time factors. - pub(crate) id: Option, - /// The position of the search. - /// - /// When `id` is None (i.e., we are starting a search), this is set to - /// the beginning of the search as given by the caller regardless of its - /// current value. Subsequent calls to an overlapping search pick up at - /// this offset. - pub(crate) at: usize, - /// The index into the matching patterns of the next match to report if the - /// current state is a match state. Note that this may be 1 greater than - /// the total number of matches to report for the current match state. (In - /// which case, no more matches should be reported at the current position - /// and the search should advance to the next position.) - pub(crate) next_match_index: Option, - /// This is set to true when a reverse overlapping search has entered its - /// EOI transitions. - /// - /// This isn't used in a forward search because it knows to stop once the - /// position exceeds the end of the search range. In a reverse search, - /// since we use unsigned offsets, we don't "know" once we've gone past - /// `0`. So the only way to detect it is with this extra flag. The reverse - /// overlapping search knows to terminate specifically after it has - /// reported all matches after following the EOI transition. - pub(crate) rev_eoi: bool, -} - -impl OverlappingState { - /// Create a new overlapping state that begins at the start state of any - /// automaton. - pub fn start() -> OverlappingState { - OverlappingState { - mat: None, - id: None, - at: 0, - next_match_index: None, - rev_eoi: false, - } - } - - /// Return the match result of the most recent search to execute with this - /// state. - /// - /// A searches will clear this result automatically, such that if no - /// match is found, this will correctly report `None`. - pub fn get_match(&self) -> Option { - self.mat - } -} - -/// An error that can occur when computing the start state for a search. 
-/// -/// Computing a start state can fail for a few reasons, either based on -/// incorrect configuration or even based on whether the look-behind byte -/// triggers a quit state. Typically one does not need to handle this error -/// if you're using [`Automaton::start_state_forward`] (or its reverse -/// counterpart), as that routine automatically converts `StartError` to a -/// [`MatchError`] for you. -/// -/// This error may be returned by the [`Automaton::start_state`] routine. -/// -/// This error implements the `std::error::Error` trait when the `std` feature -/// is enabled. -/// -/// This error is marked as non-exhaustive. New variants may be added in a -/// semver compatible release. -#[non_exhaustive] -#[derive(Clone, Debug)] -pub enum StartError { - /// An error that occurs when a starting configuration's look-behind byte - /// is in this DFA's quit set. - Quit { - /// The quit byte that was found. - byte: u8, - }, - /// An error that occurs when the caller requests an anchored mode that - /// isn't supported by the DFA. - UnsupportedAnchored { - /// The anchored mode given that is unsupported. - mode: Anchored, - }, -} - -impl StartError { - pub(crate) fn quit(byte: u8) -> StartError { - StartError::Quit { byte } - } - - pub(crate) fn unsupported_anchored(mode: Anchored) -> StartError { - StartError::UnsupportedAnchored { mode } - } -} - -#[cfg(feature = "std")] -impl std::error::Error for StartError {} - -impl core::fmt::Display for StartError { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - match *self { - StartError::Quit { byte } => write!( - f, - "error computing start state because the look-behind byte \ - {:?} triggered a quit state", - crate::util::escape::DebugByte(byte), - ), - StartError::UnsupportedAnchored { mode: Anchored::Yes } => { - write!( - f, - "error computing start state because \ - anchored searches are not supported or enabled" - ) - } - StartError::UnsupportedAnchored { mode: Anchored::No } => { - write!( - f, - "error computing start state because \ - unanchored searches are not supported or enabled" - ) - } - StartError::UnsupportedAnchored { - mode: Anchored::Pattern(pid), - } => { - write!( - f, - "error computing start state because \ - anchored searches for a specific pattern ({}) \ - are not supported or enabled", - pid.as_usize(), - ) - } - } - } -} - -/// Runs the given overlapping `search` function (forwards or backwards) until -/// a match is found whose offset does not split a codepoint. -/// -/// This is *not* always correct to call. It should only be called when the DFA -/// has UTF-8 mode enabled *and* it can produce zero-width matches. Calling -/// this when both of those things aren't true might result in legitimate -/// matches getting skipped. -#[cold] -#[inline(never)] -fn skip_empty_utf8_splits_overlapping( - input: &Input<'_>, - state: &mut OverlappingState, - mut search: F, -) -> Result<(), MatchError> -where - F: FnMut(&Input<'_>, &mut OverlappingState) -> Result<(), MatchError>, -{ - // Note that this routine works for forwards and reverse searches - // even though there's no code here to handle those cases. That's - // because overlapping searches drive themselves to completion via - // `OverlappingState`. So all we have to do is push it until no matches are - // found. 
- - let mut hm = match state.get_match() { - None => return Ok(()), - Some(hm) => hm, - }; - if input.get_anchored().is_anchored() { - if !input.is_char_boundary(hm.offset()) { - state.mat = None; - } - return Ok(()); - } - while !input.is_char_boundary(hm.offset()) { - search(input, state)?; - hm = match state.get_match() { - None => return Ok(()), - Some(hm) => hm, - }; - } - Ok(()) -} - -/// Write a prefix "state" indicator for fmt::Debug impls. -/// -/// Specifically, this tries to succinctly distinguish the different types of -/// states: dead states, quit states, accelerated states, start states and -/// match states. It even accounts for the possible overlapping of different -/// state types. -pub(crate) fn fmt_state_indicator( - f: &mut core::fmt::Formatter<'_>, - dfa: A, - id: StateID, -) -> core::fmt::Result { - if dfa.is_dead_state(id) { - write!(f, "D")?; - if dfa.is_start_state(id) { - write!(f, ">")?; - } else { - write!(f, " ")?; - } - } else if dfa.is_quit_state(id) { - write!(f, "Q ")?; - } else if dfa.is_start_state(id) { - if dfa.is_accel_state(id) { - write!(f, "A>")?; - } else { - write!(f, " >")?; - } - } else if dfa.is_match_state(id) { - if dfa.is_accel_state(id) { - write!(f, "A*")?; - } else { - write!(f, " *")?; - } - } else if dfa.is_accel_state(id) { - write!(f, "A ")?; - } else { - write!(f, " ")?; - } - Ok(()) -} - -#[cfg(all(test, feature = "syntax", feature = "dfa-build"))] -mod tests { - // A basic test ensuring that our Automaton trait is object safe. (This is - // the main reason why we don't define the search routines as generic over - // Into.) - #[test] - fn object_safe() { - use crate::{ - dfa::{dense, Automaton}, - HalfMatch, Input, - }; - - let dfa = dense::DFA::new("abc").unwrap(); - let dfa: &dyn Automaton = &dfa; - assert_eq!( - Ok(Some(HalfMatch::must(0, 6))), - dfa.try_search_fwd(&Input::new(b"xyzabcxyz")), - ); - } -} diff --git a/vendor/regex-automata/src/dfa/dense.rs b/vendor/regex-automata/src/dfa/dense.rs deleted file mode 100644 index d47163afa583bb..00000000000000 --- a/vendor/regex-automata/src/dfa/dense.rs +++ /dev/null @@ -1,5260 +0,0 @@ -/*! -Types and routines specific to dense DFAs. - -This module is the home of [`dense::DFA`](DFA). - -This module also contains a [`dense::Builder`](Builder) and a -[`dense::Config`](Config) for building and configuring a dense DFA. -*/ - -#[cfg(feature = "dfa-build")] -use core::cmp; -use core::{fmt, iter, mem::size_of, slice}; - -#[cfg(feature = "dfa-build")] -use alloc::{ - collections::{BTreeMap, BTreeSet}, - vec, - vec::Vec, -}; - -#[cfg(feature = "dfa-build")] -use crate::{ - dfa::{ - accel::Accel, determinize, minimize::Minimizer, remapper::Remapper, - sparse, - }, - nfa::thompson, - util::{look::LookMatcher, search::MatchKind}, -}; -use crate::{ - dfa::{ - accel::Accels, - automaton::{fmt_state_indicator, Automaton, StartError}, - special::Special, - start::StartKind, - DEAD, - }, - util::{ - alphabet::{self, ByteClasses, ByteSet}, - int::{Pointer, Usize}, - prefilter::Prefilter, - primitives::{PatternID, StateID}, - search::Anchored, - start::{self, Start, StartByteMap}, - wire::{self, DeserializeError, Endian, SerializeError}, - }, -}; - -/// The label that is pre-pended to a serialized DFA. -const LABEL: &str = "rust-regex-automata-dfa-dense"; - -/// The format version of dense regexes. This version gets incremented when a -/// change occurs. 
A change may not necessarily be a breaking change, but the -/// version does permit good error messages in the case where a breaking change -/// is made. -const VERSION: u32 = 2; - -/// The configuration used for compiling a dense DFA. -/// -/// As a convenience, [`DFA::config`] is an alias for [`Config::new`]. The -/// advantage of the former is that it often lets you avoid importing the -/// `Config` type directly. -/// -/// A dense DFA configuration is a simple data object that is typically used -/// with [`dense::Builder::configure`](self::Builder::configure). -/// -/// The default configuration guarantees that a search will never return -/// a "quit" error, although it is possible for a search to fail if -/// [`Config::starts_for_each_pattern`] wasn't enabled (which it is -/// not by default) and an [`Anchored::Pattern`] mode is requested via -/// [`Input`](crate::Input). -#[cfg(feature = "dfa-build")] -#[derive(Clone, Debug, Default)] -pub struct Config { - // As with other configuration types in this crate, we put all our knobs - // in options so that we can distinguish between "default" and "not set." - // This makes it possible to easily combine multiple configurations - // without default values overwriting explicitly specified values. See the - // 'overwrite' method. - // - // For docs on the fields below, see the corresponding method setters. - accelerate: Option, - pre: Option>, - minimize: Option, - match_kind: Option, - start_kind: Option, - starts_for_each_pattern: Option, - byte_classes: Option, - unicode_word_boundary: Option, - quitset: Option, - specialize_start_states: Option, - dfa_size_limit: Option>, - determinize_size_limit: Option>, -} - -#[cfg(feature = "dfa-build")] -impl Config { - /// Return a new default dense DFA compiler configuration. - pub fn new() -> Config { - Config::default() - } - - /// Enable state acceleration. - /// - /// When enabled, DFA construction will analyze each state to determine - /// whether it is eligible for simple acceleration. Acceleration typically - /// occurs when most of a state's transitions loop back to itself, leaving - /// only a select few bytes that will exit the state. When this occurs, - /// other routines like `memchr` can be used to look for those bytes which - /// may be much faster than traversing the DFA. - /// - /// Callers may elect to disable this if consistent performance is more - /// desirable than variable performance. Namely, acceleration can sometimes - /// make searching slower than it otherwise would be if the transitions - /// that leave accelerated states are traversed frequently. - /// - /// See [`Automaton::accelerator`] for an example. - /// - /// This is enabled by default. - pub fn accelerate(mut self, yes: bool) -> Config { - self.accelerate = Some(yes); - self - } - - /// Set a prefilter to be used whenever a start state is entered. - /// - /// A [`Prefilter`] in this context is meant to accelerate searches by - /// looking for literal prefixes that every match for the corresponding - /// pattern (or patterns) must start with. Once a prefilter produces a - /// match, the underlying search routine continues on to try and confirm - /// the match. - /// - /// Be warned that setting a prefilter does not guarantee that the search - /// will be faster. 
While it's usually a good bet, if the prefilter - /// produces a lot of false positive candidates (i.e., positions matched - /// by the prefilter but not by the regex), then the overall result can - /// be slower than if you had just executed the regex engine without any - /// prefilters. - /// - /// Note that unless [`Config::specialize_start_states`] has been - /// explicitly set, then setting this will also enable (when `pre` is - /// `Some`) or disable (when `pre` is `None`) start state specialization. - /// This occurs because without start state specialization, a prefilter - /// is likely to be less effective. And without a prefilter, start state - /// specialization is usually pointless. - /// - /// **WARNING:** Note that prefilters are not preserved as part of - /// serialization. Serializing a DFA will drop its prefilter. - /// - /// By default no prefilter is set. - /// - /// # Example - /// - /// ``` - /// use regex_automata::{ - /// dfa::{dense::DFA, Automaton}, - /// util::prefilter::Prefilter, - /// Input, HalfMatch, MatchKind, - /// }; - /// - /// let pre = Prefilter::new(MatchKind::LeftmostFirst, &["foo", "bar"]); - /// let re = DFA::builder() - /// .configure(DFA::config().prefilter(pre)) - /// .build(r"(foo|bar)[a-z]+")?; - /// let input = Input::new("foo1 barfox bar"); - /// assert_eq!( - /// Some(HalfMatch::must(0, 11)), - /// re.try_search_fwd(&input)?, - /// ); - /// - /// # Ok::<(), Box>(()) - /// ``` - /// - /// Be warned though that an incorrect prefilter can lead to incorrect - /// results! - /// - /// ``` - /// use regex_automata::{ - /// dfa::{dense::DFA, Automaton}, - /// util::prefilter::Prefilter, - /// Input, HalfMatch, MatchKind, - /// }; - /// - /// let pre = Prefilter::new(MatchKind::LeftmostFirst, &["foo", "car"]); - /// let re = DFA::builder() - /// .configure(DFA::config().prefilter(pre)) - /// .build(r"(foo|bar)[a-z]+")?; - /// let input = Input::new("foo1 barfox bar"); - /// assert_eq!( - /// // No match reported even though there clearly is one! - /// None, - /// re.try_search_fwd(&input)?, - /// ); - /// - /// # Ok::<(), Box>(()) - /// ``` - pub fn prefilter(mut self, pre: Option) -> Config { - self.pre = Some(pre); - if self.specialize_start_states.is_none() { - self.specialize_start_states = - Some(self.get_prefilter().is_some()); - } - self - } - - /// Minimize the DFA. - /// - /// When enabled, the DFA built will be minimized such that it is as small - /// as possible. - /// - /// Whether one enables minimization or not depends on the types of costs - /// you're willing to pay and how much you care about its benefits. In - /// particular, minimization has worst case `O(n*k*logn)` time and `O(k*n)` - /// space, where `n` is the number of DFA states and `k` is the alphabet - /// size. In practice, minimization can be quite costly in terms of both - /// space and time, so it should only be done if you're willing to wait - /// longer to produce a DFA. In general, you might want a minimal DFA in - /// the following circumstances: - /// - /// 1. You would like to optimize for the size of the automaton. This can - /// manifest in one of two ways. Firstly, if you're converting the - /// DFA into Rust code (or a table embedded in the code), then a minimal - /// DFA will translate into a corresponding reduction in code size, and - /// thus, also the final compiled binary size. Secondly, if you are - /// building many DFAs and putting them on the heap, you'll be able to - /// fit more if they are smaller. 
Note though that building a minimal - /// DFA itself requires additional space; you only realize the space - /// savings once the minimal DFA is constructed (at which point, the - /// space used for minimization is freed). - /// 2. You've observed that a smaller DFA results in faster match - /// performance. Naively, this isn't guaranteed since there is no - /// inherent difference between matching with a bigger-than-minimal - /// DFA and a minimal DFA. However, a smaller DFA may make use of your - /// CPU's cache more efficiently. - /// 3. You are trying to establish an equivalence between regular - /// languages. The standard method for this is to build a minimal DFA - /// for each language and then compare them. If the DFAs are equivalent - /// (up to state renaming), then the languages are equivalent. - /// - /// Typically, minimization only makes sense as an offline process. That - /// is, one might minimize a DFA before serializing it to persistent - /// storage. In practical terms, minimization can take around an order of - /// magnitude more time than compiling the initial DFA via determinization. - /// - /// This option is disabled by default. - pub fn minimize(mut self, yes: bool) -> Config { - self.minimize = Some(yes); - self - } - - /// Set the desired match semantics. - /// - /// The default is [`MatchKind::LeftmostFirst`], which corresponds to the - /// match semantics of Perl-like regex engines. That is, when multiple - /// patterns would match at the same leftmost position, the pattern that - /// appears first in the concrete syntax is chosen. - /// - /// Currently, the only other kind of match semantics supported is - /// [`MatchKind::All`]. This corresponds to classical DFA construction - /// where all possible matches are added to the DFA. - /// - /// Typically, `All` is used when one wants to execute an overlapping - /// search and `LeftmostFirst` otherwise. In particular, it rarely makes - /// sense to use `All` with the various "leftmost" find routines, since the - /// leftmost routines depend on the `LeftmostFirst` automata construction - /// strategy. Specifically, `LeftmostFirst` adds dead states to the DFA - /// as a way to terminate the search and report a match. `LeftmostFirst` - /// also supports non-greedy matches using this strategy where as `All` - /// does not. - /// - /// # Example: overlapping search - /// - /// This example shows the typical use of `MatchKind::All`, which is to - /// report overlapping matches. - /// - /// ``` - /// # if cfg!(miri) { return Ok(()); } // miri takes too long - /// use regex_automata::{ - /// dfa::{Automaton, OverlappingState, dense}, - /// HalfMatch, Input, MatchKind, - /// }; - /// - /// let dfa = dense::Builder::new() - /// .configure(dense::Config::new().match_kind(MatchKind::All)) - /// .build_many(&[r"\w+$", r"\S+$"])?; - /// let input = Input::new("@foo"); - /// let mut state = OverlappingState::start(); - /// - /// let expected = Some(HalfMatch::must(1, 4)); - /// dfa.try_search_overlapping_fwd(&input, &mut state)?; - /// assert_eq!(expected, state.get_match()); - /// - /// // The first pattern also matches at the same position, so re-running - /// // the search will yield another match. Notice also that the first - /// // pattern is returned after the second. This is because the second - /// // pattern begins its match before the first, is therefore an earlier - /// // match and is thus reported first. 
- /// let expected = Some(HalfMatch::must(0, 4)); - /// dfa.try_search_overlapping_fwd(&input, &mut state)?; - /// assert_eq!(expected, state.get_match()); - /// - /// # Ok::<(), Box>(()) - /// ``` - /// - /// # Example: reverse automaton to find start of match - /// - /// Another example for using `MatchKind::All` is for constructing a - /// reverse automaton to find the start of a match. `All` semantics are - /// used for this in order to find the longest possible match, which - /// corresponds to the leftmost starting position. - /// - /// Note that if you need the starting position then - /// [`dfa::regex::Regex`](crate::dfa::regex::Regex) will handle this for - /// you, so it's usually not necessary to do this yourself. - /// - /// ``` - /// use regex_automata::{ - /// dfa::{dense, Automaton, StartKind}, - /// nfa::thompson::NFA, - /// Anchored, HalfMatch, Input, MatchKind, - /// }; - /// - /// let haystack = "123foobar456".as_bytes(); - /// let pattern = r"[a-z]+r"; - /// - /// let dfa_fwd = dense::DFA::new(pattern)?; - /// let dfa_rev = dense::Builder::new() - /// .thompson(NFA::config().reverse(true)) - /// .configure(dense::Config::new() - /// // This isn't strictly necessary since both anchored and - /// // unanchored searches are supported by default. But since - /// // finding the start-of-match only requires anchored searches, - /// // we can get rid of the unanchored configuration and possibly - /// // slim down our DFA considerably. - /// .start_kind(StartKind::Anchored) - /// .match_kind(MatchKind::All) - /// ) - /// .build(pattern)?; - /// let expected_fwd = HalfMatch::must(0, 9); - /// let expected_rev = HalfMatch::must(0, 3); - /// let got_fwd = dfa_fwd.try_search_fwd(&Input::new(haystack))?.unwrap(); - /// // Here we don't specify the pattern to search for since there's only - /// // one pattern and we're doing a leftmost search. But if this were an - /// // overlapping search, you'd need to specify the pattern that matched - /// // in the forward direction. (Otherwise, you might wind up finding the - /// // starting position of a match of some other pattern.) That in turn - /// // requires building the reverse automaton with starts_for_each_pattern - /// // enabled. Indeed, this is what Regex does internally. - /// let input = Input::new(haystack) - /// .range(..got_fwd.offset()) - /// .anchored(Anchored::Yes); - /// let got_rev = dfa_rev.try_search_rev(&input)?.unwrap(); - /// assert_eq!(expected_fwd, got_fwd); - /// assert_eq!(expected_rev, got_rev); - /// - /// # Ok::<(), Box>(()) - /// ``` - pub fn match_kind(mut self, kind: MatchKind) -> Config { - self.match_kind = Some(kind); - self - } - - /// The type of starting state configuration to use for a DFA. - /// - /// By default, the starting state configuration is [`StartKind::Both`]. - /// - /// # Example - /// - /// ``` - /// use regex_automata::{ - /// dfa::{dense::DFA, Automaton, StartKind}, - /// Anchored, HalfMatch, Input, - /// }; - /// - /// let haystack = "quux foo123"; - /// let expected = HalfMatch::must(0, 11); - /// - /// // By default, DFAs support both anchored and unanchored searches. - /// let dfa = DFA::new(r"[0-9]+")?; - /// let input = Input::new(haystack); - /// assert_eq!(Some(expected), dfa.try_search_fwd(&input)?); - /// - /// // But if we only need anchored searches, then we can build a DFA - /// // that only supports anchored searches. 
This leads to a smaller DFA - /// // (potentially significantly smaller in some cases), but a DFA that - /// // will panic if you try to use it with an unanchored search. - /// let dfa = DFA::builder() - /// .configure(DFA::config().start_kind(StartKind::Anchored)) - /// .build(r"[0-9]+")?; - /// let input = Input::new(haystack) - /// .range(8..) - /// .anchored(Anchored::Yes); - /// assert_eq!(Some(expected), dfa.try_search_fwd(&input)?); - /// - /// # Ok::<(), Box>(()) - /// ``` - pub fn start_kind(mut self, kind: StartKind) -> Config { - self.start_kind = Some(kind); - self - } - - /// Whether to compile a separate start state for each pattern in the - /// automaton. - /// - /// When enabled, a separate **anchored** start state is added for each - /// pattern in the DFA. When this start state is used, then the DFA will - /// only search for matches for the pattern specified, even if there are - /// other patterns in the DFA. - /// - /// The main downside of this option is that it can potentially increase - /// the size of the DFA and/or increase the time it takes to build the DFA. - /// - /// There are a few reasons one might want to enable this (it's disabled - /// by default): - /// - /// 1. When looking for the start of an overlapping match (using a - /// reverse DFA), doing it correctly requires starting the reverse search - /// using the starting state of the pattern that matched in the forward - /// direction. Indeed, when building a [`Regex`](crate::dfa::regex::Regex), - /// it will automatically enable this option when building the reverse DFA - /// internally. - /// 2. When you want to use a DFA with multiple patterns to both search - /// for matches of any pattern or to search for anchored matches of one - /// particular pattern while using the same DFA. (Otherwise, you would need - /// to compile a new DFA for each pattern.) - /// 3. Since the start states added for each pattern are anchored, if you - /// compile an unanchored DFA with one pattern while also enabling this - /// option, then you can use the same DFA to perform anchored or unanchored - /// searches. The latter you get with the standard search APIs. The former - /// you get from the various `_at` search methods that allow you specify a - /// pattern ID to search for. - /// - /// By default this is disabled. - /// - /// # Example - /// - /// This example shows how to use this option to permit the same DFA to - /// run both anchored and unanchored searches for a single pattern. - /// - /// ``` - /// use regex_automata::{ - /// dfa::{dense, Automaton}, - /// Anchored, HalfMatch, PatternID, Input, - /// }; - /// - /// let dfa = dense::Builder::new() - /// .configure(dense::Config::new().starts_for_each_pattern(true)) - /// .build(r"foo[0-9]+")?; - /// let haystack = "quux foo123"; - /// - /// // Here's a normal unanchored search. Notice that we use 'None' for the - /// // pattern ID. Since the DFA was built as an unanchored machine, it - /// // use its default unanchored starting state. - /// let expected = HalfMatch::must(0, 11); - /// let input = Input::new(haystack); - /// assert_eq!(Some(expected), dfa.try_search_fwd(&input)?); - /// // But now if we explicitly specify the pattern to search ('0' being - /// // the only pattern in the DFA), then it will use the starting state - /// // for that specific pattern which is always anchored. Since the - /// // pattern doesn't have a match at the beginning of the haystack, we - /// // find nothing. 
- /// let input = Input::new(haystack) - /// .anchored(Anchored::Pattern(PatternID::must(0))); - /// assert_eq!(None, dfa.try_search_fwd(&input)?); - /// // And finally, an anchored search is not the same as putting a '^' at - /// // beginning of the pattern. An anchored search can only match at the - /// // beginning of the *search*, which we can change: - /// let input = Input::new(haystack) - /// .anchored(Anchored::Pattern(PatternID::must(0))) - /// .range(5..); - /// assert_eq!(Some(expected), dfa.try_search_fwd(&input)?); - /// - /// # Ok::<(), Box>(()) - /// ``` - pub fn starts_for_each_pattern(mut self, yes: bool) -> Config { - self.starts_for_each_pattern = Some(yes); - self - } - - /// Whether to attempt to shrink the size of the DFA's alphabet or not. - /// - /// This option is enabled by default and should never be disabled unless - /// one is debugging a generated DFA. - /// - /// When enabled, the DFA will use a map from all possible bytes to their - /// corresponding equivalence class. Each equivalence class represents a - /// set of bytes that does not discriminate between a match and a non-match - /// in the DFA. For example, the pattern `[ab]+` has at least two - /// equivalence classes: a set containing `a` and `b` and a set containing - /// every byte except for `a` and `b`. `a` and `b` are in the same - /// equivalence class because they never discriminate between a match and a - /// non-match. - /// - /// The advantage of this map is that the size of the transition table - /// can be reduced drastically from `#states * 256 * sizeof(StateID)` to - /// `#states * k * sizeof(StateID)` where `k` is the number of equivalence - /// classes (rounded up to the nearest power of 2). As a result, total - /// space usage can decrease substantially. Moreover, since a smaller - /// alphabet is used, DFA compilation becomes faster as well. - /// - /// **WARNING:** This is only useful for debugging DFAs. Disabling this - /// does not yield any speed advantages. Namely, even when this is - /// disabled, a byte class map is still used while searching. The only - /// difference is that every byte will be forced into its own distinct - /// equivalence class. This is useful for debugging the actual generated - /// transitions because it lets one see the transitions defined on actual - /// bytes instead of the equivalence classes. - pub fn byte_classes(mut self, yes: bool) -> Config { - self.byte_classes = Some(yes); - self - } - - /// Heuristically enable Unicode word boundaries. - /// - /// When set, this will attempt to implement Unicode word boundaries as if - /// they were ASCII word boundaries. This only works when the search input - /// is ASCII only. If a non-ASCII byte is observed while searching, then a - /// [`MatchError::quit`](crate::MatchError::quit) error is returned. - /// - /// A possible alternative to enabling this option is to simply use an - /// ASCII word boundary, e.g., via `(?-u:\b)`. The main reason to use this - /// option is if you absolutely need Unicode support. This option lets one - /// use a fast search implementation (a DFA) for some potentially very - /// common cases, while providing the option to fall back to some other - /// regex engine to handle the general case when an error is returned. - /// - /// If the pattern provided has no Unicode word boundary in it, then this - /// option has no effect. (That is, quitting on a non-ASCII byte only - /// occurs when this option is enabled _and_ a Unicode word boundary is - /// present in the pattern.) 
- /// - /// This is almost equivalent to setting all non-ASCII bytes to be quit - /// bytes. The only difference is that this will cause non-ASCII bytes to - /// be quit bytes _only_ when a Unicode word boundary is present in the - /// pattern. - /// - /// When enabling this option, callers _must_ be prepared to handle - /// a [`MatchError`](crate::MatchError) error during search. - /// When using a [`Regex`](crate::dfa::regex::Regex), this corresponds - /// to using the `try_` suite of methods. Alternatively, if - /// callers can guarantee that their input is ASCII only, then a - /// [`MatchError::quit`](crate::MatchError::quit) error will never be - /// returned while searching. - /// - /// This is disabled by default. - /// - /// # Example - /// - /// This example shows how to heuristically enable Unicode word boundaries - /// in a pattern. It also shows what happens when a search comes across a - /// non-ASCII byte. - /// - /// ``` - /// use regex_automata::{ - /// dfa::{Automaton, dense}, - /// HalfMatch, Input, MatchError, - /// }; - /// - /// let dfa = dense::Builder::new() - /// .configure(dense::Config::new().unicode_word_boundary(true)) - /// .build(r"\b[0-9]+\b")?; - /// - /// // The match occurs before the search ever observes the snowman - /// // character, so no error occurs. - /// let haystack = "foo 123 ☃".as_bytes(); - /// let expected = Some(HalfMatch::must(0, 7)); - /// let got = dfa.try_search_fwd(&Input::new(haystack))?; - /// assert_eq!(expected, got); - /// - /// // Notice that this search fails, even though the snowman character - /// // occurs after the ending match offset. This is because search - /// // routines read one byte past the end of the search to account for - /// // look-around, and indeed, this is required here to determine whether - /// // the trailing \b matches. - /// let haystack = "foo 123 ☃".as_bytes(); - /// let expected = MatchError::quit(0xE2, 8); - /// let got = dfa.try_search_fwd(&Input::new(haystack)); - /// assert_eq!(Err(expected), got); - /// - /// // Another example is executing a search where the span of the haystack - /// // we specify is all ASCII, but there is non-ASCII just before it. This - /// // correctly also reports an error. - /// let input = Input::new("β123").range(2..); - /// let expected = MatchError::quit(0xB2, 1); - /// let got = dfa.try_search_fwd(&input); - /// assert_eq!(Err(expected), got); - /// - /// // And similarly for the trailing word boundary. - /// let input = Input::new("123β").range(..3); - /// let expected = MatchError::quit(0xCE, 3); - /// let got = dfa.try_search_fwd(&input); - /// assert_eq!(Err(expected), got); - /// - /// # Ok::<(), Box>(()) - /// ``` - pub fn unicode_word_boundary(mut self, yes: bool) -> Config { - // We have a separate option for this instead of just setting the - // appropriate quit bytes here because we don't want to set quit bytes - // for every regex. We only want to set them when the regex contains a - // Unicode word boundary. - self.unicode_word_boundary = Some(yes); - self - } - - /// Add a "quit" byte to the DFA. - /// - /// When a quit byte is seen during search time, then search will return - /// a [`MatchError::quit`](crate::MatchError::quit) error indicating the - /// offset at which the search stopped. - /// - /// A quit byte will always overrule any other aspects of a regex. 
For - /// example, if the `x` byte is added as a quit byte and the regex `\w` is - /// used, then observing `x` will cause the search to quit immediately - /// despite the fact that `x` is in the `\w` class. - /// - /// This mechanism is primarily useful for heuristically enabling certain - /// features like Unicode word boundaries in a DFA. Namely, if the input - /// to search is ASCII, then a Unicode word boundary can be implemented - /// via an ASCII word boundary with no change in semantics. Thus, a DFA - /// can attempt to match a Unicode word boundary but give up as soon as it - /// observes a non-ASCII byte. Indeed, if callers set all non-ASCII bytes - /// to be quit bytes, then Unicode word boundaries will be permitted when - /// building DFAs. Of course, callers should enable - /// [`Config::unicode_word_boundary`] if they want this behavior instead. - /// (The advantage being that non-ASCII quit bytes will only be added if a - /// Unicode word boundary is in the pattern.) - /// - /// When enabling this option, callers _must_ be prepared to handle a - /// [`MatchError`](crate::MatchError) error during search. When using a - /// [`Regex`](crate::dfa::regex::Regex), this corresponds to using the - /// `try_` suite of methods. - /// - /// By default, there are no quit bytes set. - /// - /// # Panics - /// - /// This panics if heuristic Unicode word boundaries are enabled and any - /// non-ASCII byte is removed from the set of quit bytes. Namely, enabling - /// Unicode word boundaries requires setting every non-ASCII byte to a quit - /// byte. So if the caller attempts to undo any of that, then this will - /// panic. - /// - /// # Example - /// - /// This example shows how to cause a search to terminate if it sees a - /// `\n` byte. This could be useful if, for example, you wanted to prevent - /// a user supplied pattern from matching across a line boundary. - /// - /// ``` - /// # if cfg!(miri) { return Ok(()); } // miri takes too long - /// use regex_automata::{dfa::{Automaton, dense}, Input, MatchError}; - /// - /// let dfa = dense::Builder::new() - /// .configure(dense::Config::new().quit(b'\n', true)) - /// .build(r"foo\p{any}+bar")?; - /// - /// let haystack = "foo\nbar".as_bytes(); - /// // Normally this would produce a match, since \p{any} contains '\n'. - /// // But since we instructed the automaton to enter a quit state if a - /// // '\n' is observed, this produces a match error instead. - /// let expected = MatchError::quit(b'\n', 3); - /// let got = dfa.try_search_fwd(&Input::new(haystack)).unwrap_err(); - /// assert_eq!(expected, got); - /// - /// # Ok::<(), Box>(()) - /// ``` - pub fn quit(mut self, byte: u8, yes: bool) -> Config { - if self.get_unicode_word_boundary() && !byte.is_ascii() && !yes { - panic!( - "cannot set non-ASCII byte to be non-quit when \ - Unicode word boundaries are enabled" - ); - } - if self.quitset.is_none() { - self.quitset = Some(ByteSet::empty()); - } - if yes { - self.quitset.as_mut().unwrap().add(byte); - } else { - self.quitset.as_mut().unwrap().remove(byte); - } - self - } - - /// Enable specializing start states in the DFA. - /// - /// When start states are specialized, an implementor of a search routine - /// using a lazy DFA can tell when the search has entered a starting state. - /// When start states aren't specialized, then it is impossible to know - /// whether the search has entered a start state. - /// - /// Ideally, this option wouldn't need to exist and we could always - /// specialize start states. 
The problem is that start states can be quite - /// active. This in turn means that an efficient search routine is likely - /// to ping-pong between a heavily optimized hot loop that handles most - /// states and to a less optimized specialized handling of start states. - /// This causes branches to get heavily mispredicted and overall can - /// materially decrease throughput. Therefore, specializing start states - /// should only be enabled when it is needed. - /// - /// Knowing whether a search is in a start state is typically useful when a - /// prefilter is active for the search. A prefilter is typically only run - /// when in a start state and a prefilter can greatly accelerate a search. - /// Therefore, the possible cost of specializing start states is worth it - /// in this case. Otherwise, if you have no prefilter, there is likely no - /// reason to specialize start states. - /// - /// This is disabled by default, but note that it is automatically - /// enabled (or disabled) if [`Config::prefilter`] is set. Namely, unless - /// `specialize_start_states` has already been set, [`Config::prefilter`] - /// will automatically enable or disable it based on whether a prefilter - /// is present or not, respectively. This is done because a prefilter's - /// effectiveness is rooted in being executed whenever the DFA is in a - /// start state, and that's only possible to do when they are specialized. - /// - /// Note that it is plausibly reasonable to _disable_ this option - /// explicitly while _enabling_ a prefilter. In that case, a prefilter - /// will still be run at the beginning of a search, but never again. This - /// in theory could strike a good balance if you're in a situation where a - /// prefilter is likely to produce many false positive candidates. - /// - /// # Example - /// - /// This example shows how to enable start state specialization and then - /// shows how to check whether a state is a start state or not. - /// - /// ``` - /// use regex_automata::{dfa::{Automaton, dense::DFA}, Input}; - /// - /// let dfa = DFA::builder() - /// .configure(DFA::config().specialize_start_states(true)) - /// .build(r"[a-z]+")?; - /// - /// let haystack = "123 foobar 4567".as_bytes(); - /// let sid = dfa.start_state_forward(&Input::new(haystack))?; - /// // The ID returned by 'start_state_forward' will always be tagged as - /// // a start state when start state specialization is enabled. - /// assert!(dfa.is_special_state(sid)); - /// assert!(dfa.is_start_state(sid)); - /// - /// # Ok::<(), Box>(()) - /// ``` - /// - /// Compare the above with the default DFA configuration where start states - /// are _not_ specialized. In this case, the start state is not tagged at - /// all: - /// - /// ``` - /// use regex_automata::{dfa::{Automaton, dense::DFA}, Input}; - /// - /// let dfa = DFA::new(r"[a-z]+")?; - /// - /// let haystack = "123 foobar 4567"; - /// let sid = dfa.start_state_forward(&Input::new(haystack))?; - /// // Start states are not special in the default configuration! - /// assert!(!dfa.is_special_state(sid)); - /// assert!(!dfa.is_start_state(sid)); - /// - /// # Ok::<(), Box>(()) - /// ``` - pub fn specialize_start_states(mut self, yes: bool) -> Config { - self.specialize_start_states = Some(yes); - self - } - - /// Set a size limit on the total heap used by a DFA. - /// - /// This size limit is expressed in bytes and is applied during - /// determinization of an NFA into a DFA. 
If the DFA's heap usage, and only - /// the DFA, exceeds this configured limit, then determinization is stopped - /// and an error is returned. - /// - /// This limit does not apply to auxiliary storage used during - /// determinization that isn't part of the generated DFA. - /// - /// This limit is only applied during determinization. Currently, there is - /// no way to post-pone this check to after minimization if minimization - /// was enabled. - /// - /// The total limit on heap used during determinization is the sum of the - /// DFA and determinization size limits. - /// - /// The default is no limit. - /// - /// # Example - /// - /// This example shows a DFA that fails to build because of a configured - /// size limit. This particular example also serves as a cautionary tale - /// demonstrating just how big DFAs with large Unicode character classes - /// can get. - /// - /// ``` - /// # if cfg!(miri) { return Ok(()); } // miri takes too long - /// use regex_automata::{dfa::{dense, Automaton}, Input}; - /// - /// // 6MB isn't enough! - /// dense::Builder::new() - /// .configure(dense::Config::new().dfa_size_limit(Some(6_000_000))) - /// .build(r"\w{20}") - /// .unwrap_err(); - /// - /// // ... but 7MB probably is! - /// // (Note that DFA sizes aren't necessarily stable between releases.) - /// let dfa = dense::Builder::new() - /// .configure(dense::Config::new().dfa_size_limit(Some(7_000_000))) - /// .build(r"\w{20}")?; - /// let haystack = "A".repeat(20).into_bytes(); - /// assert!(dfa.try_search_fwd(&Input::new(&haystack))?.is_some()); - /// - /// # Ok::<(), Box>(()) - /// ``` - /// - /// While one needs a little more than 6MB to represent `\w{20}`, it - /// turns out that you only need a little more than 6KB to represent - /// `(?-u:\w{20})`. So only use Unicode if you need it! - /// - /// As with [`Config::determinize_size_limit`], the size of a DFA is - /// influenced by other factors, such as what start state configurations - /// to support. For example, if you only need unanchored searches and not - /// anchored searches, then configuring the DFA to only support unanchored - /// searches can reduce its size. By default, DFAs support both unanchored - /// and anchored searches. - /// - /// ``` - /// # if cfg!(miri) { return Ok(()); } // miri takes too long - /// use regex_automata::{dfa::{dense, Automaton, StartKind}, Input}; - /// - /// // 3MB isn't enough! - /// dense::Builder::new() - /// .configure(dense::Config::new() - /// .dfa_size_limit(Some(3_000_000)) - /// .start_kind(StartKind::Unanchored) - /// ) - /// .build(r"\w{20}") - /// .unwrap_err(); - /// - /// // ... but 4MB probably is! - /// // (Note that DFA sizes aren't necessarily stable between releases.) - /// let dfa = dense::Builder::new() - /// .configure(dense::Config::new() - /// .dfa_size_limit(Some(4_000_000)) - /// .start_kind(StartKind::Unanchored) - /// ) - /// .build(r"\w{20}")?; - /// let haystack = "A".repeat(20).into_bytes(); - /// assert!(dfa.try_search_fwd(&Input::new(&haystack))?.is_some()); - /// - /// # Ok::<(), Box>(()) - /// ``` - pub fn dfa_size_limit(mut self, bytes: Option) -> Config { - self.dfa_size_limit = Some(bytes); - self - } - - /// Set a size limit on the total heap used by determinization. - /// - /// This size limit is expressed in bytes and is applied during - /// determinization of an NFA into a DFA. 
If the heap used for auxiliary - /// storage during determinization (memory that is not in the DFA but - /// necessary for building the DFA) exceeds this configured limit, then - /// determinization is stopped and an error is returned. - /// - /// This limit does not apply to heap used by the DFA itself. - /// - /// The total limit on heap used during determinization is the sum of the - /// DFA and determinization size limits. - /// - /// The default is no limit. - /// - /// # Example - /// - /// This example shows a DFA that fails to build because of a - /// configured size limit on the amount of heap space used by - /// determinization. This particular example complements the example for - /// [`Config::dfa_size_limit`] by demonstrating that not only does Unicode - /// potentially make DFAs themselves big, but it also results in more - /// auxiliary storage during determinization. (Although, auxiliary storage - /// is still not as much as the DFA itself.) - /// - /// ``` - /// # if cfg!(miri) { return Ok(()); } // miri takes too long - /// # if !cfg!(target_pointer_width = "64") { return Ok(()); } // see #1039 - /// use regex_automata::{dfa::{dense, Automaton}, Input}; - /// - /// // 700KB isn't enough! - /// dense::Builder::new() - /// .configure(dense::Config::new() - /// .determinize_size_limit(Some(700_000)) - /// ) - /// .build(r"\w{20}") - /// .unwrap_err(); - /// - /// // ... but 800KB probably is! - /// // (Note that auxiliary storage sizes aren't necessarily stable between - /// // releases.) - /// let dfa = dense::Builder::new() - /// .configure(dense::Config::new() - /// .determinize_size_limit(Some(800_000)) - /// ) - /// .build(r"\w{20}")?; - /// let haystack = "A".repeat(20).into_bytes(); - /// assert!(dfa.try_search_fwd(&Input::new(&haystack))?.is_some()); - /// - /// # Ok::<(), Box>(()) - /// ``` - /// - /// Note that some parts of the configuration on a DFA can have a - /// big impact on how big the DFA is, and thus, how much memory is - /// used. For example, the default setting for [`Config::start_kind`] is - /// [`StartKind::Both`]. But if you only need an anchored search, for - /// example, then it can be much cheaper to build a DFA that only supports - /// anchored searches. (Running an unanchored search with it would panic.) - /// - /// ``` - /// # if cfg!(miri) { return Ok(()); } // miri takes too long - /// # if !cfg!(target_pointer_width = "64") { return Ok(()); } // see #1039 - /// use regex_automata::{ - /// dfa::{dense, Automaton, StartKind}, - /// Anchored, Input, - /// }; - /// - /// // 200KB isn't enough! - /// dense::Builder::new() - /// .configure(dense::Config::new() - /// .determinize_size_limit(Some(200_000)) - /// .start_kind(StartKind::Anchored) - /// ) - /// .build(r"\w{20}") - /// .unwrap_err(); - /// - /// // ... but 300KB probably is! - /// // (Note that auxiliary storage sizes aren't necessarily stable between - /// // releases.) - /// let dfa = dense::Builder::new() - /// .configure(dense::Config::new() - /// .determinize_size_limit(Some(300_000)) - /// .start_kind(StartKind::Anchored) - /// ) - /// .build(r"\w{20}")?; - /// let haystack = "A".repeat(20).into_bytes(); - /// let input = Input::new(&haystack).anchored(Anchored::Yes); - /// assert!(dfa.try_search_fwd(&input)?.is_some()); - /// - /// # Ok::<(), Box>(()) - /// ``` - pub fn determinize_size_limit(mut self, bytes: Option) -> Config { - self.determinize_size_limit = Some(bytes); - self - } - - /// Returns whether this configuration has enabled simple state - /// acceleration. 
- pub fn get_accelerate(&self) -> bool { - self.accelerate.unwrap_or(true) - } - - /// Returns the prefilter attached to this configuration, if any. - pub fn get_prefilter(&self) -> Option<&Prefilter> { - self.pre.as_ref().unwrap_or(&None).as_ref() - } - - /// Returns whether this configuration has enabled the expensive process - /// of minimizing a DFA. - pub fn get_minimize(&self) -> bool { - self.minimize.unwrap_or(false) - } - - /// Returns the match semantics set in this configuration. - pub fn get_match_kind(&self) -> MatchKind { - self.match_kind.unwrap_or(MatchKind::LeftmostFirst) - } - - /// Returns the starting state configuration for a DFA. - pub fn get_starts(&self) -> StartKind { - self.start_kind.unwrap_or(StartKind::Both) - } - - /// Returns whether this configuration has enabled anchored starting states - /// for every pattern in the DFA. - pub fn get_starts_for_each_pattern(&self) -> bool { - self.starts_for_each_pattern.unwrap_or(false) - } - - /// Returns whether this configuration has enabled byte classes or not. - /// This is typically a debugging oriented option, as disabling it confers - /// no speed benefit. - pub fn get_byte_classes(&self) -> bool { - self.byte_classes.unwrap_or(true) - } - - /// Returns whether this configuration has enabled heuristic Unicode word - /// boundary support. When enabled, it is possible for a search to return - /// an error. - pub fn get_unicode_word_boundary(&self) -> bool { - self.unicode_word_boundary.unwrap_or(false) - } - - /// Returns whether this configuration will instruct the DFA to enter a - /// quit state whenever the given byte is seen during a search. When at - /// least one byte has this enabled, it is possible for a search to return - /// an error. - pub fn get_quit(&self, byte: u8) -> bool { - self.quitset.map_or(false, |q| q.contains(byte)) - } - - /// Returns whether this configuration will instruct the DFA to - /// "specialize" start states. When enabled, the DFA will mark start states - /// as "special" so that search routines using the DFA can detect when - /// it's in a start state and do some kind of optimization (like run a - /// prefilter). - pub fn get_specialize_start_states(&self) -> bool { - self.specialize_start_states.unwrap_or(false) - } - - /// Returns the DFA size limit of this configuration if one was set. - /// The size limit is total number of bytes on the heap that a DFA is - /// permitted to use. If the DFA exceeds this limit during construction, - /// then construction is stopped and an error is returned. - pub fn get_dfa_size_limit(&self) -> Option { - self.dfa_size_limit.unwrap_or(None) - } - - /// Returns the determinization size limit of this configuration if one - /// was set. The size limit is total number of bytes on the heap that - /// determinization is permitted to use. If determinization exceeds this - /// limit during construction, then construction is stopped and an error is - /// returned. - /// - /// This is different from the DFA size limit in that this only applies to - /// the auxiliary storage used during determinization. Once determinization - /// is complete, this memory is freed. - /// - /// The limit on the total heap memory used is the sum of the DFA and - /// determinization size limits. - pub fn get_determinize_size_limit(&self) -> Option { - self.determinize_size_limit.unwrap_or(None) - } - - /// Overwrite the default configuration such that the options in `o` are - /// always used. If an option in `o` is not set, then the corresponding - /// option in `self` is used. 
If it's not set in `self` either, then it - /// remains not set. - pub(crate) fn overwrite(&self, o: Config) -> Config { - Config { - accelerate: o.accelerate.or(self.accelerate), - pre: o.pre.or_else(|| self.pre.clone()), - minimize: o.minimize.or(self.minimize), - match_kind: o.match_kind.or(self.match_kind), - start_kind: o.start_kind.or(self.start_kind), - starts_for_each_pattern: o - .starts_for_each_pattern - .or(self.starts_for_each_pattern), - byte_classes: o.byte_classes.or(self.byte_classes), - unicode_word_boundary: o - .unicode_word_boundary - .or(self.unicode_word_boundary), - quitset: o.quitset.or(self.quitset), - specialize_start_states: o - .specialize_start_states - .or(self.specialize_start_states), - dfa_size_limit: o.dfa_size_limit.or(self.dfa_size_limit), - determinize_size_limit: o - .determinize_size_limit - .or(self.determinize_size_limit), - } - } -} - -/// A builder for constructing a deterministic finite automaton from regular -/// expressions. -/// -/// This builder provides two main things: -/// -/// 1. It provides a few different `build` routines for actually constructing -/// a DFA from different kinds of inputs. The most convenient is -/// [`Builder::build`], which builds a DFA directly from a pattern string. The -/// most flexible is [`Builder::build_from_nfa`], which builds a DFA straight -/// from an NFA. -/// 2. The builder permits configuring a number of things. -/// [`Builder::configure`] is used with [`Config`] to configure aspects of -/// the DFA and the construction process itself. [`Builder::syntax`] and -/// [`Builder::thompson`] permit configuring the regex parser and Thompson NFA -/// construction, respectively. The syntax and thompson configurations only -/// apply when building from a pattern string. -/// -/// This builder always constructs a *single* DFA. As such, this builder -/// can only be used to construct regexes that either detect the presence -/// of a match or find the end location of a match. A single DFA cannot -/// produce both the start and end of a match. For that information, use a -/// [`Regex`](crate::dfa::regex::Regex), which can be similarly configured -/// using [`regex::Builder`](crate::dfa::regex::Builder). The main reason to -/// use a DFA directly is if the end location of a match is enough for your use -/// case. Namely, a `Regex` will construct two DFAs instead of one, since a -/// second reverse DFA is needed to find the start of a match. -/// -/// Note that if one wants to build a sparse DFA, you must first build a dense -/// DFA and convert that to a sparse DFA. There is no way to build a sparse -/// DFA without first building a dense DFA. -/// -/// # Example -/// -/// This example shows how to build a minimized DFA that completely disables -/// Unicode. That is: -/// -/// * Things such as `\w`, `.` and `\b` are no longer Unicode-aware. `\w` -/// and `\b` are ASCII-only while `.` matches any byte except for `\n` -/// (instead of any UTF-8 encoding of a Unicode scalar value except for -/// `\n`). Things that are Unicode only, such as `\pL`, are not allowed. -/// * The pattern itself is permitted to match invalid UTF-8. For example, -/// things like `[^a]` that match any byte except for `a` are permitted. 
-/// -/// ``` -/// use regex_automata::{ -/// dfa::{Automaton, dense}, -/// util::syntax, -/// HalfMatch, Input, -/// }; -/// -/// let dfa = dense::Builder::new() -/// .configure(dense::Config::new().minimize(false)) -/// .syntax(syntax::Config::new().unicode(false).utf8(false)) -/// .build(r"foo[^b]ar.*")?; -/// -/// let haystack = b"\xFEfoo\xFFar\xE2\x98\xFF\n"; -/// let expected = Some(HalfMatch::must(0, 10)); -/// let got = dfa.try_search_fwd(&Input::new(haystack))?; -/// assert_eq!(expected, got); -/// -/// # Ok::<(), Box>(()) -/// ``` -#[cfg(feature = "dfa-build")] -#[derive(Clone, Debug)] -pub struct Builder { - config: Config, - #[cfg(feature = "syntax")] - thompson: thompson::Compiler, -} - -#[cfg(feature = "dfa-build")] -impl Builder { - /// Create a new dense DFA builder with the default configuration. - pub fn new() -> Builder { - Builder { - config: Config::default(), - #[cfg(feature = "syntax")] - thompson: thompson::Compiler::new(), - } - } - - /// Build a DFA from the given pattern. - /// - /// If there was a problem parsing or compiling the pattern, then an error - /// is returned. - #[cfg(feature = "syntax")] - pub fn build(&self, pattern: &str) -> Result { - self.build_many(&[pattern]) - } - - /// Build a DFA from the given patterns. - /// - /// When matches are returned, the pattern ID corresponds to the index of - /// the pattern in the slice given. - #[cfg(feature = "syntax")] - pub fn build_many>( - &self, - patterns: &[P], - ) -> Result { - let nfa = self - .thompson - .clone() - // We can always forcefully disable captures because DFAs do not - // support them. - .configure( - thompson::Config::new() - .which_captures(thompson::WhichCaptures::None), - ) - .build_many(patterns) - .map_err(BuildError::nfa)?; - self.build_from_nfa(&nfa) - } - - /// Build a DFA from the given NFA. - /// - /// # Example - /// - /// This example shows how to build a DFA if you already have an NFA in - /// hand. - /// - /// ``` - /// use regex_automata::{ - /// dfa::{Automaton, dense}, - /// nfa::thompson::NFA, - /// HalfMatch, Input, - /// }; - /// - /// let haystack = "foo123bar".as_bytes(); - /// - /// // This shows how to set non-default options for building an NFA. - /// let nfa = NFA::compiler() - /// .configure(NFA::config().shrink(true)) - /// .build(r"[0-9]+")?; - /// let dfa = dense::Builder::new().build_from_nfa(&nfa)?; - /// let expected = Some(HalfMatch::must(0, 6)); - /// let got = dfa.try_search_fwd(&Input::new(haystack))?; - /// assert_eq!(expected, got); - /// - /// # Ok::<(), Box>(()) - /// ``` - pub fn build_from_nfa( - &self, - nfa: &thompson::NFA, - ) -> Result { - let mut quitset = self.config.quitset.unwrap_or(ByteSet::empty()); - if self.config.get_unicode_word_boundary() - && nfa.look_set_any().contains_word_unicode() - { - for b in 0x80..=0xFF { - quitset.add(b); - } - } - let classes = if !self.config.get_byte_classes() { - // DFAs will always use the equivalence class map, but enabling - // this option is useful for debugging. Namely, this will cause all - // transitions to be defined over their actual bytes instead of an - // opaque equivalence class identifier. The former is much easier - // to grok as a human. - ByteClasses::singletons() - } else { - let mut set = nfa.byte_class_set().clone(); - // It is important to distinguish any "quit" bytes from all other - // bytes. Otherwise, a non-quit byte may end up in the same - // class as a quit byte, and thus cause the DFA to stop when it - // shouldn't. 
- // - // Test case: - // - // regex-cli find match dense --unicode-word-boundary \ - // -p '^#' -p '\b10\.55\.182\.100\b' -y @conn.json.1000x.log - if !quitset.is_empty() { - set.add_set(&quitset); - } - set.byte_classes() - }; - - let mut dfa = DFA::initial( - classes, - nfa.pattern_len(), - self.config.get_starts(), - nfa.look_matcher(), - self.config.get_starts_for_each_pattern(), - self.config.get_prefilter().map(|p| p.clone()), - quitset, - Flags::from_nfa(&nfa), - )?; - determinize::Config::new() - .match_kind(self.config.get_match_kind()) - .quit(quitset) - .dfa_size_limit(self.config.get_dfa_size_limit()) - .determinize_size_limit(self.config.get_determinize_size_limit()) - .run(nfa, &mut dfa)?; - if self.config.get_minimize() { - dfa.minimize(); - } - if self.config.get_accelerate() { - dfa.accelerate(); - } - // The state shuffling done before this point always assumes that start - // states should be marked as "special," even though it isn't the - // default configuration. State shuffling is complex enough as it is, - // so it's simpler to just "fix" our special state ID ranges to not - // include starting states after-the-fact. - if !self.config.get_specialize_start_states() { - dfa.special.set_no_special_start_states(); - } - // Look for and set the universal starting states. - dfa.set_universal_starts(); - dfa.tt.table.shrink_to_fit(); - dfa.st.table.shrink_to_fit(); - dfa.ms.slices.shrink_to_fit(); - dfa.ms.pattern_ids.shrink_to_fit(); - Ok(dfa) - } - - /// Apply the given dense DFA configuration options to this builder. - pub fn configure(&mut self, config: Config) -> &mut Builder { - self.config = self.config.overwrite(config); - self - } - - /// Set the syntax configuration for this builder using - /// [`syntax::Config`](crate::util::syntax::Config). - /// - /// This permits setting things like case insensitivity, Unicode and multi - /// line mode. - /// - /// These settings only apply when constructing a DFA directly from a - /// pattern. - #[cfg(feature = "syntax")] - pub fn syntax( - &mut self, - config: crate::util::syntax::Config, - ) -> &mut Builder { - self.thompson.syntax(config); - self - } - - /// Set the Thompson NFA configuration for this builder using - /// [`nfa::thompson::Config`](crate::nfa::thompson::Config). - /// - /// This permits setting things like whether the DFA should match the regex - /// in reverse or if additional time should be spent shrinking the size of - /// the NFA. - /// - /// These settings only apply when constructing a DFA directly from a - /// pattern. - #[cfg(feature = "syntax")] - pub fn thompson(&mut self, config: thompson::Config) -> &mut Builder { - self.thompson.configure(config); - self - } -} - -#[cfg(feature = "dfa-build")] -impl Default for Builder { - fn default() -> Builder { - Builder::new() - } -} - -/// A convenience alias for an owned DFA. We use this particular instantiation -/// a lot in this crate, so it's worth giving it a name. This instantiation -/// is commonly used for mutable APIs on the DFA while building it. The main -/// reason for making DFAs generic is no_std support, and more generally, -/// making it possible to load a DFA from an arbitrary slice of bytes. -#[cfg(feature = "alloc")] -pub(crate) type OwnedDFA = DFA>; - -/// A dense table-based deterministic finite automaton (DFA). -/// -/// All dense DFAs have one or more start states, zero or more match states -/// and a transition table that maps the current state and the current byte -/// of input to the next state. 
A DFA can use this information to implement -/// fast searching. In particular, the use of a dense DFA generally makes the -/// trade off that match speed is the most valuable characteristic, even if -/// building the DFA may take significant time *and* space. (More concretely, -/// building a DFA takes time and space that is exponential in the size of the -/// pattern in the worst case.) As such, the processing of every byte of input -/// is done with a small constant number of operations that does not vary with -/// the pattern, its size or the size of the alphabet. If your needs don't line -/// up with this trade off, then a dense DFA may not be an adequate solution to -/// your problem. -/// -/// In contrast, a [`sparse::DFA`] makes the opposite -/// trade off: it uses less space but will execute a variable number of -/// instructions per byte at match time, which makes it slower for matching. -/// (Note that space usage is still exponential in the size of the pattern in -/// the worst case.) -/// -/// A DFA can be built using the default configuration via the -/// [`DFA::new`] constructor. Otherwise, one can -/// configure various aspects via [`dense::Builder`](Builder). -/// -/// A single DFA fundamentally supports the following operations: -/// -/// 1. Detection of a match. -/// 2. Location of the end of a match. -/// 3. In the case of a DFA with multiple patterns, which pattern matched is -/// reported as well. -/// -/// A notable absence from the above list of capabilities is the location of -/// the *start* of a match. In order to provide both the start and end of -/// a match, *two* DFAs are required. This functionality is provided by a -/// [`Regex`](crate::dfa::regex::Regex). -/// -/// # Type parameters -/// -/// A `DFA` has one type parameter, `T`, which is used to represent state IDs, -/// pattern IDs and accelerators. `T` is typically a `Vec` or a `&[u32]`. -/// -/// # The `Automaton` trait -/// -/// This type implements the [`Automaton`] trait, which means it can be used -/// for searching. For example: -/// -/// ``` -/// use regex_automata::{dfa::{Automaton, dense::DFA}, HalfMatch, Input}; -/// -/// let dfa = DFA::new("foo[0-9]+")?; -/// let expected = HalfMatch::must(0, 8); -/// assert_eq!(Some(expected), dfa.try_search_fwd(&Input::new("foo12345"))?); -/// # Ok::<(), Box>(()) -/// ``` -#[derive(Clone)] -pub struct DFA { - /// The transition table for this DFA. This includes the transitions - /// themselves, along with the stride, number of states and the equivalence - /// class mapping. - tt: TransitionTable, - /// The set of starting state identifiers for this DFA. The starting state - /// IDs act as pointers into the transition table. The specific starting - /// state chosen for each search is dependent on the context at which the - /// search begins. - st: StartTable, - /// The set of match states and the patterns that match for each - /// corresponding match state. - /// - /// This structure is technically only needed because of support for - /// multi-regexes. Namely, multi-regexes require answering not just whether - /// a match exists, but _which_ patterns match. So we need to store the - /// matching pattern IDs for each match state. We do this even when there - /// is only one pattern for the sake of simplicity. In practice, this uses - /// up very little space for the case of one pattern. - ms: MatchStates, - /// Information about which states are "special." Special states are states - /// that are dead, quit, matching, starting or accelerated. 
For more info, - /// see the docs for `Special`. - special: Special, - /// The accelerators for this DFA. - /// - /// If a state is accelerated, then there exist only a small number of - /// bytes that can cause the DFA to leave the state. This permits searching - /// to use optimized routines to find those specific bytes instead of using - /// the transition table. - /// - /// All accelerated states exist in a contiguous range in the DFA's - /// transition table. See dfa/special.rs for more details on how states are - /// arranged. - accels: Accels, - /// Any prefilter attached to this DFA. - /// - /// Note that currently prefilters are not serialized. When deserializing - /// a DFA from bytes, this is always set to `None`. - pre: Option, - /// The set of "quit" bytes for this DFA. - /// - /// This is only used when computing the start state for a particular - /// position in a haystack. Namely, in the case where there is a quit - /// byte immediately before the start of the search, this set needs to be - /// explicitly consulted. In all other cases, quit bytes are detected by - /// the DFA itself, by transitioning all quit bytes to a special "quit - /// state." - quitset: ByteSet, - /// Various flags describing the behavior of this DFA. - flags: Flags, -} - -#[cfg(feature = "dfa-build")] -impl OwnedDFA { - /// Parse the given regular expression using a default configuration and - /// return the corresponding DFA. - /// - /// If you want a non-default configuration, then use the - /// [`dense::Builder`](Builder) to set your own configuration. - /// - /// # Example - /// - /// ``` - /// use regex_automata::{dfa::{Automaton, dense}, HalfMatch, Input}; - /// - /// let dfa = dense::DFA::new("foo[0-9]+bar")?; - /// let expected = Some(HalfMatch::must(0, 11)); - /// assert_eq!(expected, dfa.try_search_fwd(&Input::new("foo12345bar"))?); - /// # Ok::<(), Box>(()) - /// ``` - #[cfg(feature = "syntax")] - pub fn new(pattern: &str) -> Result { - Builder::new().build(pattern) - } - - /// Parse the given regular expressions using a default configuration and - /// return the corresponding multi-DFA. - /// - /// If you want a non-default configuration, then use the - /// [`dense::Builder`](Builder) to set your own configuration. - /// - /// # Example - /// - /// ``` - /// use regex_automata::{dfa::{Automaton, dense}, HalfMatch, Input}; - /// - /// let dfa = dense::DFA::new_many(&["[0-9]+", "[a-z]+"])?; - /// let expected = Some(HalfMatch::must(1, 3)); - /// assert_eq!(expected, dfa.try_search_fwd(&Input::new("foo12345bar"))?); - /// # Ok::<(), Box>(()) - /// ``` - #[cfg(feature = "syntax")] - pub fn new_many>( - patterns: &[P], - ) -> Result { - Builder::new().build_many(patterns) - } -} - -#[cfg(feature = "dfa-build")] -impl OwnedDFA { - /// Create a new DFA that matches every input. - /// - /// # Example - /// - /// ``` - /// use regex_automata::{dfa::{Automaton, dense}, HalfMatch, Input}; - /// - /// let dfa = dense::DFA::always_match()?; - /// - /// let expected = Some(HalfMatch::must(0, 0)); - /// assert_eq!(expected, dfa.try_search_fwd(&Input::new(""))?); - /// assert_eq!(expected, dfa.try_search_fwd(&Input::new("foo"))?); - /// # Ok::<(), Box>(()) - /// ``` - pub fn always_match() -> Result { - let nfa = thompson::NFA::always_match(); - Builder::new().build_from_nfa(&nfa) - } - - /// Create a new DFA that never matches any input. 
- /// - /// # Example - /// - /// ``` - /// use regex_automata::{dfa::{Automaton, dense}, Input}; - /// - /// let dfa = dense::DFA::never_match()?; - /// assert_eq!(None, dfa.try_search_fwd(&Input::new(""))?); - /// assert_eq!(None, dfa.try_search_fwd(&Input::new("foo"))?); - /// # Ok::<(), Box>(()) - /// ``` - pub fn never_match() -> Result { - let nfa = thompson::NFA::never_match(); - Builder::new().build_from_nfa(&nfa) - } - - /// Create an initial DFA with the given equivalence classes, pattern - /// length and whether anchored starting states are enabled for each - /// pattern. An initial DFA can be further mutated via determinization. - fn initial( - classes: ByteClasses, - pattern_len: usize, - starts: StartKind, - lookm: &LookMatcher, - starts_for_each_pattern: bool, - pre: Option, - quitset: ByteSet, - flags: Flags, - ) -> Result { - let start_pattern_len = - if starts_for_each_pattern { Some(pattern_len) } else { None }; - Ok(DFA { - tt: TransitionTable::minimal(classes), - st: StartTable::dead(starts, lookm, start_pattern_len)?, - ms: MatchStates::empty(pattern_len), - special: Special::new(), - accels: Accels::empty(), - pre, - quitset, - flags, - }) - } -} - -#[cfg(feature = "dfa-build")] -impl DFA<&[u32]> { - /// Return a new default dense DFA compiler configuration. - /// - /// This is a convenience routine to avoid needing to import the [`Config`] - /// type when customizing the construction of a dense DFA. - pub fn config() -> Config { - Config::new() - } - - /// Create a new dense DFA builder with the default configuration. - /// - /// This is a convenience routine to avoid needing to import the - /// [`Builder`] type in common cases. - pub fn builder() -> Builder { - Builder::new() - } -} - -impl> DFA { - /// Cheaply return a borrowed version of this dense DFA. Specifically, - /// the DFA returned always uses `&[u32]` for its transition table. - pub fn as_ref(&self) -> DFA<&'_ [u32]> { - DFA { - tt: self.tt.as_ref(), - st: self.st.as_ref(), - ms: self.ms.as_ref(), - special: self.special, - accels: self.accels(), - pre: self.pre.clone(), - quitset: self.quitset, - flags: self.flags, - } - } - - /// Return an owned version of this sparse DFA. Specifically, the DFA - /// returned always uses `Vec` for its transition table. - /// - /// Effectively, this returns a dense DFA whose transition table lives on - /// the heap. - #[cfg(feature = "alloc")] - pub fn to_owned(&self) -> OwnedDFA { - DFA { - tt: self.tt.to_owned(), - st: self.st.to_owned(), - ms: self.ms.to_owned(), - special: self.special, - accels: self.accels().to_owned(), - pre: self.pre.clone(), - quitset: self.quitset, - flags: self.flags, - } - } - - /// Returns the starting state configuration for this DFA. - /// - /// The default is [`StartKind::Both`], which means the DFA supports both - /// unanchored and anchored searches. However, this can generally lead to - /// bigger DFAs. Therefore, a DFA might be compiled with support for just - /// unanchored or anchored searches. In that case, running a search with - /// an unsupported configuration will panic. - pub fn start_kind(&self) -> StartKind { - self.st.kind - } - - /// Returns the start byte map used for computing the `Start` configuration - /// at the beginning of a search. - pub(crate) fn start_map(&self) -> &StartByteMap { - &self.st.start_map - } - - /// Returns true only if this DFA has starting states for each pattern. 
-    ///
-    /// When a DFA has starting states for each pattern, then a search with the
-    /// DFA can be configured to only look for anchored matches of a specific
-    /// pattern. Specifically, APIs like [`Automaton::try_search_fwd`] can
-    /// accept a non-None `pattern_id` if and only if this method returns true.
-    /// Otherwise, calling `try_search_fwd` will panic.
-    ///
-    /// Note that if the DFA has no patterns, this always returns false.
-    pub fn starts_for_each_pattern(&self) -> bool {
-        self.st.pattern_len.is_some()
-    }
-
-    /// Returns the equivalence classes that make up the alphabet for this DFA.
-    ///
-    /// Unless [`Config::byte_classes`] was disabled, it is possible that
-    /// multiple distinct bytes are grouped into the same equivalence class
-    /// if it is impossible for them to discriminate between a match and a
-    /// non-match. This has the effect of reducing the overall alphabet size
-    /// and in turn potentially substantially reducing the size of the DFA's
-    /// transition table.
-    ///
-    /// The downside of using equivalence classes like this is that every state
-    /// transition will automatically use this map to convert an arbitrary
-    /// byte to its corresponding equivalence class. In practice this has a
-    /// negligible impact on performance.
-    pub fn byte_classes(&self) -> &ByteClasses {
-        &self.tt.classes
-    }
-
-    /// Returns the total number of elements in the alphabet for this DFA.
-    ///
-    /// That is, this returns the total number of transitions that each state
-    /// in this DFA must have. Typically, a normal byte oriented DFA would
-    /// always have an alphabet size of 256, corresponding to the number of
-    /// unique values in a single byte. However, this implementation has two
-    /// peculiarities that impact the alphabet length:
-    ///
-    /// * Every state has a special "EOI" transition that is only followed
-    /// after the end of some haystack is reached. This EOI transition is
-    /// necessary to account for one byte of look-ahead when implementing
-    /// things like `\b` and `$`.
-    /// * Bytes are grouped into equivalence classes such that no two bytes in
-    /// the same class can distinguish a match from a non-match. For example,
-    /// in the regex `^[a-z]+$`, the ASCII bytes `a-z` could all be in the
-    /// same equivalence class. This leads to a massive space savings.
-    ///
-    /// Note though that the alphabet length does _not_ necessarily equal the
-    /// total stride space taken up by a single DFA state in the transition
-    /// table. Namely, for performance reasons, the stride is always the
-    /// smallest power of two that is greater than or equal to the alphabet
-    /// length. For this reason, [`DFA::stride`] or [`DFA::stride2`] are
-    /// often more useful. The alphabet length is typically useful only for
-    /// informational purposes.
-    pub fn alphabet_len(&self) -> usize {
-        self.tt.alphabet_len()
-    }
-
-    /// Returns the total stride for every state in this DFA, expressed as the
-    /// exponent of a power of 2. The stride is the amount of space each state
-    /// takes up in the transition table, expressed as a number of transitions.
-    /// (Unused transitions map to dead states.)
-    ///
-    /// The stride of a DFA is always equivalent to the smallest power of 2
-    /// that is greater than or equal to the DFA's alphabet length. This
-    /// definition uses extra space, but permits faster translation between
-    /// premultiplied state identifiers and contiguous indices (by using shifts
-    /// instead of relying on integer division).
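The relationship between the alphabet length, the stride and `stride2` described in the vendored documentation above can be checked directly against a built DFA. The following is a minimal illustrative sketch, not part of the vendored file; it assumes the crate's default `dfa-build` and `syntax` features, and the `^[a-z]+$` pattern is an arbitrary example. The `257` bound comes from the special EOI transition the surrounding docs mention.

```rust
use regex_automata::dfa::dense::DFA;

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Any pattern works here; `^[a-z]+$` is just an arbitrary example.
    let dfa = DFA::new("^[a-z]+$")?;

    // The stride is the smallest power of 2 that is >= the alphabet length,
    // and `stride2` is the exponent of that power of 2.
    assert_eq!(dfa.stride(), 1 << dfa.stride2());
    assert!(dfa.alphabet_len() <= dfa.stride());

    // The alphabet can never exceed 257: at most 256 byte values plus the
    // special EOI sentinel, and equivalence classes usually shrink it further.
    assert!(dfa.alphabet_len() <= 257);
    Ok(())
}
```

The assertions simply restate the invariants documented above, so they hold for any pattern, not just this one.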
- /// - /// For example, if the DFA's stride is 16 transitions, then its `stride2` - /// is `4` since `2^4 = 16`. - /// - /// The minimum `stride2` value is `1` (corresponding to a stride of `2`) - /// while the maximum `stride2` value is `9` (corresponding to a stride of - /// `512`). The maximum is not `8` since the maximum alphabet size is `257` - /// when accounting for the special EOI transition. However, an alphabet - /// length of that size is exceptionally rare since the alphabet is shrunk - /// into equivalence classes. - pub fn stride2(&self) -> usize { - self.tt.stride2 - } - - /// Returns the total stride for every state in this DFA. This corresponds - /// to the total number of transitions used by each state in this DFA's - /// transition table. - /// - /// Please see [`DFA::stride2`] for more information. In particular, this - /// returns the stride as the number of transitions, where as `stride2` - /// returns it as the exponent of a power of 2. - pub fn stride(&self) -> usize { - self.tt.stride() - } - - /// Returns the memory usage, in bytes, of this DFA. - /// - /// The memory usage is computed based on the number of bytes used to - /// represent this DFA. - /// - /// This does **not** include the stack size used up by this DFA. To - /// compute that, use `std::mem::size_of::()`. - pub fn memory_usage(&self) -> usize { - self.tt.memory_usage() - + self.st.memory_usage() - + self.ms.memory_usage() - + self.accels.memory_usage() - } -} - -/// Routines for converting a dense DFA to other representations, such as -/// sparse DFAs or raw bytes suitable for persistent storage. -impl> DFA { - /// Convert this dense DFA to a sparse DFA. - /// - /// If a `StateID` is too small to represent all states in the sparse - /// DFA, then this returns an error. In most cases, if a dense DFA is - /// constructable with `StateID` then a sparse DFA will be as well. - /// However, it is not guaranteed. - /// - /// # Example - /// - /// ``` - /// use regex_automata::{dfa::{Automaton, dense}, HalfMatch, Input}; - /// - /// let dense = dense::DFA::new("foo[0-9]+")?; - /// let sparse = dense.to_sparse()?; - /// - /// let expected = Some(HalfMatch::must(0, 8)); - /// assert_eq!(expected, sparse.try_search_fwd(&Input::new("foo12345"))?); - /// # Ok::<(), Box>(()) - /// ``` - #[cfg(feature = "dfa-build")] - pub fn to_sparse(&self) -> Result>, BuildError> { - sparse::DFA::from_dense(self) - } - - /// Serialize this DFA as raw bytes to a `Vec` in little endian - /// format. Upon success, the `Vec` and the initial padding length are - /// returned. - /// - /// The written bytes are guaranteed to be deserialized correctly and - /// without errors in a semver compatible release of this crate by a - /// `DFA`'s deserialization APIs (assuming all other criteria for the - /// deserialization APIs has been satisfied): - /// - /// * [`DFA::from_bytes`] - /// * [`DFA::from_bytes_unchecked`] - /// - /// The padding returned is non-zero if the returned `Vec` starts at - /// an address that does not have the same alignment as `u32`. The padding - /// corresponds to the number of leading bytes written to the returned - /// `Vec`. - /// - /// # Example - /// - /// This example shows how to serialize and deserialize a DFA: - /// - /// ``` - /// use regex_automata::{dfa::{Automaton, dense::DFA}, HalfMatch, Input}; - /// - /// // Compile our original DFA. - /// let original_dfa = DFA::new("foo[0-9]+")?; - /// - /// // N.B. 
We use native endianness here to make the example work, but - /// // using to_bytes_little_endian would work on a little endian target. - /// let (buf, _) = original_dfa.to_bytes_native_endian(); - /// // Even if buf has initial padding, DFA::from_bytes will automatically - /// // ignore it. - /// let dfa: DFA<&[u32]> = DFA::from_bytes(&buf)?.0; - /// - /// let expected = Some(HalfMatch::must(0, 8)); - /// assert_eq!(expected, dfa.try_search_fwd(&Input::new("foo12345"))?); - /// # Ok::<(), Box>(()) - /// ``` - #[cfg(feature = "dfa-build")] - pub fn to_bytes_little_endian(&self) -> (Vec, usize) { - self.to_bytes::() - } - - /// Serialize this DFA as raw bytes to a `Vec` in big endian - /// format. Upon success, the `Vec` and the initial padding length are - /// returned. - /// - /// The written bytes are guaranteed to be deserialized correctly and - /// without errors in a semver compatible release of this crate by a - /// `DFA`'s deserialization APIs (assuming all other criteria for the - /// deserialization APIs has been satisfied): - /// - /// * [`DFA::from_bytes`] - /// * [`DFA::from_bytes_unchecked`] - /// - /// The padding returned is non-zero if the returned `Vec` starts at - /// an address that does not have the same alignment as `u32`. The padding - /// corresponds to the number of leading bytes written to the returned - /// `Vec`. - /// - /// # Example - /// - /// This example shows how to serialize and deserialize a DFA: - /// - /// ``` - /// use regex_automata::{dfa::{Automaton, dense::DFA}, HalfMatch, Input}; - /// - /// // Compile our original DFA. - /// let original_dfa = DFA::new("foo[0-9]+")?; - /// - /// // N.B. We use native endianness here to make the example work, but - /// // using to_bytes_big_endian would work on a big endian target. - /// let (buf, _) = original_dfa.to_bytes_native_endian(); - /// // Even if buf has initial padding, DFA::from_bytes will automatically - /// // ignore it. - /// let dfa: DFA<&[u32]> = DFA::from_bytes(&buf)?.0; - /// - /// let expected = Some(HalfMatch::must(0, 8)); - /// assert_eq!(expected, dfa.try_search_fwd(&Input::new("foo12345"))?); - /// # Ok::<(), Box>(()) - /// ``` - #[cfg(feature = "dfa-build")] - pub fn to_bytes_big_endian(&self) -> (Vec, usize) { - self.to_bytes::() - } - - /// Serialize this DFA as raw bytes to a `Vec` in native endian - /// format. Upon success, the `Vec` and the initial padding length are - /// returned. - /// - /// The written bytes are guaranteed to be deserialized correctly and - /// without errors in a semver compatible release of this crate by a - /// `DFA`'s deserialization APIs (assuming all other criteria for the - /// deserialization APIs has been satisfied): - /// - /// * [`DFA::from_bytes`] - /// * [`DFA::from_bytes_unchecked`] - /// - /// The padding returned is non-zero if the returned `Vec` starts at - /// an address that does not have the same alignment as `u32`. The padding - /// corresponds to the number of leading bytes written to the returned - /// `Vec`. - /// - /// Generally speaking, native endian format should only be used when - /// you know that the target you're compiling the DFA for matches the - /// endianness of the target on which you're compiling DFA. For example, - /// if serialization and deserialization happen in the same process or on - /// the same machine. 
Otherwise, when serializing a DFA for use in a - /// portable environment, you'll almost certainly want to serialize _both_ - /// a little endian and a big endian version and then load the correct one - /// based on the target's configuration. - /// - /// # Example - /// - /// This example shows how to serialize and deserialize a DFA: - /// - /// ``` - /// use regex_automata::{dfa::{Automaton, dense::DFA}, HalfMatch, Input}; - /// - /// // Compile our original DFA. - /// let original_dfa = DFA::new("foo[0-9]+")?; - /// - /// let (buf, _) = original_dfa.to_bytes_native_endian(); - /// // Even if buf has initial padding, DFA::from_bytes will automatically - /// // ignore it. - /// let dfa: DFA<&[u32]> = DFA::from_bytes(&buf)?.0; - /// - /// let expected = Some(HalfMatch::must(0, 8)); - /// assert_eq!(expected, dfa.try_search_fwd(&Input::new("foo12345"))?); - /// # Ok::<(), Box>(()) - /// ``` - #[cfg(feature = "dfa-build")] - pub fn to_bytes_native_endian(&self) -> (Vec, usize) { - self.to_bytes::() - } - - /// The implementation of the public `to_bytes` serialization methods, - /// which is generic over endianness. - #[cfg(feature = "dfa-build")] - fn to_bytes(&self) -> (Vec, usize) { - let len = self.write_to_len(); - let (mut buf, padding) = wire::alloc_aligned_buffer::(len); - // This should always succeed since the only possible serialization - // error is providing a buffer that's too small, but we've ensured that - // `buf` is big enough here. - self.as_ref().write_to::(&mut buf[padding..]).unwrap(); - (buf, padding) - } - - /// Serialize this DFA as raw bytes to the given slice, in little endian - /// format. Upon success, the total number of bytes written to `dst` is - /// returned. - /// - /// The written bytes are guaranteed to be deserialized correctly and - /// without errors in a semver compatible release of this crate by a - /// `DFA`'s deserialization APIs (assuming all other criteria for the - /// deserialization APIs has been satisfied): - /// - /// * [`DFA::from_bytes`] - /// * [`DFA::from_bytes_unchecked`] - /// - /// Note that unlike the various `to_byte_*` routines, this does not write - /// any padding. Callers are responsible for handling alignment correctly. - /// - /// # Errors - /// - /// This returns an error if the given destination slice is not big enough - /// to contain the full serialized DFA. If an error occurs, then nothing - /// is written to `dst`. - /// - /// # Example - /// - /// This example shows how to serialize and deserialize a DFA without - /// dynamic memory allocation. - /// - /// ``` - /// use regex_automata::{dfa::{Automaton, dense::DFA}, HalfMatch, Input}; - /// - /// // Compile our original DFA. - /// let original_dfa = DFA::new("foo[0-9]+")?; - /// - /// // Create a 4KB buffer on the stack to store our serialized DFA. We - /// // need to use a special type to force the alignment of our [u8; N] - /// // array to be aligned to a 4 byte boundary. Otherwise, deserializing - /// // the DFA may fail because of an alignment mismatch. - /// #[repr(C)] - /// struct Aligned { - /// _align: [u32; 0], - /// bytes: B, - /// } - /// let mut buf = Aligned { _align: [], bytes: [0u8; 4 * (1<<10)] }; - /// // N.B. We use native endianness here to make the example work, but - /// // using write_to_little_endian would work on a little endian target. 
- /// let written = original_dfa.write_to_native_endian(&mut buf.bytes)?; - /// let dfa: DFA<&[u32]> = DFA::from_bytes(&buf.bytes[..written])?.0; - /// - /// let expected = Some(HalfMatch::must(0, 8)); - /// assert_eq!(expected, dfa.try_search_fwd(&Input::new("foo12345"))?); - /// # Ok::<(), Box>(()) - /// ``` - pub fn write_to_little_endian( - &self, - dst: &mut [u8], - ) -> Result { - self.as_ref().write_to::(dst) - } - - /// Serialize this DFA as raw bytes to the given slice, in big endian - /// format. Upon success, the total number of bytes written to `dst` is - /// returned. - /// - /// The written bytes are guaranteed to be deserialized correctly and - /// without errors in a semver compatible release of this crate by a - /// `DFA`'s deserialization APIs (assuming all other criteria for the - /// deserialization APIs has been satisfied): - /// - /// * [`DFA::from_bytes`] - /// * [`DFA::from_bytes_unchecked`] - /// - /// Note that unlike the various `to_byte_*` routines, this does not write - /// any padding. Callers are responsible for handling alignment correctly. - /// - /// # Errors - /// - /// This returns an error if the given destination slice is not big enough - /// to contain the full serialized DFA. If an error occurs, then nothing - /// is written to `dst`. - /// - /// # Example - /// - /// This example shows how to serialize and deserialize a DFA without - /// dynamic memory allocation. - /// - /// ``` - /// use regex_automata::{dfa::{Automaton, dense::DFA}, HalfMatch, Input}; - /// - /// // Compile our original DFA. - /// let original_dfa = DFA::new("foo[0-9]+")?; - /// - /// // Create a 4KB buffer on the stack to store our serialized DFA. We - /// // need to use a special type to force the alignment of our [u8; N] - /// // array to be aligned to a 4 byte boundary. Otherwise, deserializing - /// // the DFA may fail because of an alignment mismatch. - /// #[repr(C)] - /// struct Aligned { - /// _align: [u32; 0], - /// bytes: B, - /// } - /// let mut buf = Aligned { _align: [], bytes: [0u8; 4 * (1<<10)] }; - /// // N.B. We use native endianness here to make the example work, but - /// // using write_to_big_endian would work on a big endian target. - /// let written = original_dfa.write_to_native_endian(&mut buf.bytes)?; - /// let dfa: DFA<&[u32]> = DFA::from_bytes(&buf.bytes[..written])?.0; - /// - /// let expected = Some(HalfMatch::must(0, 8)); - /// assert_eq!(expected, dfa.try_search_fwd(&Input::new("foo12345"))?); - /// # Ok::<(), Box>(()) - /// ``` - pub fn write_to_big_endian( - &self, - dst: &mut [u8], - ) -> Result { - self.as_ref().write_to::(dst) - } - - /// Serialize this DFA as raw bytes to the given slice, in native endian - /// format. Upon success, the total number of bytes written to `dst` is - /// returned. - /// - /// The written bytes are guaranteed to be deserialized correctly and - /// without errors in a semver compatible release of this crate by a - /// `DFA`'s deserialization APIs (assuming all other criteria for the - /// deserialization APIs has been satisfied): - /// - /// * [`DFA::from_bytes`] - /// * [`DFA::from_bytes_unchecked`] - /// - /// Generally speaking, native endian format should only be used when - /// you know that the target you're compiling the DFA for matches the - /// endianness of the target on which you're compiling DFA. For example, - /// if serialization and deserialization happen in the same process or on - /// the same machine. 
Otherwise, when serializing a DFA for use in a - /// portable environment, you'll almost certainly want to serialize _both_ - /// a little endian and a big endian version and then load the correct one - /// based on the target's configuration. - /// - /// Note that unlike the various `to_byte_*` routines, this does not write - /// any padding. Callers are responsible for handling alignment correctly. - /// - /// # Errors - /// - /// This returns an error if the given destination slice is not big enough - /// to contain the full serialized DFA. If an error occurs, then nothing - /// is written to `dst`. - /// - /// # Example - /// - /// This example shows how to serialize and deserialize a DFA without - /// dynamic memory allocation. - /// - /// ``` - /// use regex_automata::{dfa::{Automaton, dense::DFA}, HalfMatch, Input}; - /// - /// // Compile our original DFA. - /// let original_dfa = DFA::new("foo[0-9]+")?; - /// - /// // Create a 4KB buffer on the stack to store our serialized DFA. We - /// // need to use a special type to force the alignment of our [u8; N] - /// // array to be aligned to a 4 byte boundary. Otherwise, deserializing - /// // the DFA may fail because of an alignment mismatch. - /// #[repr(C)] - /// struct Aligned { - /// _align: [u32; 0], - /// bytes: B, - /// } - /// let mut buf = Aligned { _align: [], bytes: [0u8; 4 * (1<<10)] }; - /// let written = original_dfa.write_to_native_endian(&mut buf.bytes)?; - /// let dfa: DFA<&[u32]> = DFA::from_bytes(&buf.bytes[..written])?.0; - /// - /// let expected = Some(HalfMatch::must(0, 8)); - /// assert_eq!(expected, dfa.try_search_fwd(&Input::new("foo12345"))?); - /// # Ok::<(), Box>(()) - /// ``` - pub fn write_to_native_endian( - &self, - dst: &mut [u8], - ) -> Result { - self.as_ref().write_to::(dst) - } - - /// Return the total number of bytes required to serialize this DFA. - /// - /// This is useful for determining the size of the buffer required to pass - /// to one of the serialization routines: - /// - /// * [`DFA::write_to_little_endian`] - /// * [`DFA::write_to_big_endian`] - /// * [`DFA::write_to_native_endian`] - /// - /// Passing a buffer smaller than the size returned by this method will - /// result in a serialization error. Serialization routines are guaranteed - /// to succeed when the buffer is big enough. - /// - /// # Example - /// - /// This example shows how to dynamically allocate enough room to serialize - /// a DFA. - /// - /// ``` - /// use regex_automata::{dfa::{Automaton, dense::DFA}, HalfMatch, Input}; - /// - /// let original_dfa = DFA::new("foo[0-9]+")?; - /// - /// let mut buf = vec![0; original_dfa.write_to_len()]; - /// // This is guaranteed to succeed, because the only serialization error - /// // that can occur is when the provided buffer is too small. But - /// // write_to_len guarantees a correct size. - /// let written = original_dfa.write_to_native_endian(&mut buf).unwrap(); - /// // But this is not guaranteed to succeed! In particular, - /// // deserialization requires proper alignment for &[u32], but our buffer - /// // was allocated as a &[u8] whose required alignment is smaller than - /// // &[u32]. However, it's likely to work in practice because of how most - /// // allocators work. So if you write code like this, make sure to either - /// // handle the error correctly and/or run it under Miri since Miri will - /// // likely provoke the error by returning Vec buffers with alignment - /// // less than &[u32]. 
-    /// let dfa: DFA<&[u32]> = match DFA::from_bytes(&buf[..written]) {
-    ///     // As mentioned above, it is legal for an error to be returned
-    ///     // here. It is quite difficult to get a Vec<u8> with a guaranteed
-    ///     // alignment equivalent to Vec<u32>.
-    ///     Err(_) => return Ok(()),
-    ///     Ok((dfa, _)) => dfa,
-    /// };
-    ///
-    /// let expected = Some(HalfMatch::must(0, 8));
-    /// assert_eq!(expected, dfa.try_search_fwd(&Input::new("foo12345"))?);
-    /// # Ok::<(), Box<dyn std::error::Error>>(())
-    /// ```
-    ///
-    /// Note that this example isn't actually guaranteed to work! In
-    /// particular, if `buf` is not aligned to a 4-byte boundary, then the
-    /// `DFA::from_bytes` call will fail. If you need this to work, then you
-    /// either need to deal with adding some initial padding yourself, or use
-    /// one of the `to_bytes` methods, which will do it for you.
-    pub fn write_to_len(&self) -> usize {
-        wire::write_label_len(LABEL)
-            + wire::write_endianness_check_len()
-            + wire::write_version_len()
-            + size_of::<u32>() // unused, intended for future flexibility
-            + self.flags.write_to_len()
-            + self.tt.write_to_len()
-            + self.st.write_to_len()
-            + self.ms.write_to_len()
-            + self.special.write_to_len()
-            + self.accels.write_to_len()
-            + self.quitset.write_to_len()
-    }
-}
-
-impl<'a> DFA<&'a [u32]> {
-    /// Safely deserialize a DFA with a specific state identifier
-    /// representation. Upon success, this returns both the deserialized DFA
-    /// and the number of bytes read from the given slice. Namely, the contents
-    /// of the slice beyond the DFA are not read.
-    ///
-    /// Deserializing a DFA using this routine will never allocate heap memory.
-    /// For safety purposes, the DFA's transition table will be verified such
-    /// that every transition points to a valid state. If this verification is
-    /// too costly, then a [`DFA::from_bytes_unchecked`] API is provided, which
-    /// will always execute in constant time.
-    ///
-    /// The bytes given must be generated by one of the serialization APIs
-    /// of a `DFA` using a semver compatible release of this crate. Those
-    /// include:
-    ///
-    /// * [`DFA::to_bytes_little_endian`]
-    /// * [`DFA::to_bytes_big_endian`]
-    /// * [`DFA::to_bytes_native_endian`]
-    /// * [`DFA::write_to_little_endian`]
-    /// * [`DFA::write_to_big_endian`]
-    /// * [`DFA::write_to_native_endian`]
-    ///
-    /// The `to_bytes` methods allocate and return a `Vec<u8>` for you, along
-    /// with handling alignment correctly. The `write_to` methods do not
-    /// allocate and write to an existing slice (which may be on the stack).
-    /// Since deserialization always uses the native endianness of the target
-    /// platform, the serialization API you use should match the endianness of
-    /// the target platform. (It's often a good idea to generate serialized
-    /// DFAs for both forms of endianness and then load the correct one based
-    /// on endianness.)
-    ///
-    /// # Errors
-    ///
-    /// Generally speaking, it's easier to state the conditions in which an
-    /// error is _not_ returned. All of the following must be true:
-    ///
-    /// * The bytes given must be produced by one of the serialization APIs
-    /// on this DFA, as mentioned above.
-    /// * The endianness of the target platform matches the endianness used to
-    /// serialize the provided DFA.
-    /// * The slice given must have the same alignment as `u32`.
-    ///
-    /// If any of the above are not true, then an error will be returned.
-    ///
-    /// # Panics
-    ///
-    /// This routine will never panic for any input.
- /// - /// # Example - /// - /// This example shows how to serialize a DFA to raw bytes, deserialize it - /// and then use it for searching. - /// - /// ``` - /// use regex_automata::{dfa::{Automaton, dense::DFA}, HalfMatch, Input}; - /// - /// let initial = DFA::new("foo[0-9]+")?; - /// let (bytes, _) = initial.to_bytes_native_endian(); - /// let dfa: DFA<&[u32]> = DFA::from_bytes(&bytes)?.0; - /// - /// let expected = Some(HalfMatch::must(0, 8)); - /// assert_eq!(expected, dfa.try_search_fwd(&Input::new("foo12345"))?); - /// # Ok::<(), Box>(()) - /// ``` - /// - /// # Example: dealing with alignment and padding - /// - /// In the above example, we used the `to_bytes_native_endian` method to - /// serialize a DFA, but we ignored part of its return value corresponding - /// to padding added to the beginning of the serialized DFA. This is OK - /// because deserialization will skip this initial padding. What matters - /// is that the address immediately following the padding has an alignment - /// that matches `u32`. That is, the following is an equivalent but - /// alternative way to write the above example: - /// - /// ``` - /// use regex_automata::{dfa::{Automaton, dense::DFA}, HalfMatch, Input}; - /// - /// let initial = DFA::new("foo[0-9]+")?; - /// // Serialization returns the number of leading padding bytes added to - /// // the returned Vec. - /// let (bytes, pad) = initial.to_bytes_native_endian(); - /// let dfa: DFA<&[u32]> = DFA::from_bytes(&bytes[pad..])?.0; - /// - /// let expected = Some(HalfMatch::must(0, 8)); - /// assert_eq!(expected, dfa.try_search_fwd(&Input::new("foo12345"))?); - /// # Ok::<(), Box>(()) - /// ``` - /// - /// This padding is necessary because Rust's standard library does - /// not expose any safe and robust way of creating a `Vec` with a - /// guaranteed alignment other than 1. Now, in practice, the underlying - /// allocator is likely to provide a `Vec` that meets our alignment - /// requirements, which means `pad` is zero in practice most of the time. - /// - /// The purpose of exposing the padding like this is flexibility for the - /// caller. For example, if one wants to embed a serialized DFA into a - /// compiled program, then it's important to guarantee that it starts at a - /// `u32`-aligned address. The simplest way to do this is to discard the - /// padding bytes and set it up so that the serialized DFA itself begins at - /// a properly aligned address. We can show this in two parts. The first - /// part is serializing the DFA to a file: - /// - /// ```no_run - /// use regex_automata::dfa::dense::DFA; - /// - /// let dfa = DFA::new("foo[0-9]+")?; - /// - /// let (bytes, pad) = dfa.to_bytes_big_endian(); - /// // Write the contents of the DFA *without* the initial padding. - /// std::fs::write("foo.bigendian.dfa", &bytes[pad..])?; - /// - /// // Do it again, but this time for little endian. - /// let (bytes, pad) = dfa.to_bytes_little_endian(); - /// std::fs::write("foo.littleendian.dfa", &bytes[pad..])?; - /// # Ok::<(), Box>(()) - /// ``` - /// - /// And now the second part is embedding the DFA into the compiled program - /// and deserializing it at runtime on first use. We use conditional - /// compilation to choose the correct endianness. - /// - /// ```no_run - /// use regex_automata::{ - /// dfa::{Automaton, dense::DFA}, - /// util::{lazy::Lazy, wire::AlignAs}, - /// HalfMatch, Input, - /// }; - /// - /// // This crate provides its own "lazy" type, kind of like - /// // lazy_static! or once_cell::sync::Lazy. 
But it works in no-alloc - /// // no-std environments and let's us write this using completely - /// // safe code. - /// static RE: Lazy> = Lazy::new(|| { - /// # const _: &str = stringify! { - /// // This assignment is made possible (implicitly) via the - /// // CoerceUnsized trait. This is what guarantees that our - /// // bytes are stored in memory on a 4 byte boundary. You - /// // *must* do this or something equivalent for correct - /// // deserialization. - /// static ALIGNED: &AlignAs<[u8], u32> = &AlignAs { - /// _align: [], - /// #[cfg(target_endian = "big")] - /// bytes: *include_bytes!("foo.bigendian.dfa"), - /// #[cfg(target_endian = "little")] - /// bytes: *include_bytes!("foo.littleendian.dfa"), - /// }; - /// # }; - /// # static ALIGNED: &AlignAs<[u8], u32> = &AlignAs { - /// # _align: [], - /// # bytes: [], - /// # }; - /// - /// let (dfa, _) = DFA::from_bytes(&ALIGNED.bytes) - /// .expect("serialized DFA should be valid"); - /// dfa - /// }); - /// - /// let expected = Ok(Some(HalfMatch::must(0, 8))); - /// assert_eq!(expected, RE.try_search_fwd(&Input::new("foo12345"))); - /// ``` - /// - /// An alternative to [`util::lazy::Lazy`](crate::util::lazy::Lazy) - /// is [`lazy_static`](https://crates.io/crates/lazy_static) or - /// [`once_cell`](https://crates.io/crates/once_cell), which provide - /// stronger guarantees (like the initialization function only being - /// executed once). And `once_cell` in particular provides a more - /// expressive API. But a `Lazy` value from this crate is likely just fine - /// in most circumstances. - /// - /// Note that regardless of which initialization method you use, you - /// will still need to use the [`AlignAs`](crate::util::wire::AlignAs) - /// trick above to force correct alignment, but this is safe to do and - /// `from_bytes` will return an error if you get it wrong. - pub fn from_bytes( - slice: &'a [u8], - ) -> Result<(DFA<&'a [u32]>, usize), DeserializeError> { - // SAFETY: This is safe because we validate the transition table, start - // table, match states and accelerators below. If any validation fails, - // then we return an error. - let (dfa, nread) = unsafe { DFA::from_bytes_unchecked(slice)? }; - // Note that validation order is important here: - // - // * `MatchState::validate` can be called with an untrusted DFA. - // * `TransistionTable::validate` uses `dfa.ms` through `match_len`. - // * `StartTable::validate` needs a valid transition table. - // - // So... validate the match states first. - dfa.accels.validate()?; - dfa.ms.validate(&dfa)?; - dfa.tt.validate(&dfa)?; - dfa.st.validate(&dfa)?; - // N.B. dfa.special doesn't have a way to do unchecked deserialization, - // so it has already been validated. - for state in dfa.states() { - // If the state is an accel state, then it must have a non-empty - // accelerator. - if dfa.is_accel_state(state.id()) { - let index = dfa.accelerator_index(state.id()); - if index >= dfa.accels.len() { - return Err(DeserializeError::generic( - "found DFA state with invalid accelerator index", - )); - } - let needles = dfa.accels.needles(index); - if !(1 <= needles.len() && needles.len() <= 3) { - return Err(DeserializeError::generic( - "accelerator needles has invalid length", - )); - } - } - } - Ok((dfa, nread)) - } - - /// Deserialize a DFA with a specific state identifier representation in - /// constant time by omitting the verification of the validity of the - /// transition table and other data inside the DFA. 
-    ///
-    /// This is just like [`DFA::from_bytes`], except it can potentially return
-    /// a DFA that exhibits undefined behavior if its transition table contains
-    /// invalid state identifiers.
-    ///
-    /// This routine is useful if you need to deserialize a DFA cheaply
-    /// and cannot afford the transition table validation performed by
-    /// `from_bytes`.
-    ///
-    /// # Example
-    ///
-    /// ```
-    /// use regex_automata::{dfa::{Automaton, dense::DFA}, HalfMatch, Input};
-    ///
-    /// let initial = DFA::new("foo[0-9]+")?;
-    /// let (bytes, _) = initial.to_bytes_native_endian();
-    /// // SAFETY: This is guaranteed to be safe since the bytes given come
-    /// // directly from a compatible serialization routine.
-    /// let dfa: DFA<&[u32]> = unsafe { DFA::from_bytes_unchecked(&bytes)?.0 };
-    ///
-    /// let expected = Some(HalfMatch::must(0, 8));
-    /// assert_eq!(expected, dfa.try_search_fwd(&Input::new("foo12345"))?);
-    /// # Ok::<(), Box<dyn std::error::Error>>(())
-    /// ```
-    pub unsafe fn from_bytes_unchecked(
-        slice: &'a [u8],
-    ) -> Result<(DFA<&'a [u32]>, usize), DeserializeError> {
-        let mut nr = 0;
-
-        nr += wire::skip_initial_padding(slice);
-        wire::check_alignment::<u32>(&slice[nr..])?;
-        nr += wire::read_label(&slice[nr..], LABEL)?;
-        nr += wire::read_endianness_check(&slice[nr..])?;
-        nr += wire::read_version(&slice[nr..], VERSION)?;
-
-        let _unused = wire::try_read_u32(&slice[nr..], "unused space")?;
-        nr += size_of::<u32>();
-
-        let (flags, nread) = Flags::from_bytes(&slice[nr..])?;
-        nr += nread;
-
-        let (tt, nread) = TransitionTable::from_bytes_unchecked(&slice[nr..])?;
-        nr += nread;
-
-        let (st, nread) = StartTable::from_bytes_unchecked(&slice[nr..])?;
-        nr += nread;
-
-        let (ms, nread) = MatchStates::from_bytes_unchecked(&slice[nr..])?;
-        nr += nread;
-
-        let (special, nread) = Special::from_bytes(&slice[nr..])?;
-        nr += nread;
-        special.validate_state_len(tt.len(), tt.stride2)?;
-
-        let (accels, nread) = Accels::from_bytes_unchecked(&slice[nr..])?;
-        nr += nread;
-
-        let (quitset, nread) = ByteSet::from_bytes(&slice[nr..])?;
-        nr += nread;
-
-        // Prefilters don't support serialization, so they're always absent.
-        let pre = None;
-        Ok((DFA { tt, st, ms, special, accels, pre, quitset, flags }, nr))
-    }
-
-    /// The implementation of the public `write_to` serialization methods,
-    /// which is generic over endianness.
-    ///
-    /// This is defined only for &[u32] to reduce binary size/compilation time.
-    fn write_to<E: Endian>(
-        &self,
-        mut dst: &mut [u8],
-    ) -> Result<usize, SerializeError> {
-        let nwrite = self.write_to_len();
-        if dst.len() < nwrite {
-            return Err(SerializeError::buffer_too_small("dense DFA"));
-        }
-        dst = &mut dst[..nwrite];
-
-        let mut nw = 0;
-        nw += wire::write_label(LABEL, &mut dst[nw..])?;
-        nw += wire::write_endianness_check::<E>(&mut dst[nw..])?;
-        nw += wire::write_version::<E>(VERSION, &mut dst[nw..])?;
-        nw += {
-            // Currently unused, intended for future flexibility
-            E::write_u32(0, &mut dst[nw..]);
-            size_of::<u32>()
-        };
-        nw += self.flags.write_to::<E>(&mut dst[nw..])?;
-        nw += self.tt.write_to::<E>(&mut dst[nw..])?;
-        nw += self.st.write_to::<E>(&mut dst[nw..])?;
-        nw += self.ms.write_to::<E>(&mut dst[nw..])?;
-        nw += self.special.write_to::<E>(&mut dst[nw..])?;
-        nw += self.accels.write_to::<E>(&mut dst[nw..])?;
-        nw += self.quitset.write_to::<E>(&mut dst[nw..])?;
-        Ok(nw)
-    }
-}
-
-/// Other routines that work for all `T`.
-impl<T> DFA<T> {
-    /// Set or unset the prefilter attached to this DFA.
-    ///
-    /// This is useful when one has deserialized a DFA from `&[u8]`.
- /// Deserialization does not currently include prefilters, so if you - /// want prefilter acceleration, you'll need to rebuild it and attach - /// it here. - pub fn set_prefilter(&mut self, prefilter: Option) { - self.pre = prefilter - } -} - -// The following methods implement mutable routines on the internal -// representation of a DFA. As such, we must fix the first type parameter to a -// `Vec` since a generic `T: AsRef<[u32]>` does not permit mutation. We -// can get away with this because these methods are internal to the crate and -// are exclusively used during construction of the DFA. -#[cfg(feature = "dfa-build")] -impl OwnedDFA { - /// Add a start state of this DFA. - pub(crate) fn set_start_state( - &mut self, - anchored: Anchored, - start: Start, - id: StateID, - ) { - assert!(self.tt.is_valid(id), "invalid start state"); - self.st.set_start(anchored, start, id); - } - - /// Set the given transition to this DFA. Both the `from` and `to` states - /// must already exist. - pub(crate) fn set_transition( - &mut self, - from: StateID, - byte: alphabet::Unit, - to: StateID, - ) { - self.tt.set(from, byte, to); - } - - /// An empty state (a state where all transitions lead to a dead state) - /// and return its identifier. The identifier returned is guaranteed to - /// not point to any other existing state. - /// - /// If adding a state would exceed `StateID::LIMIT`, then this returns an - /// error. - pub(crate) fn add_empty_state(&mut self) -> Result { - self.tt.add_empty_state() - } - - /// Swap the two states given in the transition table. - /// - /// This routine does not do anything to check the correctness of this - /// swap. Callers must ensure that other states pointing to id1 and id2 are - /// updated appropriately. - pub(crate) fn swap_states(&mut self, id1: StateID, id2: StateID) { - self.tt.swap(id1, id2); - } - - /// Remap all of the state identifiers in this DFA according to the map - /// function given. This includes all transitions and all starting state - /// identifiers. - pub(crate) fn remap(&mut self, map: impl Fn(StateID) -> StateID) { - // We could loop over each state ID and call 'remap_state' here, but - // this is more direct: just map every transition directly. This - // technically might do a little extra work since the alphabet length - // is likely less than the stride, but if that is indeed an issue we - // should benchmark it and fix it. - for sid in self.tt.table_mut().iter_mut() { - *sid = map(*sid); - } - for sid in self.st.table_mut().iter_mut() { - *sid = map(*sid); - } - } - - /// Remap the transitions for the state given according to the function - /// given. This applies the given map function to every transition in the - /// given state and changes the transition in place to the result of the - /// map function for that transition. - pub(crate) fn remap_state( - &mut self, - id: StateID, - map: impl Fn(StateID) -> StateID, - ) { - self.tt.remap(id, map); - } - - /// Truncate the states in this DFA to the given length. - /// - /// This routine does not do anything to check the correctness of this - /// truncation. Callers must ensure that other states pointing to truncated - /// states are updated appropriately. - pub(crate) fn truncate_states(&mut self, len: usize) { - self.tt.truncate(len); - } - - /// Minimize this DFA in place using Hopcroft's algorithm. - pub(crate) fn minimize(&mut self) { - Minimizer::new(self).run(); - } - - /// Updates the match state pattern ID map to use the one provided. 
- /// - /// This is useful when it's convenient to manipulate matching states - /// (and their corresponding pattern IDs) as a map. In particular, the - /// representation used by a DFA for this map is not amenable to mutation, - /// so if things need to be changed (like when shuffling states), it's - /// often easier to work with the map form. - pub(crate) fn set_pattern_map( - &mut self, - map: &BTreeMap>, - ) -> Result<(), BuildError> { - self.ms = self.ms.new_with_map(map)?; - Ok(()) - } - - /// Find states that have a small number of non-loop transitions and mark - /// them as candidates for acceleration during search. - pub(crate) fn accelerate(&mut self) { - // dead and quit states can never be accelerated. - if self.state_len() <= 2 { - return; - } - - // Go through every state and record their accelerator, if possible. - let mut accels = BTreeMap::new(); - // Count the number of accelerated match, start and non-match/start - // states. - let (mut cmatch, mut cstart, mut cnormal) = (0, 0, 0); - for state in self.states() { - if let Some(accel) = state.accelerate(self.byte_classes()) { - debug!( - "accelerating full DFA state {}: {:?}", - state.id().as_usize(), - accel, - ); - accels.insert(state.id(), accel); - if self.is_match_state(state.id()) { - cmatch += 1; - } else if self.is_start_state(state.id()) { - cstart += 1; - } else { - assert!(!self.is_dead_state(state.id())); - assert!(!self.is_quit_state(state.id())); - cnormal += 1; - } - } - } - // If no states were able to be accelerated, then we're done. - if accels.is_empty() { - return; - } - let original_accels_len = accels.len(); - - // A remapper keeps track of state ID changes. Once we're done - // shuffling, the remapper is used to rewrite all transitions in the - // DFA based on the new positions of states. - let mut remapper = Remapper::new(self); - - // As we swap states, if they are match states, we need to swap their - // pattern ID lists too (for multi-regexes). We do this by converting - // the lists to an easily swappable map, and then convert back to - // MatchStates once we're done. - let mut new_matches = self.ms.to_map(self); - - // There is at least one state that gets accelerated, so these are - // guaranteed to get set to sensible values below. - self.special.min_accel = StateID::MAX; - self.special.max_accel = StateID::ZERO; - let update_special_accel = - |special: &mut Special, accel_id: StateID| { - special.min_accel = cmp::min(special.min_accel, accel_id); - special.max_accel = cmp::max(special.max_accel, accel_id); - }; - - // Start by shuffling match states. Any match states that are - // accelerated get moved to the end of the match state range. - if cmatch > 0 && self.special.matches() { - // N.B. special.{min,max}_match do not need updating, since the - // range/number of match states does not change. Only the ordering - // of match states may change. - let mut next_id = self.special.max_match; - let mut cur_id = next_id; - while cur_id >= self.special.min_match { - if let Some(accel) = accels.remove(&cur_id) { - accels.insert(next_id, accel); - update_special_accel(&mut self.special, next_id); - - // No need to do any actual swapping for equivalent IDs. - if cur_id != next_id { - remapper.swap(self, cur_id, next_id); - - // Swap pattern IDs for match states. 
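// A standalone sketch (not the crate's internals) of what an accelerator buys
// at search time: when a state can only be left via one to three "needle"
// bytes, the search can skip ahead to the next occurrence of any needle
// instead of stepping the transition table one byte at a time. The real scan
// is done with memchr/memchr2/memchr3.
fn skip_to_needle(haystack: &[u8], at: usize, needles: &[u8]) -> Option<usize> {
    debug_assert!((1..=3).contains(&needles.len()));
    haystack[at..]
        .iter()
        .position(|b| needles.contains(b))
        .map(|i| at + i)
}

// For a state whose only outgoing bytes are b'a' and b'z', everything in
// between is skipped without consulting the transition table:
// skip_to_needle(b"xxxxxxaxxx", 0, &[b'a', b'z']) == Some(6).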
- let cur_pids = new_matches.remove(&cur_id).unwrap(); - let next_pids = new_matches.remove(&next_id).unwrap(); - new_matches.insert(cur_id, next_pids); - new_matches.insert(next_id, cur_pids); - } - next_id = self.tt.prev_state_id(next_id); - } - cur_id = self.tt.prev_state_id(cur_id); - } - } - - // This is where it gets tricky. Without acceleration, start states - // normally come right after match states. But we want accelerated - // states to be a single contiguous range (to make it very fast - // to determine whether a state *is* accelerated), while also keeping - // match and starting states as contiguous ranges for the same reason. - // So what we do here is shuffle states such that it looks like this: - // - // DQMMMMAAAAASSSSSSNNNNNNN - // | | - // |---------| - // accelerated states - // - // Where: - // D - dead state - // Q - quit state - // M - match state (may be accelerated) - // A - normal state that is accelerated - // S - start state (may be accelerated) - // N - normal state that is NOT accelerated - // - // We implement this by shuffling states, which is done by a sequence - // of pairwise swaps. We start by looking at all normal states to be - // accelerated. When we find one, we swap it with the earliest starting - // state, and then swap that with the earliest normal state. This - // preserves the contiguous property. - // - // Once we're done looking for accelerated normal states, now we look - // for accelerated starting states by moving them to the beginning - // of the starting state range (just like we moved accelerated match - // states to the end of the matching state range). - // - // For a more detailed/different perspective on this, see the docs - // in dfa/special.rs. - if cnormal > 0 { - // our next available starting and normal states for swapping. - let mut next_start_id = self.special.min_start; - let mut cur_id = self.to_state_id(self.state_len() - 1); - // This is guaranteed to exist since cnormal > 0. - let mut next_norm_id = - self.tt.next_state_id(self.special.max_start); - while cur_id >= next_norm_id { - if let Some(accel) = accels.remove(&cur_id) { - remapper.swap(self, next_start_id, cur_id); - remapper.swap(self, next_norm_id, cur_id); - // Keep our accelerator map updated with new IDs if the - // states we swapped were also accelerated. - if let Some(accel2) = accels.remove(&next_norm_id) { - accels.insert(cur_id, accel2); - } - if let Some(accel2) = accels.remove(&next_start_id) { - accels.insert(next_norm_id, accel2); - } - accels.insert(next_start_id, accel); - update_special_accel(&mut self.special, next_start_id); - // Our start range shifts one to the right now. - self.special.min_start = - self.tt.next_state_id(self.special.min_start); - self.special.max_start = - self.tt.next_state_id(self.special.max_start); - next_start_id = self.tt.next_state_id(next_start_id); - next_norm_id = self.tt.next_state_id(next_norm_id); - } - // This is pretty tricky, but if our 'next_norm_id' state also - // happened to be accelerated, then the result is that it is - // now in the position of cur_id, so we need to consider it - // again. This loop is still guaranteed to terminate though, - // because when accels contains cur_id, we're guaranteed to - // increment next_norm_id even if cur_id remains unchanged. - if !accels.contains_key(&cur_id) { - cur_id = self.tt.prev_state_id(cur_id); - } - } - } - // Just like we did for match states, but we want to move accelerated - // start states to the beginning of the range instead of the end. 
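// A standalone sketch (not the crate's `Special` type) of why the shuffle
// insists on keeping each kind of special state in one contiguous block: once
// every accelerated state lives in `[min_accel, max_accel]`, classifying a
// state ID costs two comparisons rather than a lookup in a side table.
struct SpecialRangesSketch {
    min_match: u32,
    max_match: u32,
    min_accel: u32,
    max_accel: u32,
}

impl SpecialRangesSketch {
    fn is_match_state(&self, id: u32) -> bool {
        self.min_match <= id && id <= self.max_match
    }

    fn is_accel_state(&self, id: u32) -> bool {
        self.min_accel <= id && id <= self.max_accel
    }
}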
- if cstart > 0 { - // N.B. special.{min,max}_start do not need updating, since the - // range/number of start states does not change at this point. Only - // the ordering of start states may change. - let mut next_id = self.special.min_start; - let mut cur_id = next_id; - while cur_id <= self.special.max_start { - if let Some(accel) = accels.remove(&cur_id) { - remapper.swap(self, cur_id, next_id); - accels.insert(next_id, accel); - update_special_accel(&mut self.special, next_id); - next_id = self.tt.next_state_id(next_id); - } - cur_id = self.tt.next_state_id(cur_id); - } - } - - // Remap all transitions in our DFA and assert some things. - remapper.remap(self); - // This unwrap is OK because acceleration never changes the number of - // match states or patterns in those match states. Since acceleration - // runs after the pattern map has been set at least once, we know that - // our match states cannot error. - self.set_pattern_map(&new_matches).unwrap(); - self.special.set_max(); - self.special.validate().expect("special state ranges should validate"); - self.special - .validate_state_len(self.state_len(), self.stride2()) - .expect( - "special state ranges should be consistent with state length", - ); - assert_eq!( - self.special.accel_len(self.stride()), - // We record the number of accelerated states initially detected - // since the accels map is itself mutated in the process above. - // If mutated incorrectly, its size may change, and thus can't be - // trusted as a source of truth of how many accelerated states we - // expected there to be. - original_accels_len, - "mismatch with expected number of accelerated states", - ); - - // And finally record our accelerators. We kept our accels map updated - // as we shuffled states above, so the accelerators should now - // correspond to a contiguous range in the state ID space. (Which we - // assert.) - let mut prev: Option = None; - for (id, accel) in accels { - assert!(prev.map_or(true, |p| self.tt.next_state_id(p) == id)); - prev = Some(id); - self.accels.add(accel); - } - } - - /// Shuffle the states in this DFA so that starting states, match - /// states and accelerated states are all contiguous. - /// - /// See dfa/special.rs for more details. - pub(crate) fn shuffle( - &mut self, - mut matches: BTreeMap>, - ) -> Result<(), BuildError> { - // The determinizer always adds a quit state and it is always second. - self.special.quit_id = self.to_state_id(1); - // If all we have are the dead and quit states, then we're done and - // the DFA will never produce a match. - if self.state_len() <= 2 { - self.special.set_max(); - return Ok(()); - } - - // Collect all our non-DEAD start states into a convenient set and - // confirm there is no overlap with match states. In the classical DFA - // construction, start states can be match states. But because of - // look-around, we delay all matches by a byte, which prevents start - // states from being match states. - let mut is_start: BTreeSet = BTreeSet::new(); - for (start_id, _, _) in self.starts() { - // If a starting configuration points to a DEAD state, then we - // don't want to shuffle it. The DEAD state is always the first - // state with ID=0. So we can just leave it be. - if start_id == DEAD { - continue; - } - assert!( - !matches.contains_key(&start_id), - "{start_id:?} is both a start and a match state, \ - which is not allowed", - ); - is_start.insert(start_id); - } - - // We implement shuffling by a sequence of pairwise swaps of states. 
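// A standalone sketch (not the crate's `Remapper`) of the book-keeping that
// makes "shuffling by pairwise swaps" safe: every swap is recorded, and once
// the layout is final, all stored references are rewritten in a single pass.
struct SwapLogSketch {
    // new_of_original[i] is the current position of the item that started at i.
    new_of_original: Vec<usize>,
    // original_at[p] is the starting position of the item currently at p.
    original_at: Vec<usize>,
}

impl SwapLogSketch {
    fn new(len: usize) -> SwapLogSketch {
        SwapLogSketch {
            new_of_original: (0..len).collect(),
            original_at: (0..len).collect(),
        }
    }

    fn swap<T>(&mut self, items: &mut [T], a: usize, b: usize) {
        items.swap(a, b);
        self.original_at.swap(a, b);
        self.new_of_original[self.original_at[a]] = a;
        self.new_of_original[self.original_at[b]] = b;
    }

    // Rewrite references that were recorded against the original layout so
    // that they point at each item's final position.
    fn remap(&self, refs: &mut [usize]) {
        for r in refs.iter_mut() {
            *r = self.new_of_original[*r];
        }
    }
}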
- // Since we have a number of things referencing states via their - // IDs and swapping them changes their IDs, we need to record every - // swap we make so that we can remap IDs. The remapper handles this - // book-keeping for us. - let mut remapper = Remapper::new(self); - - // Shuffle matching states. - if matches.is_empty() { - self.special.min_match = DEAD; - self.special.max_match = DEAD; - } else { - // The determinizer guarantees that the first two states are the - // dead and quit states, respectively. We want our match states to - // come right after quit. - let mut next_id = self.to_state_id(2); - let mut new_matches = BTreeMap::new(); - self.special.min_match = next_id; - for (id, pids) in matches { - remapper.swap(self, next_id, id); - new_matches.insert(next_id, pids); - // If we swapped a start state, then update our set. - if is_start.contains(&next_id) { - is_start.remove(&next_id); - is_start.insert(id); - } - next_id = self.tt.next_state_id(next_id); - } - matches = new_matches; - self.special.max_match = cmp::max( - self.special.min_match, - self.tt.prev_state_id(next_id), - ); - } - - // Shuffle starting states. - { - let mut next_id = self.to_state_id(2); - if self.special.matches() { - next_id = self.tt.next_state_id(self.special.max_match); - } - self.special.min_start = next_id; - for id in is_start { - remapper.swap(self, next_id, id); - next_id = self.tt.next_state_id(next_id); - } - self.special.max_start = cmp::max( - self.special.min_start, - self.tt.prev_state_id(next_id), - ); - } - - // Finally remap all transitions in our DFA. - remapper.remap(self); - self.set_pattern_map(&matches)?; - self.special.set_max(); - self.special.validate().expect("special state ranges should validate"); - self.special - .validate_state_len(self.state_len(), self.stride2()) - .expect( - "special state ranges should be consistent with state length", - ); - Ok(()) - } - - /// Checks whether there are universal start states (both anchored and - /// unanchored), and if so, sets the relevant fields to the start state - /// IDs. - /// - /// Universal start states occur precisely when the all patterns in the - /// DFA have no look-around assertions in their prefix. - fn set_universal_starts(&mut self) { - assert_eq!(6, Start::len(), "expected 6 start configurations"); - - let start_id = |dfa: &mut OwnedDFA, - anchored: Anchored, - start: Start| { - // This OK because we only call 'start' under conditions - // in which we know it will succeed. - dfa.st.start(anchored, start).expect("valid Input configuration") - }; - if self.start_kind().has_unanchored() { - let anchor = Anchored::No; - let sid = start_id(self, anchor, Start::NonWordByte); - if sid == start_id(self, anchor, Start::WordByte) - && sid == start_id(self, anchor, Start::Text) - && sid == start_id(self, anchor, Start::LineLF) - && sid == start_id(self, anchor, Start::LineCR) - && sid == start_id(self, anchor, Start::CustomLineTerminator) - { - self.st.universal_start_unanchored = Some(sid); - } - } - if self.start_kind().has_anchored() { - let anchor = Anchored::Yes; - let sid = start_id(self, anchor, Start::NonWordByte); - if sid == start_id(self, anchor, Start::WordByte) - && sid == start_id(self, anchor, Start::Text) - && sid == start_id(self, anchor, Start::LineLF) - && sid == start_id(self, anchor, Start::LineCR) - && sid == start_id(self, anchor, Start::CustomLineTerminator) - { - self.st.universal_start_anchored = Some(sid); - } - } - } -} - -// A variety of generic internal methods for accessing DFA internals. 
-impl> DFA { - /// Return the info about special states. - pub(crate) fn special(&self) -> &Special { - &self.special - } - - /// Return the info about special states as a mutable borrow. - #[cfg(feature = "dfa-build")] - pub(crate) fn special_mut(&mut self) -> &mut Special { - &mut self.special - } - - /// Returns the quit set (may be empty) used by this DFA. - pub(crate) fn quitset(&self) -> &ByteSet { - &self.quitset - } - - /// Returns the flags for this DFA. - pub(crate) fn flags(&self) -> &Flags { - &self.flags - } - - /// Returns an iterator over all states in this DFA. - /// - /// This iterator yields a tuple for each state. The first element of the - /// tuple corresponds to a state's identifier, and the second element - /// corresponds to the state itself (comprised of its transitions). - pub(crate) fn states(&self) -> StateIter<'_, T> { - self.tt.states() - } - - /// Return the total number of states in this DFA. Every DFA has at least - /// 1 state, even the empty DFA. - pub(crate) fn state_len(&self) -> usize { - self.tt.len() - } - - /// Return an iterator over all pattern IDs for the given match state. - /// - /// If the given state is not a match state, then this panics. - #[cfg(feature = "dfa-build")] - pub(crate) fn pattern_id_slice(&self, id: StateID) -> &[PatternID] { - assert!(self.is_match_state(id)); - self.ms.pattern_id_slice(self.match_state_index(id)) - } - - /// Return the total number of pattern IDs for the given match state. - /// - /// If the given state is not a match state, then this panics. - pub(crate) fn match_pattern_len(&self, id: StateID) -> usize { - assert!(self.is_match_state(id)); - self.ms.pattern_len(self.match_state_index(id)) - } - - /// Returns the total number of patterns matched by this DFA. - pub(crate) fn pattern_len(&self) -> usize { - self.ms.pattern_len - } - - /// Returns a map from match state ID to a list of pattern IDs that match - /// in that state. - #[cfg(feature = "dfa-build")] - pub(crate) fn pattern_map(&self) -> BTreeMap> { - self.ms.to_map(self) - } - - /// Returns the ID of the quit state for this DFA. - #[cfg(feature = "dfa-build")] - pub(crate) fn quit_id(&self) -> StateID { - self.to_state_id(1) - } - - /// Convert the given state identifier to the state's index. The state's - /// index corresponds to the position in which it appears in the transition - /// table. When a DFA is NOT premultiplied, then a state's identifier is - /// also its index. When a DFA is premultiplied, then a state's identifier - /// is equal to `index * alphabet_len`. This routine reverses that. - pub(crate) fn to_index(&self, id: StateID) -> usize { - self.tt.to_index(id) - } - - /// Convert an index to a state (in the range 0..self.state_len()) to an - /// actual state identifier. - /// - /// This is useful when using a `Vec` as an efficient map keyed by state - /// to some other information (such as a remapped state ID). - #[cfg(feature = "dfa-build")] - pub(crate) fn to_state_id(&self, index: usize) -> StateID { - self.tt.to_state_id(index) - } - - /// Return the table of state IDs for this DFA's start states. - pub(crate) fn starts(&self) -> StartStateIter<'_> { - self.st.iter() - } - - /// Returns the index of the match state for the given ID. If the - /// given ID does not correspond to a match state, then this may - /// panic or produce an incorrect result. 
- #[cfg_attr(feature = "perf-inline", inline(always))] - fn match_state_index(&self, id: StateID) -> usize { - debug_assert!(self.is_match_state(id)); - // This is one of the places where we rely on the fact that match - // states are contiguous in the transition table. Namely, that the - // first match state ID always corresponds to dfa.special.min_match. - // From there, since we know the stride, we can compute the overall - // index of any match state given the match state's ID. - let min = self.special().min_match.as_usize(); - // CORRECTNESS: We're allowed to produce an incorrect result or panic, - // so both the subtraction and the unchecked StateID construction is - // OK. - self.to_index(StateID::new_unchecked(id.as_usize() - min)) - } - - /// Returns the index of the accelerator state for the given ID. If the - /// given ID does not correspond to an accelerator state, then this may - /// panic or produce an incorrect result. - fn accelerator_index(&self, id: StateID) -> usize { - let min = self.special().min_accel.as_usize(); - // CORRECTNESS: We're allowed to produce an incorrect result or panic, - // so both the subtraction and the unchecked StateID construction is - // OK. - self.to_index(StateID::new_unchecked(id.as_usize() - min)) - } - - /// Return the accelerators for this DFA. - fn accels(&self) -> Accels<&[u32]> { - self.accels.as_ref() - } - - /// Return this DFA's transition table as a slice. - fn trans(&self) -> &[StateID] { - self.tt.table() - } -} - -impl> fmt::Debug for DFA { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - writeln!(f, "dense::DFA(")?; - for state in self.states() { - fmt_state_indicator(f, self, state.id())?; - let id = if f.alternate() { - state.id().as_usize() - } else { - self.to_index(state.id()) - }; - write!(f, "{id:06?}: ")?; - state.fmt(f)?; - write!(f, "\n")?; - } - writeln!(f, "")?; - for (i, (start_id, anchored, sty)) in self.starts().enumerate() { - let id = if f.alternate() { - start_id.as_usize() - } else { - self.to_index(start_id) - }; - if i % self.st.stride == 0 { - match anchored { - Anchored::No => writeln!(f, "START-GROUP(unanchored)")?, - Anchored::Yes => writeln!(f, "START-GROUP(anchored)")?, - Anchored::Pattern(pid) => { - writeln!(f, "START_GROUP(pattern: {pid:?})")? - } - } - } - writeln!(f, " {sty:?} => {id:06?}")?; - } - if self.pattern_len() > 1 { - writeln!(f, "")?; - for i in 0..self.ms.len() { - let id = self.ms.match_state_id(self, i); - let id = if f.alternate() { - id.as_usize() - } else { - self.to_index(id) - }; - write!(f, "MATCH({id:06?}): ")?; - for (i, &pid) in self.ms.pattern_id_slice(i).iter().enumerate() - { - if i > 0 { - write!(f, ", ")?; - } - write!(f, "{pid:?}")?; - } - writeln!(f, "")?; - } - } - writeln!(f, "state length: {:?}", self.state_len())?; - writeln!(f, "pattern length: {:?}", self.pattern_len())?; - writeln!(f, "flags: {:?}", self.flags)?; - writeln!(f, ")")?; - Ok(()) - } -} - -// SAFETY: We assert that our implementation of each method is correct. 
-unsafe impl> Automaton for DFA { - #[cfg_attr(feature = "perf-inline", inline(always))] - fn is_special_state(&self, id: StateID) -> bool { - self.special.is_special_state(id) - } - - #[cfg_attr(feature = "perf-inline", inline(always))] - fn is_dead_state(&self, id: StateID) -> bool { - self.special.is_dead_state(id) - } - - #[cfg_attr(feature = "perf-inline", inline(always))] - fn is_quit_state(&self, id: StateID) -> bool { - self.special.is_quit_state(id) - } - - #[cfg_attr(feature = "perf-inline", inline(always))] - fn is_match_state(&self, id: StateID) -> bool { - self.special.is_match_state(id) - } - - #[cfg_attr(feature = "perf-inline", inline(always))] - fn is_start_state(&self, id: StateID) -> bool { - self.special.is_start_state(id) - } - - #[cfg_attr(feature = "perf-inline", inline(always))] - fn is_accel_state(&self, id: StateID) -> bool { - self.special.is_accel_state(id) - } - - #[cfg_attr(feature = "perf-inline", inline(always))] - fn next_state(&self, current: StateID, input: u8) -> StateID { - let input = self.byte_classes().get(input); - let o = current.as_usize() + usize::from(input); - self.trans()[o] - } - - #[cfg_attr(feature = "perf-inline", inline(always))] - unsafe fn next_state_unchecked( - &self, - current: StateID, - byte: u8, - ) -> StateID { - // We don't (or shouldn't) need an unchecked variant for the byte - // class mapping, since bound checks should be omitted automatically - // by virtue of its representation. If this ends up not being true as - // confirmed by codegen, please file an issue. ---AG - let class = self.byte_classes().get(byte); - let o = current.as_usize() + usize::from(class); - let next = *self.trans().get_unchecked(o); - next - } - - #[cfg_attr(feature = "perf-inline", inline(always))] - fn next_eoi_state(&self, current: StateID) -> StateID { - let eoi = self.byte_classes().eoi().as_usize(); - let o = current.as_usize() + eoi; - self.trans()[o] - } - - #[cfg_attr(feature = "perf-inline", inline(always))] - fn pattern_len(&self) -> usize { - self.ms.pattern_len - } - - #[cfg_attr(feature = "perf-inline", inline(always))] - fn match_len(&self, id: StateID) -> usize { - self.match_pattern_len(id) - } - - #[cfg_attr(feature = "perf-inline", inline(always))] - fn match_pattern(&self, id: StateID, match_index: usize) -> PatternID { - // This is an optimization for the very common case of a DFA with a - // single pattern. This conditional avoids a somewhat more costly path - // that finds the pattern ID from the state machine, which requires - // a bit of slicing/pointer-chasing. This optimization tends to only - // matter when matches are frequent. 
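// A standalone sketch (not the crate's types) of the lookup `next_state`
// performs above: bytes are first collapsed into equivalence classes, and
// state IDs are premultiplied by the stride, so following a transition is a
// single addition and a single read from one flat table.
struct DenseLookupSketch {
    classes: [u8; 256], // byte -> equivalence class
    table: Vec<u32>,    // row-major, one row of `stride` slots per state
}

impl DenseLookupSketch {
    fn next_state(&self, current: u32, byte: u8) -> u32 {
        let class = usize::from(self.classes[usize::from(byte)]);
        // `current` is premultiplied: it already points at the first slot of
        // its own row, so no multiplication happens in the hot loop.
        self.table[current as usize + class]
    }
}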
- if self.ms.pattern_len == 1 { - return PatternID::ZERO; - } - let state_index = self.match_state_index(id); - self.ms.pattern_id(state_index, match_index) - } - - #[cfg_attr(feature = "perf-inline", inline(always))] - fn has_empty(&self) -> bool { - self.flags.has_empty - } - - #[cfg_attr(feature = "perf-inline", inline(always))] - fn is_utf8(&self) -> bool { - self.flags.is_utf8 - } - - #[cfg_attr(feature = "perf-inline", inline(always))] - fn is_always_start_anchored(&self) -> bool { - self.flags.is_always_start_anchored - } - - #[cfg_attr(feature = "perf-inline", inline(always))] - fn start_state( - &self, - config: &start::Config, - ) -> Result { - let anchored = config.get_anchored(); - let start = match config.get_look_behind() { - None => Start::Text, - Some(byte) => { - if !self.quitset.is_empty() && self.quitset.contains(byte) { - return Err(StartError::quit(byte)); - } - self.st.start_map.get(byte) - } - }; - self.st.start(anchored, start) - } - - #[cfg_attr(feature = "perf-inline", inline(always))] - fn universal_start_state(&self, mode: Anchored) -> Option { - match mode { - Anchored::No => self.st.universal_start_unanchored, - Anchored::Yes => self.st.universal_start_anchored, - Anchored::Pattern(_) => None, - } - } - - #[cfg_attr(feature = "perf-inline", inline(always))] - fn accelerator(&self, id: StateID) -> &[u8] { - if !self.is_accel_state(id) { - return &[]; - } - self.accels.needles(self.accelerator_index(id)) - } - - #[cfg_attr(feature = "perf-inline", inline(always))] - fn get_prefilter(&self) -> Option<&Prefilter> { - self.pre.as_ref() - } -} - -/// The transition table portion of a dense DFA. -/// -/// The transition table is the core part of the DFA in that it describes how -/// to move from one state to another based on the input sequence observed. -#[derive(Clone)] -pub(crate) struct TransitionTable { - /// A contiguous region of memory representing the transition table in - /// row-major order. The representation is dense. That is, every state - /// has precisely the same number of transitions. The maximum number of - /// transitions per state is 257 (256 for each possible byte value, plus 1 - /// for the special EOI transition). If a DFA has been instructed to use - /// byte classes (the default), then the number of transitions is usually - /// substantially fewer. - /// - /// In practice, T is either `Vec` or `&[u32]`. - table: T, - /// A set of equivalence classes, where a single equivalence class - /// represents a set of bytes that never discriminate between a match - /// and a non-match in the DFA. Each equivalence class corresponds to a - /// single character in this DFA's alphabet, where the maximum number of - /// characters is 257 (each possible value of a byte plus the special - /// EOI transition). Consequently, the number of equivalence classes - /// corresponds to the number of transitions for each DFA state. Note - /// though that the *space* used by each DFA state in the transition table - /// may be larger. The total space used by each DFA state is known as the - /// stride. - /// - /// The only time the number of equivalence classes is fewer than 257 is if - /// the DFA's kind uses byte classes (which is the default). Equivalence - /// classes should generally only be disabled when debugging, so that - /// the transitions themselves aren't obscured. Disabling them has no - /// other benefit, since the equivalence class map is always used while - /// searching. 
In the vast majority of cases, the number of equivalence - /// classes is substantially smaller than 257, particularly when large - /// Unicode classes aren't used. - classes: ByteClasses, - /// The stride of each DFA state, expressed as a power-of-two exponent. - /// - /// The stride of a DFA corresponds to the total amount of space used by - /// each DFA state in the transition table. This may be bigger than the - /// size of a DFA's alphabet, since the stride is always the smallest - /// power of two greater than or equal to the alphabet size. - /// - /// While this wastes space, this avoids the need for integer division - /// to convert between premultiplied state IDs and their corresponding - /// indices. Instead, we can use simple bit-shifts. - /// - /// See the docs for the `stride2` method for more details. - /// - /// The minimum `stride2` value is `1` (corresponding to a stride of `2`) - /// while the maximum `stride2` value is `9` (corresponding to a stride of - /// `512`). The maximum is not `8` since the maximum alphabet size is `257` - /// when accounting for the special EOI transition. However, an alphabet - /// length of that size is exceptionally rare since the alphabet is shrunk - /// into equivalence classes. - stride2: usize, -} - -impl<'a> TransitionTable<&'a [u32]> { - /// Deserialize a transition table starting at the beginning of `slice`. - /// Upon success, return the total number of bytes read along with the - /// transition table. - /// - /// If there was a problem deserializing any part of the transition table, - /// then this returns an error. Notably, if the given slice does not have - /// the same alignment as `StateID`, then this will return an error (among - /// other possible errors). - /// - /// This is guaranteed to execute in constant time. - /// - /// # Safety - /// - /// This routine is not safe because it does not check the validity of the - /// transition table itself. In particular, the transition table can be - /// quite large, so checking its validity can be somewhat expensive. An - /// invalid transition table is not safe because other code may rely on the - /// transition table being correct (such as explicit bounds check elision). - /// Therefore, an invalid transition table can lead to undefined behavior. - /// - /// Callers that use this function must either pass on the safety invariant - /// or guarantee that the bytes given contain a valid transition table. - /// This guarantee is upheld by the bytes written by `write_to`. - unsafe fn from_bytes_unchecked( - mut slice: &'a [u8], - ) -> Result<(TransitionTable<&'a [u32]>, usize), DeserializeError> { - let slice_start = slice.as_ptr().as_usize(); - - let (state_len, nr) = - wire::try_read_u32_as_usize(slice, "state length")?; - slice = &slice[nr..]; - - let (stride2, nr) = wire::try_read_u32_as_usize(slice, "stride2")?; - slice = &slice[nr..]; - - let (classes, nr) = ByteClasses::from_bytes(slice)?; - slice = &slice[nr..]; - - // The alphabet length (determined by the byte class map) cannot be - // bigger than the stride (total space used by each DFA state). - if stride2 > 9 { - return Err(DeserializeError::generic( - "dense DFA has invalid stride2 (too big)", - )); - } - // It also cannot be zero, since even a DFA that never matches anything - // has a non-zero number of states with at least two equivalence - // classes: one for all 256 byte values and another for the EOI - // sentinel. 
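// A standalone sketch of the stride arithmetic described above: the row width
// is the smallest power of two that fits the alphabet (including the EOI
// class), so converting between premultiplied state IDs and state indices is
// a bit shift rather than a division.
fn stride2_for(alphabet_len: usize) -> usize {
    // The alphabet has between 2 and 257 classes, so this is between 1 and 9.
    alphabet_len.next_power_of_two().trailing_zeros() as usize
}

fn to_index(premultiplied_id: usize, stride2: usize) -> usize {
    premultiplied_id >> stride2
}

fn to_state_id(index: usize, stride2: usize) -> usize {
    index << stride2
}

// e.g. stride2_for(5) == 3 (stride 8), stride2_for(257) == 9 (stride 512),
// and with a stride of 64 the state at index 3 has premultiplied ID 192.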
- if stride2 < 1 { - return Err(DeserializeError::generic( - "dense DFA has invalid stride2 (too small)", - )); - } - // This is OK since 1 <= stride2 <= 9. - let stride = - 1usize.checked_shl(u32::try_from(stride2).unwrap()).unwrap(); - if classes.alphabet_len() > stride { - return Err(DeserializeError::generic( - "alphabet size cannot be bigger than transition table stride", - )); - } - - let trans_len = - wire::shl(state_len, stride2, "dense table transition length")?; - let table_bytes_len = wire::mul( - trans_len, - StateID::SIZE, - "dense table state byte length", - )?; - wire::check_slice_len(slice, table_bytes_len, "transition table")?; - wire::check_alignment::(slice)?; - let table_bytes = &slice[..table_bytes_len]; - slice = &slice[table_bytes_len..]; - // SAFETY: Since StateID is always representable as a u32, all we need - // to do is ensure that we have the proper length and alignment. We've - // checked both above, so the cast below is safe. - // - // N.B. This is the only not-safe code in this function. - let table = core::slice::from_raw_parts( - table_bytes.as_ptr().cast::(), - trans_len, - ); - let tt = TransitionTable { table, classes, stride2 }; - Ok((tt, slice.as_ptr().as_usize() - slice_start)) - } -} - -#[cfg(feature = "dfa-build")] -impl TransitionTable> { - /// Create a minimal transition table with just two states: a dead state - /// and a quit state. The alphabet length and stride of the transition - /// table is determined by the given set of equivalence classes. - fn minimal(classes: ByteClasses) -> TransitionTable> { - let mut tt = TransitionTable { - table: vec![], - classes, - stride2: classes.stride2(), - }; - // Two states, regardless of alphabet size, can always fit into u32. - tt.add_empty_state().unwrap(); // dead state - tt.add_empty_state().unwrap(); // quit state - tt - } - - /// Set a transition in this table. Both the `from` and `to` states must - /// already exist, otherwise this panics. `unit` should correspond to the - /// transition out of `from` to set to `to`. - fn set(&mut self, from: StateID, unit: alphabet::Unit, to: StateID) { - assert!(self.is_valid(from), "invalid 'from' state"); - assert!(self.is_valid(to), "invalid 'to' state"); - self.table[from.as_usize() + self.classes.get_by_unit(unit)] = - to.as_u32(); - } - - /// Add an empty state (a state where all transitions lead to a dead state) - /// and return its identifier. The identifier returned is guaranteed to - /// not point to any other existing state. - /// - /// If adding a state would exhaust the state identifier space, then this - /// returns an error. - fn add_empty_state(&mut self) -> Result { - // Normally, to get a fresh state identifier, we would just - // take the index of the next state added to the transition - // table. However, we actually perform an optimization here - // that pre-multiplies state IDs by the stride, such that they - // point immediately at the beginning of their transitions in - // the transition table. This avoids an extra multiplication - // instruction for state lookup at search time. 
- // - // Premultiplied identifiers means that instead of your matching - // loop looking something like this: - // - // state = dfa.start - // for byte in haystack: - // next = dfa.transitions[state * stride + byte] - // if dfa.is_match(next): - // return true - // return false - // - // it can instead look like this: - // - // state = dfa.start - // for byte in haystack: - // next = dfa.transitions[state + byte] - // if dfa.is_match(next): - // return true - // return false - // - // In other words, we save a multiplication instruction in the - // critical path. This turns out to be a decent performance win. - // The cost of using premultiplied state ids is that they can - // require a bigger state id representation. (And they also make - // the code a bit more complex, especially during minimization and - // when reshuffling states, as one needs to convert back and forth - // between state IDs and state indices.) - // - // To do this, we simply take the index of the state into the - // entire transition table, rather than the index of the state - // itself. e.g., If the stride is 64, then the ID of the 3rd state - // is 192, not 2. - let next = self.table.len(); - let id = - StateID::new(next).map_err(|_| BuildError::too_many_states())?; - self.table.extend(iter::repeat(0).take(self.stride())); - Ok(id) - } - - /// Swap the two states given in this transition table. - /// - /// This routine does not do anything to check the correctness of this - /// swap. Callers must ensure that other states pointing to id1 and id2 are - /// updated appropriately. - /// - /// Both id1 and id2 must point to valid states, otherwise this panics. - fn swap(&mut self, id1: StateID, id2: StateID) { - assert!(self.is_valid(id1), "invalid 'id1' state: {id1:?}"); - assert!(self.is_valid(id2), "invalid 'id2' state: {id2:?}"); - // We only need to swap the parts of the state that are used. So if the - // stride is 64, but the alphabet length is only 33, then we save a lot - // of work. - for b in 0..self.classes.alphabet_len() { - self.table.swap(id1.as_usize() + b, id2.as_usize() + b); - } - } - - /// Remap the transitions for the state given according to the function - /// given. This applies the given map function to every transition in the - /// given state and changes the transition in place to the result of the - /// map function for that transition. - fn remap(&mut self, id: StateID, map: impl Fn(StateID) -> StateID) { - for byte in 0..self.alphabet_len() { - let i = id.as_usize() + byte; - let next = self.table()[i]; - self.table_mut()[id.as_usize() + byte] = map(next); - } - } - - /// Truncate the states in this transition table to the given length. - /// - /// This routine does not do anything to check the correctness of this - /// truncation. Callers must ensure that other states pointing to truncated - /// states are updated appropriately. - fn truncate(&mut self, len: usize) { - self.table.truncate(len << self.stride2); - } -} - -impl> TransitionTable { - /// Writes a serialized form of this transition table to the buffer given. - /// If the buffer is too small, then an error is returned. To determine - /// how big the buffer must be, use `write_to_len`. - fn write_to( - &self, - mut dst: &mut [u8], - ) -> Result { - let nwrite = self.write_to_len(); - if dst.len() < nwrite { - return Err(SerializeError::buffer_too_small("transition table")); - } - dst = &mut dst[..nwrite]; - - // write state length - // Unwrap is OK since number of states is guaranteed to fit in a u32. 
- E::write_u32(u32::try_from(self.len()).unwrap(), dst); - dst = &mut dst[size_of::()..]; - - // write state stride (as power of 2) - // Unwrap is OK since stride2 is guaranteed to be <= 9. - E::write_u32(u32::try_from(self.stride2).unwrap(), dst); - dst = &mut dst[size_of::()..]; - - // write byte class map - let n = self.classes.write_to(dst)?; - dst = &mut dst[n..]; - - // write actual transitions - for &sid in self.table() { - let n = wire::write_state_id::(sid, &mut dst); - dst = &mut dst[n..]; - } - Ok(nwrite) - } - - /// Returns the number of bytes the serialized form of this transition - /// table will use. - fn write_to_len(&self) -> usize { - size_of::() // state length - + size_of::() // stride2 - + self.classes.write_to_len() - + (self.table().len() * StateID::SIZE) - } - - /// Validates that every state ID in this transition table is valid. - /// - /// That is, every state ID can be used to correctly index a state in this - /// table. - fn validate(&self, dfa: &DFA) -> Result<(), DeserializeError> { - let sp = &dfa.special; - for state in self.states() { - // We check that the ID itself is well formed. That is, if it's - // a special state then it must actually be a quit, dead, accel, - // match or start state. - if sp.is_special_state(state.id()) { - let is_actually_special = sp.is_dead_state(state.id()) - || sp.is_quit_state(state.id()) - || sp.is_match_state(state.id()) - || sp.is_start_state(state.id()) - || sp.is_accel_state(state.id()); - if !is_actually_special { - // This is kind of a cryptic error message... - return Err(DeserializeError::generic( - "found dense state tagged as special but \ - wasn't actually special", - )); - } - if sp.is_match_state(state.id()) - && dfa.match_len(state.id()) == 0 - { - return Err(DeserializeError::generic( - "found match state with zero pattern IDs", - )); - } - } - for (_, to) in state.transitions() { - if !self.is_valid(to) { - return Err(DeserializeError::generic( - "found invalid state ID in transition table", - )); - } - } - } - Ok(()) - } - - /// Converts this transition table to a borrowed value. - fn as_ref(&self) -> TransitionTable<&'_ [u32]> { - TransitionTable { - table: self.table.as_ref(), - classes: self.classes.clone(), - stride2: self.stride2, - } - } - - /// Converts this transition table to an owned value. - #[cfg(feature = "alloc")] - fn to_owned(&self) -> TransitionTable> { - TransitionTable { - table: self.table.as_ref().to_vec(), - classes: self.classes.clone(), - stride2: self.stride2, - } - } - - /// Return the state for the given ID. If the given ID is not valid, then - /// this panics. - fn state(&self, id: StateID) -> State<'_> { - assert!(self.is_valid(id)); - - let i = id.as_usize(); - State { - id, - stride2: self.stride2, - transitions: &self.table()[i..i + self.alphabet_len()], - } - } - - /// Returns an iterator over all states in this transition table. - /// - /// This iterator yields a tuple for each state. The first element of the - /// tuple corresponds to a state's identifier, and the second element - /// corresponds to the state itself (comprised of its transitions). - fn states(&self) -> StateIter<'_, T> { - StateIter { - tt: self, - it: self.table().chunks(self.stride()).enumerate(), - } - } - - /// Convert a state identifier to an index to a state (in the range - /// 0..self.len()). - /// - /// This is useful when using a `Vec` as an efficient map keyed by state - /// to some other information (such as a remapped state ID). 
- /// - /// If the given ID is not valid, then this may panic or produce an - /// incorrect index. - fn to_index(&self, id: StateID) -> usize { - id.as_usize() >> self.stride2 - } - - /// Convert an index to a state (in the range 0..self.len()) to an actual - /// state identifier. - /// - /// This is useful when using a `Vec` as an efficient map keyed by state - /// to some other information (such as a remapped state ID). - /// - /// If the given index is not in the specified range, then this may panic - /// or produce an incorrect state ID. - fn to_state_id(&self, index: usize) -> StateID { - // CORRECTNESS: If the given index is not valid, then it is not - // required for this to panic or return a valid state ID. - StateID::new_unchecked(index << self.stride2) - } - - /// Returns the state ID for the state immediately following the one given. - /// - /// This does not check whether the state ID returned is invalid. In fact, - /// if the state ID given is the last state in this DFA, then the state ID - /// returned is guaranteed to be invalid. - #[cfg(feature = "dfa-build")] - fn next_state_id(&self, id: StateID) -> StateID { - self.to_state_id(self.to_index(id).checked_add(1).unwrap()) - } - - /// Returns the state ID for the state immediately preceding the one given. - /// - /// If the dead ID given (which is zero), then this panics. - #[cfg(feature = "dfa-build")] - fn prev_state_id(&self, id: StateID) -> StateID { - self.to_state_id(self.to_index(id).checked_sub(1).unwrap()) - } - - /// Returns the table as a slice of state IDs. - fn table(&self) -> &[StateID] { - wire::u32s_to_state_ids(self.table.as_ref()) - } - - /// Returns the total number of states in this transition table. - /// - /// Note that a DFA always has at least two states: the dead and quit - /// states. In particular, the dead state always has ID 0 and is - /// correspondingly always the first state. The dead state is never a match - /// state. - fn len(&self) -> usize { - self.table().len() >> self.stride2 - } - - /// Returns the total stride for every state in this DFA. This corresponds - /// to the total number of transitions used by each state in this DFA's - /// transition table. - fn stride(&self) -> usize { - 1 << self.stride2 - } - - /// Returns the total number of elements in the alphabet for this - /// transition table. This is always less than or equal to `self.stride()`. - /// It is only equal when the alphabet length is a power of 2. Otherwise, - /// it is always strictly less. - fn alphabet_len(&self) -> usize { - self.classes.alphabet_len() - } - - /// Returns true if and only if the given state ID is valid for this - /// transition table. Validity in this context means that the given ID can - /// be used as a valid offset with `self.stride()` to index this transition - /// table. - fn is_valid(&self, id: StateID) -> bool { - let id = id.as_usize(); - id < self.table().len() && id % self.stride() == 0 - } - - /// Return the memory usage, in bytes, of this transition table. - /// - /// This does not include the size of a `TransitionTable` value itself. - fn memory_usage(&self) -> usize { - self.table().len() * StateID::SIZE - } -} - -#[cfg(feature = "dfa-build")] -impl> TransitionTable { - /// Returns the table as a slice of state IDs. - fn table_mut(&mut self) -> &mut [StateID] { - wire::u32s_to_state_ids_mut(self.table.as_mut()) - } -} - -/// The set of all possible starting states in a DFA. 
-/// -/// The set of starting states corresponds to the possible choices one can make -/// in terms of starting a DFA. That is, before following the first transition, -/// you first need to select the state that you start in. -/// -/// Normally, a DFA converted from an NFA that has a single starting state -/// would itself just have one starting state. However, our support for look -/// around generally requires more starting states. The correct starting state -/// is chosen based on certain properties of the position at which we begin -/// our search. -/// -/// Before listing those properties, we first must define two terms: -/// -/// * `haystack` - The bytes to execute the search. The search always starts -/// at the beginning of `haystack` and ends before or at the end of -/// `haystack`. -/// * `context` - The (possibly empty) bytes surrounding `haystack`. `haystack` -/// must be contained within `context` such that `context` is at least as big -/// as `haystack`. -/// -/// This split is crucial for dealing with look-around. For example, consider -/// the context `foobarbaz`, the haystack `bar` and the regex `^bar$`. This -/// regex should _not_ match the haystack since `bar` does not appear at the -/// beginning of the input. Similarly, the regex `\Bbar\B` should match the -/// haystack because `bar` is not surrounded by word boundaries. But a search -/// that does not take context into account would not permit `\B` to match -/// since the beginning of any string matches a word boundary. Similarly, a -/// search that does not take context into account when searching `^bar$` in -/// the haystack `bar` would produce a match when it shouldn't. -/// -/// Thus, it follows that the starting state is chosen based on the following -/// criteria, derived from the position at which the search starts in the -/// `context` (corresponding to the start of `haystack`): -/// -/// 1. If the search starts at the beginning of `context`, then the `Text` -/// start state is used. (Since `^` corresponds to -/// `hir::Anchor::Start`.) -/// 2. If the search starts at a position immediately following a line -/// terminator, then the `Line` start state is used. (Since `(?m:^)` -/// corresponds to `hir::Anchor::StartLF`.) -/// 3. If the search starts at a position immediately following a byte -/// classified as a "word" character (`[_0-9a-zA-Z]`), then the `WordByte` -/// start state is used. (Since `(?-u:\b)` corresponds to a word boundary.) -/// 4. Otherwise, if the search starts at a position immediately following -/// a byte that is not classified as a "word" character (`[^_0-9a-zA-Z]`), -/// then the `NonWordByte` start state is used. (Since `(?-u:\B)` -/// corresponds to a not-word-boundary.) -/// -/// (N.B. Unicode word boundaries are not supported by the DFA because they -/// require multi-byte look-around and this is difficult to support in a DFA.) -/// -/// To further complicate things, we also support constructing individual -/// anchored start states for each pattern in the DFA. (Which is required to -/// implement overlapping regexes correctly, but is also generally useful.) -/// Thus, when individual start states for each pattern are enabled, then the -/// total number of start states represented is `4 + (4 * #patterns)`, where -/// the 4 comes from each of the 4 possibilities above. The first 4 represents -/// the starting states for the entire DFA, which support searching for -/// multiple patterns simultaneously (possibly unanchored). 
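// A minimal sketch, using only the public API, of the haystack/context split
// described above: the bytes surrounding the searched span decide which start
// state is used, so `^` is ruled out whenever the span does not begin at
// position 0 of the input.
use regex_automata::dfa::{dense::DFA, Automaton};
use regex_automata::{HalfMatch, Input};

fn context_sketch() -> Result<(), Box<dyn std::error::Error>> {
    let dfa = DFA::new("^bar")?;

    // Search the span 3..6 of "foobarbaz": the search does not start at the
    // beginning of the input, so a start state is chosen in which `^` cannot
    // match, and the search reports no match.
    let input = Input::new("foobarbaz").range(3..6);
    assert_eq!(None, dfa.try_search_fwd(&input)?);

    // Searching "bar" as the entire input starts at position 0, so the `Text`
    // start state is used and `^` matches.
    let expected = Some(HalfMatch::must(0, 3));
    assert_eq!(expected, dfa.try_search_fwd(&Input::new("bar"))?);
    Ok(())
}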
-/// -/// If individual start states are disabled, then this will only store 4 -/// start states. Typically, individual start states are only enabled when -/// constructing the reverse DFA for regex matching. But they are also useful -/// for building DFAs that can search for a specific pattern or even to support -/// both anchored and unanchored searches with the same DFA. -/// -/// Note though that while the start table always has either `4` or -/// `4 + (4 * #patterns)` starting state *ids*, the total number of states -/// might be considerably smaller. That is, many of the IDs may be duplicative. -/// (For example, if a regex doesn't have a `\b` sub-pattern, then there's no -/// reason to generate a unique starting state for handling word boundaries. -/// Similarly for start/end anchors.) -#[derive(Clone)] -pub(crate) struct StartTable { - /// The initial start state IDs. - /// - /// In practice, T is either `Vec` or `&[u32]`. - /// - /// The first `2 * stride` (currently always 8) entries always correspond - /// to the starts states for the entire DFA, with the first 4 entries being - /// for unanchored searches and the second 4 entries being for anchored - /// searches. To keep things simple, we always use 8 entries even if the - /// `StartKind` is not both. - /// - /// After that, there are `stride * patterns` state IDs, where `patterns` - /// may be zero in the case of a DFA with no patterns or in the case where - /// the DFA was built without enabling starting states for each pattern. - table: T, - /// The starting state configuration supported. When 'both', both - /// unanchored and anchored searches work. When 'unanchored', anchored - /// searches panic. When 'anchored', unanchored searches panic. - kind: StartKind, - /// The start state configuration for every possible byte. - start_map: StartByteMap, - /// The number of starting state IDs per pattern. - stride: usize, - /// The total number of patterns for which starting states are encoded. - /// This is `None` for DFAs that were built without start states for each - /// pattern. Thus, one cannot use this field to say how many patterns - /// are in the DFA in all cases. It is specific to how many patterns are - /// represented in this start table. - pattern_len: Option, - /// The universal starting state for unanchored searches. This is only - /// present when the DFA supports unanchored searches and when all starting - /// state IDs for an unanchored search are equivalent. - universal_start_unanchored: Option, - /// The universal starting state for anchored searches. This is only - /// present when the DFA supports anchored searches and when all starting - /// state IDs for an anchored search are equivalent. - universal_start_anchored: Option, -} - -#[cfg(feature = "dfa-build")] -impl StartTable> { - /// Create a valid set of start states all pointing to the dead state. - /// - /// When the corresponding DFA is constructed with start states for each - /// pattern, then `patterns` should be the number of patterns. Otherwise, - /// it should be zero. - /// - /// If the total table size could exceed the allocatable limit, then this - /// returns an error. In practice, this is unlikely to be able to occur, - /// since it's likely that allocation would have failed long before it got - /// to this point. 
- fn dead( - kind: StartKind, - lookm: &LookMatcher, - pattern_len: Option, - ) -> Result>, BuildError> { - if let Some(len) = pattern_len { - assert!(len <= PatternID::LIMIT); - } - let stride = Start::len(); - // OK because 2*4 is never going to overflow anything. - let starts_len = stride.checked_mul(2).unwrap(); - let pattern_starts_len = - match stride.checked_mul(pattern_len.unwrap_or(0)) { - Some(x) => x, - None => return Err(BuildError::too_many_start_states()), - }; - let table_len = match starts_len.checked_add(pattern_starts_len) { - Some(x) => x, - None => return Err(BuildError::too_many_start_states()), - }; - if let Err(_) = isize::try_from(table_len) { - return Err(BuildError::too_many_start_states()); - } - let table = vec![DEAD.as_u32(); table_len]; - let start_map = StartByteMap::new(lookm); - Ok(StartTable { - table, - kind, - start_map, - stride, - pattern_len, - universal_start_unanchored: None, - universal_start_anchored: None, - }) - } -} - -impl<'a> StartTable<&'a [u32]> { - /// Deserialize a table of start state IDs starting at the beginning of - /// `slice`. Upon success, return the total number of bytes read along with - /// the table of starting state IDs. - /// - /// If there was a problem deserializing any part of the starting IDs, - /// then this returns an error. Notably, if the given slice does not have - /// the same alignment as `StateID`, then this will return an error (among - /// other possible errors). - /// - /// This is guaranteed to execute in constant time. - /// - /// # Safety - /// - /// This routine is not safe because it does not check the validity of the - /// starting state IDs themselves. In particular, the number of starting - /// IDs can be of variable length, so it's possible that checking their - /// validity cannot be done in constant time. An invalid starting state - /// ID is not safe because other code may rely on the starting IDs being - /// correct (such as explicit bounds check elision). Therefore, an invalid - /// start ID can lead to undefined behavior. - /// - /// Callers that use this function must either pass on the safety invariant - /// or guarantee that the bytes given contain valid starting state IDs. - /// This guarantee is upheld by the bytes written by `write_to`. 
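// A standalone sketch of the size computation performed by `dead` above: two
// blocks of `stride` IDs for the whole DFA (unanchored, then anchored) plus
// one block per pattern, with every step checked so that an absurd pattern
// count becomes an error rather than an overflow.
fn start_table_len(stride: usize, pattern_len: Option<usize>) -> Option<usize> {
    let starts_len = stride.checked_mul(2)?;
    let pattern_starts_len = stride.checked_mul(pattern_len.unwrap_or(0))?;
    starts_len.checked_add(pattern_starts_len)
}

// e.g. with a stride of 6 and 3 patterns: 2*6 + 6*3 == 30 state IDs in total.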
- unsafe fn from_bytes_unchecked( - mut slice: &'a [u8], - ) -> Result<(StartTable<&'a [u32]>, usize), DeserializeError> { - let slice_start = slice.as_ptr().as_usize(); - - let (kind, nr) = StartKind::from_bytes(slice)?; - slice = &slice[nr..]; - - let (start_map, nr) = StartByteMap::from_bytes(slice)?; - slice = &slice[nr..]; - - let (stride, nr) = - wire::try_read_u32_as_usize(slice, "start table stride")?; - slice = &slice[nr..]; - if stride != Start::len() { - return Err(DeserializeError::generic( - "invalid starting table stride", - )); - } - - let (maybe_pattern_len, nr) = - wire::try_read_u32_as_usize(slice, "start table patterns")?; - slice = &slice[nr..]; - let pattern_len = if maybe_pattern_len.as_u32() == u32::MAX { - None - } else { - Some(maybe_pattern_len) - }; - if pattern_len.map_or(false, |len| len > PatternID::LIMIT) { - return Err(DeserializeError::generic( - "invalid number of patterns", - )); - } - - let (universal_unanchored, nr) = - wire::try_read_u32(slice, "universal unanchored start")?; - slice = &slice[nr..]; - let universal_start_unanchored = if universal_unanchored == u32::MAX { - None - } else { - Some(StateID::try_from(universal_unanchored).map_err(|e| { - DeserializeError::state_id_error( - e, - "universal unanchored start", - ) - })?) - }; - - let (universal_anchored, nr) = - wire::try_read_u32(slice, "universal anchored start")?; - slice = &slice[nr..]; - let universal_start_anchored = if universal_anchored == u32::MAX { - None - } else { - Some(StateID::try_from(universal_anchored).map_err(|e| { - DeserializeError::state_id_error(e, "universal anchored start") - })?) - }; - - let pattern_table_size = wire::mul( - stride, - pattern_len.unwrap_or(0), - "invalid pattern length", - )?; - // Our start states always start with a two stride of start states for - // the entire automaton. The first stride is for unanchored starting - // states and the second stride is for anchored starting states. What - // follows it are an optional set of start states for each pattern. - let start_state_len = wire::add( - wire::mul(2, stride, "start state stride too big")?, - pattern_table_size, - "invalid 'any' pattern starts size", - )?; - let table_bytes_len = wire::mul( - start_state_len, - StateID::SIZE, - "pattern table bytes length", - )?; - wire::check_slice_len(slice, table_bytes_len, "start ID table")?; - wire::check_alignment::(slice)?; - let table_bytes = &slice[..table_bytes_len]; - slice = &slice[table_bytes_len..]; - // SAFETY: Since StateID is always representable as a u32, all we need - // to do is ensure that we have the proper length and alignment. We've - // checked both above, so the cast below is safe. - // - // N.B. This is the only not-safe code in this function. - let table = core::slice::from_raw_parts( - table_bytes.as_ptr().cast::(), - start_state_len, - ); - let st = StartTable { - table, - kind, - start_map, - stride, - pattern_len, - universal_start_unanchored, - universal_start_anchored, - }; - Ok((st, slice.as_ptr().as_usize() - slice_start)) - } -} - -impl> StartTable { - /// Writes a serialized form of this start table to the buffer given. If - /// the buffer is too small, then an error is returned. To determine how - /// big the buffer must be, use `write_to_len`. 
- fn write_to( - &self, - mut dst: &mut [u8], - ) -> Result { - let nwrite = self.write_to_len(); - if dst.len() < nwrite { - return Err(SerializeError::buffer_too_small( - "starting table ids", - )); - } - dst = &mut dst[..nwrite]; - - // write start kind - let nw = self.kind.write_to::(dst)?; - dst = &mut dst[nw..]; - // write start byte map - let nw = self.start_map.write_to(dst)?; - dst = &mut dst[nw..]; - // write stride - // Unwrap is OK since the stride is always 4 (currently). - E::write_u32(u32::try_from(self.stride).unwrap(), dst); - dst = &mut dst[size_of::()..]; - // write pattern length - // Unwrap is OK since number of patterns is guaranteed to fit in a u32. - E::write_u32( - u32::try_from(self.pattern_len.unwrap_or(0xFFFF_FFFF)).unwrap(), - dst, - ); - dst = &mut dst[size_of::()..]; - // write universal start unanchored state id, u32::MAX if absent - E::write_u32( - self.universal_start_unanchored - .map_or(u32::MAX, |sid| sid.as_u32()), - dst, - ); - dst = &mut dst[size_of::()..]; - // write universal start anchored state id, u32::MAX if absent - E::write_u32( - self.universal_start_anchored.map_or(u32::MAX, |sid| sid.as_u32()), - dst, - ); - dst = &mut dst[size_of::()..]; - // write start IDs - for &sid in self.table() { - let n = wire::write_state_id::(sid, &mut dst); - dst = &mut dst[n..]; - } - Ok(nwrite) - } - - /// Returns the number of bytes the serialized form of this start ID table - /// will use. - fn write_to_len(&self) -> usize { - self.kind.write_to_len() - + self.start_map.write_to_len() - + size_of::() // stride - + size_of::() // # patterns - + size_of::() // universal unanchored start - + size_of::() // universal anchored start - + (self.table().len() * StateID::SIZE) - } - - /// Validates that every state ID in this start table is valid by checking - /// it against the given transition table (which must be for the same DFA). - /// - /// That is, every state ID can be used to correctly index a state. - fn validate(&self, dfa: &DFA) -> Result<(), DeserializeError> { - let tt = &dfa.tt; - if !self.universal_start_unanchored.map_or(true, |s| tt.is_valid(s)) { - return Err(DeserializeError::generic( - "found invalid universal unanchored starting state ID", - )); - } - if !self.universal_start_anchored.map_or(true, |s| tt.is_valid(s)) { - return Err(DeserializeError::generic( - "found invalid universal anchored starting state ID", - )); - } - for &id in self.table() { - if !tt.is_valid(id) { - return Err(DeserializeError::generic( - "found invalid starting state ID", - )); - } - } - Ok(()) - } - - /// Converts this start list to a borrowed value. - fn as_ref(&self) -> StartTable<&'_ [u32]> { - StartTable { - table: self.table.as_ref(), - kind: self.kind, - start_map: self.start_map.clone(), - stride: self.stride, - pattern_len: self.pattern_len, - universal_start_unanchored: self.universal_start_unanchored, - universal_start_anchored: self.universal_start_anchored, - } - } - - /// Converts this start list to an owned value. - #[cfg(feature = "alloc")] - fn to_owned(&self) -> StartTable> { - StartTable { - table: self.table.as_ref().to_vec(), - kind: self.kind, - start_map: self.start_map.clone(), - stride: self.stride, - pattern_len: self.pattern_len, - universal_start_unanchored: self.universal_start_unanchored, - universal_start_anchored: self.universal_start_anchored, - } - } - - /// Return the start state for the given input and starting configuration. - /// This returns an error if the input configuration is not supported by - /// this DFA. 
For example, requesting an unanchored search when the DFA was - /// not built with unanchored starting states. Or asking for an anchored - /// pattern search with an invalid pattern ID or on a DFA that was not - /// built with start states for each pattern. - #[cfg_attr(feature = "perf-inline", inline(always))] - fn start( - &self, - anchored: Anchored, - start: Start, - ) -> Result { - let start_index = start.as_usize(); - let index = match anchored { - Anchored::No => { - if !self.kind.has_unanchored() { - return Err(StartError::unsupported_anchored(anchored)); - } - start_index - } - Anchored::Yes => { - if !self.kind.has_anchored() { - return Err(StartError::unsupported_anchored(anchored)); - } - self.stride + start_index - } - Anchored::Pattern(pid) => { - let len = match self.pattern_len { - None => { - return Err(StartError::unsupported_anchored(anchored)) - } - Some(len) => len, - }; - if pid.as_usize() >= len { - return Ok(DEAD); - } - (2 * self.stride) - + (self.stride * pid.as_usize()) - + start_index - } - }; - Ok(self.table()[index]) - } - - /// Returns an iterator over all start state IDs in this table. - /// - /// Each item is a triple of: start state ID, the start state type and the - /// pattern ID (if any). - fn iter(&self) -> StartStateIter<'_> { - StartStateIter { st: self.as_ref(), i: 0 } - } - - /// Returns the table as a slice of state IDs. - fn table(&self) -> &[StateID] { - wire::u32s_to_state_ids(self.table.as_ref()) - } - - /// Return the memory usage, in bytes, of this start list. - /// - /// This does not include the size of a `StartList` value itself. - fn memory_usage(&self) -> usize { - self.table().len() * StateID::SIZE - } -} - -#[cfg(feature = "dfa-build")] -impl> StartTable { - /// Set the start state for the given index and pattern. - /// - /// If the pattern ID or state ID are not valid, then this will panic. - fn set_start(&mut self, anchored: Anchored, start: Start, id: StateID) { - let start_index = start.as_usize(); - let index = match anchored { - Anchored::No => start_index, - Anchored::Yes => self.stride + start_index, - Anchored::Pattern(pid) => { - let pid = pid.as_usize(); - let len = self - .pattern_len - .expect("start states for each pattern enabled"); - assert!(pid < len, "invalid pattern ID {pid:?}"); - self.stride - .checked_mul(pid) - .unwrap() - .checked_add(self.stride.checked_mul(2).unwrap()) - .unwrap() - .checked_add(start_index) - .unwrap() - } - }; - self.table_mut()[index] = id; - } - - /// Returns the table as a mutable slice of state IDs. - fn table_mut(&mut self) -> &mut [StateID] { - wire::u32s_to_state_ids_mut(self.table.as_mut()) - } -} - -/// An iterator over start state IDs. -/// -/// This iterator yields a triple of start state ID, the anchored mode and the -/// start state type. If a pattern ID is relevant, then the anchored mode will -/// contain it. Start states with an anchored mode containing a pattern ID will -/// only occur when the DFA was compiled with start states for each pattern -/// (which is disabled by default). -pub(crate) struct StartStateIter<'a> { - st: StartTable<&'a [u32]>, - i: usize, -} - -impl<'a> Iterator for StartStateIter<'a> { - type Item = (StateID, Anchored, Start); - - fn next(&mut self) -> Option<(StateID, Anchored, Start)> { - let i = self.i; - let table = self.st.table(); - if i >= table.len() { - return None; - } - self.i += 1; - - // This unwrap is okay since the stride of the starting state table - // must always match the number of start state types. 
- let start_type = Start::from_usize(i % self.st.stride).unwrap(); - let anchored = if i < self.st.stride { - Anchored::No - } else if i < (2 * self.st.stride) { - Anchored::Yes - } else { - let pid = (i - (2 * self.st.stride)) / self.st.stride; - Anchored::Pattern(PatternID::new(pid).unwrap()) - }; - Some((table[i], anchored, start_type)) - } -} - -/// This type represents that patterns that should be reported whenever a DFA -/// enters a match state. This structure exists to support DFAs that search for -/// matches for multiple regexes. -/// -/// This structure relies on the fact that all match states in a DFA occur -/// contiguously in the DFA's transition table. (See dfa/special.rs for a more -/// detailed breakdown of the representation.) Namely, when a match occurs, we -/// know its state ID. Since we know the start and end of the contiguous region -/// of match states, we can use that to compute the position at which the match -/// state occurs. That in turn is used as an offset into this structure. -#[derive(Clone, Debug)] -struct MatchStates { - /// Slices is a flattened sequence of pairs, where each pair points to a - /// sub-slice of pattern_ids. The first element of the pair is an offset - /// into pattern_ids and the second element of the pair is the number - /// of 32-bit pattern IDs starting at that position. That is, each pair - /// corresponds to a single DFA match state and its corresponding match - /// IDs. The number of pairs always corresponds to the number of distinct - /// DFA match states. - /// - /// In practice, T is either Vec or &[u32]. - slices: T, - /// A flattened sequence of pattern IDs for each DFA match state. The only - /// way to correctly read this sequence is indirectly via `slices`. - /// - /// In practice, T is either Vec or &[u32]. - pattern_ids: T, - /// The total number of unique patterns represented by these match states. - pattern_len: usize, -} - -impl<'a> MatchStates<&'a [u32]> { - unsafe fn from_bytes_unchecked( - mut slice: &'a [u8], - ) -> Result<(MatchStates<&'a [u32]>, usize), DeserializeError> { - let slice_start = slice.as_ptr().as_usize(); - - // Read the total number of match states. - let (state_len, nr) = - wire::try_read_u32_as_usize(slice, "match state length")?; - slice = &slice[nr..]; - - // Read the slice start/length pairs. - let pair_len = wire::mul(2, state_len, "match state offset pairs")?; - let slices_bytes_len = wire::mul( - pair_len, - PatternID::SIZE, - "match state slice offset byte length", - )?; - wire::check_slice_len(slice, slices_bytes_len, "match state slices")?; - wire::check_alignment::(slice)?; - let slices_bytes = &slice[..slices_bytes_len]; - slice = &slice[slices_bytes_len..]; - // SAFETY: Since PatternID is always representable as a u32, all we - // need to do is ensure that we have the proper length and alignment. - // We've checked both above, so the cast below is safe. - // - // N.B. This is one of the few not-safe snippets in this function, - // so we mark it explicitly to call it out. - let slices = core::slice::from_raw_parts( - slices_bytes.as_ptr().cast::(), - pair_len, - ); - - // Read the total number of unique pattern IDs (which is always 1 more - // than the maximum pattern ID in this automaton, since pattern IDs are - // handed out contiguously starting at 0). - let (pattern_len, nr) = - wire::try_read_u32_as_usize(slice, "pattern length")?; - slice = &slice[nr..]; - - // Now read the pattern ID length. 
We don't need to store this - // explicitly, but we need it to know how many pattern IDs to read. - let (idlen, nr) = - wire::try_read_u32_as_usize(slice, "pattern ID length")?; - slice = &slice[nr..]; - - // Read the actual pattern IDs. - let pattern_ids_len = - wire::mul(idlen, PatternID::SIZE, "pattern ID byte length")?; - wire::check_slice_len(slice, pattern_ids_len, "match pattern IDs")?; - wire::check_alignment::(slice)?; - let pattern_ids_bytes = &slice[..pattern_ids_len]; - slice = &slice[pattern_ids_len..]; - // SAFETY: Since PatternID is always representable as a u32, all we - // need to do is ensure that we have the proper length and alignment. - // We've checked both above, so the cast below is safe. - // - // N.B. This is one of the few not-safe snippets in this function, - // so we mark it explicitly to call it out. - let pattern_ids = core::slice::from_raw_parts( - pattern_ids_bytes.as_ptr().cast::(), - idlen, - ); - - let ms = MatchStates { slices, pattern_ids, pattern_len }; - Ok((ms, slice.as_ptr().as_usize() - slice_start)) - } -} - -#[cfg(feature = "dfa-build")] -impl MatchStates> { - fn empty(pattern_len: usize) -> MatchStates> { - assert!(pattern_len <= PatternID::LIMIT); - MatchStates { slices: vec![], pattern_ids: vec![], pattern_len } - } - - fn new( - matches: &BTreeMap>, - pattern_len: usize, - ) -> Result>, BuildError> { - let mut m = MatchStates::empty(pattern_len); - for (_, pids) in matches.iter() { - let start = PatternID::new(m.pattern_ids.len()) - .map_err(|_| BuildError::too_many_match_pattern_ids())?; - m.slices.push(start.as_u32()); - // This is always correct since the number of patterns in a single - // match state can never exceed maximum number of allowable - // patterns. Why? Because a pattern can only appear once in a - // particular match state, by construction. (And since our pattern - // ID limit is one less than u32::MAX, we're guaranteed that the - // length fits in a u32.) - m.slices.push(u32::try_from(pids.len()).unwrap()); - for &pid in pids { - m.pattern_ids.push(pid.as_u32()); - } - } - m.pattern_len = pattern_len; - Ok(m) - } - - fn new_with_map( - &self, - matches: &BTreeMap>, - ) -> Result>, BuildError> { - MatchStates::new(matches, self.pattern_len) - } -} - -impl> MatchStates { - /// Writes a serialized form of these match states to the buffer given. If - /// the buffer is too small, then an error is returned. To determine how - /// big the buffer must be, use `write_to_len`. - fn write_to( - &self, - mut dst: &mut [u8], - ) -> Result { - let nwrite = self.write_to_len(); - if dst.len() < nwrite { - return Err(SerializeError::buffer_too_small("match states")); - } - dst = &mut dst[..nwrite]; - - // write state ID length - // Unwrap is OK since number of states is guaranteed to fit in a u32. - E::write_u32(u32::try_from(self.len()).unwrap(), dst); - dst = &mut dst[size_of::()..]; - - // write slice offset pairs - for &pid in self.slices() { - let n = wire::write_pattern_id::(pid, &mut dst); - dst = &mut dst[n..]; - } - - // write unique pattern ID length - // Unwrap is OK since number of patterns is guaranteed to fit in a u32. - E::write_u32(u32::try_from(self.pattern_len).unwrap(), dst); - dst = &mut dst[size_of::()..]; - - // write pattern ID length - // Unwrap is OK since we check at construction (and deserialization) - // that the number of patterns is representable as a u32. 
- E::write_u32(u32::try_from(self.pattern_ids().len()).unwrap(), dst); - dst = &mut dst[size_of::()..]; - - // write pattern IDs - for &pid in self.pattern_ids() { - let n = wire::write_pattern_id::(pid, &mut dst); - dst = &mut dst[n..]; - } - - Ok(nwrite) - } - - /// Returns the number of bytes the serialized form of these match states - /// will use. - fn write_to_len(&self) -> usize { - size_of::() // match state length - + (self.slices().len() * PatternID::SIZE) - + size_of::() // unique pattern ID length - + size_of::() // pattern ID length - + (self.pattern_ids().len() * PatternID::SIZE) - } - - /// Validates that the match state info is itself internally consistent and - /// consistent with the recorded match state region in the given DFA. - fn validate(&self, dfa: &DFA) -> Result<(), DeserializeError> { - if self.len() != dfa.special.match_len(dfa.stride()) { - return Err(DeserializeError::generic( - "match state length mismatch", - )); - } - for si in 0..self.len() { - let start = self.slices()[si * 2].as_usize(); - let len = self.slices()[si * 2 + 1].as_usize(); - if start >= self.pattern_ids().len() { - return Err(DeserializeError::generic( - "invalid pattern ID start offset", - )); - } - if start + len > self.pattern_ids().len() { - return Err(DeserializeError::generic( - "invalid pattern ID length", - )); - } - for mi in 0..len { - let pid = self.pattern_id(si, mi); - if pid.as_usize() >= self.pattern_len { - return Err(DeserializeError::generic( - "invalid pattern ID", - )); - } - } - } - Ok(()) - } - - /// Converts these match states back into their map form. This is useful - /// when shuffling states, as the normal MatchStates representation is not - /// amenable to easy state swapping. But with this map, to swap id1 and - /// id2, all you need to do is: - /// - /// if let Some(pids) = map.remove(&id1) { - /// map.insert(id2, pids); - /// } - /// - /// Once shuffling is done, use MatchStates::new to convert back. - #[cfg(feature = "dfa-build")] - fn to_map(&self, dfa: &DFA) -> BTreeMap> { - let mut map = BTreeMap::new(); - for i in 0..self.len() { - let mut pids = vec![]; - for j in 0..self.pattern_len(i) { - pids.push(self.pattern_id(i, j)); - } - map.insert(self.match_state_id(dfa, i), pids); - } - map - } - - /// Converts these match states to a borrowed value. - fn as_ref(&self) -> MatchStates<&'_ [u32]> { - MatchStates { - slices: self.slices.as_ref(), - pattern_ids: self.pattern_ids.as_ref(), - pattern_len: self.pattern_len, - } - } - - /// Converts these match states to an owned value. - #[cfg(feature = "alloc")] - fn to_owned(&self) -> MatchStates> { - MatchStates { - slices: self.slices.as_ref().to_vec(), - pattern_ids: self.pattern_ids.as_ref().to_vec(), - pattern_len: self.pattern_len, - } - } - - /// Returns the match state ID given the match state index. (Where the - /// first match state corresponds to index 0.) - /// - /// This panics if there is no match state at the given index. - fn match_state_id(&self, dfa: &DFA, index: usize) -> StateID { - assert!(dfa.special.matches(), "no match states to index"); - // This is one of the places where we rely on the fact that match - // states are contiguous in the transition table. Namely, that the - // first match state ID always corresponds to dfa.special.min_start. - // From there, since we know the stride, we can compute the ID of any - // match state given its index. 
- let stride2 = u32::try_from(dfa.stride2()).unwrap(); - let offset = index.checked_shl(stride2).unwrap(); - let id = dfa.special.min_match.as_usize().checked_add(offset).unwrap(); - let sid = StateID::new(id).unwrap(); - assert!(dfa.is_match_state(sid)); - sid - } - - /// Returns the pattern ID at the given match index for the given match - /// state. - /// - /// The match state index is the state index minus the state index of the - /// first match state in the DFA. - /// - /// The match index is the index of the pattern ID for the given state. - /// The index must be less than `self.pattern_len(state_index)`. - #[cfg_attr(feature = "perf-inline", inline(always))] - fn pattern_id(&self, state_index: usize, match_index: usize) -> PatternID { - self.pattern_id_slice(state_index)[match_index] - } - - /// Returns the number of patterns in the given match state. - /// - /// The match state index is the state index minus the state index of the - /// first match state in the DFA. - #[cfg_attr(feature = "perf-inline", inline(always))] - fn pattern_len(&self, state_index: usize) -> usize { - self.slices()[state_index * 2 + 1].as_usize() - } - - /// Returns all of the pattern IDs for the given match state index. - /// - /// The match state index is the state index minus the state index of the - /// first match state in the DFA. - #[cfg_attr(feature = "perf-inline", inline(always))] - fn pattern_id_slice(&self, state_index: usize) -> &[PatternID] { - let start = self.slices()[state_index * 2].as_usize(); - let len = self.pattern_len(state_index); - &self.pattern_ids()[start..start + len] - } - - /// Returns the pattern ID offset slice of u32 as a slice of PatternID. - #[cfg_attr(feature = "perf-inline", inline(always))] - fn slices(&self) -> &[PatternID] { - wire::u32s_to_pattern_ids(self.slices.as_ref()) - } - - /// Returns the total number of match states. - #[cfg_attr(feature = "perf-inline", inline(always))] - fn len(&self) -> usize { - assert_eq!(0, self.slices().len() % 2); - self.slices().len() / 2 - } - - /// Returns the pattern ID slice of u32 as a slice of PatternID. - #[cfg_attr(feature = "perf-inline", inline(always))] - fn pattern_ids(&self) -> &[PatternID] { - wire::u32s_to_pattern_ids(self.pattern_ids.as_ref()) - } - - /// Return the memory usage, in bytes, of these match pairs. - fn memory_usage(&self) -> usize { - (self.slices().len() + self.pattern_ids().len()) * PatternID::SIZE - } -} - -/// A common set of flags for both dense and sparse DFAs. This primarily -/// centralizes the serialization format of these flags at a bitset. -#[derive(Clone, Copy, Debug)] -pub(crate) struct Flags { - /// Whether the DFA can match the empty string. When this is false, all - /// matches returned by this DFA are guaranteed to have non-zero length. - pub(crate) has_empty: bool, - /// Whether the DFA should only produce matches with spans that correspond - /// to valid UTF-8. This also includes omitting any zero-width matches that - /// split the UTF-8 encoding of a codepoint. - pub(crate) is_utf8: bool, - /// Whether the DFA is always anchored or not, regardless of `Input` - /// configuration. This is useful for avoiding a reverse scan even when - /// executing unanchored searches. - pub(crate) is_always_start_anchored: bool, -} - -impl Flags { - /// Creates a set of flags for a DFA from an NFA. - /// - /// N.B. This constructor was defined at the time of writing because all - /// of the flags are derived directly from the NFA. 
If this changes in the - /// future, we might be more thoughtful about how the `Flags` value is - /// itself built. - #[cfg(feature = "dfa-build")] - fn from_nfa(nfa: &thompson::NFA) -> Flags { - Flags { - has_empty: nfa.has_empty(), - is_utf8: nfa.is_utf8(), - is_always_start_anchored: nfa.is_always_start_anchored(), - } - } - - /// Deserializes the flags from the given slice. On success, this also - /// returns the number of bytes read from the slice. - pub(crate) fn from_bytes( - slice: &[u8], - ) -> Result<(Flags, usize), DeserializeError> { - let (bits, nread) = wire::try_read_u32(slice, "flag bitset")?; - let flags = Flags { - has_empty: bits & (1 << 0) != 0, - is_utf8: bits & (1 << 1) != 0, - is_always_start_anchored: bits & (1 << 2) != 0, - }; - Ok((flags, nread)) - } - - /// Writes these flags to the given byte slice. If the buffer is too small, - /// then an error is returned. To determine how big the buffer must be, - /// use `write_to_len`. - pub(crate) fn write_to( - &self, - dst: &mut [u8], - ) -> Result { - fn bool_to_int(b: bool) -> u32 { - if b { - 1 - } else { - 0 - } - } - - let nwrite = self.write_to_len(); - if dst.len() < nwrite { - return Err(SerializeError::buffer_too_small("flag bitset")); - } - let bits = (bool_to_int(self.has_empty) << 0) - | (bool_to_int(self.is_utf8) << 1) - | (bool_to_int(self.is_always_start_anchored) << 2); - E::write_u32(bits, dst); - Ok(nwrite) - } - - /// Returns the number of bytes the serialized form of these flags - /// will use. - pub(crate) fn write_to_len(&self) -> usize { - size_of::() - } -} - -/// An iterator over all states in a DFA. -/// -/// This iterator yields a tuple for each state. The first element of the -/// tuple corresponds to a state's identifier, and the second element -/// corresponds to the state itself (comprised of its transitions). -/// -/// `'a` corresponding to the lifetime of original DFA, `T` corresponds to -/// the type of the transition table itself. -pub(crate) struct StateIter<'a, T> { - tt: &'a TransitionTable, - it: iter::Enumerate>, -} - -impl<'a, T: AsRef<[u32]>> Iterator for StateIter<'a, T> { - type Item = State<'a>; - - fn next(&mut self) -> Option> { - self.it.next().map(|(index, _)| { - let id = self.tt.to_state_id(index); - self.tt.state(id) - }) - } -} - -/// An immutable representation of a single DFA state. -/// -/// `'a` corresponding to the lifetime of a DFA's transition table. -pub(crate) struct State<'a> { - id: StateID, - stride2: usize, - transitions: &'a [StateID], -} - -impl<'a> State<'a> { - /// Return an iterator over all transitions in this state. This yields - /// a number of transitions equivalent to the alphabet length of the - /// corresponding DFA. - /// - /// Each transition is represented by a tuple. The first element is - /// the input byte for that transition and the second element is the - /// transitions itself. - pub(crate) fn transitions(&self) -> StateTransitionIter<'_> { - StateTransitionIter { - len: self.transitions.len(), - it: self.transitions.iter().enumerate(), - } - } - - /// Return an iterator over a sparse representation of the transitions in - /// this state. Only non-dead transitions are returned. - /// - /// The "sparse" representation in this case corresponds to a sequence of - /// triples. The first two elements of the triple comprise an inclusive - /// byte range while the last element corresponds to the transition taken - /// for all bytes in the range. 
- /// - /// This is somewhat more condensed than the classical sparse - /// representation (where you have an element for every non-dead - /// transition), but in practice, checking if a byte is in a range is very - /// cheap and using ranges tends to conserve quite a bit more space. - pub(crate) fn sparse_transitions(&self) -> StateSparseTransitionIter<'_> { - StateSparseTransitionIter { dense: self.transitions(), cur: None } - } - - /// Returns the identifier for this state. - pub(crate) fn id(&self) -> StateID { - self.id - } - - /// Analyzes this state to determine whether it can be accelerated. If so, - /// it returns an accelerator that contains at least one byte. - #[cfg(feature = "dfa-build")] - fn accelerate(&self, classes: &ByteClasses) -> Option { - // We just try to add bytes to our accelerator. Once adding fails - // (because we've added too many bytes), then give up. - let mut accel = Accel::new(); - for (class, id) in self.transitions() { - if id == self.id() { - continue; - } - for unit in classes.elements(class) { - if let Some(byte) = unit.as_u8() { - if !accel.add(byte) { - return None; - } - } - } - } - if accel.is_empty() { - None - } else { - Some(accel) - } - } -} - -impl<'a> fmt::Debug for State<'a> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - for (i, (start, end, sid)) in self.sparse_transitions().enumerate() { - let id = if f.alternate() { - sid.as_usize() - } else { - sid.as_usize() >> self.stride2 - }; - if i > 0 { - write!(f, ", ")?; - } - if start == end { - write!(f, "{start:?} => {id:?}")?; - } else { - write!(f, "{start:?}-{end:?} => {id:?}")?; - } - } - Ok(()) - } -} - -/// An iterator over all transitions in a single DFA state. This yields -/// a number of transitions equivalent to the alphabet length of the -/// corresponding DFA. -/// -/// Each transition is represented by a tuple. The first element is the input -/// byte for that transition and the second element is the transition itself. -#[derive(Debug)] -pub(crate) struct StateTransitionIter<'a> { - len: usize, - it: iter::Enumerate>, -} - -impl<'a> Iterator for StateTransitionIter<'a> { - type Item = (alphabet::Unit, StateID); - - fn next(&mut self) -> Option<(alphabet::Unit, StateID)> { - self.it.next().map(|(i, &id)| { - let unit = if i + 1 == self.len { - alphabet::Unit::eoi(i) - } else { - let b = u8::try_from(i) - .expect("raw byte alphabet is never exceeded"); - alphabet::Unit::u8(b) - }; - (unit, id) - }) - } -} - -/// An iterator over all non-DEAD transitions in a single DFA state using a -/// sparse representation. -/// -/// Each transition is represented by a triple. The first two elements of the -/// triple comprise an inclusive byte range while the last element corresponds -/// to the transition taken for all bytes in the range. -/// -/// As a convenience, this always returns `alphabet::Unit` values of the same -/// type. That is, you'll never get a (byte, EOI) or a (EOI, byte). Only (byte, -/// byte) and (EOI, EOI) values are yielded. 
-#[derive(Debug)] -pub(crate) struct StateSparseTransitionIter<'a> { - dense: StateTransitionIter<'a>, - cur: Option<(alphabet::Unit, alphabet::Unit, StateID)>, -} - -impl<'a> Iterator for StateSparseTransitionIter<'a> { - type Item = (alphabet::Unit, alphabet::Unit, StateID); - - fn next(&mut self) -> Option<(alphabet::Unit, alphabet::Unit, StateID)> { - while let Some((unit, next)) = self.dense.next() { - let (prev_start, prev_end, prev_next) = match self.cur { - Some(t) => t, - None => { - self.cur = Some((unit, unit, next)); - continue; - } - }; - if prev_next == next && !unit.is_eoi() { - self.cur = Some((prev_start, unit, prev_next)); - } else { - self.cur = Some((unit, unit, next)); - if prev_next != DEAD { - return Some((prev_start, prev_end, prev_next)); - } - } - } - if let Some((start, end, next)) = self.cur.take() { - if next != DEAD { - return Some((start, end, next)); - } - } - None - } -} - -/// An error that occurred during the construction of a DFA. -/// -/// This error does not provide many introspection capabilities. There are -/// generally only two things you can do with it: -/// -/// * Obtain a human readable message via its `std::fmt::Display` impl. -/// * Access an underlying [`nfa::thompson::BuildError`](thompson::BuildError) -/// type from its `source` method via the `std::error::Error` trait. This error -/// only occurs when using convenience routines for building a DFA directly -/// from a pattern string. -/// -/// When the `std` feature is enabled, this implements the `std::error::Error` -/// trait. -#[cfg(feature = "dfa-build")] -#[derive(Clone, Debug)] -pub struct BuildError { - kind: BuildErrorKind, -} - -#[cfg(feature = "dfa-build")] -impl BuildError { - /// Returns true if and only if this error corresponds to an error with DFA - /// construction that occurred because of exceeding a size limit. - /// - /// While this can occur when size limits like [`Config::dfa_size_limit`] - /// or [`Config::determinize_size_limit`] are exceeded, this can also occur - /// when the number of states or patterns exceeds a hard-coded maximum. - /// (Where these maximums are derived based on the values representable by - /// [`StateID`] and [`PatternID`].) - /// - /// This predicate is useful in contexts where you want to distinguish - /// between errors related to something provided by an end user (for - /// example, an invalid regex pattern) and errors related to configured - /// heuristics. For example, building a DFA might be an optimization that - /// you want to skip if construction fails because of an exceeded size - /// limit, but where you want to bubble up an error if it fails for some - /// other reason. - /// - /// # Example - /// - /// ``` - /// # if cfg!(miri) { return Ok(()); } // miri takes too long - /// # if !cfg!(target_pointer_width = "64") { return Ok(()); } // see #1039 - /// use regex_automata::{dfa::{dense, Automaton}, Input}; - /// - /// let err = dense::Builder::new() - /// .configure(dense::Config::new() - /// .determinize_size_limit(Some(100_000)) - /// ) - /// .build(r"\w{20}") - /// .unwrap_err(); - /// // This error occurs because a size limit was exceeded. - /// // But things are otherwise valid. - /// assert!(err.is_size_limit_exceeded()); - /// - /// let err = dense::Builder::new() - /// .build(r"\bxyz\b") - /// .unwrap_err(); - /// // This error occurs because a Unicode word boundary - /// // was used without enabling heuristic support for it. - /// // So... not related to size limits. 
-    /// assert!(!err.is_size_limit_exceeded());
-    ///
-    /// let err = dense::Builder::new()
-    ///     .build(r"(xyz")
-    ///     .unwrap_err();
-    /// // This error occurs because the pattern is invalid.
-    /// // So... not related to size limits.
-    /// assert!(!err.is_size_limit_exceeded());
-    ///
-    /// # Ok::<(), Box<dyn std::error::Error>>(())
-    /// ```
-    #[inline]
-    pub fn is_size_limit_exceeded(&self) -> bool {
-        use self::BuildErrorKind::*;
-
-        match self.kind {
-            NFA(_) | Unsupported(_) => false,
-            TooManyStates
-            | TooManyStartStates
-            | TooManyMatchPatternIDs
-            | DFAExceededSizeLimit { .. }
-            | DeterminizeExceededSizeLimit { .. } => true,
-        }
-    }
-}
-
-/// The kind of error that occurred during the construction of a DFA.
-///
-/// Note that this error is non-exhaustive. Adding new variants is not
-/// considered a breaking change.
-#[cfg(feature = "dfa-build")]
-#[derive(Clone, Debug)]
-enum BuildErrorKind {
-    /// An error that occurred while constructing an NFA as a precursor step
-    /// before a DFA is compiled.
-    NFA(thompson::BuildError),
-    /// An error that occurred because an unsupported regex feature was used.
-    /// The message string describes which unsupported feature was used.
-    ///
-    /// The primary regex feature that is unsupported by DFAs is the Unicode
-    /// word boundary look-around assertion (`\b`). This can be worked around
-    /// by either using an ASCII word boundary (`(?-u:\b)`) or by enabling
-    /// Unicode word boundaries when building a DFA.
-    Unsupported(&'static str),
-    /// An error that occurs if too many states are produced while building a
-    /// DFA.
-    TooManyStates,
-    /// An error that occurs if too many start states are needed while building
-    /// a DFA.
-    ///
-    /// This is a kind of oddball error that occurs when building a DFA with
-    /// start states enabled for each pattern and enough patterns to cause
-    /// the table of start states to overflow `usize`.
-    TooManyStartStates,
-    /// This is another oddball error that can occur if there are too many
-    /// patterns spread out across too many match states.
-    TooManyMatchPatternIDs,
-    /// An error that occurs if the DFA got too big during determinization.
-    DFAExceededSizeLimit { limit: usize },
-    /// An error that occurs if auxiliary storage (not the DFA) used during
-    /// determinization got too big.
-    DeterminizeExceededSizeLimit { limit: usize },
-}
-
-#[cfg(feature = "dfa-build")]
-impl BuildError {
-    /// Return the kind of this error.
-    fn kind(&self) -> &BuildErrorKind {
-        &self.kind
-    }
-
-    pub(crate) fn nfa(err: thompson::BuildError) -> BuildError {
-        BuildError { kind: BuildErrorKind::NFA(err) }
-    }
-
-    pub(crate) fn unsupported_dfa_word_boundary_unicode() -> BuildError {
-        let msg = "cannot build DFAs for regexes with Unicode word \
-                   boundaries; switch to ASCII word boundaries, or \
-                   heuristically enable Unicode word boundaries or use a \
-                   different regex engine";
-        BuildError { kind: BuildErrorKind::Unsupported(msg) }
-    }
-
-    pub(crate) fn too_many_states() -> BuildError {
-        BuildError { kind: BuildErrorKind::TooManyStates }
-    }
-
-    pub(crate) fn too_many_start_states() -> BuildError {
-        BuildError { kind: BuildErrorKind::TooManyStartStates }
-    }
-
-    pub(crate) fn too_many_match_pattern_ids() -> BuildError {
-        BuildError { kind: BuildErrorKind::TooManyMatchPatternIDs }
-    }
-
-    pub(crate) fn dfa_exceeded_size_limit(limit: usize) -> BuildError {
-        BuildError { kind: BuildErrorKind::DFAExceededSizeLimit { limit } }
-    }
-
-    pub(crate) fn determinize_exceeded_size_limit(limit: usize) -> BuildError {
-        BuildError {
-            kind: BuildErrorKind::DeterminizeExceededSizeLimit { limit },
-        }
-    }
-}
-
-#[cfg(all(feature = "std", feature = "dfa-build"))]
-impl std::error::Error for BuildError {
-    fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
-        match self.kind() {
-            BuildErrorKind::NFA(ref err) => Some(err),
-            _ => None,
-        }
-    }
-}
-
-#[cfg(feature = "dfa-build")]
-impl core::fmt::Display for BuildError {
-    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
-        match self.kind() {
-            BuildErrorKind::NFA(_) => write!(f, "error building NFA"),
-            BuildErrorKind::Unsupported(ref msg) => {
-                write!(f, "unsupported regex feature for DFAs: {msg}")
-            }
-            BuildErrorKind::TooManyStates => write!(
-                f,
-                "number of DFA states exceeds limit of {}",
-                StateID::LIMIT,
-            ),
-            BuildErrorKind::TooManyStartStates => {
-                let stride = Start::len();
-                // The start table has `stride` entries for starting states for
-                // the entire DFA, and then `stride` entries for each pattern
-                // if start states for each pattern are enabled (which is the
-                // only way this error can occur). Thus, the total number of
-                // patterns that can fit in the table is `stride` less than
-                // what we can allocate.
- let max = usize::try_from(core::isize::MAX).unwrap(); - let limit = (max - stride) / stride; - write!( - f, - "compiling DFA with start states exceeds pattern \ - pattern limit of {}", - limit, - ) - } - BuildErrorKind::TooManyMatchPatternIDs => write!( - f, - "compiling DFA with total patterns in all match states \ - exceeds limit of {}", - PatternID::LIMIT, - ), - BuildErrorKind::DFAExceededSizeLimit { limit } => write!( - f, - "DFA exceeded size limit of {limit:?} during determinization", - ), - BuildErrorKind::DeterminizeExceededSizeLimit { limit } => { - write!(f, "determinization exceeded size limit of {limit:?}") - } - } - } -} - -#[cfg(all(test, feature = "syntax", feature = "dfa-build"))] -mod tests { - use crate::{Input, MatchError}; - - use super::*; - - #[test] - fn errors_with_unicode_word_boundary() { - let pattern = r"\b"; - assert!(Builder::new().build(pattern).is_err()); - } - - #[test] - fn roundtrip_never_match() { - let dfa = DFA::never_match().unwrap(); - let (buf, _) = dfa.to_bytes_native_endian(); - let dfa: DFA<&[u32]> = DFA::from_bytes(&buf).unwrap().0; - - assert_eq!(None, dfa.try_search_fwd(&Input::new("foo12345")).unwrap()); - } - - #[test] - fn roundtrip_always_match() { - use crate::HalfMatch; - - let dfa = DFA::always_match().unwrap(); - let (buf, _) = dfa.to_bytes_native_endian(); - let dfa: DFA<&[u32]> = DFA::from_bytes(&buf).unwrap().0; - - assert_eq!( - Some(HalfMatch::must(0, 0)), - dfa.try_search_fwd(&Input::new("foo12345")).unwrap() - ); - } - - // See the analogous test in src/hybrid/dfa.rs. - #[test] - fn heuristic_unicode_reverse() { - let dfa = DFA::builder() - .configure(DFA::config().unicode_word_boundary(true)) - .thompson(thompson::Config::new().reverse(true)) - .build(r"\b[0-9]+\b") - .unwrap(); - - let input = Input::new("β123").range(2..); - let expected = MatchError::quit(0xB2, 1); - let got = dfa.try_search_rev(&input); - assert_eq!(Err(expected), got); - - let input = Input::new("123β").range(..3); - let expected = MatchError::quit(0xCE, 3); - let got = dfa.try_search_rev(&input); - assert_eq!(Err(expected), got); - } - - // This panics in `TransitionTable::validate` if the match states are not - // validated first. - // - // See: https://github.com/rust-lang/regex/pull/1295 - #[test] - fn regression_validation_order() { - let mut dfa = DFA::new("abc").unwrap(); - dfa.ms = MatchStates { - slices: vec![], - pattern_ids: vec![], - pattern_len: 1, - }; - let (buf, _) = dfa.to_bytes_native_endian(); - DFA::from_bytes(&buf).unwrap_err(); - } -} diff --git a/vendor/regex-automata/src/dfa/determinize.rs b/vendor/regex-automata/src/dfa/determinize.rs deleted file mode 100644 index d53815cbde85cd..00000000000000 --- a/vendor/regex-automata/src/dfa/determinize.rs +++ /dev/null @@ -1,599 +0,0 @@ -use alloc::{collections::BTreeMap, vec::Vec}; - -use crate::{ - dfa::{ - dense::{self, BuildError}, - DEAD, - }, - nfa::thompson, - util::{ - self, - alphabet::{self, ByteSet}, - determinize::{State, StateBuilderEmpty, StateBuilderNFA}, - primitives::{PatternID, StateID}, - search::{Anchored, MatchKind}, - sparse_set::SparseSets, - start::Start, - }, -}; - -/// A builder for configuring and running a DFA determinizer. -#[derive(Clone, Debug)] -pub(crate) struct Config { - match_kind: MatchKind, - quit: ByteSet, - dfa_size_limit: Option, - determinize_size_limit: Option, -} - -impl Config { - /// Create a new default config for a determinizer. The determinizer may be - /// configured before calling `run`. 
- pub fn new() -> Config { - Config { - match_kind: MatchKind::LeftmostFirst, - quit: ByteSet::empty(), - dfa_size_limit: None, - determinize_size_limit: None, - } - } - - /// Run determinization on the given NFA and write the resulting DFA into - /// the one given. The DFA given should be initialized but otherwise empty. - /// "Initialized" means that it is setup to handle the NFA's byte classes, - /// number of patterns and whether to build start states for each pattern. - pub fn run( - &self, - nfa: &thompson::NFA, - dfa: &mut dense::OwnedDFA, - ) -> Result<(), BuildError> { - let dead = State::dead(); - let quit = State::dead(); - let mut cache = StateMap::default(); - // We only insert the dead state here since its representation is - // identical to the quit state. And we never want anything pointing - // to the quit state other than specific transitions derived from the - // determinizer's configured "quit" bytes. - // - // We do put the quit state into 'builder_states' below. This ensures - // that a proper DFA state ID is allocated for it, and that no other - // DFA state uses the "location after the DEAD state." That is, it - // is assumed that the quit state is always the state immediately - // following the DEAD state. - cache.insert(dead.clone(), DEAD); - - let runner = Runner { - config: self.clone(), - nfa, - dfa, - builder_states: alloc::vec![dead, quit], - cache, - memory_usage_state: 0, - sparses: SparseSets::new(nfa.states().len()), - stack: alloc::vec![], - scratch_state_builder: StateBuilderEmpty::new(), - }; - runner.run() - } - - /// The match semantics to use for determinization. - /// - /// MatchKind::All corresponds to the standard textbook construction. - /// All possible match states are represented in the DFA. - /// MatchKind::LeftmostFirst permits greediness and otherwise tries to - /// simulate the match semantics of backtracking regex engines. Namely, - /// only a subset of match states are built, and dead states are used to - /// stop searches with an unanchored prefix. - /// - /// The default is MatchKind::LeftmostFirst. - pub fn match_kind(&mut self, kind: MatchKind) -> &mut Config { - self.match_kind = kind; - self - } - - /// The set of bytes to use that will cause the DFA to enter a quit state, - /// stop searching and return an error. By default, this is empty. - pub fn quit(&mut self, set: ByteSet) -> &mut Config { - self.quit = set; - self - } - - /// The limit, in bytes of the heap, that the DFA is permitted to use. This - /// does not include the auxiliary heap storage used by determinization. - pub fn dfa_size_limit(&mut self, bytes: Option) -> &mut Config { - self.dfa_size_limit = bytes; - self - } - - /// The limit, in bytes of the heap, that determinization itself is allowed - /// to use. This does not include the size of the DFA being built. - pub fn determinize_size_limit( - &mut self, - bytes: Option, - ) -> &mut Config { - self.determinize_size_limit = bytes; - self - } -} - -/// The actual implementation of determinization that converts an NFA to a DFA -/// through powerset construction. -/// -/// This determinizer roughly follows the typical powerset construction, where -/// each DFA state is comprised of one or more NFA states. In the worst case, -/// there is one DFA state for every possible combination of NFA states. In -/// practice, this only happens in certain conditions, typically when there are -/// bounded repetitions. 
-/// -/// The main differences between this implementation and typical deteminization -/// are that this implementation delays matches by one state and hackily makes -/// look-around work. Comments below attempt to explain this. -/// -/// The lifetime variable `'a` refers to the lifetime of the NFA or DFA, -/// whichever is shorter. -#[derive(Debug)] -struct Runner<'a> { - /// The configuration used to initialize determinization. - config: Config, - /// The NFA we're converting into a DFA. - nfa: &'a thompson::NFA, - /// The DFA we're building. - dfa: &'a mut dense::OwnedDFA, - /// Each DFA state being built is defined as an *ordered* set of NFA - /// states, along with some meta facts about the ordered set of NFA states. - /// - /// This is never empty. The first state is always a dummy state such that - /// a state id == 0 corresponds to a dead state. The second state is always - /// the quit state. - /// - /// Why do we have states in both a `Vec` and in a cache map below? - /// Well, they serve two different roles based on access patterns. - /// `builder_states` is the canonical home of each state, and provides - /// constant random access by a DFA state's ID. The cache map below, on - /// the other hand, provides a quick way of searching for identical DFA - /// states by using the DFA state as a key in the map. Of course, we use - /// reference counting to avoid actually duplicating the state's data - /// itself. (Although this has never been benchmarked.) Note that the cache - /// map does not give us full minimization; it just lets us avoid some very - /// obvious redundant states. - /// - /// Note that the index into this Vec isn't quite the DFA's state ID. - /// Rather, it's just an index. To get the state ID, you have to multiply - /// it by the DFA's stride. That's done by self.dfa.from_index. And the - /// inverse is self.dfa.to_index. - /// - /// Moreover, DFA states don't usually retain the IDs assigned to them - /// by their position in this Vec. After determinization completes, - /// states are shuffled around to support other optimizations. See the - /// sibling 'special' module for more details on that. (The reason for - /// mentioning this is that if you print out the DFA for debugging during - /// determinization, and then print out the final DFA after it is fully - /// built, then the state IDs likely won't match up.) - builder_states: Vec, - /// A cache of DFA states that already exist and can be easily looked up - /// via ordered sets of NFA states. - /// - /// See `builder_states` docs for why we store states in two different - /// ways. - cache: StateMap, - /// The memory usage, in bytes, used by builder_states and cache. We track - /// this as new states are added since states use a variable amount of - /// heap. Tracking this as we add states makes it possible to compute the - /// total amount of memory used by the determinizer in constant time. - memory_usage_state: usize, - /// A pair of sparse sets for tracking ordered sets of NFA state IDs. - /// These are reused throughout determinization. A bounded sparse set - /// gives us constant time insertion, membership testing and clearing. - sparses: SparseSets, - /// Scratch space for a stack of NFA states to visit, for depth first - /// visiting without recursion. - stack: Vec, - /// Scratch space for storing an ordered sequence of NFA states, for - /// amortizing allocation. This is principally useful for when we avoid - /// adding a new DFA state since it already exists. 
In order to detect this - /// case though, we still need an ordered set of NFA state IDs. So we use - /// this space to stage that ordered set before we know whether we need to - /// create a new DFA state or not. - scratch_state_builder: StateBuilderEmpty, -} - -/// A map from states to state identifiers. When using std, we use a standard -/// hashmap, since it's a bit faster for this use case. (Other maps, like -/// one's based on FNV, have not yet been benchmarked.) -/// -/// The main purpose of this map is to reuse states where possible. This won't -/// fully minimize the DFA, but it works well in a lot of cases. -#[cfg(feature = "std")] -type StateMap = std::collections::HashMap; -#[cfg(not(feature = "std"))] -type StateMap = BTreeMap; - -impl<'a> Runner<'a> { - /// Build the DFA. If there was a problem constructing the DFA (e.g., if - /// the chosen state identifier representation is too small), then an error - /// is returned. - fn run(mut self) -> Result<(), BuildError> { - if self.nfa.look_set_any().contains_word_unicode() - && !self.config.quit.contains_range(0x80, 0xFF) - { - return Err(BuildError::unsupported_dfa_word_boundary_unicode()); - } - - // A sequence of "representative" bytes drawn from each equivalence - // class. These representative bytes are fed to the NFA to compute - // state transitions. This allows us to avoid re-computing state - // transitions for bytes that are guaranteed to produce identical - // results. Since computing the representatives needs to do a little - // work, we do it once here because we'll be iterating over them a lot. - let representatives: Vec = - self.dfa.byte_classes().representatives(..).collect(); - // The set of all DFA state IDs that still need to have their - // transitions set. We start by seeding this with all starting states. - let mut uncompiled = alloc::vec![]; - self.add_all_starts(&mut uncompiled)?; - while let Some(dfa_id) = uncompiled.pop() { - for &unit in &representatives { - if unit.as_u8().map_or(false, |b| self.config.quit.contains(b)) - { - continue; - } - // In many cases, the state we transition to has already been - // computed. 'cached_state' will do the minimal amount of work - // to check this, and if it exists, immediately return an - // already existing state ID. - let (next_dfa_id, is_new) = self.cached_state(dfa_id, unit)?; - self.dfa.set_transition(dfa_id, unit, next_dfa_id); - // If the state ID we got back is newly created, then we need - // to compile it, so add it to our uncompiled frontier. - if is_new { - uncompiled.push(next_dfa_id); - } - } - } - debug!( - "determinization complete, memory usage: {}, \ - dense DFA size: {}, \ - is reverse? {}", - self.memory_usage(), - self.dfa.memory_usage(), - self.nfa.is_reverse(), - ); - - // A map from DFA state ID to one or more NFA match IDs. Each NFA match - // ID corresponds to a distinct regex pattern that matches in the state - // corresponding to the key. - let mut matches: BTreeMap> = BTreeMap::new(); - self.cache.clear(); - #[cfg(feature = "logging")] - let mut total_pat_len = 0; - for (i, state) in self.builder_states.into_iter().enumerate() { - if let Some(pat_ids) = state.match_pattern_ids() { - let id = self.dfa.to_state_id(i); - log! { - total_pat_len += pat_ids.len(); - } - matches.insert(id, pat_ids); - } - } - log! 
{ - use core::mem::size_of; - let per_elem = size_of::() + size_of::>(); - let pats = total_pat_len * size_of::(); - let mem = (matches.len() * per_elem) + pats; - log::debug!("matches map built, memory usage: {mem}"); - } - // At this point, we shuffle the "special" states in the final DFA. - // This permits a DFA's match loop to detect a match condition (among - // other things) by merely inspecting the current state's identifier, - // and avoids the need for any additional auxiliary storage. - self.dfa.shuffle(matches)?; - Ok(()) - } - - /// Return the identifier for the next DFA state given an existing DFA - /// state and an input byte. If the next DFA state already exists, then - /// return its identifier from the cache. Otherwise, build the state, cache - /// it and return its identifier. - /// - /// This routine returns a boolean indicating whether a new state was - /// built. If a new state is built, then the caller needs to add it to its - /// frontier of uncompiled DFA states to compute transitions for. - fn cached_state( - &mut self, - dfa_id: StateID, - unit: alphabet::Unit, - ) -> Result<(StateID, bool), BuildError> { - // Compute the set of all reachable NFA states, including epsilons. - let empty_builder = self.get_state_builder(); - let builder = util::determinize::next( - self.nfa, - self.config.match_kind, - &mut self.sparses, - &mut self.stack, - &self.builder_states[self.dfa.to_index(dfa_id)], - unit, - empty_builder, - ); - self.maybe_add_state(builder) - } - - /// Compute the set of DFA start states and add their identifiers in - /// 'dfa_state_ids' (no duplicates are added). - fn add_all_starts( - &mut self, - dfa_state_ids: &mut Vec, - ) -> Result<(), BuildError> { - // These should be the first states added. - assert!(dfa_state_ids.is_empty()); - // We only want to add (un)anchored starting states that is consistent - // with our DFA's configuration. Unconditionally adding both (although - // it is the default) can make DFAs quite a bit bigger. - if self.dfa.start_kind().has_unanchored() { - self.add_start_group(Anchored::No, dfa_state_ids)?; - } - if self.dfa.start_kind().has_anchored() { - self.add_start_group(Anchored::Yes, dfa_state_ids)?; - } - // I previously has an 'assert' here checking that either - // 'dfa_state_ids' was non-empty, or the NFA had zero patterns. But it - // turns out this isn't always true. For example, the NFA might have - // one or more patterns but where all such patterns are just 'fail' - // states. These will ultimately just compile down to DFA dead states, - // and since the dead state was added earlier, no new DFA states are - // added. And thus, it is valid and okay for 'dfa_state_ids' to be - // empty even if there are a non-zero number of patterns in the NFA. - - // We only need to compute anchored start states for each pattern if it - // was requested to do so. - if self.dfa.starts_for_each_pattern() { - for pid in self.nfa.patterns() { - self.add_start_group(Anchored::Pattern(pid), dfa_state_ids)?; - } - } - Ok(()) - } - - /// Add a group of start states for the given match pattern ID. Any new - /// DFA states added are pushed on to 'dfa_state_ids'. (No duplicates are - /// pushed.) - /// - /// When pattern_id is None, then this will compile a group of unanchored - /// start states (if the DFA is unanchored). When the pattern_id is - /// present, then this will compile a group of anchored start states that - /// only match the given pattern. - /// - /// This panics if `anchored` corresponds to an invalid pattern ID. 
- fn add_start_group( - &mut self, - anchored: Anchored, - dfa_state_ids: &mut Vec, - ) -> Result<(), BuildError> { - let nfa_start = match anchored { - Anchored::No => self.nfa.start_unanchored(), - Anchored::Yes => self.nfa.start_anchored(), - Anchored::Pattern(pid) => { - self.nfa.start_pattern(pid).expect("valid pattern ID") - } - }; - - // When compiling start states, we're careful not to build additional - // states that aren't necessary. For example, if the NFA has no word - // boundary assertion, then there's no reason to have distinct start - // states for 'NonWordByte' and 'WordByte' starting configurations. - // Instead, the 'WordByte' starting configuration can just point - // directly to the start state for the 'NonWordByte' config. - // - // Note though that we only need to care about assertions in the prefix - // of an NFA since this only concerns the starting states. (Actually, - // the most precisely thing we could do it is look at the prefix - // assertions of each pattern when 'anchored == Anchored::Pattern', - // and then only compile extra states if the prefix is non-empty.) But - // we settle for simplicity here instead of absolute minimalism. It is - // somewhat rare, after all, for multiple patterns in the same regex to - // have different prefix look-arounds. - - let (id, is_new) = - self.add_one_start(nfa_start, Start::NonWordByte)?; - self.dfa.set_start_state(anchored, Start::NonWordByte, id); - if is_new { - dfa_state_ids.push(id); - } - - if !self.nfa.look_set_prefix_any().contains_word() { - self.dfa.set_start_state(anchored, Start::WordByte, id); - } else { - let (id, is_new) = - self.add_one_start(nfa_start, Start::WordByte)?; - self.dfa.set_start_state(anchored, Start::WordByte, id); - if is_new { - dfa_state_ids.push(id); - } - } - if !self.nfa.look_set_prefix_any().contains_anchor() { - self.dfa.set_start_state(anchored, Start::Text, id); - self.dfa.set_start_state(anchored, Start::LineLF, id); - self.dfa.set_start_state(anchored, Start::LineCR, id); - self.dfa.set_start_state( - anchored, - Start::CustomLineTerminator, - id, - ); - } else { - let (id, is_new) = self.add_one_start(nfa_start, Start::Text)?; - self.dfa.set_start_state(anchored, Start::Text, id); - if is_new { - dfa_state_ids.push(id); - } - - let (id, is_new) = self.add_one_start(nfa_start, Start::LineLF)?; - self.dfa.set_start_state(anchored, Start::LineLF, id); - if is_new { - dfa_state_ids.push(id); - } - - let (id, is_new) = self.add_one_start(nfa_start, Start::LineCR)?; - self.dfa.set_start_state(anchored, Start::LineCR, id); - if is_new { - dfa_state_ids.push(id); - } - - let (id, is_new) = - self.add_one_start(nfa_start, Start::CustomLineTerminator)?; - self.dfa.set_start_state( - anchored, - Start::CustomLineTerminator, - id, - ); - if is_new { - dfa_state_ids.push(id); - } - } - - Ok(()) - } - - /// Add a new DFA start state corresponding to the given starting NFA - /// state, and the starting search configuration. (The starting search - /// configuration essentially tells us which look-behind assertions are - /// true for this particular state.) - /// - /// The boolean returned indicates whether the state ID returned is a newly - /// created state, or a previously cached state. - fn add_one_start( - &mut self, - nfa_start: StateID, - start: Start, - ) -> Result<(StateID, bool), BuildError> { - // Compute the look-behind assertions that are true in this starting - // configuration, and the determine the epsilon closure. 
While - // computing the epsilon closure, we only follow conditional epsilon - // transitions that satisfy the look-behind assertions in 'look_have'. - let mut builder_matches = self.get_state_builder().into_matches(); - util::determinize::set_lookbehind_from_start( - self.nfa, - &start, - &mut builder_matches, - ); - self.sparses.set1.clear(); - util::determinize::epsilon_closure( - self.nfa, - nfa_start, - builder_matches.look_have(), - &mut self.stack, - &mut self.sparses.set1, - ); - let mut builder = builder_matches.into_nfa(); - util::determinize::add_nfa_states( - &self.nfa, - &self.sparses.set1, - &mut builder, - ); - self.maybe_add_state(builder) - } - - /// Adds the given state to the DFA being built depending on whether it - /// already exists in this determinizer's cache. - /// - /// If it does exist, then the memory used by 'state' is put back into the - /// determinizer and the previously created state's ID is returned. (Along - /// with 'false', indicating that no new state was added.) - /// - /// If it does not exist, then the state is added to the DFA being built - /// and a fresh ID is allocated (if ID allocation fails, then an error is - /// returned) and returned. (Along with 'true', indicating that a new state - /// was added.) - fn maybe_add_state( - &mut self, - builder: StateBuilderNFA, - ) -> Result<(StateID, bool), BuildError> { - if let Some(&cached_id) = self.cache.get(builder.as_bytes()) { - // Since we have a cached state, put the constructed state's - // memory back into our scratch space, so that it can be reused. - self.put_state_builder(builder); - return Ok((cached_id, false)); - } - self.add_state(builder).map(|sid| (sid, true)) - } - - /// Add the given state to the DFA and make it available in the cache. - /// - /// The state initially has no transitions. That is, it transitions to the - /// dead state for all possible inputs, and transitions to the quit state - /// for all quit bytes. - /// - /// If adding the state would exceed the maximum value for StateID, then an - /// error is returned. - fn add_state( - &mut self, - builder: StateBuilderNFA, - ) -> Result { - let id = self.dfa.add_empty_state()?; - if !self.config.quit.is_empty() { - for b in self.config.quit.iter() { - self.dfa.set_transition( - id, - alphabet::Unit::u8(b), - self.dfa.quit_id(), - ); - } - } - let state = builder.to_state(); - // States use reference counting internally, so we only need to count - // their memory usage once. - self.memory_usage_state += state.memory_usage(); - self.builder_states.push(state.clone()); - self.cache.insert(state, id); - self.put_state_builder(builder); - if let Some(limit) = self.config.dfa_size_limit { - if self.dfa.memory_usage() > limit { - return Err(BuildError::dfa_exceeded_size_limit(limit)); - } - } - if let Some(limit) = self.config.determinize_size_limit { - if self.memory_usage() > limit { - return Err(BuildError::determinize_exceeded_size_limit( - limit, - )); - } - } - Ok(id) - } - - /// Returns a state builder from this determinizer that might have existing - /// capacity. This helps avoid allocs in cases where a state is built that - /// turns out to already be cached. - /// - /// Callers must put the state builder back with 'put_state_builder', - /// otherwise the allocation reuse won't work. - fn get_state_builder(&mut self) -> StateBuilderEmpty { - core::mem::replace( - &mut self.scratch_state_builder, - StateBuilderEmpty::new(), - ) - } - - /// Puts the given state builder back into this determinizer for reuse. 
- /// - /// Note that building a 'State' from a builder always creates a new - /// alloc, so callers should always put the builder back. - fn put_state_builder(&mut self, builder: StateBuilderNFA) { - let _ = core::mem::replace( - &mut self.scratch_state_builder, - builder.clear(), - ); - } - - /// Return the memory usage, in bytes, of this determinizer at the current - /// point in time. This does not include memory used by the NFA or the - /// dense DFA itself. - fn memory_usage(&self) -> usize { - use core::mem::size_of; - - self.builder_states.len() * size_of::() - // Maps likely use more memory than this, but it's probably close. - + self.cache.len() * (size_of::() + size_of::()) - + self.memory_usage_state - + self.stack.capacity() * size_of::() - + self.scratch_state_builder.capacity() - } -} diff --git a/vendor/regex-automata/src/dfa/minimize.rs b/vendor/regex-automata/src/dfa/minimize.rs deleted file mode 100644 index fea925bdc6cf72..00000000000000 --- a/vendor/regex-automata/src/dfa/minimize.rs +++ /dev/null @@ -1,463 +0,0 @@ -use core::{cell::RefCell, fmt, mem}; - -use alloc::{collections::BTreeMap, rc::Rc, vec, vec::Vec}; - -use crate::{ - dfa::{automaton::Automaton, dense, DEAD}, - util::{ - alphabet, - primitives::{PatternID, StateID}, - }, -}; - -/// An implementation of Hopcroft's algorithm for minimizing DFAs. -/// -/// The algorithm implemented here is mostly taken from Wikipedia: -/// https://en.wikipedia.org/wiki/DFA_minimization#Hopcroft's_algorithm -/// -/// This code has had some light optimization attention paid to it, -/// particularly in the form of reducing allocation as much as possible. -/// However, it is still generally slow. Future optimization work should -/// probably focus on the bigger picture rather than micro-optimizations. For -/// example: -/// -/// 1. Figure out how to more intelligently create initial partitions. That is, -/// Hopcroft's algorithm starts by creating two partitions of DFA states -/// that are known to NOT be equivalent: match states and non-match states. -/// The algorithm proceeds by progressively refining these partitions into -/// smaller partitions. If we could start with more partitions, then we -/// could reduce the amount of work that Hopcroft's algorithm needs to do. -/// 2. For every partition that we visit, we find all incoming transitions to -/// every state in the partition for *every* element in the alphabet. (This -/// is why using byte classes can significantly decrease minimization times, -/// since byte classes shrink the alphabet.) This is quite costly and there -/// is perhaps some redundant work being performed depending on the specific -/// states in the set. For example, we might be able to only visit some -/// elements of the alphabet based on the transitions. -/// 3. Move parts of minimization into determinization. If minimization has -/// fewer states to deal with, then it should run faster. A prime example -/// of this might be large Unicode classes, which are generated in way that -/// can create a lot of redundant states. (Some work has been done on this -/// point during NFA compilation via the algorithm described in the -/// "Incremental Construction of MinimalAcyclic Finite-State Automata" -/// paper.) 
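The deleted module above is the crate's Hopcroft implementation; the worklist of `waiting` sets and the shared `StateSet` representation exist to keep the refinement loop cheap. As a rough mental model only — this is a simplified Moore-style fixed-point refinement, not Hopcroft's worklist algorithm and not the crate's code — the sketch below starts from the match/non-match split and keeps splitting any class whose members disagree on which class a given input symbol leads to. All names and the toy three-state DFA are illustrative; the real code additionally seeds the initial partition with per-pattern match information and quit states.

```
use std::collections::HashMap;

/// Partition the states of a toy DFA into equivalence classes by repeated
/// refinement. `delta[s][a]` is the successor of state `s` on symbol `a`,
/// and `accepting[s]` says whether `s` is a match state.
fn minimize_partitions(
    num_states: usize,
    alphabet: usize,
    delta: &[Vec<usize>],
    accepting: &[bool],
) -> Vec<usize> {
    // Initial partition: match states vs. non-match states.
    let mut class: Vec<usize> =
        accepting.iter().map(|&m| usize::from(m)).collect();
    loop {
        let mut seen: HashMap<Vec<usize>, usize> = HashMap::new();
        let mut next_class = vec![0; num_states];
        for s in 0..num_states {
            // A state's signature: its own class plus the class each input
            // symbol transitions into. States with equal signatures stay
            // together; unequal signatures force a split.
            let mut sig = vec![class[s]];
            sig.extend((0..alphabet).map(|a| class[delta[s][a]]));
            let fresh = seen.len();
            next_class[s] = *seen.entry(sig).or_insert(fresh);
        }
        if next_class == class {
            // Fixed point: each class is one state of the minimal DFA.
            return class;
        }
        class = next_class;
    }
}

fn main() {
    // Three states over the alphabet {a, b}; states 0 and 2 behave
    // identically and should collapse into one class.
    let delta = vec![vec![2, 1], vec![2, 1], vec![0, 1]];
    let accepting = vec![false, true, false];
    let classes = minimize_partitions(3, 2, &delta, &accepting);
    assert_eq!(classes, vec![0, 1, 0]);
    println!("equivalence classes: {:?}", classes);
}
```

Hopcroft's algorithm improves on this fixed-point loop by only re-examining the smaller half of each split, which is what the `waiting` worklist in the deleted code manages (note the `x.len() <= y.len()` choice when the split partition is not already waiting).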
-pub(crate) struct Minimizer<'a> { - dfa: &'a mut dense::OwnedDFA, - in_transitions: Vec>>, - partitions: Vec, - waiting: Vec, -} - -impl<'a> fmt::Debug for Minimizer<'a> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("Minimizer") - .field("dfa", &self.dfa) - .field("in_transitions", &self.in_transitions) - .field("partitions", &self.partitions) - .field("waiting", &self.waiting) - .finish() - } -} - -/// A set of states. A state set makes up a single partition in Hopcroft's -/// algorithm. -/// -/// It is represented by an ordered set of state identifiers. We use shared -/// ownership so that a single state set can be in both the set of partitions -/// and in the set of waiting sets simultaneously without an additional -/// allocation. Generally, once a state set is built, it becomes immutable. -/// -/// We use this representation because it avoids the overhead of more -/// traditional set data structures (HashSet/BTreeSet), and also because -/// computing intersection/subtraction on this representation is especially -/// fast. -#[derive(Clone, Debug, Eq, PartialEq, PartialOrd, Ord)] -struct StateSet { - ids: Rc>>, -} - -impl<'a> Minimizer<'a> { - pub fn new(dfa: &'a mut dense::OwnedDFA) -> Minimizer<'a> { - let in_transitions = Minimizer::incoming_transitions(dfa); - let partitions = Minimizer::initial_partitions(dfa); - let waiting = partitions.clone(); - Minimizer { dfa, in_transitions, partitions, waiting } - } - - pub fn run(mut self) { - let stride2 = self.dfa.stride2(); - let as_state_id = |index: usize| -> StateID { - StateID::new(index << stride2).unwrap() - }; - let as_index = |id: StateID| -> usize { id.as_usize() >> stride2 }; - - let mut incoming = StateSet::empty(); - let mut scratch1 = StateSet::empty(); - let mut scratch2 = StateSet::empty(); - let mut newparts = vec![]; - - // This loop is basically Hopcroft's algorithm. Everything else is just - // shuffling data around to fit our representation. - while let Some(set) = self.waiting.pop() { - for b in self.dfa.byte_classes().iter() { - self.find_incoming_to(b, &set, &mut incoming); - // If incoming is empty, then the intersection with any other - // set must also be empty. So 'newparts' just ends up being - // 'self.partitions'. So there's no need to go through the loop - // below. - // - // This actually turns out to be rather large optimization. On - // the order of making minimization 4-5x faster. It's likely - // that the vast majority of all states have very few incoming - // transitions. - if incoming.is_empty() { - continue; - } - - for p in 0..self.partitions.len() { - self.partitions[p].intersection(&incoming, &mut scratch1); - if scratch1.is_empty() { - newparts.push(self.partitions[p].clone()); - continue; - } - - self.partitions[p].subtract(&incoming, &mut scratch2); - if scratch2.is_empty() { - newparts.push(self.partitions[p].clone()); - continue; - } - - let (x, y) = - (scratch1.deep_clone(), scratch2.deep_clone()); - newparts.push(x.clone()); - newparts.push(y.clone()); - match self.find_waiting(&self.partitions[p]) { - Some(i) => { - self.waiting[i] = x; - self.waiting.push(y); - } - None => { - if x.len() <= y.len() { - self.waiting.push(x); - } else { - self.waiting.push(y); - } - } - } - } - newparts = mem::replace(&mut self.partitions, newparts); - newparts.clear(); - } - } - - // At this point, we now have a minimal partitioning of states, where - // each partition is an equivalence class of DFA states. 
Now we need to - // use this partitioning to update the DFA to only contain one state for - // each partition. - - // Create a map from DFA state ID to the representative ID of the - // equivalence class to which it belongs. The representative ID of an - // equivalence class of states is the minimum ID in that class. - let mut state_to_part = vec![DEAD; self.dfa.state_len()]; - for p in &self.partitions { - p.iter(|id| state_to_part[as_index(id)] = p.min()); - } - - // Generate a new contiguous sequence of IDs for minimal states, and - // create a map from equivalence IDs to the new IDs. Thus, the new - // minimal ID of *any* state in the unminimized DFA can be obtained - // with minimals_ids[state_to_part[old_id]]. - let mut minimal_ids = vec![DEAD; self.dfa.state_len()]; - let mut new_index = 0; - for state in self.dfa.states() { - if state_to_part[as_index(state.id())] == state.id() { - minimal_ids[as_index(state.id())] = as_state_id(new_index); - new_index += 1; - } - } - // The total number of states in the minimal DFA. - let minimal_count = new_index; - // Convenience function for remapping state IDs. This takes an old ID, - // looks up its Hopcroft partition and then maps that to the new ID - // range. - let remap = |old| minimal_ids[as_index(state_to_part[as_index(old)])]; - - // Re-map this DFA in place such that the only states remaining - // correspond to the representative states of every equivalence class. - for id in (0..self.dfa.state_len()).map(as_state_id) { - // If this state isn't a representative for an equivalence class, - // then we skip it since it won't appear in the minimal DFA. - if state_to_part[as_index(id)] != id { - continue; - } - self.dfa.remap_state(id, remap); - self.dfa.swap_states(id, minimal_ids[as_index(id)]); - } - // Trim off all unused states from the pre-minimized DFA. This - // represents all states that were merged into a non-singleton - // equivalence class of states, and appeared after the first state - // in each such class. (Because the state with the smallest ID in each - // equivalence class is its representative ID.) - self.dfa.truncate_states(minimal_count); - - // Update the new start states, which is now just the minimal ID of - // whatever state the old start state was collapsed into. Also, we - // collect everything before-hand to work around the borrow checker. - // We're already allocating so much that this is probably fine. If this - // turns out to be costly, then I guess add a `starts_mut` iterator. - let starts: Vec<_> = self.dfa.starts().collect(); - for (old_start_id, anchored, start_type) in starts { - self.dfa.set_start_state( - anchored, - start_type, - remap(old_start_id), - ); - } - - // Update the match state pattern ID list for multi-regexes. All we - // need to do is remap the match state IDs. The pattern ID lists are - // always the same as they were since match states with distinct - // pattern ID lists are always considered distinct states. - let mut pmap = BTreeMap::new(); - for (match_id, pattern_ids) in self.dfa.pattern_map() { - let new_id = remap(match_id); - pmap.insert(new_id, pattern_ids); - } - // This unwrap is OK because minimization never increases the number of - // match states or patterns in those match states. Since minimization - // runs after the pattern map has already been set at least once, we - // know that our match states cannot error. 
- self.dfa.set_pattern_map(&pmap).unwrap(); - - // In order to update the ID of the maximum match state, we need to - // find the maximum ID among all of the match states in the minimized - // DFA. This is not necessarily the new ID of the unminimized maximum - // match state, since that could have been collapsed with a much - // earlier match state. Therefore, to find the new max match state, - // we iterate over all previous match states, find their corresponding - // new minimal ID, and take the maximum of those. - let old = self.dfa.special().clone(); - let new = self.dfa.special_mut(); - // ... but only remap if we had match states. - if old.matches() { - new.min_match = StateID::MAX; - new.max_match = StateID::ZERO; - for i in as_index(old.min_match)..=as_index(old.max_match) { - let new_id = remap(as_state_id(i)); - if new_id < new.min_match { - new.min_match = new_id; - } - if new_id > new.max_match { - new.max_match = new_id; - } - } - } - // ... same, but for start states. - if old.starts() { - new.min_start = StateID::MAX; - new.max_start = StateID::ZERO; - for i in as_index(old.min_start)..=as_index(old.max_start) { - let new_id = remap(as_state_id(i)); - if new_id == DEAD { - continue; - } - if new_id < new.min_start { - new.min_start = new_id; - } - if new_id > new.max_start { - new.max_start = new_id; - } - } - if new.max_start == DEAD { - new.min_start = DEAD; - } - } - new.quit_id = remap(new.quit_id); - new.set_max(); - } - - fn find_waiting(&self, set: &StateSet) -> Option { - self.waiting.iter().position(|s| s == set) - } - - fn find_incoming_to( - &self, - b: alphabet::Unit, - set: &StateSet, - incoming: &mut StateSet, - ) { - incoming.clear(); - set.iter(|id| { - for &inid in - &self.in_transitions[self.dfa.to_index(id)][b.as_usize()] - { - incoming.add(inid); - } - }); - incoming.canonicalize(); - } - - fn initial_partitions(dfa: &dense::OwnedDFA) -> Vec { - // For match states, we know that two match states with different - // pattern ID lists will *always* be distinct, so we can partition them - // initially based on that. 
- let mut matching: BTreeMap, StateSet> = BTreeMap::new(); - let mut is_quit = StateSet::empty(); - let mut no_match = StateSet::empty(); - for state in dfa.states() { - if dfa.is_match_state(state.id()) { - let mut pids = vec![]; - for i in 0..dfa.match_len(state.id()) { - pids.push(dfa.match_pattern(state.id(), i)); - } - matching - .entry(pids) - .or_insert(StateSet::empty()) - .add(state.id()); - } else if dfa.is_quit_state(state.id()) { - is_quit.add(state.id()); - } else { - no_match.add(state.id()); - } - } - - let mut sets: Vec = - matching.into_iter().map(|(_, set)| set).collect(); - sets.push(no_match); - sets.push(is_quit); - sets - } - - fn incoming_transitions(dfa: &dense::OwnedDFA) -> Vec>> { - let mut incoming = vec![]; - for _ in dfa.states() { - incoming.push(vec![vec![]; dfa.alphabet_len()]); - } - for state in dfa.states() { - for (b, next) in state.transitions() { - incoming[dfa.to_index(next)][b.as_usize()].push(state.id()); - } - } - incoming - } -} - -impl StateSet { - fn empty() -> StateSet { - StateSet { ids: Rc::new(RefCell::new(vec![])) } - } - - fn add(&mut self, id: StateID) { - self.ids.borrow_mut().push(id); - } - - fn min(&self) -> StateID { - self.ids.borrow()[0] - } - - fn canonicalize(&mut self) { - self.ids.borrow_mut().sort(); - self.ids.borrow_mut().dedup(); - } - - fn clear(&mut self) { - self.ids.borrow_mut().clear(); - } - - fn len(&self) -> usize { - self.ids.borrow().len() - } - - fn is_empty(&self) -> bool { - self.len() == 0 - } - - fn deep_clone(&self) -> StateSet { - let ids = self.ids.borrow().iter().cloned().collect(); - StateSet { ids: Rc::new(RefCell::new(ids)) } - } - - fn iter(&self, mut f: F) { - for &id in self.ids.borrow().iter() { - f(id); - } - } - - fn intersection(&self, other: &StateSet, dest: &mut StateSet) { - dest.clear(); - if self.is_empty() || other.is_empty() { - return; - } - - let (seta, setb) = (self.ids.borrow(), other.ids.borrow()); - let (mut ita, mut itb) = (seta.iter().cloned(), setb.iter().cloned()); - let (mut a, mut b) = (ita.next().unwrap(), itb.next().unwrap()); - loop { - if a == b { - dest.add(a); - a = match ita.next() { - None => break, - Some(a) => a, - }; - b = match itb.next() { - None => break, - Some(b) => b, - }; - } else if a < b { - a = match ita.next() { - None => break, - Some(a) => a, - }; - } else { - b = match itb.next() { - None => break, - Some(b) => b, - }; - } - } - } - - fn subtract(&self, other: &StateSet, dest: &mut StateSet) { - dest.clear(); - if self.is_empty() || other.is_empty() { - self.iter(|s| dest.add(s)); - return; - } - - let (seta, setb) = (self.ids.borrow(), other.ids.borrow()); - let (mut ita, mut itb) = (seta.iter().cloned(), setb.iter().cloned()); - let (mut a, mut b) = (ita.next().unwrap(), itb.next().unwrap()); - loop { - if a == b { - a = match ita.next() { - None => break, - Some(a) => a, - }; - b = match itb.next() { - None => { - dest.add(a); - break; - } - Some(b) => b, - }; - } else if a < b { - dest.add(a); - a = match ita.next() { - None => break, - Some(a) => a, - }; - } else { - b = match itb.next() { - None => { - dest.add(a); - break; - } - Some(b) => b, - }; - } - } - for a in ita { - dest.add(a); - } - } -} diff --git a/vendor/regex-automata/src/dfa/mod.rs b/vendor/regex-automata/src/dfa/mod.rs deleted file mode 100644 index ff718cc434790e..00000000000000 --- a/vendor/regex-automata/src/dfa/mod.rs +++ /dev/null @@ -1,360 +0,0 @@ -/*! -A module for building and searching with deterministic finite automata (DFAs). 
- -Like other modules in this crate, DFAs support a rich regex syntax with Unicode -features. DFAs also have extensive options for configuring the best space vs -time trade off for your use case and provides support for cheap deserialization -of automata for use in `no_std` environments. - -If you're looking for lazy DFAs that build themselves incrementally during -search, then please see the top-level [`hybrid` module](crate::hybrid). - -# Overview - -This section gives a brief overview of the primary types in this module: - -* A [`regex::Regex`] provides a way to search for matches of a regular -expression using DFAs. This includes iterating over matches with both the start -and end positions of each match. -* A [`dense::DFA`] provides low level access to a DFA that uses a dense -representation (uses lots of space, but fast searching). -* A [`sparse::DFA`] provides the same API as a `dense::DFA`, but uses a sparse -representation (uses less space, but slower searching). -* An [`Automaton`] trait that defines an interface that both dense and sparse -DFAs implement. (A `regex::Regex` is generic over this trait.) -* Both dense DFAs and sparse DFAs support serialization to raw bytes (e.g., -[`dense::DFA::to_bytes_little_endian`]) and cheap deserialization (e.g., -[`dense::DFA::from_bytes`]). - -There is also a [`onepass`] module that provides a [one-pass -DFA](onepass::DFA). The unique advantage of this DFA is that, for the class -of regexes it can be built with, it supports reporting the spans of matching -capturing groups. It is the only DFA in this crate capable of such a thing. - -# Example: basic regex searching - -This example shows how to compile a regex using the default configuration -and then use it to find matches in a byte string: - -``` -use regex_automata::{Match, dfa::regex::Regex}; - -let re = Regex::new(r"[0-9]{4}-[0-9]{2}-[0-9]{2}")?; -let text = b"2018-12-24 2016-10-08"; -let matches: Vec = re.find_iter(text).collect(); -assert_eq!(matches, vec![ - Match::must(0, 0..10), - Match::must(0, 11..21), -]); -# Ok::<(), Box>(()) -``` - -# Example: searching with regex sets - -The DFAs in this module all fully support searching with multiple regexes -simultaneously. You can use this support with standard leftmost-first style -searching to find non-overlapping matches: - -``` -# if cfg!(miri) { return Ok(()); } // miri takes too long -use regex_automata::{Match, dfa::regex::Regex}; - -let re = Regex::new_many(&[r"\w+", r"\S+"])?; -let text = b"@foo bar"; -let matches: Vec = re.find_iter(text).collect(); -assert_eq!(matches, vec![ - Match::must(1, 0..4), - Match::must(0, 5..8), -]); -# Ok::<(), Box>(()) -``` - -# Example: use sparse DFAs - -By default, compiling a regex will use dense DFAs internally. This uses more -memory, but executes searches more quickly. If you can abide slower searches -(somewhere around 3-5x), then sparse DFAs might make more sense since they can -use significantly less space. - -Using sparse DFAs is as easy as using `Regex::new_sparse` instead of -`Regex::new`: - -``` -use regex_automata::{Match, dfa::regex::Regex}; - -let re = Regex::new_sparse(r"[0-9]{4}-[0-9]{2}-[0-9]{2}").unwrap(); -let text = b"2018-12-24 2016-10-08"; -let matches: Vec = re.find_iter(text).collect(); -assert_eq!(matches, vec![ - Match::must(0, 0..10), - Match::must(0, 11..21), -]); -# Ok::<(), Box>(()) -``` - -If you already have dense DFAs for some reason, they can be converted to sparse -DFAs and used to build a new `Regex`. 
For example: - -``` -use regex_automata::{Match, dfa::regex::Regex}; - -let dense_re = Regex::new(r"[0-9]{4}-[0-9]{2}-[0-9]{2}").unwrap(); -let sparse_re = Regex::builder().build_from_dfas( - dense_re.forward().to_sparse()?, - dense_re.reverse().to_sparse()?, -); -let text = b"2018-12-24 2016-10-08"; -let matches: Vec = sparse_re.find_iter(text).collect(); -assert_eq!(matches, vec![ - Match::must(0, 0..10), - Match::must(0, 11..21), -]); -# Ok::<(), Box>(()) -``` - -# Example: deserialize a DFA - -This shows how to first serialize a DFA into raw bytes, and then deserialize -those raw bytes back into a DFA. While this particular example is a -bit contrived, this same technique can be used in your program to -deserialize a DFA at start up time or by memory mapping a file. - -``` -use regex_automata::{Match, dfa::{dense, regex::Regex}}; - -let re1 = Regex::new(r"[0-9]{4}-[0-9]{2}-[0-9]{2}").unwrap(); -// serialize both the forward and reverse DFAs, see note below -let (fwd_bytes, fwd_pad) = re1.forward().to_bytes_native_endian(); -let (rev_bytes, rev_pad) = re1.reverse().to_bytes_native_endian(); -// now deserialize both---we need to specify the correct type! -let fwd: dense::DFA<&[u32]> = dense::DFA::from_bytes(&fwd_bytes[fwd_pad..])?.0; -let rev: dense::DFA<&[u32]> = dense::DFA::from_bytes(&rev_bytes[rev_pad..])?.0; -// finally, reconstruct our regex -let re2 = Regex::builder().build_from_dfas(fwd, rev); - -// we can use it like normal -let text = b"2018-12-24 2016-10-08"; -let matches: Vec = re2.find_iter(text).collect(); -assert_eq!(matches, vec![ - Match::must(0, 0..10), - Match::must(0, 11..21), -]); -# Ok::<(), Box>(()) -``` - -There are a few points worth noting here: - -* We need to extract the raw DFAs used by the regex and serialize those. You -can build the DFAs manually yourself using [`dense::Builder`], but using -the DFAs from a `Regex` guarantees that the DFAs are built correctly. (In -particular, a `Regex` constructs a reverse DFA for finding the starting -location of matches.) -* To convert the DFA to raw bytes, we use the `to_bytes_native_endian` method. -In practice, you'll want to use either [`dense::DFA::to_bytes_little_endian`] -or [`dense::DFA::to_bytes_big_endian`], depending on which platform you're -deserializing your DFA from. If you intend to deserialize on either platform, -then you'll need to serialize both and deserialize the right one depending on -your target's endianness. -* Safely deserializing a DFA requires verifying the raw bytes, particularly if -they are untrusted, since an invalid DFA could cause logical errors, panics -or even undefined behavior. This verification step requires visiting all of -the transitions in the DFA, which can be costly. If cheaper verification is -desired, then [`dense::DFA::from_bytes_unchecked`] is available that only does -verification that can be performed in constant time. However, one can only use -this routine if the caller can guarantee that the bytes provided encoded a -valid DFA. - -The same process can be achieved with sparse DFAs as well: - -``` -use regex_automata::{Match, dfa::{sparse, regex::Regex}}; - -let re1 = Regex::new(r"[0-9]{4}-[0-9]{2}-[0-9]{2}").unwrap(); -// serialize both -let fwd_bytes = re1.forward().to_sparse()?.to_bytes_native_endian(); -let rev_bytes = re1.reverse().to_sparse()?.to_bytes_native_endian(); -// now deserialize both---we need to specify the correct type! 
-let fwd: sparse::DFA<&[u8]> = sparse::DFA::from_bytes(&fwd_bytes)?.0; -let rev: sparse::DFA<&[u8]> = sparse::DFA::from_bytes(&rev_bytes)?.0; -// finally, reconstruct our regex -let re2 = Regex::builder().build_from_dfas(fwd, rev); - -// we can use it like normal -let text = b"2018-12-24 2016-10-08"; -let matches: Vec = re2.find_iter(text).collect(); -assert_eq!(matches, vec![ - Match::must(0, 0..10), - Match::must(0, 11..21), -]); -# Ok::<(), Box>(()) -``` - -Note that unlike dense DFAs, sparse DFAs have no alignment requirements. -Conversely, dense DFAs must be aligned to the same alignment as a -[`StateID`](crate::util::primitives::StateID). - -# Support for `no_std` and `alloc`-only - -This crate comes with `alloc` and `std` features that are enabled by default. -When the `alloc` or `std` features are enabled, the API of this module will -include the facilities necessary for compiling, serializing, deserializing -and searching with DFAs. When only the `alloc` feature is enabled, then -implementations of the `std::error::Error` trait are dropped, but everything -else generally remains the same. When both the `alloc` and `std` features are -disabled, the API of this module will shrink such that it only includes the -facilities necessary for deserializing and searching with DFAs. - -The intended workflow for `no_std` environments is thus as follows: - -* Write a program with the `alloc` or `std` features that compiles and -serializes a regular expression. You may need to serialize both little and big -endian versions of each DFA. (So that's 4 DFAs in total for each regex.) -* In your `no_std` environment, follow the examples above for deserializing -your previously serialized DFAs into regexes. You can then search with them as -you would any regex. - -Deserialization can happen anywhere. For example, with bytes embedded into a -binary or with a file memory mapped at runtime. - -The `regex-cli` command (found in the same repository as this crate) can be -used to serialize DFAs to files and generate Rust code to read them. - -# Syntax - -This module supports the same syntax as the `regex` crate, since they share the -same parser. You can find an exhaustive list of supported syntax in the -[documentation for the `regex` crate](https://docs.rs/regex/1/regex/#syntax). - -There are two things that are not supported by the DFAs in this module: - -* Capturing groups. The DFAs (and [`Regex`](regex::Regex)es built on top -of them) can only find the offsets of an entire match, but cannot resolve -the offsets of each capturing group. This is because DFAs do not have the -expressive power necessary. -* Unicode word boundaries. These present particularly difficult challenges for -DFA construction and would result in an explosion in the number of states. -One can enable [`dense::Config::unicode_word_boundary`] though, which provides -heuristic support for Unicode word boundaries that only works on ASCII text. -Otherwise, one can use `(?-u:\b)` for an ASCII word boundary, which will work -on any input. - -There are no plans to lift either of these limitations. - -Note that these restrictions are identical to the restrictions on lazy DFAs. - -# Differences with general purpose regexes - -The main goal of the [`regex`](https://docs.rs/regex) crate is to serve as a -general purpose regular expression engine. It aims to automatically balance low -compile times, fast search times and low memory usage, while also providing -a convenient API for users. 
In contrast, this module provides a lower level -regular expression interface based exclusively on DFAs that is a bit less -convenient while providing more explicit control over memory usage and search -times. - -Here are some specific negative differences: - -* **Compilation can take an exponential amount of time and space** in the size -of the regex pattern. While most patterns do not exhibit worst case exponential -time, such patterns do exist. For example, `[01]*1[01]{N}` will build a DFA -with approximately `2^(N+2)` states. For this reason, untrusted patterns should -not be compiled with this module. (In the future, the API may expose an option -to return an error if the DFA gets too big.) -* This module does not support sub-match extraction via capturing groups, which -can be achieved with the regex crate's "captures" API. -* While the regex crate doesn't necessarily sport fast compilation times, -the regexes in this module are almost universally slow to compile, especially -when they contain large Unicode character classes. For example, on my system, -compiling `\w{50}` takes about 1 second and almost 15MB of memory! (Compiling -a sparse regex takes about the same time but only uses about 1.2MB of -memory.) Conversely, compiling the same regex without Unicode support, e.g., -`(?-u)\w{50}`, takes under 1 millisecond and about 15KB of memory. For this -reason, you should only use Unicode character classes if you absolutely need -them! (They are enabled by default though.) -* This module does not support Unicode word boundaries. ASCII word boundaries -may be used though by disabling Unicode or selectively doing so in the syntax, -e.g., `(?-u:\b)`. There is also an option to -[heuristically enable Unicode word boundaries](crate::dfa::dense::Config::unicode_word_boundary), -where the corresponding DFA will give up if any non-ASCII byte is seen. -* As a lower level API, this module does not do literal optimizations -automatically. Although it does provide hooks in its API to make use of the -[`Prefilter`](crate::util::prefilter::Prefilter) trait. Missing literal -optimizations means that searches may run much slower than what you're -accustomed to, although, it does provide more predictable and consistent -performance. -* There is no `&str` API like in the regex crate. In this module, all APIs -operate on `&[u8]`. By default, match indices are -guaranteed to fall on UTF-8 boundaries, unless either of -[`syntax::Config::utf8`](crate::util::syntax::Config::utf8) or -[`thompson::Config::utf8`](crate::nfa::thompson::Config::utf8) are disabled. - -With some of the downsides out of the way, here are some positive differences: - -* Both dense and sparse DFAs can be serialized to raw bytes, and then cheaply -deserialized. Deserialization can be done in constant time with the unchecked -APIs, since searching can be performed directly on the raw serialized bytes of -a DFA. -* This module was specifically designed so that the searching phase of a -DFA has minimal runtime requirements, and can therefore be used in `no_std` -environments. While `no_std` environments cannot compile regexes, they can -deserialize pre-compiled regexes. -* Since this module builds DFAs ahead of time, it will generally out-perform -the `regex` crate on equivalent tasks. The performance difference is likely -not large. However, because of a complex set of optimizations in the regex -crate (like literal optimizations), an accurate performance comparison may be -difficult to do. 
-* Sparse DFAs provide a way to build a DFA ahead of time that sacrifices search -performance a small amount, but uses much less storage space. Potentially even -less than what the regex crate uses. -* This module exposes DFAs directly, such as [`dense::DFA`] and -[`sparse::DFA`], which enables one to do less work in some cases. For example, -if you only need the end of a match and not the start of a match, then you can -use a DFA directly without building a `Regex`, which always requires a second -DFA to find the start of a match. -* This module provides more control over memory usage. Aside from choosing -between dense and sparse DFAs, one can also choose a smaller state identifier -representation to use less space. Also, one can enable DFA minimization -via [`dense::Config::minimize`], but it can increase compilation times -dramatically. -*/ - -#[cfg(feature = "dfa-search")] -pub use crate::dfa::{ - automaton::{Automaton, OverlappingState, StartError}, - start::StartKind, -}; - -/// This is an alias for a state ID of zero. It has special significance -/// because it always corresponds to the first state in a DFA, and the first -/// state in a DFA is always "dead." That is, the dead state always has all -/// of its transitions set to itself. Moreover, the dead state is used as a -/// sentinel for various things. e.g., In search, reaching a dead state means -/// that the search must stop. -const DEAD: crate::util::primitives::StateID = - crate::util::primitives::StateID::ZERO; - -#[cfg(feature = "dfa-search")] -pub mod dense; -#[cfg(feature = "dfa-onepass")] -pub mod onepass; -#[cfg(feature = "dfa-search")] -pub mod regex; -#[cfg(feature = "dfa-search")] -pub mod sparse; - -#[cfg(feature = "dfa-search")] -pub(crate) mod accel; -#[cfg(feature = "dfa-search")] -mod automaton; -#[cfg(feature = "dfa-build")] -mod determinize; -#[cfg(feature = "dfa-build")] -mod minimize; -#[cfg(any(feature = "dfa-build", feature = "dfa-onepass"))] -mod remapper; -#[cfg(feature = "dfa-search")] -mod search; -#[cfg(feature = "dfa-search")] -mod special; -#[cfg(feature = "dfa-search")] -mod start; diff --git a/vendor/regex-automata/src/dfa/onepass.rs b/vendor/regex-automata/src/dfa/onepass.rs deleted file mode 100644 index 85f820ef547395..00000000000000 --- a/vendor/regex-automata/src/dfa/onepass.rs +++ /dev/null @@ -1,3192 +0,0 @@ -/*! -A DFA that can return spans for matching capturing groups. - -This module is the home of a [one-pass DFA](DFA). - -This module also contains a [`Builder`] and a [`Config`] for building and -configuring a one-pass DFA. -*/ - -// A note on naming and credit: -// -// As far as I know, Russ Cox came up with the practical vision and -// implementation of a "one-pass regex engine." He mentions and describes it -// briefly in the third article of his regexp article series: -// https://swtch.com/~rsc/regexp/regexp3.html -// -// Cox's implementation is in RE2, and the implementation below is most -// heavily inspired by RE2's. The key thing they have in common is that -// their transitions are defined over an alphabet of bytes. In contrast, -// Go's regex engine also has a one-pass engine, but its transitions are -// more firmly rooted on Unicode codepoints. The ideas are the same, but the -// implementations are different. -// -// RE2 tends to call this a "one-pass NFA." Here, we call it a "one-pass DFA." -// They're both true in their own ways: -// -// * The "one-pass" criterion is generally a property of the NFA itself. 
In -// particular, it is said that an NFA is one-pass if, after each byte of input -// during a search, there is at most one "VM thread" remaining to take for the -// next byte of input. That is, there is never any ambiguity as to the path to -// take through the NFA during a search. -// -// * On the other hand, once a one-pass NFA has its representation converted -// to something where a constant number of instructions is used for each byte -// of input, the implementation looks a lot more like a DFA. It's technically -// more powerful than a DFA since it has side effects (storing offsets inside -// of slots activated by a transition), but it is far closer to a DFA than an -// NFA simulation. -// -// Thus, in this crate, we call it a one-pass DFA. - -use alloc::{vec, vec::Vec}; - -use crate::{ - dfa::{remapper::Remapper, DEAD}, - nfa::thompson::{self, NFA}, - util::{ - alphabet::ByteClasses, - captures::Captures, - escape::DebugByte, - int::{Usize, U32, U64, U8}, - look::{Look, LookSet, UnicodeWordBoundaryError}, - primitives::{NonMaxUsize, PatternID, StateID}, - search::{Anchored, Input, Match, MatchError, MatchKind, Span}, - sparse_set::SparseSet, - }, -}; - -/// The configuration used for building a [one-pass DFA](DFA). -/// -/// A one-pass DFA configuration is a simple data object that is typically used -/// with [`Builder::configure`]. It can be cheaply cloned. -/// -/// A default configuration can be created either with `Config::new`, or -/// perhaps more conveniently, with [`DFA::config`]. -#[derive(Clone, Debug, Default)] -pub struct Config { - match_kind: Option, - starts_for_each_pattern: Option, - byte_classes: Option, - size_limit: Option>, -} - -impl Config { - /// Return a new default one-pass DFA configuration. - pub fn new() -> Config { - Config::default() - } - - /// Set the desired match semantics. - /// - /// The default is [`MatchKind::LeftmostFirst`], which corresponds to the - /// match semantics of Perl-like regex engines. That is, when multiple - /// patterns would match at the same leftmost position, the pattern that - /// appears first in the concrete syntax is chosen. - /// - /// Currently, the only other kind of match semantics supported is - /// [`MatchKind::All`]. This corresponds to "classical DFA" construction - /// where all possible matches are visited. - /// - /// When it comes to the one-pass DFA, it is rarer for preference order and - /// "longest match" to actually disagree. Since if they did disagree, then - /// the regex typically isn't one-pass. For example, searching `Samwise` - /// for `Sam|Samwise` will report `Sam` for leftmost-first matching and - /// `Samwise` for "longest match" or "all" matching. However, this regex is - /// not one-pass if taken literally. The equivalent regex, `Sam(?:|wise)` - /// is one-pass and `Sam|Samwise` may be optimized to it. - /// - /// The other main difference is that "all" match semantics don't support - /// non-greedy matches. "All" match semantics always try to match as much - /// as possible. - pub fn match_kind(mut self, kind: MatchKind) -> Config { - self.match_kind = Some(kind); - self - } - - /// Whether to compile a separate start state for each pattern in the - /// one-pass DFA. - /// - /// When enabled, a separate **anchored** start state is added for each - /// pattern in the DFA. When this start state is used, then the DFA will - /// only search for matches for the pattern specified, even if there are - /// other patterns in the DFA. 
- /// - /// The main downside of this option is that it can potentially increase - /// the size of the DFA and/or increase the time it takes to build the DFA. - /// - /// You might want to enable this option when you want to both search for - /// anchored matches of any pattern or to search for anchored matches of - /// one particular pattern while using the same DFA. (Otherwise, you would - /// need to compile a new DFA for each pattern.) - /// - /// By default this is disabled. - /// - /// # Example - /// - /// This example shows how to build a multi-regex and then search for - /// matches for a any of the patterns or matches for a specific pattern. - /// - /// ``` - /// use regex_automata::{ - /// dfa::onepass::DFA, Anchored, Input, Match, PatternID, - /// }; - /// - /// let re = DFA::builder() - /// .configure(DFA::config().starts_for_each_pattern(true)) - /// .build_many(&["[a-z]+", "[0-9]+"])?; - /// let (mut cache, mut caps) = (re.create_cache(), re.create_captures()); - /// let haystack = "123abc"; - /// let input = Input::new(haystack).anchored(Anchored::Yes); - /// - /// // A normal multi-pattern search will show pattern 1 matches. - /// re.try_search(&mut cache, &input, &mut caps)?; - /// assert_eq!(Some(Match::must(1, 0..3)), caps.get_match()); - /// - /// // If we only want to report pattern 0 matches, then we'll get no - /// // match here. - /// let input = input.anchored(Anchored::Pattern(PatternID::must(0))); - /// re.try_search(&mut cache, &input, &mut caps)?; - /// assert_eq!(None, caps.get_match()); - /// - /// # Ok::<(), Box>(()) - /// ``` - pub fn starts_for_each_pattern(mut self, yes: bool) -> Config { - self.starts_for_each_pattern = Some(yes); - self - } - - /// Whether to attempt to shrink the size of the DFA's alphabet or not. - /// - /// This option is enabled by default and should never be disabled unless - /// one is debugging a one-pass DFA. - /// - /// When enabled, the DFA will use a map from all possible bytes to their - /// corresponding equivalence class. Each equivalence class represents a - /// set of bytes that does not discriminate between a match and a non-match - /// in the DFA. For example, the pattern `[ab]+` has at least two - /// equivalence classes: a set containing `a` and `b` and a set containing - /// every byte except for `a` and `b`. `a` and `b` are in the same - /// equivalence class because they never discriminate between a match and a - /// non-match. - /// - /// The advantage of this map is that the size of the transition table - /// can be reduced drastically from (approximately) `#states * 256 * - /// sizeof(StateID)` to `#states * k * sizeof(StateID)` where `k` is the - /// number of equivalence classes (rounded up to the nearest power of 2). - /// As a result, total space usage can decrease substantially. Moreover, - /// since a smaller alphabet is used, DFA compilation becomes faster as - /// well. - /// - /// **WARNING:** This is only useful for debugging DFAs. Disabling this - /// does not yield any speed advantages. Namely, even when this is - /// disabled, a byte class map is still used while searching. The only - /// difference is that every byte will be forced into its own distinct - /// equivalence class. This is useful for debugging the actual generated - /// transitions because it lets one see the transitions defined on actual - /// bytes instead of the equivalence classes. 
- pub fn byte_classes(mut self, yes: bool) -> Config { - self.byte_classes = Some(yes); - self - } - - /// Set a size limit on the total heap used by a one-pass DFA. - /// - /// This size limit is expressed in bytes and is applied during - /// construction of a one-pass DFA. If the DFA's heap usage exceeds - /// this configured limit, then construction is stopped and an error is - /// returned. - /// - /// The default is no limit. - /// - /// # Example - /// - /// This example shows a one-pass DFA that fails to build because of - /// a configured size limit. This particular example also serves as a - /// cautionary tale demonstrating just how big DFAs with large Unicode - /// character classes can get. - /// - /// ``` - /// # if cfg!(miri) { return Ok(()); } // miri takes too long - /// use regex_automata::{dfa::onepass::DFA, Match}; - /// - /// // 6MB isn't enough! - /// DFA::builder() - /// .configure(DFA::config().size_limit(Some(6_000_000))) - /// .build(r"\w{20}") - /// .unwrap_err(); - /// - /// // ... but 7MB probably is! - /// // (Note that DFA sizes aren't necessarily stable between releases.) - /// let re = DFA::builder() - /// .configure(DFA::config().size_limit(Some(7_000_000))) - /// .build(r"\w{20}")?; - /// let (mut cache, mut caps) = (re.create_cache(), re.create_captures()); - /// let haystack = "A".repeat(20); - /// re.captures(&mut cache, &haystack, &mut caps); - /// assert_eq!(Some(Match::must(0, 0..20)), caps.get_match()); - /// - /// # Ok::<(), Box>(()) - /// ``` - /// - /// While one needs a little more than 3MB to represent `\w{20}`, it - /// turns out that you only need a little more than 4KB to represent - /// `(?-u:\w{20})`. So only use Unicode if you need it! - pub fn size_limit(mut self, limit: Option) -> Config { - self.size_limit = Some(limit); - self - } - - /// Returns the match semantics set in this configuration. - pub fn get_match_kind(&self) -> MatchKind { - self.match_kind.unwrap_or(MatchKind::LeftmostFirst) - } - - /// Returns whether this configuration has enabled anchored starting states - /// for every pattern in the DFA. - pub fn get_starts_for_each_pattern(&self) -> bool { - self.starts_for_each_pattern.unwrap_or(false) - } - - /// Returns whether this configuration has enabled byte classes or not. - /// This is typically a debugging oriented option, as disabling it confers - /// no speed benefit. - pub fn get_byte_classes(&self) -> bool { - self.byte_classes.unwrap_or(true) - } - - /// Returns the DFA size limit of this configuration if one was set. - /// The size limit is total number of bytes on the heap that a DFA is - /// permitted to use. If the DFA exceeds this limit during construction, - /// then construction is stopped and an error is returned. - pub fn get_size_limit(&self) -> Option { - self.size_limit.unwrap_or(None) - } - - /// Overwrite the default configuration such that the options in `o` are - /// always used. If an option in `o` is not set, then the corresponding - /// option in `self` is used. If it's not set in `self` either, then it - /// remains not set. - pub(crate) fn overwrite(&self, o: Config) -> Config { - Config { - match_kind: o.match_kind.or(self.match_kind), - starts_for_each_pattern: o - .starts_for_each_pattern - .or(self.starts_for_each_pattern), - byte_classes: o.byte_classes.or(self.byte_classes), - size_limit: o.size_limit.or(self.size_limit), - } - } -} - -/// A builder for a [one-pass DFA](DFA). 
-/// -/// This builder permits configuring options for the syntax of a pattern, the -/// NFA construction and the DFA construction. This builder is different from a -/// general purpose regex builder in that it permits fine grain configuration -/// of the construction process. The trade off for this is complexity, and -/// the possibility of setting a configuration that might not make sense. For -/// example, there are two different UTF-8 modes: -/// -/// * [`syntax::Config::utf8`](crate::util::syntax::Config::utf8) controls -/// whether the pattern itself can contain sub-expressions that match invalid -/// UTF-8. -/// * [`thompson::Config::utf8`] controls whether empty matches that split a -/// Unicode codepoint are reported or not. -/// -/// Generally speaking, callers will want to either enable all of these or -/// disable all of these. -/// -/// # Example -/// -/// This example shows how to disable UTF-8 mode in the syntax and the NFA. -/// This is generally what you want for matching on arbitrary bytes. -/// -/// ``` -/// # if cfg!(miri) { return Ok(()); } // miri takes too long -/// use regex_automata::{ -/// dfa::onepass::DFA, -/// nfa::thompson, -/// util::syntax, -/// Match, -/// }; -/// -/// let re = DFA::builder() -/// .syntax(syntax::Config::new().utf8(false)) -/// .thompson(thompson::Config::new().utf8(false)) -/// .build(r"foo(?-u:[^b])ar.*")?; -/// let (mut cache, mut caps) = (re.create_cache(), re.create_captures()); -/// -/// let haystack = b"foo\xFFarzz\xE2\x98\xFF\n"; -/// re.captures(&mut cache, haystack, &mut caps); -/// // Notice that `(?-u:[^b])` matches invalid UTF-8, -/// // but the subsequent `.*` does not! Disabling UTF-8 -/// // on the syntax permits this. -/// // -/// // N.B. This example does not show the impact of -/// // disabling UTF-8 mode on a one-pass DFA Config, -/// // since that only impacts regexes that can -/// // produce matches of length 0. -/// assert_eq!(Some(Match::must(0, 0..8)), caps.get_match()); -/// -/// # Ok::<(), Box>(()) -/// ``` -#[derive(Clone, Debug)] -pub struct Builder { - config: Config, - #[cfg(feature = "syntax")] - thompson: thompson::Compiler, -} - -impl Builder { - /// Create a new one-pass DFA builder with the default configuration. - pub fn new() -> Builder { - Builder { - config: Config::default(), - #[cfg(feature = "syntax")] - thompson: thompson::Compiler::new(), - } - } - - /// Build a one-pass DFA from the given pattern. - /// - /// If there was a problem parsing or compiling the pattern, then an error - /// is returned. - #[cfg(feature = "syntax")] - pub fn build(&self, pattern: &str) -> Result { - self.build_many(&[pattern]) - } - - /// Build a one-pass DFA from the given patterns. - /// - /// When matches are returned, the pattern ID corresponds to the index of - /// the pattern in the slice given. - #[cfg(feature = "syntax")] - pub fn build_many>( - &self, - patterns: &[P], - ) -> Result { - let nfa = - self.thompson.build_many(patterns).map_err(BuildError::nfa)?; - self.build_from_nfa(nfa) - } - - /// Build a DFA from the given NFA. - /// - /// # Example - /// - /// This example shows how to build a DFA if you already have an NFA in - /// hand. - /// - /// ``` - /// use regex_automata::{dfa::onepass::DFA, nfa::thompson::NFA, Match}; - /// - /// // This shows how to set non-default options for building an NFA. 
- /// let nfa = NFA::compiler() - /// .configure(NFA::config().shrink(true)) - /// .build(r"[a-z0-9]+")?; - /// let re = DFA::builder().build_from_nfa(nfa)?; - /// let (mut cache, mut caps) = (re.create_cache(), re.create_captures()); - /// re.captures(&mut cache, "foo123bar", &mut caps); - /// assert_eq!(Some(Match::must(0, 0..9)), caps.get_match()); - /// - /// # Ok::<(), Box>(()) - /// ``` - pub fn build_from_nfa(&self, nfa: NFA) -> Result { - // Why take ownership if we're just going to pass a reference to the - // NFA to our internal builder? Well, the first thing to note is that - // an NFA uses reference counting internally, so either choice is going - // to be cheap. So there isn't much cost either way. - // - // The real reason is that a one-pass DFA, semantically, shares - // ownership of an NFA. This is unlike other DFAs that don't share - // ownership of an NFA at all, primarily because they want to be - // self-contained in order to support cheap (de)serialization. - // - // But then why pass a '&nfa' below if we want to share ownership? - // Well, it turns out that using a '&NFA' in our internal builder - // separates its lifetime from the DFA we're building, and this turns - // out to make code a bit more composable. e.g., We can iterate over - // things inside the NFA while borrowing the builder as mutable because - // we know the NFA cannot be mutated. So TL;DR --- this weirdness is - // "because borrow checker." - InternalBuilder::new(self.config.clone(), &nfa).build() - } - - /// Apply the given one-pass DFA configuration options to this builder. - pub fn configure(&mut self, config: Config) -> &mut Builder { - self.config = self.config.overwrite(config); - self - } - - /// Set the syntax configuration for this builder using - /// [`syntax::Config`](crate::util::syntax::Config). - /// - /// This permits setting things like case insensitivity, Unicode and multi - /// line mode. - /// - /// These settings only apply when constructing a one-pass DFA directly - /// from a pattern. - #[cfg(feature = "syntax")] - pub fn syntax( - &mut self, - config: crate::util::syntax::Config, - ) -> &mut Builder { - self.thompson.syntax(config); - self - } - - /// Set the Thompson NFA configuration for this builder using - /// [`nfa::thompson::Config`](crate::nfa::thompson::Config). - /// - /// This permits setting things like whether additional time should be - /// spent shrinking the size of the NFA. - /// - /// These settings only apply when constructing a DFA directly from a - /// pattern. - #[cfg(feature = "syntax")] - pub fn thompson(&mut self, config: thompson::Config) -> &mut Builder { - self.thompson.configure(config); - self - } -} - -/// An internal builder for encapsulating the state necessary to build a -/// one-pass DFA. Typical use is just `InternalBuilder::new(..).build()`. -/// -/// There is no separate pass for determining whether the NFA is one-pass or -/// not. We just try to build the DFA. If during construction we discover that -/// it is not one-pass, we bail out. This is likely to lead to some undesirable -/// expense in some cases, so it might make sense to try an identify common -/// patterns in the NFA that make it definitively not one-pass. That way, we -/// can avoid ever trying to build a one-pass DFA in the first place. For -/// example, '\w*\s' is not one-pass, and since '\w' is Unicode-aware by -/// default, it's probably not a trivial cost to try and build a one-pass DFA -/// for it and then fail. 
-/// -/// Note that some (immutable) fields are duplicated here. For example, the -/// 'nfa' and 'classes' fields are both in the 'DFA'. They are the same thing, -/// but we duplicate them because it makes composition easier below. Otherwise, -/// since the borrow checker can't see through method calls, the mutable borrow -/// we use to mutate the DFA winds up preventing borrowing from any other part -/// of the DFA, even though we aren't mutating those parts. We only do this -/// because the duplication is cheap. -#[derive(Debug)] -struct InternalBuilder<'a> { - /// The DFA we're building. - dfa: DFA, - /// An unordered collection of NFA state IDs that we haven't yet tried to - /// build into a DFA state yet. - /// - /// This collection does not ultimately wind up including every NFA state - /// ID. Instead, each ID represents a "start" state for a sub-graph of the - /// NFA. The set of NFA states we then use to build a DFA state consists - /// of that "start" state and all states reachable from it via epsilon - /// transitions. - uncompiled_nfa_ids: Vec, - /// A map from NFA state ID to DFA state ID. This is useful for easily - /// determining whether an NFA state has been used as a "starting" point - /// to build a DFA state yet. If it hasn't, then it is mapped to DEAD, - /// and since DEAD is specially added and never corresponds to any NFA - /// state, it follows that a mapping to DEAD implies the NFA state has - /// no corresponding DFA state yet. - nfa_to_dfa_id: Vec, - /// A stack used to traverse the NFA states that make up a single DFA - /// state. Traversal occurs until the stack is empty, and we only push to - /// the stack when the state ID isn't in 'seen'. Actually, even more than - /// that, if we try to push something on to this stack that is already in - /// 'seen', then we bail out on construction completely, since it implies - /// that the NFA is not one-pass. - stack: Vec<(StateID, Epsilons)>, - /// The set of NFA states that we've visited via 'stack'. - seen: SparseSet, - /// Whether a match NFA state has been observed while constructing a - /// one-pass DFA state. Once a match state is seen, assuming we are using - /// leftmost-first match semantics, then we don't add any more transitions - /// to the DFA state we're building. - matched: bool, - /// The config passed to the builder. - /// - /// This is duplicated in dfa.config. - config: Config, - /// The NFA we're building a one-pass DFA from. - /// - /// This is duplicated in dfa.nfa. - nfa: &'a NFA, - /// The equivalence classes that make up the alphabet for this DFA> - /// - /// This is duplicated in dfa.classes. - classes: ByteClasses, -} - -impl<'a> InternalBuilder<'a> { - /// Create a new builder with an initial empty DFA. - fn new(config: Config, nfa: &'a NFA) -> InternalBuilder<'a> { - let classes = if !config.get_byte_classes() { - // A one-pass DFA will always use the equivalence class map, but - // enabling this option is useful for debugging. Namely, this will - // cause all transitions to be defined over their actual bytes - // instead of an opaque equivalence class identifier. The former is - // much easier to grok as a human. - ByteClasses::singletons() - } else { - nfa.byte_classes().clone() - }; - // Normally a DFA alphabet includes the EOI symbol, but we don't need - // that in the one-pass DFA since we handle look-around explicitly - // without encoding it into the DFA. Thus, we don't need to delay - // matches by 1 byte. 
However, we reuse the space that *would* be used - // by the EOI transition by putting match information there (like which - // pattern matches and which look-around assertions need to hold). So - // this means our real alphabet length is 1 fewer than what the byte - // classes report, since we don't use EOI. - let alphabet_len = classes.alphabet_len().checked_sub(1).unwrap(); - let stride2 = classes.stride2(); - let dfa = DFA { - config: config.clone(), - nfa: nfa.clone(), - table: vec![], - starts: vec![], - // Since one-pass DFAs have a smaller state ID max than - // StateID::MAX, it follows that StateID::MAX is a valid initial - // value for min_match_id since no state ID can ever be greater - // than it. In the case of a one-pass DFA with no match states, the - // min_match_id will keep this sentinel value. - min_match_id: StateID::MAX, - classes: classes.clone(), - alphabet_len, - stride2, - pateps_offset: alphabet_len, - // OK because PatternID::MAX*2 is guaranteed not to overflow. - explicit_slot_start: nfa.pattern_len().checked_mul(2).unwrap(), - }; - InternalBuilder { - dfa, - uncompiled_nfa_ids: vec![], - nfa_to_dfa_id: vec![DEAD; nfa.states().len()], - stack: vec![], - seen: SparseSet::new(nfa.states().len()), - matched: false, - config, - nfa, - classes, - } - } - - /// Build the DFA from the NFA given to this builder. If the NFA is not - /// one-pass, then return an error. An error may also be returned if a - /// particular limit is exceeded. (Some limits, like the total heap memory - /// used, are configurable. Others, like the total patterns or slots, are - /// hard-coded based on representational limitations.) - fn build(mut self) -> Result { - self.nfa.look_set_any().available().map_err(BuildError::word)?; - for look in self.nfa.look_set_any().iter() { - // This is a future incompatibility check where if we add any - // more look-around assertions, then the one-pass DFA either - // needs to reject them (what we do here) or it needs to have its - // Transition representation modified to be capable of storing the - // new assertions. - if look.as_repr() > Look::WordUnicodeNegate.as_repr() { - return Err(BuildError::unsupported_look(look)); - } - } - if self.nfa.pattern_len().as_u64() > PatternEpsilons::PATTERN_ID_LIMIT - { - return Err(BuildError::too_many_patterns( - PatternEpsilons::PATTERN_ID_LIMIT, - )); - } - if self.nfa.group_info().explicit_slot_len() > Slots::LIMIT { - return Err(BuildError::not_one_pass( - "too many explicit capturing groups (max is 16)", - )); - } - assert_eq!(DEAD, self.add_empty_state()?); - - // This is where the explicit slots start. We care about this because - // we only need to track explicit slots. The implicit slots---two for - // each pattern---are tracked as part of the search routine itself. - let explicit_slot_start = self.nfa.pattern_len() * 2; - self.add_start_state(None, self.nfa.start_anchored())?; - if self.config.get_starts_for_each_pattern() { - for pid in self.nfa.patterns() { - self.add_start_state( - Some(pid), - self.nfa.start_pattern(pid).unwrap(), - )?; - } - } - // NOTE: One wonders what the effects of treating 'uncompiled_nfa_ids' - // as a stack are. It is really an unordered *set* of NFA state IDs. - // If it, for example, in practice led to discovering whether a regex - // was or wasn't one-pass later than if we processed NFA state IDs in - // ascending order, then that would make this routine more costly in - // the somewhat common case of a regex that isn't one-pass. 
- while let Some(nfa_id) = self.uncompiled_nfa_ids.pop() { - let dfa_id = self.nfa_to_dfa_id[nfa_id]; - // Once we see a match, we keep going, but don't add any new - // transitions. Normally we'd just stop, but we have to keep - // going in order to verify that our regex is actually one-pass. - self.matched = false; - // The NFA states we've already explored for this DFA state. - self.seen.clear(); - // The NFA states to explore via epsilon transitions. If we ever - // try to push an NFA state that we've already seen, then the NFA - // is not one-pass because it implies there are multiple epsilon - // transition paths that lead to the same NFA state. In other - // words, there is ambiguity. - self.stack_push(nfa_id, Epsilons::empty())?; - while let Some((id, epsilons)) = self.stack.pop() { - match *self.nfa.state(id) { - thompson::State::ByteRange { ref trans } => { - self.compile_transition(dfa_id, trans, epsilons)?; - } - thompson::State::Sparse(ref sparse) => { - for trans in sparse.transitions.iter() { - self.compile_transition(dfa_id, trans, epsilons)?; - } - } - thompson::State::Dense(ref dense) => { - for trans in dense.iter() { - self.compile_transition(dfa_id, &trans, epsilons)?; - } - } - thompson::State::Look { look, next } => { - let looks = epsilons.looks().insert(look); - self.stack_push(next, epsilons.set_looks(looks))?; - } - thompson::State::Union { ref alternates } => { - for &sid in alternates.iter().rev() { - self.stack_push(sid, epsilons)?; - } - } - thompson::State::BinaryUnion { alt1, alt2 } => { - self.stack_push(alt2, epsilons)?; - self.stack_push(alt1, epsilons)?; - } - thompson::State::Capture { next, slot, .. } => { - let slot = slot.as_usize(); - let epsilons = if slot < explicit_slot_start { - // If this is an implicit slot, we don't care - // about it, since we handle implicit slots in - // the search routine. We can get away with that - // because there are 2 implicit slots for every - // pattern. - epsilons - } else { - // Offset our explicit slots so that they start - // at index 0. - let offset = slot - explicit_slot_start; - epsilons.set_slots(epsilons.slots().insert(offset)) - }; - self.stack_push(next, epsilons)?; - } - thompson::State::Fail => { - continue; - } - thompson::State::Match { pattern_id } => { - // If we found two different paths to a match state - // for the same DFA state, then we have ambiguity. - // Thus, it's not one-pass. - if self.matched { - return Err(BuildError::not_one_pass( - "multiple epsilon transitions to match state", - )); - } - self.matched = true; - // Shove the matching pattern ID and the 'epsilons' - // into the current DFA state's pattern epsilons. The - // 'epsilons' includes the slots we need to capture - // before reporting the match and also the conditional - // epsilon transitions we need to check before we can - // report a match. - self.dfa.set_pattern_epsilons( - dfa_id, - PatternEpsilons::empty() - .set_pattern_id(pattern_id) - .set_epsilons(epsilons), - ); - // N.B. It is tempting to just bail out here when - // compiling a leftmost-first DFA, since we will never - // compile any more transitions in that case. But we - // actually need to keep going in order to verify that - // we actually have a one-pass regex. e.g., We might - // see more Match states (e.g., for other patterns) - // that imply that we don't have a one-pass regex. - // So instead, we mark that we've found a match and - // continue on. When we go to compile a new DFA state, - // we just skip that part. 
But otherwise check that the - // one-pass property is upheld. - } - } - } - } - self.shuffle_states(); - self.dfa.starts.shrink_to_fit(); - self.dfa.table.shrink_to_fit(); - Ok(self.dfa) - } - - /// Shuffle all match states to the end of the transition table and set - /// 'min_match_id' to the ID of the first such match state. - /// - /// The point of this is to make it extremely cheap to determine whether - /// a state is a match state or not. We need to check on this on every - /// transition during a search, so it being cheap is important. This - /// permits us to check it by simply comparing two state identifiers, as - /// opposed to looking for the pattern ID in the state's `PatternEpsilons`. - /// (Which requires a memory load and some light arithmetic.) - fn shuffle_states(&mut self) { - let mut remapper = Remapper::new(&self.dfa); - let mut next_dest = self.dfa.last_state_id(); - for i in (0..self.dfa.state_len()).rev() { - let id = StateID::must(i); - let is_match = - self.dfa.pattern_epsilons(id).pattern_id().is_some(); - if !is_match { - continue; - } - remapper.swap(&mut self.dfa, next_dest, id); - self.dfa.min_match_id = next_dest; - next_dest = self.dfa.prev_state_id(next_dest).expect( - "match states should be a proper subset of all states", - ); - } - remapper.remap(&mut self.dfa); - } - - /// Compile the given NFA transition into the DFA state given. - /// - /// 'Epsilons' corresponds to any conditional epsilon transitions that need - /// to be satisfied to follow this transition, and any slots that need to - /// be saved if the transition is followed. - /// - /// If this transition indicates that the NFA is not one-pass, then - /// this returns an error. (This occurs, for example, if the DFA state - /// already has a transition defined for the same input symbols as the - /// given transition, *and* the result of the old and new transitions is - /// different.) - fn compile_transition( - &mut self, - dfa_id: StateID, - trans: &thompson::Transition, - epsilons: Epsilons, - ) -> Result<(), BuildError> { - let next_dfa_id = self.add_dfa_state_for_nfa_state(trans.next)?; - for byte in self - .classes - .representatives(trans.start..=trans.end) - .filter_map(|r| r.as_u8()) - { - let oldtrans = self.dfa.transition(dfa_id, byte); - let newtrans = - Transition::new(self.matched, next_dfa_id, epsilons); - // If the old transition points to the DEAD state, then we know - // 'byte' has not been mapped to any transition for this DFA state - // yet. So set it unconditionally. Otherwise, we require that the - // old and new transitions are equivalent. Otherwise, there is - // ambiguity and thus the regex is not one-pass. - if oldtrans.state_id() == DEAD { - self.dfa.set_transition(dfa_id, byte, newtrans); - } else if oldtrans != newtrans { - return Err(BuildError::not_one_pass( - "conflicting transition", - )); - } - } - Ok(()) - } - - /// Add a start state to the DFA corresponding to the given NFA starting - /// state ID. - /// - /// If adding a state would blow any limits (configured or hard-coded), - /// then an error is returned. - /// - /// If the starting state is an anchored state for a particular pattern, - /// then callers must provide the pattern ID for that starting state. - /// Callers must also ensure that the first starting state added is the - /// start state for all patterns, and then each anchored starting state for - /// each pattern (if necessary) added in order. Otherwise, this panics. 
- fn add_start_state( - &mut self, - pid: Option, - nfa_id: StateID, - ) -> Result { - match pid { - // With no pid, this should be the start state for all patterns - // and thus be the first one. - None => assert!(self.dfa.starts.is_empty()), - // With a pid, we want it to be at self.dfa.starts[pid+1]. - Some(pid) => assert!(self.dfa.starts.len() == pid.one_more()), - } - let dfa_id = self.add_dfa_state_for_nfa_state(nfa_id)?; - self.dfa.starts.push(dfa_id); - Ok(dfa_id) - } - - /// Add a new DFA state corresponding to the given NFA state. If adding a - /// state would blow any limits (configured or hard-coded), then an error - /// is returned. If a DFA state already exists for the given NFA state, - /// then that DFA state's ID is returned and no new states are added. - /// - /// It is not expected that this routine is called for every NFA state. - /// Instead, an NFA state ID will usually correspond to the "start" state - /// for a sub-graph of the NFA, where all states in the sub-graph are - /// reachable via epsilon transitions (conditional or unconditional). That - /// sub-graph of NFA states is ultimately what produces a single DFA state. - fn add_dfa_state_for_nfa_state( - &mut self, - nfa_id: StateID, - ) -> Result { - // If we've already built a DFA state for the given NFA state, then - // just return that. We definitely do not want to have more than one - // DFA state in existence for the same NFA state, since all but one of - // them will likely become unreachable. And at least some of them are - // likely to wind up being incomplete. - let existing_dfa_id = self.nfa_to_dfa_id[nfa_id]; - if existing_dfa_id != DEAD { - return Ok(existing_dfa_id); - } - // If we don't have any DFA state yet, add it and then add the given - // NFA state to the list of states to explore. - let dfa_id = self.add_empty_state()?; - self.nfa_to_dfa_id[nfa_id] = dfa_id; - self.uncompiled_nfa_ids.push(nfa_id); - Ok(dfa_id) - } - - /// Unconditionally add a new empty DFA state. If adding it would exceed - /// any limits (configured or hard-coded), then an error is returned. The - /// ID of the new state is returned on success. - /// - /// The added state is *not* a match state. - fn add_empty_state(&mut self) -> Result { - let state_limit = Transition::STATE_ID_LIMIT; - // Note that unlike dense and lazy DFAs, we specifically do NOT - // premultiply our state IDs here. The reason is that we want to pack - // our state IDs into 64-bit transitions with other info, so the fewer - // the bits we use for state IDs the better. If we premultiply, then - // our state ID space shrinks. We justify this by the assumption that - // a one-pass DFA is just already doing a fair bit more work than a - // normal DFA anyway, so an extra multiplication to compute a state - // transition doesn't seem like a huge deal. - let next_id = self.dfa.table.len() >> self.dfa.stride2(); - let id = StateID::new(next_id) - .map_err(|_| BuildError::too_many_states(state_limit))?; - if id.as_u64() > Transition::STATE_ID_LIMIT { - return Err(BuildError::too_many_states(state_limit)); - } - self.dfa - .table - .extend(core::iter::repeat(Transition(0)).take(self.dfa.stride())); - // The default empty value for 'PatternEpsilons' is sadly not all - // zeroes. Instead, a special sentinel is used to indicate that there - // is no pattern. So we need to explicitly set the pattern epsilons to - // the correct "empty" PatternEpsilons. 
- self.dfa.set_pattern_epsilons(id, PatternEpsilons::empty()); - if let Some(size_limit) = self.config.get_size_limit() { - if self.dfa.memory_usage() > size_limit { - return Err(BuildError::exceeded_size_limit(size_limit)); - } - } - Ok(id) - } - - /// Push the given NFA state ID and its corresponding epsilons (slots and - /// conditional epsilon transitions) on to a stack for use in a depth first - /// traversal of a sub-graph of the NFA. - /// - /// If the given NFA state ID has already been pushed on to the stack, then - /// it indicates the regex is not one-pass and this correspondingly returns - /// an error. - fn stack_push( - &mut self, - nfa_id: StateID, - epsilons: Epsilons, - ) -> Result<(), BuildError> { - // If we already have seen a match and we are compiling a leftmost - // first DFA, then we shouldn't add any more states to look at. This is - // effectively how preference order and non-greediness is implemented. - // if !self.config.get_match_kind().continue_past_first_match() - // && self.matched - // { - // return Ok(()); - // } - if !self.seen.insert(nfa_id) { - return Err(BuildError::not_one_pass( - "multiple epsilon transitions to same state", - )); - } - self.stack.push((nfa_id, epsilons)); - Ok(()) - } -} - -/// A one-pass DFA for executing a subset of anchored regex searches while -/// resolving capturing groups. -/// -/// A one-pass DFA can be built from an NFA that is one-pass. An NFA is -/// one-pass when there is never any ambiguity about how to continue a search. -/// For example, `a*a` is not one-pass because during a search, it's not -/// possible to know whether to continue matching the `a*` or to move on to -/// the single `a`. However, `a*b` is one-pass, because for every byte in the -/// input, it's always clear when to move on from `a*` to `b`. -/// -/// # Only anchored searches are supported -/// -/// In this crate, especially for DFAs, unanchored searches are implemented by -/// treating the pattern as if it had a `(?s-u:.)*?` prefix. While the prefix -/// is one-pass on its own, adding anything after it, e.g., `(?s-u:.)*?a` will -/// make the overall pattern not one-pass. Why? Because the `(?s-u:.)` matches -/// any byte, and there is therefore ambiguity as to when the prefix should -/// stop matching and something else should start matching. -/// -/// Therefore, one-pass DFAs do not support unanchored searches. In addition -/// to many regexes simply not being one-pass, it implies that one-pass DFAs -/// have limited utility. With that said, when a one-pass DFA can be used, it -/// can potentially provide a dramatic speed up over alternatives like the -/// [`BoundedBacktracker`](crate::nfa::thompson::backtrack::BoundedBacktracker) -/// and the [`PikeVM`](crate::nfa::thompson::pikevm::PikeVM). In particular, -/// a one-pass DFA is the only DFA capable of reporting the spans of matching -/// capturing groups. -/// -/// To clarify, when we say that unanchored searches are not supported, what -/// that actually means is: -/// -/// * The high level routines, [`DFA::is_match`] and [`DFA::captures`], always -/// do anchored searches. -/// * Since iterators are most useful in the context of unanchored searches, -/// there is no `DFA::captures_iter` method. -/// * For lower level routines like [`DFA::try_search`], an error will be -/// returned if the given [`Input`] is configured to do an unanchored search or -/// search for an invalid pattern ID. 
(Note that an [`Input`] is configured to -/// do an unanchored search by default, so just giving a `Input::new` is -/// guaranteed to return an error.) -/// -/// # Other limitations -/// -/// In addition to the [configurable heap limit](Config::size_limit) and -/// the requirement that a regex pattern be one-pass, there are some other -/// limitations: -/// -/// * There is an internal limit on the total number of explicit capturing -/// groups that appear across all patterns. It is somewhat small and there is -/// no way to configure it. If your pattern(s) exceed this limit, then building -/// a one-pass DFA will fail. -/// * If the number of patterns exceeds an internal unconfigurable limit, then -/// building a one-pass DFA will fail. This limit is quite large and you're -/// unlikely to hit it. -/// * If the total number of states exceeds an internal unconfigurable limit, -/// then building a one-pass DFA will fail. This limit is quite large and -/// you're unlikely to hit it. -/// -/// # Other examples of regexes that aren't one-pass -/// -/// One particularly unfortunate example is that enabling Unicode can cause -/// regexes that were one-pass to no longer be one-pass. Consider the regex -/// `(?-u)\w*\s` for example. It is one-pass because there is exactly no -/// overlap between the ASCII definitions of `\w` and `\s`. But `\w*\s` -/// (i.e., with Unicode enabled) is *not* one-pass because `\w` and `\s` get -/// translated to UTF-8 automatons. And while the *codepoints* in `\w` and `\s` -/// do not overlap, the underlying UTF-8 encodings do. Indeed, because of the -/// overlap between UTF-8 automata, the use of Unicode character classes will -/// tend to vastly increase the likelihood of a regex not being one-pass. -/// -/// # How does one know if a regex is one-pass or not? -/// -/// At the time of writing, the only way to know is to try and build a one-pass -/// DFA. The one-pass property is checked while constructing the DFA. -/// -/// This does mean that you might potentially waste some CPU cycles and memory -/// by optimistically trying to build a one-pass DFA. But this is currently the -/// only way. In the future, building a one-pass DFA might be able to use some -/// heuristics to detect common violations of the one-pass property and bail -/// more quickly. -/// -/// # Resource usage -/// -/// Unlike a general DFA, a one-pass DFA has stricter bounds on its resource -/// usage. Namely, construction of a one-pass DFA has a time and space -/// complexity of `O(n)`, where `n ~ nfa.states().len()`. (A general DFA's time -/// and space complexity is `O(2^n)`.) This smaller time bound is achieved -/// because there is at most one DFA state created for each NFA state. If -/// additional DFA states would be required, then the pattern is not one-pass -/// and construction will fail. -/// -/// Note though that currently, this DFA uses a fully dense representation. -/// This means that while its space complexity is no worse than an NFA, it may -/// in practice use more memory because of higher constant factors. The reason -/// for this trade off is two-fold. Firstly, a dense representation makes the -/// search faster. Secondly, the bigger an NFA, the more unlikely it is to be -/// one-pass. Therefore, most one-pass DFAs are usually pretty small. -/// -/// # Example -/// -/// This example shows that the one-pass DFA implements Unicode word boundaries -/// correctly while simultaneously reporting spans for capturing groups that -/// participate in a match. 
(This is the only DFA that implements full support -/// for Unicode word boundaries.) -/// -/// ``` -/// # if cfg!(miri) { return Ok(()); } // miri takes too long -/// use regex_automata::{dfa::onepass::DFA, Match, Span}; -/// -/// let re = DFA::new(r"\b(?P\w+)[[:space:]]+(?P\w+)\b")?; -/// let (mut cache, mut caps) = (re.create_cache(), re.create_captures()); -/// -/// re.captures(&mut cache, "Шерлок Холмс", &mut caps); -/// assert_eq!(Some(Match::must(0, 0..23)), caps.get_match()); -/// assert_eq!(Some(Span::from(0..12)), caps.get_group_by_name("first")); -/// assert_eq!(Some(Span::from(13..23)), caps.get_group_by_name("last")); -/// # Ok::<(), Box>(()) -/// ``` -/// -/// # Example: iteration -/// -/// Unlike other regex engines in this crate, this one does not provide -/// iterator search functions. This is because a one-pass DFA only supports -/// anchored searches, and so iterator functions are generally not applicable. -/// -/// However, if you know that all of your matches are -/// directly adjacent, then an iterator can be used. The -/// [`util::iter::Searcher`](crate::util::iter::Searcher) type can be used for -/// this purpose: -/// -/// ``` -/// # if cfg!(miri) { return Ok(()); } // miri takes too long -/// use regex_automata::{ -/// dfa::onepass::DFA, -/// util::iter::Searcher, -/// Anchored, Input, Span, -/// }; -/// -/// let re = DFA::new(r"\w(\d)\w")?; -/// let (mut cache, caps) = (re.create_cache(), re.create_captures()); -/// let input = Input::new("a1zb2yc3x").anchored(Anchored::Yes); -/// -/// let mut it = Searcher::new(input).into_captures_iter(caps, |input, caps| { -/// Ok(re.try_search(&mut cache, input, caps)?) -/// }).infallible(); -/// let caps0 = it.next().unwrap(); -/// assert_eq!(Some(Span::from(1..2)), caps0.get_group(1)); -/// -/// # Ok::<(), Box>(()) -/// ``` -#[derive(Clone)] -pub struct DFA { - /// The configuration provided by the caller. - config: Config, - /// The NFA used to build this DFA. - /// - /// NOTE: We probably don't need to store the NFA here, but we use enough - /// bits from it that it's convenient to do so. And there really isn't much - /// cost to doing so either, since an NFA is reference counted internally. - nfa: NFA, - /// The transition table. Given a state ID 's' and a byte of haystack 'b', - /// the next state is `table[sid + classes[byte]]`. - /// - /// The stride of this table (i.e., the number of columns) is always - /// a power of 2, even if the alphabet length is smaller. This makes - /// converting between state IDs and state indices very cheap. - /// - /// Note that the stride always includes room for one extra "transition" - /// that isn't actually a transition. It is a 'PatternEpsilons' that is - /// used for match states only. Because of this, the maximum number of - /// active columns in the transition table is 257, which means the maximum - /// stride is 512 (the next power of 2 greater than or equal to 257). - table: Vec, - /// The DFA state IDs of the starting states. - /// - /// `starts[0]` is always present and corresponds to the starting state - /// when searching for matches of any pattern in the DFA. - /// - /// `starts[i]` where i>0 corresponds to the starting state for the pattern - /// ID 'i-1'. These starting states are optional. - starts: Vec, - /// Every state ID >= this value corresponds to a match state. - /// - /// This is what a search uses to detect whether a state is a match state - /// or not. It requires only a simple comparison instead of bit-unpacking - /// the PatternEpsilons from every state. 
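Because `shuffle_states` (above) moves every match state to the end of the table, this test really is a single integer comparison at search time. A hedged sketch of the partition-then-compare idea, using plain `usize` IDs and a hypothetical `is_match` predicate rather than the crate's `Remapper`:

```
/// Toy version of the shuffle: order state IDs so non-match states come
/// first, and remember the first position that holds a match state.
fn partition_match_states(is_match: &[bool]) -> (Vec<usize>, usize) {
    let mut order: Vec<usize> = (0..is_match.len()).collect();
    // Stable sort by the boolean key keeps non-match states (false) first.
    order.sort_by_key(|&sid| is_match[sid]);
    let min_match_id =
        order.iter().position(|&sid| is_match[sid]).unwrap_or(order.len());
    (order, min_match_id)
}

/// After the shuffle, "is this (remapped) state a match state?" is one compare.
fn is_match_state(sid: usize, min_match_id: usize) -> bool {
    sid >= min_match_id
}
```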
- min_match_id: StateID, - /// The alphabet of this DFA, split into equivalence classes. Bytes in the - /// same equivalence class can never discriminate between a match and a - /// non-match. - classes: ByteClasses, - /// The number of elements in each state in the transition table. This may - /// be less than the stride, since the stride is always a power of 2 and - /// the alphabet length can be anything up to and including 256. - alphabet_len: usize, - /// The number of columns in the transition table, expressed as a power of - /// 2. - stride2: usize, - /// The offset at which the PatternEpsilons for a match state is stored in - /// the transition table. - /// - /// PERF: One wonders whether it would be better to put this in a separate - /// allocation, since only match states have a non-empty PatternEpsilons - /// and the number of match states tends be dwarfed by the number of - /// non-match states. So this would save '8*len(non_match_states)' for each - /// DFA. The question is whether moving this to a different allocation will - /// lead to a perf hit during searches. You might think dealing with match - /// states is rare, but some regexes spend a lot of time in match states - /// gobbling up input. But... match state handling is already somewhat - /// expensive, so maybe this wouldn't do much? Either way, it's worth - /// experimenting. - pateps_offset: usize, - /// The first explicit slot index. This refers to the first slot appearing - /// immediately after the last implicit slot. It is always 'patterns.len() - /// * 2'. - /// - /// We record this because we only store the explicit slots in our DFA - /// transition table that need to be saved. Implicit slots are handled - /// automatically as part of the search. - explicit_slot_start: usize, -} - -impl DFA { - /// Parse the given regular expression using the default configuration and - /// return the corresponding one-pass DFA. - /// - /// If you want a non-default configuration, then use the [`Builder`] to - /// set your own configuration. - /// - /// # Example - /// - /// ``` - /// use regex_automata::{dfa::onepass::DFA, Match}; - /// - /// let re = DFA::new("foo[0-9]+bar")?; - /// let (mut cache, mut caps) = (re.create_cache(), re.create_captures()); - /// - /// re.captures(&mut cache, "foo12345barzzz", &mut caps); - /// assert_eq!(Some(Match::must(0, 0..11)), caps.get_match()); - /// # Ok::<(), Box>(()) - /// ``` - #[cfg(feature = "syntax")] - #[inline] - pub fn new(pattern: &str) -> Result { - DFA::builder().build(pattern) - } - - /// Like `new`, but parses multiple patterns into a single "multi regex." - /// This similarly uses the default regex configuration. - /// - /// # Example - /// - /// ``` - /// use regex_automata::{dfa::onepass::DFA, Match}; - /// - /// let re = DFA::new_many(&["[a-z]+", "[0-9]+"])?; - /// let (mut cache, mut caps) = (re.create_cache(), re.create_captures()); - /// - /// re.captures(&mut cache, "abc123", &mut caps); - /// assert_eq!(Some(Match::must(0, 0..3)), caps.get_match()); - /// - /// re.captures(&mut cache, "123abc", &mut caps); - /// assert_eq!(Some(Match::must(1, 0..3)), caps.get_match()); - /// - /// # Ok::<(), Box>(()) - /// ``` - #[cfg(feature = "syntax")] - #[inline] - pub fn new_many>(patterns: &[P]) -> Result { - DFA::builder().build_many(patterns) - } - - /// Like `new`, but builds a one-pass DFA directly from an NFA. This is - /// useful if you already have an NFA, or even if you hand-assembled the - /// NFA. 
- /// - /// # Example - /// - /// This shows how to hand assemble a regular expression via its HIR, - /// compile an NFA from it and build a one-pass DFA from the NFA. - /// - /// ``` - /// use regex_automata::{ - /// dfa::onepass::DFA, - /// nfa::thompson::NFA, - /// Match, - /// }; - /// use regex_syntax::hir::{Hir, Class, ClassBytes, ClassBytesRange}; - /// - /// let hir = Hir::class(Class::Bytes(ClassBytes::new(vec![ - /// ClassBytesRange::new(b'0', b'9'), - /// ClassBytesRange::new(b'A', b'Z'), - /// ClassBytesRange::new(b'_', b'_'), - /// ClassBytesRange::new(b'a', b'z'), - /// ]))); - /// - /// let config = NFA::config().nfa_size_limit(Some(1_000)); - /// let nfa = NFA::compiler().configure(config).build_from_hir(&hir)?; - /// - /// let re = DFA::new_from_nfa(nfa)?; - /// let (mut cache, mut caps) = (re.create_cache(), re.create_captures()); - /// let expected = Some(Match::must(0, 0..1)); - /// re.captures(&mut cache, "A", &mut caps); - /// assert_eq!(expected, caps.get_match()); - /// - /// # Ok::<(), Box>(()) - /// ``` - pub fn new_from_nfa(nfa: NFA) -> Result { - DFA::builder().build_from_nfa(nfa) - } - - /// Create a new one-pass DFA that matches every input. - /// - /// # Example - /// - /// ``` - /// use regex_automata::{dfa::onepass::DFA, Match}; - /// - /// let dfa = DFA::always_match()?; - /// let mut cache = dfa.create_cache(); - /// let mut caps = dfa.create_captures(); - /// - /// let expected = Match::must(0, 0..0); - /// dfa.captures(&mut cache, "", &mut caps); - /// assert_eq!(Some(expected), caps.get_match()); - /// dfa.captures(&mut cache, "foo", &mut caps); - /// assert_eq!(Some(expected), caps.get_match()); - /// # Ok::<(), Box>(()) - /// ``` - pub fn always_match() -> Result { - let nfa = thompson::NFA::always_match(); - Builder::new().build_from_nfa(nfa) - } - - /// Create a new one-pass DFA that never matches any input. - /// - /// # Example - /// - /// ``` - /// use regex_automata::dfa::onepass::DFA; - /// - /// let dfa = DFA::never_match()?; - /// let mut cache = dfa.create_cache(); - /// let mut caps = dfa.create_captures(); - /// - /// dfa.captures(&mut cache, "", &mut caps); - /// assert_eq!(None, caps.get_match()); - /// dfa.captures(&mut cache, "foo", &mut caps); - /// assert_eq!(None, caps.get_match()); - /// # Ok::<(), Box>(()) - /// ``` - pub fn never_match() -> Result { - let nfa = thompson::NFA::never_match(); - Builder::new().build_from_nfa(nfa) - } - - /// Return a default configuration for a DFA. - /// - /// This is a convenience routine to avoid needing to import the `Config` - /// type when customizing the construction of a DFA. - /// - /// # Example - /// - /// This example shows how to change the match semantics of this DFA from - /// its default "leftmost first" to "all." When using "all," non-greediness - /// doesn't apply and neither does preference order matching. Instead, the - /// longest match possible is always returned. (Although, by construction, - /// it's impossible for a one-pass DFA to have a different answer for - /// "preference order" vs "longest match.") - /// - /// ``` - /// use regex_automata::{dfa::onepass::DFA, Match, MatchKind}; - /// - /// let re = DFA::builder() - /// .configure(DFA::config().match_kind(MatchKind::All)) - /// .build(r"(abc)+?")?; - /// let mut cache = re.create_cache(); - /// let mut caps = re.create_captures(); - /// - /// re.captures(&mut cache, "abcabc", &mut caps); - /// // Normally, the non-greedy repetition would give us a 0..3 match. 
- /// assert_eq!(Some(Match::must(0, 0..6)), caps.get_match()); - /// # Ok::<(), Box>(()) - /// ``` - #[inline] - pub fn config() -> Config { - Config::new() - } - - /// Return a builder for configuring the construction of a DFA. - /// - /// This is a convenience routine to avoid needing to import the - /// [`Builder`] type in common cases. - /// - /// # Example - /// - /// This example shows how to use the builder to disable UTF-8 mode. - /// - /// ``` - /// # if cfg!(miri) { return Ok(()); } // miri takes too long - /// use regex_automata::{ - /// dfa::onepass::DFA, - /// nfa::thompson, - /// util::syntax, - /// Match, - /// }; - /// - /// let re = DFA::builder() - /// .syntax(syntax::Config::new().utf8(false)) - /// .thompson(thompson::Config::new().utf8(false)) - /// .build(r"foo(?-u:[^b])ar.*")?; - /// let (mut cache, mut caps) = (re.create_cache(), re.create_captures()); - /// - /// let haystack = b"foo\xFFarzz\xE2\x98\xFF\n"; - /// let expected = Some(Match::must(0, 0..8)); - /// re.captures(&mut cache, haystack, &mut caps); - /// assert_eq!(expected, caps.get_match()); - /// - /// # Ok::<(), Box>(()) - /// ``` - #[inline] - pub fn builder() -> Builder { - Builder::new() - } - - /// Create a new empty set of capturing groups that is guaranteed to be - /// valid for the search APIs on this DFA. - /// - /// A `Captures` value created for a specific DFA cannot be used with any - /// other DFA. - /// - /// This is a convenience function for [`Captures::all`]. See the - /// [`Captures`] documentation for an explanation of its alternative - /// constructors that permit the DFA to do less work during a search, and - /// thus might make it faster. - #[inline] - pub fn create_captures(&self) -> Captures { - Captures::all(self.nfa.group_info().clone()) - } - - /// Create a new cache for this DFA. - /// - /// The cache returned should only be used for searches for this - /// DFA. If you want to reuse the cache for another DFA, then you - /// must call [`Cache::reset`] with that DFA (or, equivalently, - /// [`DFA::reset_cache`]). - #[inline] - pub fn create_cache(&self) -> Cache { - Cache::new(self) - } - - /// Reset the given cache such that it can be used for searching with the - /// this DFA (and only this DFA). - /// - /// A cache reset permits reusing memory already allocated in this cache - /// with a different DFA. - /// - /// # Example - /// - /// This shows how to re-purpose a cache for use with a different DFA. - /// - /// ``` - /// # if cfg!(miri) { return Ok(()); } // miri takes too long - /// use regex_automata::{dfa::onepass::DFA, Match}; - /// - /// let re1 = DFA::new(r"\w")?; - /// let re2 = DFA::new(r"\W")?; - /// let mut caps1 = re1.create_captures(); - /// let mut caps2 = re2.create_captures(); - /// - /// let mut cache = re1.create_cache(); - /// assert_eq!( - /// Some(Match::must(0, 0..2)), - /// { re1.captures(&mut cache, "Δ", &mut caps1); caps1.get_match() }, - /// ); - /// - /// // Using 'cache' with re2 is not allowed. It may result in panics or - /// // incorrect results. In order to re-purpose the cache, we must reset - /// // it with the one-pass DFA we'd like to use it with. - /// // - /// // Similarly, after this reset, using the cache with 're1' is also not - /// // allowed. 
- /// re2.reset_cache(&mut cache); - /// assert_eq!( - /// Some(Match::must(0, 0..3)), - /// { re2.captures(&mut cache, "☃", &mut caps2); caps2.get_match() }, - /// ); - /// - /// # Ok::<(), Box>(()) - /// ``` - #[inline] - pub fn reset_cache(&self, cache: &mut Cache) { - cache.reset(self); - } - - /// Return the config for this one-pass DFA. - #[inline] - pub fn get_config(&self) -> &Config { - &self.config - } - - /// Returns a reference to the underlying NFA. - #[inline] - pub fn get_nfa(&self) -> &NFA { - &self.nfa - } - - /// Returns the total number of patterns compiled into this DFA. - /// - /// In the case of a DFA that contains no patterns, this returns `0`. - #[inline] - pub fn pattern_len(&self) -> usize { - self.get_nfa().pattern_len() - } - - /// Returns the total number of states in this one-pass DFA. - /// - /// Note that unlike dense or sparse DFAs, a one-pass DFA does not expose - /// a low level DFA API. Therefore, this routine has little use other than - /// being informational. - #[inline] - pub fn state_len(&self) -> usize { - self.table.len() >> self.stride2() - } - - /// Returns the total number of elements in the alphabet for this DFA. - /// - /// That is, this returns the total number of transitions that each - /// state in this DFA must have. The maximum alphabet size is 256, which - /// corresponds to each possible byte value. - /// - /// The alphabet size may be less than 256 though, and unless - /// [`Config::byte_classes`] is disabled, it is typically must less than - /// 256. Namely, bytes are grouped into equivalence classes such that no - /// two bytes in the same class can distinguish a match from a non-match. - /// For example, in the regex `^[a-z]+$`, the ASCII bytes `a-z` could - /// all be in the same equivalence class. This leads to a massive space - /// savings. - /// - /// Note though that the alphabet length does _not_ necessarily equal the - /// total stride space taken up by a single DFA state in the transition - /// table. Namely, for performance reasons, the stride is always the - /// smallest power of two that is greater than or equal to the alphabet - /// length. For this reason, [`DFA::stride`] or [`DFA::stride2`] are - /// often more useful. The alphabet length is typically useful only for - /// informational purposes. - /// - /// Note also that unlike dense or sparse DFAs, a one-pass DFA does - /// not have a special end-of-input (EOI) transition. This is because - /// a one-pass DFA handles look-around assertions explicitly (like the - /// [`PikeVM`](crate::nfa::thompson::pikevm::PikeVM)) and does not build - /// them into the transitions of the DFA. - #[inline] - pub fn alphabet_len(&self) -> usize { - self.alphabet_len - } - - /// Returns the total stride for every state in this DFA, expressed as the - /// exponent of a power of 2. The stride is the amount of space each state - /// takes up in the transition table, expressed as a number of transitions. - /// (Unused transitions map to dead states.) - /// - /// The stride of a DFA is always equivalent to the smallest power of - /// 2 that is greater than or equal to the DFA's alphabet length. This - /// definition uses extra space, but possibly permits faster translation - /// between state identifiers and their corresponding offsets in this DFA's - /// transition table. - /// - /// For example, if the DFA's stride is 16 transitions, then its `stride2` - /// is `4` since `2^4 = 16`. 
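Concretely, a state's row begins at `sid << stride2` and a haystack byte picks a column through its equivalence class, which is why a power-of-two stride turns the offset computation into a shift rather than a multiply. A minimal sketch of that indexing, assuming a flat `u32` table and a 256-entry class map rather than the crate's `Transition` and `ByteClasses` types:

```
/// Toy transition lookup using the stride scheme described above: each state
/// owns `1 << stride2` consecutive entries, and a byte is first mapped to its
/// equivalence class to select the column within that row.
fn next_state(table: &[u32], classes: &[u8; 256], stride2: usize, sid: usize, byte: u8) -> u32 {
    let row = sid << stride2;
    let col = classes[byte as usize] as usize;
    table[row + col]
}
```

The cost of this layout is the unused columns between the alphabet length and the next power of two, the same dense-versus-compact trade-off discussed elsewhere in these docs.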
- /// - /// The minimum `stride2` value is `1` (corresponding to a stride of `2`) - /// while the maximum `stride2` value is `9` (corresponding to a stride - /// of `512`). The maximum in theory should be `8`, but because of some - /// implementation quirks that may be relaxed in the future, it is one more - /// than `8`. (Do note that a maximal stride is incredibly rare, as it - /// would imply that there is almost no redundant in the regex pattern.) - /// - /// Note that unlike dense or sparse DFAs, a one-pass DFA does not expose - /// a low level DFA API. Therefore, this routine has little use other than - /// being informational. - #[inline] - pub fn stride2(&self) -> usize { - self.stride2 - } - - /// Returns the total stride for every state in this DFA. This corresponds - /// to the total number of transitions used by each state in this DFA's - /// transition table. - /// - /// Please see [`DFA::stride2`] for more information. In particular, this - /// returns the stride as the number of transitions, where as `stride2` - /// returns it as the exponent of a power of 2. - /// - /// Note that unlike dense or sparse DFAs, a one-pass DFA does not expose - /// a low level DFA API. Therefore, this routine has little use other than - /// being informational. - #[inline] - pub fn stride(&self) -> usize { - 1 << self.stride2() - } - - /// Returns the memory usage, in bytes, of this DFA. - /// - /// The memory usage is computed based on the number of bytes used to - /// represent this DFA. - /// - /// This does **not** include the stack size used up by this DFA. To - /// compute that, use `std::mem::size_of::()`. - #[inline] - pub fn memory_usage(&self) -> usize { - use core::mem::size_of; - - self.table.len() * size_of::() - + self.starts.len() * size_of::() - } -} - -impl DFA { - /// Executes an anchored leftmost forward search, and returns true if and - /// only if this one-pass DFA matches the given haystack. - /// - /// This routine may short circuit if it knows that scanning future - /// input will never lead to a different result. In particular, if the - /// underlying DFA enters a match state, then this routine will return - /// `true` immediately without inspecting any future input. (Consider how - /// this might make a difference given the regex `a+` on the haystack - /// `aaaaaaaaaaaaaaa`. This routine can stop after it sees the first `a`, - /// but routines like `find` need to continue searching because `+` is - /// greedy by default.) - /// - /// The given `Input` is forcefully set to use [`Anchored::Yes`] if the - /// given configuration was [`Anchored::No`] (which is the default). - /// - /// # Panics - /// - /// This routine panics if the search could not complete. This can occur - /// in the following circumstances: - /// - /// * When the provided `Input` configuration is not supported. For - /// example, by providing an unsupported anchor mode. Concretely, - /// this occurs when using [`Anchored::Pattern`] without enabling - /// [`Config::starts_for_each_pattern`]. - /// - /// When a search panics, callers cannot know whether a match exists or - /// not. - /// - /// Use [`DFA::try_search`] if you want to handle these panics as error - /// values instead. 
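For callers who would rather not risk that panic, the fallible entry point can be used directly. A short sketch in the style of the surrounding examples; it assumes an `Anchored::Pattern` search is requested while `Config::starts_for_each_pattern` is left at its default (disabled), so the search reports an error:

```
use regex_automata::{dfa::onepass::DFA, Anchored, Input, PatternID};

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let re = DFA::new_many(&["[a-z]+", "[0-9]+"])?;
    let (mut cache, mut caps) = (re.create_cache(), re.create_captures());

    // `Anchored::Pattern` requires `starts_for_each_pattern`, which is
    // disabled by default, so `try_search` returns an error here where
    // `is_match`/`captures` would panic.
    let input = Input::new("abc").anchored(Anchored::Pattern(PatternID::must(0)));
    assert!(re.try_search(&mut cache, &input, &mut caps).is_err());
    Ok(())
}
```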
- /// - /// # Example - /// - /// This shows basic usage: - /// - /// ``` - /// use regex_automata::dfa::onepass::DFA; - /// - /// let re = DFA::new("foo[0-9]+bar")?; - /// let mut cache = re.create_cache(); - /// - /// assert!(re.is_match(&mut cache, "foo12345bar")); - /// assert!(!re.is_match(&mut cache, "foobar")); - /// # Ok::<(), Box>(()) - /// ``` - /// - /// # Example: consistency with search APIs - /// - /// `is_match` is guaranteed to return `true` whenever `captures` returns - /// a match. This includes searches that are executed entirely within a - /// codepoint: - /// - /// ``` - /// use regex_automata::{dfa::onepass::DFA, Input}; - /// - /// let re = DFA::new("a*")?; - /// let mut cache = re.create_cache(); - /// - /// assert!(!re.is_match(&mut cache, Input::new("☃").span(1..2))); - /// # Ok::<(), Box>(()) - /// ``` - /// - /// Notice that when UTF-8 mode is disabled, then the above reports a - /// match because the restriction against zero-width matches that split a - /// codepoint has been lifted: - /// - /// ``` - /// use regex_automata::{dfa::onepass::DFA, nfa::thompson::NFA, Input}; - /// - /// let re = DFA::builder() - /// .thompson(NFA::config().utf8(false)) - /// .build("a*")?; - /// let mut cache = re.create_cache(); - /// - /// assert!(re.is_match(&mut cache, Input::new("☃").span(1..2))); - /// # Ok::<(), Box>(()) - /// ``` - #[inline] - pub fn is_match<'h, I: Into>>( - &self, - cache: &mut Cache, - input: I, - ) -> bool { - let mut input = input.into().earliest(true); - if matches!(input.get_anchored(), Anchored::No) { - input.set_anchored(Anchored::Yes); - } - self.try_search_slots(cache, &input, &mut []).unwrap().is_some() - } - - /// Executes an anchored leftmost forward search, and returns a `Match` if - /// and only if this one-pass DFA matches the given haystack. - /// - /// This routine only includes the overall match span. To get access to the - /// individual spans of each capturing group, use [`DFA::captures`]. - /// - /// The given `Input` is forcefully set to use [`Anchored::Yes`] if the - /// given configuration was [`Anchored::No`] (which is the default). - /// - /// # Panics - /// - /// This routine panics if the search could not complete. This can occur - /// in the following circumstances: - /// - /// * When the provided `Input` configuration is not supported. For - /// example, by providing an unsupported anchor mode. Concretely, - /// this occurs when using [`Anchored::Pattern`] without enabling - /// [`Config::starts_for_each_pattern`]. - /// - /// When a search panics, callers cannot know whether a match exists or - /// not. - /// - /// Use [`DFA::try_search`] if you want to handle these panics as error - /// values instead. - /// - /// # Example - /// - /// Leftmost first match semantics corresponds to the match with the - /// smallest starting offset, but where the end offset is determined by - /// preferring earlier branches in the original regular expression. For - /// example, `Sam|Samwise` will match `Sam` in `Samwise`, but `Samwise|Sam` - /// will match `Samwise` in `Samwise`. - /// - /// Generally speaking, the "leftmost first" match is how most backtracking - /// regular expressions tend to work. This is in contrast to POSIX-style - /// regular expressions that yield "leftmost longest" matches. Namely, - /// both `Sam|Samwise` and `Samwise|Sam` match `Samwise` when using - /// leftmost longest semantics. (This crate does not currently support - /// leftmost longest semantics.) 
- /// - /// ``` - /// use regex_automata::{dfa::onepass::DFA, Match}; - /// - /// let re = DFA::new("foo[0-9]+")?; - /// let mut cache = re.create_cache(); - /// let expected = Match::must(0, 0..8); - /// assert_eq!(Some(expected), re.find(&mut cache, "foo12345")); - /// - /// // Even though a match is found after reading the first byte (`a`), - /// // the leftmost first match semantics demand that we find the earliest - /// // match that prefers earlier parts of the pattern over later parts. - /// let re = DFA::new("abc|a")?; - /// let mut cache = re.create_cache(); - /// let expected = Match::must(0, 0..3); - /// assert_eq!(Some(expected), re.find(&mut cache, "abc")); - /// - /// # Ok::<(), Box>(()) - /// ``` - #[inline] - pub fn find<'h, I: Into>>( - &self, - cache: &mut Cache, - input: I, - ) -> Option { - let mut input = input.into(); - if matches!(input.get_anchored(), Anchored::No) { - input.set_anchored(Anchored::Yes); - } - if self.get_nfa().pattern_len() == 1 { - let mut slots = [None, None]; - let pid = - self.try_search_slots(cache, &input, &mut slots).unwrap()?; - let start = slots[0].unwrap().get(); - let end = slots[1].unwrap().get(); - return Some(Match::new(pid, Span { start, end })); - } - let ginfo = self.get_nfa().group_info(); - let slots_len = ginfo.implicit_slot_len(); - let mut slots = vec![None; slots_len]; - let pid = self.try_search_slots(cache, &input, &mut slots).unwrap()?; - let start = slots[pid.as_usize() * 2].unwrap().get(); - let end = slots[pid.as_usize() * 2 + 1].unwrap().get(); - Some(Match::new(pid, Span { start, end })) - } - - /// Executes an anchored leftmost forward search and writes the spans - /// of capturing groups that participated in a match into the provided - /// [`Captures`] value. If no match was found, then [`Captures::is_match`] - /// is guaranteed to return `false`. - /// - /// The given `Input` is forcefully set to use [`Anchored::Yes`] if the - /// given configuration was [`Anchored::No`] (which is the default). - /// - /// # Panics - /// - /// This routine panics if the search could not complete. This can occur - /// in the following circumstances: - /// - /// * When the provided `Input` configuration is not supported. For - /// example, by providing an unsupported anchor mode. Concretely, - /// this occurs when using [`Anchored::Pattern`] without enabling - /// [`Config::starts_for_each_pattern`]. - /// - /// When a search panics, callers cannot know whether a match exists or - /// not. - /// - /// Use [`DFA::try_search`] if you want to handle these panics as error - /// values instead. - /// - /// # Example - /// - /// This shows a simple example of a one-pass regex that extracts - /// capturing group spans. - /// - /// ``` - /// use regex_automata::{dfa::onepass::DFA, Match, Span}; - /// - /// let re = DFA::new( - /// // Notice that we use ASCII here. The corresponding Unicode regex - /// // is sadly not one-pass. 
- /// "(?P[[:alpha:]]+)[[:space:]]+(?P[[:alpha:]]+)", - /// )?; - /// let (mut cache, mut caps) = (re.create_cache(), re.create_captures()); - /// - /// re.captures(&mut cache, "Bruce Springsteen", &mut caps); - /// assert_eq!(Some(Match::must(0, 0..17)), caps.get_match()); - /// assert_eq!(Some(Span::from(0..5)), caps.get_group(1)); - /// assert_eq!(Some(Span::from(6..17)), caps.get_group_by_name("last")); - /// - /// # Ok::<(), Box>(()) - /// ``` - #[inline] - pub fn captures<'h, I: Into>>( - &self, - cache: &mut Cache, - input: I, - caps: &mut Captures, - ) { - let mut input = input.into(); - if matches!(input.get_anchored(), Anchored::No) { - input.set_anchored(Anchored::Yes); - } - self.try_search(cache, &input, caps).unwrap(); - } - - /// Executes an anchored leftmost forward search and writes the spans - /// of capturing groups that participated in a match into the provided - /// [`Captures`] value. If no match was found, then [`Captures::is_match`] - /// is guaranteed to return `false`. - /// - /// The differences with [`DFA::captures`] are: - /// - /// 1. This returns an error instead of panicking if the search fails. - /// 2. Accepts an `&Input` instead of a `Into`. This permits reusing - /// the same input for multiple searches, which _may_ be important for - /// latency. - /// 3. This does not automatically change the [`Anchored`] mode from `No` - /// to `Yes`. Instead, if [`Input::anchored`] is `Anchored::No`, then an - /// error is returned. - /// - /// # Errors - /// - /// This routine errors if the search could not complete. This can occur - /// in the following circumstances: - /// - /// * When the provided `Input` configuration is not supported. For - /// example, by providing an unsupported anchor mode. Concretely, - /// this occurs when using [`Anchored::Pattern`] without enabling - /// [`Config::starts_for_each_pattern`]. - /// - /// When a search returns an error, callers cannot know whether a match - /// exists or not. - /// - /// # Example: specific pattern search - /// - /// This example shows how to build a multi-regex that permits searching - /// for specific patterns. Note that this is somewhat less useful than - /// in other regex engines, since a one-pass DFA by definition has no - /// ambiguity about which pattern can match at a position. That is, if it - /// were possible for two different patterns to match at the same starting - /// position, then the multi-regex would not be one-pass and construction - /// would have failed. - /// - /// Nevertheless, this can still be useful if you only care about matches - /// for a specific pattern, and want the DFA to report "no match" even if - /// some other pattern would have matched. - /// - /// Note that in order to make use of this functionality, - /// [`Config::starts_for_each_pattern`] must be enabled. It is disabled - /// by default since it may result in higher memory usage. - /// - /// ``` - /// use regex_automata::{ - /// dfa::onepass::DFA, Anchored, Input, Match, PatternID, - /// }; - /// - /// let re = DFA::builder() - /// .configure(DFA::config().starts_for_each_pattern(true)) - /// .build_many(&["[a-z]+", "[0-9]+"])?; - /// let (mut cache, mut caps) = (re.create_cache(), re.create_captures()); - /// let haystack = "123abc"; - /// let input = Input::new(haystack).anchored(Anchored::Yes); - /// - /// // A normal multi-pattern search will show pattern 1 matches. 
- /// re.try_search(&mut cache, &input, &mut caps)?; - /// assert_eq!(Some(Match::must(1, 0..3)), caps.get_match()); - /// - /// // If we only want to report pattern 0 matches, then we'll get no - /// // match here. - /// let input = input.anchored(Anchored::Pattern(PatternID::must(0))); - /// re.try_search(&mut cache, &input, &mut caps)?; - /// assert_eq!(None, caps.get_match()); - /// - /// # Ok::<(), Box>(()) - /// ``` - /// - /// # Example: specifying the bounds of a search - /// - /// This example shows how providing the bounds of a search can produce - /// different results than simply sub-slicing the haystack. - /// - /// ``` - /// # if cfg!(miri) { return Ok(()); } // miri takes too long - /// use regex_automata::{dfa::onepass::DFA, Anchored, Input, Match}; - /// - /// // one-pass DFAs fully support Unicode word boundaries! - /// // A sad joke is that a Unicode aware regex like \w+\s is not one-pass. - /// // :-( - /// let re = DFA::new(r"\b[0-9]{3}\b")?; - /// let (mut cache, mut caps) = (re.create_cache(), re.create_captures()); - /// let haystack = "foo123bar"; - /// - /// // Since we sub-slice the haystack, the search doesn't know about - /// // the larger context and assumes that `123` is surrounded by word - /// // boundaries. And of course, the match position is reported relative - /// // to the sub-slice as well, which means we get `0..3` instead of - /// // `3..6`. - /// let expected = Some(Match::must(0, 0..3)); - /// let input = Input::new(&haystack[3..6]).anchored(Anchored::Yes); - /// re.try_search(&mut cache, &input, &mut caps)?; - /// assert_eq!(expected, caps.get_match()); - /// - /// // But if we provide the bounds of the search within the context of the - /// // entire haystack, then the search can take the surrounding context - /// // into account. (And if we did find a match, it would be reported - /// // as a valid offset into `haystack` instead of its sub-slice.) - /// let expected = None; - /// let input = Input::new(haystack).range(3..6).anchored(Anchored::Yes); - /// re.try_search(&mut cache, &input, &mut caps)?; - /// assert_eq!(expected, caps.get_match()); - /// - /// # Ok::<(), Box>(()) - /// ``` - #[inline] - pub fn try_search( - &self, - cache: &mut Cache, - input: &Input<'_>, - caps: &mut Captures, - ) -> Result<(), MatchError> { - let pid = self.try_search_slots(cache, input, caps.slots_mut())?; - caps.set_pattern(pid); - Ok(()) - } - - /// Executes an anchored leftmost forward search and writes the spans - /// of capturing groups that participated in a match into the provided - /// `slots`, and returns the matching pattern ID. The contents of the - /// slots for patterns other than the matching pattern are unspecified. If - /// no match was found, then `None` is returned and the contents of all - /// `slots` is unspecified. - /// - /// This is like [`DFA::try_search`], but it accepts a raw slots slice - /// instead of a `Captures` value. This is useful in contexts where you - /// don't want or need to allocate a `Captures`. - /// - /// It is legal to pass _any_ number of slots to this routine. If the regex - /// engine would otherwise write a slot offset that doesn't fit in the - /// provided slice, then it is simply skipped. In general though, there are - /// usually three slice lengths you might want to use: - /// - /// * An empty slice, if you only care about which pattern matched. 
- /// * A slice with - /// [`pattern_len() * 2`](crate::dfa::onepass::DFA::pattern_len) - /// slots, if you only care about the overall match spans for each matching - /// pattern. - /// * A slice with - /// [`slot_len()`](crate::util::captures::GroupInfo::slot_len) slots, which - /// permits recording match offsets for every capturing group in every - /// pattern. - /// - /// # Errors - /// - /// This routine errors if the search could not complete. This can occur - /// in the following circumstances: - /// - /// * When the provided `Input` configuration is not supported. For - /// example, by providing an unsupported anchor mode. Concretely, - /// this occurs when using [`Anchored::Pattern`] without enabling - /// [`Config::starts_for_each_pattern`]. - /// - /// When a search returns an error, callers cannot know whether a match - /// exists or not. - /// - /// # Example - /// - /// This example shows how to find the overall match offsets in a - /// multi-pattern search without allocating a `Captures` value. Indeed, we - /// can put our slots right on the stack. - /// - /// ``` - /// use regex_automata::{dfa::onepass::DFA, Anchored, Input, PatternID}; - /// - /// let re = DFA::new_many(&[ - /// r"[a-zA-Z]+", - /// r"[0-9]+", - /// ])?; - /// let mut cache = re.create_cache(); - /// let input = Input::new("123").anchored(Anchored::Yes); - /// - /// // We only care about the overall match offsets here, so we just - /// // allocate two slots for each pattern. Each slot records the start - /// // and end of the match. - /// let mut slots = [None; 4]; - /// let pid = re.try_search_slots(&mut cache, &input, &mut slots)?; - /// assert_eq!(Some(PatternID::must(1)), pid); - /// - /// // The overall match offsets are always at 'pid * 2' and 'pid * 2 + 1'. - /// // See 'GroupInfo' for more details on the mapping between groups and - /// // slot indices. - /// let slot_start = pid.unwrap().as_usize() * 2; - /// let slot_end = slot_start + 1; - /// assert_eq!(Some(0), slots[slot_start].map(|s| s.get())); - /// assert_eq!(Some(3), slots[slot_end].map(|s| s.get())); - /// - /// # Ok::<(), Box>(()) - /// ``` - #[inline] - pub fn try_search_slots( - &self, - cache: &mut Cache, - input: &Input<'_>, - slots: &mut [Option], - ) -> Result, MatchError> { - let utf8empty = self.get_nfa().has_empty() && self.get_nfa().is_utf8(); - if !utf8empty { - return self.try_search_slots_imp(cache, input, slots); - } - // See PikeVM::try_search_slots for why we do this. - let min = self.get_nfa().group_info().implicit_slot_len(); - if slots.len() >= min { - return self.try_search_slots_imp(cache, input, slots); - } - if self.get_nfa().pattern_len() == 1 { - let mut enough = [None, None]; - let got = self.try_search_slots_imp(cache, input, &mut enough)?; - // This is OK because we know `enough_slots` is strictly bigger - // than `slots`, otherwise this special case isn't reached. - slots.copy_from_slice(&enough[..slots.len()]); - return Ok(got); - } - let mut enough = vec![None; min]; - let got = self.try_search_slots_imp(cache, input, &mut enough)?; - // This is OK because we know `enough_slots` is strictly bigger than - // `slots`, otherwise this special case isn't reached. - slots.copy_from_slice(&enough[..slots.len()]); - Ok(got) - } - - #[inline(never)] - fn try_search_slots_imp( - &self, - cache: &mut Cache, - input: &Input<'_>, - slots: &mut [Option], - ) -> Result, MatchError> { - let utf8empty = self.get_nfa().has_empty() && self.get_nfa().is_utf8(); - match self.search_imp(cache, input, slots)? 
{ - None => return Ok(None), - Some(pid) if !utf8empty => return Ok(Some(pid)), - Some(pid) => { - // These slot indices are always correct because we know our - // 'pid' is valid and thus we know that the slot indices for it - // are valid. - let slot_start = pid.as_usize().wrapping_mul(2); - let slot_end = slot_start.wrapping_add(1); - // OK because we know we have a match and we know our caller - // provided slots are big enough (which we make true above if - // the caller didn't). Namely, we're only here when 'utf8empty' - // is true, and when that's true, we require slots for every - // pattern. - let start = slots[slot_start].unwrap().get(); - let end = slots[slot_end].unwrap().get(); - // If our match splits a codepoint, then we cannot report is - // as a match. And since one-pass DFAs only support anchored - // searches, we don't try to skip ahead to find the next match. - // We can just quit with nothing. - if start == end && !input.is_char_boundary(start) { - return Ok(None); - } - Ok(Some(pid)) - } - } - } -} - -impl DFA { - fn search_imp( - &self, - cache: &mut Cache, - input: &Input<'_>, - slots: &mut [Option], - ) -> Result, MatchError> { - // PERF: Some ideas. I ran out of steam after my initial impl to try - // many of these. - // - // 1) Try doing more state shuffling. Right now, all we do is push - // match states to the end of the transition table so that we can do - // 'if sid >= self.min_match_id' to know whether we're in a match - // state or not. But what about doing something like dense DFAs and - // pushing dead, match and states with captures/looks all toward the - // beginning of the transition table. Then we could do 'if sid <= - // self.max_special_id', in which case, we need to do some special - // handling of some sort. Otherwise, we get the happy path, just - // like in a DFA search. The main argument against this is that the - // one-pass DFA is likely to be used most often with capturing groups - // and if capturing groups are common, then this might wind up being a - // pessimization. - // - // 2) Consider moving 'PatternEpsilons' out of the transition table. - // It is only needed for match states and usually a small minority of - // states are match states. Therefore, we're using an extra 'u64' for - // most states. - // - // 3) I played around with the match state handling and it seems like - // there is probably a lot left on the table for improvement. The - // key tension is that the 'find_match' routine is a giant mess, but - // splitting it out into a non-inlineable function is a non-starter - // because the match state might consume input, so 'find_match' COULD - // be called quite a lot, and a function call at that point would trash - // perf. In theory, we could detect whether a match state consumes - // input and then specialize our search routine based on that. In that - // case, maybe an extra function call is OK, but even then, it might be - // too much of a latency hit. Another idea is to just try and figure - // out how to reduce the code size of 'find_match'. RE2 has a trick - // here where the match handling isn't done if we know the next byte of - // input yields a match too. Maybe we adopt that? - // - // This just might be a tricky DFA to optimize. - - if input.is_done() { - return Ok(None); - } - // We unfortunately have a bit of book-keeping to do to set things - // up. We do have to setup our cache and clear all of our slots. 
In - // particular, clearing the slots is necessary for the case where we - // report a match, but one of the capturing groups didn't participate - // in the match but had a span set from a previous search. That would - // be bad. In theory, we could avoid all this slot clearing if we knew - // that every slot was always activated for every match. Then we would - // know they would always be overwritten when a match is found. - let explicit_slots_len = core::cmp::min( - Slots::LIMIT, - slots.len().saturating_sub(self.explicit_slot_start), - ); - cache.setup_search(explicit_slots_len); - for slot in cache.explicit_slots() { - *slot = None; - } - for slot in slots.iter_mut() { - *slot = None; - } - // We set the starting slots for every pattern up front. This does - // increase our latency somewhat, but it avoids having to do it every - // time we see a match state (which could be many times in a single - // search if the match state consumes input). - for pid in self.nfa.patterns() { - let i = pid.as_usize() * 2; - if i >= slots.len() { - break; - } - slots[i] = NonMaxUsize::new(input.start()); - } - let mut pid = None; - let mut next_sid = match input.get_anchored() { - Anchored::Yes => self.start(), - Anchored::Pattern(pid) => self.start_pattern(pid)?, - Anchored::No => { - // If the regex is itself always anchored, then we're fine, - // even if the search is configured to be unanchored. - if !self.nfa.is_always_start_anchored() { - return Err(MatchError::unsupported_anchored( - Anchored::No, - )); - } - self.start() - } - }; - let leftmost_first = - matches!(self.config.get_match_kind(), MatchKind::LeftmostFirst); - for at in input.start()..input.end() { - let sid = next_sid; - let trans = self.transition(sid, input.haystack()[at]); - next_sid = trans.state_id(); - let epsilons = trans.epsilons(); - if sid >= self.min_match_id { - if self.find_match(cache, input, at, sid, slots, &mut pid) { - if input.get_earliest() - || (leftmost_first && trans.match_wins()) - { - return Ok(pid); - } - } - } - if sid == DEAD - || (!epsilons.looks().is_empty() - && !self.nfa.look_matcher().matches_set_inline( - epsilons.looks(), - input.haystack(), - at, - )) - { - return Ok(pid); - } - epsilons.slots().apply(at, cache.explicit_slots()); - } - if next_sid >= self.min_match_id { - self.find_match( - cache, - input, - input.end(), - next_sid, - slots, - &mut pid, - ); - } - Ok(pid) - } - - /// Assumes 'sid' is a match state and looks for whether a match can - /// be reported. If so, appropriate offsets are written to 'slots' and - /// 'matched_pid' is set to the matching pattern ID. - /// - /// Even when 'sid' is a match state, it's possible that a match won't - /// be reported. For example, when the conditional epsilon transitions - /// leading to the match state aren't satisfied at the given position in - /// the haystack. - #[cfg_attr(feature = "perf-inline", inline(always))] - fn find_match( - &self, - cache: &mut Cache, - input: &Input<'_>, - at: usize, - sid: StateID, - slots: &mut [Option], - matched_pid: &mut Option, - ) -> bool { - debug_assert!(sid >= self.min_match_id); - let pateps = self.pattern_epsilons(sid); - let epsilons = pateps.epsilons(); - if !epsilons.looks().is_empty() - && !self.nfa.look_matcher().matches_set_inline( - epsilons.looks(), - input.haystack(), - at, - ) - { - return false; - } - let pid = pateps.pattern_id_unchecked(); - // This calculation is always correct because we know our 'pid' is - // valid and thus we know that the slot indices for it are valid. 
- let slot_end = pid.as_usize().wrapping_mul(2).wrapping_add(1); - // Set the implicit 'end' slot for the matching pattern. (The 'start' - // slot was set at the beginning of the search.) - if slot_end < slots.len() { - slots[slot_end] = NonMaxUsize::new(at); - } - // If the caller provided enough room, copy the previously recorded - // explicit slots from our scratch space to the caller provided slots. - // We *also* need to set any explicit slots that are active as part of - // the path to the match state. - if self.explicit_slot_start < slots.len() { - // NOTE: The 'cache.explicit_slots()' slice is setup at the - // beginning of every search such that it is guaranteed to return a - // slice of length equivalent to 'slots[explicit_slot_start..]'. - slots[self.explicit_slot_start..] - .copy_from_slice(cache.explicit_slots()); - epsilons.slots().apply(at, &mut slots[self.explicit_slot_start..]); - } - *matched_pid = Some(pid); - true - } -} - -impl DFA { - /// Returns the anchored start state for matching any pattern in this DFA. - fn start(&self) -> StateID { - self.starts[0] - } - - /// Returns the anchored start state for matching the given pattern. If - /// 'starts_for_each_pattern' - /// was not enabled, then this returns an error. If the given pattern is - /// not in this DFA, then `Ok(None)` is returned. - fn start_pattern(&self, pid: PatternID) -> Result { - if !self.config.get_starts_for_each_pattern() { - return Err(MatchError::unsupported_anchored(Anchored::Pattern( - pid, - ))); - } - // 'starts' always has non-zero length. The first entry is always the - // anchored starting state for all patterns, and the following entries - // are optional and correspond to the anchored starting states for - // patterns at pid+1. Thus, starts.len()-1 corresponds to the total - // number of patterns that one can explicitly search for. (And it may - // be zero.) - Ok(self.starts.get(pid.one_more()).copied().unwrap_or(DEAD)) - } - - /// Returns the transition from the given state ID and byte of input. The - /// transition includes the next state ID, the slots that should be saved - /// and any conditional epsilon transitions that must be satisfied in order - /// to take this transition. - fn transition(&self, sid: StateID, byte: u8) -> Transition { - let offset = sid.as_usize() << self.stride2(); - let class = self.classes.get(byte).as_usize(); - self.table[offset + class] - } - - /// Set the transition from the given state ID and byte of input to the - /// transition given. - fn set_transition(&mut self, sid: StateID, byte: u8, to: Transition) { - let offset = sid.as_usize() << self.stride2(); - let class = self.classes.get(byte).as_usize(); - self.table[offset + class] = to; - } - - /// Return an iterator of "sparse" transitions for the given state ID. - /// "sparse" in this context means that consecutive transitions that are - /// equivalent are returned as one group, and transitions to the DEAD state - /// are ignored. - /// - /// This winds up being useful for debug printing, since it's much terser - /// to display runs of equivalent transitions than the transition for every - /// possible byte value. Indeed, in practice, it's very common for runs - /// of equivalent transitions to appear. 
- fn sparse_transitions(&self, sid: StateID) -> SparseTransitionIter<'_> { - let start = sid.as_usize() << self.stride2(); - let end = start + self.alphabet_len(); - SparseTransitionIter { - it: self.table[start..end].iter().enumerate(), - cur: None, - } - } - - /// Return the pattern epsilons for the given state ID. - /// - /// If the given state ID does not correspond to a match state ID, then the - /// pattern epsilons returned is empty. - fn pattern_epsilons(&self, sid: StateID) -> PatternEpsilons { - let offset = sid.as_usize() << self.stride2(); - PatternEpsilons(self.table[offset + self.pateps_offset].0) - } - - /// Set the pattern epsilons for the given state ID. - fn set_pattern_epsilons(&mut self, sid: StateID, pateps: PatternEpsilons) { - let offset = sid.as_usize() << self.stride2(); - self.table[offset + self.pateps_offset] = Transition(pateps.0); - } - - /// Returns the state ID prior to the one given. This returns None if the - /// given ID is the first DFA state. - fn prev_state_id(&self, id: StateID) -> Option { - if id == DEAD { - None - } else { - // CORRECTNESS: Since 'id' is not the first state, subtracting 1 - // is always valid. - Some(StateID::new_unchecked(id.as_usize().checked_sub(1).unwrap())) - } - } - - /// Returns the state ID of the last state in this DFA's transition table. - /// "last" in this context means the last state to appear in memory, i.e., - /// the one with the greatest ID. - fn last_state_id(&self) -> StateID { - // CORRECTNESS: A DFA table is always non-empty since it always at - // least contains a DEAD state. Since every state has the same stride, - // we can just compute what the "next" state ID would have been and - // then subtract 1 from it. - StateID::new_unchecked( - (self.table.len() >> self.stride2()).checked_sub(1).unwrap(), - ) - } - - /// Move the transitions from 'id1' to 'id2' and vice versa. - /// - /// WARNING: This does not update the rest of the transition table to have - /// transitions to 'id1' changed to 'id2' and vice versa. This merely moves - /// the states in memory. - pub(super) fn swap_states(&mut self, id1: StateID, id2: StateID) { - let o1 = id1.as_usize() << self.stride2(); - let o2 = id2.as_usize() << self.stride2(); - for b in 0..self.stride() { - self.table.swap(o1 + b, o2 + b); - } - } - - /// Map all state IDs in this DFA (transition table + start states) - /// according to the closure given. 
- pub(super) fn remap(&mut self, map: impl Fn(StateID) -> StateID) { - for i in 0..self.state_len() { - let offset = i << self.stride2(); - for b in 0..self.alphabet_len() { - let next = self.table[offset + b].state_id(); - self.table[offset + b].set_state_id(map(next)); - } - } - for i in 0..self.starts.len() { - self.starts[i] = map(self.starts[i]); - } - } -} - -impl core::fmt::Debug for DFA { - fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { - fn debug_state_transitions( - f: &mut core::fmt::Formatter, - dfa: &DFA, - sid: StateID, - ) -> core::fmt::Result { - for (i, (start, end, trans)) in - dfa.sparse_transitions(sid).enumerate() - { - let next = trans.state_id(); - if i > 0 { - write!(f, ", ")?; - } - if start == end { - write!( - f, - "{:?} => {:?}", - DebugByte(start), - next.as_usize(), - )?; - } else { - write!( - f, - "{:?}-{:?} => {:?}", - DebugByte(start), - DebugByte(end), - next.as_usize(), - )?; - } - if trans.match_wins() { - write!(f, " (MW)")?; - } - if !trans.epsilons().is_empty() { - write!(f, " ({:?})", trans.epsilons())?; - } - } - Ok(()) - } - - writeln!(f, "onepass::DFA(")?; - for index in 0..self.state_len() { - let sid = StateID::must(index); - let pateps = self.pattern_epsilons(sid); - if sid == DEAD { - write!(f, "D ")?; - } else if pateps.pattern_id().is_some() { - write!(f, "* ")?; - } else { - write!(f, " ")?; - } - write!(f, "{:06?}", sid.as_usize())?; - if !pateps.is_empty() { - write!(f, " ({pateps:?})")?; - } - write!(f, ": ")?; - debug_state_transitions(f, self, sid)?; - write!(f, "\n")?; - } - writeln!(f, "")?; - for (i, &sid) in self.starts.iter().enumerate() { - if i == 0 { - writeln!(f, "START(ALL): {:?}", sid.as_usize())?; - } else { - writeln!( - f, - "START(pattern: {:?}): {:?}", - i - 1, - sid.as_usize(), - )?; - } - } - writeln!(f, "state length: {:?}", self.state_len())?; - writeln!(f, "pattern length: {:?}", self.pattern_len())?; - writeln!(f, ")")?; - Ok(()) - } -} - -/// An iterator over groups of consecutive equivalent transitions in a single -/// state. -#[derive(Debug)] -struct SparseTransitionIter<'a> { - it: core::iter::Enumerate>, - cur: Option<(u8, u8, Transition)>, -} - -impl<'a> Iterator for SparseTransitionIter<'a> { - type Item = (u8, u8, Transition); - - fn next(&mut self) -> Option<(u8, u8, Transition)> { - while let Some((b, &trans)) = self.it.next() { - // Fine because we'll never have more than u8::MAX transitions in - // one state. - let b = b.as_u8(); - let (prev_start, prev_end, prev_trans) = match self.cur { - Some(t) => t, - None => { - self.cur = Some((b, b, trans)); - continue; - } - }; - if prev_trans == trans { - self.cur = Some((prev_start, b, prev_trans)); - } else { - self.cur = Some((b, b, trans)); - if prev_trans.state_id() != DEAD { - return Some((prev_start, prev_end, prev_trans)); - } - } - } - if let Some((start, end, trans)) = self.cur.take() { - if trans.state_id() != DEAD { - return Some((start, end, trans)); - } - } - None - } -} - -/// A cache represents mutable state that a one-pass [`DFA`] requires during a -/// search. -/// -/// For a given one-pass DFA, its corresponding cache may be created either via -/// [`DFA::create_cache`], or via [`Cache::new`]. They are equivalent in every -/// way, except the former does not require explicitly importing `Cache`. -/// -/// A particular `Cache` is coupled with the one-pass DFA from which it was -/// created. It may only be used with that one-pass DFA. 
A cache and its -/// allocations may be re-purposed via [`Cache::reset`], in which case, it can -/// only be used with the new one-pass DFA (and not the old one). -#[derive(Clone, Debug)] -pub struct Cache { - /// Scratch space used to store slots during a search. Basically, we use - /// the caller provided slots to store slots known when a match occurs. - /// But after a match occurs, we might continue a search but ultimately - /// fail to extend the match. When continuing the search, we need some - /// place to store candidate capture offsets without overwriting the slot - /// offsets recorded for the most recently seen match. - explicit_slots: Vec>, - /// The number of slots in the caller-provided 'Captures' value for the - /// current search. This is always at most 'explicit_slots.len()', but - /// might be less than it, if the caller provided fewer slots to fill. - explicit_slot_len: usize, -} - -impl Cache { - /// Create a new [`onepass::DFA`](DFA) cache. - /// - /// A potentially more convenient routine to create a cache is - /// [`DFA::create_cache`], as it does not require also importing the - /// `Cache` type. - /// - /// If you want to reuse the returned `Cache` with some other one-pass DFA, - /// then you must call [`Cache::reset`] with the desired one-pass DFA. - pub fn new(re: &DFA) -> Cache { - let mut cache = Cache { explicit_slots: vec![], explicit_slot_len: 0 }; - cache.reset(re); - cache - } - - /// Reset this cache such that it can be used for searching with a - /// different [`onepass::DFA`](DFA). - /// - /// A cache reset permits reusing memory already allocated in this cache - /// with a different one-pass DFA. - /// - /// # Example - /// - /// This shows how to re-purpose a cache for use with a different one-pass - /// DFA. - /// - /// ``` - /// # if cfg!(miri) { return Ok(()); } // miri takes too long - /// use regex_automata::{dfa::onepass::DFA, Match}; - /// - /// let re1 = DFA::new(r"\w")?; - /// let re2 = DFA::new(r"\W")?; - /// let mut caps1 = re1.create_captures(); - /// let mut caps2 = re2.create_captures(); - /// - /// let mut cache = re1.create_cache(); - /// assert_eq!( - /// Some(Match::must(0, 0..2)), - /// { re1.captures(&mut cache, "Δ", &mut caps1); caps1.get_match() }, - /// ); - /// - /// // Using 'cache' with re2 is not allowed. It may result in panics or - /// // incorrect results. In order to re-purpose the cache, we must reset - /// // it with the one-pass DFA we'd like to use it with. - /// // - /// // Similarly, after this reset, using the cache with 're1' is also not - /// // allowed. - /// re2.reset_cache(&mut cache); - /// assert_eq!( - /// Some(Match::must(0, 0..3)), - /// { re2.captures(&mut cache, "☃", &mut caps2); caps2.get_match() }, - /// ); - /// - /// # Ok::<(), Box>(()) - /// ``` - pub fn reset(&mut self, re: &DFA) { - let explicit_slot_len = re.get_nfa().group_info().explicit_slot_len(); - self.explicit_slots.resize(explicit_slot_len, None); - self.explicit_slot_len = explicit_slot_len; - } - - /// Returns the heap memory usage, in bytes, of this cache. - /// - /// This does **not** include the stack size used up by this cache. To - /// compute that, use `std::mem::size_of::()`. 
- pub fn memory_usage(&self) -> usize { - self.explicit_slots.len() * core::mem::size_of::>() - } - - fn explicit_slots(&mut self) -> &mut [Option] { - &mut self.explicit_slots[..self.explicit_slot_len] - } - - fn setup_search(&mut self, explicit_slot_len: usize) { - self.explicit_slot_len = explicit_slot_len; - } -} - -/// Represents a single transition in a one-pass DFA. -/// -/// The high 21 bits corresponds to the state ID. The bit following corresponds -/// to the special "match wins" flag. The remaining low 42 bits corresponds to -/// the transition epsilons, which contains the slots that should be saved when -/// this transition is followed and the conditional epsilon transitions that -/// must be satisfied in order to follow this transition. -#[derive(Clone, Copy, Eq, PartialEq)] -struct Transition(u64); - -impl Transition { - const STATE_ID_BITS: u64 = 21; - const STATE_ID_SHIFT: u64 = 64 - Transition::STATE_ID_BITS; - const STATE_ID_LIMIT: u64 = 1 << Transition::STATE_ID_BITS; - const MATCH_WINS_SHIFT: u64 = 64 - (Transition::STATE_ID_BITS + 1); - const INFO_MASK: u64 = 0x000003FF_FFFFFFFF; - - /// Return a new transition to the given state ID with the given epsilons. - fn new(match_wins: bool, sid: StateID, epsilons: Epsilons) -> Transition { - let match_wins = - if match_wins { 1 << Transition::MATCH_WINS_SHIFT } else { 0 }; - let sid = sid.as_u64() << Transition::STATE_ID_SHIFT; - Transition(sid | match_wins | epsilons.0) - } - - /// Returns true if and only if this transition points to the DEAD state. - fn is_dead(self) -> bool { - self.state_id() == DEAD - } - - /// Return whether this transition has a "match wins" property. - /// - /// When a transition has this property, it means that if a match has been - /// found and the search uses leftmost-first semantics, then that match - /// should be returned immediately instead of continuing on. - /// - /// The "match wins" name comes from RE2, which uses a pretty much - /// identical mechanism for implementing leftmost-first semantics. - fn match_wins(&self) -> bool { - (self.0 >> Transition::MATCH_WINS_SHIFT & 1) == 1 - } - - /// Return the "next" state ID that this transition points to. - fn state_id(&self) -> StateID { - // OK because a Transition has a valid StateID in its upper bits by - // construction. The cast to usize is also correct, even on 16-bit - // targets because, again, we know the upper bits is a valid StateID, - // which can never overflow usize on any supported target. - StateID::new_unchecked( - (self.0 >> Transition::STATE_ID_SHIFT).as_usize(), - ) - } - - /// Set the "next" state ID in this transition. - fn set_state_id(&mut self, sid: StateID) { - *self = Transition::new(self.match_wins(), sid, self.epsilons()); - } - - /// Return the epsilons embedded in this transition. - fn epsilons(&self) -> Epsilons { - Epsilons(self.0 & Transition::INFO_MASK) - } -} - -impl core::fmt::Debug for Transition { - fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { - if self.is_dead() { - return write!(f, "0"); - } - write!(f, "{}", self.state_id().as_usize())?; - if self.match_wins() { - write!(f, "-MW")?; - } - if !self.epsilons().is_empty() { - write!(f, "-{:?}", self.epsilons())?; - } - Ok(()) - } -} - -/// A representation of a match state's pattern ID along with the epsilons for -/// when a match occurs. -/// -/// A match state in a one-pass DFA, unlike in a more general DFA, has exactly -/// one pattern ID. If it had more, then the original NFA would not have been -/// one-pass. 
-/// -/// The "epsilons" part of this corresponds to what was found in the epsilon -/// transitions between the transition taken in the last byte of input and the -/// ultimate match state. This might include saving slots and/or conditional -/// epsilon transitions that must be satisfied before one can report the match. -/// -/// Technically, every state has room for a 'PatternEpsilons', but it is only -/// ever non-empty for match states. -#[derive(Clone, Copy)] -struct PatternEpsilons(u64); - -impl PatternEpsilons { - const PATTERN_ID_BITS: u64 = 22; - const PATTERN_ID_SHIFT: u64 = 64 - PatternEpsilons::PATTERN_ID_BITS; - // A sentinel value indicating that this is not a match state. We don't - // use 0 since 0 is a valid pattern ID. - const PATTERN_ID_NONE: u64 = 0x00000000_003FFFFF; - const PATTERN_ID_LIMIT: u64 = PatternEpsilons::PATTERN_ID_NONE; - const PATTERN_ID_MASK: u64 = 0xFFFFFC00_00000000; - const EPSILONS_MASK: u64 = 0x000003FF_FFFFFFFF; - - /// Return a new empty pattern epsilons that has no pattern ID and has no - /// epsilons. This is suitable for non-match states. - fn empty() -> PatternEpsilons { - PatternEpsilons( - PatternEpsilons::PATTERN_ID_NONE - << PatternEpsilons::PATTERN_ID_SHIFT, - ) - } - - /// Whether this pattern epsilons is empty or not. It's empty when it has - /// no pattern ID and an empty epsilons. - fn is_empty(self) -> bool { - self.pattern_id().is_none() && self.epsilons().is_empty() - } - - /// Return the pattern ID in this pattern epsilons if one exists. - fn pattern_id(self) -> Option { - let pid = self.0 >> PatternEpsilons::PATTERN_ID_SHIFT; - if pid == PatternEpsilons::PATTERN_ID_LIMIT { - None - } else { - Some(PatternID::new_unchecked(pid.as_usize())) - } - } - - /// Returns the pattern ID without checking whether it's valid. If this is - /// called and there is no pattern ID in this `PatternEpsilons`, then this - /// will likely produce an incorrect result or possibly even a panic or - /// an overflow. But safety will not be violated. - /// - /// This is useful when you know a particular state is a match state. If - /// it's a match state, then it must have a pattern ID. - fn pattern_id_unchecked(self) -> PatternID { - let pid = self.0 >> PatternEpsilons::PATTERN_ID_SHIFT; - PatternID::new_unchecked(pid.as_usize()) - } - - /// Return a new pattern epsilons with the given pattern ID, but the same - /// epsilons. - fn set_pattern_id(self, pid: PatternID) -> PatternEpsilons { - PatternEpsilons( - (pid.as_u64() << PatternEpsilons::PATTERN_ID_SHIFT) - | (self.0 & PatternEpsilons::EPSILONS_MASK), - ) - } - - /// Return the epsilons part of this pattern epsilons. - fn epsilons(self) -> Epsilons { - Epsilons(self.0 & PatternEpsilons::EPSILONS_MASK) - } - - /// Return a new pattern epsilons with the given epsilons, but the same - /// pattern ID. 
- fn set_epsilons(self, epsilons: Epsilons) -> PatternEpsilons { - PatternEpsilons( - (self.0 & PatternEpsilons::PATTERN_ID_MASK) - | (u64::from(epsilons.0) & PatternEpsilons::EPSILONS_MASK), - ) - } -} - -impl core::fmt::Debug for PatternEpsilons { - fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { - if self.is_empty() { - return write!(f, "N/A"); - } - if let Some(pid) = self.pattern_id() { - write!(f, "{}", pid.as_usize())?; - } - if !self.epsilons().is_empty() { - if self.pattern_id().is_some() { - write!(f, "/")?; - } - write!(f, "{:?}", self.epsilons())?; - } - Ok(()) - } -} - -/// Epsilons represents all of the NFA epsilons transitions that went into a -/// single transition in a single DFA state. In this case, it only represents -/// the epsilon transitions that have some kind of non-consuming side effect: -/// either the transition requires storing the current position of the search -/// into a slot, or the transition is conditional and requires the current -/// position in the input to satisfy an assertion before the transition may be -/// taken. -/// -/// This folds the cumulative effect of a group of NFA states (all connected -/// by epsilon transitions) down into a single set of bits. While these bits -/// can represent all possible conditional epsilon transitions, it only permits -/// storing up to a somewhat small number of slots. -/// -/// Epsilons is represented as a 42-bit integer. For example, it is packed into -/// the lower 42 bits of a `Transition`. (Where the high 22 bits contains a -/// `StateID` and a special "match wins" property.) -#[derive(Clone, Copy)] -struct Epsilons(u64); - -impl Epsilons { - const SLOT_MASK: u64 = 0x000003FF_FFFFFC00; - const SLOT_SHIFT: u64 = 10; - const LOOK_MASK: u64 = 0x00000000_000003FF; - - /// Create a new empty epsilons. It has no slots and no assertions that - /// need to be satisfied. - fn empty() -> Epsilons { - Epsilons(0) - } - - /// Returns true if this epsilons contains no slots and no assertions. - fn is_empty(self) -> bool { - self.0 == 0 - } - - /// Returns the slot epsilon transitions. - fn slots(self) -> Slots { - Slots((self.0 >> Epsilons::SLOT_SHIFT).low_u32()) - } - - /// Set the slot epsilon transitions. - fn set_slots(self, slots: Slots) -> Epsilons { - Epsilons( - (u64::from(slots.0) << Epsilons::SLOT_SHIFT) - | (self.0 & Epsilons::LOOK_MASK), - ) - } - - /// Return the set of look-around assertions in these epsilon transitions. - fn looks(self) -> LookSet { - LookSet { bits: (self.0 & Epsilons::LOOK_MASK).low_u32() } - } - - /// Set the look-around assertions on these epsilon transitions. - fn set_looks(self, look_set: LookSet) -> Epsilons { - Epsilons( - (self.0 & Epsilons::SLOT_MASK) - | (u64::from(look_set.bits) & Epsilons::LOOK_MASK), - ) - } -} - -impl core::fmt::Debug for Epsilons { - fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { - let mut wrote = false; - if !self.slots().is_empty() { - write!(f, "{:?}", self.slots())?; - wrote = true; - } - if !self.looks().is_empty() { - if wrote { - write!(f, "/")?; - } - write!(f, "{:?}", self.looks())?; - wrote = true; - } - if !wrote { - write!(f, "N/A")?; - } - Ok(()) - } -} - -/// The set of epsilon transitions indicating that the current position in a -/// search should be saved to a slot. -/// -/// This *only* represents explicit slots. So for example, the pattern -/// `[a-z]+([0-9]+)([a-z]+)` has: -/// -/// * 3 capturing groups, thus 6 slots. -/// * 1 implicit capturing group, thus 2 implicit slots. 
-/// * 2 explicit capturing groups, thus 4 explicit slots. -/// -/// While implicit slots are represented by epsilon transitions in an NFA, we -/// do not explicitly represent them here. Instead, implicit slots are assumed -/// to be present and handled automatically in the search code. Therefore, -/// that means we only need to represent explicit slots in our epsilon -/// transitions. -/// -/// Its representation is a bit set. The bit 'i' is set if and only if there -/// exists an explicit slot at index 'c', where 'c = (#patterns * 2) + i'. That -/// is, the bit 'i' corresponds to the first explicit slot and the first -/// explicit slot appears immediately following the last implicit slot. (If -/// this is confusing, see `GroupInfo` for more details on how slots works.) -/// -/// A single `Slots` represents all the active slots in a sub-graph of an NFA, -/// where all the states are connected by epsilon transitions. In effect, when -/// traversing the one-pass DFA during a search, all slots set in a particular -/// transition must be captured by recording the current search position. -/// -/// The API of `Slots` requires the caller to handle the explicit slot offset. -/// That is, a `Slots` doesn't know where the explicit slots start for a -/// particular NFA. Thus, if the callers see's the bit 'i' is set, then they -/// need to do the arithmetic above to find 'c', which is the real actual slot -/// index in the corresponding NFA. -#[derive(Clone, Copy)] -struct Slots(u32); - -impl Slots { - const LIMIT: usize = 32; - - /// Insert the slot at the given bit index. - fn insert(self, slot: usize) -> Slots { - debug_assert!(slot < Slots::LIMIT); - Slots(self.0 | (1 << slot.as_u32())) - } - - /// Remove the slot at the given bit index. - fn remove(self, slot: usize) -> Slots { - debug_assert!(slot < Slots::LIMIT); - Slots(self.0 & !(1 << slot.as_u32())) - } - - /// Returns true if and only if this set contains no slots. - fn is_empty(self) -> bool { - self.0 == 0 - } - - /// Returns an iterator over all of the set bits in this set. - fn iter(self) -> SlotsIter { - SlotsIter { slots: self } - } - - /// For the position `at` in the current haystack, copy it to - /// `caller_explicit_slots` for all slots that are in this set. - /// - /// Callers may pass a slice of any length. Slots in this set bigger than - /// the length of the given explicit slots are simply skipped. - /// - /// The slice *must* correspond only to the explicit slots and the first - /// element of the slice must always correspond to the first explicit slot - /// in the corresponding NFA. - fn apply( - self, - at: usize, - caller_explicit_slots: &mut [Option], - ) { - if self.is_empty() { - return; - } - let at = NonMaxUsize::new(at); - for slot in self.iter() { - if slot >= caller_explicit_slots.len() { - break; - } - caller_explicit_slots[slot] = at; - } - } -} - -impl core::fmt::Debug for Slots { - fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { - write!(f, "S")?; - for slot in self.iter() { - write!(f, "-{slot:?}")?; - } - Ok(()) - } -} - -/// An iterator over all of the bits set in a slot set. -/// -/// This returns the bit index that is set, so callers may need to offset it -/// to get the actual NFA slot index. -#[derive(Debug)] -struct SlotsIter { - slots: Slots, -} - -impl Iterator for SlotsIter { - type Item = usize; - - fn next(&mut self) -> Option { - // Number of zeroes here is always <= u8::MAX, and so fits in a usize. 
- let slot = self.slots.0.trailing_zeros().as_usize(); - if slot >= Slots::LIMIT { - return None; - } - self.slots = self.slots.remove(slot); - Some(slot) - } -} - -/// An error that occurred during the construction of a one-pass DFA. -/// -/// This error does not provide many introspection capabilities. There are -/// generally only two things you can do with it: -/// -/// * Obtain a human readable message via its `std::fmt::Display` impl. -/// * Access an underlying [`thompson::BuildError`] type from its `source` -/// method via the `std::error::Error` trait. This error only occurs when using -/// convenience routines for building a one-pass DFA directly from a pattern -/// string. -/// -/// When the `std` feature is enabled, this implements the `std::error::Error` -/// trait. -#[derive(Clone, Debug)] -pub struct BuildError { - kind: BuildErrorKind, -} - -/// The kind of error that occurred during the construction of a one-pass DFA. -#[derive(Clone, Debug)] -enum BuildErrorKind { - NFA(crate::nfa::thompson::BuildError), - Word(UnicodeWordBoundaryError), - TooManyStates { limit: u64 }, - TooManyPatterns { limit: u64 }, - UnsupportedLook { look: Look }, - ExceededSizeLimit { limit: usize }, - NotOnePass { msg: &'static str }, -} - -impl BuildError { - fn nfa(err: crate::nfa::thompson::BuildError) -> BuildError { - BuildError { kind: BuildErrorKind::NFA(err) } - } - - fn word(err: UnicodeWordBoundaryError) -> BuildError { - BuildError { kind: BuildErrorKind::Word(err) } - } - - fn too_many_states(limit: u64) -> BuildError { - BuildError { kind: BuildErrorKind::TooManyStates { limit } } - } - - fn too_many_patterns(limit: u64) -> BuildError { - BuildError { kind: BuildErrorKind::TooManyPatterns { limit } } - } - - fn unsupported_look(look: Look) -> BuildError { - BuildError { kind: BuildErrorKind::UnsupportedLook { look } } - } - - fn exceeded_size_limit(limit: usize) -> BuildError { - BuildError { kind: BuildErrorKind::ExceededSizeLimit { limit } } - } - - fn not_one_pass(msg: &'static str) -> BuildError { - BuildError { kind: BuildErrorKind::NotOnePass { msg } } - } -} - -#[cfg(feature = "std")] -impl std::error::Error for BuildError { - fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { - use self::BuildErrorKind::*; - - match self.kind { - NFA(ref err) => Some(err), - Word(ref err) => Some(err), - _ => None, - } - } -} - -impl core::fmt::Display for BuildError { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - use self::BuildErrorKind::*; - - match self.kind { - NFA(_) => write!(f, "error building NFA"), - Word(_) => write!(f, "NFA contains Unicode word boundary"), - TooManyStates { limit } => write!( - f, - "one-pass DFA exceeded a limit of {limit:?} \ - for number of states", - ), - TooManyPatterns { limit } => write!( - f, - "one-pass DFA exceeded a limit of {limit:?} \ - for number of patterns", - ), - UnsupportedLook { look } => write!( - f, - "one-pass DFA does not support the {look:?} assertion", - ), - ExceededSizeLimit { limit } => write!( - f, - "one-pass DFA exceeded size limit of {limit:?} during building", - ), - NotOnePass { msg } => write!( - f, - "one-pass DFA could not be built because \ - pattern is not one-pass: {}", - msg, - ), - } - } -} - -#[cfg(all(test, feature = "syntax"))] -mod tests { - use alloc::string::ToString; - - use super::*; - - #[test] - fn fail_conflicting_transition() { - let predicate = |err: &str| err.contains("conflicting transition"); - - let err = DFA::new(r"a*[ab]").unwrap_err().to_string(); - 
assert!(predicate(&err), "{err}"); - } - - #[test] - fn fail_multiple_epsilon() { - let predicate = |err: &str| { - err.contains("multiple epsilon transitions to same state") - }; - - let err = DFA::new(r"(^|$)a").unwrap_err().to_string(); - assert!(predicate(&err), "{err}"); - } - - #[test] - fn fail_multiple_match() { - let predicate = |err: &str| { - err.contains("multiple epsilon transitions to match state") - }; - - let err = DFA::new_many(&[r"^", r"$"]).unwrap_err().to_string(); - assert!(predicate(&err), "{err}"); - } - - // This test is meant to build a one-pass regex with the maximum number of - // possible slots. - // - // NOTE: Remember that the slot limit only applies to explicit capturing - // groups. Any number of implicit capturing groups is supported (up to the - // maximum number of supported patterns), since implicit groups are handled - // by the search loop itself. - #[test] - fn max_slots() { - // One too many... - let pat = r"(a)(b)(c)(d)(e)(f)(g)(h)(i)(j)(k)(l)(m)(n)(o)(p)(q)"; - assert!(DFA::new(pat).is_err()); - // Just right. - let pat = r"(a)(b)(c)(d)(e)(f)(g)(h)(i)(j)(k)(l)(m)(n)(o)(p)"; - assert!(DFA::new(pat).is_ok()); - } - - // This test ensures that the one-pass DFA works with all look-around - // assertions that we expect it to work with. - // - // The utility of this test is that each one-pass transition has a small - // amount of space to store look-around assertions. Currently, there is - // logic in the one-pass constructor to ensure there aren't more than ten - // possible assertions. And indeed, there are only ten possible assertions - // (at time of writing), so this is okay. But conceivably, more assertions - // could be added. So we check that things at least work with what we - // expect them to work with. - #[test] - fn assertions() { - // haystack anchors - assert!(DFA::new(r"^").is_ok()); - assert!(DFA::new(r"$").is_ok()); - - // line anchors - assert!(DFA::new(r"(?m)^").is_ok()); - assert!(DFA::new(r"(?m)$").is_ok()); - assert!(DFA::new(r"(?Rm)^").is_ok()); - assert!(DFA::new(r"(?Rm)$").is_ok()); - - // word boundaries - if cfg!(feature = "unicode-word-boundary") { - assert!(DFA::new(r"\b").is_ok()); - assert!(DFA::new(r"\B").is_ok()); - } - assert!(DFA::new(r"(?-u)\b").is_ok()); - assert!(DFA::new(r"(?-u)\B").is_ok()); - } - - #[cfg(not(miri))] // takes too long on miri - #[test] - fn is_one_pass() { - use crate::util::syntax; - - assert!(DFA::new(r"a*b").is_ok()); - if cfg!(feature = "unicode-perl") { - assert!(DFA::new(r"\w").is_ok()); - } - assert!(DFA::new(r"(?-u)\w*\s").is_ok()); - assert!(DFA::new(r"(?s:.)*?").is_ok()); - assert!(DFA::builder() - .syntax(syntax::Config::new().utf8(false)) - .build(r"(?s-u:.)*?") - .is_ok()); - } - - #[test] - fn is_not_one_pass() { - assert!(DFA::new(r"a*a").is_err()); - assert!(DFA::new(r"(?s-u:.)*?").is_err()); - assert!(DFA::new(r"(?s:.)*?a").is_err()); - } - - #[cfg(not(miri))] - #[test] - fn is_not_one_pass_bigger() { - assert!(DFA::new(r"\w*\s").is_err()); - } -} diff --git a/vendor/regex-automata/src/dfa/regex.rs b/vendor/regex-automata/src/dfa/regex.rs deleted file mode 100644 index 892c442c8b48a8..00000000000000 --- a/vendor/regex-automata/src/dfa/regex.rs +++ /dev/null @@ -1,870 +0,0 @@ -/*! -A DFA-backed `Regex`. - -This module provides [`Regex`], which is defined generically over the -[`Automaton`] trait. A `Regex` implements convenience routines you might have -come to expect, such as finding the start/end of a match and iterating over -all non-overlapping matches. 
This `Regex` type is limited in its capabilities -to what a DFA can provide. Therefore, APIs involving capturing groups, for -example, are not provided. - -Internally, a `Regex` is composed of two DFAs. One is a "forward" DFA that -finds the end offset of a match, where as the other is a "reverse" DFA that -find the start offset of a match. - -See the [parent module](crate::dfa) for examples. -*/ - -#[cfg(feature = "alloc")] -use alloc::vec::Vec; - -#[cfg(feature = "dfa-build")] -use crate::dfa::dense::BuildError; -use crate::{ - dfa::{automaton::Automaton, dense}, - util::{iter, search::Input}, - Anchored, Match, MatchError, -}; -#[cfg(feature = "alloc")] -use crate::{ - dfa::{sparse, StartKind}, - util::search::MatchKind, -}; - -// When the alloc feature is enabled, the regex type sets its A type parameter -// to default to an owned dense DFA. But without alloc, we set no default. This -// makes things a lot more convenient in the common case, since writing out the -// DFA types is pretty annoying. -// -// Since we have two different definitions but only want to write one doc -// string, we use a macro to capture the doc and other attributes once and then -// repeat them for each definition. -macro_rules! define_regex_type { - ($(#[$doc:meta])*) => { - #[cfg(feature = "alloc")] - $(#[$doc])* - pub struct Regex { - forward: A, - reverse: A, - } - - #[cfg(not(feature = "alloc"))] - $(#[$doc])* - pub struct Regex { - forward: A, - reverse: A, - } - }; -} - -define_regex_type!( - /// A regular expression that uses deterministic finite automata for fast - /// searching. - /// - /// A regular expression is comprised of two DFAs, a "forward" DFA and a - /// "reverse" DFA. The forward DFA is responsible for detecting the end of - /// a match while the reverse DFA is responsible for detecting the start - /// of a match. Thus, in order to find the bounds of any given match, a - /// forward search must first be run followed by a reverse search. A match - /// found by the forward DFA guarantees that the reverse DFA will also find - /// a match. - /// - /// The type of the DFA used by a `Regex` corresponds to the `A` type - /// parameter, which must satisfy the [`Automaton`] trait. Typically, `A` - /// is either a [`dense::DFA`] or a [`sparse::DFA`], where dense DFAs use - /// more memory but search faster, while sparse DFAs use less memory but - /// search more slowly. - /// - /// # Crate features - /// - /// Note that despite what the documentation auto-generates, the _only_ - /// crate feature needed to use this type is `dfa-search`. You do _not_ - /// need to enable the `alloc` feature. - /// - /// By default, a regex's automaton type parameter is set to - /// `dense::DFA>` when the `alloc` feature is enabled. For most - /// in-memory work loads, this is the most convenient type that gives the - /// best search performance. When the `alloc` feature is disabled, no - /// default type is used. - /// - /// # When should I use this? - /// - /// Generally speaking, if you can afford the overhead of building a full - /// DFA for your regex, and you don't need things like capturing groups, - /// then this is a good choice if you're looking to optimize for matching - /// speed. Note however that its speed may be worse than a general purpose - /// regex engine if you don't provide a [`dense::Config::prefilter`] to the - /// underlying DFA. - /// - /// # Sparse DFAs - /// - /// Since a `Regex` is generic over the [`Automaton`] trait, it can be - /// used with any kind of DFA. 
While this crate constructs dense DFAs by - /// default, it is easy enough to build corresponding sparse DFAs, and then - /// build a regex from them: - /// - /// ``` - /// use regex_automata::dfa::regex::Regex; - /// - /// // First, build a regex that uses dense DFAs. - /// let dense_re = Regex::new("foo[0-9]+")?; - /// - /// // Second, build sparse DFAs from the forward and reverse dense DFAs. - /// let fwd = dense_re.forward().to_sparse()?; - /// let rev = dense_re.reverse().to_sparse()?; - /// - /// // Third, build a new regex from the constituent sparse DFAs. - /// let sparse_re = Regex::builder().build_from_dfas(fwd, rev); - /// - /// // A regex that uses sparse DFAs can be used just like with dense DFAs. - /// assert_eq!(true, sparse_re.is_match(b"foo123")); - /// - /// # Ok::<(), Box>(()) - /// ``` - /// - /// Alternatively, one can use a [`Builder`] to construct a sparse DFA - /// more succinctly. (Note though that dense DFAs are still constructed - /// first internally, and then converted to sparse DFAs, as in the example - /// above.) - /// - /// ``` - /// use regex_automata::dfa::regex::Regex; - /// - /// let sparse_re = Regex::builder().build_sparse(r"foo[0-9]+")?; - /// // A regex that uses sparse DFAs can be used just like with dense DFAs. - /// assert!(sparse_re.is_match(b"foo123")); - /// - /// # Ok::<(), Box>(()) - /// ``` - /// - /// # Fallibility - /// - /// Most of the search routines defined on this type will _panic_ when the - /// underlying search fails. This might be because the DFA gave up because - /// it saw a quit byte, whether configured explicitly or via heuristic - /// Unicode word boundary support, although neither are enabled by default. - /// Or it might fail because an invalid `Input` configuration is given, - /// for example, with an unsupported [`Anchored`] mode. - /// - /// If you need to handle these error cases instead of allowing them to - /// trigger a panic, then the lower level [`Regex::try_search`] provides - /// a fallible API that never panics. - /// - /// # Example - /// - /// This example shows how to cause a search to terminate if it sees a - /// `\n` byte, and handle the error returned. This could be useful if, for - /// example, you wanted to prevent a user supplied pattern from matching - /// across a line boundary. - /// - /// ``` - /// # if cfg!(miri) { return Ok(()); } // miri takes too long - /// use regex_automata::{dfa::{self, regex::Regex}, Input, MatchError}; - /// - /// let re = Regex::builder() - /// .dense(dfa::dense::Config::new().quit(b'\n', true)) - /// .build(r"foo\p{any}+bar")?; - /// - /// let input = Input::new("foo\nbar"); - /// // Normally this would produce a match, since \p{any} contains '\n'. - /// // But since we instructed the automaton to enter a quit state if a - /// // '\n' is observed, this produces a match error instead. - /// let expected = MatchError::quit(b'\n', 3); - /// let got = re.try_search(&input).unwrap_err(); - /// assert_eq!(expected, got); - /// - /// # Ok::<(), Box>(()) - /// ``` - #[derive(Clone, Debug)] -); - -#[cfg(all(feature = "syntax", feature = "dfa-build"))] -impl Regex { - /// Parse the given regular expression using the default configuration and - /// return the corresponding regex. - /// - /// If you want a non-default configuration, then use the [`Builder`] to - /// set your own configuration. 
- /// - /// # Example - /// - /// ``` - /// use regex_automata::{Match, dfa::regex::Regex}; - /// - /// let re = Regex::new("foo[0-9]+bar")?; - /// assert_eq!( - /// Some(Match::must(0, 3..14)), - /// re.find(b"zzzfoo12345barzzz"), - /// ); - /// # Ok::<(), Box>(()) - /// ``` - pub fn new(pattern: &str) -> Result { - Builder::new().build(pattern) - } - - /// Like `new`, but parses multiple patterns into a single "regex set." - /// This similarly uses the default regex configuration. - /// - /// # Example - /// - /// ``` - /// use regex_automata::{Match, dfa::regex::Regex}; - /// - /// let re = Regex::new_many(&["[a-z]+", "[0-9]+"])?; - /// - /// let mut it = re.find_iter(b"abc 1 foo 4567 0 quux"); - /// assert_eq!(Some(Match::must(0, 0..3)), it.next()); - /// assert_eq!(Some(Match::must(1, 4..5)), it.next()); - /// assert_eq!(Some(Match::must(0, 6..9)), it.next()); - /// assert_eq!(Some(Match::must(1, 10..14)), it.next()); - /// assert_eq!(Some(Match::must(1, 15..16)), it.next()); - /// assert_eq!(Some(Match::must(0, 17..21)), it.next()); - /// assert_eq!(None, it.next()); - /// # Ok::<(), Box>(()) - /// ``` - pub fn new_many>( - patterns: &[P], - ) -> Result { - Builder::new().build_many(patterns) - } -} - -#[cfg(all(feature = "syntax", feature = "dfa-build"))] -impl Regex>> { - /// Parse the given regular expression using the default configuration, - /// except using sparse DFAs, and return the corresponding regex. - /// - /// If you want a non-default configuration, then use the [`Builder`] to - /// set your own configuration. - /// - /// # Example - /// - /// ``` - /// use regex_automata::{Match, dfa::regex::Regex}; - /// - /// let re = Regex::new_sparse("foo[0-9]+bar")?; - /// assert_eq!( - /// Some(Match::must(0, 3..14)), - /// re.find(b"zzzfoo12345barzzz"), - /// ); - /// # Ok::<(), Box>(()) - /// ``` - pub fn new_sparse( - pattern: &str, - ) -> Result>>, BuildError> { - Builder::new().build_sparse(pattern) - } - - /// Like `new`, but parses multiple patterns into a single "regex set" - /// using sparse DFAs. This otherwise similarly uses the default regex - /// configuration. - /// - /// # Example - /// - /// ``` - /// use regex_automata::{Match, dfa::regex::Regex}; - /// - /// let re = Regex::new_many_sparse(&["[a-z]+", "[0-9]+"])?; - /// - /// let mut it = re.find_iter(b"abc 1 foo 4567 0 quux"); - /// assert_eq!(Some(Match::must(0, 0..3)), it.next()); - /// assert_eq!(Some(Match::must(1, 4..5)), it.next()); - /// assert_eq!(Some(Match::must(0, 6..9)), it.next()); - /// assert_eq!(Some(Match::must(1, 10..14)), it.next()); - /// assert_eq!(Some(Match::must(1, 15..16)), it.next()); - /// assert_eq!(Some(Match::must(0, 17..21)), it.next()); - /// assert_eq!(None, it.next()); - /// # Ok::<(), Box>(()) - /// ``` - pub fn new_many_sparse>( - patterns: &[P], - ) -> Result>>, BuildError> { - Builder::new().build_many_sparse(patterns) - } -} - -/// Convenience routines for regex construction. -impl Regex> { - /// Return a builder for configuring the construction of a `Regex`. - /// - /// This is a convenience routine to avoid needing to import the - /// [`Builder`] type in common cases. - /// - /// # Example - /// - /// This example shows how to use the builder to disable UTF-8 mode - /// everywhere. 
- /// - /// ``` - /// # if cfg!(miri) { return Ok(()); } // miri takes too long - /// use regex_automata::{ - /// dfa::regex::Regex, nfa::thompson, util::syntax, Match, - /// }; - /// - /// let re = Regex::builder() - /// .syntax(syntax::Config::new().utf8(false)) - /// .thompson(thompson::Config::new().utf8(false)) - /// .build(r"foo(?-u:[^b])ar.*")?; - /// let haystack = b"\xFEfoo\xFFarzz\xE2\x98\xFF\n"; - /// let expected = Some(Match::must(0, 1..9)); - /// let got = re.find(haystack); - /// assert_eq!(expected, got); - /// - /// # Ok::<(), Box>(()) - /// ``` - pub fn builder() -> Builder { - Builder::new() - } -} - -/// Standard search routines for finding and iterating over matches. -impl Regex { - /// Returns true if and only if this regex matches the given haystack. - /// - /// This routine may short circuit if it knows that scanning future input - /// will never lead to a different result. In particular, if the underlying - /// DFA enters a match state or a dead state, then this routine will return - /// `true` or `false`, respectively, without inspecting any future input. - /// - /// # Panics - /// - /// This routine panics if the search could not complete. This can occur - /// in a number of circumstances: - /// - /// * The configuration of the DFA may permit it to "quit" the search. - /// For example, setting quit bytes or enabling heuristic support for - /// Unicode word boundaries. The default configuration does not enable any - /// option that could result in the DFA quitting. - /// * When the provided `Input` configuration is not supported. For - /// example, by providing an unsupported anchor mode. - /// - /// When a search panics, callers cannot know whether a match exists or - /// not. - /// - /// Use [`Regex::try_search`] if you want to handle these error conditions. - /// - /// # Example - /// - /// ``` - /// use regex_automata::dfa::regex::Regex; - /// - /// let re = Regex::new("foo[0-9]+bar")?; - /// assert_eq!(true, re.is_match("foo12345bar")); - /// assert_eq!(false, re.is_match("foobar")); - /// # Ok::<(), Box>(()) - /// ``` - #[inline] - pub fn is_match<'h, I: Into>>(&self, input: I) -> bool { - // Not only can we do an "earliest" search, but we can avoid doing a - // reverse scan too. - let input = input.into().earliest(true); - self.forward().try_search_fwd(&input).map(|x| x.is_some()).unwrap() - } - - /// Returns the start and end offset of the leftmost match. If no match - /// exists, then `None` is returned. - /// - /// # Panics - /// - /// This routine panics if the search could not complete. This can occur - /// in a number of circumstances: - /// - /// * The configuration of the DFA may permit it to "quit" the search. - /// For example, setting quit bytes or enabling heuristic support for - /// Unicode word boundaries. The default configuration does not enable any - /// option that could result in the DFA quitting. - /// * When the provided `Input` configuration is not supported. For - /// example, by providing an unsupported anchor mode. - /// - /// When a search panics, callers cannot know whether a match exists or - /// not. - /// - /// Use [`Regex::try_search`] if you want to handle these error conditions. - /// - /// # Example - /// - /// ``` - /// use regex_automata::{Match, dfa::regex::Regex}; - /// - /// // Greediness is applied appropriately. 
- /// let re = Regex::new("foo[0-9]+")?; - /// assert_eq!(Some(Match::must(0, 3..11)), re.find("zzzfoo12345zzz")); - /// - /// // Even though a match is found after reading the first byte (`a`), - /// // the default leftmost-first match semantics demand that we find the - /// // earliest match that prefers earlier parts of the pattern over latter - /// // parts. - /// let re = Regex::new("abc|a")?; - /// assert_eq!(Some(Match::must(0, 0..3)), re.find("abc")); - /// # Ok::<(), Box>(()) - /// ``` - #[inline] - pub fn find<'h, I: Into>>(&self, input: I) -> Option { - self.try_search(&input.into()).unwrap() - } - - /// Returns an iterator over all non-overlapping leftmost matches in the - /// given bytes. If no match exists, then the iterator yields no elements. - /// - /// This corresponds to the "standard" regex search iterator. - /// - /// # Panics - /// - /// If the search returns an error during iteration, then iteration - /// panics. See [`Regex::find`] for the panic conditions. - /// - /// Use [`Regex::try_search`] with - /// [`util::iter::Searcher`](crate::util::iter::Searcher) if you want to - /// handle these error conditions. - /// - /// # Example - /// - /// ``` - /// use regex_automata::{Match, dfa::regex::Regex}; - /// - /// let re = Regex::new("foo[0-9]+")?; - /// let text = "foo1 foo12 foo123"; - /// let matches: Vec = re.find_iter(text).collect(); - /// assert_eq!(matches, vec![ - /// Match::must(0, 0..4), - /// Match::must(0, 5..10), - /// Match::must(0, 11..17), - /// ]); - /// # Ok::<(), Box>(()) - /// ``` - #[inline] - pub fn find_iter<'r, 'h, I: Into>>( - &'r self, - input: I, - ) -> FindMatches<'r, 'h, A> { - let it = iter::Searcher::new(input.into()); - FindMatches { re: self, it } - } -} - -/// Lower level fallible search routines that permit controlling where the -/// search starts and ends in a particular sequence. -impl Regex { - /// Returns the start and end offset of the leftmost match. If no match - /// exists, then `None` is returned. - /// - /// This is like [`Regex::find`] but with two differences: - /// - /// 1. It is not generic over `Into` and instead accepts a - /// `&Input`. This permits reusing the same `Input` for multiple searches - /// without needing to create a new one. This _may_ help with latency. - /// 2. It returns an error if the search could not complete where as - /// [`Regex::find`] will panic. - /// - /// # Errors - /// - /// This routine errors if the search could not complete. This can occur - /// in the following circumstances: - /// - /// * The configuration of the DFA may permit it to "quit" the search. - /// For example, setting quit bytes or enabling heuristic support for - /// Unicode word boundaries. The default configuration does not enable any - /// option that could result in the DFA quitting. - /// * When the provided `Input` configuration is not supported. For - /// example, by providing an unsupported anchor mode. - /// - /// When a search returns an error, callers cannot know whether a match - /// exists or not. - #[inline] - pub fn try_search( - &self, - input: &Input<'_>, - ) -> Result, MatchError> { - let (fwd, rev) = (self.forward(), self.reverse()); - let end = match fwd.try_search_fwd(input)? { - None => return Ok(None), - Some(end) => end, - }; - // This special cases an empty match at the beginning of the search. If - // our end matches our start, then since a reverse DFA can't match past - // the start, it must follow that our starting position is also our end - // position. 
So short circuit and skip the reverse search. - if input.start() == end.offset() { - return Ok(Some(Match::new( - end.pattern(), - end.offset()..end.offset(), - ))); - } - // We can also skip the reverse search if we know our search was - // anchored. This occurs either when the input config is anchored or - // when we know the regex itself is anchored. In this case, we know the - // start of the match, if one is found, must be the start of the - // search. - if self.is_anchored(input) { - return Ok(Some(Match::new( - end.pattern(), - input.start()..end.offset(), - ))); - } - // N.B. I have tentatively convinced myself that it isn't necessary - // to specify the specific pattern for the reverse search since the - // reverse search will always find the same pattern to match as the - // forward search. But I lack a rigorous proof. Why not just provide - // the pattern anyway? Well, if it is needed, then leaving it out - // gives us a chance to find a witness. (Also, if we don't need to - // specify the pattern, then we don't need to build the reverse DFA - // with 'starts_for_each_pattern' enabled.) - // - // We also need to be careful to disable 'earliest' for the reverse - // search, since it could be enabled for the forward search. In the - // reverse case, to satisfy "leftmost" criteria, we need to match - // as much as we can. We also need to be careful to make the search - // anchored. We don't want the reverse search to report any matches - // other than the one beginning at the end of our forward search. - let revsearch = input - .clone() - .span(input.start()..end.offset()) - .anchored(Anchored::Yes) - .earliest(false); - let start = rev - .try_search_rev(&revsearch)? - .expect("reverse search must match if forward search does"); - assert_eq!( - start.pattern(), - end.pattern(), - "forward and reverse search must match same pattern", - ); - assert!(start.offset() <= end.offset()); - Ok(Some(Match::new(end.pattern(), start.offset()..end.offset()))) - } - - /// Returns true if either the given input specifies an anchored search - /// or if the underlying DFA is always anchored. - fn is_anchored(&self, input: &Input<'_>) -> bool { - match input.get_anchored() { - Anchored::No => self.forward().is_always_start_anchored(), - Anchored::Yes | Anchored::Pattern(_) => true, - } - } -} - -/// Non-search APIs for querying information about the regex and setting a -/// prefilter. -impl Regex { - /// Return the underlying DFA responsible for forward matching. - /// - /// This is useful for accessing the underlying DFA and converting it to - /// some other format or size. See the [`Builder::build_from_dfas`] docs - /// for an example of where this might be useful. - pub fn forward(&self) -> &A { - &self.forward - } - - /// Return the underlying DFA responsible for reverse matching. - /// - /// This is useful for accessing the underlying DFA and converting it to - /// some other format or size. See the [`Builder::build_from_dfas`] docs - /// for an example of where this might be useful. - pub fn reverse(&self) -> &A { - &self.reverse - } - - /// Returns the total number of patterns matched by this regex. 
- /// - /// # Example - /// - /// ``` - /// # if cfg!(miri) { return Ok(()); } // miri takes too long - /// use regex_automata::dfa::regex::Regex; - /// - /// let re = Regex::new_many(&[r"[a-z]+", r"[0-9]+", r"\w+"])?; - /// assert_eq!(3, re.pattern_len()); - /// # Ok::<(), Box>(()) - /// ``` - pub fn pattern_len(&self) -> usize { - assert_eq!(self.forward().pattern_len(), self.reverse().pattern_len()); - self.forward().pattern_len() - } -} - -/// An iterator over all non-overlapping matches for an infallible search. -/// -/// The iterator yields a [`Match`] value until no more matches could be found. -/// If the underlying regex engine returns an error, then a panic occurs. -/// -/// The type parameters are as follows: -/// -/// * `A` represents the type of the underlying DFA that implements the -/// [`Automaton`] trait. -/// -/// The lifetime parameters are as follows: -/// -/// * `'h` represents the lifetime of the haystack being searched. -/// * `'r` represents the lifetime of the regex object itself. -/// -/// This iterator can be created with the [`Regex::find_iter`] method. -#[derive(Debug)] -pub struct FindMatches<'r, 'h, A> { - re: &'r Regex, - it: iter::Searcher<'h>, -} - -impl<'r, 'h, A: Automaton> Iterator for FindMatches<'r, 'h, A> { - type Item = Match; - - #[inline] - fn next(&mut self) -> Option { - let FindMatches { re, ref mut it } = *self; - it.advance(|input| re.try_search(input)) - } -} - -/// A builder for a regex based on deterministic finite automatons. -/// -/// This builder permits configuring options for the syntax of a pattern, the -/// NFA construction, the DFA construction and finally the regex searching -/// itself. This builder is different from a general purpose regex builder in -/// that it permits fine grain configuration of the construction process. The -/// trade off for this is complexity, and the possibility of setting a -/// configuration that might not make sense. For example, there are two -/// different UTF-8 modes: -/// -/// * [`syntax::Config::utf8`](crate::util::syntax::Config::utf8) controls -/// whether the pattern itself can contain sub-expressions that match invalid -/// UTF-8. -/// * [`thompson::Config::utf8`](crate::nfa::thompson::Config::utf8) controls -/// how the regex iterators themselves advance the starting position of the -/// next search when a match with zero length is found. -/// -/// Generally speaking, callers will want to either enable all of these or -/// disable all of these. -/// -/// Internally, building a regex requires building two DFAs, where one is -/// responsible for finding the end of a match and the other is responsible -/// for finding the start of a match. If you only need to detect whether -/// something matched, or only the end of a match, then you should use a -/// [`dense::Builder`] to construct a single DFA, which is cheaper than -/// building two DFAs. -/// -/// # Build methods -/// -/// This builder has a few "build" methods. In general, it's the result of -/// combining the following parameters: -/// -/// * Building one or many regexes. -/// * Building a regex with dense or sparse DFAs. -/// -/// The simplest "build" method is [`Builder::build`]. It accepts a single -/// pattern and builds a dense DFA using `usize` for the state identifier -/// representation. -/// -/// The most general "build" method is [`Builder::build_many`], which permits -/// building a regex that searches for multiple patterns simultaneously while -/// using a specific state identifier representation. 
-/// -/// The most flexible "build" method, but hardest to use, is -/// [`Builder::build_from_dfas`]. This exposes the fact that a [`Regex`] is -/// just a pair of DFAs, and this method allows you to specify those DFAs -/// exactly. -/// -/// # Example -/// -/// This example shows how to disable UTF-8 mode in the syntax and the regex -/// itself. This is generally what you want for matching on arbitrary bytes. -/// -/// ``` -/// # if cfg!(miri) { return Ok(()); } // miri takes too long -/// use regex_automata::{ -/// dfa::regex::Regex, nfa::thompson, util::syntax, Match, -/// }; -/// -/// let re = Regex::builder() -/// .syntax(syntax::Config::new().utf8(false)) -/// .thompson(thompson::Config::new().utf8(false)) -/// .build(r"foo(?-u:[^b])ar.*")?; -/// let haystack = b"\xFEfoo\xFFarzz\xE2\x98\xFF\n"; -/// let expected = Some(Match::must(0, 1..9)); -/// let got = re.find(haystack); -/// assert_eq!(expected, got); -/// // Notice that `(?-u:[^b])` matches invalid UTF-8, -/// // but the subsequent `.*` does not! Disabling UTF-8 -/// // on the syntax permits this. -/// assert_eq!(b"foo\xFFarzz", &haystack[got.unwrap().range()]); -/// -/// # Ok::<(), Box>(()) -/// ``` -#[derive(Clone, Debug)] -pub struct Builder { - #[cfg(feature = "dfa-build")] - dfa: dense::Builder, -} - -impl Builder { - /// Create a new regex builder with the default configuration. - pub fn new() -> Builder { - Builder { - #[cfg(feature = "dfa-build")] - dfa: dense::Builder::new(), - } - } - - /// Build a regex from the given pattern. - /// - /// If there was a problem parsing or compiling the pattern, then an error - /// is returned. - #[cfg(all(feature = "syntax", feature = "dfa-build"))] - pub fn build(&self, pattern: &str) -> Result { - self.build_many(&[pattern]) - } - - /// Build a regex from the given pattern using sparse DFAs. - /// - /// If there was a problem parsing or compiling the pattern, then an error - /// is returned. - #[cfg(all(feature = "syntax", feature = "dfa-build"))] - pub fn build_sparse( - &self, - pattern: &str, - ) -> Result>>, BuildError> { - self.build_many_sparse(&[pattern]) - } - - /// Build a regex from the given patterns. - #[cfg(all(feature = "syntax", feature = "dfa-build"))] - pub fn build_many>( - &self, - patterns: &[P], - ) -> Result { - let forward = self.dfa.build_many(patterns)?; - let reverse = self - .dfa - .clone() - .configure( - dense::Config::new() - .prefilter(None) - .specialize_start_states(false) - .start_kind(StartKind::Anchored) - .match_kind(MatchKind::All), - ) - .thompson(crate::nfa::thompson::Config::new().reverse(true)) - .build_many(patterns)?; - Ok(self.build_from_dfas(forward, reverse)) - } - - /// Build a sparse regex from the given patterns. - #[cfg(all(feature = "syntax", feature = "dfa-build"))] - pub fn build_many_sparse>( - &self, - patterns: &[P], - ) -> Result>>, BuildError> { - let re = self.build_many(patterns)?; - let forward = re.forward().to_sparse()?; - let reverse = re.reverse().to_sparse()?; - Ok(self.build_from_dfas(forward, reverse)) - } - - /// Build a regex from its component forward and reverse DFAs. - /// - /// This is useful when deserializing a regex from some arbitrary - /// memory region. This is also useful for building regexes from other - /// types of DFAs. - /// - /// If you're building the DFAs from scratch instead of building new DFAs - /// from other DFAs, then you'll need to make sure that the reverse DFA is - /// configured correctly to match the intended semantics. Namely: - /// - /// * It should be anchored. 
- /// * It should use [`MatchKind::All`] semantics. - /// * It should match in reverse. - /// * Otherwise, its configuration should match the forward DFA. - /// - /// If these conditions aren't satisfied, then the behavior of searches is - /// unspecified. - /// - /// Note that when using this constructor, no configuration is applied. - /// Since this routine provides the DFAs to the builder, there is no - /// opportunity to apply other configuration options. - /// - /// # Example - /// - /// This example is a bit a contrived. The usual use of these methods - /// would involve serializing `initial_re` somewhere and then deserializing - /// it later to build a regex. But in this case, we do everything in - /// memory. - /// - /// ``` - /// use regex_automata::dfa::regex::Regex; - /// - /// let initial_re = Regex::new("foo[0-9]+")?; - /// assert_eq!(true, initial_re.is_match(b"foo123")); - /// - /// let (fwd, rev) = (initial_re.forward(), initial_re.reverse()); - /// let re = Regex::builder().build_from_dfas(fwd, rev); - /// assert_eq!(true, re.is_match(b"foo123")); - /// # Ok::<(), Box>(()) - /// ``` - /// - /// This example shows how to build a `Regex` that uses sparse DFAs instead - /// of dense DFAs without using one of the convenience `build_sparse` - /// routines: - /// - /// ``` - /// use regex_automata::dfa::regex::Regex; - /// - /// let initial_re = Regex::new("foo[0-9]+")?; - /// assert_eq!(true, initial_re.is_match(b"foo123")); - /// - /// let fwd = initial_re.forward().to_sparse()?; - /// let rev = initial_re.reverse().to_sparse()?; - /// let re = Regex::builder().build_from_dfas(fwd, rev); - /// assert_eq!(true, re.is_match(b"foo123")); - /// # Ok::<(), Box>(()) - /// ``` - pub fn build_from_dfas( - &self, - forward: A, - reverse: A, - ) -> Regex { - Regex { forward, reverse } - } - - /// Set the syntax configuration for this builder using - /// [`syntax::Config`](crate::util::syntax::Config). - /// - /// This permits setting things like case insensitivity, Unicode and multi - /// line mode. - #[cfg(all(feature = "syntax", feature = "dfa-build"))] - pub fn syntax( - &mut self, - config: crate::util::syntax::Config, - ) -> &mut Builder { - self.dfa.syntax(config); - self - } - - /// Set the Thompson NFA configuration for this builder using - /// [`nfa::thompson::Config`](crate::nfa::thompson::Config). - /// - /// This permits setting things like whether additional time should be - /// spent shrinking the size of the NFA. - #[cfg(all(feature = "syntax", feature = "dfa-build"))] - pub fn thompson( - &mut self, - config: crate::nfa::thompson::Config, - ) -> &mut Builder { - self.dfa.thompson(config); - self - } - - /// Set the dense DFA compilation configuration for this builder using - /// [`dense::Config`]. - /// - /// This permits setting things like whether the underlying DFAs should - /// be minimized. - #[cfg(feature = "dfa-build")] - pub fn dense(&mut self, config: dense::Config) -> &mut Builder { - self.dfa.configure(config); - self - } -} - -impl Default for Builder { - fn default() -> Builder { - Builder::new() - } -} diff --git a/vendor/regex-automata/src/dfa/remapper.rs b/vendor/regex-automata/src/dfa/remapper.rs deleted file mode 100644 index 6e496467210b94..00000000000000 --- a/vendor/regex-automata/src/dfa/remapper.rs +++ /dev/null @@ -1,242 +0,0 @@ -use alloc::vec::Vec; - -use crate::util::primitives::StateID; - -/// Remappable is a tightly coupled abstraction that facilitates remapping -/// state identifiers in DFAs. 
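As a small hedged sketch of the builder flow described above, the following combines a syntax-level option with a dense compilation option; the particular choices of `case_insensitive` and `minimize` are only illustrative:

use regex_automata::{
    dfa::{dense, regex::Regex},
    util::syntax,
    Match,
};

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let re = Regex::builder()
        // Case insensitivity is a syntax-level option.
        .syntax(syntax::Config::new().case_insensitive(true))
        // Minimization is a dense DFA compilation option.
        .dense(dense::Config::new().minimize(true))
        .build(r"foo[0-9]+")?;

    assert_eq!(Some(Match::must(0, 4..10)), re.find(b"zzz FOO123 zzz"));
    Ok(())
}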
-/// -/// The main idea behind remapping state IDs is that DFAs often need to check -/// if a certain state is a "special" state of some kind (like a match state) -/// during a search. Since this is extremely perf critical code, we want this -/// check to be as fast as possible. Partitioning state IDs into, for example, -/// into "non-match" and "match" states means one can tell if a state is a -/// match state via a simple comparison of the state ID. -/// -/// The issue is that during the DFA construction process, it's not -/// particularly easy to partition the states. Instead, the simplest thing is -/// to often just do a pass over all of the states and shuffle them into their -/// desired partitionings. To do that, we need a mechanism for swapping states. -/// Hence, this abstraction. -/// -/// Normally, for such little code, I would just duplicate it. But this is a -/// key optimization and the implementation is a bit subtle. So the abstraction -/// is basically a ham-fisted attempt at DRY. The only place we use this is in -/// the dense and one-pass DFAs. -/// -/// See also src/dfa/special.rs for a more detailed explanation of how dense -/// DFAs are partitioned. -pub(super) trait Remappable: core::fmt::Debug { - /// Return the total number of states. - fn state_len(&self) -> usize; - /// Return the power-of-2 exponent that yields the stride. The pertinent - /// laws here are, where N=stride2: 2^N=stride and len(alphabet) <= stride. - fn stride2(&self) -> usize; - /// Swap the states pointed to by the given IDs. The underlying finite - /// state machine should be mutated such that all of the transitions in - /// `id1` are now in the memory region where the transitions for `id2` - /// were, and all of the transitions in `id2` are now in the memory region - /// where the transitions for `id1` were. - /// - /// Essentially, this "moves" `id1` to `id2` and `id2` to `id1`. - /// - /// It is expected that, after calling this, the underlying value will be - /// left in an inconsistent state, since any other transitions pointing to, - /// e.g., `id1` need to be updated to point to `id2`, since that's where - /// `id1` moved to. - /// - /// In order to "fix" the underlying inconsistent state, a `Remapper` - /// should be used to guarantee that `remap` is called at the appropriate - /// time. - fn swap_states(&mut self, id1: StateID, id2: StateID); - /// This must remap every single state ID in the underlying value according - /// to the function given. For example, in a DFA, this should remap every - /// transition and every starting state ID. - fn remap(&mut self, map: impl Fn(StateID) -> StateID); -} - -/// Remapper is an abstraction the manages the remapping of state IDs in a -/// finite state machine. This is useful when one wants to shuffle states into -/// different positions in the machine. -/// -/// One of the key complexities this manages is the ability to correctly move -/// one state multiple times. -/// -/// Once shuffling is complete, `remap` must be called, which will rewrite -/// all pertinent transitions to updated state IDs. Neglecting to call `remap` -/// will almost certainly result in a corrupt machine. -#[derive(Debug)] -pub(super) struct Remapper { - /// A map from the index of a state to its pre-multiplied identifier. - /// - /// When a state is swapped with another, then their corresponding - /// locations in this map are also swapped. Thus, its new position will - /// still point to its old pre-multiplied StateID. 
- /// - /// While there is a bit more to it, this then allows us to rewrite the - /// state IDs in a DFA's transition table in a single pass. This is done - /// by iterating over every ID in this map, then iterating over each - /// transition for the state at that ID and re-mapping the transition from - /// `old_id` to `map[dfa.to_index(old_id)]`. That is, we find the position - /// in this map where `old_id` *started*, and set it to where it ended up - /// after all swaps have been completed. - map: Vec, - /// A mapper from state index to state ID (and back). - idxmap: IndexMapper, -} - -impl Remapper { - /// Create a new remapper from the given remappable implementation. The - /// remapper can then be used to swap states. The remappable value given - /// here must the same one given to `swap` and `remap`. - pub(super) fn new(r: &impl Remappable) -> Remapper { - let idxmap = IndexMapper { stride2: r.stride2() }; - let map = (0..r.state_len()).map(|i| idxmap.to_state_id(i)).collect(); - Remapper { map, idxmap } - } - - /// Swap two states. Once this is called, callers must follow through to - /// call `remap`, or else it's possible for the underlying remappable - /// value to be in a corrupt state. - pub(super) fn swap( - &mut self, - r: &mut impl Remappable, - id1: StateID, - id2: StateID, - ) { - if id1 == id2 { - return; - } - r.swap_states(id1, id2); - self.map.swap(self.idxmap.to_index(id1), self.idxmap.to_index(id2)); - } - - /// Complete the remapping process by rewriting all state IDs in the - /// remappable value according to the swaps performed. - pub(super) fn remap(mut self, r: &mut impl Remappable) { - // Update the map to account for states that have been swapped - // multiple times. For example, if (A, C) and (C, G) are swapped, then - // transitions previously pointing to A should now point to G. But if - // we don't update our map, they will erroneously be set to C. All we - // do is follow the swaps in our map until we see our original state - // ID. - // - // The intuition here is to think about how changes are made to the - // map: only through pairwise swaps. That means that starting at any - // given state, it is always possible to find the loop back to that - // state by following the swaps represented in the map (which might be - // 0 swaps). - // - // We are also careful to clone the map before starting in order to - // freeze it. We use the frozen map to find our loops, since we need to - // update our map as well. Without freezing it, our updates could break - // the loops referenced above and produce incorrect results. - let oldmap = self.map.clone(); - for i in 0..r.state_len() { - let cur_id = self.idxmap.to_state_id(i); - let mut new_id = oldmap[i]; - if cur_id == new_id { - continue; - } - loop { - let id = oldmap[self.idxmap.to_index(new_id)]; - if cur_id == id { - self.map[i] = new_id; - break; - } - new_id = id; - } - } - r.remap(|next| self.map[self.idxmap.to_index(next)]); - } -} - -/// A simple type for mapping between state indices and state IDs. -/// -/// The reason why this exists is because state IDs are "premultiplied." That -/// is, in order to get to the transitions for a particular state, one need -/// only use the state ID as-is, instead of having to multiple it by transition -/// table's stride. -/// -/// The downside of this is that it's inconvenient to map between state IDs -/// using a dense map, e.g., Vec. That's because state IDs look like -/// `0`, `0+stride`, `0+2*stride`, `0+3*stride`, etc., instead of `0`, `1`, -/// `2`, `3`, etc. 
-/// -/// Since our state IDs are premultiplied, we can convert back-and-forth -/// between IDs and indices by simply unmultiplying the IDs and multiplying the -/// indices. -#[derive(Debug)] -struct IndexMapper { - /// The power of 2 corresponding to the stride of the corresponding - /// transition table. 'id >> stride2' de-multiplies an ID while 'index << - /// stride2' pre-multiplies an index to an ID. - stride2: usize, -} - -impl IndexMapper { - /// Convert a state ID to a state index. - fn to_index(&self, id: StateID) -> usize { - id.as_usize() >> self.stride2 - } - - /// Convert a state index to a state ID. - fn to_state_id(&self, index: usize) -> StateID { - // CORRECTNESS: If the given index is not valid, then it is not - // required for this to panic or return a valid state ID. We'll "just" - // wind up with panics or silent logic errors at some other point. - StateID::new_unchecked(index << self.stride2) - } -} - -#[cfg(feature = "dfa-build")] -mod dense { - use crate::{dfa::dense::OwnedDFA, util::primitives::StateID}; - - use super::Remappable; - - impl Remappable for OwnedDFA { - fn state_len(&self) -> usize { - OwnedDFA::state_len(self) - } - - fn stride2(&self) -> usize { - OwnedDFA::stride2(self) - } - - fn swap_states(&mut self, id1: StateID, id2: StateID) { - OwnedDFA::swap_states(self, id1, id2) - } - - fn remap(&mut self, map: impl Fn(StateID) -> StateID) { - OwnedDFA::remap(self, map) - } - } -} - -#[cfg(feature = "dfa-onepass")] -mod onepass { - use crate::{dfa::onepass::DFA, util::primitives::StateID}; - - use super::Remappable; - - impl Remappable for DFA { - fn state_len(&self) -> usize { - DFA::state_len(self) - } - - fn stride2(&self) -> usize { - // We don't do pre-multiplication for the one-pass DFA, so - // returning 0 has the effect of making state IDs and state indices - // equivalent. - 0 - } - - fn swap_states(&mut self, id1: StateID, id2: StateID) { - DFA::swap_states(self, id1, id2) - } - - fn remap(&mut self, map: impl Fn(StateID) -> StateID) { - DFA::remap(self, map) - } - } -} diff --git a/vendor/regex-automata/src/dfa/search.rs b/vendor/regex-automata/src/dfa/search.rs deleted file mode 100644 index 5a82261f970f11..00000000000000 --- a/vendor/regex-automata/src/dfa/search.rs +++ /dev/null @@ -1,644 +0,0 @@ -use crate::{ - dfa::{ - accel, - automaton::{Automaton, OverlappingState}, - }, - util::{ - prefilter::Prefilter, - primitives::StateID, - search::{Anchored, HalfMatch, Input, Span}, - }, - MatchError, -}; - -#[inline(never)] -pub fn find_fwd( - dfa: &A, - input: &Input<'_>, -) -> Result, MatchError> { - if input.is_done() { - return Ok(None); - } - let pre = if input.get_anchored().is_anchored() { - None - } else { - dfa.get_prefilter() - }; - // Searching with a pattern ID is always anchored, so we should never use - // a prefilter. - if pre.is_some() { - if input.get_earliest() { - find_fwd_imp(dfa, input, pre, true) - } else { - find_fwd_imp(dfa, input, pre, false) - } - } else { - if input.get_earliest() { - find_fwd_imp(dfa, input, None, true) - } else { - find_fwd_imp(dfa, input, None, false) - } - } -} - -#[cfg_attr(feature = "perf-inline", inline(always))] -fn find_fwd_imp( - dfa: &A, - input: &Input<'_>, - pre: Option<&'_ Prefilter>, - earliest: bool, -) -> Result, MatchError> { - // See 'prefilter_restart' docs for explanation. 
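`IndexMapper` is private to the crate, so it cannot be exercised directly; the standalone snippet below only illustrates the premultiplication arithmetic described above, using a made-up stride of 2^9 (stride2 = 9):

fn main() {
    let stride2 = 9usize;

    // Index -> premultiplied state ID: shift left by stride2 (multiply by the stride).
    let to_state_id = |index: usize| index << stride2;
    // Premultiplied state ID -> index: shift right by stride2 (divide by the stride).
    let to_index = |id: usize| id >> stride2;

    // State indices 0, 1, 2, 3 become IDs 0, 512, 1024, 1536, ...
    let ids: Vec<usize> = (0..4).map(to_state_id).collect();
    assert_eq!(ids, vec![0, 512, 1024, 1536]);
    // ... and converting back recovers the dense indices 0, 1, 2, 3.
    let idxs: Vec<usize> = ids.into_iter().map(to_index).collect();
    assert_eq!(idxs, vec![0, 1, 2, 3]);
}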
- let universal_start = dfa.universal_start_state(Anchored::No).is_some(); - let mut mat = None; - let mut sid = init_fwd(dfa, input)?; - let mut at = input.start(); - // This could just be a closure, but then I think it would be unsound - // because it would need to be safe to invoke. This way, the lack of safety - // is clearer in the code below. - macro_rules! next_unchecked { - ($sid:expr, $at:expr) => {{ - let byte = *input.haystack().get_unchecked($at); - dfa.next_state_unchecked($sid, byte) - }}; - } - - if let Some(ref pre) = pre { - let span = Span::from(at..input.end()); - // If a prefilter doesn't report false positives, then we don't need to - // touch the DFA at all. However, since all matches include the pattern - // ID, and the prefilter infrastructure doesn't report pattern IDs, we - // limit this optimization to cases where there is exactly one pattern. - // In that case, any match must be the 0th pattern. - match pre.find(input.haystack(), span) { - None => return Ok(mat), - Some(ref span) => { - at = span.start; - if !universal_start { - sid = prefilter_restart(dfa, &input, at)?; - } - } - } - } - while at < input.end() { - // SAFETY: There are two safety invariants we need to uphold here in - // the loops below: that 'sid' and 'prev_sid' are valid state IDs - // for this DFA, and that 'at' is a valid index into 'haystack'. - // For the former, we rely on the invariant that next_state* and - // start_state_forward always returns a valid state ID (given a valid - // state ID in the former case). For the latter safety invariant, we - // always guard unchecked access with a check that 'at' is less than - // 'end', where 'end <= haystack.len()'. In the unrolled loop below, we - // ensure that 'at' is always in bounds. - // - // PERF: See a similar comment in src/hybrid/search.rs that justifies - // this extra work to make the search loop fast. The same reasoning and - // benchmarks apply here. - let mut prev_sid; - while at < input.end() { - prev_sid = unsafe { next_unchecked!(sid, at) }; - if dfa.is_special_state(prev_sid) || at + 3 >= input.end() { - core::mem::swap(&mut prev_sid, &mut sid); - break; - } - at += 1; - - sid = unsafe { next_unchecked!(prev_sid, at) }; - if dfa.is_special_state(sid) { - break; - } - at += 1; - - prev_sid = unsafe { next_unchecked!(sid, at) }; - if dfa.is_special_state(prev_sid) { - core::mem::swap(&mut prev_sid, &mut sid); - break; - } - at += 1; - - sid = unsafe { next_unchecked!(prev_sid, at) }; - if dfa.is_special_state(sid) { - break; - } - at += 1; - } - if dfa.is_special_state(sid) { - if dfa.is_start_state(sid) { - if let Some(ref pre) = pre { - let span = Span::from(at..input.end()); - match pre.find(input.haystack(), span) { - None => return Ok(mat), - Some(ref span) => { - // We want to skip any update to 'at' below - // at the end of this iteration and just - // jump immediately back to the next state - // transition at the leading position of the - // candidate match. - // - // ... but only if we actually made progress - // with our prefilter, otherwise if the start - // state has a self-loop, we can get stuck. 
- if span.start > at { - at = span.start; - if !universal_start { - sid = prefilter_restart(dfa, &input, at)?; - } - continue; - } - } - } - } else if dfa.is_accel_state(sid) { - let needles = dfa.accelerator(sid); - at = accel::find_fwd(needles, input.haystack(), at + 1) - .unwrap_or(input.end()); - continue; - } - } else if dfa.is_match_state(sid) { - let pattern = dfa.match_pattern(sid, 0); - mat = Some(HalfMatch::new(pattern, at)); - if earliest { - return Ok(mat); - } - if dfa.is_accel_state(sid) { - let needles = dfa.accelerator(sid); - at = accel::find_fwd(needles, input.haystack(), at + 1) - .unwrap_or(input.end()); - continue; - } - } else if dfa.is_accel_state(sid) { - let needs = dfa.accelerator(sid); - at = accel::find_fwd(needs, input.haystack(), at + 1) - .unwrap_or(input.end()); - continue; - } else if dfa.is_dead_state(sid) { - return Ok(mat); - } else { - // It's important that this is a debug_assert, since this can - // actually be tripped even if DFA::from_bytes succeeds and - // returns a supposedly valid DFA. - return Err(MatchError::quit(input.haystack()[at], at)); - } - } - at += 1; - } - eoi_fwd(dfa, input, &mut sid, &mut mat)?; - Ok(mat) -} - -#[inline(never)] -pub fn find_rev( - dfa: &A, - input: &Input<'_>, -) -> Result, MatchError> { - if input.is_done() { - return Ok(None); - } - if input.get_earliest() { - find_rev_imp(dfa, input, true) - } else { - find_rev_imp(dfa, input, false) - } -} - -#[cfg_attr(feature = "perf-inline", inline(always))] -fn find_rev_imp( - dfa: &A, - input: &Input<'_>, - earliest: bool, -) -> Result, MatchError> { - let mut mat = None; - let mut sid = init_rev(dfa, input)?; - // In reverse search, the loop below can't handle the case of searching an - // empty slice. Ideally we could write something congruent to the forward - // search, i.e., 'while at >= start', but 'start' might be 0. Since we use - // an unsigned offset, 'at >= 0' is trivially always true. We could avoid - // this extra case handling by using a signed offset, but Rust makes it - // annoying to do. So... We just handle the empty case separately. - if input.start() == input.end() { - eoi_rev(dfa, input, &mut sid, &mut mat)?; - return Ok(mat); - } - - let mut at = input.end() - 1; - macro_rules! next_unchecked { - ($sid:expr, $at:expr) => {{ - let byte = *input.haystack().get_unchecked($at); - dfa.next_state_unchecked($sid, byte) - }}; - } - loop { - // SAFETY: See comments in 'find_fwd' for a safety argument. 
- let mut prev_sid; - while at >= input.start() { - prev_sid = unsafe { next_unchecked!(sid, at) }; - if dfa.is_special_state(prev_sid) - || at <= input.start().saturating_add(3) - { - core::mem::swap(&mut prev_sid, &mut sid); - break; - } - at -= 1; - - sid = unsafe { next_unchecked!(prev_sid, at) }; - if dfa.is_special_state(sid) { - break; - } - at -= 1; - - prev_sid = unsafe { next_unchecked!(sid, at) }; - if dfa.is_special_state(prev_sid) { - core::mem::swap(&mut prev_sid, &mut sid); - break; - } - at -= 1; - - sid = unsafe { next_unchecked!(prev_sid, at) }; - if dfa.is_special_state(sid) { - break; - } - at -= 1; - } - if dfa.is_special_state(sid) { - if dfa.is_start_state(sid) { - if dfa.is_accel_state(sid) { - let needles = dfa.accelerator(sid); - at = accel::find_rev(needles, input.haystack(), at) - .map(|i| i + 1) - .unwrap_or(input.start()); - } - } else if dfa.is_match_state(sid) { - let pattern = dfa.match_pattern(sid, 0); - // Since reverse searches report the beginning of a match - // and the beginning is inclusive (not exclusive like the - // end of a match), we add 1 to make it inclusive. - mat = Some(HalfMatch::new(pattern, at + 1)); - if earliest { - return Ok(mat); - } - if dfa.is_accel_state(sid) { - let needles = dfa.accelerator(sid); - at = accel::find_rev(needles, input.haystack(), at) - .map(|i| i + 1) - .unwrap_or(input.start()); - } - } else if dfa.is_accel_state(sid) { - let needles = dfa.accelerator(sid); - // If the accelerator returns nothing, why don't we quit the - // search? Well, if the accelerator doesn't find anything, that - // doesn't mean we don't have a match. It just means that we - // can't leave the current state given one of the 255 possible - // byte values. However, there might be an EOI transition. So - // we set 'at' to the end of the haystack, which will cause - // this loop to stop and fall down into the EOI transition. - at = accel::find_rev(needles, input.haystack(), at) - .map(|i| i + 1) - .unwrap_or(input.start()); - } else if dfa.is_dead_state(sid) { - return Ok(mat); - } else { - return Err(MatchError::quit(input.haystack()[at], at)); - } - } - if at == input.start() { - break; - } - at -= 1; - } - eoi_rev(dfa, input, &mut sid, &mut mat)?; - Ok(mat) -} - -#[inline(never)] -pub fn find_overlapping_fwd( - dfa: &A, - input: &Input<'_>, - state: &mut OverlappingState, -) -> Result<(), MatchError> { - state.mat = None; - if input.is_done() { - return Ok(()); - } - let pre = if input.get_anchored().is_anchored() { - None - } else { - dfa.get_prefilter() - }; - if pre.is_some() { - find_overlapping_fwd_imp(dfa, input, pre, state) - } else { - find_overlapping_fwd_imp(dfa, input, None, state) - } -} - -#[cfg_attr(feature = "perf-inline", inline(always))] -fn find_overlapping_fwd_imp( - dfa: &A, - input: &Input<'_>, - pre: Option<&'_ Prefilter>, - state: &mut OverlappingState, -) -> Result<(), MatchError> { - // See 'prefilter_restart' docs for explanation. - let universal_start = dfa.universal_start_state(Anchored::No).is_some(); - let mut sid = match state.id { - None => { - state.at = input.start(); - init_fwd(dfa, input)? 
- } - Some(sid) => { - if let Some(match_index) = state.next_match_index { - let match_len = dfa.match_len(sid); - if match_index < match_len { - state.next_match_index = Some(match_index + 1); - let pattern = dfa.match_pattern(sid, match_index); - state.mat = Some(HalfMatch::new(pattern, state.at)); - return Ok(()); - } - } - // Once we've reported all matches at a given position, we need to - // advance the search to the next position. - state.at += 1; - if state.at > input.end() { - return Ok(()); - } - sid - } - }; - - // NOTE: We don't optimize the crap out of this routine primarily because - // it seems like most find_overlapping searches will have higher match - // counts, and thus, throughput is perhaps not as important. But if you - // have a use case for something faster, feel free to file an issue. - while state.at < input.end() { - sid = dfa.next_state(sid, input.haystack()[state.at]); - if dfa.is_special_state(sid) { - state.id = Some(sid); - if dfa.is_start_state(sid) { - if let Some(ref pre) = pre { - let span = Span::from(state.at..input.end()); - match pre.find(input.haystack(), span) { - None => return Ok(()), - Some(ref span) => { - if span.start > state.at { - state.at = span.start; - if !universal_start { - sid = prefilter_restart( - dfa, &input, state.at, - )?; - } - continue; - } - } - } - } else if dfa.is_accel_state(sid) { - let needles = dfa.accelerator(sid); - state.at = accel::find_fwd( - needles, - input.haystack(), - state.at + 1, - ) - .unwrap_or(input.end()); - continue; - } - } else if dfa.is_match_state(sid) { - state.next_match_index = Some(1); - let pattern = dfa.match_pattern(sid, 0); - state.mat = Some(HalfMatch::new(pattern, state.at)); - return Ok(()); - } else if dfa.is_accel_state(sid) { - let needs = dfa.accelerator(sid); - // If the accelerator returns nothing, why don't we quit the - // search? Well, if the accelerator doesn't find anything, that - // doesn't mean we don't have a match. It just means that we - // can't leave the current state given one of the 255 possible - // byte values. However, there might be an EOI transition. So - // we set 'at' to the end of the haystack, which will cause - // this loop to stop and fall down into the EOI transition. - state.at = - accel::find_fwd(needs, input.haystack(), state.at + 1) - .unwrap_or(input.end()); - continue; - } else if dfa.is_dead_state(sid) { - return Ok(()); - } else { - return Err(MatchError::quit( - input.haystack()[state.at], - state.at, - )); - } - } - state.at += 1; - } - - let result = eoi_fwd(dfa, input, &mut sid, &mut state.mat); - state.id = Some(sid); - if state.mat.is_some() { - // '1' is always correct here since if we get to this point, this - // always corresponds to the first (index '0') match discovered at - // this position. So the next match to report at this position (if - // it exists) is at index '1'. 
- state.next_match_index = Some(1); - } - result -} - -#[inline(never)] -pub(crate) fn find_overlapping_rev( - dfa: &A, - input: &Input<'_>, - state: &mut OverlappingState, -) -> Result<(), MatchError> { - state.mat = None; - if input.is_done() { - return Ok(()); - } - let mut sid = match state.id { - None => { - let sid = init_rev(dfa, input)?; - state.id = Some(sid); - if input.start() == input.end() { - state.rev_eoi = true; - } else { - state.at = input.end() - 1; - } - sid - } - Some(sid) => { - if let Some(match_index) = state.next_match_index { - let match_len = dfa.match_len(sid); - if match_index < match_len { - state.next_match_index = Some(match_index + 1); - let pattern = dfa.match_pattern(sid, match_index); - state.mat = Some(HalfMatch::new(pattern, state.at)); - return Ok(()); - } - } - // Once we've reported all matches at a given position, we need - // to advance the search to the next position. However, if we've - // already followed the EOI transition, then we know we're done - // with the search and there cannot be any more matches to report. - if state.rev_eoi { - return Ok(()); - } else if state.at == input.start() { - // At this point, we should follow the EOI transition. This - // will cause us the skip the main loop below and fall through - // to the final 'eoi_rev' transition. - state.rev_eoi = true; - } else { - // We haven't hit the end of the search yet, so move on. - state.at -= 1; - } - sid - } - }; - while !state.rev_eoi { - sid = dfa.next_state(sid, input.haystack()[state.at]); - if dfa.is_special_state(sid) { - state.id = Some(sid); - if dfa.is_start_state(sid) { - if dfa.is_accel_state(sid) { - let needles = dfa.accelerator(sid); - state.at = - accel::find_rev(needles, input.haystack(), state.at) - .map(|i| i + 1) - .unwrap_or(input.start()); - } - } else if dfa.is_match_state(sid) { - state.next_match_index = Some(1); - let pattern = dfa.match_pattern(sid, 0); - state.mat = Some(HalfMatch::new(pattern, state.at + 1)); - return Ok(()); - } else if dfa.is_accel_state(sid) { - let needles = dfa.accelerator(sid); - // If the accelerator returns nothing, why don't we quit the - // search? Well, if the accelerator doesn't find anything, that - // doesn't mean we don't have a match. It just means that we - // can't leave the current state given one of the 255 possible - // byte values. However, there might be an EOI transition. So - // we set 'at' to the end of the haystack, which will cause - // this loop to stop and fall down into the EOI transition. - state.at = - accel::find_rev(needles, input.haystack(), state.at) - .map(|i| i + 1) - .unwrap_or(input.start()); - } else if dfa.is_dead_state(sid) { - return Ok(()); - } else { - return Err(MatchError::quit( - input.haystack()[state.at], - state.at, - )); - } - } - if state.at == input.start() { - break; - } - state.at -= 1; - } - - let result = eoi_rev(dfa, input, &mut sid, &mut state.mat); - state.rev_eoi = true; - state.id = Some(sid); - if state.mat.is_some() { - // '1' is always correct here since if we get to this point, this - // always corresponds to the first (index '0') match discovered at - // this position. So the next match to report at this position (if - // it exists) is at index '1'. - state.next_match_index = Some(1); - } - result -} - -#[cfg_attr(feature = "perf-inline", inline(always))] -fn init_fwd( - dfa: &A, - input: &Input<'_>, -) -> Result { - let sid = dfa.start_state_forward(input)?; - // Start states can never be match states, since all matches are delayed - // by 1 byte. 
- debug_assert!(!dfa.is_match_state(sid)); - Ok(sid) -} - -#[cfg_attr(feature = "perf-inline", inline(always))] -fn init_rev( - dfa: &A, - input: &Input<'_>, -) -> Result { - let sid = dfa.start_state_reverse(input)?; - // Start states can never be match states, since all matches are delayed - // by 1 byte. - debug_assert!(!dfa.is_match_state(sid)); - Ok(sid) -} - -#[cfg_attr(feature = "perf-inline", inline(always))] -fn eoi_fwd( - dfa: &A, - input: &Input<'_>, - sid: &mut StateID, - mat: &mut Option, -) -> Result<(), MatchError> { - let sp = input.get_span(); - match input.haystack().get(sp.end) { - Some(&b) => { - *sid = dfa.next_state(*sid, b); - if dfa.is_match_state(*sid) { - let pattern = dfa.match_pattern(*sid, 0); - *mat = Some(HalfMatch::new(pattern, sp.end)); - } else if dfa.is_quit_state(*sid) { - return Err(MatchError::quit(b, sp.end)); - } - } - None => { - *sid = dfa.next_eoi_state(*sid); - if dfa.is_match_state(*sid) { - let pattern = dfa.match_pattern(*sid, 0); - *mat = Some(HalfMatch::new(pattern, input.haystack().len())); - } - } - } - Ok(()) -} - -#[cfg_attr(feature = "perf-inline", inline(always))] -fn eoi_rev( - dfa: &A, - input: &Input<'_>, - sid: &mut StateID, - mat: &mut Option, -) -> Result<(), MatchError> { - let sp = input.get_span(); - if sp.start > 0 { - let byte = input.haystack()[sp.start - 1]; - *sid = dfa.next_state(*sid, byte); - if dfa.is_match_state(*sid) { - let pattern = dfa.match_pattern(*sid, 0); - *mat = Some(HalfMatch::new(pattern, sp.start)); - } else if dfa.is_quit_state(*sid) { - return Err(MatchError::quit(byte, sp.start - 1)); - } - } else { - *sid = dfa.next_eoi_state(*sid); - if dfa.is_match_state(*sid) { - let pattern = dfa.match_pattern(*sid, 0); - *mat = Some(HalfMatch::new(pattern, 0)); - } - } - Ok(()) -} - -/// Re-compute the starting state that a DFA should be in after finding a -/// prefilter candidate match at the position `at`. -/// -/// The function with the same name has a bit more docs in hybrid/search.rs. -#[cfg_attr(feature = "perf-inline", inline(always))] -fn prefilter_restart( - dfa: &A, - input: &Input<'_>, - at: usize, -) -> Result { - let mut input = input.clone(); - input.set_start(at); - init_fwd(dfa, &input) -} diff --git a/vendor/regex-automata/src/dfa/sparse.rs b/vendor/regex-automata/src/dfa/sparse.rs deleted file mode 100644 index 5de00aca401276..00000000000000 --- a/vendor/regex-automata/src/dfa/sparse.rs +++ /dev/null @@ -1,2655 +0,0 @@ -/*! -Types and routines specific to sparse DFAs. - -This module is the home of [`sparse::DFA`](DFA). - -Unlike the [`dense`] module, this module does not contain a builder or -configuration specific for sparse DFAs. Instead, the intended way to build a -sparse DFA is either by using a default configuration with its constructor -[`sparse::DFA::new`](DFA::new), or by first configuring the construction of a -dense DFA with [`dense::Builder`] and then calling [`dense::DFA::to_sparse`]. -For example, this configures a sparse DFA to do an overlapping search: - -``` -use regex_automata::{ - dfa::{Automaton, OverlappingState, dense}, - HalfMatch, Input, MatchKind, -}; - -let dense_re = dense::Builder::new() - .configure(dense::Config::new().match_kind(MatchKind::All)) - .build(r"Samwise|Sam")?; -let sparse_re = dense_re.to_sparse()?; - -// Setup our haystack and initial start state. -let input = Input::new("Samwise"); -let mut state = OverlappingState::start(); - -// First, 'Sam' will match. 
-sparse_re.try_search_overlapping_fwd(&input, &mut state)?; -assert_eq!(Some(HalfMatch::must(0, 3)), state.get_match()); - -// And now 'Samwise' will match. -sparse_re.try_search_overlapping_fwd(&input, &mut state)?; -assert_eq!(Some(HalfMatch::must(0, 7)), state.get_match()); -# Ok::<(), Box>(()) -``` -*/ - -#[cfg(feature = "dfa-build")] -use core::iter; -use core::{fmt, mem::size_of}; - -#[cfg(feature = "dfa-build")] -use alloc::{vec, vec::Vec}; - -#[cfg(feature = "dfa-build")] -use crate::dfa::dense::{self, BuildError}; -use crate::{ - dfa::{ - automaton::{fmt_state_indicator, Automaton, StartError}, - dense::Flags, - special::Special, - StartKind, DEAD, - }, - util::{ - alphabet::{ByteClasses, ByteSet}, - escape::DebugByte, - int::{Pointer, Usize, U16, U32}, - prefilter::Prefilter, - primitives::{PatternID, StateID}, - search::Anchored, - start::{self, Start, StartByteMap}, - wire::{self, DeserializeError, Endian, SerializeError}, - }, -}; - -const LABEL: &str = "rust-regex-automata-dfa-sparse"; -const VERSION: u32 = 2; - -/// A sparse deterministic finite automaton (DFA) with variable sized states. -/// -/// In contrast to a [dense::DFA], a sparse DFA uses a more space efficient -/// representation for its transitions. Consequently, sparse DFAs may use much -/// less memory than dense DFAs, but this comes at a price. In particular, -/// reading the more space efficient transitions takes more work, and -/// consequently, searching using a sparse DFA is typically slower than a dense -/// DFA. -/// -/// A sparse DFA can be built using the default configuration via the -/// [`DFA::new`] constructor. Otherwise, one can configure various aspects of a -/// dense DFA via [`dense::Builder`], and then convert a dense DFA to a sparse -/// DFA using [`dense::DFA::to_sparse`]. -/// -/// In general, a sparse DFA supports all the same search operations as a dense -/// DFA. -/// -/// Making the choice between a dense and sparse DFA depends on your specific -/// work load. If you can sacrifice a bit of search time performance, then a -/// sparse DFA might be the best choice. In particular, while sparse DFAs are -/// probably always slower than dense DFAs, you may find that they are easily -/// fast enough for your purposes! -/// -/// # Type parameters -/// -/// A `DFA` has one type parameter, `T`, which is used to represent the parts -/// of a sparse DFA. `T` is typically a `Vec` or a `&[u8]`. -/// -/// # The `Automaton` trait -/// -/// This type implements the [`Automaton`] trait, which means it can be used -/// for searching. For example: -/// -/// ``` -/// use regex_automata::{dfa::{Automaton, sparse::DFA}, HalfMatch, Input}; -/// -/// let dfa = DFA::new("foo[0-9]+")?; -/// let expected = Some(HalfMatch::must(0, 8)); -/// assert_eq!(expected, dfa.try_search_fwd(&Input::new("foo12345"))?); -/// # Ok::<(), Box>(()) -/// ``` -#[derive(Clone)] -pub struct DFA { - // When compared to a dense DFA, a sparse DFA *looks* a lot simpler - // representation-wise. In reality, it is perhaps more complicated. Namely, - // in a dense DFA, all information needs to be very cheaply accessible - // using only state IDs. In a sparse DFA however, each state uses a - // variable amount of space because each state encodes more information - // than just its transitions. Each state also includes an accelerator if - // one exists, along with the matching pattern IDs if the state is a match - // state. - // - // That is, a lot of the complexity is pushed down into how each state - // itself is represented. 
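A short sketch of the dense-to-sparse trade-off described above: both representations answer the same searches, and `memory_usage` exposes how much heap each one takes (the pattern is an arbitrary example; actual sizes depend on the regex):

use regex_automata::{
    dfa::{dense, Automaton},
    HalfMatch, Input,
};

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let dense_dfa = dense::DFA::new(r"foo[0-9]+")?;
    let sparse_dfa = dense_dfa.to_sparse()?;

    // Both DFAs report the same half match; only the representation differs.
    let input = Input::new("foo12345");
    let expected = Some(HalfMatch::must(0, 8));
    assert_eq!(expected, dense_dfa.try_search_fwd(&input)?);
    assert_eq!(expected, sparse_dfa.try_search_fwd(&input)?);

    println!(
        "dense: {} bytes, sparse: {} bytes",
        dense_dfa.memory_usage(),
        sparse_dfa.memory_usage(),
    );
    Ok(())
}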
- tt: Transitions, - st: StartTable, - special: Special, - pre: Option, - quitset: ByteSet, - flags: Flags, -} - -#[cfg(feature = "dfa-build")] -impl DFA> { - /// Parse the given regular expression using a default configuration and - /// return the corresponding sparse DFA. - /// - /// If you want a non-default configuration, then use the - /// [`dense::Builder`] to set your own configuration, and then call - /// [`dense::DFA::to_sparse`] to create a sparse DFA. - /// - /// # Example - /// - /// ``` - /// use regex_automata::{dfa::{Automaton, sparse}, HalfMatch, Input}; - /// - /// let dfa = sparse::DFA::new("foo[0-9]+bar")?; - /// - /// let expected = Some(HalfMatch::must(0, 11)); - /// assert_eq!(expected, dfa.try_search_fwd(&Input::new("foo12345bar"))?); - /// # Ok::<(), Box>(()) - /// ``` - #[cfg(feature = "syntax")] - pub fn new(pattern: &str) -> Result>, BuildError> { - dense::Builder::new() - .build(pattern) - .and_then(|dense| dense.to_sparse()) - } - - /// Parse the given regular expressions using a default configuration and - /// return the corresponding multi-DFA. - /// - /// If you want a non-default configuration, then use the - /// [`dense::Builder`] to set your own configuration, and then call - /// [`dense::DFA::to_sparse`] to create a sparse DFA. - /// - /// # Example - /// - /// ``` - /// use regex_automata::{dfa::{Automaton, sparse}, HalfMatch, Input}; - /// - /// let dfa = sparse::DFA::new_many(&["[0-9]+", "[a-z]+"])?; - /// let expected = Some(HalfMatch::must(1, 3)); - /// assert_eq!(expected, dfa.try_search_fwd(&Input::new("foo12345bar"))?); - /// # Ok::<(), Box>(()) - /// ``` - #[cfg(feature = "syntax")] - pub fn new_many>( - patterns: &[P], - ) -> Result>, BuildError> { - dense::Builder::new() - .build_many(patterns) - .and_then(|dense| dense.to_sparse()) - } -} - -#[cfg(feature = "dfa-build")] -impl DFA> { - /// Create a new DFA that matches every input. - /// - /// # Example - /// - /// ``` - /// use regex_automata::{ - /// dfa::{Automaton, sparse}, - /// HalfMatch, Input, - /// }; - /// - /// let dfa = sparse::DFA::always_match()?; - /// - /// let expected = Some(HalfMatch::must(0, 0)); - /// assert_eq!(expected, dfa.try_search_fwd(&Input::new(""))?); - /// assert_eq!(expected, dfa.try_search_fwd(&Input::new("foo"))?); - /// # Ok::<(), Box>(()) - /// ``` - pub fn always_match() -> Result>, BuildError> { - dense::DFA::always_match()?.to_sparse() - } - - /// Create a new sparse DFA that never matches any input. - /// - /// # Example - /// - /// ``` - /// use regex_automata::{dfa::{Automaton, sparse}, Input}; - /// - /// let dfa = sparse::DFA::never_match()?; - /// assert_eq!(None, dfa.try_search_fwd(&Input::new(""))?); - /// assert_eq!(None, dfa.try_search_fwd(&Input::new("foo"))?); - /// # Ok::<(), Box>(()) - /// ``` - pub fn never_match() -> Result>, BuildError> { - dense::DFA::never_match()?.to_sparse() - } - - /// The implementation for constructing a sparse DFA from a dense DFA. - pub(crate) fn from_dense>( - dfa: &dense::DFA, - ) -> Result>, BuildError> { - // In order to build the transition table, we need to be able to write - // state identifiers for each of the "next" transitions in each state. - // Our state identifiers correspond to the byte offset in the - // transition table at which the state is encoded. Therefore, we do not - // actually know what the state identifiers are until we've allocated - // exactly as much space as we need for each state. Thus, construction - // of the transition table happens in two passes. 
- // - // In the first pass, we fill out the shell of each state, which - // includes the transition length, the input byte ranges and - // zero-filled space for the transitions and accelerators, if present. - // In this first pass, we also build up a map from the state identifier - // index of the dense DFA to the state identifier in this sparse DFA. - // - // In the second pass, we fill in the transitions based on the map - // built in the first pass. - - // The capacity given here reflects a minimum. (Well, the true minimum - // is likely even bigger, but hopefully this saves a few reallocs.) - let mut sparse = Vec::with_capacity(StateID::SIZE * dfa.state_len()); - // This maps state indices from the dense DFA to StateIDs in the sparse - // DFA. We build out this map on the first pass, and then use it in the - // second pass to back-fill our transitions. - let mut remap: Vec = vec![DEAD; dfa.state_len()]; - for state in dfa.states() { - let pos = sparse.len(); - - remap[dfa.to_index(state.id())] = StateID::new(pos) - .map_err(|_| BuildError::too_many_states())?; - // zero-filled space for the transition length - sparse.push(0); - sparse.push(0); - - let mut transition_len = 0; - for (unit1, unit2, _) in state.sparse_transitions() { - match (unit1.as_u8(), unit2.as_u8()) { - (Some(b1), Some(b2)) => { - transition_len += 1; - sparse.push(b1); - sparse.push(b2); - } - (None, None) => {} - (Some(_), None) | (None, Some(_)) => { - // can never occur because sparse_transitions never - // groups EOI with any other transition. - unreachable!() - } - } - } - // Add dummy EOI transition. This is never actually read while - // searching, but having space equivalent to the total number - // of transitions is convenient. Otherwise, we'd need to track - // a different number of transitions for the byte ranges as for - // the 'next' states. - // - // N.B. The loop above is not guaranteed to yield the EOI - // transition, since it may point to a DEAD state. By putting - // it here, we always write the EOI transition, and thus - // guarantee that our transition length is >0. Why do we always - // need the EOI transition? Because in order to implement - // Automaton::next_eoi_state, this lets us just ask for the last - // transition. There are probably other/better ways to do this. - transition_len += 1; - sparse.push(0); - sparse.push(0); - - // Check some assumptions about transition length. - assert_ne!( - transition_len, 0, - "transition length should be non-zero", - ); - assert!( - transition_len <= 257, - "expected transition length {transition_len} to be <= 257", - ); - - // Fill in the transition length. - // Since transition length is always <= 257, we use the most - // significant bit to indicate whether this is a match state or - // not. - let ntrans = if dfa.is_match_state(state.id()) { - transition_len | (1 << 15) - } else { - transition_len - }; - wire::NE::write_u16(ntrans, &mut sparse[pos..]); - - // zero-fill the actual transitions. - // Unwraps are OK since transition_length <= 257 and our minimum - // support usize size is 16-bits. - let zeros = usize::try_from(transition_len) - .unwrap() - .checked_mul(StateID::SIZE) - .unwrap(); - sparse.extend(iter::repeat(0).take(zeros)); - - // If this is a match state, write the pattern IDs matched by this - // state. - if dfa.is_match_state(state.id()) { - let plen = dfa.match_pattern_len(state.id()); - // Write the actual pattern IDs with a u32 length prefix. - // First, zero-fill space. 
- let mut pos = sparse.len(); - // Unwraps are OK since it's guaranteed that plen <= - // PatternID::LIMIT, which is in turn guaranteed to fit into a - // u32. - let zeros = size_of::() - .checked_mul(plen) - .unwrap() - .checked_add(size_of::()) - .unwrap(); - sparse.extend(iter::repeat(0).take(zeros)); - - // Now write the length prefix. - wire::NE::write_u32( - // Will never fail since u32::MAX is invalid pattern ID. - // Thus, the number of pattern IDs is representable by a - // u32. - plen.try_into().expect("pattern ID length fits in u32"), - &mut sparse[pos..], - ); - pos += size_of::(); - - // Now write the pattern IDs. - for &pid in dfa.pattern_id_slice(state.id()) { - pos += wire::write_pattern_id::( - pid, - &mut sparse[pos..], - ); - } - } - - // And now add the accelerator, if one exists. An accelerator is - // at most 4 bytes and at least 1 byte. The first byte is the - // length, N. N bytes follow the length. The set of bytes that - // follow correspond (exhaustively) to the bytes that must be seen - // to leave this state. - let accel = dfa.accelerator(state.id()); - sparse.push(accel.len().try_into().unwrap()); - sparse.extend_from_slice(accel); - } - - let mut new = DFA { - tt: Transitions { - sparse, - classes: dfa.byte_classes().clone(), - state_len: dfa.state_len(), - pattern_len: dfa.pattern_len(), - }, - st: StartTable::from_dense_dfa(dfa, &remap)?, - special: dfa.special().remap(|id| remap[dfa.to_index(id)]), - pre: dfa.get_prefilter().map(|p| p.clone()), - quitset: dfa.quitset().clone(), - flags: dfa.flags().clone(), - }; - // And here's our second pass. Iterate over all of the dense states - // again, and update the transitions in each of the states in the - // sparse DFA. - for old_state in dfa.states() { - let new_id = remap[dfa.to_index(old_state.id())]; - let mut new_state = new.tt.state_mut(new_id); - let sparse = old_state.sparse_transitions(); - for (i, (_, _, next)) in sparse.enumerate() { - let next = remap[dfa.to_index(next)]; - new_state.set_next_at(i, next); - } - } - new.tt.sparse.shrink_to_fit(); - new.st.table.shrink_to_fit(); - debug!( - "created sparse DFA, memory usage: {} (dense memory usage: {})", - new.memory_usage(), - dfa.memory_usage(), - ); - Ok(new) - } -} - -impl> DFA { - /// Cheaply return a borrowed version of this sparse DFA. Specifically, the - /// DFA returned always uses `&[u8]` for its transitions. - pub fn as_ref<'a>(&'a self) -> DFA<&'a [u8]> { - DFA { - tt: self.tt.as_ref(), - st: self.st.as_ref(), - special: self.special, - pre: self.pre.clone(), - quitset: self.quitset, - flags: self.flags, - } - } - - /// Return an owned version of this sparse DFA. Specifically, the DFA - /// returned always uses `Vec` for its transitions. - /// - /// Effectively, this returns a sparse DFA whose transitions live on the - /// heap. - #[cfg(feature = "alloc")] - pub fn to_owned(&self) -> DFA> { - DFA { - tt: self.tt.to_owned(), - st: self.st.to_owned(), - special: self.special, - pre: self.pre.clone(), - quitset: self.quitset, - flags: self.flags, - } - } - - /// Returns the starting state configuration for this DFA. - /// - /// The default is [`StartKind::Both`], which means the DFA supports both - /// unanchored and anchored searches. However, this can generally lead to - /// bigger DFAs. Therefore, a DFA might be compiled with support for just - /// unanchored or anchored searches. In that case, running a search with - /// an unsupported configuration will panic. 
- pub fn start_kind(&self) -> StartKind { - self.st.kind - } - - /// Returns true only if this DFA has starting states for each pattern. - /// - /// When a DFA has starting states for each pattern, then a search with the - /// DFA can be configured to only look for anchored matches of a specific - /// pattern. Specifically, APIs like [`Automaton::try_search_fwd`] can - /// accept a [`Anchored::Pattern`] if and only if this method returns true. - /// Otherwise, an error will be returned. - /// - /// Note that if the DFA is empty, this always returns false. - pub fn starts_for_each_pattern(&self) -> bool { - self.st.pattern_len.is_some() - } - - /// Returns the equivalence classes that make up the alphabet for this DFA. - /// - /// Unless [`dense::Config::byte_classes`] was disabled, it is possible - /// that multiple distinct bytes are grouped into the same equivalence - /// class if it is impossible for them to discriminate between a match and - /// a non-match. This has the effect of reducing the overall alphabet size - /// and in turn potentially substantially reducing the size of the DFA's - /// transition table. - /// - /// The downside of using equivalence classes like this is that every state - /// transition will automatically use this map to convert an arbitrary - /// byte to its corresponding equivalence class. In practice this has a - /// negligible impact on performance. - pub fn byte_classes(&self) -> &ByteClasses { - &self.tt.classes - } - - /// Returns the memory usage, in bytes, of this DFA. - /// - /// The memory usage is computed based on the number of bytes used to - /// represent this DFA. - /// - /// This does **not** include the stack size used up by this DFA. To - /// compute that, use `std::mem::size_of::()`. - pub fn memory_usage(&self) -> usize { - self.tt.memory_usage() + self.st.memory_usage() - } -} - -/// Routines for converting a sparse DFA to other representations, such as raw -/// bytes suitable for persistent storage. -impl> DFA { - /// Serialize this DFA as raw bytes to a `Vec` in little endian - /// format. - /// - /// The written bytes are guaranteed to be deserialized correctly and - /// without errors in a semver compatible release of this crate by a - /// `DFA`'s deserialization APIs (assuming all other criteria for the - /// deserialization APIs has been satisfied): - /// - /// * [`DFA::from_bytes`] - /// * [`DFA::from_bytes_unchecked`] - /// - /// Note that unlike a [`dense::DFA`]'s serialization methods, this does - /// not add any initial padding to the returned bytes. Padding isn't - /// required for sparse DFAs since they have no alignment requirements. - /// - /// # Example - /// - /// This example shows how to serialize and deserialize a DFA: - /// - /// ``` - /// use regex_automata::{dfa::{Automaton, sparse::DFA}, HalfMatch, Input}; - /// - /// // Compile our original DFA. - /// let original_dfa = DFA::new("foo[0-9]+")?; - /// - /// // N.B. We use native endianness here to make the example work, but - /// // using to_bytes_little_endian would work on a little endian target. - /// let buf = original_dfa.to_bytes_native_endian(); - /// // Even if buf has initial padding, DFA::from_bytes will automatically - /// // ignore it. 
- /// let dfa: DFA<&[u8]> = DFA::from_bytes(&buf)?.0; - /// - /// let expected = Some(HalfMatch::must(0, 8)); - /// assert_eq!(expected, dfa.try_search_fwd(&Input::new("foo12345"))?); - /// # Ok::<(), Box>(()) - /// ``` - #[cfg(feature = "dfa-build")] - pub fn to_bytes_little_endian(&self) -> Vec { - self.to_bytes::() - } - - /// Serialize this DFA as raw bytes to a `Vec` in big endian - /// format. - /// - /// The written bytes are guaranteed to be deserialized correctly and - /// without errors in a semver compatible release of this crate by a - /// `DFA`'s deserialization APIs (assuming all other criteria for the - /// deserialization APIs has been satisfied): - /// - /// * [`DFA::from_bytes`] - /// * [`DFA::from_bytes_unchecked`] - /// - /// Note that unlike a [`dense::DFA`]'s serialization methods, this does - /// not add any initial padding to the returned bytes. Padding isn't - /// required for sparse DFAs since they have no alignment requirements. - /// - /// # Example - /// - /// This example shows how to serialize and deserialize a DFA: - /// - /// ``` - /// use regex_automata::{dfa::{Automaton, sparse::DFA}, HalfMatch, Input}; - /// - /// // Compile our original DFA. - /// let original_dfa = DFA::new("foo[0-9]+")?; - /// - /// // N.B. We use native endianness here to make the example work, but - /// // using to_bytes_big_endian would work on a big endian target. - /// let buf = original_dfa.to_bytes_native_endian(); - /// // Even if buf has initial padding, DFA::from_bytes will automatically - /// // ignore it. - /// let dfa: DFA<&[u8]> = DFA::from_bytes(&buf)?.0; - /// - /// let expected = Some(HalfMatch::must(0, 8)); - /// assert_eq!(expected, dfa.try_search_fwd(&Input::new("foo12345"))?); - /// # Ok::<(), Box>(()) - /// ``` - #[cfg(feature = "dfa-build")] - pub fn to_bytes_big_endian(&self) -> Vec { - self.to_bytes::() - } - - /// Serialize this DFA as raw bytes to a `Vec` in native endian - /// format. - /// - /// The written bytes are guaranteed to be deserialized correctly and - /// without errors in a semver compatible release of this crate by a - /// `DFA`'s deserialization APIs (assuming all other criteria for the - /// deserialization APIs has been satisfied): - /// - /// * [`DFA::from_bytes`] - /// * [`DFA::from_bytes_unchecked`] - /// - /// Note that unlike a [`dense::DFA`]'s serialization methods, this does - /// not add any initial padding to the returned bytes. Padding isn't - /// required for sparse DFAs since they have no alignment requirements. - /// - /// Generally speaking, native endian format should only be used when - /// you know that the target you're compiling the DFA for matches the - /// endianness of the target on which you're compiling DFA. For example, - /// if serialization and deserialization happen in the same process or on - /// the same machine. Otherwise, when serializing a DFA for use in a - /// portable environment, you'll almost certainly want to serialize _both_ - /// a little endian and a big endian version and then load the correct one - /// based on the target's configuration. - /// - /// # Example - /// - /// This example shows how to serialize and deserialize a DFA: - /// - /// ``` - /// use regex_automata::{dfa::{Automaton, sparse::DFA}, HalfMatch, Input}; - /// - /// // Compile our original DFA. - /// let original_dfa = DFA::new("foo[0-9]+")?; - /// - /// let buf = original_dfa.to_bytes_native_endian(); - /// // Even if buf has initial padding, DFA::from_bytes will automatically - /// // ignore it. 
- /// let dfa: DFA<&[u8]> = DFA::from_bytes(&buf)?.0; - /// - /// let expected = Some(HalfMatch::must(0, 8)); - /// assert_eq!(expected, dfa.try_search_fwd(&Input::new("foo12345"))?); - /// # Ok::<(), Box>(()) - /// ``` - #[cfg(feature = "dfa-build")] - pub fn to_bytes_native_endian(&self) -> Vec { - self.to_bytes::() - } - - /// The implementation of the public `to_bytes` serialization methods, - /// which is generic over endianness. - #[cfg(feature = "dfa-build")] - fn to_bytes(&self) -> Vec { - let mut buf = vec![0; self.write_to_len()]; - // This should always succeed since the only possible serialization - // error is providing a buffer that's too small, but we've ensured that - // `buf` is big enough here. - self.write_to::(&mut buf).unwrap(); - buf - } - - /// Serialize this DFA as raw bytes to the given slice, in little endian - /// format. Upon success, the total number of bytes written to `dst` is - /// returned. - /// - /// The written bytes are guaranteed to be deserialized correctly and - /// without errors in a semver compatible release of this crate by a - /// `DFA`'s deserialization APIs (assuming all other criteria for the - /// deserialization APIs has been satisfied): - /// - /// * [`DFA::from_bytes`] - /// * [`DFA::from_bytes_unchecked`] - /// - /// # Errors - /// - /// This returns an error if the given destination slice is not big enough - /// to contain the full serialized DFA. If an error occurs, then nothing - /// is written to `dst`. - /// - /// # Example - /// - /// This example shows how to serialize and deserialize a DFA without - /// dynamic memory allocation. - /// - /// ``` - /// use regex_automata::{dfa::{Automaton, sparse::DFA}, HalfMatch, Input}; - /// - /// // Compile our original DFA. - /// let original_dfa = DFA::new("foo[0-9]+")?; - /// - /// // Create a 4KB buffer on the stack to store our serialized DFA. - /// let mut buf = [0u8; 4 * (1<<10)]; - /// // N.B. We use native endianness here to make the example work, but - /// // using write_to_little_endian would work on a little endian target. - /// let written = original_dfa.write_to_native_endian(&mut buf)?; - /// let dfa: DFA<&[u8]> = DFA::from_bytes(&buf[..written])?.0; - /// - /// let expected = Some(HalfMatch::must(0, 8)); - /// assert_eq!(expected, dfa.try_search_fwd(&Input::new("foo12345"))?); - /// # Ok::<(), Box>(()) - /// ``` - pub fn write_to_little_endian( - &self, - dst: &mut [u8], - ) -> Result { - self.write_to::(dst) - } - - /// Serialize this DFA as raw bytes to the given slice, in big endian - /// format. Upon success, the total number of bytes written to `dst` is - /// returned. - /// - /// The written bytes are guaranteed to be deserialized correctly and - /// without errors in a semver compatible release of this crate by a - /// `DFA`'s deserialization APIs (assuming all other criteria for the - /// deserialization APIs has been satisfied): - /// - /// * [`DFA::from_bytes`] - /// * [`DFA::from_bytes_unchecked`] - /// - /// # Errors - /// - /// This returns an error if the given destination slice is not big enough - /// to contain the full serialized DFA. If an error occurs, then nothing - /// is written to `dst`. - /// - /// # Example - /// - /// This example shows how to serialize and deserialize a DFA without - /// dynamic memory allocation. - /// - /// ``` - /// use regex_automata::{dfa::{Automaton, sparse::DFA}, HalfMatch, Input}; - /// - /// // Compile our original DFA. 
- /// let original_dfa = DFA::new("foo[0-9]+")?; - /// - /// // Create a 4KB buffer on the stack to store our serialized DFA. - /// let mut buf = [0u8; 4 * (1<<10)]; - /// // N.B. We use native endianness here to make the example work, but - /// // using write_to_big_endian would work on a big endian target. - /// let written = original_dfa.write_to_native_endian(&mut buf)?; - /// let dfa: DFA<&[u8]> = DFA::from_bytes(&buf[..written])?.0; - /// - /// let expected = Some(HalfMatch::must(0, 8)); - /// assert_eq!(expected, dfa.try_search_fwd(&Input::new("foo12345"))?); - /// # Ok::<(), Box>(()) - /// ``` - pub fn write_to_big_endian( - &self, - dst: &mut [u8], - ) -> Result { - self.write_to::(dst) - } - - /// Serialize this DFA as raw bytes to the given slice, in native endian - /// format. Upon success, the total number of bytes written to `dst` is - /// returned. - /// - /// The written bytes are guaranteed to be deserialized correctly and - /// without errors in a semver compatible release of this crate by a - /// `DFA`'s deserialization APIs (assuming all other criteria for the - /// deserialization APIs has been satisfied): - /// - /// * [`DFA::from_bytes`] - /// * [`DFA::from_bytes_unchecked`] - /// - /// Generally speaking, native endian format should only be used when - /// you know that the target you're compiling the DFA for matches the - /// endianness of the target on which you're compiling DFA. For example, - /// if serialization and deserialization happen in the same process or on - /// the same machine. Otherwise, when serializing a DFA for use in a - /// portable environment, you'll almost certainly want to serialize _both_ - /// a little endian and a big endian version and then load the correct one - /// based on the target's configuration. - /// - /// # Errors - /// - /// This returns an error if the given destination slice is not big enough - /// to contain the full serialized DFA. If an error occurs, then nothing - /// is written to `dst`. - /// - /// # Example - /// - /// This example shows how to serialize and deserialize a DFA without - /// dynamic memory allocation. - /// - /// ``` - /// use regex_automata::{dfa::{Automaton, sparse::DFA}, HalfMatch, Input}; - /// - /// // Compile our original DFA. - /// let original_dfa = DFA::new("foo[0-9]+")?; - /// - /// // Create a 4KB buffer on the stack to store our serialized DFA. - /// let mut buf = [0u8; 4 * (1<<10)]; - /// let written = original_dfa.write_to_native_endian(&mut buf)?; - /// let dfa: DFA<&[u8]> = DFA::from_bytes(&buf[..written])?.0; - /// - /// let expected = Some(HalfMatch::must(0, 8)); - /// assert_eq!(expected, dfa.try_search_fwd(&Input::new("foo12345"))?); - /// # Ok::<(), Box>(()) - /// ``` - pub fn write_to_native_endian( - &self, - dst: &mut [u8], - ) -> Result { - self.write_to::(dst) - } - - /// The implementation of the public `write_to` serialization methods, - /// which is generic over endianness. 
-    fn write_to<E: Endian>(
-        &self,
-        dst: &mut [u8],
-    ) -> Result<usize, SerializeError> {
-        let mut nw = 0;
-        nw += wire::write_label(LABEL, &mut dst[nw..])?;
-        nw += wire::write_endianness_check::<E>(&mut dst[nw..])?;
-        nw += wire::write_version::<E>(VERSION, &mut dst[nw..])?;
-        nw += {
-            // Currently unused, intended for future flexibility
-            E::write_u32(0, &mut dst[nw..]);
-            size_of::<u32>()
-        };
-        nw += self.flags.write_to::<E>(&mut dst[nw..])?;
-        nw += self.tt.write_to::<E>(&mut dst[nw..])?;
-        nw += self.st.write_to::<E>(&mut dst[nw..])?;
-        nw += self.special.write_to::<E>(&mut dst[nw..])?;
-        nw += self.quitset.write_to::<E>(&mut dst[nw..])?;
-        Ok(nw)
-    }
-
-    /// Return the total number of bytes required to serialize this DFA.
-    ///
-    /// This is useful for determining the size of the buffer required to pass
-    /// to one of the serialization routines:
-    ///
-    /// * [`DFA::write_to_little_endian`]
-    /// * [`DFA::write_to_big_endian`]
-    /// * [`DFA::write_to_native_endian`]
-    ///
-    /// Passing a buffer smaller than the size returned by this method will
-    /// result in a serialization error.
-    ///
-    /// # Example
-    ///
-    /// This example shows how to dynamically allocate enough room to serialize
-    /// a sparse DFA.
-    ///
-    /// ```
-    /// use regex_automata::{dfa::{Automaton, sparse::DFA}, HalfMatch, Input};
-    ///
-    /// // Compile our original DFA.
-    /// let original_dfa = DFA::new("foo[0-9]+")?;
-    ///
-    /// let mut buf = vec![0; original_dfa.write_to_len()];
-    /// let written = original_dfa.write_to_native_endian(&mut buf)?;
-    /// let dfa: DFA<&[u8]> = DFA::from_bytes(&buf[..written])?.0;
-    ///
-    /// let expected = Some(HalfMatch::must(0, 8));
-    /// assert_eq!(expected, dfa.try_search_fwd(&Input::new("foo12345"))?);
-    /// # Ok::<(), Box<dyn std::error::Error>>(())
-    /// ```
-    pub fn write_to_len(&self) -> usize {
-        wire::write_label_len(LABEL)
-            + wire::write_endianness_check_len()
-            + wire::write_version_len()
-            + size_of::<u32>() // unused, intended for future flexibility
-            + self.flags.write_to_len()
-            + self.tt.write_to_len()
-            + self.st.write_to_len()
-            + self.special.write_to_len()
-            + self.quitset.write_to_len()
-    }
-}
-
-impl<'a> DFA<&'a [u8]> {
-    /// Safely deserialize a sparse DFA with a specific state identifier
-    /// representation. Upon success, this returns both the deserialized DFA
-    /// and the number of bytes read from the given slice. Namely, the contents
-    /// of the slice beyond the DFA are not read.
-    ///
-    /// Deserializing a DFA using this routine will never allocate heap memory.
-    /// For safety purposes, the DFA's transitions will be verified such that
-    /// every transition points to a valid state. If this verification is too
-    /// costly, then a [`DFA::from_bytes_unchecked`] API is provided, which
-    /// will always execute in constant time.
-    ///
-    /// The bytes given must be generated by one of the serialization APIs
-    /// of a `DFA` using a semver compatible release of this crate. Those
-    /// include:
-    ///
-    /// * [`DFA::to_bytes_little_endian`]
-    /// * [`DFA::to_bytes_big_endian`]
-    /// * [`DFA::to_bytes_native_endian`]
-    /// * [`DFA::write_to_little_endian`]
-    /// * [`DFA::write_to_big_endian`]
-    /// * [`DFA::write_to_native_endian`]
-    ///
-    /// The `to_bytes` methods allocate and return a `Vec<u8>` for you. The
-    /// `write_to` methods do not allocate and write to an existing slice
-    /// (which may be on the stack). Since deserialization always uses the
-    /// native endianness of the target platform, the serialization API you use
-    /// should match the endianness of the target platform.
(It's often a good - /// idea to generate serialized DFAs for both forms of endianness and then - /// load the correct one based on endianness.) - /// - /// # Errors - /// - /// Generally speaking, it's easier to state the conditions in which an - /// error is _not_ returned. All of the following must be true: - /// - /// * The bytes given must be produced by one of the serialization APIs - /// on this DFA, as mentioned above. - /// * The endianness of the target platform matches the endianness used to - /// serialized the provided DFA. - /// - /// If any of the above are not true, then an error will be returned. - /// - /// Note that unlike deserializing a [`dense::DFA`], deserializing a sparse - /// DFA has no alignment requirements. That is, an alignment of `1` is - /// valid. - /// - /// # Panics - /// - /// This routine will never panic for any input. - /// - /// # Example - /// - /// This example shows how to serialize a DFA to raw bytes, deserialize it - /// and then use it for searching. - /// - /// ``` - /// use regex_automata::{dfa::{Automaton, sparse::DFA}, HalfMatch, Input}; - /// - /// let initial = DFA::new("foo[0-9]+")?; - /// let bytes = initial.to_bytes_native_endian(); - /// let dfa: DFA<&[u8]> = DFA::from_bytes(&bytes)?.0; - /// - /// let expected = Some(HalfMatch::must(0, 8)); - /// assert_eq!(expected, dfa.try_search_fwd(&Input::new("foo12345"))?); - /// # Ok::<(), Box>(()) - /// ``` - /// - /// # Example: loading a DFA from static memory - /// - /// One use case this library supports is the ability to serialize a - /// DFA to disk and then use `include_bytes!` to store it in a compiled - /// Rust program. Those bytes can then be cheaply deserialized into a - /// `DFA` structure at runtime and used for searching without having to - /// re-compile the DFA (which can be quite costly). - /// - /// We can show this in two parts. The first part is serializing the DFA to - /// a file: - /// - /// ```no_run - /// use regex_automata::dfa::sparse::DFA; - /// - /// let dfa = DFA::new("foo[0-9]+")?; - /// - /// // Write a big endian serialized version of this DFA to a file. - /// let bytes = dfa.to_bytes_big_endian(); - /// std::fs::write("foo.bigendian.dfa", &bytes)?; - /// - /// // Do it again, but this time for little endian. - /// let bytes = dfa.to_bytes_little_endian(); - /// std::fs::write("foo.littleendian.dfa", &bytes)?; - /// # Ok::<(), Box>(()) - /// ``` - /// - /// And now the second part is embedding the DFA into the compiled program - /// and deserializing it at runtime on first use. We use conditional - /// compilation to choose the correct endianness. We do not need to employ - /// any special tricks to ensure a proper alignment, since a sparse DFA has - /// no alignment requirements. - /// - /// ```no_run - /// use regex_automata::{ - /// dfa::{Automaton, sparse::DFA}, - /// util::lazy::Lazy, - /// HalfMatch, Input, - /// }; - /// - /// // This crate provides its own "lazy" type, kind of like - /// // lazy_static! or once_cell::sync::Lazy. But it works in no-alloc - /// // no-std environments and let's us write this using completely - /// // safe code. - /// static RE: Lazy> = Lazy::new(|| { - /// # const _: &str = stringify! 
{ - /// #[cfg(target_endian = "big")] - /// static BYTES: &[u8] = include_bytes!("foo.bigendian.dfa"); - /// #[cfg(target_endian = "little")] - /// static BYTES: &[u8] = include_bytes!("foo.littleendian.dfa"); - /// # }; - /// # static BYTES: &[u8] = b""; - /// - /// let (dfa, _) = DFA::from_bytes(BYTES) - /// .expect("serialized DFA should be valid"); - /// dfa - /// }); - /// - /// let expected = Ok(Some(HalfMatch::must(0, 8))); - /// assert_eq!(expected, RE.try_search_fwd(&Input::new("foo12345"))); - /// ``` - /// - /// Alternatively, consider using - /// [`lazy_static`](https://crates.io/crates/lazy_static) - /// or - /// [`once_cell`](https://crates.io/crates/once_cell), - /// which will guarantee safety for you. - pub fn from_bytes( - slice: &'a [u8], - ) -> Result<(DFA<&'a [u8]>, usize), DeserializeError> { - // SAFETY: This is safe because we validate both the sparse transitions - // (by trying to decode every state) and start state ID list below. If - // either validation fails, then we return an error. - let (dfa, nread) = unsafe { DFA::from_bytes_unchecked(slice)? }; - let seen = dfa.tt.validate(&dfa.special)?; - dfa.st.validate(&dfa.special, &seen)?; - // N.B. dfa.special doesn't have a way to do unchecked deserialization, - // so it has already been validated. - Ok((dfa, nread)) - } - - /// Deserialize a DFA with a specific state identifier representation in - /// constant time by omitting the verification of the validity of the - /// sparse transitions. - /// - /// This is just like [`DFA::from_bytes`], except it can potentially return - /// a DFA that exhibits undefined behavior if its transitions contains - /// invalid state identifiers. - /// - /// This routine is useful if you need to deserialize a DFA cheaply and - /// cannot afford the transition validation performed by `from_bytes`. - /// - /// # Safety - /// - /// This routine is not safe because it permits callers to provide - /// arbitrary transitions with possibly incorrect state identifiers. While - /// the various serialization routines will never return an incorrect - /// DFA, there is no guarantee that the bytes provided here are correct. - /// While `from_bytes_unchecked` will still do several forms of basic - /// validation, this routine does not check that the transitions themselves - /// are correct. Given an incorrect transition table, it is possible for - /// the search routines to access out-of-bounds memory because of explicit - /// bounds check elision. - /// - /// # Example - /// - /// ``` - /// use regex_automata::{dfa::{Automaton, sparse::DFA}, HalfMatch, Input}; - /// - /// let initial = DFA::new("foo[0-9]+")?; - /// let bytes = initial.to_bytes_native_endian(); - /// // SAFETY: This is guaranteed to be safe since the bytes given come - /// // directly from a compatible serialization routine. 
- /// let dfa: DFA<&[u8]> = unsafe { DFA::from_bytes_unchecked(&bytes)?.0 }; - /// - /// let expected = Some(HalfMatch::must(0, 8)); - /// assert_eq!(expected, dfa.try_search_fwd(&Input::new("foo12345"))?); - /// # Ok::<(), Box>(()) - /// ``` - pub unsafe fn from_bytes_unchecked( - slice: &'a [u8], - ) -> Result<(DFA<&'a [u8]>, usize), DeserializeError> { - let mut nr = 0; - - nr += wire::read_label(&slice[nr..], LABEL)?; - nr += wire::read_endianness_check(&slice[nr..])?; - nr += wire::read_version(&slice[nr..], VERSION)?; - - let _unused = wire::try_read_u32(&slice[nr..], "unused space")?; - nr += size_of::(); - - let (flags, nread) = Flags::from_bytes(&slice[nr..])?; - nr += nread; - - let (tt, nread) = Transitions::from_bytes_unchecked(&slice[nr..])?; - nr += nread; - - let (st, nread) = StartTable::from_bytes_unchecked(&slice[nr..])?; - nr += nread; - - let (special, nread) = Special::from_bytes(&slice[nr..])?; - nr += nread; - if special.max.as_usize() >= tt.sparse().len() { - return Err(DeserializeError::generic( - "max should not be greater than or equal to sparse bytes", - )); - } - - let (quitset, nread) = ByteSet::from_bytes(&slice[nr..])?; - nr += nread; - - // Prefilters don't support serialization, so they're always absent. - let pre = None; - Ok((DFA { tt, st, special, pre, quitset, flags }, nr)) - } -} - -/// Other routines that work for all `T`. -impl DFA { - /// Set or unset the prefilter attached to this DFA. - /// - /// This is useful when one has deserialized a DFA from `&[u8]`. - /// Deserialization does not currently include prefilters, so if you - /// want prefilter acceleration, you'll need to rebuild it and attach - /// it here. - pub fn set_prefilter(&mut self, prefilter: Option) { - self.pre = prefilter - } -} - -impl> fmt::Debug for DFA { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - writeln!(f, "sparse::DFA(")?; - for state in self.tt.states() { - fmt_state_indicator(f, self, state.id())?; - writeln!(f, "{:06?}: {:?}", state.id().as_usize(), state)?; - } - writeln!(f, "")?; - for (i, (start_id, anchored, sty)) in self.st.iter().enumerate() { - if i % self.st.stride == 0 { - match anchored { - Anchored::No => writeln!(f, "START-GROUP(unanchored)")?, - Anchored::Yes => writeln!(f, "START-GROUP(anchored)")?, - Anchored::Pattern(pid) => writeln!( - f, - "START_GROUP(pattern: {:?})", - pid.as_usize() - )?, - } - } - writeln!(f, " {:?} => {:06?}", sty, start_id.as_usize())?; - } - writeln!(f, "state length: {:?}", self.tt.state_len)?; - writeln!(f, "pattern length: {:?}", self.pattern_len())?; - writeln!(f, "flags: {:?}", self.flags)?; - writeln!(f, ")")?; - Ok(()) - } -} - -// SAFETY: We assert that our implementation of each method is correct. -unsafe impl> Automaton for DFA { - #[inline] - fn is_special_state(&self, id: StateID) -> bool { - self.special.is_special_state(id) - } - - #[inline] - fn is_dead_state(&self, id: StateID) -> bool { - self.special.is_dead_state(id) - } - - #[inline] - fn is_quit_state(&self, id: StateID) -> bool { - self.special.is_quit_state(id) - } - - #[inline] - fn is_match_state(&self, id: StateID) -> bool { - self.special.is_match_state(id) - } - - #[inline] - fn is_start_state(&self, id: StateID) -> bool { - self.special.is_start_state(id) - } - - #[inline] - fn is_accel_state(&self, id: StateID) -> bool { - self.special.is_accel_state(id) - } - - // This is marked as inline to help dramatically boost sparse searching, - // which decodes each state it enters to follow the next transition. 
- #[cfg_attr(feature = "perf-inline", inline(always))] - fn next_state(&self, current: StateID, input: u8) -> StateID { - let input = self.tt.classes.get(input); - self.tt.state(current).next(input) - } - - #[inline] - unsafe fn next_state_unchecked( - &self, - current: StateID, - input: u8, - ) -> StateID { - self.next_state(current, input) - } - - #[inline] - fn next_eoi_state(&self, current: StateID) -> StateID { - self.tt.state(current).next_eoi() - } - - #[inline] - fn pattern_len(&self) -> usize { - self.tt.pattern_len - } - - #[inline] - fn match_len(&self, id: StateID) -> usize { - self.tt.state(id).pattern_len() - } - - #[inline] - fn match_pattern(&self, id: StateID, match_index: usize) -> PatternID { - // This is an optimization for the very common case of a DFA with a - // single pattern. This conditional avoids a somewhat more costly path - // that finds the pattern ID from the state machine, which requires - // a bit of slicing/pointer-chasing. This optimization tends to only - // matter when matches are frequent. - if self.tt.pattern_len == 1 { - return PatternID::ZERO; - } - self.tt.state(id).pattern_id(match_index) - } - - #[inline] - fn has_empty(&self) -> bool { - self.flags.has_empty - } - - #[inline] - fn is_utf8(&self) -> bool { - self.flags.is_utf8 - } - - #[inline] - fn is_always_start_anchored(&self) -> bool { - self.flags.is_always_start_anchored - } - - #[inline] - fn start_state( - &self, - config: &start::Config, - ) -> Result { - let anchored = config.get_anchored(); - let start = match config.get_look_behind() { - None => Start::Text, - Some(byte) => { - if !self.quitset.is_empty() && self.quitset.contains(byte) { - return Err(StartError::quit(byte)); - } - self.st.start_map.get(byte) - } - }; - self.st.start(anchored, start) - } - - #[inline] - fn universal_start_state(&self, mode: Anchored) -> Option { - match mode { - Anchored::No => self.st.universal_start_unanchored, - Anchored::Yes => self.st.universal_start_anchored, - Anchored::Pattern(_) => None, - } - } - - #[inline] - fn accelerator(&self, id: StateID) -> &[u8] { - self.tt.state(id).accelerator() - } - - #[inline] - fn get_prefilter(&self) -> Option<&Prefilter> { - self.pre.as_ref() - } -} - -/// The transition table portion of a sparse DFA. -/// -/// The transition table is the core part of the DFA in that it describes how -/// to move from one state to another based on the input sequence observed. -/// -/// Unlike a typical dense table based DFA, states in a sparse transition -/// table have variable size. That is, states with more transitions use more -/// space than states with fewer transitions. This means that finding the next -/// transition takes more work than with a dense DFA, but also typically uses -/// much less space. -#[derive(Clone)] -struct Transitions { - /// The raw encoding of each state in this DFA. - /// - /// Each state has the following information: - /// - /// * A set of transitions to subsequent states. Transitions to the dead - /// state are omitted. - /// * If the state can be accelerated, then any additional accelerator - /// information. - /// * If the state is a match state, then the state contains all pattern - /// IDs that match when in that state. - /// - /// To decode a state, use Transitions::state. - /// - /// In practice, T is either Vec or &[u8]. - sparse: T, - /// A set of equivalence classes, where a single equivalence class - /// represents a set of bytes that never discriminate between a match - /// and a non-match in the DFA. 
Each equivalence class corresponds to a - /// single character in this DFA's alphabet, where the maximum number of - /// characters is 257 (each possible value of a byte plus the special - /// EOI transition). Consequently, the number of equivalence classes - /// corresponds to the number of transitions for each DFA state. Note - /// though that the *space* used by each DFA state in the transition table - /// may be larger. The total space used by each DFA state is known as the - /// stride and is documented above. - /// - /// The only time the number of equivalence classes is fewer than 257 is - /// if the DFA's kind uses byte classes which is the default. Equivalence - /// classes should generally only be disabled when debugging, so that - /// the transitions themselves aren't obscured. Disabling them has no - /// other benefit, since the equivalence class map is always used while - /// searching. In the vast majority of cases, the number of equivalence - /// classes is substantially smaller than 257, particularly when large - /// Unicode classes aren't used. - /// - /// N.B. Equivalence classes aren't particularly useful in a sparse DFA - /// in the current implementation, since equivalence classes generally tend - /// to correspond to continuous ranges of bytes that map to the same - /// transition. So in a sparse DFA, equivalence classes don't really lead - /// to a space savings. In the future, it would be good to try and remove - /// them from sparse DFAs entirely, but requires a bit of work since sparse - /// DFAs are built from dense DFAs, which are in turn built on top of - /// equivalence classes. - classes: ByteClasses, - /// The total number of states in this DFA. Note that a DFA always has at - /// least one state---the dead state---even the empty DFA. In particular, - /// the dead state always has ID 0 and is correspondingly always the first - /// state. The dead state is never a match state. - state_len: usize, - /// The total number of unique patterns represented by these match states. - pattern_len: usize, -} - -impl<'a> Transitions<&'a [u8]> { - unsafe fn from_bytes_unchecked( - mut slice: &'a [u8], - ) -> Result<(Transitions<&'a [u8]>, usize), DeserializeError> { - let slice_start = slice.as_ptr().as_usize(); - - let (state_len, nr) = - wire::try_read_u32_as_usize(&slice, "state length")?; - slice = &slice[nr..]; - - let (pattern_len, nr) = - wire::try_read_u32_as_usize(&slice, "pattern length")?; - slice = &slice[nr..]; - - let (classes, nr) = ByteClasses::from_bytes(&slice)?; - slice = &slice[nr..]; - - let (len, nr) = - wire::try_read_u32_as_usize(&slice, "sparse transitions length")?; - slice = &slice[nr..]; - - wire::check_slice_len(slice, len, "sparse states byte length")?; - let sparse = &slice[..len]; - slice = &slice[len..]; - - let trans = Transitions { sparse, classes, state_len, pattern_len }; - Ok((trans, slice.as_ptr().as_usize() - slice_start)) - } -} - -impl> Transitions { - /// Writes a serialized form of this transition table to the buffer given. - /// If the buffer is too small, then an error is returned. To determine - /// how big the buffer must be, use `write_to_len`. 
- fn write_to( - &self, - mut dst: &mut [u8], - ) -> Result { - let nwrite = self.write_to_len(); - if dst.len() < nwrite { - return Err(SerializeError::buffer_too_small( - "sparse transition table", - )); - } - dst = &mut dst[..nwrite]; - - // write state length - E::write_u32(u32::try_from(self.state_len).unwrap(), dst); - dst = &mut dst[size_of::()..]; - - // write pattern length - E::write_u32(u32::try_from(self.pattern_len).unwrap(), dst); - dst = &mut dst[size_of::()..]; - - // write byte class map - let n = self.classes.write_to(dst)?; - dst = &mut dst[n..]; - - // write number of bytes in sparse transitions - E::write_u32(u32::try_from(self.sparse().len()).unwrap(), dst); - dst = &mut dst[size_of::()..]; - - // write actual transitions - let mut id = DEAD; - while id.as_usize() < self.sparse().len() { - let state = self.state(id); - let n = state.write_to::(&mut dst)?; - dst = &mut dst[n..]; - // The next ID is the offset immediately following `state`. - id = StateID::new(id.as_usize() + state.write_to_len()).unwrap(); - } - Ok(nwrite) - } - - /// Returns the number of bytes the serialized form of this transition - /// table will use. - fn write_to_len(&self) -> usize { - size_of::() // state length - + size_of::() // pattern length - + self.classes.write_to_len() - + size_of::() // sparse transitions length - + self.sparse().len() - } - - /// Validates that every state ID in this transition table is valid. - /// - /// That is, every state ID can be used to correctly index a state in this - /// table. - fn validate(&self, sp: &Special) -> Result { - let mut verified = Seen::new(); - // We need to make sure that we decode the correct number of states. - // Otherwise, an empty set of transitions would validate even if the - // recorded state length is non-empty. - let mut len = 0; - // We can't use the self.states() iterator because it assumes the state - // encodings are valid. It could panic if they aren't. - let mut id = DEAD; - while id.as_usize() < self.sparse().len() { - // Before we even decode the state, we check that the ID itself - // is well formed. That is, if it's a special state then it must - // actually be a quit, dead, accel, match or start state. - if sp.is_special_state(id) { - let is_actually_special = sp.is_dead_state(id) - || sp.is_quit_state(id) - || sp.is_match_state(id) - || sp.is_start_state(id) - || sp.is_accel_state(id); - if !is_actually_special { - // This is kind of a cryptic error message... - return Err(DeserializeError::generic( - "found sparse state tagged as special but \ - wasn't actually special", - )); - } - } - let state = self.try_state(sp, id)?; - verified.insert(id); - // The next ID should be the offset immediately following `state`. - id = StateID::new(wire::add( - id.as_usize(), - state.write_to_len(), - "next state ID offset", - )?) - .map_err(|err| { - DeserializeError::state_id_error(err, "next state ID offset") - })?; - len += 1; - } - // Now that we've checked that all top-level states are correct and - // importantly, collected a set of valid state IDs, we have all the - // information we need to check that all transitions are correct too. - // - // Note that we can't use `valid_ids` to iterate because it will - // be empty in no-std no-alloc contexts. (And yes, that means our - // verification isn't quite as good.) We can use `self.states()` - // though at least, since we know that all states can at least be - // decoded and traversed correctly. 
- for state in self.states() { - // Check that all transitions in this state are correct. - for i in 0..state.ntrans { - let to = state.next_at(i); - // For no-alloc, we just check that the state can decode. It is - // technically possible that the state ID could still point to - // a non-existent state even if it decodes (fuzzing proved this - // to be true), but it shouldn't result in any memory unsafety - // or panics in non-debug mode. - #[cfg(not(feature = "alloc"))] - { - let _ = self.try_state(sp, to)?; - } - #[cfg(feature = "alloc")] - { - if !verified.contains(&to) { - return Err(DeserializeError::generic( - "found transition that points to a \ - non-existent state", - )); - } - } - } - } - if len != self.state_len { - return Err(DeserializeError::generic( - "mismatching sparse state length", - )); - } - Ok(verified) - } - - /// Converts these transitions to a borrowed value. - fn as_ref(&self) -> Transitions<&'_ [u8]> { - Transitions { - sparse: self.sparse(), - classes: self.classes.clone(), - state_len: self.state_len, - pattern_len: self.pattern_len, - } - } - - /// Converts these transitions to an owned value. - #[cfg(feature = "alloc")] - fn to_owned(&self) -> Transitions> { - Transitions { - sparse: self.sparse().to_vec(), - classes: self.classes.clone(), - state_len: self.state_len, - pattern_len: self.pattern_len, - } - } - - /// Return a convenient representation of the given state. - /// - /// This panics if the state is invalid. - /// - /// This is marked as inline to help dramatically boost sparse searching, - /// which decodes each state it enters to follow the next transition. Other - /// functions involved are also inlined, which should hopefully eliminate - /// a lot of the extraneous decoding that is never needed just to follow - /// the next transition. - #[cfg_attr(feature = "perf-inline", inline(always))] - fn state(&self, id: StateID) -> State<'_> { - let mut state = &self.sparse()[id.as_usize()..]; - let mut ntrans = wire::read_u16(&state).as_usize(); - let is_match = (1 << 15) & ntrans != 0; - ntrans &= !(1 << 15); - state = &state[2..]; - - let (input_ranges, state) = state.split_at(ntrans * 2); - let (next, state) = state.split_at(ntrans * StateID::SIZE); - let (pattern_ids, state) = if is_match { - let npats = wire::read_u32(&state).as_usize(); - state[4..].split_at(npats * 4) - } else { - (&[][..], state) - }; - - let accel_len = usize::from(state[0]); - let accel = &state[1..accel_len + 1]; - State { id, is_match, ntrans, input_ranges, next, pattern_ids, accel } - } - - /// Like `state`, but will return an error if the state encoding is - /// invalid. This is useful for verifying states after deserialization, - /// which is required for a safe deserialization API. - /// - /// Note that this only verifies that this state is decodable and that - /// all of its data is consistent. It does not verify that its state ID - /// transitions point to valid states themselves, nor does it verify that - /// every pattern ID is valid. - fn try_state( - &self, - sp: &Special, - id: StateID, - ) -> Result, DeserializeError> { - if id.as_usize() > self.sparse().len() { - return Err(DeserializeError::generic( - "invalid caller provided sparse state ID", - )); - } - let mut state = &self.sparse()[id.as_usize()..]; - // Encoding format starts with a u16 that stores the total number of - // transitions in this state. 
- let (mut ntrans, _) = - wire::try_read_u16_as_usize(state, "state transition length")?; - let is_match = ((1 << 15) & ntrans) != 0; - ntrans &= !(1 << 15); - state = &state[2..]; - if ntrans > 257 || ntrans == 0 { - return Err(DeserializeError::generic( - "invalid transition length", - )); - } - if is_match && !sp.is_match_state(id) { - return Err(DeserializeError::generic( - "state marked as match but not in match ID range", - )); - } else if !is_match && sp.is_match_state(id) { - return Err(DeserializeError::generic( - "state in match ID range but not marked as match state", - )); - } - - // Each transition has two pieces: an inclusive range of bytes on which - // it is defined, and the state ID that those bytes transition to. The - // pairs come first, followed by a corresponding sequence of state IDs. - let input_ranges_len = ntrans.checked_mul(2).unwrap(); - wire::check_slice_len(state, input_ranges_len, "sparse byte pairs")?; - let (input_ranges, state) = state.split_at(input_ranges_len); - // Every range should be of the form A-B, where A<=B. - for pair in input_ranges.chunks(2) { - let (start, end) = (pair[0], pair[1]); - if start > end { - return Err(DeserializeError::generic("invalid input range")); - } - } - - // And now extract the corresponding sequence of state IDs. We leave - // this sequence as a &[u8] instead of a &[S] because sparse DFAs do - // not have any alignment requirements. - let next_len = ntrans - .checked_mul(self.id_len()) - .expect("state size * #trans should always fit in a usize"); - wire::check_slice_len(state, next_len, "sparse trans state IDs")?; - let (next, state) = state.split_at(next_len); - // We can at least verify that every state ID is in bounds. - for idbytes in next.chunks(self.id_len()) { - let (id, _) = - wire::read_state_id(idbytes, "sparse state ID in try_state")?; - wire::check_slice_len( - self.sparse(), - id.as_usize(), - "invalid sparse state ID", - )?; - } - - // If this is a match state, then read the pattern IDs for this state. - // Pattern IDs is a u32-length prefixed sequence of native endian - // encoded 32-bit integers. - let (pattern_ids, state) = if is_match { - let (npats, nr) = - wire::try_read_u32_as_usize(state, "pattern ID length")?; - let state = &state[nr..]; - if npats == 0 { - return Err(DeserializeError::generic( - "state marked as a match, but pattern length is zero", - )); - } - - let pattern_ids_len = - wire::mul(npats, 4, "sparse pattern ID byte length")?; - wire::check_slice_len( - state, - pattern_ids_len, - "sparse pattern IDs", - )?; - let (pattern_ids, state) = state.split_at(pattern_ids_len); - for patbytes in pattern_ids.chunks(PatternID::SIZE) { - wire::read_pattern_id( - patbytes, - "sparse pattern ID in try_state", - )?; - } - (pattern_ids, state) - } else { - (&[][..], state) - }; - if is_match && pattern_ids.is_empty() { - return Err(DeserializeError::generic( - "state marked as a match, but has no pattern IDs", - )); - } - if sp.is_match_state(id) && pattern_ids.is_empty() { - return Err(DeserializeError::generic( - "state marked special as a match, but has no pattern IDs", - )); - } - if sp.is_match_state(id) != is_match { - return Err(DeserializeError::generic( - "whether state is a match or not is inconsistent", - )); - } - - // Now read this state's accelerator info. The first byte is the length - // of the accelerator, which is typically 0 (for no acceleration) but - // is no bigger than 3. 
The length indicates the number of bytes that - // follow, where each byte corresponds to a transition out of this - // state. - if state.is_empty() { - return Err(DeserializeError::generic("no accelerator length")); - } - let (accel_len, state) = (usize::from(state[0]), &state[1..]); - - if accel_len > 3 { - return Err(DeserializeError::generic( - "sparse invalid accelerator length", - )); - } else if accel_len == 0 && sp.is_accel_state(id) { - return Err(DeserializeError::generic( - "got no accelerators in state, but in accelerator ID range", - )); - } else if accel_len > 0 && !sp.is_accel_state(id) { - return Err(DeserializeError::generic( - "state in accelerator ID range, but has no accelerators", - )); - } - - wire::check_slice_len( - state, - accel_len, - "sparse corrupt accelerator length", - )?; - let (accel, _) = (&state[..accel_len], &state[accel_len..]); - - let state = State { - id, - is_match, - ntrans, - input_ranges, - next, - pattern_ids, - accel, - }; - if sp.is_quit_state(state.next_at(state.ntrans - 1)) { - return Err(DeserializeError::generic( - "state with EOI transition to quit state is illegal", - )); - } - Ok(state) - } - - /// Return an iterator over all of the states in this DFA. - /// - /// The iterator returned yields tuples, where the first element is the - /// state ID and the second element is the state itself. - fn states(&self) -> StateIter<'_, T> { - StateIter { trans: self, id: DEAD.as_usize() } - } - - /// Returns the sparse transitions as raw bytes. - fn sparse(&self) -> &[u8] { - self.sparse.as_ref() - } - - /// Returns the number of bytes represented by a single state ID. - fn id_len(&self) -> usize { - StateID::SIZE - } - - /// Return the memory usage, in bytes, of these transitions. - /// - /// This does not include the size of a `Transitions` value itself. - fn memory_usage(&self) -> usize { - self.sparse().len() - } -} - -#[cfg(feature = "dfa-build")] -impl> Transitions { - /// Return a convenient mutable representation of the given state. - /// This panics if the state is invalid. - fn state_mut(&mut self, id: StateID) -> StateMut<'_> { - let mut state = &mut self.sparse_mut()[id.as_usize()..]; - let mut ntrans = wire::read_u16(&state).as_usize(); - let is_match = (1 << 15) & ntrans != 0; - ntrans &= !(1 << 15); - state = &mut state[2..]; - - let (input_ranges, state) = state.split_at_mut(ntrans * 2); - let (next, state) = state.split_at_mut(ntrans * StateID::SIZE); - let (pattern_ids, state) = if is_match { - let npats = wire::read_u32(&state).as_usize(); - state[4..].split_at_mut(npats * 4) - } else { - (&mut [][..], state) - }; - - let accel_len = usize::from(state[0]); - let accel = &mut state[1..accel_len + 1]; - StateMut { - id, - is_match, - ntrans, - input_ranges, - next, - pattern_ids, - accel, - } - } - - /// Returns the sparse transitions as raw mutable bytes. - fn sparse_mut(&mut self) -> &mut [u8] { - self.sparse.as_mut() - } -} - -/// The set of all possible starting states in a DFA. -/// -/// See the eponymous type in the `dense` module for more details. This type -/// is very similar to `dense::StartTable`, except that its underlying -/// representation is `&[u8]` instead of `&[S]`. (The latter would require -/// sparse DFAs to be aligned, which is explicitly something we do not require -/// because we don't really need it.) -#[derive(Clone)] -struct StartTable { - /// The initial start state IDs as a contiguous table of native endian - /// encoded integers, represented by `S`. 
- /// - /// In practice, T is either Vec or &[u8] and has no alignment - /// requirements. - /// - /// The first `2 * stride` (currently always 8) entries always correspond - /// to the starts states for the entire DFA, with the first 4 entries being - /// for unanchored searches and the second 4 entries being for anchored - /// searches. To keep things simple, we always use 8 entries even if the - /// `StartKind` is not both. - /// - /// After that, there are `stride * patterns` state IDs, where `patterns` - /// may be zero in the case of a DFA with no patterns or in the case where - /// the DFA was built without enabling starting states for each pattern. - table: T, - /// The starting state configuration supported. When 'both', both - /// unanchored and anchored searches work. When 'unanchored', anchored - /// searches panic. When 'anchored', unanchored searches panic. - kind: StartKind, - /// The start state configuration for every possible byte. - start_map: StartByteMap, - /// The number of starting state IDs per pattern. - stride: usize, - /// The total number of patterns for which starting states are encoded. - /// This is `None` for DFAs that were built without start states for each - /// pattern. Thus, one cannot use this field to say how many patterns - /// are in the DFA in all cases. It is specific to how many patterns are - /// represented in this start table. - pattern_len: Option, - /// The universal starting state for unanchored searches. This is only - /// present when the DFA supports unanchored searches and when all starting - /// state IDs for an unanchored search are equivalent. - universal_start_unanchored: Option, - /// The universal starting state for anchored searches. This is only - /// present when the DFA supports anchored searches and when all starting - /// state IDs for an anchored search are equivalent. - universal_start_anchored: Option, -} - -#[cfg(feature = "dfa-build")] -impl StartTable> { - fn new>( - dfa: &dense::DFA, - pattern_len: Option, - ) -> StartTable> { - let stride = Start::len(); - // This is OK since the only way we're here is if a dense DFA could be - // constructed successfully, which uses the same space. - let len = stride - .checked_mul(pattern_len.unwrap_or(0)) - .unwrap() - .checked_add(stride.checked_mul(2).unwrap()) - .unwrap() - .checked_mul(StateID::SIZE) - .unwrap(); - StartTable { - table: vec![0; len], - kind: dfa.start_kind(), - start_map: dfa.start_map().clone(), - stride, - pattern_len, - universal_start_unanchored: dfa - .universal_start_state(Anchored::No), - universal_start_anchored: dfa.universal_start_state(Anchored::Yes), - } - } - - fn from_dense_dfa>( - dfa: &dense::DFA, - remap: &[StateID], - ) -> Result>, BuildError> { - // Unless the DFA has start states compiled for each pattern, then - // as far as the starting state table is concerned, there are zero - // patterns to account for. It will instead only store starting states - // for the entire DFA. 
- let start_pattern_len = if dfa.starts_for_each_pattern() { - Some(dfa.pattern_len()) - } else { - None - }; - let mut sl = StartTable::new(dfa, start_pattern_len); - for (old_start_id, anchored, sty) in dfa.starts() { - let new_start_id = remap[dfa.to_index(old_start_id)]; - sl.set_start(anchored, sty, new_start_id); - } - if let Some(ref mut id) = sl.universal_start_anchored { - *id = remap[dfa.to_index(*id)]; - } - if let Some(ref mut id) = sl.universal_start_unanchored { - *id = remap[dfa.to_index(*id)]; - } - Ok(sl) - } -} - -impl<'a> StartTable<&'a [u8]> { - unsafe fn from_bytes_unchecked( - mut slice: &'a [u8], - ) -> Result<(StartTable<&'a [u8]>, usize), DeserializeError> { - let slice_start = slice.as_ptr().as_usize(); - - let (kind, nr) = StartKind::from_bytes(slice)?; - slice = &slice[nr..]; - - let (start_map, nr) = StartByteMap::from_bytes(slice)?; - slice = &slice[nr..]; - - let (stride, nr) = - wire::try_read_u32_as_usize(slice, "sparse start table stride")?; - slice = &slice[nr..]; - if stride != Start::len() { - return Err(DeserializeError::generic( - "invalid sparse starting table stride", - )); - } - - let (maybe_pattern_len, nr) = - wire::try_read_u32_as_usize(slice, "sparse start table patterns")?; - slice = &slice[nr..]; - let pattern_len = if maybe_pattern_len.as_u32() == u32::MAX { - None - } else { - Some(maybe_pattern_len) - }; - if pattern_len.map_or(false, |len| len > PatternID::LIMIT) { - return Err(DeserializeError::generic( - "sparse invalid number of patterns", - )); - } - - let (universal_unanchored, nr) = - wire::try_read_u32(slice, "universal unanchored start")?; - slice = &slice[nr..]; - let universal_start_unanchored = if universal_unanchored == u32::MAX { - None - } else { - Some(StateID::try_from(universal_unanchored).map_err(|e| { - DeserializeError::state_id_error( - e, - "universal unanchored start", - ) - })?) - }; - - let (universal_anchored, nr) = - wire::try_read_u32(slice, "universal anchored start")?; - slice = &slice[nr..]; - let universal_start_anchored = if universal_anchored == u32::MAX { - None - } else { - Some(StateID::try_from(universal_anchored).map_err(|e| { - DeserializeError::state_id_error(e, "universal anchored start") - })?) - }; - - let pattern_table_size = wire::mul( - stride, - pattern_len.unwrap_or(0), - "sparse invalid pattern length", - )?; - // Our start states always start with a single stride of start states - // for the entire automaton which permit it to match any pattern. What - // follows it are an optional set of start states for each pattern. 
- let start_state_len = wire::add( - wire::mul(2, stride, "start state stride too big")?, - pattern_table_size, - "sparse invalid 'any' pattern starts size", - )?; - let table_bytes_len = wire::mul( - start_state_len, - StateID::SIZE, - "sparse pattern table bytes length", - )?; - wire::check_slice_len( - slice, - table_bytes_len, - "sparse start ID table", - )?; - let table = &slice[..table_bytes_len]; - slice = &slice[table_bytes_len..]; - - let sl = StartTable { - table, - kind, - start_map, - stride, - pattern_len, - universal_start_unanchored, - universal_start_anchored, - }; - Ok((sl, slice.as_ptr().as_usize() - slice_start)) - } -} - -impl> StartTable { - fn write_to( - &self, - mut dst: &mut [u8], - ) -> Result { - let nwrite = self.write_to_len(); - if dst.len() < nwrite { - return Err(SerializeError::buffer_too_small( - "sparse starting table ids", - )); - } - dst = &mut dst[..nwrite]; - - // write start kind - let nw = self.kind.write_to::(dst)?; - dst = &mut dst[nw..]; - // write start byte map - let nw = self.start_map.write_to(dst)?; - dst = &mut dst[nw..]; - // write stride - E::write_u32(u32::try_from(self.stride).unwrap(), dst); - dst = &mut dst[size_of::()..]; - // write pattern length - E::write_u32( - u32::try_from(self.pattern_len.unwrap_or(0xFFFF_FFFF)).unwrap(), - dst, - ); - dst = &mut dst[size_of::()..]; - // write universal start unanchored state id, u32::MAX if absent - E::write_u32( - self.universal_start_unanchored - .map_or(u32::MAX, |sid| sid.as_u32()), - dst, - ); - dst = &mut dst[size_of::()..]; - // write universal start anchored state id, u32::MAX if absent - E::write_u32( - self.universal_start_anchored.map_or(u32::MAX, |sid| sid.as_u32()), - dst, - ); - dst = &mut dst[size_of::()..]; - // write start IDs - for (sid, _, _) in self.iter() { - E::write_u32(sid.as_u32(), dst); - dst = &mut dst[StateID::SIZE..]; - } - Ok(nwrite) - } - - /// Returns the number of bytes the serialized form of this transition - /// table will use. - fn write_to_len(&self) -> usize { - self.kind.write_to_len() - + self.start_map.write_to_len() - + size_of::() // stride - + size_of::() // # patterns - + size_of::() // universal unanchored start - + size_of::() // universal anchored start - + self.table().len() - } - - /// Validates that every starting state ID in this table is valid. - /// - /// That is, every starting state ID can be used to correctly decode a - /// state in the DFA's sparse transitions. - fn validate( - &self, - sp: &Special, - seen: &Seen, - ) -> Result<(), DeserializeError> { - for (id, _, _) in self.iter() { - if !seen.contains(&id) { - return Err(DeserializeError::generic( - "found invalid start state ID", - )); - } - if sp.is_match_state(id) { - return Err(DeserializeError::generic( - "start states cannot be match states", - )); - } - } - Ok(()) - } - - /// Converts this start list to a borrowed value. - fn as_ref(&self) -> StartTable<&'_ [u8]> { - StartTable { - table: self.table(), - kind: self.kind, - start_map: self.start_map.clone(), - stride: self.stride, - pattern_len: self.pattern_len, - universal_start_unanchored: self.universal_start_unanchored, - universal_start_anchored: self.universal_start_anchored, - } - } - - /// Converts this start list to an owned value. 
- #[cfg(feature = "alloc")] - fn to_owned(&self) -> StartTable> { - StartTable { - table: self.table().to_vec(), - kind: self.kind, - start_map: self.start_map.clone(), - stride: self.stride, - pattern_len: self.pattern_len, - universal_start_unanchored: self.universal_start_unanchored, - universal_start_anchored: self.universal_start_anchored, - } - } - - /// Return the start state for the given index and pattern ID. If the - /// pattern ID is None, then the corresponding start state for the entire - /// DFA is returned. If the pattern ID is not None, then the corresponding - /// starting state for the given pattern is returned. If this start table - /// does not have individual starting states for each pattern, then this - /// panics. - fn start( - &self, - anchored: Anchored, - start: Start, - ) -> Result { - let start_index = start.as_usize(); - let index = match anchored { - Anchored::No => { - if !self.kind.has_unanchored() { - return Err(StartError::unsupported_anchored(anchored)); - } - start_index - } - Anchored::Yes => { - if !self.kind.has_anchored() { - return Err(StartError::unsupported_anchored(anchored)); - } - self.stride + start_index - } - Anchored::Pattern(pid) => { - let len = match self.pattern_len { - None => { - return Err(StartError::unsupported_anchored(anchored)) - } - Some(len) => len, - }; - if pid.as_usize() >= len { - return Ok(DEAD); - } - (2 * self.stride) - + (self.stride * pid.as_usize()) - + start_index - } - }; - let start = index * StateID::SIZE; - // This OK since we're allowed to assume that the start table contains - // valid StateIDs. - Ok(wire::read_state_id_unchecked(&self.table()[start..]).0) - } - - /// Return an iterator over all start IDs in this table. - fn iter(&self) -> StartStateIter<'_, T> { - StartStateIter { st: self, i: 0 } - } - - /// Returns the total number of start state IDs in this table. - fn len(&self) -> usize { - self.table().len() / StateID::SIZE - } - - /// Returns the table as a raw slice of bytes. - fn table(&self) -> &[u8] { - self.table.as_ref() - } - - /// Return the memory usage, in bytes, of this start list. - /// - /// This does not include the size of a `StartTable` value itself. - fn memory_usage(&self) -> usize { - self.table().len() - } -} - -#[cfg(feature = "dfa-build")] -impl> StartTable { - /// Set the start state for the given index and pattern. - /// - /// If the pattern ID or state ID are not valid, then this will panic. - fn set_start(&mut self, anchored: Anchored, start: Start, id: StateID) { - let start_index = start.as_usize(); - let index = match anchored { - Anchored::No => start_index, - Anchored::Yes => self.stride + start_index, - Anchored::Pattern(pid) => { - let pid = pid.as_usize(); - let len = self - .pattern_len - .expect("start states for each pattern enabled"); - assert!(pid < len, "invalid pattern ID {pid:?}"); - self.stride - .checked_mul(pid) - .unwrap() - .checked_add(self.stride.checked_mul(2).unwrap()) - .unwrap() - .checked_add(start_index) - .unwrap() - } - }; - let start = index * StateID::SIZE; - let end = start + StateID::SIZE; - wire::write_state_id::( - id, - &mut self.table.as_mut()[start..end], - ); - } -} - -/// An iterator over all state state IDs in a sparse DFA. 
-struct StartStateIter<'a, T> { - st: &'a StartTable, - i: usize, -} - -impl<'a, T: AsRef<[u8]>> Iterator for StartStateIter<'a, T> { - type Item = (StateID, Anchored, Start); - - fn next(&mut self) -> Option<(StateID, Anchored, Start)> { - let i = self.i; - if i >= self.st.len() { - return None; - } - self.i += 1; - - // This unwrap is okay since the stride of any DFA must always match - // the number of start state types. - let start_type = Start::from_usize(i % self.st.stride).unwrap(); - let anchored = if i < self.st.stride { - Anchored::No - } else if i < (2 * self.st.stride) { - Anchored::Yes - } else { - let pid = (i - (2 * self.st.stride)) / self.st.stride; - Anchored::Pattern(PatternID::new(pid).unwrap()) - }; - let start = i * StateID::SIZE; - let end = start + StateID::SIZE; - let bytes = self.st.table()[start..end].try_into().unwrap(); - // This is OK since we're allowed to assume that any IDs in this start - // table are correct and valid for this DFA. - let id = StateID::from_ne_bytes_unchecked(bytes); - Some((id, anchored, start_type)) - } -} - -impl<'a, T> fmt::Debug for StartStateIter<'a, T> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.debug_struct("StartStateIter").field("i", &self.i).finish() - } -} - -/// An iterator over all states in a sparse DFA. -/// -/// This iterator yields tuples, where the first element is the state ID and -/// the second element is the state itself. -struct StateIter<'a, T> { - trans: &'a Transitions, - id: usize, -} - -impl<'a, T: AsRef<[u8]>> Iterator for StateIter<'a, T> { - type Item = State<'a>; - - fn next(&mut self) -> Option> { - if self.id >= self.trans.sparse().len() { - return None; - } - let state = self.trans.state(StateID::new_unchecked(self.id)); - self.id = self.id + state.write_to_len(); - Some(state) - } -} - -impl<'a, T> fmt::Debug for StateIter<'a, T> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.debug_struct("StateIter").field("id", &self.id).finish() - } -} - -/// A representation of a sparse DFA state that can be cheaply materialized -/// from a state identifier. -#[derive(Clone)] -struct State<'a> { - /// The identifier of this state. - id: StateID, - /// Whether this is a match state or not. - is_match: bool, - /// The number of transitions in this state. - ntrans: usize, - /// Pairs of input ranges, where there is one pair for each transition. - /// Each pair specifies an inclusive start and end byte range for the - /// corresponding transition. - input_ranges: &'a [u8], - /// Transitions to the next state. This slice contains native endian - /// encoded state identifiers, with `S` as the representation. Thus, there - /// are `ntrans * size_of::()` bytes in this slice. - next: &'a [u8], - /// If this is a match state, then this contains the pattern IDs that match - /// when the DFA is in this state. - /// - /// This is a contiguous sequence of 32-bit native endian encoded integers. - pattern_ids: &'a [u8], - /// An accelerator for this state, if present. If this state has no - /// accelerator, then this is an empty slice. When non-empty, this slice - /// has length at most 3 and corresponds to the exhaustive set of bytes - /// that must be seen in order to transition out of this state. - accel: &'a [u8], -} - -impl<'a> State<'a> { - /// Searches for the next transition given an input byte. If no such - /// transition could be found, then a dead state is returned. 
- /// - /// This is marked as inline to help dramatically boost sparse searching, - /// which decodes each state it enters to follow the next transition. - #[cfg_attr(feature = "perf-inline", inline(always))] - fn next(&self, input: u8) -> StateID { - // This straight linear search was observed to be much better than - // binary search on ASCII haystacks, likely because a binary search - // visits the ASCII case last but a linear search sees it first. A - // binary search does do a little better on non-ASCII haystacks, but - // not by much. There might be a better trade off lurking here. - for i in 0..(self.ntrans - 1) { - let (start, end) = self.range(i); - if start <= input && input <= end { - return self.next_at(i); - } - // We could bail early with an extra branch: if input < b1, then - // we know we'll never find a matching transition. Interestingly, - // this extra branch seems to not help performance, or will even - // hurt it. It's likely very dependent on the DFA itself and what - // is being searched. - } - DEAD - } - - /// Returns the next state ID for the special EOI transition. - fn next_eoi(&self) -> StateID { - self.next_at(self.ntrans - 1) - } - - /// Returns the identifier for this state. - fn id(&self) -> StateID { - self.id - } - - /// Returns the inclusive input byte range for the ith transition in this - /// state. - fn range(&self, i: usize) -> (u8, u8) { - (self.input_ranges[i * 2], self.input_ranges[i * 2 + 1]) - } - - /// Returns the next state for the ith transition in this state. - fn next_at(&self, i: usize) -> StateID { - let start = i * StateID::SIZE; - let end = start + StateID::SIZE; - let bytes = self.next[start..end].try_into().unwrap(); - StateID::from_ne_bytes_unchecked(bytes) - } - - /// Returns the pattern ID for the given match index. If the match index - /// is invalid, then this panics. - fn pattern_id(&self, match_index: usize) -> PatternID { - let start = match_index * PatternID::SIZE; - wire::read_pattern_id_unchecked(&self.pattern_ids[start..]).0 - } - - /// Returns the total number of pattern IDs for this state. This is always - /// zero when `is_match` is false. - fn pattern_len(&self) -> usize { - assert_eq!(0, self.pattern_ids.len() % 4); - self.pattern_ids.len() / 4 - } - - /// Return an accelerator for this state. - fn accelerator(&self) -> &'a [u8] { - self.accel - } - - /// Write the raw representation of this state to the given buffer using - /// the given endianness. 
- fn write_to( - &self, - mut dst: &mut [u8], - ) -> Result { - let nwrite = self.write_to_len(); - if dst.len() < nwrite { - return Err(SerializeError::buffer_too_small( - "sparse state transitions", - )); - } - - let ntrans = - if self.is_match { self.ntrans | (1 << 15) } else { self.ntrans }; - E::write_u16(u16::try_from(ntrans).unwrap(), dst); - dst = &mut dst[size_of::()..]; - - dst[..self.input_ranges.len()].copy_from_slice(self.input_ranges); - dst = &mut dst[self.input_ranges.len()..]; - - for i in 0..self.ntrans { - E::write_u32(self.next_at(i).as_u32(), dst); - dst = &mut dst[StateID::SIZE..]; - } - - if self.is_match { - E::write_u32(u32::try_from(self.pattern_len()).unwrap(), dst); - dst = &mut dst[size_of::()..]; - for i in 0..self.pattern_len() { - let pid = self.pattern_id(i); - E::write_u32(pid.as_u32(), dst); - dst = &mut dst[PatternID::SIZE..]; - } - } - - dst[0] = u8::try_from(self.accel.len()).unwrap(); - dst[1..][..self.accel.len()].copy_from_slice(self.accel); - - Ok(nwrite) - } - - /// Return the total number of bytes that this state consumes in its - /// encoded form. - fn write_to_len(&self) -> usize { - let mut len = 2 - + (self.ntrans * 2) - + (self.ntrans * StateID::SIZE) - + (1 + self.accel.len()); - if self.is_match { - len += size_of::() + self.pattern_ids.len(); - } - len - } -} - -impl<'a> fmt::Debug for State<'a> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - let mut printed = false; - for i in 0..(self.ntrans - 1) { - let next = self.next_at(i); - if next == DEAD { - continue; - } - - if printed { - write!(f, ", ")?; - } - let (start, end) = self.range(i); - if start == end { - write!(f, "{:?} => {:?}", DebugByte(start), next.as_usize())?; - } else { - write!( - f, - "{:?}-{:?} => {:?}", - DebugByte(start), - DebugByte(end), - next.as_usize(), - )?; - } - printed = true; - } - let eoi = self.next_at(self.ntrans - 1); - if eoi != DEAD { - if printed { - write!(f, ", ")?; - } - write!(f, "EOI => {:?}", eoi.as_usize())?; - } - Ok(()) - } -} - -/// A representation of a mutable sparse DFA state that can be cheaply -/// materialized from a state identifier. -#[cfg(feature = "dfa-build")] -struct StateMut<'a> { - /// The identifier of this state. - id: StateID, - /// Whether this is a match state or not. - is_match: bool, - /// The number of transitions in this state. - ntrans: usize, - /// Pairs of input ranges, where there is one pair for each transition. - /// Each pair specifies an inclusive start and end byte range for the - /// corresponding transition. - input_ranges: &'a mut [u8], - /// Transitions to the next state. This slice contains native endian - /// encoded state identifiers, with `S` as the representation. Thus, there - /// are `ntrans * size_of::()` bytes in this slice. - next: &'a mut [u8], - /// If this is a match state, then this contains the pattern IDs that match - /// when the DFA is in this state. - /// - /// This is a contiguous sequence of 32-bit native endian encoded integers. - pattern_ids: &'a [u8], - /// An accelerator for this state, if present. If this state has no - /// accelerator, then this is an empty slice. When non-empty, this slice - /// has length at most 3 and corresponds to the exhaustive set of bytes - /// that must be seen in order to transition out of this state. - accel: &'a mut [u8], -} - -#[cfg(feature = "dfa-build")] -impl<'a> StateMut<'a> { - /// Sets the ith transition to the given state. 
-    fn set_next_at(&mut self, i: usize, next: StateID) {
-        let start = i * StateID::SIZE;
-        let end = start + StateID::SIZE;
-        wire::write_state_id::<wire::NE>(next, &mut self.next[start..end]);
-    }
-}
-
-#[cfg(feature = "dfa-build")]
-impl<'a> fmt::Debug for StateMut<'a> {
-    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        let state = State {
-            id: self.id,
-            is_match: self.is_match,
-            ntrans: self.ntrans,
-            input_ranges: self.input_ranges,
-            next: self.next,
-            pattern_ids: self.pattern_ids,
-            accel: self.accel,
-        };
-        fmt::Debug::fmt(&state, f)
-    }
-}
-
-// In order to validate everything, we not only need to make sure we
-// can decode every state, but that every transition in every state
-// points to a valid state. There are many duplicative transitions, so
-// we record state IDs that we've verified so that we don't redo the
-// decoding work.
-//
-// Except, when in no_std mode, we don't have dynamic memory allocation
-// available to us, so we skip this optimization. It's not clear
-// whether doing something more clever is worth it just yet. If you're
-// profiling this code and need it to run faster, please file an issue.
-//
-// OK, so we also use this to record the set of valid state IDs. Since
-// it is possible for a transition to point to an invalid state ID that
-// still (somehow) deserializes to a valid state. So we need to make
-// sure our transitions are limited to actually correct state IDs.
-// The problem is, I'm not sure how to do this verification step in
-// no-std no-alloc mode. I think we'd *have* to store the set of valid
-// state IDs in the DFA itself. For now, we don't do this verification
-// in no-std no-alloc mode. The worst thing that can happen is an
-// incorrect result. But no panics or memory safety problems should
-// result. Because we still do validate that the state itself is
-// "valid" in the sense that everything it points to actually exists.
-//
-// ---AG
-#[derive(Debug)]
-struct Seen {
-    #[cfg(feature = "alloc")]
-    set: alloc::collections::BTreeSet<StateID>,
-    #[cfg(not(feature = "alloc"))]
-    set: core::marker::PhantomData<StateID>,
-}
-
-#[cfg(feature = "alloc")]
-impl Seen {
-    fn new() -> Seen {
-        Seen { set: alloc::collections::BTreeSet::new() }
-    }
-    fn insert(&mut self, id: StateID) {
-        self.set.insert(id);
-    }
-    fn contains(&self, id: &StateID) -> bool {
-        self.set.contains(id)
-    }
-}
-
-#[cfg(not(feature = "alloc"))]
-impl Seen {
-    fn new() -> Seen {
-        Seen { set: core::marker::PhantomData }
-    }
-    fn insert(&mut self, _id: StateID) {}
-    fn contains(&self, _id: &StateID) -> bool {
-        true
-    }
-}
-
-/*
-/// A binary search routine specialized specifically to a sparse DFA state's
-/// transitions. Specifically, the transitions are defined as a set of pairs
-/// of input bytes that delineate an inclusive range of bytes. If the input
-/// byte is in the range, then the corresponding transition is a match.
-///
-/// This binary search accepts a slice of these pairs and returns the position
-/// of the matching pair (the ith transition), or None if no matching pair
-/// could be found.
-///
-/// Note that this routine is not currently used since it was observed to
-/// either decrease performance when searching ASCII, or did not provide enough
-/// of a boost on non-ASCII haystacks to be worth it. However, we leave it here
-/// for posterity in case we can find a way to use it.
-/// -/// In theory, we could use the standard library's search routine if we could -/// cast a `&[u8]` to a `&[(u8, u8)]`, but I don't believe this is currently -/// guaranteed to be safe and is thus UB (since I don't think the in-memory -/// representation of `(u8, u8)` has been nailed down). One could define a -/// repr(C) type, but the casting doesn't seem justified. -#[cfg_attr(feature = "perf-inline", inline(always))] -fn binary_search_ranges(ranges: &[u8], needle: u8) -> Option { - debug_assert!(ranges.len() % 2 == 0, "ranges must have even length"); - debug_assert!(ranges.len() <= 512, "ranges should be short"); - - let (mut left, mut right) = (0, ranges.len() / 2); - while left < right { - let mid = (left + right) / 2; - let (b1, b2) = (ranges[mid * 2], ranges[mid * 2 + 1]); - if needle < b1 { - right = mid; - } else if needle > b2 { - left = mid + 1; - } else { - return Some(mid); - } - } - None -} -*/ - -#[cfg(all(test, feature = "syntax", feature = "dfa-build"))] -mod tests { - use crate::{ - dfa::{dense::DFA, Automaton}, - nfa::thompson, - Input, MatchError, - }; - - // See the analogous test in src/hybrid/dfa.rs and src/dfa/dense.rs. - #[test] - fn heuristic_unicode_forward() { - let dfa = DFA::builder() - .configure(DFA::config().unicode_word_boundary(true)) - .thompson(thompson::Config::new().reverse(true)) - .build(r"\b[0-9]+\b") - .unwrap() - .to_sparse() - .unwrap(); - - let input = Input::new("β123").range(2..); - let expected = MatchError::quit(0xB2, 1); - let got = dfa.try_search_fwd(&input); - assert_eq!(Err(expected), got); - - let input = Input::new("123β").range(..3); - let expected = MatchError::quit(0xCE, 3); - let got = dfa.try_search_fwd(&input); - assert_eq!(Err(expected), got); - } - - // See the analogous test in src/hybrid/dfa.rs and src/dfa/dense.rs. - #[test] - fn heuristic_unicode_reverse() { - let dfa = DFA::builder() - .configure(DFA::config().unicode_word_boundary(true)) - .thompson(thompson::Config::new().reverse(true)) - .build(r"\b[0-9]+\b") - .unwrap() - .to_sparse() - .unwrap(); - - let input = Input::new("β123").range(2..); - let expected = MatchError::quit(0xB2, 1); - let got = dfa.try_search_rev(&input); - assert_eq!(Err(expected), got); - - let input = Input::new("123β").range(..3); - let expected = MatchError::quit(0xCE, 3); - let got = dfa.try_search_rev(&input); - assert_eq!(Err(expected), got); - } -} diff --git a/vendor/regex-automata/src/dfa/special.rs b/vendor/regex-automata/src/dfa/special.rs deleted file mode 100644 index 197323116fb695..00000000000000 --- a/vendor/regex-automata/src/dfa/special.rs +++ /dev/null @@ -1,494 +0,0 @@ -use crate::{ - dfa::DEAD, - util::{ - primitives::StateID, - wire::{self, DeserializeError, Endian, SerializeError}, - }, -}; - -macro_rules! err { - ($msg:expr) => { - return Err(DeserializeError::generic($msg)); - }; -} - -// Special represents the identifiers in a DFA that correspond to "special" -// states. If a state is one or more of the following, then it is considered -// special: -// -// * dead - A non-matching state where all outgoing transitions lead back to -// itself. There is only one of these, regardless of whether minimization -// has run. The dead state always has an ID of 0. i.e., It is always the -// first state in a DFA. -// * quit - A state that is entered whenever a byte is seen that should cause -// a DFA to give up and stop searching. This results in a MatchError::quit -// error being returned at search time. 
The default configuration for a DFA -// has no quit bytes, which means this state is unreachable by default, -// although it is always present for reasons of implementation simplicity. -// This state is only reachable when the caller configures the DFA to quit -// on certain bytes. There is always exactly one of these states and it -// is always the second state. (Its actual ID depends on the size of the -// alphabet in dense DFAs, since state IDs are premultiplied in order to -// allow them to be used directly as indices into the transition table.) -// * match - An accepting state, i.e., indicative of a match. There may be -// zero or more of these states. -// * accelerated - A state where all of its outgoing transitions, except a -// few, loop back to itself. These states are candidates for acceleration -// via memchr during search. There may be zero or more of these states. -// * start - A non-matching state that indicates where the automaton should -// start during a search. There is always at least one starting state and -// all are guaranteed to be non-match states. (A start state cannot be a -// match state because the DFAs in this crate delay all matches by one byte. -// So every search that finds a match must move through one transition to -// some other match state, even when searching an empty string.) -// -// These are not mutually exclusive categories. Namely, the following -// overlapping can occur: -// -// * {dead, start} - If a DFA can never lead to a match and it is minimized, -// then it will typically compile to something where all starting IDs point -// to the DFA's dead state. -// * {match, accelerated} - It is possible for a match state to have the -// majority of its transitions loop back to itself, which means it's -// possible for a match state to be accelerated. -// * {start, accelerated} - Similarly, it is possible for a start state to be -// accelerated. Note that it is possible for an accelerated state to be -// neither a match or a start state. Also note that just because both match -// and start states overlap with accelerated states does not mean that -// match and start states overlap with each other. In fact, they are -// guaranteed not to overlap. -// -// As a special mention, every DFA always has a dead and a quit state, even -// though from the perspective of the DFA, they are equivalent. (Indeed, -// minimization special cases them to ensure they don't get merged.) The -// purpose of keeping them distinct is to use the quit state as a sentinel to -// distinguish between whether a search finished successfully without finding -// anything or whether it gave up before finishing. -// -// So the main problem we want to solve here is the *fast* detection of whether -// a state is special or not. And we also want to do this while storing as -// little extra data as possible. AND we want to be able to quickly determine -// which categories a state falls into above if it is special. -// -// We achieve this by essentially shuffling all special states to the beginning -// of a DFA. That is, all special states appear before every other non-special -// state. By representing special states this way, we can determine whether a -// state is special or not by a single comparison, where special.max is the -// identifier of the last special state in the DFA: -// -// if current_state <= special.max: -// ... do something with special state -// -// The only thing left to do is to determine what kind of special state -// it is. Because what we do next depends on that. 
Since special states -// are typically rare, we can afford to do a bit more extra work, but we'd -// still like this to be as fast as possible. The trick we employ here is to -// continue shuffling states even within the special state range. Such that -// one contiguous region corresponds to match states, another for start states -// and then an overlapping range for accelerated states. At a high level, our -// special state detection might look like this (for leftmost searching, where -// we continue searching even after seeing a match): -// -// byte = input[offset] -// current_state = next_state(current_state, byte) -// offset += 1 -// if current_state <= special.max: -// if current_state == 0: -// # We can never leave a dead state, so this always marks the -// # end of our search. -// return last_match -// if current_state == special.quit_id: -// # A quit state means we give up. If he DFA has no quit state, -// # then special.quit_id == 0 == dead, which is handled by the -// # conditional above. -// return Err(MatchError::quit { byte, offset: offset - 1 }) -// if special.min_match <= current_state <= special.max_match: -// last_match = Some(offset) -// if special.min_accel <= current_state <= special.max_accel: -// offset = accelerate(input, offset) -// last_match = Some(offset) -// elif special.min_start <= current_state <= special.max_start: -// offset = prefilter.find(input, offset) -// if special.min_accel <= current_state <= special.max_accel: -// offset = accelerate(input, offset) -// elif special.min_accel <= current_state <= special.max_accel: -// offset = accelerate(input, offset) -// -// There are some small details left out of the logic above. For example, -// in order to accelerate a state, we need to know which bytes to search for. -// This in turn implies some extra data we need to store in the DFA. To keep -// things compact, we would ideally only store -// -// N = special.max_accel - special.min_accel + 1 -// -// items. But state IDs are premultiplied, which means they are not contiguous. -// So in order to take a state ID and index an array of accelerated structures, -// we need to do: -// -// i = (state_id - special.min_accel) / stride -// -// (N.B. 'stride' is always a power of 2, so the above can be implemented via -// '(state_id - special.min_accel) >> stride2', where 'stride2' is x in -// 2^x=stride.) -// -// Moreover, some of these specialty categories may be empty. For example, -// DFAs are not required to have any match states or any accelerated states. -// In that case, the lower and upper bounds are both set to 0 (the dead state -// ID) and the first `current_state == 0` check subsumes cases where the -// ranges are empty. -// -// Loop unrolling, if applicable, has also been left out of the logic above. -// -// Graphically, the ranges look like this, where asterisks indicate ranges -// that can be empty. Each 'x' is a state. -// -// quit -// dead| -// || -// xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx -// | | | | start | | -// | |-------------| |-------| | -// | match* | | | | -// | | | | | -// | |----------| | | -// | accel* | | -// | | | -// | | | -// |----------------------------|------------------------ -// special non-special* -#[derive(Clone, Copy, Debug)] -pub(crate) struct Special { - /// The identifier of the last special state in a DFA. A state is special - /// if and only if its identifier is less than or equal to `max`. - pub(crate) max: StateID, - /// The identifier of the quit state in a DFA. 
(There is no analogous field - /// for the dead state since the dead state's ID is always zero, regardless - /// of state ID size.) - pub(crate) quit_id: StateID, - /// The identifier of the first match state. - pub(crate) min_match: StateID, - /// The identifier of the last match state. - pub(crate) max_match: StateID, - /// The identifier of the first accelerated state. - pub(crate) min_accel: StateID, - /// The identifier of the last accelerated state. - pub(crate) max_accel: StateID, - /// The identifier of the first start state. - pub(crate) min_start: StateID, - /// The identifier of the last start state. - pub(crate) max_start: StateID, -} - -impl Special { - /// Creates a new set of special ranges for a DFA. All ranges are initially - /// set to only contain the dead state. This is interpreted as an empty - /// range. - #[cfg(feature = "dfa-build")] - pub(crate) fn new() -> Special { - Special { - max: DEAD, - quit_id: DEAD, - min_match: DEAD, - max_match: DEAD, - min_accel: DEAD, - max_accel: DEAD, - min_start: DEAD, - max_start: DEAD, - } - } - - /// Remaps all of the special state identifiers using the function given. - #[cfg(feature = "dfa-build")] - pub(crate) fn remap(&self, map: impl Fn(StateID) -> StateID) -> Special { - Special { - max: map(self.max), - quit_id: map(self.quit_id), - min_match: map(self.min_match), - max_match: map(self.max_match), - min_accel: map(self.min_accel), - max_accel: map(self.max_accel), - min_start: map(self.min_start), - max_start: map(self.max_start), - } - } - - /// Deserialize the given bytes into special state ranges. If the slice - /// given is not big enough, then this returns an error. Similarly, if - /// any of the expected invariants around special state ranges aren't - /// upheld, an error is returned. Note that this does not guarantee that - /// the information returned is correct. - /// - /// Upon success, this returns the number of bytes read in addition to the - /// special state IDs themselves. - pub(crate) fn from_bytes( - mut slice: &[u8], - ) -> Result<(Special, usize), DeserializeError> { - wire::check_slice_len(slice, 8 * StateID::SIZE, "special states")?; - - let mut nread = 0; - let mut read_id = |what| -> Result { - let (id, nr) = wire::try_read_state_id(slice, what)?; - nread += nr; - slice = &slice[StateID::SIZE..]; - Ok(id) - }; - - let max = read_id("special max id")?; - let quit_id = read_id("special quit id")?; - let min_match = read_id("special min match id")?; - let max_match = read_id("special max match id")?; - let min_accel = read_id("special min accel id")?; - let max_accel = read_id("special max accel id")?; - let min_start = read_id("special min start id")?; - let max_start = read_id("special max start id")?; - - let special = Special { - max, - quit_id, - min_match, - max_match, - min_accel, - max_accel, - min_start, - max_start, - }; - special.validate()?; - assert_eq!(nread, special.write_to_len()); - Ok((special, nread)) - } - - /// Validate that the information describing special states satisfies - /// all known invariants. - pub(crate) fn validate(&self) -> Result<(), DeserializeError> { - // Check that both ends of the range are DEAD or neither are. 
- if self.min_match == DEAD && self.max_match != DEAD { - err!("min_match is DEAD, but max_match is not"); - } - if self.min_match != DEAD && self.max_match == DEAD { - err!("max_match is DEAD, but min_match is not"); - } - if self.min_accel == DEAD && self.max_accel != DEAD { - err!("min_accel is DEAD, but max_accel is not"); - } - if self.min_accel != DEAD && self.max_accel == DEAD { - err!("max_accel is DEAD, but min_accel is not"); - } - if self.min_start == DEAD && self.max_start != DEAD { - err!("min_start is DEAD, but max_start is not"); - } - if self.min_start != DEAD && self.max_start == DEAD { - err!("max_start is DEAD, but min_start is not"); - } - - // Check that ranges are well formed. - if self.min_match > self.max_match { - err!("min_match should not be greater than max_match"); - } - if self.min_accel > self.max_accel { - err!("min_accel should not be greater than max_accel"); - } - if self.min_start > self.max_start { - err!("min_start should not be greater than max_start"); - } - - // Check that ranges are ordered with respect to one another. - if self.matches() && self.quit_id >= self.min_match { - err!("quit_id should not be greater than min_match"); - } - if self.accels() && self.quit_id >= self.min_accel { - err!("quit_id should not be greater than min_accel"); - } - if self.starts() && self.quit_id >= self.min_start { - err!("quit_id should not be greater than min_start"); - } - if self.matches() && self.accels() && self.min_accel < self.min_match { - err!("min_match should not be greater than min_accel"); - } - if self.matches() && self.starts() && self.min_start < self.min_match { - err!("min_match should not be greater than min_start"); - } - if self.accels() && self.starts() && self.min_start < self.min_accel { - err!("min_accel should not be greater than min_start"); - } - - // Check that max is at least as big as everything else. - if self.max < self.quit_id { - err!("quit_id should not be greater than max"); - } - if self.max < self.max_match { - err!("max_match should not be greater than max"); - } - if self.max < self.max_accel { - err!("max_accel should not be greater than max"); - } - if self.max < self.max_start { - err!("max_start should not be greater than max"); - } - - Ok(()) - } - - /// Validate that the special state information is compatible with the - /// given state len. - pub(crate) fn validate_state_len( - &self, - len: usize, - stride2: usize, - ) -> Result<(), DeserializeError> { - // We assume that 'validate' has already passed, so we know that 'max' - // is truly the max. So all we need to check is that the max state ID - // is less than the state ID len. The max legal value here is len-1, - // which occurs when there are no non-special states. - if (self.max.as_usize() >> stride2) >= len { - err!("max should not be greater than or equal to state length"); - } - Ok(()) - } - - /// Write the IDs and ranges for special states to the given byte buffer. - /// The buffer given must have enough room to store all data, otherwise - /// this will return an error. The number of bytes written is returned - /// on success. The number of bytes written is guaranteed to be a multiple - /// of 8. 
-    pub(crate) fn write_to<E: Endian>(
-        &self,
-        dst: &mut [u8],
-    ) -> Result<usize, SerializeError> {
-        use crate::util::wire::write_state_id as write;
-
-        if dst.len() < self.write_to_len() {
-            return Err(SerializeError::buffer_too_small("special state ids"));
-        }
-
-        let mut nwrite = 0;
-        nwrite += write::<E>(self.max, &mut dst[nwrite..]);
-        nwrite += write::<E>(self.quit_id, &mut dst[nwrite..]);
-        nwrite += write::<E>(self.min_match, &mut dst[nwrite..]);
-        nwrite += write::<E>(self.max_match, &mut dst[nwrite..]);
-        nwrite += write::<E>(self.min_accel, &mut dst[nwrite..]);
-        nwrite += write::<E>(self.max_accel, &mut dst[nwrite..]);
-        nwrite += write::<E>(self.min_start, &mut dst[nwrite..]);
-        nwrite += write::<E>(self.max_start, &mut dst[nwrite..]);
-
-        assert_eq!(
-            self.write_to_len(),
-            nwrite,
-            "expected to write certain number of bytes",
-        );
-        assert_eq!(
-            nwrite % 8,
-            0,
-            "expected to write multiple of 8 bytes for special states",
-        );
-        Ok(nwrite)
-    }
-
-    /// Returns the total number of bytes written by `write_to`.
-    pub(crate) fn write_to_len(&self) -> usize {
-        8 * StateID::SIZE
-    }
-
-    /// Sets the maximum special state ID based on the current values. This
-    /// should be used once all possible state IDs are set.
-    #[cfg(feature = "dfa-build")]
-    pub(crate) fn set_max(&mut self) {
-        use core::cmp::max;
-        self.max = max(
-            self.quit_id,
-            max(self.max_match, max(self.max_accel, self.max_start)),
-        );
-    }
-
-    /// Sets the maximum special state ID such that starting states are not
-    /// considered "special." This also marks the min/max starting states as
-    /// DEAD such that 'is_start_state' always returns false, even if the state
-    /// is actually a starting state.
-    ///
-    /// This is useful when there is no prefilter set. It will avoid
-    /// ping-ponging between the hot path in the DFA search code and the start
-    /// state handling code, which is typically only useful for executing a
-    /// prefilter.
-    #[cfg(feature = "dfa-build")]
-    pub(crate) fn set_no_special_start_states(&mut self) {
-        use core::cmp::max;
-        self.max = max(self.quit_id, max(self.max_match, self.max_accel));
-        self.min_start = DEAD;
-        self.max_start = DEAD;
-    }
-
-    /// Returns true if and only if the given state ID is a special state.
-    #[inline]
-    pub(crate) fn is_special_state(&self, id: StateID) -> bool {
-        id <= self.max
-    }
-
-    /// Returns true if and only if the given state ID is a dead state.
-    #[inline]
-    pub(crate) fn is_dead_state(&self, id: StateID) -> bool {
-        id == DEAD
-    }
-
-    /// Returns true if and only if the given state ID is a quit state.
-    #[inline]
-    pub(crate) fn is_quit_state(&self, id: StateID) -> bool {
-        !self.is_dead_state(id) && self.quit_id == id
-    }
-
-    /// Returns true if and only if the given state ID is a match state.
-    #[inline]
-    pub(crate) fn is_match_state(&self, id: StateID) -> bool {
-        !self.is_dead_state(id) && self.min_match <= id && id <= self.max_match
-    }
-
-    /// Returns true if and only if the given state ID is an accel state.
-    #[inline]
-    pub(crate) fn is_accel_state(&self, id: StateID) -> bool {
-        !self.is_dead_state(id) && self.min_accel <= id && id <= self.max_accel
-    }
-
-    /// Returns true if and only if the given state ID is a start state.
-    #[inline]
-    pub(crate) fn is_start_state(&self, id: StateID) -> bool {
-        !self.is_dead_state(id) && self.min_start <= id && id <= self.max_start
-    }
-
-    /// Returns the total number of match states for a dense table based DFA.
- #[inline] - pub(crate) fn match_len(&self, stride: usize) -> usize { - if self.matches() { - (self.max_match.as_usize() - self.min_match.as_usize() + stride) - / stride - } else { - 0 - } - } - - /// Returns true if and only if there is at least one match state. - #[inline] - pub(crate) fn matches(&self) -> bool { - self.min_match != DEAD - } - - /// Returns the total number of accel states. - #[cfg(feature = "dfa-build")] - pub(crate) fn accel_len(&self, stride: usize) -> usize { - if self.accels() { - (self.max_accel.as_usize() - self.min_accel.as_usize() + stride) - / stride - } else { - 0 - } - } - - /// Returns true if and only if there is at least one accel state. - #[inline] - pub(crate) fn accels(&self) -> bool { - self.min_accel != DEAD - } - - /// Returns true if and only if there is at least one start state. - #[inline] - pub(crate) fn starts(&self) -> bool { - self.min_start != DEAD - } -} diff --git a/vendor/regex-automata/src/dfa/start.rs b/vendor/regex-automata/src/dfa/start.rs deleted file mode 100644 index fddc702df5628e..00000000000000 --- a/vendor/regex-automata/src/dfa/start.rs +++ /dev/null @@ -1,74 +0,0 @@ -use core::mem::size_of; - -use crate::util::wire::{self, DeserializeError, Endian, SerializeError}; - -/// The kind of anchored starting configurations to support in a DFA. -/// -/// Fully compiled DFAs need to be explicitly configured as to which anchored -/// starting configurations to support. The reason for not just supporting -/// everything unconditionally is that it can use more resources (such as -/// memory and build time). The downside of this is that if you try to execute -/// a search using an [`Anchored`](crate::Anchored) mode that is not supported -/// by the DFA, then the search will return an error. -#[derive(Clone, Copy, Debug, Eq, PartialEq)] -pub enum StartKind { - /// Support both anchored and unanchored searches. - Both, - /// Support only unanchored searches. Requesting an anchored search will - /// panic. - /// - /// Note that even if an unanchored search is requested, the pattern itself - /// may still be anchored. For example, `^abc` will only match `abc` at the - /// start of a haystack. This will remain true, even if the regex engine - /// only supported unanchored searches. - Unanchored, - /// Support only anchored searches. Requesting an unanchored search will - /// panic. 
- Anchored, -} - -impl StartKind { - pub(crate) fn from_bytes( - slice: &[u8], - ) -> Result<(StartKind, usize), DeserializeError> { - wire::check_slice_len(slice, size_of::(), "start kind bytes")?; - let (n, nr) = wire::try_read_u32(slice, "start kind integer")?; - match n { - 0 => Ok((StartKind::Both, nr)), - 1 => Ok((StartKind::Unanchored, nr)), - 2 => Ok((StartKind::Anchored, nr)), - _ => Err(DeserializeError::generic("unrecognized start kind")), - } - } - - pub(crate) fn write_to( - &self, - dst: &mut [u8], - ) -> Result { - let nwrite = self.write_to_len(); - if dst.len() < nwrite { - return Err(SerializeError::buffer_too_small("start kind")); - } - let n = match *self { - StartKind::Both => 0, - StartKind::Unanchored => 1, - StartKind::Anchored => 2, - }; - E::write_u32(n, dst); - Ok(nwrite) - } - - pub(crate) fn write_to_len(&self) -> usize { - size_of::() - } - - #[cfg_attr(feature = "perf-inline", inline(always))] - pub(crate) fn has_unanchored(&self) -> bool { - matches!(*self, StartKind::Both | StartKind::Unanchored) - } - - #[cfg_attr(feature = "perf-inline", inline(always))] - pub(crate) fn has_anchored(&self) -> bool { - matches!(*self, StartKind::Both | StartKind::Anchored) - } -} diff --git a/vendor/regex-automata/src/hybrid/dfa.rs b/vendor/regex-automata/src/hybrid/dfa.rs deleted file mode 100644 index 22893d7a328d1b..00000000000000 --- a/vendor/regex-automata/src/hybrid/dfa.rs +++ /dev/null @@ -1,4434 +0,0 @@ -/*! -Types and routines specific to lazy DFAs. - -This module is the home of [`hybrid::dfa::DFA`](DFA). - -This module also contains a [`hybrid::dfa::Builder`](Builder) and a -[`hybrid::dfa::Config`](Config) for configuring and building a lazy DFA. -*/ - -use core::{iter, mem::size_of}; - -use alloc::vec::Vec; - -use crate::{ - hybrid::{ - error::{BuildError, CacheError, StartError}, - id::{LazyStateID, LazyStateIDError}, - search, - }, - nfa::thompson, - util::{ - alphabet::{self, ByteClasses, ByteSet}, - determinize::{self, State, StateBuilderEmpty, StateBuilderNFA}, - empty, - prefilter::Prefilter, - primitives::{PatternID, StateID as NFAStateID}, - search::{ - Anchored, HalfMatch, Input, MatchError, MatchKind, PatternSet, - }, - sparse_set::SparseSets, - start::{self, Start, StartByteMap}, - }, -}; - -/// The minimum number of states that a lazy DFA's cache size must support. -/// -/// This is checked at time of construction to ensure that at least some small -/// number of states can fit in the given capacity allotment. If we can't fit -/// at least this number of states, then the thinking is that it's pretty -/// senseless to use the lazy DFA. More to the point, parts of the code do -/// assume that the cache can fit at least some small number of states. -const MIN_STATES: usize = SENTINEL_STATES + 2; - -/// The number of "sentinel" states that get added to every lazy DFA. -/// -/// These are special states indicating status conditions of a search: unknown, -/// dead and quit. These states in particular also use zero NFA states, so -/// their memory usage is quite small. This is relevant for computing the -/// minimum memory needed for a lazy DFA cache. -const SENTINEL_STATES: usize = 3; - -/// A hybrid NFA/DFA (also called a "lazy DFA") for regex searching. -/// -/// A lazy DFA is a DFA that builds itself at search time. It otherwise has -/// very similar characteristics as a [`dense::DFA`](crate::dfa::dense::DFA). -/// Indeed, both support precisely the same regex features with precisely the -/// same semantics. 
-/// -/// Where as a `dense::DFA` must be completely built to handle any input before -/// it may be used for search, a lazy DFA starts off effectively empty. During -/// a search, a lazy DFA will build itself depending on whether it has already -/// computed the next transition or not. If it has, then it looks a lot like -/// a `dense::DFA` internally: it does a very fast table based access to find -/// the next transition. Otherwise, if the state hasn't been computed, then it -/// does determinization _for that specific transition_ to compute the next DFA -/// state. -/// -/// The main selling point of a lazy DFA is that, in practice, it has -/// the performance profile of a `dense::DFA` without the weakness of it -/// taking worst case exponential time to build. Indeed, for each byte of -/// input, the lazy DFA will construct as most one new DFA state. Thus, a -/// lazy DFA achieves worst case `O(mn)` time for regex search (where `m ~ -/// pattern.len()` and `n ~ haystack.len()`). -/// -/// The main downsides of a lazy DFA are: -/// -/// 1. It requires mutable "cache" space during search. This is where the -/// transition table, among other things, is stored. -/// 2. In pathological cases (e.g., if the cache is too small), it will run -/// out of room and either require a bigger cache capacity or will repeatedly -/// clear the cache and thus repeatedly regenerate DFA states. Overall, this -/// will tend to be slower than a typical NFA simulation. -/// -/// # Capabilities -/// -/// Like a `dense::DFA`, a single lazy DFA fundamentally supports the following -/// operations: -/// -/// 1. Detection of a match. -/// 2. Location of the end of a match. -/// 3. In the case of a lazy DFA with multiple patterns, which pattern matched -/// is reported as well. -/// -/// A notable absence from the above list of capabilities is the location of -/// the *start* of a match. In order to provide both the start and end of -/// a match, *two* lazy DFAs are required. This functionality is provided by a -/// [`Regex`](crate::hybrid::regex::Regex). -/// -/// # Example -/// -/// This shows how to build a lazy DFA with the default configuration and -/// execute a search. Notice how, in contrast to a `dense::DFA`, we must create -/// a cache and pass it to our search routine. -/// -/// ``` -/// use regex_automata::{hybrid::dfa::DFA, HalfMatch, Input}; -/// -/// let dfa = DFA::new("foo[0-9]+")?; -/// let mut cache = dfa.create_cache(); -/// -/// let expected = Some(HalfMatch::must(0, 8)); -/// assert_eq!(expected, dfa.try_search_fwd( -/// &mut cache, &Input::new("foo12345"))?, -/// ); -/// # Ok::<(), Box>(()) -/// ``` -#[derive(Clone, Debug)] -pub struct DFA { - config: Config, - nfa: thompson::NFA, - stride2: usize, - start_map: StartByteMap, - classes: ByteClasses, - quitset: ByteSet, - cache_capacity: usize, -} - -impl DFA { - /// Parse the given regular expression using a default configuration and - /// return the corresponding lazy DFA. - /// - /// If you want a non-default configuration, then use the [`Builder`] to - /// set your own configuration. 
- /// - /// # Example - /// - /// ``` - /// use regex_automata::{hybrid::dfa::DFA, HalfMatch, Input}; - /// - /// let dfa = DFA::new("foo[0-9]+bar")?; - /// let mut cache = dfa.create_cache(); - /// - /// let expected = HalfMatch::must(0, 11); - /// assert_eq!( - /// Some(expected), - /// dfa.try_search_fwd(&mut cache, &Input::new("foo12345bar"))?, - /// ); - /// # Ok::<(), Box>(()) - /// ``` - #[cfg(feature = "syntax")] - pub fn new(pattern: &str) -> Result { - DFA::builder().build(pattern) - } - - /// Parse the given regular expressions using a default configuration and - /// return the corresponding lazy multi-DFA. - /// - /// If you want a non-default configuration, then use the [`Builder`] to - /// set your own configuration. - /// - /// # Example - /// - /// ``` - /// use regex_automata::{hybrid::dfa::DFA, HalfMatch, Input}; - /// - /// let dfa = DFA::new_many(&["[0-9]+", "[a-z]+"])?; - /// let mut cache = dfa.create_cache(); - /// - /// let expected = HalfMatch::must(1, 3); - /// assert_eq!( - /// Some(expected), - /// dfa.try_search_fwd(&mut cache, &Input::new("foo12345bar"))?, - /// ); - /// # Ok::<(), Box>(()) - /// ``` - #[cfg(feature = "syntax")] - pub fn new_many>(patterns: &[P]) -> Result { - DFA::builder().build_many(patterns) - } - - /// Create a new lazy DFA that matches every input. - /// - /// # Example - /// - /// ``` - /// use regex_automata::{hybrid::dfa::DFA, HalfMatch, Input}; - /// - /// let dfa = DFA::always_match()?; - /// let mut cache = dfa.create_cache(); - /// - /// let expected = HalfMatch::must(0, 0); - /// assert_eq!(Some(expected), dfa.try_search_fwd( - /// &mut cache, &Input::new(""))?, - /// ); - /// assert_eq!(Some(expected), dfa.try_search_fwd( - /// &mut cache, &Input::new("foo"))?, - /// ); - /// # Ok::<(), Box>(()) - /// ``` - pub fn always_match() -> Result { - let nfa = thompson::NFA::always_match(); - Builder::new().build_from_nfa(nfa) - } - - /// Create a new lazy DFA that never matches any input. - /// - /// # Example - /// - /// ``` - /// use regex_automata::{hybrid::dfa::DFA, Input}; - /// - /// let dfa = DFA::never_match()?; - /// let mut cache = dfa.create_cache(); - /// - /// assert_eq!(None, dfa.try_search_fwd(&mut cache, &Input::new(""))?); - /// assert_eq!(None, dfa.try_search_fwd(&mut cache, &Input::new("foo"))?); - /// # Ok::<(), Box>(()) - /// ``` - pub fn never_match() -> Result { - let nfa = thompson::NFA::never_match(); - Builder::new().build_from_nfa(nfa) - } - - /// Return a default configuration for a `DFA`. - /// - /// This is a convenience routine to avoid needing to import the [`Config`] - /// type when customizing the construction of a lazy DFA. - /// - /// # Example - /// - /// This example shows how to build a lazy DFA that heuristically supports - /// Unicode word boundaries. - /// - /// ``` - /// # if cfg!(miri) { return Ok(()); } // miri takes too long - /// use regex_automata::{hybrid::dfa::DFA, HalfMatch, MatchError, Input}; - /// - /// let re = DFA::builder() - /// .configure(DFA::config().unicode_word_boundary(true)) - /// .build(r"\b\w+\b")?; - /// let mut cache = re.create_cache(); - /// - /// // Since our haystack is all ASCII, the DFA search sees then and knows - /// // it is legal to interpret Unicode word boundaries as ASCII word - /// // boundaries. 
- /// let input = Input::new("!!foo!!"); - /// let expected = HalfMatch::must(0, 5); - /// assert_eq!(Some(expected), re.try_search_fwd(&mut cache, &input)?); - /// - /// // But if our haystack contains non-ASCII, then the search will fail - /// // with an error. - /// let input = Input::new("!!βββ!!"); - /// let expected = MatchError::quit(b'\xCE', 2); - /// assert_eq!(Err(expected), re.try_search_fwd(&mut cache, &input)); - /// - /// # Ok::<(), Box>(()) - /// ``` - pub fn config() -> Config { - Config::new() - } - - /// Return a builder for configuring the construction of a `Regex`. - /// - /// This is a convenience routine to avoid needing to import the - /// [`Builder`] type in common cases. - /// - /// # Example - /// - /// This example shows how to use the builder to disable UTF-8 mode - /// everywhere for lazy DFAs. - /// - /// ``` - /// # if cfg!(miri) { return Ok(()); } // miri takes too long - /// use regex_automata::{hybrid::dfa::DFA, util::syntax, HalfMatch, Input}; - /// - /// let re = DFA::builder() - /// .syntax(syntax::Config::new().utf8(false)) - /// .build(r"foo(?-u:[^b])ar.*")?; - /// let mut cache = re.create_cache(); - /// - /// let input = Input::new(b"\xFEfoo\xFFarzz\xE2\x98\xFF\n"); - /// let expected = Some(HalfMatch::must(0, 9)); - /// let got = re.try_search_fwd(&mut cache, &input)?; - /// assert_eq!(expected, got); - /// - /// # Ok::<(), Box>(()) - /// ``` - pub fn builder() -> Builder { - Builder::new() - } - - /// Create a new cache for this lazy DFA. - /// - /// The cache returned should only be used for searches for this - /// lazy DFA. If you want to reuse the cache for another DFA, then - /// you must call [`Cache::reset`] with that DFA (or, equivalently, - /// [`DFA::reset_cache`]). - pub fn create_cache(&self) -> Cache { - Cache::new(self) - } - - /// Reset the given cache such that it can be used for searching with the - /// this lazy DFA (and only this DFA). - /// - /// A cache reset permits reusing memory already allocated in this cache - /// with a different lazy DFA. - /// - /// Resetting a cache sets its "clear count" to 0. This is relevant if the - /// lazy DFA has been configured to "give up" after it has cleared the - /// cache a certain number of times. - /// - /// Any lazy state ID generated by the cache prior to resetting it is - /// invalid after the reset. - /// - /// # Example - /// - /// This shows how to re-purpose a cache for use with a different DFA. - /// - /// ``` - /// # if cfg!(miri) { return Ok(()); } // miri takes too long - /// use regex_automata::{hybrid::dfa::DFA, HalfMatch, Input}; - /// - /// let dfa1 = DFA::new(r"\w")?; - /// let dfa2 = DFA::new(r"\W")?; - /// - /// let mut cache = dfa1.create_cache(); - /// assert_eq!( - /// Some(HalfMatch::must(0, 2)), - /// dfa1.try_search_fwd(&mut cache, &Input::new("Δ"))?, - /// ); - /// - /// // Using 'cache' with dfa2 is not allowed. It may result in panics or - /// // incorrect results. In order to re-purpose the cache, we must reset - /// // it with the DFA we'd like to use it with. - /// // - /// // Similarly, after this reset, using the cache with 'dfa1' is also not - /// // allowed. - /// dfa2.reset_cache(&mut cache); - /// assert_eq!( - /// Some(HalfMatch::must(0, 3)), - /// dfa2.try_search_fwd(&mut cache, &Input::new("☃"))?, - /// ); - /// - /// # Ok::<(), Box>(()) - /// ``` - pub fn reset_cache(&self, cache: &mut Cache) { - Lazy::new(self, cache).reset_cache() - } - - /// Returns the total number of patterns compiled into this lazy DFA. 
- /// - /// In the case of a DFA that contains no patterns, this returns `0`. - /// - /// # Example - /// - /// This example shows the pattern length for a DFA that never matches: - /// - /// ``` - /// use regex_automata::hybrid::dfa::DFA; - /// - /// let dfa = DFA::never_match()?; - /// assert_eq!(dfa.pattern_len(), 0); - /// # Ok::<(), Box>(()) - /// ``` - /// - /// And another example for a DFA that matches at every position: - /// - /// ``` - /// use regex_automata::hybrid::dfa::DFA; - /// - /// let dfa = DFA::always_match()?; - /// assert_eq!(dfa.pattern_len(), 1); - /// # Ok::<(), Box>(()) - /// ``` - /// - /// And finally, a DFA that was constructed from multiple patterns: - /// - /// ``` - /// use regex_automata::hybrid::dfa::DFA; - /// - /// let dfa = DFA::new_many(&["[0-9]+", "[a-z]+", "[A-Z]+"])?; - /// assert_eq!(dfa.pattern_len(), 3); - /// # Ok::<(), Box>(()) - /// ``` - pub fn pattern_len(&self) -> usize { - self.nfa.pattern_len() - } - - /// Returns the equivalence classes that make up the alphabet for this DFA. - /// - /// Unless [`Config::byte_classes`] was disabled, it is possible that - /// multiple distinct bytes are grouped into the same equivalence class - /// if it is impossible for them to discriminate between a match and a - /// non-match. This has the effect of reducing the overall alphabet size - /// and in turn potentially substantially reducing the size of the DFA's - /// transition table. - /// - /// The downside of using equivalence classes like this is that every state - /// transition will automatically use this map to convert an arbitrary - /// byte to its corresponding equivalence class. In practice this has a - /// negligible impact on performance. - pub fn byte_classes(&self) -> &ByteClasses { - &self.classes - } - - /// Returns this lazy DFA's configuration. - pub fn get_config(&self) -> &Config { - &self.config - } - - /// Returns a reference to the underlying NFA. - pub fn get_nfa(&self) -> &thompson::NFA { - &self.nfa - } - - /// Returns the stride, as a base-2 exponent, required for these - /// equivalence classes. - /// - /// The stride is always the smallest power of 2 that is greater than or - /// equal to the alphabet length. This is done so that converting between - /// state IDs and indices can be done with shifts alone, which is much - /// faster than integer division. - fn stride2(&self) -> usize { - self.stride2 - } - - /// Returns the total stride for every state in this lazy DFA. This - /// corresponds to the total number of transitions used by each state in - /// this DFA's transition table. - fn stride(&self) -> usize { - 1 << self.stride2() - } - - /// Returns the memory usage, in bytes, of this lazy DFA. - /// - /// This does **not** include the stack size used up by this lazy DFA. To - /// compute that, use `std::mem::size_of::()`. This also does not - /// include the size of the `Cache` used. - /// - /// This also does not include any heap memory used by the NFA inside of - /// this hybrid NFA/DFA. This is because the NFA's ownership is shared, and - /// thus not owned by this hybrid NFA/DFA. More practically, several regex - /// engines in this crate embed an NFA, and reporting the NFA's memory - /// usage in all of them would likely result in reporting higher heap - /// memory than is actually used. - pub fn memory_usage(&self) -> usize { - // The only thing that uses heap memory in a DFA is the NFA. 
But the - // NFA has shared ownership, so reporting its memory as part of the - // hybrid DFA is likely to lead to double-counting the NFA memory - // somehow. In particular, this DFA does not really own an NFA, so - // including it in the DFA's memory usage doesn't seem semantically - // correct. - 0 - } -} - -impl DFA { - /// Executes a forward search and returns the end position of the leftmost - /// match that is found. If no match exists, then `None` is returned. - /// - /// In particular, this method continues searching even after it enters - /// a match state. The search only terminates once it has reached the - /// end of the input or when it has entered a dead or quit state. Upon - /// termination, the position of the last byte seen while still in a match - /// state is returned. - /// - /// # Errors - /// - /// This routine errors if the search could not complete. This can occur - /// in a number of circumstances: - /// - /// * The configuration of the lazy DFA may permit it to "quit" the search. - /// For example, setting quit bytes or enabling heuristic support for - /// Unicode word boundaries. The default configuration does not enable any - /// option that could result in the lazy DFA quitting. - /// * The configuration of the lazy DFA may also permit it to "give up" - /// on a search if it makes ineffective use of its transition table - /// cache. The default configuration does not enable this by default, - /// although it is typically a good idea to. - /// * When the provided `Input` configuration is not supported. For - /// example, by providing an unsupported anchor mode. - /// - /// When a search returns an error, callers cannot know whether a match - /// exists or not. - /// - /// # Example - /// - /// This example shows how to run a basic search. - /// - /// ``` - /// use regex_automata::{hybrid::dfa::DFA, HalfMatch, Input}; - /// - /// let dfa = DFA::new("foo[0-9]+")?; - /// let mut cache = dfa.create_cache(); - /// let expected = HalfMatch::must(0, 8); - /// assert_eq!(Some(expected), dfa.try_search_fwd( - /// &mut cache, &Input::new("foo12345"))?, - /// ); - /// - /// // Even though a match is found after reading the first byte (`a`), - /// // the leftmost first match semantics demand that we find the earliest - /// // match that prefers earlier parts of the pattern over later parts. - /// let dfa = DFA::new("abc|a")?; - /// let mut cache = dfa.create_cache(); - /// let expected = HalfMatch::must(0, 3); - /// assert_eq!(Some(expected), dfa.try_search_fwd( - /// &mut cache, &Input::new("abc"))?, - /// ); - /// - /// # Ok::<(), Box>(()) - /// ``` - /// - /// # Example: specific pattern search - /// - /// This example shows how to build a lazy multi-DFA that permits searching - /// for specific patterns. - /// - /// ``` - /// use regex_automata::{ - /// hybrid::dfa::DFA, - /// Anchored, HalfMatch, PatternID, Input, - /// }; - /// - /// let dfa = DFA::builder() - /// .configure(DFA::config().starts_for_each_pattern(true)) - /// .build_many(&["[a-z0-9]{6}", "[a-z][a-z0-9]{5}"])?; - /// let mut cache = dfa.create_cache(); - /// let haystack = "foo123"; - /// - /// // Since we are using the default leftmost-first match and both - /// // patterns match at the same starting position, only the first pattern - /// // will be returned in this case when doing a search for any of the - /// // patterns. 
- /// let expected = Some(HalfMatch::must(0, 6)); - /// let got = dfa.try_search_fwd(&mut cache, &Input::new(haystack))?; - /// assert_eq!(expected, got); - /// - /// // But if we want to check whether some other pattern matches, then we - /// // can provide its pattern ID. - /// let expected = Some(HalfMatch::must(1, 6)); - /// let input = Input::new(haystack) - /// .anchored(Anchored::Pattern(PatternID::must(1))); - /// let got = dfa.try_search_fwd(&mut cache, &input)?; - /// assert_eq!(expected, got); - /// - /// # Ok::<(), Box>(()) - /// ``` - /// - /// # Example: specifying the bounds of a search - /// - /// This example shows how providing the bounds of a search can produce - /// different results than simply sub-slicing the haystack. - /// - /// ``` - /// use regex_automata::{hybrid::dfa::DFA, HalfMatch, Input}; - /// - /// // N.B. We disable Unicode here so that we use a simple ASCII word - /// // boundary. Alternatively, we could enable heuristic support for - /// // Unicode word boundaries since our haystack is pure ASCII. - /// let dfa = DFA::new(r"(?-u)\b[0-9]{3}\b")?; - /// let mut cache = dfa.create_cache(); - /// let haystack = "foo123bar"; - /// - /// // Since we sub-slice the haystack, the search doesn't know about the - /// // larger context and assumes that `123` is surrounded by word - /// // boundaries. And of course, the match position is reported relative - /// // to the sub-slice as well, which means we get `3` instead of `6`. - /// let expected = Some(HalfMatch::must(0, 3)); - /// let got = dfa.try_search_fwd( - /// &mut cache, - /// &Input::new(&haystack[3..6]), - /// )?; - /// assert_eq!(expected, got); - /// - /// // But if we provide the bounds of the search within the context of the - /// // entire haystack, then the search can take the surrounding context - /// // into account. (And if we did find a match, it would be reported - /// // as a valid offset into `haystack` instead of its sub-slice.) - /// let expected = None; - /// let got = dfa.try_search_fwd( - /// &mut cache, - /// &Input::new(haystack).range(3..6), - /// )?; - /// assert_eq!(expected, got); - /// - /// # Ok::<(), Box>(()) - /// ``` - #[inline] - pub fn try_search_fwd( - &self, - cache: &mut Cache, - input: &Input<'_>, - ) -> Result, MatchError> { - let utf8empty = self.get_nfa().has_empty() && self.get_nfa().is_utf8(); - let hm = match search::find_fwd(self, cache, input)? { - None => return Ok(None), - Some(hm) if !utf8empty => return Ok(Some(hm)), - Some(hm) => hm, - }; - // We get to this point when we know our DFA can match the empty string - // AND when UTF-8 mode is enabled. In this case, we skip any matches - // whose offset splits a codepoint. Such a match is necessarily a - // zero-width match, because UTF-8 mode requires the underlying NFA - // to be built such that all non-empty matches span valid UTF-8. - // Therefore, any match that ends in the middle of a codepoint cannot - // be part of a span of valid UTF-8 and thus must be an empty match. - // In such cases, we skip it, so as not to report matches that split a - // codepoint. - // - // Note that this is not a checked assumption. Callers *can* provide an - // NFA with UTF-8 mode enabled but produces non-empty matches that span - // invalid UTF-8. But doing so is documented to result in unspecified - // behavior. 
- empty::skip_splits_fwd(input, hm, hm.offset(), |input| { - let got = search::find_fwd(self, cache, input)?; - Ok(got.map(|hm| (hm, hm.offset()))) - }) - } - - /// Executes a reverse search and returns the start of the position of the - /// leftmost match that is found. If no match exists, then `None` is - /// returned. - /// - /// # Errors - /// - /// This routine errors if the search could not complete. This can occur - /// in a number of circumstances: - /// - /// * The configuration of the lazy DFA may permit it to "quit" the search. - /// For example, setting quit bytes or enabling heuristic support for - /// Unicode word boundaries. The default configuration does not enable any - /// option that could result in the lazy DFA quitting. - /// * The configuration of the lazy DFA may also permit it to "give up" - /// on a search if it makes ineffective use of its transition table - /// cache. The default configuration does not enable this by default, - /// although it is typically a good idea to. - /// * When the provided `Input` configuration is not supported. For - /// example, by providing an unsupported anchor mode. - /// - /// When a search returns an error, callers cannot know whether a match - /// exists or not. - /// - /// # Example - /// - /// This routine is principally useful when used in - /// conjunction with the - /// [`nfa::thompson::Config::reverse`](crate::nfa::thompson::Config::reverse) - /// configuration. In general, it's unlikely to be correct to use both - /// `try_search_fwd` and `try_search_rev` with the same DFA since any - /// particular DFA will only support searching in one direction with - /// respect to the pattern. - /// - /// ``` - /// use regex_automata::{ - /// nfa::thompson, - /// hybrid::dfa::DFA, - /// HalfMatch, Input, - /// }; - /// - /// let dfa = DFA::builder() - /// .thompson(thompson::Config::new().reverse(true)) - /// .build("foo[0-9]+")?; - /// let mut cache = dfa.create_cache(); - /// let expected = HalfMatch::must(0, 0); - /// assert_eq!( - /// Some(expected), - /// dfa.try_search_rev(&mut cache, &Input::new("foo12345"))?, - /// ); - /// - /// // Even though a match is found after reading the last byte (`c`), - /// // the leftmost first match semantics demand that we find the earliest - /// // match that prefers earlier parts of the pattern over latter parts. - /// let dfa = DFA::builder() - /// .thompson(thompson::Config::new().reverse(true)) - /// .build("abc|c")?; - /// let mut cache = dfa.create_cache(); - /// let expected = HalfMatch::must(0, 0); - /// assert_eq!(Some(expected), dfa.try_search_rev( - /// &mut cache, &Input::new("abc"))?, - /// ); - /// - /// # Ok::<(), Box>(()) - /// ``` - /// - /// # Example: UTF-8 mode - /// - /// This examples demonstrates that UTF-8 mode applies to reverse - /// DFAs. When UTF-8 mode is enabled in the underlying NFA, then all - /// matches reported must correspond to valid UTF-8 spans. This includes - /// prohibiting zero-width matches that split a codepoint. - /// - /// UTF-8 mode is enabled by default. Notice below how the only zero-width - /// matches reported are those at UTF-8 boundaries: - /// - /// ``` - /// use regex_automata::{ - /// hybrid::dfa::DFA, - /// nfa::thompson, - /// HalfMatch, Input, MatchKind, - /// }; - /// - /// let dfa = DFA::builder() - /// .thompson(thompson::Config::new().reverse(true)) - /// .build(r"")?; - /// let mut cache = dfa.create_cache(); - /// - /// // Run the reverse DFA to collect all matches. 
- /// let mut input = Input::new("☃"); - /// let mut matches = vec![]; - /// loop { - /// match dfa.try_search_rev(&mut cache, &input)? { - /// None => break, - /// Some(hm) => { - /// matches.push(hm); - /// if hm.offset() == 0 || input.end() == 0 { - /// break; - /// } else if hm.offset() < input.end() { - /// input.set_end(hm.offset()); - /// } else { - /// // This is only necessary to handle zero-width - /// // matches, which of course occur in this example. - /// // Without this, the search would never advance - /// // backwards beyond the initial match. - /// input.set_end(input.end() - 1); - /// } - /// } - /// } - /// } - /// - /// // No matches split a codepoint. - /// let expected = vec![ - /// HalfMatch::must(0, 3), - /// HalfMatch::must(0, 0), - /// ]; - /// assert_eq!(expected, matches); - /// - /// # Ok::<(), Box>(()) - /// ``` - /// - /// Now let's look at the same example, but with UTF-8 mode on the - /// underlying NFA disabled: - /// - /// ``` - /// use regex_automata::{ - /// hybrid::dfa::DFA, - /// nfa::thompson, - /// HalfMatch, Input, MatchKind, - /// }; - /// - /// let dfa = DFA::builder() - /// .thompson(thompson::Config::new().reverse(true).utf8(false)) - /// .build(r"")?; - /// let mut cache = dfa.create_cache(); - /// - /// // Run the reverse DFA to collect all matches. - /// let mut input = Input::new("☃"); - /// let mut matches = vec![]; - /// loop { - /// match dfa.try_search_rev(&mut cache, &input)? { - /// None => break, - /// Some(hm) => { - /// matches.push(hm); - /// if hm.offset() == 0 || input.end() == 0 { - /// break; - /// } else if hm.offset() < input.end() { - /// input.set_end(hm.offset()); - /// } else { - /// // This is only necessary to handle zero-width - /// // matches, which of course occur in this example. - /// // Without this, the search would never advance - /// // backwards beyond the initial match. - /// input.set_end(input.end() - 1); - /// } - /// } - /// } - /// } - /// - /// // No matches split a codepoint. - /// let expected = vec![ - /// HalfMatch::must(0, 3), - /// HalfMatch::must(0, 2), - /// HalfMatch::must(0, 1), - /// HalfMatch::must(0, 0), - /// ]; - /// assert_eq!(expected, matches); - /// - /// # Ok::<(), Box>(()) - /// ``` - #[inline] - pub fn try_search_rev( - &self, - cache: &mut Cache, - input: &Input<'_>, - ) -> Result, MatchError> { - let utf8empty = self.get_nfa().has_empty() && self.get_nfa().is_utf8(); - let hm = match search::find_rev(self, cache, input)? { - None => return Ok(None), - Some(hm) if !utf8empty => return Ok(Some(hm)), - Some(hm) => hm, - }; - empty::skip_splits_rev(input, hm, hm.offset(), |input| { - let got = search::find_rev(self, cache, input)?; - Ok(got.map(|hm| (hm, hm.offset()))) - }) - } - - /// Executes an overlapping forward search and returns the end position of - /// matches as they are found. If no match exists, then `None` is returned. - /// - /// This routine is principally only useful when searching for multiple - /// patterns on inputs where multiple patterns may match the same regions - /// of text. In particular, callers must preserve the automaton's search - /// state from prior calls so that the implementation knows where the last - /// match occurred. - /// - /// When using this routine to implement an iterator of overlapping - /// matches, the `start` of the search should remain invariant throughout - /// iteration. The `OverlappingState` given to the search will keep track - /// of the current position of the search. 
(This is because multiple - /// matches may be reported at the same position, so only the search - /// implementation itself knows when to advance the position.) - /// - /// If for some reason you want the search to forget about its previous - /// state and restart the search at a particular position, then setting the - /// state to [`OverlappingState::start`] will accomplish that. - /// - /// # Errors - /// - /// This routine errors if the search could not complete. This can occur - /// in a number of circumstances: - /// - /// * The configuration of the lazy DFA may permit it to "quit" the search. - /// For example, setting quit bytes or enabling heuristic support for - /// Unicode word boundaries. The default configuration does not enable any - /// option that could result in the lazy DFA quitting. - /// * The configuration of the lazy DFA may also permit it to "give up" - /// on a search if it makes ineffective use of its transition table - /// cache. The default configuration does not enable this by default, - /// although it is typically a good idea to. - /// * When the provided `Input` configuration is not supported. For - /// example, by providing an unsupported anchor mode. - /// - /// When a search returns an error, callers cannot know whether a match - /// exists or not. - /// - /// # Example - /// - /// This example shows how to run a basic overlapping search. Notice - /// that we build the automaton with a `MatchKind::All` configuration. - /// Overlapping searches are unlikely to work as one would expect when - /// using the default `MatchKind::LeftmostFirst` match semantics, since - /// leftmost-first matching is fundamentally incompatible with overlapping - /// searches. Namely, overlapping searches need to report matches as they - /// are seen, where as leftmost-first searches will continue searching even - /// after a match has been observed in order to find the conventional end - /// position of the match. More concretely, leftmost-first searches use - /// dead states to terminate a search after a specific match can no longer - /// be extended. Overlapping searches instead do the opposite by continuing - /// the search to find totally new matches (potentially of other patterns). - /// - /// ``` - /// # if cfg!(miri) { return Ok(()); } // miri takes too long - /// use regex_automata::{ - /// hybrid::dfa::{DFA, OverlappingState}, - /// HalfMatch, Input, MatchKind, - /// }; - /// - /// let dfa = DFA::builder() - /// .configure(DFA::config().match_kind(MatchKind::All)) - /// .build_many(&[r"\w+$", r"\S+$"])?; - /// let mut cache = dfa.create_cache(); - /// - /// let haystack = "@foo"; - /// let mut state = OverlappingState::start(); - /// - /// let expected = Some(HalfMatch::must(1, 4)); - /// dfa.try_search_overlapping_fwd( - /// &mut cache, &Input::new(haystack), &mut state, - /// )?; - /// assert_eq!(expected, state.get_match()); - /// - /// // The first pattern also matches at the same position, so re-running - /// // the search will yield another match. Notice also that the first - /// // pattern is returned after the second. This is because the second - /// // pattern begins its match before the first, is therefore an earlier - /// // match and is thus reported first. 
- /// let expected = Some(HalfMatch::must(0, 4)); - /// dfa.try_search_overlapping_fwd( - /// &mut cache, &Input::new(haystack), &mut state, - /// )?; - /// assert_eq!(expected, state.get_match()); - /// - /// # Ok::<(), Box>(()) - /// ``` - #[inline] - pub fn try_search_overlapping_fwd( - &self, - cache: &mut Cache, - input: &Input<'_>, - state: &mut OverlappingState, - ) -> Result<(), MatchError> { - let utf8empty = self.get_nfa().has_empty() && self.get_nfa().is_utf8(); - search::find_overlapping_fwd(self, cache, input, state)?; - match state.get_match() { - None => Ok(()), - Some(_) if !utf8empty => Ok(()), - Some(_) => skip_empty_utf8_splits_overlapping( - input, - state, - |input, state| { - search::find_overlapping_fwd(self, cache, input, state) - }, - ), - } - } - - /// Executes a reverse overlapping search and returns the start of the - /// position of the leftmost match that is found. If no match exists, then - /// `None` is returned. - /// - /// When using this routine to implement an iterator of overlapping - /// matches, the `start` of the search should remain invariant throughout - /// iteration. The `OverlappingState` given to the search will keep track - /// of the current position of the search. (This is because multiple - /// matches may be reported at the same position, so only the search - /// implementation itself knows when to advance the position.) - /// - /// If for some reason you want the search to forget about its previous - /// state and restart the search at a particular position, then setting the - /// state to [`OverlappingState::start`] will accomplish that. - /// - /// # Errors - /// - /// This routine errors if the search could not complete. This can occur - /// in a number of circumstances: - /// - /// * The configuration of the lazy DFA may permit it to "quit" the search. - /// For example, setting quit bytes or enabling heuristic support for - /// Unicode word boundaries. The default configuration does not enable any - /// option that could result in the lazy DFA quitting. - /// * The configuration of the lazy DFA may also permit it to "give up" - /// on a search if it makes ineffective use of its transition table - /// cache. The default configuration does not enable this by default, - /// although it is typically a good idea to. - /// * When the provided `Input` configuration is not supported. For - /// example, by providing an unsupported anchor mode. - /// - /// When a search returns an error, callers cannot know whether a match - /// exists or not. - /// - /// # Example: UTF-8 mode - /// - /// This examples demonstrates that UTF-8 mode applies to reverse - /// DFAs. When UTF-8 mode is enabled in the underlying NFA, then all - /// matches reported must correspond to valid UTF-8 spans. This includes - /// prohibiting zero-width matches that split a codepoint. - /// - /// UTF-8 mode is enabled by default. Notice below how the only zero-width - /// matches reported are those at UTF-8 boundaries: - /// - /// ``` - /// use regex_automata::{ - /// hybrid::dfa::{DFA, OverlappingState}, - /// nfa::thompson, - /// HalfMatch, Input, MatchKind, - /// }; - /// - /// let dfa = DFA::builder() - /// .configure(DFA::config().match_kind(MatchKind::All)) - /// .thompson(thompson::Config::new().reverse(true)) - /// .build_many(&[r"", r"☃"])?; - /// let mut cache = dfa.create_cache(); - /// - /// // Run the reverse DFA to collect all matches. 
- /// let input = Input::new("☃"); - /// let mut state = OverlappingState::start(); - /// let mut matches = vec![]; - /// loop { - /// dfa.try_search_overlapping_rev(&mut cache, &input, &mut state)?; - /// match state.get_match() { - /// None => break, - /// Some(hm) => matches.push(hm), - /// } - /// } - /// - /// // No matches split a codepoint. - /// let expected = vec![ - /// HalfMatch::must(0, 3), - /// HalfMatch::must(1, 0), - /// HalfMatch::must(0, 0), - /// ]; - /// assert_eq!(expected, matches); - /// - /// # Ok::<(), Box>(()) - /// ``` - /// - /// Now let's look at the same example, but with UTF-8 mode on the - /// underlying NFA disabled: - /// - /// ``` - /// use regex_automata::{ - /// hybrid::dfa::{DFA, OverlappingState}, - /// nfa::thompson, - /// HalfMatch, Input, MatchKind, - /// }; - /// - /// let dfa = DFA::builder() - /// .configure(DFA::config().match_kind(MatchKind::All)) - /// .thompson(thompson::Config::new().reverse(true).utf8(false)) - /// .build_many(&[r"", r"☃"])?; - /// let mut cache = dfa.create_cache(); - /// - /// // Run the reverse DFA to collect all matches. - /// let input = Input::new("☃"); - /// let mut state = OverlappingState::start(); - /// let mut matches = vec![]; - /// loop { - /// dfa.try_search_overlapping_rev(&mut cache, &input, &mut state)?; - /// match state.get_match() { - /// None => break, - /// Some(hm) => matches.push(hm), - /// } - /// } - /// - /// // Now *all* positions match, even within a codepoint, - /// // because we lifted the requirement that matches - /// // correspond to valid UTF-8 spans. - /// let expected = vec![ - /// HalfMatch::must(0, 3), - /// HalfMatch::must(0, 2), - /// HalfMatch::must(0, 1), - /// HalfMatch::must(1, 0), - /// HalfMatch::must(0, 0), - /// ]; - /// assert_eq!(expected, matches); - /// - /// # Ok::<(), Box>(()) - /// ``` - #[inline] - pub fn try_search_overlapping_rev( - &self, - cache: &mut Cache, - input: &Input<'_>, - state: &mut OverlappingState, - ) -> Result<(), MatchError> { - let utf8empty = self.get_nfa().has_empty() && self.get_nfa().is_utf8(); - search::find_overlapping_rev(self, cache, input, state)?; - match state.get_match() { - None => Ok(()), - Some(_) if !utf8empty => Ok(()), - Some(_) => skip_empty_utf8_splits_overlapping( - input, - state, - |input, state| { - search::find_overlapping_rev(self, cache, input, state) - }, - ), - } - } - - /// Writes the set of patterns that match anywhere in the given search - /// configuration to `patset`. If multiple patterns match at the same - /// position and the underlying DFA supports overlapping matches, then all - /// matching patterns are written to the given set. - /// - /// Unless all of the patterns in this DFA are anchored, then generally - /// speaking, this will visit every byte in the haystack. - /// - /// This search routine *does not* clear the pattern set. This gives some - /// flexibility to the caller (e.g., running multiple searches with the - /// same pattern set), but does make the API bug-prone if you're reusing - /// the same pattern set for multiple searches but intended them to be - /// independent. - /// - /// If a pattern ID matched but the given `PatternSet` does not have - /// sufficient capacity to store it, then it is not inserted and silently - /// dropped. - /// - /// # Errors - /// - /// This routine errors if the search could not complete. This can occur - /// in a number of circumstances: - /// - /// * The configuration of the lazy DFA may permit it to "quit" the search. 
- /// For example, setting quit bytes or enabling heuristic support for - /// Unicode word boundaries. The default configuration does not enable any - /// option that could result in the lazy DFA quitting. - /// * The configuration of the lazy DFA may also permit it to "give up" - /// on a search if it makes ineffective use of its transition table - /// cache. The default configuration does not enable this by default, - /// although it is typically a good idea to. - /// * When the provided `Input` configuration is not supported. For - /// example, by providing an unsupported anchor mode. - /// - /// When a search returns an error, callers cannot know whether a match - /// exists or not. - /// - /// # Example - /// - /// This example shows how to find all matching patterns in a haystack, - /// even when some patterns match at the same position as other patterns. - /// - /// ``` - /// # if cfg!(miri) { return Ok(()); } // miri takes too long - /// use regex_automata::{ - /// hybrid::dfa::DFA, - /// Input, MatchKind, PatternSet, - /// }; - /// - /// let patterns = &[ - /// r"\w+", r"\d+", r"\pL+", r"foo", r"bar", r"barfoo", r"foobar", - /// ]; - /// let dfa = DFA::builder() - /// .configure(DFA::config().match_kind(MatchKind::All)) - /// .build_many(patterns)?; - /// let mut cache = dfa.create_cache(); - /// - /// let input = Input::new("foobar"); - /// let mut patset = PatternSet::new(dfa.pattern_len()); - /// dfa.try_which_overlapping_matches(&mut cache, &input, &mut patset)?; - /// let expected = vec![0, 2, 3, 4, 6]; - /// let got: Vec = patset.iter().map(|p| p.as_usize()).collect(); - /// assert_eq!(expected, got); - /// - /// # Ok::<(), Box>(()) - /// ``` - #[inline] - pub fn try_which_overlapping_matches( - &self, - cache: &mut Cache, - input: &Input<'_>, - patset: &mut PatternSet, - ) -> Result<(), MatchError> { - let mut state = OverlappingState::start(); - while let Some(m) = { - self.try_search_overlapping_fwd(cache, input, &mut state)?; - state.get_match() - } { - let _ = patset.try_insert(m.pattern()); - // There's nothing left to find, so we can stop. Or the caller - // asked us to. - if patset.is_full() || input.get_earliest() { - break; - } - } - Ok(()) - } -} - -impl DFA { - /// Transitions from the current state to the next state, given the next - /// byte of input. - /// - /// The given cache is used to either reuse pre-computed state - /// transitions, or to store this newly computed transition for future - /// reuse. Thus, this routine guarantees that it will never return a state - /// ID that has an "unknown" tag. - /// - /// # State identifier validity - /// - /// The only valid value for `current` is the lazy state ID returned - /// by the most recent call to `next_state`, `next_state_untagged`, - /// `next_state_untagged_unchecked`, `start_state_forward` or - /// `state_state_reverse` for the given `cache`. Any state ID returned from - /// prior calls to these routines (with the same `cache`) is considered - /// invalid (even if it gives an appearance of working). State IDs returned - /// from _any_ prior call for different `cache` values are also always - /// invalid. - /// - /// The returned ID is always a valid ID when `current` refers to a valid - /// ID. Moreover, this routine is defined for all possible values of - /// `input`. - /// - /// These validity rules are not checked, even in debug mode. Callers are - /// required to uphold these rules themselves. 
- /// - /// Violating these state ID validity rules will not sacrifice memory - /// safety, but _may_ produce an incorrect result or a panic. - /// - /// # Panics - /// - /// If the given ID does not refer to a valid state, then this routine - /// may panic but it also may not panic and instead return an invalid or - /// incorrect ID. - /// - /// # Example - /// - /// This shows a simplistic example for walking a lazy DFA for a given - /// haystack by using the `next_state` method. - /// - /// ``` - /// use regex_automata::{hybrid::dfa::DFA, Input}; - /// - /// let dfa = DFA::new(r"[a-z]+r")?; - /// let mut cache = dfa.create_cache(); - /// let haystack = "bar".as_bytes(); - /// - /// // The start state is determined by inspecting the position and the - /// // initial bytes of the haystack. - /// let mut sid = dfa.start_state_forward( - /// &mut cache, &Input::new(haystack), - /// )?; - /// // Walk all the bytes in the haystack. - /// for &b in haystack { - /// sid = dfa.next_state(&mut cache, sid, b)?; - /// } - /// // Matches are always delayed by 1 byte, so we must explicitly walk the - /// // special "EOI" transition at the end of the search. - /// sid = dfa.next_eoi_state(&mut cache, sid)?; - /// assert!(sid.is_match()); - /// - /// # Ok::<(), Box>(()) - /// ``` - #[inline] - pub fn next_state( - &self, - cache: &mut Cache, - current: LazyStateID, - input: u8, - ) -> Result { - let class = usize::from(self.classes.get(input)); - let offset = current.as_usize_untagged() + class; - let sid = cache.trans[offset]; - if !sid.is_unknown() { - return Ok(sid); - } - let unit = alphabet::Unit::u8(input); - Lazy::new(self, cache).cache_next_state(current, unit) - } - - /// Transitions from the current state to the next state, given the next - /// byte of input and a state ID that is not tagged. - /// - /// The only reason to use this routine is performance. In particular, the - /// `next_state` method needs to do some additional checks, among them is - /// to account for identifiers to states that are not yet computed. In - /// such a case, the transition is computed on the fly. However, if it is - /// known that the `current` state ID is untagged, then these checks can be - /// omitted. - /// - /// Since this routine does not compute states on the fly, it does not - /// modify the cache and thus cannot return an error. Consequently, `cache` - /// does not need to be mutable and it is possible for this routine to - /// return a state ID corresponding to the special "unknown" state. In - /// this case, it is the caller's responsibility to use the prior state - /// ID and `input` with `next_state` in order to force the computation of - /// the unknown transition. Otherwise, trying to use the "unknown" state - /// ID will just result in transitioning back to itself, and thus never - /// terminating. (This is technically a special exemption to the state ID - /// validity rules, but is permissible since this routine is guaranteed to - /// never mutate the given `cache`, and thus the identifier is guaranteed - /// to remain valid.) - /// - /// See [`LazyStateID`] for more details on what it means for a state ID - /// to be tagged. Also, see - /// [`next_state_untagged_unchecked`](DFA::next_state_untagged_unchecked) - /// for this same idea, but with bounds checks forcefully elided. 
- /// - /// # State identifier validity - /// - /// The only valid value for `current` is an **untagged** lazy - /// state ID returned by the most recent call to `next_state`, - /// `next_state_untagged`, `next_state_untagged_unchecked`, - /// `start_state_forward` or `state_state_reverse` for the given `cache`. - /// Any state ID returned from prior calls to these routines (with the - /// same `cache`) is considered invalid (even if it gives an appearance - /// of working). State IDs returned from _any_ prior call for different - /// `cache` values are also always invalid. - /// - /// The returned ID is always a valid ID when `current` refers to a valid - /// ID, although it may be tagged. Moreover, this routine is defined for - /// all possible values of `input`. - /// - /// Not all validity rules are checked, even in debug mode. Callers are - /// required to uphold these rules themselves. - /// - /// Violating these state ID validity rules will not sacrifice memory - /// safety, but _may_ produce an incorrect result or a panic. - /// - /// # Panics - /// - /// If the given ID does not refer to a valid state, then this routine - /// may panic but it also may not panic and instead return an invalid or - /// incorrect ID. - /// - /// # Example - /// - /// This shows a simplistic example for walking a lazy DFA for a given - /// haystack by using the `next_state_untagged` method where possible. - /// - /// ``` - /// use regex_automata::{hybrid::dfa::DFA, Input}; - /// - /// let dfa = DFA::new(r"[a-z]+r")?; - /// let mut cache = dfa.create_cache(); - /// let haystack = "bar".as_bytes(); - /// - /// // The start state is determined by inspecting the position and the - /// // initial bytes of the haystack. - /// let mut sid = dfa.start_state_forward( - /// &mut cache, &Input::new(haystack), - /// )?; - /// // Walk all the bytes in the haystack. - /// let mut at = 0; - /// while at < haystack.len() { - /// if sid.is_tagged() { - /// sid = dfa.next_state(&mut cache, sid, haystack[at])?; - /// } else { - /// let mut prev_sid = sid; - /// // We attempt to chew through as much as we can while moving - /// // through untagged state IDs. Thus, the transition function - /// // does less work on average per byte. (Unrolling this loop - /// // may help even more.) - /// while at < haystack.len() { - /// prev_sid = sid; - /// sid = dfa.next_state_untagged( - /// &mut cache, sid, haystack[at], - /// ); - /// at += 1; - /// if sid.is_tagged() { - /// break; - /// } - /// } - /// // We must ensure that we never proceed to the next iteration - /// // with an unknown state ID. If we don't account for this - /// // case, then search isn't guaranteed to terminate since all - /// // transitions on unknown states loop back to itself. - /// if sid.is_unknown() { - /// sid = dfa.next_state( - /// &mut cache, prev_sid, haystack[at - 1], - /// )?; - /// } - /// } - /// } - /// // Matches are always delayed by 1 byte, so we must explicitly walk the - /// // special "EOI" transition at the end of the search. 
- /// sid = dfa.next_eoi_state(&mut cache, sid)?; - /// assert!(sid.is_match()); - /// - /// # Ok::<(), Box>(()) - /// ``` - #[inline] - pub fn next_state_untagged( - &self, - cache: &Cache, - current: LazyStateID, - input: u8, - ) -> LazyStateID { - debug_assert!(!current.is_tagged()); - let class = usize::from(self.classes.get(input)); - let offset = current.as_usize_unchecked() + class; - cache.trans[offset] - } - - /// Transitions from the current state to the next state, eliding bounds - /// checks, given the next byte of input and a state ID that is not tagged. - /// - /// The only reason to use this routine is performance. In particular, the - /// `next_state` method needs to do some additional checks, among them is - /// to account for identifiers to states that are not yet computed. In - /// such a case, the transition is computed on the fly. However, if it is - /// known that the `current` state ID is untagged, then these checks can be - /// omitted. - /// - /// Since this routine does not compute states on the fly, it does not - /// modify the cache and thus cannot return an error. Consequently, `cache` - /// does not need to be mutable and it is possible for this routine to - /// return a state ID corresponding to the special "unknown" state. In - /// this case, it is the caller's responsibility to use the prior state - /// ID and `input` with `next_state` in order to force the computation of - /// the unknown transition. Otherwise, trying to use the "unknown" state - /// ID will just result in transitioning back to itself, and thus never - /// terminating. (This is technically a special exemption to the state ID - /// validity rules, but is permissible since this routine is guaranteed to - /// never mutate the given `cache`, and thus the identifier is guaranteed - /// to remain valid.) - /// - /// See [`LazyStateID`] for more details on what it means for a state ID - /// to be tagged. Also, see - /// [`next_state_untagged`](DFA::next_state_untagged) - /// for this same idea, but with memory safety guaranteed by retaining - /// bounds checks. - /// - /// # State identifier validity - /// - /// The only valid value for `current` is an **untagged** lazy - /// state ID returned by the most recent call to `next_state`, - /// `next_state_untagged`, `next_state_untagged_unchecked`, - /// `start_state_forward` or `state_state_reverse` for the given `cache`. - /// Any state ID returned from prior calls to these routines (with the - /// same `cache`) is considered invalid (even if it gives an appearance - /// of working). State IDs returned from _any_ prior call for different - /// `cache` values are also always invalid. - /// - /// The returned ID is always a valid ID when `current` refers to a valid - /// ID, although it may be tagged. Moreover, this routine is defined for - /// all possible values of `input`. - /// - /// Not all validity rules are checked, even in debug mode. Callers are - /// required to uphold these rules themselves. - /// - /// Violating these state ID validity rules will not sacrifice memory - /// safety, but _may_ produce an incorrect result or a panic. - /// - /// # Safety - /// - /// Callers of this method must guarantee that `current` refers to a valid - /// state ID according to the rules described above. If `current` is not a - /// valid state ID for this automaton, then calling this routine may result - /// in undefined behavior. - /// - /// If `current` is valid, then the ID returned is valid for all possible - /// values of `input`. 
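The safety contract above is easier to see with a usage sketch. The following is illustrative only and is not part of the vendored source; the helper name `walk_unchecked` is invented, but every method it calls on `DFA`, `Cache` and `LazyStateID` is documented in this file. It layers the unchecked transition over the checked one and handles the "unknown" sentinel explicitly:

```rust
use regex_automata::{hybrid::dfa::DFA, Input};

// Illustrative helper (not from the crate): walk a haystack, preferring the
// unchecked transition whenever the current state ID is untagged. Returns
// whether a match ends exactly at the end of `haystack`.
fn walk_unchecked(
    dfa: &DFA,
    haystack: &[u8],
) -> Result<bool, Box<dyn std::error::Error>> {
    let mut cache = dfa.create_cache();
    let mut sid = dfa.start_state_forward(&mut cache, &Input::new(haystack))?;
    for &b in haystack {
        let next = if sid.is_tagged() {
            dfa.next_state(&mut cache, sid, b)?
        } else {
            // SAFETY: `sid` is the ID most recently returned for this same
            // `cache`, and it is untagged, which is exactly the validity
            // contract documented above.
            unsafe { dfa.next_state_untagged_unchecked(&cache, sid, b) }
        };
        // The unchecked routine never computes new states, so it may hand
        // back the "unknown" sentinel. Fall back to `next_state` to force
        // the transition to be computed; otherwise the walk cannot advance.
        sid = if next.is_unknown() {
            dfa.next_state(&mut cache, sid, b)?
        } else {
            next
        };
    }
    // Matches are delayed by one byte, so finish with the EOI transition.
    sid = dfa.next_eoi_state(&mut cache, sid)?;
    Ok(sid.is_match())
}
```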
- #[inline] - pub unsafe fn next_state_untagged_unchecked( - &self, - cache: &Cache, - current: LazyStateID, - input: u8, - ) -> LazyStateID { - debug_assert!(!current.is_tagged()); - let class = usize::from(self.classes.get(input)); - let offset = current.as_usize_unchecked() + class; - *cache.trans.get_unchecked(offset) - } - - /// Transitions from the current state to the next state for the special - /// EOI symbol. - /// - /// The given cache is used to either reuse pre-computed state - /// transitions, or to store this newly computed transition for future - /// reuse. Thus, this routine guarantees that it will never return a state - /// ID that has an "unknown" tag. - /// - /// This routine must be called at the end of every search in a correct - /// implementation of search. Namely, lazy DFAs in this crate delay matches - /// by one byte in order to support look-around operators. Thus, after - /// reaching the end of a haystack, a search implementation must follow one - /// last EOI transition. - /// - /// It is best to think of EOI as an additional symbol in the alphabet of a - /// DFA that is distinct from every other symbol. That is, the alphabet of - /// lazy DFAs in this crate has a logical size of 257 instead of 256, where - /// 256 corresponds to every possible inhabitant of `u8`. (In practice, the - /// physical alphabet size may be smaller because of alphabet compression - /// via equivalence classes, but EOI is always represented somehow in the - /// alphabet.) - /// - /// # State identifier validity - /// - /// The only valid value for `current` is the lazy state ID returned - /// by the most recent call to `next_state`, `next_state_untagged`, - /// `next_state_untagged_unchecked`, `start_state_forward` or - /// `state_state_reverse` for the given `cache`. Any state ID returned from - /// prior calls to these routines (with the same `cache`) is considered - /// invalid (even if it gives an appearance of working). State IDs returned - /// from _any_ prior call for different `cache` values are also always - /// invalid. - /// - /// The returned ID is always a valid ID when `current` refers to a valid - /// ID. - /// - /// These validity rules are not checked, even in debug mode. Callers are - /// required to uphold these rules themselves. - /// - /// Violating these state ID validity rules will not sacrifice memory - /// safety, but _may_ produce an incorrect result or a panic. - /// - /// # Panics - /// - /// If the given ID does not refer to a valid state, then this routine - /// may panic but it also may not panic and instead return an invalid or - /// incorrect ID. - /// - /// # Example - /// - /// This shows a simplistic example for walking a DFA for a given haystack, - /// and then finishing the search with the final EOI transition. - /// - /// ``` - /// use regex_automata::{hybrid::dfa::DFA, Input}; - /// - /// let dfa = DFA::new(r"[a-z]+r")?; - /// let mut cache = dfa.create_cache(); - /// let haystack = "bar".as_bytes(); - /// - /// // The start state is determined by inspecting the position and the - /// // initial bytes of the haystack. - /// let mut sid = dfa.start_state_forward( - /// &mut cache, &Input::new(haystack), - /// )?; - /// // Walk all the bytes in the haystack. - /// for &b in haystack { - /// sid = dfa.next_state(&mut cache, sid, b)?; - /// } - /// // Matches are always delayed by 1 byte, so we must explicitly walk - /// // the special "EOI" transition at the end of the search. 
Without this - /// // final transition, the assert below will fail since the DFA will not - /// // have entered a match state yet! - /// sid = dfa.next_eoi_state(&mut cache, sid)?; - /// assert!(sid.is_match()); - /// - /// # Ok::<(), Box>(()) - /// ``` - #[inline] - pub fn next_eoi_state( - &self, - cache: &mut Cache, - current: LazyStateID, - ) -> Result { - let eoi = self.classes.eoi().as_usize(); - let offset = current.as_usize_untagged() + eoi; - let sid = cache.trans[offset]; - if !sid.is_unknown() { - return Ok(sid); - } - let unit = self.classes.eoi(); - Lazy::new(self, cache).cache_next_state(current, unit) - } - - /// Return the ID of the start state for this lazy DFA for the given - /// starting configuration. - /// - /// Unlike typical DFA implementations, the start state for DFAs in this - /// crate is dependent on a few different factors: - /// - /// * The [`Anchored`] mode of the search. Unanchored, anchored and - /// anchored searches for a specific [`PatternID`] all use different start - /// states. - /// * Whether a "look-behind" byte exists. For example, the `^` anchor - /// matches if and only if there is no look-behind byte. - /// * The specific value of that look-behind byte. For example, a `(?m:^)` - /// assertion only matches when there is either no look-behind byte, or - /// when the look-behind byte is a line terminator. - /// - /// The [starting configuration](start::Config) provides the above - /// information. - /// - /// This routine can be used for either forward or reverse searches. - /// Although, as a convenience, if you have an [`Input`], then it - /// may be more succinct to use [`DFA::start_state_forward`] or - /// [`DFA::start_state_reverse`]. Note, for example, that the convenience - /// routines return a [`MatchError`] on failure where as this routine - /// returns a [`StartError`]. - /// - /// # Errors - /// - /// This may return a [`StartError`] if the search needs to give up when - /// determining the start state (for example, if it sees a "quit" byte - /// or if the cache has become inefficient). This can also return an - /// error if the given configuration contains an unsupported [`Anchored`] - /// configuration. - #[cfg_attr(feature = "perf-inline", inline(always))] - pub fn start_state( - &self, - cache: &mut Cache, - config: &start::Config, - ) -> Result { - let lazy = LazyRef::new(self, cache); - let anchored = config.get_anchored(); - let start = match config.get_look_behind() { - None => Start::Text, - Some(byte) => { - if !self.quitset.is_empty() && self.quitset.contains(byte) { - return Err(StartError::quit(byte)); - } - self.start_map.get(byte) - } - }; - let start_id = lazy.get_cached_start_id(anchored, start)?; - if !start_id.is_unknown() { - return Ok(start_id); - } - Lazy::new(self, cache).cache_start_group(anchored, start) - } - - /// Return the ID of the start state for this lazy DFA when executing a - /// forward search. - /// - /// This is a convenience routine for calling [`DFA::start_state`] that - /// converts the given [`Input`] to a [start configuration](start::Config). - /// Additionally, if an error occurs, it is converted from a [`StartError`] - /// to a [`MatchError`] using the offset information in the given - /// [`Input`]. - /// - /// # Errors - /// - /// This may return a [`MatchError`] if the search needs to give up when - /// determining the start state (for example, if it sees a "quit" byte or - /// if the cache has become inefficient). 
This can also return an error if - /// the given `Input` contains an unsupported [`Anchored`] configuration. - #[cfg_attr(feature = "perf-inline", inline(always))] - pub fn start_state_forward( - &self, - cache: &mut Cache, - input: &Input<'_>, - ) -> Result { - let config = start::Config::from_input_forward(input); - self.start_state(cache, &config).map_err(|err| match err { - StartError::Cache { .. } => MatchError::gave_up(input.start()), - StartError::Quit { byte } => { - let offset = input - .start() - .checked_sub(1) - .expect("no quit in start without look-behind"); - MatchError::quit(byte, offset) - } - StartError::UnsupportedAnchored { mode } => { - MatchError::unsupported_anchored(mode) - } - }) - } - - /// Return the ID of the start state for this lazy DFA when executing a - /// reverse search. - /// - /// This is a convenience routine for calling [`DFA::start_state`] that - /// converts the given [`Input`] to a [start configuration](start::Config). - /// Additionally, if an error occurs, it is converted from a [`StartError`] - /// to a [`MatchError`] using the offset information in the given - /// [`Input`]. - /// - /// # Errors - /// - /// This may return a [`MatchError`] if the search needs to give up when - /// determining the start state (for example, if it sees a "quit" byte or - /// if the cache has become inefficient). This can also return an error if - /// the given `Input` contains an unsupported [`Anchored`] configuration. - #[cfg_attr(feature = "perf-inline", inline(always))] - pub fn start_state_reverse( - &self, - cache: &mut Cache, - input: &Input<'_>, - ) -> Result { - let config = start::Config::from_input_reverse(input); - self.start_state(cache, &config).map_err(|err| match err { - StartError::Cache { .. } => MatchError::gave_up(input.end()), - StartError::Quit { byte } => { - let offset = input.end(); - MatchError::quit(byte, offset) - } - StartError::UnsupportedAnchored { mode } => { - MatchError::unsupported_anchored(mode) - } - }) - } - - /// Returns the total number of patterns that match in this state. - /// - /// If the lazy DFA was compiled with one pattern, then this must - /// necessarily always return `1` for all match states. - /// - /// A lazy DFA guarantees that [`DFA::match_pattern`] can be called with - /// indices up to (but not including) the length returned by this routine - /// without panicking. - /// - /// # Panics - /// - /// If the given state is not a match state, then this may either panic - /// or return an incorrect result. - /// - /// # Example - /// - /// This example shows a simple instance of implementing overlapping - /// matches. In particular, it shows not only how to determine how many - /// patterns have matched in a particular state, but also how to access - /// which specific patterns have matched. - /// - /// Notice that we must use [`MatchKind::All`] when building the DFA. If we - /// used [`MatchKind::LeftmostFirst`] instead, then the DFA would not be - /// constructed in a way that supports overlapping matches. (It would only - /// report a single pattern that matches at any particular point in time.) - /// - /// Another thing to take note of is the patterns used and the order in - /// which the pattern IDs are reported. In the example below, pattern `3` - /// is yielded first. Why? Because it corresponds to the match that - /// appears first. Namely, the `@` symbol is part of `\S+` but not part - /// of any of the other patterns. 
Since the `\S+` pattern has a match that - /// starts to the left of any other pattern, its ID is returned before any - /// other. - /// - /// ``` - /// # if cfg!(miri) { return Ok(()); } // miri takes too long - /// use regex_automata::{hybrid::dfa::DFA, Input, MatchKind}; - /// - /// let dfa = DFA::builder() - /// .configure(DFA::config().match_kind(MatchKind::All)) - /// .build_many(&[ - /// r"\w+", r"[a-z]+", r"[A-Z]+", r"\S+", - /// ])?; - /// let mut cache = dfa.create_cache(); - /// let haystack = "@bar".as_bytes(); - /// - /// // The start state is determined by inspecting the position and the - /// // initial bytes of the haystack. - /// let mut sid = dfa.start_state_forward( - /// &mut cache, &Input::new(haystack), - /// )?; - /// // Walk all the bytes in the haystack. - /// for &b in haystack { - /// sid = dfa.next_state(&mut cache, sid, b)?; - /// } - /// sid = dfa.next_eoi_state(&mut cache, sid)?; - /// - /// assert!(sid.is_match()); - /// assert_eq!(dfa.match_len(&mut cache, sid), 3); - /// // The following calls are guaranteed to not panic since `match_len` - /// // returned `3` above. - /// assert_eq!(dfa.match_pattern(&mut cache, sid, 0).as_usize(), 3); - /// assert_eq!(dfa.match_pattern(&mut cache, sid, 1).as_usize(), 0); - /// assert_eq!(dfa.match_pattern(&mut cache, sid, 2).as_usize(), 1); - /// - /// # Ok::<(), Box>(()) - /// ``` - #[inline] - pub fn match_len(&self, cache: &Cache, id: LazyStateID) -> usize { - assert!(id.is_match()); - LazyRef::new(self, cache).get_cached_state(id).match_len() - } - - /// Returns the pattern ID corresponding to the given match index in the - /// given state. - /// - /// See [`DFA::match_len`] for an example of how to use this method - /// correctly. Note that if you know your lazy DFA is configured with a - /// single pattern, then this routine is never necessary since it will - /// always return a pattern ID of `0` for an index of `0` when `id` - /// corresponds to a match state. - /// - /// Typically, this routine is used when implementing an overlapping - /// search, as the example for `DFA::match_len` does. - /// - /// # Panics - /// - /// If the state ID is not a match state or if the match index is out - /// of bounds for the given state, then this routine may either panic - /// or produce an incorrect result. If the state ID is correct and the - /// match index is correct, then this routine always produces a valid - /// `PatternID`. - #[inline] - pub fn match_pattern( - &self, - cache: &Cache, - id: LazyStateID, - match_index: usize, - ) -> PatternID { - // This is an optimization for the very common case of a DFA with a - // single pattern. This conditional avoids a somewhat more costly path - // that finds the pattern ID from the corresponding `State`, which - // requires a bit of slicing/pointer-chasing. This optimization tends - // to only matter when matches are frequent. - if self.pattern_len() == 1 { - return PatternID::ZERO; - } - LazyRef::new(self, cache) - .get_cached_state(id) - .match_pattern(match_index) - } -} - -/// A cache represents a partially computed DFA. -/// -/// A cache is the key component that differentiates a classical DFA and a -/// hybrid NFA/DFA (also called a "lazy DFA"). Where a classical DFA builds a -/// complete transition table that can handle all possible inputs, a hybrid -/// NFA/DFA starts with an empty transition table and builds only the parts -/// required during search. The parts that are built are stored in a cache. 
For
-/// this reason, a cache is a required parameter for nearly every operation on
-/// a [`DFA`].
-///
-/// Caches can be created from their corresponding DFA via
-/// [`DFA::create_cache`]. A cache can only be used with either the DFA that
-/// created it, or the DFA that was most recently used to reset it with
-/// [`Cache::reset`]. Using a cache with any other DFA may result in panics
-/// or incorrect results.
-#[derive(Clone, Debug)]
-pub struct Cache {
- // N.B. If you're looking to understand how determinization works, it
- // is probably simpler to first grok src/dfa/determinize.rs, since that
- // doesn't have the "laziness" component.
- /// The transition table.
- ///
- /// Given a `current` LazyStateID and an `input` byte, the next state can
- /// be computed via `trans[untagged(current) + equiv_class(input)]`. Notice
- /// that no multiplication is used. That's because state identifiers are
- /// "premultiplied."
- ///
- /// Note that the next state may be the "unknown" state. In this case, the
- /// next state is not known and determinization for `current` on `input`
- /// must be performed.
- trans: Vec<LazyStateID>,
- /// The starting states for this DFA.
- ///
- /// These are computed lazily. Initially, these are all set to "unknown"
- /// lazy state IDs.
- ///
- /// When 'starts_for_each_pattern' is disabled (the default), then the size
- /// of this is constrained to the possible starting configurations based
- /// on the search parameters. (At time of writing, that's 4.) However,
- /// when starting states for each pattern is enabled, then there are N
- /// additional groups of starting states, where each group reflects the
- /// different possible configurations and N is the number of patterns.
- starts: Vec<LazyStateID>,
- /// A sequence of NFA/DFA powerset states that have been computed for this
- /// lazy DFA. This sequence is indexable by untagged LazyStateIDs. (Every
- /// tagged LazyStateID can be used to index this sequence by converting it
- /// to its untagged form.)
- states: Vec<State>,
- /// A map from states to their corresponding IDs. This map may be accessed
- /// via the raw byte representation of a state, which means that a `State`
- /// does not need to be allocated to determine whether it already exists
- /// in this map. Indeed, the existence of such a state is what determines
- /// whether we allocate a new `State` or not.
- ///
- /// The higher level idea here is that we do just enough determinization
- /// for a state to check whether we've already computed it. If we have,
- /// then we can save a little (albeit not much) work. The real savings is
- /// in memory usage. If we never checked for trivially duplicate states,
- /// then our memory usage would explode to unreasonable levels.
- states_to_id: StateMap,
- /// Sparse sets used to track which NFA states have been visited during
- /// various traversals.
- sparses: SparseSets,
- /// Scratch space for traversing the NFA graph. (We use space on the heap
- /// instead of the call stack.)
- stack: Vec<NFAStateID>,
- /// Scratch space for building a NFA/DFA powerset state. This is used to
- /// help amortize allocation since not every powerset state generated is
- /// added to the cache. In particular, if it already exists in the cache,
- /// then there is no need to allocate a new `State` for it.
- scratch_state_builder: StateBuilderEmpty,
- /// A simple abstraction for handling the saving of at most a single state
- /// across a cache clearing. This is required for correctness.
- /// Namely, if adding a new state after clearing the cache fails, then the
- /// caller must retain the ability to continue using the state ID given. The
- /// state corresponding to the state ID is what we preserve across cache
- /// clearings.
- state_saver: StateSaver,
- /// The memory usage, in bytes, used by 'states' and 'states_to_id'. We
- /// track this as new states are added since states use a variable amount
- /// of heap. Tracking this as we add states makes it possible to compute
- /// the total amount of memory used by the determinizer in constant time.
- memory_usage_state: usize,
- /// The number of times the cache has been cleared. When a minimum cache
- /// clear count is set, then the cache will return an error instead of
- /// clearing the cache if the count has been exceeded.
- clear_count: usize,
- /// The total number of bytes searched since the last time this cache was
- /// cleared, not including the current search.
- ///
- /// This can be added to the length of the current search to get the true
- /// total number of bytes searched.
- ///
- /// This is generally only non-zero when the
- /// `Cache::search_{start,update,finish}` APIs are used to track search
- /// progress.
- bytes_searched: usize,
- /// The progress of the current search.
- ///
- /// This is only non-`None` when callers utilize the `Cache::search_start`,
- /// `Cache::search_update` and `Cache::search_finish` APIs.
- ///
- /// The purpose of recording search progress is to be able to make a
- /// determination about the efficiency of the cache. Namely, by keeping
- /// track of how many bytes have been searched relative to how many states
- /// have been created, the lazy DFA can judge whether its cache is being
- /// used effectively and give up when it is not.
- progress: Option<SearchProgress>,
-}
-
-impl Cache {
- /// Create a new cache for the given lazy DFA.
- ///
- /// The cache returned should only be used for searches for the given DFA.
- /// If you want to reuse the cache for another DFA, then you must call
- /// [`Cache::reset`] with that DFA.
- pub fn new(dfa: &DFA) -> Cache {
- let mut cache = Cache {
- trans: alloc::vec![],
- starts: alloc::vec![],
- states: alloc::vec![],
- states_to_id: StateMap::new(),
- sparses: SparseSets::new(dfa.get_nfa().states().len()),
- stack: alloc::vec![],
- scratch_state_builder: StateBuilderEmpty::new(),
- state_saver: StateSaver::none(),
- memory_usage_state: 0,
- clear_count: 0,
- bytes_searched: 0,
- progress: None,
- };
- debug!("pre-init lazy DFA cache size: {}", cache.memory_usage());
- Lazy { dfa, cache: &mut cache }.init_cache();
- debug!("post-init lazy DFA cache size: {}", cache.memory_usage());
- cache
- }
-
- /// Reset this cache such that it can be used for searching with the given
- /// lazy DFA (and only that DFA).
- ///
- /// A cache reset permits reusing memory already allocated in this cache
- /// with a different lazy DFA.
- ///
- /// Resetting a cache sets its "clear count" to 0. This is relevant if the
- /// lazy DFA has been configured to "give up" after it has cleared the
- /// cache a certain number of times.
- ///
- /// Any lazy state ID generated by the cache prior to resetting it is
- /// invalid after the reset.
- ///
- /// # Example
- ///
- /// This shows how to re-purpose a cache for use with a different DFA.
- ///
- /// ```
- /// # if cfg!(miri) { return Ok(()); } // miri takes too long
- /// use regex_automata::{hybrid::dfa::DFA, HalfMatch, Input};
- ///
- /// let dfa1 = DFA::new(r"\w")?;
- /// let dfa2 = DFA::new(r"\W")?;
- ///
- /// let mut cache = dfa1.create_cache();
- /// assert_eq!(
- /// Some(HalfMatch::must(0, 2)),
- /// dfa1.try_search_fwd(&mut cache, &Input::new("Δ"))?,
- /// );
- ///
- /// // Using 'cache' with dfa2 is not allowed. It may result in panics or
- /// // incorrect results. In order to re-purpose the cache, we must reset
- /// // it with the DFA we'd like to use it with.
- /// //
- /// // Similarly, after this reset, using the cache with 'dfa1' is also not
- /// // allowed.
- /// cache.reset(&dfa2);
- /// assert_eq!(
- /// Some(HalfMatch::must(0, 3)),
- /// dfa2.try_search_fwd(&mut cache, &Input::new("☃"))?,
- /// );
- ///
- /// # Ok::<(), Box<dyn std::error::Error>>(())
- /// ```
- pub fn reset(&mut self, dfa: &DFA) {
- Lazy::new(dfa, self).reset_cache()
- }
-
- /// Initializes a new search starting at the given position.
- ///
- /// If a previous search was unfinished, then it is finished automatically
- /// and a new search is begun.
- ///
- /// Note that keeping track of search progress is _not necessary_
- /// for correct implementations of search using a lazy DFA. Keeping
- /// track of search progress is only necessary if you want the
- /// [`Config::minimum_bytes_per_state`] configuration knob to work.
- #[inline]
- pub fn search_start(&mut self, at: usize) {
- // If a previous search wasn't marked as finished, then finish it
- // now automatically.
- if let Some(p) = self.progress.take() {
- self.bytes_searched += p.len();
- }
- self.progress = Some(SearchProgress { start: at, at });
- }
-
- /// Updates the current search to indicate that it has searched to the
- /// current position.
- ///
- /// No special care needs to be taken for reverse searches. Namely, the
- /// position given may be _less than_ the starting position of the search.
- ///
- /// # Panics
- ///
- /// This panics if no search has been started by [`Cache::search_start`].
- #[inline]
- pub fn search_update(&mut self, at: usize) {
- let p =
- self.progress.as_mut().expect("no in-progress search to update");
- p.at = at;
- }
-
- /// Indicates that a search has finished at the given position.
- ///
- /// # Panics
- ///
- /// This panics if no search has been started by [`Cache::search_start`].
- #[inline]
- pub fn search_finish(&mut self, at: usize) {
- let mut p =
- self.progress.take().expect("no in-progress search to finish");
- p.at = at;
- self.bytes_searched += p.len();
- }
-
- /// Returns the total number of bytes that have been searched since this
- /// cache was last cleared.
- ///
- /// This is useful for determining the efficiency of the cache. For
- /// example, the lazy DFA uses this value in conjunction with the
- /// [`Config::minimum_bytes_per_state`] knob to help determine whether it
- /// should quit searching.
- ///
- /// This always returns `0` if search progress isn't being tracked. Note
- /// that the lazy DFA search routines in this crate always track search
- /// progress.
- pub fn search_total_len(&self) -> usize {
- self.bytes_searched + self.progress.as_ref().map_or(0, |p| p.len())
- }
-
- /// Returns the total number of times this cache has been cleared since it
- /// was either created or last reset.
- ///
- /// This is useful for informational purposes or if you want to change
- /// search strategies based on the number of times the cache has been
- /// cleared.
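A sketch of how the `search_start`, `search_update` and `search_finish` calls documented above might be threaded through a hand-rolled search loop, so that the `minimum_bytes_per_state` heuristic has data to work with. This is illustrative only; `walk_with_progress` is an invented name, but the `DFA` and `Cache` methods it calls are the ones documented in this file:

```rust
use regex_automata::{hybrid::dfa::{Cache, DFA}, Input};

// Illustrative only: a manual DFA walk that reports its progress to the
// cache. Returns whether a match ends exactly at the end of `haystack`.
fn walk_with_progress(
    dfa: &DFA,
    cache: &mut Cache,
    haystack: &[u8],
) -> Result<bool, Box<dyn std::error::Error>> {
    cache.search_start(0);
    let mut sid = dfa.start_state_forward(cache, &Input::new(haystack))?;
    for (i, &b) in haystack.iter().enumerate() {
        sid = dfa.next_state(cache, sid, b)?;
        // Record that the search has advanced past the byte at index `i`.
        cache.search_update(i + 1);
    }
    // Matches are delayed by one byte, so finish with the EOI transition.
    sid = dfa.next_eoi_state(cache, sid)?;
    cache.search_finish(haystack.len());
    Ok(sid.is_match())
}
```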
- pub fn clear_count(&self) -> usize {
- self.clear_count
- }
-
- /// Returns the heap memory usage, in bytes, of this cache.
- ///
- /// This does **not** include the stack size used up by this cache. To
- /// compute that, use `std::mem::size_of::<Cache>()`.
- pub fn memory_usage(&self) -> usize {
- const ID_SIZE: usize = size_of::<LazyStateID>();
- const STATE_SIZE: usize = size_of::<State>();
-
- // NOTE: If you make changes to the below, then
- // 'minimum_cache_capacity' should be updated correspondingly.
-
- self.trans.len() * ID_SIZE
- + self.starts.len() * ID_SIZE
- + self.states.len() * STATE_SIZE
- // Maps likely use more memory than this, but it's probably close.
- + self.states_to_id.len() * (STATE_SIZE + ID_SIZE)
- + self.sparses.memory_usage()
- + self.stack.capacity() * ID_SIZE
- + self.scratch_state_builder.capacity()
- // Heap memory used by 'State' in both 'states' and 'states_to_id'.
- + self.memory_usage_state
- }
-}
-
-/// Keeps track of the progress of the current search.
-///
-/// This is updated via the `Cache::search_{start,update,finish}` APIs to
-/// record how many bytes have been searched. This permits computing a
-/// heuristic that represents the efficiency of a cache, and thus helps inform
-/// whether the lazy DFA should give up or not.
-#[derive(Clone, Debug)]
-struct SearchProgress {
- start: usize,
- at: usize,
-}
-
-impl SearchProgress {
- /// Returns the length, in bytes, of this search so far.
- ///
- /// This automatically handles the case of a reverse search, where `at`
- /// is likely to be less than `start`.
- fn len(&self) -> usize {
- if self.start <= self.at {
- self.at - self.start
- } else {
- self.start - self.at
- }
- }
-}
-
-/// A map from states to state identifiers. When using std, we use a standard
-/// hashmap, since it's a bit faster for this use case. (Other maps, like
-/// ones based on FNV, have not yet been benchmarked.)
-///
-/// The main purpose of this map is to reuse states where possible. This won't
-/// fully minimize the DFA, but it works well in a lot of cases.
-#[cfg(feature = "std")]
-type StateMap = std::collections::HashMap<State, LazyStateID>;
-#[cfg(not(feature = "std"))]
-type StateMap = alloc::collections::BTreeMap<State, LazyStateID>;
-
-/// A type that groups methods that require the base NFA/DFA and writable
-/// access to the cache.
-#[derive(Debug)]
-struct Lazy<'i, 'c> {
- dfa: &'i DFA,
- cache: &'c mut Cache,
-}
-
-impl<'i, 'c> Lazy<'i, 'c> {
- /// Creates a new 'Lazy' wrapper for a DFA and its corresponding cache.
- fn new(dfa: &'i DFA, cache: &'c mut Cache) -> Lazy<'i, 'c> {
- Lazy { dfa, cache }
- }
-
- /// Return an immutable view by downgrading a writable cache to a read-only
- /// cache.
- fn as_ref<'a>(&'a self) -> LazyRef<'i, 'a> {
- LazyRef::new(self.dfa, self.cache)
- }
-
- /// This is marked as 'inline(never)' to avoid bloating methods on 'DFA'
- /// like 'next_state' and 'next_eoi_state' that are called in critical
- /// areas. The idea is to let the optimizer focus on the other areas of
- /// those methods as the hot path.
- ///
- /// Here's an example that justifies 'inline(never)'
- ///
- /// ```ignore
- /// regex-cli find match hybrid \
- /// --cache-capacity 100000000 \
- /// -p '\pL{100}'
- /// all-codepoints-utf8-100x
- /// ```
- ///
- /// Where 'all-codepoints-utf8-100x' is the UTF-8 encoding of every
- /// codepoint, in sequence, repeated 100 times.
- ///
- /// With 'inline(never)' hyperfine reports 1.1s per run. With
- /// 'inline(always)', hyperfine reports 1.23s. So that's a 10% improvement.
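For reference, a conceptual sketch of the premultiplied transition-table layout described for `Cache::trans` above and exploited by `cache_next_state` below. Nothing here is the crate's actual code; it only restates the arithmetic with made-up helper names:

```rust
// Conceptual sketch only: each state owns a row of `stride = 1 << stride2`
// transitions, and an untagged ID already encodes `state_index << stride2`,
// so a transition lookup is a single add plus index, with no multiply.
fn next_premultiplied(trans: &[u32], untagged_sid: u32, byte_class: u32) -> u32 {
    trans[(untagged_sid + byte_class) as usize]
}

// Recovering the row (state) index, as `cache_next_state` below does with
// `current.as_usize_untagged() >> stride2`.
fn state_index(untagged_sid: u32, stride2: u32) -> u32 {
    untagged_sid >> stride2
}
```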
- #[cold] - #[inline(never)] - fn cache_next_state( - &mut self, - mut current: LazyStateID, - unit: alphabet::Unit, - ) -> Result { - let stride2 = self.dfa.stride2(); - let empty_builder = self.get_state_builder(); - let builder = determinize::next( - self.dfa.get_nfa(), - self.dfa.get_config().get_match_kind(), - &mut self.cache.sparses, - &mut self.cache.stack, - &self.cache.states[current.as_usize_untagged() >> stride2], - unit, - empty_builder, - ); - // This is subtle, but if we *might* clear the cache, then we should - // try to save the current state so that we can re-map its ID after - // cache clearing. We *might* clear the cache when either the new - // state can't fit in the cache or when the number of transitions has - // reached the maximum. Even if either of these conditions is true, - // the cache might not be cleared if we can reuse an existing state. - // But we don't know that at this point. Moreover, we don't save the - // current state every time because it is costly. - // - // TODO: We should try to find a way to make this less subtle and error - // prone. ---AG - let save_state = !self.as_ref().state_builder_fits_in_cache(&builder) - || self.cache.trans.len() >= LazyStateID::MAX; - if save_state { - self.save_state(current); - } - let next = self.add_builder_state(builder, |sid| sid)?; - if save_state { - current = self.saved_state_id(); - } - // This is the payoff. The next time 'next_state' is called with this - // state and alphabet unit, it will find this transition and avoid - // having to re-determinize this transition. - self.set_transition(current, unit, next); - Ok(next) - } - - /// Compute and cache the starting state for the given pattern ID (if - /// present) and the starting configuration. - /// - /// This panics if a pattern ID is given and the DFA isn't configured to - /// build anchored start states for each pattern. - /// - /// This will never return an unknown lazy state ID. - /// - /// If caching this state would otherwise result in a cache that has been - /// cleared too many times, then an error is returned. - #[cold] - #[inline(never)] - fn cache_start_group( - &mut self, - anchored: Anchored, - start: Start, - ) -> Result { - let nfa_start_id = match anchored { - Anchored::No => self.dfa.get_nfa().start_unanchored(), - Anchored::Yes => self.dfa.get_nfa().start_anchored(), - Anchored::Pattern(pid) => { - if !self.dfa.get_config().get_starts_for_each_pattern() { - return Err(StartError::unsupported_anchored(anchored)); - } - match self.dfa.get_nfa().start_pattern(pid) { - None => return Ok(self.as_ref().dead_id()), - Some(sid) => sid, - } - } - }; - - let id = self - .cache_start_one(nfa_start_id, start) - .map_err(StartError::cache)?; - self.set_start_state(anchored, start, id); - Ok(id) - } - - /// Compute and cache the starting state for the given NFA state ID and the - /// starting configuration. The NFA state ID might be one of the following: - /// - /// 1) An unanchored start state to match any pattern. - /// 2) An anchored start state to match any pattern. - /// 3) An anchored start state for a particular pattern. - /// - /// This will never return an unknown lazy state ID. - /// - /// If caching this state would otherwise result in a cache that has been - /// cleared too many times, then an error is returned. 
- fn cache_start_one( - &mut self, - nfa_start_id: NFAStateID, - start: Start, - ) -> Result { - let mut builder_matches = self.get_state_builder().into_matches(); - determinize::set_lookbehind_from_start( - self.dfa.get_nfa(), - &start, - &mut builder_matches, - ); - self.cache.sparses.set1.clear(); - determinize::epsilon_closure( - self.dfa.get_nfa(), - nfa_start_id, - builder_matches.look_have(), - &mut self.cache.stack, - &mut self.cache.sparses.set1, - ); - let mut builder = builder_matches.into_nfa(); - determinize::add_nfa_states( - &self.dfa.get_nfa(), - &self.cache.sparses.set1, - &mut builder, - ); - let tag_starts = self.dfa.get_config().get_specialize_start_states(); - self.add_builder_state(builder, |id| { - if tag_starts { - id.to_start() - } else { - id - } - }) - } - - /// Either add the given builder state to this cache, or return an ID to an - /// equivalent state already in this cache. - /// - /// In the case where no equivalent state exists, the idmap function given - /// may be used to transform the identifier allocated. This is useful if - /// the caller needs to tag the ID with additional information. - /// - /// This will never return an unknown lazy state ID. - /// - /// If caching this state would otherwise result in a cache that has been - /// cleared too many times, then an error is returned. - fn add_builder_state( - &mut self, - builder: StateBuilderNFA, - idmap: impl Fn(LazyStateID) -> LazyStateID, - ) -> Result { - if let Some(&cached_id) = - self.cache.states_to_id.get(builder.as_bytes()) - { - // Since we have a cached state, put the constructed state's - // memory back into our scratch space, so that it can be reused. - self.put_state_builder(builder); - return Ok(cached_id); - } - let result = self.add_state(builder.to_state(), idmap); - self.put_state_builder(builder); - result - } - - /// Allocate a new state ID and add the given state to this cache. - /// - /// The idmap function given may be used to transform the identifier - /// allocated. This is useful if the caller needs to tag the ID with - /// additional information. - /// - /// This will never return an unknown lazy state ID. - /// - /// If caching this state would otherwise result in a cache that has been - /// cleared too many times, then an error is returned. - fn add_state( - &mut self, - state: State, - idmap: impl Fn(LazyStateID) -> LazyStateID, - ) -> Result { - if !self.as_ref().state_fits_in_cache(&state) { - self.try_clear_cache()?; - } - // It's important for this to come second, since the above may clear - // the cache. If we clear the cache after ID generation, then the ID - // is likely bunk since it would have been generated based on a larger - // transition table. - let mut id = idmap(self.next_state_id()?); - if state.is_match() { - id = id.to_match(); - } - // Add room in the transition table. Since this is a fresh state, all - // of its transitions are unknown. - self.cache.trans.extend( - iter::repeat(self.as_ref().unknown_id()).take(self.dfa.stride()), - ); - // When we add a sentinel state, we never want to set any quit - // transitions. Technically, this is harmless, since sentinel states - // have all of their transitions set to loop back to themselves. But - // when creating sentinel states before the quit sentinel state, - // this will try to call 'set_transition' on a state ID that doesn't - // actually exist yet, which isn't allowed. So we just skip doing so - // entirely. 
- if !self.dfa.quitset.is_empty() && !self.as_ref().is_sentinel(id) { - let quit_id = self.as_ref().quit_id(); - for b in self.dfa.quitset.iter() { - self.set_transition(id, alphabet::Unit::u8(b), quit_id); - } - } - self.cache.memory_usage_state += state.memory_usage(); - self.cache.states.push(state.clone()); - self.cache.states_to_id.insert(state, id); - Ok(id) - } - - /// Allocate a new state ID. - /// - /// This will never return an unknown lazy state ID. - /// - /// If caching this state would otherwise result in a cache that has been - /// cleared too many times, then an error is returned. - fn next_state_id(&mut self) -> Result { - let sid = match LazyStateID::new(self.cache.trans.len()) { - Ok(sid) => sid, - Err(_) => { - self.try_clear_cache()?; - // This has to pass since we check that ID capacity at - // construction time can fit at least MIN_STATES states. - LazyStateID::new(self.cache.trans.len()).unwrap() - } - }; - Ok(sid) - } - - /// Attempt to clear the cache used by this lazy DFA. - /// - /// If clearing the cache exceeds the minimum number of required cache - /// clearings, then this will return a cache error. In this case, - /// callers should bubble this up as the cache can't be used until it is - /// reset. Implementations of search should convert this error into a - /// [`MatchError::gave_up`]. - /// - /// If 'self.state_saver' is set to save a state, then this state is - /// persisted through cache clearing. Otherwise, the cache is returned to - /// its state after initialization with two exceptions: its clear count - /// is incremented and some of its memory likely has additional capacity. - /// That is, clearing a cache does _not_ release memory. - /// - /// Otherwise, any lazy state ID generated by the cache prior to resetting - /// it is invalid after the reset. - fn try_clear_cache(&mut self) -> Result<(), CacheError> { - let c = self.dfa.get_config(); - if let Some(min_count) = c.get_minimum_cache_clear_count() { - if self.cache.clear_count >= min_count { - if let Some(min_bytes_per) = c.get_minimum_bytes_per_state() { - let len = self.cache.search_total_len(); - let min_bytes = - min_bytes_per.saturating_mul(self.cache.states.len()); - // If we've searched 0 bytes then probably something has - // gone wrong and the lazy DFA search implementation isn't - // correctly updating the search progress state. - if len == 0 { - trace!( - "number of bytes searched is 0, but \ - a minimum bytes per state searched ({}) is \ - enabled, maybe Cache::search_update \ - is not being used?", - min_bytes_per, - ); - } - if len < min_bytes { - trace!( - "lazy DFA cache has been cleared {} times, \ - which exceeds the limit of {}, \ - AND its bytes searched per state is less \ - than the configured minimum of {}, \ - therefore lazy DFA is giving up \ - (bytes searched since cache clear = {}, \ - number of states = {})", - self.cache.clear_count, - min_count, - min_bytes_per, - len, - self.cache.states.len(), - ); - return Err(CacheError::bad_efficiency()); - } else { - trace!( - "lazy DFA cache has been cleared {} times, \ - which exceeds the limit of {}, \ - AND its bytes searched per state is greater \ - than the configured minimum of {}, \ - therefore lazy DFA is continuing! 
\ - (bytes searched since cache clear = {}, \ - number of states = {})", - self.cache.clear_count, - min_count, - min_bytes_per, - len, - self.cache.states.len(), - ); - } - } else { - trace!( - "lazy DFA cache has been cleared {} times, \ - which exceeds the limit of {}, \ - since there is no configured bytes per state \ - minimum, lazy DFA is giving up", - self.cache.clear_count, - min_count, - ); - return Err(CacheError::too_many_cache_clears()); - } - } - } - self.clear_cache(); - Ok(()) - } - - /// Clears _and_ resets the cache. Resetting the cache means that no - /// states are persisted and the clear count is reset to 0. No heap memory - /// is released. - /// - /// Note that the caller may reset a cache with a different DFA than what - /// it was created from. In which case, the cache can now be used with the - /// new DFA (and not the old DFA). - fn reset_cache(&mut self) { - self.cache.state_saver = StateSaver::none(); - self.clear_cache(); - // If a new DFA is used, it might have a different number of NFA - // states, so we need to make sure our sparse sets have the appropriate - // size. - self.cache.sparses.resize(self.dfa.get_nfa().states().len()); - self.cache.clear_count = 0; - self.cache.progress = None; - } - - /// Clear the cache used by this lazy DFA. - /// - /// If 'self.state_saver' is set to save a state, then this state is - /// persisted through cache clearing. Otherwise, the cache is returned to - /// its state after initialization with two exceptions: its clear count - /// is incremented and some of its memory likely has additional capacity. - /// That is, clearing a cache does _not_ release memory. - /// - /// Otherwise, any lazy state ID generated by the cache prior to resetting - /// it is invalid after the reset. - fn clear_cache(&mut self) { - self.cache.trans.clear(); - self.cache.starts.clear(); - self.cache.states.clear(); - self.cache.states_to_id.clear(); - self.cache.memory_usage_state = 0; - self.cache.clear_count += 1; - self.cache.bytes_searched = 0; - if let Some(ref mut progress) = self.cache.progress { - progress.start = progress.at; - } - trace!( - "lazy DFA cache has been cleared (count: {})", - self.cache.clear_count - ); - self.init_cache(); - // If the state we want to save is one of the sentinel - // (unknown/dead/quit) states, then 'init_cache' adds those back, and - // their identifier values remains invariant. So there's no need to add - // it again. (And indeed, doing so would be incorrect!) - if let Some((old_id, state)) = self.cache.state_saver.take_to_save() { - // If the state is one of the special sentinel states, then it is - // automatically added by cache initialization and its ID always - // remains the same. With that said, this should never occur since - // the sentinel states are all loop states back to themselves. So - // we should never be in a position where we're attempting to save - // a sentinel state since we never compute transitions out of a - // sentinel state. - assert!( - !self.as_ref().is_sentinel(old_id), - "cannot save sentinel state" - ); - let new_id = self - .add_state(state, |id| { - if old_id.is_start() { - // We don't need to consult the - // 'specialize_start_states' config knob here, because - // if it's disabled, old_id.is_start() will never - // return true. - id.to_start() - } else { - id - } - }) - // The unwrap here is OK because lazy DFA creation ensures that - // we have room in the cache to add MIN_STATES states. Since - // 'init_cache' above adds 3, this adds a 4th. 
- .expect("adding one state after cache clear must work"); - self.cache.state_saver = StateSaver::Saved(new_id); - } - } - - /// Initialize this cache from emptiness to a place where it can be used - /// for search. - /// - /// This is called both at cache creation time and after the cache has been - /// cleared. - /// - /// Primarily, this adds the three sentinel states and allocates some - /// initial memory. - fn init_cache(&mut self) { - // Why multiply by 2 here? Because we make room for both the unanchored - // and anchored start states. Unanchored is first and then anchored. - let mut starts_len = Start::len().checked_mul(2).unwrap(); - // ... but if we also want start states for every pattern, we make room - // for that too. - if self.dfa.get_config().get_starts_for_each_pattern() { - starts_len += Start::len() * self.dfa.pattern_len(); - } - self.cache - .starts - .extend(iter::repeat(self.as_ref().unknown_id()).take(starts_len)); - // This is the set of NFA states that corresponds to each of our three - // sentinel states: the empty set. - let dead = State::dead(); - // This sets up some states that we use as sentinels that are present - // in every DFA. While it would be technically possible to implement - // this DFA without explicitly putting these states in the transition - // table, this is convenient to do to make `next_state` correct for all - // valid state IDs without needing explicit conditionals to special - // case these sentinel states. - // - // All three of these states are "dead" states. That is, all of - // them transition only to themselves. So once you enter one of - // these states, it's impossible to leave them. Thus, any correct - // search routine must explicitly check for these state types. (Sans - // `unknown`, since that is only used internally to represent missing - // states.) - let unk_id = - self.add_state(dead.clone(), |id| id.to_unknown()).unwrap(); - let dead_id = self.add_state(dead.clone(), |id| id.to_dead()).unwrap(); - let quit_id = self.add_state(dead.clone(), |id| id.to_quit()).unwrap(); - assert_eq!(unk_id, self.as_ref().unknown_id()); - assert_eq!(dead_id, self.as_ref().dead_id()); - assert_eq!(quit_id, self.as_ref().quit_id()); - // The idea here is that if you start in an unknown/dead/quit state and - // try to transition on them, then you should end up where you started. - self.set_all_transitions(unk_id, unk_id); - self.set_all_transitions(dead_id, dead_id); - self.set_all_transitions(quit_id, quit_id); - // All of these states are technically equivalent from the FSM - // perspective, so putting all three of them in the cache isn't - // possible. (They are distinct merely because we use their - // identifiers as sentinels to mean something, as indicated by the - // names.) Moreover, we wouldn't want to do that. Unknown and quit - // states are special in that they are artificial constructions - // this implementation. But dead states are a natural part of - // determinization. When you reach a point in the NFA where you cannot - // go anywhere else, a dead state will naturally arise and we MUST - // reuse the canonical dead state that we've created here. Why? Because - // it is the state ID that tells the search routine whether a state is - // dead or not, and thus, whether to stop the search. Having a bunch of - // distinct dead states would be quite wasteful! - self.cache.states_to_id.insert(dead, dead_id); - } - - /// Save the state corresponding to the ID given such that the state - /// persists through a cache clearing. 
- /// - /// While the state may persist, the ID may not. In order to discover the - /// new state ID, one must call 'saved_state_id' after a cache clearing. - fn save_state(&mut self, id: LazyStateID) { - let state = self.as_ref().get_cached_state(id).clone(); - self.cache.state_saver = StateSaver::ToSave { id, state }; - } - - /// Returns the updated lazy state ID for a state that was persisted - /// through a cache clearing. - /// - /// It is only correct to call this routine when both a state has been - /// saved and the cache has just been cleared. Otherwise, this panics. - fn saved_state_id(&mut self) -> LazyStateID { - self.cache - .state_saver - .take_saved() - .expect("state saver does not have saved state ID") - } - - /// Set all transitions on the state 'from' to 'to'. - fn set_all_transitions(&mut self, from: LazyStateID, to: LazyStateID) { - for unit in self.dfa.classes.representatives(..) { - self.set_transition(from, unit, to); - } - } - - /// Set the transition on 'from' for 'unit' to 'to'. - /// - /// This panics if either 'from' or 'to' is invalid. - /// - /// All unit values are OK. - fn set_transition( - &mut self, - from: LazyStateID, - unit: alphabet::Unit, - to: LazyStateID, - ) { - assert!(self.as_ref().is_valid(from), "invalid 'from' id: {from:?}"); - assert!(self.as_ref().is_valid(to), "invalid 'to' id: {to:?}"); - let offset = - from.as_usize_untagged() + self.dfa.classes.get_by_unit(unit); - self.cache.trans[offset] = to; - } - - /// Set the start ID for the given pattern ID (if given) and starting - /// configuration to the ID given. - /// - /// This panics if 'id' is not valid or if a pattern ID is given and - /// 'starts_for_each_pattern' is not enabled. - fn set_start_state( - &mut self, - anchored: Anchored, - start: Start, - id: LazyStateID, - ) { - assert!(self.as_ref().is_valid(id)); - let start_index = start.as_usize(); - let index = match anchored { - Anchored::No => start_index, - Anchored::Yes => Start::len() + start_index, - Anchored::Pattern(pid) => { - assert!( - self.dfa.get_config().get_starts_for_each_pattern(), - "attempted to search for a specific pattern \ - without enabling starts_for_each_pattern", - ); - let pid = pid.as_usize(); - (2 * Start::len()) + (Start::len() * pid) + start_index - } - }; - self.cache.starts[index] = id; - } - - /// Returns a state builder from this DFA that might have existing - /// capacity. This helps avoid allocs in cases where a state is built that - /// turns out to already be cached. - /// - /// Callers must put the state builder back with 'put_state_builder', - /// otherwise the allocation reuse won't work. - fn get_state_builder(&mut self) -> StateBuilderEmpty { - core::mem::replace( - &mut self.cache.scratch_state_builder, - StateBuilderEmpty::new(), - ) - } - - /// Puts the given state builder back into this DFA for reuse. - /// - /// Note that building a 'State' from a builder always creates a new alloc, - /// so callers should always put the builder back. - fn put_state_builder(&mut self, builder: StateBuilderNFA) { - let _ = core::mem::replace( - &mut self.cache.scratch_state_builder, - builder.clear(), - ); - } -} - -/// A type that groups methods that require the base NFA/DFA and read-only -/// access to the cache. -#[derive(Debug)] -struct LazyRef<'i, 'c> { - dfa: &'i DFA, - cache: &'c Cache, -} - -impl<'i, 'c> LazyRef<'i, 'c> { - /// Creates a new 'Lazy' wrapper for a DFA and its corresponding cache. 
- fn new(dfa: &'i DFA, cache: &'c Cache) -> LazyRef<'i, 'c> { - LazyRef { dfa, cache } - } - - /// Return the ID of the start state for the given configuration. - /// - /// If the start state has not yet been computed, then this returns an - /// unknown lazy state ID. - #[cfg_attr(feature = "perf-inline", inline(always))] - fn get_cached_start_id( - &self, - anchored: Anchored, - start: Start, - ) -> Result { - let start_index = start.as_usize(); - let index = match anchored { - Anchored::No => start_index, - Anchored::Yes => Start::len() + start_index, - Anchored::Pattern(pid) => { - if !self.dfa.get_config().get_starts_for_each_pattern() { - return Err(StartError::unsupported_anchored(anchored)); - } - if pid.as_usize() >= self.dfa.pattern_len() { - return Ok(self.dead_id()); - } - (2 * Start::len()) - + (Start::len() * pid.as_usize()) - + start_index - } - }; - Ok(self.cache.starts[index]) - } - - /// Return the cached NFA/DFA powerset state for the given ID. - /// - /// This panics if the given ID does not address a valid state. - fn get_cached_state(&self, sid: LazyStateID) -> &State { - let index = sid.as_usize_untagged() >> self.dfa.stride2(); - &self.cache.states[index] - } - - /// Returns true if and only if the given ID corresponds to a "sentinel" - /// state. - /// - /// A sentinel state is a state that signifies a special condition of - /// search, and where every transition maps back to itself. See LazyStateID - /// for more details. Note that start and match states are _not_ sentinels - /// since they may otherwise be real states with non-trivial transitions. - /// The purposes of sentinel states is purely to indicate something. Their - /// transitions are not meant to be followed. - fn is_sentinel(&self, id: LazyStateID) -> bool { - id == self.unknown_id() || id == self.dead_id() || id == self.quit_id() - } - - /// Returns the ID of the unknown state for this lazy DFA. - fn unknown_id(&self) -> LazyStateID { - // This unwrap is OK since 0 is always a valid state ID. - LazyStateID::new(0).unwrap().to_unknown() - } - - /// Returns the ID of the dead state for this lazy DFA. - fn dead_id(&self) -> LazyStateID { - // This unwrap is OK since the maximum value here is 1 * 512 = 512, - // which is <= 2047 (the maximum state ID on 16-bit systems). Where - // 512 is the worst case for our equivalence classes (every byte is a - // distinct class). - LazyStateID::new(1 << self.dfa.stride2()).unwrap().to_dead() - } - - /// Returns the ID of the quit state for this lazy DFA. - fn quit_id(&self) -> LazyStateID { - // This unwrap is OK since the maximum value here is 2 * 512 = 1024, - // which is <= 2047 (the maximum state ID on 16-bit systems). Where - // 512 is the worst case for our equivalence classes (every byte is a - // distinct class). - LazyStateID::new(2 << self.dfa.stride2()).unwrap().to_quit() - } - - /// Returns true if and only if the given ID is valid. - /// - /// An ID is valid if it is both a valid index into the transition table - /// and is a multiple of the DFA's stride. - fn is_valid(&self, id: LazyStateID) -> bool { - let id = id.as_usize_untagged(); - id < self.cache.trans.len() && id % self.dfa.stride() == 0 - } - - /// Returns true if adding the state given would fit in this cache. - fn state_fits_in_cache(&self, state: &State) -> bool { - let needed = self.cache.memory_usage() - + self.memory_usage_for_one_more_state(state.memory_usage()); - trace!( - "lazy DFA cache capacity state check: {:?} ?<=? 
{:?}", - needed, - self.dfa.cache_capacity - ); - needed <= self.dfa.cache_capacity - } - - /// Returns true if adding the state to be built by the given builder would - /// fit in this cache. - fn state_builder_fits_in_cache(&self, state: &StateBuilderNFA) -> bool { - let needed = self.cache.memory_usage() - + self.memory_usage_for_one_more_state(state.as_bytes().len()); - trace!( - "lazy DFA cache capacity state builder check: {:?} ?<=? {:?}", - needed, - self.dfa.cache_capacity - ); - needed <= self.dfa.cache_capacity - } - - /// Returns the additional memory usage, in bytes, required to add one more - /// state to this cache. The given size should be the heap size, in bytes, - /// that would be used by the new state being added. - fn memory_usage_for_one_more_state( - &self, - state_heap_size: usize, - ) -> usize { - const ID_SIZE: usize = size_of::(); - const STATE_SIZE: usize = size_of::(); - - self.dfa.stride() * ID_SIZE // additional space needed in trans table - + STATE_SIZE // space in cache.states - + (STATE_SIZE + ID_SIZE) // space in cache.states_to_id - + state_heap_size // heap memory used by state itself - } -} - -/// A simple type that encapsulates the saving of a state ID through a cache -/// clearing. -/// -/// A state ID can be marked for saving with ToSave, while a state ID can be -/// saved itself with Saved. -#[derive(Clone, Debug)] -enum StateSaver { - /// An empty state saver. In this case, no states (other than the special - /// sentinel states) are preserved after clearing the cache. - None, - /// An ID of a state (and the state itself) that should be preserved after - /// the lazy DFA's cache has been cleared. After clearing, the updated ID - /// is stored in 'Saved' since it may have changed. - ToSave { id: LazyStateID, state: State }, - /// An ID that of a state that has been persisted through a lazy DFA - /// cache clearing. The ID recorded here corresponds to an ID that was - /// once marked as ToSave. The IDs are likely not equivalent even though - /// the states they point to are. - Saved(LazyStateID), -} - -impl StateSaver { - /// Create an empty state saver. - fn none() -> StateSaver { - StateSaver::None - } - - /// Replace this state saver with an empty saver, and if this saver is a - /// request to save a state, return that request. - fn take_to_save(&mut self) -> Option<(LazyStateID, State)> { - match core::mem::replace(self, StateSaver::None) { - StateSaver::None | StateSaver::Saved(_) => None, - StateSaver::ToSave { id, state } => Some((id, state)), - } - } - - /// Replace this state saver with an empty saver, and if this saver is a - /// saved state (or a request to save a state), return that state's ID. - /// - /// The idea here is that a request to save a state isn't necessarily - /// honored because it might not be needed. e.g., Some higher level code - /// might request a state to be saved on the off chance that the cache gets - /// cleared when a new state is added at a lower level. But if that new - /// state is never added, then the cache is never cleared and the state and - /// its ID remain unchanged. - fn take_saved(&mut self) -> Option { - match core::mem::replace(self, StateSaver::None) { - StateSaver::None => None, - StateSaver::Saved(id) | StateSaver::ToSave { id, .. } => Some(id), - } - } -} - -/// The configuration used for building a lazy DFA. -/// -/// As a convenience, [`DFA::config`] is an alias for [`Config::new`]. The -/// advantage of the former is that it often lets you avoid importing the -/// `Config` type directly. 
-/// -/// A lazy DFA configuration is a simple data object that is typically used -/// with [`Builder::configure`]. -/// -/// The default configuration guarantees that a search will never return a -/// "gave up" or "quit" error, although it is possible for a search to fail -/// if [`Config::starts_for_each_pattern`] wasn't enabled (which it is not by -/// default) and an [`Anchored::Pattern`] mode is requested via [`Input`]. -#[derive(Clone, Debug, Default)] -pub struct Config { - // As with other configuration types in this crate, we put all our knobs - // in options so that we can distinguish between "default" and "not set." - // This makes it possible to easily combine multiple configurations - // without default values overwriting explicitly specified values. See the - // 'overwrite' method. - // - // For docs on the fields below, see the corresponding method setters. - match_kind: Option, - pre: Option>, - starts_for_each_pattern: Option, - byte_classes: Option, - unicode_word_boundary: Option, - quitset: Option, - specialize_start_states: Option, - cache_capacity: Option, - skip_cache_capacity_check: Option, - minimum_cache_clear_count: Option>, - minimum_bytes_per_state: Option>, -} - -impl Config { - /// Return a new default lazy DFA builder configuration. - pub fn new() -> Config { - Config::default() - } - - /// Set the desired match semantics. - /// - /// The default is [`MatchKind::LeftmostFirst`], which corresponds to the - /// match semantics of Perl-like regex engines. That is, when multiple - /// patterns would match at the same leftmost position, the pattern that - /// appears first in the concrete syntax is chosen. - /// - /// Currently, the only other kind of match semantics supported is - /// [`MatchKind::All`]. This corresponds to classical DFA construction - /// where all possible matches are added to the lazy DFA. - /// - /// Typically, `All` is used when one wants to execute an overlapping - /// search and `LeftmostFirst` otherwise. In particular, it rarely makes - /// sense to use `All` with the various "leftmost" find routines, since the - /// leftmost routines depend on the `LeftmostFirst` automata construction - /// strategy. Specifically, `LeftmostFirst` adds dead states to the - /// lazy DFA as a way to terminate the search and report a match. - /// `LeftmostFirst` also supports non-greedy matches using this strategy - /// where as `All` does not. - /// - /// # Example: overlapping search - /// - /// This example shows the typical use of `MatchKind::All`, which is to - /// report overlapping matches. - /// - /// ``` - /// # if cfg!(miri) { return Ok(()); } // miri takes too long - /// use regex_automata::{ - /// hybrid::dfa::{DFA, OverlappingState}, - /// HalfMatch, Input, MatchKind, - /// }; - /// - /// let dfa = DFA::builder() - /// .configure(DFA::config().match_kind(MatchKind::All)) - /// .build_many(&[r"\w+$", r"\S+$"])?; - /// let mut cache = dfa.create_cache(); - /// let haystack = "@foo"; - /// let mut state = OverlappingState::start(); - /// - /// let expected = Some(HalfMatch::must(1, 4)); - /// dfa.try_search_overlapping_fwd( - /// &mut cache, &Input::new(haystack), &mut state, - /// )?; - /// assert_eq!(expected, state.get_match()); - /// - /// // The first pattern also matches at the same position, so re-running - /// // the search will yield another match. Notice also that the first - /// // pattern is returned after the second. 
This is because the second - /// // pattern begins its match before the first, is therefore an earlier - /// // match and is thus reported first. - /// let expected = Some(HalfMatch::must(0, 4)); - /// dfa.try_search_overlapping_fwd( - /// &mut cache, &Input::new(haystack), &mut state, - /// )?; - /// assert_eq!(expected, state.get_match()); - /// - /// # Ok::<(), Box>(()) - /// ``` - /// - /// # Example: reverse automaton to find start of match - /// - /// Another example for using `MatchKind::All` is for constructing a - /// reverse automaton to find the start of a match. `All` semantics are - /// used for this in order to find the longest possible match, which - /// corresponds to the leftmost starting position. - /// - /// Note that if you need the starting position then - /// [`hybrid::regex::Regex`](crate::hybrid::regex::Regex) will handle this - /// for you, so it's usually not necessary to do this yourself. - /// - /// ``` - /// use regex_automata::{ - /// hybrid::dfa::DFA, - /// nfa::thompson::NFA, - /// Anchored, HalfMatch, Input, MatchKind, - /// }; - /// - /// let input = Input::new("123foobar456"); - /// let pattern = r"[a-z]+r"; - /// - /// let dfa_fwd = DFA::new(pattern)?; - /// let dfa_rev = DFA::builder() - /// .thompson(NFA::config().reverse(true)) - /// .configure(DFA::config().match_kind(MatchKind::All)) - /// .build(pattern)?; - /// let mut cache_fwd = dfa_fwd.create_cache(); - /// let mut cache_rev = dfa_rev.create_cache(); - /// - /// let expected_fwd = HalfMatch::must(0, 9); - /// let expected_rev = HalfMatch::must(0, 3); - /// let got_fwd = dfa_fwd.try_search_fwd(&mut cache_fwd, &input)?.unwrap(); - /// // Here we don't specify the pattern to search for since there's only - /// // one pattern and we're doing a leftmost search. But if this were an - /// // overlapping search, you'd need to specify the pattern that matched - /// // in the forward direction. (Otherwise, you might wind up finding the - /// // starting position of a match of some other pattern.) That in turn - /// // requires building the reverse automaton with starts_for_each_pattern - /// // enabled. - /// let input = input - /// .clone() - /// .range(..got_fwd.offset()) - /// .anchored(Anchored::Yes); - /// let got_rev = dfa_rev.try_search_rev(&mut cache_rev, &input)?.unwrap(); - /// assert_eq!(expected_fwd, got_fwd); - /// assert_eq!(expected_rev, got_rev); - /// - /// # Ok::<(), Box>(()) - /// ``` - pub fn match_kind(mut self, kind: MatchKind) -> Config { - self.match_kind = Some(kind); - self - } - - /// Set a prefilter to be used whenever a start state is entered. - /// - /// A [`Prefilter`] in this context is meant to accelerate searches by - /// looking for literal prefixes that every match for the corresponding - /// pattern (or patterns) must start with. Once a prefilter produces a - /// match, the underlying search routine continues on to try and confirm - /// the match. - /// - /// Be warned that setting a prefilter does not guarantee that the search - /// will be faster. While it's usually a good bet, if the prefilter - /// produces a lot of false positive candidates (i.e., positions matched - /// by the prefilter but not by the regex), then the overall result can - /// be slower than if you had just executed the regex engine without any - /// prefilters. - /// - /// Note that unless [`Config::specialize_start_states`] has been - /// explicitly set, then setting this will also enable (when `pre` is - /// `Some`) or disable (when `pre` is `None`) start state specialization. 
- /// This occurs because without start state specialization, a prefilter - /// is likely to be less effective. And without a prefilter, start state - /// specialization is usually pointless. - /// - /// By default no prefilter is set. - /// - /// # Example - /// - /// ``` - /// use regex_automata::{ - /// hybrid::dfa::DFA, - /// util::prefilter::Prefilter, - /// Input, HalfMatch, MatchKind, - /// }; - /// - /// let pre = Prefilter::new(MatchKind::LeftmostFirst, &["foo", "bar"]); - /// let re = DFA::builder() - /// .configure(DFA::config().prefilter(pre)) - /// .build(r"(foo|bar)[a-z]+")?; - /// let mut cache = re.create_cache(); - /// let input = Input::new("foo1 barfox bar"); - /// assert_eq!( - /// Some(HalfMatch::must(0, 11)), - /// re.try_search_fwd(&mut cache, &input)?, - /// ); - /// - /// # Ok::<(), Box>(()) - /// ``` - /// - /// Be warned though that an incorrect prefilter can lead to incorrect - /// results! - /// - /// ``` - /// use regex_automata::{ - /// hybrid::dfa::DFA, - /// util::prefilter::Prefilter, - /// Input, HalfMatch, MatchKind, - /// }; - /// - /// let pre = Prefilter::new(MatchKind::LeftmostFirst, &["foo", "car"]); - /// let re = DFA::builder() - /// .configure(DFA::config().prefilter(pre)) - /// .build(r"(foo|bar)[a-z]+")?; - /// let mut cache = re.create_cache(); - /// let input = Input::new("foo1 barfox bar"); - /// assert_eq!( - /// // No match reported even though there clearly is one! - /// None, - /// re.try_search_fwd(&mut cache, &input)?, - /// ); - /// - /// # Ok::<(), Box>(()) - /// ``` - pub fn prefilter(mut self, pre: Option) -> Config { - self.pre = Some(pre); - if self.specialize_start_states.is_none() { - self.specialize_start_states = - Some(self.get_prefilter().is_some()); - } - self - } - - /// Whether to compile a separate start state for each pattern in the - /// lazy DFA. - /// - /// When enabled, a separate **anchored** start state is added for each - /// pattern in the lazy DFA. When this start state is used, then the DFA - /// will only search for matches for the pattern specified, even if there - /// are other patterns in the DFA. - /// - /// The main downside of this option is that it can potentially increase - /// the size of the DFA and/or increase the time it takes to build the - /// DFA at search time. However, since this is configuration for a lazy - /// DFA, these states aren't actually built unless they're used. Enabling - /// this isn't necessarily free, however, as it may result in higher cache - /// usage. - /// - /// There are a few reasons one might want to enable this (it's disabled - /// by default): - /// - /// 1. When looking for the start of an overlapping match (using a reverse - /// DFA), doing it correctly requires starting the reverse search using the - /// starting state of the pattern that matched in the forward direction. - /// Indeed, when building a [`Regex`](crate::hybrid::regex::Regex), it - /// will automatically enable this option when building the reverse DFA - /// internally. - /// 2. When you want to use a DFA with multiple patterns to both search - /// for matches of any pattern or to search for anchored matches of one - /// particular pattern while using the same DFA. (Otherwise, you would need - /// to compile a new DFA for each pattern.) - /// - /// By default this is disabled. - /// - /// # Example - /// - /// This example shows how to use this option to permit the same lazy DFA - /// to run both general searches for any pattern and anchored searches for - /// a specific pattern. 
- /// - /// ``` - /// use regex_automata::{ - /// hybrid::dfa::DFA, - /// Anchored, HalfMatch, Input, PatternID, - /// }; - /// - /// let dfa = DFA::builder() - /// .configure(DFA::config().starts_for_each_pattern(true)) - /// .build_many(&[r"[a-z0-9]{6}", r"[a-z][a-z0-9]{5}"])?; - /// let mut cache = dfa.create_cache(); - /// let haystack = "bar foo123"; - /// - /// // Here's a normal unanchored search that looks for any pattern. - /// let expected = HalfMatch::must(0, 10); - /// let input = Input::new(haystack); - /// assert_eq!(Some(expected), dfa.try_search_fwd(&mut cache, &input)?); - /// // We can also do a normal anchored search for any pattern. Since it's - /// // an anchored search, we position the start of the search where we - /// // know the match will begin. - /// let expected = HalfMatch::must(0, 10); - /// let input = Input::new(haystack).range(4..); - /// assert_eq!(Some(expected), dfa.try_search_fwd(&mut cache, &input)?); - /// // Since we compiled anchored start states for each pattern, we can - /// // also look for matches of other patterns explicitly, even if a - /// // different pattern would have normally matched. - /// let expected = HalfMatch::must(1, 10); - /// let input = Input::new(haystack) - /// .range(4..) - /// .anchored(Anchored::Pattern(PatternID::must(1))); - /// assert_eq!(Some(expected), dfa.try_search_fwd(&mut cache, &input)?); - /// - /// # Ok::<(), Box>(()) - /// ``` - pub fn starts_for_each_pattern(mut self, yes: bool) -> Config { - self.starts_for_each_pattern = Some(yes); - self - } - - /// Whether to attempt to shrink the size of the lazy DFA's alphabet or - /// not. - /// - /// This option is enabled by default and should never be disabled unless - /// one is debugging the lazy DFA. - /// - /// When enabled, the lazy DFA will use a map from all possible bytes - /// to their corresponding equivalence class. Each equivalence class - /// represents a set of bytes that does not discriminate between a match - /// and a non-match in the DFA. For example, the pattern `[ab]+` has at - /// least two equivalence classes: a set containing `a` and `b` and a set - /// containing every byte except for `a` and `b`. `a` and `b` are in the - /// same equivalence classes because they never discriminate between a - /// match and a non-match. - /// - /// The advantage of this map is that the size of the transition table - /// can be reduced drastically from `#states * 256 * sizeof(LazyStateID)` - /// to `#states * k * sizeof(LazyStateID)` where `k` is the number of - /// equivalence classes (rounded up to the nearest power of 2). As a - /// result, total space usage can decrease substantially. Moreover, since a - /// smaller alphabet is used, DFA compilation during search becomes faster - /// as well since it will potentially be able to reuse a single transition - /// for multiple bytes. - /// - /// **WARNING:** This is only useful for debugging lazy DFAs. Disabling - /// this does not yield any speed advantages. Namely, even when this is - /// disabled, a byte class map is still used while searching. The only - /// difference is that every byte will be forced into its own distinct - /// equivalence class. This is useful for debugging the actual generated - /// transitions because it lets one see the transitions defined on actual - /// bytes instead of the equivalence classes. - pub fn byte_classes(mut self, yes: bool) -> Config { - self.byte_classes = Some(yes); - self - } - - /// Heuristically enable Unicode word boundaries. 
- /// - /// When set, this will attempt to implement Unicode word boundaries as if - /// they were ASCII word boundaries. This only works when the search input - /// is ASCII only. If a non-ASCII byte is observed while searching, then a - /// [`MatchError::quit`] error is returned. - /// - /// A possible alternative to enabling this option is to simply use an - /// ASCII word boundary, e.g., via `(?-u:\b)`. The main reason to use this - /// option is if you absolutely need Unicode support. This option lets one - /// use a fast search implementation (a DFA) for some potentially very - /// common cases, while providing the option to fall back to some other - /// regex engine to handle the general case when an error is returned. - /// - /// If the pattern provided has no Unicode word boundary in it, then this - /// option has no effect. (That is, quitting on a non-ASCII byte only - /// occurs when this option is enabled _and_ a Unicode word boundary is - /// present in the pattern.) - /// - /// This is almost equivalent to setting all non-ASCII bytes to be quit - /// bytes. The only difference is that this will cause non-ASCII bytes to - /// be quit bytes _only_ when a Unicode word boundary is present in the - /// pattern. - /// - /// When enabling this option, callers _must_ be prepared to - /// handle a [`MatchError`] error during search. When using a - /// [`Regex`](crate::hybrid::regex::Regex), this corresponds to using the - /// `try_` suite of methods. Alternatively, if callers can guarantee that - /// their input is ASCII only, then a [`MatchError::quit`] error will never - /// be returned while searching. - /// - /// This is disabled by default. - /// - /// # Example - /// - /// This example shows how to heuristically enable Unicode word boundaries - /// in a pattern. It also shows what happens when a search comes across a - /// non-ASCII byte. - /// - /// ``` - /// use regex_automata::{ - /// hybrid::dfa::DFA, - /// HalfMatch, Input, MatchError, - /// }; - /// - /// let dfa = DFA::builder() - /// .configure(DFA::config().unicode_word_boundary(true)) - /// .build(r"\b[0-9]+\b")?; - /// let mut cache = dfa.create_cache(); - /// - /// // The match occurs before the search ever observes the snowman - /// // character, so no error occurs. - /// let haystack = "foo 123 ☃"; - /// let expected = Some(HalfMatch::must(0, 7)); - /// let got = dfa.try_search_fwd(&mut cache, &Input::new(haystack))?; - /// assert_eq!(expected, got); - /// - /// // Notice that this search fails, even though the snowman character - /// // occurs after the ending match offset. This is because search - /// // routines read one byte past the end of the search to account for - /// // look-around, and indeed, this is required here to determine whether - /// // the trailing \b matches. - /// let haystack = "foo 123 ☃"; - /// let expected = MatchError::quit(0xE2, 8); - /// let got = dfa.try_search_fwd(&mut cache, &Input::new(haystack)); - /// assert_eq!(Err(expected), got); - /// - /// // Another example is executing a search where the span of the haystack - /// // we specify is all ASCII, but there is non-ASCII just before it. This - /// // correctly also reports an error. - /// let input = Input::new("β123").range(2..); - /// let expected = MatchError::quit(0xB2, 1); - /// let got = dfa.try_search_fwd(&mut cache, &input); - /// assert_eq!(Err(expected), got); - /// - /// // And similarly for the trailing word boundary. 
- /// let input = Input::new("123β").range(..3); - /// let expected = MatchError::quit(0xCE, 3); - /// let got = dfa.try_search_fwd(&mut cache, &input); - /// assert_eq!(Err(expected), got); - /// - /// # Ok::<(), Box>(()) - /// ``` - pub fn unicode_word_boundary(mut self, yes: bool) -> Config { - // We have a separate option for this instead of just setting the - // appropriate quit bytes here because we don't want to set quit bytes - // for every regex. We only want to set them when the regex contains a - // Unicode word boundary. - self.unicode_word_boundary = Some(yes); - self - } - - /// Add a "quit" byte to the lazy DFA. - /// - /// When a quit byte is seen during search time, then search will return a - /// [`MatchError::quit`] error indicating the offset at which the search - /// stopped. - /// - /// A quit byte will always overrule any other aspects of a regex. For - /// example, if the `x` byte is added as a quit byte and the regex `\w` is - /// used, then observing `x` will cause the search to quit immediately - /// despite the fact that `x` is in the `\w` class. - /// - /// This mechanism is primarily useful for heuristically enabling certain - /// features like Unicode word boundaries in a DFA. Namely, if the input - /// to search is ASCII, then a Unicode word boundary can be implemented - /// via an ASCII word boundary with no change in semantics. Thus, a DFA - /// can attempt to match a Unicode word boundary but give up as soon as it - /// observes a non-ASCII byte. Indeed, if callers set all non-ASCII bytes - /// to be quit bytes, then Unicode word boundaries will be permitted when - /// building lazy DFAs. Of course, callers should enable - /// [`Config::unicode_word_boundary`] if they want this behavior instead. - /// (The advantage being that non-ASCII quit bytes will only be added if a - /// Unicode word boundary is in the pattern.) - /// - /// When enabling this option, callers _must_ be prepared to - /// handle a [`MatchError`] error during search. When using a - /// [`Regex`](crate::hybrid::regex::Regex), this corresponds to using the - /// `try_` suite of methods. - /// - /// By default, there are no quit bytes set. - /// - /// # Panics - /// - /// This panics if heuristic Unicode word boundaries are enabled and any - /// non-ASCII byte is removed from the set of quit bytes. Namely, enabling - /// Unicode word boundaries requires setting every non-ASCII byte to a quit - /// byte. So if the caller attempts to undo any of that, then this will - /// panic. - /// - /// # Example - /// - /// This example shows how to cause a search to terminate if it sees a - /// `\n` byte. This could be useful if, for example, you wanted to prevent - /// a user supplied pattern from matching across a line boundary. - /// - /// ``` - /// # if cfg!(miri) { return Ok(()); } // miri takes too long - /// use regex_automata::{hybrid::dfa::DFA, MatchError, Input}; - /// - /// let dfa = DFA::builder() - /// .configure(DFA::config().quit(b'\n', true)) - /// .build(r"foo\p{any}+bar")?; - /// let mut cache = dfa.create_cache(); - /// - /// let haystack = "foo\nbar"; - /// // Normally this would produce a match, since \p{any} contains '\n'. - /// // But since we instructed the automaton to enter a quit state if a - /// // '\n' is observed, this produces a match error instead. 
- /// let expected = MatchError::quit(b'\n', 3); - /// let got = dfa.try_search_fwd( - /// &mut cache, - /// &Input::new(haystack), - /// ).unwrap_err(); - /// assert_eq!(expected, got); - /// - /// # Ok::<(), Box>(()) - /// ``` - pub fn quit(mut self, byte: u8, yes: bool) -> Config { - if self.get_unicode_word_boundary() && !byte.is_ascii() && !yes { - panic!( - "cannot set non-ASCII byte to be non-quit when \ - Unicode word boundaries are enabled" - ); - } - if self.quitset.is_none() { - self.quitset = Some(ByteSet::empty()); - } - if yes { - self.quitset.as_mut().unwrap().add(byte); - } else { - self.quitset.as_mut().unwrap().remove(byte); - } - self - } - - /// Enable specializing start states in the lazy DFA. - /// - /// When start states are specialized, an implementor of a search routine - /// using a lazy DFA can tell when the search has entered a starting state. - /// When start states aren't specialized, then it is impossible to know - /// whether the search has entered a start state. - /// - /// Ideally, this option wouldn't need to exist and we could always - /// specialize start states. The problem is that start states can be quite - /// active. This in turn means that an efficient search routine is likely - /// to ping-pong between a heavily optimized hot loop that handles most - /// states and to a less optimized specialized handling of start states. - /// This causes branches to get heavily mispredicted and overall can - /// materially decrease throughput. Therefore, specializing start states - /// should only be enabled when it is needed. - /// - /// Knowing whether a search is in a start state is typically useful when a - /// prefilter is active for the search. A prefilter is typically only run - /// when in a start state and a prefilter can greatly accelerate a search. - /// Therefore, the possible cost of specializing start states is worth it - /// in this case. Otherwise, if you have no prefilter, there is likely no - /// reason to specialize start states. - /// - /// This is disabled by default, but note that it is automatically - /// enabled (or disabled) if [`Config::prefilter`] is set. Namely, unless - /// `specialize_start_states` has already been set, [`Config::prefilter`] - /// will automatically enable or disable it based on whether a prefilter - /// is present or not, respectively. This is done because a prefilter's - /// effectiveness is rooted in being executed whenever the DFA is in a - /// start state, and that's only possible to do when they are specialized. - /// - /// Note that it is plausibly reasonable to _disable_ this option - /// explicitly while _enabling_ a prefilter. In that case, a prefilter - /// will still be run at the beginning of a search, but never again. This - /// in theory could strike a good balance if you're in a situation where a - /// prefilter is likely to produce many false positive candidates. - /// - /// # Example - /// - /// This example shows how to enable start state specialization and then - /// shows how to check whether a state is a start state or not. 
- /// - /// ``` - /// use regex_automata::{hybrid::dfa::DFA, MatchError, Input}; - /// - /// let dfa = DFA::builder() - /// .configure(DFA::config().specialize_start_states(true)) - /// .build(r"[a-z]+")?; - /// let mut cache = dfa.create_cache(); - /// - /// let haystack = "123 foobar 4567".as_bytes(); - /// let sid = dfa.start_state_forward(&mut cache, &Input::new(haystack))?; - /// // The ID returned by 'start_state_forward' will always be tagged as - /// // a start state when start state specialization is enabled. - /// assert!(sid.is_tagged()); - /// assert!(sid.is_start()); - /// - /// # Ok::<(), Box>(()) - /// ``` - /// - /// Compare the above with the default lazy DFA configuration where - /// start states are _not_ specialized. In this case, the start state - /// is not tagged and `sid.is_start()` returns false. - /// - /// ``` - /// use regex_automata::{hybrid::dfa::DFA, MatchError, Input}; - /// - /// let dfa = DFA::new(r"[a-z]+")?; - /// let mut cache = dfa.create_cache(); - /// - /// let haystack = "123 foobar 4567".as_bytes(); - /// let sid = dfa.start_state_forward(&mut cache, &Input::new(haystack))?; - /// // Start states are not tagged in the default configuration! - /// assert!(!sid.is_tagged()); - /// assert!(!sid.is_start()); - /// - /// # Ok::<(), Box>(()) - /// ``` - pub fn specialize_start_states(mut self, yes: bool) -> Config { - self.specialize_start_states = Some(yes); - self - } - - /// Sets the maximum amount of heap memory, in bytes, to allocate to the - /// cache for use during a lazy DFA search. If the lazy DFA would otherwise - /// use more heap memory, then, depending on other configuration knobs, - /// either stop the search and return an error or clear the cache and - /// continue the search. - /// - /// The default cache capacity is some "reasonable" number that will - /// accommodate most regular expressions. You may find that if you need - /// to build a large DFA then it may be necessary to increase the cache - /// capacity. - /// - /// Note that while building a lazy DFA will do a "minimum" check to ensure - /// the capacity is big enough, this is more or less about correctness. - /// If the cache is bigger than the minimum but still "too small," then the - /// lazy DFA could wind up spending a lot of time clearing the cache and - /// recomputing transitions, thus negating the performance benefits of a - /// lazy DFA. Thus, setting the cache capacity is mostly an experimental - /// endeavor. For most common patterns, however, the default should be - /// sufficient. - /// - /// For more details on how the lazy DFA's cache is used, see the - /// documentation for [`Cache`]. - /// - /// # Example - /// - /// This example shows what happens if the configured cache capacity is - /// too small. In such cases, one can override the cache capacity to make - /// it bigger. Alternatively, one might want to use less memory by setting - /// a smaller cache capacity. - /// - /// ``` - /// # if cfg!(miri) { return Ok(()); } // miri takes too long - /// use regex_automata::{hybrid::dfa::DFA, HalfMatch, Input}; - /// - /// let pattern = r"\p{L}{1000}"; - /// - /// // The default cache capacity is likely too small to deal with regexes - /// // that are very large. Large repetitions of large Unicode character - /// // classes are a common way to make very large regexes. - /// let _ = DFA::new(pattern).unwrap_err(); - /// // Bump up the capacity to something bigger. 
- /// let dfa = DFA::builder() - /// .configure(DFA::config().cache_capacity(100 * (1<<20))) // 100 MB - /// .build(pattern)?; - /// let mut cache = dfa.create_cache(); - /// - /// let haystack = "ͰͲͶͿΆΈΉΊΌΎΏΑΒΓΔΕΖΗΘΙ".repeat(50); - /// let expected = Some(HalfMatch::must(0, 2000)); - /// let got = dfa.try_search_fwd(&mut cache, &Input::new(&haystack))?; - /// assert_eq!(expected, got); - /// - /// # Ok::<(), Box>(()) - /// ``` - pub fn cache_capacity(mut self, bytes: usize) -> Config { - self.cache_capacity = Some(bytes); - self - } - - /// Configures construction of a lazy DFA to use the minimum cache capacity - /// if the configured capacity is otherwise too small for the provided NFA. - /// - /// This is useful if you never want lazy DFA construction to fail because - /// of a capacity that is too small. - /// - /// In general, this option is typically not a good idea. In particular, - /// while a minimum cache capacity does permit the lazy DFA to function - /// where it otherwise couldn't, it's plausible that it may not function - /// well if it's constantly running out of room. In that case, the speed - /// advantages of the lazy DFA may be negated. On the other hand, the - /// "minimum" cache capacity computed may not be completely accurate and - /// could actually be bigger than what is really necessary. Therefore, it - /// is plausible that using the minimum cache capacity could still result - /// in very good performance. - /// - /// This is disabled by default. - /// - /// # Example - /// - /// This example shows what happens if the configured cache capacity is - /// too small. In such cases, one could override the capacity explicitly. - /// An alternative, demonstrated here, let's us force construction to use - /// the minimum cache capacity if the configured capacity is otherwise - /// too small. - /// - /// ``` - /// # if cfg!(miri) { return Ok(()); } // miri takes too long - /// use regex_automata::{hybrid::dfa::DFA, HalfMatch, Input}; - /// - /// let pattern = r"\p{L}{1000}"; - /// - /// // The default cache capacity is likely too small to deal with regexes - /// // that are very large. Large repetitions of large Unicode character - /// // classes are a common way to make very large regexes. - /// let _ = DFA::new(pattern).unwrap_err(); - /// // Configure construction such it automatically selects the minimum - /// // cache capacity if it would otherwise be too small. - /// let dfa = DFA::builder() - /// .configure(DFA::config().skip_cache_capacity_check(true)) - /// .build(pattern)?; - /// let mut cache = dfa.create_cache(); - /// - /// let haystack = "ͰͲͶͿΆΈΉΊΌΎΏΑΒΓΔΕΖΗΘΙ".repeat(50); - /// let expected = Some(HalfMatch::must(0, 2000)); - /// let got = dfa.try_search_fwd(&mut cache, &Input::new(&haystack))?; - /// assert_eq!(expected, got); - /// - /// # Ok::<(), Box>(()) - /// ``` - pub fn skip_cache_capacity_check(mut self, yes: bool) -> Config { - self.skip_cache_capacity_check = Some(yes); - self - } - - /// Configure a lazy DFA search to quit after a certain number of cache - /// clearings. - /// - /// When a minimum is set, then a lazy DFA search will *possibly* "give - /// up" after the minimum number of cache clearings has occurred. This is - /// typically useful in scenarios where callers want to detect whether the - /// lazy DFA search is "efficient" or not. If the cache is cleared too many - /// times, this is a good indicator that it is not efficient, and thus, the - /// caller may wish to use some other regex engine. 
- /// - /// Note that the number of times a cache is cleared is a property of - /// the cache itself. Thus, if a cache is used in a subsequent search - /// with a similarly configured lazy DFA, then it could cause the - /// search to "give up" if the cache needed to be cleared, depending - /// on its internal count and configured minimum. The cache clear - /// count can only be reset to `0` via [`DFA::reset_cache`] (or - /// [`Regex::reset_cache`](crate::hybrid::regex::Regex::reset_cache) if - /// you're using the `Regex` API). - /// - /// By default, no minimum is configured. Thus, a lazy DFA search will - /// never give up due to cache clearings. If you do set this option, you - /// might consider also setting [`Config::minimum_bytes_per_state`] in - /// order for the lazy DFA to take efficiency into account before giving - /// up. - /// - /// # Example - /// - /// This example uses a somewhat pathological configuration to demonstrate - /// the _possible_ behavior of cache clearing and how it might result - /// in a search that returns an error. - /// - /// It is important to note that the precise mechanics of how and when - /// a cache gets cleared is an implementation detail. - /// - /// ``` - /// # if cfg!(miri) { return Ok(()); } // miri takes too long - /// use regex_automata::{hybrid::dfa::DFA, Input, MatchError, MatchErrorKind}; - /// - /// // This is a carefully chosen regex. The idea is to pick one - /// // that requires some decent number of states (hence the bounded - /// // repetition). But we specifically choose to create a class with an - /// // ASCII letter and a non-ASCII letter so that we can check that no new - /// // states are created once the cache is full. Namely, if we fill up the - /// // cache on a haystack of 'a's, then in order to match one 'β', a new - /// // state will need to be created since a 'β' is encoded with multiple - /// // bytes. Since there's no room for this state, the search should quit - /// // at the very first position. - /// let pattern = r"[aβ]{100}"; - /// let dfa = DFA::builder() - /// .configure( - /// // Configure it so that we have the minimum cache capacity - /// // possible. And that if any clearings occur, the search quits. - /// DFA::config() - /// .skip_cache_capacity_check(true) - /// .cache_capacity(0) - /// .minimum_cache_clear_count(Some(0)), - /// ) - /// .build(pattern)?; - /// let mut cache = dfa.create_cache(); - /// - /// // Our search will give up before reaching the end! - /// let haystack = "a".repeat(101).into_bytes(); - /// let result = dfa.try_search_fwd(&mut cache, &Input::new(&haystack)); - /// assert!(matches!( - /// *result.unwrap_err().kind(), - /// MatchErrorKind::GaveUp { .. }, - /// )); - /// - /// // Now that we know the cache is full, if we search a haystack that we - /// // know will require creating at least one new state, it should not - /// // be able to make much progress. - /// let haystack = "β".repeat(101).into_bytes(); - /// let result = dfa.try_search_fwd(&mut cache, &Input::new(&haystack)); - /// assert!(matches!( - /// *result.unwrap_err().kind(), - /// MatchErrorKind::GaveUp { .. }, - /// )); - /// - /// // If we reset the cache, then we should be able to create more states - /// // and make more progress with searching for betas. - /// cache.reset(&dfa); - /// let haystack = "β".repeat(101).into_bytes(); - /// let result = dfa.try_search_fwd(&mut cache, &Input::new(&haystack)); - /// assert!(matches!( - /// *result.unwrap_err().kind(), - /// MatchErrorKind::GaveUp { .. 
}, - /// )); - /// - /// // ... switching back to ASCII still makes progress since it just needs - /// // to set transitions on existing states! - /// let haystack = "a".repeat(101).into_bytes(); - /// let result = dfa.try_search_fwd(&mut cache, &Input::new(&haystack)); - /// assert!(matches!( - /// *result.unwrap_err().kind(), - /// MatchErrorKind::GaveUp { .. }, - /// )); - /// - /// # Ok::<(), Box>(()) - /// ``` - pub fn minimum_cache_clear_count(mut self, min: Option) -> Config { - self.minimum_cache_clear_count = Some(min); - self - } - - /// Configure a lazy DFA search to quit only when its efficiency drops - /// below the given minimum. - /// - /// The efficiency of the cache is determined by the number of DFA states - /// compiled per byte of haystack searched. For example, if the efficiency - /// is 2, then it means the lazy DFA is creating a new DFA state after - /// searching approximately 2 bytes in a haystack. Generally speaking, 2 - /// is quite bad and it's likely that even a slower regex engine like the - /// [`PikeVM`](crate::nfa::thompson::pikevm::PikeVM) would be faster. - /// - /// This has no effect if [`Config::minimum_cache_clear_count`] is not set. - /// Namely, this option only kicks in when the cache has been cleared more - /// than the minimum number. If no minimum is set, then the cache is simply - /// cleared whenever it fills up and it is impossible for the lazy DFA to - /// quit due to ineffective use of the cache. - /// - /// In general, if one is setting [`Config::minimum_cache_clear_count`], - /// then one should probably also set this knob as well. The reason is - /// that the absolute number of times the cache is cleared is generally - /// not a great predictor of efficiency. For example, if a new DFA state - /// is created for every 1,000 bytes searched, then it wouldn't be hard - /// for the cache to get cleared more than `N` times and then cause the - /// lazy DFA to quit. But a new DFA state every 1,000 bytes is likely quite - /// good from a performance perspective, and it's likely that the lazy - /// DFA should continue searching, even if it requires clearing the cache - /// occasionally. - /// - /// Finally, note that if you're implementing your own lazy DFA search - /// routine and also want this efficiency check to work correctly, then - /// you'll need to use the following routines to record search progress: - /// - /// * Call [`Cache::search_start`] at the beginning of every search. - /// * Call [`Cache::search_update`] whenever [`DFA::next_state`] is - /// called. - /// * Call [`Cache::search_finish`] before completing a search. (It is - /// not strictly necessary to call this when an error is returned, as - /// `Cache::search_start` will automatically finish the previous search - /// for you. But calling it where possible before returning helps improve - /// the accuracy of how many bytes have actually been searched.) - pub fn minimum_bytes_per_state(mut self, min: Option) -> Config { - self.minimum_bytes_per_state = Some(min); - self - } - - /// Returns the match semantics set in this configuration. - pub fn get_match_kind(&self) -> MatchKind { - self.match_kind.unwrap_or(MatchKind::LeftmostFirst) - } - - /// Returns the prefilter set in this configuration, if one at all. - pub fn get_prefilter(&self) -> Option<&Prefilter> { - self.pre.as_ref().unwrap_or(&None).as_ref() - } - - /// Returns whether this configuration has enabled anchored starting states - /// for every pattern in the DFA. 
- pub fn get_starts_for_each_pattern(&self) -> bool { - self.starts_for_each_pattern.unwrap_or(false) - } - - /// Returns whether this configuration has enabled byte classes or not. - /// This is typically a debugging oriented option, as disabling it confers - /// no speed benefit. - pub fn get_byte_classes(&self) -> bool { - self.byte_classes.unwrap_or(true) - } - - /// Returns whether this configuration has enabled heuristic Unicode word - /// boundary support. When enabled, it is possible for a search to return - /// an error. - pub fn get_unicode_word_boundary(&self) -> bool { - self.unicode_word_boundary.unwrap_or(false) - } - - /// Returns whether this configuration will instruct the lazy DFA to enter - /// a quit state whenever the given byte is seen during a search. When at - /// least one byte has this enabled, it is possible for a search to return - /// an error. - pub fn get_quit(&self, byte: u8) -> bool { - self.quitset.map_or(false, |q| q.contains(byte)) - } - - /// Returns whether this configuration will instruct the lazy DFA to - /// "specialize" start states. When enabled, the lazy DFA will tag start - /// states so that search routines using the lazy DFA can detect when - /// it's in a start state and do some kind of optimization (like run a - /// prefilter). - pub fn get_specialize_start_states(&self) -> bool { - self.specialize_start_states.unwrap_or(false) - } - - /// Returns the cache capacity set on this configuration. - pub fn get_cache_capacity(&self) -> usize { - self.cache_capacity.unwrap_or(2 * (1 << 20)) - } - - /// Returns whether the cache capacity check should be skipped. - pub fn get_skip_cache_capacity_check(&self) -> bool { - self.skip_cache_capacity_check.unwrap_or(false) - } - - /// Returns, if set, the minimum number of times the cache must be cleared - /// before a lazy DFA search can give up. When no minimum is set, then a - /// search will never quit and will always clear the cache whenever it - /// fills up. - pub fn get_minimum_cache_clear_count(&self) -> Option { - self.minimum_cache_clear_count.unwrap_or(None) - } - - /// Returns, if set, the minimum number of bytes per state that need to be - /// processed in order for the lazy DFA to keep going. If the minimum falls - /// below this number (and the cache has been cleared a minimum number of - /// times), then the lazy DFA will return a "gave up" error. - pub fn get_minimum_bytes_per_state(&self) -> Option { - self.minimum_bytes_per_state.unwrap_or(None) - } - - /// Returns the minimum lazy DFA cache capacity required for the given NFA. - /// - /// The cache capacity required for a particular NFA may change without - /// notice. Callers should not rely on it being stable. - /// - /// This is useful for informational purposes, but can also be useful for - /// other reasons. For example, if one wants to check the minimum cache - /// capacity themselves or if one wants to set the capacity based on the - /// minimum. - /// - /// This may return an error if this configuration does not support all of - /// the instructions used in the given NFA. For example, if the NFA has a - /// Unicode word boundary but this configuration does not enable heuristic - /// support for Unicode word boundaries. 
- pub fn get_minimum_cache_capacity( - &self, - nfa: &thompson::NFA, - ) -> Result { - let quitset = self.quit_set_from_nfa(nfa)?; - let classes = self.byte_classes_from_nfa(nfa, &quitset); - let starts = self.get_starts_for_each_pattern(); - Ok(minimum_cache_capacity(nfa, &classes, starts)) - } - - /// Returns the byte class map used during search from the given NFA. - /// - /// If byte classes are disabled on this configuration, then a map is - /// returned that puts each byte in its own equivalent class. - fn byte_classes_from_nfa( - &self, - nfa: &thompson::NFA, - quit: &ByteSet, - ) -> ByteClasses { - if !self.get_byte_classes() { - // The lazy DFA will always use the equivalence class map, but - // enabling this option is useful for debugging. Namely, this will - // cause all transitions to be defined over their actual bytes - // instead of an opaque equivalence class identifier. The former is - // much easier to grok as a human. - ByteClasses::singletons() - } else { - let mut set = nfa.byte_class_set().clone(); - // It is important to distinguish any "quit" bytes from all other - // bytes. Otherwise, a non-quit byte may end up in the same class - // as a quit byte, and thus cause the DFA stop when it shouldn't. - // - // Test case: - // - // regex-cli find match hybrid --unicode-word-boundary \ - // -p '^#' -p '\b10\.55\.182\.100\b' -y @conn.json.1000x.log - if !quit.is_empty() { - set.add_set(&quit); - } - set.byte_classes() - } - } - - /// Return the quit set for this configuration and the given NFA. - /// - /// This may return an error if the NFA is incompatible with this - /// configuration's quit set. For example, if the NFA has a Unicode word - /// boundary and the quit set doesn't include non-ASCII bytes. - fn quit_set_from_nfa( - &self, - nfa: &thompson::NFA, - ) -> Result { - let mut quit = self.quitset.unwrap_or(ByteSet::empty()); - if nfa.look_set_any().contains_word_unicode() { - if self.get_unicode_word_boundary() { - for b in 0x80..=0xFF { - quit.add(b); - } - } else { - // If heuristic support for Unicode word boundaries wasn't - // enabled, then we can still check if our quit set is correct. - // If the caller set their quit bytes in a way that causes the - // DFA to quit on at least all non-ASCII bytes, then that's all - // we need for heuristic support to work. - if !quit.contains_range(0x80, 0xFF) { - return Err( - BuildError::unsupported_dfa_word_boundary_unicode(), - ); - } - } - } - Ok(quit) - } - - /// Overwrite the default configuration such that the options in `o` are - /// always used. If an option in `o` is not set, then the corresponding - /// option in `self` is used. If it's not set in `self` either, then it - /// remains not set. 
- fn overwrite(&self, o: Config) -> Config { - Config { - match_kind: o.match_kind.or(self.match_kind), - pre: o.pre.or_else(|| self.pre.clone()), - starts_for_each_pattern: o - .starts_for_each_pattern - .or(self.starts_for_each_pattern), - byte_classes: o.byte_classes.or(self.byte_classes), - unicode_word_boundary: o - .unicode_word_boundary - .or(self.unicode_word_boundary), - quitset: o.quitset.or(self.quitset), - specialize_start_states: o - .specialize_start_states - .or(self.specialize_start_states), - cache_capacity: o.cache_capacity.or(self.cache_capacity), - skip_cache_capacity_check: o - .skip_cache_capacity_check - .or(self.skip_cache_capacity_check), - minimum_cache_clear_count: o - .minimum_cache_clear_count - .or(self.minimum_cache_clear_count), - minimum_bytes_per_state: o - .minimum_bytes_per_state - .or(self.minimum_bytes_per_state), - } - } -} - -/// A builder for constructing a lazy deterministic finite automaton from -/// regular expressions. -/// -/// As a convenience, [`DFA::builder`] is an alias for [`Builder::new`]. The -/// advantage of the former is that it often lets you avoid importing the -/// `Builder` type directly. -/// -/// This builder provides two main things: -/// -/// 1. It provides a few different `build` routines for actually constructing -/// a DFA from different kinds of inputs. The most convenient is -/// [`Builder::build`], which builds a DFA directly from a pattern string. The -/// most flexible is [`Builder::build_from_nfa`], which builds a DFA straight -/// from an NFA. -/// 2. The builder permits configuring a number of things. -/// [`Builder::configure`] is used with [`Config`] to configure aspects of -/// the DFA and the construction process itself. [`Builder::syntax`] and -/// [`Builder::thompson`] permit configuring the regex parser and Thompson NFA -/// construction, respectively. The syntax and thompson configurations only -/// apply when building from a pattern string. -/// -/// This builder always constructs a *single* lazy DFA. As such, this builder -/// can only be used to construct regexes that either detect the presence -/// of a match or find the end location of a match. A single DFA cannot -/// produce both the start and end of a match. For that information, use a -/// [`Regex`](crate::hybrid::regex::Regex), which can be similarly configured -/// using [`regex::Builder`](crate::hybrid::regex::Builder). The main reason -/// to use a DFA directly is if the end location of a match is enough for your -/// use case. Namely, a `Regex` will construct two lazy DFAs instead of one, -/// since a second reverse DFA is needed to find the start of a match. -/// -/// # Example -/// -/// This example shows how to build a lazy DFA that uses a tiny cache capacity -/// and completely disables Unicode. That is: -/// -/// * Things such as `\w`, `.` and `\b` are no longer Unicode-aware. `\w` -/// and `\b` are ASCII-only while `.` matches any byte except for `\n` -/// (instead of any UTF-8 encoding of a Unicode scalar value except for -/// `\n`). Things that are Unicode only, such as `\pL`, are not allowed. -/// * The pattern itself is permitted to match invalid UTF-8. For example, -/// things like `[^a]` that match any byte except for `a` are permitted. 
-/// -/// ``` -/// use regex_automata::{ -/// hybrid::dfa::DFA, -/// nfa::thompson, -/// util::syntax, -/// HalfMatch, Input, -/// }; -/// -/// let dfa = DFA::builder() -/// .configure(DFA::config().cache_capacity(5_000)) -/// .thompson(thompson::Config::new().utf8(false)) -/// .syntax(syntax::Config::new().unicode(false).utf8(false)) -/// .build(r"foo[^b]ar.*")?; -/// let mut cache = dfa.create_cache(); -/// -/// let haystack = b"\xFEfoo\xFFar\xE2\x98\xFF\n"; -/// let expected = Some(HalfMatch::must(0, 10)); -/// let got = dfa.try_search_fwd(&mut cache, &Input::new(haystack))?; -/// assert_eq!(expected, got); -/// -/// # Ok::<(), Box>(()) -/// ``` -#[derive(Clone, Debug)] -pub struct Builder { - config: Config, - #[cfg(feature = "syntax")] - thompson: thompson::Compiler, -} - -impl Builder { - /// Create a new lazy DFA builder with the default configuration. - pub fn new() -> Builder { - Builder { - config: Config::default(), - #[cfg(feature = "syntax")] - thompson: thompson::Compiler::new(), - } - } - - /// Build a lazy DFA from the given pattern. - /// - /// If there was a problem parsing or compiling the pattern, then an error - /// is returned. - #[cfg(feature = "syntax")] - pub fn build(&self, pattern: &str) -> Result { - self.build_many(&[pattern]) - } - - /// Build a lazy DFA from the given patterns. - /// - /// When matches are returned, the pattern ID corresponds to the index of - /// the pattern in the slice given. - #[cfg(feature = "syntax")] - pub fn build_many>( - &self, - patterns: &[P], - ) -> Result { - let nfa = self - .thompson - .clone() - // We can always forcefully disable captures because DFAs do not - // support them. - .configure( - thompson::Config::new() - .which_captures(thompson::WhichCaptures::None), - ) - .build_many(patterns) - .map_err(BuildError::nfa)?; - self.build_from_nfa(nfa) - } - - /// Build a DFA from the given NFA. - /// - /// Note that this requires owning a `thompson::NFA`. While this may force - /// you to clone the NFA, such a clone is not a deep clone. Namely, NFAs - /// are defined internally to support shared ownership such that cloning is - /// very cheap. - /// - /// # Example - /// - /// This example shows how to build a lazy DFA if you already have an NFA - /// in hand. - /// - /// ``` - /// use regex_automata::{ - /// hybrid::dfa::DFA, - /// nfa::thompson, - /// HalfMatch, Input, - /// }; - /// - /// let haystack = "foo123bar"; - /// - /// // This shows how to set non-default options for building an NFA. - /// let nfa = thompson::Compiler::new() - /// .configure(thompson::Config::new().shrink(true)) - /// .build(r"[0-9]+")?; - /// let dfa = DFA::builder().build_from_nfa(nfa)?; - /// let mut cache = dfa.create_cache(); - /// let expected = Some(HalfMatch::must(0, 6)); - /// let got = dfa.try_search_fwd(&mut cache, &Input::new(haystack))?; - /// assert_eq!(expected, got); - /// - /// # Ok::<(), Box>(()) - /// ``` - pub fn build_from_nfa( - &self, - nfa: thompson::NFA, - ) -> Result { - let quitset = self.config.quit_set_from_nfa(&nfa)?; - let classes = self.config.byte_classes_from_nfa(&nfa, &quitset); - // Check that we can fit at least a few states into our cache, - // otherwise it's pretty senseless to use the lazy DFA. This does have - // a possible failure mode though. This assumes the maximum size of a - // state in powerset space (so, the total number of NFA states), which - // may never actually materialize, and could be quite a bit larger - // than the actual biggest state. 
If this turns out to be a problem, - // we could expose a knob that disables this check. But if so, we have - // to be careful not to panic in other areas of the code (the cache - // clearing and init code) that tend to assume some minimum useful - // cache capacity. - let min_cache = minimum_cache_capacity( - &nfa, - &classes, - self.config.get_starts_for_each_pattern(), - ); - let mut cache_capacity = self.config.get_cache_capacity(); - if cache_capacity < min_cache { - // When the caller has asked us to skip the cache capacity check, - // then we simply force the cache capacity to its minimum amount - // and mush on. - if self.config.get_skip_cache_capacity_check() { - debug!( - "given capacity ({cache_capacity}) is too small, \ - since skip_cache_capacity_check is enabled, \ - setting cache capacity to minimum ({min_cache})", - ); - cache_capacity = min_cache; - } else { - return Err(BuildError::insufficient_cache_capacity( - min_cache, - cache_capacity, - )); - } - } - // We also need to check that we can fit at least some small number - // of states in our state ID space. This is unlikely to trigger in - // >=32-bit systems, but 16-bit systems have a pretty small state ID - // space since a number of bits are used up as sentinels. - if let Err(err) = minimum_lazy_state_id(&classes) { - return Err(BuildError::insufficient_state_id_capacity(err)); - } - let stride2 = classes.stride2(); - let start_map = StartByteMap::new(nfa.look_matcher()); - Ok(DFA { - config: self.config.clone(), - nfa, - stride2, - start_map, - classes, - quitset, - cache_capacity, - }) - } - - /// Apply the given lazy DFA configuration options to this builder. - pub fn configure(&mut self, config: Config) -> &mut Builder { - self.config = self.config.overwrite(config); - self - } - - /// Set the syntax configuration for this builder using - /// [`syntax::Config`](crate::util::syntax::Config). - /// - /// This permits setting things like case insensitivity, Unicode and multi - /// line mode. - /// - /// These settings only apply when constructing a lazy DFA directly from a - /// pattern. - #[cfg(feature = "syntax")] - pub fn syntax( - &mut self, - config: crate::util::syntax::Config, - ) -> &mut Builder { - self.thompson.syntax(config); - self - } - - /// Set the Thompson NFA configuration for this builder using - /// [`nfa::thompson::Config`](crate::nfa::thompson::Config). - /// - /// This permits setting things like whether the DFA should match the regex - /// in reverse or if additional time should be spent shrinking the size of - /// the NFA. - /// - /// These settings only apply when constructing a DFA directly from a - /// pattern. - #[cfg(feature = "syntax")] - pub fn thompson(&mut self, config: thompson::Config) -> &mut Builder { - self.thompson.configure(config); - self - } -} - -/// Represents the current state of an overlapping search. -/// -/// This is used for overlapping searches since they need to know something -/// about the previous search. For example, when multiple patterns match at the -/// same position, this state tracks the last reported pattern so that the next -/// search knows whether to report another matching pattern or continue with -/// the search at the next position. Additionally, it also tracks which state -/// the last search call terminated in. -/// -/// This type provides little introspection capabilities. The only thing a -/// caller can do is construct it and pass it around to permit search routines -/// to use it to track state, and also ask whether a match has been found. 
-/// -/// Callers should always provide a fresh state constructed via -/// [`OverlappingState::start`] when starting a new search. Reusing state from -/// a previous search may result in incorrect results. -#[derive(Clone, Debug, Eq, PartialEq)] -pub struct OverlappingState { - /// The match reported by the most recent overlapping search to use this - /// state. - /// - /// If a search does not find any matches, then it is expected to clear - /// this value. - pub(crate) mat: Option, - /// The state ID of the state at which the search was in when the call - /// terminated. When this is a match state, `last_match` must be set to a - /// non-None value. - /// - /// A `None` value indicates the start state of the corresponding - /// automaton. We cannot use the actual ID, since any one automaton may - /// have many start states, and which one is in use depends on several - /// search-time factors. - pub(crate) id: Option, - /// The position of the search. - /// - /// When `id` is None (i.e., we are starting a search), this is set to - /// the beginning of the search as given by the caller regardless of its - /// current value. Subsequent calls to an overlapping search pick up at - /// this offset. - pub(crate) at: usize, - /// The index into the matching patterns of the next match to report if the - /// current state is a match state. Note that this may be 1 greater than - /// the total number of matches to report for the current match state. (In - /// which case, no more matches should be reported at the current position - /// and the search should advance to the next position.) - pub(crate) next_match_index: Option, - /// This is set to true when a reverse overlapping search has entered its - /// EOI transitions. - /// - /// This isn't used in a forward search because it knows to stop once the - /// position exceeds the end of the search range. In a reverse search, - /// since we use unsigned offsets, we don't "know" once we've gone past - /// `0`. So the only way to detect it is with this extra flag. The reverse - /// overlapping search knows to terminate specifically after it has - /// reported all matches after following the EOI transition. - pub(crate) rev_eoi: bool, -} - -impl OverlappingState { - /// Create a new overlapping state that begins at the start state of any - /// automaton. - pub fn start() -> OverlappingState { - OverlappingState { - mat: None, - id: None, - at: 0, - next_match_index: None, - rev_eoi: false, - } - } - - /// Return the match result of the most recent search to execute with this - /// state. - /// - /// A searches will clear this result automatically, such that if no - /// match is found, this will correctly report `None`. - pub fn get_match(&self) -> Option { - self.mat - } -} - -/// Runs the given overlapping `search` function (forwards or backwards) until -/// a match is found whose offset does not split a codepoint. -/// -/// This is *not* always correct to call. It should only be called when the -/// underlying NFA has UTF-8 mode enabled *and* it can produce zero-width -/// matches. Calling this when both of those things aren't true might result -/// in legitimate matches getting skipped. 
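// A minimal sketch of how a caller drives an overlapping search with the
// `OverlappingState` described above. It assumes the hybrid DFA's overlapping
// entry point `try_search_overlapping_fwd`; the loop simply starts from a
// fresh state and keeps calling the search until `get_match()` reports `None`.
//
// use regex_automata::{
//     hybrid::dfa::{DFA, OverlappingState},
//     Input, MatchKind,
// };
//
// fn main() -> Result<(), Box<dyn std::error::Error>> {
//     // Overlapping searches want "All" match semantics so that every match
//     // state is reported, not just the leftmost-first one.
//     let dfa = DFA::builder()
//         .configure(DFA::config().match_kind(MatchKind::All))
//         .build_many(&[r"[a-z]+", r"samwise"])?;
//     let mut cache = dfa.create_cache();
//
//     let input = Input::new("samwise");
//     let mut state = OverlappingState::start();
//     let mut ends = vec![];
//     loop {
//         dfa.try_search_overlapping_fwd(&mut cache, &input, &mut state)?;
//         match state.get_match() {
//             None => break,
//             Some(hm) => ends.push((hm.pattern().as_usize(), hm.offset())),
//         }
//     }
//     // Both patterns report an end-of-match at offset 7.
//     assert!(ends.contains(&(0, 7)));
//     assert!(ends.contains(&(1, 7)));
//     Ok(())
// }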
-#[cold] -#[inline(never)] -fn skip_empty_utf8_splits_overlapping( - input: &Input<'_>, - state: &mut OverlappingState, - mut search: F, -) -> Result<(), MatchError> -where - F: FnMut(&Input<'_>, &mut OverlappingState) -> Result<(), MatchError>, -{ - // Note that this routine works for forwards and reverse searches - // even though there's no code here to handle those cases. That's - // because overlapping searches drive themselves to completion via - // `OverlappingState`. So all we have to do is push it until no matches are - // found. - - let mut hm = match state.get_match() { - None => return Ok(()), - Some(hm) => hm, - }; - if input.get_anchored().is_anchored() { - if !input.is_char_boundary(hm.offset()) { - state.mat = None; - } - return Ok(()); - } - while !input.is_char_boundary(hm.offset()) { - search(input, state)?; - hm = match state.get_match() { - None => return Ok(()), - Some(hm) => hm, - }; - } - Ok(()) -} - -/// Based on the minimum number of states required for a useful lazy DFA cache, -/// this returns the minimum lazy state ID that must be representable. -/// -/// It's not likely for this to have any impact 32-bit systems (or higher), but -/// on 16-bit systems, the lazy state ID space is quite constrained and thus -/// may be insufficient if our MIN_STATES value is (for some reason) too high. -fn minimum_lazy_state_id( - classes: &ByteClasses, -) -> Result { - let stride = 1 << classes.stride2(); - let min_state_index = MIN_STATES.checked_sub(1).unwrap(); - LazyStateID::new(min_state_index * stride) -} - -/// Based on the minimum number of states required for a useful lazy DFA cache, -/// this returns a heuristic minimum number of bytes of heap space required. -/// -/// This is a "heuristic" because the minimum it returns is likely bigger than -/// the true minimum. Namely, it assumes that each powerset NFA/DFA state uses -/// the maximum number of NFA states (all of them). This is likely bigger -/// than what is required in practice. Computing the true minimum effectively -/// requires determinization, which is probably too much work to do for a -/// simple check like this. -/// -/// One of the issues with this approach IMO is that it requires that this -/// be in sync with the calculation above for computing how much heap memory -/// the DFA cache uses. If we get it wrong, it's possible for example for the -/// minimum to be smaller than the computed heap memory, and thus, it may be -/// the case that we can't add the required minimum number of states. That in -/// turn will make lazy DFA panic because we assume that we can add at least a -/// minimum number of states. -/// -/// Another approach would be to always allow the minimum number of states to -/// be added to the lazy DFA cache, even if it exceeds the configured cache -/// limit. This does mean that the limit isn't really a limit in all cases, -/// which is unfortunate. But it does at least guarantee that the lazy DFA can -/// always make progress, even if it is slow. (This approach is very similar to -/// enabling the 'skip_cache_capacity_check' config knob, except it wouldn't -/// rely on cache size calculation. Instead, it would just always permit a -/// minimum number of states to be added.) 
-fn minimum_cache_capacity( - nfa: &thompson::NFA, - classes: &ByteClasses, - starts_for_each_pattern: bool, -) -> usize { - const ID_SIZE: usize = size_of::(); - const STATE_SIZE: usize = size_of::(); - - let stride = 1 << classes.stride2(); - let states_len = nfa.states().len(); - let sparses = 2 * states_len * NFAStateID::SIZE; - let trans = MIN_STATES * stride * ID_SIZE; - - let mut starts = Start::len() * ID_SIZE; - if starts_for_each_pattern { - starts += (Start::len() * nfa.pattern_len()) * ID_SIZE; - } - - // The min number of states HAS to be at least 4: we have 3 sentinel states - // and then we need space for one more when we save a state after clearing - // the cache. We also need space for one more, otherwise we get stuck in a - // loop where we try to add a 5th state, which gets rejected, which clears - // the cache, which adds back a saved state (4th total state) which then - // tries to add the 5th state again. - assert!(MIN_STATES >= 5, "minimum number of states has to be at least 5"); - // The minimum number of non-sentinel states. We consider this separately - // because sentinel states are much smaller in that they contain no NFA - // states. Given our aggressive calculation here, it's worth being more - // precise with the number of states we need. - let non_sentinel = MIN_STATES.checked_sub(SENTINEL_STATES).unwrap(); - - // Every `State` has 5 bytes for flags, 4 bytes (max) for the number of - // patterns, followed by 32-bit encodings of patterns and then delta - // varint encodings of NFA state IDs. We use the worst case (which isn't - // technically possible) of 5 bytes for each NFA state ID. - // - // HOWEVER, three of the states needed by a lazy DFA are just the sentinel - // unknown, dead and quit states. Those states have a known size and it is - // small. - let dead_state_size = State::dead().memory_usage(); - let max_state_size = 5 + 4 + (nfa.pattern_len() * 4) + (states_len * 5); - let states = (SENTINEL_STATES * (STATE_SIZE + dead_state_size)) - + (non_sentinel * (STATE_SIZE + max_state_size)); - // NOTE: We don't double count heap memory used by State for this map since - // we use reference counting to avoid doubling memory usage. (This tends to - // be where most memory is allocated in the cache.) - let states_to_sid = (MIN_STATES * STATE_SIZE) + (MIN_STATES * ID_SIZE); - let stack = states_len * NFAStateID::SIZE; - let scratch_state_builder = max_state_size; - - trans - + starts - + states - + states_to_sid - + sparses - + stack - + scratch_state_builder -} - -#[cfg(all(test, feature = "syntax"))] -mod tests { - use super::*; - - // Tests that we handle heuristic Unicode word boundary support in reverse - // DFAs in the specific case of contextual searches. - // - // I wrote this test when I discovered a bug in how heuristic word - // boundaries were handled. Namely, that the starting state selection - // didn't consider the DFA's quit byte set when looking at the byte - // immediately before the start of the search (or immediately after the - // end of the search in the case of a reverse search). As a result, it was - // possible for '\bfoo\b' to match 'β123' because the trailing \xB2 byte - // in the 'β' codepoint would be treated as a non-word character. But of - // course, this search should trigger the DFA to quit, since there is a - // non-ASCII byte in consideration. - // - // Thus, I fixed 'start_state_{forward,reverse}' to check the quit byte set - // if it wasn't empty. 
The forward case is tested in the doc test for the - // Config::unicode_word_boundary API. We test the reverse case here, which - // is sufficiently niche that it doesn't really belong in a doc test. - #[test] - fn heuristic_unicode_reverse() { - let dfa = DFA::builder() - .configure(DFA::config().unicode_word_boundary(true)) - .thompson(thompson::Config::new().reverse(true)) - .build(r"\b[0-9]+\b") - .unwrap(); - let mut cache = dfa.create_cache(); - - let input = Input::new("β123").range(2..); - let expected = MatchError::quit(0xB2, 1); - let got = dfa.try_search_rev(&mut cache, &input); - assert_eq!(Err(expected), got); - - let input = Input::new("123β").range(..3); - let expected = MatchError::quit(0xCE, 3); - let got = dfa.try_search_rev(&mut cache, &input); - assert_eq!(Err(expected), got); - } -} diff --git a/vendor/regex-automata/src/hybrid/error.rs b/vendor/regex-automata/src/hybrid/error.rs deleted file mode 100644 index 93e58dd54f22b6..00000000000000 --- a/vendor/regex-automata/src/hybrid/error.rs +++ /dev/null @@ -1,241 +0,0 @@ -use crate::{hybrid::id::LazyStateIDError, nfa, util::search::Anchored}; - -/// An error that occurs when initial construction of a lazy DFA fails. -/// -/// A build error can occur when insufficient cache capacity is configured or -/// if something about the NFA is unsupported. (For example, if one attempts -/// to build a lazy DFA without heuristic Unicode support but with an NFA that -/// contains a Unicode word boundary.) -/// -/// This error does not provide many introspection capabilities. There are -/// generally only two things you can do with it: -/// -/// * Obtain a human readable message via its `std::fmt::Display` impl. -/// * Access an underlying -/// [`nfa::thompson::BuildError`](crate::nfa::thompson::BuildError) -/// type from its `source` method via the `std::error::Error` trait. This error -/// only occurs when using convenience routines for building a lazy DFA -/// directly from a pattern string. -/// -/// When the `std` feature is enabled, this implements the `std::error::Error` -/// trait. 
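// A small sketch of when a `BuildError` surfaces in practice: the default
// configuration checks the requested cache capacity against a computed
// minimum, and `skip_cache_capacity_check` is off by default, so an absurdly
// small capacity makes `build()` fail instead of producing a useless DFA.
//
// use regex_automata::hybrid::dfa::DFA;
//
// fn main() {
//     let result = DFA::builder()
//         .configure(DFA::config().cache_capacity(10))
//         .build(r"\w+");
//     // The Display impl reports both the given and the minimum capacity,
//     // e.g. "given cache capacity (10) is smaller than minimum required (...)".
//     assert!(result.is_err());
//     println!("{}", result.unwrap_err());
// }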
-#[derive(Clone, Debug)] -pub struct BuildError { - kind: BuildErrorKind, -} - -#[derive(Clone, Debug)] -enum BuildErrorKind { - NFA(nfa::thompson::BuildError), - InsufficientCacheCapacity { minimum: usize, given: usize }, - InsufficientStateIDCapacity { err: LazyStateIDError }, - Unsupported(&'static str), -} - -impl BuildError { - pub(crate) fn nfa(err: nfa::thompson::BuildError) -> BuildError { - BuildError { kind: BuildErrorKind::NFA(err) } - } - - pub(crate) fn insufficient_cache_capacity( - minimum: usize, - given: usize, - ) -> BuildError { - BuildError { - kind: BuildErrorKind::InsufficientCacheCapacity { minimum, given }, - } - } - - pub(crate) fn insufficient_state_id_capacity( - err: LazyStateIDError, - ) -> BuildError { - BuildError { - kind: BuildErrorKind::InsufficientStateIDCapacity { err }, - } - } - - pub(crate) fn unsupported_dfa_word_boundary_unicode() -> BuildError { - let msg = "cannot build lazy DFAs for regexes with Unicode word \ - boundaries; switch to ASCII word boundaries, or \ - heuristically enable Unicode word boundaries or use a \ - different regex engine"; - BuildError { kind: BuildErrorKind::Unsupported(msg) } - } -} - -#[cfg(feature = "std")] -impl std::error::Error for BuildError { - fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { - match self.kind { - BuildErrorKind::NFA(ref err) => Some(err), - _ => None, - } - } -} - -impl core::fmt::Display for BuildError { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - match self.kind { - BuildErrorKind::NFA(_) => write!(f, "error building NFA"), - BuildErrorKind::InsufficientCacheCapacity { minimum, given } => { - write!( - f, - "given cache capacity ({given}) is smaller than \ - minimum required ({minimum})", - ) - } - BuildErrorKind::InsufficientStateIDCapacity { ref err } => { - err.fmt(f) - } - BuildErrorKind::Unsupported(ref msg) => { - write!(f, "unsupported regex feature for DFAs: {msg}") - } - } - } -} - -/// An error that can occur when computing the start state for a search. -/// -/// Computing a start state can fail for a few reasons, either -/// based on incorrect configuration or even based on whether -/// the look-behind byte triggers a quit state. Typically -/// one does not need to handle this error if you're using -/// [`DFA::start_state_forward`](crate::hybrid::dfa::DFA::start_state_forward) -/// (or its reverse counterpart), as that routine automatically converts -/// `StartError` to a [`MatchError`](crate::MatchError) for you. -/// -/// This error may be returned by the -/// [`DFA::start_state`](crate::hybrid::dfa::DFA::start_state) routine. -/// -/// This error implements the `std::error::Error` trait when the `std` feature -/// is enabled. -/// -/// This error is marked as non-exhaustive. New variants may be added in a -/// semver compatible release. -#[non_exhaustive] -#[derive(Clone, Debug)] -pub enum StartError { - /// An error that occurs when cache inefficiency has dropped below the - /// configured heuristic thresholds. - Cache { - /// The underlying cache error that occurred. - err: CacheError, - }, - /// An error that occurs when a starting configuration's look-behind byte - /// is in this DFA's quit set. - Quit { - /// The quit byte that was found. - byte: u8, - }, - /// An error that occurs when the caller requests an anchored mode that - /// isn't supported by the DFA. - UnsupportedAnchored { - /// The anchored mode given that is unsupported. 
- mode: Anchored, - }, -} - -impl StartError { - pub(crate) fn cache(err: CacheError) -> StartError { - StartError::Cache { err } - } - - pub(crate) fn quit(byte: u8) -> StartError { - StartError::Quit { byte } - } - - pub(crate) fn unsupported_anchored(mode: Anchored) -> StartError { - StartError::UnsupportedAnchored { mode } - } -} - -#[cfg(feature = "std")] -impl std::error::Error for StartError { - fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { - match *self { - StartError::Cache { ref err } => Some(err), - _ => None, - } - } -} - -impl core::fmt::Display for StartError { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - match *self { - StartError::Cache { .. } => write!( - f, - "error computing start state because of cache inefficiency" - ), - StartError::Quit { byte } => write!( - f, - "error computing start state because the look-behind byte \ - {:?} triggered a quit state", - crate::util::escape::DebugByte(byte), - ), - StartError::UnsupportedAnchored { mode: Anchored::Yes } => { - write!( - f, - "error computing start state because \ - anchored searches are not supported or enabled" - ) - } - StartError::UnsupportedAnchored { mode: Anchored::No } => { - write!( - f, - "error computing start state because \ - unanchored searches are not supported or enabled" - ) - } - StartError::UnsupportedAnchored { - mode: Anchored::Pattern(pid), - } => { - write!( - f, - "error computing start state because \ - anchored searches for a specific pattern ({}) \ - are not supported or enabled", - pid.as_usize(), - ) - } - } - } -} - -/// An error that occurs when cache usage has become inefficient. -/// -/// One of the weaknesses of a lazy DFA is that it may need to clear its -/// cache repeatedly if it's not big enough. If this happens too much, then it -/// can slow searching down significantly. A mitigation to this is to use -/// heuristics to detect whether the cache is being used efficiently or not. -/// If not, then a lazy DFA can return a `CacheError`. -/// -/// The default configuration of a lazy DFA in this crate is -/// set such that a `CacheError` will never occur. Instead, -/// callers must opt into this behavior with settings like -/// [`dfa::Config::minimum_cache_clear_count`](crate::hybrid::dfa::Config::minimum_cache_clear_count) -/// and -/// [`dfa::Config::minimum_bytes_per_state`](crate::hybrid::dfa::Config::minimum_bytes_per_state). -/// -/// When the `std` feature is enabled, this implements the `std::error::Error` -/// trait. -#[derive(Clone, Debug)] -pub struct CacheError(()); - -impl CacheError { - pub(crate) fn too_many_cache_clears() -> CacheError { - CacheError(()) - } - - pub(crate) fn bad_efficiency() -> CacheError { - CacheError(()) - } -} - -#[cfg(feature = "std")] -impl std::error::Error for CacheError {} - -impl core::fmt::Display for CacheError { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - write!(f, "lazy DFA cache has been cleared too many times") - } -} diff --git a/vendor/regex-automata/src/hybrid/id.rs b/vendor/regex-automata/src/hybrid/id.rs deleted file mode 100644 index 65d8528e71734a..00000000000000 --- a/vendor/regex-automata/src/hybrid/id.rs +++ /dev/null @@ -1,354 +0,0 @@ -/// A state identifier specifically tailored for lazy DFAs. -/// -/// A lazy state ID logically represents a pointer to a DFA state. In practice, -/// by limiting the number of DFA states it can address, it reserves some -/// bits of its representation to encode some additional information. 
That -/// additional information is called a "tag." That tag is used to record -/// whether the state it points to is an unknown, dead, quit, start or match -/// state. -/// -/// When implementing a low level search routine with a lazy DFA, it is -/// necessary to query the type of the current state to know what to do: -/// -/// * **Unknown** - The state has not yet been computed. The -/// parameters used to get this state ID must be re-passed to -/// [`DFA::next_state`](crate::hybrid::dfa::DFA::next_state), which will never -/// return an unknown state ID. -/// * **Dead** - A dead state only has transitions to itself. It indicates that -/// the search cannot do anything else and should stop with whatever result it -/// has. -/// * **Quit** - A quit state indicates that the automaton could not answer -/// whether a match exists or not. Correct search implementations must return a -/// [`MatchError::quit`](crate::MatchError::quit) when a DFA enters a quit -/// state. -/// * **Start** - A start state is a state in which a search can begin. -/// Lazy DFAs usually have more than one start state. Branching on -/// this isn't required for correctness, but a common optimization is -/// to run a prefilter when a search enters a start state. Note that -/// start states are *not* tagged automatically, and one must enable the -/// [`Config::specialize_start_states`](crate::hybrid::dfa::Config::specialize_start_states) -/// setting for start states to be tagged. The reason for this is -/// that a DFA search loop is usually written to execute a prefilter once it -/// enters a start state. But if there is no prefilter, this handling can be -/// quite disastrous as the DFA may ping-pong between the special handling code -/// and a possible optimized hot path for handling untagged states. When start -/// states aren't specialized, then they are untagged and remain in the hot -/// path. -/// * **Match** - A match state indicates that a match has been found. -/// Depending on the semantics of your search implementation, it may either -/// continue until the end of the haystack or a dead state, or it might quit -/// and return the match immediately. -/// -/// As an optimization, the [`is_tagged`](LazyStateID::is_tagged) predicate -/// can be used to determine if a tag exists at all. This is useful to avoid -/// branching on all of the above types for every byte searched. -/// -/// # Example -/// -/// This example shows how `LazyStateID` can be used to implement a correct -/// search routine with minimal branching. In particular, this search routine -/// implements "leftmost" matching, which means that it doesn't immediately -/// stop once a match is found. Instead, it continues until it reaches a dead -/// state. -/// -/// Notice also how a correct search implementation deals with -/// [`CacheError`](crate::hybrid::CacheError)s returned by some of -/// the lazy DFA routines. When a `CacheError` occurs, it returns -/// [`MatchError::gave_up`](crate::MatchError::gave_up). -/// -/// ``` -/// use regex_automata::{ -/// hybrid::dfa::{Cache, DFA}, -/// HalfMatch, MatchError, Input, -/// }; -/// -/// fn find_leftmost_first( -/// dfa: &DFA, -/// cache: &mut Cache, -/// haystack: &[u8], -/// ) -> Result, MatchError> { -/// // The start state is determined by inspecting the position and the -/// // initial bytes of the haystack. Note that start states can never -/// // be match states (since DFAs in this crate delay matches by 1 -/// // byte), so we don't need to check if the start state is a match. 
-/// let mut sid = dfa.start_state_forward( -/// cache, -/// &Input::new(haystack), -/// )?; -/// let mut last_match = None; -/// // Walk all the bytes in the haystack. We can quit early if we see -/// // a dead or a quit state. The former means the automaton will -/// // never transition to any other state. The latter means that the -/// // automaton entered a condition in which its search failed. -/// for (i, &b) in haystack.iter().enumerate() { -/// sid = dfa -/// .next_state(cache, sid, b) -/// .map_err(|_| MatchError::gave_up(i))?; -/// if sid.is_tagged() { -/// if sid.is_match() { -/// last_match = Some(HalfMatch::new( -/// dfa.match_pattern(cache, sid, 0), -/// i, -/// )); -/// } else if sid.is_dead() { -/// return Ok(last_match); -/// } else if sid.is_quit() { -/// // It is possible to enter into a quit state after -/// // observing a match has occurred. In that case, we -/// // should return the match instead of an error. -/// if last_match.is_some() { -/// return Ok(last_match); -/// } -/// return Err(MatchError::quit(b, i)); -/// } -/// // Implementors may also want to check for start states and -/// // handle them differently for performance reasons. But it is -/// // not necessary for correctness. Note that in order to check -/// // for start states, you'll need to enable the -/// // 'specialize_start_states' config knob, otherwise start -/// // states will not be tagged. -/// } -/// } -/// // Matches are always delayed by 1 byte, so we must explicitly walk -/// // the special "EOI" transition at the end of the search. -/// sid = dfa -/// .next_eoi_state(cache, sid) -/// .map_err(|_| MatchError::gave_up(haystack.len()))?; -/// if sid.is_match() { -/// last_match = Some(HalfMatch::new( -/// dfa.match_pattern(cache, sid, 0), -/// haystack.len(), -/// )); -/// } -/// Ok(last_match) -/// } -/// -/// // We use a greedy '+' operator to show how the search doesn't just stop -/// // once a match is detected. It continues extending the match. Using -/// // '[a-z]+?' would also work as expected and stop the search early. -/// // Greediness is built into the automaton. -/// let dfa = DFA::new(r"[a-z]+")?; -/// let mut cache = dfa.create_cache(); -/// let haystack = "123 foobar 4567".as_bytes(); -/// let mat = find_leftmost_first(&dfa, &mut cache, haystack)?.unwrap(); -/// assert_eq!(mat.pattern().as_usize(), 0); -/// assert_eq!(mat.offset(), 10); -/// -/// // Here's another example that tests our handling of the special -/// // EOI transition. This will fail to find a match if we don't call -/// // 'next_eoi_state' at the end of the search since the match isn't found -/// // until the final byte in the haystack. -/// let dfa = DFA::new(r"[0-9]{4}")?; -/// let mut cache = dfa.create_cache(); -/// let haystack = "123 foobar 4567".as_bytes(); -/// let mat = find_leftmost_first(&dfa, &mut cache, haystack)?.unwrap(); -/// assert_eq!(mat.pattern().as_usize(), 0); -/// assert_eq!(mat.offset(), 15); -/// -/// // And note that our search implementation above automatically works -/// // with multi-DFAs. Namely, `dfa.match_pattern(match_state, 0)` selects -/// // the appropriate pattern ID for us. 
-/// let dfa = DFA::new_many(&[r"[a-z]+", r"[0-9]+"])?; -/// let mut cache = dfa.create_cache(); -/// let haystack = "123 foobar 4567".as_bytes(); -/// let mat = find_leftmost_first(&dfa, &mut cache, haystack)?.unwrap(); -/// assert_eq!(mat.pattern().as_usize(), 1); -/// assert_eq!(mat.offset(), 3); -/// let mat = find_leftmost_first(&dfa, &mut cache, &haystack[3..])?.unwrap(); -/// assert_eq!(mat.pattern().as_usize(), 0); -/// assert_eq!(mat.offset(), 7); -/// let mat = find_leftmost_first(&dfa, &mut cache, &haystack[10..])?.unwrap(); -/// assert_eq!(mat.pattern().as_usize(), 1); -/// assert_eq!(mat.offset(), 5); -/// -/// # Ok::<(), Box>(()) -/// ``` -#[derive( - Clone, Copy, Debug, Default, Eq, Hash, PartialEq, PartialOrd, Ord, -)] -pub struct LazyStateID(u32); - -impl LazyStateID { - #[cfg(any(target_pointer_width = "32", target_pointer_width = "64"))] - const MAX_BIT: usize = 31; - - #[cfg(target_pointer_width = "16")] - const MAX_BIT: usize = 15; - - const MASK_UNKNOWN: usize = 1 << (LazyStateID::MAX_BIT); - const MASK_DEAD: usize = 1 << (LazyStateID::MAX_BIT - 1); - const MASK_QUIT: usize = 1 << (LazyStateID::MAX_BIT - 2); - const MASK_START: usize = 1 << (LazyStateID::MAX_BIT - 3); - const MASK_MATCH: usize = 1 << (LazyStateID::MAX_BIT - 4); - pub(crate) const MAX: usize = LazyStateID::MASK_MATCH - 1; - - /// Create a new lazy state ID. - /// - /// If the given identifier exceeds [`LazyStateID::MAX`], then this returns - /// an error. - #[inline] - pub(crate) fn new(id: usize) -> Result { - if id > LazyStateID::MAX { - let attempted = u64::try_from(id).unwrap(); - return Err(LazyStateIDError { attempted }); - } - Ok(LazyStateID::new_unchecked(id)) - } - - /// Create a new lazy state ID without checking whether the given value - /// exceeds [`LazyStateID::MAX`]. - /// - /// While this is unchecked, providing an incorrect value must never - /// sacrifice memory safety. - #[inline] - const fn new_unchecked(id: usize) -> LazyStateID { - // FIXME: Use as_u32() once const functions in traits are stable. - LazyStateID(id as u32) - } - - /// Return this lazy state ID as an untagged `usize`. - /// - /// If this lazy state ID is tagged, then the usize returned is the state - /// ID without the tag. If the ID was not tagged, then the usize returned - /// is equivalent to the state ID. - #[inline] - pub(crate) fn as_usize_untagged(&self) -> usize { - self.as_usize_unchecked() & LazyStateID::MAX - } - - /// Return this lazy state ID as its raw internal `usize` value, which may - /// be tagged (and thus greater than LazyStateID::MAX). - #[inline] - pub(crate) const fn as_usize_unchecked(&self) -> usize { - // FIXME: Use as_usize() once const functions in traits are stable. - self.0 as usize - } - - #[inline] - pub(crate) const fn to_unknown(&self) -> LazyStateID { - LazyStateID::new_unchecked( - self.as_usize_unchecked() | LazyStateID::MASK_UNKNOWN, - ) - } - - #[inline] - pub(crate) const fn to_dead(&self) -> LazyStateID { - LazyStateID::new_unchecked( - self.as_usize_unchecked() | LazyStateID::MASK_DEAD, - ) - } - - #[inline] - pub(crate) const fn to_quit(&self) -> LazyStateID { - LazyStateID::new_unchecked( - self.as_usize_unchecked() | LazyStateID::MASK_QUIT, - ) - } - - /// Return this lazy state ID as a state ID that is tagged as a start - /// state. 
- #[inline] - pub(crate) const fn to_start(&self) -> LazyStateID { - LazyStateID::new_unchecked( - self.as_usize_unchecked() | LazyStateID::MASK_START, - ) - } - - /// Return this lazy state ID as a lazy state ID that is tagged as a match - /// state. - #[inline] - pub(crate) const fn to_match(&self) -> LazyStateID { - LazyStateID::new_unchecked( - self.as_usize_unchecked() | LazyStateID::MASK_MATCH, - ) - } - - /// Return true if and only if this lazy state ID is tagged. - /// - /// When a lazy state ID is tagged, then one can conclude that it is one - /// of a match, start, dead, quit or unknown state. - #[inline] - pub const fn is_tagged(&self) -> bool { - self.as_usize_unchecked() > LazyStateID::MAX - } - - /// Return true if and only if this represents a lazy state ID that is - /// "unknown." That is, the state has not yet been created. When a caller - /// sees this state ID, it generally means that a state has to be computed - /// in order to proceed. - #[inline] - pub const fn is_unknown(&self) -> bool { - self.as_usize_unchecked() & LazyStateID::MASK_UNKNOWN > 0 - } - - /// Return true if and only if this represents a dead state. A dead state - /// is a state that can never transition to any other state except the - /// dead state. When a dead state is seen, it generally indicates that a - /// search should stop. - #[inline] - pub const fn is_dead(&self) -> bool { - self.as_usize_unchecked() & LazyStateID::MASK_DEAD > 0 - } - - /// Return true if and only if this represents a quit state. A quit state - /// is a state that is representationally equivalent to a dead state, - /// except it indicates the automaton has reached a point at which it can - /// no longer determine whether a match exists or not. In general, this - /// indicates an error during search and the caller must either pass this - /// error up or use a different search technique. - #[inline] - pub const fn is_quit(&self) -> bool { - self.as_usize_unchecked() & LazyStateID::MASK_QUIT > 0 - } - - /// Return true if and only if this lazy state ID has been tagged as a - /// start state. - /// - /// Note that if - /// [`Config::specialize_start_states`](crate::hybrid::dfa::Config) is - /// disabled (which is the default), then this will always return false - /// since start states won't be tagged. - #[inline] - pub const fn is_start(&self) -> bool { - self.as_usize_unchecked() & LazyStateID::MASK_START > 0 - } - - /// Return true if and only if this lazy state ID has been tagged as a - /// match state. - #[inline] - pub const fn is_match(&self) -> bool { - self.as_usize_unchecked() & LazyStateID::MASK_MATCH > 0 - } -} - -/// This error occurs when a lazy state ID could not be constructed. -/// -/// This occurs when given an integer exceeding the maximum lazy state ID -/// value. -/// -/// When the `std` feature is enabled, this implements the `Error` trait. -#[derive(Clone, Debug, Eq, PartialEq)] -pub(crate) struct LazyStateIDError { - attempted: u64, -} - -impl LazyStateIDError { - /// Returns the value that failed to constructed a lazy state ID. 
- pub(crate) fn attempted(&self) -> u64 { - self.attempted - } -} - -#[cfg(feature = "std")] -impl std::error::Error for LazyStateIDError {} - -impl core::fmt::Display for LazyStateIDError { - fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { - write!( - f, - "failed to create LazyStateID from {:?}, which exceeds {:?}", - self.attempted(), - LazyStateID::MAX, - ) - } -} diff --git a/vendor/regex-automata/src/hybrid/mod.rs b/vendor/regex-automata/src/hybrid/mod.rs deleted file mode 100644 index 2feb839d16a69a..00000000000000 --- a/vendor/regex-automata/src/hybrid/mod.rs +++ /dev/null @@ -1,144 +0,0 @@ -/*! -A module for building and searching with lazy deterministic finite automata -(DFAs). - -Like other modules in this crate, lazy DFAs support a rich regex syntax with -Unicode features. The key feature of a lazy DFA is that it builds itself -incrementally during search, and never uses more than a configured capacity of -memory. Thus, when searching with a lazy DFA, one must supply a mutable "cache" -in which the actual DFA's transition table is stored. - -If you're looking for fully compiled DFAs, then please see the top-level -[`dfa` module](crate::dfa). - -# Overview - -This section gives a brief overview of the primary types in this module: - -* A [`regex::Regex`] provides a way to search for matches of a regular -expression using lazy DFAs. This includes iterating over matches with both the -start and end positions of each match. -* A [`dfa::DFA`] provides direct low level access to a lazy DFA. - -# Example: basic regex searching - -This example shows how to compile a regex using the default configuration -and then use it to find matches in a byte string: - -``` -use regex_automata::{hybrid::regex::Regex, Match}; - -let re = Regex::new(r"[0-9]{4}-[0-9]{2}-[0-9]{2}")?; -let mut cache = re.create_cache(); - -let haystack = "2018-12-24 2016-10-08"; -let matches: Vec = re.find_iter(&mut cache, haystack).collect(); -assert_eq!(matches, vec![ - Match::must(0, 0..10), - Match::must(0, 11..21), -]); -# Ok::<(), Box>(()) -``` - -# Example: searching with multiple regexes - -The lazy DFAs in this module all fully support searching with multiple regexes -simultaneously. You can use this support with standard leftmost-first style -searching to find non-overlapping matches: - -``` -# if cfg!(miri) { return Ok(()); } // miri takes too long -use regex_automata::{hybrid::regex::Regex, Match}; - -let re = Regex::new_many(&[r"\w+", r"\S+"])?; -let mut cache = re.create_cache(); - -let haystack = "@foo bar"; -let matches: Vec = re.find_iter(&mut cache, haystack).collect(); -assert_eq!(matches, vec![ - Match::must(1, 0..4), - Match::must(0, 5..8), -]); -# Ok::<(), Box>(()) -``` - -# When should I use this? - -Generally speaking, if you can abide the use of mutable state during search, -and you don't need things like capturing groups or Unicode word boundary -support in non-ASCII text, then a lazy DFA is likely a robust choice with -respect to both search speed and memory usage. Note however that its speed -may be worse than a general purpose regex engine if you don't select a good -[prefilter](crate::util::prefilter). - -If you know ahead of time that your pattern would result in a very large DFA -if it was fully compiled, it may be better to use an NFA simulation instead -of a lazy DFA. Either that, or increase the cache capacity of your lazy DFA -to something that is big enough to hold the state machine (likely through -experimentation). 
The issue here is that if the cache is too small, then it -could wind up being reset too frequently and this might decrease searching -speed significantly. - -# Differences with fully compiled DFAs - -A [`hybrid::regex::Regex`](crate::hybrid::regex::Regex) and a -[`dfa::regex::Regex`](crate::dfa::regex::Regex) both have the same capabilities -(and similarly for their underlying DFAs), but they achieve them through -different means. The main difference is that a hybrid or "lazy" regex builds -its DFA lazily during search, where as a fully compiled regex will build its -DFA at construction time. While building a DFA at search time might sound like -it's slow, it tends to work out where most bytes seen during a search will -reuse pre-built parts of the DFA and thus can be almost as fast as a fully -compiled DFA. The main downside is that searching requires mutable space to -store the DFA, and, in the worst case, a search can result in a new state being -created for each byte seen, which would make searching quite a bit slower. - -A fully compiled DFA never has to worry about searches being slower once -it's built. (Aside from, say, the transition table being so large that it -is subject to harsh CPU cache effects.) However, of course, building a full -DFA can be quite time consuming and memory hungry. Particularly when large -Unicode character classes are used, which tend to translate into very large -DFAs. - -A lazy DFA strikes a nice balance _in practice_, particularly in the -presence of Unicode mode, by only building what is needed. It avoids the -worst case exponential time complexity of DFA compilation by guaranteeing that -it will only build at most one state per byte searched. While the worst -case here can lead to a very high constant, it will never be exponential. - -# Syntax - -This module supports the same syntax as the `regex` crate, since they share the -same parser. You can find an exhaustive list of supported syntax in the -[documentation for the `regex` crate](https://docs.rs/regex/1/regex/#syntax). - -There are two things that are not supported by the lazy DFAs in this module: - -* Capturing groups. The DFAs (and [`Regex`](regex::Regex)es built on top -of them) can only find the offsets of an entire match, but cannot resolve -the offsets of each capturing group. This is because DFAs do not have the -expressive power necessary. Note that it is okay to build a lazy DFA from an -NFA that contains capture groups. The capture groups will simply be ignored. -* Unicode word boundaries. These present particularly difficult challenges for -DFA construction and would result in an explosion in the number of states. -One can enable [`dfa::Config::unicode_word_boundary`] though, which provides -heuristic support for Unicode word boundaries that only works on ASCII text. -Otherwise, one can use `(?-u:\b)` for an ASCII word boundary, which will work -on any input. - -There are no plans to lift either of these limitations. - -Note that these restrictions are identical to the restrictions on fully -compiled DFAs. -*/ - -pub use self::{ - error::{BuildError, CacheError, StartError}, - id::LazyStateID, -}; - -pub mod dfa; -mod error; -mod id; -pub mod regex; -mod search; diff --git a/vendor/regex-automata/src/hybrid/regex.rs b/vendor/regex-automata/src/hybrid/regex.rs deleted file mode 100644 index b3b1fe317d6775..00000000000000 --- a/vendor/regex-automata/src/hybrid/regex.rs +++ /dev/null @@ -1,895 +0,0 @@ -/*! -A lazy DFA backed `Regex`. 
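// A small sketch of the `(?-u:\b)` workaround mentioned in the syntax notes
// above: an ASCII-only word boundary builds fine with the lazy DFA, whereas a
// Unicode-aware `\b` would be rejected unless heuristic support is enabled.
//
// use regex_automata::{hybrid::regex::Regex, Match};
//
// fn main() -> Result<(), Box<dyn std::error::Error>> {
//     let re = Regex::new(r"(?-u:\b)[0-9]+(?-u:\b)")?;
//     let mut cache = re.create_cache();
//     let matches: Vec<Match> =
//         re.find_iter(&mut cache, "id 42, port 8080").collect();
//     assert_eq!(matches, vec![Match::must(0, 3..5), Match::must(0, 12..16)]);
//     Ok(())
// }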
- -This module provides a [`Regex`] backed by a lazy DFA. A `Regex` implements -convenience routines you might have come to expect, such as finding a match -and iterating over all non-overlapping matches. This `Regex` type is limited -in its capabilities to what a lazy DFA can provide. Therefore, APIs involving -capturing groups, for example, are not provided. - -Internally, a `Regex` is composed of two DFAs. One is a "forward" DFA that -finds the end offset of a match, where as the other is a "reverse" DFA that -find the start offset of a match. - -See the [parent module](crate::hybrid) for examples. -*/ - -use crate::{ - hybrid::{ - dfa::{self, DFA}, - error::BuildError, - }, - nfa::thompson, - util::{ - iter, - search::{Anchored, Input, Match, MatchError, MatchKind}, - }, -}; - -/// A regular expression that uses hybrid NFA/DFAs (also called "lazy DFAs") -/// for searching. -/// -/// A regular expression is comprised of two lazy DFAs, a "forward" DFA and a -/// "reverse" DFA. The forward DFA is responsible for detecting the end of -/// a match while the reverse DFA is responsible for detecting the start -/// of a match. Thus, in order to find the bounds of any given match, a -/// forward search must first be run followed by a reverse search. A match -/// found by the forward DFA guarantees that the reverse DFA will also find -/// a match. -/// -/// # Fallibility -/// -/// Most of the search routines defined on this type will _panic_ when the -/// underlying search fails. This might be because the DFA gave up because it -/// saw a quit byte, whether configured explicitly or via heuristic Unicode -/// word boundary support, although neither are enabled by default. It might -/// also fail if the underlying DFA determines it isn't making effective use of -/// the cache (which also never happens by default). Or it might fail because -/// an invalid `Input` configuration is given, for example, with an unsupported -/// [`Anchored`] mode. -/// -/// If you need to handle these error cases instead of allowing them to trigger -/// a panic, then the lower level [`Regex::try_search`] provides a fallible API -/// that never panics. -/// -/// # Example -/// -/// This example shows how to cause a search to terminate if it sees a -/// `\n` byte, and handle the error returned. This could be useful if, for -/// example, you wanted to prevent a user supplied pattern from matching -/// across a line boundary. -/// -/// ``` -/// # if cfg!(miri) { return Ok(()); } // miri takes too long -/// use regex_automata::{hybrid::{dfa, regex::Regex}, Input, MatchError}; -/// -/// let re = Regex::builder() -/// .dfa(dfa::Config::new().quit(b'\n', true)) -/// .build(r"foo\p{any}+bar")?; -/// let mut cache = re.create_cache(); -/// -/// let input = Input::new("foo\nbar"); -/// // Normally this would produce a match, since \p{any} contains '\n'. -/// // But since we instructed the automaton to enter a quit state if a -/// // '\n' is observed, this produces a match error instead. -/// let expected = MatchError::quit(b'\n', 3); -/// let got = re.try_search(&mut cache, &input).unwrap_err(); -/// assert_eq!(expected, got); -/// -/// # Ok::<(), Box>(()) -/// ``` -#[derive(Debug)] -pub struct Regex { - /// The forward lazy DFA. This can only find the end of a match. - forward: DFA, - /// The reverse lazy DFA. This can only find the start of a match. 
- /// - /// This is built with 'all' match semantics (instead of leftmost-first) - /// so that it always finds the longest possible match (which corresponds - /// to the leftmost starting position). It is also compiled as an anchored - /// matcher and has 'starts_for_each_pattern' enabled. Including starting - /// states for each pattern is necessary to ensure that we only look for - /// matches of a pattern that matched in the forward direction. Otherwise, - /// we might wind up finding the "leftmost" starting position of a totally - /// different pattern! - reverse: DFA, -} - -/// Convenience routines for regex and cache construction. -impl Regex { - /// Parse the given regular expression using the default configuration and - /// return the corresponding regex. - /// - /// If you want a non-default configuration, then use the [`Builder`] to - /// set your own configuration. - /// - /// # Example - /// - /// ``` - /// use regex_automata::{hybrid::regex::Regex, Match}; - /// - /// let re = Regex::new("foo[0-9]+bar")?; - /// let mut cache = re.create_cache(); - /// assert_eq!( - /// Some(Match::must(0, 3..14)), - /// re.find(&mut cache, "zzzfoo12345barzzz"), - /// ); - /// # Ok::<(), Box>(()) - /// ``` - #[cfg(feature = "syntax")] - pub fn new(pattern: &str) -> Result { - Regex::builder().build(pattern) - } - - /// Like `new`, but parses multiple patterns into a single "multi regex." - /// This similarly uses the default regex configuration. - /// - /// # Example - /// - /// ``` - /// use regex_automata::{hybrid::regex::Regex, Match}; - /// - /// let re = Regex::new_many(&["[a-z]+", "[0-9]+"])?; - /// let mut cache = re.create_cache(); - /// - /// let mut it = re.find_iter(&mut cache, "abc 1 foo 4567 0 quux"); - /// assert_eq!(Some(Match::must(0, 0..3)), it.next()); - /// assert_eq!(Some(Match::must(1, 4..5)), it.next()); - /// assert_eq!(Some(Match::must(0, 6..9)), it.next()); - /// assert_eq!(Some(Match::must(1, 10..14)), it.next()); - /// assert_eq!(Some(Match::must(1, 15..16)), it.next()); - /// assert_eq!(Some(Match::must(0, 17..21)), it.next()); - /// assert_eq!(None, it.next()); - /// # Ok::<(), Box>(()) - /// ``` - #[cfg(feature = "syntax")] - pub fn new_many>( - patterns: &[P], - ) -> Result { - Regex::builder().build_many(patterns) - } - - /// Return a builder for configuring the construction of a `Regex`. - /// - /// This is a convenience routine to avoid needing to import the - /// [`Builder`] type in common cases. - /// - /// # Example - /// - /// This example shows how to use the builder to disable UTF-8 mode - /// everywhere. - /// - /// ``` - /// # if cfg!(miri) { return Ok(()); } // miri takes too long - /// use regex_automata::{ - /// hybrid::regex::Regex, nfa::thompson, util::syntax, Match, - /// }; - /// - /// let re = Regex::builder() - /// .syntax(syntax::Config::new().utf8(false)) - /// .thompson(thompson::Config::new().utf8(false)) - /// .build(r"foo(?-u:[^b])ar.*")?; - /// let mut cache = re.create_cache(); - /// - /// let haystack = b"\xFEfoo\xFFarzz\xE2\x98\xFF\n"; - /// let expected = Some(Match::must(0, 1..9)); - /// let got = re.find(&mut cache, haystack); - /// assert_eq!(expected, got); - /// - /// # Ok::<(), Box>(()) - /// ``` - pub fn builder() -> Builder { - Builder::new() - } - - /// Create a new cache for this `Regex`. - /// - /// The cache returned should only be used for searches for this - /// `Regex`. 
If you want to reuse the cache for another `Regex`, then - /// you must call [`Cache::reset`] with that `Regex` (or, equivalently, - /// [`Regex::reset_cache`]). - pub fn create_cache(&self) -> Cache { - Cache::new(self) - } - - /// Reset the given cache such that it can be used for searching with the - /// this `Regex` (and only this `Regex`). - /// - /// A cache reset permits reusing memory already allocated in this cache - /// with a different `Regex`. - /// - /// Resetting a cache sets its "clear count" to 0. This is relevant if the - /// `Regex` has been configured to "give up" after it has cleared the cache - /// a certain number of times. - /// - /// # Example - /// - /// This shows how to re-purpose a cache for use with a different `Regex`. - /// - /// ``` - /// # if cfg!(miri) { return Ok(()); } // miri takes too long - /// use regex_automata::{hybrid::regex::Regex, Match}; - /// - /// let re1 = Regex::new(r"\w")?; - /// let re2 = Regex::new(r"\W")?; - /// - /// let mut cache = re1.create_cache(); - /// assert_eq!( - /// Some(Match::must(0, 0..2)), - /// re1.find(&mut cache, "Δ"), - /// ); - /// - /// // Using 'cache' with re2 is not allowed. It may result in panics or - /// // incorrect results. In order to re-purpose the cache, we must reset - /// // it with the Regex we'd like to use it with. - /// // - /// // Similarly, after this reset, using the cache with 're1' is also not - /// // allowed. - /// re2.reset_cache(&mut cache); - /// assert_eq!( - /// Some(Match::must(0, 0..3)), - /// re2.find(&mut cache, "☃"), - /// ); - /// - /// # Ok::<(), Box>(()) - /// ``` - pub fn reset_cache(&self, cache: &mut Cache) { - self.forward().reset_cache(&mut cache.forward); - self.reverse().reset_cache(&mut cache.reverse); - } -} - -/// Standard infallible search routines for finding and iterating over matches. -impl Regex { - /// Returns true if and only if this regex matches the given haystack. - /// - /// This routine may short circuit if it knows that scanning future input - /// will never lead to a different result. In particular, if the underlying - /// DFA enters a match state or a dead state, then this routine will return - /// `true` or `false`, respectively, without inspecting any future input. - /// - /// # Panics - /// - /// This routine panics if the search could not complete. This can occur - /// in a number of circumstances: - /// - /// * The configuration of the lazy DFA may permit it to "quit" the search. - /// For example, setting quit bytes or enabling heuristic support for - /// Unicode word boundaries. The default configuration does not enable any - /// option that could result in the lazy DFA quitting. - /// * The configuration of the lazy DFA may also permit it to "give up" - /// on a search if it makes ineffective use of its transition table - /// cache. The default configuration does not enable this by default, - /// although it is typically a good idea to. - /// * When the provided `Input` configuration is not supported. For - /// example, by providing an unsupported anchor mode. - /// - /// When a search panics, callers cannot know whether a match exists or - /// not. - /// - /// Use [`Regex::try_search`] if you want to handle these error conditions. 
- /// - /// # Example - /// - /// ``` - /// use regex_automata::hybrid::regex::Regex; - /// - /// let re = Regex::new("foo[0-9]+bar")?; - /// let mut cache = re.create_cache(); - /// - /// assert!(re.is_match(&mut cache, "foo12345bar")); - /// assert!(!re.is_match(&mut cache, "foobar")); - /// # Ok::<(), Box>(()) - /// ``` - #[inline] - pub fn is_match<'h, I: Into>>( - &self, - cache: &mut Cache, - input: I, - ) -> bool { - // Not only can we do an "earliest" search, but we can avoid doing a - // reverse scan too. - self.forward() - .try_search_fwd(&mut cache.forward, &input.into().earliest(true)) - .unwrap() - .is_some() - } - - /// Returns the start and end offset of the leftmost match. If no match - /// exists, then `None` is returned. - /// - /// # Panics - /// - /// This routine panics if the search could not complete. This can occur - /// in a number of circumstances: - /// - /// * The configuration of the lazy DFA may permit it to "quit" the search. - /// For example, setting quit bytes or enabling heuristic support for - /// Unicode word boundaries. The default configuration does not enable any - /// option that could result in the lazy DFA quitting. - /// * The configuration of the lazy DFA may also permit it to "give up" - /// on a search if it makes ineffective use of its transition table - /// cache. The default configuration does not enable this by default, - /// although it is typically a good idea to. - /// * When the provided `Input` configuration is not supported. For - /// example, by providing an unsupported anchor mode. - /// - /// When a search panics, callers cannot know whether a match exists or - /// not. - /// - /// Use [`Regex::try_search`] if you want to handle these error conditions. - /// - /// # Example - /// - /// ``` - /// use regex_automata::{Match, hybrid::regex::Regex}; - /// - /// let re = Regex::new("foo[0-9]+")?; - /// let mut cache = re.create_cache(); - /// assert_eq!( - /// Some(Match::must(0, 3..11)), - /// re.find(&mut cache, "zzzfoo12345zzz"), - /// ); - /// - /// // Even though a match is found after reading the first byte (`a`), - /// // the default leftmost-first match semantics demand that we find the - /// // earliest match that prefers earlier parts of the pattern over latter - /// // parts. - /// let re = Regex::new("abc|a")?; - /// let mut cache = re.create_cache(); - /// assert_eq!(Some(Match::must(0, 0..3)), re.find(&mut cache, "abc")); - /// # Ok::<(), Box>(()) - /// ``` - #[inline] - pub fn find<'h, I: Into>>( - &self, - cache: &mut Cache, - input: I, - ) -> Option { - self.try_search(cache, &input.into()).unwrap() - } - - /// Returns an iterator over all non-overlapping leftmost matches in the - /// given bytes. If no match exists, then the iterator yields no elements. - /// - /// # Panics - /// - /// This routine panics if the search could not complete. This can occur - /// in a number of circumstances: - /// - /// * The configuration of the lazy DFA may permit it to "quit" the search. - /// For example, setting quit bytes or enabling heuristic support for - /// Unicode word boundaries. The default configuration does not enable any - /// option that could result in the lazy DFA quitting. - /// * The configuration of the lazy DFA may also permit it to "give up" - /// on a search if it makes ineffective use of its transition table - /// cache. The default configuration does not enable this by default, - /// although it is typically a good idea to. - /// * When the provided `Input` configuration is not supported. 
For - /// example, by providing an unsupported anchor mode. - /// - /// When a search panics, callers cannot know whether a match exists or - /// not. - /// - /// The above conditions also apply to the iterator returned as well. For - /// example, if the lazy DFA gives up or quits during a search using this - /// method, then a panic will occur during iteration. - /// - /// Use [`Regex::try_search`] with [`util::iter::Searcher`](iter::Searcher) - /// if you want to handle these error conditions. - /// - /// # Example - /// - /// ``` - /// use regex_automata::{hybrid::regex::Regex, Match}; - /// - /// let re = Regex::new("foo[0-9]+")?; - /// let mut cache = re.create_cache(); - /// - /// let text = "foo1 foo12 foo123"; - /// let matches: Vec = re.find_iter(&mut cache, text).collect(); - /// assert_eq!(matches, vec![ - /// Match::must(0, 0..4), - /// Match::must(0, 5..10), - /// Match::must(0, 11..17), - /// ]); - /// # Ok::<(), Box>(()) - /// ``` - #[inline] - pub fn find_iter<'r, 'c, 'h, I: Into>>( - &'r self, - cache: &'c mut Cache, - input: I, - ) -> FindMatches<'r, 'c, 'h> { - let it = iter::Searcher::new(input.into()); - FindMatches { re: self, cache, it } - } -} - -/// Lower level "search" primitives that accept a `&Input` for cheap reuse -/// and return an error if one occurs instead of panicking. -impl Regex { - /// Returns the start and end offset of the leftmost match. If no match - /// exists, then `None` is returned. - /// - /// This is like [`Regex::find`] but with two differences: - /// - /// 1. It is not generic over `Into` and instead accepts a - /// `&Input`. This permits reusing the same `Input` for multiple searches - /// without needing to create a new one. This _may_ help with latency. - /// 2. It returns an error if the search could not complete where as - /// [`Regex::find`] will panic. - /// - /// # Errors - /// - /// This routine errors if the search could not complete. This can occur - /// in a number of circumstances: - /// - /// * The configuration of the lazy DFA may permit it to "quit" the search. - /// For example, setting quit bytes or enabling heuristic support for - /// Unicode word boundaries. The default configuration does not enable any - /// option that could result in the lazy DFA quitting. - /// * The configuration of the lazy DFA may also permit it to "give up" - /// on a search if it makes ineffective use of its transition table - /// cache. The default configuration does not enable this by default, - /// although it is typically a good idea to. - /// * When the provided `Input` configuration is not supported. For - /// example, by providing an unsupported anchor mode. - /// - /// When a search returns an error, callers cannot know whether a match - /// exists or not. - #[inline] - pub fn try_search( - &self, - cache: &mut Cache, - input: &Input<'_>, - ) -> Result, MatchError> { - let (fcache, rcache) = (&mut cache.forward, &mut cache.reverse); - let end = match self.forward().try_search_fwd(fcache, input)? { - None => return Ok(None), - Some(end) => end, - }; - // This special cases an empty match at the beginning of the search. If - // our end matches our start, then since a reverse DFA can't match past - // the start, it must follow that our starting position is also our end - // position. So short circuit and skip the reverse search. - if input.start() == end.offset() { - return Ok(Some(Match::new( - end.pattern(), - end.offset()..end.offset(), - ))); - } - // We can also skip the reverse search if we know our search was - // anchored. 
This occurs either when the input config is anchored or - // when we know the regex itself is anchored. In this case, we know the - // start of the match, if one is found, must be the start of the - // search. - if self.is_anchored(input) { - return Ok(Some(Match::new( - end.pattern(), - input.start()..end.offset(), - ))); - } - // N.B. I have tentatively convinced myself that it isn't necessary - // to specify the specific pattern for the reverse search since the - // reverse search will always find the same pattern to match as the - // forward search. But I lack a rigorous proof. Why not just provide - // the pattern anyway? Well, if it is needed, then leaving it out - // gives us a chance to find a witness. (Also, if we don't need to - // specify the pattern, then we don't need to build the reverse DFA - // with 'starts_for_each_pattern' enabled. It doesn't matter too much - // for the lazy DFA, but does make the overall DFA bigger.) - // - // We also need to be careful to disable 'earliest' for the reverse - // search, since it could be enabled for the forward search. In the - // reverse case, to satisfy "leftmost" criteria, we need to match as - // much as we can. We also need to be careful to make the search - // anchored. We don't want the reverse search to report any matches - // other than the one beginning at the end of our forward search. - let revsearch = input - .clone() - .span(input.start()..end.offset()) - .anchored(Anchored::Yes) - .earliest(false); - let start = self - .reverse() - .try_search_rev(rcache, &revsearch)? - .expect("reverse search must match if forward search does"); - debug_assert_eq!( - start.pattern(), - end.pattern(), - "forward and reverse search must match same pattern", - ); - debug_assert!(start.offset() <= end.offset()); - Ok(Some(Match::new(end.pattern(), start.offset()..end.offset()))) - } - - /// Returns true if either the given input specifies an anchored search - /// or if the underlying NFA is always anchored. - fn is_anchored(&self, input: &Input<'_>) -> bool { - match input.get_anchored() { - Anchored::No => { - self.forward().get_nfa().is_always_start_anchored() - } - Anchored::Yes | Anchored::Pattern(_) => true, - } - } -} - -/// Non-search APIs for querying information about the regex and setting a -/// prefilter. -impl Regex { - /// Return the underlying lazy DFA responsible for forward matching. - /// - /// This is useful for accessing the underlying lazy DFA and using it - /// directly if the situation calls for it. - pub fn forward(&self) -> &DFA { - &self.forward - } - - /// Return the underlying lazy DFA responsible for reverse matching. - /// - /// This is useful for accessing the underlying lazy DFA and using it - /// directly if the situation calls for it. - pub fn reverse(&self) -> &DFA { - &self.reverse - } - - /// Returns the total number of patterns matched by this regex. - /// - /// # Example - /// - /// ``` - /// # if cfg!(miri) { return Ok(()); } // miri takes too long - /// use regex_automata::hybrid::regex::Regex; - /// - /// let re = Regex::new_many(&[r"[a-z]+", r"[0-9]+", r"\w+"])?; - /// assert_eq!(3, re.pattern_len()); - /// # Ok::<(), Box>(()) - /// ``` - pub fn pattern_len(&self) -> usize { - assert_eq!(self.forward().pattern_len(), self.reverse().pattern_len()); - self.forward().pattern_len() - } -} - -/// An iterator over all non-overlapping matches for an infallible search. -/// -/// The iterator yields a [`Match`] value until no more matches could be found. 
-/// If the underlying regex engine returns an error, then a panic occurs. -/// -/// The lifetime parameters are as follows: -/// -/// * `'r` represents the lifetime of the regex object. -/// * `'h` represents the lifetime of the haystack being searched. -/// * `'c` represents the lifetime of the regex cache. -/// -/// This iterator can be created with the [`Regex::find_iter`] method. -#[derive(Debug)] -pub struct FindMatches<'r, 'c, 'h> { - re: &'r Regex, - cache: &'c mut Cache, - it: iter::Searcher<'h>, -} - -impl<'r, 'c, 'h> Iterator for FindMatches<'r, 'c, 'h> { - type Item = Match; - - #[inline] - fn next(&mut self) -> Option { - let FindMatches { re, ref mut cache, ref mut it } = *self; - it.advance(|input| re.try_search(cache, input)) - } -} - -/// A cache represents a partially computed forward and reverse DFA. -/// -/// A cache is the key component that differentiates a classical DFA and a -/// hybrid NFA/DFA (also called a "lazy DFA"). Where a classical DFA builds a -/// complete transition table that can handle all possible inputs, a hybrid -/// NFA/DFA starts with an empty transition table and builds only the parts -/// required during search. The parts that are built are stored in a cache. For -/// this reason, a cache is a required parameter for nearly every operation on -/// a [`Regex`]. -/// -/// Caches can be created from their corresponding `Regex` via -/// [`Regex::create_cache`]. A cache can only be used with either the `Regex` -/// that created it, or the `Regex` that was most recently used to reset it -/// with [`Cache::reset`]. Using a cache with any other `Regex` may result in -/// panics or incorrect results. -#[derive(Debug, Clone)] -pub struct Cache { - forward: dfa::Cache, - reverse: dfa::Cache, -} - -impl Cache { - /// Create a new cache for the given `Regex`. - /// - /// The cache returned should only be used for searches for the given - /// `Regex`. If you want to reuse the cache for another `Regex`, then you - /// must call [`Cache::reset`] with that `Regex`. - pub fn new(re: &Regex) -> Cache { - let forward = dfa::Cache::new(re.forward()); - let reverse = dfa::Cache::new(re.reverse()); - Cache { forward, reverse } - } - - /// Reset this cache such that it can be used for searching with the given - /// `Regex` (and only that `Regex`). - /// - /// A cache reset permits reusing memory already allocated in this cache - /// with a different `Regex`. - /// - /// Resetting a cache sets its "clear count" to 0. This is relevant if the - /// `Regex` has been configured to "give up" after it has cleared the cache - /// a certain number of times. - /// - /// # Example - /// - /// This shows how to re-purpose a cache for use with a different `Regex`. - /// - /// ``` - /// # if cfg!(miri) { return Ok(()); } // miri takes too long - /// use regex_automata::{hybrid::regex::Regex, Match}; - /// - /// let re1 = Regex::new(r"\w")?; - /// let re2 = Regex::new(r"\W")?; - /// - /// let mut cache = re1.create_cache(); - /// assert_eq!( - /// Some(Match::must(0, 0..2)), - /// re1.find(&mut cache, "Δ"), - /// ); - /// - /// // Using 'cache' with re2 is not allowed. It may result in panics or - /// // incorrect results. In order to re-purpose the cache, we must reset - /// // it with the Regex we'd like to use it with. - /// // - /// // Similarly, after this reset, using the cache with 're1' is also not - /// // allowed. 
- /// cache.reset(&re2); - /// assert_eq!( - /// Some(Match::must(0, 0..3)), - /// re2.find(&mut cache, "☃"), - /// ); - /// - /// # Ok::<(), Box>(()) - /// ``` - pub fn reset(&mut self, re: &Regex) { - self.forward.reset(re.forward()); - self.reverse.reset(re.reverse()); - } - - /// Return a reference to the forward cache. - pub fn forward(&mut self) -> &dfa::Cache { - &self.forward - } - - /// Return a reference to the reverse cache. - pub fn reverse(&mut self) -> &dfa::Cache { - &self.reverse - } - - /// Return a mutable reference to the forward cache. - /// - /// If you need mutable references to both the forward and reverse caches, - /// then use [`Cache::as_parts_mut`]. - pub fn forward_mut(&mut self) -> &mut dfa::Cache { - &mut self.forward - } - - /// Return a mutable reference to the reverse cache. - /// - /// If you need mutable references to both the forward and reverse caches, - /// then use [`Cache::as_parts_mut`]. - pub fn reverse_mut(&mut self) -> &mut dfa::Cache { - &mut self.reverse - } - - /// Return references to the forward and reverse caches, respectively. - pub fn as_parts(&self) -> (&dfa::Cache, &dfa::Cache) { - (&self.forward, &self.reverse) - } - - /// Return mutable references to the forward and reverse caches, - /// respectively. - pub fn as_parts_mut(&mut self) -> (&mut dfa::Cache, &mut dfa::Cache) { - (&mut self.forward, &mut self.reverse) - } - - /// Returns the heap memory usage, in bytes, as a sum of the forward and - /// reverse lazy DFA caches. - /// - /// This does **not** include the stack size used up by this cache. To - /// compute that, use `std::mem::size_of::()`. - pub fn memory_usage(&self) -> usize { - self.forward.memory_usage() + self.reverse.memory_usage() - } -} - -/// A builder for a regex based on a hybrid NFA/DFA. -/// -/// This builder permits configuring options for the syntax of a pattern, the -/// NFA construction, the lazy DFA construction and finally the regex searching -/// itself. This builder is different from a general purpose regex builder -/// in that it permits fine grain configuration of the construction process. -/// The trade off for this is complexity, and the possibility of setting a -/// configuration that might not make sense. For example, there are two -/// different UTF-8 modes: -/// -/// * [`syntax::Config::utf8`](crate::util::syntax::Config::utf8) controls -/// whether the pattern itself can contain sub-expressions that match invalid -/// UTF-8. -/// * [`thompson::Config::utf8`] controls how the regex iterators themselves -/// advance the starting position of the next search when a match with zero -/// length is found. -/// -/// Generally speaking, callers will want to either enable all of these or -/// disable all of these. -/// -/// Internally, building a regex requires building two hybrid NFA/DFAs, -/// where one is responsible for finding the end of a match and the other is -/// responsible for finding the start of a match. If you only need to detect -/// whether something matched, or only the end of a match, then you should use -/// a [`dfa::Builder`] to construct a single hybrid NFA/DFA, which is cheaper -/// than building two of them. -/// -/// # Example -/// -/// This example shows how to disable UTF-8 mode in the syntax and the regex -/// itself. This is generally what you want for matching on arbitrary bytes. 
-/// -/// ``` -/// # if cfg!(miri) { return Ok(()); } // miri takes too long -/// use regex_automata::{ -/// hybrid::regex::Regex, nfa::thompson, util::syntax, Match, -/// }; -/// -/// let re = Regex::builder() -/// .syntax(syntax::Config::new().utf8(false)) -/// .thompson(thompson::Config::new().utf8(false)) -/// .build(r"foo(?-u:[^b])ar.*")?; -/// let mut cache = re.create_cache(); -/// -/// let haystack = b"\xFEfoo\xFFarzz\xE2\x98\xFF\n"; -/// let expected = Some(Match::must(0, 1..9)); -/// let got = re.find(&mut cache, haystack); -/// assert_eq!(expected, got); -/// // Notice that `(?-u:[^b])` matches invalid UTF-8, -/// // but the subsequent `.*` does not! Disabling UTF-8 -/// // on the syntax permits this. -/// assert_eq!(b"foo\xFFarzz", &haystack[got.unwrap().range()]); -/// -/// # Ok::<(), Box>(()) -/// ``` -#[derive(Clone, Debug)] -pub struct Builder { - dfa: dfa::Builder, -} - -impl Builder { - /// Create a new regex builder with the default configuration. - pub fn new() -> Builder { - Builder { dfa: DFA::builder() } - } - - /// Build a regex from the given pattern. - /// - /// If there was a problem parsing or compiling the pattern, then an error - /// is returned. - #[cfg(feature = "syntax")] - pub fn build(&self, pattern: &str) -> Result { - self.build_many(&[pattern]) - } - - /// Build a regex from the given patterns. - #[cfg(feature = "syntax")] - pub fn build_many>( - &self, - patterns: &[P], - ) -> Result { - let forward = self.dfa.build_many(patterns)?; - let reverse = self - .dfa - .clone() - .configure( - DFA::config() - .prefilter(None) - .specialize_start_states(false) - .match_kind(MatchKind::All), - ) - .thompson(thompson::Config::new().reverse(true)) - .build_many(patterns)?; - Ok(self.build_from_dfas(forward, reverse)) - } - - /// Build a regex from its component forward and reverse hybrid NFA/DFAs. - /// - /// This is useful when you've built a forward and reverse lazy DFA - /// separately, and want to combine them into a single regex. Once build, - /// the individual DFAs given can still be accessed via [`Regex::forward`] - /// and [`Regex::reverse`]. - /// - /// It is important that the reverse lazy DFA be compiled under the - /// following conditions: - /// - /// * It should use [`MatchKind::All`] semantics. - /// * It should match in reverse. - /// * Otherwise, its configuration should match the forward DFA. - /// - /// If these conditions aren't satisfied, then the behavior of searches is - /// unspecified. - /// - /// Note that when using this constructor, no configuration is applied. - /// Since this routine provides the DFAs to the builder, there is no - /// opportunity to apply other configuration options. - /// - /// # Example - /// - /// This shows how to build individual lazy forward and reverse DFAs, and - /// then combine them into a single `Regex`. 
- /// - /// ``` - /// use regex_automata::{ - /// hybrid::{dfa::DFA, regex::Regex}, - /// nfa::thompson, - /// MatchKind, - /// }; - /// - /// let fwd = DFA::new(r"foo[0-9]+")?; - /// let rev = DFA::builder() - /// .configure(DFA::config().match_kind(MatchKind::All)) - /// .thompson(thompson::Config::new().reverse(true)) - /// .build(r"foo[0-9]+")?; - /// - /// let re = Regex::builder().build_from_dfas(fwd, rev); - /// let mut cache = re.create_cache(); - /// assert_eq!(true, re.is_match(&mut cache, "foo123")); - /// # Ok::<(), Box>(()) - /// ``` - pub fn build_from_dfas(&self, forward: DFA, reverse: DFA) -> Regex { - Regex { forward, reverse } - } - - /// Set the syntax configuration for this builder using - /// [`syntax::Config`](crate::util::syntax::Config). - /// - /// This permits setting things like case insensitivity, Unicode and multi - /// line mode. - #[cfg(feature = "syntax")] - pub fn syntax( - &mut self, - config: crate::util::syntax::Config, - ) -> &mut Builder { - self.dfa.syntax(config); - self - } - - /// Set the Thompson NFA configuration for this builder using - /// [`nfa::thompson::Config`](thompson::Config). - /// - /// This permits setting things like whether additional time should be - /// spent shrinking the size of the NFA. - #[cfg(feature = "syntax")] - pub fn thompson(&mut self, config: thompson::Config) -> &mut Builder { - self.dfa.thompson(config); - self - } - - /// Set the lazy DFA compilation configuration for this builder using - /// [`dfa::Config`]. - /// - /// This permits setting things like whether Unicode word boundaries should - /// be heuristically supported or settings how the behavior of the cache. - pub fn dfa(&mut self, config: dfa::Config) -> &mut Builder { - self.dfa.configure(config); - self - } -} - -impl Default for Builder { - fn default() -> Builder { - Builder::new() - } -} diff --git a/vendor/regex-automata/src/hybrid/search.rs b/vendor/regex-automata/src/hybrid/search.rs deleted file mode 100644 index 1f4a505db41784..00000000000000 --- a/vendor/regex-automata/src/hybrid/search.rs +++ /dev/null @@ -1,802 +0,0 @@ -use crate::{ - hybrid::{ - dfa::{Cache, OverlappingState, DFA}, - id::LazyStateID, - }, - util::{ - prefilter::Prefilter, - search::{HalfMatch, Input, MatchError, Span}, - }, -}; - -#[inline(never)] -pub(crate) fn find_fwd( - dfa: &DFA, - cache: &mut Cache, - input: &Input<'_>, -) -> Result, MatchError> { - if input.is_done() { - return Ok(None); - } - let pre = if input.get_anchored().is_anchored() { - None - } else { - dfa.get_config().get_prefilter() - }; - // So what we do here is specialize four different versions of 'find_fwd': - // one for each of the combinations for 'has prefilter' and 'is earliest - // search'. The reason for doing this is that both of these things require - // branches and special handling in some code that can be very hot, - // and shaving off as much as we can when we don't need it tends to be - // beneficial in ad hoc benchmarks. To see these differences, you often - // need a query with a high match count. In other words, specializing these - // four routines *tends* to help latency more than throughput. 
- if pre.is_some() { - if input.get_earliest() { - find_fwd_imp(dfa, cache, input, pre, true) - } else { - find_fwd_imp(dfa, cache, input, pre, false) - } - } else { - if input.get_earliest() { - find_fwd_imp(dfa, cache, input, None, true) - } else { - find_fwd_imp(dfa, cache, input, None, false) - } - } -} - -#[cfg_attr(feature = "perf-inline", inline(always))] -fn find_fwd_imp( - dfa: &DFA, - cache: &mut Cache, - input: &Input<'_>, - pre: Option<&'_ Prefilter>, - earliest: bool, -) -> Result, MatchError> { - // See 'prefilter_restart' docs for explanation. - let universal_start = dfa.get_nfa().look_set_prefix_any().is_empty(); - let mut mat = None; - let mut sid = init_fwd(dfa, cache, input)?; - let mut at = input.start(); - // This could just be a closure, but then I think it would be unsound - // because it would need to be safe to invoke. This way, the lack of safety - // is clearer in the code below. - macro_rules! next_unchecked { - ($sid:expr, $at:expr) => {{ - let byte = *input.haystack().get_unchecked($at); - dfa.next_state_untagged_unchecked(cache, $sid, byte) - }}; - } - - if let Some(ref pre) = pre { - let span = Span::from(at..input.end()); - match pre.find(input.haystack(), span) { - None => return Ok(mat), - Some(ref span) => { - at = span.start; - if !universal_start { - sid = prefilter_restart(dfa, cache, &input, at)?; - } - } - } - } - cache.search_start(at); - while at < input.end() { - if sid.is_tagged() { - cache.search_update(at); - sid = dfa - .next_state(cache, sid, input.haystack()[at]) - .map_err(|_| gave_up(at))?; - } else { - // SAFETY: There are two safety invariants we need to uphold - // here in the loops below: that 'sid' and 'prev_sid' are valid - // state IDs for this DFA, and that 'at' is a valid index into - // 'haystack'. For the former, we rely on the invariant that - // next_state* and start_state_forward always returns a valid state - // ID (given a valid state ID in the former case), and that we are - // only at this place in the code if 'sid' is untagged. Moreover, - // every call to next_state_untagged_unchecked below is guarded by - // a check that sid is untagged. For the latter safety invariant, - // we always guard unchecked access with a check that 'at' is less - // than 'end', where 'end <= haystack.len()'. In the unrolled loop - // below, we ensure that 'at' is always in bounds. - // - // PERF: For justification of omitting bounds checks, it gives us a - // ~10% bump in search time. This was used for a benchmark: - // - // regex-cli find half hybrid -p '(?m)^.+$' -UBb bigfile - // - // PERF: For justification for the loop unrolling, we use a few - // different tests: - // - // regex-cli find half hybrid -p '\w{50}' -UBb bigfile - // regex-cli find half hybrid -p '(?m)^.+$' -UBb bigfile - // regex-cli find half hybrid -p 'ZQZQZQZQ' -UBb bigfile - // - // And there are three different configurations: - // - // nounroll: this entire 'else' block vanishes and we just - // always use 'dfa.next_state(..)'. 
- // unroll1: just the outer loop below - // unroll2: just the inner loop below - // unroll3: both the outer and inner loops below - // - // This results in a matrix of timings for each of the above - // regexes with each of the above unrolling configurations: - // - // '\w{50}' '(?m)^.+$' 'ZQZQZQZQ' - // nounroll 1.51s 2.34s 1.51s - // unroll1 1.53s 2.32s 1.56s - // unroll2 2.22s 1.50s 0.61s - // unroll3 1.67s 1.45s 0.61s - // - // Ideally we'd be able to find a configuration that yields the - // best time for all regexes, but alas we settle for unroll3 that - // gives us *almost* the best for '\w{50}' and the best for the - // other two regexes. - // - // So what exactly is going on here? The first unrolling (grouping - // together runs of untagged transitions) specifically targets - // our choice of representation. The second unrolling (grouping - // together runs of self-transitions) specifically targets a common - // DFA topology. Let's dig in a little bit by looking at our - // regexes: - // - // '\w{50}': This regex spends a lot of time outside of the DFA's - // start state matching some part of the '\w' repetition. This - // means that it's a bit of a worst case for loop unrolling that - // targets self-transitions since the self-transitions in '\w{50}' - // are not particularly active for this haystack. However, the - // first unrolling (grouping together untagged transitions) - // does apply quite well here since very few transitions hit - // match/dead/quit/unknown states. It is however worth mentioning - // that if start states are configured to be tagged (which you - // typically want to do if you have a prefilter), then this regex - // actually slows way down because it is constantly ping-ponging - // out of the unrolled loop and into the handling of a tagged start - // state below. But when start states aren't tagged, the unrolled - // loop stays hot. (This is why it's imperative that start state - // tagging be disabled when there isn't a prefilter!) - // - // '(?m)^.+$': There are two important aspects of this regex: 1) - // on this haystack, its match count is very high, much higher - // than the other two regex and 2) it spends the vast majority - // of its time matching '.+'. Since Unicode mode is disabled, - // this corresponds to repeatedly following self transitions for - // the vast majority of the input. This does benefit from the - // untagged unrolling since most of the transitions will be to - // untagged states, but the untagged unrolling does more work than - // what is actually required. Namely, it has to keep track of the - // previous and next state IDs, which I guess requires a bit more - // shuffling. This is supported by the fact that nounroll+unroll1 - // are both slower than unroll2+unroll3, where the latter has a - // loop unrolling that specifically targets self-transitions. - // - // 'ZQZQZQZQ': This one is very similar to '(?m)^.+$' because it - // spends the vast majority of its time in self-transitions for - // the (implicit) unanchored prefix. The main difference with - // '(?m)^.+$' is that it has a much lower match count. So there - // isn't much time spent in the overhead of reporting matches. This - // is the primary explainer in the perf difference here. We include - // this regex and the former to make sure we have comparison points - // with high and low match counts. - // - // NOTE: I used 'OpenSubtitles2018.raw.sample.en' for 'bigfile'. 
- // - // NOTE: In a follow-up, it turns out that the "inner" loop - // mentioned above was a pretty big pessimization in some other - // cases. Namely, it resulted in too much ping-ponging into and out - // of the loop, which resulted in nearly ~2x regressions in search - // time when compared to the originally lazy DFA in the regex crate. - // So I've removed the second loop unrolling that targets the - // self-transition case. - let mut prev_sid = sid; - while at < input.end() { - prev_sid = unsafe { next_unchecked!(sid, at) }; - if prev_sid.is_tagged() || at + 3 >= input.end() { - core::mem::swap(&mut prev_sid, &mut sid); - break; - } - at += 1; - - sid = unsafe { next_unchecked!(prev_sid, at) }; - if sid.is_tagged() { - break; - } - at += 1; - - prev_sid = unsafe { next_unchecked!(sid, at) }; - if prev_sid.is_tagged() { - core::mem::swap(&mut prev_sid, &mut sid); - break; - } - at += 1; - - sid = unsafe { next_unchecked!(prev_sid, at) }; - if sid.is_tagged() { - break; - } - at += 1; - } - // If we quit out of the code above with an unknown state ID at - // any point, then we need to re-compute that transition using - // 'next_state', which will do NFA powerset construction for us. - if sid.is_unknown() { - cache.search_update(at); - sid = dfa - .next_state(cache, prev_sid, input.haystack()[at]) - .map_err(|_| gave_up(at))?; - } - } - if sid.is_tagged() { - if sid.is_start() { - if let Some(ref pre) = pre { - let span = Span::from(at..input.end()); - match pre.find(input.haystack(), span) { - None => { - cache.search_finish(span.end); - return Ok(mat); - } - Some(ref span) => { - // We want to skip any update to 'at' below - // at the end of this iteration and just - // jump immediately back to the next state - // transition at the leading position of the - // candidate match. - // - // ... but only if we actually made progress - // with our prefilter, otherwise if the start - // state has a self-loop, we can get stuck. - if span.start > at { - at = span.start; - if !universal_start { - sid = prefilter_restart( - dfa, cache, &input, at, - )?; - } - continue; - } - } - } - } - } else if sid.is_match() { - let pattern = dfa.match_pattern(cache, sid, 0); - // Since slice ranges are inclusive at the beginning and - // exclusive at the end, and since forward searches report - // the end, we can return 'at' as-is. This only works because - // matches are delayed by 1 byte. So by the time we observe a - // match, 'at' has already been set to 1 byte past the actual - // match location, which is precisely the exclusive ending - // bound of the match. 
- mat = Some(HalfMatch::new(pattern, at)); - if earliest { - cache.search_finish(at); - return Ok(mat); - } - } else if sid.is_dead() { - cache.search_finish(at); - return Ok(mat); - } else if sid.is_quit() { - cache.search_finish(at); - return Err(MatchError::quit(input.haystack()[at], at)); - } else { - debug_assert!(sid.is_unknown()); - unreachable!("sid being unknown is a bug"); - } - } - at += 1; - } - eoi_fwd(dfa, cache, input, &mut sid, &mut mat)?; - cache.search_finish(input.end()); - Ok(mat) -} - -#[inline(never)] -pub(crate) fn find_rev( - dfa: &DFA, - cache: &mut Cache, - input: &Input<'_>, -) -> Result, MatchError> { - if input.is_done() { - return Ok(None); - } - if input.get_earliest() { - find_rev_imp(dfa, cache, input, true) - } else { - find_rev_imp(dfa, cache, input, false) - } -} - -#[cfg_attr(feature = "perf-inline", inline(always))] -fn find_rev_imp( - dfa: &DFA, - cache: &mut Cache, - input: &Input<'_>, - earliest: bool, -) -> Result, MatchError> { - let mut mat = None; - let mut sid = init_rev(dfa, cache, input)?; - // In reverse search, the loop below can't handle the case of searching an - // empty slice. Ideally we could write something congruent to the forward - // search, i.e., 'while at >= start', but 'start' might be 0. Since we use - // an unsigned offset, 'at >= 0' is trivially always true. We could avoid - // this extra case handling by using a signed offset, but Rust makes it - // annoying to do. So... We just handle the empty case separately. - if input.start() == input.end() { - eoi_rev(dfa, cache, input, &mut sid, &mut mat)?; - return Ok(mat); - } - - let mut at = input.end() - 1; - macro_rules! next_unchecked { - ($sid:expr, $at:expr) => {{ - let byte = *input.haystack().get_unchecked($at); - dfa.next_state_untagged_unchecked(cache, $sid, byte) - }}; - } - cache.search_start(at); - loop { - if sid.is_tagged() { - cache.search_update(at); - sid = dfa - .next_state(cache, sid, input.haystack()[at]) - .map_err(|_| gave_up(at))?; - } else { - // SAFETY: See comments in 'find_fwd' for a safety argument. - // - // PERF: The comments in 'find_fwd' also provide a justification - // from a performance perspective as to 1) why we elide bounds - // checks and 2) why we do a specialized version of unrolling - // below. The reverse search does have a slightly different - // consideration in that most reverse searches tend to be - // anchored and on shorter haystacks. However, this still makes a - // difference. Take this command for example: - // - // regex-cli find match hybrid -p '(?m)^.+$' -UBb bigfile - // - // (Notice that we use 'find hybrid regex', not 'find hybrid dfa' - // like in the justification for the forward direction. The 'regex' - // sub-command will find start-of-match and thus run the reverse - // direction.) - // - // Without unrolling below, the above command takes around 3.76s. - // But with the unrolling below, we get down to 2.55s. If we keep - // the unrolling but add in bounds checks, then we get 2.86s. - // - // NOTE: I used 'OpenSubtitles2018.raw.sample.en' for 'bigfile'. 
- let mut prev_sid = sid; - while at >= input.start() { - prev_sid = unsafe { next_unchecked!(sid, at) }; - if prev_sid.is_tagged() - || at <= input.start().saturating_add(3) - { - core::mem::swap(&mut prev_sid, &mut sid); - break; - } - at -= 1; - - sid = unsafe { next_unchecked!(prev_sid, at) }; - if sid.is_tagged() { - break; - } - at -= 1; - - prev_sid = unsafe { next_unchecked!(sid, at) }; - if prev_sid.is_tagged() { - core::mem::swap(&mut prev_sid, &mut sid); - break; - } - at -= 1; - - sid = unsafe { next_unchecked!(prev_sid, at) }; - if sid.is_tagged() { - break; - } - at -= 1; - } - // If we quit out of the code above with an unknown state ID at - // any point, then we need to re-compute that transition using - // 'next_state', which will do NFA powerset construction for us. - if sid.is_unknown() { - cache.search_update(at); - sid = dfa - .next_state(cache, prev_sid, input.haystack()[at]) - .map_err(|_| gave_up(at))?; - } - } - if sid.is_tagged() { - if sid.is_start() { - // do nothing - } else if sid.is_match() { - let pattern = dfa.match_pattern(cache, sid, 0); - // Since reverse searches report the beginning of a match - // and the beginning is inclusive (not exclusive like the - // end of a match), we add 1 to make it inclusive. - mat = Some(HalfMatch::new(pattern, at + 1)); - if earliest { - cache.search_finish(at); - return Ok(mat); - } - } else if sid.is_dead() { - cache.search_finish(at); - return Ok(mat); - } else if sid.is_quit() { - cache.search_finish(at); - return Err(MatchError::quit(input.haystack()[at], at)); - } else { - debug_assert!(sid.is_unknown()); - unreachable!("sid being unknown is a bug"); - } - } - if at == input.start() { - break; - } - at -= 1; - } - cache.search_finish(input.start()); - eoi_rev(dfa, cache, input, &mut sid, &mut mat)?; - Ok(mat) -} - -#[inline(never)] -pub(crate) fn find_overlapping_fwd( - dfa: &DFA, - cache: &mut Cache, - input: &Input<'_>, - state: &mut OverlappingState, -) -> Result<(), MatchError> { - state.mat = None; - if input.is_done() { - return Ok(()); - } - let pre = if input.get_anchored().is_anchored() { - None - } else { - dfa.get_config().get_prefilter() - }; - if pre.is_some() { - find_overlapping_fwd_imp(dfa, cache, input, pre, state) - } else { - find_overlapping_fwd_imp(dfa, cache, input, None, state) - } -} - -#[cfg_attr(feature = "perf-inline", inline(always))] -fn find_overlapping_fwd_imp( - dfa: &DFA, - cache: &mut Cache, - input: &Input<'_>, - pre: Option<&'_ Prefilter>, - state: &mut OverlappingState, -) -> Result<(), MatchError> { - // See 'prefilter_restart' docs for explanation. - let universal_start = dfa.get_nfa().look_set_prefix_any().is_empty(); - let mut sid = match state.id { - None => { - state.at = input.start(); - init_fwd(dfa, cache, input)? - } - Some(sid) => { - if let Some(match_index) = state.next_match_index { - let match_len = dfa.match_len(cache, sid); - if match_index < match_len { - state.next_match_index = Some(match_index + 1); - let pattern = dfa.match_pattern(cache, sid, match_index); - state.mat = Some(HalfMatch::new(pattern, state.at)); - return Ok(()); - } - } - // Once we've reported all matches at a given position, we need to - // advance the search to the next position. - state.at += 1; - if state.at > input.end() { - return Ok(()); - } - sid - } - }; - - // NOTE: We don't optimize the crap out of this routine primarily because - // it seems like most overlapping searches will have higher match counts, - // and thus, throughput is perhaps not as important. 
But if you have a use - // case for something faster, feel free to file an issue. - cache.search_start(state.at); - while state.at < input.end() { - sid = dfa - .next_state(cache, sid, input.haystack()[state.at]) - .map_err(|_| gave_up(state.at))?; - if sid.is_tagged() { - state.id = Some(sid); - if sid.is_start() { - if let Some(ref pre) = pre { - let span = Span::from(state.at..input.end()); - match pre.find(input.haystack(), span) { - None => return Ok(()), - Some(ref span) => { - if span.start > state.at { - state.at = span.start; - if !universal_start { - sid = prefilter_restart( - dfa, cache, &input, state.at, - )?; - } - continue; - } - } - } - } - } else if sid.is_match() { - state.next_match_index = Some(1); - let pattern = dfa.match_pattern(cache, sid, 0); - state.mat = Some(HalfMatch::new(pattern, state.at)); - cache.search_finish(state.at); - return Ok(()); - } else if sid.is_dead() { - cache.search_finish(state.at); - return Ok(()); - } else if sid.is_quit() { - cache.search_finish(state.at); - return Err(MatchError::quit( - input.haystack()[state.at], - state.at, - )); - } else { - debug_assert!(sid.is_unknown()); - unreachable!("sid being unknown is a bug"); - } - } - state.at += 1; - cache.search_update(state.at); - } - - let result = eoi_fwd(dfa, cache, input, &mut sid, &mut state.mat); - state.id = Some(sid); - if state.mat.is_some() { - // '1' is always correct here since if we get to this point, this - // always corresponds to the first (index '0') match discovered at - // this position. So the next match to report at this position (if - // it exists) is at index '1'. - state.next_match_index = Some(1); - } - cache.search_finish(input.end()); - result -} - -#[inline(never)] -pub(crate) fn find_overlapping_rev( - dfa: &DFA, - cache: &mut Cache, - input: &Input<'_>, - state: &mut OverlappingState, -) -> Result<(), MatchError> { - state.mat = None; - if input.is_done() { - return Ok(()); - } - let mut sid = match state.id { - None => { - let sid = init_rev(dfa, cache, input)?; - state.id = Some(sid); - if input.start() == input.end() { - state.rev_eoi = true; - } else { - state.at = input.end() - 1; - } - sid - } - Some(sid) => { - if let Some(match_index) = state.next_match_index { - let match_len = dfa.match_len(cache, sid); - if match_index < match_len { - state.next_match_index = Some(match_index + 1); - let pattern = dfa.match_pattern(cache, sid, match_index); - state.mat = Some(HalfMatch::new(pattern, state.at)); - return Ok(()); - } - } - // Once we've reported all matches at a given position, we need - // to advance the search to the next position. However, if we've - // already followed the EOI transition, then we know we're done - // with the search and there cannot be any more matches to report. - if state.rev_eoi { - return Ok(()); - } else if state.at == input.start() { - // At this point, we should follow the EOI transition. This - // will cause us the skip the main loop below and fall through - // to the final 'eoi_rev' transition. - state.rev_eoi = true; - } else { - // We haven't hit the end of the search yet, so move on. 
- state.at -= 1; - } - sid - } - }; - cache.search_start(state.at); - while !state.rev_eoi { - sid = dfa - .next_state(cache, sid, input.haystack()[state.at]) - .map_err(|_| gave_up(state.at))?; - if sid.is_tagged() { - state.id = Some(sid); - if sid.is_start() { - // do nothing - } else if sid.is_match() { - state.next_match_index = Some(1); - let pattern = dfa.match_pattern(cache, sid, 0); - state.mat = Some(HalfMatch::new(pattern, state.at + 1)); - cache.search_finish(state.at); - return Ok(()); - } else if sid.is_dead() { - cache.search_finish(state.at); - return Ok(()); - } else if sid.is_quit() { - cache.search_finish(state.at); - return Err(MatchError::quit( - input.haystack()[state.at], - state.at, - )); - } else { - debug_assert!(sid.is_unknown()); - unreachable!("sid being unknown is a bug"); - } - } - if state.at == input.start() { - break; - } - state.at -= 1; - cache.search_update(state.at); - } - - let result = eoi_rev(dfa, cache, input, &mut sid, &mut state.mat); - state.rev_eoi = true; - state.id = Some(sid); - if state.mat.is_some() { - // '1' is always correct here since if we get to this point, this - // always corresponds to the first (index '0') match discovered at - // this position. So the next match to report at this position (if - // it exists) is at index '1'. - state.next_match_index = Some(1); - } - cache.search_finish(input.start()); - result -} - -#[cfg_attr(feature = "perf-inline", inline(always))] -fn init_fwd( - dfa: &DFA, - cache: &mut Cache, - input: &Input<'_>, -) -> Result { - let sid = dfa.start_state_forward(cache, input)?; - // Start states can never be match states, since all matches are delayed - // by 1 byte. - debug_assert!(!sid.is_match()); - Ok(sid) -} - -#[cfg_attr(feature = "perf-inline", inline(always))] -fn init_rev( - dfa: &DFA, - cache: &mut Cache, - input: &Input<'_>, -) -> Result { - let sid = dfa.start_state_reverse(cache, input)?; - // Start states can never be match states, since all matches are delayed - // by 1 byte. - debug_assert!(!sid.is_match()); - Ok(sid) -} - -#[cfg_attr(feature = "perf-inline", inline(always))] -fn eoi_fwd( - dfa: &DFA, - cache: &mut Cache, - input: &Input<'_>, - sid: &mut LazyStateID, - mat: &mut Option, -) -> Result<(), MatchError> { - let sp = input.get_span(); - match input.haystack().get(sp.end) { - Some(&b) => { - *sid = - dfa.next_state(cache, *sid, b).map_err(|_| gave_up(sp.end))?; - if sid.is_match() { - let pattern = dfa.match_pattern(cache, *sid, 0); - *mat = Some(HalfMatch::new(pattern, sp.end)); - } else if sid.is_quit() { - return Err(MatchError::quit(b, sp.end)); - } - } - None => { - *sid = dfa - .next_eoi_state(cache, *sid) - .map_err(|_| gave_up(input.haystack().len()))?; - if sid.is_match() { - let pattern = dfa.match_pattern(cache, *sid, 0); - *mat = Some(HalfMatch::new(pattern, input.haystack().len())); - } - // N.B. We don't have to check 'is_quit' here because the EOI - // transition can never lead to a quit state. 
- debug_assert!(!sid.is_quit()); - } - } - Ok(()) -} - -#[cfg_attr(feature = "perf-inline", inline(always))] -fn eoi_rev( - dfa: &DFA, - cache: &mut Cache, - input: &Input<'_>, - sid: &mut LazyStateID, - mat: &mut Option, -) -> Result<(), MatchError> { - let sp = input.get_span(); - if sp.start > 0 { - let byte = input.haystack()[sp.start - 1]; - *sid = dfa - .next_state(cache, *sid, byte) - .map_err(|_| gave_up(sp.start))?; - if sid.is_match() { - let pattern = dfa.match_pattern(cache, *sid, 0); - *mat = Some(HalfMatch::new(pattern, sp.start)); - } else if sid.is_quit() { - return Err(MatchError::quit(byte, sp.start - 1)); - } - } else { - *sid = - dfa.next_eoi_state(cache, *sid).map_err(|_| gave_up(sp.start))?; - if sid.is_match() { - let pattern = dfa.match_pattern(cache, *sid, 0); - *mat = Some(HalfMatch::new(pattern, 0)); - } - // N.B. We don't have to check 'is_quit' here because the EOI - // transition can never lead to a quit state. - debug_assert!(!sid.is_quit()); - } - Ok(()) -} - -/// Re-compute the starting state that a DFA should be in after finding a -/// prefilter candidate match at the position `at`. -/// -/// It is always correct to call this, but not always necessary. Namely, -/// whenever the DFA has a universal start state, the DFA can remain in the -/// start state that it was in when it ran the prefilter. Why? Because in that -/// case, there is only one start state. -/// -/// When does a DFA have a universal start state? In precisely cases where -/// it has no look-around assertions in its prefix. So for example, `\bfoo` -/// does not have a universal start state because the start state depends on -/// whether the byte immediately before the start position is a word byte or -/// not. However, `foo\b` does have a universal start state because the word -/// boundary does not appear in the pattern's prefix. -/// -/// So... most cases don't need this, but when a pattern doesn't have a -/// universal start state, then after a prefilter candidate has been found, the -/// current state *must* be re-litigated as if computing the start state at the -/// beginning of the search because it might change. That is, not all start -/// states are created equal. -/// -/// Why avoid it? Because while it's not super expensive, it isn't a trivial -/// operation to compute the start state. It is much better to avoid it and -/// just state in the current state if you know it to be correct. -#[cfg_attr(feature = "perf-inline", inline(always))] -fn prefilter_restart( - dfa: &DFA, - cache: &mut Cache, - input: &Input<'_>, - at: usize, -) -> Result { - let mut input = input.clone(); - input.set_start(at); - init_fwd(dfa, cache, &input) -} - -/// A convenience routine for constructing a "gave up" match error. -#[cfg_attr(feature = "perf-inline", inline(always))] -fn gave_up(offset: usize) -> MatchError { - MatchError::gave_up(offset) -} diff --git a/vendor/regex-automata/src/lib.rs b/vendor/regex-automata/src/lib.rs deleted file mode 100644 index b29f618a8b21cb..00000000000000 --- a/vendor/regex-automata/src/lib.rs +++ /dev/null @@ -1,651 +0,0 @@ -/*! -This crate exposes a variety of regex engines used by the `regex` crate. -It provides a vast, sprawling and "expert" level API to each regex engine. -The regex engines provided by this crate focus heavily on finite automata -implementations and specifically guarantee worst case `O(m * n)` time -complexity for all searches. (Where `m ~ len(regex)` and `n ~ len(haystack)`.) 
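
To make the opening claim concrete, here is a minimal sketch of driving one of these engines directly, using only the `hybrid::regex::Regex` routines (`new`, `create_cache`, `find`) quoted in the hunk above; the `main` wrapper and boxed error type are boilerplate assumptions, not part of the crate docs.

```rust
use regex_automata::{hybrid::regex::Regex, Match};

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Build a lazy-DFA-backed regex. Internally this compiles both a
    // forward DFA (finds match ends) and a reverse DFA (finds match starts).
    let re = Regex::new(r"foo[0-9]+bar")?;

    // The lazy DFA needs mutable scratch space for the transitions it
    // builds during the search.
    let mut cache = re.create_cache();

    // Search time stays O(m * n): one forward pass finds the end of the
    // match, one bounded reverse pass recovers its start.
    assert_eq!(
        Some(Match::must(0, 3..14)),
        re.find(&mut cache, "zzzfoo12345barzzz"),
    );
    Ok(())
}
```
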
- -The primary goal of this crate is to serve as an implementation detail for the -`regex` crate. A secondary goal is to make its internals available for use by -others. - -# Table of contents - -* [Should I be using this crate?](#should-i-be-using-this-crate) gives some -reasons for and against using this crate. -* [Examples](#examples) provides a small selection of things you can do with -this crate. -* [Available regex engines](#available-regex-engines) provides a hyperlinked -list of all regex engines in this crate. -* [API themes](#api-themes) discusses common elements used throughout this -crate. -* [Crate features](#crate-features) documents the extensive list of Cargo -features available. - -# Should I be using this crate? - -If you find yourself here because you just want to use regexes, then you should -first check out whether the [`regex` crate](https://docs.rs/regex) meets -your needs. It provides a streamlined and difficult-to-misuse API for regex -searching. - -If you're here because there is something specific you want to do that can't -be easily done with `regex` crate, then you are perhaps in the right place. -It's most likely that the first stop you'll want to make is to explore the -[`meta` regex APIs](meta). Namely, the `regex` crate is just a light wrapper -over a [`meta::Regex`], so its API will probably be the easiest to transition -to. In contrast to the `regex` crate, the `meta::Regex` API supports more -search parameters and does multi-pattern searches. However, it isn't quite as -ergonomic. - -Otherwise, the following is an inexhaustive list of reasons to use this crate: - -* You want to analyze or use a [Thompson `NFA`](nfa::thompson::NFA) directly. -* You want more powerful multi-pattern search than what is provided by -`RegexSet` in the `regex` crate. All regex engines in this crate support -multi-pattern searches. -* You want to use one of the `regex` crate's internal engines directly because -of some interesting configuration that isn't possible via the `regex` crate. -For example, a [lazy DFA's configuration](hybrid::dfa::Config) exposes a -dizzying number of options for controlling its execution. -* You want to use the lower level search APIs. For example, both the [lazy -DFA](hybrid::dfa) and [fully compiled DFAs](dfa) support searching by exploring -the automaton one state at a time. This might be useful, for example, for -stream searches or searches of strings stored in non-contiguous in memory. -* You want to build a fully compiled DFA and then [use zero-copy -deserialization](dfa::dense::DFA::from_bytes) to load it into memory and use -it for searching. This use case is supported in core-only no-std/no-alloc -environments. -* You want to run [anchored searches](Input::anchored) without using the `^` -anchor in your regex pattern. -* You need to work-around contention issues with -sharing a regex across multiple threads. The -[`meta::Regex::search_with`](meta::Regex::search_with) API permits bypassing -any kind of synchronization at all by requiring the caller to provide the -mutable scratch spaced needed during a search. -* You want to build your own regex engine on top of the `regex` crate's -infrastructure. - -# Examples - -This section tries to identify a few interesting things you can do with this -crate and demonstrates them. - -### Multi-pattern searches with capture groups - -One of the more frustrating limitations of `RegexSet` in the `regex` crate -(at the time of writing) is that it doesn't report match positions. 
With this -crate, multi-pattern support was intentionally designed in from the beginning, -which means it works in all regex engines and even for capture groups as well. - -This example shows how to search for matches of multiple regexes, where each -regex uses the same capture group names to parse different key-value formats. - -``` -use regex_automata::{meta::Regex, PatternID}; - -let re = Regex::new_many(&[ - r#"(?m)^(?[[:word:]]+)=(?[[:word:]]+)$"#, - r#"(?m)^(?[[:word:]]+)="(?[^"]+)"$"#, - r#"(?m)^(?[[:word:]]+)='(?[^']+)'$"#, - r#"(?m)^(?[[:word:]]+):\s*(?[[:word:]]+)$"#, -])?; -let hay = r#" -best_album="Blow Your Face Out" -best_quote='"then as it was, then again it will be"' -best_year=1973 -best_simpsons_episode: HOMR -"#; -let mut kvs = vec![]; -for caps in re.captures_iter(hay) { - // N.B. One could use capture indices '1' and '2' here - // as well. Capture indices are local to each pattern. - // (Just like names are.) - let key = &hay[caps.get_group_by_name("key").unwrap()]; - let val = &hay[caps.get_group_by_name("val").unwrap()]; - kvs.push((key, val)); -} -assert_eq!(kvs, vec![ - ("best_album", "Blow Your Face Out"), - ("best_quote", "\"then as it was, then again it will be\""), - ("best_year", "1973"), - ("best_simpsons_episode", "HOMR"), -]); - -# Ok::<(), Box>(()) -``` - -### Build a full DFA and walk it manually - -One of the regex engines in this crate is a fully compiled DFA. It takes worst -case exponential time to build, but once built, it can be easily explored and -used for searches. Here's a simple example that uses its lower level APIs to -implement a simple anchored search by hand. - -``` -use regex_automata::{dfa::{Automaton, dense}, Input}; - -let dfa = dense::DFA::new(r"(?-u)\b[A-Z]\w+z\b")?; -let haystack = "Quartz"; - -// The start state is determined by inspecting the position and the -// initial bytes of the haystack. -let mut state = dfa.start_state_forward(&Input::new(haystack))?; -// Walk all the bytes in the haystack. -for &b in haystack.as_bytes().iter() { - state = dfa.next_state(state, b); -} -// DFAs in this crate require an explicit -// end-of-input transition if a search reaches -// the end of a haystack. -state = dfa.next_eoi_state(state); -assert!(dfa.is_match_state(state)); - -# Ok::<(), Box>(()) -``` - -Or do the same with a lazy DFA that avoids exponential worst case compile time, -but requires mutable scratch space to lazily build the DFA during the search. - -``` -use regex_automata::{hybrid::dfa::DFA, Input}; - -let dfa = DFA::new(r"(?-u)\b[A-Z]\w+z\b")?; -let mut cache = dfa.create_cache(); -let hay = "Quartz"; - -// The start state is determined by inspecting the position and the -// initial bytes of the haystack. -let mut state = dfa.start_state_forward(&mut cache, &Input::new(hay))?; -// Walk all the bytes in the haystack. -for &b in hay.as_bytes().iter() { - state = dfa.next_state(&mut cache, state, b)?; -} -// DFAs in this crate require an explicit -// end-of-input transition if a search reaches -// the end of a haystack. -state = dfa.next_eoi_state(&mut cache, state)?; -assert!(state.is_match()); - -# Ok::<(), Box>(()) -``` - -### Find all overlapping matches - -This example shows how to build a DFA and use it to find all possible matches, -including overlapping matches. A similar example will work with a lazy DFA as -well. This also works with multiple patterns and will report all matches at the -same position where multiple patterns match. 
- -``` -use regex_automata::{ - dfa::{dense, Automaton, OverlappingState}, - Input, MatchKind, -}; - -let dfa = dense::DFA::builder() - .configure(dense::DFA::config().match_kind(MatchKind::All)) - .build(r"(?-u)\w{3,}")?; -let input = Input::new("homer marge bart lisa maggie"); -let mut state = OverlappingState::start(); - -let mut matches = vec![]; -while let Some(hm) = { - dfa.try_search_overlapping_fwd(&input, &mut state)?; - state.get_match() -} { - matches.push(hm.offset()); -} -assert_eq!(matches, vec![ - 3, 4, 5, // hom, home, homer - 9, 10, 11, // mar, marg, marge - 15, 16, // bar, bart - 20, 21, // lis, lisa - 25, 26, 27, 28, // mag, magg, maggi, maggie -]); - -# Ok::<(), Box>(()) -``` - -# Available regex engines - -The following is a complete list of all regex engines provided by this crate, -along with a very brief description of it and why you might want to use it. - -* [`dfa::regex::Regex`] is a regex engine that works on top of either -[dense](dfa::dense) or [sparse](dfa::sparse) fully compiled DFAs. You might -use a DFA if you need the fastest possible regex engine in this crate and can -afford the exorbitant memory usage usually required by DFAs. Low level APIs on -fully compiled DFAs are provided by the [`Automaton` trait](dfa::Automaton). -Fully compiled dense DFAs can handle all regexes except for searching a regex -with a Unicode word boundary on non-ASCII haystacks. A fully compiled DFA based -regex can only report the start and end of each match. -* [`hybrid::regex::Regex`] is a regex engine that works on top of a lazily -built DFA. Its performance profile is very similar to that of fully compiled -DFAs, but can be slower in some pathological cases. Fully compiled DFAs are -also amenable to more optimizations, such as state acceleration, that aren't -available in a lazy DFA. You might use this lazy DFA if you can't abide the -worst case exponential compile time of a full DFA, but still want the DFA -search performance in the vast majority of cases. A lazy DFA based regex can -only report the start and end of each match. -* [`dfa::onepass::DFA`] is a regex engine that is implemented as a DFA, but -can report the matches of each capture group in addition to the start and end -of each match. The catch is that it only works on a somewhat small subset of -regexes known as "one-pass." You'll want to use this for cases when you need -capture group matches and the regex is one-pass since it is likely to be faster -than any alternative. A one-pass DFA can handle all types of regexes, but does -have some reasonable limits on the number of capture groups it can handle. -* [`nfa::thompson::backtrack::BoundedBacktracker`] is a regex engine that uses -backtracking, but keeps track of the work it has done to avoid catastrophic -backtracking. Like the one-pass DFA, it provides the matches of each capture -group. It retains the `O(m * n)` worst case time bound. This tends to be slower -than the one-pass DFA regex engine, but faster than the PikeVM. It can handle -all types of regexes, but usually only works well with small haystacks and -small regexes due to the memory required to avoid redoing work. -* [`nfa::thompson::pikevm::PikeVM`] is a regex engine that can handle all -regexes, of all sizes and provides capture group matches. It tends to be a tool -of last resort because it is also usually the slowest regex engine. -* [`meta::Regex`] is the meta regex engine that combines *all* of the above -engines into one. 
The reason for this is that each of the engines above have -their own caveats such as, "only handles a subset of regexes" or "is generally -slow." The meta regex engine accounts for all of these caveats and composes -the engines in a way that attempts to mitigate each engine's weaknesses while -emphasizing its strengths. For example, it will attempt to run a lazy DFA even -if it might fail. In which case, it will restart the search with a likely -slower but more capable regex engine. The meta regex engine is what you should -default to. Use one of the above engines directly only if you have a specific -reason to. - -# API themes - -While each regex engine has its own APIs and configuration options, there are -some general themes followed by all of them. - -### The `Input` abstraction - -Most search routines in this crate accept anything that implements -`Into`. Both `&str` and `&[u8]` haystacks satisfy this constraint, which -means that things like `engine.search("foo")` will work as you would expect. - -By virtue of accepting an `Into` though, callers can provide more than -just a haystack. Indeed, the [`Input`] type has more details, but briefly, -callers can use it to configure various aspects of the search: - -* The span of the haystack to search via [`Input::span`] or [`Input::range`], -which might be a substring of the haystack. -* Whether to run an anchored search or not via [`Input::anchored`]. This -permits one to require matches to start at the same offset that the search -started. -* Whether to ask the regex engine to stop as soon as a match is seen via -[`Input::earliest`]. This can be used to find the offset of a match as soon -as it is known without waiting for the full leftmost-first match to be found. -This can also be used to avoid the worst case `O(m * n^2)` time complexity -of iteration. - -Some lower level search routines accept an `&Input` for performance reasons. -In which case, `&Input::new("haystack")` can be used for a simple search. - -### Error reporting - -Most, but not all, regex engines in this crate can fail to execute a search. -When a search fails, callers cannot determine whether or not a match exists. -That is, the result is indeterminate. - -Search failure, in all cases in this crate, is represented by a [`MatchError`]. -Routines that can fail start with the `try_` prefix in their name. For example, -[`hybrid::regex::Regex::try_search`] can fail for a number of reasons. -Conversely, routines that either can't fail or can panic on failure lack the -`try_` prefix. For example, [`hybrid::regex::Regex::find`] will panic in -cases where [`hybrid::regex::Regex::try_search`] would return an error, and -[`meta::Regex::find`] will never panic. Therefore, callers need to pay close -attention to the panicking conditions in the documentation. - -In most cases, the reasons that a search fails are either predictable or -configurable, albeit at some additional cost. - -An example of predictable failure is -[`BoundedBacktracker::try_search`](nfa::thompson::backtrack::BoundedBacktracker::try_search). -Namely, it fails whenever the multiplication of the haystack, the regex and some -constant exceeds the -[configured visited capacity](nfa::thompson::backtrack::Config::visited_capacity). -Callers can predict the failure in terms of haystack length via the -[`BoundedBacktracker::max_haystack_len`](nfa::thompson::backtrack::BoundedBacktracker::max_haystack_len) -method. 
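To make the predictable-failure point above concrete, here is a minimal sketch using the bounded backtracker directly. It assumes the `create_cache`, `max_haystack_len` and `try_is_match` methods described in these docs, and the default visited capacity; the pattern and haystacks are illustrative only.

```
use regex_automata::nfa::thompson::backtrack::BoundedBacktracker;

fn main() {
    let re = BoundedBacktracker::new(r"[a-z]+").unwrap();
    let mut cache = re.create_cache();

    // Haystacks longer than this make the fallible `try_*` routines return
    // a MatchError instead of silently doing the wrong thing.
    let max = re.max_haystack_len();
    let too_long = "a".repeat(max + 1);
    assert!(re.try_is_match(&mut cache, too_long.as_str()).is_err());

    // Anything at or under the limit searches normally.
    assert!(re.try_is_match(&mut cache, "xyz").unwrap());
}
```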
While this form of failure is technically avoidable by increasing the -visited capacity, it isn't practical to do so for all inputs because the -memory usage required for larger haystacks becomes impractically large. So in -practice, if one is using the bounded backtracker, you really do have to deal -with the failure. - -An example of configurable failure happens when one enables heuristic support -for Unicode word boundaries in a DFA. Namely, since the DFAs in this crate -(except for the one-pass DFA) do not support Unicode word boundaries on -non-ASCII haystacks, building a DFA from an NFA that contains a Unicode word -boundary will itself fail. However, one can configure DFAs to still be built in -this case by -[configuring heuristic support for Unicode word boundaries](hybrid::dfa::Config::unicode_word_boundary). -If the NFA the DFA is built from contains a Unicode word boundary, then the -DFA will still be built, but special transitions will be added to every state -that cause the DFA to fail if any non-ASCII byte is seen. This failure happens -at search time and it requires the caller to opt into this. - -There are other ways for regex engines to fail in this crate, but the above -two should represent the general theme of failures one can find. Dealing -with these failures is, in part, one the responsibilities of the [meta regex -engine](meta). Notice, for example, that the meta regex engine exposes an API -that never returns an error nor panics. It carefully manages all of the ways -in which the regex engines can fail and either avoids the predictable ones -entirely (e.g., the bounded backtracker) or reacts to configured failures by -falling back to a different engine (e.g., the lazy DFA quitting because it saw -a non-ASCII byte). - -### Configuration and Builders - -Most of the regex engines in this crate come with two types to facilitate -building the regex engine: a `Config` and a `Builder`. A `Config` is usually -specific to that particular regex engine, but other objects such as parsing and -NFA compilation have `Config` types too. A `Builder` is the thing responsible -for taking inputs (either pattern strings or already-parsed patterns or even -NFAs directly) and turning them into an actual regex engine that can be used -for searching. - -The main reason why building a regex engine is a bit complicated is because -of the desire to permit composition with de-coupled components. For example, -you might want to [manually construct a Thompson NFA](nfa::thompson::Builder) -and then build a regex engine from it without ever using a regex parser -at all. On the other hand, you might also want to build a regex engine directly -from the concrete syntax. This demonstrates why regex engine construction is -so flexible: it needs to support not just convenient construction, but also -construction from parts built elsewhere. - -This is also in turn why there are many different `Config` structs in this -crate. Let's look more closely at an example: [`hybrid::regex::Builder`]. It -accepts three different `Config` types for configuring construction of a lazy -DFA regex: - -* [`hybrid::regex::Builder::syntax`] accepts a -[`util::syntax::Config`] for configuring the options found in the -[`regex-syntax`](regex_syntax) crate. For example, whether to match -case insensitively. -* [`hybrid::regex::Builder::thompson`] accepts a [`nfa::thompson::Config`] for -configuring construction of a [Thompson NFA](nfa::thompson::NFA). 
For example, -whether to build an NFA that matches the reverse language described by the -regex. -* [`hybrid::regex::Builder::dfa`] accept a [`hybrid::dfa::Config`] for -configuring construction of the pair of underlying lazy DFAs that make up the -lazy DFA regex engine. For example, changing the capacity of the cache used to -store the transition table. - -The lazy DFA regex engine uses all three of those configuration objects for -methods like [`hybrid::regex::Builder::build`], which accepts a pattern -string containing the concrete syntax of your regex. It uses the syntax -configuration to parse it into an AST and translate it into an HIR. Then the -NFA configuration when compiling the HIR into an NFA. And then finally the DFA -configuration when lazily determinizing the NFA into a DFA. - -Notice though that the builder also has a -[`hybrid::regex::Builder::build_from_dfas`] constructor. This permits callers -to build the underlying pair of lazy DFAs themselves (one for the forward -searching to find the end of a match and one for the reverse searching to find -the start of a match), and then build the regex engine from them. The lazy -DFAs, in turn, have their own builder that permits [construction directly from -a Thompson NFA](hybrid::dfa::Builder::build_from_nfa). Continuing down the -rabbit hole, a Thompson NFA has its own compiler that permits [construction -directly from an HIR](nfa::thompson::Compiler::build_from_hir). The lazy DFA -regex engine builder lets you follow this rabbit hole all the way down, but -also provides convenience routines that do it for you when you don't need -precise control over every component. - -The [meta regex engine](meta) is a good example of something that utilizes the -full flexibility of these builders. It often needs not only precise control -over each component, but also shares them across multiple regex engines. -(Most sharing is done by internal reference accounting. For example, an -[`NFA`](nfa::thompson::NFA) is reference counted internally which makes cloning -cheap.) - -### Size limits - -Unlike the `regex` crate, the `regex-automata` crate specifically does not -enable any size limits by default. That means users of this crate need to -be quite careful when using untrusted patterns. Namely, because bounded -repetitions can grow exponentially by stacking them, it is possible to build a -very large internal regex object from just a small pattern string. For example, -the NFA built from the pattern `a{10}{10}{10}{10}{10}{10}{10}` is over 240MB. - -There are multiple size limit options in this crate. If one or more size limits -are relevant for the object you're building, they will be configurable via -methods on a corresponding `Config` type. - -# Crate features - -This crate has a dizzying number of features. The main idea is to be able to -control how much stuff you pull in for your specific use case, since the full -crate is quite large and can dramatically increase compile times and binary -size. - -The most barebones but useful configuration is to disable all default features -and enable only `dfa-search`. This will bring in just the DFA deserialization -and search routines without any dependency on `std` or `alloc`. This does -require generating and serializing a DFA, and then storing it somewhere, but -it permits regex searches in freestanding or embedded environments. - -Because there are so many features, they are split into a few groups. 
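Tying this back to the size-limit discussion above, here is a minimal sketch showing how an untrusted, pathological pattern can be rejected at build time. It assumes the meta engine's `nfa_size_limit` option referenced elsewhere in these docs; the exact limit chosen here is arbitrary.

```
use regex_automata::meta::Regex;

fn main() {
    // Cap the compiled NFA at roughly 1MB of heap; the stacked bounded
    // repetitions from the size-limit section above blow well past that
    // and therefore fail to build.
    let result = Regex::builder()
        .configure(Regex::config().nfa_size_limit(Some(1 << 20)))
        .build(r"a{10}{10}{10}{10}{10}{10}{10}");
    assert!(result.is_err());

    // Ordinary patterns still build fine under the same limit.
    let ok = Regex::builder()
        .configure(Regex::config().nfa_size_limit(Some(1 << 20)))
        .build(r"[a-z]{10}");
    assert!(ok.is_ok());
}
```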
- -The default set of features is: `std`, `syntax`, `perf`, `unicode`, `meta`, -`nfa`, `dfa` and `hybrid`. Basically, the default is to enable everything -except for development related features like `logging`. - -### Ecosystem features - -* **std** - Enables use of the standard library. In terms of APIs, this usually -just means that error types implement the `std::error::Error` trait. Otherwise, -`std` sometimes enables the code to be faster, for example, using a `HashMap` -instead of a `BTreeMap`. (The `std` feature matters more for dependencies like -`aho-corasick` and `memchr`, where `std` is required to enable certain classes -of SIMD optimizations.) Enabling `std` automatically enables `alloc`. -* **alloc** - Enables use of the `alloc` library. This is required for most -APIs in this crate. The main exception is deserializing and searching with -fully compiled DFAs. -* **logging** - Adds a dependency on the `log` crate and makes this crate emit -log messages of varying degrees of utility. The log messages are especially -useful in trying to understand what the meta regex engine is doing. - -### Performance features - -**Note**: - To get performance benefits offered by the SIMD, `std` must be enabled. - None of the `perf-*` features will enable `std` implicitly. - -* **perf** - Enables all of the below features. -* **perf-inline** - When enabled, `inline(always)` is used in (many) strategic -locations to help performance at the expense of longer compile times and -increased binary size. -* **perf-literal** - Enables all literal related optimizations. - * **perf-literal-substring** - Enables all single substring literal - optimizations. This includes adding a dependency on the `memchr` crate. - * **perf-literal-multisubstring** - Enables all multiple substring literal - optimizations. This includes adding a dependency on the `aho-corasick` - crate. - -### Unicode features - -* **unicode** - - Enables all Unicode features. This feature is enabled by default, and will - always cover all Unicode features, even if more are added in the future. -* **unicode-age** - - Provide the data for the - [Unicode `Age` property](https://www.unicode.org/reports/tr44/tr44-24.html#Character_Age). - This makes it possible to use classes like `\p{Age:6.0}` to refer to all - codepoints first introduced in Unicode 6.0 -* **unicode-bool** - - Provide the data for numerous Unicode boolean properties. The full list - is not included here, but contains properties like `Alphabetic`, `Emoji`, - `Lowercase`, `Math`, `Uppercase` and `White_Space`. -* **unicode-case** - - Provide the data for case insensitive matching using - [Unicode's "simple loose matches" specification](https://www.unicode.org/reports/tr18/#Simple_Loose_Matches). -* **unicode-gencat** - - Provide the data for - [Unicode general categories](https://www.unicode.org/reports/tr44/tr44-24.html#General_Category_Values). - This includes, but is not limited to, `Decimal_Number`, `Letter`, - `Math_Symbol`, `Number` and `Punctuation`. -* **unicode-perl** - - Provide the data for supporting the Unicode-aware Perl character classes, - corresponding to `\w`, `\s` and `\d`. This is also necessary for using - Unicode-aware word boundary assertions. Note that if this feature is - disabled, the `\s` and `\d` character classes are still available if the - `unicode-bool` and `unicode-gencat` features are enabled, respectively. -* **unicode-script** - - Provide the data for - [Unicode scripts and script extensions](https://www.unicode.org/reports/tr24/). 
- This includes, but is not limited to, `Arabic`, `Cyrillic`, `Hebrew`, - `Latin` and `Thai`. -* **unicode-segment** - - Provide the data necessary to provide the properties used to implement the - [Unicode text segmentation algorithms](https://www.unicode.org/reports/tr29/). - This enables using classes like `\p{gcb=Extend}`, `\p{wb=Katakana}` and - `\p{sb=ATerm}`. -* **unicode-word-boundary** - - Enables support for Unicode word boundaries, i.e., `\b`, in regexes. When - this and `unicode-perl` are enabled, then data tables from `regex-syntax` are - used to implement Unicode word boundaries. However, if `regex-syntax` isn't - enabled as a dependency then one can still enable this feature. It will - cause `regex-automata` to bundle its own data table that would otherwise be - redundant with `regex-syntax`'s table. - -### Regex engine features - -* **syntax** - Enables a dependency on `regex-syntax`. This makes APIs -for building regex engines from pattern strings available. Without the -`regex-syntax` dependency, the only way to build a regex engine is generally -to deserialize a previously built DFA or to hand assemble an NFA using its -[builder API](nfa::thompson::Builder). Once you have an NFA, you can build any -of the regex engines in this crate. The `syntax` feature also enables `alloc`. -* **meta** - Enables the meta regex engine. This also enables the `syntax` and -`nfa-pikevm` features, as both are the minimal requirements needed. The meta -regex engine benefits from enabling any of the other regex engines and will -use them automatically when appropriate. -* **nfa** - Enables all NFA related features below. - * **nfa-thompson** - Enables the Thompson NFA APIs. This enables `alloc`. - * **nfa-pikevm** - Enables the PikeVM regex engine. This enables - `nfa-thompson`. - * **nfa-backtrack** - Enables the bounded backtracker regex engine. This - enables `nfa-thompson`. -* **dfa** - Enables all DFA related features below. - * **dfa-build** - Enables APIs for determinizing DFAs from NFAs. This - enables `nfa-thompson` and `dfa-search`. - * **dfa-search** - Enables APIs for searching with DFAs. - * **dfa-onepass** - Enables the one-pass DFA API. This enables - `nfa-thompson`. -* **hybrid** - Enables the hybrid NFA/DFA or "lazy DFA" regex engine. This -enables `alloc` and `nfa-thompson`. - -*/ - -// We are no_std. -#![no_std] -// All APIs need docs! -#![deny(missing_docs)] -// Some intra-doc links are broken when certain features are disabled, so we -// only bleat about it when most (all?) features are enabled. But when we do, -// we block the build. Links need to work. -#![cfg_attr( - all( - feature = "std", - feature = "nfa", - feature = "dfa", - feature = "hybrid" - ), - deny(rustdoc::broken_intra_doc_links) -)] -// Broken rustdoc links are very easy to come by when you start disabling -// features. Namely, features tend to change imports, and imports change what's -// available to link to. -// -// Basically, we just don't support rustdoc for anything other than the maximal -// feature configuration. Other configurations will work, they just won't be -// perfect. -// -// So here, we specifically allow them so we don't even get warned about them. -#![cfg_attr( - not(all( - feature = "std", - feature = "nfa", - feature = "dfa", - feature = "hybrid" - )), - allow(rustdoc::broken_intra_doc_links) -)] -// Kinda similar, but eliminating all of the dead code and unused import -// warnings for every feature combo is a fool's errand. 
Instead, we just -// suppress those, but still let them through in a common configuration when we -// build most of everything. -// -// This does actually suggest that when features are disabled, we are actually -// compiling more code than we need to be. And this is perhaps not so great -// because disabling features is usually done in order to reduce compile times -// by reducing the amount of code one compiles... However, usually, most of the -// time this dead code is a relatively small amount from the 'util' module. -// But... I confess... There isn't a ton of visibility on this. -// -// I'm happy to try to address this in a different way, but "let's annotate -// every function in 'util' with some non-local combination of features" just -// cannot be the way forward. -#![cfg_attr( - not(all( - feature = "std", - feature = "nfa", - feature = "dfa", - feature = "hybrid", - feature = "perf-literal-substring", - feature = "perf-literal-multisubstring", - )), - allow(dead_code, unused_imports, unused_variables) -)] -// We generally want all types to impl Debug. -#![warn(missing_debug_implementations)] -// This adds Cargo feature annotations to items in the rustdoc output. Which is -// sadly hugely beneficial for this crate due to the number of features. -#![cfg_attr(docsrs_regex, feature(doc_cfg))] - -// I have literally never tested this crate on 16-bit, so it is quite -// suspicious to advertise support for it. But... the regex crate, at time -// of writing, at least claims to support it by not doing any conditional -// compilation based on the target pointer width. So I guess I remain -// consistent with that here. -// -// If you are here because you're on a 16-bit system and you were somehow using -// the regex crate previously, please file an issue. Please be prepared to -// provide some kind of reproduction or carve out some path to getting 16-bit -// working in CI. (Via qemu?) -#[cfg(not(any( - target_pointer_width = "16", - target_pointer_width = "32", - target_pointer_width = "64" -)))] -compile_error!("not supported on non-{16,32,64}, please file an issue"); - -#[cfg(any(test, feature = "std"))] -extern crate std; - -#[cfg(feature = "alloc")] -extern crate alloc; - -#[cfg(doctest)] -doc_comment::doctest!("../README.md"); - -#[doc(inline)] -pub use crate::util::primitives::PatternID; -pub use crate::util::search::*; - -#[macro_use] -mod macros; - -#[cfg(any(feature = "dfa-search", feature = "dfa-onepass"))] -pub mod dfa; -#[cfg(feature = "hybrid")] -pub mod hybrid; -#[cfg(feature = "meta")] -pub mod meta; -#[cfg(feature = "nfa-thompson")] -pub mod nfa; -pub mod util; diff --git a/vendor/regex-automata/src/macros.rs b/vendor/regex-automata/src/macros.rs deleted file mode 100644 index 31b4ca3816ace2..00000000000000 --- a/vendor/regex-automata/src/macros.rs +++ /dev/null @@ -1,20 +0,0 @@ -// Some feature combinations result in some of these macros never being used. -// Which is fine. Just squash the warnings. -#![allow(unused_macros)] - -macro_rules! log { - ($($tt:tt)*) => { - #[cfg(feature = "logging")] - { - $($tt)* - } - } -} - -macro_rules! debug { - ($($tt:tt)*) => { log!(log::debug!($($tt)*)) } -} - -macro_rules! 
trace { - ($($tt:tt)*) => { log!(log::trace!($($tt)*)) } -} diff --git a/vendor/regex-automata/src/meta/error.rs b/vendor/regex-automata/src/meta/error.rs deleted file mode 100644 index 9ead729bbdf986..00000000000000 --- a/vendor/regex-automata/src/meta/error.rs +++ /dev/null @@ -1,241 +0,0 @@ -use regex_syntax::{ast, hir}; - -use crate::{nfa, util::search::MatchError, PatternID}; - -/// An error that occurs when construction of a `Regex` fails. -/// -/// A build error is generally a result of one of two possible failure -/// modes. First is a parse or syntax error in the concrete syntax of a -/// pattern. Second is that the construction of the underlying regex matcher -/// fails, usually because it gets too big with respect to limits like -/// [`Config::nfa_size_limit`](crate::meta::Config::nfa_size_limit). -/// -/// This error provides very little introspection capabilities. You can: -/// -/// * Ask for the [`PatternID`] of the pattern that caused an error, if one -/// is available. This is available for things like syntax errors, but not for -/// cases where build limits are exceeded. -/// * Ask for the underlying syntax error, but only if the error is a syntax -/// error. -/// * Ask for a human readable message corresponding to the underlying error. -/// * The `BuildError::source` method (from the `std::error::Error` -/// trait implementation) may be used to query for an underlying error if one -/// exists. There are no API guarantees about which error is returned. -/// -/// When the `std` feature is enabled, this implements `std::error::Error`. -#[derive(Clone, Debug)] -pub struct BuildError { - kind: BuildErrorKind, -} - -#[derive(Clone, Debug)] -enum BuildErrorKind { - Syntax { pid: PatternID, err: regex_syntax::Error }, - NFA(nfa::thompson::BuildError), -} - -impl BuildError { - /// If it is known which pattern ID caused this build error to occur, then - /// this method returns it. - /// - /// Some errors are not associated with a particular pattern. However, any - /// errors that occur as part of parsing a pattern are guaranteed to be - /// associated with a pattern ID. - /// - /// # Example - /// - /// ``` - /// use regex_automata::{meta::Regex, PatternID}; - /// - /// let err = Regex::new_many(&["a", "b", r"\p{Foo}", "c"]).unwrap_err(); - /// assert_eq!(Some(PatternID::must(2)), err.pattern()); - /// ``` - pub fn pattern(&self) -> Option { - match self.kind { - BuildErrorKind::Syntax { pid, .. } => Some(pid), - _ => None, - } - } - - /// If this error occurred because the regex exceeded the configured size - /// limit before being built, then this returns the configured size limit. - /// - /// The limit returned is what was configured, and corresponds to the - /// maximum amount of heap usage in bytes. - pub fn size_limit(&self) -> Option { - match self.kind { - BuildErrorKind::NFA(ref err) => err.size_limit(), - _ => None, - } - } - - /// If this error corresponds to a syntax error, then a reference to it is - /// returned by this method. - pub fn syntax_error(&self) -> Option<®ex_syntax::Error> { - match self.kind { - BuildErrorKind::Syntax { ref err, .. 
} => Some(err), - _ => None, - } - } - - pub(crate) fn ast(pid: PatternID, err: ast::Error) -> BuildError { - let err = regex_syntax::Error::from(err); - BuildError { kind: BuildErrorKind::Syntax { pid, err } } - } - - pub(crate) fn hir(pid: PatternID, err: hir::Error) -> BuildError { - let err = regex_syntax::Error::from(err); - BuildError { kind: BuildErrorKind::Syntax { pid, err } } - } - - pub(crate) fn nfa(err: nfa::thompson::BuildError) -> BuildError { - BuildError { kind: BuildErrorKind::NFA(err) } - } -} - -#[cfg(feature = "std")] -impl std::error::Error for BuildError { - fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { - match self.kind { - BuildErrorKind::Syntax { ref err, .. } => Some(err), - BuildErrorKind::NFA(ref err) => Some(err), - } - } -} - -impl core::fmt::Display for BuildError { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - match self.kind { - BuildErrorKind::Syntax { pid, .. } => { - write!(f, "error parsing pattern {}", pid.as_usize()) - } - BuildErrorKind::NFA(_) => write!(f, "error building NFA"), - } - } -} - -/// An error that occurs when a search should be retried. -/// -/// This retry error distinguishes between two different failure modes. -/// -/// The first is one where potential quadratic behavior has been detected. -/// In this case, whatever optimization that led to this behavior should be -/// stopped, and the next best strategy should be used. -/// -/// The second indicates that the underlying regex engine has failed for some -/// reason. This usually occurs because either a lazy DFA's cache has become -/// ineffective or because a non-ASCII byte has been seen *and* a Unicode word -/// boundary was used in one of the patterns. In this failure case, a different -/// regex engine that won't fail in these ways (PikeVM, backtracker or the -/// one-pass DFA) should be used. -/// -/// This is an internal error only and should never bleed into the public -/// API. -#[derive(Debug)] -pub(crate) enum RetryError { - Quadratic(RetryQuadraticError), - Fail(RetryFailError), -} - -#[cfg(feature = "std")] -impl std::error::Error for RetryError {} - -impl core::fmt::Display for RetryError { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - match *self { - RetryError::Quadratic(ref err) => err.fmt(f), - RetryError::Fail(ref err) => err.fmt(f), - } - } -} - -impl From for RetryError { - fn from(merr: MatchError) -> RetryError { - RetryError::Fail(RetryFailError::from(merr)) - } -} - -/// An error that occurs when potential quadratic behavior has been detected -/// when applying either the "reverse suffix" or "reverse inner" optimizations. -/// -/// When this error occurs, callers should abandon the "reverse" optimization -/// and use a normal forward search. -#[derive(Debug)] -pub(crate) struct RetryQuadraticError(()); - -impl RetryQuadraticError { - pub(crate) fn new() -> RetryQuadraticError { - RetryQuadraticError(()) - } -} - -#[cfg(feature = "std")] -impl std::error::Error for RetryQuadraticError {} - -impl core::fmt::Display for RetryQuadraticError { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - write!(f, "regex engine gave up to avoid quadratic behavior") - } -} - -impl From for RetryError { - fn from(err: RetryQuadraticError) -> RetryError { - RetryError::Quadratic(err) - } -} - -/// An error that occurs when a regex engine "gives up" for some reason before -/// finishing a search. 
Usually this occurs because of heuristic Unicode word -/// boundary support or because of ineffective cache usage in the lazy DFA. -/// -/// When this error occurs, callers should retry the regex search with a -/// different regex engine. -/// -/// Note that this has convenient `From` impls that will automatically -/// convert a `MatchError` into this error. This works because the meta -/// regex engine internals guarantee that errors like `HaystackTooLong` and -/// `UnsupportedAnchored` will never occur. The only errors left are `Quit` and -/// `GaveUp`, which both correspond to this "failure" error. -#[derive(Debug)] -pub(crate) struct RetryFailError { - offset: usize, -} - -impl RetryFailError { - pub(crate) fn from_offset(offset: usize) -> RetryFailError { - RetryFailError { offset } - } -} - -#[cfg(feature = "std")] -impl std::error::Error for RetryFailError {} - -impl core::fmt::Display for RetryFailError { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - write!(f, "regex engine failed at offset {:?}", self.offset) - } -} - -impl From for RetryError { - fn from(err: RetryFailError) -> RetryError { - RetryError::Fail(err) - } -} - -impl From for RetryFailError { - fn from(merr: MatchError) -> RetryFailError { - use crate::util::search::MatchErrorKind::*; - - match *merr.kind() { - Quit { offset, .. } => RetryFailError::from_offset(offset), - GaveUp { offset } => RetryFailError::from_offset(offset), - // These can never occur because we avoid them by construction - // or with higher level control flow logic. For example, the - // backtracker's wrapper will never hand out a backtracker engine - // when the haystack would be too long. - HaystackTooLong { .. } | UnsupportedAnchored { .. } => { - unreachable!("found impossible error in meta engine: {merr}") - } - } - } -} diff --git a/vendor/regex-automata/src/meta/limited.rs b/vendor/regex-automata/src/meta/limited.rs deleted file mode 100644 index ce6708c701574b..00000000000000 --- a/vendor/regex-automata/src/meta/limited.rs +++ /dev/null @@ -1,251 +0,0 @@ -/*! -This module defines two bespoke reverse DFA searching routines. (One for the -lazy DFA and one for the fully compiled DFA.) These routines differ from the -usual ones by permitting the caller to specify a minimum starting position. -That is, the search will begin at `input.end()` and will usually stop at -`input.start()`, unless `min_start > input.start()`, in which case, the search -will stop at `min_start`. - -In other words, this lets you say, "no, the search must not extend past this -point, even if it's within the bounds of the given `Input`." And if the search -*does* want to go past that point, it stops and returns a "may be quadratic" -error, which indicates that the caller should retry using some other technique. - -These routines specifically exist to protect against quadratic behavior when -employing the "reverse suffix" and "reverse inner" optimizations. Without the -backstop these routines provide, it is possible for parts of the haystack to -get re-scanned over and over again. The backstop not only prevents this, but -*tells you when it is happening* so that you can change the strategy. - -Why can't we just use the normal search routines? We could use the normal -search routines and just set the start bound on the provided `Input` to our -`min_start` position. The problem here is that it's impossible to distinguish -between "no match because we reached the end of input" and "determined there -was no match well before the end of input." 
The former case is what we care -about with respect to quadratic behavior. The latter case is totally fine. - -Why don't we modify the normal search routines to report the position at which -the search stops? I considered this, and I still wonder if it is indeed the -right thing to do. However, I think the straight-forward thing to do there -would be to complicate the return type signature of almost every search routine -in this crate, which I really do not want to do. It therefore might make more -sense to provide a richer way for search routines to report meta data, but that -was beyond my bandwidth to work on at the time of writing. - -See the 'opt/reverse-inner' and 'opt/reverse-suffix' benchmarks in rebar for a -real demonstration of how quadratic behavior is mitigated. -*/ - -use crate::{ - meta::error::{RetryError, RetryQuadraticError}, - HalfMatch, Input, MatchError, -}; - -#[cfg(feature = "dfa-build")] -pub(crate) fn dfa_try_search_half_rev( - dfa: &crate::dfa::dense::DFA>, - input: &Input<'_>, - min_start: usize, -) -> Result, RetryError> { - use crate::dfa::Automaton; - - let mut mat = None; - let mut sid = dfa.start_state_reverse(input)?; - if input.start() == input.end() { - dfa_eoi_rev(dfa, input, &mut sid, &mut mat)?; - return Ok(mat); - } - let mut at = input.end() - 1; - loop { - sid = dfa.next_state(sid, input.haystack()[at]); - if dfa.is_special_state(sid) { - if dfa.is_match_state(sid) { - let pattern = dfa.match_pattern(sid, 0); - // Since reverse searches report the beginning of a - // match and the beginning is inclusive (not exclusive - // like the end of a match), we add 1 to make it - // inclusive. - mat = Some(HalfMatch::new(pattern, at + 1)); - } else if dfa.is_dead_state(sid) { - return Ok(mat); - } else if dfa.is_quit_state(sid) { - return Err(MatchError::quit(input.haystack()[at], at).into()); - } - } - if at == input.start() { - break; - } - at -= 1; - if at < min_start { - trace!( - "reached position {at} which is before the previous literal \ - match, quitting to avoid quadratic behavior", - ); - return Err(RetryError::Quadratic(RetryQuadraticError::new())); - } - } - let was_dead = dfa.is_dead_state(sid); - dfa_eoi_rev(dfa, input, &mut sid, &mut mat)?; - // If we reach the beginning of the search and we could otherwise still - // potentially keep matching if there was more to match, then we actually - // return an error to indicate giving up on this optimization. Why? Because - // we can't prove that the real match begins at where we would report it. - // - // This only happens when all of the following are true: - // - // 1) We reach the starting point of our search span. - // 2) The match we found is before the starting point. - // 3) The FSM reports we could possibly find a longer match. - // - // We need (1) because otherwise the search stopped before the starting - // point and there is no possible way to find a more leftmost position. - // - // We need (2) because if the match found has an offset equal to the minimum - // possible offset, then there is no possible more leftmost match. - // - // We need (3) because if the FSM couldn't continue anyway (i.e., it's in - // a dead state), then we know we couldn't find anything more leftmost - // than what we have. (We have to check the state we were in prior to the - // EOI transition since the EOI transition will usually bring us to a dead - // state by virtue of it represents the end-of-input.) 
- if at == input.start() - && mat.map_or(false, |m| m.offset() > input.start()) - && !was_dead - { - trace!( - "reached beginning of search at offset {at} without hitting \ - a dead state, quitting to avoid potential false positive match", - ); - return Err(RetryError::Quadratic(RetryQuadraticError::new())); - } - Ok(mat) -} - -#[cfg(feature = "hybrid")] -pub(crate) fn hybrid_try_search_half_rev( - dfa: &crate::hybrid::dfa::DFA, - cache: &mut crate::hybrid::dfa::Cache, - input: &Input<'_>, - min_start: usize, -) -> Result, RetryError> { - let mut mat = None; - let mut sid = dfa.start_state_reverse(cache, input)?; - if input.start() == input.end() { - hybrid_eoi_rev(dfa, cache, input, &mut sid, &mut mat)?; - return Ok(mat); - } - let mut at = input.end() - 1; - loop { - sid = dfa - .next_state(cache, sid, input.haystack()[at]) - .map_err(|_| MatchError::gave_up(at))?; - if sid.is_tagged() { - if sid.is_match() { - let pattern = dfa.match_pattern(cache, sid, 0); - // Since reverse searches report the beginning of a - // match and the beginning is inclusive (not exclusive - // like the end of a match), we add 1 to make it - // inclusive. - mat = Some(HalfMatch::new(pattern, at + 1)); - } else if sid.is_dead() { - return Ok(mat); - } else if sid.is_quit() { - return Err(MatchError::quit(input.haystack()[at], at).into()); - } - } - if at == input.start() { - break; - } - at -= 1; - if at < min_start { - trace!( - "reached position {at} which is before the previous literal \ - match, quitting to avoid quadratic behavior", - ); - return Err(RetryError::Quadratic(RetryQuadraticError::new())); - } - } - let was_dead = sid.is_dead(); - hybrid_eoi_rev(dfa, cache, input, &mut sid, &mut mat)?; - // See the comments in the full DFA routine above for why we need this. - if at == input.start() - && mat.map_or(false, |m| m.offset() > input.start()) - && !was_dead - { - trace!( - "reached beginning of search at offset {at} without hitting \ - a dead state, quitting to avoid potential false positive match", - ); - return Err(RetryError::Quadratic(RetryQuadraticError::new())); - } - Ok(mat) -} - -#[cfg(feature = "dfa-build")] -#[cfg_attr(feature = "perf-inline", inline(always))] -fn dfa_eoi_rev( - dfa: &crate::dfa::dense::DFA>, - input: &Input<'_>, - sid: &mut crate::util::primitives::StateID, - mat: &mut Option, -) -> Result<(), MatchError> { - use crate::dfa::Automaton; - - let sp = input.get_span(); - if sp.start > 0 { - let byte = input.haystack()[sp.start - 1]; - *sid = dfa.next_state(*sid, byte); - if dfa.is_match_state(*sid) { - let pattern = dfa.match_pattern(*sid, 0); - *mat = Some(HalfMatch::new(pattern, sp.start)); - } else if dfa.is_quit_state(*sid) { - return Err(MatchError::quit(byte, sp.start - 1)); - } - } else { - *sid = dfa.next_eoi_state(*sid); - if dfa.is_match_state(*sid) { - let pattern = dfa.match_pattern(*sid, 0); - *mat = Some(HalfMatch::new(pattern, 0)); - } - // N.B. We don't have to check 'is_quit' here because the EOI - // transition can never lead to a quit state. 
- debug_assert!(!dfa.is_quit_state(*sid)); - } - Ok(()) -} - -#[cfg(feature = "hybrid")] -#[cfg_attr(feature = "perf-inline", inline(always))] -fn hybrid_eoi_rev( - dfa: &crate::hybrid::dfa::DFA, - cache: &mut crate::hybrid::dfa::Cache, - input: &Input<'_>, - sid: &mut crate::hybrid::LazyStateID, - mat: &mut Option, -) -> Result<(), MatchError> { - let sp = input.get_span(); - if sp.start > 0 { - let byte = input.haystack()[sp.start - 1]; - *sid = dfa - .next_state(cache, *sid, byte) - .map_err(|_| MatchError::gave_up(sp.start))?; - if sid.is_match() { - let pattern = dfa.match_pattern(cache, *sid, 0); - *mat = Some(HalfMatch::new(pattern, sp.start)); - } else if sid.is_quit() { - return Err(MatchError::quit(byte, sp.start - 1)); - } - } else { - *sid = dfa - .next_eoi_state(cache, *sid) - .map_err(|_| MatchError::gave_up(sp.start))?; - if sid.is_match() { - let pattern = dfa.match_pattern(cache, *sid, 0); - *mat = Some(HalfMatch::new(pattern, 0)); - } - // N.B. We don't have to check 'is_quit' here because the EOI - // transition can never lead to a quit state. - debug_assert!(!sid.is_quit()); - } - Ok(()) -} diff --git a/vendor/regex-automata/src/meta/literal.rs b/vendor/regex-automata/src/meta/literal.rs deleted file mode 100644 index fac68d00539b8f..00000000000000 --- a/vendor/regex-automata/src/meta/literal.rs +++ /dev/null @@ -1,81 +0,0 @@ -use alloc::{vec, vec::Vec}; - -use regex_syntax::hir::Hir; - -use crate::{meta::regex::RegexInfo, util::search::MatchKind}; - -/// Pull out an alternation of literals from the given sequence of HIR -/// expressions. -/// -/// There are numerous ways for this to fail. Generally, this only applies -/// to regexes of the form 'foo|bar|baz|...|quux'. It can also fail if there -/// are "too few" alternates, in which case, the regex engine is likely faster. -/// -/// And currently, this only returns something when 'hirs.len() == 1'. -pub(crate) fn alternation_literals( - info: &RegexInfo, - hirs: &[&Hir], -) -> Option>> { - use regex_syntax::hir::{HirKind, Literal}; - - // Might as well skip the work below if we know we can't build an - // Aho-Corasick searcher. - if !cfg!(feature = "perf-literal-multisubstring") { - return None; - } - // This is pretty hacky, but basically, if `is_alternation_literal` is - // true, then we can make several assumptions about the structure of our - // HIR. This is what justifies the `unreachable!` statements below. - if hirs.len() != 1 - || !info.props()[0].look_set().is_empty() - || info.props()[0].explicit_captures_len() > 0 - || !info.props()[0].is_alternation_literal() - || info.config().get_match_kind() != MatchKind::LeftmostFirst - { - return None; - } - let hir = &hirs[0]; - let alts = match *hir.kind() { - HirKind::Alternation(ref alts) => alts, - _ => return None, // one literal isn't worth it - }; - - let mut lits = vec![]; - for alt in alts { - let mut lit = vec![]; - match *alt.kind() { - HirKind::Literal(Literal(ref bytes)) => { - lit.extend_from_slice(bytes) - } - HirKind::Concat(ref exprs) => { - for e in exprs { - match *e.kind() { - HirKind::Literal(Literal(ref bytes)) => { - lit.extend_from_slice(bytes); - } - _ => unreachable!("expected literal, got {e:?}"), - } - } - } - _ => unreachable!("expected literal or concat, got {alt:?}"), - } - lits.push(lit); - } - // Why do this? Well, when the number of literals is small, it's likely - // that we'll use the lazy DFA which is in turn likely to be faster than - // Aho-Corasick in such cases. 
Primarily because Aho-Corasick doesn't have - // a "lazy DFA" but either a contiguous NFA or a full DFA. We rarely use - // the latter because it is so hungry (in time and space), and the former - // is decently fast, but not as fast as a well oiled lazy DFA. - // - // However, once the number starts getting large, the lazy DFA is likely - // to start thrashing because of the modest default cache size. When - // exactly does this happen? Dunno. But at whatever point that is (we make - // a guess below based on ad hoc benchmarking), we'll want to cut over to - // Aho-Corasick, where even the contiguous NFA is likely to do much better. - if lits.len() < 3000 { - debug!("skipping Aho-Corasick because there are too few literals"); - return None; - } - Some(lits) -} diff --git a/vendor/regex-automata/src/meta/mod.rs b/vendor/regex-automata/src/meta/mod.rs deleted file mode 100644 index 01f430fcb79949..00000000000000 --- a/vendor/regex-automata/src/meta/mod.rs +++ /dev/null @@ -1,62 +0,0 @@ -/*! -Provides a regex matcher that composes several other regex matchers -automatically. - -This module is home to a meta [`Regex`], which provides a convenient high -level API for executing regular expressions in linear time. - -# Comparison with the `regex` crate - -A meta `Regex` is the implementation used directly by the `regex` crate. -Indeed, the `regex` crate API is essentially just a light wrapper over a meta -`Regex`. This means that if you need the full flexibility offered by this -API, then you should be able to switch to using this API directly without -any changes in match semantics or syntax. However, there are some API level -differences: - -* The `regex` crate API returns match objects that include references to the -haystack itself, which in turn makes it easy to access the matching strings -without having to slice the haystack yourself. In contrast, a meta `Regex` -returns match objects that only have offsets in them. -* At time of writing, a meta `Regex` doesn't have some of the convenience -routines that the `regex` crate has, such as replacements. Note though that -[`Captures::interpolate_string`](crate::util::captures::Captures::interpolate_string) -will handle the replacement string interpolation for you. -* A meta `Regex` supports the [`Input`](crate::Input) abstraction, which -provides a way to configure a search in more ways than is supported by the -`regex` crate. For example, [`Input::anchored`](crate::Input::anchored) can -be used to run an anchored search, regardless of whether the pattern is itself -anchored with a `^`. -* A meta `Regex` supports multi-pattern searching everywhere. -Indeed, every [`Match`](crate::Match) returned by the search APIs -include a [`PatternID`](crate::PatternID) indicating which pattern -matched. In the single pattern case, all matches correspond to -[`PatternID::ZERO`](crate::PatternID::ZERO). In contrast, the `regex` crate -has distinct `Regex` and a `RegexSet` APIs. The former only supports a single -pattern, while the latter supports multiple patterns but cannot report the -offsets of a match. -* A meta `Regex` provides the explicit capability of bypassing its internal -memory pool for automatically acquiring mutable scratch space required by its -internal regex engines. Namely, a [`Cache`] can be explicitly provided to lower -level routines such as [`Regex::search_with`]. 
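As a small illustration of the multi-pattern bullet above, a hedged sketch: every match returned by the search APIs carries the index of the pattern that produced it, so no capture-group bookkeeping is needed to tell the patterns apart.

```
use regex_automata::{meta::Regex, Match};

fn main() {
    let re = Regex::new_many(&[r"[0-9]+", r"[a-z]+"]).unwrap();
    let matches: Vec<Match> = re.find_iter("abc 123").collect();
    // Pattern 1 ([a-z]+) matched "abc"; pattern 0 ([0-9]+) matched "123".
    assert_eq!(matches, vec![
        Match::must(1, 0..3),
        Match::must(0, 4..7),
    ]);
}
```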
- -*/ - -pub use self::{ - error::BuildError, - regex::{ - Builder, Cache, CapturesMatches, Config, FindMatches, Regex, Split, - SplitN, - }, -}; - -mod error; -#[cfg(any(feature = "dfa-build", feature = "hybrid"))] -mod limited; -mod literal; -mod regex; -mod reverse_inner; -#[cfg(any(feature = "dfa-build", feature = "hybrid"))] -mod stopat; -mod strategy; -mod wrappers; diff --git a/vendor/regex-automata/src/meta/regex.rs b/vendor/regex-automata/src/meta/regex.rs deleted file mode 100644 index 21c1a3a31253c9..00000000000000 --- a/vendor/regex-automata/src/meta/regex.rs +++ /dev/null @@ -1,3706 +0,0 @@ -use core::{ - borrow::Borrow, - panic::{RefUnwindSafe, UnwindSafe}, -}; - -use alloc::{boxed::Box, sync::Arc, vec, vec::Vec}; - -use regex_syntax::{ - ast, - hir::{self, Hir}, -}; - -use crate::{ - meta::{ - error::BuildError, - strategy::{self, Strategy}, - wrappers, - }, - nfa::thompson::WhichCaptures, - util::{ - captures::{Captures, GroupInfo}, - iter, - pool::{Pool, PoolGuard}, - prefilter::Prefilter, - primitives::{NonMaxUsize, PatternID}, - search::{HalfMatch, Input, Match, MatchKind, PatternSet, Span}, - }, -}; - -/// A type alias for our pool of meta::Cache that fixes the type parameters to -/// what we use for the meta regex below. -type CachePool = Pool; - -/// Same as above, but for the guard returned by a pool. -type CachePoolGuard<'a> = PoolGuard<'a, Cache, CachePoolFn>; - -/// The type of the closure we use to create new caches. We need to spell out -/// all of the marker traits or else we risk leaking !MARKER impls. -type CachePoolFn = - Box Cache + Send + Sync + UnwindSafe + RefUnwindSafe>; - -/// A regex matcher that works by composing several other regex matchers -/// automatically. -/// -/// In effect, a meta regex papers over a lot of the quirks or performance -/// problems in each of the regex engines in this crate. Its goal is to provide -/// an infallible and simple API that "just does the right thing" in the common -/// case. -/// -/// A meta regex is the implementation of a `Regex` in the `regex` crate. -/// Indeed, the `regex` crate API is essentially just a light wrapper over -/// this type. This includes the `regex` crate's `RegexSet` API! -/// -/// # Composition -/// -/// This is called a "meta" matcher precisely because it uses other regex -/// matchers to provide a convenient high level regex API. Here are some -/// examples of how other regex matchers are composed: -/// -/// * When calling [`Regex::captures`], instead of immediately -/// running a slower but more capable regex engine like the -/// [`PikeVM`](crate::nfa::thompson::pikevm::PikeVM), the meta regex engine -/// will usually first look for the bounds of a match with a higher throughput -/// regex engine like a [lazy DFA](crate::hybrid). Only when a match is found -/// is a slower engine like `PikeVM` used to find the matching span for each -/// capture group. -/// * While higher throughout engines like the lazy DFA cannot handle -/// Unicode word boundaries in general, they can still be used on pure ASCII -/// haystacks by pretending that Unicode word boundaries are just plain ASCII -/// word boundaries. However, if a haystack is not ASCII, the meta regex engine -/// will automatically switch to a (possibly slower) regex engine that supports -/// Unicode word boundaries in general. -/// * In some cases where a regex pattern is just a simple literal or a small -/// set of literals, an actual regex engine won't be used at all. 
Instead, -/// substring or multi-substring search algorithms will be employed. -/// -/// There are many other forms of composition happening too, but the above -/// should give a general idea. In particular, it may perhaps be surprising -/// that *multiple* regex engines might get executed for a single search. That -/// is, the decision of what regex engine to use is not _just_ based on the -/// pattern, but also based on the dynamic execution of the search itself. -/// -/// The primary reason for this composition is performance. The fundamental -/// tension is that the faster engines tend to be less capable, and the more -/// capable engines tend to be slower. -/// -/// Note that the forms of composition that are allowed are determined by -/// compile time crate features and configuration. For example, if the `hybrid` -/// feature isn't enabled, or if [`Config::hybrid`] has been disabled, then the -/// meta regex engine will never use a lazy DFA. -/// -/// # Synchronization and cloning -/// -/// Most of the regex engines in this crate require some kind of mutable -/// "scratch" space to read and write from while performing a search. Since -/// a meta regex composes these regex engines, a meta regex also requires -/// mutable scratch space. This scratch space is called a [`Cache`]. -/// -/// Most regex engines _also_ usually have a read-only component, typically -/// a [Thompson `NFA`](crate::nfa::thompson::NFA). -/// -/// In order to make the `Regex` API convenient, most of the routines hide -/// the fact that a `Cache` is needed at all. To achieve this, a [memory -/// pool](crate::util::pool::Pool) is used internally to retrieve `Cache` -/// values in a thread safe way that also permits reuse. This in turn implies -/// that every such search call requires some form of synchronization. Usually -/// this synchronization is fast enough to not notice, but in some cases, it -/// can be a bottleneck. This typically occurs when all of the following are -/// true: -/// -/// * The same `Regex` is shared across multiple threads simultaneously, -/// usually via a [`util::lazy::Lazy`](crate::util::lazy::Lazy) or something -/// similar from the `once_cell` or `lazy_static` crates. -/// * The primary unit of work in each thread is a regex search. -/// * Searches are run on very short haystacks. -/// -/// This particular case can lead to high contention on the pool used by a -/// `Regex` internally, which can in turn increase latency to a noticeable -/// effect. This cost can be mitigated in one of the following ways: -/// -/// * Use a distinct copy of a `Regex` in each thread, usually by cloning it. -/// Cloning a `Regex` _does not_ do a deep copy of its read-only component. -/// But it does lead to each `Regex` having its own memory pool, which in -/// turn eliminates the problem of contention. In general, this technique should -/// not result in any additional memory usage when compared to sharing the same -/// `Regex` across multiple threads simultaneously. -/// * Use lower level APIs, like [`Regex::search_with`], which permit passing -/// a `Cache` explicitly. In this case, it is up to you to determine how best -/// to provide a `Cache`. For example, you might put a `Cache` in thread-local -/// storage if your use case allows for it. -/// -/// Overall, this is an issue that happens rarely in practice, but it can -/// happen. 
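A minimal sketch of the second mitigation listed above, an explicit `Cache` passed to `Regex::search_with`. It assumes `create_cache` constructs the scratch space and that `search_with` accepts an `&Input`, as described in these docs.

```
use regex_automata::{meta::Regex, Input, Match};

fn main() {
    let re = Regex::new(r"[0-9]{4}").unwrap();
    // Owning the scratch space ourselves bypasses the internal pool entirely,
    // so no synchronization happens on the search path.
    let mut cache = re.create_cache();
    let input = Input::new("the year 2018");
    assert_eq!(
        Some(Match::must(0, 9..13)),
        re.search_with(&mut cache, &input),
    );
}
```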
-/// -/// # Warning: spin-locks may be used in alloc-only mode -/// -/// When this crate is built without the `std` feature and the high level APIs -/// on a `Regex` are used, then a spin-lock will be used to synchronize access -/// to an internal pool of `Cache` values. This may be undesirable because -/// a spin-lock is [effectively impossible to implement correctly in user -/// space][spinlocks-are-bad]. That is, more concretely, the spin-lock could -/// result in a deadlock. -/// -/// [spinlocks-are-bad]: https://matklad.github.io/2020/01/02/spinlocks-considered-harmful.html -/// -/// If one wants to avoid the use of spin-locks when the `std` feature is -/// disabled, then you must use APIs that accept a `Cache` value explicitly. -/// For example, [`Regex::search_with`]. -/// -/// # Example -/// -/// ``` -/// use regex_automata::meta::Regex; -/// -/// let re = Regex::new(r"^[0-9]{4}-[0-9]{2}-[0-9]{2}$")?; -/// assert!(re.is_match("2010-03-14")); -/// -/// # Ok::<(), Box>(()) -/// ``` -/// -/// # Example: anchored search -/// -/// This example shows how to use [`Input::anchored`] to run an anchored -/// search, even when the regex pattern itself isn't anchored. An anchored -/// search guarantees that if a match is found, then the start offset of the -/// match corresponds to the offset at which the search was started. -/// -/// ``` -/// use regex_automata::{meta::Regex, Anchored, Input, Match}; -/// -/// let re = Regex::new(r"\bfoo\b")?; -/// let input = Input::new("xx foo xx").range(3..).anchored(Anchored::Yes); -/// // The offsets are in terms of the original haystack. -/// assert_eq!(Some(Match::must(0, 3..6)), re.find(input)); -/// -/// // Notice that no match occurs here, because \b still takes the -/// // surrounding context into account, even if it means looking back -/// // before the start of your search. -/// let hay = "xxfoo xx"; -/// let input = Input::new(hay).range(2..).anchored(Anchored::Yes); -/// assert_eq!(None, re.find(input)); -/// // Indeed, you cannot achieve the above by simply slicing the -/// // haystack itself, since the regex engine can't see the -/// // surrounding context. This is why 'Input' permits setting -/// // the bounds of a search! -/// let input = Input::new(&hay[2..]).anchored(Anchored::Yes); -/// // WRONG! -/// assert_eq!(Some(Match::must(0, 0..3)), re.find(input)); -/// -/// # Ok::<(), Box>(()) -/// ``` -/// -/// # Example: earliest search -/// -/// This example shows how to use [`Input::earliest`] to run a search that -/// might stop before finding the typical leftmost match. -/// -/// ``` -/// use regex_automata::{meta::Regex, Anchored, Input, Match}; -/// -/// let re = Regex::new(r"[a-z]{3}|b")?; -/// let input = Input::new("abc").earliest(true); -/// assert_eq!(Some(Match::must(0, 1..2)), re.find(input)); -/// -/// // Note that "earliest" isn't really a match semantic unto itself. -/// // Instead, it is merely an instruction to whatever regex engine -/// // gets used internally to quit as soon as it can. For example, -/// // this regex uses a different search technique, and winds up -/// // producing a different (but valid) match! 
-/// let re = Regex::new(r"abc|b")?; -/// let input = Input::new("abc").earliest(true); -/// assert_eq!(Some(Match::must(0, 0..3)), re.find(input)); -/// -/// # Ok::<(), Box>(()) -/// ``` -/// -/// # Example: change the line terminator -/// -/// This example shows how to enable multi-line mode by default and change -/// the line terminator to the NUL byte: -/// -/// ``` -/// use regex_automata::{meta::Regex, util::syntax, Match}; -/// -/// let re = Regex::builder() -/// .syntax(syntax::Config::new().multi_line(true)) -/// .configure(Regex::config().line_terminator(b'\x00')) -/// .build(r"^foo$")?; -/// let hay = "\x00foo\x00"; -/// assert_eq!(Some(Match::must(0, 1..4)), re.find(hay)); -/// -/// # Ok::<(), Box>(()) -/// ``` -#[derive(Debug)] -pub struct Regex { - /// The actual regex implementation. - imp: Arc, - /// A thread safe pool of caches. - /// - /// For the higher level search APIs, a `Cache` is automatically plucked - /// from this pool before running a search. The lower level `with` methods - /// permit the caller to provide their own cache, thereby bypassing - /// accesses to this pool. - /// - /// Note that we put this outside the `Arc` so that cloning a `Regex` - /// results in creating a fresh `CachePool`. This in turn permits callers - /// to clone regexes into separate threads where each such regex gets - /// the pool's "thread owner" optimization. Otherwise, if one shares the - /// `Regex` directly, then the pool will go through a slower mutex path for - /// all threads except for the "owner." - pool: CachePool, -} - -/// The internal implementation of `Regex`, split out so that it can be wrapped -/// in an `Arc`. -#[derive(Debug)] -struct RegexI { - /// The core matching engine. - /// - /// Why is this reference counted when RegexI is already wrapped in an Arc? - /// Well, we need to capture this in a closure to our `Pool` below in order - /// to create new `Cache` values when needed. So since it needs to be in - /// two places, we make it reference counted. - /// - /// We make `RegexI` itself reference counted too so that `Regex` itself - /// stays extremely small and very cheap to clone. - strat: Arc, - /// Metadata about the regexes driving the strategy. The metadata is also - /// usually stored inside the strategy too, but we put it here as well - /// so that we can get quick access to it (without virtual calls) before - /// executing the regex engine. For example, we use this metadata to - /// detect a subset of cases where we know a match is impossible, and can - /// thus avoid calling into the strategy at all. - /// - /// Since `RegexInfo` is stored in multiple places, it is also reference - /// counted. - info: RegexInfo, -} - -/// Convenience constructors for a `Regex` using the default configuration. -impl Regex { - /// Builds a `Regex` from a single pattern string using the default - /// configuration. - /// - /// If there was a problem parsing the pattern or a problem turning it into - /// a regex matcher, then an error is returned. - /// - /// If you want to change the configuration of a `Regex`, use a [`Builder`] - /// with a [`Config`]. 
- /// - /// # Example - /// - /// ``` - /// use regex_automata::{meta::Regex, Match}; - /// - /// let re = Regex::new(r"(?Rm)^foo$")?; - /// let hay = "\r\nfoo\r\n"; - /// assert_eq!(Some(Match::must(0, 2..5)), re.find(hay)); - /// - /// # Ok::<(), Box>(()) - /// ``` - pub fn new(pattern: &str) -> Result { - Regex::builder().build(pattern) - } - - /// Builds a `Regex` from many pattern strings using the default - /// configuration. - /// - /// If there was a problem parsing any of the patterns or a problem turning - /// them into a regex matcher, then an error is returned. - /// - /// If you want to change the configuration of a `Regex`, use a [`Builder`] - /// with a [`Config`]. - /// - /// # Example: simple lexer - /// - /// This simplistic example leverages the multi-pattern support to build a - /// simple little lexer. The pattern ID in the match tells you which regex - /// matched, which in turn might be used to map back to the "type" of the - /// token returned by the lexer. - /// - /// ``` - /// use regex_automata::{meta::Regex, Match}; - /// - /// let re = Regex::new_many(&[ - /// r"[[:space:]]", - /// r"[A-Za-z0-9][A-Za-z0-9_]+", - /// r"->", - /// r".", - /// ])?; - /// let haystack = "fn is_boss(bruce: i32, springsteen: String) -> bool;"; - /// let matches: Vec = re.find_iter(haystack).collect(); - /// assert_eq!(matches, vec![ - /// Match::must(1, 0..2), // 'fn' - /// Match::must(0, 2..3), // ' ' - /// Match::must(1, 3..10), // 'is_boss' - /// Match::must(3, 10..11), // '(' - /// Match::must(1, 11..16), // 'bruce' - /// Match::must(3, 16..17), // ':' - /// Match::must(0, 17..18), // ' ' - /// Match::must(1, 18..21), // 'i32' - /// Match::must(3, 21..22), // ',' - /// Match::must(0, 22..23), // ' ' - /// Match::must(1, 23..34), // 'springsteen' - /// Match::must(3, 34..35), // ':' - /// Match::must(0, 35..36), // ' ' - /// Match::must(1, 36..42), // 'String' - /// Match::must(3, 42..43), // ')' - /// Match::must(0, 43..44), // ' ' - /// Match::must(2, 44..46), // '->' - /// Match::must(0, 46..47), // ' ' - /// Match::must(1, 47..51), // 'bool' - /// Match::must(3, 51..52), // ';' - /// ]); - /// - /// # Ok::<(), Box>(()) - /// ``` - /// - /// One can write a lexer like the above using a regex like - /// `(?P[[:space:]])|(?P[A-Za-z0-9][A-Za-z0-9_]+)|...`, - /// but then you need to ask whether capture group matched to determine - /// which branch in the regex matched, and thus, which token the match - /// corresponds to. In contrast, the above example includes the pattern ID - /// in the match. There's no need to use capture groups at all. - /// - /// # Example: finding the pattern that caused an error - /// - /// When a syntax error occurs, it is possible to ask which pattern - /// caused the syntax error. - /// - /// ``` - /// use regex_automata::{meta::Regex, PatternID}; - /// - /// let err = Regex::new_many(&["a", "b", r"\p{Foo}", "c"]).unwrap_err(); - /// assert_eq!(Some(PatternID::must(2)), err.pattern()); - /// ``` - /// - /// # Example: zero patterns is valid - /// - /// Building a regex with zero patterns results in a regex that never - /// matches anything. Because this routine is generic, passing an empty - /// slice usually requires a turbo-fish (or something else to help type - /// inference). 
- /// - /// ``` - /// use regex_automata::{meta::Regex, util::syntax, Match}; - /// - /// let re = Regex::new_many::<&str>(&[])?; - /// assert_eq!(None, re.find("")); - /// - /// # Ok::<(), Box>(()) - /// ``` - pub fn new_many>( - patterns: &[P], - ) -> Result { - Regex::builder().build_many(patterns) - } - - /// Return a default configuration for a `Regex`. - /// - /// This is a convenience routine to avoid needing to import the [`Config`] - /// type when customizing the construction of a `Regex`. - /// - /// # Example: lower the NFA size limit - /// - /// In some cases, the default size limit might be too big. The size limit - /// can be lowered, which will prevent large regex patterns from compiling. - /// - /// ``` - /// # if cfg!(miri) { return Ok(()); } // miri takes too long - /// use regex_automata::meta::Regex; - /// - /// let result = Regex::builder() - /// .configure(Regex::config().nfa_size_limit(Some(20 * (1<<10)))) - /// // Not even 20KB is enough to build a single large Unicode class! - /// .build(r"\pL"); - /// assert!(result.is_err()); - /// - /// # Ok::<(), Box>(()) - /// ``` - pub fn config() -> Config { - Config::new() - } - - /// Return a builder for configuring the construction of a `Regex`. - /// - /// This is a convenience routine to avoid needing to import the - /// [`Builder`] type in common cases. - /// - /// # Example: change the line terminator - /// - /// This example shows how to enable multi-line mode by default and change - /// the line terminator to the NUL byte: - /// - /// ``` - /// use regex_automata::{meta::Regex, util::syntax, Match}; - /// - /// let re = Regex::builder() - /// .syntax(syntax::Config::new().multi_line(true)) - /// .configure(Regex::config().line_terminator(b'\x00')) - /// .build(r"^foo$")?; - /// let hay = "\x00foo\x00"; - /// assert_eq!(Some(Match::must(0, 1..4)), re.find(hay)); - /// - /// # Ok::<(), Box>(()) - /// ``` - pub fn builder() -> Builder { - Builder::new() - } -} - -/// High level convenience routines for using a regex to search a haystack. -impl Regex { - /// Returns true if and only if this regex matches the given haystack. - /// - /// This routine may short circuit if it knows that scanning future input - /// will never lead to a different result. (Consider how this might make - /// a difference given the regex `a+` on the haystack `aaaaaaaaaaaaaaa`. - /// This routine _may_ stop after it sees the first `a`, but routines like - /// `find` need to continue searching because `+` is greedy by default.) - /// - /// # Example - /// - /// ``` - /// use regex_automata::meta::Regex; - /// - /// let re = Regex::new("foo[0-9]+bar")?; - /// - /// assert!(re.is_match("foo12345bar")); - /// assert!(!re.is_match("foobar")); - /// - /// # Ok::<(), Box>(()) - /// ``` - /// - /// # Example: consistency with search APIs - /// - /// `is_match` is guaranteed to return `true` whenever `find` returns a - /// match. This includes searches that are executed entirely within a - /// codepoint: - /// - /// ``` - /// use regex_automata::{meta::Regex, Input}; - /// - /// let re = Regex::new("a*")?; - /// - /// // This doesn't match because the default configuration bans empty - /// // matches from splitting a codepoint. 
- /// assert!(!re.is_match(Input::new("☃").span(1..2))); - /// assert_eq!(None, re.find(Input::new("☃").span(1..2))); - /// - /// # Ok::<(), Box>(()) - /// ``` - /// - /// Notice that when UTF-8 mode is disabled, then the above reports a - /// match because the restriction against zero-width matches that split a - /// codepoint has been lifted: - /// - /// ``` - /// use regex_automata::{meta::Regex, Input, Match}; - /// - /// let re = Regex::builder() - /// .configure(Regex::config().utf8_empty(false)) - /// .build("a*")?; - /// - /// assert!(re.is_match(Input::new("☃").span(1..2))); - /// assert_eq!( - /// Some(Match::must(0, 1..1)), - /// re.find(Input::new("☃").span(1..2)), - /// ); - /// - /// # Ok::<(), Box>(()) - /// ``` - /// - /// A similar idea applies when using line anchors with CRLF mode enabled, - /// which prevents them from matching between a `\r` and a `\n`. - /// - /// ``` - /// use regex_automata::{meta::Regex, Input, Match}; - /// - /// let re = Regex::new(r"(?Rm:$)")?; - /// assert!(!re.is_match(Input::new("\r\n").span(1..1))); - /// // A regular line anchor, which only considers \n as a - /// // line terminator, will match. - /// let re = Regex::new(r"(?m:$)")?; - /// assert!(re.is_match(Input::new("\r\n").span(1..1))); - /// - /// # Ok::<(), Box>(()) - /// ``` - #[inline] - pub fn is_match<'h, I: Into>>(&self, input: I) -> bool { - let input = input.into().earliest(true); - if self.imp.info.is_impossible(&input) { - return false; - } - let mut guard = self.pool.get(); - let result = self.imp.strat.is_match(&mut guard, &input); - // See 'Regex::search' for why we put the guard back explicitly. - PoolGuard::put(guard); - result - } - - /// Executes a leftmost search and returns the first match that is found, - /// if one exists. - /// - /// # Example - /// - /// ``` - /// use regex_automata::{meta::Regex, Match}; - /// - /// let re = Regex::new("foo[0-9]+")?; - /// assert_eq!(Some(Match::must(0, 0..8)), re.find("foo12345")); - /// - /// # Ok::<(), Box>(()) - /// ``` - #[inline] - pub fn find<'h, I: Into>>(&self, input: I) -> Option { - self.search(&input.into()) - } - - /// Executes a leftmost forward search and writes the spans of capturing - /// groups that participated in a match into the provided [`Captures`] - /// value. If no match was found, then [`Captures::is_match`] is guaranteed - /// to return `false`. - /// - /// # Example - /// - /// ``` - /// use regex_automata::{meta::Regex, Span}; - /// - /// let re = Regex::new(r"^([0-9]{4})-([0-9]{2})-([0-9]{2})$")?; - /// let mut caps = re.create_captures(); - /// - /// re.captures("2010-03-14", &mut caps); - /// assert!(caps.is_match()); - /// assert_eq!(Some(Span::from(0..4)), caps.get_group(1)); - /// assert_eq!(Some(Span::from(5..7)), caps.get_group(2)); - /// assert_eq!(Some(Span::from(8..10)), caps.get_group(3)); - /// - /// # Ok::<(), Box>(()) - /// ``` - #[inline] - pub fn captures<'h, I: Into>>( - &self, - input: I, - caps: &mut Captures, - ) { - self.search_captures(&input.into(), caps) - } - - /// Returns an iterator over all non-overlapping leftmost matches in - /// the given haystack. If no match exists, then the iterator yields no - /// elements. 
- /// - /// # Example - /// - /// ``` - /// use regex_automata::{meta::Regex, Match}; - /// - /// let re = Regex::new("foo[0-9]+")?; - /// let haystack = "foo1 foo12 foo123"; - /// let matches: Vec = re.find_iter(haystack).collect(); - /// assert_eq!(matches, vec![ - /// Match::must(0, 0..4), - /// Match::must(0, 5..10), - /// Match::must(0, 11..17), - /// ]); - /// # Ok::<(), Box>(()) - /// ``` - #[inline] - pub fn find_iter<'r, 'h, I: Into>>( - &'r self, - input: I, - ) -> FindMatches<'r, 'h> { - let cache = self.pool.get(); - let it = iter::Searcher::new(input.into()); - FindMatches { re: self, cache, it } - } - - /// Returns an iterator over all non-overlapping `Captures` values. If no - /// match exists, then the iterator yields no elements. - /// - /// This yields the same matches as [`Regex::find_iter`], but it includes - /// the spans of all capturing groups that participate in each match. - /// - /// **Tip:** See [`util::iter::Searcher`](crate::util::iter::Searcher) for - /// how to correctly iterate over all matches in a haystack while avoiding - /// the creation of a new `Captures` value for every match. (Which you are - /// forced to do with an `Iterator`.) - /// - /// # Example - /// - /// ``` - /// use regex_automata::{meta::Regex, Span}; - /// - /// let re = Regex::new("foo(?P[0-9]+)")?; - /// - /// let haystack = "foo1 foo12 foo123"; - /// let matches: Vec = re - /// .captures_iter(haystack) - /// // The unwrap is OK since 'numbers' matches if the pattern matches. - /// .map(|caps| caps.get_group_by_name("numbers").unwrap()) - /// .collect(); - /// assert_eq!(matches, vec![ - /// Span::from(3..4), - /// Span::from(8..10), - /// Span::from(14..17), - /// ]); - /// # Ok::<(), Box>(()) - /// ``` - #[inline] - pub fn captures_iter<'r, 'h, I: Into>>( - &'r self, - input: I, - ) -> CapturesMatches<'r, 'h> { - let cache = self.pool.get(); - let caps = self.create_captures(); - let it = iter::Searcher::new(input.into()); - CapturesMatches { re: self, cache, caps, it } - } - - /// Returns an iterator of spans of the haystack given, delimited by a - /// match of the regex. Namely, each element of the iterator corresponds to - /// a part of the haystack that *isn't* matched by the regular expression. 
- /// - /// # Example - /// - /// To split a string delimited by arbitrary amounts of spaces or tabs: - /// - /// ``` - /// use regex_automata::meta::Regex; - /// - /// let re = Regex::new(r"[ \t]+")?; - /// let hay = "a b \t c\td e"; - /// let fields: Vec<&str> = re.split(hay).map(|span| &hay[span]).collect(); - /// assert_eq!(fields, vec!["a", "b", "c", "d", "e"]); - /// - /// # Ok::<(), Box>(()) - /// ``` - /// - /// # Example: more cases - /// - /// Basic usage: - /// - /// ``` - /// use regex_automata::meta::Regex; - /// - /// let re = Regex::new(r" ")?; - /// let hay = "Mary had a little lamb"; - /// let got: Vec<&str> = re.split(hay).map(|sp| &hay[sp]).collect(); - /// assert_eq!(got, vec!["Mary", "had", "a", "little", "lamb"]); - /// - /// let re = Regex::new(r"X")?; - /// let hay = ""; - /// let got: Vec<&str> = re.split(hay).map(|sp| &hay[sp]).collect(); - /// assert_eq!(got, vec![""]); - /// - /// let re = Regex::new(r"X")?; - /// let hay = "lionXXtigerXleopard"; - /// let got: Vec<&str> = re.split(hay).map(|sp| &hay[sp]).collect(); - /// assert_eq!(got, vec!["lion", "", "tiger", "leopard"]); - /// - /// let re = Regex::new(r"::")?; - /// let hay = "lion::tiger::leopard"; - /// let got: Vec<&str> = re.split(hay).map(|sp| &hay[sp]).collect(); - /// assert_eq!(got, vec!["lion", "tiger", "leopard"]); - /// - /// # Ok::<(), Box>(()) - /// ``` - /// - /// If a haystack contains multiple contiguous matches, you will end up - /// with empty spans yielded by the iterator: - /// - /// ``` - /// use regex_automata::meta::Regex; - /// - /// let re = Regex::new(r"X")?; - /// let hay = "XXXXaXXbXc"; - /// let got: Vec<&str> = re.split(hay).map(|sp| &hay[sp]).collect(); - /// assert_eq!(got, vec!["", "", "", "", "a", "", "b", "c"]); - /// - /// let re = Regex::new(r"/")?; - /// let hay = "(///)"; - /// let got: Vec<&str> = re.split(hay).map(|sp| &hay[sp]).collect(); - /// assert_eq!(got, vec!["(", "", "", ")"]); - /// - /// # Ok::<(), Box>(()) - /// ``` - /// - /// Separators at the start or end of a haystack are neighbored by empty - /// spans. - /// - /// ``` - /// use regex_automata::meta::Regex; - /// - /// let re = Regex::new(r"0")?; - /// let hay = "010"; - /// let got: Vec<&str> = re.split(hay).map(|sp| &hay[sp]).collect(); - /// assert_eq!(got, vec!["", "1", ""]); - /// - /// # Ok::<(), Box>(()) - /// ``` - /// - /// When the empty string is used as a regex, it splits at every valid - /// UTF-8 boundary by default (which includes the beginning and end of the - /// haystack): - /// - /// ``` - /// use regex_automata::meta::Regex; - /// - /// let re = Regex::new(r"")?; - /// let hay = "rust"; - /// let got: Vec<&str> = re.split(hay).map(|sp| &hay[sp]).collect(); - /// assert_eq!(got, vec!["", "r", "u", "s", "t", ""]); - /// - /// // Splitting by an empty string is UTF-8 aware by default! - /// let re = Regex::new(r"")?; - /// let hay = "☃"; - /// let got: Vec<&str> = re.split(hay).map(|sp| &hay[sp]).collect(); - /// assert_eq!(got, vec!["", "☃", ""]); - /// - /// # Ok::<(), Box>(()) - /// ``` - /// - /// But note that UTF-8 mode for empty strings can be disabled, which will - /// then result in a match at every byte offset in the haystack, - /// including between every UTF-8 code unit. 
- /// - /// ``` - /// use regex_automata::meta::Regex; - /// - /// let re = Regex::builder() - /// .configure(Regex::config().utf8_empty(false)) - /// .build(r"")?; - /// let hay = "☃".as_bytes(); - /// let got: Vec<&[u8]> = re.split(hay).map(|sp| &hay[sp]).collect(); - /// assert_eq!(got, vec![ - /// // Writing byte string slices is just brutal. The problem is that - /// // b"foo" has type &[u8; 3] instead of &[u8]. - /// &[][..], &[b'\xE2'][..], &[b'\x98'][..], &[b'\x83'][..], &[][..], - /// ]); - /// - /// # Ok::<(), Box>(()) - /// ``` - /// - /// Contiguous separators (commonly shows up with whitespace), can lead to - /// possibly surprising behavior. For example, this code is correct: - /// - /// ``` - /// use regex_automata::meta::Regex; - /// - /// let re = Regex::new(r" ")?; - /// let hay = " a b c"; - /// let got: Vec<&str> = re.split(hay).map(|sp| &hay[sp]).collect(); - /// assert_eq!(got, vec!["", "", "", "", "a", "", "b", "c"]); - /// - /// # Ok::<(), Box>(()) - /// ``` - /// - /// It does *not* give you `["a", "b", "c"]`. For that behavior, you'd want - /// to match contiguous space characters: - /// - /// ``` - /// use regex_automata::meta::Regex; - /// - /// let re = Regex::new(r" +")?; - /// let hay = " a b c"; - /// let got: Vec<&str> = re.split(hay).map(|sp| &hay[sp]).collect(); - /// // N.B. This does still include a leading empty span because ' +' - /// // matches at the beginning of the haystack. - /// assert_eq!(got, vec!["", "a", "b", "c"]); - /// - /// # Ok::<(), Box>(()) - /// ``` - #[inline] - pub fn split<'r, 'h, I: Into>>( - &'r self, - input: I, - ) -> Split<'r, 'h> { - Split { finder: self.find_iter(input), last: 0 } - } - - /// Returns an iterator of at most `limit` spans of the haystack given, - /// delimited by a match of the regex. (A `limit` of `0` will return no - /// spans.) Namely, each element of the iterator corresponds to a part - /// of the haystack that *isn't* matched by the regular expression. The - /// remainder of the haystack that is not split will be the last element in - /// the iterator. - /// - /// # Example - /// - /// Get the first two words in some haystack: - /// - /// ``` - /// # if cfg!(miri) { return Ok(()); } // miri takes too long - /// use regex_automata::meta::Regex; - /// - /// let re = Regex::new(r"\W+").unwrap(); - /// let hay = "Hey! 
How are you?"; - /// let fields: Vec<&str> = - /// re.splitn(hay, 3).map(|span| &hay[span]).collect(); - /// assert_eq!(fields, vec!["Hey", "How", "are you?"]); - /// - /// # Ok::<(), Box>(()) - /// ``` - /// - /// # Examples: more cases - /// - /// ``` - /// use regex_automata::meta::Regex; - /// - /// let re = Regex::new(r" ")?; - /// let hay = "Mary had a little lamb"; - /// let got: Vec<&str> = re.splitn(hay, 3).map(|sp| &hay[sp]).collect(); - /// assert_eq!(got, vec!["Mary", "had", "a little lamb"]); - /// - /// let re = Regex::new(r"X")?; - /// let hay = ""; - /// let got: Vec<&str> = re.splitn(hay, 3).map(|sp| &hay[sp]).collect(); - /// assert_eq!(got, vec![""]); - /// - /// let re = Regex::new(r"X")?; - /// let hay = "lionXXtigerXleopard"; - /// let got: Vec<&str> = re.splitn(hay, 3).map(|sp| &hay[sp]).collect(); - /// assert_eq!(got, vec!["lion", "", "tigerXleopard"]); - /// - /// let re = Regex::new(r"::")?; - /// let hay = "lion::tiger::leopard"; - /// let got: Vec<&str> = re.splitn(hay, 2).map(|sp| &hay[sp]).collect(); - /// assert_eq!(got, vec!["lion", "tiger::leopard"]); - /// - /// let re = Regex::new(r"X")?; - /// let hay = "abcXdef"; - /// let got: Vec<&str> = re.splitn(hay, 1).map(|sp| &hay[sp]).collect(); - /// assert_eq!(got, vec!["abcXdef"]); - /// - /// let re = Regex::new(r"X")?; - /// let hay = "abcdef"; - /// let got: Vec<&str> = re.splitn(hay, 2).map(|sp| &hay[sp]).collect(); - /// assert_eq!(got, vec!["abcdef"]); - /// - /// let re = Regex::new(r"X")?; - /// let hay = "abcXdef"; - /// let got: Vec<&str> = re.splitn(hay, 0).map(|sp| &hay[sp]).collect(); - /// assert!(got.is_empty()); - /// - /// # Ok::<(), Box>(()) - /// ``` - pub fn splitn<'r, 'h, I: Into>>( - &'r self, - input: I, - limit: usize, - ) -> SplitN<'r, 'h> { - SplitN { splits: self.split(input), limit } - } -} - -/// Lower level search routines that give more control. -impl Regex { - /// Returns the start and end offset of the leftmost match. If no match - /// exists, then `None` is returned. - /// - /// This is like [`Regex::find`] but, but it accepts a concrete `&Input` - /// instead of an `Into`. - /// - /// # Example - /// - /// ``` - /// use regex_automata::{meta::Regex, Input, Match}; - /// - /// let re = Regex::new(r"Samwise|Sam")?; - /// let input = Input::new( - /// "one of the chief characters, Samwise the Brave", - /// ); - /// assert_eq!(Some(Match::must(0, 29..36)), re.search(&input)); - /// - /// # Ok::<(), Box>(()) - /// ``` - #[inline] - pub fn search(&self, input: &Input<'_>) -> Option { - if self.imp.info.captures_disabled() - || self.imp.info.is_impossible(input) - { - return None; - } - let mut guard = self.pool.get(); - let result = self.imp.strat.search(&mut guard, input); - // We do this dance with the guard and explicitly put it back in the - // pool because it seems to result in better codegen. If we let the - // guard's Drop impl put it back in the pool, then functions like - // ptr::drop_in_place get called and they *don't* get inlined. This - // isn't usually a big deal, but in latency sensitive benchmarks the - // extra function call can matter. - // - // I used `rebar measure -f '^grep/every-line$' -e meta` to measure - // the effects here. - // - // Note that this doesn't eliminate the latency effects of using the - // pool. There is still some (minor) cost for the "thread owner" of the - // pool. (i.e., The thread that first calls a regex search routine.) 
- // However, for other threads using the regex, the pool access can be - // quite expensive as it goes through a mutex. Callers can avoid this - // by either cloning the Regex (which creates a distinct copy of the - // pool), or callers can use the lower level APIs that accept a 'Cache' - // directly and do their own handling. - PoolGuard::put(guard); - result - } - - /// Returns the end offset of the leftmost match. If no match exists, then - /// `None` is returned. - /// - /// This is distinct from [`Regex::search`] in that it only returns the end - /// of a match and not the start of the match. Depending on a variety of - /// implementation details, this _may_ permit the regex engine to do less - /// overall work. For example, if a DFA is being used to execute a search, - /// then the start of a match usually requires running a separate DFA in - /// reverse to find the start of a match. If one only needs the end of - /// a match, then the separate reverse scan to find the start of a match - /// can be skipped. (Note that the reverse scan is avoided even when using - /// `Regex::search` when possible, for example, in the case of an anchored - /// search.) - /// - /// # Example - /// - /// ``` - /// use regex_automata::{meta::Regex, Input, HalfMatch}; - /// - /// let re = Regex::new(r"Samwise|Sam")?; - /// let input = Input::new( - /// "one of the chief characters, Samwise the Brave", - /// ); - /// assert_eq!(Some(HalfMatch::must(0, 36)), re.search_half(&input)); - /// - /// # Ok::<(), Box<dyn std::error::Error>>(()) - /// ``` - #[inline] - pub fn search_half(&self, input: &Input<'_>) -> Option<HalfMatch> { - if self.imp.info.captures_disabled() - || self.imp.info.is_impossible(input) - { - return None; - } - let mut guard = self.pool.get(); - let result = self.imp.strat.search_half(&mut guard, input); - // See 'Regex::search' for why we put the guard back explicitly. - PoolGuard::put(guard); - result - } - - /// Executes a leftmost forward search and writes the spans of capturing - /// groups that participated in a match into the provided [`Captures`] - /// value. If no match was found, then [`Captures::is_match`] is guaranteed - /// to return `false`. - /// - /// This is like [`Regex::captures`], but it accepts a concrete `&Input` - /// instead of an `Into<Input>`. - /// - /// # Example: specific pattern search - /// - /// This example shows how to build a multi-pattern `Regex` that permits - /// searching for specific patterns. - /// - /// ``` - /// use regex_automata::{ - /// meta::Regex, - /// Anchored, Match, PatternID, Input, - /// }; - /// - /// let re = Regex::new_many(&["[a-z0-9]{6}", "[a-z][a-z0-9]{5}"])?; - /// let mut caps = re.create_captures(); - /// let haystack = "foo123"; - /// - /// // Since we are using the default leftmost-first match and both - /// // patterns match at the same starting position, only the first pattern - /// // will be returned in this case when doing a search for any of the - /// // patterns. - /// let expected = Some(Match::must(0, 0..6)); - /// re.search_captures(&Input::new(haystack), &mut caps); - /// assert_eq!(expected, caps.get_match()); - /// - /// // But if we want to check whether some other pattern matches, then we - /// // can provide its pattern ID. 
- /// let expected = Some(Match::must(1, 0..6)); - /// let input = Input::new(haystack) - /// .anchored(Anchored::Pattern(PatternID::must(1))); - /// re.search_captures(&input, &mut caps); - /// assert_eq!(expected, caps.get_match()); - /// - /// # Ok::<(), Box>(()) - /// ``` - /// - /// # Example: specifying the bounds of a search - /// - /// This example shows how providing the bounds of a search can produce - /// different results than simply sub-slicing the haystack. - /// - /// ``` - /// # if cfg!(miri) { return Ok(()); } // miri takes too long - /// use regex_automata::{meta::Regex, Match, Input}; - /// - /// let re = Regex::new(r"\b[0-9]{3}\b")?; - /// let mut caps = re.create_captures(); - /// let haystack = "foo123bar"; - /// - /// // Since we sub-slice the haystack, the search doesn't know about - /// // the larger context and assumes that `123` is surrounded by word - /// // boundaries. And of course, the match position is reported relative - /// // to the sub-slice as well, which means we get `0..3` instead of - /// // `3..6`. - /// let expected = Some(Match::must(0, 0..3)); - /// let input = Input::new(&haystack[3..6]); - /// re.search_captures(&input, &mut caps); - /// assert_eq!(expected, caps.get_match()); - /// - /// // But if we provide the bounds of the search within the context of the - /// // entire haystack, then the search can take the surrounding context - /// // into account. (And if we did find a match, it would be reported - /// // as a valid offset into `haystack` instead of its sub-slice.) - /// let expected = None; - /// let input = Input::new(haystack).range(3..6); - /// re.search_captures(&input, &mut caps); - /// assert_eq!(expected, caps.get_match()); - /// - /// # Ok::<(), Box>(()) - /// ``` - #[inline] - pub fn search_captures(&self, input: &Input<'_>, caps: &mut Captures) { - caps.set_pattern(None); - let pid = self.search_slots(input, caps.slots_mut()); - caps.set_pattern(pid); - } - - /// Executes a leftmost forward search and writes the spans of capturing - /// groups that participated in a match into the provided `slots`, and - /// returns the matching pattern ID. The contents of the slots for patterns - /// other than the matching pattern are unspecified. If no match was found, - /// then `None` is returned and the contents of `slots` is unspecified. - /// - /// This is like [`Regex::search`], but it accepts a raw slots slice - /// instead of a `Captures` value. This is useful in contexts where you - /// don't want or need to allocate a `Captures`. - /// - /// It is legal to pass _any_ number of slots to this routine. If the regex - /// engine would otherwise write a slot offset that doesn't fit in the - /// provided slice, then it is simply skipped. In general though, there are - /// usually three slice lengths you might want to use: - /// - /// * An empty slice, if you only care about which pattern matched. - /// * A slice with [`pattern_len() * 2`](Regex::pattern_len) slots, if you - /// only care about the overall match spans for each matching pattern. - /// * A slice with - /// [`slot_len()`](crate::util::captures::GroupInfo::slot_len) slots, which - /// permits recording match offsets for every capturing group in every - /// pattern. - /// - /// # Example - /// - /// This example shows how to find the overall match offsets in a - /// multi-pattern search without allocating a `Captures` value. Indeed, we - /// can put our slots right on the stack. 
- /// - /// ``` - /// # if cfg!(miri) { return Ok(()); } // miri takes too long - /// use regex_automata::{meta::Regex, PatternID, Input}; - /// - /// let re = Regex::new_many(&[ - /// r"\pL+", - /// r"\d+", - /// ])?; - /// let input = Input::new("!@#123"); - /// - /// // We only care about the overall match offsets here, so we just - /// // allocate two slots for each pattern. Each slot records the start - /// // and end of the match. - /// let mut slots = [None; 4]; - /// let pid = re.search_slots(&input, &mut slots); - /// assert_eq!(Some(PatternID::must(1)), pid); - /// - /// // The overall match offsets are always at 'pid * 2' and 'pid * 2 + 1'. - /// // See 'GroupInfo' for more details on the mapping between groups and - /// // slot indices. - /// let slot_start = pid.unwrap().as_usize() * 2; - /// let slot_end = slot_start + 1; - /// assert_eq!(Some(3), slots[slot_start].map(|s| s.get())); - /// assert_eq!(Some(6), slots[slot_end].map(|s| s.get())); - /// - /// # Ok::<(), Box>(()) - /// ``` - #[inline] - pub fn search_slots( - &self, - input: &Input<'_>, - slots: &mut [Option], - ) -> Option { - if self.imp.info.captures_disabled() - || self.imp.info.is_impossible(input) - { - return None; - } - let mut guard = self.pool.get(); - let result = self.imp.strat.search_slots(&mut guard, input, slots); - // See 'Regex::search' for why we put the guard back explicitly. - PoolGuard::put(guard); - result - } - - /// Writes the set of patterns that match anywhere in the given search - /// configuration to `patset`. If multiple patterns match at the same - /// position and this `Regex` was configured with [`MatchKind::All`] - /// semantics, then all matching patterns are written to the given set. - /// - /// Unless all of the patterns in this `Regex` are anchored, then generally - /// speaking, this will scan the entire haystack. - /// - /// This search routine *does not* clear the pattern set. This gives some - /// flexibility to the caller (e.g., running multiple searches with the - /// same pattern set), but does make the API bug-prone if you're reusing - /// the same pattern set for multiple searches but intended them to be - /// independent. - /// - /// If a pattern ID matched but the given `PatternSet` does not have - /// sufficient capacity to store it, then it is not inserted and silently - /// dropped. - /// - /// # Example - /// - /// This example shows how to find all matching patterns in a haystack, - /// even when some patterns match at the same position as other patterns. - /// It is important that we configure the `Regex` with [`MatchKind::All`] - /// semantics here, or else overlapping matches will not be reported. 
- /// - /// ``` - /// # if cfg!(miri) { return Ok(()); } // miri takes too long - /// use regex_automata::{meta::Regex, Input, MatchKind, PatternSet}; - /// - /// let patterns = &[ - /// r"\w+", r"\d+", r"\pL+", r"foo", r"bar", r"barfoo", r"foobar", - /// ]; - /// let re = Regex::builder() - /// .configure(Regex::config().match_kind(MatchKind::All)) - /// .build_many(patterns)?; - /// - /// let input = Input::new("foobar"); - /// let mut patset = PatternSet::new(re.pattern_len()); - /// re.which_overlapping_matches(&input, &mut patset); - /// let expected = vec![0, 2, 3, 4, 6]; - /// let got: Vec = patset.iter().map(|p| p.as_usize()).collect(); - /// assert_eq!(expected, got); - /// - /// # Ok::<(), Box>(()) - /// ``` - #[inline] - pub fn which_overlapping_matches( - &self, - input: &Input<'_>, - patset: &mut PatternSet, - ) { - if self.imp.info.is_impossible(input) { - return; - } - let mut guard = self.pool.get(); - let result = self - .imp - .strat - .which_overlapping_matches(&mut guard, input, patset); - // See 'Regex::search' for why we put the guard back explicitly. - PoolGuard::put(guard); - result - } -} - -/// Lower level search routines that give more control, and require the caller -/// to provide an explicit [`Cache`] parameter. -impl Regex { - /// This is like [`Regex::search`], but requires the caller to - /// explicitly pass a [`Cache`]. - /// - /// # Why pass a `Cache` explicitly? - /// - /// Passing a `Cache` explicitly will bypass the use of an internal memory - /// pool used by `Regex` to get a `Cache` for a search. The use of this - /// pool can be slower in some cases when a `Regex` is used from multiple - /// threads simultaneously. Typically, performance only becomes an issue - /// when there is heavy contention, which in turn usually only occurs - /// when each thread's primary unit of work is a regex search on a small - /// haystack. - /// - /// # Example - /// - /// ``` - /// use regex_automata::{meta::Regex, Input, Match}; - /// - /// let re = Regex::new(r"Samwise|Sam")?; - /// let mut cache = re.create_cache(); - /// let input = Input::new( - /// "one of the chief characters, Samwise the Brave", - /// ); - /// assert_eq!( - /// Some(Match::must(0, 29..36)), - /// re.search_with(&mut cache, &input), - /// ); - /// - /// # Ok::<(), Box>(()) - /// ``` - #[inline] - pub fn search_with( - &self, - cache: &mut Cache, - input: &Input<'_>, - ) -> Option { - if self.imp.info.captures_disabled() - || self.imp.info.is_impossible(input) - { - return None; - } - self.imp.strat.search(cache, input) - } - - /// This is like [`Regex::search_half`], but requires the caller to - /// explicitly pass a [`Cache`]. - /// - /// # Why pass a `Cache` explicitly? - /// - /// Passing a `Cache` explicitly will bypass the use of an internal memory - /// pool used by `Regex` to get a `Cache` for a search. The use of this - /// pool can be slower in some cases when a `Regex` is used from multiple - /// threads simultaneously. Typically, performance only becomes an issue - /// when there is heavy contention, which in turn usually only occurs - /// when each thread's primary unit of work is a regex search on a small - /// haystack. 
- /// - /// # Example - /// - /// ``` - /// use regex_automata::{meta::Regex, Input, HalfMatch}; - /// - /// let re = Regex::new(r"Samwise|Sam")?; - /// let mut cache = re.create_cache(); - /// let input = Input::new( - /// "one of the chief characters, Samwise the Brave", - /// ); - /// assert_eq!( - /// Some(HalfMatch::must(0, 36)), - /// re.search_half_with(&mut cache, &input), - /// ); - /// - /// # Ok::<(), Box>(()) - /// ``` - #[inline] - pub fn search_half_with( - &self, - cache: &mut Cache, - input: &Input<'_>, - ) -> Option { - if self.imp.info.captures_disabled() - || self.imp.info.is_impossible(input) - { - return None; - } - self.imp.strat.search_half(cache, input) - } - - /// This is like [`Regex::search_captures`], but requires the caller to - /// explicitly pass a [`Cache`]. - /// - /// # Why pass a `Cache` explicitly? - /// - /// Passing a `Cache` explicitly will bypass the use of an internal memory - /// pool used by `Regex` to get a `Cache` for a search. The use of this - /// pool can be slower in some cases when a `Regex` is used from multiple - /// threads simultaneously. Typically, performance only becomes an issue - /// when there is heavy contention, which in turn usually only occurs - /// when each thread's primary unit of work is a regex search on a small - /// haystack. - /// - /// # Example: specific pattern search - /// - /// This example shows how to build a multi-pattern `Regex` that permits - /// searching for specific patterns. - /// - /// ``` - /// use regex_automata::{ - /// meta::Regex, - /// Anchored, Match, PatternID, Input, - /// }; - /// - /// let re = Regex::new_many(&["[a-z0-9]{6}", "[a-z][a-z0-9]{5}"])?; - /// let (mut cache, mut caps) = (re.create_cache(), re.create_captures()); - /// let haystack = "foo123"; - /// - /// // Since we are using the default leftmost-first match and both - /// // patterns match at the same starting position, only the first pattern - /// // will be returned in this case when doing a search for any of the - /// // patterns. - /// let expected = Some(Match::must(0, 0..6)); - /// re.search_captures_with(&mut cache, &Input::new(haystack), &mut caps); - /// assert_eq!(expected, caps.get_match()); - /// - /// // But if we want to check whether some other pattern matches, then we - /// // can provide its pattern ID. - /// let expected = Some(Match::must(1, 0..6)); - /// let input = Input::new(haystack) - /// .anchored(Anchored::Pattern(PatternID::must(1))); - /// re.search_captures_with(&mut cache, &input, &mut caps); - /// assert_eq!(expected, caps.get_match()); - /// - /// # Ok::<(), Box>(()) - /// ``` - /// - /// # Example: specifying the bounds of a search - /// - /// This example shows how providing the bounds of a search can produce - /// different results than simply sub-slicing the haystack. - /// - /// ``` - /// # if cfg!(miri) { return Ok(()); } // miri takes too long - /// use regex_automata::{meta::Regex, Match, Input}; - /// - /// let re = Regex::new(r"\b[0-9]{3}\b")?; - /// let (mut cache, mut caps) = (re.create_cache(), re.create_captures()); - /// let haystack = "foo123bar"; - /// - /// // Since we sub-slice the haystack, the search doesn't know about - /// // the larger context and assumes that `123` is surrounded by word - /// // boundaries. And of course, the match position is reported relative - /// // to the sub-slice as well, which means we get `0..3` instead of - /// // `3..6`. 
- /// let expected = Some(Match::must(0, 0..3)); - /// let input = Input::new(&haystack[3..6]); - /// re.search_captures_with(&mut cache, &input, &mut caps); - /// assert_eq!(expected, caps.get_match()); - /// - /// // But if we provide the bounds of the search within the context of the - /// // entire haystack, then the search can take the surrounding context - /// // into account. (And if we did find a match, it would be reported - /// // as a valid offset into `haystack` instead of its sub-slice.) - /// let expected = None; - /// let input = Input::new(haystack).range(3..6); - /// re.search_captures_with(&mut cache, &input, &mut caps); - /// assert_eq!(expected, caps.get_match()); - /// - /// # Ok::<(), Box>(()) - /// ``` - #[inline] - pub fn search_captures_with( - &self, - cache: &mut Cache, - input: &Input<'_>, - caps: &mut Captures, - ) { - caps.set_pattern(None); - let pid = self.search_slots_with(cache, input, caps.slots_mut()); - caps.set_pattern(pid); - } - - /// This is like [`Regex::search_slots`], but requires the caller to - /// explicitly pass a [`Cache`]. - /// - /// # Why pass a `Cache` explicitly? - /// - /// Passing a `Cache` explicitly will bypass the use of an internal memory - /// pool used by `Regex` to get a `Cache` for a search. The use of this - /// pool can be slower in some cases when a `Regex` is used from multiple - /// threads simultaneously. Typically, performance only becomes an issue - /// when there is heavy contention, which in turn usually only occurs - /// when each thread's primary unit of work is a regex search on a small - /// haystack. - /// - /// # Example - /// - /// This example shows how to find the overall match offsets in a - /// multi-pattern search without allocating a `Captures` value. Indeed, we - /// can put our slots right on the stack. - /// - /// ``` - /// # if cfg!(miri) { return Ok(()); } // miri takes too long - /// use regex_automata::{meta::Regex, PatternID, Input}; - /// - /// let re = Regex::new_many(&[ - /// r"\pL+", - /// r"\d+", - /// ])?; - /// let mut cache = re.create_cache(); - /// let input = Input::new("!@#123"); - /// - /// // We only care about the overall match offsets here, so we just - /// // allocate two slots for each pattern. Each slot records the start - /// // and end of the match. - /// let mut slots = [None; 4]; - /// let pid = re.search_slots_with(&mut cache, &input, &mut slots); - /// assert_eq!(Some(PatternID::must(1)), pid); - /// - /// // The overall match offsets are always at 'pid * 2' and 'pid * 2 + 1'. - /// // See 'GroupInfo' for more details on the mapping between groups and - /// // slot indices. - /// let slot_start = pid.unwrap().as_usize() * 2; - /// let slot_end = slot_start + 1; - /// assert_eq!(Some(3), slots[slot_start].map(|s| s.get())); - /// assert_eq!(Some(6), slots[slot_end].map(|s| s.get())); - /// - /// # Ok::<(), Box>(()) - /// ``` - #[inline] - pub fn search_slots_with( - &self, - cache: &mut Cache, - input: &Input<'_>, - slots: &mut [Option], - ) -> Option { - if self.imp.info.captures_disabled() - || self.imp.info.is_impossible(input) - { - return None; - } - self.imp.strat.search_slots(cache, input, slots) - } - - /// This is like [`Regex::which_overlapping_matches`], but requires the - /// caller to explicitly pass a [`Cache`]. - /// - /// Passing a `Cache` explicitly will bypass the use of an internal memory - /// pool used by `Regex` to get a `Cache` for a search. 
The use of this - /// pool can be slower in some cases when a `Regex` is used from multiple - /// threads simultaneously. Typically, performance only becomes an issue - /// when there is heavy contention, which in turn usually only occurs - /// when each thread's primary unit of work is a regex search on a small - /// haystack. - /// - /// # Why pass a `Cache` explicitly? - /// - /// # Example - /// - /// ``` - /// # if cfg!(miri) { return Ok(()); } // miri takes too long - /// use regex_automata::{meta::Regex, Input, MatchKind, PatternSet}; - /// - /// let patterns = &[ - /// r"\w+", r"\d+", r"\pL+", r"foo", r"bar", r"barfoo", r"foobar", - /// ]; - /// let re = Regex::builder() - /// .configure(Regex::config().match_kind(MatchKind::All)) - /// .build_many(patterns)?; - /// let mut cache = re.create_cache(); - /// - /// let input = Input::new("foobar"); - /// let mut patset = PatternSet::new(re.pattern_len()); - /// re.which_overlapping_matches_with(&mut cache, &input, &mut patset); - /// let expected = vec![0, 2, 3, 4, 6]; - /// let got: Vec = patset.iter().map(|p| p.as_usize()).collect(); - /// assert_eq!(expected, got); - /// - /// # Ok::<(), Box>(()) - /// ``` - #[inline] - pub fn which_overlapping_matches_with( - &self, - cache: &mut Cache, - input: &Input<'_>, - patset: &mut PatternSet, - ) { - if self.imp.info.is_impossible(input) { - return; - } - self.imp.strat.which_overlapping_matches(cache, input, patset) - } -} - -/// Various non-search routines for querying properties of a `Regex` and -/// convenience routines for creating [`Captures`] and [`Cache`] values. -impl Regex { - /// Creates a new object for recording capture group offsets. This is used - /// in search APIs like [`Regex::captures`] and [`Regex::search_captures`]. - /// - /// This is a convenience routine for - /// `Captures::all(re.group_info().clone())`. Callers may build other types - /// of `Captures` values that record less information (and thus require - /// less work from the regex engine) using [`Captures::matches`] and - /// [`Captures::empty`]. - /// - /// # Example - /// - /// This shows some alternatives to [`Regex::create_captures`]: - /// - /// ``` - /// use regex_automata::{ - /// meta::Regex, - /// util::captures::Captures, - /// Match, PatternID, Span, - /// }; - /// - /// let re = Regex::new(r"(?[A-Z][a-z]+) (?[A-Z][a-z]+)")?; - /// - /// // This is equivalent to Regex::create_captures. It stores matching - /// // offsets for all groups in the regex. - /// let mut all = Captures::all(re.group_info().clone()); - /// re.captures("Bruce Springsteen", &mut all); - /// assert_eq!(Some(Match::must(0, 0..17)), all.get_match()); - /// assert_eq!(Some(Span::from(0..5)), all.get_group_by_name("first")); - /// assert_eq!(Some(Span::from(6..17)), all.get_group_by_name("last")); - /// - /// // In this version, we only care about the implicit groups, which - /// // means offsets for the explicit groups will be unavailable. It can - /// // sometimes be faster to ask for fewer groups, since the underlying - /// // regex engine needs to do less work to keep track of them. - /// let mut matches = Captures::matches(re.group_info().clone()); - /// re.captures("Bruce Springsteen", &mut matches); - /// // We still get the overall match info. - /// assert_eq!(Some(Match::must(0, 0..17)), matches.get_match()); - /// // But now the explicit groups are unavailable. 
- /// assert_eq!(None, matches.get_group_by_name("first")); - /// assert_eq!(None, matches.get_group_by_name("last")); - /// - /// // Finally, in this version, we don't ask to keep track of offsets for - /// // *any* groups. All we get back is whether a match occurred, and if - /// // so, the ID of the pattern that matched. - /// let mut empty = Captures::empty(re.group_info().clone()); - /// re.captures("Bruce Springsteen", &mut empty); - /// // it's a match! - /// assert!(empty.is_match()); - /// // for pattern ID 0 - /// assert_eq!(Some(PatternID::ZERO), empty.pattern()); - /// // Match offsets are unavailable. - /// assert_eq!(None, empty.get_match()); - /// // And of course, explicit groups are unavailable too. - /// assert_eq!(None, empty.get_group_by_name("first")); - /// assert_eq!(None, empty.get_group_by_name("last")); - /// - /// # Ok::<(), Box>(()) - /// ``` - pub fn create_captures(&self) -> Captures { - Captures::all(self.group_info().clone()) - } - - /// Creates a new cache for use with lower level search APIs like - /// [`Regex::search_with`]. - /// - /// The cache returned should only be used for searches for this `Regex`. - /// If you want to reuse the cache for another `Regex`, then you must call - /// [`Cache::reset`] with that `Regex`. - /// - /// This is a convenience routine for [`Cache::new`]. - /// - /// # Example - /// - /// ``` - /// use regex_automata::{meta::Regex, Input, Match}; - /// - /// let re = Regex::new(r"(?-u)m\w+\s+m\w+")?; - /// let mut cache = re.create_cache(); - /// let input = Input::new("crazy janey and her mission man"); - /// assert_eq!( - /// Some(Match::must(0, 20..31)), - /// re.search_with(&mut cache, &input), - /// ); - /// - /// # Ok::<(), Box>(()) - /// ``` - pub fn create_cache(&self) -> Cache { - self.imp.strat.create_cache() - } - - /// Returns the total number of patterns in this regex. - /// - /// The standard [`Regex::new`] constructor always results in a `Regex` - /// with a single pattern, but [`Regex::new_many`] permits building a - /// multi-pattern regex. - /// - /// A `Regex` guarantees that the maximum possible `PatternID` returned in - /// any match is `Regex::pattern_len() - 1`. In the case where the number - /// of patterns is `0`, a match is impossible. - /// - /// # Example - /// - /// ``` - /// use regex_automata::meta::Regex; - /// - /// let re = Regex::new(r"(?m)^[a-z]$")?; - /// assert_eq!(1, re.pattern_len()); - /// - /// let re = Regex::new_many::<&str>(&[])?; - /// assert_eq!(0, re.pattern_len()); - /// - /// let re = Regex::new_many(&["a", "b", "c"])?; - /// assert_eq!(3, re.pattern_len()); - /// - /// # Ok::<(), Box>(()) - /// ``` - pub fn pattern_len(&self) -> usize { - self.imp.info.pattern_len() - } - - /// Returns the total number of capturing groups. - /// - /// This includes the implicit capturing group corresponding to the - /// entire match. Therefore, the minimum value returned is `1`. - /// - /// # Example - /// - /// This shows a few patterns and how many capture groups they have. 
- /// - /// ``` - /// use regex_automata::meta::Regex; - /// - /// let len = |pattern| { - /// Regex::new(pattern).map(|re| re.captures_len()) - /// }; - /// - /// assert_eq!(1, len("a")?); - /// assert_eq!(2, len("(a)")?); - /// assert_eq!(3, len("(a)|(b)")?); - /// assert_eq!(5, len("(a)(b)|(c)(d)")?); - /// assert_eq!(2, len("(a)|b")?); - /// assert_eq!(2, len("a|(b)")?); - /// assert_eq!(2, len("(b)*")?); - /// assert_eq!(2, len("(b)+")?); - /// - /// # Ok::<(), Box<dyn std::error::Error>>(()) - /// ``` - /// - /// # Example: multiple patterns - /// - /// This routine also works for multiple patterns. The total number is - /// the sum of the capture groups of each pattern. - /// - /// ``` - /// use regex_automata::meta::Regex; - /// - /// let len = |patterns| { - /// Regex::new_many(patterns).map(|re| re.captures_len()) - /// }; - /// - /// assert_eq!(2, len(&["a", "b"])?); - /// assert_eq!(4, len(&["(a)", "(b)"])?); - /// assert_eq!(6, len(&["(a)|(b)", "(c)|(d)"])?); - /// assert_eq!(8, len(&["(a)(b)|(c)(d)", "(x)(y)"])?); - /// assert_eq!(3, len(&["(a)", "b"])?); - /// assert_eq!(3, len(&["a", "(b)"])?); - /// assert_eq!(4, len(&["(a)", "(b)*"])?); - /// assert_eq!(4, len(&["(a)+", "(b)+"])?); - /// - /// # Ok::<(), Box<dyn std::error::Error>>(()) - /// ``` - pub fn captures_len(&self) -> usize { - self.imp - .info - .props_union() - .explicit_captures_len() - .saturating_add(self.pattern_len()) - } - - /// Returns the total number of capturing groups that appear in every - /// possible match. - /// - /// If the number of capture groups can vary depending on the match, then - /// this returns `None`. That is, a value is only returned when the number - /// of matching groups is invariant or "static." - /// - /// Note that like [`Regex::captures_len`], this **does** include the - /// implicit capturing group corresponding to the entire match. Therefore, - /// when a non-None value is returned, it is guaranteed to be at least `1`. - /// Stated differently, a return value of `Some(0)` is impossible. - /// - /// # Example - /// - /// This shows a few cases where a static number of capture groups is - /// available and a few cases where it is not. - /// - /// ``` - /// use regex_automata::meta::Regex; - /// - /// let len = |pattern| { - /// Regex::new(pattern).map(|re| re.static_captures_len()) - /// }; - /// - /// assert_eq!(Some(1), len("a")?); - /// assert_eq!(Some(2), len("(a)")?); - /// assert_eq!(Some(2), len("(a)|(b)")?); - /// assert_eq!(Some(3), len("(a)(b)|(c)(d)")?); - /// assert_eq!(None, len("(a)|b")?); - /// assert_eq!(None, len("a|(b)")?); - /// assert_eq!(None, len("(b)*")?); - /// assert_eq!(Some(2), len("(b)+")?); - /// - /// # Ok::<(), Box<dyn std::error::Error>>(()) - /// ``` - /// - /// # Example: multiple patterns - /// - /// This property extends to regexes with multiple patterns as well. In - /// order for there to be a static number of capture groups in this case, - /// every pattern must have the same static number. 
- /// - /// ``` - /// use regex_automata::meta::Regex; - /// - /// let len = |patterns| { - /// Regex::new_many(patterns).map(|re| re.static_captures_len()) - /// }; - /// - /// assert_eq!(Some(1), len(&["a", "b"])?); - /// assert_eq!(Some(2), len(&["(a)", "(b)"])?); - /// assert_eq!(Some(2), len(&["(a)|(b)", "(c)|(d)"])?); - /// assert_eq!(Some(3), len(&["(a)(b)|(c)(d)", "(x)(y)"])?); - /// assert_eq!(None, len(&["(a)", "b"])?); - /// assert_eq!(None, len(&["a", "(b)"])?); - /// assert_eq!(None, len(&["(a)", "(b)*"])?); - /// assert_eq!(Some(2), len(&["(a)+", "(b)+"])?); - /// - /// # Ok::<(), Box>(()) - /// ``` - #[inline] - pub fn static_captures_len(&self) -> Option { - self.imp - .info - .props_union() - .static_explicit_captures_len() - .map(|len| len.saturating_add(1)) - } - - /// Return information about the capture groups in this `Regex`. - /// - /// A `GroupInfo` is an immutable object that can be cheaply cloned. It - /// is responsible for maintaining a mapping between the capture groups - /// in the concrete syntax of zero or more regex patterns and their - /// internal representation used by some of the regex matchers. It is also - /// responsible for maintaining a mapping between the name of each group - /// (if one exists) and its corresponding group index. - /// - /// A `GroupInfo` is ultimately what is used to build a [`Captures`] value, - /// which is some mutable space where group offsets are stored as a result - /// of a search. - /// - /// # Example - /// - /// This shows some alternatives to [`Regex::create_captures`]: - /// - /// ``` - /// use regex_automata::{ - /// meta::Regex, - /// util::captures::Captures, - /// Match, PatternID, Span, - /// }; - /// - /// let re = Regex::new(r"(?[A-Z][a-z]+) (?[A-Z][a-z]+)")?; - /// - /// // This is equivalent to Regex::create_captures. It stores matching - /// // offsets for all groups in the regex. - /// let mut all = Captures::all(re.group_info().clone()); - /// re.captures("Bruce Springsteen", &mut all); - /// assert_eq!(Some(Match::must(0, 0..17)), all.get_match()); - /// assert_eq!(Some(Span::from(0..5)), all.get_group_by_name("first")); - /// assert_eq!(Some(Span::from(6..17)), all.get_group_by_name("last")); - /// - /// // In this version, we only care about the implicit groups, which - /// // means offsets for the explicit groups will be unavailable. It can - /// // sometimes be faster to ask for fewer groups, since the underlying - /// // regex engine needs to do less work to keep track of them. - /// let mut matches = Captures::matches(re.group_info().clone()); - /// re.captures("Bruce Springsteen", &mut matches); - /// // We still get the overall match info. - /// assert_eq!(Some(Match::must(0, 0..17)), matches.get_match()); - /// // But now the explicit groups are unavailable. - /// assert_eq!(None, matches.get_group_by_name("first")); - /// assert_eq!(None, matches.get_group_by_name("last")); - /// - /// // Finally, in this version, we don't ask to keep track of offsets for - /// // *any* groups. All we get back is whether a match occurred, and if - /// // so, the ID of the pattern that matched. - /// let mut empty = Captures::empty(re.group_info().clone()); - /// re.captures("Bruce Springsteen", &mut empty); - /// // it's a match! - /// assert!(empty.is_match()); - /// // for pattern ID 0 - /// assert_eq!(Some(PatternID::ZERO), empty.pattern()); - /// // Match offsets are unavailable. - /// assert_eq!(None, empty.get_match()); - /// // And of course, explicit groups are unavailable too. 
- /// assert_eq!(None, empty.get_group_by_name("first")); - /// assert_eq!(None, empty.get_group_by_name("last")); - /// - /// # Ok::<(), Box>(()) - /// ``` - #[inline] - pub fn group_info(&self) -> &GroupInfo { - self.imp.strat.group_info() - } - - /// Returns the configuration object used to build this `Regex`. - /// - /// If no configuration object was explicitly passed, then the - /// configuration returned represents the default. - #[inline] - pub fn get_config(&self) -> &Config { - self.imp.info.config() - } - - /// Returns true if this regex has a high chance of being "accelerated." - /// - /// The precise meaning of "accelerated" is specifically left unspecified, - /// but the general meaning is that the search is a high likelihood of - /// running faster than a character-at-a-time loop inside a standard - /// regex engine. - /// - /// When a regex is accelerated, it is only a *probabilistic* claim. That - /// is, just because the regex is believed to be accelerated, that doesn't - /// mean it will definitely execute searches very fast. Similarly, if a - /// regex is *not* accelerated, that is also a probabilistic claim. That - /// is, a regex for which `is_accelerated` returns `false` could still run - /// searches more quickly than a regex for which `is_accelerated` returns - /// `true`. - /// - /// Whether a regex is marked as accelerated or not is dependent on - /// implementations details that may change in a semver compatible release. - /// That is, a regex that is accelerated in a `x.y.1` release might not be - /// accelerated in a `x.y.2` release. - /// - /// Basically, the value of acceleration boils down to a hedge: a hodge - /// podge of internal heuristics combine to make a probabilistic guess - /// that this regex search may run "fast." The value in knowing this from - /// a caller's perspective is that it may act as a signal that no further - /// work should be done to accelerate a search. For example, a grep-like - /// tool might try to do some extra work extracting literals from a regex - /// to create its own heuristic acceleration strategies. But it might - /// choose to defer to this crate's acceleration strategy if one exists. - /// This routine permits querying whether such a strategy is active for a - /// particular regex. - /// - /// # Example - /// - /// ``` - /// use regex_automata::meta::Regex; - /// - /// // A simple literal is very likely to be accelerated. - /// let re = Regex::new(r"foo")?; - /// assert!(re.is_accelerated()); - /// - /// // A regex with no literals is likely to not be accelerated. - /// let re = Regex::new(r"\w")?; - /// assert!(!re.is_accelerated()); - /// - /// # Ok::<(), Box>(()) - /// ``` - #[inline] - pub fn is_accelerated(&self) -> bool { - self.imp.strat.is_accelerated() - } - - /// Return the total approximate heap memory, in bytes, used by this `Regex`. - /// - /// Note that currently, there is no high level configuration for setting - /// a limit on the specific value returned by this routine. Instead, the - /// following routines can be used to control heap memory at a bit of a - /// lower level: - /// - /// * [`Config::nfa_size_limit`] controls how big _any_ of the NFAs are - /// allowed to be. - /// * [`Config::onepass_size_limit`] controls how big the one-pass DFA is - /// allowed to be. - /// * [`Config::hybrid_cache_capacity`] controls how much memory the lazy - /// DFA is permitted to allocate to store its transition table. - /// * [`Config::dfa_size_limit`] controls how big a fully compiled DFA is - /// allowed to be. 
-    /// * [`Config::dfa_state_limit`] controls the conditions under which the
-    ///   meta regex engine will even attempt to build a fully compiled DFA.
-    #[inline]
-    pub fn memory_usage(&self) -> usize {
-        self.imp.strat.memory_usage()
-    }
-}
-
-impl Clone for Regex {
-    fn clone(&self) -> Regex {
-        let imp = Arc::clone(&self.imp);
-        let pool = {
-            let strat = Arc::clone(&imp.strat);
-            let create: CachePoolFn = Box::new(move || strat.create_cache());
-            Pool::new(create)
-        };
-        Regex { imp, pool }
-    }
-}
-
-#[derive(Clone, Debug)]
-pub(crate) struct RegexInfo(Arc<RegexInfoI>);
-
-#[derive(Clone, Debug)]
-struct RegexInfoI {
-    config: Config,
-    props: Vec<hir::Properties>,
-    props_union: hir::Properties,
-}
-
-impl RegexInfo {
-    fn new(config: Config, hirs: &[&Hir]) -> RegexInfo {
-        // Collect all of the properties from each of the HIRs, and also
-        // union them into one big set of properties representing all HIRs
-        // as if they were in one big alternation.
-        let mut props = vec![];
-        for hir in hirs.iter() {
-            props.push(hir.properties().clone());
-        }
-        let props_union = hir::Properties::union(&props);
-
-        RegexInfo(Arc::new(RegexInfoI { config, props, props_union }))
-    }
-
-    pub(crate) fn config(&self) -> &Config {
-        &self.0.config
-    }
-
-    pub(crate) fn props(&self) -> &[hir::Properties] {
-        &self.0.props
-    }
-
-    pub(crate) fn props_union(&self) -> &hir::Properties {
-        &self.0.props_union
-    }
-
-    pub(crate) fn pattern_len(&self) -> usize {
-        self.props().len()
-    }
-
-    pub(crate) fn memory_usage(&self) -> usize {
-        self.props().iter().map(|p| p.memory_usage()).sum::<usize>()
-            + self.props_union().memory_usage()
-    }
-
-    /// Returns true when the search is guaranteed to be anchored. That is,
-    /// when a match is reported, its offset is guaranteed to correspond to
-    /// the start of the search.
-    ///
-    /// This includes returning true when `input` _isn't_ anchored but the
-    /// underlying regex is.
-    #[cfg_attr(feature = "perf-inline", inline(always))]
-    pub(crate) fn is_anchored_start(&self, input: &Input<'_>) -> bool {
-        input.get_anchored().is_anchored() || self.is_always_anchored_start()
-    }
-
-    /// Returns true when this regex is always anchored to the start of a
-    /// search. And in particular, that regardless of an `Input` configuration,
-    /// if any match is reported it must start at `0`.
-    #[cfg_attr(feature = "perf-inline", inline(always))]
-    pub(crate) fn is_always_anchored_start(&self) -> bool {
-        use regex_syntax::hir::Look;
-        self.props_union().look_set_prefix().contains(Look::Start)
-    }
-
-    /// Returns true when this regex is always anchored to the end of a
-    /// search. And in particular, that regardless of an `Input` configuration,
-    /// if any match is reported it must end at the end of the haystack.
-    #[cfg_attr(feature = "perf-inline", inline(always))]
-    pub(crate) fn is_always_anchored_end(&self) -> bool {
-        use regex_syntax::hir::Look;
-        self.props_union().look_set_suffix().contains(Look::End)
-    }
-
-    /// Returns true when the regex's NFA lacks capture states.
-    ///
-    /// In this case, some regex engines (like the PikeVM) are unable to report
-    /// match offsets, while others (like the lazy DFA) can. To avoid having
-    /// whether a match is reported depend on engine selection, routines that
-    /// return match offsets will _always_ report `None` when this is true.
-    ///
-    /// Yes, this is a weird case and it's a little unfortunate. But
-    /// `WhichCaptures::None` comes with an appropriate warning.
- fn captures_disabled(&self) -> bool { - matches!(self.config().get_which_captures(), WhichCaptures::None) - } - - /// Returns true if and only if it is known that a match is impossible - /// for the given input. This is useful for short-circuiting and avoiding - /// running the regex engine if it's known no match can be reported. - /// - /// Note that this doesn't necessarily detect every possible case. For - /// example, when `pattern_len() == 0`, a match is impossible, but that - /// case is so rare that it's fine to be handled by the regex engine - /// itself. That is, it's not worth the cost of adding it here in order to - /// make it a little faster. The reason is that this is called for every - /// search. so there is some cost to adding checks here. Arguably, some of - /// the checks that are here already probably shouldn't be here... - #[cfg_attr(feature = "perf-inline", inline(always))] - fn is_impossible(&self, input: &Input<'_>) -> bool { - // The underlying regex is anchored, so if we don't start the search - // at position 0, a match is impossible, because the anchor can only - // match at position 0. - if input.start() > 0 && self.is_always_anchored_start() { - return true; - } - // Same idea, but for the end anchor. - if input.end() < input.haystack().len() - && self.is_always_anchored_end() - { - return true; - } - // If the haystack is smaller than the minimum length required, then - // we know there can be no match. - let minlen = match self.props_union().minimum_len() { - None => return false, - Some(minlen) => minlen, - }; - if input.get_span().len() < minlen { - return true; - } - // Same idea as minimum, but for maximum. This is trickier. We can - // only apply the maximum when we know the entire span that we're - // searching *has* to match according to the regex (and possibly the - // input configuration). If we know there is too much for the regex - // to match, we can bail early. - // - // I don't think we can apply the maximum otherwise unfortunately. - if self.is_anchored_start(input) && self.is_always_anchored_end() { - let maxlen = match self.props_union().maximum_len() { - None => return false, - Some(maxlen) => maxlen, - }; - if input.get_span().len() > maxlen { - return true; - } - } - false - } -} - -/// An iterator over all non-overlapping matches. -/// -/// The iterator yields a [`Match`] value until no more matches could be found. -/// -/// The lifetime parameters are as follows: -/// -/// * `'r` represents the lifetime of the `Regex` that produced this iterator. -/// * `'h` represents the lifetime of the haystack being searched. -/// -/// This iterator can be created with the [`Regex::find_iter`] method. -#[derive(Debug)] -pub struct FindMatches<'r, 'h> { - re: &'r Regex, - cache: CachePoolGuard<'r>, - it: iter::Searcher<'h>, -} - -impl<'r, 'h> FindMatches<'r, 'h> { - /// Returns the `Regex` value that created this iterator. - #[inline] - pub fn regex(&self) -> &'r Regex { - self.re - } - - /// Returns the current `Input` associated with this iterator. - /// - /// The `start` position on the given `Input` may change during iteration, - /// but all other values are guaranteed to remain invariant. 
- #[inline] - pub fn input<'s>(&'s self) -> &'s Input<'h> { - self.it.input() - } -} - -impl<'r, 'h> Iterator for FindMatches<'r, 'h> { - type Item = Match; - - #[inline] - fn next(&mut self) -> Option { - let FindMatches { re, ref mut cache, ref mut it } = *self; - it.advance(|input| Ok(re.search_with(cache, input))) - } - - #[inline] - fn count(self) -> usize { - // If all we care about is a count of matches, then we only need to - // find the end position of each match. This can give us a 2x perf - // boost in some cases, because it avoids needing to do a reverse scan - // to find the start of a match. - let FindMatches { re, mut cache, it } = self; - // This does the deref for PoolGuard once instead of every iter. - let cache = &mut *cache; - it.into_half_matches_iter( - |input| Ok(re.search_half_with(cache, input)), - ) - .count() - } -} - -impl<'r, 'h> core::iter::FusedIterator for FindMatches<'r, 'h> {} - -/// An iterator over all non-overlapping leftmost matches with their capturing -/// groups. -/// -/// The iterator yields a [`Captures`] value until no more matches could be -/// found. -/// -/// The lifetime parameters are as follows: -/// -/// * `'r` represents the lifetime of the `Regex` that produced this iterator. -/// * `'h` represents the lifetime of the haystack being searched. -/// -/// This iterator can be created with the [`Regex::captures_iter`] method. -#[derive(Debug)] -pub struct CapturesMatches<'r, 'h> { - re: &'r Regex, - cache: CachePoolGuard<'r>, - caps: Captures, - it: iter::Searcher<'h>, -} - -impl<'r, 'h> CapturesMatches<'r, 'h> { - /// Returns the `Regex` value that created this iterator. - #[inline] - pub fn regex(&self) -> &'r Regex { - self.re - } - - /// Returns the current `Input` associated with this iterator. - /// - /// The `start` position on the given `Input` may change during iteration, - /// but all other values are guaranteed to remain invariant. - #[inline] - pub fn input<'s>(&'s self) -> &'s Input<'h> { - self.it.input() - } -} - -impl<'r, 'h> Iterator for CapturesMatches<'r, 'h> { - type Item = Captures; - - #[inline] - fn next(&mut self) -> Option { - // Splitting 'self' apart seems necessary to appease borrowck. - let CapturesMatches { re, ref mut cache, ref mut caps, ref mut it } = - *self; - let _ = it.advance(|input| { - re.search_captures_with(cache, input, caps); - Ok(caps.get_match()) - }); - if caps.is_match() { - Some(caps.clone()) - } else { - None - } - } - - #[inline] - fn count(self) -> usize { - let CapturesMatches { re, mut cache, it, .. } = self; - // This does the deref for PoolGuard once instead of every iter. - let cache = &mut *cache; - it.into_half_matches_iter( - |input| Ok(re.search_half_with(cache, input)), - ) - .count() - } -} - -impl<'r, 'h> core::iter::FusedIterator for CapturesMatches<'r, 'h> {} - -/// Yields all substrings delimited by a regular expression match. -/// -/// The spans correspond to the offsets between matches. -/// -/// The lifetime parameters are as follows: -/// -/// * `'r` represents the lifetime of the `Regex` that produced this iterator. -/// * `'h` represents the lifetime of the haystack being searched. -/// -/// This iterator can be created with the [`Regex::split`] method. -#[derive(Debug)] -pub struct Split<'r, 'h> { - finder: FindMatches<'r, 'h>, - last: usize, -} - -impl<'r, 'h> Split<'r, 'h> { - /// Returns the current `Input` associated with this iterator. 
- /// - /// The `start` position on the given `Input` may change during iteration, - /// but all other values are guaranteed to remain invariant. - #[inline] - pub fn input<'s>(&'s self) -> &'s Input<'h> { - self.finder.input() - } -} - -impl<'r, 'h> Iterator for Split<'r, 'h> { - type Item = Span; - - fn next(&mut self) -> Option { - match self.finder.next() { - None => { - let len = self.finder.it.input().haystack().len(); - if self.last > len { - None - } else { - let span = Span::from(self.last..len); - self.last = len + 1; // Next call will return None - Some(span) - } - } - Some(m) => { - let span = Span::from(self.last..m.start()); - self.last = m.end(); - Some(span) - } - } - } -} - -impl<'r, 'h> core::iter::FusedIterator for Split<'r, 'h> {} - -/// Yields at most `N` spans delimited by a regular expression match. -/// -/// The spans correspond to the offsets between matches. The last span will be -/// whatever remains after splitting. -/// -/// The lifetime parameters are as follows: -/// -/// * `'r` represents the lifetime of the `Regex` that produced this iterator. -/// * `'h` represents the lifetime of the haystack being searched. -/// -/// This iterator can be created with the [`Regex::splitn`] method. -#[derive(Debug)] -pub struct SplitN<'r, 'h> { - splits: Split<'r, 'h>, - limit: usize, -} - -impl<'r, 'h> SplitN<'r, 'h> { - /// Returns the current `Input` associated with this iterator. - /// - /// The `start` position on the given `Input` may change during iteration, - /// but all other values are guaranteed to remain invariant. - #[inline] - pub fn input<'s>(&'s self) -> &'s Input<'h> { - self.splits.input() - } -} - -impl<'r, 'h> Iterator for SplitN<'r, 'h> { - type Item = Span; - - fn next(&mut self) -> Option { - if self.limit == 0 { - return None; - } - - self.limit -= 1; - if self.limit > 0 { - return self.splits.next(); - } - - let len = self.splits.finder.it.input().haystack().len(); - if self.splits.last > len { - // We've already returned all substrings. - None - } else { - // self.n == 0, so future calls will return None immediately - Some(Span::from(self.splits.last..len)) - } - } - - fn size_hint(&self) -> (usize, Option) { - (0, Some(self.limit)) - } -} - -impl<'r, 'h> core::iter::FusedIterator for SplitN<'r, 'h> {} - -/// Represents mutable scratch space used by regex engines during a search. -/// -/// Most of the regex engines in this crate require some kind of -/// mutable state in order to execute a search. This mutable state is -/// explicitly separated from the core regex object (such as a -/// [`thompson::NFA`](crate::nfa::thompson::NFA)) so that the read-only regex -/// object can be shared across multiple threads simultaneously without any -/// synchronization. Conversely, a `Cache` must either be duplicated if using -/// the same `Regex` from multiple threads, or else there must be some kind of -/// synchronization that guarantees exclusive access while it's in use by one -/// thread. -/// -/// A `Regex` attempts to do this synchronization for you by using a thread -/// pool internally. Its size scales roughly with the number of simultaneous -/// regex searches. -/// -/// For cases where one does not want to rely on a `Regex`'s internal thread -/// pool, lower level routines such as [`Regex::search_with`] are provided -/// that permit callers to pass a `Cache` into the search routine explicitly. -/// -/// General advice is that the thread pool is often more than good enough. 
-/// However, it may be possible to observe the effects of its latency, -/// especially when searching many small haystacks from many threads -/// simultaneously. -/// -/// Caches can be created from their corresponding `Regex` via -/// [`Regex::create_cache`]. A cache can only be used with either the `Regex` -/// that created it, or the `Regex` that was most recently used to reset it -/// with [`Cache::reset`]. Using a cache with any other `Regex` may result in -/// panics or incorrect results. -/// -/// # Example -/// -/// ``` -/// use regex_automata::{meta::Regex, Input, Match}; -/// -/// let re = Regex::new(r"(?-u)m\w+\s+m\w+")?; -/// let mut cache = re.create_cache(); -/// let input = Input::new("crazy janey and her mission man"); -/// assert_eq!( -/// Some(Match::must(0, 20..31)), -/// re.search_with(&mut cache, &input), -/// ); -/// -/// # Ok::<(), Box>(()) -/// ``` -#[derive(Debug, Clone)] -pub struct Cache { - pub(crate) capmatches: Captures, - pub(crate) pikevm: wrappers::PikeVMCache, - pub(crate) backtrack: wrappers::BoundedBacktrackerCache, - pub(crate) onepass: wrappers::OnePassCache, - pub(crate) hybrid: wrappers::HybridCache, - pub(crate) revhybrid: wrappers::ReverseHybridCache, -} - -impl Cache { - /// Creates a new `Cache` for use with this regex. - /// - /// The cache returned should only be used for searches for the given - /// `Regex`. If you want to reuse the cache for another `Regex`, then you - /// must call [`Cache::reset`] with that `Regex`. - pub fn new(re: &Regex) -> Cache { - re.create_cache() - } - - /// Reset this cache such that it can be used for searching with the given - /// `Regex` (and only that `Regex`). - /// - /// A cache reset permits potentially reusing memory already allocated in - /// this cache with a different `Regex`. - /// - /// # Example - /// - /// This shows how to re-purpose a cache for use with a different `Regex`. - /// - /// ``` - /// # if cfg!(miri) { return Ok(()); } // miri takes too long - /// use regex_automata::{meta::Regex, Match, Input}; - /// - /// let re1 = Regex::new(r"\w")?; - /// let re2 = Regex::new(r"\W")?; - /// - /// let mut cache = re1.create_cache(); - /// assert_eq!( - /// Some(Match::must(0, 0..2)), - /// re1.search_with(&mut cache, &Input::new("Δ")), - /// ); - /// - /// // Using 'cache' with re2 is not allowed. It may result in panics or - /// // incorrect results. In order to re-purpose the cache, we must reset - /// // it with the Regex we'd like to use it with. - /// // - /// // Similarly, after this reset, using the cache with 're1' is also not - /// // allowed. - /// cache.reset(&re2); - /// assert_eq!( - /// Some(Match::must(0, 0..3)), - /// re2.search_with(&mut cache, &Input::new("☃")), - /// ); - /// - /// # Ok::<(), Box>(()) - /// ``` - pub fn reset(&mut self, re: &Regex) { - re.imp.strat.reset_cache(self) - } - - /// Returns the heap memory usage, in bytes, of this cache. - /// - /// This does **not** include the stack size used up by this cache. To - /// compute that, use `std::mem::size_of::()`. - pub fn memory_usage(&self) -> usize { - let mut bytes = 0; - bytes += self.pikevm.memory_usage(); - bytes += self.backtrack.memory_usage(); - bytes += self.onepass.memory_usage(); - bytes += self.hybrid.memory_usage(); - bytes += self.revhybrid.memory_usage(); - bytes - } -} - -/// An object describing the configuration of a `Regex`. -/// -/// This configuration only includes options for the -/// non-syntax behavior of a `Regex`, and can be applied via the -/// [`Builder::configure`] method. 
For configuring the syntax options, see -/// [`util::syntax::Config`](crate::util::syntax::Config). -/// -/// # Example: lower the NFA size limit -/// -/// In some cases, the default size limit might be too big. The size limit can -/// be lowered, which will prevent large regex patterns from compiling. -/// -/// ``` -/// # if cfg!(miri) { return Ok(()); } // miri takes too long -/// use regex_automata::meta::Regex; -/// -/// let result = Regex::builder() -/// .configure(Regex::config().nfa_size_limit(Some(20 * (1<<10)))) -/// // Not even 20KB is enough to build a single large Unicode class! -/// .build(r"\pL"); -/// assert!(result.is_err()); -/// -/// # Ok::<(), Box>(()) -/// ``` -#[derive(Clone, Debug, Default)] -pub struct Config { - // As with other configuration types in this crate, we put all our knobs - // in options so that we can distinguish between "default" and "not set." - // This makes it possible to easily combine multiple configurations - // without default values overwriting explicitly specified values. See the - // 'overwrite' method. - // - // For docs on the fields below, see the corresponding method setters. - match_kind: Option, - utf8_empty: Option, - autopre: Option, - pre: Option>, - which_captures: Option, - nfa_size_limit: Option>, - onepass_size_limit: Option>, - hybrid_cache_capacity: Option, - hybrid: Option, - dfa: Option, - dfa_size_limit: Option>, - dfa_state_limit: Option>, - onepass: Option, - backtrack: Option, - byte_classes: Option, - line_terminator: Option, -} - -impl Config { - /// Create a new configuration object for a `Regex`. - pub fn new() -> Config { - Config::default() - } - - /// Set the match semantics for a `Regex`. - /// - /// The default value is [`MatchKind::LeftmostFirst`]. - /// - /// # Example - /// - /// ``` - /// use regex_automata::{meta::Regex, Match, MatchKind}; - /// - /// // By default, leftmost-first semantics are used, which - /// // disambiguates matches at the same position by selecting - /// // the one that corresponds earlier in the pattern. - /// let re = Regex::new("sam|samwise")?; - /// assert_eq!(Some(Match::must(0, 0..3)), re.find("samwise")); - /// - /// // But with 'all' semantics, match priority is ignored - /// // and all match states are included. When coupled with - /// // a leftmost search, the search will report the last - /// // possible match. - /// let re = Regex::builder() - /// .configure(Regex::config().match_kind(MatchKind::All)) - /// .build("sam|samwise")?; - /// assert_eq!(Some(Match::must(0, 0..7)), re.find("samwise")); - /// // Beware that this can lead to skipping matches! - /// // Usually 'all' is used for anchored reverse searches - /// // only, or for overlapping searches. - /// assert_eq!(Some(Match::must(0, 4..11)), re.find("sam samwise")); - /// - /// # Ok::<(), Box>(()) - /// ``` - pub fn match_kind(self, kind: MatchKind) -> Config { - Config { match_kind: Some(kind), ..self } - } - - /// Toggles whether empty matches are permitted to occur between the code - /// units of a UTF-8 encoded codepoint. - /// - /// This should generally be enabled when search a `&str` or anything that - /// you otherwise know is valid UTF-8. It should be disabled in all other - /// cases. Namely, if the haystack is not valid UTF-8 and this is enabled, - /// then behavior is unspecified. - /// - /// By default, this is enabled. 
- /// - /// # Example - /// - /// ``` - /// use regex_automata::{meta::Regex, Match}; - /// - /// let re = Regex::new("")?; - /// let got: Vec = re.find_iter("☃").collect(); - /// // Matches only occur at the beginning and end of the snowman. - /// assert_eq!(got, vec![ - /// Match::must(0, 0..0), - /// Match::must(0, 3..3), - /// ]); - /// - /// let re = Regex::builder() - /// .configure(Regex::config().utf8_empty(false)) - /// .build("")?; - /// let got: Vec = re.find_iter("☃").collect(); - /// // Matches now occur at every position! - /// assert_eq!(got, vec![ - /// Match::must(0, 0..0), - /// Match::must(0, 1..1), - /// Match::must(0, 2..2), - /// Match::must(0, 3..3), - /// ]); - /// - /// Ok::<(), Box>(()) - /// ``` - pub fn utf8_empty(self, yes: bool) -> Config { - Config { utf8_empty: Some(yes), ..self } - } - - /// Toggles whether automatic prefilter support is enabled. - /// - /// If this is disabled and [`Config::prefilter`] is not set, then the - /// meta regex engine will not use any prefilters. This can sometimes - /// be beneficial in cases where you know (or have measured) that the - /// prefilter leads to overall worse search performance. - /// - /// By default, this is enabled. - /// - /// # Example - /// - /// ``` - /// # if cfg!(miri) { return Ok(()); } // miri takes too long - /// use regex_automata::{meta::Regex, Match}; - /// - /// let re = Regex::builder() - /// .configure(Regex::config().auto_prefilter(false)) - /// .build(r"Bruce \w+")?; - /// let hay = "Hello Bruce Springsteen!"; - /// assert_eq!(Some(Match::must(0, 6..23)), re.find(hay)); - /// - /// Ok::<(), Box>(()) - /// ``` - pub fn auto_prefilter(self, yes: bool) -> Config { - Config { autopre: Some(yes), ..self } - } - - /// Overrides and sets the prefilter to use inside a `Regex`. - /// - /// This permits one to forcefully set a prefilter in cases where the - /// caller knows better than whatever the automatic prefilter logic is - /// capable of. - /// - /// By default, this is set to `None` and an automatic prefilter will be - /// used if one could be built. (Assuming [`Config::auto_prefilter`] is - /// enabled, which it is by default.) - /// - /// # Example - /// - /// This example shows how to set your own prefilter. In the case of a - /// pattern like `Bruce \w+`, the automatic prefilter is likely to be - /// constructed in a way that it will look for occurrences of `Bruce `. - /// In most cases, this is the best choice. But in some cases, it may be - /// the case that running `memchr` on `B` is the best choice. One can - /// achieve that behavior by overriding the automatic prefilter logic - /// and providing a prefilter that just matches `B`. - /// - /// ``` - /// # if cfg!(miri) { return Ok(()); } // miri takes too long - /// use regex_automata::{ - /// meta::Regex, - /// util::prefilter::Prefilter, - /// Match, MatchKind, - /// }; - /// - /// let pre = Prefilter::new(MatchKind::LeftmostFirst, &["B"]) - /// .expect("a prefilter"); - /// let re = Regex::builder() - /// .configure(Regex::config().prefilter(Some(pre))) - /// .build(r"Bruce \w+")?; - /// let hay = "Hello Bruce Springsteen!"; - /// assert_eq!(Some(Match::must(0, 6..23)), re.find(hay)); - /// - /// # Ok::<(), Box>(()) - /// ``` - /// - /// # Example: incorrect prefilters can lead to incorrect results! - /// - /// Be warned that setting an incorrect prefilter can lead to missed - /// matches. So if you use this option, ensure your prefilter can _never_ - /// report false negatives. 
(A false positive is, on the other hand, quite - /// okay and generally unavoidable.) - /// - /// ``` - /// # if cfg!(miri) { return Ok(()); } // miri takes too long - /// use regex_automata::{ - /// meta::Regex, - /// util::prefilter::Prefilter, - /// Match, MatchKind, - /// }; - /// - /// let pre = Prefilter::new(MatchKind::LeftmostFirst, &["Z"]) - /// .expect("a prefilter"); - /// let re = Regex::builder() - /// .configure(Regex::config().prefilter(Some(pre))) - /// .build(r"Bruce \w+")?; - /// let hay = "Hello Bruce Springsteen!"; - /// // Oops! No match found, but there should be one! - /// assert_eq!(None, re.find(hay)); - /// - /// # Ok::<(), Box>(()) - /// ``` - pub fn prefilter(self, pre: Option) -> Config { - Config { pre: Some(pre), ..self } - } - - /// Configures what kinds of groups are compiled as "capturing" in the - /// underlying regex engine. - /// - /// This is set to [`WhichCaptures::All`] by default. Callers may wish to - /// use [`WhichCaptures::Implicit`] in cases where one wants avoid the - /// overhead of capture states for explicit groups. - /// - /// Note that another approach to avoiding the overhead of capture groups - /// is by using non-capturing groups in the regex pattern. That is, - /// `(?:a)` instead of `(a)`. This option is useful when you can't control - /// the concrete syntax but know that you don't need the underlying capture - /// states. For example, using `WhichCaptures::Implicit` will behave as if - /// all explicit capturing groups in the pattern were non-capturing. - /// - /// Setting this to `WhichCaptures::None` is usually not the right thing to - /// do. When no capture states are compiled, some regex engines (such as - /// the `PikeVM`) won't be able to report match offsets. This will manifest - /// as no match being found. Indeed, in order to enforce consistent - /// behavior, the meta regex engine will always report `None` for routines - /// that return match offsets even if one of its regex engines could - /// service the request. This avoids "match or not" behavior from being - /// influenced by user input (since user input can influence the selection - /// of the regex engine). - /// - /// # Example - /// - /// This example demonstrates how the results of capture groups can change - /// based on this option. First we show the default (all capture groups in - /// the pattern are capturing): - /// - /// ``` - /// use regex_automata::{meta::Regex, Match, Span}; - /// - /// let re = Regex::new(r"foo([0-9]+)bar")?; - /// let hay = "foo123bar"; - /// - /// let mut caps = re.create_captures(); - /// re.captures(hay, &mut caps); - /// assert_eq!(Some(Span::from(0..9)), caps.get_group(0)); - /// assert_eq!(Some(Span::from(3..6)), caps.get_group(1)); - /// - /// Ok::<(), Box>(()) - /// ``` - /// - /// And now we show the behavior when we only include implicit capture - /// groups. In this case, we can only find the overall match span, but the - /// spans of any other explicit group don't exist because they are treated - /// as non-capturing. (In effect, when `WhichCaptures::Implicit` is used, - /// there is no real point in using [`Regex::captures`] since it will never - /// be able to report more information than [`Regex::find`].) 
- /// - /// ``` - /// use regex_automata::{ - /// meta::Regex, - /// nfa::thompson::WhichCaptures, - /// Match, - /// Span, - /// }; - /// - /// let re = Regex::builder() - /// .configure(Regex::config().which_captures(WhichCaptures::Implicit)) - /// .build(r"foo([0-9]+)bar")?; - /// let hay = "foo123bar"; - /// - /// let mut caps = re.create_captures(); - /// re.captures(hay, &mut caps); - /// assert_eq!(Some(Span::from(0..9)), caps.get_group(0)); - /// assert_eq!(None, caps.get_group(1)); - /// - /// Ok::<(), Box>(()) - /// ``` - /// - /// # Example: strange `Regex::find` behavior - /// - /// As noted above, when using [`WhichCaptures::None`], this means that - /// `Regex::is_match` could return `true` while `Regex::find` returns - /// `None`: - /// - /// ``` - /// use regex_automata::{ - /// meta::Regex, - /// nfa::thompson::WhichCaptures, - /// Input, - /// Match, - /// Span, - /// }; - /// - /// let re = Regex::builder() - /// .configure(Regex::config().which_captures(WhichCaptures::None)) - /// .build(r"foo([0-9]+)bar")?; - /// let hay = "foo123bar"; - /// - /// assert!(re.is_match(hay)); - /// assert_eq!(re.find(hay), None); - /// assert_eq!(re.search_half(&Input::new(hay)), None); - /// - /// Ok::<(), Box>(()) - /// ``` - pub fn which_captures(mut self, which_captures: WhichCaptures) -> Config { - self.which_captures = Some(which_captures); - self - } - - /// Sets the size limit, in bytes, to enforce on the construction of every - /// NFA build by the meta regex engine. - /// - /// Setting it to `None` disables the limit. This is not recommended if - /// you're compiling untrusted patterns. - /// - /// Note that this limit is applied to _each_ NFA built, and if any of - /// them exceed the limit, then construction will fail. This limit does - /// _not_ correspond to the total memory used by all NFAs in the meta regex - /// engine. - /// - /// This defaults to some reasonable number that permits most reasonable - /// patterns. - /// - /// # Example - /// - /// ``` - /// # if cfg!(miri) { return Ok(()); } // miri takes too long - /// use regex_automata::meta::Regex; - /// - /// let result = Regex::builder() - /// .configure(Regex::config().nfa_size_limit(Some(20 * (1<<10)))) - /// // Not even 20KB is enough to build a single large Unicode class! - /// .build(r"\pL"); - /// assert!(result.is_err()); - /// - /// // But notice that building such a regex with the exact same limit - /// // can succeed depending on other aspects of the configuration. For - /// // example, a single *forward* NFA will (at time of writing) fit into - /// // the 20KB limit, but a *reverse* NFA of the same pattern will not. - /// // So if one configures a meta regex such that a reverse NFA is never - /// // needed and thus never built, then the 20KB limit will be enough for - /// // a pattern like \pL! - /// let result = Regex::builder() - /// .configure(Regex::config() - /// .nfa_size_limit(Some(20 * (1<<10))) - /// // The DFAs are the only thing that (currently) need a reverse - /// // NFA. So if both are disabled, the meta regex engine will - /// // skip building the reverse NFA. Note that this isn't an API - /// // guarantee. A future semver compatible version may introduce - /// // new use cases for a reverse NFA. - /// .hybrid(false) - /// .dfa(false) - /// ) - /// // Not even 20KB is enough to build a single large Unicode class! 
-    ///     .build(r"\pL");
-    /// assert!(result.is_ok());
-    ///
-    /// # Ok::<(), Box<dyn std::error::Error>>(())
-    /// ```
-    pub fn nfa_size_limit(self, limit: Option<usize>) -> Config {
-        Config { nfa_size_limit: Some(limit), ..self }
-    }
-
-    /// Sets the size limit, in bytes, for the one-pass DFA.
-    ///
-    /// Setting it to `None` disables the limit. Disabling the limit is
-    /// strongly discouraged when compiling untrusted patterns. Even if the
-    /// patterns are trusted, it still may not be a good idea, since a one-pass
-    /// DFA can use a lot of memory. With that said, as the size of a regex
-    /// increases, the likelihood of it being one-pass decreases.
-    ///
-    /// This defaults to some reasonable number that permits most reasonable
-    /// one-pass patterns.
-    ///
-    /// # Example
-    ///
-    /// This shows how to set the one-pass DFA size limit. Note that since
-    /// a one-pass DFA is an optional component of the meta regex engine,
-    /// this size limit only impacts what is built internally and will never
-    /// determine whether a `Regex` itself fails to build.
-    ///
-    /// ```
-    /// # if cfg!(miri) { return Ok(()); } // miri takes too long
-    /// use regex_automata::meta::Regex;
-    ///
-    /// let result = Regex::builder()
-    ///     .configure(Regex::config().onepass_size_limit(Some(2 * (1<<20))))
-    ///     .build(r"\pL{5}");
-    /// assert!(result.is_ok());
-    /// # Ok::<(), Box<dyn std::error::Error>>(())
-    /// ```
-    pub fn onepass_size_limit(self, limit: Option<usize>) -> Config {
-        Config { onepass_size_limit: Some(limit), ..self }
-    }
-
-    /// Set the cache capacity, in bytes, for the lazy DFA.
-    ///
-    /// The cache capacity of the lazy DFA determines approximately how much
-    /// heap memory it is allowed to use to store its state transitions. The
-    /// state transitions are computed at search time, and if the cache fills
-    /// up, it is cleared. At this point, any previously generated state
-    /// transitions are lost and are re-generated if they're needed again.
-    ///
-    /// This sort of cache filling and clearing works quite well _so long as
-    /// cache clearing happens infrequently_. If it happens too often, then the
-    /// meta regex engine will stop using the lazy DFA and switch over to a
-    /// different regex engine.
-    ///
-    /// In cases where the cache is cleared too often, it may be possible to
-    /// give the cache more space and reduce (or eliminate) how often it is
-    /// cleared. Similarly, sometimes a regex is so big that the lazy DFA isn't
-    /// used at all if its cache capacity isn't big enough.
-    ///
-    /// The capacity set here is a _limit_ on how much memory is used. The
-    /// actual memory used is only allocated as it's needed.
-    ///
-    /// Determining the right value for this is a little tricky and will likely
-    /// require some profiling. Enabling the `logging` feature and setting the
-    /// log level to `trace` will also tell you how often the cache is being
-    /// cleared.
-    ///
-    /// # Example
-    ///
-    /// ```
-    /// # if cfg!(miri) { return Ok(()); } // miri takes too long
-    /// use regex_automata::meta::Regex;
-    ///
-    /// let result = Regex::builder()
-    ///     .configure(Regex::config().hybrid_cache_capacity(20 * (1<<20)))
-    ///     .build(r"\pL{5}");
-    /// assert!(result.is_ok());
-    /// # Ok::<(), Box<dyn std::error::Error>>(())
-    /// ```
-    pub fn hybrid_cache_capacity(self, limit: usize) -> Config {
-        Config { hybrid_cache_capacity: Some(limit), ..self }
-    }
-
-    /// Sets the size limit, in bytes, for heap memory used for a fully
-    /// compiled DFA.
-    ///
-    /// **NOTE:** If you increase this, you'll likely also need to increase
-    /// [`Config::dfa_state_limit`].
- /// - /// In contrast to the lazy DFA, building a full DFA requires computing - /// all of its state transitions up front. This can be a very expensive - /// process, and runs in worst case `2^n` time and space (where `n` is - /// proportional to the size of the regex). However, a full DFA unlocks - /// some additional optimization opportunities. - /// - /// Because full DFAs can be so expensive, the default limits for them are - /// incredibly small. Generally speaking, if your regex is moderately big - /// or if you're using Unicode features (`\w` is Unicode-aware by default - /// for example), then you can expect that the meta regex engine won't even - /// attempt to build a DFA for it. - /// - /// If this and [`Config::dfa_state_limit`] are set to `None`, then the - /// meta regex will not use any sort of limits when deciding whether to - /// build a DFA. This in turn makes construction of a `Regex` take - /// worst case exponential time and space. Even short patterns can result - /// in huge space blow ups. So it is strongly recommended to keep some kind - /// of limit set! - /// - /// The default is set to a small number that permits some simple regexes - /// to get compiled into DFAs in reasonable time. - /// - /// # Example - /// - /// ``` - /// # if cfg!(miri) { return Ok(()); } // miri takes too long - /// use regex_automata::meta::Regex; - /// - /// let result = Regex::builder() - /// // 100MB is much bigger than the default. - /// .configure(Regex::config() - /// .dfa_size_limit(Some(100 * (1<<20))) - /// // We don't care about size too much here, so just - /// // remove the NFA state limit altogether. - /// .dfa_state_limit(None)) - /// .build(r"\pL{5}"); - /// assert!(result.is_ok()); - /// # Ok::<(), Box>(()) - /// ``` - pub fn dfa_size_limit(self, limit: Option) -> Config { - Config { dfa_size_limit: Some(limit), ..self } - } - - /// Sets a limit on the total number of NFA states, beyond which, a full - /// DFA is not attempted to be compiled. - /// - /// This limit works in concert with [`Config::dfa_size_limit`]. Namely, - /// where as `Config::dfa_size_limit` is applied by attempting to construct - /// a DFA, this limit is used to avoid the attempt in the first place. This - /// is useful to avoid hefty initialization costs associated with building - /// a DFA for cases where it is obvious the DFA will ultimately be too big. - /// - /// By default, this is set to a very small number. - /// - /// # Example - /// - /// ``` - /// # if cfg!(miri) { return Ok(()); } // miri takes too long - /// use regex_automata::meta::Regex; - /// - /// let result = Regex::builder() - /// .configure(Regex::config() - /// // Sometimes the default state limit rejects DFAs even - /// // if they would fit in the size limit. Here, we disable - /// // the check on the number of NFA states and just rely on - /// // the size limit. - /// .dfa_state_limit(None)) - /// .build(r"(?-u)\w{30}"); - /// assert!(result.is_ok()); - /// # Ok::<(), Box>(()) - /// ``` - pub fn dfa_state_limit(self, limit: Option) -> Config { - Config { dfa_state_limit: Some(limit), ..self } - } - - /// Whether to attempt to shrink the size of the alphabet for the regex - /// pattern or not. When enabled, the alphabet is shrunk into a set of - /// equivalence classes, where every byte in the same equivalence class - /// cannot discriminate between a match or non-match. - /// - /// **WARNING:** This is only useful for debugging DFAs. Disabling this - /// does not yield any speed advantages. 
Indeed, disabling it can result - /// in much higher memory usage. Disabling byte classes is useful for - /// debugging the actual generated transitions because it lets one see the - /// transitions defined on actual bytes instead of the equivalence classes. - /// - /// This option is enabled by default and should never be disabled unless - /// one is debugging the meta regex engine's internals. - /// - /// # Example - /// - /// ``` - /// use regex_automata::{meta::Regex, Match}; - /// - /// let re = Regex::builder() - /// .configure(Regex::config().byte_classes(false)) - /// .build(r"[a-z]+")?; - /// let hay = "!!quux!!"; - /// assert_eq!(Some(Match::must(0, 2..6)), re.find(hay)); - /// - /// # Ok::<(), Box>(()) - /// ``` - pub fn byte_classes(self, yes: bool) -> Config { - Config { byte_classes: Some(yes), ..self } - } - - /// Set the line terminator to be used by the `^` and `$` anchors in - /// multi-line mode. - /// - /// This option has no effect when CRLF mode is enabled. That is, - /// regardless of this setting, `(?Rm:^)` and `(?Rm:$)` will always treat - /// `\r` and `\n` as line terminators (and will never match between a `\r` - /// and a `\n`). - /// - /// By default, `\n` is the line terminator. - /// - /// **Warning**: This does not change the behavior of `.`. To do that, - /// you'll need to configure the syntax option - /// [`syntax::Config::line_terminator`](crate::util::syntax::Config::line_terminator) - /// in addition to this. Otherwise, `.` will continue to match any - /// character other than `\n`. - /// - /// # Example - /// - /// ``` - /// use regex_automata::{meta::Regex, util::syntax, Match}; - /// - /// let re = Regex::builder() - /// .syntax(syntax::Config::new().multi_line(true)) - /// .configure(Regex::config().line_terminator(b'\x00')) - /// .build(r"^foo$")?; - /// let hay = "\x00foo\x00"; - /// assert_eq!(Some(Match::must(0, 1..4)), re.find(hay)); - /// - /// # Ok::<(), Box>(()) - /// ``` - pub fn line_terminator(self, byte: u8) -> Config { - Config { line_terminator: Some(byte), ..self } - } - - /// Toggle whether the hybrid NFA/DFA (also known as the "lazy DFA") should - /// be available for use by the meta regex engine. - /// - /// Enabling this does not necessarily mean that the lazy DFA will - /// definitely be used. It just means that it will be _available_ for use - /// if the meta regex engine thinks it will be useful. - /// - /// When the `hybrid` crate feature is enabled, then this is enabled by - /// default. Otherwise, if the crate feature is disabled, then this is - /// always disabled, regardless of its setting by the caller. - pub fn hybrid(self, yes: bool) -> Config { - Config { hybrid: Some(yes), ..self } - } - - /// Toggle whether a fully compiled DFA should be available for use by the - /// meta regex engine. - /// - /// Enabling this does not necessarily mean that a DFA will definitely be - /// used. It just means that it will be _available_ for use if the meta - /// regex engine thinks it will be useful. - /// - /// When the `dfa-build` crate feature is enabled, then this is enabled by - /// default. Otherwise, if the crate feature is disabled, then this is - /// always disabled, regardless of its setting by the caller. - pub fn dfa(self, yes: bool) -> Config { - Config { dfa: Some(yes), ..self } - } - - /// Toggle whether a one-pass DFA should be available for use by the meta - /// regex engine. - /// - /// Enabling this does not necessarily mean that a one-pass DFA will - /// definitely be used. 
It just means that it will be _available_ for - /// use if the meta regex engine thinks it will be useful. (Indeed, a - /// one-pass DFA can only be used when the regex is one-pass. See the - /// [`dfa::onepass`](crate::dfa::onepass) module for more details.) - /// - /// When the `dfa-onepass` crate feature is enabled, then this is enabled - /// by default. Otherwise, if the crate feature is disabled, then this is - /// always disabled, regardless of its setting by the caller. - pub fn onepass(self, yes: bool) -> Config { - Config { onepass: Some(yes), ..self } - } - - /// Toggle whether a bounded backtracking regex engine should be available - /// for use by the meta regex engine. - /// - /// Enabling this does not necessarily mean that a bounded backtracker will - /// definitely be used. It just means that it will be _available_ for use - /// if the meta regex engine thinks it will be useful. - /// - /// When the `nfa-backtrack` crate feature is enabled, then this is enabled - /// by default. Otherwise, if the crate feature is disabled, then this is - /// always disabled, regardless of its setting by the caller. - pub fn backtrack(self, yes: bool) -> Config { - Config { backtrack: Some(yes), ..self } - } - - /// Returns the match kind on this configuration, as set by - /// [`Config::match_kind`]. - /// - /// If it was not explicitly set, then a default value is returned. - pub fn get_match_kind(&self) -> MatchKind { - self.match_kind.unwrap_or(MatchKind::LeftmostFirst) - } - - /// Returns whether empty matches must fall on valid UTF-8 boundaries, as - /// set by [`Config::utf8_empty`]. - /// - /// If it was not explicitly set, then a default value is returned. - pub fn get_utf8_empty(&self) -> bool { - self.utf8_empty.unwrap_or(true) - } - - /// Returns whether automatic prefilters are enabled, as set by - /// [`Config::auto_prefilter`]. - /// - /// If it was not explicitly set, then a default value is returned. - pub fn get_auto_prefilter(&self) -> bool { - self.autopre.unwrap_or(true) - } - - /// Returns a manually set prefilter, if one was set by - /// [`Config::prefilter`]. - /// - /// If it was not explicitly set, then a default value is returned. - pub fn get_prefilter(&self) -> Option<&Prefilter> { - self.pre.as_ref().unwrap_or(&None).as_ref() - } - - /// Returns the capture configuration, as set by - /// [`Config::which_captures`]. - /// - /// If it was not explicitly set, then a default value is returned. - pub fn get_which_captures(&self) -> WhichCaptures { - self.which_captures.unwrap_or(WhichCaptures::All) - } - - /// Returns NFA size limit, as set by [`Config::nfa_size_limit`]. - /// - /// If it was not explicitly set, then a default value is returned. - pub fn get_nfa_size_limit(&self) -> Option { - self.nfa_size_limit.unwrap_or(Some(10 * (1 << 20))) - } - - /// Returns one-pass DFA size limit, as set by - /// [`Config::onepass_size_limit`]. - /// - /// If it was not explicitly set, then a default value is returned. - pub fn get_onepass_size_limit(&self) -> Option { - self.onepass_size_limit.unwrap_or(Some(1 * (1 << 20))) - } - - /// Returns hybrid NFA/DFA cache capacity, as set by - /// [`Config::hybrid_cache_capacity`]. - /// - /// If it was not explicitly set, then a default value is returned. - pub fn get_hybrid_cache_capacity(&self) -> usize { - self.hybrid_cache_capacity.unwrap_or(2 * (1 << 20)) - } - - /// Returns DFA size limit, as set by [`Config::dfa_size_limit`]. - /// - /// If it was not explicitly set, then a default value is returned. 
- pub fn get_dfa_size_limit(&self) -> Option { - // The default for this is VERY small because building a full DFA is - // ridiculously costly. But for regexes that are very small, it can be - // beneficial to use a full DFA. In particular, a full DFA can enable - // additional optimizations via something called "accelerated" states. - // Namely, when there's a state with only a few outgoing transitions, - // we can temporary suspend walking the transition table and use memchr - // for just those outgoing transitions to skip ahead very quickly. - // - // Generally speaking, if Unicode is enabled in your regex and you're - // using some kind of Unicode feature, then it's going to blow this - // size limit. Moreover, Unicode tends to defeat the "accelerated" - // state optimization too, so it's a double whammy. - // - // We also use a limit on the number of NFA states to avoid even - // starting the DFA construction process. Namely, DFA construction - // itself could make lots of initial allocs proportional to the size - // of the NFA, and if the NFA is large, it doesn't make sense to pay - // that cost if we know it's likely to be blown by a large margin. - self.dfa_size_limit.unwrap_or(Some(40 * (1 << 10))) - } - - /// Returns DFA size limit in terms of the number of states in the NFA, as - /// set by [`Config::dfa_state_limit`]. - /// - /// If it was not explicitly set, then a default value is returned. - pub fn get_dfa_state_limit(&self) -> Option { - // Again, as with the size limit, we keep this very small. - self.dfa_state_limit.unwrap_or(Some(30)) - } - - /// Returns whether byte classes are enabled, as set by - /// [`Config::byte_classes`]. - /// - /// If it was not explicitly set, then a default value is returned. - pub fn get_byte_classes(&self) -> bool { - self.byte_classes.unwrap_or(true) - } - - /// Returns the line terminator for this configuration, as set by - /// [`Config::line_terminator`]. - /// - /// If it was not explicitly set, then a default value is returned. - pub fn get_line_terminator(&self) -> u8 { - self.line_terminator.unwrap_or(b'\n') - } - - /// Returns whether the hybrid NFA/DFA regex engine may be used, as set by - /// [`Config::hybrid`]. - /// - /// If it was not explicitly set, then a default value is returned. - pub fn get_hybrid(&self) -> bool { - #[cfg(feature = "hybrid")] - { - self.hybrid.unwrap_or(true) - } - #[cfg(not(feature = "hybrid"))] - { - false - } - } - - /// Returns whether the DFA regex engine may be used, as set by - /// [`Config::dfa`]. - /// - /// If it was not explicitly set, then a default value is returned. - pub fn get_dfa(&self) -> bool { - #[cfg(feature = "dfa-build")] - { - self.dfa.unwrap_or(true) - } - #[cfg(not(feature = "dfa-build"))] - { - false - } - } - - /// Returns whether the one-pass DFA regex engine may be used, as set by - /// [`Config::onepass`]. - /// - /// If it was not explicitly set, then a default value is returned. - pub fn get_onepass(&self) -> bool { - #[cfg(feature = "dfa-onepass")] - { - self.onepass.unwrap_or(true) - } - #[cfg(not(feature = "dfa-onepass"))] - { - false - } - } - - /// Returns whether the bounded backtracking regex engine may be used, as - /// set by [`Config::backtrack`]. - /// - /// If it was not explicitly set, then a default value is returned. 
- pub fn get_backtrack(&self) -> bool { - #[cfg(feature = "nfa-backtrack")] - { - self.backtrack.unwrap_or(true) - } - #[cfg(not(feature = "nfa-backtrack"))] - { - false - } - } - - /// Overwrite the default configuration such that the options in `o` are - /// always used. If an option in `o` is not set, then the corresponding - /// option in `self` is used. If it's not set in `self` either, then it - /// remains not set. - pub(crate) fn overwrite(&self, o: Config) -> Config { - Config { - match_kind: o.match_kind.or(self.match_kind), - utf8_empty: o.utf8_empty.or(self.utf8_empty), - autopre: o.autopre.or(self.autopre), - pre: o.pre.or_else(|| self.pre.clone()), - which_captures: o.which_captures.or(self.which_captures), - nfa_size_limit: o.nfa_size_limit.or(self.nfa_size_limit), - onepass_size_limit: o - .onepass_size_limit - .or(self.onepass_size_limit), - hybrid_cache_capacity: o - .hybrid_cache_capacity - .or(self.hybrid_cache_capacity), - hybrid: o.hybrid.or(self.hybrid), - dfa: o.dfa.or(self.dfa), - dfa_size_limit: o.dfa_size_limit.or(self.dfa_size_limit), - dfa_state_limit: o.dfa_state_limit.or(self.dfa_state_limit), - onepass: o.onepass.or(self.onepass), - backtrack: o.backtrack.or(self.backtrack), - byte_classes: o.byte_classes.or(self.byte_classes), - line_terminator: o.line_terminator.or(self.line_terminator), - } - } -} - -/// A builder for configuring and constructing a `Regex`. -/// -/// The builder permits configuring two different aspects of a `Regex`: -/// -/// * [`Builder::configure`] will set high-level configuration options as -/// described by a [`Config`]. -/// * [`Builder::syntax`] will set the syntax level configuration options -/// as described by a [`util::syntax::Config`](crate::util::syntax::Config). -/// This only applies when building a `Regex` from pattern strings. -/// -/// Once configured, the builder can then be used to construct a `Regex` from -/// one of 4 different inputs: -/// -/// * [`Builder::build`] creates a regex from a single pattern string. -/// * [`Builder::build_many`] creates a regex from many pattern strings. -/// * [`Builder::build_from_hir`] creates a regex from a -/// [`regex-syntax::Hir`](Hir) expression. -/// * [`Builder::build_many_from_hir`] creates a regex from many -/// [`regex-syntax::Hir`](Hir) expressions. -/// -/// The latter two methods in particular provide a way to construct a fully -/// feature regular expression matcher directly from an `Hir` expression -/// without having to first convert it to a string. (This is in contrast to the -/// top-level `regex` crate which intentionally provides no such API in order -/// to avoid making `regex-syntax` a public dependency.) -/// -/// As a convenience, this builder may be created via [`Regex::builder`], which -/// may help avoid an extra import. -/// -/// # Example: change the line terminator -/// -/// This example shows how to enable multi-line mode by default and change the -/// line terminator to the NUL byte: -/// -/// ``` -/// use regex_automata::{meta::Regex, util::syntax, Match}; -/// -/// let re = Regex::builder() -/// .syntax(syntax::Config::new().multi_line(true)) -/// .configure(Regex::config().line_terminator(b'\x00')) -/// .build(r"^foo$")?; -/// let hay = "\x00foo\x00"; -/// assert_eq!(Some(Match::must(0, 1..4)), re.find(hay)); -/// -/// # Ok::<(), Box>(()) -/// ``` -/// -/// # Example: disable UTF-8 requirement -/// -/// By default, regex patterns are required to match UTF-8. This includes -/// regex patterns that can produce matches of length zero. 
In the case of an -/// empty match, by default, matches will not appear between the code units of -/// a UTF-8 encoded codepoint. -/// -/// However, it can be useful to disable this requirement, particularly if -/// you're searching things like `&[u8]` that are not known to be valid UTF-8. -/// -/// ``` -/// use regex_automata::{meta::Regex, util::syntax, Match}; -/// -/// let mut builder = Regex::builder(); -/// // Disables the requirement that non-empty matches match UTF-8. -/// builder.syntax(syntax::Config::new().utf8(false)); -/// // Disables the requirement that empty matches match UTF-8 boundaries. -/// builder.configure(Regex::config().utf8_empty(false)); -/// -/// // We can match raw bytes via \xZZ syntax, but we need to disable -/// // Unicode mode to do that. We could disable it everywhere, or just -/// // selectively, as shown here. -/// let re = builder.build(r"(?-u:\xFF)foo(?-u:\xFF)")?; -/// let hay = b"\xFFfoo\xFF"; -/// assert_eq!(Some(Match::must(0, 0..5)), re.find(hay)); -/// -/// // We can also match between code units. -/// let re = builder.build(r"")?; -/// let hay = "☃"; -/// assert_eq!(re.find_iter(hay).collect::>(), vec![ -/// Match::must(0, 0..0), -/// Match::must(0, 1..1), -/// Match::must(0, 2..2), -/// Match::must(0, 3..3), -/// ]); -/// -/// # Ok::<(), Box>(()) -/// ``` -#[derive(Clone, Debug)] -pub struct Builder { - config: Config, - ast: ast::parse::ParserBuilder, - hir: hir::translate::TranslatorBuilder, -} - -impl Builder { - /// Creates a new builder for configuring and constructing a [`Regex`]. - pub fn new() -> Builder { - Builder { - config: Config::default(), - ast: ast::parse::ParserBuilder::new(), - hir: hir::translate::TranslatorBuilder::new(), - } - } - - /// Builds a `Regex` from a single pattern string. - /// - /// If there was a problem parsing the pattern or a problem turning it into - /// a regex matcher, then an error is returned. - /// - /// # Example - /// - /// This example shows how to configure syntax options. - /// - /// ``` - /// use regex_automata::{meta::Regex, util::syntax, Match}; - /// - /// let re = Regex::builder() - /// .syntax(syntax::Config::new().crlf(true).multi_line(true)) - /// .build(r"^foo$")?; - /// let hay = "\r\nfoo\r\n"; - /// assert_eq!(Some(Match::must(0, 2..5)), re.find(hay)); - /// - /// # Ok::<(), Box>(()) - /// ``` - pub fn build(&self, pattern: &str) -> Result { - self.build_many(&[pattern]) - } - - /// Builds a `Regex` from many pattern strings. - /// - /// If there was a problem parsing any of the patterns or a problem turning - /// them into a regex matcher, then an error is returned. - /// - /// # Example: finding the pattern that caused an error - /// - /// When a syntax error occurs, it is possible to ask which pattern - /// caused the syntax error. - /// - /// ``` - /// use regex_automata::{meta::Regex, PatternID}; - /// - /// let err = Regex::builder() - /// .build_many(&["a", "b", r"\p{Foo}", "c"]) - /// .unwrap_err(); - /// assert_eq!(Some(PatternID::must(2)), err.pattern()); - /// ``` - /// - /// # Example: zero patterns is valid - /// - /// Building a regex with zero patterns results in a regex that never - /// matches anything. Because this routine is generic, passing an empty - /// slice usually requires a turbo-fish (or something else to help type - /// inference). 
- /// - /// ``` - /// use regex_automata::{meta::Regex, util::syntax, Match}; - /// - /// let re = Regex::builder() - /// .build_many::<&str>(&[])?; - /// assert_eq!(None, re.find("")); - /// - /// # Ok::<(), Box>(()) - /// ``` - pub fn build_many>( - &self, - patterns: &[P], - ) -> Result { - use crate::util::primitives::IteratorIndexExt; - log! { - debug!("building meta regex with {} patterns:", patterns.len()); - for (pid, p) in patterns.iter().with_pattern_ids() { - let p = p.as_ref(); - // We might split a grapheme with this truncation logic, but - // that's fine. We at least avoid splitting a codepoint. - let maxoff = p - .char_indices() - .map(|(i, ch)| i + ch.len_utf8()) - .take(1000) - .last() - .unwrap_or(0); - if maxoff < p.len() { - debug!("{pid:?}: {}[... snip ...]", &p[..maxoff]); - } else { - debug!("{pid:?}: {p}"); - } - } - } - let (mut asts, mut hirs) = (vec![], vec![]); - for (pid, p) in patterns.iter().with_pattern_ids() { - let ast = self - .ast - .build() - .parse(p.as_ref()) - .map_err(|err| BuildError::ast(pid, err))?; - asts.push(ast); - } - for ((pid, p), ast) in - patterns.iter().with_pattern_ids().zip(asts.iter()) - { - let hir = self - .hir - .build() - .translate(p.as_ref(), ast) - .map_err(|err| BuildError::hir(pid, err))?; - hirs.push(hir); - } - self.build_many_from_hir(&hirs) - } - - /// Builds a `Regex` directly from an `Hir` expression. - /// - /// This is useful if you needed to parse a pattern string into an `Hir` - /// for other reasons (such as analysis or transformations). This routine - /// permits building a `Regex` directly from the `Hir` expression instead - /// of first converting the `Hir` back to a pattern string. - /// - /// When using this method, any options set via [`Builder::syntax`] are - /// ignored. Namely, the syntax options only apply when parsing a pattern - /// string, which isn't relevant here. - /// - /// If there was a problem building the underlying regex matcher for the - /// given `Hir`, then an error is returned. - /// - /// # Example - /// - /// This example shows how one can hand-construct an `Hir` expression and - /// build a regex from it without doing any parsing at all. - /// - /// ``` - /// use { - /// regex_automata::{meta::Regex, Match}, - /// regex_syntax::hir::{Hir, Look}, - /// }; - /// - /// // (?Rm)^foo$ - /// let hir = Hir::concat(vec![ - /// Hir::look(Look::StartCRLF), - /// Hir::literal("foo".as_bytes()), - /// Hir::look(Look::EndCRLF), - /// ]); - /// let re = Regex::builder() - /// .build_from_hir(&hir)?; - /// let hay = "\r\nfoo\r\n"; - /// assert_eq!(Some(Match::must(0, 2..5)), re.find(hay)); - /// - /// Ok::<(), Box>(()) - /// ``` - pub fn build_from_hir(&self, hir: &Hir) -> Result { - self.build_many_from_hir(&[hir]) - } - - /// Builds a `Regex` directly from many `Hir` expressions. - /// - /// This is useful if you needed to parse pattern strings into `Hir` - /// expressions for other reasons (such as analysis or transformations). - /// This routine permits building a `Regex` directly from the `Hir` - /// expressions instead of first converting the `Hir` expressions back to - /// pattern strings. - /// - /// When using this method, any options set via [`Builder::syntax`] are - /// ignored. Namely, the syntax options only apply when parsing a pattern - /// string, which isn't relevant here. - /// - /// If there was a problem building the underlying regex matcher for the - /// given `Hir` expressions, then an error is returned. 
- /// - /// Note that unlike [`Builder::build_many`], this can only fail as a - /// result of building the underlying matcher. In that case, there is - /// no single `Hir` expression that can be isolated as a reason for the - /// failure. So if this routine fails, it's not possible to determine which - /// `Hir` expression caused the failure. - /// - /// # Example - /// - /// This example shows how one can hand-construct multiple `Hir` - /// expressions and build a single regex from them without doing any - /// parsing at all. - /// - /// ``` - /// use { - /// regex_automata::{meta::Regex, Match}, - /// regex_syntax::hir::{Hir, Look}, - /// }; - /// - /// // (?Rm)^foo$ - /// let hir1 = Hir::concat(vec![ - /// Hir::look(Look::StartCRLF), - /// Hir::literal("foo".as_bytes()), - /// Hir::look(Look::EndCRLF), - /// ]); - /// // (?Rm)^bar$ - /// let hir2 = Hir::concat(vec![ - /// Hir::look(Look::StartCRLF), - /// Hir::literal("bar".as_bytes()), - /// Hir::look(Look::EndCRLF), - /// ]); - /// let re = Regex::builder() - /// .build_many_from_hir(&[&hir1, &hir2])?; - /// let hay = "\r\nfoo\r\nbar"; - /// let got: Vec = re.find_iter(hay).collect(); - /// let expected = vec![ - /// Match::must(0, 2..5), - /// Match::must(1, 7..10), - /// ]; - /// assert_eq!(expected, got); - /// - /// Ok::<(), Box>(()) - /// ``` - pub fn build_many_from_hir>( - &self, - hirs: &[H], - ) -> Result { - let config = self.config.clone(); - // We collect the HIRs into a vec so we can write internal routines - // with '&[&Hir]'. i.e., Don't use generics everywhere to keep code - // bloat down.. - let hirs: Vec<&Hir> = hirs.iter().map(|hir| hir.borrow()).collect(); - let info = RegexInfo::new(config, &hirs); - let strat = strategy::new(&info, &hirs)?; - let pool = { - let strat = Arc::clone(&strat); - let create: CachePoolFn = Box::new(move || strat.create_cache()); - Pool::new(create) - }; - Ok(Regex { imp: Arc::new(RegexI { strat, info }), pool }) - } - - /// Configure the behavior of a `Regex`. - /// - /// This configuration controls non-syntax options related to the behavior - /// of a `Regex`. This includes things like whether empty matches can split - /// a codepoint, prefilters, line terminators and a long list of options - /// for configuring which regex engines the meta regex engine will be able - /// to use internally. - /// - /// # Example - /// - /// This example shows how to disable UTF-8 empty mode. This will permit - /// empty matches to occur between the UTF-8 encoding of a codepoint. - /// - /// ``` - /// use regex_automata::{meta::Regex, Match}; - /// - /// let re = Regex::new("")?; - /// let got: Vec = re.find_iter("☃").collect(); - /// // Matches only occur at the beginning and end of the snowman. - /// assert_eq!(got, vec![ - /// Match::must(0, 0..0), - /// Match::must(0, 3..3), - /// ]); - /// - /// let re = Regex::builder() - /// .configure(Regex::config().utf8_empty(false)) - /// .build("")?; - /// let got: Vec = re.find_iter("☃").collect(); - /// // Matches now occur at every position! - /// assert_eq!(got, vec![ - /// Match::must(0, 0..0), - /// Match::must(0, 1..1), - /// Match::must(0, 2..2), - /// Match::must(0, 3..3), - /// ]); - /// - /// Ok::<(), Box>(()) - /// ``` - pub fn configure(&mut self, config: Config) -> &mut Builder { - self.config = self.config.overwrite(config); - self - } - - /// Configure the syntax options when parsing a pattern string while - /// building a `Regex`. - /// - /// These options _only_ apply when [`Builder::build`] or [`Builder::build_many`] - /// are used. 
The other build methods accept `Hir` values, which have - /// already been parsed. - /// - /// # Example - /// - /// This example shows how to enable case insensitive mode. - /// - /// ``` - /// use regex_automata::{meta::Regex, util::syntax, Match}; - /// - /// let re = Regex::builder() - /// .syntax(syntax::Config::new().case_insensitive(true)) - /// .build(r"δ")?; - /// assert_eq!(Some(Match::must(0, 0..2)), re.find(r"Δ")); - /// - /// Ok::<(), Box>(()) - /// ``` - pub fn syntax( - &mut self, - config: crate::util::syntax::Config, - ) -> &mut Builder { - config.apply_ast(&mut self.ast); - config.apply_hir(&mut self.hir); - self - } -} - -#[cfg(test)] -mod tests { - use super::*; - - // I found this in the course of building out the benchmark suite for - // rebar. - #[test] - fn regression_suffix_literal_count() { - let _ = env_logger::try_init(); - - let re = Regex::new(r"[a-zA-Z]+ing").unwrap(); - assert_eq!(1, re.find_iter("tingling").count()); - } -} diff --git a/vendor/regex-automata/src/meta/reverse_inner.rs b/vendor/regex-automata/src/meta/reverse_inner.rs deleted file mode 100644 index 3d78779f6f73ee..00000000000000 --- a/vendor/regex-automata/src/meta/reverse_inner.rs +++ /dev/null @@ -1,220 +0,0 @@ -/*! -A module dedicated to plucking inner literals out of a regex pattern, and -then constructing a prefilter for them. We also include a regex pattern -"prefix" that corresponds to the bits of the regex that need to match before -the literals do. The reverse inner optimization then proceeds by looking for -matches of the inner literal(s), and then doing a reverse search of the prefix -from the start of the literal match to find the overall start position of the -match. - -The essential invariant we want to uphold here is that the literals we return -reflect a set where *at least* one of them must match in order for the overall -regex to match. We also need to maintain the invariant that the regex prefix -returned corresponds to the entirety of the regex up until the literals we -return. - -This somewhat limits what we can do. That is, if we a regex like -`\w+(@!|%%)\w+`, then we can pluck the `{@!, %%}` out and build a prefilter -from it. Then we just need to compile `\w+` in reverse. No fuss no muss. But if -we have a regex like \d+@!|\w+%%`, then we get kind of stymied. Technically, -we could still extract `{@!, %%}`, and it is true that at least of them must -match. But then, what is our regex prefix? Again, in theory, that could be -`\d+|\w+`, but that's not quite right, because the `\d+` only matches when `@!` -matches, and `\w+` only matches when `%%` matches. - -All of that is technically possible to do, but it seemingly requires a lot of -sophistication and machinery. Probably the way to tackle that is with some kind -of formalism and approach this problem more generally. - -For now, the code below basically just looks for a top-level concatenation. -And if it can find one, it looks for literals in each of the direct child -sub-expressions of that concatenation. If some good ones are found, we return -those and a concatenation of the Hir expressions seen up to that point. -*/ - -use alloc::vec::Vec; - -use regex_syntax::hir::{self, literal, Hir, HirKind}; - -use crate::{util::prefilter::Prefilter, MatchKind}; - -/// Attempts to extract an "inner" prefilter from the given HIR expressions. If -/// one was found, then a concatenation of the HIR expressions that precede it -/// is returned. 
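To make the reverse inner strategy described in the module comment above concrete, here is a small, self-contained sketch (an editorial illustration, not part of the vendored file) of the three phases for a pattern like `\w+@\w+`: scan for the inner literal, reverse-match the prefix ending at it, then forward-match the suffix after it. Plain string scanning stands in for the prefilter and the reverse/forward regex engines, and every name below is illustrative.

```rust
// Editorial illustration of the reverse inner scan for `\w+@\w+`.
fn find_word_at_word(haystack: &str) -> Option<(usize, usize)> {
    let is_word = |c: char| c.is_alphanumeric() || c == '_';
    let mut from = 0;
    // Phase 1: find the next candidate position of the inner literal '@'.
    while let Some(rel) = haystack[from..].find('@') {
        let lit = from + rel;
        // Phase 2: reverse scan for `\w+` ending immediately before the literal.
        let start = haystack[..lit]
            .char_indices()
            .rev()
            .take_while(|&(_, c)| is_word(c))
            .last()
            .map(|(i, _)| i);
        if let Some(start) = start {
            // Phase 3: forward scan for `\w+` immediately after the literal.
            let end = haystack[lit + 1..]
                .char_indices()
                .take_while(|&(_, c)| is_word(c))
                .last()
                .map(|(i, c)| lit + 1 + i + c.len_utf8());
            if let Some(end) = end {
                return Some((start, end));
            }
        }
        // No overall match at this candidate; resume after the literal.
        from = lit + 1;
    }
    None
}

fn main() {
    assert_eq!(find_word_at_word("send to user@example today"), Some((8, 20)));
    assert_eq!(find_word_at_word("no at sign here"), None);
}
```

The real implementation additionally has to guard against rescanning the same bytes over and over, which is what the `limited` and `stopat` search routines elsewhere in this diff are for.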
-/// -/// The idea here is that the prefilter returned can be used to find candidate -/// matches. And then the HIR returned can be used to build a reverse regex -/// matcher, which will find the start of the candidate match. Finally, the -/// match still has to be confirmed with a normal anchored forward scan to find -/// the end position of the match. -/// -/// Note that this assumes leftmost-first match semantics, so callers must -/// not call this otherwise. -pub(crate) fn extract(hirs: &[&Hir]) -> Option<(Hir, Prefilter)> { - if hirs.len() != 1 { - debug!( - "skipping reverse inner optimization since it only \ - supports 1 pattern, {} were given", - hirs.len(), - ); - return None; - } - let mut concat = match top_concat(hirs[0]) { - Some(concat) => concat, - None => { - debug!( - "skipping reverse inner optimization because a top-level \ - concatenation could not found", - ); - return None; - } - }; - // We skip the first HIR because if it did have a prefix prefilter in it, - // we probably wouldn't be here looking for an inner prefilter. - for i in 1..concat.len() { - let hir = &concat[i]; - let pre = match prefilter(hir) { - None => continue, - Some(pre) => pre, - }; - // Even if we got a prefilter, if it isn't consider "fast," then we - // probably don't want to bother with it. Namely, since the reverse - // inner optimization requires some overhead, it likely only makes - // sense if the prefilter scan itself is (believed) to be much faster - // than the regex engine. - if !pre.is_fast() { - debug!( - "skipping extracted inner prefilter because \ - it probably isn't fast" - ); - continue; - } - let concat_suffix = Hir::concat(concat.split_off(i)); - let concat_prefix = Hir::concat(concat); - // Look for a prefilter again. Why? Because above we only looked for - // a prefilter on the individual 'hir', but we might be able to find - // something better and more discriminatory by looking at the entire - // suffix. We don't do this above to avoid making this loop worst case - // quadratic in the length of 'concat'. - let pre2 = match prefilter(&concat_suffix) { - None => pre, - Some(pre2) => { - if pre2.is_fast() { - pre2 - } else { - pre - } - } - }; - return Some((concat_prefix, pre2)); - } - debug!( - "skipping reverse inner optimization because a top-level \ - sub-expression with a fast prefilter could not be found" - ); - None -} - -/// Attempt to extract a prefilter from an HIR expression. -/// -/// We do a little massaging here to do our best that the prefilter we get out -/// of this is *probably* fast. Basically, the false positive rate has a much -/// higher impact for things like the reverse inner optimization because more -/// work needs to potentially be done for each candidate match. -/// -/// Note that this assumes leftmost-first match semantics, so callers must -/// not call this otherwise. -fn prefilter(hir: &Hir) -> Option { - let mut extractor = literal::Extractor::new(); - extractor.kind(literal::ExtractKind::Prefix); - let mut prefixes = extractor.extract(hir); - debug!( - "inner prefixes (len={:?}) extracted before optimization: {:?}", - prefixes.len(), - prefixes - ); - // Since these are inner literals, we know they cannot be exact. But the - // extractor doesn't know this. We mark them as inexact because this might - // impact literal optimization. Namely, optimization weights "all literals - // are exact" as very high, because it presumes that any match results in - // an overall match. But of course, that is not the case here. 
- // - // In practice, this avoids plucking out a ASCII-only \s as an alternation - // of single-byte whitespace characters. - prefixes.make_inexact(); - prefixes.optimize_for_prefix_by_preference(); - debug!( - "inner prefixes (len={:?}) extracted after optimization: {:?}", - prefixes.len(), - prefixes - ); - prefixes - .literals() - .and_then(|lits| Prefilter::new(MatchKind::LeftmostFirst, lits)) -} - -/// Looks for a "top level" HirKind::Concat item in the given HIR. This will -/// try to return one even if it's embedded in a capturing group, but is -/// otherwise pretty conservative in what is returned. -/// -/// The HIR returned is a complete copy of the concat with all capturing -/// groups removed. In effect, the concat returned is "flattened" with respect -/// to capturing groups. This makes the detection logic above for prefixes -/// a bit simpler, and it works because 1) capturing groups never influence -/// whether a match occurs or not and 2) capturing groups are not used when -/// doing the reverse inner search to find the start of the match. -fn top_concat(mut hir: &Hir) -> Option> { - loop { - hir = match hir.kind() { - HirKind::Empty - | HirKind::Literal(_) - | HirKind::Class(_) - | HirKind::Look(_) - | HirKind::Repetition(_) - | HirKind::Alternation(_) => return None, - HirKind::Capture(hir::Capture { ref sub, .. }) => sub, - HirKind::Concat(ref subs) => { - // We are careful to only do the flattening/copy when we know - // we have a "top level" concat we can inspect. This avoids - // doing extra work in cases where we definitely won't use it. - // (This might still be wasted work if we can't go on to find - // some literals to extract.) - let concat = - Hir::concat(subs.iter().map(|h| flatten(h)).collect()); - return match concat.into_kind() { - HirKind::Concat(xs) => Some(xs), - // It is actually possible for this case to occur, because - // 'Hir::concat' might simplify the expression to the point - // that concatenations are actually removed. One wonders - // whether this leads to other cases where we should be - // extracting literals, but in theory, I believe if we do - // get here, then it means that a "real" prefilter failed - // to be extracted and we should probably leave well enough - // alone. (A "real" prefilter is unbothered by "top-level - // concats" and "capturing groups.") - _ => return None, - }; - } - }; - } -} - -/// Returns a copy of the given HIR but with all capturing groups removed. -fn flatten(hir: &Hir) -> Hir { - match hir.kind() { - HirKind::Empty => Hir::empty(), - HirKind::Literal(hir::Literal(ref x)) => Hir::literal(x.clone()), - HirKind::Class(ref x) => Hir::class(x.clone()), - HirKind::Look(ref x) => Hir::look(x.clone()), - HirKind::Repetition(ref x) => Hir::repetition(x.with(flatten(&x.sub))), - // This is the interesting case. We just drop the group information - // entirely and use the child HIR itself. - HirKind::Capture(hir::Capture { ref sub, .. }) => flatten(sub), - HirKind::Alternation(ref xs) => { - Hir::alternation(xs.iter().map(|x| flatten(x)).collect()) - } - HirKind::Concat(ref xs) => { - Hir::concat(xs.iter().map(|x| flatten(x)).collect()) - } - } -} diff --git a/vendor/regex-automata/src/meta/stopat.rs b/vendor/regex-automata/src/meta/stopat.rs deleted file mode 100644 index c4dcd797a0b8fc..00000000000000 --- a/vendor/regex-automata/src/meta/stopat.rs +++ /dev/null @@ -1,212 +0,0 @@ -/*! -This module defines two bespoke forward DFA search routines. One for the lazy -DFA and one for the fully compiled DFA. 
These routines differ from the normal -ones by reporting the position at which the search terminates when a match -*isn't* found. - -This position at which a search terminates is useful in contexts where the meta -regex engine runs optimizations that could go quadratic if we aren't careful. -Namely, a regex search *could* scan to the end of the haystack only to report a -non-match. If the caller doesn't know that the search scanned to the end of the -haystack, it might restart the search at the next literal candidate it finds -and repeat the process. - -Providing the caller with the position at which the search stopped provides a -way for the caller to determine the point at which subsequent scans should not -pass. This is principally used in the "reverse inner" optimization, which works -like this: - -1. Look for a match of an inner literal. Say, 'Z' in '\w+Z\d+'. -2. At the spot where 'Z' matches, do a reverse anchored search from there for -'\w+'. -3. If the reverse search matches, it corresponds to the start position of a -(possible) match. At this point, do a forward anchored search to find the end -position. If an end position is found, then we have a match and we know its -bounds. - -If the forward anchored search in (3) searches the entire rest of the haystack -but reports a non-match, then a naive implementation of the above will continue -back at step 1 looking for more candidates. There might still be a match to be -found! It's possible. But we already scanned the whole haystack. So if we keep -repeating the process, then we might wind up taking quadratic time in the size -of the haystack, which is not great. - -So if the forward anchored search in (3) reports the position at which it -stops, then we can detect whether quadratic behavior might be occurring in -steps (1) and (2). For (1), it occurs if the literal candidate found occurs -*before* the end of the previous search in (3), since that means we're now -going to look for another match in a place where the forward search has already -scanned. It is *correct* to do so, but our technique has become inefficient. -For (2), quadratic behavior occurs similarly when its reverse search extends -past the point where the previous forward search in (3) terminated. Indeed, to -implement (2), we use the sibling 'limited' module for ensuring our reverse -scan doesn't go further than we want. - -See the 'opt/reverse-inner' benchmarks in rebar for a real demonstration of -how quadratic behavior is mitigated. 
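As a self-contained sketch of that contract (editorial, not part of the vendored file): the toy forward "engine" below returns the offset where it stopped when it fails to match, and the caller compares new literal candidates against that offset to detect when it is about to rescan bytes it has already covered. Byte scanning stands in for the DFAs; all names are illustrative.

```rust
// Toy forward "engine": Ok(end) on a match, Err(stop) with the offset at
// which the scan gave up otherwise.
fn forward_find(haystack: &[u8], start: usize, needle: &[u8]) -> Result<usize, usize> {
    let mut at = start;
    while at + needle.len() <= haystack.len() {
        if &haystack[at..at + needle.len()] == needle {
            return Ok(at + needle.len());
        }
        at += 1;
    }
    Err(haystack.len())
}

fn main() {
    let hay = b"xZ12Zab";
    let (mut candidate, mut max_scanned) = (0usize, 0usize);
    // Step 1: look for the next occurrence of the inner literal 'Z'.
    while let Some(rel) = hay[candidate..].iter().position(|&b| b == b'Z') {
        let lit = candidate + rel;
        if lit < max_scanned {
            // A previous forward scan already covered this region; a naive
            // restart here is exactly where quadratic behavior creeps in.
            println!("candidate at {lit} is before {max_scanned}: already scanned");
        }
        // Step 3 (simplified): forward scan for the rest of the pattern.
        match forward_find(hay, lit + 1, b"abc") {
            Ok(end) => {
                println!("match ends at {end}");
                return;
            }
            Err(stopped_at) => max_scanned = stopped_at,
        }
        candidate = lit + 1;
    }
    println!("no match");
}
```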
-*/ - -use crate::{meta::error::RetryFailError, HalfMatch, Input, MatchError}; - -#[cfg(feature = "dfa-build")] -pub(crate) fn dfa_try_search_half_fwd( - dfa: &crate::dfa::dense::DFA>, - input: &Input<'_>, -) -> Result, RetryFailError> { - use crate::dfa::{accel, Automaton}; - - let mut mat = None; - let mut sid = dfa.start_state_forward(input)?; - let mut at = input.start(); - while at < input.end() { - sid = dfa.next_state(sid, input.haystack()[at]); - if dfa.is_special_state(sid) { - if dfa.is_match_state(sid) { - let pattern = dfa.match_pattern(sid, 0); - mat = Some(HalfMatch::new(pattern, at)); - if input.get_earliest() { - return Ok(mat.ok_or(at)); - } - if dfa.is_accel_state(sid) { - let needs = dfa.accelerator(sid); - at = accel::find_fwd(needs, input.haystack(), at) - .unwrap_or(input.end()); - continue; - } - } else if dfa.is_accel_state(sid) { - let needs = dfa.accelerator(sid); - at = accel::find_fwd(needs, input.haystack(), at) - .unwrap_or(input.end()); - continue; - } else if dfa.is_dead_state(sid) { - return Ok(mat.ok_or(at)); - } else if dfa.is_quit_state(sid) { - return Err(MatchError::quit(input.haystack()[at], at).into()); - } else { - // Ideally we wouldn't use a DFA that specialized start states - // and thus 'is_start_state()' could never be true here, but in - // practice we reuse the DFA created for the full regex which - // will specialize start states whenever there is a prefilter. - debug_assert!(dfa.is_start_state(sid)); - } - } - at += 1; - } - dfa_eoi_fwd(dfa, input, &mut sid, &mut mat)?; - Ok(mat.ok_or(at)) -} - -#[cfg(feature = "hybrid")] -pub(crate) fn hybrid_try_search_half_fwd( - dfa: &crate::hybrid::dfa::DFA, - cache: &mut crate::hybrid::dfa::Cache, - input: &Input<'_>, -) -> Result, RetryFailError> { - let mut mat = None; - let mut sid = dfa.start_state_forward(cache, input)?; - let mut at = input.start(); - while at < input.end() { - sid = dfa - .next_state(cache, sid, input.haystack()[at]) - .map_err(|_| MatchError::gave_up(at))?; - if sid.is_tagged() { - if sid.is_match() { - let pattern = dfa.match_pattern(cache, sid, 0); - mat = Some(HalfMatch::new(pattern, at)); - if input.get_earliest() { - return Ok(mat.ok_or(at)); - } - } else if sid.is_dead() { - return Ok(mat.ok_or(at)); - } else if sid.is_quit() { - return Err(MatchError::quit(input.haystack()[at], at).into()); - } else { - // We should NEVER get an unknown state ID back from - // dfa.next_state(). - debug_assert!(!sid.is_unknown()); - // Ideally we wouldn't use a lazy DFA that specialized start - // states and thus 'sid.is_start()' could never be true here, - // but in practice we reuse the lazy DFA created for the full - // regex which will specialize start states whenever there is - // a prefilter. 
- debug_assert!(sid.is_start()); - } - } - at += 1; - } - hybrid_eoi_fwd(dfa, cache, input, &mut sid, &mut mat)?; - Ok(mat.ok_or(at)) -} - -#[cfg(feature = "dfa-build")] -#[cfg_attr(feature = "perf-inline", inline(always))] -fn dfa_eoi_fwd( - dfa: &crate::dfa::dense::DFA>, - input: &Input<'_>, - sid: &mut crate::util::primitives::StateID, - mat: &mut Option, -) -> Result<(), MatchError> { - use crate::dfa::Automaton; - - let sp = input.get_span(); - match input.haystack().get(sp.end) { - Some(&b) => { - *sid = dfa.next_state(*sid, b); - if dfa.is_match_state(*sid) { - let pattern = dfa.match_pattern(*sid, 0); - *mat = Some(HalfMatch::new(pattern, sp.end)); - } else if dfa.is_quit_state(*sid) { - return Err(MatchError::quit(b, sp.end)); - } - } - None => { - *sid = dfa.next_eoi_state(*sid); - if dfa.is_match_state(*sid) { - let pattern = dfa.match_pattern(*sid, 0); - *mat = Some(HalfMatch::new(pattern, input.haystack().len())); - } - // N.B. We don't have to check 'is_quit' here because the EOI - // transition can never lead to a quit state. - debug_assert!(!dfa.is_quit_state(*sid)); - } - } - Ok(()) -} - -#[cfg(feature = "hybrid")] -#[cfg_attr(feature = "perf-inline", inline(always))] -fn hybrid_eoi_fwd( - dfa: &crate::hybrid::dfa::DFA, - cache: &mut crate::hybrid::dfa::Cache, - input: &Input<'_>, - sid: &mut crate::hybrid::LazyStateID, - mat: &mut Option, -) -> Result<(), MatchError> { - let sp = input.get_span(); - match input.haystack().get(sp.end) { - Some(&b) => { - *sid = dfa - .next_state(cache, *sid, b) - .map_err(|_| MatchError::gave_up(sp.end))?; - if sid.is_match() { - let pattern = dfa.match_pattern(cache, *sid, 0); - *mat = Some(HalfMatch::new(pattern, sp.end)); - } else if sid.is_quit() { - return Err(MatchError::quit(b, sp.end)); - } - } - None => { - *sid = dfa - .next_eoi_state(cache, *sid) - .map_err(|_| MatchError::gave_up(input.haystack().len()))?; - if sid.is_match() { - let pattern = dfa.match_pattern(cache, *sid, 0); - *mat = Some(HalfMatch::new(pattern, input.haystack().len())); - } - // N.B. We don't have to check 'is_quit' here because the EOI - // transition can never lead to a quit state. - debug_assert!(!sid.is_quit()); - } - } - Ok(()) -} diff --git a/vendor/regex-automata/src/meta/strategy.rs b/vendor/regex-automata/src/meta/strategy.rs deleted file mode 100644 index ebb876b2b88525..00000000000000 --- a/vendor/regex-automata/src/meta/strategy.rs +++ /dev/null @@ -1,1905 +0,0 @@ -use core::{ - fmt::Debug, - panic::{RefUnwindSafe, UnwindSafe}, -}; - -use alloc::sync::Arc; - -use regex_syntax::hir::{literal, Hir}; - -use crate::{ - meta::{ - error::{BuildError, RetryError, RetryFailError, RetryQuadraticError}, - regex::{Cache, RegexInfo}, - reverse_inner, wrappers, - }, - nfa::thompson::{self, WhichCaptures, NFA}, - util::{ - captures::{Captures, GroupInfo}, - look::LookMatcher, - prefilter::{self, Prefilter, PrefilterI}, - primitives::{NonMaxUsize, PatternID}, - search::{Anchored, HalfMatch, Input, Match, MatchKind, PatternSet}, - }, -}; - -/// A trait that represents a single meta strategy. Its main utility is in -/// providing a way to do dynamic dispatch over a few choices. -/// -/// Why dynamic dispatch? I actually don't have a super compelling reason, and -/// importantly, I have not benchmarked it with the main alternative: an enum. -/// I went with dynamic dispatch initially because the regex engine search code -/// really can't be inlined into caller code in most cases because it's just -/// too big. 
In other words, it is already expected that every regex search -/// will entail at least the cost of a function call. -/// -/// I do wonder whether using enums would result in better codegen overall -/// though. It's a worthwhile experiment to try. Probably the most interesting -/// benchmark to run in such a case would be one with a high match count. That -/// is, a benchmark to test the overall latency of a search call. -pub(super) trait Strategy: - Debug + Send + Sync + RefUnwindSafe + UnwindSafe + 'static -{ - fn group_info(&self) -> &GroupInfo; - - fn create_cache(&self) -> Cache; - - fn reset_cache(&self, cache: &mut Cache); - - fn is_accelerated(&self) -> bool; - - fn memory_usage(&self) -> usize; - - fn search(&self, cache: &mut Cache, input: &Input<'_>) -> Option; - - fn search_half( - &self, - cache: &mut Cache, - input: &Input<'_>, - ) -> Option; - - fn is_match(&self, cache: &mut Cache, input: &Input<'_>) -> bool; - - fn search_slots( - &self, - cache: &mut Cache, - input: &Input<'_>, - slots: &mut [Option], - ) -> Option; - - fn which_overlapping_matches( - &self, - cache: &mut Cache, - input: &Input<'_>, - patset: &mut PatternSet, - ); -} - -pub(super) fn new( - info: &RegexInfo, - hirs: &[&Hir], -) -> Result, BuildError> { - // At this point, we're committed to a regex engine of some kind. So pull - // out a prefilter if we can, which will feed to each of the constituent - // regex engines. - let pre = if info.is_always_anchored_start() { - // PERF: I'm not sure we necessarily want to do this... We may want to - // run a prefilter for quickly rejecting in some cases. The problem - // is that anchored searches overlap quite a bit with the use case - // of "run a regex on every line to extract data." In that case, the - // regex always matches, so running a prefilter doesn't really help us - // there. The main place where a prefilter helps in an anchored search - // is if the anchored search is not expected to match frequently. That - // is, the prefilter gives us a way to possibly reject a haystack very - // quickly. - // - // Maybe we should do use a prefilter, but only for longer haystacks? - // Or maybe we should only use a prefilter when we think it's "fast"? - // - // Interestingly, I think we currently lack the infrastructure for - // disabling a prefilter based on haystack length. That would probably - // need to be a new 'Input' option. (Interestingly, an 'Input' used to - // carry a 'Prefilter' with it, but I moved away from that.) - debug!("skipping literal extraction since regex is anchored"); - None - } else if let Some(pre) = info.config().get_prefilter() { - debug!( - "skipping literal extraction since the caller provided a prefilter" - ); - Some(pre.clone()) - } else if info.config().get_auto_prefilter() { - let kind = info.config().get_match_kind(); - let prefixes = crate::util::prefilter::prefixes(kind, hirs); - // If we can build a full `Strategy` from just the extracted prefixes, - // then we can short-circuit and avoid building a regex engine at all. - if let Some(pre) = Pre::from_prefixes(info, &prefixes) { - debug!( - "found that the regex can be broken down to a literal \ - search, avoiding the regex engine entirely", - ); - return Ok(pre); - } - // This now attempts another short-circuit of the regex engine: if we - // have a huge alternation of just plain literals, then we can just use - // Aho-Corasick for that and avoid the regex engine entirely. 
- // - // You might think this case would just be handled by - // `Pre::from_prefixes`, but that technique relies on heuristic literal - // extraction from the corresponding `Hir`. That works, but part of - // heuristics limit the size and number of literals returned. This case - // will specifically handle patterns with very large alternations. - // - // One wonders if we should just roll this our heuristic literal - // extraction, and then I think this case could disappear entirely. - if let Some(pre) = Pre::from_alternation_literals(info, hirs) { - debug!( - "found plain alternation of literals, \ - avoiding regex engine entirely and using Aho-Corasick" - ); - return Ok(pre); - } - prefixes.literals().and_then(|strings| { - debug!( - "creating prefilter from {} literals: {:?}", - strings.len(), - strings, - ); - Prefilter::new(kind, strings) - }) - } else { - debug!("skipping literal extraction since prefilters were disabled"); - None - }; - let mut core = Core::new(info.clone(), pre.clone(), hirs)?; - // Now that we have our core regex engines built, there are a few cases - // where we can do a little bit better than just a normal "search forward - // and maybe use a prefilter when in a start state." However, these cases - // may not always work or otherwise build on top of the Core searcher. - // For example, the reverse anchored optimization seems like it might - // always work, but only the DFAs support reverse searching and the DFAs - // might give up or quit for reasons. If we had, e.g., a PikeVM that - // supported reverse searching, then we could avoid building a full Core - // engine for this case. - core = match ReverseAnchored::new(core) { - Err(core) => core, - Ok(ra) => { - debug!("using reverse anchored strategy"); - return Ok(Arc::new(ra)); - } - }; - core = match ReverseSuffix::new(core, hirs) { - Err(core) => core, - Ok(rs) => { - debug!("using reverse suffix strategy"); - return Ok(Arc::new(rs)); - } - }; - core = match ReverseInner::new(core, hirs) { - Err(core) => core, - Ok(ri) => { - debug!("using reverse inner strategy"); - return Ok(Arc::new(ri)); - } - }; - debug!("using core strategy"); - Ok(Arc::new(core)) -} - -#[derive(Clone, Debug)] -struct Pre

<P> { - pre: P, - group_info: GroupInfo, -} - -impl<P: PrefilterI> Pre<P>
{ - fn new(pre: P) -> Arc { - // The only thing we support when we use prefilters directly as a - // strategy is the start and end of the overall match for a single - // pattern. In other words, exactly one implicit capturing group. Which - // is exactly what we use here for a GroupInfo. - let group_info = GroupInfo::new([[None::<&str>]]).unwrap(); - Arc::new(Pre { pre, group_info }) - } -} - -// This is a little weird, but we don't actually care about the type parameter -// here because we're selecting which underlying prefilter to use. So we just -// define it on an arbitrary type. -impl Pre<()> { - /// Given a sequence of prefixes, attempt to return a full `Strategy` using - /// just the prefixes. - /// - /// Basically, this occurs when the prefixes given not just prefixes, - /// but an enumeration of the entire language matched by the regular - /// expression. - /// - /// A number of other conditions need to be true too. For example, there - /// can be only one pattern, the number of explicit capture groups is 0, no - /// look-around assertions and so on. - /// - /// Note that this ignores `Config::get_auto_prefilter` because if this - /// returns something, then it isn't a prefilter but a matcher itself. - /// Therefore, it shouldn't suffer from the problems typical to prefilters - /// (such as a high false positive rate). - fn from_prefixes( - info: &RegexInfo, - prefixes: &literal::Seq, - ) -> Option> { - let kind = info.config().get_match_kind(); - // Check to see if our prefixes are exact, which means we might be - // able to bypass the regex engine entirely and just rely on literal - // searches. - if !prefixes.is_exact() { - return None; - } - // We also require that we have a single regex pattern. Namely, - // we reuse the prefilter infrastructure to implement search and - // prefilters only report spans. Prefilters don't know about pattern - // IDs. The multi-regex case isn't a lost cause, we might still use - // Aho-Corasick and we might still just use a regular prefilter, but - // that's done below. - if info.pattern_len() != 1 { - return None; - } - // We can't have any capture groups either. The literal engines don't - // know how to deal with things like '(foo)(bar)'. In that case, a - // prefilter will just be used and then the regex engine will resolve - // the capture groups. - if info.props()[0].explicit_captures_len() != 0 { - return None; - } - // We also require that it has zero look-around assertions. Namely, - // literal extraction treats look-around assertions as if they match - // *every* empty string. But of course, that isn't true. So for - // example, 'foo\bquux' never matches anything, but 'fooquux' is - // extracted from that as an exact literal. Such cases should just run - // the regex engine. 'fooquux' will be used as a normal prefilter, and - // then the regex engine will try to look for an actual match. - if !info.props()[0].look_set().is_empty() { - return None; - } - // Finally, currently, our prefilters are all oriented around - // leftmost-first match semantics, so don't try to use them if the - // caller asked for anything else. - if kind != MatchKind::LeftmostFirst { - return None; - } - // The above seems like a lot of requirements to meet, but it applies - // to a lot of cases. 'foo', '[abc][123]' and 'foo|bar|quux' all meet - // the above criteria, for example. - // - // Note that this is effectively a latency optimization. 
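From the caller's point of view this bypass is invisible: a pattern meeting the requirements above matches exactly as any other regex does, just with lower latency. A brief example against the public API (editorial; whether the literal engine is actually selected internally is an implementation detail):

```rust
use regex_automata::{meta::Regex, Match};

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // A single pattern, no capture groups, no look-around, leftmost-first:
    // exactly the shape that from_prefixes() above can service with a
    // literal searcher alone.
    let re = Regex::new("foo|bar|quux")?;
    let hay = "nada bar nada quux";
    let got: Vec<Match> = re.find_iter(hay).collect();
    assert_eq!(got, vec![Match::must(0, 5..8), Match::must(0, 14..18)]);
    Ok(())
}
```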
If we didn't - // do this, then the extracted literals would still get bundled into - // a prefilter, and every regex engine capable of running unanchored - // searches supports prefilters. So this optimization merely sidesteps - // having to run the regex engine at all to confirm the match. Thus, it - // decreases the latency of a match. - - // OK because we know the set is exact and thus finite. - let prefixes = prefixes.literals().unwrap(); - debug!( - "trying to bypass regex engine by creating \ - prefilter from {} literals: {:?}", - prefixes.len(), - prefixes, - ); - let choice = match prefilter::Choice::new(kind, prefixes) { - Some(choice) => choice, - None => { - debug!( - "regex bypass failed because no prefilter could be built" - ); - return None; - } - }; - let strat: Arc = match choice { - prefilter::Choice::Memchr(pre) => Pre::new(pre), - prefilter::Choice::Memchr2(pre) => Pre::new(pre), - prefilter::Choice::Memchr3(pre) => Pre::new(pre), - prefilter::Choice::Memmem(pre) => Pre::new(pre), - prefilter::Choice::Teddy(pre) => Pre::new(pre), - prefilter::Choice::ByteSet(pre) => Pre::new(pre), - prefilter::Choice::AhoCorasick(pre) => Pre::new(pre), - }; - Some(strat) - } - - /// Attempts to extract an alternation of literals, and if it's deemed - /// worth doing, returns an Aho-Corasick prefilter as a strategy. - /// - /// And currently, this only returns something when 'hirs.len() == 1'. This - /// could in theory do something if there are multiple HIRs where all of - /// them are alternation of literals, but I haven't had the time to go down - /// that path yet. - fn from_alternation_literals( - info: &RegexInfo, - hirs: &[&Hir], - ) -> Option> { - use crate::util::prefilter::AhoCorasick; - - let lits = crate::meta::literal::alternation_literals(info, hirs)?; - let ac = AhoCorasick::new(MatchKind::LeftmostFirst, &lits)?; - Some(Pre::new(ac)) - } -} - -// This implements Strategy for anything that implements PrefilterI. -// -// Note that this must only be used for regexes of length 1. Multi-regexes -// don't work here. The prefilter interface only provides the span of a match -// and not the pattern ID. (I did consider making it more expressive, but I -// couldn't figure out how to tie everything together elegantly.) Thus, so long -// as the regex only contains one pattern, we can simply assume that a match -// corresponds to PatternID::ZERO. And indeed, that's what we do here. -// -// In practice, since this impl is used to report matches directly and thus -// completely bypasses the regex engine, we only wind up using this under the -// following restrictions: -// -// * There must be only one pattern. As explained above. -// * The literal sequence must be finite and only contain exact literals. -// * There must not be any look-around assertions. If there are, the literals -// extracted might be exact, but a match doesn't necessarily imply an overall -// match. As a trivial example, 'foo\bbar' does not match 'foobar'. -// * The pattern must not have any explicit capturing groups. If it does, the -// caller might expect them to be resolved. e.g., 'foo(bar)'. -// -// So when all of those things are true, we use a prefilter directly as a -// strategy. -// -// In the case where the number of patterns is more than 1, we don't use this -// but do use a special Aho-Corasick strategy if all of the regexes are just -// simple literals or alternations of literals. (We also use the Aho-Corasick -// strategy when len(patterns)==1 if the number of literals is large. 
In that -// case, literal extraction gives up and will return an infinite set.) -impl Strategy for Pre
<P>
{ - #[cfg_attr(feature = "perf-inline", inline(always))] - fn group_info(&self) -> &GroupInfo { - &self.group_info - } - - fn create_cache(&self) -> Cache { - Cache { - capmatches: Captures::all(self.group_info().clone()), - pikevm: wrappers::PikeVMCache::none(), - backtrack: wrappers::BoundedBacktrackerCache::none(), - onepass: wrappers::OnePassCache::none(), - hybrid: wrappers::HybridCache::none(), - revhybrid: wrappers::ReverseHybridCache::none(), - } - } - - fn reset_cache(&self, _cache: &mut Cache) {} - - fn is_accelerated(&self) -> bool { - self.pre.is_fast() - } - - fn memory_usage(&self) -> usize { - self.pre.memory_usage() - } - - #[cfg_attr(feature = "perf-inline", inline(always))] - fn search(&self, _cache: &mut Cache, input: &Input<'_>) -> Option { - if input.is_done() { - return None; - } - if input.get_anchored().is_anchored() { - return self - .pre - .prefix(input.haystack(), input.get_span()) - .map(|sp| Match::new(PatternID::ZERO, sp)); - } - self.pre - .find(input.haystack(), input.get_span()) - .map(|sp| Match::new(PatternID::ZERO, sp)) - } - - #[cfg_attr(feature = "perf-inline", inline(always))] - fn search_half( - &self, - cache: &mut Cache, - input: &Input<'_>, - ) -> Option { - self.search(cache, input).map(|m| HalfMatch::new(m.pattern(), m.end())) - } - - #[cfg_attr(feature = "perf-inline", inline(always))] - fn is_match(&self, cache: &mut Cache, input: &Input<'_>) -> bool { - self.search(cache, input).is_some() - } - - #[cfg_attr(feature = "perf-inline", inline(always))] - fn search_slots( - &self, - cache: &mut Cache, - input: &Input<'_>, - slots: &mut [Option], - ) -> Option { - let m = self.search(cache, input)?; - if let Some(slot) = slots.get_mut(0) { - *slot = NonMaxUsize::new(m.start()); - } - if let Some(slot) = slots.get_mut(1) { - *slot = NonMaxUsize::new(m.end()); - } - Some(m.pattern()) - } - - #[cfg_attr(feature = "perf-inline", inline(always))] - fn which_overlapping_matches( - &self, - cache: &mut Cache, - input: &Input<'_>, - patset: &mut PatternSet, - ) { - if self.search(cache, input).is_some() { - patset.insert(PatternID::ZERO); - } - } -} - -#[derive(Debug)] -struct Core { - info: RegexInfo, - pre: Option, - nfa: NFA, - nfarev: Option, - pikevm: wrappers::PikeVM, - backtrack: wrappers::BoundedBacktracker, - onepass: wrappers::OnePass, - hybrid: wrappers::Hybrid, - dfa: wrappers::DFA, -} - -impl Core { - fn new( - info: RegexInfo, - pre: Option, - hirs: &[&Hir], - ) -> Result { - let mut lookm = LookMatcher::new(); - lookm.set_line_terminator(info.config().get_line_terminator()); - let thompson_config = thompson::Config::new() - .utf8(info.config().get_utf8_empty()) - .nfa_size_limit(info.config().get_nfa_size_limit()) - .shrink(false) - .which_captures(info.config().get_which_captures()) - .look_matcher(lookm); - let nfa = thompson::Compiler::new() - .configure(thompson_config.clone()) - .build_many_from_hir(hirs) - .map_err(BuildError::nfa)?; - // It's possible for the PikeVM or the BB to fail to build, even though - // at this point, we already have a full NFA in hand. They can fail - // when a Unicode word boundary is used but where Unicode word boundary - // support is disabled at compile time, thus making it impossible to - // match. (Construction can also fail if the NFA was compiled without - // captures, but we always enable that above.) 
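Core::new here builds the NFA-based engines unconditionally and the DFAs only when the configuration allows them. As a hedged sketch of how a caller steers that choice, assuming the `hybrid` and `dfa` Config setters that correspond to the `get_hybrid()`/`get_dfa()` getters consulted below:

```rust
use regex_automata::meta::Regex;

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Editorial sketch: the `hybrid`/`dfa` toggles are assumed to be the
    // setter counterparts of the getters used in Core::new. Disabling both
    // leaves only the NFA-based engines (PikeVM, backtracker, onepass).
    let re = Regex::builder()
        .configure(Regex::config().hybrid(false).dfa(false))
        .build(r"[a-z]+[0-9]{2,4}")?;
    assert!(re.is_match("release2024"));
    Ok(())
}
```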
- let pikevm = wrappers::PikeVM::new(&info, pre.clone(), &nfa)?; - let backtrack = - wrappers::BoundedBacktracker::new(&info, pre.clone(), &nfa)?; - // The onepass engine can of course fail to build, but we expect it to - // fail in many cases because it is an optimization that doesn't apply - // to all regexes. The 'OnePass' wrapper encapsulates this failure (and - // logs a message if it occurs). - let onepass = wrappers::OnePass::new(&info, &nfa); - // We try to encapsulate whether a particular regex engine should be - // used within each respective wrapper, but the DFAs need a reverse NFA - // to build itself, and we really do not want to build a reverse NFA if - // we know we aren't going to use the lazy DFA. So we do a config check - // up front, which is in practice the only way we won't try to use the - // DFA. - let (nfarev, hybrid, dfa) = - if !info.config().get_hybrid() && !info.config().get_dfa() { - (None, wrappers::Hybrid::none(), wrappers::DFA::none()) - } else { - // FIXME: Technically, we don't quite yet KNOW that we need - // a reverse NFA. It's possible for the DFAs below to both - // fail to build just based on the forward NFA. In which case, - // building the reverse NFA was totally wasted work. But... - // fixing this requires breaking DFA construction apart into - // two pieces: one for the forward part and another for the - // reverse part. Quite annoying. Making it worse, when building - // both DFAs fails, it's quite likely that the NFA is large and - // that it will take quite some time to build the reverse NFA - // too. So... it's really probably worth it to do this! - let nfarev = thompson::Compiler::new() - // Currently, reverse NFAs don't support capturing groups, - // so we MUST disable them. But even if we didn't have to, - // we would, because nothing in this crate does anything - // useful with capturing groups in reverse. And of course, - // the lazy DFA ignores capturing groups in all cases. - .configure( - thompson_config - .clone() - .which_captures(WhichCaptures::None) - .reverse(true), - ) - .build_many_from_hir(hirs) - .map_err(BuildError::nfa)?; - let dfa = if !info.config().get_dfa() { - wrappers::DFA::none() - } else { - wrappers::DFA::new(&info, pre.clone(), &nfa, &nfarev) - }; - let hybrid = if !info.config().get_hybrid() { - wrappers::Hybrid::none() - } else if dfa.is_some() { - debug!("skipping lazy DFA because we have a full DFA"); - wrappers::Hybrid::none() - } else { - wrappers::Hybrid::new(&info, pre.clone(), &nfa, &nfarev) - }; - (Some(nfarev), hybrid, dfa) - }; - Ok(Core { - info, - pre, - nfa, - nfarev, - pikevm, - backtrack, - onepass, - hybrid, - dfa, - }) - } - - #[cfg_attr(feature = "perf-inline", inline(always))] - fn try_search_mayfail( - &self, - cache: &mut Cache, - input: &Input<'_>, - ) -> Option, RetryFailError>> { - if let Some(e) = self.dfa.get(input) { - trace!("using full DFA for search at {:?}", input.get_span()); - Some(e.try_search(input)) - } else if let Some(e) = self.hybrid.get(input) { - trace!("using lazy DFA for search at {:?}", input.get_span()); - Some(e.try_search(&mut cache.hybrid, input)) - } else { - None - } - } - - fn search_nofail( - &self, - cache: &mut Cache, - input: &Input<'_>, - ) -> Option { - let caps = &mut cache.capmatches; - caps.set_pattern(None); - // We manually inline 'try_search_slots_nofail' here because we need to - // borrow from 'cache.capmatches' in this method, but if we do, then - // we can't pass 'cache' wholesale to to 'try_slots_no_hybrid'. 
It's a - // classic example of how the borrow checker inhibits decomposition. - // There are of course work-arounds (more types and/or interior - // mutability), but that's more annoying than this IMO. - let pid = if let Some(ref e) = self.onepass.get(input) { - trace!("using OnePass for search at {:?}", input.get_span()); - e.search_slots(&mut cache.onepass, input, caps.slots_mut()) - } else if let Some(ref e) = self.backtrack.get(input) { - trace!( - "using BoundedBacktracker for search at {:?}", - input.get_span() - ); - e.search_slots(&mut cache.backtrack, input, caps.slots_mut()) - } else { - trace!("using PikeVM for search at {:?}", input.get_span()); - let e = self.pikevm.get(); - e.search_slots(&mut cache.pikevm, input, caps.slots_mut()) - }; - caps.set_pattern(pid); - caps.get_match() - } - - fn search_half_nofail( - &self, - cache: &mut Cache, - input: &Input<'_>, - ) -> Option { - // Only the lazy/full DFA returns half-matches, since the DFA requires - // a reverse scan to find the start position. These fallback regex - // engines can find the start and end in a single pass, so we just do - // that and throw away the start offset to conform to the API. - let m = self.search_nofail(cache, input)?; - Some(HalfMatch::new(m.pattern(), m.end())) - } - - fn search_slots_nofail( - &self, - cache: &mut Cache, - input: &Input<'_>, - slots: &mut [Option], - ) -> Option { - if let Some(ref e) = self.onepass.get(input) { - trace!( - "using OnePass for capture search at {:?}", - input.get_span() - ); - e.search_slots(&mut cache.onepass, input, slots) - } else if let Some(ref e) = self.backtrack.get(input) { - trace!( - "using BoundedBacktracker for capture search at {:?}", - input.get_span() - ); - e.search_slots(&mut cache.backtrack, input, slots) - } else { - trace!( - "using PikeVM for capture search at {:?}", - input.get_span() - ); - let e = self.pikevm.get(); - e.search_slots(&mut cache.pikevm, input, slots) - } - } - - fn is_match_nofail(&self, cache: &mut Cache, input: &Input<'_>) -> bool { - if let Some(ref e) = self.onepass.get(input) { - trace!( - "using OnePass for is-match search at {:?}", - input.get_span() - ); - e.search_slots(&mut cache.onepass, input, &mut []).is_some() - } else if let Some(ref e) = self.backtrack.get(input) { - trace!( - "using BoundedBacktracker for is-match search at {:?}", - input.get_span() - ); - e.is_match(&mut cache.backtrack, input) - } else { - trace!( - "using PikeVM for is-match search at {:?}", - input.get_span() - ); - let e = self.pikevm.get(); - e.is_match(&mut cache.pikevm, input) - } - } - - fn is_capture_search_needed(&self, slots_len: usize) -> bool { - slots_len > self.nfa.group_info().implicit_slot_len() - } -} - -impl Strategy for Core { - #[cfg_attr(feature = "perf-inline", inline(always))] - fn group_info(&self) -> &GroupInfo { - self.nfa.group_info() - } - - #[cfg_attr(feature = "perf-inline", inline(always))] - fn create_cache(&self) -> Cache { - Cache { - capmatches: Captures::all(self.group_info().clone()), - pikevm: self.pikevm.create_cache(), - backtrack: self.backtrack.create_cache(), - onepass: self.onepass.create_cache(), - hybrid: self.hybrid.create_cache(), - revhybrid: wrappers::ReverseHybridCache::none(), - } - } - - #[cfg_attr(feature = "perf-inline", inline(always))] - fn reset_cache(&self, cache: &mut Cache) { - cache.pikevm.reset(&self.pikevm); - cache.backtrack.reset(&self.backtrack); - cache.onepass.reset(&self.onepass); - cache.hybrid.reset(&self.hybrid); - } - - fn is_accelerated(&self) -> bool { - 
self.pre.as_ref().map_or(false, |pre| pre.is_fast()) - } - - fn memory_usage(&self) -> usize { - self.info.memory_usage() - + self.pre.as_ref().map_or(0, |pre| pre.memory_usage()) - + self.nfa.memory_usage() - + self.nfarev.as_ref().map_or(0, |nfa| nfa.memory_usage()) - + self.onepass.memory_usage() - + self.dfa.memory_usage() - } - - #[cfg_attr(feature = "perf-inline", inline(always))] - fn search(&self, cache: &mut Cache, input: &Input<'_>) -> Option { - // We manually inline try_search_mayfail here because letting the - // compiler do it seems to produce pretty crappy codegen. - return if let Some(e) = self.dfa.get(input) { - trace!("using full DFA for full search at {:?}", input.get_span()); - match e.try_search(input) { - Ok(x) => x, - Err(_err) => { - trace!("full DFA search failed: {_err}"); - self.search_nofail(cache, input) - } - } - } else if let Some(e) = self.hybrid.get(input) { - trace!("using lazy DFA for full search at {:?}", input.get_span()); - match e.try_search(&mut cache.hybrid, input) { - Ok(x) => x, - Err(_err) => { - trace!("lazy DFA search failed: {_err}"); - self.search_nofail(cache, input) - } - } - } else { - self.search_nofail(cache, input) - }; - } - - #[cfg_attr(feature = "perf-inline", inline(always))] - fn search_half( - &self, - cache: &mut Cache, - input: &Input<'_>, - ) -> Option { - // The main difference with 'search' is that if we're using a DFA, we - // can use a single forward scan without needing to run the reverse - // DFA. - if let Some(e) = self.dfa.get(input) { - trace!("using full DFA for half search at {:?}", input.get_span()); - match e.try_search_half_fwd(input) { - Ok(x) => x, - Err(_err) => { - trace!("full DFA half search failed: {_err}"); - self.search_half_nofail(cache, input) - } - } - } else if let Some(e) = self.hybrid.get(input) { - trace!("using lazy DFA for half search at {:?}", input.get_span()); - match e.try_search_half_fwd(&mut cache.hybrid, input) { - Ok(x) => x, - Err(_err) => { - trace!("lazy DFA half search failed: {_err}"); - self.search_half_nofail(cache, input) - } - } - } else { - self.search_half_nofail(cache, input) - } - } - - #[cfg_attr(feature = "perf-inline", inline(always))] - fn is_match(&self, cache: &mut Cache, input: &Input<'_>) -> bool { - if let Some(e) = self.dfa.get(input) { - trace!( - "using full DFA for is-match search at {:?}", - input.get_span() - ); - match e.try_search_half_fwd(input) { - Ok(x) => x.is_some(), - Err(_err) => { - trace!("full DFA half search failed: {_err}"); - self.is_match_nofail(cache, input) - } - } - } else if let Some(e) = self.hybrid.get(input) { - trace!( - "using lazy DFA for is-match search at {:?}", - input.get_span() - ); - match e.try_search_half_fwd(&mut cache.hybrid, input) { - Ok(x) => x.is_some(), - Err(_err) => { - trace!("lazy DFA half search failed: {_err}"); - self.is_match_nofail(cache, input) - } - } - } else { - self.is_match_nofail(cache, input) - } - } - - #[cfg_attr(feature = "perf-inline", inline(always))] - fn search_slots( - &self, - cache: &mut Cache, - input: &Input<'_>, - slots: &mut [Option], - ) -> Option { - // Even if the regex has explicit capture groups, if the caller didn't - // provide any explicit slots, then it doesn't make sense to try and do - // extra work to get offsets for those slots. Ideally the caller should - // realize this and not call this routine in the first place, but alas, - // we try to save the caller from themselves if they do. 
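The rest of `search_slots` below finds the overall match bounds with a fast engine first and only then re-runs a slower, capture-aware engine over that bounded, pattern-anchored span. The same two-phase shape can be written against the public API (an editorial sketch; the `create_captures`, `search_captures`, and `get_group` calls are assumed from the crate's documented surface, not taken from this diff):

```rust
use regex_automata::{meta::Regex, Anchored, Input};

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let re = Regex::new(r"([0-9]{4})-([0-9]{2})-([0-9]{2})")?;
    let hay = "log entry 2024-11-14 more text";

    // Phase 1: overall bounds only; no capture groups are resolved here.
    let m = re.find(hay).expect("should match");

    // Phase 2: resolve capture groups, but only over the bounded span and
    // anchored to the pattern that matched, mirroring the fallback above.
    let mut caps = re.create_captures();
    let input = Input::new(hay)
        .span(m.start()..m.end())
        .anchored(Anchored::Pattern(m.pattern()));
    re.search_captures(&input, &mut caps);
    assert!(caps.is_match());
    let year = caps.get_group(1).map(|sp| &hay[sp.start..sp.end]);
    assert_eq!(year, Some("2024"));
    Ok(())
}
```

The design point is the same as in the code below: most callers never look at the group offsets, so paying for them only after the cheap bounds search has confirmed a match keeps the common path fast.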
- if !self.is_capture_search_needed(slots.len()) { - trace!("asked for slots unnecessarily, trying fast path"); - let m = self.search(cache, input)?; - copy_match_to_slots(m, slots); - return Some(m.pattern()); - } - // If the onepass DFA is available for this search (which only happens - // when it's anchored), then skip running a fallible DFA. The onepass - // DFA isn't as fast as a full or lazy DFA, but it is typically quite - // a bit faster than the backtracker or the PikeVM. So it isn't as - // advantageous to try and do a full/lazy DFA scan first. - // - // We still theorize that it's better to do a full/lazy DFA scan, even - // when it's anchored, because it's usually much faster and permits us - // to say "no match" much more quickly. This does hurt the case of, - // say, parsing each line in a log file into capture groups, because - // in that case, the line always matches. So the lazy DFA scan is - // usually just wasted work. But, the lazy DFA is usually quite fast - // and doesn't cost too much here. - if self.onepass.get(&input).is_some() { - return self.search_slots_nofail(cache, &input, slots); - } - let m = match self.try_search_mayfail(cache, input) { - Some(Ok(Some(m))) => m, - Some(Ok(None)) => return None, - Some(Err(_err)) => { - trace!("fast capture search failed: {_err}"); - return self.search_slots_nofail(cache, input, slots); - } - None => { - return self.search_slots_nofail(cache, input, slots); - } - }; - // At this point, now that we've found the bounds of the - // match, we need to re-run something that can resolve - // capturing groups. But we only need to run on it on the - // match bounds and not the entire haystack. - trace!( - "match found at {}..{} in capture search, \ - using another engine to find captures", - m.start(), - m.end(), - ); - let input = input - .clone() - .span(m.start()..m.end()) - .anchored(Anchored::Pattern(m.pattern())); - Some( - self.search_slots_nofail(cache, &input, slots) - .expect("should find a match"), - ) - } - - #[cfg_attr(feature = "perf-inline", inline(always))] - fn which_overlapping_matches( - &self, - cache: &mut Cache, - input: &Input<'_>, - patset: &mut PatternSet, - ) { - if let Some(e) = self.dfa.get(input) { - trace!( - "using full DFA for overlapping search at {:?}", - input.get_span() - ); - let _err = match e.try_which_overlapping_matches(input, patset) { - Ok(()) => return, - Err(err) => err, - }; - trace!("fast overlapping search failed: {_err}"); - } else if let Some(e) = self.hybrid.get(input) { - trace!( - "using lazy DFA for overlapping search at {:?}", - input.get_span() - ); - let _err = match e.try_which_overlapping_matches( - &mut cache.hybrid, - input, - patset, - ) { - Ok(()) => { - return; - } - Err(err) => err, - }; - trace!("fast overlapping search failed: {_err}"); - } - trace!( - "using PikeVM for overlapping search at {:?}", - input.get_span() - ); - let e = self.pikevm.get(); - e.which_overlapping_matches(&mut cache.pikevm, input, patset) - } -} - -#[derive(Debug)] -struct ReverseAnchored { - core: Core, -} - -impl ReverseAnchored { - fn new(core: Core) -> Result { - if !core.info.is_always_anchored_end() { - debug!( - "skipping reverse anchored optimization because \ - the regex is not always anchored at the end" - ); - return Err(core); - } - // Note that the caller can still request an anchored search even when - // the regex isn't anchored at the start. We detect that case in the - // search routines below and just fallback to the core engine. 
This - // is fine because both searches are anchored. It's just a matter of - // picking one. Falling back to the core engine is a little simpler, - // since if we used the reverse anchored approach, we'd have to add an - // extra check to ensure the match reported starts at the place where - // the caller requested the search to start. - if core.info.is_always_anchored_start() { - debug!( - "skipping reverse anchored optimization because \ - the regex is also anchored at the start" - ); - return Err(core); - } - // Only DFAs can do reverse searches (currently), so we need one of - // them in order to do this optimization. It's possible (although - // pretty unlikely) that we have neither and need to give up. - if !core.hybrid.is_some() && !core.dfa.is_some() { - debug!( - "skipping reverse anchored optimization because \ - we don't have a lazy DFA or a full DFA" - ); - return Err(core); - } - Ok(ReverseAnchored { core }) - } - - #[cfg_attr(feature = "perf-inline", inline(always))] - fn try_search_half_anchored_rev( - &self, - cache: &mut Cache, - input: &Input<'_>, - ) -> Result, RetryFailError> { - // We of course always want an anchored search. In theory, the - // underlying regex engines should automatically enable anchored - // searches since the regex is itself anchored, but this more clearly - // expresses intent and is always correct. - let input = input.clone().anchored(Anchored::Yes); - if let Some(e) = self.core.dfa.get(&input) { - trace!( - "using full DFA for reverse anchored search at {:?}", - input.get_span() - ); - e.try_search_half_rev(&input) - } else if let Some(e) = self.core.hybrid.get(&input) { - trace!( - "using lazy DFA for reverse anchored search at {:?}", - input.get_span() - ); - e.try_search_half_rev(&mut cache.hybrid, &input) - } else { - unreachable!("ReverseAnchored always has a DFA") - } - } -} - -// Note that in this impl, we don't check that 'input.end() == -// input.haystack().len()'. In particular, when that condition is false, a -// match is always impossible because we know that the regex is always anchored -// at the end (or else 'ReverseAnchored' won't be built). We don't check that -// here because the 'Regex' wrapper actually does that for us in all cases. -// Thus, in this impl, we can actually assume that the end position in 'input' -// is equivalent to the length of the haystack. -impl Strategy for ReverseAnchored { - #[cfg_attr(feature = "perf-inline", inline(always))] - fn group_info(&self) -> &GroupInfo { - self.core.group_info() - } - - #[cfg_attr(feature = "perf-inline", inline(always))] - fn create_cache(&self) -> Cache { - self.core.create_cache() - } - - #[cfg_attr(feature = "perf-inline", inline(always))] - fn reset_cache(&self, cache: &mut Cache) { - self.core.reset_cache(cache); - } - - fn is_accelerated(&self) -> bool { - // Since this is anchored at the end, a reverse anchored search is - // almost certainly guaranteed to result in a much faster search than - // a standard forward search. 
- true - } - - fn memory_usage(&self) -> usize { - self.core.memory_usage() - } - - #[cfg_attr(feature = "perf-inline", inline(always))] - fn search(&self, cache: &mut Cache, input: &Input<'_>) -> Option { - if input.get_anchored().is_anchored() { - return self.core.search(cache, input); - } - match self.try_search_half_anchored_rev(cache, input) { - Err(_err) => { - trace!("fast reverse anchored search failed: {_err}"); - self.core.search_nofail(cache, input) - } - Ok(None) => None, - Ok(Some(hm)) => { - Some(Match::new(hm.pattern(), hm.offset()..input.end())) - } - } - } - - #[cfg_attr(feature = "perf-inline", inline(always))] - fn search_half( - &self, - cache: &mut Cache, - input: &Input<'_>, - ) -> Option { - if input.get_anchored().is_anchored() { - return self.core.search_half(cache, input); - } - match self.try_search_half_anchored_rev(cache, input) { - Err(_err) => { - trace!("fast reverse anchored search failed: {_err}"); - self.core.search_half_nofail(cache, input) - } - Ok(None) => None, - Ok(Some(hm)) => { - // Careful here! 'try_search_half' is a *forward* search that - // only cares about the *end* position of a match. But - // 'hm.offset()' is actually the start of the match. So we - // actually just throw that away here and, since we know we - // have a match, return the only possible position at which a - // match can occur: input.end(). - Some(HalfMatch::new(hm.pattern(), input.end())) - } - } - } - - #[cfg_attr(feature = "perf-inline", inline(always))] - fn is_match(&self, cache: &mut Cache, input: &Input<'_>) -> bool { - if input.get_anchored().is_anchored() { - return self.core.is_match(cache, input); - } - match self.try_search_half_anchored_rev(cache, input) { - Err(_err) => { - trace!("fast reverse anchored search failed: {_err}"); - self.core.is_match_nofail(cache, input) - } - Ok(None) => false, - Ok(Some(_)) => true, - } - } - - #[cfg_attr(feature = "perf-inline", inline(always))] - fn search_slots( - &self, - cache: &mut Cache, - input: &Input<'_>, - slots: &mut [Option], - ) -> Option { - if input.get_anchored().is_anchored() { - return self.core.search_slots(cache, input, slots); - } - match self.try_search_half_anchored_rev(cache, input) { - Err(_err) => { - trace!("fast reverse anchored search failed: {_err}"); - self.core.search_slots_nofail(cache, input, slots) - } - Ok(None) => None, - Ok(Some(hm)) => { - if !self.core.is_capture_search_needed(slots.len()) { - trace!("asked for slots unnecessarily, skipping captures"); - let m = Match::new(hm.pattern(), hm.offset()..input.end()); - copy_match_to_slots(m, slots); - return Some(m.pattern()); - } - let start = hm.offset(); - let input = input - .clone() - .span(start..input.end()) - .anchored(Anchored::Pattern(hm.pattern())); - self.core.search_slots_nofail(cache, &input, slots) - } - } - } - - #[cfg_attr(feature = "perf-inline", inline(always))] - fn which_overlapping_matches( - &self, - cache: &mut Cache, - input: &Input<'_>, - patset: &mut PatternSet, - ) { - // It seems like this could probably benefit from a reverse anchored - // optimization, perhaps by doing an overlapping reverse search (which - // the DFAs do support). I haven't given it much thought though, and - // I'm currently focus more on the single pattern case. 
- self.core.which_overlapping_matches(cache, input, patset) - } -} - -#[derive(Debug)] -struct ReverseSuffix { - core: Core, - pre: Prefilter, -} - -impl ReverseSuffix { - fn new(core: Core, hirs: &[&Hir]) -> Result { - if !core.info.config().get_auto_prefilter() { - debug!( - "skipping reverse suffix optimization because \ - automatic prefilters are disabled" - ); - return Err(core); - } - // Like the reverse inner optimization, we don't do this for regexes - // that are always anchored. It could lead to scanning too much, but - // could say "no match" much more quickly than running the regex - // engine if the initial literal scan doesn't match. With that said, - // the reverse suffix optimization has lower overhead, since it only - // requires a reverse scan after a literal match to confirm or reject - // the match. (Although, in the case of confirmation, it then needs to - // do another forward scan to find the end position.) - // - // Note that the caller can still request an anchored search even - // when the regex isn't anchored. We detect that case in the search - // routines below and just fallback to the core engine. Currently this - // optimization assumes all searches are unanchored, so if we do want - // to enable this optimization for anchored searches, it will need a - // little work to support it. - if core.info.is_always_anchored_start() { - debug!( - "skipping reverse suffix optimization because \ - the regex is always anchored at the start", - ); - return Err(core); - } - // Only DFAs can do reverse searches (currently), so we need one of - // them in order to do this optimization. It's possible (although - // pretty unlikely) that we have neither and need to give up. - if !core.hybrid.is_some() && !core.dfa.is_some() { - debug!( - "skipping reverse suffix optimization because \ - we don't have a lazy DFA or a full DFA" - ); - return Err(core); - } - if core.pre.as_ref().map_or(false, |p| p.is_fast()) { - debug!( - "skipping reverse suffix optimization because \ - we already have a prefilter that we think is fast" - ); - return Err(core); - } - let kind = core.info.config().get_match_kind(); - let suffixes = crate::util::prefilter::suffixes(kind, hirs); - let lcs = match suffixes.longest_common_suffix() { - None => { - debug!( - "skipping reverse suffix optimization because \ - a longest common suffix could not be found", - ); - return Err(core); - } - Some(lcs) if lcs.is_empty() => { - debug!( - "skipping reverse suffix optimization because \ - the longest common suffix is the empty string", - ); - return Err(core); - } - Some(lcs) => lcs, - }; - let pre = match Prefilter::new(kind, &[lcs]) { - Some(pre) => pre, - None => { - debug!( - "skipping reverse suffix optimization because \ - a prefilter could not be constructed from the \ - longest common suffix", - ); - return Err(core); - } - }; - if !pre.is_fast() { - debug!( - "skipping reverse suffix optimization because \ - while we have a suffix prefilter, it is not \ - believed to be 'fast'" - ); - return Err(core); - } - Ok(ReverseSuffix { core, pre }) - } - - #[cfg_attr(feature = "perf-inline", inline(always))] - fn try_search_half_start( - &self, - cache: &mut Cache, - input: &Input<'_>, - ) -> Result, RetryError> { - let mut span = input.get_span(); - let mut min_start = 0; - loop { - let litmatch = match self.pre.find(input.haystack(), span) { - None => return Ok(None), - Some(span) => span, - }; - trace!("reverse suffix scan found suffix match at {litmatch:?}"); - let revinput = input - .clone() - 
.anchored(Anchored::Yes) - .span(input.start()..litmatch.end); - match self - .try_search_half_rev_limited(cache, &revinput, min_start)? - { - None => { - if span.start >= span.end { - break; - } - span.start = litmatch.start.checked_add(1).unwrap(); - } - Some(hm) => return Ok(Some(hm)), - } - min_start = litmatch.end; - } - Ok(None) - } - - #[cfg_attr(feature = "perf-inline", inline(always))] - fn try_search_half_fwd( - &self, - cache: &mut Cache, - input: &Input<'_>, - ) -> Result, RetryFailError> { - if let Some(e) = self.core.dfa.get(&input) { - trace!( - "using full DFA for forward reverse suffix search at {:?}", - input.get_span() - ); - e.try_search_half_fwd(&input) - } else if let Some(e) = self.core.hybrid.get(&input) { - trace!( - "using lazy DFA for forward reverse suffix search at {:?}", - input.get_span() - ); - e.try_search_half_fwd(&mut cache.hybrid, &input) - } else { - unreachable!("ReverseSuffix always has a DFA") - } - } - - #[cfg_attr(feature = "perf-inline", inline(always))] - fn try_search_half_rev_limited( - &self, - cache: &mut Cache, - input: &Input<'_>, - min_start: usize, - ) -> Result, RetryError> { - if let Some(e) = self.core.dfa.get(&input) { - trace!( - "using full DFA for reverse suffix search at {:?}, \ - but will be stopped at {} to avoid quadratic behavior", - input.get_span(), - min_start, - ); - e.try_search_half_rev_limited(&input, min_start) - } else if let Some(e) = self.core.hybrid.get(&input) { - trace!( - "using lazy DFA for reverse suffix search at {:?}, \ - but will be stopped at {} to avoid quadratic behavior", - input.get_span(), - min_start, - ); - e.try_search_half_rev_limited(&mut cache.hybrid, &input, min_start) - } else { - unreachable!("ReverseSuffix always has a DFA") - } - } -} - -impl Strategy for ReverseSuffix { - #[cfg_attr(feature = "perf-inline", inline(always))] - fn group_info(&self) -> &GroupInfo { - self.core.group_info() - } - - #[cfg_attr(feature = "perf-inline", inline(always))] - fn create_cache(&self) -> Cache { - self.core.create_cache() - } - - #[cfg_attr(feature = "perf-inline", inline(always))] - fn reset_cache(&self, cache: &mut Cache) { - self.core.reset_cache(cache); - } - - fn is_accelerated(&self) -> bool { - self.pre.is_fast() - } - - fn memory_usage(&self) -> usize { - self.core.memory_usage() + self.pre.memory_usage() - } - - #[cfg_attr(feature = "perf-inline", inline(always))] - fn search(&self, cache: &mut Cache, input: &Input<'_>) -> Option { - if input.get_anchored().is_anchored() { - return self.core.search(cache, input); - } - match self.try_search_half_start(cache, input) { - Err(RetryError::Quadratic(_err)) => { - trace!("reverse suffix optimization failed: {_err}"); - self.core.search(cache, input) - } - Err(RetryError::Fail(_err)) => { - trace!("reverse suffix reverse fast search failed: {_err}"); - self.core.search_nofail(cache, input) - } - Ok(None) => None, - Ok(Some(hm_start)) => { - let fwdinput = input - .clone() - .anchored(Anchored::Pattern(hm_start.pattern())) - .span(hm_start.offset()..input.end()); - match self.try_search_half_fwd(cache, &fwdinput) { - Err(_err) => { - trace!( - "reverse suffix forward fast search failed: {_err}" - ); - self.core.search_nofail(cache, input) - } - Ok(None) => { - unreachable!( - "suffix match plus reverse match implies \ - there must be a match", - ) - } - Ok(Some(hm_end)) => Some(Match::new( - hm_start.pattern(), - hm_start.offset()..hm_end.offset(), - )), - } - } - } - } - - #[cfg_attr(feature = "perf-inline", inline(always))] - fn search_half( - &self, 
- cache: &mut Cache, - input: &Input<'_>, - ) -> Option { - if input.get_anchored().is_anchored() { - return self.core.search_half(cache, input); - } - match self.try_search_half_start(cache, input) { - Err(RetryError::Quadratic(_err)) => { - trace!("reverse suffix half optimization failed: {_err}"); - self.core.search_half(cache, input) - } - Err(RetryError::Fail(_err)) => { - trace!( - "reverse suffix reverse fast half search failed: {_err}" - ); - self.core.search_half_nofail(cache, input) - } - Ok(None) => None, - Ok(Some(hm_start)) => { - // This is a bit subtle. It is tempting to just stop searching - // at this point and return a half-match with an offset - // corresponding to where the suffix was found. But the suffix - // match does not necessarily correspond to the end of the - // proper leftmost-first match. Consider /[a-z]+ing/ against - // 'tingling'. The first suffix match is the first 'ing', and - // the /[a-z]+/ matches the 't'. So if we stopped here, then - // we'd report 'ting' as the match. But 'tingling' is the - // correct match because of greediness. - let fwdinput = input - .clone() - .anchored(Anchored::Pattern(hm_start.pattern())) - .span(hm_start.offset()..input.end()); - match self.try_search_half_fwd(cache, &fwdinput) { - Err(_err) => { - trace!( - "reverse suffix forward fast search failed: {_err}" - ); - self.core.search_half_nofail(cache, input) - } - Ok(None) => { - unreachable!( - "suffix match plus reverse match implies \ - there must be a match", - ) - } - Ok(Some(hm_end)) => Some(hm_end), - } - } - } - } - - #[cfg_attr(feature = "perf-inline", inline(always))] - fn is_match(&self, cache: &mut Cache, input: &Input<'_>) -> bool { - if input.get_anchored().is_anchored() { - return self.core.is_match(cache, input); - } - match self.try_search_half_start(cache, input) { - Err(RetryError::Quadratic(_err)) => { - trace!("reverse suffix half optimization failed: {_err}"); - self.core.is_match_nofail(cache, input) - } - Err(RetryError::Fail(_err)) => { - trace!( - "reverse suffix reverse fast half search failed: {_err}" - ); - self.core.is_match_nofail(cache, input) - } - Ok(None) => false, - Ok(Some(_)) => true, - } - } - - #[cfg_attr(feature = "perf-inline", inline(always))] - fn search_slots( - &self, - cache: &mut Cache, - input: &Input<'_>, - slots: &mut [Option], - ) -> Option { - if input.get_anchored().is_anchored() { - return self.core.search_slots(cache, input, slots); - } - if !self.core.is_capture_search_needed(slots.len()) { - trace!("asked for slots unnecessarily, trying fast path"); - let m = self.search(cache, input)?; - copy_match_to_slots(m, slots); - return Some(m.pattern()); - } - let hm_start = match self.try_search_half_start(cache, input) { - Err(RetryError::Quadratic(_err)) => { - trace!("reverse suffix captures optimization failed: {_err}"); - return self.core.search_slots(cache, input, slots); - } - Err(RetryError::Fail(_err)) => { - trace!( - "reverse suffix reverse fast captures search failed: \ - {_err}" - ); - return self.core.search_slots_nofail(cache, input, slots); - } - Ok(None) => return None, - Ok(Some(hm_start)) => hm_start, - }; - trace!( - "match found at {}..{} in capture search, \ - using another engine to find captures", - hm_start.offset(), - input.end(), - ); - let start = hm_start.offset(); - let input = input - .clone() - .span(start..input.end()) - .anchored(Anchored::Pattern(hm_start.pattern())); - self.core.search_slots_nofail(cache, &input, slots) - } - - #[cfg_attr(feature = "perf-inline", inline(always))] - fn 
which_overlapping_matches( - &self, - cache: &mut Cache, - input: &Input<'_>, - patset: &mut PatternSet, - ) { - self.core.which_overlapping_matches(cache, input, patset) - } -} - -#[derive(Debug)] -struct ReverseInner { - core: Core, - preinner: Prefilter, - nfarev: NFA, - hybrid: wrappers::ReverseHybrid, - dfa: wrappers::ReverseDFA, -} - -impl ReverseInner { - fn new(core: Core, hirs: &[&Hir]) -> Result { - if !core.info.config().get_auto_prefilter() { - debug!( - "skipping reverse inner optimization because \ - automatic prefilters are disabled" - ); - return Err(core); - } - // Currently we hard-code the assumption of leftmost-first match - // semantics. This isn't a huge deal because 'all' semantics tend to - // only be used for forward overlapping searches with multiple regexes, - // and this optimization only supports a single pattern at the moment. - if core.info.config().get_match_kind() != MatchKind::LeftmostFirst { - debug!( - "skipping reverse inner optimization because \ - match kind is {:?} but this only supports leftmost-first", - core.info.config().get_match_kind(), - ); - return Err(core); - } - // It's likely that a reverse inner scan has too much overhead for it - // to be worth it when the regex is anchored at the start. It is - // possible for it to be quite a bit faster if the initial literal - // scan fails to detect a match, in which case, we can say "no match" - // very quickly. But this could be undesirable, e.g., scanning too far - // or when the literal scan matches. If it matches, then confirming the - // match requires a reverse scan followed by a forward scan to confirm - // or reject, which is a fair bit of work. - // - // Note that the caller can still request an anchored search even - // when the regex isn't anchored. We detect that case in the search - // routines below and just fallback to the core engine. Currently this - // optimization assumes all searches are unanchored, so if we do want - // to enable this optimization for anchored searches, it will need a - // little work to support it. - if core.info.is_always_anchored_start() { - debug!( - "skipping reverse inner optimization because \ - the regex is always anchored at the start", - ); - return Err(core); - } - // Only DFAs can do reverse searches (currently), so we need one of - // them in order to do this optimization. It's possible (although - // pretty unlikely) that we have neither and need to give up. - if !core.hybrid.is_some() && !core.dfa.is_some() { - debug!( - "skipping reverse inner optimization because \ - we don't have a lazy DFA or a full DFA" - ); - return Err(core); - } - if core.pre.as_ref().map_or(false, |p| p.is_fast()) { - debug!( - "skipping reverse inner optimization because \ - we already have a prefilter that we think is fast" - ); - return Err(core); - } else if core.pre.is_some() { - debug!( - "core engine has a prefix prefilter, but it is \ - probably not fast, so continuing with attempt to \ - use reverse inner prefilter" - ); - } - let (concat_prefix, preinner) = match reverse_inner::extract(hirs) { - Some(x) => x, - // N.B. the 'extract' function emits debug messages explaining - // why we bailed out here. 
- None => return Err(core), - }; - debug!("building reverse NFA for prefix before inner literal"); - let mut lookm = LookMatcher::new(); - lookm.set_line_terminator(core.info.config().get_line_terminator()); - let thompson_config = thompson::Config::new() - .reverse(true) - .utf8(core.info.config().get_utf8_empty()) - .nfa_size_limit(core.info.config().get_nfa_size_limit()) - .shrink(false) - .which_captures(WhichCaptures::None) - .look_matcher(lookm); - let result = thompson::Compiler::new() - .configure(thompson_config) - .build_from_hir(&concat_prefix); - let nfarev = match result { - Ok(nfarev) => nfarev, - Err(_err) => { - debug!( - "skipping reverse inner optimization because the \ - reverse NFA failed to build: {}", - _err, - ); - return Err(core); - } - }; - debug!("building reverse DFA for prefix before inner literal"); - let dfa = if !core.info.config().get_dfa() { - wrappers::ReverseDFA::none() - } else { - wrappers::ReverseDFA::new(&core.info, &nfarev) - }; - let hybrid = if !core.info.config().get_hybrid() { - wrappers::ReverseHybrid::none() - } else if dfa.is_some() { - debug!( - "skipping lazy DFA for reverse inner optimization \ - because we have a full DFA" - ); - wrappers::ReverseHybrid::none() - } else { - wrappers::ReverseHybrid::new(&core.info, &nfarev) - }; - Ok(ReverseInner { core, preinner, nfarev, hybrid, dfa }) - } - - #[cfg_attr(feature = "perf-inline", inline(always))] - fn try_search_full( - &self, - cache: &mut Cache, - input: &Input<'_>, - ) -> Result, RetryError> { - let mut span = input.get_span(); - let mut min_match_start = 0; - let mut min_pre_start = 0; - loop { - let litmatch = match self.preinner.find(input.haystack(), span) { - None => return Ok(None), - Some(span) => span, - }; - if litmatch.start < min_pre_start { - trace!( - "found inner prefilter match at {litmatch:?}, which starts \ - before the end of the last forward scan at {min_pre_start}, \ - quitting to avoid quadratic behavior", - ); - return Err(RetryError::Quadratic(RetryQuadraticError::new())); - } - trace!("reverse inner scan found inner match at {litmatch:?}"); - let revinput = input - .clone() - .anchored(Anchored::Yes) - .span(input.start()..litmatch.start); - // Note that in addition to the literal search above scanning past - // our minimum start point, this routine can also return an error - // as a result of detecting possible quadratic behavior if the - // reverse scan goes past the minimum start point. That is, the - // literal search might not, but the reverse regex search for the - // prefix might! - match self.try_search_half_rev_limited( - cache, - &revinput, - min_match_start, - )? { - None => { - if span.start >= span.end { - break; - } - span.start = litmatch.start.checked_add(1).unwrap(); - } - Some(hm_start) => { - let fwdinput = input - .clone() - .anchored(Anchored::Pattern(hm_start.pattern())) - .span(hm_start.offset()..input.end()); - match self.try_search_half_fwd_stopat(cache, &fwdinput)? 
{ - Err(stopat) => { - min_pre_start = stopat; - span.start = - litmatch.start.checked_add(1).unwrap(); - } - Ok(hm_end) => { - return Ok(Some(Match::new( - hm_start.pattern(), - hm_start.offset()..hm_end.offset(), - ))) - } - } - } - } - min_match_start = litmatch.end; - } - Ok(None) - } - - #[cfg_attr(feature = "perf-inline", inline(always))] - fn try_search_half_fwd_stopat( - &self, - cache: &mut Cache, - input: &Input<'_>, - ) -> Result, RetryFailError> { - if let Some(e) = self.core.dfa.get(&input) { - trace!( - "using full DFA for forward reverse inner search at {:?}", - input.get_span() - ); - e.try_search_half_fwd_stopat(&input) - } else if let Some(e) = self.core.hybrid.get(&input) { - trace!( - "using lazy DFA for forward reverse inner search at {:?}", - input.get_span() - ); - e.try_search_half_fwd_stopat(&mut cache.hybrid, &input) - } else { - unreachable!("ReverseInner always has a DFA") - } - } - - #[cfg_attr(feature = "perf-inline", inline(always))] - fn try_search_half_rev_limited( - &self, - cache: &mut Cache, - input: &Input<'_>, - min_start: usize, - ) -> Result, RetryError> { - if let Some(e) = self.dfa.get(&input) { - trace!( - "using full DFA for reverse inner search at {:?}, \ - but will be stopped at {} to avoid quadratic behavior", - input.get_span(), - min_start, - ); - e.try_search_half_rev_limited(&input, min_start) - } else if let Some(e) = self.hybrid.get(&input) { - trace!( - "using lazy DFA for reverse inner search at {:?}, \ - but will be stopped at {} to avoid quadratic behavior", - input.get_span(), - min_start, - ); - e.try_search_half_rev_limited( - &mut cache.revhybrid, - &input, - min_start, - ) - } else { - unreachable!("ReverseInner always has a DFA") - } - } -} - -impl Strategy for ReverseInner { - #[cfg_attr(feature = "perf-inline", inline(always))] - fn group_info(&self) -> &GroupInfo { - self.core.group_info() - } - - #[cfg_attr(feature = "perf-inline", inline(always))] - fn create_cache(&self) -> Cache { - let mut cache = self.core.create_cache(); - cache.revhybrid = self.hybrid.create_cache(); - cache - } - - #[cfg_attr(feature = "perf-inline", inline(always))] - fn reset_cache(&self, cache: &mut Cache) { - self.core.reset_cache(cache); - cache.revhybrid.reset(&self.hybrid); - } - - fn is_accelerated(&self) -> bool { - self.preinner.is_fast() - } - - fn memory_usage(&self) -> usize { - self.core.memory_usage() - + self.preinner.memory_usage() - + self.nfarev.memory_usage() - + self.dfa.memory_usage() - } - - #[cfg_attr(feature = "perf-inline", inline(always))] - fn search(&self, cache: &mut Cache, input: &Input<'_>) -> Option { - if input.get_anchored().is_anchored() { - return self.core.search(cache, input); - } - match self.try_search_full(cache, input) { - Err(RetryError::Quadratic(_err)) => { - trace!("reverse inner optimization failed: {_err}"); - self.core.search(cache, input) - } - Err(RetryError::Fail(_err)) => { - trace!("reverse inner fast search failed: {_err}"); - self.core.search_nofail(cache, input) - } - Ok(matornot) => matornot, - } - } - - #[cfg_attr(feature = "perf-inline", inline(always))] - fn search_half( - &self, - cache: &mut Cache, - input: &Input<'_>, - ) -> Option { - if input.get_anchored().is_anchored() { - return self.core.search_half(cache, input); - } - match self.try_search_full(cache, input) { - Err(RetryError::Quadratic(_err)) => { - trace!("reverse inner half optimization failed: {_err}"); - self.core.search_half(cache, input) - } - Err(RetryError::Fail(_err)) => { - trace!("reverse inner fast half search 
failed: {_err}"); - self.core.search_half_nofail(cache, input) - } - Ok(None) => None, - Ok(Some(m)) => Some(HalfMatch::new(m.pattern(), m.end())), - } - } - - #[cfg_attr(feature = "perf-inline", inline(always))] - fn is_match(&self, cache: &mut Cache, input: &Input<'_>) -> bool { - if input.get_anchored().is_anchored() { - return self.core.is_match(cache, input); - } - match self.try_search_full(cache, input) { - Err(RetryError::Quadratic(_err)) => { - trace!("reverse inner half optimization failed: {_err}"); - self.core.is_match_nofail(cache, input) - } - Err(RetryError::Fail(_err)) => { - trace!("reverse inner fast half search failed: {_err}"); - self.core.is_match_nofail(cache, input) - } - Ok(None) => false, - Ok(Some(_)) => true, - } - } - - #[cfg_attr(feature = "perf-inline", inline(always))] - fn search_slots( - &self, - cache: &mut Cache, - input: &Input<'_>, - slots: &mut [Option], - ) -> Option { - if input.get_anchored().is_anchored() { - return self.core.search_slots(cache, input, slots); - } - if !self.core.is_capture_search_needed(slots.len()) { - trace!("asked for slots unnecessarily, trying fast path"); - let m = self.search(cache, input)?; - copy_match_to_slots(m, slots); - return Some(m.pattern()); - } - let m = match self.try_search_full(cache, input) { - Err(RetryError::Quadratic(_err)) => { - trace!("reverse inner captures optimization failed: {_err}"); - return self.core.search_slots(cache, input, slots); - } - Err(RetryError::Fail(_err)) => { - trace!("reverse inner fast captures search failed: {_err}"); - return self.core.search_slots_nofail(cache, input, slots); - } - Ok(None) => return None, - Ok(Some(m)) => m, - }; - trace!( - "match found at {}..{} in capture search, \ - using another engine to find captures", - m.start(), - m.end(), - ); - let input = input - .clone() - .span(m.start()..m.end()) - .anchored(Anchored::Pattern(m.pattern())); - self.core.search_slots_nofail(cache, &input, slots) - } - - #[cfg_attr(feature = "perf-inline", inline(always))] - fn which_overlapping_matches( - &self, - cache: &mut Cache, - input: &Input<'_>, - patset: &mut PatternSet, - ) { - self.core.which_overlapping_matches(cache, input, patset) - } -} - -/// Copies the offsets in the given match to the corresponding positions in -/// `slots`. -/// -/// In effect, this sets the slots corresponding to the implicit group for the -/// pattern in the given match. If the indices for the corresponding slots do -/// not exist, then no slots are set. -/// -/// This is useful when the caller provides slots (or captures), but you use a -/// regex engine that doesn't operate on slots (like a lazy DFA). This function -/// lets you map the match you get back to the slots provided by the caller. -#[cfg_attr(feature = "perf-inline", inline(always))] -fn copy_match_to_slots(m: Match, slots: &mut [Option]) { - let slot_start = m.pattern().as_usize() * 2; - let slot_end = slot_start + 1; - if let Some(slot) = slots.get_mut(slot_start) { - *slot = NonMaxUsize::new(m.start()); - } - if let Some(slot) = slots.get_mut(slot_end) { - *slot = NonMaxUsize::new(m.end()); - } -} diff --git a/vendor/regex-automata/src/meta/wrappers.rs b/vendor/regex-automata/src/meta/wrappers.rs deleted file mode 100644 index 6651cb90761874..00000000000000 --- a/vendor/regex-automata/src/meta/wrappers.rs +++ /dev/null @@ -1,1336 +0,0 @@ -/*! -This module contains a boat load of wrappers around each of our internal regex -engines. They encapsulate a few things: - -1. 
The wrappers manage the conditional existence of the regex engine. Namely, -the PikeVM is the only required regex engine. The rest are optional. These -wrappers present a uniform API regardless of which engines are available. And -availability might be determined by compile time features or by dynamic -configuration via `meta::Config`. Encapsulating the conditional compilation -features is in particular a huge simplification for the higher level code that -composes these engines. -2. The wrappers manage construction of each engine, including skipping it if -the engine is unavailable or configured to not be used. -3. The wrappers manage whether an engine *can* be used for a particular -search configuration. For example, `BoundedBacktracker::get` only returns a -backtracking engine when the haystack is bigger than the maximum supported -length. The wrappers also sometimes take a position on when an engine *ought* -to be used, but only in cases where the logic is extremely local to the engine -itself. Otherwise, things like "choose between the backtracker and the one-pass -DFA" are managed by the higher level meta strategy code. - -There are also corresponding wrappers for the various `Cache` types for each -regex engine that needs them. If an engine is unavailable or not used, then a -cache for it will *not* actually be allocated. -*/ - -use alloc::vec::Vec; - -use crate::{ - meta::{ - error::{BuildError, RetryError, RetryFailError}, - regex::RegexInfo, - }, - nfa::thompson::{pikevm, NFA}, - util::{prefilter::Prefilter, primitives::NonMaxUsize}, - HalfMatch, Input, Match, MatchKind, PatternID, PatternSet, -}; - -#[cfg(feature = "dfa-build")] -use crate::dfa; -#[cfg(feature = "dfa-onepass")] -use crate::dfa::onepass; -#[cfg(feature = "hybrid")] -use crate::hybrid; -#[cfg(feature = "nfa-backtrack")] -use crate::nfa::thompson::backtrack; - -#[derive(Debug)] -pub(crate) struct PikeVM(PikeVMEngine); - -impl PikeVM { - pub(crate) fn new( - info: &RegexInfo, - pre: Option, - nfa: &NFA, - ) -> Result { - PikeVMEngine::new(info, pre, nfa).map(PikeVM) - } - - pub(crate) fn create_cache(&self) -> PikeVMCache { - PikeVMCache::none() - } - - #[cfg_attr(feature = "perf-inline", inline(always))] - pub(crate) fn get(&self) -> &PikeVMEngine { - &self.0 - } -} - -#[derive(Debug)] -pub(crate) struct PikeVMEngine(pikevm::PikeVM); - -impl PikeVMEngine { - pub(crate) fn new( - info: &RegexInfo, - pre: Option, - nfa: &NFA, - ) -> Result { - let pikevm_config = pikevm::Config::new() - .match_kind(info.config().get_match_kind()) - .prefilter(pre); - let engine = pikevm::Builder::new() - .configure(pikevm_config) - .build_from_nfa(nfa.clone()) - .map_err(BuildError::nfa)?; - debug!("PikeVM built"); - Ok(PikeVMEngine(engine)) - } - - #[cfg_attr(feature = "perf-inline", inline(always))] - pub(crate) fn is_match( - &self, - cache: &mut PikeVMCache, - input: &Input<'_>, - ) -> bool { - self.0.is_match(cache.get(&self.0), input.clone()) - } - - #[cfg_attr(feature = "perf-inline", inline(always))] - pub(crate) fn search_slots( - &self, - cache: &mut PikeVMCache, - input: &Input<'_>, - slots: &mut [Option], - ) -> Option { - self.0.search_slots(cache.get(&self.0), input, slots) - } - - #[cfg_attr(feature = "perf-inline", inline(always))] - pub(crate) fn which_overlapping_matches( - &self, - cache: &mut PikeVMCache, - input: &Input<'_>, - patset: &mut PatternSet, - ) { - self.0.which_overlapping_matches(cache.get(&self.0), input, patset) - } -} - -#[derive(Clone, Debug)] -pub(crate) struct PikeVMCache(Option); - -impl 
PikeVMCache { - pub(crate) fn none() -> PikeVMCache { - PikeVMCache(None) - } - - pub(crate) fn reset(&mut self, builder: &PikeVM) { - self.get(&builder.get().0).reset(&builder.get().0); - } - - pub(crate) fn memory_usage(&self) -> usize { - self.0.as_ref().map_or(0, |c| c.memory_usage()) - } - - fn get(&mut self, vm: &pikevm::PikeVM) -> &mut pikevm::Cache { - self.0.get_or_insert_with(|| vm.create_cache()) - } -} - -#[derive(Debug)] -pub(crate) struct BoundedBacktracker(Option); - -impl BoundedBacktracker { - pub(crate) fn new( - info: &RegexInfo, - pre: Option, - nfa: &NFA, - ) -> Result { - BoundedBacktrackerEngine::new(info, pre, nfa).map(BoundedBacktracker) - } - - pub(crate) fn create_cache(&self) -> BoundedBacktrackerCache { - BoundedBacktrackerCache::none() - } - - #[cfg_attr(feature = "perf-inline", inline(always))] - pub(crate) fn get( - &self, - input: &Input<'_>, - ) -> Option<&BoundedBacktrackerEngine> { - let engine = self.0.as_ref()?; - // It is difficult to make the backtracker give up early if it is - // guaranteed to eventually wind up in a match state. This is because - // of the greedy nature of a backtracker: it just blindly mushes - // forward. Every other regex engine is able to give up more quickly, - // so even if the backtracker might be able to zip through faster than - // (say) the PikeVM, we prefer the theoretical benefit that some other - // engine might be able to scan much less of the haystack than the - // backtracker. - // - // Now, if the haystack is really short already, then we allow the - // backtracker to run. (This hasn't been litigated quantitatively with - // benchmarks. Just a hunch.) - if input.get_earliest() && input.haystack().len() > 128 { - return None; - } - // If the backtracker is just going to return an error because the - // haystack is too long, then obviously do not use it. - if input.get_span().len() > engine.max_haystack_len() { - return None; - } - Some(engine) - } -} - -#[derive(Debug)] -pub(crate) struct BoundedBacktrackerEngine( - #[cfg(feature = "nfa-backtrack")] backtrack::BoundedBacktracker, - #[cfg(not(feature = "nfa-backtrack"))] (), -); - -impl BoundedBacktrackerEngine { - pub(crate) fn new( - info: &RegexInfo, - pre: Option, - nfa: &NFA, - ) -> Result, BuildError> { - #[cfg(feature = "nfa-backtrack")] - { - if !info.config().get_backtrack() - || info.config().get_match_kind() != MatchKind::LeftmostFirst - { - return Ok(None); - } - let backtrack_config = backtrack::Config::new().prefilter(pre); - let engine = backtrack::Builder::new() - .configure(backtrack_config) - .build_from_nfa(nfa.clone()) - .map_err(BuildError::nfa)?; - debug!( - "BoundedBacktracker built (max haystack length: {:?})", - engine.max_haystack_len() - ); - Ok(Some(BoundedBacktrackerEngine(engine))) - } - #[cfg(not(feature = "nfa-backtrack"))] - { - Ok(None) - } - } - - #[cfg_attr(feature = "perf-inline", inline(always))] - pub(crate) fn is_match( - &self, - cache: &mut BoundedBacktrackerCache, - input: &Input<'_>, - ) -> bool { - #[cfg(feature = "nfa-backtrack")] - { - // OK because we only permit access to this engine when we know - // the haystack is short enough for the backtracker to run without - // reporting an error. - self.0.try_is_match(cache.get(&self.0), input.clone()).unwrap() - } - #[cfg(not(feature = "nfa-backtrack"))] - { - // Impossible to reach because this engine is never constructed - // if the requisite features aren't enabled. 
- unreachable!() - } - } - - #[cfg_attr(feature = "perf-inline", inline(always))] - pub(crate) fn search_slots( - &self, - cache: &mut BoundedBacktrackerCache, - input: &Input<'_>, - slots: &mut [Option], - ) -> Option { - #[cfg(feature = "nfa-backtrack")] - { - // OK because we only permit access to this engine when we know - // the haystack is short enough for the backtracker to run without - // reporting an error. - self.0.try_search_slots(cache.get(&self.0), input, slots).unwrap() - } - #[cfg(not(feature = "nfa-backtrack"))] - { - // Impossible to reach because this engine is never constructed - // if the requisite features aren't enabled. - unreachable!() - } - } - - #[cfg_attr(feature = "perf-inline", inline(always))] - fn max_haystack_len(&self) -> usize { - #[cfg(feature = "nfa-backtrack")] - { - self.0.max_haystack_len() - } - #[cfg(not(feature = "nfa-backtrack"))] - { - // Impossible to reach because this engine is never constructed - // if the requisite features aren't enabled. - unreachable!() - } - } -} - -#[derive(Clone, Debug)] -pub(crate) struct BoundedBacktrackerCache( - #[cfg(feature = "nfa-backtrack")] Option, - #[cfg(not(feature = "nfa-backtrack"))] (), -); - -impl BoundedBacktrackerCache { - pub(crate) fn none() -> BoundedBacktrackerCache { - #[cfg(feature = "nfa-backtrack")] - { - BoundedBacktrackerCache(None) - } - #[cfg(not(feature = "nfa-backtrack"))] - { - BoundedBacktrackerCache(()) - } - } - - pub(crate) fn reset(&mut self, builder: &BoundedBacktracker) { - #[cfg(feature = "nfa-backtrack")] - if let Some(ref e) = builder.0 { - self.get(&e.0).reset(&e.0); - } - } - - pub(crate) fn memory_usage(&self) -> usize { - #[cfg(feature = "nfa-backtrack")] - { - self.0.as_ref().map_or(0, |c| c.memory_usage()) - } - #[cfg(not(feature = "nfa-backtrack"))] - { - 0 - } - } - - #[cfg(feature = "nfa-backtrack")] - fn get( - &mut self, - bb: &backtrack::BoundedBacktracker, - ) -> &mut backtrack::Cache { - self.0.get_or_insert_with(|| bb.create_cache()) - } -} - -#[derive(Debug)] -pub(crate) struct OnePass(Option); - -impl OnePass { - pub(crate) fn new(info: &RegexInfo, nfa: &NFA) -> OnePass { - OnePass(OnePassEngine::new(info, nfa)) - } - - pub(crate) fn create_cache(&self) -> OnePassCache { - OnePassCache::new(self) - } - - #[cfg_attr(feature = "perf-inline", inline(always))] - pub(crate) fn get(&self, input: &Input<'_>) -> Option<&OnePassEngine> { - let engine = self.0.as_ref()?; - if !input.get_anchored().is_anchored() - && !engine.get_nfa().is_always_start_anchored() - { - return None; - } - Some(engine) - } - - pub(crate) fn memory_usage(&self) -> usize { - self.0.as_ref().map_or(0, |e| e.memory_usage()) - } -} - -#[derive(Debug)] -pub(crate) struct OnePassEngine( - #[cfg(feature = "dfa-onepass")] onepass::DFA, - #[cfg(not(feature = "dfa-onepass"))] (), -); - -impl OnePassEngine { - pub(crate) fn new(info: &RegexInfo, nfa: &NFA) -> Option { - #[cfg(feature = "dfa-onepass")] - { - if !info.config().get_onepass() { - return None; - } - // In order to even attempt building a one-pass DFA, we require - // that we either have at least one explicit capturing group or - // there's a Unicode word boundary somewhere. If we don't have - // either of these things, then the lazy DFA will almost certainly - // be usable and be much faster. 
The only case where it might - // not is if the lazy DFA isn't utilizing its cache effectively, - // but in those cases, the underlying regex is almost certainly - // not one-pass or is too big to fit within the current one-pass - // implementation limits. - if info.props_union().explicit_captures_len() == 0 - && !info.props_union().look_set().contains_word_unicode() - { - debug!("not building OnePass because it isn't worth it"); - return None; - } - let onepass_config = onepass::Config::new() - .match_kind(info.config().get_match_kind()) - // Like for the lazy DFA, we unconditionally enable this - // because it doesn't cost much and makes the API more - // flexible. - .starts_for_each_pattern(true) - .byte_classes(info.config().get_byte_classes()) - .size_limit(info.config().get_onepass_size_limit()); - let result = onepass::Builder::new() - .configure(onepass_config) - .build_from_nfa(nfa.clone()); - let engine = match result { - Ok(engine) => engine, - Err(_err) => { - debug!("OnePass failed to build: {_err}"); - return None; - } - }; - debug!("OnePass built, {} bytes", engine.memory_usage()); - Some(OnePassEngine(engine)) - } - #[cfg(not(feature = "dfa-onepass"))] - { - None - } - } - - #[cfg_attr(feature = "perf-inline", inline(always))] - pub(crate) fn search_slots( - &self, - cache: &mut OnePassCache, - input: &Input<'_>, - slots: &mut [Option], - ) -> Option { - #[cfg(feature = "dfa-onepass")] - { - // OK because we only permit getting a OnePassEngine when we know - // the search is anchored and thus an error cannot occur. - self.0 - .try_search_slots(cache.0.as_mut().unwrap(), input, slots) - .unwrap() - } - #[cfg(not(feature = "dfa-onepass"))] - { - // Impossible to reach because this engine is never constructed - // if the requisite features aren't enabled. - unreachable!() - } - } - - pub(crate) fn memory_usage(&self) -> usize { - #[cfg(feature = "dfa-onepass")] - { - self.0.memory_usage() - } - #[cfg(not(feature = "dfa-onepass"))] - { - // Impossible to reach because this engine is never constructed - // if the requisite features aren't enabled. - unreachable!() - } - } - - #[cfg_attr(feature = "perf-inline", inline(always))] - fn get_nfa(&self) -> &NFA { - #[cfg(feature = "dfa-onepass")] - { - self.0.get_nfa() - } - #[cfg(not(feature = "dfa-onepass"))] - { - // Impossible to reach because this engine is never constructed - // if the requisite features aren't enabled. 
- unreachable!() - } - } -} - -#[derive(Clone, Debug)] -pub(crate) struct OnePassCache( - #[cfg(feature = "dfa-onepass")] Option, - #[cfg(not(feature = "dfa-onepass"))] (), -); - -impl OnePassCache { - pub(crate) fn none() -> OnePassCache { - #[cfg(feature = "dfa-onepass")] - { - OnePassCache(None) - } - #[cfg(not(feature = "dfa-onepass"))] - { - OnePassCache(()) - } - } - - pub(crate) fn new(builder: &OnePass) -> OnePassCache { - #[cfg(feature = "dfa-onepass")] - { - OnePassCache(builder.0.as_ref().map(|e| e.0.create_cache())) - } - #[cfg(not(feature = "dfa-onepass"))] - { - OnePassCache(()) - } - } - - pub(crate) fn reset(&mut self, builder: &OnePass) { - #[cfg(feature = "dfa-onepass")] - if let Some(ref e) = builder.0 { - self.0.as_mut().unwrap().reset(&e.0); - } - } - - pub(crate) fn memory_usage(&self) -> usize { - #[cfg(feature = "dfa-onepass")] - { - self.0.as_ref().map_or(0, |c| c.memory_usage()) - } - #[cfg(not(feature = "dfa-onepass"))] - { - 0 - } - } -} - -#[derive(Debug)] -pub(crate) struct Hybrid(Option); - -impl Hybrid { - pub(crate) fn none() -> Hybrid { - Hybrid(None) - } - - pub(crate) fn new( - info: &RegexInfo, - pre: Option, - nfa: &NFA, - nfarev: &NFA, - ) -> Hybrid { - Hybrid(HybridEngine::new(info, pre, nfa, nfarev)) - } - - pub(crate) fn create_cache(&self) -> HybridCache { - HybridCache::new(self) - } - - #[cfg_attr(feature = "perf-inline", inline(always))] - pub(crate) fn get(&self, _input: &Input<'_>) -> Option<&HybridEngine> { - let engine = self.0.as_ref()?; - Some(engine) - } - - pub(crate) fn is_some(&self) -> bool { - self.0.is_some() - } -} - -#[derive(Debug)] -pub(crate) struct HybridEngine( - #[cfg(feature = "hybrid")] hybrid::regex::Regex, - #[cfg(not(feature = "hybrid"))] (), -); - -impl HybridEngine { - pub(crate) fn new( - info: &RegexInfo, - pre: Option, - nfa: &NFA, - nfarev: &NFA, - ) -> Option { - #[cfg(feature = "hybrid")] - { - if !info.config().get_hybrid() { - return None; - } - let dfa_config = hybrid::dfa::Config::new() - .match_kind(info.config().get_match_kind()) - .prefilter(pre.clone()) - // Enabling this is necessary for ensuring we can service any - // kind of 'Input' search without error. For the lazy DFA, - // this is not particularly costly, since the start states are - // generated lazily. - .starts_for_each_pattern(true) - .byte_classes(info.config().get_byte_classes()) - .unicode_word_boundary(true) - .specialize_start_states(pre.is_some()) - .cache_capacity(info.config().get_hybrid_cache_capacity()) - // This makes it possible for building a lazy DFA to - // fail even though the NFA has already been built. Namely, - // if the cache capacity is too small to fit some minimum - // number of states (which is small, like 4 or 5), then the - // DFA will refuse to build. - // - // We shouldn't enable this to make building always work, since - // this could cause the allocation of a cache bigger than the - // provided capacity amount. - // - // This is effectively the only reason why building a lazy DFA - // could fail. If it does, then we simply suppress the error - // and return None. - .skip_cache_capacity_check(false) - // This and enabling heuristic Unicode word boundary support - // above make it so the lazy DFA can quit at match time. 
- .minimum_cache_clear_count(Some(3)) - .minimum_bytes_per_state(Some(10)); - let result = hybrid::dfa::Builder::new() - .configure(dfa_config.clone()) - .build_from_nfa(nfa.clone()); - let fwd = match result { - Ok(fwd) => fwd, - Err(_err) => { - debug!("forward lazy DFA failed to build: {_err}"); - return None; - } - }; - let result = hybrid::dfa::Builder::new() - .configure( - dfa_config - .clone() - .match_kind(MatchKind::All) - .prefilter(None) - .specialize_start_states(false), - ) - .build_from_nfa(nfarev.clone()); - let rev = match result { - Ok(rev) => rev, - Err(_err) => { - debug!("reverse lazy DFA failed to build: {_err}"); - return None; - } - }; - let engine = - hybrid::regex::Builder::new().build_from_dfas(fwd, rev); - debug!("lazy DFA built"); - Some(HybridEngine(engine)) - } - #[cfg(not(feature = "hybrid"))] - { - None - } - } - - #[cfg_attr(feature = "perf-inline", inline(always))] - pub(crate) fn try_search( - &self, - cache: &mut HybridCache, - input: &Input<'_>, - ) -> Result, RetryFailError> { - #[cfg(feature = "hybrid")] - { - let cache = cache.0.as_mut().unwrap(); - self.0.try_search(cache, input).map_err(|e| e.into()) - } - #[cfg(not(feature = "hybrid"))] - { - // Impossible to reach because this engine is never constructed - // if the requisite features aren't enabled. - unreachable!() - } - } - - #[cfg_attr(feature = "perf-inline", inline(always))] - pub(crate) fn try_search_half_fwd( - &self, - cache: &mut HybridCache, - input: &Input<'_>, - ) -> Result, RetryFailError> { - #[cfg(feature = "hybrid")] - { - let fwd = self.0.forward(); - let mut fwdcache = cache.0.as_mut().unwrap().as_parts_mut().0; - fwd.try_search_fwd(&mut fwdcache, input).map_err(|e| e.into()) - } - #[cfg(not(feature = "hybrid"))] - { - // Impossible to reach because this engine is never constructed - // if the requisite features aren't enabled. - unreachable!() - } - } - - #[cfg_attr(feature = "perf-inline", inline(always))] - pub(crate) fn try_search_half_fwd_stopat( - &self, - cache: &mut HybridCache, - input: &Input<'_>, - ) -> Result, RetryFailError> { - #[cfg(feature = "hybrid")] - { - let dfa = self.0.forward(); - let mut cache = cache.0.as_mut().unwrap().as_parts_mut().0; - crate::meta::stopat::hybrid_try_search_half_fwd( - dfa, &mut cache, input, - ) - } - #[cfg(not(feature = "hybrid"))] - { - // Impossible to reach because this engine is never constructed - // if the requisite features aren't enabled. - unreachable!() - } - } - - #[cfg_attr(feature = "perf-inline", inline(always))] - pub(crate) fn try_search_half_rev( - &self, - cache: &mut HybridCache, - input: &Input<'_>, - ) -> Result, RetryFailError> { - #[cfg(feature = "hybrid")] - { - let rev = self.0.reverse(); - let mut revcache = cache.0.as_mut().unwrap().as_parts_mut().1; - rev.try_search_rev(&mut revcache, input).map_err(|e| e.into()) - } - #[cfg(not(feature = "hybrid"))] - { - // Impossible to reach because this engine is never constructed - // if the requisite features aren't enabled. 
- unreachable!() - } - } - - #[cfg_attr(feature = "perf-inline", inline(always))] - pub(crate) fn try_search_half_rev_limited( - &self, - cache: &mut HybridCache, - input: &Input<'_>, - min_start: usize, - ) -> Result, RetryError> { - #[cfg(feature = "hybrid")] - { - let dfa = self.0.reverse(); - let mut cache = cache.0.as_mut().unwrap().as_parts_mut().1; - crate::meta::limited::hybrid_try_search_half_rev( - dfa, &mut cache, input, min_start, - ) - } - #[cfg(not(feature = "hybrid"))] - { - // Impossible to reach because this engine is never constructed - // if the requisite features aren't enabled. - unreachable!() - } - } - - #[inline] - pub(crate) fn try_which_overlapping_matches( - &self, - cache: &mut HybridCache, - input: &Input<'_>, - patset: &mut PatternSet, - ) -> Result<(), RetryFailError> { - #[cfg(feature = "hybrid")] - { - let fwd = self.0.forward(); - let mut fwdcache = cache.0.as_mut().unwrap().as_parts_mut().0; - fwd.try_which_overlapping_matches(&mut fwdcache, input, patset) - .map_err(|e| e.into()) - } - #[cfg(not(feature = "hybrid"))] - { - // Impossible to reach because this engine is never constructed - // if the requisite features aren't enabled. - unreachable!() - } - } -} - -#[derive(Clone, Debug)] -pub(crate) struct HybridCache( - #[cfg(feature = "hybrid")] Option, - #[cfg(not(feature = "hybrid"))] (), -); - -impl HybridCache { - pub(crate) fn none() -> HybridCache { - #[cfg(feature = "hybrid")] - { - HybridCache(None) - } - #[cfg(not(feature = "hybrid"))] - { - HybridCache(()) - } - } - - pub(crate) fn new(builder: &Hybrid) -> HybridCache { - #[cfg(feature = "hybrid")] - { - HybridCache(builder.0.as_ref().map(|e| e.0.create_cache())) - } - #[cfg(not(feature = "hybrid"))] - { - HybridCache(()) - } - } - - pub(crate) fn reset(&mut self, builder: &Hybrid) { - #[cfg(feature = "hybrid")] - if let Some(ref e) = builder.0 { - self.0.as_mut().unwrap().reset(&e.0); - } - } - - pub(crate) fn memory_usage(&self) -> usize { - #[cfg(feature = "hybrid")] - { - self.0.as_ref().map_or(0, |c| c.memory_usage()) - } - #[cfg(not(feature = "hybrid"))] - { - 0 - } - } -} - -#[derive(Debug)] -pub(crate) struct DFA(Option); - -impl DFA { - pub(crate) fn none() -> DFA { - DFA(None) - } - - pub(crate) fn new( - info: &RegexInfo, - pre: Option, - nfa: &NFA, - nfarev: &NFA, - ) -> DFA { - DFA(DFAEngine::new(info, pre, nfa, nfarev)) - } - - #[cfg_attr(feature = "perf-inline", inline(always))] - pub(crate) fn get(&self, _input: &Input<'_>) -> Option<&DFAEngine> { - let engine = self.0.as_ref()?; - Some(engine) - } - - pub(crate) fn is_some(&self) -> bool { - self.0.is_some() - } - - pub(crate) fn memory_usage(&self) -> usize { - self.0.as_ref().map_or(0, |e| e.memory_usage()) - } -} - -#[derive(Debug)] -pub(crate) struct DFAEngine( - #[cfg(feature = "dfa-build")] dfa::regex::Regex, - #[cfg(not(feature = "dfa-build"))] (), -); - -impl DFAEngine { - pub(crate) fn new( - info: &RegexInfo, - pre: Option, - nfa: &NFA, - nfarev: &NFA, - ) -> Option { - #[cfg(feature = "dfa-build")] - { - if !info.config().get_dfa() { - return None; - } - // If our NFA is anything but small, don't even bother with a DFA. 
- if let Some(state_limit) = info.config().get_dfa_state_limit() { - if nfa.states().len() > state_limit { - debug!( - "skipping full DFA because NFA has {} states, \ - which exceeds the heuristic limit of {}", - nfa.states().len(), - state_limit, - ); - return None; - } - } - // We cut the size limit in four because the total heap used by - // DFA construction is determinization aux memory and the DFA - // itself, and those things are configured independently in the - // lower level DFA builder API. And then split that in two because - // of forward and reverse DFAs. - let size_limit = info.config().get_dfa_size_limit().map(|n| n / 4); - let dfa_config = dfa::dense::Config::new() - .match_kind(info.config().get_match_kind()) - .prefilter(pre.clone()) - // Enabling this is necessary for ensuring we can service any - // kind of 'Input' search without error. For the full DFA, this - // can be quite costly. But since we have such a small bound - // on the size of the DFA, in practice, any multi-regexes are - // probably going to blow the limit anyway. - .starts_for_each_pattern(true) - .byte_classes(info.config().get_byte_classes()) - .unicode_word_boundary(true) - .specialize_start_states(pre.is_some()) - .determinize_size_limit(size_limit) - .dfa_size_limit(size_limit); - let result = dfa::dense::Builder::new() - .configure(dfa_config.clone()) - .build_from_nfa(&nfa); - let fwd = match result { - Ok(fwd) => fwd, - Err(_err) => { - debug!("forward full DFA failed to build: {_err}"); - return None; - } - }; - let result = dfa::dense::Builder::new() - .configure( - dfa_config - .clone() - // We never need unanchored reverse searches, so - // there's no point in building it into the DFA, which - // WILL take more space. (This isn't done for the lazy - // DFA because the DFA is, well, lazy. It doesn't pay - // the cost for supporting unanchored searches unless - // you actually do an unanchored search, which we - // don't.) - .start_kind(dfa::StartKind::Anchored) - .match_kind(MatchKind::All) - .prefilter(None) - .specialize_start_states(false), - ) - .build_from_nfa(&nfarev); - let rev = match result { - Ok(rev) => rev, - Err(_err) => { - debug!("reverse full DFA failed to build: {_err}"); - return None; - } - }; - let engine = dfa::regex::Builder::new().build_from_dfas(fwd, rev); - debug!( - "fully compiled forward and reverse DFAs built, {} bytes", - engine.forward().memory_usage() - + engine.reverse().memory_usage(), - ); - Some(DFAEngine(engine)) - } - #[cfg(not(feature = "dfa-build"))] - { - None - } - } - - #[cfg_attr(feature = "perf-inline", inline(always))] - pub(crate) fn try_search( - &self, - input: &Input<'_>, - ) -> Result, RetryFailError> { - #[cfg(feature = "dfa-build")] - { - self.0.try_search(input).map_err(|e| e.into()) - } - #[cfg(not(feature = "dfa-build"))] - { - // Impossible to reach because this engine is never constructed - // if the requisite features aren't enabled. - unreachable!() - } - } - - #[cfg_attr(feature = "perf-inline", inline(always))] - pub(crate) fn try_search_half_fwd( - &self, - input: &Input<'_>, - ) -> Result, RetryFailError> { - #[cfg(feature = "dfa-build")] - { - use crate::dfa::Automaton; - self.0.forward().try_search_fwd(input).map_err(|e| e.into()) - } - #[cfg(not(feature = "dfa-build"))] - { - // Impossible to reach because this engine is never constructed - // if the requisite features aren't enabled. 
- unreachable!() - } - } - - #[cfg_attr(feature = "perf-inline", inline(always))] - pub(crate) fn try_search_half_fwd_stopat( - &self, - input: &Input<'_>, - ) -> Result, RetryFailError> { - #[cfg(feature = "dfa-build")] - { - let dfa = self.0.forward(); - crate::meta::stopat::dfa_try_search_half_fwd(dfa, input) - } - #[cfg(not(feature = "dfa-build"))] - { - // Impossible to reach because this engine is never constructed - // if the requisite features aren't enabled. - unreachable!() - } - } - - #[cfg_attr(feature = "perf-inline", inline(always))] - pub(crate) fn try_search_half_rev( - &self, - input: &Input<'_>, - ) -> Result, RetryFailError> { - #[cfg(feature = "dfa-build")] - { - use crate::dfa::Automaton; - self.0.reverse().try_search_rev(&input).map_err(|e| e.into()) - } - #[cfg(not(feature = "dfa-build"))] - { - // Impossible to reach because this engine is never constructed - // if the requisite features aren't enabled. - unreachable!() - } - } - - #[cfg_attr(feature = "perf-inline", inline(always))] - pub(crate) fn try_search_half_rev_limited( - &self, - input: &Input<'_>, - min_start: usize, - ) -> Result, RetryError> { - #[cfg(feature = "dfa-build")] - { - let dfa = self.0.reverse(); - crate::meta::limited::dfa_try_search_half_rev( - dfa, input, min_start, - ) - } - #[cfg(not(feature = "dfa-build"))] - { - // Impossible to reach because this engine is never constructed - // if the requisite features aren't enabled. - unreachable!() - } - } - - #[inline] - pub(crate) fn try_which_overlapping_matches( - &self, - input: &Input<'_>, - patset: &mut PatternSet, - ) -> Result<(), RetryFailError> { - #[cfg(feature = "dfa-build")] - { - use crate::dfa::Automaton; - self.0 - .forward() - .try_which_overlapping_matches(input, patset) - .map_err(|e| e.into()) - } - #[cfg(not(feature = "dfa-build"))] - { - // Impossible to reach because this engine is never constructed - // if the requisite features aren't enabled. - unreachable!() - } - } - - pub(crate) fn memory_usage(&self) -> usize { - #[cfg(feature = "dfa-build")] - { - self.0.forward().memory_usage() + self.0.reverse().memory_usage() - } - #[cfg(not(feature = "dfa-build"))] - { - // Impossible to reach because this engine is never constructed - // if the requisite features aren't enabled. - unreachable!() - } - } -} - -#[derive(Debug)] -pub(crate) struct ReverseHybrid(Option); - -impl ReverseHybrid { - pub(crate) fn none() -> ReverseHybrid { - ReverseHybrid(None) - } - - pub(crate) fn new(info: &RegexInfo, nfarev: &NFA) -> ReverseHybrid { - ReverseHybrid(ReverseHybridEngine::new(info, nfarev)) - } - - pub(crate) fn create_cache(&self) -> ReverseHybridCache { - ReverseHybridCache::new(self) - } - - #[cfg_attr(feature = "perf-inline", inline(always))] - pub(crate) fn get( - &self, - _input: &Input<'_>, - ) -> Option<&ReverseHybridEngine> { - let engine = self.0.as_ref()?; - Some(engine) - } -} - -#[derive(Debug)] -pub(crate) struct ReverseHybridEngine( - #[cfg(feature = "hybrid")] hybrid::dfa::DFA, - #[cfg(not(feature = "hybrid"))] (), -); - -impl ReverseHybridEngine { - pub(crate) fn new( - info: &RegexInfo, - nfarev: &NFA, - ) -> Option { - #[cfg(feature = "hybrid")] - { - if !info.config().get_hybrid() { - return None; - } - // Since we only use this for reverse searches, we can hard-code - // a number of things like match semantics, prefilters, starts - // for each pattern and so on. 
- let dfa_config = hybrid::dfa::Config::new() - .match_kind(MatchKind::All) - .prefilter(None) - .starts_for_each_pattern(false) - .byte_classes(info.config().get_byte_classes()) - .unicode_word_boundary(true) - .specialize_start_states(false) - .cache_capacity(info.config().get_hybrid_cache_capacity()) - .skip_cache_capacity_check(false) - .minimum_cache_clear_count(Some(3)) - .minimum_bytes_per_state(Some(10)); - let result = hybrid::dfa::Builder::new() - .configure(dfa_config) - .build_from_nfa(nfarev.clone()); - let rev = match result { - Ok(rev) => rev, - Err(_err) => { - debug!("lazy reverse DFA failed to build: {_err}"); - return None; - } - }; - debug!("lazy reverse DFA built"); - Some(ReverseHybridEngine(rev)) - } - #[cfg(not(feature = "hybrid"))] - { - None - } - } - - #[cfg_attr(feature = "perf-inline", inline(always))] - pub(crate) fn try_search_half_rev_limited( - &self, - cache: &mut ReverseHybridCache, - input: &Input<'_>, - min_start: usize, - ) -> Result, RetryError> { - #[cfg(feature = "hybrid")] - { - let dfa = &self.0; - let mut cache = cache.0.as_mut().unwrap(); - crate::meta::limited::hybrid_try_search_half_rev( - dfa, &mut cache, input, min_start, - ) - } - #[cfg(not(feature = "hybrid"))] - { - // Impossible to reach because this engine is never constructed - // if the requisite features aren't enabled. - unreachable!() - } - } -} - -#[derive(Clone, Debug)] -pub(crate) struct ReverseHybridCache( - #[cfg(feature = "hybrid")] Option, - #[cfg(not(feature = "hybrid"))] (), -); - -impl ReverseHybridCache { - pub(crate) fn none() -> ReverseHybridCache { - #[cfg(feature = "hybrid")] - { - ReverseHybridCache(None) - } - #[cfg(not(feature = "hybrid"))] - { - ReverseHybridCache(()) - } - } - - pub(crate) fn new(builder: &ReverseHybrid) -> ReverseHybridCache { - #[cfg(feature = "hybrid")] - { - ReverseHybridCache(builder.0.as_ref().map(|e| e.0.create_cache())) - } - #[cfg(not(feature = "hybrid"))] - { - ReverseHybridCache(()) - } - } - - pub(crate) fn reset(&mut self, builder: &ReverseHybrid) { - #[cfg(feature = "hybrid")] - if let Some(ref e) = builder.0 { - self.0.as_mut().unwrap().reset(&e.0); - } - } - - pub(crate) fn memory_usage(&self) -> usize { - #[cfg(feature = "hybrid")] - { - self.0.as_ref().map_or(0, |c| c.memory_usage()) - } - #[cfg(not(feature = "hybrid"))] - { - 0 - } - } -} - -#[derive(Debug)] -pub(crate) struct ReverseDFA(Option); - -impl ReverseDFA { - pub(crate) fn none() -> ReverseDFA { - ReverseDFA(None) - } - - pub(crate) fn new(info: &RegexInfo, nfarev: &NFA) -> ReverseDFA { - ReverseDFA(ReverseDFAEngine::new(info, nfarev)) - } - - #[cfg_attr(feature = "perf-inline", inline(always))] - pub(crate) fn get(&self, _input: &Input<'_>) -> Option<&ReverseDFAEngine> { - let engine = self.0.as_ref()?; - Some(engine) - } - - pub(crate) fn is_some(&self) -> bool { - self.0.is_some() - } - - pub(crate) fn memory_usage(&self) -> usize { - self.0.as_ref().map_or(0, |e| e.memory_usage()) - } -} - -#[derive(Debug)] -pub(crate) struct ReverseDFAEngine( - #[cfg(feature = "dfa-build")] dfa::dense::DFA>, - #[cfg(not(feature = "dfa-build"))] (), -); - -impl ReverseDFAEngine { - pub(crate) fn new( - info: &RegexInfo, - nfarev: &NFA, - ) -> Option { - #[cfg(feature = "dfa-build")] - { - if !info.config().get_dfa() { - return None; - } - // If our NFA is anything but small, don't even bother with a DFA. 
- if let Some(state_limit) = info.config().get_dfa_state_limit() { - if nfarev.states().len() > state_limit { - debug!( - "skipping full reverse DFA because NFA has {} states, \ - which exceeds the heuristic limit of {}", - nfarev.states().len(), - state_limit, - ); - return None; - } - } - // We cut the size limit in two because the total heap used by DFA - // construction is determinization aux memory and the DFA itself, - // and those things are configured independently in the lower level - // DFA builder API. - let size_limit = info.config().get_dfa_size_limit().map(|n| n / 2); - // Since we only use this for reverse searches, we can hard-code - // a number of things like match semantics, prefilters, starts - // for each pattern and so on. We also disable acceleration since - // it's incompatible with limited searches (which is the only - // operation we support for this kind of engine at the moment). - let dfa_config = dfa::dense::Config::new() - .match_kind(MatchKind::All) - .prefilter(None) - .accelerate(false) - .start_kind(dfa::StartKind::Anchored) - .starts_for_each_pattern(false) - .byte_classes(info.config().get_byte_classes()) - .unicode_word_boundary(true) - .specialize_start_states(false) - .determinize_size_limit(size_limit) - .dfa_size_limit(size_limit); - let result = dfa::dense::Builder::new() - .configure(dfa_config) - .build_from_nfa(&nfarev); - let rev = match result { - Ok(rev) => rev, - Err(_err) => { - debug!("full reverse DFA failed to build: {_err}"); - return None; - } - }; - debug!( - "fully compiled reverse DFA built, {} bytes", - rev.memory_usage() - ); - Some(ReverseDFAEngine(rev)) - } - #[cfg(not(feature = "dfa-build"))] - { - None - } - } - - #[cfg_attr(feature = "perf-inline", inline(always))] - pub(crate) fn try_search_half_rev_limited( - &self, - input: &Input<'_>, - min_start: usize, - ) -> Result, RetryError> { - #[cfg(feature = "dfa-build")] - { - let dfa = &self.0; - crate::meta::limited::dfa_try_search_half_rev( - dfa, input, min_start, - ) - } - #[cfg(not(feature = "dfa-build"))] - { - // Impossible to reach because this engine is never constructed - // if the requisite features aren't enabled. - unreachable!() - } - } - - pub(crate) fn memory_usage(&self) -> usize { - #[cfg(feature = "dfa-build")] - { - self.0.memory_usage() - } - #[cfg(not(feature = "dfa-build"))] - { - // Impossible to reach because this engine is never constructed - // if the requisite features aren't enabled. - unreachable!() - } - } -} diff --git a/vendor/regex-automata/src/nfa/mod.rs b/vendor/regex-automata/src/nfa/mod.rs deleted file mode 100644 index 14a0c30bea70ec..00000000000000 --- a/vendor/regex-automata/src/nfa/mod.rs +++ /dev/null @@ -1,55 +0,0 @@ -/*! -Provides non-deterministic finite automata (NFA) and regex engines that use -them. - -While NFAs and DFAs (deterministic finite automata) have equivalent *theoretical* -power, their usage in practice tends to result in different engineering trade -offs. While this isn't meant to be a comprehensive treatment of the topic, here -are a few key trade offs that are, at minimum, true for this crate: - -* NFAs tend to be represented sparsely where as DFAs are represented densely. -Sparse representations use less memory, but are slower to traverse. Conversely, -dense representations use more memory, but are faster to traverse. (Sometimes -these lines are blurred. 
For example, an `NFA` might choose to represent a
-particular state in a dense fashion, and a DFA can be built using a sparse
-representation via [`sparse::DFA`](crate::dfa::sparse::DFA).
-* NFAs have epsilon transitions and DFAs don't. In practice, this means that
-handling a single byte in a haystack with an NFA at search time may require
-visiting multiple NFA states. In a DFA, each byte only requires visiting
-a single state. Stated differently, NFAs require a variable number of CPU
-instructions to process one byte in a haystack where as a DFA uses a constant
-number of CPU instructions to process one byte.
-* NFAs are generally easier to amend with secondary storage. For example, the
-[`thompson::pikevm::PikeVM`] uses an NFA to match, but also uses additional
-memory beyond the model of a finite state machine to track offsets for matching
-capturing groups. Conversely, the most a DFA can do is report the offset (and
-pattern ID) at which a match occurred. This is generally why we also compile
-DFAs in reverse, so that we can run them after finding the end of a match to
-also find the start of a match.
-* NFAs take worst case linear time to build, but DFAs take worst case
-exponential time to build. The [hybrid NFA/DFA](crate::hybrid) mitigates this
-challenge for DFAs in many practical cases.
-
-There are likely other differences, but the bottom line is that NFAs tend to be
-more memory efficient and give easier opportunities for increasing expressive
-power, where as DFAs are faster to search with.
-
-# Why only a Thompson NFA?
-
-Currently, the only kind of NFA we support in this crate is a [Thompson
-NFA](https://en.wikipedia.org/wiki/Thompson%27s_construction). This refers
-to a specific construction algorithm that takes the syntax of a regex
-pattern and converts it to an NFA. Specifically, it makes gratuitous use of
-epsilon transitions in order to keep its structure simple. In exchange, its
-construction time is linear in the size of the regex. A Thompson NFA also makes
-the guarantee that given any state and a character in a haystack, there is at
-most one transition defined for it. (Although there may be many epsilon
-transitions.)
-
-It's possible that other types of NFAs will be added in the future, such as a
-[Glushkov NFA](https://en.wikipedia.org/wiki/Glushkov%27s_construction_algorithm).
-But currently, this crate only provides a Thompson NFA.
-*/
-
-#[cfg(feature = "nfa-thompson")]
-pub mod thompson;
diff --git a/vendor/regex-automata/src/nfa/thompson/backtrack.rs b/vendor/regex-automata/src/nfa/thompson/backtrack.rs
deleted file mode 100644
index df99e456df746f..00000000000000
--- a/vendor/regex-automata/src/nfa/thompson/backtrack.rs
+++ /dev/null
@@ -1,1908 +0,0 @@
-/*!
-An NFA backed bounded backtracker for executing regex searches with capturing
-groups.
-
-This module provides a [`BoundedBacktracker`] that works by simulating an NFA
-using the classical backtracking algorithm with a twist: it avoids redoing
-work that it has done before and thereby avoids worst case exponential time.
-In exchange, it can only be used on "short" haystacks. Its advantage is that
-is can be faster than the [`PikeVM`](thompson::pikevm::PikeVM) in many cases
-because it does less book-keeping.
-*/ - -use alloc::{vec, vec::Vec}; - -use crate::{ - nfa::thompson::{self, BuildError, State, NFA}, - util::{ - captures::Captures, - empty, iter, - prefilter::Prefilter, - primitives::{NonMaxUsize, PatternID, SmallIndex, StateID}, - search::{Anchored, HalfMatch, Input, Match, MatchError, Span}, - }, -}; - -/// Returns the minimum visited capacity for the given haystack. -/// -/// This function can be used as the argument to [`Config::visited_capacity`] -/// in order to guarantee that a backtracking search for the given `input` -/// won't return an error when using a [`BoundedBacktracker`] built from the -/// given `NFA`. -/// -/// This routine exists primarily as a way to test that the bounded backtracker -/// works correctly when its capacity is set to the smallest possible amount. -/// Still, it may be useful in cases where you know you want to use the bounded -/// backtracker for a specific input, and just need to know what visited -/// capacity to provide to make it work. -/// -/// Be warned that this number could be quite large as it is multiplicative in -/// the size the given NFA and haystack. -pub fn min_visited_capacity(nfa: &NFA, input: &Input<'_>) -> usize { - div_ceil(nfa.states().len() * (input.get_span().len() + 1), 8) -} - -/// The configuration used for building a bounded backtracker. -/// -/// A bounded backtracker configuration is a simple data object that is -/// typically used with [`Builder::configure`]. -#[derive(Clone, Debug, Default)] -pub struct Config { - pre: Option>, - visited_capacity: Option, -} - -impl Config { - /// Return a new default regex configuration. - pub fn new() -> Config { - Config::default() - } - - /// Set a prefilter to be used whenever a start state is entered. - /// - /// A [`Prefilter`] in this context is meant to accelerate searches by - /// looking for literal prefixes that every match for the corresponding - /// pattern (or patterns) must start with. Once a prefilter produces a - /// match, the underlying search routine continues on to try and confirm - /// the match. - /// - /// Be warned that setting a prefilter does not guarantee that the search - /// will be faster. While it's usually a good bet, if the prefilter - /// produces a lot of false positive candidates (i.e., positions matched - /// by the prefilter but not by the regex), then the overall result can - /// be slower than if you had just executed the regex engine without any - /// prefilters. - /// - /// By default no prefilter is set. - /// - /// # Example - /// - /// ``` - /// use regex_automata::{ - /// nfa::thompson::backtrack::BoundedBacktracker, - /// util::prefilter::Prefilter, - /// Input, Match, MatchKind, - /// }; - /// - /// let pre = Prefilter::new(MatchKind::LeftmostFirst, &["foo", "bar"]); - /// let re = BoundedBacktracker::builder() - /// .configure(BoundedBacktracker::config().prefilter(pre)) - /// .build(r"(foo|bar)[a-z]+")?; - /// let mut cache = re.create_cache(); - /// let input = Input::new("foo1 barfox bar"); - /// assert_eq!( - /// Some(Match::must(0, 5..11)), - /// re.try_find(&mut cache, input)?, - /// ); - /// - /// # Ok::<(), Box>(()) - /// ``` - /// - /// Be warned though that an incorrect prefilter can lead to incorrect - /// results! 
- /// - /// ``` - /// use regex_automata::{ - /// nfa::thompson::backtrack::BoundedBacktracker, - /// util::prefilter::Prefilter, - /// Input, HalfMatch, MatchKind, - /// }; - /// - /// let pre = Prefilter::new(MatchKind::LeftmostFirst, &["foo", "car"]); - /// let re = BoundedBacktracker::builder() - /// .configure(BoundedBacktracker::config().prefilter(pre)) - /// .build(r"(foo|bar)[a-z]+")?; - /// let mut cache = re.create_cache(); - /// let input = Input::new("foo1 barfox bar"); - /// // No match reported even though there clearly is one! - /// assert_eq!(None, re.try_find(&mut cache, input)?); - /// - /// # Ok::<(), Box>(()) - /// ``` - pub fn prefilter(mut self, pre: Option) -> Config { - self.pre = Some(pre); - self - } - - /// Set the visited capacity used to bound backtracking. - /// - /// The visited capacity represents the amount of heap memory (in bytes) to - /// allocate toward tracking which parts of the backtracking search have - /// been done before. The heap memory needed for any particular search is - /// proportional to `haystack.len() * nfa.states().len()`, which an be - /// quite large. Therefore, the bounded backtracker is typically only able - /// to run on shorter haystacks. - /// - /// For a given regex, increasing the visited capacity means that the - /// maximum haystack length that can be searched is increased. The - /// [`BoundedBacktracker::max_haystack_len`] method returns that maximum. - /// - /// The default capacity is a reasonable but empirically chosen size. - /// - /// # Example - /// - /// As with other regex engines, Unicode is what tends to make the bounded - /// backtracker less useful by making the maximum haystack length quite - /// small. If necessary, increasing the visited capacity using this routine - /// will increase the maximum haystack length at the cost of using more - /// memory. - /// - /// Note though that the specific maximum values here are not an API - /// guarantee. The default visited capacity is subject to change and not - /// covered by semver. - /// - /// ``` - /// # if cfg!(miri) { return Ok(()); } // miri takes too long - /// use regex_automata::nfa::thompson::backtrack::BoundedBacktracker; - /// - /// // Unicode inflates the size of the underlying NFA quite a bit, and - /// // thus means that the backtracker can only handle smaller haystacks, - /// // assuming that the visited capacity remains unchanged. - /// let re = BoundedBacktracker::new(r"\w+")?; - /// assert!(re.max_haystack_len() <= 7_000); - /// // But we can increase the visited capacity to handle bigger haystacks! - /// let re = BoundedBacktracker::builder() - /// .configure(BoundedBacktracker::config().visited_capacity(1<<20)) - /// .build(r"\w+")?; - /// assert!(re.max_haystack_len() >= 25_000); - /// assert!(re.max_haystack_len() <= 28_000); - /// # Ok::<(), Box>(()) - /// ``` - pub fn visited_capacity(mut self, capacity: usize) -> Config { - self.visited_capacity = Some(capacity); - self - } - - /// Returns the prefilter set in this configuration, if one at all. - pub fn get_prefilter(&self) -> Option<&Prefilter> { - self.pre.as_ref().unwrap_or(&None).as_ref() - } - - /// Returns the configured visited capacity. - /// - /// Note that the actual capacity used may be slightly bigger than the - /// configured capacity. - pub fn get_visited_capacity(&self) -> usize { - const DEFAULT: usize = 256 * (1 << 10); // 256 KB - self.visited_capacity.unwrap_or(DEFAULT) - } - - /// Overwrite the default configuration such that the options in `o` are - /// always used. 
If an option in `o` is not set, then the corresponding - /// option in `self` is used. If it's not set in `self` either, then it - /// remains not set. - pub(crate) fn overwrite(&self, o: Config) -> Config { - Config { - pre: o.pre.or_else(|| self.pre.clone()), - visited_capacity: o.visited_capacity.or(self.visited_capacity), - } - } -} - -/// A builder for a bounded backtracker. -/// -/// This builder permits configuring options for the syntax of a pattern, the -/// NFA construction and the `BoundedBacktracker` construction. This builder -/// is different from a general purpose regex builder in that it permits fine -/// grain configuration of the construction process. The trade off for this is -/// complexity, and the possibility of setting a configuration that might not -/// make sense. For example, there are two different UTF-8 modes: -/// -/// * [`syntax::Config::utf8`](crate::util::syntax::Config::utf8) controls -/// whether the pattern itself can contain sub-expressions that match invalid -/// UTF-8. -/// * [`thompson::Config::utf8`] controls how the regex iterators themselves -/// advance the starting position of the next search when a match with zero -/// length is found. -/// -/// Generally speaking, callers will want to either enable all of these or -/// disable all of these. -/// -/// # Example -/// -/// This example shows how to disable UTF-8 mode in the syntax and the regex -/// itself. This is generally what you want for matching on arbitrary bytes. -/// -/// ``` -/// use regex_automata::{ -/// nfa::thompson::{self, backtrack::BoundedBacktracker}, -/// util::syntax, -/// Match, -/// }; -/// -/// let re = BoundedBacktracker::builder() -/// .syntax(syntax::Config::new().utf8(false)) -/// .thompson(thompson::Config::new().utf8(false)) -/// .build(r"foo(?-u:[^b])ar.*")?; -/// let mut cache = re.create_cache(); -/// -/// let haystack = b"\xFEfoo\xFFarzz\xE2\x98\xFF\n"; -/// let expected = Some(Ok(Match::must(0, 1..9))); -/// let got = re.try_find_iter(&mut cache, haystack).next(); -/// assert_eq!(expected, got); -/// // Notice that `(?-u:[^b])` matches invalid UTF-8, -/// // but the subsequent `.*` does not! Disabling UTF-8 -/// // on the syntax permits this. -/// // -/// // N.B. This example does not show the impact of -/// // disabling UTF-8 mode on a BoundedBacktracker Config, since that -/// // only impacts regexes that can produce matches of -/// // length 0. -/// assert_eq!(b"foo\xFFarzz", &haystack[got.unwrap()?.range()]); -/// -/// # Ok::<(), Box>(()) -/// ``` -#[derive(Clone, Debug)] -pub struct Builder { - config: Config, - #[cfg(feature = "syntax")] - thompson: thompson::Compiler, -} - -impl Builder { - /// Create a new BoundedBacktracker builder with its default configuration. - pub fn new() -> Builder { - Builder { - config: Config::default(), - #[cfg(feature = "syntax")] - thompson: thompson::Compiler::new(), - } - } - - /// Build a `BoundedBacktracker` from the given pattern. - /// - /// If there was a problem parsing or compiling the pattern, then an error - /// is returned. - #[cfg(feature = "syntax")] - pub fn build( - &self, - pattern: &str, - ) -> Result { - self.build_many(&[pattern]) - } - - /// Build a `BoundedBacktracker` from the given patterns. - #[cfg(feature = "syntax")] - pub fn build_many>( - &self, - patterns: &[P], - ) -> Result { - let nfa = self.thompson.build_many(patterns)?; - self.build_from_nfa(nfa) - } - - /// Build a `BoundedBacktracker` directly from its NFA. 
- /// - /// Note that when using this method, any configuration that applies to the - /// construction of the NFA itself will of course be ignored, since the NFA - /// given here is already built. - pub fn build_from_nfa( - &self, - nfa: NFA, - ) -> Result { - nfa.look_set_any().available().map_err(BuildError::word)?; - Ok(BoundedBacktracker { config: self.config.clone(), nfa }) - } - - /// Apply the given `BoundedBacktracker` configuration options to this - /// builder. - pub fn configure(&mut self, config: Config) -> &mut Builder { - self.config = self.config.overwrite(config); - self - } - - /// Set the syntax configuration for this builder using - /// [`syntax::Config`](crate::util::syntax::Config). - /// - /// This permits setting things like case insensitivity, Unicode and multi - /// line mode. - /// - /// These settings only apply when constructing a `BoundedBacktracker` - /// directly from a pattern. - #[cfg(feature = "syntax")] - pub fn syntax( - &mut self, - config: crate::util::syntax::Config, - ) -> &mut Builder { - self.thompson.syntax(config); - self - } - - /// Set the Thompson NFA configuration for this builder using - /// [`nfa::thompson::Config`](crate::nfa::thompson::Config). - /// - /// This permits setting things like if additional time should be spent - /// shrinking the size of the NFA. - /// - /// These settings only apply when constructing a `BoundedBacktracker` - /// directly from a pattern. - #[cfg(feature = "syntax")] - pub fn thompson(&mut self, config: thompson::Config) -> &mut Builder { - self.thompson.configure(config); - self - } -} - -/// A backtracking regex engine that bounds its execution to avoid exponential -/// blow-up. -/// -/// This regex engine only implements leftmost-first match semantics and -/// only supports leftmost searches. It effectively does the same thing as a -/// [`PikeVM`](thompson::pikevm::PikeVM), but typically does it faster because -/// it doesn't have to worry about copying capturing group spans for most NFA -/// states. Instead, the backtracker can maintain one set of captures (provided -/// by the caller) and never needs to copy them. In exchange, the backtracker -/// bounds itself to ensure it doesn't exhibit worst case exponential time. -/// This results in the backtracker only being able to handle short haystacks -/// given reasonable memory usage. -/// -/// # Searches may return an error! -/// -/// By design, this backtracking regex engine is bounded. This bound is -/// implemented by not visiting any combination of NFA state ID and position -/// in a haystack more than once. Thus, the total memory required to bound -/// backtracking is proportional to `haystack.len() * nfa.states().len()`. -/// This can obviously get quite large, since large haystacks aren't terribly -/// uncommon. To avoid using exorbitant memory, the capacity is bounded by -/// a fixed limit set via [`Config::visited_capacity`]. Thus, if the total -/// capacity required for a particular regex and a haystack exceeds this -/// capacity, then the search routine will return an error. -/// -/// Unlike other regex engines that may return an error at search time (like -/// the DFA or the hybrid NFA/DFA), there is no way to guarantee that a bounded -/// backtracker will work for every haystack. Therefore, this regex engine -/// _only_ exposes fallible search routines to avoid the footgun of panicking -/// when running a search on a haystack that is too big. 
-/// -/// If one wants to use the fallible search APIs without handling the -/// error, the only way to guarantee an error won't occur from the -/// haystack length is to ensure the haystack length does not exceed -/// [`BoundedBacktracker::max_haystack_len`]. -/// -/// # Example: Unicode word boundaries -/// -/// This example shows that the bounded backtracker implements Unicode word -/// boundaries correctly by default. -/// -/// ``` -/// # if cfg!(miri) { return Ok(()); } // miri takes too long -/// use regex_automata::{nfa::thompson::backtrack::BoundedBacktracker, Match}; -/// -/// let re = BoundedBacktracker::new(r"\b\w+\b")?; -/// let mut cache = re.create_cache(); -/// -/// let mut it = re.try_find_iter(&mut cache, "Шерлок Холмс"); -/// assert_eq!(Some(Ok(Match::must(0, 0..12))), it.next()); -/// assert_eq!(Some(Ok(Match::must(0, 13..23))), it.next()); -/// assert_eq!(None, it.next()); -/// # Ok::<(), Box>(()) -/// ``` -/// -/// # Example: multiple regex patterns -/// -/// The bounded backtracker supports searching for multiple patterns -/// simultaneously, just like other regex engines. Note though that because it -/// uses a backtracking strategy, this regex engine is unlikely to scale well -/// as more patterns are added. But then again, as more patterns are added, the -/// maximum haystack length allowed will also shorten (assuming the visited -/// capacity remains invariant). -/// -/// ``` -/// use regex_automata::{nfa::thompson::backtrack::BoundedBacktracker, Match}; -/// -/// let re = BoundedBacktracker::new_many(&["[a-z]+", "[0-9]+"])?; -/// let mut cache = re.create_cache(); -/// -/// let mut it = re.try_find_iter(&mut cache, "abc 1 foo 4567 0 quux"); -/// assert_eq!(Some(Ok(Match::must(0, 0..3))), it.next()); -/// assert_eq!(Some(Ok(Match::must(1, 4..5))), it.next()); -/// assert_eq!(Some(Ok(Match::must(0, 6..9))), it.next()); -/// assert_eq!(Some(Ok(Match::must(1, 10..14))), it.next()); -/// assert_eq!(Some(Ok(Match::must(1, 15..16))), it.next()); -/// assert_eq!(Some(Ok(Match::must(0, 17..21))), it.next()); -/// assert_eq!(None, it.next()); -/// # Ok::<(), Box>(()) -/// ``` -#[derive(Clone, Debug)] -pub struct BoundedBacktracker { - config: Config, - nfa: NFA, -} - -impl BoundedBacktracker { - /// Parse the given regular expression using the default configuration and - /// return the corresponding `BoundedBacktracker`. - /// - /// If you want a non-default configuration, then use the [`Builder`] to - /// set your own configuration. - /// - /// # Example - /// - /// ``` - /// use regex_automata::{ - /// nfa::thompson::backtrack::BoundedBacktracker, - /// Match, - /// }; - /// - /// let re = BoundedBacktracker::new("foo[0-9]+bar")?; - /// let mut cache = re.create_cache(); - /// assert_eq!( - /// Some(Ok(Match::must(0, 3..14))), - /// re.try_find_iter(&mut cache, "zzzfoo12345barzzz").next(), - /// ); - /// # Ok::<(), Box>(()) - /// ``` - #[cfg(feature = "syntax")] - pub fn new(pattern: &str) -> Result { - BoundedBacktracker::builder().build(pattern) - } - - /// Like `new`, but parses multiple patterns into a single "multi regex." - /// This similarly uses the default regex configuration. 
- /// - /// # Example - /// - /// ``` - /// use regex_automata::{ - /// nfa::thompson::backtrack::BoundedBacktracker, - /// Match, - /// }; - /// - /// let re = BoundedBacktracker::new_many(&["[a-z]+", "[0-9]+"])?; - /// let mut cache = re.create_cache(); - /// - /// let mut it = re.try_find_iter(&mut cache, "abc 1 foo 4567 0 quux"); - /// assert_eq!(Some(Ok(Match::must(0, 0..3))), it.next()); - /// assert_eq!(Some(Ok(Match::must(1, 4..5))), it.next()); - /// assert_eq!(Some(Ok(Match::must(0, 6..9))), it.next()); - /// assert_eq!(Some(Ok(Match::must(1, 10..14))), it.next()); - /// assert_eq!(Some(Ok(Match::must(1, 15..16))), it.next()); - /// assert_eq!(Some(Ok(Match::must(0, 17..21))), it.next()); - /// assert_eq!(None, it.next()); - /// # Ok::<(), Box>(()) - /// ``` - #[cfg(feature = "syntax")] - pub fn new_many>( - patterns: &[P], - ) -> Result { - BoundedBacktracker::builder().build_many(patterns) - } - - /// # Example - /// - /// This shows how to hand assemble a regular expression via its HIR, - /// compile an NFA from it and build a BoundedBacktracker from the NFA. - /// - /// ``` - /// use regex_automata::{ - /// nfa::thompson::{NFA, backtrack::BoundedBacktracker}, - /// Match, - /// }; - /// use regex_syntax::hir::{Hir, Class, ClassBytes, ClassBytesRange}; - /// - /// let hir = Hir::class(Class::Bytes(ClassBytes::new(vec![ - /// ClassBytesRange::new(b'0', b'9'), - /// ClassBytesRange::new(b'A', b'Z'), - /// ClassBytesRange::new(b'_', b'_'), - /// ClassBytesRange::new(b'a', b'z'), - /// ]))); - /// - /// let config = NFA::config().nfa_size_limit(Some(1_000)); - /// let nfa = NFA::compiler().configure(config).build_from_hir(&hir)?; - /// - /// let re = BoundedBacktracker::new_from_nfa(nfa)?; - /// let (mut cache, mut caps) = (re.create_cache(), re.create_captures()); - /// let expected = Some(Match::must(0, 3..4)); - /// re.try_captures(&mut cache, "!@#A#@!", &mut caps)?; - /// assert_eq!(expected, caps.get_match()); - /// - /// # Ok::<(), Box>(()) - /// ``` - pub fn new_from_nfa(nfa: NFA) -> Result { - BoundedBacktracker::builder().build_from_nfa(nfa) - } - - /// Create a new `BoundedBacktracker` that matches every input. - /// - /// # Example - /// - /// ``` - /// use regex_automata::{ - /// nfa::thompson::backtrack::BoundedBacktracker, - /// Match, - /// }; - /// - /// let re = BoundedBacktracker::always_match()?; - /// let mut cache = re.create_cache(); - /// - /// let expected = Some(Ok(Match::must(0, 0..0))); - /// assert_eq!(expected, re.try_find_iter(&mut cache, "").next()); - /// assert_eq!(expected, re.try_find_iter(&mut cache, "foo").next()); - /// # Ok::<(), Box>(()) - /// ``` - pub fn always_match() -> Result { - let nfa = thompson::NFA::always_match(); - BoundedBacktracker::new_from_nfa(nfa) - } - - /// Create a new `BoundedBacktracker` that never matches any input. - /// - /// # Example - /// - /// ``` - /// use regex_automata::nfa::thompson::backtrack::BoundedBacktracker; - /// - /// let re = BoundedBacktracker::never_match()?; - /// let mut cache = re.create_cache(); - /// - /// assert_eq!(None, re.try_find_iter(&mut cache, "").next()); - /// assert_eq!(None, re.try_find_iter(&mut cache, "foo").next()); - /// # Ok::<(), Box>(()) - /// ``` - pub fn never_match() -> Result { - let nfa = thompson::NFA::never_match(); - BoundedBacktracker::new_from_nfa(nfa) - } - - /// Return a default configuration for a `BoundedBacktracker`. 
- /// - /// This is a convenience routine to avoid needing to import the `Config` - /// type when customizing the construction of a `BoundedBacktracker`. - /// - /// # Example - /// - /// This example shows how to disable UTF-8 mode. When UTF-8 mode is - /// disabled, zero-width matches that split a codepoint are allowed. - /// Otherwise they are never reported. - /// - /// In the code below, notice that `""` is permitted to match positions - /// that split the encoding of a codepoint. - /// - /// ``` - /// use regex_automata::{ - /// nfa::thompson::{self, backtrack::BoundedBacktracker}, - /// Match, - /// }; - /// - /// let re = BoundedBacktracker::builder() - /// .thompson(thompson::Config::new().utf8(false)) - /// .build(r"")?; - /// let mut cache = re.create_cache(); - /// - /// let haystack = "a☃z"; - /// let mut it = re.try_find_iter(&mut cache, haystack); - /// assert_eq!(Some(Ok(Match::must(0, 0..0))), it.next()); - /// assert_eq!(Some(Ok(Match::must(0, 1..1))), it.next()); - /// assert_eq!(Some(Ok(Match::must(0, 2..2))), it.next()); - /// assert_eq!(Some(Ok(Match::must(0, 3..3))), it.next()); - /// assert_eq!(Some(Ok(Match::must(0, 4..4))), it.next()); - /// assert_eq!(Some(Ok(Match::must(0, 5..5))), it.next()); - /// assert_eq!(None, it.next()); - /// - /// # Ok::<(), Box>(()) - /// ``` - pub fn config() -> Config { - Config::new() - } - - /// Return a builder for configuring the construction of a - /// `BoundedBacktracker`. - /// - /// This is a convenience routine to avoid needing to import the - /// [`Builder`] type in common cases. - /// - /// # Example - /// - /// This example shows how to use the builder to disable UTF-8 mode - /// everywhere. - /// - /// ``` - /// # if cfg!(miri) { return Ok(()); } // miri takes too long - /// use regex_automata::{ - /// nfa::thompson::{self, backtrack::BoundedBacktracker}, - /// util::syntax, - /// Match, - /// }; - /// - /// let re = BoundedBacktracker::builder() - /// .syntax(syntax::Config::new().utf8(false)) - /// .thompson(thompson::Config::new().utf8(false)) - /// .build(r"foo(?-u:[^b])ar.*")?; - /// let (mut cache, mut caps) = (re.create_cache(), re.create_captures()); - /// - /// let haystack = b"\xFEfoo\xFFarzz\xE2\x98\xFF\n"; - /// let expected = Some(Match::must(0, 1..9)); - /// re.try_captures(&mut cache, haystack, &mut caps)?; - /// assert_eq!(expected, caps.get_match()); - /// - /// # Ok::<(), Box>(()) - /// ``` - pub fn builder() -> Builder { - Builder::new() - } - - /// Create a new cache for this regex. - /// - /// The cache returned should only be used for searches for this - /// regex. If you want to reuse the cache for another regex, then you - /// must call [`Cache::reset`] with that regex (or, equivalently, - /// [`BoundedBacktracker::reset_cache`]). - pub fn create_cache(&self) -> Cache { - Cache::new(self) - } - - /// Create a new empty set of capturing groups that is guaranteed to be - /// valid for the search APIs on this `BoundedBacktracker`. - /// - /// A `Captures` value created for a specific `BoundedBacktracker` cannot - /// be used with any other `BoundedBacktracker`. - /// - /// This is a convenience function for [`Captures::all`]. See the - /// [`Captures`] documentation for an explanation of its alternative - /// constructors that permit the `BoundedBacktracker` to do less work - /// during a search, and thus might make it faster. 
- pub fn create_captures(&self) -> Captures { - Captures::all(self.get_nfa().group_info().clone()) - } - - /// Reset the given cache such that it can be used for searching with the - /// this `BoundedBacktracker` (and only this `BoundedBacktracker`). - /// - /// A cache reset permits reusing memory already allocated in this cache - /// with a different `BoundedBacktracker`. - /// - /// # Example - /// - /// This shows how to re-purpose a cache for use with a different - /// `BoundedBacktracker`. - /// - /// ``` - /// # if cfg!(miri) { return Ok(()); } // miri takes too long - /// use regex_automata::{ - /// nfa::thompson::backtrack::BoundedBacktracker, - /// Match, - /// }; - /// - /// let re1 = BoundedBacktracker::new(r"\w")?; - /// let re2 = BoundedBacktracker::new(r"\W")?; - /// - /// let mut cache = re1.create_cache(); - /// assert_eq!( - /// Some(Ok(Match::must(0, 0..2))), - /// re1.try_find_iter(&mut cache, "Δ").next(), - /// ); - /// - /// // Using 'cache' with re2 is not allowed. It may result in panics or - /// // incorrect results. In order to re-purpose the cache, we must reset - /// // it with the BoundedBacktracker we'd like to use it with. - /// // - /// // Similarly, after this reset, using the cache with 're1' is also not - /// // allowed. - /// cache.reset(&re2); - /// assert_eq!( - /// Some(Ok(Match::must(0, 0..3))), - /// re2.try_find_iter(&mut cache, "☃").next(), - /// ); - /// - /// # Ok::<(), Box>(()) - /// ``` - pub fn reset_cache(&self, cache: &mut Cache) { - cache.reset(self); - } - - /// Returns the total number of patterns compiled into this - /// `BoundedBacktracker`. - /// - /// In the case of a `BoundedBacktracker` that contains no patterns, this - /// returns `0`. - /// - /// # Example - /// - /// This example shows the pattern length for a `BoundedBacktracker` that - /// never matches: - /// - /// ``` - /// use regex_automata::nfa::thompson::backtrack::BoundedBacktracker; - /// - /// let re = BoundedBacktracker::never_match()?; - /// assert_eq!(re.pattern_len(), 0); - /// # Ok::<(), Box>(()) - /// ``` - /// - /// And another example for a `BoundedBacktracker` that matches at every - /// position: - /// - /// ``` - /// use regex_automata::nfa::thompson::backtrack::BoundedBacktracker; - /// - /// let re = BoundedBacktracker::always_match()?; - /// assert_eq!(re.pattern_len(), 1); - /// # Ok::<(), Box>(()) - /// ``` - /// - /// And finally, a `BoundedBacktracker` that was constructed from multiple - /// patterns: - /// - /// ``` - /// use regex_automata::nfa::thompson::backtrack::BoundedBacktracker; - /// - /// let re = BoundedBacktracker::new_many(&["[0-9]+", "[a-z]+", "[A-Z]+"])?; - /// assert_eq!(re.pattern_len(), 3); - /// # Ok::<(), Box>(()) - /// ``` - pub fn pattern_len(&self) -> usize { - self.nfa.pattern_len() - } - - /// Return the config for this `BoundedBacktracker`. - #[inline] - pub fn get_config(&self) -> &Config { - &self.config - } - - /// Returns a reference to the underlying NFA. - #[inline] - pub fn get_nfa(&self) -> &NFA { - &self.nfa - } - - /// Returns the maximum haystack length supported by this backtracker. - /// - /// This routine is a function of both [`Config::visited_capacity`] and the - /// internal size of the backtracker's NFA. - /// - /// # Example - /// - /// This example shows how the maximum haystack length can vary depending - /// on the size of the regex itself. Note though that the specific maximum - /// values here are not an API guarantee. 
The default visited capacity is - /// subject to change and not covered by semver. - /// - /// ``` - /// # if cfg!(miri) { return Ok(()); } // miri takes too long - /// use regex_automata::{ - /// nfa::thompson::backtrack::BoundedBacktracker, - /// Match, MatchError, - /// }; - /// - /// // If you're only using ASCII, you get a big budget. - /// let re = BoundedBacktracker::new(r"(?-u)\w+")?; - /// let mut cache = re.create_cache(); - /// assert_eq!(re.max_haystack_len(), 299_592); - /// // Things work up to the max. - /// let mut haystack = "a".repeat(299_592); - /// let expected = Some(Ok(Match::must(0, 0..299_592))); - /// assert_eq!(expected, re.try_find_iter(&mut cache, &haystack).next()); - /// // But you'll get an error if you provide a haystack that's too big. - /// // Notice that we use the 'try_find_iter' routine instead, which - /// // yields Result instead of Match. - /// haystack.push('a'); - /// let expected = Some(Err(MatchError::haystack_too_long(299_593))); - /// assert_eq!(expected, re.try_find_iter(&mut cache, &haystack).next()); - /// - /// // Unicode inflates the size of the underlying NFA quite a bit, and - /// // thus means that the backtracker can only handle smaller haystacks, - /// // assuming that the visited capacity remains unchanged. - /// let re = BoundedBacktracker::new(r"\w+")?; - /// assert!(re.max_haystack_len() <= 7_000); - /// // But we can increase the visited capacity to handle bigger haystacks! - /// let re = BoundedBacktracker::builder() - /// .configure(BoundedBacktracker::config().visited_capacity(1<<20)) - /// .build(r"\w+")?; - /// assert!(re.max_haystack_len() >= 25_000); - /// assert!(re.max_haystack_len() <= 28_000); - /// # Ok::<(), Box>(()) - /// ``` - #[inline] - pub fn max_haystack_len(&self) -> usize { - // The capacity given in the config is "bytes of heap memory," but the - // capacity we use here is "number of bits." So convert the capacity in - // bytes to the capacity in bits. - let capacity = 8 * self.get_config().get_visited_capacity(); - let blocks = div_ceil(capacity, Visited::BLOCK_SIZE); - let real_capacity = blocks.saturating_mul(Visited::BLOCK_SIZE); - // It's possible for `real_capacity` to be smaller than the number of - // NFA states for particularly large regexes, so we saturate towards - // zero. - (real_capacity / self.nfa.states().len()).saturating_sub(1) - } -} - -impl BoundedBacktracker { - /// Returns true if and only if this regex matches the given haystack. - /// - /// In the case of a backtracking regex engine, and unlike most other - /// regex engines in this crate, short circuiting isn't practical. However, - /// this routine may still be faster because it instructs backtracking to - /// not keep track of any capturing groups. - /// - /// # Errors - /// - /// This routine only errors if the search could not complete. For this - /// backtracking regex engine, this only occurs when the haystack length - /// exceeds [`BoundedBacktracker::max_haystack_len`]. - /// - /// When a search cannot complete, callers cannot know whether a match - /// exists or not. 
- /// - /// # Example - /// - /// ``` - /// use regex_automata::nfa::thompson::backtrack::BoundedBacktracker; - /// - /// let re = BoundedBacktracker::new("foo[0-9]+bar")?; - /// let mut cache = re.create_cache(); - /// - /// assert!(re.try_is_match(&mut cache, "foo12345bar")?); - /// assert!(!re.try_is_match(&mut cache, "foobar")?); - /// # Ok::<(), Box>(()) - /// ``` - /// - /// # Example: consistency with search APIs - /// - /// `is_match` is guaranteed to return `true` whenever `find` returns a - /// match. This includes searches that are executed entirely within a - /// codepoint: - /// - /// ``` - /// use regex_automata::{ - /// nfa::thompson::backtrack::BoundedBacktracker, - /// Input, - /// }; - /// - /// let re = BoundedBacktracker::new("a*")?; - /// let mut cache = re.create_cache(); - /// - /// assert!(!re.try_is_match(&mut cache, Input::new("☃").span(1..2))?); - /// # Ok::<(), Box>(()) - /// ``` - /// - /// Notice that when UTF-8 mode is disabled, then the above reports a - /// match because the restriction against zero-width matches that split a - /// codepoint has been lifted: - /// - /// ``` - /// use regex_automata::{ - /// nfa::thompson::{backtrack::BoundedBacktracker, NFA}, - /// Input, - /// }; - /// - /// let re = BoundedBacktracker::builder() - /// .thompson(NFA::config().utf8(false)) - /// .build("a*")?; - /// let mut cache = re.create_cache(); - /// - /// assert!(re.try_is_match(&mut cache, Input::new("☃").span(1..2))?); - /// # Ok::<(), Box>(()) - /// ``` - #[inline] - pub fn try_is_match<'h, I: Into>>( - &self, - cache: &mut Cache, - input: I, - ) -> Result { - let input = input.into().earliest(true); - self.try_search_slots(cache, &input, &mut []).map(|pid| pid.is_some()) - } - - /// Executes a leftmost forward search and returns a `Match` if one exists. - /// - /// This routine only includes the overall match span. To get - /// access to the individual spans of each capturing group, use - /// [`BoundedBacktracker::try_captures`]. - /// - /// # Errors - /// - /// This routine only errors if the search could not complete. For this - /// backtracking regex engine, this only occurs when the haystack length - /// exceeds [`BoundedBacktracker::max_haystack_len`]. - /// - /// When a search cannot complete, callers cannot know whether a match - /// exists or not. - /// - /// # Example - /// - /// ``` - /// use regex_automata::{ - /// nfa::thompson::backtrack::BoundedBacktracker, - /// Match, - /// }; - /// - /// let re = BoundedBacktracker::new("foo[0-9]+")?; - /// let mut cache = re.create_cache(); - /// let expected = Match::must(0, 0..8); - /// assert_eq!(Some(expected), re.try_find(&mut cache, "foo12345")?); - /// - /// # Ok::<(), Box>(()) - /// ``` - #[inline] - pub fn try_find<'h, I: Into>>( - &self, - cache: &mut Cache, - input: I, - ) -> Result, MatchError> { - let input = input.into(); - if self.get_nfa().pattern_len() == 1 { - let mut slots = [None, None]; - let pid = match self.try_search_slots(cache, &input, &mut slots)? { - None => return Ok(None), - Some(pid) => pid, - }; - let start = match slots[0] { - None => return Ok(None), - Some(s) => s.get(), - }; - let end = match slots[1] { - None => return Ok(None), - Some(s) => s.get(), - }; - return Ok(Some(Match::new(pid, Span { start, end }))); - } - let ginfo = self.get_nfa().group_info(); - let slots_len = ginfo.implicit_slot_len(); - let mut slots = vec![None; slots_len]; - let pid = match self.try_search_slots(cache, &input, &mut slots)? 
{ - None => return Ok(None), - Some(pid) => pid, - }; - let start = match slots[pid.as_usize() * 2] { - None => return Ok(None), - Some(s) => s.get(), - }; - let end = match slots[pid.as_usize() * 2 + 1] { - None => return Ok(None), - Some(s) => s.get(), - }; - Ok(Some(Match::new(pid, Span { start, end }))) - } - - /// Executes a leftmost forward search and writes the spans of capturing - /// groups that participated in a match into the provided [`Captures`] - /// value. If no match was found, then [`Captures::is_match`] is guaranteed - /// to return `false`. - /// - /// # Errors - /// - /// This routine only errors if the search could not complete. For this - /// backtracking regex engine, this only occurs when the haystack length - /// exceeds [`BoundedBacktracker::max_haystack_len`]. - /// - /// When a search cannot complete, callers cannot know whether a match - /// exists or not. - /// - /// # Example - /// - /// ``` - /// use regex_automata::{ - /// nfa::thompson::backtrack::BoundedBacktracker, - /// Span, - /// }; - /// - /// let re = BoundedBacktracker::new( - /// r"^([0-9]{4})-([0-9]{2})-([0-9]{2})$", - /// )?; - /// let (mut cache, mut caps) = (re.create_cache(), re.create_captures()); - /// - /// re.try_captures(&mut cache, "2010-03-14", &mut caps)?; - /// assert!(caps.is_match()); - /// assert_eq!(Some(Span::from(0..4)), caps.get_group(1)); - /// assert_eq!(Some(Span::from(5..7)), caps.get_group(2)); - /// assert_eq!(Some(Span::from(8..10)), caps.get_group(3)); - /// - /// # Ok::<(), Box>(()) - /// ``` - #[inline] - pub fn try_captures<'h, I: Into>>( - &self, - cache: &mut Cache, - input: I, - caps: &mut Captures, - ) -> Result<(), MatchError> { - self.try_search(cache, &input.into(), caps) - } - - /// Returns an iterator over all non-overlapping leftmost matches in the - /// given bytes. If no match exists, then the iterator yields no elements. - /// - /// If the regex engine returns an error at any point, then the iterator - /// will yield that error. - /// - /// # Example - /// - /// ``` - /// use regex_automata::{ - /// nfa::thompson::backtrack::BoundedBacktracker, - /// Match, MatchError, - /// }; - /// - /// let re = BoundedBacktracker::new("foo[0-9]+")?; - /// let mut cache = re.create_cache(); - /// - /// let text = "foo1 foo12 foo123"; - /// let result: Result, MatchError> = re - /// .try_find_iter(&mut cache, text) - /// .collect(); - /// let matches = result?; - /// assert_eq!(matches, vec![ - /// Match::must(0, 0..4), - /// Match::must(0, 5..10), - /// Match::must(0, 11..17), - /// ]); - /// # Ok::<(), Box>(()) - /// ``` - #[inline] - pub fn try_find_iter<'r, 'c, 'h, I: Into>>( - &'r self, - cache: &'c mut Cache, - input: I, - ) -> TryFindMatches<'r, 'c, 'h> { - let caps = Captures::matches(self.get_nfa().group_info().clone()); - let it = iter::Searcher::new(input.into()); - TryFindMatches { re: self, cache, caps, it } - } - - /// Returns an iterator over all non-overlapping `Captures` values. If no - /// match exists, then the iterator yields no elements. - /// - /// This yields the same matches as [`BoundedBacktracker::try_find_iter`], - /// but it includes the spans of all capturing groups that participate in - /// each match. - /// - /// If the regex engine returns an error at any point, then the iterator - /// will yield that error. - /// - /// **Tip:** See [`util::iter::Searcher`](crate::util::iter::Searcher) for - /// how to correctly iterate over all matches in a haystack while avoiding - /// the creation of a new `Captures` value for every match. 
(Which you are - /// forced to do with an `Iterator`.) - /// - /// # Example - /// - /// ``` - /// use regex_automata::{ - /// nfa::thompson::backtrack::BoundedBacktracker, - /// Span, - /// }; - /// - /// let re = BoundedBacktracker::new("foo(?P[0-9]+)")?; - /// let mut cache = re.create_cache(); - /// - /// let text = "foo1 foo12 foo123"; - /// let mut spans = vec![]; - /// for result in re.try_captures_iter(&mut cache, text) { - /// let caps = result?; - /// // The unwrap is OK since 'numbers' matches if the pattern matches. - /// spans.push(caps.get_group_by_name("numbers").unwrap()); - /// } - /// assert_eq!(spans, vec![ - /// Span::from(3..4), - /// Span::from(8..10), - /// Span::from(14..17), - /// ]); - /// # Ok::<(), Box>(()) - /// ``` - #[inline] - pub fn try_captures_iter<'r, 'c, 'h, I: Into>>( - &'r self, - cache: &'c mut Cache, - input: I, - ) -> TryCapturesMatches<'r, 'c, 'h> { - let caps = self.create_captures(); - let it = iter::Searcher::new(input.into()); - TryCapturesMatches { re: self, cache, caps, it } - } -} - -impl BoundedBacktracker { - /// Executes a leftmost forward search and writes the spans of capturing - /// groups that participated in a match into the provided [`Captures`] - /// value. If no match was found, then [`Captures::is_match`] is guaranteed - /// to return `false`. - /// - /// This is like [`BoundedBacktracker::try_captures`], but it accepts a - /// concrete `&Input` instead of an `Into`. - /// - /// # Errors - /// - /// This routine only errors if the search could not complete. For this - /// backtracking regex engine, this only occurs when the haystack length - /// exceeds [`BoundedBacktracker::max_haystack_len`]. - /// - /// When a search cannot complete, callers cannot know whether a match - /// exists or not. - /// - /// # Example: specific pattern search - /// - /// This example shows how to build a multi bounded backtracker that - /// permits searching for specific patterns. - /// - /// ``` - /// use regex_automata::{ - /// nfa::thompson::backtrack::BoundedBacktracker, - /// Anchored, Input, Match, PatternID, - /// }; - /// - /// let re = BoundedBacktracker::new_many(&[ - /// "[a-z0-9]{6}", - /// "[a-z][a-z0-9]{5}", - /// ])?; - /// let (mut cache, mut caps) = (re.create_cache(), re.create_captures()); - /// let haystack = "foo123"; - /// - /// // Since we are using the default leftmost-first match and both - /// // patterns match at the same starting position, only the first pattern - /// // will be returned in this case when doing a search for any of the - /// // patterns. - /// let expected = Some(Match::must(0, 0..6)); - /// re.try_search(&mut cache, &Input::new(haystack), &mut caps)?; - /// assert_eq!(expected, caps.get_match()); - /// - /// // But if we want to check whether some other pattern matches, then we - /// // can provide its pattern ID. - /// let expected = Some(Match::must(1, 0..6)); - /// let input = Input::new(haystack) - /// .anchored(Anchored::Pattern(PatternID::must(1))); - /// re.try_search(&mut cache, &input, &mut caps)?; - /// assert_eq!(expected, caps.get_match()); - /// - /// # Ok::<(), Box>(()) - /// ``` - /// - /// # Example: specifying the bounds of a search - /// - /// This example shows how providing the bounds of a search can produce - /// different results than simply sub-slicing the haystack. 
- /// - /// ``` - /// # if cfg!(miri) { return Ok(()); } // miri takes too long - /// use regex_automata::{ - /// nfa::thompson::backtrack::BoundedBacktracker, - /// Match, Input, - /// }; - /// - /// let re = BoundedBacktracker::new(r"\b[0-9]{3}\b")?; - /// let (mut cache, mut caps) = (re.create_cache(), re.create_captures()); - /// let haystack = "foo123bar"; - /// - /// // Since we sub-slice the haystack, the search doesn't know about - /// // the larger context and assumes that `123` is surrounded by word - /// // boundaries. And of course, the match position is reported relative - /// // to the sub-slice as well, which means we get `0..3` instead of - /// // `3..6`. - /// let expected = Some(Match::must(0, 0..3)); - /// re.try_search(&mut cache, &Input::new(&haystack[3..6]), &mut caps)?; - /// assert_eq!(expected, caps.get_match()); - /// - /// // But if we provide the bounds of the search within the context of the - /// // entire haystack, then the search can take the surrounding context - /// // into account. (And if we did find a match, it would be reported - /// // as a valid offset into `haystack` instead of its sub-slice.) - /// let expected = None; - /// re.try_search( - /// &mut cache, &Input::new(haystack).range(3..6), &mut caps, - /// )?; - /// assert_eq!(expected, caps.get_match()); - /// - /// # Ok::<(), Box>(()) - /// ``` - #[inline] - pub fn try_search( - &self, - cache: &mut Cache, - input: &Input<'_>, - caps: &mut Captures, - ) -> Result<(), MatchError> { - caps.set_pattern(None); - let pid = self.try_search_slots(cache, input, caps.slots_mut())?; - caps.set_pattern(pid); - Ok(()) - } - - /// Executes a leftmost forward search and writes the spans of capturing - /// groups that participated in a match into the provided `slots`, and - /// returns the matching pattern ID. The contents of the slots for patterns - /// other than the matching pattern are unspecified. If no match was found, - /// then `None` is returned and the contents of all `slots` is unspecified. - /// - /// This is like [`BoundedBacktracker::try_search`], but it accepts a raw - /// slots slice instead of a `Captures` value. This is useful in contexts - /// where you don't want or need to allocate a `Captures`. - /// - /// It is legal to pass _any_ number of slots to this routine. If the regex - /// engine would otherwise write a slot offset that doesn't fit in the - /// provided slice, then it is simply skipped. In general though, there are - /// usually three slice lengths you might want to use: - /// - /// * An empty slice, if you only care about which pattern matched. - /// * A slice with - /// [`pattern_len() * 2`](crate::nfa::thompson::NFA::pattern_len) - /// slots, if you only care about the overall match spans for each matching - /// pattern. - /// * A slice with - /// [`slot_len()`](crate::util::captures::GroupInfo::slot_len) slots, which - /// permits recording match offsets for every capturing group in every - /// pattern. - /// - /// # Errors - /// - /// This routine only errors if the search could not complete. For this - /// backtracking regex engine, this only occurs when the haystack length - /// exceeds [`BoundedBacktracker::max_haystack_len`]. - /// - /// When a search cannot complete, callers cannot know whether a match - /// exists or not. - /// - /// # Example - /// - /// This example shows how to find the overall match offsets in a - /// multi-pattern search without allocating a `Captures` value. Indeed, we - /// can put our slots right on the stack. 
- /// - /// ``` - /// # if cfg!(miri) { return Ok(()); } // miri takes too long - /// use regex_automata::{ - /// nfa::thompson::backtrack::BoundedBacktracker, - /// PatternID, Input, - /// }; - /// - /// let re = BoundedBacktracker::new_many(&[ - /// r"\pL+", - /// r"\d+", - /// ])?; - /// let mut cache = re.create_cache(); - /// let input = Input::new("!@#123"); - /// - /// // We only care about the overall match offsets here, so we just - /// // allocate two slots for each pattern. Each slot records the start - /// // and end of the match. - /// let mut slots = [None; 4]; - /// let pid = re.try_search_slots(&mut cache, &input, &mut slots)?; - /// assert_eq!(Some(PatternID::must(1)), pid); - /// - /// // The overall match offsets are always at 'pid * 2' and 'pid * 2 + 1'. - /// // See 'GroupInfo' for more details on the mapping between groups and - /// // slot indices. - /// let slot_start = pid.unwrap().as_usize() * 2; - /// let slot_end = slot_start + 1; - /// assert_eq!(Some(3), slots[slot_start].map(|s| s.get())); - /// assert_eq!(Some(6), slots[slot_end].map(|s| s.get())); - /// - /// # Ok::<(), Box>(()) - /// ``` - #[inline] - pub fn try_search_slots( - &self, - cache: &mut Cache, - input: &Input<'_>, - slots: &mut [Option], - ) -> Result, MatchError> { - let utf8empty = self.get_nfa().has_empty() && self.get_nfa().is_utf8(); - if !utf8empty { - let maybe_hm = self.try_search_slots_imp(cache, input, slots)?; - return Ok(maybe_hm.map(|hm| hm.pattern())); - } - // See PikeVM::try_search_slots for why we do this. - let min = self.get_nfa().group_info().implicit_slot_len(); - if slots.len() >= min { - let maybe_hm = self.try_search_slots_imp(cache, input, slots)?; - return Ok(maybe_hm.map(|hm| hm.pattern())); - } - if self.get_nfa().pattern_len() == 1 { - let mut enough = [None, None]; - let got = self.try_search_slots_imp(cache, input, &mut enough)?; - // This is OK because we know `enough_slots` is strictly bigger - // than `slots`, otherwise this special case isn't reached. - slots.copy_from_slice(&enough[..slots.len()]); - return Ok(got.map(|hm| hm.pattern())); - } - let mut enough = vec![None; min]; - let got = self.try_search_slots_imp(cache, input, &mut enough)?; - // This is OK because we know `enough_slots` is strictly bigger than - // `slots`, otherwise this special case isn't reached. - slots.copy_from_slice(&enough[..slots.len()]); - Ok(got.map(|hm| hm.pattern())) - } - - /// This is the actual implementation of `try_search_slots_imp` that - /// doesn't account for the special case when 1) the NFA has UTF-8 mode - /// enabled, 2) the NFA can match the empty string and 3) the caller has - /// provided an insufficient number of slots to record match offsets. - #[inline(never)] - fn try_search_slots_imp( - &self, - cache: &mut Cache, - input: &Input<'_>, - slots: &mut [Option], - ) -> Result, MatchError> { - let utf8empty = self.get_nfa().has_empty() && self.get_nfa().is_utf8(); - let hm = match self.search_imp(cache, input, slots)? { - None => return Ok(None), - Some(hm) if !utf8empty => return Ok(Some(hm)), - Some(hm) => hm, - }; - empty::skip_splits_fwd(input, hm, hm.offset(), |input| { - Ok(self - .search_imp(cache, input, slots)? - .map(|hm| (hm, hm.offset()))) - }) - } - - /// The implementation of standard leftmost backtracking search. - /// - /// Capturing group spans are written to 'caps', but only if requested. 
- /// 'caps' can be one of three things: 1) totally empty, in which case, we - /// only report the pattern that matched or 2) only has slots for recording - /// the overall match offsets for any pattern or 3) has all slots available - /// for recording the spans of any groups participating in a match. - fn search_imp( - &self, - cache: &mut Cache, - input: &Input<'_>, - slots: &mut [Option], - ) -> Result, MatchError> { - // Unlike in the PikeVM, we write our capturing group spans directly - // into the caller's captures groups. So we have to make sure we're - // starting with a blank slate first. In the PikeVM, we avoid this - // by construction: the spans that are copied to every slot in the - // 'Captures' value already account for presence/absence. In this - // backtracker, we write directly into the caller provided slots, where - // as in the PikeVM, we write into scratch space first and only copy - // them to the caller provided slots when a match is found. - for slot in slots.iter_mut() { - *slot = None; - } - cache.setup_search(&self, input)?; - if input.is_done() { - return Ok(None); - } - let (anchored, start_id) = match input.get_anchored() { - // Only way we're unanchored is if both the caller asked for an - // unanchored search *and* the pattern is itself not anchored. - Anchored::No => ( - self.nfa.is_always_start_anchored(), - // We always use the anchored starting state here, even if - // doing an unanchored search. The "unanchored" part of it is - // implemented in the loop below, by simply trying the next - // byte offset if the previous backtracking exploration failed. - self.nfa.start_anchored(), - ), - Anchored::Yes => (true, self.nfa.start_anchored()), - Anchored::Pattern(pid) => match self.nfa.start_pattern(pid) { - None => return Ok(None), - Some(sid) => (true, sid), - }, - }; - if anchored { - let at = input.start(); - return Ok(self.backtrack(cache, input, at, start_id, slots)); - } - let pre = self.get_config().get_prefilter(); - let mut at = input.start(); - while at <= input.end() { - if let Some(ref pre) = pre { - let span = Span::from(at..input.end()); - match pre.find(input.haystack(), span) { - None => break, - Some(ref span) => at = span.start, - } - } - if let Some(hm) = self.backtrack(cache, input, at, start_id, slots) - { - return Ok(Some(hm)); - } - at += 1; - } - Ok(None) - } - - /// Look for a match starting at `at` in `input` and write the matching - /// pattern ID and group spans to `caps`. The search uses `start_id` as its - /// starting state in the underlying NFA. - /// - /// If no match was found, then the caller should increment `at` and try - /// at the next position. - #[cfg_attr(feature = "perf-inline", inline(always))] - fn backtrack( - &self, - cache: &mut Cache, - input: &Input<'_>, - at: usize, - start_id: StateID, - slots: &mut [Option], - ) -> Option { - cache.stack.push(Frame::Step { sid: start_id, at }); - while let Some(frame) = cache.stack.pop() { - match frame { - Frame::Step { sid, at } => { - if let Some(hm) = self.step(cache, input, sid, at, slots) { - return Some(hm); - } - } - Frame::RestoreCapture { slot, offset } => { - slots[slot] = offset; - } - } - } - None - } - - // LAMENTATION: The actual backtracking search is implemented in about - // 75 lines below. Yet this file is over 2,000 lines long. What have I - // done? - - /// Execute a "step" in the backtracing algorithm. - /// - /// A "step" is somewhat of a misnomer, because this routine keeps going - /// until it either runs out of things to try or fins a match. 
In the - /// former case, it may have pushed some things on to the backtracking - /// stack, in which case, those will be tried next as part of the - /// 'backtrack' routine above. - #[cfg_attr(feature = "perf-inline", inline(always))] - fn step( - &self, - cache: &mut Cache, - input: &Input<'_>, - mut sid: StateID, - mut at: usize, - slots: &mut [Option], - ) -> Option { - loop { - if !cache.visited.insert(sid, at - input.start()) { - return None; - } - match *self.nfa.state(sid) { - State::ByteRange { ref trans } => { - // Why do we need this? Unlike other regex engines in this - // crate, the backtracker can steam roll ahead in the - // haystack outside of the main loop over the bytes in the - // haystack. While 'trans.matches()' below handles the case - // of 'at' being out of bounds of 'input.haystack()', we - // also need to handle the case of 'at' going out of bounds - // of the span the caller asked to search. - // - // We should perhaps make the 'trans.matches()' API accept - // an '&Input' instead of a '&[u8]'. Or at least, add a new - // API that does it. - if at >= input.end() { - return None; - } - if !trans.matches(input.haystack(), at) { - return None; - } - sid = trans.next; - at += 1; - } - State::Sparse(ref sparse) => { - if at >= input.end() { - return None; - } - sid = sparse.matches(input.haystack(), at)?; - at += 1; - } - State::Dense(ref dense) => { - if at >= input.end() { - return None; - } - sid = dense.matches(input.haystack(), at)?; - at += 1; - } - State::Look { look, next } => { - // OK because we don't permit building a searcher with a - // Unicode word boundary if the requisite Unicode data is - // unavailable. - if !self.nfa.look_matcher().matches_inline( - look, - input.haystack(), - at, - ) { - return None; - } - sid = next; - } - State::Union { ref alternates } => { - sid = match alternates.get(0) { - None => return None, - Some(&sid) => sid, - }; - cache.stack.extend( - alternates[1..] - .iter() - .copied() - .rev() - .map(|sid| Frame::Step { sid, at }), - ); - } - State::BinaryUnion { alt1, alt2 } => { - sid = alt1; - cache.stack.push(Frame::Step { sid: alt2, at }); - } - State::Capture { next, slot, .. } => { - if slot.as_usize() < slots.len() { - cache.stack.push(Frame::RestoreCapture { - slot, - offset: slots[slot], - }); - slots[slot] = NonMaxUsize::new(at); - } - sid = next; - } - State::Fail => return None, - State::Match { pattern_id } => { - return Some(HalfMatch::new(pattern_id, at)); - } - } - } - } -} - -/// An iterator over all non-overlapping matches for a fallible search. -/// -/// The iterator yields a `Result { - re: &'r BoundedBacktracker, - cache: &'c mut Cache, - caps: Captures, - it: iter::Searcher<'h>, -} - -impl<'r, 'c, 'h> Iterator for TryFindMatches<'r, 'c, 'h> { - type Item = Result; - - #[inline] - fn next(&mut self) -> Option> { - // Splitting 'self' apart seems necessary to appease borrowck. - let TryFindMatches { re, ref mut cache, ref mut caps, ref mut it } = - *self; - it.try_advance(|input| { - re.try_search(cache, input, caps)?; - Ok(caps.get_match()) - }) - .transpose() - } -} - -/// An iterator over all non-overlapping leftmost matches, with their capturing -/// groups, for a fallible search. -/// -/// The iterator yields a `Result` value until no more -/// matches could be found. -/// -/// The lifetime parameters are as follows: -/// -/// * `'r` represents the lifetime of the BoundedBacktracker. -/// * `'c` represents the lifetime of the BoundedBacktracker's cache. 
-/// * `'h` represents the lifetime of the haystack being searched. -/// -/// This iterator can be created with the -/// [`BoundedBacktracker::try_captures_iter`] method. -#[derive(Debug)] -pub struct TryCapturesMatches<'r, 'c, 'h> { - re: &'r BoundedBacktracker, - cache: &'c mut Cache, - caps: Captures, - it: iter::Searcher<'h>, -} - -impl<'r, 'c, 'h> Iterator for TryCapturesMatches<'r, 'c, 'h> { - type Item = Result; - - #[inline] - fn next(&mut self) -> Option> { - // Splitting 'self' apart seems necessary to appease borrowck. - let TryCapturesMatches { re, ref mut cache, ref mut caps, ref mut it } = - *self; - let _ = it - .try_advance(|input| { - re.try_search(cache, input, caps)?; - Ok(caps.get_match()) - }) - .transpose()?; - if caps.is_match() { - Some(Ok(caps.clone())) - } else { - None - } - } -} - -/// A cache represents mutable state that a [`BoundedBacktracker`] requires -/// during a search. -/// -/// For a given [`BoundedBacktracker`], its corresponding cache may be created -/// either via [`BoundedBacktracker::create_cache`], or via [`Cache::new`]. -/// They are equivalent in every way, except the former does not require -/// explicitly importing `Cache`. -/// -/// A particular `Cache` is coupled with the [`BoundedBacktracker`] from which -/// it was created. It may only be used with that `BoundedBacktracker`. A cache -/// and its allocations may be re-purposed via [`Cache::reset`], in which case, -/// it can only be used with the new `BoundedBacktracker` (and not the old -/// one). -#[derive(Clone, Debug)] -pub struct Cache { - /// Stack used on the heap for doing backtracking instead of the - /// traditional recursive approach. We don't want recursion because then - /// we're likely to hit a stack overflow for bigger regexes. - stack: Vec, - /// The set of (StateID, HaystackOffset) pairs that have been visited - /// by the backtracker within a single search. If such a pair has been - /// visited, then we avoid doing the work for that pair again. This is - /// what "bounds" the backtracking and prevents it from having worst case - /// exponential time. - visited: Visited, -} - -impl Cache { - /// Create a new [`BoundedBacktracker`] cache. - /// - /// A potentially more convenient routine to create a cache is - /// [`BoundedBacktracker::create_cache`], as it does not require also - /// importing the `Cache` type. - /// - /// If you want to reuse the returned `Cache` with some other - /// `BoundedBacktracker`, then you must call [`Cache::reset`] with the - /// desired `BoundedBacktracker`. - pub fn new(re: &BoundedBacktracker) -> Cache { - Cache { stack: vec![], visited: Visited::new(re) } - } - - /// Reset this cache such that it can be used for searching with different - /// [`BoundedBacktracker`]. - /// - /// A cache reset permits reusing memory already allocated in this cache - /// with a different `BoundedBacktracker`. - /// - /// # Example - /// - /// This shows how to re-purpose a cache for use with a different - /// `BoundedBacktracker`. - /// - /// ``` - /// # if cfg!(miri) { return Ok(()); } // miri takes too long - /// use regex_automata::{ - /// nfa::thompson::backtrack::BoundedBacktracker, - /// Match, - /// }; - /// - /// let re1 = BoundedBacktracker::new(r"\w")?; - /// let re2 = BoundedBacktracker::new(r"\W")?; - /// - /// let mut cache = re1.create_cache(); - /// assert_eq!( - /// Some(Ok(Match::must(0, 0..2))), - /// re1.try_find_iter(&mut cache, "Δ").next(), - /// ); - /// - /// // Using 'cache' with re2 is not allowed. 
It may result in panics or - /// // incorrect results. In order to re-purpose the cache, we must reset - /// // it with the BoundedBacktracker we'd like to use it with. - /// // - /// // Similarly, after this reset, using the cache with 're1' is also not - /// // allowed. - /// cache.reset(&re2); - /// assert_eq!( - /// Some(Ok(Match::must(0, 0..3))), - /// re2.try_find_iter(&mut cache, "☃").next(), - /// ); - /// - /// # Ok::<(), Box>(()) - /// ``` - pub fn reset(&mut self, re: &BoundedBacktracker) { - self.visited.reset(re); - } - - /// Returns the heap memory usage, in bytes, of this cache. - /// - /// This does **not** include the stack size used up by this cache. To - /// compute that, use `std::mem::size_of::()`. - pub fn memory_usage(&self) -> usize { - self.stack.len() * core::mem::size_of::() - + self.visited.memory_usage() - } - - /// Clears this cache. This should be called at the start of every search - /// to ensure we start with a clean slate. - /// - /// This also sets the length of the capturing groups used in the current - /// search. This permits an optimization where by 'SlotTable::for_state' - /// only returns the number of slots equivalent to the number of slots - /// given in the 'Captures' value. This may be less than the total number - /// of possible slots, e.g., when one only wants to track overall match - /// offsets. This in turn permits less copying of capturing group spans - /// in the BoundedBacktracker. - fn setup_search( - &mut self, - re: &BoundedBacktracker, - input: &Input<'_>, - ) -> Result<(), MatchError> { - self.stack.clear(); - self.visited.setup_search(re, input)?; - Ok(()) - } -} - -/// Represents a stack frame on the heap while doing backtracking. -/// -/// Instead of using explicit recursion for backtracking, we use a stack on -/// the heap to keep track of things that we want to explore if the current -/// backtracking branch turns out to not lead to a match. -#[derive(Clone, Debug)] -enum Frame { - /// Look for a match starting at `sid` and the given position in the - /// haystack. - Step { sid: StateID, at: usize }, - /// Reset the given `slot` to the given `offset` (which might be `None`). - /// This effectively gives a "scope" to capturing groups, such that an - /// offset for a particular group only gets returned if the match goes - /// through that capturing group. If backtracking ends up going down a - /// different branch that results in a different offset (or perhaps none at - /// all), then this "restore capture" frame will cause the offset to get - /// reset. - RestoreCapture { slot: SmallIndex, offset: Option }, -} - -/// A bitset that keeps track of whether a particular (StateID, offset) has -/// been considered during backtracking. If it has already been visited, then -/// backtracking skips it. This is what gives backtracking its "bound." -#[derive(Clone, Debug)] -struct Visited { - /// The actual underlying bitset. Each element in the bitset corresponds - /// to a particular (StateID, offset) pair. States correspond to the rows - /// and the offsets correspond to the columns. - /// - /// If our underlying NFA has N states and the haystack we're searching - /// has M bytes, then we have N*(M+1) entries in our bitset table. The - /// M+1 occurs because our matches are delayed by one byte (to support - /// look-around), and so we need to handle the end position itself rather - /// than stopping just before the end. (If there is no end position, then - /// it's treated as "end-of-input," which is matched by things like '$'.) 
- /// - /// Given BITS=N*(M+1), we wind up with div_ceil(BITS, sizeof(usize)) - /// blocks. - /// - /// We use 'usize' to represent our blocks because it makes some of the - /// arithmetic in 'insert' a bit nicer. For example, if we used 'u32' for - /// our block, we'd either need to cast u32s to usizes or usizes to u32s. - bitset: Vec, - /// The stride represents one plus length of the haystack we're searching - /// (as described above). The stride must be initialized for each search. - stride: usize, -} - -impl Visited { - /// The size of each block, in bits. - const BLOCK_SIZE: usize = 8 * core::mem::size_of::(); - - /// Create a new visited set for the given backtracker. - /// - /// The set is ready to use, but must be setup at the beginning of each - /// search by calling `setup_search`. - fn new(re: &BoundedBacktracker) -> Visited { - let mut visited = Visited { bitset: vec![], stride: 0 }; - visited.reset(re); - visited - } - - /// Insert the given (StateID, offset) pair into this set. If it already - /// exists, then this is a no-op and it returns false. Otherwise this - /// returns true. - fn insert(&mut self, sid: StateID, at: usize) -> bool { - let table_index = sid.as_usize() * self.stride + at; - let block_index = table_index / Visited::BLOCK_SIZE; - let bit = table_index % Visited::BLOCK_SIZE; - let block_with_bit = 1 << bit; - if self.bitset[block_index] & block_with_bit != 0 { - return false; - } - self.bitset[block_index] |= block_with_bit; - true - } - - /// Reset this visited set to work with the given bounded backtracker. - fn reset(&mut self, _: &BoundedBacktracker) { - self.bitset.truncate(0); - } - - /// Setup this visited set to work for a search using the given NFA - /// and input configuration. The NFA must be the same NFA used by the - /// BoundedBacktracker given to Visited::reset. Failing to call this might - /// result in panics or silently incorrect search behavior. - fn setup_search( - &mut self, - re: &BoundedBacktracker, - input: &Input<'_>, - ) -> Result<(), MatchError> { - // Our haystack length is only the length of the span of the entire - // haystack that we'll be searching. - let haylen = input.get_span().len(); - let err = || MatchError::haystack_too_long(haylen); - // Our stride is one more than the length of the input because our main - // search loop includes the position at input.end(). (And it does this - // because matches are delayed by one byte to account for look-around.) - self.stride = haylen + 1; - let needed_capacity = - match re.get_nfa().states().len().checked_mul(self.stride) { - None => return Err(err()), - Some(capacity) => capacity, - }; - let max_capacity = 8 * re.get_config().get_visited_capacity(); - if needed_capacity > max_capacity { - return Err(err()); - } - let needed_blocks = div_ceil(needed_capacity, Visited::BLOCK_SIZE); - self.bitset.truncate(needed_blocks); - for block in self.bitset.iter_mut() { - *block = 0; - } - if needed_blocks > self.bitset.len() { - self.bitset.resize(needed_blocks, 0); - } - Ok(()) - } - - /// Return the heap memory usage, in bytes, of this visited set. - fn memory_usage(&self) -> usize { - self.bitset.len() * core::mem::size_of::() - } -} - -/// Integer division, but rounds up instead of down. -fn div_ceil(lhs: usize, rhs: usize) -> usize { - if lhs % rhs == 0 { - lhs / rhs - } else { - (lhs / rhs) + 1 - } -} - -#[cfg(test)] -mod tests { - use super::*; - - // This is a regression test for the maximum haystack length computation. 
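The visited-set bound described above amounts to a dense bitset indexed by `state * stride + offset`, where `stride` is the searched span length plus one. A rough standalone illustration follows; `VisitedSketch` and its methods are invented for this sketch and are not the vendored `Visited` type.

```rust
// Toy sketch of the (StateID, offset) visited set described above.
// Names are illustrative only.

struct VisitedSketch {
    bitset: Vec<usize>,
    stride: usize, // searched span length + 1
}

impl VisitedSketch {
    const BLOCK_SIZE: usize = 8 * std::mem::size_of::<usize>();

    /// `nstates * (haylen + 1)` bits are needed; round up to whole blocks.
    fn new(nstates: usize, haylen: usize) -> VisitedSketch {
        let stride = haylen + 1;
        let bits = nstates * stride;
        let blocks = bits.div_ceil(Self::BLOCK_SIZE);
        VisitedSketch { bitset: vec![0; blocks], stride }
    }

    /// Returns true the first time a (state, offset) pair is seen.
    fn insert(&mut self, sid: usize, at: usize) -> bool {
        let index = sid * self.stride + at;
        let (block, bit) = (index / Self::BLOCK_SIZE, index % Self::BLOCK_SIZE);
        let mask = 1 << bit;
        if self.bitset[block] & mask != 0 {
            return false;
        }
        self.bitset[block] |= mask;
        true
    }
}

fn main() {
    let mut v = VisitedSketch::new(4, 10);
    assert!(v.insert(2, 7));
    assert!(!v.insert(2, 7)); // a repeated (state, offset) pair is skipped
}
```

Capping the number of blocks up front, as `setup_search` does against `visited_capacity`, is what turns this bookkeeping into a hard bound on the total work performed per search.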
- // Previously, it assumed that the total capacity of the backtracker's - // bitset would always be greater than the number of NFA states. But there - // is of course no guarantee that this is true. This regression test - // ensures that not only does `max_haystack_len` not panic, but that it - // should return `0`. - #[cfg(feature = "syntax")] - #[test] - fn max_haystack_len_overflow() { - let re = BoundedBacktracker::builder() - .configure(BoundedBacktracker::config().visited_capacity(10)) - .build(r"[0-9A-Za-z]{100}") - .unwrap(); - assert_eq!(0, re.max_haystack_len()); - } -} diff --git a/vendor/regex-automata/src/nfa/thompson/builder.rs b/vendor/regex-automata/src/nfa/thompson/builder.rs deleted file mode 100644 index 6b69e8784ded8a..00000000000000 --- a/vendor/regex-automata/src/nfa/thompson/builder.rs +++ /dev/null @@ -1,1337 +0,0 @@ -use core::mem; - -use alloc::{sync::Arc, vec, vec::Vec}; - -use crate::{ - nfa::thompson::{ - error::BuildError, - nfa::{self, SparseTransitions, Transition, NFA}, - }, - util::{ - look::{Look, LookMatcher}, - primitives::{IteratorIndexExt, PatternID, SmallIndex, StateID}, - }, -}; - -/// An intermediate NFA state used during construction. -/// -/// During construction of an NFA, it is often convenient to work with states -/// that are amenable to mutation and other carry more information than we -/// otherwise need once an NFA has been built. This type represents those -/// needs. -/// -/// Once construction is finished, the builder will convert these states to a -/// [`nfa::thompson::State`](crate::nfa::thompson::State). This conversion not -/// only results in a simpler representation, but in some cases, entire classes -/// of states are completely removed (such as [`State::Empty`]). -#[derive(Clone, Debug, Eq, PartialEq)] -enum State { - /// An empty state whose only purpose is to forward the automaton to - /// another state via an unconditional epsilon transition. - /// - /// Unconditional epsilon transitions are quite useful during the - /// construction of an NFA, as they permit the insertion of no-op - /// placeholders that make it easier to compose NFA sub-graphs. When - /// the Thompson NFA builder produces a final NFA, all unconditional - /// epsilon transitions are removed, and state identifiers are remapped - /// accordingly. - Empty { - /// The next state that this state should transition to. - next: StateID, - }, - /// A state that only transitions to another state if the current input - /// byte is in a particular range of bytes. - ByteRange { trans: Transition }, - /// A state with possibly many transitions, represented in a sparse - /// fashion. Transitions must be ordered lexicographically by input range - /// and be non-overlapping. As such, this may only be used when every - /// transition has equal priority. (In practice, this is only used for - /// encoding large UTF-8 automata.) In contrast, a `Union` state has each - /// alternate in order of priority. Priority is used to implement greedy - /// matching and also alternations themselves, e.g., `abc|a` where `abc` - /// has priority over `a`. - /// - /// To clarify, it is possible to remove `Sparse` and represent all things - /// that `Sparse` is used for via `Union`. But this creates a more bloated - /// NFA with more epsilon transitions than is necessary in the special case - /// of character classes. - Sparse { transitions: Vec }, - /// A conditional epsilon transition satisfied via some sort of - /// look-around. 
- Look { look: Look, next: StateID }, - /// An empty state that records the start of a capture location. This is an - /// unconditional epsilon transition like `Empty`, except it can be used to - /// record position information for a capture group when using the NFA for - /// search. - CaptureStart { - /// The ID of the pattern that this capture was defined. - pattern_id: PatternID, - /// The capture group index that this capture state corresponds to. - /// The capture group index is always relative to its corresponding - /// pattern. Therefore, in the presence of multiple patterns, both the - /// pattern ID and the capture group index are required to uniquely - /// identify a capturing group. - group_index: SmallIndex, - /// The next state that this state should transition to. - next: StateID, - }, - /// An empty state that records the end of a capture location. This is an - /// unconditional epsilon transition like `Empty`, except it can be used to - /// record position information for a capture group when using the NFA for - /// search. - CaptureEnd { - /// The ID of the pattern that this capture was defined. - pattern_id: PatternID, - /// The capture group index that this capture state corresponds to. - /// The capture group index is always relative to its corresponding - /// pattern. Therefore, in the presence of multiple patterns, both the - /// pattern ID and the capture group index are required to uniquely - /// identify a capturing group. - group_index: SmallIndex, - /// The next state that this state should transition to. - next: StateID, - }, - /// An alternation such that there exists an epsilon transition to all - /// states in `alternates`, where matches found via earlier transitions - /// are preferred over later transitions. - Union { alternates: Vec }, - /// An alternation such that there exists an epsilon transition to all - /// states in `alternates`, where matches found via later transitions are - /// preferred over earlier transitions. - /// - /// This "reverse" state exists for convenience during compilation that - /// permits easy construction of non-greedy combinations of NFA states. At - /// the end of compilation, Union and UnionReverse states are merged into - /// one Union type of state, where the latter has its epsilon transitions - /// reversed to reflect the priority inversion. - /// - /// The "convenience" here arises from the fact that as new states are - /// added to the list of `alternates`, we would like that add operation - /// to be amortized constant time. But if we used a `Union`, we'd need to - /// prepend the state, which takes O(n) time. There are other approaches we - /// could use to solve this, but this seems simple enough. - UnionReverse { alternates: Vec }, - /// A state that cannot be transitioned out of. This is useful for cases - /// where you want to prevent matching from occurring. For example, if your - /// regex parser permits empty character classes, then one could choose a - /// `Fail` state to represent it. - Fail, - /// A match state. There is at most one such occurrence of this state in - /// an NFA for each pattern compiled into the NFA. At time of writing, a - /// match state is always produced for every pattern given, but in theory, - /// if a pattern can never lead to a match, then the match state could be - /// omitted. - /// - /// `pattern_id` refers to the ID of the pattern itself, which corresponds - /// to the pattern's index (starting at 0). 
- Match { pattern_id: PatternID }, -} - -impl State { - /// If this state is an unconditional epsilon transition, then this returns - /// the target of the transition. - fn goto(&self) -> Option { - match *self { - State::Empty { next } => Some(next), - State::Union { ref alternates } if alternates.len() == 1 => { - Some(alternates[0]) - } - State::UnionReverse { ref alternates } - if alternates.len() == 1 => - { - Some(alternates[0]) - } - _ => None, - } - } - - /// Returns the heap memory usage, in bytes, of this state. - fn memory_usage(&self) -> usize { - match *self { - State::Empty { .. } - | State::ByteRange { .. } - | State::Look { .. } - | State::CaptureStart { .. } - | State::CaptureEnd { .. } - | State::Fail - | State::Match { .. } => 0, - State::Sparse { ref transitions } => { - transitions.len() * mem::size_of::() - } - State::Union { ref alternates } => { - alternates.len() * mem::size_of::() - } - State::UnionReverse { ref alternates } => { - alternates.len() * mem::size_of::() - } - } - } -} - -/// An abstraction for building Thompson NFAs by hand. -/// -/// A builder is what a [`thompson::Compiler`](crate::nfa::thompson::Compiler) -/// uses internally to translate a regex's high-level intermediate -/// representation into an [`NFA`]. -/// -/// The primary function of this builder is to abstract away the internal -/// representation of an NFA and make it difficult to produce NFAs are that -/// internally invalid or inconsistent. This builder also provides a way to -/// add "empty" states (which can be thought of as unconditional epsilon -/// transitions), despite the fact that [`thompson::State`](nfa::State) does -/// not have any "empty" representation. The advantage of "empty" states is -/// that they make the code for constructing a Thompson NFA logically simpler. -/// -/// Many of the routines on this builder may panic or return errors. Generally -/// speaking, panics occur when an invalid sequence of method calls were made, -/// where as an error occurs if things get too big. (Where "too big" might mean -/// exhausting identifier space or using up too much heap memory in accordance -/// with the configured [`size_limit`](Builder::set_size_limit).) -/// -/// # Overview -/// -/// ## Adding multiple patterns -/// -/// Each pattern you add to an NFA should correspond to a pair of -/// [`Builder::start_pattern`] and [`Builder::finish_pattern`] calls, with -/// calls inbetween that add NFA states for that pattern. NFA states may be -/// added without first calling `start_pattern`, with the exception of adding -/// capturing states. -/// -/// ## Adding NFA states -/// -/// Here is a very brief overview of each of the methods that add NFA states. -/// Every method adds a single state. -/// -/// * [`add_empty`](Builder::add_empty): Add a state with a single -/// unconditional epsilon transition to another state. -/// * [`add_union`](Builder::add_union): Adds a state with unconditional -/// epsilon transitions to two or more states, with earlier transitions -/// preferred over later ones. -/// * [`add_union_reverse`](Builder::add_union_reverse): Adds a state with -/// unconditional epsilon transitions to two or more states, with later -/// transitions preferred over earlier ones. -/// * [`add_range`](Builder::add_range): Adds a state with a single transition -/// to another state that can only be followed if the current input byte is -/// within the range given. 
-/// * [`add_sparse`](Builder::add_sparse): Adds a state with two or more -/// range transitions to other states, where a transition is only followed -/// if the current input byte is within one of the ranges. All transitions -/// in this state have equal priority, and the corresponding ranges must be -/// non-overlapping. -/// * [`add_look`](Builder::add_look): Adds a state with a single *conditional* -/// epsilon transition to another state, where the condition depends on a -/// limited look-around property. -/// * [`add_capture_start`](Builder::add_capture_start): Adds a state with -/// a single unconditional epsilon transition that also instructs an NFA -/// simulation to record the current input position to a specific location in -/// memory. This is intended to represent the starting location of a capturing -/// group. -/// * [`add_capture_end`](Builder::add_capture_end): Adds a state with -/// a single unconditional epsilon transition that also instructs an NFA -/// simulation to record the current input position to a specific location in -/// memory. This is intended to represent the ending location of a capturing -/// group. -/// * [`add_fail`](Builder::add_fail): Adds a state that never transitions to -/// another state. -/// * [`add_match`](Builder::add_match): Add a state that indicates a match has -/// been found for a particular pattern. A match state is a final state with -/// no outgoing transitions. -/// -/// ## Setting transitions between NFA states -/// -/// The [`Builder::patch`] method creates a transition from one state to the -/// next. If the `from` state corresponds to a state that supports multiple -/// outgoing transitions (such as "union"), then this adds the corresponding -/// transition. Otherwise, it sets the single transition. (This routine panics -/// if `from` corresponds to a state added by `add_sparse`, since sparse states -/// need more specialized handling.) -/// -/// # Example -/// -/// This annotated example shows how to hand construct the regex `[a-z]+` -/// (without an unanchored prefix). -/// -/// ``` -/// use regex_automata::{ -/// nfa::thompson::{pikevm::PikeVM, Builder, Transition}, -/// util::primitives::StateID, -/// Match, -/// }; -/// -/// let mut builder = Builder::new(); -/// // Before adding NFA states for our pattern, we need to tell the builder -/// // that we are starting the pattern. -/// builder.start_pattern()?; -/// // Since we use the Pike VM below for searching, we need to add capturing -/// // states. If you're just going to build a DFA from the NFA, then capturing -/// // states do not need to be added. -/// let start = builder.add_capture_start(StateID::ZERO, 0, None)?; -/// let range = builder.add_range(Transition { -/// // We don't know the state ID of the 'next' state yet, so we just fill -/// // in a dummy 'ZERO' value. -/// start: b'a', end: b'z', next: StateID::ZERO, -/// })?; -/// // This state will point back to 'range', but also enable us to move ahead. -/// // That is, this implements the '+' repetition operator. We add 'range' and -/// // then 'end' below to this alternation. -/// let alt = builder.add_union(vec![])?; -/// // The final state before the match state, which serves to capture the -/// // end location of the match. -/// let end = builder.add_capture_end(StateID::ZERO, 0)?; -/// // The match state for our pattern. -/// let mat = builder.add_match()?; -/// // Now we fill in the transitions between states. 
-/// builder.patch(start, range)?; -/// builder.patch(range, alt)?; -/// // If we added 'end' before 'range', then we'd implement non-greedy -/// // matching, i.e., '+?'. -/// builder.patch(alt, range)?; -/// builder.patch(alt, end)?; -/// builder.patch(end, mat)?; -/// // We must explicitly finish pattern and provide the starting state ID for -/// // this particular pattern. -/// builder.finish_pattern(start)?; -/// // Finally, when we build the NFA, we provide the anchored and unanchored -/// // starting state IDs. Since we didn't bother with an unanchored prefix -/// // here, we only support anchored searching. Thus, both starting states are -/// // the same. -/// let nfa = builder.build(start, start)?; -/// -/// // Now build a Pike VM from our NFA, and use it for searching. This shows -/// // how we can use a regex engine without ever worrying about syntax! -/// let re = PikeVM::new_from_nfa(nfa)?; -/// let mut cache = re.create_cache(); -/// let mut caps = re.create_captures(); -/// let expected = Some(Match::must(0, 0..3)); -/// re.captures(&mut cache, "foo0", &mut caps); -/// assert_eq!(expected, caps.get_match()); -/// -/// # Ok::<(), Box>(()) -/// ``` -#[derive(Clone, Debug, Default)] -pub struct Builder { - /// The ID of the pattern that we're currently building. - /// - /// Callers are required to set (and unset) this by calling - /// {start,finish}_pattern. Otherwise, most methods will panic. - pattern_id: Option, - /// A sequence of intermediate NFA states. Once a state is added to this - /// sequence, it is assigned a state ID equivalent to its index. Once a - /// state is added, it is still expected to be mutated, e.g., to set its - /// transition to a state that didn't exist at the time it was added. - states: Vec, - /// The starting states for each individual pattern. Starting at any - /// of these states will result in only an anchored search for the - /// corresponding pattern. The vec is indexed by pattern ID. When the NFA - /// contains a single regex, then `start_pattern[0]` and `start_anchored` - /// are always equivalent. - start_pattern: Vec, - /// A map from pattern ID to capture group index to name. (If no name - /// exists, then a None entry is present. Thus, all capturing groups are - /// present in this mapping.) - /// - /// The outer vec is indexed by pattern ID, while the inner vec is indexed - /// by capture index offset for the corresponding pattern. - /// - /// The first capture group for each pattern is always unnamed and is thus - /// always None. - captures: Vec>>>, - /// The combined memory used by each of the 'State's in 'states'. This - /// only includes heap usage by each state, and not the size of the state - /// itself. In other words, this tracks heap memory used that isn't - /// captured via `size_of::() * states.len()`. - memory_states: usize, - /// Whether this NFA only matches UTF-8 and whether regex engines using - /// this NFA for searching should report empty matches that split a - /// codepoint. - utf8: bool, - /// Whether this NFA should be matched in reverse or not. - reverse: bool, - /// The matcher to use for look-around assertions. - look_matcher: LookMatcher, - /// A size limit to respect when building an NFA. If the total heap memory - /// of the intermediate NFA states exceeds (or would exceed) this amount, - /// then an error is returned. - size_limit: Option, -} - -impl Builder { - /// Create a new builder for hand-assembling NFAs. - pub fn new() -> Builder { - Builder::default() - } - - /// Clear this builder. 
- /// - /// Clearing removes all state associated with building an NFA, but does - /// not reset configuration (such as size limits and whether the NFA - /// should only match UTF-8). After clearing, the builder can be reused to - /// assemble an entirely new NFA. - pub fn clear(&mut self) { - self.pattern_id = None; - self.states.clear(); - self.start_pattern.clear(); - self.captures.clear(); - self.memory_states = 0; - } - - /// Assemble a [`NFA`] from the states added so far. - /// - /// After building an NFA, more states may be added and `build` may be - /// called again. To reuse a builder to produce an entirely new NFA from - /// scratch, call the [`clear`](Builder::clear) method first. - /// - /// `start_anchored` refers to the ID of the starting state that anchored - /// searches should use. That is, searches who matches are limited to the - /// starting position of the search. - /// - /// `start_unanchored` refers to the ID of the starting state that - /// unanchored searches should use. This permits searches to report matches - /// that start after the beginning of the search. In cases where unanchored - /// searches are not supported, the unanchored starting state ID must be - /// the same as the anchored starting state ID. - /// - /// # Errors - /// - /// This returns an error if there was a problem producing the final NFA. - /// In particular, this might include an error if the capturing groups - /// added to this builder violate any of the invariants documented on - /// [`GroupInfo`](crate::util::captures::GroupInfo). - /// - /// # Panics - /// - /// If `start_pattern` was called, then `finish_pattern` must be called - /// before `build`, otherwise this panics. - /// - /// This may panic for other invalid uses of a builder. For example, if - /// a "start capture" state was added without a corresponding "end capture" - /// state. - pub fn build( - &self, - start_anchored: StateID, - start_unanchored: StateID, - ) -> Result { - assert!(self.pattern_id.is_none(), "must call 'finish_pattern' first"); - debug!( - "intermediate NFA compilation via builder is complete, \ - intermediate NFA size: {} states, {} bytes on heap", - self.states.len(), - self.memory_usage(), - ); - - let mut nfa = nfa::Inner::default(); - nfa.set_utf8(self.utf8); - nfa.set_reverse(self.reverse); - nfa.set_look_matcher(self.look_matcher.clone()); - // A set of compiler internal state IDs that correspond to states - // that are exclusively epsilon transitions, i.e., goto instructions, - // combined with the state that they point to. This is used to - // record said states while transforming the compiler's internal NFA - // representation to the external form. - let mut empties = vec![]; - // A map used to re-map state IDs when translating this builder's - // internal NFA state representation to the final NFA representation. - let mut remap = vec![]; - remap.resize(self.states.len(), StateID::ZERO); - - nfa.set_starts(start_anchored, start_unanchored, &self.start_pattern); - nfa.set_captures(&self.captures).map_err(BuildError::captures)?; - // The idea here is to convert our intermediate states to their final - // form. The only real complexity here is the process of converting - // transitions, which are expressed in terms of state IDs. The new - // set of states will be smaller because of partial epsilon removal, - // so the state IDs will not be the same. 
- for (sid, state) in self.states.iter().with_state_ids() { - match *state { - State::Empty { next } => { - // Since we're removing empty states, we need to handle - // them later since we don't yet know which new state this - // empty state will be mapped to. - empties.push((sid, next)); - } - State::ByteRange { trans } => { - remap[sid] = nfa.add(nfa::State::ByteRange { trans }); - } - State::Sparse { ref transitions } => { - remap[sid] = match transitions.len() { - 0 => nfa.add(nfa::State::Fail), - 1 => nfa.add(nfa::State::ByteRange { - trans: transitions[0], - }), - _ => { - let transitions = - transitions.to_vec().into_boxed_slice(); - let sparse = SparseTransitions { transitions }; - nfa.add(nfa::State::Sparse(sparse)) - } - } - } - State::Look { look, next } => { - remap[sid] = nfa.add(nfa::State::Look { look, next }); - } - State::CaptureStart { pattern_id, group_index, next } => { - // We can't remove this empty state because of the side - // effect of capturing an offset for this capture slot. - let slot = nfa - .group_info() - .slot(pattern_id, group_index.as_usize()) - .expect("invalid capture index"); - let slot = - SmallIndex::new(slot).expect("a small enough slot"); - remap[sid] = nfa.add(nfa::State::Capture { - next, - pattern_id, - group_index, - slot, - }); - } - State::CaptureEnd { pattern_id, group_index, next } => { - // We can't remove this empty state because of the side - // effect of capturing an offset for this capture slot. - // Also, this always succeeds because we check that all - // slot indices are valid for all capture indices when they - // are initially added. - let slot = nfa - .group_info() - .slot(pattern_id, group_index.as_usize()) - .expect("invalid capture index") - .checked_add(1) - .unwrap(); - let slot = - SmallIndex::new(slot).expect("a small enough slot"); - remap[sid] = nfa.add(nfa::State::Capture { - next, - pattern_id, - group_index, - slot, - }); - } - State::Union { ref alternates } => { - if alternates.is_empty() { - remap[sid] = nfa.add(nfa::State::Fail); - } else if alternates.len() == 1 { - empties.push((sid, alternates[0])); - remap[sid] = alternates[0]; - } else if alternates.len() == 2 { - remap[sid] = nfa.add(nfa::State::BinaryUnion { - alt1: alternates[0], - alt2: alternates[1], - }); - } else { - let alternates = - alternates.to_vec().into_boxed_slice(); - remap[sid] = nfa.add(nfa::State::Union { alternates }); - } - } - State::UnionReverse { ref alternates } => { - if alternates.is_empty() { - remap[sid] = nfa.add(nfa::State::Fail); - } else if alternates.len() == 1 { - empties.push((sid, alternates[0])); - remap[sid] = alternates[0]; - } else if alternates.len() == 2 { - remap[sid] = nfa.add(nfa::State::BinaryUnion { - alt1: alternates[1], - alt2: alternates[0], - }); - } else { - let mut alternates = - alternates.to_vec().into_boxed_slice(); - alternates.reverse(); - remap[sid] = nfa.add(nfa::State::Union { alternates }); - } - } - State::Fail => { - remap[sid] = nfa.add(nfa::State::Fail); - } - State::Match { pattern_id } => { - remap[sid] = nfa.add(nfa::State::Match { pattern_id }); - } - } - } - // Some of the new states still point to empty state IDs, so we need to - // follow each of them and remap the empty state IDs to their non-empty - // state IDs. - // - // We also keep track of which states we've already mapped. This helps - // avoid quadratic behavior in a long chain of empty states. For - // example, in 'a{0}{50000}'. 
- let mut remapped = vec![false; self.states.len()]; - for &(empty_id, empty_next) in empties.iter() { - if remapped[empty_id] { - continue; - } - // empty states can point to other empty states, forming a chain. - // So we must follow the chain until the end, which must end at - // a non-empty state, and therefore, a state that is correctly - // remapped. We are guaranteed to terminate because our compiler - // never builds a loop among only empty states. - let mut new_next = empty_next; - while let Some(next) = self.states[new_next].goto() { - new_next = next; - } - remap[empty_id] = remap[new_next]; - remapped[empty_id] = true; - - // Now that we've remapped the main 'empty_id' above, we re-follow - // the chain from above and remap every empty state we found along - // the way to our ultimate non-empty target. We are careful to set - // 'remapped' to true for each such state. We thus will not need - // to re-compute this chain for any subsequent empty states in - // 'empties' that are part of this chain. - let mut next2 = empty_next; - while let Some(next) = self.states[next2].goto() { - remap[next2] = remap[new_next]; - remapped[next2] = true; - next2 = next; - } - } - // Finally remap all of the state IDs. - nfa.remap(&remap); - let final_nfa = nfa.into_nfa(); - debug!( - "NFA compilation via builder complete, \ - final NFA size: {} states, {} bytes on heap, \ - has empty? {:?}, utf8? {:?}", - final_nfa.states().len(), - final_nfa.memory_usage(), - final_nfa.has_empty(), - final_nfa.is_utf8(), - ); - Ok(final_nfa) - } - - /// Start the assembly of a pattern in this NFA. - /// - /// Upon success, this returns the identifier for the new pattern. - /// Identifiers start at `0` and are incremented by 1 for each new pattern. - /// - /// It is necessary to call this routine before adding capturing states. - /// Otherwise, any other NFA state may be added before starting a pattern. - /// - /// # Errors - /// - /// If the pattern identifier space is exhausted, then this returns an - /// error. - /// - /// # Panics - /// - /// If this is called while assembling another pattern (i.e., before - /// `finish_pattern` is called), then this panics. - pub fn start_pattern(&mut self) -> Result { - assert!(self.pattern_id.is_none(), "must call 'finish_pattern' first"); - - let proposed = self.start_pattern.len(); - let pid = PatternID::new(proposed) - .map_err(|_| BuildError::too_many_patterns(proposed))?; - self.pattern_id = Some(pid); - // This gets filled in when 'finish_pattern' is called. - self.start_pattern.push(StateID::ZERO); - Ok(pid) - } - - /// Finish the assembly of a pattern in this NFA. - /// - /// Upon success, this returns the identifier for the new pattern. - /// Identifiers start at `0` and are incremented by 1 for each new - /// pattern. This is the same identifier returned by the corresponding - /// `start_pattern` call. - /// - /// Note that `start_pattern` and `finish_pattern` pairs cannot be - /// interleaved or nested. A correct `finish_pattern` call _always_ - /// corresponds to the most recently called `start_pattern` routine. - /// - /// # Errors - /// - /// This currently never returns an error, but this is subject to change. - /// - /// # Panics - /// - /// If this is called without a corresponding `start_pattern` call, then - /// this panics. 
- pub fn finish_pattern( - &mut self, - start_id: StateID, - ) -> Result { - let pid = self.current_pattern_id(); - self.start_pattern[pid] = start_id; - self.pattern_id = None; - Ok(pid) - } - - /// Returns the pattern identifier of the current pattern. - /// - /// # Panics - /// - /// If this doesn't occur after a `start_pattern` call and before the - /// corresponding `finish_pattern` call, then this panics. - pub fn current_pattern_id(&self) -> PatternID { - self.pattern_id.expect("must call 'start_pattern' first") - } - - /// Returns the number of patterns added to this builder so far. - /// - /// This only includes patterns that have had `finish_pattern` called - /// for them. - pub fn pattern_len(&self) -> usize { - self.start_pattern.len() - } - - /// Add an "empty" NFA state. - /// - /// An "empty" NFA state is a state with a single unconditional epsilon - /// transition to another NFA state. Such empty states are removed before - /// building the final [`NFA`] (which has no such "empty" states), but they - /// can be quite useful in the construction process of an NFA. - /// - /// # Errors - /// - /// This returns an error if the state identifier space is exhausted, or if - /// the configured heap size limit has been exceeded. - pub fn add_empty(&mut self) -> Result { - self.add(State::Empty { next: StateID::ZERO }) - } - - /// Add a "union" NFA state. - /// - /// A "union" NFA state that contains zero or more unconditional epsilon - /// transitions to other NFA states. The order of these transitions - /// reflects a priority order where earlier transitions are preferred over - /// later transitions. - /// - /// Callers may provide an empty set of alternates to this method call, and - /// then later add transitions via `patch`. At final build time, a "union" - /// state with no alternates is converted to a "fail" state, and a "union" - /// state with exactly one alternate is treated as if it were an "empty" - /// state. - /// - /// # Errors - /// - /// This returns an error if the state identifier space is exhausted, or if - /// the configured heap size limit has been exceeded. - pub fn add_union( - &mut self, - alternates: Vec, - ) -> Result { - self.add(State::Union { alternates }) - } - - /// Add a "reverse union" NFA state. - /// - /// A "reverse union" NFA state contains zero or more unconditional epsilon - /// transitions to other NFA states. The order of these transitions - /// reflects a priority order where later transitions are preferred - /// over earlier transitions. This is an inverted priority order when - /// compared to `add_union`. This is useful, for example, for implementing - /// non-greedy repetition operators. - /// - /// Callers may provide an empty set of alternates to this method call, and - /// then later add transitions via `patch`. At final build time, a "reverse - /// union" state with no alternates is converted to a "fail" state, and a - /// "reverse union" state with exactly one alternate is treated as if it - /// were an "empty" state. - /// - /// # Errors - /// - /// This returns an error if the state identifier space is exhausted, or if - /// the configured heap size limit has been exceeded. - pub fn add_union_reverse( - &mut self, - alternates: Vec, - ) -> Result { - self.add(State::UnionReverse { alternates }) - } - - /// Add a "range" NFA state. 
- /// - /// A "range" NFA state is a state with one outgoing transition to another - /// state, where that transition may only be followed if the current input - /// byte falls between a range of bytes given. - /// - /// # Errors - /// - /// This returns an error if the state identifier space is exhausted, or if - /// the configured heap size limit has been exceeded. - pub fn add_range( - &mut self, - trans: Transition, - ) -> Result { - self.add(State::ByteRange { trans }) - } - - /// Add a "sparse" NFA state. - /// - /// A "sparse" NFA state contains zero or more outgoing transitions, where - /// the transition to be followed (if any) is chosen based on whether the - /// current input byte falls in the range of one such transition. The - /// transitions given *must* be non-overlapping and in ascending order. (A - /// "sparse" state with no transitions is equivalent to a "fail" state.) - /// - /// A "sparse" state is like adding a "union" state and pointing it at a - /// bunch of "range" states, except that the different alternates have - /// equal priority. - /// - /// Note that a "sparse" state is the only state that cannot be patched. - /// This is because a "sparse" state has many transitions, each of which - /// may point to a different NFA state. Moreover, adding more such - /// transitions requires more than just an NFA state ID to point to. It - /// also requires a byte range. The `patch` routine does not support the - /// additional information required. Therefore, callers must ensure that - /// all outgoing transitions for this state are included when `add_sparse` - /// is called. There is no way to add more later. - /// - /// # Errors - /// - /// This returns an error if the state identifier space is exhausted, or if - /// the configured heap size limit has been exceeded. - /// - /// # Panics - /// - /// This routine _may_ panic if the transitions given overlap or are not - /// in ascending order. - pub fn add_sparse( - &mut self, - transitions: Vec, - ) -> Result { - self.add(State::Sparse { transitions }) - } - - /// Add a "look" NFA state. - /// - /// A "look" NFA state corresponds to a state with exactly one - /// *conditional* epsilon transition to another NFA state. Namely, it - /// represents one of a small set of simplistic look-around operators. - /// - /// Callers may provide a "dummy" state ID (typically [`StateID::ZERO`]), - /// and then change it later with [`patch`](Builder::patch). - /// - /// # Errors - /// - /// This returns an error if the state identifier space is exhausted, or if - /// the configured heap size limit has been exceeded. - pub fn add_look( - &mut self, - next: StateID, - look: Look, - ) -> Result { - self.add(State::Look { look, next }) - } - - /// Add a "start capture" NFA state. - /// - /// A "start capture" NFA state corresponds to a state with exactly one - /// outgoing unconditional epsilon transition to another state. Unlike - /// "empty" states, a "start capture" state also carries with it an - /// instruction for saving the current position of input to a particular - /// location in memory. NFA simulations, like the Pike VM, may use this - /// information to report the match locations of capturing groups in a - /// regex pattern. - /// - /// If the corresponding capturing group has a name, then callers should - /// include it here. - /// - /// Callers may provide a "dummy" state ID (typically [`StateID::ZERO`]), - /// and then change it later with [`patch`](Builder::patch). 
- /// - /// Note that unlike `start_pattern`/`finish_pattern`, capturing start and - /// end states may be interleaved. Indeed, it is typical for many "start - /// capture" NFA states to appear before the first "end capture" state. - /// - /// # Errors - /// - /// This returns an error if the state identifier space is exhausted, or if - /// the configured heap size limit has been exceeded or if the given - /// capture index overflows `usize`. - /// - /// While the above are the only conditions in which this routine can - /// currently return an error, it is possible to call this method with an - /// inputs that results in the final `build()` step failing to produce an - /// NFA. For example, if one adds two distinct capturing groups with the - /// same name, then that will result in `build()` failing with an error. - /// - /// See the [`GroupInfo`](crate::util::captures::GroupInfo) type for - /// more information on what qualifies as valid capturing groups. - /// - /// # Example - /// - /// This example shows that an error occurs when one tries to add multiple - /// capturing groups with the same name to the same pattern. - /// - /// ``` - /// use regex_automata::{ - /// nfa::thompson::Builder, - /// util::primitives::StateID, - /// }; - /// - /// let name = Some(std::sync::Arc::from("foo")); - /// let mut builder = Builder::new(); - /// builder.start_pattern()?; - /// // 0th capture group should always be unnamed. - /// let start = builder.add_capture_start(StateID::ZERO, 0, None)?; - /// // OK - /// builder.add_capture_start(StateID::ZERO, 1, name.clone())?; - /// // This is not OK, but 'add_capture_start' still succeeds. We don't - /// // get an error until we call 'build' below. Without this call, the - /// // call to 'build' below would succeed. - /// builder.add_capture_start(StateID::ZERO, 2, name.clone())?; - /// // Finish our pattern so we can try to build the NFA. - /// builder.finish_pattern(start)?; - /// let result = builder.build(start, start); - /// assert!(result.is_err()); - /// - /// # Ok::<(), Box>(()) - /// ``` - /// - /// However, adding multiple capturing groups with the same name to - /// distinct patterns is okay: - /// - /// ``` - /// use std::sync::Arc; - /// - /// use regex_automata::{ - /// nfa::thompson::{pikevm::PikeVM, Builder, Transition}, - /// util::{ - /// captures::Captures, - /// primitives::{PatternID, StateID}, - /// }, - /// Span, - /// }; - /// - /// // Hand-compile the patterns '(?P[a-z])' and '(?P[A-Z])'. - /// let mut builder = Builder::new(); - /// // We compile them to support an unanchored search, which requires - /// // adding an implicit '(?s-u:.)*?' prefix before adding either pattern. - /// let unanchored_prefix = builder.add_union_reverse(vec![])?; - /// let any = builder.add_range(Transition { - /// start: b'\x00', end: b'\xFF', next: StateID::ZERO, - /// })?; - /// builder.patch(unanchored_prefix, any)?; - /// builder.patch(any, unanchored_prefix)?; - /// - /// // Compile an alternation that permits matching multiple patterns. - /// let alt = builder.add_union(vec![])?; - /// builder.patch(unanchored_prefix, alt)?; - /// - /// // Compile '(?P[a-z]+)'. - /// builder.start_pattern()?; - /// let start0 = builder.add_capture_start(StateID::ZERO, 0, None)?; - /// // N.B. 0th capture group must always be unnamed. 
- /// let foo_start0 = builder.add_capture_start( - /// StateID::ZERO, 1, Some(Arc::from("foo")), - /// )?; - /// let lowercase = builder.add_range(Transition { - /// start: b'a', end: b'z', next: StateID::ZERO, - /// })?; - /// let foo_end0 = builder.add_capture_end(StateID::ZERO, 1)?; - /// let end0 = builder.add_capture_end(StateID::ZERO, 0)?; - /// let match0 = builder.add_match()?; - /// builder.patch(start0, foo_start0)?; - /// builder.patch(foo_start0, lowercase)?; - /// builder.patch(lowercase, foo_end0)?; - /// builder.patch(foo_end0, end0)?; - /// builder.patch(end0, match0)?; - /// builder.finish_pattern(start0)?; - /// - /// // Compile '(?P[A-Z]+)'. - /// builder.start_pattern()?; - /// let start1 = builder.add_capture_start(StateID::ZERO, 0, None)?; - /// // N.B. 0th capture group must always be unnamed. - /// let foo_start1 = builder.add_capture_start( - /// StateID::ZERO, 1, Some(Arc::from("foo")), - /// )?; - /// let uppercase = builder.add_range(Transition { - /// start: b'A', end: b'Z', next: StateID::ZERO, - /// })?; - /// let foo_end1 = builder.add_capture_end(StateID::ZERO, 1)?; - /// let end1 = builder.add_capture_end(StateID::ZERO, 0)?; - /// let match1 = builder.add_match()?; - /// builder.patch(start1, foo_start1)?; - /// builder.patch(foo_start1, uppercase)?; - /// builder.patch(uppercase, foo_end1)?; - /// builder.patch(foo_end1, end1)?; - /// builder.patch(end1, match1)?; - /// builder.finish_pattern(start1)?; - /// - /// // Now add the patterns to our alternation that we started above. - /// builder.patch(alt, start0)?; - /// builder.patch(alt, start1)?; - /// - /// // Finally build the NFA. The first argument is the anchored starting - /// // state (the pattern alternation) where as the second is the - /// // unanchored starting state (the unanchored prefix). - /// let nfa = builder.build(alt, unanchored_prefix)?; - /// - /// // Now build a Pike VM from our NFA and access the 'foo' capture - /// // group regardless of which pattern matched, since it is defined - /// // for both patterns. - /// let vm = PikeVM::new_from_nfa(nfa)?; - /// let mut cache = vm.create_cache(); - /// let caps: Vec = - /// vm.captures_iter(&mut cache, "0123aAaAA").collect(); - /// assert_eq!(5, caps.len()); - /// - /// assert_eq!(Some(PatternID::must(0)), caps[0].pattern()); - /// assert_eq!(Some(Span::from(4..5)), caps[0].get_group_by_name("foo")); - /// - /// assert_eq!(Some(PatternID::must(1)), caps[1].pattern()); - /// assert_eq!(Some(Span::from(5..6)), caps[1].get_group_by_name("foo")); - /// - /// assert_eq!(Some(PatternID::must(0)), caps[2].pattern()); - /// assert_eq!(Some(Span::from(6..7)), caps[2].get_group_by_name("foo")); - /// - /// assert_eq!(Some(PatternID::must(1)), caps[3].pattern()); - /// assert_eq!(Some(Span::from(7..8)), caps[3].get_group_by_name("foo")); - /// - /// assert_eq!(Some(PatternID::must(1)), caps[4].pattern()); - /// assert_eq!(Some(Span::from(8..9)), caps[4].get_group_by_name("foo")); - /// - /// # Ok::<(), Box>(()) - /// ``` - pub fn add_capture_start( - &mut self, - next: StateID, - group_index: u32, - name: Option>, - ) -> Result { - let pid = self.current_pattern_id(); - let group_index = match SmallIndex::try_from(group_index) { - Err(_) => { - return Err(BuildError::invalid_capture_index(group_index)) - } - Ok(group_index) => group_index, - }; - // Make sure we have space to insert our (pid,index)|-->name mapping. 
- if pid.as_usize() >= self.captures.len() { - for _ in 0..=(pid.as_usize() - self.captures.len()) { - self.captures.push(vec![]); - } - } - // In the case where 'group_index < self.captures[pid].len()', it means - // that we are adding a duplicate capture group. This is somewhat - // weird, but permissible because the capture group itself can be - // repeated in the syntax. For example, '([a-z]){4}' will produce 4 - // capture groups. In practice, only the last will be set at search - // time when a match occurs. For duplicates, we don't need to push - // anything other than a CaptureStart NFA state. - if group_index.as_usize() >= self.captures[pid].len() { - // For discontiguous indices, push placeholders for earlier capture - // groups that weren't explicitly added. - for _ in 0..(group_index.as_usize() - self.captures[pid].len()) { - self.captures[pid].push(None); - } - self.captures[pid].push(name); - } - self.add(State::CaptureStart { pattern_id: pid, group_index, next }) - } - - /// Add a "end capture" NFA state. - /// - /// A "end capture" NFA state corresponds to a state with exactly one - /// outgoing unconditional epsilon transition to another state. Unlike - /// "empty" states, a "end capture" state also carries with it an - /// instruction for saving the current position of input to a particular - /// location in memory. NFA simulations, like the Pike VM, may use this - /// information to report the match locations of capturing groups in a - /// - /// Callers may provide a "dummy" state ID (typically [`StateID::ZERO`]), - /// and then change it later with [`patch`](Builder::patch). - /// - /// Note that unlike `start_pattern`/`finish_pattern`, capturing start and - /// end states may be interleaved. Indeed, it is typical for many "start - /// capture" NFA states to appear before the first "end capture" state. - /// - /// # Errors - /// - /// This returns an error if the state identifier space is exhausted, or if - /// the configured heap size limit has been exceeded or if the given - /// capture index overflows `usize`. - /// - /// While the above are the only conditions in which this routine can - /// currently return an error, it is possible to call this method with an - /// inputs that results in the final `build()` step failing to produce an - /// NFA. For example, if one adds two distinct capturing groups with the - /// same name, then that will result in `build()` failing with an error. - /// - /// See the [`GroupInfo`](crate::util::captures::GroupInfo) type for - /// more information on what qualifies as valid capturing groups. - pub fn add_capture_end( - &mut self, - next: StateID, - group_index: u32, - ) -> Result { - let pid = self.current_pattern_id(); - let group_index = match SmallIndex::try_from(group_index) { - Err(_) => { - return Err(BuildError::invalid_capture_index(group_index)) - } - Ok(group_index) => group_index, - }; - self.add(State::CaptureEnd { pattern_id: pid, group_index, next }) - } - - /// Adds a "fail" NFA state. - /// - /// A "fail" state is simply a state that has no outgoing transitions. It - /// acts as a way to cause a search to stop without reporting a match. - /// For example, one way to represent an NFA with zero patterns is with a - /// single "fail" state. - /// - /// # Errors - /// - /// This returns an error if the state identifier space is exhausted, or if - /// the configured heap size limit has been exceeded. - pub fn add_fail(&mut self) -> Result { - self.add(State::Fail) - } - - /// Adds a "match" NFA state. 
- /// - /// A "match" state has no outgoing transitions (just like a "fail" - /// state), but it has special significance in that if a search enters - /// this state, then a match has been found. The match state that is added - /// automatically has the current pattern ID associated with it. This is - /// used to report the matching pattern ID at search time. - /// - /// # Errors - /// - /// This returns an error if the state identifier space is exhausted, or if - /// the configured heap size limit has been exceeded. - /// - /// # Panics - /// - /// This must be called after a `start_pattern` call but before the - /// corresponding `finish_pattern` call. Otherwise, it panics. - pub fn add_match(&mut self) -> Result { - let pattern_id = self.current_pattern_id(); - let sid = self.add(State::Match { pattern_id })?; - Ok(sid) - } - - /// The common implementation of "add a state." It handles the common - /// error cases of state ID exhausting (by owning state ID allocation) and - /// whether the size limit has been exceeded. - fn add(&mut self, state: State) -> Result { - let id = StateID::new(self.states.len()) - .map_err(|_| BuildError::too_many_states(self.states.len()))?; - self.memory_states += state.memory_usage(); - self.states.push(state); - self.check_size_limit()?; - Ok(id) - } - - /// Add a transition from one state to another. - /// - /// This routine is called "patch" since it is very common to add the - /// states you want, typically with "dummy" state ID transitions, and then - /// "patch" in the real state IDs later. This is because you don't always - /// know all of the necessary state IDs to add because they might not - /// exist yet. - /// - /// # Errors - /// - /// This may error if patching leads to an increase in heap usage beyond - /// the configured size limit. Heap usage only grows when patching adds a - /// new transition (as in the case of a "union" state). - /// - /// # Panics - /// - /// This panics if `from` corresponds to a "sparse" state. When "sparse" - /// states are added, there is no way to patch them after-the-fact. (If you - /// have a use case where this would be helpful, please file an issue. It - /// will likely require a new API.) - pub fn patch( - &mut self, - from: StateID, - to: StateID, - ) -> Result<(), BuildError> { - let old_memory_states = self.memory_states; - match self.states[from] { - State::Empty { ref mut next } => { - *next = to; - } - State::ByteRange { ref mut trans } => { - trans.next = to; - } - State::Sparse { .. } => { - panic!("cannot patch from a sparse NFA state") - } - State::Look { ref mut next, .. } => { - *next = to; - } - State::Union { ref mut alternates } => { - alternates.push(to); - self.memory_states += mem::size_of::(); - } - State::UnionReverse { ref mut alternates } => { - alternates.push(to); - self.memory_states += mem::size_of::(); - } - State::CaptureStart { ref mut next, .. } => { - *next = to; - } - State::CaptureEnd { ref mut next, .. } => { - *next = to; - } - State::Fail => {} - State::Match { .. } => {} - } - if old_memory_states != self.memory_states { - self.check_size_limit()?; - } - Ok(()) - } - - /// Set whether the NFA produced by this builder should only match UTF-8. - /// - /// This should be set when both of the following are true: - /// - /// 1. The caller guarantees that the NFA created by this build will only - /// report non-empty matches with spans that are valid UTF-8. - /// 2. 
The caller desires regex engines using this NFA to avoid reporting - /// empty matches with a span that splits a valid UTF-8 encoded codepoint. - /// - /// Property (1) is not checked. Instead, this requires the caller to - /// promise that it is true. Property (2) corresponds to the behavior of - /// regex engines using the NFA created by this builder. Namely, there - /// is no way in the NFA's graph itself to say that empty matches found - /// by, for example, the regex `a*` will fall on valid UTF-8 boundaries. - /// Instead, this option is used to communicate the UTF-8 semantic to regex - /// engines that will typically implement it as a post-processing step by - /// filtering out empty matches that don't fall on UTF-8 boundaries. - /// - /// If you're building an NFA from an HIR (and not using a - /// [`thompson::Compiler`](crate::nfa::thompson::Compiler)), then you can - /// use the [`syntax::Config::utf8`](crate::util::syntax::Config::utf8) - /// option to guarantee that if the HIR detects a non-empty match, then it - /// is guaranteed to be valid UTF-8. - /// - /// Note that property (2) does *not* specify the behavior of executing - /// a search on a haystack that is not valid UTF-8. Therefore, if you're - /// *not* running this NFA on strings that are guaranteed to be valid - /// UTF-8, you almost certainly do not want to enable this option. - /// Similarly, if you are running the NFA on strings that *are* guaranteed - /// to be valid UTF-8, then you almost certainly want to enable this option - /// unless you can guarantee that your NFA will never produce a zero-width - /// match. - /// - /// It is disabled by default. - pub fn set_utf8(&mut self, yes: bool) { - self.utf8 = yes; - } - - /// Returns whether UTF-8 mode is enabled for this builder. - /// - /// See [`Builder::set_utf8`] for more details about what "UTF-8 mode" is. - pub fn get_utf8(&self) -> bool { - self.utf8 - } - - /// Sets whether the NFA produced by this builder should be matched in - /// reverse or not. Generally speaking, when enabled, the NFA produced - /// should be matched by moving backwards through a haystack, from a higher - /// memory address to a lower memory address. - /// - /// See also [`NFA::is_reverse`] for more details. - /// - /// This is disabled by default, which means NFAs are by default matched - /// in the forward direction. - pub fn set_reverse(&mut self, yes: bool) { - self.reverse = yes; - } - - /// Returns whether reverse mode is enabled for this builder. - /// - /// See [`Builder::set_reverse`] for more details about what "reverse mode" - /// is. - pub fn get_reverse(&self) -> bool { - self.reverse - } - - /// Sets the look-around matcher that should be used for the resulting NFA. - /// - /// A look-around matcher can be used to configure how look-around - /// assertions are matched. For example, a matcher might carry - /// configuration that changes the line terminator used for `(?m:^)` and - /// `(?m:$)` assertions. - pub fn set_look_matcher(&mut self, m: LookMatcher) { - self.look_matcher = m; - } - - /// Returns the look-around matcher used for this builder. - /// - /// If a matcher was not explicitly set, then `LookMatcher::default()` is - /// returned. - pub fn get_look_matcher(&self) -> &LookMatcher { - &self.look_matcher - } - - /// Set the size limit on this builder. - /// - /// Setting the size limit will also check whether the NFA built so far - /// fits within the given size limit. If it doesn't, then an error is - /// returned. 
- /// - /// By default, there is no configured size limit. - pub fn set_size_limit( - &mut self, - limit: Option, - ) -> Result<(), BuildError> { - self.size_limit = limit; - self.check_size_limit() - } - - /// Return the currently configured size limit. - /// - /// By default, this returns `None`, which corresponds to no configured - /// size limit. - pub fn get_size_limit(&self) -> Option { - self.size_limit - } - - /// Returns the heap memory usage, in bytes, used by the NFA states added - /// so far. - /// - /// Note that this is an approximation of how big the final NFA will be. - /// In practice, the final NFA will likely be a bit smaller because of - /// its simpler state representation. (For example, using things like - /// `Box<[StateID]>` instead of `Vec`.) - pub fn memory_usage(&self) -> usize { - self.states.len() * mem::size_of::() + self.memory_states - } - - fn check_size_limit(&self) -> Result<(), BuildError> { - if let Some(limit) = self.size_limit { - if self.memory_usage() > limit { - return Err(BuildError::exceeded_size_limit(limit)); - } - } - Ok(()) - } -} - -#[cfg(test)] -mod tests { - use super::*; - - // This asserts that a builder state doesn't have its size changed. It is - // *really* easy to accidentally increase the size, and thus potentially - // dramatically increase the memory usage of NFA builder. - // - // This assert doesn't mean we absolutely cannot increase the size of a - // builder state. We can. It's just here to make sure we do it knowingly - // and intentionally. - // - // A builder state is unfortunately a little bigger than an NFA state, - // since we really want to support adding things to a pre-existing state. - // i.e., We use Vec instead of Box<[thing]>. So we end up using an - // extra 8 bytes per state. Sad, but at least it gets freed once the NFA - // is built. - #[test] - fn state_has_small_size() { - #[cfg(target_pointer_width = "64")] - assert_eq!(32, core::mem::size_of::()); - #[cfg(target_pointer_width = "32")] - assert_eq!(16, core::mem::size_of::()); - } -} diff --git a/vendor/regex-automata/src/nfa/thompson/compiler.rs b/vendor/regex-automata/src/nfa/thompson/compiler.rs deleted file mode 100644 index 96a39ac4ebac08..00000000000000 --- a/vendor/regex-automata/src/nfa/thompson/compiler.rs +++ /dev/null @@ -1,2368 +0,0 @@ -use core::{borrow::Borrow, cell::RefCell}; - -use alloc::{sync::Arc, vec, vec::Vec}; - -use regex_syntax::{ - hir::{self, Hir}, - utf8::{Utf8Range, Utf8Sequences}, - ParserBuilder, -}; - -use crate::{ - nfa::thompson::{ - builder::Builder, - error::BuildError, - literal_trie::LiteralTrie, - map::{Utf8BoundedMap, Utf8SuffixKey, Utf8SuffixMap}, - nfa::{Transition, NFA}, - range_trie::RangeTrie, - }, - util::{ - look::{Look, LookMatcher}, - primitives::{PatternID, StateID}, - }, -}; - -/// The configuration used for a Thompson NFA compiler. -#[derive(Clone, Debug, Default)] -pub struct Config { - utf8: Option, - reverse: Option, - nfa_size_limit: Option>, - shrink: Option, - which_captures: Option, - look_matcher: Option, - #[cfg(test)] - unanchored_prefix: Option, -} - -impl Config { - /// Return a new default Thompson NFA compiler configuration. - pub fn new() -> Config { - Config::default() - } - - /// Whether to enable UTF-8 mode during search or not. - /// - /// A regex engine is said to be in UTF-8 mode when it guarantees that - /// all matches returned by it have spans consisting of only valid UTF-8. - /// That is, it is impossible for a match span to be returned that - /// contains any invalid UTF-8. 
- /// - /// UTF-8 mode generally consists of two things: - /// - /// 1. Whether the NFA's states are constructed such that all paths to a - /// match state that consume at least one byte always correspond to valid - /// UTF-8. - /// 2. Whether all paths to a match state that do _not_ consume any bytes - /// should always correspond to valid UTF-8 boundaries. - /// - /// (1) is a guarantee made by whoever constructs the NFA. - /// If you're parsing a regex from its concrete syntax, then - /// [`syntax::Config::utf8`](crate::util::syntax::Config::utf8) can make - /// this guarantee for you. It does it by returning an error if the regex - /// pattern could every report a non-empty match span that contains invalid - /// UTF-8. So long as `syntax::Config::utf8` mode is enabled and your regex - /// successfully parses, then you're guaranteed that the corresponding NFA - /// will only ever report non-empty match spans containing valid UTF-8. - /// - /// (2) is a trickier guarantee because it cannot be enforced by the NFA - /// state graph itself. Consider, for example, the regex `a*`. It matches - /// the empty strings in `☃` at positions `0`, `1`, `2` and `3`, where - /// positions `1` and `2` occur within the UTF-8 encoding of a codepoint, - /// and thus correspond to invalid UTF-8 boundaries. Therefore, this - /// guarantee must be made at a higher level than the NFA state graph - /// itself. This crate deals with this case in each regex engine. Namely, - /// when a zero-width match that splits a codepoint is found and UTF-8 - /// mode enabled, then it is ignored and the engine moves on looking for - /// the next match. - /// - /// Thus, UTF-8 mode is both a promise that the NFA built only reports - /// non-empty matches that are valid UTF-8, and an *instruction* to regex - /// engines that empty matches that split codepoints should be banned. - /// - /// Because UTF-8 mode is fundamentally about avoiding invalid UTF-8 spans, - /// it only makes sense to enable this option when you *know* your haystack - /// is valid UTF-8. (For example, a `&str`.) Enabling UTF-8 mode and - /// searching a haystack that contains invalid UTF-8 leads to **unspecified - /// behavior**. - /// - /// Therefore, it may make sense to enable `syntax::Config::utf8` while - /// simultaneously *disabling* this option. That would ensure all non-empty - /// match spans are valid UTF-8, but that empty match spans may still split - /// a codepoint or match at other places that aren't valid UTF-8. - /// - /// In general, this mode is only relevant if your regex can match the - /// empty string. Most regexes don't. - /// - /// This is enabled by default. - /// - /// # Example - /// - /// This example shows how UTF-8 mode can impact the match spans that may - /// be reported in certain cases. - /// - /// ``` - /// use regex_automata::{ - /// nfa::thompson::{self, pikevm::PikeVM}, - /// Match, Input, - /// }; - /// - /// let re = PikeVM::new("")?; - /// let (mut cache, mut caps) = (re.create_cache(), re.create_captures()); - /// - /// // UTF-8 mode is enabled by default. - /// let mut input = Input::new("☃"); - /// re.search(&mut cache, &input, &mut caps); - /// assert_eq!(Some(Match::must(0, 0..0)), caps.get_match()); - /// - /// // Even though an empty regex matches at 1..1, our next match is - /// // 3..3 because 1..1 and 2..2 split the snowman codepoint (which is - /// // three bytes long). 
- /// input.set_start(1); - /// re.search(&mut cache, &input, &mut caps); - /// assert_eq!(Some(Match::must(0, 3..3)), caps.get_match()); - /// - /// // But if we disable UTF-8, then we'll get matches at 1..1 and 2..2: - /// let re = PikeVM::builder() - /// .thompson(thompson::Config::new().utf8(false)) - /// .build("")?; - /// re.search(&mut cache, &input, &mut caps); - /// assert_eq!(Some(Match::must(0, 1..1)), caps.get_match()); - /// - /// input.set_start(2); - /// re.search(&mut cache, &input, &mut caps); - /// assert_eq!(Some(Match::must(0, 2..2)), caps.get_match()); - /// - /// input.set_start(3); - /// re.search(&mut cache, &input, &mut caps); - /// assert_eq!(Some(Match::must(0, 3..3)), caps.get_match()); - /// - /// input.set_start(4); - /// re.search(&mut cache, &input, &mut caps); - /// assert_eq!(None, caps.get_match()); - /// - /// # Ok::<(), Box>(()) - /// ``` - pub fn utf8(mut self, yes: bool) -> Config { - self.utf8 = Some(yes); - self - } - - /// Reverse the NFA. - /// - /// A NFA reversal is performed by reversing all of the concatenated - /// sub-expressions in the original pattern, recursively. (Look around - /// operators are also inverted.) The resulting NFA can be used to match - /// the pattern starting from the end of a string instead of the beginning - /// of a string. - /// - /// Reversing the NFA is useful for building a reverse DFA, which is most - /// useful for finding the start of a match after its ending position has - /// been found. NFA execution engines typically do not work on reverse - /// NFAs. For example, currently, the Pike VM reports the starting location - /// of matches without a reverse NFA. - /// - /// Currently, enabling this setting requires disabling the - /// [`captures`](Config::captures) setting. If both are enabled, then the - /// compiler will return an error. It is expected that this limitation will - /// be lifted in the future. - /// - /// This is disabled by default. - /// - /// # Example - /// - /// This example shows how to build a DFA from a reverse NFA, and then use - /// the DFA to search backwards. - /// - /// ``` - /// use regex_automata::{ - /// dfa::{self, Automaton}, - /// nfa::thompson::{NFA, WhichCaptures}, - /// HalfMatch, Input, - /// }; - /// - /// let dfa = dfa::dense::Builder::new() - /// .thompson(NFA::config() - /// .which_captures(WhichCaptures::None) - /// .reverse(true) - /// ) - /// .build("baz[0-9]+")?; - /// let expected = Some(HalfMatch::must(0, 3)); - /// assert_eq!( - /// expected, - /// dfa.try_search_rev(&Input::new("foobaz12345bar"))?, - /// ); - /// - /// # Ok::<(), Box>(()) - /// ``` - pub fn reverse(mut self, yes: bool) -> Config { - self.reverse = Some(yes); - self - } - - /// Sets an approximate size limit on the total heap used by the NFA being - /// compiled. - /// - /// This permits imposing constraints on the size of a compiled NFA. This - /// may be useful in contexts where the regex pattern is untrusted and one - /// wants to avoid using too much memory. - /// - /// This size limit does not apply to auxiliary heap used during - /// compilation that is not part of the built NFA. - /// - /// Note that this size limit is applied during compilation in order for - /// the limit to prevent too much heap from being used. However, the - /// implementation may use an intermediate NFA representation that is - /// otherwise slightly bigger than the final public form. 
Since the size - /// limit may be applied to an intermediate representation, there is not - /// necessarily a precise correspondence between the configured size limit - /// and the heap usage of the final NFA. - /// - /// There is no size limit by default. - /// - /// # Example - /// - /// This example demonstrates how Unicode mode can greatly increase the - /// size of the NFA. - /// - /// ``` - /// # if cfg!(miri) { return Ok(()); } // miri takes too long - /// use regex_automata::nfa::thompson::NFA; - /// - /// // 300KB isn't enough! - /// NFA::compiler() - /// .configure(NFA::config().nfa_size_limit(Some(300_000))) - /// .build(r"\w{20}") - /// .unwrap_err(); - /// - /// // ... but 500KB probably is. - /// let nfa = NFA::compiler() - /// .configure(NFA::config().nfa_size_limit(Some(500_000))) - /// .build(r"\w{20}")?; - /// - /// assert_eq!(nfa.pattern_len(), 1); - /// - /// # Ok::<(), Box>(()) - /// ``` - pub fn nfa_size_limit(mut self, bytes: Option) -> Config { - self.nfa_size_limit = Some(bytes); - self - } - - /// Apply best effort heuristics to shrink the NFA at the expense of more - /// time/memory. - /// - /// Generally speaking, if one is using an NFA to compile a DFA, then the - /// extra time used to shrink the NFA will be more than made up for during - /// DFA construction (potentially by a lot). In other words, enabling this - /// can substantially decrease the overall amount of time it takes to build - /// a DFA. - /// - /// A reason to keep this disabled is if you want to compile an NFA and - /// start using it as quickly as possible without needing to build a DFA, - /// and you don't mind using a bit of extra memory for the NFA. e.g., for - /// an NFA simulation or for a lazy DFA. - /// - /// NFA shrinking is currently most useful when compiling a reverse - /// NFA with large Unicode character classes. In particular, it trades - /// additional CPU time during NFA compilation in favor of generating fewer - /// NFA states. - /// - /// This is disabled by default because it can increase compile times - /// quite a bit if you aren't building a full DFA. - /// - /// # Example - /// - /// This example shows that NFA shrinking can lead to substantial space - /// savings in some cases. Notice that, as noted above, we build a reverse - /// DFA and use a pattern with a large Unicode character class. - /// - /// ``` - /// # if cfg!(miri) { return Ok(()); } // miri takes too long - /// use regex_automata::nfa::thompson::{NFA, WhichCaptures}; - /// - /// // Currently we have to disable captures when enabling reverse NFA. - /// let config = NFA::config() - /// .which_captures(WhichCaptures::None) - /// .reverse(true); - /// let not_shrunk = NFA::compiler() - /// .configure(config.clone().shrink(false)) - /// .build(r"\w")?; - /// let shrunk = NFA::compiler() - /// .configure(config.clone().shrink(true)) - /// .build(r"\w")?; - /// - /// // While a specific shrink factor is not guaranteed, the savings can be - /// // considerable in some cases. - /// assert!(shrunk.states().len() * 2 < not_shrunk.states().len()); - /// - /// # Ok::<(), Box>(()) - /// ``` - pub fn shrink(mut self, yes: bool) -> Config { - self.shrink = Some(yes); - self - } - - /// Whether to include 'Capture' states in the NFA. - /// - /// Currently, enabling this setting requires disabling the - /// [`reverse`](Config::reverse) setting. If both are enabled, then the - /// compiler will return an error. It is expected that this limitation will - /// be lifted in the future. - /// - /// This is enabled by default. 
- /// - /// # Example - /// - /// This example demonstrates that some regex engines, like the Pike VM, - /// require capturing states to be present in the NFA to report match - /// offsets. - /// - /// (Note that since this method is deprecated, the example below uses - /// [`Config::which_captures`] to disable capture states.) - /// - /// ``` - /// use regex_automata::nfa::thompson::{ - /// pikevm::PikeVM, - /// NFA, - /// WhichCaptures, - /// }; - /// - /// let re = PikeVM::builder() - /// .thompson(NFA::config().which_captures(WhichCaptures::None)) - /// .build(r"[a-z]+")?; - /// let mut cache = re.create_cache(); - /// - /// assert!(re.is_match(&mut cache, "abc")); - /// assert_eq!(None, re.find(&mut cache, "abc")); - /// - /// # Ok::<(), Box>(()) - /// ``` - #[deprecated(since = "0.3.5", note = "use which_captures instead")] - pub fn captures(self, yes: bool) -> Config { - self.which_captures(if yes { - WhichCaptures::All - } else { - WhichCaptures::None - }) - } - - /// Configures what kinds of capture groups are compiled into - /// [`State::Capture`](crate::nfa::thompson::State::Capture) states in a - /// Thompson NFA. - /// - /// Currently, using any option except for [`WhichCaptures::None`] requires - /// disabling the [`reverse`](Config::reverse) setting. If both are - /// enabled, then the compiler will return an error. It is expected that - /// this limitation will be lifted in the future. - /// - /// This is set to [`WhichCaptures::All`] by default. Callers may wish to - /// use [`WhichCaptures::Implicit`] in cases where one wants avoid the - /// overhead of capture states for explicit groups. Usually this occurs - /// when one wants to use the `PikeVM` only for determining the overall - /// match. Otherwise, the `PikeVM` could use much more memory than is - /// necessary. - /// - /// # Example - /// - /// This example demonstrates that some regex engines, like the Pike VM, - /// require capturing states to be present in the NFA to report match - /// offsets. - /// - /// ``` - /// use regex_automata::nfa::thompson::{ - /// pikevm::PikeVM, - /// NFA, - /// WhichCaptures, - /// }; - /// - /// let re = PikeVM::builder() - /// .thompson(NFA::config().which_captures(WhichCaptures::None)) - /// .build(r"[a-z]+")?; - /// let mut cache = re.create_cache(); - /// - /// assert!(re.is_match(&mut cache, "abc")); - /// assert_eq!(None, re.find(&mut cache, "abc")); - /// - /// # Ok::<(), Box>(()) - /// ``` - /// - /// The same applies to the bounded backtracker: - /// - /// ``` - /// use regex_automata::nfa::thompson::{ - /// backtrack::BoundedBacktracker, - /// NFA, - /// WhichCaptures, - /// }; - /// - /// let re = BoundedBacktracker::builder() - /// .thompson(NFA::config().which_captures(WhichCaptures::None)) - /// .build(r"[a-z]+")?; - /// let mut cache = re.create_cache(); - /// - /// assert!(re.try_is_match(&mut cache, "abc")?); - /// assert_eq!(None, re.try_find(&mut cache, "abc")?); - /// - /// # Ok::<(), Box>(()) - /// ``` - pub fn which_captures(mut self, which_captures: WhichCaptures) -> Config { - self.which_captures = Some(which_captures); - self - } - - /// Sets the look-around matcher that should be used with this NFA. - /// - /// A look-around matcher determines how to match look-around assertions. - /// In particular, some assertions are configurable. For example, the - /// `(?m:^)` and `(?m:$)` assertions can have their line terminator changed - /// from the default of `\n` to any other byte. 
- /// - /// # Example - /// - /// This shows how to change the line terminator for multi-line assertions. - /// - /// ``` - /// use regex_automata::{ - /// nfa::thompson::{self, pikevm::PikeVM}, - /// util::look::LookMatcher, - /// Match, Input, - /// }; - /// - /// let mut lookm = LookMatcher::new(); - /// lookm.set_line_terminator(b'\x00'); - /// - /// let re = PikeVM::builder() - /// .thompson(thompson::Config::new().look_matcher(lookm)) - /// .build(r"(?m)^[a-z]+$")?; - /// let mut cache = re.create_cache(); - /// - /// // Multi-line assertions now use NUL as a terminator. - /// assert_eq!( - /// Some(Match::must(0, 1..4)), - /// re.find(&mut cache, b"\x00abc\x00"), - /// ); - /// // ... and \n is no longer recognized as a terminator. - /// assert_eq!( - /// None, - /// re.find(&mut cache, b"\nabc\n"), - /// ); - /// - /// # Ok::<(), Box>(()) - /// ``` - pub fn look_matcher(mut self, m: LookMatcher) -> Config { - self.look_matcher = Some(m); - self - } - - /// Whether to compile an unanchored prefix into this NFA. - /// - /// This is enabled by default. It is made available for tests only to make - /// it easier to unit test the output of the compiler. - #[cfg(test)] - fn unanchored_prefix(mut self, yes: bool) -> Config { - self.unanchored_prefix = Some(yes); - self - } - - /// Returns whether this configuration has enabled UTF-8 mode. - pub fn get_utf8(&self) -> bool { - self.utf8.unwrap_or(true) - } - - /// Returns whether this configuration has enabled reverse NFA compilation. - pub fn get_reverse(&self) -> bool { - self.reverse.unwrap_or(false) - } - - /// Return the configured NFA size limit, if it exists, in the number of - /// bytes of heap used. - pub fn get_nfa_size_limit(&self) -> Option { - self.nfa_size_limit.unwrap_or(None) - } - - /// Return whether NFA shrinking is enabled. - pub fn get_shrink(&self) -> bool { - self.shrink.unwrap_or(false) - } - - /// Return whether NFA compilation is configured to produce capture states. - #[deprecated(since = "0.3.5", note = "use get_which_captures instead")] - pub fn get_captures(&self) -> bool { - self.get_which_captures().is_any() - } - - /// Return what kinds of capture states will be compiled into an NFA. - pub fn get_which_captures(&self) -> WhichCaptures { - self.which_captures.unwrap_or(WhichCaptures::All) - } - - /// Return the look-around matcher for this NFA. - pub fn get_look_matcher(&self) -> LookMatcher { - self.look_matcher.clone().unwrap_or(LookMatcher::default()) - } - - /// Return whether NFA compilation is configured to include an unanchored - /// prefix. - /// - /// This is always false when not in test mode. - fn get_unanchored_prefix(&self) -> bool { - #[cfg(test)] - { - self.unanchored_prefix.unwrap_or(true) - } - #[cfg(not(test))] - { - true - } - } - - /// Overwrite the default configuration such that the options in `o` are - /// always used. If an option in `o` is not set, then the corresponding - /// option in `self` is used. If it's not set in `self` either, then it - /// remains not set. 
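The layering described here bottoms out at the documented defaults, which the public getters above expose: an option that was never set reports its default, while an explicitly set option reports the value given. A minimal sketch, assuming the `regex-automata` crate is available as a dependency:

```
use regex_automata::nfa::thompson::NFA;

fn main() {
    // Unset options report their documented defaults...
    let base = NFA::config();
    assert!(base.get_utf8());
    assert!(!base.get_reverse());
    assert_eq!(None, base.get_nfa_size_limit());

    // ...while explicitly set options take precedence.
    let set = NFA::config().utf8(false).nfa_size_limit(Some(1_000));
    assert!(!set.get_utf8());
    assert_eq!(Some(1_000), set.get_nfa_size_limit());
}
```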
- pub(crate) fn overwrite(&self, o: Config) -> Config { - Config { - utf8: o.utf8.or(self.utf8), - reverse: o.reverse.or(self.reverse), - nfa_size_limit: o.nfa_size_limit.or(self.nfa_size_limit), - shrink: o.shrink.or(self.shrink), - which_captures: o.which_captures.or(self.which_captures), - look_matcher: o.look_matcher.or_else(|| self.look_matcher.clone()), - #[cfg(test)] - unanchored_prefix: o.unanchored_prefix.or(self.unanchored_prefix), - } - } -} - -/// A configuration indicating which kinds of -/// [`State::Capture`](crate::nfa::thompson::State::Capture) states to include. -/// -/// This configuration can be used with [`Config::which_captures`] to control -/// which capture states are compiled into a Thompson NFA. -/// -/// The default configuration is [`WhichCaptures::All`]. -#[derive(Clone, Copy, Debug)] -pub enum WhichCaptures { - /// All capture states, including those corresponding to both implicit and - /// explicit capture groups, are included in the Thompson NFA. - All, - /// Only capture states corresponding to implicit capture groups are - /// included. Implicit capture groups appear in every pattern implicitly - /// and correspond to the overall match of a pattern. - /// - /// This is useful when one only cares about the overall match of a - /// pattern. By excluding capture states from explicit capture groups, - /// one might be able to reduce the memory usage of a multi-pattern regex - /// substantially if it was otherwise written to have many explicit capture - /// groups. - Implicit, - /// No capture states are compiled into the Thompson NFA. - /// - /// This is useful when capture states are either not needed (for example, - /// if one is only trying to build a DFA) or if they aren't supported (for - /// example, a reverse NFA). - /// - /// # Warning - /// - /// Callers must be exceedingly careful when using this - /// option. In particular, not all regex engines support - /// reporting match spans when using this option (for example, - /// [`PikeVM`](crate::nfa::thompson::pikevm::PikeVM) or - /// [`BoundedBacktracker`](crate::nfa::thompson::backtrack::BoundedBacktracker)). - /// - /// Perhaps more confusingly, using this option with such an - /// engine means that an `is_match` routine could report `true` - /// when `find` reports `None`. This is generally not something - /// that _should_ happen, but the low level control provided by - /// this crate makes it possible. - /// - /// Similarly, any regex engines (like [`meta::Regex`](crate::meta::Regex)) - /// should always return `None` from `find` routines when this option is - /// used, even if _some_ of its internal engines could find the match - /// boundaries. This is because inputs from user data could influence - /// engine selection, and thus influence whether a match is found or not. - /// Indeed, `meta::Regex::find` will always return `None` when configured - /// with this option. - None, -} - -impl Default for WhichCaptures { - fn default() -> WhichCaptures { - WhichCaptures::All - } -} - -impl WhichCaptures { - /// Returns true if this configuration indicates that no capture states - /// should be produced in an NFA. - pub fn is_none(&self) -> bool { - matches!(*self, WhichCaptures::None) - } - - /// Returns true if this configuration indicates that some capture states - /// should be added to an NFA. Note that this might only include capture - /// states for implicit capture groups. 
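The two predicates on `WhichCaptures` follow directly from the variant chosen; a small sketch, assuming the re-exported path used in the examples above:

```
use regex_automata::nfa::thompson::WhichCaptures;

fn main() {
    assert!(WhichCaptures::None.is_none());
    assert!(!WhichCaptures::None.is_any());
    // Both `All` and `Implicit` add at least the implicit capture states.
    assert!(WhichCaptures::All.is_any());
    assert!(WhichCaptures::Implicit.is_any());
    // The default matches the documentation above.
    assert!(matches!(WhichCaptures::default(), WhichCaptures::All));
}
```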
- pub fn is_any(&self) -> bool { - !self.is_none() - } -} - -/* -This compiler below uses Thompson's construction algorithm. The compiler takes -a regex-syntax::Hir as input and emits an NFA graph as output. The NFA graph -is structured in a way that permits it to be executed by a virtual machine and -also used to efficiently build a DFA. - -The compiler deals with a slightly expanded set of NFA states than what is -in a final NFA (as exhibited by builder::State and nfa::State). Notably a -compiler state includes an empty node that has exactly one unconditional -epsilon transition to the next state. In other words, it's a "goto" instruction -if one views Thompson's NFA as a set of bytecode instructions. These goto -instructions are removed in a subsequent phase before returning the NFA to the -caller. The purpose of these empty nodes is that they make the construction -algorithm substantially simpler to implement. We remove them before returning -to the caller because they can represent substantial overhead when traversing -the NFA graph (either while searching using the NFA directly or while building -a DFA). - -In the future, it would be nice to provide a Glushkov compiler as well, as it -would work well as a bit-parallel NFA for smaller regexes. But the Thompson -construction is one I'm more familiar with and seems more straight-forward to -deal with when it comes to large Unicode character classes. - -Internally, the compiler uses interior mutability to improve composition in the -face of the borrow checker. In particular, we'd really like to be able to write -things like this: - - self.c_concat(exprs.iter().map(|e| self.c(e))) - -Which elegantly uses iterators to build up a sequence of compiled regex -sub-expressions and then hands it off to the concatenating compiler routine. -Without interior mutability, the borrow checker won't let us borrow `self` -mutably both inside and outside the closure at the same time. -*/ - -/// A builder for compiling an NFA from a regex's high-level intermediate -/// representation (HIR). -/// -/// This compiler provides a way to translate a parsed regex pattern into an -/// NFA state graph. The NFA state graph can either be used directly to execute -/// a search (e.g., with a Pike VM), or it can be further used to build a DFA. -/// -/// This compiler provides APIs both for compiling regex patterns directly from -/// their concrete syntax, or via a [`regex_syntax::hir::Hir`]. -/// -/// This compiler has various options that may be configured via -/// [`thompson::Config`](Config). -/// -/// Note that a compiler is not the same as a [`thompson::Builder`](Builder). -/// A `Builder` provides a lower level API that is uncoupled from a regex -/// pattern's concrete syntax or even its HIR. Instead, it permits stitching -/// together an NFA by hand. See its docs for examples. -/// -/// # Example: compilation from concrete syntax -/// -/// This shows how to compile an NFA from a pattern string while setting a size -/// limit on how big the NFA is allowed to be (in terms of bytes of heap used). 
-/// -/// ``` -/// use regex_automata::{ -/// nfa::thompson::{NFA, pikevm::PikeVM}, -/// Match, -/// }; -/// -/// let config = NFA::config().nfa_size_limit(Some(1_000)); -/// let nfa = NFA::compiler().configure(config).build(r"(?-u)\w")?; -/// -/// let re = PikeVM::new_from_nfa(nfa)?; -/// let mut cache = re.create_cache(); -/// let mut caps = re.create_captures(); -/// let expected = Some(Match::must(0, 3..4)); -/// re.captures(&mut cache, "!@#A#@!", &mut caps); -/// assert_eq!(expected, caps.get_match()); -/// -/// # Ok::<(), Box>(()) -/// ``` -/// -/// # Example: compilation from HIR -/// -/// This shows how to hand assemble a regular expression via its HIR, and then -/// compile an NFA directly from it. -/// -/// ``` -/// use regex_automata::{nfa::thompson::{NFA, pikevm::PikeVM}, Match}; -/// use regex_syntax::hir::{Hir, Class, ClassBytes, ClassBytesRange}; -/// -/// let hir = Hir::class(Class::Bytes(ClassBytes::new(vec![ -/// ClassBytesRange::new(b'0', b'9'), -/// ClassBytesRange::new(b'A', b'Z'), -/// ClassBytesRange::new(b'_', b'_'), -/// ClassBytesRange::new(b'a', b'z'), -/// ]))); -/// -/// let config = NFA::config().nfa_size_limit(Some(1_000)); -/// let nfa = NFA::compiler().configure(config).build_from_hir(&hir)?; -/// -/// let re = PikeVM::new_from_nfa(nfa)?; -/// let mut cache = re.create_cache(); -/// let mut caps = re.create_captures(); -/// let expected = Some(Match::must(0, 3..4)); -/// re.captures(&mut cache, "!@#A#@!", &mut caps); -/// assert_eq!(expected, caps.get_match()); -/// -/// # Ok::<(), Box>(()) -/// ``` -#[derive(Clone, Debug)] -pub struct Compiler { - /// A regex parser, used when compiling an NFA directly from a pattern - /// string. - parser: ParserBuilder, - /// The compiler configuration. - config: Config, - /// The builder for actually constructing an NFA. This provides a - /// convenient abstraction for writing a compiler. - builder: RefCell, - /// State used for compiling character classes to UTF-8 byte automata. - /// State is not retained between character class compilations. This just - /// serves to amortize allocation to the extent possible. - utf8_state: RefCell, - /// State used for arranging character classes in reverse into a trie. - trie_state: RefCell, - /// State used for caching common suffixes when compiling reverse UTF-8 - /// automata (for Unicode character classes). - utf8_suffix: RefCell, -} - -impl Compiler { - /// Create a new NFA builder with its default configuration. - pub fn new() -> Compiler { - Compiler { - parser: ParserBuilder::new(), - config: Config::default(), - builder: RefCell::new(Builder::new()), - utf8_state: RefCell::new(Utf8State::new()), - trie_state: RefCell::new(RangeTrie::new()), - utf8_suffix: RefCell::new(Utf8SuffixMap::new(1000)), - } - } - - /// Compile the given regular expression pattern into an NFA. - /// - /// If there was a problem parsing the regex, then that error is returned. - /// - /// Otherwise, if there was a problem building the NFA, then an error is - /// returned. The only error that can occur is if the compiled regex would - /// exceed the size limits configured on this builder, or if any part of - /// the NFA would exceed the integer representations used. (For example, - /// too many states might plausibly occur on a 16-bit target.) 
- /// - /// # Example - /// - /// ``` - /// use regex_automata::{nfa::thompson::{NFA, pikevm::PikeVM}, Match}; - /// - /// let config = NFA::config().nfa_size_limit(Some(1_000)); - /// let nfa = NFA::compiler().configure(config).build(r"(?-u)\w")?; - /// - /// let re = PikeVM::new_from_nfa(nfa)?; - /// let mut cache = re.create_cache(); - /// let mut caps = re.create_captures(); - /// let expected = Some(Match::must(0, 3..4)); - /// re.captures(&mut cache, "!@#A#@!", &mut caps); - /// assert_eq!(expected, caps.get_match()); - /// - /// # Ok::<(), Box>(()) - /// ``` - pub fn build(&self, pattern: &str) -> Result { - self.build_many(&[pattern]) - } - - /// Compile the given regular expression patterns into a single NFA. - /// - /// When matches are returned, the pattern ID corresponds to the index of - /// the pattern in the slice given. - /// - /// # Example - /// - /// ``` - /// use regex_automata::{nfa::thompson::{NFA, pikevm::PikeVM}, Match}; - /// - /// let config = NFA::config().nfa_size_limit(Some(1_000)); - /// let nfa = NFA::compiler().configure(config).build_many(&[ - /// r"(?-u)\s", - /// r"(?-u)\w", - /// ])?; - /// - /// let re = PikeVM::new_from_nfa(nfa)?; - /// let mut cache = re.create_cache(); - /// let mut caps = re.create_captures(); - /// let expected = Some(Match::must(1, 1..2)); - /// re.captures(&mut cache, "!A! !A!", &mut caps); - /// assert_eq!(expected, caps.get_match()); - /// - /// # Ok::<(), Box>(()) - /// ``` - pub fn build_many>( - &self, - patterns: &[P], - ) -> Result { - let mut hirs = vec![]; - for p in patterns { - hirs.push( - self.parser - .build() - .parse(p.as_ref()) - .map_err(BuildError::syntax)?, - ); - debug!("parsed: {:?}", p.as_ref()); - } - self.build_many_from_hir(&hirs) - } - - /// Compile the given high level intermediate representation of a regular - /// expression into an NFA. - /// - /// If there was a problem building the NFA, then an error is returned. The - /// only error that can occur is if the compiled regex would exceed the - /// size limits configured on this builder, or if any part of the NFA would - /// exceed the integer representations used. (For example, too many states - /// might plausibly occur on a 16-bit target.) - /// - /// # Example - /// - /// ``` - /// use regex_automata::{nfa::thompson::{NFA, pikevm::PikeVM}, Match}; - /// use regex_syntax::hir::{Hir, Class, ClassBytes, ClassBytesRange}; - /// - /// let hir = Hir::class(Class::Bytes(ClassBytes::new(vec![ - /// ClassBytesRange::new(b'0', b'9'), - /// ClassBytesRange::new(b'A', b'Z'), - /// ClassBytesRange::new(b'_', b'_'), - /// ClassBytesRange::new(b'a', b'z'), - /// ]))); - /// - /// let config = NFA::config().nfa_size_limit(Some(1_000)); - /// let nfa = NFA::compiler().configure(config).build_from_hir(&hir)?; - /// - /// let re = PikeVM::new_from_nfa(nfa)?; - /// let mut cache = re.create_cache(); - /// let mut caps = re.create_captures(); - /// let expected = Some(Match::must(0, 3..4)); - /// re.captures(&mut cache, "!@#A#@!", &mut caps); - /// assert_eq!(expected, caps.get_match()); - /// - /// # Ok::<(), Box>(()) - /// ``` - pub fn build_from_hir(&self, expr: &Hir) -> Result { - self.build_many_from_hir(&[expr]) - } - - /// Compile the given high level intermediate representations of regular - /// expressions into a single NFA. - /// - /// When matches are returned, the pattern ID corresponds to the index of - /// the pattern in the slice given. 
- /// - /// # Example - /// - /// ``` - /// use regex_automata::{nfa::thompson::{NFA, pikevm::PikeVM}, Match}; - /// use regex_syntax::hir::{Hir, Class, ClassBytes, ClassBytesRange}; - /// - /// let hirs = &[ - /// Hir::class(Class::Bytes(ClassBytes::new(vec![ - /// ClassBytesRange::new(b'\t', b'\r'), - /// ClassBytesRange::new(b' ', b' '), - /// ]))), - /// Hir::class(Class::Bytes(ClassBytes::new(vec![ - /// ClassBytesRange::new(b'0', b'9'), - /// ClassBytesRange::new(b'A', b'Z'), - /// ClassBytesRange::new(b'_', b'_'), - /// ClassBytesRange::new(b'a', b'z'), - /// ]))), - /// ]; - /// - /// let config = NFA::config().nfa_size_limit(Some(1_000)); - /// let nfa = NFA::compiler().configure(config).build_many_from_hir(hirs)?; - /// - /// let re = PikeVM::new_from_nfa(nfa)?; - /// let mut cache = re.create_cache(); - /// let mut caps = re.create_captures(); - /// let expected = Some(Match::must(1, 1..2)); - /// re.captures(&mut cache, "!A! !A!", &mut caps); - /// assert_eq!(expected, caps.get_match()); - /// - /// # Ok::<(), Box>(()) - /// ``` - pub fn build_many_from_hir>( - &self, - exprs: &[H], - ) -> Result { - self.compile(exprs) - } - - /// Apply the given NFA configuration options to this builder. - /// - /// # Example - /// - /// ``` - /// use regex_automata::nfa::thompson::NFA; - /// - /// let config = NFA::config().nfa_size_limit(Some(1_000)); - /// let nfa = NFA::compiler().configure(config).build(r"(?-u)\w")?; - /// assert_eq!(nfa.pattern_len(), 1); - /// - /// # Ok::<(), Box>(()) - /// ``` - pub fn configure(&mut self, config: Config) -> &mut Compiler { - self.config = self.config.overwrite(config); - self - } - - /// Set the syntax configuration for this builder using - /// [`syntax::Config`](crate::util::syntax::Config). - /// - /// This permits setting things like case insensitivity, Unicode and multi - /// line mode. - /// - /// This syntax configuration only applies when an NFA is built directly - /// from a pattern string. If an NFA is built from an HIR, then all syntax - /// settings are ignored. - /// - /// # Example - /// - /// ``` - /// use regex_automata::{nfa::thompson::NFA, util::syntax}; - /// - /// let syntax_config = syntax::Config::new().unicode(false); - /// let nfa = NFA::compiler().syntax(syntax_config).build(r"\w")?; - /// // If Unicode were enabled, the number of states would be much bigger. - /// assert!(nfa.states().len() < 15); - /// - /// # Ok::<(), Box>(()) - /// ``` - pub fn syntax( - &mut self, - config: crate::util::syntax::Config, - ) -> &mut Compiler { - config.apply(&mut self.parser); - self - } -} - -impl Compiler { - /// Compile the sequence of HIR expressions given. Pattern IDs are - /// allocated starting from 0, in correspondence with the slice given. - /// - /// It is legal to provide an empty slice. In that case, the NFA returned - /// has no patterns and will never match anything. 
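The empty-slice case is observable through the public `build_many` entry point, which forwards to this routine. A minimal sketch, assuming the public API shown in the surrounding examples:

```
use regex_automata::nfa::thompson::{pikevm::PikeVM, NFA};

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // An empty slice of patterns is legal and yields an NFA with no patterns.
    let nfa = NFA::compiler().build_many::<&str>(&[])?;
    assert_eq!(0, nfa.pattern_len());

    // Such an NFA never matches anything.
    let re = PikeVM::new_from_nfa(nfa)?;
    let mut cache = re.create_cache();
    assert!(!re.is_match(&mut cache, "anything at all"));
    Ok(())
}
```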
- fn compile>(&self, exprs: &[H]) -> Result { - if exprs.len() > PatternID::LIMIT { - return Err(BuildError::too_many_patterns(exprs.len())); - } - if self.config.get_reverse() - && self.config.get_which_captures().is_any() - { - return Err(BuildError::unsupported_captures()); - } - - self.builder.borrow_mut().clear(); - self.builder.borrow_mut().set_utf8(self.config.get_utf8()); - self.builder.borrow_mut().set_reverse(self.config.get_reverse()); - self.builder - .borrow_mut() - .set_look_matcher(self.config.get_look_matcher()); - self.builder - .borrow_mut() - .set_size_limit(self.config.get_nfa_size_limit())?; - - // We always add an unanchored prefix unless we were specifically told - // not to (for tests only), or if we know that the regex is anchored - // for all matches. When an unanchored prefix is not added, then the - // NFA's anchored and unanchored start states are equivalent. - let all_anchored = exprs.iter().all(|e| { - let props = e.borrow().properties(); - if self.config.get_reverse() { - props.look_set_suffix().contains(hir::Look::End) - } else { - props.look_set_prefix().contains(hir::Look::Start) - } - }); - let anchored = !self.config.get_unanchored_prefix() || all_anchored; - let unanchored_prefix = if anchored { - self.c_empty()? - } else { - self.c_at_least(&Hir::dot(hir::Dot::AnyByte), false, 0)? - }; - - let compiled = self.c_alt_iter(exprs.iter().map(|e| { - let _ = self.start_pattern()?; - let one = self.c_cap(0, None, e.borrow())?; - let match_state_id = self.add_match()?; - self.patch(one.end, match_state_id)?; - let _ = self.finish_pattern(one.start)?; - Ok(ThompsonRef { start: one.start, end: match_state_id }) - }))?; - self.patch(unanchored_prefix.end, compiled.start)?; - let nfa = self - .builder - .borrow_mut() - .build(compiled.start, unanchored_prefix.start)?; - - debug!("HIR-to-NFA compilation complete, config: {:?}", self.config); - Ok(nfa) - } - - /// Compile an arbitrary HIR expression. - fn c(&self, expr: &Hir) -> Result { - use regex_syntax::hir::{Class, HirKind::*}; - - match *expr.kind() { - Empty => self.c_empty(), - Literal(hir::Literal(ref bytes)) => self.c_literal(bytes), - Class(Class::Bytes(ref c)) => self.c_byte_class(c), - Class(Class::Unicode(ref c)) => self.c_unicode_class(c), - Look(ref look) => self.c_look(look), - Repetition(ref rep) => self.c_repetition(rep), - Capture(ref c) => self.c_cap(c.index, c.name.as_deref(), &c.sub), - Concat(ref es) => self.c_concat(es.iter().map(|e| self.c(e))), - Alternation(ref es) => self.c_alt_slice(es), - } - } - - /// Compile a concatenation of the sub-expressions yielded by the given - /// iterator. If the iterator yields no elements, then this compiles down - /// to an "empty" state that always matches. - /// - /// If the compiler is in reverse mode, then the expressions given are - /// automatically compiled in reverse. - fn c_concat(&self, mut it: I) -> Result - where - I: DoubleEndedIterator>, - { - let first = if self.is_reverse() { it.next_back() } else { it.next() }; - let ThompsonRef { start, mut end } = match first { - Some(result) => result?, - None => return self.c_empty(), - }; - loop { - let next = - if self.is_reverse() { it.next_back() } else { it.next() }; - let compiled = match next { - Some(result) => result?, - None => break, - }; - self.patch(end, compiled.start)?; - end = compiled.end; - } - Ok(ThompsonRef { start, end }) - } - - /// Compile an alternation of the given HIR values. 
- /// - /// This is like 'c_alt_iter', but it accepts a slice of HIR values instead - /// of an iterator of compiled NFA sub-graphs. The point of accepting a - /// slice here is that it opens up some optimization opportunities. For - /// example, if all of the HIR values are literals, then this routine might - /// re-shuffle them to make NFA epsilon closures substantially faster. - fn c_alt_slice(&self, exprs: &[Hir]) -> Result { - // self.c_alt_iter(exprs.iter().map(|e| self.c(e))) - let literal_count = exprs - .iter() - .filter(|e| { - matches!(*e.kind(), hir::HirKind::Literal(hir::Literal(_))) - }) - .count(); - if literal_count <= 1 || literal_count < exprs.len() { - return self.c_alt_iter(exprs.iter().map(|e| self.c(e))); - } - - let mut trie = if self.is_reverse() { - LiteralTrie::reverse() - } else { - LiteralTrie::forward() - }; - for expr in exprs.iter() { - let literal = match *expr.kind() { - hir::HirKind::Literal(hir::Literal(ref bytes)) => bytes, - _ => unreachable!(), - }; - trie.add(literal)?; - } - trie.compile(&mut self.builder.borrow_mut()) - } - - /// Compile an alternation, where each element yielded by the given - /// iterator represents an item in the alternation. If the iterator yields - /// no elements, then this compiles down to a "fail" state. - /// - /// In an alternation, expressions appearing earlier are "preferred" at - /// match time over expressions appearing later. At least, this is true - /// when using "leftmost first" match semantics. (If "leftmost longest" are - /// ever added in the future, then this preference order of priority would - /// not apply in that mode.) - fn c_alt_iter(&self, mut it: I) -> Result - where - I: Iterator>, - { - let first = match it.next() { - None => return self.c_fail(), - Some(result) => result?, - }; - let second = match it.next() { - None => return Ok(first), - Some(result) => result?, - }; - - let union = self.add_union()?; - let end = self.add_empty()?; - self.patch(union, first.start)?; - self.patch(first.end, end)?; - self.patch(union, second.start)?; - self.patch(second.end, end)?; - for result in it { - let compiled = result?; - self.patch(union, compiled.start)?; - self.patch(compiled.end, end)?; - } - Ok(ThompsonRef { start: union, end }) - } - - /// Compile the given capture sub-expression. `expr` should be the - /// sub-expression contained inside the capture. If "capture" states are - /// enabled, then they are added as appropriate. - /// - /// This accepts the pieces of a capture instead of a `hir::Capture` so - /// that it's easy to manufacture a "fake" group when necessary, e.g., for - /// adding the entire pattern as if it were a group in order to create - /// appropriate "capture" states in the NFA. - fn c_cap( - &self, - index: u32, - name: Option<&str>, - expr: &Hir, - ) -> Result { - match self.config.get_which_captures() { - // No capture states means we always skip them. - WhichCaptures::None => return self.c(expr), - // Implicit captures states means we only add when index==0 since - // index==0 implies the group is implicit. - WhichCaptures::Implicit if index > 0 => return self.c(expr), - _ => {} - } - - let start = self.add_capture_start(index, name)?; - let inner = self.c(expr)?; - let end = self.add_capture_end(index)?; - self.patch(start, inner.start)?; - self.patch(inner.end, end)?; - Ok(ThompsonRef { start, end }) - } - - /// Compile the given repetition expression. This handles all types of - /// repetitions and greediness. 
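The greediness handled here is visible at the match level: a greedy bounded repetition prefers consuming as much as its bound allows, a lazy one stops at the minimum. A small sketch using the Pike VM, assuming the API from the examples above:

```
use regex_automata::{nfa::thompson::pikevm::PikeVM, Match};

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let haystack = "aaaaaa";

    // Greedy: a{2,5} consumes as many 'a's as the bound allows.
    let greedy = PikeVM::new(r"a{2,5}")?;
    let mut cache = greedy.create_cache();
    assert_eq!(Some(Match::must(0, 0..5)), greedy.find(&mut cache, haystack));

    // Lazy: a{2,5}? stops as soon as the minimum is satisfied.
    let lazy = PikeVM::new(r"a{2,5}?")?;
    let mut cache = lazy.create_cache();
    assert_eq!(Some(Match::must(0, 0..2)), lazy.find(&mut cache, haystack));
    Ok(())
}
```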
- fn c_repetition( - &self, - rep: &hir::Repetition, - ) -> Result { - match (rep.min, rep.max) { - (0, Some(1)) => self.c_zero_or_one(&rep.sub, rep.greedy), - (min, None) => self.c_at_least(&rep.sub, rep.greedy, min), - (min, Some(max)) if min == max => self.c_exactly(&rep.sub, min), - (min, Some(max)) => self.c_bounded(&rep.sub, rep.greedy, min, max), - } - } - - /// Compile the given expression such that it matches at least `min` times, - /// but no more than `max` times. - /// - /// When `greedy` is true, then the preference is for the expression to - /// match as much as possible. Otherwise, it will match as little as - /// possible. - fn c_bounded( - &self, - expr: &Hir, - greedy: bool, - min: u32, - max: u32, - ) -> Result { - let prefix = self.c_exactly(expr, min)?; - if min == max { - return Ok(prefix); - } - - // It is tempting here to compile the rest here as a concatenation - // of zero-or-one matches. i.e., for `a{2,5}`, compile it as if it - // were `aaa?a?a?`. The problem here is that it leads to this program: - // - // >000000: 61 => 01 - // 000001: 61 => 02 - // 000002: union(03, 04) - // 000003: 61 => 04 - // 000004: union(05, 06) - // 000005: 61 => 06 - // 000006: union(07, 08) - // 000007: 61 => 08 - // 000008: MATCH - // - // And effectively, once you hit state 2, the epsilon closure will - // include states 3, 5, 6, 7 and 8, which is quite a bit. It is better - // to instead compile it like so: - // - // >000000: 61 => 01 - // 000001: 61 => 02 - // 000002: union(03, 08) - // 000003: 61 => 04 - // 000004: union(05, 08) - // 000005: 61 => 06 - // 000006: union(07, 08) - // 000007: 61 => 08 - // 000008: MATCH - // - // So that the epsilon closure of state 2 is now just 3 and 8. - let empty = self.add_empty()?; - let mut prev_end = prefix.end; - for _ in min..max { - let union = if greedy { - self.add_union() - } else { - self.add_union_reverse() - }?; - let compiled = self.c(expr)?; - self.patch(prev_end, union)?; - self.patch(union, compiled.start)?; - self.patch(union, empty)?; - prev_end = compiled.end; - } - self.patch(prev_end, empty)?; - Ok(ThompsonRef { start: prefix.start, end: empty }) - } - - /// Compile the given expression such that it may be matched `n` or more - /// times, where `n` can be any integer. (Although a particularly large - /// integer is likely to run afoul of any configured size limits.) - /// - /// When `greedy` is true, then the preference is for the expression to - /// match as much as possible. Otherwise, it will match as little as - /// possible. - fn c_at_least( - &self, - expr: &Hir, - greedy: bool, - n: u32, - ) -> Result { - if n == 0 { - // When the expression cannot match the empty string, then we - // can get away with something much simpler: just one 'alt' - // instruction that optionally repeats itself. But if the expr - // can match the empty string... see below. - if expr.properties().minimum_len().map_or(false, |len| len > 0) { - let union = if greedy { - self.add_union() - } else { - self.add_union_reverse() - }?; - let compiled = self.c(expr)?; - self.patch(union, compiled.start)?; - self.patch(compiled.end, union)?; - return Ok(ThompsonRef { start: union, end: union }); - } - - // What's going on here? Shouldn't x* be simpler than this? It - // turns out that when implementing leftmost-first (Perl-like) - // match semantics, x* results in an incorrect preference order - // when computing the transitive closure of states if and only if - // 'x' can match the empty string. 
So instead, we compile x* as - // (x+)?, which preserves the correct preference order. - // - // See: https://github.com/rust-lang/regex/issues/779 - let compiled = self.c(expr)?; - let plus = if greedy { - self.add_union() - } else { - self.add_union_reverse() - }?; - self.patch(compiled.end, plus)?; - self.patch(plus, compiled.start)?; - - let question = if greedy { - self.add_union() - } else { - self.add_union_reverse() - }?; - let empty = self.add_empty()?; - self.patch(question, compiled.start)?; - self.patch(question, empty)?; - self.patch(plus, empty)?; - Ok(ThompsonRef { start: question, end: empty }) - } else if n == 1 { - let compiled = self.c(expr)?; - let union = if greedy { - self.add_union() - } else { - self.add_union_reverse() - }?; - self.patch(compiled.end, union)?; - self.patch(union, compiled.start)?; - Ok(ThompsonRef { start: compiled.start, end: union }) - } else { - let prefix = self.c_exactly(expr, n - 1)?; - let last = self.c(expr)?; - let union = if greedy { - self.add_union() - } else { - self.add_union_reverse() - }?; - self.patch(prefix.end, last.start)?; - self.patch(last.end, union)?; - self.patch(union, last.start)?; - Ok(ThompsonRef { start: prefix.start, end: union }) - } - } - - /// Compile the given expression such that it may be matched zero or one - /// times. - /// - /// When `greedy` is true, then the preference is for the expression to - /// match as much as possible. Otherwise, it will match as little as - /// possible. - fn c_zero_or_one( - &self, - expr: &Hir, - greedy: bool, - ) -> Result { - let union = - if greedy { self.add_union() } else { self.add_union_reverse() }?; - let compiled = self.c(expr)?; - let empty = self.add_empty()?; - self.patch(union, compiled.start)?; - self.patch(union, empty)?; - self.patch(compiled.end, empty)?; - Ok(ThompsonRef { start: union, end: empty }) - } - - /// Compile the given HIR expression exactly `n` times. - fn c_exactly( - &self, - expr: &Hir, - n: u32, - ) -> Result { - let it = (0..n).map(|_| self.c(expr)); - self.c_concat(it) - } - - /// Compile the given byte oriented character class. - /// - /// This uses "sparse" states to represent an alternation between ranges in - /// this character class. We can use "sparse" states instead of stitching - /// together a "union" state because all ranges in a character class have - /// equal priority *and* are non-overlapping (thus, only one can match, so - /// there's never a question of priority in the first place). This saves a - /// fair bit of overhead when traversing an NFA. - /// - /// This routine compiles an empty character class into a "fail" state. - fn c_byte_class( - &self, - cls: &hir::ClassBytes, - ) -> Result { - let end = self.add_empty()?; - let mut trans = Vec::with_capacity(cls.ranges().len()); - for r in cls.iter() { - trans.push(Transition { - start: r.start(), - end: r.end(), - next: end, - }); - } - Ok(ThompsonRef { start: self.add_sparse(trans)?, end }) - } - - /// Compile the given Unicode character class. - /// - /// This routine specifically tries to use various types of compression, - /// since UTF-8 automata of large classes can get quite large. The specific - /// type of compression used depends on forward vs reverse compilation, and - /// whether NFA shrinking is enabled or not. - /// - /// Aside from repetitions causing lots of repeat group, this is like the - /// single most expensive part of regex compilation. Therefore, a large part - /// of the expense of compilation may be reduce by disabling Unicode in the - /// pattern. 
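The cost difference is easy to observe from outside by comparing state counts; a sketch along the lines of the `syntax::Config` example above (only the relative order is guaranteed, not the exact counts):

```
use regex_automata::{nfa::thompson::NFA, util::syntax};

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // \w as a full Unicode class compiles to a much larger automaton...
    let unicode = NFA::compiler().build(r"\w")?;
    // ...than the same pattern restricted to ASCII.
    let ascii = NFA::compiler()
        .syntax(syntax::Config::new().unicode(false))
        .build(r"\w")?;
    assert!(ascii.states().len() < unicode.states().len());
    Ok(())
}
```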
- /// - /// This routine compiles an empty character class into a "fail" state. - fn c_unicode_class( - &self, - cls: &hir::ClassUnicode, - ) -> Result { - // If all we have are ASCII ranges wrapped in a Unicode package, then - // there is zero reason to bring out the big guns. We can fit all ASCII - // ranges within a single sparse state. - if cls.is_ascii() { - let end = self.add_empty()?; - let mut trans = Vec::with_capacity(cls.ranges().len()); - for r in cls.iter() { - // The unwraps below are OK because we've verified that this - // class only contains ASCII codepoints. - trans.push(Transition { - // FIXME(1.59): use the 'TryFrom for u8' impl. - start: u8::try_from(u32::from(r.start())).unwrap(), - end: u8::try_from(u32::from(r.end())).unwrap(), - next: end, - }); - } - Ok(ThompsonRef { start: self.add_sparse(trans)?, end }) - } else if self.is_reverse() { - if !self.config.get_shrink() { - // When we don't want to spend the extra time shrinking, we - // compile the UTF-8 automaton in reverse using something like - // the "naive" approach, but will attempt to re-use common - // suffixes. - self.c_unicode_class_reverse_with_suffix(cls) - } else { - // When we want to shrink our NFA for reverse UTF-8 automata, - // we cannot feed UTF-8 sequences directly to the UTF-8 - // compiler, since the UTF-8 compiler requires all sequences - // to be lexicographically sorted. Instead, we organize our - // sequences into a range trie, which can then output our - // sequences in the correct order. Unfortunately, building the - // range trie is fairly expensive (but not nearly as expensive - // as building a DFA). Hence the reason why the 'shrink' option - // exists, so that this path can be toggled off. For example, - // we might want to turn this off if we know we won't be - // compiling a DFA. - let mut trie = self.trie_state.borrow_mut(); - trie.clear(); - - for rng in cls.iter() { - for mut seq in Utf8Sequences::new(rng.start(), rng.end()) { - seq.reverse(); - trie.insert(seq.as_slice()); - } - } - let mut builder = self.builder.borrow_mut(); - let mut utf8_state = self.utf8_state.borrow_mut(); - let mut utf8c = - Utf8Compiler::new(&mut *builder, &mut *utf8_state)?; - trie.iter(|seq| { - utf8c.add(&seq)?; - Ok(()) - })?; - utf8c.finish() - } - } else { - // In the forward direction, we always shrink our UTF-8 automata - // because we can stream it right into the UTF-8 compiler. There - // is almost no downside (in either memory or time) to using this - // approach. - let mut builder = self.builder.borrow_mut(); - let mut utf8_state = self.utf8_state.borrow_mut(); - let mut utf8c = - Utf8Compiler::new(&mut *builder, &mut *utf8_state)?; - for rng in cls.iter() { - for seq in Utf8Sequences::new(rng.start(), rng.end()) { - utf8c.add(seq.as_slice())?; - } - } - utf8c.finish() - } - - // For reference, the code below is the "naive" version of compiling a - // UTF-8 automaton. It is deliciously simple (and works for both the - // forward and reverse cases), but will unfortunately produce very - // large NFAs. When compiling a forward automaton, the size difference - // can sometimes be an order of magnitude. For example, the '\w' regex - // will generate about ~3000 NFA states using the naive approach below, - // but only 283 states when using the approach above. This is because - // the approach above actually compiles a *minimal* (or near minimal, - // because of the bounded hashmap for reusing equivalent states) UTF-8 - // automaton. 
- // - // The code below is kept as a reference point in order to make it - // easier to understand the higher level goal here. Although, it will - // almost certainly bit-rot, so keep that in mind. Also, if you try to - // use it, some of the tests in this module will fail because they look - // for terser byte code produce by the more optimized handling above. - // But the integration test suite should still pass. - // - // One good example of the substantial difference this can make is to - // compare and contrast performance of the Pike VM when the code below - // is active vs the code above. Here's an example to try: - // - // regex-cli find match pikevm -b -p '(?m)^\w{20}' non-ascii-file - // - // With Unicode classes generated below, this search takes about 45s on - // my machine. But with the compressed version above, the search takes - // only around 1.4s. The NFA is also 20% smaller. This is in part due - // to the compression, but also because of the utilization of 'sparse' - // NFA states. They lead to much less state shuffling during the NFA - // search. - /* - let it = cls - .iter() - .flat_map(|rng| Utf8Sequences::new(rng.start(), rng.end())) - .map(|seq| { - let it = seq - .as_slice() - .iter() - .map(|rng| self.c_range(rng.start, rng.end)); - self.c_concat(it) - }); - self.c_alt_iter(it) - */ - } - - /// Compile the given Unicode character class in reverse with suffix - /// caching. - /// - /// This is a "quick" way to compile large Unicode classes into reverse - /// UTF-8 automata while doing a small amount of compression on that - /// automata by reusing common suffixes. - /// - /// A more comprehensive compression scheme can be accomplished by using - /// a range trie to efficiently sort a reverse sequence of UTF-8 byte - /// ranges, and then use Daciuk's algorithm via `Utf8Compiler`. - /// - /// This is the technique used when "NFA shrinking" is disabled. - /// - /// (This also tries to use "sparse" states where possible, just like - /// `c_byte_class` does.) - fn c_unicode_class_reverse_with_suffix( - &self, - cls: &hir::ClassUnicode, - ) -> Result { - // N.B. It would likely be better to cache common *prefixes* in the - // reverse direction, but it's not quite clear how to do that. The - // advantage of caching suffixes is that it does give us a win, and - // has a very small additional overhead. - let mut cache = self.utf8_suffix.borrow_mut(); - cache.clear(); - - let union = self.add_union()?; - let alt_end = self.add_empty()?; - for urng in cls.iter() { - for seq in Utf8Sequences::new(urng.start(), urng.end()) { - let mut end = alt_end; - for brng in seq.as_slice() { - let key = Utf8SuffixKey { - from: end, - start: brng.start, - end: brng.end, - }; - let hash = cache.hash(&key); - if let Some(id) = cache.get(&key, hash) { - end = id; - continue; - } - - let compiled = self.c_range(brng.start, brng.end)?; - self.patch(compiled.end, end)?; - end = compiled.start; - cache.set(key, hash, end); - } - self.patch(union, end)?; - } - } - Ok(ThompsonRef { start: union, end: alt_end }) - } - - /// Compile the given HIR look-around assertion to an NFA look-around - /// assertion. 
- fn c_look(&self, anchor: &hir::Look) -> Result { - let look = match *anchor { - hir::Look::Start => Look::Start, - hir::Look::End => Look::End, - hir::Look::StartLF => Look::StartLF, - hir::Look::EndLF => Look::EndLF, - hir::Look::StartCRLF => Look::StartCRLF, - hir::Look::EndCRLF => Look::EndCRLF, - hir::Look::WordAscii => Look::WordAscii, - hir::Look::WordAsciiNegate => Look::WordAsciiNegate, - hir::Look::WordUnicode => Look::WordUnicode, - hir::Look::WordUnicodeNegate => Look::WordUnicodeNegate, - hir::Look::WordStartAscii => Look::WordStartAscii, - hir::Look::WordEndAscii => Look::WordEndAscii, - hir::Look::WordStartUnicode => Look::WordStartUnicode, - hir::Look::WordEndUnicode => Look::WordEndUnicode, - hir::Look::WordStartHalfAscii => Look::WordStartHalfAscii, - hir::Look::WordEndHalfAscii => Look::WordEndHalfAscii, - hir::Look::WordStartHalfUnicode => Look::WordStartHalfUnicode, - hir::Look::WordEndHalfUnicode => Look::WordEndHalfUnicode, - }; - let id = self.add_look(look)?; - Ok(ThompsonRef { start: id, end: id }) - } - - /// Compile the given byte string to a concatenation of bytes. - fn c_literal(&self, bytes: &[u8]) -> Result { - self.c_concat(bytes.iter().copied().map(|b| self.c_range(b, b))) - } - - /// Compile a "range" state with one transition that may only be followed - /// if the input byte is in the (inclusive) range given. - /// - /// Both the `start` and `end` locations point to the state created. - /// Callers will likely want to keep the `start`, but patch the `end` to - /// point to some other state. - fn c_range(&self, start: u8, end: u8) -> Result { - let id = self.add_range(start, end)?; - Ok(ThompsonRef { start: id, end: id }) - } - - /// Compile an "empty" state with one unconditional epsilon transition. - /// - /// Both the `start` and `end` locations point to the state created. - /// Callers will likely want to keep the `start`, but patch the `end` to - /// point to some other state. - fn c_empty(&self) -> Result { - let id = self.add_empty()?; - Ok(ThompsonRef { start: id, end: id }) - } - - /// Compile a "fail" state that can never have any outgoing transitions. - fn c_fail(&self) -> Result { - let id = self.add_fail()?; - Ok(ThompsonRef { start: id, end: id }) - } - - // The below helpers are meant to be simple wrappers around the - // corresponding Builder methods. For the most part, they let us write - // 'self.add_foo()' instead of 'self.builder.borrow_mut().add_foo()', where - // the latter is a mouthful. Some of the methods do inject a little bit - // of extra logic. e.g., Flipping look-around operators when compiling in - // reverse mode. 
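A rough, self-contained sketch of that wrapper shape, using toy `Builder`/`StateId` stand-ins rather than the real types (illustrative only, not part of the deleted file):

use std::cell::RefCell;

// Toy stand-ins for the real Builder/StateID types.
type StateId = usize;

#[derive(Default)]
struct Builder {
    states: Vec<String>,
}

impl Builder {
    fn add_empty(&mut self) -> StateId {
        self.states.push("empty".to_string());
        self.states.len() - 1
    }
    fn patch(&mut self, from: StateId, to: StateId) {
        self.states[from] = format!("empty -> {to}");
    }
}

struct Compiler {
    builder: RefCell<Builder>,
    reverse: bool,
}

impl Compiler {
    // Thin wrapper: call sites read `self.add_empty()` rather than the
    // noisier `self.builder.borrow_mut().add_empty()`.
    fn add_empty(&self) -> StateId {
        self.builder.borrow_mut().add_empty()
    }
    fn patch(&self, from: StateId, to: StateId) {
        self.builder.borrow_mut().patch(from, to)
    }
    // A wrapper can also fold in a little extra logic, analogous to
    // flipping look-around assertions when compiling in reverse.
    fn direction(&self) -> &'static str {
        if self.reverse { "reverse" } else { "forward" }
    }
}

fn main() {
    let c = Compiler { builder: RefCell::new(Builder::default()), reverse: false };
    let (a, b) = (c.add_empty(), c.add_empty());
    c.patch(a, b);
    println!("{} compile: {:?}", c.direction(), c.builder.borrow().states);
}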
- - fn patch(&self, from: StateID, to: StateID) -> Result<(), BuildError> { - self.builder.borrow_mut().patch(from, to) - } - - fn start_pattern(&self) -> Result { - self.builder.borrow_mut().start_pattern() - } - - fn finish_pattern( - &self, - start_id: StateID, - ) -> Result { - self.builder.borrow_mut().finish_pattern(start_id) - } - - fn add_empty(&self) -> Result { - self.builder.borrow_mut().add_empty() - } - - fn add_range(&self, start: u8, end: u8) -> Result { - self.builder.borrow_mut().add_range(Transition { - start, - end, - next: StateID::ZERO, - }) - } - - fn add_sparse( - &self, - ranges: Vec, - ) -> Result { - self.builder.borrow_mut().add_sparse(ranges) - } - - fn add_look(&self, mut look: Look) -> Result { - if self.is_reverse() { - look = look.reversed(); - } - self.builder.borrow_mut().add_look(StateID::ZERO, look) - } - - fn add_union(&self) -> Result { - self.builder.borrow_mut().add_union(vec![]) - } - - fn add_union_reverse(&self) -> Result { - self.builder.borrow_mut().add_union_reverse(vec![]) - } - - fn add_capture_start( - &self, - capture_index: u32, - name: Option<&str>, - ) -> Result { - let name = name.map(Arc::from); - self.builder.borrow_mut().add_capture_start( - StateID::ZERO, - capture_index, - name, - ) - } - - fn add_capture_end( - &self, - capture_index: u32, - ) -> Result { - self.builder.borrow_mut().add_capture_end(StateID::ZERO, capture_index) - } - - fn add_fail(&self) -> Result { - self.builder.borrow_mut().add_fail() - } - - fn add_match(&self) -> Result { - self.builder.borrow_mut().add_match() - } - - fn is_reverse(&self) -> bool { - self.config.get_reverse() - } -} - -/// A value that represents the result of compiling a sub-expression of a -/// regex's HIR. Specifically, this represents a sub-graph of the NFA that -/// has an initial state at `start` and a final state at `end`. -#[derive(Clone, Copy, Debug)] -pub(crate) struct ThompsonRef { - pub(crate) start: StateID, - pub(crate) end: StateID, -} - -/// A UTF-8 compiler based on Daciuk's algorithm for compiling minimal DFAs -/// from a lexicographically sorted sequence of strings in linear time. -/// -/// The trick here is that any Unicode codepoint range can be converted to -/// a sequence of byte ranges that form a UTF-8 automaton. Connecting them -/// together via an alternation is trivial, and indeed, it works. However, -/// there is a lot of redundant structure in many UTF-8 automatons. Since our -/// UTF-8 ranges are in lexicographic order, we can use Daciuk's algorithm -/// to build nearly minimal DFAs in linear time. (They are guaranteed to be -/// minimal because we use a bounded cache of previously build DFA states.) -/// -/// The drawback is that this sadly doesn't work for reverse automata, since -/// the ranges are no longer in lexicographic order. For that, we invented the -/// range trie (which gets its own module). Once a range trie is built, we then -/// use this same Utf8Compiler to build a reverse UTF-8 automaton. -/// -/// The high level idea is described here: -/// https://blog.burntsushi.net/transducers/#finite-state-machines-as-data-structures -/// -/// There is also another implementation of this in the `fst` crate. 
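To make the input to that construction concrete, a small sketch (assuming the `regex-syntax` crate as a dependency) that prints the lexicographically ordered UTF-8 byte-range sequences a scalar-value range decomposes into — the same sequences the forward compiler streams into `Utf8Compiler`:

use regex_syntax::utf8::Utf8Sequences;

fn main() {
    // U+03B1..=U+03B4 (α..δ) is a small two-byte range; wider ranges split
    // into several sequences, emitted in lexicographic order.
    for seq in Utf8Sequences::new('\u{03B1}', '\u{03B4}') {
        let ranges: Vec<String> = seq
            .as_slice()
            .iter()
            .map(|r| format!("[{:02X}-{:02X}]", r.start, r.end))
            .collect();
        println!("{}", ranges.join(""));
    }
}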
-#[derive(Debug)] -struct Utf8Compiler<'a> { - builder: &'a mut Builder, - state: &'a mut Utf8State, - target: StateID, -} - -#[derive(Clone, Debug)] -struct Utf8State { - compiled: Utf8BoundedMap, - uncompiled: Vec, -} - -#[derive(Clone, Debug)] -struct Utf8Node { - trans: Vec, - last: Option, -} - -#[derive(Clone, Debug)] -struct Utf8LastTransition { - start: u8, - end: u8, -} - -impl Utf8State { - fn new() -> Utf8State { - Utf8State { compiled: Utf8BoundedMap::new(10_000), uncompiled: vec![] } - } - - fn clear(&mut self) { - self.compiled.clear(); - self.uncompiled.clear(); - } -} - -impl<'a> Utf8Compiler<'a> { - fn new( - builder: &'a mut Builder, - state: &'a mut Utf8State, - ) -> Result, BuildError> { - let target = builder.add_empty()?; - state.clear(); - let mut utf8c = Utf8Compiler { builder, state, target }; - utf8c.add_empty(); - Ok(utf8c) - } - - fn finish(&mut self) -> Result { - self.compile_from(0)?; - let node = self.pop_root(); - let start = self.compile(node)?; - Ok(ThompsonRef { start, end: self.target }) - } - - fn add(&mut self, ranges: &[Utf8Range]) -> Result<(), BuildError> { - let prefix_len = ranges - .iter() - .zip(&self.state.uncompiled) - .take_while(|&(range, node)| { - node.last.as_ref().map_or(false, |t| { - (t.start, t.end) == (range.start, range.end) - }) - }) - .count(); - assert!(prefix_len < ranges.len()); - self.compile_from(prefix_len)?; - self.add_suffix(&ranges[prefix_len..]); - Ok(()) - } - - fn compile_from(&mut self, from: usize) -> Result<(), BuildError> { - let mut next = self.target; - while from + 1 < self.state.uncompiled.len() { - let node = self.pop_freeze(next); - next = self.compile(node)?; - } - self.top_last_freeze(next); - Ok(()) - } - - fn compile( - &mut self, - node: Vec, - ) -> Result { - let hash = self.state.compiled.hash(&node); - if let Some(id) = self.state.compiled.get(&node, hash) { - return Ok(id); - } - let id = self.builder.add_sparse(node.clone())?; - self.state.compiled.set(node, hash, id); - Ok(id) - } - - fn add_suffix(&mut self, ranges: &[Utf8Range]) { - assert!(!ranges.is_empty()); - let last = self - .state - .uncompiled - .len() - .checked_sub(1) - .expect("non-empty nodes"); - assert!(self.state.uncompiled[last].last.is_none()); - self.state.uncompiled[last].last = Some(Utf8LastTransition { - start: ranges[0].start, - end: ranges[0].end, - }); - for r in &ranges[1..] 
{ - self.state.uncompiled.push(Utf8Node { - trans: vec![], - last: Some(Utf8LastTransition { start: r.start, end: r.end }), - }); - } - } - - fn add_empty(&mut self) { - self.state.uncompiled.push(Utf8Node { trans: vec![], last: None }); - } - - fn pop_freeze(&mut self, next: StateID) -> Vec { - let mut uncompiled = self.state.uncompiled.pop().unwrap(); - uncompiled.set_last_transition(next); - uncompiled.trans - } - - fn pop_root(&mut self) -> Vec { - assert_eq!(self.state.uncompiled.len(), 1); - assert!(self.state.uncompiled[0].last.is_none()); - self.state.uncompiled.pop().expect("non-empty nodes").trans - } - - fn top_last_freeze(&mut self, next: StateID) { - let last = self - .state - .uncompiled - .len() - .checked_sub(1) - .expect("non-empty nodes"); - self.state.uncompiled[last].set_last_transition(next); - } -} - -impl Utf8Node { - fn set_last_transition(&mut self, next: StateID) { - if let Some(last) = self.last.take() { - self.trans.push(Transition { - start: last.start, - end: last.end, - next, - }); - } - } -} - -#[cfg(test)] -mod tests { - use alloc::vec; - - use crate::{ - nfa::thompson::{SparseTransitions, State}, - util::primitives::SmallIndex, - }; - - use super::*; - - fn build(pattern: &str) -> NFA { - NFA::compiler() - .configure( - NFA::config() - .which_captures(WhichCaptures::None) - .unanchored_prefix(false), - ) - .build(pattern) - .unwrap() - } - - fn pid(id: usize) -> PatternID { - PatternID::new(id).unwrap() - } - - fn sid(id: usize) -> StateID { - StateID::new(id).unwrap() - } - - fn s_byte(byte: u8, next: usize) -> State { - let next = sid(next); - let trans = Transition { start: byte, end: byte, next }; - State::ByteRange { trans } - } - - fn s_range(start: u8, end: u8, next: usize) -> State { - let next = sid(next); - let trans = Transition { start, end, next }; - State::ByteRange { trans } - } - - fn s_sparse(transitions: &[(u8, u8, usize)]) -> State { - let transitions = transitions - .iter() - .map(|&(start, end, next)| Transition { - start, - end, - next: sid(next), - }) - .collect(); - State::Sparse(SparseTransitions { transitions }) - } - - fn s_look(look: Look, next: usize) -> State { - let next = sid(next); - State::Look { look, next } - } - - fn s_bin_union(alt1: usize, alt2: usize) -> State { - State::BinaryUnion { alt1: sid(alt1), alt2: sid(alt2) } - } - - fn s_union(alts: &[usize]) -> State { - State::Union { - alternates: alts - .iter() - .map(|&id| sid(id)) - .collect::>() - .into_boxed_slice(), - } - } - - fn s_cap(next: usize, pattern: usize, index: usize, slot: usize) -> State { - State::Capture { - next: sid(next), - pattern_id: pid(pattern), - group_index: SmallIndex::new(index).unwrap(), - slot: SmallIndex::new(slot).unwrap(), - } - } - - fn s_fail() -> State { - State::Fail - } - - fn s_match(id: usize) -> State { - State::Match { pattern_id: pid(id) } - } - - // Test that building an unanchored NFA has an appropriate `(?s:.)*?` - // prefix. 
- #[test] - fn compile_unanchored_prefix() { - let nfa = NFA::compiler() - .configure(NFA::config().which_captures(WhichCaptures::None)) - .build(r"a") - .unwrap(); - assert_eq!( - nfa.states(), - &[ - s_bin_union(2, 1), - s_range(0, 255, 0), - s_byte(b'a', 3), - s_match(0), - ] - ); - } - - #[test] - fn compile_no_unanchored_prefix_with_start_anchor() { - let nfa = NFA::compiler() - .configure(NFA::config().which_captures(WhichCaptures::None)) - .build(r"^a") - .unwrap(); - assert_eq!( - nfa.states(), - &[s_look(Look::Start, 1), s_byte(b'a', 2), s_match(0)] - ); - } - - #[test] - fn compile_yes_unanchored_prefix_with_end_anchor() { - let nfa = NFA::compiler() - .configure(NFA::config().which_captures(WhichCaptures::None)) - .build(r"a$") - .unwrap(); - assert_eq!( - nfa.states(), - &[ - s_bin_union(2, 1), - s_range(0, 255, 0), - s_byte(b'a', 3), - s_look(Look::End, 4), - s_match(0), - ] - ); - } - - #[test] - fn compile_yes_reverse_unanchored_prefix_with_start_anchor() { - let nfa = NFA::compiler() - .configure( - NFA::config() - .reverse(true) - .which_captures(WhichCaptures::None), - ) - .build(r"^a") - .unwrap(); - assert_eq!( - nfa.states(), - &[ - s_bin_union(2, 1), - s_range(0, 255, 0), - s_byte(b'a', 3), - // Anchors get flipped in a reverse automaton. - s_look(Look::End, 4), - s_match(0), - ], - ); - } - - #[test] - fn compile_no_reverse_unanchored_prefix_with_end_anchor() { - let nfa = NFA::compiler() - .configure( - NFA::config() - .reverse(true) - .which_captures(WhichCaptures::None), - ) - .build(r"a$") - .unwrap(); - assert_eq!( - nfa.states(), - &[ - // Anchors get flipped in a reverse automaton. - s_look(Look::Start, 1), - s_byte(b'a', 2), - s_match(0), - ], - ); - } - - #[test] - fn compile_empty() { - assert_eq!(build("").states(), &[s_match(0),]); - } - - #[test] - fn compile_literal() { - assert_eq!(build("a").states(), &[s_byte(b'a', 1), s_match(0),]); - assert_eq!( - build("ab").states(), - &[s_byte(b'a', 1), s_byte(b'b', 2), s_match(0),] - ); - assert_eq!( - build("☃").states(), - &[s_byte(0xE2, 1), s_byte(0x98, 2), s_byte(0x83, 3), s_match(0)] - ); - - // Check that non-UTF-8 literals work. 
- let nfa = NFA::compiler() - .configure( - NFA::config() - .which_captures(WhichCaptures::None) - .unanchored_prefix(false), - ) - .syntax(crate::util::syntax::Config::new().utf8(false)) - .build(r"(?-u)\xFF") - .unwrap(); - assert_eq!(nfa.states(), &[s_byte(b'\xFF', 1), s_match(0),]); - } - - #[test] - fn compile_class_ascii() { - assert_eq!( - build(r"[a-z]").states(), - &[s_range(b'a', b'z', 1), s_match(0),] - ); - assert_eq!( - build(r"[x-za-c]").states(), - &[s_sparse(&[(b'a', b'c', 1), (b'x', b'z', 1)]), s_match(0)] - ); - } - - #[test] - #[cfg(not(miri))] - fn compile_class_unicode() { - assert_eq!( - build(r"[\u03B1-\u03B4]").states(), - &[s_range(0xB1, 0xB4, 2), s_byte(0xCE, 0), s_match(0)] - ); - assert_eq!( - build(r"[\u03B1-\u03B4\u{1F919}-\u{1F91E}]").states(), - &[ - s_range(0xB1, 0xB4, 5), - s_range(0x99, 0x9E, 5), - s_byte(0xA4, 1), - s_byte(0x9F, 2), - s_sparse(&[(0xCE, 0xCE, 0), (0xF0, 0xF0, 3)]), - s_match(0), - ] - ); - assert_eq!( - build(r"[a-z☃]").states(), - &[ - s_byte(0x83, 3), - s_byte(0x98, 0), - s_sparse(&[(b'a', b'z', 3), (0xE2, 0xE2, 1)]), - s_match(0), - ] - ); - } - - #[test] - fn compile_repetition() { - assert_eq!( - build(r"a?").states(), - &[s_bin_union(1, 2), s_byte(b'a', 2), s_match(0),] - ); - assert_eq!( - build(r"a??").states(), - &[s_bin_union(2, 1), s_byte(b'a', 2), s_match(0),] - ); - } - - #[test] - fn compile_group() { - assert_eq!( - build(r"ab+").states(), - &[s_byte(b'a', 1), s_byte(b'b', 2), s_bin_union(1, 3), s_match(0)] - ); - assert_eq!( - build(r"(ab)").states(), - &[s_byte(b'a', 1), s_byte(b'b', 2), s_match(0)] - ); - assert_eq!( - build(r"(ab)+").states(), - &[s_byte(b'a', 1), s_byte(b'b', 2), s_bin_union(0, 3), s_match(0)] - ); - } - - #[test] - fn compile_alternation() { - assert_eq!( - build(r"a|b").states(), - &[s_range(b'a', b'b', 1), s_match(0)] - ); - assert_eq!( - build(r"ab|cd").states(), - &[ - s_byte(b'b', 3), - s_byte(b'd', 3), - s_sparse(&[(b'a', b'a', 0), (b'c', b'c', 1)]), - s_match(0) - ], - ); - assert_eq!( - build(r"|b").states(), - &[s_byte(b'b', 2), s_bin_union(2, 0), s_match(0)] - ); - assert_eq!( - build(r"a|").states(), - &[s_byte(b'a', 2), s_bin_union(0, 2), s_match(0)] - ); - } - - // This tests the use of a non-binary union, i.e., a state with more than - // 2 unconditional epsilon transitions. The only place they tend to appear - // is in reverse NFAs when shrinking is disabled. Otherwise, 'binary-union' - // and 'sparse' tend to cover all other cases of alternation. - #[test] - fn compile_non_binary_union() { - let nfa = NFA::compiler() - .configure( - NFA::config() - .which_captures(WhichCaptures::None) - .reverse(true) - .shrink(false) - .unanchored_prefix(false), - ) - .build(r"[\u1000\u2000\u3000]") - .unwrap(); - assert_eq!( - nfa.states(), - &[ - s_union(&[3, 6, 9]), - s_byte(0xE1, 10), - s_byte(0x80, 1), - s_byte(0x80, 2), - s_byte(0xE2, 10), - s_byte(0x80, 4), - s_byte(0x80, 5), - s_byte(0xE3, 10), - s_byte(0x80, 7), - s_byte(0x80, 8), - s_match(0), - ] - ); - } - - #[test] - fn compile_many_start_pattern() { - let nfa = NFA::compiler() - .configure( - NFA::config() - .which_captures(WhichCaptures::None) - .unanchored_prefix(false), - ) - .build_many(&["a", "b"]) - .unwrap(); - assert_eq!( - nfa.states(), - &[ - s_byte(b'a', 1), - s_match(0), - s_byte(b'b', 3), - s_match(1), - s_bin_union(0, 2), - ] - ); - assert_eq!(nfa.start_anchored().as_usize(), 4); - assert_eq!(nfa.start_unanchored().as_usize(), 4); - // Test that the start states for each individual pattern are correct. 
- assert_eq!(nfa.start_pattern(pid(0)).unwrap(), sid(0)); - assert_eq!(nfa.start_pattern(pid(1)).unwrap(), sid(2)); - } - - // This tests that our compiler can handle an empty character class. At the - // time of writing, the regex parser forbids it, so the only way to test it - // is to provide a hand written HIR. - #[test] - fn empty_class_bytes() { - use regex_syntax::hir::{Class, ClassBytes, Hir}; - - let hir = Hir::class(Class::Bytes(ClassBytes::new(vec![]))); - let config = NFA::config() - .which_captures(WhichCaptures::None) - .unanchored_prefix(false); - let nfa = - NFA::compiler().configure(config).build_from_hir(&hir).unwrap(); - assert_eq!(nfa.states(), &[s_fail(), s_match(0)]); - } - - // Like empty_class_bytes, but for a Unicode class. - #[test] - fn empty_class_unicode() { - use regex_syntax::hir::{Class, ClassUnicode, Hir}; - - let hir = Hir::class(Class::Unicode(ClassUnicode::new(vec![]))); - let config = NFA::config() - .which_captures(WhichCaptures::None) - .unanchored_prefix(false); - let nfa = - NFA::compiler().configure(config).build_from_hir(&hir).unwrap(); - assert_eq!(nfa.states(), &[s_fail(), s_match(0)]); - } - - #[test] - fn compile_captures_all() { - let nfa = NFA::compiler() - .configure( - NFA::config() - .unanchored_prefix(false) - .which_captures(WhichCaptures::All), - ) - .build("a(b)c") - .unwrap(); - assert_eq!( - nfa.states(), - &[ - s_cap(1, 0, 0, 0), - s_byte(b'a', 2), - s_cap(3, 0, 1, 2), - s_byte(b'b', 4), - s_cap(5, 0, 1, 3), - s_byte(b'c', 6), - s_cap(7, 0, 0, 1), - s_match(0) - ] - ); - let ginfo = nfa.group_info(); - assert_eq!(2, ginfo.all_group_len()); - } - - #[test] - fn compile_captures_implicit() { - let nfa = NFA::compiler() - .configure( - NFA::config() - .unanchored_prefix(false) - .which_captures(WhichCaptures::Implicit), - ) - .build("a(b)c") - .unwrap(); - assert_eq!( - nfa.states(), - &[ - s_cap(1, 0, 0, 0), - s_byte(b'a', 2), - s_byte(b'b', 3), - s_byte(b'c', 4), - s_cap(5, 0, 0, 1), - s_match(0) - ] - ); - let ginfo = nfa.group_info(); - assert_eq!(1, ginfo.all_group_len()); - } - - #[test] - fn compile_captures_none() { - let nfa = NFA::compiler() - .configure( - NFA::config() - .unanchored_prefix(false) - .which_captures(WhichCaptures::None), - ) - .build("a(b)c") - .unwrap(); - assert_eq!( - nfa.states(), - &[s_byte(b'a', 1), s_byte(b'b', 2), s_byte(b'c', 3), s_match(0)] - ); - let ginfo = nfa.group_info(); - assert_eq!(0, ginfo.all_group_len()); - } -} diff --git a/vendor/regex-automata/src/nfa/thompson/error.rs b/vendor/regex-automata/src/nfa/thompson/error.rs deleted file mode 100644 index 9f884ff20e3fa0..00000000000000 --- a/vendor/regex-automata/src/nfa/thompson/error.rs +++ /dev/null @@ -1,182 +0,0 @@ -use crate::util::{ - captures, look, - primitives::{PatternID, StateID}, -}; - -/// An error that can occurred during the construction of a thompson NFA. -/// -/// This error does not provide many introspection capabilities. There are -/// generally only two things you can do with it: -/// -/// * Obtain a human readable message via its `std::fmt::Display` impl. -/// * Access an underlying [`regex_syntax::Error`] type from its `source` -/// method via the `std::error::Error` trait. This error only occurs when using -/// convenience routines for building an NFA directly from a pattern string. -/// -/// Otherwise, errors typically occur when a limit has been breached. 
For -/// example, if the total heap usage of the compiled NFA exceeds the limit -/// set by [`Config::nfa_size_limit`](crate::nfa::thompson::Config), then -/// building the NFA will fail. -#[derive(Clone, Debug)] -pub struct BuildError { - kind: BuildErrorKind, -} - -/// The kind of error that occurred during the construction of a thompson NFA. -#[derive(Clone, Debug)] -enum BuildErrorKind { - /// An error that occurred while parsing a regular expression. Note that - /// this error may be printed over multiple lines, and is generally - /// intended to be end user readable on its own. - #[cfg(feature = "syntax")] - Syntax(regex_syntax::Error), - /// An error that occurs if the capturing groups provided to an NFA builder - /// do not satisfy the documented invariants. For example, things like - /// too many groups, missing groups, having the first (zeroth) group be - /// named or duplicate group names within the same pattern. - Captures(captures::GroupInfoError), - /// An error that occurs when an NFA contains a Unicode word boundary, but - /// where the crate was compiled without the necessary data for dealing - /// with Unicode word boundaries. - Word(look::UnicodeWordBoundaryError), - /// An error that occurs if too many patterns were given to the NFA - /// compiler. - TooManyPatterns { - /// The number of patterns given, which exceeds the limit. - given: usize, - /// The limit on the number of patterns. - limit: usize, - }, - /// An error that occurs if too states are produced while building an NFA. - TooManyStates { - /// The minimum number of states that are desired, which exceeds the - /// limit. - given: usize, - /// The limit on the number of states. - limit: usize, - }, - /// An error that occurs when NFA compilation exceeds a configured heap - /// limit. - ExceededSizeLimit { - /// The configured limit, in bytes. - limit: usize, - }, - /// An error that occurs when an invalid capture group index is added to - /// the NFA. An "invalid" index can be one that would otherwise overflow - /// a `usize` on the current target. - InvalidCaptureIndex { - /// The invalid index that was given. - index: u32, - }, - /// An error that occurs when one tries to build a reverse NFA with - /// captures enabled. Currently, this isn't supported, but we probably - /// should support it at some point. - #[cfg(feature = "syntax")] - UnsupportedCaptures, -} - -impl BuildError { - /// If this error occurred because the NFA exceeded the configured size - /// limit before being built, then this returns the configured size limit. - /// - /// The limit returned is what was configured, and corresponds to the - /// maximum amount of heap usage in bytes. 
- pub fn size_limit(&self) -> Option { - match self.kind { - BuildErrorKind::ExceededSizeLimit { limit } => Some(limit), - _ => None, - } - } - - fn kind(&self) -> &BuildErrorKind { - &self.kind - } - - #[cfg(feature = "syntax")] - pub(crate) fn syntax(err: regex_syntax::Error) -> BuildError { - BuildError { kind: BuildErrorKind::Syntax(err) } - } - - pub(crate) fn captures(err: captures::GroupInfoError) -> BuildError { - BuildError { kind: BuildErrorKind::Captures(err) } - } - - pub(crate) fn word(err: look::UnicodeWordBoundaryError) -> BuildError { - BuildError { kind: BuildErrorKind::Word(err) } - } - - pub(crate) fn too_many_patterns(given: usize) -> BuildError { - let limit = PatternID::LIMIT; - BuildError { kind: BuildErrorKind::TooManyPatterns { given, limit } } - } - - pub(crate) fn too_many_states(given: usize) -> BuildError { - let limit = StateID::LIMIT; - BuildError { kind: BuildErrorKind::TooManyStates { given, limit } } - } - - pub(crate) fn exceeded_size_limit(limit: usize) -> BuildError { - BuildError { kind: BuildErrorKind::ExceededSizeLimit { limit } } - } - - pub(crate) fn invalid_capture_index(index: u32) -> BuildError { - BuildError { kind: BuildErrorKind::InvalidCaptureIndex { index } } - } - - #[cfg(feature = "syntax")] - pub(crate) fn unsupported_captures() -> BuildError { - BuildError { kind: BuildErrorKind::UnsupportedCaptures } - } -} - -#[cfg(feature = "std")] -impl std::error::Error for BuildError { - fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { - match self.kind() { - #[cfg(feature = "syntax")] - BuildErrorKind::Syntax(ref err) => Some(err), - BuildErrorKind::Captures(ref err) => Some(err), - _ => None, - } - } -} - -impl core::fmt::Display for BuildError { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - match self.kind() { - #[cfg(feature = "syntax")] - BuildErrorKind::Syntax(_) => write!(f, "error parsing regex"), - BuildErrorKind::Captures(_) => { - write!(f, "error with capture groups") - } - BuildErrorKind::Word(_) => { - write!(f, "NFA contains Unicode word boundary") - } - BuildErrorKind::TooManyPatterns { given, limit } => write!( - f, - "attempted to compile {given} patterns, \ - which exceeds the limit of {limit}", - ), - BuildErrorKind::TooManyStates { given, limit } => write!( - f, - "attempted to compile {given} NFA states, \ - which exceeds the limit of {limit}", - ), - BuildErrorKind::ExceededSizeLimit { limit } => write!( - f, - "heap usage during NFA compilation exceeded limit of {limit}", - ), - BuildErrorKind::InvalidCaptureIndex { index } => write!( - f, - "capture group index {index} is invalid \ - (too big or discontinuous)", - ), - #[cfg(feature = "syntax")] - BuildErrorKind::UnsupportedCaptures => write!( - f, - "currently captures must be disabled when compiling \ - a reverse NFA", - ), - } - } -} diff --git a/vendor/regex-automata/src/nfa/thompson/literal_trie.rs b/vendor/regex-automata/src/nfa/thompson/literal_trie.rs deleted file mode 100644 index 08793cd6dc760a..00000000000000 --- a/vendor/regex-automata/src/nfa/thompson/literal_trie.rs +++ /dev/null @@ -1,528 +0,0 @@ -use core::mem; - -use alloc::{vec, vec::Vec}; - -use crate::{ - nfa::thompson::{self, compiler::ThompsonRef, BuildError, Builder}, - util::primitives::{IteratorIndexExt, StateID}, -}; - -/// A trie that preserves leftmost-first match semantics. -/// -/// This is a purpose-built data structure for optimizing 'lit1|lit2|..|litN' -/// patterns. 
It can *only* handle alternations of literals, which makes it -/// somewhat restricted in its scope, but literal alternations are fairly -/// common. -/// -/// At a 5,000 foot level, the main idea of this trie is make an alternation of -/// literals look more like a DFA than an NFA via epsilon removal. -/// -/// More precisely, the main issue is in how alternations are compiled into -/// a Thompson NFA. Namely, each alternation gets a single NFA "union" state -/// with an epsilon transition for every branch of the alternation pointing to -/// an NFA state corresponding to the start of that branch. The main problem -/// with this representation is the cost of computing an epsilon closure. Once -/// you hit the alternation's start state, it acts as a sort of "clog" that -/// requires you to traverse all of the epsilon transitions to compute the full -/// closure. -/// -/// While fixing such clogs in the general case is pretty tricky without going -/// to a DFA (or perhaps a Glushkov NFA, but that comes with other problems). -/// But at least in the case of an alternation of literals, we can convert -/// that to a prefix trie without too much cost. In theory, that's all you -/// really need to do: build the trie and then compile it to a Thompson NFA. -/// For example, if you have the pattern 'bar|baz|foo', then using a trie, it -/// is transformed to something like 'b(a(r|z))|f'. This reduces the clog by -/// reducing the number of epsilon transitions out of the alternation's start -/// state from 3 to 2 (it actually gets down to 1 when you use a sparse state, -/// which we do below). It's a small effect here, but when your alternation is -/// huge, the savings is also huge. -/// -/// And that is... essentially what a LiteralTrie does. But there is one -/// hiccup. Consider a regex like 'sam|samwise'. How does a prefix trie compile -/// that when leftmost-first semantics are used? If 'sam|samwise' was the -/// entire regex, then you could just drop the 'samwise' branch entirely since -/// it is impossible to match ('sam' will always take priority, and since it -/// is a prefix of 'samwise', 'samwise' will never match). But what about the -/// regex '\b(sam|samwise)\b'? In that case, you can't remove 'samwise' because -/// it might match when 'sam' doesn't fall on a word boundary. -/// -/// The main idea is that 'sam|samwise' can be translated to 'sam(?:|wise)', -/// which is a precisely equivalent regex that also gets rid of the clog. -/// -/// Another example is 'zapper|z|zap'. That gets translated to -/// 'z(?:apper||ap)'. -/// -/// We accomplish this by giving each state in the trie multiple "chunks" of -/// transitions. Each chunk barrier represents a match. The idea is that once -/// you know a match occurs, none of the transitions after the match can be -/// re-ordered and mixed in with the transitions before the match. Otherwise, -/// the match semantics could be changed. -/// -/// See the 'State' data type for a bit more detail. -/// -/// Future work: -/// -/// * In theory, it would be nice to generalize the idea of removing clogs and -/// apply it to the NFA graph itself. Then this could in theory work for -/// case insensitive alternations of literals, or even just alternations where -/// each branch starts with a non-epsilon transition. -/// * Could we instead use the Aho-Corasick algorithm here? The aho-corasick -/// crate deals with leftmost-first matches correctly, but I think this implies -/// encoding failure transitions into a Thompson NFA somehow. 
Which seems fine, -/// because failure transitions are just unconditional epsilon transitions? -/// * Or perhaps even better, could we use an aho_corasick::AhoCorasick -/// directly? At time of writing, 0.7 is the current version of the -/// aho-corasick crate, and that definitely cannot be used as-is. But if we -/// expose the underlying finite state machine API, then could we use it? That -/// would be super. If we could figure that out, it might also lend itself to -/// more general composition of finite state machines. -#[derive(Clone)] -pub(crate) struct LiteralTrie { - /// The set of trie states. Each state contains one or more chunks, where - /// each chunk is a sparse set of transitions to other states. A leaf state - /// is always a match state that contains only empty chunks (i.e., no - /// transitions). - states: Vec, - /// Whether to add literals in reverse to the trie. Useful when building - /// a reverse NFA automaton. - rev: bool, -} - -impl LiteralTrie { - /// Create a new literal trie that adds literals in the forward direction. - pub(crate) fn forward() -> LiteralTrie { - let root = State::default(); - LiteralTrie { states: vec![root], rev: false } - } - - /// Create a new literal trie that adds literals in reverse. - pub(crate) fn reverse() -> LiteralTrie { - let root = State::default(); - LiteralTrie { states: vec![root], rev: true } - } - - /// Add the given literal to this trie. - /// - /// If the literal could not be added because the `StateID` space was - /// exhausted, then an error is returned. If an error returns, the trie - /// is in an unspecified state. - pub(crate) fn add(&mut self, bytes: &[u8]) -> Result<(), BuildError> { - let mut prev = StateID::ZERO; - let mut it = bytes.iter().copied(); - while let Some(b) = if self.rev { it.next_back() } else { it.next() } { - prev = self.get_or_add_state(prev, b)?; - } - self.states[prev].add_match(); - Ok(()) - } - - /// If the given transition is defined, then return the next state ID. - /// Otherwise, add the transition to `from` and point it to a new state. - /// - /// If a new state ID could not be allocated, then an error is returned. - fn get_or_add_state( - &mut self, - from: StateID, - byte: u8, - ) -> Result { - let active = self.states[from].active_chunk(); - match active.binary_search_by_key(&byte, |t| t.byte) { - Ok(i) => Ok(active[i].next), - Err(i) => { - // Add a new state and get its ID. - let next = StateID::new(self.states.len()).map_err(|_| { - BuildError::too_many_states(self.states.len()) - })?; - self.states.push(State::default()); - // Offset our position to account for all transitions and not - // just the ones in the active chunk. - let i = self.states[from].active_chunk_start() + i; - let t = Transition { byte, next }; - self.states[from].transitions.insert(i, t); - Ok(next) - } - } - } - - /// Compile this literal trie to the NFA builder given. - /// - /// This forwards any errors that may occur while using the given builder. - pub(crate) fn compile( - &self, - builder: &mut Builder, - ) -> Result { - // Compilation proceeds via depth-first traversal of the trie. - // - // This is overall pretty brutal. The recursive version of this is - // deliciously simple. (See 'compile_to_hir' below for what it might - // look like.) But recursion on a trie means your call stack grows - // in accordance with the longest literal, which just does not seem - // appropriate. So we push the call stack to the heap. 
But as a result, - // the trie traversal becomes pretty brutal because we essentially - // have to encode the state of a double for-loop into an explicit call - // frame. If someone can simplify this without using recursion, that'd - // be great. - - // 'end' is our match state for this trie, but represented in the the - // NFA. Any time we see a match in the trie, we insert a transition - // from the current state we're in to 'end'. - let end = builder.add_empty()?; - let mut stack = vec![]; - let mut f = Frame::new(&self.states[StateID::ZERO]); - loop { - if let Some(t) = f.transitions.next() { - if self.states[t.next].is_leaf() { - f.sparse.push(thompson::Transition { - start: t.byte, - end: t.byte, - next: end, - }); - } else { - f.sparse.push(thompson::Transition { - start: t.byte, - end: t.byte, - // This is a little funny, but when the frame we create - // below completes, it will pop this parent frame off - // and modify this transition to point to the correct - // state. - next: StateID::ZERO, - }); - stack.push(f); - f = Frame::new(&self.states[t.next]); - } - continue; - } - // At this point, we have visited all transitions in f.chunk, so - // add it as a sparse NFA state. Unless the chunk was empty, in - // which case, we don't do anything. - if !f.sparse.is_empty() { - let chunk_id = if f.sparse.len() == 1 { - builder.add_range(f.sparse.pop().unwrap())? - } else { - let sparse = mem::replace(&mut f.sparse, vec![]); - builder.add_sparse(sparse)? - }; - f.union.push(chunk_id); - } - // Now we need to look to see if there are other chunks to visit. - if let Some(chunk) = f.chunks.next() { - // If we're here, it means we're on the second (or greater) - // chunk, which implies there is a match at this point. So - // connect this state to the final end state. - f.union.push(end); - // Advance to the next chunk. - f.transitions = chunk.iter(); - continue; - } - // Now that we are out of chunks, we have completely visited - // this state. So turn our union of chunks into an NFA union - // state, and add that union state to the parent state's current - // sparse state. (If there is no parent, we're done.) - let start = builder.add_union(f.union)?; - match stack.pop() { - None => { - return Ok(ThompsonRef { start, end }); - } - Some(mut parent) => { - // OK because the only way a frame gets pushed on to the - // stack (aside from the root) is when a transition has - // been added to 'sparse'. - parent.sparse.last_mut().unwrap().next = start; - f = parent; - } - } - } - } - - /// Converts this trie to an equivalent HIR expression. - /// - /// We don't actually use this, but it's useful for tests. In particular, - /// it provides a (somewhat) human readable representation of the trie - /// itself. - #[cfg(test)] - fn compile_to_hir(&self) -> regex_syntax::hir::Hir { - self.compile_state_to_hir(StateID::ZERO) - } - - /// The recursive implementation of 'to_hir'. - /// - /// Notice how simple this is compared to 'compile' above. 'compile' could - /// be similarly simple, but we opt to not use recursion in order to avoid - /// overflowing the stack in the case of a longer literal. 
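The recursion-to-explicit-stack transformation mentioned above can be shown on a toy tree type (a sketch with hypothetical names; it is not the trie used by `LiteralTrie::compile`):

struct Node {
    label: u8,
    children: Vec<Node>,
}

// Recursive version: call-stack depth grows with the height of the tree,
// i.e. with the length of the longest literal.
fn sum_recursive(node: &Node) -> u64 {
    u64::from(node.label) + node.children.iter().map(sum_recursive).sum::<u64>()
}

// Iterative version: the same traversal with the "call stack" moved to the
// heap, so deep trees cannot overflow the thread stack.
fn sum_iterative(root: &Node) -> u64 {
    let mut stack = vec![root];
    let mut total = 0;
    while let Some(node) = stack.pop() {
        total += u64::from(node.label);
        stack.extend(node.children.iter());
    }
    total
}

fn main() {
    let tree = Node {
        label: b'z',
        children: vec![Node {
            label: b'a',
            children: vec![Node { label: b'p', children: vec![] }],
        }],
    };
    assert_eq!(sum_recursive(&tree), sum_iterative(&tree));
    println!("sum of labels: {}", sum_recursive(&tree));
}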
- #[cfg(test)] - fn compile_state_to_hir(&self, sid: StateID) -> regex_syntax::hir::Hir { - use regex_syntax::hir::Hir; - - let mut alt = vec![]; - for (i, chunk) in self.states[sid].chunks().enumerate() { - if i > 0 { - alt.push(Hir::empty()); - } - if chunk.is_empty() { - continue; - } - let mut chunk_alt = vec![]; - for t in chunk.iter() { - chunk_alt.push(Hir::concat(vec![ - Hir::literal(vec![t.byte]), - self.compile_state_to_hir(t.next), - ])); - } - alt.push(Hir::alternation(chunk_alt)); - } - Hir::alternation(alt) - } -} - -impl core::fmt::Debug for LiteralTrie { - fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { - writeln!(f, "LiteralTrie(")?; - for (sid, state) in self.states.iter().with_state_ids() { - writeln!(f, "{:06?}: {:?}", sid.as_usize(), state)?; - } - writeln!(f, ")")?; - Ok(()) - } -} - -/// An explicit stack frame used for traversing the trie without using -/// recursion. -/// -/// Each frame is tied to the traversal of a single trie state. The frame is -/// dropped once the entire state (and all of its children) have been visited. -/// The "output" of compiling a state is the 'union' vector, which is turn -/// converted to a NFA union state. Each branch of the union corresponds to a -/// chunk in the trie state. -/// -/// 'sparse' corresponds to the set of transitions for a particular chunk in a -/// trie state. It is ultimately converted to an NFA sparse state. The 'sparse' -/// field, after being converted to a sparse NFA state, is reused for any -/// subsequent chunks in the trie state, if any exist. -#[derive(Debug)] -struct Frame<'a> { - /// The remaining chunks to visit for a trie state. - chunks: StateChunksIter<'a>, - /// The transitions of the current chunk that we're iterating over. Since - /// every trie state has at least one chunk, every frame is initialized - /// with the first chunk's transitions ready to be consumed. - transitions: core::slice::Iter<'a, Transition>, - /// The NFA state IDs pointing to the start of each chunk compiled by - /// this trie state. This ultimately gets converted to an NFA union once - /// the entire trie state (and all of its children) have been compiled. - /// The order of these matters for leftmost-first match semantics, since - /// earlier matches in the union are preferred over later ones. - union: Vec, - /// The actual NFA transitions for a single chunk in a trie state. This - /// gets converted to an NFA sparse state, and its corresponding NFA state - /// ID should get added to 'union'. - sparse: Vec, -} - -impl<'a> Frame<'a> { - /// Create a new stack frame for trie traversal. This initializes the - /// 'transitions' iterator to the transitions for the first chunk, with the - /// 'chunks' iterator being every chunk after the first one. - fn new(state: &'a State) -> Frame<'a> { - let mut chunks = state.chunks(); - // every state has at least 1 chunk - let chunk = chunks.next().unwrap(); - let transitions = chunk.iter(); - Frame { chunks, transitions, union: vec![], sparse: vec![] } - } -} - -/// A state in a trie. -/// -/// This uses a sparse representation. Since we don't use literal tries -/// for searching, and ultimately (and compilation requires visiting every -/// transition anyway), we use a sparse representation for transitions. This -/// means we save on memory, at the expense of 'LiteralTrie::add' being perhaps -/// a bit slower. -/// -/// While 'transitions' is pretty standard as far as tries goes, the 'chunks' -/// piece here is more unusual. 
In effect, 'chunks' defines a partitioning -/// of 'transitions', where each chunk corresponds to a distinct set of -/// transitions. The key invariant is that a transition in one chunk cannot -/// be moved to another chunk. This is the secret sauce that preserve -/// leftmost-first match semantics. -/// -/// A new chunk is added whenever we mark a state as a match state. Once a -/// new chunk is added, the old active chunk is frozen and is never mutated -/// again. The new chunk becomes the active chunk, which is defined as -/// '&transitions[chunks.last().map_or(0, |c| c.1)..]'. Thus, a state where -/// 'chunks' is empty actually contains one chunk. Thus, every state contains -/// at least one (possibly empty) chunk. -/// -/// A "leaf" state is a state that has no outgoing transitions (so -/// 'transitions' is empty). Note that there is no way for a leaf state to be a -/// non-matching state. (Although while building the trie, within 'add', a leaf -/// state may exist while not containing any matches. But this invariant is -/// only broken within 'add'. Once 'add' returns, the invariant is upheld.) -#[derive(Clone, Default)] -struct State { - transitions: Vec, - chunks: Vec<(usize, usize)>, -} - -impl State { - /// Mark this state as a match state and freeze the active chunk such that - /// it can not be further mutated. - fn add_match(&mut self) { - // This is not strictly necessary, but there's no point in recording - // another match by adding another chunk if the state has no - // transitions. Note though that we only skip this if we already know - // this is a match state, which is only true if 'chunks' is not empty. - // Basically, if we didn't do this, nothing semantically would change, - // but we'd end up pushing another chunk and potentially triggering an - // alloc. - if self.transitions.is_empty() && !self.chunks.is_empty() { - return; - } - let chunk_start = self.active_chunk_start(); - let chunk_end = self.transitions.len(); - self.chunks.push((chunk_start, chunk_end)); - } - - /// Returns true if and only if this state is a leaf state. That is, a - /// state that has no outgoing transitions. - fn is_leaf(&self) -> bool { - self.transitions.is_empty() - } - - /// Returns an iterator over all of the chunks (including the currently - /// active chunk) in this state. Since the active chunk is included, the - /// iterator is guaranteed to always yield at least one chunk (although the - /// chunk may be empty). - fn chunks(&self) -> StateChunksIter<'_> { - StateChunksIter { - transitions: &*self.transitions, - chunks: self.chunks.iter(), - active: Some(self.active_chunk()), - } - } - - /// Returns the active chunk as a slice of transitions. - fn active_chunk(&self) -> &[Transition] { - let start = self.active_chunk_start(); - &self.transitions[start..] - } - - /// Returns the index into 'transitions' where the active chunk starts. - fn active_chunk_start(&self) -> usize { - self.chunks.last().map_or(0, |&(_, end)| end) - } -} - -impl core::fmt::Debug for State { - fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { - let mut spacing = " "; - for (i, chunk) in self.chunks().enumerate() { - if i > 0 { - write!(f, "{spacing}MATCH")?; - } - spacing = ""; - for (j, t) in chunk.iter().enumerate() { - spacing = " "; - if j == 0 && i > 0 { - write!(f, " ")?; - } else if j > 0 { - write!(f, ", ")?; - } - write!(f, "{t:?}")?; - } - } - Ok(()) - } -} - -/// An iterator over all of the chunks in a state, including the active chunk. 
-/// -/// This iterator is created by `State::chunks`. We name this iterator so that -/// we can include it in the `Frame` type for non-recursive trie traversal. -#[derive(Debug)] -struct StateChunksIter<'a> { - transitions: &'a [Transition], - chunks: core::slice::Iter<'a, (usize, usize)>, - active: Option<&'a [Transition]>, -} - -impl<'a> Iterator for StateChunksIter<'a> { - type Item = &'a [Transition]; - - fn next(&mut self) -> Option<&'a [Transition]> { - if let Some(&(start, end)) = self.chunks.next() { - return Some(&self.transitions[start..end]); - } - if let Some(chunk) = self.active.take() { - return Some(chunk); - } - None - } -} - -/// A single transition in a trie to another state. -#[derive(Clone, Copy)] -struct Transition { - byte: u8, - next: StateID, -} - -impl core::fmt::Debug for Transition { - fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { - write!( - f, - "{:?} => {}", - crate::util::escape::DebugByte(self.byte), - self.next.as_usize() - ) - } -} - -#[cfg(test)] -mod tests { - use bstr::B; - use regex_syntax::hir::Hir; - - use super::*; - - #[test] - fn zap() { - let mut trie = LiteralTrie::forward(); - trie.add(b"zapper").unwrap(); - trie.add(b"z").unwrap(); - trie.add(b"zap").unwrap(); - - let got = trie.compile_to_hir(); - let expected = Hir::concat(vec![ - Hir::literal(B("z")), - Hir::alternation(vec![ - Hir::literal(B("apper")), - Hir::empty(), - Hir::literal(B("ap")), - ]), - ]); - assert_eq!(expected, got); - } - - #[test] - fn maker() { - let mut trie = LiteralTrie::forward(); - trie.add(b"make").unwrap(); - trie.add(b"maple").unwrap(); - trie.add(b"maker").unwrap(); - - let got = trie.compile_to_hir(); - let expected = Hir::concat(vec![ - Hir::literal(B("ma")), - Hir::alternation(vec![ - Hir::concat(vec![ - Hir::literal(B("ke")), - Hir::alternation(vec![Hir::empty(), Hir::literal(B("r"))]), - ]), - Hir::literal(B("ple")), - ]), - ]); - assert_eq!(expected, got); - } -} diff --git a/vendor/regex-automata/src/nfa/thompson/map.rs b/vendor/regex-automata/src/nfa/thompson/map.rs deleted file mode 100644 index 7f074a353b93da..00000000000000 --- a/vendor/regex-automata/src/nfa/thompson/map.rs +++ /dev/null @@ -1,296 +0,0 @@ -// This module contains a couple simple and purpose built hash maps. The key -// trade off they make is that they serve as caches rather than true maps. That -// is, inserting a new entry may cause eviction of another entry. This gives -// us two things. First, there's less overhead associated with inserts and -// lookups. Secondly, it lets us control our memory usage. -// -// These maps are used in some fairly hot code when generating NFA states for -// large Unicode character classes. -// -// Instead of exposing a rich hashmap entry API, we just permit the caller to -// produce a hash of the key directly. The hash can then be reused for both -// lookups and insertions at the cost of leaking abstraction a bit. But these -// are for internal use only, so it's fine. -// -// The Utf8BoundedMap is used for Daciuk's algorithm for constructing a -// (almost) minimal DFA for large Unicode character classes in linear time. -// (Daciuk's algorithm is always used when compiling forward NFAs. For reverse -// NFAs, it's only used when the compiler is configured to 'shrink' the NFA, -// since there's a bit more expense in the reverse direction.) -// -// The Utf8SuffixMap is used when compiling large Unicode character classes for -// reverse NFAs when 'shrink' is disabled. 
Specifically, it augments the naive -// construction of UTF-8 automata by caching common suffixes. This doesn't -// get the same space savings as Daciuk's algorithm, but it's basically as -// fast as the naive approach and typically winds up using less memory (since -// it generates smaller NFAs) despite the presence of the cache. -// -// These maps effectively represent caching mechanisms for sparse and -// byte-range NFA states, respectively. The former represents a single NFA -// state with many transitions of equivalent priority while the latter -// represents a single NFA state with a single transition. (Neither state ever -// has or is an epsilon transition.) Thus, they have different key types. It's -// likely we could make one generic map, but the machinery didn't seem worth -// it. They are simple enough. - -use alloc::{vec, vec::Vec}; - -use crate::{ - nfa::thompson::Transition, - util::{ - int::{Usize, U64}, - primitives::StateID, - }, -}; - -// Basic FNV-1a hash constants as described in: -// https://en.wikipedia.org/wiki/Fowler%E2%80%93Noll%E2%80%93Vo_hash_function -const PRIME: u64 = 1099511628211; -const INIT: u64 = 14695981039346656037; - -/// A bounded hash map where the key is a sequence of NFA transitions and the -/// value is a pre-existing NFA state ID. -/// -/// std's hashmap can be used for this, however, this map has two important -/// advantages. Firstly, it has lower overhead. Secondly, it permits us to -/// control our memory usage by limited the number of slots. In general, the -/// cost here is that this map acts as a cache. That is, inserting a new entry -/// may remove an old entry. We are okay with this, since it does not impact -/// correctness in the cases where it is used. The only effect that dropping -/// states from the cache has is that the resulting NFA generated may be bigger -/// than it otherwise would be. -/// -/// This improves benchmarks that compile large Unicode character classes, -/// since it makes the generation of (almost) minimal UTF-8 automaton faster. -/// Specifically, one could observe the difference with std's hashmap via -/// something like the following benchmark: -/// -/// hyperfine "regex-cli debug thompson -qr --captures none '\w{90} ecurB'" -/// -/// But to observe that difference, you'd have to modify the code to use -/// std's hashmap. -/// -/// It is quite possible that there is a better way to approach this problem. -/// For example, if there happens to be a very common state that collides with -/// a lot of less frequent states, then we could wind up with very poor caching -/// behavior. Alas, the effectiveness of this cache has not been measured. -/// Instead, ad hoc experiments suggest that it is "good enough." Additional -/// smarts (such as an LRU eviction policy) have to be weighed against the -/// amount of extra time they cost. -#[derive(Clone, Debug)] -pub struct Utf8BoundedMap { - /// The current version of this map. Only entries with matching versions - /// are considered during lookups. If an entry is found with a mismatched - /// version, then the map behaves as if the entry does not exist. - /// - /// This makes it possible to clear the map by simply incrementing the - /// version number instead of actually deallocating any storage. - version: u16, - /// The total number of entries this map can store. - capacity: usize, - /// The actual entries, keyed by hash. Collisions between different states - /// result in the old state being dropped. - map: Vec, -} - -/// An entry in this map. 
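As a worked illustration of those FNV-1a constants (a standalone sketch, not the map implemented below), folding a key byte-by-byte and reducing the hash to a slot in a fixed-capacity table looks like this; the real `hash` methods fold transition triples the same way:

const PRIME: u64 = 1099511628211;
const INIT: u64 = 14695981039346656037;

fn fnv1a(bytes: &[u8]) -> u64 {
    let mut h = INIT;
    for &b in bytes {
        h = (h ^ u64::from(b)).wrapping_mul(PRIME);
    }
    h
}

fn main() {
    // Fixed table size; a bounded map never grows past its capacity, and a
    // colliding insert simply evicts the previous occupant of the slot.
    let capacity: u64 = 10_000;
    let key = [0xCE, 0xB1, 0x00]; // e.g. a byte range plus part of a state id
    let hash = fnv1a(&key);
    println!("hash = {hash:#018x}, slot = {}", hash % capacity);
}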
-#[derive(Clone, Debug, Default)] -struct Utf8BoundedEntry { - /// The version of the map used to produce this entry. If this entry's - /// version does not match the current version of the map, then the map - /// should behave as if this entry does not exist. - version: u16, - /// The key, which is a sorted sequence of non-overlapping NFA transitions. - key: Vec, - /// The state ID corresponding to the state containing the transitions in - /// this entry. - val: StateID, -} - -impl Utf8BoundedMap { - /// Create a new bounded map with the given capacity. The map will never - /// grow beyond the given size. - /// - /// Note that this does not allocate. Instead, callers must call `clear` - /// before using this map. `clear` will allocate space if necessary. - /// - /// This avoids the need to pay for the allocation of this map when - /// compiling regexes that lack large Unicode character classes. - pub fn new(capacity: usize) -> Utf8BoundedMap { - assert!(capacity > 0); - Utf8BoundedMap { version: 0, capacity, map: vec![] } - } - - /// Clear this map of all entries, but permit the reuse of allocation - /// if possible. - /// - /// This must be called before the map can be used. - pub fn clear(&mut self) { - if self.map.is_empty() { - self.map = vec![Utf8BoundedEntry::default(); self.capacity]; - } else { - self.version = self.version.wrapping_add(1); - // If we loop back to version 0, then we forcefully clear the - // entire map. Otherwise, it might be possible to incorrectly - // match entries used to generate other NFAs. - if self.version == 0 { - self.map = vec![Utf8BoundedEntry::default(); self.capacity]; - } - } - } - - /// Return a hash of the given transitions. - pub fn hash(&self, key: &[Transition]) -> usize { - let mut h = INIT; - for t in key { - h = (h ^ u64::from(t.start)).wrapping_mul(PRIME); - h = (h ^ u64::from(t.end)).wrapping_mul(PRIME); - h = (h ^ t.next.as_u64()).wrapping_mul(PRIME); - } - (h % self.map.len().as_u64()).as_usize() - } - - /// Retrieve the cached state ID corresponding to the given key. The hash - /// given must have been computed with `hash` using the same key value. - /// - /// If there is no cached state with the given transitions, then None is - /// returned. - pub fn get(&mut self, key: &[Transition], hash: usize) -> Option { - let entry = &self.map[hash]; - if entry.version != self.version { - return None; - } - // There may be a hash collision, so we need to confirm real equality. - if entry.key != key { - return None; - } - Some(entry.val) - } - - /// Add a cached state to this map with the given key. Callers should - /// ensure that `state_id` points to a state that contains precisely the - /// NFA transitions given. - /// - /// `hash` must have been computed using the `hash` method with the same - /// key. - pub fn set( - &mut self, - key: Vec, - hash: usize, - state_id: StateID, - ) { - self.map[hash] = - Utf8BoundedEntry { version: self.version, key, val: state_id }; - } -} - -/// A cache of suffixes used to modestly compress UTF-8 automata for large -/// Unicode character classes. -#[derive(Clone, Debug)] -pub struct Utf8SuffixMap { - /// The current version of this map. Only entries with matching versions - /// are considered during lookups. If an entry is found with a mismatched - /// version, then the map behaves as if the entry does not exist. - version: u16, - /// The total number of entries this map can store. - capacity: usize, - /// The actual entries, keyed by hash. 
Collisions between different states - /// result in the old state being dropped. - map: Vec, -} - -/// A key that uniquely identifies an NFA state. It is a triple that represents -/// a transition from one state for a particular byte range. -#[derive(Clone, Debug, Default, Eq, PartialEq)] -pub struct Utf8SuffixKey { - pub from: StateID, - pub start: u8, - pub end: u8, -} - -/// An entry in this map. -#[derive(Clone, Debug, Default)] -struct Utf8SuffixEntry { - /// The version of the map used to produce this entry. If this entry's - /// version does not match the current version of the map, then the map - /// should behave as if this entry does not exist. - version: u16, - /// The key, which consists of a transition in a particular state. - key: Utf8SuffixKey, - /// The identifier that the transition in the key maps to. - val: StateID, -} - -impl Utf8SuffixMap { - /// Create a new bounded map with the given capacity. The map will never - /// grow beyond the given size. - /// - /// Note that this does not allocate. Instead, callers must call `clear` - /// before using this map. `clear` will allocate space if necessary. - /// - /// This avoids the need to pay for the allocation of this map when - /// compiling regexes that lack large Unicode character classes. - pub fn new(capacity: usize) -> Utf8SuffixMap { - assert!(capacity > 0); - Utf8SuffixMap { version: 0, capacity, map: vec![] } - } - - /// Clear this map of all entries, but permit the reuse of allocation - /// if possible. - /// - /// This must be called before the map can be used. - pub fn clear(&mut self) { - if self.map.is_empty() { - self.map = vec![Utf8SuffixEntry::default(); self.capacity]; - } else { - self.version = self.version.wrapping_add(1); - if self.version == 0 { - self.map = vec![Utf8SuffixEntry::default(); self.capacity]; - } - } - } - - /// Return a hash of the given transition. - pub fn hash(&self, key: &Utf8SuffixKey) -> usize { - // Basic FNV-1a hash as described: - // https://en.wikipedia.org/wiki/Fowler%E2%80%93Noll%E2%80%93Vo_hash_function - const PRIME: u64 = 1099511628211; - const INIT: u64 = 14695981039346656037; - - let mut h = INIT; - h = (h ^ key.from.as_u64()).wrapping_mul(PRIME); - h = (h ^ u64::from(key.start)).wrapping_mul(PRIME); - h = (h ^ u64::from(key.end)).wrapping_mul(PRIME); - (h % self.map.len().as_u64()).as_usize() - } - - /// Retrieve the cached state ID corresponding to the given key. The hash - /// given must have been computed with `hash` using the same key value. - /// - /// If there is no cached state with the given key, then None is returned. - pub fn get( - &mut self, - key: &Utf8SuffixKey, - hash: usize, - ) -> Option { - let entry = &self.map[hash]; - if entry.version != self.version { - return None; - } - if key != &entry.key { - return None; - } - Some(entry.val) - } - - /// Add a cached state to this map with the given key. Callers should - /// ensure that `state_id` points to a state that contains precisely the - /// NFA transition given. - /// - /// `hash` must have been computed using the `hash` method with the same - /// key. - pub fn set(&mut self, key: Utf8SuffixKey, hash: usize, state_id: StateID) { - self.map[hash] = - Utf8SuffixEntry { version: self.version, key, val: state_id }; - } -} diff --git a/vendor/regex-automata/src/nfa/thompson/mod.rs b/vendor/regex-automata/src/nfa/thompson/mod.rs deleted file mode 100644 index dc7effef1df36b..00000000000000 --- a/vendor/regex-automata/src/nfa/thompson/mod.rs +++ /dev/null @@ -1,81 +0,0 @@ -/*! 
-Defines a Thompson NFA and provides the [`PikeVM`](pikevm::PikeVM) and -[`BoundedBacktracker`](backtrack::BoundedBacktracker) regex engines. - -A Thompson NFA (non-deterministic finite automaton) is arguably _the_ central -data type in this library. It is the result of what is commonly referred to as -"regex compilation." That is, turning a regex pattern from its concrete syntax -string into something that can run a search looks roughly like this: - -* A `&str` is parsed into a [`regex-syntax::ast::Ast`](regex_syntax::ast::Ast). -* An `Ast` is translated into a [`regex-syntax::hir::Hir`](regex_syntax::hir::Hir). -* An `Hir` is compiled into a [`NFA`]. -* The `NFA` is then used to build one of a few different regex engines: - * An `NFA` is used directly in the `PikeVM` and `BoundedBacktracker` engines. - * An `NFA` is used by a [hybrid NFA/DFA](crate::hybrid) to build out a DFA's - transition table at search time. - * An `NFA`, assuming it is one-pass, is used to build a full - [one-pass DFA](crate::dfa::onepass) ahead of time. - * An `NFA` is used to build a [full DFA](crate::dfa) ahead of time. - -The [`meta`](crate::meta) regex engine makes all of these choices for you based -on various criteria. However, if you have a lower level use case, _you_ can -build any of the above regex engines and use them directly. But you must start -here by building an `NFA`. - -# Details - -It is perhaps worth expanding a bit more on what it means to go through the -`&str`->`Ast`->`Hir`->`NFA` process. - -* Parsing a string into an `Ast` gives it a structured representation. -Crucially, the size and amount of work done in this step is proportional to the -size of the original string. No optimization or Unicode handling is done at -this point. This means that parsing into an `Ast` has very predictable costs. -Moreover, an `Ast` can be round-tripped back to its original pattern string as -written. -* Translating an `Ast` into an `Hir` is a process by which the structured -representation is simplified down to its most fundamental components. -Translation deals with flags such as case insensitivity by converting things -like `(?i:a)` to `[Aa]`. Translation is also where Unicode tables are consulted -to resolve things like `\p{Emoji}` and `\p{Greek}`. It also flattens each -character class, regardless of how deeply nested it is, into a single sequence -of non-overlapping ranges. All the various literal forms are thrown out in -favor of one common representation. Overall, the `Hir` is small enough to fit -into your head and makes analysis and other tasks much simpler. -* Compiling an `Hir` into an `NFA` formulates the regex into a finite state -machine whose transitions are defined over bytes. For example, an `Hir` might -have a Unicode character class corresponding to a sequence of ranges defined -in terms of `char`. Compilation is then responsible for turning those ranges -into a UTF-8 automaton. That is, an automaton that matches the UTF-8 encoding -of just the codepoints specified by those ranges. Otherwise, the main job of -an `NFA` is to serve as a byte-code of sorts for a virtual machine. It can be -seen as a sequence of instructions for how to match a regex. 
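To make the `&str` -> `Ast` -> `Hir` -> `NFA` pipeline sketched above concrete, here is a hedged example of driving each stage by hand from outside the crate; in practice `NFA::new` (or `Compiler::build`) does all of this in one call. It assumes the `regex-syntax` crate's `Parser` and `Translator` together with this module's `Compiler::build_from_hir`:

```rust
use regex_automata::nfa::thompson::{Compiler, NFA};
use regex_syntax::{ast::parse::Parser, hir::translate::Translator};

fn compile_by_hand(pattern: &str) -> Result<NFA, Box<dyn std::error::Error>> {
    // &str -> Ast: purely syntactic; cost is proportional to pattern length.
    let ast = Parser::new().parse(pattern)?;
    // Ast -> Hir: flags are resolved and Unicode classes are expanded here.
    let hir = Translator::new().translate(pattern, &ast)?;
    // Hir -> NFA: byte-oriented compilation; Unicode classes become UTF-8
    // automata at this stage.
    let nfa = Compiler::new().build_from_hir(&hir)?;
    Ok(nfa)
}
```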
-*/ - -#[cfg(feature = "nfa-backtrack")] -pub mod backtrack; -mod builder; -#[cfg(feature = "syntax")] -mod compiler; -mod error; -#[cfg(feature = "syntax")] -mod literal_trie; -#[cfg(feature = "syntax")] -mod map; -mod nfa; -#[cfg(feature = "nfa-pikevm")] -pub mod pikevm; -#[cfg(feature = "syntax")] -mod range_trie; - -pub use self::{ - builder::Builder, - error::BuildError, - nfa::{ - DenseTransitions, PatternIter, SparseTransitions, State, Transition, - NFA, - }, -}; -#[cfg(feature = "syntax")] -pub use compiler::{Compiler, Config, WhichCaptures}; diff --git a/vendor/regex-automata/src/nfa/thompson/nfa.rs b/vendor/regex-automata/src/nfa/thompson/nfa.rs deleted file mode 100644 index 405aa7533d4936..00000000000000 --- a/vendor/regex-automata/src/nfa/thompson/nfa.rs +++ /dev/null @@ -1,2098 +0,0 @@ -use core::{fmt, mem}; - -use alloc::{boxed::Box, format, string::String, sync::Arc, vec, vec::Vec}; - -#[cfg(feature = "syntax")] -use crate::nfa::thompson::{ - compiler::{Compiler, Config}, - error::BuildError, -}; -use crate::{ - nfa::thompson::builder::Builder, - util::{ - alphabet::{self, ByteClassSet, ByteClasses}, - captures::{GroupInfo, GroupInfoError}, - look::{Look, LookMatcher, LookSet}, - primitives::{ - IteratorIndexExt, PatternID, PatternIDIter, SmallIndex, StateID, - }, - sparse_set::SparseSet, - }, -}; - -/// A byte oriented Thompson non-deterministic finite automaton (NFA). -/// -/// A Thompson NFA is a finite state machine that permits unconditional epsilon -/// transitions, but guarantees that there exists at most one non-epsilon -/// transition for each element in the alphabet for each state. -/// -/// An NFA may be used directly for searching, for analysis or to build -/// a deterministic finite automaton (DFA). -/// -/// # Cheap clones -/// -/// Since an NFA is a core data type in this crate that many other regex -/// engines are based on top of, it is convenient to give ownership of an NFA -/// to said regex engines. Because of this, an NFA uses reference counting -/// internally. Therefore, it is cheap to clone and it is encouraged to do so. -/// -/// # Capabilities -/// -/// Using an NFA for searching via the -/// [`PikeVM`](crate::nfa::thompson::pikevm::PikeVM) provides the most amount -/// of "power" of any regex engine in this crate. Namely, it supports the -/// following in all cases: -/// -/// 1. Detection of a match. -/// 2. Location of a match, including both the start and end offset, in a -/// single pass of the haystack. -/// 3. Location of matching capturing groups. -/// 4. Handles multiple patterns, including (1)-(3) when multiple patterns are -/// present. -/// -/// # Capturing Groups -/// -/// Groups refer to parenthesized expressions inside a regex pattern. They look -/// like this, where `exp` is an arbitrary regex: -/// -/// * `(exp)` - An unnamed capturing group. -/// * `(?Pexp)` or `(?exp)` - A named capturing group. -/// * `(?:exp)` - A non-capturing group. -/// * `(?i:exp)` - A non-capturing group that sets flags. -/// -/// Only the first two forms are said to be _capturing_. Capturing -/// means that the last position at which they match is reportable. The -/// [`Captures`](crate::util::captures::Captures) type provides convenient -/// access to the match positions of capturing groups, which includes looking -/// up capturing groups by their name. -/// -/// # Byte oriented -/// -/// This NFA is byte oriented, which means that all of its transitions are -/// defined on bytes. 
In other words, the alphabet of an NFA consists of the -/// 256 different byte values. -/// -/// While DFAs nearly demand that they be byte oriented for performance -/// reasons, an NFA could conceivably be *Unicode codepoint* oriented. Indeed, -/// a previous version of this NFA supported both byte and codepoint oriented -/// modes. A codepoint oriented mode can work because an NFA fundamentally uses -/// a sparse representation of transitions, which works well with the large -/// sparse space of Unicode codepoints. -/// -/// Nevertheless, this NFA is only byte oriented. This choice is primarily -/// driven by implementation simplicity, and also in part memory usage. In -/// practice, performance between the two is roughly comparable. However, -/// building a DFA (including a hybrid DFA) really wants a byte oriented NFA. -/// So if we do have a codepoint oriented NFA, then we also need to generate -/// byte oriented NFA in order to build an hybrid NFA/DFA. Thus, by only -/// generating byte oriented NFAs, we can produce one less NFA. In other words, -/// if we made our NFA codepoint oriented, we'd need to *also* make it support -/// a byte oriented mode, which is more complicated. But a byte oriented mode -/// can support everything. -/// -/// # Differences with DFAs -/// -/// At the theoretical level, the precise difference between an NFA and a DFA -/// is that, in a DFA, for every state, an input symbol unambiguously refers -/// to a single transition _and_ that an input symbol is required for each -/// transition. At a practical level, this permits DFA implementations to be -/// implemented at their core with a small constant number of CPU instructions -/// for each byte of input searched. In practice, this makes them quite a bit -/// faster than NFAs _in general_. Namely, in order to execute a search for any -/// Thompson NFA, one needs to keep track of a _set_ of states, and execute -/// the possible transitions on all of those states for each input symbol. -/// Overall, this results in much more overhead. To a first approximation, one -/// can expect DFA searches to be about an order of magnitude faster. -/// -/// So why use an NFA at all? The main advantage of an NFA is that it takes -/// linear time (in the size of the pattern string after repetitions have been -/// expanded) to build and linear memory usage. A DFA, on the other hand, may -/// take exponential time and/or space to build. Even in non-pathological -/// cases, DFAs often take quite a bit more memory than their NFA counterparts, -/// _especially_ if large Unicode character classes are involved. Of course, -/// an NFA also provides additional capabilities. For example, it can match -/// Unicode word boundaries on non-ASCII text and resolve the positions of -/// capturing groups. -/// -/// Note that a [`hybrid::regex::Regex`](crate::hybrid::regex::Regex) strikes a -/// good balance between an NFA and a DFA. It avoids the exponential build time -/// of a DFA while maintaining its fast search time. The downside of a hybrid -/// NFA/DFA is that in some cases it can be slower at search time than the NFA. -/// (It also has less functionality than a pure NFA. It cannot handle Unicode -/// word boundaries on non-ASCII text and cannot resolve capturing groups.) -/// -/// # Example -/// -/// This shows how to build an NFA with the default configuration and execute a -/// search using the Pike VM. 
-/// -/// ``` -/// use regex_automata::{nfa::thompson::pikevm::PikeVM, Match}; -/// -/// let re = PikeVM::new(r"foo[0-9]+")?; -/// let mut cache = re.create_cache(); -/// let mut caps = re.create_captures(); -/// -/// let expected = Some(Match::must(0, 0..8)); -/// re.captures(&mut cache, b"foo12345", &mut caps); -/// assert_eq!(expected, caps.get_match()); -/// -/// # Ok::<(), Box>(()) -/// ``` -/// -/// # Example: resolving capturing groups -/// -/// This example shows how to parse some simple dates and extract the -/// components of each date via capturing groups. -/// -/// ``` -/// # if cfg!(miri) { return Ok(()); } // miri takes too long -/// use regex_automata::{ -/// nfa::thompson::pikevm::PikeVM, -/// util::captures::Captures, -/// }; -/// -/// let vm = PikeVM::new(r"(?P\d{4})-(?P\d{2})-(?P\d{2})")?; -/// let mut cache = vm.create_cache(); -/// -/// let haystack = "2012-03-14, 2013-01-01 and 2014-07-05"; -/// let all: Vec = vm.captures_iter( -/// &mut cache, haystack.as_bytes() -/// ).collect(); -/// // There should be a total of 3 matches. -/// assert_eq!(3, all.len()); -/// // The year from the second match is '2013'. -/// let span = all[1].get_group_by_name("y").unwrap(); -/// assert_eq!("2013", &haystack[span]); -/// -/// # Ok::<(), Box>(()) -/// ``` -/// -/// This example shows that only the last match of a capturing group is -/// reported, even if it had to match multiple times for an overall match -/// to occur. -/// -/// ``` -/// use regex_automata::{nfa::thompson::pikevm::PikeVM, Span}; -/// -/// let re = PikeVM::new(r"([a-z]){4}")?; -/// let mut cache = re.create_cache(); -/// let mut caps = re.create_captures(); -/// -/// let haystack = b"quux"; -/// re.captures(&mut cache, haystack, &mut caps); -/// assert!(caps.is_match()); -/// assert_eq!(Some(Span::from(3..4)), caps.get_group(1)); -/// -/// # Ok::<(), Box>(()) -/// ``` -#[derive(Clone)] -pub struct NFA( - // We make NFAs reference counted primarily for two reasons. First is that - // the NFA type itself is quite large (at least 0.5KB), and so it makes - // sense to put it on the heap by default anyway. Second is that, for Arc - // specifically, this enables cheap clones. This tends to be useful because - // several structures (the backtracker, the Pike VM, the hybrid NFA/DFA) - // all want to hang on to an NFA for use during search time. We could - // provide the NFA at search time via a function argument, but this makes - // for an unnecessarily annoying API. Instead, we just let each structure - // share ownership of the NFA. Using a deep clone would not be smart, since - // the NFA can use quite a bit of heap space. - Arc, -); - -impl NFA { - /// Parse the given regular expression using a default configuration and - /// build an NFA from it. - /// - /// If you want a non-default configuration, then use the NFA - /// [`Compiler`] with a [`Config`]. - /// - /// # Example - /// - /// ``` - /// use regex_automata::{nfa::thompson::pikevm::PikeVM, Match}; - /// - /// let re = PikeVM::new(r"foo[0-9]+")?; - /// let (mut cache, mut caps) = (re.create_cache(), re.create_captures()); - /// - /// let expected = Some(Match::must(0, 0..8)); - /// re.captures(&mut cache, b"foo12345", &mut caps); - /// assert_eq!(expected, caps.get_match()); - /// - /// # Ok::<(), Box>(()) - /// ``` - #[cfg(feature = "syntax")] - pub fn new(pattern: &str) -> Result { - NFA::compiler().build(pattern) - } - - /// Parse the given regular expressions using a default configuration and - /// build a multi-NFA from them. 
- /// - /// If you want a non-default configuration, then use the NFA - /// [`Compiler`] with a [`Config`]. - /// - /// # Example - /// - /// ``` - /// use regex_automata::{nfa::thompson::pikevm::PikeVM, Match}; - /// - /// let re = PikeVM::new_many(&["[0-9]+", "[a-z]+"])?; - /// let (mut cache, mut caps) = (re.create_cache(), re.create_captures()); - /// - /// let expected = Some(Match::must(1, 0..3)); - /// re.captures(&mut cache, b"foo12345bar", &mut caps); - /// assert_eq!(expected, caps.get_match()); - /// - /// # Ok::<(), Box>(()) - /// ``` - #[cfg(feature = "syntax")] - pub fn new_many>(patterns: &[P]) -> Result { - NFA::compiler().build_many(patterns) - } - - /// Returns an NFA with a single regex pattern that always matches at every - /// position. - /// - /// # Example - /// - /// ``` - /// use regex_automata::{nfa::thompson::{NFA, pikevm::PikeVM}, Match}; - /// - /// let re = PikeVM::new_from_nfa(NFA::always_match())?; - /// let (mut cache, mut caps) = (re.create_cache(), re.create_captures()); - /// - /// let expected = Some(Match::must(0, 0..0)); - /// re.captures(&mut cache, b"", &mut caps); - /// assert_eq!(expected, caps.get_match()); - /// re.captures(&mut cache, b"foo", &mut caps); - /// assert_eq!(expected, caps.get_match()); - /// - /// # Ok::<(), Box>(()) - /// ``` - pub fn always_match() -> NFA { - // We could use NFA::new("") here and we'd get the same semantics, but - // hand-assembling the NFA (as below) does the same thing with a fewer - // number of states. It also avoids needing the 'syntax' feature - // enabled. - // - // Technically all we need is the "match" state, but we add the - // "capture" states so that the PikeVM can use this NFA. - // - // The unwraps below are OK because we add so few states that they will - // never exhaust any default limits in any environment. - let mut builder = Builder::new(); - let pid = builder.start_pattern().unwrap(); - assert_eq!(pid.as_usize(), 0); - let start_id = - builder.add_capture_start(StateID::ZERO, 0, None).unwrap(); - let end_id = builder.add_capture_end(StateID::ZERO, 0).unwrap(); - let match_id = builder.add_match().unwrap(); - builder.patch(start_id, end_id).unwrap(); - builder.patch(end_id, match_id).unwrap(); - let pid = builder.finish_pattern(start_id).unwrap(); - assert_eq!(pid.as_usize(), 0); - builder.build(start_id, start_id).unwrap() - } - - /// Returns an NFA that never matches at any position. - /// - /// This is a convenience routine for creating an NFA with zero patterns. - /// - /// # Example - /// - /// ``` - /// use regex_automata::nfa::thompson::{NFA, pikevm::PikeVM}; - /// - /// let re = PikeVM::new_from_nfa(NFA::never_match())?; - /// let (mut cache, mut caps) = (re.create_cache(), re.create_captures()); - /// - /// re.captures(&mut cache, b"", &mut caps); - /// assert!(!caps.is_match()); - /// re.captures(&mut cache, b"foo", &mut caps); - /// assert!(!caps.is_match()); - /// - /// # Ok::<(), Box>(()) - /// ``` - pub fn never_match() -> NFA { - // This always succeeds because it only requires one NFA state, which - // will never exhaust any (default) limits. - let mut builder = Builder::new(); - let sid = builder.add_fail().unwrap(); - builder.build(sid, sid).unwrap() - } - - /// Return a default configuration for an `NFA`. - /// - /// This is a convenience routine to avoid needing to import the `Config` - /// type when customizing the construction of an NFA. 
- /// - /// # Example - /// - /// This example shows how to build an NFA with a small size limit that - /// results in a compilation error for any regex that tries to use more - /// heap memory than the configured limit. - /// - /// ``` - /// use regex_automata::nfa::thompson::{NFA, pikevm::PikeVM}; - /// - /// let result = PikeVM::builder() - /// .thompson(NFA::config().nfa_size_limit(Some(1_000))) - /// // Remember, \w is Unicode-aware by default and thus huge. - /// .build(r"\w+"); - /// assert!(result.is_err()); - /// ``` - #[cfg(feature = "syntax")] - pub fn config() -> Config { - Config::new() - } - - /// Return a compiler for configuring the construction of an `NFA`. - /// - /// This is a convenience routine to avoid needing to import the - /// [`Compiler`] type in common cases. - /// - /// # Example - /// - /// This example shows how to build an NFA that is permitted match invalid - /// UTF-8. Without the additional syntax configuration here, compilation of - /// `(?-u:.)` would fail because it is permitted to match invalid UTF-8. - /// - /// ``` - /// use regex_automata::{ - /// nfa::thompson::pikevm::PikeVM, - /// util::syntax, - /// Match, - /// }; - /// - /// let re = PikeVM::builder() - /// .syntax(syntax::Config::new().utf8(false)) - /// .build(r"[a-z]+(?-u:.)")?; - /// let (mut cache, mut caps) = (re.create_cache(), re.create_captures()); - /// - /// let expected = Some(Match::must(0, 1..5)); - /// re.captures(&mut cache, b"\xFFabc\xFF", &mut caps); - /// assert_eq!(expected, caps.get_match()); - /// - /// # Ok::<(), Box>(()) - /// ``` - #[cfg(feature = "syntax")] - pub fn compiler() -> Compiler { - Compiler::new() - } - - /// Returns an iterator over all pattern identifiers in this NFA. - /// - /// Pattern IDs are allocated in sequential order starting from zero, - /// where the order corresponds to the order of patterns provided to the - /// [`NFA::new_many`] constructor. - /// - /// # Example - /// - /// ``` - /// use regex_automata::{nfa::thompson::NFA, PatternID}; - /// - /// let nfa = NFA::new_many(&["[0-9]+", "[a-z]+", "[A-Z]+"])?; - /// let pids: Vec = nfa.patterns().collect(); - /// assert_eq!(pids, vec![ - /// PatternID::must(0), - /// PatternID::must(1), - /// PatternID::must(2), - /// ]); - /// - /// # Ok::<(), Box>(()) - /// ``` - pub fn patterns(&self) -> PatternIter<'_> { - PatternIter { - it: PatternID::iter(self.pattern_len()), - _marker: core::marker::PhantomData, - } - } - - /// Returns the total number of regex patterns in this NFA. - /// - /// This may return zero if the NFA was constructed with no patterns. In - /// this case, the NFA can never produce a match for any input. - /// - /// This is guaranteed to be no bigger than [`PatternID::LIMIT`] because - /// NFA construction will fail if too many patterns are added. - /// - /// It is always true that `nfa.patterns().count() == nfa.pattern_len()`. - /// - /// # Example - /// - /// ``` - /// use regex_automata::nfa::thompson::NFA; - /// - /// let nfa = NFA::new_many(&["[0-9]+", "[a-z]+", "[A-Z]+"])?; - /// assert_eq!(3, nfa.pattern_len()); - /// - /// let nfa = NFA::never_match(); - /// assert_eq!(0, nfa.pattern_len()); - /// - /// let nfa = NFA::always_match(); - /// assert_eq!(1, nfa.pattern_len()); - /// - /// # Ok::<(), Box>(()) - /// ``` - #[inline] - pub fn pattern_len(&self) -> usize { - self.0.start_pattern.len() - } - - /// Return the state identifier of the initial anchored state of this NFA. 
- /// - /// The returned identifier is guaranteed to be a valid index into the - /// slice returned by [`NFA::states`], and is also a valid argument to - /// [`NFA::state`]. - /// - /// # Example - /// - /// This example shows a somewhat contrived example where we can easily - /// predict the anchored starting state. - /// - /// ``` - /// use regex_automata::nfa::thompson::{NFA, State, WhichCaptures}; - /// - /// let nfa = NFA::compiler() - /// .configure(NFA::config().which_captures(WhichCaptures::None)) - /// .build("a")?; - /// let state = nfa.state(nfa.start_anchored()); - /// match *state { - /// State::ByteRange { trans } => { - /// assert_eq!(b'a', trans.start); - /// assert_eq!(b'a', trans.end); - /// } - /// _ => unreachable!("unexpected state"), - /// } - /// - /// # Ok::<(), Box>(()) - /// ``` - #[inline] - pub fn start_anchored(&self) -> StateID { - self.0.start_anchored - } - - /// Return the state identifier of the initial unanchored state of this - /// NFA. - /// - /// This is equivalent to the identifier returned by - /// [`NFA::start_anchored`] when the NFA has no unanchored starting state. - /// - /// The returned identifier is guaranteed to be a valid index into the - /// slice returned by [`NFA::states`], and is also a valid argument to - /// [`NFA::state`]. - /// - /// # Example - /// - /// This example shows that the anchored and unanchored starting states - /// are equivalent when an anchored NFA is built. - /// - /// ``` - /// use regex_automata::nfa::thompson::NFA; - /// - /// let nfa = NFA::new("^a")?; - /// assert_eq!(nfa.start_anchored(), nfa.start_unanchored()); - /// - /// # Ok::<(), Box>(()) - /// ``` - #[inline] - pub fn start_unanchored(&self) -> StateID { - self.0.start_unanchored - } - - /// Return the state identifier of the initial anchored state for the given - /// pattern, or `None` if there is no pattern corresponding to the given - /// identifier. - /// - /// If one uses the starting state for a particular pattern, then the only - /// match that can be returned is for the corresponding pattern. - /// - /// The returned identifier is guaranteed to be a valid index into the - /// slice returned by [`NFA::states`], and is also a valid argument to - /// [`NFA::state`]. - /// - /// # Errors - /// - /// If the pattern doesn't exist in this NFA, then this returns an error. - /// This occurs when `pid.as_usize() >= nfa.pattern_len()`. - /// - /// # Example - /// - /// This example shows that the anchored and unanchored starting states - /// are equivalent when an anchored NFA is built. - /// - /// ``` - /// use regex_automata::{nfa::thompson::NFA, PatternID}; - /// - /// let nfa = NFA::new_many(&["^a", "^b"])?; - /// // The anchored and unanchored states for the entire NFA are the same, - /// // since all of the patterns are anchored. - /// assert_eq!(nfa.start_anchored(), nfa.start_unanchored()); - /// // But the anchored starting states for each pattern are distinct, - /// // because these starting states can only lead to matches for the - /// // corresponding pattern. 
- /// let anchored = Some(nfa.start_anchored()); - /// assert_ne!(anchored, nfa.start_pattern(PatternID::must(0))); - /// assert_ne!(anchored, nfa.start_pattern(PatternID::must(1))); - /// // Requesting a pattern not in the NFA will result in None: - /// assert_eq!(None, nfa.start_pattern(PatternID::must(2))); - /// - /// # Ok::<(), Box>(()) - /// ``` - #[inline] - pub fn start_pattern(&self, pid: PatternID) -> Option { - self.0.start_pattern.get(pid.as_usize()).copied() - } - - /// Get the byte class set for this NFA. - /// - /// A byte class set is a partitioning of this NFA's alphabet into - /// equivalence classes. Any two bytes in the same equivalence class are - /// guaranteed to never discriminate between a match or a non-match. (The - /// partitioning may not be minimal.) - /// - /// Byte classes are used internally by this crate when building DFAs. - /// Namely, among other optimizations, they enable a space optimization - /// where the DFA's internal alphabet is defined over the equivalence - /// classes of bytes instead of all possible byte values. The former is - /// often quite a bit smaller than the latter, which permits the DFA to use - /// less space for its transition table. - #[inline] - pub(crate) fn byte_class_set(&self) -> &ByteClassSet { - &self.0.byte_class_set - } - - /// Get the byte classes for this NFA. - /// - /// Byte classes represent a partitioning of this NFA's alphabet into - /// equivalence classes. Any two bytes in the same equivalence class are - /// guaranteed to never discriminate between a match or a non-match. (The - /// partitioning may not be minimal.) - /// - /// Byte classes are used internally by this crate when building DFAs. - /// Namely, among other optimizations, they enable a space optimization - /// where the DFA's internal alphabet is defined over the equivalence - /// classes of bytes instead of all possible byte values. The former is - /// often quite a bit smaller than the latter, which permits the DFA to use - /// less space for its transition table. - /// - /// # Example - /// - /// This example shows how to query the class of various bytes. - /// - /// ``` - /// use regex_automata::nfa::thompson::NFA; - /// - /// let nfa = NFA::new("[a-z]+")?; - /// let classes = nfa.byte_classes(); - /// // 'a' and 'z' are in the same class for this regex. - /// assert_eq!(classes.get(b'a'), classes.get(b'z')); - /// // But 'a' and 'A' are not. - /// assert_ne!(classes.get(b'a'), classes.get(b'A')); - /// - /// # Ok::<(), Box>(()) - /// ``` - #[inline] - pub fn byte_classes(&self) -> &ByteClasses { - &self.0.byte_classes - } - - /// Return a reference to the NFA state corresponding to the given ID. - /// - /// This is a convenience routine for `nfa.states()[id]`. - /// - /// # Panics - /// - /// This panics when the given identifier does not reference a valid state. - /// That is, when `id.as_usize() >= nfa.states().len()`. - /// - /// # Example - /// - /// The anchored state for a pattern will typically correspond to a - /// capturing state for that pattern. (Although, this is not an API - /// guarantee!) - /// - /// ``` - /// use regex_automata::{nfa::thompson::{NFA, State}, PatternID}; - /// - /// let nfa = NFA::new("a")?; - /// let state = nfa.state(nfa.start_pattern(PatternID::ZERO).unwrap()); - /// match *state { - /// State::Capture { slot, .. 
} => { - /// assert_eq!(0, slot.as_usize()); - /// } - /// _ => unreachable!("unexpected state"), - /// } - /// - /// # Ok::<(), Box>(()) - /// ``` - #[inline] - pub fn state(&self, id: StateID) -> &State { - &self.states()[id] - } - - /// Returns a slice of all states in this NFA. - /// - /// The slice returned is indexed by `StateID`. This provides a convenient - /// way to access states while following transitions among those states. - /// - /// # Example - /// - /// This demonstrates that disabling UTF-8 mode can shrink the size of the - /// NFA considerably in some cases, especially when using Unicode character - /// classes. - /// - /// ``` - /// # if cfg!(miri) { return Ok(()); } // miri takes too long - /// use regex_automata::nfa::thompson::NFA; - /// - /// let nfa_unicode = NFA::new(r"\w")?; - /// let nfa_ascii = NFA::new(r"(?-u)\w")?; - /// // Yes, a factor of 45 difference. No lie. - /// assert!(40 * nfa_ascii.states().len() < nfa_unicode.states().len()); - /// - /// # Ok::<(), Box>(()) - /// ``` - #[inline] - pub fn states(&self) -> &[State] { - &self.0.states - } - - /// Returns the capturing group info for this NFA. - /// - /// The [`GroupInfo`] provides a way to map to and from capture index - /// and capture name for each pattern. It also provides a mapping from - /// each of the capturing groups in every pattern to their corresponding - /// slot offsets encoded in [`State::Capture`] states. - /// - /// Note that `GroupInfo` uses reference counting internally, such that - /// cloning a `GroupInfo` is very cheap. - /// - /// # Example - /// - /// This example shows how to get a list of all capture group names for - /// a particular pattern. - /// - /// ``` - /// use regex_automata::{nfa::thompson::NFA, PatternID}; - /// - /// let nfa = NFA::new(r"(a)(?Pb)(c)(d)(?Pe)")?; - /// // The first is the implicit group that is always unnamed. The next - /// // 5 groups are the explicit groups found in the concrete syntax above. - /// let expected = vec![None, None, Some("foo"), None, None, Some("bar")]; - /// let got: Vec> = - /// nfa.group_info().pattern_names(PatternID::ZERO).collect(); - /// assert_eq!(expected, got); - /// - /// // Using an invalid pattern ID will result in nothing yielded. - /// let got = nfa.group_info().pattern_names(PatternID::must(999)).count(); - /// assert_eq!(0, got); - /// - /// # Ok::<(), Box>(()) - /// ``` - #[inline] - pub fn group_info(&self) -> &GroupInfo { - &self.0.group_info() - } - - /// Returns true if and only if this NFA has at least one - /// [`Capture`](State::Capture) in its sequence of states. - /// - /// This is useful as a way to perform a quick test before attempting - /// something that does or does not require capture states. For example, - /// some regex engines (like the PikeVM) require capture states in order to - /// work at all. - /// - /// # Example - /// - /// This example shows a few different NFAs and whether they have captures - /// or not. - /// - /// ``` - /// use regex_automata::nfa::thompson::{NFA, WhichCaptures}; - /// - /// // Obviously has capture states. - /// let nfa = NFA::new("(a)")?; - /// assert!(nfa.has_capture()); - /// - /// // Less obviously has capture states, because every pattern has at - /// // least one anonymous capture group corresponding to the match for the - /// // entire pattern. - /// let nfa = NFA::new("a")?; - /// assert!(nfa.has_capture()); - /// - /// // Other than hand building your own NFA, this is the only way to build - /// // an NFA without capturing groups. 
In general, you should only do this - /// // if you don't intend to use any of the NFA-oriented regex engines. - /// // Overall, capturing groups don't have many downsides. Although they - /// // can add a bit of noise to simple NFAs, so it can be nice to disable - /// // them for debugging purposes. - /// // - /// // Notice that 'has_capture' is false here even when we have an - /// // explicit capture group in the pattern. - /// let nfa = NFA::compiler() - /// .configure(NFA::config().which_captures(WhichCaptures::None)) - /// .build("(a)")?; - /// assert!(!nfa.has_capture()); - /// - /// # Ok::<(), Box>(()) - /// ``` - #[inline] - pub fn has_capture(&self) -> bool { - self.0.has_capture - } - - /// Returns true if and only if this NFA can match the empty string. - /// When it returns false, all possible matches are guaranteed to have a - /// non-zero length. - /// - /// This is useful as cheap way to know whether code needs to handle the - /// case of a zero length match. This is particularly important when UTF-8 - /// modes are enabled, as when UTF-8 mode is enabled, empty matches that - /// split a codepoint must never be reported. This extra handling can - /// sometimes be costly, and since regexes matching an empty string are - /// somewhat rare, it can be beneficial to treat such regexes specially. - /// - /// # Example - /// - /// This example shows a few different NFAs and whether they match the - /// empty string or not. Notice the empty string isn't merely a matter - /// of a string of length literally `0`, but rather, whether a match can - /// occur between specific pairs of bytes. - /// - /// ``` - /// use regex_automata::{nfa::thompson::NFA, util::syntax}; - /// - /// // The empty regex matches the empty string. - /// let nfa = NFA::new("")?; - /// assert!(nfa.has_empty(), "empty matches empty"); - /// // The '+' repetition operator requires at least one match, and so - /// // does not match the empty string. - /// let nfa = NFA::new("a+")?; - /// assert!(!nfa.has_empty(), "+ does not match empty"); - /// // But the '*' repetition operator does. - /// let nfa = NFA::new("a*")?; - /// assert!(nfa.has_empty(), "* does match empty"); - /// // And wrapping '+' in an operator that can match an empty string also - /// // causes it to match the empty string too. - /// let nfa = NFA::new("(a+)*")?; - /// assert!(nfa.has_empty(), "+ inside of * matches empty"); - /// - /// // If a regex is just made of a look-around assertion, even if the - /// // assertion requires some kind of non-empty string around it (such as - /// // \b), then it is still treated as if it matches the empty string. - /// // Namely, if a match occurs of just a look-around assertion, then the - /// // match returned is empty. - /// let nfa = NFA::compiler() - /// .syntax(syntax::Config::new().utf8(false)) - /// .build(r"^$\A\z\b\B(?-u:\b\B)")?; - /// assert!(nfa.has_empty(), "assertions match empty"); - /// // Even when an assertion is wrapped in a '+', it still matches the - /// // empty string. - /// let nfa = NFA::new(r"\b+")?; - /// assert!(nfa.has_empty(), "+ of an assertion matches empty"); - /// - /// // An alternation with even one branch that can match the empty string - /// // is also said to match the empty string overall. - /// let nfa = NFA::new("foo|(bar)?|quux")?; - /// assert!(nfa.has_empty(), "alternations can match empty"); - /// - /// // An NFA that matches nothing does not match the empty string. 
- /// let nfa = NFA::new("[a&&b]")?; - /// assert!(!nfa.has_empty(), "never matching means not matching empty"); - /// // But if it's wrapped in something that doesn't require a match at - /// // all, then it can match the empty string! - /// let nfa = NFA::new("[a&&b]*")?; - /// assert!(nfa.has_empty(), "* on never-match still matches empty"); - /// // Since a '+' requires a match, using it on something that can never - /// // match will itself produce a regex that can never match anything, - /// // and thus does not match the empty string. - /// let nfa = NFA::new("[a&&b]+")?; - /// assert!(!nfa.has_empty(), "+ on never-match still matches nothing"); - /// - /// # Ok::<(), Box>(()) - /// ``` - #[inline] - pub fn has_empty(&self) -> bool { - self.0.has_empty - } - - /// Whether UTF-8 mode is enabled for this NFA or not. - /// - /// When UTF-8 mode is enabled, all matches reported by a regex engine - /// derived from this NFA are guaranteed to correspond to spans of valid - /// UTF-8. This includes zero-width matches. For example, the regex engine - /// must guarantee that the empty regex will not match at the positions - /// between code units in the UTF-8 encoding of a single codepoint. - /// - /// See [`Config::utf8`] for more information. - /// - /// This is enabled by default. - /// - /// # Example - /// - /// This example shows how UTF-8 mode can impact the match spans that may - /// be reported in certain cases. - /// - /// ``` - /// use regex_automata::{ - /// nfa::thompson::{self, pikevm::PikeVM}, - /// Match, Input, - /// }; - /// - /// let re = PikeVM::new("")?; - /// let (mut cache, mut caps) = (re.create_cache(), re.create_captures()); - /// - /// // UTF-8 mode is enabled by default. - /// let mut input = Input::new("☃"); - /// re.search(&mut cache, &input, &mut caps); - /// assert_eq!(Some(Match::must(0, 0..0)), caps.get_match()); - /// - /// // Even though an empty regex matches at 1..1, our next match is - /// // 3..3 because 1..1 and 2..2 split the snowman codepoint (which is - /// // three bytes long). - /// input.set_start(1); - /// re.search(&mut cache, &input, &mut caps); - /// assert_eq!(Some(Match::must(0, 3..3)), caps.get_match()); - /// - /// // But if we disable UTF-8, then we'll get matches at 1..1 and 2..2: - /// let re = PikeVM::builder() - /// .thompson(thompson::Config::new().utf8(false)) - /// .build("")?; - /// re.search(&mut cache, &input, &mut caps); - /// assert_eq!(Some(Match::must(0, 1..1)), caps.get_match()); - /// - /// input.set_start(2); - /// re.search(&mut cache, &input, &mut caps); - /// assert_eq!(Some(Match::must(0, 2..2)), caps.get_match()); - /// - /// input.set_start(3); - /// re.search(&mut cache, &input, &mut caps); - /// assert_eq!(Some(Match::must(0, 3..3)), caps.get_match()); - /// - /// input.set_start(4); - /// re.search(&mut cache, &input, &mut caps); - /// assert_eq!(None, caps.get_match()); - /// - /// # Ok::<(), Box>(()) - /// ``` - #[inline] - pub fn is_utf8(&self) -> bool { - self.0.utf8 - } - - /// Returns true when this NFA is meant to be matched in reverse. - /// - /// Generally speaking, when this is true, it means the NFA is supposed to - /// be used in conjunction with moving backwards through the haystack. That - /// is, from a higher memory address to a lower memory address. - /// - /// It is often the case that lower level routines dealing with an NFA - /// don't need to care about whether it is "meant" to be matched in reverse - /// or not. However, there are some specific cases where it matters. 
For - /// example, the implementation of CRLF-aware `^` and `$` line anchors - /// needs to know whether the search is in the forward or reverse - /// direction. In the forward direction, neither `^` nor `$` should match - /// when a `\r` has been seen previously and a `\n` is next. However, in - /// the reverse direction, neither `^` nor `$` should match when a `\n` - /// has been seen previously and a `\r` is next. This fundamentally changes - /// how the state machine is constructed, and thus needs to be altered - /// based on the direction of the search. - /// - /// This is automatically set when using a [`Compiler`] with a configuration - /// where [`Config::reverse`] is enabled. If you're building your own NFA - /// by hand via a [`Builder`] - #[inline] - pub fn is_reverse(&self) -> bool { - self.0.reverse - } - - /// Returns true if and only if all starting states for this NFA correspond - /// to the beginning of an anchored search. - /// - /// Typically, an NFA will have both an anchored and an unanchored starting - /// state. Namely, because it tends to be useful to have both and the cost - /// of having an unanchored starting state is almost zero (for an NFA). - /// However, if all patterns in the NFA are themselves anchored, then even - /// the unanchored starting state will correspond to an anchored search - /// since the pattern doesn't permit anything else. - /// - /// # Example - /// - /// This example shows a few different scenarios where this method's - /// return value varies. - /// - /// ``` - /// use regex_automata::nfa::thompson::NFA; - /// - /// // The unanchored starting state permits matching this pattern anywhere - /// // in a haystack, instead of just at the beginning. - /// let nfa = NFA::new("a")?; - /// assert!(!nfa.is_always_start_anchored()); - /// - /// // In this case, the pattern is itself anchored, so there is no way - /// // to run an unanchored search. - /// let nfa = NFA::new("^a")?; - /// assert!(nfa.is_always_start_anchored()); - /// - /// // When multiline mode is enabled, '^' can match at the start of a line - /// // in addition to the start of a haystack, so an unanchored search is - /// // actually possible. - /// let nfa = NFA::new("(?m)^a")?; - /// assert!(!nfa.is_always_start_anchored()); - /// - /// // Weird cases also work. A pattern is only considered anchored if all - /// // matches may only occur at the start of a haystack. - /// let nfa = NFA::new("(^a)|a")?; - /// assert!(!nfa.is_always_start_anchored()); - /// - /// // When multiple patterns are present, if they are all anchored, then - /// // the NFA is always anchored too. - /// let nfa = NFA::new_many(&["^a", "^b", "^c"])?; - /// assert!(nfa.is_always_start_anchored()); - /// - /// // But if one pattern is unanchored, then the NFA must permit an - /// // unanchored search. - /// let nfa = NFA::new_many(&["^a", "b", "^c"])?; - /// assert!(!nfa.is_always_start_anchored()); - /// - /// # Ok::<(), Box>(()) - /// ``` - #[inline] - pub fn is_always_start_anchored(&self) -> bool { - self.start_anchored() == self.start_unanchored() - } - - /// Returns the look-around matcher associated with this NFA. - /// - /// A look-around matcher determines how to match look-around assertions. - /// In particular, some assertions are configurable. For example, the - /// `(?m:^)` and `(?m:$)` assertions can have their line terminator changed - /// from the default of `\n` to any other byte. 
- /// - /// If the NFA was built using a [`Compiler`], then this matcher - /// can be set via the [`Config::look_matcher`] configuration - /// knob. Otherwise, if you've built an NFA by hand, it is set via - /// [`Builder::set_look_matcher`]. - /// - /// # Example - /// - /// This shows how to change the line terminator for multi-line assertions. - /// - /// ``` - /// use regex_automata::{ - /// nfa::thompson::{self, pikevm::PikeVM}, - /// util::look::LookMatcher, - /// Match, Input, - /// }; - /// - /// let mut lookm = LookMatcher::new(); - /// lookm.set_line_terminator(b'\x00'); - /// - /// let re = PikeVM::builder() - /// .thompson(thompson::Config::new().look_matcher(lookm)) - /// .build(r"(?m)^[a-z]+$")?; - /// let mut cache = re.create_cache(); - /// - /// // Multi-line assertions now use NUL as a terminator. - /// assert_eq!( - /// Some(Match::must(0, 1..4)), - /// re.find(&mut cache, b"\x00abc\x00"), - /// ); - /// // ... and \n is no longer recognized as a terminator. - /// assert_eq!( - /// None, - /// re.find(&mut cache, b"\nabc\n"), - /// ); - /// - /// # Ok::<(), Box>(()) - /// ``` - #[inline] - pub fn look_matcher(&self) -> &LookMatcher { - &self.0.look_matcher - } - - /// Returns the union of all look-around assertions used throughout this - /// NFA. When the returned set is empty, it implies that the NFA has no - /// look-around assertions and thus zero conditional epsilon transitions. - /// - /// This is useful in some cases enabling optimizations. It is not - /// unusual, for example, for optimizations to be of the form, "for any - /// regex with zero conditional epsilon transitions, do ..." where "..." - /// is some kind of optimization. - /// - /// This isn't only helpful for optimizations either. Sometimes look-around - /// assertions are difficult to support. For example, many of the DFAs in - /// this crate don't support Unicode word boundaries or handle them using - /// heuristics. Handling that correctly typically requires some kind of - /// cheap check of whether the NFA has a Unicode word boundary in the first - /// place. - /// - /// # Example - /// - /// This example shows how this routine varies based on the regex pattern: - /// - /// ``` - /// use regex_automata::{nfa::thompson::NFA, util::look::Look}; - /// - /// // No look-around at all. - /// let nfa = NFA::new("a")?; - /// assert!(nfa.look_set_any().is_empty()); - /// - /// // When multiple patterns are present, since this returns the union, - /// // it will include look-around assertions that only appear in one - /// // pattern. - /// let nfa = NFA::new_many(&["a", "b", "a^b", "c"])?; - /// assert!(nfa.look_set_any().contains(Look::Start)); - /// - /// // Some groups of assertions have various shortcuts. For example: - /// let nfa = NFA::new(r"(?-u:\b)")?; - /// assert!(nfa.look_set_any().contains_word()); - /// assert!(!nfa.look_set_any().contains_word_unicode()); - /// assert!(nfa.look_set_any().contains_word_ascii()); - /// - /// # Ok::<(), Box>(()) - /// ``` - #[inline] - pub fn look_set_any(&self) -> LookSet { - self.0.look_set_any - } - - /// Returns the union of all prefix look-around assertions for every - /// pattern in this NFA. When the returned set is empty, it implies none of - /// the patterns require moving through a conditional epsilon transition - /// before inspecting the first byte in the haystack. - /// - /// This can be useful for determining what kinds of assertions need to be - /// satisfied at the beginning of a search. 
For example, typically DFAs - /// in this crate will build a distinct starting state for each possible - /// starting configuration that might result in look-around assertions - /// being satisfied differently. However, if the set returned here is - /// empty, then you know that the start state is invariant because there - /// are no conditional epsilon transitions to consider. - /// - /// # Example - /// - /// This example shows how this routine varies based on the regex pattern: - /// - /// ``` - /// use regex_automata::{nfa::thompson::NFA, util::look::Look}; - /// - /// // No look-around at all. - /// let nfa = NFA::new("a")?; - /// assert!(nfa.look_set_prefix_any().is_empty()); - /// - /// // When multiple patterns are present, since this returns the union, - /// // it will include look-around assertions that only appear in one - /// // pattern. But it will only include assertions that are in the prefix - /// // of a pattern. For example, this includes '^' but not '$' even though - /// // '$' does appear. - /// let nfa = NFA::new_many(&["a", "b", "^ab$", "c"])?; - /// assert!(nfa.look_set_prefix_any().contains(Look::Start)); - /// assert!(!nfa.look_set_prefix_any().contains(Look::End)); - /// - /// # Ok::<(), Box>(()) - /// ``` - #[inline] - pub fn look_set_prefix_any(&self) -> LookSet { - self.0.look_set_prefix_any - } - - // FIXME: The `look_set_prefix_all` computation was not correct, and it - // seemed a little tricky to fix it. Since I wasn't actually using it for - // anything, I just decided to remove it in the run up to the regex 1.9 - // release. If you need this, please file an issue. - /* - /// Returns the intersection of all prefix look-around assertions for every - /// pattern in this NFA. When the returned set is empty, it implies at - /// least one of the patterns does not require moving through a conditional - /// epsilon transition before inspecting the first byte in the haystack. - /// Conversely, when the set contains an assertion, it implies that every - /// pattern in the NFA also contains that assertion in its prefix. - /// - /// This can be useful for determining what kinds of assertions need to be - /// satisfied at the beginning of a search. For example, if you know that - /// [`Look::Start`] is in the prefix intersection set returned here, then - /// you know that all searches, regardless of input configuration, will be - /// anchored. - /// - /// # Example - /// - /// This example shows how this routine varies based on the regex pattern: - /// - /// ``` - /// use regex_automata::{nfa::thompson::NFA, util::look::Look}; - /// - /// // No look-around at all. - /// let nfa = NFA::new("a")?; - /// assert!(nfa.look_set_prefix_all().is_empty()); - /// - /// // When multiple patterns are present, since this returns the - /// // intersection, it will only include assertions present in every - /// // prefix, and only the prefix. - /// let nfa = NFA::new_many(&["^a$", "^b$", "$^ab$", "^c$"])?; - /// assert!(nfa.look_set_prefix_all().contains(Look::Start)); - /// assert!(!nfa.look_set_prefix_all().contains(Look::End)); - /// - /// # Ok::<(), Box>(()) - /// ``` - #[inline] - pub fn look_set_prefix_all(&self) -> LookSet { - self.0.look_set_prefix_all - } - */ - - /// Returns the memory usage, in bytes, of this NFA. - /// - /// This does **not** include the stack size used up by this NFA. To - /// compute that, use `std::mem::size_of::()`. - /// - /// # Example - /// - /// This example shows that large Unicode character classes can use quite - /// a bit of memory. 
- /// - /// ``` - /// # if cfg!(miri) { return Ok(()); } // miri takes too long - /// use regex_automata::nfa::thompson::NFA; - /// - /// let nfa_unicode = NFA::new(r"\w")?; - /// let nfa_ascii = NFA::new(r"(?-u:\w)")?; - /// - /// assert!(10 * nfa_ascii.memory_usage() < nfa_unicode.memory_usage()); - /// - /// # Ok::<(), Box>(()) - /// ``` - #[inline] - pub fn memory_usage(&self) -> usize { - use core::mem::size_of; - - size_of::() // allocated on the heap via Arc - + self.0.states.len() * size_of::() - + self.0.start_pattern.len() * size_of::() - + self.0.group_info.memory_usage() - + self.0.memory_extra - } -} - -impl fmt::Debug for NFA { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - self.0.fmt(f) - } -} - -/// The "inner" part of the NFA. We split this part out so that we can easily -/// wrap it in an `Arc` above in the definition of `NFA`. -/// -/// See builder.rs for the code that actually builds this type. This module -/// does provide (internal) mutable methods for adding things to this -/// NFA before finalizing it, but the high level construction process is -/// controlled by the builder abstraction. (Which is complicated enough to -/// get its own module.) -#[derive(Default)] -pub(super) struct Inner { - /// The state sequence. This sequence is guaranteed to be indexable by all - /// starting state IDs, and it is also guaranteed to contain at most one - /// `Match` state for each pattern compiled into this NFA. (A pattern may - /// not have a corresponding `Match` state if a `Match` state is impossible - /// to reach.) - states: Vec, - /// The anchored starting state of this NFA. - start_anchored: StateID, - /// The unanchored starting state of this NFA. - start_unanchored: StateID, - /// The starting states for each individual pattern. Starting at any - /// of these states will result in only an anchored search for the - /// corresponding pattern. The vec is indexed by pattern ID. When the NFA - /// contains a single regex, then `start_pattern[0]` and `start_anchored` - /// are always equivalent. - start_pattern: Vec, - /// Info about the capturing groups in this NFA. This is responsible for - /// mapping groups to slots, mapping groups to names and names to groups. - group_info: GroupInfo, - /// A representation of equivalence classes over the transitions in this - /// NFA. Two bytes in the same equivalence class must not discriminate - /// between a match or a non-match. This map can be used to shrink the - /// total size of a DFA's transition table with a small match-time cost. - /// - /// Note that the NFA's transitions are *not* defined in terms of these - /// equivalence classes. The NFA's transitions are defined on the original - /// byte values. For the most part, this is because they wouldn't really - /// help the NFA much since the NFA already uses a sparse representation - /// to represent transitions. Byte classes are most effective in a dense - /// representation. - byte_class_set: ByteClassSet, - /// This is generated from `byte_class_set`, and essentially represents the - /// same thing but supports different access patterns. Namely, this permits - /// looking up the equivalence class of a byte very cheaply. - /// - /// Ideally we would just store this, but because of annoying code - /// structure reasons, we keep both this and `byte_class_set` around for - /// now. I think I would prefer that `byte_class_set` were computed in the - /// `Builder`, but right now, we compute it as states are added to the - /// `NFA`. 
- byte_classes: ByteClasses, - /// Whether this NFA has a `Capture` state anywhere. - has_capture: bool, - /// When the empty string is in the language matched by this NFA. - has_empty: bool, - /// Whether UTF-8 mode is enabled for this NFA. Briefly, this means that - /// all non-empty matches produced by this NFA correspond to spans of valid - /// UTF-8, and any empty matches produced by this NFA that split a UTF-8 - /// encoded codepoint should be filtered out by the corresponding regex - /// engine. - utf8: bool, - /// Whether this NFA is meant to be matched in reverse or not. - reverse: bool, - /// The matcher to be used for look-around assertions. - look_matcher: LookMatcher, - /// The union of all look-around assertions that occur anywhere within - /// this NFA. If this set is empty, then it means there are precisely zero - /// conditional epsilon transitions in the NFA. - look_set_any: LookSet, - /// The union of all look-around assertions that occur as a zero-length - /// prefix for any of the patterns in this NFA. - look_set_prefix_any: LookSet, - /* - /// The intersection of all look-around assertions that occur as a - /// zero-length prefix for any of the patterns in this NFA. - look_set_prefix_all: LookSet, - */ - /// Heap memory used indirectly by NFA states and other things (like the - /// various capturing group representations above). Since each state - /// might use a different amount of heap, we need to keep track of this - /// incrementally. - memory_extra: usize, -} - -impl Inner { - /// Runs any last finalization bits and turns this into a full NFA. - pub(super) fn into_nfa(mut self) -> NFA { - self.byte_classes = self.byte_class_set.byte_classes(); - // Do epsilon closure from the start state of every pattern in order - // to compute various properties such as look-around assertions and - // whether the empty string can be matched. - let mut stack = vec![]; - let mut seen = SparseSet::new(self.states.len()); - for &start_id in self.start_pattern.iter() { - stack.push(start_id); - seen.clear(); - // let mut prefix_all = LookSet::full(); - let mut prefix_any = LookSet::empty(); - while let Some(sid) = stack.pop() { - if !seen.insert(sid) { - continue; - } - match self.states[sid] { - State::ByteRange { .. } - | State::Dense { .. } - | State::Fail => continue, - State::Sparse(_) => { - // This snippet below will rewrite this sparse state - // as a dense state. By doing it here, we apply this - // optimization to all hot "sparse" states since these - // are the states that are reachable from the start - // state via an epsilon closure. - // - // Unfortunately, this optimization did not seem to - // help much in some very limited ad hoc benchmarking. - // - // I left the 'Dense' state type in place in case we - // want to revisit this, but I suspect the real way - // to make forward progress is a more fundamental - // re-architecting of how data in the NFA is laid out. - // I think we should consider a single contiguous - // allocation instead of all this indirection and - // potential heap allocations for every state. But this - // is a large re-design and will require API breaking - // changes. - // self.memory_extra -= self.states[sid].memory_usage(); - // let trans = DenseTransitions::from_sparse(sparse); - // self.states[sid] = State::Dense(trans); - // self.memory_extra += self.states[sid].memory_usage(); - continue; - } - State::Match { .. 
} => self.has_empty = true, - State::Look { look, next } => { - prefix_any = prefix_any.insert(look); - stack.push(next); - } - State::Union { ref alternates } => { - // Order doesn't matter here, since we're just dealing - // with look-around sets. But if we do richer analysis - // here that needs to care about preference order, then - // this should be done in reverse. - stack.extend(alternates.iter()); - } - State::BinaryUnion { alt1, alt2 } => { - stack.push(alt2); - stack.push(alt1); - } - State::Capture { next, .. } => { - stack.push(next); - } - } - } - self.look_set_prefix_any = - self.look_set_prefix_any.union(prefix_any); - } - self.states.shrink_to_fit(); - self.start_pattern.shrink_to_fit(); - NFA(Arc::new(self)) - } - - /// Returns the capturing group info for this NFA. - pub(super) fn group_info(&self) -> &GroupInfo { - &self.group_info - } - - /// Add the given state to this NFA after allocating a fresh identifier for - /// it. - /// - /// This panics if too many states are added such that a fresh identifier - /// could not be created. (Currently, the only caller of this routine is - /// a `Builder`, and it upholds this invariant.) - pub(super) fn add(&mut self, state: State) -> StateID { - match state { - State::ByteRange { ref trans } => { - self.byte_class_set.set_range(trans.start, trans.end); - } - State::Sparse(ref sparse) => { - for trans in sparse.transitions.iter() { - self.byte_class_set.set_range(trans.start, trans.end); - } - } - State::Dense { .. } => unreachable!(), - State::Look { look, .. } => { - self.look_matcher - .add_to_byteset(look, &mut self.byte_class_set); - self.look_set_any = self.look_set_any.insert(look); - } - State::Capture { .. } => { - self.has_capture = true; - } - State::Union { .. } - | State::BinaryUnion { .. } - | State::Fail - | State::Match { .. } => {} - } - - let id = StateID::new(self.states.len()).unwrap(); - self.memory_extra += state.memory_usage(); - self.states.push(state); - id - } - - /// Set the starting state identifiers for this NFA. - /// - /// `start_anchored` and `start_unanchored` may be equivalent. When they - /// are, then the NFA can only execute anchored searches. This might - /// occur, for example, for patterns that are unconditionally anchored. - /// e.g., `^foo`. - pub(super) fn set_starts( - &mut self, - start_anchored: StateID, - start_unanchored: StateID, - start_pattern: &[StateID], - ) { - self.start_anchored = start_anchored; - self.start_unanchored = start_unanchored; - self.start_pattern = start_pattern.to_vec(); - } - - /// Sets the UTF-8 mode of this NFA. - pub(super) fn set_utf8(&mut self, yes: bool) { - self.utf8 = yes; - } - - /// Sets the reverse mode of this NFA. - pub(super) fn set_reverse(&mut self, yes: bool) { - self.reverse = yes; - } - - /// Sets the look-around assertion matcher for this NFA. - pub(super) fn set_look_matcher(&mut self, m: LookMatcher) { - self.look_matcher = m; - } - - /// Set the capturing groups for this NFA. - /// - /// The given slice should contain the capturing groups for each pattern, - /// The capturing groups in turn should correspond to the total number of - /// capturing groups in the pattern, including the anonymous first capture - /// group for each pattern. If a capturing group does have a name, then it - /// should be provided as a Arc. - /// - /// This returns an error if a corresponding `GroupInfo` could not be - /// built. 
- pub(super) fn set_captures( - &mut self, - captures: &[Vec>>], - ) -> Result<(), GroupInfoError> { - self.group_info = GroupInfo::new( - captures.iter().map(|x| x.iter().map(|y| y.as_ref())), - )?; - Ok(()) - } - - /// Remap the transitions in every state of this NFA using the given map. - /// The given map should be indexed according to state ID namespace used by - /// the transitions of the states currently in this NFA. - /// - /// This is particularly useful to the NFA builder, since it is convenient - /// to add NFA states in order to produce their final IDs. Then, after all - /// of the intermediate "empty" states (unconditional epsilon transitions) - /// have been removed from the builder's representation, we can re-map all - /// of the transitions in the states already added to their final IDs. - pub(super) fn remap(&mut self, old_to_new: &[StateID]) { - for state in &mut self.states { - state.remap(old_to_new); - } - self.start_anchored = old_to_new[self.start_anchored]; - self.start_unanchored = old_to_new[self.start_unanchored]; - for id in self.start_pattern.iter_mut() { - *id = old_to_new[*id]; - } - } -} - -impl fmt::Debug for Inner { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - writeln!(f, "thompson::NFA(")?; - for (sid, state) in self.states.iter().with_state_ids() { - let status = if sid == self.start_anchored { - '^' - } else if sid == self.start_unanchored { - '>' - } else { - ' ' - }; - writeln!(f, "{}{:06?}: {:?}", status, sid.as_usize(), state)?; - } - let pattern_len = self.start_pattern.len(); - if pattern_len > 1 { - writeln!(f)?; - for pid in 0..pattern_len { - let sid = self.start_pattern[pid]; - writeln!(f, "START({:06?}): {:?}", pid, sid.as_usize())?; - } - } - writeln!(f)?; - writeln!( - f, - "transition equivalence classes: {:?}", - self.byte_classes, - )?; - writeln!(f, ")")?; - Ok(()) - } -} - -/// A state in an NFA. -/// -/// In theory, it can help to conceptualize an `NFA` as a graph consisting of -/// `State`s. Each `State` contains its complete set of outgoing transitions. -/// -/// In practice, it can help to conceptualize an `NFA` as a sequence of -/// instructions for a virtual machine. Each `State` says what to do and where -/// to go next. -/// -/// Strictly speaking, the practical interpretation is the most correct one, -/// because of the [`Capture`](State::Capture) state. Namely, a `Capture` -/// state always forwards execution to another state unconditionally. Its only -/// purpose is to cause a side effect: the recording of the current input -/// position at a particular location in memory. In this sense, an `NFA` -/// has more power than a theoretical non-deterministic finite automaton. -/// -/// For most uses of this crate, it is likely that one may never even need to -/// be aware of this type at all. The main use cases for looking at `State`s -/// directly are if you need to write your own search implementation or if you -/// need to do some kind of analysis on the NFA. -#[derive(Clone, Eq, PartialEq)] -pub enum State { - /// A state with a single transition that can only be taken if the current - /// input symbol is in a particular range of bytes. - ByteRange { - /// The transition from this state to the next. - trans: Transition, - }, - /// A state with possibly many transitions represented in a sparse fashion. - /// Transitions are non-overlapping and ordered lexicographically by input - /// range. - /// - /// In practice, this is used for encoding UTF-8 automata. 
Its presence is - /// primarily an optimization that avoids many additional unconditional - /// epsilon transitions (via [`Union`](State::Union) states), and thus - /// decreases the overhead of traversing the NFA. This can improve both - /// matching time and DFA construction time. - Sparse(SparseTransitions), - /// A dense representation of a state with multiple transitions. - Dense(DenseTransitions), - /// A conditional epsilon transition satisfied via some sort of - /// look-around. Look-around is limited to anchor and word boundary - /// assertions. - /// - /// Look-around states are meant to be evaluated while performing epsilon - /// closure (computing the set of states reachable from a particular state - /// via only epsilon transitions). If the current position in the haystack - /// satisfies the look-around assertion, then you're permitted to follow - /// that epsilon transition. - Look { - /// The look-around assertion that must be satisfied before moving - /// to `next`. - look: Look, - /// The state to transition to if the look-around assertion is - /// satisfied. - next: StateID, - }, - /// An alternation such that there exists an epsilon transition to all - /// states in `alternates`, where matches found via earlier transitions - /// are preferred over later transitions. - Union { - /// An ordered sequence of unconditional epsilon transitions to other - /// states. Transitions earlier in the sequence are preferred over - /// transitions later in the sequence. - alternates: Box<[StateID]>, - }, - /// An alternation such that there exists precisely two unconditional - /// epsilon transitions, where matches found via `alt1` are preferred over - /// matches found via `alt2`. - /// - /// This state exists as a common special case of Union where there are - /// only two alternates. In this case, we don't need any allocations to - /// represent the state. This saves a bit of memory and also saves an - /// additional memory access when traversing the NFA. - BinaryUnion { - /// An unconditional epsilon transition to another NFA state. This - /// is preferred over `alt2`. - alt1: StateID, - /// An unconditional epsilon transition to another NFA state. Matches - /// reported via this transition should only be reported if no matches - /// were found by following `alt1`. - alt2: StateID, - }, - /// An empty state that records a capture location. - /// - /// From the perspective of finite automata, this is precisely equivalent - /// to an unconditional epsilon transition, but serves the purpose of - /// instructing NFA simulations to record additional state when the finite - /// state machine passes through this epsilon transition. - /// - /// `slot` in this context refers to the specific capture group slot - /// offset that is being recorded. Each capturing group has two slots - /// corresponding to the start and end of the matching portion of that - /// group. - /// - /// The pattern ID and capture group index are also included in this state - /// in case they are useful. But mostly, all you'll need is `next` and - /// `slot`. - Capture { - /// The state to transition to, unconditionally. - next: StateID, - /// The pattern ID that this capture belongs to. - pattern_id: PatternID, - /// The capture group index that this capture belongs to. Capture group - /// indices are local to each pattern. For example, when capturing - /// groups are enabled, every pattern has a capture group at index - /// `0`. - group_index: SmallIndex, - /// The slot index for this capture. 
Every capturing group has two - /// slots: one for the start haystack offset and one for the end - /// haystack offset. Unlike capture group indices, slot indices are - /// global across all patterns in this NFA. That is, each slot belongs - /// to a single pattern, but there is only one slot at index `i`. - slot: SmallIndex, - }, - /// A state that cannot be transitioned out of. This is useful for cases - /// where you want to prevent matching from occurring. For example, if your - /// regex parser permits empty character classes, then one could choose - /// a `Fail` state to represent them. (An empty character class can be - /// thought of as an empty set. Since nothing is in an empty set, they can - /// never match anything.) - Fail, - /// A match state. There is at least one such occurrence of this state for - /// each regex that can match that is in this NFA. - Match { - /// The matching pattern ID. - pattern_id: PatternID, - }, -} - -impl State { - /// Returns true if and only if this state contains one or more epsilon - /// transitions. - /// - /// In practice, a state has no outgoing transitions (like `Match`), has - /// only non-epsilon transitions (like `ByteRange`) or has only epsilon - /// transitions (like `Union`). - /// - /// # Example - /// - /// ``` - /// use regex_automata::{ - /// nfa::thompson::{State, Transition}, - /// util::primitives::{PatternID, StateID, SmallIndex}, - /// }; - /// - /// // Capture states are epsilon transitions. - /// let state = State::Capture { - /// next: StateID::ZERO, - /// pattern_id: PatternID::ZERO, - /// group_index: SmallIndex::ZERO, - /// slot: SmallIndex::ZERO, - /// }; - /// assert!(state.is_epsilon()); - /// - /// // ByteRange states are not. - /// let state = State::ByteRange { - /// trans: Transition { start: b'a', end: b'z', next: StateID::ZERO }, - /// }; - /// assert!(!state.is_epsilon()); - /// - /// # Ok::<(), Box>(()) - /// ``` - #[inline] - pub fn is_epsilon(&self) -> bool { - match *self { - State::ByteRange { .. } - | State::Sparse { .. } - | State::Dense { .. } - | State::Fail - | State::Match { .. } => false, - State::Look { .. } - | State::Union { .. } - | State::BinaryUnion { .. } - | State::Capture { .. } => true, - } - } - - /// Returns the heap memory usage of this NFA state in bytes. - fn memory_usage(&self) -> usize { - match *self { - State::ByteRange { .. } - | State::Look { .. } - | State::BinaryUnion { .. } - | State::Capture { .. } - | State::Match { .. } - | State::Fail => 0, - State::Sparse(SparseTransitions { ref transitions }) => { - transitions.len() * mem::size_of::() - } - State::Dense { .. } => 256 * mem::size_of::(), - State::Union { ref alternates } => { - alternates.len() * mem::size_of::() - } - } - } - - /// Remap the transitions in this state using the given map. Namely, the - /// given map should be indexed according to the transitions currently - /// in this state. - /// - /// This is used during the final phase of the NFA compiler, which turns - /// its intermediate NFA into the final NFA. - fn remap(&mut self, remap: &[StateID]) { - match *self { - State::ByteRange { ref mut trans } => { - trans.next = remap[trans.next] - } - State::Sparse(SparseTransitions { ref mut transitions }) => { - for t in transitions.iter_mut() { - t.next = remap[t.next]; - } - } - State::Dense(DenseTransitions { ref mut transitions }) => { - for sid in transitions.iter_mut() { - *sid = remap[*sid]; - } - } - State::Look { ref mut next, .. 
} => *next = remap[*next], - State::Union { ref mut alternates } => { - for alt in alternates.iter_mut() { - *alt = remap[*alt]; - } - } - State::BinaryUnion { ref mut alt1, ref mut alt2 } => { - *alt1 = remap[*alt1]; - *alt2 = remap[*alt2]; - } - State::Capture { ref mut next, .. } => *next = remap[*next], - State::Fail => {} - State::Match { .. } => {} - } - } -} - -impl fmt::Debug for State { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match *self { - State::ByteRange { ref trans } => trans.fmt(f), - State::Sparse(SparseTransitions { ref transitions }) => { - let rs = transitions - .iter() - .map(|t| format!("{t:?}")) - .collect::>() - .join(", "); - write!(f, "sparse({rs})") - } - State::Dense(ref dense) => { - write!(f, "dense(")?; - for (i, t) in dense.iter().enumerate() { - if i > 0 { - write!(f, ", ")?; - } - write!(f, "{t:?}")?; - } - write!(f, ")") - } - State::Look { ref look, next } => { - write!(f, "{:?} => {:?}", look, next.as_usize()) - } - State::Union { ref alternates } => { - let alts = alternates - .iter() - .map(|id| format!("{:?}", id.as_usize())) - .collect::>() - .join(", "); - write!(f, "union({alts})") - } - State::BinaryUnion { alt1, alt2 } => { - write!( - f, - "binary-union({}, {})", - alt1.as_usize(), - alt2.as_usize() - ) - } - State::Capture { next, pattern_id, group_index, slot } => { - write!( - f, - "capture(pid={:?}, group={:?}, slot={:?}) => {:?}", - pattern_id.as_usize(), - group_index.as_usize(), - slot.as_usize(), - next.as_usize(), - ) - } - State::Fail => write!(f, "FAIL"), - State::Match { pattern_id } => { - write!(f, "MATCH({:?})", pattern_id.as_usize()) - } - } - } -} - -/// A sequence of transitions used to represent a sparse state. -/// -/// This is the primary representation of a [`Sparse`](State::Sparse) state. -/// It corresponds to a sorted sequence of transitions with non-overlapping -/// byte ranges. If the byte at the current position in the haystack matches -/// one of the byte ranges, then the finite state machine should take the -/// corresponding transition. -#[derive(Clone, Debug, Eq, PartialEq)] -pub struct SparseTransitions { - /// The sorted sequence of non-overlapping transitions. - pub transitions: Box<[Transition]>, -} - -impl SparseTransitions { - /// This follows the matching transition for a particular byte. - /// - /// The matching transition is found by looking for a matching byte - /// range (there is at most one) corresponding to the position `at` in - /// `haystack`. - /// - /// If `at >= haystack.len()`, then this returns `None`. - #[inline] - pub fn matches(&self, haystack: &[u8], at: usize) -> Option { - haystack.get(at).and_then(|&b| self.matches_byte(b)) - } - - /// This follows the matching transition for any member of the alphabet. - /// - /// The matching transition is found by looking for a matching byte - /// range (there is at most one) corresponding to the position `at` in - /// `haystack`. If the given alphabet unit is [`EOI`](alphabet::Unit::eoi), - /// then this always returns `None`. - #[inline] - pub(crate) fn matches_unit( - &self, - unit: alphabet::Unit, - ) -> Option { - unit.as_u8().and_then(|byte| self.matches_byte(byte)) - } - - /// This follows the matching transition for a particular byte. - /// - /// The matching transition is found by looking for a matching byte range - /// (there is at most one) corresponding to the byte given. 
- #[inline] - pub fn matches_byte(&self, byte: u8) -> Option { - for t in self.transitions.iter() { - if t.start > byte { - break; - } else if t.matches_byte(byte) { - return Some(t.next); - } - } - None - - /* - // This is an alternative implementation that uses binary search. In - // some ad hoc experiments, like - // - // regex-cli find match pikevm -b -p '\b\w+\b' non-ascii-file - // - // I could not observe any improvement, and in fact, things seemed to - // be a bit slower. I can see an improvement in at least one benchmark: - // - // regex-cli find match pikevm -b -p '\pL{100}' all-codepoints-utf8 - // - // Where total search time goes from 3.2s to 2.4s when using binary - // search. - self.transitions - .binary_search_by(|t| { - if t.end < byte { - core::cmp::Ordering::Less - } else if t.start > byte { - core::cmp::Ordering::Greater - } else { - core::cmp::Ordering::Equal - } - }) - .ok() - .map(|i| self.transitions[i].next) - */ - } -} - -/// A sequence of transitions used to represent a dense state. -/// -/// This is the primary representation of a [`Dense`](State::Dense) state. It -/// provides constant time matching. That is, given a byte in a haystack and -/// a `DenseTransitions`, one can determine if the state matches in constant -/// time. -/// -/// This is in contrast to `SparseTransitions`, whose time complexity is -/// necessarily bigger than constant time. Also in contrast, `DenseTransitions` -/// usually requires (much) more heap memory. -#[derive(Clone, Debug, Eq, PartialEq)] -pub struct DenseTransitions { - /// A dense representation of this state's transitions on the heap. This - /// always has length 256. - pub transitions: Box<[StateID]>, -} - -impl DenseTransitions { - /// This follows the matching transition for a particular byte. - /// - /// The matching transition is found by looking for a transition that - /// doesn't correspond to `StateID::ZERO` for the byte `at` the given - /// position in `haystack`. - /// - /// If `at >= haystack.len()`, then this returns `None`. - #[inline] - pub fn matches(&self, haystack: &[u8], at: usize) -> Option { - haystack.get(at).and_then(|&b| self.matches_byte(b)) - } - - /// This follows the matching transition for any member of the alphabet. - /// - /// The matching transition is found by looking for a transition that - /// doesn't correspond to `StateID::ZERO` for the given alphabet `unit`. - /// - /// If the given alphabet unit is [`EOI`](alphabet::Unit::eoi), then - /// this returns `None`. - #[inline] - pub(crate) fn matches_unit( - &self, - unit: alphabet::Unit, - ) -> Option { - unit.as_u8().and_then(|byte| self.matches_byte(byte)) - } - - /// This follows the matching transition for a particular byte. - /// - /// The matching transition is found by looking for a transition that - /// doesn't correspond to `StateID::ZERO` for the given `byte`. - #[inline] - pub fn matches_byte(&self, byte: u8) -> Option { - let next = self.transitions[usize::from(byte)]; - if next == StateID::ZERO { - None - } else { - Some(next) - } - } - - /* - /// The dense state optimization isn't currently enabled, so permit a - /// little bit of dead code. - pub(crate) fn from_sparse(sparse: &SparseTransitions) -> DenseTransitions { - let mut dense = vec![StateID::ZERO; 256]; - for t in sparse.transitions.iter() { - for b in t.start..=t.end { - dense[usize::from(b)] = t.next; - } - } - DenseTransitions { transitions: dense.into_boxed_slice() } - } - */ - - /// Returns an iterator over all transitions that don't point to - /// `StateID::ZERO`. 
- pub(crate) fn iter(&self) -> impl Iterator + '_ { - use crate::util::int::Usize; - self.transitions - .iter() - .enumerate() - .filter(|&(_, &sid)| sid != StateID::ZERO) - .map(|(byte, &next)| Transition { - start: byte.as_u8(), - end: byte.as_u8(), - next, - }) - } -} - -/// A single transition to another state. -/// -/// This transition may only be followed if the current byte in the haystack -/// falls in the inclusive range of bytes specified. -#[derive(Clone, Copy, Eq, Hash, PartialEq)] -pub struct Transition { - /// The inclusive start of the byte range. - pub start: u8, - /// The inclusive end of the byte range. - pub end: u8, - /// The identifier of the state to transition to. - pub next: StateID, -} - -impl Transition { - /// Returns true if the position `at` in `haystack` falls in this - /// transition's range of bytes. - /// - /// If `at >= haystack.len()`, then this returns `false`. - pub fn matches(&self, haystack: &[u8], at: usize) -> bool { - haystack.get(at).map_or(false, |&b| self.matches_byte(b)) - } - - /// Returns true if the given alphabet unit falls in this transition's - /// range of bytes. If the given unit is [`EOI`](alphabet::Unit::eoi), then - /// this returns `false`. - pub fn matches_unit(&self, unit: alphabet::Unit) -> bool { - unit.as_u8().map_or(false, |byte| self.matches_byte(byte)) - } - - /// Returns true if the given byte falls in this transition's range of - /// bytes. - pub fn matches_byte(&self, byte: u8) -> bool { - self.start <= byte && byte <= self.end - } -} - -impl fmt::Debug for Transition { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - use crate::util::escape::DebugByte; - - let Transition { start, end, next } = *self; - if self.start == self.end { - write!(f, "{:?} => {:?}", DebugByte(start), next.as_usize()) - } else { - write!( - f, - "{:?}-{:?} => {:?}", - DebugByte(start), - DebugByte(end), - next.as_usize(), - ) - } - } -} - -/// An iterator over all pattern IDs in an NFA. -/// -/// This iterator is created by [`NFA::patterns`]. -/// -/// The lifetime parameter `'a` refers to the lifetime of the NFA from which -/// this pattern iterator was created. -#[derive(Debug)] -pub struct PatternIter<'a> { - it: PatternIDIter, - /// We explicitly associate a lifetime with this iterator even though we - /// don't actually borrow anything from the NFA. We do this for backward - /// compatibility purposes. If we ever do need to borrow something from - /// the NFA, then we can and just get rid of this marker without breaking - /// the public API. - _marker: core::marker::PhantomData<&'a ()>, -} - -impl<'a> Iterator for PatternIter<'a> { - type Item = PatternID; - - fn next(&mut self) -> Option { - self.it.next() - } -} - -#[cfg(all(test, feature = "nfa-pikevm"))] -mod tests { - use super::*; - use crate::{nfa::thompson::pikevm::PikeVM, Input}; - - // This asserts that an NFA state doesn't have its size changed. It is - // *really* easy to accidentally increase the size, and thus potentially - // dramatically increase the memory usage of every NFA. - // - // This assert doesn't mean we absolutely cannot increase the size of an - // NFA state. We can. It's just here to make sure we do it knowingly and - // intentionally. 
- #[test] - fn state_has_small_size() { - #[cfg(target_pointer_width = "64")] - assert_eq!(24, core::mem::size_of::()); - #[cfg(target_pointer_width = "32")] - assert_eq!(20, core::mem::size_of::()); - } - - #[test] - fn always_match() { - let re = PikeVM::new_from_nfa(NFA::always_match()).unwrap(); - let mut cache = re.create_cache(); - let mut caps = re.create_captures(); - let mut find = |haystack, start, end| { - let input = Input::new(haystack).range(start..end); - re.search(&mut cache, &input, &mut caps); - caps.get_match().map(|m| m.end()) - }; - - assert_eq!(Some(0), find("", 0, 0)); - assert_eq!(Some(0), find("a", 0, 1)); - assert_eq!(Some(1), find("a", 1, 1)); - assert_eq!(Some(0), find("ab", 0, 2)); - assert_eq!(Some(1), find("ab", 1, 2)); - assert_eq!(Some(2), find("ab", 2, 2)); - } - - #[test] - fn never_match() { - let re = PikeVM::new_from_nfa(NFA::never_match()).unwrap(); - let mut cache = re.create_cache(); - let mut caps = re.create_captures(); - let mut find = |haystack, start, end| { - let input = Input::new(haystack).range(start..end); - re.search(&mut cache, &input, &mut caps); - caps.get_match().map(|m| m.end()) - }; - - assert_eq!(None, find("", 0, 0)); - assert_eq!(None, find("a", 0, 1)); - assert_eq!(None, find("a", 1, 1)); - assert_eq!(None, find("ab", 0, 2)); - assert_eq!(None, find("ab", 1, 2)); - assert_eq!(None, find("ab", 2, 2)); - } -} diff --git a/vendor/regex-automata/src/nfa/thompson/pikevm.rs b/vendor/regex-automata/src/nfa/thompson/pikevm.rs deleted file mode 100644 index a5cd7086f521de..00000000000000 --- a/vendor/regex-automata/src/nfa/thompson/pikevm.rs +++ /dev/null @@ -1,2359 +0,0 @@ -/*! -An NFA backed Pike VM for executing regex searches with capturing groups. - -This module provides a [`PikeVM`] that works by simulating an NFA and -resolving all spans of capturing groups that participate in a match. -*/ - -#[cfg(feature = "internal-instrument-pikevm")] -use core::cell::RefCell; - -use alloc::{vec, vec::Vec}; - -use crate::{ - nfa::thompson::{self, BuildError, State, NFA}, - util::{ - captures::Captures, - empty, iter, - prefilter::Prefilter, - primitives::{NonMaxUsize, PatternID, SmallIndex, StateID}, - search::{ - Anchored, HalfMatch, Input, Match, MatchKind, PatternSet, Span, - }, - sparse_set::SparseSet, - }, -}; - -/// A simple macro for conditionally executing instrumentation logic when -/// the 'trace' log level is enabled. This is a compile-time no-op when the -/// 'internal-instrument-pikevm' feature isn't enabled. The intent here is that -/// this makes it easier to avoid doing extra work when instrumentation isn't -/// enabled. -/// -/// This macro accepts a closure of type `|&mut Counters|`. The closure can -/// then increment counters (or whatever) in accordance with what one wants -/// to track. -macro_rules! instrument { - ($fun:expr) => { - #[cfg(feature = "internal-instrument-pikevm")] - { - let fun: &mut dyn FnMut(&mut Counters) = &mut $fun; - COUNTERS.with(|c: &RefCell| fun(&mut *c.borrow_mut())); - } - }; -} - -#[cfg(feature = "internal-instrument-pikevm")] -std::thread_local! { - /// Effectively global state used to keep track of instrumentation - /// counters. The "proper" way to do this is to thread it through the - /// PikeVM, but it makes the code quite icky. Since this is just a - /// debugging feature, we're content to relegate it to thread local - /// state. 
When instrumentation is enabled, the counters are reset at the - /// beginning of every search and printed (with the 'trace' log level) at - /// the end of every search. - static COUNTERS: RefCell = RefCell::new(Counters::empty()); -} - -/// The configuration used for building a [`PikeVM`]. -/// -/// A PikeVM configuration is a simple data object that is typically used with -/// [`Builder::configure`]. It can be cheaply cloned. -/// -/// A default configuration can be created either with `Config::new`, or -/// perhaps more conveniently, with [`PikeVM::config`]. -#[derive(Clone, Debug, Default)] -pub struct Config { - match_kind: Option, - pre: Option>, -} - -impl Config { - /// Return a new default PikeVM configuration. - pub fn new() -> Config { - Config::default() - } - - /// Set the desired match semantics. - /// - /// The default is [`MatchKind::LeftmostFirst`], which corresponds to the - /// match semantics of Perl-like regex engines. That is, when multiple - /// patterns would match at the same leftmost position, the pattern that - /// appears first in the concrete syntax is chosen. - /// - /// Currently, the only other kind of match semantics supported is - /// [`MatchKind::All`]. This corresponds to "classical DFA" construction - /// where all possible matches are visited in the NFA by the `PikeVM`. - /// - /// Typically, `All` is used when one wants to execute an overlapping - /// search and `LeftmostFirst` otherwise. In particular, it rarely makes - /// sense to use `All` with the various "leftmost" find routines, since the - /// leftmost routines depend on the `LeftmostFirst` automata construction - /// strategy. Specifically, `LeftmostFirst` results in the `PikeVM` - /// simulating dead states as a way to terminate the search and report a - /// match. `LeftmostFirst` also supports non-greedy matches using this - /// strategy where as `All` does not. - pub fn match_kind(mut self, kind: MatchKind) -> Config { - self.match_kind = Some(kind); - self - } - - /// Set a prefilter to be used whenever a start state is entered. - /// - /// A [`Prefilter`] in this context is meant to accelerate searches by - /// looking for literal prefixes that every match for the corresponding - /// pattern (or patterns) must start with. Once a prefilter produces a - /// match, the underlying search routine continues on to try and confirm - /// the match. - /// - /// Be warned that setting a prefilter does not guarantee that the search - /// will be faster. While it's usually a good bet, if the prefilter - /// produces a lot of false positive candidates (i.e., positions matched - /// by the prefilter but not by the regex), then the overall result can - /// be slower than if you had just executed the regex engine without any - /// prefilters. - /// - /// By default no prefilter is set. - /// - /// # Example - /// - /// ``` - /// use regex_automata::{ - /// nfa::thompson::pikevm::PikeVM, - /// util::prefilter::Prefilter, - /// Input, Match, MatchKind, - /// }; - /// - /// let pre = Prefilter::new(MatchKind::LeftmostFirst, &["foo", "bar"]); - /// let re = PikeVM::builder() - /// .configure(PikeVM::config().prefilter(pre)) - /// .build(r"(foo|bar)[a-z]+")?; - /// let mut cache = re.create_cache(); - /// let input = Input::new("foo1 barfox bar"); - /// assert_eq!(Some(Match::must(0, 5..11)), re.find(&mut cache, input)); - /// - /// # Ok::<(), Box>(()) - /// ``` - /// - /// Be warned though that an incorrect prefilter can lead to incorrect - /// results! 
- /// - /// ``` - /// use regex_automata::{ - /// nfa::thompson::pikevm::PikeVM, - /// util::prefilter::Prefilter, - /// Input, HalfMatch, MatchKind, - /// }; - /// - /// let pre = Prefilter::new(MatchKind::LeftmostFirst, &["foo", "car"]); - /// let re = PikeVM::builder() - /// .configure(PikeVM::config().prefilter(pre)) - /// .build(r"(foo|bar)[a-z]+")?; - /// let mut cache = re.create_cache(); - /// let input = Input::new("foo1 barfox bar"); - /// // No match reported even though there clearly is one! - /// assert_eq!(None, re.find(&mut cache, input)); - /// - /// # Ok::<(), Box>(()) - /// ``` - pub fn prefilter(mut self, pre: Option) -> Config { - self.pre = Some(pre); - self - } - - /// Returns the match semantics set in this configuration. - pub fn get_match_kind(&self) -> MatchKind { - self.match_kind.unwrap_or(MatchKind::LeftmostFirst) - } - - /// Returns the prefilter set in this configuration, if one at all. - pub fn get_prefilter(&self) -> Option<&Prefilter> { - self.pre.as_ref().unwrap_or(&None).as_ref() - } - - /// Overwrite the default configuration such that the options in `o` are - /// always used. If an option in `o` is not set, then the corresponding - /// option in `self` is used. If it's not set in `self` either, then it - /// remains not set. - pub(crate) fn overwrite(&self, o: Config) -> Config { - Config { - match_kind: o.match_kind.or(self.match_kind), - pre: o.pre.or_else(|| self.pre.clone()), - } - } -} - -/// A builder for a `PikeVM`. -/// -/// This builder permits configuring options for the syntax of a pattern, -/// the NFA construction and the `PikeVM` construction. This builder is -/// different from a general purpose regex builder in that it permits fine -/// grain configuration of the construction process. The trade off for this is -/// complexity, and the possibility of setting a configuration that might not -/// make sense. For example, there are two different UTF-8 modes: -/// -/// * [`util::syntax::Config::utf8`](crate::util::syntax::Config::utf8) -/// controls whether the pattern itself can contain sub-expressions that match -/// invalid UTF-8. -/// * [`thompson::Config::utf8`] controls whether empty matches that split a -/// Unicode codepoint are reported or not. -/// -/// Generally speaking, callers will want to either enable all of these or -/// disable all of these. -/// -/// # Example -/// -/// This example shows how to disable UTF-8 mode in the syntax and the regex -/// itself. This is generally what you want for matching on arbitrary bytes. -/// -/// ``` -/// use regex_automata::{ -/// nfa::thompson::{self, pikevm::PikeVM}, -/// util::syntax, -/// Match, -/// }; -/// -/// let re = PikeVM::builder() -/// .syntax(syntax::Config::new().utf8(false)) -/// .thompson(thompson::Config::new().utf8(false)) -/// .build(r"foo(?-u:[^b])ar.*")?; -/// let mut cache = re.create_cache(); -/// -/// let haystack = b"\xFEfoo\xFFarzz\xE2\x98\xFF\n"; -/// let expected = Some(Match::must(0, 1..9)); -/// let got = re.find_iter(&mut cache, haystack).next(); -/// assert_eq!(expected, got); -/// // Notice that `(?-u:[^b])` matches invalid UTF-8, -/// // but the subsequent `.*` does not! Disabling UTF-8 -/// // on the syntax permits this. -/// // -/// // N.B. This example does not show the impact of -/// // disabling UTF-8 mode on a PikeVM Config, since that -/// // only impacts regexes that can produce matches of -/// // length 0. 
-/// assert_eq!(b"foo\xFFarzz", &haystack[got.unwrap().range()]); -/// -/// # Ok::<(), Box>(()) -/// ``` -#[derive(Clone, Debug)] -pub struct Builder { - config: Config, - #[cfg(feature = "syntax")] - thompson: thompson::Compiler, -} - -impl Builder { - /// Create a new PikeVM builder with its default configuration. - pub fn new() -> Builder { - Builder { - config: Config::default(), - #[cfg(feature = "syntax")] - thompson: thompson::Compiler::new(), - } - } - - /// Build a `PikeVM` from the given pattern. - /// - /// If there was a problem parsing or compiling the pattern, then an error - /// is returned. - #[cfg(feature = "syntax")] - pub fn build(&self, pattern: &str) -> Result { - self.build_many(&[pattern]) - } - - /// Build a `PikeVM` from the given patterns. - #[cfg(feature = "syntax")] - pub fn build_many>( - &self, - patterns: &[P], - ) -> Result { - let nfa = self.thompson.build_many(patterns)?; - self.build_from_nfa(nfa) - } - - /// Build a `PikeVM` directly from its NFA. - /// - /// Note that when using this method, any configuration that applies to the - /// construction of the NFA itself will of course be ignored, since the NFA - /// given here is already built. - pub fn build_from_nfa(&self, nfa: NFA) -> Result { - nfa.look_set_any().available().map_err(BuildError::word)?; - Ok(PikeVM { config: self.config.clone(), nfa }) - } - - /// Apply the given `PikeVM` configuration options to this builder. - pub fn configure(&mut self, config: Config) -> &mut Builder { - self.config = self.config.overwrite(config); - self - } - - /// Set the syntax configuration for this builder using - /// [`syntax::Config`](crate::util::syntax::Config). - /// - /// This permits setting things like case insensitivity, Unicode and multi - /// line mode. - /// - /// These settings only apply when constructing a PikeVM directly from a - /// pattern. - #[cfg(feature = "syntax")] - pub fn syntax( - &mut self, - config: crate::util::syntax::Config, - ) -> &mut Builder { - self.thompson.syntax(config); - self - } - - /// Set the Thompson NFA configuration for this builder using - /// [`nfa::thompson::Config`](crate::nfa::thompson::Config). - /// - /// This permits setting things like if additional time should be spent - /// shrinking the size of the NFA. - /// - /// These settings only apply when constructing a PikeVM directly from a - /// pattern. - #[cfg(feature = "syntax")] - pub fn thompson(&mut self, config: thompson::Config) -> &mut Builder { - self.thompson.configure(config); - self - } -} - -/// A virtual machine for executing regex searches with capturing groups. -/// -/// # Infallible APIs -/// -/// Unlike most other regex engines in this crate, a `PikeVM` never returns an -/// error at search time. It supports all [`Anchored`] configurations, never -/// quits and works on haystacks of arbitrary length. -/// -/// There are two caveats to mention though: -/// -/// * If an invalid pattern ID is given to a search via [`Anchored::Pattern`], -/// then the PikeVM will report "no match." This is consistent with all other -/// regex engines in this crate. -/// * When using [`PikeVM::which_overlapping_matches`] with a [`PatternSet`] -/// that has insufficient capacity to store all valid pattern IDs, then if a -/// match occurs for a `PatternID` that cannot be inserted, it is silently -/// dropped as if it did not match. -/// -/// # Advice -/// -/// The `PikeVM` is generally the most "powerful" regex engine in this crate. 
-/// "Powerful" in this context means that it can handle any regular expression -/// that is parseable by `regex-syntax` and any size haystack. Regrettably, -/// the `PikeVM` is also simultaneously often the _slowest_ regex engine in -/// practice. This results in an annoying situation where one generally tries -/// to pick any other regex engine (or perhaps none at all) before being -/// forced to fall back to a `PikeVM`. -/// -/// For example, a common strategy for dealing with capturing groups is to -/// actually look for the overall match of the regex using a faster regex -/// engine, like a [lazy DFA](crate::hybrid::regex::Regex). Once the overall -/// match is found, one can then run the `PikeVM` on just the match span to -/// find the spans of the capturing groups. In this way, the faster regex -/// engine does the majority of the work, while the `PikeVM` only lends its -/// power in a more limited role. -/// -/// Unfortunately, this isn't always possible because the faster regex engines -/// don't support all of the regex features in `regex-syntax`. This notably -/// includes (and is currently limited to) Unicode word boundaries. So if -/// your pattern has Unicode word boundaries, you typically can't use a -/// DFA-based regex engine at all (unless you [enable heuristic support for -/// it](crate::hybrid::dfa::Config::unicode_word_boundary)). (The [one-pass -/// DFA](crate::dfa::onepass::DFA) can handle Unicode word boundaries for -/// anchored searches only, but in a cruel sort of joke, many Unicode features -/// tend to result in making the regex _not_ one-pass.) -/// -/// # Example -/// -/// This example shows that the `PikeVM` implements Unicode word boundaries -/// correctly by default. -/// -/// ``` -/// # if cfg!(miri) { return Ok(()); } // miri takes too long -/// use regex_automata::{nfa::thompson::pikevm::PikeVM, Match}; -/// -/// let re = PikeVM::new(r"\b\w+\b")?; -/// let mut cache = re.create_cache(); -/// -/// let mut it = re.find_iter(&mut cache, "Шерлок Холмс"); -/// assert_eq!(Some(Match::must(0, 0..12)), it.next()); -/// assert_eq!(Some(Match::must(0, 13..23)), it.next()); -/// assert_eq!(None, it.next()); -/// # Ok::<(), Box>(()) -/// ``` -#[derive(Clone, Debug)] -pub struct PikeVM { - config: Config, - nfa: NFA, -} - -impl PikeVM { - /// Parse the given regular expression using the default configuration and - /// return the corresponding `PikeVM`. - /// - /// If you want a non-default configuration, then use the [`Builder`] to - /// set your own configuration. - /// - /// # Example - /// - /// ``` - /// use regex_automata::{nfa::thompson::pikevm::PikeVM, Match}; - /// - /// let re = PikeVM::new("foo[0-9]+bar")?; - /// let mut cache = re.create_cache(); - /// assert_eq!( - /// Some(Match::must(0, 3..14)), - /// re.find_iter(&mut cache, "zzzfoo12345barzzz").next(), - /// ); - /// # Ok::<(), Box>(()) - /// ``` - #[cfg(feature = "syntax")] - pub fn new(pattern: &str) -> Result { - PikeVM::builder().build(pattern) - } - - /// Like `new`, but parses multiple patterns into a single "multi regex." - /// This similarly uses the default regex configuration. 
- /// - /// # Example - /// - /// ``` - /// use regex_automata::{nfa::thompson::pikevm::PikeVM, Match}; - /// - /// let re = PikeVM::new_many(&["[a-z]+", "[0-9]+"])?; - /// let mut cache = re.create_cache(); - /// - /// let mut it = re.find_iter(&mut cache, "abc 1 foo 4567 0 quux"); - /// assert_eq!(Some(Match::must(0, 0..3)), it.next()); - /// assert_eq!(Some(Match::must(1, 4..5)), it.next()); - /// assert_eq!(Some(Match::must(0, 6..9)), it.next()); - /// assert_eq!(Some(Match::must(1, 10..14)), it.next()); - /// assert_eq!(Some(Match::must(1, 15..16)), it.next()); - /// assert_eq!(Some(Match::must(0, 17..21)), it.next()); - /// assert_eq!(None, it.next()); - /// # Ok::<(), Box>(()) - /// ``` - #[cfg(feature = "syntax")] - pub fn new_many>( - patterns: &[P], - ) -> Result { - PikeVM::builder().build_many(patterns) - } - - /// Like `new`, but builds a PikeVM directly from an NFA. This is useful - /// if you already have an NFA, or even if you hand-assembled the NFA. - /// - /// # Example - /// - /// This shows how to hand assemble a regular expression via its HIR, - /// compile an NFA from it and build a PikeVM from the NFA. - /// - /// ``` - /// use regex_automata::{nfa::thompson::{NFA, pikevm::PikeVM}, Match}; - /// use regex_syntax::hir::{Hir, Class, ClassBytes, ClassBytesRange}; - /// - /// let hir = Hir::class(Class::Bytes(ClassBytes::new(vec![ - /// ClassBytesRange::new(b'0', b'9'), - /// ClassBytesRange::new(b'A', b'Z'), - /// ClassBytesRange::new(b'_', b'_'), - /// ClassBytesRange::new(b'a', b'z'), - /// ]))); - /// - /// let config = NFA::config().nfa_size_limit(Some(1_000)); - /// let nfa = NFA::compiler().configure(config).build_from_hir(&hir)?; - /// - /// let re = PikeVM::new_from_nfa(nfa)?; - /// let (mut cache, mut caps) = (re.create_cache(), re.create_captures()); - /// let expected = Some(Match::must(0, 3..4)); - /// re.captures(&mut cache, "!@#A#@!", &mut caps); - /// assert_eq!(expected, caps.get_match()); - /// - /// # Ok::<(), Box>(()) - /// ``` - pub fn new_from_nfa(nfa: NFA) -> Result { - PikeVM::builder().build_from_nfa(nfa) - } - - /// Create a new `PikeVM` that matches every input. - /// - /// # Example - /// - /// ``` - /// use regex_automata::{nfa::thompson::pikevm::PikeVM, Match}; - /// - /// let re = PikeVM::always_match()?; - /// let mut cache = re.create_cache(); - /// - /// let expected = Match::must(0, 0..0); - /// assert_eq!(Some(expected), re.find_iter(&mut cache, "").next()); - /// assert_eq!(Some(expected), re.find_iter(&mut cache, "foo").next()); - /// # Ok::<(), Box>(()) - /// ``` - pub fn always_match() -> Result { - let nfa = thompson::NFA::always_match(); - PikeVM::new_from_nfa(nfa) - } - - /// Create a new `PikeVM` that never matches any input. - /// - /// # Example - /// - /// ``` - /// use regex_automata::nfa::thompson::pikevm::PikeVM; - /// - /// let re = PikeVM::never_match()?; - /// let mut cache = re.create_cache(); - /// - /// assert_eq!(None, re.find_iter(&mut cache, "").next()); - /// assert_eq!(None, re.find_iter(&mut cache, "foo").next()); - /// # Ok::<(), Box>(()) - /// ``` - pub fn never_match() -> Result { - let nfa = thompson::NFA::never_match(); - PikeVM::new_from_nfa(nfa) - } - - /// Return a default configuration for a `PikeVM`. - /// - /// This is a convenience routine to avoid needing to import the `Config` - /// type when customizing the construction of a `PikeVM`. - /// - /// # Example - /// - /// This example shows how to disable UTF-8 mode. 
When UTF-8 mode is - /// disabled, zero-width matches that split a codepoint are allowed. - /// Otherwise they are never reported. - /// - /// In the code below, notice that `""` is permitted to match positions - /// that split the encoding of a codepoint. - /// - /// ``` - /// use regex_automata::{nfa::thompson::{self, pikevm::PikeVM}, Match}; - /// - /// let re = PikeVM::builder() - /// .thompson(thompson::Config::new().utf8(false)) - /// .build(r"")?; - /// let mut cache = re.create_cache(); - /// - /// let haystack = "a☃z"; - /// let mut it = re.find_iter(&mut cache, haystack); - /// assert_eq!(Some(Match::must(0, 0..0)), it.next()); - /// assert_eq!(Some(Match::must(0, 1..1)), it.next()); - /// assert_eq!(Some(Match::must(0, 2..2)), it.next()); - /// assert_eq!(Some(Match::must(0, 3..3)), it.next()); - /// assert_eq!(Some(Match::must(0, 4..4)), it.next()); - /// assert_eq!(Some(Match::must(0, 5..5)), it.next()); - /// assert_eq!(None, it.next()); - /// - /// # Ok::<(), Box>(()) - /// ``` - pub fn config() -> Config { - Config::new() - } - - /// Return a builder for configuring the construction of a `PikeVM`. - /// - /// This is a convenience routine to avoid needing to import the - /// [`Builder`] type in common cases. - /// - /// # Example - /// - /// This example shows how to use the builder to disable UTF-8 mode - /// everywhere. - /// - /// ``` - /// use regex_automata::{ - /// nfa::thompson::{self, pikevm::PikeVM}, - /// util::syntax, - /// Match, - /// }; - /// - /// let re = PikeVM::builder() - /// .syntax(syntax::Config::new().utf8(false)) - /// .thompson(thompson::Config::new().utf8(false)) - /// .build(r"foo(?-u:[^b])ar.*")?; - /// let (mut cache, mut caps) = (re.create_cache(), re.create_captures()); - /// - /// let haystack = b"\xFEfoo\xFFarzz\xE2\x98\xFF\n"; - /// let expected = Some(Match::must(0, 1..9)); - /// re.captures(&mut cache, haystack, &mut caps); - /// assert_eq!(expected, caps.get_match()); - /// - /// # Ok::<(), Box>(()) - /// ``` - pub fn builder() -> Builder { - Builder::new() - } - - /// Create a new empty set of capturing groups that is guaranteed to be - /// valid for the search APIs on this `PikeVM`. - /// - /// A `Captures` value created for a specific `PikeVM` cannot be used with - /// any other `PikeVM`. - /// - /// This is a convenience function for [`Captures::all`]. See the - /// [`Captures`] documentation for an explanation of its alternative - /// constructors that permit the `PikeVM` to do less work during a search, - /// and thus might make it faster. - pub fn create_captures(&self) -> Captures { - Captures::all(self.get_nfa().group_info().clone()) - } - - /// Create a new cache for this `PikeVM`. - /// - /// The cache returned should only be used for searches for this - /// `PikeVM`. If you want to reuse the cache for another `PikeVM`, then - /// you must call [`Cache::reset`] with that `PikeVM` (or, equivalently, - /// [`PikeVM::reset_cache`]). - pub fn create_cache(&self) -> Cache { - Cache::new(self) - } - - /// Reset the given cache such that it can be used for searching with the - /// this `PikeVM` (and only this `PikeVM`). - /// - /// A cache reset permits reusing memory already allocated in this cache - /// with a different `PikeVM`. - /// - /// # Example - /// - /// This shows how to re-purpose a cache for use with a different `PikeVM`. 
- /// - /// ``` - /// # if cfg!(miri) { return Ok(()); } // miri takes too long - /// use regex_automata::{nfa::thompson::pikevm::PikeVM, Match}; - /// - /// let re1 = PikeVM::new(r"\w")?; - /// let re2 = PikeVM::new(r"\W")?; - /// - /// let mut cache = re1.create_cache(); - /// assert_eq!( - /// Some(Match::must(0, 0..2)), - /// re1.find_iter(&mut cache, "Δ").next(), - /// ); - /// - /// // Using 'cache' with re2 is not allowed. It may result in panics or - /// // incorrect results. In order to re-purpose the cache, we must reset - /// // it with the PikeVM we'd like to use it with. - /// // - /// // Similarly, after this reset, using the cache with 're1' is also not - /// // allowed. - /// re2.reset_cache(&mut cache); - /// assert_eq!( - /// Some(Match::must(0, 0..3)), - /// re2.find_iter(&mut cache, "☃").next(), - /// ); - /// - /// # Ok::<(), Box>(()) - /// ``` - pub fn reset_cache(&self, cache: &mut Cache) { - cache.reset(self); - } - - /// Returns the total number of patterns compiled into this `PikeVM`. - /// - /// In the case of a `PikeVM` that contains no patterns, this returns `0`. - /// - /// # Example - /// - /// This example shows the pattern length for a `PikeVM` that never - /// matches: - /// - /// ``` - /// use regex_automata::nfa::thompson::pikevm::PikeVM; - /// - /// let re = PikeVM::never_match()?; - /// assert_eq!(re.pattern_len(), 0); - /// # Ok::<(), Box>(()) - /// ``` - /// - /// And another example for a `PikeVM` that matches at every position: - /// - /// ``` - /// use regex_automata::nfa::thompson::pikevm::PikeVM; - /// - /// let re = PikeVM::always_match()?; - /// assert_eq!(re.pattern_len(), 1); - /// # Ok::<(), Box>(()) - /// ``` - /// - /// And finally, a `PikeVM` that was constructed from multiple patterns: - /// - /// ``` - /// use regex_automata::nfa::thompson::pikevm::PikeVM; - /// - /// let re = PikeVM::new_many(&["[0-9]+", "[a-z]+", "[A-Z]+"])?; - /// assert_eq!(re.pattern_len(), 3); - /// # Ok::<(), Box>(()) - /// ``` - pub fn pattern_len(&self) -> usize { - self.nfa.pattern_len() - } - - /// Return the config for this `PikeVM`. - #[inline] - pub fn get_config(&self) -> &Config { - &self.config - } - - /// Returns a reference to the underlying NFA. - #[inline] - pub fn get_nfa(&self) -> &NFA { - &self.nfa - } -} - -impl PikeVM { - /// Returns true if and only if this `PikeVM` matches the given haystack. - /// - /// This routine may short circuit if it knows that scanning future - /// input will never lead to a different result. In particular, if the - /// underlying NFA enters a match state, then this routine will return - /// `true` immediately without inspecting any future input. (Consider how - /// this might make a difference given the regex `a+` on the haystack - /// `aaaaaaaaaaaaaaa`. This routine can stop after it sees the first `a`, - /// but routines like `find` need to continue searching because `+` is - /// greedy by default.) - /// - /// # Example - /// - /// This shows basic usage: - /// - /// ``` - /// use regex_automata::nfa::thompson::pikevm::PikeVM; - /// - /// let re = PikeVM::new("foo[0-9]+bar")?; - /// let mut cache = re.create_cache(); - /// - /// assert!(re.is_match(&mut cache, "foo12345bar")); - /// assert!(!re.is_match(&mut cache, "foobar")); - /// # Ok::<(), Box>(()) - /// ``` - /// - /// # Example: consistency with search APIs - /// - /// `is_match` is guaranteed to return `true` whenever `find` returns a - /// match. 
This includes searches that are executed entirely within a - /// codepoint: - /// - /// ``` - /// use regex_automata::{nfa::thompson::pikevm::PikeVM, Input}; - /// - /// let re = PikeVM::new("a*")?; - /// let mut cache = re.create_cache(); - /// - /// assert!(!re.is_match(&mut cache, Input::new("☃").span(1..2))); - /// # Ok::<(), Box>(()) - /// ``` - /// - /// Notice that when UTF-8 mode is disabled, then the above reports a - /// match because the restriction against zero-width matches that split a - /// codepoint has been lifted: - /// - /// ``` - /// use regex_automata::{nfa::thompson::{pikevm::PikeVM, NFA}, Input}; - /// - /// let re = PikeVM::builder() - /// .thompson(NFA::config().utf8(false)) - /// .build("a*")?; - /// let mut cache = re.create_cache(); - /// - /// assert!(re.is_match(&mut cache, Input::new("☃").span(1..2))); - /// # Ok::<(), Box>(()) - /// ``` - #[inline] - pub fn is_match<'h, I: Into>>( - &self, - cache: &mut Cache, - input: I, - ) -> bool { - let input = input.into().earliest(true); - self.search_slots(cache, &input, &mut []).is_some() - } - - /// Executes a leftmost forward search and returns a `Match` if one exists. - /// - /// This routine only includes the overall match span. To get access to the - /// individual spans of each capturing group, use [`PikeVM::captures`]. - /// - /// # Example - /// - /// Leftmost first match semantics corresponds to the match with the - /// smallest starting offset, but where the end offset is determined by - /// preferring earlier branches in the original regular expression. For - /// example, `Sam|Samwise` will match `Sam` in `Samwise`, but `Samwise|Sam` - /// will match `Samwise` in `Samwise`. - /// - /// Generally speaking, the "leftmost first" match is how most backtracking - /// regular expressions tend to work. This is in contrast to POSIX-style - /// regular expressions that yield "leftmost longest" matches. Namely, - /// both `Sam|Samwise` and `Samwise|Sam` match `Samwise` when using - /// leftmost longest semantics. (This crate does not currently support - /// leftmost longest semantics.) - /// - /// ``` - /// use regex_automata::{nfa::thompson::pikevm::PikeVM, Match}; - /// - /// let re = PikeVM::new("foo[0-9]+")?; - /// let mut cache = re.create_cache(); - /// let expected = Match::must(0, 0..8); - /// assert_eq!(Some(expected), re.find(&mut cache, "foo12345")); - /// - /// // Even though a match is found after reading the first byte (`a`), - /// // the leftmost first match semantics demand that we find the earliest - /// // match that prefers earlier parts of the pattern over later parts. 
- /// let re = PikeVM::new("abc|a")?; - /// let mut cache = re.create_cache(); - /// let expected = Match::must(0, 0..3); - /// assert_eq!(Some(expected), re.find(&mut cache, "abc")); - /// - /// # Ok::<(), Box>(()) - /// ``` - #[inline] - pub fn find<'h, I: Into>>( - &self, - cache: &mut Cache, - input: I, - ) -> Option { - let input = input.into(); - if self.get_nfa().pattern_len() == 1 { - let mut slots = [None, None]; - let pid = self.search_slots(cache, &input, &mut slots)?; - let start = slots[0]?.get(); - let end = slots[1]?.get(); - return Some(Match::new(pid, Span { start, end })); - } - let ginfo = self.get_nfa().group_info(); - let slots_len = ginfo.implicit_slot_len(); - let mut slots = vec![None; slots_len]; - let pid = self.search_slots(cache, &input, &mut slots)?; - let start = slots[pid.as_usize() * 2]?.get(); - let end = slots[pid.as_usize() * 2 + 1]?.get(); - Some(Match::new(pid, Span { start, end })) - } - - /// Executes a leftmost forward search and writes the spans of capturing - /// groups that participated in a match into the provided [`Captures`] - /// value. If no match was found, then [`Captures::is_match`] is guaranteed - /// to return `false`. - /// - /// # Example - /// - /// ``` - /// use regex_automata::{nfa::thompson::pikevm::PikeVM, Span}; - /// - /// let re = PikeVM::new(r"^([0-9]{4})-([0-9]{2})-([0-9]{2})$")?; - /// let (mut cache, mut caps) = (re.create_cache(), re.create_captures()); - /// - /// re.captures(&mut cache, "2010-03-14", &mut caps); - /// assert!(caps.is_match()); - /// assert_eq!(Some(Span::from(0..4)), caps.get_group(1)); - /// assert_eq!(Some(Span::from(5..7)), caps.get_group(2)); - /// assert_eq!(Some(Span::from(8..10)), caps.get_group(3)); - /// - /// # Ok::<(), Box>(()) - /// ``` - #[inline] - pub fn captures<'h, I: Into>>( - &self, - cache: &mut Cache, - input: I, - caps: &mut Captures, - ) { - self.search(cache, &input.into(), caps) - } - - /// Returns an iterator over all non-overlapping leftmost matches in the - /// given bytes. If no match exists, then the iterator yields no elements. - /// - /// # Example - /// - /// ``` - /// use regex_automata::{nfa::thompson::pikevm::PikeVM, Match}; - /// - /// let re = PikeVM::new("foo[0-9]+")?; - /// let mut cache = re.create_cache(); - /// - /// let text = "foo1 foo12 foo123"; - /// let matches: Vec = re.find_iter(&mut cache, text).collect(); - /// assert_eq!(matches, vec![ - /// Match::must(0, 0..4), - /// Match::must(0, 5..10), - /// Match::must(0, 11..17), - /// ]); - /// # Ok::<(), Box>(()) - /// ``` - #[inline] - pub fn find_iter<'r, 'c, 'h, I: Into>>( - &'r self, - cache: &'c mut Cache, - input: I, - ) -> FindMatches<'r, 'c, 'h> { - let caps = Captures::matches(self.get_nfa().group_info().clone()); - let it = iter::Searcher::new(input.into()); - FindMatches { re: self, cache, caps, it } - } - - /// Returns an iterator over all non-overlapping `Captures` values. If no - /// match exists, then the iterator yields no elements. - /// - /// This yields the same matches as [`PikeVM::find_iter`], but it includes - /// the spans of all capturing groups that participate in each match. - /// - /// **Tip:** See [`util::iter::Searcher`](crate::util::iter::Searcher) for - /// how to correctly iterate over all matches in a haystack while avoiding - /// the creation of a new `Captures` value for every match. (Which you are - /// forced to do with an `Iterator`.) 
- /// - /// # Example - /// - /// ``` - /// use regex_automata::{nfa::thompson::pikevm::PikeVM, Span}; - /// - /// let re = PikeVM::new("foo(?P[0-9]+)")?; - /// let mut cache = re.create_cache(); - /// - /// let text = "foo1 foo12 foo123"; - /// let matches: Vec = re - /// .captures_iter(&mut cache, text) - /// // The unwrap is OK since 'numbers' matches if the pattern matches. - /// .map(|caps| caps.get_group_by_name("numbers").unwrap()) - /// .collect(); - /// assert_eq!(matches, vec![ - /// Span::from(3..4), - /// Span::from(8..10), - /// Span::from(14..17), - /// ]); - /// # Ok::<(), Box>(()) - /// ``` - #[inline] - pub fn captures_iter<'r, 'c, 'h, I: Into>>( - &'r self, - cache: &'c mut Cache, - input: I, - ) -> CapturesMatches<'r, 'c, 'h> { - let caps = self.create_captures(); - let it = iter::Searcher::new(input.into()); - CapturesMatches { re: self, cache, caps, it } - } -} - -impl PikeVM { - /// Executes a leftmost forward search and writes the spans of capturing - /// groups that participated in a match into the provided [`Captures`] - /// value. If no match was found, then [`Captures::is_match`] is guaranteed - /// to return `false`. - /// - /// This is like [`PikeVM::captures`], but it accepts a concrete `&Input` - /// instead of an `Into`. - /// - /// # Example: specific pattern search - /// - /// This example shows how to build a multi-PikeVM that permits searching - /// for specific patterns. - /// - /// ``` - /// use regex_automata::{ - /// nfa::thompson::pikevm::PikeVM, - /// Anchored, Match, PatternID, Input, - /// }; - /// - /// let re = PikeVM::new_many(&["[a-z0-9]{6}", "[a-z][a-z0-9]{5}"])?; - /// let (mut cache, mut caps) = (re.create_cache(), re.create_captures()); - /// let haystack = "foo123"; - /// - /// // Since we are using the default leftmost-first match and both - /// // patterns match at the same starting position, only the first pattern - /// // will be returned in this case when doing a search for any of the - /// // patterns. - /// let expected = Some(Match::must(0, 0..6)); - /// re.search(&mut cache, &Input::new(haystack), &mut caps); - /// assert_eq!(expected, caps.get_match()); - /// - /// // But if we want to check whether some other pattern matches, then we - /// // can provide its pattern ID. - /// let expected = Some(Match::must(1, 0..6)); - /// let input = Input::new(haystack) - /// .anchored(Anchored::Pattern(PatternID::must(1))); - /// re.search(&mut cache, &input, &mut caps); - /// assert_eq!(expected, caps.get_match()); - /// - /// # Ok::<(), Box>(()) - /// ``` - /// - /// # Example: specifying the bounds of a search - /// - /// This example shows how providing the bounds of a search can produce - /// different results than simply sub-slicing the haystack. - /// - /// ``` - /// # if cfg!(miri) { return Ok(()); } // miri takes too long - /// use regex_automata::{nfa::thompson::pikevm::PikeVM, Match, Input}; - /// - /// let re = PikeVM::new(r"\b[0-9]{3}\b")?; - /// let (mut cache, mut caps) = (re.create_cache(), re.create_captures()); - /// let haystack = "foo123bar"; - /// - /// // Since we sub-slice the haystack, the search doesn't know about - /// // the larger context and assumes that `123` is surrounded by word - /// // boundaries. And of course, the match position is reported relative - /// // to the sub-slice as well, which means we get `0..3` instead of - /// // `3..6`. 
- /// let expected = Some(Match::must(0, 0..3)); - /// re.search(&mut cache, &Input::new(&haystack[3..6]), &mut caps); - /// assert_eq!(expected, caps.get_match()); - /// - /// // But if we provide the bounds of the search within the context of the - /// // entire haystack, then the search can take the surrounding context - /// // into account. (And if we did find a match, it would be reported - /// // as a valid offset into `haystack` instead of its sub-slice.) - /// let expected = None; - /// let input = Input::new(haystack).range(3..6); - /// re.search(&mut cache, &input, &mut caps); - /// assert_eq!(expected, caps.get_match()); - /// - /// # Ok::<(), Box>(()) - /// ``` - #[inline] - pub fn search( - &self, - cache: &mut Cache, - input: &Input<'_>, - caps: &mut Captures, - ) { - caps.set_pattern(None); - let pid = self.search_slots(cache, input, caps.slots_mut()); - caps.set_pattern(pid); - } - - /// Executes a leftmost forward search and writes the spans of capturing - /// groups that participated in a match into the provided `slots`, and - /// returns the matching pattern ID. The contents of the slots for patterns - /// other than the matching pattern are unspecified. If no match was found, - /// then `None` is returned and the contents of `slots` is unspecified. - /// - /// This is like [`PikeVM::search`], but it accepts a raw slots slice - /// instead of a `Captures` value. This is useful in contexts where you - /// don't want or need to allocate a `Captures`. - /// - /// It is legal to pass _any_ number of slots to this routine. If the regex - /// engine would otherwise write a slot offset that doesn't fit in the - /// provided slice, then it is simply skipped. In general though, there are - /// usually three slice lengths you might want to use: - /// - /// * An empty slice, if you only care about which pattern matched. - /// * A slice with - /// [`pattern_len() * 2`](crate::nfa::thompson::NFA::pattern_len) - /// slots, if you only care about the overall match spans for each matching - /// pattern. - /// * A slice with - /// [`slot_len()`](crate::util::captures::GroupInfo::slot_len) slots, which - /// permits recording match offsets for every capturing group in every - /// pattern. - /// - /// # Example - /// - /// This example shows how to find the overall match offsets in a - /// multi-pattern search without allocating a `Captures` value. Indeed, we - /// can put our slots right on the stack. - /// - /// ``` - /// # if cfg!(miri) { return Ok(()); } // miri takes too long - /// use regex_automata::{nfa::thompson::pikevm::PikeVM, PatternID, Input}; - /// - /// let re = PikeVM::new_many(&[ - /// r"\pL+", - /// r"\d+", - /// ])?; - /// let mut cache = re.create_cache(); - /// let input = Input::new("!@#123"); - /// - /// // We only care about the overall match offsets here, so we just - /// // allocate two slots for each pattern. Each slot records the start - /// // and end of the match. - /// let mut slots = [None; 4]; - /// let pid = re.search_slots(&mut cache, &input, &mut slots); - /// assert_eq!(Some(PatternID::must(1)), pid); - /// - /// // The overall match offsets are always at 'pid * 2' and 'pid * 2 + 1'. - /// // See 'GroupInfo' for more details on the mapping between groups and - /// // slot indices. 
- /// let slot_start = pid.unwrap().as_usize() * 2; - /// let slot_end = slot_start + 1; - /// assert_eq!(Some(3), slots[slot_start].map(|s| s.get())); - /// assert_eq!(Some(6), slots[slot_end].map(|s| s.get())); - /// - /// # Ok::<(), Box>(()) - /// ``` - #[inline] - pub fn search_slots( - &self, - cache: &mut Cache, - input: &Input<'_>, - slots: &mut [Option], - ) -> Option { - let utf8empty = self.get_nfa().has_empty() && self.get_nfa().is_utf8(); - if !utf8empty { - let hm = self.search_slots_imp(cache, input, slots)?; - return Some(hm.pattern()); - } - // There is an unfortunate special case where if the regex can - // match the empty string and UTF-8 mode is enabled, the search - // implementation requires that the slots have at least as much space - // to report the bounds of any match. This is so zero-width matches - // that split a codepoint can be filtered out. - // - // Note that if utf8empty is true, we specialize the case for when - // the number of patterns is 1. In that case, we can just use a stack - // allocation. Otherwise we resort to a heap allocation, which we - // convince ourselves we're fine with due to the pathological nature of - // this case. - let min = self.get_nfa().group_info().implicit_slot_len(); - if slots.len() >= min { - let hm = self.search_slots_imp(cache, input, slots)?; - return Some(hm.pattern()); - } - if self.get_nfa().pattern_len() == 1 { - let mut enough = [None, None]; - let got = self.search_slots_imp(cache, input, &mut enough); - // This is OK because we know `enough` is strictly bigger than - // `slots`, otherwise this special case isn't reached. - slots.copy_from_slice(&enough[..slots.len()]); - return got.map(|hm| hm.pattern()); - } - let mut enough = vec![None; min]; - let got = self.search_slots_imp(cache, input, &mut enough); - // This is OK because we know `enough` is strictly bigger than `slots`, - // otherwise this special case isn't reached. - slots.copy_from_slice(&enough[..slots.len()]); - got.map(|hm| hm.pattern()) - } - - /// This is the actual implementation of `search_slots_imp` that - /// doesn't account for the special case when 1) the NFA has UTF-8 mode - /// enabled, 2) the NFA can match the empty string and 3) the caller has - /// provided an insufficient number of slots to record match offsets. - #[inline(never)] - fn search_slots_imp( - &self, - cache: &mut Cache, - input: &Input<'_>, - slots: &mut [Option], - ) -> Option { - let utf8empty = self.get_nfa().has_empty() && self.get_nfa().is_utf8(); - let hm = match self.search_imp(cache, input, slots) { - None => return None, - Some(hm) if !utf8empty => return Some(hm), - Some(hm) => hm, - }; - empty::skip_splits_fwd(input, hm, hm.offset(), |input| { - Ok(self - .search_imp(cache, input, slots) - .map(|hm| (hm, hm.offset()))) - }) - // OK because the PikeVM never errors. - .unwrap() - } - - /// Writes the set of patterns that match anywhere in the given search - /// configuration to `patset`. If multiple patterns match at the same - /// position and this `PikeVM` was configured with [`MatchKind::All`] - /// semantics, then all matching patterns are written to the given set. - /// - /// Unless all of the patterns in this `PikeVM` are anchored, then - /// generally speaking, this will visit every byte in the haystack. - /// - /// This search routine *does not* clear the pattern set. 
This gives some - /// flexibility to the caller (e.g., running multiple searches with the - /// same pattern set), but does make the API bug-prone if you're reusing - /// the same pattern set for multiple searches but intended them to be - /// independent. - /// - /// If a pattern ID matched but the given `PatternSet` does not have - /// sufficient capacity to store it, then it is not inserted and silently - /// dropped. - /// - /// # Example - /// - /// This example shows how to find all matching patterns in a haystack, - /// even when some patterns match at the same position as other patterns. - /// - /// ``` - /// # if cfg!(miri) { return Ok(()); } // miri takes too long - /// use regex_automata::{ - /// nfa::thompson::pikevm::PikeVM, - /// Input, MatchKind, PatternSet, - /// }; - /// - /// let patterns = &[ - /// r"\w+", r"\d+", r"\pL+", r"foo", r"bar", r"barfoo", r"foobar", - /// ]; - /// let re = PikeVM::builder() - /// .configure(PikeVM::config().match_kind(MatchKind::All)) - /// .build_many(patterns)?; - /// let mut cache = re.create_cache(); - /// - /// let input = Input::new("foobar"); - /// let mut patset = PatternSet::new(re.pattern_len()); - /// re.which_overlapping_matches(&mut cache, &input, &mut patset); - /// let expected = vec![0, 2, 3, 4, 6]; - /// let got: Vec = patset.iter().map(|p| p.as_usize()).collect(); - /// assert_eq!(expected, got); - /// - /// # Ok::<(), Box>(()) - /// ``` - #[inline] - pub fn which_overlapping_matches( - &self, - cache: &mut Cache, - input: &Input<'_>, - patset: &mut PatternSet, - ) { - self.which_overlapping_imp(cache, input, patset) - } -} - -impl PikeVM { - /// The implementation of standard leftmost search. - /// - /// Capturing group spans are written to `slots`, but only if requested. - /// `slots` can be any length. Any slot in the NFA that is activated but - /// which is out of bounds for the given `slots` is ignored. - fn search_imp( - &self, - cache: &mut Cache, - input: &Input<'_>, - slots: &mut [Option], - ) -> Option { - cache.setup_search(slots.len()); - if input.is_done() { - return None; - } - // Why do we even care about this? Well, in our 'Captures' - // representation, we use usize::MAX as a sentinel to indicate "no - // match." This isn't problematic so long as our haystack doesn't have - // a maximal length. Byte slices are guaranteed by Rust to have a - // length that fits into isize, and so this assert should always pass. - // But we put it here to make our assumption explicit. - assert!( - input.haystack().len() < core::usize::MAX, - "byte slice lengths must be less than usize MAX", - ); - instrument!(|c| c.reset(&self.nfa)); - - // Whether we want to visit all match states instead of emulating the - // 'leftmost' semantics of typical backtracking regex engines. - let allmatches = - self.config.get_match_kind().continue_past_first_match(); - let (anchored, start_id) = match self.start_config(input) { - None => return None, - Some(config) => config, - }; - - let pre = - if anchored { None } else { self.get_config().get_prefilter() }; - let Cache { ref mut stack, ref mut curr, ref mut next } = cache; - let mut hm = None; - // Yes, our search doesn't end at input.end(), but includes it. This - // is necessary because matches are delayed by one byte, just like - // how the DFA engines work. The delay is used to handle look-behind - // assertions. In the case of the PikeVM, the delay is implemented - // by not considering a match to exist until it is visited in - // 'steps'. 
Technically, we know a match exists in the previous - // iteration via 'epsilon_closure'. (It's the same thing in NFA-to-DFA - // determinization. We don't mark a DFA state as a match state if it - // contains an NFA match state, but rather, whether the DFA state was - // generated by a transition from a DFA state that contains an NFA - // match state.) - let mut at = input.start(); - while at <= input.end() { - // If we have no states left to visit, then there are some cases - // where we know we can quit early or even skip ahead. - if curr.set.is_empty() { - // We have a match and we haven't been instructed to continue - // on even after finding a match, so we can quit. - if hm.is_some() && !allmatches { - break; - } - // If we're running an anchored search and we've advanced - // beyond the start position with no other states to try, then - // we will never observe a match and thus can stop. - if anchored && at > input.start() { - break; - } - // If there no states left to explore at this position and we - // know we can't terminate early, then we are effectively at - // the starting state of the NFA. If we fell through here, - // we'd end up adding our '(?s-u:.)*?' prefix and it would be - // the only thing in 'curr'. So we might as well just skip - // ahead until we find something that we know might advance us - // forward. - if let Some(pre) = pre { - let span = Span::from(at..input.end()); - match pre.find(input.haystack(), span) { - None => break, - Some(ref span) => at = span.start, - } - } - } - // Instead of using the NFA's unanchored start state, we actually - // always use its anchored starting state. As a result, when doing - // an unanchored search, we need to simulate our own '(?s-u:.)*?' - // prefix, to permit a match to appear anywhere. - // - // Now, we don't *have* to do things this way. We could use the - // NFA's unanchored starting state and do one 'epsilon_closure' - // call from that starting state before the main loop here. And - // that is just as correct. However, it turns out to be slower - // than our approach here because it slightly increases the cost - // of processing each byte by requiring us to visit more NFA - // states to deal with the additional NFA states in the unanchored - // prefix. By simulating it explicitly here, we lower those costs - // substantially. The cost is itself small, but it adds up for - // large haystacks. - // - // In order to simulate the '(?s-u:.)*?' prefix---which is not - // greedy---we are careful not to perform an epsilon closure on - // the start state if we already have a match. Namely, if we - // did otherwise, we would never reach a terminating condition - // because there would always be additional states to process. - // In effect, the exclusion of running 'epsilon_closure' when - // we have a match corresponds to the "dead" states we have in - // our DFA regex engines. Namely, in a DFA, match states merely - // instruct the search execution to record the current offset as - // the most recently seen match. It is the dead state that actually - // indicates when to stop the search (other than EOF or quit - // states). - // - // However, when 'allmatches' is true, the caller has asked us to - // leave in every possible match state. This tends not to make a - // whole lot of sense in unanchored searches, because it means the - // search really cannot terminate until EOF. And often, in that - // case, you wind up skipping over a bunch of matches and are left - // with the "last" match. 
Arguably, it just doesn't make a lot of - // sense to run a 'leftmost' search (which is what this routine is) - // with 'allmatches' set to true. But the DFAs support it and this - // matches their behavior. (Generally, 'allmatches' is useful for - // overlapping searches or leftmost anchored searches to find the - // longest possible match by ignoring match priority.) - // - // Additionally, when we're running an anchored search, this - // epsilon closure should only be computed at the beginning of the - // search. If we re-computed it at every position, we would be - // simulating an unanchored search when we were tasked to perform - // an anchored search. - if (hm.is_none() || allmatches) - && (!anchored || at == input.start()) - { - // Since we are adding to the 'curr' active states and since - // this is for the start ID, we use a slots slice that is - // guaranteed to have the right length but where every element - // is absent. This is exactly what we want, because this - // epsilon closure is responsible for simulating an unanchored - // '(?s:.)*?' prefix. It is specifically outside of any - // capturing groups, and thus, using slots that are always - // absent is correct. - // - // Note though that we can't just use '&mut []' here, since - // this epsilon closure may traverse through 'Captures' epsilon - // transitions, and thus must be able to write offsets to the - // slots given which are later copied to slot values in 'curr'. - let slots = next.slot_table.all_absent(); - self.epsilon_closure(stack, slots, curr, input, at, start_id); - } - if let Some(pid) = self.nexts(stack, curr, next, input, at, slots) - { - hm = Some(HalfMatch::new(pid, at)); - } - // Unless the caller asked us to return early, we need to mush on - // to see if we can extend our match. (But note that 'nexts' will - // quit right after seeing a match when match_kind==LeftmostFirst, - // as is consistent with leftmost-first match priority.) - if input.get_earliest() && hm.is_some() { - break; - } - core::mem::swap(curr, next); - next.set.clear(); - at += 1; - } - instrument!(|c| c.eprint(&self.nfa)); - hm - } - - /// The implementation for the 'which_overlapping_matches' API. Basically, - /// we do a single scan through the entire haystack (unless our regex - /// or search is anchored) and record every pattern that matched. In - /// particular, when MatchKind::All is used, this supports overlapping - /// matches. So if we have the regexes 'sam' and 'samwise', they will - /// *both* be reported in the pattern set when searching the haystack - /// 'samwise'. - fn which_overlapping_imp( - &self, - cache: &mut Cache, - input: &Input<'_>, - patset: &mut PatternSet, - ) { - // NOTE: This is effectively a copy of 'search_imp' above, but with no - // captures support and instead writes patterns that matched directly - // to 'patset'. See that routine for better commentary about what's - // going on in this routine. We probably could unify the routines using - // generics or more helper routines, but I'm not sure it's worth it. - // - // NOTE: We somewhat go out of our way here to support things like - // 'input.get_earliest()' and 'leftmost-first' match semantics. Neither - // of those seem particularly relevant to this routine, but they are - // both supported by the DFA analogs of this routine by construction - // and composition, so it seems like good sense to have the PikeVM - // match that behavior. 
- - cache.setup_search(0); - if input.is_done() { - return; - } - assert!( - input.haystack().len() < core::usize::MAX, - "byte slice lengths must be less than usize MAX", - ); - instrument!(|c| c.reset(&self.nfa)); - - let allmatches = - self.config.get_match_kind().continue_past_first_match(); - let (anchored, start_id) = match self.start_config(input) { - None => return, - Some(config) => config, - }; - - let Cache { ref mut stack, ref mut curr, ref mut next } = cache; - for at in input.start()..=input.end() { - let any_matches = !patset.is_empty(); - if curr.set.is_empty() { - if any_matches && !allmatches { - break; - } - if anchored && at > input.start() { - break; - } - } - if !any_matches || allmatches { - let slots = &mut []; - self.epsilon_closure(stack, slots, curr, input, at, start_id); - } - self.nexts_overlapping(stack, curr, next, input, at, patset); - // If we found a match and filled our set, then there is no more - // additional info that we can provide. Thus, we can quit. We also - // quit if the caller asked us to stop at the earliest point that - // we know a match exists. - if patset.is_full() || input.get_earliest() { - break; - } - core::mem::swap(curr, next); - next.set.clear(); - } - instrument!(|c| c.eprint(&self.nfa)); - } - - /// Process the active states in 'curr' to find the states (written to - /// 'next') we should process for the next byte in the haystack. - /// - /// 'stack' is used to perform a depth first traversal of the NFA when - /// computing an epsilon closure. - /// - /// When a match is found, the slots for that match state (in 'curr') are - /// copied to 'caps'. Moreover, once a match is seen, processing for 'curr' - /// stops (unless the PikeVM was configured with MatchKind::All semantics). - #[cfg_attr(feature = "perf-inline", inline(always))] - fn nexts( - &self, - stack: &mut Vec, - curr: &mut ActiveStates, - next: &mut ActiveStates, - input: &Input<'_>, - at: usize, - slots: &mut [Option], - ) -> Option { - instrument!(|c| c.record_state_set(&curr.set)); - let mut pid = None; - let ActiveStates { ref set, ref mut slot_table } = *curr; - for sid in set.iter() { - pid = match self.next(stack, slot_table, next, input, at, sid) { - None => continue, - Some(pid) => Some(pid), - }; - slots.copy_from_slice(slot_table.for_state(sid)); - if !self.config.get_match_kind().continue_past_first_match() { - break; - } - } - pid - } - - /// Like 'nexts', but for the overlapping case. This doesn't write any - /// slots, and instead just writes which pattern matched in 'patset'. - #[cfg_attr(feature = "perf-inline", inline(always))] - fn nexts_overlapping( - &self, - stack: &mut Vec, - curr: &mut ActiveStates, - next: &mut ActiveStates, - input: &Input<'_>, - at: usize, - patset: &mut PatternSet, - ) { - instrument!(|c| c.record_state_set(&curr.set)); - let utf8empty = self.get_nfa().has_empty() && self.get_nfa().is_utf8(); - let ActiveStates { ref set, ref mut slot_table } = *curr; - for sid in set.iter() { - let pid = match self.next(stack, slot_table, next, input, at, sid) - { - None => continue, - Some(pid) => pid, - }; - // This handles the case of finding a zero-width match that splits - // a codepoint. Namely, if we're in UTF-8 mode AND we know we can - // match the empty string, then the only valid way of getting to - // this point with an offset that splits a codepoint is when we - // have an empty match. Such matches, in UTF-8 mode, must not be - // reported. So we just skip them here and pretend as if we did - // not see a match. 
- if utf8empty && !input.is_char_boundary(at) { - continue; - } - let _ = patset.try_insert(pid); - if !self.config.get_match_kind().continue_past_first_match() { - break; - } - } - } - - /// Starting from 'sid', if the position 'at' in the 'input' haystack has a - /// transition defined out of 'sid', then add the state transitioned to and - /// its epsilon closure to the 'next' set of states to explore. - /// - /// 'stack' is used by the epsilon closure computation to perform a depth - /// first traversal of the NFA. - /// - /// 'curr_slot_table' should be the table of slots for the current set of - /// states being explored. If there is a transition out of 'sid', then - /// sid's row in the slot table is used to perform the epsilon closure. - #[cfg_attr(feature = "perf-inline", inline(always))] - fn next( - &self, - stack: &mut Vec, - curr_slot_table: &mut SlotTable, - next: &mut ActiveStates, - input: &Input<'_>, - at: usize, - sid: StateID, - ) -> Option { - instrument!(|c| c.record_step(sid)); - match *self.nfa.state(sid) { - State::Fail - | State::Look { .. } - | State::Union { .. } - | State::BinaryUnion { .. } - | State::Capture { .. } => None, - State::ByteRange { ref trans } => { - if trans.matches(input.haystack(), at) { - let slots = curr_slot_table.for_state(sid); - // OK because 'at <= haystack.len() < usize::MAX', so - // adding 1 will never wrap. - let at = at.wrapping_add(1); - self.epsilon_closure( - stack, slots, next, input, at, trans.next, - ); - } - None - } - State::Sparse(ref sparse) => { - if let Some(next_sid) = sparse.matches(input.haystack(), at) { - let slots = curr_slot_table.for_state(sid); - // OK because 'at <= haystack.len() < usize::MAX', so - // adding 1 will never wrap. - let at = at.wrapping_add(1); - self.epsilon_closure( - stack, slots, next, input, at, next_sid, - ); - } - None - } - State::Dense(ref dense) => { - if let Some(next_sid) = dense.matches(input.haystack(), at) { - let slots = curr_slot_table.for_state(sid); - // OK because 'at <= haystack.len() < usize::MAX', so - // adding 1 will never wrap. - let at = at.wrapping_add(1); - self.epsilon_closure( - stack, slots, next, input, at, next_sid, - ); - } - None - } - State::Match { pattern_id } => Some(pattern_id), - } - } - - /// Compute the epsilon closure of 'sid', writing the closure into 'next' - /// while copying slot values from 'curr_slots' into corresponding states - /// in 'next'. 'curr_slots' should be the slot values corresponding to - /// 'sid'. - /// - /// The given 'stack' is used to perform a depth first traversal of the - /// NFA by recursively following all epsilon transitions out of 'sid'. - /// Conditional epsilon transitions are followed if and only if they are - /// satisfied for the position 'at' in the 'input' haystack. - /// - /// While this routine may write to 'curr_slots', once it returns, any - /// writes are undone and the original values (even if absent) are - /// restored. 
- #[cfg_attr(feature = "perf-inline", inline(always))] - fn epsilon_closure( - &self, - stack: &mut Vec, - curr_slots: &mut [Option], - next: &mut ActiveStates, - input: &Input<'_>, - at: usize, - sid: StateID, - ) { - instrument!(|c| { - c.record_closure(sid); - c.record_stack_push(sid); - }); - stack.push(FollowEpsilon::Explore(sid)); - while let Some(frame) = stack.pop() { - match frame { - FollowEpsilon::RestoreCapture { slot, offset: pos } => { - curr_slots[slot] = pos; - } - FollowEpsilon::Explore(sid) => { - self.epsilon_closure_explore( - stack, curr_slots, next, input, at, sid, - ); - } - } - } - } - - /// Explore all of the epsilon transitions out of 'sid'. This is mostly - /// split out from 'epsilon_closure' in order to clearly delineate - /// the actual work of computing an epsilon closure from the stack - /// book-keeping. - /// - /// This will push any additional explorations needed on to 'stack'. - /// - /// 'curr_slots' should refer to the slots for the currently active NFA - /// state. That is, the current state we are stepping through. These - /// slots are mutated in place as new 'Captures' states are traversed - /// during epsilon closure, but the slots are restored to their original - /// values once the full epsilon closure is completed. The ultimate use of - /// 'curr_slots' is to copy them to the corresponding 'next_slots', so that - /// the capturing group spans are forwarded from the currently active state - /// to the next. - /// - /// 'next' refers to the next set of active states. Computing an epsilon - /// closure may increase the next set of active states. - /// - /// 'input' refers to the caller's input configuration and 'at' refers to - /// the current position in the haystack. These are used to check whether - /// conditional epsilon transitions (like look-around) are satisfied at - /// the current position. If they aren't, then the epsilon closure won't - /// include them. - #[cfg_attr(feature = "perf-inline", inline(always))] - fn epsilon_closure_explore( - &self, - stack: &mut Vec, - curr_slots: &mut [Option], - next: &mut ActiveStates, - input: &Input<'_>, - at: usize, - mut sid: StateID, - ) { - // We can avoid pushing some state IDs on to our stack in precisely - // the cases where a 'push(x)' would be immediately followed by a 'x - // = pop()'. This is achieved by this outer-loop. We simply set 'sid' - // to be the next state ID we want to explore once we're done with - // our initial exploration. In practice, this avoids a lot of stack - // thrashing. - loop { - instrument!(|c| c.record_set_insert(sid)); - // Record this state as part of our next set of active states. If - // we've already explored it, then no need to do it again. - if !next.set.insert(sid) { - return; - } - match *self.nfa.state(sid) { - State::Fail - | State::Match { .. } - | State::ByteRange { .. } - | State::Sparse { .. } - | State::Dense { .. } => { - next.slot_table.for_state(sid).copy_from_slice(curr_slots); - return; - } - State::Look { look, next } => { - // OK because we don't permit building a searcher with a - // Unicode word boundary if the requisite Unicode data is - // unavailable. - if !self.nfa.look_matcher().matches_inline( - look, - input.haystack(), - at, - ) { - return; - } - sid = next; - } - State::Union { ref alternates } => { - sid = match alternates.get(0) { - None => return, - Some(&sid) => sid, - }; - instrument!(|c| { - for &alt in &alternates[1..] { - c.record_stack_push(alt); - } - }); - stack.extend( - alternates[1..] 
- .iter() - .copied() - .rev() - .map(FollowEpsilon::Explore), - ); - } - State::BinaryUnion { alt1, alt2 } => { - sid = alt1; - instrument!(|c| c.record_stack_push(sid)); - stack.push(FollowEpsilon::Explore(alt2)); - } - State::Capture { next, slot, .. } => { - // There's no need to do anything with slots that - // ultimately won't be copied into the caller-provided - // 'Captures' value. So we just skip dealing with them at - // all. - if slot.as_usize() < curr_slots.len() { - instrument!(|c| c.record_stack_push(sid)); - stack.push(FollowEpsilon::RestoreCapture { - slot, - offset: curr_slots[slot], - }); - // OK because length of a slice must fit into an isize. - curr_slots[slot] = Some(NonMaxUsize::new(at).unwrap()); - } - sid = next; - } - } - } - } - - /// Return the starting configuration of a PikeVM search. - /// - /// The "start config" is basically whether the search should be anchored - /// or not and the NFA state ID at which to begin the search. The state ID - /// returned always corresponds to an anchored starting state even when the - /// search is unanchored. This is because the PikeVM search loop deals with - /// unanchored searches with an explicit epsilon closure out of the start - /// state. - /// - /// This routine accounts for both the caller's `Input` configuration - /// and the pattern itself. For example, even if the caller asks for an - /// unanchored search, if the pattern itself is anchored, then this will - /// always return 'true' because implementing an unanchored search in that - /// case would be incorrect. - /// - /// Similarly, if the caller requests an anchored search for a particular - /// pattern, then the starting state ID returned will reflect that. - /// - /// If a pattern ID is given in the input configuration that is not in - /// this regex, then `None` is returned. - fn start_config(&self, input: &Input<'_>) -> Option<(bool, StateID)> { - match input.get_anchored() { - // Only way we're unanchored is if both the caller asked for an - // unanchored search *and* the pattern is itself not anchored. - Anchored::No => Some(( - self.nfa.is_always_start_anchored(), - self.nfa.start_anchored(), - )), - Anchored::Yes => Some((true, self.nfa.start_anchored())), - Anchored::Pattern(pid) => { - Some((true, self.nfa.start_pattern(pid)?)) - } - } - } -} - -/// An iterator over all non-overlapping matches for a particular search. -/// -/// The iterator yields a [`Match`] value until no more matches could be found. -/// -/// The lifetime parameters are as follows: -/// -/// * `'r` represents the lifetime of the PikeVM. -/// * `'c` represents the lifetime of the PikeVM's cache. -/// * `'h` represents the lifetime of the haystack being searched. -/// -/// This iterator can be created with the [`PikeVM::find_iter`] method. -#[derive(Debug)] -pub struct FindMatches<'r, 'c, 'h> { - re: &'r PikeVM, - cache: &'c mut Cache, - caps: Captures, - it: iter::Searcher<'h>, -} - -impl<'r, 'c, 'h> Iterator for FindMatches<'r, 'c, 'h> { - type Item = Match; - - #[inline] - fn next(&mut self) -> Option { - // Splitting 'self' apart seems necessary to appease borrowck. - let FindMatches { re, ref mut cache, ref mut caps, ref mut it } = - *self; - // 'advance' converts errors into panics, which is OK here because - // the PikeVM can never return an error. - it.advance(|input| { - re.search(cache, input, caps); - Ok(caps.get_match()) - }) - } -} - -/// An iterator over all non-overlapping leftmost matches, with their capturing -/// groups, for a particular search. 
-/// -/// The iterator yields a [`Captures`] value until no more matches could be -/// found. -/// -/// The lifetime parameters are as follows: -/// -/// * `'r` represents the lifetime of the PikeVM. -/// * `'c` represents the lifetime of the PikeVM's cache. -/// * `'h` represents the lifetime of the haystack being searched. -/// -/// This iterator can be created with the [`PikeVM::captures_iter`] method. -#[derive(Debug)] -pub struct CapturesMatches<'r, 'c, 'h> { - re: &'r PikeVM, - cache: &'c mut Cache, - caps: Captures, - it: iter::Searcher<'h>, -} - -impl<'r, 'c, 'h> Iterator for CapturesMatches<'r, 'c, 'h> { - type Item = Captures; - - #[inline] - fn next(&mut self) -> Option { - // Splitting 'self' apart seems necessary to appease borrowck. - let CapturesMatches { re, ref mut cache, ref mut caps, ref mut it } = - *self; - // 'advance' converts errors into panics, which is OK here because - // the PikeVM can never return an error. - it.advance(|input| { - re.search(cache, input, caps); - Ok(caps.get_match()) - }); - if caps.is_match() { - Some(caps.clone()) - } else { - None - } - } -} - -/// A cache represents mutable state that a [`PikeVM`] requires during a -/// search. -/// -/// For a given [`PikeVM`], its corresponding cache may be created either via -/// [`PikeVM::create_cache`], or via [`Cache::new`]. They are equivalent in -/// every way, except the former does not require explicitly importing `Cache`. -/// -/// A particular `Cache` is coupled with the [`PikeVM`] from which it -/// was created. It may only be used with that `PikeVM`. A cache and its -/// allocations may be re-purposed via [`Cache::reset`], in which case, it can -/// only be used with the new `PikeVM` (and not the old one). -#[derive(Clone, Debug)] -pub struct Cache { - /// Stack used while computing epsilon closure. This effectively lets us - /// move what is more naturally expressed through recursion to a stack - /// on the heap. - stack: Vec, - /// The current active states being explored for the current byte in the - /// haystack. - curr: ActiveStates, - /// The next set of states we're building that will be explored for the - /// next byte in the haystack. - next: ActiveStates, -} - -impl Cache { - /// Create a new [`PikeVM`] cache. - /// - /// A potentially more convenient routine to create a cache is - /// [`PikeVM::create_cache`], as it does not require also importing the - /// `Cache` type. - /// - /// If you want to reuse the returned `Cache` with some other `PikeVM`, - /// then you must call [`Cache::reset`] with the desired `PikeVM`. - pub fn new(re: &PikeVM) -> Cache { - Cache { - stack: vec![], - curr: ActiveStates::new(re), - next: ActiveStates::new(re), - } - } - - /// Reset this cache such that it can be used for searching with a - /// different [`PikeVM`]. - /// - /// A cache reset permits reusing memory already allocated in this cache - /// with a different `PikeVM`. - /// - /// # Example - /// - /// This shows how to re-purpose a cache for use with a different `PikeVM`. - /// - /// ``` - /// # if cfg!(miri) { return Ok(()); } // miri takes too long - /// use regex_automata::{nfa::thompson::pikevm::PikeVM, Match}; - /// - /// let re1 = PikeVM::new(r"\w")?; - /// let re2 = PikeVM::new(r"\W")?; - /// - /// let mut cache = re1.create_cache(); - /// assert_eq!( - /// Some(Match::must(0, 0..2)), - /// re1.find_iter(&mut cache, "Δ").next(), - /// ); - /// - /// // Using 'cache' with re2 is not allowed. It may result in panics or - /// // incorrect results. 
In order to re-purpose the cache, we must reset - /// // it with the PikeVM we'd like to use it with. - /// // - /// // Similarly, after this reset, using the cache with 're1' is also not - /// // allowed. - /// cache.reset(&re2); - /// assert_eq!( - /// Some(Match::must(0, 0..3)), - /// re2.find_iter(&mut cache, "☃").next(), - /// ); - /// - /// # Ok::<(), Box>(()) - /// ``` - pub fn reset(&mut self, re: &PikeVM) { - self.curr.reset(re); - self.next.reset(re); - } - - /// Returns the heap memory usage, in bytes, of this cache. - /// - /// This does **not** include the stack size used up by this cache. To - /// compute that, use `std::mem::size_of::()`. - pub fn memory_usage(&self) -> usize { - use core::mem::size_of; - (self.stack.len() * size_of::()) - + self.curr.memory_usage() - + self.next.memory_usage() - } - - /// Clears this cache. This should be called at the start of every search - /// to ensure we start with a clean slate. - /// - /// This also sets the length of the capturing groups used in the current - /// search. This permits an optimization where by 'SlotTable::for_state' - /// only returns the number of slots equivalent to the number of slots - /// given in the 'Captures' value. This may be less than the total number - /// of possible slots, e.g., when one only wants to track overall match - /// offsets. This in turn permits less copying of capturing group spans - /// in the PikeVM. - fn setup_search(&mut self, captures_slot_len: usize) { - self.stack.clear(); - self.curr.setup_search(captures_slot_len); - self.next.setup_search(captures_slot_len); - } -} - -/// A set of active states used to "simulate" the execution of an NFA via the -/// PikeVM. -/// -/// There are two sets of these used during NFA simulation. One set corresponds -/// to the "current" set of states being traversed for the current position -/// in a haystack. The other set corresponds to the "next" set of states being -/// built, which will become the new "current" set for the next position in the -/// haystack. These two sets correspond to CLIST and NLIST in Thompson's -/// original paper regexes: https://dl.acm.org/doi/pdf/10.1145/363347.363387 -/// -/// In addition to representing a set of NFA states, this also maintains slot -/// values for each state. These slot values are what turn the NFA simulation -/// into the "Pike VM." Namely, they track capturing group values for each -/// state. During the computation of epsilon closure, we copy slot values from -/// states in the "current" set to the "next" set. Eventually, once a match -/// is found, the slot values for that match state are what we write to the -/// caller provided 'Captures' value. -#[derive(Clone, Debug)] -struct ActiveStates { - /// The set of active NFA states. This set preserves insertion order, which - /// is critical for simulating the match semantics of backtracking regex - /// engines. - set: SparseSet, - /// The slots for every NFA state, where each slot stores a (possibly - /// absent) offset. Every capturing group has two slots. One for a start - /// offset and one for an end offset. - slot_table: SlotTable, -} - -impl ActiveStates { - /// Create a new set of active states for the given PikeVM. The active - /// states returned may only be used with the given PikeVM. (Use 'reset' - /// to re-purpose the allocation for a different PikeVM.) 
- fn new(re: &PikeVM) -> ActiveStates { - let mut active = ActiveStates { - set: SparseSet::new(0), - slot_table: SlotTable::new(), - }; - active.reset(re); - active - } - - /// Reset this set of active states such that it can be used with the given - /// PikeVM (and only that PikeVM). - fn reset(&mut self, re: &PikeVM) { - self.set.resize(re.get_nfa().states().len()); - self.slot_table.reset(re); - } - - /// Return the heap memory usage, in bytes, used by this set of active - /// states. - /// - /// This does not include the stack size of this value. - fn memory_usage(&self) -> usize { - self.set.memory_usage() + self.slot_table.memory_usage() - } - - /// Setup this set of active states for a new search. The given slot - /// length should be the number of slots in a caller provided 'Captures' - /// (and may be zero). - fn setup_search(&mut self, captures_slot_len: usize) { - self.set.clear(); - self.slot_table.setup_search(captures_slot_len); - } -} - -/// A table of slots, where each row represent a state in an NFA. Thus, the -/// table has room for storing slots for every single state in an NFA. -/// -/// This table is represented with a single contiguous allocation. In general, -/// the notion of "capturing group" doesn't really exist at this level of -/// abstraction, hence the name "slot" instead. (Indeed, every capturing group -/// maps to a pair of slots, one for the start offset and one for the end -/// offset.) Slots are indexed by the 'Captures' NFA state. -/// -/// N.B. Not every state actually needs a row of slots. Namely, states that -/// only have epsilon transitions currently never have anything written to -/// their rows in this table. Thus, the table is somewhat wasteful in its heap -/// usage. However, it is important to maintain fast random access by state -/// ID, which means one giant table tends to work well. RE2 takes a different -/// approach here and allocates each row as its own reference counted thing. -/// I explored such a strategy at one point here, but couldn't get it to work -/// well using entirely safe code. (To the ambitious reader: I encourage you to -/// re-litigate that experiment.) I very much wanted to stick to safe code, but -/// could be convinced otherwise if there was a solid argument and the safety -/// was encapsulated well. -#[derive(Clone, Debug)] -struct SlotTable { - /// The actual table of offsets. - table: Vec>, - /// The number of slots per state, i.e., the table's stride or the length - /// of each row. - slots_per_state: usize, - /// The number of slots in the caller-provided 'Captures' value for the - /// current search. Setting this to 'slots_per_state' is always correct, - /// but may be wasteful. - slots_for_captures: usize, -} - -impl SlotTable { - /// Create a new slot table. - /// - /// One should call 'reset' with the corresponding PikeVM before use. - fn new() -> SlotTable { - SlotTable { table: vec![], slots_for_captures: 0, slots_per_state: 0 } - } - - /// Reset this slot table such that it can be used with the given PikeVM - /// (and only that PikeVM). - fn reset(&mut self, re: &PikeVM) { - let nfa = re.get_nfa(); - self.slots_per_state = nfa.group_info().slot_len(); - // This is always correct, but may be reduced for a particular search - // if a 'Captures' has fewer slots, e.g., none at all or only slots - // for tracking the overall match instead of all slots for every - // group. 
- self.slots_for_captures = core::cmp::max( - self.slots_per_state, - nfa.pattern_len().checked_mul(2).unwrap(), - ); - let len = nfa - .states() - .len() - .checked_mul(self.slots_per_state) - // Add space to account for scratch space used during a search. - .and_then(|x| x.checked_add(self.slots_for_captures)) - // It seems like this could actually panic on legitimate inputs on - // 32-bit targets, and very likely to panic on 16-bit. Should we - // somehow convert this to an error? What about something similar - // for the lazy DFA cache? If you're tripping this assert, please - // file a bug. - .expect("slot table length doesn't overflow"); - // This happens about as often as a regex is compiled, so it probably - // should be at debug level, but I found it quite distracting and not - // particularly useful. - trace!( - "resizing PikeVM active states table to {} entries \ - (slots_per_state={})", - len, - self.slots_per_state, - ); - self.table.resize(len, None); - } - - /// Return the heap memory usage, in bytes, used by this slot table. - /// - /// This does not include the stack size of this value. - fn memory_usage(&self) -> usize { - self.table.len() * core::mem::size_of::>() - } - - /// Perform any per-search setup for this slot table. - /// - /// In particular, this sets the length of the number of slots used in the - /// 'Captures' given by the caller (if any at all). This number may be - /// smaller than the total number of slots available, e.g., when the caller - /// is only interested in tracking the overall match and not the spans of - /// every matching capturing group. Only tracking the overall match can - /// save a substantial amount of time copying capturing spans during a - /// search. - fn setup_search(&mut self, captures_slot_len: usize) { - self.slots_for_captures = captures_slot_len; - } - - /// Return a mutable slice of the slots for the given state. - /// - /// Note that the length of the slice returned may be less than the total - /// number of slots available for this state. In particular, the length - /// always matches the number of slots indicated via 'setup_search'. - fn for_state(&mut self, sid: StateID) -> &mut [Option] { - let i = sid.as_usize() * self.slots_per_state; - &mut self.table[i..i + self.slots_for_captures] - } - - /// Return a slice of slots of appropriate length where every slot offset - /// is guaranteed to be absent. This is useful in cases where you need to - /// compute an epsilon closure outside of the user supplied regex, and thus - /// never want it to have any capturing slots set. - fn all_absent(&mut self) -> &mut [Option] { - let i = self.table.len() - self.slots_for_captures; - &mut self.table[i..i + self.slots_for_captures] - } -} - -/// Represents a stack frame for use while computing an epsilon closure. -/// -/// (An "epsilon closure" refers to the set of reachable NFA states from a -/// single state without consuming any input. That is, the set of all epsilon -/// transitions not only from that single state, but from every other state -/// reachable by an epsilon transition as well. This is why it's called a -/// "closure." Computing an epsilon closure is also done during DFA -/// determinization! Compare and contrast the epsilon closure here in this -/// PikeVM and the one used for determinization in crate::util::determinize.) -/// -/// Computing the epsilon closure in a Thompson NFA proceeds via a depth -/// first traversal over all epsilon transitions from a particular state. 
-/// (A depth first traversal is important because it emulates the same priority -/// of matches that is typically found in backtracking regex engines.) This -/// depth first traversal is naturally expressed using recursion, but to avoid -/// a call stack size proportional to the size of a regex, we put our stack on -/// the heap instead. -/// -/// This stack thus consists of call frames. The typical call frame is -/// `Explore`, which instructs epsilon closure to explore the epsilon -/// transitions from that state. (Subsequent epsilon transitions are then -/// pushed on to the stack as more `Explore` frames.) If the state ID being -/// explored has no epsilon transitions, then the capturing group slots are -/// copied from the original state that sparked the epsilon closure (from the -/// 'step' routine) to the state ID being explored. This way, capturing group -/// slots are forwarded from the previous state to the next. -/// -/// The other stack frame, `RestoreCaptures`, instructs the epsilon closure to -/// set the position for a particular slot back to some particular offset. This -/// frame is pushed when `Explore` sees a `Capture` transition. `Explore` will -/// set the offset of the slot indicated in `Capture` to the current offset, -/// and then push the old offset on to the stack as a `RestoreCapture` frame. -/// Thus, the new offset is only used until the epsilon closure reverts back to -/// the `RestoreCapture` frame. In effect, this gives the `Capture` epsilon -/// transition its "scope" to only states that come "after" it during depth -/// first traversal. -#[derive(Clone, Debug)] -enum FollowEpsilon { - /// Explore the epsilon transitions from a state ID. - Explore(StateID), - /// Reset the given `slot` to the given `offset` (which might be `None`). - RestoreCapture { slot: SmallIndex, offset: Option }, -} - -/// A set of counters that "instruments" a PikeVM search. To enable this, you -/// must enable the 'internal-instrument-pikevm' feature. Then run your Rust -/// program with RUST_LOG=regex_automata::nfa::thompson::pikevm=trace set in -/// the environment. The metrics collected will be dumped automatically for -/// every search executed by the PikeVM. -/// -/// NOTE: When 'internal-instrument-pikevm' is enabled, it will likely cause an -/// absolute decrease in wall-clock performance, even if the 'trace' log level -/// isn't enabled. (Although, we do try to avoid extra costs when 'trace' isn't -/// enabled.) The main point of instrumentation is to get counts of various -/// events that occur during the PikeVM's execution. -/// -/// This is a somewhat hacked together collection of metrics that are useful -/// to gather from a PikeVM search. In particular, it lets us scrutinize the -/// performance profile of a search beyond what general purpose profiling tools -/// give us. Namely, we orient the profiling data around the specific states of -/// the NFA. -/// -/// In other words, this lets us see which parts of the NFA graph are most -/// frequently activated. This then provides direction for optimization -/// opportunities. -/// -/// The really sad part about this is that it absolutely clutters up the PikeVM -/// implementation. :'( Another approach would be to just manually add this -/// code in whenever I want this kind of profiling data, but it's complicated -/// and tedious enough that I went with this approach... for now. 
-/// -/// When instrumentation is enabled (which also turns on 'logging'), then a -/// `Counters` is initialized for every search and `trace`'d just before the -/// search returns to the caller. -/// -/// Tip: When debugging performance problems with the PikeVM, it's best to try -/// to work with an NFA that is as small as possible. Otherwise the state graph -/// is likely to be too big to digest. -#[cfg(feature = "internal-instrument-pikevm")] -#[derive(Clone, Debug)] -struct Counters { - /// The number of times the NFA is in a particular permutation of states. - state_sets: alloc::collections::BTreeMap, u64>, - /// The number of times 'step' is called for a particular state ID (which - /// indexes this array). - steps: Vec, - /// The number of times an epsilon closure was computed for a state. - closures: Vec, - /// The number of times a particular state ID is pushed on to a stack while - /// computing an epsilon closure. - stack_pushes: Vec, - /// The number of times a particular state ID is inserted into a sparse set - /// while computing an epsilon closure. - set_inserts: Vec, -} - -#[cfg(feature = "internal-instrument-pikevm")] -impl Counters { - fn empty() -> Counters { - Counters { - state_sets: alloc::collections::BTreeMap::new(), - steps: vec![], - closures: vec![], - stack_pushes: vec![], - set_inserts: vec![], - } - } - - fn reset(&mut self, nfa: &NFA) { - let len = nfa.states().len(); - - self.state_sets.clear(); - - self.steps.clear(); - self.steps.resize(len, 0); - - self.closures.clear(); - self.closures.resize(len, 0); - - self.stack_pushes.clear(); - self.stack_pushes.resize(len, 0); - - self.set_inserts.clear(); - self.set_inserts.resize(len, 0); - } - - fn eprint(&self, nfa: &NFA) { - trace!("===== START PikeVM Instrumentation Output ====="); - // We take the top-K most occurring state sets. Otherwise the output - // is likely to be overwhelming. And we probably only care about the - // most frequently occurring ones anyway. - const LIMIT: usize = 20; - let mut set_counts = - self.state_sets.iter().collect::, &u64)>>(); - set_counts.sort_by_key(|(_, &count)| core::cmp::Reverse(count)); - trace!("## PikeVM frequency of state sets (top {LIMIT})"); - for (set, count) in set_counts.iter().take(LIMIT) { - trace!("{set:?}: {count}"); - } - if set_counts.len() > LIMIT { - trace!( - "... 
{} sets omitted (out of {} total)", - set_counts.len() - LIMIT, - set_counts.len(), - ); - } - - trace!(""); - trace!("## PikeVM total frequency of events"); - trace!( - "steps: {}, closures: {}, stack-pushes: {}, set-inserts: {}", - self.steps.iter().copied().sum::(), - self.closures.iter().copied().sum::(), - self.stack_pushes.iter().copied().sum::(), - self.set_inserts.iter().copied().sum::(), - ); - - trace!(""); - trace!("## PikeVM frequency of events broken down by state"); - for sid in 0..self.steps.len() { - trace!( - "{:06}: steps: {}, closures: {}, \ - stack-pushes: {}, set-inserts: {}", - sid, - self.steps[sid], - self.closures[sid], - self.stack_pushes[sid], - self.set_inserts[sid], - ); - } - - trace!(""); - trace!("## NFA debug display"); - trace!("{nfa:?}"); - trace!("===== END PikeVM Instrumentation Output ====="); - } - - fn record_state_set(&mut self, set: &SparseSet) { - let set = set.iter().collect::>(); - *self.state_sets.entry(set).or_insert(0) += 1; - } - - fn record_step(&mut self, sid: StateID) { - self.steps[sid] += 1; - } - - fn record_closure(&mut self, sid: StateID) { - self.closures[sid] += 1; - } - - fn record_stack_push(&mut self, sid: StateID) { - self.stack_pushes[sid] += 1; - } - - fn record_set_insert(&mut self, sid: StateID) { - self.set_inserts[sid] += 1; - } -} diff --git a/vendor/regex-automata/src/nfa/thompson/range_trie.rs b/vendor/regex-automata/src/nfa/thompson/range_trie.rs deleted file mode 100644 index 57ae322d50af21..00000000000000 --- a/vendor/regex-automata/src/nfa/thompson/range_trie.rs +++ /dev/null @@ -1,1051 +0,0 @@ -/* -I've called the primary data structure in this module a "range trie." As far -as I can tell, there is no prior art on a data structure like this, however, -it's likely someone somewhere has built something like it. Searching for -"range trie" turns up the paper "Range Tries for Scalable Address Lookup," -but it does not appear relevant. - -The range trie is just like a trie in that it is a special case of a -deterministic finite state machine. It has states and each state has a set -of transitions to other states. It is acyclic, and, like a normal trie, -it makes no attempt to reuse common suffixes among its elements. The key -difference between a normal trie and a range trie below is that a range trie -operates on *contiguous sequences* of bytes instead of singleton bytes. -One could say say that our alphabet is ranges of bytes instead of bytes -themselves, except a key part of range trie construction is splitting ranges -apart to ensure there is at most one transition that can be taken for any -byte in a given state. - -I've tried to explain the details of how the range trie works below, so -for now, we are left with trying to understand what problem we're trying to -solve. Which is itself fairly involved! - -At the highest level, here's what we want to do. We want to convert a -sequence of Unicode codepoints into a finite state machine whose transitions -are over *bytes* and *not* Unicode codepoints. We want this because it makes -said finite state machines much smaller and much faster to execute. 
As a -simple example, consider a byte oriented automaton for all Unicode scalar -values (0x00 through 0x10FFFF, not including surrogate codepoints): - - [00-7F] - [C2-DF][80-BF] - [E0-E0][A0-BF][80-BF] - [E1-EC][80-BF][80-BF] - [ED-ED][80-9F][80-BF] - [EE-EF][80-BF][80-BF] - [F0-F0][90-BF][80-BF][80-BF] - [F1-F3][80-BF][80-BF][80-BF] - [F4-F4][80-8F][80-BF][80-BF] - -(These byte ranges are generated via the regex-syntax::utf8 module, which -was based on Russ Cox's code in RE2, which was in turn based on Ken -Thompson's implementation of the same idea in his Plan9 implementation of -grep.) - -It should be fairly straight-forward to see how one could compile this into -a DFA. The sequences are sorted and non-overlapping. Essentially, you could -build a trie from this fairly easy. The problem comes when your initial -range (in this case, 0x00-0x10FFFF) isn't so nice. For example, the class -represented by '\w' contains only a tenth of the codepoints that -0x00-0x10FFFF contains, but if we were to write out the byte based ranges -as we did above, the list would stretch to 892 entries! This turns into -quite a large NFA with a few thousand states. Turning this beast into a DFA -takes quite a bit of time. We are thus left with trying to trim down the -number of states we produce as early as possible. - -One approach (used by RE2 and still by the regex crate, at time of writing) -is to try to find common suffixes while building NFA states for the above -and reuse them. This is very cheap to do and one can control precisely how -much extra memory you want to use for the cache. - -Another approach, however, is to reuse an algorithm for constructing a -*minimal* DFA from a sorted sequence of inputs. I don't want to go into -the full details here, but I explain it in more depth in my blog post on -FSTs[1]. Note that the algorithm was not invented by me, but was published -in paper by Daciuk et al. in 2000 called "Incremental Construction of -MinimalAcyclic Finite-State Automata." Like the suffix cache approach above, -it is also possible to control the amount of extra memory one uses, although -this usually comes with the cost of sacrificing true minimality. (But it's -typically close enough with a reasonably sized cache of states.) - -The catch is that Daciuk's algorithm only works if you add your keys in -lexicographic ascending order. In our case, since we're dealing with ranges, -we also need the additional requirement that ranges are either equivalent -or do not overlap at all. For example, if one were given the following byte -ranges: - - [BC-BF][80-BF] - [BC-BF][90-BF] - -Then Daciuk's algorithm would not work, since there is nothing to handle the -fact that the ranges overlap. They would need to be split apart. Thankfully, -Thompson's algorithm for producing byte ranges for Unicode codepoint ranges -meets both of our requirements. (A proof for this eludes me, but it appears -true.) - -... however, we would also like to be able to compile UTF-8 automata in -reverse. We want this because in order to find the starting location of a -match using a DFA, we need to run a second DFA---a reversed version of the -forward DFA---backwards to discover the match location. 
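The forward byte-range table above can be reproduced with the regex-syntax crate's utf8 module that the comment refers to. A minimal sketch, assuming the `Utf8Sequences` iterator and `Utf8Range` type from regex-syntax are available as described (not taken from this patch):

```rust
// Minimal sketch: emit the UTF-8 byte-range sequences for a range of
// Unicode scalar values, in the same format as the table above.
// Assumes the regex-syntax crate's utf8 module.
use regex_syntax::utf8::Utf8Sequences;

fn main() {
    // All Unicode scalar values, as in the table above (chars cannot be
    // surrogates, so the generated ranges skip them).
    for seq in Utf8Sequences::new('\u{0}', '\u{10FFFF}') {
        let ranges: String = seq
            .as_slice()
            .iter()
            .map(|r| format!("[{:02X}-{:02X}]", r.start, r.end))
            .collect();
        println!("{}", ranges);
    }
}
```

If the API behaves as described, running this over 0x00-0x10FFFF should print the nine sorted, non-overlapping sequences listed above; reversing each sequence is what produces the overlapping ranges discussed next.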
Unfortunately, if -we reverse our byte sequences for 0x00-0x10FFFF, we get sequences that are -can overlap, even if they are sorted: - - [00-7F] - [80-BF][80-9F][ED-ED] - [80-BF][80-BF][80-8F][F4-F4] - [80-BF][80-BF][80-BF][F1-F3] - [80-BF][80-BF][90-BF][F0-F0] - [80-BF][80-BF][E1-EC] - [80-BF][80-BF][EE-EF] - [80-BF][A0-BF][E0-E0] - [80-BF][C2-DF] - -For example, '[80-BF][80-BF][EE-EF]' and '[80-BF][A0-BF][E0-E0]' have -overlapping ranges between '[80-BF]' and '[A0-BF]'. Thus, there is no -simple way to apply Daciuk's algorithm. - -And thus, the range trie was born. The range trie's only purpose is to take -sequences of byte ranges like the ones above, collect them into a trie and then -spit them out in a sorted fashion with no overlapping ranges. For example, -0x00-0x10FFFF gets translated to: - - [0-7F] - [80-BF][80-9F][80-8F][F1-F3] - [80-BF][80-9F][80-8F][F4] - [80-BF][80-9F][90-BF][F0] - [80-BF][80-9F][90-BF][F1-F3] - [80-BF][80-9F][E1-EC] - [80-BF][80-9F][ED] - [80-BF][80-9F][EE-EF] - [80-BF][A0-BF][80-8F][F1-F3] - [80-BF][A0-BF][80-8F][F4] - [80-BF][A0-BF][90-BF][F0] - [80-BF][A0-BF][90-BF][F1-F3] - [80-BF][A0-BF][E0] - [80-BF][A0-BF][E1-EC] - [80-BF][A0-BF][EE-EF] - [80-BF][C2-DF] - -We've thus satisfied our requirements for running Daciuk's algorithm. All -sequences of ranges are sorted, and any corresponding ranges are either -exactly equivalent or non-overlapping. - -In effect, a range trie is building a DFA from a sequence of arbitrary byte -ranges. But it uses an algorithm custom tailored to its input, so it is not as -costly as traditional DFA construction. While it is still quite a bit more -costly than the forward case (which only needs Daciuk's algorithm), it winds -up saving a substantial amount of time if one is doing a full DFA powerset -construction later by virtue of producing a much much smaller NFA. - -[1] - https://blog.burntsushi.net/transducers/ -[2] - https://www.mitpressjournals.org/doi/pdfplus/10.1162/089120100561601 -*/ - -use core::{cell::RefCell, fmt, mem, ops::RangeInclusive}; - -use alloc::{format, string::String, vec, vec::Vec}; - -use regex_syntax::utf8::Utf8Range; - -use crate::util::primitives::StateID; - -/// There is only one final state in this trie. Every sequence of byte ranges -/// added shares the same final state. -const FINAL: StateID = StateID::ZERO; - -/// The root state of the trie. -const ROOT: StateID = StateID::new_unchecked(1); - -/// A range trie represents an ordered set of sequences of bytes. -/// -/// A range trie accepts as input a sequence of byte ranges and merges -/// them into the existing set such that the trie can produce a sorted -/// non-overlapping sequence of byte ranges. The sequence emitted corresponds -/// precisely to the sequence of bytes matched by the given keys, although the -/// byte ranges themselves may be split at different boundaries. -/// -/// The order complexity of this data structure seems difficult to analyze. -/// If the size of a byte is held as a constant, then insertion is clearly -/// O(n) where n is the number of byte ranges in the input key. However, if -/// k=256 is our alphabet size, then insertion could be O(k^2 * n). In -/// particular it seems possible for pathological inputs to cause insertion -/// to do a lot of work. However, for what we use this data structure for, -/// there should be no pathological inputs since the ultimate source is always -/// a sorted set of Unicode scalar value ranges. -/// -/// Internally, this trie is setup like a finite state machine. 
Note though -/// that it is acyclic. -#[derive(Clone)] -pub struct RangeTrie { - /// The states in this trie. The first is always the shared final state. - /// The second is always the root state. Otherwise, there is no - /// particular order. - states: Vec, - /// A free-list of states. When a range trie is cleared, all of its states - /// are added to this list. Creating a new state reuses states from this - /// list before allocating a new one. - free: Vec, - /// A stack for traversing this trie to yield sequences of byte ranges in - /// lexicographic order. - iter_stack: RefCell>, - /// A buffer that stores the current sequence during iteration. - iter_ranges: RefCell>, - /// A stack used for traversing the trie in order to (deeply) duplicate - /// a state. States are recursively duplicated when ranges are split. - dupe_stack: Vec, - /// A stack used for traversing the trie during insertion of a new - /// sequence of byte ranges. - insert_stack: Vec, -} - -/// A single state in this trie. -#[derive(Clone)] -struct State { - /// A sorted sequence of non-overlapping transitions to other states. Each - /// transition corresponds to a single range of bytes. - transitions: Vec, -} - -/// A transition is a single range of bytes. If a particular byte is in this -/// range, then the corresponding machine may transition to the state pointed -/// to by `next_id`. -#[derive(Clone)] -struct Transition { - /// The byte range. - range: Utf8Range, - /// The next state to transition to. - next_id: StateID, -} - -impl RangeTrie { - /// Create a new empty range trie. - pub fn new() -> RangeTrie { - let mut trie = RangeTrie { - states: vec![], - free: vec![], - iter_stack: RefCell::new(vec![]), - iter_ranges: RefCell::new(vec![]), - dupe_stack: vec![], - insert_stack: vec![], - }; - trie.clear(); - trie - } - - /// Clear this range trie such that it is empty. Clearing a range trie - /// and reusing it can beneficial because this may reuse allocations. - pub fn clear(&mut self) { - self.free.append(&mut self.states); - self.add_empty(); // final - self.add_empty(); // root - } - - /// Iterate over all of the sequences of byte ranges in this trie, and - /// call the provided function for each sequence. Iteration occurs in - /// lexicographic order. - pub fn iter Result<(), E>>( - &self, - mut f: F, - ) -> Result<(), E> { - let mut stack = self.iter_stack.borrow_mut(); - stack.clear(); - let mut ranges = self.iter_ranges.borrow_mut(); - ranges.clear(); - - // We do iteration in a way that permits us to use a single buffer - // for our keys. We iterate in a depth first fashion, while being - // careful to expand our frontier as we move deeper in the trie. - stack.push(NextIter { state_id: ROOT, tidx: 0 }); - while let Some(NextIter { mut state_id, mut tidx }) = stack.pop() { - // This could be implemented more simply without an inner loop - // here, but at the cost of more stack pushes. - loop { - let state = self.state(state_id); - // If we've visited all transitions in this state, then pop - // back to the parent state. - if tidx >= state.transitions.len() { - ranges.pop(); - break; - } - - let t = &state.transitions[tidx]; - ranges.push(t.range); - if t.next_id == FINAL { - f(&ranges)?; - ranges.pop(); - tidx += 1; - } else { - // Expand our frontier. Once we come back to this state - // via the stack, start in on the next transition. - stack.push(NextIter { state_id, tidx: tidx + 1 }); - // Otherwise, move to the first transition of the next - // state. 
- state_id = t.next_id; - tidx = 0; - } - } - } - Ok(()) - } - - /// Inserts a new sequence of ranges into this trie. - /// - /// The sequence given must be non-empty and must not have a length - /// exceeding 4. - pub fn insert(&mut self, ranges: &[Utf8Range]) { - assert!(!ranges.is_empty()); - assert!(ranges.len() <= 4); - - let mut stack = core::mem::replace(&mut self.insert_stack, vec![]); - stack.clear(); - - stack.push(NextInsert::new(ROOT, ranges)); - while let Some(next) = stack.pop() { - let (state_id, ranges) = (next.state_id(), next.ranges()); - assert!(!ranges.is_empty()); - - let (mut new, rest) = (ranges[0], &ranges[1..]); - - // i corresponds to the position of the existing transition on - // which we are operating. Typically, the result is to remove the - // transition and replace it with two or more new transitions - // corresponding to the partitions generated by splitting the - // 'new' with the ith transition's range. - let mut i = self.state(state_id).find(new); - - // In this case, there is no overlap *and* the new range is greater - // than all existing ranges. So we can just add it to the end. - if i == self.state(state_id).transitions.len() { - let next_id = NextInsert::push(self, &mut stack, rest); - self.add_transition(state_id, new, next_id); - continue; - } - - // The need for this loop is a bit subtle, buf basically, after - // we've handled the partitions from our initial split, it's - // possible that there will be a partition leftover that overlaps - // with a subsequent transition. If so, then we have to repeat - // the split process again with the leftovers and that subsequent - // transition. - 'OUTER: loop { - let old = self.state(state_id).transitions[i].clone(); - let split = match Split::new(old.range, new) { - Some(split) => split, - None => { - let next_id = NextInsert::push(self, &mut stack, rest); - self.add_transition_at(i, state_id, new, next_id); - continue; - } - }; - let splits = split.as_slice(); - // If we only have one partition, then the ranges must be - // equivalent. There's nothing to do here for this state, so - // just move on to the next one. - if splits.len() == 1 { - // ... but only if we have anything left to do. - if !rest.is_empty() { - stack.push(NextInsert::new(old.next_id, rest)); - } - break; - } - // At this point, we know that 'split' is non-empty and there - // must be some overlap AND that the two ranges are not - // equivalent. Therefore, the existing range MUST be removed - // and split up somehow. Instead of actually doing the removal - // and then a subsequent insertion---with all the memory - // shuffling that entails---we simply overwrite the transition - // at position `i` for the first new transition we want to - // insert. After that, we're forced to do expensive inserts. - let mut first = true; - let mut add_trans = - |trie: &mut RangeTrie, pos, from, range, to| { - if first { - trie.set_transition_at(pos, from, range, to); - first = false; - } else { - trie.add_transition_at(pos, from, range, to); - } - }; - for (j, &srange) in splits.iter().enumerate() { - match srange { - SplitRange::Old(r) => { - // Deep clone the state pointed to by the ith - // transition. This is always necessary since 'old' - // is always coupled with at least a 'both' - // partition. We don't want any new changes made - // via the 'both' partition to impact the part of - // the transition that doesn't overlap with the - // new range. 
- let dup_id = self.duplicate(old.next_id); - add_trans(self, i, state_id, r, dup_id); - } - SplitRange::New(r) => { - // This is a bit subtle, but if this happens to be - // the last partition in our split, it is possible - // that this overlaps with a subsequent transition. - // If it does, then we must repeat the whole - // splitting process over again with `r` and the - // subsequent transition. - { - let trans = &self.state(state_id).transitions; - if j + 1 == splits.len() - && i < trans.len() - && intersects(r, trans[i].range) - { - new = r; - continue 'OUTER; - } - } - - // ... otherwise, setup exploration for a new - // empty state and add a brand new transition for - // this new range. - let next_id = - NextInsert::push(self, &mut stack, rest); - add_trans(self, i, state_id, r, next_id); - } - SplitRange::Both(r) => { - // Continue adding the remaining ranges on this - // path and update the transition with the new - // range. - if !rest.is_empty() { - stack.push(NextInsert::new(old.next_id, rest)); - } - add_trans(self, i, state_id, r, old.next_id); - } - } - i += 1; - } - // If we've reached this point, then we know that there are - // no subsequent transitions with any overlap. Therefore, we - // can stop processing this range and move on to the next one. - break; - } - } - self.insert_stack = stack; - } - - pub fn add_empty(&mut self) -> StateID { - let id = match StateID::try_from(self.states.len()) { - Ok(id) => id, - Err(_) => { - // This generally should not happen since a range trie is - // only ever used to compile a single sequence of Unicode - // scalar values. If we ever got to this point, we would, at - // *minimum*, be using 96GB in just the range trie alone. - panic!("too many sequences added to range trie"); - } - }; - // If we have some free states available, then use them to avoid - // more allocations. - if let Some(mut state) = self.free.pop() { - state.clear(); - self.states.push(state); - } else { - self.states.push(State { transitions: vec![] }); - } - id - } - - /// Performs a deep clone of the given state and returns the duplicate's - /// state ID. - /// - /// A "deep clone" in this context means that the state given along with - /// recursively all states that it points to are copied. Once complete, - /// the given state ID and the returned state ID share nothing. - /// - /// This is useful during range trie insertion when a new range overlaps - /// with an existing range that is bigger than the new one. The part - /// of the existing range that does *not* overlap with the new one is - /// duplicated so that adding the new range to the overlap doesn't disturb - /// the non-overlapping portion. - /// - /// There's one exception: if old_id is the final state, then it is not - /// duplicated and the same final state is returned. This is because all - /// final states in this trie are equivalent. - fn duplicate(&mut self, old_id: StateID) -> StateID { - if old_id == FINAL { - return FINAL; - } - - let mut stack = mem::replace(&mut self.dupe_stack, vec![]); - stack.clear(); - - let new_id = self.add_empty(); - // old_id is the state we're cloning and new_id is the ID of the - // duplicated state for old_id. - stack.push(NextDupe { old_id, new_id }); - while let Some(NextDupe { old_id, new_id }) = stack.pop() { - for i in 0..self.state(old_id).transitions.len() { - let t = self.state(old_id).transitions[i].clone(); - if t.next_id == FINAL { - // All final states are the same, so there's no need to - // duplicate it. 
- self.add_transition(new_id, t.range, FINAL); - continue; - } - - let new_child_id = self.add_empty(); - self.add_transition(new_id, t.range, new_child_id); - stack.push(NextDupe { - old_id: t.next_id, - new_id: new_child_id, - }); - } - } - self.dupe_stack = stack; - new_id - } - - /// Adds the given transition to the given state. - /// - /// Callers must ensure that all previous transitions in this state - /// are lexicographically smaller than the given range. - fn add_transition( - &mut self, - from_id: StateID, - range: Utf8Range, - next_id: StateID, - ) { - self.state_mut(from_id) - .transitions - .push(Transition { range, next_id }); - } - - /// Like `add_transition`, except this inserts the transition just before - /// the ith transition. - fn add_transition_at( - &mut self, - i: usize, - from_id: StateID, - range: Utf8Range, - next_id: StateID, - ) { - self.state_mut(from_id) - .transitions - .insert(i, Transition { range, next_id }); - } - - /// Overwrites the transition at position i with the given transition. - fn set_transition_at( - &mut self, - i: usize, - from_id: StateID, - range: Utf8Range, - next_id: StateID, - ) { - self.state_mut(from_id).transitions[i] = Transition { range, next_id }; - } - - /// Return an immutable borrow for the state with the given ID. - fn state(&self, id: StateID) -> &State { - &self.states[id] - } - - /// Return a mutable borrow for the state with the given ID. - fn state_mut(&mut self, id: StateID) -> &mut State { - &mut self.states[id] - } -} - -impl State { - /// Find the position at which the given range should be inserted in this - /// state. - /// - /// The position returned is always in the inclusive range - /// [0, transitions.len()]. If 'transitions.len()' is returned, then the - /// given range overlaps with no other range in this state *and* is greater - /// than all of them. - /// - /// For all other possible positions, the given range either overlaps - /// with the transition at that position or is otherwise less than it - /// with no overlap (and is greater than the previous transition). In the - /// former case, careful attention must be paid to inserting this range - /// as a new transition. In the latter case, the range can be inserted as - /// a new transition at the given position without disrupting any other - /// transitions. - fn find(&self, range: Utf8Range) -> usize { - /// Returns the position `i` at which `pred(xs[i])` first returns true - /// such that for all `j >= i`, `pred(xs[j]) == true`. If `pred` never - /// returns true, then `xs.len()` is returned. - /// - /// We roll our own binary search because it doesn't seem like the - /// standard library's binary search can be used here. Namely, if - /// there is an overlapping range, then we want to find the first such - /// occurrence, but there may be many. Or at least, it's not quite - /// clear to me how to do it. - fn binary_search(xs: &[T], mut pred: F) -> usize - where - F: FnMut(&T) -> bool, - { - let (mut left, mut right) = (0, xs.len()); - while left < right { - // Overflow is impossible because xs.len() <= 256. - let mid = (left + right) / 2; - if pred(&xs[mid]) { - right = mid; - } else { - left = mid + 1; - } - } - left - } - - // Benchmarks suggest that binary search is just a bit faster than - // straight linear search. 
Specifically when using the debug tool: - // - // hyperfine "regex-cli debug thompson -qr --captures none '\w{90} ecurB'" - binary_search(&self.transitions, |t| range.start <= t.range.end) - } - - /// Clear this state such that it has zero transitions. - fn clear(&mut self) { - self.transitions.clear(); - } -} - -/// The next state to process during duplication. -#[derive(Clone, Debug)] -struct NextDupe { - /// The state we want to duplicate. - old_id: StateID, - /// The ID of the new state that is a duplicate of old_id. - new_id: StateID, -} - -/// The next state (and its corresponding transition) that we want to visit -/// during iteration in lexicographic order. -#[derive(Clone, Debug)] -struct NextIter { - state_id: StateID, - tidx: usize, -} - -/// The next state to process during insertion and any remaining ranges that we -/// want to add for a particular sequence of ranges. The first such instance -/// is always the root state along with all ranges given. -#[derive(Clone, Debug)] -struct NextInsert { - /// The next state to begin inserting ranges. This state should be the - /// state at which `ranges[0]` should be inserted. - state_id: StateID, - /// The ranges to insert. We used a fixed-size array here to avoid an - /// allocation. - ranges: [Utf8Range; 4], - /// The number of valid ranges in the above array. - len: u8, -} - -impl NextInsert { - /// Create the next item to visit. The given state ID should correspond - /// to the state at which the first range in the given slice should be - /// inserted. The slice given must not be empty and it must be no longer - /// than 4. - fn new(state_id: StateID, ranges: &[Utf8Range]) -> NextInsert { - let len = ranges.len(); - assert!(len > 0); - assert!(len <= 4); - - let mut tmp = [Utf8Range { start: 0, end: 0 }; 4]; - tmp[..len].copy_from_slice(ranges); - NextInsert { state_id, ranges: tmp, len: u8::try_from(len).unwrap() } - } - - /// Push a new empty state to visit along with any remaining ranges that - /// still need to be inserted. The ID of the new empty state is returned. - /// - /// If ranges is empty, then no new state is created and FINAL is returned. - fn push( - trie: &mut RangeTrie, - stack: &mut Vec, - ranges: &[Utf8Range], - ) -> StateID { - if ranges.is_empty() { - FINAL - } else { - let next_id = trie.add_empty(); - stack.push(NextInsert::new(next_id, ranges)); - next_id - } - } - - /// Return the ID of the state to visit. - fn state_id(&self) -> StateID { - self.state_id - } - - /// Return the remaining ranges to insert. - fn ranges(&self) -> &[Utf8Range] { - &self.ranges[..usize::try_from(self.len).unwrap()] - } -} - -/// Split represents a partitioning of two ranges into one or more ranges. This -/// is the secret sauce that makes a range trie work, as it's what tells us -/// how to deal with two overlapping but unequal ranges during insertion. -/// -/// Essentially, either two ranges overlap or they don't. If they don't, then -/// handling insertion is easy: just insert the new range into its -/// lexicographically correct position. Since it does not overlap with anything -/// else, no other transitions are impacted by the new range. -/// -/// If they do overlap though, there are generally three possible cases to -/// handle: -/// -/// 1. The part where the two ranges actually overlap. i.e., The intersection. -/// 2. The part of the existing range that is not in the new range. -/// 3. The part of the new range that is not in the old range. 
-/// -/// (1) is guaranteed to always occur since all overlapping ranges have a -/// non-empty intersection. If the two ranges are not equivalent, then at -/// least one of (2) or (3) is guaranteed to occur as well. In some cases, -/// e.g., `[0-4]` and `[4-9]`, all three cases will occur. -/// -/// This `Split` type is responsible for providing (1), (2) and (3) for any -/// possible pair of byte ranges. -/// -/// As for insertion, for the overlap in (1), the remaining ranges to insert -/// should be added by following the corresponding transition. However, this -/// should only be done for the overlapping parts of the range. If there was -/// a part of the existing range that was not in the new range, then that -/// existing part must be split off from the transition and duplicated. The -/// remaining parts of the overlap can then be added to using the new ranges -/// without disturbing the existing range. -/// -/// Handling the case for the part of a new range that is not in an existing -/// range is seemingly easy. Just treat it as if it were a non-overlapping -/// range. The problem here is that if this new non-overlapping range occurs -/// after both (1) and (2), then it's possible that it can overlap with the -/// next transition in the current state. If it does, then the whole process -/// must be repeated! -/// -/// # Details of the 3 cases -/// -/// The following details the various cases that are implemented in code -/// below. It's plausible that the number of cases is not actually minimal, -/// but it's important for this code to remain at least somewhat readable. -/// -/// Given [a,b] and [x,y], where a <= b, x <= y, b < 256 and y < 256, we define -/// the follow distinct relationships where at least one must apply. The order -/// of these matters, since multiple can match. The first to match applies. -/// -/// 1. b < x <=> [a,b] < [x,y] -/// 2. y < a <=> [x,y] < [a,b] -/// -/// In the case of (1) and (2), these are the only cases where there is no -/// overlap. Or otherwise, the intersection of [a,b] and [x,y] is empty. In -/// order to compute the intersection, one can do [max(a,x), min(b,y)]. The -/// intersection in all of the following cases is non-empty. -/// -/// 3. a = x && b = y <=> [a,b] == [x,y] -/// 4. a = x && b < y <=> [x,y] right-extends [a,b] -/// 5. b = y && a > x <=> [x,y] left-extends [a,b] -/// 6. x = a && y < b <=> [a,b] right-extends [x,y] -/// 7. y = b && x > a <=> [a,b] left-extends [x,y] -/// 8. a > x && b < y <=> [x,y] covers [a,b] -/// 9. x > a && y < b <=> [a,b] covers [x,y] -/// 10. b = x && a < y <=> [a,b] is left-adjacent to [x,y] -/// 11. y = a && x < b <=> [x,y] is left-adjacent to [a,b] -/// 12. b > x && b < y <=> [a,b] left-overlaps [x,y] -/// 13. y > a && y < b <=> [x,y] left-overlaps [a,b] -/// -/// In cases 3-13, we can form rules that partition the ranges into a -/// non-overlapping ordered sequence of ranges: -/// -/// 3. [a,b] -/// 4. [a,b], [b+1,y] -/// 5. [x,a-1], [a,b] -/// 6. [x,y], [y+1,b] -/// 7. [a,x-1], [x,y] -/// 8. [x,a-1], [a,b], [b+1,y] -/// 9. [a,x-1], [x,y], [y+1,b] -/// 10. [a,b-1], [b,b], [b+1,y] -/// 11. [x,y-1], [y,y], [y+1,b] -/// 12. [a,x-1], [x,b], [b+1,y] -/// 13. [x,a-1], [a,y], [y+1,b] -/// -/// In the code below, we go a step further and identify each of the above -/// outputs as belonging either to the overlap of the two ranges or to one -/// of [a,b] or [x,y] exclusively. 
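The thirteen cases above all reduce to the same shape: compute the intersection [max(a,x), min(b,y)] and tag whatever is left over on either side by the range it came from. The standalone sketch below is illustrative only; `partition` and its string tags are invented names, not the `Split::new` constructor that follows, and the two checks mirror the case 8 and case 12 rows of the table.

// Hypothetical sketch, not the crate's Split implementation: partition two
// overlapping inclusive byte ranges [a,b] (old) and [x,y] (new) into the
// pieces unique to the old range, the shared intersection, and the pieces
// unique to the new range.
fn partition(a: u8, b: u8, x: u8, y: u8) -> Option<Vec<(&'static str, u8, u8)>> {
    // Cases 1 and 2: the ranges do not overlap at all.
    if b < x || y < a {
        return None;
    }
    // The intersection is [max(a,x), min(b,y)] and is always non-empty here.
    let (lo, hi) = (a.max(x), b.min(y));
    let mut parts = Vec::new();
    // Anything strictly below the intersection belongs only to whichever
    // range starts earlier.
    if a < lo {
        parts.push(("old", a, lo - 1));
    } else if x < lo {
        parts.push(("new", x, lo - 1));
    }
    parts.push(("both", lo, hi));
    // Anything strictly above the intersection belongs only to whichever
    // range ends later.
    if b > hi {
        parts.push(("old", hi + 1, b));
    } else if y > hi {
        parts.push(("new", hi + 1, y));
    }
    Some(parts)
}

fn main() {
    // Case 8: [a,b] = [3,6] is covered by [x,y] = [2,7], giving
    // new [2,2], both [3,6], new [7,7].
    assert_eq!(
        partition(3, 6, 2, 7),
        Some(vec![("new", 2, 2), ("both", 3, 6), ("new", 7, 7)])
    );
    // Case 12: [a,b] = [3,7] left-overlaps [x,y] = [5,9], giving
    // old [3,4], both [5,7], new [8,9].
    assert_eq!(
        partition(3, 7, 5, 9),
        Some(vec![("old", 3, 4), ("both", 5, 7), ("new", 8, 9)])
    );
}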
-#[derive(Clone, Debug, Eq, PartialEq)] -struct Split { - partitions: [SplitRange; 3], - len: usize, -} - -/// A tagged range indicating how it was derived from a pair of ranges. -#[derive(Clone, Copy, Debug, Eq, PartialEq)] -enum SplitRange { - Old(Utf8Range), - New(Utf8Range), - Both(Utf8Range), -} - -impl Split { - /// Create a partitioning of the given ranges. - /// - /// If the given ranges have an empty intersection, then None is returned. - fn new(o: Utf8Range, n: Utf8Range) -> Option { - let range = |r: RangeInclusive| Utf8Range { - start: *r.start(), - end: *r.end(), - }; - let old = |r| SplitRange::Old(range(r)); - let new = |r| SplitRange::New(range(r)); - let both = |r| SplitRange::Both(range(r)); - - // Use same names as the comment above to make it easier to compare. - let (a, b, x, y) = (o.start, o.end, n.start, n.end); - - if b < x || y < a { - // case 1, case 2 - None - } else if a == x && b == y { - // case 3 - Some(Split::parts1(both(a..=b))) - } else if a == x && b < y { - // case 4 - Some(Split::parts2(both(a..=b), new(b + 1..=y))) - } else if b == y && a > x { - // case 5 - Some(Split::parts2(new(x..=a - 1), both(a..=b))) - } else if x == a && y < b { - // case 6 - Some(Split::parts2(both(x..=y), old(y + 1..=b))) - } else if y == b && x > a { - // case 7 - Some(Split::parts2(old(a..=x - 1), both(x..=y))) - } else if a > x && b < y { - // case 8 - Some(Split::parts3(new(x..=a - 1), both(a..=b), new(b + 1..=y))) - } else if x > a && y < b { - // case 9 - Some(Split::parts3(old(a..=x - 1), both(x..=y), old(y + 1..=b))) - } else if b == x && a < y { - // case 10 - Some(Split::parts3(old(a..=b - 1), both(b..=b), new(b + 1..=y))) - } else if y == a && x < b { - // case 11 - Some(Split::parts3(new(x..=y - 1), both(y..=y), old(y + 1..=b))) - } else if b > x && b < y { - // case 12 - Some(Split::parts3(old(a..=x - 1), both(x..=b), new(b + 1..=y))) - } else if y > a && y < b { - // case 13 - Some(Split::parts3(new(x..=a - 1), both(a..=y), old(y + 1..=b))) - } else { - unreachable!() - } - } - - /// Create a new split with a single partition. This only occurs when two - /// ranges are equivalent. - fn parts1(r1: SplitRange) -> Split { - // This value doesn't matter since it is never accessed. - let nada = SplitRange::Old(Utf8Range { start: 0, end: 0 }); - Split { partitions: [r1, nada, nada], len: 1 } - } - - /// Create a new split with two partitions. - fn parts2(r1: SplitRange, r2: SplitRange) -> Split { - // This value doesn't matter since it is never accessed. - let nada = SplitRange::Old(Utf8Range { start: 0, end: 0 }); - Split { partitions: [r1, r2, nada], len: 2 } - } - - /// Create a new split with three partitions. - fn parts3(r1: SplitRange, r2: SplitRange, r3: SplitRange) -> Split { - Split { partitions: [r1, r2, r3], len: 3 } - } - - /// Return the partitions in this split as a slice. 
- fn as_slice(&self) -> &[SplitRange] { - &self.partitions[..self.len] - } -} - -impl fmt::Debug for RangeTrie { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - writeln!(f)?; - for (i, state) in self.states.iter().enumerate() { - let status = if i == FINAL.as_usize() { '*' } else { ' ' }; - writeln!(f, "{status}{i:06}: {state:?}")?; - } - Ok(()) - } -} - -impl fmt::Debug for State { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - let rs = self - .transitions - .iter() - .map(|t| format!("{t:?}")) - .collect::>() - .join(", "); - write!(f, "{rs}") - } -} - -impl fmt::Debug for Transition { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - if self.range.start == self.range.end { - write!( - f, - "{:02X} => {:02X}", - self.range.start, - self.next_id.as_usize(), - ) - } else { - write!( - f, - "{:02X}-{:02X} => {:02X}", - self.range.start, - self.range.end, - self.next_id.as_usize(), - ) - } - } -} - -/// Returns true if and only if the given ranges intersect. -fn intersects(r1: Utf8Range, r2: Utf8Range) -> bool { - !(r1.end < r2.start || r2.end < r1.start) -} - -#[cfg(test)] -mod tests { - use super::*; - - fn r(range: RangeInclusive) -> Utf8Range { - Utf8Range { start: *range.start(), end: *range.end() } - } - - fn split_maybe( - old: RangeInclusive, - new: RangeInclusive, - ) -> Option { - Split::new(r(old), r(new)) - } - - fn split( - old: RangeInclusive, - new: RangeInclusive, - ) -> Vec { - split_maybe(old, new).unwrap().as_slice().to_vec() - } - - #[test] - fn no_splits() { - // case 1 - assert_eq!(None, split_maybe(0..=1, 2..=3)); - // case 2 - assert_eq!(None, split_maybe(2..=3, 0..=1)); - } - - #[test] - fn splits() { - let range = |r: RangeInclusive| Utf8Range { - start: *r.start(), - end: *r.end(), - }; - let old = |r| SplitRange::Old(range(r)); - let new = |r| SplitRange::New(range(r)); - let both = |r| SplitRange::Both(range(r)); - - // case 3 - assert_eq!(split(0..=0, 0..=0), vec![both(0..=0)]); - assert_eq!(split(9..=9, 9..=9), vec![both(9..=9)]); - - // case 4 - assert_eq!(split(0..=5, 0..=6), vec![both(0..=5), new(6..=6)]); - assert_eq!(split(0..=5, 0..=8), vec![both(0..=5), new(6..=8)]); - assert_eq!(split(5..=5, 5..=8), vec![both(5..=5), new(6..=8)]); - - // case 5 - assert_eq!(split(1..=5, 0..=5), vec![new(0..=0), both(1..=5)]); - assert_eq!(split(3..=5, 0..=5), vec![new(0..=2), both(3..=5)]); - assert_eq!(split(5..=5, 0..=5), vec![new(0..=4), both(5..=5)]); - - // case 6 - assert_eq!(split(0..=6, 0..=5), vec![both(0..=5), old(6..=6)]); - assert_eq!(split(0..=8, 0..=5), vec![both(0..=5), old(6..=8)]); - assert_eq!(split(5..=8, 5..=5), vec![both(5..=5), old(6..=8)]); - - // case 7 - assert_eq!(split(0..=5, 1..=5), vec![old(0..=0), both(1..=5)]); - assert_eq!(split(0..=5, 3..=5), vec![old(0..=2), both(3..=5)]); - assert_eq!(split(0..=5, 5..=5), vec![old(0..=4), both(5..=5)]); - - // case 8 - assert_eq!( - split(3..=6, 2..=7), - vec![new(2..=2), both(3..=6), new(7..=7)], - ); - assert_eq!( - split(3..=6, 1..=8), - vec![new(1..=2), both(3..=6), new(7..=8)], - ); - - // case 9 - assert_eq!( - split(2..=7, 3..=6), - vec![old(2..=2), both(3..=6), old(7..=7)], - ); - assert_eq!( - split(1..=8, 3..=6), - vec![old(1..=2), both(3..=6), old(7..=8)], - ); - - // case 10 - assert_eq!( - split(3..=6, 6..=7), - vec![old(3..=5), both(6..=6), new(7..=7)], - ); - assert_eq!( - split(3..=6, 6..=8), - vec![old(3..=5), both(6..=6), new(7..=8)], - ); - assert_eq!( - split(5..=6, 6..=7), - vec![old(5..=5), both(6..=6), new(7..=7)], - ); - - // case 11 - 
assert_eq!( - split(6..=7, 3..=6), - vec![new(3..=5), both(6..=6), old(7..=7)], - ); - assert_eq!( - split(6..=8, 3..=6), - vec![new(3..=5), both(6..=6), old(7..=8)], - ); - assert_eq!( - split(6..=7, 5..=6), - vec![new(5..=5), both(6..=6), old(7..=7)], - ); - - // case 12 - assert_eq!( - split(3..=7, 5..=9), - vec![old(3..=4), both(5..=7), new(8..=9)], - ); - assert_eq!( - split(3..=5, 4..=6), - vec![old(3..=3), both(4..=5), new(6..=6)], - ); - - // case 13 - assert_eq!( - split(5..=9, 3..=7), - vec![new(3..=4), both(5..=7), old(8..=9)], - ); - assert_eq!( - split(4..=6, 3..=5), - vec![new(3..=3), both(4..=5), old(6..=6)], - ); - } - - // Arguably there should be more tests here, but in practice, this data - // structure is well covered by the huge number of regex tests. -} diff --git a/vendor/regex-automata/src/util/alphabet.rs b/vendor/regex-automata/src/util/alphabet.rs deleted file mode 100644 index 475f9515963751..00000000000000 --- a/vendor/regex-automata/src/util/alphabet.rs +++ /dev/null @@ -1,1139 +0,0 @@ -/*! -This module provides APIs for dealing with the alphabets of finite state -machines. - -There are two principal types in this module, [`ByteClasses`] and [`Unit`]. -The former defines the alphabet of a finite state machine while the latter -represents an element of that alphabet. - -To a first approximation, the alphabet of all automata in this crate is just -a `u8`. Namely, every distinct byte value. All 256 of them. In practice, this -can be quite wasteful when building a transition table for a DFA, since it -requires storing a state identifier for each element in the alphabet. Instead, -we collapse the alphabet of an automaton down into equivalence classes, where -every byte in the same equivalence class never discriminates between a match or -a non-match from any other byte in the same class. For example, in the regex -`[a-z]+`, then you could consider it having an alphabet consisting of two -equivalence classes: `a-z` and everything else. In terms of the transitions on -an automaton, it doesn't actually require representing every distinct byte. -Just the equivalence classes. - -The downside of equivalence classes is that, of course, searching a haystack -deals with individual byte values. Those byte values need to be mapped to -their corresponding equivalence class. This is what `ByteClasses` does. In -practice, doing this for every state transition has negligible impact on modern -CPUs. Moreover, it helps make more efficient use of the CPU cache by (possibly -considerably) shrinking the size of the transition table. - -One last hiccup concerns `Unit`. Namely, because of look-around and how the -DFAs in this crate work, we need to add a sentinel value to our alphabet -of equivalence classes that represents the "end" of a search. We call that -sentinel [`Unit::eoi`] or "end of input." Thus, a `Unit` is either an -equivalence class corresponding to a set of bytes, or it is a special "end of -input" sentinel. - -In general, you should not expect to need either of these types unless you're -doing lower level shenanigans with DFAs, or even building your own DFAs. -(Although, you don't have to use these types to build your own DFAs of course.) -For example, if you're walking a DFA's state graph, it's probably useful to -make use of [`ByteClasses`] to visit each element in the DFA's alphabet instead -of just visiting every distinct `u8` value. The latter isn't necessarily wrong, -but it could be potentially very wasteful. 
-*/ -use crate::util::{ - escape::DebugByte, - wire::{self, DeserializeError, SerializeError}, -}; - -/// Unit represents a single unit of haystack for DFA based regex engines. -/// -/// It is not expected for consumers of this crate to need to use this type -/// unless they are implementing their own DFA. And even then, it's not -/// required: implementors may use other techniques to handle haystack units. -/// -/// Typically, a single unit of haystack for a DFA would be a single byte. -/// However, for the DFAs in this crate, matches are delayed by a single byte -/// in order to handle look-ahead assertions (`\b`, `$` and `\z`). Thus, once -/// we have consumed the haystack, we must run the DFA through one additional -/// transition using a unit that indicates the haystack has ended. -/// -/// There is no way to represent a sentinel with a `u8` since all possible -/// values *may* be valid haystack units to a DFA, therefore this type -/// explicitly adds room for a sentinel value. -/// -/// The sentinel EOI value is always its own equivalence class and is -/// ultimately represented by adding 1 to the maximum equivalence class value. -/// So for example, the regex `^[a-z]+$` might be split into the following -/// equivalence classes: -/// -/// ```text -/// 0 => [\x00-`] -/// 1 => [a-z] -/// 2 => [{-\xFF] -/// 3 => [EOI] -/// ``` -/// -/// Where EOI is the special sentinel value that is always in its own -/// singleton equivalence class. -#[derive(Clone, Copy, Eq, PartialEq, PartialOrd, Ord)] -pub struct Unit(UnitKind); - -#[derive(Clone, Copy, Eq, PartialEq, PartialOrd, Ord)] -enum UnitKind { - /// Represents a byte value, or more typically, an equivalence class - /// represented as a byte value. - U8(u8), - /// Represents the "end of input" sentinel. We regrettably use a `u16` - /// here since the maximum sentinel value is `256`. Thankfully, we don't - /// actually store a `Unit` anywhere, so this extra space shouldn't be too - /// bad. - EOI(u16), -} - -impl Unit { - /// Create a new haystack unit from a byte value. - /// - /// All possible byte values are legal. However, when creating a haystack - /// unit for a specific DFA, one should be careful to only construct units - /// that are in that DFA's alphabet. Namely, one way to compact a DFA's - /// in-memory representation is to collapse its transitions to a set of - /// equivalence classes into a set of all possible byte values. If a DFA - /// uses equivalence classes instead of byte values, then the byte given - /// here should be the equivalence class. - pub fn u8(byte: u8) -> Unit { - Unit(UnitKind::U8(byte)) - } - - /// Create a new "end of input" haystack unit. - /// - /// The value given is the sentinel value used by this unit to represent - /// the "end of input." The value should be the total number of equivalence - /// classes in the corresponding alphabet. Its maximum value is `256`, - /// which occurs when every byte is its own equivalence class. - /// - /// # Panics - /// - /// This panics when `num_byte_equiv_classes` is greater than `256`. - pub fn eoi(num_byte_equiv_classes: usize) -> Unit { - assert!( - num_byte_equiv_classes <= 256, - "max number of byte-based equivalent classes is 256, but got \ - {num_byte_equiv_classes}", - ); - Unit(UnitKind::EOI(u16::try_from(num_byte_equiv_classes).unwrap())) - } - - /// If this unit is not an "end of input" sentinel, then returns its - /// underlying byte value. Otherwise return `None`. 
- pub fn as_u8(self) -> Option { - match self.0 { - UnitKind::U8(b) => Some(b), - UnitKind::EOI(_) => None, - } - } - - /// If this unit is an "end of input" sentinel, then return the underlying - /// sentinel value that was given to [`Unit::eoi`]. Otherwise return - /// `None`. - pub fn as_eoi(self) -> Option { - match self.0 { - UnitKind::U8(_) => None, - UnitKind::EOI(sentinel) => Some(sentinel), - } - } - - /// Return this unit as a `usize`, regardless of whether it is a byte value - /// or an "end of input" sentinel. In the latter case, the underlying - /// sentinel value given to [`Unit::eoi`] is returned. - pub fn as_usize(self) -> usize { - match self.0 { - UnitKind::U8(b) => usize::from(b), - UnitKind::EOI(eoi) => usize::from(eoi), - } - } - - /// Returns true if and only of this unit is a byte value equivalent to the - /// byte given. This always returns false when this is an "end of input" - /// sentinel. - pub fn is_byte(self, byte: u8) -> bool { - self.as_u8().map_or(false, |b| b == byte) - } - - /// Returns true when this unit represents an "end of input" sentinel. - pub fn is_eoi(self) -> bool { - self.as_eoi().is_some() - } - - /// Returns true when this unit corresponds to an ASCII word byte. - /// - /// This always returns false when this unit represents an "end of input" - /// sentinel. - pub fn is_word_byte(self) -> bool { - self.as_u8().map_or(false, crate::util::utf8::is_word_byte) - } -} - -impl core::fmt::Debug for Unit { - fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { - match self.0 { - UnitKind::U8(b) => write!(f, "{:?}", DebugByte(b)), - UnitKind::EOI(_) => write!(f, "EOI"), - } - } -} - -/// A representation of byte oriented equivalence classes. -/// -/// This is used in a DFA to reduce the size of the transition table. This can -/// have a particularly large impact not only on the total size of a dense DFA, -/// but also on compile times. -/// -/// The essential idea here is that the alphabet of a DFA is shrunk from the -/// usual 256 distinct byte values down to a set of equivalence classes. The -/// guarantee you get is that any byte belonging to the same equivalence class -/// can be treated as if it were any other byte in the same class, and the -/// result of a search wouldn't change. -/// -/// # Example -/// -/// This example shows how to get byte classes from an -/// [`NFA`](crate::nfa::thompson::NFA) and ask for the class of various bytes. -/// -/// ``` -/// use regex_automata::nfa::thompson::NFA; -/// -/// let nfa = NFA::new("[a-z]+")?; -/// let classes = nfa.byte_classes(); -/// // 'a' and 'z' are in the same class for this regex. -/// assert_eq!(classes.get(b'a'), classes.get(b'z')); -/// // But 'a' and 'A' are not. -/// assert_ne!(classes.get(b'a'), classes.get(b'A')); -/// -/// # Ok::<(), Box>(()) -/// ``` -#[derive(Clone, Copy)] -pub struct ByteClasses([u8; 256]); - -impl ByteClasses { - /// Creates a new set of equivalence classes where all bytes are mapped to - /// the same class. - #[inline] - pub fn empty() -> ByteClasses { - ByteClasses([0; 256]) - } - - /// Creates a new set of equivalence classes where each byte belongs to - /// its own equivalence class. - #[inline] - pub fn singletons() -> ByteClasses { - let mut classes = ByteClasses::empty(); - for b in 0..=255 { - classes.set(b, b); - } - classes - } - - /// Deserializes a byte class map from the given slice. If the slice is of - /// insufficient length or otherwise contains an impossible mapping, then - /// an error is returned. 
Upon success, the number of bytes read along with - /// the map are returned. The number of bytes read is always a multiple of - /// 8. - pub(crate) fn from_bytes( - slice: &[u8], - ) -> Result<(ByteClasses, usize), DeserializeError> { - wire::check_slice_len(slice, 256, "byte class map")?; - let mut classes = ByteClasses::empty(); - for (b, &class) in slice[..256].iter().enumerate() { - classes.set(u8::try_from(b).unwrap(), class); - } - // We specifically don't use 'classes.iter()' here because that - // iterator depends on 'classes.alphabet_len()' being correct. But that - // is precisely the thing we're trying to verify below! - for &b in classes.0.iter() { - if usize::from(b) >= classes.alphabet_len() { - return Err(DeserializeError::generic( - "found equivalence class greater than alphabet len", - )); - } - } - Ok((classes, 256)) - } - - /// Writes this byte class map to the given byte buffer. if the given - /// buffer is too small, then an error is returned. Upon success, the total - /// number of bytes written is returned. The number of bytes written is - /// guaranteed to be a multiple of 8. - pub(crate) fn write_to( - &self, - mut dst: &mut [u8], - ) -> Result { - let nwrite = self.write_to_len(); - if dst.len() < nwrite { - return Err(SerializeError::buffer_too_small("byte class map")); - } - for b in 0..=255 { - dst[0] = self.get(b); - dst = &mut dst[1..]; - } - Ok(nwrite) - } - - /// Returns the total number of bytes written by `write_to`. - pub(crate) fn write_to_len(&self) -> usize { - 256 - } - - /// Set the equivalence class for the given byte. - #[inline] - pub fn set(&mut self, byte: u8, class: u8) { - self.0[usize::from(byte)] = class; - } - - /// Get the equivalence class for the given byte. - #[inline] - pub fn get(&self, byte: u8) -> u8 { - self.0[usize::from(byte)] - } - - /// Get the equivalence class for the given haystack unit and return the - /// class as a `usize`. - #[inline] - pub fn get_by_unit(&self, unit: Unit) -> usize { - match unit.0 { - UnitKind::U8(b) => usize::from(self.get(b)), - UnitKind::EOI(b) => usize::from(b), - } - } - - /// Create a unit that represents the "end of input" sentinel based on the - /// number of equivalence classes. - #[inline] - pub fn eoi(&self) -> Unit { - // The alphabet length already includes the EOI sentinel, hence why - // we subtract 1. - Unit::eoi(self.alphabet_len().checked_sub(1).unwrap()) - } - - /// Return the total number of elements in the alphabet represented by - /// these equivalence classes. Equivalently, this returns the total number - /// of equivalence classes. - #[inline] - pub fn alphabet_len(&self) -> usize { - // Add one since the number of equivalence classes is one bigger than - // the last one. But add another to account for the final EOI class - // that isn't explicitly represented. - usize::from(self.0[255]) + 1 + 1 - } - - /// Returns the stride, as a base-2 exponent, required for these - /// equivalence classes. - /// - /// The stride is always the smallest power of 2 that is greater than or - /// equal to the alphabet length, and the `stride2` returned here is the - /// exponent applied to `2` to get the smallest power. This is done so that - /// converting between premultiplied state IDs and indices can be done with - /// shifts alone, which is much faster than integer division. 
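As a concrete illustration of the shift-only conversion described above, here is a small self-contained sketch. The `stride2` helper mirrors the computation documented here, while the surrounding names and values are assumptions made up for the example rather than the crate's API.

// Illustrative sketch: with a stride equal to the smallest power of two that
// is >= the alphabet length, a dense transition-table lookup becomes a shift
// plus an add, and a premultiplied state id converts back with a shift.
fn stride2(alphabet_len: usize) -> usize {
    alphabet_len.next_power_of_two().trailing_zeros() as usize
}

fn main() {
    let alphabet_len = 5;            // e.g. 4 byte classes plus the EOI class
    let s2 = stride2(alphabet_len);  // smallest power of two >= 5 is 8, so 3
    assert_eq!(s2, 3);

    // A premultiplied state id is `state_index << s2`, so the row for a state
    // starts at the id itself and the transition for a class is one add away.
    let state_index = 6usize;
    let premultiplied = state_index << s2;     // 48
    let class = 2usize;
    let table_offset = premultiplied + class;  // 50, with no multiply needed
    assert_eq!(table_offset, state_index * 8 + class);

    // Converting a premultiplied id back to a state index is also a shift.
    assert_eq!(premultiplied >> s2, state_index);
}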
- #[inline] - pub fn stride2(&self) -> usize { - let zeros = self.alphabet_len().next_power_of_two().trailing_zeros(); - usize::try_from(zeros).unwrap() - } - - /// Returns true if and only if every byte in this class maps to its own - /// equivalence class. Equivalently, there are 257 equivalence classes - /// and each class contains either exactly one byte or corresponds to the - /// singleton class containing the "end of input" sentinel. - #[inline] - pub fn is_singleton(&self) -> bool { - self.alphabet_len() == 257 - } - - /// Returns an iterator over all equivalence classes in this set. - #[inline] - pub fn iter(&self) -> ByteClassIter<'_> { - ByteClassIter { classes: self, i: 0 } - } - - /// Returns an iterator over a sequence of representative bytes from each - /// equivalence class within the range of bytes given. - /// - /// When the given range is unbounded on both sides, the iterator yields - /// exactly N items, where N is equivalent to the number of equivalence - /// classes. Each item is an arbitrary byte drawn from each equivalence - /// class. - /// - /// This is useful when one is determinizing an NFA and the NFA's alphabet - /// hasn't been converted to equivalence classes. Picking an arbitrary byte - /// from each equivalence class then permits a full exploration of the NFA - /// instead of using every possible byte value and thus potentially saves - /// quite a lot of redundant work. - /// - /// # Example - /// - /// This shows an example of what a complete sequence of representatives - /// might look like from a real example. - /// - /// ``` - /// use regex_automata::{nfa::thompson::NFA, util::alphabet::Unit}; - /// - /// let nfa = NFA::new("[a-z]+")?; - /// let classes = nfa.byte_classes(); - /// let reps: Vec = classes.representatives(..).collect(); - /// // Note that the specific byte values yielded are not guaranteed! - /// let expected = vec![ - /// Unit::u8(b'\x00'), - /// Unit::u8(b'a'), - /// Unit::u8(b'{'), - /// Unit::eoi(3), - /// ]; - /// assert_eq!(expected, reps); - /// - /// # Ok::<(), Box>(()) - /// ``` - /// - /// Note though, that you can ask for an arbitrary range of bytes, and only - /// representatives for that range will be returned: - /// - /// ``` - /// use regex_automata::{nfa::thompson::NFA, util::alphabet::Unit}; - /// - /// let nfa = NFA::new("[a-z]+")?; - /// let classes = nfa.byte_classes(); - /// let reps: Vec = classes.representatives(b'A'..=b'z').collect(); - /// // Note that the specific byte values yielded are not guaranteed! - /// let expected = vec![ - /// Unit::u8(b'A'), - /// Unit::u8(b'a'), - /// ]; - /// assert_eq!(expected, reps); - /// - /// # Ok::<(), Box>(()) - /// ``` - pub fn representatives>( - &self, - range: R, - ) -> ByteClassRepresentatives<'_> { - use core::ops::Bound; - - let cur_byte = match range.start_bound() { - Bound::Included(&i) => usize::from(i), - Bound::Excluded(&i) => usize::from(i).checked_add(1).unwrap(), - Bound::Unbounded => 0, - }; - let end_byte = match range.end_bound() { - Bound::Included(&i) => { - Some(usize::from(i).checked_add(1).unwrap()) - } - Bound::Excluded(&i) => Some(usize::from(i)), - Bound::Unbounded => None, - }; - assert_ne!( - cur_byte, - usize::MAX, - "start range must be less than usize::MAX", - ); - ByteClassRepresentatives { - classes: self, - cur_byte, - end_byte, - last_class: None, - } - } - - /// Returns an iterator of the bytes in the given equivalence class. - /// - /// This is useful when one needs to know the actual bytes that belong to - /// an equivalence class. 
For example, conceptually speaking, accelerating - /// a DFA state occurs when a state only has a few outgoing transitions. - /// But in reality, what is required is that there are only a small - /// number of distinct bytes that can lead to an outgoing transition. The - /// difference is that any one transition can correspond to an equivalence - /// class which may contains many bytes. Therefore, DFA state acceleration - /// considers the actual elements in each equivalence class of each - /// outgoing transition. - /// - /// # Example - /// - /// This shows an example of how to get all of the elements in an - /// equivalence class. - /// - /// ``` - /// use regex_automata::{nfa::thompson::NFA, util::alphabet::Unit}; - /// - /// let nfa = NFA::new("[a-z]+")?; - /// let classes = nfa.byte_classes(); - /// let elements: Vec = classes.elements(Unit::u8(1)).collect(); - /// let expected: Vec = (b'a'..=b'z').map(Unit::u8).collect(); - /// assert_eq!(expected, elements); - /// - /// # Ok::<(), Box>(()) - /// ``` - #[inline] - pub fn elements(&self, class: Unit) -> ByteClassElements<'_> { - ByteClassElements { classes: self, class, byte: 0 } - } - - /// Returns an iterator of byte ranges in the given equivalence class. - /// - /// That is, a sequence of contiguous ranges are returned. Typically, every - /// class maps to a single contiguous range. - fn element_ranges(&self, class: Unit) -> ByteClassElementRanges<'_> { - ByteClassElementRanges { elements: self.elements(class), range: None } - } -} - -impl Default for ByteClasses { - fn default() -> ByteClasses { - ByteClasses::singletons() - } -} - -impl core::fmt::Debug for ByteClasses { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - if self.is_singleton() { - write!(f, "ByteClasses({{singletons}})") - } else { - write!(f, "ByteClasses(")?; - for (i, class) in self.iter().enumerate() { - if i > 0 { - write!(f, ", ")?; - } - write!(f, "{:?} => [", class.as_usize())?; - for (start, end) in self.element_ranges(class) { - if start == end { - write!(f, "{start:?}")?; - } else { - write!(f, "{start:?}-{end:?}")?; - } - } - write!(f, "]")?; - } - write!(f, ")") - } - } -} - -/// An iterator over each equivalence class. -/// -/// The last element in this iterator always corresponds to [`Unit::eoi`]. -/// -/// This is created by the [`ByteClasses::iter`] method. -/// -/// The lifetime `'a` refers to the lifetime of the byte classes that this -/// iterator was created from. -#[derive(Debug)] -pub struct ByteClassIter<'a> { - classes: &'a ByteClasses, - i: usize, -} - -impl<'a> Iterator for ByteClassIter<'a> { - type Item = Unit; - - fn next(&mut self) -> Option { - if self.i + 1 == self.classes.alphabet_len() { - self.i += 1; - Some(self.classes.eoi()) - } else if self.i < self.classes.alphabet_len() { - let class = u8::try_from(self.i).unwrap(); - self.i += 1; - Some(Unit::u8(class)) - } else { - None - } - } -} - -/// An iterator over representative bytes from each equivalence class. -/// -/// This is created by the [`ByteClasses::representatives`] method. -/// -/// The lifetime `'a` refers to the lifetime of the byte classes that this -/// iterator was created from. 
-#[derive(Debug)] -pub struct ByteClassRepresentatives<'a> { - classes: &'a ByteClasses, - cur_byte: usize, - end_byte: Option, - last_class: Option, -} - -impl<'a> Iterator for ByteClassRepresentatives<'a> { - type Item = Unit; - - fn next(&mut self) -> Option { - while self.cur_byte < self.end_byte.unwrap_or(256) { - let byte = u8::try_from(self.cur_byte).unwrap(); - let class = self.classes.get(byte); - self.cur_byte += 1; - - if self.last_class != Some(class) { - self.last_class = Some(class); - return Some(Unit::u8(byte)); - } - } - if self.cur_byte != usize::MAX && self.end_byte.is_none() { - // Using usize::MAX as a sentinel is OK because we ban usize::MAX - // from appearing as a start bound in iterator construction. But - // why do it this way? Well, we want to return the EOI class - // whenever the end of the given range is unbounded because EOI - // isn't really a "byte" per se, so the only way it should be - // excluded is if there is a bounded end to the range. Therefore, - // when the end is unbounded, we just need to know whether we've - // reported EOI or not. When we do, we set cur_byte to a value it - // can never otherwise be. - self.cur_byte = usize::MAX; - return Some(self.classes.eoi()); - } - None - } -} - -/// An iterator over all elements in an equivalence class. -/// -/// This is created by the [`ByteClasses::elements`] method. -/// -/// The lifetime `'a` refers to the lifetime of the byte classes that this -/// iterator was created from. -#[derive(Debug)] -pub struct ByteClassElements<'a> { - classes: &'a ByteClasses, - class: Unit, - byte: usize, -} - -impl<'a> Iterator for ByteClassElements<'a> { - type Item = Unit; - - fn next(&mut self) -> Option { - while self.byte < 256 { - let byte = u8::try_from(self.byte).unwrap(); - self.byte += 1; - if self.class.is_byte(self.classes.get(byte)) { - return Some(Unit::u8(byte)); - } - } - if self.byte < 257 { - self.byte += 1; - if self.class.is_eoi() { - return Some(Unit::eoi(256)); - } - } - None - } -} - -/// An iterator over all elements in an equivalence class expressed as a -/// sequence of contiguous ranges. -#[derive(Debug)] -struct ByteClassElementRanges<'a> { - elements: ByteClassElements<'a>, - range: Option<(Unit, Unit)>, -} - -impl<'a> Iterator for ByteClassElementRanges<'a> { - type Item = (Unit, Unit); - - fn next(&mut self) -> Option<(Unit, Unit)> { - loop { - let element = match self.elements.next() { - None => return self.range.take(), - Some(element) => element, - }; - match self.range.take() { - None => { - self.range = Some((element, element)); - } - Some((start, end)) => { - if end.as_usize() + 1 != element.as_usize() - || element.is_eoi() - { - self.range = Some((element, element)); - return Some((start, end)); - } - self.range = Some((start, element)); - } - } - } - } -} - -/// A partitioning of bytes into equivalence classes. -/// -/// A byte class set keeps track of an *approximation* of equivalence classes -/// of bytes during NFA construction. That is, every byte in an equivalence -/// class cannot discriminate between a match and a non-match. -/// -/// For example, in the regex `[ab]+`, the bytes `a` and `b` would be in the -/// same equivalence class because it never matters whether an `a` or a `b` is -/// seen, and no combination of `a`s and `b`s in the text can discriminate a -/// match. -/// -/// Note though that this does not compute the minimal set of equivalence -/// classes. 
For example, in the regex `[ac]+`, both `a` and `c` are in the -/// same equivalence class for the same reason that `a` and `b` are in the -/// same equivalence class in the aforementioned regex. However, in this -/// implementation, `a` and `c` are put into distinct equivalence classes. The -/// reason for this is implementation complexity. In the future, we should -/// endeavor to compute the minimal equivalence classes since they can have a -/// rather large impact on the size of the DFA. (Doing this will likely require -/// rethinking how equivalence classes are computed, including changing the -/// representation here, which is only able to group contiguous bytes into the -/// same equivalence class.) -#[cfg(feature = "alloc")] -#[derive(Clone, Debug)] -pub(crate) struct ByteClassSet(ByteSet); - -#[cfg(feature = "alloc")] -impl Default for ByteClassSet { - fn default() -> ByteClassSet { - ByteClassSet::empty() - } -} - -#[cfg(feature = "alloc")] -impl ByteClassSet { - /// Create a new set of byte classes where all bytes are part of the same - /// equivalence class. - pub(crate) fn empty() -> Self { - ByteClassSet(ByteSet::empty()) - } - - /// Indicate the range of byte given (inclusive) can discriminate a - /// match between it and all other bytes outside of the range. - pub(crate) fn set_range(&mut self, start: u8, end: u8) { - debug_assert!(start <= end); - if start > 0 { - self.0.add(start - 1); - } - self.0.add(end); - } - - /// Add the contiguous ranges in the set given to this byte class set. - pub(crate) fn add_set(&mut self, set: &ByteSet) { - for (start, end) in set.iter_ranges() { - self.set_range(start, end); - } - } - - /// Convert this boolean set to a map that maps all byte values to their - /// corresponding equivalence class. The last mapping indicates the largest - /// equivalence class identifier (which is never bigger than 255). - pub(crate) fn byte_classes(&self) -> ByteClasses { - let mut classes = ByteClasses::empty(); - let mut class = 0u8; - let mut b = 0u8; - loop { - classes.set(b, class); - if b == 255 { - break; - } - if self.0.contains(b) { - class = class.checked_add(1).unwrap(); - } - b = b.checked_add(1).unwrap(); - } - classes - } -} - -/// A simple set of bytes that is reasonably cheap to copy and allocation free. -#[derive(Clone, Copy, Debug, Default, Eq, PartialEq)] -pub(crate) struct ByteSet { - bits: BitSet, -} - -/// The representation of a byte set. Split out so that we can define a -/// convenient Debug impl for it while keeping "ByteSet" in the output. -#[derive(Clone, Copy, Default, Eq, PartialEq)] -struct BitSet([u128; 2]); - -impl ByteSet { - /// Create an empty set of bytes. - pub(crate) fn empty() -> ByteSet { - ByteSet { bits: BitSet([0; 2]) } - } - - /// Add a byte to this set. - /// - /// If the given byte already belongs to this set, then this is a no-op. - pub(crate) fn add(&mut self, byte: u8) { - let bucket = byte / 128; - let bit = byte % 128; - self.bits.0[usize::from(bucket)] |= 1 << bit; - } - - /// Remove a byte from this set. - /// - /// If the given byte is not in this set, then this is a no-op. - pub(crate) fn remove(&mut self, byte: u8) { - let bucket = byte / 128; - let bit = byte % 128; - self.bits.0[usize::from(bucket)] &= !(1 << bit); - } - - /// Return true if and only if the given byte is in this set. 
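A minimal standalone sketch of the bucket arithmetic described here, with two `u128` words where `byte / 128` selects the word and `byte % 128` selects the bit, plus a small helper that groups members into contiguous ranges. `TinyByteSet` and `ranges` are invented names for illustration, not the types defined in this file.

// Hypothetical sketch of a 256-bit byte set backed by two u128 "buckets".
#[derive(Default)]
struct TinyByteSet {
    bits: [u128; 2],
}

impl TinyByteSet {
    fn add(&mut self, byte: u8) {
        self.bits[usize::from(byte / 128)] |= 1u128 << (byte % 128);
    }

    fn contains(&self, byte: u8) -> bool {
        self.bits[usize::from(byte / 128)] & (1u128 << (byte % 128)) != 0
    }

    // Collect the members of the set as contiguous inclusive ranges.
    fn ranges(&self) -> Vec<(u8, u8)> {
        let mut out = Vec::new();
        let mut cur: Option<(u8, u8)> = None;
        for b in 0u8..=255 {
            if self.contains(b) {
                cur = match cur {
                    Some((start, _)) => Some((start, b)),
                    None => Some((b, b)),
                };
            } else if let Some(range) = cur.take() {
                out.push(range);
            }
        }
        if let Some(range) = cur {
            out.push(range);
        }
        out
    }
}

fn main() {
    let mut set = TinyByteSet::default();
    for b in b'a'..=b'z' {
        set.add(b);
    }
    set.add(0xFF);
    assert!(set.contains(b'q'));
    assert!(!set.contains(b'A'));
    assert_eq!(set.ranges(), vec![(b'a', b'z'), (0xFF, 0xFF)]);
}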
- pub(crate) fn contains(&self, byte: u8) -> bool { - let bucket = byte / 128; - let bit = byte % 128; - self.bits.0[usize::from(bucket)] & (1 << bit) > 0 - } - - /// Return true if and only if the given inclusive range of bytes is in - /// this set. - pub(crate) fn contains_range(&self, start: u8, end: u8) -> bool { - (start..=end).all(|b| self.contains(b)) - } - - /// Returns an iterator over all bytes in this set. - pub(crate) fn iter(&self) -> ByteSetIter<'_> { - ByteSetIter { set: self, b: 0 } - } - - /// Returns an iterator over all contiguous ranges of bytes in this set. - pub(crate) fn iter_ranges(&self) -> ByteSetRangeIter<'_> { - ByteSetRangeIter { set: self, b: 0 } - } - - /// Return true if and only if this set is empty. - #[cfg_attr(feature = "perf-inline", inline(always))] - pub(crate) fn is_empty(&self) -> bool { - self.bits.0 == [0, 0] - } - - /// Deserializes a byte set from the given slice. If the slice is of - /// incorrect length or is otherwise malformed, then an error is returned. - /// Upon success, the number of bytes read along with the set are returned. - /// The number of bytes read is always a multiple of 8. - pub(crate) fn from_bytes( - slice: &[u8], - ) -> Result<(ByteSet, usize), DeserializeError> { - use core::mem::size_of; - - wire::check_slice_len(slice, 2 * size_of::(), "byte set")?; - let mut nread = 0; - let (low, nr) = wire::try_read_u128(slice, "byte set low bucket")?; - nread += nr; - let (high, nr) = wire::try_read_u128(slice, "byte set high bucket")?; - nread += nr; - Ok((ByteSet { bits: BitSet([low, high]) }, nread)) - } - - /// Writes this byte set to the given byte buffer. If the given buffer is - /// too small, then an error is returned. Upon success, the total number of - /// bytes written is returned. The number of bytes written is guaranteed to - /// be a multiple of 8. - pub(crate) fn write_to( - &self, - dst: &mut [u8], - ) -> Result { - use core::mem::size_of; - - let nwrite = self.write_to_len(); - if dst.len() < nwrite { - return Err(SerializeError::buffer_too_small("byte set")); - } - let mut nw = 0; - E::write_u128(self.bits.0[0], &mut dst[nw..]); - nw += size_of::(); - E::write_u128(self.bits.0[1], &mut dst[nw..]); - nw += size_of::(); - assert_eq!(nwrite, nw, "expected to write certain number of bytes",); - assert_eq!( - nw % 8, - 0, - "expected to write multiple of 8 bytes for byte set", - ); - Ok(nw) - } - - /// Returns the total number of bytes written by `write_to`. 
- pub(crate) fn write_to_len(&self) -> usize { - 2 * core::mem::size_of::() - } -} - -impl core::fmt::Debug for BitSet { - fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { - let mut fmtd = f.debug_set(); - for b in 0u8..=255 { - if (ByteSet { bits: *self }).contains(b) { - fmtd.entry(&b); - } - } - fmtd.finish() - } -} - -#[derive(Debug)] -pub(crate) struct ByteSetIter<'a> { - set: &'a ByteSet, - b: usize, -} - -impl<'a> Iterator for ByteSetIter<'a> { - type Item = u8; - - fn next(&mut self) -> Option { - while self.b <= 255 { - let b = u8::try_from(self.b).unwrap(); - self.b += 1; - if self.set.contains(b) { - return Some(b); - } - } - None - } -} - -#[derive(Debug)] -pub(crate) struct ByteSetRangeIter<'a> { - set: &'a ByteSet, - b: usize, -} - -impl<'a> Iterator for ByteSetRangeIter<'a> { - type Item = (u8, u8); - - fn next(&mut self) -> Option<(u8, u8)> { - let asu8 = |n: usize| u8::try_from(n).unwrap(); - while self.b <= 255 { - let start = asu8(self.b); - self.b += 1; - if !self.set.contains(start) { - continue; - } - - let mut end = start; - while self.b <= 255 && self.set.contains(asu8(self.b)) { - end = asu8(self.b); - self.b += 1; - } - return Some((start, end)); - } - None - } -} - -#[cfg(all(test, feature = "alloc"))] -mod tests { - use alloc::{vec, vec::Vec}; - - use super::*; - - #[test] - fn byte_classes() { - let mut set = ByteClassSet::empty(); - set.set_range(b'a', b'z'); - - let classes = set.byte_classes(); - assert_eq!(classes.get(0), 0); - assert_eq!(classes.get(1), 0); - assert_eq!(classes.get(2), 0); - assert_eq!(classes.get(b'a' - 1), 0); - assert_eq!(classes.get(b'a'), 1); - assert_eq!(classes.get(b'm'), 1); - assert_eq!(classes.get(b'z'), 1); - assert_eq!(classes.get(b'z' + 1), 2); - assert_eq!(classes.get(254), 2); - assert_eq!(classes.get(255), 2); - - let mut set = ByteClassSet::empty(); - set.set_range(0, 2); - set.set_range(4, 6); - let classes = set.byte_classes(); - assert_eq!(classes.get(0), 0); - assert_eq!(classes.get(1), 0); - assert_eq!(classes.get(2), 0); - assert_eq!(classes.get(3), 1); - assert_eq!(classes.get(4), 2); - assert_eq!(classes.get(5), 2); - assert_eq!(classes.get(6), 2); - assert_eq!(classes.get(7), 3); - assert_eq!(classes.get(255), 3); - } - - #[test] - fn full_byte_classes() { - let mut set = ByteClassSet::empty(); - for b in 0u8..=255 { - set.set_range(b, b); - } - assert_eq!(set.byte_classes().alphabet_len(), 257); - } - - #[test] - fn elements_typical() { - let mut set = ByteClassSet::empty(); - set.set_range(b'b', b'd'); - set.set_range(b'g', b'm'); - set.set_range(b'z', b'z'); - let classes = set.byte_classes(); - // class 0: \x00-a - // class 1: b-d - // class 2: e-f - // class 3: g-m - // class 4: n-y - // class 5: z-z - // class 6: \x7B-\xFF - // class 7: EOI - assert_eq!(classes.alphabet_len(), 8); - - let elements = classes.elements(Unit::u8(0)).collect::>(); - assert_eq!(elements.len(), 98); - assert_eq!(elements[0], Unit::u8(b'\x00')); - assert_eq!(elements[97], Unit::u8(b'a')); - - let elements = classes.elements(Unit::u8(1)).collect::>(); - assert_eq!( - elements, - vec![Unit::u8(b'b'), Unit::u8(b'c'), Unit::u8(b'd')], - ); - - let elements = classes.elements(Unit::u8(2)).collect::>(); - assert_eq!(elements, vec![Unit::u8(b'e'), Unit::u8(b'f')],); - - let elements = classes.elements(Unit::u8(3)).collect::>(); - assert_eq!( - elements, - vec![ - Unit::u8(b'g'), - Unit::u8(b'h'), - Unit::u8(b'i'), - Unit::u8(b'j'), - Unit::u8(b'k'), - Unit::u8(b'l'), - Unit::u8(b'm'), - ], - ); - - let elements = 
classes.elements(Unit::u8(4)).collect::>(); - assert_eq!(elements.len(), 12); - assert_eq!(elements[0], Unit::u8(b'n')); - assert_eq!(elements[11], Unit::u8(b'y')); - - let elements = classes.elements(Unit::u8(5)).collect::>(); - assert_eq!(elements, vec![Unit::u8(b'z')]); - - let elements = classes.elements(Unit::u8(6)).collect::>(); - assert_eq!(elements.len(), 133); - assert_eq!(elements[0], Unit::u8(b'\x7B')); - assert_eq!(elements[132], Unit::u8(b'\xFF')); - - let elements = classes.elements(Unit::eoi(7)).collect::>(); - assert_eq!(elements, vec![Unit::eoi(256)]); - } - - #[test] - fn elements_singletons() { - let classes = ByteClasses::singletons(); - assert_eq!(classes.alphabet_len(), 257); - - let elements = classes.elements(Unit::u8(b'a')).collect::>(); - assert_eq!(elements, vec![Unit::u8(b'a')]); - - let elements = classes.elements(Unit::eoi(5)).collect::>(); - assert_eq!(elements, vec![Unit::eoi(256)]); - } - - #[test] - fn elements_empty() { - let classes = ByteClasses::empty(); - assert_eq!(classes.alphabet_len(), 2); - - let elements = classes.elements(Unit::u8(0)).collect::>(); - assert_eq!(elements.len(), 256); - assert_eq!(elements[0], Unit::u8(b'\x00')); - assert_eq!(elements[255], Unit::u8(b'\xFF')); - - let elements = classes.elements(Unit::eoi(1)).collect::>(); - assert_eq!(elements, vec![Unit::eoi(256)]); - } - - #[test] - fn representatives() { - let mut set = ByteClassSet::empty(); - set.set_range(b'b', b'd'); - set.set_range(b'g', b'm'); - set.set_range(b'z', b'z'); - let classes = set.byte_classes(); - - let got: Vec = classes.representatives(..).collect(); - let expected = vec![ - Unit::u8(b'\x00'), - Unit::u8(b'b'), - Unit::u8(b'e'), - Unit::u8(b'g'), - Unit::u8(b'n'), - Unit::u8(b'z'), - Unit::u8(b'\x7B'), - Unit::eoi(7), - ]; - assert_eq!(expected, got); - - let got: Vec = classes.representatives(..0).collect(); - assert!(got.is_empty()); - let got: Vec = classes.representatives(1..1).collect(); - assert!(got.is_empty()); - let got: Vec = classes.representatives(255..255).collect(); - assert!(got.is_empty()); - - // A weird case that is the only guaranteed to way to get an iterator - // of just the EOI class by excluding all possible byte values. 
- let got: Vec = classes - .representatives(( - core::ops::Bound::Excluded(255), - core::ops::Bound::Unbounded, - )) - .collect(); - let expected = vec![Unit::eoi(7)]; - assert_eq!(expected, got); - - let got: Vec = classes.representatives(..=255).collect(); - let expected = vec![ - Unit::u8(b'\x00'), - Unit::u8(b'b'), - Unit::u8(b'e'), - Unit::u8(b'g'), - Unit::u8(b'n'), - Unit::u8(b'z'), - Unit::u8(b'\x7B'), - ]; - assert_eq!(expected, got); - - let got: Vec = classes.representatives(b'b'..=b'd').collect(); - let expected = vec![Unit::u8(b'b')]; - assert_eq!(expected, got); - - let got: Vec = classes.representatives(b'a'..=b'd').collect(); - let expected = vec![Unit::u8(b'a'), Unit::u8(b'b')]; - assert_eq!(expected, got); - - let got: Vec = classes.representatives(b'b'..=b'e').collect(); - let expected = vec![Unit::u8(b'b'), Unit::u8(b'e')]; - assert_eq!(expected, got); - - let got: Vec = classes.representatives(b'A'..=b'Z').collect(); - let expected = vec![Unit::u8(b'A')]; - assert_eq!(expected, got); - - let got: Vec = classes.representatives(b'A'..=b'z').collect(); - let expected = vec![ - Unit::u8(b'A'), - Unit::u8(b'b'), - Unit::u8(b'e'), - Unit::u8(b'g'), - Unit::u8(b'n'), - Unit::u8(b'z'), - ]; - assert_eq!(expected, got); - - let got: Vec = classes.representatives(b'z'..).collect(); - let expected = vec![Unit::u8(b'z'), Unit::u8(b'\x7B'), Unit::eoi(7)]; - assert_eq!(expected, got); - - let got: Vec = classes.representatives(b'z'..=0xFF).collect(); - let expected = vec![Unit::u8(b'z'), Unit::u8(b'\x7B')]; - assert_eq!(expected, got); - } -} diff --git a/vendor/regex-automata/src/util/captures.rs b/vendor/regex-automata/src/util/captures.rs deleted file mode 100644 index 5376f348d10ebc..00000000000000 --- a/vendor/regex-automata/src/util/captures.rs +++ /dev/null @@ -1,2551 +0,0 @@ -/*! -Provides types for dealing with capturing groups. - -Capturing groups refer to sub-patterns of regexes that some regex engines can -report matching offsets for. For example, matching `[a-z]([0-9]+)` against -`a789` would give `a789` as the overall match (for the implicit capturing group -at index `0`) and `789` as the match for the capturing group `([0-9]+)` (an -explicit capturing group at index `1`). - -Not all regex engines can report match offsets for capturing groups. Indeed, -to a first approximation, regex engines that can report capturing group offsets -tend to be quite a bit slower than regex engines that can't. This is because -tracking capturing groups at search time usually requires more "power" that -in turn adds overhead. - -Other regex implementations might call capturing groups "submatches." - -# Overview - -The main types in this module are: - -* [`Captures`] records the capturing group offsets found during a search. It -provides convenience routines for looking up capturing group offsets by either -index or name. -* [`GroupInfo`] records the mapping between capturing groups and "slots," -where the latter are how capturing groups are recorded during a regex search. -This also keeps a mapping from capturing group name to index, and capture -group index to name. A `GroupInfo` is used by `Captures` internally to -provide a convenient API. It is unlikely that you'll use a `GroupInfo` -directly, but for example, if you've compiled an Thompson NFA, then you can use -[`thompson::NFA::group_info`](crate::nfa::thompson::NFA::group_info) to get its -underlying `GroupInfo`. 
-*/ - -use alloc::{string::String, sync::Arc, vec, vec::Vec}; - -use crate::util::{ - interpolate, - primitives::{ - NonMaxUsize, PatternID, PatternIDError, PatternIDIter, SmallIndex, - }, - search::{Match, Span}, -}; - -/// The span offsets of capturing groups after a match has been found. -/// -/// This type represents the output of regex engines that can report the -/// offsets at which capturing groups matches or "submatches" occur. For -/// example, the [`PikeVM`](crate::nfa::thompson::pikevm::PikeVM). When a match -/// occurs, it will at minimum contain the [`PatternID`] of the pattern that -/// matched. Depending upon how it was constructed, it may also contain the -/// start/end offsets of the entire match of the pattern and the start/end -/// offsets of each capturing group that participated in the match. -/// -/// Values of this type are always created for a specific [`GroupInfo`]. It is -/// unspecified behavior to use a `Captures` value in a search with any regex -/// engine that has a different `GroupInfo` than the one the `Captures` were -/// created with. -/// -/// # Constructors -/// -/// There are three constructors for this type that control what kind of -/// information is available upon a match: -/// -/// * [`Captures::all`]: Will store overall pattern match offsets in addition -/// to the offsets of capturing groups that participated in the match. -/// * [`Captures::matches`]: Will store only the overall pattern -/// match offsets. The offsets of capturing groups (even ones that participated -/// in the match) are not available. -/// * [`Captures::empty`]: Will only store the pattern ID that matched. No -/// match offsets are available at all. -/// -/// If you aren't sure which to choose, then pick the first one. The first one -/// is what convenience routines like, -/// [`PikeVM::create_captures`](crate::nfa::thompson::pikevm::PikeVM::create_captures), -/// will use automatically. -/// -/// The main difference between these choices is performance. Namely, if you -/// ask for _less_ information, then the execution of regex search may be able -/// to run more quickly. -/// -/// # Notes -/// -/// It is worth pointing out that this type is not coupled to any one specific -/// regex engine. Instead, its coupling is with [`GroupInfo`], which is the -/// thing that is responsible for mapping capturing groups to "slot" offsets. -/// Slot offsets are indices into a single sequence of memory at which matching -/// haystack offsets for the corresponding group are written by regex engines. 
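// A minimal sketch of the constructor trade-off described above, assuming the
// regex-automata 0.4 `PikeVM` API these docs refer to: `Captures::all` records
// group spans, `Captures::matches` only the overall span, and `Captures::empty`
// only the matching pattern ID.
use regex_automata::{
    nfa::thompson::pikevm::PikeVM, util::captures::Captures, Match, Span,
};

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let re = PikeVM::new(r"([a-z]+)([0-9]+)")?;
    let mut cache = re.create_cache();
    let info = re.get_nfa().group_info().clone();

    // All capturing group offsets are recorded.
    let mut all = Captures::all(info.clone());
    re.captures(&mut cache, "abc123", &mut all);
    assert_eq!(Some(Match::must(0, 0..6)), all.get_match());
    assert_eq!(Some(Span::from(0..3)), all.get_group(1));

    // Only the overall match span is recorded; group 1 is unavailable.
    let mut overall = Captures::matches(info.clone());
    re.captures(&mut cache, "abc123", &mut overall);
    assert_eq!(Some(Match::must(0, 0..6)), overall.get_match());
    assert_eq!(None, overall.get_group(1));

    // Only the matching pattern ID is recorded; no offsets at all.
    let mut which = Captures::empty(info);
    re.captures(&mut cache, "abc123", &mut which);
    assert!(which.is_match());
    assert_eq!(None, which.get_match());
    Ok(())
}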
-/// -/// # Example -/// -/// This example shows how to parse a simple date and extract the components of -/// the date via capturing groups: -/// -/// ``` -/// use regex_automata::{nfa::thompson::pikevm::PikeVM, Span}; -/// -/// let re = PikeVM::new(r"^([0-9]{4})-([0-9]{2})-([0-9]{2})$")?; -/// let (mut cache, mut caps) = (re.create_cache(), re.create_captures()); -/// -/// re.captures(&mut cache, "2010-03-14", &mut caps); -/// assert!(caps.is_match()); -/// assert_eq!(Some(Span::from(0..4)), caps.get_group(1)); -/// assert_eq!(Some(Span::from(5..7)), caps.get_group(2)); -/// assert_eq!(Some(Span::from(8..10)), caps.get_group(3)); -/// -/// # Ok::<(), Box>(()) -/// ``` -/// -/// # Example: named capturing groups -/// -/// This example is like the one above, but leverages the ability to name -/// capturing groups in order to make the code a bit clearer: -/// -/// ``` -/// use regex_automata::{nfa::thompson::pikevm::PikeVM, Span}; -/// -/// let re = PikeVM::new(r"^(?P[0-9]{4})-(?P[0-9]{2})-(?P[0-9]{2})$")?; -/// let (mut cache, mut caps) = (re.create_cache(), re.create_captures()); -/// -/// re.captures(&mut cache, "2010-03-14", &mut caps); -/// assert!(caps.is_match()); -/// assert_eq!(Some(Span::from(0..4)), caps.get_group_by_name("y")); -/// assert_eq!(Some(Span::from(5..7)), caps.get_group_by_name("m")); -/// assert_eq!(Some(Span::from(8..10)), caps.get_group_by_name("d")); -/// -/// # Ok::<(), Box>(()) -/// ``` -#[derive(Clone)] -pub struct Captures { - /// The group info that these capture groups are coupled to. This is what - /// gives the "convenience" of the `Captures` API. Namely, it provides the - /// slot mapping and the name|-->index mapping for capture lookups by name. - group_info: GroupInfo, - /// The ID of the pattern that matched. Regex engines must set this to - /// None when no match occurs. - pid: Option, - /// The slot values, i.e., submatch offsets. - /// - /// In theory, the smallest sequence of slots would be something like - /// `max(groups(pattern) for pattern in regex) * 2`, but instead, we use - /// `sum(groups(pattern) for pattern in regex) * 2`. Why? - /// - /// Well, the former could be used in theory, because we don't generally - /// have any overlapping APIs that involve capturing groups. Therefore, - /// there's technically never any need to have slots set for multiple - /// patterns. However, this might change some day, in which case, we would - /// need to have slots available. - /// - /// The other reason is that during the execution of some regex engines, - /// there exists a point in time where multiple slots for different - /// patterns may be written to before knowing which pattern has matched. - /// Therefore, the regex engines themselves, in order to support multiple - /// patterns correctly, must have all slots available. If `Captures` - /// doesn't have all slots available, then regex engines can't write - /// directly into the caller provided `Captures` and must instead write - /// into some other storage and then copy the slots involved in the match - /// at the end of the search. - /// - /// So overall, at least as of the time of writing, it seems like the path - /// of least resistance is to just require allocating all possible slots - /// instead of the conceptual minimum. Another way to justify this is that - /// the most common case is a single pattern, in which case, there is no - /// inefficiency here since the 'max' and 'sum' calculations above are - /// equivalent in that case. - /// - /// N.B. 
The mapping from group index to slot is maintained by `GroupInfo` - /// and is considered an API guarantee. See `GroupInfo` for more details on - /// that mapping. - /// - /// N.B. `Option` has the same size as a `usize`. - slots: Vec>, -} - -impl Captures { - /// Create new storage for the offsets of all matching capturing groups. - /// - /// This routine provides the most information for matches---namely, the - /// spans of matching capturing groups---but also requires the regex search - /// routines to do the most work. - /// - /// It is unspecified behavior to use the returned `Captures` value in a - /// search with a `GroupInfo` other than the one that is provided to this - /// constructor. - /// - /// # Example - /// - /// This example shows that all capturing groups---but only ones that - /// participated in a match---are available to query after a match has - /// been found: - /// - /// ``` - /// use regex_automata::{ - /// nfa::thompson::pikevm::PikeVM, - /// util::captures::Captures, - /// Span, Match, - /// }; - /// - /// let re = PikeVM::new( - /// r"^(?:(?P[a-z]+)|(?P[A-Z]+))(?P[0-9]+)$", - /// )?; - /// let mut cache = re.create_cache(); - /// let mut caps = Captures::all(re.get_nfa().group_info().clone()); - /// - /// re.captures(&mut cache, "ABC123", &mut caps); - /// assert!(caps.is_match()); - /// assert_eq!(Some(Match::must(0, 0..6)), caps.get_match()); - /// // The 'lower' group didn't match, so it won't have any offsets. - /// assert_eq!(None, caps.get_group_by_name("lower")); - /// assert_eq!(Some(Span::from(0..3)), caps.get_group_by_name("upper")); - /// assert_eq!(Some(Span::from(3..6)), caps.get_group_by_name("digits")); - /// - /// # Ok::<(), Box>(()) - /// ``` - pub fn all(group_info: GroupInfo) -> Captures { - let slots = group_info.slot_len(); - Captures { group_info, pid: None, slots: vec![None; slots] } - } - - /// Create new storage for only the full match spans of a pattern. This - /// does not include any capturing group offsets. - /// - /// It is unspecified behavior to use the returned `Captures` value in a - /// search with a `GroupInfo` other than the one that is provided to this - /// constructor. - /// - /// # Example - /// - /// This example shows that only overall match offsets are reported when - /// this constructor is used. Accessing any capturing groups other than - /// the 0th will always return `None`. - /// - /// ``` - /// use regex_automata::{ - /// nfa::thompson::pikevm::PikeVM, - /// util::captures::Captures, - /// Match, - /// }; - /// - /// let re = PikeVM::new( - /// r"^(?:(?P[a-z]+)|(?P[A-Z]+))(?P[0-9]+)$", - /// )?; - /// let mut cache = re.create_cache(); - /// let mut caps = Captures::matches(re.get_nfa().group_info().clone()); - /// - /// re.captures(&mut cache, "ABC123", &mut caps); - /// assert!(caps.is_match()); - /// assert_eq!(Some(Match::must(0, 0..6)), caps.get_match()); - /// // We didn't ask for capturing group offsets, so they aren't available. - /// assert_eq!(None, caps.get_group_by_name("lower")); - /// assert_eq!(None, caps.get_group_by_name("upper")); - /// assert_eq!(None, caps.get_group_by_name("digits")); - /// - /// # Ok::<(), Box>(()) - /// ``` - pub fn matches(group_info: GroupInfo) -> Captures { - // This is OK because we know there are at least this many slots, - // and GroupInfo construction guarantees that the number of slots fits - // into a usize. 
- let slots = group_info.pattern_len().checked_mul(2).unwrap(); - Captures { group_info, pid: None, slots: vec![None; slots] } - } - - /// Create new storage for only tracking which pattern matched. No offsets - /// are stored at all. - /// - /// It is unspecified behavior to use the returned `Captures` value in a - /// search with a `GroupInfo` other than the one that is provided to this - /// constructor. - /// - /// # Example - /// - /// This example shows that only the pattern that matched can be accessed - /// from a `Captures` value created via this constructor. - /// - /// ``` - /// use regex_automata::{ - /// nfa::thompson::pikevm::PikeVM, - /// util::captures::Captures, - /// PatternID, - /// }; - /// - /// let re = PikeVM::new_many(&[r"[a-z]+", r"[A-Z]+"])?; - /// let mut cache = re.create_cache(); - /// let mut caps = Captures::empty(re.get_nfa().group_info().clone()); - /// - /// re.captures(&mut cache, "aABCz", &mut caps); - /// assert!(caps.is_match()); - /// assert_eq!(Some(PatternID::must(0)), caps.pattern()); - /// // We didn't ask for any offsets, so they aren't available. - /// assert_eq!(None, caps.get_match()); - /// - /// re.captures(&mut cache, &"aABCz"[1..], &mut caps); - /// assert!(caps.is_match()); - /// assert_eq!(Some(PatternID::must(1)), caps.pattern()); - /// // We didn't ask for any offsets, so they aren't available. - /// assert_eq!(None, caps.get_match()); - /// - /// # Ok::<(), Box>(()) - /// ``` - pub fn empty(group_info: GroupInfo) -> Captures { - Captures { group_info, pid: None, slots: vec![] } - } - - /// Returns true if and only if this capturing group represents a match. - /// - /// This is a convenience routine for `caps.pattern().is_some()`. - /// - /// # Example - /// - /// When using the PikeVM (for example), the lightest weight way of - /// detecting whether a match exists is to create capturing groups that - /// only track the ID of the pattern that match (if any): - /// - /// ``` - /// use regex_automata::{ - /// nfa::thompson::pikevm::PikeVM, - /// util::captures::Captures, - /// }; - /// - /// let re = PikeVM::new(r"[a-z]+")?; - /// let mut cache = re.create_cache(); - /// let mut caps = Captures::empty(re.get_nfa().group_info().clone()); - /// - /// re.captures(&mut cache, "aABCz", &mut caps); - /// assert!(caps.is_match()); - /// - /// # Ok::<(), Box>(()) - /// ``` - #[inline] - pub fn is_match(&self) -> bool { - self.pid.is_some() - } - - /// Returns the identifier of the pattern that matched when this - /// capturing group represents a match. If no match was found, then this - /// always returns `None`. - /// - /// This returns a pattern ID in precisely the cases in which `is_match` - /// returns `true`. Similarly, the pattern ID returned is always the - /// same pattern ID found in the `Match` returned by `get_match`. 
- /// - /// # Example - /// - /// When using the PikeVM (for example), the lightest weight way of - /// detecting which pattern matched is to create capturing groups that only - /// track the ID of the pattern that match (if any): - /// - /// ``` - /// use regex_automata::{ - /// nfa::thompson::pikevm::PikeVM, - /// util::captures::Captures, - /// PatternID, - /// }; - /// - /// let re = PikeVM::new_many(&[r"[a-z]+", r"[A-Z]+"])?; - /// let mut cache = re.create_cache(); - /// let mut caps = Captures::empty(re.get_nfa().group_info().clone()); - /// - /// re.captures(&mut cache, "ABC", &mut caps); - /// assert_eq!(Some(PatternID::must(1)), caps.pattern()); - /// // Recall that offsets are only available when using a non-empty - /// // Captures value. So even though a match occurred, this returns None! - /// assert_eq!(None, caps.get_match()); - /// - /// # Ok::<(), Box>(()) - /// ``` - #[inline] - pub fn pattern(&self) -> Option { - self.pid - } - - /// Returns the pattern ID and the span of the match, if one occurred. - /// - /// This always returns `None` when `Captures` was created with - /// [`Captures::empty`], even if a match was found. - /// - /// If this routine returns a non-`None` value, then `is_match` is - /// guaranteed to return `true` and `pattern` is also guaranteed to return - /// a non-`None` value. - /// - /// # Example - /// - /// This example shows how to get the full match from a search: - /// - /// ``` - /// use regex_automata::{nfa::thompson::pikevm::PikeVM, Match}; - /// - /// let re = PikeVM::new_many(&[r"[a-z]+", r"[A-Z]+"])?; - /// let (mut cache, mut caps) = (re.create_cache(), re.create_captures()); - /// - /// re.captures(&mut cache, "ABC", &mut caps); - /// assert_eq!(Some(Match::must(1, 0..3)), caps.get_match()); - /// - /// # Ok::<(), Box>(()) - /// ``` - #[inline] - pub fn get_match(&self) -> Option { - Some(Match::new(self.pattern()?, self.get_group(0)?)) - } - - /// Returns the span of a capturing group match corresponding to the group - /// index given, only if both the overall pattern matched and the capturing - /// group participated in that match. - /// - /// This returns `None` if `index` is invalid. `index` is valid if and only - /// if it's less than [`Captures::group_len`] for the matching pattern. - /// - /// This always returns `None` when `Captures` was created with - /// [`Captures::empty`], even if a match was found. This also always - /// returns `None` for any `index > 0` when `Captures` was created with - /// [`Captures::matches`]. - /// - /// If this routine returns a non-`None` value, then `is_match` is - /// guaranteed to return `true`, `pattern` is guaranteed to return a - /// non-`None` value and `get_match` is guaranteed to return a non-`None` - /// value. - /// - /// By convention, the 0th capture group will always return the same - /// span as the span returned by `get_match`. This is because the 0th - /// capture group always corresponds to the entirety of the pattern's - /// match. (It is similarly always unnamed because it is implicit.) This - /// isn't necessarily true of all regex engines. For example, one can - /// hand-compile a [`thompson::NFA`](crate::nfa::thompson::NFA) via a - /// [`thompson::Builder`](crate::nfa::thompson::Builder), which isn't - /// technically forced to make the 0th capturing group always correspond to - /// the entire match. 
- /// - /// # Example - /// - /// This example shows how to get the capturing groups, by index, from a - /// match: - /// - /// ``` - /// # if cfg!(miri) { return Ok(()); } // miri takes too long - /// use regex_automata::{nfa::thompson::pikevm::PikeVM, Span, Match}; - /// - /// let re = PikeVM::new(r"^(?P\pL+)\s+(?P\pL+)$")?; - /// let (mut cache, mut caps) = (re.create_cache(), re.create_captures()); - /// - /// re.captures(&mut cache, "Bruce Springsteen", &mut caps); - /// assert_eq!(Some(Match::must(0, 0..17)), caps.get_match()); - /// assert_eq!(Some(Span::from(0..5)), caps.get_group(1)); - /// assert_eq!(Some(Span::from(6..17)), caps.get_group(2)); - /// // Looking for a non-existent capturing group will return None: - /// assert_eq!(None, caps.get_group(3)); - /// # // literals are too big for 32-bit usize: #1039 - /// # #[cfg(target_pointer_width = "64")] - /// assert_eq!(None, caps.get_group(9944060567225171988)); - /// - /// # Ok::<(), Box>(()) - /// ``` - #[inline] - pub fn get_group(&self, index: usize) -> Option { - let pid = self.pattern()?; - // There's a little bit of work needed to map captures to slots in the - // fully general case. But in the overwhelming common case of a single - // pattern, we can just do some simple arithmetic. - let (slot_start, slot_end) = if self.group_info().pattern_len() == 1 { - (index.checked_mul(2)?, index.checked_mul(2)?.checked_add(1)?) - } else { - self.group_info().slots(pid, index)? - }; - let start = self.slots.get(slot_start).copied()??; - let end = self.slots.get(slot_end).copied()??; - Some(Span { start: start.get(), end: end.get() }) - } - - /// Returns the span of a capturing group match corresponding to the group - /// name given, only if both the overall pattern matched and the capturing - /// group participated in that match. - /// - /// This returns `None` if `name` does not correspond to a valid capturing - /// group for the pattern that matched. - /// - /// This always returns `None` when `Captures` was created with - /// [`Captures::empty`], even if a match was found. This also always - /// returns `None` for any `index > 0` when `Captures` was created with - /// [`Captures::matches`]. - /// - /// If this routine returns a non-`None` value, then `is_match` is - /// guaranteed to return `true`, `pattern` is guaranteed to return a - /// non-`None` value and `get_match` is guaranteed to return a non-`None` - /// value. - /// - /// # Example - /// - /// This example shows how to get the capturing groups, by name, from a - /// match: - /// - /// ``` - /// # if cfg!(miri) { return Ok(()); } // miri takes too long - /// use regex_automata::{nfa::thompson::pikevm::PikeVM, Span, Match}; - /// - /// let re = PikeVM::new(r"^(?P\pL+)\s+(?P\pL+)$")?; - /// let (mut cache, mut caps) = (re.create_cache(), re.create_captures()); - /// - /// re.captures(&mut cache, "Bruce Springsteen", &mut caps); - /// assert_eq!(Some(Match::must(0, 0..17)), caps.get_match()); - /// assert_eq!(Some(Span::from(0..5)), caps.get_group_by_name("first")); - /// assert_eq!(Some(Span::from(6..17)), caps.get_group_by_name("last")); - /// // Looking for a non-existent capturing group will return None: - /// assert_eq!(None, caps.get_group_by_name("middle")); - /// - /// # Ok::<(), Box>(()) - /// ``` - pub fn get_group_by_name(&self, name: &str) -> Option { - let index = self.group_info().to_index(self.pattern()?, name)?; - self.get_group(index) - } - - /// Returns an iterator of possible spans for every capturing group in the - /// matching pattern. 
- /// - /// If this `Captures` value does not correspond to a match, then the - /// iterator returned yields no elements. - /// - /// Note that the iterator returned yields elements of type `Option`. - /// A span is present if and only if it corresponds to a capturing group - /// that participated in a match. - /// - /// # Example - /// - /// This example shows how to collect all capturing groups: - /// - /// ``` - /// # if cfg!(miri) { return Ok(()); } // miri takes too long - /// use regex_automata::{nfa::thompson::pikevm::PikeVM, Span}; - /// - /// let re = PikeVM::new( - /// // Matches first/last names, with an optional middle name. - /// r"^(?P\pL+)\s+(?:(?P\pL+)\s+)?(?P\pL+)$", - /// )?; - /// let (mut cache, mut caps) = (re.create_cache(), re.create_captures()); - /// - /// re.captures(&mut cache, "Harry James Potter", &mut caps); - /// assert!(caps.is_match()); - /// let groups: Vec> = caps.iter().collect(); - /// assert_eq!(groups, vec![ - /// Some(Span::from(0..18)), - /// Some(Span::from(0..5)), - /// Some(Span::from(6..11)), - /// Some(Span::from(12..18)), - /// ]); - /// - /// # Ok::<(), Box>(()) - /// ``` - /// - /// This example uses the same regex as the previous example, but with a - /// haystack that omits the middle name. This results in a capturing group - /// that is present in the elements yielded by the iterator but without a - /// match: - /// - /// ``` - /// # if cfg!(miri) { return Ok(()); } // miri takes too long - /// use regex_automata::{nfa::thompson::pikevm::PikeVM, Span}; - /// - /// let re = PikeVM::new( - /// // Matches first/last names, with an optional middle name. - /// r"^(?P\pL+)\s+(?:(?P\pL+)\s+)?(?P\pL+)$", - /// )?; - /// let (mut cache, mut caps) = (re.create_cache(), re.create_captures()); - /// - /// re.captures(&mut cache, "Harry Potter", &mut caps); - /// assert!(caps.is_match()); - /// let groups: Vec> = caps.iter().collect(); - /// assert_eq!(groups, vec![ - /// Some(Span::from(0..12)), - /// Some(Span::from(0..5)), - /// None, - /// Some(Span::from(6..12)), - /// ]); - /// - /// # Ok::<(), Box>(()) - /// ``` - pub fn iter(&self) -> CapturesPatternIter<'_> { - let names = self - .pattern() - .map_or(GroupInfoPatternNames::empty().enumerate(), |pid| { - self.group_info().pattern_names(pid).enumerate() - }); - CapturesPatternIter { caps: self, names } - } - - /// Return the total number of capturing groups for the matching pattern. - /// - /// If this `Captures` value does not correspond to a match, then this - /// always returns `0`. - /// - /// This always returns the same number of elements yielded by - /// [`Captures::iter`]. That is, the number includes capturing groups even - /// if they don't participate in the match. - /// - /// # Example - /// - /// This example shows how to count the total number of capturing groups - /// associated with a pattern. Notice that it includes groups that did not - /// participate in a match (just like `Captures::iter` does). - /// - /// ``` - /// # if cfg!(miri) { return Ok(()); } // miri takes too long - /// use regex_automata::nfa::thompson::pikevm::PikeVM; - /// - /// let re = PikeVM::new( - /// // Matches first/last names, with an optional middle name. 
- /// r"^(?P\pL+)\s+(?:(?P\pL+)\s+)?(?P\pL+)$", - /// )?; - /// let (mut cache, mut caps) = (re.create_cache(), re.create_captures()); - /// - /// re.captures(&mut cache, "Harry Potter", &mut caps); - /// assert_eq!(4, caps.group_len()); - /// - /// # Ok::<(), Box>(()) - /// ``` - pub fn group_len(&self) -> usize { - let pid = match self.pattern() { - None => return 0, - Some(pid) => pid, - }; - self.group_info().group_len(pid) - } - - /// Returns a reference to the underlying group info on which these - /// captures are based. - /// - /// The difference between `GroupInfo` and `Captures` is that the former - /// defines the structure of capturing groups where as the latter is what - /// stores the actual match information. So where as `Captures` only gives - /// you access to the current match, `GroupInfo` lets you query any - /// information about all capturing groups, even ones for patterns that - /// weren't involved in a match. - /// - /// Note that a `GroupInfo` uses reference counting internally, so it may - /// be cloned cheaply. - /// - /// # Example - /// - /// This example shows how to get all capturing group names from the - /// underlying `GroupInfo`. Notice that we don't even need to run a - /// search. - /// - /// ``` - /// use regex_automata::{nfa::thompson::pikevm::PikeVM, PatternID}; - /// - /// let re = PikeVM::new_many(&[ - /// r"(?Pa)", - /// r"(a)(b)", - /// r"ab", - /// r"(?Pa)(?Pa)", - /// r"(?Pz)", - /// ])?; - /// let caps = re.create_captures(); - /// - /// let expected = vec![ - /// (PatternID::must(0), 0, None), - /// (PatternID::must(0), 1, Some("foo")), - /// (PatternID::must(1), 0, None), - /// (PatternID::must(1), 1, None), - /// (PatternID::must(1), 2, None), - /// (PatternID::must(2), 0, None), - /// (PatternID::must(3), 0, None), - /// (PatternID::must(3), 1, Some("bar")), - /// (PatternID::must(3), 2, Some("quux")), - /// (PatternID::must(4), 0, None), - /// (PatternID::must(4), 1, Some("foo")), - /// ]; - /// // We could also just use 're.get_nfa().group_info()'. - /// let got: Vec<(PatternID, usize, Option<&str>)> = - /// caps.group_info().all_names().collect(); - /// assert_eq!(expected, got); - /// - /// # Ok::<(), Box>(()) - /// ``` - pub fn group_info(&self) -> &GroupInfo { - &self.group_info - } - - /// Interpolates the capture references in `replacement` with the - /// corresponding substrings in `haystack` matched by each reference. The - /// interpolated string is returned. - /// - /// See the [`interpolate` module](interpolate) for documentation on the - /// format of the replacement string. - /// - /// # Example - /// - /// This example shows how to use interpolation, and also shows how it - /// can work with multi-pattern regexes. - /// - /// ``` - /// use regex_automata::{nfa::thompson::pikevm::PikeVM, PatternID}; - /// - /// let re = PikeVM::new_many(&[ - /// r"(?[0-9]{2})-(?[0-9]{2})-(?[0-9]{4})", - /// r"(?[0-9]{4})-(?[0-9]{2})-(?[0-9]{2})", - /// ])?; - /// let mut cache = re.create_cache(); - /// let mut caps = re.create_captures(); - /// - /// let replacement = "year=$year, month=$month, day=$day"; - /// - /// // This matches the first pattern. - /// let hay = "On 14-03-2010, I became a Tennessee lamb."; - /// re.captures(&mut cache, hay, &mut caps); - /// let result = caps.interpolate_string(hay, replacement); - /// assert_eq!("year=2010, month=03, day=14", result); - /// - /// // And this matches the second pattern. 
- /// let hay = "On 2010-03-14, I became a Tennessee lamb."; - /// re.captures(&mut cache, hay, &mut caps); - /// let result = caps.interpolate_string(hay, replacement); - /// assert_eq!("year=2010, month=03, day=14", result); - /// - /// # Ok::<(), Box>(()) - /// ``` - pub fn interpolate_string( - &self, - haystack: &str, - replacement: &str, - ) -> String { - let mut dst = String::new(); - self.interpolate_string_into(haystack, replacement, &mut dst); - dst - } - - /// Interpolates the capture references in `replacement` with the - /// corresponding substrings in `haystack` matched by each reference. The - /// interpolated string is written to `dst`. - /// - /// See the [`interpolate` module](interpolate) for documentation on the - /// format of the replacement string. - /// - /// # Example - /// - /// This example shows how to use interpolation, and also shows how it - /// can work with multi-pattern regexes. - /// - /// ``` - /// use regex_automata::{nfa::thompson::pikevm::PikeVM, PatternID}; - /// - /// let re = PikeVM::new_many(&[ - /// r"(?[0-9]{2})-(?[0-9]{2})-(?[0-9]{4})", - /// r"(?[0-9]{4})-(?[0-9]{2})-(?[0-9]{2})", - /// ])?; - /// let mut cache = re.create_cache(); - /// let mut caps = re.create_captures(); - /// - /// let replacement = "year=$year, month=$month, day=$day"; - /// - /// // This matches the first pattern. - /// let hay = "On 14-03-2010, I became a Tennessee lamb."; - /// re.captures(&mut cache, hay, &mut caps); - /// let mut dst = String::new(); - /// caps.interpolate_string_into(hay, replacement, &mut dst); - /// assert_eq!("year=2010, month=03, day=14", dst); - /// - /// // And this matches the second pattern. - /// let hay = "On 2010-03-14, I became a Tennessee lamb."; - /// re.captures(&mut cache, hay, &mut caps); - /// let mut dst = String::new(); - /// caps.interpolate_string_into(hay, replacement, &mut dst); - /// assert_eq!("year=2010, month=03, day=14", dst); - /// - /// # Ok::<(), Box>(()) - /// ``` - pub fn interpolate_string_into( - &self, - haystack: &str, - replacement: &str, - dst: &mut String, - ) { - interpolate::string( - replacement, - |index, dst| { - let span = match self.get_group(index) { - None => return, - Some(span) => span, - }; - dst.push_str(&haystack[span]); - }, - |name| self.group_info().to_index(self.pattern()?, name), - dst, - ); - } - - /// Interpolates the capture references in `replacement` with the - /// corresponding substrings in `haystack` matched by each reference. The - /// interpolated byte string is returned. - /// - /// See the [`interpolate` module](interpolate) for documentation on the - /// format of the replacement string. - /// - /// # Example - /// - /// This example shows how to use interpolation, and also shows how it - /// can work with multi-pattern regexes. - /// - /// ``` - /// use regex_automata::{nfa::thompson::pikevm::PikeVM, PatternID}; - /// - /// let re = PikeVM::new_many(&[ - /// r"(?[0-9]{2})-(?[0-9]{2})-(?[0-9]{4})", - /// r"(?[0-9]{4})-(?[0-9]{2})-(?[0-9]{2})", - /// ])?; - /// let mut cache = re.create_cache(); - /// let mut caps = re.create_captures(); - /// - /// let replacement = b"year=$year, month=$month, day=$day"; - /// - /// // This matches the first pattern. - /// let hay = b"On 14-03-2010, I became a Tennessee lamb."; - /// re.captures(&mut cache, hay, &mut caps); - /// let result = caps.interpolate_bytes(hay, replacement); - /// assert_eq!(&b"year=2010, month=03, day=14"[..], result); - /// - /// // And this matches the second pattern. 
- /// let hay = b"On 2010-03-14, I became a Tennessee lamb."; - /// re.captures(&mut cache, hay, &mut caps); - /// let result = caps.interpolate_bytes(hay, replacement); - /// assert_eq!(&b"year=2010, month=03, day=14"[..], result); - /// - /// # Ok::<(), Box>(()) - /// ``` - pub fn interpolate_bytes( - &self, - haystack: &[u8], - replacement: &[u8], - ) -> Vec { - let mut dst = vec![]; - self.interpolate_bytes_into(haystack, replacement, &mut dst); - dst - } - - /// Interpolates the capture references in `replacement` with the - /// corresponding substrings in `haystack` matched by each reference. The - /// interpolated byte string is written to `dst`. - /// - /// See the [`interpolate` module](interpolate) for documentation on the - /// format of the replacement string. - /// - /// # Example - /// - /// This example shows how to use interpolation, and also shows how it - /// can work with multi-pattern regexes. - /// - /// ``` - /// use regex_automata::{nfa::thompson::pikevm::PikeVM, PatternID}; - /// - /// let re = PikeVM::new_many(&[ - /// r"(?[0-9]{2})-(?[0-9]{2})-(?[0-9]{4})", - /// r"(?[0-9]{4})-(?[0-9]{2})-(?[0-9]{2})", - /// ])?; - /// let mut cache = re.create_cache(); - /// let mut caps = re.create_captures(); - /// - /// let replacement = b"year=$year, month=$month, day=$day"; - /// - /// // This matches the first pattern. - /// let hay = b"On 14-03-2010, I became a Tennessee lamb."; - /// re.captures(&mut cache, hay, &mut caps); - /// let mut dst = vec![]; - /// caps.interpolate_bytes_into(hay, replacement, &mut dst); - /// assert_eq!(&b"year=2010, month=03, day=14"[..], dst); - /// - /// // And this matches the second pattern. - /// let hay = b"On 2010-03-14, I became a Tennessee lamb."; - /// re.captures(&mut cache, hay, &mut caps); - /// let mut dst = vec![]; - /// caps.interpolate_bytes_into(hay, replacement, &mut dst); - /// assert_eq!(&b"year=2010, month=03, day=14"[..], dst); - /// - /// # Ok::<(), Box>(()) - /// ``` - pub fn interpolate_bytes_into( - &self, - haystack: &[u8], - replacement: &[u8], - dst: &mut Vec, - ) { - interpolate::bytes( - replacement, - |index, dst| { - let span = match self.get_group(index) { - None => return, - Some(span) => span, - }; - dst.extend_from_slice(&haystack[span]); - }, - |name| self.group_info().to_index(self.pattern()?, name), - dst, - ); - } - - /// This is a convenience routine for extracting the substrings - /// corresponding to matching capture groups in the given `haystack`. The - /// `haystack` should be the same substring used to find the match spans in - /// this `Captures` value. - /// - /// This is identical to [`Captures::extract_bytes`], except it works with - /// `&str` instead of `&[u8]`. - /// - /// # Panics - /// - /// This panics if the number of explicit matching groups in this - /// `Captures` value is less than `N`. This also panics if this `Captures` - /// value does not correspond to a match. - /// - /// Note that this does *not* panic if the number of explicit matching - /// groups is bigger than `N`. In that case, only the first `N` matching - /// groups are extracted. 
- /// - /// # Example - /// - /// ``` - /// use regex_automata::nfa::thompson::pikevm::PikeVM; - /// - /// let re = PikeVM::new(r"([0-9]{4})-([0-9]{2})-([0-9]{2})")?; - /// let mut cache = re.create_cache(); - /// let mut caps = re.create_captures(); - /// - /// let hay = "On 2010-03-14, I became a Tennessee lamb."; - /// re.captures(&mut cache, hay, &mut caps); - /// assert!(caps.is_match()); - /// let (full, [year, month, day]) = caps.extract(hay); - /// assert_eq!("2010-03-14", full); - /// assert_eq!("2010", year); - /// assert_eq!("03", month); - /// assert_eq!("14", day); - /// - /// // We can also ask for fewer than all capture groups. - /// let (full, [year]) = caps.extract(hay); - /// assert_eq!("2010-03-14", full); - /// assert_eq!("2010", year); - /// - /// # Ok::<(), Box>(()) - /// ``` - pub fn extract<'h, const N: usize>( - &self, - haystack: &'h str, - ) -> (&'h str, [&'h str; N]) { - let mut matched = self.iter().flatten(); - let whole_match = &haystack[matched.next().expect("a match")]; - let group_matches = [0; N].map(|_| { - let sp = matched.next().expect("too few matching groups"); - &haystack[sp] - }); - (whole_match, group_matches) - } - - /// This is a convenience routine for extracting the substrings - /// corresponding to matching capture groups in the given `haystack`. The - /// `haystack` should be the same substring used to find the match spans in - /// this `Captures` value. - /// - /// This is identical to [`Captures::extract`], except it works with - /// `&[u8]` instead of `&str`. - /// - /// # Panics - /// - /// This panics if the number of explicit matching groups in this - /// `Captures` value is less than `N`. This also panics if this `Captures` - /// value does not correspond to a match. - /// - /// Note that this does *not* panic if the number of explicit matching - /// groups is bigger than `N`. In that case, only the first `N` matching - /// groups are extracted. - /// - /// # Example - /// - /// ``` - /// use regex_automata::nfa::thompson::pikevm::PikeVM; - /// - /// let re = PikeVM::new(r"([0-9]{4})-([0-9]{2})-([0-9]{2})")?; - /// let mut cache = re.create_cache(); - /// let mut caps = re.create_captures(); - /// - /// let hay = b"On 2010-03-14, I became a Tennessee lamb."; - /// re.captures(&mut cache, hay, &mut caps); - /// assert!(caps.is_match()); - /// let (full, [year, month, day]) = caps.extract_bytes(hay); - /// assert_eq!(b"2010-03-14", full); - /// assert_eq!(b"2010", year); - /// assert_eq!(b"03", month); - /// assert_eq!(b"14", day); - /// - /// // We can also ask for fewer than all capture groups. - /// let (full, [year]) = caps.extract_bytes(hay); - /// assert_eq!(b"2010-03-14", full); - /// assert_eq!(b"2010", year); - /// - /// # Ok::<(), Box>(()) - /// ``` - pub fn extract_bytes<'h, const N: usize>( - &self, - haystack: &'h [u8], - ) -> (&'h [u8], [&'h [u8]; N]) { - let mut matched = self.iter().flatten(); - let whole_match = &haystack[matched.next().expect("a match")]; - let group_matches = [0; N].map(|_| { - let sp = matched.next().expect("too few matching groups"); - &haystack[sp] - }); - (whole_match, group_matches) - } -} - -/// Lower level "slot" oriented APIs. One does not typically need to use these -/// when executing a search. They are instead mostly intended for folks that -/// are writing their own regex engine while reusing this `Captures` type. -impl Captures { - /// Clear this `Captures` value. - /// - /// After clearing, all slots inside this `Captures` value will be set to - /// `None`. 
Similarly, any pattern ID that it was previously associated - /// with (for a match) is erased. - /// - /// It is not usually necessary to call this routine. Namely, a `Captures` - /// value only provides high level access to the capturing groups of the - /// pattern that matched, and only low level access to individual slots. - /// Thus, even if slots corresponding to groups that aren't associated - /// with the matching pattern are set, then it won't impact the higher - /// level APIs. Namely, higher level APIs like [`Captures::get_group`] will - /// return `None` if no pattern ID is present, even if there are spans set - /// in the underlying slots. - /// - /// Thus, to "clear" a `Captures` value of a match, it is usually only - /// necessary to call [`Captures::set_pattern`] with `None`. - /// - /// # Example - /// - /// This example shows what happens when a `Captures` value is cleared. - /// - /// ``` - /// # if cfg!(miri) { return Ok(()); } // miri takes too long - /// use regex_automata::nfa::thompson::pikevm::PikeVM; - /// - /// let re = PikeVM::new(r"^(?P\pL+)\s+(?P\pL+)$")?; - /// let (mut cache, mut caps) = (re.create_cache(), re.create_captures()); - /// - /// re.captures(&mut cache, "Bruce Springsteen", &mut caps); - /// assert!(caps.is_match()); - /// let slots: Vec> = - /// caps.slots().iter().map(|s| s.map(|x| x.get())).collect(); - /// // Note that the following ordering is considered an API guarantee. - /// assert_eq!(slots, vec![ - /// Some(0), - /// Some(17), - /// Some(0), - /// Some(5), - /// Some(6), - /// Some(17), - /// ]); - /// - /// // Now clear the slots. Everything is gone and it is no longer a match. - /// caps.clear(); - /// assert!(!caps.is_match()); - /// let slots: Vec> = - /// caps.slots().iter().map(|s| s.map(|x| x.get())).collect(); - /// assert_eq!(slots, vec![ - /// None, - /// None, - /// None, - /// None, - /// None, - /// None, - /// ]); - /// - /// # Ok::<(), Box>(()) - /// ``` - #[inline] - pub fn clear(&mut self) { - self.pid = None; - for slot in self.slots.iter_mut() { - *slot = None; - } - } - - /// Set the pattern on this `Captures` value. - /// - /// When the pattern ID is `None`, then this `Captures` value does not - /// correspond to a match (`is_match` will return `false`). Otherwise, it - /// corresponds to a match. - /// - /// This is useful in search implementations where you might want to - /// initially call `set_pattern(None)` in order to avoid the cost of - /// calling `clear()` if it turns out to not be necessary. - /// - /// # Example - /// - /// This example shows that `set_pattern` merely overwrites the pattern ID. - /// It does not actually change the underlying slot values. - /// - /// ``` - /// # if cfg!(miri) { return Ok(()); } // miri takes too long - /// use regex_automata::nfa::thompson::pikevm::PikeVM; - /// - /// let re = PikeVM::new(r"^(?P\pL+)\s+(?P\pL+)$")?; - /// let (mut cache, mut caps) = (re.create_cache(), re.create_captures()); - /// - /// re.captures(&mut cache, "Bruce Springsteen", &mut caps); - /// assert!(caps.is_match()); - /// assert!(caps.pattern().is_some()); - /// let slots: Vec> = - /// caps.slots().iter().map(|s| s.map(|x| x.get())).collect(); - /// // Note that the following ordering is considered an API guarantee. - /// assert_eq!(slots, vec![ - /// Some(0), - /// Some(17), - /// Some(0), - /// Some(5), - /// Some(6), - /// Some(17), - /// ]); - /// - /// // Now set the pattern to None. Note that the slot values remain. 
- /// caps.set_pattern(None); - /// assert!(!caps.is_match()); - /// assert!(!caps.pattern().is_some()); - /// let slots: Vec> = - /// caps.slots().iter().map(|s| s.map(|x| x.get())).collect(); - /// // Note that the following ordering is considered an API guarantee. - /// assert_eq!(slots, vec![ - /// Some(0), - /// Some(17), - /// Some(0), - /// Some(5), - /// Some(6), - /// Some(17), - /// ]); - /// - /// # Ok::<(), Box>(()) - /// ``` - #[inline] - pub fn set_pattern(&mut self, pid: Option) { - self.pid = pid; - } - - /// Returns the underlying slots, where each slot stores a single offset. - /// - /// Every matching capturing group generally corresponds to two slots: one - /// slot for the starting position and another for the ending position. - /// Typically, either both are present or neither are. (The weasel word - /// "typically" is used here because it really depends on the regex engine - /// implementation. Every sensible regex engine likely adheres to this - /// invariant, and every regex engine in this crate is sensible.) - /// - /// Generally speaking, callers should prefer to use higher level routines - /// like [`Captures::get_match`] or [`Captures::get_group`]. - /// - /// An important note here is that a regex engine may not reset all of the - /// slots to `None` values when no match occurs, or even when a match of - /// a different pattern occurs. But this depends on how the regex engine - /// implementation deals with slots. - /// - /// # Example - /// - /// This example shows how to get the underlying slots from a regex match. - /// - /// ``` - /// use regex_automata::{ - /// nfa::thompson::pikevm::PikeVM, - /// util::primitives::{PatternID, NonMaxUsize}, - /// }; - /// - /// let re = PikeVM::new_many(&[ - /// r"[a-z]+", - /// r"[0-9]+", - /// ])?; - /// let (mut cache, mut caps) = (re.create_cache(), re.create_captures()); - /// - /// re.captures(&mut cache, "123", &mut caps); - /// assert_eq!(Some(PatternID::must(1)), caps.pattern()); - /// // Note that the only guarantee we have here is that slots 2 and 3 - /// // are set to correct values. The contents of the first two slots are - /// // unspecified since the 0th pattern did not match. - /// let expected = &[ - /// None, - /// None, - /// NonMaxUsize::new(0), - /// NonMaxUsize::new(3), - /// ]; - /// assert_eq!(expected, caps.slots()); - /// - /// # Ok::<(), Box>(()) - /// ``` - #[inline] - pub fn slots(&self) -> &[Option] { - &self.slots - } - - /// Returns the underlying slots as a mutable slice, where each slot stores - /// a single offset. - /// - /// This tends to be most useful for regex engine implementations for - /// writing offsets for matching capturing groups to slots. - /// - /// See [`Captures::slots`] for more information about slots. - #[inline] - pub fn slots_mut(&mut self) -> &mut [Option] { - &mut self.slots - } -} - -impl core::fmt::Debug for Captures { - fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { - let mut dstruct = f.debug_struct("Captures"); - dstruct.field("pid", &self.pid); - if let Some(pid) = self.pid { - dstruct.field("spans", &CapturesDebugMap { pid, caps: self }); - } - dstruct.finish() - } -} - -/// A little helper type to provide a nice map-like debug representation for -/// our capturing group spans. 
-struct CapturesDebugMap<'a> { - pid: PatternID, - caps: &'a Captures, -} - -impl<'a> core::fmt::Debug for CapturesDebugMap<'a> { - fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { - struct Key<'a>(usize, Option<&'a str>); - - impl<'a> core::fmt::Debug for Key<'a> { - fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { - write!(f, "{}", self.0)?; - if let Some(name) = self.1 { - write!(f, "/{name:?}")?; - } - Ok(()) - } - } - - let mut map = f.debug_map(); - let names = self.caps.group_info().pattern_names(self.pid); - for (group_index, maybe_name) in names.enumerate() { - let key = Key(group_index, maybe_name); - match self.caps.get_group(group_index) { - None => map.entry(&key, &None::<()>), - Some(span) => map.entry(&key, &span), - }; - } - map.finish() - } -} - -/// An iterator over all capturing groups in a `Captures` value. -/// -/// This iterator includes capturing groups that did not participate in a -/// match. See the [`Captures::iter`] method documentation for more details -/// and examples. -/// -/// The lifetime parameter `'a` refers to the lifetime of the underlying -/// `Captures` value. -#[derive(Clone, Debug)] -pub struct CapturesPatternIter<'a> { - caps: &'a Captures, - names: core::iter::Enumerate>, -} - -impl<'a> Iterator for CapturesPatternIter<'a> { - type Item = Option; - - fn next(&mut self) -> Option> { - let (group_index, _) = self.names.next()?; - Some(self.caps.get_group(group_index)) - } - - fn size_hint(&self) -> (usize, Option) { - self.names.size_hint() - } - - fn count(self) -> usize { - self.names.count() - } -} - -impl<'a> ExactSizeIterator for CapturesPatternIter<'a> {} -impl<'a> core::iter::FusedIterator for CapturesPatternIter<'a> {} - -/// Represents information about capturing groups in a compiled regex. -/// -/// The information encapsulated by this type consists of the following. For -/// each pattern: -/// -/// * A map from every capture group name to its corresponding capture group -/// index. -/// * A map from every capture group index to its corresponding capture group -/// name. -/// * A map from capture group index to its corresponding slot index. A slot -/// refers to one half of a capturing group. That is, a capture slot is either -/// the start or end of a capturing group. A slot is usually the mechanism -/// by which a regex engine records offsets for each capturing group during a -/// search. -/// -/// A `GroupInfo` uses reference counting internally and is thus cheap to -/// clone. -/// -/// # Mapping from capture groups to slots -/// -/// One of the main responsibilities of a `GroupInfo` is to build a mapping -/// from `(PatternID, u32)` (where the `u32` is a capture index) to something -/// called a "slot." As mentioned above, a slot refers to one half of a -/// capturing group. Both combined provide the start and end offsets of -/// a capturing group that participated in a match. -/// -/// **The mapping between group indices and slots is an API guarantee.** That -/// is, the mapping won't change within a semver compatible release. -/// -/// Slots exist primarily because this is a convenient mechanism by which -/// regex engines report group offsets at search time. For example, the -/// [`nfa::thompson::State::Capture`](crate::nfa::thompson::State::Capture) -/// NFA state includes the slot index. When a regex engine transitions through -/// this state, it will likely use the slot index to write the current haystack -/// offset to some region of memory. 
When a match is found, those slots are -/// then reported to the caller, typically via a convenient abstraction like a -/// [`Captures`] value. -/// -/// Because this crate provides first class support for multi-pattern regexes, -/// and because of some performance related reasons, the mapping between -/// capturing groups and slots is a little complex. However, in the case of a -/// single pattern, the mapping can be described very simply: for all capture -/// group indices `i`, its corresponding slots are at `i * 2` and `i * 2 + 1`. -/// Notice that the pattern ID isn't involved at all here, because it only -/// applies to a single-pattern regex, it is therefore always `0`. -/// -/// In the multi-pattern case, the mapping is a bit more complicated. To talk -/// about it, we must define what we mean by "implicit" vs "explicit" -/// capturing groups: -/// -/// * An **implicit** capturing group refers to the capturing group that is -/// present for every pattern automatically, and corresponds to the overall -/// match of a pattern. Every pattern has precisely one implicit capturing -/// group. It is always unnamed and it always corresponds to the capture group -/// index `0`. -/// * An **explicit** capturing group refers to any capturing group that -/// appears in the concrete syntax of the pattern. (Or, if an NFA was hand -/// built without any concrete syntax, it refers to any capturing group with an -/// index greater than `0`.) -/// -/// Some examples: -/// -/// * `\w+` has one implicit capturing group and zero explicit capturing -/// groups. -/// * `(\w+)` has one implicit group and one explicit group. -/// * `foo(\d+)(?:\pL+)(\d+)` has one implicit group and two explicit groups. -/// -/// Turning back to the slot mapping, we can now state it as follows: -/// -/// * Given a pattern ID `pid`, the slots for its implicit group are always -/// at `pid * 2` and `pid * 2 + 1`. -/// * Given a pattern ID `0`, the slots for its explicit groups start -/// at `group_info.pattern_len() * 2`. -/// * Given a pattern ID `pid > 0`, the slots for its explicit groups start -/// immediately following where the slots for the explicit groups of `pid - 1` -/// end. -/// -/// In particular, while there is a concrete formula one can use to determine -/// where the slots for the implicit group of any pattern are, there is no -/// general formula for determining where the slots for explicit capturing -/// groups are. This is because each pattern can contain a different number -/// of groups. -/// -/// The intended way of getting the slots for a particular capturing group -/// (whether implicit or explicit) is via the [`GroupInfo::slot`] or -/// [`GroupInfo::slots`] method. -/// -/// See below for a concrete example of how capturing groups get mapped to -/// slots. -/// -/// # Example -/// -/// This example shows how to build a new `GroupInfo` and query it for -/// information. -/// -/// ``` -/// use regex_automata::util::{captures::GroupInfo, primitives::PatternID}; -/// -/// let info = GroupInfo::new(vec![ -/// vec![None, Some("foo")], -/// vec![None], -/// vec![None, None, None, Some("bar"), None], -/// vec![None, None, Some("foo")], -/// ])?; -/// // The number of patterns being tracked. -/// assert_eq!(4, info.pattern_len()); -/// // We can query the number of groups for any pattern. 
-/// assert_eq!(2, info.group_len(PatternID::must(0))); -/// assert_eq!(1, info.group_len(PatternID::must(1))); -/// assert_eq!(5, info.group_len(PatternID::must(2))); -/// assert_eq!(3, info.group_len(PatternID::must(3))); -/// // An invalid pattern always has zero groups. -/// assert_eq!(0, info.group_len(PatternID::must(999))); -/// // 2 slots per group -/// assert_eq!(22, info.slot_len()); -/// -/// // We can map a group index for a particular pattern to its name, if -/// // one exists. -/// assert_eq!(Some("foo"), info.to_name(PatternID::must(3), 2)); -/// assert_eq!(None, info.to_name(PatternID::must(2), 4)); -/// // Or map a name to its group index. -/// assert_eq!(Some(1), info.to_index(PatternID::must(0), "foo")); -/// assert_eq!(Some(2), info.to_index(PatternID::must(3), "foo")); -/// -/// # Ok::<(), Box>(()) -/// ``` -/// -/// # Example: mapping from capture groups to slots -/// -/// This example shows the specific mapping from capture group indices for -/// each pattern to their corresponding slots. The slot values shown in this -/// example are considered an API guarantee. -/// -/// ``` -/// use regex_automata::util::{captures::GroupInfo, primitives::PatternID}; -/// -/// let info = GroupInfo::new(vec![ -/// vec![None, Some("foo")], -/// vec![None], -/// vec![None, None, None, Some("bar"), None], -/// vec![None, None, Some("foo")], -/// ])?; -/// -/// // We first show the slots for each pattern's implicit group. -/// assert_eq!(Some((0, 1)), info.slots(PatternID::must(0), 0)); -/// assert_eq!(Some((2, 3)), info.slots(PatternID::must(1), 0)); -/// assert_eq!(Some((4, 5)), info.slots(PatternID::must(2), 0)); -/// assert_eq!(Some((6, 7)), info.slots(PatternID::must(3), 0)); -/// -/// // And now we show the slots for each pattern's explicit group. -/// assert_eq!(Some((8, 9)), info.slots(PatternID::must(0), 1)); -/// assert_eq!(Some((10, 11)), info.slots(PatternID::must(2), 1)); -/// assert_eq!(Some((12, 13)), info.slots(PatternID::must(2), 2)); -/// assert_eq!(Some((14, 15)), info.slots(PatternID::must(2), 3)); -/// assert_eq!(Some((16, 17)), info.slots(PatternID::must(2), 4)); -/// assert_eq!(Some((18, 19)), info.slots(PatternID::must(3), 1)); -/// assert_eq!(Some((20, 21)), info.slots(PatternID::must(3), 2)); -/// -/// // Asking for the slots for an invalid pattern ID or even for an invalid -/// // group index for a specific pattern will return None. So for example, -/// // you're guaranteed to not get the slots for a different pattern than the -/// // one requested. -/// assert_eq!(None, info.slots(PatternID::must(5), 0)); -/// assert_eq!(None, info.slots(PatternID::must(1), 1)); -/// -/// # Ok::<(), Box>(()) -/// ``` -#[derive(Clone, Debug, Default)] -pub struct GroupInfo(Arc); - -impl GroupInfo { - /// Creates a new group info from a sequence of patterns, where each - /// sequence of patterns yields a sequence of possible group names. The - /// index of each pattern in the sequence corresponds to its `PatternID`, - /// and the index of each group in each pattern's sequence corresponds to - /// its corresponding group index. - /// - /// While this constructor is very generic and therefore perhaps hard to - /// chew on, an example of a valid concrete type that can be passed to - /// this constructor is `Vec>>`. The outer `Vec` - /// corresponds to the patterns, i.e., one `Vec>` per - /// pattern. The inner `Vec` corresponds to the capturing groups for - /// each pattern. The `Option` corresponds to the name of the - /// capturing group, if present. 
- /// - /// It is legal to pass an empty iterator to this constructor. It will - /// return an empty group info with zero slots. An empty group info is - /// useful for cases where you have no patterns or for cases where slots - /// aren't being used at all (e.g., for most DFAs in this crate). - /// - /// # Errors - /// - /// This constructor returns an error if the given capturing groups are - /// invalid in some way. Those reasons include, but are not necessarily - /// limited to: - /// - /// * Too many patterns (i.e., `PatternID` would overflow). - /// * Too many capturing groups (e.g., `u32` would overflow). - /// * A pattern is given that has no capturing groups. (All patterns must - /// have at least an implicit capturing group at index `0`.) - /// * The capturing group at index `0` has a name. It must be unnamed. - /// * There are duplicate capturing group names within the same pattern. - /// (Multiple capturing groups with the same name may exist, but they - /// must be in different patterns.) - /// - /// An example below shows how to trigger some of the above error - /// conditions. - /// - /// # Example - /// - /// This example shows how to build a new `GroupInfo` and query it for - /// information. - /// - /// ``` - /// use regex_automata::util::captures::GroupInfo; - /// - /// let info = GroupInfo::new(vec![ - /// vec![None, Some("foo")], - /// vec![None], - /// vec![None, None, None, Some("bar"), None], - /// vec![None, None, Some("foo")], - /// ])?; - /// // The number of patterns being tracked. - /// assert_eq!(4, info.pattern_len()); - /// // 2 slots per group - /// assert_eq!(22, info.slot_len()); - /// - /// # Ok::<(), Box>(()) - /// ``` - /// - /// # Example: empty `GroupInfo` - /// - /// This example shows how to build a new `GroupInfo` and query it for - /// information. - /// - /// ``` - /// use regex_automata::util::captures::GroupInfo; - /// - /// let info = GroupInfo::empty(); - /// // Everything is zero. - /// assert_eq!(0, info.pattern_len()); - /// assert_eq!(0, info.slot_len()); - /// - /// # Ok::<(), Box>(()) - /// ``` - /// - /// # Example: error conditions - /// - /// This example shows how to provoke some of the ways in which building - /// a `GroupInfo` can fail. - /// - /// ``` - /// use regex_automata::util::captures::GroupInfo; - /// - /// // Either the group info is empty, or all patterns must have at least - /// // one capturing group. - /// assert!(GroupInfo::new(vec![ - /// vec![None, Some("a")], // ok - /// vec![None], // ok - /// vec![], // not ok - /// ]).is_err()); - /// // Note that building an empty group info is OK. - /// assert!(GroupInfo::new(Vec::>>::new()).is_ok()); - /// - /// // The first group in each pattern must correspond to an implicit - /// // anonymous group. i.e., One that is not named. By convention, this - /// // group corresponds to the overall match of a regex. Every other group - /// // in a pattern is explicit and optional. - /// assert!(GroupInfo::new(vec![vec![Some("foo")]]).is_err()); - /// - /// // There must not be duplicate group names within the same pattern. - /// assert!(GroupInfo::new(vec![ - /// vec![None, Some("foo"), Some("foo")], - /// ]).is_err()); - /// // But duplicate names across distinct patterns is OK. - /// assert!(GroupInfo::new(vec![ - /// vec![None, Some("foo")], - /// vec![None, Some("foo")], - /// ]).is_ok()); - /// - /// # Ok::<(), Box>(()) - /// ``` - /// - /// There are other ways for building a `GroupInfo` to fail but are - /// difficult to show. 
For example, if the number of patterns given would
-    /// overflow `PatternID`.
-    pub fn new<P, G, N>(pattern_groups: P) -> Result<GroupInfo, GroupInfoError>
-    where
-        P: IntoIterator<Item = G>,
-        G: IntoIterator<Item = Option<N>>,
-        N: AsRef<str>,
-    {
-        let mut group_info = GroupInfoInner {
-            slot_ranges: vec![],
-            name_to_index: vec![],
-            index_to_name: vec![],
-            memory_extra: 0,
-        };
-        for (pattern_index, groups) in pattern_groups.into_iter().enumerate() {
-            // If we can't convert the pattern index to an ID, then the caller
-            // tried to build capture info for too many patterns.
-            let pid = PatternID::new(pattern_index)
-                .map_err(GroupInfoError::too_many_patterns)?;
-
-            let mut groups_iter = groups.into_iter().enumerate();
-            match groups_iter.next() {
-                None => return Err(GroupInfoError::missing_groups(pid)),
-                Some((_, Some(_))) => {
-                    return Err(GroupInfoError::first_must_be_unnamed(pid))
-                }
-                Some((_, None)) => {}
-            }
-            group_info.add_first_group(pid);
-            // Now iterate over the rest, which correspond to all of the
-            // (conventionally) explicit capture groups in a regex pattern.
-            for (group_index, maybe_name) in groups_iter {
-                // Just like for patterns, if the group index can't be
-                // converted to a "small" index, then the caller has given too
-                // many groups for a particular pattern.
-                let group = SmallIndex::new(group_index).map_err(|_| {
-                    GroupInfoError::too_many_groups(pid, group_index)
-                })?;
-                group_info.add_explicit_group(pid, group, maybe_name)?;
-            }
-        }
-        group_info.fixup_slot_ranges()?;
-        group_info.slot_ranges.shrink_to_fit();
-        group_info.name_to_index.shrink_to_fit();
-        group_info.index_to_name.shrink_to_fit();
-        Ok(GroupInfo(Arc::new(group_info)))
-    }
-
-    /// This creates an empty `GroupInfo`.
-    ///
-    /// This is a convenience routine for calling `GroupInfo::new` with an
-    /// iterator that yields no elements.
-    ///
-    /// # Example
-    ///
-    /// This example shows how to build a new empty `GroupInfo` and query it
-    /// for information.
-    ///
-    /// ```
-    /// use regex_automata::util::captures::GroupInfo;
-    ///
-    /// let info = GroupInfo::empty();
-    /// // Everything is zero.
-    /// assert_eq!(0, info.pattern_len());
-    /// assert_eq!(0, info.all_group_len());
-    /// assert_eq!(0, info.slot_len());
-    ///
-    /// # Ok::<(), Box<dyn std::error::Error>>(())
-    /// ```
-    pub fn empty() -> GroupInfo {
-        GroupInfo::new(core::iter::empty::<[Option<&str>; 0]>())
-            .expect("empty group info is always valid")
-    }
-
-    /// Return the capture group index corresponding to the given name in the
-    /// given pattern. If no such capture group name exists in the given
-    /// pattern, then this returns `None`.
-    ///
-    /// If the given pattern ID is invalid, then this returns `None`.
-    ///
-    /// This also returns `None` for all inputs if these captures are empty
-    /// (e.g., built from an empty [`GroupInfo`]). To check whether captures
-    /// are present for a specific pattern, use [`GroupInfo::group_len`].
-    ///
-    /// # Example
-    ///
-    /// This example shows how to find the capture index for the given pattern
-    /// and group name.
-    ///
-    /// Remember that capture indices are relative to the pattern, such that
-    /// the same capture index value may refer to different capturing groups
-    /// for distinct patterns.
- /// - /// ``` - /// # if cfg!(miri) { return Ok(()); } // miri takes too long - /// use regex_automata::{nfa::thompson::NFA, PatternID}; - /// - /// let (pid0, pid1) = (PatternID::must(0), PatternID::must(1)); - /// - /// let nfa = NFA::new_many(&[ - /// r"a(?P\w+)z(?P\s+)", - /// r"a(?P\d+)z", - /// ])?; - /// let groups = nfa.group_info(); - /// assert_eq!(Some(2), groups.to_index(pid0, "foo")); - /// // Recall that capture index 0 is always unnamed and refers to the - /// // entire pattern. So the first capturing group present in the pattern - /// // itself always starts at index 1. - /// assert_eq!(Some(1), groups.to_index(pid1, "foo")); - /// - /// // And if a name does not exist for a particular pattern, None is - /// // returned. - /// assert!(groups.to_index(pid0, "quux").is_some()); - /// assert!(groups.to_index(pid1, "quux").is_none()); - /// - /// # Ok::<(), Box>(()) - /// ``` - #[inline] - pub fn to_index(&self, pid: PatternID, name: &str) -> Option { - let indices = self.0.name_to_index.get(pid.as_usize())?; - indices.get(name).cloned().map(|i| i.as_usize()) - } - - /// Return the capture name for the given index and given pattern. If the - /// corresponding group does not have a name, then this returns `None`. - /// - /// If the pattern ID is invalid, then this returns `None`. - /// - /// If the group index is invalid for the given pattern, then this returns - /// `None`. A group `index` is valid for a pattern `pid` in an `nfa` if and - /// only if `index < nfa.pattern_capture_len(pid)`. - /// - /// This also returns `None` for all inputs if these captures are empty - /// (e.g., built from an empty [`GroupInfo`]). To check whether captures - /// are present for a specific pattern, use [`GroupInfo::group_len`]. - /// - /// # Example - /// - /// This example shows how to find the capture group name for the given - /// pattern and group index. - /// - /// ``` - /// # if cfg!(miri) { return Ok(()); } // miri takes too long - /// use regex_automata::{nfa::thompson::NFA, PatternID}; - /// - /// let (pid0, pid1) = (PatternID::must(0), PatternID::must(1)); - /// - /// let nfa = NFA::new_many(&[ - /// r"a(?P\w+)z(\s+)x(\d+)", - /// r"a(\d+)z(?P\s+)", - /// ])?; - /// let groups = nfa.group_info(); - /// assert_eq!(None, groups.to_name(pid0, 0)); - /// assert_eq!(Some("foo"), groups.to_name(pid0, 1)); - /// assert_eq!(None, groups.to_name(pid0, 2)); - /// assert_eq!(None, groups.to_name(pid0, 3)); - /// - /// assert_eq!(None, groups.to_name(pid1, 0)); - /// assert_eq!(None, groups.to_name(pid1, 1)); - /// assert_eq!(Some("foo"), groups.to_name(pid1, 2)); - /// // '3' is not a valid capture index for the second pattern. - /// assert_eq!(None, groups.to_name(pid1, 3)); - /// - /// # Ok::<(), Box>(()) - /// ``` - #[inline] - pub fn to_name(&self, pid: PatternID, group_index: usize) -> Option<&str> { - let pattern_names = self.0.index_to_name.get(pid.as_usize())?; - pattern_names.get(group_index)?.as_deref() - } - - /// Return an iterator of all capture groups and their names (if present) - /// for a particular pattern. - /// - /// If the given pattern ID is invalid or if this `GroupInfo` is empty, - /// then the iterator yields no elements. - /// - /// The number of elements yielded by this iterator is always equal to - /// the result of calling [`GroupInfo::group_len`] with the same - /// `PatternID`. - /// - /// # Example - /// - /// This example shows how to get a list of all capture group names for - /// a particular pattern. 
- /// - /// ``` - /// use regex_automata::{nfa::thompson::NFA, PatternID}; - /// - /// let nfa = NFA::new(r"(a)(?Pb)(c)(d)(?Pe)")?; - /// // The first is the implicit group that is always unnamed. The next - /// // 5 groups are the explicit groups found in the concrete syntax above. - /// let expected = vec![None, None, Some("foo"), None, None, Some("bar")]; - /// let got: Vec> = - /// nfa.group_info().pattern_names(PatternID::ZERO).collect(); - /// assert_eq!(expected, got); - /// - /// // Using an invalid pattern ID will result in nothing yielded. - /// let got = nfa.group_info().pattern_names(PatternID::must(999)).count(); - /// assert_eq!(0, got); - /// - /// # Ok::<(), Box>(()) - /// ``` - #[inline] - pub fn pattern_names(&self, pid: PatternID) -> GroupInfoPatternNames<'_> { - GroupInfoPatternNames { - it: self - .0 - .index_to_name - .get(pid.as_usize()) - .map(|indices| indices.iter()) - .unwrap_or([].iter()), - } - } - - /// Return an iterator of all capture groups for all patterns supported by - /// this `GroupInfo`. Each item yielded is a triple of the group's pattern - /// ID, index in the pattern and the group's name, if present. - /// - /// # Example - /// - /// This example shows how to get a list of all capture groups found in - /// one NFA, potentially spanning multiple patterns. - /// - /// ``` - /// use regex_automata::{nfa::thompson::NFA, PatternID}; - /// - /// let nfa = NFA::new_many(&[ - /// r"(?Pa)", - /// r"a", - /// r"(a)", - /// ])?; - /// let expected = vec![ - /// (PatternID::must(0), 0, None), - /// (PatternID::must(0), 1, Some("foo")), - /// (PatternID::must(1), 0, None), - /// (PatternID::must(2), 0, None), - /// (PatternID::must(2), 1, None), - /// ]; - /// let got: Vec<(PatternID, usize, Option<&str>)> = - /// nfa.group_info().all_names().collect(); - /// assert_eq!(expected, got); - /// - /// # Ok::<(), Box>(()) - /// ``` - /// - /// Unlike other capturing group related routines, this routine doesn't - /// panic even if captures aren't enabled on this NFA: - /// - /// ``` - /// use regex_automata::nfa::thompson::{NFA, WhichCaptures}; - /// - /// let nfa = NFA::compiler() - /// .configure(NFA::config().which_captures(WhichCaptures::None)) - /// .build_many(&[ - /// r"(?Pa)", - /// r"a", - /// r"(a)", - /// ])?; - /// // When captures aren't enabled, there's nothing to return. - /// assert_eq!(0, nfa.group_info().all_names().count()); - /// - /// # Ok::<(), Box>(()) - /// ``` - #[inline] - pub fn all_names(&self) -> GroupInfoAllNames<'_> { - GroupInfoAllNames { - group_info: self, - pids: PatternID::iter(self.pattern_len()), - current_pid: None, - names: None, - } - } - - /// Returns the starting and ending slot corresponding to the given - /// capturing group for the given pattern. The ending slot is always one - /// more than the starting slot returned. - /// - /// Note that this is like [`GroupInfo::slot`], except that it also returns - /// the ending slot value for convenience. - /// - /// If either the pattern ID or the capture index is invalid, then this - /// returns None. - /// - /// # Example - /// - /// This example shows that the starting slots for the first capturing - /// group of each pattern are distinct. - /// - /// ``` - /// use regex_automata::{nfa::thompson::NFA, PatternID}; - /// - /// let nfa = NFA::new_many(&["a", "b"])?; - /// assert_ne!( - /// nfa.group_info().slots(PatternID::must(0), 0), - /// nfa.group_info().slots(PatternID::must(1), 0), - /// ); - /// - /// // Also, the start and end slot values are never equivalent. 
-    /// let (start, end) = nfa.group_info().slots(PatternID::ZERO, 0).unwrap();
-    /// assert_ne!(start, end);
-    ///
-    /// # Ok::<(), Box<dyn std::error::Error>>(())
-    /// ```
-    #[inline]
-    pub fn slots(
-        &self,
-        pid: PatternID,
-        group_index: usize,
-    ) -> Option<(usize, usize)> {
-        // Since 'slot' only ever returns valid starting slots, we know that
-        // there must also be an end slot and that end slot is always one more
-        // than the start slot.
-        self.slot(pid, group_index).map(|start| (start, start + 1))
-    }
-
-    /// Returns the starting slot corresponding to the given capturing group
-    /// for the given pattern. The ending slot is always one more than the
-    /// value returned.
-    ///
-    /// If either the pattern ID or the capture index is invalid, then this
-    /// returns None.
-    ///
-    /// # Example
-    ///
-    /// This example shows that the starting slots for the first capturing
-    /// group of each pattern are distinct.
-    ///
-    /// ```
-    /// use regex_automata::{nfa::thompson::NFA, PatternID};
-    ///
-    /// let nfa = NFA::new_many(&["a", "b"])?;
-    /// assert_ne!(
-    ///     nfa.group_info().slot(PatternID::must(0), 0),
-    ///     nfa.group_info().slot(PatternID::must(1), 0),
-    /// );
-    ///
-    /// # Ok::<(), Box<dyn std::error::Error>>(())
-    /// ```
-    #[inline]
-    pub fn slot(&self, pid: PatternID, group_index: usize) -> Option<usize> {
-        if group_index >= self.group_len(pid) {
-            return None;
-        }
-        // At this point, we know that 'pid' refers to a real pattern and that
-        // 'group_index' refers to a real group. We therefore also know that
-        // the pattern and group can be combined to return a correct slot.
-        // That's why we don't need to use checked arithmetic below.
-        if group_index == 0 {
-            Some(pid.as_usize() * 2)
-        } else {
-            // As above, we don't need to check that our slot is less than the
-            // end of our range since we already know the group index is a
-            // valid index for the given pattern.
-            let (start, _) = self.0.slot_ranges[pid];
-            Some(start.as_usize() + ((group_index - 1) * 2))
-        }
-    }
-
-    /// Returns the total number of patterns in this `GroupInfo`.
-    ///
-    /// This may return zero if the `GroupInfo` was constructed with no
-    /// patterns.
-    ///
-    /// This is guaranteed to be no bigger than [`PatternID::LIMIT`] because
-    /// `GroupInfo` construction will fail if too many patterns are added.
-    ///
-    /// # Example
-    ///
-    /// ```
-    /// use regex_automata::nfa::thompson::NFA;
-    ///
-    /// let nfa = NFA::new_many(&["[0-9]+", "[a-z]+", "[A-Z]+"])?;
-    /// assert_eq!(3, nfa.group_info().pattern_len());
-    ///
-    /// let nfa = NFA::never_match();
-    /// assert_eq!(0, nfa.group_info().pattern_len());
-    ///
-    /// let nfa = NFA::always_match();
-    /// assert_eq!(1, nfa.group_info().pattern_len());
-    ///
-    /// # Ok::<(), Box<dyn std::error::Error>>(())
-    /// ```
-    #[inline]
-    pub fn pattern_len(&self) -> usize {
-        self.0.pattern_len()
-    }
-
-    /// Return the number of capture groups in a pattern.
-    ///
-    /// If the pattern ID is invalid, then this returns `0`.
-    ///
-    /// # Example
-    ///
-    /// This example shows how the values returned by this routine may vary
-    /// for different patterns and NFA configurations.
-    ///
-    /// ```
-    /// use regex_automata::{nfa::thompson::{NFA, WhichCaptures}, PatternID};
-    ///
-    /// let nfa = NFA::new(r"(a)(b)(c)")?;
-    /// // There are 3 explicit groups in the pattern's concrete syntax and
-    /// // 1 unnamed and implicit group spanning the entire pattern.
-    /// assert_eq!(4, nfa.group_info().group_len(PatternID::ZERO));
-    ///
-    /// let nfa = NFA::new(r"abc")?;
-    /// // There is just the unnamed implicit group.
- /// assert_eq!(1, nfa.group_info().group_len(PatternID::ZERO)); - /// - /// let nfa = NFA::compiler() - /// .configure(NFA::config().which_captures(WhichCaptures::None)) - /// .build(r"abc")?; - /// // We disabled capturing groups, so there are none. - /// assert_eq!(0, nfa.group_info().group_len(PatternID::ZERO)); - /// - /// let nfa = NFA::compiler() - /// .configure(NFA::config().which_captures(WhichCaptures::None)) - /// .build(r"(a)(b)(c)")?; - /// // We disabled capturing groups, so there are none, even if there are - /// // explicit groups in the concrete syntax. - /// assert_eq!(0, nfa.group_info().group_len(PatternID::ZERO)); - /// - /// # Ok::<(), Box>(()) - /// ``` - #[inline] - pub fn group_len(&self, pid: PatternID) -> usize { - self.0.group_len(pid) - } - - /// Return the total number of capture groups across all patterns. - /// - /// This includes implicit groups that represent the entire match of a - /// pattern. - /// - /// # Example - /// - /// This example shows how the values returned by this routine may vary - /// for different patterns and NFA configurations. - /// - /// ``` - /// use regex_automata::{nfa::thompson::{NFA, WhichCaptures}, PatternID}; - /// - /// let nfa = NFA::new(r"(a)(b)(c)")?; - /// // There are 3 explicit groups in the pattern's concrete syntax and - /// // 1 unnamed and implicit group spanning the entire pattern. - /// assert_eq!(4, nfa.group_info().all_group_len()); - /// - /// let nfa = NFA::new(r"abc")?; - /// // There is just the unnamed implicit group. - /// assert_eq!(1, nfa.group_info().all_group_len()); - /// - /// let nfa = NFA::new_many(&["(a)", "b", "(c)"])?; - /// // Each pattern has one implicit groups, and two - /// // patterns have one explicit group each. - /// assert_eq!(5, nfa.group_info().all_group_len()); - /// - /// let nfa = NFA::compiler() - /// .configure(NFA::config().which_captures(WhichCaptures::None)) - /// .build(r"abc")?; - /// // We disabled capturing groups, so there are none. - /// assert_eq!(0, nfa.group_info().all_group_len()); - /// - /// let nfa = NFA::compiler() - /// .configure(NFA::config().which_captures(WhichCaptures::None)) - /// .build(r"(a)(b)(c)")?; - /// // We disabled capturing groups, so there are none, even if there are - /// // explicit groups in the concrete syntax. - /// assert_eq!(0, nfa.group_info().group_len(PatternID::ZERO)); - /// - /// # Ok::<(), Box>(()) - /// ``` - #[inline] - pub fn all_group_len(&self) -> usize { - self.slot_len() / 2 - } - - /// Returns the total number of slots in this `GroupInfo` across all - /// patterns. - /// - /// The total number of slots is always twice the total number of capturing - /// groups, including both implicit and explicit groups. - /// - /// # Example - /// - /// This example shows the relationship between the number of capturing - /// groups and slots. - /// - /// ``` - /// use regex_automata::util::captures::GroupInfo; - /// - /// // There are 11 total groups here. - /// let info = GroupInfo::new(vec![ - /// vec![None, Some("foo")], - /// vec![None], - /// vec![None, None, None, Some("bar"), None], - /// vec![None, None, Some("foo")], - /// ])?; - /// // 2 slots per group gives us 11*2=22 slots. - /// assert_eq!(22, info.slot_len()); - /// - /// # Ok::<(), Box>(()) - /// ``` - #[inline] - pub fn slot_len(&self) -> usize { - self.0.small_slot_len().as_usize() - } - - /// Returns the total number of slots for implicit capturing groups. 
- /// - /// This is like [`GroupInfo::slot_len`], except it doesn't include the - /// explicit slots for each pattern. Since there are always exactly 2 - /// implicit slots for each pattern, the number of implicit slots is always - /// equal to twice the number of patterns. - /// - /// # Example - /// - /// This example shows the relationship between the number of capturing - /// groups, implicit slots and explicit slots. - /// - /// ``` - /// use regex_automata::util::captures::GroupInfo; - /// - /// // There are 11 total groups here. - /// let info = GroupInfo::new(vec![vec![None, Some("foo"), Some("bar")]])?; - /// // 2 slots per group gives us 11*2=22 slots. - /// assert_eq!(6, info.slot_len()); - /// // 2 implicit slots per pattern gives us 2 implicit slots since there - /// // is 1 pattern. - /// assert_eq!(2, info.implicit_slot_len()); - /// // 2 explicit capturing groups gives us 2*2=4 explicit slots. - /// assert_eq!(4, info.explicit_slot_len()); - /// - /// # Ok::<(), Box>(()) - /// ``` - #[inline] - pub fn implicit_slot_len(&self) -> usize { - self.pattern_len() * 2 - } - - /// Returns the total number of slots for explicit capturing groups. - /// - /// This is like [`GroupInfo::slot_len`], except it doesn't include the - /// implicit slots for each pattern. (There are always 2 implicit slots for - /// each pattern.) - /// - /// For a non-empty `GroupInfo`, it is always the case that `slot_len` is - /// strictly greater than `explicit_slot_len`. For an empty `GroupInfo`, - /// both the total number of slots and the number of explicit slots is - /// `0`. - /// - /// # Example - /// - /// This example shows the relationship between the number of capturing - /// groups, implicit slots and explicit slots. - /// - /// ``` - /// use regex_automata::util::captures::GroupInfo; - /// - /// // There are 11 total groups here. - /// let info = GroupInfo::new(vec![vec![None, Some("foo"), Some("bar")]])?; - /// // 2 slots per group gives us 11*2=22 slots. - /// assert_eq!(6, info.slot_len()); - /// // 2 implicit slots per pattern gives us 2 implicit slots since there - /// // is 1 pattern. - /// assert_eq!(2, info.implicit_slot_len()); - /// // 2 explicit capturing groups gives us 2*2=4 explicit slots. - /// assert_eq!(4, info.explicit_slot_len()); - /// - /// # Ok::<(), Box>(()) - /// ``` - #[inline] - pub fn explicit_slot_len(&self) -> usize { - self.slot_len().saturating_sub(self.implicit_slot_len()) - } - - /// Returns the memory usage, in bytes, of this `GroupInfo`. - /// - /// This does **not** include the stack size used up by this `GroupInfo`. - /// To compute that, use `std::mem::size_of::()`. - #[inline] - pub fn memory_usage(&self) -> usize { - use core::mem::size_of as s; - - s::() - + self.0.slot_ranges.len() * s::<(SmallIndex, SmallIndex)>() - + self.0.name_to_index.len() * s::() - + self.0.index_to_name.len() * s::>>>() - + self.0.memory_extra - } -} - -/// A map from capture group name to its corresponding capture group index. -/// -/// This type is actually wrapped inside a Vec indexed by pattern ID on a -/// `GroupInfo`, since multiple patterns may have the same capture group name. -/// That is, each pattern gets its own namespace of capture group names. -/// -/// Perhaps a more memory efficient representation would be -/// HashMap<(PatternID, Arc), usize>, but this makes it difficult to look -/// up a capture index by name without producing a `Arc`, which requires -/// an allocation. To fix this, I think we'd need to define our own unsized -/// type or something? 
Anyway, I didn't give this much thought since it -/// probably doesn't matter much in the grand scheme of things. But it did -/// stand out to me as mildly wasteful. -#[cfg(feature = "std")] -type CaptureNameMap = std::collections::HashMap, SmallIndex>; -#[cfg(not(feature = "std"))] -type CaptureNameMap = alloc::collections::BTreeMap, SmallIndex>; - -/// The inner guts of `GroupInfo`. This type only exists so that it can -/// be wrapped in an `Arc` to make `GroupInfo` reference counted. -#[derive(Debug, Default)] -struct GroupInfoInner { - slot_ranges: Vec<(SmallIndex, SmallIndex)>, - name_to_index: Vec, - index_to_name: Vec>>>, - memory_extra: usize, -} - -impl GroupInfoInner { - /// This adds the first unnamed group for the given pattern ID. The given - /// pattern ID must be zero if this is the first time this method is - /// called, or must be exactly one more than the pattern ID supplied to the - /// previous call to this method. (This method panics if this rule is - /// violated.) - /// - /// This can be thought of as initializing the GroupInfo state for the - /// given pattern and closing off the state for any previous pattern. - fn add_first_group(&mut self, pid: PatternID) { - assert_eq!(pid.as_usize(), self.slot_ranges.len()); - assert_eq!(pid.as_usize(), self.name_to_index.len()); - assert_eq!(pid.as_usize(), self.index_to_name.len()); - // This is the start of our slots for the explicit capturing groups. - // Note that since the slots for the 0th group for every pattern appear - // before any slots for the nth group (where n > 0) in any pattern, we - // will have to fix up the slot ranges once we know how many patterns - // we've added capture groups for. - let slot_start = self.small_slot_len(); - self.slot_ranges.push((slot_start, slot_start)); - self.name_to_index.push(CaptureNameMap::new()); - self.index_to_name.push(vec![None]); - self.memory_extra += core::mem::size_of::>>(); - } - - /// Add an explicit capturing group for the given pattern with the given - /// index. If the group has a name, then that must be given as well. - /// - /// Note that every capturing group except for the first or zeroth group is - /// explicit. - /// - /// This returns an error if adding this group would result in overflowing - /// slot indices or if a capturing group with the same name for this - /// pattern has already been added. - fn add_explicit_group>( - &mut self, - pid: PatternID, - group: SmallIndex, - maybe_name: Option, - ) -> Result<(), GroupInfoError> { - // We also need to check that the slot index generated for - // this group is also valid. Although, this is a little weird - // because we offset these indices below, at which point, we'll - // have to recheck them. Gosh this is annoying. Note that - // the '+2' below is OK because 'end' is guaranteed to be less - // than isize::MAX. - let end = &mut self.slot_ranges[pid].1; - *end = SmallIndex::new(end.as_usize() + 2).map_err(|_| { - GroupInfoError::too_many_groups(pid, group.as_usize()) - })?; - if let Some(name) = maybe_name { - let name = Arc::::from(name.as_ref()); - if self.name_to_index[pid].contains_key(&*name) { - return Err(GroupInfoError::duplicate(pid, &name)); - } - let len = name.len(); - self.name_to_index[pid].insert(Arc::clone(&name), group); - self.index_to_name[pid].push(Some(name)); - // Adds the memory used by the Arc in both maps. - self.memory_extra += - 2 * (len + core::mem::size_of::>>()); - // And also the value entry for the 'name_to_index' map. 
- // This is probably an underestimate for 'name_to_index' since - // hashmaps/btrees likely have some non-zero overhead, but we - // assume here that they have zero overhead. - self.memory_extra += core::mem::size_of::(); - } else { - self.index_to_name[pid].push(None); - self.memory_extra += core::mem::size_of::>>(); - } - // This is a sanity assert that checks that our group index - // is in line with the number of groups added so far for this - // pattern. - assert_eq!(group.one_more(), self.group_len(pid)); - // And is also in line with the 'index_to_name' map. - assert_eq!(group.one_more(), self.index_to_name[pid].len()); - Ok(()) - } - - /// This corrects the slot ranges to account for the slots corresponding - /// to the zeroth group of each pattern. That is, every slot range is - /// offset by 'pattern_len() * 2', since each pattern uses two slots to - /// represent the zeroth group. - fn fixup_slot_ranges(&mut self) -> Result<(), GroupInfoError> { - use crate::util::primitives::IteratorIndexExt; - // Since we know number of patterns fits in PatternID and - // PatternID::MAX < isize::MAX, it follows that multiplying by 2 will - // never overflow usize. - let offset = self.pattern_len().checked_mul(2).unwrap(); - for (pid, &mut (ref mut start, ref mut end)) in - self.slot_ranges.iter_mut().with_pattern_ids() - { - let group_len = 1 + ((end.as_usize() - start.as_usize()) / 2); - let new_end = match end.as_usize().checked_add(offset) { - Some(new_end) => new_end, - None => { - return Err(GroupInfoError::too_many_groups( - pid, group_len, - )) - } - }; - *end = SmallIndex::new(new_end).map_err(|_| { - GroupInfoError::too_many_groups(pid, group_len) - })?; - // Since start <= end, if end is valid then start must be too. - *start = SmallIndex::new(start.as_usize() + offset).unwrap(); - } - Ok(()) - } - - /// Return the total number of patterns represented by this capture slot - /// info. - fn pattern_len(&self) -> usize { - self.slot_ranges.len() - } - - /// Return the total number of capturing groups for the given pattern. If - /// the given pattern isn't valid for this capture slot info, then 0 is - /// returned. - fn group_len(&self, pid: PatternID) -> usize { - let (start, end) = match self.slot_ranges.get(pid.as_usize()) { - None => return 0, - Some(range) => range, - }; - // The difference between any two SmallIndex values always fits in a - // usize since we know that SmallIndex::MAX <= isize::MAX-1. We also - // know that start<=end by construction and that the number of groups - // never exceeds SmallIndex and thus never overflows usize. - 1 + ((end.as_usize() - start.as_usize()) / 2) - } - - /// Return the total number of slots in this capture slot info as a - /// "small index." - fn small_slot_len(&self) -> SmallIndex { - // Since slots are allocated in order of pattern (starting at 0) and - // then in order of capture group, it follows that the number of slots - // is the end of the range of slots for the last pattern. This is - // true even when the last pattern has no capturing groups, since - // 'slot_ranges' will still represent it explicitly with an empty - // range. - self.slot_ranges.last().map_or(SmallIndex::ZERO, |&(_, end)| end) - } -} - -/// An error that may occur when building a `GroupInfo`. -/// -/// Building a `GroupInfo` does a variety of checks to make sure the -/// capturing groups satisfy a number of invariants. 
This includes, but is not -/// limited to, ensuring that the first capturing group is unnamed and that -/// there are no duplicate capture groups for a specific pattern. -#[derive(Clone, Debug)] -pub struct GroupInfoError { - kind: GroupInfoErrorKind, -} - -/// The kind of error that occurs when building a `GroupInfo` fails. -/// -/// We keep this un-exported because it's not clear how useful it is to -/// export it. -#[derive(Clone, Debug)] -enum GroupInfoErrorKind { - /// This occurs when too many patterns have been added. i.e., It would - /// otherwise overflow a `PatternID`. - TooManyPatterns { err: PatternIDError }, - /// This occurs when too many capturing groups have been added for a - /// particular pattern. - TooManyGroups { - /// The ID of the pattern that had too many groups. - pattern: PatternID, - /// The minimum number of groups that the caller has tried to add for - /// a pattern. - minimum: usize, - }, - /// An error that occurs when a pattern has no capture groups. Either the - /// group info must be empty, or all patterns must have at least one group - /// (corresponding to the unnamed group for the entire pattern). - MissingGroups { - /// The ID of the pattern that had no capturing groups. - pattern: PatternID, - }, - /// An error that occurs when one tries to provide a name for the capture - /// group at index 0. This capturing group must currently always be - /// unnamed. - FirstMustBeUnnamed { - /// The ID of the pattern that was found to have a named first - /// capturing group. - pattern: PatternID, - }, - /// An error that occurs when duplicate capture group names for the same - /// pattern are added. - /// - /// NOTE: At time of writing, this error can never occur if you're using - /// regex-syntax, since the parser itself will reject patterns with - /// duplicate capture group names. This error can only occur when the - /// builder is used to hand construct NFAs. - Duplicate { - /// The pattern in which the duplicate capture group name was found. - pattern: PatternID, - /// The duplicate name. - name: String, - }, -} - -impl GroupInfoError { - fn too_many_patterns(err: PatternIDError) -> GroupInfoError { - GroupInfoError { kind: GroupInfoErrorKind::TooManyPatterns { err } } - } - - fn too_many_groups(pattern: PatternID, minimum: usize) -> GroupInfoError { - GroupInfoError { - kind: GroupInfoErrorKind::TooManyGroups { pattern, minimum }, - } - } - - fn missing_groups(pattern: PatternID) -> GroupInfoError { - GroupInfoError { kind: GroupInfoErrorKind::MissingGroups { pattern } } - } - - fn first_must_be_unnamed(pattern: PatternID) -> GroupInfoError { - GroupInfoError { - kind: GroupInfoErrorKind::FirstMustBeUnnamed { pattern }, - } - } - - fn duplicate(pattern: PatternID, name: &str) -> GroupInfoError { - GroupInfoError { - kind: GroupInfoErrorKind::Duplicate { - pattern, - name: String::from(name), - }, - } - } -} - -#[cfg(feature = "std")] -impl std::error::Error for GroupInfoError { - fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { - match self.kind { - GroupInfoErrorKind::TooManyPatterns { .. } - | GroupInfoErrorKind::TooManyGroups { .. } - | GroupInfoErrorKind::MissingGroups { .. } - | GroupInfoErrorKind::FirstMustBeUnnamed { .. } - | GroupInfoErrorKind::Duplicate { .. 
} => None, - } - } -} - -impl core::fmt::Display for GroupInfoError { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - use self::GroupInfoErrorKind::*; - - match self.kind { - TooManyPatterns { ref err } => { - write!(f, "too many patterns to build capture info: {err}") - } - TooManyGroups { pattern, minimum } => { - write!( - f, - "too many capture groups (at least {}) were \ - found for pattern {}", - minimum, - pattern.as_usize() - ) - } - MissingGroups { pattern } => write!( - f, - "no capturing groups found for pattern {} \ - (either all patterns have zero groups or all patterns have \ - at least one group)", - pattern.as_usize(), - ), - FirstMustBeUnnamed { pattern } => write!( - f, - "first capture group (at index 0) for pattern {} has a name \ - (it must be unnamed)", - pattern.as_usize(), - ), - Duplicate { pattern, ref name } => write!( - f, - "duplicate capture group name '{}' found for pattern {}", - name, - pattern.as_usize(), - ), - } - } -} - -/// An iterator over capturing groups and their names for a specific pattern. -/// -/// This iterator is created by [`GroupInfo::pattern_names`]. -/// -/// The lifetime parameter `'a` refers to the lifetime of the `GroupInfo` -/// from which this iterator was created. -#[derive(Clone, Debug)] -pub struct GroupInfoPatternNames<'a> { - it: core::slice::Iter<'a, Option>>, -} - -impl GroupInfoPatternNames<'static> { - fn empty() -> GroupInfoPatternNames<'static> { - GroupInfoPatternNames { it: [].iter() } - } -} - -impl<'a> Iterator for GroupInfoPatternNames<'a> { - type Item = Option<&'a str>; - - fn next(&mut self) -> Option> { - self.it.next().map(|x| x.as_deref()) - } - - fn size_hint(&self) -> (usize, Option) { - self.it.size_hint() - } - - fn count(self) -> usize { - self.it.count() - } -} - -impl<'a> ExactSizeIterator for GroupInfoPatternNames<'a> {} -impl<'a> core::iter::FusedIterator for GroupInfoPatternNames<'a> {} - -/// An iterator over capturing groups and their names for a `GroupInfo`. -/// -/// This iterator is created by [`GroupInfo::all_names`]. -/// -/// The lifetime parameter `'a` refers to the lifetime of the `GroupInfo` -/// from which this iterator was created. -#[derive(Debug)] -pub struct GroupInfoAllNames<'a> { - group_info: &'a GroupInfo, - pids: PatternIDIter, - current_pid: Option, - names: Option>>, -} - -impl<'a> Iterator for GroupInfoAllNames<'a> { - type Item = (PatternID, usize, Option<&'a str>); - - fn next(&mut self) -> Option<(PatternID, usize, Option<&'a str>)> { - // If the group info has no captures, then we never have anything - // to yield. We need to consider this case explicitly (at time of - // writing) because 'pattern_capture_names' will panic if captures - // aren't enabled. 
- if self.group_info.0.index_to_name.is_empty() { - return None; - } - if self.current_pid.is_none() { - self.current_pid = Some(self.pids.next()?); - } - let pid = self.current_pid.unwrap(); - if self.names.is_none() { - self.names = Some(self.group_info.pattern_names(pid).enumerate()); - } - let (group_index, name) = match self.names.as_mut().unwrap().next() { - Some((group_index, name)) => (group_index, name), - None => { - self.current_pid = None; - self.names = None; - return self.next(); - } - }; - Some((pid, group_index, name)) - } -} diff --git a/vendor/regex-automata/src/util/determinize/mod.rs b/vendor/regex-automata/src/util/determinize/mod.rs deleted file mode 100644 index 22e38c94ca4b0a..00000000000000 --- a/vendor/regex-automata/src/util/determinize/mod.rs +++ /dev/null @@ -1,682 +0,0 @@ -/*! -This module contains types and routines for implementing determinization. - -In this crate, there are at least two places where we implement -determinization: fully ahead-of-time compiled DFAs in the `dfa` module and -lazily compiled DFAs in the `hybrid` module. The stuff in this module -corresponds to the things that are in common between these implementations. - -There are three broad things that our implementations of determinization have -in common, as defined by this module: - -* The classification of start states. That is, whether we're dealing with -word boundaries, line boundaries, etc., is all the same. This also includes -the look-behind assertions that are satisfied by each starting state -classification. -* The representation of DFA states as sets of NFA states, including -convenience types for building these DFA states that are amenable to reusing -allocations. -* Routines for the "classical" parts of determinization: computing the -epsilon closure, tracking match states (with corresponding pattern IDs, since -we support multi-pattern finite automata) and, of course, computing the -transition function between states for units of input. - -I did consider a couple of alternatives to this particular form of code reuse: - -1. Don't do any code reuse. The problem here is that we *really* want both -forms of determinization to do exactly identical things when it comes to -their handling of NFA states. While our tests generally ensure this, the code -is tricky and large enough where not reusing code is a pretty big bummer. - -2. Implement all of determinization once and make it generic over fully -compiled DFAs and lazily compiled DFAs. While I didn't actually try this -approach, my instinct is that it would be more complex than is needed here. -And the interface required would be pretty hairy. Instead, I think splitting -it into logical sub-components works better. -*/ - -use alloc::vec::Vec; - -pub(crate) use self::state::{ - State, StateBuilderEmpty, StateBuilderMatches, StateBuilderNFA, -}; - -use crate::{ - nfa::thompson, - util::{ - alphabet, - look::{Look, LookSet}, - primitives::StateID, - search::MatchKind, - sparse_set::{SparseSet, SparseSets}, - start::Start, - utf8, - }, -}; - -mod state; - -/// Compute the set of all reachable NFA states, including the full epsilon -/// closure, from a DFA state for a single unit of input. The set of reachable -/// states is returned as a `StateBuilderNFA`. The `StateBuilderNFA` returned -/// also includes any look-behind assertions satisfied by `unit`, in addition -/// to whether it is a match state. For multi-pattern DFAs, the builder will -/// also include the pattern IDs that match (in the order seen). 
-///
-/// `nfa` must be able to resolve any NFA state in `state` and any NFA state
-/// reachable via the epsilon closure of any NFA state in `state`. `sparses`
-/// must have capacity equivalent to `nfa.len()`.
-///
-/// `match_kind` should correspond to the match semantics implemented by the
-/// DFA being built. Generally speaking, for leftmost-first match semantics,
-/// states that appear after the first NFA match state will not be included in
-/// the `StateBuilderNFA` returned since they are impossible to visit.
-///
-/// `sparses` is used as scratch space for NFA traversal. Other than their
-/// capacity requirements (detailed above), there are no requirements on what's
-/// contained within them (if anything). Similarly, what's inside of them once
-/// this routine returns is unspecified.
-///
-/// `stack` must have length 0. It is used as scratch space for depth first
-/// traversal. After returning, it is guaranteed that `stack` will have length
-/// 0.
-///
-/// `state` corresponds to the current DFA state on which one wants to compute
-/// the transition for the input `unit`.
-///
-/// `empty_builder` corresponds to the builder allocation to use to produce a
-/// complete `StateBuilderNFA` state. If the state is not needed (or is already
-/// cached), then it can be cleared and reused without needing to create a new
-/// `State`. The `StateBuilderNFA` state returned is final and ready to be
-/// turned into a `State` if necessary.
-pub(crate) fn next(
-    nfa: &thompson::NFA,
-    match_kind: MatchKind,
-    sparses: &mut SparseSets,
-    stack: &mut Vec<StateID>,
-    state: &State,
-    unit: alphabet::Unit,
-    empty_builder: StateBuilderEmpty,
-) -> StateBuilderNFA {
-    sparses.clear();
-
-    // Whether the NFA is matched in reverse or not. We use this in some
-    // conditional logic for dealing with the exceptionally annoying CRLF-aware
-    // line anchors.
-    let rev = nfa.is_reverse();
-    // The look-around matcher that our NFA is configured with. We don't
-    // actually use it to match look-around assertions, but we do need its
-    // configuration for constructing states consistent with how it matches.
-    let lookm = nfa.look_matcher();
-
-    // Put the NFA state IDs into a sparse set in case we need to
-    // re-compute their epsilon closure.
-    //
-    // Doing this state shuffling is technically not necessary unless some
-    // kind of look-around is used in the DFA. Some ad hoc experiments
-    // suggested that avoiding this didn't lead to much of an improvement,
-    // but perhaps more rigorous experimentation should be done. And in
-    // particular, avoiding this check requires some light refactoring of
-    // the code below.
-    state.iter_nfa_state_ids(|nfa_id| {
-        sparses.set1.insert(nfa_id);
-    });
-
-    // Compute look-ahead assertions originating from the current state. Based
-    // on the input unit we're transitioning over, some additional set of
-    // assertions may be true. Thus, we re-compute this state's epsilon closure
-    // (but only if necessary). Notably, when we build a DFA state initially,
-    // we don't enable any look-ahead assertions because we don't know whether
-    // they're true or not at that point.
-    if !state.look_need().is_empty() {
-        // Add look-ahead assertions that are now true based on the current
-        // input unit.
- let mut look_have = state.look_have(); - match unit.as_u8() { - Some(b'\r') => { - if !rev || !state.is_half_crlf() { - look_have = look_have.insert(Look::EndCRLF); - } - } - Some(b'\n') => { - if rev || !state.is_half_crlf() { - look_have = look_have.insert(Look::EndCRLF); - } - } - Some(_) => {} - None => { - look_have = look_have - .insert(Look::End) - .insert(Look::EndLF) - .insert(Look::EndCRLF); - } - } - if unit.is_byte(lookm.get_line_terminator()) { - look_have = look_have.insert(Look::EndLF); - } - if state.is_half_crlf() - && ((rev && !unit.is_byte(b'\r')) - || (!rev && !unit.is_byte(b'\n'))) - { - look_have = look_have.insert(Look::StartCRLF); - } - if state.is_from_word() == unit.is_word_byte() { - look_have = look_have - .insert(Look::WordAsciiNegate) - .insert(Look::WordUnicodeNegate); - } else { - look_have = - look_have.insert(Look::WordAscii).insert(Look::WordUnicode); - } - if !unit.is_word_byte() { - look_have = look_have - .insert(Look::WordEndHalfAscii) - .insert(Look::WordEndHalfUnicode); - } - if state.is_from_word() && !unit.is_word_byte() { - look_have = look_have - .insert(Look::WordEndAscii) - .insert(Look::WordEndUnicode); - } else if !state.is_from_word() && unit.is_word_byte() { - look_have = look_have - .insert(Look::WordStartAscii) - .insert(Look::WordStartUnicode); - } - // If we have new assertions satisfied that are among the set of - // assertions that exist in this state (that is, just because we added - // an EndLF assertion above doesn't mean there is an EndLF conditional - // epsilon transition in this state), then we re-compute this state's - // epsilon closure using the updated set of assertions. - // - // Note that since our DFA states omit unconditional epsilon - // transitions, this check is necessary for correctness. If we re-did - // the epsilon closure below needlessly, it could change based on the - // fact that we omitted epsilon states originally. - if !look_have - .subtract(state.look_have()) - .intersect(state.look_need()) - .is_empty() - { - for nfa_id in sparses.set1.iter() { - epsilon_closure( - nfa, - nfa_id, - look_have, - stack, - &mut sparses.set2, - ); - } - sparses.swap(); - sparses.set2.clear(); - } - } - - // Convert our empty builder into one that can record assertions and match - // pattern IDs. - let mut builder = empty_builder.into_matches(); - // Set whether the StartLF look-behind assertion is true for this - // transition or not. The look-behind assertion for ASCII word boundaries - // is handled below. - if nfa.look_set_any().contains_anchor_line() - && unit.is_byte(lookm.get_line_terminator()) - { - // Why only handle StartLF here and not Start? That's because Start - // can only impact the starting state, which is special cased in - // start state handling. - builder.set_look_have(|have| have.insert(Look::StartLF)); - } - // We also need to add StartCRLF to our assertions too, if we can. This - // is unfortunately a bit more complicated, because it depends on the - // direction of the search. In the forward direction, ^ matches after a - // \n, but in the reverse direction, ^ only matches after a \r. (This is - // further complicated by the fact that reverse a regex means changing a ^ - // to a $ and vice versa.) - if nfa.look_set_any().contains_anchor_crlf() - && ((rev && unit.is_byte(b'\r')) || (!rev && unit.is_byte(b'\n'))) - { - builder.set_look_have(|have| have.insert(Look::StartCRLF)); - } - // And also for the start-half word boundary assertions. 
As long as the - // look-behind byte is not a word char, then the assertions are satisfied. - if nfa.look_set_any().contains_word() && !unit.is_word_byte() { - builder.set_look_have(|have| { - have.insert(Look::WordStartHalfAscii) - .insert(Look::WordStartHalfUnicode) - }); - } - for nfa_id in sparses.set1.iter() { - match *nfa.state(nfa_id) { - thompson::State::Union { .. } - | thompson::State::BinaryUnion { .. } - | thompson::State::Fail - | thompson::State::Look { .. } - | thompson::State::Capture { .. } => {} - thompson::State::Match { pattern_id } => { - // Notice here that we are calling the NEW state a match - // state if the OLD state we are transitioning from - // contains an NFA match state. This is precisely how we - // delay all matches by one byte and also what therefore - // guarantees that starting states cannot be match states. - // - // If we didn't delay matches by one byte, then whether - // a DFA is a matching state or not would be determined - // by whether one of its own constituent NFA states - // was a match state. (And that would be done in - // 'add_nfa_states'.) - // - // Also, 'add_match_pattern_id' requires that callers never - // pass duplicative pattern IDs. We do in fact uphold that - // guarantee here, but it's subtle. In particular, a Thompson - // NFA guarantees that each pattern has exactly one match - // state. Moreover, since we're iterating over the NFA state - // IDs in a set, we are guaranteed not to have any duplicative - // match states. Thus, it is impossible to add the same pattern - // ID more than once. - // - // N.B. We delay matches by 1 byte as a way to hack 1-byte - // look-around into DFA searches. This lets us support ^, $ - // and ASCII-only \b. The delay is also why we need a special - // "end-of-input" (EOI) sentinel and why we need to follow the - // EOI sentinel at the end of every search. This final EOI - // transition is necessary to report matches found at the end - // of a haystack. - builder.add_match_pattern_id(pattern_id); - if !match_kind.continue_past_first_match() { - break; - } - } - thompson::State::ByteRange { ref trans } => { - if trans.matches_unit(unit) { - epsilon_closure( - nfa, - trans.next, - builder.look_have(), - stack, - &mut sparses.set2, - ); - } - } - thompson::State::Sparse(ref sparse) => { - if let Some(next) = sparse.matches_unit(unit) { - epsilon_closure( - nfa, - next, - builder.look_have(), - stack, - &mut sparses.set2, - ); - } - } - thompson::State::Dense(ref dense) => { - if let Some(next) = dense.matches_unit(unit) { - epsilon_closure( - nfa, - next, - builder.look_have(), - stack, - &mut sparses.set2, - ); - } - } - } - } - // We only set the word byte if there's a word boundary look-around - // anywhere in this regex. Otherwise, there's no point in bloating the - // number of states if we don't have one. - // - // We also only set it when the state has a non-zero number of NFA states. - // Otherwise, we could wind up with states that *should* be DEAD states - // but are otherwise distinct from DEAD states because of this look-behind - // assertion being set. While this can't technically impact correctness *in - // theory*, it can create pathological DFAs that consume input until EOI or - // a quit byte is seen. Consuming until EOI isn't a correctness problem, - // but a (serious) perf problem. Hitting a quit byte, however, could be a - // correctness problem since it could cause search routines to report an - // error instead of a detected match once the quit state is entered. 
(The - // search routine could be made to be a bit smarter by reporting a match - // if one was detected once it enters a quit state (and indeed, the search - // routines in this crate do just that), but it seems better to prevent - // these things by construction if possible.) - if !sparses.set2.is_empty() { - if nfa.look_set_any().contains_word() && unit.is_word_byte() { - builder.set_is_from_word(); - } - if nfa.look_set_any().contains_anchor_crlf() - && ((rev && unit.is_byte(b'\n')) || (!rev && unit.is_byte(b'\r'))) - { - builder.set_is_half_crlf(); - } - } - let mut builder_nfa = builder.into_nfa(); - add_nfa_states(nfa, &sparses.set2, &mut builder_nfa); - builder_nfa -} - -/// Compute the epsilon closure for the given NFA state. The epsilon closure -/// consists of all NFA state IDs, including `start_nfa_id`, that can be -/// reached from `start_nfa_id` without consuming any input. These state IDs -/// are written to `set` in the order they are visited, but only if they are -/// not already in `set`. `start_nfa_id` must be a valid state ID for the NFA -/// given. -/// -/// `look_have` consists of the satisfied assertions at the current -/// position. For conditional look-around epsilon transitions, these are -/// only followed if they are satisfied by `look_have`. -/// -/// `stack` must have length 0. It is used as scratch space for depth first -/// traversal. After returning, it is guaranteed that `stack` will have length -/// 0. -pub(crate) fn epsilon_closure( - nfa: &thompson::NFA, - start_nfa_id: StateID, - look_have: LookSet, - stack: &mut Vec, - set: &mut SparseSet, -) { - assert!(stack.is_empty()); - // If this isn't an epsilon state, then the epsilon closure is always just - // itself, so there's no need to spin up the machinery below to handle it. - if !nfa.state(start_nfa_id).is_epsilon() { - set.insert(start_nfa_id); - return; - } - - stack.push(start_nfa_id); - while let Some(mut id) = stack.pop() { - // In many cases, we can avoid stack operations when an NFA state only - // adds one new state to visit. In that case, we just set our ID to - // that state and mush on. We only use the stack when an NFA state - // introduces multiple new states to visit. - loop { - // Insert this NFA state, and if it's already in the set and thus - // already visited, then we can move on to the next one. - if !set.insert(id) { - break; - } - match *nfa.state(id) { - thompson::State::ByteRange { .. } - | thompson::State::Sparse { .. } - | thompson::State::Dense { .. } - | thompson::State::Fail - | thompson::State::Match { .. } => break, - thompson::State::Look { look, next } => { - if !look_have.contains(look) { - break; - } - id = next; - } - thompson::State::Union { ref alternates } => { - id = match alternates.get(0) { - None => break, - Some(&id) => id, - }; - // We need to process our alternates in order to preserve - // match preferences, so put the earliest alternates closer - // to the top of the stack. - stack.extend(alternates[1..].iter().rev()); - } - thompson::State::BinaryUnion { alt1, alt2 } => { - id = alt1; - stack.push(alt2); - } - thompson::State::Capture { next, .. } => { - id = next; - } - } - } - } -} - -/// Add the NFA state IDs in the given `set` to the given DFA builder state. -/// The order in which states are added corresponds to the order in which they -/// were added to `set`. 
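As a standalone illustration of the depth-first closure computed above, here is a minimal sketch over a toy NFA. Every type and name in it is invented for the example (it is not this crate's `thompson::State` or `SparseSet`); it only mirrors the shape of the traversal: unconditional epsilon edges are always followed, conditional `Look` edges only when their assertion is currently satisfied, and earlier alternates are visited first to preserve match preference.

// Standalone sketch; all names are invented for illustration.
#[derive(Clone, Copy, PartialEq)]
enum Cond { StartText, WordBoundary }

enum ToyState {
    // Consumes a byte; the closure stops here.
    Byte(u8, usize),
    // Unconditional epsilon split.
    Union(Vec<usize>),
    // Conditional epsilon edge, followed only when `Cond` is satisfied.
    Look(Cond, usize),
    Match,
}

/// Every state reachable from `start` without consuming input, following
/// `Look` edges only when their condition appears in `satisfied`.
fn epsilon_closure(nfa: &[ToyState], start: usize, satisfied: &[Cond]) -> Vec<usize> {
    let mut seen = vec![false; nfa.len()];
    let mut closure = Vec::new();
    let mut stack = vec![start];
    while let Some(id) = stack.pop() {
        if seen[id] {
            continue;
        }
        seen[id] = true;
        closure.push(id);
        match &nfa[id] {
            ToyState::Byte(..) | ToyState::Match => {}
            ToyState::Union(alts) => {
                // Push in reverse so earlier alternates are visited first.
                stack.extend(alts.iter().rev());
            }
            ToyState::Look(cond, next) => {
                if satisfied.contains(cond) {
                    stack.push(*next);
                }
            }
        }
    }
    closure
}

fn main() {
    // 0: union(1, 3), 1: look(StartText) -> 2, 2: 'a' -> 4, 3: 'b' -> 4, 4: match
    let nfa = vec![
        ToyState::Union(vec![1, 3]),
        ToyState::Look(Cond::StartText, 2),
        ToyState::Byte(b'a', 4),
        ToyState::Byte(b'b', 4),
        ToyState::Match,
    ];
    // At the start of the haystack the StartText assertion holds, so the
    // conditional edge to state 2 is followed; mid-haystack it is not.
    assert_eq!(epsilon_closure(&nfa, 0, &[Cond::StartText]), vec![0, 1, 2, 3]);
    assert_eq!(epsilon_closure(&nfa, 0, &[]), vec![0, 1, 3]);
}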
-/// -/// The DFA builder state given should already have its complete set of match -/// pattern IDs added (if any) and any look-behind assertions (StartLF, Start -/// and whether this state is being generated for a transition over a word byte -/// when applicable) that are true immediately prior to transitioning into this -/// state (via `builder.look_have()`). The match pattern IDs should correspond -/// to matches that occurred on the previous transition, since all matches are -/// delayed by one byte. The things that should _not_ be set are look-ahead -/// assertions (EndLF, End and whether the next byte is a word byte or not). -/// The builder state should also not have anything in `look_need` set, as this -/// routine will compute that for you. -/// -/// The given NFA should be able to resolve all identifiers in `set` to a -/// particular NFA state. Additionally, `set` must have capacity equivalent -/// to `nfa.len()`. -pub(crate) fn add_nfa_states( - nfa: &thompson::NFA, - set: &SparseSet, - builder: &mut StateBuilderNFA, -) { - for nfa_id in set.iter() { - match *nfa.state(nfa_id) { - thompson::State::ByteRange { .. } => { - builder.add_nfa_state_id(nfa_id); - } - thompson::State::Sparse { .. } => { - builder.add_nfa_state_id(nfa_id); - } - thompson::State::Dense { .. } => { - builder.add_nfa_state_id(nfa_id); - } - thompson::State::Look { look, .. } => { - builder.add_nfa_state_id(nfa_id); - builder.set_look_need(|need| need.insert(look)); - } - thompson::State::Union { .. } - | thompson::State::BinaryUnion { .. } => { - // Pure epsilon transitions don't need to be tracked as part - // of the DFA state. Tracking them is actually superfluous; - // they won't cause any harm other than making determinization - // slower. - // - // Why aren't these needed? Well, in an NFA, epsilon - // transitions are really just jumping points to other states. - // So once you hit an epsilon transition, the same set of - // resulting states always appears. Therefore, putting them in - // a DFA's set of ordered NFA states is strictly redundant. - // - // Look-around states are also epsilon transitions, but - // they are *conditional*. So their presence could be - // discriminatory, and thus, they are tracked above. - // - // But wait... why are epsilon states in our `set` in the first - // place? Why not just leave them out? They're in our `set` - // because it was generated by computing an epsilon closure, - // and we want to keep track of all states we visited to avoid - // re-visiting them. In exchange, we have to do this second - // iteration over our collected states to finalize our DFA - // state. In theory, we could avoid this second iteration if - // we maintained two sets during epsilon closure: the set of - // visited states (to avoid cycles) and the set of states that - // will actually be used to construct the next DFA state. - // - // Note that this optimization requires that we re-compute the - // epsilon closure to account for look-ahead in 'next' *only - // when necessary*. Namely, only when the set of look-around - // assertions changes and only when those changes are within - // the set of assertions that are needed in order to step - // through the closure correctly. Otherwise, if we re-do the - // epsilon closure needlessly, it could change based on the - // fact that we are omitting epsilon states here. - // - // ----- - // - // Welp, scratch the above. 
It turns out that recording these - // is in fact necessary to seemingly handle one particularly - // annoying case: when a conditional epsilon transition is - // put inside of a repetition operator. One specific case I - // ran into was the regex `(?:\b|%)+` on the haystack `z%`. - // The correct leftmost first matches are: [0, 0] and [1, 1]. - // But the DFA was reporting [0, 0] and [1, 2]. To understand - // why this happens, consider the NFA for the aforementioned - // regex: - // - // >000000: binary-union(4, 1) - // 000001: \x00-\xFF => 0 - // 000002: WordAscii => 5 - // 000003: % => 5 - // ^000004: binary-union(2, 3) - // 000005: binary-union(4, 6) - // 000006: MATCH(0) - // - // The problem here is that one of the DFA start states is - // going to consist of the NFA states [2, 3] by computing the - // epsilon closure of state 4. State 4 isn't included because - // we previously were not keeping track of union states. But - // only a subset of transitions out of this state will be able - // to follow WordAscii, and in those cases, the epsilon closure - // is redone. The only problem is that computing the epsilon - // closure from [2, 3] is different than computing the epsilon - // closure from [4]. In the former case, assuming the WordAscii - // assertion is satisfied, you get: [2, 3, 6]. In the latter - // case, you get: [2, 6, 3]. Notice that '6' is the match state - // and appears AFTER '3' in the former case. This leads to a - // preferential but incorrect match of '%' before returning - // a match. In the latter case, the match is preferred over - // continuing to accept the '%'. - // - // It almost feels like we might be able to fix the NFA states - // to avoid this, or to at least only keep track of union - // states where this actually matters, since in the vast - // majority of cases, this doesn't matter. - // - // Another alternative would be to define a new HIR property - // called "assertion is repeated anywhere" and compute it - // inductively over the entire pattern. If it happens anywhere, - // which is probably pretty rare, then we record union states. - // Otherwise we don't. - builder.add_nfa_state_id(nfa_id); - } - // Capture states we definitely do not need to record, since they - // are unconditional epsilon transitions with no branching. - thompson::State::Capture { .. } => {} - // It's not totally clear whether we need to record fail states or - // not, but we do so out of an abundance of caution. Since they are - // quite rare in practice, there isn't much cost to recording them. - thompson::State::Fail => { - builder.add_nfa_state_id(nfa_id); - } - thompson::State::Match { .. } => { - // Normally, the NFA match state doesn't actually need to - // be inside the DFA state. But since we delay matches by - // one byte, the matching DFA state corresponds to states - // that transition from the one we're building here. And - // the way we detect those cases is by looking for an NFA - // match state. See 'next' for how this is handled. - builder.add_nfa_state_id(nfa_id); - } - } - } - // If we know this state contains no look-around assertions, then - // there's no reason to track which look-around assertions were - // satisfied when this state was created. - if builder.look_need().is_empty() { - builder.set_look_have(|_| LookSet::empty()); - } -} - -/// Sets the appropriate look-behind assertions on the given state based on -/// this starting configuration. 
-pub(crate) fn set_lookbehind_from_start( - nfa: &thompson::NFA, - start: &Start, - builder: &mut StateBuilderMatches, -) { - let rev = nfa.is_reverse(); - let lineterm = nfa.look_matcher().get_line_terminator(); - let lookset = nfa.look_set_any(); - match *start { - Start::NonWordByte => { - if lookset.contains_word() { - builder.set_look_have(|have| { - have.insert(Look::WordStartHalfAscii) - .insert(Look::WordStartHalfUnicode) - }); - } - } - Start::WordByte => { - if lookset.contains_word() { - builder.set_is_from_word(); - } - } - Start::Text => { - if lookset.contains_anchor_haystack() { - builder.set_look_have(|have| have.insert(Look::Start)); - } - if lookset.contains_anchor_line() { - builder.set_look_have(|have| { - have.insert(Look::StartLF).insert(Look::StartCRLF) - }); - } - if lookset.contains_word() { - builder.set_look_have(|have| { - have.insert(Look::WordStartHalfAscii) - .insert(Look::WordStartHalfUnicode) - }); - } - } - Start::LineLF => { - if rev { - if lookset.contains_anchor_crlf() { - builder.set_is_half_crlf(); - } - if lookset.contains_anchor_line() { - builder.set_look_have(|have| have.insert(Look::StartLF)); - } - } else { - if lookset.contains_anchor_line() { - builder.set_look_have(|have| have.insert(Look::StartCRLF)); - } - } - if lookset.contains_anchor_line() && lineterm == b'\n' { - builder.set_look_have(|have| have.insert(Look::StartLF)); - } - if lookset.contains_word() { - builder.set_look_have(|have| { - have.insert(Look::WordStartHalfAscii) - .insert(Look::WordStartHalfUnicode) - }); - } - } - Start::LineCR => { - if lookset.contains_anchor_crlf() { - if rev { - builder.set_look_have(|have| have.insert(Look::StartCRLF)); - } else { - builder.set_is_half_crlf(); - } - } - if lookset.contains_anchor_line() && lineterm == b'\r' { - builder.set_look_have(|have| have.insert(Look::StartLF)); - } - if lookset.contains_word() { - builder.set_look_have(|have| { - have.insert(Look::WordStartHalfAscii) - .insert(Look::WordStartHalfUnicode) - }); - } - } - Start::CustomLineTerminator => { - if lookset.contains_anchor_line() { - builder.set_look_have(|have| have.insert(Look::StartLF)); - } - // This is a bit of a tricky case, but if the line terminator was - // set to a word byte, then we also need to behave as if the start - // configuration is Start::WordByte. That is, we need to mark our - // state as having come from a word byte. - if lookset.contains_word() { - if utf8::is_word_byte(lineterm) { - builder.set_is_from_word(); - } else { - builder.set_look_have(|have| { - have.insert(Look::WordStartHalfAscii) - .insert(Look::WordStartHalfUnicode) - }); - } - } - } - } -} diff --git a/vendor/regex-automata/src/util/determinize/state.rs b/vendor/regex-automata/src/util/determinize/state.rs deleted file mode 100644 index f410f9acb2f5b2..00000000000000 --- a/vendor/regex-automata/src/util/determinize/state.rs +++ /dev/null @@ -1,907 +0,0 @@ -/*! -This module defines a DFA state representation and builders for constructing -DFA states. - -This representation is specifically for use in implementations of NFA-to-DFA -conversion via powerset construction. (Also called "determinization" in this -crate.) - -The term "DFA state" is somewhat overloaded in this crate. In some cases, it -refers to the set of transitions over an alphabet for a particular state. In -other cases, it refers to a set of NFA states. 
The former is really about the -final representation of a state in a DFA's transition table, where as the -latter---what this module is focused on---is closer to an intermediate form -that is used to help eventually build the transition table. - -This module exports four types. All four types represent the same idea: an -ordered set of NFA states. This ordered set represents the epsilon closure of a -particular NFA state, where the "epsilon closure" is the set of NFA states that -can be transitioned to without consuming any input. i.e., Follow all of the NFA -state's epsilon transitions. In addition, this implementation of DFA states -cares about two other things: the ordered set of pattern IDs corresponding -to the patterns that match if the state is a match state, and the set of -look-behind assertions that were true when the state was created. - -The first, `State`, is a frozen representation of a state that cannot be -modified. It may be cheaply cloned without copying the state itself and can be -accessed safely from multiple threads simultaneously. This type is useful for -when one knows that the DFA state being constructed is distinct from any other -previously constructed states. Namely, powerset construction, in practice, -requires one to keep a cache of previously created DFA states. Otherwise, -the number of DFA states created in memory balloons to an impractically -large number. For this reason, equivalent states should endeavor to have an -equivalent byte-level representation. (In general, "equivalency" here means, -"equivalent assertions, pattern IDs and NFA state IDs." We do not require that -full DFA minimization be implemented here. This form of equivalency is only -surface deep and is more-or-less a practical necessity.) - -The other three types represent different phases in the construction of a -DFA state. Internally, these three types (and `State`) all use the same -byte-oriented representation. That means one can use any of the builder types -to check whether the state it represents already exists or not. If it does, -then there is no need to freeze it into a `State` (which requires an alloc and -a copy). Here are the three types described succinctly: - -* `StateBuilderEmpty` represents a state with no pattern IDs, no assertions -and no NFA states. Creating a `StateBuilderEmpty` performs no allocs. A -`StateBuilderEmpty` can only be used to query its underlying memory capacity, -or to convert into a builder for recording pattern IDs and/or assertions. - -* `StateBuilderMatches` represents a state with zero or more pattern IDs, zero -or more satisfied assertions and zero NFA state IDs. A `StateBuilderMatches` -can only be used for adding pattern IDs and recording assertions. - -* `StateBuilderNFA` represents a state with zero or more pattern IDs, zero or -more satisfied assertions and zero or more NFA state IDs. A `StateBuilderNFA` -can only be used for adding NFA state IDs and recording some assertions. - -The expected flow here is to use the above builders to construct a candidate -DFA state to check if it already exists. If it does, then there's no need to -freeze it into a `State`. If it doesn't exist, then `StateBuilderNFA::to_state` -can be called to freeze the builder into an immutable `State`. In either -case, `clear` should be called on the builder to turn it back into a -`StateBuilderEmpty` that reuses the underlying memory. 
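A rough sketch of that flow as it might look from inside the crate. The cache, the id counter and the surrounding function are hypothetical scaffolding; only the builder calls correspond to the types defined in this module.

// Hypothetical determinizer-side helper; only the builder calls are real.
use std::collections::HashMap;

fn get_or_add_state(
    cache: &mut HashMap<State, u32>,
    pattern: PatternID,
    nfa_ids: &[StateID],
) -> u32 {
    // Build a candidate state without freezing it yet.
    let mut b = StateBuilderEmpty::new().into_matches();
    b.add_match_pattern_id(pattern);
    let mut b = b.into_nfa();
    for &sid in nfa_ids {
        b.add_nfa_state_id(sid);
    }
    // Look it up by its raw byte representation. `impl Borrow<[u8]> for
    // State` means the lookup needs no allocation.
    if let Some(&existing) = cache.get(b.as_bytes()) {
        return existing;
    }
    // Only on a cache miss do we pay for `to_state` (an alloc and a copy).
    let id = cache.len() as u32;
    cache.insert(b.to_state(), id);
    // In the real determinizer, `b.clear()` is then used to recycle the
    // builder's buffer for the next candidate state.
    id
}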
- -The main purpose for splitting the builder into these distinct types is to -make it impossible to do things like adding a pattern ID after adding an NFA -state ID. Namely, this makes it simpler to use a space-and-time efficient -binary representation for the state. (The format is documented on the `Repr` -type below.) If we just used one type for everything, it would be possible for -callers to use an incorrect interleaving of calls and thus result in a corrupt -representation. I chose to use more type machinery to make this impossible to -do because 1) determinization is itself pretty complex and it wouldn't be too -hard to foul this up and 2) there isn't too much machinery involved and it's -well contained. - -As an optimization, sometimes states won't have certain things set. For -example, if the underlying NFA has no word boundary assertions, then there is -no reason to set a state's look-behind assertion as to whether it was generated -from a word byte or not. Similarly, if a state has no NFA states corresponding -to look-around assertions, then there is no reason to set `look_have` to a -non-empty set. Finally, callers usually omit unconditional epsilon transitions -when adding NFA state IDs since they aren't discriminatory. - -Finally, the binary representation used by these states is, thankfully, not -serialized anywhere. So any kind of change can be made with reckless abandon, -as long as everything in this module agrees. -*/ - -use core::mem; - -use alloc::{sync::Arc, vec::Vec}; - -use crate::util::{ - int::{I32, U32}, - look::LookSet, - primitives::{PatternID, StateID}, - wire::{self, Endian}, -}; - -/// A DFA state that, at its core, is represented by an ordered set of NFA -/// states. -/// -/// This type is intended to be used only in NFA-to-DFA conversion via powerset -/// construction. -/// -/// It may be cheaply cloned and accessed safely from multiple threads -/// simultaneously. -#[derive(Clone, Eq, Hash, PartialEq, PartialOrd, Ord)] -pub(crate) struct State(Arc<[u8]>); - -/// This Borrow impl permits us to lookup any state in a map by its byte -/// representation. This is particularly convenient when one has a StateBuilder -/// and we want to see if a correspondingly equivalent state already exists. If -/// one does exist, then we can reuse the allocation required by StateBuilder -/// without having to convert it into a State first. -impl core::borrow::Borrow<[u8]> for State { - fn borrow(&self) -> &[u8] { - &self.0 - } -} - -impl core::fmt::Debug for State { - fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { - f.debug_tuple("State").field(&self.repr()).finish() - } -} - -/// For docs on these routines, see the internal Repr and ReprVec types below. 
-impl State { - pub(crate) fn dead() -> State { - StateBuilderEmpty::new().into_matches().into_nfa().to_state() - } - - pub(crate) fn is_match(&self) -> bool { - self.repr().is_match() - } - - pub(crate) fn is_from_word(&self) -> bool { - self.repr().is_from_word() - } - - pub(crate) fn is_half_crlf(&self) -> bool { - self.repr().is_half_crlf() - } - - pub(crate) fn look_have(&self) -> LookSet { - self.repr().look_have() - } - - pub(crate) fn look_need(&self) -> LookSet { - self.repr().look_need() - } - - pub(crate) fn match_len(&self) -> usize { - self.repr().match_len() - } - - pub(crate) fn match_pattern(&self, index: usize) -> PatternID { - self.repr().match_pattern(index) - } - - pub(crate) fn match_pattern_ids(&self) -> Option> { - self.repr().match_pattern_ids() - } - - #[cfg(all(test, not(miri)))] - pub(crate) fn iter_match_pattern_ids(&self, f: F) { - self.repr().iter_match_pattern_ids(f) - } - - pub(crate) fn iter_nfa_state_ids(&self, f: F) { - self.repr().iter_nfa_state_ids(f) - } - - pub(crate) fn memory_usage(&self) -> usize { - self.0.len() - } - - fn repr(&self) -> Repr<'_> { - Repr(&self.0) - } -} - -/// A state builder that represents an empty state. -/// -/// This is a useful "initial condition" for state construction. It has no -/// NFA state IDs, no assertions set and no pattern IDs. No allocations are -/// made when new() is called. Its main use is for being converted into a -/// builder that can capture assertions and pattern IDs. -#[derive(Clone, Debug)] -pub(crate) struct StateBuilderEmpty(Vec); - -/// For docs on these routines, see the internal Repr and ReprVec types below. -impl StateBuilderEmpty { - pub(crate) fn new() -> StateBuilderEmpty { - StateBuilderEmpty(alloc::vec![]) - } - - pub(crate) fn into_matches(mut self) -> StateBuilderMatches { - self.0.extend_from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0]); - StateBuilderMatches(self.0) - } - - fn clear(&mut self) { - self.0.clear(); - } - - pub(crate) fn capacity(&self) -> usize { - self.0.capacity() - } -} - -/// A state builder that collects assertions and pattern IDs. -/// -/// When collecting pattern IDs is finished, this can be converted into a -/// builder that collects NFA state IDs. -#[derive(Clone)] -pub(crate) struct StateBuilderMatches(Vec); - -impl core::fmt::Debug for StateBuilderMatches { - fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { - f.debug_tuple("StateBuilderMatches").field(&self.repr()).finish() - } -} - -/// For docs on these routines, see the internal Repr and ReprVec types below. -impl StateBuilderMatches { - pub(crate) fn into_nfa(mut self) -> StateBuilderNFA { - self.repr_vec().close_match_pattern_ids(); - StateBuilderNFA { repr: self.0, prev_nfa_state_id: StateID::ZERO } - } - - pub(crate) fn set_is_from_word(&mut self) { - self.repr_vec().set_is_from_word() - } - - pub(crate) fn set_is_half_crlf(&mut self) { - self.repr_vec().set_is_half_crlf() - } - - pub(crate) fn look_have(&self) -> LookSet { - LookSet::read_repr(&self.0[1..]) - } - - pub(crate) fn set_look_have( - &mut self, - set: impl FnMut(LookSet) -> LookSet, - ) { - self.repr_vec().set_look_have(set) - } - - pub(crate) fn add_match_pattern_id(&mut self, pid: PatternID) { - self.repr_vec().add_match_pattern_id(pid) - } - - fn repr(&self) -> Repr<'_> { - Repr(&self.0) - } - - fn repr_vec(&mut self) -> ReprVec<'_> { - ReprVec(&mut self.0) - } -} - -/// A state builder that collects some assertions and NFA state IDs. 
-/// -/// When collecting NFA state IDs is finished, this can be used to build a -/// `State` if necessary. -/// -/// When dont with building a state (regardless of whether it got kept or not), -/// it's usually a good idea to call `clear` to get an empty builder back so -/// that it can be reused to build the next state. -#[derive(Clone)] -pub(crate) struct StateBuilderNFA { - repr: Vec, - prev_nfa_state_id: StateID, -} - -impl core::fmt::Debug for StateBuilderNFA { - fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { - f.debug_tuple("StateBuilderNFA").field(&self.repr()).finish() - } -} - -/// For docs on these routines, see the internal Repr and ReprVec types below. -impl StateBuilderNFA { - pub(crate) fn to_state(&self) -> State { - State(Arc::from(&*self.repr)) - } - - pub(crate) fn clear(self) -> StateBuilderEmpty { - let mut builder = StateBuilderEmpty(self.repr); - builder.clear(); - builder - } - - pub(crate) fn look_need(&self) -> LookSet { - self.repr().look_need() - } - - pub(crate) fn set_look_have( - &mut self, - set: impl FnMut(LookSet) -> LookSet, - ) { - self.repr_vec().set_look_have(set) - } - - pub(crate) fn set_look_need( - &mut self, - set: impl FnMut(LookSet) -> LookSet, - ) { - self.repr_vec().set_look_need(set) - } - - pub(crate) fn add_nfa_state_id(&mut self, sid: StateID) { - ReprVec(&mut self.repr) - .add_nfa_state_id(&mut self.prev_nfa_state_id, sid) - } - - pub(crate) fn as_bytes(&self) -> &[u8] { - &self.repr - } - - fn repr(&self) -> Repr<'_> { - Repr(&self.repr) - } - - fn repr_vec(&mut self) -> ReprVec<'_> { - ReprVec(&mut self.repr) - } -} - -/// Repr is a read-only view into the representation of a DFA state. -/// -/// Primarily, a Repr is how we achieve DRY: we implement decoding the format -/// in one place, and then use a Repr to implement the various methods on the -/// public state types. -/// -/// The format is as follows: -/// -/// The first three bytes correspond to bitsets. -/// -/// Byte 0 is a bitset corresponding to miscellaneous flags associated with the -/// state. Bit 0 is set to 1 if the state is a match state. Bit 1 is set to 1 -/// if the state has pattern IDs explicitly written to it. (This is a flag that -/// is not meant to be set by determinization, but rather, is used as part of -/// an internal space-saving optimization.) Bit 2 is set to 1 if the state was -/// generated by a transition over a "word" byte. (Callers may not always set -/// this. For example, if the NFA has no word boundary assertion, then needing -/// to track whether a state came from a word byte or not is superfluous and -/// wasteful.) Bit 3 is set to 1 if the state was generated by a transition -/// from a `\r` (forward search) or a `\n` (reverse search) when CRLF mode is -/// enabled. -/// -/// Bytes 1..5 correspond to the look-behind assertions that were satisfied -/// by the transition that created this state. (Look-ahead assertions are not -/// tracked as part of states. Instead, these are applied by re-computing the -/// epsilon closure of a state when computing the transition function. See -/// `next` in the parent module.) -/// -/// Bytes 5..9 correspond to the set of look-around assertions (including both -/// look-behind and look-ahead) that appear somewhere in this state's set of -/// NFA state IDs. This is used to determine whether this state's epsilon -/// closure should be re-computed when computing the transition function. 
-/// Namely, look-around assertions are "just" conditional epsilon transitions, -/// so if there are new assertions available when computing the transition -/// function, we should only re-compute the epsilon closure if those new -/// assertions are relevant to this particular state. -/// -/// Bytes 9..13 correspond to a 32-bit native-endian encoded integer -/// corresponding to the number of patterns encoded in this state. If the state -/// is not a match state (byte 0 bit 0 is 0) or if it's only pattern ID is -/// PatternID::ZERO, then no integer is encoded at this position. Instead, byte -/// offset 3 is the position at which the first NFA state ID is encoded. -/// -/// For a match state with at least one non-ZERO pattern ID, the next bytes -/// correspond to a sequence of 32-bit native endian encoded integers that -/// represent each pattern ID, in order, that this match state represents. -/// -/// After the pattern IDs (if any), NFA state IDs are delta encoded as -/// varints.[1] The first NFA state ID is encoded as itself, and each -/// subsequent NFA state ID is encoded as the difference between itself and the -/// previous NFA state ID. -/// -/// [1] - https://developers.google.com/protocol-buffers/docs/encoding#varints -struct Repr<'a>(&'a [u8]); - -impl<'a> Repr<'a> { - /// Returns true if and only if this is a match state. - /// - /// If callers have added pattern IDs to this state, then callers MUST set - /// this state as a match state explicitly. However, as a special case, - /// states that are marked as match states but with no pattern IDs, then - /// the state is treated as if it had a single pattern ID equivalent to - /// PatternID::ZERO. - fn is_match(&self) -> bool { - self.0[0] & (1 << 0) > 0 - } - - /// Returns true if and only if this state has had at least one pattern - /// ID added to it. - /// - /// This is an internal-only flag that permits the representation to save - /// space in the common case of an NFA with one pattern in it. In that - /// case, a match state can only ever have exactly one pattern ID: - /// PatternID::ZERO. So there's no need to represent it. - fn has_pattern_ids(&self) -> bool { - self.0[0] & (1 << 1) > 0 - } - - /// Returns true if and only if this state is marked as having been created - /// from a transition over a word byte. This is useful for checking whether - /// a word boundary assertion is true or not, which requires look-behind - /// (whether the current state came from a word byte or not) and look-ahead - /// (whether the transition byte is a word byte or not). - /// - /// Since states with this set are distinct from states that don't have - /// this set (even if they are otherwise equivalent), callers should not - /// set this assertion unless the underlying NFA has at least one word - /// boundary assertion somewhere. Otherwise, a superfluous number of states - /// may be created. - fn is_from_word(&self) -> bool { - self.0[0] & (1 << 2) > 0 - } - - /// Returns true if and only if this state is marked as being inside of a - /// CRLF terminator. In the forward direction, this means the state was - /// created after seeing a `\r`. In the reverse direction, this means the - /// state was created after seeing a `\n`. - fn is_half_crlf(&self) -> bool { - self.0[0] & (1 << 3) > 0 - } - - /// The set of look-behind assertions that were true in the transition that - /// created this state. 
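To make the layout concrete, here is a hand-worked encoding of a hypothetical state: a match state whose only pattern is PatternID::ZERO (so no pattern IDs are written explicitly), empty look sets, and NFA state IDs 5 then 2. The helpers below mirror the zig-zag varint routines defined near the end of this file; the exact bytes are illustrative only, not a stable format.

// Hand-worked illustration of the representation described above.
fn zigzag(n: i32) -> u32 {
    let mut un = (n as u32) << 1;
    if n < 0 {
        un = !un;
    }
    un
}

fn push_varu32(out: &mut Vec<u8>, mut n: u32) {
    while n >= 0b1000_0000 {
        out.push((n as u8) | 0b1000_0000);
        n >>= 7;
    }
    out.push(n as u8);
}

fn main() {
    let mut repr = vec![0u8; 9]; // byte 0: flags, 1..5: look_have, 5..9: look_need
    repr[0] |= 1 << 0; // is_match; PatternID::ZERO is implied, so nothing else is written
    let nfa_ids = [5i32, 2];
    let mut prev = 0i32;
    for &sid in &nfa_ids {
        push_varu32(&mut repr, zigzag(sid - prev)); // delta encoding of state IDs
        prev = sid;
    }
    // delta +5 -> zig-zag 10 -> 0x0A; delta -3 -> zig-zag 5 -> 0x05
    assert_eq!(repr, vec![0x01, 0, 0, 0, 0, 0, 0, 0, 0, 0x0A, 0x05]);
}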
- /// - /// Generally, this should be empty if 'look_need' is empty, since there is - /// no reason to track which look-behind assertions are true if the state - /// has no conditional epsilon transitions. - /// - /// Satisfied look-ahead assertions are not tracked in states. Instead, - /// these are re-computed on demand via epsilon closure when computing the - /// transition function. - fn look_have(&self) -> LookSet { - LookSet::read_repr(&self.0[1..]) - } - - /// The set of look-around (both behind and ahead) assertions that appear - /// at least once in this state's set of NFA states. - /// - /// This is used to determine whether the epsilon closure needs to be - /// re-computed when computing the transition function. Namely, if the - /// state has no conditional epsilon transitions, then there is no need - /// to re-compute the epsilon closure. - fn look_need(&self) -> LookSet { - LookSet::read_repr(&self.0[5..]) - } - - /// Returns the total number of match pattern IDs in this state. - /// - /// If this state is not a match state, then this always returns 0. - fn match_len(&self) -> usize { - if !self.is_match() { - 0 - } else if !self.has_pattern_ids() { - 1 - } else { - self.encoded_pattern_len() - } - } - - /// Returns the pattern ID for this match state at the given index. - /// - /// If the given index is greater than or equal to `match_len()` for this - /// state, then this could panic or return incorrect results. - fn match_pattern(&self, index: usize) -> PatternID { - if !self.has_pattern_ids() { - PatternID::ZERO - } else { - let offset = 13 + index * PatternID::SIZE; - // This is OK since we only ever serialize valid PatternIDs to - // states. - wire::read_pattern_id_unchecked(&self.0[offset..]).0 - } - } - - /// Returns a copy of all match pattern IDs in this state. If this state - /// is not a match state, then this returns None. - fn match_pattern_ids(&self) -> Option> { - if !self.is_match() { - return None; - } - let mut pids = alloc::vec![]; - self.iter_match_pattern_ids(|pid| pids.push(pid)); - Some(pids) - } - - /// Calls the given function on every pattern ID in this state. - fn iter_match_pattern_ids(&self, mut f: F) { - if !self.is_match() { - return; - } - // As an optimization for a very common case, when this is a match - // state for an NFA with only one pattern, we don't actually write the - // pattern ID to the state representation. Instead, we know it must - // be there since it is the only possible choice. - if !self.has_pattern_ids() { - f(PatternID::ZERO); - return; - } - let mut pids = &self.0[13..self.pattern_offset_end()]; - while !pids.is_empty() { - let pid = wire::read_u32(pids); - pids = &pids[PatternID::SIZE..]; - // This is OK since we only ever serialize valid PatternIDs to - // states. And since pattern IDs can never exceed a usize, the - // unwrap is OK. - f(PatternID::new_unchecked(usize::try_from(pid).unwrap())); - } - } - - /// Calls the given function on every NFA state ID in this state. - fn iter_nfa_state_ids(&self, mut f: F) { - let mut sids = &self.0[self.pattern_offset_end()..]; - let mut prev = 0i32; - while !sids.is_empty() { - let (delta, nr) = read_vari32(sids); - sids = &sids[nr..]; - let sid = prev + delta; - prev = sid; - // This is OK since we only ever serialize valid StateIDs to - // states. And since state IDs can never exceed an isize, they must - // always be able to fit into a usize, and thus cast is OK. 
- f(StateID::new_unchecked(sid.as_usize())) - } - } - - /// Returns the offset into this state's representation where the pattern - /// IDs end and the NFA state IDs begin. - fn pattern_offset_end(&self) -> usize { - let encoded = self.encoded_pattern_len(); - if encoded == 0 { - return 9; - } - // This arithmetic is OK since we were able to address this many bytes - // when writing to the state, thus, it must fit into a usize. - encoded.checked_mul(4).unwrap().checked_add(13).unwrap() - } - - /// Returns the total number of *encoded* pattern IDs in this state. - /// - /// This may return 0 even when this is a match state, since the pattern - /// ID `PatternID::ZERO` is not encoded when it's the only pattern ID in - /// the match state (the overwhelming common case). - fn encoded_pattern_len(&self) -> usize { - if !self.has_pattern_ids() { - return 0; - } - // This unwrap is OK since the total number of patterns is always - // guaranteed to fit into a usize. - usize::try_from(wire::read_u32(&self.0[9..13])).unwrap() - } -} - -impl<'a> core::fmt::Debug for Repr<'a> { - fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { - let mut nfa_ids = alloc::vec![]; - self.iter_nfa_state_ids(|sid| nfa_ids.push(sid)); - f.debug_struct("Repr") - .field("is_match", &self.is_match()) - .field("is_from_word", &self.is_from_word()) - .field("is_half_crlf", &self.is_half_crlf()) - .field("look_have", &self.look_have()) - .field("look_need", &self.look_need()) - .field("match_pattern_ids", &self.match_pattern_ids()) - .field("nfa_state_ids", &nfa_ids) - .finish() - } -} - -/// ReprVec is a write-only view into the representation of a DFA state. -/// -/// See Repr for more details on the purpose of this type and also the format. -/// -/// Note that not all possible combinations of methods may be called. This is -/// precisely what the various StateBuilder types encapsulate: they only -/// permit valid combinations via Rust's linear typing. -struct ReprVec<'a>(&'a mut Vec); - -impl<'a> ReprVec<'a> { - /// Set this state as a match state. - /// - /// This should not be exposed explicitly outside of this module. It is - /// set automatically when a pattern ID is added. - fn set_is_match(&mut self) { - self.0[0] |= 1 << 0; - } - - /// Set that this state has pattern IDs explicitly written to it. - /// - /// This should not be exposed explicitly outside of this module. This is - /// used internally as a space saving optimization. Namely, if the state - /// is a match state but does not have any pattern IDs written to it, - /// then it is automatically inferred to have a pattern ID of ZERO. - fn set_has_pattern_ids(&mut self) { - self.0[0] |= 1 << 1; - } - - /// Set this state as being built from a transition over a word byte. - /// - /// Setting this is only necessary when one needs to deal with word - /// boundary assertions. Therefore, if the underlying NFA has no word - /// boundary assertions, callers should not set this. - fn set_is_from_word(&mut self) { - self.0[0] |= 1 << 2; - } - - /// Set this state as having seen half of a CRLF terminator. - /// - /// In the forward direction, this should be set when a `\r` has been seen. - /// In the reverse direction, this should be set when a `\n` has been seen. - fn set_is_half_crlf(&mut self) { - self.0[0] |= 1 << 3; - } - - /// The set of look-behind assertions that were true in the transition that - /// created this state. 
- fn look_have(&self) -> LookSet { - self.repr().look_have() - } - - /// The set of look-around (both behind and ahead) assertions that appear - /// at least once in this state's set of NFA states. - fn look_need(&self) -> LookSet { - self.repr().look_need() - } - - /// Mutate the set of look-behind assertions that were true in the - /// transition that created this state. - fn set_look_have(&mut self, mut set: impl FnMut(LookSet) -> LookSet) { - set(self.look_have()).write_repr(&mut self.0[1..]); - } - - /// Mutate the set of look-around (both behind and ahead) assertions that - /// appear at least once in this state's set of NFA states. - fn set_look_need(&mut self, mut set: impl FnMut(LookSet) -> LookSet) { - set(self.look_need()).write_repr(&mut self.0[5..]); - } - - /// Add a pattern ID to this state. All match states must have at least - /// one pattern ID associated with it. - /// - /// Callers must never add duplicative pattern IDs. - /// - /// The order in which patterns are added must correspond to the order - /// in which patterns are reported as matches. - fn add_match_pattern_id(&mut self, pid: PatternID) { - // As a (somewhat small) space saving optimization, in the case where - // a matching state has exactly one pattern ID, PatternID::ZERO, we do - // not write either the pattern ID or the number of patterns encoded. - // Instead, all we do is set the 'is_match' bit on this state. Overall, - // this saves 8 bytes per match state for the overwhelming majority of - // match states. - // - // In order to know whether pattern IDs need to be explicitly read or - // not, we use another internal-only bit, 'has_pattern_ids', to - // indicate whether they have been explicitly written or not. - if !self.repr().has_pattern_ids() { - if pid == PatternID::ZERO { - self.set_is_match(); - return; - } - // Make room for 'close_match_pattern_ids' to write the total - // number of pattern IDs written. - self.0.extend(core::iter::repeat(0).take(PatternID::SIZE)); - self.set_has_pattern_ids(); - // If this was already a match state, then the only way that's - // possible when the state doesn't have pattern IDs is if - // PatternID::ZERO was added by the caller previously. In this - // case, we are now adding a non-ZERO pattern ID after it, in - // which case, we want to make sure to represent ZERO explicitly - // now. - if self.repr().is_match() { - write_u32(self.0, 0) - } else { - // Otherwise, just make sure the 'is_match' bit is set. - self.set_is_match(); - } - } - write_u32(self.0, pid.as_u32()); - } - - /// Indicate that no more pattern IDs will be added to this state. - /// - /// Once this is called, callers must not call it or 'add_match_pattern_id' - /// again. - /// - /// This should not be exposed explicitly outside of this module. It - /// should be called only when converting a StateBuilderMatches into a - /// StateBuilderNFA. - fn close_match_pattern_ids(&mut self) { - // If we never wrote any pattern IDs, then there's nothing to do here. - if !self.repr().has_pattern_ids() { - return; - } - let patsize = PatternID::SIZE; - let pattern_bytes = self.0.len() - 13; - // Every pattern ID uses 4 bytes, so number of bytes should be - // divisible by 4. - assert_eq!(pattern_bytes % patsize, 0); - // This unwrap is OK since we are guaranteed that the maximum number - // of possible patterns fits into a u32. - let count32 = u32::try_from(pattern_bytes / patsize).unwrap(); - wire::NE::write_u32(count32, &mut self.0[9..13]); - } - - /// Add an NFA state ID to this state. 
The order in which NFA states are - /// added matters. It is the caller's responsibility to ensure that - /// duplicate NFA state IDs are not added. - fn add_nfa_state_id(&mut self, prev: &mut StateID, sid: StateID) { - let delta = sid.as_i32() - prev.as_i32(); - write_vari32(self.0, delta); - *prev = sid; - } - - /// Return a read-only view of this state's representation. - fn repr(&self) -> Repr<'_> { - Repr(self.0.as_slice()) - } -} - -/// Write a signed 32-bit integer using zig-zag encoding. -/// -/// https://developers.google.com/protocol-buffers/docs/encoding#varints -fn write_vari32(data: &mut Vec, n: i32) { - let mut un = n.to_bits() << 1; - if n < 0 { - un = !un; - } - write_varu32(data, un) -} - -/// Read a signed 32-bit integer using zig-zag encoding. Also, return the -/// number of bytes read. -/// -/// https://developers.google.com/protocol-buffers/docs/encoding#varints -fn read_vari32(data: &[u8]) -> (i32, usize) { - let (un, i) = read_varu32(data); - let mut n = i32::from_bits(un >> 1); - if un & 1 != 0 { - n = !n; - } - (n, i) -} - -/// Write an unsigned 32-bit integer as a varint. In essence, `n` is written -/// as a sequence of bytes where all bytes except for the last one have the -/// most significant bit set. The least significant 7 bits correspond to the -/// actual bits of `n`. So in the worst case, a varint uses 5 bytes, but in -/// very common cases, it uses fewer than 4. -/// -/// https://developers.google.com/protocol-buffers/docs/encoding#varints -fn write_varu32(data: &mut Vec, mut n: u32) { - while n >= 0b1000_0000 { - data.push(n.low_u8() | 0b1000_0000); - n >>= 7; - } - data.push(n.low_u8()); -} - -/// Read an unsigned 32-bit varint. Also, return the number of bytes read. -/// -/// https://developers.google.com/protocol-buffers/docs/encoding#varints -fn read_varu32(data: &[u8]) -> (u32, usize) { - // N.B. We can assume correctness here since we know that all var-u32 are - // written with write_varu32. Hence, the 'as' uses and unchecked arithmetic - // is all okay. - let mut n: u32 = 0; - let mut shift: u32 = 0; - for (i, &b) in data.iter().enumerate() { - if b < 0b1000_0000 { - return (n | (u32::from(b) << shift), i + 1); - } - n |= (u32::from(b) & 0b0111_1111) << shift; - shift += 7; - } - (0, 0) -} - -/// Push a native-endian encoded `n` on to `dst`. -fn write_u32(dst: &mut Vec, n: u32) { - use crate::util::wire::NE; - - let start = dst.len(); - dst.extend(core::iter::repeat(0).take(mem::size_of::())); - NE::write_u32(n, &mut dst[start..]); -} - -#[cfg(test)] -mod tests { - use alloc::vec; - - use quickcheck::quickcheck; - - use super::*; - - #[cfg(not(miri))] - quickcheck! { - fn prop_state_read_write_nfa_state_ids(sids: Vec) -> bool { - // Builders states do not permit duplicate IDs. - let sids = dedup_state_ids(sids); - - let mut b = StateBuilderEmpty::new().into_matches().into_nfa(); - for &sid in &sids { - b.add_nfa_state_id(sid); - } - let s = b.to_state(); - let mut got = vec![]; - s.iter_nfa_state_ids(|sid| got.push(sid)); - got == sids - } - - fn prop_state_read_write_pattern_ids(pids: Vec) -> bool { - // Builders states do not permit duplicate IDs. 
- let pids = dedup_pattern_ids(pids); - - let mut b = StateBuilderEmpty::new().into_matches(); - for &pid in &pids { - b.add_match_pattern_id(pid); - } - let s = b.into_nfa().to_state(); - let mut got = vec![]; - s.iter_match_pattern_ids(|pid| got.push(pid)); - got == pids - } - - fn prop_state_read_write_nfa_state_and_pattern_ids( - sids: Vec, - pids: Vec - ) -> bool { - // Builders states do not permit duplicate IDs. - let sids = dedup_state_ids(sids); - let pids = dedup_pattern_ids(pids); - - let mut b = StateBuilderEmpty::new().into_matches(); - for &pid in &pids { - b.add_match_pattern_id(pid); - } - - let mut b = b.into_nfa(); - for &sid in &sids { - b.add_nfa_state_id(sid); - } - - let s = b.to_state(); - let mut got_pids = vec![]; - s.iter_match_pattern_ids(|pid| got_pids.push(pid)); - let mut got_sids = vec![]; - s.iter_nfa_state_ids(|sid| got_sids.push(sid)); - got_pids == pids && got_sids == sids - } - } - - quickcheck! { - fn prop_read_write_varu32(n: u32) -> bool { - let mut buf = vec![]; - write_varu32(&mut buf, n); - let (got, nread) = read_varu32(&buf); - nread == buf.len() && got == n - } - - fn prop_read_write_vari32(n: i32) -> bool { - let mut buf = vec![]; - write_vari32(&mut buf, n); - let (got, nread) = read_vari32(&buf); - nread == buf.len() && got == n - } - } - - #[cfg(not(miri))] - fn dedup_state_ids(sids: Vec) -> Vec { - let mut set = alloc::collections::BTreeSet::new(); - let mut deduped = vec![]; - for sid in sids { - if set.contains(&sid) { - continue; - } - set.insert(sid); - deduped.push(sid); - } - deduped - } - - #[cfg(not(miri))] - fn dedup_pattern_ids(pids: Vec) -> Vec { - let mut set = alloc::collections::BTreeSet::new(); - let mut deduped = vec![]; - for pid in pids { - if set.contains(&pid) { - continue; - } - set.insert(pid); - deduped.push(pid); - } - deduped - } -} diff --git a/vendor/regex-automata/src/util/empty.rs b/vendor/regex-automata/src/util/empty.rs deleted file mode 100644 index e16af3b6e596da..00000000000000 --- a/vendor/regex-automata/src/util/empty.rs +++ /dev/null @@ -1,265 +0,0 @@ -/*! -This module provides helper routines for dealing with zero-width matches. - -The main problem being solved here is this: - -1. The caller wants to search something that they know is valid UTF-8, such -as a Rust `&str`. -2. The regex used by the caller can match the empty string. For example, `a*`. -3. The caller should never get match offsets returned that occur within the -encoding of a UTF-8 codepoint. It is logically incorrect, and also means that, -e.g., slicing the `&str` at those offsets will lead to a panic. - -So the question here is, how do we prevent the caller from getting match -offsets that split a codepoint? For example, strictly speaking, the regex `a*` -matches `☃` at the positions `[0, 0]`, `[1, 1]`, `[2, 2]` and `[3, 3]` since -the UTF-8 encoding of `☃` is `\xE2\x98\x83`. In particular, the `NFA` that -underlies all of the matching engines in this crate doesn't have anything in -its state graph that prevents matching between UTF-8 code units. Indeed, any -engine derived from the `NFA` will match at those positions by virtue of the -fact that the `NFA` is byte oriented. That is, its transitions are defined over -bytes and the matching engines work by proceeding one byte at a time. - -(An alternative architecture would be to define the transitions in an `NFA` -over codepoints, or `char`. And then make the matching engines proceed by -decoding one codepoint at a time. 
This is a viable strategy, but it doesn't -work for DFA matching engines because designing a fast and memory efficient -transition table for an alphabet as large as Unicode is quite difficult. More -to the point, the top-level `regex` crate supports matching on arbitrary bytes -when Unicode mode is disabled and one is searching a `&[u8]`. So in that case, -you can't just limit yourself to decoding codepoints and matching those. You -really do need to be able to follow byte oriented transitions on the `NFA`.) - -In an older version of the regex crate, we handled this case not in the regex -engine, but in the iterators over matches. Namely, since this case only arises -when the match is empty, we "just" incremented the next starting position -of the search by `N`, where `N` is the length of the codepoint encoded at -the current position. The alternative or more "natural" solution of just -incrementing by `1` would result in executing a search of `a*` on `☃` like -this: - -* Start search at `0`. -* Found match at `[0, 0]`. -* Next start position is `0`. -* To avoid an infinite loop, since it's an empty match, increment by `1`. -* Start search at `1`. -* Found match at `[1, 1]`. Oops. - -But if we instead incremented by `3` (the length in bytes of `☃`), then we get -the following: - -* Start search at `0`. -* Found match at `[0, 0]`. -* Next start position is `0`. -* To avoid an infinite loop, since it's an empty match, increment by `3`. -* Start search at `3`. -* Found match at `[3, 3]`. - -And we get the correct result. But does this technique work in all cases? -Crucially, it requires that a zero-width match that splits a codepoint never -occurs beyond the starting position of the search. Because if it did, merely -incrementing the start position by the number of bytes in the codepoint at -the current position wouldn't be enough. A zero-width match could just occur -anywhere. It turns out that it is _almost_ true. We can convince ourselves by -looking at all possible patterns that can match the empty string: - -* Patterns like `a*`, `a{0}`, `(?:)`, `a|` and `|a` all unconditionally match -the empty string. That is, assuming there isn't an `a` at the current position, -they will all match the empty string at the start of a search. There is no way -to move past it because any other match would not be "leftmost." -* `^` only matches at the beginning of the haystack, where the start position -is `0`. Since we know we're searching valid UTF-8 (if it isn't valid UTF-8, -then this entire problem goes away because it implies your string type supports -invalid UTF-8 and thus must deal with offsets that not only split a codepoint -but occur in entirely invalid UTF-8 somehow), it follows that `^` never matches -between the code units of a codepoint because the start of a valid UTF-8 string -is never within the encoding of a codepoint. -* `$` basically the same logic as `^`, but for the end of a string. A valid -UTF-8 string can't have an incomplete codepoint at the end of it. -* `(?m:^)` follows similarly to `^`, but it can match immediately following -a `\n`. However, since a `\n` is always a codepoint itself and can never -appear within a codepoint, it follows that the position immediately following -a `\n` in a string that is valid UTF-8 is guaranteed to not be between the -code units of another codepoint. (One caveat here is that the line terminator -for multi-line anchors can now be changed to any arbitrary byte, including -things like `\x98` which might occur within a codepoint. 
However, this wasn't -supported by the old regex crate. If it was, it pose the same problems as -`(?-u:\B)`, as we'll discuss below.) -* `(?m:$)` a similar argument as for `(?m:^)`. The only difference is that a -`(?m:$)` matches just before a `\n`. But the same argument applies. -* `(?Rm:^)` and `(?Rm:$)` weren't supported by the old regex crate, but the -CRLF aware line anchors follow a similar argument as for `(?m:^)` and `(?m:$)`. -Namely, since they only ever match at a boundary where one side is either a -`\r` or a `\n`, neither of which can occur within a codepoint. -* `\b` only matches at positions where both sides are valid codepoints, so -this cannot split a codepoint. -* `\B`, like `\b`, also only matches at positions where both sides are valid -codepoints. So this cannot split a codepoint either. -* `(?-u:\b)` matches only at positions where at least one side of it is an ASCII -word byte. Since ASCII bytes cannot appear as code units in non-ASCII codepoints -(one of the many amazing qualities of UTF-8), it follows that this too cannot -split a codepoint. -* `(?-u:\B)` finally represents a problem. It can matches between *any* two -bytes that are either both word bytes or non-word bytes. Since code units like -`\xE2` and `\x98` (from the UTF-8 encoding of `☃`) are both non-word bytes, -`(?-u:\B)` will match at the position between them. - -Thus, our approach of incrementing one codepoint at a time after seeing an -empty match is flawed because `(?-u:\B)` can result in an empty match that -splits a codepoint at a position past the starting point of a search. For -example, searching `(?-u:\B)` on `a☃` would produce the following matches: `[2, -2]`, `[3, 3]` and `[4, 4]`. The positions at `0` and `1` don't match because -they correspond to word boundaries since `a` is an ASCII word byte. - -So what did the old regex crate do to avoid this? It banned `(?-u:\B)` from -regexes that could match `&str`. That might sound extreme, but a lot of other -things were banned too. For example, all of `(?-u:.)`, `(?-u:[^a])` and -`(?-u:\W)` can match invalid UTF-8 too, including individual code units with a -codepoint. The key difference is that those expressions could never produce an -empty match. That ban happens when translating an `Ast` to an `Hir`, because -that process that reason about whether an `Hir` can produce *non-empty* matches -at invalid UTF-8 boundaries. Bottom line though is that we side-stepped the -`(?-u:\B)` issue by banning it. - -If banning `(?-u:\B)` were the only issue with the old regex crate's approach, -then I probably would have kept it. `\B` is rarely used, so it's not such a big -deal to have to work-around it. However, the problem with the above approach -is that it doesn't compose. The logic for avoiding splitting a codepoint only -lived in the iterator, which means if anyone wants to implement their own -iterator over regex matches, they have to deal with this extremely subtle edge -case to get full correctness. - -Instead, in this crate, we take the approach of pushing this complexity down -to the lowest layers of each regex engine. The approach is pretty simple: - -* If this corner case doesn't apply, don't do anything. (For example, if UTF-8 -mode isn't enabled or if the regex cannot match the empty string.) -* If an empty match is reported, explicitly check if it splits a codepoint. -* If it doesn't, we're done, return the match. -* If it does, then ignore the match and re-run the search. 
-* Repeat the above process until the end of the haystack is reached or a match -is found that doesn't split a codepoint or isn't zero width. - -And that's pretty much what this module provides. Every regex engine uses these -methods in their lowest level public APIs, but just above the layer where -their internal engine is used. That way, all regex engines can be arbitrarily -composed without worrying about handling this case, and iterators don't need to -handle it explicitly. - -(It turns out that a new feature I added, support for changing the line -terminator in a regex to any arbitrary byte, also provokes the above problem. -Namely, the byte could be invalid UTF-8 or a UTF-8 continuation byte. So that -support would need to be limited or banned when UTF-8 mode is enabled, just -like we did for `(?-u:\B)`. But thankfully our more robust approach in this -crate handles that case just fine too.) -*/ - -use crate::util::search::{Input, MatchError}; - -#[cold] -#[inline(never)] -pub(crate) fn skip_splits_fwd( - input: &Input<'_>, - init_value: T, - match_offset: usize, - find: F, -) -> Result, MatchError> -where - F: FnMut(&Input<'_>) -> Result, MatchError>, -{ - skip_splits(true, input, init_value, match_offset, find) -} - -#[cold] -#[inline(never)] -pub(crate) fn skip_splits_rev( - input: &Input<'_>, - init_value: T, - match_offset: usize, - find: F, -) -> Result, MatchError> -where - F: FnMut(&Input<'_>) -> Result, MatchError>, -{ - skip_splits(false, input, init_value, match_offset, find) -} - -fn skip_splits( - forward: bool, - input: &Input<'_>, - init_value: T, - mut match_offset: usize, - mut find: F, -) -> Result, MatchError> -where - F: FnMut(&Input<'_>) -> Result, MatchError>, -{ - // If our config says to do an anchored search, then we're definitely - // done. We just need to determine whether we have a valid match or - // not. If we don't, then we're not allowed to continue, so we report - // no match. - // - // This is actually quite a subtle correctness thing. The key here is - // that if we got an empty match that splits a codepoint after doing an - // anchored search in UTF-8 mode, then that implies that we must have - // *started* the search at a location that splits a codepoint. This - // follows from the fact that if a match is reported from an anchored - // search, then the start offset of the match *must* match the start - // offset of the search. - // - // It also follows that no other non-empty match is possible. For - // example, you might write a regex like '(?:)|SOMETHING' and start its - // search in the middle of a codepoint. The first branch is an empty - // regex that will bubble up a match at the first position, and then - // get rejected here and report no match. But what if 'SOMETHING' could - // have matched? We reason that such a thing is impossible, because - // if it does, it must report a match that starts in the middle of a - // codepoint. This in turn implies that a match is reported whose span - // does not correspond to valid UTF-8, and this breaks the promise - // made when UTF-8 mode is enabled. (That promise *can* be broken, for - // example, by enabling UTF-8 mode but building an by hand NFA that - // produces non-empty matches that span invalid UTF-8. This is an unchecked - // but documented precondition violation of UTF-8 mode, and is documented - // to have unspecified behavior.) 
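A minimal, forward-only sketch of that retry loop follows. The searcher and every name in it are invented for the illustration; the real `skip_splits` here additionally handles anchored searches and the reverse direction, and works on `Input` rather than `&str`.

// Toy version of the "skip splits" idea: keep bumping the position until the
// reported offset no longer falls inside the encoding of a codepoint.
fn find_non_splitting<F>(haystack: &str, mut find: F) -> Option<(usize, usize)>
where
    // (haystack, start hint) -> (match start, match end)
    F: FnMut(&str, usize) -> Option<(usize, usize)>,
{
    let mut at = 0;
    while at <= haystack.len() {
        let (start, end) = find(haystack, at)?;
        // Empty matches have start == end, so checking one offset suffices.
        if haystack.is_char_boundary(end) {
            return Some((start, end));
        }
        // The offset splits a codepoint; advance one byte and retry.
        at = end + 1;
    }
    None
}

fn main() {
    // A fake engine standing in for `(?-u:\B)` on "a☃": it reports an empty
    // match at the smallest position >= `at` that is not an ASCII word
    // boundary (positions 0 and 1 are word boundaries because of the 'a').
    let haystack = "a☃"; // bytes: 61 E2 98 83
    let fake_engine = |h: &str, at: usize| {
        (at..=h.len()).map(|i| (i, i)).find(|&(i, _)| i >= 2)
    };
    // Offsets 2 and 3 split the snowman; the first acceptable match is [4, 4].
    assert_eq!(find_non_splitting(haystack, fake_engine), Some((4, 4)));
}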
- // - // I believe this actually means that if an anchored search is run, and - // UTF-8 mode is enabled and the start position splits a codepoint, - // then it is correct to immediately report no match without even - // executing the regex engine. But it doesn't really seem worth writing - // out that case in every regex engine to save a tiny bit of work in an - // extremely pathological case, so we just handle it here. - if input.get_anchored().is_anchored() { - return Ok(if input.is_char_boundary(match_offset) { - Some(init_value) - } else { - None - }); - } - // Otherwise, we have an unanchored search, so just keep looking for - // matches until we have one that does not split a codepoint or we hit - // EOI. - let mut value = init_value; - let mut input = input.clone(); - while !input.is_char_boundary(match_offset) { - if forward { - // The unwrap is OK here because overflowing usize while - // iterating over a slice is impossible, at it would require - // a slice of length greater than isize::MAX, which is itself - // impossible. - input.set_start(input.start().checked_add(1).unwrap()); - } else { - input.set_end(match input.end().checked_sub(1) { - None => return Ok(None), - Some(end) => end, - }); - } - match find(&input)? { - None => return Ok(None), - Some((new_value, new_match_end)) => { - value = new_value; - match_offset = new_match_end; - } - } - } - Ok(Some(value)) -} diff --git a/vendor/regex-automata/src/util/escape.rs b/vendor/regex-automata/src/util/escape.rs deleted file mode 100644 index 9c5b72e9d1ecec..00000000000000 --- a/vendor/regex-automata/src/util/escape.rs +++ /dev/null @@ -1,84 +0,0 @@ -/*! -Provides convenience routines for escaping raw bytes. - -Since this crate tends to deal with `&[u8]` everywhere and the default -`Debug` implementation just shows decimal integers, it makes debugging those -representations quite difficult. This module provides types that show `&[u8]` -as if it were a string, with invalid UTF-8 escaped into its byte-by-byte hex -representation. -*/ - -use crate::util::utf8; - -/// Provides a convenient `Debug` implementation for a `u8`. -/// -/// The `Debug` impl treats the byte as an ASCII, and emits a human readable -/// representation of it. If the byte isn't ASCII, then it's emitted as a hex -/// escape sequence. -#[derive(Clone, Copy)] -pub struct DebugByte(pub u8); - -impl core::fmt::Debug for DebugByte { - fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { - // Special case ASCII space. It's too hard to read otherwise, so - // put quotes around it. I sometimes wonder whether just '\x20' would - // be better... - if self.0 == b' ' { - return write!(f, "' '"); - } - // 10 bytes is enough to cover any output from ascii::escape_default. - let mut bytes = [0u8; 10]; - let mut len = 0; - for (i, mut b) in core::ascii::escape_default(self.0).enumerate() { - // capitalize \xab to \xAB - if i >= 2 && b'a' <= b && b <= b'f' { - b -= 32; - } - bytes[len] = b; - len += 1; - } - write!(f, "{}", core::str::from_utf8(&bytes[..len]).unwrap()) - } -} - -/// Provides a convenient `Debug` implementation for `&[u8]`. -/// -/// This generally works best when the bytes are presumed to be mostly UTF-8, -/// but will work for anything. For any bytes that aren't UTF-8, they are -/// emitted as hex escape sequences. 
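// Illustrative sketch (not part of this crate): a heavily simplified version
// of the escaping idea above. Unlike `DebugHaystack`, it does not decode UTF-8
// first; it prints printable ASCII verbatim and hex-escapes everything else.
fn debug_bytes(bytes: &[u8]) -> String {
    let mut out = String::new();
    for &b in bytes {
        if b.is_ascii_graphic() || b == b' ' {
            out.push(char::from(b));
        } else {
            out.push_str(&format!("\\x{:02x}", b));
        }
    }
    out
}
// debug_bytes(b"abc\xFF") produces the text `abc\xff`.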
-pub struct DebugHaystack<'a>(pub &'a [u8]); - -impl<'a> core::fmt::Debug for DebugHaystack<'a> { - fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { - write!(f, "\"")?; - // This is a sad re-implementation of a similar impl found in bstr. - let mut bytes = self.0; - while let Some(result) = utf8::decode(bytes) { - let ch = match result { - Ok(ch) => ch, - Err(byte) => { - write!(f, r"\x{byte:02x}")?; - bytes = &bytes[1..]; - continue; - } - }; - bytes = &bytes[ch.len_utf8()..]; - match ch { - '\0' => write!(f, "\\0")?, - // ASCII control characters except \0, \n, \r, \t - '\x01'..='\x08' - | '\x0b' - | '\x0c' - | '\x0e'..='\x19' - | '\x7f' => { - write!(f, "\\x{:02x}", u32::from(ch))?; - } - '\n' | '\r' | '\t' | _ => { - write!(f, "{}", ch.escape_debug())?; - } - } - } - write!(f, "\"")?; - Ok(()) - } -} diff --git a/vendor/regex-automata/src/util/int.rs b/vendor/regex-automata/src/util/int.rs deleted file mode 100644 index b726e93f858e4a..00000000000000 --- a/vendor/regex-automata/src/util/int.rs +++ /dev/null @@ -1,246 +0,0 @@ -/*! -This module provides several integer oriented traits for converting between -both fixed size integers and integers whose size varies based on the target -(like `usize`). - -The driving design principle of this module is to attempt to centralize as many -`as` casts as possible here. And in particular, we separate casts into two -buckets: - -* Casts that we use for their truncating behavior. In this case, we use more -descriptive names, like `low_u32` and `high_u32`. -* Casts that we use for converting back-and-forth between `usize`. These -conversions are generally necessary because we often store indices in different -formats to save on memory, which requires converting to and from `usize`. In -this case, we very specifically do not want to overflow, and so the methods -defined here will panic if the `as` cast would be lossy in debug mode. (A -normal `as` cast will never panic!) - -For `as` casts between raw pointers, we use `cast`, so `as` isn't needed there. - -For regex engines, floating point is just never used, so we don't have to worry -about `as` casts for those. - -Otherwise, this module pretty much covers all of our `as` needs except for one -thing: const contexts. There are a select few places in this crate where we -still need to use `as` because const functions on traits aren't stable yet. -If we wind up significantly expanding our const footprint in this crate, it -might be worth defining free functions to handle those cases. But at the time -of writing, that just seemed like too much ceremony. Instead, I comment each -such use of `as` in a const context with a "fixme" notice. - -NOTE: for simplicity, we don't take target pointer width into account here for -`usize` conversions. Since we currently only panic in debug mode, skipping the -check when it can be proven it isn't needed at compile time doesn't really -matter. Now, if we wind up wanting to do as many checks as possible in release -mode, then we would want to skip those when we know the conversions are always -non-lossy. - -NOTE: this module isn't an exhaustive API. For example, we still use things -like `u64::from` where possible, or even `usize::try_from()` for when we do -explicitly want to panic or when we want to return an error for overflow. -*/ - -// We define a little more than what we need, but I'd rather just have -// everything via a consistent and uniform API then have holes. 
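// Illustrative sketch (not part of this crate): the two buckets of casts
// described above, expressed with std APIs. The truncating bucket deliberately
// drops bits; the usize-conversion bucket refuses to.
fn cast_buckets() {
    let n: u64 = 0x1_0000_0002;
    // Truncating uses of `as` (what `low_u32`/`high_u32` style methods wrap):
    assert_eq!(n as u32, 2);
    assert_eq!((n >> 32) as u32, 1);
    // Checked conversion (what the `as_usize`/`as_u32` style methods do in
    // debug builds via `try_from`, instead of silently losing bits):
    assert!(u32::try_from(n).is_err());
}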
-#![allow(dead_code)] - -pub(crate) trait U8 { - fn as_usize(self) -> usize; -} - -impl U8 for u8 { - fn as_usize(self) -> usize { - usize::from(self) - } -} - -pub(crate) trait U16 { - fn as_usize(self) -> usize; - fn low_u8(self) -> u8; - fn high_u8(self) -> u8; -} - -impl U16 for u16 { - fn as_usize(self) -> usize { - usize::from(self) - } - - fn low_u8(self) -> u8 { - self as u8 - } - - fn high_u8(self) -> u8 { - (self >> 8) as u8 - } -} - -pub(crate) trait U32 { - fn as_usize(self) -> usize; - fn low_u8(self) -> u8; - fn low_u16(self) -> u16; - fn high_u16(self) -> u16; -} - -impl U32 for u32 { - fn as_usize(self) -> usize { - #[cfg(debug_assertions)] - { - usize::try_from(self).expect("u32 overflowed usize") - } - #[cfg(not(debug_assertions))] - { - self as usize - } - } - - fn low_u8(self) -> u8 { - self as u8 - } - - fn low_u16(self) -> u16 { - self as u16 - } - - fn high_u16(self) -> u16 { - (self >> 16) as u16 - } -} - -pub(crate) trait U64 { - fn as_usize(self) -> usize; - fn low_u8(self) -> u8; - fn low_u16(self) -> u16; - fn low_u32(self) -> u32; - fn high_u32(self) -> u32; -} - -impl U64 for u64 { - fn as_usize(self) -> usize { - #[cfg(debug_assertions)] - { - usize::try_from(self).expect("u64 overflowed usize") - } - #[cfg(not(debug_assertions))] - { - self as usize - } - } - - fn low_u8(self) -> u8 { - self as u8 - } - - fn low_u16(self) -> u16 { - self as u16 - } - - fn low_u32(self) -> u32 { - self as u32 - } - - fn high_u32(self) -> u32 { - (self >> 32) as u32 - } -} - -pub(crate) trait I32 { - fn as_usize(self) -> usize; - fn to_bits(self) -> u32; - fn from_bits(n: u32) -> i32; -} - -impl I32 for i32 { - fn as_usize(self) -> usize { - #[cfg(debug_assertions)] - { - usize::try_from(self).expect("i32 overflowed usize") - } - #[cfg(not(debug_assertions))] - { - self as usize - } - } - - fn to_bits(self) -> u32 { - self as u32 - } - - fn from_bits(n: u32) -> i32 { - n as i32 - } -} - -pub(crate) trait Usize { - fn as_u8(self) -> u8; - fn as_u16(self) -> u16; - fn as_u32(self) -> u32; - fn as_u64(self) -> u64; -} - -impl Usize for usize { - fn as_u8(self) -> u8 { - #[cfg(debug_assertions)] - { - u8::try_from(self).expect("usize overflowed u8") - } - #[cfg(not(debug_assertions))] - { - self as u8 - } - } - - fn as_u16(self) -> u16 { - #[cfg(debug_assertions)] - { - u16::try_from(self).expect("usize overflowed u16") - } - #[cfg(not(debug_assertions))] - { - self as u16 - } - } - - fn as_u32(self) -> u32 { - #[cfg(debug_assertions)] - { - u32::try_from(self).expect("usize overflowed u32") - } - #[cfg(not(debug_assertions))] - { - self as u32 - } - } - - fn as_u64(self) -> u64 { - #[cfg(debug_assertions)] - { - u64::try_from(self).expect("usize overflowed u64") - } - #[cfg(not(debug_assertions))] - { - self as u64 - } - } -} - -// Pointers aren't integers, but we convert pointers to integers to perform -// offset arithmetic in some places. (And no, we don't convert the integers -// back to pointers.) So add 'as_usize' conversions here too for completeness. -// -// These 'as' casts are actually okay because they're always non-lossy. But the -// idea here is to just try and remove as much 'as' as possible, particularly -// in this crate where we are being really paranoid about offsets and making -// sure we don't panic on inputs that might be untrusted. This way, the 'as' -// casts become easier to audit if they're all in one place, even when some of -// them are actually okay 100% of the time. 
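// Illustrative sketch (not part of this crate): the pointer-to-usize casts
// mentioned above are used for offset arithmetic along the lines of this
// helper, where `sub` is assumed to point into `haystack`.
fn offset_from_start(haystack: &[u8], sub: &[u8]) -> usize {
    (sub.as_ptr() as usize) - (haystack.as_ptr() as usize)
}
// e.g. offset_from_start(hay, &hay[3..]) == 3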
- -pub(crate) trait Pointer { - fn as_usize(self) -> usize; -} - -impl Pointer for *const T { - fn as_usize(self) -> usize { - self as usize - } -} diff --git a/vendor/regex-automata/src/util/interpolate.rs b/vendor/regex-automata/src/util/interpolate.rs deleted file mode 100644 index 2b851aa8f9caf9..00000000000000 --- a/vendor/regex-automata/src/util/interpolate.rs +++ /dev/null @@ -1,576 +0,0 @@ -/*! -Provides routines for interpolating capture group references. - -That is, if a replacement string contains references like `$foo` or `${foo1}`, -then they are replaced with the corresponding capture values for the groups -named `foo` and `foo1`, respectively. Similarly, syntax like `$1` and `${1}` -is supported as well, with `1` corresponding to a capture group index and not -a name. - -This module provides the free functions [`string`] and [`bytes`], which -interpolate Rust Unicode strings and byte strings, respectively. - -# Format - -These routines support two different kinds of capture references: unbraced and -braced. - -For the unbraced format, the format supported is `$ref` where `name` can be -any character in the class `[0-9A-Za-z_]`. `ref` is always the longest -possible parse. So for example, `$1a` corresponds to the capture group named -`1a` and not the capture group at index `1`. If `ref` matches `^[0-9]+$`, then -it is treated as a capture group index itself and not a name. - -For the braced format, the format supported is `${ref}` where `ref` can be any -sequence of bytes except for `}`. If no closing brace occurs, then it is not -considered a capture reference. As with the unbraced format, if `ref` matches -`^[0-9]+$`, then it is treated as a capture group index and not a name. - -The braced format is useful for exerting precise control over the name of the -capture reference. For example, `${1}a` corresponds to the capture group -reference `1` followed by the letter `a`, where as `$1a` (as mentioned above) -corresponds to the capture group reference `1a`. The braced format is also -useful for expressing capture group names that use characters not supported by -the unbraced format. For example, `${foo[bar].baz}` refers to the capture group -named `foo[bar].baz`. - -If a capture group reference is found and it does not refer to a valid capture -group, then it will be replaced with the empty string. - -To write a literal `$`, use `$$`. - -To be clear, and as exhibited via the type signatures in the routines in this -module, it is impossible for a replacement string to be invalid. A replacement -string may not have the intended semantics, but the interpolation procedure -itself can never fail. -*/ - -use alloc::{string::String, vec::Vec}; - -use crate::util::memchr::memchr; - -/// Accepts a replacement string and interpolates capture references with their -/// corresponding values. -/// -/// `append` should be a function that appends the string value of a capture -/// group at a particular index to the string given. If the capture group -/// index is invalid, then nothing should be appended. -/// -/// `name_to_index` should be a function that maps a capture group name to a -/// capture group index. If the given name doesn't exist, then `None` should -/// be returned. -/// -/// Finally, `dst` is where the final interpolated contents should be written. -/// If `replacement` contains no capture group references, then `dst` will be -/// equivalent to `replacement`. -/// -/// See the [module documentation](self) for details about the format -/// supported. 
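// Illustrative sketch (not part of this file): how the unbraced and braced
// formats described above differ in practice, using the `string` routine
// below. `$1a` parses as the *name* "1a" (longest possible parse) and is
// replaced with the empty string because no group has that name, `${1}a` is
// group 1 followed by a literal `a`, and `$$` is a literal `$`. Group 1's
// value is assumed to be "X" here.
fn interpolation_formats() {
    use regex_automata::util::interpolate;

    let mut dst = String::new();
    interpolate::string(
        "$1a ${1}a $$1",
        |index, dst| {
            if index == 1 {
                dst.push_str("X");
            }
        },
        |_name| None, // no capture group is named "1a"
        &mut dst,
    );
    assert_eq!(" Xa $1", dst);
}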
-/// -/// # Example -/// -/// ``` -/// use regex_automata::util::interpolate; -/// -/// let mut dst = String::new(); -/// interpolate::string( -/// "foo $bar baz", -/// |index, dst| { -/// if index == 0 { -/// dst.push_str("BAR"); -/// } -/// }, -/// |name| { -/// if name == "bar" { -/// Some(0) -/// } else { -/// None -/// } -/// }, -/// &mut dst, -/// ); -/// assert_eq!("foo BAR baz", dst); -/// ``` -pub fn string( - mut replacement: &str, - mut append: impl FnMut(usize, &mut String), - mut name_to_index: impl FnMut(&str) -> Option, - dst: &mut String, -) { - while !replacement.is_empty() { - match memchr(b'$', replacement.as_bytes()) { - None => break, - Some(i) => { - dst.push_str(&replacement[..i]); - replacement = &replacement[i..]; - } - } - // Handle escaping of '$'. - if replacement.as_bytes().get(1).map_or(false, |&b| b == b'$') { - dst.push_str("$"); - replacement = &replacement[2..]; - continue; - } - debug_assert!(!replacement.is_empty()); - let cap_ref = match find_cap_ref(replacement.as_bytes()) { - Some(cap_ref) => cap_ref, - None => { - dst.push_str("$"); - replacement = &replacement[1..]; - continue; - } - }; - replacement = &replacement[cap_ref.end..]; - match cap_ref.cap { - Ref::Number(i) => append(i, dst), - Ref::Named(name) => { - if let Some(i) = name_to_index(name) { - append(i, dst); - } - } - } - } - dst.push_str(replacement); -} - -/// Accepts a replacement byte string and interpolates capture references with -/// their corresponding values. -/// -/// `append` should be a function that appends the byte string value of a -/// capture group at a particular index to the byte string given. If the -/// capture group index is invalid, then nothing should be appended. -/// -/// `name_to_index` should be a function that maps a capture group name to a -/// capture group index. If the given name doesn't exist, then `None` should -/// be returned. -/// -/// Finally, `dst` is where the final interpolated contents should be written. -/// If `replacement` contains no capture group references, then `dst` will be -/// equivalent to `replacement`. -/// -/// See the [module documentation](self) for details about the format -/// supported. -/// -/// # Example -/// -/// ``` -/// use regex_automata::util::interpolate; -/// -/// let mut dst = vec![]; -/// interpolate::bytes( -/// b"foo $bar baz", -/// |index, dst| { -/// if index == 0 { -/// dst.extend_from_slice(b"BAR"); -/// } -/// }, -/// |name| { -/// if name == "bar" { -/// Some(0) -/// } else { -/// None -/// } -/// }, -/// &mut dst, -/// ); -/// assert_eq!(&b"foo BAR baz"[..], dst); -/// ``` -pub fn bytes( - mut replacement: &[u8], - mut append: impl FnMut(usize, &mut Vec), - mut name_to_index: impl FnMut(&str) -> Option, - dst: &mut Vec, -) { - while !replacement.is_empty() { - match memchr(b'$', replacement) { - None => break, - Some(i) => { - dst.extend_from_slice(&replacement[..i]); - replacement = &replacement[i..]; - } - } - // Handle escaping of '$'. 
- if replacement.get(1).map_or(false, |&b| b == b'$') { - dst.push(b'$'); - replacement = &replacement[2..]; - continue; - } - debug_assert!(!replacement.is_empty()); - let cap_ref = match find_cap_ref(replacement) { - Some(cap_ref) => cap_ref, - None => { - dst.push(b'$'); - replacement = &replacement[1..]; - continue; - } - }; - replacement = &replacement[cap_ref.end..]; - match cap_ref.cap { - Ref::Number(i) => append(i, dst), - Ref::Named(name) => { - if let Some(i) = name_to_index(name) { - append(i, dst); - } - } - } - } - dst.extend_from_slice(replacement); -} - -/// `CaptureRef` represents a reference to a capture group inside some text. -/// The reference is either a capture group name or a number. -/// -/// It is also tagged with the position in the text following the -/// capture reference. -#[derive(Clone, Copy, Debug, Eq, PartialEq)] -struct CaptureRef<'a> { - cap: Ref<'a>, - end: usize, -} - -/// A reference to a capture group in some text. -/// -/// e.g., `$2`, `$foo`, `${foo}`. -#[derive(Clone, Copy, Debug, Eq, PartialEq)] -enum Ref<'a> { - Named(&'a str), - Number(usize), -} - -impl<'a> From<&'a str> for Ref<'a> { - fn from(x: &'a str) -> Ref<'a> { - Ref::Named(x) - } -} - -impl From for Ref<'static> { - fn from(x: usize) -> Ref<'static> { - Ref::Number(x) - } -} - -/// Parses a possible reference to a capture group name in the given text, -/// starting at the beginning of `replacement`. -/// -/// If no such valid reference could be found, None is returned. -/// -/// Note that this returns a "possible" reference because this routine doesn't -/// know whether the reference is to a valid group or not. If it winds up not -/// being a valid reference, then it should be replaced with the empty string. -fn find_cap_ref(replacement: &[u8]) -> Option> { - let mut i = 0; - let rep: &[u8] = replacement; - if rep.len() <= 1 || rep[0] != b'$' { - return None; - } - i += 1; - if rep[i] == b'{' { - return find_cap_ref_braced(rep, i + 1); - } - let mut cap_end = i; - while rep.get(cap_end).copied().map_or(false, is_valid_cap_letter) { - cap_end += 1; - } - if cap_end == i { - return None; - } - // We just verified that the range 0..cap_end is valid ASCII, so it must - // therefore be valid UTF-8. If we really cared, we could avoid this UTF-8 - // check via an unchecked conversion or by parsing the number straight from - // &[u8]. - let cap = core::str::from_utf8(&rep[i..cap_end]) - .expect("valid UTF-8 capture name"); - Some(CaptureRef { - cap: match cap.parse::() { - Ok(i) => Ref::Number(i), - Err(_) => Ref::Named(cap), - }, - end: cap_end, - }) -} - -/// Looks for a braced reference, e.g., `${foo1}`. This assumes that an opening -/// brace has been found at `i-1` in `rep`. This then looks for a closing -/// brace and returns the capture reference within the brace. -fn find_cap_ref_braced(rep: &[u8], mut i: usize) -> Option> { - assert_eq!(b'{', rep[i.checked_sub(1).unwrap()]); - let start = i; - while rep.get(i).map_or(false, |&b| b != b'}') { - i += 1; - } - if !rep.get(i).map_or(false, |&b| b == b'}') { - return None; - } - // When looking at braced names, we don't put any restrictions on the name, - // so it's possible it could be invalid UTF-8. But a capture group name - // can never be invalid UTF-8, so if we have invalid UTF-8, then we can - // safely return None. 
- let cap = match core::str::from_utf8(&rep[start..i]) { - Err(_) => return None, - Ok(cap) => cap, - }; - Some(CaptureRef { - cap: match cap.parse::() { - Ok(i) => Ref::Number(i), - Err(_) => Ref::Named(cap), - }, - end: i + 1, - }) -} - -/// Returns true if and only if the given byte is allowed in a capture name -/// written in non-brace form. -fn is_valid_cap_letter(b: u8) -> bool { - matches!(b, b'0'..=b'9' | b'a'..=b'z' | b'A'..=b'Z' | b'_') -} - -#[cfg(test)] -mod tests { - use alloc::{string::String, vec, vec::Vec}; - - use super::{find_cap_ref, CaptureRef}; - - macro_rules! find { - ($name:ident, $text:expr) => { - #[test] - fn $name() { - assert_eq!(None, find_cap_ref($text.as_bytes())); - } - }; - ($name:ident, $text:expr, $capref:expr) => { - #[test] - fn $name() { - assert_eq!(Some($capref), find_cap_ref($text.as_bytes())); - } - }; - } - - macro_rules! c { - ($name_or_number:expr, $pos:expr) => { - CaptureRef { cap: $name_or_number.into(), end: $pos } - }; - } - - find!(find_cap_ref1, "$foo", c!("foo", 4)); - find!(find_cap_ref2, "${foo}", c!("foo", 6)); - find!(find_cap_ref3, "$0", c!(0, 2)); - find!(find_cap_ref4, "$5", c!(5, 2)); - find!(find_cap_ref5, "$10", c!(10, 3)); - // See https://github.com/rust-lang/regex/pull/585 - // for more on characters following numbers - find!(find_cap_ref6, "$42a", c!("42a", 4)); - find!(find_cap_ref7, "${42}a", c!(42, 5)); - find!(find_cap_ref8, "${42"); - find!(find_cap_ref9, "${42 "); - find!(find_cap_ref10, " $0 "); - find!(find_cap_ref11, "$"); - find!(find_cap_ref12, " "); - find!(find_cap_ref13, ""); - find!(find_cap_ref14, "$1-$2", c!(1, 2)); - find!(find_cap_ref15, "$1_$2", c!("1_", 3)); - find!(find_cap_ref16, "$x-$y", c!("x", 2)); - find!(find_cap_ref17, "$x_$y", c!("x_", 3)); - find!(find_cap_ref18, "${#}", c!("#", 4)); - find!(find_cap_ref19, "${Z[}", c!("Z[", 5)); - find!(find_cap_ref20, "${¾}", c!("¾", 5)); - find!(find_cap_ref21, "${¾a}", c!("¾a", 6)); - find!(find_cap_ref22, "${a¾}", c!("a¾", 6)); - find!(find_cap_ref23, "${☃}", c!("☃", 6)); - find!(find_cap_ref24, "${a☃}", c!("a☃", 7)); - find!(find_cap_ref25, "${☃a}", c!("☃a", 7)); - find!(find_cap_ref26, "${名字}", c!("名字", 9)); - - fn interpolate_string( - mut name_to_index: Vec<(&'static str, usize)>, - caps: Vec<&'static str>, - replacement: &str, - ) -> String { - name_to_index.sort_by_key(|x| x.0); - - let mut dst = String::new(); - super::string( - replacement, - |i, dst| { - if let Some(&s) = caps.get(i) { - dst.push_str(s); - } - }, - |name| -> Option { - name_to_index - .binary_search_by_key(&name, |x| x.0) - .ok() - .map(|i| name_to_index[i].1) - }, - &mut dst, - ); - dst - } - - fn interpolate_bytes( - mut name_to_index: Vec<(&'static str, usize)>, - caps: Vec<&'static str>, - replacement: &str, - ) -> String { - name_to_index.sort_by_key(|x| x.0); - - let mut dst = vec![]; - super::bytes( - replacement.as_bytes(), - |i, dst| { - if let Some(&s) = caps.get(i) { - dst.extend_from_slice(s.as_bytes()); - } - }, - |name| -> Option { - name_to_index - .binary_search_by_key(&name, |x| x.0) - .ok() - .map(|i| name_to_index[i].1) - }, - &mut dst, - ); - String::from_utf8(dst).unwrap() - } - - macro_rules! 
interp { - ($name:ident, $map:expr, $caps:expr, $hay:expr, $expected:expr $(,)*) => { - #[test] - fn $name() { - assert_eq!( - $expected, - interpolate_string($map, $caps, $hay), - "interpolate::string failed", - ); - assert_eq!( - $expected, - interpolate_bytes($map, $caps, $hay), - "interpolate::bytes failed", - ); - } - }; - } - - interp!( - interp1, - vec![("foo", 2)], - vec!["", "", "xxx"], - "test $foo test", - "test xxx test", - ); - - interp!( - interp2, - vec![("foo", 2)], - vec!["", "", "xxx"], - "test$footest", - "test", - ); - - interp!( - interp3, - vec![("foo", 2)], - vec!["", "", "xxx"], - "test${foo}test", - "testxxxtest", - ); - - interp!( - interp4, - vec![("foo", 2)], - vec!["", "", "xxx"], - "test$2test", - "test", - ); - - interp!( - interp5, - vec![("foo", 2)], - vec!["", "", "xxx"], - "test${2}test", - "testxxxtest", - ); - - interp!( - interp6, - vec![("foo", 2)], - vec!["", "", "xxx"], - "test $$foo test", - "test $foo test", - ); - - interp!( - interp7, - vec![("foo", 2)], - vec!["", "", "xxx"], - "test $foo", - "test xxx", - ); - - interp!( - interp8, - vec![("foo", 2)], - vec!["", "", "xxx"], - "$foo test", - "xxx test", - ); - - interp!( - interp9, - vec![("bar", 1), ("foo", 2)], - vec!["", "yyy", "xxx"], - "test $bar$foo", - "test yyyxxx", - ); - - interp!( - interp10, - vec![("bar", 1), ("foo", 2)], - vec!["", "yyy", "xxx"], - "test $ test", - "test $ test", - ); - - interp!( - interp11, - vec![("bar", 1), ("foo", 2)], - vec!["", "yyy", "xxx"], - "test ${} test", - "test test", - ); - - interp!( - interp12, - vec![("bar", 1), ("foo", 2)], - vec!["", "yyy", "xxx"], - "test ${ } test", - "test test", - ); - - interp!( - interp13, - vec![("bar", 1), ("foo", 2)], - vec!["", "yyy", "xxx"], - "test ${a b} test", - "test test", - ); - - interp!( - interp14, - vec![("bar", 1), ("foo", 2)], - vec!["", "yyy", "xxx"], - "test ${a} test", - "test test", - ); - - // This is a funny case where a braced reference is never closed, but - // within the unclosed braced reference, there is an unbraced reference. - // In this case, the braced reference is just treated literally and the - // unbraced reference is found. - interp!( - interp15, - vec![("bar", 1), ("foo", 2)], - vec!["", "yyy", "xxx"], - "test ${wat $bar ok", - "test ${wat yyy ok", - ); -} diff --git a/vendor/regex-automata/src/util/iter.rs b/vendor/regex-automata/src/util/iter.rs deleted file mode 100644 index dcfa4a4cc3022f..00000000000000 --- a/vendor/regex-automata/src/util/iter.rs +++ /dev/null @@ -1,1022 +0,0 @@ -/*! -Generic helpers for iteration of matches from a regex engine in a haystack. - -The principle type in this module is a [`Searcher`]. A `Searcher` provides -its own lower level iterator-like API in addition to methods for constructing -types that implement `Iterator`. The documentation for `Searcher` explains a -bit more about why these different APIs exist. - -Currently, this module supports iteration over any regex engine that works -with the [`HalfMatch`], [`Match`] or [`Captures`] types. -*/ - -#[cfg(feature = "alloc")] -use crate::util::captures::Captures; -use crate::util::search::{HalfMatch, Input, Match, MatchError}; - -/// A searcher for creating iterators and performing lower level iteration. -/// -/// This searcher encapsulates the logic required for finding all successive -/// non-overlapping matches in a haystack. In theory, iteration would look -/// something like this: -/// -/// 1. Setting the start position to `0`. -/// 2. Execute a regex search. If no match, end iteration. -/// 3. 
Report the match and set the start position to the end of the match. -/// 4. Go back to (2). -/// -/// And if this were indeed the case, it's likely that `Searcher` wouldn't -/// exist. Unfortunately, because a regex may match the empty string, the above -/// logic won't work for all possible regexes. Namely, if an empty match is -/// found, then step (3) would set the start position of the search to the -/// position it was at. Thus, iteration would never end. -/// -/// Instead, a `Searcher` knows how to detect these cases and forcefully -/// advance iteration in the case of an empty match that overlaps with a -/// previous match. -/// -/// If you know that your regex cannot match any empty string, then the simple -/// algorithm described above will work correctly. -/// -/// When possible, prefer the iterators defined on the regex engine you're -/// using. This tries to abstract over the regex engine and is thus a bit more -/// unwieldy to use. -/// -/// In particular, a `Searcher` is not itself an iterator. Instead, it provides -/// `advance` routines that permit moving the search along explicitly. It also -/// provides various routines, like [`Searcher::into_matches_iter`], that -/// accept a closure (representing how a regex engine executes a search) and -/// returns a conventional iterator. -/// -/// The lifetime parameters come from the [`Input`] type passed to -/// [`Searcher::new`]: -/// -/// * `'h` is the lifetime of the underlying haystack. -/// -/// # Searcher vs Iterator -/// -/// Why does a search type with "advance" APIs exist at all when we also have -/// iterators? Unfortunately, the reasoning behind this split is a complex -/// combination of the following things: -/// -/// 1. While many of the regex engines expose their own iterators, it is also -/// nice to expose this lower level iteration helper because it permits callers -/// to provide their own `Input` configuration. Moreover, a `Searcher` can work -/// with _any_ regex engine instead of only the ones defined in this crate. -/// This way, everyone benefits from a shared iteration implementation. -/// 2. There are many different regex engines that, while they have the same -/// match semantics, they have slightly different APIs. Iteration is just -/// complex enough to want to share code, and so we need a way of abstracting -/// over those different regex engines. While we could define a new trait that -/// describes any regex engine search API, it would wind up looking very close -/// to a closure. While there may still be reasons for the more generic trait -/// to exist, for now and for the purposes of iteration, we use a closure. -/// Closures also provide a lot of easy flexibility at the call site, in that -/// they permit the caller to borrow any kind of state they want for use during -/// each search call. -/// 3. As a result of using closures, and because closures are anonymous types -/// that cannot be named, it is difficult to encapsulate them without both -/// costs to speed and added complexity to the public API. For example, in -/// defining an iterator type like -/// [`dfa::regex::FindMatches`](crate::dfa::regex::FindMatches), -/// if we use a closure internally, it's not possible to name this type in the -/// return type of the iterator constructor. Thus, the only way around it is -/// to erase the type by boxing it and turning it into a `Box`. -/// This boxed closure is unlikely to be inlined _and_ it infects the public -/// API in subtle ways. 
Namely, unless you declare the closure as implementing
-/// `Send` and `Sync`, then the resulting iterator type won't implement it
-/// either. But there are practical issues with requiring the closure to
-/// implement `Send` and `Sync` that result in other API complexities that
-/// are beyond the scope of this already long exposition.
-/// 4. Some regex engines expose more complex match information than just
-/// "which pattern matched" and "at what offsets." For example, the PikeVM
-/// exposes match spans for each capturing group that participated in the
-/// match. In such cases, it can be quite beneficial to reuse the capturing
-/// group allocation on subsequent searches. A proper iterator doesn't permit
-/// this API due to its interface, so it's useful to have something a bit lower
-/// level that permits callers to amortize allocations while also reusing a
-/// shared implementation of iteration. (See the documentation for
-/// [`Searcher::advance`] for an example of using the "advance" API with the
-/// PikeVM.)
-///
-/// What this boils down to is that there are "advance" APIs which require
-/// handing a closure to them for every call, and there are also APIs to create
-/// iterators from a closure. The former are useful for _implementing_
-/// iterators or when you need more flexibility, while the latter are useful
-/// for conveniently writing custom iterators on-the-fly.
-///
-/// # Example: iterating with captures
-///
-/// Several regex engines in this crate offer convenient iterator APIs over
-/// [`Captures`] values. To do so, this requires allocating a new `Captures`
-/// value for each iteration step. This can perhaps be more costly than you
-/// might want. Instead of implementing your own iterator to avoid that
-/// cost (which can be a little subtle if you want to handle empty matches
-/// correctly), you can use this `Searcher` to do it for you:
-///
-/// ```
-/// use regex_automata::{
-///     nfa::thompson::pikevm::PikeVM,
-///     util::iter::Searcher,
-///     Input, Span,
-/// };
-///
-/// let re = PikeVM::new("foo(?P<numbers>[0-9]+)")?;
-/// let haystack = "foo1 foo12 foo123";
-///
-/// let mut caps = re.create_captures();
-/// let mut cache = re.create_cache();
-/// let mut matches = vec![];
-/// let mut searcher = Searcher::new(Input::new(haystack));
-/// while let Some(_) = searcher.advance(|input| {
-///     re.search(&mut cache, input, &mut caps);
-///     Ok(caps.get_match())
-/// }) {
-///     // The unwrap is OK since 'numbers' matches if the pattern matches.
-///     matches.push(caps.get_group_by_name("numbers").unwrap());
-/// }
-/// assert_eq!(matches, vec![
-///     Span::from(3..4),
-///     Span::from(8..10),
-///     Span::from(14..17),
-/// ]);
-///
-/// # Ok::<(), Box<dyn std::error::Error>>(())
-/// ```
-#[derive(Clone, Debug)]
-pub struct Searcher<'h> {
-    /// The input parameters to give to each regex engine call.
-    ///
-    /// The start position of the search is mutated during iteration.
-    input: Input<'h>,
-    /// Records the end offset of the most recent match. This is necessary to
-    /// handle a corner case for preventing empty matches from overlapping with
-    /// the ending bounds of a prior match.
-    last_match_end: Option<usize>,
-}
-
-impl<'h> Searcher<'h> {
-    /// Create a new fallible non-overlapping matches iterator.
-    ///
-    /// The given `input` provides the parameters (including the haystack),
-    /// while the `finder` represents a closure that calls the underlying regex
-    /// engine. The closure may borrow any additional state that is needed,
-    /// such as a prefilter scanner.
- pub fn new(input: Input<'h>) -> Searcher<'h> { - Searcher { input, last_match_end: None } - } - - /// Returns the current `Input` used by this searcher. - /// - /// The `Input` returned is generally equivalent to the one given to - /// [`Searcher::new`], but its start position may be different to reflect - /// the start of the next search to be executed. - pub fn input<'s>(&'s self) -> &'s Input<'h> { - &self.input - } - - /// Return the next half match for an infallible search if one exists, and - /// advance to the next position. - /// - /// This is like `try_advance_half`, except errors are converted into - /// panics. - /// - /// # Panics - /// - /// If the given closure returns an error, then this panics. This is useful - /// when you know your underlying regex engine has been configured to not - /// return an error. - /// - /// # Example - /// - /// This example shows how to use a `Searcher` to iterate over all matches - /// when using a DFA, which only provides "half" matches. - /// - /// ``` - /// use regex_automata::{ - /// hybrid::dfa::DFA, - /// util::iter::Searcher, - /// HalfMatch, Input, - /// }; - /// - /// let re = DFA::new(r"[0-9]{4}-[0-9]{2}-[0-9]{2}")?; - /// let mut cache = re.create_cache(); - /// - /// let input = Input::new("2010-03-14 2016-10-08 2020-10-22"); - /// let mut it = Searcher::new(input); - /// - /// let expected = Some(HalfMatch::must(0, 10)); - /// let got = it.advance_half(|input| re.try_search_fwd(&mut cache, input)); - /// assert_eq!(expected, got); - /// - /// let expected = Some(HalfMatch::must(0, 21)); - /// let got = it.advance_half(|input| re.try_search_fwd(&mut cache, input)); - /// assert_eq!(expected, got); - /// - /// let expected = Some(HalfMatch::must(0, 32)); - /// let got = it.advance_half(|input| re.try_search_fwd(&mut cache, input)); - /// assert_eq!(expected, got); - /// - /// let expected = None; - /// let got = it.advance_half(|input| re.try_search_fwd(&mut cache, input)); - /// assert_eq!(expected, got); - /// - /// # Ok::<(), Box>(()) - /// ``` - /// - /// This correctly moves iteration forward even when an empty match occurs: - /// - /// ``` - /// use regex_automata::{ - /// hybrid::dfa::DFA, - /// util::iter::Searcher, - /// HalfMatch, Input, - /// }; - /// - /// let re = DFA::new(r"a|")?; - /// let mut cache = re.create_cache(); - /// - /// let input = Input::new("abba"); - /// let mut it = Searcher::new(input); - /// - /// let expected = Some(HalfMatch::must(0, 1)); - /// let got = it.advance_half(|input| re.try_search_fwd(&mut cache, input)); - /// assert_eq!(expected, got); - /// - /// let expected = Some(HalfMatch::must(0, 2)); - /// let got = it.advance_half(|input| re.try_search_fwd(&mut cache, input)); - /// assert_eq!(expected, got); - /// - /// let expected = Some(HalfMatch::must(0, 4)); - /// let got = it.advance_half(|input| re.try_search_fwd(&mut cache, input)); - /// assert_eq!(expected, got); - /// - /// let expected = None; - /// let got = it.advance_half(|input| re.try_search_fwd(&mut cache, input)); - /// assert_eq!(expected, got); - /// - /// # Ok::<(), Box>(()) - /// ``` - #[inline] - pub fn advance_half(&mut self, finder: F) -> Option - where - F: FnMut(&Input<'_>) -> Result, MatchError>, - { - match self.try_advance_half(finder) { - Ok(m) => m, - Err(err) => panic!( - "unexpected regex half find error: {err}\n\ - to handle find errors, use 'try' or 'search' methods", - ), - } - } - - /// Return the next match for an infallible search if one exists, and - /// advance to the next position. 
- /// - /// The search is advanced even in the presence of empty matches by - /// forbidding empty matches from overlapping with any other match. - /// - /// This is like `try_advance`, except errors are converted into panics. - /// - /// # Panics - /// - /// If the given closure returns an error, then this panics. This is useful - /// when you know your underlying regex engine has been configured to not - /// return an error. - /// - /// # Example - /// - /// This example shows how to use a `Searcher` to iterate over all matches - /// when using a regex based on lazy DFAs: - /// - /// ``` - /// use regex_automata::{ - /// hybrid::regex::Regex, - /// util::iter::Searcher, - /// Match, Input, - /// }; - /// - /// let re = Regex::new(r"[0-9]{4}-[0-9]{2}-[0-9]{2}")?; - /// let mut cache = re.create_cache(); - /// - /// let input = Input::new("2010-03-14 2016-10-08 2020-10-22"); - /// let mut it = Searcher::new(input); - /// - /// let expected = Some(Match::must(0, 0..10)); - /// let got = it.advance(|input| re.try_search(&mut cache, input)); - /// assert_eq!(expected, got); - /// - /// let expected = Some(Match::must(0, 11..21)); - /// let got = it.advance(|input| re.try_search(&mut cache, input)); - /// assert_eq!(expected, got); - /// - /// let expected = Some(Match::must(0, 22..32)); - /// let got = it.advance(|input| re.try_search(&mut cache, input)); - /// assert_eq!(expected, got); - /// - /// let expected = None; - /// let got = it.advance(|input| re.try_search(&mut cache, input)); - /// assert_eq!(expected, got); - /// - /// # Ok::<(), Box>(()) - /// ``` - /// - /// This example shows the same as above, but with the PikeVM. This example - /// is useful because it shows how to use this API even when the regex - /// engine doesn't directly return a `Match`. - /// - /// ``` - /// use regex_automata::{ - /// nfa::thompson::pikevm::PikeVM, - /// util::iter::Searcher, - /// Match, Input, - /// }; - /// - /// let re = PikeVM::new(r"[0-9]{4}-[0-9]{2}-[0-9]{2}")?; - /// let (mut cache, mut caps) = (re.create_cache(), re.create_captures()); - /// - /// let input = Input::new("2010-03-14 2016-10-08 2020-10-22"); - /// let mut it = Searcher::new(input); - /// - /// let expected = Some(Match::must(0, 0..10)); - /// let got = it.advance(|input| { - /// re.search(&mut cache, input, &mut caps); - /// Ok(caps.get_match()) - /// }); - /// // Note that if we wanted to extract capturing group spans, we could - /// // do that here with 'caps'. 
- /// assert_eq!(expected, got); - /// - /// let expected = Some(Match::must(0, 11..21)); - /// let got = it.advance(|input| { - /// re.search(&mut cache, input, &mut caps); - /// Ok(caps.get_match()) - /// }); - /// assert_eq!(expected, got); - /// - /// let expected = Some(Match::must(0, 22..32)); - /// let got = it.advance(|input| { - /// re.search(&mut cache, input, &mut caps); - /// Ok(caps.get_match()) - /// }); - /// assert_eq!(expected, got); - /// - /// let expected = None; - /// let got = it.advance(|input| { - /// re.search(&mut cache, input, &mut caps); - /// Ok(caps.get_match()) - /// }); - /// assert_eq!(expected, got); - /// - /// # Ok::<(), Box>(()) - /// ``` - #[inline] - pub fn advance(&mut self, finder: F) -> Option - where - F: FnMut(&Input<'_>) -> Result, MatchError>, - { - match self.try_advance(finder) { - Ok(m) => m, - Err(err) => panic!( - "unexpected regex find error: {err}\n\ - to handle find errors, use 'try' or 'search' methods", - ), - } - } - - /// Return the next half match for a fallible search if one exists, and - /// advance to the next position. - /// - /// This is like `advance_half`, except it permits callers to handle errors - /// during iteration. - #[inline] - pub fn try_advance_half( - &mut self, - mut finder: F, - ) -> Result, MatchError> - where - F: FnMut(&Input<'_>) -> Result, MatchError>, - { - let mut m = match finder(&self.input)? { - None => return Ok(None), - Some(m) => m, - }; - if Some(m.offset()) == self.last_match_end { - m = match self.handle_overlapping_empty_half_match(m, finder)? { - None => return Ok(None), - Some(m) => m, - }; - } - self.input.set_start(m.offset()); - self.last_match_end = Some(m.offset()); - Ok(Some(m)) - } - - /// Return the next match for a fallible search if one exists, and advance - /// to the next position. - /// - /// This is like `advance`, except it permits callers to handle errors - /// during iteration. - #[inline] - pub fn try_advance( - &mut self, - mut finder: F, - ) -> Result, MatchError> - where - F: FnMut(&Input<'_>) -> Result, MatchError>, - { - let mut m = match finder(&self.input)? { - None => return Ok(None), - Some(m) => m, - }; - if m.is_empty() && Some(m.end()) == self.last_match_end { - m = match self.handle_overlapping_empty_match(m, finder)? { - None => return Ok(None), - Some(m) => m, - }; - } - self.input.set_start(m.end()); - self.last_match_end = Some(m.end()); - Ok(Some(m)) - } - - /// Given a closure that executes a single search, return an iterator over - /// all successive non-overlapping half matches. - /// - /// The iterator returned yields result values. If the underlying regex - /// engine is configured to never return an error, consider calling - /// [`TryHalfMatchesIter::infallible`] to convert errors into panics. - /// - /// # Example - /// - /// This example shows how to use a `Searcher` to create a proper - /// iterator over half matches. 
- /// - /// ``` - /// use regex_automata::{ - /// hybrid::dfa::DFA, - /// util::iter::Searcher, - /// HalfMatch, Input, - /// }; - /// - /// let re = DFA::new(r"[0-9]{4}-[0-9]{2}-[0-9]{2}")?; - /// let mut cache = re.create_cache(); - /// - /// let input = Input::new("2010-03-14 2016-10-08 2020-10-22"); - /// let mut it = Searcher::new(input).into_half_matches_iter(|input| { - /// re.try_search_fwd(&mut cache, input) - /// }); - /// - /// let expected = Some(Ok(HalfMatch::must(0, 10))); - /// assert_eq!(expected, it.next()); - /// - /// let expected = Some(Ok(HalfMatch::must(0, 21))); - /// assert_eq!(expected, it.next()); - /// - /// let expected = Some(Ok(HalfMatch::must(0, 32))); - /// assert_eq!(expected, it.next()); - /// - /// let expected = None; - /// assert_eq!(expected, it.next()); - /// - /// # Ok::<(), Box>(()) - /// ``` - #[inline] - pub fn into_half_matches_iter( - self, - finder: F, - ) -> TryHalfMatchesIter<'h, F> - where - F: FnMut(&Input<'_>) -> Result, MatchError>, - { - TryHalfMatchesIter { it: self, finder } - } - - /// Given a closure that executes a single search, return an iterator over - /// all successive non-overlapping matches. - /// - /// The iterator returned yields result values. If the underlying regex - /// engine is configured to never return an error, consider calling - /// [`TryMatchesIter::infallible`] to convert errors into panics. - /// - /// # Example - /// - /// This example shows how to use a `Searcher` to create a proper - /// iterator over matches. - /// - /// ``` - /// use regex_automata::{ - /// hybrid::regex::Regex, - /// util::iter::Searcher, - /// Match, Input, - /// }; - /// - /// let re = Regex::new(r"[0-9]{4}-[0-9]{2}-[0-9]{2}")?; - /// let mut cache = re.create_cache(); - /// - /// let input = Input::new("2010-03-14 2016-10-08 2020-10-22"); - /// let mut it = Searcher::new(input).into_matches_iter(|input| { - /// re.try_search(&mut cache, input) - /// }); - /// - /// let expected = Some(Ok(Match::must(0, 0..10))); - /// assert_eq!(expected, it.next()); - /// - /// let expected = Some(Ok(Match::must(0, 11..21))); - /// assert_eq!(expected, it.next()); - /// - /// let expected = Some(Ok(Match::must(0, 22..32))); - /// assert_eq!(expected, it.next()); - /// - /// let expected = None; - /// assert_eq!(expected, it.next()); - /// - /// # Ok::<(), Box>(()) - /// ``` - #[inline] - pub fn into_matches_iter(self, finder: F) -> TryMatchesIter<'h, F> - where - F: FnMut(&Input<'_>) -> Result, MatchError>, - { - TryMatchesIter { it: self, finder } - } - - /// Given a closure that executes a single search, return an iterator over - /// all successive non-overlapping `Captures` values. - /// - /// The iterator returned yields result values. If the underlying regex - /// engine is configured to never return an error, consider calling - /// [`TryCapturesIter::infallible`] to convert errors into panics. - /// - /// Unlike the other iterator constructors, this accepts an initial - /// `Captures` value. This `Captures` value is reused for each search, and - /// the iterator implementation clones it before returning it. The caller - /// must provide this value because the iterator is purposely ignorant - /// of the underlying regex engine and thus doesn't know how to create - /// one itself. More to the point, a `Captures` value itself has a few - /// different constructors, which change which kind of information is - /// available to query in exchange for search performance. 
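// Illustrative sketch (not part of this file): the "few different
// constructors" referred to above are, as best as can be stated here, the
// `Captures::all`, `Captures::matches` and `Captures::empty` associated
// functions, each taking the regex's `GroupInfo`. Those names and the
// `get_nfa().group_info()` accessors are assumptions of this sketch rather
// than something defined in this module.
fn captures_flavors() -> Result<(), Box<dyn std::error::Error>> {
    use regex_automata::{
        nfa::thompson::pikevm::PikeVM,
        util::captures::Captures,
    };

    let re = PikeVM::new(r"(?P<word>\w+)")?;
    let info = re.get_nfa().group_info().clone();
    // Tracks spans for every capture group: most information, slowest.
    let _all = Captures::all(info.clone());
    // Tracks only the overall match span for each pattern.
    let _matches = Captures::matches(info.clone());
    // Tracks only which pattern matched: least information, fastest.
    let _empty = Captures::empty(info);
    Ok(())
}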
- /// - /// # Example - /// - /// This example shows how to use a `Searcher` to create a proper iterator - /// over `Captures` values, which provides access to all capturing group - /// spans for each match. - /// - /// ``` - /// use regex_automata::{ - /// nfa::thompson::pikevm::PikeVM, - /// util::iter::Searcher, - /// Input, - /// }; - /// - /// let re = PikeVM::new( - /// r"(?P[0-9]{4})-(?P[0-9]{2})-(?P[0-9]{2})", - /// )?; - /// let (mut cache, caps) = (re.create_cache(), re.create_captures()); - /// - /// let haystack = "2010-03-14 2016-10-08 2020-10-22"; - /// let input = Input::new(haystack); - /// let mut it = Searcher::new(input) - /// .into_captures_iter(caps, |input, caps| { - /// re.search(&mut cache, input, caps); - /// Ok(()) - /// }); - /// - /// let got = it.next().expect("first date")?; - /// let year = got.get_group_by_name("y").expect("must match"); - /// assert_eq!("2010", &haystack[year]); - /// - /// let got = it.next().expect("second date")?; - /// let month = got.get_group_by_name("m").expect("must match"); - /// assert_eq!("10", &haystack[month]); - /// - /// let got = it.next().expect("third date")?; - /// let day = got.get_group_by_name("d").expect("must match"); - /// assert_eq!("22", &haystack[day]); - /// - /// assert!(it.next().is_none()); - /// - /// # Ok::<(), Box>(()) - /// ``` - #[cfg(feature = "alloc")] - #[inline] - pub fn into_captures_iter( - self, - caps: Captures, - finder: F, - ) -> TryCapturesIter<'h, F> - where - F: FnMut(&Input<'_>, &mut Captures) -> Result<(), MatchError>, - { - TryCapturesIter { it: self, caps, finder } - } - - /// Handles the special case of a match that begins where the previous - /// match ended. Without this special handling, it'd be possible to get - /// stuck where an empty match never results in forward progress. This - /// also makes it more consistent with how presiding general purpose regex - /// engines work. - #[cold] - #[inline(never)] - fn handle_overlapping_empty_half_match( - &mut self, - _: HalfMatch, - mut finder: F, - ) -> Result, MatchError> - where - F: FnMut(&Input<'_>) -> Result, MatchError>, - { - // Since we are only here when 'm.offset()' matches the offset of the - // last match, it follows that this must have been an empty match. - // Since we both need to make progress *and* prevent overlapping - // matches, we discard this match and advance the search by 1. - // - // Note that this may start a search in the middle of a codepoint. The - // regex engines themselves are expected to deal with that and not - // report any matches within a codepoint if they are configured in - // UTF-8 mode. - self.input.set_start(self.input.start().checked_add(1).unwrap()); - finder(&self.input) - } - - /// Handles the special case of an empty match by ensuring that 1) the - /// iterator always advances and 2) empty matches never overlap with other - /// matches. - /// - /// (1) is necessary because we principally make progress by setting the - /// starting location of the next search to the ending location of the last - /// match. But if a match is empty, then this results in a search that does - /// not advance and thus does not terminate. - /// - /// (2) is not strictly necessary, but makes intuitive sense and matches - /// the presiding behavior of most general purpose regex engines. The - /// "intuitive sense" here is that we want to report NON-overlapping - /// matches. 
So for example, given the regex 'a|(?:)' against the haystack - /// 'a', without the special handling, you'd get the matches [0, 1) and [1, - /// 1), where the latter overlaps with the end bounds of the former. - /// - /// Note that we mark this cold and forcefully prevent inlining because - /// handling empty matches like this is extremely rare and does require - /// quite a bit of code, comparatively. Keeping this code out of the main - /// iterator function keeps it smaller and more amenable to inlining - /// itself. - #[cold] - #[inline(never)] - fn handle_overlapping_empty_match( - &mut self, - m: Match, - mut finder: F, - ) -> Result, MatchError> - where - F: FnMut(&Input<'_>) -> Result, MatchError>, - { - assert!(m.is_empty()); - self.input.set_start(self.input.start().checked_add(1).unwrap()); - finder(&self.input) - } -} - -/// An iterator over all non-overlapping half matches for a fallible search. -/// -/// The iterator yields a `Result` value until no more -/// matches could be found. -/// -/// The type parameters are as follows: -/// -/// * `F` represents the type of a closure that executes the search. -/// -/// The lifetime parameters come from the [`Input`] type: -/// -/// * `'h` is the lifetime of the underlying haystack. -/// -/// When possible, prefer the iterators defined on the regex engine you're -/// using. This tries to abstract over the regex engine and is thus a bit more -/// unwieldy to use. -/// -/// This iterator is created by [`Searcher::into_half_matches_iter`]. -pub struct TryHalfMatchesIter<'h, F> { - it: Searcher<'h>, - finder: F, -} - -impl<'h, F> TryHalfMatchesIter<'h, F> { - /// Return an infallible version of this iterator. - /// - /// Any item yielded that corresponds to an error results in a panic. This - /// is useful if your underlying regex engine is configured in a way that - /// it is guaranteed to never return an error. - pub fn infallible(self) -> HalfMatchesIter<'h, F> { - HalfMatchesIter(self) - } - - /// Returns the current `Input` used by this iterator. - /// - /// The `Input` returned is generally equivalent to the one used to - /// construct this iterator, but its start position may be different to - /// reflect the start of the next search to be executed. - pub fn input<'i>(&'i self) -> &'i Input<'h> { - self.it.input() - } -} - -impl<'h, F> Iterator for TryHalfMatchesIter<'h, F> -where - F: FnMut(&Input<'_>) -> Result, MatchError>, -{ - type Item = Result; - - #[inline] - fn next(&mut self) -> Option> { - self.it.try_advance_half(&mut self.finder).transpose() - } -} - -impl<'h, F> core::fmt::Debug for TryHalfMatchesIter<'h, F> { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - f.debug_struct("TryHalfMatchesIter") - .field("it", &self.it) - .field("finder", &"") - .finish() - } -} - -/// An iterator over all non-overlapping half matches for an infallible search. -/// -/// The iterator yields a [`HalfMatch`] value until no more matches could be -/// found. -/// -/// The type parameters are as follows: -/// -/// * `F` represents the type of a closure that executes the search. -/// -/// The lifetime parameters come from the [`Input`] type: -/// -/// * `'h` is the lifetime of the underlying haystack. -/// -/// When possible, prefer the iterators defined on the regex engine you're -/// using. This tries to abstract over the regex engine and is thus a bit more -/// unwieldy to use. -/// -/// This iterator is created by [`Searcher::into_half_matches_iter`] and -/// then calling [`TryHalfMatchesIter::infallible`]. 
-#[derive(Debug)] -pub struct HalfMatchesIter<'h, F>(TryHalfMatchesIter<'h, F>); - -impl<'h, F> HalfMatchesIter<'h, F> { - /// Returns the current `Input` used by this iterator. - /// - /// The `Input` returned is generally equivalent to the one used to - /// construct this iterator, but its start position may be different to - /// reflect the start of the next search to be executed. - pub fn input<'i>(&'i self) -> &'i Input<'h> { - self.0.it.input() - } -} - -impl<'h, F> Iterator for HalfMatchesIter<'h, F> -where - F: FnMut(&Input<'_>) -> Result, MatchError>, -{ - type Item = HalfMatch; - - #[inline] - fn next(&mut self) -> Option { - match self.0.next()? { - Ok(m) => Some(m), - Err(err) => panic!( - "unexpected regex half find error: {err}\n\ - to handle find errors, use 'try' or 'search' methods", - ), - } - } -} - -/// An iterator over all non-overlapping matches for a fallible search. -/// -/// The iterator yields a `Result` value until no more -/// matches could be found. -/// -/// The type parameters are as follows: -/// -/// * `F` represents the type of a closure that executes the search. -/// -/// The lifetime parameters come from the [`Input`] type: -/// -/// * `'h` is the lifetime of the underlying haystack. -/// -/// When possible, prefer the iterators defined on the regex engine you're -/// using. This tries to abstract over the regex engine and is thus a bit more -/// unwieldy to use. -/// -/// This iterator is created by [`Searcher::into_matches_iter`]. -pub struct TryMatchesIter<'h, F> { - it: Searcher<'h>, - finder: F, -} - -impl<'h, F> TryMatchesIter<'h, F> { - /// Return an infallible version of this iterator. - /// - /// Any item yielded that corresponds to an error results in a panic. This - /// is useful if your underlying regex engine is configured in a way that - /// it is guaranteed to never return an error. - pub fn infallible(self) -> MatchesIter<'h, F> { - MatchesIter(self) - } - - /// Returns the current `Input` used by this iterator. - /// - /// The `Input` returned is generally equivalent to the one used to - /// construct this iterator, but its start position may be different to - /// reflect the start of the next search to be executed. - pub fn input<'i>(&'i self) -> &'i Input<'h> { - self.it.input() - } -} - -impl<'h, F> Iterator for TryMatchesIter<'h, F> -where - F: FnMut(&Input<'_>) -> Result, MatchError>, -{ - type Item = Result; - - #[inline] - fn next(&mut self) -> Option> { - self.it.try_advance(&mut self.finder).transpose() - } -} - -impl<'h, F> core::fmt::Debug for TryMatchesIter<'h, F> { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - f.debug_struct("TryMatchesIter") - .field("it", &self.it) - .field("finder", &"") - .finish() - } -} - -/// An iterator over all non-overlapping matches for an infallible search. -/// -/// The iterator yields a [`Match`] value until no more matches could be found. -/// -/// The type parameters are as follows: -/// -/// * `F` represents the type of a closure that executes the search. -/// -/// The lifetime parameters come from the [`Input`] type: -/// -/// * `'h` is the lifetime of the underlying haystack. -/// -/// When possible, prefer the iterators defined on the regex engine you're -/// using. This tries to abstract over the regex engine and is thus a bit more -/// unwieldy to use. -/// -/// This iterator is created by [`Searcher::into_matches_iter`] and -/// then calling [`TryMatchesIter::infallible`]. 
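// Illustrative sketch (not part of this file): when the underlying engine is
// configured such that searches cannot fail, `infallible()` trades the
// `Result` items of `TryMatchesIter` for plain `Match` values, panicking if an
// error does occur. The regex and haystack here are examples only.
fn infallible_iteration() -> Result<(), Box<dyn std::error::Error>> {
    use regex_automata::{hybrid::regex::Regex, util::iter::Searcher, Input, Match};

    let re = Regex::new(r"[0-9]+")?;
    let mut cache = re.create_cache();
    let input = Input::new("a1 b22 c333");
    let matches: Vec<Match> = Searcher::new(input)
        .into_matches_iter(|input| re.try_search(&mut cache, input))
        .infallible()
        .collect();
    assert_eq!(
        matches,
        vec![Match::must(0, 1..2), Match::must(0, 4..6), Match::must(0, 8..11)],
    );
    Ok(())
}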
-#[derive(Debug)] -pub struct MatchesIter<'h, F>(TryMatchesIter<'h, F>); - -impl<'h, F> MatchesIter<'h, F> { - /// Returns the current `Input` used by this iterator. - /// - /// The `Input` returned is generally equivalent to the one used to - /// construct this iterator, but its start position may be different to - /// reflect the start of the next search to be executed. - pub fn input<'i>(&'i self) -> &'i Input<'h> { - self.0.it.input() - } -} - -impl<'h, F> Iterator for MatchesIter<'h, F> -where - F: FnMut(&Input<'_>) -> Result, MatchError>, -{ - type Item = Match; - - #[inline] - fn next(&mut self) -> Option { - match self.0.next()? { - Ok(m) => Some(m), - Err(err) => panic!( - "unexpected regex find error: {err}\n\ - to handle find errors, use 'try' or 'search' methods", - ), - } - } -} - -/// An iterator over all non-overlapping captures for a fallible search. -/// -/// The iterator yields a `Result` value until no more -/// matches could be found. -/// -/// The type parameters are as follows: -/// -/// * `F` represents the type of a closure that executes the search. -/// -/// The lifetime parameters come from the [`Input`] type: -/// -/// * `'h` is the lifetime of the underlying haystack. -/// -/// When possible, prefer the iterators defined on the regex engine you're -/// using. This tries to abstract over the regex engine and is thus a bit more -/// unwieldy to use. -/// -/// This iterator is created by [`Searcher::into_captures_iter`]. -#[cfg(feature = "alloc")] -pub struct TryCapturesIter<'h, F> { - it: Searcher<'h>, - caps: Captures, - finder: F, -} - -#[cfg(feature = "alloc")] -impl<'h, F> TryCapturesIter<'h, F> { - /// Return an infallible version of this iterator. - /// - /// Any item yielded that corresponds to an error results in a panic. This - /// is useful if your underlying regex engine is configured in a way that - /// it is guaranteed to never return an error. - pub fn infallible(self) -> CapturesIter<'h, F> { - CapturesIter(self) - } -} - -#[cfg(feature = "alloc")] -impl<'h, F> Iterator for TryCapturesIter<'h, F> -where - F: FnMut(&Input<'_>, &mut Captures) -> Result<(), MatchError>, -{ - type Item = Result; - - #[inline] - fn next(&mut self) -> Option> { - let TryCapturesIter { ref mut it, ref mut caps, ref mut finder } = - *self; - let result = it - .try_advance(|input| { - (finder)(input, caps)?; - Ok(caps.get_match()) - }) - .transpose()?; - match result { - Ok(_) => Some(Ok(caps.clone())), - Err(err) => Some(Err(err)), - } - } -} - -#[cfg(feature = "alloc")] -impl<'h, F> core::fmt::Debug for TryCapturesIter<'h, F> { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - f.debug_struct("TryCapturesIter") - .field("it", &self.it) - .field("caps", &self.caps) - .field("finder", &"") - .finish() - } -} - -/// An iterator over all non-overlapping captures for an infallible search. -/// -/// The iterator yields a [`Captures`] value until no more matches could be -/// found. -/// -/// The type parameters are as follows: -/// -/// * `F` represents the type of a closure that executes the search. -/// -/// The lifetime parameters come from the [`Input`] type: -/// -/// * `'h` is the lifetime of the underlying haystack. -/// -/// When possible, prefer the iterators defined on the regex engine you're -/// using. This tries to abstract over the regex engine and is thus a bit more -/// unwieldy to use. -/// -/// This iterator is created by [`Searcher::into_captures_iter`] and then -/// calling [`TryCapturesIter::infallible`]. 
-#[cfg(feature = "alloc")] -#[derive(Debug)] -pub struct CapturesIter<'h, F>(TryCapturesIter<'h, F>); - -#[cfg(feature = "alloc")] -impl<'h, F> Iterator for CapturesIter<'h, F> -where - F: FnMut(&Input<'_>, &mut Captures) -> Result<(), MatchError>, -{ - type Item = Captures; - - #[inline] - fn next(&mut self) -> Option { - match self.0.next()? { - Ok(m) => Some(m), - Err(err) => panic!( - "unexpected regex captures error: {err}\n\ - to handle find errors, use 'try' or 'search' methods", - ), - } - } -} diff --git a/vendor/regex-automata/src/util/lazy.rs b/vendor/regex-automata/src/util/lazy.rs deleted file mode 100644 index c5903381ed59da..00000000000000 --- a/vendor/regex-automata/src/util/lazy.rs +++ /dev/null @@ -1,461 +0,0 @@ -/*! -A lazily initialized value for safe sharing between threads. - -The principal type in this module is `Lazy`, which makes it easy to construct -values that are shared safely across multiple threads simultaneously. -*/ - -use core::fmt; - -/// A lazily initialized value that implements `Deref` for `T`. -/// -/// A `Lazy` takes an initialization function and permits callers from any -/// thread to access the result of that initialization function in a safe -/// manner. In effect, this permits one-time initialization of global resources -/// in a (possibly) multi-threaded program. -/// -/// This type and its functionality are available even when neither the `alloc` -/// nor the `std` features are enabled. In exchange, a `Lazy` does **not** -/// guarantee that the given `create` function is called at most once. It -/// might be called multiple times. Moreover, a call to `Lazy::get` (either -/// explicitly or implicitly via `Lazy`'s `Deref` impl) may block until a `T` -/// is available. -/// -/// This is very similar to `lazy_static` or `once_cell`, except it doesn't -/// guarantee that the initialization function will be run once and it works -/// in no-alloc no-std environments. With that said, if you need stronger -/// guarantees or a more flexible API, then it is recommended to use either -/// `lazy_static` or `once_cell`. -/// -/// # Warning: may use a spin lock -/// -/// When this crate is compiled _without_ the `alloc` feature, then this type -/// may used a spin lock internally. This can have subtle effects that may -/// be undesirable. See [Spinlocks Considered Harmful][spinharm] for a more -/// thorough treatment of this topic. -/// -/// [spinharm]: https://matklad.github.io/2020/01/02/spinlocks-considered-harmful.html -/// -/// # Example -/// -/// This type is useful for creating regexes once, and then using them from -/// multiple threads simultaneously without worrying about synchronization. -/// -/// ``` -/// use regex_automata::{dfa::regex::Regex, util::lazy::Lazy, Match}; -/// -/// static RE: Lazy = Lazy::new(|| Regex::new("foo[0-9]+bar").unwrap()); -/// -/// let expected = Some(Match::must(0, 3..14)); -/// assert_eq!(expected, RE.find(b"zzzfoo12345barzzz")); -/// ``` -pub struct Lazy T>(lazy::Lazy); - -impl Lazy { - /// Create a new `Lazy` value that is initialized via the given function. - /// - /// The `T` type is automatically inferred from the return type of the - /// `create` function given. - pub const fn new(create: F) -> Lazy { - Lazy(lazy::Lazy::new(create)) - } -} - -impl T> Lazy { - /// Return a reference to the lazily initialized value. - /// - /// This routine may block if another thread is initializing a `T`. - /// - /// Note that given a `x` which has type `Lazy`, this must be called via - /// `Lazy::get(x)` and not `x.get()`. 
This routine is defined this way - /// because `Lazy` impls `Deref` with a target of `T`. - /// - /// # Panics - /// - /// This panics if the `create` function inside this lazy value panics. - /// If the panic occurred in another thread, then this routine _may_ also - /// panic (but is not guaranteed to do so). - pub fn get(this: &Lazy) -> &T { - this.0.get() - } -} - -impl T> core::ops::Deref for Lazy { - type Target = T; - - fn deref(&self) -> &T { - Lazy::get(self) - } -} - -impl T> fmt::Debug for Lazy { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - self.0.fmt(f) - } -} - -#[cfg(feature = "alloc")] -mod lazy { - use core::{ - fmt, - marker::PhantomData, - sync::atomic::{AtomicPtr, Ordering}, - }; - - use alloc::boxed::Box; - - /// A non-std lazy initialized value. - /// - /// This might run the initialization function more than once, but will - /// never block. - /// - /// I wish I could get these semantics into the non-alloc non-std Lazy - /// type below, but I'm not sure how to do it. If you can do an alloc, - /// then the implementation becomes very simple if you don't care about - /// redundant work precisely because a pointer can be atomically swapped. - /// - /// Perhaps making this approach work in the non-alloc non-std case - /// requires asking the caller for a pointer? It would make the API less - /// convenient I think. - pub(super) struct Lazy { - data: AtomicPtr, - create: F, - // This indicates to the compiler that this type can drop T. It's not - // totally clear how the absence of this marker could lead to trouble, - // but putting here doesn't have any downsides so we hedge until someone - // can from the Unsafe Working Group can tell us definitively that we - // don't need it. - // - // See: https://github.com/BurntSushi/regex-automata/issues/30 - owned: PhantomData>, - } - - // SAFETY: So long as T and &T (and F and &F) can themselves be safely - // shared among threads, so to can a Lazy. Namely, the Lazy API only - // permits accessing a &T and initialization is free of data races. So if T - // is thread safe, then so to is Lazy. - // - // We specifically require that T: Send in order for Lazy to be Sync. - // Without that requirement, it's possible to send a T from one thread to - // another via Lazy's destructor. - // - // It's not clear whether we need F: Send+Sync for Lazy to be Sync. But - // we're conservative for now and keep both. - unsafe impl Sync for Lazy {} - - impl Lazy { - /// Create a new alloc but non-std lazy value that is racily - /// initialized. That is, the 'create' function may be called more than - /// once. - pub(super) const fn new(create: F) -> Lazy { - Lazy { - data: AtomicPtr::new(core::ptr::null_mut()), - create, - owned: PhantomData, - } - } - } - - impl T> Lazy { - /// Get the underlying lazy value. If it hasn't been initialized - /// yet, then always attempt to initialize it (even if some other - /// thread is initializing it) and atomically attach it to this lazy - /// value before returning it. - pub(super) fn get(&self) -> &T { - if let Some(data) = self.poll() { - return data; - } - let data = (self.create)(); - let mut ptr = Box::into_raw(Box::new(data)); - // We attempt to stuff our initialized value into our atomic - // pointer. Upon success, we don't need to do anything. But if - // someone else beat us to the punch, then we need to make sure - // our newly created value is dropped. 
- let result = self.data.compare_exchange( - core::ptr::null_mut(), - ptr, - Ordering::AcqRel, - Ordering::Acquire, - ); - if let Err(old) = result { - // SAFETY: We created 'ptr' via Box::into_raw above, so turning - // it back into a Box via from_raw is safe. - drop(unsafe { Box::from_raw(ptr) }); - ptr = old; - } - // SAFETY: We just set the pointer above to a non-null value, even - // in the error case, and set it to a fully initialized value - // returned by 'create'. - unsafe { &*ptr } - } - - /// If this lazy value has been initialized successfully, then return - /// that value. Otherwise return None immediately. This never attempts - /// to run initialization itself. - fn poll(&self) -> Option<&T> { - let ptr = self.data.load(Ordering::Acquire); - if ptr.is_null() { - return None; - } - // SAFETY: We just checked that the pointer is not null. Since it's - // not null, it must have been fully initialized by 'get' at some - // point. - Some(unsafe { &*ptr }) - } - } - - impl T> fmt::Debug for Lazy { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.debug_struct("Lazy").field("data", &self.poll()).finish() - } - } - - impl Drop for Lazy { - fn drop(&mut self) { - let ptr = *self.data.get_mut(); - if !ptr.is_null() { - // SAFETY: We just checked that 'ptr' is not null. And since - // we have exclusive access, there are no races to worry about. - drop(unsafe { Box::from_raw(ptr) }); - } - } - } -} - -#[cfg(not(feature = "alloc"))] -mod lazy { - use core::{ - cell::Cell, - fmt, - mem::MaybeUninit, - panic::{RefUnwindSafe, UnwindSafe}, - sync::atomic::{AtomicU8, Ordering}, - }; - - /// Our 'Lazy' value can be in one of three states: - /// - /// * INIT is where it starts, and also ends up back here if the - /// 'create' routine panics. - /// * BUSY is where it sits while initialization is running in exactly - /// one thread. - /// * DONE is where it sits after 'create' has completed and 'data' has - /// been fully initialized. - const LAZY_STATE_INIT: u8 = 0; - const LAZY_STATE_BUSY: u8 = 1; - const LAZY_STATE_DONE: u8 = 2; - - /// A non-alloc non-std lazy initialized value. - /// - /// This guarantees initialization only happens once, but uses a spinlock - /// to block in the case of simultaneous access. Blocking occurs so that - /// one thread waits while another thread initializes the value. - /// - /// I would much rather have the semantics of the 'alloc' Lazy type above. - /// Namely, that we might run the initialization function more than once, - /// but we never otherwise block. However, I don't know how to do that in - /// a non-alloc non-std context. - pub(super) struct Lazy { - state: AtomicU8, - create: Cell>, - data: Cell>, - } - - // SAFETY: So long as T and &T (and F and &F) can themselves be safely - // shared among threads, so to can a Lazy. Namely, the Lazy API only - // permits accessing a &T and initialization is free of data races. So if T - // is thread safe, then so to is Lazy. - unsafe impl Sync for Lazy {} - // A reference to a Lazy is unwind safe because we specifically take - // precautions to poison all accesses to a Lazy if the caller-provided - // 'create' function panics. - impl RefUnwindSafe - for Lazy - { - } - - impl Lazy { - /// Create a new non-alloc non-std lazy value that is initialized - /// exactly once on first use using the given function. 
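// Minimal standalone sketch (assumes std; not part of the vendored file) of the racy
// initialization strategy described above: `create` may run more than once, but only
// one result is published, via a compare-and-swap on a pointer. Drop, Send/Sync
// bounds, and poisoning are omitted; see the vendored implementation for the full
// treatment.

use std::sync::atomic::{AtomicPtr, Ordering};

struct RacyLazy<T> {
    data: AtomicPtr<T>,
}

impl<T> RacyLazy<T> {
    const fn new() -> Self {
        RacyLazy { data: AtomicPtr::new(std::ptr::null_mut()) }
    }

    fn get_or_init(&self, create: impl FnOnce() -> T) -> &T {
        let existing = self.data.load(Ordering::Acquire);
        if !existing.is_null() {
            // SAFETY: a non-null pointer is only published by the CAS below,
            // and it always points at a fully initialized T.
            return unsafe { &*existing };
        }
        let mut ptr = Box::into_raw(Box::new(create()));
        // Publish our value unless another thread won the race.
        if let Err(winner) = self.data.compare_exchange(
            std::ptr::null_mut(),
            ptr,
            Ordering::AcqRel,
            Ordering::Acquire,
        ) {
            // SAFETY: we created `ptr` with Box::into_raw just above.
            drop(unsafe { Box::from_raw(ptr) });
            ptr = winner;
        }
        // SAFETY: `ptr` is non-null and fully initialized in either branch.
        unsafe { &*ptr }
    }
}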
- pub(super) const fn new(create: F) -> Lazy { - Lazy { - state: AtomicU8::new(LAZY_STATE_INIT), - create: Cell::new(Some(create)), - data: Cell::new(MaybeUninit::uninit()), - } - } - } - - impl T> Lazy { - /// Get the underlying lazy value. If it isn't been initialized - /// yet, then either initialize it or block until some other thread - /// initializes it. If the 'create' function given to Lazy::new panics - /// (even in another thread), then this panics too. - pub(super) fn get(&self) -> &T { - // This is effectively a spinlock. We loop until we enter a DONE - // state, and if possible, initialize it ourselves. The only way - // we exit the loop is if 'create' panics, we initialize 'data' or - // some other thread initializes 'data'. - // - // Yes, I have read spinlocks considered harmful[1]. And that - // article is why this spinlock is only active when 'alloc' isn't - // enabled. I did this because I don't think there is really - // another choice without 'alloc', other than not providing this at - // all. But I think that's a big bummer. - // - // [1]: https://matklad.github.io/2020/01/02/spinlocks-considered-harmful.html - while self.state.load(Ordering::Acquire) != LAZY_STATE_DONE { - // Check if we're the first ones to get here. If so, we'll be - // the ones who initialize. - let result = self.state.compare_exchange( - LAZY_STATE_INIT, - LAZY_STATE_BUSY, - Ordering::AcqRel, - Ordering::Acquire, - ); - // This means we saw the INIT state and nobody else can. So we - // must take responsibility for initializing. And by virtue of - // observing INIT, we have also told anyone else trying to - // get here that we are BUSY. If someone else sees BUSY, then - // they will spin until we finish initialization. - if let Ok(_) = result { - // Since we are guaranteed to be the only ones here, we - // know that 'create' is there... Unless someone else got - // here before us and 'create' panicked. In which case, - // 'self.create' is now 'None' and we forward the panic - // to the caller. (i.e., We implement poisoning.) - // - // SAFETY: Our use of 'self.state' guarantees that we are - // the only thread executing this line, and thus there are - // no races. - let create = unsafe { - (*self.create.as_ptr()).take().expect( - "Lazy's create function panicked, \ - preventing initialization, - poisoning current thread", - ) - }; - let guard = Guard { state: &self.state }; - // SAFETY: Our use of 'self.state' guarantees that we are - // the only thread executing this line, and thus there are - // no races. - unsafe { - (*self.data.as_ptr()).as_mut_ptr().write(create()); - } - // All is well. 'self.create' ran successfully, so we - // forget the guard. - core::mem::forget(guard); - // Everything is initialized, so we can declare success. - self.state.store(LAZY_STATE_DONE, Ordering::Release); - break; - } - core::hint::spin_loop(); - } - // We only get here if data is fully initialized, and thus poll - // will always return something. - self.poll().unwrap() - } - - /// If this lazy value has been initialized successfully, then return - /// that value. Otherwise return None immediately. This never blocks. - fn poll(&self) -> Option<&T> { - if self.state.load(Ordering::Acquire) == LAZY_STATE_DONE { - // SAFETY: The DONE state only occurs when data has been fully - // initialized. 
- Some(unsafe { &*(*self.data.as_ptr()).as_ptr() }) - } else { - None - } - } - } - - impl T> fmt::Debug for Lazy { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.debug_struct("Lazy") - .field("state", &self.state.load(Ordering::Acquire)) - .field("create", &"") - .field("data", &self.poll()) - .finish() - } - } - - impl Drop for Lazy { - fn drop(&mut self) { - if *self.state.get_mut() == LAZY_STATE_DONE { - // SAFETY: state is DONE if and only if data has been fully - // initialized. At which point, it is safe to drop. - unsafe { - self.data.get_mut().assume_init_drop(); - } - } - } - } - - /// A guard that will reset a Lazy's state back to INIT when dropped. The - /// idea here is to 'forget' this guard on success. On failure (when a - /// panic occurs), the Drop impl runs and causes all in-progress and future - /// 'get' calls to panic. Without this guard, all in-progress and future - /// 'get' calls would spin forever. Crashing is much better than getting - /// stuck in an infinite loop. - struct Guard<'a> { - state: &'a AtomicU8, - } - - impl<'a> Drop for Guard<'a> { - fn drop(&mut self) { - // We force ourselves back into an INIT state. This will in turn - // cause any future 'get' calls to attempt calling 'self.create' - // again which will in turn panic because 'self.create' will now - // be 'None'. - self.state.store(LAZY_STATE_INIT, Ordering::Release); - } - } -} - -#[cfg(test)] -mod tests { - use super::*; - - fn assert_send() {} - fn assert_sync() {} - fn assert_unwind() {} - fn assert_refunwind() {} - - #[test] - fn oibits() { - assert_send::>(); - assert_sync::>(); - assert_unwind::>(); - assert_refunwind::>(); - } - - // This is a regression test because we used to rely on the inferred Sync - // impl for the Lazy type defined above (for 'alloc' mode). In the - // inferred impl, it only requires that T: Sync for Lazy: Sync. But - // if we have that, we can actually make use of the fact that Lazy drops - // T to create a value on one thread and drop it on another. This *should* - // require T: Send, but our missing bounds before let it sneak by. - // - // Basically, this test should not compile, so we... comment it out. We - // don't have a great way of testing compile-fail tests right now. - // - // See: https://github.com/BurntSushi/regex-automata/issues/30 - /* - #[test] - fn sync_not_send() { - #[allow(dead_code)] - fn inner() { - let lazy = Lazy::new(move || T::default()); - std::thread::scope(|scope| { - scope.spawn(|| { - Lazy::get(&lazy); // We create T in this thread - }); - }); - // And drop in this thread. - drop(lazy); - // So we have send a !Send type over threads. (with some more - // legwork, its possible to even sneak the value out of drop - // through thread local) - } - } - */ -} diff --git a/vendor/regex-automata/src/util/look.rs b/vendor/regex-automata/src/util/look.rs deleted file mode 100644 index 20bb8cc37149e5..00000000000000 --- a/vendor/regex-automata/src/util/look.rs +++ /dev/null @@ -1,2547 +0,0 @@ -/*! -Types and routines for working with look-around assertions. - -This module principally defines two types: - -* [`Look`] enumerates all of the assertions supported by this crate. -* [`LookSet`] provides a way to efficiently store a set of [`Look`] values. -* [`LookMatcher`] provides routines for checking whether a `Look` or a -`LookSet` matches at a particular position in a haystack. -*/ - -// LAMENTATION: Sadly, a lot of the API of `Look` and `LookSet` were basically -// copied verbatim from the regex-syntax crate. 
I would have no problems using -// the regex-syntax types and defining the matching routines (only found -// in this crate) as free functions, except the `Look` and `LookSet` types -// are used in lots of places. Including in places we expect to work when -// regex-syntax is *not* enabled, such as in the definition of the NFA itself. -// -// Thankfully the code we copy is pretty simple and there isn't much of it. -// Otherwise, the rest of this module deals with *matching* the assertions, -// which is not something that regex-syntax handles. - -use crate::util::{escape::DebugByte, utf8}; - -/// A look-around assertion. -/// -/// An assertion matches at a position between characters in a haystack. -/// Namely, it does not actually "consume" any input as most parts of a regular -/// expression do. Assertions are a way of stating that some property must be -/// true at a particular point during matching. -/// -/// For example, `(?m)^[a-z]+$` is a pattern that: -/// -/// * Scans the haystack for a position at which `(?m:^)` is satisfied. That -/// occurs at either the beginning of the haystack, or immediately following -/// a `\n` character. -/// * Looks for one or more occurrences of `[a-z]`. -/// * Once `[a-z]+` has matched as much as it can, an overall match is only -/// reported when `[a-z]+` stops just before a `\n`. -/// -/// So in this case, `abc` and `\nabc\n` match, but `\nabc1\n` does not. -/// -/// Assertions are also called "look-around," "look-behind" and "look-ahead." -/// Specifically, some assertions are look-behind (like `^`), other assertions -/// are look-ahead (like `$`) and yet other assertions are both look-ahead and -/// look-behind (like `\b`). -/// -/// # Assertions in an NFA -/// -/// An assertion in a [`thompson::NFA`](crate::nfa::thompson::NFA) can be -/// thought of as a conditional epsilon transition. That is, a matching engine -/// like the [`PikeVM`](crate::nfa::thompson::pikevm::PikeVM) only permits -/// moving through conditional epsilon transitions when their condition -/// is satisfied at whatever position the `PikeVM` is currently at in the -/// haystack. -/// -/// How assertions are handled in a `DFA` is trickier, since a DFA does not -/// have epsilon transitions at all. In this case, they are compiled into the -/// automaton itself, at the expense of more states than what would be required -/// without an assertion. -#[derive(Clone, Copy, Debug, Eq, PartialEq)] -pub enum Look { - /// Match the beginning of text. Specifically, this matches at the starting - /// position of the input. - Start = 1 << 0, - /// Match the end of text. Specifically, this matches at the ending - /// position of the input. - End = 1 << 1, - /// Match the beginning of a line or the beginning of text. Specifically, - /// this matches at the starting position of the input, or at the position - /// immediately following a `\n` character. - StartLF = 1 << 2, - /// Match the end of a line or the end of text. Specifically, this matches - /// at the end position of the input, or at the position immediately - /// preceding a `\n` character. - EndLF = 1 << 3, - /// Match the beginning of a line or the beginning of text. Specifically, - /// this matches at the starting position of the input, or at the position - /// immediately following either a `\r` or `\n` character, but never after - /// a `\r` when a `\n` follows. - StartCRLF = 1 << 4, - /// Match the end of a line or the end of text. 
Specifically, this matches - /// at the end position of the input, or at the position immediately - /// preceding a `\r` or `\n` character, but never before a `\n` when a `\r` - /// precedes it. - EndCRLF = 1 << 5, - /// Match an ASCII-only word boundary. That is, this matches a position - /// where the left adjacent character and right adjacent character - /// correspond to a word and non-word or a non-word and word character. - WordAscii = 1 << 6, - /// Match an ASCII-only negation of a word boundary. - WordAsciiNegate = 1 << 7, - /// Match a Unicode-aware word boundary. That is, this matches a position - /// where the left adjacent character and right adjacent character - /// correspond to a word and non-word or a non-word and word character. - WordUnicode = 1 << 8, - /// Match a Unicode-aware negation of a word boundary. - WordUnicodeNegate = 1 << 9, - /// Match the start of an ASCII-only word boundary. That is, this matches a - /// position at either the beginning of the haystack or where the previous - /// character is not a word character and the following character is a word - /// character. - WordStartAscii = 1 << 10, - /// Match the end of an ASCII-only word boundary. That is, this matches - /// a position at either the end of the haystack or where the previous - /// character is a word character and the following character is not a word - /// character. - WordEndAscii = 1 << 11, - /// Match the start of a Unicode word boundary. That is, this matches a - /// position at either the beginning of the haystack or where the previous - /// character is not a word character and the following character is a word - /// character. - WordStartUnicode = 1 << 12, - /// Match the end of a Unicode word boundary. That is, this matches a - /// position at either the end of the haystack or where the previous - /// character is a word character and the following character is not a word - /// character. - WordEndUnicode = 1 << 13, - /// Match the start half of an ASCII-only word boundary. That is, this - /// matches a position at either the beginning of the haystack or where the - /// previous character is not a word character. - WordStartHalfAscii = 1 << 14, - /// Match the end half of an ASCII-only word boundary. That is, this - /// matches a position at either the end of the haystack or where the - /// following character is not a word character. - WordEndHalfAscii = 1 << 15, - /// Match the start half of a Unicode word boundary. That is, this matches - /// a position at either the beginning of the haystack or where the - /// previous character is not a word character. - WordStartHalfUnicode = 1 << 16, - /// Match the end half of a Unicode word boundary. That is, this matches - /// a position at either the end of the haystack or where the following - /// character is not a word character. - WordEndHalfUnicode = 1 << 17, -} - -impl Look { - /// Flip the look-around assertion to its equivalent for reverse searches. - /// For example, `StartLF` gets translated to `EndLF`. - /// - /// Some assertions, such as `WordUnicode`, remain the same since they - /// match the same positions regardless of the direction of the search. 
- #[inline] - pub const fn reversed(self) -> Look { - match self { - Look::Start => Look::End, - Look::End => Look::Start, - Look::StartLF => Look::EndLF, - Look::EndLF => Look::StartLF, - Look::StartCRLF => Look::EndCRLF, - Look::EndCRLF => Look::StartCRLF, - Look::WordAscii => Look::WordAscii, - Look::WordAsciiNegate => Look::WordAsciiNegate, - Look::WordUnicode => Look::WordUnicode, - Look::WordUnicodeNegate => Look::WordUnicodeNegate, - Look::WordStartAscii => Look::WordEndAscii, - Look::WordEndAscii => Look::WordStartAscii, - Look::WordStartUnicode => Look::WordEndUnicode, - Look::WordEndUnicode => Look::WordStartUnicode, - Look::WordStartHalfAscii => Look::WordEndHalfAscii, - Look::WordEndHalfAscii => Look::WordStartHalfAscii, - Look::WordStartHalfUnicode => Look::WordEndHalfUnicode, - Look::WordEndHalfUnicode => Look::WordStartHalfUnicode, - } - } - - /// Return the underlying representation of this look-around enumeration - /// as an integer. Giving the return value to the [`Look::from_repr`] - /// constructor is guaranteed to return the same look-around variant that - /// one started with within a semver compatible release of this crate. - #[inline] - pub const fn as_repr(self) -> u32 { - // AFAIK, 'as' is the only way to zero-cost convert an int enum to an - // actual int. - self as u32 - } - - /// Given the underlying representation of a `Look` value, return the - /// corresponding `Look` value if the representation is valid. Otherwise - /// `None` is returned. - #[inline] - pub const fn from_repr(repr: u32) -> Option { - match repr { - 0b00_0000_0000_0000_0001 => Some(Look::Start), - 0b00_0000_0000_0000_0010 => Some(Look::End), - 0b00_0000_0000_0000_0100 => Some(Look::StartLF), - 0b00_0000_0000_0000_1000 => Some(Look::EndLF), - 0b00_0000_0000_0001_0000 => Some(Look::StartCRLF), - 0b00_0000_0000_0010_0000 => Some(Look::EndCRLF), - 0b00_0000_0000_0100_0000 => Some(Look::WordAscii), - 0b00_0000_0000_1000_0000 => Some(Look::WordAsciiNegate), - 0b00_0000_0001_0000_0000 => Some(Look::WordUnicode), - 0b00_0000_0010_0000_0000 => Some(Look::WordUnicodeNegate), - 0b00_0000_0100_0000_0000 => Some(Look::WordStartAscii), - 0b00_0000_1000_0000_0000 => Some(Look::WordEndAscii), - 0b00_0001_0000_0000_0000 => Some(Look::WordStartUnicode), - 0b00_0010_0000_0000_0000 => Some(Look::WordEndUnicode), - 0b00_0100_0000_0000_0000 => Some(Look::WordStartHalfAscii), - 0b00_1000_0000_0000_0000 => Some(Look::WordEndHalfAscii), - 0b01_0000_0000_0000_0000 => Some(Look::WordStartHalfUnicode), - 0b10_0000_0000_0000_0000 => Some(Look::WordEndHalfUnicode), - _ => None, - } - } - - /// Returns a convenient single codepoint representation of this - /// look-around assertion. Each assertion is guaranteed to be represented - /// by a distinct character. - /// - /// This is useful for succinctly representing a look-around assertion in - /// human friendly but succinct output intended for a programmer working on - /// regex internals. 
- #[inline] - pub const fn as_char(self) -> char { - match self { - Look::Start => 'A', - Look::End => 'z', - Look::StartLF => '^', - Look::EndLF => '$', - Look::StartCRLF => 'r', - Look::EndCRLF => 'R', - Look::WordAscii => 'b', - Look::WordAsciiNegate => 'B', - Look::WordUnicode => '𝛃', - Look::WordUnicodeNegate => '𝚩', - Look::WordStartAscii => '<', - Look::WordEndAscii => '>', - Look::WordStartUnicode => '〈', - Look::WordEndUnicode => '〉', - Look::WordStartHalfAscii => '◁', - Look::WordEndHalfAscii => '▷', - Look::WordStartHalfUnicode => '◀', - Look::WordEndHalfUnicode => '▶', - } - } -} - -/// LookSet is a memory-efficient set of look-around assertions. -/// -/// This is useful for efficiently tracking look-around assertions. For -/// example, a [`thompson::NFA`](crate::nfa::thompson::NFA) provides properties -/// that return `LookSet`s. -#[derive(Clone, Copy, Default, Eq, PartialEq)] -pub struct LookSet { - /// The underlying representation this set is exposed to make it possible - /// to store it somewhere efficiently. The representation is that - /// of a bitset, where each assertion occupies bit `i` where - /// `i = Look::as_repr()`. - /// - /// Note that users of this internal representation must permit the full - /// range of `u16` values to be represented. For example, even if the - /// current implementation only makes use of the 10 least significant bits, - /// it may use more bits in a future semver compatible release. - pub bits: u32, -} - -impl LookSet { - /// Create an empty set of look-around assertions. - #[inline] - pub fn empty() -> LookSet { - LookSet { bits: 0 } - } - - /// Create a full set of look-around assertions. - /// - /// This set contains all possible look-around assertions. - #[inline] - pub fn full() -> LookSet { - LookSet { bits: !0 } - } - - /// Create a look-around set containing the look-around assertion given. - /// - /// This is a convenience routine for creating an empty set and inserting - /// one look-around assertions. - #[inline] - pub fn singleton(look: Look) -> LookSet { - LookSet::empty().insert(look) - } - - /// Returns the total number of look-around assertions in this set. - #[inline] - pub fn len(self) -> usize { - // OK because max value always fits in a u8, which in turn always - // fits in a usize, regardless of target. - usize::try_from(self.bits.count_ones()).unwrap() - } - - /// Returns true if and only if this set is empty. - #[inline] - pub fn is_empty(self) -> bool { - self.len() == 0 - } - - /// Returns true if and only if the given look-around assertion is in this - /// set. - #[inline] - pub fn contains(self, look: Look) -> bool { - self.bits & look.as_repr() != 0 - } - - /// Returns true if and only if this set contains any anchor assertions. - /// This includes both "start/end of haystack" and "start/end of line." - #[inline] - pub fn contains_anchor(&self) -> bool { - self.contains_anchor_haystack() || self.contains_anchor_line() - } - - /// Returns true if and only if this set contains any "start/end of - /// haystack" anchors. This doesn't include "start/end of line" anchors. - #[inline] - pub fn contains_anchor_haystack(&self) -> bool { - self.contains(Look::Start) || self.contains(Look::End) - } - - /// Returns true if and only if this set contains any "start/end of line" - /// anchors. This doesn't include "start/end of haystack" anchors. This - /// includes both `\n` line anchors and CRLF (`\r\n`) aware line anchors. 
- #[inline] - pub fn contains_anchor_line(&self) -> bool { - self.contains(Look::StartLF) - || self.contains(Look::EndLF) - || self.contains(Look::StartCRLF) - || self.contains(Look::EndCRLF) - } - - /// Returns true if and only if this set contains any "start/end of line" - /// anchors that only treat `\n` as line terminators. This does not include - /// haystack anchors or CRLF aware line anchors. - #[inline] - pub fn contains_anchor_lf(&self) -> bool { - self.contains(Look::StartLF) || self.contains(Look::EndLF) - } - - /// Returns true if and only if this set contains any "start/end of line" - /// anchors that are CRLF-aware. This doesn't include "start/end of - /// haystack" or "start/end of line-feed" anchors. - #[inline] - pub fn contains_anchor_crlf(&self) -> bool { - self.contains(Look::StartCRLF) || self.contains(Look::EndCRLF) - } - - /// Returns true if and only if this set contains any word boundary or - /// negated word boundary assertions. This include both Unicode and ASCII - /// word boundaries. - #[inline] - pub fn contains_word(self) -> bool { - self.contains_word_unicode() || self.contains_word_ascii() - } - - /// Returns true if and only if this set contains any Unicode word boundary - /// or negated Unicode word boundary assertions. - #[inline] - pub fn contains_word_unicode(self) -> bool { - self.contains(Look::WordUnicode) - || self.contains(Look::WordUnicodeNegate) - || self.contains(Look::WordStartUnicode) - || self.contains(Look::WordEndUnicode) - || self.contains(Look::WordStartHalfUnicode) - || self.contains(Look::WordEndHalfUnicode) - } - - /// Returns true if and only if this set contains any ASCII word boundary - /// or negated ASCII word boundary assertions. - #[inline] - pub fn contains_word_ascii(self) -> bool { - self.contains(Look::WordAscii) - || self.contains(Look::WordAsciiNegate) - || self.contains(Look::WordStartAscii) - || self.contains(Look::WordEndAscii) - || self.contains(Look::WordStartHalfAscii) - || self.contains(Look::WordEndHalfAscii) - } - - /// Returns an iterator over all of the look-around assertions in this set. - #[inline] - pub fn iter(self) -> LookSetIter { - LookSetIter { set: self } - } - - /// Return a new set that is equivalent to the original, but with the given - /// assertion added to it. If the assertion is already in the set, then the - /// returned set is equivalent to the original. - #[inline] - pub fn insert(self, look: Look) -> LookSet { - LookSet { bits: self.bits | look.as_repr() } - } - - /// Updates this set in place with the result of inserting the given - /// assertion into this set. - #[inline] - pub fn set_insert(&mut self, look: Look) { - *self = self.insert(look); - } - - /// Return a new set that is equivalent to the original, but with the given - /// assertion removed from it. If the assertion is not in the set, then the - /// returned set is equivalent to the original. - #[inline] - pub fn remove(self, look: Look) -> LookSet { - LookSet { bits: self.bits & !look.as_repr() } - } - - /// Updates this set in place with the result of removing the given - /// assertion from this set. - #[inline] - pub fn set_remove(&mut self, look: Look) { - *self = self.remove(look); - } - - /// Returns a new set that is the result of subtracting the given set from - /// this set. - #[inline] - pub fn subtract(self, other: LookSet) -> LookSet { - LookSet { bits: self.bits & !other.bits } - } - - /// Updates this set in place with the result of subtracting the given set - /// from this set. 
- #[inline] - pub fn set_subtract(&mut self, other: LookSet) { - *self = self.subtract(other); - } - - /// Returns a new set that is the union of this and the one given. - #[inline] - pub fn union(self, other: LookSet) -> LookSet { - LookSet { bits: self.bits | other.bits } - } - - /// Updates this set in place with the result of unioning it with the one - /// given. - #[inline] - pub fn set_union(&mut self, other: LookSet) { - *self = self.union(other); - } - - /// Returns a new set that is the intersection of this and the one given. - #[inline] - pub fn intersect(self, other: LookSet) -> LookSet { - LookSet { bits: self.bits & other.bits } - } - - /// Updates this set in place with the result of intersecting it with the - /// one given. - #[inline] - pub fn set_intersect(&mut self, other: LookSet) { - *self = self.intersect(other); - } - - /// Return a `LookSet` from the slice given as a native endian 32-bit - /// integer. - /// - /// # Panics - /// - /// This panics if `slice.len() < 4`. - #[inline] - pub fn read_repr(slice: &[u8]) -> LookSet { - let bits = u32::from_ne_bytes(slice[..4].try_into().unwrap()); - LookSet { bits } - } - - /// Write a `LookSet` as a native endian 32-bit integer to the beginning - /// of the slice given. - /// - /// # Panics - /// - /// This panics if `slice.len() < 4`. - #[inline] - pub fn write_repr(self, slice: &mut [u8]) { - let raw = self.bits.to_ne_bytes(); - slice[0] = raw[0]; - slice[1] = raw[1]; - slice[2] = raw[2]; - slice[3] = raw[3]; - } - - /// Checks that all assertions in this set can be matched. - /// - /// Some assertions, such as Unicode word boundaries, require optional (but - /// enabled by default) tables that may not be available. If there are - /// assertions in this set that require tables that are not available, then - /// this will return an error. - /// - /// Specifically, this returns an error when the - /// `unicode-word-boundary` feature is _not_ enabled _and_ this set - /// contains a Unicode word boundary assertion. - /// - /// It can be useful to use this on the result of - /// [`NFA::look_set_any`](crate::nfa::thompson::NFA::look_set_any) - /// when building a matcher engine to ensure methods like - /// [`LookMatcher::matches_set`] do not panic at search time. - pub fn available(self) -> Result<(), UnicodeWordBoundaryError> { - if self.contains_word_unicode() { - UnicodeWordBoundaryError::check()?; - } - Ok(()) - } -} - -impl core::fmt::Debug for LookSet { - fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { - if self.is_empty() { - return write!(f, "∅"); - } - for look in self.iter() { - write!(f, "{}", look.as_char())?; - } - Ok(()) - } -} - -/// An iterator over all look-around assertions in a [`LookSet`]. -/// -/// This iterator is created by [`LookSet::iter`]. -#[derive(Clone, Debug)] -pub struct LookSetIter { - set: LookSet, -} - -impl Iterator for LookSetIter { - type Item = Look; - - #[inline] - fn next(&mut self) -> Option { - if self.set.is_empty() { - return None; - } - // We'll never have more than u8::MAX distinct look-around assertions, - // so 'bit' will always fit into a u16. - let bit = u16::try_from(self.set.bits.trailing_zeros()).unwrap(); - let look = Look::from_repr(1 << bit)?; - self.set = self.set.remove(look); - Some(look) - } -} - -/// A matcher for look-around assertions. -/// -/// This matcher permits configuring aspects of how look-around assertions are -/// matched. 
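// Small self-contained sketch (hypothetical names, not the vendored types) of the
// representation LookSet uses: each variant is a distinct power of two, the set is a
// single u32, and insert/contains/union reduce to plain bit operations.

#[derive(Clone, Copy, Debug, PartialEq, Eq)]
enum Flag {
    Start = 1 << 0,
    End = 1 << 1,
    WordAscii = 1 << 2,
}

#[derive(Clone, Copy, Default)]
struct FlagSet {
    bits: u32,
}

impl FlagSet {
    fn insert(self, f: Flag) -> FlagSet {
        FlagSet { bits: self.bits | f as u32 }
    }
    fn contains(self, f: Flag) -> bool {
        self.bits & f as u32 != 0
    }
    fn union(self, other: FlagSet) -> FlagSet {
        FlagSet { bits: self.bits | other.bits }
    }
    fn len(self) -> usize {
        self.bits.count_ones() as usize
    }
}

// let set = FlagSet::default().insert(Flag::Start).insert(Flag::WordAscii);
// assert!(set.contains(Flag::Start) && !set.contains(Flag::End) && set.len() == 2);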
-/// -/// # Example -/// -/// A `LookMatcher` can change the line terminator used for matching multi-line -/// anchors such as `(?m:^)` and `(?m:$)`. -/// -/// ``` -/// use regex_automata::{ -/// nfa::thompson::{self, pikevm::PikeVM}, -/// util::look::LookMatcher, -/// Match, Input, -/// }; -/// -/// let mut lookm = LookMatcher::new(); -/// lookm.set_line_terminator(b'\x00'); -/// -/// let re = PikeVM::builder() -/// .thompson(thompson::Config::new().look_matcher(lookm)) -/// .build(r"(?m)^[a-z]+$")?; -/// let mut cache = re.create_cache(); -/// -/// // Multi-line assertions now use NUL as a terminator. -/// assert_eq!( -/// Some(Match::must(0, 1..4)), -/// re.find(&mut cache, b"\x00abc\x00"), -/// ); -/// // ... and \n is no longer recognized as a terminator. -/// assert_eq!( -/// None, -/// re.find(&mut cache, b"\nabc\n"), -/// ); -/// -/// # Ok::<(), Box>(()) -/// ``` -#[derive(Clone, Debug)] -pub struct LookMatcher { - lineterm: DebugByte, -} - -impl LookMatcher { - /// Creates a new default matcher for look-around assertions. - pub fn new() -> LookMatcher { - LookMatcher { lineterm: DebugByte(b'\n') } - } - - /// Sets the line terminator for use with `(?m:^)` and `(?m:$)`. - /// - /// Namely, instead of `^` matching after `\n` and `$` matching immediately - /// before a `\n`, this will cause it to match after and before the byte - /// given. - /// - /// It can occasionally be useful to use this to configure the line - /// terminator to the NUL byte when searching binary data. - /// - /// Note that this does not apply to CRLF-aware line anchors such as - /// `(?Rm:^)` and `(?Rm:$)`. CRLF-aware line anchors are hard-coded to - /// use `\r` and `\n`. - pub fn set_line_terminator(&mut self, byte: u8) -> &mut LookMatcher { - self.lineterm.0 = byte; - self - } - - /// Returns the line terminator that was configured for this matcher. - /// - /// If no line terminator was configured, then this returns `\n`. - /// - /// Note that the line terminator should only be used for matching `(?m:^)` - /// and `(?m:$)` assertions. It specifically should _not_ be used for - /// matching the CRLF aware assertions `(?Rm:^)` and `(?Rm:$)`. - pub fn get_line_terminator(&self) -> u8 { - self.lineterm.0 - } - - /// Returns true when the position `at` in `haystack` satisfies the given - /// look-around assertion. - /// - /// # Panics - /// - /// This panics when testing any Unicode word boundary assertion in this - /// set and when the Unicode word data is not available. Specifically, this - /// only occurs when the `unicode-word-boundary` feature is not enabled. - /// - /// Since it's generally expected that this routine is called inside of - /// a matching engine, callers should check the error condition when - /// building the matching engine. If there is a Unicode word boundary - /// in the matcher and the data isn't available, then the matcher should - /// fail to build. - /// - /// Callers can check the error condition with [`LookSet::available`]. - /// - /// This also may panic when `at > haystack.len()`. Note that `at == - /// haystack.len()` is legal and guaranteed not to panic. - #[inline] - pub fn matches(&self, look: Look, haystack: &[u8], at: usize) -> bool { - self.matches_inline(look, haystack, at) - } - - /// Like `matches`, but forcefully inlined. - /// - /// # Panics - /// - /// This panics when testing any Unicode word boundary assertion in this - /// set and when the Unicode word data is not available. 
Specifically, this - /// only occurs when the `unicode-word-boundary` feature is not enabled. - /// - /// Since it's generally expected that this routine is called inside of - /// a matching engine, callers should check the error condition when - /// building the matching engine. If there is a Unicode word boundary - /// in the matcher and the data isn't available, then the matcher should - /// fail to build. - /// - /// Callers can check the error condition with [`LookSet::available`]. - /// - /// This also may panic when `at > haystack.len()`. Note that `at == - /// haystack.len()` is legal and guaranteed not to panic. - #[cfg_attr(feature = "perf-inline", inline(always))] - pub(crate) fn matches_inline( - &self, - look: Look, - haystack: &[u8], - at: usize, - ) -> bool { - match look { - Look::Start => self.is_start(haystack, at), - Look::End => self.is_end(haystack, at), - Look::StartLF => self.is_start_lf(haystack, at), - Look::EndLF => self.is_end_lf(haystack, at), - Look::StartCRLF => self.is_start_crlf(haystack, at), - Look::EndCRLF => self.is_end_crlf(haystack, at), - Look::WordAscii => self.is_word_ascii(haystack, at), - Look::WordAsciiNegate => self.is_word_ascii_negate(haystack, at), - Look::WordUnicode => self.is_word_unicode(haystack, at).unwrap(), - Look::WordUnicodeNegate => { - self.is_word_unicode_negate(haystack, at).unwrap() - } - Look::WordStartAscii => self.is_word_start_ascii(haystack, at), - Look::WordEndAscii => self.is_word_end_ascii(haystack, at), - Look::WordStartUnicode => { - self.is_word_start_unicode(haystack, at).unwrap() - } - Look::WordEndUnicode => { - self.is_word_end_unicode(haystack, at).unwrap() - } - Look::WordStartHalfAscii => { - self.is_word_start_half_ascii(haystack, at) - } - Look::WordEndHalfAscii => { - self.is_word_end_half_ascii(haystack, at) - } - Look::WordStartHalfUnicode => { - self.is_word_start_half_unicode(haystack, at).unwrap() - } - Look::WordEndHalfUnicode => { - self.is_word_end_half_unicode(haystack, at).unwrap() - } - } - } - - /// Returns true when _all_ of the assertions in the given set match at the - /// given position in the haystack. - /// - /// # Panics - /// - /// This panics when testing any Unicode word boundary assertion in this - /// set and when the Unicode word data is not available. Specifically, this - /// only occurs when the `unicode-word-boundary` feature is not enabled. - /// - /// Since it's generally expected that this routine is called inside of - /// a matching engine, callers should check the error condition when - /// building the matching engine. If there is a Unicode word boundary - /// in the matcher and the data isn't available, then the matcher should - /// fail to build. - /// - /// Callers can check the error condition with [`LookSet::available`]. - /// - /// This also may panic when `at > haystack.len()`. Note that `at == - /// haystack.len()` is legal and guaranteed not to panic. - #[inline] - pub fn matches_set( - &self, - set: LookSet, - haystack: &[u8], - at: usize, - ) -> bool { - self.matches_set_inline(set, haystack, at) - } - - /// Like `LookSet::matches`, but forcefully inlined for perf. - #[cfg_attr(feature = "perf-inline", inline(always))] - pub(crate) fn matches_set_inline( - &self, - set: LookSet, - haystack: &[u8], - at: usize, - ) -> bool { - // This used to use LookSet::iter with Look::matches on each element, - // but that proved to be quite disastrous for perf. The manual "if - // the set has this assertion, check it" turns out to be quite a bit - // faster. 
- if set.contains(Look::Start) { - if !self.is_start(haystack, at) { - return false; - } - } - if set.contains(Look::End) { - if !self.is_end(haystack, at) { - return false; - } - } - if set.contains(Look::StartLF) { - if !self.is_start_lf(haystack, at) { - return false; - } - } - if set.contains(Look::EndLF) { - if !self.is_end_lf(haystack, at) { - return false; - } - } - if set.contains(Look::StartCRLF) { - if !self.is_start_crlf(haystack, at) { - return false; - } - } - if set.contains(Look::EndCRLF) { - if !self.is_end_crlf(haystack, at) { - return false; - } - } - if set.contains(Look::WordAscii) { - if !self.is_word_ascii(haystack, at) { - return false; - } - } - if set.contains(Look::WordAsciiNegate) { - if !self.is_word_ascii_negate(haystack, at) { - return false; - } - } - if set.contains(Look::WordUnicode) { - if !self.is_word_unicode(haystack, at).unwrap() { - return false; - } - } - if set.contains(Look::WordUnicodeNegate) { - if !self.is_word_unicode_negate(haystack, at).unwrap() { - return false; - } - } - if set.contains(Look::WordStartAscii) { - if !self.is_word_start_ascii(haystack, at) { - return false; - } - } - if set.contains(Look::WordEndAscii) { - if !self.is_word_end_ascii(haystack, at) { - return false; - } - } - if set.contains(Look::WordStartUnicode) { - if !self.is_word_start_unicode(haystack, at).unwrap() { - return false; - } - } - if set.contains(Look::WordEndUnicode) { - if !self.is_word_end_unicode(haystack, at).unwrap() { - return false; - } - } - if set.contains(Look::WordStartHalfAscii) { - if !self.is_word_start_half_ascii(haystack, at) { - return false; - } - } - if set.contains(Look::WordEndHalfAscii) { - if !self.is_word_end_half_ascii(haystack, at) { - return false; - } - } - if set.contains(Look::WordStartHalfUnicode) { - if !self.is_word_start_half_unicode(haystack, at).unwrap() { - return false; - } - } - if set.contains(Look::WordEndHalfUnicode) { - if !self.is_word_end_half_unicode(haystack, at).unwrap() { - return false; - } - } - true - } - - /// Split up the given byte classes into equivalence classes in a way that - /// is consistent with this look-around assertion. - #[cfg(feature = "alloc")] - pub(crate) fn add_to_byteset( - &self, - look: Look, - set: &mut crate::util::alphabet::ByteClassSet, - ) { - match look { - Look::Start | Look::End => {} - Look::StartLF | Look::EndLF => { - set.set_range(self.lineterm.0, self.lineterm.0); - } - Look::StartCRLF | Look::EndCRLF => { - set.set_range(b'\r', b'\r'); - set.set_range(b'\n', b'\n'); - } - Look::WordAscii - | Look::WordAsciiNegate - | Look::WordUnicode - | Look::WordUnicodeNegate - | Look::WordStartAscii - | Look::WordEndAscii - | Look::WordStartUnicode - | Look::WordEndUnicode - | Look::WordStartHalfAscii - | Look::WordEndHalfAscii - | Look::WordStartHalfUnicode - | Look::WordEndHalfUnicode => { - // We need to mark all ranges of bytes whose pairs result in - // evaluating \b differently. This isn't technically correct - // for Unicode word boundaries, but DFAs can't handle those - // anyway, and thus, the byte classes don't need to either - // since they are themselves only used in DFAs. - // - // FIXME: It seems like the calls to 'set_range' here are - // completely invariant, which means we could just hard-code - // them here without needing to write a loop. And we only need - // to do this dance at most once per regex. - // - // FIXME: Is this correct for \B? 
- let iswb = utf8::is_word_byte; - // This unwrap is OK because we guard every use of 'asu8' with - // a check that the input is <= 255. - let asu8 = |b: u16| u8::try_from(b).unwrap(); - let mut b1: u16 = 0; - let mut b2: u16; - while b1 <= 255 { - b2 = b1 + 1; - while b2 <= 255 && iswb(asu8(b1)) == iswb(asu8(b2)) { - b2 += 1; - } - // The guards above guarantee that b2 can never get any - // bigger. - assert!(b2 <= 256); - // Subtracting 1 from b2 is always OK because it is always - // at least 1 greater than b1, and the assert above - // guarantees that the asu8 conversion will succeed. - set.set_range(asu8(b1), asu8(b2.checked_sub(1).unwrap())); - b1 = b2; - } - } - } - } - - /// Returns true when [`Look::Start`] is satisfied `at` the given position - /// in `haystack`. - /// - /// # Panics - /// - /// This may panic when `at > haystack.len()`. Note that `at == - /// haystack.len()` is legal and guaranteed not to panic. - #[inline] - pub fn is_start(&self, _haystack: &[u8], at: usize) -> bool { - at == 0 - } - - /// Returns true when [`Look::End`] is satisfied `at` the given position in - /// `haystack`. - /// - /// # Panics - /// - /// This may panic when `at > haystack.len()`. Note that `at == - /// haystack.len()` is legal and guaranteed not to panic. - #[inline] - pub fn is_end(&self, haystack: &[u8], at: usize) -> bool { - at == haystack.len() - } - - /// Returns true when [`Look::StartLF`] is satisfied `at` the given - /// position in `haystack`. - /// - /// # Panics - /// - /// This may panic when `at > haystack.len()`. Note that `at == - /// haystack.len()` is legal and guaranteed not to panic. - #[inline] - pub fn is_start_lf(&self, haystack: &[u8], at: usize) -> bool { - self.is_start(haystack, at) || haystack[at - 1] == self.lineterm.0 - } - - /// Returns true when [`Look::EndLF`] is satisfied `at` the given position - /// in `haystack`. - /// - /// # Panics - /// - /// This may panic when `at > haystack.len()`. Note that `at == - /// haystack.len()` is legal and guaranteed not to panic. - #[inline] - pub fn is_end_lf(&self, haystack: &[u8], at: usize) -> bool { - self.is_end(haystack, at) || haystack[at] == self.lineterm.0 - } - - /// Returns true when [`Look::StartCRLF`] is satisfied `at` the given - /// position in `haystack`. - /// - /// # Panics - /// - /// This may panic when `at > haystack.len()`. Note that `at == - /// haystack.len()` is legal and guaranteed not to panic. - #[inline] - pub fn is_start_crlf(&self, haystack: &[u8], at: usize) -> bool { - self.is_start(haystack, at) - || haystack[at - 1] == b'\n' - || (haystack[at - 1] == b'\r' - && (at >= haystack.len() || haystack[at] != b'\n')) - } - - /// Returns true when [`Look::EndCRLF`] is satisfied `at` the given - /// position in `haystack`. - /// - /// # Panics - /// - /// This may panic when `at > haystack.len()`. Note that `at == - /// haystack.len()` is legal and guaranteed not to panic. - #[inline] - pub fn is_end_crlf(&self, haystack: &[u8], at: usize) -> bool { - self.is_end(haystack, at) - || haystack[at] == b'\r' - || (haystack[at] == b'\n' - && (at == 0 || haystack[at - 1] != b'\r')) - } - - /// Returns true when [`Look::WordAscii`] is satisfied `at` the given - /// position in `haystack`. - /// - /// # Panics - /// - /// This may panic when `at > haystack.len()`. Note that `at == - /// haystack.len()` is legal and guaranteed not to panic. 
- #[inline] - pub fn is_word_ascii(&self, haystack: &[u8], at: usize) -> bool { - let word_before = at > 0 && utf8::is_word_byte(haystack[at - 1]); - let word_after = - at < haystack.len() && utf8::is_word_byte(haystack[at]); - word_before != word_after - } - - /// Returns true when [`Look::WordAsciiNegate`] is satisfied `at` the given - /// position in `haystack`. - /// - /// # Panics - /// - /// This may panic when `at > haystack.len()`. Note that `at == - /// haystack.len()` is legal and guaranteed not to panic. - #[inline] - pub fn is_word_ascii_negate(&self, haystack: &[u8], at: usize) -> bool { - !self.is_word_ascii(haystack, at) - } - - /// Returns true when [`Look::WordUnicode`] is satisfied `at` the given - /// position in `haystack`. - /// - /// # Panics - /// - /// This may panic when `at > haystack.len()`. Note that `at == - /// haystack.len()` is legal and guaranteed not to panic. - /// - /// # Errors - /// - /// This returns an error when Unicode word boundary tables - /// are not available. Specifically, this only occurs when the - /// `unicode-word-boundary` feature is not enabled. - #[inline] - pub fn is_word_unicode( - &self, - haystack: &[u8], - at: usize, - ) -> Result { - let word_before = is_word_char::rev(haystack, at)?; - let word_after = is_word_char::fwd(haystack, at)?; - Ok(word_before != word_after) - } - - /// Returns true when [`Look::WordUnicodeNegate`] is satisfied `at` the - /// given position in `haystack`. - /// - /// # Panics - /// - /// This may panic when `at > haystack.len()`. Note that `at == - /// haystack.len()` is legal and guaranteed not to panic. - /// - /// # Errors - /// - /// This returns an error when Unicode word boundary tables - /// are not available. Specifically, this only occurs when the - /// `unicode-word-boundary` feature is not enabled. - #[inline] - pub fn is_word_unicode_negate( - &self, - haystack: &[u8], - at: usize, - ) -> Result { - // This is pretty subtle. Why do we need to do UTF-8 decoding here? - // Well... at time of writing, the is_word_char_{fwd,rev} routines will - // only return true if there is a valid UTF-8 encoding of a "word" - // codepoint, and false in every other case (including invalid UTF-8). - // This means that in regions of invalid UTF-8 (which might be a - // subset of valid UTF-8!), it would result in \B matching. While this - // would be questionable in the context of truly invalid UTF-8, it is - // *certainly* wrong to report match boundaries that split the encoding - // of a codepoint. So to work around this, we ensure that we can decode - // a codepoint on either side of `at`. If either direction fails, then - // we don't permit \B to match at all. - // - // Now, this isn't exactly optimal from a perf perspective. We could - // try and detect this in is_word_char::{fwd,rev}, but it's not clear - // if it's worth it. \B is, after all, rarely used. Even worse, - // is_word_char::{fwd,rev} could do its own UTF-8 decoding, and so this - // will wind up doing UTF-8 decoding twice. Ouch. We could fix this - // with more code complexity, but it just doesn't feel worth it for \B. - // - // And in particular, we do *not* have to do this with \b, because \b - // *requires* that at least one side of `at` be a "word" codepoint, - // which in turn implies one side of `at` must be valid UTF-8. This in - // turn implies that \b can never split a valid UTF-8 encoding of a - // codepoint. 
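// Standalone sketch of the ASCII word-boundary test described above. The helper
// below mirrors what utf8::is_word_byte checks ([0-9A-Za-z_]); it is an assumption
// for this sketch, not the vendored function itself.

fn is_word_byte(b: u8) -> bool {
    b.is_ascii_alphanumeric() || b == b'_'
}

/// True when position `at` in `haystack` is an ASCII `\b` boundary: exactly one
/// side of the position is a word byte.
fn is_word_ascii(haystack: &[u8], at: usize) -> bool {
    let word_before = at > 0 && is_word_byte(haystack[at - 1]);
    let word_after = at < haystack.len() && is_word_byte(haystack[at]);
    word_before != word_after
}

// assert!(is_word_ascii(b"abc!", 3));  // between 'c' and '!': a boundary
// assert!(!is_word_ascii(b"abc!", 2)); // between 'b' and 'c': not a boundary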
In the case where one side of `at` is truly invalid UTF-8 - // and the other side IS a word codepoint, then we want \b to match - // since it represents a valid UTF-8 boundary. It also makes sense. For - // example, you'd want \b\w+\b to match 'abc' in '\xFFabc\xFF'. - // - // Note also that this is not just '!is_word_unicode(..)' like it is - // for the ASCII case. For example, neither \b nor \B is satisfied - // within invalid UTF-8 sequences. - let word_before = at > 0 - && match utf8::decode_last(&haystack[..at]) { - None | Some(Err(_)) => return Ok(false), - Some(Ok(_)) => is_word_char::rev(haystack, at)?, - }; - let word_after = at < haystack.len() - && match utf8::decode(&haystack[at..]) { - None | Some(Err(_)) => return Ok(false), - Some(Ok(_)) => is_word_char::fwd(haystack, at)?, - }; - Ok(word_before == word_after) - } - - /// Returns true when [`Look::WordStartAscii`] is satisfied `at` the given - /// position in `haystack`. - /// - /// # Panics - /// - /// This may panic when `at > haystack.len()`. Note that `at == - /// haystack.len()` is legal and guaranteed not to panic. - #[inline] - pub fn is_word_start_ascii(&self, haystack: &[u8], at: usize) -> bool { - let word_before = at > 0 && utf8::is_word_byte(haystack[at - 1]); - let word_after = - at < haystack.len() && utf8::is_word_byte(haystack[at]); - !word_before && word_after - } - - /// Returns true when [`Look::WordEndAscii`] is satisfied `at` the given - /// position in `haystack`. - /// - /// # Panics - /// - /// This may panic when `at > haystack.len()`. Note that `at == - /// haystack.len()` is legal and guaranteed not to panic. - #[inline] - pub fn is_word_end_ascii(&self, haystack: &[u8], at: usize) -> bool { - let word_before = at > 0 && utf8::is_word_byte(haystack[at - 1]); - let word_after = - at < haystack.len() && utf8::is_word_byte(haystack[at]); - word_before && !word_after - } - - /// Returns true when [`Look::WordStartUnicode`] is satisfied `at` the - /// given position in `haystack`. - /// - /// # Panics - /// - /// This may panic when `at > haystack.len()`. Note that `at == - /// haystack.len()` is legal and guaranteed not to panic. - /// - /// # Errors - /// - /// This returns an error when Unicode word boundary tables - /// are not available. Specifically, this only occurs when the - /// `unicode-word-boundary` feature is not enabled. - #[inline] - pub fn is_word_start_unicode( - &self, - haystack: &[u8], - at: usize, - ) -> Result { - let word_before = is_word_char::rev(haystack, at)?; - let word_after = is_word_char::fwd(haystack, at)?; - Ok(!word_before && word_after) - } - - /// Returns true when [`Look::WordEndUnicode`] is satisfied `at` the - /// given position in `haystack`. - /// - /// # Panics - /// - /// This may panic when `at > haystack.len()`. Note that `at == - /// haystack.len()` is legal and guaranteed not to panic. - /// - /// # Errors - /// - /// This returns an error when Unicode word boundary tables - /// are not available. Specifically, this only occurs when the - /// `unicode-word-boundary` feature is not enabled. - #[inline] - pub fn is_word_end_unicode( - &self, - haystack: &[u8], - at: usize, - ) -> Result { - let word_before = is_word_char::rev(haystack, at)?; - let word_after = is_word_char::fwd(haystack, at)?; - Ok(word_before && !word_after) - } - - /// Returns true when [`Look::WordStartHalfAscii`] is satisfied `at` the - /// given position in `haystack`. - /// - /// # Panics - /// - /// This may panic when `at > haystack.len()`. 
Note that `at == - /// haystack.len()` is legal and guaranteed not to panic. - #[inline] - pub fn is_word_start_half_ascii( - &self, - haystack: &[u8], - at: usize, - ) -> bool { - let word_before = at > 0 && utf8::is_word_byte(haystack[at - 1]); - !word_before - } - - /// Returns true when [`Look::WordEndHalfAscii`] is satisfied `at` the - /// given position in `haystack`. - /// - /// # Panics - /// - /// This may panic when `at > haystack.len()`. Note that `at == - /// haystack.len()` is legal and guaranteed not to panic. - #[inline] - pub fn is_word_end_half_ascii(&self, haystack: &[u8], at: usize) -> bool { - let word_after = - at < haystack.len() && utf8::is_word_byte(haystack[at]); - !word_after - } - - /// Returns true when [`Look::WordStartHalfUnicode`] is satisfied `at` the - /// given position in `haystack`. - /// - /// # Panics - /// - /// This may panic when `at > haystack.len()`. Note that `at == - /// haystack.len()` is legal and guaranteed not to panic. - /// - /// # Errors - /// - /// This returns an error when Unicode word boundary tables - /// are not available. Specifically, this only occurs when the - /// `unicode-word-boundary` feature is not enabled. - #[inline] - pub fn is_word_start_half_unicode( - &self, - haystack: &[u8], - at: usize, - ) -> Result { - // See `is_word_unicode_negate` for why we need to do this. We don't - // need to do it for `is_word_start_unicode` because that guarantees - // that the position matched falls on a valid UTF-8 boundary given - // that the right side must be in \w. - let word_before = at > 0 - && match utf8::decode_last(&haystack[..at]) { - None | Some(Err(_)) => return Ok(false), - Some(Ok(_)) => is_word_char::rev(haystack, at)?, - }; - Ok(!word_before) - } - - /// Returns true when [`Look::WordEndHalfUnicode`] is satisfied `at` the - /// given position in `haystack`. - /// - /// # Panics - /// - /// This may panic when `at > haystack.len()`. Note that `at == - /// haystack.len()` is legal and guaranteed not to panic. - /// - /// # Errors - /// - /// This returns an error when Unicode word boundary tables - /// are not available. Specifically, this only occurs when the - /// `unicode-word-boundary` feature is not enabled. - #[inline] - pub fn is_word_end_half_unicode( - &self, - haystack: &[u8], - at: usize, - ) -> Result { - // See `is_word_unicode_negate` for why we need to do this. We don't - // need to do it for `is_word_end_unicode` because that guarantees - // that the position matched falls on a valid UTF-8 boundary given - // that the left side must be in \w. - let word_after = at < haystack.len() - && match utf8::decode(&haystack[at..]) { - None | Some(Err(_)) => return Ok(false), - Some(Ok(_)) => is_word_char::fwd(haystack, at)?, - }; - Ok(!word_after) - } -} - -impl Default for LookMatcher { - fn default() -> LookMatcher { - LookMatcher::new() - } -} - -/// An error that occurs when the Unicode-aware `\w` class is unavailable. -/// -/// This error can occur when the data tables necessary for the Unicode aware -/// Perl character class `\w` are unavailable. The `\w` class is used to -/// determine whether a codepoint is considered a word character or not when -/// determining whether a Unicode aware `\b` (or `\B`) matches at a particular -/// position. -/// -/// This error can only occur when the `unicode-word-boundary` feature is -/// disabled. 
-#[derive(Clone, Debug)] -pub struct UnicodeWordBoundaryError(()); - -impl UnicodeWordBoundaryError { - #[cfg(not(feature = "unicode-word-boundary"))] - pub(crate) fn new() -> UnicodeWordBoundaryError { - UnicodeWordBoundaryError(()) - } - - /// Returns an error if and only if Unicode word boundary data is - /// unavailable. - pub fn check() -> Result<(), UnicodeWordBoundaryError> { - is_word_char::check() - } -} - -#[cfg(feature = "std")] -impl std::error::Error for UnicodeWordBoundaryError {} - -impl core::fmt::Display for UnicodeWordBoundaryError { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - write!( - f, - "Unicode-aware \\b and \\B are unavailable because the \ - requisite data tables are missing, please enable the \ - unicode-word-boundary feature" - ) - } -} - -// Below are FOUR different ways for checking whether whether a "word" -// codepoint exists at a particular position in the haystack. The four -// different approaches are, in order of preference: -// -// 1. Parse '\w', convert to an NFA, convert to a fully compiled DFA on the -// first call, and then use that DFA for all subsequent calls. -// 2. Do UTF-8 decoding and use regex_syntax::is_word_character if available. -// 3. Do UTF-8 decoding and use our own 'perl_word' table. -// 4. Return an error. -// -// The reason for all of these approaches is a combination of perf and -// permitting one to build regex-automata without the Unicode data necessary -// for handling Unicode-aware word boundaries. (In which case, '(?-u:\b)' would -// still work.) -// -// The DFA approach is the fastest, but it requires the regex parser, the -// NFA compiler, the DFA builder and the DFA search runtime. That's a lot to -// bring in, but if it's available, it's (probably) the best we can do. -// -// Approaches (2) and (3) are effectively equivalent, but (2) reuses the -// data in regex-syntax and avoids duplicating it in regex-automata. -// -// Finally, (4) unconditionally returns an error since the requisite data isn't -// available anywhere. -// -// There are actually more approaches possible that we didn't implement. For -// example, if the DFA builder is available but the syntax parser is not, we -// could technically hand construct our own NFA from the 'perl_word' data -// table. But to avoid some pretty hairy code duplication, we would in turn -// need to pull the UTF-8 compiler out of the NFA compiler. Yikes. -// -// A possibly more sensible alternative is to use a lazy DFA when the full -// DFA builder isn't available... -// -// Yet another choice would be to build the full DFA and then embed it into the -// source. Then we'd only need to bring in the DFA search runtime, which is -// considerably smaller than the DFA builder code. The problem here is that the -// Debian people have spooked me[1] into avoiding cyclic dependencies. Namely, -// we'd need to build regex-cli, which depends on regex-automata in order to -// build some part of regex-automata. But to be honest, something like this has -// to be allowed somehow? I just don't know what the right process is. -// -// There are perhaps other choices as well. Why did I stop at these 4? Because -// I wanted to preserve my sanity. I suspect I'll wind up adding the lazy DFA -// approach eventually, as the benefits of the DFA approach are somewhat -// compelling. The 'boundary-words-holmes' benchmark tests this. (Note that -// the commands below no longer work. If necessary, we should re-capitulate -// the benchmark from whole cloth in rebar.) 
-// -// $ regex-cli bench measure -f boundary-words-holmes -e pikevm > dfa.csv -// -// Then I changed the code below so that the util/unicode_data/perl_word table -// was used and re-ran the benchmark: -// -// $ regex-cli bench measure -f boundary-words-holmes -e pikevm > table.csv -// -// And compared them: -// -// $ regex-cli bench diff dfa.csv table.csv -// benchmark engine dfa table -// --------- ------ --- ----- -// internal/count/boundary-words-holmes regex/automata/pikevm 18.6 MB/s 12.9 MB/s -// -// Which is a nice improvement. -// -// UPDATE: It turns out that it takes approximately 22ms to build the reverse -// DFA for \w. (And about 3ms for the forward DFA.) It's probably not much in -// the grand scheme things, but that is a significant latency cost. So I'm not -// sure that's a good idea. I then tried using a lazy DFA instead, and that -// eliminated the overhead, but since the lazy DFA requires mutable working -// memory, that requires introducing a 'Cache' for every simultaneous call. -// -// I ended up deciding for now to just keep the "UTF-8 decode and check the -// table." The DFA and lazy DFA approaches are still below, but commented out. -// -// [1]: https://github.com/BurntSushi/ucd-generate/issues/11 - -/* -/// A module that looks for word codepoints using lazy DFAs. -#[cfg(all( - feature = "unicode-word-boundary", - feature = "syntax", - feature = "unicode-perl", - feature = "hybrid" -))] -mod is_word_char { - use alloc::vec::Vec; - - use crate::{ - hybrid::dfa::{Cache, DFA}, - nfa::thompson::NFA, - util::{lazy::Lazy, pool::Pool, primitives::StateID}, - Anchored, Input, - }; - - pub(super) fn check() -> Result<(), super::UnicodeWordBoundaryError> { - Ok(()) - } - - #[cfg_attr(feature = "perf-inline", inline(always))] - pub(super) fn fwd( - haystack: &[u8], - mut at: usize, - ) -> Result { - static WORD: Lazy = Lazy::new(|| DFA::new(r"\w").unwrap()); - static CACHE: Lazy> = - Lazy::new(|| Pool::new(|| WORD.create_cache())); - let dfa = Lazy::get(&WORD); - let mut cache = Lazy::get(&CACHE).get(); - let mut sid = dfa - .start_state_forward( - &mut cache, - &Input::new("").anchored(Anchored::Yes), - ) - .unwrap(); - while at < haystack.len() { - let byte = haystack[at]; - sid = dfa.next_state(&mut cache, sid, byte).unwrap(); - at += 1; - if sid.is_tagged() { - if sid.is_match() { - return Ok(true); - } else if sid.is_dead() { - return Ok(false); - } - } - } - Ok(dfa.next_eoi_state(&mut cache, sid).unwrap().is_match()) - } - - #[cfg_attr(feature = "perf-inline", inline(always))] - pub(super) fn rev( - haystack: &[u8], - mut at: usize, - ) -> Result { - static WORD: Lazy = Lazy::new(|| { - DFA::builder() - .thompson(NFA::config().reverse(true)) - .build(r"\w") - .unwrap() - }); - static CACHE: Lazy> = - Lazy::new(|| Pool::new(|| WORD.create_cache())); - let dfa = Lazy::get(&WORD); - let mut cache = Lazy::get(&CACHE).get(); - let mut sid = dfa - .start_state_reverse( - &mut cache, - &Input::new("").anchored(Anchored::Yes), - ) - .unwrap(); - while at > 0 { - at -= 1; - let byte = haystack[at]; - sid = dfa.next_state(&mut cache, sid, byte).unwrap(); - if sid.is_tagged() { - if sid.is_match() { - return Ok(true); - } else if sid.is_dead() { - return Ok(false); - } - } - } - Ok(dfa.next_eoi_state(&mut cache, sid).unwrap().is_match()) - } -} -*/ - -/* -/// A module that looks for word codepoints using fully compiled DFAs. 
-#[cfg(all( - feature = "unicode-word-boundary", - feature = "syntax", - feature = "unicode-perl", - feature = "dfa-build" -))] -mod is_word_char { - use alloc::vec::Vec; - - use crate::{ - dfa::{dense::DFA, Automaton, StartKind}, - nfa::thompson::NFA, - util::{lazy::Lazy, primitives::StateID}, - Anchored, Input, - }; - - pub(super) fn check() -> Result<(), super::UnicodeWordBoundaryError> { - Ok(()) - } - - #[cfg_attr(feature = "perf-inline", inline(always))] - pub(super) fn fwd( - haystack: &[u8], - mut at: usize, - ) -> Result { - static WORD: Lazy<(DFA>, StateID)> = Lazy::new(|| { - let dfa = DFA::builder() - .configure(DFA::config().start_kind(StartKind::Anchored)) - .build(r"\w") - .unwrap(); - // OK because our regex has no look-around. - let start_id = dfa.universal_start_state(Anchored::Yes).unwrap(); - (dfa, start_id) - }); - let &(ref dfa, mut sid) = Lazy::get(&WORD); - while at < haystack.len() { - let byte = haystack[at]; - sid = dfa.next_state(sid, byte); - at += 1; - if dfa.is_special_state(sid) { - if dfa.is_match_state(sid) { - return Ok(true); - } else if dfa.is_dead_state(sid) { - return Ok(false); - } - } - } - Ok(dfa.is_match_state(dfa.next_eoi_state(sid))) - } - - #[cfg_attr(feature = "perf-inline", inline(always))] - pub(super) fn rev( - haystack: &[u8], - mut at: usize, - ) -> Result { - static WORD: Lazy<(DFA>, StateID)> = Lazy::new(|| { - let dfa = DFA::builder() - .configure(DFA::config().start_kind(StartKind::Anchored)) - // From ad hoc measurements, it looks like setting - // shrink==false is slightly faster than shrink==true. I kind - // of feel like this indicates that shrinking is probably a - // failure, although it can help in some cases. Sigh. - .thompson(NFA::config().reverse(true).shrink(false)) - .build(r"\w") - .unwrap(); - // OK because our regex has no look-around. - let start_id = dfa.universal_start_state(Anchored::Yes).unwrap(); - (dfa, start_id) - }); - let &(ref dfa, mut sid) = Lazy::get(&WORD); - while at > 0 { - at -= 1; - let byte = haystack[at]; - sid = dfa.next_state(sid, byte); - if dfa.is_special_state(sid) { - if dfa.is_match_state(sid) { - return Ok(true); - } else if dfa.is_dead_state(sid) { - return Ok(false); - } - } - } - Ok(dfa.is_match_state(dfa.next_eoi_state(sid))) - } -} -*/ - -/// A module that looks for word codepoints using regex-syntax's data tables. 
-#[cfg(all( - feature = "unicode-word-boundary", - feature = "syntax", - feature = "unicode-perl", -))] -mod is_word_char { - use regex_syntax::try_is_word_character; - - use crate::util::utf8; - - pub(super) fn check() -> Result<(), super::UnicodeWordBoundaryError> { - Ok(()) - } - - #[cfg_attr(feature = "perf-inline", inline(always))] - pub(super) fn fwd( - haystack: &[u8], - at: usize, - ) -> Result { - Ok(match utf8::decode(&haystack[at..]) { - None | Some(Err(_)) => false, - Some(Ok(ch)) => try_is_word_character(ch).expect( - "since unicode-word-boundary, syntax and unicode-perl \ - are all enabled, it is expected that \ - try_is_word_character succeeds", - ), - }) - } - - #[cfg_attr(feature = "perf-inline", inline(always))] - pub(super) fn rev( - haystack: &[u8], - at: usize, - ) -> Result { - Ok(match utf8::decode_last(&haystack[..at]) { - None | Some(Err(_)) => false, - Some(Ok(ch)) => try_is_word_character(ch).expect( - "since unicode-word-boundary, syntax and unicode-perl \ - are all enabled, it is expected that \ - try_is_word_character succeeds", - ), - }) - } -} - -/// A module that looks for word codepoints using regex-automata's data tables -/// (which are only compiled when regex-syntax's tables aren't available). -/// -/// Note that the cfg should match the one in src/util/unicode_data/mod.rs for -/// perl_word. -#[cfg(all( - feature = "unicode-word-boundary", - not(all(feature = "syntax", feature = "unicode-perl")), -))] -mod is_word_char { - use crate::util::utf8; - - pub(super) fn check() -> Result<(), super::UnicodeWordBoundaryError> { - Ok(()) - } - - #[cfg_attr(feature = "perf-inline", inline(always))] - pub(super) fn fwd( - haystack: &[u8], - at: usize, - ) -> Result { - Ok(match utf8::decode(&haystack[at..]) { - None | Some(Err(_)) => false, - Some(Ok(ch)) => is_word_character(ch), - }) - } - - #[cfg_attr(feature = "perf-inline", inline(always))] - pub(super) fn rev( - haystack: &[u8], - at: usize, - ) -> Result { - Ok(match utf8::decode_last(&haystack[..at]) { - None | Some(Err(_)) => false, - Some(Ok(ch)) => is_word_character(ch), - }) - } - - #[cfg_attr(feature = "perf-inline", inline(always))] - fn is_word_character(c: char) -> bool { - use crate::util::{unicode_data::perl_word::PERL_WORD, utf8}; - - if u8::try_from(c).map_or(false, utf8::is_word_byte) { - return true; - } - PERL_WORD - .binary_search_by(|&(start, end)| { - use core::cmp::Ordering; - - if start <= c && c <= end { - Ordering::Equal - } else if start > c { - Ordering::Greater - } else { - Ordering::Less - } - }) - .is_ok() - } -} - -/// A module that always returns an error if Unicode word boundaries are -/// disabled. When this feature is disabled, then regex-automata will not -/// include its own data tables even if regex-syntax is disabled. -#[cfg(not(feature = "unicode-word-boundary"))] -mod is_word_char { - pub(super) fn check() -> Result<(), super::UnicodeWordBoundaryError> { - Err(super::UnicodeWordBoundaryError::new()) - } - - #[cfg_attr(feature = "perf-inline", inline(always))] - pub(super) fn fwd( - _bytes: &[u8], - _at: usize, - ) -> Result { - Err(super::UnicodeWordBoundaryError::new()) - } - - #[cfg_attr(feature = "perf-inline", inline(always))] - pub(super) fn rev( - _bytes: &[u8], - _at: usize, - ) -> Result { - Err(super::UnicodeWordBoundaryError::new()) - } -} - -#[cfg(test)] -mod tests { - use super::*; - - macro_rules! 
testlook { - ($look:expr, $haystack:expr, $at:expr) => { - LookMatcher::default().matches($look, $haystack.as_bytes(), $at) - }; - } - - #[test] - fn look_matches_start_line() { - let look = Look::StartLF; - - assert!(testlook!(look, "", 0)); - assert!(testlook!(look, "\n", 0)); - assert!(testlook!(look, "\n", 1)); - assert!(testlook!(look, "a", 0)); - assert!(testlook!(look, "\na", 1)); - - assert!(!testlook!(look, "a", 1)); - assert!(!testlook!(look, "a\na", 1)); - } - - #[test] - fn look_matches_end_line() { - let look = Look::EndLF; - - assert!(testlook!(look, "", 0)); - assert!(testlook!(look, "\n", 1)); - assert!(testlook!(look, "\na", 0)); - assert!(testlook!(look, "\na", 2)); - assert!(testlook!(look, "a\na", 1)); - - assert!(!testlook!(look, "a", 0)); - assert!(!testlook!(look, "\na", 1)); - assert!(!testlook!(look, "a\na", 0)); - assert!(!testlook!(look, "a\na", 2)); - } - - #[test] - fn look_matches_start_text() { - let look = Look::Start; - - assert!(testlook!(look, "", 0)); - assert!(testlook!(look, "\n", 0)); - assert!(testlook!(look, "a", 0)); - - assert!(!testlook!(look, "\n", 1)); - assert!(!testlook!(look, "\na", 1)); - assert!(!testlook!(look, "a", 1)); - assert!(!testlook!(look, "a\na", 1)); - } - - #[test] - fn look_matches_end_text() { - let look = Look::End; - - assert!(testlook!(look, "", 0)); - assert!(testlook!(look, "\n", 1)); - assert!(testlook!(look, "\na", 2)); - - assert!(!testlook!(look, "\na", 0)); - assert!(!testlook!(look, "a\na", 1)); - assert!(!testlook!(look, "a", 0)); - assert!(!testlook!(look, "\na", 1)); - assert!(!testlook!(look, "a\na", 0)); - assert!(!testlook!(look, "a\na", 2)); - } - - #[test] - #[cfg(all(not(miri), feature = "unicode-word-boundary"))] - fn look_matches_word_unicode() { - let look = Look::WordUnicode; - - // \xF0\x9D\x9B\x83 = 𝛃 (in \w) - // \xF0\x90\x86\x80 = 𐆀 (not in \w) - - // Simple ASCII word boundaries. - assert!(testlook!(look, "a", 0)); - assert!(testlook!(look, "a", 1)); - assert!(testlook!(look, "a ", 1)); - assert!(testlook!(look, " a ", 1)); - assert!(testlook!(look, " a ", 2)); - - // Unicode word boundaries with a non-ASCII codepoint. - assert!(testlook!(look, "𝛃", 0)); - assert!(testlook!(look, "𝛃", 4)); - assert!(testlook!(look, "𝛃 ", 4)); - assert!(testlook!(look, " 𝛃 ", 1)); - assert!(testlook!(look, " 𝛃 ", 5)); - - // Unicode word boundaries between non-ASCII codepoints. - assert!(testlook!(look, "𝛃𐆀", 0)); - assert!(testlook!(look, "𝛃𐆀", 4)); - - // Non word boundaries for ASCII. - assert!(!testlook!(look, "", 0)); - assert!(!testlook!(look, "ab", 1)); - assert!(!testlook!(look, "a ", 2)); - assert!(!testlook!(look, " a ", 0)); - assert!(!testlook!(look, " a ", 3)); - - // Non word boundaries with a non-ASCII codepoint. - assert!(!testlook!(look, "𝛃b", 4)); - assert!(!testlook!(look, "𝛃 ", 5)); - assert!(!testlook!(look, " 𝛃 ", 0)); - assert!(!testlook!(look, " 𝛃 ", 6)); - assert!(!testlook!(look, "𝛃", 1)); - assert!(!testlook!(look, "𝛃", 2)); - assert!(!testlook!(look, "𝛃", 3)); - - // Non word boundaries with non-ASCII codepoints. - assert!(!testlook!(look, "𝛃𐆀", 1)); - assert!(!testlook!(look, "𝛃𐆀", 2)); - assert!(!testlook!(look, "𝛃𐆀", 3)); - assert!(!testlook!(look, "𝛃𐆀", 5)); - assert!(!testlook!(look, "𝛃𐆀", 6)); - assert!(!testlook!(look, "𝛃𐆀", 7)); - assert!(!testlook!(look, "𝛃𐆀", 8)); - } - - #[test] - fn look_matches_word_ascii() { - let look = Look::WordAscii; - - // \xF0\x9D\x9B\x83 = 𝛃 (in \w) - // \xF0\x90\x86\x80 = 𐆀 (not in \w) - - // Simple ASCII word boundaries. 
- assert!(testlook!(look, "a", 0)); - assert!(testlook!(look, "a", 1)); - assert!(testlook!(look, "a ", 1)); - assert!(testlook!(look, " a ", 1)); - assert!(testlook!(look, " a ", 2)); - - // Unicode word boundaries with a non-ASCII codepoint. Since this is - // an ASCII word boundary, none of these match. - assert!(!testlook!(look, "𝛃", 0)); - assert!(!testlook!(look, "𝛃", 4)); - assert!(!testlook!(look, "𝛃 ", 4)); - assert!(!testlook!(look, " 𝛃 ", 1)); - assert!(!testlook!(look, " 𝛃 ", 5)); - - // Unicode word boundaries between non-ASCII codepoints. Again, since - // this is an ASCII word boundary, none of these match. - assert!(!testlook!(look, "𝛃𐆀", 0)); - assert!(!testlook!(look, "𝛃𐆀", 4)); - - // Non word boundaries for ASCII. - assert!(!testlook!(look, "", 0)); - assert!(!testlook!(look, "ab", 1)); - assert!(!testlook!(look, "a ", 2)); - assert!(!testlook!(look, " a ", 0)); - assert!(!testlook!(look, " a ", 3)); - - // Non word boundaries with a non-ASCII codepoint. - assert!(testlook!(look, "𝛃b", 4)); - assert!(!testlook!(look, "𝛃 ", 5)); - assert!(!testlook!(look, " 𝛃 ", 0)); - assert!(!testlook!(look, " 𝛃 ", 6)); - assert!(!testlook!(look, "𝛃", 1)); - assert!(!testlook!(look, "𝛃", 2)); - assert!(!testlook!(look, "𝛃", 3)); - - // Non word boundaries with non-ASCII codepoints. - assert!(!testlook!(look, "𝛃𐆀", 1)); - assert!(!testlook!(look, "𝛃𐆀", 2)); - assert!(!testlook!(look, "𝛃𐆀", 3)); - assert!(!testlook!(look, "𝛃𐆀", 5)); - assert!(!testlook!(look, "𝛃𐆀", 6)); - assert!(!testlook!(look, "𝛃𐆀", 7)); - assert!(!testlook!(look, "𝛃𐆀", 8)); - } - - #[test] - #[cfg(all(not(miri), feature = "unicode-word-boundary"))] - fn look_matches_word_unicode_negate() { - let look = Look::WordUnicodeNegate; - - // \xF0\x9D\x9B\x83 = 𝛃 (in \w) - // \xF0\x90\x86\x80 = 𐆀 (not in \w) - - // Simple ASCII word boundaries. - assert!(!testlook!(look, "a", 0)); - assert!(!testlook!(look, "a", 1)); - assert!(!testlook!(look, "a ", 1)); - assert!(!testlook!(look, " a ", 1)); - assert!(!testlook!(look, " a ", 2)); - - // Unicode word boundaries with a non-ASCII codepoint. - assert!(!testlook!(look, "𝛃", 0)); - assert!(!testlook!(look, "𝛃", 4)); - assert!(!testlook!(look, "𝛃 ", 4)); - assert!(!testlook!(look, " 𝛃 ", 1)); - assert!(!testlook!(look, " 𝛃 ", 5)); - - // Unicode word boundaries between non-ASCII codepoints. - assert!(!testlook!(look, "𝛃𐆀", 0)); - assert!(!testlook!(look, "𝛃𐆀", 4)); - - // Non word boundaries for ASCII. - assert!(testlook!(look, "", 0)); - assert!(testlook!(look, "ab", 1)); - assert!(testlook!(look, "a ", 2)); - assert!(testlook!(look, " a ", 0)); - assert!(testlook!(look, " a ", 3)); - - // Non word boundaries with a non-ASCII codepoint. - assert!(testlook!(look, "𝛃b", 4)); - assert!(testlook!(look, "𝛃 ", 5)); - assert!(testlook!(look, " 𝛃 ", 0)); - assert!(testlook!(look, " 𝛃 ", 6)); - // These don't match because they could otherwise return an offset that - // splits the UTF-8 encoding of a codepoint. - assert!(!testlook!(look, "𝛃", 1)); - assert!(!testlook!(look, "𝛃", 2)); - assert!(!testlook!(look, "𝛃", 3)); - - // Non word boundaries with non-ASCII codepoints. These also don't - // match because they could otherwise return an offset that splits the - // UTF-8 encoding of a codepoint. 
- assert!(!testlook!(look, "𝛃𐆀", 1)); - assert!(!testlook!(look, "𝛃𐆀", 2)); - assert!(!testlook!(look, "𝛃𐆀", 3)); - assert!(!testlook!(look, "𝛃𐆀", 5)); - assert!(!testlook!(look, "𝛃𐆀", 6)); - assert!(!testlook!(look, "𝛃𐆀", 7)); - // But this one does, since 𐆀 isn't a word codepoint, and 8 is the end - // of the haystack. So the "end" of the haystack isn't a word and 𐆀 - // isn't a word, thus, \B matches. - assert!(testlook!(look, "𝛃𐆀", 8)); - } - - #[test] - fn look_matches_word_ascii_negate() { - let look = Look::WordAsciiNegate; - - // \xF0\x9D\x9B\x83 = 𝛃 (in \w) - // \xF0\x90\x86\x80 = 𐆀 (not in \w) - - // Simple ASCII word boundaries. - assert!(!testlook!(look, "a", 0)); - assert!(!testlook!(look, "a", 1)); - assert!(!testlook!(look, "a ", 1)); - assert!(!testlook!(look, " a ", 1)); - assert!(!testlook!(look, " a ", 2)); - - // Unicode word boundaries with a non-ASCII codepoint. Since this is - // an ASCII word boundary, none of these match. - assert!(testlook!(look, "𝛃", 0)); - assert!(testlook!(look, "𝛃", 4)); - assert!(testlook!(look, "𝛃 ", 4)); - assert!(testlook!(look, " 𝛃 ", 1)); - assert!(testlook!(look, " 𝛃 ", 5)); - - // Unicode word boundaries between non-ASCII codepoints. Again, since - // this is an ASCII word boundary, none of these match. - assert!(testlook!(look, "𝛃𐆀", 0)); - assert!(testlook!(look, "𝛃𐆀", 4)); - - // Non word boundaries for ASCII. - assert!(testlook!(look, "", 0)); - assert!(testlook!(look, "ab", 1)); - assert!(testlook!(look, "a ", 2)); - assert!(testlook!(look, " a ", 0)); - assert!(testlook!(look, " a ", 3)); - - // Non word boundaries with a non-ASCII codepoint. - assert!(!testlook!(look, "𝛃b", 4)); - assert!(testlook!(look, "𝛃 ", 5)); - assert!(testlook!(look, " 𝛃 ", 0)); - assert!(testlook!(look, " 𝛃 ", 6)); - assert!(testlook!(look, "𝛃", 1)); - assert!(testlook!(look, "𝛃", 2)); - assert!(testlook!(look, "𝛃", 3)); - - // Non word boundaries with non-ASCII codepoints. - assert!(testlook!(look, "𝛃𐆀", 1)); - assert!(testlook!(look, "𝛃𐆀", 2)); - assert!(testlook!(look, "𝛃𐆀", 3)); - assert!(testlook!(look, "𝛃𐆀", 5)); - assert!(testlook!(look, "𝛃𐆀", 6)); - assert!(testlook!(look, "𝛃𐆀", 7)); - assert!(testlook!(look, "𝛃𐆀", 8)); - } - - #[test] - fn look_matches_word_start_ascii() { - let look = Look::WordStartAscii; - - // \xF0\x9D\x9B\x83 = 𝛃 (in \w) - // \xF0\x90\x86\x80 = 𐆀 (not in \w) - - // Simple ASCII word boundaries. - assert!(testlook!(look, "a", 0)); - assert!(!testlook!(look, "a", 1)); - assert!(!testlook!(look, "a ", 1)); - assert!(testlook!(look, " a ", 1)); - assert!(!testlook!(look, " a ", 2)); - - // Unicode word boundaries with a non-ASCII codepoint. Since this is - // an ASCII word boundary, none of these match. - assert!(!testlook!(look, "𝛃", 0)); - assert!(!testlook!(look, "𝛃", 4)); - assert!(!testlook!(look, "𝛃 ", 4)); - assert!(!testlook!(look, " 𝛃 ", 1)); - assert!(!testlook!(look, " 𝛃 ", 5)); - - // Unicode word boundaries between non-ASCII codepoints. Again, since - // this is an ASCII word boundary, none of these match. - assert!(!testlook!(look, "𝛃𐆀", 0)); - assert!(!testlook!(look, "𝛃𐆀", 4)); - - // Non word boundaries for ASCII. - assert!(!testlook!(look, "", 0)); - assert!(!testlook!(look, "ab", 1)); - assert!(!testlook!(look, "a ", 2)); - assert!(!testlook!(look, " a ", 0)); - assert!(!testlook!(look, " a ", 3)); - - // Non word boundaries with a non-ASCII codepoint. 
- assert!(testlook!(look, "𝛃b", 4)); - assert!(!testlook!(look, "b𝛃", 1)); - assert!(!testlook!(look, "𝛃 ", 5)); - assert!(!testlook!(look, " 𝛃 ", 0)); - assert!(!testlook!(look, " 𝛃 ", 6)); - assert!(!testlook!(look, "𝛃", 1)); - assert!(!testlook!(look, "𝛃", 2)); - assert!(!testlook!(look, "𝛃", 3)); - - // Non word boundaries with non-ASCII codepoints. - assert!(!testlook!(look, "𝛃𐆀", 1)); - assert!(!testlook!(look, "𝛃𐆀", 2)); - assert!(!testlook!(look, "𝛃𐆀", 3)); - assert!(!testlook!(look, "𝛃𐆀", 5)); - assert!(!testlook!(look, "𝛃𐆀", 6)); - assert!(!testlook!(look, "𝛃𐆀", 7)); - assert!(!testlook!(look, "𝛃𐆀", 8)); - } - - #[test] - fn look_matches_word_end_ascii() { - let look = Look::WordEndAscii; - - // \xF0\x9D\x9B\x83 = 𝛃 (in \w) - // \xF0\x90\x86\x80 = 𐆀 (not in \w) - - // Simple ASCII word boundaries. - assert!(!testlook!(look, "a", 0)); - assert!(testlook!(look, "a", 1)); - assert!(testlook!(look, "a ", 1)); - assert!(!testlook!(look, " a ", 1)); - assert!(testlook!(look, " a ", 2)); - - // Unicode word boundaries with a non-ASCII codepoint. Since this is - // an ASCII word boundary, none of these match. - assert!(!testlook!(look, "𝛃", 0)); - assert!(!testlook!(look, "𝛃", 4)); - assert!(!testlook!(look, "𝛃 ", 4)); - assert!(!testlook!(look, " 𝛃 ", 1)); - assert!(!testlook!(look, " 𝛃 ", 5)); - - // Unicode word boundaries between non-ASCII codepoints. Again, since - // this is an ASCII word boundary, none of these match. - assert!(!testlook!(look, "𝛃𐆀", 0)); - assert!(!testlook!(look, "𝛃𐆀", 4)); - - // Non word boundaries for ASCII. - assert!(!testlook!(look, "", 0)); - assert!(!testlook!(look, "ab", 1)); - assert!(!testlook!(look, "a ", 2)); - assert!(!testlook!(look, " a ", 0)); - assert!(!testlook!(look, " a ", 3)); - - // Non word boundaries with a non-ASCII codepoint. - assert!(!testlook!(look, "𝛃b", 4)); - assert!(testlook!(look, "b𝛃", 1)); - assert!(!testlook!(look, "𝛃 ", 5)); - assert!(!testlook!(look, " 𝛃 ", 0)); - assert!(!testlook!(look, " 𝛃 ", 6)); - assert!(!testlook!(look, "𝛃", 1)); - assert!(!testlook!(look, "𝛃", 2)); - assert!(!testlook!(look, "𝛃", 3)); - - // Non word boundaries with non-ASCII codepoints. - assert!(!testlook!(look, "𝛃𐆀", 1)); - assert!(!testlook!(look, "𝛃𐆀", 2)); - assert!(!testlook!(look, "𝛃𐆀", 3)); - assert!(!testlook!(look, "𝛃𐆀", 5)); - assert!(!testlook!(look, "𝛃𐆀", 6)); - assert!(!testlook!(look, "𝛃𐆀", 7)); - assert!(!testlook!(look, "𝛃𐆀", 8)); - } - - #[test] - #[cfg(all(not(miri), feature = "unicode-word-boundary"))] - fn look_matches_word_start_unicode() { - let look = Look::WordStartUnicode; - - // \xF0\x9D\x9B\x83 = 𝛃 (in \w) - // \xF0\x90\x86\x80 = 𐆀 (not in \w) - - // Simple ASCII word boundaries. - assert!(testlook!(look, "a", 0)); - assert!(!testlook!(look, "a", 1)); - assert!(!testlook!(look, "a ", 1)); - assert!(testlook!(look, " a ", 1)); - assert!(!testlook!(look, " a ", 2)); - - // Unicode word boundaries with a non-ASCII codepoint. - assert!(testlook!(look, "𝛃", 0)); - assert!(!testlook!(look, "𝛃", 4)); - assert!(!testlook!(look, "𝛃 ", 4)); - assert!(testlook!(look, " 𝛃 ", 1)); - assert!(!testlook!(look, " 𝛃 ", 5)); - - // Unicode word boundaries between non-ASCII codepoints. - assert!(testlook!(look, "𝛃𐆀", 0)); - assert!(!testlook!(look, "𝛃𐆀", 4)); - - // Non word boundaries for ASCII. - assert!(!testlook!(look, "", 0)); - assert!(!testlook!(look, "ab", 1)); - assert!(!testlook!(look, "a ", 2)); - assert!(!testlook!(look, " a ", 0)); - assert!(!testlook!(look, " a ", 3)); - - // Non word boundaries with a non-ASCII codepoint. 
- assert!(!testlook!(look, "𝛃b", 4)); - assert!(!testlook!(look, "b𝛃", 1)); - assert!(!testlook!(look, "𝛃 ", 5)); - assert!(!testlook!(look, " 𝛃 ", 0)); - assert!(!testlook!(look, " 𝛃 ", 6)); - assert!(!testlook!(look, "𝛃", 1)); - assert!(!testlook!(look, "𝛃", 2)); - assert!(!testlook!(look, "𝛃", 3)); - - // Non word boundaries with non-ASCII codepoints. - assert!(!testlook!(look, "𝛃𐆀", 1)); - assert!(!testlook!(look, "𝛃𐆀", 2)); - assert!(!testlook!(look, "𝛃𐆀", 3)); - assert!(!testlook!(look, "𝛃𐆀", 5)); - assert!(!testlook!(look, "𝛃𐆀", 6)); - assert!(!testlook!(look, "𝛃𐆀", 7)); - assert!(!testlook!(look, "𝛃𐆀", 8)); - } - - #[test] - #[cfg(all(not(miri), feature = "unicode-word-boundary"))] - fn look_matches_word_end_unicode() { - let look = Look::WordEndUnicode; - - // \xF0\x9D\x9B\x83 = 𝛃 (in \w) - // \xF0\x90\x86\x80 = 𐆀 (not in \w) - - // Simple ASCII word boundaries. - assert!(!testlook!(look, "a", 0)); - assert!(testlook!(look, "a", 1)); - assert!(testlook!(look, "a ", 1)); - assert!(!testlook!(look, " a ", 1)); - assert!(testlook!(look, " a ", 2)); - - // Unicode word boundaries with a non-ASCII codepoint. - assert!(!testlook!(look, "𝛃", 0)); - assert!(testlook!(look, "𝛃", 4)); - assert!(testlook!(look, "𝛃 ", 4)); - assert!(!testlook!(look, " 𝛃 ", 1)); - assert!(testlook!(look, " 𝛃 ", 5)); - - // Unicode word boundaries between non-ASCII codepoints. - assert!(!testlook!(look, "𝛃𐆀", 0)); - assert!(testlook!(look, "𝛃𐆀", 4)); - - // Non word boundaries for ASCII. - assert!(!testlook!(look, "", 0)); - assert!(!testlook!(look, "ab", 1)); - assert!(!testlook!(look, "a ", 2)); - assert!(!testlook!(look, " a ", 0)); - assert!(!testlook!(look, " a ", 3)); - - // Non word boundaries with a non-ASCII codepoint. - assert!(!testlook!(look, "𝛃b", 4)); - assert!(!testlook!(look, "b𝛃", 1)); - assert!(!testlook!(look, "𝛃 ", 5)); - assert!(!testlook!(look, " 𝛃 ", 0)); - assert!(!testlook!(look, " 𝛃 ", 6)); - assert!(!testlook!(look, "𝛃", 1)); - assert!(!testlook!(look, "𝛃", 2)); - assert!(!testlook!(look, "𝛃", 3)); - - // Non word boundaries with non-ASCII codepoints. - assert!(!testlook!(look, "𝛃𐆀", 1)); - assert!(!testlook!(look, "𝛃𐆀", 2)); - assert!(!testlook!(look, "𝛃𐆀", 3)); - assert!(!testlook!(look, "𝛃𐆀", 5)); - assert!(!testlook!(look, "𝛃𐆀", 6)); - assert!(!testlook!(look, "𝛃𐆀", 7)); - assert!(!testlook!(look, "𝛃𐆀", 8)); - } - - #[test] - fn look_matches_word_start_half_ascii() { - let look = Look::WordStartHalfAscii; - - // \xF0\x9D\x9B\x83 = 𝛃 (in \w) - // \xF0\x90\x86\x80 = 𐆀 (not in \w) - - // Simple ASCII word boundaries. - assert!(testlook!(look, "a", 0)); - assert!(!testlook!(look, "a", 1)); - assert!(!testlook!(look, "a ", 1)); - assert!(testlook!(look, " a ", 1)); - assert!(!testlook!(look, " a ", 2)); - - // Unicode word boundaries with a non-ASCII codepoint. Since this is - // an ASCII word boundary, none of these match. - assert!(testlook!(look, "𝛃", 0)); - assert!(testlook!(look, "𝛃", 4)); - assert!(testlook!(look, "𝛃 ", 4)); - assert!(testlook!(look, " 𝛃 ", 1)); - assert!(testlook!(look, " 𝛃 ", 5)); - - // Unicode word boundaries between non-ASCII codepoints. Again, since - // this is an ASCII word boundary, none of these match. - assert!(testlook!(look, "𝛃𐆀", 0)); - assert!(testlook!(look, "𝛃𐆀", 4)); - - // Non word boundaries for ASCII. - assert!(testlook!(look, "", 0)); - assert!(!testlook!(look, "ab", 1)); - assert!(testlook!(look, "a ", 2)); - assert!(testlook!(look, " a ", 0)); - assert!(testlook!(look, " a ", 3)); - - // Non word boundaries with a non-ASCII codepoint. 
- assert!(testlook!(look, "𝛃b", 4)); - assert!(!testlook!(look, "b𝛃", 1)); - assert!(testlook!(look, "𝛃 ", 5)); - assert!(testlook!(look, " 𝛃 ", 0)); - assert!(testlook!(look, " 𝛃 ", 6)); - assert!(testlook!(look, "𝛃", 1)); - assert!(testlook!(look, "𝛃", 2)); - assert!(testlook!(look, "𝛃", 3)); - - // Non word boundaries with non-ASCII codepoints. - assert!(testlook!(look, "𝛃𐆀", 1)); - assert!(testlook!(look, "𝛃𐆀", 2)); - assert!(testlook!(look, "𝛃𐆀", 3)); - assert!(testlook!(look, "𝛃𐆀", 5)); - assert!(testlook!(look, "𝛃𐆀", 6)); - assert!(testlook!(look, "𝛃𐆀", 7)); - assert!(testlook!(look, "𝛃𐆀", 8)); - } - - #[test] - fn look_matches_word_end_half_ascii() { - let look = Look::WordEndHalfAscii; - - // \xF0\x9D\x9B\x83 = 𝛃 (in \w) - // \xF0\x90\x86\x80 = 𐆀 (not in \w) - - // Simple ASCII word boundaries. - assert!(!testlook!(look, "a", 0)); - assert!(testlook!(look, "a", 1)); - assert!(testlook!(look, "a ", 1)); - assert!(!testlook!(look, " a ", 1)); - assert!(testlook!(look, " a ", 2)); - - // Unicode word boundaries with a non-ASCII codepoint. Since this is - // an ASCII word boundary, none of these match. - assert!(testlook!(look, "𝛃", 0)); - assert!(testlook!(look, "𝛃", 4)); - assert!(testlook!(look, "𝛃 ", 4)); - assert!(testlook!(look, " 𝛃 ", 1)); - assert!(testlook!(look, " 𝛃 ", 5)); - - // Unicode word boundaries between non-ASCII codepoints. Again, since - // this is an ASCII word boundary, none of these match. - assert!(testlook!(look, "𝛃𐆀", 0)); - assert!(testlook!(look, "𝛃𐆀", 4)); - - // Non word boundaries for ASCII. - assert!(testlook!(look, "", 0)); - assert!(!testlook!(look, "ab", 1)); - assert!(testlook!(look, "a ", 2)); - assert!(testlook!(look, " a ", 0)); - assert!(testlook!(look, " a ", 3)); - - // Non word boundaries with a non-ASCII codepoint. - assert!(!testlook!(look, "𝛃b", 4)); - assert!(testlook!(look, "b𝛃", 1)); - assert!(testlook!(look, "𝛃 ", 5)); - assert!(testlook!(look, " 𝛃 ", 0)); - assert!(testlook!(look, " 𝛃 ", 6)); - assert!(testlook!(look, "𝛃", 1)); - assert!(testlook!(look, "𝛃", 2)); - assert!(testlook!(look, "𝛃", 3)); - - // Non word boundaries with non-ASCII codepoints. - assert!(testlook!(look, "𝛃𐆀", 1)); - assert!(testlook!(look, "𝛃𐆀", 2)); - assert!(testlook!(look, "𝛃𐆀", 3)); - assert!(testlook!(look, "𝛃𐆀", 5)); - assert!(testlook!(look, "𝛃𐆀", 6)); - assert!(testlook!(look, "𝛃𐆀", 7)); - assert!(testlook!(look, "𝛃𐆀", 8)); - } - - #[test] - #[cfg(all(not(miri), feature = "unicode-word-boundary"))] - fn look_matches_word_start_half_unicode() { - let look = Look::WordStartHalfUnicode; - - // \xF0\x9D\x9B\x83 = 𝛃 (in \w) - // \xF0\x90\x86\x80 = 𐆀 (not in \w) - - // Simple ASCII word boundaries. - assert!(testlook!(look, "a", 0)); - assert!(!testlook!(look, "a", 1)); - assert!(!testlook!(look, "a ", 1)); - assert!(testlook!(look, " a ", 1)); - assert!(!testlook!(look, " a ", 2)); - - // Unicode word boundaries with a non-ASCII codepoint. - assert!(testlook!(look, "𝛃", 0)); - assert!(!testlook!(look, "𝛃", 4)); - assert!(!testlook!(look, "𝛃 ", 4)); - assert!(testlook!(look, " 𝛃 ", 1)); - assert!(!testlook!(look, " 𝛃 ", 5)); - - // Unicode word boundaries between non-ASCII codepoints. - assert!(testlook!(look, "𝛃𐆀", 0)); - assert!(!testlook!(look, "𝛃𐆀", 4)); - - // Non word boundaries for ASCII. - assert!(testlook!(look, "", 0)); - assert!(!testlook!(look, "ab", 1)); - assert!(testlook!(look, "a ", 2)); - assert!(testlook!(look, " a ", 0)); - assert!(testlook!(look, " a ", 3)); - - // Non word boundaries with a non-ASCII codepoint. 
- assert!(!testlook!(look, "𝛃b", 4)); - assert!(!testlook!(look, "b𝛃", 1)); - assert!(testlook!(look, "𝛃 ", 5)); - assert!(testlook!(look, " 𝛃 ", 0)); - assert!(testlook!(look, " 𝛃 ", 6)); - assert!(!testlook!(look, "𝛃", 1)); - assert!(!testlook!(look, "𝛃", 2)); - assert!(!testlook!(look, "𝛃", 3)); - - // Non word boundaries with non-ASCII codepoints. - assert!(!testlook!(look, "𝛃𐆀", 1)); - assert!(!testlook!(look, "𝛃𐆀", 2)); - assert!(!testlook!(look, "𝛃𐆀", 3)); - assert!(!testlook!(look, "𝛃𐆀", 5)); - assert!(!testlook!(look, "𝛃𐆀", 6)); - assert!(!testlook!(look, "𝛃𐆀", 7)); - assert!(testlook!(look, "𝛃𐆀", 8)); - } - - #[test] - #[cfg(all(not(miri), feature = "unicode-word-boundary"))] - fn look_matches_word_end_half_unicode() { - let look = Look::WordEndHalfUnicode; - - // \xF0\x9D\x9B\x83 = 𝛃 (in \w) - // \xF0\x90\x86\x80 = 𐆀 (not in \w) - - // Simple ASCII word boundaries. - assert!(!testlook!(look, "a", 0)); - assert!(testlook!(look, "a", 1)); - assert!(testlook!(look, "a ", 1)); - assert!(!testlook!(look, " a ", 1)); - assert!(testlook!(look, " a ", 2)); - - // Unicode word boundaries with a non-ASCII codepoint. - assert!(!testlook!(look, "𝛃", 0)); - assert!(testlook!(look, "𝛃", 4)); - assert!(testlook!(look, "𝛃 ", 4)); - assert!(!testlook!(look, " 𝛃 ", 1)); - assert!(testlook!(look, " 𝛃 ", 5)); - - // Unicode word boundaries between non-ASCII codepoints. - assert!(!testlook!(look, "𝛃𐆀", 0)); - assert!(testlook!(look, "𝛃𐆀", 4)); - - // Non word boundaries for ASCII. - assert!(testlook!(look, "", 0)); - assert!(!testlook!(look, "ab", 1)); - assert!(testlook!(look, "a ", 2)); - assert!(testlook!(look, " a ", 0)); - assert!(testlook!(look, " a ", 3)); - - // Non word boundaries with a non-ASCII codepoint. - assert!(!testlook!(look, "𝛃b", 4)); - assert!(!testlook!(look, "b𝛃", 1)); - assert!(testlook!(look, "𝛃 ", 5)); - assert!(testlook!(look, " 𝛃 ", 0)); - assert!(testlook!(look, " 𝛃 ", 6)); - assert!(!testlook!(look, "𝛃", 1)); - assert!(!testlook!(look, "𝛃", 2)); - assert!(!testlook!(look, "𝛃", 3)); - - // Non word boundaries with non-ASCII codepoints. 
- assert!(!testlook!(look, "𝛃𐆀", 1)); - assert!(!testlook!(look, "𝛃𐆀", 2)); - assert!(!testlook!(look, "𝛃𐆀", 3)); - assert!(!testlook!(look, "𝛃𐆀", 5)); - assert!(!testlook!(look, "𝛃𐆀", 6)); - assert!(!testlook!(look, "𝛃𐆀", 7)); - assert!(testlook!(look, "𝛃𐆀", 8)); - } - - #[test] - fn look_set() { - let mut f = LookSet::default(); - assert!(!f.contains(Look::Start)); - assert!(!f.contains(Look::End)); - assert!(!f.contains(Look::StartLF)); - assert!(!f.contains(Look::EndLF)); - assert!(!f.contains(Look::WordUnicode)); - assert!(!f.contains(Look::WordUnicodeNegate)); - assert!(!f.contains(Look::WordAscii)); - assert!(!f.contains(Look::WordAsciiNegate)); - - f = f.insert(Look::Start); - assert!(f.contains(Look::Start)); - f = f.remove(Look::Start); - assert!(!f.contains(Look::Start)); - - f = f.insert(Look::End); - assert!(f.contains(Look::End)); - f = f.remove(Look::End); - assert!(!f.contains(Look::End)); - - f = f.insert(Look::StartLF); - assert!(f.contains(Look::StartLF)); - f = f.remove(Look::StartLF); - assert!(!f.contains(Look::StartLF)); - - f = f.insert(Look::EndLF); - assert!(f.contains(Look::EndLF)); - f = f.remove(Look::EndLF); - assert!(!f.contains(Look::EndLF)); - - f = f.insert(Look::StartCRLF); - assert!(f.contains(Look::StartCRLF)); - f = f.remove(Look::StartCRLF); - assert!(!f.contains(Look::StartCRLF)); - - f = f.insert(Look::EndCRLF); - assert!(f.contains(Look::EndCRLF)); - f = f.remove(Look::EndCRLF); - assert!(!f.contains(Look::EndCRLF)); - - f = f.insert(Look::WordUnicode); - assert!(f.contains(Look::WordUnicode)); - f = f.remove(Look::WordUnicode); - assert!(!f.contains(Look::WordUnicode)); - - f = f.insert(Look::WordUnicodeNegate); - assert!(f.contains(Look::WordUnicodeNegate)); - f = f.remove(Look::WordUnicodeNegate); - assert!(!f.contains(Look::WordUnicodeNegate)); - - f = f.insert(Look::WordAscii); - assert!(f.contains(Look::WordAscii)); - f = f.remove(Look::WordAscii); - assert!(!f.contains(Look::WordAscii)); - - f = f.insert(Look::WordAsciiNegate); - assert!(f.contains(Look::WordAsciiNegate)); - f = f.remove(Look::WordAsciiNegate); - assert!(!f.contains(Look::WordAsciiNegate)); - - f = f.insert(Look::WordStartAscii); - assert!(f.contains(Look::WordStartAscii)); - f = f.remove(Look::WordStartAscii); - assert!(!f.contains(Look::WordStartAscii)); - - f = f.insert(Look::WordEndAscii); - assert!(f.contains(Look::WordEndAscii)); - f = f.remove(Look::WordEndAscii); - assert!(!f.contains(Look::WordEndAscii)); - - f = f.insert(Look::WordStartUnicode); - assert!(f.contains(Look::WordStartUnicode)); - f = f.remove(Look::WordStartUnicode); - assert!(!f.contains(Look::WordStartUnicode)); - - f = f.insert(Look::WordEndUnicode); - assert!(f.contains(Look::WordEndUnicode)); - f = f.remove(Look::WordEndUnicode); - assert!(!f.contains(Look::WordEndUnicode)); - - f = f.insert(Look::WordStartHalfAscii); - assert!(f.contains(Look::WordStartHalfAscii)); - f = f.remove(Look::WordStartHalfAscii); - assert!(!f.contains(Look::WordStartHalfAscii)); - - f = f.insert(Look::WordEndHalfAscii); - assert!(f.contains(Look::WordEndHalfAscii)); - f = f.remove(Look::WordEndHalfAscii); - assert!(!f.contains(Look::WordEndHalfAscii)); - - f = f.insert(Look::WordStartHalfUnicode); - assert!(f.contains(Look::WordStartHalfUnicode)); - f = f.remove(Look::WordStartHalfUnicode); - assert!(!f.contains(Look::WordStartHalfUnicode)); - - f = f.insert(Look::WordEndHalfUnicode); - assert!(f.contains(Look::WordEndHalfUnicode)); - f = f.remove(Look::WordEndHalfUnicode); - 
assert!(!f.contains(Look::WordEndHalfUnicode)); - } - - #[test] - fn look_set_iter() { - let set = LookSet::empty(); - assert_eq!(0, set.iter().count()); - - let set = LookSet::full(); - assert_eq!(18, set.iter().count()); - - let set = - LookSet::empty().insert(Look::StartLF).insert(Look::WordUnicode); - assert_eq!(2, set.iter().count()); - - let set = LookSet::empty().insert(Look::StartLF); - assert_eq!(1, set.iter().count()); - - let set = LookSet::empty().insert(Look::WordAsciiNegate); - assert_eq!(1, set.iter().count()); - - let set = LookSet::empty().insert(Look::WordEndHalfUnicode); - assert_eq!(1, set.iter().count()); - } - - #[test] - #[cfg(feature = "alloc")] - fn look_set_debug() { - let res = alloc::format!("{:?}", LookSet::empty()); - assert_eq!("∅", res); - let res = alloc::format!("{:?}", LookSet::full()); - assert_eq!("Az^$rRbB𝛃𝚩<>〈〉◁▷◀▶", res); - } -} diff --git a/vendor/regex-automata/src/util/memchr.rs b/vendor/regex-automata/src/util/memchr.rs deleted file mode 100644 index a2cbb07321a721..00000000000000 --- a/vendor/regex-automata/src/util/memchr.rs +++ /dev/null @@ -1,93 +0,0 @@ -/*! -This module defines simple wrapper routines for the memchr functions from the -`memchr` crate. Basically, when the `memchr` crate is available, we use it, -otherwise we use a naive implementation which is still pretty fast. -*/ - -pub(crate) use self::inner::*; - -#[cfg(feature = "perf-literal-substring")] -pub(super) mod inner { - #[cfg_attr(feature = "perf-inline", inline(always))] - pub(crate) fn memchr(n1: u8, haystack: &[u8]) -> Option { - memchr::memchr(n1, haystack) - } - - #[cfg_attr(feature = "perf-inline", inline(always))] - pub(crate) fn memchr2(n1: u8, n2: u8, haystack: &[u8]) -> Option { - memchr::memchr2(n1, n2, haystack) - } - - #[cfg_attr(feature = "perf-inline", inline(always))] - pub(crate) fn memchr3( - n1: u8, - n2: u8, - n3: u8, - haystack: &[u8], - ) -> Option { - memchr::memchr3(n1, n2, n3, haystack) - } - - #[cfg_attr(feature = "perf-inline", inline(always))] - pub(crate) fn memrchr(n1: u8, haystack: &[u8]) -> Option { - memchr::memrchr(n1, haystack) - } - - #[cfg_attr(feature = "perf-inline", inline(always))] - pub(crate) fn memrchr2(n1: u8, n2: u8, haystack: &[u8]) -> Option { - memchr::memrchr2(n1, n2, haystack) - } - - #[cfg_attr(feature = "perf-inline", inline(always))] - pub(crate) fn memrchr3( - n1: u8, - n2: u8, - n3: u8, - haystack: &[u8], - ) -> Option { - memchr::memrchr3(n1, n2, n3, haystack) - } -} - -#[cfg(not(feature = "perf-literal-substring"))] -pub(super) mod inner { - #[cfg_attr(feature = "perf-inline", inline(always))] - pub(crate) fn memchr(n1: u8, haystack: &[u8]) -> Option { - haystack.iter().position(|&b| b == n1) - } - - #[cfg_attr(feature = "perf-inline", inline(always))] - pub(crate) fn memchr2(n1: u8, n2: u8, haystack: &[u8]) -> Option { - haystack.iter().position(|&b| b == n1 || b == n2) - } - - #[cfg_attr(feature = "perf-inline", inline(always))] - pub(crate) fn memchr3( - n1: u8, - n2: u8, - n3: u8, - haystack: &[u8], - ) -> Option { - haystack.iter().position(|&b| b == n1 || b == n2 || b == n3) - } - - #[cfg_attr(feature = "perf-inline", inline(always))] - pub(crate) fn memrchr(n1: u8, haystack: &[u8]) -> Option { - haystack.iter().rposition(|&b| b == n1) - } - - #[cfg_attr(feature = "perf-inline", inline(always))] - pub(crate) fn memrchr2(n1: u8, n2: u8, haystack: &[u8]) -> Option { - haystack.iter().rposition(|&b| b == n1 || b == n2) - } - - #[cfg_attr(feature = "perf-inline", inline(always))] - pub(crate) fn memrchr3( - n1: u8, - 
n2: u8, - n3: u8, - haystack: &[u8], - ) -> Option { - haystack.iter().rposition(|&b| b == n1 || b == n2 || b == n3) - } -} diff --git a/vendor/regex-automata/src/util/mod.rs b/vendor/regex-automata/src/util/mod.rs deleted file mode 100644 index b3eef64e64b476..00000000000000 --- a/vendor/regex-automata/src/util/mod.rs +++ /dev/null @@ -1,57 +0,0 @@ -/*! -A collection of modules that provide APIs that are useful across many regex -engines. - -While one should explore the sub-modules directly to get a sense of what's -there, here are some highlights that tie the sub-modules to higher level -use cases: - -* `alphabet` contains APIs that are useful if you're doing low level things -with the DFAs in this crate. For example, implementing determinization or -walking its state graph directly. -* `captures` contains APIs for dealing with capture group matches and their -mapping to "slots" used inside an NFA graph. This is also where you can find -iterators over capture group names. -* `escape` contains types for pretty-printing raw byte slices as strings. -* `iter` contains API helpers for writing regex iterators. -* `lazy` contains a no-std and no-alloc variant of `lazy_static!` and -`once_cell`. -* `look` contains APIs for matching and configuring look-around assertions. -* `pool` provides a way to reuse mutable memory allocated in a thread safe -manner. -* `prefilter` provides APIs for building prefilters and using them in searches. -* `primitives` are what you might use if you're doing lower level work on -automata, such as walking an NFA state graph. -* `syntax` provides some higher level convenience functions for interacting -with the `regex-syntax` crate. -* `wire` is useful if you're working with DFA serialization. -*/ - -pub mod alphabet; -#[cfg(feature = "alloc")] -pub mod captures; -pub mod escape; -#[cfg(feature = "alloc")] -pub mod interpolate; -pub mod iter; -pub mod lazy; -pub mod look; -#[cfg(feature = "alloc")] -pub mod pool; -pub mod prefilter; -pub mod primitives; -pub mod start; -#[cfg(feature = "syntax")] -pub mod syntax; -pub mod wire; - -#[cfg(any(feature = "dfa-build", feature = "hybrid"))] -pub(crate) mod determinize; -pub(crate) mod empty; -pub(crate) mod int; -pub(crate) mod memchr; -pub(crate) mod search; -#[cfg(feature = "alloc")] -pub(crate) mod sparse_set; -pub(crate) mod unicode_data; -pub(crate) mod utf8; diff --git a/vendor/regex-automata/src/util/pool.rs b/vendor/regex-automata/src/util/pool.rs deleted file mode 100644 index 567ebfb2ea9582..00000000000000 --- a/vendor/regex-automata/src/util/pool.rs +++ /dev/null @@ -1,1199 +0,0 @@ -// This module provides a relatively simple thread-safe pool of reusable -// objects. For the most part, it's implemented by a stack represented by a -// Mutex>. It has one small trick: because unlocking a mutex is somewhat -// costly, in the case where a pool is accessed by the first thread that tried -// to get a value, we bypass the mutex. Here are some benchmarks showing the -// difference. -// -// 2022-10-15: These benchmarks are from the old regex crate and they aren't -// easy to reproduce because some rely on older implementations of Pool that -// are no longer around. I've left the results here for posterity, but any -// enterprising individual should feel encouraged to re-litigate the way Pool -// works. I am not at all certain it is the best approach. 
-// -// 1) misc::anchored_literal_long_non_match 21 (18571 MB/s) -// 2) misc::anchored_literal_long_non_match 107 (3644 MB/s) -// 3) misc::anchored_literal_long_non_match 45 (8666 MB/s) -// 4) misc::anchored_literal_long_non_match 19 (20526 MB/s) -// -// (1) represents our baseline: the master branch at the time of writing when -// using the 'thread_local' crate to implement the pool below. -// -// (2) represents a naive pool implemented completely via Mutex>. There -// is no special trick for bypassing the mutex. -// -// (3) is the same as (2), except it uses Mutex>>. It is twice as -// fast because a Box is much smaller than the T we use with a Pool in this -// crate. So pushing and popping a Box from a Vec is quite a bit faster -// than for T. -// -// (4) is the same as (3), but with the trick for bypassing the mutex in the -// case of the first-to-get thread. -// -// Why move off of thread_local? Even though (4) is a hair faster than (1) -// above, this was not the main goal. The main goal was to move off of -// thread_local and find a way to *simply* re-capture some of its speed for -// regex's specific case. So again, why move off of it? The *primary* reason is -// because of memory leaks. See https://github.com/rust-lang/regex/issues/362 -// for example. (Why do I want it to be simple? Well, I suppose what I mean is, -// "use as much safe code as possible to minimize risk and be as sure as I can -// be that it is correct.") -// -// My guess is that the thread_local design is probably not appropriate for -// regex since its memory usage scales to the number of active threads that -// have used a regex, where as the pool below scales to the number of threads -// that simultaneously use a regex. While neither case permits contraction, -// since we own the pool data structure below, we can add contraction if a -// clear use case pops up in the wild. More pressingly though, it seems that -// there are at least some use case patterns where one might have many threads -// sitting around that might have used a regex at one point. While thread_local -// does try to reuse space previously used by a thread that has since stopped, -// its maximal memory usage still scales with the total number of active -// threads. In contrast, the pool below scales with the total number of threads -// *simultaneously* using the pool. The hope is that this uses less memory -// overall. And if it doesn't, we can hopefully tune it somehow. -// -// It seems that these sort of conditions happen frequently -// in FFI inside of other more "managed" languages. This was -// mentioned in the issue linked above, and also mentioned here: -// https://github.com/BurntSushi/rure-go/issues/3. And in particular, users -// confirm that disabling the use of thread_local resolves the leak. -// -// There were other weaker reasons for moving off of thread_local as well. -// Namely, at the time, I was looking to reduce dependencies. And for something -// like regex, maintenance can be simpler when we own the full dependency tree. -// -// Note that I am not entirely happy with this pool. It has some subtle -// implementation details and is overall still observable (even with the -// thread owner optimization) in benchmarks. If someone wants to take a crack -// at building something better, please file an issue. Even if it means a -// different API. The API exposed by this pool is not the minimal thing that -// something like a 'Regex' actually needs. 
It could adapt to, for example, -// an API more like what is found in the 'thread_local' crate. However, we do -// really need to support the no-std alloc-only context, or else the regex -// crate wouldn't be able to support no-std alloc-only. However, I'm generally -// okay with making the alloc-only context slower (as it is here), although I -// do find it unfortunate. - -/*! -A thread safe memory pool. - -The principal type in this module is a [`Pool`]. It main use case is for -holding a thread safe collection of mutable scratch spaces (usually called -`Cache` in this crate) that regex engines need to execute a search. This then -permits sharing the same read-only regex object across multiple threads while -having a quick way of reusing scratch space in a thread safe way. This avoids -needing to re-create the scratch space for every search, which could wind up -being quite expensive. -*/ - -/// A thread safe pool that works in an `alloc`-only context. -/// -/// Getting a value out comes with a guard. When that guard is dropped, the -/// value is automatically put back in the pool. The guard provides both a -/// `Deref` and a `DerefMut` implementation for easy access to an underlying -/// `T`. -/// -/// A `Pool` impls `Sync` when `T` is `Send` (even if `T` is not `Sync`). This -/// is possible because a pool is guaranteed to provide a value to exactly one -/// thread at any time. -/// -/// Currently, a pool never contracts in size. Its size is proportional to the -/// maximum number of simultaneous uses. This may change in the future. -/// -/// A `Pool` is a particularly useful data structure for this crate because -/// many of the regex engines require a mutable "cache" in order to execute -/// a search. Since regexes themselves tend to be global, the problem is then: -/// how do you get a mutable cache to execute a search? You could: -/// -/// 1. Use a `thread_local!`, which requires the standard library and requires -/// that the regex pattern be statically known. -/// 2. Use a `Pool`. -/// 3. Make the cache an explicit dependency in your code and pass it around. -/// 4. Put the cache state in a `Mutex`, but this means only one search can -/// execute at a time. -/// 5. Create a new cache for every search. -/// -/// A `thread_local!` is perhaps the best choice if it works for your use case. -/// Putting the cache in a mutex or creating a new cache for every search are -/// perhaps the worst choices. Of the remaining two choices, whether you use -/// this `Pool` or thread through a cache explicitly in your code is a matter -/// of taste and depends on your code architecture. -/// -/// # Warning: may use a spin lock -/// -/// When this crate is compiled _without_ the `std` feature, then this type -/// may used a spin lock internally. This can have subtle effects that may -/// be undesirable. See [Spinlocks Considered Harmful][spinharm] for a more -/// thorough treatment of this topic. -/// -/// [spinharm]: https://matklad.github.io/2020/01/02/spinlocks-considered-harmful.html -/// -/// # Example -/// -/// This example shows how to share a single hybrid regex among multiple -/// threads, while also safely getting exclusive access to a hybrid's -/// [`Cache`](crate::hybrid::regex::Cache) without preventing other searches -/// from running while your thread uses the `Cache`. 
-/// -/// ``` -/// use regex_automata::{ -/// hybrid::regex::{Cache, Regex}, -/// util::{lazy::Lazy, pool::Pool}, -/// Match, -/// }; -/// -/// static RE: Lazy = -/// Lazy::new(|| Regex::new("foo[0-9]+bar").unwrap()); -/// static CACHE: Lazy> = -/// Lazy::new(|| Pool::new(|| RE.create_cache())); -/// -/// let expected = Some(Match::must(0, 3..14)); -/// assert_eq!(expected, RE.find(&mut CACHE.get(), b"zzzfoo12345barzzz")); -/// ``` -pub struct Pool T>(alloc::boxed::Box>); - -impl Pool { - /// Create a new pool. The given closure is used to create values in - /// the pool when necessary. - pub fn new(create: F) -> Pool { - Pool(alloc::boxed::Box::new(inner::Pool::new(create))) - } -} - -impl T> Pool { - /// Get a value from the pool. The caller is guaranteed to have - /// exclusive access to the given value. Namely, it is guaranteed that - /// this will never return a value that was returned by another call to - /// `get` but was not put back into the pool. - /// - /// When the guard goes out of scope and its destructor is called, then - /// it will automatically be put back into the pool. Alternatively, - /// [`PoolGuard::put`] may be used to explicitly put it back in the pool - /// without relying on its destructor. - /// - /// Note that there is no guarantee provided about which value in the - /// pool is returned. That is, calling get, dropping the guard (causing - /// the value to go back into the pool) and then calling get again is - /// *not* guaranteed to return the same value received in the first `get` - /// call. - #[inline] - pub fn get(&self) -> PoolGuard<'_, T, F> { - PoolGuard(self.0.get()) - } -} - -impl core::fmt::Debug for Pool { - fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { - f.debug_tuple("Pool").field(&self.0).finish() - } -} - -/// A guard that is returned when a caller requests a value from the pool. -/// -/// The purpose of the guard is to use RAII to automatically put the value -/// back in the pool once it's dropped. -pub struct PoolGuard<'a, T: Send, F: Fn() -> T>(inner::PoolGuard<'a, T, F>); - -impl<'a, T: Send, F: Fn() -> T> PoolGuard<'a, T, F> { - /// Consumes this guard and puts it back into the pool. - /// - /// This circumvents the guard's `Drop` implementation. This can be useful - /// in circumstances where the automatic `Drop` results in poorer codegen, - /// such as calling non-inlined functions. - #[inline] - pub fn put(this: PoolGuard<'_, T, F>) { - inner::PoolGuard::put(this.0); - } -} - -impl<'a, T: Send, F: Fn() -> T> core::ops::Deref for PoolGuard<'a, T, F> { - type Target = T; - - #[inline] - fn deref(&self) -> &T { - self.0.value() - } -} - -impl<'a, T: Send, F: Fn() -> T> core::ops::DerefMut for PoolGuard<'a, T, F> { - #[inline] - fn deref_mut(&mut self) -> &mut T { - self.0.value_mut() - } -} - -impl<'a, T: Send + core::fmt::Debug, F: Fn() -> T> core::fmt::Debug - for PoolGuard<'a, T, F> -{ - fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { - f.debug_tuple("PoolGuard").field(&self.0).finish() - } -} - -#[cfg(feature = "std")] -mod inner { - use core::{ - cell::UnsafeCell, - panic::{RefUnwindSafe, UnwindSafe}, - sync::atomic::{AtomicUsize, Ordering}, - }; - - use alloc::{boxed::Box, vec, vec::Vec}; - - use std::{sync::Mutex, thread_local}; - - /// An atomic counter used to allocate thread IDs. - /// - /// We specifically start our counter at 3 so that we can use the values - /// less than it as sentinels. 
- static COUNTER: AtomicUsize = AtomicUsize::new(3); - - /// A thread ID indicating that there is no owner. This is the initial - /// state of a pool. Once a pool has an owner, there is no way to change - /// it. - static THREAD_ID_UNOWNED: usize = 0; - - /// A thread ID indicating that the special owner value is in use and not - /// available. This state is useful for avoiding a case where the owner - /// of a pool calls `get` before putting the result of a previous `get` - /// call back into the pool. - static THREAD_ID_INUSE: usize = 1; - - /// This sentinel is used to indicate that a guard has already been dropped - /// and should not be re-dropped. We use this because our drop code can be - /// called outside of Drop and thus there could be a bug in the internal - /// implementation that results in trying to put the same guard back into - /// the same pool multiple times, and *that* could result in UB if we - /// didn't mark the guard as already having been put back in the pool. - /// - /// So this isn't strictly necessary, but this let's us define some - /// routines as safe (like PoolGuard::put_imp) that we couldn't otherwise - /// do. - static THREAD_ID_DROPPED: usize = 2; - - /// The number of stacks we use inside of the pool. These are only used for - /// non-owners. That is, these represent the "slow" path. - /// - /// In the original implementation of this pool, we only used a single - /// stack. While this might be okay for a couple threads, the prevalence of - /// 32, 64 and even 128 core CPUs has made it untenable. The contention - /// such an environment introduces when threads are doing a lot of searches - /// on short haystacks (a not uncommon use case) is palpable and leads to - /// huge slowdowns. - /// - /// This constant reflects a change from using one stack to the number of - /// stacks that this constant is set to. The stack for a particular thread - /// is simply chosen by `thread_id % MAX_POOL_STACKS`. The idea behind - /// this setup is that there should be a good chance that accesses to the - /// pool will be distributed over several stacks instead of all of them - /// converging to one. - /// - /// This is not a particularly smart or dynamic strategy. Fixing this to a - /// specific number has at least two downsides. First is that it will help, - /// say, an 8 core CPU more than it will a 128 core CPU. (But, crucially, - /// it will still help the 128 core case.) Second is that this may wind - /// up being a little wasteful with respect to memory usage. Namely, if a - /// regex is used on one thread and then moved to another thread, then it - /// could result in creating a new copy of the data in the pool even though - /// only one is actually needed. - /// - /// And that memory usage bit is why this is set to 8 and not, say, 64. - /// Keeping it at 8 limits, to an extent, how much unnecessary memory can - /// be allocated. - /// - /// In an ideal world, we'd be able to have something like this: - /// - /// * Grow the number of stacks as the number of concurrent callers - /// increases. I spent a little time trying this, but even just adding an - /// atomic addition/subtraction for each pop/push for tracking concurrent - /// callers led to a big perf hit. Since even more work would seemingly be - /// required than just an addition/subtraction, I abandoned this approach. - /// * The maximum amount of memory used should scale with respect to the - /// number of concurrent callers and *not* the total number of existing - /// threads. 
This is primarily why the `thread_local` crate isn't used, as - /// as some environments spin up a lot of threads. This led to multiple - /// reports of extremely high memory usage (often described as memory - /// leaks). - /// * Even more ideally, the pool should contract in size. That is, it - /// should grow with bursts and then shrink. But this is a pretty thorny - /// issue to tackle and it might be better to just not. - /// * It would be nice to explore the use of, say, a lock-free stack - /// instead of using a mutex to guard a `Vec` that is ultimately just - /// treated as a stack. The main thing preventing me from exploring this - /// is the ABA problem. The `crossbeam` crate has tools for dealing with - /// this sort of problem (via its epoch based memory reclamation strategy), - /// but I can't justify bringing in all of `crossbeam` as a dependency of - /// `regex` for this. - /// - /// See this issue for more context and discussion: - /// https://github.com/rust-lang/regex/issues/934 - const MAX_POOL_STACKS: usize = 8; - - thread_local!( - /// A thread local used to assign an ID to a thread. - static THREAD_ID: usize = { - let next = COUNTER.fetch_add(1, Ordering::Relaxed); - // SAFETY: We cannot permit the reuse of thread IDs since reusing a - // thread ID might result in more than one thread "owning" a pool, - // and thus, permit accessing a mutable value from multiple threads - // simultaneously without synchronization. The intent of this panic - // is to be a sanity check. It is not expected that the thread ID - // space will actually be exhausted in practice. Even on a 32-bit - // system, it would require spawning 2^32 threads (although they - // wouldn't all need to run simultaneously, so it is in theory - // possible). - // - // This checks that the counter never wraps around, since atomic - // addition wraps around on overflow. - if next == 0 { - panic!("regex: thread ID allocation space exhausted"); - } - next - }; - ); - - /// This puts each stack in the pool below into its own cache line. This is - /// an absolutely critical optimization that tends to have the most impact - /// in high contention workloads. Without forcing each mutex protected - /// into its own cache line, high contention exacerbates the performance - /// problem by causing "false sharing." By putting each mutex in its own - /// cache-line, we avoid the false sharing problem and the affects of - /// contention are greatly reduced. - #[derive(Debug)] - #[repr(C, align(64))] - struct CacheLine(T); - - /// A thread safe pool utilizing std-only features. - /// - /// The main difference between this and the simplistic alloc-only pool is - /// the use of std::sync::Mutex and an "owner thread" optimization that - /// makes accesses by the owner of a pool faster than all other threads. - /// This makes the common case of running a regex within a single thread - /// faster by avoiding mutex unlocking. - pub(super) struct Pool { - /// A function to create more T values when stack is empty and a caller - /// has requested a T. - create: F, - /// Multiple stacks of T values to hand out. These are used when a Pool - /// is accessed by a thread that didn't create it. - /// - /// Conceptually this is `Mutex>>`, but sharded out to make - /// it scale better under high contention work-loads. We index into - /// this sequence via `thread_id % stacks.len()`. - stacks: Vec>>>>, - /// The ID of the thread that owns this pool. The owner is the thread - /// that makes the first call to 'get'. 
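The `CacheLine` wrapper and the `thread_id % MAX_POOL_STACKS` indexing described above can be illustrated in isolation. In this sketch, `Padded`, `Sharded`, and `SHARDS` are hypothetical names, not the vendored ones.

```rust
use std::sync::Mutex;

// Force each shard onto its own cache line (64 bytes on common
// x86_64 and many ARM parts) so locking one shard does not touch the
// line holding its neighbor.
#[repr(C, align(64))]
struct Padded<T>(T);

const SHARDS: usize = 8;

struct Sharded<T> {
    shards: Vec<Padded<Mutex<Vec<T>>>>,
}

impl<T> Sharded<T> {
    fn new() -> Self {
        let mut shards = Vec::with_capacity(SHARDS);
        for _ in 0..SHARDS {
            shards.push(Padded(Mutex::new(Vec::new())));
        }
        Sharded { shards }
    }

    // Route a caller to a shard by its thread id, so different threads
    // usually contend on different mutexes.
    fn shard_for(&self, thread_id: usize) -> &Mutex<Vec<T>> {
        &self.shards[thread_id % self.shards.len()].0
    }
}

fn main() {
    let pool: Sharded<u64> = Sharded::new();
    pool.shard_for(3).lock().unwrap().push(42);
    assert_eq!(std::mem::align_of::<Padded<Mutex<Vec<u64>>>>(), 64);
}
```

Without the `align(64)` padding, adjacent mutexes can share a cache line, and contended locks on different shards still invalidate each other's lines (false sharing).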
When the owner calls 'get', it - /// gets 'owner_val' directly instead of returning a T from 'stack'. - /// See comments elsewhere for details, but this is intended to be an - /// optimization for the common case that makes getting a T faster. - /// - /// It is initialized to a value of zero (an impossible thread ID) as a - /// sentinel to indicate that it is unowned. - owner: AtomicUsize, - /// A value to return when the caller is in the same thread that - /// first called `Pool::get`. - /// - /// This is set to None when a Pool is first created, and set to Some - /// once the first thread calls Pool::get. - owner_val: UnsafeCell>, - } - - // SAFETY: Since we want to use a Pool from multiple threads simultaneously - // behind an Arc, we need for it to be Sync. In cases where T is sync, - // Pool would be Sync. However, since we use a Pool to store mutable - // scratch space, we wind up using a T that has interior mutability and is - // thus itself not Sync. So what we *really* want is for our Pool to by - // Sync even when T is not Sync (but is at least Send). - // - // The only non-sync aspect of a Pool is its 'owner_val' field, which is - // used to implement faster access to a pool value in the common case of - // a pool being accessed in the same thread in which it was created. The - // 'stack' field is also shared, but a Mutex where T: Send is already - // Sync. So we only need to worry about 'owner_val'. - // - // The key is to guarantee that 'owner_val' can only ever be accessed from - // one thread. In our implementation below, we guarantee this by only - // returning the 'owner_val' when the ID of the current thread matches the - // ID of the thread that first called 'Pool::get'. Since this can only ever - // be one thread, it follows that only one thread can access 'owner_val' at - // any point in time. Thus, it is safe to declare that Pool is Sync when - // T is Send. - // - // If there is a way to achieve our performance goals using safe code, then - // I would very much welcome a patch. As it stands, the implementation - // below tries to balance safety with performance. The case where a Regex - // is used from multiple threads simultaneously will suffer a bit since - // getting a value out of the pool will require unlocking a mutex. - // - // We require `F: Send + Sync` because we call `F` at any point on demand, - // potentially from multiple threads simultaneously. - unsafe impl Sync for Pool {} - - // If T is UnwindSafe, then since we provide exclusive access to any - // particular value in the pool, the pool should therefore also be - // considered UnwindSafe. - // - // We require `F: UnwindSafe + RefUnwindSafe` because we call `F` at any - // point on demand, so it needs to be unwind safe on both dimensions for - // the entire Pool to be unwind safe. - impl UnwindSafe for Pool {} - - // If T is UnwindSafe, then since we provide exclusive access to any - // particular value in the pool, the pool should therefore also be - // considered RefUnwindSafe. - // - // We require `F: UnwindSafe + RefUnwindSafe` because we call `F` at any - // point on demand, so it needs to be unwind safe on both dimensions for - // the entire Pool to be unwind safe. - impl RefUnwindSafe - for Pool - { - } - - impl Pool { - /// Create a new pool. The given closure is used to create values in - /// the pool when necessary. - pub(super) fn new(create: F) -> Pool { - // FIXME: Now that we require 1.65+, Mutex::new is available as - // const... So we can almost mark this function as const. 
But of - // course, we're creating a Vec of stacks below (we didn't when I - // originally wrote this code). It seems like the best way to work - // around this would be to use a `[Stack; MAX_POOL_STACKS]` instead - // of a `Vec`. I refrained from making this change at time - // of writing (2023/10/08) because I was making a lot of other - // changes at the same time and wanted to do this more carefully. - // Namely, because of the cache line optimization, that `[Stack; - // MAX_POOL_STACKS]` would be quite big. It's unclear how bad (if - // at all) that would be. - // - // Another choice would be to lazily allocate the stacks, but... - // I'm not so sure about that. Seems like a fair bit of complexity? - // - // Maybe there's a simple solution I'm missing. - // - // ... OK, I tried to fix this. First, I did it by putting `stacks` - // in an `UnsafeCell` and using a `Once` to lazily initialize it. - // I benchmarked it and everything looked okay. I then made this - // function `const` and thought I was just about done. But the - // public pool type wraps its inner pool in a `Box` to keep its - // size down. Blech. - // - // So then I thought that I could push the box down into this - // type (and leave the non-std version unboxed) and use the same - // `UnsafeCell` technique to lazily initialize it. This has the - // downside of the `Once` now needing to get hit in the owner fast - // path, but maybe that's OK? However, I then realized that we can - // only lazily initialize `stacks`, `owner` and `owner_val`. The - // `create` function needs to be put somewhere outside of the box. - // So now the pool is a `Box`, `Once` and a function. Now we're - // starting to defeat the point of boxing in the first place. So I - // backed out that change too. - // - // Back to square one. I maybe we just don't make a pool's - // constructor const and live with it. It's probably not a huge - // deal. - let mut stacks = Vec::with_capacity(MAX_POOL_STACKS); - for _ in 0..stacks.capacity() { - stacks.push(CacheLine(Mutex::new(vec![]))); - } - let owner = AtomicUsize::new(THREAD_ID_UNOWNED); - let owner_val = UnsafeCell::new(None); // init'd on first access - Pool { create, stacks, owner, owner_val } - } - } - - impl T> Pool { - /// Get a value from the pool. This may block if another thread is also - /// attempting to retrieve a value from the pool. - #[inline] - pub(super) fn get(&self) -> PoolGuard<'_, T, F> { - // Our fast path checks if the caller is the thread that "owns" - // this pool. Or stated differently, whether it is the first thread - // that tried to extract a value from the pool. If it is, then we - // can return a T to the caller without going through a mutex. - // - // SAFETY: We must guarantee that only one thread gets access - // to this value. Since a thread is uniquely identified by the - // THREAD_ID thread local, it follows that if the caller's thread - // ID is equal to the owner, then only one thread may receive this - // value. This is also why we can get away with what looks like a - // racy load and a store. We know that if 'owner == caller', then - // only one thread can be here, so we don't need to worry about any - // other thread setting the owner to something else. - let caller = THREAD_ID.with(|id| *id); - let owner = self.owner.load(Ordering::Acquire); - if caller == owner { - // N.B. We could also do a CAS here instead of a load/store, - // but ad hoc benchmarking suggests it is slower. And a lot - // slower in the case where `get_slow` is common. 
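A stripped-down sketch of the owner fast path described above: thread ids come from an atomic counter that skips the small sentinel values, and the owning thread bypasses the mutexes entirely when the stored owner id matches its own. All names here are illustrative.

```rust
use std::sync::atomic::{AtomicUsize, Ordering};

// Values 0 and 1 are reserved as sentinels (roughly "unowned" and
// "owned value checked out"), so real thread ids start at 2.
const IN_USE: usize = 1;
static NEXT_ID: AtomicUsize = AtomicUsize::new(2);

thread_local! {
    static MY_ID: usize = NEXT_ID.fetch_add(1, Ordering::Relaxed);
}

struct Owner {
    owner: AtomicUsize,
}

impl Owner {
    // Fast path: if this thread is the recorded owner, take the owned
    // slot without touching any mutex.
    fn try_fast_path(&self) -> bool {
        let me = MY_ID.with(|id| *id);
        if self.owner.load(Ordering::Acquire) == me {
            self.owner.store(IN_USE, Ordering::Release);
            return true;
        }
        false
    }

    // Called when the owned slot is handed back: restore the owner id
    // so the fast path works again on the next call.
    fn release_fast_path(&self) {
        let me = MY_ID.with(|id| *id);
        self.owner.store(me, Ordering::Release);
    }
}

fn main() {
    let me = MY_ID.with(|id| *id);
    let pool = Owner { owner: AtomicUsize::new(me) };
    assert!(pool.try_fast_path()); // owner matches: no lock taken
    pool.release_fast_path();
    assert!(pool.try_fast_path());
}
```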
- self.owner.store(THREAD_ID_INUSE, Ordering::Release); - return self.guard_owned(caller); - } - self.get_slow(caller, owner) - } - - /// This is the "slow" version that goes through a mutex to pop an - /// allocated value off a stack to return to the caller. (Or, if the - /// stack is empty, a new value is created.) - /// - /// If the pool has no owner, then this will set the owner. - #[cold] - fn get_slow( - &self, - caller: usize, - owner: usize, - ) -> PoolGuard<'_, T, F> { - if owner == THREAD_ID_UNOWNED { - // This sentinel means this pool is not yet owned. We try to - // atomically set the owner. If we do, then this thread becomes - // the owner and we can return a guard that represents the - // special T for the owner. - // - // Note that we set the owner to a different sentinel that - // indicates that the owned value is in use. The owner ID will - // get updated to the actual ID of this thread once the guard - // returned by this function is put back into the pool. - let res = self.owner.compare_exchange( - THREAD_ID_UNOWNED, - THREAD_ID_INUSE, - Ordering::AcqRel, - Ordering::Acquire, - ); - if res.is_ok() { - // SAFETY: A successful CAS above implies this thread is - // the owner and that this is the only such thread that - // can reach here. Thus, there is no data race. - unsafe { - *self.owner_val.get() = Some((self.create)()); - } - return self.guard_owned(caller); - } - } - let stack_id = caller % self.stacks.len(); - // We try to acquire exclusive access to this thread's stack, and - // if so, grab a value from it if we can. We put this in a loop so - // that it's easy to tweak and experiment with a different number - // of tries. In the end, I couldn't see anything obviously better - // than one attempt in ad hoc testing. - for _ in 0..1 { - let mut stack = match self.stacks[stack_id].0.try_lock() { - Err(_) => continue, - Ok(stack) => stack, - }; - if let Some(value) = stack.pop() { - return self.guard_stack(value); - } - // Unlock the mutex guarding the stack before creating a fresh - // value since we no longer need the stack. - drop(stack); - let value = Box::new((self.create)()); - return self.guard_stack(value); - } - // We're only here if we could get access to our stack, so just - // create a new value. This seems like it could be wasteful, but - // waiting for exclusive access to a stack when there's high - // contention is brutal for perf. - self.guard_stack_transient(Box::new((self.create)())) - } - - /// Puts a value back into the pool. Callers don't need to call this. - /// Once the guard that's returned by 'get' is dropped, it is put back - /// into the pool automatically. - #[inline] - fn put_value(&self, value: Box) { - let caller = THREAD_ID.with(|id| *id); - let stack_id = caller % self.stacks.len(); - // As with trying to pop a value from this thread's stack, we - // merely attempt to get access to push this value back on the - // stack. If there's too much contention, we just give up and throw - // the value away. - // - // Interestingly, in ad hoc benchmarking, it is beneficial to - // attempt to push the value back more than once, unlike when - // popping the value. I don't have a good theory for why this is. - // I guess if we drop too many values then that winds up forcing - // the pop operation to create new fresh values and thus leads to - // less reuse. There's definitely a balancing act here. 
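The contention policy in `get_slow` (try the shard's lock without blocking, and on failure build a fresh value rather than wait) reduces to a small pattern. `pop_or_create` below is a hypothetical, self-contained version of that idea, not the vendored code.

```rust
use std::sync::Mutex;

// Try the lock once; if another thread holds it, allocate a fresh
// value instead of stalling the search on the mutex.
fn pop_or_create<T>(stack: &Mutex<Vec<T>>, create: impl Fn() -> T) -> T {
    match stack.try_lock() {
        Ok(mut guard) => guard.pop().unwrap_or_else(|| create()),
        Err(_) => create(),
    }
}

fn main() {
    let stack: Mutex<Vec<Vec<u8>>> = Mutex::new(vec![vec![1, 2, 3]]);
    let reused = pop_or_create(&stack, Vec::new);
    assert_eq!(reused, vec![1, 2, 3]); // got the pooled value back
    let fresh = pop_or_create(&stack, Vec::new);
    assert!(fresh.is_empty()); // stack was empty, so one was created
}
```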
- for _ in 0..10 { - let mut stack = match self.stacks[stack_id].0.try_lock() { - Err(_) => continue, - Ok(stack) => stack, - }; - stack.push(value); - return; - } - } - - /// Create a guard that represents the special owned T. - #[inline] - fn guard_owned(&self, caller: usize) -> PoolGuard<'_, T, F> { - PoolGuard { pool: self, value: Err(caller), discard: false } - } - - /// Create a guard that contains a value from the pool's stack. - #[inline] - fn guard_stack(&self, value: Box) -> PoolGuard<'_, T, F> { - PoolGuard { pool: self, value: Ok(value), discard: false } - } - - /// Create a guard that contains a value from the pool's stack with an - /// instruction to throw away the value instead of putting it back - /// into the pool. - #[inline] - fn guard_stack_transient(&self, value: Box) -> PoolGuard<'_, T, F> { - PoolGuard { pool: self, value: Ok(value), discard: true } - } - } - - impl core::fmt::Debug for Pool { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - f.debug_struct("Pool") - .field("stacks", &self.stacks) - .field("owner", &self.owner) - .field("owner_val", &self.owner_val) - .finish() - } - } - - /// A guard that is returned when a caller requests a value from the pool. - pub(super) struct PoolGuard<'a, T: Send, F: Fn() -> T> { - /// The pool that this guard is attached to. - pool: &'a Pool, - /// This is Err when the guard represents the special "owned" value. - /// In which case, the value is retrieved from 'pool.owner_val'. And - /// in the special case of `Err(THREAD_ID_DROPPED)`, it means the - /// guard has been put back into the pool and should no longer be used. - value: Result, usize>, - /// When true, the value should be discarded instead of being pushed - /// back into the pool. We tend to use this under high contention, and - /// this allows us to avoid inflating the size of the pool. (Because - /// under contention, we tend to create more values instead of waiting - /// for access to a stack of existing values.) - discard: bool, - } - - impl<'a, T: Send, F: Fn() -> T> PoolGuard<'a, T, F> { - /// Return the underlying value. - #[inline] - pub(super) fn value(&self) -> &T { - match self.value { - Ok(ref v) => v, - // SAFETY: This is safe because the only way a PoolGuard gets - // created for self.value=Err is when the current thread - // corresponds to the owning thread, of which there can only - // be one. Thus, we are guaranteed to be providing exclusive - // access here which makes this safe. - // - // Also, since 'owner_val' is guaranteed to be initialized - // before an owned PoolGuard is created, the unchecked unwrap - // is safe. - Err(id) => unsafe { - // This assert is *not* necessary for safety, since we - // should never be here if the guard had been put back into - // the pool. This is a sanity check to make sure we didn't - // break an internal invariant. - debug_assert_ne!(THREAD_ID_DROPPED, id); - (*self.pool.owner_val.get()).as_ref().unwrap_unchecked() - }, - } - } - - /// Return the underlying value as a mutable borrow. - #[inline] - pub(super) fn value_mut(&mut self) -> &mut T { - match self.value { - Ok(ref mut v) => v, - // SAFETY: This is safe because the only way a PoolGuard gets - // created for self.value=None is when the current thread - // corresponds to the owning thread, of which there can only - // be one. Thus, we are guaranteed to be providing exclusive - // access here which makes this safe. 
- // - // Also, since 'owner_val' is guaranteed to be initialized - // before an owned PoolGuard is created, the unwrap_unchecked - // is safe. - Err(id) => unsafe { - // This assert is *not* necessary for safety, since we - // should never be here if the guard had been put back into - // the pool. This is a sanity check to make sure we didn't - // break an internal invariant. - debug_assert_ne!(THREAD_ID_DROPPED, id); - (*self.pool.owner_val.get()).as_mut().unwrap_unchecked() - }, - } - } - - /// Consumes this guard and puts it back into the pool. - #[inline] - pub(super) fn put(this: PoolGuard<'_, T, F>) { - // Since this is effectively consuming the guard and putting the - // value back into the pool, there's no reason to run its Drop - // impl after doing this. I don't believe there is a correctness - // problem with doing so, but there's definitely a perf problem - // by redoing this work. So we avoid it. - let mut this = core::mem::ManuallyDrop::new(this); - this.put_imp(); - } - - /// Puts this guard back into the pool by only borrowing the guard as - /// mutable. This should be called at most once. - #[inline(always)] - fn put_imp(&mut self) { - match core::mem::replace(&mut self.value, Err(THREAD_ID_DROPPED)) { - Ok(value) => { - // If we were told to discard this value then don't bother - // trying to put it back into the pool. This occurs when - // the pop operation failed to acquire a lock and we - // decided to create a new value in lieu of contending for - // the lock. - if self.discard { - return; - } - self.pool.put_value(value); - } - // If this guard has a value "owned" by the thread, then - // the Pool guarantees that this is the ONLY such guard. - // Therefore, in order to place it back into the pool and make - // it available, we need to change the owner back to the owning - // thread's ID. But note that we use the ID that was stored in - // the guard, since a guard can be moved to another thread and - // dropped. (A previous iteration of this code read from the - // THREAD_ID thread local, which uses the ID of the current - // thread which may not be the ID of the owning thread! This - // also avoids the TLS access, which is likely a hair faster.) - Err(owner) => { - // If we hit this point, it implies 'put_imp' has been - // called multiple times for the same guard which in turn - // corresponds to a bug in this implementation. - assert_ne!(THREAD_ID_DROPPED, owner); - self.pool.owner.store(owner, Ordering::Release); - } - } - } - } - - impl<'a, T: Send, F: Fn() -> T> Drop for PoolGuard<'a, T, F> { - #[inline] - fn drop(&mut self) { - self.put_imp(); - } - } - - impl<'a, T: Send + core::fmt::Debug, F: Fn() -> T> core::fmt::Debug - for PoolGuard<'a, T, F> - { - fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { - f.debug_struct("PoolGuard") - .field("pool", &self.pool) - .field("value", &self.value) - .finish() - } - } -} - -// FUTURE: We should consider using Mara Bos's nearly-lock-free version of this -// here: https://gist.github.com/m-ou-se/5fdcbdf7dcf4585199ce2de697f367a4. -// -// One reason why I did things with a "mutex" below is that it isolates the -// safety concerns to just the Mutex, where as the safety of Mara's pool is a -// bit more sprawling. I also expect this code to not be used that much, and -// so is unlikely to get as much real world usage with which to test it. That -// means the "obviously correct" lever is an important one. 
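The `ManuallyDrop` trick used by `put` above (consume the guard, run the put-back logic once, and suppress the destructor so the work is not repeated) looks like this in isolation; `Guard` is a hypothetical type for the sketch.

```rust
use std::mem::ManuallyDrop;

struct Guard {
    returned: bool,
}

impl Guard {
    fn put_imp(&mut self) {
        if !self.returned {
            self.returned = true;
            println!("value handed back to the pool");
        }
    }

    // Explicit put: wrap the guard in ManuallyDrop so its destructor
    // below never runs and the put-back work happens exactly once.
    fn put(this: Guard) {
        let mut this = ManuallyDrop::new(this);
        this.put_imp();
    }
}

impl Drop for Guard {
    fn drop(&mut self) {
        self.put_imp();
    }
}

fn main() {
    let g = Guard { returned: false };
    Guard::put(g); // prints once; the Drop impl is skipped entirely
}
```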
-// -// The specific reason to use Mara's pool is that it is likely faster and also -// less likely to hit problems with spin-locks, although it is not completely -// impervious to them. -// -// The best solution to this problem, probably, is a truly lock free pool. That -// could be done with a lock free linked list. The issue is the ABA problem. It -// is difficult to avoid, and doing so is complex. BUT, the upshot of that is -// that if we had a truly lock free pool, then we could also use it above in -// the 'std' pool instead of a Mutex because it should be completely free the -// problems that come from spin-locks. -#[cfg(not(feature = "std"))] -mod inner { - use core::{ - cell::UnsafeCell, - panic::{RefUnwindSafe, UnwindSafe}, - sync::atomic::{AtomicBool, Ordering}, - }; - - use alloc::{boxed::Box, vec, vec::Vec}; - - /// A thread safe pool utilizing alloc-only features. - /// - /// Unlike the std version, it doesn't seem possible(?) to implement the - /// "thread owner" optimization because alloc-only doesn't have any concept - /// of threads. So the best we can do is just a normal stack. This will - /// increase latency in alloc-only environments. - pub(super) struct Pool { - /// A stack of T values to hand out. These are used when a Pool is - /// accessed by a thread that didn't create it. - stack: Mutex>>, - /// A function to create more T values when stack is empty and a caller - /// has requested a T. - create: F, - } - - // If T is UnwindSafe, then since we provide exclusive access to any - // particular value in the pool, it should therefore also be considered - // RefUnwindSafe. - impl RefUnwindSafe for Pool {} - - impl Pool { - /// Create a new pool. The given closure is used to create values in - /// the pool when necessary. - pub(super) const fn new(create: F) -> Pool { - Pool { stack: Mutex::new(vec![]), create } - } - } - - impl T> Pool { - /// Get a value from the pool. This may block if another thread is also - /// attempting to retrieve a value from the pool. - #[inline] - pub(super) fn get(&self) -> PoolGuard<'_, T, F> { - let mut stack = self.stack.lock(); - let value = match stack.pop() { - None => Box::new((self.create)()), - Some(value) => value, - }; - PoolGuard { pool: self, value: Some(value) } - } - - #[inline] - fn put(&self, guard: PoolGuard<'_, T, F>) { - let mut guard = core::mem::ManuallyDrop::new(guard); - if let Some(value) = guard.value.take() { - self.put_value(value); - } - } - - /// Puts a value back into the pool. Callers don't need to call this. - /// Once the guard that's returned by 'get' is dropped, it is put back - /// into the pool automatically. - #[inline] - fn put_value(&self, value: Box) { - let mut stack = self.stack.lock(); - stack.push(value); - } - } - - impl core::fmt::Debug for Pool { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - f.debug_struct("Pool").field("stack", &self.stack).finish() - } - } - - /// A guard that is returned when a caller requests a value from the pool. - pub(super) struct PoolGuard<'a, T: Send, F: Fn() -> T> { - /// The pool that this guard is attached to. - pool: &'a Pool, - /// This is None after the guard has been put back into the pool. - value: Option>, - } - - impl<'a, T: Send, F: Fn() -> T> PoolGuard<'a, T, F> { - /// Return the underlying value. - #[inline] - pub(super) fn value(&self) -> &T { - self.value.as_deref().unwrap() - } - - /// Return the underlying value as a mutable borrow. 
- #[inline] - pub(super) fn value_mut(&mut self) -> &mut T { - self.value.as_deref_mut().unwrap() - } - - /// Consumes this guard and puts it back into the pool. - #[inline] - pub(super) fn put(this: PoolGuard<'_, T, F>) { - // Since this is effectively consuming the guard and putting the - // value back into the pool, there's no reason to run its Drop - // impl after doing this. I don't believe there is a correctness - // problem with doing so, but there's definitely a perf problem - // by redoing this work. So we avoid it. - let mut this = core::mem::ManuallyDrop::new(this); - this.put_imp(); - } - - /// Puts this guard back into the pool by only borrowing the guard as - /// mutable. This should be called at most once. - #[inline(always)] - fn put_imp(&mut self) { - if let Some(value) = self.value.take() { - self.pool.put_value(value); - } - } - } - - impl<'a, T: Send, F: Fn() -> T> Drop for PoolGuard<'a, T, F> { - #[inline] - fn drop(&mut self) { - self.put_imp(); - } - } - - impl<'a, T: Send + core::fmt::Debug, F: Fn() -> T> core::fmt::Debug - for PoolGuard<'a, T, F> - { - fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { - f.debug_struct("PoolGuard") - .field("pool", &self.pool) - .field("value", &self.value) - .finish() - } - } - - /// A spin-lock based mutex. Yes, I have read spinlocks considered - /// harmful[1], and if there's a reasonable alternative choice, I'll - /// happily take it. - /// - /// I suspect the most likely alternative here is a Treiber stack, but - /// implementing one correctly in a way that avoids the ABA problem looks - /// subtle enough that I'm not sure I want to attempt that. But otherwise, - /// we only need a mutex in order to implement our pool, so if there's - /// something simpler we can use that works for our `Pool` use case, then - /// that would be great. - /// - /// Note that this mutex does not do poisoning. - /// - /// [1]: https://matklad.github.io/2020/01/02/spinlocks-considered-harmful.html - #[derive(Debug)] - struct Mutex { - locked: AtomicBool, - data: UnsafeCell, - } - - // SAFETY: Since a Mutex guarantees exclusive access, as long as we can - // send it across threads, it must also be Sync. - unsafe impl Sync for Mutex {} - - impl Mutex { - /// Create a new mutex for protecting access to the given value across - /// multiple threads simultaneously. - const fn new(value: T) -> Mutex { - Mutex { - locked: AtomicBool::new(false), - data: UnsafeCell::new(value), - } - } - - /// Lock this mutex and return a guard providing exclusive access to - /// `T`. This blocks if some other thread has already locked this - /// mutex. - #[inline] - fn lock(&self) -> MutexGuard<'_, T> { - while self - .locked - .compare_exchange( - false, - true, - Ordering::AcqRel, - Ordering::Acquire, - ) - .is_err() - { - core::hint::spin_loop(); - } - // SAFETY: The only way we're here is if we successfully set - // 'locked' to true, which implies we must be the only thread here - // and thus have exclusive access to 'data'. - let data = unsafe { &mut *self.data.get() }; - MutexGuard { locked: &self.locked, data } - } - } - - /// A guard that derefs to &T and &mut T. When it's dropped, the lock is - /// released. 
- #[derive(Debug)] - struct MutexGuard<'a, T> { - locked: &'a AtomicBool, - data: &'a mut T, - } - - impl<'a, T> core::ops::Deref for MutexGuard<'a, T> { - type Target = T; - - #[inline] - fn deref(&self) -> &T { - self.data - } - } - - impl<'a, T> core::ops::DerefMut for MutexGuard<'a, T> { - #[inline] - fn deref_mut(&mut self) -> &mut T { - self.data - } - } - - impl<'a, T> Drop for MutexGuard<'a, T> { - #[inline] - fn drop(&mut self) { - // Drop means 'data' is no longer accessible, so we can unlock - // the mutex. - self.locked.store(false, Ordering::Release); - } - } -} - -#[cfg(test)] -mod tests { - use core::panic::{RefUnwindSafe, UnwindSafe}; - - use alloc::{boxed::Box, vec, vec::Vec}; - - use super::*; - - #[test] - fn oibits() { - fn assert_oitbits() {} - assert_oitbits::>>(); - assert_oitbits::>>>(); - assert_oitbits::< - Pool< - Vec, - Box< - dyn Fn() -> Vec - + Send - + Sync - + UnwindSafe - + RefUnwindSafe, - >, - >, - >(); - } - - // Tests that Pool implements the "single owner" optimization. That is, the - // thread that first accesses the pool gets its own copy, while all other - // threads get distinct copies. - #[cfg(feature = "std")] - #[test] - fn thread_owner_optimization() { - use std::{cell::RefCell, sync::Arc, vec}; - - let pool: Arc>>> = - Arc::new(Pool::new(|| RefCell::new(vec!['a']))); - pool.get().borrow_mut().push('x'); - - let pool1 = pool.clone(); - let t1 = std::thread::spawn(move || { - let guard = pool1.get(); - guard.borrow_mut().push('y'); - }); - - let pool2 = pool.clone(); - let t2 = std::thread::spawn(move || { - let guard = pool2.get(); - guard.borrow_mut().push('z'); - }); - - t1.join().unwrap(); - t2.join().unwrap(); - - // If we didn't implement the single owner optimization, then one of - // the threads above is likely to have mutated the [a, x] vec that - // we stuffed in the pool before spawning the threads. But since - // neither thread was first to access the pool, and because of the - // optimization, we should be guaranteed that neither thread mutates - // the special owned pool value. - // - // (Technically this is an implementation detail and not a contract of - // Pool's API.) - assert_eq!(vec!['a', 'x'], *pool.get().borrow()); - } - - // This tests that if the "owner" of a pool asks for two values, then it - // gets two distinct values and not the same one. This test failed in the - // course of developing the pool, which in turn resulted in UB because it - // permitted getting aliasing &mut borrows to the same place in memory. - #[test] - fn thread_owner_distinct() { - let pool = Pool::new(|| vec!['a']); - - { - let mut g1 = pool.get(); - let v1 = &mut *g1; - let mut g2 = pool.get(); - let v2 = &mut *g2; - v1.push('b'); - v2.push('c'); - assert_eq!(&mut vec!['a', 'b'], v1); - assert_eq!(&mut vec!['a', 'c'], v2); - } - // This isn't technically guaranteed, but we - // expect to now get the "owned" value (the first - // call to 'get()' above) now that it's back in - // the pool. - assert_eq!(&mut vec!['a', 'b'], &mut *pool.get()); - } - - // This tests that we can share a guard with another thread, mutate the - // underlying value and everything works. This failed in the course of - // developing a pool since the pool permitted 'get()' to return the same - // value to the owner thread, even before the previous value was put back - // into the pool. This in turn resulted in this test producing a data race. 
- #[cfg(feature = "std")] - #[test] - fn thread_owner_sync() { - let pool = Pool::new(|| vec!['a']); - { - let mut g1 = pool.get(); - let mut g2 = pool.get(); - std::thread::scope(|s| { - s.spawn(|| { - g1.push('b'); - }); - s.spawn(|| { - g2.push('c'); - }); - }); - - let v1 = &mut *g1; - let v2 = &mut *g2; - assert_eq!(&mut vec!['a', 'b'], v1); - assert_eq!(&mut vec!['a', 'c'], v2); - } - - // This isn't technically guaranteed, but we - // expect to now get the "owned" value (the first - // call to 'get()' above) now that it's back in - // the pool. - assert_eq!(&mut vec!['a', 'b'], &mut *pool.get()); - } - - // This tests that if we move a PoolGuard that is owned by the current - // thread to another thread and drop it, then the thread owner doesn't - // change. During development of the pool, this test failed because the - // PoolGuard assumed it was dropped in the same thread from which it was - // created, and thus used the current thread's ID as the owner, which could - // be different than the actual owner of the pool. - #[cfg(feature = "std")] - #[test] - fn thread_owner_send_drop() { - let pool = Pool::new(|| vec!['a']); - // Establishes this thread as the owner. - { - pool.get().push('b'); - } - std::thread::scope(|s| { - // Sanity check that we get the same value back. - // (Not technically guaranteed.) - let mut g = pool.get(); - assert_eq!(&vec!['a', 'b'], &*g); - // Now push it to another thread and drop it. - s.spawn(move || { - g.push('c'); - }) - .join() - .unwrap(); - }); - // Now check that we're still the owner. This is not technically - // guaranteed by the API, but is true in practice given the thread - // owner optimization. - assert_eq!(&vec!['a', 'b', 'c'], &*pool.get()); - } -} diff --git a/vendor/regex-automata/src/util/prefilter/aho_corasick.rs b/vendor/regex-automata/src/util/prefilter/aho_corasick.rs deleted file mode 100644 index 7a2517fc7b2156..00000000000000 --- a/vendor/regex-automata/src/util/prefilter/aho_corasick.rs +++ /dev/null @@ -1,149 +0,0 @@ -use crate::util::{ - prefilter::PrefilterI, - search::{MatchKind, Span}, -}; - -#[derive(Clone, Debug)] -pub(crate) struct AhoCorasick { - #[cfg(not(feature = "perf-literal-multisubstring"))] - _unused: (), - #[cfg(feature = "perf-literal-multisubstring")] - ac: aho_corasick::AhoCorasick, -} - -impl AhoCorasick { - pub(crate) fn new>( - kind: MatchKind, - needles: &[B], - ) -> Option { - #[cfg(not(feature = "perf-literal-multisubstring"))] - { - None - } - #[cfg(feature = "perf-literal-multisubstring")] - { - // We used to use `aho_corasick::MatchKind::Standard` here when - // `kind` was `MatchKind::All`, but this is not correct. The - // "standard" Aho-Corasick match semantics are to report a match - // immediately as soon as it is seen, but `All` isn't like that. - // In particular, with "standard" semantics, given the needles - // "abc" and "b" and the haystack "abc," it would report a match - // at offset 1 before a match at offset 0. This is never what we - // want in the context of the regex engine, regardless of whether - // we have leftmost-first or 'all' semantics. Namely, we always - // want the leftmost match. - let ac_match_kind = match kind { - MatchKind::LeftmostFirst | MatchKind::All => { - aho_corasick::MatchKind::LeftmostFirst - } - }; - // This is kind of just an arbitrary number, but basically, if we - // have a small enough set of literals, then we try to use the VERY - // memory hungry DFA. Otherwise, we wimp out and use an NFA. 
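The match-kind discussion above ("standard" semantics report the earliest-seen match, leftmost-first reports the earliest-starting one) can be checked directly against the aho-corasick crate. This sketch assumes aho-corasick 1.x is available as a dependency and is only an illustration of that behavior.

```rust
use aho_corasick::{AhoCorasick, MatchKind};

fn main() {
    let hay = "abc";

    // "Standard" semantics report the first match the automaton sees
    // while scanning, which here is "b" at 1..2 ...
    let standard = AhoCorasick::builder()
        .match_kind(MatchKind::Standard)
        .build(["abc", "b"])
        .unwrap();
    let m = standard.find(hay).unwrap();
    assert_eq!((m.start(), m.end()), (1, 2));

    // ... while leftmost-first reports the earliest-starting match,
    // which is what a regex prefilter needs.
    let leftmost = AhoCorasick::builder()
        .match_kind(MatchKind::LeftmostFirst)
        .build(["abc", "b"])
        .unwrap();
    let m = leftmost.find(hay).unwrap();
    assert_eq!((m.start(), m.end()), (0, 3));
}
```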
The - // upshot is that the NFA is quite lean and decently fast. Faster - // than a naive Aho-Corasick NFA anyway. - let ac_kind = if needles.len() <= 500 { - aho_corasick::AhoCorasickKind::DFA - } else { - aho_corasick::AhoCorasickKind::ContiguousNFA - }; - let result = aho_corasick::AhoCorasick::builder() - .kind(Some(ac_kind)) - .match_kind(ac_match_kind) - .start_kind(aho_corasick::StartKind::Both) - // We try to handle all of the prefilter cases in the super - // module, and only use Aho-Corasick for the actual automaton. - // The aho-corasick crate does have some extra prefilters, - // namely, looking for rare bytes to feed to memchr{,2,3} - // instead of just the first byte. If we end up wanting - // those---and they are somewhat tricky to implement---then - // we could port them to this crate. - // - // The main reason for doing things this way is so we have a - // complete and easy to understand picture of which prefilters - // are available and how they work. Otherwise it seems too - // easy to get into a situation where we have a prefilter - // layered on top of prefilter, and that might have unintended - // consequences. - .prefilter(false) - .build(needles); - let ac = match result { - Ok(ac) => ac, - Err(_err) => { - debug!("aho-corasick prefilter failed to build: {_err}"); - return None; - } - }; - Some(AhoCorasick { ac }) - } - } -} - -impl PrefilterI for AhoCorasick { - fn find(&self, haystack: &[u8], span: Span) -> Option { - #[cfg(not(feature = "perf-literal-multisubstring"))] - { - unreachable!() - } - #[cfg(feature = "perf-literal-multisubstring")] - { - let input = - aho_corasick::Input::new(haystack).span(span.start..span.end); - self.ac - .find(input) - .map(|m| Span { start: m.start(), end: m.end() }) - } - } - - fn prefix(&self, haystack: &[u8], span: Span) -> Option { - #[cfg(not(feature = "perf-literal-multisubstring"))] - { - unreachable!() - } - #[cfg(feature = "perf-literal-multisubstring")] - { - let input = aho_corasick::Input::new(haystack) - .anchored(aho_corasick::Anchored::Yes) - .span(span.start..span.end); - self.ac - .find(input) - .map(|m| Span { start: m.start(), end: m.end() }) - } - } - - fn memory_usage(&self) -> usize { - #[cfg(not(feature = "perf-literal-multisubstring"))] - { - unreachable!() - } - #[cfg(feature = "perf-literal-multisubstring")] - { - self.ac.memory_usage() - } - } - - fn is_fast(&self) -> bool { - #[cfg(not(feature = "perf-literal-multisubstring"))] - { - unreachable!() - } - #[cfg(feature = "perf-literal-multisubstring")] - { - // Aho-Corasick is never considered "fast" because it's never - // going to be even close to an order of magnitude faster than the - // regex engine itself (assuming a DFA is used). In fact, it is - // usually slower. The magic of Aho-Corasick is that it can search - // a *large* number of literals with a relatively small amount of - // memory. The regex engines are far more wasteful. - // - // Aho-Corasick may be "fast" when the regex engine corresponds - // to, say, the PikeVM. That happens when the lazy DFA couldn't be - // built or used for some reason. But in these cases, the regex - // itself is likely quite big and we're probably hosed no matter - // what we do. (In this case, the best bet is for the caller to - // increase some of the memory limits on the hybrid cache capacity - // and hope that's enough.) 
- false - } - } -} diff --git a/vendor/regex-automata/src/util/prefilter/byteset.rs b/vendor/regex-automata/src/util/prefilter/byteset.rs deleted file mode 100644 index a669d6c9d7b696..00000000000000 --- a/vendor/regex-automata/src/util/prefilter/byteset.rs +++ /dev/null @@ -1,58 +0,0 @@ -use crate::util::{ - prefilter::PrefilterI, - search::{MatchKind, Span}, -}; - -#[derive(Clone, Debug)] -pub(crate) struct ByteSet([bool; 256]); - -impl ByteSet { - pub(crate) fn new>( - _kind: MatchKind, - needles: &[B], - ) -> Option { - #[cfg(not(feature = "perf-literal-multisubstring"))] - { - None - } - #[cfg(feature = "perf-literal-multisubstring")] - { - let mut set = [false; 256]; - for needle in needles.iter() { - let needle = needle.as_ref(); - if needle.len() != 1 { - return None; - } - set[usize::from(needle[0])] = true; - } - Some(ByteSet(set)) - } - } -} - -impl PrefilterI for ByteSet { - fn find(&self, haystack: &[u8], span: Span) -> Option { - haystack[span].iter().position(|&b| self.0[usize::from(b)]).map(|i| { - let start = span.start + i; - let end = start + 1; - Span { start, end } - }) - } - - fn prefix(&self, haystack: &[u8], span: Span) -> Option { - let b = *haystack.get(span.start)?; - if self.0[usize::from(b)] { - Some(Span { start: span.start, end: span.start + 1 }) - } else { - None - } - } - - fn memory_usage(&self) -> usize { - 0 - } - - fn is_fast(&self) -> bool { - false - } -} diff --git a/vendor/regex-automata/src/util/prefilter/memchr.rs b/vendor/regex-automata/src/util/prefilter/memchr.rs deleted file mode 100644 index 3d44b837219060..00000000000000 --- a/vendor/regex-automata/src/util/prefilter/memchr.rs +++ /dev/null @@ -1,186 +0,0 @@ -use crate::util::{ - prefilter::PrefilterI, - search::{MatchKind, Span}, -}; - -#[derive(Clone, Debug)] -pub(crate) struct Memchr(u8); - -impl Memchr { - pub(crate) fn new>( - _kind: MatchKind, - needles: &[B], - ) -> Option { - #[cfg(not(feature = "perf-literal-substring"))] - { - None - } - #[cfg(feature = "perf-literal-substring")] - { - if needles.len() != 1 { - return None; - } - if needles[0].as_ref().len() != 1 { - return None; - } - Some(Memchr(needles[0].as_ref()[0])) - } - } -} - -impl PrefilterI for Memchr { - fn find(&self, haystack: &[u8], span: Span) -> Option { - #[cfg(not(feature = "perf-literal-substring"))] - { - unreachable!() - } - #[cfg(feature = "perf-literal-substring")] - { - memchr::memchr(self.0, &haystack[span]).map(|i| { - let start = span.start + i; - let end = start + 1; - Span { start, end } - }) - } - } - - fn prefix(&self, haystack: &[u8], span: Span) -> Option { - let b = *haystack.get(span.start)?; - if self.0 == b { - Some(Span { start: span.start, end: span.start + 1 }) - } else { - None - } - } - - fn memory_usage(&self) -> usize { - 0 - } - - fn is_fast(&self) -> bool { - true - } -} - -#[derive(Clone, Debug)] -pub(crate) struct Memchr2(u8, u8); - -impl Memchr2 { - pub(crate) fn new>( - _kind: MatchKind, - needles: &[B], - ) -> Option { - #[cfg(not(feature = "perf-literal-substring"))] - { - None - } - #[cfg(feature = "perf-literal-substring")] - { - if needles.len() != 2 { - return None; - } - if !needles.iter().all(|n| n.as_ref().len() == 1) { - return None; - } - let b1 = needles[0].as_ref()[0]; - let b2 = needles[1].as_ref()[0]; - Some(Memchr2(b1, b2)) - } - } -} - -impl PrefilterI for Memchr2 { - fn find(&self, haystack: &[u8], span: Span) -> Option { - #[cfg(not(feature = "perf-literal-substring"))] - { - unreachable!() - } - #[cfg(feature = "perf-literal-substring")] - { - 
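The `ByteSet` prefilter above is just a 256-entry lookup table over single-byte needles. A free-standing sketch of the same scan, with hypothetical names:

```rust
// One table hit per haystack byte: true entries mark the single-byte
// needles, and the first marked byte is the candidate position.
fn byteset_find(set: &[bool; 256], haystack: &[u8]) -> Option<usize> {
    haystack.iter().position(|&b| set[usize::from(b)])
}

fn main() {
    let mut set = [false; 256];
    for &b in b"aeiou" {
        set[usize::from(b)] = true;
    }
    // The first vowel is the candidate the regex engine would then
    // try to confirm as a real match.
    assert_eq!(byteset_find(&set, b"xyz hello"), Some(5));
}
```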
memchr::memchr2(self.0, self.1, &haystack[span]).map(|i| { - let start = span.start + i; - let end = start + 1; - Span { start, end } - }) - } - } - - fn prefix(&self, haystack: &[u8], span: Span) -> Option { - let b = *haystack.get(span.start)?; - if self.0 == b || self.1 == b { - Some(Span { start: span.start, end: span.start + 1 }) - } else { - None - } - } - - fn memory_usage(&self) -> usize { - 0 - } - - fn is_fast(&self) -> bool { - true - } -} - -#[derive(Clone, Debug)] -pub(crate) struct Memchr3(u8, u8, u8); - -impl Memchr3 { - pub(crate) fn new>( - _kind: MatchKind, - needles: &[B], - ) -> Option { - #[cfg(not(feature = "perf-literal-substring"))] - { - None - } - #[cfg(feature = "perf-literal-substring")] - { - if needles.len() != 3 { - return None; - } - if !needles.iter().all(|n| n.as_ref().len() == 1) { - return None; - } - let b1 = needles[0].as_ref()[0]; - let b2 = needles[1].as_ref()[0]; - let b3 = needles[2].as_ref()[0]; - Some(Memchr3(b1, b2, b3)) - } - } -} - -impl PrefilterI for Memchr3 { - fn find(&self, haystack: &[u8], span: Span) -> Option { - #[cfg(not(feature = "perf-literal-substring"))] - { - unreachable!() - } - #[cfg(feature = "perf-literal-substring")] - { - memchr::memchr3(self.0, self.1, self.2, &haystack[span]).map(|i| { - let start = span.start + i; - let end = start + 1; - Span { start, end } - }) - } - } - - fn prefix(&self, haystack: &[u8], span: Span) -> Option { - let b = *haystack.get(span.start)?; - if self.0 == b || self.1 == b || self.2 == b { - Some(Span { start: span.start, end: span.start + 1 }) - } else { - None - } - } - - fn memory_usage(&self) -> usize { - 0 - } - - fn is_fast(&self) -> bool { - true - } -} diff --git a/vendor/regex-automata/src/util/prefilter/memmem.rs b/vendor/regex-automata/src/util/prefilter/memmem.rs deleted file mode 100644 index deea17bd9ded14..00000000000000 --- a/vendor/regex-automata/src/util/prefilter/memmem.rs +++ /dev/null @@ -1,88 +0,0 @@ -use crate::util::{ - prefilter::PrefilterI, - search::{MatchKind, Span}, -}; - -#[derive(Clone, Debug)] -pub(crate) struct Memmem { - #[cfg(not(all(feature = "std", feature = "perf-literal-substring")))] - _unused: (), - #[cfg(all(feature = "std", feature = "perf-literal-substring"))] - finder: memchr::memmem::Finder<'static>, -} - -impl Memmem { - pub(crate) fn new>( - _kind: MatchKind, - needles: &[B], - ) -> Option { - #[cfg(not(all(feature = "std", feature = "perf-literal-substring")))] - { - None - } - #[cfg(all(feature = "std", feature = "perf-literal-substring"))] - { - if needles.len() != 1 { - return None; - } - let needle = needles[0].as_ref(); - let finder = memchr::memmem::Finder::new(needle).into_owned(); - Some(Memmem { finder }) - } - } -} - -impl PrefilterI for Memmem { - fn find(&self, haystack: &[u8], span: Span) -> Option { - #[cfg(not(all(feature = "std", feature = "perf-literal-substring")))] - { - unreachable!() - } - #[cfg(all(feature = "std", feature = "perf-literal-substring"))] - { - self.finder.find(&haystack[span]).map(|i| { - let start = span.start + i; - let end = start + self.finder.needle().len(); - Span { start, end } - }) - } - } - - fn prefix(&self, haystack: &[u8], span: Span) -> Option { - #[cfg(not(all(feature = "std", feature = "perf-literal-substring")))] - { - unreachable!() - } - #[cfg(all(feature = "std", feature = "perf-literal-substring"))] - { - let needle = self.finder.needle(); - if haystack[span].starts_with(needle) { - Some(Span { end: span.start + needle.len(), ..span }) - } else { - None - } - } - } - - fn 
memory_usage(&self) -> usize { - #[cfg(not(all(feature = "std", feature = "perf-literal-substring")))] - { - unreachable!() - } - #[cfg(all(feature = "std", feature = "perf-literal-substring"))] - { - self.finder.needle().len() - } - } - - fn is_fast(&self) -> bool { - #[cfg(not(all(feature = "std", feature = "perf-literal-substring")))] - { - unreachable!() - } - #[cfg(all(feature = "std", feature = "perf-literal-substring"))] - { - true - } - } -} diff --git a/vendor/regex-automata/src/util/prefilter/mod.rs b/vendor/regex-automata/src/util/prefilter/mod.rs deleted file mode 100644 index f95adff05c882e..00000000000000 --- a/vendor/regex-automata/src/util/prefilter/mod.rs +++ /dev/null @@ -1,719 +0,0 @@ -/*! -Defines a prefilter for accelerating regex searches. - -A prefilter can be created by building a [`Prefilter`] value. - -A prefilter represents one of the most important optimizations available for -accelerating regex searches. The idea of a prefilter is to very quickly find -candidate locations in a haystack where a regex _could_ match. Once a candidate -is found, it is then intended for the regex engine to run at that position to -determine whether the candidate is a match or a false positive. - -In the aforementioned description of the prefilter optimization also lay its -demise. Namely, if a prefilter has a high false positive rate and it produces -lots of candidates, then a prefilter can overall make a regex search slower. -It can run more slowly because more time is spent ping-ponging between the -prefilter search and the regex engine attempting to confirm each candidate as -a match. This ping-ponging has overhead that adds up, and is exacerbated by -a high false positive rate. - -Nevertheless, the optimization is still generally worth performing in most -cases. Particularly given just how much throughput can be improved. (It is not -uncommon for prefilter optimizations to improve throughput by one or two orders -of magnitude.) - -Typically a prefilter is used to find occurrences of literal prefixes from a -regex pattern, but this isn't required. A prefilter can be used to look for -suffixes or even inner literals. - -Note that as of now, prefilters throw away information about which pattern -each literal comes from. In other words, when a prefilter finds a match, -there's no way to know which pattern (or patterns) it came from. Therefore, -in order to confirm a match, you'll have to check all of the patterns by -running the full regex engine. -*/ - -mod aho_corasick; -mod byteset; -mod memchr; -mod memmem; -mod teddy; - -use core::{ - borrow::Borrow, - fmt::Debug, - panic::{RefUnwindSafe, UnwindSafe}, -}; - -#[cfg(feature = "alloc")] -use alloc::sync::Arc; - -#[cfg(feature = "syntax")] -use regex_syntax::hir::{literal, Hir}; - -use crate::util::search::{MatchKind, Span}; - -pub(crate) use crate::util::prefilter::{ - aho_corasick::AhoCorasick, - byteset::ByteSet, - memchr::{Memchr, Memchr2, Memchr3}, - memmem::Memmem, - teddy::Teddy, -}; - -/// A prefilter for accelerating regex searches. -/// -/// If you already have your literals that you want to search with, -/// then the vanilla [`Prefilter::new`] constructor is for you. But -/// if you have an [`Hir`] value from the `regex-syntax` crate, then -/// [`Prefilter::from_hir_prefix`] might be more convenient. Namely, it uses -/// the [`regex-syntax::hir::literal`](regex_syntax::hir::literal) module to -/// extract literal prefixes for you, optimize them and then select and build a -/// prefilter matcher. 
-/// -/// A prefilter must have **zero false negatives**. However, by its very -/// nature, it may produce false positives. That is, a prefilter will never -/// skip over a position in the haystack that corresponds to a match of the -/// original regex pattern, but it *may* produce a match for a position -/// in the haystack that does *not* correspond to a match of the original -/// regex pattern. If you use either the [`Prefilter::from_hir_prefix`] or -/// [`Prefilter::from_hirs_prefix`] constructors, then this guarantee is -/// upheld for you automatically. This guarantee is not preserved if you use -/// [`Prefilter::new`] though, since it is up to the caller to provide correct -/// literal strings with respect to the original regex pattern. -/// -/// # Cloning -/// -/// It is an API guarantee that cloning a prefilter is cheap. That is, cloning -/// it will not duplicate whatever heap memory is used to represent the -/// underlying matcher. -/// -/// # Example -/// -/// This example shows how to attach a `Prefilter` to the -/// [`PikeVM`](crate::nfa::thompson::pikevm::PikeVM) in order to accelerate -/// searches. -/// -/// ``` -/// use regex_automata::{ -/// nfa::thompson::pikevm::PikeVM, -/// util::prefilter::Prefilter, -/// Match, MatchKind, -/// }; -/// -/// let pre = Prefilter::new(MatchKind::LeftmostFirst, &["Bruce "]) -/// .expect("a prefilter"); -/// let re = PikeVM::builder() -/// .configure(PikeVM::config().prefilter(Some(pre))) -/// .build(r"Bruce \w+")?; -/// let mut cache = re.create_cache(); -/// assert_eq!( -/// Some(Match::must(0, 6..23)), -/// re.find(&mut cache, "Hello Bruce Springsteen!"), -/// ); -/// # Ok::<(), Box>(()) -/// ``` -/// -/// But note that if you get your prefilter incorrect, it could lead to an -/// incorrect result! -/// -/// ``` -/// use regex_automata::{ -/// nfa::thompson::pikevm::PikeVM, -/// util::prefilter::Prefilter, -/// Match, MatchKind, -/// }; -/// -/// // This prefilter is wrong! -/// let pre = Prefilter::new(MatchKind::LeftmostFirst, &["Patti "]) -/// .expect("a prefilter"); -/// let re = PikeVM::builder() -/// .configure(PikeVM::config().prefilter(Some(pre))) -/// .build(r"Bruce \w+")?; -/// let mut cache = re.create_cache(); -/// // We find no match even though the regex does match. -/// assert_eq!( -/// None, -/// re.find(&mut cache, "Hello Bruce Springsteen!"), -/// ); -/// # Ok::<(), Box>(()) -/// ``` -#[derive(Clone, Debug)] -pub struct Prefilter { - #[cfg(not(feature = "alloc"))] - _unused: (), - #[cfg(feature = "alloc")] - pre: Arc, - #[cfg(feature = "alloc")] - is_fast: bool, - #[cfg(feature = "alloc")] - max_needle_len: usize, -} - -impl Prefilter { - /// Create a new prefilter from a sequence of needles and a corresponding - /// match semantics. - /// - /// This may return `None` for a variety of reasons, for example, if - /// a suitable prefilter could not be constructed. That might occur - /// if they are unavailable (e.g., the `perf-literal-substring` and - /// `perf-literal-multisubstring` features aren't enabled), or it might - /// occur because of heuristics or other artifacts of how the prefilter - /// works. - /// - /// Note that if you have an [`Hir`] expression, it may be more convenient - /// to use [`Prefilter::from_hir_prefix`]. It will automatically handle the - /// task of extracting prefix literals for you. - /// - /// # Example - /// - /// This example shows how match semantics can impact the matching - /// algorithm used by the prefilter. 
For this reason, it is important to - /// ensure that the match semantics given here are consistent with the - /// match semantics intended for the regular expression that the literals - /// were extracted from. - /// - /// ``` - /// use regex_automata::{ - /// util::{prefilter::Prefilter, syntax}, - /// MatchKind, Span, - /// }; - /// - /// let hay = "Hello samwise"; - /// - /// // With leftmost-first, we find 'samwise' here because it comes - /// // before 'sam' in the sequence we give it.. - /// let pre = Prefilter::new(MatchKind::LeftmostFirst, &["samwise", "sam"]) - /// .expect("a prefilter"); - /// assert_eq!( - /// Some(Span::from(6..13)), - /// pre.find(hay.as_bytes(), Span::from(0..hay.len())), - /// ); - /// // Still with leftmost-first but with the literals reverse, now 'sam' - /// // will match instead! - /// let pre = Prefilter::new(MatchKind::LeftmostFirst, &["sam", "samwise"]) - /// .expect("a prefilter"); - /// assert_eq!( - /// Some(Span::from(6..9)), - /// pre.find(hay.as_bytes(), Span::from(0..hay.len())), - /// ); - /// - /// # Ok::<(), Box>(()) - /// ``` - pub fn new>( - kind: MatchKind, - needles: &[B], - ) -> Option { - Choice::new(kind, needles).and_then(|choice| { - let max_needle_len = - needles.iter().map(|b| b.as_ref().len()).max().unwrap_or(0); - Prefilter::from_choice(choice, max_needle_len) - }) - } - - /// This turns a prefilter selection into a `Prefilter`. That is, in turns - /// the enum given into a trait object. - fn from_choice( - choice: Choice, - max_needle_len: usize, - ) -> Option { - #[cfg(not(feature = "alloc"))] - { - None - } - #[cfg(feature = "alloc")] - { - let pre: Arc = match choice { - Choice::Memchr(p) => Arc::new(p), - Choice::Memchr2(p) => Arc::new(p), - Choice::Memchr3(p) => Arc::new(p), - Choice::Memmem(p) => Arc::new(p), - Choice::Teddy(p) => Arc::new(p), - Choice::ByteSet(p) => Arc::new(p), - Choice::AhoCorasick(p) => Arc::new(p), - }; - let is_fast = pre.is_fast(); - Some(Prefilter { pre, is_fast, max_needle_len }) - } - } - - /// This attempts to extract prefixes from the given `Hir` expression for - /// the given match semantics, and if possible, builds a prefilter for - /// them. - /// - /// # Example - /// - /// This example shows how to build a prefilter directly from an [`Hir`] - /// expression, and use to find an occurrence of a prefix from the regex - /// pattern. - /// - /// ``` - /// use regex_automata::{ - /// util::{prefilter::Prefilter, syntax}, - /// MatchKind, Span, - /// }; - /// - /// let hir = syntax::parse(r"(Bruce|Patti) \w+")?; - /// let pre = Prefilter::from_hir_prefix(MatchKind::LeftmostFirst, &hir) - /// .expect("a prefilter"); - /// let hay = "Hello Patti Scialfa!"; - /// assert_eq!( - /// Some(Span::from(6..12)), - /// pre.find(hay.as_bytes(), Span::from(0..hay.len())), - /// ); - /// - /// # Ok::<(), Box>(()) - /// ``` - #[cfg(feature = "syntax")] - pub fn from_hir_prefix(kind: MatchKind, hir: &Hir) -> Option { - Prefilter::from_hirs_prefix(kind, &[hir]) - } - - /// This attempts to extract prefixes from the given `Hir` expressions for - /// the given match semantics, and if possible, builds a prefilter for - /// them. - /// - /// Note that as of now, prefilters throw away information about which - /// pattern each literal comes from. In other words, when a prefilter finds - /// a match, there's no way to know which pattern (or patterns) it came - /// from. Therefore, in order to confirm a match, you'll have to check all - /// of the patterns by running the full regex engine. 
- /// - /// # Example - /// - /// This example shows how to build a prefilter directly from multiple - /// `Hir` expressions expression, and use it to find an occurrence of a - /// prefix from the regex patterns. - /// - /// ``` - /// use regex_automata::{ - /// util::{prefilter::Prefilter, syntax}, - /// MatchKind, Span, - /// }; - /// - /// let hirs = syntax::parse_many(&[ - /// r"(Bruce|Patti) \w+", - /// r"Mrs?\. Doubtfire", - /// ])?; - /// let pre = Prefilter::from_hirs_prefix(MatchKind::LeftmostFirst, &hirs) - /// .expect("a prefilter"); - /// let hay = "Hello Mrs. Doubtfire"; - /// assert_eq!( - /// Some(Span::from(6..20)), - /// pre.find(hay.as_bytes(), Span::from(0..hay.len())), - /// ); - /// - /// # Ok::<(), Box>(()) - /// ``` - #[cfg(feature = "syntax")] - pub fn from_hirs_prefix>( - kind: MatchKind, - hirs: &[H], - ) -> Option { - prefixes(kind, hirs) - .literals() - .and_then(|lits| Prefilter::new(kind, lits)) - } - - /// Run this prefilter on `haystack[span.start..end]` and return a matching - /// span if one exists. - /// - /// The span returned is guaranteed to have a start position greater than - /// or equal to the one given, and an end position less than or equal to - /// the one given. - /// - /// # Example - /// - /// This example shows how to build a prefilter directly from an [`Hir`] - /// expression, and use it to find an occurrence of a prefix from the regex - /// pattern. - /// - /// ``` - /// use regex_automata::{ - /// util::{prefilter::Prefilter, syntax}, - /// MatchKind, Span, - /// }; - /// - /// let hir = syntax::parse(r"Bruce \w+")?; - /// let pre = Prefilter::from_hir_prefix(MatchKind::LeftmostFirst, &hir) - /// .expect("a prefilter"); - /// let hay = "Hello Bruce Springsteen!"; - /// assert_eq!( - /// Some(Span::from(6..12)), - /// pre.find(hay.as_bytes(), Span::from(0..hay.len())), - /// ); - /// - /// # Ok::<(), Box>(()) - /// ``` - #[inline] - pub fn find(&self, haystack: &[u8], span: Span) -> Option { - #[cfg(not(feature = "alloc"))] - { - unreachable!() - } - #[cfg(feature = "alloc")] - { - self.pre.find(haystack, span) - } - } - - /// Returns the span of a prefix of `haystack[span.start..span.end]` if - /// the prefilter matches. - /// - /// The span returned is guaranteed to have a start position equivalent to - /// the one given, and an end position less than or equal to the one given. - /// - /// # Example - /// - /// This example shows how to build a prefilter directly from an [`Hir`] - /// expression, and use it to find an occurrence of a prefix from the regex - /// pattern that begins at the start of a haystack only. - /// - /// ``` - /// use regex_automata::{ - /// util::{prefilter::Prefilter, syntax}, - /// MatchKind, Span, - /// }; - /// - /// let hir = syntax::parse(r"Bruce \w+")?; - /// let pre = Prefilter::from_hir_prefix(MatchKind::LeftmostFirst, &hir) - /// .expect("a prefilter"); - /// let hay = "Hello Bruce Springsteen!"; - /// // Nothing is found here because 'Bruce' does - /// // not occur at the beginning of our search. - /// assert_eq!( - /// None, - /// pre.prefix(hay.as_bytes(), Span::from(0..hay.len())), - /// ); - /// // But if we change where we start the search - /// // to begin where 'Bruce ' begins, then a - /// // match will be found. 
- /// assert_eq!( - /// Some(Span::from(6..12)), - /// pre.prefix(hay.as_bytes(), Span::from(6..hay.len())), - /// ); - /// - /// # Ok::<(), Box>(()) - /// ``` - #[inline] - pub fn prefix(&self, haystack: &[u8], span: Span) -> Option { - #[cfg(not(feature = "alloc"))] - { - unreachable!() - } - #[cfg(feature = "alloc")] - { - self.pre.prefix(haystack, span) - } - } - - /// Returns the heap memory, in bytes, used by the underlying prefilter. - #[inline] - pub fn memory_usage(&self) -> usize { - #[cfg(not(feature = "alloc"))] - { - unreachable!() - } - #[cfg(feature = "alloc")] - { - self.pre.memory_usage() - } - } - - /// Return the length of the longest needle - /// in this Prefilter - #[inline] - pub fn max_needle_len(&self) -> usize { - #[cfg(not(feature = "alloc"))] - { - unreachable!() - } - #[cfg(feature = "alloc")] - { - self.max_needle_len - } - } - - /// Implementations might return true here if they believe themselves to - /// be "fast." The concept of "fast" is deliberately left vague, but in - /// practice this usually corresponds to whether it's believed that SIMD - /// will be used. - /// - /// Why do we care about this? Well, some prefilter tricks tend to come - /// with their own bits of overhead, and so might only make sense if we - /// know that a scan will be *much* faster than the regex engine itself. - /// Otherwise, the trick may not be worth doing. Whether something is - /// "much" faster than the regex engine generally boils down to whether - /// SIMD is used. (But not always. Even a SIMD matcher with a high false - /// positive rate can become quite slow.) - /// - /// Even if this returns true, it is still possible for the prefilter to - /// be "slow." Remember, prefilters are just heuristics. We can't really - /// *know* a prefilter will be fast without actually trying the prefilter. - /// (Which of course we cannot afford to do.) - #[inline] - pub fn is_fast(&self) -> bool { - #[cfg(not(feature = "alloc"))] - { - unreachable!() - } - #[cfg(feature = "alloc")] - { - self.is_fast - } - } -} - -/// A trait for abstracting over prefilters. Basically, a prefilter is -/// something that do an unanchored *and* an anchored search in a haystack -/// within a given span. -/// -/// This exists pretty much only so that we can use prefilters as a trait -/// object (which is what `Prefilter` is). If we ever move off of trait objects -/// and to an enum, then it's likely this trait could be removed. -pub(crate) trait PrefilterI: - Debug + Send + Sync + RefUnwindSafe + UnwindSafe + 'static -{ - /// Run this prefilter on `haystack[span.start..end]` and return a matching - /// span if one exists. - /// - /// The span returned is guaranteed to have a start position greater than - /// or equal to the one given, and an end position less than or equal to - /// the one given. - fn find(&self, haystack: &[u8], span: Span) -> Option; - - /// Returns the span of a prefix of `haystack[span.start..span.end]` if - /// the prefilter matches. - /// - /// The span returned is guaranteed to have a start position equivalent to - /// the one given, and an end position less than or equal to the one given. - fn prefix(&self, haystack: &[u8], span: Span) -> Option; - - /// Returns the heap memory, in bytes, used by the underlying prefilter. - fn memory_usage(&self) -> usize; - - /// Implementations might return true here if they believe themselves to - /// be "fast." See [`Prefilter::is_fast`] for more details. - fn is_fast(&self) -> bool; -} - -#[cfg(feature = "alloc")] -impl PrefilterI for Arc

-.             any character except new line (includes new line with s flag)
-[0-9]         any ASCII digit
-\d            digit (\p{Nd})
-\D            not digit
-\pX           Unicode character class identified by a one-letter name
-\p{Greek}     Unicode character class (general category or script)
-\PX           Negated Unicode character class identified by a one-letter name
-\P{Greek}     negated Unicode character class (general category or script)
-
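A brief sketch of a Unicode script class in practice, assuming the default `unicode` features are enabled (the Greek haystack below is purely illustrative):

```rust
use regex::Regex;

// `\p{Greek}` matches any code point in the Greek script.
let re = Regex::new(r"^\p{Greek}+$").unwrap();
assert!(re.is_match("αβγ"));
assert!(!re.is_match("abc"));
```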
- -### Character classes - -
-[xyz]         A character class matching either x, y or z (union).
-[^xyz]        A character class matching any character except x, y and z.
-[a-z]         A character class matching any character in range a-z.
-[[:alpha:]]   ASCII character class ([A-Za-z])
-[[:^alpha:]]  Negated ASCII character class ([^A-Za-z])
-[x[^xyz]]     Nested/grouping character class (matching any character except y and z)
-[a-y&&xyz]    Intersection (matching x or y)
-[0-9&&[^4]]   Subtraction using intersection and negation (matching 0-9 except 4)
-[0-9--4]      Direct subtraction (matching 0-9 except 4)
-[a-g~~b-h]    Symmetric difference (matching `a` and `h` only)
-[\[\]]        Escaping in character classes (matching [ or ])
-[a&&b]        An empty character class matching nothing
-
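A small, hedged example of the set operations above (the digits chosen are arbitrary):

```rust
use regex::Regex;

// `[0-9&&[^4]]` combines intersection and negation: any ASCII digit except 4.
let re = Regex::new(r"^[0-9&&[^4]]+$").unwrap();
assert!(re.is_match("01239"));
assert!(!re.is_match("24"));
```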
- -Any named character class may appear inside a bracketed `[...]` character -class. For example, `[\p{Greek}[:digit:]]` matches any ASCII digit or any -codepoint in the `Greek` script. `[\p{Greek}&&\pL]` matches Greek letters. - -Precedence in character classes, from most binding to least: - -1. Ranges: `[a-cd]` == `[[a-c]d]` -2. Union: `[ab&&bc]` == `[[ab]&&[bc]]` -3. Intersection, difference, symmetric difference. All three have equivalent -precedence, and are evaluated in left-to-right order. For example, -`[\pL--\p{Greek}&&\p{Uppercase}]` == `[[\pL--\p{Greek}]&&\p{Uppercase}]`. -4. Negation: `[^a-z&&b]` == `[^[a-z&&b]]`. - -### Composites - -
-xy    concatenation (x followed by y)
-x|y   alternation (x or y, prefer x)
-
- -This example shows how an alternation works, and what it means to prefer a -branch in the alternation over subsequent branches. - -``` -use regex::Regex; - -let haystack = "samwise"; -// If 'samwise' comes first in our alternation, then it is -// preferred as a match, even if the regex engine could -// technically detect that 'sam' led to a match earlier. -let re = Regex::new(r"samwise|sam").unwrap(); -assert_eq!("samwise", re.find(haystack).unwrap().as_str()); -// But if 'sam' comes first, then it will match instead. -// In this case, it is impossible for 'samwise' to match -// because 'sam' is a prefix of it. -let re = Regex::new(r"sam|samwise").unwrap(); -assert_eq!("sam", re.find(haystack).unwrap().as_str()); -``` - -### Repetitions - -
-x*        zero or more of x (greedy)
-x+        one or more of x (greedy)
-x?        zero or one of x (greedy)
-x*?       zero or more of x (ungreedy/lazy)
-x+?       one or more of x (ungreedy/lazy)
-x??       zero or one of x (ungreedy/lazy)
-x{n,m}    at least n x and at most m x (greedy)
-x{n,}     at least n x (greedy)
-x{n}      exactly n x
-x{n,m}?   at least n x and at most m x (ungreedy/lazy)
-x{n,}?    at least n x (ungreedy/lazy)
-x{n}?     exactly n x
-
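The difference between greedy and lazy repetition can be seen with a short sketch (the angle-bracket haystack is only illustrative):

```rust
use regex::Regex;

let hay = "<a><b>";
// Greedy `.+` consumes as much as possible before the final `>`.
let greedy = Regex::new(r"<.+>").unwrap();
assert_eq!(greedy.find(hay).unwrap().as_str(), "<a><b>");
// Lazy `.+?` stops at the first `>` that lets the match succeed.
let lazy = Regex::new(r"<.+?>").unwrap();
assert_eq!(lazy.find(hay).unwrap().as_str(), "<a>");
```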
- -### Empty matches - -
-^               the beginning of a haystack (or start-of-line with multi-line mode)
-$               the end of a haystack (or end-of-line with multi-line mode)
-\A              only the beginning of a haystack (even with multi-line mode enabled)
-\z              only the end of a haystack (even with multi-line mode enabled)
-\b              a Unicode word boundary (\w on one side and \W, \A, or \z on other)
-\B              not a Unicode word boundary
-\b{start}, \<   a Unicode start-of-word boundary (\W|\A on the left, \w on the right)
-\b{end}, \>     a Unicode end-of-word boundary (\w on the left, \W|\z on the right)
-\b{start-half}  half of a Unicode start-of-word boundary (\W|\A on the left)
-\b{end-half}    half of a Unicode end-of-word boundary (\W|\z on the right)
-
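As a quick sketch of the word boundary assertion (the haystacks are illustrative):

```rust
use regex::Regex;

// `\b` asserts a word boundary without consuming any characters.
let re = Regex::new(r"\bcat\b").unwrap();
assert!(re.is_match("a cat sat"));
assert!(!re.is_match("concatenate"));
```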
- -The empty regex is valid and matches the empty string. For example, the -empty regex matches `abc` at positions `0`, `1`, `2` and `3`. When using the -top-level [`Regex`] on `&str` haystacks, an empty match that splits a codepoint -is guaranteed to never be returned. However, such matches are permitted when -using a [`bytes::Regex`]. For example: - -```rust -let re = regex::Regex::new(r"").unwrap(); -let ranges: Vec<_> = re.find_iter("💩").map(|m| m.range()).collect(); -assert_eq!(ranges, vec![0..0, 4..4]); - -let re = regex::bytes::Regex::new(r"").unwrap(); -let ranges: Vec<_> = re.find_iter("💩".as_bytes()).map(|m| m.range()).collect(); -assert_eq!(ranges, vec![0..0, 1..1, 2..2, 3..3, 4..4]); -``` - -Note that an empty regex is distinct from a regex that can never match. -For example, the regex `[a&&b]` is a character class that represents the -intersection of `a` and `b`. That intersection is empty, which means the -character class is empty. Since nothing is in the empty set, `[a&&b]` matches -nothing, not even the empty string. - -### Grouping and flags - -
-(exp)          numbered capture group (indexed by opening parenthesis)
-(?P<name>exp)  named (also numbered) capture group (names must be alpha-numeric)
-(?<name>exp)   named (also numbered) capture group (names must be alpha-numeric)
-(?:exp)        non-capturing group
-(?flags)       set flags within current group
-(?flags:exp)   set flags for exp (non-capturing)
-
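A minimal sketch of named capture groups (the group names `year` and `month` are made up for illustration):

```rust
use regex::Regex;

// `(?<year>...)` and `(?<month>...)` are named (and numbered) capture groups.
let re = Regex::new(r"(?<year>[0-9]{4})-(?<month>[0-9]{2})").unwrap();
let caps = re.captures("released 2010-06").unwrap();
assert_eq!(&caps["year"], "2010");
assert_eq!(&caps["month"], "06");
```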
- -Capture group names must be any sequence of alpha-numeric Unicode codepoints, -in addition to `.`, `_`, `[` and `]`. Names must start with either an `_` or -an alphabetic codepoint. Alphabetic codepoints correspond to the `Alphabetic` -Unicode property, while numeric codepoints correspond to the union of the -`Decimal_Number`, `Letter_Number` and `Other_Number` general categories. - -Flags are each a single character. For example, `(?x)` sets the flag `x` -and `(?-x)` clears the flag `x`. Multiple flags can be set or cleared at -the same time: `(?xy)` sets both the `x` and `y` flags and `(?x-y)` sets -the `x` flag and clears the `y` flag. - -All flags are by default disabled unless stated otherwise. They are: - -
-i     case-insensitive: letters match both upper and lower case
-m     multi-line mode: ^ and $ match begin/end of line
-s     allow . to match \n
-R     enables CRLF mode: when multi-line mode is enabled, \r\n is used
-U     swap the meaning of x* and x*?
-u     Unicode support (enabled by default)
-x     verbose mode, ignores whitespace and allows line comments (starting with `#`)
-
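For instance, a hedged sketch of the `x` (verbose) flag, using an arbitrary date-like pattern:

```rust
use regex::Regex;

// `(?x)` enables verbose mode: whitespace is ignored and `#` starts a comment.
let re = Regex::new(r"(?x)
    [0-9]{4}   # year
    -
    [0-9]{2}   # month
").unwrap();
assert!(re.is_match("2024-07"));
```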
- -Note that in verbose mode, whitespace is ignored everywhere, including within -character classes. To insert whitespace, use its escaped form or a hex literal. -For example, `\ ` or `\x20` for an ASCII space. - -Flags can be toggled within a pattern. Here's an example that matches -case-insensitively for the first part but case-sensitively for the second part: - -```rust -use regex::Regex; - -let re = Regex::new(r"(?i)a+(?-i)b+").unwrap(); -let m = re.find("AaAaAbbBBBb").unwrap(); -assert_eq!(m.as_str(), "AaAaAbb"); -``` - -Notice that the `a+` matches either `a` or `A`, but the `b+` only matches -`b`. - -Multi-line mode means `^` and `$` no longer match just at the beginning/end of -the input, but also at the beginning/end of lines: - -``` -use regex::Regex; - -let re = Regex::new(r"(?m)^line \d+").unwrap(); -let m = re.find("line one\nline 2\n").unwrap(); -assert_eq!(m.as_str(), "line 2"); -``` - -Note that `^` matches after new lines, even at the end of input: - -``` -use regex::Regex; - -let re = Regex::new(r"(?m)^").unwrap(); -let m = re.find_iter("test\n").last().unwrap(); -assert_eq!((m.start(), m.end()), (5, 5)); -``` - -When both CRLF mode and multi-line mode are enabled, then `^` and `$` will -match either `\r` or `\n`, but never in the middle of a `\r\n`: - -``` -use regex::Regex; - -let re = Regex::new(r"(?mR)^foo$").unwrap(); -let m = re.find("\r\nfoo\r\n").unwrap(); -assert_eq!(m.as_str(), "foo"); -``` - -Unicode mode can also be selectively disabled, although only when the result -*would not* match invalid UTF-8. One good example of this is using an ASCII -word boundary instead of a Unicode word boundary, which might make some regex -searches run faster: - -```rust -use regex::Regex; - -let re = Regex::new(r"(?-u:\b).+(?-u:\b)").unwrap(); -let m = re.find("$$abc$$").unwrap(); -assert_eq!(m.as_str(), "abc"); -``` - -### Escape sequences - -Note that this includes all possible escape sequences, even ones that are -documented elsewhere. - -
-\*              literal *, applies to all ASCII except [0-9A-Za-z<>]
-\a              bell (\x07)
-\f              form feed (\x0C)
-\t              horizontal tab
-\n              new line
-\r              carriage return
-\v              vertical tab (\x0B)
-\A              matches at the beginning of a haystack
-\z              matches at the end of a haystack
-\b              word boundary assertion
-\B              negated word boundary assertion
-\b{start}, \<   start-of-word boundary assertion
-\b{end}, \>     end-of-word boundary assertion
-\b{start-half}  half of a start-of-word boundary assertion
-\b{end-half}    half of an end-of-word boundary assertion
-\123            octal character code, up to three digits (when enabled)
-\x7F            hex character code (exactly two digits)
-\x{10FFFF}      any hex character code corresponding to a Unicode code point
-\u007F          hex character code (exactly four digits)
-\u{7F}          any hex character code corresponding to a Unicode code point
-\U0000007F      hex character code (exactly eight digits)
-\U{7F}          any hex character code corresponding to a Unicode code point
-\p{Letter}      Unicode character class
-\P{Letter}      negated Unicode character class
-\d, \s, \w      Perl character class
-\D, \S, \W      negated Perl character class
-
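A short sketch of the `\x{...}` escape (the code point chosen is arbitrary):

```rust
use regex::Regex;

// `\x{1F4A9}` names U+1F4A9 (PILE OF POO) by its hexadecimal value.
let re = Regex::new(r"^\x{1F4A9}$").unwrap();
assert!(re.is_match("💩"));
```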
- -### Perl character classes (Unicode friendly) - -These classes are based on the definitions provided in -[UTS#18](https://www.unicode.org/reports/tr18/#Compatibility_Properties): - -
-\d     digit (\p{Nd})
-\D     not digit
-\s     whitespace (\p{White_Space})
-\S     not whitespace
-\w     word character (\p{Alphabetic} + \p{M} + \d + \p{Pc} + \p{Join_Control})
-\W     not word character
-
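To illustrate, a small sketch contrasting Unicode-aware `\w` with its ASCII-only form (the Greek haystack is illustrative):

```rust
use regex::Regex;

// `\w` is Unicode-aware by default; `(?-u:\w)` falls back to ASCII-only.
let unicode = Regex::new(r"^\w+$").unwrap();
assert!(unicode.is_match("δέλτα"));
let ascii = Regex::new(r"^(?-u:\w)+$").unwrap();
assert!(!ascii.is_match("δέλτα"));
```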
- -### ASCII character classes - -These classes are based on the definitions provided in -[UTS#18](https://www.unicode.org/reports/tr18/#Compatibility_Properties): - -
-[[:alnum:]]    alphanumeric ([0-9A-Za-z])
-[[:alpha:]]    alphabetic ([A-Za-z])
-[[:ascii:]]    ASCII ([\x00-\x7F])
-[[:blank:]]    blank ([\t ])
-[[:cntrl:]]    control ([\x00-\x1F\x7F])
-[[:digit:]]    digits ([0-9])
-[[:graph:]]    graphical ([!-~])
-[[:lower:]]    lower case ([a-z])
-[[:print:]]    printable ([ -~])
-[[:punct:]]    punctuation ([!-/:-@\[-`{-~])
-[[:space:]]    whitespace ([\t\n\v\f\r ])
-[[:upper:]]    upper case ([A-Z])
-[[:word:]]     word characters ([0-9A-Za-z_])
-[[:xdigit:]]   hex digit ([0-9A-Fa-f])
-
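A brief sketch of one of the ASCII classes above (the haystacks are arbitrary):

```rust
use regex::Regex;

// `[[:xdigit:]]` matches ASCII hexadecimal digits only.
let re = Regex::new(r"^[[:xdigit:]]+$").unwrap();
assert!(re.is_match("DEADbeef42"));
assert!(!re.is_match("0x42"));
```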
- -# Untrusted input - -This crate is meant to be able to run regex searches on untrusted haystacks -without fear of [ReDoS]. This crate also, to a certain extent, supports -untrusted patterns. - -[ReDoS]: https://en.wikipedia.org/wiki/ReDoS - -This crate differs from most (but not all) other regex engines in that it -doesn't use unbounded backtracking to run a regex search. In those cases, -one generally cannot use untrusted patterns *or* untrusted haystacks because -it can be very difficult to know whether a particular pattern will result in -catastrophic backtracking or not. - -We'll first discuss how this crate deals with untrusted inputs and then wrap -it up with a realistic discussion about what practice really looks like. - -### Panics - -Outside of clearly documented cases, most APIs in this crate are intended to -never panic regardless of the inputs given to them. For example, `Regex::new`, -`Regex::is_match`, `Regex::find` and `Regex::captures` should never panic. That -is, it is an API promise that those APIs will never panic no matter what inputs -are given to them. With that said, regex engines are complicated beasts, and -providing a rock solid guarantee that these APIs literally never panic is -essentially equivalent to saying, "there are no bugs in this library." That is -a bold claim, and not really one that can be feasibly made with a straight -face. - -Don't get the wrong impression here. This crate is extensively tested, not just -with unit and integration tests, but also via fuzz testing. For example, this -crate is part of the [OSS-fuzz project]. Panics should be incredibly rare, but -it is possible for bugs to exist, and thus possible for a panic to occur. If -you need a rock solid guarantee against panics, then you should wrap calls into -this library with [`std::panic::catch_unwind`]. - -It's also worth pointing out that this library will *generally* panic when -other regex engines would commit undefined behavior. When undefined behavior -occurs, your program might continue as if nothing bad has happened, but it also -might mean your program is open to the worst kinds of exploits. In contrast, -the worst thing a panic can do is a denial of service. - -[OSS-fuzz project]: https://android.googlesource.com/platform/external/oss-fuzz/+/refs/tags/android-t-preview-1/projects/rust-regex/ -[`std::panic::catch_unwind`]: https://doc.rust-lang.org/std/panic/fn.catch_unwind.html - -### Untrusted patterns - -The principal way this crate deals with them is by limiting their size by -default. The size limit can be configured via [`RegexBuilder::size_limit`]. The -idea of a size limit is that compiling a pattern into a `Regex` will fail if it -becomes "too big." Namely, while *most* resources consumed by compiling a regex -are approximately proportional (albeit with some high constant factors in some -cases, such as with Unicode character classes) to the length of the pattern -itself, there is one particular exception to this: counted repetitions. Namely, -this pattern: - -```text -a{5}{5}{5}{5}{5}{5} -``` - -Is equivalent to this pattern: - -```text -a{15625} -``` - -In both of these cases, the actual pattern string is quite small, but the -resulting `Regex` value is quite large. Indeed, as the first pattern shows, -it isn't enough to locally limit the size of each repetition because they can -be stacked in a way that results in exponential growth. 
- -To provide a bit more context, a simplified view of regex compilation looks -like this: - -* The pattern string is parsed into a structured representation called an AST. -Counted repetitions are not expanded and Unicode character classes are not -looked up in this stage. That is, the size of the AST is proportional to the -size of the pattern with "reasonable" constant factors. In other words, one -can reasonably limit the memory used by an AST by limiting the length of the -pattern string. -* The AST is translated into an HIR. Counted repetitions are still *not* -expanded at this stage, but Unicode character classes are embedded into the -HIR. The memory usage of a HIR is still proportional to the length of the -original pattern string, but the constant factors---mostly as a result of -Unicode character classes---can be quite high. Still though, the memory used by -an HIR can be reasonably limited by limiting the length of the pattern string. -* The HIR is compiled into a [Thompson NFA]. This is the stage at which -something like `\w{5}` is rewritten to `\w\w\w\w\w`. Thus, this is the stage -at which [`RegexBuilder::size_limit`] is enforced. If the NFA exceeds the -configured size, then this stage will fail. - -[Thompson NFA]: https://en.wikipedia.org/wiki/Thompson%27s_construction - -The size limit helps avoid two different kinds of exorbitant resource usage: - -* It avoids permitting exponential memory usage based on the size of the -pattern string. -* It avoids long search times. This will be discussed in more detail in the -next section, but worst case search time *is* dependent on the size of the -regex. So keeping regexes limited to a reasonable size is also a way of keeping -search times reasonable. - -Finally, it's worth pointing out that regex compilation is guaranteed to take -worst case `O(m)` time, where `m` is proportional to the size of regex. The -size of the regex here is *after* the counted repetitions have been expanded. - -**Advice for those using untrusted regexes**: limit the pattern length to -something small and expand it as needed. Configure [`RegexBuilder::size_limit`] -to something small and then expand it as needed. - -### Untrusted haystacks - -The main way this crate guards against searches from taking a long time is by -using algorithms that guarantee a `O(m * n)` worst case time and space bound. -Namely: - -* `m` is proportional to the size of the regex, where the size of the regex -includes the expansion of all counted repetitions. (See the previous section on -untrusted patterns.) -* `n` is proportional to the length, in bytes, of the haystack. - -In other words, if you consider `m` to be a constant (for example, the regex -pattern is a literal in the source code), then the search can be said to run -in "linear time." Or equivalently, "linear time with respect to the size of the -haystack." - -But the `m` factor here is important not to ignore. If a regex is -particularly big, the search times can get quite slow. This is why, in part, -[`RegexBuilder::size_limit`] exists. - -**Advice for those searching untrusted haystacks**: As long as your regexes -are not enormous, you should expect to be able to search untrusted haystacks -without fear. If you aren't sure, you should benchmark it. Unlike backtracking -engines, if your regex is so big that it's likely to result in slow searches, -this is probably something you'll be able to observe regardless of what the -haystack is made up of. 
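A hedged sketch of the size limit mentioned above, using a deliberately small and purely illustrative value:

```rust
use regex::RegexBuilder;

// 1 KiB is an illustrative limit, not a recommendation. The pattern string is
// short, but its compiled form (Unicode \w repeated 1000 times) is far larger,
// so the build is rejected.
let result = RegexBuilder::new(r"\w{1000}")
    .size_limit(1 << 10)
    .build();
assert!(result.is_err());
```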
- -### Iterating over matches - -One thing that is perhaps easy to miss is that the worst case time -complexity bound of `O(m * n)` applies to methods like [`Regex::is_match`], -[`Regex::find`] and [`Regex::captures`]. It does **not** apply to -[`Regex::find_iter`] or [`Regex::captures_iter`]. Namely, since iterating over -all matches can execute many searches, and each search can scan the entire -haystack, the worst case time complexity for iterators is `O(m * n^2)`. - -One example of where this occurs is when a pattern consists of an alternation, -where an earlier branch of the alternation requires scanning the entire -haystack only to discover that there is no match. It also requires a later -branch of the alternation to have matched at the beginning of the search. For -example, consider the pattern `.*[^A-Z]|[A-Z]` and the haystack `AAAAA`. The -first search will scan to the end looking for matches of `.*[^A-Z]` even though -a finite automata engine (as in this crate) knows that `[A-Z]` has already -matched the first character of the haystack. This is due to the greedy nature -of regex searching. That first search will report a match at the first `A` only -after scanning to the end to discover that no other match exists. The next -search then begins at the second `A` and the behavior repeats. - -There is no way to avoid this. This means that if both patterns and haystacks -are untrusted and you're iterating over all matches, you're susceptible to -worst case quadratic time complexity. One possible way to mitigate this -is to drop down to the lower level `regex-automata` crate and use its -`meta::Regex` iterator APIs. There, you can configure the search to operate -in "earliest" mode by passing a `Input::new(haystack).earliest(true)` to -`meta::Regex::find_iter` (for example). By enabling this mode, you give up -the normal greedy match semantics of regex searches and instead ask the regex -engine to immediately stop as soon as a match has been found. Enabling this -mode will thus restore the worst case `O(m * n)` time complexity bound, but at -the cost of different semantics. - -### Untrusted inputs in practice - -While providing a `O(m * n)` worst case time bound on all searches goes a long -way toward preventing [ReDoS], that doesn't mean every search you can possibly -run will complete without burning CPU time. In general, there are a few ways -for the `m * n` time bound to still bite you: - -* You are searching an exceptionally long haystack. No matter how you slice -it, a longer haystack will take more time to search. This crate may often make -very quick work of even long haystacks because of its literal optimizations, -but those aren't available for all regexes. -* Unicode character classes can cause searches to be quite slow in some cases. -This is especially true when they are combined with counted repetitions. While -the regex size limit above will protect you from the most egregious cases, -the default size limit still permits pretty big regexes that can execute more -slowly than one might expect. -* While routines like [`Regex::find`] and [`Regex::captures`] guarantee -worst case `O(m * n)` search time, routines like [`Regex::find_iter`] and -[`Regex::captures_iter`] actually have worst case `O(m * n^2)` search time. -This is because `find_iter` runs many searches, and each search takes worst -case `O(m * n)` time. Thus, iteration of all matches in a haystack has -worst case `O(m * n^2)`. 
A good example of a pattern that exhibits this is -`(?:A+){1000}|` or even `.*[^A-Z]|[A-Z]`. - -In general, untrusted haystacks are easier to stomach than untrusted patterns. -Untrusted patterns give a lot more control to the caller to impact the -performance of a search. In many cases, a regex search will actually execute in -average case `O(n)` time (i.e., not dependent on the size of the regex), but -this can't be guaranteed in general. Therefore, permitting untrusted patterns -means that your only line of defense is to put a limit on how big `m` (and -perhaps also `n`) can be in `O(m * n)`. `n` is limited by simply inspecting -the length of the haystack while `m` is limited by *both* applying a limit to -the length of the pattern *and* a limit on the compiled size of the regex via -[`RegexBuilder::size_limit`]. - -It bears repeating: if you're accepting untrusted patterns, it would be a good -idea to start with conservative limits on `m` and `n`, and then carefully -increase them as needed. - -# Crate features - -By default, this crate tries pretty hard to make regex matching both as fast -as possible and as correct as it can be. This means that there is a lot of -code dedicated to performance, the handling of Unicode data and the Unicode -data itself. Overall, this leads to more dependencies, larger binaries and -longer compile times. This trade off may not be appropriate in all cases, and -indeed, even when all Unicode and performance features are disabled, one is -still left with a perfectly serviceable regex engine that will work well in -many cases. (Note that code is not arbitrarily reducible, and for this reason, -the [`regex-lite`](https://docs.rs/regex-lite) crate exists to provide an even -more minimal experience by cutting out Unicode and performance, but still -maintaining the linear search time bound.) - -This crate exposes a number of features for controlling that trade off. Some -of these features are strictly performance oriented, such that disabling them -won't result in a loss of functionality, but may result in worse performance. -Other features, such as the ones controlling the presence or absence of Unicode -data, can result in a loss of functionality. For example, if one disables the -`unicode-case` feature (described below), then compiling the regex `(?i)a` -will fail since Unicode case insensitivity is enabled by default. Instead, -callers must use `(?i-u)a` to disable Unicode case folding. Stated differently, -enabling or disabling any of the features below can only add or subtract from -the total set of valid regular expressions. Enabling or disabling a feature -will never modify the match semantics of a regular expression. - -Most features below are enabled by default. Features that aren't enabled by -default are noted. - -### Ecosystem features - -* **std** - - When enabled, this will cause `regex` to use the standard library. In terms - of APIs, `std` causes error types to implement the `std::error::Error` - trait. Enabling `std` will also result in performance optimizations, - including SIMD and faster synchronization primitives. Notably, **disabling - the `std` feature will result in the use of spin locks**. To use a regex - engine without `std` and without spin locks, you'll need to drop down to - the [`regex-automata`](https://docs.rs/regex-automata) crate. -* **logging** - - When enabled, the `log` crate is used to emit messages about regex - compilation and search strategies. This is **disabled by default**. 
This is - typically only useful to someone working on this crate's internals, but might - be useful if you're doing some rabbit hole performance hacking. Or if you're - just interested in the kinds of decisions being made by the regex engine. - -### Performance features - -**Note**: - To get performance benefits offered by the SIMD, `std` must be enabled. - None of the `perf-*` features will enable `std` implicitly. - -* **perf** - - Enables all performance related features except for `perf-dfa-full`. This - feature is enabled by default is intended to cover all reasonable features - that improve performance, even if more are added in the future. -* **perf-dfa** - - Enables the use of a lazy DFA for matching. The lazy DFA is used to compile - portions of a regex to a very fast DFA on an as-needed basis. This can - result in substantial speedups, usually by an order of magnitude on large - haystacks. The lazy DFA does not bring in any new dependencies, but it can - make compile times longer. -* **perf-dfa-full** - - Enables the use of a full DFA for matching. Full DFAs are problematic because - they have worst case `O(2^n)` construction time. For this reason, when this - feature is enabled, full DFAs are only used for very small regexes and a - very small space bound is used during determinization to avoid the DFA - from blowing up. This feature is not enabled by default, even as part of - `perf`, because it results in fairly sizeable increases in binary size and - compilation time. It can result in faster search times, but they tend to be - more modest and limited to non-Unicode regexes. -* **perf-onepass** - - Enables the use of a one-pass DFA for extracting the positions of capture - groups. This optimization applies to a subset of certain types of NFAs and - represents the fastest engine in this crate for dealing with capture groups. -* **perf-backtrack** - - Enables the use of a bounded backtracking algorithm for extracting the - positions of capture groups. This usually sits between the slowest engine - (the PikeVM) and the fastest engine (one-pass DFA) for extracting capture - groups. It's used whenever the regex is not one-pass and is small enough. -* **perf-inline** - - Enables the use of aggressive inlining inside match routines. This reduces - the overhead of each match. The aggressive inlining, however, increases - compile times and binary size. -* **perf-literal** - - Enables the use of literal optimizations for speeding up matches. In some - cases, literal optimizations can result in speedups of _several_ orders of - magnitude. Disabling this drops the `aho-corasick` and `memchr` dependencies. -* **perf-cache** - - This feature used to enable a faster internal cache at the cost of using - additional dependencies, but this is no longer an option. A fast internal - cache is now used unconditionally with no additional dependencies. This may - change in the future. - -### Unicode features - -* **unicode** - - Enables all Unicode features. This feature is enabled by default, and will - always cover all Unicode features, even if more are added in the future. -* **unicode-age** - - Provide the data for the - [Unicode `Age` property](https://www.unicode.org/reports/tr44/tr44-24.html#Character_Age). - This makes it possible to use classes like `\p{Age:6.0}` to refer to all - codepoints first introduced in Unicode 6.0 -* **unicode-bool** - - Provide the data for numerous Unicode boolean properties. 
The full list - is not included here, but contains properties like `Alphabetic`, `Emoji`, - `Lowercase`, `Math`, `Uppercase` and `White_Space`. -* **unicode-case** - - Provide the data for case insensitive matching using - [Unicode's "simple loose matches" specification](https://www.unicode.org/reports/tr18/#Simple_Loose_Matches). -* **unicode-gencat** - - Provide the data for - [Unicode general categories](https://www.unicode.org/reports/tr44/tr44-24.html#General_Category_Values). - This includes, but is not limited to, `Decimal_Number`, `Letter`, - `Math_Symbol`, `Number` and `Punctuation`. -* **unicode-perl** - - Provide the data for supporting the Unicode-aware Perl character classes, - corresponding to `\w`, `\s` and `\d`. This is also necessary for using - Unicode-aware word boundary assertions. Note that if this feature is - disabled, the `\s` and `\d` character classes are still available if the - `unicode-bool` and `unicode-gencat` features are enabled, respectively. -* **unicode-script** - - Provide the data for - [Unicode scripts and script extensions](https://www.unicode.org/reports/tr24/). - This includes, but is not limited to, `Arabic`, `Cyrillic`, `Hebrew`, - `Latin` and `Thai`. -* **unicode-segment** - - Provide the data necessary to provide the properties used to implement the - [Unicode text segmentation algorithms](https://www.unicode.org/reports/tr29/). - This enables using classes like `\p{gcb=Extend}`, `\p{wb=Katakana}` and - `\p{sb=ATerm}`. - -# Other crates - -This crate has two required dependencies and several optional dependencies. -This section briefly describes them with the goal of raising awareness of how -different components of this crate may be used independently. - -It is somewhat unusual for a regex engine to have dependencies, as most regex -libraries are self contained units with no dependencies other than a particular -environment's standard library. Indeed, for other similarly optimized regex -engines, most or all of the code in the dependencies of this crate would -normally just be inseparable or coupled parts of the crate itself. But since -Rust and its tooling ecosystem make the use of dependencies so easy, it made -sense to spend some effort de-coupling parts of this crate and making them -independently useful. - -We only briefly describe each crate here. - -* [`regex-lite`](https://docs.rs/regex-lite) is not a dependency of `regex`, -but rather, a standalone zero-dependency simpler version of `regex` that -prioritizes compile times and binary size. In exchange, it eschews Unicode -support and performance. Its match semantics are as identical as possible to -the `regex` crate, and for the things it supports, its APIs are identical to -the APIs in this crate. In other words, for a lot of use cases, it is a drop-in -replacement. -* [`regex-syntax`](https://docs.rs/regex-syntax) provides a regular expression -parser via `Ast` and `Hir` types. It also provides routines for extracting -literals from a pattern. Folks can use this crate to do analysis, or even to -build their own regex engine without having to worry about writing a parser. -* [`regex-automata`](https://docs.rs/regex-automata) provides the regex engines -themselves. One of the downsides of finite automata based regex engines is that -they often need multiple internal engines in order to have similar or better -performance than an unbounded backtracking engine in practice. 
`regex-automata` -in particular provides public APIs for a PikeVM, a bounded backtracker, a -one-pass DFA, a lazy DFA, a fully compiled DFA and a meta regex engine that -combines all them together. It also has native multi-pattern support and -provides a way to compile and serialize full DFAs such that they can be loaded -and searched in a no-std no-alloc environment. `regex-automata` itself doesn't -even have a required dependency on `regex-syntax`! -* [`memchr`](https://docs.rs/memchr) provides low level SIMD vectorized -routines for quickly finding the location of single bytes or even substrings -in a haystack. In other words, it provides fast `memchr` and `memmem` routines. -These are used by this crate in literal optimizations. -* [`aho-corasick`](https://docs.rs/aho-corasick) provides multi-substring -search. It also provides SIMD vectorized routines in the case where the number -of substrings to search for is relatively small. The `regex` crate also uses -this for literal optimizations. -*/ - -#![no_std] -#![deny(missing_docs)] -#![cfg_attr(feature = "pattern", feature(pattern))] -// This adds Cargo feature annotations to items in the rustdoc output. Which is -// sadly hugely beneficial for this crate due to the number of features. -#![cfg_attr(docsrs_regex, feature(doc_cfg))] -#![warn(missing_debug_implementations)] - -#[cfg(doctest)] -doc_comment::doctest!("../README.md"); - -extern crate alloc; -#[cfg(any(test, feature = "std"))] -extern crate std; - -pub use crate::error::Error; - -pub use crate::{builders::string::*, regex::string::*, regexset::string::*}; - -mod builders; -pub mod bytes; -mod error; -mod find_byte; -#[cfg(feature = "pattern")] -mod pattern; -mod regex; -mod regexset; - -/// Escapes all regular expression meta characters in `pattern`. -/// -/// The string returned may be safely used as a literal in a regular -/// expression. 
-pub fn escape(pattern: &str) -> alloc::string::String { - regex_syntax::escape(pattern) -} diff --git a/vendor/regex/src/pattern.rs b/vendor/regex/src/pattern.rs deleted file mode 100644 index d7bf148d5de2fb..00000000000000 --- a/vendor/regex/src/pattern.rs +++ /dev/null @@ -1,67 +0,0 @@ -use core::str::pattern::{Pattern, SearchStep, Searcher, Utf8Pattern}; - -use crate::{Matches, Regex}; - -#[derive(Debug)] -pub struct RegexSearcher<'r, 't> { - haystack: &'t str, - it: Matches<'r, 't>, - last_step_end: usize, - next_match: Option<(usize, usize)>, -} - -impl<'r> Pattern for &'r Regex { - type Searcher<'t> = RegexSearcher<'r, 't>; - - fn into_searcher<'t>(self, haystack: &'t str) -> RegexSearcher<'r, 't> { - RegexSearcher { - haystack, - it: self.find_iter(haystack), - last_step_end: 0, - next_match: None, - } - } - - fn as_utf8_pattern<'p>(&'p self) -> Option> { - None - } -} - -unsafe impl<'r, 't> Searcher<'t> for RegexSearcher<'r, 't> { - #[inline] - fn haystack(&self) -> &'t str { - self.haystack - } - - #[inline] - fn next(&mut self) -> SearchStep { - if let Some((s, e)) = self.next_match { - self.next_match = None; - self.last_step_end = e; - return SearchStep::Match(s, e); - } - match self.it.next() { - None => { - if self.last_step_end < self.haystack().len() { - let last = self.last_step_end; - self.last_step_end = self.haystack().len(); - SearchStep::Reject(last, self.haystack().len()) - } else { - SearchStep::Done - } - } - Some(m) => { - let (s, e) = (m.start(), m.end()); - if s == self.last_step_end { - self.last_step_end = e; - SearchStep::Match(s, e) - } else { - self.next_match = Some((s, e)); - let last = self.last_step_end; - self.last_step_end = s; - SearchStep::Reject(last, s) - } - } - } - } -} diff --git a/vendor/regex/src/regex/bytes.rs b/vendor/regex/src/regex/bytes.rs deleted file mode 100644 index 303e0cbc4a3519..00000000000000 --- a/vendor/regex/src/regex/bytes.rs +++ /dev/null @@ -1,2722 +0,0 @@ -use alloc::{borrow::Cow, string::String, sync::Arc, vec::Vec}; - -use regex_automata::{meta, util::captures, Input, PatternID}; - -use crate::{bytes::RegexBuilder, error::Error}; - -/// A compiled regular expression for searching Unicode haystacks. -/// -/// A `Regex` can be used to search haystacks, split haystacks into substrings -/// or replace substrings in a haystack with a different substring. All -/// searching is done with an implicit `(?s:.)*?` at the beginning and end of -/// an pattern. To force an expression to match the whole string (or a prefix -/// or a suffix), you must use an anchor like `^` or `$` (or `\A` and `\z`). -/// -/// Like the `Regex` type in the parent module, matches with this regex return -/// byte offsets into the haystack. **Unlike** the parent `Regex` type, these -/// byte offsets may not correspond to UTF-8 sequence boundaries since the -/// regexes in this module can match arbitrary bytes. -/// -/// The only methods that allocate new byte strings are the string replacement -/// methods. All other methods (searching and splitting) return borrowed -/// references into the haystack given. -/// -/// # Example -/// -/// Find the offsets of a US phone number: -/// -/// ``` -/// use regex::bytes::Regex; -/// -/// let re = Regex::new("[0-9]{3}-[0-9]{3}-[0-9]{4}").unwrap(); -/// let m = re.find(b"phone: 111-222-3333").unwrap(); -/// assert_eq!(7..19, m.range()); -/// ``` -/// -/// # Example: extracting capture groups -/// -/// A common way to use regexes is with capture groups. 
That is, instead of -/// just looking for matches of an entire regex, parentheses are used to create -/// groups that represent part of the match. -/// -/// For example, consider a haystack with multiple lines, and each line has -/// three whitespace delimited fields where the second field is expected to be -/// a number and the third field a boolean. To make this convenient, we use -/// the [`Captures::extract`] API to put the strings that match each group -/// into a fixed size array: -/// -/// ``` -/// use regex::bytes::Regex; -/// -/// let hay = b" -/// rabbit 54 true -/// groundhog 2 true -/// does not match -/// fox 109 false -/// "; -/// let re = Regex::new(r"(?m)^\s*(\S+)\s+([0-9]+)\s+(true|false)\s*$").unwrap(); -/// let mut fields: Vec<(&[u8], i64, bool)> = vec![]; -/// for (_, [f1, f2, f3]) in re.captures_iter(hay).map(|caps| caps.extract()) { -/// // These unwraps are OK because our pattern is written in a way where -/// // all matches for f2 and f3 will be valid UTF-8. -/// let f2 = std::str::from_utf8(f2).unwrap(); -/// let f3 = std::str::from_utf8(f3).unwrap(); -/// fields.push((f1, f2.parse()?, f3.parse()?)); -/// } -/// assert_eq!(fields, vec![ -/// (&b"rabbit"[..], 54, true), -/// (&b"groundhog"[..], 2, true), -/// (&b"fox"[..], 109, false), -/// ]); -/// -/// # Ok::<(), Box>(()) -/// ``` -/// -/// # Example: matching invalid UTF-8 -/// -/// One of the reasons for searching `&[u8]` haystacks is that the `&[u8]` -/// might not be valid UTF-8. Indeed, with a `bytes::Regex`, patterns that -/// match invalid UTF-8 are explicitly allowed. Here's one example that looks -/// for valid UTF-8 fields that might be separated by invalid UTF-8. In this -/// case, we use `(?s-u:.)`, which matches any byte. Attempting to use it in a -/// top-level `Regex` will result in the regex failing to compile. Notice also -/// that we use `.` with Unicode mode enabled, in which case, only valid UTF-8 -/// is matched. In this way, we can build one pattern where some parts only -/// match valid UTF-8 while other parts are more permissive. -/// -/// ``` -/// use regex::bytes::Regex; -/// -/// // F0 9F 92 A9 is the UTF-8 encoding for a Pile of Poo. -/// let hay = b"\xFF\xFFfoo\xFF\xFF\xFF\xF0\x9F\x92\xA9\xFF"; -/// // An equivalent to '(?s-u:.)' is '(?-u:[\x00-\xFF])'. -/// let re = Regex::new(r"(?s)(?-u:.)*?(?.+)(?-u:.)*?(?.+)").unwrap(); -/// let caps = re.captures(hay).unwrap(); -/// assert_eq!(&caps["f1"], &b"foo"[..]); -/// assert_eq!(&caps["f2"], "💩".as_bytes()); -/// ``` -#[derive(Clone)] -pub struct Regex { - pub(crate) meta: meta::Regex, - pub(crate) pattern: Arc, -} - -impl core::fmt::Display for Regex { - /// Shows the original regular expression. - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - write!(f, "{}", self.as_str()) - } -} - -impl core::fmt::Debug for Regex { - /// Shows the original regular expression. 
- fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - f.debug_tuple("Regex").field(&self.as_str()).finish() - } -} - -impl core::str::FromStr for Regex { - type Err = Error; - - /// Attempts to parse a string into a regular expression - fn from_str(s: &str) -> Result { - Regex::new(s) - } -} - -impl TryFrom<&str> for Regex { - type Error = Error; - - /// Attempts to parse a string into a regular expression - fn try_from(s: &str) -> Result { - Regex::new(s) - } -} - -impl TryFrom for Regex { - type Error = Error; - - /// Attempts to parse a string into a regular expression - fn try_from(s: String) -> Result { - Regex::new(&s) - } -} - -/// Core regular expression methods. -impl Regex { - /// Compiles a regular expression. Once compiled, it can be used repeatedly - /// to search, split or replace substrings in a haystack. - /// - /// Note that regex compilation tends to be a somewhat expensive process, - /// and unlike higher level environments, compilation is not automatically - /// cached for you. One should endeavor to compile a regex once and then - /// reuse it. For example, it's a bad idea to compile the same regex - /// repeatedly in a loop. - /// - /// # Errors - /// - /// If an invalid pattern is given, then an error is returned. - /// An error is also returned if the pattern is valid, but would - /// produce a regex that is bigger than the configured size limit via - /// [`RegexBuilder::size_limit`]. (A reasonable size limit is enabled by - /// default.) - /// - /// # Example - /// - /// ``` - /// use regex::bytes::Regex; - /// - /// // An Invalid pattern because of an unclosed parenthesis - /// assert!(Regex::new(r"foo(bar").is_err()); - /// // An invalid pattern because the regex would be too big - /// // because Unicode tends to inflate things. - /// assert!(Regex::new(r"\w{1000}").is_err()); - /// // Disabling Unicode can make the regex much smaller, - /// // potentially by up to or more than an order of magnitude. - /// assert!(Regex::new(r"(?-u:\w){1000}").is_ok()); - /// ``` - pub fn new(re: &str) -> Result { - RegexBuilder::new(re).build() - } - - /// Returns true if and only if there is a match for the regex anywhere - /// in the haystack given. - /// - /// It is recommended to use this method if all you need to do is test - /// whether a match exists, since the underlying matching engine may be - /// able to do less work. - /// - /// # Example - /// - /// Test if some haystack contains at least one word with exactly 13 - /// Unicode word characters: - /// - /// ``` - /// use regex::bytes::Regex; - /// - /// let re = Regex::new(r"\b\w{13}\b").unwrap(); - /// let hay = b"I categorically deny having triskaidekaphobia."; - /// assert!(re.is_match(hay)); - /// ``` - #[inline] - pub fn is_match(&self, haystack: &[u8]) -> bool { - self.is_match_at(haystack, 0) - } - - /// This routine searches for the first match of this regex in the - /// haystack given, and if found, returns a [`Match`]. The `Match` - /// provides access to both the byte offsets of the match and the actual - /// substring that matched. - /// - /// Note that this should only be used if you want to find the entire - /// match. If instead you just want to test the existence of a match, - /// it's potentially faster to use `Regex::is_match(hay)` instead of - /// `Regex::find(hay).is_some()`. 
- /// - /// # Example - /// - /// Find the first word with exactly 13 Unicode word characters: - /// - /// ``` - /// use regex::bytes::Regex; - /// - /// let re = Regex::new(r"\b\w{13}\b").unwrap(); - /// let hay = b"I categorically deny having triskaidekaphobia."; - /// let mat = re.find(hay).unwrap(); - /// assert_eq!(2..15, mat.range()); - /// assert_eq!(b"categorically", mat.as_bytes()); - /// ``` - #[inline] - pub fn find<'h>(&self, haystack: &'h [u8]) -> Option> { - self.find_at(haystack, 0) - } - - /// Returns an iterator that yields successive non-overlapping matches in - /// the given haystack. The iterator yields values of type [`Match`]. - /// - /// # Time complexity - /// - /// Note that since `find_iter` runs potentially many searches on the - /// haystack and since each search has worst case `O(m * n)` time - /// complexity, the overall worst case time complexity for iteration is - /// `O(m * n^2)`. - /// - /// # Example - /// - /// Find every word with exactly 13 Unicode word characters: - /// - /// ``` - /// use regex::bytes::Regex; - /// - /// let re = Regex::new(r"\b\w{13}\b").unwrap(); - /// let hay = b"Retroactively relinquishing remunerations is reprehensible."; - /// let matches: Vec<_> = re.find_iter(hay).map(|m| m.as_bytes()).collect(); - /// assert_eq!(matches, vec![ - /// &b"Retroactively"[..], - /// &b"relinquishing"[..], - /// &b"remunerations"[..], - /// &b"reprehensible"[..], - /// ]); - /// ``` - #[inline] - pub fn find_iter<'r, 'h>(&'r self, haystack: &'h [u8]) -> Matches<'r, 'h> { - Matches { haystack, it: self.meta.find_iter(haystack) } - } - - /// This routine searches for the first match of this regex in the haystack - /// given, and if found, returns not only the overall match but also the - /// matches of each capture group in the regex. If no match is found, then - /// `None` is returned. - /// - /// Capture group `0` always corresponds to an implicit unnamed group that - /// includes the entire match. If a match is found, this group is always - /// present. Subsequent groups may be named and are numbered, starting - /// at 1, by the order in which the opening parenthesis appears in the - /// pattern. For example, in the pattern `(?
.(?.))(?.)`, `a`, - /// `b` and `c` correspond to capture group indices `1`, `2` and `3`, - /// respectively. - /// - /// You should only use `captures` if you need access to the capture group - /// matches. Otherwise, [`Regex::find`] is generally faster for discovering - /// just the overall match. - /// - /// # Example - /// - /// Say you have some haystack with movie names and their release years, - /// like "'Citizen Kane' (1941)". It'd be nice if we could search for - /// strings looking like that, while also extracting the movie name and its - /// release year separately. The example below shows how to do that. - /// - /// ``` - /// use regex::bytes::Regex; - /// - /// let re = Regex::new(r"'([^']+)'\s+\((\d{4})\)").unwrap(); - /// let hay = b"Not my favorite movie: 'Citizen Kane' (1941)."; - /// let caps = re.captures(hay).unwrap(); - /// assert_eq!(caps.get(0).unwrap().as_bytes(), b"'Citizen Kane' (1941)"); - /// assert_eq!(caps.get(1).unwrap().as_bytes(), b"Citizen Kane"); - /// assert_eq!(caps.get(2).unwrap().as_bytes(), b"1941"); - /// // You can also access the groups by index using the Index notation. - /// // Note that this will panic on an invalid index. In this case, these - /// // accesses are always correct because the overall regex will only - /// // match when these capture groups match. - /// assert_eq!(&caps[0], b"'Citizen Kane' (1941)"); - /// assert_eq!(&caps[1], b"Citizen Kane"); - /// assert_eq!(&caps[2], b"1941"); - /// ``` - /// - /// Note that the full match is at capture group `0`. Each subsequent - /// capture group is indexed by the order of its opening `(`. - /// - /// We can make this example a bit clearer by using *named* capture groups: - /// - /// ``` - /// use regex::bytes::Regex; - /// - /// let re = Regex::new(r"'(?[^']+)'\s+\((?<year>\d{4})\)").unwrap(); - /// let hay = b"Not my favorite movie: 'Citizen Kane' (1941)."; - /// let caps = re.captures(hay).unwrap(); - /// assert_eq!(caps.get(0).unwrap().as_bytes(), b"'Citizen Kane' (1941)"); - /// assert_eq!(caps.name("title").unwrap().as_bytes(), b"Citizen Kane"); - /// assert_eq!(caps.name("year").unwrap().as_bytes(), b"1941"); - /// // You can also access the groups by name using the Index notation. - /// // Note that this will panic on an invalid group name. In this case, - /// // these accesses are always correct because the overall regex will - /// // only match when these capture groups match. - /// assert_eq!(&caps[0], b"'Citizen Kane' (1941)"); - /// assert_eq!(&caps["title"], b"Citizen Kane"); - /// assert_eq!(&caps["year"], b"1941"); - /// ``` - /// - /// Here we name the capture groups, which we can access with the `name` - /// method or the `Index` notation with a `&str`. Note that the named - /// capture groups are still accessible with `get` or the `Index` notation - /// with a `usize`. - /// - /// The `0`th capture group is always unnamed, so it must always be - /// accessed with `get(0)` or `[0]`. 
- /// - /// Finally, one other way to get the matched substrings is with the - /// [`Captures::extract`] API: - /// - /// ``` - /// use regex::bytes::Regex; - /// - /// let re = Regex::new(r"'([^']+)'\s+\((\d{4})\)").unwrap(); - /// let hay = b"Not my favorite movie: 'Citizen Kane' (1941)."; - /// let (full, [title, year]) = re.captures(hay).unwrap().extract(); - /// assert_eq!(full, b"'Citizen Kane' (1941)"); - /// assert_eq!(title, b"Citizen Kane"); - /// assert_eq!(year, b"1941"); - /// ``` - #[inline] - pub fn captures<'h>(&self, haystack: &'h [u8]) -> Option<Captures<'h>> { - self.captures_at(haystack, 0) - } - - /// Returns an iterator that yields successive non-overlapping matches in - /// the given haystack. The iterator yields values of type [`Captures`]. - /// - /// This is the same as [`Regex::find_iter`], but instead of only providing - /// access to the overall match, each value yield includes access to the - /// matches of all capture groups in the regex. Reporting this extra match - /// data is potentially costly, so callers should only use `captures_iter` - /// over `find_iter` when they actually need access to the capture group - /// matches. - /// - /// # Time complexity - /// - /// Note that since `captures_iter` runs potentially many searches on the - /// haystack and since each search has worst case `O(m * n)` time - /// complexity, the overall worst case time complexity for iteration is - /// `O(m * n^2)`. - /// - /// # Example - /// - /// We can use this to find all movie titles and their release years in - /// some haystack, where the movie is formatted like "'Title' (xxxx)": - /// - /// ``` - /// use regex::bytes::Regex; - /// - /// let re = Regex::new(r"'([^']+)'\s+\(([0-9]{4})\)").unwrap(); - /// let hay = b"'Citizen Kane' (1941), 'The Wizard of Oz' (1939), 'M' (1931)."; - /// let mut movies = vec![]; - /// for (_, [title, year]) in re.captures_iter(hay).map(|c| c.extract()) { - /// // OK because [0-9]{4} can only match valid UTF-8. - /// let year = std::str::from_utf8(year).unwrap(); - /// movies.push((title, year.parse::<i64>()?)); - /// } - /// assert_eq!(movies, vec![ - /// (&b"Citizen Kane"[..], 1941), - /// (&b"The Wizard of Oz"[..], 1939), - /// (&b"M"[..], 1931), - /// ]); - /// # Ok::<(), Box<dyn std::error::Error>>(()) - /// ``` - /// - /// Or with named groups: - /// - /// ``` - /// use regex::bytes::Regex; - /// - /// let re = Regex::new(r"'(?<title>[^']+)'\s+\((?<year>[0-9]{4})\)").unwrap(); - /// let hay = b"'Citizen Kane' (1941), 'The Wizard of Oz' (1939), 'M' (1931)."; - /// let mut it = re.captures_iter(hay); - /// - /// let caps = it.next().unwrap(); - /// assert_eq!(&caps["title"], b"Citizen Kane"); - /// assert_eq!(&caps["year"], b"1941"); - /// - /// let caps = it.next().unwrap(); - /// assert_eq!(&caps["title"], b"The Wizard of Oz"); - /// assert_eq!(&caps["year"], b"1939"); - /// - /// let caps = it.next().unwrap(); - /// assert_eq!(&caps["title"], b"M"); - /// assert_eq!(&caps["year"], b"1931"); - /// ``` - #[inline] - pub fn captures_iter<'r, 'h>( - &'r self, - haystack: &'h [u8], - ) -> CaptureMatches<'r, 'h> { - CaptureMatches { haystack, it: self.meta.captures_iter(haystack) } - } - - /// Returns an iterator of substrings of the haystack given, delimited by a - /// match of the regex. Namely, each element of the iterator corresponds to - /// a part of the haystack that *isn't* matched by the regular expression. 
- /// - /// # Time complexity - /// - /// Since iterators over all matches requires running potentially many - /// searches on the haystack, and since each search has worst case - /// `O(m * n)` time complexity, the overall worst case time complexity for - /// this routine is `O(m * n^2)`. - /// - /// # Example - /// - /// To split a string delimited by arbitrary amounts of spaces or tabs: - /// - /// ``` - /// use regex::bytes::Regex; - /// - /// let re = Regex::new(r"[ \t]+").unwrap(); - /// let hay = b"a b \t c\td e"; - /// let fields: Vec<&[u8]> = re.split(hay).collect(); - /// assert_eq!(fields, vec![ - /// &b"a"[..], &b"b"[..], &b"c"[..], &b"d"[..], &b"e"[..], - /// ]); - /// ``` - /// - /// # Example: more cases - /// - /// Basic usage: - /// - /// ``` - /// use regex::bytes::Regex; - /// - /// let re = Regex::new(r" ").unwrap(); - /// let hay = b"Mary had a little lamb"; - /// let got: Vec<&[u8]> = re.split(hay).collect(); - /// assert_eq!(got, vec![ - /// &b"Mary"[..], &b"had"[..], &b"a"[..], &b"little"[..], &b"lamb"[..], - /// ]); - /// - /// let re = Regex::new(r"X").unwrap(); - /// let hay = b""; - /// let got: Vec<&[u8]> = re.split(hay).collect(); - /// assert_eq!(got, vec![&b""[..]]); - /// - /// let re = Regex::new(r"X").unwrap(); - /// let hay = b"lionXXtigerXleopard"; - /// let got: Vec<&[u8]> = re.split(hay).collect(); - /// assert_eq!(got, vec![ - /// &b"lion"[..], &b""[..], &b"tiger"[..], &b"leopard"[..], - /// ]); - /// - /// let re = Regex::new(r"::").unwrap(); - /// let hay = b"lion::tiger::leopard"; - /// let got: Vec<&[u8]> = re.split(hay).collect(); - /// assert_eq!(got, vec![&b"lion"[..], &b"tiger"[..], &b"leopard"[..]]); - /// ``` - /// - /// If a haystack contains multiple contiguous matches, you will end up - /// with empty spans yielded by the iterator: - /// - /// ``` - /// use regex::bytes::Regex; - /// - /// let re = Regex::new(r"X").unwrap(); - /// let hay = b"XXXXaXXbXc"; - /// let got: Vec<&[u8]> = re.split(hay).collect(); - /// assert_eq!(got, vec![ - /// &b""[..], &b""[..], &b""[..], &b""[..], - /// &b"a"[..], &b""[..], &b"b"[..], &b"c"[..], - /// ]); - /// - /// let re = Regex::new(r"/").unwrap(); - /// let hay = b"(///)"; - /// let got: Vec<&[u8]> = re.split(hay).collect(); - /// assert_eq!(got, vec![&b"("[..], &b""[..], &b""[..], &b")"[..]]); - /// ``` - /// - /// Separators at the start or end of a haystack are neighbored by empty - /// substring. - /// - /// ``` - /// use regex::bytes::Regex; - /// - /// let re = Regex::new(r"0").unwrap(); - /// let hay = b"010"; - /// let got: Vec<&[u8]> = re.split(hay).collect(); - /// assert_eq!(got, vec![&b""[..], &b"1"[..], &b""[..]]); - /// ``` - /// - /// When the regex can match the empty string, it splits at every byte - /// position in the haystack. This includes between all UTF-8 code units. - /// (The top-level [`Regex::split`](crate::Regex::split) will only split - /// at valid UTF-8 boundaries.) - /// - /// ``` - /// use regex::bytes::Regex; - /// - /// let re = Regex::new(r"").unwrap(); - /// let hay = "☃".as_bytes(); - /// let got: Vec<&[u8]> = re.split(hay).collect(); - /// assert_eq!(got, vec![ - /// &[][..], &[b'\xE2'][..], &[b'\x98'][..], &[b'\x83'][..], &[][..], - /// ]); - /// ``` - /// - /// Contiguous separators (commonly shows up with whitespace), can lead to - /// possibly surprising behavior. 
For example, this code is correct: - /// - /// ``` - /// use regex::bytes::Regex; - /// - /// let re = Regex::new(r" ").unwrap(); - /// let hay = b" a b c"; - /// let got: Vec<&[u8]> = re.split(hay).collect(); - /// assert_eq!(got, vec![ - /// &b""[..], &b""[..], &b""[..], &b""[..], - /// &b"a"[..], &b""[..], &b"b"[..], &b"c"[..], - /// ]); - /// ``` - /// - /// It does *not* give you `["a", "b", "c"]`. For that behavior, you'd want - /// to match contiguous space characters: - /// - /// ``` - /// use regex::bytes::Regex; - /// - /// let re = Regex::new(r" +").unwrap(); - /// let hay = b" a b c"; - /// let got: Vec<&[u8]> = re.split(hay).collect(); - /// // N.B. This does still include a leading empty span because ' +' - /// // matches at the beginning of the haystack. - /// assert_eq!(got, vec![&b""[..], &b"a"[..], &b"b"[..], &b"c"[..]]); - /// ``` - #[inline] - pub fn split<'r, 'h>(&'r self, haystack: &'h [u8]) -> Split<'r, 'h> { - Split { haystack, it: self.meta.split(haystack) } - } - - /// Returns an iterator of at most `limit` substrings of the haystack - /// given, delimited by a match of the regex. (A `limit` of `0` will return - /// no substrings.) Namely, each element of the iterator corresponds to a - /// part of the haystack that *isn't* matched by the regular expression. - /// The remainder of the haystack that is not split will be the last - /// element in the iterator. - /// - /// # Time complexity - /// - /// Since iterators over all matches requires running potentially many - /// searches on the haystack, and since each search has worst case - /// `O(m * n)` time complexity, the overall worst case time complexity for - /// this routine is `O(m * n^2)`. - /// - /// Although note that the worst case time here has an upper bound given - /// by the `limit` parameter. - /// - /// # Example - /// - /// Get the first two words in some haystack: - /// - /// ``` - /// use regex::bytes::Regex; - /// - /// let re = Regex::new(r"\W+").unwrap(); - /// let hay = b"Hey! 
How are you?"; - /// let fields: Vec<&[u8]> = re.splitn(hay, 3).collect(); - /// assert_eq!(fields, vec![&b"Hey"[..], &b"How"[..], &b"are you?"[..]]); - /// ``` - /// - /// # Examples: more cases - /// - /// ``` - /// use regex::bytes::Regex; - /// - /// let re = Regex::new(r" ").unwrap(); - /// let hay = b"Mary had a little lamb"; - /// let got: Vec<&[u8]> = re.splitn(hay, 3).collect(); - /// assert_eq!(got, vec![&b"Mary"[..], &b"had"[..], &b"a little lamb"[..]]); - /// - /// let re = Regex::new(r"X").unwrap(); - /// let hay = b""; - /// let got: Vec<&[u8]> = re.splitn(hay, 3).collect(); - /// assert_eq!(got, vec![&b""[..]]); - /// - /// let re = Regex::new(r"X").unwrap(); - /// let hay = b"lionXXtigerXleopard"; - /// let got: Vec<&[u8]> = re.splitn(hay, 3).collect(); - /// assert_eq!(got, vec![&b"lion"[..], &b""[..], &b"tigerXleopard"[..]]); - /// - /// let re = Regex::new(r"::").unwrap(); - /// let hay = b"lion::tiger::leopard"; - /// let got: Vec<&[u8]> = re.splitn(hay, 2).collect(); - /// assert_eq!(got, vec![&b"lion"[..], &b"tiger::leopard"[..]]); - /// - /// let re = Regex::new(r"X").unwrap(); - /// let hay = b"abcXdef"; - /// let got: Vec<&[u8]> = re.splitn(hay, 1).collect(); - /// assert_eq!(got, vec![&b"abcXdef"[..]]); - /// - /// let re = Regex::new(r"X").unwrap(); - /// let hay = b"abcdef"; - /// let got: Vec<&[u8]> = re.splitn(hay, 2).collect(); - /// assert_eq!(got, vec![&b"abcdef"[..]]); - /// - /// let re = Regex::new(r"X").unwrap(); - /// let hay = b"abcXdef"; - /// let got: Vec<&[u8]> = re.splitn(hay, 0).collect(); - /// assert!(got.is_empty()); - /// ``` - #[inline] - pub fn splitn<'r, 'h>( - &'r self, - haystack: &'h [u8], - limit: usize, - ) -> SplitN<'r, 'h> { - SplitN { haystack, it: self.meta.splitn(haystack, limit) } - } - - /// Replaces the leftmost-first match in the given haystack with the - /// replacement provided. The replacement can be a regular string (where - /// `$N` and `$name` are expanded to match capture groups) or a function - /// that takes a [`Captures`] and returns the replaced string. - /// - /// If no match is found, then the haystack is returned unchanged. In that - /// case, this implementation will likely return a `Cow::Borrowed` value - /// such that no allocation is performed. - /// - /// When a `Cow::Borrowed` is returned, the value returned is guaranteed - /// to be equivalent to the `haystack` given. - /// - /// # Replacement string syntax - /// - /// All instances of `$ref` in the replacement string are replaced with - /// the substring corresponding to the capture group identified by `ref`. - /// - /// `ref` may be an integer corresponding to the index of the capture group - /// (counted by order of opening parenthesis where `0` is the entire match) - /// or it can be a name (consisting of letters, digits or underscores) - /// corresponding to a named capture group. - /// - /// If `ref` isn't a valid capture group (whether the name doesn't exist or - /// isn't a valid index), then it is replaced with the empty string. - /// - /// The longest possible name is used. For example, `$1a` looks up the - /// capture group named `1a` and not the capture group at index `1`. To - /// exert more precise control over the name, use braces, e.g., `${1}a`. - /// - /// To write a literal `$` use `$$`. - /// - /// # Example - /// - /// Note that this function is polymorphic with respect to the replacement. 
- /// In typical usage, this can just be a normal string: - /// - /// ``` - /// use regex::bytes::Regex; - /// - /// let re = Regex::new(r"[^01]+").unwrap(); - /// assert_eq!(re.replace(b"1078910", b""), &b"1010"[..]); - /// ``` - /// - /// But anything satisfying the [`Replacer`] trait will work. For example, - /// a closure of type `|&Captures| -> String` provides direct access to the - /// captures corresponding to a match. This allows one to access capturing - /// group matches easily: - /// - /// ``` - /// use regex::bytes::{Captures, Regex}; - /// - /// let re = Regex::new(r"([^,\s]+),\s+(\S+)").unwrap(); - /// let result = re.replace(b"Springsteen, Bruce", |caps: &Captures| { - /// let mut buf = vec![]; - /// buf.extend_from_slice(&caps[2]); - /// buf.push(b' '); - /// buf.extend_from_slice(&caps[1]); - /// buf - /// }); - /// assert_eq!(result, &b"Bruce Springsteen"[..]); - /// ``` - /// - /// But this is a bit cumbersome to use all the time. Instead, a simple - /// syntax is supported (as described above) that expands `$name` into the - /// corresponding capture group. Here's the last example, but using this - /// expansion technique with named capture groups: - /// - /// ``` - /// use regex::bytes::Regex; - /// - /// let re = Regex::new(r"(?<last>[^,\s]+),\s+(?<first>\S+)").unwrap(); - /// let result = re.replace(b"Springsteen, Bruce", b"$first $last"); - /// assert_eq!(result, &b"Bruce Springsteen"[..]); - /// ``` - /// - /// Note that using `$2` instead of `$first` or `$1` instead of `$last` - /// would produce the same result. To write a literal `$` use `$$`. - /// - /// Sometimes the replacement string requires use of curly braces to - /// delineate a capture group replacement when it is adjacent to some other - /// literal text. For example, if we wanted to join two words together with - /// an underscore: - /// - /// ``` - /// use regex::bytes::Regex; - /// - /// let re = Regex::new(r"(?<first>\w+)\s+(?<second>\w+)").unwrap(); - /// let result = re.replace(b"deep fried", b"${first}_$second"); - /// assert_eq!(result, &b"deep_fried"[..]); - /// ``` - /// - /// Without the curly braces, the capture group name `first_` would be - /// used, and since it doesn't exist, it would be replaced with the empty - /// string. - /// - /// Finally, sometimes you just want to replace a literal string with no - /// regard for capturing group expansion. This can be done by wrapping a - /// string with [`NoExpand`]: - /// - /// ``` - /// use regex::bytes::{NoExpand, Regex}; - /// - /// let re = Regex::new(r"(?<last>[^,\s]+),\s+(\S+)").unwrap(); - /// let result = re.replace(b"Springsteen, Bruce", NoExpand(b"$2 $last")); - /// assert_eq!(result, &b"$2 $last"[..]); - /// ``` - /// - /// Using `NoExpand` may also be faster, since the replacement string won't - /// need to be parsed for the `$` syntax. - #[inline] - pub fn replace<'h, R: Replacer>( - &self, - haystack: &'h [u8], - rep: R, - ) -> Cow<'h, [u8]> { - self.replacen(haystack, 1, rep) - } - - /// Replaces all non-overlapping matches in the haystack with the - /// replacement provided. This is the same as calling `replacen` with - /// `limit` set to `0`. - /// - /// If no match is found, then the haystack is returned unchanged. In that - /// case, this implementation will likely return a `Cow::Borrowed` value - /// such that no allocation is performed. - /// - /// When a `Cow::Borrowed` is returned, the value returned is guaranteed - /// to be equivalent to the `haystack` given. 
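The no-match behaviour described above can be observed directly; a minimal sketch with an invented pattern and haystack, relying on the fast path in `replacen` returning a borrowed value when nothing matches:

```
use std::borrow::Cow;
use regex::bytes::Regex;

let re = Regex::new(r"[0-9]+").unwrap();
let hay: &[u8] = b"no digits here";
// Nothing matches, so the haystack comes back unchanged and, per the
// fast path in `replacen`, as a borrowed value (no allocation).
let out = re.replace_all(hay, &b"#"[..]);
assert!(matches!(out, Cow::Borrowed(_)));
assert_eq!(out, hay);
```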
- /// - /// The documentation for [`Regex::replace`] goes into more detail about - /// what kinds of replacement strings are supported. - /// - /// # Time complexity - /// - /// Since iterators over all matches requires running potentially many - /// searches on the haystack, and since each search has worst case - /// `O(m * n)` time complexity, the overall worst case time complexity for - /// this routine is `O(m * n^2)`. - /// - /// # Fallibility - /// - /// If you need to write a replacement routine where any individual - /// replacement might "fail," doing so with this API isn't really feasible - /// because there's no way to stop the search process if a replacement - /// fails. Instead, if you need this functionality, you should consider - /// implementing your own replacement routine: - /// - /// ``` - /// use regex::bytes::{Captures, Regex}; - /// - /// fn replace_all<E>( - /// re: &Regex, - /// haystack: &[u8], - /// replacement: impl Fn(&Captures) -> Result<Vec<u8>, E>, - /// ) -> Result<Vec<u8>, E> { - /// let mut new = Vec::with_capacity(haystack.len()); - /// let mut last_match = 0; - /// for caps in re.captures_iter(haystack) { - /// let m = caps.get(0).unwrap(); - /// new.extend_from_slice(&haystack[last_match..m.start()]); - /// new.extend_from_slice(&replacement(&caps)?); - /// last_match = m.end(); - /// } - /// new.extend_from_slice(&haystack[last_match..]); - /// Ok(new) - /// } - /// - /// // Let's replace each word with the number of bytes in that word. - /// // But if we see a word that is "too long," we'll give up. - /// let re = Regex::new(r"\w+").unwrap(); - /// let replacement = |caps: &Captures| -> Result<Vec<u8>, &'static str> { - /// if caps[0].len() >= 5 { - /// return Err("word too long"); - /// } - /// Ok(caps[0].len().to_string().into_bytes()) - /// }; - /// assert_eq!( - /// Ok(b"2 3 3 3?".to_vec()), - /// replace_all(&re, b"hi how are you?", &replacement), - /// ); - /// assert!(replace_all(&re, b"hi there", &replacement).is_err()); - /// ``` - /// - /// # Example - /// - /// This example shows how to flip the order of whitespace (excluding line - /// terminators) delimited fields, and normalizes the whitespace that - /// delimits the fields: - /// - /// ``` - /// use regex::bytes::Regex; - /// - /// let re = Regex::new(r"(?m)^(\S+)[\s--\r\n]+(\S+)$").unwrap(); - /// let hay = b" - /// Greetings 1973 - /// Wild\t1973 - /// BornToRun\t\t\t\t1975 - /// Darkness 1978 - /// TheRiver 1980 - /// "; - /// let new = re.replace_all(hay, b"$2 $1"); - /// assert_eq!(new, &b" - /// 1973 Greetings - /// 1973 Wild - /// 1975 BornToRun - /// 1978 Darkness - /// 1980 TheRiver - /// "[..]); - /// ``` - #[inline] - pub fn replace_all<'h, R: Replacer>( - &self, - haystack: &'h [u8], - rep: R, - ) -> Cow<'h, [u8]> { - self.replacen(haystack, 0, rep) - } - - /// Replaces at most `limit` non-overlapping matches in the haystack with - /// the replacement provided. If `limit` is `0`, then all non-overlapping - /// matches are replaced. That is, `Regex::replace_all(hay, rep)` is - /// equivalent to `Regex::replacen(hay, 0, rep)`. - /// - /// If no match is found, then the haystack is returned unchanged. In that - /// case, this implementation will likely return a `Cow::Borrowed` value - /// such that no allocation is performed. - /// - /// When a `Cow::Borrowed` is returned, the value returned is guaranteed - /// to be equivalent to the `haystack` given. 
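A small, self-contained illustration of the `limit` semantics described above (the pattern and values are invented for illustration):

```
use regex::bytes::Regex;

let re = Regex::new(r"a").unwrap();
// Only the first two of the three matches are replaced.
assert_eq!(re.replacen(b"banana", 2, &b"o"[..]), &b"bonona"[..]);
// A limit of 0 means "replace everything", just like `replace_all`.
assert_eq!(re.replacen(b"banana", 0, &b"o"[..]), &b"bonono"[..]);
```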
- /// - /// The documentation for [`Regex::replace`] goes into more detail about - /// what kinds of replacement strings are supported. - /// - /// # Time complexity - /// - /// Since iterators over all matches requires running potentially many - /// searches on the haystack, and since each search has worst case - /// `O(m * n)` time complexity, the overall worst case time complexity for - /// this routine is `O(m * n^2)`. - /// - /// Although note that the worst case time here has an upper bound given - /// by the `limit` parameter. - /// - /// # Fallibility - /// - /// See the corresponding section in the docs for [`Regex::replace_all`] - /// for tips on how to deal with a replacement routine that can fail. - /// - /// # Example - /// - /// This example shows how to flip the order of whitespace (excluding line - /// terminators) delimited fields, and normalizes the whitespace that - /// delimits the fields. But we only do it for the first two matches. - /// - /// ``` - /// use regex::bytes::Regex; - /// - /// let re = Regex::new(r"(?m)^(\S+)[\s--\r\n]+(\S+)$").unwrap(); - /// let hay = b" - /// Greetings 1973 - /// Wild\t1973 - /// BornToRun\t\t\t\t1975 - /// Darkness 1978 - /// TheRiver 1980 - /// "; - /// let new = re.replacen(hay, 2, b"$2 $1"); - /// assert_eq!(new, &b" - /// 1973 Greetings - /// 1973 Wild - /// BornToRun\t\t\t\t1975 - /// Darkness 1978 - /// TheRiver 1980 - /// "[..]); - /// ``` - #[inline] - pub fn replacen<'h, R: Replacer>( - &self, - haystack: &'h [u8], - limit: usize, - mut rep: R, - ) -> Cow<'h, [u8]> { - // If we know that the replacement doesn't have any capture expansions, - // then we can use the fast path. The fast path can make a tremendous - // difference: - // - // 1) We use `find_iter` instead of `captures_iter`. Not asking for - // captures generally makes the regex engines faster. - // 2) We don't need to look up all of the capture groups and do - // replacements inside the replacement string. We just push it - // at each match and be done with it. - if let Some(rep) = rep.no_expansion() { - let mut it = self.find_iter(haystack).enumerate().peekable(); - if it.peek().is_none() { - return Cow::Borrowed(haystack); - } - let mut new = Vec::with_capacity(haystack.len()); - let mut last_match = 0; - for (i, m) in it { - new.extend_from_slice(&haystack[last_match..m.start()]); - new.extend_from_slice(&rep); - last_match = m.end(); - if limit > 0 && i >= limit - 1 { - break; - } - } - new.extend_from_slice(&haystack[last_match..]); - return Cow::Owned(new); - } - - // The slower path, which we use if the replacement needs access to - // capture groups. - let mut it = self.captures_iter(haystack).enumerate().peekable(); - if it.peek().is_none() { - return Cow::Borrowed(haystack); - } - let mut new = Vec::with_capacity(haystack.len()); - let mut last_match = 0; - for (i, cap) in it { - // unwrap on 0 is OK because captures only reports matches - let m = cap.get(0).unwrap(); - new.extend_from_slice(&haystack[last_match..m.start()]); - rep.replace_append(&cap, &mut new); - last_match = m.end(); - if limit > 0 && i >= limit - 1 { - break; - } - } - new.extend_from_slice(&haystack[last_match..]); - Cow::Owned(new) - } -} - -/// A group of advanced or "lower level" search methods. Some methods permit -/// starting the search at a position greater than `0` in the haystack. Other -/// methods permit reusing allocations, for example, when extracting the -/// matches for capture groups. 
-impl Regex { - /// Returns the end byte offset of the first match in the haystack given. - /// - /// This method may have the same performance characteristics as - /// `is_match`. Behaviorally, it doesn't just report whether it match - /// occurs, but also the end offset for a match. In particular, the offset - /// returned *may be shorter* than the proper end of the leftmost-first - /// match that you would find via [`Regex::find`]. - /// - /// Note that it is not guaranteed that this routine finds the shortest or - /// "earliest" possible match. Instead, the main idea of this API is that - /// it returns the offset at the point at which the internal regex engine - /// has determined that a match has occurred. This may vary depending on - /// which internal regex engine is used, and thus, the offset itself may - /// change based on internal heuristics. - /// - /// # Example - /// - /// Typically, `a+` would match the entire first sequence of `a` in some - /// haystack, but `shortest_match` *may* give up as soon as it sees the - /// first `a`. - /// - /// ``` - /// use regex::bytes::Regex; - /// - /// let re = Regex::new(r"a+").unwrap(); - /// let offset = re.shortest_match(b"aaaaa").unwrap(); - /// assert_eq!(offset, 1); - /// ``` - #[inline] - pub fn shortest_match(&self, haystack: &[u8]) -> Option<usize> { - self.shortest_match_at(haystack, 0) - } - - /// Returns the same as `shortest_match`, but starts the search at the - /// given offset. - /// - /// The significance of the starting point is that it takes the surrounding - /// context into consideration. For example, the `\A` anchor can only match - /// when `start == 0`. - /// - /// If a match is found, the offset returned is relative to the beginning - /// of the haystack, not the beginning of the search. - /// - /// # Panics - /// - /// This panics when `start >= haystack.len() + 1`. - /// - /// # Example - /// - /// This example shows the significance of `start` by demonstrating how it - /// can be used to permit look-around assertions in a regex to take the - /// surrounding context into account. - /// - /// ``` - /// use regex::bytes::Regex; - /// - /// let re = Regex::new(r"\bchew\b").unwrap(); - /// let hay = b"eschew"; - /// // We get a match here, but it's probably not intended. - /// assert_eq!(re.shortest_match(&hay[2..]), Some(4)); - /// // No match because the assertions take the context into account. - /// assert_eq!(re.shortest_match_at(hay, 2), None); - /// ``` - #[inline] - pub fn shortest_match_at( - &self, - haystack: &[u8], - start: usize, - ) -> Option<usize> { - let input = - Input::new(haystack).earliest(true).span(start..haystack.len()); - self.meta.search_half(&input).map(|hm| hm.offset()) - } - - /// Returns the same as [`Regex::is_match`], but starts the search at the - /// given offset. - /// - /// The significance of the starting point is that it takes the surrounding - /// context into consideration. For example, the `\A` anchor can only - /// match when `start == 0`. - /// - /// # Panics - /// - /// This panics when `start >= haystack.len() + 1`. - /// - /// # Example - /// - /// This example shows the significance of `start` by demonstrating how it - /// can be used to permit look-around assertions in a regex to take the - /// surrounding context into account. - /// - /// ``` - /// use regex::bytes::Regex; - /// - /// let re = Regex::new(r"\bchew\b").unwrap(); - /// let hay = b"eschew"; - /// // We get a match here, but it's probably not intended. 
- /// assert!(re.is_match(&hay[2..])); - /// // No match because the assertions take the context into account. - /// assert!(!re.is_match_at(hay, 2)); - /// ``` - #[inline] - pub fn is_match_at(&self, haystack: &[u8], start: usize) -> bool { - self.meta.is_match(Input::new(haystack).span(start..haystack.len())) - } - - /// Returns the same as [`Regex::find`], but starts the search at the given - /// offset. - /// - /// The significance of the starting point is that it takes the surrounding - /// context into consideration. For example, the `\A` anchor can only - /// match when `start == 0`. - /// - /// # Panics - /// - /// This panics when `start >= haystack.len() + 1`. - /// - /// # Example - /// - /// This example shows the significance of `start` by demonstrating how it - /// can be used to permit look-around assertions in a regex to take the - /// surrounding context into account. - /// - /// ``` - /// use regex::bytes::Regex; - /// - /// let re = Regex::new(r"\bchew\b").unwrap(); - /// let hay = b"eschew"; - /// // We get a match here, but it's probably not intended. - /// assert_eq!(re.find(&hay[2..]).map(|m| m.range()), Some(0..4)); - /// // No match because the assertions take the context into account. - /// assert_eq!(re.find_at(hay, 2), None); - /// ``` - #[inline] - pub fn find_at<'h>( - &self, - haystack: &'h [u8], - start: usize, - ) -> Option<Match<'h>> { - let input = Input::new(haystack).span(start..haystack.len()); - self.meta.find(input).map(|m| Match::new(haystack, m.start(), m.end())) - } - - /// Returns the same as [`Regex::captures`], but starts the search at the - /// given offset. - /// - /// The significance of the starting point is that it takes the surrounding - /// context into consideration. For example, the `\A` anchor can only - /// match when `start == 0`. - /// - /// # Panics - /// - /// This panics when `start >= haystack.len() + 1`. - /// - /// # Example - /// - /// This example shows the significance of `start` by demonstrating how it - /// can be used to permit look-around assertions in a regex to take the - /// surrounding context into account. - /// - /// ``` - /// use regex::bytes::Regex; - /// - /// let re = Regex::new(r"\bchew\b").unwrap(); - /// let hay = b"eschew"; - /// // We get a match here, but it's probably not intended. - /// assert_eq!(&re.captures(&hay[2..]).unwrap()[0], b"chew"); - /// // No match because the assertions take the context into account. - /// assert!(re.captures_at(hay, 2).is_none()); - /// ``` - #[inline] - pub fn captures_at<'h>( - &self, - haystack: &'h [u8], - start: usize, - ) -> Option<Captures<'h>> { - let input = Input::new(haystack).span(start..haystack.len()); - let mut caps = self.meta.create_captures(); - self.meta.captures(input, &mut caps); - if caps.is_match() { - let static_captures_len = self.static_captures_len(); - Some(Captures { haystack, caps, static_captures_len }) - } else { - None - } - } - - /// This is like [`Regex::captures`], but writes the byte offsets of each - /// capture group match into the locations given. - /// - /// A [`CaptureLocations`] stores the same byte offsets as a [`Captures`], - /// but does *not* store a reference to the haystack. This makes its API - /// a bit lower level and less convenient. But in exchange, callers - /// may allocate their own `CaptureLocations` and reuse it for multiple - /// searches. This may be helpful if allocating a `Captures` shows up in a - /// profile as too costly. 
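Reusing a single `CaptureLocations` across several searches, which is the allocation-amortizing pattern described above, can be sketched like this (pattern and haystacks invented):

```
use regex::bytes::Regex;

let re = Regex::new(r"([a-z]+)@([a-z]+)").unwrap();
// One set of capture locations, reused for every haystack.
let mut locs = re.capture_locations();
for hay in [&b"alice@example"[..], &b"bob@test"[..]] {
    if re.captures_read(&mut locs, hay).is_some() {
        assert!(locs.get(1).is_some());
        assert!(locs.get(2).is_some());
    }
}
```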
- /// - /// To create a `CaptureLocations` value, use the - /// [`Regex::capture_locations`] method. - /// - /// This also returns the overall match if one was found. When a match is - /// found, its offsets are also always stored in `locs` at index `0`. - /// - /// # Example - /// - /// ``` - /// use regex::bytes::Regex; - /// - /// let re = Regex::new(r"^([a-z]+)=(\S*)$").unwrap(); - /// let mut locs = re.capture_locations(); - /// assert!(re.captures_read(&mut locs, b"id=foo123").is_some()); - /// assert_eq!(Some((0, 9)), locs.get(0)); - /// assert_eq!(Some((0, 2)), locs.get(1)); - /// assert_eq!(Some((3, 9)), locs.get(2)); - /// ``` - #[inline] - pub fn captures_read<'h>( - &self, - locs: &mut CaptureLocations, - haystack: &'h [u8], - ) -> Option<Match<'h>> { - self.captures_read_at(locs, haystack, 0) - } - - /// Returns the same as [`Regex::captures_read`], but starts the search at - /// the given offset. - /// - /// The significance of the starting point is that it takes the surrounding - /// context into consideration. For example, the `\A` anchor can only - /// match when `start == 0`. - /// - /// # Panics - /// - /// This panics when `start >= haystack.len() + 1`. - /// - /// # Example - /// - /// This example shows the significance of `start` by demonstrating how it - /// can be used to permit look-around assertions in a regex to take the - /// surrounding context into account. - /// - /// ``` - /// use regex::bytes::Regex; - /// - /// let re = Regex::new(r"\bchew\b").unwrap(); - /// let hay = b"eschew"; - /// let mut locs = re.capture_locations(); - /// // We get a match here, but it's probably not intended. - /// assert!(re.captures_read(&mut locs, &hay[2..]).is_some()); - /// // No match because the assertions take the context into account. - /// assert!(re.captures_read_at(&mut locs, hay, 2).is_none()); - /// ``` - #[inline] - pub fn captures_read_at<'h>( - &self, - locs: &mut CaptureLocations, - haystack: &'h [u8], - start: usize, - ) -> Option<Match<'h>> { - let input = Input::new(haystack).span(start..haystack.len()); - self.meta.search_captures(&input, &mut locs.0); - locs.0.get_match().map(|m| Match::new(haystack, m.start(), m.end())) - } - - /// An undocumented alias for `captures_read_at`. - /// - /// The `regex-capi` crate previously used this routine, so to avoid - /// breaking that crate, we continue to provide the name as an undocumented - /// alias. - #[doc(hidden)] - #[inline] - pub fn read_captures_at<'h>( - &self, - locs: &mut CaptureLocations, - haystack: &'h [u8], - start: usize, - ) -> Option<Match<'h>> { - self.captures_read_at(locs, haystack, start) - } -} - -/// Auxiliary methods. -impl Regex { - /// Returns the original string of this regex. - /// - /// # Example - /// - /// ``` - /// use regex::bytes::Regex; - /// - /// let re = Regex::new(r"foo\w+bar").unwrap(); - /// assert_eq!(re.as_str(), r"foo\w+bar"); - /// ``` - #[inline] - pub fn as_str(&self) -> &str { - &self.pattern - } - - /// Returns an iterator over the capture names in this regex. - /// - /// The iterator returned yields elements of type `Option<&str>`. That is, - /// the iterator yields values for all capture groups, even ones that are - /// unnamed. The order of the groups corresponds to the order of the group's - /// corresponding opening parenthesis. - /// - /// The first element of the iterator always yields the group corresponding - /// to the overall match, and this group is always unnamed. Therefore, the - /// iterator always yields at least one group. 
- /// - /// # Example - /// - /// This shows basic usage with a mix of named and unnamed capture groups: - /// - /// ``` - /// use regex::bytes::Regex; - /// - /// let re = Regex::new(r"(?<a>.(?<b>.))(.)(?:.)(?<c>.)").unwrap(); - /// let mut names = re.capture_names(); - /// assert_eq!(names.next(), Some(None)); - /// assert_eq!(names.next(), Some(Some("a"))); - /// assert_eq!(names.next(), Some(Some("b"))); - /// assert_eq!(names.next(), Some(None)); - /// // the '(?:.)' group is non-capturing and so doesn't appear here! - /// assert_eq!(names.next(), Some(Some("c"))); - /// assert_eq!(names.next(), None); - /// ``` - /// - /// The iterator always yields at least one element, even for regexes with - /// no capture groups and even for regexes that can never match: - /// - /// ``` - /// use regex::bytes::Regex; - /// - /// let re = Regex::new(r"").unwrap(); - /// let mut names = re.capture_names(); - /// assert_eq!(names.next(), Some(None)); - /// assert_eq!(names.next(), None); - /// - /// let re = Regex::new(r"[a&&b]").unwrap(); - /// let mut names = re.capture_names(); - /// assert_eq!(names.next(), Some(None)); - /// assert_eq!(names.next(), None); - /// ``` - #[inline] - pub fn capture_names(&self) -> CaptureNames<'_> { - CaptureNames(self.meta.group_info().pattern_names(PatternID::ZERO)) - } - - /// Returns the number of captures groups in this regex. - /// - /// This includes all named and unnamed groups, including the implicit - /// unnamed group that is always present and corresponds to the entire - /// match. - /// - /// Since the implicit unnamed group is always included in this length, the - /// length returned is guaranteed to be greater than zero. - /// - /// # Example - /// - /// ``` - /// use regex::bytes::Regex; - /// - /// let re = Regex::new(r"foo").unwrap(); - /// assert_eq!(1, re.captures_len()); - /// - /// let re = Regex::new(r"(foo)").unwrap(); - /// assert_eq!(2, re.captures_len()); - /// - /// let re = Regex::new(r"(?<a>.(?<b>.))(.)(?:.)(?<c>.)").unwrap(); - /// assert_eq!(5, re.captures_len()); - /// - /// let re = Regex::new(r"[a&&b]").unwrap(); - /// assert_eq!(1, re.captures_len()); - /// ``` - #[inline] - pub fn captures_len(&self) -> usize { - self.meta.group_info().group_len(PatternID::ZERO) - } - - /// Returns the total number of capturing groups that appear in every - /// possible match. - /// - /// If the number of capture groups can vary depending on the match, then - /// this returns `None`. That is, a value is only returned when the number - /// of matching groups is invariant or "static." - /// - /// Note that like [`Regex::captures_len`], this **does** include the - /// implicit capturing group corresponding to the entire match. Therefore, - /// when a non-None value is returned, it is guaranteed to be at least `1`. - /// Stated differently, a return value of `Some(0)` is impossible. - /// - /// # Example - /// - /// This shows a few cases where a static number of capture groups is - /// available and a few cases where it is not. 
- /// - /// ``` - /// use regex::bytes::Regex; - /// - /// let len = |pattern| { - /// Regex::new(pattern).map(|re| re.static_captures_len()) - /// }; - /// - /// assert_eq!(Some(1), len("a")?); - /// assert_eq!(Some(2), len("(a)")?); - /// assert_eq!(Some(2), len("(a)|(b)")?); - /// assert_eq!(Some(3), len("(a)(b)|(c)(d)")?); - /// assert_eq!(None, len("(a)|b")?); - /// assert_eq!(None, len("a|(b)")?); - /// assert_eq!(None, len("(b)*")?); - /// assert_eq!(Some(2), len("(b)+")?); - /// - /// # Ok::<(), Box<dyn std::error::Error>>(()) - /// ``` - #[inline] - pub fn static_captures_len(&self) -> Option<usize> { - self.meta.static_captures_len() - } - - /// Returns a fresh allocated set of capture locations that can - /// be reused in multiple calls to [`Regex::captures_read`] or - /// [`Regex::captures_read_at`]. - /// - /// # Example - /// - /// ``` - /// use regex::bytes::Regex; - /// - /// let re = Regex::new(r"(.)(.)(\w+)").unwrap(); - /// let mut locs = re.capture_locations(); - /// assert!(re.captures_read(&mut locs, b"Padron").is_some()); - /// assert_eq!(locs.get(0), Some((0, 6))); - /// assert_eq!(locs.get(1), Some((0, 1))); - /// assert_eq!(locs.get(2), Some((1, 2))); - /// assert_eq!(locs.get(3), Some((2, 6))); - /// ``` - #[inline] - pub fn capture_locations(&self) -> CaptureLocations { - CaptureLocations(self.meta.create_captures()) - } - - /// An alias for `capture_locations` to preserve backward compatibility. - /// - /// The `regex-capi` crate uses this method, so to avoid breaking that - /// crate, we continue to export it as an undocumented API. - #[doc(hidden)] - #[inline] - pub fn locations(&self) -> CaptureLocations { - self.capture_locations() - } -} - -/// Represents a single match of a regex in a haystack. -/// -/// A `Match` contains both the start and end byte offsets of the match and the -/// actual substring corresponding to the range of those byte offsets. It is -/// guaranteed that `start <= end`. When `start == end`, the match is empty. -/// -/// Unlike the top-level `Match` type, this `Match` type is produced by APIs -/// that search `&[u8]` haystacks. This means that the offsets in a `Match` can -/// point to anywhere in the haystack, including in a place that splits the -/// UTF-8 encoding of a Unicode scalar value. -/// -/// The lifetime parameter `'h` refers to the lifetime of the matched of the -/// haystack that this match was produced from. -/// -/// # Numbering -/// -/// The byte offsets in a `Match` form a half-open interval. That is, the -/// start of the range is inclusive and the end of the range is exclusive. -/// For example, given a haystack `abcFOOxyz` and a match of `FOO`, its byte -/// offset range starts at `3` and ends at `6`. `3` corresponds to `F` and -/// `6` corresponds to `x`, which is one past the end of the match. This -/// corresponds to the same kind of slicing that Rust uses. -/// -/// For more on why this was chosen over other schemes (aside from being -/// consistent with how Rust the language works), see [this discussion] and -/// [Dijkstra's note on a related topic][note]. -/// -/// [this discussion]: https://github.com/rust-lang/regex/discussions/866 -/// [note]: https://www.cs.utexas.edu/users/EWD/transcriptions/EWD08xx/EWD831.html -/// -/// # Example -/// -/// This example shows the value of each of the methods on `Match` for a -/// particular search. 
-/// -/// ``` -/// use regex::bytes::Regex; -/// -/// let re = Regex::new(r"\p{Greek}+").unwrap(); -/// let hay = "Greek: αβγδ".as_bytes(); -/// let m = re.find(hay).unwrap(); -/// assert_eq!(7, m.start()); -/// assert_eq!(15, m.end()); -/// assert!(!m.is_empty()); -/// assert_eq!(8, m.len()); -/// assert_eq!(7..15, m.range()); -/// assert_eq!("αβγδ".as_bytes(), m.as_bytes()); -/// ``` -#[derive(Copy, Clone, Eq, PartialEq)] -pub struct Match<'h> { - haystack: &'h [u8], - start: usize, - end: usize, -} - -impl<'h> Match<'h> { - /// Returns the byte offset of the start of the match in the haystack. The - /// start of the match corresponds to the position where the match begins - /// and includes the first byte in the match. - /// - /// It is guaranteed that `Match::start() <= Match::end()`. - /// - /// Unlike the top-level `Match` type, the start offset may appear anywhere - /// in the haystack. This includes between the code units of a UTF-8 - /// encoded Unicode scalar value. - #[inline] - pub fn start(&self) -> usize { - self.start - } - - /// Returns the byte offset of the end of the match in the haystack. The - /// end of the match corresponds to the byte immediately following the last - /// byte in the match. This means that `&slice[start..end]` works as one - /// would expect. - /// - /// It is guaranteed that `Match::start() <= Match::end()`. - /// - /// Unlike the top-level `Match` type, the start offset may appear anywhere - /// in the haystack. This includes between the code units of a UTF-8 - /// encoded Unicode scalar value. - #[inline] - pub fn end(&self) -> usize { - self.end - } - - /// Returns true if and only if this match has a length of zero. - /// - /// Note that an empty match can only occur when the regex itself can - /// match the empty string. Here are some examples of regexes that can - /// all match the empty string: `^`, `^$`, `\b`, `a?`, `a*`, `a{0}`, - /// `(foo|\d+|quux)?`. - #[inline] - pub fn is_empty(&self) -> bool { - self.start == self.end - } - - /// Returns the length, in bytes, of this match. - #[inline] - pub fn len(&self) -> usize { - self.end - self.start - } - - /// Returns the range over the starting and ending byte offsets of the - /// match in the haystack. - #[inline] - pub fn range(&self) -> core::ops::Range<usize> { - self.start..self.end - } - - /// Returns the substring of the haystack that matched. - #[inline] - pub fn as_bytes(&self) -> &'h [u8] { - &self.haystack[self.range()] - } - - /// Creates a new match from the given haystack and byte offsets. - #[inline] - fn new(haystack: &'h [u8], start: usize, end: usize) -> Match<'h> { - Match { haystack, start, end } - } -} - -impl<'h> core::fmt::Debug for Match<'h> { - fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { - use regex_automata::util::escape::DebugHaystack; - - let mut fmt = f.debug_struct("Match"); - fmt.field("start", &self.start) - .field("end", &self.end) - .field("bytes", &DebugHaystack(&self.as_bytes())); - - fmt.finish() - } -} - -impl<'h> From<Match<'h>> for &'h [u8] { - fn from(m: Match<'h>) -> &'h [u8] { - m.as_bytes() - } -} - -impl<'h> From<Match<'h>> for core::ops::Range<usize> { - fn from(m: Match<'h>) -> core::ops::Range<usize> { - m.range() - } -} - -/// Represents the capture groups for a single match. -/// -/// Capture groups refer to parts of a regex enclosed in parentheses. They -/// can be optionally named. The purpose of capture groups is to be able to -/// reference different parts of a match based on the original pattern. 
In -/// essence, a `Captures` is a container of [`Match`] values for each group -/// that participated in a regex match. Each `Match` can be looked up by either -/// its capture group index or name (if it has one). -/// -/// For example, say you want to match the individual letters in a 5-letter -/// word: -/// -/// ```text -/// (?<first>\w)(\w)(?:\w)\w(?<last>\w) -/// ``` -/// -/// This regex has 4 capture groups: -/// -/// * The group at index `0` corresponds to the overall match. It is always -/// present in every match and never has a name. -/// * The group at index `1` with name `first` corresponding to the first -/// letter. -/// * The group at index `2` with no name corresponding to the second letter. -/// * The group at index `3` with name `last` corresponding to the fifth and -/// last letter. -/// -/// Notice that `(?:\w)` was not listed above as a capture group despite it -/// being enclosed in parentheses. That's because `(?:pattern)` is a special -/// syntax that permits grouping but *without* capturing. The reason for not -/// treating it as a capture is that tracking and reporting capture groups -/// requires additional state that may lead to slower searches. So using as few -/// capture groups as possible can help performance. (Although the difference -/// in performance of a couple of capture groups is likely immaterial.) -/// -/// Values with this type are created by [`Regex::captures`] or -/// [`Regex::captures_iter`]. -/// -/// `'h` is the lifetime of the haystack that these captures were matched from. -/// -/// # Example -/// -/// ``` -/// use regex::bytes::Regex; -/// -/// let re = Regex::new(r"(?<first>\w)(\w)(?:\w)\w(?<last>\w)").unwrap(); -/// let caps = re.captures(b"toady").unwrap(); -/// assert_eq!(b"toady", &caps[0]); -/// assert_eq!(b"t", &caps["first"]); -/// assert_eq!(b"o", &caps[2]); -/// assert_eq!(b"y", &caps["last"]); -/// ``` -pub struct Captures<'h> { - haystack: &'h [u8], - caps: captures::Captures, - static_captures_len: Option<usize>, -} - -impl<'h> Captures<'h> { - /// Returns the `Match` associated with the capture group at index `i`. If - /// `i` does not correspond to a capture group, or if the capture group did - /// not participate in the match, then `None` is returned. - /// - /// When `i == 0`, this is guaranteed to return a non-`None` value. - /// - /// # Examples - /// - /// Get the substring that matched with a default of an empty string if the - /// group didn't participate in the match: - /// - /// ``` - /// use regex::bytes::Regex; - /// - /// let re = Regex::new(r"[a-z]+(?:([0-9]+)|([A-Z]+))").unwrap(); - /// let caps = re.captures(b"abc123").unwrap(); - /// - /// let substr1 = caps.get(1).map_or(&b""[..], |m| m.as_bytes()); - /// let substr2 = caps.get(2).map_or(&b""[..], |m| m.as_bytes()); - /// assert_eq!(substr1, b"123"); - /// assert_eq!(substr2, b""); - /// ``` - #[inline] - pub fn get(&self, i: usize) -> Option<Match<'h>> { - self.caps - .get_group(i) - .map(|sp| Match::new(self.haystack, sp.start, sp.end)) - } - - /// Return the overall match for the capture. - /// - /// This returns the match for index `0`. 
That is it is equivalent to - /// `m.get(0).unwrap()` - /// - /// # Example - /// - /// ``` - /// use regex::bytes::Regex; - /// - /// let re = Regex::new(r"[a-z]+([0-9]+)").unwrap(); - /// let caps = re.captures(b" abc123-def").unwrap(); - /// - /// assert_eq!(caps.get_match().as_bytes(), b"abc123"); - /// ``` - #[inline] - pub fn get_match(&self) -> Match<'h> { - self.get(0).unwrap() - } - - /// Returns the `Match` associated with the capture group named `name`. If - /// `name` isn't a valid capture group or it refers to a group that didn't - /// match, then `None` is returned. - /// - /// Note that unlike `caps["name"]`, this returns a `Match` whose lifetime - /// matches the lifetime of the haystack in this `Captures` value. - /// Conversely, the substring returned by `caps["name"]` has a lifetime - /// of the `Captures` value, which is likely shorter than the lifetime of - /// the haystack. In some cases, it may be necessary to use this method to - /// access the matching substring instead of the `caps["name"]` notation. - /// - /// # Examples - /// - /// Get the substring that matched with a default of an empty string if the - /// group didn't participate in the match: - /// - /// ``` - /// use regex::bytes::Regex; - /// - /// let re = Regex::new( - /// r"[a-z]+(?:(?<numbers>[0-9]+)|(?<letters>[A-Z]+))", - /// ).unwrap(); - /// let caps = re.captures(b"abc123").unwrap(); - /// - /// let numbers = caps.name("numbers").map_or(&b""[..], |m| m.as_bytes()); - /// let letters = caps.name("letters").map_or(&b""[..], |m| m.as_bytes()); - /// assert_eq!(numbers, b"123"); - /// assert_eq!(letters, b""); - /// ``` - #[inline] - pub fn name(&self, name: &str) -> Option<Match<'h>> { - self.caps - .get_group_by_name(name) - .map(|sp| Match::new(self.haystack, sp.start, sp.end)) - } - - /// This is a convenience routine for extracting the substrings - /// corresponding to matching capture groups. - /// - /// This returns a tuple where the first element corresponds to the full - /// substring of the haystack that matched the regex. The second element is - /// an array of substrings, with each corresponding to the substring that - /// matched for a particular capture group. - /// - /// # Panics - /// - /// This panics if the number of possible matching groups in this - /// `Captures` value is not fixed to `N` in all circumstances. - /// More precisely, this routine only works when `N` is equivalent to - /// [`Regex::static_captures_len`]. - /// - /// Stated more plainly, if the number of matching capture groups in a - /// regex can vary from match to match, then this function always panics. - /// - /// For example, `(a)(b)|(c)` could produce two matching capture groups - /// or one matching capture group for any given match. Therefore, one - /// cannot use `extract` with such a pattern. - /// - /// But a pattern like `(a)(b)|(c)(d)` can be used with `extract` because - /// the number of capture groups in every match is always equivalent, - /// even if the capture _indices_ in each match are not. 
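The alternation case mentioned above can be spelled out concretely; a minimal sketch with an invented haystack, where each branch has two groups, so `extract` is allowed even though different group indices participate per match:

```
use regex::bytes::Regex;

let re = Regex::new(r"(a)(b)|(c)(d)").unwrap();
// The match comes from the second branch, so groups 3 and 4 participate,
// but `extract` still yields exactly two group substrings.
let (full, [first, second]) = re.captures(b"xxcdxx").unwrap().extract();
assert_eq!(b"cd", full);
assert_eq!(b"c", first);
assert_eq!(b"d", second);
```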
- /// - /// # Example - /// - /// ``` - /// use regex::bytes::Regex; - /// - /// let re = Regex::new(r"([0-9]{4})-([0-9]{2})-([0-9]{2})").unwrap(); - /// let hay = b"On 2010-03-14, I became a Tennessee lamb."; - /// let Some((full, [year, month, day])) = - /// re.captures(hay).map(|caps| caps.extract()) else { return }; - /// assert_eq!(b"2010-03-14", full); - /// assert_eq!(b"2010", year); - /// assert_eq!(b"03", month); - /// assert_eq!(b"14", day); - /// ``` - /// - /// # Example: iteration - /// - /// This example shows how to use this method when iterating over all - /// `Captures` matches in a haystack. - /// - /// ``` - /// use regex::bytes::Regex; - /// - /// let re = Regex::new(r"([0-9]{4})-([0-9]{2})-([0-9]{2})").unwrap(); - /// let hay = b"1973-01-05, 1975-08-25 and 1980-10-18"; - /// - /// let mut dates: Vec<(&[u8], &[u8], &[u8])> = vec![]; - /// for (_, [y, m, d]) in re.captures_iter(hay).map(|c| c.extract()) { - /// dates.push((y, m, d)); - /// } - /// assert_eq!(dates, vec![ - /// (&b"1973"[..], &b"01"[..], &b"05"[..]), - /// (&b"1975"[..], &b"08"[..], &b"25"[..]), - /// (&b"1980"[..], &b"10"[..], &b"18"[..]), - /// ]); - /// ``` - /// - /// # Example: parsing different formats - /// - /// This API is particularly useful when you need to extract a particular - /// value that might occur in a different format. Consider, for example, - /// an identifier that might be in double quotes or single quotes: - /// - /// ``` - /// use regex::bytes::Regex; - /// - /// let re = Regex::new(r#"id:(?:"([^"]+)"|'([^']+)')"#).unwrap(); - /// let hay = br#"The first is id:"foo" and the second is id:'bar'."#; - /// let mut ids = vec![]; - /// for (_, [id]) in re.captures_iter(hay).map(|c| c.extract()) { - /// ids.push(id); - /// } - /// assert_eq!(ids, vec![b"foo", b"bar"]); - /// ``` - pub fn extract<const N: usize>(&self) -> (&'h [u8], [&'h [u8]; N]) { - let len = self - .static_captures_len - .expect("number of capture groups can vary in a match") - .checked_sub(1) - .expect("number of groups is always greater than zero"); - assert_eq!(N, len, "asked for {N} groups, but must ask for {len}"); - // The regex-automata variant of extract is a bit more permissive. - // It doesn't require the number of matching capturing groups to be - // static, and you can even request fewer groups than what's there. So - // this is guaranteed to never panic because we've asserted above that - // the user has requested precisely the number of groups that must be - // present in any match for this regex. - self.caps.extract_bytes(self.haystack) - } - - /// Expands all instances of `$ref` in `replacement` to the corresponding - /// capture group, and writes them to the `dst` buffer given. A `ref` can - /// be a capture group index or a name. If `ref` doesn't refer to a capture - /// group that participated in the match, then it is replaced with the - /// empty string. - /// - /// # Format - /// - /// The format of the replacement string supports two different kinds of - /// capture references: unbraced and braced. - /// - /// For the unbraced format, the format supported is `$ref` where `name` - /// can be any character in the class `[0-9A-Za-z_]`. `ref` is always - /// the longest possible parse. So for example, `$1a` corresponds to the - /// capture group named `1a` and not the capture group at index `1`. If - /// `ref` matches `^[0-9]+$`, then it is treated as a capture group index - /// itself and not a name. 
- /// - /// For the braced format, the format supported is `${ref}` where `ref` can - /// be any sequence of bytes except for `}`. If no closing brace occurs, - /// then it is not considered a capture reference. As with the unbraced - /// format, if `ref` matches `^[0-9]+$`, then it is treated as a capture - /// group index and not a name. - /// - /// The braced format is useful for exerting precise control over the name - /// of the capture reference. For example, `${1}a` corresponds to the - /// capture group reference `1` followed by the letter `a`, where as `$1a` - /// (as mentioned above) corresponds to the capture group reference `1a`. - /// The braced format is also useful for expressing capture group names - /// that use characters not supported by the unbraced format. For example, - /// `${foo[bar].baz}` refers to the capture group named `foo[bar].baz`. - /// - /// If a capture group reference is found and it does not refer to a valid - /// capture group, then it will be replaced with the empty string. - /// - /// To write a literal `$`, use `$$`. - /// - /// # Example - /// - /// ``` - /// use regex::bytes::Regex; - /// - /// let re = Regex::new( - /// r"(?<day>[0-9]{2})-(?<month>[0-9]{2})-(?<year>[0-9]{4})", - /// ).unwrap(); - /// let hay = b"On 14-03-2010, I became a Tennessee lamb."; - /// let caps = re.captures(hay).unwrap(); - /// - /// let mut dst = vec![]; - /// caps.expand(b"year=$year, month=$month, day=$day", &mut dst); - /// assert_eq!(dst, b"year=2010, month=03, day=14"); - /// ``` - #[inline] - pub fn expand(&self, replacement: &[u8], dst: &mut Vec<u8>) { - self.caps.interpolate_bytes_into(self.haystack, replacement, dst); - } - - /// Returns an iterator over all capture groups. This includes both - /// matching and non-matching groups. - /// - /// The iterator always yields at least one matching group: the first group - /// (at index `0`) with no name. Subsequent groups are returned in the order - /// of their opening parenthesis in the regex. - /// - /// The elements yielded have type `Option<Match<'h>>`, where a non-`None` - /// value is present if the capture group matches. - /// - /// # Example - /// - /// ``` - /// use regex::bytes::Regex; - /// - /// let re = Regex::new(r"(\w)(\d)?(\w)").unwrap(); - /// let caps = re.captures(b"AZ").unwrap(); - /// - /// let mut it = caps.iter(); - /// assert_eq!(it.next().unwrap().map(|m| m.as_bytes()), Some(&b"AZ"[..])); - /// assert_eq!(it.next().unwrap().map(|m| m.as_bytes()), Some(&b"A"[..])); - /// assert_eq!(it.next().unwrap().map(|m| m.as_bytes()), None); - /// assert_eq!(it.next().unwrap().map(|m| m.as_bytes()), Some(&b"Z"[..])); - /// assert_eq!(it.next(), None); - /// ``` - #[inline] - pub fn iter<'c>(&'c self) -> SubCaptureMatches<'c, 'h> { - SubCaptureMatches { haystack: self.haystack, it: self.caps.iter() } - } - - /// Returns the total number of capture groups. This includes both - /// matching and non-matching groups. - /// - /// The length returned is always equivalent to the number of elements - /// yielded by [`Captures::iter`]. Consequently, the length is always - /// greater than zero since every `Captures` value always includes the - /// match for the entire regex. 
- /// - /// # Example - /// - /// ``` - /// use regex::bytes::Regex; - /// - /// let re = Regex::new(r"(\w)(\d)?(\w)").unwrap(); - /// let caps = re.captures(b"AZ").unwrap(); - /// assert_eq!(caps.len(), 4); - /// ``` - #[inline] - pub fn len(&self) -> usize { - self.caps.group_len() - } -} - -impl<'h> core::fmt::Debug for Captures<'h> { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - /// A little helper type to provide a nice map-like debug - /// representation for our capturing group spans. - /// - /// regex-automata has something similar, but it includes the pattern - /// ID in its debug output, which is confusing. It also doesn't include - /// that strings that match because a regex-automata `Captures` doesn't - /// borrow the haystack. - struct CapturesDebugMap<'a> { - caps: &'a Captures<'a>, - } - - impl<'a> core::fmt::Debug for CapturesDebugMap<'a> { - fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { - let mut map = f.debug_map(); - let names = - self.caps.caps.group_info().pattern_names(PatternID::ZERO); - for (group_index, maybe_name) in names.enumerate() { - let key = Key(group_index, maybe_name); - match self.caps.get(group_index) { - None => map.entry(&key, &None::<()>), - Some(mat) => map.entry(&key, &Value(mat)), - }; - } - map.finish() - } - } - - struct Key<'a>(usize, Option<&'a str>); - - impl<'a> core::fmt::Debug for Key<'a> { - fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { - write!(f, "{}", self.0)?; - if let Some(name) = self.1 { - write!(f, "/{name:?}")?; - } - Ok(()) - } - } - - struct Value<'a>(Match<'a>); - - impl<'a> core::fmt::Debug for Value<'a> { - fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { - use regex_automata::util::escape::DebugHaystack; - - write!( - f, - "{}..{}/{:?}", - self.0.start(), - self.0.end(), - DebugHaystack(self.0.as_bytes()) - ) - } - } - - f.debug_tuple("Captures") - .field(&CapturesDebugMap { caps: self }) - .finish() - } -} - -/// Get a matching capture group's haystack substring by index. -/// -/// The haystack substring returned can't outlive the `Captures` object if this -/// method is used, because of how `Index` is defined (normally `a[i]` is part -/// of `a` and can't outlive it). To work around this limitation, do that, use -/// [`Captures::get`] instead. -/// -/// `'h` is the lifetime of the matched haystack, but the lifetime of the -/// `&str` returned by this implementation is the lifetime of the `Captures` -/// value itself. -/// -/// # Panics -/// -/// If there is no matching group at the given index. -impl<'h> core::ops::Index<usize> for Captures<'h> { - type Output = [u8]; - - // The lifetime is written out to make it clear that the &str returned - // does NOT have a lifetime equivalent to 'h. - fn index<'a>(&'a self, i: usize) -> &'a [u8] { - self.get(i) - .map(|m| m.as_bytes()) - .unwrap_or_else(|| panic!("no group at index '{i}'")) - } -} - -/// Get a matching capture group's haystack substring by name. -/// -/// The haystack substring returned can't outlive the `Captures` object if this -/// method is used, because of how `Index` is defined (normally `a[i]` is part -/// of `a` and can't outlive it). To work around this limitation, do that, use -/// [`Captures::name`] instead. -/// -/// `'h` is the lifetime of the matched haystack, but the lifetime of the -/// `&str` returned by this implementation is the lifetime of the `Captures` -/// value itself. -/// -/// `'n` is the lifetime of the group name used to index the `Captures` value. 
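The lifetime workaround mentioned above (preferring `Captures::get` or `Captures::name` over indexing when the substring must outlive the `Captures` value) looks like this in practice; a minimal sketch with an invented pattern:

```
use regex::bytes::Regex;

let re = Regex::new(r"[a-z]+").unwrap();
let hay: &[u8] = b"lifetimes";
// `get`/`name` return matches that borrow the haystack, so they can
// outlive the `Captures` value; `&caps[0]` would borrow `caps` instead.
let m = {
    let caps = re.captures(hay).unwrap();
    caps.get(0).unwrap()
};
assert_eq!(m.as_bytes(), hay);
```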
-/// -/// # Panics -/// -/// If there is no matching group at the given name. -impl<'h, 'n> core::ops::Index<&'n str> for Captures<'h> { - type Output = [u8]; - - fn index<'a>(&'a self, name: &'n str) -> &'a [u8] { - self.name(name) - .map(|m| m.as_bytes()) - .unwrap_or_else(|| panic!("no group named '{name}'")) - } -} - -/// A low level representation of the byte offsets of each capture group. -/// -/// You can think of this as a lower level [`Captures`], where this type does -/// not support named capturing groups directly and it does not borrow the -/// haystack that these offsets were matched on. -/// -/// Primarily, this type is useful when using the lower level `Regex` APIs such -/// as [`Regex::captures_read`], which permits amortizing the allocation in -/// which capture match offsets are stored. -/// -/// In order to build a value of this type, you'll need to call the -/// [`Regex::capture_locations`] method. The value returned can then be reused -/// in subsequent searches for that regex. Using it for other regexes may -/// result in a panic or otherwise incorrect results. -/// -/// # Example -/// -/// This example shows how to create and use `CaptureLocations` in a search. -/// -/// ``` -/// use regex::bytes::Regex; -/// -/// let re = Regex::new(r"(?<first>\w+)\s+(?<last>\w+)").unwrap(); -/// let mut locs = re.capture_locations(); -/// let m = re.captures_read(&mut locs, b"Bruce Springsteen").unwrap(); -/// assert_eq!(0..17, m.range()); -/// assert_eq!(Some((0, 17)), locs.get(0)); -/// assert_eq!(Some((0, 5)), locs.get(1)); -/// assert_eq!(Some((6, 17)), locs.get(2)); -/// -/// // Asking for an invalid capture group always returns None. -/// assert_eq!(None, locs.get(3)); -/// # // literals are too big for 32-bit usize: #1041 -/// # #[cfg(target_pointer_width = "64")] -/// assert_eq!(None, locs.get(34973498648)); -/// # #[cfg(target_pointer_width = "64")] -/// assert_eq!(None, locs.get(9944060567225171988)); -/// ``` -#[derive(Clone, Debug)] -pub struct CaptureLocations(captures::Captures); - -/// A type alias for `CaptureLocations` for backwards compatibility. -/// -/// Previously, we exported `CaptureLocations` as `Locations` in an -/// undocumented API. To prevent breaking that code (e.g., in `regex-capi`), -/// we continue re-exporting the same undocumented API. -#[doc(hidden)] -pub type Locations = CaptureLocations; - -impl CaptureLocations { - /// Returns the start and end byte offsets of the capture group at index - /// `i`. This returns `None` if `i` is not a valid capture group or if the - /// capture group did not match. - /// - /// # Example - /// - /// ``` - /// use regex::bytes::Regex; - /// - /// let re = Regex::new(r"(?<first>\w+)\s+(?<last>\w+)").unwrap(); - /// let mut locs = re.capture_locations(); - /// re.captures_read(&mut locs, b"Bruce Springsteen").unwrap(); - /// assert_eq!(Some((0, 17)), locs.get(0)); - /// assert_eq!(Some((0, 5)), locs.get(1)); - /// assert_eq!(Some((6, 17)), locs.get(2)); - /// ``` - #[inline] - pub fn get(&self, i: usize) -> Option<(usize, usize)> { - self.0.get_group(i).map(|sp| (sp.start, sp.end)) - } - - /// Returns the total number of capture groups (even if they didn't match). - /// That is, the length returned is unaffected by the result of a search. - /// - /// This is always at least `1` since every regex has at least `1` - /// capturing group that corresponds to the entire match. 
- /// - /// # Example - /// - /// ``` - /// use regex::bytes::Regex; - /// - /// let re = Regex::new(r"(?<first>\w+)\s+(?<last>\w+)").unwrap(); - /// let mut locs = re.capture_locations(); - /// assert_eq!(3, locs.len()); - /// re.captures_read(&mut locs, b"Bruce Springsteen").unwrap(); - /// assert_eq!(3, locs.len()); - /// ``` - /// - /// Notice that the length is always at least `1`, regardless of the regex: - /// - /// ``` - /// use regex::bytes::Regex; - /// - /// let re = Regex::new(r"").unwrap(); - /// let locs = re.capture_locations(); - /// assert_eq!(1, locs.len()); - /// - /// // [a&&b] is a regex that never matches anything. - /// let re = Regex::new(r"[a&&b]").unwrap(); - /// let locs = re.capture_locations(); - /// assert_eq!(1, locs.len()); - /// ``` - #[inline] - pub fn len(&self) -> usize { - // self.0.group_len() returns 0 if the underlying captures doesn't - // represent a match, but the behavior guaranteed for this method is - // that the length doesn't change based on a match or not. - self.0.group_info().group_len(PatternID::ZERO) - } - - /// An alias for the `get` method for backwards compatibility. - /// - /// Previously, we exported `get` as `pos` in an undocumented API. To - /// prevent breaking that code (e.g., in `regex-capi`), we continue - /// re-exporting the same undocumented API. - #[doc(hidden)] - #[inline] - pub fn pos(&self, i: usize) -> Option<(usize, usize)> { - self.get(i) - } -} - -/// An iterator over all non-overlapping matches in a haystack. -/// -/// This iterator yields [`Match`] values. The iterator stops when no more -/// matches can be found. -/// -/// `'r` is the lifetime of the compiled regular expression and `'h` is the -/// lifetime of the haystack. -/// -/// This iterator is created by [`Regex::find_iter`]. -/// -/// # Time complexity -/// -/// Note that since an iterator runs potentially many searches on the haystack -/// and since each search has worst case `O(m * n)` time complexity, the -/// overall worst case time complexity for iteration is `O(m * n^2)`. -#[derive(Debug)] -pub struct Matches<'r, 'h> { - haystack: &'h [u8], - it: meta::FindMatches<'r, 'h>, -} - -impl<'r, 'h> Iterator for Matches<'r, 'h> { - type Item = Match<'h>; - - #[inline] - fn next(&mut self) -> Option<Match<'h>> { - self.it - .next() - .map(|sp| Match::new(self.haystack, sp.start(), sp.end())) - } - - #[inline] - fn count(self) -> usize { - // This can actually be up to 2x faster than calling `next()` until - // completion, because counting matches when using a DFA only requires - // finding the end of each match. But returning a `Match` via `next()` - // requires the start of each match which, with a DFA, requires a - // reverse forward scan to find it. - self.it.count() - } -} - -impl<'r, 'h> core::iter::FusedIterator for Matches<'r, 'h> {} - -/// An iterator over all non-overlapping capture matches in a haystack. -/// -/// This iterator yields [`Captures`] values. The iterator stops when no more -/// matches can be found. -/// -/// `'r` is the lifetime of the compiled regular expression and `'h` is the -/// lifetime of the matched string. -/// -/// This iterator is created by [`Regex::captures_iter`]. -/// -/// # Time complexity -/// -/// Note that since an iterator runs potentially many searches on the haystack -/// and since each search has worst case `O(m * n)` time complexity, the -/// overall worst case time complexity for iteration is `O(m * n^2)`. 
-#[derive(Debug)] -pub struct CaptureMatches<'r, 'h> { - haystack: &'h [u8], - it: meta::CapturesMatches<'r, 'h>, -} - -impl<'r, 'h> Iterator for CaptureMatches<'r, 'h> { - type Item = Captures<'h>; - - #[inline] - fn next(&mut self) -> Option<Captures<'h>> { - let static_captures_len = self.it.regex().static_captures_len(); - self.it.next().map(|caps| Captures { - haystack: self.haystack, - caps, - static_captures_len, - }) - } - - #[inline] - fn count(self) -> usize { - // This can actually be up to 2x faster than calling `next()` until - // completion, because counting matches when using a DFA only requires - // finding the end of each match. But returning a `Match` via `next()` - // requires the start of each match which, with a DFA, requires a - // reverse forward scan to find it. - self.it.count() - } -} - -impl<'r, 'h> core::iter::FusedIterator for CaptureMatches<'r, 'h> {} - -/// An iterator over all substrings delimited by a regex match. -/// -/// `'r` is the lifetime of the compiled regular expression and `'h` is the -/// lifetime of the byte string being split. -/// -/// This iterator is created by [`Regex::split`]. -/// -/// # Time complexity -/// -/// Note that since an iterator runs potentially many searches on the haystack -/// and since each search has worst case `O(m * n)` time complexity, the -/// overall worst case time complexity for iteration is `O(m * n^2)`. -#[derive(Debug)] -pub struct Split<'r, 'h> { - haystack: &'h [u8], - it: meta::Split<'r, 'h>, -} - -impl<'r, 'h> Iterator for Split<'r, 'h> { - type Item = &'h [u8]; - - #[inline] - fn next(&mut self) -> Option<&'h [u8]> { - self.it.next().map(|span| &self.haystack[span]) - } -} - -impl<'r, 'h> core::iter::FusedIterator for Split<'r, 'h> {} - -/// An iterator over at most `N` substrings delimited by a regex match. -/// -/// The last substring yielded by this iterator will be whatever remains after -/// `N-1` splits. -/// -/// `'r` is the lifetime of the compiled regular expression and `'h` is the -/// lifetime of the byte string being split. -/// -/// This iterator is created by [`Regex::splitn`]. -/// -/// # Time complexity -/// -/// Note that since an iterator runs potentially many searches on the haystack -/// and since each search has worst case `O(m * n)` time complexity, the -/// overall worst case time complexity for iteration is `O(m * n^2)`. -/// -/// Although note that the worst case time here has an upper bound given -/// by the `limit` parameter to [`Regex::splitn`]. -#[derive(Debug)] -pub struct SplitN<'r, 'h> { - haystack: &'h [u8], - it: meta::SplitN<'r, 'h>, -} - -impl<'r, 'h> Iterator for SplitN<'r, 'h> { - type Item = &'h [u8]; - - #[inline] - fn next(&mut self) -> Option<&'h [u8]> { - self.it.next().map(|span| &self.haystack[span]) - } - - #[inline] - fn size_hint(&self) -> (usize, Option<usize>) { - self.it.size_hint() - } -} - -impl<'r, 'h> core::iter::FusedIterator for SplitN<'r, 'h> {} - -/// An iterator over the names of all capture groups in a regex. -/// -/// This iterator yields values of type `Option<&str>` in order of the opening -/// capture group parenthesis in the regex pattern. `None` is yielded for -/// groups with no name. The first element always corresponds to the implicit -/// and unnamed group for the overall match. -/// -/// `'r` is the lifetime of the compiled regular expression. -/// -/// This iterator is created by [`Regex::capture_names`]. 
-#[derive(Clone, Debug)] -pub struct CaptureNames<'r>(captures::GroupInfoPatternNames<'r>); - -impl<'r> Iterator for CaptureNames<'r> { - type Item = Option<&'r str>; - - #[inline] - fn next(&mut self) -> Option<Option<&'r str>> { - self.0.next() - } - - #[inline] - fn size_hint(&self) -> (usize, Option<usize>) { - self.0.size_hint() - } - - #[inline] - fn count(self) -> usize { - self.0.count() - } -} - -impl<'r> ExactSizeIterator for CaptureNames<'r> {} - -impl<'r> core::iter::FusedIterator for CaptureNames<'r> {} - -/// An iterator over all group matches in a [`Captures`] value. -/// -/// This iterator yields values of type `Option<Match<'h>>`, where `'h` is the -/// lifetime of the haystack that the matches are for. The order of elements -/// yielded corresponds to the order of the opening parenthesis for the group -/// in the regex pattern. `None` is yielded for groups that did not participate -/// in the match. -/// -/// The first element always corresponds to the implicit group for the overall -/// match. Since this iterator is created by a [`Captures`] value, and a -/// `Captures` value is only created when a match occurs, it follows that the -/// first element yielded by this iterator is guaranteed to be non-`None`. -/// -/// The lifetime `'c` corresponds to the lifetime of the `Captures` value that -/// created this iterator, and the lifetime `'h` corresponds to the originally -/// matched haystack. -#[derive(Clone, Debug)] -pub struct SubCaptureMatches<'c, 'h> { - haystack: &'h [u8], - it: captures::CapturesPatternIter<'c>, -} - -impl<'c, 'h> Iterator for SubCaptureMatches<'c, 'h> { - type Item = Option<Match<'h>>; - - #[inline] - fn next(&mut self) -> Option<Option<Match<'h>>> { - self.it.next().map(|group| { - group.map(|sp| Match::new(self.haystack, sp.start, sp.end)) - }) - } - - #[inline] - fn size_hint(&self) -> (usize, Option<usize>) { - self.it.size_hint() - } - - #[inline] - fn count(self) -> usize { - self.it.count() - } -} - -impl<'c, 'h> ExactSizeIterator for SubCaptureMatches<'c, 'h> {} - -impl<'c, 'h> core::iter::FusedIterator for SubCaptureMatches<'c, 'h> {} - -/// A trait for types that can be used to replace matches in a haystack. -/// -/// In general, users of this crate shouldn't need to implement this trait, -/// since implementations are already provided for `&[u8]` along with other -/// variants of byte string types, as well as `FnMut(&Captures) -> Vec<u8>` (or -/// any `FnMut(&Captures) -> T` where `T: AsRef<[u8]>`). Those cover most use -/// cases, but callers can implement this trait directly if necessary. -/// -/// # Example -/// -/// This example shows a basic implementation of the `Replacer` trait. This can -/// be done much more simply using the replacement byte string interpolation -/// support (e.g., `$first $last`), but this approach avoids needing to parse -/// the replacement byte string at all. -/// -/// ``` -/// use regex::bytes::{Captures, Regex, Replacer}; -/// -/// struct NameSwapper; -/// -/// impl Replacer for NameSwapper { -/// fn replace_append(&mut self, caps: &Captures<'_>, dst: &mut Vec<u8>) { -/// dst.extend_from_slice(&caps["first"]); -/// dst.extend_from_slice(b" "); -/// dst.extend_from_slice(&caps["last"]); -/// } -/// } -/// -/// let re = Regex::new(r"(?<last>[^,\s]+),\s+(?<first>\S+)").unwrap(); -/// let result = re.replace(b"Springsteen, Bruce", NameSwapper); -/// assert_eq!(result, &b"Bruce Springsteen"[..]); -/// ``` -pub trait Replacer { - /// Appends possibly empty data to `dst` to replace the current match. 
- /// - /// The current match is represented by `caps`, which is guaranteed to have - /// a match at capture group `0`. - /// - /// For example, a no-op replacement would be - /// `dst.extend_from_slice(&caps[0])`. - fn replace_append(&mut self, caps: &Captures<'_>, dst: &mut Vec<u8>); - - /// Return a fixed unchanging replacement byte string. - /// - /// When doing replacements, if access to [`Captures`] is not needed (e.g., - /// the replacement byte string does not need `$` expansion), then it can - /// be beneficial to avoid finding sub-captures. - /// - /// In general, this is called once for every call to a replacement routine - /// such as [`Regex::replace_all`]. - fn no_expansion<'r>(&'r mut self) -> Option<Cow<'r, [u8]>> { - None - } - - /// Returns a type that implements `Replacer`, but that borrows and wraps - /// this `Replacer`. - /// - /// This is useful when you want to take a generic `Replacer` (which might - /// not be cloneable) and use it without consuming it, so it can be used - /// more than once. - /// - /// # Example - /// - /// ``` - /// use regex::bytes::{Regex, Replacer}; - /// - /// fn replace_all_twice<R: Replacer>( - /// re: Regex, - /// src: &[u8], - /// mut rep: R, - /// ) -> Vec<u8> { - /// let dst = re.replace_all(src, rep.by_ref()); - /// let dst = re.replace_all(&dst, rep.by_ref()); - /// dst.into_owned() - /// } - /// ``` - fn by_ref<'r>(&'r mut self) -> ReplacerRef<'r, Self> { - ReplacerRef(self) - } -} - -impl<'a, const N: usize> Replacer for &'a [u8; N] { - fn replace_append(&mut self, caps: &Captures<'_>, dst: &mut Vec<u8>) { - caps.expand(&**self, dst); - } - - fn no_expansion(&mut self) -> Option<Cow<'_, [u8]>> { - no_expansion(self) - } -} - -impl<const N: usize> Replacer for [u8; N] { - fn replace_append(&mut self, caps: &Captures<'_>, dst: &mut Vec<u8>) { - caps.expand(&*self, dst); - } - - fn no_expansion(&mut self) -> Option<Cow<'_, [u8]>> { - no_expansion(self) - } -} - -impl<'a> Replacer for &'a [u8] { - fn replace_append(&mut self, caps: &Captures<'_>, dst: &mut Vec<u8>) { - caps.expand(*self, dst); - } - - fn no_expansion(&mut self) -> Option<Cow<'_, [u8]>> { - no_expansion(self) - } -} - -impl<'a> Replacer for &'a Vec<u8> { - fn replace_append(&mut self, caps: &Captures<'_>, dst: &mut Vec<u8>) { - caps.expand(*self, dst); - } - - fn no_expansion(&mut self) -> Option<Cow<'_, [u8]>> { - no_expansion(self) - } -} - -impl Replacer for Vec<u8> { - fn replace_append(&mut self, caps: &Captures<'_>, dst: &mut Vec<u8>) { - caps.expand(self, dst); - } - - fn no_expansion(&mut self) -> Option<Cow<'_, [u8]>> { - no_expansion(self) - } -} - -impl<'a> Replacer for Cow<'a, [u8]> { - fn replace_append(&mut self, caps: &Captures<'_>, dst: &mut Vec<u8>) { - caps.expand(self.as_ref(), dst); - } - - fn no_expansion(&mut self) -> Option<Cow<'_, [u8]>> { - no_expansion(self) - } -} - -impl<'a> Replacer for &'a Cow<'a, [u8]> { - fn replace_append(&mut self, caps: &Captures<'_>, dst: &mut Vec<u8>) { - caps.expand(self.as_ref(), dst); - } - - fn no_expansion(&mut self) -> Option<Cow<'_, [u8]>> { - no_expansion(self) - } -} - -impl<F, T> Replacer for F -where - F: FnMut(&Captures<'_>) -> T, - T: AsRef<[u8]>, -{ - fn replace_append(&mut self, caps: &Captures<'_>, dst: &mut Vec<u8>) { - dst.extend_from_slice((*self)(caps).as_ref()); - } -} - -/// A by-reference adaptor for a [`Replacer`]. -/// -/// This permits reusing the same `Replacer` value in multiple calls to a -/// replacement routine like [`Regex::replace_all`]. 
-/// -/// This type is created by [`Replacer::by_ref`]. -#[derive(Debug)] -pub struct ReplacerRef<'a, R: ?Sized>(&'a mut R); - -impl<'a, R: Replacer + ?Sized + 'a> Replacer for ReplacerRef<'a, R> { - fn replace_append(&mut self, caps: &Captures<'_>, dst: &mut Vec<u8>) { - self.0.replace_append(caps, dst) - } - - fn no_expansion<'r>(&'r mut self) -> Option<Cow<'r, [u8]>> { - self.0.no_expansion() - } -} - -/// A helper type for forcing literal string replacement. -/// -/// It can be used with routines like [`Regex::replace`] and -/// [`Regex::replace_all`] to do a literal string replacement without expanding -/// `$name` to their corresponding capture groups. This can be both convenient -/// (to avoid escaping `$`, for example) and faster (since capture groups -/// don't need to be found). -/// -/// `'s` is the lifetime of the literal string to use. -/// -/// # Example -/// -/// ``` -/// use regex::bytes::{NoExpand, Regex}; -/// -/// let re = Regex::new(r"(?<last>[^,\s]+),\s+(\S+)").unwrap(); -/// let result = re.replace(b"Springsteen, Bruce", NoExpand(b"$2 $last")); -/// assert_eq!(result, &b"$2 $last"[..]); -/// ``` -#[derive(Clone, Debug)] -pub struct NoExpand<'s>(pub &'s [u8]); - -impl<'s> Replacer for NoExpand<'s> { - fn replace_append(&mut self, _: &Captures<'_>, dst: &mut Vec<u8>) { - dst.extend_from_slice(self.0); - } - - fn no_expansion(&mut self) -> Option<Cow<'_, [u8]>> { - Some(Cow::Borrowed(self.0)) - } -} - -/// Quickly checks the given replacement string for whether interpolation -/// should be done on it. It returns `None` if a `$` was found anywhere in the -/// given string, which suggests interpolation needs to be done. But if there's -/// no `$` anywhere, then interpolation definitely does not need to be done. In -/// that case, the given string is returned as a borrowed `Cow`. -/// -/// This is meant to be used to implement the `Replacer::no_expansion` method -/// in its various trait impls. -fn no_expansion<T: AsRef<[u8]>>(replacement: &T) -> Option<Cow<'_, [u8]>> { - let replacement = replacement.as_ref(); - match crate::find_byte::find_byte(b'$', replacement) { - Some(_) => None, - None => Some(Cow::Borrowed(replacement)), - } -} - -#[cfg(test)] -mod tests { - use super::*; - use alloc::format; - - #[test] - fn test_match_properties() { - let haystack = b"Hello, world!"; - let m = Match::new(haystack, 7, 12); - - assert_eq!(m.start(), 7); - assert_eq!(m.end(), 12); - assert_eq!(m.is_empty(), false); - assert_eq!(m.len(), 5); - assert_eq!(m.as_bytes(), b"world"); - } - - #[test] - fn test_empty_match() { - let haystack = b""; - let m = Match::new(haystack, 0, 0); - - assert_eq!(m.is_empty(), true); - assert_eq!(m.len(), 0); - } - - #[test] - fn test_debug_output_valid_utf8() { - let haystack = b"Hello, world!"; - let m = Match::new(haystack, 7, 12); - let debug_str = format!("{m:?}"); - - assert_eq!( - debug_str, - r#"Match { start: 7, end: 12, bytes: "world" }"# - ); - } - - #[test] - fn test_debug_output_invalid_utf8() { - let haystack = b"Hello, \xFFworld!"; - let m = Match::new(haystack, 7, 13); - let debug_str = format!("{m:?}"); - - assert_eq!( - debug_str, - r#"Match { start: 7, end: 13, bytes: "\xffworld" }"# - ); - } - - #[test] - fn test_debug_output_various_unicode() { - let haystack = - "Hello, 😊 world! 안녕하세요? مرحبا بالعالم!".as_bytes(); - let m = Match::new(haystack, 0, haystack.len()); - let debug_str = format!("{m:?}"); - - assert_eq!( - debug_str, - r#"Match { start: 0, end: 62, bytes: "Hello, 😊 world! 안녕하세요? مرحبا بالعالم!" 
}"# - ); - } - - #[test] - fn test_debug_output_ascii_escape() { - let haystack = b"Hello,\tworld!\nThis is a \x1b[31mtest\x1b[0m."; - let m = Match::new(haystack, 0, haystack.len()); - let debug_str = format!("{m:?}"); - - assert_eq!( - debug_str, - r#"Match { start: 0, end: 38, bytes: "Hello,\tworld!\nThis is a \u{1b}[31mtest\u{1b}[0m." }"# - ); - } - - #[test] - fn test_debug_output_match_in_middle() { - let haystack = b"The quick brown fox jumps over the lazy dog."; - let m = Match::new(haystack, 16, 19); - let debug_str = format!("{m:?}"); - - assert_eq!(debug_str, r#"Match { start: 16, end: 19, bytes: "fox" }"#); - } -} diff --git a/vendor/regex/src/regex/mod.rs b/vendor/regex/src/regex/mod.rs deleted file mode 100644 index 93fadec8bf65c9..00000000000000 --- a/vendor/regex/src/regex/mod.rs +++ /dev/null @@ -1,2 +0,0 @@ -pub(crate) mod bytes; -pub(crate) mod string; diff --git a/vendor/regex/src/regex/string.rs b/vendor/regex/src/regex/string.rs deleted file mode 100644 index e066d7630cf0e4..00000000000000 --- a/vendor/regex/src/regex/string.rs +++ /dev/null @@ -1,2625 +0,0 @@ -use alloc::{borrow::Cow, string::String, sync::Arc}; - -use regex_automata::{meta, util::captures, Input, PatternID}; - -use crate::{error::Error, RegexBuilder}; - -/// A compiled regular expression for searching Unicode haystacks. -/// -/// A `Regex` can be used to search haystacks, split haystacks into substrings -/// or replace substrings in a haystack with a different substring. All -/// searching is done with an implicit `(?s:.)*?` at the beginning and end of -/// an pattern. To force an expression to match the whole string (or a prefix -/// or a suffix), you must use an anchor like `^` or `$` (or `\A` and `\z`). -/// -/// While this crate will handle Unicode strings (whether in the regular -/// expression or in the haystack), all positions returned are **byte -/// offsets**. Every byte offset is guaranteed to be at a Unicode code point -/// boundary. That is, all offsets returned by the `Regex` API are guaranteed -/// to be ranges that can slice a `&str` without panicking. If you want to -/// relax this requirement, then you must search `&[u8]` haystacks with a -/// [`bytes::Regex`](crate::bytes::Regex). -/// -/// The only methods that allocate new strings are the string replacement -/// methods. All other methods (searching and splitting) return borrowed -/// references into the haystack given. -/// -/// # Example -/// -/// Find the offsets of a US phone number: -/// -/// ``` -/// use regex::Regex; -/// -/// let re = Regex::new("[0-9]{3}-[0-9]{3}-[0-9]{4}").unwrap(); -/// let m = re.find("phone: 111-222-3333").unwrap(); -/// assert_eq!(7..19, m.range()); -/// ``` -/// -/// # Example: extracting capture groups -/// -/// A common way to use regexes is with capture groups. That is, instead of -/// just looking for matches of an entire regex, parentheses are used to create -/// groups that represent part of the match. -/// -/// For example, consider a haystack with multiple lines, and each line has -/// three whitespace delimited fields where the second field is expected to be -/// a number and the third field a boolean. 
To make this convenient, we use -/// the [`Captures::extract`] API to put the strings that match each group -/// into a fixed size array: -/// -/// ``` -/// use regex::Regex; -/// -/// let hay = " -/// rabbit 54 true -/// groundhog 2 true -/// does not match -/// fox 109 false -/// "; -/// let re = Regex::new(r"(?m)^\s*(\S+)\s+([0-9]+)\s+(true|false)\s*$").unwrap(); -/// let mut fields: Vec<(&str, i64, bool)> = vec![]; -/// for (_, [f1, f2, f3]) in re.captures_iter(hay).map(|caps| caps.extract()) { -/// fields.push((f1, f2.parse()?, f3.parse()?)); -/// } -/// assert_eq!(fields, vec![ -/// ("rabbit", 54, true), -/// ("groundhog", 2, true), -/// ("fox", 109, false), -/// ]); -/// -/// # Ok::<(), Box<dyn std::error::Error>>(()) -/// ``` -/// -/// # Example: searching with the `Pattern` trait -/// -/// **Note**: This section requires that this crate is compiled with the -/// `pattern` Cargo feature enabled, which **requires nightly Rust**. -/// -/// Since `Regex` implements `Pattern` from the standard library, one can -/// use regexes with methods defined on `&str`. For example, `is_match`, -/// `find`, `find_iter` and `split` can, in some cases, be replaced with -/// `str::contains`, `str::find`, `str::match_indices` and `str::split`. -/// -/// Here are some examples: -/// -/// ```ignore -/// use regex::Regex; -/// -/// let re = Regex::new(r"\d+").unwrap(); -/// let hay = "a111b222c"; -/// -/// assert!(hay.contains(&re)); -/// assert_eq!(hay.find(&re), Some(1)); -/// assert_eq!(hay.match_indices(&re).collect::<Vec<_>>(), vec![ -/// (1, "111"), -/// (5, "222"), -/// ]); -/// assert_eq!(hay.split(&re).collect::<Vec<_>>(), vec!["a", "b", "c"]); -/// ``` -#[derive(Clone)] -pub struct Regex { - pub(crate) meta: meta::Regex, - pub(crate) pattern: Arc<str>, -} - -impl core::fmt::Display for Regex { - /// Shows the original regular expression. - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - write!(f, "{}", self.as_str()) - } -} - -impl core::fmt::Debug for Regex { - /// Shows the original regular expression. - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - f.debug_tuple("Regex").field(&self.as_str()).finish() - } -} - -impl core::str::FromStr for Regex { - type Err = Error; - - /// Attempts to parse a string into a regular expression - fn from_str(s: &str) -> Result<Regex, Error> { - Regex::new(s) - } -} - -impl TryFrom<&str> for Regex { - type Error = Error; - - /// Attempts to parse a string into a regular expression - fn try_from(s: &str) -> Result<Regex, Error> { - Regex::new(s) - } -} - -impl TryFrom<String> for Regex { - type Error = Error; - - /// Attempts to parse a string into a regular expression - fn try_from(s: String) -> Result<Regex, Error> { - Regex::new(&s) - } -} - -/// Core regular expression methods. -impl Regex { - /// Compiles a regular expression. Once compiled, it can be used repeatedly - /// to search, split or replace substrings in a haystack. - /// - /// Note that regex compilation tends to be a somewhat expensive process, - /// and unlike higher level environments, compilation is not automatically - /// cached for you. One should endeavor to compile a regex once and then - /// reuse it. For example, it's a bad idea to compile the same regex - /// repeatedly in a loop. - /// - /// # Errors - /// - /// If an invalid pattern is given, then an error is returned. 
- /// An error is also returned if the pattern is valid, but would - /// produce a regex that is bigger than the configured size limit via - /// [`RegexBuilder::size_limit`]. (A reasonable size limit is enabled by - /// default.) - /// - /// # Example - /// - /// ``` - /// use regex::Regex; - /// - /// // An Invalid pattern because of an unclosed parenthesis - /// assert!(Regex::new(r"foo(bar").is_err()); - /// // An invalid pattern because the regex would be too big - /// // because Unicode tends to inflate things. - /// assert!(Regex::new(r"\w{1000}").is_err()); - /// // Disabling Unicode can make the regex much smaller, - /// // potentially by up to or more than an order of magnitude. - /// assert!(Regex::new(r"(?-u:\w){1000}").is_ok()); - /// ``` - pub fn new(re: &str) -> Result<Regex, Error> { - RegexBuilder::new(re).build() - } - - /// Returns true if and only if there is a match for the regex anywhere - /// in the haystack given. - /// - /// It is recommended to use this method if all you need to do is test - /// whether a match exists, since the underlying matching engine may be - /// able to do less work. - /// - /// # Example - /// - /// Test if some haystack contains at least one word with exactly 13 - /// Unicode word characters: - /// - /// ``` - /// use regex::Regex; - /// - /// let re = Regex::new(r"\b\w{13}\b").unwrap(); - /// let hay = "I categorically deny having triskaidekaphobia."; - /// assert!(re.is_match(hay)); - /// ``` - #[inline] - pub fn is_match(&self, haystack: &str) -> bool { - self.is_match_at(haystack, 0) - } - - /// This routine searches for the first match of this regex in the - /// haystack given, and if found, returns a [`Match`]. The `Match` - /// provides access to both the byte offsets of the match and the actual - /// substring that matched. - /// - /// Note that this should only be used if you want to find the entire - /// match. If instead you just want to test the existence of a match, - /// it's potentially faster to use `Regex::is_match(hay)` instead of - /// `Regex::find(hay).is_some()`. - /// - /// # Example - /// - /// Find the first word with exactly 13 Unicode word characters: - /// - /// ``` - /// use regex::Regex; - /// - /// let re = Regex::new(r"\b\w{13}\b").unwrap(); - /// let hay = "I categorically deny having triskaidekaphobia."; - /// let mat = re.find(hay).unwrap(); - /// assert_eq!(2..15, mat.range()); - /// assert_eq!("categorically", mat.as_str()); - /// ``` - #[inline] - pub fn find<'h>(&self, haystack: &'h str) -> Option<Match<'h>> { - self.find_at(haystack, 0) - } - - /// Returns an iterator that yields successive non-overlapping matches in - /// the given haystack. The iterator yields values of type [`Match`]. - /// - /// # Time complexity - /// - /// Note that since `find_iter` runs potentially many searches on the - /// haystack and since each search has worst case `O(m * n)` time - /// complexity, the overall worst case time complexity for iteration is - /// `O(m * n^2)`. 
- /// - /// # Example - /// - /// Find every word with exactly 13 Unicode word characters: - /// - /// ``` - /// use regex::Regex; - /// - /// let re = Regex::new(r"\b\w{13}\b").unwrap(); - /// let hay = "Retroactively relinquishing remunerations is reprehensible."; - /// let matches: Vec<_> = re.find_iter(hay).map(|m| m.as_str()).collect(); - /// assert_eq!(matches, vec![ - /// "Retroactively", - /// "relinquishing", - /// "remunerations", - /// "reprehensible", - /// ]); - /// ``` - #[inline] - pub fn find_iter<'r, 'h>(&'r self, haystack: &'h str) -> Matches<'r, 'h> { - Matches { haystack, it: self.meta.find_iter(haystack) } - } - - /// This routine searches for the first match of this regex in the haystack - /// given, and if found, returns not only the overall match but also the - /// matches of each capture group in the regex. If no match is found, then - /// `None` is returned. - /// - /// Capture group `0` always corresponds to an implicit unnamed group that - /// includes the entire match. If a match is found, this group is always - /// present. Subsequent groups may be named and are numbered, starting - /// at 1, by the order in which the opening parenthesis appears in the - /// pattern. For example, in the pattern `(?<a>.(?<b>.))(?<c>.)`, `a`, - /// `b` and `c` correspond to capture group indices `1`, `2` and `3`, - /// respectively. - /// - /// You should only use `captures` if you need access to the capture group - /// matches. Otherwise, [`Regex::find`] is generally faster for discovering - /// just the overall match. - /// - /// # Example - /// - /// Say you have some haystack with movie names and their release years, - /// like "'Citizen Kane' (1941)". It'd be nice if we could search for - /// substrings looking like that, while also extracting the movie name and - /// its release year separately. The example below shows how to do that. - /// - /// ``` - /// use regex::Regex; - /// - /// let re = Regex::new(r"'([^']+)'\s+\((\d{4})\)").unwrap(); - /// let hay = "Not my favorite movie: 'Citizen Kane' (1941)."; - /// let caps = re.captures(hay).unwrap(); - /// assert_eq!(caps.get(0).unwrap().as_str(), "'Citizen Kane' (1941)"); - /// assert_eq!(caps.get(1).unwrap().as_str(), "Citizen Kane"); - /// assert_eq!(caps.get(2).unwrap().as_str(), "1941"); - /// // You can also access the groups by index using the Index notation. - /// // Note that this will panic on an invalid index. In this case, these - /// // accesses are always correct because the overall regex will only - /// // match when these capture groups match. - /// assert_eq!(&caps[0], "'Citizen Kane' (1941)"); - /// assert_eq!(&caps[1], "Citizen Kane"); - /// assert_eq!(&caps[2], "1941"); - /// ``` - /// - /// Note that the full match is at capture group `0`. Each subsequent - /// capture group is indexed by the order of its opening `(`. - /// - /// We can make this example a bit clearer by using *named* capture groups: - /// - /// ``` - /// use regex::Regex; - /// - /// let re = Regex::new(r"'(?<title>[^']+)'\s+\((?<year>\d{4})\)").unwrap(); - /// let hay = "Not my favorite movie: 'Citizen Kane' (1941)."; - /// let caps = re.captures(hay).unwrap(); - /// assert_eq!(caps.get(0).unwrap().as_str(), "'Citizen Kane' (1941)"); - /// assert_eq!(caps.name("title").unwrap().as_str(), "Citizen Kane"); - /// assert_eq!(caps.name("year").unwrap().as_str(), "1941"); - /// // You can also access the groups by name using the Index notation. - /// // Note that this will panic on an invalid group name. 
In this case, - /// // these accesses are always correct because the overall regex will - /// // only match when these capture groups match. - /// assert_eq!(&caps[0], "'Citizen Kane' (1941)"); - /// assert_eq!(&caps["title"], "Citizen Kane"); - /// assert_eq!(&caps["year"], "1941"); - /// ``` - /// - /// Here we name the capture groups, which we can access with the `name` - /// method or the `Index` notation with a `&str`. Note that the named - /// capture groups are still accessible with `get` or the `Index` notation - /// with a `usize`. - /// - /// The `0`th capture group is always unnamed, so it must always be - /// accessed with `get(0)` or `[0]`. - /// - /// Finally, one other way to get the matched substrings is with the - /// [`Captures::extract`] API: - /// - /// ``` - /// use regex::Regex; - /// - /// let re = Regex::new(r"'([^']+)'\s+\((\d{4})\)").unwrap(); - /// let hay = "Not my favorite movie: 'Citizen Kane' (1941)."; - /// let (full, [title, year]) = re.captures(hay).unwrap().extract(); - /// assert_eq!(full, "'Citizen Kane' (1941)"); - /// assert_eq!(title, "Citizen Kane"); - /// assert_eq!(year, "1941"); - /// ``` - #[inline] - pub fn captures<'h>(&self, haystack: &'h str) -> Option<Captures<'h>> { - self.captures_at(haystack, 0) - } - - /// Returns an iterator that yields successive non-overlapping matches in - /// the given haystack. The iterator yields values of type [`Captures`]. - /// - /// This is the same as [`Regex::find_iter`], but instead of only providing - /// access to the overall match, each value yield includes access to the - /// matches of all capture groups in the regex. Reporting this extra match - /// data is potentially costly, so callers should only use `captures_iter` - /// over `find_iter` when they actually need access to the capture group - /// matches. - /// - /// # Time complexity - /// - /// Note that since `captures_iter` runs potentially many searches on the - /// haystack and since each search has worst case `O(m * n)` time - /// complexity, the overall worst case time complexity for iteration is - /// `O(m * n^2)`. 
- /// - /// # Example - /// - /// We can use this to find all movie titles and their release years in - /// some haystack, where the movie is formatted like "'Title' (xxxx)": - /// - /// ``` - /// use regex::Regex; - /// - /// let re = Regex::new(r"'([^']+)'\s+\(([0-9]{4})\)").unwrap(); - /// let hay = "'Citizen Kane' (1941), 'The Wizard of Oz' (1939), 'M' (1931)."; - /// let mut movies = vec![]; - /// for (_, [title, year]) in re.captures_iter(hay).map(|c| c.extract()) { - /// movies.push((title, year.parse::<i64>()?)); - /// } - /// assert_eq!(movies, vec![ - /// ("Citizen Kane", 1941), - /// ("The Wizard of Oz", 1939), - /// ("M", 1931), - /// ]); - /// # Ok::<(), Box<dyn std::error::Error>>(()) - /// ``` - /// - /// Or with named groups: - /// - /// ``` - /// use regex::Regex; - /// - /// let re = Regex::new(r"'(?<title>[^']+)'\s+\((?<year>[0-9]{4})\)").unwrap(); - /// let hay = "'Citizen Kane' (1941), 'The Wizard of Oz' (1939), 'M' (1931)."; - /// let mut it = re.captures_iter(hay); - /// - /// let caps = it.next().unwrap(); - /// assert_eq!(&caps["title"], "Citizen Kane"); - /// assert_eq!(&caps["year"], "1941"); - /// - /// let caps = it.next().unwrap(); - /// assert_eq!(&caps["title"], "The Wizard of Oz"); - /// assert_eq!(&caps["year"], "1939"); - /// - /// let caps = it.next().unwrap(); - /// assert_eq!(&caps["title"], "M"); - /// assert_eq!(&caps["year"], "1931"); - /// ``` - #[inline] - pub fn captures_iter<'r, 'h>( - &'r self, - haystack: &'h str, - ) -> CaptureMatches<'r, 'h> { - CaptureMatches { haystack, it: self.meta.captures_iter(haystack) } - } - - /// Returns an iterator of substrings of the haystack given, delimited by a - /// match of the regex. Namely, each element of the iterator corresponds to - /// a part of the haystack that *isn't* matched by the regular expression. - /// - /// # Time complexity - /// - /// Since iterators over all matches requires running potentially many - /// searches on the haystack, and since each search has worst case - /// `O(m * n)` time complexity, the overall worst case time complexity for - /// this routine is `O(m * n^2)`. 
- /// - /// # Example - /// - /// To split a string delimited by arbitrary amounts of spaces or tabs: - /// - /// ``` - /// use regex::Regex; - /// - /// let re = Regex::new(r"[ \t]+").unwrap(); - /// let hay = "a b \t c\td e"; - /// let fields: Vec<&str> = re.split(hay).collect(); - /// assert_eq!(fields, vec!["a", "b", "c", "d", "e"]); - /// ``` - /// - /// # Example: more cases - /// - /// Basic usage: - /// - /// ``` - /// use regex::Regex; - /// - /// let re = Regex::new(r" ").unwrap(); - /// let hay = "Mary had a little lamb"; - /// let got: Vec<&str> = re.split(hay).collect(); - /// assert_eq!(got, vec!["Mary", "had", "a", "little", "lamb"]); - /// - /// let re = Regex::new(r"X").unwrap(); - /// let hay = ""; - /// let got: Vec<&str> = re.split(hay).collect(); - /// assert_eq!(got, vec![""]); - /// - /// let re = Regex::new(r"X").unwrap(); - /// let hay = "lionXXtigerXleopard"; - /// let got: Vec<&str> = re.split(hay).collect(); - /// assert_eq!(got, vec!["lion", "", "tiger", "leopard"]); - /// - /// let re = Regex::new(r"::").unwrap(); - /// let hay = "lion::tiger::leopard"; - /// let got: Vec<&str> = re.split(hay).collect(); - /// assert_eq!(got, vec!["lion", "tiger", "leopard"]); - /// ``` - /// - /// If a haystack contains multiple contiguous matches, you will end up - /// with empty spans yielded by the iterator: - /// - /// ``` - /// use regex::Regex; - /// - /// let re = Regex::new(r"X").unwrap(); - /// let hay = "XXXXaXXbXc"; - /// let got: Vec<&str> = re.split(hay).collect(); - /// assert_eq!(got, vec!["", "", "", "", "a", "", "b", "c"]); - /// - /// let re = Regex::new(r"/").unwrap(); - /// let hay = "(///)"; - /// let got: Vec<&str> = re.split(hay).collect(); - /// assert_eq!(got, vec!["(", "", "", ")"]); - /// ``` - /// - /// Separators at the start or end of a haystack are neighbored by empty - /// substring. - /// - /// ``` - /// use regex::Regex; - /// - /// let re = Regex::new(r"0").unwrap(); - /// let hay = "010"; - /// let got: Vec<&str> = re.split(hay).collect(); - /// assert_eq!(got, vec!["", "1", ""]); - /// ``` - /// - /// When the empty string is used as a regex, it splits at every valid - /// UTF-8 boundary by default (which includes the beginning and end of the - /// haystack): - /// - /// ``` - /// use regex::Regex; - /// - /// let re = Regex::new(r"").unwrap(); - /// let hay = "rust"; - /// let got: Vec<&str> = re.split(hay).collect(); - /// assert_eq!(got, vec!["", "r", "u", "s", "t", ""]); - /// - /// // Splitting by an empty string is UTF-8 aware by default! - /// let re = Regex::new(r"").unwrap(); - /// let hay = "☃"; - /// let got: Vec<&str> = re.split(hay).collect(); - /// assert_eq!(got, vec!["", "☃", ""]); - /// ``` - /// - /// Contiguous separators (commonly shows up with whitespace), can lead to - /// possibly surprising behavior. For example, this code is correct: - /// - /// ``` - /// use regex::Regex; - /// - /// let re = Regex::new(r" ").unwrap(); - /// let hay = " a b c"; - /// let got: Vec<&str> = re.split(hay).collect(); - /// assert_eq!(got, vec!["", "", "", "", "a", "", "b", "c"]); - /// ``` - /// - /// It does *not* give you `["a", "b", "c"]`. For that behavior, you'd want - /// to match contiguous space characters: - /// - /// ``` - /// use regex::Regex; - /// - /// let re = Regex::new(r" +").unwrap(); - /// let hay = " a b c"; - /// let got: Vec<&str> = re.split(hay).collect(); - /// // N.B. This does still include a leading empty span because ' +' - /// // matches at the beginning of the haystack. 
- /// assert_eq!(got, vec!["", "a", "b", "c"]); - /// ``` - #[inline] - pub fn split<'r, 'h>(&'r self, haystack: &'h str) -> Split<'r, 'h> { - Split { haystack, it: self.meta.split(haystack) } - } - - /// Returns an iterator of at most `limit` substrings of the haystack - /// given, delimited by a match of the regex. (A `limit` of `0` will return - /// no substrings.) Namely, each element of the iterator corresponds to a - /// part of the haystack that *isn't* matched by the regular expression. - /// The remainder of the haystack that is not split will be the last - /// element in the iterator. - /// - /// # Time complexity - /// - /// Since iterators over all matches requires running potentially many - /// searches on the haystack, and since each search has worst case - /// `O(m * n)` time complexity, the overall worst case time complexity for - /// this routine is `O(m * n^2)`. - /// - /// Although note that the worst case time here has an upper bound given - /// by the `limit` parameter. - /// - /// # Example - /// - /// Get the first two words in some haystack: - /// - /// ``` - /// use regex::Regex; - /// - /// let re = Regex::new(r"\W+").unwrap(); - /// let hay = "Hey! How are you?"; - /// let fields: Vec<&str> = re.splitn(hay, 3).collect(); - /// assert_eq!(fields, vec!["Hey", "How", "are you?"]); - /// ``` - /// - /// # Examples: more cases - /// - /// ``` - /// use regex::Regex; - /// - /// let re = Regex::new(r" ").unwrap(); - /// let hay = "Mary had a little lamb"; - /// let got: Vec<&str> = re.splitn(hay, 3).collect(); - /// assert_eq!(got, vec!["Mary", "had", "a little lamb"]); - /// - /// let re = Regex::new(r"X").unwrap(); - /// let hay = ""; - /// let got: Vec<&str> = re.splitn(hay, 3).collect(); - /// assert_eq!(got, vec![""]); - /// - /// let re = Regex::new(r"X").unwrap(); - /// let hay = "lionXXtigerXleopard"; - /// let got: Vec<&str> = re.splitn(hay, 3).collect(); - /// assert_eq!(got, vec!["lion", "", "tigerXleopard"]); - /// - /// let re = Regex::new(r"::").unwrap(); - /// let hay = "lion::tiger::leopard"; - /// let got: Vec<&str> = re.splitn(hay, 2).collect(); - /// assert_eq!(got, vec!["lion", "tiger::leopard"]); - /// - /// let re = Regex::new(r"X").unwrap(); - /// let hay = "abcXdef"; - /// let got: Vec<&str> = re.splitn(hay, 1).collect(); - /// assert_eq!(got, vec!["abcXdef"]); - /// - /// let re = Regex::new(r"X").unwrap(); - /// let hay = "abcdef"; - /// let got: Vec<&str> = re.splitn(hay, 2).collect(); - /// assert_eq!(got, vec!["abcdef"]); - /// - /// let re = Regex::new(r"X").unwrap(); - /// let hay = "abcXdef"; - /// let got: Vec<&str> = re.splitn(hay, 0).collect(); - /// assert!(got.is_empty()); - /// ``` - #[inline] - pub fn splitn<'r, 'h>( - &'r self, - haystack: &'h str, - limit: usize, - ) -> SplitN<'r, 'h> { - SplitN { haystack, it: self.meta.splitn(haystack, limit) } - } - - /// Replaces the leftmost-first match in the given haystack with the - /// replacement provided. The replacement can be a regular string (where - /// `$N` and `$name` are expanded to match capture groups) or a function - /// that takes a [`Captures`] and returns the replaced string. - /// - /// If no match is found, then the haystack is returned unchanged. In that - /// case, this implementation will likely return a `Cow::Borrowed` value - /// such that no allocation is performed. - /// - /// When a `Cow::Borrowed` is returned, the value returned is guaranteed - /// to be equivalent to the `haystack` given. 
- /// - /// # Replacement string syntax - /// - /// All instances of `$ref` in the replacement string are replaced with - /// the substring corresponding to the capture group identified by `ref`. - /// - /// `ref` may be an integer corresponding to the index of the capture group - /// (counted by order of opening parenthesis where `0` is the entire match) - /// or it can be a name (consisting of letters, digits or underscores) - /// corresponding to a named capture group. - /// - /// If `ref` isn't a valid capture group (whether the name doesn't exist or - /// isn't a valid index), then it is replaced with the empty string. - /// - /// The longest possible name is used. For example, `$1a` looks up the - /// capture group named `1a` and not the capture group at index `1`. To - /// exert more precise control over the name, use braces, e.g., `${1}a`. - /// - /// To write a literal `$` use `$$`. - /// - /// # Example - /// - /// Note that this function is polymorphic with respect to the replacement. - /// In typical usage, this can just be a normal string: - /// - /// ``` - /// use regex::Regex; - /// - /// let re = Regex::new(r"[^01]+").unwrap(); - /// assert_eq!(re.replace("1078910", ""), "1010"); - /// ``` - /// - /// But anything satisfying the [`Replacer`] trait will work. For example, - /// a closure of type `|&Captures| -> String` provides direct access to the - /// captures corresponding to a match. This allows one to access capturing - /// group matches easily: - /// - /// ``` - /// use regex::{Captures, Regex}; - /// - /// let re = Regex::new(r"([^,\s]+),\s+(\S+)").unwrap(); - /// let result = re.replace("Springsteen, Bruce", |caps: &Captures| { - /// format!("{} {}", &caps[2], &caps[1]) - /// }); - /// assert_eq!(result, "Bruce Springsteen"); - /// ``` - /// - /// But this is a bit cumbersome to use all the time. Instead, a simple - /// syntax is supported (as described above) that expands `$name` into the - /// corresponding capture group. Here's the last example, but using this - /// expansion technique with named capture groups: - /// - /// ``` - /// use regex::Regex; - /// - /// let re = Regex::new(r"(?<last>[^,\s]+),\s+(?<first>\S+)").unwrap(); - /// let result = re.replace("Springsteen, Bruce", "$first $last"); - /// assert_eq!(result, "Bruce Springsteen"); - /// ``` - /// - /// Note that using `$2` instead of `$first` or `$1` instead of `$last` - /// would produce the same result. To write a literal `$` use `$$`. - /// - /// Sometimes the replacement string requires use of curly braces to - /// delineate a capture group replacement when it is adjacent to some other - /// literal text. For example, if we wanted to join two words together with - /// an underscore: - /// - /// ``` - /// use regex::Regex; - /// - /// let re = Regex::new(r"(?<first>\w+)\s+(?<second>\w+)").unwrap(); - /// let result = re.replace("deep fried", "${first}_$second"); - /// assert_eq!(result, "deep_fried"); - /// ``` - /// - /// Without the curly braces, the capture group name `first_` would be - /// used, and since it doesn't exist, it would be replaced with the empty - /// string. - /// - /// Finally, sometimes you just want to replace a literal string with no - /// regard for capturing group expansion. 
This can be done by wrapping a - /// string with [`NoExpand`]: - /// - /// ``` - /// use regex::{NoExpand, Regex}; - /// - /// let re = Regex::new(r"(?<last>[^,\s]+),\s+(\S+)").unwrap(); - /// let result = re.replace("Springsteen, Bruce", NoExpand("$2 $last")); - /// assert_eq!(result, "$2 $last"); - /// ``` - /// - /// Using `NoExpand` may also be faster, since the replacement string won't - /// need to be parsed for the `$` syntax. - #[inline] - pub fn replace<'h, R: Replacer>( - &self, - haystack: &'h str, - rep: R, - ) -> Cow<'h, str> { - self.replacen(haystack, 1, rep) - } - - /// Replaces all non-overlapping matches in the haystack with the - /// replacement provided. This is the same as calling `replacen` with - /// `limit` set to `0`. - /// - /// If no match is found, then the haystack is returned unchanged. In that - /// case, this implementation will likely return a `Cow::Borrowed` value - /// such that no allocation is performed. - /// - /// When a `Cow::Borrowed` is returned, the value returned is guaranteed - /// to be equivalent to the `haystack` given. - /// - /// The documentation for [`Regex::replace`] goes into more detail about - /// what kinds of replacement strings are supported. - /// - /// # Time complexity - /// - /// Since iterators over all matches requires running potentially many - /// searches on the haystack, and since each search has worst case - /// `O(m * n)` time complexity, the overall worst case time complexity for - /// this routine is `O(m * n^2)`. - /// - /// # Fallibility - /// - /// If you need to write a replacement routine where any individual - /// replacement might "fail," doing so with this API isn't really feasible - /// because there's no way to stop the search process if a replacement - /// fails. Instead, if you need this functionality, you should consider - /// implementing your own replacement routine: - /// - /// ``` - /// use regex::{Captures, Regex}; - /// - /// fn replace_all<E>( - /// re: &Regex, - /// haystack: &str, - /// replacement: impl Fn(&Captures) -> Result<String, E>, - /// ) -> Result<String, E> { - /// let mut new = String::with_capacity(haystack.len()); - /// let mut last_match = 0; - /// for caps in re.captures_iter(haystack) { - /// let m = caps.get(0).unwrap(); - /// new.push_str(&haystack[last_match..m.start()]); - /// new.push_str(&replacement(&caps)?); - /// last_match = m.end(); - /// } - /// new.push_str(&haystack[last_match..]); - /// Ok(new) - /// } - /// - /// // Let's replace each word with the number of bytes in that word. - /// // But if we see a word that is "too long," we'll give up. 
- /// let re = Regex::new(r"\w+").unwrap(); - /// let replacement = |caps: &Captures| -> Result<String, &'static str> { - /// if caps[0].len() >= 5 { - /// return Err("word too long"); - /// } - /// Ok(caps[0].len().to_string()) - /// }; - /// assert_eq!( - /// Ok("2 3 3 3?".to_string()), - /// replace_all(&re, "hi how are you?", &replacement), - /// ); - /// assert!(replace_all(&re, "hi there", &replacement).is_err()); - /// ``` - /// - /// # Example - /// - /// This example shows how to flip the order of whitespace (excluding line - /// terminators) delimited fields, and normalizes the whitespace that - /// delimits the fields: - /// - /// ``` - /// use regex::Regex; - /// - /// let re = Regex::new(r"(?m)^(\S+)[\s--\r\n]+(\S+)$").unwrap(); - /// let hay = " - /// Greetings 1973 - /// Wild\t1973 - /// BornToRun\t\t\t\t1975 - /// Darkness 1978 - /// TheRiver 1980 - /// "; - /// let new = re.replace_all(hay, "$2 $1"); - /// assert_eq!(new, " - /// 1973 Greetings - /// 1973 Wild - /// 1975 BornToRun - /// 1978 Darkness - /// 1980 TheRiver - /// "); - /// ``` - #[inline] - pub fn replace_all<'h, R: Replacer>( - &self, - haystack: &'h str, - rep: R, - ) -> Cow<'h, str> { - self.replacen(haystack, 0, rep) - } - - /// Replaces at most `limit` non-overlapping matches in the haystack with - /// the replacement provided. If `limit` is `0`, then all non-overlapping - /// matches are replaced. That is, `Regex::replace_all(hay, rep)` is - /// equivalent to `Regex::replacen(hay, 0, rep)`. - /// - /// If no match is found, then the haystack is returned unchanged. In that - /// case, this implementation will likely return a `Cow::Borrowed` value - /// such that no allocation is performed. - /// - /// When a `Cow::Borrowed` is returned, the value returned is guaranteed - /// to be equivalent to the `haystack` given. - /// - /// The documentation for [`Regex::replace`] goes into more detail about - /// what kinds of replacement strings are supported. - /// - /// # Time complexity - /// - /// Since iterators over all matches requires running potentially many - /// searches on the haystack, and since each search has worst case - /// `O(m * n)` time complexity, the overall worst case time complexity for - /// this routine is `O(m * n^2)`. - /// - /// Although note that the worst case time here has an upper bound given - /// by the `limit` parameter. - /// - /// # Fallibility - /// - /// See the corresponding section in the docs for [`Regex::replace_all`] - /// for tips on how to deal with a replacement routine that can fail. - /// - /// # Example - /// - /// This example shows how to flip the order of whitespace (excluding line - /// terminators) delimited fields, and normalizes the whitespace that - /// delimits the fields. But we only do it for the first two matches. - /// - /// ``` - /// use regex::Regex; - /// - /// let re = Regex::new(r"(?m)^(\S+)[\s--\r\n]+(\S+)$").unwrap(); - /// let hay = " - /// Greetings 1973 - /// Wild\t1973 - /// BornToRun\t\t\t\t1975 - /// Darkness 1978 - /// TheRiver 1980 - /// "; - /// let new = re.replacen(hay, 2, "$2 $1"); - /// assert_eq!(new, " - /// 1973 Greetings - /// 1973 Wild - /// BornToRun\t\t\t\t1975 - /// Darkness 1978 - /// TheRiver 1980 - /// "); - /// ``` - #[inline] - pub fn replacen<'h, R: Replacer>( - &self, - haystack: &'h str, - limit: usize, - mut rep: R, - ) -> Cow<'h, str> { - // If we know that the replacement doesn't have any capture expansions, - // then we can use the fast path. 
The fast path can make a tremendous - // difference: - // - // 1) We use `find_iter` instead of `captures_iter`. Not asking for - // captures generally makes the regex engines faster. - // 2) We don't need to look up all of the capture groups and do - // replacements inside the replacement string. We just push it - // at each match and be done with it. - if let Some(rep) = rep.no_expansion() { - let mut it = self.find_iter(haystack).enumerate().peekable(); - if it.peek().is_none() { - return Cow::Borrowed(haystack); - } - let mut new = String::with_capacity(haystack.len()); - let mut last_match = 0; - for (i, m) in it { - new.push_str(&haystack[last_match..m.start()]); - new.push_str(&rep); - last_match = m.end(); - if limit > 0 && i >= limit - 1 { - break; - } - } - new.push_str(&haystack[last_match..]); - return Cow::Owned(new); - } - - // The slower path, which we use if the replacement may need access to - // capture groups. - let mut it = self.captures_iter(haystack).enumerate().peekable(); - if it.peek().is_none() { - return Cow::Borrowed(haystack); - } - let mut new = String::with_capacity(haystack.len()); - let mut last_match = 0; - for (i, cap) in it { - // unwrap on 0 is OK because captures only reports matches - let m = cap.get(0).unwrap(); - new.push_str(&haystack[last_match..m.start()]); - rep.replace_append(&cap, &mut new); - last_match = m.end(); - if limit > 0 && i >= limit - 1 { - break; - } - } - new.push_str(&haystack[last_match..]); - Cow::Owned(new) - } -} - -/// A group of advanced or "lower level" search methods. Some methods permit -/// starting the search at a position greater than `0` in the haystack. Other -/// methods permit reusing allocations, for example, when extracting the -/// matches for capture groups. -impl Regex { - /// Returns the end byte offset of the first match in the haystack given. - /// - /// This method may have the same performance characteristics as - /// `is_match`. Behaviorally, it doesn't just report whether it match - /// occurs, but also the end offset for a match. In particular, the offset - /// returned *may be shorter* than the proper end of the leftmost-first - /// match that you would find via [`Regex::find`]. - /// - /// Note that it is not guaranteed that this routine finds the shortest or - /// "earliest" possible match. Instead, the main idea of this API is that - /// it returns the offset at the point at which the internal regex engine - /// has determined that a match has occurred. This may vary depending on - /// which internal regex engine is used, and thus, the offset itself may - /// change based on internal heuristics. - /// - /// # Example - /// - /// Typically, `a+` would match the entire first sequence of `a` in some - /// haystack, but `shortest_match` *may* give up as soon as it sees the - /// first `a`. - /// - /// ``` - /// use regex::Regex; - /// - /// let re = Regex::new(r"a+").unwrap(); - /// let offset = re.shortest_match("aaaaa").unwrap(); - /// assert_eq!(offset, 1); - /// ``` - #[inline] - pub fn shortest_match(&self, haystack: &str) -> Option<usize> { - self.shortest_match_at(haystack, 0) - } - - /// Returns the same as [`Regex::shortest_match`], but starts the search at - /// the given offset. - /// - /// The significance of the starting point is that it takes the surrounding - /// context into consideration. For example, the `\A` anchor can only match - /// when `start == 0`. 
- /// - /// If a match is found, the offset returned is relative to the beginning - /// of the haystack, not the beginning of the search. - /// - /// # Panics - /// - /// This panics when `start >= haystack.len() + 1`. - /// - /// # Example - /// - /// This example shows the significance of `start` by demonstrating how it - /// can be used to permit look-around assertions in a regex to take the - /// surrounding context into account. - /// - /// ``` - /// use regex::Regex; - /// - /// let re = Regex::new(r"\bchew\b").unwrap(); - /// let hay = "eschew"; - /// // We get a match here, but it's probably not intended. - /// assert_eq!(re.shortest_match(&hay[2..]), Some(4)); - /// // No match because the assertions take the context into account. - /// assert_eq!(re.shortest_match_at(hay, 2), None); - /// ``` - #[inline] - pub fn shortest_match_at( - &self, - haystack: &str, - start: usize, - ) -> Option<usize> { - let input = - Input::new(haystack).earliest(true).span(start..haystack.len()); - self.meta.search_half(&input).map(|hm| hm.offset()) - } - - /// Returns the same as [`Regex::is_match`], but starts the search at the - /// given offset. - /// - /// The significance of the starting point is that it takes the surrounding - /// context into consideration. For example, the `\A` anchor can only - /// match when `start == 0`. - /// - /// # Panics - /// - /// This panics when `start >= haystack.len() + 1`. - /// - /// # Example - /// - /// This example shows the significance of `start` by demonstrating how it - /// can be used to permit look-around assertions in a regex to take the - /// surrounding context into account. - /// - /// ``` - /// use regex::Regex; - /// - /// let re = Regex::new(r"\bchew\b").unwrap(); - /// let hay = "eschew"; - /// // We get a match here, but it's probably not intended. - /// assert!(re.is_match(&hay[2..])); - /// // No match because the assertions take the context into account. - /// assert!(!re.is_match_at(hay, 2)); - /// ``` - #[inline] - pub fn is_match_at(&self, haystack: &str, start: usize) -> bool { - let input = - Input::new(haystack).earliest(true).span(start..haystack.len()); - self.meta.search_half(&input).is_some() - } - - /// Returns the same as [`Regex::find`], but starts the search at the given - /// offset. - /// - /// The significance of the starting point is that it takes the surrounding - /// context into consideration. For example, the `\A` anchor can only - /// match when `start == 0`. - /// - /// # Panics - /// - /// This panics when `start >= haystack.len() + 1`. - /// - /// # Example - /// - /// This example shows the significance of `start` by demonstrating how it - /// can be used to permit look-around assertions in a regex to take the - /// surrounding context into account. - /// - /// ``` - /// use regex::Regex; - /// - /// let re = Regex::new(r"\bchew\b").unwrap(); - /// let hay = "eschew"; - /// // We get a match here, but it's probably not intended. - /// assert_eq!(re.find(&hay[2..]).map(|m| m.range()), Some(0..4)); - /// // No match because the assertions take the context into account. - /// assert_eq!(re.find_at(hay, 2), None); - /// ``` - #[inline] - pub fn find_at<'h>( - &self, - haystack: &'h str, - start: usize, - ) -> Option<Match<'h>> { - let input = Input::new(haystack).span(start..haystack.len()); - self.meta - .search(&input) - .map(|m| Match::new(haystack, m.start(), m.end())) - } - - /// Returns the same as [`Regex::captures`], but starts the search at the - /// given offset. 
- /// - /// The significance of the starting point is that it takes the surrounding - /// context into consideration. For example, the `\A` anchor can only - /// match when `start == 0`. - /// - /// # Panics - /// - /// This panics when `start >= haystack.len() + 1`. - /// - /// # Example - /// - /// This example shows the significance of `start` by demonstrating how it - /// can be used to permit look-around assertions in a regex to take the - /// surrounding context into account. - /// - /// ``` - /// use regex::Regex; - /// - /// let re = Regex::new(r"\bchew\b").unwrap(); - /// let hay = "eschew"; - /// // We get a match here, but it's probably not intended. - /// assert_eq!(&re.captures(&hay[2..]).unwrap()[0], "chew"); - /// // No match because the assertions take the context into account. - /// assert!(re.captures_at(hay, 2).is_none()); - /// ``` - #[inline] - pub fn captures_at<'h>( - &self, - haystack: &'h str, - start: usize, - ) -> Option<Captures<'h>> { - let input = Input::new(haystack).span(start..haystack.len()); - let mut caps = self.meta.create_captures(); - self.meta.search_captures(&input, &mut caps); - if caps.is_match() { - let static_captures_len = self.static_captures_len(); - Some(Captures { haystack, caps, static_captures_len }) - } else { - None - } - } - - /// This is like [`Regex::captures`], but writes the byte offsets of each - /// capture group match into the locations given. - /// - /// A [`CaptureLocations`] stores the same byte offsets as a [`Captures`], - /// but does *not* store a reference to the haystack. This makes its API - /// a bit lower level and less convenient. But in exchange, callers - /// may allocate their own `CaptureLocations` and reuse it for multiple - /// searches. This may be helpful if allocating a `Captures` shows up in a - /// profile as too costly. - /// - /// To create a `CaptureLocations` value, use the - /// [`Regex::capture_locations`] method. - /// - /// This also returns the overall match if one was found. When a match is - /// found, its offsets are also always stored in `locs` at index `0`. - /// - /// # Panics - /// - /// This routine may panic if the given `CaptureLocations` was not created - /// by this regex. - /// - /// # Example - /// - /// ``` - /// use regex::Regex; - /// - /// let re = Regex::new(r"^([a-z]+)=(\S*)$").unwrap(); - /// let mut locs = re.capture_locations(); - /// assert!(re.captures_read(&mut locs, "id=foo123").is_some()); - /// assert_eq!(Some((0, 9)), locs.get(0)); - /// assert_eq!(Some((0, 2)), locs.get(1)); - /// assert_eq!(Some((3, 9)), locs.get(2)); - /// ``` - #[inline] - pub fn captures_read<'h>( - &self, - locs: &mut CaptureLocations, - haystack: &'h str, - ) -> Option<Match<'h>> { - self.captures_read_at(locs, haystack, 0) - } - - /// Returns the same as [`Regex::captures_read`], but starts the search at - /// the given offset. - /// - /// The significance of the starting point is that it takes the surrounding - /// context into consideration. For example, the `\A` anchor can only - /// match when `start == 0`. - /// - /// # Panics - /// - /// This panics when `start >= haystack.len() + 1`. - /// - /// This routine may also panic if the given `CaptureLocations` was not - /// created by this regex. - /// - /// # Example - /// - /// This example shows the significance of `start` by demonstrating how it - /// can be used to permit look-around assertions in a regex to take the - /// surrounding context into account. 
- /// - /// ``` - /// use regex::Regex; - /// - /// let re = Regex::new(r"\bchew\b").unwrap(); - /// let hay = "eschew"; - /// let mut locs = re.capture_locations(); - /// // We get a match here, but it's probably not intended. - /// assert!(re.captures_read(&mut locs, &hay[2..]).is_some()); - /// // No match because the assertions take the context into account. - /// assert!(re.captures_read_at(&mut locs, hay, 2).is_none()); - /// ``` - #[inline] - pub fn captures_read_at<'h>( - &self, - locs: &mut CaptureLocations, - haystack: &'h str, - start: usize, - ) -> Option<Match<'h>> { - let input = Input::new(haystack).span(start..haystack.len()); - self.meta.search_captures(&input, &mut locs.0); - locs.0.get_match().map(|m| Match::new(haystack, m.start(), m.end())) - } - - /// An undocumented alias for `captures_read_at`. - /// - /// The `regex-capi` crate previously used this routine, so to avoid - /// breaking that crate, we continue to provide the name as an undocumented - /// alias. - #[doc(hidden)] - #[inline] - pub fn read_captures_at<'h>( - &self, - locs: &mut CaptureLocations, - haystack: &'h str, - start: usize, - ) -> Option<Match<'h>> { - self.captures_read_at(locs, haystack, start) - } -} - -/// Auxiliary methods. -impl Regex { - /// Returns the original string of this regex. - /// - /// # Example - /// - /// ``` - /// use regex::Regex; - /// - /// let re = Regex::new(r"foo\w+bar").unwrap(); - /// assert_eq!(re.as_str(), r"foo\w+bar"); - /// ``` - #[inline] - pub fn as_str(&self) -> &str { - &self.pattern - } - - /// Returns an iterator over the capture names in this regex. - /// - /// The iterator returned yields elements of type `Option<&str>`. That is, - /// the iterator yields values for all capture groups, even ones that are - /// unnamed. The order of the groups corresponds to the order of the group's - /// corresponding opening parenthesis. - /// - /// The first element of the iterator always yields the group corresponding - /// to the overall match, and this group is always unnamed. Therefore, the - /// iterator always yields at least one group. - /// - /// # Example - /// - /// This shows basic usage with a mix of named and unnamed capture groups: - /// - /// ``` - /// use regex::Regex; - /// - /// let re = Regex::new(r"(?<a>.(?<b>.))(.)(?:.)(?<c>.)").unwrap(); - /// let mut names = re.capture_names(); - /// assert_eq!(names.next(), Some(None)); - /// assert_eq!(names.next(), Some(Some("a"))); - /// assert_eq!(names.next(), Some(Some("b"))); - /// assert_eq!(names.next(), Some(None)); - /// // the '(?:.)' group is non-capturing and so doesn't appear here! - /// assert_eq!(names.next(), Some(Some("c"))); - /// assert_eq!(names.next(), None); - /// ``` - /// - /// The iterator always yields at least one element, even for regexes with - /// no capture groups and even for regexes that can never match: - /// - /// ``` - /// use regex::Regex; - /// - /// let re = Regex::new(r"").unwrap(); - /// let mut names = re.capture_names(); - /// assert_eq!(names.next(), Some(None)); - /// assert_eq!(names.next(), None); - /// - /// let re = Regex::new(r"[a&&b]").unwrap(); - /// let mut names = re.capture_names(); - /// assert_eq!(names.next(), Some(None)); - /// assert_eq!(names.next(), None); - /// ``` - #[inline] - pub fn capture_names(&self) -> CaptureNames<'_> { - CaptureNames(self.meta.group_info().pattern_names(PatternID::ZERO)) - } - - /// Returns the number of captures groups in this regex. 
- /// - /// This includes all named and unnamed groups, including the implicit - /// unnamed group that is always present and corresponds to the entire - /// match. - /// - /// Since the implicit unnamed group is always included in this length, the - /// length returned is guaranteed to be greater than zero. - /// - /// # Example - /// - /// ``` - /// use regex::Regex; - /// - /// let re = Regex::new(r"foo").unwrap(); - /// assert_eq!(1, re.captures_len()); - /// - /// let re = Regex::new(r"(foo)").unwrap(); - /// assert_eq!(2, re.captures_len()); - /// - /// let re = Regex::new(r"(?<a>.(?<b>.))(.)(?:.)(?<c>.)").unwrap(); - /// assert_eq!(5, re.captures_len()); - /// - /// let re = Regex::new(r"[a&&b]").unwrap(); - /// assert_eq!(1, re.captures_len()); - /// ``` - #[inline] - pub fn captures_len(&self) -> usize { - self.meta.group_info().group_len(PatternID::ZERO) - } - - /// Returns the total number of capturing groups that appear in every - /// possible match. - /// - /// If the number of capture groups can vary depending on the match, then - /// this returns `None`. That is, a value is only returned when the number - /// of matching groups is invariant or "static." - /// - /// Note that like [`Regex::captures_len`], this **does** include the - /// implicit capturing group corresponding to the entire match. Therefore, - /// when a non-None value is returned, it is guaranteed to be at least `1`. - /// Stated differently, a return value of `Some(0)` is impossible. - /// - /// # Example - /// - /// This shows a few cases where a static number of capture groups is - /// available and a few cases where it is not. - /// - /// ``` - /// use regex::Regex; - /// - /// let len = |pattern| { - /// Regex::new(pattern).map(|re| re.static_captures_len()) - /// }; - /// - /// assert_eq!(Some(1), len("a")?); - /// assert_eq!(Some(2), len("(a)")?); - /// assert_eq!(Some(2), len("(a)|(b)")?); - /// assert_eq!(Some(3), len("(a)(b)|(c)(d)")?); - /// assert_eq!(None, len("(a)|b")?); - /// assert_eq!(None, len("a|(b)")?); - /// assert_eq!(None, len("(b)*")?); - /// assert_eq!(Some(2), len("(b)+")?); - /// - /// # Ok::<(), Box<dyn std::error::Error>>(()) - /// ``` - #[inline] - pub fn static_captures_len(&self) -> Option<usize> { - self.meta.static_captures_len() - } - - /// Returns a fresh allocated set of capture locations that can - /// be reused in multiple calls to [`Regex::captures_read`] or - /// [`Regex::captures_read_at`]. - /// - /// The returned locations can be used for any subsequent search for this - /// particular regex. There is no guarantee that it is correct to use for - /// other regexes, even if they have the same number of capture groups. - /// - /// # Example - /// - /// ``` - /// use regex::Regex; - /// - /// let re = Regex::new(r"(.)(.)(\w+)").unwrap(); - /// let mut locs = re.capture_locations(); - /// assert!(re.captures_read(&mut locs, "Padron").is_some()); - /// assert_eq!(locs.get(0), Some((0, 6))); - /// assert_eq!(locs.get(1), Some((0, 1))); - /// assert_eq!(locs.get(2), Some((1, 2))); - /// assert_eq!(locs.get(3), Some((2, 6))); - /// ``` - #[inline] - pub fn capture_locations(&self) -> CaptureLocations { - CaptureLocations(self.meta.create_captures()) - } - - /// An alias for `capture_locations` to preserve backward compatibility. - /// - /// The `regex-capi` crate used this method, so to avoid breaking that - /// crate, we continue to export it as an undocumented API. 
- #[doc(hidden)] - #[inline] - pub fn locations(&self) -> CaptureLocations { - self.capture_locations() - } -} - -/// Represents a single match of a regex in a haystack. -/// -/// A `Match` contains both the start and end byte offsets of the match and the -/// actual substring corresponding to the range of those byte offsets. It is -/// guaranteed that `start <= end`. When `start == end`, the match is empty. -/// -/// Since this `Match` can only be produced by the top-level `Regex` APIs -/// that only support searching UTF-8 encoded strings, the byte offsets for a -/// `Match` are guaranteed to fall on valid UTF-8 codepoint boundaries. That -/// is, slicing a `&str` with [`Match::range`] is guaranteed to never panic. -/// -/// Values with this type are created by [`Regex::find`] or -/// [`Regex::find_iter`]. Other APIs can create `Match` values too. For -/// example, [`Captures::get`]. -/// -/// The lifetime parameter `'h` refers to the lifetime of the -/// haystack that this match was produced from. -/// -/// # Numbering -/// -/// The byte offsets in a `Match` form a half-open interval. That is, the -/// start of the range is inclusive and the end of the range is exclusive. -/// For example, given a haystack `abcFOOxyz` and a match of `FOO`, its byte -/// offset range starts at `3` and ends at `6`. `3` corresponds to `F` and -/// `6` corresponds to `x`, which is one past the end of the match. This -/// corresponds to the same kind of slicing that Rust uses. -/// -/// For more on why this was chosen over other schemes (aside from being -/// consistent with how Rust the language works), see [this discussion] and -/// [Dijkstra's note on a related topic][note]. -/// -/// [this discussion]: https://github.com/rust-lang/regex/discussions/866 -/// [note]: https://www.cs.utexas.edu/users/EWD/transcriptions/EWD08xx/EWD831.html -/// -/// # Example -/// -/// This example shows the value of each of the methods on `Match` for a -/// particular search. -/// -/// ``` -/// use regex::Regex; -/// -/// let re = Regex::new(r"\p{Greek}+").unwrap(); -/// let hay = "Greek: αβγδ"; -/// let m = re.find(hay).unwrap(); -/// assert_eq!(7, m.start()); -/// assert_eq!(15, m.end()); -/// assert!(!m.is_empty()); -/// assert_eq!(8, m.len()); -/// assert_eq!(7..15, m.range()); -/// assert_eq!("αβγδ", m.as_str()); -/// ``` -#[derive(Copy, Clone, Eq, PartialEq)] -pub struct Match<'h> { - haystack: &'h str, - start: usize, - end: usize, -} - -impl<'h> Match<'h> { - /// Returns the byte offset of the start of the match in the haystack. The - /// start of the match corresponds to the position where the match begins - /// and includes the first byte in the match. - /// - /// It is guaranteed that `Match::start() <= Match::end()`. - /// - /// This is guaranteed to fall on a valid UTF-8 codepoint boundary. That - /// is, it will never be an offset that appears between the UTF-8 code - /// units of a UTF-8 encoded Unicode scalar value. Consequently, it is - /// always safe to slice the corresponding haystack using this offset. - #[inline] - pub fn start(&self) -> usize { - self.start - } - - /// Returns the byte offset of the end of the match in the haystack. The - /// end of the match corresponds to the byte immediately following the last - /// byte in the match. This means that `&slice[start..end]` works as one - /// would expect. - /// - /// It is guaranteed that `Match::start() <= Match::end()`. - /// - /// This is guaranteed to fall on a valid UTF-8 codepoint boundary.
That - /// is, it will never be an offset that appears between the UTF-8 code - /// units of a UTF-8 encoded Unicode scalar value. Consequently, it is - /// always safe to slice the corresponding haystack using this offset. - #[inline] - pub fn end(&self) -> usize { - self.end - } - - /// Returns true if and only if this match has a length of zero. - /// - /// Note that an empty match can only occur when the regex itself can - /// match the empty string. Here are some examples of regexes that can - /// all match the empty string: `^`, `^$`, `\b`, `a?`, `a*`, `a{0}`, - /// `(foo|\d+|quux)?`. - #[inline] - pub fn is_empty(&self) -> bool { - self.start == self.end - } - - /// Returns the length, in bytes, of this match. - #[inline] - pub fn len(&self) -> usize { - self.end - self.start - } - - /// Returns the range over the starting and ending byte offsets of the - /// match in the haystack. - /// - /// It is always correct to slice the original haystack searched with this - /// range. That is, because the offsets are guaranteed to fall on valid - /// UTF-8 boundaries, the range returned is always valid. - #[inline] - pub fn range(&self) -> core::ops::Range<usize> { - self.start..self.end - } - - /// Returns the substring of the haystack that matched. - #[inline] - pub fn as_str(&self) -> &'h str { - &self.haystack[self.range()] - } - - /// Creates a new match from the given haystack and byte offsets. - #[inline] - fn new(haystack: &'h str, start: usize, end: usize) -> Match<'h> { - Match { haystack, start, end } - } -} - -impl<'h> core::fmt::Debug for Match<'h> { - fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { - f.debug_struct("Match") - .field("start", &self.start) - .field("end", &self.end) - .field("string", &self.as_str()) - .finish() - } -} - -impl<'h> From<Match<'h>> for &'h str { - fn from(m: Match<'h>) -> &'h str { - m.as_str() - } -} - -impl<'h> From<Match<'h>> for core::ops::Range<usize> { - fn from(m: Match<'h>) -> core::ops::Range<usize> { - m.range() - } -} - -/// Represents the capture groups for a single match. -/// -/// Capture groups refer to parts of a regex enclosed in parentheses. They -/// can be optionally named. The purpose of capture groups is to be able to -/// reference different parts of a match based on the original pattern. In -/// essence, a `Captures` is a container of [`Match`] values for each group -/// that participated in a regex match. Each `Match` can be looked up by either -/// its capture group index or name (if it has one). -/// -/// For example, say you want to match the individual letters in a 5-letter -/// word: -/// -/// ```text -/// (?<first>\w)(\w)(?:\w)\w(?<last>\w) -/// ``` -/// -/// This regex has 4 capture groups: -/// -/// * The group at index `0` corresponds to the overall match. It is always -/// present in every match and never has a name. -/// * The group at index `1` with name `first` corresponding to the first -/// letter. -/// * The group at index `2` with no name corresponding to the second letter. -/// * The group at index `3` with name `last` corresponding to the fifth and -/// last letter. -/// -/// Notice that `(?:\w)` was not listed above as a capture group despite it -/// being enclosed in parentheses. That's because `(?:pattern)` is a special -/// syntax that permits grouping but *without* capturing. The reason for not -/// treating it as a capture is that tracking and reporting capture groups -/// requires additional state that may lead to slower searches. 
So using as few -/// capture groups as possible can help performance. (Although the difference -/// in performance of a couple of capture groups is likely immaterial.) -/// -/// Values with this type are created by [`Regex::captures`] or -/// [`Regex::captures_iter`]. -/// -/// `'h` is the lifetime of the haystack that these captures were matched from. -/// -/// # Example -/// -/// ``` -/// use regex::Regex; -/// -/// let re = Regex::new(r"(?<first>\w)(\w)(?:\w)\w(?<last>\w)").unwrap(); -/// let caps = re.captures("toady").unwrap(); -/// assert_eq!("toady", &caps[0]); -/// assert_eq!("t", &caps["first"]); -/// assert_eq!("o", &caps[2]); -/// assert_eq!("y", &caps["last"]); -/// ``` -pub struct Captures<'h> { - haystack: &'h str, - caps: captures::Captures, - static_captures_len: Option<usize>, -} - -impl<'h> Captures<'h> { - /// Returns the `Match` associated with the capture group at index `i`. If - /// `i` does not correspond to a capture group, or if the capture group did - /// not participate in the match, then `None` is returned. - /// - /// When `i == 0`, this is guaranteed to return a non-`None` value. - /// - /// # Examples - /// - /// Get the substring that matched with a default of an empty string if the - /// group didn't participate in the match: - /// - /// ``` - /// use regex::Regex; - /// - /// let re = Regex::new(r"[a-z]+(?:([0-9]+)|([A-Z]+))").unwrap(); - /// let caps = re.captures("abc123").unwrap(); - /// - /// let substr1 = caps.get(1).map_or("", |m| m.as_str()); - /// let substr2 = caps.get(2).map_or("", |m| m.as_str()); - /// assert_eq!(substr1, "123"); - /// assert_eq!(substr2, ""); - /// ``` - #[inline] - pub fn get(&self, i: usize) -> Option<Match<'h>> { - self.caps - .get_group(i) - .map(|sp| Match::new(self.haystack, sp.start, sp.end)) - } - - /// Return the overall match for the capture. - /// - /// This returns the match for index `0`. That is it is equivalent to - /// `m.get(0).unwrap()` - /// - /// # Example - /// - /// ``` - /// use regex::Regex; - /// - /// let re = Regex::new(r"[a-z]+([0-9]+)").unwrap(); - /// let caps = re.captures(" abc123-def").unwrap(); - /// - /// assert_eq!(caps.get_match().as_str(), "abc123"); - /// - /// ``` - #[inline] - pub fn get_match(&self) -> Match<'h> { - self.get(0).unwrap() - } - - /// Returns the `Match` associated with the capture group named `name`. If - /// `name` isn't a valid capture group or it refers to a group that didn't - /// match, then `None` is returned. - /// - /// Note that unlike `caps["name"]`, this returns a `Match` whose lifetime - /// matches the lifetime of the haystack in this `Captures` value. - /// Conversely, the substring returned by `caps["name"]` has a lifetime - /// of the `Captures` value, which is likely shorter than the lifetime of - /// the haystack. In some cases, it may be necessary to use this method to - /// access the matching substring instead of the `caps["name"]` notation. 
- /// - /// # Examples - /// - /// Get the substring that matched with a default of an empty string if the - /// group didn't participate in the match: - /// - /// ``` - /// use regex::Regex; - /// - /// let re = Regex::new( - /// r"[a-z]+(?:(?<numbers>[0-9]+)|(?<letters>[A-Z]+))", - /// ).unwrap(); - /// let caps = re.captures("abc123").unwrap(); - /// - /// let numbers = caps.name("numbers").map_or("", |m| m.as_str()); - /// let letters = caps.name("letters").map_or("", |m| m.as_str()); - /// assert_eq!(numbers, "123"); - /// assert_eq!(letters, ""); - /// ``` - #[inline] - pub fn name(&self, name: &str) -> Option<Match<'h>> { - self.caps - .get_group_by_name(name) - .map(|sp| Match::new(self.haystack, sp.start, sp.end)) - } - - /// This is a convenience routine for extracting the substrings - /// corresponding to matching capture groups. - /// - /// This returns a tuple where the first element corresponds to the full - /// substring of the haystack that matched the regex. The second element is - /// an array of substrings, with each corresponding to the substring that - /// matched for a particular capture group. - /// - /// # Panics - /// - /// This panics if the number of possible matching groups in this - /// `Captures` value is not fixed to `N` in all circumstances. - /// More precisely, this routine only works when `N` is equivalent to - /// [`Regex::static_captures_len`]. - /// - /// Stated more plainly, if the number of matching capture groups in a - /// regex can vary from match to match, then this function always panics. - /// - /// For example, `(a)(b)|(c)` could produce two matching capture groups - /// or one matching capture group for any given match. Therefore, one - /// cannot use `extract` with such a pattern. - /// - /// But a pattern like `(a)(b)|(c)(d)` can be used with `extract` because - /// the number of capture groups in every match is always equivalent, - /// even if the capture _indices_ in each match are not. - /// - /// # Example - /// - /// ``` - /// use regex::Regex; - /// - /// let re = Regex::new(r"([0-9]{4})-([0-9]{2})-([0-9]{2})").unwrap(); - /// let hay = "On 2010-03-14, I became a Tennessee lamb."; - /// let Some((full, [year, month, day])) = - /// re.captures(hay).map(|caps| caps.extract()) else { return }; - /// assert_eq!("2010-03-14", full); - /// assert_eq!("2010", year); - /// assert_eq!("03", month); - /// assert_eq!("14", day); - /// ``` - /// - /// # Example: iteration - /// - /// This example shows how to use this method when iterating over all - /// `Captures` matches in a haystack. - /// - /// ``` - /// use regex::Regex; - /// - /// let re = Regex::new(r"([0-9]{4})-([0-9]{2})-([0-9]{2})").unwrap(); - /// let hay = "1973-01-05, 1975-08-25 and 1980-10-18"; - /// - /// let mut dates: Vec<(&str, &str, &str)> = vec![]; - /// for (_, [y, m, d]) in re.captures_iter(hay).map(|c| c.extract()) { - /// dates.push((y, m, d)); - /// } - /// assert_eq!(dates, vec![ - /// ("1973", "01", "05"), - /// ("1975", "08", "25"), - /// ("1980", "10", "18"), - /// ]); - /// ``` - /// - /// # Example: parsing different formats - /// - /// This API is particularly useful when you need to extract a particular - /// value that might occur in a different format. 
Consider, for example, - /// an identifier that might be in double quotes or single quotes: - /// - /// ``` - /// use regex::Regex; - /// - /// let re = Regex::new(r#"id:(?:"([^"]+)"|'([^']+)')"#).unwrap(); - /// let hay = r#"The first is id:"foo" and the second is id:'bar'."#; - /// let mut ids = vec![]; - /// for (_, [id]) in re.captures_iter(hay).map(|c| c.extract()) { - /// ids.push(id); - /// } - /// assert_eq!(ids, vec!["foo", "bar"]); - /// ``` - pub fn extract<const N: usize>(&self) -> (&'h str, [&'h str; N]) { - let len = self - .static_captures_len - .expect("number of capture groups can vary in a match") - .checked_sub(1) - .expect("number of groups is always greater than zero"); - assert_eq!(N, len, "asked for {N} groups, but must ask for {len}"); - // The regex-automata variant of extract is a bit more permissive. - // It doesn't require the number of matching capturing groups to be - // static, and you can even request fewer groups than what's there. So - // this is guaranteed to never panic because we've asserted above that - // the user has requested precisely the number of groups that must be - // present in any match for this regex. - self.caps.extract(self.haystack) - } - - /// Expands all instances of `$ref` in `replacement` to the corresponding - /// capture group, and writes them to the `dst` buffer given. A `ref` can - /// be a capture group index or a name. If `ref` doesn't refer to a capture - /// group that participated in the match, then it is replaced with the - /// empty string. - /// - /// # Format - /// - /// The format of the replacement string supports two different kinds of - /// capture references: unbraced and braced. - /// - /// For the unbraced format, the format supported is `$ref` where `ref` - /// can be any sequence of characters in the class `[0-9A-Za-z_]`. `ref` is always - /// the longest possible parse. So for example, `$1a` corresponds to the - /// capture group named `1a` and not the capture group at index `1`. If - /// `ref` matches `^[0-9]+$`, then it is treated as a capture group index - /// itself and not a name. - /// - /// For the braced format, the format supported is `${ref}` where `ref` can - /// be any sequence of bytes except for `}`. If no closing brace occurs, - /// then it is not considered a capture reference. As with the unbraced - /// format, if `ref` matches `^[0-9]+$`, then it is treated as a capture - /// group index and not a name. - /// - /// The braced format is useful for exerting precise control over the name - /// of the capture reference. For example, `${1}a` corresponds to the - /// capture group reference `1` followed by the letter `a`, whereas `$1a` - /// (as mentioned above) corresponds to the capture group reference `1a`. - /// The braced format is also useful for expressing capture group names - /// that use characters not supported by the unbraced format. For example, - /// `${foo[bar].baz}` refers to the capture group named `foo[bar].baz`. - /// - /// If a capture group reference is found and it does not refer to a valid - /// capture group, then it will be replaced with the empty string. - /// - /// To write a literal `$`, use `$$`.
- /// - /// # Example - /// - /// ``` - /// use regex::Regex; - /// - /// let re = Regex::new( - /// r"(?<day>[0-9]{2})-(?<month>[0-9]{2})-(?<year>[0-9]{4})", - /// ).unwrap(); - /// let hay = "On 14-03-2010, I became a Tennessee lamb."; - /// let caps = re.captures(hay).unwrap(); - /// - /// let mut dst = String::new(); - /// caps.expand("year=$year, month=$month, day=$day", &mut dst); - /// assert_eq!(dst, "year=2010, month=03, day=14"); - /// ``` - #[inline] - pub fn expand(&self, replacement: &str, dst: &mut String) { - self.caps.interpolate_string_into(self.haystack, replacement, dst); - } - - /// Returns an iterator over all capture groups. This includes both - /// matching and non-matching groups. - /// - /// The iterator always yields at least one matching group: the first group - /// (at index `0`) with no name. Subsequent groups are returned in the order - /// of their opening parenthesis in the regex. - /// - /// The elements yielded have type `Option<Match<'h>>`, where a non-`None` - /// value is present if the capture group matches. - /// - /// # Example - /// - /// ``` - /// use regex::Regex; - /// - /// let re = Regex::new(r"(\w)(\d)?(\w)").unwrap(); - /// let caps = re.captures("AZ").unwrap(); - /// - /// let mut it = caps.iter(); - /// assert_eq!(it.next().unwrap().map(|m| m.as_str()), Some("AZ")); - /// assert_eq!(it.next().unwrap().map(|m| m.as_str()), Some("A")); - /// assert_eq!(it.next().unwrap().map(|m| m.as_str()), None); - /// assert_eq!(it.next().unwrap().map(|m| m.as_str()), Some("Z")); - /// assert_eq!(it.next(), None); - /// ``` - #[inline] - pub fn iter<'c>(&'c self) -> SubCaptureMatches<'c, 'h> { - SubCaptureMatches { haystack: self.haystack, it: self.caps.iter() } - } - - /// Returns the total number of capture groups. This includes both - /// matching and non-matching groups. - /// - /// The length returned is always equivalent to the number of elements - /// yielded by [`Captures::iter`]. Consequently, the length is always - /// greater than zero since every `Captures` value always includes the - /// match for the entire regex. - /// - /// # Example - /// - /// ``` - /// use regex::Regex; - /// - /// let re = Regex::new(r"(\w)(\d)?(\w)").unwrap(); - /// let caps = re.captures("AZ").unwrap(); - /// assert_eq!(caps.len(), 4); - /// ``` - #[inline] - pub fn len(&self) -> usize { - self.caps.group_len() - } -} - -impl<'h> core::fmt::Debug for Captures<'h> { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - /// A little helper type to provide a nice map-like debug - /// representation for our capturing group spans. - /// - /// regex-automata has something similar, but it includes the pattern - /// ID in its debug output, which is confusing. It also doesn't include - /// that strings that match because a regex-automata `Captures` doesn't - /// borrow the haystack. 
- struct CapturesDebugMap<'a> { - caps: &'a Captures<'a>, - } - - impl<'a> core::fmt::Debug for CapturesDebugMap<'a> { - fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { - let mut map = f.debug_map(); - let names = - self.caps.caps.group_info().pattern_names(PatternID::ZERO); - for (group_index, maybe_name) in names.enumerate() { - let key = Key(group_index, maybe_name); - match self.caps.get(group_index) { - None => map.entry(&key, &None::<()>), - Some(mat) => map.entry(&key, &Value(mat)), - }; - } - map.finish() - } - } - - struct Key<'a>(usize, Option<&'a str>); - - impl<'a> core::fmt::Debug for Key<'a> { - fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { - write!(f, "{}", self.0)?; - if let Some(name) = self.1 { - write!(f, "/{name:?}")?; - } - Ok(()) - } - } - - struct Value<'a>(Match<'a>); - - impl<'a> core::fmt::Debug for Value<'a> { - fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { - write!( - f, - "{}..{}/{:?}", - self.0.start(), - self.0.end(), - self.0.as_str() - ) - } - } - - f.debug_tuple("Captures") - .field(&CapturesDebugMap { caps: self }) - .finish() - } -} - -/// Get a matching capture group's haystack substring by index. -/// -/// The haystack substring returned can't outlive the `Captures` object if this -/// method is used, because of how `Index` is defined (normally `a[i]` is part -/// of `a` and can't outlive it). To work around this limitation, use -/// [`Captures::get`] instead. -/// -/// `'h` is the lifetime of the matched haystack, but the lifetime of the -/// `&str` returned by this implementation is the lifetime of the `Captures` -/// value itself. -/// -/// # Panics -/// -/// If there is no matching group at the given index. -impl<'h> core::ops::Index<usize> for Captures<'h> { - type Output = str; - - // The lifetime is written out to make it clear that the &str returned - // does NOT have a lifetime equivalent to 'h. - fn index<'a>(&'a self, i: usize) -> &'a str { - self.get(i) - .map(|m| m.as_str()) - .unwrap_or_else(|| panic!("no group at index '{i}'")) - } -} - -/// Get a matching capture group's haystack substring by name. -/// -/// The haystack substring returned can't outlive the `Captures` object if this -/// method is used, because of how `Index` is defined (normally `a[i]` is part -/// of `a` and can't outlive it). To work around this limitation, use -/// [`Captures::name`] instead. -/// -/// `'h` is the lifetime of the matched haystack, but the lifetime of the -/// `&str` returned by this implementation is the lifetime of the `Captures` -/// value itself. -/// -/// `'n` is the lifetime of the group name used to index the `Captures` value. -/// -/// # Panics -/// -/// If there is no matching group with the given name. -impl<'h, 'n> core::ops::Index<&'n str> for Captures<'h> { - type Output = str; - - fn index<'a>(&'a self, name: &'n str) -> &'a str { - self.name(name) - .map(|m| m.as_str()) - .unwrap_or_else(|| panic!("no group named '{name}'")) - } -} - -/// A low level representation of the byte offsets of each capture group. -/// -/// You can think of this as a lower level [`Captures`], where this type does -/// not support named capturing groups directly and it does not borrow the -/// haystack that these offsets were matched on. -/// -/// Primarily, this type is useful when using the lower level `Regex` APIs such -/// as [`Regex::captures_read`], which permits amortizing the allocation in -/// which capture match offsets are stored.
-/// -/// In order to build a value of this type, you'll need to call the -/// [`Regex::capture_locations`] method. The value returned can then be reused -/// in subsequent searches for that regex. Using it for other regexes may -/// result in a panic or otherwise incorrect results. -/// -/// # Example -/// -/// This example shows how to create and use `CaptureLocations` in a search. -/// -/// ``` -/// use regex::Regex; -/// -/// let re = Regex::new(r"(?<first>\w+)\s+(?<last>\w+)").unwrap(); -/// let mut locs = re.capture_locations(); -/// let m = re.captures_read(&mut locs, "Bruce Springsteen").unwrap(); -/// assert_eq!(0..17, m.range()); -/// assert_eq!(Some((0, 17)), locs.get(0)); -/// assert_eq!(Some((0, 5)), locs.get(1)); -/// assert_eq!(Some((6, 17)), locs.get(2)); -/// -/// // Asking for an invalid capture group always returns None. -/// assert_eq!(None, locs.get(3)); -/// # // literals are too big for 32-bit usize: #1041 -/// # #[cfg(target_pointer_width = "64")] -/// assert_eq!(None, locs.get(34973498648)); -/// # #[cfg(target_pointer_width = "64")] -/// assert_eq!(None, locs.get(9944060567225171988)); -/// ``` -#[derive(Clone, Debug)] -pub struct CaptureLocations(captures::Captures); - -/// A type alias for `CaptureLocations` for backwards compatibility. -/// -/// Previously, we exported `CaptureLocations` as `Locations` in an -/// undocumented API. To prevent breaking that code (e.g., in `regex-capi`), -/// we continue re-exporting the same undocumented API. -#[doc(hidden)] -pub type Locations = CaptureLocations; - -impl CaptureLocations { - /// Returns the start and end byte offsets of the capture group at index - /// `i`. This returns `None` if `i` is not a valid capture group or if the - /// capture group did not match. - /// - /// # Example - /// - /// ``` - /// use regex::Regex; - /// - /// let re = Regex::new(r"(?<first>\w+)\s+(?<last>\w+)").unwrap(); - /// let mut locs = re.capture_locations(); - /// re.captures_read(&mut locs, "Bruce Springsteen").unwrap(); - /// assert_eq!(Some((0, 17)), locs.get(0)); - /// assert_eq!(Some((0, 5)), locs.get(1)); - /// assert_eq!(Some((6, 17)), locs.get(2)); - /// ``` - #[inline] - pub fn get(&self, i: usize) -> Option<(usize, usize)> { - self.0.get_group(i).map(|sp| (sp.start, sp.end)) - } - - /// Returns the total number of capture groups (even if they didn't match). - /// That is, the length returned is unaffected by the result of a search. - /// - /// This is always at least `1` since every regex has at least `1` - /// capturing group that corresponds to the entire match. - /// - /// # Example - /// - /// ``` - /// use regex::Regex; - /// - /// let re = Regex::new(r"(?<first>\w+)\s+(?<last>\w+)").unwrap(); - /// let mut locs = re.capture_locations(); - /// assert_eq!(3, locs.len()); - /// re.captures_read(&mut locs, "Bruce Springsteen").unwrap(); - /// assert_eq!(3, locs.len()); - /// ``` - /// - /// Notice that the length is always at least `1`, regardless of the regex: - /// - /// ``` - /// use regex::Regex; - /// - /// let re = Regex::new(r"").unwrap(); - /// let locs = re.capture_locations(); - /// assert_eq!(1, locs.len()); - /// - /// // [a&&b] is a regex that never matches anything. 
- /// let re = Regex::new(r"[a&&b]").unwrap(); - /// let locs = re.capture_locations(); - /// assert_eq!(1, locs.len()); - /// ``` - #[inline] - pub fn len(&self) -> usize { - // self.0.group_len() returns 0 if the underlying captures doesn't - // represent a match, but the behavior guaranteed for this method is - // that the length doesn't change based on a match or not. - self.0.group_info().group_len(PatternID::ZERO) - } - - /// An alias for the `get` method for backwards compatibility. - /// - /// Previously, we exported `get` as `pos` in an undocumented API. To - /// prevent breaking that code (e.g., in `regex-capi`), we continue - /// re-exporting the same undocumented API. - #[doc(hidden)] - #[inline] - pub fn pos(&self, i: usize) -> Option<(usize, usize)> { - self.get(i) - } -} - -/// An iterator over all non-overlapping matches in a haystack. -/// -/// This iterator yields [`Match`] values. The iterator stops when no more -/// matches can be found. -/// -/// `'r` is the lifetime of the compiled regular expression and `'h` is the -/// lifetime of the haystack. -/// -/// This iterator is created by [`Regex::find_iter`]. -/// -/// # Time complexity -/// -/// Note that since an iterator runs potentially many searches on the haystack -/// and since each search has worst case `O(m * n)` time complexity, the -/// overall worst case time complexity for iteration is `O(m * n^2)`. -#[derive(Debug)] -pub struct Matches<'r, 'h> { - haystack: &'h str, - it: meta::FindMatches<'r, 'h>, -} - -impl<'r, 'h> Iterator for Matches<'r, 'h> { - type Item = Match<'h>; - - #[inline] - fn next(&mut self) -> Option<Match<'h>> { - self.it - .next() - .map(|sp| Match::new(self.haystack, sp.start(), sp.end())) - } - - #[inline] - fn count(self) -> usize { - // This can actually be up to 2x faster than calling `next()` until - // completion, because counting matches when using a DFA only requires - // finding the end of each match. But returning a `Match` via `next()` - // requires the start of each match which, with a DFA, requires a - // reverse forward scan to find it. - self.it.count() - } -} - -impl<'r, 'h> core::iter::FusedIterator for Matches<'r, 'h> {} - -/// An iterator over all non-overlapping capture matches in a haystack. -/// -/// This iterator yields [`Captures`] values. The iterator stops when no more -/// matches can be found. -/// -/// `'r` is the lifetime of the compiled regular expression and `'h` is the -/// lifetime of the matched string. -/// -/// This iterator is created by [`Regex::captures_iter`]. -/// -/// # Time complexity -/// -/// Note that since an iterator runs potentially many searches on the haystack -/// and since each search has worst case `O(m * n)` time complexity, the -/// overall worst case time complexity for iteration is `O(m * n^2)`. -#[derive(Debug)] -pub struct CaptureMatches<'r, 'h> { - haystack: &'h str, - it: meta::CapturesMatches<'r, 'h>, -} - -impl<'r, 'h> Iterator for CaptureMatches<'r, 'h> { - type Item = Captures<'h>; - - #[inline] - fn next(&mut self) -> Option<Captures<'h>> { - let static_captures_len = self.it.regex().static_captures_len(); - self.it.next().map(|caps| Captures { - haystack: self.haystack, - caps, - static_captures_len, - }) - } - - #[inline] - fn count(self) -> usize { - // This can actually be up to 2x faster than calling `next()` until - // completion, because counting matches when using a DFA only requires - // finding the end of each match. 
But returning a `Match` via `next()` - // requires the start of each match which, with a DFA, requires a - // reverse forward scan to find it. - self.it.count() - } -} - -impl<'r, 'h> core::iter::FusedIterator for CaptureMatches<'r, 'h> {} - -/// An iterator over all substrings delimited by a regex match. -/// -/// `'r` is the lifetime of the compiled regular expression and `'h` is the -/// lifetime of the byte string being split. -/// -/// This iterator is created by [`Regex::split`]. -/// -/// # Time complexity -/// -/// Note that since an iterator runs potentially many searches on the haystack -/// and since each search has worst case `O(m * n)` time complexity, the -/// overall worst case time complexity for iteration is `O(m * n^2)`. -#[derive(Debug)] -pub struct Split<'r, 'h> { - haystack: &'h str, - it: meta::Split<'r, 'h>, -} - -impl<'r, 'h> Iterator for Split<'r, 'h> { - type Item = &'h str; - - #[inline] - fn next(&mut self) -> Option<&'h str> { - self.it.next().map(|span| &self.haystack[span]) - } -} - -impl<'r, 'h> core::iter::FusedIterator for Split<'r, 'h> {} - -/// An iterator over at most `N` substrings delimited by a regex match. -/// -/// The last substring yielded by this iterator will be whatever remains after -/// `N-1` splits. -/// -/// `'r` is the lifetime of the compiled regular expression and `'h` is the -/// lifetime of the byte string being split. -/// -/// This iterator is created by [`Regex::splitn`]. -/// -/// # Time complexity -/// -/// Note that since an iterator runs potentially many searches on the haystack -/// and since each search has worst case `O(m * n)` time complexity, the -/// overall worst case time complexity for iteration is `O(m * n^2)`. -/// -/// Although note that the worst case time here has an upper bound given -/// by the `limit` parameter to [`Regex::splitn`]. -#[derive(Debug)] -pub struct SplitN<'r, 'h> { - haystack: &'h str, - it: meta::SplitN<'r, 'h>, -} - -impl<'r, 'h> Iterator for SplitN<'r, 'h> { - type Item = &'h str; - - #[inline] - fn next(&mut self) -> Option<&'h str> { - self.it.next().map(|span| &self.haystack[span]) - } - - #[inline] - fn size_hint(&self) -> (usize, Option<usize>) { - self.it.size_hint() - } -} - -impl<'r, 'h> core::iter::FusedIterator for SplitN<'r, 'h> {} - -/// An iterator over the names of all capture groups in a regex. -/// -/// This iterator yields values of type `Option<&str>` in order of the opening -/// capture group parenthesis in the regex pattern. `None` is yielded for -/// groups with no name. The first element always corresponds to the implicit -/// and unnamed group for the overall match. -/// -/// `'r` is the lifetime of the compiled regular expression. -/// -/// This iterator is created by [`Regex::capture_names`]. -#[derive(Clone, Debug)] -pub struct CaptureNames<'r>(captures::GroupInfoPatternNames<'r>); - -impl<'r> Iterator for CaptureNames<'r> { - type Item = Option<&'r str>; - - #[inline] - fn next(&mut self) -> Option<Option<&'r str>> { - self.0.next() - } - - #[inline] - fn size_hint(&self) -> (usize, Option<usize>) { - self.0.size_hint() - } - - #[inline] - fn count(self) -> usize { - self.0.count() - } -} - -impl<'r> ExactSizeIterator for CaptureNames<'r> {} - -impl<'r> core::iter::FusedIterator for CaptureNames<'r> {} - -/// An iterator over all group matches in a [`Captures`] value. -/// -/// This iterator yields values of type `Option<Match<'h>>`, where `'h` is the -/// lifetime of the haystack that the matches are for. 
The order of elements -/// yielded corresponds to the order of the opening parenthesis for the group -/// in the regex pattern. `None` is yielded for groups that did not participate -/// in the match. -/// -/// The first element always corresponds to the implicit group for the overall -/// match. Since this iterator is created by a [`Captures`] value, and a -/// `Captures` value is only created when a match occurs, it follows that the -/// first element yielded by this iterator is guaranteed to be non-`None`. -/// -/// The lifetime `'c` corresponds to the lifetime of the `Captures` value that -/// created this iterator, and the lifetime `'h` corresponds to the originally -/// matched haystack. -#[derive(Clone, Debug)] -pub struct SubCaptureMatches<'c, 'h> { - haystack: &'h str, - it: captures::CapturesPatternIter<'c>, -} - -impl<'c, 'h> Iterator for SubCaptureMatches<'c, 'h> { - type Item = Option<Match<'h>>; - - #[inline] - fn next(&mut self) -> Option<Option<Match<'h>>> { - self.it.next().map(|group| { - group.map(|sp| Match::new(self.haystack, sp.start, sp.end)) - }) - } - - #[inline] - fn size_hint(&self) -> (usize, Option<usize>) { - self.it.size_hint() - } - - #[inline] - fn count(self) -> usize { - self.it.count() - } -} - -impl<'c, 'h> ExactSizeIterator for SubCaptureMatches<'c, 'h> {} - -impl<'c, 'h> core::iter::FusedIterator for SubCaptureMatches<'c, 'h> {} - -/// A trait for types that can be used to replace matches in a haystack. -/// -/// In general, users of this crate shouldn't need to implement this trait, -/// since implementations are already provided for `&str` along with other -/// variants of string types, as well as `FnMut(&Captures) -> String` (or any -/// `FnMut(&Captures) -> T` where `T: AsRef<str>`). Those cover most use cases, -/// but callers can implement this trait directly if necessary. -/// -/// # Example -/// -/// This example shows a basic implementation of the `Replacer` trait. This -/// can be done much more simply using the replacement string interpolation -/// support (e.g., `$first $last`), but this approach avoids needing to parse -/// the replacement string at all. -/// -/// ``` -/// use regex::{Captures, Regex, Replacer}; -/// -/// struct NameSwapper; -/// -/// impl Replacer for NameSwapper { -/// fn replace_append(&mut self, caps: &Captures<'_>, dst: &mut String) { -/// dst.push_str(&caps["first"]); -/// dst.push_str(" "); -/// dst.push_str(&caps["last"]); -/// } -/// } -/// -/// let re = Regex::new(r"(?<last>[^,\s]+),\s+(?<first>\S+)").unwrap(); -/// let result = re.replace("Springsteen, Bruce", NameSwapper); -/// assert_eq!(result, "Bruce Springsteen"); -/// ``` -pub trait Replacer { - /// Appends possibly empty data to `dst` to replace the current match. - /// - /// The current match is represented by `caps`, which is guaranteed to - /// have a match at capture group `0`. - /// - /// For example, a no-op replacement would be `dst.push_str(&caps[0])`. - fn replace_append(&mut self, caps: &Captures<'_>, dst: &mut String); - - /// Return a fixed unchanging replacement string. - /// - /// When doing replacements, if access to [`Captures`] is not needed (e.g., - /// the replacement string does not need `$` expansion), then it can be - /// beneficial to avoid finding sub-captures. - /// - /// In general, this is called once for every call to a replacement routine - /// such as [`Regex::replace_all`]. 
- fn no_expansion<'r>(&'r mut self) -> Option<Cow<'r, str>> { - None - } - - /// Returns a type that implements `Replacer`, but that borrows and wraps - /// this `Replacer`. - /// - /// This is useful when you want to take a generic `Replacer` (which might - /// not be cloneable) and use it without consuming it, so it can be used - /// more than once. - /// - /// # Example - /// - /// ``` - /// use regex::{Regex, Replacer}; - /// - /// fn replace_all_twice<R: Replacer>( - /// re: Regex, - /// src: &str, - /// mut rep: R, - /// ) -> String { - /// let dst = re.replace_all(src, rep.by_ref()); - /// let dst = re.replace_all(&dst, rep.by_ref()); - /// dst.into_owned() - /// } - /// ``` - fn by_ref<'r>(&'r mut self) -> ReplacerRef<'r, Self> { - ReplacerRef(self) - } -} - -impl<'a> Replacer for &'a str { - fn replace_append(&mut self, caps: &Captures<'_>, dst: &mut String) { - caps.expand(*self, dst); - } - - fn no_expansion(&mut self) -> Option<Cow<'_, str>> { - no_expansion(self) - } -} - -impl<'a> Replacer for &'a String { - fn replace_append(&mut self, caps: &Captures<'_>, dst: &mut String) { - self.as_str().replace_append(caps, dst) - } - - fn no_expansion(&mut self) -> Option<Cow<'_, str>> { - no_expansion(self) - } -} - -impl Replacer for String { - fn replace_append(&mut self, caps: &Captures<'_>, dst: &mut String) { - self.as_str().replace_append(caps, dst) - } - - fn no_expansion(&mut self) -> Option<Cow<'_, str>> { - no_expansion(self) - } -} - -impl<'a> Replacer for Cow<'a, str> { - fn replace_append(&mut self, caps: &Captures<'_>, dst: &mut String) { - self.as_ref().replace_append(caps, dst) - } - - fn no_expansion(&mut self) -> Option<Cow<'_, str>> { - no_expansion(self) - } -} - -impl<'a> Replacer for &'a Cow<'a, str> { - fn replace_append(&mut self, caps: &Captures<'_>, dst: &mut String) { - self.as_ref().replace_append(caps, dst) - } - - fn no_expansion(&mut self) -> Option<Cow<'_, str>> { - no_expansion(self) - } -} - -impl<F, T> Replacer for F -where - F: FnMut(&Captures<'_>) -> T, - T: AsRef<str>, -{ - fn replace_append(&mut self, caps: &Captures<'_>, dst: &mut String) { - dst.push_str((*self)(caps).as_ref()); - } -} - -/// A by-reference adaptor for a [`Replacer`]. -/// -/// This permits reusing the same `Replacer` value in multiple calls to a -/// replacement routine like [`Regex::replace_all`]. -/// -/// This type is created by [`Replacer::by_ref`]. -#[derive(Debug)] -pub struct ReplacerRef<'a, R: ?Sized>(&'a mut R); - -impl<'a, R: Replacer + ?Sized + 'a> Replacer for ReplacerRef<'a, R> { - fn replace_append(&mut self, caps: &Captures<'_>, dst: &mut String) { - self.0.replace_append(caps, dst) - } - - fn no_expansion(&mut self) -> Option<Cow<'_, str>> { - self.0.no_expansion() - } -} - -/// A helper type for forcing literal string replacement. -/// -/// It can be used with routines like [`Regex::replace`] and -/// [`Regex::replace_all`] to do a literal string replacement without expanding -/// `$name` to their corresponding capture groups. This can be both convenient -/// (to avoid escaping `$`, for example) and faster (since capture groups -/// don't need to be found). -/// -/// `'s` is the lifetime of the literal string to use. 
-/// -/// # Example -/// -/// ``` -/// use regex::{NoExpand, Regex}; -/// -/// let re = Regex::new(r"(?<last>[^,\s]+),\s+(\S+)").unwrap(); -/// let result = re.replace("Springsteen, Bruce", NoExpand("$2 $last")); -/// assert_eq!(result, "$2 $last"); -/// ``` -#[derive(Clone, Debug)] -pub struct NoExpand<'s>(pub &'s str); - -impl<'s> Replacer for NoExpand<'s> { - fn replace_append(&mut self, _: &Captures<'_>, dst: &mut String) { - dst.push_str(self.0); - } - - fn no_expansion(&mut self) -> Option<Cow<'_, str>> { - Some(Cow::Borrowed(self.0)) - } -} - -/// Quickly checks the given replacement string for whether interpolation -/// should be done on it. It returns `None` if a `$` was found anywhere in the -/// given string, which suggests interpolation needs to be done. But if there's -/// no `$` anywhere, then interpolation definitely does not need to be done. In -/// that case, the given string is returned as a borrowed `Cow`. -/// -/// This is meant to be used to implement the [`Replacer::no_expansion`] method -/// in its various trait impls. -fn no_expansion<T: AsRef<str>>(replacement: &T) -> Option<Cow<'_, str>> { - let replacement = replacement.as_ref(); - match crate::find_byte::find_byte(b'$', replacement.as_bytes()) { - Some(_) => None, - None => Some(Cow::Borrowed(replacement)), - } -} diff --git a/vendor/regex/src/regexset/bytes.rs b/vendor/regex/src/regexset/bytes.rs deleted file mode 100644 index 76174afffcd41a..00000000000000 --- a/vendor/regex/src/regexset/bytes.rs +++ /dev/null @@ -1,728 +0,0 @@ -use alloc::string::String; - -use regex_automata::{meta, Input, PatternID, PatternSet, PatternSetIter}; - -use crate::{bytes::RegexSetBuilder, Error}; - -/// Match multiple, possibly overlapping, regexes in a single search. -/// -/// A regex set corresponds to the union of zero or more regular expressions. -/// That is, a regex set will match a haystack when at least one of its -/// constituent regexes matches. A regex set as its formulated here provides a -/// touch more power: it will also report *which* regular expressions in the -/// set match. Indeed, this is the key difference between regex sets and a -/// single `Regex` with many alternates, since only one alternate can match at -/// a time. -/// -/// For example, consider regular expressions to match email addresses and -/// domains: `[a-z]+@[a-z]+\.(com|org|net)` and `[a-z]+\.(com|org|net)`. If a -/// regex set is constructed from those regexes, then searching the haystack -/// `foo@example.com` will report both regexes as matching. Of course, one -/// could accomplish this by compiling each regex on its own and doing two -/// searches over the haystack. The key advantage of using a regex set is -/// that it will report the matching regexes using a *single pass through the -/// haystack*. If one has hundreds or thousands of regexes to match repeatedly -/// (like a URL router for a complex web application or a user agent matcher), -/// then a regex set *can* realize huge performance gains. -/// -/// Unlike the top-level [`RegexSet`](crate::RegexSet), this `RegexSet` -/// searches haystacks with type `&[u8]` instead of `&str`. Consequently, this -/// `RegexSet` is permitted to match invalid UTF-8. -/// -/// # Limitations -/// -/// Regex sets are limited to answering the following two questions: -/// -/// 1. Does any regex in the set match? -/// 2. If so, which regexes in the set match? 
-/// -/// As with the main [`Regex`][crate::bytes::Regex] type, it is cheaper to ask -/// (1) instead of (2) since the matching engines can stop after the first -/// match is found. -/// -/// You cannot directly extract [`Match`][crate::bytes::Match] or -/// [`Captures`][crate::bytes::Captures] objects from a regex set. If you need -/// these operations, the recommended approach is to compile each pattern in -/// the set independently and scan the exact same haystack a second time with -/// those independently compiled patterns: -/// -/// ``` -/// use regex::bytes::{Regex, RegexSet}; -/// -/// let patterns = ["foo", "bar"]; -/// // Both patterns will match different ranges of this string. -/// let hay = b"barfoo"; -/// -/// // Compile a set matching any of our patterns. -/// let set = RegexSet::new(patterns).unwrap(); -/// // Compile each pattern independently. -/// let regexes: Vec<_> = set -/// .patterns() -/// .iter() -/// .map(|pat| Regex::new(pat).unwrap()) -/// .collect(); -/// -/// // Match against the whole set first and identify the individual -/// // matching patterns. -/// let matches: Vec<&[u8]> = set -/// .matches(hay) -/// .into_iter() -/// // Dereference the match index to get the corresponding -/// // compiled pattern. -/// .map(|index| ®exes[index]) -/// // To get match locations or any other info, we then have to search the -/// // exact same haystack again, using our separately-compiled pattern. -/// .map(|re| re.find(hay).unwrap().as_bytes()) -/// .collect(); -/// -/// // Matches arrive in the order the constituent patterns were declared, -/// // not the order they appear in the haystack. -/// assert_eq!(vec![&b"foo"[..], &b"bar"[..]], matches); -/// ``` -/// -/// # Performance -/// -/// A `RegexSet` has the same performance characteristics as `Regex`. Namely, -/// search takes `O(m * n)` time, where `m` is proportional to the size of the -/// regex set and `n` is proportional to the length of the haystack. -/// -/// # Trait implementations -/// -/// The `Default` trait is implemented for `RegexSet`. The default value -/// is an empty set. An empty set can also be explicitly constructed via -/// [`RegexSet::empty`]. -/// -/// # Example -/// -/// This shows how the above two regexes (for matching email addresses and -/// domains) might work: -/// -/// ``` -/// use regex::bytes::RegexSet; -/// -/// let set = RegexSet::new(&[ -/// r"[a-z]+@[a-z]+\.(com|org|net)", -/// r"[a-z]+\.(com|org|net)", -/// ]).unwrap(); -/// -/// // Ask whether any regexes in the set match. -/// assert!(set.is_match(b"foo@example.com")); -/// -/// // Identify which regexes in the set match. -/// let matches: Vec<_> = set.matches(b"foo@example.com").into_iter().collect(); -/// assert_eq!(vec![0, 1], matches); -/// -/// // Try again, but with a haystack that only matches one of the regexes. -/// let matches: Vec<_> = set.matches(b"example.com").into_iter().collect(); -/// assert_eq!(vec![1], matches); -/// -/// // Try again, but with a haystack that doesn't match any regex in the set. -/// let matches: Vec<_> = set.matches(b"example").into_iter().collect(); -/// assert!(matches.is_empty()); -/// ``` -/// -/// Note that it would be possible to adapt the above example to using `Regex` -/// with an expression like: -/// -/// ```text -/// (?P<email>[a-z]+@(?P<email_domain>[a-z]+[.](com|org|net)))|(?P<domain>[a-z]+[.](com|org|net)) -/// ``` -/// -/// After a match, one could then inspect the capture groups to figure out -/// which alternates matched. 
The problem is that it is hard to make this -/// approach scale when there are many regexes since the overlap between each -/// alternate isn't always obvious to reason about. -#[derive(Clone)] -pub struct RegexSet { - pub(crate) meta: meta::Regex, - pub(crate) patterns: alloc::sync::Arc<[String]>, -} - -impl RegexSet { - /// Create a new regex set with the given regular expressions. - /// - /// This takes an iterator of `S`, where `S` is something that can produce - /// a `&str`. If any of the strings in the iterator are not valid regular - /// expressions, then an error is returned. - /// - /// # Example - /// - /// Create a new regex set from an iterator of strings: - /// - /// ``` - /// use regex::bytes::RegexSet; - /// - /// let set = RegexSet::new([r"\w+", r"\d+"]).unwrap(); - /// assert!(set.is_match(b"foo")); - /// ``` - pub fn new<I, S>(exprs: I) -> Result<RegexSet, Error> - where - S: AsRef<str>, - I: IntoIterator<Item = S>, - { - RegexSetBuilder::new(exprs).build() - } - - /// Create a new empty regex set. - /// - /// An empty regex never matches anything. - /// - /// This is a convenience function for `RegexSet::new([])`, but doesn't - /// require one to specify the type of the input. - /// - /// # Example - /// - /// ``` - /// use regex::bytes::RegexSet; - /// - /// let set = RegexSet::empty(); - /// assert!(set.is_empty()); - /// // an empty set matches nothing - /// assert!(!set.is_match(b"")); - /// ``` - pub fn empty() -> RegexSet { - let empty: [&str; 0] = []; - RegexSetBuilder::new(empty).build().unwrap() - } - - /// Returns true if and only if one of the regexes in this set matches - /// the haystack given. - /// - /// This method should be preferred if you only need to test whether any - /// of the regexes in the set should match, but don't care about *which* - /// regexes matched. This is because the underlying matching engine will - /// quit immediately after seeing the first match instead of continuing to - /// find all matches. - /// - /// Note that as with searches using [`Regex`](crate::bytes::Regex), the - /// expression is unanchored by default. That is, if the regex does not - /// start with `^` or `\A`, or end with `$` or `\z`, then it is permitted - /// to match anywhere in the haystack. - /// - /// # Example - /// - /// Tests whether a set matches somewhere in a haystack: - /// - /// ``` - /// use regex::bytes::RegexSet; - /// - /// let set = RegexSet::new([r"\w+", r"\d+"]).unwrap(); - /// assert!(set.is_match(b"foo")); - /// assert!(!set.is_match("☃".as_bytes())); - /// ``` - #[inline] - pub fn is_match(&self, haystack: &[u8]) -> bool { - self.is_match_at(haystack, 0) - } - - /// Returns true if and only if one of the regexes in this set matches the - /// haystack given, with the search starting at the offset given. - /// - /// The significance of the starting point is that it takes the surrounding - /// context into consideration. For example, the `\A` anchor can only - /// match when `start == 0`. - /// - /// # Panics - /// - /// This panics when `start >= haystack.len() + 1`. - /// - /// # Example - /// - /// This example shows the significance of `start`. Namely, consider a - /// haystack `foobar` and a desire to execute a search starting at offset - /// `3`. You could search a substring explicitly, but then the look-around - /// assertions won't work correctly. Instead, you can use this method to - /// specify the start position of a search. 
- /// - /// ``` - /// use regex::bytes::RegexSet; - /// - /// let set = RegexSet::new([r"\bbar\b", r"(?m)^bar$"]).unwrap(); - /// let hay = b"foobar"; - /// // We get a match here, but it's probably not intended. - /// assert!(set.is_match(&hay[3..])); - /// // No match because the assertions take the context into account. - /// assert!(!set.is_match_at(hay, 3)); - /// ``` - #[inline] - pub fn is_match_at(&self, haystack: &[u8], start: usize) -> bool { - self.meta.is_match(Input::new(haystack).span(start..haystack.len())) - } - - /// Returns the set of regexes that match in the given haystack. - /// - /// The set returned contains the index of each regex that matches in - /// the given haystack. The index is in correspondence with the order of - /// regular expressions given to `RegexSet`'s constructor. - /// - /// The set can also be used to iterate over the matched indices. The order - /// of iteration is always ascending with respect to the matching indices. - /// - /// Note that as with searches using [`Regex`](crate::bytes::Regex), the - /// expression is unanchored by default. That is, if the regex does not - /// start with `^` or `\A`, or end with `$` or `\z`, then it is permitted - /// to match anywhere in the haystack. - /// - /// # Example - /// - /// Tests which regular expressions match the given haystack: - /// - /// ``` - /// use regex::bytes::RegexSet; - /// - /// let set = RegexSet::new([ - /// r"\w+", - /// r"\d+", - /// r"\pL+", - /// r"foo", - /// r"bar", - /// r"barfoo", - /// r"foobar", - /// ]).unwrap(); - /// let matches: Vec<_> = set.matches(b"foobar").into_iter().collect(); - /// assert_eq!(matches, vec![0, 2, 3, 4, 6]); - /// - /// // You can also test whether a particular regex matched: - /// let matches = set.matches(b"foobar"); - /// assert!(!matches.matched(5)); - /// assert!(matches.matched(6)); - /// ``` - #[inline] - pub fn matches(&self, haystack: &[u8]) -> SetMatches { - self.matches_at(haystack, 0) - } - - /// Returns the set of regexes that match in the given haystack. - /// - /// The set returned contains the index of each regex that matches in - /// the given haystack. The index is in correspondence with the order of - /// regular expressions given to `RegexSet`'s constructor. - /// - /// The set can also be used to iterate over the matched indices. The order - /// of iteration is always ascending with respect to the matching indices. - /// - /// The significance of the starting point is that it takes the surrounding - /// context into consideration. For example, the `\A` anchor can only - /// match when `start == 0`. - /// - /// # Panics - /// - /// This panics when `start >= haystack.len() + 1`. - /// - /// # Example - /// - /// Tests which regular expressions match the given haystack: - /// - /// ``` - /// use regex::bytes::RegexSet; - /// - /// let set = RegexSet::new([r"\bbar\b", r"(?m)^bar$"]).unwrap(); - /// let hay = b"foobar"; - /// // We get matches here, but it's probably not intended. - /// let matches: Vec<_> = set.matches(&hay[3..]).into_iter().collect(); - /// assert_eq!(matches, vec![0, 1]); - /// // No matches because the assertions take the context into account. 
- /// let matches: Vec<_> = set.matches_at(hay, 3).into_iter().collect(); - /// assert_eq!(matches, vec![]); - /// ``` - #[inline] - pub fn matches_at(&self, haystack: &[u8], start: usize) -> SetMatches { - let input = Input::new(haystack).span(start..haystack.len()); - let mut patset = PatternSet::new(self.meta.pattern_len()); - self.meta.which_overlapping_matches(&input, &mut patset); - SetMatches(patset) - } - - /// Returns the same as matches, but starts the search at the given - /// offset and stores the matches into the slice given. - /// - /// The significance of the starting point is that it takes the surrounding - /// context into consideration. For example, the `\A` anchor can only - /// match when `start == 0`. - /// - /// `matches` must have a length that is at least the number of regexes - /// in this set. - /// - /// This method returns true if and only if at least one member of - /// `matches` is true after executing the set against `haystack`. - #[doc(hidden)] - #[inline] - pub fn matches_read_at( - &self, - matches: &mut [bool], - haystack: &[u8], - start: usize, - ) -> bool { - // This is pretty dumb. We should try to fix this, but the - // regex-automata API doesn't provide a way to store matches in an - // arbitrary &mut [bool]. Thankfully, this API is doc(hidden) and - // thus not public... But regex-capi currently uses it. We should - // fix regex-capi to use a PatternSet, maybe? Not sure... PatternSet - // is in regex-automata, not regex. So maybe we should just accept a - // 'SetMatches', which is basically just a newtype around PatternSet. - let mut patset = PatternSet::new(self.meta.pattern_len()); - let mut input = Input::new(haystack); - input.set_start(start); - self.meta.which_overlapping_matches(&input, &mut patset); - for pid in patset.iter() { - matches[pid] = true; - } - !patset.is_empty() - } - - /// An alias for `matches_read_at` to preserve backward compatibility. - /// - /// The `regex-capi` crate used this method, so to avoid breaking that - /// crate, we continue to export it as an undocumented API. - #[doc(hidden)] - #[inline] - pub fn read_matches_at( - &self, - matches: &mut [bool], - haystack: &[u8], - start: usize, - ) -> bool { - self.matches_read_at(matches, haystack, start) - } - - /// Returns the total number of regexes in this set. - /// - /// # Example - /// - /// ``` - /// use regex::bytes::RegexSet; - /// - /// assert_eq!(0, RegexSet::empty().len()); - /// assert_eq!(1, RegexSet::new([r"[0-9]"]).unwrap().len()); - /// assert_eq!(2, RegexSet::new([r"[0-9]", r"[a-z]"]).unwrap().len()); - /// ``` - #[inline] - pub fn len(&self) -> usize { - self.meta.pattern_len() - } - - /// Returns `true` if this set contains no regexes. - /// - /// # Example - /// - /// ``` - /// use regex::bytes::RegexSet; - /// - /// assert!(RegexSet::empty().is_empty()); - /// assert!(!RegexSet::new([r"[0-9]"]).unwrap().is_empty()); - /// ``` - #[inline] - pub fn is_empty(&self) -> bool { - self.meta.pattern_len() == 0 - } - - /// Returns the regex patterns that this regex set was constructed from. - /// - /// This function can be used to determine the pattern for a match. The - /// slice returned has exactly as many patterns givens to this regex set, - /// and the order of the slice is the same as the order of the patterns - /// provided to the set. 
- /// - /// # Example - /// - /// ``` - /// use regex::bytes::RegexSet; - /// - /// let set = RegexSet::new(&[ - /// r"\w+", - /// r"\d+", - /// r"\pL+", - /// r"foo", - /// r"bar", - /// r"barfoo", - /// r"foobar", - /// ]).unwrap(); - /// let matches: Vec<_> = set - /// .matches(b"foobar") - /// .into_iter() - /// .map(|index| &set.patterns()[index]) - /// .collect(); - /// assert_eq!(matches, vec![r"\w+", r"\pL+", r"foo", r"bar", r"foobar"]); - /// ``` - #[inline] - pub fn patterns(&self) -> &[String] { - &self.patterns - } -} - -impl Default for RegexSet { - fn default() -> Self { - RegexSet::empty() - } -} - -/// A set of matches returned by a regex set. -/// -/// Values of this type are constructed by [`RegexSet::matches`]. -#[derive(Clone, Debug)] -pub struct SetMatches(PatternSet); - -impl SetMatches { - /// Whether this set contains any matches. - /// - /// # Example - /// - /// ``` - /// use regex::bytes::RegexSet; - /// - /// let set = RegexSet::new(&[ - /// r"[a-z]+@[a-z]+\.(com|org|net)", - /// r"[a-z]+\.(com|org|net)", - /// ]).unwrap(); - /// let matches = set.matches(b"foo@example.com"); - /// assert!(matches.matched_any()); - /// ``` - #[inline] - pub fn matched_any(&self) -> bool { - !self.0.is_empty() - } - - /// Whether all patterns in this set matched. - /// - /// # Example - /// - /// ``` - /// use regex::bytes::RegexSet; - /// - /// let set = RegexSet::new(&[ - /// r"^foo", - /// r"[a-z]+\.com", - /// ]).unwrap(); - /// let matches = set.matches(b"foo.example.com"); - /// assert!(matches.matched_all()); - /// ``` - pub fn matched_all(&self) -> bool { - self.0.is_full() - } - - /// Whether the regex at the given index matched. - /// - /// The index for a regex is determined by its insertion order upon the - /// initial construction of a `RegexSet`, starting at `0`. - /// - /// # Panics - /// - /// If `index` is greater than or equal to the number of regexes in the - /// original set that produced these matches. Equivalently, when `index` - /// is greater than or equal to [`SetMatches::len`]. - /// - /// # Example - /// - /// ``` - /// use regex::bytes::RegexSet; - /// - /// let set = RegexSet::new([ - /// r"[a-z]+@[a-z]+\.(com|org|net)", - /// r"[a-z]+\.(com|org|net)", - /// ]).unwrap(); - /// let matches = set.matches(b"example.com"); - /// assert!(!matches.matched(0)); - /// assert!(matches.matched(1)); - /// ``` - #[inline] - pub fn matched(&self, index: usize) -> bool { - self.0.contains(PatternID::new_unchecked(index)) - } - - /// The total number of regexes in the set that created these matches. - /// - /// **WARNING:** This always returns the same value as [`RegexSet::len`]. - /// In particular, it does *not* return the number of elements yielded by - /// [`SetMatches::iter`]. The only way to determine the total number of - /// matched regexes is to iterate over them. - /// - /// # Example - /// - /// Notice that this method returns the total number of regexes in the - /// original set, and *not* the total number of regexes that matched. - /// - /// ``` - /// use regex::bytes::RegexSet; - /// - /// let set = RegexSet::new([ - /// r"[a-z]+@[a-z]+\.(com|org|net)", - /// r"[a-z]+\.(com|org|net)", - /// ]).unwrap(); - /// let matches = set.matches(b"example.com"); - /// // Total number of patterns that matched. - /// assert_eq!(1, matches.iter().count()); - /// // Total number of patterns in the set. 
- /// assert_eq!(2, matches.len()); - /// ``` - #[inline] - pub fn len(&self) -> usize { - self.0.capacity() - } - - /// Returns an iterator over the indices of the regexes that matched. - /// - /// This will always produces matches in ascending order, where the index - /// yielded corresponds to the index of the regex that matched with respect - /// to its position when initially building the set. - /// - /// # Example - /// - /// ``` - /// use regex::bytes::RegexSet; - /// - /// let set = RegexSet::new([ - /// r"[0-9]", - /// r"[a-z]", - /// r"[A-Z]", - /// r"\p{Greek}", - /// ]).unwrap(); - /// let hay = "βa1".as_bytes(); - /// let matches: Vec<_> = set.matches(hay).iter().collect(); - /// assert_eq!(matches, vec![0, 1, 3]); - /// ``` - /// - /// Note that `SetMatches` also implements the `IntoIterator` trait, so - /// this method is not always needed. For example: - /// - /// ``` - /// use regex::bytes::RegexSet; - /// - /// let set = RegexSet::new([ - /// r"[0-9]", - /// r"[a-z]", - /// r"[A-Z]", - /// r"\p{Greek}", - /// ]).unwrap(); - /// let hay = "βa1".as_bytes(); - /// let mut matches = vec![]; - /// for index in set.matches(hay) { - /// matches.push(index); - /// } - /// assert_eq!(matches, vec![0, 1, 3]); - /// ``` - #[inline] - pub fn iter(&self) -> SetMatchesIter<'_> { - SetMatchesIter(self.0.iter()) - } -} - -impl IntoIterator for SetMatches { - type IntoIter = SetMatchesIntoIter; - type Item = usize; - - fn into_iter(self) -> Self::IntoIter { - let it = 0..self.0.capacity(); - SetMatchesIntoIter { patset: self.0, it } - } -} - -impl<'a> IntoIterator for &'a SetMatches { - type IntoIter = SetMatchesIter<'a>; - type Item = usize; - - fn into_iter(self) -> Self::IntoIter { - self.iter() - } -} - -/// An owned iterator over the set of matches from a regex set. -/// -/// This will always produces matches in ascending order of index, where the -/// index corresponds to the index of the regex that matched with respect to -/// its position when initially building the set. -/// -/// This iterator is created by calling `SetMatches::into_iter` via the -/// `IntoIterator` trait. This is automatically done in `for` loops. -/// -/// # Example -/// -/// ``` -/// use regex::bytes::RegexSet; -/// -/// let set = RegexSet::new([ -/// r"[0-9]", -/// r"[a-z]", -/// r"[A-Z]", -/// r"\p{Greek}", -/// ]).unwrap(); -/// let hay = "βa1".as_bytes(); -/// let mut matches = vec![]; -/// for index in set.matches(hay) { -/// matches.push(index); -/// } -/// assert_eq!(matches, vec![0, 1, 3]); -/// ``` -#[derive(Debug)] -pub struct SetMatchesIntoIter { - patset: PatternSet, - it: core::ops::Range<usize>, -} - -impl Iterator for SetMatchesIntoIter { - type Item = usize; - - fn next(&mut self) -> Option<usize> { - loop { - let id = self.it.next()?; - if self.patset.contains(PatternID::new_unchecked(id)) { - return Some(id); - } - } - } - - fn size_hint(&self) -> (usize, Option<usize>) { - self.it.size_hint() - } -} - -impl DoubleEndedIterator for SetMatchesIntoIter { - fn next_back(&mut self) -> Option<usize> { - loop { - let id = self.it.next_back()?; - if self.patset.contains(PatternID::new_unchecked(id)) { - return Some(id); - } - } - } -} - -impl core::iter::FusedIterator for SetMatchesIntoIter {} - -/// A borrowed iterator over the set of matches from a regex set. -/// -/// The lifetime `'a` refers to the lifetime of the [`SetMatches`] value that -/// created this iterator. 
-/// -/// This will always produces matches in ascending order, where the index -/// corresponds to the index of the regex that matched with respect to its -/// position when initially building the set. -/// -/// This iterator is created by the [`SetMatches::iter`] method. -#[derive(Clone, Debug)] -pub struct SetMatchesIter<'a>(PatternSetIter<'a>); - -impl<'a> Iterator for SetMatchesIter<'a> { - type Item = usize; - - fn next(&mut self) -> Option<usize> { - self.0.next().map(|pid| pid.as_usize()) - } - - fn size_hint(&self) -> (usize, Option<usize>) { - self.0.size_hint() - } -} - -impl<'a> DoubleEndedIterator for SetMatchesIter<'a> { - fn next_back(&mut self) -> Option<usize> { - self.0.next_back().map(|pid| pid.as_usize()) - } -} - -impl<'a> core::iter::FusedIterator for SetMatchesIter<'a> {} - -impl core::fmt::Debug for RegexSet { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - write!(f, "RegexSet({:?})", self.patterns()) - } -} diff --git a/vendor/regex/src/regexset/mod.rs b/vendor/regex/src/regexset/mod.rs deleted file mode 100644 index 93fadec8bf65c9..00000000000000 --- a/vendor/regex/src/regexset/mod.rs +++ /dev/null @@ -1,2 +0,0 @@ -pub(crate) mod bytes; -pub(crate) mod string; diff --git a/vendor/regex/src/regexset/string.rs b/vendor/regex/src/regexset/string.rs deleted file mode 100644 index 5126a4661ebca0..00000000000000 --- a/vendor/regex/src/regexset/string.rs +++ /dev/null @@ -1,724 +0,0 @@ -use alloc::string::String; - -use regex_automata::{meta, Input, PatternID, PatternSet, PatternSetIter}; - -use crate::{Error, RegexSetBuilder}; - -/// Match multiple, possibly overlapping, regexes in a single search. -/// -/// A regex set corresponds to the union of zero or more regular expressions. -/// That is, a regex set will match a haystack when at least one of its -/// constituent regexes matches. A regex set as its formulated here provides a -/// touch more power: it will also report *which* regular expressions in the -/// set match. Indeed, this is the key difference between regex sets and a -/// single `Regex` with many alternates, since only one alternate can match at -/// a time. -/// -/// For example, consider regular expressions to match email addresses and -/// domains: `[a-z]+@[a-z]+\.(com|org|net)` and `[a-z]+\.(com|org|net)`. If a -/// regex set is constructed from those regexes, then searching the haystack -/// `foo@example.com` will report both regexes as matching. Of course, one -/// could accomplish this by compiling each regex on its own and doing two -/// searches over the haystack. The key advantage of using a regex set is -/// that it will report the matching regexes using a *single pass through the -/// haystack*. If one has hundreds or thousands of regexes to match repeatedly -/// (like a URL router for a complex web application or a user agent matcher), -/// then a regex set *can* realize huge performance gains. -/// -/// # Limitations -/// -/// Regex sets are limited to answering the following two questions: -/// -/// 1. Does any regex in the set match? -/// 2. If so, which regexes in the set match? -/// -/// As with the main [`Regex`][crate::Regex] type, it is cheaper to ask (1) -/// instead of (2) since the matching engines can stop after the first match -/// is found. -/// -/// You cannot directly extract [`Match`][crate::Match] or -/// [`Captures`][crate::Captures] objects from a regex set. 
If you need these
-/// operations, the recommended approach is to compile each pattern in the set
-/// independently and scan the exact same haystack a second time with those
-/// independently compiled patterns:
-///
-/// ```
-/// use regex::{Regex, RegexSet};
-///
-/// let patterns = ["foo", "bar"];
-/// // Both patterns will match different ranges of this string.
-/// let hay = "barfoo";
-///
-/// // Compile a set matching any of our patterns.
-/// let set = RegexSet::new(patterns).unwrap();
-/// // Compile each pattern independently.
-/// let regexes: Vec<_> = set
-///     .patterns()
-///     .iter()
-///     .map(|pat| Regex::new(pat).unwrap())
-///     .collect();
-///
-/// // Match against the whole set first and identify the individual
-/// // matching patterns.
-/// let matches: Vec<&str> = set
-///     .matches(hay)
-///     .into_iter()
-///     // Dereference the match index to get the corresponding
-///     // compiled pattern.
-///     .map(|index| &regexes[index])
-///     // To get match locations or any other info, we then have to search the
-///     // exact same haystack again, using our separately-compiled pattern.
-///     .map(|re| re.find(hay).unwrap().as_str())
-///     .collect();
-///
-/// // Matches arrive in the order the constituent patterns were declared,
-/// // not the order they appear in the haystack.
-/// assert_eq!(vec!["foo", "bar"], matches);
-/// ```
-///
-/// # Performance
-///
-/// A `RegexSet` has the same performance characteristics as `Regex`. Namely,
-/// search takes `O(m * n)` time, where `m` is proportional to the size of the
-/// regex set and `n` is proportional to the length of the haystack.
-///
-/// # Trait implementations
-///
-/// The `Default` trait is implemented for `RegexSet`. The default value
-/// is an empty set. An empty set can also be explicitly constructed via
-/// [`RegexSet::empty`].
-///
-/// # Example
-///
-/// This shows how the above two regexes (for matching email addresses and
-/// domains) might work:
-///
-/// ```
-/// use regex::RegexSet;
-///
-/// let set = RegexSet::new(&[
-///     r"[a-z]+@[a-z]+\.(com|org|net)",
-///     r"[a-z]+\.(com|org|net)",
-/// ]).unwrap();
-///
-/// // Ask whether any regexes in the set match.
-/// assert!(set.is_match("foo@example.com"));
-///
-/// // Identify which regexes in the set match.
-/// let matches: Vec<_> = set.matches("foo@example.com").into_iter().collect();
-/// assert_eq!(vec![0, 1], matches);
-///
-/// // Try again, but with a haystack that only matches one of the regexes.
-/// let matches: Vec<_> = set.matches("example.com").into_iter().collect();
-/// assert_eq!(vec![1], matches);
-///
-/// // Try again, but with a haystack that doesn't match any regex in the set.
-/// let matches: Vec<_> = set.matches("example").into_iter().collect();
-/// assert!(matches.is_empty());
-/// ```
-///
-/// Note that it would be possible to adapt the above example to using `Regex`
-/// with an expression like:
-///
-/// ```text
-/// (?P<email>[a-z]+@(?P<email_domain>[a-z]+[.](com|org|net)))|(?P<domain>[a-z]+[.](com|org|net))
-/// ```
-///
-/// After a match, one could then inspect the capture groups to figure out
-/// which alternates matched. The problem is that it is hard to make this
-/// approach scale when there are many regexes since the overlap between each
-/// alternate isn't always obvious to reason about.
-#[derive(Clone)]
-pub struct RegexSet {
-    pub(crate) meta: meta::Regex,
-    pub(crate) patterns: alloc::sync::Arc<[String]>,
-}
-
-impl RegexSet {
-    /// Create a new regex set with the given regular expressions. 
- /// - /// This takes an iterator of `S`, where `S` is something that can produce - /// a `&str`. If any of the strings in the iterator are not valid regular - /// expressions, then an error is returned. - /// - /// # Example - /// - /// Create a new regex set from an iterator of strings: - /// - /// ``` - /// use regex::RegexSet; - /// - /// let set = RegexSet::new([r"\w+", r"\d+"]).unwrap(); - /// assert!(set.is_match("foo")); - /// ``` - pub fn new<I, S>(exprs: I) -> Result<RegexSet, Error> - where - S: AsRef<str>, - I: IntoIterator<Item = S>, - { - RegexSetBuilder::new(exprs).build() - } - - /// Create a new empty regex set. - /// - /// An empty regex never matches anything. - /// - /// This is a convenience function for `RegexSet::new([])`, but doesn't - /// require one to specify the type of the input. - /// - /// # Example - /// - /// ``` - /// use regex::RegexSet; - /// - /// let set = RegexSet::empty(); - /// assert!(set.is_empty()); - /// // an empty set matches nothing - /// assert!(!set.is_match("")); - /// ``` - pub fn empty() -> RegexSet { - let empty: [&str; 0] = []; - RegexSetBuilder::new(empty).build().unwrap() - } - - /// Returns true if and only if one of the regexes in this set matches - /// the haystack given. - /// - /// This method should be preferred if you only need to test whether any - /// of the regexes in the set should match, but don't care about *which* - /// regexes matched. This is because the underlying matching engine will - /// quit immediately after seeing the first match instead of continuing to - /// find all matches. - /// - /// Note that as with searches using [`Regex`](crate::Regex), the - /// expression is unanchored by default. That is, if the regex does not - /// start with `^` or `\A`, or end with `$` or `\z`, then it is permitted - /// to match anywhere in the haystack. - /// - /// # Example - /// - /// Tests whether a set matches somewhere in a haystack: - /// - /// ``` - /// use regex::RegexSet; - /// - /// let set = RegexSet::new([r"\w+", r"\d+"]).unwrap(); - /// assert!(set.is_match("foo")); - /// assert!(!set.is_match("☃")); - /// ``` - #[inline] - pub fn is_match(&self, haystack: &str) -> bool { - self.is_match_at(haystack, 0) - } - - /// Returns true if and only if one of the regexes in this set matches the - /// haystack given, with the search starting at the offset given. - /// - /// The significance of the starting point is that it takes the surrounding - /// context into consideration. For example, the `\A` anchor can only - /// match when `start == 0`. - /// - /// # Panics - /// - /// This panics when `start >= haystack.len() + 1`. - /// - /// # Example - /// - /// This example shows the significance of `start`. Namely, consider a - /// haystack `foobar` and a desire to execute a search starting at offset - /// `3`. You could search a substring explicitly, but then the look-around - /// assertions won't work correctly. Instead, you can use this method to - /// specify the start position of a search. - /// - /// ``` - /// use regex::RegexSet; - /// - /// let set = RegexSet::new([r"\bbar\b", r"(?m)^bar$"]).unwrap(); - /// let hay = "foobar"; - /// // We get a match here, but it's probably not intended. - /// assert!(set.is_match(&hay[3..])); - /// // No match because the assertions take the context into account. 
- /// assert!(!set.is_match_at(hay, 3)); - /// ``` - #[inline] - pub fn is_match_at(&self, haystack: &str, start: usize) -> bool { - self.meta.is_match(Input::new(haystack).span(start..haystack.len())) - } - - /// Returns the set of regexes that match in the given haystack. - /// - /// The set returned contains the index of each regex that matches in - /// the given haystack. The index is in correspondence with the order of - /// regular expressions given to `RegexSet`'s constructor. - /// - /// The set can also be used to iterate over the matched indices. The order - /// of iteration is always ascending with respect to the matching indices. - /// - /// Note that as with searches using [`Regex`](crate::Regex), the - /// expression is unanchored by default. That is, if the regex does not - /// start with `^` or `\A`, or end with `$` or `\z`, then it is permitted - /// to match anywhere in the haystack. - /// - /// # Example - /// - /// Tests which regular expressions match the given haystack: - /// - /// ``` - /// use regex::RegexSet; - /// - /// let set = RegexSet::new([ - /// r"\w+", - /// r"\d+", - /// r"\pL+", - /// r"foo", - /// r"bar", - /// r"barfoo", - /// r"foobar", - /// ]).unwrap(); - /// let matches: Vec<_> = set.matches("foobar").into_iter().collect(); - /// assert_eq!(matches, vec![0, 2, 3, 4, 6]); - /// - /// // You can also test whether a particular regex matched: - /// let matches = set.matches("foobar"); - /// assert!(!matches.matched(5)); - /// assert!(matches.matched(6)); - /// ``` - #[inline] - pub fn matches(&self, haystack: &str) -> SetMatches { - self.matches_at(haystack, 0) - } - - /// Returns the set of regexes that match in the given haystack. - /// - /// The set returned contains the index of each regex that matches in - /// the given haystack. The index is in correspondence with the order of - /// regular expressions given to `RegexSet`'s constructor. - /// - /// The set can also be used to iterate over the matched indices. The order - /// of iteration is always ascending with respect to the matching indices. - /// - /// The significance of the starting point is that it takes the surrounding - /// context into consideration. For example, the `\A` anchor can only - /// match when `start == 0`. - /// - /// # Panics - /// - /// This panics when `start >= haystack.len() + 1`. - /// - /// # Example - /// - /// Tests which regular expressions match the given haystack: - /// - /// ``` - /// use regex::RegexSet; - /// - /// let set = RegexSet::new([r"\bbar\b", r"(?m)^bar$"]).unwrap(); - /// let hay = "foobar"; - /// // We get matches here, but it's probably not intended. - /// let matches: Vec<_> = set.matches(&hay[3..]).into_iter().collect(); - /// assert_eq!(matches, vec![0, 1]); - /// // No matches because the assertions take the context into account. - /// let matches: Vec<_> = set.matches_at(hay, 3).into_iter().collect(); - /// assert_eq!(matches, vec![]); - /// ``` - #[inline] - pub fn matches_at(&self, haystack: &str, start: usize) -> SetMatches { - let input = Input::new(haystack).span(start..haystack.len()); - let mut patset = PatternSet::new(self.meta.pattern_len()); - self.meta.which_overlapping_matches(&input, &mut patset); - SetMatches(patset) - } - - /// Returns the same as matches, but starts the search at the given - /// offset and stores the matches into the slice given. - /// - /// The significance of the starting point is that it takes the surrounding - /// context into consideration. 
For example, the `\A` anchor can only - /// match when `start == 0`. - /// - /// `matches` must have a length that is at least the number of regexes - /// in this set. - /// - /// This method returns true if and only if at least one member of - /// `matches` is true after executing the set against `haystack`. - #[doc(hidden)] - #[inline] - pub fn matches_read_at( - &self, - matches: &mut [bool], - haystack: &str, - start: usize, - ) -> bool { - // This is pretty dumb. We should try to fix this, but the - // regex-automata API doesn't provide a way to store matches in an - // arbitrary &mut [bool]. Thankfully, this API is doc(hidden) and - // thus not public... But regex-capi currently uses it. We should - // fix regex-capi to use a PatternSet, maybe? Not sure... PatternSet - // is in regex-automata, not regex. So maybe we should just accept a - // 'SetMatches', which is basically just a newtype around PatternSet. - let mut patset = PatternSet::new(self.meta.pattern_len()); - let mut input = Input::new(haystack); - input.set_start(start); - self.meta.which_overlapping_matches(&input, &mut patset); - for pid in patset.iter() { - matches[pid] = true; - } - !patset.is_empty() - } - - /// An alias for `matches_read_at` to preserve backward compatibility. - /// - /// The `regex-capi` crate used this method, so to avoid breaking that - /// crate, we continue to export it as an undocumented API. - #[doc(hidden)] - #[inline] - pub fn read_matches_at( - &self, - matches: &mut [bool], - haystack: &str, - start: usize, - ) -> bool { - self.matches_read_at(matches, haystack, start) - } - - /// Returns the total number of regexes in this set. - /// - /// # Example - /// - /// ``` - /// use regex::RegexSet; - /// - /// assert_eq!(0, RegexSet::empty().len()); - /// assert_eq!(1, RegexSet::new([r"[0-9]"]).unwrap().len()); - /// assert_eq!(2, RegexSet::new([r"[0-9]", r"[a-z]"]).unwrap().len()); - /// ``` - #[inline] - pub fn len(&self) -> usize { - self.meta.pattern_len() - } - - /// Returns `true` if this set contains no regexes. - /// - /// # Example - /// - /// ``` - /// use regex::RegexSet; - /// - /// assert!(RegexSet::empty().is_empty()); - /// assert!(!RegexSet::new([r"[0-9]"]).unwrap().is_empty()); - /// ``` - #[inline] - pub fn is_empty(&self) -> bool { - self.meta.pattern_len() == 0 - } - - /// Returns the regex patterns that this regex set was constructed from. - /// - /// This function can be used to determine the pattern for a match. The - /// slice returned has exactly as many patterns givens to this regex set, - /// and the order of the slice is the same as the order of the patterns - /// provided to the set. - /// - /// # Example - /// - /// ``` - /// use regex::RegexSet; - /// - /// let set = RegexSet::new(&[ - /// r"\w+", - /// r"\d+", - /// r"\pL+", - /// r"foo", - /// r"bar", - /// r"barfoo", - /// r"foobar", - /// ]).unwrap(); - /// let matches: Vec<_> = set - /// .matches("foobar") - /// .into_iter() - /// .map(|index| &set.patterns()[index]) - /// .collect(); - /// assert_eq!(matches, vec![r"\w+", r"\pL+", r"foo", r"bar", r"foobar"]); - /// ``` - #[inline] - pub fn patterns(&self) -> &[String] { - &self.patterns - } -} - -impl Default for RegexSet { - fn default() -> Self { - RegexSet::empty() - } -} - -/// A set of matches returned by a regex set. -/// -/// Values of this type are constructed by [`RegexSet::matches`]. -#[derive(Clone, Debug)] -pub struct SetMatches(PatternSet); - -impl SetMatches { - /// Whether this set contains any matches. 
- /// - /// # Example - /// - /// ``` - /// use regex::RegexSet; - /// - /// let set = RegexSet::new(&[ - /// r"[a-z]+@[a-z]+\.(com|org|net)", - /// r"[a-z]+\.(com|org|net)", - /// ]).unwrap(); - /// let matches = set.matches("foo@example.com"); - /// assert!(matches.matched_any()); - /// ``` - #[inline] - pub fn matched_any(&self) -> bool { - !self.0.is_empty() - } - - /// Whether all patterns in this set matched. - /// - /// # Example - /// - /// ``` - /// use regex::RegexSet; - /// - /// let set = RegexSet::new(&[ - /// r"^foo", - /// r"[a-z]+\.com", - /// ]).unwrap(); - /// let matches = set.matches("foo.example.com"); - /// assert!(matches.matched_all()); - /// ``` - pub fn matched_all(&self) -> bool { - self.0.is_full() - } - - /// Whether the regex at the given index matched. - /// - /// The index for a regex is determined by its insertion order upon the - /// initial construction of a `RegexSet`, starting at `0`. - /// - /// # Panics - /// - /// If `index` is greater than or equal to the number of regexes in the - /// original set that produced these matches. Equivalently, when `index` - /// is greater than or equal to [`SetMatches::len`]. - /// - /// # Example - /// - /// ``` - /// use regex::RegexSet; - /// - /// let set = RegexSet::new([ - /// r"[a-z]+@[a-z]+\.(com|org|net)", - /// r"[a-z]+\.(com|org|net)", - /// ]).unwrap(); - /// let matches = set.matches("example.com"); - /// assert!(!matches.matched(0)); - /// assert!(matches.matched(1)); - /// ``` - #[inline] - pub fn matched(&self, index: usize) -> bool { - self.0.contains(PatternID::new_unchecked(index)) - } - - /// The total number of regexes in the set that created these matches. - /// - /// **WARNING:** This always returns the same value as [`RegexSet::len`]. - /// In particular, it does *not* return the number of elements yielded by - /// [`SetMatches::iter`]. The only way to determine the total number of - /// matched regexes is to iterate over them. - /// - /// # Example - /// - /// Notice that this method returns the total number of regexes in the - /// original set, and *not* the total number of regexes that matched. - /// - /// ``` - /// use regex::RegexSet; - /// - /// let set = RegexSet::new([ - /// r"[a-z]+@[a-z]+\.(com|org|net)", - /// r"[a-z]+\.(com|org|net)", - /// ]).unwrap(); - /// let matches = set.matches("example.com"); - /// // Total number of patterns that matched. - /// assert_eq!(1, matches.iter().count()); - /// // Total number of patterns in the set. - /// assert_eq!(2, matches.len()); - /// ``` - #[inline] - pub fn len(&self) -> usize { - self.0.capacity() - } - - /// Returns an iterator over the indices of the regexes that matched. - /// - /// This will always produces matches in ascending order, where the index - /// yielded corresponds to the index of the regex that matched with respect - /// to its position when initially building the set. - /// - /// # Example - /// - /// ``` - /// use regex::RegexSet; - /// - /// let set = RegexSet::new([ - /// r"[0-9]", - /// r"[a-z]", - /// r"[A-Z]", - /// r"\p{Greek}", - /// ]).unwrap(); - /// let hay = "βa1"; - /// let matches: Vec<_> = set.matches(hay).iter().collect(); - /// assert_eq!(matches, vec![0, 1, 3]); - /// ``` - /// - /// Note that `SetMatches` also implements the `IntoIterator` trait, so - /// this method is not always needed. 
For example: - /// - /// ``` - /// use regex::RegexSet; - /// - /// let set = RegexSet::new([ - /// r"[0-9]", - /// r"[a-z]", - /// r"[A-Z]", - /// r"\p{Greek}", - /// ]).unwrap(); - /// let hay = "βa1"; - /// let mut matches = vec![]; - /// for index in set.matches(hay) { - /// matches.push(index); - /// } - /// assert_eq!(matches, vec![0, 1, 3]); - /// ``` - #[inline] - pub fn iter(&self) -> SetMatchesIter<'_> { - SetMatchesIter(self.0.iter()) - } -} - -impl IntoIterator for SetMatches { - type IntoIter = SetMatchesIntoIter; - type Item = usize; - - fn into_iter(self) -> Self::IntoIter { - let it = 0..self.0.capacity(); - SetMatchesIntoIter { patset: self.0, it } - } -} - -impl<'a> IntoIterator for &'a SetMatches { - type IntoIter = SetMatchesIter<'a>; - type Item = usize; - - fn into_iter(self) -> Self::IntoIter { - self.iter() - } -} - -/// An owned iterator over the set of matches from a regex set. -/// -/// This will always produces matches in ascending order of index, where the -/// index corresponds to the index of the regex that matched with respect to -/// its position when initially building the set. -/// -/// This iterator is created by calling `SetMatches::into_iter` via the -/// `IntoIterator` trait. This is automatically done in `for` loops. -/// -/// # Example -/// -/// ``` -/// use regex::RegexSet; -/// -/// let set = RegexSet::new([ -/// r"[0-9]", -/// r"[a-z]", -/// r"[A-Z]", -/// r"\p{Greek}", -/// ]).unwrap(); -/// let hay = "βa1"; -/// let mut matches = vec![]; -/// for index in set.matches(hay) { -/// matches.push(index); -/// } -/// assert_eq!(matches, vec![0, 1, 3]); -/// ``` -#[derive(Debug)] -pub struct SetMatchesIntoIter { - patset: PatternSet, - it: core::ops::Range<usize>, -} - -impl Iterator for SetMatchesIntoIter { - type Item = usize; - - fn next(&mut self) -> Option<usize> { - loop { - let id = self.it.next()?; - if self.patset.contains(PatternID::new_unchecked(id)) { - return Some(id); - } - } - } - - fn size_hint(&self) -> (usize, Option<usize>) { - self.it.size_hint() - } -} - -impl DoubleEndedIterator for SetMatchesIntoIter { - fn next_back(&mut self) -> Option<usize> { - loop { - let id = self.it.next_back()?; - if self.patset.contains(PatternID::new_unchecked(id)) { - return Some(id); - } - } - } -} - -impl core::iter::FusedIterator for SetMatchesIntoIter {} - -/// A borrowed iterator over the set of matches from a regex set. -/// -/// The lifetime `'a` refers to the lifetime of the [`SetMatches`] value that -/// created this iterator. -/// -/// This will always produces matches in ascending order, where the index -/// corresponds to the index of the regex that matched with respect to its -/// position when initially building the set. -/// -/// This iterator is created by the [`SetMatches::iter`] method. 
-#[derive(Clone, Debug)] -pub struct SetMatchesIter<'a>(PatternSetIter<'a>); - -impl<'a> Iterator for SetMatchesIter<'a> { - type Item = usize; - - fn next(&mut self) -> Option<usize> { - self.0.next().map(|pid| pid.as_usize()) - } - - fn size_hint(&self) -> (usize, Option<usize>) { - self.0.size_hint() - } -} - -impl<'a> DoubleEndedIterator for SetMatchesIter<'a> { - fn next_back(&mut self) -> Option<usize> { - self.0.next_back().map(|pid| pid.as_usize()) - } -} - -impl<'a> core::iter::FusedIterator for SetMatchesIter<'a> {} - -impl core::fmt::Debug for RegexSet { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - write!(f, "RegexSet({:?})", self.patterns()) - } -} diff --git a/vendor/regex/test b/vendor/regex/test deleted file mode 100755 index 48224c6d114eaa..00000000000000 --- a/vendor/regex/test +++ /dev/null @@ -1,46 +0,0 @@ -#!/bin/bash - -set -e - -# cd to the directory containing this crate's Cargo.toml so that we don't need -# to pass --manifest-path to every `cargo` command. -cd "$(dirname "$0")" - -# This is a convenience script for running a broad swath of tests across -# features. We don't test the complete space, since the complete space is quite -# large. Hopefully once we migrate the test suite to better infrastructure -# (like regex-automata), we'll be able to test more of the space. -echo "===== DEFAULT FEATURES =====" -cargo test - -# no-std mode is annoyingly difficult to test. Currently, the integration tests -# don't run. So for now, we just test that library tests run. (There aren't -# many because `regex` is just a wrapper crate.) -cargo test --no-default-features --lib - -echo "===== DOC TESTS =====" -cargo test --doc - -features=( - "std" - "std unicode" - "std unicode-perl" - "std perf" - "std perf-cache" - "std perf-dfa" - "std perf-inline" - "std perf-literal" - "std perf-dfa-full" - "std perf-onepass" - "std perf-backtrack" -) -for f in "${features[@]}"; do - echo "===== FEATURE: $f =====" - cargo test --test integration --no-default-features --features "$f" -done - -# And test the probably-forever-nightly-only 'pattern' feature... -if rustc --version | grep -q nightly; then - echo "===== FEATURE: std,pattern,unicode-perl =====" - cargo test --test integration --no-default-features --features std,pattern,unicode-perl -fi diff --git a/vendor/regex/testdata/README.md b/vendor/regex/testdata/README.md deleted file mode 100644 index dcac6719f4f076..00000000000000 --- a/vendor/regex/testdata/README.md +++ /dev/null @@ -1,22 +0,0 @@ -This directory contains a large suite of regex tests defined in a TOML format. -They are used to drive tests in `tests/lib.rs`, `regex-automata/tests/lib.rs` -and `regex-lite/tests/lib.rs`. - -See the [`regex-test`][regex-test] crate documentation for an explanation of -the format and how it generates tests. - -The basic idea here is that we have many different regex engines but generally -one set of tests. We want to be able to run those tests (or most of them) on -every engine. Prior to `regex 1.9`, we used to do this with a hodge podge soup -of macros and a different test executable for each engine. It overall took a -longer time to compile, was harder to maintain, and it made the test definitions -themselves less clear. - -In `regex 1.9`, when we moved over to `regex-automata`, the situation got a lot -worse because of an increase in the number of engines. So I devised an engine -independent format for testing regex patterns and their semantics. 
- -Note: the naming scheme used in these tests isn't terribly consistent. It would -be great to fix that. - -[regex-test]: https://docs.rs/regex-test diff --git a/vendor/regex/testdata/anchored.toml b/vendor/regex/testdata/anchored.toml deleted file mode 100644 index 0f2248d098716e..00000000000000 --- a/vendor/regex/testdata/anchored.toml +++ /dev/null @@ -1,127 +0,0 @@ -# These tests are specifically geared toward searches with 'anchored = true'. -# While they are interesting in their own right, they are particularly -# important for testing the one-pass DFA since the one-pass DFA can't work in -# unanchored contexts. -# -# Note that "anchored" in this context does not mean "^". Anchored searches are -# searches whose matches must begin at the start of the search, which may not -# be at the start of the haystack. That's why anchored searches---and there are -# some examples below---can still report multiple matches. This occurs when the -# matches are adjacent to one another. - -[[test]] -name = "greedy" -regex = '(abc)+' -haystack = "abcabcabc" -matches = [ - [[0, 9], [6, 9]], -] -anchored = true - -# When a "earliest" search is used, greediness doesn't really exist because -# matches are reported as soon as they are known. -[[test]] -name = "greedy-earliest" -regex = '(abc)+' -haystack = "abcabcabc" -matches = [ - [[0, 3], [0, 3]], - [[3, 6], [3, 6]], - [[6, 9], [6, 9]], -] -anchored = true -search-kind = "earliest" - -[[test]] -name = "nongreedy" -regex = '(abc)+?' -haystack = "abcabcabc" -matches = [ - [[0, 3], [0, 3]], - [[3, 6], [3, 6]], - [[6, 9], [6, 9]], -] -anchored = true - -# When "all" semantics are used, non-greediness doesn't exist since the longest -# possible match is always taken. -[[test]] -name = "nongreedy-all" -regex = '(abc)+?' -haystack = "abcabcabc" -matches = [ - [[0, 9], [6, 9]], -] -anchored = true -match-kind = "all" - -[[test]] -name = "word-boundary-unicode-01" -regex = '\b\w+\b' -haystack = 'βββ☃' -matches = [[0, 6]] -anchored = true - -[[test]] -name = "word-boundary-nounicode-01" -regex = '\b\w+\b' -haystack = 'abcβ' -matches = [[0, 3]] -anchored = true -unicode = false - -# Tests that '.c' doesn't match 'abc' when performing an anchored search from -# the beginning of the haystack. This test found two different bugs in the -# PikeVM and the meta engine. -[[test]] -name = "no-match-at-start" -regex = '.c' -haystack = 'abc' -matches = [] -anchored = true - -# Like above, but at a non-zero start offset. -[[test]] -name = "no-match-at-start-bounds" -regex = '.c' -haystack = 'aabc' -bounds = [1, 4] -matches = [] -anchored = true - -# This is like no-match-at-start, but hits the "reverse inner" optimization -# inside the meta engine. (no-match-at-start hits the "reverse suffix" -# optimization.) -[[test]] -name = "no-match-at-start-reverse-inner" -regex = '.c[a-z]' -haystack = 'abcz' -matches = [] -anchored = true - -# Like above, but at a non-zero start offset. -[[test]] -name = "no-match-at-start-reverse-inner-bounds" -regex = '.c[a-z]' -haystack = 'aabcz' -bounds = [1, 5] -matches = [] -anchored = true - -# Same as no-match-at-start, but applies to the meta engine's "reverse -# anchored" optimization. -[[test]] -name = "no-match-at-start-reverse-anchored" -regex = '.c[a-z]$' -haystack = 'abcz' -matches = [] -anchored = true - -# Like above, but at a non-zero start offset. 
-[[test]] -name = "no-match-at-start-reverse-anchored-bounds" -regex = '.c[a-z]$' -haystack = 'aabcz' -bounds = [1, 5] -matches = [] -anchored = true diff --git a/vendor/regex/testdata/bytes.toml b/vendor/regex/testdata/bytes.toml deleted file mode 100644 index 346e36971d4335..00000000000000 --- a/vendor/regex/testdata/bytes.toml +++ /dev/null @@ -1,235 +0,0 @@ -# These are tests specifically crafted for regexes that can match arbitrary -# bytes. In some cases, we also test the Unicode variant as well, just because -# it's good sense to do so. But also, these tests aren't really about Unicode, -# but whether matches are only reported at valid UTF-8 boundaries. For most -# tests in this entire collection, utf8 = true. But for these tests, we use -# utf8 = false. - -[[test]] -name = "word-boundary-ascii" -regex = ' \b' -haystack = " δ" -matches = [] -unicode = false -utf8 = false - -[[test]] -name = "word-boundary-unicode" -regex = ' \b' -haystack = " δ" -matches = [[0, 1]] -unicode = true -utf8 = false - -[[test]] -name = "word-boundary-ascii-not" -regex = ' \B' -haystack = " δ" -matches = [[0, 1]] -unicode = false -utf8 = false - -[[test]] -name = "word-boundary-unicode-not" -regex = ' \B' -haystack = " δ" -matches = [] -unicode = true -utf8 = false - -[[test]] -name = "perl-word-ascii" -regex = '\w+' -haystack = "aδ" -matches = [[0, 1]] -unicode = false -utf8 = false - -[[test]] -name = "perl-word-unicode" -regex = '\w+' -haystack = "aδ" -matches = [[0, 3]] -unicode = true -utf8 = false - -[[test]] -name = "perl-decimal-ascii" -regex = '\d+' -haystack = "1२३9" -matches = [[0, 1], [7, 8]] -unicode = false -utf8 = false - -[[test]] -name = "perl-decimal-unicode" -regex = '\d+' -haystack = "1२३9" -matches = [[0, 8]] -unicode = true -utf8 = false - -[[test]] -name = "perl-whitespace-ascii" -regex = '\s+' -haystack = " \u1680" -matches = [[0, 1]] -unicode = false -utf8 = false - -[[test]] -name = "perl-whitespace-unicode" -regex = '\s+' -haystack = " \u1680" -matches = [[0, 4]] -unicode = true -utf8 = false - -# The first `(.+)` matches two Unicode codepoints, but can't match the 5th -# byte, which isn't valid UTF-8. The second (byte based) `(.+)` takes over and -# matches. -[[test]] -name = "mixed-dot" -regex = '(.+)(?-u)(.+)' -haystack = '\xCE\x93\xCE\x94\xFF' -matches = [ - [[0, 5], [0, 4], [4, 5]], -] -unescape = true -unicode = true -utf8 = false - -[[test]] -name = "case-one-ascii" -regex = 'a' -haystack = "A" -matches = [[0, 1]] -case-insensitive = true -unicode = false -utf8 = false - -[[test]] -name = "case-one-unicode" -regex = 'a' -haystack = "A" -matches = [[0, 1]] -case-insensitive = true -unicode = true -utf8 = false - -[[test]] -name = "case-class-simple-ascii" -regex = '[a-z]+' -haystack = "AaAaA" -matches = [[0, 5]] -case-insensitive = true -unicode = false -utf8 = false - -[[test]] -name = "case-class-ascii" -regex = '[a-z]+' -haystack = "aA\u212AaA" -matches = [[0, 2], [5, 7]] -case-insensitive = true -unicode = false -utf8 = false - -[[test]] -name = "case-class-unicode" -regex = '[a-z]+' -haystack = "aA\u212AaA" -matches = [[0, 7]] -case-insensitive = true -unicode = true -utf8 = false - -[[test]] -name = "negate-ascii" -regex = '[^a]' -haystack = "δ" -matches = [[0, 1], [1, 2]] -unicode = false -utf8 = false - -[[test]] -name = "negate-unicode" -regex = '[^a]' -haystack = "δ" -matches = [[0, 2]] -unicode = true -utf8 = false - -# When utf8=true, this won't match, because the implicit '.*?' prefix is -# Unicode aware and will refuse to match through invalid UTF-8 bytes. 
-[[test]] -name = "dotstar-prefix-ascii" -regex = 'a' -haystack = '\xFFa' -matches = [[1, 2]] -unescape = true -unicode = false -utf8 = false - -[[test]] -name = "dotstar-prefix-unicode" -regex = 'a' -haystack = '\xFFa' -matches = [[1, 2]] -unescape = true -unicode = true -utf8 = false - -[[test]] -name = "null-bytes" -regex = '(?P<cstr>[^\x00]+)\x00' -haystack = 'foo\x00' -matches = [ - [[0, 4], [0, 3]], -] -unescape = true -unicode = false -utf8 = false - -[[test]] -name = "invalid-utf8-anchor-100" -regex = '\xCC?^' -haystack = '\x8d#;\x1a\xa4s3\x05foobarX\\\x0f0t\xe4\x9b\xa4' -matches = [[0, 0]] -unescape = true -unicode = false -utf8 = false - -[[test]] -name = "invalid-utf8-anchor-200" -regex = '^\xf7|4\xff\d\x8a\x8a\x8a\x8a\x8a\x8a\x8a\x8a\x8a\x8a\x8a\x8a\x8a##########[] d\x8a\x8a\x8a\x8a\x8a\x8a\x8a\x8a\x8a\x8a\x8a\x8a\x8a##########\[] #####\x80\S7|$' -haystack = '\x8d#;\x1a\xa4s3\x05foobarX\\\x0f0t\xe4\x9b\xa4' -matches = [[22, 22]] -unescape = true -unicode = false -utf8 = false - -[[test]] -name = "invalid-utf8-anchor-300" -regex = '^|ddp\xff\xffdddddlQd@\x80' -haystack = '\x8d#;\x1a\xa4s3\x05foobarX\\\x0f0t\xe4\x9b\xa4' -matches = [[0, 0]] -unescape = true -unicode = false -utf8 = false - -[[test]] -name = "word-boundary-ascii-100" -regex = '\Bx\B' -haystack = "áxβ" -matches = [] -unicode = false -utf8 = false - -[[test]] -name = "word-boundary-ascii-200" -regex = '\B' -haystack = "0\U0007EF5E" -matches = [[2, 2], [3, 3], [4, 4], [5, 5]] -unicode = false -utf8 = false diff --git a/vendor/regex/testdata/crazy.toml b/vendor/regex/testdata/crazy.toml deleted file mode 100644 index aed46ea1570f11..00000000000000 --- a/vendor/regex/testdata/crazy.toml +++ /dev/null @@ -1,315 +0,0 @@ -[[test]] -name = "nothing-empty" -regex = [] -haystack = "" -matches = [] - -[[test]] -name = "nothing-something" -regex = [] -haystack = "wat" -matches = [] - -[[test]] -name = "ranges" -regex = '(?-u)\b(?:[0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\b' -haystack = "num: 255" -matches = [[5, 8]] - -[[test]] -name = "ranges-not" -regex = '(?-u)\b(?:[0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\b' -haystack = "num: 256" -matches = [] - -[[test]] -name = "float1" -regex = '[-+]?[0-9]*\.?[0-9]+' -haystack = "0.1" -matches = [[0, 3]] - -[[test]] -name = "float2" -regex = '[-+]?[0-9]*\.?[0-9]+' -haystack = "0.1.2" -matches = [[0, 3]] -match-limit = 1 - -[[test]] -name = "float3" -regex = '[-+]?[0-9]*\.?[0-9]+' -haystack = "a1.2" -matches = [[1, 4]] - -[[test]] -name = "float4" -regex = '[-+]?[0-9]*\.?[0-9]+' -haystack = "1.a" -matches = [[0, 1]] - -[[test]] -name = "float5" -regex = '^[-+]?[0-9]*\.?[0-9]+$' -haystack = "1.a" -matches = [] - -[[test]] -name = "email" -regex = '(?i-u)\b[A-Z0-9._%+-]+@[A-Z0-9.-]+\.[A-Z]{2,4}\b' -haystack = "mine is jam.slam@gmail.com " -matches = [[8, 26]] - -[[test]] -name = "email-not" -regex = '(?i-u)\b[A-Z0-9._%+-]+@[A-Z0-9.-]+\.[A-Z]{2,4}\b' -haystack = "mine is jam.slam@gmail " -matches = [] - -[[test]] -name = "email-big" -regex = '''[a-z0-9!#$%&'*+/=?^_`{|}~-]+(?:\.[a-z0-9!#$%&'*+/=?^_`{|}~-]+)*@(?:[a-z0-9](?:[a-z0-9-]*[a-z0-9])?\.)+[a-z0-9](?:[a-z0-9-]*[a-z0-9])?''' -haystack = "mine is jam.slam@gmail.com " -matches = [[8, 26]] - -[[test]] -name = "date1" -regex = '^(?:19|20)\d\d[- /.](?:0[1-9]|1[012])[- /.](?:0[1-9]|[12][0-9]|3[01])$' -haystack = "1900-01-01" -matches = [[0, 10]] -unicode = false - -[[test]] -name = "date2" -regex = '^(?:19|20)\d\d[- /.](?:0[1-9]|1[012])[- /.](?:0[1-9]|[12][0-9]|3[01])$' -haystack = "1900-00-01" -matches = [] -unicode = false 
- -[[test]] -name = "date3" -regex = '^(?:19|20)\d\d[- /.](?:0[1-9]|1[012])[- /.](?:0[1-9]|[12][0-9]|3[01])$' -haystack = "1900-13-01" -matches = [] -unicode = false - -[[test]] -name = "start-end-empty" -regex = '^$' -haystack = "" -matches = [[0, 0]] - -[[test]] -name = "start-end-empty-rev" -regex = '$^' -haystack = "" -matches = [[0, 0]] - -[[test]] -name = "start-end-empty-many-1" -regex = '^$^$^$' -haystack = "" -matches = [[0, 0]] - -[[test]] -name = "start-end-empty-many-2" -regex = '^^^$$$' -haystack = "" -matches = [[0, 0]] - -[[test]] -name = "start-end-empty-rep" -regex = '(?:^$)*' -haystack = "a\nb\nc" -matches = [[0, 0], [1, 1], [2, 2], [3, 3], [4, 4], [5, 5]] - -[[test]] -name = "start-end-empty-rep-rev" -regex = '(?:$^)*' -haystack = "a\nb\nc" -matches = [[0, 0], [1, 1], [2, 2], [3, 3], [4, 4], [5, 5]] - -[[test]] -name = "neg-class-letter" -regex = '[^ac]' -haystack = "acx" -matches = [[2, 3]] - -[[test]] -name = "neg-class-letter-comma" -regex = '[^a,]' -haystack = "a,x" -matches = [[2, 3]] - -[[test]] -name = "neg-class-letter-space" -regex = '[^a[:space:]]' -haystack = "a x" -matches = [[2, 3]] - -[[test]] -name = "neg-class-comma" -regex = '[^,]' -haystack = ",,x" -matches = [[2, 3]] - -[[test]] -name = "neg-class-space" -regex = '[^[:space:]]' -haystack = " a" -matches = [[1, 2]] - -[[test]] -name = "neg-class-space-comma" -regex = '[^,[:space:]]' -haystack = ", a" -matches = [[2, 3]] - -[[test]] -name = "neg-class-comma-space" -regex = '[^[:space:],]' -haystack = " ,a" -matches = [[2, 3]] - -[[test]] -name = "neg-class-ascii" -regex = '[^[:alpha:]Z]' -haystack = "A1" -matches = [[1, 2]] - -[[test]] -name = "lazy-many-many" -regex = '(?:(?:.*)*?)=' -haystack = "a=b" -matches = [[0, 2]] - -[[test]] -name = "lazy-many-optional" -regex = '(?:(?:.?)*?)=' -haystack = "a=b" -matches = [[0, 2]] - -[[test]] -name = "lazy-one-many-many" -regex = '(?:(?:.*)+?)=' -haystack = "a=b" -matches = [[0, 2]] - -[[test]] -name = "lazy-one-many-optional" -regex = '(?:(?:.?)+?)=' -haystack = "a=b" -matches = [[0, 2]] - -[[test]] -name = "lazy-range-min-many" -regex = '(?:(?:.*){1,}?)=' -haystack = "a=b" -matches = [[0, 2]] - -[[test]] -name = "lazy-range-many" -regex = '(?:(?:.*){1,2}?)=' -haystack = "a=b" -matches = [[0, 2]] - -[[test]] -name = "greedy-many-many" -regex = '(?:(?:.*)*)=' -haystack = "a=b" -matches = [[0, 2]] - -[[test]] -name = "greedy-many-optional" -regex = '(?:(?:.?)*)=' -haystack = "a=b" -matches = [[0, 2]] - -[[test]] -name = "greedy-one-many-many" -regex = '(?:(?:.*)+)=' -haystack = "a=b" -matches = [[0, 2]] - -[[test]] -name = "greedy-one-many-optional" -regex = '(?:(?:.?)+)=' -haystack = "a=b" -matches = [[0, 2]] - -[[test]] -name = "greedy-range-min-many" -regex = '(?:(?:.*){1,})=' -haystack = "a=b" -matches = [[0, 2]] - -[[test]] -name = "greedy-range-many" -regex = '(?:(?:.*){1,2})=' -haystack = "a=b" -matches = [[0, 2]] - -[[test]] -name = "empty1" -regex = '' -haystack = "" -matches = [[0, 0]] - -[[test]] -name = "empty2" -regex = '' -haystack = "abc" -matches = [[0, 0], [1, 1], [2, 2], [3, 3]] - -[[test]] -name = "empty3" -regex = '(?:)' -haystack = "abc" -matches = [[0, 0], [1, 1], [2, 2], [3, 3]] - -[[test]] -name = "empty4" -regex = '(?:)*' -haystack = "abc" -matches = [[0, 0], [1, 1], [2, 2], [3, 3]] - -[[test]] -name = "empty5" -regex = '(?:)+' -haystack = "abc" -matches = [[0, 0], [1, 1], [2, 2], [3, 3]] - -[[test]] -name = "empty6" -regex = '(?:)?' 
-haystack = "abc" -matches = [[0, 0], [1, 1], [2, 2], [3, 3]] - -[[test]] -name = "empty7" -regex = '(?:)(?:)' -haystack = "abc" -matches = [[0, 0], [1, 1], [2, 2], [3, 3]] - -[[test]] -name = "empty8" -regex = '(?:)+|z' -haystack = "abc" -matches = [[0, 0], [1, 1], [2, 2], [3, 3]] - -[[test]] -name = "empty9" -regex = 'z|(?:)+' -haystack = "abc" -matches = [[0, 0], [1, 1], [2, 2], [3, 3]] - -[[test]] -name = "empty10" -regex = '(?:)+|b' -haystack = "abc" -matches = [[0, 0], [1, 1], [2, 2], [3, 3]] - -[[test]] -name = "empty11" -regex = 'b|(?:)+' -haystack = "abc" -matches = [[0, 0], [1, 2], [3, 3]] diff --git a/vendor/regex/testdata/crlf.toml b/vendor/regex/testdata/crlf.toml deleted file mode 100644 index 9e2d3761af7271..00000000000000 --- a/vendor/regex/testdata/crlf.toml +++ /dev/null @@ -1,117 +0,0 @@ -# This is a basic test that checks ^ and $ treat \r\n as a single line -# terminator. If ^ and $ only treated \n as a line terminator, then this would -# only match 'xyz' at the end of the haystack. -[[test]] -name = "basic" -regex = '(?mR)^[a-z]+$' -haystack = "abc\r\ndef\r\nxyz" -matches = [[0, 3], [5, 8], [10, 13]] - -# Tests that a CRLF-aware '^$' assertion does not match between CR and LF. -[[test]] -name = "start-end-non-empty" -regex = '(?mR)^$' -haystack = "abc\r\ndef\r\nxyz" -matches = [] - -# Tests that a CRLF-aware '^$' assertion matches the empty string, just like -# a non-CRLF-aware '^$' assertion. -[[test]] -name = "start-end-empty" -regex = '(?mR)^$' -haystack = "" -matches = [[0, 0]] - -# Tests that a CRLF-aware '^$' assertion matches the empty string preceding -# and following a line terminator. -[[test]] -name = "start-end-before-after" -regex = '(?mR)^$' -haystack = "\r\n" -matches = [[0, 0], [2, 2]] - -# Tests that a CRLF-aware '^' assertion does not split a line terminator. -[[test]] -name = "start-no-split" -regex = '(?mR)^' -haystack = "abc\r\ndef\r\nxyz" -matches = [[0, 0], [5, 5], [10, 10]] - -# Same as above, but with adjacent runs of line terminators. -[[test]] -name = "start-no-split-adjacent" -regex = '(?mR)^' -haystack = "\r\n\r\n\r\n" -matches = [[0, 0], [2, 2], [4, 4], [6, 6]] - -# Same as above, but with adjacent runs of just carriage returns. -[[test]] -name = "start-no-split-adjacent-cr" -regex = '(?mR)^' -haystack = "\r\r\r" -matches = [[0, 0], [1, 1], [2, 2], [3, 3]] - -# Same as above, but with adjacent runs of just line feeds. -[[test]] -name = "start-no-split-adjacent-lf" -regex = '(?mR)^' -haystack = "\n\n\n" -matches = [[0, 0], [1, 1], [2, 2], [3, 3]] - -# Tests that a CRLF-aware '$' assertion does not split a line terminator. -[[test]] -name = "end-no-split" -regex = '(?mR)$' -haystack = "abc\r\ndef\r\nxyz" -matches = [[3, 3], [8, 8], [13, 13]] - -# Same as above, but with adjacent runs of line terminators. -[[test]] -name = "end-no-split-adjacent" -regex = '(?mR)$' -haystack = "\r\n\r\n\r\n" -matches = [[0, 0], [2, 2], [4, 4], [6, 6]] - -# Same as above, but with adjacent runs of just carriage returns. -[[test]] -name = "end-no-split-adjacent-cr" -regex = '(?mR)$' -haystack = "\r\r\r" -matches = [[0, 0], [1, 1], [2, 2], [3, 3]] - -# Same as above, but with adjacent runs of just line feeds. -[[test]] -name = "end-no-split-adjacent-lf" -regex = '(?mR)$' -haystack = "\n\n\n" -matches = [[0, 0], [1, 1], [2, 2], [3, 3]] - -# Tests that '.' does not match either \r or \n when CRLF mode is enabled. Note -# that this doesn't require multi-line mode to be enabled. -[[test]] -name = "dot-no-crlf" -regex = '(?R).' 
-haystack = "\r\n\r\n\r\n" -matches = [] - -# This is a test that caught a bug in the one-pass DFA where it (amazingly) was -# using 'is_end_lf' instead of 'is_end_crlf' here. It was probably a copy & -# paste bug. We insert an empty capture group here because it provokes the meta -# regex engine to first find a match and then trip over a panic because the -# one-pass DFA erroneously says there is no match. -[[test]] -name = "onepass-wrong-crlf-with-capture" -regex = '(?Rm:().$)' -haystack = "ZZ\r" -matches = [[[1, 2], [1, 1]]] - -# This is like onepass-wrong-crlf-with-capture above, except it sets up the -# test so that it can be run by the one-pass DFA directly. (i.e., Make it -# anchored and start the search at the right place.) -[[test]] -name = "onepass-wrong-crlf-anchored" -regex = '(?Rm:.$)' -haystack = "ZZ\r" -matches = [[1, 2]] -anchored = true -bounds = [1, 3] diff --git a/vendor/regex/testdata/earliest.toml b/vendor/regex/testdata/earliest.toml deleted file mode 100644 index 951689358e6516..00000000000000 --- a/vendor/regex/testdata/earliest.toml +++ /dev/null @@ -1,52 +0,0 @@ -[[test]] -name = "no-greedy-100" -regex = 'a+' -haystack = "aaa" -matches = [[0, 1], [1, 2], [2, 3]] -search-kind = "earliest" - -[[test]] -name = "no-greedy-200" -regex = 'abc+' -haystack = "zzzabccc" -matches = [[3, 6]] -search-kind = "earliest" - -[[test]] -name = "is-ungreedy" -regex = 'a+?' -haystack = "aaa" -matches = [[0, 1], [1, 2], [2, 3]] -search-kind = "earliest" - -[[test]] -name = "look-start-test" -regex = '^(abc|a)' -haystack = "abc" -matches = [ - [[0, 1], [0, 1]], -] -search-kind = "earliest" - -[[test]] -name = "look-end-test" -regex = '(abc|a)$' -haystack = "abc" -matches = [ - [[0, 3], [0, 3]], -] -search-kind = "earliest" - -[[test]] -name = "no-leftmost-first-100" -regex = 'abc|a' -haystack = "abc" -matches = [[0, 1]] -search-kind = "earliest" - -[[test]] -name = "no-leftmost-first-200" -regex = 'aba|a' -haystack = "aba" -matches = [[0, 1], [2, 3]] -search-kind = "earliest" diff --git a/vendor/regex/testdata/empty.toml b/vendor/regex/testdata/empty.toml deleted file mode 100644 index 7dfd8027a4410f..00000000000000 --- a/vendor/regex/testdata/empty.toml +++ /dev/null @@ -1,113 +0,0 @@ -[[test]] -name = "100" -regex = "|b" -haystack = "abc" -matches = [[0, 0], [1, 1], [2, 2], [3, 3]] - -[[test]] -name = "110" -regex = "b|" -haystack = "abc" -matches = [[0, 0], [1, 2], [3, 3]] - -[[test]] -name = "120" -regex = "|z" -haystack = "abc" -matches = [[0, 0], [1, 1], [2, 2], [3, 3]] - -[[test]] -name = "130" -regex = "z|" -haystack = "abc" -matches = [[0, 0], [1, 1], [2, 2], [3, 3]] - -[[test]] -name = "200" -regex = "|" -haystack = "abc" -matches = [[0, 0], [1, 1], [2, 2], [3, 3]] - -[[test]] -name = "210" -regex = "||" -haystack = "abc" -matches = [[0, 0], [1, 1], [2, 2], [3, 3]] - -[[test]] -name = "220" -regex = "||b" -haystack = "abc" -matches = [[0, 0], [1, 1], [2, 2], [3, 3]] - -[[test]] -name = "230" -regex = "b||" -haystack = "abc" -matches = [[0, 0], [1, 2], [3, 3]] - -[[test]] -name = "240" -regex = "||z" -haystack = "abc" -matches = [[0, 0], [1, 1], [2, 2], [3, 3]] - -[[test]] -name = "300" -regex = "(?:)|b" -haystack = "abc" -matches = [[0, 0], [1, 1], [2, 2], [3, 3]] - -[[test]] -name = "310" -regex = "b|(?:)" -haystack = "abc" -matches = [[0, 0], [1, 2], [3, 3]] - -[[test]] -name = "320" -regex = "(?:|)" -haystack = "abc" -matches = [[0, 0], [1, 1], [2, 2], [3, 3]] - -[[test]] -name = "330" -regex = "(?:|)|z" -haystack = "abc" -matches = [[0, 0], [1, 1], [2, 2], [3, 3]] - 
-[[test]] -name = "400" -regex = "a(?:)|b" -haystack = "abc" -matches = [[0, 1], [1, 2]] - -[[test]] -name = "500" -regex = "" -haystack = "" -matches = [[0, 0]] - -[[test]] -name = "510" -regex = "" -haystack = "a" -matches = [[0, 0], [1, 1]] - -[[test]] -name = "520" -regex = "" -haystack = "abc" -matches = [[0, 0], [1, 1], [2, 2], [3, 3]] - -[[test]] -name = "600" -regex = '(?:|a)*' -haystack = "aaa" -matches = [[0, 0], [1, 1], [2, 2], [3, 3]] - -[[test]] -name = "610" -regex = '(?:|a)+' -haystack = "aaa" -matches = [[0, 0], [1, 1], [2, 2], [3, 3]] diff --git a/vendor/regex/testdata/expensive.toml b/vendor/regex/testdata/expensive.toml deleted file mode 100644 index b70e42f9bb15ca..00000000000000 --- a/vendor/regex/testdata/expensive.toml +++ /dev/null @@ -1,23 +0,0 @@ -# This file represent tests that may be expensive to run on some regex engines. -# For example, tests that build a full DFA ahead of time and minimize it can -# take a horrendously long time on regexes that are large (or result in an -# explosion in the number of states). We group these tests together so that -# such engines can simply skip these tests. - -# See: https://github.com/rust-lang/regex/issues/98 -[[test]] -name = "regression-many-repeat-no-stack-overflow" -regex = '^.{1,2500}' -haystack = "a" -matches = [[0, 1]] - -# This test is meant to blow the bounded backtracker's visited capacity. In -# order to do that, we need a somewhat sizeable regex. The purpose of this -# is to make sure there's at least one test that exercises this path in the -# backtracker. All other tests (at time of writing) are small enough that the -# backtracker can handle them fine. -[[test]] -name = "backtrack-blow-visited-capacity" -regex = '\pL{50}' -haystack = "abcdefghijklmnopqrstuvwxyabcdefghijklmnopqrstuvwxyabcdefghijklmnopqrstuvwxyabcdefghijklmnopqrstuvwxyabcdefghijklmnopqrstuvwxyabcdefghijklmnopqrstuvwxyZZ" -matches = [[0, 50], [50, 100], [100, 150]] diff --git a/vendor/regex/testdata/flags.toml b/vendor/regex/testdata/flags.toml deleted file mode 100644 index 30b412ca65079d..00000000000000 --- a/vendor/regex/testdata/flags.toml +++ /dev/null @@ -1,68 +0,0 @@ -[[test]] -name = "1" -regex = "(?i)abc" -haystack = "ABC" -matches = [[0, 3]] - -[[test]] -name = "2" -regex = "(?i)a(?-i)bc" -haystack = "Abc" -matches = [[0, 3]] - -[[test]] -name = "3" -regex = "(?i)a(?-i)bc" -haystack = "ABC" -matches = [] - -[[test]] -name = "4" -regex = "(?is)a." -haystack = "A\n" -matches = [[0, 2]] - -[[test]] -name = "5" -regex = "(?is)a.(?-is)a." -haystack = "A\nab" -matches = [[0, 4]] - -[[test]] -name = "6" -regex = "(?is)a.(?-is)a." -haystack = "A\na\n" -matches = [] - -[[test]] -name = "7" -regex = "(?is)a.(?-is:a.)?" -haystack = "A\na\n" -matches = [[0, 2]] -match-limit = 1 - -[[test]] -name = "8" -regex = "(?U)a+" -haystack = "aa" -matches = [[0, 1]] -match-limit = 1 - -[[test]] -name = "9" -regex = "(?U)a+?" -haystack = "aa" -matches = [[0, 2]] - -[[test]] -name = "10" -regex = "(?U)(?-U)a+" -haystack = "aa" -matches = [[0, 2]] - -[[test]] -name = "11" -regex = '(?m)(?:^\d+$\n?)+' -haystack = "123\n456\n789" -matches = [[0, 11]] -unicode = false diff --git a/vendor/regex/testdata/fowler/basic.toml b/vendor/regex/testdata/fowler/basic.toml deleted file mode 100644 index 92b4e4cf724c34..00000000000000 --- a/vendor/regex/testdata/fowler/basic.toml +++ /dev/null @@ -1,1611 +0,0 @@ -# !!! DO NOT EDIT !!! -# Automatically generated by 'regex-cli generate fowler'. 
-# Numbers in the test names correspond to the line number of the test from -# the original dat file. - -[[test]] -name = "basic3" -regex = '''abracadabra$''' -haystack = '''abracadabracadabra''' -matches = [[[7, 18]]] -match-limit = 1 - -[[test]] -name = "basic4" -regex = '''a...b''' -haystack = '''abababbb''' -matches = [[[2, 7]]] -match-limit = 1 - -[[test]] -name = "basic5" -regex = '''XXXXXX''' -haystack = '''..XXXXXX''' -matches = [[[2, 8]]] -match-limit = 1 - -[[test]] -name = "basic6" -regex = '''\)''' -haystack = '''()''' -matches = [[[1, 2]]] -match-limit = 1 - -[[test]] -name = "basic7" -regex = '''a]''' -haystack = '''a]a''' -matches = [[[0, 2]]] -match-limit = 1 -anchored = true - -[[test]] -name = "basic9" -regex = '''\}''' -haystack = '''}''' -matches = [[[0, 1]]] -match-limit = 1 -anchored = true - -[[test]] -name = "basic10" -regex = '''\]''' -haystack = ''']''' -matches = [[[0, 1]]] -match-limit = 1 -anchored = true - -[[test]] -name = "basic12" -regex = ''']''' -haystack = ''']''' -matches = [[[0, 1]]] -match-limit = 1 -anchored = true - -[[test]] -name = "basic15" -regex = '''^a''' -haystack = '''ax''' -matches = [[[0, 1]]] -match-limit = 1 -anchored = true - -[[test]] -name = "basic16" -regex = '''\^a''' -haystack = '''a^a''' -matches = [[[1, 3]]] -match-limit = 1 - -[[test]] -name = "basic17" -regex = '''a\^''' -haystack = '''a^''' -matches = [[[0, 2]]] -match-limit = 1 -anchored = true - -[[test]] -name = "basic18" -regex = '''a$''' -haystack = '''aa''' -matches = [[[1, 2]]] -match-limit = 1 - -[[test]] -name = "basic19" -regex = '''a\$''' -haystack = '''a$''' -matches = [[[0, 2]]] -match-limit = 1 -anchored = true - -[[test]] -name = "basic20" -regex = '''^$''' -haystack = '''''' -matches = [[[0, 0]]] -match-limit = 1 -anchored = true - -[[test]] -name = "basic21" -regex = '''$^''' -haystack = '''''' -matches = [[[0, 0]]] -match-limit = 1 -anchored = true - -[[test]] -name = "basic22" -regex = '''a($)''' -haystack = '''aa''' -matches = [[[1, 2], [2, 2]]] -match-limit = 1 - -[[test]] -name = "basic23" -regex = '''a*(^a)''' -haystack = '''aa''' -matches = [[[0, 1], [0, 1]]] -match-limit = 1 -anchored = true - -[[test]] -name = "basic24" -regex = '''(..)*(...)*''' -haystack = '''a''' -matches = [[[0, 0], [], []]] -match-limit = 1 -anchored = true - -[[test]] -name = "basic25" -regex = '''(..)*(...)*''' -haystack = '''abcd''' -matches = [[[0, 4], [2, 4], []]] -match-limit = 1 -anchored = true - -[[test]] -name = "basic26" -regex = '''(ab|a)(bc|c)''' -haystack = '''abc''' -matches = [[[0, 3], [0, 2], [2, 3]]] -match-limit = 1 -anchored = true - -[[test]] -name = "basic27" -regex = '''(ab)c|abc''' -haystack = '''abc''' -matches = [[[0, 3], [0, 2]]] -match-limit = 1 -anchored = true - -[[test]] -name = "basic28" -regex = '''a{0}b''' -haystack = '''ab''' -matches = [[[1, 2]]] -match-limit = 1 - -[[test]] -name = "basic29" -regex = '''(a*)(b?)(b+)b{3}''' -haystack = '''aaabbbbbbb''' -matches = [[[0, 10], [0, 3], [3, 4], [4, 7]]] -match-limit = 1 -anchored = true - -[[test]] -name = "basic30" -regex = '''(a*)(b{0,1})(b{1,})b{3}''' -haystack = '''aaabbbbbbb''' -matches = [[[0, 10], [0, 3], [3, 4], [4, 7]]] -match-limit = 1 -anchored = true - -[[test]] -name = "basic32" -regex = '''((a|a)|a)''' -haystack = '''a''' -matches = [[[0, 1], [0, 1], [0, 1]]] -match-limit = 1 -anchored = true - -[[test]] -name = "basic33" -regex = '''(a*)(a|aa)''' -haystack = '''aaaa''' -matches = [[[0, 4], [0, 3], [3, 4]]] -match-limit = 1 -anchored = true - -[[test]] -name = "basic34" -regex = 
'''a*(a.|aa)''' -haystack = '''aaaa''' -matches = [[[0, 4], [2, 4]]] -match-limit = 1 -anchored = true - -[[test]] -name = "basic35" -regex = '''a(b)|c(d)|a(e)f''' -haystack = '''aef''' -matches = [[[0, 3], [], [], [1, 2]]] -match-limit = 1 -anchored = true - -[[test]] -name = "basic36" -regex = '''(a|b)?.*''' -haystack = '''b''' -matches = [[[0, 1], [0, 1]]] -match-limit = 1 -anchored = true - -[[test]] -name = "basic37" -regex = '''(a|b)c|a(b|c)''' -haystack = '''ac''' -matches = [[[0, 2], [0, 1], []]] -match-limit = 1 -anchored = true - -[[test]] -name = "basic38" -regex = '''(a|b)c|a(b|c)''' -haystack = '''ab''' -matches = [[[0, 2], [], [1, 2]]] -match-limit = 1 -anchored = true - -[[test]] -name = "basic39" -regex = '''(a|b)*c|(a|ab)*c''' -haystack = '''abc''' -matches = [[[0, 3], [1, 2], []]] -match-limit = 1 -anchored = true - -[[test]] -name = "basic40" -regex = '''(a|b)*c|(a|ab)*c''' -haystack = '''xc''' -matches = [[[1, 2], [], []]] -match-limit = 1 - -[[test]] -name = "basic41" -regex = '''(.a|.b).*|.*(.a|.b)''' -haystack = '''xa''' -matches = [[[0, 2], [0, 2], []]] -match-limit = 1 -anchored = true - -[[test]] -name = "basic42" -regex = '''a?(ab|ba)ab''' -haystack = '''abab''' -matches = [[[0, 4], [0, 2]]] -match-limit = 1 -anchored = true - -[[test]] -name = "basic43" -regex = '''a?(ac{0}b|ba)ab''' -haystack = '''abab''' -matches = [[[0, 4], [0, 2]]] -match-limit = 1 -anchored = true - -[[test]] -name = "basic44" -regex = '''ab|abab''' -haystack = '''abbabab''' -matches = [[[0, 2]]] -match-limit = 1 -anchored = true - -[[test]] -name = "basic45" -regex = '''aba|bab|bba''' -haystack = '''baaabbbaba''' -matches = [[[5, 8]]] -match-limit = 1 - -[[test]] -name = "basic46" -regex = '''aba|bab''' -haystack = '''baaabbbaba''' -matches = [[[6, 9]]] -match-limit = 1 - -[[test]] -name = "basic47" -regex = '''(aa|aaa)*|(a|aaaaa)''' -haystack = '''aa''' -matches = [[[0, 2], [0, 2], []]] -match-limit = 1 -anchored = true - -[[test]] -name = "basic48" -regex = '''(a.|.a.)*|(a|.a...)''' -haystack = '''aa''' -matches = [[[0, 2], [0, 2], []]] -match-limit = 1 -anchored = true - -[[test]] -name = "basic49" -regex = '''ab|a''' -haystack = '''xabc''' -matches = [[[1, 3]]] -match-limit = 1 - -[[test]] -name = "basic50" -regex = '''ab|a''' -haystack = '''xxabc''' -matches = [[[2, 4]]] -match-limit = 1 - -[[test]] -name = "basic51" -regex = '''(Ab|cD)*''' -haystack = '''aBcD''' -matches = [[[0, 4], [2, 4]]] -match-limit = 1 -anchored = true -case-insensitive = true - -[[test]] -name = "basic52" -regex = '''[^-]''' -haystack = '''--a''' -matches = [[[2, 3]]] -match-limit = 1 - -[[test]] -name = "basic53" -regex = '''[a-]*''' -haystack = '''--a''' -matches = [[[0, 3]]] -match-limit = 1 -anchored = true - -[[test]] -name = "basic54" -regex = '''[a-m-]*''' -haystack = '''--amoma--''' -matches = [[[0, 4]]] -match-limit = 1 -anchored = true - -[[test]] -name = "basic55" -regex = ''':::1:::0:|:::1:1:0:''' -haystack = ''':::0:::1:::1:::0:''' -matches = [[[8, 17]]] -match-limit = 1 - -[[test]] -name = "basic56" -regex = ''':::1:::0:|:::1:1:1:''' -haystack = ''':::0:::1:::1:::0:''' -matches = [[[8, 17]]] -match-limit = 1 - -[[test]] -name = "basic57" -regex = '''[[:upper:]]''' -haystack = '''A''' -matches = [[[0, 1]]] -match-limit = 1 -anchored = true - -[[test]] -name = "basic58" -regex = '''[[:lower:]]+''' -haystack = '''`az{''' -matches = [[[1, 3]]] -match-limit = 1 - -[[test]] -name = "basic59" -regex = '''[[:upper:]]+''' -haystack = '''@AZ[''' -matches = [[[1, 3]]] -match-limit = 1 - -[[test]] -name = 
"basic65" -regex = '''\n''' -haystack = '''\n''' -matches = [[[0, 1]]] -match-limit = 1 -anchored = true -unescape = true - -[[test]] -name = "basic66" -regex = '''\n''' -haystack = '''\n''' -matches = [[[0, 1]]] -match-limit = 1 -anchored = true -unescape = true - -[[test]] -name = "basic67" -regex = '''[^a]''' -haystack = '''\n''' -matches = [[[0, 1]]] -match-limit = 1 -anchored = true -unescape = true - -[[test]] -name = "basic68" -regex = '''\na''' -haystack = '''\na''' -matches = [[[0, 2]]] -match-limit = 1 -anchored = true -unescape = true - -[[test]] -name = "basic69" -regex = '''(a)(b)(c)''' -haystack = '''abc''' -matches = [[[0, 3], [0, 1], [1, 2], [2, 3]]] -match-limit = 1 -anchored = true - -[[test]] -name = "basic70" -regex = '''xxx''' -haystack = '''xxx''' -matches = [[[0, 3]]] -match-limit = 1 -anchored = true - -# Test added by Rust regex project. -[[test]] -name = "basic72" -regex = '''(?:^|[ (,;])(?:(?:(?:[Ff]eb[^ ]* *|0*2/|\* */?)0*[6-7]))(?:[^0-9]|$)''' -haystack = '''feb 6,''' -matches = [[[0, 6]]] -match-limit = 1 -anchored = true - -# Test added by Rust regex project. -[[test]] -name = "basic74" -regex = '''(?:^|[ (,;])(?:(?:(?:[Ff]eb[^ ]* *|0*2/|\* */?)0*[6-7]))(?:[^0-9]|$)''' -haystack = '''2/7''' -matches = [[[0, 3]]] -match-limit = 1 -anchored = true - -# Test added by Rust regex project. -[[test]] -name = "basic76" -regex = '''(?:^|[ (,;])(?:(?:(?:[Ff]eb[^ ]* *|0*2/|\* */?)0*[6-7]))(?:[^0-9]|$)''' -haystack = '''feb 1,Feb 6''' -matches = [[[5, 11]]] -match-limit = 1 - -# Test added by Rust regex project. -[[test]] -name = "basic78" -regex = '''(((?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:x))))))))))))))))))))))))))))))''' -haystack = '''x''' -matches = [[[0, 1], [0, 1], [0, 1]]] -match-limit = 1 -anchored = true - -# Test added by Rust regex project. -[[test]] -name = "basic80" -regex = '''(((?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:x))))))))))))))))))))))))))))))*''' -haystack = '''xx''' -matches = [[[0, 2], [1, 2], [1, 2]]] -match-limit = 1 -anchored = true - -[[test]] -name = "basic81" -regex = '''a?(ab|ba)*''' -haystack = '''ababababababababababababababababababababababababababababababababababababababababa''' -matches = [[[0, 81], [79, 81]]] -match-limit = 1 -anchored = true - -[[test]] -name = "basic82" -regex = '''abaa|abbaa|abbbaa|abbbbaa''' -haystack = '''ababbabbbabbbabbbbabbbbaa''' -matches = [[[18, 25]]] -match-limit = 1 - -[[test]] -name = "basic83" -regex = '''abaa|abbaa|abbbaa|abbbbaa''' -haystack = '''ababbabbbabbbabbbbabaa''' -matches = [[[18, 22]]] -match-limit = 1 - -[[test]] -name = "basic84" -regex = '''aaac|aabc|abac|abbc|baac|babc|bbac|bbbc''' -haystack = '''baaabbbabac''' -matches = [[[7, 11]]] -match-limit = 1 - -# Test added by Rust regex project. 
-[[test]] -name = "basic86" -regex = '''.*''' -haystack = '''\x01\x7f''' -matches = [[[0, 2]]] -match-limit = 1 -anchored = true -unescape = true - -[[test]] -name = "basic87" -regex = '''aaaa|bbbb|cccc|ddddd|eeeeee|fffffff|gggg|hhhh|iiiii|jjjjj|kkkkk|llll''' -haystack = '''XaaaXbbbXcccXdddXeeeXfffXgggXhhhXiiiXjjjXkkkXlllXcbaXaaaa''' -matches = [[[53, 57]]] -match-limit = 1 - -[[test]] -name = "basic89" -regex = '''a*a*a*a*a*b''' -haystack = '''aaaaaaaaab''' -matches = [[[0, 10]]] -match-limit = 1 -anchored = true - -[[test]] -name = "basic90" -regex = '''^''' -haystack = '''''' -matches = [[[0, 0]]] -match-limit = 1 -anchored = true - -[[test]] -name = "basic91" -regex = '''$''' -haystack = '''''' -matches = [[[0, 0]]] -match-limit = 1 -anchored = true - -[[test]] -name = "basic92" -regex = '''^$''' -haystack = '''''' -matches = [[[0, 0]]] -match-limit = 1 -anchored = true - -[[test]] -name = "basic93" -regex = '''^a$''' -haystack = '''a''' -matches = [[[0, 1]]] -match-limit = 1 -anchored = true - -[[test]] -name = "basic94" -regex = '''abc''' -haystack = '''abc''' -matches = [[[0, 3]]] -match-limit = 1 -anchored = true - -[[test]] -name = "basic95" -regex = '''abc''' -haystack = '''xabcy''' -matches = [[[1, 4]]] -match-limit = 1 - -[[test]] -name = "basic96" -regex = '''abc''' -haystack = '''ababc''' -matches = [[[2, 5]]] -match-limit = 1 - -[[test]] -name = "basic97" -regex = '''ab*c''' -haystack = '''abc''' -matches = [[[0, 3]]] -match-limit = 1 -anchored = true - -[[test]] -name = "basic98" -regex = '''ab*bc''' -haystack = '''abc''' -matches = [[[0, 3]]] -match-limit = 1 -anchored = true - -[[test]] -name = "basic99" -regex = '''ab*bc''' -haystack = '''abbc''' -matches = [[[0, 4]]] -match-limit = 1 -anchored = true - -[[test]] -name = "basic100" -regex = '''ab*bc''' -haystack = '''abbbbc''' -matches = [[[0, 6]]] -match-limit = 1 -anchored = true - -[[test]] -name = "basic101" -regex = '''ab+bc''' -haystack = '''abbc''' -matches = [[[0, 4]]] -match-limit = 1 -anchored = true - -[[test]] -name = "basic102" -regex = '''ab+bc''' -haystack = '''abbbbc''' -matches = [[[0, 6]]] -match-limit = 1 -anchored = true - -[[test]] -name = "basic103" -regex = '''ab?bc''' -haystack = '''abbc''' -matches = [[[0, 4]]] -match-limit = 1 -anchored = true - -[[test]] -name = "basic104" -regex = '''ab?bc''' -haystack = '''abc''' -matches = [[[0, 3]]] -match-limit = 1 -anchored = true - -[[test]] -name = "basic105" -regex = '''ab?c''' -haystack = '''abc''' -matches = [[[0, 3]]] -match-limit = 1 -anchored = true - -[[test]] -name = "basic106" -regex = '''^abc$''' -haystack = '''abc''' -matches = [[[0, 3]]] -match-limit = 1 -anchored = true - -[[test]] -name = "basic107" -regex = '''^abc''' -haystack = '''abcc''' -matches = [[[0, 3]]] -match-limit = 1 -anchored = true - -[[test]] -name = "basic108" -regex = '''abc$''' -haystack = '''aabc''' -matches = [[[1, 4]]] -match-limit = 1 - -[[test]] -name = "basic109" -regex = '''^''' -haystack = '''abc''' -matches = [[[0, 0]]] -match-limit = 1 -anchored = true - -[[test]] -name = "basic110" -regex = '''$''' -haystack = '''abc''' -matches = [[[3, 3]]] -match-limit = 1 - -[[test]] -name = "basic111" -regex = '''a.c''' -haystack = '''abc''' -matches = [[[0, 3]]] -match-limit = 1 -anchored = true - -[[test]] -name = "basic112" -regex = '''a.c''' -haystack = '''axc''' -matches = [[[0, 3]]] -match-limit = 1 -anchored = true - -[[test]] -name = "basic113" -regex = '''a.*c''' -haystack = '''axyzc''' -matches = [[[0, 5]]] -match-limit = 1 -anchored = true - -[[test]] -name = 
"basic114" -regex = '''a[bc]d''' -haystack = '''abd''' -matches = [[[0, 3]]] -match-limit = 1 -anchored = true - -[[test]] -name = "basic115" -regex = '''a[b-d]e''' -haystack = '''ace''' -matches = [[[0, 3]]] -match-limit = 1 -anchored = true - -[[test]] -name = "basic116" -regex = '''a[b-d]''' -haystack = '''aac''' -matches = [[[1, 3]]] -match-limit = 1 - -[[test]] -name = "basic117" -regex = '''a[-b]''' -haystack = '''a-''' -matches = [[[0, 2]]] -match-limit = 1 -anchored = true - -[[test]] -name = "basic118" -regex = '''a[b-]''' -haystack = '''a-''' -matches = [[[0, 2]]] -match-limit = 1 -anchored = true - -[[test]] -name = "basic119" -regex = '''a]''' -haystack = '''a]''' -matches = [[[0, 2]]] -match-limit = 1 -anchored = true - -[[test]] -name = "basic120" -regex = '''a[]]b''' -haystack = '''a]b''' -matches = [[[0, 3]]] -match-limit = 1 -anchored = true - -[[test]] -name = "basic121" -regex = '''a[^bc]d''' -haystack = '''aed''' -matches = [[[0, 3]]] -match-limit = 1 -anchored = true - -[[test]] -name = "basic122" -regex = '''a[^-b]c''' -haystack = '''adc''' -matches = [[[0, 3]]] -match-limit = 1 -anchored = true - -[[test]] -name = "basic123" -regex = '''a[^]b]c''' -haystack = '''adc''' -matches = [[[0, 3]]] -match-limit = 1 -anchored = true - -[[test]] -name = "basic124" -regex = '''ab|cd''' -haystack = '''abc''' -matches = [[[0, 2]]] -match-limit = 1 -anchored = true - -[[test]] -name = "basic125" -regex = '''ab|cd''' -haystack = '''abcd''' -matches = [[[0, 2]]] -match-limit = 1 -anchored = true - -[[test]] -name = "basic126" -regex = '''a\(b''' -haystack = '''a(b''' -matches = [[[0, 3]]] -match-limit = 1 -anchored = true - -[[test]] -name = "basic127" -regex = '''a\(*b''' -haystack = '''ab''' -matches = [[[0, 2]]] -match-limit = 1 -anchored = true - -[[test]] -name = "basic128" -regex = '''a\(*b''' -haystack = '''a((b''' -matches = [[[0, 4]]] -match-limit = 1 -anchored = true - -[[test]] -name = "basic129" -regex = '''((a))''' -haystack = '''abc''' -matches = [[[0, 1], [0, 1], [0, 1]]] -match-limit = 1 -anchored = true - -[[test]] -name = "basic130" -regex = '''(a)b(c)''' -haystack = '''abc''' -matches = [[[0, 3], [0, 1], [2, 3]]] -match-limit = 1 -anchored = true - -[[test]] -name = "basic131" -regex = '''a+b+c''' -haystack = '''aabbabc''' -matches = [[[4, 7]]] -match-limit = 1 - -[[test]] -name = "basic132" -regex = '''a*''' -haystack = '''aaa''' -matches = [[[0, 3]]] -match-limit = 1 -anchored = true - -[[test]] -name = "basic133" -regex = '''(a*)*''' -haystack = '''-''' -matches = [[[0, 0], [0, 0]]] -match-limit = 1 -anchored = true - -[[test]] -name = "basic134" -regex = '''(a*)+''' -haystack = '''-''' -matches = [[[0, 0], [0, 0]]] -match-limit = 1 -anchored = true - -[[test]] -name = "basic135" -regex = '''(a*|b)*''' -haystack = '''-''' -matches = [[[0, 0], [0, 0]]] -match-limit = 1 -anchored = true - -[[test]] -name = "basic136" -regex = '''(a+|b)*''' -haystack = '''ab''' -matches = [[[0, 2], [1, 2]]] -match-limit = 1 -anchored = true - -[[test]] -name = "basic137" -regex = '''(a+|b)+''' -haystack = '''ab''' -matches = [[[0, 2], [1, 2]]] -match-limit = 1 -anchored = true - -[[test]] -name = "basic138" -regex = '''(a+|b)?''' -haystack = '''ab''' -matches = [[[0, 1], [0, 1]]] -match-limit = 1 -anchored = true - -[[test]] -name = "basic139" -regex = '''[^ab]*''' -haystack = '''cde''' -matches = [[[0, 3]]] -match-limit = 1 -anchored = true - -[[test]] -name = "basic140" -regex = '''(^)*''' -haystack = '''-''' -matches = [[[0, 0], [0, 0]]] -match-limit = 1 -anchored = true - 
-[[test]] -name = "basic141" -regex = '''a*''' -haystack = '''''' -matches = [[[0, 0]]] -match-limit = 1 -anchored = true - -[[test]] -name = "basic142" -regex = '''([abc])*d''' -haystack = '''abbbcd''' -matches = [[[0, 6], [4, 5]]] -match-limit = 1 -anchored = true - -[[test]] -name = "basic143" -regex = '''([abc])*bcd''' -haystack = '''abcd''' -matches = [[[0, 4], [0, 1]]] -match-limit = 1 -anchored = true - -[[test]] -name = "basic144" -regex = '''a|b|c|d|e''' -haystack = '''e''' -matches = [[[0, 1]]] -match-limit = 1 -anchored = true - -[[test]] -name = "basic145" -regex = '''(a|b|c|d|e)f''' -haystack = '''ef''' -matches = [[[0, 2], [0, 1]]] -match-limit = 1 -anchored = true - -[[test]] -name = "basic146" -regex = '''((a*|b))*''' -haystack = '''-''' -matches = [[[0, 0], [0, 0], [0, 0]]] -match-limit = 1 -anchored = true - -[[test]] -name = "basic147" -regex = '''abcd*efg''' -haystack = '''abcdefg''' -matches = [[[0, 7]]] -match-limit = 1 -anchored = true - -[[test]] -name = "basic148" -regex = '''ab*''' -haystack = '''xabyabbbz''' -matches = [[[1, 3]]] -match-limit = 1 - -[[test]] -name = "basic149" -regex = '''ab*''' -haystack = '''xayabbbz''' -matches = [[[1, 2]]] -match-limit = 1 - -[[test]] -name = "basic150" -regex = '''(ab|cd)e''' -haystack = '''abcde''' -matches = [[[2, 5], [2, 4]]] -match-limit = 1 - -[[test]] -name = "basic151" -regex = '''[abhgefdc]ij''' -haystack = '''hij''' -matches = [[[0, 3]]] -match-limit = 1 -anchored = true - -[[test]] -name = "basic152" -regex = '''(a|b)c*d''' -haystack = '''abcd''' -matches = [[[1, 4], [1, 2]]] -match-limit = 1 - -[[test]] -name = "basic153" -regex = '''(ab|ab*)bc''' -haystack = '''abc''' -matches = [[[0, 3], [0, 1]]] -match-limit = 1 -anchored = true - -[[test]] -name = "basic154" -regex = '''a([bc]*)c*''' -haystack = '''abc''' -matches = [[[0, 3], [1, 3]]] -match-limit = 1 -anchored = true - -[[test]] -name = "basic155" -regex = '''a([bc]*)(c*d)''' -haystack = '''abcd''' -matches = [[[0, 4], [1, 3], [3, 4]]] -match-limit = 1 -anchored = true - -[[test]] -name = "basic156" -regex = '''a([bc]+)(c*d)''' -haystack = '''abcd''' -matches = [[[0, 4], [1, 3], [3, 4]]] -match-limit = 1 -anchored = true - -[[test]] -name = "basic157" -regex = '''a([bc]*)(c+d)''' -haystack = '''abcd''' -matches = [[[0, 4], [1, 2], [2, 4]]] -match-limit = 1 -anchored = true - -[[test]] -name = "basic158" -regex = '''a[bcd]*dcdcde''' -haystack = '''adcdcde''' -matches = [[[0, 7]]] -match-limit = 1 -anchored = true - -[[test]] -name = "basic159" -regex = '''(ab|a)b*c''' -haystack = '''abc''' -matches = [[[0, 3], [0, 2]]] -match-limit = 1 -anchored = true - -[[test]] -name = "basic160" -regex = '''((a)(b)c)(d)''' -haystack = '''abcd''' -matches = [[[0, 4], [0, 3], [0, 1], [1, 2], [3, 4]]] -match-limit = 1 -anchored = true - -[[test]] -name = "basic161" -regex = '''[A-Za-z_][A-Za-z0-9_]*''' -haystack = '''alpha''' -matches = [[[0, 5]]] -match-limit = 1 -anchored = true - -[[test]] -name = "basic162" -regex = '''^a(bc+|b[eh])g|.h$''' -haystack = '''abh''' -matches = [[[1, 3], []]] -match-limit = 1 - -[[test]] -name = "basic163" -regex = '''(bc+d$|ef*g.|h?i(j|k))''' -haystack = '''effgz''' -matches = [[[0, 5], [0, 5], []]] -match-limit = 1 -anchored = true - -[[test]] -name = "basic164" -regex = '''(bc+d$|ef*g.|h?i(j|k))''' -haystack = '''ij''' -matches = [[[0, 2], [0, 2], [1, 2]]] -match-limit = 1 -anchored = true - -[[test]] -name = "basic165" -regex = '''(bc+d$|ef*g.|h?i(j|k))''' -haystack = '''reffgz''' -matches = [[[1, 6], [1, 6], []]] -match-limit = 1 - 
-[[test]] -name = "basic166" -regex = '''(((((((((a)))))))))''' -haystack = '''a''' -matches = [[[0, 1], [0, 1], [0, 1], [0, 1], [0, 1], [0, 1], [0, 1], [0, 1], [0, 1], [0, 1]]] -match-limit = 1 -anchored = true - -[[test]] -name = "basic167" -regex = '''multiple words''' -haystack = '''multiple words yeah''' -matches = [[[0, 14]]] -match-limit = 1 -anchored = true - -[[test]] -name = "basic168" -regex = '''(.*)c(.*)''' -haystack = '''abcde''' -matches = [[[0, 5], [0, 2], [3, 5]]] -match-limit = 1 -anchored = true - -[[test]] -name = "basic169" -regex = '''abcd''' -haystack = '''abcd''' -matches = [[[0, 4]]] -match-limit = 1 -anchored = true - -[[test]] -name = "basic170" -regex = '''a(bc)d''' -haystack = '''abcd''' -matches = [[[0, 4], [1, 3]]] -match-limit = 1 -anchored = true - -[[test]] -name = "basic171" -regex = '''a[\x01-\x03]?c''' -haystack = '''a\x02c''' -matches = [[[0, 3]]] -match-limit = 1 -anchored = true -unescape = true - -[[test]] -name = "basic172" -regex = '''M[ou]'?am+[ae]r .*([AEae]l[- ])?[GKQ]h?[aeu]+([dtz][dhz]?)+af[iy]''' -haystack = '''Muammar Qaddafi''' -matches = [[[0, 15], [], [10, 12]]] -match-limit = 1 -anchored = true - -[[test]] -name = "basic173" -regex = '''M[ou]'?am+[ae]r .*([AEae]l[- ])?[GKQ]h?[aeu]+([dtz][dhz]?)+af[iy]''' -haystack = '''Mo'ammar Gadhafi''' -matches = [[[0, 16], [], [11, 13]]] -match-limit = 1 -anchored = true - -[[test]] -name = "basic174" -regex = '''M[ou]'?am+[ae]r .*([AEae]l[- ])?[GKQ]h?[aeu]+([dtz][dhz]?)+af[iy]''' -haystack = '''Muammar Kaddafi''' -matches = [[[0, 15], [], [10, 12]]] -match-limit = 1 -anchored = true - -[[test]] -name = "basic175" -regex = '''M[ou]'?am+[ae]r .*([AEae]l[- ])?[GKQ]h?[aeu]+([dtz][dhz]?)+af[iy]''' -haystack = '''Muammar Qadhafi''' -matches = [[[0, 15], [], [10, 12]]] -match-limit = 1 -anchored = true - -[[test]] -name = "basic176" -regex = '''M[ou]'?am+[ae]r .*([AEae]l[- ])?[GKQ]h?[aeu]+([dtz][dhz]?)+af[iy]''' -haystack = '''Muammar Gadafi''' -matches = [[[0, 14], [], [10, 11]]] -match-limit = 1 -anchored = true - -[[test]] -name = "basic177" -regex = '''M[ou]'?am+[ae]r .*([AEae]l[- ])?[GKQ]h?[aeu]+([dtz][dhz]?)+af[iy]''' -haystack = '''Mu'ammar Qadafi''' -matches = [[[0, 15], [], [11, 12]]] -match-limit = 1 -anchored = true - -[[test]] -name = "basic178" -regex = '''M[ou]'?am+[ae]r .*([AEae]l[- ])?[GKQ]h?[aeu]+([dtz][dhz]?)+af[iy]''' -haystack = '''Moamar Gaddafi''' -matches = [[[0, 14], [], [9, 11]]] -match-limit = 1 -anchored = true - -[[test]] -name = "basic179" -regex = '''M[ou]'?am+[ae]r .*([AEae]l[- ])?[GKQ]h?[aeu]+([dtz][dhz]?)+af[iy]''' -haystack = '''Mu'ammar Qadhdhafi''' -matches = [[[0, 18], [], [13, 15]]] -match-limit = 1 -anchored = true - -[[test]] -name = "basic180" -regex = '''M[ou]'?am+[ae]r .*([AEae]l[- ])?[GKQ]h?[aeu]+([dtz][dhz]?)+af[iy]''' -haystack = '''Muammar Khaddafi''' -matches = [[[0, 16], [], [11, 13]]] -match-limit = 1 -anchored = true - -[[test]] -name = "basic181" -regex = '''M[ou]'?am+[ae]r .*([AEae]l[- ])?[GKQ]h?[aeu]+([dtz][dhz]?)+af[iy]''' -haystack = '''Muammar Ghaddafy''' -matches = [[[0, 16], [], [11, 13]]] -match-limit = 1 -anchored = true - -[[test]] -name = "basic182" -regex = '''M[ou]'?am+[ae]r .*([AEae]l[- ])?[GKQ]h?[aeu]+([dtz][dhz]?)+af[iy]''' -haystack = '''Muammar Ghadafi''' -matches = [[[0, 15], [], [11, 12]]] -match-limit = 1 -anchored = true - -[[test]] -name = "basic183" -regex = '''M[ou]'?am+[ae]r .*([AEae]l[- ])?[GKQ]h?[aeu]+([dtz][dhz]?)+af[iy]''' -haystack = '''Muammar Ghaddafi''' -matches = [[[0, 16], [], [11, 13]]] -match-limit = 1 -anchored = 
true - -[[test]] -name = "basic184" -regex = '''M[ou]'?am+[ae]r .*([AEae]l[- ])?[GKQ]h?[aeu]+([dtz][dhz]?)+af[iy]''' -haystack = '''Muamar Kaddafi''' -matches = [[[0, 14], [], [9, 11]]] -match-limit = 1 -anchored = true - -[[test]] -name = "basic185" -regex = '''M[ou]'?am+[ae]r .*([AEae]l[- ])?[GKQ]h?[aeu]+([dtz][dhz]?)+af[iy]''' -haystack = '''Muammar Quathafi''' -matches = [[[0, 16], [], [11, 13]]] -match-limit = 1 -anchored = true - -[[test]] -name = "basic186" -regex = '''M[ou]'?am+[ae]r .*([AEae]l[- ])?[GKQ]h?[aeu]+([dtz][dhz]?)+af[iy]''' -haystack = '''Muammar Gheddafi''' -matches = [[[0, 16], [], [11, 13]]] -match-limit = 1 -anchored = true - -[[test]] -name = "basic187" -regex = '''M[ou]'?am+[ae]r .*([AEae]l[- ])?[GKQ]h?[aeu]+([dtz][dhz]?)+af[iy]''' -haystack = '''Moammar Khadafy''' -matches = [[[0, 15], [], [11, 12]]] -match-limit = 1 -anchored = true - -[[test]] -name = "basic188" -regex = '''M[ou]'?am+[ae]r .*([AEae]l[- ])?[GKQ]h?[aeu]+([dtz][dhz]?)+af[iy]''' -haystack = '''Moammar Qudhafi''' -matches = [[[0, 15], [], [10, 12]]] -match-limit = 1 -anchored = true - -[[test]] -name = "basic189" -regex = '''a+(b|c)*d+''' -haystack = '''aabcdd''' -matches = [[[0, 6], [3, 4]]] -match-limit = 1 -anchored = true - -[[test]] -name = "basic190" -regex = '''^.+$''' -haystack = '''vivi''' -matches = [[[0, 4]]] -match-limit = 1 -anchored = true - -[[test]] -name = "basic191" -regex = '''^(.+)$''' -haystack = '''vivi''' -matches = [[[0, 4], [0, 4]]] -match-limit = 1 -anchored = true - -[[test]] -name = "basic192" -regex = '''^([^!.]+).att.com!(.+)$''' -haystack = '''gryphon.att.com!eby''' -matches = [[[0, 19], [0, 7], [16, 19]]] -match-limit = 1 -anchored = true - -[[test]] -name = "basic193" -regex = '''^([^!]+!)?([^!]+)$''' -haystack = '''bas''' -matches = [[[0, 3], [], [0, 3]]] -match-limit = 1 -anchored = true - -[[test]] -name = "basic194" -regex = '''^([^!]+!)?([^!]+)$''' -haystack = '''bar!bas''' -matches = [[[0, 7], [0, 4], [4, 7]]] -match-limit = 1 -anchored = true - -[[test]] -name = "basic195" -regex = '''^([^!]+!)?([^!]+)$''' -haystack = '''foo!bas''' -matches = [[[0, 7], [0, 4], [4, 7]]] -match-limit = 1 -anchored = true - -[[test]] -name = "basic196" -regex = '''^.+!([^!]+!)([^!]+)$''' -haystack = '''foo!bar!bas''' -matches = [[[0, 11], [4, 8], [8, 11]]] -match-limit = 1 -anchored = true - -[[test]] -name = "basic197" -regex = '''((foo)|(bar))!bas''' -haystack = '''bar!bas''' -matches = [[[0, 7], [0, 3], [], [0, 3]]] -match-limit = 1 -anchored = true - -[[test]] -name = "basic198" -regex = '''((foo)|(bar))!bas''' -haystack = '''foo!bar!bas''' -matches = [[[4, 11], [4, 7], [], [4, 7]]] -match-limit = 1 - -[[test]] -name = "basic199" -regex = '''((foo)|(bar))!bas''' -haystack = '''foo!bas''' -matches = [[[0, 7], [0, 3], [0, 3], []]] -match-limit = 1 -anchored = true - -[[test]] -name = "basic200" -regex = '''((foo)|bar)!bas''' -haystack = '''bar!bas''' -matches = [[[0, 7], [0, 3], []]] -match-limit = 1 -anchored = true - -[[test]] -name = "basic201" -regex = '''((foo)|bar)!bas''' -haystack = '''foo!bar!bas''' -matches = [[[4, 11], [4, 7], []]] -match-limit = 1 - -[[test]] -name = "basic202" -regex = '''((foo)|bar)!bas''' -haystack = '''foo!bas''' -matches = [[[0, 7], [0, 3], [0, 3]]] -match-limit = 1 -anchored = true - -[[test]] -name = "basic203" -regex = '''(foo|(bar))!bas''' -haystack = '''bar!bas''' -matches = [[[0, 7], [0, 3], [0, 3]]] -match-limit = 1 -anchored = true - -[[test]] -name = "basic204" -regex = '''(foo|(bar))!bas''' -haystack = '''foo!bar!bas''' -matches = 
[[[4, 11], [4, 7], [4, 7]]] -match-limit = 1 - -[[test]] -name = "basic205" -regex = '''(foo|(bar))!bas''' -haystack = '''foo!bas''' -matches = [[[0, 7], [0, 3], []]] -match-limit = 1 -anchored = true - -[[test]] -name = "basic206" -regex = '''(foo|bar)!bas''' -haystack = '''bar!bas''' -matches = [[[0, 7], [0, 3]]] -match-limit = 1 -anchored = true - -[[test]] -name = "basic207" -regex = '''(foo|bar)!bas''' -haystack = '''foo!bar!bas''' -matches = [[[4, 11], [4, 7]]] -match-limit = 1 - -[[test]] -name = "basic208" -regex = '''(foo|bar)!bas''' -haystack = '''foo!bas''' -matches = [[[0, 7], [0, 3]]] -match-limit = 1 -anchored = true - -[[test]] -name = "basic209" -regex = '''^(([^!]+!)?([^!]+)|.+!([^!]+!)([^!]+))$''' -haystack = '''foo!bar!bas''' -matches = [[[0, 11], [0, 11], [], [], [4, 8], [8, 11]]] -match-limit = 1 -anchored = true - -[[test]] -name = "basic210" -regex = '''^([^!]+!)?([^!]+)$|^.+!([^!]+!)([^!]+)$''' -haystack = '''bas''' -matches = [[[0, 3], [], [0, 3], [], []]] -match-limit = 1 -anchored = true - -[[test]] -name = "basic211" -regex = '''^([^!]+!)?([^!]+)$|^.+!([^!]+!)([^!]+)$''' -haystack = '''bar!bas''' -matches = [[[0, 7], [0, 4], [4, 7], [], []]] -match-limit = 1 -anchored = true - -[[test]] -name = "basic212" -regex = '''^([^!]+!)?([^!]+)$|^.+!([^!]+!)([^!]+)$''' -haystack = '''foo!bar!bas''' -matches = [[[0, 11], [], [], [4, 8], [8, 11]]] -match-limit = 1 -anchored = true - -[[test]] -name = "basic213" -regex = '''^([^!]+!)?([^!]+)$|^.+!([^!]+!)([^!]+)$''' -haystack = '''foo!bas''' -matches = [[[0, 7], [0, 4], [4, 7], [], []]] -match-limit = 1 -anchored = true - -[[test]] -name = "basic214" -regex = '''^(([^!]+!)?([^!]+)|.+!([^!]+!)([^!]+))$''' -haystack = '''bas''' -matches = [[[0, 3], [0, 3], [], [0, 3], [], []]] -match-limit = 1 -anchored = true - -[[test]] -name = "basic215" -regex = '''^(([^!]+!)?([^!]+)|.+!([^!]+!)([^!]+))$''' -haystack = '''bar!bas''' -matches = [[[0, 7], [0, 7], [0, 4], [4, 7], [], []]] -match-limit = 1 -anchored = true - -[[test]] -name = "basic216" -regex = '''^(([^!]+!)?([^!]+)|.+!([^!]+!)([^!]+))$''' -haystack = '''foo!bar!bas''' -matches = [[[0, 11], [0, 11], [], [], [4, 8], [8, 11]]] -match-limit = 1 -anchored = true - -[[test]] -name = "basic217" -regex = '''^(([^!]+!)?([^!]+)|.+!([^!]+!)([^!]+))$''' -haystack = '''foo!bas''' -matches = [[[0, 7], [0, 7], [0, 4], [4, 7], [], []]] -match-limit = 1 -anchored = true - -[[test]] -name = "basic218" -regex = '''.*(/XXX).*''' -haystack = '''/XXX''' -matches = [[[0, 4], [0, 4]]] -match-limit = 1 -anchored = true - -[[test]] -name = "basic219" -regex = '''.*(\\XXX).*''' -haystack = '''\XXX''' -matches = [[[0, 4], [0, 4]]] -match-limit = 1 -anchored = true - -[[test]] -name = "basic220" -regex = '''\\XXX''' -haystack = '''\XXX''' -matches = [[[0, 4]]] -match-limit = 1 -anchored = true - -[[test]] -name = "basic221" -regex = '''.*(/000).*''' -haystack = '''/000''' -matches = [[[0, 4], [0, 4]]] -match-limit = 1 -anchored = true - -[[test]] -name = "basic222" -regex = '''.*(\\000).*''' -haystack = '''\000''' -matches = [[[0, 4], [0, 4]]] -match-limit = 1 -anchored = true - -[[test]] -name = "basic223" -regex = '''\\000''' -haystack = '''\000''' -matches = [[[0, 4]]] -match-limit = 1 -anchored = true - diff --git a/vendor/regex/testdata/fowler/dat/README b/vendor/regex/testdata/fowler/dat/README deleted file mode 100644 index 242a0e6c3a9914..00000000000000 --- a/vendor/regex/testdata/fowler/dat/README +++ /dev/null @@ -1,25 +0,0 @@ -Test data was taken from the Go distribution, which was in turn 
taken from the -testregex test suite: - - http://web.archive.org/web/20150925124103/http://www2.research.att.com/~astopen/testregex/testregex.html - -Unfortunately, the original web site now appears dead, but the test data lives -on. - -The LICENSE in this directory corresponds to the LICENSE that the data was -originally released under. - -The tests themselves were modified for RE2/Go (and marked as such). A -couple were modified further by me (Andrew Gallant) and marked with 'Rust'. - -After some number of years, these tests were transformed into a TOML format -using the 'regex-cli generate fowler' command. To re-generate the -TOML files, run the following from the root of this repository: - - regex-cli generate fowler tests/data/fowler tests/data/fowler/dat/*.dat - -This assumes that you have 'regex-cli' installed. See 'regex-cli/README.md' -from the root of the repository for more information. - -This brings the Fowler tests into a more "sensible" structured format in which -other tests can be written such that they aren't write-only. diff --git a/vendor/regex/testdata/fowler/dat/basic.dat b/vendor/regex/testdata/fowler/dat/basic.dat deleted file mode 100644 index 654a72b39b821b..00000000000000 --- a/vendor/regex/testdata/fowler/dat/basic.dat +++ /dev/null @@ -1,223 +0,0 @@ -NOTE all standard compliant implementations should pass these : 2002-05-31 - -BE abracadabra$ abracadabracadabra (7,18) -BE a...b abababbb (2,7) -BE XXXXXX ..XXXXXX (2,8) -E \) () (1,2) -BE a] a]a (0,2) -B } } (0,1) -E \} } (0,1) -BE \] ] (0,1) -B ] ] (0,1) -E ] ] (0,1) -B { { (0,1) -B } } (0,1) -BE ^a ax (0,1) -BE \^a a^a (1,3) -BE a\^ a^ (0,2) -BE a$ aa (1,2) -BE a\$ a$ (0,2) -BE ^$ NULL (0,0) -E $^ NULL (0,0) -E a($) aa (1,2)(2,2) -E a*(^a) aa (0,1)(0,1) -E (..)*(...)* a (0,0) -E (..)*(...)* abcd (0,4)(2,4) -E (ab|a)(bc|c) abc (0,3)(0,2)(2,3) -E (ab)c|abc abc (0,3)(0,2) -E a{0}b ab (1,2) -E (a*)(b?)(b+)b{3} aaabbbbbbb (0,10)(0,3)(3,4)(4,7) -E (a*)(b{0,1})(b{1,})b{3} aaabbbbbbb (0,10)(0,3)(3,4)(4,7) -E a{9876543210} NULL BADBR -E ((a|a)|a) a (0,1)(0,1)(0,1) -E (a*)(a|aa) aaaa (0,4)(0,3)(3,4) -E a*(a.|aa) aaaa (0,4)(2,4) -E a(b)|c(d)|a(e)f aef (0,3)(?,?)(?,?)(1,2) -E (a|b)?.* b (0,1)(0,1) -E (a|b)c|a(b|c) ac (0,2)(0,1) -E (a|b)c|a(b|c) ab (0,2)(?,?)(1,2) -E (a|b)*c|(a|ab)*c abc (0,3)(1,2) -E (a|b)*c|(a|ab)*c xc (1,2) -E (.a|.b).*|.*(.a|.b) xa (0,2)(0,2) -E a?(ab|ba)ab abab (0,4)(0,2) -E a?(ac{0}b|ba)ab abab (0,4)(0,2) -E ab|abab abbabab (0,2) -E aba|bab|bba baaabbbaba (5,8) -E aba|bab baaabbbaba (6,9) -E (aa|aaa)*|(a|aaaaa) aa (0,2)(0,2) -E (a.|.a.)*|(a|.a...) 
aa (0,2)(0,2) -E ab|a xabc (1,3) -E ab|a xxabc (2,4) -Ei (Ab|cD)* aBcD (0,4)(2,4) -BE [^-] --a (2,3) -BE [a-]* --a (0,3) -BE [a-m-]* --amoma-- (0,4) -E :::1:::0:|:::1:1:0: :::0:::1:::1:::0: (8,17) -E :::1:::0:|:::1:1:1: :::0:::1:::1:::0: (8,17) -{E [[:upper:]] A (0,1) [[<element>]] not supported -E [[:lower:]]+ `az{ (1,3) -E [[:upper:]]+ @AZ[ (1,3) -# No collation in Go -#BE [[-]] [[-]] (2,4) -#BE [[.NIL.]] NULL ECOLLATE -#BE [[=aleph=]] NULL ECOLLATE -} -BE$ \n \n (0,1) -BEn$ \n \n (0,1) -BE$ [^a] \n (0,1) -BE$ \na \na (0,2) -E (a)(b)(c) abc (0,3)(0,1)(1,2)(2,3) -BE xxx xxx (0,3) -#E1 (^|[ (,;])((([Ff]eb[^ ]* *|0*2/|\* */?)0*[6-7]))([^0-9]|$) feb 6, (0,6) -E (?:^|[ (,;])(?:(?:(?:[Ff]eb[^ ]* *|0*2/|\* */?)0*[6-7]))(?:[^0-9]|$) feb 6, (0,6) Rust -#E1 (^|[ (,;])((([Ff]eb[^ ]* *|0*2/|\* */?)0*[6-7]))([^0-9]|$) 2/7 (0,3) -E (?:^|[ (,;])(?:(?:(?:[Ff]eb[^ ]* *|0*2/|\* */?)0*[6-7]))(?:[^0-9]|$) 2/7 (0,3) Rust -#E1 (^|[ (,;])((([Ff]eb[^ ]* *|0*2/|\* */?)0*[6-7]))([^0-9]|$) feb 1,Feb 6 (5,11) -E (?:^|[ (,;])(?:(?:(?:[Ff]eb[^ ]* *|0*2/|\* */?)0*[6-7]))(?:[^0-9]|$) feb 1,Feb 6 (5,11) Rust -#E3 ((((((((((((((((((((((((((((((x)))))))))))))))))))))))))))))) x (0,1)(0,1)(0,1) -E (((?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:x)))))))))))))))))))))))))))))) x (0,1)(0,1)(0,1) Rust -#E3 ((((((((((((((((((((((((((((((x))))))))))))))))))))))))))))))* xx (0,2)(1,2)(1,2) -E (((?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:(?:x))))))))))))))))))))))))))))))* xx (0,2)(1,2)(1,2) Rust -E a?(ab|ba)* ababababababababababababababababababababababababababababababababababababababababa (0,81)(79,81) -E abaa|abbaa|abbbaa|abbbbaa ababbabbbabbbabbbbabbbbaa (18,25) -E abaa|abbaa|abbbaa|abbbbaa ababbabbbabbbabbbbabaa (18,22) -E aaac|aabc|abac|abbc|baac|babc|bbac|bbbc baaabbbabac (7,11) -#BE$ .* \x01\xff (0,2) -BE$ .* \x01\x7f (0,2) Rust -E aaaa|bbbb|cccc|ddddd|eeeeee|fffffff|gggg|hhhh|iiiii|jjjjj|kkkkk|llll XaaaXbbbXcccXdddXeeeXfffXgggXhhhXiiiXjjjXkkkXlllXcbaXaaaa (53,57) -L aaaa\nbbbb\ncccc\nddddd\neeeeee\nfffffff\ngggg\nhhhh\niiiii\njjjjj\nkkkkk\nllll XaaaXbbbXcccXdddXeeeXfffXgggXhhhXiiiXjjjXkkkXlllXcbaXaaaa NOMATCH -E a*a*a*a*a*b aaaaaaaaab (0,10) -BE ^ NULL (0,0) -BE $ NULL (0,0) -BE ^$ NULL (0,0) -BE ^a$ a (0,1) -BE abc abc (0,3) -BE abc xabcy (1,4) -BE abc ababc (2,5) -BE ab*c abc (0,3) -BE ab*bc abc (0,3) -BE ab*bc abbc (0,4) -BE ab*bc abbbbc (0,6) -E ab+bc abbc (0,4) -E ab+bc abbbbc (0,6) -E ab?bc abbc (0,4) -E ab?bc abc (0,3) -E ab?c abc (0,3) -BE ^abc$ abc (0,3) -BE ^abc abcc (0,3) -BE abc$ aabc (1,4) -BE ^ abc (0,0) -BE $ abc (3,3) -BE a.c abc (0,3) -BE a.c axc (0,3) -BE a.*c axyzc (0,5) -BE a[bc]d abd (0,3) -BE a[b-d]e ace (0,3) -BE a[b-d] aac (1,3) -BE a[-b] a- (0,2) -BE a[b-] a- (0,2) -BE a] a] (0,2) -BE a[]]b a]b (0,3) -BE a[^bc]d aed (0,3) -BE a[^-b]c adc (0,3) -BE a[^]b]c adc (0,3) -E ab|cd abc (0,2) -E ab|cd abcd (0,2) -E a\(b a(b (0,3) -E a\(*b ab (0,2) -E a\(*b a((b (0,4) -E ((a)) abc (0,1)(0,1)(0,1) -E (a)b(c) abc (0,3)(0,1)(2,3) -E a+b+c aabbabc (4,7) -E a* aaa (0,3) -E (a*)* - (0,0)(0,0) -E (a*)+ - (0,0)(0,0) -E (a*|b)* - (0,0)(0,0) -E (a+|b)* ab (0,2)(1,2) -E (a+|b)+ ab (0,2)(1,2) -E (a+|b)? 
ab (0,1)(0,1) -BE [^ab]* cde (0,3) -E (^)* - (0,0)(0,0) -BE a* NULL (0,0) -E ([abc])*d abbbcd (0,6)(4,5) -E ([abc])*bcd abcd (0,4)(0,1) -E a|b|c|d|e e (0,1) -E (a|b|c|d|e)f ef (0,2)(0,1) -E ((a*|b))* - (0,0)(0,0)(0,0) -BE abcd*efg abcdefg (0,7) -BE ab* xabyabbbz (1,3) -BE ab* xayabbbz (1,2) -E (ab|cd)e abcde (2,5)(2,4) -BE [abhgefdc]ij hij (0,3) -E (a|b)c*d abcd (1,4)(1,2) -E (ab|ab*)bc abc (0,3)(0,1) -E a([bc]*)c* abc (0,3)(1,3) -E a([bc]*)(c*d) abcd (0,4)(1,3)(3,4) -E a([bc]+)(c*d) abcd (0,4)(1,3)(3,4) -E a([bc]*)(c+d) abcd (0,4)(1,2)(2,4) -E a[bcd]*dcdcde adcdcde (0,7) -E (ab|a)b*c abc (0,3)(0,2) -E ((a)(b)c)(d) abcd (0,4)(0,3)(0,1)(1,2)(3,4) -BE [A-Za-z_][A-Za-z0-9_]* alpha (0,5) -E ^a(bc+|b[eh])g|.h$ abh (1,3) -E (bc+d$|ef*g.|h?i(j|k)) effgz (0,5)(0,5) -E (bc+d$|ef*g.|h?i(j|k)) ij (0,2)(0,2)(1,2) -E (bc+d$|ef*g.|h?i(j|k)) reffgz (1,6)(1,6) -E (((((((((a))))))))) a (0,1)(0,1)(0,1)(0,1)(0,1)(0,1)(0,1)(0,1)(0,1)(0,1) -BE multiple words multiple words yeah (0,14) -E (.*)c(.*) abcde (0,5)(0,2)(3,5) -BE abcd abcd (0,4) -E a(bc)d abcd (0,4)(1,3) -E a[-]?c ac (0,3) -E M[ou]'?am+[ae]r .*([AEae]l[- ])?[GKQ]h?[aeu]+([dtz][dhz]?)+af[iy] Muammar Qaddafi (0,15)(?,?)(10,12) -E M[ou]'?am+[ae]r .*([AEae]l[- ])?[GKQ]h?[aeu]+([dtz][dhz]?)+af[iy] Mo'ammar Gadhafi (0,16)(?,?)(11,13) -E M[ou]'?am+[ae]r .*([AEae]l[- ])?[GKQ]h?[aeu]+([dtz][dhz]?)+af[iy] Muammar Kaddafi (0,15)(?,?)(10,12) -E M[ou]'?am+[ae]r .*([AEae]l[- ])?[GKQ]h?[aeu]+([dtz][dhz]?)+af[iy] Muammar Qadhafi (0,15)(?,?)(10,12) -E M[ou]'?am+[ae]r .*([AEae]l[- ])?[GKQ]h?[aeu]+([dtz][dhz]?)+af[iy] Muammar Gadafi (0,14)(?,?)(10,11) -E M[ou]'?am+[ae]r .*([AEae]l[- ])?[GKQ]h?[aeu]+([dtz][dhz]?)+af[iy] Mu'ammar Qadafi (0,15)(?,?)(11,12) -E M[ou]'?am+[ae]r .*([AEae]l[- ])?[GKQ]h?[aeu]+([dtz][dhz]?)+af[iy] Moamar Gaddafi (0,14)(?,?)(9,11) -E M[ou]'?am+[ae]r .*([AEae]l[- ])?[GKQ]h?[aeu]+([dtz][dhz]?)+af[iy] Mu'ammar Qadhdhafi (0,18)(?,?)(13,15) -E M[ou]'?am+[ae]r .*([AEae]l[- ])?[GKQ]h?[aeu]+([dtz][dhz]?)+af[iy] Muammar Khaddafi (0,16)(?,?)(11,13) -E M[ou]'?am+[ae]r .*([AEae]l[- ])?[GKQ]h?[aeu]+([dtz][dhz]?)+af[iy] Muammar Ghaddafy (0,16)(?,?)(11,13) -E M[ou]'?am+[ae]r .*([AEae]l[- ])?[GKQ]h?[aeu]+([dtz][dhz]?)+af[iy] Muammar Ghadafi (0,15)(?,?)(11,12) -E M[ou]'?am+[ae]r .*([AEae]l[- ])?[GKQ]h?[aeu]+([dtz][dhz]?)+af[iy] Muammar Ghaddafi (0,16)(?,?)(11,13) -E M[ou]'?am+[ae]r .*([AEae]l[- ])?[GKQ]h?[aeu]+([dtz][dhz]?)+af[iy] Muamar Kaddafi (0,14)(?,?)(9,11) -E M[ou]'?am+[ae]r .*([AEae]l[- ])?[GKQ]h?[aeu]+([dtz][dhz]?)+af[iy] Muammar Quathafi (0,16)(?,?)(11,13) -E M[ou]'?am+[ae]r .*([AEae]l[- ])?[GKQ]h?[aeu]+([dtz][dhz]?)+af[iy] Muammar Gheddafi (0,16)(?,?)(11,13) -E M[ou]'?am+[ae]r .*([AEae]l[- ])?[GKQ]h?[aeu]+([dtz][dhz]?)+af[iy] Moammar Khadafy (0,15)(?,?)(11,12) -E M[ou]'?am+[ae]r .*([AEae]l[- ])?[GKQ]h?[aeu]+([dtz][dhz]?)+af[iy] Moammar Qudhafi (0,15)(?,?)(10,12) -E a+(b|c)*d+ aabcdd (0,6)(3,4) -E ^.+$ vivi (0,4) -E ^(.+)$ vivi (0,4)(0,4) -E ^([^!.]+).att.com!(.+)$ gryphon.att.com!eby (0,19)(0,7)(16,19) -E ^([^!]+!)?([^!]+)$ bas (0,3)(?,?)(0,3) -E ^([^!]+!)?([^!]+)$ bar!bas (0,7)(0,4)(4,7) -E ^([^!]+!)?([^!]+)$ foo!bas (0,7)(0,4)(4,7) -E ^.+!([^!]+!)([^!]+)$ foo!bar!bas (0,11)(4,8)(8,11) -E ((foo)|(bar))!bas bar!bas (0,7)(0,3)(?,?)(0,3) -E ((foo)|(bar))!bas foo!bar!bas (4,11)(4,7)(?,?)(4,7) -E ((foo)|(bar))!bas foo!bas (0,7)(0,3)(0,3) -E ((foo)|bar)!bas bar!bas (0,7)(0,3) -E ((foo)|bar)!bas foo!bar!bas (4,11)(4,7) -E ((foo)|bar)!bas foo!bas (0,7)(0,3)(0,3) -E (foo|(bar))!bas bar!bas (0,7)(0,3)(0,3) -E (foo|(bar))!bas foo!bar!bas (4,11)(4,7)(4,7) -E 
(foo|(bar))!bas foo!bas (0,7)(0,3) -E (foo|bar)!bas bar!bas (0,7)(0,3) -E (foo|bar)!bas foo!bar!bas (4,11)(4,7) -E (foo|bar)!bas foo!bas (0,7)(0,3) -E ^(([^!]+!)?([^!]+)|.+!([^!]+!)([^!]+))$ foo!bar!bas (0,11)(0,11)(?,?)(?,?)(4,8)(8,11) -E ^([^!]+!)?([^!]+)$|^.+!([^!]+!)([^!]+)$ bas (0,3)(?,?)(0,3) -E ^([^!]+!)?([^!]+)$|^.+!([^!]+!)([^!]+)$ bar!bas (0,7)(0,4)(4,7) -E ^([^!]+!)?([^!]+)$|^.+!([^!]+!)([^!]+)$ foo!bar!bas (0,11)(?,?)(?,?)(4,8)(8,11) -E ^([^!]+!)?([^!]+)$|^.+!([^!]+!)([^!]+)$ foo!bas (0,7)(0,4)(4,7) -E ^(([^!]+!)?([^!]+)|.+!([^!]+!)([^!]+))$ bas (0,3)(0,3)(?,?)(0,3) -E ^(([^!]+!)?([^!]+)|.+!([^!]+!)([^!]+))$ bar!bas (0,7)(0,7)(0,4)(4,7) -E ^(([^!]+!)?([^!]+)|.+!([^!]+!)([^!]+))$ foo!bar!bas (0,11)(0,11)(?,?)(?,?)(4,8)(8,11) -E ^(([^!]+!)?([^!]+)|.+!([^!]+!)([^!]+))$ foo!bas (0,7)(0,7)(0,4)(4,7) -E .*(/XXX).* /XXX (0,4)(0,4) -E .*(\\XXX).* \XXX (0,4)(0,4) -E \\XXX \XXX (0,4) -E .*(/000).* /000 (0,4)(0,4) -E .*(\\000).* \000 (0,4)(0,4) -E \\000 \000 (0,4) diff --git a/vendor/regex/testdata/fowler/dat/nullsubexpr.dat b/vendor/regex/testdata/fowler/dat/nullsubexpr.dat deleted file mode 100644 index eb3e721d3301a1..00000000000000 --- a/vendor/regex/testdata/fowler/dat/nullsubexpr.dat +++ /dev/null @@ -1,74 +0,0 @@ -NOTE null subexpression matches : 2002-06-06 - -E (a*)* a (0,1)(0,1) -E SAME x (0,0)(0,0) -E SAME aaaaaa (0,6)(0,6) -E SAME aaaaaax (0,6)(0,6) -E (a*)+ a (0,1)(0,1) -E SAME x (0,0)(0,0) -E SAME aaaaaa (0,6)(0,6) -E SAME aaaaaax (0,6)(0,6) -E (a+)* a (0,1)(0,1) -E SAME x (0,0) -E SAME aaaaaa (0,6)(0,6) -E SAME aaaaaax (0,6)(0,6) -E (a+)+ a (0,1)(0,1) -E SAME x NOMATCH -E SAME aaaaaa (0,6)(0,6) -E SAME aaaaaax (0,6)(0,6) - -E ([a]*)* a (0,1)(0,1) -E SAME x (0,0)(0,0) -E SAME aaaaaa (0,6)(0,6) -E SAME aaaaaax (0,6)(0,6) -E ([a]*)+ a (0,1)(0,1) -E SAME x (0,0)(0,0) -E SAME aaaaaa (0,6)(0,6) -E SAME aaaaaax (0,6)(0,6) -E ([^b]*)* a (0,1)(0,1) -E SAME b (0,0)(0,0) -E SAME aaaaaa (0,6)(0,6) -E SAME aaaaaab (0,6)(0,6) -E ([ab]*)* a (0,1)(0,1) -E SAME aaaaaa (0,6)(0,6) -E SAME ababab (0,6)(0,6) -E SAME bababa (0,6)(0,6) -E SAME b (0,1)(0,1) -E SAME bbbbbb (0,6)(0,6) -E SAME aaaabcde (0,5)(0,5) -E ([^a]*)* b (0,1)(0,1) -E SAME bbbbbb (0,6)(0,6) -E SAME aaaaaa (0,0)(0,0) -E ([^ab]*)* ccccxx (0,6)(0,6) -E SAME ababab (0,0)(0,0) - -#E ((z)+|a)* zabcde (0,2)(1,2) -E ((z)+|a)* zabcde (0,2)(1,2)(0,1) Rust - -#{E a+? aaaaaa (0,1) no *? +? minimal match ops -#E (a) aaa (0,1)(0,1) -#E (a*?) aaa (0,0)(0,0) -#E (a)*? aaa (0,0) -#E (a*?)*? aaa (0,0) -#} - -B \(a*\)*\(x\) x (0,1)(0,0)(0,1) -B \(a*\)*\(x\) ax (0,2)(0,1)(1,2) -B \(a*\)*\(x\) axa (0,2)(0,1)(1,2) -B \(a*\)*\(x\)\(\1\) x (0,1)(0,0)(0,1)(1,1) -B \(a*\)*\(x\)\(\1\) ax (0,2)(1,1)(1,2)(2,2) -B \(a*\)*\(x\)\(\1\) axa (0,3)(0,1)(1,2)(2,3) -B \(a*\)*\(x\)\(\1\)\(x\) axax (0,4)(0,1)(1,2)(2,3)(3,4) -B \(a*\)*\(x\)\(\1\)\(x\) axxa (0,3)(1,1)(1,2)(2,2)(2,3) - -E (a*)*(x) x (0,1)(0,0)(0,1) -E (a*)*(x) ax (0,2)(0,1)(1,2) -E (a*)*(x) axa (0,2)(0,1)(1,2) - -E (a*)+(x) x (0,1)(0,0)(0,1) -E (a*)+(x) ax (0,2)(0,1)(1,2) -E (a*)+(x) axa (0,2)(0,1)(1,2) - -E (a*){2}(x) x (0,1)(0,0)(0,1) -E (a*){2}(x) ax (0,2)(1,1)(1,2) -E (a*){2}(x) axa (0,2)(1,1)(1,2) diff --git a/vendor/regex/testdata/fowler/dat/repetition.dat b/vendor/regex/testdata/fowler/dat/repetition.dat deleted file mode 100644 index cf0d8382f84357..00000000000000 --- a/vendor/regex/testdata/fowler/dat/repetition.dat +++ /dev/null @@ -1,169 +0,0 @@ -NOTE implicit vs. 
explicit repetitions : 2009-02-02 - -# Glenn Fowler <gsf@research.att.com> -# conforming matches (column 4) must match one of the following BREs -# NOMATCH -# (0,.)\((\(.\),\(.\))(?,?)(\2,\3)\)* -# (0,.)\((\(.\),\(.\))(\2,\3)(?,?)\)* -# i.e., each 3-tuple has two identical elements and one (?,?) - -E ((..)|(.)) NULL NOMATCH -E ((..)|(.))((..)|(.)) NULL NOMATCH -E ((..)|(.))((..)|(.))((..)|(.)) NULL NOMATCH - -E ((..)|(.)){1} NULL NOMATCH -E ((..)|(.)){2} NULL NOMATCH -E ((..)|(.)){3} NULL NOMATCH - -E ((..)|(.))* NULL (0,0) - -E ((..)|(.)) a (0,1)(0,1)(?,?)(0,1) -E ((..)|(.))((..)|(.)) a NOMATCH -E ((..)|(.))((..)|(.))((..)|(.)) a NOMATCH - -E ((..)|(.)){1} a (0,1)(0,1)(?,?)(0,1) -E ((..)|(.)){2} a NOMATCH -E ((..)|(.)){3} a NOMATCH - -E ((..)|(.))* a (0,1)(0,1)(?,?)(0,1) - -E ((..)|(.)) aa (0,2)(0,2)(0,2)(?,?) -E ((..)|(.))((..)|(.)) aa (0,2)(0,1)(?,?)(0,1)(1,2)(?,?)(1,2) -E ((..)|(.))((..)|(.))((..)|(.)) aa NOMATCH - -E ((..)|(.)){1} aa (0,2)(0,2)(0,2)(?,?) -E ((..)|(.)){2} aa (0,2)(1,2)(?,?)(1,2) -E ((..)|(.)){3} aa NOMATCH - -E ((..)|(.))* aa (0,2)(0,2)(0,2)(?,?) - -E ((..)|(.)) aaa (0,2)(0,2)(0,2)(?,?) -E ((..)|(.))((..)|(.)) aaa (0,3)(0,2)(0,2)(?,?)(2,3)(?,?)(2,3) -E ((..)|(.))((..)|(.))((..)|(.)) aaa (0,3)(0,1)(?,?)(0,1)(1,2)(?,?)(1,2)(2,3)(?,?)(2,3) - -E ((..)|(.)){1} aaa (0,2)(0,2)(0,2)(?,?) -#E ((..)|(.)){2} aaa (0,3)(2,3)(?,?)(2,3) -E ((..)|(.)){2} aaa (0,3)(2,3)(0,2)(2,3) RE2/Go -E ((..)|(.)){3} aaa (0,3)(2,3)(?,?)(2,3) - -#E ((..)|(.))* aaa (0,3)(2,3)(?,?)(2,3) -E ((..)|(.))* aaa (0,3)(2,3)(0,2)(2,3) RE2/Go - -E ((..)|(.)) aaaa (0,2)(0,2)(0,2)(?,?) -E ((..)|(.))((..)|(.)) aaaa (0,4)(0,2)(0,2)(?,?)(2,4)(2,4)(?,?) -E ((..)|(.))((..)|(.))((..)|(.)) aaaa (0,4)(0,2)(0,2)(?,?)(2,3)(?,?)(2,3)(3,4)(?,?)(3,4) - -E ((..)|(.)){1} aaaa (0,2)(0,2)(0,2)(?,?) -E ((..)|(.)){2} aaaa (0,4)(2,4)(2,4)(?,?) -#E ((..)|(.)){3} aaaa (0,4)(3,4)(?,?)(3,4) -E ((..)|(.)){3} aaaa (0,4)(3,4)(0,2)(3,4) RE2/Go - -E ((..)|(.))* aaaa (0,4)(2,4)(2,4)(?,?) - -E ((..)|(.)) aaaaa (0,2)(0,2)(0,2)(?,?) -E ((..)|(.))((..)|(.)) aaaaa (0,4)(0,2)(0,2)(?,?)(2,4)(2,4)(?,?) -E ((..)|(.))((..)|(.))((..)|(.)) aaaaa (0,5)(0,2)(0,2)(?,?)(2,4)(2,4)(?,?)(4,5)(?,?)(4,5) - -E ((..)|(.)){1} aaaaa (0,2)(0,2)(0,2)(?,?) -E ((..)|(.)){2} aaaaa (0,4)(2,4)(2,4)(?,?) -#E ((..)|(.)){3} aaaaa (0,5)(4,5)(?,?)(4,5) -E ((..)|(.)){3} aaaaa (0,5)(4,5)(2,4)(4,5) RE2/Go - -#E ((..)|(.))* aaaaa (0,5)(4,5)(?,?)(4,5) -E ((..)|(.))* aaaaa (0,5)(4,5)(2,4)(4,5) RE2/Go - -E ((..)|(.)) aaaaaa (0,2)(0,2)(0,2)(?,?) -E ((..)|(.))((..)|(.)) aaaaaa (0,4)(0,2)(0,2)(?,?)(2,4)(2,4)(?,?) -E ((..)|(.))((..)|(.))((..)|(.)) aaaaaa (0,6)(0,2)(0,2)(?,?)(2,4)(2,4)(?,?)(4,6)(4,6)(?,?) - -E ((..)|(.)){1} aaaaaa (0,2)(0,2)(0,2)(?,?) -E ((..)|(.)){2} aaaaaa (0,4)(2,4)(2,4)(?,?) -E ((..)|(.)){3} aaaaaa (0,6)(4,6)(4,6)(?,?) - -E ((..)|(.))* aaaaaa (0,6)(4,6)(4,6)(?,?) - -NOTE additional repetition tests graciously provided by Chris Kuklewicz www.haskell.org 2009-02-02 - -# These test a bug in OS X / FreeBSD / NetBSD, and libtree. -# Linux/GLIBC gets the {8,} and {8,8} wrong. 
- -:HA#100:E X(.?){0,}Y X1234567Y (0,9)(7,8) -:HA#101:E X(.?){1,}Y X1234567Y (0,9)(7,8) -:HA#102:E X(.?){2,}Y X1234567Y (0,9)(7,8) -:HA#103:E X(.?){3,}Y X1234567Y (0,9)(7,8) -:HA#104:E X(.?){4,}Y X1234567Y (0,9)(7,8) -:HA#105:E X(.?){5,}Y X1234567Y (0,9)(7,8) -:HA#106:E X(.?){6,}Y X1234567Y (0,9)(7,8) -:HA#107:E X(.?){7,}Y X1234567Y (0,9)(7,8) -:HA#108:E X(.?){8,}Y X1234567Y (0,9)(8,8) -#:HA#110:E X(.?){0,8}Y X1234567Y (0,9)(7,8) -:HA#110:E X(.?){0,8}Y X1234567Y (0,9)(8,8) RE2/Go -#:HA#111:E X(.?){1,8}Y X1234567Y (0,9)(7,8) -:HA#111:E X(.?){1,8}Y X1234567Y (0,9)(8,8) RE2/Go -#:HA#112:E X(.?){2,8}Y X1234567Y (0,9)(7,8) -:HA#112:E X(.?){2,8}Y X1234567Y (0,9)(8,8) RE2/Go -#:HA#113:E X(.?){3,8}Y X1234567Y (0,9)(7,8) -:HA#113:E X(.?){3,8}Y X1234567Y (0,9)(8,8) RE2/Go -#:HA#114:E X(.?){4,8}Y X1234567Y (0,9)(7,8) -:HA#114:E X(.?){4,8}Y X1234567Y (0,9)(8,8) RE2/Go -#:HA#115:E X(.?){5,8}Y X1234567Y (0,9)(7,8) -:HA#115:E X(.?){5,8}Y X1234567Y (0,9)(8,8) RE2/Go -#:HA#116:E X(.?){6,8}Y X1234567Y (0,9)(7,8) -:HA#116:E X(.?){6,8}Y X1234567Y (0,9)(8,8) RE2/Go -#:HA#117:E X(.?){7,8}Y X1234567Y (0,9)(7,8) -:HA#117:E X(.?){7,8}Y X1234567Y (0,9)(8,8) RE2/Go -:HA#118:E X(.?){8,8}Y X1234567Y (0,9)(8,8) - -# These test a fixed bug in my regex-tdfa that did not keep the expanded -# form properly grouped, so right association did the wrong thing with -# these ambiguous patterns (crafted just to test my code when I became -# suspicious of my implementation). The first subexpression should use -# "ab" then "a" then "bcd". - -# OS X / FreeBSD / NetBSD badly fail many of these, with impossible -# results like (0,6)(4,5)(6,6). - -#:HA#260:E (a|ab|c|bcd){0,}(d*) ababcd (0,6)(3,6)(6,6) -:HA#260:E (a|ab|c|bcd){0,}(d*) ababcd (0,1)(0,1)(1,1) Rust -#:HA#261:E (a|ab|c|bcd){1,}(d*) ababcd (0,6)(3,6)(6,6) -:HA#261:E (a|ab|c|bcd){1,}(d*) ababcd (0,1)(0,1)(1,1) Rust -:HA#262:E (a|ab|c|bcd){2,}(d*) ababcd (0,6)(3,6)(6,6) -:HA#263:E (a|ab|c|bcd){3,}(d*) ababcd (0,6)(3,6)(6,6) -:HA#264:E (a|ab|c|bcd){4,}(d*) ababcd NOMATCH -#:HA#265:E (a|ab|c|bcd){0,10}(d*) ababcd (0,6)(3,6)(6,6) -:HA#265:E (a|ab|c|bcd){0,10}(d*) ababcd (0,1)(0,1)(1,1) Rust -#:HA#266:E (a|ab|c|bcd){1,10}(d*) ababcd (0,6)(3,6)(6,6) -:HA#266:E (a|ab|c|bcd){1,10}(d*) ababcd (0,1)(0,1)(1,1) Rust -:HA#267:E (a|ab|c|bcd){2,10}(d*) ababcd (0,6)(3,6)(6,6) -:HA#268:E (a|ab|c|bcd){3,10}(d*) ababcd (0,6)(3,6)(6,6) -:HA#269:E (a|ab|c|bcd){4,10}(d*) ababcd NOMATCH -#:HA#270:E (a|ab|c|bcd)*(d*) ababcd (0,6)(3,6)(6,6) -:HA#270:E (a|ab|c|bcd)*(d*) ababcd (0,1)(0,1)(1,1) Rust -#:HA#271:E (a|ab|c|bcd)+(d*) ababcd (0,6)(3,6)(6,6) -:HA#271:E (a|ab|c|bcd)+(d*) ababcd (0,1)(0,1)(1,1) Rust - -# The above worked on Linux/GLIBC but the following often fail. 
-# They also trip up OS X / FreeBSD / NetBSD: - -#:HA#280:E (ab|a|c|bcd){0,}(d*) ababcd (0,6)(3,6)(6,6) -:HA#280:E (ab|a|c|bcd){0,}(d*) ababcd (0,6)(4,5)(5,6) RE2/Go -#:HA#281:E (ab|a|c|bcd){1,}(d*) ababcd (0,6)(3,6)(6,6) -:HA#281:E (ab|a|c|bcd){1,}(d*) ababcd (0,6)(4,5)(5,6) RE2/Go -#:HA#282:E (ab|a|c|bcd){2,}(d*) ababcd (0,6)(3,6)(6,6) -:HA#282:E (ab|a|c|bcd){2,}(d*) ababcd (0,6)(4,5)(5,6) RE2/Go -#:HA#283:E (ab|a|c|bcd){3,}(d*) ababcd (0,6)(3,6)(6,6) -:HA#283:E (ab|a|c|bcd){3,}(d*) ababcd (0,6)(4,5)(5,6) RE2/Go -:HA#284:E (ab|a|c|bcd){4,}(d*) ababcd NOMATCH -#:HA#285:E (ab|a|c|bcd){0,10}(d*) ababcd (0,6)(3,6)(6,6) -:HA#285:E (ab|a|c|bcd){0,10}(d*) ababcd (0,6)(4,5)(5,6) RE2/Go -#:HA#286:E (ab|a|c|bcd){1,10}(d*) ababcd (0,6)(3,6)(6,6) -:HA#286:E (ab|a|c|bcd){1,10}(d*) ababcd (0,6)(4,5)(5,6) RE2/Go -#:HA#287:E (ab|a|c|bcd){2,10}(d*) ababcd (0,6)(3,6)(6,6) -:HA#287:E (ab|a|c|bcd){2,10}(d*) ababcd (0,6)(4,5)(5,6) RE2/Go -#:HA#288:E (ab|a|c|bcd){3,10}(d*) ababcd (0,6)(3,6)(6,6) -:HA#288:E (ab|a|c|bcd){3,10}(d*) ababcd (0,6)(4,5)(5,6) RE2/Go -:HA#289:E (ab|a|c|bcd){4,10}(d*) ababcd NOMATCH -#:HA#290:E (ab|a|c|bcd)*(d*) ababcd (0,6)(3,6)(6,6) -:HA#290:E (ab|a|c|bcd)*(d*) ababcd (0,6)(4,5)(5,6) RE2/Go -#:HA#291:E (ab|a|c|bcd)+(d*) ababcd (0,6)(3,6)(6,6) -:HA#291:E (ab|a|c|bcd)+(d*) ababcd (0,6)(4,5)(5,6) RE2/Go diff --git a/vendor/regex/testdata/fowler/nullsubexpr.toml b/vendor/regex/testdata/fowler/nullsubexpr.toml deleted file mode 100644 index 2f1f0183edf4da..00000000000000 --- a/vendor/regex/testdata/fowler/nullsubexpr.toml +++ /dev/null @@ -1,405 +0,0 @@ -# !!! DO NOT EDIT !!! -# Automatically generated by 'regex-cli generate fowler'. -# Numbers in the test names correspond to the line number of the test from -# the original dat file. - -[[test]] -name = "nullsubexpr3" -regex = '''(a*)*''' -haystack = '''a''' -matches = [[[0, 1], [0, 1]]] -match-limit = 1 -anchored = true - -[[test]] -name = "nullsubexpr4" -regex = '''(a*)*''' -haystack = '''x''' -matches = [[[0, 0], [0, 0]]] -match-limit = 1 -anchored = true - -[[test]] -name = "nullsubexpr5" -regex = '''(a*)*''' -haystack = '''aaaaaa''' -matches = [[[0, 6], [0, 6]]] -match-limit = 1 -anchored = true - -[[test]] -name = "nullsubexpr6" -regex = '''(a*)*''' -haystack = '''aaaaaax''' -matches = [[[0, 6], [0, 6]]] -match-limit = 1 -anchored = true - -[[test]] -name = "nullsubexpr7" -regex = '''(a*)+''' -haystack = '''a''' -matches = [[[0, 1], [0, 1]]] -match-limit = 1 -anchored = true - -[[test]] -name = "nullsubexpr8" -regex = '''(a*)+''' -haystack = '''x''' -matches = [[[0, 0], [0, 0]]] -match-limit = 1 -anchored = true - -[[test]] -name = "nullsubexpr9" -regex = '''(a*)+''' -haystack = '''aaaaaa''' -matches = [[[0, 6], [0, 6]]] -match-limit = 1 -anchored = true - -[[test]] -name = "nullsubexpr10" -regex = '''(a*)+''' -haystack = '''aaaaaax''' -matches = [[[0, 6], [0, 6]]] -match-limit = 1 -anchored = true - -[[test]] -name = "nullsubexpr11" -regex = '''(a+)*''' -haystack = '''a''' -matches = [[[0, 1], [0, 1]]] -match-limit = 1 -anchored = true - -[[test]] -name = "nullsubexpr12" -regex = '''(a+)*''' -haystack = '''x''' -matches = [[[0, 0], []]] -match-limit = 1 -anchored = true - -[[test]] -name = "nullsubexpr13" -regex = '''(a+)*''' -haystack = '''aaaaaa''' -matches = [[[0, 6], [0, 6]]] -match-limit = 1 -anchored = true - -[[test]] -name = "nullsubexpr14" -regex = '''(a+)*''' -haystack = '''aaaaaax''' -matches = [[[0, 6], [0, 6]]] -match-limit = 1 -anchored = true - -[[test]] -name = "nullsubexpr15" -regex = '''(a+)+''' -haystack = 
'''a''' -matches = [[[0, 1], [0, 1]]] -match-limit = 1 -anchored = true - -[[test]] -name = "nullsubexpr16" -regex = '''(a+)+''' -haystack = '''x''' -matches = [] -match-limit = 1 - -[[test]] -name = "nullsubexpr17" -regex = '''(a+)+''' -haystack = '''aaaaaa''' -matches = [[[0, 6], [0, 6]]] -match-limit = 1 -anchored = true - -[[test]] -name = "nullsubexpr18" -regex = '''(a+)+''' -haystack = '''aaaaaax''' -matches = [[[0, 6], [0, 6]]] -match-limit = 1 -anchored = true - -[[test]] -name = "nullsubexpr20" -regex = '''([a]*)*''' -haystack = '''a''' -matches = [[[0, 1], [0, 1]]] -match-limit = 1 -anchored = true - -[[test]] -name = "nullsubexpr21" -regex = '''([a]*)*''' -haystack = '''x''' -matches = [[[0, 0], [0, 0]]] -match-limit = 1 -anchored = true - -[[test]] -name = "nullsubexpr22" -regex = '''([a]*)*''' -haystack = '''aaaaaa''' -matches = [[[0, 6], [0, 6]]] -match-limit = 1 -anchored = true - -[[test]] -name = "nullsubexpr23" -regex = '''([a]*)*''' -haystack = '''aaaaaax''' -matches = [[[0, 6], [0, 6]]] -match-limit = 1 -anchored = true - -[[test]] -name = "nullsubexpr24" -regex = '''([a]*)+''' -haystack = '''a''' -matches = [[[0, 1], [0, 1]]] -match-limit = 1 -anchored = true - -[[test]] -name = "nullsubexpr25" -regex = '''([a]*)+''' -haystack = '''x''' -matches = [[[0, 0], [0, 0]]] -match-limit = 1 -anchored = true - -[[test]] -name = "nullsubexpr26" -regex = '''([a]*)+''' -haystack = '''aaaaaa''' -matches = [[[0, 6], [0, 6]]] -match-limit = 1 -anchored = true - -[[test]] -name = "nullsubexpr27" -regex = '''([a]*)+''' -haystack = '''aaaaaax''' -matches = [[[0, 6], [0, 6]]] -match-limit = 1 -anchored = true - -[[test]] -name = "nullsubexpr28" -regex = '''([^b]*)*''' -haystack = '''a''' -matches = [[[0, 1], [0, 1]]] -match-limit = 1 -anchored = true - -[[test]] -name = "nullsubexpr29" -regex = '''([^b]*)*''' -haystack = '''b''' -matches = [[[0, 0], [0, 0]]] -match-limit = 1 -anchored = true - -[[test]] -name = "nullsubexpr30" -regex = '''([^b]*)*''' -haystack = '''aaaaaa''' -matches = [[[0, 6], [0, 6]]] -match-limit = 1 -anchored = true - -[[test]] -name = "nullsubexpr31" -regex = '''([^b]*)*''' -haystack = '''aaaaaab''' -matches = [[[0, 6], [0, 6]]] -match-limit = 1 -anchored = true - -[[test]] -name = "nullsubexpr32" -regex = '''([ab]*)*''' -haystack = '''a''' -matches = [[[0, 1], [0, 1]]] -match-limit = 1 -anchored = true - -[[test]] -name = "nullsubexpr33" -regex = '''([ab]*)*''' -haystack = '''aaaaaa''' -matches = [[[0, 6], [0, 6]]] -match-limit = 1 -anchored = true - -[[test]] -name = "nullsubexpr34" -regex = '''([ab]*)*''' -haystack = '''ababab''' -matches = [[[0, 6], [0, 6]]] -match-limit = 1 -anchored = true - -[[test]] -name = "nullsubexpr35" -regex = '''([ab]*)*''' -haystack = '''bababa''' -matches = [[[0, 6], [0, 6]]] -match-limit = 1 -anchored = true - -[[test]] -name = "nullsubexpr36" -regex = '''([ab]*)*''' -haystack = '''b''' -matches = [[[0, 1], [0, 1]]] -match-limit = 1 -anchored = true - -[[test]] -name = "nullsubexpr37" -regex = '''([ab]*)*''' -haystack = '''bbbbbb''' -matches = [[[0, 6], [0, 6]]] -match-limit = 1 -anchored = true - -[[test]] -name = "nullsubexpr38" -regex = '''([ab]*)*''' -haystack = '''aaaabcde''' -matches = [[[0, 5], [0, 5]]] -match-limit = 1 -anchored = true - -[[test]] -name = "nullsubexpr39" -regex = '''([^a]*)*''' -haystack = '''b''' -matches = [[[0, 1], [0, 1]]] -match-limit = 1 -anchored = true - -[[test]] -name = "nullsubexpr40" -regex = '''([^a]*)*''' -haystack = '''bbbbbb''' -matches = [[[0, 6], [0, 6]]] -match-limit = 1 -anchored = true 
- -[[test]] -name = "nullsubexpr41" -regex = '''([^a]*)*''' -haystack = '''aaaaaa''' -matches = [[[0, 0], [0, 0]]] -match-limit = 1 -anchored = true - -[[test]] -name = "nullsubexpr42" -regex = '''([^ab]*)*''' -haystack = '''ccccxx''' -matches = [[[0, 6], [0, 6]]] -match-limit = 1 -anchored = true - -[[test]] -name = "nullsubexpr43" -regex = '''([^ab]*)*''' -haystack = '''ababab''' -matches = [[[0, 0], [0, 0]]] -match-limit = 1 -anchored = true - -# Test added by Rust regex project. -[[test]] -name = "nullsubexpr46" -regex = '''((z)+|a)*''' -haystack = '''zabcde''' -matches = [[[0, 2], [1, 2], [0, 1]]] -match-limit = 1 -anchored = true - -[[test]] -name = "nullsubexpr64" -regex = '''(a*)*(x)''' -haystack = '''x''' -matches = [[[0, 1], [0, 0], [0, 1]]] -match-limit = 1 -anchored = true - -[[test]] -name = "nullsubexpr65" -regex = '''(a*)*(x)''' -haystack = '''ax''' -matches = [[[0, 2], [0, 1], [1, 2]]] -match-limit = 1 -anchored = true - -[[test]] -name = "nullsubexpr66" -regex = '''(a*)*(x)''' -haystack = '''axa''' -matches = [[[0, 2], [0, 1], [1, 2]]] -match-limit = 1 -anchored = true - -[[test]] -name = "nullsubexpr68" -regex = '''(a*)+(x)''' -haystack = '''x''' -matches = [[[0, 1], [0, 0], [0, 1]]] -match-limit = 1 -anchored = true - -[[test]] -name = "nullsubexpr69" -regex = '''(a*)+(x)''' -haystack = '''ax''' -matches = [[[0, 2], [0, 1], [1, 2]]] -match-limit = 1 -anchored = true - -[[test]] -name = "nullsubexpr70" -regex = '''(a*)+(x)''' -haystack = '''axa''' -matches = [[[0, 2], [0, 1], [1, 2]]] -match-limit = 1 -anchored = true - -[[test]] -name = "nullsubexpr72" -regex = '''(a*){2}(x)''' -haystack = '''x''' -matches = [[[0, 1], [0, 0], [0, 1]]] -match-limit = 1 -anchored = true - -[[test]] -name = "nullsubexpr73" -regex = '''(a*){2}(x)''' -haystack = '''ax''' -matches = [[[0, 2], [1, 1], [1, 2]]] -match-limit = 1 -anchored = true - -[[test]] -name = "nullsubexpr74" -regex = '''(a*){2}(x)''' -haystack = '''axa''' -matches = [[[0, 2], [1, 1], [1, 2]]] -match-limit = 1 -anchored = true - diff --git a/vendor/regex/testdata/fowler/repetition.toml b/vendor/regex/testdata/fowler/repetition.toml deleted file mode 100644 index d6a711202209b2..00000000000000 --- a/vendor/regex/testdata/fowler/repetition.toml +++ /dev/null @@ -1,746 +0,0 @@ -# !!! DO NOT EDIT !!! -# Automatically generated by 'regex-cli generate fowler'. -# Numbers in the test names correspond to the line number of the test from -# the original dat file. 
- -[[test]] -name = "repetition10" -regex = '''((..)|(.))''' -haystack = '''''' -matches = [] -match-limit = 1 - -[[test]] -name = "repetition11" -regex = '''((..)|(.))((..)|(.))''' -haystack = '''''' -matches = [] -match-limit = 1 - -[[test]] -name = "repetition12" -regex = '''((..)|(.))((..)|(.))((..)|(.))''' -haystack = '''''' -matches = [] -match-limit = 1 - -[[test]] -name = "repetition14" -regex = '''((..)|(.)){1}''' -haystack = '''''' -matches = [] -match-limit = 1 - -[[test]] -name = "repetition15" -regex = '''((..)|(.)){2}''' -haystack = '''''' -matches = [] -match-limit = 1 - -[[test]] -name = "repetition16" -regex = '''((..)|(.)){3}''' -haystack = '''''' -matches = [] -match-limit = 1 - -[[test]] -name = "repetition18" -regex = '''((..)|(.))*''' -haystack = '''''' -matches = [[[0, 0], [], [], []]] -match-limit = 1 -anchored = true - -[[test]] -name = "repetition20" -regex = '''((..)|(.))''' -haystack = '''a''' -matches = [[[0, 1], [0, 1], [], [0, 1]]] -match-limit = 1 -anchored = true - -[[test]] -name = "repetition21" -regex = '''((..)|(.))((..)|(.))''' -haystack = '''a''' -matches = [] -match-limit = 1 - -[[test]] -name = "repetition22" -regex = '''((..)|(.))((..)|(.))((..)|(.))''' -haystack = '''a''' -matches = [] -match-limit = 1 - -[[test]] -name = "repetition24" -regex = '''((..)|(.)){1}''' -haystack = '''a''' -matches = [[[0, 1], [0, 1], [], [0, 1]]] -match-limit = 1 -anchored = true - -[[test]] -name = "repetition25" -regex = '''((..)|(.)){2}''' -haystack = '''a''' -matches = [] -match-limit = 1 - -[[test]] -name = "repetition26" -regex = '''((..)|(.)){3}''' -haystack = '''a''' -matches = [] -match-limit = 1 - -[[test]] -name = "repetition28" -regex = '''((..)|(.))*''' -haystack = '''a''' -matches = [[[0, 1], [0, 1], [], [0, 1]]] -match-limit = 1 -anchored = true - -[[test]] -name = "repetition30" -regex = '''((..)|(.))''' -haystack = '''aa''' -matches = [[[0, 2], [0, 2], [0, 2], []]] -match-limit = 1 -anchored = true - -[[test]] -name = "repetition31" -regex = '''((..)|(.))((..)|(.))''' -haystack = '''aa''' -matches = [[[0, 2], [0, 1], [], [0, 1], [1, 2], [], [1, 2]]] -match-limit = 1 -anchored = true - -[[test]] -name = "repetition32" -regex = '''((..)|(.))((..)|(.))((..)|(.))''' -haystack = '''aa''' -matches = [] -match-limit = 1 - -[[test]] -name = "repetition34" -regex = '''((..)|(.)){1}''' -haystack = '''aa''' -matches = [[[0, 2], [0, 2], [0, 2], []]] -match-limit = 1 -anchored = true - -[[test]] -name = "repetition35" -regex = '''((..)|(.)){2}''' -haystack = '''aa''' -matches = [[[0, 2], [1, 2], [], [1, 2]]] -match-limit = 1 -anchored = true - -[[test]] -name = "repetition36" -regex = '''((..)|(.)){3}''' -haystack = '''aa''' -matches = [] -match-limit = 1 - -[[test]] -name = "repetition38" -regex = '''((..)|(.))*''' -haystack = '''aa''' -matches = [[[0, 2], [0, 2], [0, 2], []]] -match-limit = 1 -anchored = true - -[[test]] -name = "repetition40" -regex = '''((..)|(.))''' -haystack = '''aaa''' -matches = [[[0, 2], [0, 2], [0, 2], []]] -match-limit = 1 -anchored = true - -[[test]] -name = "repetition41" -regex = '''((..)|(.))((..)|(.))''' -haystack = '''aaa''' -matches = [[[0, 3], [0, 2], [0, 2], [], [2, 3], [], [2, 3]]] -match-limit = 1 -anchored = true - -[[test]] -name = "repetition42" -regex = '''((..)|(.))((..)|(.))((..)|(.))''' -haystack = '''aaa''' -matches = [[[0, 3], [0, 1], [], [0, 1], [1, 2], [], [1, 2], [2, 3], [], [2, 3]]] -match-limit = 1 -anchored = true - -[[test]] -name = "repetition44" -regex = '''((..)|(.)){1}''' -haystack = '''aaa''' -matches = 
[[[0, 2], [0, 2], [0, 2], []]] -match-limit = 1 -anchored = true - -# Test added by RE2/Go project. -[[test]] -name = "repetition46" -regex = '''((..)|(.)){2}''' -haystack = '''aaa''' -matches = [[[0, 3], [2, 3], [0, 2], [2, 3]]] -match-limit = 1 -anchored = true - -[[test]] -name = "repetition47" -regex = '''((..)|(.)){3}''' -haystack = '''aaa''' -matches = [[[0, 3], [2, 3], [], [2, 3]]] -match-limit = 1 -anchored = true - -# Test added by RE2/Go project. -[[test]] -name = "repetition50" -regex = '''((..)|(.))*''' -haystack = '''aaa''' -matches = [[[0, 3], [2, 3], [0, 2], [2, 3]]] -match-limit = 1 -anchored = true - -[[test]] -name = "repetition52" -regex = '''((..)|(.))''' -haystack = '''aaaa''' -matches = [[[0, 2], [0, 2], [0, 2], []]] -match-limit = 1 -anchored = true - -[[test]] -name = "repetition53" -regex = '''((..)|(.))((..)|(.))''' -haystack = '''aaaa''' -matches = [[[0, 4], [0, 2], [0, 2], [], [2, 4], [2, 4], []]] -match-limit = 1 -anchored = true - -[[test]] -name = "repetition54" -regex = '''((..)|(.))((..)|(.))((..)|(.))''' -haystack = '''aaaa''' -matches = [[[0, 4], [0, 2], [0, 2], [], [2, 3], [], [2, 3], [3, 4], [], [3, 4]]] -match-limit = 1 -anchored = true - -[[test]] -name = "repetition56" -regex = '''((..)|(.)){1}''' -haystack = '''aaaa''' -matches = [[[0, 2], [0, 2], [0, 2], []]] -match-limit = 1 -anchored = true - -[[test]] -name = "repetition57" -regex = '''((..)|(.)){2}''' -haystack = '''aaaa''' -matches = [[[0, 4], [2, 4], [2, 4], []]] -match-limit = 1 -anchored = true - -# Test added by RE2/Go project. -[[test]] -name = "repetition59" -regex = '''((..)|(.)){3}''' -haystack = '''aaaa''' -matches = [[[0, 4], [3, 4], [0, 2], [3, 4]]] -match-limit = 1 -anchored = true - -[[test]] -name = "repetition61" -regex = '''((..)|(.))*''' -haystack = '''aaaa''' -matches = [[[0, 4], [2, 4], [2, 4], []]] -match-limit = 1 -anchored = true - -[[test]] -name = "repetition63" -regex = '''((..)|(.))''' -haystack = '''aaaaa''' -matches = [[[0, 2], [0, 2], [0, 2], []]] -match-limit = 1 -anchored = true - -[[test]] -name = "repetition64" -regex = '''((..)|(.))((..)|(.))''' -haystack = '''aaaaa''' -matches = [[[0, 4], [0, 2], [0, 2], [], [2, 4], [2, 4], []]] -match-limit = 1 -anchored = true - -[[test]] -name = "repetition65" -regex = '''((..)|(.))((..)|(.))((..)|(.))''' -haystack = '''aaaaa''' -matches = [[[0, 5], [0, 2], [0, 2], [], [2, 4], [2, 4], [], [4, 5], [], [4, 5]]] -match-limit = 1 -anchored = true - -[[test]] -name = "repetition67" -regex = '''((..)|(.)){1}''' -haystack = '''aaaaa''' -matches = [[[0, 2], [0, 2], [0, 2], []]] -match-limit = 1 -anchored = true - -[[test]] -name = "repetition68" -regex = '''((..)|(.)){2}''' -haystack = '''aaaaa''' -matches = [[[0, 4], [2, 4], [2, 4], []]] -match-limit = 1 -anchored = true - -# Test added by RE2/Go project. -[[test]] -name = "repetition70" -regex = '''((..)|(.)){3}''' -haystack = '''aaaaa''' -matches = [[[0, 5], [4, 5], [2, 4], [4, 5]]] -match-limit = 1 -anchored = true - -# Test added by RE2/Go project. 
-[[test]] -name = "repetition73" -regex = '''((..)|(.))*''' -haystack = '''aaaaa''' -matches = [[[0, 5], [4, 5], [2, 4], [4, 5]]] -match-limit = 1 -anchored = true - -[[test]] -name = "repetition75" -regex = '''((..)|(.))''' -haystack = '''aaaaaa''' -matches = [[[0, 2], [0, 2], [0, 2], []]] -match-limit = 1 -anchored = true - -[[test]] -name = "repetition76" -regex = '''((..)|(.))((..)|(.))''' -haystack = '''aaaaaa''' -matches = [[[0, 4], [0, 2], [0, 2], [], [2, 4], [2, 4], []]] -match-limit = 1 -anchored = true - -[[test]] -name = "repetition77" -regex = '''((..)|(.))((..)|(.))((..)|(.))''' -haystack = '''aaaaaa''' -matches = [[[0, 6], [0, 2], [0, 2], [], [2, 4], [2, 4], [], [4, 6], [4, 6], []]] -match-limit = 1 -anchored = true - -[[test]] -name = "repetition79" -regex = '''((..)|(.)){1}''' -haystack = '''aaaaaa''' -matches = [[[0, 2], [0, 2], [0, 2], []]] -match-limit = 1 -anchored = true - -[[test]] -name = "repetition80" -regex = '''((..)|(.)){2}''' -haystack = '''aaaaaa''' -matches = [[[0, 4], [2, 4], [2, 4], []]] -match-limit = 1 -anchored = true - -[[test]] -name = "repetition81" -regex = '''((..)|(.)){3}''' -haystack = '''aaaaaa''' -matches = [[[0, 6], [4, 6], [4, 6], []]] -match-limit = 1 -anchored = true - -[[test]] -name = "repetition83" -regex = '''((..)|(.))*''' -haystack = '''aaaaaa''' -matches = [[[0, 6], [4, 6], [4, 6], []]] -match-limit = 1 -anchored = true - -[[test]] -name = "repetition-expensive90" -regex = '''X(.?){0,}Y''' -haystack = '''X1234567Y''' -matches = [[[0, 9], [7, 8]]] -match-limit = 1 -anchored = true - -[[test]] -name = "repetition-expensive91" -regex = '''X(.?){1,}Y''' -haystack = '''X1234567Y''' -matches = [[[0, 9], [7, 8]]] -match-limit = 1 -anchored = true - -[[test]] -name = "repetition-expensive92" -regex = '''X(.?){2,}Y''' -haystack = '''X1234567Y''' -matches = [[[0, 9], [7, 8]]] -match-limit = 1 -anchored = true - -[[test]] -name = "repetition-expensive93" -regex = '''X(.?){3,}Y''' -haystack = '''X1234567Y''' -matches = [[[0, 9], [7, 8]]] -match-limit = 1 -anchored = true - -[[test]] -name = "repetition-expensive94" -regex = '''X(.?){4,}Y''' -haystack = '''X1234567Y''' -matches = [[[0, 9], [7, 8]]] -match-limit = 1 -anchored = true - -[[test]] -name = "repetition-expensive95" -regex = '''X(.?){5,}Y''' -haystack = '''X1234567Y''' -matches = [[[0, 9], [7, 8]]] -match-limit = 1 -anchored = true - -[[test]] -name = "repetition-expensive96" -regex = '''X(.?){6,}Y''' -haystack = '''X1234567Y''' -matches = [[[0, 9], [7, 8]]] -match-limit = 1 -anchored = true - -[[test]] -name = "repetition-expensive97" -regex = '''X(.?){7,}Y''' -haystack = '''X1234567Y''' -matches = [[[0, 9], [7, 8]]] -match-limit = 1 -anchored = true - -[[test]] -name = "repetition-expensive98" -regex = '''X(.?){8,}Y''' -haystack = '''X1234567Y''' -matches = [[[0, 9], [8, 8]]] -match-limit = 1 -anchored = true - -# Test added by RE2/Go project. -[[test]] -name = "repetition-expensive100" -regex = '''X(.?){0,8}Y''' -haystack = '''X1234567Y''' -matches = [[[0, 9], [8, 8]]] -match-limit = 1 -anchored = true - -# Test added by RE2/Go project. -[[test]] -name = "repetition-expensive102" -regex = '''X(.?){1,8}Y''' -haystack = '''X1234567Y''' -matches = [[[0, 9], [8, 8]]] -match-limit = 1 -anchored = true - -# Test added by RE2/Go project. -[[test]] -name = "repetition-expensive104" -regex = '''X(.?){2,8}Y''' -haystack = '''X1234567Y''' -matches = [[[0, 9], [8, 8]]] -match-limit = 1 -anchored = true - -# Test added by RE2/Go project. 
-[[test]] -name = "repetition-expensive106" -regex = '''X(.?){3,8}Y''' -haystack = '''X1234567Y''' -matches = [[[0, 9], [8, 8]]] -match-limit = 1 -anchored = true - -# Test added by RE2/Go project. -[[test]] -name = "repetition-expensive108" -regex = '''X(.?){4,8}Y''' -haystack = '''X1234567Y''' -matches = [[[0, 9], [8, 8]]] -match-limit = 1 -anchored = true - -# Test added by RE2/Go project. -[[test]] -name = "repetition-expensive110" -regex = '''X(.?){5,8}Y''' -haystack = '''X1234567Y''' -matches = [[[0, 9], [8, 8]]] -match-limit = 1 -anchored = true - -# Test added by RE2/Go project. -[[test]] -name = "repetition-expensive112" -regex = '''X(.?){6,8}Y''' -haystack = '''X1234567Y''' -matches = [[[0, 9], [8, 8]]] -match-limit = 1 -anchored = true - -# Test added by RE2/Go project. -[[test]] -name = "repetition-expensive114" -regex = '''X(.?){7,8}Y''' -haystack = '''X1234567Y''' -matches = [[[0, 9], [8, 8]]] -match-limit = 1 -anchored = true - -[[test]] -name = "repetition-expensive115" -regex = '''X(.?){8,8}Y''' -haystack = '''X1234567Y''' -matches = [[[0, 9], [8, 8]]] -match-limit = 1 -anchored = true - -# Test added by Rust regex project. -[[test]] -name = "repetition-expensive127" -regex = '''(a|ab|c|bcd){0,}(d*)''' -haystack = '''ababcd''' -matches = [[[0, 1], [0, 1], [1, 1]]] -match-limit = 1 -anchored = true - -# Test added by Rust regex project. -[[test]] -name = "repetition-expensive129" -regex = '''(a|ab|c|bcd){1,}(d*)''' -haystack = '''ababcd''' -matches = [[[0, 1], [0, 1], [1, 1]]] -match-limit = 1 -anchored = true - -[[test]] -name = "repetition-expensive130" -regex = '''(a|ab|c|bcd){2,}(d*)''' -haystack = '''ababcd''' -matches = [[[0, 6], [3, 6], [6, 6]]] -match-limit = 1 -anchored = true - -[[test]] -name = "repetition-expensive131" -regex = '''(a|ab|c|bcd){3,}(d*)''' -haystack = '''ababcd''' -matches = [[[0, 6], [3, 6], [6, 6]]] -match-limit = 1 -anchored = true - -[[test]] -name = "repetition-expensive132" -regex = '''(a|ab|c|bcd){4,}(d*)''' -haystack = '''ababcd''' -matches = [] -match-limit = 1 - -# Test added by Rust regex project. -[[test]] -name = "repetition-expensive134" -regex = '''(a|ab|c|bcd){0,10}(d*)''' -haystack = '''ababcd''' -matches = [[[0, 1], [0, 1], [1, 1]]] -match-limit = 1 -anchored = true - -# Test added by Rust regex project. -[[test]] -name = "repetition-expensive136" -regex = '''(a|ab|c|bcd){1,10}(d*)''' -haystack = '''ababcd''' -matches = [[[0, 1], [0, 1], [1, 1]]] -match-limit = 1 -anchored = true - -[[test]] -name = "repetition-expensive137" -regex = '''(a|ab|c|bcd){2,10}(d*)''' -haystack = '''ababcd''' -matches = [[[0, 6], [3, 6], [6, 6]]] -match-limit = 1 -anchored = true - -[[test]] -name = "repetition-expensive138" -regex = '''(a|ab|c|bcd){3,10}(d*)''' -haystack = '''ababcd''' -matches = [[[0, 6], [3, 6], [6, 6]]] -match-limit = 1 -anchored = true - -[[test]] -name = "repetition-expensive139" -regex = '''(a|ab|c|bcd){4,10}(d*)''' -haystack = '''ababcd''' -matches = [] -match-limit = 1 - -# Test added by Rust regex project. -[[test]] -name = "repetition-expensive141" -regex = '''(a|ab|c|bcd)*(d*)''' -haystack = '''ababcd''' -matches = [[[0, 1], [0, 1], [1, 1]]] -match-limit = 1 -anchored = true - -# Test added by Rust regex project. -[[test]] -name = "repetition-expensive143" -regex = '''(a|ab|c|bcd)+(d*)''' -haystack = '''ababcd''' -matches = [[[0, 1], [0, 1], [1, 1]]] -match-limit = 1 -anchored = true - -# Test added by RE2/Go project. 
-[[test]] -name = "repetition-expensive149" -regex = '''(ab|a|c|bcd){0,}(d*)''' -haystack = '''ababcd''' -matches = [[[0, 6], [4, 5], [5, 6]]] -match-limit = 1 -anchored = true - -# Test added by RE2/Go project. -[[test]] -name = "repetition-expensive151" -regex = '''(ab|a|c|bcd){1,}(d*)''' -haystack = '''ababcd''' -matches = [[[0, 6], [4, 5], [5, 6]]] -match-limit = 1 -anchored = true - -# Test added by RE2/Go project. -[[test]] -name = "repetition-expensive153" -regex = '''(ab|a|c|bcd){2,}(d*)''' -haystack = '''ababcd''' -matches = [[[0, 6], [4, 5], [5, 6]]] -match-limit = 1 -anchored = true - -# Test added by RE2/Go project. -[[test]] -name = "repetition-expensive155" -regex = '''(ab|a|c|bcd){3,}(d*)''' -haystack = '''ababcd''' -matches = [[[0, 6], [4, 5], [5, 6]]] -match-limit = 1 -anchored = true - -[[test]] -name = "repetition-expensive156" -regex = '''(ab|a|c|bcd){4,}(d*)''' -haystack = '''ababcd''' -matches = [] -match-limit = 1 - -# Test added by RE2/Go project. -[[test]] -name = "repetition-expensive158" -regex = '''(ab|a|c|bcd){0,10}(d*)''' -haystack = '''ababcd''' -matches = [[[0, 6], [4, 5], [5, 6]]] -match-limit = 1 -anchored = true - -# Test added by RE2/Go project. -[[test]] -name = "repetition-expensive160" -regex = '''(ab|a|c|bcd){1,10}(d*)''' -haystack = '''ababcd''' -matches = [[[0, 6], [4, 5], [5, 6]]] -match-limit = 1 -anchored = true - -# Test added by RE2/Go project. -[[test]] -name = "repetition-expensive162" -regex = '''(ab|a|c|bcd){2,10}(d*)''' -haystack = '''ababcd''' -matches = [[[0, 6], [4, 5], [5, 6]]] -match-limit = 1 -anchored = true - -# Test added by RE2/Go project. -[[test]] -name = "repetition-expensive164" -regex = '''(ab|a|c|bcd){3,10}(d*)''' -haystack = '''ababcd''' -matches = [[[0, 6], [4, 5], [5, 6]]] -match-limit = 1 -anchored = true - -[[test]] -name = "repetition-expensive165" -regex = '''(ab|a|c|bcd){4,10}(d*)''' -haystack = '''ababcd''' -matches = [] -match-limit = 1 - -# Test added by RE2/Go project. -[[test]] -name = "repetition-expensive167" -regex = '''(ab|a|c|bcd)*(d*)''' -haystack = '''ababcd''' -matches = [[[0, 6], [4, 5], [5, 6]]] -match-limit = 1 -anchored = true - -# Test added by RE2/Go project. -[[test]] -name = "repetition-expensive169" -regex = '''(ab|a|c|bcd)+(d*)''' -haystack = '''ababcd''' -matches = [[[0, 6], [4, 5], [5, 6]]] -match-limit = 1 -anchored = true - diff --git a/vendor/regex/testdata/iter.toml b/vendor/regex/testdata/iter.toml deleted file mode 100644 index 329b9f031b2184..00000000000000 --- a/vendor/regex/testdata/iter.toml +++ /dev/null @@ -1,143 +0,0 @@ -[[test]] -name = "1" -regex = "a" -haystack = "aaa" -matches = [[0, 1], [1, 2], [2, 3]] - -[[test]] -name = "2" -regex = "a" -haystack = "aba" -matches = [[0, 1], [2, 3]] - -[[test]] -name = "empty1" -regex = '' -haystack = '' -matches = [[0, 0]] - -[[test]] -name = "empty2" -regex = '' -haystack = 'abc' -matches = [[0, 0], [1, 1], [2, 2], [3, 3]] - -[[test]] -name = "empty3" -regex = '(?:)' -haystack = 'abc' -matches = [[0, 0], [1, 1], [2, 2], [3, 3]] - -[[test]] -name = "empty4" -regex = '(?:)*' -haystack = 'abc' -matches = [[0, 0], [1, 1], [2, 2], [3, 3]] - -[[test]] -name = "empty5" -regex = '(?:)+' -haystack = 'abc' -matches = [[0, 0], [1, 1], [2, 2], [3, 3]] - -[[test]] -name = "empty6" -regex = '(?:)?' 
-haystack = 'abc' -matches = [[0, 0], [1, 1], [2, 2], [3, 3]] - -[[test]] -name = "empty7" -regex = '(?:)(?:)' -haystack = 'abc' -matches = [[0, 0], [1, 1], [2, 2], [3, 3]] - -[[test]] -name = "empty8" -regex = '(?:)+|z' -haystack = 'abc' -matches = [[0, 0], [1, 1], [2, 2], [3, 3]] - -[[test]] -name = "empty9" -regex = 'z|(?:)+' -haystack = 'abc' -matches = [[0, 0], [1, 1], [2, 2], [3, 3]] - -[[test]] -name = "empty10" -regex = '(?:)+|b' -haystack = 'abc' -matches = [[0, 0], [1, 1], [2, 2], [3, 3]] - -[[test]] -name = "empty11" -regex = 'b|(?:)+' -haystack = 'abc' -matches = [[0, 0], [1, 2], [3, 3]] - -[[test]] -name = "start1" -regex = "^a" -haystack = "a" -matches = [[0, 1]] - -[[test]] -name = "start2" -regex = "^a" -haystack = "aa" -matches = [[0, 1]] - -[[test]] -name = "anchored1" -regex = "a" -haystack = "a" -matches = [[0, 1]] -anchored = true - -# This test is pretty subtle. It demonstrates the crucial difference between -# '^a' and 'a' compiled in 'anchored' mode. The former regex exclusively -# matches at the start of a haystack and nowhere else. The latter regex has -# no such restriction, but its automaton is constructed such that it lacks a -# `.*?` prefix. So it can actually produce matches at multiple locations. -# The anchored3 test drives this point home. -[[test]] -name = "anchored2" -regex = "a" -haystack = "aa" -matches = [[0, 1], [1, 2]] -anchored = true - -# Unlikely anchored2, this test stops matching anything after it sees `b` -# since it lacks a `.*?` prefix. Since it is looking for 'a' but sees 'b', it -# determines that there are no remaining matches. -[[test]] -name = "anchored3" -regex = "a" -haystack = "aaba" -matches = [[0, 1], [1, 2]] -anchored = true - -[[test]] -name = "nonempty-followedby-empty" -regex = 'abc|.*?' -haystack = "abczzz" -matches = [[0, 3], [4, 4], [5, 5], [6, 6]] - -[[test]] -name = "nonempty-followedby-oneempty" -regex = 'abc|.*?' -haystack = "abcz" -matches = [[0, 3], [4, 4]] - -[[test]] -name = "nonempty-followedby-onemixed" -regex = 'abc|.*?' -haystack = "abczabc" -matches = [[0, 3], [4, 7]] - -[[test]] -name = "nonempty-followedby-twomixed" -regex = 'abc|.*?' -haystack = "abczzabc" -matches = [[0, 3], [4, 4], [5, 8]] diff --git a/vendor/regex/testdata/leftmost-all.toml b/vendor/regex/testdata/leftmost-all.toml deleted file mode 100644 index e3fd950b6ba1ae..00000000000000 --- a/vendor/regex/testdata/leftmost-all.toml +++ /dev/null @@ -1,25 +0,0 @@ -[[test]] -name = "alt" -regex = 'foo|foobar' -haystack = "foobar" -matches = [[0, 6]] -match-kind = "all" -search-kind = "leftmost" - -[[test]] -name = "multi" -regex = ['foo', 'foobar'] -haystack = "foobar" -matches = [ - { id = 1, span = [0, 6] }, -] -match-kind = "all" -search-kind = "leftmost" - -[[test]] -name = "dotall" -regex = '(?s:.)' -haystack = "foobar" -matches = [[5, 6]] -match-kind = "all" -search-kind = "leftmost" diff --git a/vendor/regex/testdata/line-terminator.toml b/vendor/regex/testdata/line-terminator.toml deleted file mode 100644 index a398dafa2ecf99..00000000000000 --- a/vendor/regex/testdata/line-terminator.toml +++ /dev/null @@ -1,109 +0,0 @@ -# This tests that we can switch the line terminator to the NUL byte. -[[test]] -name = "nul" -regex = '(?m)^[a-z]+$' -haystack = '\x00abc\x00' -matches = [[1, 4]] -unescape = true -line-terminator = '\x00' - -# This tests that '.' will not match the configured line terminator, but will -# match \n. -[[test]] -name = "dot-changes-with-line-terminator" -regex = '.' 
-haystack = '\x00\n' -matches = [[1, 2]] -unescape = true -line-terminator = '\x00' - -# This tests that when we switch the line terminator, \n is no longer -# recognized as the terminator. -[[test]] -name = "not-line-feed" -regex = '(?m)^[a-z]+$' -haystack = '\nabc\n' -matches = [] -unescape = true -line-terminator = '\x00' - -# This tests that we can set the line terminator to a non-ASCII byte and have -# it behave as expected. -[[test]] -name = "non-ascii" -regex = '(?m)^[a-z]+$' -haystack = '\xFFabc\xFF' -matches = [[1, 4]] -unescape = true -line-terminator = '\xFF' -utf8 = false - -# This tests a tricky case where the line terminator is set to \r. This ensures -# that the StartLF look-behind assertion is tracked when computing the start -# state. -[[test]] -name = "carriage" -regex = '(?m)^[a-z]+' -haystack = 'ABC\rabc' -matches = [[4, 7]] -bounds = [4, 7] -unescape = true -line-terminator = '\r' - -# This tests that we can set the line terminator to a byte corresponding to a -# word character, and things work as expected. -[[test]] -name = "word-byte" -regex = '(?m)^[a-z]+$' -haystack = 'ZabcZ' -matches = [[1, 4]] -unescape = true -line-terminator = 'Z' - -# This tests that we can set the line terminator to a byte corresponding to a -# non-word character, and things work as expected. -[[test]] -name = "non-word-byte" -regex = '(?m)^[a-z]+$' -haystack = '%abc%' -matches = [[1, 4]] -unescape = true -line-terminator = '%' - -# This combines "set line terminator to a word byte" with a word boundary -# assertion, which should result in no match even though ^/$ matches. -[[test]] -name = "word-boundary" -regex = '(?m)^\b[a-z]+\b$' -haystack = 'ZabcZ' -matches = [] -unescape = true -line-terminator = 'Z' - -# Like 'word-boundary', but does an anchored search at the point where ^ -# matches, but where \b should not. -[[test]] -name = "word-boundary-at" -regex = '(?m)^\b[a-z]+\b$' -haystack = 'ZabcZ' -matches = [] -bounds = [1, 4] -anchored = true -unescape = true -line-terminator = 'Z' - -# Like 'word-boundary-at', but flips the word boundary to a negation. This -# in particular tests a tricky case in DFA engines, where they must consider -# explicitly that a starting configuration from a custom line terminator may -# also required setting the "is from word byte" flag on a state. Otherwise, -# it's treated as "not from a word byte," which would result in \B not matching -# here when it should. 
-[[test]] -name = "not-word-boundary-at" -regex = '(?m)^\B[a-z]+\B$' -haystack = 'ZabcZ' -matches = [[1, 4]] -bounds = [1, 4] -anchored = true -unescape = true -line-terminator = 'Z' diff --git a/vendor/regex/testdata/misc.toml b/vendor/regex/testdata/misc.toml deleted file mode 100644 index c65531f5d9065a..00000000000000 --- a/vendor/regex/testdata/misc.toml +++ /dev/null @@ -1,99 +0,0 @@ -[[test]] -name = "ascii-literal" -regex = "a" -haystack = "a" -matches = [[0, 1]] - -[[test]] -name = "ascii-literal-not" -regex = "a" -haystack = "z" -matches = [] - -[[test]] -name = "ascii-literal-anchored" -regex = "a" -haystack = "a" -matches = [[0, 1]] -anchored = true - -[[test]] -name = "ascii-literal-anchored-not" -regex = "a" -haystack = "z" -matches = [] -anchored = true - -[[test]] -name = "anchor-start-end-line" -regex = '(?m)^bar$' -haystack = "foo\nbar\nbaz" -matches = [[4, 7]] - -[[test]] -name = "prefix-literal-match" -regex = '^abc' -haystack = "abc" -matches = [[0, 3]] - -[[test]] -name = "prefix-literal-match-ascii" -regex = '^abc' -haystack = "abc" -matches = [[0, 3]] -unicode = false -utf8 = false - -[[test]] -name = "prefix-literal-no-match" -regex = '^abc' -haystack = "zabc" -matches = [] - -[[test]] -name = "one-literal-edge" -regex = 'abc' -haystack = "xxxxxab" -matches = [] - -[[test]] -name = "terminates" -regex = 'a$' -haystack = "a" -matches = [[0, 1]] - -[[test]] -name = "suffix-100" -regex = '.*abcd' -haystack = "abcd" -matches = [[0, 4]] - -[[test]] -name = "suffix-200" -regex = '.*(?:abcd)+' -haystack = "abcd" -matches = [[0, 4]] - -[[test]] -name = "suffix-300" -regex = '.*(?:abcd)+' -haystack = "abcdabcd" -matches = [[0, 8]] - -[[test]] -name = "suffix-400" -regex = '.*(?:abcd)+' -haystack = "abcdxabcd" -matches = [[0, 9]] - -[[test]] -name = "suffix-500" -regex = '.*x(?:abcd)+' -haystack = "abcdxabcd" -matches = [[0, 9]] - -[[test]] -name = "suffix-600" -regex = '[^abcd]*x(?:abcd)+' -haystack = "abcdxabcd" -matches = [[4, 9]] diff --git a/vendor/regex/testdata/multiline.toml b/vendor/regex/testdata/multiline.toml deleted file mode 100644 index 3acc901d50af20..00000000000000 --- a/vendor/regex/testdata/multiline.toml +++ /dev/null @@ -1,845 +0,0 @@ -[[test]] -name = "basic1" -regex = '(?m)^[a-z]+$' -haystack = "abc\ndef\nxyz" -matches = [[0, 3], [4, 7], [8, 11]] - -[[test]] -name = "basic1-crlf" -regex = '(?Rm)^[a-z]+$' -haystack = "abc\ndef\nxyz" -matches = [[0, 3], [4, 7], [8, 11]] - -[[test]] -name = "basic1-crlf-cr" -regex = '(?Rm)^[a-z]+$' -haystack = "abc\rdef\rxyz" -matches = [[0, 3], [4, 7], [8, 11]] - -[[test]] -name = "basic2" -regex = '(?m)^$' -haystack = "abc\ndef\nxyz" -matches = [] - -[[test]] -name = "basic2-crlf" -regex = '(?Rm)^$' -haystack = "abc\ndef\nxyz" -matches = [] - -[[test]] -name = "basic2-crlf-cr" -regex = '(?Rm)^$' -haystack = "abc\rdef\rxyz" -matches = [] - -[[test]] -name = "basic3" -regex = '(?m)^' -haystack = "abc\ndef\nxyz" -matches = [[0, 0], [4, 4], [8, 8]] - -[[test]] -name = "basic3-crlf" -regex = '(?Rm)^' -haystack = "abc\ndef\nxyz" -matches = [[0, 0], [4, 4], [8, 8]] - -[[test]] -name = "basic3-crlf-cr" -regex = '(?Rm)^' -haystack = "abc\rdef\rxyz" -matches = [[0, 0], [4, 4], [8, 8]] - -[[test]] -name = "basic4" -regex = '(?m)$' -haystack = "abc\ndef\nxyz" -matches = [[3, 3], [7, 7], [11, 11]] - -[[test]] -name = "basic4-crlf" -regex = '(?Rm)$' -haystack = "abc\ndef\nxyz" -matches = [[3, 3], [7, 7], [11, 11]] - -[[test]] -name = "basic4-crlf-cr" -regex = '(?Rm)$' -haystack = "abc\rdef\rxyz" -matches = [[3, 3], [7, 7], [11, 11]] 
- -[[test]] -name = "basic5" -regex = '(?m)^[a-z]' -haystack = "abc\ndef\nxyz" -matches = [[0, 1], [4, 5], [8, 9]] - -[[test]] -name = "basic5-crlf" -regex = '(?Rm)^[a-z]' -haystack = "abc\ndef\nxyz" -matches = [[0, 1], [4, 5], [8, 9]] - -[[test]] -name = "basic5-crlf-cr" -regex = '(?Rm)^[a-z]' -haystack = "abc\rdef\rxyz" -matches = [[0, 1], [4, 5], [8, 9]] - -[[test]] -name = "basic6" -regex = '(?m)[a-z]^' -haystack = "abc\ndef\nxyz" -matches = [] - -[[test]] -name = "basic6-crlf" -regex = '(?Rm)[a-z]^' -haystack = "abc\ndef\nxyz" -matches = [] - -[[test]] -name = "basic6-crlf-cr" -regex = '(?Rm)[a-z]^' -haystack = "abc\rdef\rxyz" -matches = [] - -[[test]] -name = "basic7" -regex = '(?m)[a-z]$' -haystack = "abc\ndef\nxyz" -matches = [[2, 3], [6, 7], [10, 11]] - -[[test]] -name = "basic7-crlf" -regex = '(?Rm)[a-z]$' -haystack = "abc\ndef\nxyz" -matches = [[2, 3], [6, 7], [10, 11]] - -[[test]] -name = "basic7-crlf-cr" -regex = '(?Rm)[a-z]$' -haystack = "abc\rdef\rxyz" -matches = [[2, 3], [6, 7], [10, 11]] - -[[test]] -name = "basic8" -regex = '(?m)$[a-z]' -haystack = "abc\ndef\nxyz" -matches = [] - -[[test]] -name = "basic8-crlf" -regex = '(?Rm)$[a-z]' -haystack = "abc\ndef\nxyz" -matches = [] - -[[test]] -name = "basic8-crlf-cr" -regex = '(?Rm)$[a-z]' -haystack = "abc\rdef\rxyz" -matches = [] - -[[test]] -name = "basic9" -regex = '(?m)^$' -haystack = "" -matches = [[0, 0]] - -[[test]] -name = "basic9-crlf" -regex = '(?Rm)^$' -haystack = "" -matches = [[0, 0]] - -[[test]] -name = "repeat1" -regex = '(?m)(?:^$)*' -haystack = "a\nb\nc" -matches = [[0, 0], [1, 1], [2, 2], [3, 3], [4, 4], [5, 5]] - -[[test]] -name = "repeat1-crlf" -regex = '(?Rm)(?:^$)*' -haystack = "a\nb\nc" -matches = [[0, 0], [1, 1], [2, 2], [3, 3], [4, 4], [5, 5]] - -[[test]] -name = "repeat1-crlf-cr" -regex = '(?Rm)(?:^$)*' -haystack = "a\rb\rc" -matches = [[0, 0], [1, 1], [2, 2], [3, 3], [4, 4], [5, 5]] - -[[test]] -name = "repeat1-no-multi" -regex = '(?:^$)*' -haystack = "a\nb\nc" -matches = [[0, 0], [1, 1], [2, 2], [3, 3], [4, 4], [5, 5]] - -[[test]] -name = "repeat1-no-multi-crlf" -regex = '(?R)(?:^$)*' -haystack = "a\nb\nc" -matches = [[0, 0], [1, 1], [2, 2], [3, 3], [4, 4], [5, 5]] - -[[test]] -name = "repeat1-no-multi-crlf-cr" -regex = '(?R)(?:^$)*' -haystack = "a\rb\rc" -matches = [[0, 0], [1, 1], [2, 2], [3, 3], [4, 4], [5, 5]] - -[[test]] -name = "repeat2" -regex = '(?m)(?:^|a)+' -haystack = "a\naaa\n" -matches = [[0, 0], [2, 2], [3, 5], [6, 6]] - -[[test]] -name = "repeat2-crlf" -regex = '(?Rm)(?:^|a)+' -haystack = "a\naaa\n" -matches = [[0, 0], [2, 2], [3, 5], [6, 6]] - -[[test]] -name = "repeat2-crlf-cr" -regex = '(?Rm)(?:^|a)+' -haystack = "a\raaa\r" -matches = [[0, 0], [2, 2], [3, 5], [6, 6]] - -[[test]] -name = "repeat2-no-multi" -regex = '(?:^|a)+' -haystack = "a\naaa\n" -matches = [[0, 0], [2, 5]] - -[[test]] -name = "repeat2-no-multi-crlf" -regex = '(?R)(?:^|a)+' -haystack = "a\naaa\n" -matches = [[0, 0], [2, 5]] - -[[test]] -name = "repeat2-no-multi-crlf-cr" -regex = '(?R)(?:^|a)+' -haystack = "a\raaa\r" -matches = [[0, 0], [2, 5]] - -[[test]] -name = "repeat3" -regex = '(?m)(?:^|a)*' -haystack = "a\naaa\n" -matches = [[0, 0], [1, 1], [2, 2], [3, 5], [6, 6]] - -[[test]] -name = "repeat3-crlf" -regex = '(?Rm)(?:^|a)*' -haystack = "a\naaa\n" -matches = [[0, 0], [1, 1], [2, 2], [3, 5], [6, 6]] - -[[test]] -name = "repeat3-crlf-cr" -regex = '(?Rm)(?:^|a)*' -haystack = "a\raaa\r" -matches = [[0, 0], [1, 1], [2, 2], [3, 5], [6, 6]] - -[[test]] -name = "repeat3-no-multi" -regex = '(?:^|a)*' -haystack = 
"a\naaa\n" -matches = [[0, 0], [1, 1], [2, 5], [6, 6]] - -[[test]] -name = "repeat3-no-multi-crlf" -regex = '(?R)(?:^|a)*' -haystack = "a\naaa\n" -matches = [[0, 0], [1, 1], [2, 5], [6, 6]] - -[[test]] -name = "repeat3-no-multi-crlf-cr" -regex = '(?R)(?:^|a)*' -haystack = "a\raaa\r" -matches = [[0, 0], [1, 1], [2, 5], [6, 6]] - -[[test]] -name = "repeat4" -regex = '(?m)(?:^|a+)' -haystack = "a\naaa\n" -matches = [[0, 0], [2, 2], [3, 5], [6, 6]] - -[[test]] -name = "repeat4-crlf" -regex = '(?Rm)(?:^|a+)' -haystack = "a\naaa\n" -matches = [[0, 0], [2, 2], [3, 5], [6, 6]] - -[[test]] -name = "repeat4-crlf-cr" -regex = '(?Rm)(?:^|a+)' -haystack = "a\raaa\r" -matches = [[0, 0], [2, 2], [3, 5], [6, 6]] - -[[test]] -name = "repeat4-no-multi" -regex = '(?:^|a+)' -haystack = "a\naaa\n" -matches = [[0, 0], [2, 5]] - -[[test]] -name = "repeat4-no-multi-crlf" -regex = '(?R)(?:^|a+)' -haystack = "a\naaa\n" -matches = [[0, 0], [2, 5]] - -[[test]] -name = "repeat4-no-multi-crlf-cr" -regex = '(?R)(?:^|a+)' -haystack = "a\raaa\r" -matches = [[0, 0], [2, 5]] - -[[test]] -name = "repeat5" -regex = '(?m)(?:^|a*)' -haystack = "a\naaa\n" -matches = [[0, 0], [1, 1], [2, 2], [3, 5], [6, 6]] - -[[test]] -name = "repeat5-crlf" -regex = '(?Rm)(?:^|a*)' -haystack = "a\naaa\n" -matches = [[0, 0], [1, 1], [2, 2], [3, 5], [6, 6]] - -[[test]] -name = "repeat5-crlf-cr" -regex = '(?Rm)(?:^|a*)' -haystack = "a\raaa\r" -matches = [[0, 0], [1, 1], [2, 2], [3, 5], [6, 6]] - -[[test]] -name = "repeat5-no-multi" -regex = '(?:^|a*)' -haystack = "a\naaa\n" -matches = [[0, 0], [1, 1], [2, 5], [6, 6]] - -[[test]] -name = "repeat5-no-multi-crlf" -regex = '(?R)(?:^|a*)' -haystack = "a\naaa\n" -matches = [[0, 0], [1, 1], [2, 5], [6, 6]] - -[[test]] -name = "repeat5-no-multi-crlf-cr" -regex = '(?R)(?:^|a*)' -haystack = "a\raaa\r" -matches = [[0, 0], [1, 1], [2, 5], [6, 6]] - -[[test]] -name = "repeat6" -regex = '(?m)(?:^[a-z])+' -haystack = "abc\ndef\nxyz" -matches = [[0, 1], [4, 5], [8, 9]] - -[[test]] -name = "repeat6-crlf" -regex = '(?Rm)(?:^[a-z])+' -haystack = "abc\ndef\nxyz" -matches = [[0, 1], [4, 5], [8, 9]] - -[[test]] -name = "repeat6-crlf-cr" -regex = '(?Rm)(?:^[a-z])+' -haystack = "abc\rdef\rxyz" -matches = [[0, 1], [4, 5], [8, 9]] - -[[test]] -name = "repeat6-no-multi" -regex = '(?:^[a-z])+' -haystack = "abc\ndef\nxyz" -matches = [[0, 1]] - -[[test]] -name = "repeat6-no-multi-crlf" -regex = '(?R)(?:^[a-z])+' -haystack = "abc\ndef\nxyz" -matches = [[0, 1]] - -[[test]] -name = "repeat6-no-multi-crlf-cr" -regex = '(?R)(?:^[a-z])+' -haystack = "abc\rdef\rxyz" -matches = [[0, 1]] - -[[test]] -name = "repeat7" -regex = '(?m)(?:^[a-z]{3}\n?)+' -haystack = "abc\ndef\nxyz" -matches = [[0, 11]] - -[[test]] -name = "repeat7-crlf" -regex = '(?Rm)(?:^[a-z]{3}\n?)+' -haystack = "abc\ndef\nxyz" -matches = [[0, 11]] - -[[test]] -name = "repeat7-crlf-cr" -regex = '(?Rm)(?:^[a-z]{3}\r?)+' -haystack = "abc\rdef\rxyz" -matches = [[0, 11]] - -[[test]] -name = "repeat7-no-multi" -regex = '(?:^[a-z]{3}\n?)+' -haystack = "abc\ndef\nxyz" -matches = [[0, 4]] - -[[test]] -name = "repeat7-no-multi-crlf" -regex = '(?R)(?:^[a-z]{3}\n?)+' -haystack = "abc\ndef\nxyz" -matches = [[0, 4]] - -[[test]] -name = "repeat7-no-multi-crlf-cr" -regex = '(?R)(?:^[a-z]{3}\r?)+' -haystack = "abc\rdef\rxyz" -matches = [[0, 4]] - -[[test]] -name = "repeat8" -regex = '(?m)(?:^[a-z]{3}\n?)*' -haystack = "abc\ndef\nxyz" -matches = [[0, 11]] - -[[test]] -name = "repeat8-crlf" -regex = '(?Rm)(?:^[a-z]{3}\n?)*' -haystack = "abc\ndef\nxyz" -matches = [[0, 11]] - -[[test]] -name 
= "repeat8-crlf-cr" -regex = '(?Rm)(?:^[a-z]{3}\r?)*' -haystack = "abc\rdef\rxyz" -matches = [[0, 11]] - -[[test]] -name = "repeat8-no-multi" -regex = '(?:^[a-z]{3}\n?)*' -haystack = "abc\ndef\nxyz" -matches = [[0, 4], [5, 5], [6, 6], [7, 7], [8, 8], [9, 9], [10, 10], [11, 11]] - -[[test]] -name = "repeat8-no-multi-crlf" -regex = '(?R)(?:^[a-z]{3}\n?)*' -haystack = "abc\ndef\nxyz" -matches = [[0, 4], [5, 5], [6, 6], [7, 7], [8, 8], [9, 9], [10, 10], [11, 11]] - -[[test]] -name = "repeat8-no-multi-crlf-cr" -regex = '(?R)(?:^[a-z]{3}\r?)*' -haystack = "abc\rdef\rxyz" -matches = [[0, 4], [5, 5], [6, 6], [7, 7], [8, 8], [9, 9], [10, 10], [11, 11]] - -[[test]] -name = "repeat9" -regex = '(?m)(?:\n?[a-z]{3}$)+' -haystack = "abc\ndef\nxyz" -matches = [[0, 11]] - -[[test]] -name = "repeat9-crlf" -regex = '(?Rm)(?:\n?[a-z]{3}$)+' -haystack = "abc\ndef\nxyz" -matches = [[0, 11]] - -[[test]] -name = "repeat9-crlf-cr" -regex = '(?Rm)(?:\r?[a-z]{3}$)+' -haystack = "abc\rdef\rxyz" -matches = [[0, 11]] - -[[test]] -name = "repeat9-no-multi" -regex = '(?:\n?[a-z]{3}$)+' -haystack = "abc\ndef\nxyz" -matches = [[7, 11]] - -[[test]] -name = "repeat9-no-multi-crlf" -regex = '(?R)(?:\n?[a-z]{3}$)+' -haystack = "abc\ndef\nxyz" -matches = [[7, 11]] - -[[test]] -name = "repeat9-no-multi-crlf-cr" -regex = '(?R)(?:\r?[a-z]{3}$)+' -haystack = "abc\rdef\rxyz" -matches = [[7, 11]] - -[[test]] -name = "repeat10" -regex = '(?m)(?:\n?[a-z]{3}$)*' -haystack = "abc\ndef\nxyz" -matches = [[0, 11]] - -[[test]] -name = "repeat10-crlf" -regex = '(?Rm)(?:\n?[a-z]{3}$)*' -haystack = "abc\ndef\nxyz" -matches = [[0, 11]] - -[[test]] -name = "repeat10-crlf-cr" -regex = '(?Rm)(?:\r?[a-z]{3}$)*' -haystack = "abc\rdef\rxyz" -matches = [[0, 11]] - -[[test]] -name = "repeat10-no-multi" -regex = '(?:\n?[a-z]{3}$)*' -haystack = "abc\ndef\nxyz" -matches = [[0, 0], [1, 1], [2, 2], [3, 3], [4, 4], [5, 5], [6, 6], [7, 11]] - -[[test]] -name = "repeat10-no-multi-crlf" -regex = '(?R)(?:\n?[a-z]{3}$)*' -haystack = "abc\ndef\nxyz" -matches = [[0, 0], [1, 1], [2, 2], [3, 3], [4, 4], [5, 5], [6, 6], [7, 11]] - -[[test]] -name = "repeat10-no-multi-crlf-cr" -regex = '(?R)(?:\r?[a-z]{3}$)*' -haystack = "abc\rdef\rxyz" -matches = [[0, 0], [1, 1], [2, 2], [3, 3], [4, 4], [5, 5], [6, 6], [7, 11]] - -[[test]] -name = "repeat11" -regex = '(?m)^*' -haystack = "\naa\n" -matches = [[0, 0], [1, 1], [2, 2], [3, 3], [4, 4]] - -[[test]] -name = "repeat11-crlf" -regex = '(?Rm)^*' -haystack = "\naa\n" -matches = [[0, 0], [1, 1], [2, 2], [3, 3], [4, 4]] - -[[test]] -name = "repeat11-crlf-cr" -regex = '(?Rm)^*' -haystack = "\raa\r" -matches = [[0, 0], [1, 1], [2, 2], [3, 3], [4, 4]] - -[[test]] -name = "repeat11-no-multi" -regex = '^*' -haystack = "\naa\n" -matches = [[0, 0], [1, 1], [2, 2], [3, 3], [4, 4]] - -[[test]] -name = "repeat11-no-multi-crlf" -regex = '(?R)^*' -haystack = "\naa\n" -matches = [[0, 0], [1, 1], [2, 2], [3, 3], [4, 4]] - -[[test]] -name = "repeat11-no-multi-crlf-cr" -regex = '(?R)^*' -haystack = "\raa\r" -matches = [[0, 0], [1, 1], [2, 2], [3, 3], [4, 4]] - -[[test]] -name = "repeat12" -regex = '(?m)^+' -haystack = "\naa\n" -matches = [[0, 0], [1, 1], [4, 4]] - -[[test]] -name = "repeat12-crlf" -regex = '(?Rm)^+' -haystack = "\naa\n" -matches = [[0, 0], [1, 1], [4, 4]] - -[[test]] -name = "repeat12-crlf-cr" -regex = '(?Rm)^+' -haystack = "\raa\r" -matches = [[0, 0], [1, 1], [4, 4]] - -[[test]] -name = "repeat12-no-multi" -regex = '^+' -haystack = "\naa\n" -matches = [[0, 0]] - -[[test]] -name = "repeat12-no-multi-crlf" -regex = '(?R)^+' 
-haystack = "\naa\n" -matches = [[0, 0]] - -[[test]] -name = "repeat12-no-multi-crlf-cr" -regex = '(?R)^+' -haystack = "\raa\r" -matches = [[0, 0]] - -[[test]] -name = "repeat13" -regex = '(?m)$*' -haystack = "\naa\n" -matches = [[0, 0], [1, 1], [2, 2], [3, 3], [4, 4]] - -[[test]] -name = "repeat13-crlf" -regex = '(?Rm)$*' -haystack = "\naa\n" -matches = [[0, 0], [1, 1], [2, 2], [3, 3], [4, 4]] - -[[test]] -name = "repeat13-crlf-cr" -regex = '(?Rm)$*' -haystack = "\raa\r" -matches = [[0, 0], [1, 1], [2, 2], [3, 3], [4, 4]] - -[[test]] -name = "repeat13-no-multi" -regex = '$*' -haystack = "\naa\n" -matches = [[0, 0], [1, 1], [2, 2], [3, 3], [4, 4]] - -[[test]] -name = "repeat13-no-multi-crlf" -regex = '(?R)$*' -haystack = "\naa\n" -matches = [[0, 0], [1, 1], [2, 2], [3, 3], [4, 4]] - -[[test]] -name = "repeat13-no-multi-crlf-cr" -regex = '(?R)$*' -haystack = "\raa\r" -matches = [[0, 0], [1, 1], [2, 2], [3, 3], [4, 4]] - -[[test]] -name = "repeat14" -regex = '(?m)$+' -haystack = "\naa\n" -matches = [[0, 0], [3, 3], [4, 4]] - -[[test]] -name = "repeat14-crlf" -regex = '(?Rm)$+' -haystack = "\naa\n" -matches = [[0, 0], [3, 3], [4, 4]] - -[[test]] -name = "repeat14-crlf-cr" -regex = '(?Rm)$+' -haystack = "\raa\r" -matches = [[0, 0], [3, 3], [4, 4]] - -[[test]] -name = "repeat14-no-multi" -regex = '$+' -haystack = "\naa\n" -matches = [[4, 4]] - -[[test]] -name = "repeat14-no-multi-crlf" -regex = '(?R)$+' -haystack = "\naa\n" -matches = [[4, 4]] - -[[test]] -name = "repeat14-no-multi-crlf-cr" -regex = '(?R)$+' -haystack = "\raa\r" -matches = [[4, 4]] - -[[test]] -name = "repeat15" -regex = '(?m)(?:$\n)+' -haystack = "\n\naaa\n\n" -matches = [[0, 2], [5, 7]] - -[[test]] -name = "repeat15-crlf" -regex = '(?Rm)(?:$\n)+' -haystack = "\n\naaa\n\n" -matches = [[0, 2], [5, 7]] - -[[test]] -name = "repeat15-crlf-cr" -regex = '(?Rm)(?:$\r)+' -haystack = "\r\raaa\r\r" -matches = [[0, 2], [5, 7]] - -[[test]] -name = "repeat15-no-multi" -regex = '(?:$\n)+' -haystack = "\n\naaa\n\n" -matches = [] - -[[test]] -name = "repeat15-no-multi-crlf" -regex = '(?R)(?:$\n)+' -haystack = "\n\naaa\n\n" -matches = [] - -[[test]] -name = "repeat15-no-multi-crlf-cr" -regex = '(?R)(?:$\r)+' -haystack = "\r\raaa\r\r" -matches = [] - -[[test]] -name = "repeat16" -regex = '(?m)(?:$\n)*' -haystack = "\n\naaa\n\n" -matches = [[0, 2], [3, 3], [4, 4], [5, 7]] - -[[test]] -name = "repeat16-crlf" -regex = '(?Rm)(?:$\n)*' -haystack = "\n\naaa\n\n" -matches = [[0, 2], [3, 3], [4, 4], [5, 7]] - -[[test]] -name = "repeat16-crlf-cr" -regex = '(?Rm)(?:$\r)*' -haystack = "\r\raaa\r\r" -matches = [[0, 2], [3, 3], [4, 4], [5, 7]] - -[[test]] -name = "repeat16-no-multi" -regex = '(?:$\n)*' -haystack = "\n\naaa\n\n" -matches = [[0, 0], [1, 1], [2, 2], [3, 3], [4, 4], [5, 5], [6, 6], [7, 7]] - -[[test]] -name = "repeat16-no-multi-crlf" -regex = '(?R)(?:$\n)*' -haystack = "\n\naaa\n\n" -matches = [[0, 0], [1, 1], [2, 2], [3, 3], [4, 4], [5, 5], [6, 6], [7, 7]] - -[[test]] -name = "repeat16-no-multi-crlf-cr" -regex = '(?R)(?:$\r)*' -haystack = "\r\raaa\r\r" -matches = [[0, 0], [1, 1], [2, 2], [3, 3], [4, 4], [5, 5], [6, 6], [7, 7]] - -[[test]] -name = "repeat17" -regex = '(?m)(?:$\n^)+' -haystack = "\n\naaa\n\n" -matches = [[0, 2], [5, 7]] - -[[test]] -name = "repeat17-crlf" -regex = '(?Rm)(?:$\n^)+' -haystack = "\n\naaa\n\n" -matches = [[0, 2], [5, 7]] - -[[test]] -name = "repeat17-crlf-cr" -regex = '(?Rm)(?:$\r^)+' -haystack = "\r\raaa\r\r" -matches = [[0, 2], [5, 7]] - -[[test]] -name = "repeat17-no-multi" -regex = '(?:$\n^)+' -haystack = 
"\n\naaa\n\n" -matches = [] - -[[test]] -name = "repeat17-no-multi-crlf" -regex = '(?R)(?:$\n^)+' -haystack = "\n\naaa\n\n" -matches = [] - -[[test]] -name = "repeat17-no-multi-crlf-cr" -regex = '(?R)(?:$\r^)+' -haystack = "\r\raaa\r\r" -matches = [] - -[[test]] -name = "repeat18" -regex = '(?m)(?:^|$)+' -haystack = "\n\naaa\n\n" -matches = [[0, 0], [1, 1], [2, 2], [5, 5], [6, 6], [7, 7]] - -[[test]] -name = "repeat18-crlf" -regex = '(?Rm)(?:^|$)+' -haystack = "\n\naaa\n\n" -matches = [[0, 0], [1, 1], [2, 2], [5, 5], [6, 6], [7, 7]] - -[[test]] -name = "repeat18-crlf-cr" -regex = '(?Rm)(?:^|$)+' -haystack = "\r\raaa\r\r" -matches = [[0, 0], [1, 1], [2, 2], [5, 5], [6, 6], [7, 7]] - -[[test]] -name = "repeat18-no-multi" -regex = '(?:^|$)+' -haystack = "\n\naaa\n\n" -matches = [[0, 0], [7, 7]] - -[[test]] -name = "repeat18-no-multi-crlf" -regex = '(?R)(?:^|$)+' -haystack = "\n\naaa\n\n" -matches = [[0, 0], [7, 7]] - -[[test]] -name = "repeat18-no-multi-crlf-cr" -regex = '(?R)(?:^|$)+' -haystack = "\r\raaa\r\r" -matches = [[0, 0], [7, 7]] - -[[test]] -name = "match-line-100" -regex = '(?m)^.+$' -haystack = "aa\naaaaaaaaaaaaaaaaaaa\n" -matches = [[0, 2], [3, 22]] - -[[test]] -name = "match-line-100-crlf" -regex = '(?Rm)^.+$' -haystack = "aa\naaaaaaaaaaaaaaaaaaa\n" -matches = [[0, 2], [3, 22]] - -[[test]] -name = "match-line-100-crlf-cr" -regex = '(?Rm)^.+$' -haystack = "aa\raaaaaaaaaaaaaaaaaaa\r" -matches = [[0, 2], [3, 22]] - -[[test]] -name = "match-line-200" -regex = '(?m)^.+$' -haystack = "aa\naaaaaaaaaaaaaaaaaaa\n" -matches = [[0, 2], [3, 22]] -unicode = false -utf8 = false - -[[test]] -name = "match-line-200-crlf" -regex = '(?Rm)^.+$' -haystack = "aa\naaaaaaaaaaaaaaaaaaa\n" -matches = [[0, 2], [3, 22]] -unicode = false -utf8 = false - -[[test]] -name = "match-line-200-crlf-cr" -regex = '(?Rm)^.+$' -haystack = "aa\raaaaaaaaaaaaaaaaaaa\r" -matches = [[0, 2], [3, 22]] -unicode = false -utf8 = false diff --git a/vendor/regex/testdata/no-unicode.toml b/vendor/regex/testdata/no-unicode.toml deleted file mode 100644 index 0ddac4c96d116f..00000000000000 --- a/vendor/regex/testdata/no-unicode.toml +++ /dev/null @@ -1,222 +0,0 @@ -[[test]] -name = "invalid-utf8-literal1" -regex = '\xFF' -haystack = '\xFF' -matches = [[0, 1]] -unicode = false -utf8 = false -unescape = true - - -[[test]] -name = "mixed" -regex = '(?:.+)(?-u)(?:.+)' -haystack = '\xCE\x93\xCE\x94\xFF' -matches = [[0, 5]] -utf8 = false -unescape = true - - -[[test]] -name = "case1" -regex = "a" -haystack = "A" -matches = [[0, 1]] -case-insensitive = true -unicode = false - -[[test]] -name = "case2" -regex = "[a-z]+" -haystack = "AaAaA" -matches = [[0, 5]] -case-insensitive = true -unicode = false - -[[test]] -name = "case3" -regex = "[a-z]+" -haystack = "aA\u212AaA" -matches = [[0, 7]] -case-insensitive = true - -[[test]] -name = "case4" -regex = "[a-z]+" -haystack = "aA\u212AaA" -matches = [[0, 2], [5, 7]] -case-insensitive = true -unicode = false - - -[[test]] -name = "negate1" -regex = "[^a]" -haystack = "δ" -matches = [[0, 2]] - -[[test]] -name = "negate2" -regex = "[^a]" -haystack = "δ" -matches = [[0, 1], [1, 2]] -unicode = false -utf8 = false - - -[[test]] -name = "dotstar-prefix1" -regex = "a" -haystack = '\xFFa' -matches = [[1, 2]] -unicode = false -utf8 = false -unescape = true - -[[test]] -name = "dotstar-prefix2" -regex = "a" -haystack = '\xFFa' -matches = [[1, 2]] -utf8 = false -unescape = true - - -[[test]] -name = "null-bytes1" -regex = '[^\x00]+\x00' -haystack = 'foo\x00' -matches = [[0, 4]] -unicode = false -utf8 = 
false -unescape = true - - -[[test]] -name = "word-ascii" -regex = '\w+' -haystack = "aδ" -matches = [[0, 1]] -unicode = false - -[[test]] -name = "word-unicode" -regex = '\w+' -haystack = "aδ" -matches = [[0, 3]] - -[[test]] -name = "decimal-ascii" -regex = '\d+' -haystack = "1२३9" -matches = [[0, 1], [7, 8]] -unicode = false - -[[test]] -name = "decimal-unicode" -regex = '\d+' -haystack = "1२३9" -matches = [[0, 8]] - -[[test]] -name = "space-ascii" -regex = '\s+' -haystack = " \u1680" -matches = [[0, 1]] -unicode = false - -[[test]] -name = "space-unicode" -regex = '\s+' -haystack = " \u1680" -matches = [[0, 4]] - - -[[test]] -# See: https://github.com/rust-lang/regex/issues/484 -name = "iter1-bytes" -regex = '' -haystack = "☃" -matches = [[0, 0], [1, 1], [2, 2], [3, 3]] -utf8 = false - -[[test]] -# See: https://github.com/rust-lang/regex/issues/484 -name = "iter1-utf8" -regex = '' -haystack = "☃" -matches = [[0, 0], [3, 3]] - -[[test]] -# See: https://github.com/rust-lang/regex/issues/484 -# Note that iter2-utf8 doesn't make sense here, since the input isn't UTF-8. -name = "iter2-bytes" -regex = '' -haystack = 'b\xFFr' -matches = [[0, 0], [1, 1], [2, 2], [3, 3]] -unescape = true -utf8 = false - - -# These test that unanchored prefixes can munch through invalid UTF-8 even when -# utf8 is enabled. -# -# This test actually reflects an interesting simplification in how the Thompson -# NFA is constructed. It used to be that the NFA could be built with an -# unanchored prefix that either matched any byte or _only_ matched valid UTF-8. -# But the latter turns out to be pretty precarious when it comes to prefilters, -# because if you search a haystack that contains invalid UTF-8 but have an -# unanchored prefix that requires UTF-8, then prefilters are no longer a valid -# optimization because you actually have to check that everything is valid -# UTF-8. -# -# Originally, I had thought that we needed a valid UTF-8 unanchored prefix in -# order to guarantee that we only match at valid UTF-8 boundaries. But this -# isn't actually true! There are really only two things to consider here: -# -# 1) Will a regex match split an encoded codepoint? No. Because by construction, -# we ensure that a MATCH state can only be reached by following valid UTF-8 (assuming -# all of the UTF-8 modes are enabled). -# -# 2) Will a regex match arbitrary bytes that aren't valid UTF-8? Again, no, -# assuming all of the UTF-8 modes are enabled. -[[test]] -name = "unanchored-invalid-utf8-match-100" -regex = '[a-z]' -haystack = '\xFFa\xFF' -matches = [[1, 2]] -unescape = true -utf8 = false - -# This test shows that we can still prevent a match from occurring by requiring -# that valid UTF-8 match by inserting our own unanchored prefix. Thus, if the -# behavior of not munching through invalid UTF-8 anywhere is needed, then it -# can be achieved thusly. -[[test]] -name = "unanchored-invalid-utf8-nomatch" -regex = '^(?s:.)*?[a-z]' -haystack = '\xFFa\xFF' -matches = [] -unescape = true -utf8 = false - -# This is a tricky test that makes sure we don't accidentally do a kind of -# unanchored search when we've requested that a regex engine not report -# empty matches that split a codepoint. This test caught a regression during -# development where the code for skipping over bad empty matches would do so -# even if the search should have been anchored. This is ultimately what led to -# making 'anchored' an 'Input' option, so that it was always clear what kind -# of search was being performed. 
(Before that, whether a search was anchored -# or not was a config knob on the regex engine.) This did wind up making DFAs -# a little more complex to configure (with their 'StartKind' knob), but it -# generally smoothed out everything else. -# -# Great example of a test whose failure motivated a sweeping API refactoring. -[[test]] -name = "anchored-iter-empty-utf8" -regex = '' -haystack = 'a☃z' -matches = [[0, 0], [1, 1]] -unescape = false -utf8 = true -anchored = true diff --git a/vendor/regex/testdata/overlapping.toml b/vendor/regex/testdata/overlapping.toml deleted file mode 100644 index 7bcd45a2f78e13..00000000000000 --- a/vendor/regex/testdata/overlapping.toml +++ /dev/null @@ -1,280 +0,0 @@ -# NOTE: We define a number of tests where the *match* kind is 'leftmost-first' -# but the *search* kind is 'overlapping'. This is a somewhat nonsensical -# combination and can produce odd results. Nevertheless, those results should -# be consistent so we test them here. (At the time of writing this note, I -# hadn't yet decided whether to make 'leftmost-first' with 'overlapping' result -# in unspecified behavior.) - -# This demonstrates how a full overlapping search is obvious quadratic. This -# regex reports a match for every substring in the haystack. -[[test]] -name = "ungreedy-dotstar-matches-everything-100" -regex = [".*?"] -haystack = "zzz" -matches = [ - { id = 0, span = [0, 0] }, - { id = 0, span = [1, 1] }, - { id = 0, span = [0, 1] }, - { id = 0, span = [2, 2] }, - { id = 0, span = [1, 2] }, - { id = 0, span = [0, 2] }, - { id = 0, span = [3, 3] }, - { id = 0, span = [2, 3] }, - { id = 0, span = [1, 3] }, - { id = 0, span = [0, 3] }, -] -match-kind = "all" -search-kind = "overlapping" - -[[test]] -name = "greedy-dotstar-matches-everything-100" -regex = [".*"] -haystack = "zzz" -matches = [ - { id = 0, span = [0, 0] }, - { id = 0, span = [1, 1] }, - { id = 0, span = [0, 1] }, - { id = 0, span = [2, 2] }, - { id = 0, span = [1, 2] }, - { id = 0, span = [0, 2] }, - { id = 0, span = [3, 3] }, - { id = 0, span = [2, 3] }, - { id = 0, span = [1, 3] }, - { id = 0, span = [0, 3] }, -] -match-kind = "all" -search-kind = "overlapping" - -[[test]] -name = "repetition-plus-leftmost-first-100" -regex = 'a+' -haystack = "aaa" -matches = [[0, 1], [1, 2], [0, 2], [2, 3], [1, 3], [0, 3]] -match-kind = "leftmost-first" -search-kind = "overlapping" - -[[test]] -name = "repetition-plus-leftmost-first-110" -regex = '☃+' -haystack = "☃☃☃" -matches = [[0, 3], [3, 6], [0, 6], [6, 9], [3, 9], [0, 9]] -match-kind = "leftmost-first" -search-kind = "overlapping" - -[[test]] -name = "repetition-plus-all-100" -regex = 'a+' -haystack = "aaa" -matches = [[0, 1], [1, 2], [0, 2], [2, 3], [1, 3], [0, 3]] -match-kind = "all" -search-kind = "overlapping" - -[[test]] -name = "repetition-plus-all-110" -regex = '☃+' -haystack = "☃☃☃" -matches = [[0, 3], [3, 6], [0, 6], [6, 9], [3, 9], [0, 9]] -match-kind = "all" -search-kind = "overlapping" - -[[test]] -name = "repetition-plus-leftmost-first-200" -regex = '(abc)+' -haystack = "zzabcabczzabc" -matches = [ - [[2, 5], [2, 5]], - [[5, 8], [5, 8]], - [[2, 8], [5, 8]], -] -match-kind = "leftmost-first" -search-kind = "overlapping" - -[[test]] -name = "repetition-plus-all-200" -regex = '(abc)+' -haystack = "zzabcabczzabc" -matches = [ - [[2, 5], [2, 5]], - [[5, 8], [5, 8]], - [[2, 8], [5, 8]], - [[10, 13], [10, 13]], -] -match-kind = "all" -search-kind = "overlapping" - -[[test]] -name = "repetition-star-leftmost-first-100" -regex = 'a*' -haystack = "aaa" -matches = [ - [0, 0], - 
[1, 1], - [0, 1], - [2, 2], - [1, 2], - [0, 2], - [3, 3], - [2, 3], - [1, 3], - [0, 3], -] -match-kind = "leftmost-first" -search-kind = "overlapping" - -[[test]] -name = "repetition-star-all-100" -regex = 'a*' -haystack = "aaa" -matches = [ - [0, 0], - [1, 1], - [0, 1], - [2, 2], - [1, 2], - [0, 2], - [3, 3], - [2, 3], - [1, 3], - [0, 3], -] -match-kind = "all" -search-kind = "overlapping" - -[[test]] -name = "repetition-star-leftmost-first-200" -regex = '(abc)*' -haystack = "zzabcabczzabc" -matches = [ - [[0, 0], []], -] -match-kind = "leftmost-first" -search-kind = "overlapping" - -[[test]] -name = "repetition-star-all-200" -regex = '(abc)*' -haystack = "zzabcabczzabc" -matches = [ - [[0, 0], []], - [[1, 1], []], - [[2, 2], []], - [[3, 3], []], - [[4, 4], []], - [[5, 5], []], - [[2, 5], [2, 5]], - [[6, 6], []], - [[7, 7], []], - [[8, 8], []], - [[5, 8], [5, 8]], - [[2, 8], [5, 8]], - [[9, 9], []], - [[10, 10], []], - [[11, 11], []], - [[12, 12], []], - [[13, 13], []], - [[10, 13], [10, 13]], -] -match-kind = "all" -search-kind = "overlapping" - -[[test]] -name = "start-end-rep-leftmost-first" -regex = '(^$)*' -haystack = "abc" -matches = [ - [[0, 0], []], -] -match-kind = "leftmost-first" -search-kind = "overlapping" - -[[test]] -name = "start-end-rep-all" -regex = '(^$)*' -haystack = "abc" -matches = [ - [[0, 0], []], - [[1, 1], []], - [[2, 2], []], - [[3, 3], []], -] -match-kind = "all" -search-kind = "overlapping" - -[[test]] -name = "alt-leftmost-first-100" -regex = 'abc|a' -haystack = "zzabcazzaabc" -matches = [[2, 3], [2, 5]] -match-kind = "leftmost-first" -search-kind = "overlapping" - -[[test]] -name = "alt-all-100" -regex = 'abc|a' -haystack = "zzabcazzaabc" -matches = [[2, 3], [2, 5], [5, 6], [8, 9], [9, 10], [9, 12]] -match-kind = "all" -search-kind = "overlapping" - -[[test]] -name = "empty-000" -regex = "" -haystack = "abc" -matches = [[0, 0], [1, 1], [2, 2], [3, 3]] -match-kind = "all" -search-kind = "overlapping" - -[[test]] -name = "empty-alt-000" -regex = "|b" -haystack = "abc" -matches = [[0, 0], [1, 1], [2, 2], [1, 2], [3, 3]] -match-kind = "all" -search-kind = "overlapping" - -[[test]] -name = "empty-alt-010" -regex = "b|" -haystack = "abc" -matches = [[0, 0], [1, 1], [2, 2], [1, 2], [3, 3]] -match-kind = "all" -search-kind = "overlapping" - -[[test]] -# See: https://github.com/rust-lang/regex/issues/484 -name = "iter1-bytes" -regex = '' -haystack = "☃" -matches = [[0, 0], [1, 1], [2, 2], [3, 3]] -utf8 = false -match-kind = "all" -search-kind = "overlapping" - -[[test]] -# See: https://github.com/rust-lang/regex/issues/484 -name = "iter1-utf8" -regex = '' -haystack = "☃" -matches = [[0, 0], [3, 3]] -match-kind = "all" -search-kind = "overlapping" - -[[test]] -name = "iter1-incomplete-utf8" -regex = '' -haystack = '\xE2\x98' # incomplete snowman -matches = [[0, 0], [1, 1], [2, 2]] -match-kind = "all" -search-kind = "overlapping" -unescape = true -utf8 = false - -[[test]] -name = "scratch" -regex = ['sam', 'samwise'] -haystack = "samwise" -matches = [ - { id = 0, span = [0, 3] }, -] -match-kind = "leftmost-first" -search-kind = "overlapping" diff --git a/vendor/regex/testdata/regex-lite.toml b/vendor/regex/testdata/regex-lite.toml deleted file mode 100644 index 1769d803d4e07c..00000000000000 --- a/vendor/regex/testdata/regex-lite.toml +++ /dev/null @@ -1,98 +0,0 @@ -# These tests are specifically written to test the regex-lite crate. While it -# largely has the same semantics as the regex crate, there are some differences -# around Unicode support and UTF-8. 
-# -# To be clear, regex-lite supports far fewer patterns because of its lack of -# Unicode support, nested character classes and character class set operations. -# What we're talking about here are the patterns that both crates support but -# where the semantics might differ. - -# regex-lite uses ASCII definitions for Perl character classes. -[[test]] -name = "perl-class-decimal" -regex = '\d' -haystack = '᠕' -matches = [] -unicode = true - -# regex-lite uses ASCII definitions for Perl character classes. -[[test]] -name = "perl-class-space" -regex = '\s' -haystack = "\u2000" -matches = [] -unicode = true - -# regex-lite uses ASCII definitions for Perl character classes. -[[test]] -name = "perl-class-word" -regex = '\w' -haystack = 'δ' -matches = [] -unicode = true - -# regex-lite uses the ASCII definition of word for word boundary assertions. -[[test]] -name = "word-boundary" -regex = '\b' -haystack = 'δ' -matches = [] -unicode = true - -# regex-lite uses the ASCII definition of word for negated word boundary -# assertions. But note that it should still not split codepoints! -[[test]] -name = "word-boundary-negated" -regex = '\B' -haystack = 'δ' -matches = [[0, 0], [2, 2]] -unicode = true - -# While we're here, the empty regex---which matches at every -# position---shouldn't split a codepoint either. -[[test]] -name = "empty-no-split-codepoint" -regex = '' -haystack = '💩' -matches = [[0, 0], [4, 4]] -unicode = true - -# A dot always matches a full codepoint. -[[test]] -name = "dot-always-matches-codepoint" -regex = '.' -haystack = '💩' -matches = [[0, 4]] -unicode = false - -# A negated character class also always matches a full codepoint. -[[test]] -name = "negated-class-always-matches-codepoint" -regex = '[^a]' -haystack = '💩' -matches = [[0, 4]] -unicode = false - -# regex-lite only supports ASCII-aware case insensitive matching. -[[test]] -name = "case-insensitive-is-ascii-only" -regex = 's' -haystack = 'ſ' -matches = [] -unicode = true -case-insensitive = true - -# Negated word boundaries shouldn't split a codepoint, but they will match -# between invalid UTF-8. -# -# This test is only valid for a 'bytes' API, but that doesn't (yet) exist in -# regex-lite. This can't happen in the main API because &str can't contain -# invalid UTF-8. 
-# [[test]] -# name = "word-boundary-invalid-utf8" -# regex = '\B' -# haystack = '\xFF\xFF\xFF\xFF' -# unescape = true -# matches = [[0, 0], [1, 1], [2, 2], [3, 3], [4, 4]] -# unicode = true -# utf8 = false diff --git a/vendor/regex/testdata/regression.toml b/vendor/regex/testdata/regression.toml deleted file mode 100644 index 53b0701a3ceeb2..00000000000000 --- a/vendor/regex/testdata/regression.toml +++ /dev/null @@ -1,830 +0,0 @@ -# See: https://github.com/rust-lang/regex/issues/48 -[[test]] -name = "invalid-regex-no-crash-100" -regex = '(*)' -haystack = "" -matches = [] -compiles = false - -# See: https://github.com/rust-lang/regex/issues/48 -[[test]] -name = "invalid-regex-no-crash-200" -regex = '(?:?)' -haystack = "" -matches = [] -compiles = false - -# See: https://github.com/rust-lang/regex/issues/48 -[[test]] -name = "invalid-regex-no-crash-300" -regex = '(?)' -haystack = "" -matches = [] -compiles = false - -# See: https://github.com/rust-lang/regex/issues/48 -[[test]] -name = "invalid-regex-no-crash-400" -regex = '*' -haystack = "" -matches = [] -compiles = false - -# See: https://github.com/rust-lang/regex/issues/75 -[[test]] -name = "unsorted-binary-search-100" -regex = '(?i-u)[a_]+' -haystack = "A_" -matches = [[0, 2]] - -# See: https://github.com/rust-lang/regex/issues/75 -[[test]] -name = "unsorted-binary-search-200" -regex = '(?i-u)[A_]+' -haystack = "a_" -matches = [[0, 2]] - -# See: https://github.com/rust-lang/regex/issues/76 -[[test]] -name = "unicode-case-lower-nocase-flag" -regex = '(?i)\p{Ll}+' -haystack = "ΛΘΓΔα" -matches = [[0, 10]] - -# See: https://github.com/rust-lang/regex/issues/99 -[[test]] -name = "negated-char-class-100" -regex = '(?i)[^x]' -haystack = "x" -matches = [] - -# See: https://github.com/rust-lang/regex/issues/99 -[[test]] -name = "negated-char-class-200" -regex = '(?i)[^x]' -haystack = "X" -matches = [] - -# See: https://github.com/rust-lang/regex/issues/101 -[[test]] -name = "ascii-word-underscore" -regex = '[[:word:]]' -haystack = "_" -matches = [[0, 1]] - -# See: https://github.com/rust-lang/regex/issues/129 -[[test]] -name = "captures-repeat" -regex = '([a-f]){2}(?P<foo>[x-z])' -haystack = "abx" -matches = [ - [[0, 3], [1, 2], [2, 3]], -] - -# See: https://github.com/rust-lang/regex/issues/153 -[[test]] -name = "alt-in-alt-100" -regex = 'ab?|$' -haystack = "az" -matches = [[0, 1], [2, 2]] - -# See: https://github.com/rust-lang/regex/issues/153 -[[test]] -name = "alt-in-alt-200" -regex = '^(?:.*?)(?:\n|\r\n?|$)' -haystack = "ab\rcd" -matches = [[0, 3]] - -# See: https://github.com/rust-lang/regex/issues/169 -[[test]] -name = "leftmost-first-prefix" -regex = 'z*azb' -haystack = "azb" -matches = [[0, 3]] - -# See: https://github.com/rust-lang/regex/issues/191 -[[test]] -name = "many-alternates" -regex = '1|2|3|4|5|6|7|8|9|10|int' -haystack = "int" -matches = [[0, 3]] - -# See: https://github.com/rust-lang/regex/issues/204 -[[test]] -name = "word-boundary-alone-100" -regex = '\b' -haystack = "Should this (work?)" -matches = [[0, 0], [6, 6], [7, 7], [11, 11], [13, 13], [17, 17]] - -# See: https://github.com/rust-lang/regex/issues/204 -[[test]] -name = "word-boundary-alone-200" -regex = '\b' -haystack = "a b c" -matches = [[0, 0], [1, 1], [2, 2], [3, 3], [4, 4], [5, 5]] - -# See: https://github.com/rust-lang/regex/issues/264 -[[test]] -name = "word-boundary-ascii-no-capture" -regex = '\B' -haystack = "\U00028F3E" -matches = [[0, 0], [1, 1], [2, 2], [3, 3], [4, 4]] -unicode = false -utf8 = false - -# See: 
https://github.com/rust-lang/regex/issues/264 -[[test]] -name = "word-boundary-ascii-capture" -regex = '(?:\B)' -haystack = "\U00028F3E" -matches = [[0, 0], [1, 1], [2, 2], [3, 3], [4, 4]] -unicode = false -utf8 = false - -# See: https://github.com/rust-lang/regex/issues/268 -[[test]] -name = "partial-anchor" -regex = '^a|b' -haystack = "ba" -matches = [[0, 1]] - -# See: https://github.com/rust-lang/regex/issues/271 -[[test]] -name = "endl-or-word-boundary" -regex = '(?m:$)|(?-u:\b)' -haystack = "\U0006084E" -matches = [[4, 4]] - -# See: https://github.com/rust-lang/regex/issues/271 -[[test]] -name = "zero-or-end" -regex = '(?i-u:\x00)|$' -haystack = "\U000E682F" -matches = [[4, 4]] - -# See: https://github.com/rust-lang/regex/issues/271 -[[test]] -name = "y-or-endl" -regex = '(?i-u:y)|(?m:$)' -haystack = "\U000B4331" -matches = [[4, 4]] - -# See: https://github.com/rust-lang/regex/issues/271 -[[test]] -name = "word-boundary-start-x" -regex = '(?u:\b)^(?-u:X)' -haystack = "X" -matches = [[0, 1]] - -# See: https://github.com/rust-lang/regex/issues/271 -[[test]] -name = "word-boundary-ascii-start-x" -regex = '(?-u:\b)^(?-u:X)' -haystack = "X" -matches = [[0, 1]] - -# See: https://github.com/rust-lang/regex/issues/271 -[[test]] -name = "end-not-word-boundary" -regex = '$\B' -haystack = "\U0005C124\U000B576C" -matches = [[8, 8]] -unicode = false -utf8 = false - -# See: https://github.com/rust-lang/regex/issues/280 -[[test]] -name = "partial-anchor-alternate-begin" -regex = '^a|z' -haystack = "yyyyya" -matches = [] - -# See: https://github.com/rust-lang/regex/issues/280 -[[test]] -name = "partial-anchor-alternate-end" -regex = 'a$|z' -haystack = "ayyyyy" -matches = [] - -# See: https://github.com/rust-lang/regex/issues/289 -[[test]] -name = "lits-unambiguous-100" -regex = '(?:ABC|CDA|BC)X' -haystack = "CDAX" -matches = [[0, 4]] - -# See: https://github.com/rust-lang/regex/issues/291 -[[test]] -name = "lits-unambiguous-200" -regex = '((IMG|CAM|MG|MB2)_|(DSCN|CIMG))(?P<n>[0-9]+)$' -haystack = "CIMG2341" -matches = [ - [[0, 8], [0, 4], [], [0, 4], [4, 8]], -] - -# See: https://github.com/rust-lang/regex/issues/303 -# -# 2022-09-19: This has now been "properly" fixed in that empty character -# classes are fully supported as something that can never match. This test -# used to be marked as 'compiles = false', but now it works. -[[test]] -name = "negated-full-byte-range" -regex = '[^\x00-\xFF]' -haystack = "" -matches = [] -compiles = true -unicode = false -utf8 = false - -# See: https://github.com/rust-lang/regex/issues/321 -[[test]] -name = "strange-anchor-non-complete-prefix" -regex = 'a^{2}' -haystack = "" -matches = [] - -# See: https://github.com/rust-lang/regex/issues/321 -[[test]] -name = "strange-anchor-non-complete-suffix" -regex = '${2}a' -haystack = "" -matches = [] - -# See: https://github.com/rust-lang/regex/issues/334 -# See: https://github.com/rust-lang/regex/issues/557 -[[test]] -name = "captures-after-dfa-premature-end-100" -regex = 'a(b*(X|$))?' -haystack = "abcbX" -matches = [ - [[0, 1], [], []], -] - -# See: https://github.com/rust-lang/regex/issues/334 -# See: https://github.com/rust-lang/regex/issues/557 -[[test]] -name = "captures-after-dfa-premature-end-200" -regex = 'a(bc*(X|$))?' -haystack = "abcbX" -matches = [ - [[0, 1], [], []], -] - -# See: https://github.com/rust-lang/regex/issues/334 -# See: https://github.com/rust-lang/regex/issues/557 -[[test]] -name = "captures-after-dfa-premature-end-300" -regex = '(aa$)?' 
-haystack = "aaz" -matches = [ - [[0, 0], []], - [[1, 1], []], - [[2, 2], []], - [[3, 3], []], -] - -# Plucked from "Why aren’t regular expressions a lingua franca? an empirical -# study on the re-use and portability of regular expressions", The ACM Joint -# European Software Engineering Conference and Symposium on the Foundations of -# Software Engineering (ESEC/FSE), 2019. -# -# Link: https://dl.acm.org/doi/pdf/10.1145/3338906.3338909 -[[test]] -name = "captures-after-dfa-premature-end-400" -regex = '(a)\d*\.?\d+\b' -haystack = "a0.0c" -matches = [ - [[0, 2], [0, 1]], -] - -# See: https://github.com/rust-lang/regex/issues/437 -[[test]] -name = "literal-panic" -regex = 'typename type\-parameter\-[0-9]+\-[0-9]+::.+' -haystack = "test" -matches = [] - -# See: https://github.com/rust-lang/regex/issues/527 -[[test]] -name = "empty-flag-expr" -regex = '(?:(?:(?x)))' -haystack = "" -matches = [[0, 0]] - -# See: https://github.com/rust-lang/regex/issues/533 -#[[tests]] -#name = "blank-matches-nothing-between-space-and-tab" -#regex = '[[:blank:]]' -#input = '\x0A\x0B\x0C\x0D\x0E\x0F\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1A\x1B\x1C\x1D\x1E\x1F' -#match = false -#unescape = true - -# See: https://github.com/rust-lang/regex/issues/533 -#[[tests]] -#name = "blank-matches-nothing-between-space-and-tab-inverted" -#regex = '^[[:^blank:]]+$' -#input = '\x0A\x0B\x0C\x0D\x0E\x0F\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1A\x1B\x1C\x1D\x1E\x1F' -#match = true -#unescape = true - -# See: https://github.com/rust-lang/regex/issues/555 -[[test]] -name = "invalid-repetition" -regex = '(?m){1,1}' -haystack = "" -matches = [] -compiles = false - -# See: https://github.com/rust-lang/regex/issues/640 -[[test]] -name = "flags-are-unset" -regex = '(?:(?i)foo)|Bar' -haystack = "foo Foo bar Bar" -matches = [[0, 3], [4, 7], [12, 15]] - -# Note that 'Ј' is not 'j', but cyrillic Je -# https://en.wikipedia.org/wiki/Je_(Cyrillic) -# -# See: https://github.com/rust-lang/regex/issues/659 -[[test]] -name = "empty-group-with-unicode" -regex = '(?:)Ј01' -haystack = 'zЈ01' -matches = [[1, 5]] - -# See: https://github.com/rust-lang/regex/issues/579 -[[test]] -name = "word-boundary-weird" -regex = '\b..\b' -haystack = "I have 12, he has 2!" -matches = [[0, 2], [7, 9], [9, 11], [11, 13], [17, 19]] - -# See: https://github.com/rust-lang/regex/issues/579 -[[test]] -name = "word-boundary-weird-ascii" -regex = '\b..\b' -haystack = "I have 12, he has 2!" -matches = [[0, 2], [7, 9], [9, 11], [11, 13], [17, 19]] -unicode = false -utf8 = false - -# See: https://github.com/rust-lang/regex/issues/579 -[[test]] -name = "word-boundary-weird-minimal-ascii" -regex = '\b..\b' -haystack = "az,,b" -matches = [[0, 2], [2, 4]] -unicode = false -utf8 = false - -# See: https://github.com/BurntSushi/ripgrep/issues/1203 -[[test]] -name = "reverse-suffix-100" -regex = '[0-4][0-4][0-4]000' -haystack = "153.230000" -matches = [[4, 10]] - -# See: https://github.com/BurntSushi/ripgrep/issues/1203 -[[test]] -name = "reverse-suffix-200" -regex = '[0-9][0-9][0-9]000' -haystack = "153.230000\n" -matches = [[4, 10]] - -# This is a tricky case for the reverse suffix optimization, because it -# finds the 'foobar' match but the reverse scan must fail to find a match by -# correctly dealing with the word boundary following the 'foobar' literal when -# computing the start state. 
-# -# This test exists because I tried to break the following assumption that -# is currently in the code: that if a suffix is found and the reverse scan -# succeeds, then it's guaranteed that there is an overall match. Namely, the -# 'is_match' routine does *not* do another forward scan in this case because of -# this assumption. -[[test]] -name = "reverse-suffix-300" -regex = '\w+foobar\b' -haystack = "xyzfoobarZ" -matches = [] -unicode = false -utf8 = false - -# See: https://github.com/BurntSushi/ripgrep/issues/1247 -[[test]] -name = "stops" -regex = '\bs(?:[ab])' -haystack = 's\xE4' -matches = [] -unescape = true -utf8 = false - -# See: https://github.com/BurntSushi/ripgrep/issues/1247 -[[test]] -name = "stops-ascii" -regex = '(?-u:\b)s(?:[ab])' -haystack = 's\xE4' -matches = [] -unescape = true -utf8 = false - -# See: https://github.com/rust-lang/regex/issues/850 -[[test]] -name = "adjacent-line-boundary-100" -regex = '(?m)^(?:[^ ]+?)$' -haystack = "line1\nline2" -matches = [[0, 5], [6, 11]] - -# Continued. -[[test]] -name = "adjacent-line-boundary-200" -regex = '(?m)^(?:[^ ]+?)$' -haystack = "A\nB" -matches = [[0, 1], [2, 3]] - -# There is no issue for this bug. -[[test]] -name = "anchored-prefix-100" -regex = '^a[[:^space:]]' -haystack = "a " -matches = [] - -# There is no issue for this bug. -[[test]] -name = "anchored-prefix-200" -regex = '^a[[:^space:]]' -haystack = "foo boo a" -matches = [] - -# There is no issue for this bug. -[[test]] -name = "anchored-prefix-300" -regex = '^-[a-z]' -haystack = "r-f" -matches = [] - -# Tests that a possible Aho-Corasick optimization works correctly. It only -# kicks in when we have a lot of literals. By "works correctly," we mean that -# leftmost-first match semantics are properly respected. That is, samwise -# should match, not sam. -# -# There is no issue for this bug. -[[test]] -name = "aho-corasick-100" -regex = 'samwise|sam|a|b|c|d|e|f|g|h|i|j|k|l|m|n|o|p|q|r|s|t|u|v|w|x|y|z|A|B|C|D|E|F|G|H|I|J|K|L|M|N|O|P|Q|R|S|T|U|V|W|X|Y|Z' -haystack = "samwise" -matches = [[0, 7]] - -# See: https://github.com/rust-lang/regex/issues/921 -[[test]] -name = "interior-anchor-capture" -regex = '(a$)b$' -haystack = 'ab' -matches = [] - -# I found this bug in the course of adding some of the regexes that Ruff uses -# to rebar. It turns out that the lazy DFA was finding a match that was being -# rejected by the one-pass DFA. Yikes. I then minimized the regex and haystack. 
-# -# Source: https://github.com/charliermarsh/ruff/blob/a919041ddaa64cdf6f216f90dd0480dab69fd3ba/crates/ruff/src/rules/pycodestyle/rules/whitespace_around_keywords.rs#L52 -[[test]] -name = "ruff-whitespace-around-keywords" -regex = '^(a|ab)$' -haystack = "ab" -anchored = true -unicode = false -utf8 = true -matches = [[[0, 2], [0, 2]]] - -# From: https://github.com/rust-lang/regex/issues/429 -[[test]] -name = "i429-0" -regex = '(?:(?-u:\b)|(?u:h))+' -haystack = "h" -unicode = true -utf8 = false -matches = [[0, 0], [1, 1]] - -# From: https://github.com/rust-lang/regex/issues/429 -[[test]] -name = "i429-1" -regex = '(?u:\B)' -haystack = "鋸" -unicode = true -utf8 = false -matches = [] - -# From: https://github.com/rust-lang/regex/issues/429 -[[test]] -name = "i429-2" -regex = '(?:(?u:\b)|(?s-u:.))+' -haystack = "oB" -unicode = true -utf8 = false -matches = [[0, 0], [1, 2]] - -# From: https://github.com/rust-lang/regex/issues/429 -[[test]] -name = "i429-3" -regex = '(?:(?-u:\B)|(?su:.))+' -haystack = "\U000FEF80" -unicode = true -utf8 = false -matches = [[0, 0], [1, 1], [2, 2], [3, 3], [4, 4]] - -# From: https://github.com/rust-lang/regex/issues/429 -[[test]] -name = "i429-3-utf8" -regex = '(?:(?-u:\B)|(?su:.))+' -haystack = "\U000FEF80" -unicode = true -utf8 = true -matches = [[0, 0], [4, 4]] - -# From: https://github.com/rust-lang/regex/issues/429 -[[test]] -name = "i429-4" -regex = '(?m:$)(?m:^)(?su:.)' -haystack = "\n‣" -unicode = true -utf8 = false -matches = [[0, 1]] - -# From: https://github.com/rust-lang/regex/issues/429 -[[test]] -name = "i429-5" -regex = '(?m:$)^(?m:^)' -haystack = "\n" -unicode = true -utf8 = false -matches = [[0, 0]] - -# From: https://github.com/rust-lang/regex/issues/429 -[[test]] -name = "i429-6" -regex = '(?P<kp>(?iu:do)(?m:$))*' -haystack = "dodo" -unicode = true -utf8 = false -matches = [ - [[0, 0], []], - [[1, 1], []], - [[2, 4], [2, 4]], -] - -# From: https://github.com/rust-lang/regex/issues/429 -[[test]] -name = "i429-7" -regex = '(?u:\B)' -haystack = "䡁" -unicode = true -utf8 = false -matches = [] - -# From: https://github.com/rust-lang/regex/issues/429 -[[test]] -name = "i429-8" -regex = '(?:(?-u:\b)|(?u:[\u{0}-W]))+' -haystack = "0" -unicode = true -utf8 = false -matches = [[0, 0], [1, 1]] - -# From: https://github.com/rust-lang/regex/issues/429 -[[test]] -name = "i429-9" -regex = '((?m:$)(?-u:\B)(?s-u:.)(?-u:\B)$)' -haystack = "\n\n" -unicode = true -utf8 = false -matches = [ - [[1, 2], [1, 2]], -] - -# From: https://github.com/rust-lang/regex/issues/429 -[[test]] -name = "i429-10" -regex = '(?m:$)(?m:$)^(?su:.)' -haystack = "\n\u0081¨\u200a" -unicode = true -utf8 = false -matches = [[0, 1]] - -# From: https://github.com/rust-lang/regex/issues/429 -[[test]] -name = "i429-11" -regex = '(?-u:\B)(?m:^)' -haystack = "0\n" -unicode = true -utf8 = false -matches = [[2, 2]] - -# From: https://github.com/rust-lang/regex/issues/429 -[[test]] -name = "i429-12" -regex = '(?:(?u:\b)|(?-u:.))+' -haystack = "0" -unicode = true -utf8 = false -matches = [[0, 0], [1, 1]] - -# From: https://github.com/rust-lang/regex/issues/969 -[[test]] -name = "i969" -regex = 'c.*d\z' -haystack = "ababcd" -bounds = [4, 6] -search-kind = "earliest" -matches = [[4, 6]] - -# I found this during the regex-automata migration. This is the fowler basic -# 154 test, but without anchored = true and without a match limit. -# -# This test caught a subtle bug in the hybrid reverse DFA search, where it -# would skip over the termination condition if it entered a start state. 
This -# was a double bug. Firstly, the reverse DFA shouldn't have had start states -# specialized in the first place, and thus it shouldn't have possible to detect -# that the DFA had entered a start state. The second bug was that the start -# state handling was incorrect by jumping over the termination condition. -[[test]] -name = "fowler-basic154-unanchored" -regex = '''a([bc]*)c*''' -haystack = '''abc''' -matches = [[[0, 3], [1, 3]]] - -# From: https://github.com/rust-lang/regex/issues/981 -# -# This was never really a problem in the new architecture because the -# regex-automata engines are far more principled about how they deal with -# look-around. (This was one of the many reasons I wanted to re-work the -# original regex crate engines.) -[[test]] -name = "word-boundary-interact-poorly-with-literal-optimizations" -regex = '(?i:(?:\b|_)win(?:32|64|dows)?(?:\b|_))' -haystack = 'ubi-Darwin-x86_64.tar.gz' -matches = [] - -# This was found during fuzz testing of regex. It provoked a panic in the meta -# engine as a result of the reverse suffix optimization. Namely, it hit a case -# where a suffix match was found, a corresponding reverse match was found, but -# the forward search turned up no match. The forward search should always match -# if the suffix and reverse search match. -# -# This in turn uncovered an inconsistency between the PikeVM and the DFA (lazy -# and fully compiled) engines. It was caused by a mishandling of the collection -# of NFA state IDs in the generic determinization code (which is why both types -# of DFA were impacted). Namely, when a fail state was encountered (that's the -# `[^\s\S]` in the pattern below), then it would just stop collecting states. -# But that's not correct since a later state could lead to a match. -[[test]] -name = "impossible-branch" -regex = '.*[^\s\S]A|B' -haystack = "B" -matches = [[0, 1]] - -# This was found during fuzz testing in regex-lite. The regex crate never -# suffered from this bug, but it causes regex-lite to incorrectly compile -# captures. -[[test]] -name = "captures-wrong-order" -regex = '(a){0}(a)' -haystack = 'a' -matches = [[[0, 1], [], [0, 1]]] - -# This tests a bug in how quit states are handled in the DFA. At some point -# during development, the DFAs were tweaked slightly such that if they hit -# a quit state (which means, they hit a byte that the caller configured should -# stop the search), then it might not return an error necessarily. Namely, if a -# match had already been found, then it would be returned instead of an error. -# -# But this is actually wrong! Why? Because even though a match had been found, -# it wouldn't be fully correct to return it once a quit state has been seen -# because you can't determine whether the match offset returned is the correct -# greedy/leftmost-first match. Since you can't complete the search as requested -# by the caller, the DFA should just stop and return an error. -# -# Interestingly, this does seem to produce an unavoidable difference between -# 'try_is_match().unwrap()' and 'try_find().unwrap().is_some()' for the DFAs. -# The former will stop immediately once a match is known to occur and return -# 'Ok(true)', where as the latter could find the match but quit with an -# 'Err(..)' first. -# -# Thankfully, I believe this inconsistency between 'is_match()' and 'find()' -# cannot be observed in the higher level meta regex API because it specifically -# will try another engine that won't fail in the case of a DFA failing. 
-# -# This regression happened in the regex crate rewrite, but before anything got -# released. -[[test]] -name = "negated-unicode-word-boundary-dfa-fail" -regex = '\B.*' -haystack = "!\u02D7" -matches = [[0, 3]] - -# This failure was found in the *old* regex crate (prior to regex 1.9), but -# I didn't investigate why. My best guess is that it's a literal optimization -# bug. It didn't occur in the rewrite. -[[test]] -name = "missed-match" -regex = 'e..+e.ee>' -haystack = 'Zeee.eZZZZZZZZeee>eeeeeee>' -matches = [[1, 26]] - -# This test came from the 'ignore' crate and tripped a bug in how accelerated -# DFA states were handled in an overlapping search. -[[test]] -name = "regex-to-glob" -regex = ['(?-u)^path1/[^/]*$'] -haystack = "path1/foo" -matches = [[0, 9]] -utf8 = false -match-kind = "all" -search-kind = "overlapping" - -# See: https://github.com/rust-lang/regex/issues/1060 -[[test]] -name = "reverse-inner-plus-shorter-than-expected" -regex = '(?:(\d+)[:.])?(\d{1,2})[:.](\d{2})' -haystack = '102:12:39' -matches = [[[0, 9], [0, 3], [4, 6], [7, 9]]] - -# Like reverse-inner-plus-shorter-than-expected, but using a far simpler regex -# to demonstrate the extent of the rot. Sigh. -# -# See: https://github.com/rust-lang/regex/issues/1060 -[[test]] -name = "reverse-inner-short" -regex = '(?:([0-9][0-9][0-9]):)?([0-9][0-9]):([0-9][0-9])' -haystack = '102:12:39' -matches = [[[0, 9], [0, 3], [4, 6], [7, 9]]] - -# This regression test was found via the RegexSet APIs. It triggered a -# particular code path where a regex was compiled with 'All' match semantics -# (to support overlapping search), but got funneled down into a standard -# leftmost search when calling 'is_match'. This is fine on its own, but the -# leftmost search will use a prefilter and that's where this went awry. -# -# Namely, since 'All' semantics were used, the aho-corasick prefilter was -# incorrectly compiled with 'Standard' semantics. This was wrong because -# 'Standard' immediately attempts to report a match at every position, even if -# that would mean reporting a match past the leftmost match before reporting -# the leftmost match. This breaks the prefilter contract of never having false -# negatives and leads overall to the engine not finding a match. -# -# See: https://github.com/rust-lang/regex/issues/1070 -[[test]] -name = "prefilter-with-aho-corasick-standard-semantics" -regex = '(?m)^ *v [0-9]' -haystack = 'v 0' -matches = [ - { id = 0, spans = [[0, 3]] }, -] -match-kind = "all" -search-kind = "overlapping" -unicode = true -utf8 = true - -# This tests that the PikeVM and the meta regex agree on a particular regex. -# This test previously failed when the ad hoc engines inside the meta engine -# did not handle quit states correctly. Namely, the Unicode word boundary here -# combined with a non-ASCII codepoint provokes the quit state. The ad hoc -# engines were previously returning a match even after entering the quit state -# if a match had been previously detected, but this is incorrect. The reason -# is that if a quit state is found, then the search must give up *immediately* -# because it prevents the search from finding the "proper" leftmost-first -# match. If it instead returns a match that has been found, it risks reporting -# an improper match, as it did in this case. 
-# -# See: https://github.com/rust-lang/regex/issues/1046 -[[test]] -name = "non-prefix-literal-quit-state" -regex = '.+\b\n' -haystack = "β77\n" -matches = [[0, 5]] - -# This is a regression test for some errant HIR interval set operations that -# were made in the regex-syntax 0.8.0 release and then reverted in 0.8.1. The -# issue here is that the HIR produced from the regex had out-of-order ranges. -# -# See: https://github.com/rust-lang/regex/issues/1103 -# Ref: https://github.com/rust-lang/regex/pull/1051 -# Ref: https://github.com/rust-lang/regex/pull/1102 -[[test]] -name = "hir-optimization-out-of-order-class" -regex = '^[[:alnum:]./-]+$' -haystack = "a-b" -matches = [[0, 3]] - -# This is a regression test for an improper reverse suffix optimization. This -# occurred when I "broadened" the applicability of the optimization to include -# multiple possible literal suffixes instead of only sticking to a non-empty -# longest common suffix. It turns out that, at least given how the reverse -# suffix optimization works, we need to stick to the longest common suffix for -# now. -# -# See: https://github.com/rust-lang/regex/issues/1110 -# See also: https://github.com/astral-sh/ruff/pull/7980 -[[test]] -name = 'improper-reverse-suffix-optimization' -regex = '(\\N\{[^}]+})|([{}])' -haystack = 'hiya \N{snowman} bye' -matches = [[[5, 16], [5, 16], []]] diff --git a/vendor/regex/testdata/set.toml b/vendor/regex/testdata/set.toml deleted file mode 100644 index 049e8a89d1bcb8..00000000000000 --- a/vendor/regex/testdata/set.toml +++ /dev/null @@ -1,641 +0,0 @@ -# Basic multi-regex tests. - -[[test]] -name = "basic10" -regex = ["a", "a"] -haystack = "a" -matches = [ - { id = 0, span = [0, 1] }, - { id = 1, span = [0, 1] }, -] -match-kind = "all" -search-kind = "overlapping" - -[[test]] -name = "basic10-leftmost-first" -regex = ["a", "a"] -haystack = "a" -matches = [ - { id = 0, span = [0, 1] }, -] -match-kind = "leftmost-first" -search-kind = "leftmost" - -[[test]] -name = "basic20" -regex = ["a", "a"] -haystack = "ba" -matches = [ - { id = 0, span = [1, 2] }, - { id = 1, span = [1, 2] }, -] -match-kind = "all" -search-kind = "overlapping" - -[[test]] -name = "basic30" -regex = ["a", "b"] -haystack = "a" -matches = [ - { id = 0, span = [0, 1] }, -] -match-kind = "all" -search-kind = "overlapping" - -[[test]] -name = "basic40" -regex = ["a", "b"] -haystack = "b" -matches = [ - { id = 1, span = [0, 1] }, -] -match-kind = "all" -search-kind = "overlapping" - -[[test]] -name = "basic50" -regex = ["a|b", "b|a"] -haystack = "b" -matches = [ - { id = 0, span = [0, 1] }, - { id = 1, span = [0, 1] }, -] -match-kind = "all" -search-kind = "overlapping" - -[[test]] -name = "basic60" -regex = ["foo", "oo"] -haystack = "foo" -matches = [ - { id = 0, span = [0, 3] }, - { id = 1, span = [1, 3] }, -] -match-kind = "all" -search-kind = "overlapping" - -[[test]] -name = "basic60-leftmost-first" -regex = ["foo", "oo"] -haystack = "foo" -matches = [ - { id = 0, span = [0, 3] }, -] -match-kind = "leftmost-first" -search-kind = "leftmost" - -[[test]] -name = "basic61" -regex = ["oo", "foo"] -haystack = "foo" -matches = [ - { id = 1, span = [0, 3] }, - { id = 0, span = [1, 3] }, -] -match-kind = "all" -search-kind = "overlapping" - -[[test]] -name = "basic61-leftmost-first" -regex = ["oo", "foo"] -haystack = "foo" -matches = [ - { id = 1, span = [0, 3] }, -] -match-kind = "leftmost-first" -search-kind = "leftmost" - -[[test]] -name = "basic70" -regex = ["abcd", "bcd", "cd", "d"] -haystack = "abcd" -matches = [ - { id = 0, 
span = [0, 4] }, - { id = 1, span = [1, 4] }, - { id = 2, span = [2, 4] }, - { id = 3, span = [3, 4] }, -] -match-kind = "all" -search-kind = "overlapping" - -[[test]] -name = "basic71" -regex = ["bcd", "cd", "d", "abcd"] -haystack = "abcd" -matches = [ - { id = 3, span = [0, 4] }, -] -match-kind = "leftmost-first" -search-kind = "leftmost" - -[[test]] -name = "basic80" -regex = ["^foo", "bar$"] -haystack = "foo" -matches = [ - { id = 0, span = [0, 3] }, -] -match-kind = "all" -search-kind = "overlapping" - -[[test]] -name = "basic81" -regex = ["^foo", "bar$"] -haystack = "foo bar" -matches = [ - { id = 0, span = [0, 3] }, - { id = 1, span = [4, 7] }, -] -match-kind = "all" -search-kind = "overlapping" - -[[test]] -name = "basic82" -regex = ["^foo", "bar$"] -haystack = "bar" -matches = [ - { id = 1, span = [0, 3] }, -] -match-kind = "all" -search-kind = "overlapping" - -[[test]] -name = "basic90" -regex = ["[a-z]+$", "foo"] -haystack = "01234 foo" -matches = [ - { id = 0, span = [8, 9] }, - { id = 0, span = [7, 9] }, - { id = 0, span = [6, 9] }, - { id = 1, span = [6, 9] }, -] -match-kind = "all" -search-kind = "overlapping" - -[[test]] -name = "basic91" -regex = ["[a-z]+$", "foo"] -haystack = "foo 01234" -matches = [ - { id = 1, span = [0, 3] }, -] -match-kind = "all" -search-kind = "overlapping" - -[[test]] -name = "basic100" -regex = [".*?", "a"] -haystack = "zzza" -matches = [ - { id = 0, span = [0, 0] }, - { id = 0, span = [1, 1] }, - { id = 0, span = [0, 1] }, - { id = 0, span = [2, 2] }, - { id = 0, span = [1, 2] }, - { id = 0, span = [0, 2] }, - { id = 0, span = [3, 3] }, - { id = 0, span = [2, 3] }, - { id = 0, span = [1, 3] }, - { id = 0, span = [0, 3] }, - { id = 0, span = [4, 4] }, - { id = 0, span = [3, 4] }, - { id = 0, span = [2, 4] }, - { id = 0, span = [1, 4] }, - { id = 0, span = [0, 4] }, - { id = 1, span = [3, 4] }, -] -match-kind = "all" -search-kind = "overlapping" - -[[test]] -name = "basic101" -regex = [".*", "a"] -haystack = "zzza" -matches = [ - { id = 0, span = [0, 0] }, - { id = 0, span = [1, 1] }, - { id = 0, span = [0, 1] }, - { id = 0, span = [2, 2] }, - { id = 0, span = [1, 2] }, - { id = 0, span = [0, 2] }, - { id = 0, span = [3, 3] }, - { id = 0, span = [2, 3] }, - { id = 0, span = [1, 3] }, - { id = 0, span = [0, 3] }, - { id = 0, span = [4, 4] }, - { id = 0, span = [3, 4] }, - { id = 0, span = [2, 4] }, - { id = 0, span = [1, 4] }, - { id = 0, span = [0, 4] }, - { id = 1, span = [3, 4] }, -] -match-kind = "all" -search-kind = "overlapping" - -[[test]] -name = "basic102" -regex = [".*", "a"] -haystack = "zzz" -matches = [ - { id = 0, span = [0, 0] }, - { id = 0, span = [1, 1] }, - { id = 0, span = [0, 1] }, - { id = 0, span = [2, 2] }, - { id = 0, span = [1, 2] }, - { id = 0, span = [0, 2] }, - { id = 0, span = [3, 3] }, - { id = 0, span = [2, 3] }, - { id = 0, span = [1, 3] }, - { id = 0, span = [0, 3] }, -] -match-kind = "all" -search-kind = "overlapping" - -[[test]] -name = "basic110" -regex = ['\ba\b'] -haystack = "hello a bye" -matches = [ - { id = 0, span = [6, 7] }, -] -match-kind = "all" -search-kind = "overlapping" - -[[test]] -name = "basic111" -regex = ['\ba\b', '\be\b'] -haystack = "hello a bye e" -matches = [ - { id = 0, span = [6, 7] }, - { id = 1, span = [12, 13] }, -] -match-kind = "all" -search-kind = "overlapping" - -[[test]] -name = "basic120" -regex = ["a"] -haystack = "a" -matches = [ - { id = 0, span = [0, 1] }, -] -match-kind = "all" -search-kind = "overlapping" - -[[test]] -name = "basic121" -regex = [".*a"] -haystack = "a" 
-matches = [ - { id = 0, span = [0, 1] }, -] -match-kind = "all" -search-kind = "overlapping" - -[[test]] -name = "basic122" -regex = [".*a", "β"] -haystack = "β" -matches = [ - { id = 1, span = [0, 2] }, -] -match-kind = "all" -search-kind = "overlapping" - -[[test]] -name = "basic130" -regex = ["ab", "b"] -haystack = "ba" -matches = [ - { id = 1, span = [0, 1] }, -] -match-kind = "all" -search-kind = "overlapping" - -# These test cases where one of the regexes matches the empty string. - -[[test]] -name = "empty10" -regex = ["", "a"] -haystack = "abc" -matches = [ - { id = 0, span = [0, 0] }, - { id = 1, span = [0, 1] }, - { id = 0, span = [1, 1] }, - { id = 0, span = [2, 2] }, - { id = 0, span = [3, 3] }, -] -match-kind = "all" -search-kind = "overlapping" - -[[test]] -name = "empty10-leftmost-first" -regex = ["", "a"] -haystack = "abc" -matches = [ - { id = 0, span = [0, 0] }, - { id = 0, span = [1, 1] }, - { id = 0, span = [2, 2] }, - { id = 0, span = [3, 3] }, -] -match-kind = "leftmost-first" -search-kind = "leftmost" - -[[test]] -name = "empty11" -regex = ["a", ""] -haystack = "abc" -matches = [ - { id = 1, span = [0, 0] }, - { id = 0, span = [0, 1] }, - { id = 1, span = [1, 1] }, - { id = 1, span = [2, 2] }, - { id = 1, span = [3, 3] }, -] -match-kind = "all" -search-kind = "overlapping" - -[[test]] -name = "empty11-leftmost-first" -regex = ["a", ""] -haystack = "abc" -matches = [ - { id = 0, span = [0, 1] }, - { id = 1, span = [2, 2] }, - { id = 1, span = [3, 3] }, -] -match-kind = "leftmost-first" -search-kind = "leftmost" - -[[test]] -name = "empty20" -regex = ["", "b"] -haystack = "abc" -matches = [ - { id = 0, span = [0, 0] }, - { id = 0, span = [1, 1] }, - { id = 1, span = [1, 2] }, - { id = 0, span = [2, 2] }, - { id = 0, span = [3, 3] }, -] -match-kind = "all" -search-kind = "overlapping" - -[[test]] -name = "empty20-leftmost-first" -regex = ["", "b"] -haystack = "abc" -matches = [ - { id = 0, span = [0, 0] }, - { id = 0, span = [1, 1] }, - { id = 0, span = [2, 2] }, - { id = 0, span = [3, 3] }, -] -match-kind = "leftmost-first" -search-kind = "leftmost" - -[[test]] -name = "empty21" -regex = ["b", ""] -haystack = "abc" -matches = [ - { id = 1, span = [0, 0] }, - { id = 1, span = [1, 1] }, - { id = 0, span = [1, 2] }, - { id = 1, span = [2, 2] }, - { id = 1, span = [3, 3] }, -] -match-kind = "all" -search-kind = "overlapping" - -[[test]] -name = "empty21-leftmost-first" -regex = ["b", ""] -haystack = "abc" -matches = [ - { id = 1, span = [0, 0] }, - { id = 0, span = [1, 2] }, - { id = 1, span = [3, 3] }, -] -match-kind = "leftmost-first" -search-kind = "leftmost" - -[[test]] -name = "empty22" -regex = ["(?:)", "b"] -haystack = "abc" -matches = [ - { id = 0, span = [0, 0] }, - { id = 0, span = [1, 1] }, - { id = 1, span = [1, 2] }, - { id = 0, span = [2, 2] }, - { id = 0, span = [3, 3] }, -] -match-kind = "all" -search-kind = "overlapping" - -[[test]] -name = "empty23" -regex = ["b", "(?:)"] -haystack = "abc" -matches = [ - { id = 1, span = [0, 0] }, - { id = 1, span = [1, 1] }, - { id = 0, span = [1, 2] }, - { id = 1, span = [2, 2] }, - { id = 1, span = [3, 3] }, -] -match-kind = "all" -search-kind = "overlapping" - -[[test]] -name = "empty30" -regex = ["", "z"] -haystack = "abc" -matches = [ - { id = 0, span = [0, 0] }, - { id = 0, span = [1, 1] }, - { id = 0, span = [2, 2] }, - { id = 0, span = [3, 3] }, -] -match-kind = "all" -search-kind = "overlapping" - -[[test]] -name = "empty30-leftmost-first" -regex = ["", "z"] -haystack = "abc" -matches = [ - { id = 0, span = [0, 
0] }, - { id = 0, span = [1, 1] }, - { id = 0, span = [2, 2] }, - { id = 0, span = [3, 3] }, -] -match-kind = "leftmost-first" -search-kind = "leftmost" - -[[test]] -name = "empty31" -regex = ["z", ""] -haystack = "abc" -matches = [ - { id = 1, span = [0, 0] }, - { id = 1, span = [1, 1] }, - { id = 1, span = [2, 2] }, - { id = 1, span = [3, 3] }, -] -match-kind = "all" -search-kind = "overlapping" - -[[test]] -name = "empty31-leftmost-first" -regex = ["z", ""] -haystack = "abc" -matches = [ - { id = 1, span = [0, 0] }, - { id = 1, span = [1, 1] }, - { id = 1, span = [2, 2] }, - { id = 1, span = [3, 3] }, -] -match-kind = "leftmost-first" -search-kind = "leftmost" - -[[test]] -name = "empty40" -regex = ["c(?:)", "b"] -haystack = "abc" -matches = [ - { id = 1, span = [1, 2] }, - { id = 0, span = [2, 3] }, -] -match-kind = "all" -search-kind = "overlapping" - -[[test]] -name = "empty40-leftmost-first" -regex = ["c(?:)", "b"] -haystack = "abc" -matches = [ - { id = 1, span = [1, 2] }, - { id = 0, span = [2, 3] }, -] -match-kind = "leftmost-first" -search-kind = "leftmost" - -# These test cases where there are no matches. - -[[test]] -name = "nomatch10" -regex = ["a", "a"] -haystack = "b" -matches = [] -match-kind = "all" -search-kind = "overlapping" - -[[test]] -name = "nomatch20" -regex = ["^foo", "bar$"] -haystack = "bar foo" -matches = [] -match-kind = "all" -search-kind = "overlapping" - -[[test]] -name = "nomatch30" -regex = [] -haystack = "a" -matches = [] -match-kind = "all" -search-kind = "overlapping" - -[[test]] -name = "nomatch40" -regex = ["^rooted$", '\.log$'] -haystack = "notrooted" -matches = [] -match-kind = "all" -search-kind = "overlapping" - -# These test multi-regex searches with capture groups. -# -# NOTE: I wrote these tests in the course of developing a first class API for -# overlapping capturing group matches, but ultimately removed that API because -# the semantics for overlapping matches aren't totally clear. However, I've -# left the tests because I believe the semantics for these patterns are clear -# and because we can still test our "which patterns matched" APIs with them. 
- -[[test]] -name = "caps-010" -regex = ['^(\w+) (\w+)$', '^(\S+) (\S+)$'] -haystack = "Bruce Springsteen" -matches = [ - { id = 0, spans = [[0, 17], [0, 5], [6, 17]] }, - { id = 1, spans = [[0, 17], [0, 5], [6, 17]] }, -] -match-kind = "all" -search-kind = "overlapping" -unicode = false -utf8 = false - -[[test]] -name = "caps-020" -regex = ['^(\w+) (\w+)$', '^[A-Z](\S+) [A-Z](\S+)$'] -haystack = "Bruce Springsteen" -matches = [ - { id = 0, spans = [[0, 17], [0, 5], [6, 17]] }, - { id = 1, spans = [[0, 17], [1, 5], [7, 17]] }, -] -match-kind = "all" -search-kind = "overlapping" -unicode = false -utf8 = false - -[[test]] -name = "caps-030" -regex = ['^(\w+) (\w+)$', '^([A-Z])(\S+) ([A-Z])(\S+)$'] -haystack = "Bruce Springsteen" -matches = [ - { id = 0, spans = [[0, 17], [0, 5], [6, 17]] }, - { id = 1, spans = [[0, 17], [0, 1], [1, 5], [6, 7], [7, 17]] }, -] -match-kind = "all" -search-kind = "overlapping" -unicode = false -utf8 = false - -[[test]] -name = "caps-110" -regex = ['(\w+) (\w+)', '(\S+) (\S+)'] -haystack = "Bruce Springsteen" -matches = [ - { id = 0, spans = [[0, 17], [0, 5], [6, 17]] }, -] -match-kind = "leftmost-first" -search-kind = "leftmost" -unicode = false -utf8 = false - -[[test]] -name = "caps-120" -regex = ['(\w+) (\w+)', '(\S+) (\S+)'] -haystack = "&ruce $pringsteen" -matches = [ - { id = 1, spans = [[0, 17], [0, 5], [6, 17]] }, -] -match-kind = "leftmost-first" -search-kind = "leftmost" -unicode = false -utf8 = false - -[[test]] -name = "caps-121" -regex = ['(\w+) (\w+)', '(\S+) (\S+)'] -haystack = "&ruce $pringsteen Foo Bar" -matches = [ - { id = 1, spans = [[0, 17], [0, 5], [6, 17]] }, - { id = 0, spans = [[18, 25], [18, 21], [22, 25]] }, -] -match-kind = "leftmost-first" -search-kind = "leftmost" -unicode = false -utf8 = false diff --git a/vendor/regex/testdata/substring.toml b/vendor/regex/testdata/substring.toml deleted file mode 100644 index 69595ce851de32..00000000000000 --- a/vendor/regex/testdata/substring.toml +++ /dev/null @@ -1,36 +0,0 @@ -# These tests check that regex engines perform as expected when the search is -# instructed to only search a substring of a haystack instead of the entire -# haystack. This tends to exercise interesting edge cases that are otherwise -# difficult to provoke. (But not necessarily impossible. Regex search iterators -# for example, make use of the "search just a substring" APIs by changing the -# starting position of a search to the end position of the previous match.) - -[[test]] -name = "unicode-word-start" -regex = '\b[0-9]+\b' -haystack = "β123" -bounds = { start = 2, end = 5 } -matches = [] - -[[test]] -name = "unicode-word-end" -regex = '\b[0-9]+\b' -haystack = "123β" -bounds = { start = 0, end = 3 } -matches = [] - -[[test]] -name = "ascii-word-start" -regex = '\b[0-9]+\b' -haystack = "β123" -bounds = { start = 2, end = 5 } -matches = [[2, 5]] -unicode = false - -[[test]] -name = "ascii-word-end" -regex = '\b[0-9]+\b' -haystack = "123β" -bounds = { start = 0, end = 3 } -matches = [[0, 3]] -unicode = false diff --git a/vendor/regex/testdata/unicode.toml b/vendor/regex/testdata/unicode.toml deleted file mode 100644 index f4ac76bae65c12..00000000000000 --- a/vendor/regex/testdata/unicode.toml +++ /dev/null @@ -1,517 +0,0 @@ -# Basic Unicode literal support. 
-[[test]] -name = "literal1" -regex = '☃' -haystack = "☃" -matches = [[0, 3]] - -[[test]] -name = "literal2" -regex = '☃+' -haystack = "☃" -matches = [[0, 3]] - -[[test]] -name = "literal3" -regex = '☃+' -haystack = "☃" -matches = [[0, 3]] -case-insensitive = true - -[[test]] -name = "literal4" -regex = 'Δ' -haystack = "δ" -matches = [[0, 2]] -case-insensitive = true - -# Unicode word boundaries. -[[test]] -name = "wb-100" -regex = '\d\b' -haystack = "6δ" -matches = [] - -[[test]] -name = "wb-200" -regex = '\d\b' -haystack = "6 " -matches = [[0, 1]] - -[[test]] -name = "wb-300" -regex = '\d\B' -haystack = "6δ" -matches = [[0, 1]] - -[[test]] -name = "wb-400" -regex = '\d\B' -haystack = "6 " -matches = [] - -# Unicode character class support. -[[test]] -name = "class1" -regex = '[☃Ⅰ]+' -haystack = "☃" -matches = [[0, 3]] - -[[test]] -name = "class2" -regex = '\pN' -haystack = "Ⅰ" -matches = [[0, 3]] - -[[test]] -name = "class3" -regex = '\pN+' -haystack = "Ⅰ1Ⅱ2" -matches = [[0, 8]] - -[[test]] -name = "class4" -regex = '\PN+' -haystack = "abⅠ" -matches = [[0, 2]] - -[[test]] -name = "class5" -regex = '[\PN]+' -haystack = "abⅠ" -matches = [[0, 2]] - -[[test]] -name = "class6" -regex = '[^\PN]+' -haystack = "abⅠ" -matches = [[2, 5]] - -[[test]] -name = "class7" -regex = '\p{Lu}+' -haystack = "ΛΘΓΔα" -matches = [[0, 8]] - -[[test]] -name = "class8" -regex = '\p{Lu}+' -haystack = "ΛΘΓΔα" -matches = [[0, 10]] -case-insensitive = true - -[[test]] -name = "class9" -regex = '\pL+' -haystack = "ΛΘΓΔα" -matches = [[0, 10]] - -[[test]] -name = "class10" -regex = '\p{Ll}+' -haystack = "ΛΘΓΔα" -matches = [[8, 10]] - -# Unicode aware "Perl" character classes. -[[test]] -name = "perl1" -regex = '\w+' -haystack = "dδd" -matches = [[0, 4]] - -[[test]] -name = "perl2" -regex = '\w+' -haystack = "⥡" -matches = [] - -[[test]] -name = "perl3" -regex = '\W+' -haystack = "⥡" -matches = [[0, 3]] - -[[test]] -name = "perl4" -regex = '\d+' -haystack = "1२३9" -matches = [[0, 8]] - -[[test]] -name = "perl5" -regex = '\d+' -haystack = "Ⅱ" -matches = [] - -[[test]] -name = "perl6" -regex = '\D+' -haystack = "Ⅱ" -matches = [[0, 3]] - -[[test]] -name = "perl7" -regex = '\s+' -haystack = " " -matches = [[0, 3]] - -[[test]] -name = "perl8" -regex = '\s+' -haystack = "☃" -matches = [] - -[[test]] -name = "perl9" -regex = '\S+' -haystack = "☃" -matches = [[0, 3]] - -# Specific tests for Unicode general category classes. 
-[[test]] -name = "class-gencat1" -regex = '\p{Cased_Letter}' -haystack = "A" -matches = [[0, 3]] - -[[test]] -name = "class-gencat2" -regex = '\p{Close_Punctuation}' -haystack = "❯" -matches = [[0, 3]] - -[[test]] -name = "class-gencat3" -regex = '\p{Connector_Punctuation}' -haystack = "⁀" -matches = [[0, 3]] - -[[test]] -name = "class-gencat4" -regex = '\p{Control}' -haystack = "\u009F" -matches = [[0, 2]] - -[[test]] -name = "class-gencat5" -regex = '\p{Currency_Symbol}' -haystack = "£" -matches = [[0, 3]] - -[[test]] -name = "class-gencat6" -regex = '\p{Dash_Punctuation}' -haystack = "〰" -matches = [[0, 3]] - -[[test]] -name = "class-gencat7" -regex = '\p{Decimal_Number}' -haystack = "𑓙" -matches = [[0, 4]] - -[[test]] -name = "class-gencat8" -regex = '\p{Enclosing_Mark}' -haystack = "\uA672" -matches = [[0, 3]] - -[[test]] -name = "class-gencat9" -regex = '\p{Final_Punctuation}' -haystack = "⸡" -matches = [[0, 3]] - -[[test]] -name = "class-gencat10" -regex = '\p{Format}' -haystack = "\U000E007F" -matches = [[0, 4]] - -[[test]] -name = "class-gencat11" -regex = '\p{Initial_Punctuation}' -haystack = "⸜" -matches = [[0, 3]] - -[[test]] -name = "class-gencat12" -regex = '\p{Letter}' -haystack = "Έ" -matches = [[0, 2]] - -[[test]] -name = "class-gencat13" -regex = '\p{Letter_Number}' -haystack = "ↂ" -matches = [[0, 3]] - -[[test]] -name = "class-gencat14" -regex = '\p{Line_Separator}' -haystack = "\u2028" -matches = [[0, 3]] - -[[test]] -name = "class-gencat15" -regex = '\p{Lowercase_Letter}' -haystack = "ϛ" -matches = [[0, 2]] - -[[test]] -name = "class-gencat16" -regex = '\p{Mark}' -haystack = "\U000E01EF" -matches = [[0, 4]] - -[[test]] -name = "class-gencat17" -regex = '\p{Math}' -haystack = "⋿" -matches = [[0, 3]] - -[[test]] -name = "class-gencat18" -regex = '\p{Modifier_Letter}' -haystack = "𖭃" -matches = [[0, 4]] - -[[test]] -name = "class-gencat19" -regex = '\p{Modifier_Symbol}' -haystack = "🏿" -matches = [[0, 4]] - -[[test]] -name = "class-gencat20" -regex = '\p{Nonspacing_Mark}' -haystack = "\U0001E94A" -matches = [[0, 4]] - -[[test]] -name = "class-gencat21" -regex = '\p{Number}' -haystack = "⓿" -matches = [[0, 3]] - -[[test]] -name = "class-gencat22" -regex = '\p{Open_Punctuation}' -haystack = "⦅" -matches = [[0, 3]] - -[[test]] -name = "class-gencat23" -regex = '\p{Other}' -haystack = "\u0BC9" -matches = [[0, 3]] - -[[test]] -name = "class-gencat24" -regex = '\p{Other_Letter}' -haystack = "ꓷ" -matches = [[0, 3]] - -[[test]] -name = "class-gencat25" -regex = '\p{Other_Number}' -haystack = "㉏" -matches = [[0, 3]] - -[[test]] -name = "class-gencat26" -regex = '\p{Other_Punctuation}' -haystack = "𞥞" -matches = [[0, 4]] - -[[test]] -name = "class-gencat27" -regex = '\p{Other_Symbol}' -haystack = "⅌" -matches = [[0, 3]] - -[[test]] -name = "class-gencat28" -regex = '\p{Paragraph_Separator}' -haystack = "\u2029" -matches = [[0, 3]] - -[[test]] -name = "class-gencat29" -regex = '\p{Private_Use}' -haystack = "\U0010FFFD" -matches = [[0, 4]] - -[[test]] -name = "class-gencat30" -regex = '\p{Punctuation}' -haystack = "𑁍" -matches = [[0, 4]] - -[[test]] -name = "class-gencat31" -regex = '\p{Separator}' -haystack = "\u3000" -matches = [[0, 3]] - -[[test]] -name = "class-gencat32" -regex = '\p{Space_Separator}' -haystack = "\u205F" -matches = [[0, 3]] - -[[test]] -name = "class-gencat33" -regex = '\p{Spacing_Mark}' -haystack = "\U00016F7E" -matches = [[0, 4]] - -[[test]] -name = "class-gencat34" -regex = '\p{Symbol}' -haystack = "⯈" -matches = [[0, 3]] - -[[test]] -name = "class-gencat35" 
-regex = '\p{Titlecase_Letter}' -haystack = "ῼ" -matches = [[0, 3]] - -[[test]] -name = "class-gencat36" -regex = '\p{Unassigned}' -haystack = "\U0010FFFF" -matches = [[0, 4]] - -[[test]] -name = "class-gencat37" -regex = '\p{Uppercase_Letter}' -haystack = "Ꝋ" -matches = [[0, 3]] - - -# Tests for Unicode emoji properties. -[[test]] -name = "class-emoji1" -regex = '\p{Emoji}' -haystack = "\u23E9" -matches = [[0, 3]] - -[[test]] -name = "class-emoji2" -regex = '\p{emoji}' -haystack = "\U0001F21A" -matches = [[0, 4]] - -[[test]] -name = "class-emoji3" -regex = '\p{extendedpictographic}' -haystack = "\U0001FA6E" -matches = [[0, 4]] - -[[test]] -name = "class-emoji4" -regex = '\p{extendedpictographic}' -haystack = "\U0001FFFD" -matches = [[0, 4]] - - -# Tests for Unicode grapheme cluster properties. -[[test]] -name = "class-gcb1" -regex = '\p{grapheme_cluster_break=prepend}' -haystack = "\U00011D46" -matches = [[0, 4]] - -[[test]] -name = "class-gcb2" -regex = '\p{gcb=regional_indicator}' -haystack = "\U0001F1E6" -matches = [[0, 4]] - -[[test]] -name = "class-gcb3" -regex = '\p{gcb=ri}' -haystack = "\U0001F1E7" -matches = [[0, 4]] - -[[test]] -name = "class-gcb4" -regex = '\p{regionalindicator}' -haystack = "\U0001F1FF" -matches = [[0, 4]] - -[[test]] -name = "class-gcb5" -regex = '\p{gcb=lvt}' -haystack = "\uC989" -matches = [[0, 3]] - -[[test]] -name = "class-gcb6" -regex = '\p{gcb=zwj}' -haystack = "\u200D" -matches = [[0, 3]] - -# Tests for Unicode word boundary properties. -[[test]] -name = "class-word-break1" -regex = '\p{word_break=Hebrew_Letter}' -haystack = "\uFB46" -matches = [[0, 3]] - -[[test]] -name = "class-word-break2" -regex = '\p{wb=hebrewletter}' -haystack = "\uFB46" -matches = [[0, 3]] - -[[test]] -name = "class-word-break3" -regex = '\p{wb=ExtendNumLet}' -haystack = "\uFF3F" -matches = [[0, 3]] - -[[test]] -name = "class-word-break4" -regex = '\p{wb=WSegSpace}' -haystack = "\u3000" -matches = [[0, 3]] - -[[test]] -name = "class-word-break5" -regex = '\p{wb=numeric}' -haystack = "\U0001E950" -matches = [[0, 4]] - -# Tests for Unicode sentence boundary properties. -[[test]] -name = "class-sentence-break1" -regex = '\p{sentence_break=Lower}' -haystack = "\u0469" -matches = [[0, 2]] - -[[test]] -name = "class-sentence-break2" -regex = '\p{sb=lower}' -haystack = "\u0469" -matches = [[0, 2]] - -[[test]] -name = "class-sentence-break3" -regex = '\p{sb=Close}' -haystack = "\uFF60" -matches = [[0, 3]] - -[[test]] -name = "class-sentence-break4" -regex = '\p{sb=Close}' -haystack = "\U0001F677" -matches = [[0, 4]] - -[[test]] -name = "class-sentence-break5" -regex = '\p{sb=SContinue}' -haystack = "\uFF64" -matches = [[0, 3]] diff --git a/vendor/regex/testdata/utf8.toml b/vendor/regex/testdata/utf8.toml deleted file mode 100644 index 39e284b3828039..00000000000000 --- a/vendor/regex/testdata/utf8.toml +++ /dev/null @@ -1,399 +0,0 @@ -# These test the UTF-8 modes expose by regex-automata. Namely, when utf8 is -# true, then we promise that the haystack is valid UTF-8. (Otherwise behavior -# is unspecified.) This also corresponds to building the regex engine with the -# following two guarantees: -# -# 1) For any non-empty match reported, its span is guaranteed to correspond to -# valid UTF-8. -# 2) All empty or zero-width matches reported must never split a UTF-8 -# encoded codepoint. If the haystack has invalid UTF-8, then this results in -# unspecified behavior. 
-# -# The (2) is in particular what we focus our testing on since (1) is generally -# guaranteed by regex-syntax's AST-to-HIR translator and is well tested there. -# The thing with (2) is that it can't be described in the HIR, so the regex -# engines have to handle that case. Thus, we test it here. -# -# Note that it is possible to build a regex that has property (1) but not -# (2), and vice versa. This is done by building the HIR with 'utf8=true' but -# building the Thompson NFA with 'utf8=false'. We don't test that here because -# the harness doesn't expose a way to enable or disable UTF-8 mode with that -# granularity. Instead, those combinations are lightly tested via doc examples. -# That's not to say that (1) without (2) is uncommon. Indeed, ripgrep uses it -# because it cannot guarantee that its haystack is valid UTF-8. - -# This tests that an empty regex doesn't split a codepoint. -[[test]] -name = "empty-utf8yes" -regex = '' -haystack = '☃' -matches = [[0, 0], [3, 3]] -unicode = true -utf8 = true - -# Tests the overlapping case of the above. -[[test]] -name = "empty-utf8yes-overlapping" -regex = '' -haystack = '☃' -matches = [[0, 0], [3, 3]] -unicode = true -utf8 = true -match-kind = "all" -search-kind = "overlapping" - -# This tests that an empty regex DOES split a codepoint when utf=false. -[[test]] -name = "empty-utf8no" -regex = '' -haystack = '☃' -matches = [[0, 0], [1, 1], [2, 2], [3, 3]] -unicode = true -utf8 = false - -# Tests the overlapping case of the above. -[[test]] -name = "empty-utf8no-overlapping" -regex = '' -haystack = '☃' -matches = [[0, 0], [1, 1], [2, 2], [3, 3]] -unicode = true -utf8 = false -match-kind = "all" -search-kind = "overlapping" - -# This tests that an empty regex doesn't split a codepoint, even if we give -# it bounds entirely within the codepoint. -# -# This is one of the trickier cases and is what motivated the current UTF-8 -# mode design. In particular, at one point, this test failed the 'is_match' -# variant of the test but not 'find'. This is because the 'is_match' code path -# is specifically optimized for "was a match found" rather than "where is the -# match." In the former case, you don't really care about the empty-vs-non-empty -# matches, and thus, the codepoint splitting filtering logic wasn't getting -# applied. (In multiple ways across multiple regex engines.) In this way, you -# can wind up with a situation where 'is_match' says "yes," but 'find' says, -# "I didn't find anything." Which is... not great. -# -# I could have decided to say that providing boundaries that themselves split -# a codepoint would have unspecified behavior. But I couldn't quite convince -# myself that such boundaries were the only way to get an inconsistency between -# 'is_match' and 'find'. -# -# Note that I also tried to come up with a test like this that fails without -# using `bounds`. Specifically, a test where 'is_match' and 'find' disagree. -# But I couldn't do it, and I'm tempted to conclude it is impossible. The -# fundamental problem is that you need to simultaneously produce an empty match -# that splits a codepoint while *not* matching before or after the codepoint. -[[test]] -name = "empty-utf8yes-bounds" -regex = '' -haystack = '𝛃' -bounds = [1, 3] -matches = [] -unicode = true -utf8 = true - -# Tests the overlapping case of the above. 
-[[test]] -name = "empty-utf8yes-bounds-overlapping" -regex = '' -haystack = '𝛃' -bounds = [1, 3] -matches = [] -unicode = true -utf8 = true -match-kind = "all" -search-kind = "overlapping" - -# This tests that an empty regex splits a codepoint when the bounds are -# entirely within the codepoint. -[[test]] -name = "empty-utf8no-bounds" -regex = '' -haystack = '𝛃' -bounds = [1, 3] -matches = [[1, 1], [2, 2], [3, 3]] -unicode = true -utf8 = false - -# Tests the overlapping case of the above. -[[test]] -name = "empty-utf8no-bounds-overlapping" -regex = '' -haystack = '𝛃' -bounds = [1, 3] -matches = [[1, 1], [2, 2], [3, 3]] -unicode = true -utf8 = false -match-kind = "all" -search-kind = "overlapping" - -# In this test, we anchor the search. Since the start position is also a UTF-8 -# boundary, we get a match. -[[test]] -name = "empty-utf8yes-anchored" -regex = '' -haystack = '𝛃' -matches = [[0, 0]] -anchored = true -unicode = true -utf8 = true - -# Tests the overlapping case of the above. -[[test]] -name = "empty-utf8yes-anchored-overlapping" -regex = '' -haystack = '𝛃' -matches = [[0, 0]] -anchored = true -unicode = true -utf8 = true -match-kind = "all" -search-kind = "overlapping" - -# Same as above, except with UTF-8 mode disabled. It almost doesn't change the -# result, except for the fact that since this is an anchored search and we -# always find all matches, the test harness will keep reporting matches until -# none are found. Because it's anchored, matches will be reported so long as -# they are directly adjacent. Since with UTF-8 mode the next anchored search -# after the match at [0, 0] fails, iteration stops (and doesn't find the last -# match at [4, 4]). -[[test]] -name = "empty-utf8no-anchored" -regex = '' -haystack = '𝛃' -matches = [[0, 0], [1, 1], [2, 2], [3, 3], [4, 4]] -anchored = true -unicode = true -utf8 = false - -# Tests the overlapping case of the above. -# -# Note that overlapping anchored searches are a little weird, and it's not -# totally clear what their semantics ought to be. For now, we just test the -# current behavior of our test shim that implements overlapping search. (This -# is one of the reasons why we don't really expose regex-level overlapping -# searches.) -[[test]] -name = "empty-utf8no-anchored-overlapping" -regex = '' -haystack = '𝛃' -matches = [[0, 0]] -anchored = true -unicode = true -utf8 = false -match-kind = "all" -search-kind = "overlapping" - -# In this test, we anchor the search, but also set bounds. The bounds start the -# search in the middle of a codepoint, so there should never be a match. -[[test]] -name = "empty-utf8yes-anchored-bounds" -regex = '' -haystack = '𝛃' -matches = [] -bounds = [1, 3] -anchored = true -unicode = true -utf8 = true - -# Tests the overlapping case of the above. -[[test]] -name = "empty-utf8yes-anchored-bounds-overlapping" -regex = '' -haystack = '𝛃' -matches = [] -bounds = [1, 3] -anchored = true -unicode = true -utf8 = true -match-kind = "all" -search-kind = "overlapping" - -# Same as above, except with UTF-8 mode disabled. Without UTF-8 mode enabled, -# matching within a codepoint is allowed. And remember, as in the anchored test -# above with UTF-8 mode disabled, iteration will report all adjacent matches. -# The matches at [0, 0] and [4, 4] are not included because of the bounds of -# the search. 
-[[test]] -name = "empty-utf8no-anchored-bounds" -regex = '' -haystack = '𝛃' -bounds = [1, 3] -matches = [[1, 1], [2, 2], [3, 3]] -anchored = true -unicode = true -utf8 = false - -# Tests the overlapping case of the above. -# -# Note that overlapping anchored searches are a little weird, and it's not -# totally clear what their semantics ought to be. For now, we just test the -# current behavior of our test shim that implements overlapping search. (This -# is one of the reasons why we don't really expose regex-level overlapping -# searches.) -[[test]] -name = "empty-utf8no-anchored-bounds-overlapping" -regex = '' -haystack = '𝛃' -bounds = [1, 3] -matches = [[1, 1]] -anchored = true -unicode = true -utf8 = false -match-kind = "all" -search-kind = "overlapping" - -# This tests that we find the match at the end of the string when the bounds -# exclude the first match. -[[test]] -name = "empty-utf8yes-startbound" -regex = '' -haystack = '𝛃' -bounds = [1, 4] -matches = [[4, 4]] -unicode = true -utf8 = true - -# Tests the overlapping case of the above. -[[test]] -name = "empty-utf8yes-startbound-overlapping" -regex = '' -haystack = '𝛃' -bounds = [1, 4] -matches = [[4, 4]] -unicode = true -utf8 = true -match-kind = "all" -search-kind = "overlapping" - -# Same as above, except since UTF-8 mode is disabled, we also find the matches -# inbetween that split the codepoint. -[[test]] -name = "empty-utf8no-startbound" -regex = '' -haystack = '𝛃' -bounds = [1, 4] -matches = [[1, 1], [2, 2], [3, 3], [4, 4]] -unicode = true -utf8 = false - -# Tests the overlapping case of the above. -[[test]] -name = "empty-utf8no-startbound-overlapping" -regex = '' -haystack = '𝛃' -bounds = [1, 4] -matches = [[1, 1], [2, 2], [3, 3], [4, 4]] -unicode = true -utf8 = false -match-kind = "all" -search-kind = "overlapping" - -# This tests that we don't find any matches in an anchored search, even when -# the bounds include a match (at the end). -[[test]] -name = "empty-utf8yes-anchored-startbound" -regex = '' -haystack = '𝛃' -bounds = [1, 4] -matches = [] -anchored = true -unicode = true -utf8 = true - -# Tests the overlapping case of the above. -[[test]] -name = "empty-utf8yes-anchored-startbound-overlapping" -regex = '' -haystack = '𝛃' -bounds = [1, 4] -matches = [] -anchored = true -unicode = true -utf8 = true -match-kind = "all" -search-kind = "overlapping" - -# Same as above, except since UTF-8 mode is disabled, we also find the matches -# inbetween that split the codepoint. Even though this is an anchored search, -# since the matches are adjacent, we find all of them. -[[test]] -name = "empty-utf8no-anchored-startbound" -regex = '' -haystack = '𝛃' -bounds = [1, 4] -matches = [[1, 1], [2, 2], [3, 3], [4, 4]] -anchored = true -unicode = true -utf8 = false - -# Tests the overlapping case of the above. -# -# Note that overlapping anchored searches are a little weird, and it's not -# totally clear what their semantics ought to be. For now, we just test the -# current behavior of our test shim that implements overlapping search. (This -# is one of the reasons why we don't really expose regex-level overlapping -# searches.) -[[test]] -name = "empty-utf8no-anchored-startbound-overlapping" -regex = '' -haystack = '𝛃' -bounds = [1, 4] -matches = [[1, 1]] -anchored = true -unicode = true -utf8 = false -match-kind = "all" -search-kind = "overlapping" - -# This tests that we find the match at the end of the haystack in UTF-8 mode -# when our bounds only include the empty string at the end of the haystack. 
-[[test]] -name = "empty-utf8yes-anchored-endbound" -regex = '' -haystack = '𝛃' -bounds = [4, 4] -matches = [[4, 4]] -anchored = true -unicode = true -utf8 = true - -# Tests the overlapping case of the above. -[[test]] -name = "empty-utf8yes-anchored-endbound-overlapping" -regex = '' -haystack = '𝛃' -bounds = [4, 4] -matches = [[4, 4]] -anchored = true -unicode = true -utf8 = true -match-kind = "all" -search-kind = "overlapping" - -# Same as above, but with UTF-8 mode disabled. Results remain the same since -# the only possible match does not split a codepoint. -[[test]] -name = "empty-utf8no-anchored-endbound" -regex = '' -haystack = '𝛃' -bounds = [4, 4] -matches = [[4, 4]] -anchored = true -unicode = true -utf8 = false - -# Tests the overlapping case of the above. -[[test]] -name = "empty-utf8no-anchored-endbound-overlapping" -regex = '' -haystack = '𝛃' -bounds = [4, 4] -matches = [[4, 4]] -anchored = true -unicode = true -utf8 = false -match-kind = "all" -search-kind = "overlapping" diff --git a/vendor/regex/testdata/word-boundary-special.toml b/vendor/regex/testdata/word-boundary-special.toml deleted file mode 100644 index 2b5a2a0acf9378..00000000000000 --- a/vendor/regex/testdata/word-boundary-special.toml +++ /dev/null @@ -1,687 +0,0 @@ -# These tests are for the "special" word boundary assertions. That is, -# \b{start}, \b{end}, \b{start-half}, \b{end-half}. These are specialty -# assertions for more niche use cases, but hitting those cases without these -# assertions is difficult. For example, \b{start-half} and \b{end-half} are -# used to implement the -w/--word-regexp flag in a grep program. - -# Tests for (?-u:\b{start}) - -[[test]] -name = "word-start-ascii-010" -regex = '\b{start}' -haystack = "a" -matches = [[0, 0]] -unicode = false - -[[test]] -name = "word-start-ascii-020" -regex = '\b{start}' -haystack = "a " -matches = [[0, 0]] -unicode = false - -[[test]] -name = "word-start-ascii-030" -regex = '\b{start}' -haystack = " a " -matches = [[1, 1]] -unicode = false - -[[test]] -name = "word-start-ascii-040" -regex = '\b{start}' -haystack = "" -matches = [] -unicode = false - -[[test]] -name = "word-start-ascii-050" -regex = '\b{start}' -haystack = "ab" -matches = [[0, 0]] -unicode = false - -[[test]] -name = "word-start-ascii-060" -regex = '\b{start}' -haystack = "𝛃" -matches = [] -unicode = false - -[[test]] -name = "word-start-ascii-060-bounds" -regex = '\b{start}' -haystack = "𝛃" -bounds = [2, 3] -matches = [] -unicode = false - -[[test]] -name = "word-start-ascii-070" -regex = '\b{start}' -haystack = " 𝛃 " -matches = [] -unicode = false - -[[test]] -name = "word-start-ascii-080" -regex = '\b{start}' -haystack = "𝛃𐆀" -matches = [] -unicode = false - -[[test]] -name = "word-start-ascii-090" -regex = '\b{start}' -haystack = "𝛃b" -matches = [[4, 4]] -unicode = false - -[[test]] -name = "word-start-ascii-110" -regex = '\b{start}' -haystack = "b𝛃" -matches = [[0, 0]] -unicode = false - -# Tests for (?-u:\b{end}) - -[[test]] -name = "word-end-ascii-010" -regex = '\b{end}' -haystack = "a" -matches = [[1, 1]] -unicode = false - -[[test]] -name = "word-end-ascii-020" -regex = '\b{end}' -haystack = "a " -matches = [[1, 1]] -unicode = false - -[[test]] -name = "word-end-ascii-030" -regex = '\b{end}' -haystack = " a " -matches = [[2, 2]] -unicode = false - -[[test]] -name = "word-end-ascii-040" -regex = '\b{end}' -haystack = "" -matches = [] -unicode = false - -[[test]] -name = "word-end-ascii-050" -regex = '\b{end}' -haystack = "ab" -matches = [[2, 2]] -unicode = false - -[[test]] 
-name = "word-end-ascii-060" -regex = '\b{end}' -haystack = "𝛃" -matches = [] -unicode = false - -[[test]] -name = "word-end-ascii-060-bounds" -regex = '\b{end}' -haystack = "𝛃" -bounds = [2, 3] -matches = [] -unicode = false - -[[test]] -name = "word-end-ascii-070" -regex = '\b{end}' -haystack = " 𝛃 " -matches = [] -unicode = false - -[[test]] -name = "word-end-ascii-080" -regex = '\b{end}' -haystack = "𝛃𐆀" -matches = [] -unicode = false - -[[test]] -name = "word-end-ascii-090" -regex = '\b{end}' -haystack = "𝛃b" -matches = [[5, 5]] -unicode = false - -[[test]] -name = "word-end-ascii-110" -regex = '\b{end}' -haystack = "b𝛃" -matches = [[1, 1]] -unicode = false - -# Tests for \b{start} - -[[test]] -name = "word-start-unicode-010" -regex = '\b{start}' -haystack = "a" -matches = [[0, 0]] -unicode = true - -[[test]] -name = "word-start-unicode-020" -regex = '\b{start}' -haystack = "a " -matches = [[0, 0]] -unicode = true - -[[test]] -name = "word-start-unicode-030" -regex = '\b{start}' -haystack = " a " -matches = [[1, 1]] -unicode = true - -[[test]] -name = "word-start-unicode-040" -regex = '\b{start}' -haystack = "" -matches = [] -unicode = true - -[[test]] -name = "word-start-unicode-050" -regex = '\b{start}' -haystack = "ab" -matches = [[0, 0]] -unicode = true - -[[test]] -name = "word-start-unicode-060" -regex = '\b{start}' -haystack = "𝛃" -matches = [[0, 0]] -unicode = true - -[[test]] -name = "word-start-unicode-060-bounds" -regex = '\b{start}' -haystack = "𝛃" -bounds = [2, 3] -matches = [] -unicode = true - -[[test]] -name = "word-start-unicode-070" -regex = '\b{start}' -haystack = " 𝛃 " -matches = [[1, 1]] -unicode = true - -[[test]] -name = "word-start-unicode-080" -regex = '\b{start}' -haystack = "𝛃𐆀" -matches = [[0, 0]] -unicode = true - -[[test]] -name = "word-start-unicode-090" -regex = '\b{start}' -haystack = "𝛃b" -matches = [[0, 0]] -unicode = true - -[[test]] -name = "word-start-unicode-110" -regex = '\b{start}' -haystack = "b𝛃" -matches = [[0, 0]] -unicode = true - -# Tests for \b{end} - -[[test]] -name = "word-end-unicode-010" -regex = '\b{end}' -haystack = "a" -matches = [[1, 1]] -unicode = true - -[[test]] -name = "word-end-unicode-020" -regex = '\b{end}' -haystack = "a " -matches = [[1, 1]] -unicode = true - -[[test]] -name = "word-end-unicode-030" -regex = '\b{end}' -haystack = " a " -matches = [[2, 2]] -unicode = true - -[[test]] -name = "word-end-unicode-040" -regex = '\b{end}' -haystack = "" -matches = [] -unicode = true - -[[test]] -name = "word-end-unicode-050" -regex = '\b{end}' -haystack = "ab" -matches = [[2, 2]] -unicode = true - -[[test]] -name = "word-end-unicode-060" -regex = '\b{end}' -haystack = "𝛃" -matches = [[4, 4]] -unicode = true - -[[test]] -name = "word-end-unicode-060-bounds" -regex = '\b{end}' -haystack = "𝛃" -bounds = [2, 3] -matches = [] -unicode = true - -[[test]] -name = "word-end-unicode-070" -regex = '\b{end}' -haystack = " 𝛃 " -matches = [[5, 5]] -unicode = true - -[[test]] -name = "word-end-unicode-080" -regex = '\b{end}' -haystack = "𝛃𐆀" -matches = [[4, 4]] -unicode = true - -[[test]] -name = "word-end-unicode-090" -regex = '\b{end}' -haystack = "𝛃b" -matches = [[5, 5]] -unicode = true - -[[test]] -name = "word-end-unicode-110" -regex = '\b{end}' -haystack = "b𝛃" -matches = [[5, 5]] -unicode = true - -# Tests for (?-u:\b{start-half}) - -[[test]] -name = "word-start-half-ascii-010" -regex = '\b{start-half}' -haystack = "a" -matches = [[0, 0]] -unicode = false - -[[test]] -name = "word-start-half-ascii-020" -regex = '\b{start-half}' 
-haystack = "a " -matches = [[0, 0], [2, 2]] -unicode = false - -[[test]] -name = "word-start-half-ascii-030" -regex = '\b{start-half}' -haystack = " a " -matches = [[0, 0], [1, 1], [3, 3]] -unicode = false - -[[test]] -name = "word-start-half-ascii-040" -regex = '\b{start-half}' -haystack = "" -matches = [[0, 0]] -unicode = false - -[[test]] -name = "word-start-half-ascii-050" -regex = '\b{start-half}' -haystack = "ab" -matches = [[0, 0]] -unicode = false - -[[test]] -name = "word-start-half-ascii-060" -regex = '\b{start-half}' -haystack = "𝛃" -matches = [[0, 0], [4, 4]] -unicode = false - -[[test]] -name = "word-start-half-ascii-060-noutf8" -regex = '\b{start-half}' -haystack = "𝛃" -matches = [[0, 0], [1, 1], [2, 2], [3, 3], [4, 4]] -unicode = false -utf8 = false - -[[test]] -name = "word-start-half-ascii-060-bounds" -regex = '\b{start-half}' -haystack = "𝛃" -bounds = [2, 3] -matches = [] -unicode = false - -[[test]] -name = "word-start-half-ascii-070" -regex = '\b{start-half}' -haystack = " 𝛃 " -matches = [[0, 0], [1, 1], [5, 5], [6, 6]] -unicode = false - -[[test]] -name = "word-start-half-ascii-080" -regex = '\b{start-half}' -haystack = "𝛃𐆀" -matches = [[0, 0], [4, 4], [8, 8]] -unicode = false - -[[test]] -name = "word-start-half-ascii-090" -regex = '\b{start-half}' -haystack = "𝛃b" -matches = [[0, 0], [4, 4]] -unicode = false - -[[test]] -name = "word-start-half-ascii-110" -regex = '\b{start-half}' -haystack = "b𝛃" -matches = [[0, 0], [5, 5]] -unicode = false - -# Tests for (?-u:\b{end-half}) - -[[test]] -name = "word-end-half-ascii-010" -regex = '\b{end-half}' -haystack = "a" -matches = [[1, 1]] -unicode = false - -[[test]] -name = "word-end-half-ascii-020" -regex = '\b{end-half}' -haystack = "a " -matches = [[1, 1], [2, 2]] -unicode = false - -[[test]] -name = "word-end-half-ascii-030" -regex = '\b{end-half}' -haystack = " a " -matches = [[0, 0], [2, 2], [3, 3]] -unicode = false - -[[test]] -name = "word-end-half-ascii-040" -regex = '\b{end-half}' -haystack = "" -matches = [[0, 0]] -unicode = false - -[[test]] -name = "word-end-half-ascii-050" -regex = '\b{end-half}' -haystack = "ab" -matches = [[2, 2]] -unicode = false - -[[test]] -name = "word-end-half-ascii-060" -regex = '\b{end-half}' -haystack = "𝛃" -matches = [[0, 0], [4, 4]] -unicode = false - -[[test]] -name = "word-end-half-ascii-060-bounds" -regex = '\b{end-half}' -haystack = "𝛃" -bounds = [2, 3] -matches = [] -unicode = false - -[[test]] -name = "word-end-half-ascii-070" -regex = '\b{end-half}' -haystack = " 𝛃 " -matches = [[0, 0], [1, 1], [5, 5], [6, 6]] -unicode = false - -[[test]] -name = "word-end-half-ascii-080" -regex = '\b{end-half}' -haystack = "𝛃𐆀" -matches = [[0, 0], [4, 4], [8, 8]] -unicode = false - -[[test]] -name = "word-end-half-ascii-090" -regex = '\b{end-half}' -haystack = "𝛃b" -matches = [[0, 0], [5, 5]] -unicode = false - -[[test]] -name = "word-end-half-ascii-110" -regex = '\b{end-half}' -haystack = "b𝛃" -matches = [[1, 1], [5, 5]] -unicode = false - -# Tests for \b{start-half} - -[[test]] -name = "word-start-half-unicode-010" -regex = '\b{start-half}' -haystack = "a" -matches = [[0, 0]] -unicode = true - -[[test]] -name = "word-start-half-unicode-020" -regex = '\b{start-half}' -haystack = "a " -matches = [[0, 0], [2, 2]] -unicode = true - -[[test]] -name = "word-start-half-unicode-030" -regex = '\b{start-half}' -haystack = " a " -matches = [[0, 0], [1, 1], [3, 3]] -unicode = true - -[[test]] -name = "word-start-half-unicode-040" -regex = '\b{start-half}' -haystack = "" -matches = [[0, 0]] -unicode = 
true - -[[test]] -name = "word-start-half-unicode-050" -regex = '\b{start-half}' -haystack = "ab" -matches = [[0, 0]] -unicode = true - -[[test]] -name = "word-start-half-unicode-060" -regex = '\b{start-half}' -haystack = "𝛃" -matches = [[0, 0]] -unicode = true - -[[test]] -name = "word-start-half-unicode-060-bounds" -regex = '\b{start-half}' -haystack = "𝛃" -bounds = [2, 3] -matches = [] -unicode = true - -[[test]] -name = "word-start-half-unicode-070" -regex = '\b{start-half}' -haystack = " 𝛃 " -matches = [[0, 0], [1, 1], [6, 6]] -unicode = true - -[[test]] -name = "word-start-half-unicode-080" -regex = '\b{start-half}' -haystack = "𝛃𐆀" -matches = [[0, 0], [8, 8]] -unicode = true - -[[test]] -name = "word-start-half-unicode-090" -regex = '\b{start-half}' -haystack = "𝛃b" -matches = [[0, 0]] -unicode = true - -[[test]] -name = "word-start-half-unicode-110" -regex = '\b{start-half}' -haystack = "b𝛃" -matches = [[0, 0]] -unicode = true - -# Tests for \b{end-half} - -[[test]] -name = "word-end-half-unicode-010" -regex = '\b{end-half}' -haystack = "a" -matches = [[1, 1]] -unicode = true - -[[test]] -name = "word-end-half-unicode-020" -regex = '\b{end-half}' -haystack = "a " -matches = [[1, 1], [2, 2]] -unicode = true - -[[test]] -name = "word-end-half-unicode-030" -regex = '\b{end-half}' -haystack = " a " -matches = [[0, 0], [2, 2], [3, 3]] -unicode = true - -[[test]] -name = "word-end-half-unicode-040" -regex = '\b{end-half}' -haystack = "" -matches = [[0, 0]] -unicode = true - -[[test]] -name = "word-end-half-unicode-050" -regex = '\b{end-half}' -haystack = "ab" -matches = [[2, 2]] -unicode = true - -[[test]] -name = "word-end-half-unicode-060" -regex = '\b{end-half}' -haystack = "𝛃" -matches = [[4, 4]] -unicode = true - -[[test]] -name = "word-end-half-unicode-060-bounds" -regex = '\b{end-half}' -haystack = "𝛃" -bounds = [2, 3] -matches = [] -unicode = true - -[[test]] -name = "word-end-half-unicode-070" -regex = '\b{end-half}' -haystack = " 𝛃 " -matches = [[0, 0], [5, 5], [6, 6]] -unicode = true - -[[test]] -name = "word-end-half-unicode-080" -regex = '\b{end-half}' -haystack = "𝛃𐆀" -matches = [[4, 4], [8, 8]] -unicode = true - -[[test]] -name = "word-end-half-unicode-090" -regex = '\b{end-half}' -haystack = "𝛃b" -matches = [[5, 5]] -unicode = true - -[[test]] -name = "word-end-half-unicode-110" -regex = '\b{end-half}' -haystack = "b𝛃" -matches = [[5, 5]] -unicode = true - -# Specialty tests. - -# Since \r is special cased in the start state computation (to deal with CRLF -# mode), this test ensures that the correct start state is computed when the -# pattern starts with a half word boundary assertion. -[[test]] -name = "word-start-half-ascii-carriage" -regex = '\b{start-half}[a-z]+' -haystack = 'ABC\rabc' -matches = [[4, 7]] -bounds = [4, 7] -unescape = true - -# Since \n is also special cased in the start state computation, this test -# ensures that the correct start state is computed when the pattern starts with -# a half word boundary assertion. -[[test]] -name = "word-start-half-ascii-linefeed" -regex = '\b{start-half}[a-z]+' -haystack = 'ABC\nabc' -matches = [[4, 7]] -bounds = [4, 7] -unescape = true - -# Like the carriage return test above, but with a custom line terminator. -[[test]] -name = "word-start-half-ascii-customlineterm" -regex = '\b{start-half}[a-z]+' -haystack = 'ABC!abc' -matches = [[4, 7]] -bounds = [4, 7] -unescape = true -line-terminator = '!' 
diff --git a/vendor/regex/testdata/word-boundary.toml b/vendor/regex/testdata/word-boundary.toml deleted file mode 100644 index 1d86fc9bb3b235..00000000000000 --- a/vendor/regex/testdata/word-boundary.toml +++ /dev/null @@ -1,781 +0,0 @@ -# Some of these are cribbed from RE2's test suite. - -# These test \b. Below are tests for \B. -[[test]] -name = "wb1" -regex = '\b' -haystack = "" -matches = [] -unicode = false - -[[test]] -name = "wb2" -regex = '\b' -haystack = "a" -matches = [[0, 0], [1, 1]] -unicode = false - -[[test]] -name = "wb3" -regex = '\b' -haystack = "ab" -matches = [[0, 0], [2, 2]] -unicode = false - -[[test]] -name = "wb4" -regex = '^\b' -haystack = "ab" -matches = [[0, 0]] -unicode = false - -[[test]] -name = "wb5" -regex = '\b$' -haystack = "ab" -matches = [[2, 2]] -unicode = false - -[[test]] -name = "wb6" -regex = '^\b$' -haystack = "ab" -matches = [] -unicode = false - -[[test]] -name = "wb7" -regex = '\bbar\b' -haystack = "nobar bar foo bar" -matches = [[6, 9], [14, 17]] -unicode = false - -[[test]] -name = "wb8" -regex = 'a\b' -haystack = "faoa x" -matches = [[3, 4]] -unicode = false - -[[test]] -name = "wb9" -regex = '\bbar' -haystack = "bar x" -matches = [[0, 3]] -unicode = false - -[[test]] -name = "wb10" -regex = '\bbar' -haystack = "foo\nbar x" -matches = [[4, 7]] -unicode = false - -[[test]] -name = "wb11" -regex = 'bar\b' -haystack = "foobar" -matches = [[3, 6]] -unicode = false - -[[test]] -name = "wb12" -regex = 'bar\b' -haystack = "foobar\nxxx" -matches = [[3, 6]] -unicode = false - -[[test]] -name = "wb13" -regex = '(?:foo|bar|[A-Z])\b' -haystack = "foo" -matches = [[0, 3]] -unicode = false - -[[test]] -name = "wb14" -regex = '(?:foo|bar|[A-Z])\b' -haystack = "foo\n" -matches = [[0, 3]] -unicode = false - -[[test]] -name = "wb15" -regex = '\b(?:foo|bar|[A-Z])' -haystack = "foo" -matches = [[0, 3]] -unicode = false - -[[test]] -name = "wb16" -regex = '\b(?:foo|bar|[A-Z])\b' -haystack = "X" -matches = [[0, 1]] -unicode = false - -[[test]] -name = "wb17" -regex = '\b(?:foo|bar|[A-Z])\b' -haystack = "XY" -matches = [] -unicode = false - -[[test]] -name = "wb18" -regex = '\b(?:foo|bar|[A-Z])\b' -haystack = "bar" -matches = [[0, 3]] -unicode = false - -[[test]] -name = "wb19" -regex = '\b(?:foo|bar|[A-Z])\b' -haystack = "foo" -matches = [[0, 3]] -unicode = false - -[[test]] -name = "wb20" -regex = '\b(?:foo|bar|[A-Z])\b' -haystack = "foo\n" -matches = [[0, 3]] -unicode = false - -[[test]] -name = "wb21" -regex = '\b(?:foo|bar|[A-Z])\b' -haystack = "ffoo bbar N x" -matches = [[10, 11]] -unicode = false - -[[test]] -name = "wb22" -regex = '\b(?:fo|foo)\b' -haystack = "fo" -matches = [[0, 2]] -unicode = false - -[[test]] -name = "wb23" -regex = '\b(?:fo|foo)\b' -haystack = "foo" -matches = [[0, 3]] -unicode = false - -[[test]] -name = "wb24" -regex = '\b\b' -haystack = "" -matches = [] -unicode = false - -[[test]] -name = "wb25" -regex = '\b\b' -haystack = "a" -matches = [[0, 0], [1, 1]] -unicode = false - -[[test]] -name = "wb26" -regex = '\b$' -haystack = "" -matches = [] -unicode = false - -[[test]] -name = "wb27" -regex = '\b$' -haystack = "x" -matches = [[1, 1]] -unicode = false - -[[test]] -name = "wb28" -regex = '\b$' -haystack = "y x" -matches = [[3, 3]] -unicode = false - -[[test]] -name = "wb29" -regex = '(?-u:\b).$' -haystack = "x" -matches = [[0, 1]] - -[[test]] -name = "wb30" -regex = '^\b(?:fo|foo)\b' -haystack = "fo" -matches = [[0, 2]] -unicode = false - -[[test]] -name = "wb31" -regex = '^\b(?:fo|foo)\b' -haystack = "foo" -matches = [[0, 3]] 
-unicode = false - -[[test]] -name = "wb32" -regex = '^\b$' -haystack = "" -matches = [] -unicode = false - -[[test]] -name = "wb33" -regex = '^\b$' -haystack = "x" -matches = [] -unicode = false - -[[test]] -name = "wb34" -regex = '^(?-u:\b).$' -haystack = "x" -matches = [[0, 1]] - -[[test]] -name = "wb35" -regex = '^(?-u:\b).(?-u:\b)$' -haystack = "x" -matches = [[0, 1]] - -[[test]] -name = "wb36" -regex = '^^^^^\b$$$$$' -haystack = "" -matches = [] -unicode = false - -[[test]] -name = "wb37" -regex = '^^^^^(?-u:\b).$$$$$' -haystack = "x" -matches = [[0, 1]] - -[[test]] -name = "wb38" -regex = '^^^^^\b$$$$$' -haystack = "x" -matches = [] -unicode = false - -[[test]] -name = "wb39" -regex = '^^^^^(?-u:\b\b\b).(?-u:\b\b\b)$$$$$' -haystack = "x" -matches = [[0, 1]] - -[[test]] -name = "wb40" -regex = '(?-u:\b).+(?-u:\b)' -haystack = "$$abc$$" -matches = [[2, 5]] - -[[test]] -name = "wb41" -regex = '\b' -haystack = "a b c" -matches = [[0, 0], [1, 1], [2, 2], [3, 3], [4, 4], [5, 5]] -unicode = false - -[[test]] -name = "wb42" -regex = '\bfoo\b' -haystack = "zzz foo zzz" -matches = [[4, 7]] -unicode = false - -[[test]] -name = "wb43" -regex = '\b^' -haystack = "ab" -matches = [[0, 0]] -unicode = false - -[[test]] -name = "wb44" -regex = '$\b' -haystack = "ab" -matches = [[2, 2]] -unicode = false - - -# Tests for \B. Note that \B is not allowed if UTF-8 mode is enabled, so we -# have to disable it for most of these tests. This is because \B can match at -# non-UTF-8 boundaries. -[[test]] -name = "nb1" -regex = '\Bfoo\B' -haystack = "n foo xfoox that" -matches = [[7, 10]] -unicode = false -utf8 = false - -[[test]] -name = "nb2" -regex = 'a\B' -haystack = "faoa x" -matches = [[1, 2]] -unicode = false -utf8 = false - -[[test]] -name = "nb3" -regex = '\Bbar' -haystack = "bar x" -matches = [] -unicode = false -utf8 = false - -[[test]] -name = "nb4" -regex = '\Bbar' -haystack = "foo\nbar x" -matches = [] -unicode = false -utf8 = false - -[[test]] -name = "nb5" -regex = 'bar\B' -haystack = "foobar" -matches = [] -unicode = false -utf8 = false - -[[test]] -name = "nb6" -regex = 'bar\B' -haystack = "foobar\nxxx" -matches = [] -unicode = false -utf8 = false - -[[test]] -name = "nb7" -regex = '(?:foo|bar|[A-Z])\B' -haystack = "foox" -matches = [[0, 3]] -unicode = false -utf8 = false - -[[test]] -name = "nb8" -regex = '(?:foo|bar|[A-Z])\B' -haystack = "foo\n" -matches = [] -unicode = false -utf8 = false - -[[test]] -name = "nb9" -regex = '\B' -haystack = "" -matches = [[0, 0]] -unicode = false -utf8 = false - -[[test]] -name = "nb10" -regex = '\B' -haystack = "x" -matches = [] -unicode = false -utf8 = false - -[[test]] -name = "nb11" -regex = '\B(?:foo|bar|[A-Z])' -haystack = "foo" -matches = [] -unicode = false -utf8 = false - -[[test]] -name = "nb12" -regex = '\B(?:foo|bar|[A-Z])\B' -haystack = "xXy" -matches = [[1, 2]] -unicode = false -utf8 = false - -[[test]] -name = "nb13" -regex = '\B(?:foo|bar|[A-Z])\B' -haystack = "XY" -matches = [] -unicode = false -utf8 = false - -[[test]] -name = "nb14" -regex = '\B(?:foo|bar|[A-Z])\B' -haystack = "XYZ" -matches = [[1, 2]] -unicode = false -utf8 = false - -[[test]] -name = "nb15" -regex = '\B(?:foo|bar|[A-Z])\B' -haystack = "abara" -matches = [[1, 4]] -unicode = false -utf8 = false - -[[test]] -name = "nb16" -regex = '\B(?:foo|bar|[A-Z])\B' -haystack = "xfoo_" -matches = [[1, 4]] -unicode = false -utf8 = false - -[[test]] -name = "nb17" -regex = '\B(?:foo|bar|[A-Z])\B' -haystack = "xfoo\n" -matches = [] -unicode = false -utf8 = false - -[[test]] -name = "nb18" 
-regex = '\B(?:foo|bar|[A-Z])\B' -haystack = "foo bar vNX" -matches = [[9, 10]] -unicode = false -utf8 = false - -[[test]] -name = "nb19" -regex = '\B(?:fo|foo)\B' -haystack = "xfoo" -matches = [[1, 3]] -unicode = false -utf8 = false - -[[test]] -name = "nb20" -regex = '\B(?:foo|fo)\B' -haystack = "xfooo" -matches = [[1, 4]] -unicode = false -utf8 = false - -[[test]] -name = "nb21" -regex = '\B\B' -haystack = "" -matches = [[0, 0]] -unicode = false -utf8 = false - -[[test]] -name = "nb22" -regex = '\B\B' -haystack = "x" -matches = [] -unicode = false -utf8 = false - -[[test]] -name = "nb23" -regex = '\B$' -haystack = "" -matches = [[0, 0]] -unicode = false -utf8 = false - -[[test]] -name = "nb24" -regex = '\B$' -haystack = "x" -matches = [] -unicode = false -utf8 = false - -[[test]] -name = "nb25" -regex = '\B$' -haystack = "y x" -matches = [] -unicode = false -utf8 = false - -[[test]] -name = "nb26" -regex = '\B.$' -haystack = "x" -matches = [] -unicode = false -utf8 = false - -[[test]] -name = "nb27" -regex = '^\B(?:fo|foo)\B' -haystack = "fo" -matches = [] -unicode = false -utf8 = false - -[[test]] -name = "nb28" -regex = '^\B(?:fo|foo)\B' -haystack = "fo" -matches = [] -unicode = false -utf8 = false - -[[test]] -name = "nb29" -regex = '^\B' -haystack = "" -matches = [[0, 0]] -unicode = false -utf8 = false - -[[test]] -name = "nb30" -regex = '^\B' -haystack = "x" -matches = [] -unicode = false -utf8 = false - -[[test]] -name = "nb31" -regex = '^\B\B' -haystack = "" -matches = [[0, 0]] -unicode = false -utf8 = false - -[[test]] -name = "nb32" -regex = '^\B\B' -haystack = "x" -matches = [] -unicode = false -utf8 = false - -[[test]] -name = "nb33" -regex = '^\B$' -haystack = "" -matches = [[0, 0]] -unicode = false -utf8 = false - -[[test]] -name = "nb34" -regex = '^\B$' -haystack = "x" -matches = [] -unicode = false -utf8 = false - -[[test]] -name = "nb35" -regex = '^\B.$' -haystack = "x" -matches = [] -unicode = false -utf8 = false - -[[test]] -name = "nb36" -regex = '^\B.\B$' -haystack = "x" -matches = [] -unicode = false -utf8 = false - -[[test]] -name = "nb37" -regex = '^^^^^\B$$$$$' -haystack = "" -matches = [[0, 0]] -unicode = false -utf8 = false - -[[test]] -name = "nb38" -regex = '^^^^^\B.$$$$$' -haystack = "x" -matches = [] -unicode = false -utf8 = false - -[[test]] -name = "nb39" -regex = '^^^^^\B$$$$$' -haystack = "x" -matches = [] -unicode = false -utf8 = false - - -# unicode1* and unicode2* work for both Unicode and ASCII because all matches -# are reported as byte offsets, and « and » do not correspond to word -# boundaries at either the character or byte level. -[[test]] -name = "unicode1" -regex = '\bx\b' -haystack = "«x" -matches = [[2, 3]] - -[[test]] -name = "unicode1-only-ascii" -regex = '\bx\b' -haystack = "«x" -matches = [[2, 3]] -unicode = false - -[[test]] -name = "unicode2" -regex = '\bx\b' -haystack = "x»" -matches = [[0, 1]] - -[[test]] -name = "unicode2-only-ascii" -regex = '\bx\b' -haystack = "x»" -matches = [[0, 1]] -unicode = false - -# ASCII word boundaries are completely oblivious to Unicode characters, so -# even though β is a character, an ASCII \b treats it as a word boundary -# when it is adjacent to another ASCII character. (The ASCII \b only looks -# at the leading byte of β.) For Unicode \b, the tests are precisely inverted. 
-[[test]] -name = "unicode3" -regex = '\bx\b' -haystack = 'áxβ' -matches = [] - -[[test]] -name = "unicode3-only-ascii" -regex = '\bx\b' -haystack = 'áxβ' -matches = [[2, 3]] -unicode = false - -[[test]] -name = "unicode4" -regex = '\Bx\B' -haystack = 'áxβ' -matches = [[2, 3]] - -[[test]] -name = "unicode4-only-ascii" -regex = '\Bx\B' -haystack = 'áxβ' -matches = [] -unicode = false -utf8 = false - -# The same as above, but with \b instead of \B as a sanity check. -[[test]] -name = "unicode5" -regex = '\b' -haystack = "0\U0007EF5E" -matches = [[0, 0], [1, 1]] - -[[test]] -name = "unicode5-only-ascii" -regex = '\b' -haystack = "0\U0007EF5E" -matches = [[0, 0], [1, 1]] -unicode = false -utf8 = false - -[[test]] -name = "unicode5-noutf8" -regex = '\b' -haystack = '0\xFF\xFF\xFF\xFF' -matches = [[0, 0], [1, 1]] -unescape = true -utf8 = false - -[[test]] -name = "unicode5-noutf8-only-ascii" -regex = '\b' -haystack = '0\xFF\xFF\xFF\xFF' -matches = [[0, 0], [1, 1]] -unescape = true -unicode = false -utf8 = false - -# Weird special case to ensure that ASCII \B treats each individual code unit -# as a non-word byte. (The specific codepoint is irrelevant. It's an arbitrary -# codepoint that uses 4 bytes in its UTF-8 encoding and is not a member of the -# \w character class.) -[[test]] -name = "unicode5-not" -regex = '\B' -haystack = "0\U0007EF5E" -matches = [[5, 5]] - -[[test]] -name = "unicode5-not-only-ascii" -regex = '\B' -haystack = "0\U0007EF5E" -matches = [[2, 2], [3, 3], [4, 4], [5, 5]] -unicode = false -utf8 = false - -# This gets no matches since \B only matches in the presence of valid UTF-8 -# when Unicode is enabled, even when UTF-8 mode is disabled. -[[test]] -name = "unicode5-not-noutf8" -regex = '\B' -haystack = '0\xFF\xFF\xFF\xFF' -matches = [] -unescape = true -utf8 = false - -# But this DOES get matches since \B in ASCII mode only looks at individual -# bytes. -[[test]] -name = "unicode5-not-noutf8-only-ascii" -regex = '\B' -haystack = '0\xFF\xFF\xFF\xFF' -matches = [[2, 2], [3, 3], [4, 4], [5, 5]] -unescape = true -unicode = false -utf8 = false - -# Some tests of no particular significance. -[[test]] -name = "unicode6" -regex = '\b[0-9]+\b' -haystack = "foo 123 bar 456 quux 789" -matches = [[4, 7], [12, 15], [21, 24]] - -[[test]] -name = "unicode7" -regex = '\b[0-9]+\b' -haystack = "foo 123 bar a456 quux 789" -matches = [[4, 7], [22, 25]] - -[[test]] -name = "unicode8" -regex = '\b[0-9]+\b' -haystack = "foo 123 bar 456a quux 789" -matches = [[4, 7], [22, 25]] - -# A variant of the problem described here: -# https://github.com/google/re2/blob/89567f5de5b23bb5ad0c26cbafc10bdc7389d1fa/re2/dfa.cc#L658-L667 -[[test]] -name = "alt-with-assertion-repetition" -regex = '(?:\b|%)+' -haystack = "z%" -bounds = [1, 2] -anchored = true -matches = [[1, 1]] diff --git a/vendor/regex/tests/lib.rs b/vendor/regex/tests/lib.rs deleted file mode 100644 index b3f69423d955a0..00000000000000 --- a/vendor/regex/tests/lib.rs +++ /dev/null @@ -1,58 +0,0 @@ -#![cfg_attr(feature = "pattern", feature(pattern))] - -mod fuzz; -mod misc; -mod regression; -mod regression_fuzz; -mod replace; -#[cfg(feature = "pattern")] -mod searcher; -mod suite_bytes; -mod suite_bytes_set; -mod suite_string; -mod suite_string_set; - -const BLACKLIST: &[&str] = &[ - // Nothing to blacklist yet! -]; - -fn suite() -> anyhow::Result<regex_test::RegexTests> { - let _ = env_logger::try_init(); - - let mut tests = regex_test::RegexTests::new(); - macro_rules! 
load { - ($name:expr) => {{ - const DATA: &[u8] = - include_bytes!(concat!("../testdata/", $name, ".toml")); - tests.load_slice($name, DATA)?; - }}; - } - - load!("anchored"); - load!("bytes"); - load!("crazy"); - load!("crlf"); - load!("earliest"); - load!("empty"); - load!("expensive"); - load!("flags"); - load!("iter"); - load!("leftmost-all"); - load!("line-terminator"); - load!("misc"); - load!("multiline"); - load!("no-unicode"); - load!("overlapping"); - load!("regression"); - load!("set"); - load!("substring"); - load!("unicode"); - load!("utf8"); - load!("word-boundary"); - load!("word-boundary-special"); - load!("fowler/basic"); - load!("fowler/nullsubexpr"); - load!("fowler/repetition"); - - Ok(tests) -} diff --git a/vendor/regex/tests/misc.rs b/vendor/regex/tests/misc.rs deleted file mode 100644 index c04c9c9fe2b367..00000000000000 --- a/vendor/regex/tests/misc.rs +++ /dev/null @@ -1,143 +0,0 @@ -use regex::Regex; - -macro_rules! regex { - ($pattern:expr) => { - regex::Regex::new($pattern).unwrap() - }; -} - -#[test] -fn unclosed_group_error() { - let err = Regex::new(r"(").unwrap_err(); - let msg = err.to_string(); - assert!(msg.contains("unclosed group"), "error message: {msg:?}"); -} - -#[test] -fn regex_string() { - assert_eq!(r"[a-zA-Z0-9]+", regex!(r"[a-zA-Z0-9]+").as_str()); - assert_eq!(r"[a-zA-Z0-9]+", &format!("{}", regex!(r"[a-zA-Z0-9]+"))); - assert_eq!( - r#"Regex("[a-zA-Z0-9]+")"#, - &format!("{:?}", regex!(r"[a-zA-Z0-9]+")) - ); -} - -#[test] -fn capture_names() { - let re = regex!(r"(.)(?P<a>.)"); - assert_eq!(3, re.captures_len()); - assert_eq!((3, Some(3)), re.capture_names().size_hint()); - assert_eq!( - vec![None, None, Some("a")], - re.capture_names().collect::<Vec<_>>() - ); -} - -#[test] -fn capture_index() { - let re = regex!(r"^(?P<name>.+)$"); - let cap = re.captures("abc").unwrap(); - assert_eq!(&cap[0], "abc"); - assert_eq!(&cap[1], "abc"); - assert_eq!(&cap["name"], "abc"); -} - -#[test] -#[should_panic] -fn capture_index_panic_usize() { - let re = regex!(r"^(?P<name>.+)$"); - let cap = re.captures("abc").unwrap(); - let _ = cap[2]; -} - -#[test] -#[should_panic] -fn capture_index_panic_name() { - let re = regex!(r"^(?P<name>.+)$"); - let cap = re.captures("abc").unwrap(); - let _ = cap["bad name"]; -} - -#[test] -fn capture_index_lifetime() { - // This is a test of whether the types on `caps["..."]` are general - // enough. If not, this will fail to typecheck. 
- fn inner(s: &str) -> usize { - let re = regex!(r"(?P<number>[0-9]+)"); - let caps = re.captures(s).unwrap(); - caps["number"].len() - } - assert_eq!(3, inner("123")); -} - -#[test] -fn capture_misc() { - let re = regex!(r"(.)(?P<a>a)?(.)(?P<b>.)"); - let cap = re.captures("abc").unwrap(); - - assert_eq!(5, cap.len()); - - assert_eq!((0, 3), { - let m = cap.get(0).unwrap(); - (m.start(), m.end()) - }); - assert_eq!(None, cap.get(2)); - assert_eq!((2, 3), { - let m = cap.get(4).unwrap(); - (m.start(), m.end()) - }); - - assert_eq!("abc", cap.get(0).unwrap().as_str()); - assert_eq!(None, cap.get(2)); - assert_eq!("c", cap.get(4).unwrap().as_str()); - - assert_eq!(None, cap.name("a")); - assert_eq!("c", cap.name("b").unwrap().as_str()); -} - -#[test] -fn sub_capture_matches() { - let re = regex!(r"([a-z])(([a-z])|([0-9]))"); - let cap = re.captures("a5").unwrap(); - let subs: Vec<_> = cap.iter().collect(); - - assert_eq!(5, subs.len()); - assert!(subs[0].is_some()); - assert!(subs[1].is_some()); - assert!(subs[2].is_some()); - assert!(subs[3].is_none()); - assert!(subs[4].is_some()); - - assert_eq!("a5", subs[0].unwrap().as_str()); - assert_eq!("a", subs[1].unwrap().as_str()); - assert_eq!("5", subs[2].unwrap().as_str()); - assert_eq!("5", subs[4].unwrap().as_str()); -} - -// Test that the DFA can handle pathological cases. (This should result in the -// DFA's cache being flushed too frequently, which should cause it to quit and -// fall back to the NFA algorithm.) -#[test] -fn dfa_handles_pathological_case() { - fn ones_and_zeroes(count: usize) -> String { - let mut s = String::new(); - for i in 0..count { - if i % 3 == 0 { - s.push('1'); - } else { - s.push('0'); - } - } - s - } - - let re = regex!(r"[01]*1[01]{20}$"); - let text = { - let mut pieces = ones_and_zeroes(100_000); - pieces.push('1'); - pieces.push_str(&ones_and_zeroes(20)); - pieces - }; - assert!(re.is_match(&text)); -} diff --git a/vendor/regex/tests/regression.rs b/vendor/regex/tests/regression.rs deleted file mode 100644 index a5867016b211cc..00000000000000 --- a/vendor/regex/tests/regression.rs +++ /dev/null @@ -1,94 +0,0 @@ -use regex::Regex; - -macro_rules! 
regex { - ($pattern:expr) => { - regex::Regex::new($pattern).unwrap() - }; -} - -// See: https://github.com/rust-lang/regex/issues/48 -#[test] -fn invalid_regexes_no_crash() { - assert!(Regex::new("(*)").is_err()); - assert!(Regex::new("(?:?)").is_err()); - assert!(Regex::new("(?)").is_err()); - assert!(Regex::new("*").is_err()); -} - -// See: https://github.com/rust-lang/regex/issues/98 -#[test] -fn regression_many_repeat_stack_overflow() { - let re = regex!("^.{1,2500}"); - assert_eq!( - vec![0..1], - re.find_iter("a").map(|m| m.range()).collect::<Vec<_>>() - ); -} - -// See: https://github.com/rust-lang/regex/issues/555 -#[test] -fn regression_invalid_repetition_expr() { - assert!(Regex::new("(?m){1,1}").is_err()); -} - -// See: https://github.com/rust-lang/regex/issues/527 -#[test] -fn regression_invalid_flags_expression() { - assert!(Regex::new("(((?x)))").is_ok()); -} - -// See: https://github.com/rust-lang/regex/issues/129 -#[test] -fn regression_captures_rep() { - let re = regex!(r"([a-f]){2}(?P<foo>[x-z])"); - let caps = re.captures("abx").unwrap(); - assert_eq!(&caps["foo"], "x"); -} - -// See: https://github.com/BurntSushi/ripgrep/issues/1247 -#[cfg(feature = "unicode-perl")] -#[test] -fn regression_nfa_stops1() { - let re = regex::bytes::Regex::new(r"\bs(?:[ab])").unwrap(); - assert_eq!(0, re.find_iter(b"s\xE4").count()); -} - -// See: https://github.com/rust-lang/regex/issues/981 -#[cfg(feature = "unicode")] -#[test] -fn regression_bad_word_boundary() { - let re = regex!(r#"(?i:(?:\b|_)win(?:32|64|dows)?(?:\b|_))"#); - let hay = "ubi-Darwin-x86_64.tar.gz"; - assert!(!re.is_match(hay)); - let hay = "ubi-Windows-x86_64.zip"; - assert!(re.is_match(hay)); -} - -// See: https://github.com/rust-lang/regex/issues/982 -#[cfg(feature = "unicode-perl")] -#[test] -fn regression_unicode_perl_not_enabled() { - let pat = r"(\d+\s?(years|year|y))?\s?(\d+\s?(months|month|m))?\s?(\d+\s?(weeks|week|w))?\s?(\d+\s?(days|day|d))?\s?(\d+\s?(hours|hour|h))?"; - assert!(Regex::new(pat).is_ok()); -} - -// See: https://github.com/rust-lang/regex/issues/995 -#[test] -fn regression_big_regex_overflow() { - let pat = r" {2147483516}{2147483416}{5}"; - assert!(Regex::new(pat).is_err()); -} - -// See: https://github.com/rust-lang/regex/issues/999 -#[test] -fn regression_complete_literals_suffix_incorrect() { - let needles = vec![ - "aA", "bA", "cA", "dA", "eA", "fA", "gA", "hA", "iA", "jA", "kA", - "lA", "mA", "nA", "oA", "pA", "qA", "rA", "sA", "tA", "uA", "vA", - "wA", "xA", "yA", "zA", - ]; - let pattern = needles.join("|"); - let re = regex!(&pattern); - let hay = "FUBAR"; - assert_eq!(0, re.find_iter(hay).count()); -} diff --git a/vendor/regex/tests/regression_fuzz.rs b/vendor/regex/tests/regression_fuzz.rs deleted file mode 100644 index f90ad4cb20d685..00000000000000 --- a/vendor/regex/tests/regression_fuzz.rs +++ /dev/null @@ -1,61 +0,0 @@ -// These tests are only run for the "default" test target because some of them -// can take quite a long time. Some of them take long enough that it's not -// practical to run them in debug mode. :-/ - -use regex::Regex; - -macro_rules! regex { - ($pattern:expr) => { - regex::Regex::new($pattern).unwrap() - }; -} - -// See: https://oss-fuzz.com/testcase-detail/5673225499181056 -// -// Ignored by default since it takes too long in debug mode (almost a minute). 
-#[test] -#[ignore] -fn fuzz1() { - regex!(r"1}{55}{0}*{1}{55}{55}{5}*{1}{55}+{56}|;**"); -} - -// See: https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=26505 -// See: https://github.com/rust-lang/regex/issues/722 -#[test] -#[cfg(feature = "unicode")] -fn empty_any_errors_no_panic() { - assert!(Regex::new(r"\P{any}").is_ok()); -} - -// This tests that a very large regex errors during compilation instead of -// using gratuitous amounts of memory. The specific problem is that the -// compiler wasn't accounting for the memory used by Unicode character classes -// correctly. -// -// See: https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=33579 -#[test] -fn big_regex_fails_to_compile() { - let pat = "[\u{0}\u{e}\u{2}\\w~~>[l\t\u{0}]p?<]{971158}"; - assert!(Regex::new(pat).is_err()); -} - -// This was caught while on master but before a release went out(!). -// -// See: https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=58173 -#[test] -fn todo() { - let pat = "(?:z|xx)@|xx"; - assert!(Regex::new(pat).is_ok()); -} - -// This was caused by the fuzzer, and then minimized by hand. -// -// This was caused by a bug in DFA determinization that mishandled NFA fail -// states. -#[test] -fn fail_branch_prevents_match() { - let pat = r".*[a&&b]A|B"; - let hay = "B"; - let re = Regex::new(pat).unwrap(); - assert!(re.is_match(hay)); -} diff --git a/vendor/regex/tests/replace.rs b/vendor/regex/tests/replace.rs deleted file mode 100644 index f26ae46030bad2..00000000000000 --- a/vendor/regex/tests/replace.rs +++ /dev/null @@ -1,183 +0,0 @@ -macro_rules! replace( - ($name:ident, $which:ident, $re:expr, - $search:expr, $replace:expr, $result:expr) => ( - #[test] - fn $name() { - let re = regex::Regex::new($re).unwrap(); - assert_eq!(re.$which($search, $replace), $result); - } - ); -); - -replace!(first, replace, r"[0-9]", "age: 26", "Z", "age: Z6"); -replace!(plus, replace, r"[0-9]+", "age: 26", "Z", "age: Z"); -replace!(all, replace_all, r"[0-9]", "age: 26", "Z", "age: ZZ"); -replace!(groups, replace, r"([^ ]+)[ ]+([^ ]+)", "w1 w2", "$2 $1", "w2 w1"); -replace!( - double_dollar, - replace, - r"([^ ]+)[ ]+([^ ]+)", - "w1 w2", - "$2 $$1", - "w2 $1" -); -// replace!(adjacent_index, replace, -// r"([^aeiouy])ies$", "skies", "$1y", "sky"); -replace!( - named, - replace_all, - r"(?P<first>[^ ]+)[ ]+(?P<last>[^ ]+)(?P<space>[ ]*)", - "w1 w2 w3 w4", - "$last $first$space", - "w2 w1 w4 w3" -); -replace!( - trim, - replace_all, - "^[ \t]+|[ \t]+$", - " \t trim me\t \t", - "", - "trim me" -); -replace!(number_hyphen, replace, r"(.)(.)", "ab", "$1-$2", "a-b"); -// replace!(number_underscore, replace, r"(.)(.)", "ab", "$1_$2", "a_b"); -replace!( - simple_expand, - replace_all, - r"([a-z]) ([a-z])", - "a b", - "$2 $1", - "b a" -); -replace!( - literal_dollar1, - replace_all, - r"([a-z]+) ([a-z]+)", - "a b", - "$$1", - "$1" -); -replace!( - literal_dollar2, - replace_all, - r"([a-z]+) ([a-z]+)", - "a b", - "$2 $$c $1", - "b $c a" -); -replace!( - no_expand1, - replace, - r"([^ ]+)[ ]+([^ ]+)", - "w1 w2", - regex::NoExpand("$2 $1"), - "$2 $1" -); -replace!( - no_expand2, - replace, - r"([^ ]+)[ ]+([^ ]+)", - "w1 w2", - regex::NoExpand("$$1"), - "$$1" -); -replace!( - closure_returning_reference, - replace, - r"([0-9]+)", - "age: 26", - |captures: ®ex::Captures<'_>| { captures[1][0..1].to_owned() }, - "age: 2" -); -replace!( - closure_returning_value, - replace, - r"[0-9]+", - "age: 26", - |_captures: ®ex::Captures<'_>| "Z".to_owned(), - "age: Z" -); - -// See https://github.com/rust-lang/regex/issues/314 -replace!( - 
match_at_start_replace_with_empty, - replace_all, - r"foo", - "foobar", - "", - "bar" -); - -// See https://github.com/rust-lang/regex/issues/393 -replace!(single_empty_match, replace, r"^", "bar", "foo", "foobar"); - -// See https://github.com/rust-lang/regex/issues/399 -replace!( - capture_longest_possible_name, - replace_all, - r"(.)", - "b", - "${1}a $1a", - "ba " -); - -replace!( - impl_string, - replace, - r"[0-9]", - "age: 26", - "Z".to_string(), - "age: Z6" -); -replace!( - impl_string_ref, - replace, - r"[0-9]", - "age: 26", - &"Z".to_string(), - "age: Z6" -); -replace!( - impl_cow_str_borrowed, - replace, - r"[0-9]", - "age: 26", - std::borrow::Cow::<'_, str>::Borrowed("Z"), - "age: Z6" -); -replace!( - impl_cow_str_borrowed_ref, - replace, - r"[0-9]", - "age: 26", - &std::borrow::Cow::<'_, str>::Borrowed("Z"), - "age: Z6" -); -replace!( - impl_cow_str_owned, - replace, - r"[0-9]", - "age: 26", - std::borrow::Cow::<'_, str>::Owned("Z".to_string()), - "age: Z6" -); -replace!( - impl_cow_str_owned_ref, - replace, - r"[0-9]", - "age: 26", - &std::borrow::Cow::<'_, str>::Owned("Z".to_string()), - "age: Z6" -); - -#[test] -fn replacen_no_captures() { - let re = regex::Regex::new(r"[0-9]").unwrap(); - assert_eq!(re.replacen("age: 1234", 2, "Z"), "age: ZZ34"); -} - -#[test] -fn replacen_with_captures() { - let re = regex::Regex::new(r"([0-9])").unwrap(); - assert_eq!(re.replacen("age: 1234", 2, "${1}Z"), "age: 1Z2Z34"); -} diff --git a/vendor/regex/tests/searcher.rs b/vendor/regex/tests/searcher.rs deleted file mode 100644 index f6dae13105ffe9..00000000000000 --- a/vendor/regex/tests/searcher.rs +++ /dev/null @@ -1,93 +0,0 @@ -macro_rules! searcher { - ($name:ident, $re:expr, $haystack:expr) => ( - searcher!($name, $re, $haystack, vec vec![]); - ); - ($name:ident, $re:expr, $haystack:expr, $($steps:expr,)*) => ( - searcher!($name, $re, $haystack, vec vec![$($steps),*]); - ); - ($name:ident, $re:expr, $haystack:expr, $($steps:expr),*) => ( - searcher!($name, $re, $haystack, vec vec![$($steps),*]); - ); - ($name:ident, $re:expr, $haystack:expr, vec $expect_steps:expr) => ( - #[test] - #[allow(unused_imports)] - fn $name() { - use std::str::pattern::{Pattern, Searcher}; - use std::str::pattern::SearchStep::{Match, Reject, Done}; - let re = regex::Regex::new($re).unwrap(); - let mut se = re.into_searcher($haystack); - let mut got_steps = vec![]; - loop { - match se.next() { - Done => break, - step => { got_steps.push(step); } - } - } - assert_eq!(got_steps, $expect_steps); - } - ); -} - -searcher!(searcher_empty_regex_empty_haystack, r"", "", Match(0, 0)); -searcher!( - searcher_empty_regex, - r"", - "ab", - Match(0, 0), - Reject(0, 1), - Match(1, 1), - Reject(1, 2), - Match(2, 2) -); -searcher!(searcher_empty_haystack, r"\d", ""); -searcher!(searcher_one_match, r"\d", "5", Match(0, 1)); -searcher!(searcher_no_match, r"\d", "a", Reject(0, 1)); -searcher!( - searcher_two_adjacent_matches, - r"\d", - "56", - Match(0, 1), - Match(1, 2) -); -searcher!( - searcher_two_non_adjacent_matches, - r"\d", - "5a6", - Match(0, 1), - Reject(1, 2), - Match(2, 3) -); -searcher!(searcher_reject_first, r"\d", "a6", Reject(0, 1), Match(1, 2)); -searcher!( - searcher_one_zero_length_matches, - r"\d*", - "a1b2", - Match(0, 0), // ^ - Reject(0, 1), // a - Match(1, 2), // a1 - Reject(2, 3), // a1b - Match(3, 4), // a1b2 -); -searcher!( - searcher_many_zero_length_matches, - r"\d*", - "a1bbb2", - Match(0, 0), // ^ - Reject(0, 1), // a - Match(1, 2), // a1 - Reject(2, 3), // a1b - Match(3, 3), // a1bb - Reject(3, 
4), // a1bb - Match(4, 4), // a1bbb - Reject(4, 5), // a1bbb - Match(5, 6), // a1bbba -); -searcher!( - searcher_unicode, - r".+?", - "Ⅰ1Ⅱ2", - Match(0, 3), - Match(3, 4), - Match(4, 7), - Match(7, 8) -); diff --git a/vendor/regex/tests/suite_bytes.rs b/vendor/regex/tests/suite_bytes.rs deleted file mode 100644 index 784b1a47adcbba..00000000000000 --- a/vendor/regex/tests/suite_bytes.rs +++ /dev/null @@ -1,108 +0,0 @@ -use { - anyhow::Result, - regex::bytes::{Regex, RegexBuilder}, - regex_test::{ - CompiledRegex, Match, RegexTest, Span, TestResult, TestRunner, - }, -}; - -/// Tests the default configuration of the hybrid NFA/DFA. -#[test] -fn default() -> Result<()> { - let mut runner = TestRunner::new()?; - runner - .expand(&["is_match", "find", "captures"], |test| test.compiles()) - .blacklist_iter(super::BLACKLIST) - .test_iter(crate::suite()?.iter(), compiler) - .assert(); - Ok(()) -} - -fn run_test(re: &Regex, test: &RegexTest) -> TestResult { - match test.additional_name() { - "is_match" => TestResult::matched(re.is_match(test.haystack())), - "find" => TestResult::matches( - re.find_iter(test.haystack()) - .take(test.match_limit().unwrap_or(std::usize::MAX)) - .map(|m| Match { - id: 0, - span: Span { start: m.start(), end: m.end() }, - }), - ), - "captures" => { - let it = re - .captures_iter(test.haystack()) - .take(test.match_limit().unwrap_or(std::usize::MAX)) - .map(|caps| testify_captures(&caps)); - TestResult::captures(it) - } - name => TestResult::fail(&format!("unrecognized test name: {name}")), - } -} - -/// Converts the given regex test to a closure that searches with a -/// `bytes::Regex`. If the test configuration is unsupported, then a -/// `CompiledRegex` that skips the test is returned. -fn compiler( - test: &RegexTest, - _patterns: &[String], -) -> anyhow::Result<CompiledRegex> { - let skip = Ok(CompiledRegex::skip()); - - // We're only testing bytes::Regex here, which supports one pattern only. - let pattern = match test.regexes().len() { - 1 => &test.regexes()[0], - _ => return skip, - }; - // We only test is_match, find_iter and captures_iter. All of those are - // leftmost searches. - if !matches!(test.search_kind(), regex_test::SearchKind::Leftmost) { - return skip; - } - // The top-level single-pattern regex API always uses leftmost-first. - if !matches!(test.match_kind(), regex_test::MatchKind::LeftmostFirst) { - return skip; - } - // The top-level regex API always runs unanchored searches. ... But we can - // handle tests that are anchored but have only one match. - if test.anchored() && test.match_limit() != Some(1) { - return skip; - } - // We don't support tests with explicit search bounds. We could probably - // support this by using the 'find_at' (and such) APIs. - let bounds = test.bounds(); - if !(bounds.start == 0 && bounds.end == test.haystack().len()) { - return skip; - } - // The bytes::Regex API specifically does not support enabling UTF-8 mode. - // It could I suppose, but currently it does not. That is, it permits - // matches to have offsets that split codepoints. - if test.utf8() { - return skip; - } - // If the test requires Unicode but the Unicode feature isn't enabled, - // skip it. This is a little aggressive, but the test suite doesn't - // have any easy way of communicating which Unicode features are needed. 
- if test.unicode() && !cfg!(feature = "unicode") { - return skip; - } - let re = RegexBuilder::new(pattern) - .case_insensitive(test.case_insensitive()) - .unicode(test.unicode()) - .line_terminator(test.line_terminator()) - .build()?; - Ok(CompiledRegex::compiled(move |test| run_test(&re, test))) -} - -/// Convert `Captures` into the test suite's capture values. -fn testify_captures( - caps: ®ex::bytes::Captures<'_>, -) -> regex_test::Captures { - let spans = caps.iter().map(|group| { - group.map(|m| regex_test::Span { start: m.start(), end: m.end() }) - }); - // This unwrap is OK because we assume our 'caps' represents a match, and - // a match always gives a non-zero number of groups with the first group - // being non-None. - regex_test::Captures::new(0, spans).unwrap() -} diff --git a/vendor/regex/tests/suite_bytes_set.rs b/vendor/regex/tests/suite_bytes_set.rs deleted file mode 100644 index 9b75f8da1fedae..00000000000000 --- a/vendor/regex/tests/suite_bytes_set.rs +++ /dev/null @@ -1,71 +0,0 @@ -use { - anyhow::Result, - regex::bytes::{RegexSet, RegexSetBuilder}, - regex_test::{CompiledRegex, RegexTest, TestResult, TestRunner}, -}; - -/// Tests the default configuration of the hybrid NFA/DFA. -#[test] -fn default() -> Result<()> { - let mut runner = TestRunner::new()?; - runner - .expand(&["is_match", "which"], |test| test.compiles()) - .blacklist_iter(super::BLACKLIST) - .test_iter(crate::suite()?.iter(), compiler) - .assert(); - Ok(()) -} - -fn run_test(re: &RegexSet, test: &RegexTest) -> TestResult { - match test.additional_name() { - "is_match" => TestResult::matched(re.is_match(test.haystack())), - "which" => TestResult::which(re.matches(test.haystack()).iter()), - name => TestResult::fail(&format!("unrecognized test name: {name}")), - } -} - -/// Converts the given regex test to a closure that searches with a -/// `bytes::Regex`. If the test configuration is unsupported, then a -/// `CompiledRegex` that skips the test is returned. -fn compiler( - test: &RegexTest, - _patterns: &[String], -) -> anyhow::Result<CompiledRegex> { - let skip = Ok(CompiledRegex::skip()); - - // The top-level RegexSet API only supports "overlapping" semantics. - if !matches!(test.search_kind(), regex_test::SearchKind::Overlapping) { - return skip; - } - // The top-level RegexSet API only supports "all" semantics. - if !matches!(test.match_kind(), regex_test::MatchKind::All) { - return skip; - } - // The top-level RegexSet API always runs unanchored searches. - if test.anchored() { - return skip; - } - // We don't support tests with explicit search bounds. - let bounds = test.bounds(); - if !(bounds.start == 0 && bounds.end == test.haystack().len()) { - return skip; - } - // The bytes::Regex API specifically does not support enabling UTF-8 mode. - // It could I suppose, but currently it does not. That is, it permits - // matches to have offsets that split codepoints. - if test.utf8() { - return skip; - } - // If the test requires Unicode but the Unicode feature isn't enabled, - // skip it. This is a little aggressive, but the test suite doesn't - // have any easy way of communicating which Unicode features are needed. 
- if test.unicode() && !cfg!(feature = "unicode") { - return skip; - } - let re = RegexSetBuilder::new(test.regexes()) - .case_insensitive(test.case_insensitive()) - .unicode(test.unicode()) - .line_terminator(test.line_terminator()) - .build()?; - Ok(CompiledRegex::compiled(move |test| run_test(&re, test))) -} diff --git a/vendor/regex/tests/suite_string.rs b/vendor/regex/tests/suite_string.rs deleted file mode 100644 index 2a6d7709be7521..00000000000000 --- a/vendor/regex/tests/suite_string.rs +++ /dev/null @@ -1,113 +0,0 @@ -use { - anyhow::Result, - regex::{Regex, RegexBuilder}, - regex_test::{ - CompiledRegex, Match, RegexTest, Span, TestResult, TestRunner, - }, -}; - -/// Tests the default configuration of the hybrid NFA/DFA. -#[test] -fn default() -> Result<()> { - let mut runner = TestRunner::new()?; - runner - .expand(&["is_match", "find", "captures"], |test| test.compiles()) - .blacklist_iter(super::BLACKLIST) - .test_iter(crate::suite()?.iter(), compiler) - .assert(); - Ok(()) -} - -fn run_test(re: &Regex, test: &RegexTest) -> TestResult { - let hay = match std::str::from_utf8(test.haystack()) { - Ok(hay) => hay, - Err(err) => { - return TestResult::fail(&format!( - "haystack is not valid UTF-8: {err}", - )); - } - }; - match test.additional_name() { - "is_match" => TestResult::matched(re.is_match(hay)), - "find" => TestResult::matches( - re.find_iter(hay) - .take(test.match_limit().unwrap_or(std::usize::MAX)) - .map(|m| Match { - id: 0, - span: Span { start: m.start(), end: m.end() }, - }), - ), - "captures" => { - let it = re - .captures_iter(hay) - .take(test.match_limit().unwrap_or(std::usize::MAX)) - .map(|caps| testify_captures(&caps)); - TestResult::captures(it) - } - name => TestResult::fail(&format!("unrecognized test name: {name}")), - } -} - -/// Converts the given regex test to a closure that searches with a -/// `bytes::Regex`. If the test configuration is unsupported, then a -/// `CompiledRegex` that skips the test is returned. -fn compiler( - test: &RegexTest, - _patterns: &[String], -) -> anyhow::Result<CompiledRegex> { - let skip = Ok(CompiledRegex::skip()); - - // We're only testing bytes::Regex here, which supports one pattern only. - let pattern = match test.regexes().len() { - 1 => &test.regexes()[0], - _ => return skip, - }; - // We only test is_match, find_iter and captures_iter. All of those are - // leftmost searches. - if !matches!(test.search_kind(), regex_test::SearchKind::Leftmost) { - return skip; - } - // The top-level single-pattern regex API always uses leftmost-first. - if !matches!(test.match_kind(), regex_test::MatchKind::LeftmostFirst) { - return skip; - } - // The top-level regex API always runs unanchored searches. ... But we can - // handle tests that are anchored but have only one match. - if test.anchored() && test.match_limit() != Some(1) { - return skip; - } - // We don't support tests with explicit search bounds. We could probably - // support this by using the 'find_at' (and such) APIs. - let bounds = test.bounds(); - if !(bounds.start == 0 && bounds.end == test.haystack().len()) { - return skip; - } - // The Regex API specifically does not support disabling UTF-8 mode because - // it can only search &str which is always valid UTF-8. - if !test.utf8() { - return skip; - } - // If the test requires Unicode but the Unicode feature isn't enabled, - // skip it. This is a little aggressive, but the test suite doesn't - // have any easy way of communicating which Unicode features are needed. 
- if test.unicode() && !cfg!(feature = "unicode") { - return skip; - } - let re = RegexBuilder::new(pattern) - .case_insensitive(test.case_insensitive()) - .unicode(test.unicode()) - .line_terminator(test.line_terminator()) - .build()?; - Ok(CompiledRegex::compiled(move |test| run_test(&re, test))) -} - -/// Convert `Captures` into the test suite's capture values. -fn testify_captures(caps: ®ex::Captures<'_>) -> regex_test::Captures { - let spans = caps.iter().map(|group| { - group.map(|m| regex_test::Span { start: m.start(), end: m.end() }) - }); - // This unwrap is OK because we assume our 'caps' represents a match, and - // a match always gives a non-zero number of groups with the first group - // being non-None. - regex_test::Captures::new(0, spans).unwrap() -} diff --git a/vendor/regex/tests/suite_string_set.rs b/vendor/regex/tests/suite_string_set.rs deleted file mode 100644 index 122e39c75e908b..00000000000000 --- a/vendor/regex/tests/suite_string_set.rs +++ /dev/null @@ -1,78 +0,0 @@ -use { - anyhow::Result, - regex::{RegexSet, RegexSetBuilder}, - regex_test::{CompiledRegex, RegexTest, TestResult, TestRunner}, -}; - -/// Tests the default configuration of the hybrid NFA/DFA. -#[test] -fn default() -> Result<()> { - let mut runner = TestRunner::new()?; - runner - .expand(&["is_match", "which"], |test| test.compiles()) - .blacklist_iter(super::BLACKLIST) - .test_iter(crate::suite()?.iter(), compiler) - .assert(); - Ok(()) -} - -fn run_test(re: &RegexSet, test: &RegexTest) -> TestResult { - let hay = match std::str::from_utf8(test.haystack()) { - Ok(hay) => hay, - Err(err) => { - return TestResult::fail(&format!( - "haystack is not valid UTF-8: {err}", - )); - } - }; - match test.additional_name() { - "is_match" => TestResult::matched(re.is_match(hay)), - "which" => TestResult::which(re.matches(hay).iter()), - name => TestResult::fail(&format!("unrecognized test name: {name}")), - } -} - -/// Converts the given regex test to a closure that searches with a -/// `bytes::Regex`. If the test configuration is unsupported, then a -/// `CompiledRegex` that skips the test is returned. -fn compiler( - test: &RegexTest, - _patterns: &[String], -) -> anyhow::Result<CompiledRegex> { - let skip = Ok(CompiledRegex::skip()); - - // The top-level RegexSet API only supports "overlapping" semantics. - if !matches!(test.search_kind(), regex_test::SearchKind::Overlapping) { - return skip; - } - // The top-level RegexSet API only supports "all" semantics. - if !matches!(test.match_kind(), regex_test::MatchKind::All) { - return skip; - } - // The top-level RegexSet API always runs unanchored searches. - if test.anchored() { - return skip; - } - // We don't support tests with explicit search bounds. - let bounds = test.bounds(); - if !(bounds.start == 0 && bounds.end == test.haystack().len()) { - return skip; - } - // The Regex API specifically does not support disabling UTF-8 mode because - // it can only search &str which is always valid UTF-8. - if !test.utf8() { - return skip; - } - // If the test requires Unicode but the Unicode feature isn't enabled, - // skip it. This is a little aggressive, but the test suite doesn't - // have any easy way of communicating which Unicode features are needed. 
- if test.unicode() && !cfg!(feature = "unicode") { - return skip; - } - let re = RegexSetBuilder::new(test.regexes()) - .case_insensitive(test.case_insensitive()) - .unicode(test.unicode()) - .line_terminator(test.line_terminator()) - .build()?; - Ok(CompiledRegex::compiled(move |test| run_test(&re, test))) -} diff --git a/vendor/rustc-hash/.cargo-checksum.json b/vendor/rustc-hash/.cargo-checksum.json deleted file mode 100644 index 694a5dfff97328..00000000000000 --- a/vendor/rustc-hash/.cargo-checksum.json +++ /dev/null @@ -1 +0,0 @@ -{"files":{".cargo_vcs_info.json":"afbb3c737b1463a6ef5ba68383db3ddcd9cc1228ea88b9f4fa488619f7bb2fb2",".github/workflows/rust.yml":"ff0288f89c97203b725441e9d9717fa0c049f1cebb2bcbb556981bfa8be10029","CHANGELOG.md":"1c9951d52d63dfbff8d32ad7909761517db5dc8f9084dba7892da8d5028c9692","CODE_OF_CONDUCT.md":"3e77f5476805b69467641b2c682aa2355344395056939089182cd901c56dce63","Cargo.lock":"213c05814f6402c09bc5bf32579187b5448ec81f8530f58d7289fd50a35d5b2a","Cargo.toml":"110004d0c56ebe79a83b2cbe44b1574a69010a9e4a8581e5215ac14251ef30cc","Cargo.toml.orig":"4c6ecfcf11005839dc797e6de41b70c97ab043a9a9fe8cc1c27b904252ce8ae7","LICENSE-APACHE":"95bd3988beee069fa2848f648dab43cc6e0b2add2ad6bcb17360caf749802bcc","LICENSE-MIT":"30fefc3a7d6a0041541858293bcbea2dde4caa4c0a5802f996a7f7e8c0085652","README.md":"ccd7a15a2e2021dbbfd5b7f99a10666a64ac50f8d5d6926a858efdde724fb424","src/lib.rs":"6928d71e403482e0e6f3324fbcef23a731c9236a5315db829f4020991064c5fa","src/random_state.rs":"39063b702c38dc93b7a9039f19f4acfdc539acf1604584a87eeb43cca149ca7e","src/seeded_state.rs":"530ba6e25d766231cc7540f968d3e41c5af5a38d936542b407010b9d35746fd8"},"package":"357703d41365b4b27c590e3ed91eabb1b663f07c4c084095e60cbed4362dff0d"} \ No newline at end of file diff --git a/vendor/rustc-hash/.cargo_vcs_info.json b/vendor/rustc-hash/.cargo_vcs_info.json deleted file mode 100644 index 63811667ce1f28..00000000000000 --- a/vendor/rustc-hash/.cargo_vcs_info.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "git": { - "sha1": "dc5c33f1283de2da64d8d7a06401d91aded03ad4" - }, - "path_in_vcs": "" -} \ No newline at end of file diff --git a/vendor/rustc-hash/.github/workflows/rust.yml b/vendor/rustc-hash/.github/workflows/rust.yml deleted file mode 100644 index 0a019cbfb00afd..00000000000000 --- a/vendor/rustc-hash/.github/workflows/rust.yml +++ /dev/null @@ -1,73 +0,0 @@ -name: Rust - -permissions: - contents: read - -on: [push, pull_request] - -env: - CARGO_TERM_COLOR: always - RUST_BACKTRACE: 1 - RUSTUP_MAX_RETRIES: 10 - -concurrency: - group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.sha }} - cancel-in-progress: true - -jobs: - test: - strategy: - matrix: - os: [ubuntu, windows, macos] - runs-on: ${{ matrix.os }}-latest - timeout-minutes: 30 - steps: - - uses: actions/checkout@v4 - - run: rustup update stable && rustup default stable - - run: cargo check - - run: cargo test - - run: rustup update nightly && rustup default nightly - - run: cargo test --all-features - cross-test: - strategy: - matrix: - target: [ - "x86_64-unknown-linux-gnu", # 64-bits, little-endian - "i686-unknown-linux-gnu", # 32-bits, little-endian - "mips-unknown-linux-gnu", # 32-bits, big-endian - "mips64-unknown-linux-gnuabi64", # 64-bits, big-endian - ] - runs-on: ubuntu-latest - timeout-minutes: 30 - steps: - - uses: actions/checkout@v4 - - name: install miri - run: rustup toolchain add nightly --no-self-update --component miri && rustup default nightly - - run: | - cargo miri test --target=${{ matrix.target }} --all-features - env: - 
MIRIFLAGS: -Zmiri-strict-provenance - RUSTDOCFLAGS: ${{ env.RUSTDOCFLAGS }} -Z randomize-layout - RUSTFLAGS: ${{ env.RUSTFLAGS }} -Z randomize-layout - fmt: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4 - - run: rustup update stable && rustup default stable - - run: rustup component add rustfmt - - run: cargo fmt --all --check - docs: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4 - - run: rustup update stable && rustup default stable - - run: cargo doc --workspace --document-private-items --no-deps - env: - RUSTDOCFLAGS: -D warnings - clippy: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4 - - run: rustup update stable && rustup default stable - - run: rustup component add clippy - - run: cargo clippy --workspace --all-targets --no-deps diff --git a/vendor/rustc-hash/CHANGELOG.md b/vendor/rustc-hash/CHANGELOG.md deleted file mode 100644 index d52aba07819be5..00000000000000 --- a/vendor/rustc-hash/CHANGELOG.md +++ /dev/null @@ -1,32 +0,0 @@ -# 2.1.1 - -- Change the internal algorithm to better accomodate large hashmaps. - This mitigates a [regression with 2.0 in rustc](https://github.com/rust-lang/rust/issues/135477). - See [PR#55](https://github.com/rust-lang/rustc-hash/pull/55) for more details on the change (this PR was not merged). - This problem might be improved with changes to hashbrown in the future. - -## 2.1.0 - -- Implement `Clone` for `FxRandomState` -- Implement `Clone` for `FxSeededState` -- Use SPDX license expression in license field - -## 2.0.0 - -- Replace hash with faster and better finalized hash. - This replaces the previous "fxhash" algorithm originating in Firefox - with a custom hasher designed and implemented by Orson Peters ([`@orlp`](https://github.com/orlp)). - It was measured to have slightly better performance for rustc, has better theoretical properties - and also includes a significantly better string hasher. -- Fix `no_std` builds - -## 1.2.0 (**YANKED**) - -**Note: This version has been yanked due to issues with the `no_std` feature!** - -- Add a `FxBuildHasher` unit struct -- Improve documentation -- Add seed API for supplying custom seeds other than 0 -- Add `FxRandomState` based on `rand` (behind the `rand` feature) for random seeds -- Make many functions `const fn` -- Implement `Clone` for `FxHasher` struct diff --git a/vendor/rustc-hash/CODE_OF_CONDUCT.md b/vendor/rustc-hash/CODE_OF_CONDUCT.md deleted file mode 100644 index d6d774281213a9..00000000000000 --- a/vendor/rustc-hash/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,3 +0,0 @@ -# The Rust Code of Conduct - -The Code of Conduct for this repository [can be found online](https://www.rust-lang.org/conduct.html). \ No newline at end of file diff --git a/vendor/rustc-hash/Cargo.lock b/vendor/rustc-hash/Cargo.lock deleted file mode 100644 index 2b1b0744884706..00000000000000 --- a/vendor/rustc-hash/Cargo.lock +++ /dev/null @@ -1,75 +0,0 @@ -# This file is automatically @generated by Cargo. -# It is not intended for manual editing. 
-version = 4 - -[[package]] -name = "cfg-if" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" - -[[package]] -name = "getrandom" -version = "0.2.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94b22e06ecb0110981051723910cbf0b5f5e09a2062dd7663334ee79a9d1286c" -dependencies = [ - "cfg-if", - "libc", - "wasi", -] - -[[package]] -name = "libc" -version = "0.2.153" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c198f91728a82281a64e1f4f9eeb25d82cb32a5de251c6bd1b5154d63a8e7bd" - -[[package]] -name = "ppv-lite86" -version = "0.2.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" - -[[package]] -name = "rand" -version = "0.8.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" -dependencies = [ - "libc", - "rand_chacha", - "rand_core", -] - -[[package]] -name = "rand_chacha" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" -dependencies = [ - "ppv-lite86", - "rand_core", -] - -[[package]] -name = "rand_core" -version = "0.6.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" -dependencies = [ - "getrandom", -] - -[[package]] -name = "rustc-hash" -version = "2.1.1" -dependencies = [ - "rand", -] - -[[package]] -name = "wasi" -version = "0.11.0+wasi-snapshot-preview1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" diff --git a/vendor/rustc-hash/Cargo.toml b/vendor/rustc-hash/Cargo.toml deleted file mode 100644 index a95ba3a3fd1555..00000000000000 --- a/vendor/rustc-hash/Cargo.toml +++ /dev/null @@ -1,49 +0,0 @@ -# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO -# -# When uploading crates to the registry Cargo will automatically -# "normalize" Cargo.toml files for maximal compatibility -# with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g., crates.io) dependencies. -# -# If you are reading this file be aware that the original Cargo.toml -# will likely look very different (and much more reasonable). -# See Cargo.toml.orig for the original contents. 
- -[package] -edition = "2021" -name = "rustc-hash" -version = "2.1.1" -authors = ["The Rust Project Developers"] -build = false -autolib = false -autobins = false -autoexamples = false -autotests = false -autobenches = false -description = "A speedy, non-cryptographic hashing algorithm used by rustc" -readme = "README.md" -keywords = [ - "hash", - "hasher", - "fxhash", - "rustc", -] -license = "Apache-2.0 OR MIT" -repository = "https://github.com/rust-lang/rustc-hash" - -[features] -default = ["std"] -nightly = [] -rand = [ - "dep:rand", - "std", -] -std = [] - -[lib] -name = "rustc_hash" -path = "src/lib.rs" - -[dependencies.rand] -version = "0.8" -optional = true diff --git a/vendor/rustc-hash/LICENSE-APACHE b/vendor/rustc-hash/LICENSE-APACHE deleted file mode 100644 index a7e77cb28d386e..00000000000000 --- a/vendor/rustc-hash/LICENSE-APACHE +++ /dev/null @@ -1,176 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - -2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - -3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - -4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - -5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - -6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - -8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- -END OF TERMS AND CONDITIONS \ No newline at end of file diff --git a/vendor/rustc-hash/LICENSE-MIT b/vendor/rustc-hash/LICENSE-MIT deleted file mode 100644 index 468cd79a8f6e50..00000000000000 --- a/vendor/rustc-hash/LICENSE-MIT +++ /dev/null @@ -1,23 +0,0 @@ -Permission is hereby granted, free of charge, to any -person obtaining a copy of this software and associated -documentation files (the "Software"), to deal in the -Software without restriction, including without -limitation the rights to use, copy, modify, merge, -publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software -is furnished to do so, subject to the following -conditions: - -The above copyright notice and this permission notice -shall be included in all copies or substantial portions -of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -DEALINGS IN THE SOFTWARE. \ No newline at end of file diff --git a/vendor/rustc-hash/README.md b/vendor/rustc-hash/README.md deleted file mode 100644 index bcac3455ac90d6..00000000000000 --- a/vendor/rustc-hash/README.md +++ /dev/null @@ -1,42 +0,0 @@ -# rustc-hash - -[![crates.io](https://img.shields.io/crates/v/rustc-hash.svg)](https://crates.io/crates/rustc-hash) -[![Documentation](https://docs.rs/rustc-hash/badge.svg)](https://docs.rs/rustc-hash) - -A speedy, non-cryptographic hashing algorithm used by `rustc`. -The [hash map in `std`](https://doc.rust-lang.org/std/collections/struct.HashMap.html) uses SipHash by default, which provides resistance against DOS attacks. -These attacks aren't a concern in the compiler so we prefer to use a quicker, -non-cryptographic hash algorithm. - -The original hash algorithm provided by this crate was one taken from Firefox, -hence the hasher it provides is called FxHasher. This name is kept for backwards -compatibility, but the underlying hash has since been replaced. The current -design for the hasher is a polynomial hash finished with a single bit rotation, -together with a wyhash-inspired compression function for strings/slices, both -designed by Orson Peters. - -For `rustc` we have tried many different hashing algorithms. Hashing speed is -critical, especially for single integers. Spending more CPU cycles on a higher -quality hash does not reduce hash collisions enough to make the compiler faster -on real-world benchmarks. - -## Usage - -This crate provides `FxHashMap` and `FxHashSet` as collections. -They are simply type aliases for their `std::collection` counterparts using the Fx hasher. - -```rust -use rustc_hash::FxHashMap; - -let mut map: FxHashMap<u32, u32> = FxHashMap::default(); -map.insert(22, 44); -``` - -### `no_std` - -The `std` feature is on by default to enable collections. -It can be turned off in `Cargo.toml` like so: - -```toml -rustc-hash = { version = "2.1", default-features = false } -``` diff --git a/vendor/rustc-hash/src/lib.rs b/vendor/rustc-hash/src/lib.rs deleted file mode 100644 index 03117c96c015cc..00000000000000 --- a/vendor/rustc-hash/src/lib.rs +++ /dev/null @@ -1,459 +0,0 @@ -//! A speedy, non-cryptographic hashing algorithm used by `rustc`. -//! -//! 
# Example -//! -//! ```rust -//! # #[cfg(feature = "std")] -//! # fn main() { -//! use rustc_hash::FxHashMap; -//! -//! let mut map: FxHashMap<u32, u32> = FxHashMap::default(); -//! map.insert(22, 44); -//! # } -//! # #[cfg(not(feature = "std"))] -//! # fn main() { } -//! ``` - -#![no_std] -#![cfg_attr(feature = "nightly", feature(hasher_prefixfree_extras))] - -#[cfg(feature = "std")] -extern crate std; - -#[cfg(feature = "rand")] -extern crate rand; - -#[cfg(feature = "rand")] -mod random_state; - -mod seeded_state; - -use core::default::Default; -use core::hash::{BuildHasher, Hasher}; -#[cfg(feature = "std")] -use std::collections::{HashMap, HashSet}; - -/// Type alias for a hash map that uses the Fx hashing algorithm. -#[cfg(feature = "std")] -pub type FxHashMap<K, V> = HashMap<K, V, FxBuildHasher>; - -/// Type alias for a hash set that uses the Fx hashing algorithm. -#[cfg(feature = "std")] -pub type FxHashSet<V> = HashSet<V, FxBuildHasher>; - -#[cfg(feature = "rand")] -pub use random_state::{FxHashMapRand, FxHashSetRand, FxRandomState}; - -pub use seeded_state::FxSeededState; -#[cfg(feature = "std")] -pub use seeded_state::{FxHashMapSeed, FxHashSetSeed}; - -/// A speedy hash algorithm for use within rustc. The hashmap in liballoc -/// by default uses SipHash which isn't quite as speedy as we want. In the -/// compiler we're not really worried about DOS attempts, so we use a fast -/// non-cryptographic hash. -/// -/// The current implementation is a fast polynomial hash with a single -/// bit rotation as a finishing step designed by Orson Peters. -#[derive(Clone)] -pub struct FxHasher { - hash: usize, -} - -// One might view a polynomial hash -// m[0] * k + m[1] * k^2 + m[2] * k^3 + ... -// as a multilinear hash with keystream k[..] -// m[0] * k[0] + m[1] * k[1] + m[2] * k[2] + ... -// where keystream k just happens to be generated using a multiplicative -// congruential pseudorandom number generator (MCG). For that reason we chose a -// constant that was found to be good for a MCG in: -// "Computationally Easy, Spectrally Good Multipliers for Congruential -// Pseudorandom Number Generators" by Guy Steele and Sebastiano Vigna. -#[cfg(target_pointer_width = "64")] -const K: usize = 0xf1357aea2e62a9c5; -#[cfg(target_pointer_width = "32")] -const K: usize = 0x93d765dd; - -impl FxHasher { - /// Creates a `fx` hasher with a given seed. - pub const fn with_seed(seed: usize) -> FxHasher { - FxHasher { hash: seed } - } - - /// Creates a default `fx` hasher. - pub const fn default() -> FxHasher { - FxHasher { hash: 0 } - } -} - -impl Default for FxHasher { - #[inline] - fn default() -> FxHasher { - Self::default() - } -} - -impl FxHasher { - #[inline] - fn add_to_hash(&mut self, i: usize) { - self.hash = self.hash.wrapping_add(i).wrapping_mul(K); - } -} - -impl Hasher for FxHasher { - #[inline] - fn write(&mut self, bytes: &[u8]) { - // Compress the byte string to a single u64 and add to our hash. 
- self.write_u64(hash_bytes(bytes)); - } - - #[inline] - fn write_u8(&mut self, i: u8) { - self.add_to_hash(i as usize); - } - - #[inline] - fn write_u16(&mut self, i: u16) { - self.add_to_hash(i as usize); - } - - #[inline] - fn write_u32(&mut self, i: u32) { - self.add_to_hash(i as usize); - } - - #[inline] - fn write_u64(&mut self, i: u64) { - self.add_to_hash(i as usize); - #[cfg(target_pointer_width = "32")] - self.add_to_hash((i >> 32) as usize); - } - - #[inline] - fn write_u128(&mut self, i: u128) { - self.add_to_hash(i as usize); - #[cfg(target_pointer_width = "32")] - self.add_to_hash((i >> 32) as usize); - self.add_to_hash((i >> 64) as usize); - #[cfg(target_pointer_width = "32")] - self.add_to_hash((i >> 96) as usize); - } - - #[inline] - fn write_usize(&mut self, i: usize) { - self.add_to_hash(i); - } - - #[cfg(feature = "nightly")] - #[inline] - fn write_length_prefix(&mut self, _len: usize) { - // Most cases will specialize hash_slice to call write(), which encodes - // the length already in a more efficient manner than we could here. For - // HashDoS-resistance you would still need to include this for the - // non-slice collection hashes, but for the purposes of rustc we do not - // care and do not wish to pay the performance penalty of mixing in len - // for those collections. - } - - #[cfg(feature = "nightly")] - #[inline] - fn write_str(&mut self, s: &str) { - // Similarly here, write already encodes the length, so nothing special - // is needed. - self.write(s.as_bytes()) - } - - #[inline] - fn finish(&self) -> u64 { - // Since we used a multiplicative hash our top bits have the most - // entropy (with the top bit having the most, decreasing as you go). - // As most hash table implementations (including hashbrown) compute - // the bucket index from the bottom bits we want to move bits from the - // top to the bottom. Ideally we'd rotate left by exactly the hash table - // size, but as we don't know this we'll choose 26 bits, giving decent - // entropy up until 2^26 table sizes. On 32-bit hosts we'll dial it - // back down a bit to 15 bits. - - #[cfg(target_pointer_width = "64")] - const ROTATE: u32 = 26; - #[cfg(target_pointer_width = "32")] - const ROTATE: u32 = 15; - - self.hash.rotate_left(ROTATE) as u64 - - // A bit reversal would be even better, except hashbrown also expects - // good entropy in the top 7 bits and a bit reverse would fill those - // bits with low entropy. More importantly, bit reversals are very slow - // on x86-64. A byte reversal is relatively fast, but still has a 2 - // cycle latency on x86-64 compared to the 1 cycle latency of a rotate. - // It also suffers from the hashbrown-top-7-bit-issue. - } -} - -// Nothing special, digits of pi. -const SEED1: u64 = 0x243f6a8885a308d3; -const SEED2: u64 = 0x13198a2e03707344; -const PREVENT_TRIVIAL_ZERO_COLLAPSE: u64 = 0xa4093822299f31d0; - -#[inline] -fn multiply_mix(x: u64, y: u64) -> u64 { - #[cfg(target_pointer_width = "64")] - { - // We compute the full u64 x u64 -> u128 product, this is a single mul - // instruction on x86-64, one mul plus one mulhi on ARM64. - let full = (x as u128) * (y as u128); - let lo = full as u64; - let hi = (full >> 64) as u64; - - // The middle bits of the full product fluctuate the most with small - // changes in the input. This is the top bits of lo and the bottom bits - // of hi. We can thus make the entire output fluctuate with small - // changes to the input by XOR'ing these two halves. 
- lo ^ hi - - // Unfortunately both 2^64 + 1 and 2^64 - 1 have small prime factors, - // otherwise combining with + or - could result in a really strong hash, as: - // x * y = 2^64 * hi + lo = (-1) * hi + lo = lo - hi, (mod 2^64 + 1) - // x * y = 2^64 * hi + lo = 1 * hi + lo = lo + hi, (mod 2^64 - 1) - // Multiplicative hashing is universal in a field (like mod p). - } - - #[cfg(target_pointer_width = "32")] - { - // u64 x u64 -> u128 product is prohibitively expensive on 32-bit. - // Decompose into 32-bit parts. - let lx = x as u32; - let ly = y as u32; - let hx = (x >> 32) as u32; - let hy = (y >> 32) as u32; - - // u32 x u32 -> u64 the low bits of one with the high bits of the other. - let afull = (lx as u64) * (hy as u64); - let bfull = (hx as u64) * (ly as u64); - - // Combine, swapping low/high of one of them so the upper bits of the - // product of one combine with the lower bits of the other. - afull ^ bfull.rotate_right(32) - } -} - -/// A wyhash-inspired non-collision-resistant hash for strings/slices designed -/// by Orson Peters, with a focus on small strings and small codesize. -/// -/// The 64-bit version of this hash passes the SMHasher3 test suite on the full -/// 64-bit output, that is, f(hash_bytes(b) ^ f(seed)) for some good avalanching -/// permutation f() passed all tests with zero failures. When using the 32-bit -/// version of multiply_mix this hash has a few non-catastrophic failures where -/// there are a handful more collisions than an optimal hash would give. -/// -/// We don't bother avalanching here as we'll feed this hash into a -/// multiplication after which we take the high bits, which avalanches for us. -#[inline] -fn hash_bytes(bytes: &[u8]) -> u64 { - let len = bytes.len(); - let mut s0 = SEED1; - let mut s1 = SEED2; - - if len <= 16 { - // XOR the input into s0, s1. - if len >= 8 { - s0 ^= u64::from_le_bytes(bytes[0..8].try_into().unwrap()); - s1 ^= u64::from_le_bytes(bytes[len - 8..].try_into().unwrap()); - } else if len >= 4 { - s0 ^= u32::from_le_bytes(bytes[0..4].try_into().unwrap()) as u64; - s1 ^= u32::from_le_bytes(bytes[len - 4..].try_into().unwrap()) as u64; - } else if len > 0 { - let lo = bytes[0]; - let mid = bytes[len / 2]; - let hi = bytes[len - 1]; - s0 ^= lo as u64; - s1 ^= ((hi as u64) << 8) | mid as u64; - } - } else { - // Handle bulk (can partially overlap with suffix). - let mut off = 0; - while off < len - 16 { - let x = u64::from_le_bytes(bytes[off..off + 8].try_into().unwrap()); - let y = u64::from_le_bytes(bytes[off + 8..off + 16].try_into().unwrap()); - - // Replace s1 with a mix of s0, x, and y, and s0 with s1. - // This ensures the compiler can unroll this loop into two - // independent streams, one operating on s0, the other on s1. - // - // Since zeroes are a common input we prevent an immediate trivial - // collapse of the hash function by XOR'ing a constant with y. - let t = multiply_mix(s0 ^ x, PREVENT_TRIVIAL_ZERO_COLLAPSE ^ y); - s0 = s1; - s1 = t; - off += 16; - } - - let suffix = &bytes[len - 16..]; - s0 ^= u64::from_le_bytes(suffix[0..8].try_into().unwrap()); - s1 ^= u64::from_le_bytes(suffix[8..16].try_into().unwrap()); - } - - multiply_mix(s0, s1) ^ (len as u64) -} - -/// An implementation of [`BuildHasher`] that produces [`FxHasher`]s. 
-/// -/// ``` -/// use std::hash::BuildHasher; -/// use rustc_hash::FxBuildHasher; -/// assert_ne!(FxBuildHasher.hash_one(1), FxBuildHasher.hash_one(2)); -/// ``` -#[derive(Copy, Clone, Default)] -pub struct FxBuildHasher; - -impl BuildHasher for FxBuildHasher { - type Hasher = FxHasher; - fn build_hasher(&self) -> FxHasher { - FxHasher::default() - } -} - -#[cfg(test)] -mod tests { - #[cfg(not(any(target_pointer_width = "64", target_pointer_width = "32")))] - compile_error!("The test suite only supports 64 bit and 32 bit usize"); - - use crate::{FxBuildHasher, FxHasher}; - use core::hash::{BuildHasher, Hash, Hasher}; - - macro_rules! test_hash { - ( - $( - hash($value:expr) == $result:expr, - )* - ) => { - $( - assert_eq!(FxBuildHasher.hash_one($value), $result); - )* - }; - } - - const B32: bool = cfg!(target_pointer_width = "32"); - - #[test] - fn unsigned() { - test_hash! { - hash(0_u8) == 0, - hash(1_u8) == if B32 { 3001993707 } else { 12157901119326311915 }, - hash(100_u8) == if B32 { 3844759569 } else { 16751747135202103309 }, - hash(u8::MAX) == if B32 { 999399879 } else { 1211781028898739645 }, - - hash(0_u16) == 0, - hash(1_u16) == if B32 { 3001993707 } else { 12157901119326311915 }, - hash(100_u16) == if B32 { 3844759569 } else { 16751747135202103309 }, - hash(u16::MAX) == if B32 { 3440503042 } else { 16279819243059860173 }, - - hash(0_u32) == 0, - hash(1_u32) == if B32 { 3001993707 } else { 12157901119326311915 }, - hash(100_u32) == if B32 { 3844759569 } else { 16751747135202103309 }, - hash(u32::MAX) == if B32 { 1293006356 } else { 7729994835221066939 }, - - hash(0_u64) == 0, - hash(1_u64) == if B32 { 275023839 } else { 12157901119326311915 }, - hash(100_u64) == if B32 { 1732383522 } else { 16751747135202103309 }, - hash(u64::MAX) == if B32 { 1017982517 } else { 6288842954450348564 }, - - hash(0_u128) == 0, - hash(1_u128) == if B32 { 1860738631 } else { 13032756267696824044 }, - hash(100_u128) == if B32 { 1389515751 } else { 12003541609544029302 }, - hash(u128::MAX) == if B32 { 2156022013 } else { 11702830760530184999 }, - - hash(0_usize) == 0, - hash(1_usize) == if B32 { 3001993707 } else { 12157901119326311915 }, - hash(100_usize) == if B32 { 3844759569 } else { 16751747135202103309 }, - hash(usize::MAX) == if B32 { 1293006356 } else { 6288842954450348564 }, - } - } - - #[test] - fn signed() { - test_hash! 
{ - hash(i8::MIN) == if B32 { 2000713177 } else { 6684841074112525780 }, - hash(0_i8) == 0, - hash(1_i8) == if B32 { 3001993707 } else { 12157901119326311915 }, - hash(100_i8) == if B32 { 3844759569 } else { 16751747135202103309 }, - hash(i8::MAX) == if B32 { 3293686765 } else { 12973684028562874344 }, - - hash(i16::MIN) == if B32 { 1073764727 } else { 14218860181193086044 }, - hash(0_i16) == 0, - hash(1_i16) == if B32 { 3001993707 } else { 12157901119326311915 }, - hash(100_i16) == if B32 { 3844759569 } else { 16751747135202103309 }, - hash(i16::MAX) == if B32 { 2366738315 } else { 2060959061933882993 }, - - hash(i32::MIN) == if B32 { 16384 } else { 9943947977240134995 }, - hash(0_i32) == 0, - hash(1_i32) == if B32 { 3001993707 } else { 12157901119326311915 }, - hash(100_i32) == if B32 { 3844759569 } else { 16751747135202103309 }, - hash(i32::MAX) == if B32 { 1293022740 } else { 16232790931690483559 }, - - hash(i64::MIN) == if B32 { 16384 } else { 33554432 }, - hash(0_i64) == 0, - hash(1_i64) == if B32 { 275023839 } else { 12157901119326311915 }, - hash(100_i64) == if B32 { 1732383522 } else { 16751747135202103309 }, - hash(i64::MAX) == if B32 { 1017998901 } else { 6288842954483902996 }, - - hash(i128::MIN) == if B32 { 16384 } else { 33554432 }, - hash(0_i128) == 0, - hash(1_i128) == if B32 { 1860738631 } else { 13032756267696824044 }, - hash(100_i128) == if B32 { 1389515751 } else { 12003541609544029302 }, - hash(i128::MAX) == if B32 { 2156005629 } else { 11702830760496630567 }, - - hash(isize::MIN) == if B32 { 16384 } else { 33554432 }, - hash(0_isize) == 0, - hash(1_isize) == if B32 { 3001993707 } else { 12157901119326311915 }, - hash(100_isize) == if B32 { 3844759569 } else { 16751747135202103309 }, - hash(isize::MAX) == if B32 { 1293022740 } else { 6288842954483902996 }, - } - } - - // Avoid relying on any `Hash` implementations in the standard library. - struct HashBytes(&'static [u8]); - impl Hash for HashBytes { - fn hash<H: core::hash::Hasher>(&self, state: &mut H) { - state.write(self.0); - } - } - - #[test] - fn bytes() { - test_hash! { - hash(HashBytes(&[])) == if B32 { 2673204745 } else { 17606491139363777937 }, - hash(HashBytes(&[0])) == if B32 { 2948228584 } else { 5448590020104574886 }, - hash(HashBytes(&[0, 0, 0, 0, 0, 0])) == if B32 { 3223252423 } else { 16766921560080789783 }, - hash(HashBytes(&[1])) == if B32 { 2943445104 } else { 5922447956811044110 }, - hash(HashBytes(&[2])) == if B32 { 1055423297 } else { 5229781508510959783 }, - hash(HashBytes(b"uwu")) == if B32 { 2699662140 } else { 7168164714682931527 }, - hash(HashBytes(b"These are some bytes for testing rustc_hash.")) == if B32 { 2303640537 } else { 2349210501944688211 }, - } - } - - #[test] - fn with_seed_actually_different() { - let seeds = [ - [1, 2], - [42, 17], - [124436707, 99237], - [usize::MIN, usize::MAX], - ]; - - for [a_seed, b_seed] in seeds { - let a = || FxHasher::with_seed(a_seed); - let b = || FxHasher::with_seed(b_seed); - - for x in u8::MIN..=u8::MAX { - let mut a = a(); - let mut b = b(); - - x.hash(&mut a); - x.hash(&mut b); - - assert_ne!(a.finish(), b.finish()) - } - } - } -} diff --git a/vendor/rustc-hash/src/random_state.rs b/vendor/rustc-hash/src/random_state.rs deleted file mode 100644 index c8c35a0b1a4da9..00000000000000 --- a/vendor/rustc-hash/src/random_state.rs +++ /dev/null @@ -1,101 +0,0 @@ -use std::collections::{HashMap, HashSet}; - -use crate::FxHasher; - -/// Type alias for a hashmap using the `fx` hash algorithm with [`FxRandomState`]. 
-pub type FxHashMapRand<K, V> = HashMap<K, V, FxRandomState>; - -/// Type alias for a hashmap using the `fx` hash algorithm with [`FxRandomState`]. -pub type FxHashSetRand<V> = HashSet<V, FxRandomState>; - -/// `FxRandomState` is an alternative state for `HashMap` types. -/// -/// A particular instance `FxRandomState` will create the same instances of -/// [`Hasher`], but the hashers created by two different `FxRandomState` -/// instances are unlikely to produce the same result for the same values. -#[derive(Clone)] -pub struct FxRandomState { - seed: usize, -} - -impl FxRandomState { - /// Constructs a new `FxRandomState` that is initialized with random seed. - pub fn new() -> FxRandomState { - use rand::Rng; - use std::{cell::Cell, thread_local}; - - // This mirrors what `std::collections::hash_map::RandomState` does, as of 2024-01-14. - // - // Basically - // 1. Cache result of the rng in a thread local, so repeatedly - // creating maps is cheaper - // 2. Change the cached result on every creation, so maps created - // on the same thread don't have the same iteration order - thread_local!(static SEED: Cell<usize> = { - Cell::new(rand::thread_rng().gen()) - }); - - SEED.with(|seed| { - let s = seed.get(); - seed.set(s.wrapping_add(1)); - FxRandomState { seed: s } - }) - } -} - -impl core::hash::BuildHasher for FxRandomState { - type Hasher = FxHasher; - - fn build_hasher(&self) -> Self::Hasher { - FxHasher::with_seed(self.seed) - } -} - -impl Default for FxRandomState { - fn default() -> Self { - Self::new() - } -} - -#[cfg(test)] -mod tests { - use std::thread; - - use crate::FxHashMapRand; - - #[test] - fn cloned_random_states_are_equal() { - let a = FxHashMapRand::<&str, u32>::default(); - let b = a.clone(); - - assert_eq!(a.hasher().seed, b.hasher().seed); - } - - #[test] - fn random_states_are_different() { - let a = FxHashMapRand::<&str, u32>::default(); - let b = FxHashMapRand::<&str, u32>::default(); - - // That's the whole point of them being random! - // - // N.B.: `FxRandomState` uses a thread-local set to a random value and then incremented, - // which means that this is *guaranteed* to pass :> - assert_ne!(a.hasher().seed, b.hasher().seed); - } - - #[test] - fn random_states_are_different_cross_thread() { - // This is similar to the test above, but uses two different threads, so they both get - // completely random, unrelated values. - // - // This means that this test is technically flaky, but the probability of it failing is - // `1 / 2.pow(bit_size_of::<usize>())`. Or 1/1.7e19 for 64 bit platforms or 1/4294967295 - // for 32 bit platforms. I suppose this is acceptable. - let a = FxHashMapRand::<&str, u32>::default(); - let b = thread::spawn(|| FxHashMapRand::<&str, u32>::default()) - .join() - .unwrap(); - - assert_ne!(a.hasher().seed, b.hasher().seed); - } -} diff --git a/vendor/rustc-hash/src/seeded_state.rs b/vendor/rustc-hash/src/seeded_state.rs deleted file mode 100644 index e84190625939e8..00000000000000 --- a/vendor/rustc-hash/src/seeded_state.rs +++ /dev/null @@ -1,76 +0,0 @@ -use crate::FxHasher; - -/// Type alias for a hashmap using the `fx` hash algorithm with [`FxSeededState`]. -#[cfg(feature = "std")] -pub type FxHashMapSeed<K, V> = std::collections::HashMap<K, V, FxSeededState>; - -/// Type alias for a hashmap using the `fx` hash algorithm with [`FxSeededState`]. 
-#[cfg(feature = "std")] -pub type FxHashSetSeed<V> = std::collections::HashSet<V, FxSeededState>; - -/// [`FxSeededState`] is an alternative state for `HashMap` types, allowing to use [`FxHasher`] with a set seed. -/// -/// ``` -/// # use std::collections::HashMap; -/// use rustc_hash::FxSeededState; -/// -/// let mut map = HashMap::with_hasher(FxSeededState::with_seed(12)); -/// map.insert(15, 610); -/// assert_eq!(map[&15], 610); -/// ``` -#[derive(Clone)] -pub struct FxSeededState { - seed: usize, -} - -impl FxSeededState { - /// Constructs a new `FxSeededState` that is initialized with a `seed`. - pub const fn with_seed(seed: usize) -> FxSeededState { - Self { seed } - } -} - -impl core::hash::BuildHasher for FxSeededState { - type Hasher = FxHasher; - - fn build_hasher(&self) -> Self::Hasher { - FxHasher::with_seed(self.seed) - } -} - -#[cfg(test)] -mod tests { - use core::hash::BuildHasher; - - use crate::FxSeededState; - - #[test] - fn cloned_seeded_states_are_equal() { - let seed = 2; - let a = FxSeededState::with_seed(seed); - let b = a.clone(); - - assert_eq!(a.seed, b.seed); - assert_eq!(a.seed, seed); - - assert_eq!(a.build_hasher().hash, b.build_hasher().hash); - } - - #[test] - fn same_seed_produces_same_hasher() { - let seed = 1; - let a = FxSeededState::with_seed(seed); - let b = FxSeededState::with_seed(seed); - - // The hashers should be the same, as they have the same seed. - assert_eq!(a.build_hasher().hash, b.build_hasher().hash); - } - - #[test] - fn different_states_are_different() { - let a = FxSeededState::with_seed(1); - let b = FxSeededState::with_seed(2); - - assert_ne!(a.build_hasher().hash, b.build_hasher().hash); - } -} diff --git a/vendor/shlex/.cargo-checksum.json b/vendor/shlex/.cargo-checksum.json deleted file mode 100644 index 6a97828db78816..00000000000000 --- a/vendor/shlex/.cargo-checksum.json +++ /dev/null @@ -1 +0,0 @@ -{"files":{".cargo_vcs_info.json":"5ba6bffcbe6628331b4b8ae50936c44d6260de2913c83f55e0c19fce366af72c",".github/workflows/test.yml":"cca05e7dbd8b0c65f5c8f5a30fb76fd11aaaaf9a3216b5d5b0ea387b3d94fffa","CHANGELOG.md":"879a16b3fef6fb3251fcac516fe73414109e3b7df5eb2ec4863a7551674038a0","Cargo.toml":"d7eb8c4bce681b4dd1dfc2c98c649754390775f38f4796d491948ddbb53aa2ef","Cargo.toml.orig":"aba3cfcd4981d79feac94eb673bcdd0754962edc0e2a0ce81a13d5285c5a3f3d","LICENSE-APACHE":"553fffcd9b1cb158bc3e9edc35da85ca5c3b3d7d2e61c883ebcfa8a65814b583","LICENSE-MIT":"4455bf75a91154108304cb283e0fea9948c14f13e20d60887cf2552449dea3b1","README.md":"082e505bba5dffc5904af5602b45d01129173e617db62c81e6c11d71c964ea71","src/bytes.rs":"eadfffcdb7846d341ba451d6118d275b9d0f14a9554984ccfcdbe9a8d77ec5ee","src/lib.rs":"44c8fb929e1443f2446d26025a9bcfca0b329811bbc309b4a6afb8ec17d7de8d","src/quoting_warning.md":"566d6509211ddcd4afbd4f1117c5234567f6b6d01f5da60acfaef011362be045"},"package":"0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64"} \ No newline at end of file diff --git a/vendor/shlex/.cargo_vcs_info.json b/vendor/shlex/.cargo_vcs_info.json deleted file mode 100644 index efa0c6e18d1233..00000000000000 --- a/vendor/shlex/.cargo_vcs_info.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "git": { - "sha1": "4a0724b0b62ef715467875b040a890ce75a8a829" - }, - "path_in_vcs": "" -} \ No newline at end of file diff --git a/vendor/shlex/.github/workflows/test.yml b/vendor/shlex/.github/workflows/test.yml deleted file mode 100644 index 7f299916bc6db5..00000000000000 --- a/vendor/shlex/.github/workflows/test.yml +++ /dev/null @@ -1,36 +0,0 @@ -name: Rust - -on: - pull_request: - 
push: - -jobs: - check: - name: Check - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 - - uses: ATiltedTree/setup-rust@v1 - with: - rust-version: stable - - run: cargo check - - test: - name: Test - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 - - uses: ATiltedTree/setup-rust@v1 - with: - rust-version: stable - - run: cargo test - - test_no_default_features: - name: Test (no default features) - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 - - uses: ATiltedTree/setup-rust@v1 - with: - rust-version: stable - - run: cargo test --no-default-features diff --git a/vendor/shlex/CHANGELOG.md b/vendor/shlex/CHANGELOG.md deleted file mode 100644 index 95552b430bb65b..00000000000000 --- a/vendor/shlex/CHANGELOG.md +++ /dev/null @@ -1,21 +0,0 @@ -# 1.2.0 - -* Adds `bytes` module to support operating directly on byte strings. - -# 1.1.0 - -* Adds the `std` feature (enabled by default) -* Disabling the `std` feature makes the crate work in `#![no_std]` mode, assuming presence of the `alloc` crate - -# 1.0.0 - -* Adds the `join` convenience function. -* Fixes parsing of `'\\n'` to match the behavior of bash/Zsh/Python `shlex`. The result was previously `\n`, now it is `\\n`. - -# 0.1.1 - -* Adds handling of `#` comments. - -# 0.1.0 - -This is the initial release. diff --git a/vendor/shlex/Cargo.toml b/vendor/shlex/Cargo.toml deleted file mode 100644 index 2b668928d63fd3..00000000000000 --- a/vendor/shlex/Cargo.toml +++ /dev/null @@ -1,35 +0,0 @@ -# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO -# -# When uploading crates to the registry Cargo will automatically -# "normalize" Cargo.toml files for maximal compatibility -# with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g., crates.io) dependencies. -# -# If you are reading this file be aware that the original Cargo.toml -# will likely look very different (and much more reasonable). -# See Cargo.toml.orig for the original contents. - -[package] -rust-version = "1.46.0" -name = "shlex" -version = "1.3.0" -authors = [ - "comex <comexk@gmail.com>", - "Fenhl <fenhl@fenhl.net>", - "Adrian Taylor <adetaylor@chromium.org>", - "Alex Touchet <alextouchet@outlook.com>", - "Daniel Parks <dp+git@oxidized.org>", - "Garrett Berg <googberg@gmail.com>", -] -description = "Split a string into shell words, like Python's shlex." -readme = "README.md" -categories = [ - "command-line-interface", - "parser-implementations", -] -license = "MIT OR Apache-2.0" -repository = "https://github.com/comex/rust-shlex" - -[features] -default = ["std"] -std = [] diff --git a/vendor/shlex/LICENSE-APACHE b/vendor/shlex/LICENSE-APACHE deleted file mode 100644 index 37465048a6f63d..00000000000000 --- a/vendor/shlex/LICENSE-APACHE +++ /dev/null @@ -1,13 +0,0 @@ -Copyright 2015 Nicholas Allegra (comex). - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
diff --git a/vendor/shlex/LICENSE-MIT b/vendor/shlex/LICENSE-MIT deleted file mode 100644 index 5ec1fe1cd795fa..00000000000000 --- a/vendor/shlex/LICENSE-MIT +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2015 Nicholas Allegra (comex). - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/vendor/shlex/README.md b/vendor/shlex/README.md deleted file mode 100644 index 6400a6f75a915e..00000000000000 --- a/vendor/shlex/README.md +++ /dev/null @@ -1,39 +0,0 @@ -[![ci badge]][ci link] [![crates.io badge]][crates.io link] [![docs.rs badge]][docs.rs link] - -[crates.io badge]: https://img.shields.io/crates/v/shlex.svg?style=flat-square -[crates.io link]: https://crates.io/crates/shlex -[docs.rs badge]: https://img.shields.io/badge/docs-online-dddddd.svg?style=flat-square -[docs.rs link]: https://docs.rs/shlex -[ci badge]: https://img.shields.io/github/actions/workflow/status/comex/rust-shlex/test.yml?branch=master&style=flat-square -[ci link]: https://github.com/comex/rust-shlex/actions - -Same idea as (but implementation not directly based on) the Python shlex -module. However, this implementation does not support any of the Python -module's customization because it makes parsing slower and is fairly useless. -You only get the default settings of shlex.split, which mimic the POSIX shell: -<https://pubs.opengroup.org/onlinepubs/9699919799/utilities/V3_chap02.html> - -This implementation also deviates from the Python version in not treating \r -specially, which I believe is more compliant. - -This crate can be used on either normal Rust strings, or on byte strings with -the `bytes` module. The algorithms used are oblivious to UTF-8 high bytes, so -internally they all work on bytes directly as a micro-optimization. - -Disabling the `std` feature (which is enabled by default) will allow the crate -to work in `no_std` environments, where the `alloc` crate, and a global -allocator, are available. - -# LICENSE - -The source code in this repository is Licensed under either of -- Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or - https://www.apache.org/licenses/LICENSE-2.0) -- MIT license ([LICENSE-MIT](LICENSE-MIT) or - https://opensource.org/licenses/MIT) - -at your option. - -Unless you explicitly state otherwise, any contribution intentionally submitted -for inclusion in the work by you, as defined in the Apache-2.0 license, shall -be dual licensed as above, without any additional terms or conditions. 
diff --git a/vendor/shlex/src/bytes.rs b/vendor/shlex/src/bytes.rs deleted file mode 100644 index af8daad0d33c18..00000000000000 --- a/vendor/shlex/src/bytes.rs +++ /dev/null @@ -1,576 +0,0 @@ -// Copyright 2015 Nicholas Allegra (comex). -// Licensed under the Apache License, Version 2.0 <https://www.apache.org/licenses/LICENSE-2.0> or -// the MIT license <https://opensource.org/licenses/MIT>, at your option. This file may not be -// copied, modified, or distributed except according to those terms. - -//! [`Shlex`] and friends for byte strings. -//! -//! This is used internally by the [outer module](crate), and may be more -//! convenient if you are working with byte slices (`[u8]`) or types that are -//! wrappers around bytes, such as [`OsStr`](std::ffi::OsStr): -//! -//! ```rust -//! #[cfg(unix)] { -//! use shlex::bytes::quote; -//! use std::ffi::OsStr; -//! use std::os::unix::ffi::OsStrExt; -//! -//! // `\x80` is invalid in UTF-8. -//! let os_str = OsStr::from_bytes(b"a\x80b c"); -//! assert_eq!(quote(os_str.as_bytes()), &b"'a\x80b c'"[..]); -//! } -//! ``` -//! -//! (On Windows, `OsStr` uses 16 bit wide characters so this will not work.) - -extern crate alloc; -use alloc::vec::Vec; -use alloc::borrow::Cow; -#[cfg(test)] -use alloc::vec; -#[cfg(test)] -use alloc::borrow::ToOwned; -#[cfg(all(doc, not(doctest)))] -use crate::{self as shlex, quoting_warning}; - -use super::QuoteError; - -/// An iterator that takes an input byte string and splits it into the words using the same syntax as -/// the POSIX shell. -pub struct Shlex<'a> { - in_iter: core::slice::Iter<'a, u8>, - /// The number of newlines read so far, plus one. - pub line_no: usize, - /// An input string is erroneous if it ends while inside a quotation or right after an - /// unescaped backslash. Since Iterator does not have a mechanism to return an error, if that - /// happens, Shlex just throws out the last token, ends the iteration, and sets 'had_error' to - /// true; best to check it after you're done iterating. 
- pub had_error: bool, -} - -impl<'a> Shlex<'a> { - pub fn new(in_bytes: &'a [u8]) -> Self { - Shlex { - in_iter: in_bytes.iter(), - line_no: 1, - had_error: false, - } - } - - fn parse_word(&mut self, mut ch: u8) -> Option<Vec<u8>> { - let mut result: Vec<u8> = Vec::new(); - loop { - match ch as char { - '"' => if let Err(()) = self.parse_double(&mut result) { - self.had_error = true; - return None; - }, - '\'' => if let Err(()) = self.parse_single(&mut result) { - self.had_error = true; - return None; - }, - '\\' => if let Some(ch2) = self.next_char() { - if ch2 != '\n' as u8 { result.push(ch2); } - } else { - self.had_error = true; - return None; - }, - ' ' | '\t' | '\n' => { break; }, - _ => { result.push(ch as u8); }, - } - if let Some(ch2) = self.next_char() { ch = ch2; } else { break; } - } - Some(result) - } - - fn parse_double(&mut self, result: &mut Vec<u8>) -> Result<(), ()> { - loop { - if let Some(ch2) = self.next_char() { - match ch2 as char { - '\\' => { - if let Some(ch3) = self.next_char() { - match ch3 as char { - // \$ => $ - '$' | '`' | '"' | '\\' => { result.push(ch3); }, - // \<newline> => nothing - '\n' => {}, - // \x => =x - _ => { result.push('\\' as u8); result.push(ch3); } - } - } else { - return Err(()); - } - }, - '"' => { return Ok(()); }, - _ => { result.push(ch2); }, - } - } else { - return Err(()); - } - } - } - - fn parse_single(&mut self, result: &mut Vec<u8>) -> Result<(), ()> { - loop { - if let Some(ch2) = self.next_char() { - match ch2 as char { - '\'' => { return Ok(()); }, - _ => { result.push(ch2); }, - } - } else { - return Err(()); - } - } - } - - fn next_char(&mut self) -> Option<u8> { - let res = self.in_iter.next().copied(); - if res == Some(b'\n') { self.line_no += 1; } - res - } -} - -impl<'a> Iterator for Shlex<'a> { - type Item = Vec<u8>; - fn next(&mut self) -> Option<Self::Item> { - if let Some(mut ch) = self.next_char() { - // skip initial whitespace - loop { - match ch as char { - ' ' | '\t' | '\n' => {}, - '#' => { - while let Some(ch2) = self.next_char() { - if ch2 as char == '\n' { break; } - } - }, - _ => { break; } - } - if let Some(ch2) = self.next_char() { ch = ch2; } else { return None; } - } - self.parse_word(ch) - } else { // no initial character - None - } - } - -} - -/// Convenience function that consumes the whole byte string at once. Returns None if the input was -/// erroneous. -pub fn split(in_bytes: &[u8]) -> Option<Vec<Vec<u8>>> { - let mut shl = Shlex::new(in_bytes); - let res = shl.by_ref().collect(); - if shl.had_error { None } else { Some(res) } -} - -/// A more configurable interface to quote strings. If you only want the default settings you can -/// use the convenience functions [`try_quote`] and [`try_join`]. -/// -/// The string equivalent is [`shlex::Quoter`]. -#[derive(Default, Debug, Clone)] -pub struct Quoter { - allow_nul: bool, - // TODO: more options -} - -impl Quoter { - /// Create a new [`Quoter`] with default settings. - #[inline] - pub fn new() -> Self { - Self::default() - } - - /// Set whether to allow [nul bytes](quoting_warning#nul-bytes). By default they are not - /// allowed and will result in an error of [`QuoteError::Nul`]. - #[inline] - pub fn allow_nul(mut self, allow: bool) -> Self { - self.allow_nul = allow; - self - } - - /// Convenience function that consumes an iterable of words and turns it into a single byte string, - /// quoting words when necessary. Consecutive words will be separated by a single space. 
- pub fn join<'a, I: IntoIterator<Item = &'a [u8]>>(&self, words: I) -> Result<Vec<u8>, QuoteError> { - Ok(words.into_iter() - .map(|word| self.quote(word)) - .collect::<Result<Vec<Cow<[u8]>>, QuoteError>>()? - .join(&b' ')) - } - - /// Given a single word, return a byte string suitable to encode it as a shell argument. - /// - /// If given valid UTF-8, this will never produce invalid UTF-8. This is because it only - /// ever inserts valid ASCII characters before or after existing ASCII characters (or - /// returns two single quotes if the input was an empty string). It will never modify a - /// multibyte UTF-8 character. - pub fn quote<'a>(&self, mut in_bytes: &'a [u8]) -> Result<Cow<'a, [u8]>, QuoteError> { - if in_bytes.is_empty() { - // Empty string. Special case that isn't meaningful as only part of a word. - return Ok(b"''"[..].into()); - } - if !self.allow_nul && in_bytes.iter().any(|&b| b == b'\0') { - return Err(QuoteError::Nul); - } - let mut out: Vec<u8> = Vec::new(); - while !in_bytes.is_empty() { - // Pick a quoting strategy for some prefix of the input. Normally this will cover the - // entire input, but in some case we might need to divide the input into multiple chunks - // that are quoted differently. - let (cur_len, strategy) = quoting_strategy(in_bytes); - if cur_len == in_bytes.len() && strategy == QuotingStrategy::Unquoted && out.is_empty() { - // Entire string can be represented unquoted. Reuse the allocation. - return Ok(in_bytes.into()); - } - let (cur_chunk, rest) = in_bytes.split_at(cur_len); - assert!(rest.len() < in_bytes.len()); // no infinite loop - in_bytes = rest; - append_quoted_chunk(&mut out, cur_chunk, strategy); - } - Ok(out.into()) - } - -} - -#[derive(PartialEq)] -enum QuotingStrategy { - /// No quotes and no backslash escapes. (If backslash escapes would be necessary, we use a - /// different strategy instead.) - Unquoted, - /// Single quoted. - SingleQuoted, - /// Double quotes, potentially with backslash escapes. - DoubleQuoted, - // TODO: add $'xxx' and "$(printf 'xxx')" styles -} - -/// Is this ASCII byte okay to emit unquoted? -const fn unquoted_ok(c: u8) -> bool { - match c as char { - // Allowed characters: - '+' | '-' | '.' | '/' | ':' | '@' | ']' | '_' | - '0'..='9' | 'A'..='Z' | 'a'..='z' - => true, - - // Non-allowed characters: - // From POSIX https://pubs.opengroup.org/onlinepubs/9699919799/utilities/V3_chap02.html - // "The application shall quote the following characters if they are to represent themselves:" - '|' | '&' | ';' | '<' | '>' | '(' | ')' | '$' | '`' | '\\' | '"' | '\'' | ' ' | '\t' | '\n' | - // "and the following may need to be quoted under certain circumstances[..]:" - '*' | '?' | '[' | '#' | '~' | '=' | '%' | - // Brace expansion. These ought to be in the POSIX list but aren't yet; - // see: https://www.austingroupbugs.net/view.php?id=1193 - '{' | '}' | - // Also quote comma, just to be safe in the extremely odd case that the user of this crate - // is intentionally placing a quoted string inside a brace expansion, e.g.: - // format!("echo foo{{a,b,{}}}" | shlex::quote(some_str)) - ',' | - // '\r' is allowed in a word by all real shells I tested, but is treated as a word - // separator by Python `shlex` | and might be translated to '\n' in interactive mode. - '\r' | - // '!' and '^' are treated specially in interactive mode; see quoting_warning. - '!' | '^' | - // Nul bytes and control characters. 
- '\x00' ..= '\x1f' | '\x7f' - => false, - '\u{80}' ..= '\u{10ffff}' => { - // This is unreachable since `unquoted_ok` is only called for 0..128. - // Non-ASCII bytes are handled separately in `quoting_strategy`. - // Can't call unreachable!() from `const fn` on old Rust, so... - unquoted_ok(c) - }, - } - // Note: The logic cited above for quoting comma might suggest that `..` should also be quoted, - // it as a special case of brace expansion). But it's not necessary. There are three cases: - // - // 1. The user wants comma-based brace expansion, but the untrusted string being `quote`d - // contains `..`, so they get something like `{foo,bar,3..5}`. - // => That's safe; both Bash and Zsh expand this to `foo bar 3..5` rather than - // `foo bar 3 4 5`. The presence of commas disables sequence expression expansion. - // - // 2. The user wants comma-based brace expansion where the contents of the braces are a - // variable number of `quote`d strings and nothing else. There happens to be exactly - // one string and it contains `..`, so they get something like `{3..5}`. - // => Then this will expand as a sequence expression, which is unintended. But I don't mind, - // because any such code is already buggy. Suppose the untrusted string *didn't* contain - // `,` or `..`, resulting in shell input like `{foo}`. Then the shell would interpret it - // as the literal string `{foo}` rather than brace-expanding it into `foo`. - // - // 3. The user wants a sequence expression and wants to supply an untrusted string as one of - // the endpoints or the increment. - // => Well, that's just silly, since the endpoints can only be numbers or single letters. -} - -/// Optimized version of `unquoted_ok`. -fn unquoted_ok_fast(c: u8) -> bool { - const UNQUOTED_OK_MASK: u128 = { - // Make a mask of all bytes in 0..<0x80 that pass. - let mut c = 0u8; - let mut mask = 0u128; - while c < 0x80 { - if unquoted_ok(c) { - mask |= 1u128 << c; - } - c += 1; - } - mask - }; - ((UNQUOTED_OK_MASK >> c) & 1) != 0 -} - -/// Is this ASCII byte okay to emit in single quotes? -fn single_quoted_ok(c: u8) -> bool { - match c { - // No single quotes in single quotes. - b'\'' => false, - // To work around a Bash bug, ^ is only allowed right after an opening single quote; see - // quoting_warning. - b'^' => false, - // Backslashes in single quotes are literal according to POSIX, but Fish treats them as an - // escape character. Ban them. Fish doesn't aim to be POSIX-compatible, but we *can* - // achieve Fish compatibility using double quotes, so we might as well. - b'\\' => false, - _ => true - } -} - -/// Is this ASCII byte okay to emit in double quotes? -fn double_quoted_ok(c: u8) -> bool { - match c { - // Work around Python `shlex` bug where parsing "\`" and "\$" doesn't strip the - // backslash, even though POSIX requires it. - b'`' | b'$' => false, - // '!' and '^' are treated specially in interactive mode; see quoting_warning. - b'!' | b'^' => false, - _ => true - } -} - -/// Given an input, return a quoting strategy that can cover some prefix of the string, along with -/// the size of that prefix. -/// -/// Precondition: input size is nonzero. (Empty strings are handled by the caller.) -/// Postcondition: returned size is nonzero. 
-#[cfg_attr(manual_codegen_check, inline(never))] -fn quoting_strategy(in_bytes: &[u8]) -> (usize, QuotingStrategy) { - const UNQUOTED_OK: u8 = 1; - const SINGLE_QUOTED_OK: u8 = 2; - const DOUBLE_QUOTED_OK: u8 = 4; - - let mut prev_ok = SINGLE_QUOTED_OK | DOUBLE_QUOTED_OK | UNQUOTED_OK; - let mut i = 0; - - if in_bytes[0] == b'^' { - // To work around a Bash bug, ^ is only allowed right after an opening single quote; see - // quoting_warning. - prev_ok = SINGLE_QUOTED_OK; - i = 1; - } - - while i < in_bytes.len() { - let c = in_bytes[i]; - let mut cur_ok = prev_ok; - - if c >= 0x80 { - // Normally, non-ASCII characters shouldn't require quoting, but see quoting_warning.md - // about \xa0. For now, just treat all non-ASCII characters as requiring quotes. This - // also ensures things are safe in the off-chance that you're in a legacy 8-bit locale that - // has additional characters satisfying `isblank`. - cur_ok &= !UNQUOTED_OK; - } else { - if !unquoted_ok_fast(c) { - cur_ok &= !UNQUOTED_OK; - } - if !single_quoted_ok(c){ - cur_ok &= !SINGLE_QUOTED_OK; - } - if !double_quoted_ok(c) { - cur_ok &= !DOUBLE_QUOTED_OK; - } - } - - if cur_ok == 0 { - // There are no quoting strategies that would work for both the previous characters and - // this one. So we have to end the chunk before this character. The caller will call - // `quoting_strategy` again to handle the rest of the string. - break; - } - - prev_ok = cur_ok; - i += 1; - } - - // Pick the best allowed strategy. - let strategy = if prev_ok & UNQUOTED_OK != 0 { - QuotingStrategy::Unquoted - } else if prev_ok & SINGLE_QUOTED_OK != 0 { - QuotingStrategy::SingleQuoted - } else if prev_ok & DOUBLE_QUOTED_OK != 0 { - QuotingStrategy::DoubleQuoted - } else { - unreachable!() - }; - debug_assert!(i > 0); - (i, strategy) -} - -fn append_quoted_chunk(out: &mut Vec<u8>, cur_chunk: &[u8], strategy: QuotingStrategy) { - match strategy { - QuotingStrategy::Unquoted => { - out.extend_from_slice(cur_chunk); - }, - QuotingStrategy::SingleQuoted => { - out.reserve(cur_chunk.len() + 2); - out.push(b'\''); - out.extend_from_slice(cur_chunk); - out.push(b'\''); - }, - QuotingStrategy::DoubleQuoted => { - out.reserve(cur_chunk.len() + 2); - out.push(b'"'); - for &c in cur_chunk.into_iter() { - if let b'$' | b'`' | b'"' | b'\\' = c { - // Add a preceding backslash. - // Note: We shouldn't actually get here for $ and ` because they don't pass - // `double_quoted_ok`. - out.push(b'\\'); - } - // Add the character itself. - out.push(c); - } - out.push(b'"'); - }, - } -} - -/// Convenience function that consumes an iterable of words and turns it into a single byte string, -/// quoting words when necessary. Consecutive words will be separated by a single space. -/// -/// Uses default settings except that nul bytes are passed through, which [may be -/// dangerous](quoting_warning#nul-bytes), leading to this function being deprecated. -/// -/// Equivalent to [`Quoter::new().allow_nul(true).join(words).unwrap()`](Quoter). -/// -/// (That configuration never returns `Err`, so this function does not panic.) -/// -/// The string equivalent is [shlex::join]. -#[deprecated(since = "1.3.0", note = "replace with `try_join(words)?` to avoid nul byte danger")] -pub fn join<'a, I: IntoIterator<Item = &'a [u8]>>(words: I) -> Vec<u8> { - Quoter::new().allow_nul(true).join(words).unwrap() -} - -/// Convenience function that consumes an iterable of words and turns it into a single byte string, -/// quoting words when necessary. 
Consecutive words will be separated by a single space. -/// -/// Uses default settings. The only error that can be returned is [`QuoteError::Nul`]. -/// -/// Equivalent to [`Quoter::new().join(words)`](Quoter). -/// -/// The string equivalent is [shlex::try_join]. -pub fn try_join<'a, I: IntoIterator<Item = &'a [u8]>>(words: I) -> Result<Vec<u8>, QuoteError> { - Quoter::new().join(words) -} - -/// Given a single word, return a string suitable to encode it as a shell argument. -/// -/// Uses default settings except that nul bytes are passed through, which [may be -/// dangerous](quoting_warning#nul-bytes), leading to this function being deprecated. -/// -/// Equivalent to [`Quoter::new().allow_nul(true).quote(in_bytes).unwrap()`](Quoter). -/// -/// (That configuration never returns `Err`, so this function does not panic.) -/// -/// The string equivalent is [shlex::quote]. -#[deprecated(since = "1.3.0", note = "replace with `try_quote(str)?` to avoid nul byte danger")] -pub fn quote(in_bytes: &[u8]) -> Cow<[u8]> { - Quoter::new().allow_nul(true).quote(in_bytes).unwrap() -} - -/// Given a single word, return a string suitable to encode it as a shell argument. -/// -/// Uses default settings. The only error that can be returned is [`QuoteError::Nul`]. -/// -/// Equivalent to [`Quoter::new().quote(in_bytes)`](Quoter). -/// -/// (That configuration never returns `Err`, so this function does not panic.) -/// -/// The string equivalent is [shlex::try_quote]. -pub fn try_quote(in_bytes: &[u8]) -> Result<Cow<[u8]>, QuoteError> { - Quoter::new().quote(in_bytes) -} - -#[cfg(test)] -const INVALID_UTF8: &[u8] = b"\xa1"; -#[cfg(test)] -const INVALID_UTF8_SINGLEQUOTED: &[u8] = b"'\xa1'"; - -#[test] -#[allow(invalid_from_utf8)] -fn test_invalid_utf8() { - // Check that our test string is actually invalid UTF-8. - assert!(core::str::from_utf8(INVALID_UTF8).is_err()); -} - -#[cfg(test)] -static SPLIT_TEST_ITEMS: &'static [(&'static [u8], Option<&'static [&'static [u8]]>)] = &[ - (b"foo$baz", Some(&[b"foo$baz"])), - (b"foo baz", Some(&[b"foo", b"baz"])), - (b"foo\"bar\"baz", Some(&[b"foobarbaz"])), - (b"foo \"bar\"baz", Some(&[b"foo", b"barbaz"])), - (b" foo \nbar", Some(&[b"foo", b"bar"])), - (b"foo\\\nbar", Some(&[b"foobar"])), - (b"\"foo\\\nbar\"", Some(&[b"foobar"])), - (b"'baz\\$b'", Some(&[b"baz\\$b"])), - (b"'baz\\\''", None), - (b"\\", None), - (b"\"\\", None), - (b"'\\", None), - (b"\"", None), - (b"'", None), - (b"foo #bar\nbaz", Some(&[b"foo", b"baz"])), - (b"foo #bar", Some(&[b"foo"])), - (b"foo#bar", Some(&[b"foo#bar"])), - (b"foo\"#bar", None), - (b"'\\n'", Some(&[b"\\n"])), - (b"'\\\\n'", Some(&[b"\\\\n"])), - (INVALID_UTF8, Some(&[INVALID_UTF8])), -]; - -#[test] -fn test_split() { - for &(input, output) in SPLIT_TEST_ITEMS { - assert_eq!(split(input), output.map(|o| o.iter().map(|&x| x.to_owned()).collect())); - } -} - -#[test] -fn test_lineno() { - let mut sh = Shlex::new(b"\nfoo\nbar"); - while let Some(word) = sh.next() { - if word == b"bar" { - assert_eq!(sh.line_no, 3); - } - } -} - -#[test] -#[allow(deprecated)] -fn test_quote() { - // Validate behavior with invalid UTF-8: - assert_eq!(quote(INVALID_UTF8), INVALID_UTF8_SINGLEQUOTED); - // Replicate a few tests from lib.rs. No need to replicate all of them. 
- assert_eq!(quote(b""), &b"''"[..]); - assert_eq!(quote(b"foobar"), &b"foobar"[..]); - assert_eq!(quote(b"foo bar"), &b"'foo bar'"[..]); - assert_eq!(quote(b"'\""), &b"\"'\\\"\""[..]); - assert_eq!(quote(b""), &b"''"[..]); -} - -#[test] -#[allow(deprecated)] -fn test_join() { - // Validate behavior with invalid UTF-8: - assert_eq!(join(vec![INVALID_UTF8]), INVALID_UTF8_SINGLEQUOTED); - // Replicate a few tests from lib.rs. No need to replicate all of them. - assert_eq!(join(vec![]), &b""[..]); - assert_eq!(join(vec![&b""[..]]), b"''"); -} diff --git a/vendor/shlex/src/lib.rs b/vendor/shlex/src/lib.rs deleted file mode 100644 index aa5c3067af82eb..00000000000000 --- a/vendor/shlex/src/lib.rs +++ /dev/null @@ -1,358 +0,0 @@ -// Copyright 2015 Nicholas Allegra (comex). -// Licensed under the Apache License, Version 2.0 <https://www.apache.org/licenses/LICENSE-2.0> or -// the MIT license <https://opensource.org/licenses/MIT>, at your option. This file may not be -// copied, modified, or distributed except according to those terms. - -//! Parse strings like, and escape strings for, POSIX shells. -//! -//! Same idea as (but implementation not directly based on) the Python shlex module. -//! -//! Disabling the `std` feature (which is enabled by default) will allow the crate to work in -//! `no_std` environments, where the `alloc` crate, and a global allocator, are available. -//! -//! ## <span style="color:red">Warning</span> -//! -//! The [`try_quote`]/[`try_join`] family of APIs does not quote control characters (because they -//! cannot be quoted portably). -//! -//! This is fully safe in noninteractive contexts, like shell scripts and `sh -c` arguments (or -//! even scripts `source`d from interactive shells). -//! -//! But if you are quoting for human consumption, you should keep in mind that ugly inputs produce -//! ugly outputs (which may not be copy-pastable). -//! -//! And if by chance you are piping the output of [`try_quote`]/[`try_join`] directly to the stdin -//! of an interactive shell, you should stop, because control characters can lead to arbitrary -//! command injection. -//! -//! For more information, and for information about more minor issues, please see [quoting_warning]. -//! -//! ## Compatibility -//! -//! This crate's quoting functionality tries to be compatible with **any POSIX-compatible shell**; -//! it's tested against `bash`, `zsh`, `dash`, Busybox `ash`, and `mksh`, plus `fish` (which is not -//! POSIX-compatible but close enough). -//! -//! It also aims to be compatible with Python `shlex` and C `wordexp`. - -#![cfg_attr(not(feature = "std"), no_std)] - -extern crate alloc; -use alloc::vec::Vec; -use alloc::borrow::Cow; -use alloc::string::String; -#[cfg(test)] -use alloc::vec; -#[cfg(test)] -use alloc::borrow::ToOwned; - -pub mod bytes; -#[cfg(all(doc, not(doctest)))] -#[path = "quoting_warning.md"] -pub mod quoting_warning; - -/// An iterator that takes an input string and splits it into the words using the same syntax as -/// the POSIX shell. -/// -/// See [`bytes::Shlex`]. -pub struct Shlex<'a>(bytes::Shlex<'a>); - -impl<'a> Shlex<'a> { - pub fn new(in_str: &'a str) -> Self { - Self(bytes::Shlex::new(in_str.as_bytes())) - } -} - -impl<'a> Iterator for Shlex<'a> { - type Item = String; - fn next(&mut self) -> Option<String> { - self.0.next().map(|byte_word| { - // Safety: given valid UTF-8, bytes::Shlex will always return valid UTF-8. 
- unsafe { String::from_utf8_unchecked(byte_word) } - }) - } -} - -impl<'a> core::ops::Deref for Shlex<'a> { - type Target = bytes::Shlex<'a>; - - fn deref(&self) -> &Self::Target { - &self.0 - } -} - -impl<'a> core::ops::DerefMut for Shlex<'a> { - fn deref_mut(&mut self) -> &mut Self::Target { - &mut self.0 - } -} - -/// Convenience function that consumes the whole string at once. Returns None if the input was -/// erroneous. -pub fn split(in_str: &str) -> Option<Vec<String>> { - let mut shl = Shlex::new(in_str); - let res = shl.by_ref().collect(); - if shl.had_error { None } else { Some(res) } -} - -/// Errors from [`Quoter::quote`], [`Quoter::join`], etc. (and their [`bytes`] counterparts). -/// -/// By default, the only error that can be returned is [`QuoteError::Nul`]. If you call -/// `allow_nul(true)`, then no errors can be returned at all. Any error variants added in the -/// future will not be enabled by default; they will be enabled through corresponding non-default -/// [`Quoter`] options. -/// -/// ...In theory. In the unlikely event that additional classes of inputs are discovered that, -/// like nul bytes, are fundamentally unsafe to quote even for non-interactive shells, the risk -/// will be mitigated by adding corresponding [`QuoteError`] variants that *are* enabled by -/// default. -#[non_exhaustive] -#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] -pub enum QuoteError { - /// The input contained a nul byte. In most cases, shells fundamentally [cannot handle strings - /// containing nul bytes](quoting_warning#nul-bytes), no matter how they are quoted. But if - /// you're sure you can handle nul bytes, you can call `allow_nul(true)` on the `Quoter` to let - /// them pass through. - Nul, -} - -impl core::fmt::Display for QuoteError { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - match self { - QuoteError::Nul => f.write_str("cannot shell-quote string containing nul byte"), - } - } -} - -#[cfg(feature = "std")] -impl std::error::Error for QuoteError {} - -/// A more configurable interface to quote strings. If you only want the default settings you can -/// use the convenience functions [`try_quote`] and [`try_join`]. -/// -/// The bytes equivalent is [`bytes::Quoter`]. -#[derive(Default, Debug, Clone)] -pub struct Quoter { - inner: bytes::Quoter, -} - -impl Quoter { - /// Create a new [`Quoter`] with default settings. - #[inline] - pub fn new() -> Self { - Self::default() - } - - /// Set whether to allow [nul bytes](quoting_warning#nul-bytes). By default they are not - /// allowed and will result in an error of [`QuoteError::Nul`]. - #[inline] - pub fn allow_nul(mut self, allow: bool) -> Self { - self.inner = self.inner.allow_nul(allow); - self - } - - /// Convenience function that consumes an iterable of words and turns it into a single string, - /// quoting words when necessary. Consecutive words will be separated by a single space. - pub fn join<'a, I: IntoIterator<Item = &'a str>>(&self, words: I) -> Result<String, QuoteError> { - // Safety: given valid UTF-8, bytes::join() will always return valid UTF-8. - self.inner.join(words.into_iter().map(|s| s.as_bytes())) - .map(|bytes| unsafe { String::from_utf8_unchecked(bytes) }) - } - - /// Given a single word, return a string suitable to encode it as a shell argument. - pub fn quote<'a>(&self, in_str: &'a str) -> Result<Cow<'a, str>, QuoteError> { - Ok(match self.inner.quote(in_str.as_bytes())? 
{ - Cow::Borrowed(out) => { - // Safety: given valid UTF-8, bytes::quote() will always return valid UTF-8. - unsafe { core::str::from_utf8_unchecked(out) }.into() - } - Cow::Owned(out) => { - // Safety: given valid UTF-8, bytes::quote() will always return valid UTF-8. - unsafe { String::from_utf8_unchecked(out) }.into() - } - }) - } -} - -impl From<bytes::Quoter> for Quoter { - fn from(inner: bytes::Quoter) -> Quoter { - Quoter { inner } - } -} - -impl From<Quoter> for bytes::Quoter { - fn from(quoter: Quoter) -> bytes::Quoter { - quoter.inner - } -} - -/// Convenience function that consumes an iterable of words and turns it into a single string, -/// quoting words when necessary. Consecutive words will be separated by a single space. -/// -/// Uses default settings except that nul bytes are passed through, which [may be -/// dangerous](quoting_warning#nul-bytes), leading to this function being deprecated. -/// -/// Equivalent to [`Quoter::new().allow_nul(true).join(words).unwrap()`](Quoter). -/// -/// (That configuration never returns `Err`, so this function does not panic.) -/// -/// The bytes equivalent is [bytes::join]. -#[deprecated(since = "1.3.0", note = "replace with `try_join(words)?` to avoid nul byte danger")] -pub fn join<'a, I: IntoIterator<Item = &'a str>>(words: I) -> String { - Quoter::new().allow_nul(true).join(words).unwrap() -} - -/// Convenience function that consumes an iterable of words and turns it into a single string, -/// quoting words when necessary. Consecutive words will be separated by a single space. -/// -/// Uses default settings. The only error that can be returned is [`QuoteError::Nul`]. -/// -/// Equivalent to [`Quoter::new().join(words)`](Quoter). -/// -/// The bytes equivalent is [bytes::try_join]. -pub fn try_join<'a, I: IntoIterator<Item = &'a str>>(words: I) -> Result<String, QuoteError> { - Quoter::new().join(words) -} - -/// Given a single word, return a string suitable to encode it as a shell argument. -/// -/// Uses default settings except that nul bytes are passed through, which [may be -/// dangerous](quoting_warning#nul-bytes), leading to this function being deprecated. -/// -/// Equivalent to [`Quoter::new().allow_nul(true).quote(in_str).unwrap()`](Quoter). -/// -/// (That configuration never returns `Err`, so this function does not panic.) -/// -/// The bytes equivalent is [bytes::quote]. -#[deprecated(since = "1.3.0", note = "replace with `try_quote(str)?` to avoid nul byte danger")] -pub fn quote(in_str: &str) -> Cow<str> { - Quoter::new().allow_nul(true).quote(in_str).unwrap() -} - -/// Given a single word, return a string suitable to encode it as a shell argument. -/// -/// Uses default settings. The only error that can be returned is [`QuoteError::Nul`]. -/// -/// Equivalent to [`Quoter::new().quote(in_str)`](Quoter). -/// -/// (That configuration never returns `Err`, so this function does not panic.) -/// -/// The bytes equivalent is [bytes::try_quote]. 
-pub fn try_quote(in_str: &str) -> Result<Cow<str>, QuoteError> { - Quoter::new().quote(in_str) -} - -#[cfg(test)] -static SPLIT_TEST_ITEMS: &'static [(&'static str, Option<&'static [&'static str]>)] = &[ - ("foo$baz", Some(&["foo$baz"])), - ("foo baz", Some(&["foo", "baz"])), - ("foo\"bar\"baz", Some(&["foobarbaz"])), - ("foo \"bar\"baz", Some(&["foo", "barbaz"])), - (" foo \nbar", Some(&["foo", "bar"])), - ("foo\\\nbar", Some(&["foobar"])), - ("\"foo\\\nbar\"", Some(&["foobar"])), - ("'baz\\$b'", Some(&["baz\\$b"])), - ("'baz\\\''", None), - ("\\", None), - ("\"\\", None), - ("'\\", None), - ("\"", None), - ("'", None), - ("foo #bar\nbaz", Some(&["foo", "baz"])), - ("foo #bar", Some(&["foo"])), - ("foo#bar", Some(&["foo#bar"])), - ("foo\"#bar", None), - ("'\\n'", Some(&["\\n"])), - ("'\\\\n'", Some(&["\\\\n"])), -]; - -#[test] -fn test_split() { - for &(input, output) in SPLIT_TEST_ITEMS { - assert_eq!(split(input), output.map(|o| o.iter().map(|&x| x.to_owned()).collect())); - } -} - -#[test] -fn test_lineno() { - let mut sh = Shlex::new("\nfoo\nbar"); - while let Some(word) = sh.next() { - if word == "bar" { - assert_eq!(sh.line_no, 3); - } - } -} - -#[test] -#[cfg_attr(not(feature = "std"), allow(unreachable_code, unused_mut))] -fn test_quote() { - // This is a list of (unquoted, quoted) pairs. - // But it's using a single long (raw) string literal with an ad-hoc format, just because it's - // hard to read if we have to put the test strings through Rust escaping on top of the escaping - // being tested. (Even raw string literals are noisy for short strings). - // Ad-hoc: "NL" is replaced with a literal newline; no other escape sequences. - let tests = r#" - <> => <''> - <foobar> => <foobar> - <foo bar> => <'foo bar'> - <"foo bar'"> => <"\"foo bar'\""> - <'foo bar'> => <"'foo bar'"> - <"> => <'"'> - <"'> => <"\"'"> - <hello!world> => <'hello!world'> - <'hello!world> => <"'hello"'!world'> - <'hello!> => <"'hello"'!'> - <hello ^ world> => <'hello ''^ world'> - <hello^> => <hello'^'> - <!world'> => <'!world'"'"> - <{a, b}> => <'{a, b}'> - <NL> => <'NL'> - <^> => <'^'> - <foo^bar> => <foo'^bar'> - <NLx^> => <'NLx''^'> - <NL^x> => <'NL''^x'> - <NL ^x> => <'NL ''^x'> - <{a,b}> => <'{a,b}'> - <a,b> => <'a,b'> - <a..b => <a..b> - <'$> => <"'"'$'> - <"^> => <'"''^'> - "#; - let mut ok = true; - for test in tests.trim().split('\n') { - let parts: Vec<String> = test - .replace("NL", "\n") - .split("=>") - .map(|part| part.trim().trim_start_matches('<').trim_end_matches('>').to_owned()) - .collect(); - assert!(parts.len() == 2); - let unquoted = &*parts[0]; - let quoted_expected = &*parts[1]; - let quoted_actual = try_quote(&parts[0]).unwrap(); - if quoted_expected != quoted_actual { - #[cfg(not(feature = "std"))] - panic!("FAIL: for input <{}>, expected <{}>, got <{}>", - unquoted, quoted_expected, quoted_actual); - #[cfg(feature = "std")] - println!("FAIL: for input <{}>, expected <{}>, got <{}>", - unquoted, quoted_expected, quoted_actual); - ok = false; - } - } - assert!(ok); -} - -#[test] -#[allow(deprecated)] -fn test_join() { - assert_eq!(join(vec![]), ""); - assert_eq!(join(vec![""]), "''"); - assert_eq!(join(vec!["a", "b"]), "a b"); - assert_eq!(join(vec!["foo bar", "baz"]), "'foo bar' baz"); -} - -#[test] -fn test_fallible() { - assert_eq!(try_join(vec!["\0"]), Err(QuoteError::Nul)); - assert_eq!(try_quote("\0"), Err(QuoteError::Nul)); -} diff --git a/vendor/shlex/src/quoting_warning.md b/vendor/shlex/src/quoting_warning.md deleted file mode 100644 index fab9857bec9686..00000000000000 --- 
a/vendor/shlex/src/quoting_warning.md +++ /dev/null @@ -1,365 +0,0 @@ -// vim: textwidth=99 -/* -Meta note: This file is loaded as a .rs file by rustdoc only. -*/ -/*! - -A more detailed version of the [warning at the top level](super#warning) about the `quote`/`join` -family of APIs. - -In general, passing the output of these APIs to a shell should recover the original string(s). -This page lists cases where it fails to do so. - -In noninteractive contexts, there are only minor issues. 'Noninteractive' includes shell scripts -and `sh -c` arguments, or even scripts `source`d from interactive shells. The issues are: - -- [Nul bytes](#nul-bytes) - -- [Overlong commands](#overlong-commands) - -If you are writing directly to the stdin of an interactive (`-i`) shell (i.e., if you are -pretending to be a terminal), or if you are writing to a cooked-mode pty (even if the other end is -noninteractive), then there is a **severe** security issue: - -- [Control characters](#control-characters-interactive-contexts-only) - -Finally, there are some [solved issues](#solved-issues). - -# List of issues - -## Nul bytes - -For non-interactive shells, the most problematic input is nul bytes (bytes with value 0). The -non-deprecated functions all default to returning [`QuoteError::Nul`] when encountering them, but -the deprecated [`quote`] and [`join`] functions leave them as-is. - -In Unix, nul bytes can't appear in command arguments, environment variables, or filenames. It's -not a question of proper quoting; they just can't be used at all. This is a consequence of Unix's -system calls all being designed around nul-terminated C strings. - -Shells inherit that limitation. Most of them do not accept nul bytes in strings even internally. -Even when they do, it's pretty much useless or even dangerous, since you can't pass them to -external commands. - -In some cases, you might fail to pass the nul byte to the shell in the first place. For example, -the following code uses [`join`] to tunnel a command over an SSH connection: - -```rust -std::process::Command::new("ssh") - .arg("myhost") - .arg("--") - .arg(join(my_cmd_args)) -``` - -If any argument in `my_cmd_args` contains a nul byte, then `join(my_cmd_args)` will contain a nul -byte. But `join(my_cmd_args)` is itself being passed as an argument to a command (the ssh -command), and command arguments can't contain nul bytes! So this will simply result in the -`Command` failing to launch. - -Still, there are other ways to smuggle nul bytes into a shell. How the shell reacts depends on the -shell and the method of smuggling. For example, here is Bash 5.2.21 exhibiting three different -behaviors: - -- With ANSI-C quoting, the string is truncated at the first nul byte: - ```bash - $ echo $'foo\0bar' | hexdump -C - 00000000 66 6f 6f 0a |foo.| - ``` - -- With command substitution, nul bytes are removed with a warning: - ```bash - $ echo $(printf 'foo\0bar') | hexdump -C - bash: warning: command substitution: ignored null byte in input - 00000000 66 6f 6f 62 61 72 0a |foobar.| - ``` - -- When a nul byte appears directly in a shell script, it's removed with no warning: - ```bash - $ printf 'echo "foo\0bar"' | bash | hexdump -C - 00000000 66 6f 6f 62 61 72 0a |foobar.| - ``` - -Zsh, in contrast, actually allows nul bytes internally, in shell variables and even arguments to -builtin commands. But if a variable is exported to the environment, or if an argument is used for -an external command, then the child process will see it silently truncated at the first nul. 
This -might actually be more dangerous, depending on the use case. - -## Overlong commands - -If you pass a long string into a shell, several things might happen: - -- It might succeed, yet the shell might have trouble actually doing anything with it. For example: - - ```bash - x=$(printf '%010000000d' 0); /bin/echo $x - bash: /bin/echo: Argument list too long - ``` - -- If you're using certain shells (e.g. Busybox Ash) *and* using a pty for communication, then the - shell will impose a line length limit, ignoring all input past the limit. - -- If you're using a pty in cooked mode, then by default, if you write so many bytes as input that - it fills the kernel's internal buffer, the kernel will simply drop those bytes, instead of - blocking waiting for the shell to empty out the buffer. In other words, random bits of input can - be lost, which is obviously insecure. - -Future versions of this crate may add an option to [`Quoter`] to check the length for you. - -## Control characters (*interactive contexts only*) - -Control characters are the bytes from `\x00` to `\x1f`, plus `\x7f`. `\x00` (the nul byte) is -discussed [above](#nul-bytes), but what about the rest? Well, many of them correspond to terminal -keyboard shortcuts. For example, when you press Ctrl-A at a shell prompt, your terminal sends the -byte `\x01`. The shell sees that byte and (if not configured differently) takes the standard -action for Ctrl-A, which is to move the cursor to the beginning of the line. - -This means that it's quite dangerous to pipe bytes to an interactive shell. For example, here is a -program that tries to tell Bash to echo an arbitrary string, 'safely': -```rust -use std::process::{Command, Stdio}; -use std::io::Write; - -let evil_string = "\x01do_something_evil; "; -let quoted = shlex::try_quote(evil_string).unwrap(); -println!("quoted string is {:?}", quoted); - -let mut bash = Command::new("bash") - .arg("-i") // force interactive mode - .stdin(Stdio::piped()) - .spawn() - .unwrap(); -let stdin = bash.stdin.as_mut().unwrap(); -write!(stdin, "echo {}\n", quoted).unwrap(); -``` - -Here's the output of the program (with irrelevant bits removed): - -```text -quoted string is "'\u{1}do_something_evil; '" -/tmp comex$ do_something_evil; 'echo ' -bash: do_something_evil: command not found -bash: echo : command not found -``` - -Even though we quoted it, Bash still ran an arbitrary command! - -This is not because the quoting was insufficient, per se. In single quotes, all input is supposed -to be treated as raw data until the closing single quote. And in fact, this would work fine -without the `"-i"` argument. - -But line input is a separate stage from shell syntax parsing. After all, if you type a single -quote on the keyboard, you wouldn't expect it to disable all your keyboard shortcuts. So a control -character always has its designated effect, no matter if it's quoted or backslash-escaped. - -Also, some control characters are interpreted by the kernel tty layer instead, like CTRL-C to send -SIGINT. These can be an issue even with noninteractive shells, but only if using a pty for -communication, as opposed to a pipe. - -To be safe, you just have to avoid sending them. - -### Why not just use hex escapes? - -In any normal programming languages, this would be no big deal. - -Any normal language has a way to escape arbitrary characters in strings by writing out their -numeric values. For example, Rust lets you write them in hexadecimal, like `"\x4f"` (or -`"\u{1d546}"` for Unicode). 
In this way, arbitrary strings can be represented using only 'nice' -simple characters. Any remotely suspicious character can be replaced with a numeric escape -sequence, where the escape sequence itself consists only of alphanumeric characters and some -punctuation. The result may not be the most readable[^choices], but it's quite safe from being -misinterpreted or corrupted in transit. - -Shell is not normal. It has no numeric escape sequences. - -There are a few different ways to quote characters (unquoted, unquoted-with-backslash, single -quotes, double quotes), but all of them involve writing the character itself. If the input -contains a control character, the output must contain that same character. - -### Mitigation: terminal filters - -In practice, automating interactive shells like in the above example is pretty uncommon these days. -In most cases, the only way for a programmatically generated string to make its way to the input of -an interactive shell is if a human copies and pastes it into their terminal. - -And many terminals detect when you paste a string containing control characters. iTerm2 strips -them out; gnome-terminal replaces them with alternate characters[^gr]; Kitty outright prompts for -confirmation. This mitigates the risk. - -But it's not perfect. Some other terminals don't implement this check or implement it incorrectly. -Also, these checks tend to not filter the tab character, which could trigger tab completion. In -most cases that's a non-issue, because most shells support paste bracketing, which disables tab and -some other control characters[^bracketing] within pasted text. But in some cases paste bracketing -gets disabled. - -### Future possibility: ANSI-C quoting - -I said that shell syntax has no numeric escapes, but that only applies to *portable* shell syntax. -Bash and Zsh support an obscure alternate quoting style with the syntax `$'foo'`. It's called -["ANSI-C quoting"][ansic], and inside it you can use all the escape sequences supported by C, -including hex escapes: - -```bash -$ echo $'\x41\n\x42' -A -B -``` - -But other shells don't support it — including Dash, a popular choice for `/bin/sh`, and Busybox's -Ash, frequently seen on stripped-down embedded systems. This crate's quoting functionality [tries -to be compatible](crate#compatibility) with those shells, plus all other POSIX-compatible shells. -That makes ANSI-C quoting a no-go. - -Still, future versions of this crate may provide an option to enable ANSI-C quoting, at the cost of -reduced portability. - -### Future possibility: printf - -Another option would be to invoke the `printf` command, which is required by POSIX to support octal -escapes. For example, you could 'escape' the Rust string `"\x01"` into the shell syntax `"$(printf -'\001')"`. The shell will execute the command `printf` with the first argument being literally a -backslash followed by three digits; `printf` will output the actual byte with value 1; and the -shell will substitute that back into the original command. - -The problem is that 'escaping' a string into a command substitution just feels too surprising. If -nothing else, it only works with an actual shell; [other languages' shell parsing -routines](crate#compatibility) wouldn't understand it. Neither would this crate's own parser, -though that could be fixed. - -Future versions of this crate may provide an option to use `printf` for quoting. - -### Special note: newlines - -Did you know that `\r` and `\n` are control characters? 
They aren't as dangerous as other control -characters (if quoted properly). But there's still an issue with them in interactive contexts. - -Namely, in some cases, interactive shells and/or the tty layer will 'helpfully' translate between -different line ending conventions. The possibilities include replacing `\r` with `\n`, replacing -`\n` with `\r\n`, and others. This can't result in command injection, but it's still a lossy -transformation which can result in a failure to round-trip (i.e. the shell sees a different string -from what was originally passed to `quote`). - -Numeric escapes would solve this as well. - -# Solved issues - -## Solved: Past vulnerability (GHSA-r7qv-8r2h-pg27 / RUSTSEC-2024-XXX) - -Versions of this crate before 1.3.0 did not quote `{`, `}`, and `\xa0`. - -See: -- <https://github.com/advisories/GHSA-r7qv-8r2h-pg27> -- (TODO: Add Rustsec link) - -## Solved: `!` and `^` - -There are two non-control characters which have a special meaning in interactive contexts only: `!` and -`^`. Luckily, these can be escaped adequately. - -The `!` character triggers [history expansion][he]; the `^` character can trigger a variant of -history expansion known as [Quick Substitution][qs]. Both of these characters get expanded even -inside of double-quoted strings\! - -If we're in a double-quoted string, then we can't just escape these characters with a backslash. -Only a specific set of characters can be backslash-escaped inside double quotes; the set of -supported characters depends on the shell, but it often doesn't include `!` and `^`.[^escbs] -Trying to backslash-escape an unsupported character produces a literal backslash: -```bash -$ echo "\!" -\! -``` - -However, these characters don't get expanded in single-quoted strings, so this crate just -single-quotes them. - -But there's a Bash bug where `^` actually does get partially expanded in single-quoted strings: -```bash -$ echo ' -> ^a^b -> ' - -!!:s^a^b -``` - -To work around that, this crate forces `^` to appear right after an opening single quote. For -example, the string `"^` is quoted into `'"''^'` instead of `'"^'`. This restriction is overkill, -since `^` is only meaningful right after a newline, but it's a sufficient restriction (after all, a -`^` character can't be preceded by a newline if it's forced to be preceded by a single quote), and -for now it simplifies things. - -## Solved: `\xa0` - -The byte `\xa0` may be treated as a shell word separator, specifically on Bash on macOS when using -the default UTF-8 locale, only when the input is invalid UTF-8. This crate handles the issue by -always using quotes for arguments containing this byte. - -In fact, this crate always uses quotes for arguments containing any non-ASCII bytes. This may be -changed in the future, since it's a bit unfriendly to non-English users. But for now it -minimizes risk, especially considering the large number of different legacy single-byte locales -someone might hypothetically be running their shell in. - -### Demonstration - -```bash -$ echo -e 'ls a\xa0b' | bash -ls: a: No such file or directory -ls: b: No such file or directory -``` -The normal behavior would be to output a single line, e.g.: -```bash -$ echo -e 'ls a\xa0b' | bash -ls: cannot access 'a'$'\240''b': No such file or directory -``` -(The specific quoting in the error doesn't matter.) - -### Cause - -Just for fun, here's why this behavior occurs: - -Bash decides which bytes serve as word separators based on the libc function [`isblank`][isblank]. 
-On macOS on UTF-8 locales, this passes for `\xa0`, corresponding to U+00A0 NO-BREAK SPACE. - -This is doubly unique compared to the other systems I tested (Linux/glibc, Linux/musl, and -Windows/MSVC). First, the other systems don't allow bytes in the range [0x80, 0xFF] to pass -<code>is<i>foo</i></code> functions in UTF-8 locales, even if the corresponding Unicode codepoint -does pass, as determined by the wide-character equivalent function, <code>isw<i>foo</i></code>. -Second, the other systems don't treat U+00A0 as blank (even using `iswblank`). - -Meanwhile, Bash checks for multi-byte sequences and forbids them from being treated as special -characters, so the proper UTF-8 encoding of U+00A0, `b"\xc2\xa0"`, is not treated as a word -separator. Treatment as a word separator only happens for `b"\xa0"` alone, which is illegal UTF-8. - -[ansic]: https://www.gnu.org/software/bash/manual/html_node/ANSI_002dC-Quoting.html -[he]: https://www.gnu.org/software/bash/manual/html_node/History-Interaction.html -[qs]: https://www.gnu.org/software/bash/manual/html_node/Event-Designators.html -[isblank]: https://man7.org/linux/man-pages/man3/isblank.3p.html -[nul]: #nul-bytes - -[^choices]: This can lead to tough choices over which - characters to escape and which to leave as-is, especially when Unicode gets involved and you - have to balance the risk of confusion with the benefit of properly supporting non-English - languages. - <br> - <br> - We don't have the luxury of those choices. - -[^gr]: For example, backspace (in Unicode lingo, U+0008 BACKSPACE) turns into U+2408 SYMBOL FOR BACKSPACE. - -[^bracketing]: It typically disables almost all handling of control characters by the shell proper, - but one necessary exception is the end-of-paste sequence itself (which starts with the control - character `\x1b`). In addition, paste bracketing does not suppress handling of control - characters by the kernel tty layer, such as `\x03` sending SIGINT (which typically clears the - currently typed command, making it dangerous in a similar way to `\x01`). - -[^escbs]: For example, Dash doesn't remove the backslash from `"\!"` because it simply doesn't know - anything about `!` as a special character: it doesn't support history expansion. On the other - end of the spectrum, Zsh supports history expansion and does remove the backslash — though only - in interactive mode. Bash's behavior is weirder. It supports history expansion, and if you - write `"\!"`, the backslash does prevent history expansion from occurring — but it doesn't get - removed! - -*/ - -// `use` declarations to make auto links work: -use ::{quote, join, Shlex, Quoter, QuoteError}; - -// TODO: add more about copy-paste and human readability. 
diff --git a/vendor/syn/.cargo-checksum.json b/vendor/syn/.cargo-checksum.json deleted file mode 100644 index ce1b816764beb8..00000000000000 --- a/vendor/syn/.cargo-checksum.json +++ /dev/null @@ -1 +0,0 @@ -{"files":{".cargo_vcs_info.json":"645009f117c4017452703e2b0999c110f324ef2cb9f5aff4055fd3712c3a90ab","Cargo.lock":"151d8723bc5c18decd094e2053baaef8853dfb4b0652aae99d90c6bb03c7af20","Cargo.toml":"25e3f93c23cc3f60f1bae346e1bdcf7c0790a09168b5bcac3e80e6158d437a9b","Cargo.toml.orig":"bf872e309248326ee3a96a8871b927f059277754f4534136b2d637ce86c63004","LICENSE-APACHE":"62c7a1e35f56406896d7aa7ca52d0cc0d272ac022b5d2796e7d6905db8a3636a","LICENSE-MIT":"23f18e03dc49df91622fe2a76176497404e46ced8a715d9d2b67a7446571cca3","README.md":"ae6deb98ea51df4829c0327139a555cc115c6bcf6fb459db0ef0d6a96c4566ec","benches/file.rs":"6f2ae7748d1576cff48e4ef55d4e87e2f5bb7898a36aa99ea6dd6ed0f72a4e3c","benches/rust.rs":"53cb8accfda73e59a3efc78081e7c58a1010ae60c23ef8c43bb240550daa3e96","src/attr.rs":"f44ff3cb9d3bc34d7de9e3f1aba62ddb1c8973881d9db981623b112005d4ed53","src/bigint.rs":"0299829b2f7a1a798fe2f7bc1680e4a10f9b6f4a852d09af4da2deab466c4242","src/buffer.rs":"7622b646e0d2399068868c41cb6b8abec39afa925c220a8e9f19c2a424911fd4","src/classify.rs":"3c796df4c891034abc3008196d34ad62c22fcb9525a067890731a5a6bbc7b5fb","src/custom_keyword.rs":"322114e36ae43a2f8605506fb4568efdbc2986853e2fee74bd10a4ca0fb60c69","src/custom_punctuation.rs":"26b28164f0b2e5e80e7cf36a3ba6d2577d27442cce5e00a72f685b5ee6f51ecd","src/data.rs":"fa04dce757ca3dd1e350aaa84bbcf8b743c13a00b0983b980bf2919f91a22078","src/derive.rs":"f54f8cf9386a2d45186ff3c86ade5dae59e0e337b0198532449190ae8520cff8","src/discouraged.rs":"653c5d9e6c4e3c2359817dc343f145569a0c9562a707f4949c374c242955ce12","src/drops.rs":"e98da4aaafb5afc75919f9e2914326ad09bf16094a9407c60a05515a2e01dd00","src/error.rs":"cbf06fb7b000f2e6444fa224a062c493911a8f9fc5d386be6e52dadbb7658f34","src/export.rs":"b260cc49da1da3489e7755832bc8015cfad79e84f6c74e237f65ae25a2385e56","src/expr.rs":"fa766ce749ea31367e178f45a2dc8f8545b9467f7fc51e7a1fe72bbb0b9738dc","src/ext.rs":"57577c7e6b7b65cd27ac5aad66d47e63693762d8880cde088b20975ec845244d","src/file.rs":"9d04206da5eff88e92698b3f78c51082d615300cb11d93768250a3e97c40b078","src/fixup.rs":"7647cde30efdce96b1488ae805788c168d4499b464b7d421abc17ea8ffde66f2","src/gen/clone.rs":"7af00b0a240453d7aac178be1b6cdf9df3b33f1281da35e02f66ba7ba55d060c","src/gen/debug.rs":"59bc259fa9dc0c7ffe094df7ad8afc9c4e79b6d73a8f0fae8a2e435905866e7a","src/gen/eq.rs":"d7428672d82c1931fdefb8bda0425a25ebbe20e5e2736b18cfd1752b64e99f78","src/gen/fold.rs":"39b0a26cfdf0accaff6da108a2b6d8f93e83c63d0bf6a8d7af0900fc0f71b55b","src/gen/hash.rs":"6808bb0e47e7346a14fbec5f55430906efa172f46417c83c2f7c76ce8c9ceab6","src/gen/token.css":"3a5882d0b3aefbf56ca5d08c28c117e25ac2327eadf7242202d906b2ddb2232e","src/gen/visit.rs":"fe1443aa7953eaca10d6bf982396e627e31ce6b8aea8eb7cf949e0adeea5badb","src/gen/visit_mut.rs":"9948f0f07aefd8133dcc958e744c49f1da625d199f7707363b79f0373b2dcd6b","src/generics.rs":"6170b4a9d82ba27a3b4471057a5206e45d4b379813855b67d06aa1fc7f978ccc","src/group.rs":"ddbff97e41315bdf9dfce215a8c00bb4d532827cf794246afde7308b39dc09ca","src/ident.rs":"d6061030fadae9c7dc847e1ee46178d9657d782aad108c7197e8cafe765b3eaa","src/item.rs":"ad2d5f4621426420ba4dc0c1a82626b7b0979cb67c06fbcb16ee6abb025e7c80","src/lib.rs":"33992cd3fb39b1af62b844da65596854d355ad7e85d516d67bbf67b3f04bfc09","src/lifetime.rs":"ec748fdbdedeb75c4dbc4460653cf97fcf113207eea5b12fea9e1f6e1198beca","src/lit.rs":"69ef534be9ba43de0da9a65d75de36f3d14d83f5bd1666ea72
419c9302095408","src/lookahead.rs":"b2837d80fa4466bb430b65d32b54d1bad5de9bb851550f916658347145c281b4","src/mac.rs":"fdce8291f71adef3f69975f229156dca2309ca232ed943061afaf96220908ab8","src/macros.rs":"2a6e895dfe1c3a9a7237b5e23358ca5d8967e2beae6d094dda68d3659f9a5c84","src/meta.rs":"969d8ccbdbc6ea2e4928a21831b791c57447b231e1373149e4c63b46f3951801","src/op.rs":"a61757370f802e44efa3c4a1057ae2cd26e64e273f7d76c06d5ffb49602319e2","src/parse.rs":"bbe69237d50ce5f9b5c029e851607c54ca6232cad0790551c2f5bb29e2f9657d","src/parse_macro_input.rs":"e4e22b63d0496d06a4ca17742a22467ed93f08a739081324773828bad63175ee","src/parse_quote.rs":"80eec7ce54c38f3bbd23acb70cd8a6649d7e1523c3977e3bf12849fd8c5cf16d","src/pat.rs":"b6c8c04c330a76dbe9cd35949026724fc3aeacf98e8c0a259cf2e16caff99071","src/path.rs":"2146bdf5e0eb6991232c8a09de3a30440727f439ab792a34f5313057c091a724","src/precedence.rs":"58420a5015003ecd4d7a4a0c87c168caa4c696e646355523d9eaae81fc5e1d54","src/print.rs":"22910bf0521ab868ebd7c62601c55912d12cfb400c65723e08e5cfa3a2d111c0","src/punctuated.rs":"711c1f9122f560530d40bdccbd8784b6c2c54067f0d753cce282a4d6ca933a37","src/restriction.rs":"a7152ec5a4ee4f55446019aa2b4d84f2238776f0e6ffc0c22adf3374b517fe56","src/scan_expr.rs":"e199c35e8bbf3e2c70901e1175df8dd446f4cb67b60100647f478f2dc31f6f12","src/sealed.rs":"6ece3b3dcb30f6bb98b93d83759ca7712ee8592bef9c0511141039c38765db0e","src/span.rs":"0a48e375e5c9768f6f64174a91ba6a255f4b021e2fb3548d8494e617f142601b","src/spanned.rs":"4b9bd65f60ab81922adfd0be8f03b6d50e98da3a5f525f242f9639aec4beac79","src/stmt.rs":"7a594d08cbedef4c6c0ed6ca9c331f4f087bd631a12938240180f7c53ada44e9","src/thread.rs":"1f1deb1272525ab2af9a36aac4bce8f65b0e315adb1656641fd7075662f49222","src/token.rs":"55f1ad3ba0edc43ae7b65a6fa6dc13fc1a99053d6300187a4cc48572b8f451f3","src/tt.rs":"ad478bef531007fac0e4af7ecae81f8fe66a5ce44532288156b7e3d4bfc45950","src/ty.rs":"b7daaf57dd96fc09448e45fc92f55b00f3b7ba99a00f3f2eb8a11f35e302af3c","src/verbatim.rs":"4aa06d0ce2f6b6c6aa657bc349ccc85005d2eb05494dfa1ac1fe9012916dcc3e","src/whitespace.rs":"9cdcbfe9045b259046329a795bc1105ab5a871471a6d3f7318d275ee53f7a825","tests/common/eq.rs":"4e66a9bd9262a8ff7db3a243cbb21c870b50f3286a23d013c767ec849e4f311d","tests/common/mod.rs":"b752aa8f1faf8c6abf1286a12fb50b6c257ec1889d81bcdb3dc3257134695a89","tests/common/parse.rs":"f226bfa84803429c4ef203a09b30372db01298e14443089fb60c11e2112212db","tests/common/visit.rs":"a260ecd2ce7853cd3644e19aba08e8d358a656fd3fb0f1287cea40c59c9e62c9","tests/debug/gen.rs":"cdd89f1bf91fe215e06868fc93423d2f1872c812c3bfec93dc920bc105e20c09","tests/debug/mod.rs":"1259df940bbcaa968a837e402d6853f2efa38d2260e306d42f17f9e8ef74fae5","tests/macros/mod.rs":"d2294a79e341c623ae671dd363e99965d78dda7f340b0cc038267207adfacae2","tests/regression.rs":"e9565ea0efecb4136f099164ffcfa26e1996b0a27fb9c6659e90ad9bdd42e7b6","tests/regression/issue1108.rs":"f32db35244a674e22ff824ca9e5bbec2184e287b59f022db68c418b5878a2edc","tests/regression/issue1235.rs":"a2266b10c3f7c7af5734817ab0a3e8b309b51e7d177b63f26e67e6b744d280b0","tests/repo/mod.rs":"4e2d370876192fc0514962e1eeb9e1e4a96e3805b1f87257ba4d1eeda8b1db73","tests/repo/progress.rs":"c08d0314a7f3ecf760d471f27da3cd2a500aeb9f1c8331bffb2aa648f9fabf3f","tests/snapshot/mod.rs":"4a101272c5abe6ca9f3501e0cacacee9a0ccf7ca773348a239e5b046d0316a7e","tests/test_asyncness.rs":"971d560d927d5a8494eaa7fce8f0d062d6971c17c4c464fcfc31570572b7d3d7","tests/test_attribute.rs":"8a4429b7cfe2360bb73beae54a62ae3255ebbd5181467a8608d6f858c2711728","tests/test_derive_input.rs":"c8f5dbac6482dadd0fab30d0b1fe3254869256c48ea68ea484cad7
f7406c8568","tests/test_expr.rs":"055cb9b33a5bb6ed5dc67491e6f1ae794a53a5a091245debd464ef57144f5edb","tests/test_generics.rs":"0d79a25b75e45779185c2adefd3d88a9e49d0f333d885265551df1402d50abaf","tests/test_grouping.rs":"fe3de6e8824f0722ab6450c6dfc374f6e0f8fe75c87c4dd56b2cb00a2197ed58","tests/test_ident.rs":"d5850e817720e774cd397a46dbc5298c57933823c18e20805e84503fc9387e8f","tests/test_item.rs":"f4119000784af2d65d5fd097830368a391c05b249f3df8c60613a98b16a322ca","tests/test_lit.rs":"4130efa425d14ed3ad9a1c2a00ef4b29782c9d1cf9e29ff9dddd3b23b2e3ddee","tests/test_meta.rs":"5b0fdee0decbd07476c9673403a662de385901b4bf60600c26ac879893f5bf9c","tests/test_parse_buffer.rs":"0de6af13ba0345986b18d495063f9b75a1018e8569c34b277f9522c63a6c0941","tests/test_parse_quote.rs":"85d90d2d51b82aab7c30159dd884f26c592ddb28ed31ef2baf371ee31349694c","tests/test_parse_stream.rs":"b6b533432173123d6d01d8d2cb33714bc50b30b16ffbb6116f93937221ad4594","tests/test_pat.rs":"dafa3e1f51812e8c852dc5210640a4adf6fff7cd0a0790ee17d2c4c115321846","tests/test_path.rs":"7a6763a262c41a9522068887702fe7cd4ff72b07da5253ac47761d73315b021d","tests/test_precedence.rs":"ed27331fe3bc4496970e677df0d2f66e4516e6eea975d4a31029338ad23c79c0","tests/test_punctuated.rs":"efed2c281b6965d71b065c7606631ba1989af6e7b5f5d1ca1033f8b968dc076c","tests/test_receiver.rs":"2053028236f95f3cb508ebf2eb606df43cae4f9f4dd27823661459ff6c54a39c","tests/test_round_trip.rs":"8b2ed3c4164247577953e3108cca67eed97761c90b9c0df31cbd50097ed1a047","tests/test_shebang.rs":"9bc24b1ee2947b06a279d2ed40039cb45bba6caf7cd40530d93f7e2355de53c6","tests/test_size.rs":"03efaf829b80b7db1f831474c1d3ce268914fc499d0e2a7eea03cad04a482974","tests/test_stmt.rs":"b3c120059d7b56388963b85234feb2e4d379e32a0bf7f29b6683eca000dd3919","tests/test_token_trees.rs":"c30b921a96739c9334ec2bdd06552729891e0251b9d8fbdf0b8f5cc897babee5","tests/test_ty.rs":"9bb5f632941451ca6b200100310b55e62a9956190df3efe28b80d42843e75362","tests/test_unparenthesize.rs":"e5c047819afd5f70fde1bdd095467b1291d0854641f21e8183e50919986d8ce7","tests/test_visibility.rs":"7d05f05b0782976369d21477ac9f4d35a7c7f36faa42127e3a9c12ada270baf8","tests/zzz_stable.rs":"2a862e59cb446235ed99aec0e6ada8e16d3ecc30229b29d825b7c0bbc2602989"},"package":"a99801b5bd34ede4cf3fc688c5919368fea4e4814a4664359503e6015b280aea"} \ No newline at end of file diff --git a/vendor/syn/.cargo_vcs_info.json b/vendor/syn/.cargo_vcs_info.json deleted file mode 100644 index b403b881f18c95..00000000000000 --- a/vendor/syn/.cargo_vcs_info.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "git": { - "sha1": "1c8cabea3c98acb7c23722b9663e269d93ce692b" - }, - "path_in_vcs": "" -} \ No newline at end of file diff --git a/vendor/syn/Cargo.lock b/vendor/syn/Cargo.lock deleted file mode 100644 index ced51021e878ce..00000000000000 --- a/vendor/syn/Cargo.lock +++ /dev/null @@ -1,1819 +0,0 @@ -# This file is automatically @generated by Cargo. -# It is not intended for manual editing. 
-version = 3 - -[[package]] -name = "adler2" -version = "2.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "320119579fcad9c21884f5c4861d16174d0e06250625266f50fe6898340abefa" - -[[package]] -name = "anyhow" -version = "1.0.100" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a23eb6b1614318a8071c9b2521f36b424b2c83db5eb3a0fead4a6c0809af6e61" - -[[package]] -name = "atomic-waker" -version = "1.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1505bd5d3d116872e7271a6d4e16d81d0c8570876c8de68093a09ac269d8aac0" - -[[package]] -name = "automod" -version = "1.0.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ebb4bd301db2e2ca1f5be131c24eb8ebf2d9559bc3744419e93baf8ddea7e670" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.109", -] - -[[package]] -name = "base64" -version = "0.22.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6" - -[[package]] -name = "bitflags" -version = "2.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "812e12b5285cc515a9c72a5c1d3b6d46a19dac5acfef5265968c166106e31dd3" - -[[package]] -name = "bumpalo" -version = "3.19.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46c5e41b57b8bba42a04676d81cb89e9ee8e859a1a66f80a5a72e1cb76b34d43" - -[[package]] -name = "bytes" -version = "1.10.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d71b6127be86fdcfddb610f7182ac57211d4b18a3e9c82eb2d17662f2227ad6a" - -[[package]] -name = "cc" -version = "1.2.45" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35900b6c8d709fb1d854671ae27aeaa9eec2f8b01b364e1619a40da3e6fe2afe" -dependencies = [ - "find-msvc-tools", - "shlex", -] - -[[package]] -name = "cfg-if" -version = "1.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9330f8b2ff13f34540b44e946ef35111825727b38d33286ef986142615121801" - -[[package]] -name = "console" -version = "0.15.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "054ccb5b10f9f2cbf51eb355ca1d05c2d279ce1804688d0db74b4733a5aeafd8" -dependencies = [ - "encode_unicode", - "libc", - "once_cell", - "windows-sys 0.59.0", -] - -[[package]] -name = "core-foundation" -version = "0.9.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91e195e091a93c46f7102ec7818a2aa394e1e1771c3ab4825963fa03e45afb8f" -dependencies = [ - "core-foundation-sys", - "libc", -] - -[[package]] -name = "core-foundation-sys" -version = "0.8.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b" - -[[package]] -name = "crc32fast" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9481c1c90cbf2ac953f07c8d4a58aa3945c425b7185c9154d67a65e4230da511" -dependencies = [ - "cfg-if", -] - -[[package]] -name = "crossbeam-deque" -version = "0.8.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9dd111b7b7f7d55b72c0a6ae361660ee5853c9af73f70c3c2ef6858b950e2e51" -dependencies = [ - "crossbeam-epoch", - "crossbeam-utils", -] - -[[package]] -name = "crossbeam-epoch" -version = "0.9.18" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"5b82ac4a3c2ca9c3460964f020e1402edd5753411d7737aa39c3714ad1b5420e" -dependencies = [ - "crossbeam-utils", -] - -[[package]] -name = "crossbeam-utils" -version = "0.8.21" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d0a5c400df2834b80a4c3327b3aad3a4c4cd4de0629063962b03235697506a28" - -[[package]] -name = "displaydoc" -version = "0.2.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.109", -] - -[[package]] -name = "either" -version = "1.15.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "48c757948c5ede0e46177b7add2e67155f70e33c07fea8284df6576da70b3719" - -[[package]] -name = "encode_unicode" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34aa73646ffb006b8f5147f3dc182bd4bcb190227ce861fc4a4844bf8e3cb2c0" - -[[package]] -name = "encoding_rs" -version = "0.8.35" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75030f3c4f45dafd7586dd6780965a8c7e8e285a5ecb86713e63a79c5b2766f3" -dependencies = [ - "cfg-if", -] - -[[package]] -name = "equivalent" -version = "1.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "877a4ace8713b0bcf2a4e7eec82529c029f1d0619886d18145fea96c3ffe5c0f" - -[[package]] -name = "errno" -version = "0.3.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "39cab71617ae0d63f51a36d69f866391735b51691dbda63cf6f96d042b63efeb" -dependencies = [ - "libc", - "windows-sys 0.61.2", -] - -[[package]] -name = "fastrand" -version = "2.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be" - -[[package]] -name = "filetime" -version = "0.2.26" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc0505cd1b6fa6580283f6bdf70a73fcf4aba1184038c90902b92b3dd0df63ed" -dependencies = [ - "cfg-if", - "libc", - "libredox", - "windows-sys 0.60.2", -] - -[[package]] -name = "find-msvc-tools" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "52051878f80a721bb68ebfbc930e07b65ba72f2da88968ea5c06fd6ca3d3a127" - -[[package]] -name = "flate2" -version = "1.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfe33edd8e85a12a67454e37f8c75e730830d83e313556ab9ebf9ee7fbeb3bfb" -dependencies = [ - "crc32fast", - "miniz_oxide", -] - -[[package]] -name = "fnv" -version = "1.0.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" - -[[package]] -name = "foreign-types" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1" -dependencies = [ - "foreign-types-shared", -] - -[[package]] -name = "foreign-types-shared" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" - -[[package]] -name = "form_urlencoded" -version = "1.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb4cb245038516f5f85277875cdaa4f7d2c9a0fa0468de06ed190163b1581fcf" -dependencies = [ - "percent-encoding", -] - -[[package]] -name = "futures-channel" -version = 
"0.3.31" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2dff15bf788c671c1934e366d07e30c1814a8ef514e1af724a602e8a2fbe1b10" -dependencies = [ - "futures-core", - "futures-sink", -] - -[[package]] -name = "futures-core" -version = "0.3.31" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05f29059c0c2090612e8d742178b0580d2dc940c837851ad723096f87af6663e" - -[[package]] -name = "futures-io" -version = "0.3.31" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e5c1b78ca4aae1ac06c48a526a655760685149f0d465d21f37abfe57ce075c6" - -[[package]] -name = "futures-sink" -version = "0.3.31" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e575fab7d1e0dcb8d0c7bcf9a63ee213816ab51902e6d244a95819acacf1d4f7" - -[[package]] -name = "futures-task" -version = "0.3.31" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f90f7dce0722e95104fcb095585910c0977252f286e354b5e3bd38902cd99988" - -[[package]] -name = "futures-util" -version = "0.3.31" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9fa08315bb612088cc391249efdc3bc77536f16c91f6cf495e6fbe85b20a4a81" -dependencies = [ - "futures-core", - "futures-io", - "futures-sink", - "futures-task", - "memchr", - "pin-project-lite", - "pin-utils", - "slab", -] - -[[package]] -name = "getrandom" -version = "0.2.16" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "335ff9f135e4384c8150d6f27c6daed433577f86b4750418338c01a1a2528592" -dependencies = [ - "cfg-if", - "libc", - "wasi", -] - -[[package]] -name = "getrandom" -version = "0.3.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "899def5c37c4fd7b2664648c28120ecec138e4d395b459e5ca34f9cce2dd77fd" -dependencies = [ - "cfg-if", - "libc", - "r-efi", - "wasip2", -] - -[[package]] -name = "h2" -version = "0.4.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f3c0b69cfcb4e1b9f1bf2f53f95f766e4661169728ec61cd3fe5a0166f2d1386" -dependencies = [ - "atomic-waker", - "bytes", - "fnv", - "futures-core", - "futures-sink", - "http", - "indexmap", - "slab", - "tokio", - "tokio-util", - "tracing", -] - -[[package]] -name = "hashbrown" -version = "0.16.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5419bdc4f6a9207fbeba6d11b604d481addf78ecd10c11ad51e76c2f6482748d" - -[[package]] -name = "http" -version = "1.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4a85d31aea989eead29a3aaf9e1115a180df8282431156e533de47660892565" -dependencies = [ - "bytes", - "fnv", - "itoa", -] - -[[package]] -name = "http-body" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1efedce1fb8e6913f23e0c92de8e62cd5b772a67e7b3946df930a62566c93184" -dependencies = [ - "bytes", - "http", -] - -[[package]] -name = "http-body-util" -version = "0.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b021d93e26becf5dc7e1b75b1bed1fd93124b374ceb73f43d4d4eafec896a64a" -dependencies = [ - "bytes", - "futures-core", - "http", - "http-body", - "pin-project-lite", -] - -[[package]] -name = "httparse" -version = "1.10.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6dbf3de79e51f3d586ab4cb9d5c3e2c14aa28ed23d180cf89b4df0454a69cc87" - -[[package]] -name = "hyper" -version = "1.7.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb3aa54a13a0dfe7fbe3a59e0c76093041720fdc77b110cc0fc260fafb4dc51e" -dependencies = [ - "atomic-waker", - "bytes", - "futures-channel", - "futures-core", - "h2", - "http", - "http-body", - "httparse", - "itoa", - "pin-project-lite", - "pin-utils", - "smallvec", - "tokio", - "want", -] - -[[package]] -name = "hyper-rustls" -version = "0.27.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e3c93eb611681b207e1fe55d5a71ecf91572ec8a6705cdb6857f7d8d5242cf58" -dependencies = [ - "http", - "hyper", - "hyper-util", - "rustls", - "rustls-pki-types", - "tokio", - "tokio-rustls", - "tower-service", -] - -[[package]] -name = "hyper-tls" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70206fc6890eaca9fde8a0bf71caa2ddfc9fe045ac9e5c70df101a7dbde866e0" -dependencies = [ - "bytes", - "http-body-util", - "hyper", - "hyper-util", - "native-tls", - "tokio", - "tokio-native-tls", - "tower-service", -] - -[[package]] -name = "hyper-util" -version = "0.1.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c6995591a8f1380fcb4ba966a252a4b29188d51d2b89e3a252f5305be65aea8" -dependencies = [ - "base64", - "bytes", - "futures-channel", - "futures-core", - "futures-util", - "http", - "http-body", - "hyper", - "ipnet", - "libc", - "percent-encoding", - "pin-project-lite", - "socket2", - "system-configuration", - "tokio", - "tower-service", - "tracing", - "windows-registry", -] - -[[package]] -name = "icu_collections" -version = "2.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c6b649701667bbe825c3b7e6388cb521c23d88644678e83c0c4d0a621a34b43" -dependencies = [ - "displaydoc", - "potential_utf", - "yoke", - "zerofrom", - "zerovec", -] - -[[package]] -name = "icu_locale_core" -version = "2.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "edba7861004dd3714265b4db54a3c390e880ab658fec5f7db895fae2046b5bb6" -dependencies = [ - "displaydoc", - "litemap", - "tinystr", - "writeable", - "zerovec", -] - -[[package]] -name = "icu_normalizer" -version = "2.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f6c8828b67bf8908d82127b2054ea1b4427ff0230ee9141c54251934ab1b599" -dependencies = [ - "icu_collections", - "icu_normalizer_data", - "icu_properties", - "icu_provider", - "smallvec", - "zerovec", -] - -[[package]] -name = "icu_normalizer_data" -version = "2.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7aedcccd01fc5fe81e6b489c15b247b8b0690feb23304303a9e560f37efc560a" - -[[package]] -name = "icu_properties" -version = "2.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e93fcd3157766c0c8da2f8cff6ce651a31f0810eaa1c51ec363ef790bbb5fb99" -dependencies = [ - "icu_collections", - "icu_locale_core", - "icu_properties_data", - "icu_provider", - "zerotrie", - "zerovec", -] - -[[package]] -name = "icu_properties_data" -version = "2.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02845b3647bb045f1100ecd6480ff52f34c35f82d9880e029d329c21d1054899" - -[[package]] -name = "icu_provider" -version = "2.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85962cf0ce02e1e0a629cc34e7ca3e373ce20dda4c4d7294bbd0bf1fdb59e614" -dependencies = [ - "displaydoc", - "icu_locale_core", - "writeable", - "yoke", - "zerofrom", - "zerotrie", - 
"zerovec", -] - -[[package]] -name = "idna" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b0875f23caa03898994f6ddc501886a45c7d3d62d04d2d90788d47be1b1e4de" -dependencies = [ - "idna_adapter", - "smallvec", - "utf8_iter", -] - -[[package]] -name = "idna_adapter" -version = "1.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3acae9609540aa318d1bc588455225fb2085b9ed0c4f6bd0d9d5bcd86f1a0344" -dependencies = [ - "icu_normalizer", - "icu_properties", -] - -[[package]] -name = "indexmap" -version = "2.12.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6717a8d2a5a929a1a2eb43a12812498ed141a0bcfb7e8f7844fbdbe4303bba9f" -dependencies = [ - "equivalent", - "hashbrown", -] - -[[package]] -name = "insta" -version = "1.43.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46fdb647ebde000f43b5b53f773c30cf9b0cb4300453208713fa38b2c70935a0" -dependencies = [ - "console", - "once_cell", - "similar", -] - -[[package]] -name = "ipnet" -version = "2.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "469fb0b9cefa57e3ef31275ee7cacb78f2fdca44e4765491884a2b119d4eb130" - -[[package]] -name = "iri-string" -version = "0.7.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f867b9d1d896b67beb18518eda36fdb77a32ea590de864f1325b294a6d14397" -dependencies = [ - "memchr", - "serde", -] - -[[package]] -name = "itoa" -version = "1.0.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c" - -[[package]] -name = "js-sys" -version = "0.3.82" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b011eec8cc36da2aab2d5cff675ec18454fad408585853910a202391cf9f8e65" -dependencies = [ - "once_cell", - "wasm-bindgen", -] - -[[package]] -name = "libc" -version = "0.2.177" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2874a2af47a2325c2001a6e6fad9b16a53b802102b528163885171cf92b15976" - -[[package]] -name = "libredox" -version = "0.1.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "416f7e718bdb06000964960ffa43b4335ad4012ae8b99060261aa4a8088d5ccb" -dependencies = [ - "bitflags", - "libc", - "redox_syscall", -] - -[[package]] -name = "linux-raw-sys" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df1d3c3b53da64cf5760482273a98e575c651a67eec7f77df96b5b642de8f039" - -[[package]] -name = "litemap" -version = "0.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6373607a59f0be73a39b6fe456b8192fcc3585f602af20751600e974dd455e77" - -[[package]] -name = "log" -version = "0.4.28" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34080505efa8e45a4b816c349525ebe327ceaa8559756f0356cba97ef3bf7432" - -[[package]] -name = "memchr" -version = "2.7.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f52b00d39961fc5b2736ea853c9cc86238e165017a493d1d5c8eac6bdc4cc273" - -[[package]] -name = "mime" -version = "0.3.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" - -[[package]] -name = "miniz_oxide" -version = "0.8.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"1fa76a2c86f704bdb222d66965fb3d63269ce38518b83cb0575fca855ebb6316" -dependencies = [ - "adler2", - "simd-adler32", -] - -[[package]] -name = "mio" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69d83b0086dc8ecf3ce9ae2874b2d1290252e2a30720bea58a5c6639b0092873" -dependencies = [ - "libc", - "wasi", - "windows-sys 0.61.2", -] - -[[package]] -name = "native-tls" -version = "0.2.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87de3442987e9dbec73158d5c715e7ad9072fda936bb03d19d7fa10e00520f0e" -dependencies = [ - "libc", - "log", - "openssl", - "openssl-probe", - "openssl-sys", - "schannel", - "security-framework", - "security-framework-sys", - "tempfile", -] - -[[package]] -name = "once_cell" -version = "1.21.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42f5e15c9953c5e4ccceeb2e7382a716482c34515315f7b03532b8b4e8393d2d" - -[[package]] -name = "openssl" -version = "0.10.75" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08838db121398ad17ab8531ce9de97b244589089e290a384c900cb9ff7434328" -dependencies = [ - "bitflags", - "cfg-if", - "foreign-types", - "libc", - "once_cell", - "openssl-macros", - "openssl-sys", -] - -[[package]] -name = "openssl-macros" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.109", -] - -[[package]] -name = "openssl-probe" -version = "0.1.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d05e27ee213611ffe7d6348b942e8f942b37114c00cc03cec254295a4a17852e" - -[[package]] -name = "openssl-sys" -version = "0.9.111" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "82cab2d520aa75e3c58898289429321eb788c3106963d0dc886ec7a5f4adc321" -dependencies = [ - "cc", - "libc", - "pkg-config", - "vcpkg", -] - -[[package]] -name = "percent-encoding" -version = "2.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b4f627cb1b25917193a259e49bdad08f671f8d9708acfd5fe0a8c1455d87220" - -[[package]] -name = "pin-project-lite" -version = "0.2.16" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b3cff922bd51709b605d9ead9aa71031d81447142d828eb4a6eba76fe619f9b" - -[[package]] -name = "pin-utils" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" - -[[package]] -name = "pkg-config" -version = "0.3.32" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7edddbd0b52d732b21ad9a5fab5c704c14cd949e5e9a1ec5929a24fded1b904c" - -[[package]] -name = "potential_utf" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b73949432f5e2a09657003c25bca5e19a0e9c84f8058ca374f49e0ebe605af77" -dependencies = [ - "zerovec", -] - -[[package]] -name = "proc-macro2" -version = "1.0.103" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ee95bc4ef87b8d5ba32e8b7714ccc834865276eab0aed5c9958d00ec45f49e8" -dependencies = [ - "unicode-ident", -] - -[[package]] -name = "quote" -version = "1.0.42" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a338cc41d27e6cc6dce6cefc13a0729dfbb81c262b1f519331575dd80ef3067f" -dependencies = [ - "proc-macro2", -] - 
-[[package]] -name = "r-efi" -version = "5.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69cdb34c158ceb288df11e18b4bd39de994f6657d83847bdffdbd7f346754b0f" - -[[package]] -name = "rayon" -version = "1.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "368f01d005bf8fd9b1206fb6fa653e6c4a81ceb1466406b81792d87c5677a58f" -dependencies = [ - "either", - "rayon-core", -] - -[[package]] -name = "rayon-core" -version = "1.13.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22e18b0f0062d30d4230b2e85ff77fdfe4326feb054b9783a3460d8435c8ab91" -dependencies = [ - "crossbeam-deque", - "crossbeam-utils", -] - -[[package]] -name = "redox_syscall" -version = "0.5.18" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed2bf2547551a7053d6fdfafda3f938979645c44812fbfcda098faae3f1a362d" -dependencies = [ - "bitflags", -] - -[[package]] -name = "ref-cast" -version = "1.0.25" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f354300ae66f76f1c85c5f84693f0ce81d747e2c3f21a45fef496d89c960bf7d" -dependencies = [ - "ref-cast-impl", -] - -[[package]] -name = "ref-cast-impl" -version = "1.0.25" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b7186006dcb21920990093f30e3dea63b7d6e977bf1256be20c3563a5db070da" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.109", -] - -[[package]] -name = "reqwest" -version = "0.12.24" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d0946410b9f7b082a427e4ef5c8ff541a88b357bc6c637c40db3a68ac70a36f" -dependencies = [ - "base64", - "bytes", - "encoding_rs", - "futures-channel", - "futures-core", - "futures-util", - "h2", - "http", - "http-body", - "http-body-util", - "hyper", - "hyper-rustls", - "hyper-tls", - "hyper-util", - "js-sys", - "log", - "mime", - "native-tls", - "percent-encoding", - "pin-project-lite", - "rustls-pki-types", - "serde", - "serde_json", - "serde_urlencoded", - "sync_wrapper", - "tokio", - "tokio-native-tls", - "tower", - "tower-http", - "tower-service", - "url", - "wasm-bindgen", - "wasm-bindgen-futures", - "web-sys", -] - -[[package]] -name = "ring" -version = "0.17.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4689e6c2294d81e88dc6261c768b63bc4fcdb852be6d1352498b114f61383b7" -dependencies = [ - "cc", - "cfg-if", - "getrandom 0.2.16", - "libc", - "untrusted", - "windows-sys 0.52.0", -] - -[[package]] -name = "rustix" -version = "1.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd15f8a2c5551a84d56efdc1cd049089e409ac19a3072d5037a17fd70719ff3e" -dependencies = [ - "bitflags", - "errno", - "libc", - "linux-raw-sys", - "windows-sys 0.61.2", -] - -[[package]] -name = "rustls" -version = "0.23.35" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "533f54bc6a7d4f647e46ad909549eda97bf5afc1585190ef692b4286b198bd8f" -dependencies = [ - "once_cell", - "rustls-pki-types", - "rustls-webpki", - "subtle", - "zeroize", -] - -[[package]] -name = "rustls-pki-types" -version = "1.13.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94182ad936a0c91c324cd46c6511b9510ed16af436d7b5bab34beab0afd55f7a" -dependencies = [ - "zeroize", -] - -[[package]] -name = "rustls-webpki" -version = "0.103.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ffdfa2f5286e2247234e03f680868ac2815974dc39e00ea15adc445d0aafe52" 
-dependencies = [ - "ring", - "rustls-pki-types", - "untrusted", -] - -[[package]] -name = "rustversion" -version = "1.0.22" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b39cdef0fa800fc44525c84ccb54a029961a8215f9619753635a9c0d2538d46d" - -[[package]] -name = "ryu" -version = "1.0.20" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28d3b2b1366ec20994f1fd18c3c594f05c5dd4bc44d8bb0c1c632c8d6829481f" - -[[package]] -name = "same-file" -version = "1.0.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502" -dependencies = [ - "winapi-util", -] - -[[package]] -name = "schannel" -version = "0.1.28" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "891d81b926048e76efe18581bf793546b4c0eaf8448d72be8de2bbee5fd166e1" -dependencies = [ - "windows-sys 0.61.2", -] - -[[package]] -name = "security-framework" -version = "2.11.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "897b2245f0b511c87893af39b033e5ca9cce68824c4d7e7630b5a1d339658d02" -dependencies = [ - "bitflags", - "core-foundation", - "core-foundation-sys", - "libc", - "security-framework-sys", -] - -[[package]] -name = "security-framework-sys" -version = "2.15.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc1f0cbffaac4852523ce30d8bd3c5cdc873501d96ff467ca09b6767bb8cd5c0" -dependencies = [ - "core-foundation-sys", - "libc", -] - -[[package]] -name = "serde" -version = "1.0.228" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a8e94ea7f378bd32cbbd37198a4a91436180c5bb472411e48b5ec2e2124ae9e" -dependencies = [ - "serde_core", - "serde_derive", -] - -[[package]] -name = "serde_core" -version = "1.0.228" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41d385c7d4ca58e59fc732af25c3983b67ac852c1a25000afe1175de458b67ad" -dependencies = [ - "serde_derive", -] - -[[package]] -name = "serde_derive" -version = "1.0.228" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d540f220d3187173da220f885ab66608367b6574e925011a9353e4badda91d79" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.109", -] - -[[package]] -name = "serde_json" -version = "1.0.145" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "402a6f66d8c709116cf22f558eab210f5a50187f702eb4d7e5ef38d9a7f1c79c" -dependencies = [ - "itoa", - "memchr", - "ryu", - "serde", - "serde_core", -] - -[[package]] -name = "serde_urlencoded" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d3491c14715ca2294c4d6a88f15e84739788c1d030eed8c110436aafdaa2f3fd" -dependencies = [ - "form_urlencoded", - "itoa", - "ryu", - "serde", -] - -[[package]] -name = "shlex" -version = "1.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" - -[[package]] -name = "simd-adler32" -version = "0.3.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d66dc143e6b11c1eddc06d5c423cfc97062865baf299914ab64caa38182078fe" - -[[package]] -name = "similar" -version = "2.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbbb5d9659141646ae647b42fe094daf6c6192d1620870b449d9557f748b2daa" - -[[package]] -name = "slab" -version = "0.4.11" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a2ae44ef20feb57a68b23d846850f861394c2e02dc425a50098ae8c90267589" - -[[package]] -name = "smallvec" -version = "1.15.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67b1b7a3b5fe4f1376887184045fcf45c69e92af734b7aaddc05fb777b6fbd03" - -[[package]] -name = "socket2" -version = "0.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17129e116933cf371d018bb80ae557e889637989d8638274fb25622827b03881" -dependencies = [ - "libc", - "windows-sys 0.60.2", -] - -[[package]] -name = "stable_deref_trait" -version = "1.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ce2be8dc25455e1f91df71bfa12ad37d7af1092ae736f3a6cd0e37bc7810596" - -[[package]] -name = "subtle" -version = "2.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292" - -[[package]] -name = "syn" -version = "2.0.109" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f17c7e013e88258aa9543dcbe81aca68a667a9ac37cd69c9fbc07858bfe0e2f" -dependencies = [ - "proc-macro2", - "quote", - "unicode-ident", -] - -[[package]] -name = "syn" -version = "2.0.110" -dependencies = [ - "anyhow", - "automod", - "flate2", - "insta", - "proc-macro2", - "quote", - "rayon", - "ref-cast", - "reqwest", - "rustversion", - "syn-test-suite", - "tar", - "termcolor", - "unicode-ident", - "walkdir", -] - -[[package]] -name = "syn-test-suite" -version = "0.0.0+test" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a0d661992f60e67c8bdd9a7d6360d30d1301f5783abf7d59933844f656762eb5" - -[[package]] -name = "sync_wrapper" -version = "1.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0bf256ce5efdfa370213c1dabab5935a12e49f2c58d15e9eac2870d3b4f27263" -dependencies = [ - "futures-core", -] - -[[package]] -name = "synstructure" -version = "0.13.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "728a70f3dbaf5bab7f0c4b1ac8d7ae5ea60a4b5549c8a5914361c99147a709d2" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.109", -] - -[[package]] -name = "system-configuration" -version = "0.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c879d448e9d986b661742763247d3693ed13609438cf3d006f51f5368a5ba6b" -dependencies = [ - "bitflags", - "core-foundation", - "system-configuration-sys", -] - -[[package]] -name = "system-configuration-sys" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e1d1b10ced5ca923a1fcb8d03e96b8d3268065d724548c0211415ff6ac6bac4" -dependencies = [ - "core-foundation-sys", - "libc", -] - -[[package]] -name = "tar" -version = "0.4.44" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d863878d212c87a19c1a610eb53bb01fe12951c0501cf5a0d65f724914a667a" -dependencies = [ - "filetime", - "libc", - "xattr", -] - -[[package]] -name = "tempfile" -version = "3.23.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d31c77bdf42a745371d260a26ca7163f1e0924b64afa0b688e61b5a9fa02f16" -dependencies = [ - "fastrand", - "getrandom 0.3.4", - "once_cell", - "rustix", - "windows-sys 0.61.2", -] - -[[package]] -name = "termcolor" -version = "1.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"06794f8f6c5c898b3275aebefa6b8a1cb24cd2c6c79397ab15774837a0bc5755" -dependencies = [ - "winapi-util", -] - -[[package]] -name = "tinystr" -version = "0.8.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42d3e9c45c09de15d06dd8acf5f4e0e399e85927b7f00711024eb7ae10fa4869" -dependencies = [ - "displaydoc", - "zerovec", -] - -[[package]] -name = "tokio" -version = "1.48.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff360e02eab121e0bc37a2d3b4d4dc622e6eda3a8e5253d5435ecf5bd4c68408" -dependencies = [ - "bytes", - "libc", - "mio", - "pin-project-lite", - "socket2", - "windows-sys 0.61.2", -] - -[[package]] -name = "tokio-native-tls" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbae76ab933c85776efabc971569dd6119c580d8f5d448769dec1764bf796ef2" -dependencies = [ - "native-tls", - "tokio", -] - -[[package]] -name = "tokio-rustls" -version = "0.26.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1729aa945f29d91ba541258c8df89027d5792d85a8841fb65e8bf0f4ede4ef61" -dependencies = [ - "rustls", - "tokio", -] - -[[package]] -name = "tokio-util" -version = "0.7.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2efa149fe76073d6e8fd97ef4f4eca7b67f599660115591483572e406e165594" -dependencies = [ - "bytes", - "futures-core", - "futures-sink", - "pin-project-lite", - "tokio", -] - -[[package]] -name = "tower" -version = "0.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d039ad9159c98b70ecfd540b2573b97f7f52c3e8d9f8ad57a24b916a536975f9" -dependencies = [ - "futures-core", - "futures-util", - "pin-project-lite", - "sync_wrapper", - "tokio", - "tower-layer", - "tower-service", -] - -[[package]] -name = "tower-http" -version = "0.6.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "adc82fd73de2a9722ac5da747f12383d2bfdb93591ee6c58486e0097890f05f2" -dependencies = [ - "bitflags", - "bytes", - "futures-util", - "http", - "http-body", - "iri-string", - "pin-project-lite", - "tower", - "tower-layer", - "tower-service", -] - -[[package]] -name = "tower-layer" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "121c2a6cda46980bb0fcd1647ffaf6cd3fc79a013de288782836f6df9c48780e" - -[[package]] -name = "tower-service" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8df9b6e13f2d32c91b9bd719c00d1958837bc7dec474d94952798cc8e69eeec3" - -[[package]] -name = "tracing" -version = "0.1.41" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "784e0ac535deb450455cbfa28a6f0df145ea1bb7ae51b821cf5e7927fdcfbdd0" -dependencies = [ - "pin-project-lite", - "tracing-core", -] - -[[package]] -name = "tracing-core" -version = "0.1.34" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9d12581f227e93f094d3af2ae690a574abb8a2b9b7a96e7cfe9647b2b617678" -dependencies = [ - "once_cell", -] - -[[package]] -name = "try-lock" -version = "0.2.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b" - -[[package]] -name = "unicode-ident" -version = "1.0.22" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9312f7c4f6ff9069b165498234ce8be658059c6728633667c526e27dc2cf1df5" - -[[package]] -name = "untrusted" -version = "0.9.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" - -[[package]] -name = "url" -version = "2.5.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08bc136a29a3d1758e07a9cca267be308aeebf5cfd5a10f3f67ab2097683ef5b" -dependencies = [ - "form_urlencoded", - "idna", - "percent-encoding", - "serde", -] - -[[package]] -name = "utf8_iter" -version = "1.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6c140620e7ffbb22c2dee59cafe6084a59b5ffc27a8859a5f0d494b5d52b6be" - -[[package]] -name = "vcpkg" -version = "0.2.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" - -[[package]] -name = "walkdir" -version = "2.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "29790946404f91d9c5d06f9874efddea1dc06c5efe94541a7d6863108e3a5e4b" -dependencies = [ - "same-file", - "winapi-util", -] - -[[package]] -name = "want" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfa7760aed19e106de2c7c0b581b509f2f25d3dacaf737cb82ac61bc6d760b0e" -dependencies = [ - "try-lock", -] - -[[package]] -name = "wasi" -version = "0.11.1+wasi-snapshot-preview1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ccf3ec651a847eb01de73ccad15eb7d99f80485de043efb2f370cd654f4ea44b" - -[[package]] -name = "wasip2" -version = "1.0.1+wasi-0.2.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0562428422c63773dad2c345a1882263bbf4d65cf3f42e90921f787ef5ad58e7" -dependencies = [ - "wit-bindgen", -] - -[[package]] -name = "wasm-bindgen" -version = "0.2.105" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da95793dfc411fbbd93f5be7715b0578ec61fe87cb1a42b12eb625caa5c5ea60" -dependencies = [ - "cfg-if", - "once_cell", - "rustversion", - "wasm-bindgen-macro", - "wasm-bindgen-shared", -] - -[[package]] -name = "wasm-bindgen-futures" -version = "0.4.55" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "551f88106c6d5e7ccc7cd9a16f312dd3b5d36ea8b4954304657d5dfba115d4a0" -dependencies = [ - "cfg-if", - "js-sys", - "once_cell", - "wasm-bindgen", - "web-sys", -] - -[[package]] -name = "wasm-bindgen-macro" -version = "0.2.105" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "04264334509e04a7bf8690f2384ef5265f05143a4bff3889ab7a3269adab59c2" -dependencies = [ - "quote", - "wasm-bindgen-macro-support", -] - -[[package]] -name = "wasm-bindgen-macro-support" -version = "0.2.105" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "420bc339d9f322e562942d52e115d57e950d12d88983a14c79b86859ee6c7ebc" -dependencies = [ - "bumpalo", - "proc-macro2", - "quote", - "syn 2.0.109", - "wasm-bindgen-shared", -] - -[[package]] -name = "wasm-bindgen-shared" -version = "0.2.105" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76f218a38c84bcb33c25ec7059b07847d465ce0e0a76b995e134a45adcb6af76" -dependencies = [ - "unicode-ident", -] - -[[package]] -name = "web-sys" -version = "0.3.82" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3a1f95c0d03a47f4ae1f7a64643a6bb97465d9b740f0fa8f90ea33915c99a9a1" -dependencies = [ - "js-sys", - "wasm-bindgen", -] - -[[package]] -name = "winapi-util" -version = "0.1.11" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "c2a7b1c03c876122aa43f3020e6c3c3ee5c05081c9a00739faf7503aeba10d22" -dependencies = [ - "windows-sys 0.61.2", -] - -[[package]] -name = "windows-link" -version = "0.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e6ad25900d524eaabdbbb96d20b4311e1e7ae1699af4fb28c17ae66c80d798a" - -[[package]] -name = "windows-link" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0805222e57f7521d6a62e36fa9163bc891acd422f971defe97d64e70d0a4fe5" - -[[package]] -name = "windows-registry" -version = "0.5.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b8a9ed28765efc97bbc954883f4e6796c33a06546ebafacbabee9696967499e" -dependencies = [ - "windows-link 0.1.3", - "windows-result", - "windows-strings", -] - -[[package]] -name = "windows-result" -version = "0.3.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56f42bd332cc6c8eac5af113fc0c1fd6a8fd2aa08a0119358686e5160d0586c6" -dependencies = [ - "windows-link 0.1.3", -] - -[[package]] -name = "windows-strings" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56e6c93f3a0c3b36176cb1327a4958a0353d5d166c2a35cb268ace15e91d3b57" -dependencies = [ - "windows-link 0.1.3", -] - -[[package]] -name = "windows-sys" -version = "0.52.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" -dependencies = [ - "windows-targets 0.52.6", -] - -[[package]] -name = "windows-sys" -version = "0.59.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b" -dependencies = [ - "windows-targets 0.52.6", -] - -[[package]] -name = "windows-sys" -version = "0.60.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2f500e4d28234f72040990ec9d39e3a6b950f9f22d3dba18416c35882612bcb" -dependencies = [ - "windows-targets 0.53.5", -] - -[[package]] -name = "windows-sys" -version = "0.61.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae137229bcbd6cdf0f7b80a31df61766145077ddf49416a728b02cb3921ff3fc" -dependencies = [ - "windows-link 0.2.1", -] - -[[package]] -name = "windows-targets" -version = "0.52.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973" -dependencies = [ - "windows_aarch64_gnullvm 0.52.6", - "windows_aarch64_msvc 0.52.6", - "windows_i686_gnu 0.52.6", - "windows_i686_gnullvm 0.52.6", - "windows_i686_msvc 0.52.6", - "windows_x86_64_gnu 0.52.6", - "windows_x86_64_gnullvm 0.52.6", - "windows_x86_64_msvc 0.52.6", -] - -[[package]] -name = "windows-targets" -version = "0.53.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4945f9f551b88e0d65f3db0bc25c33b8acea4d9e41163edf90dcd0b19f9069f3" -dependencies = [ - "windows-link 0.2.1", - "windows_aarch64_gnullvm 0.53.1", - "windows_aarch64_msvc 0.53.1", - "windows_i686_gnu 0.53.1", - "windows_i686_gnullvm 0.53.1", - "windows_i686_msvc 0.53.1", - "windows_x86_64_gnu 0.53.1", - "windows_x86_64_gnullvm 0.53.1", - "windows_x86_64_msvc 0.53.1", -] - -[[package]] -name = "windows_aarch64_gnullvm" -version = "0.52.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" - -[[package]] -name = "windows_aarch64_gnullvm" -version = "0.53.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9d8416fa8b42f5c947f8482c43e7d89e73a173cead56d044f6a56104a6d1b53" - -[[package]] -name = "windows_aarch64_msvc" -version = "0.52.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" - -[[package]] -name = "windows_aarch64_msvc" -version = "0.53.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9d782e804c2f632e395708e99a94275910eb9100b2114651e04744e9b125006" - -[[package]] -name = "windows_i686_gnu" -version = "0.52.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" - -[[package]] -name = "windows_i686_gnu" -version = "0.53.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "960e6da069d81e09becb0ca57a65220ddff016ff2d6af6a223cf372a506593a3" - -[[package]] -name = "windows_i686_gnullvm" -version = "0.52.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" - -[[package]] -name = "windows_i686_gnullvm" -version = "0.53.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa7359d10048f68ab8b09fa71c3daccfb0e9b559aed648a8f95469c27057180c" - -[[package]] -name = "windows_i686_msvc" -version = "0.52.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" - -[[package]] -name = "windows_i686_msvc" -version = "0.53.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e7ac75179f18232fe9c285163565a57ef8d3c89254a30685b57d83a38d326c2" - -[[package]] -name = "windows_x86_64_gnu" -version = "0.52.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" - -[[package]] -name = "windows_x86_64_gnu" -version = "0.53.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c3842cdd74a865a8066ab39c8a7a473c0778a3f29370b5fd6b4b9aa7df4a499" - -[[package]] -name = "windows_x86_64_gnullvm" -version = "0.52.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" - -[[package]] -name = "windows_x86_64_gnullvm" -version = "0.53.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ffa179e2d07eee8ad8f57493436566c7cc30ac536a3379fdf008f47f6bb7ae1" - -[[package]] -name = "windows_x86_64_msvc" -version = "0.52.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" - -[[package]] -name = "windows_x86_64_msvc" -version = "0.53.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d6bbff5f0aada427a1e5a6da5f1f98158182f26556f345ac9e04d36d0ebed650" - -[[package]] -name = "wit-bindgen" -version = "0.46.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f17a85883d4e6d00e8a97c586de764dabcc06133f7f1d55dce5cdc070ad7fe59" - -[[package]] -name = "writeable" -version = "0.6.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"9edde0db4769d2dc68579893f2306b26c6ecfbe0ef499b013d731b7b9247e0b9" - -[[package]] -name = "xattr" -version = "1.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32e45ad4206f6d2479085147f02bc2ef834ac85886624a23575ae137c8aa8156" -dependencies = [ - "libc", - "rustix", -] - -[[package]] -name = "yoke" -version = "0.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72d6e5c6afb84d73944e5cedb052c4680d5657337201555f9f2a16b7406d4954" -dependencies = [ - "stable_deref_trait", - "yoke-derive", - "zerofrom", -] - -[[package]] -name = "yoke-derive" -version = "0.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b659052874eb698efe5b9e8cf382204678a0086ebf46982b79d6ca3182927e5d" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.109", - "synstructure", -] - -[[package]] -name = "zerofrom" -version = "0.1.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50cc42e0333e05660c3587f3bf9d0478688e15d870fab3346451ce7f8c9fbea5" -dependencies = [ - "zerofrom-derive", -] - -[[package]] -name = "zerofrom-derive" -version = "0.1.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d71e5d6e06ab090c67b5e44993ec16b72dcbaabc526db883a360057678b48502" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.109", - "synstructure", -] - -[[package]] -name = "zeroize" -version = "1.8.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b97154e67e32c85465826e8bcc1c59429aaaf107c1e4a9e53c8d8ccd5eff88d0" - -[[package]] -name = "zerotrie" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a59c17a5562d507e4b54960e8569ebee33bee890c70aa3fe7b97e85a9fd7851" -dependencies = [ - "displaydoc", - "yoke", - "zerofrom", -] - -[[package]] -name = "zerovec" -version = "0.11.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c28719294829477f525be0186d13efa9a3c602f7ec202ca9e353d310fb9a002" -dependencies = [ - "yoke", - "zerofrom", - "zerovec-derive", -] - -[[package]] -name = "zerovec-derive" -version = "0.11.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eadce39539ca5cb3985590102671f2567e659fca9666581ad3411d59207951f3" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.109", -] diff --git a/vendor/syn/Cargo.toml b/vendor/syn/Cargo.toml deleted file mode 100644 index b3e4ae86a8239b..00000000000000 --- a/vendor/syn/Cargo.toml +++ /dev/null @@ -1,272 +0,0 @@ -# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO -# -# When uploading crates to the registry Cargo will automatically -# "normalize" Cargo.toml files for maximal compatibility -# with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g., crates.io) dependencies. -# -# If you are reading this file be aware that the original Cargo.toml -# will likely look very different (and much more reasonable). -# See Cargo.toml.orig for the original contents. 
- -[package] -edition = "2021" -rust-version = "1.68" -name = "syn" -version = "2.0.110" -authors = ["David Tolnay <dtolnay@gmail.com>"] -build = false -include = [ - "/benches/**", - "/Cargo.toml", - "/LICENSE-APACHE", - "/LICENSE-MIT", - "/README.md", - "/src/**", - "/tests/**", -] -autolib = false -autobins = false -autoexamples = false -autotests = false -autobenches = false -description = "Parser for Rust source code" -documentation = "https://docs.rs/syn" -readme = "README.md" -keywords = [ - "macros", - "syn", -] -categories = [ - "development-tools::procedural-macro-helpers", - "parser-implementations", -] -license = "MIT OR Apache-2.0" -repository = "https://github.com/dtolnay/syn" - -[package.metadata.docs.rs] -all-features = true -targets = ["x86_64-unknown-linux-gnu"] -rustdoc-args = [ - "--generate-link-to-definition", - "--generate-macro-expansion", - "--extend-css=src/gen/token.css", - "--extern-html-root-url=core=https://doc.rust-lang.org", - "--extern-html-root-url=alloc=https://doc.rust-lang.org", - "--extern-html-root-url=std=https://doc.rust-lang.org", - "--extern-html-root-url=proc_macro=https://doc.rust-lang.org", -] - -[package.metadata.playground] -features = [ - "full", - "visit", - "visit-mut", - "fold", - "extra-traits", -] - -[features] -clone-impls = [] -default = [ - "derive", - "parsing", - "printing", - "clone-impls", - "proc-macro", -] -derive = [] -extra-traits = [] -fold = [] -full = [] -parsing = [] -printing = ["dep:quote"] -proc-macro = [ - "proc-macro2/proc-macro", - "quote?/proc-macro", -] -test = ["syn-test-suite/all-features"] -visit = [] -visit-mut = [] - -[lib] -name = "syn" -path = "src/lib.rs" - -[[test]] -name = "regression" -path = "tests/regression.rs" - -[[test]] -name = "test_asyncness" -path = "tests/test_asyncness.rs" - -[[test]] -name = "test_attribute" -path = "tests/test_attribute.rs" - -[[test]] -name = "test_derive_input" -path = "tests/test_derive_input.rs" - -[[test]] -name = "test_expr" -path = "tests/test_expr.rs" - -[[test]] -name = "test_generics" -path = "tests/test_generics.rs" - -[[test]] -name = "test_grouping" -path = "tests/test_grouping.rs" - -[[test]] -name = "test_ident" -path = "tests/test_ident.rs" - -[[test]] -name = "test_item" -path = "tests/test_item.rs" - -[[test]] -name = "test_lit" -path = "tests/test_lit.rs" - -[[test]] -name = "test_meta" -path = "tests/test_meta.rs" - -[[test]] -name = "test_parse_buffer" -path = "tests/test_parse_buffer.rs" - -[[test]] -name = "test_parse_quote" -path = "tests/test_parse_quote.rs" - -[[test]] -name = "test_parse_stream" -path = "tests/test_parse_stream.rs" - -[[test]] -name = "test_pat" -path = "tests/test_pat.rs" - -[[test]] -name = "test_path" -path = "tests/test_path.rs" - -[[test]] -name = "test_precedence" -path = "tests/test_precedence.rs" - -[[test]] -name = "test_punctuated" -path = "tests/test_punctuated.rs" - -[[test]] -name = "test_receiver" -path = "tests/test_receiver.rs" - -[[test]] -name = "test_round_trip" -path = "tests/test_round_trip.rs" - -[[test]] -name = "test_shebang" -path = "tests/test_shebang.rs" - -[[test]] -name = "test_size" -path = "tests/test_size.rs" - -[[test]] -name = "test_stmt" -path = "tests/test_stmt.rs" - -[[test]] -name = "test_token_trees" -path = "tests/test_token_trees.rs" - -[[test]] -name = "test_ty" -path = "tests/test_ty.rs" - -[[test]] -name = "test_unparenthesize" -path = "tests/test_unparenthesize.rs" - -[[test]] -name = "test_visibility" -path = "tests/test_visibility.rs" - -[[test]] -name = "zzz_stable" -path = 
"tests/zzz_stable.rs" - -[[bench]] -name = "file" -path = "benches/file.rs" -required-features = [ - "full", - "parsing", -] - -[[bench]] -name = "rust" -path = "benches/rust.rs" -harness = false -required-features = [ - "full", - "parsing", -] - -[dependencies.proc-macro2] -version = "1.0.91" -default-features = false - -[dependencies.quote] -version = "1.0.35" -optional = true -default-features = false - -[dependencies.unicode-ident] -version = "1" - -[dev-dependencies.anyhow] -version = "1" - -[dev-dependencies.automod] -version = "1" - -[dev-dependencies.insta] -version = "1" - -[dev-dependencies.ref-cast] -version = "1" - -[dev-dependencies.rustversion] -version = "1" - -[dev-dependencies.syn-test-suite] -version = "0" - -[dev-dependencies.termcolor] -version = "1" - -[target."cfg(not(miri))".dev-dependencies.flate2] -version = "1" - -[target."cfg(not(miri))".dev-dependencies.rayon] -version = "1" - -[target."cfg(not(miri))".dev-dependencies.reqwest] -version = "0.12" -features = ["blocking"] - -[target."cfg(not(miri))".dev-dependencies.tar] -version = "0.4.16" - -[target."cfg(not(miri))".dev-dependencies.walkdir] -version = "2.3.2" diff --git a/vendor/syn/LICENSE-APACHE b/vendor/syn/LICENSE-APACHE deleted file mode 100644 index 1b5ec8b78e237b..00000000000000 --- a/vendor/syn/LICENSE-APACHE +++ /dev/null @@ -1,176 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - -2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - -3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - -4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - -5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - -6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - -8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - -END OF TERMS AND CONDITIONS diff --git a/vendor/syn/LICENSE-MIT b/vendor/syn/LICENSE-MIT deleted file mode 100644 index 31aa79387f27e7..00000000000000 --- a/vendor/syn/LICENSE-MIT +++ /dev/null @@ -1,23 +0,0 @@ -Permission is hereby granted, free of charge, to any -person obtaining a copy of this software and associated -documentation files (the "Software"), to deal in the -Software without restriction, including without -limitation the rights to use, copy, modify, merge, -publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software -is furnished to do so, subject to the following -conditions: - -The above copyright notice and this permission notice -shall be included in all copies or substantial portions -of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -DEALINGS IN THE SOFTWARE. 
diff --git a/vendor/syn/README.md b/vendor/syn/README.md deleted file mode 100644 index 16a393b9f0dc6a..00000000000000 --- a/vendor/syn/README.md +++ /dev/null @@ -1,284 +0,0 @@ -Parser for Rust source code -=========================== - -[<img alt="github" src="https://img.shields.io/badge/github-dtolnay/syn-8da0cb?style=for-the-badge&labelColor=555555&logo=github" height="20">](https://github.com/dtolnay/syn) -[<img alt="crates.io" src="https://img.shields.io/crates/v/syn.svg?style=for-the-badge&color=fc8d62&logo=rust" height="20">](https://crates.io/crates/syn) -[<img alt="docs.rs" src="https://img.shields.io/badge/docs.rs-syn-66c2a5?style=for-the-badge&labelColor=555555&logo=docs.rs" height="20">](https://docs.rs/syn) -[<img alt="build status" src="https://img.shields.io/github/actions/workflow/status/dtolnay/syn/ci.yml?branch=master&style=for-the-badge" height="20">](https://github.com/dtolnay/syn/actions?query=branch%3Amaster) - -Syn is a parsing library for parsing a stream of Rust tokens into a syntax tree -of Rust source code. - -Currently this library is geared toward use in Rust procedural macros, but -contains some APIs that may be useful more generally. - -- **Data structures** — Syn provides a complete syntax tree that can represent - any valid Rust source code. The syntax tree is rooted at [`syn::File`] which - represents a full source file, but there are other entry points that may be - useful to procedural macros including [`syn::Item`], [`syn::Expr`] and - [`syn::Type`]. - -- **Derives** — Of particular interest to derive macros is [`syn::DeriveInput`] - which is any of the three legal input items to a derive macro. An example - below shows using this type in a library that can derive implementations of a - user-defined trait. - -- **Parsing** — Parsing in Syn is built around [parser functions] with the - signature `fn(ParseStream) -> Result<T>`. Every syntax tree node defined by - Syn is individually parsable and may be used as a building block for custom - syntaxes, or you may dream up your own brand new syntax without involving any - of our syntax tree types. - -- **Location information** — Every token parsed by Syn is associated with a - `Span` that tracks line and column information back to the source of that - token. These spans allow a procedural macro to display detailed error messages - pointing to all the right places in the user's code. There is an example of - this below. - -- **Feature flags** — Functionality is aggressively feature gated so your - procedural macros enable only what they need, and do not pay in compile time - for all the rest. - -[`syn::File`]: https://docs.rs/syn/2.0/syn/struct.File.html -[`syn::Item`]: https://docs.rs/syn/2.0/syn/enum.Item.html -[`syn::Expr`]: https://docs.rs/syn/2.0/syn/enum.Expr.html -[`syn::Type`]: https://docs.rs/syn/2.0/syn/enum.Type.html -[`syn::DeriveInput`]: https://docs.rs/syn/2.0/syn/struct.DeriveInput.html -[parser functions]: https://docs.rs/syn/2.0/syn/parse/index.html - -*Version requirement: Syn supports rustc 1.61 and up.* - -[*Release notes*](https://github.com/dtolnay/syn/releases) - -<br> - -## Resources - -The best way to learn about procedural macros is by writing some. Consider -working through [this procedural macro workshop][workshop] to get familiar with -the different types of procedural macros. The workshop contains relevant links -into the Syn documentation as you work through each project. 
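The "Parsing" bullet in the README text above notes that every syntax tree node defined by syn is individually parsable, not only full derive inputs. A minimal sketch of what that looks like outside of any macro, assuming syn 2.x with its default `parsing` and `derive` features (the type string below is an arbitrary example, not taken from the patch):

```rust
use syn::Type;

fn main() -> syn::Result<()> {
    // Parse a single syntax tree node straight from a string,
    // with no procedural macro in the picture.
    let ty: Type = syn::parse_str("std::collections::HashMap<String, Vec<u8>>")?;

    // Every parsed node carries spans and can be inspected like any value.
    if let Type::Path(type_path) = &ty {
        let last = type_path.path.segments.last().unwrap();
        println!("last path segment: {}", last.ident);
    }
    Ok(())
}
```

The same `parse_str`/`parse2` entry points apply to `syn::Expr`, `syn::Item`, and the other node types mentioned above.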
- -[workshop]: https://github.com/dtolnay/proc-macro-workshop - -<br> - -## Example of a derive macro - -The canonical derive macro using Syn looks like this. We write an ordinary Rust -function tagged with a `proc_macro_derive` attribute and the name of the trait -we are deriving. Any time that derive appears in the user's code, the Rust -compiler passes their data structure as tokens into our macro. We get to execute -arbitrary Rust code to figure out what to do with those tokens, then hand some -tokens back to the compiler to compile into the user's crate. - -[`TokenStream`]: https://doc.rust-lang.org/proc_macro/struct.TokenStream.html - -```toml -[dependencies] -syn = "2.0" -quote = "1.0" - -[lib] -proc-macro = true -``` - -```rust -use proc_macro::TokenStream; -use quote::quote; -use syn::{parse_macro_input, DeriveInput}; - -#[proc_macro_derive(MyMacro)] -pub fn my_macro(input: TokenStream) -> TokenStream { - // Parse the input tokens into a syntax tree - let input = parse_macro_input!(input as DeriveInput); - - // Build the output, possibly using quasi-quotation - let expanded = quote! { - // ... - }; - - // Hand the output tokens back to the compiler - TokenStream::from(expanded) -} -``` - -The [`heapsize`] example directory shows a complete working implementation of a -derive macro. The example derives a `HeapSize` trait which computes an estimate -of the amount of heap memory owned by a value. - -[`heapsize`]: examples/heapsize - -```rust -pub trait HeapSize { - /// Total number of bytes of heap memory owned by `self`. - fn heap_size_of_children(&self) -> usize; -} -``` - -The derive macro allows users to write `#[derive(HeapSize)]` on data structures -in their program. - -```rust -#[derive(HeapSize)] -struct Demo<'a, T: ?Sized> { - a: Box<T>, - b: u8, - c: &'a str, - d: String, -} -``` - -<br> - -## Spans and error reporting - -The token-based procedural macro API provides great control over where the -compiler's error messages are displayed in user code. Consider the error the -user sees if one of their field types does not implement `HeapSize`. - -```rust -#[derive(HeapSize)] -struct Broken { - ok: String, - bad: std::thread::Thread, -} -``` - -By tracking span information all the way through the expansion of a procedural -macro as shown in the `heapsize` example, token-based macros in Syn are able to -trigger errors that directly pinpoint the source of the problem. - -```console -error[E0277]: the trait bound `std::thread::Thread: HeapSize` is not satisfied - --> src/main.rs:7:5 - | -7 | bad: std::thread::Thread, - | ^^^^^^^^^^^^^^^^^^^^^^^^ the trait `HeapSize` is not implemented for `std::thread::Thread` -``` - -<br> - -## Parsing a custom syntax - -The [`lazy-static`] example directory shows the implementation of a -`functionlike!(...)` procedural macro in which the input tokens are parsed using -Syn's parsing API. - -[`lazy-static`]: examples/lazy-static - -The example reimplements the popular `lazy_static` crate from crates.io as a -procedural macro. - -```rust -lazy_static! { - static ref USERNAME: Regex = Regex::new("^[a-z0-9_-]{3,16}$").unwrap(); -} -``` - -The implementation shows how to trigger custom warnings and error messages on -the macro input. 
- -```console -warning: come on, pick a more creative name - --> src/main.rs:10:16 - | -10 | static ref FOO: String = "lazy_static".to_owned(); - | ^^^ -``` - -<br> - -## Testing - -When testing macros, we often care not just that the macro can be used -successfully but also that when the macro is provided with invalid input it -produces maximally helpful error messages. Consider using the [`trybuild`] crate -to write tests for errors that are emitted by your macro or errors detected by -the Rust compiler in the expanded code following misuse of the macro. Such tests -help avoid regressions from later refactors that mistakenly make an error no -longer trigger or be less helpful than it used to be. - -[`trybuild`]: https://github.com/dtolnay/trybuild - -<br> - -## Debugging - -When developing a procedural macro it can be helpful to look at what the -generated code looks like. Use `cargo rustc -- -Zunstable-options ---pretty=expanded` or the [`cargo expand`] subcommand. - -[`cargo expand`]: https://github.com/dtolnay/cargo-expand - -To show the expanded code for some crate that uses your procedural macro, run -`cargo expand` from that crate. To show the expanded code for one of your own -test cases, run `cargo expand --test the_test_case` where the last argument is -the name of the test file without the `.rs` extension. - -This write-up by Brandon W Maister discusses debugging in more detail: -[Debugging Rust's new Custom Derive system][debugging]. - -[debugging]: https://quodlibetor.github.io/posts/debugging-rusts-new-custom-derive-system/ - -<br> - -## Optional features - -Syn puts a lot of functionality behind optional features in order to optimize -compile time for the most common use cases. The following features are -available. - -- **`derive`** *(enabled by default)* — Data structures for representing the - possible input to a derive macro, including structs and enums and types. -- **`full`** — Data structures for representing the syntax tree of all valid - Rust source code, including items and expressions. -- **`parsing`** *(enabled by default)* — Ability to parse input tokens into a - syntax tree node of a chosen type. -- **`printing`** *(enabled by default)* — Ability to print a syntax tree node as - tokens of Rust source code. -- **`visit`** — Trait for traversing a syntax tree. -- **`visit-mut`** — Trait for traversing and mutating in place a syntax tree. -- **`fold`** — Trait for transforming an owned syntax tree. -- **`clone-impls`** *(enabled by default)* — Clone impls for all syntax tree - types. -- **`extra-traits`** — Debug, Eq, PartialEq, Hash impls for all syntax tree - types. -- **`proc-macro`** *(enabled by default)* — Runtime dependency on the dynamic - library libproc_macro from rustc toolchain. - -<br> - -## Proc macro shim - -Syn operates on the token representation provided by the [proc-macro2] crate -from crates.io rather than using the compiler's built in proc-macro crate -directly. This enables code using Syn to execute outside of the context of a -procedural macro, such as in unit tests or build.rs, and we avoid needing -incompatible ecosystems for proc macros vs non-macro use cases. - -In general all of your code should be written against proc-macro2 rather than -proc-macro. The one exception is in the signatures of procedural macro entry -points, which are required by the language to use `proc_macro::TokenStream`. - -The proc-macro2 crate will automatically detect and use the compiler's data -structures when a procedural macro is active. 
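The proc-macro shim described in the last paragraph is what lets syn-based expansion logic run outside the compiler, for example in ordinary unit tests or a build.rs. A rough sketch of that layering, assuming syn 2.x and quote 1.x as dependencies; the `derive_heap_size` helper and the `HeapSize` trait follow the README's heapsize example but are illustrative names, not the real example code:

```rust
use proc_macro2::TokenStream;
use quote::quote;
use syn::DeriveInput;

// The expansion logic is written entirely against proc-macro2 types,
// so it can run with no procedural macro context at all.
fn derive_heap_size(input: TokenStream) -> syn::Result<TokenStream> {
    let input: DeriveInput = syn::parse2(input)?;
    let name = input.ident;
    Ok(quote! {
        impl HeapSize for #name {
            fn heap_size_of_children(&self) -> usize { 0 }
        }
    })
}

fn main() -> syn::Result<()> {
    // Drive the expansion directly, with no compiler in the loop.
    let tokens: TokenStream = "struct Demo;".parse().expect("valid Rust tokens");
    let expanded = derive_heap_size(tokens)?;
    println!("{}", expanded);
    Ok(())
}
```

In a real proc-macro crate, only the `#[proc_macro_derive]` entry point would convert between `proc_macro::TokenStream` and these proc-macro2 types; everything else stays testable as plain library code.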
- -[proc-macro2]: https://docs.rs/proc-macro2/1.0/proc_macro2/ - -<br> - -#### License - -<sup> -Licensed under either of <a href="LICENSE-APACHE">Apache License, Version -2.0</a> or <a href="LICENSE-MIT">MIT license</a> at your option. -</sup> - -<br> - -<sub> -Unless you explicitly state otherwise, any contribution intentionally submitted -for inclusion in this crate by you, as defined in the Apache-2.0 license, shall -be dual licensed as above, without any additional terms or conditions. -</sub> diff --git a/vendor/syn/benches/file.rs b/vendor/syn/benches/file.rs deleted file mode 100644 index 6167488c9c5500..00000000000000 --- a/vendor/syn/benches/file.rs +++ /dev/null @@ -1,59 +0,0 @@ -// $ cargo bench --features full,test --bench file - -#![feature(rustc_private, test)] -#![recursion_limit = "1024"] -#![allow( - clippy::elidable_lifetime_names, - clippy::items_after_statements, - clippy::manual_let_else, - clippy::match_like_matches_macro, - clippy::missing_panics_doc, - clippy::must_use_candidate, - clippy::needless_lifetimes, - clippy::uninlined_format_args -)] - -extern crate test; - -#[macro_use] -#[path = "../tests/macros/mod.rs"] -mod macros; - -#[allow(dead_code)] -#[path = "../tests/repo/mod.rs"] -mod repo; - -use proc_macro2::{Span, TokenStream}; -use std::fs; -use std::str::FromStr; -use syn::parse::{ParseStream, Parser}; -use test::Bencher; - -const FILE: &str = "tests/rust/library/core/src/str/mod.rs"; - -fn get_tokens() -> TokenStream { - repo::clone_rust(); - let content = fs::read_to_string(FILE).unwrap(); - TokenStream::from_str(&content).unwrap() -} - -#[bench] -fn baseline(b: &mut Bencher) { - let tokens = get_tokens(); - b.iter(|| drop(tokens.clone())); -} - -#[bench] -fn create_token_buffer(b: &mut Bencher) { - let tokens = get_tokens(); - fn immediate_fail(_input: ParseStream) -> syn::Result<()> { - Err(syn::Error::new(Span::call_site(), "")) - } - b.iter(|| immediate_fail.parse2(tokens.clone())); -} - -#[bench] -fn parse_file(b: &mut Bencher) { - let tokens = get_tokens(); - b.iter(|| syn::parse2::<syn::File>(tokens.clone())); -} diff --git a/vendor/syn/benches/rust.rs b/vendor/syn/benches/rust.rs deleted file mode 100644 index ecb9c56fa314e3..00000000000000 --- a/vendor/syn/benches/rust.rs +++ /dev/null @@ -1,194 +0,0 @@ -// $ cargo bench --features full,test --bench rust -// -// Syn only, useful for profiling: -// $ RUSTFLAGS='--cfg syn_only' cargo build --release --features full,test --bench rust - -#![cfg_attr(not(syn_only), feature(rustc_private))] -#![recursion_limit = "1024"] -#![allow( - clippy::arc_with_non_send_sync, - clippy::cast_lossless, - clippy::elidable_lifetime_names, - clippy::let_underscore_untyped, - clippy::manual_let_else, - clippy::match_like_matches_macro, - clippy::needless_lifetimes, - clippy::uninlined_format_args, - clippy::unnecessary_wraps -)] - -#[macro_use] -#[path = "../tests/macros/mod.rs"] -mod macros; - -#[allow(dead_code)] -#[path = "../tests/repo/mod.rs"] -mod repo; - -use std::fs; -use std::path::Path; -use std::time::{Duration, Instant}; - -#[cfg(not(syn_only))] -mod tokenstream_parse { - use proc_macro2::TokenStream; - use std::path::Path; - use std::str::FromStr; - - pub fn bench(_path: &Path, content: &str) -> Result<(), ()> { - TokenStream::from_str(content).map(drop).map_err(drop) - } -} - -mod syn_parse { - use std::path::Path; - - pub fn bench(_path: &Path, content: &str) -> Result<(), ()> { - syn::parse_file(content).map(drop).map_err(drop) - } -} - -#[cfg(not(syn_only))] -mod librustc_parse { - extern crate 
rustc_data_structures; - extern crate rustc_driver; - extern crate rustc_error_messages; - extern crate rustc_errors; - extern crate rustc_parse; - extern crate rustc_session; - extern crate rustc_span; - - use crate::repo; - use rustc_errors::emitter::Emitter; - use rustc_errors::registry::Registry; - use rustc_errors::translation::Translator; - use rustc_errors::{DiagCtxt, DiagInner}; - use rustc_parse::lexer::StripTokens; - use rustc_session::parse::ParseSess; - use rustc_span::source_map::{FilePathMapping, SourceMap}; - use rustc_span::FileName; - use std::path::Path; - use std::sync::Arc; - - pub fn bench(path: &Path, content: &str) -> Result<(), ()> { - struct SilentEmitter; - - impl Emitter for SilentEmitter { - fn emit_diagnostic(&mut self, _diag: DiagInner, _registry: &Registry) {} - fn source_map(&self) -> Option<&SourceMap> { - None - } - fn translator(&self) -> &Translator { - panic!("silent emitter attempted to translate a diagnostic"); - } - } - - let edition = repo::edition(path).parse().unwrap(); - rustc_span::create_session_if_not_set_then(edition, |_| { - let source_map = Arc::new(SourceMap::new(FilePathMapping::empty())); - let emitter = Box::new(SilentEmitter); - let handler = DiagCtxt::new(emitter); - let sess = ParseSess::with_dcx(handler, source_map); - let name = FileName::Custom("bench".to_owned()); - let mut parser = rustc_parse::new_parser_from_source_str( - &sess, - name, - content.to_owned(), - StripTokens::ShebangAndFrontmatter, - ) - .unwrap(); - if let Err(diagnostic) = parser.parse_crate_mod() { - diagnostic.cancel(); - return Err(()); - } - Ok(()) - }) - } -} - -#[cfg(not(syn_only))] -mod read_from_disk { - use std::path::Path; - - pub fn bench(_path: &Path, content: &str) -> Result<(), ()> { - let _ = content; - Ok(()) - } -} - -fn exec(mut codepath: impl FnMut(&Path, &str) -> Result<(), ()>) -> Duration { - let begin = Instant::now(); - let mut success = 0; - let mut total = 0; - - ["tests/rust/compiler", "tests/rust/library"] - .iter() - .flat_map(|dir| { - walkdir::WalkDir::new(dir) - .into_iter() - .filter_entry(repo::base_dir_filter) - }) - .for_each(|entry| { - let entry = entry.unwrap(); - let path = entry.path(); - if path.is_dir() { - return; - } - let content = fs::read_to_string(path).unwrap(); - let ok = codepath(path, &content).is_ok(); - success += ok as usize; - total += 1; - if !ok { - eprintln!("FAIL {}", path.display()); - } - }); - - assert_eq!(success, total); - begin.elapsed() -} - -fn main() { - repo::clone_rust(); - - macro_rules! 
testcases { - ($($(#[$cfg:meta])* $name:ident,)*) => { - [ - $( - $(#[$cfg])* - (stringify!($name), $name::bench as fn(&Path, &str) -> Result<(), ()>), - )* - ] - }; - } - - #[cfg(not(syn_only))] - { - let mut lines = 0; - let mut files = 0; - exec(|_path, content| { - lines += content.lines().count(); - files += 1; - Ok(()) - }); - eprintln!("\n{} lines in {} files", lines, files); - } - - for (name, f) in testcases!( - #[cfg(not(syn_only))] - read_from_disk, - #[cfg(not(syn_only))] - tokenstream_parse, - syn_parse, - #[cfg(not(syn_only))] - librustc_parse, - ) { - eprint!("{:20}", format!("{}:", name)); - let elapsed = exec(f); - eprintln!( - "elapsed={}.{:03}s", - elapsed.as_secs(), - elapsed.subsec_millis(), - ); - } - eprintln!(); -} diff --git a/vendor/syn/src/attr.rs b/vendor/syn/src/attr.rs deleted file mode 100644 index a543af5597bd1a..00000000000000 --- a/vendor/syn/src/attr.rs +++ /dev/null @@ -1,836 +0,0 @@ -#[cfg(feature = "parsing")] -use crate::error::Error; -#[cfg(feature = "parsing")] -use crate::error::Result; -use crate::expr::Expr; -use crate::mac::MacroDelimiter; -#[cfg(feature = "parsing")] -use crate::meta::{self, ParseNestedMeta}; -#[cfg(feature = "parsing")] -use crate::parse::{Parse, ParseStream, Parser}; -use crate::path::Path; -use crate::token; -use proc_macro2::TokenStream; -#[cfg(feature = "printing")] -use std::iter; -#[cfg(feature = "printing")] -use std::slice; - -ast_struct! { - /// An attribute, like `#[repr(transparent)]`. - /// - /// <br> - /// - /// # Syntax - /// - /// Rust has six types of attributes. - /// - /// - Outer attributes like `#[repr(transparent)]`. These appear outside or - /// in front of the item they describe. - /// - /// - Inner attributes like `#![feature(proc_macro)]`. These appear inside - /// of the item they describe, usually a module. - /// - /// - Outer one-line doc comments like `/// Example`. - /// - /// - Inner one-line doc comments like `//! Please file an issue`. - /// - /// - Outer documentation blocks `/** Example */`. - /// - /// - Inner documentation blocks `/*! Please file an issue */`. - /// - /// The `style` field of type `AttrStyle` distinguishes whether an attribute - /// is outer or inner. - /// - /// Every attribute has a `path` that indicates the intended interpretation - /// of the rest of the attribute's contents. The path and the optional - /// additional contents are represented together in the `meta` field of the - /// attribute in three possible varieties: - /// - /// - Meta::Path — attributes whose information content conveys just a - /// path, for example the `#[test]` attribute. - /// - /// - Meta::List — attributes that carry arbitrary tokens after the - /// path, surrounded by a delimiter (parenthesis, bracket, or brace). For - /// example `#[derive(Copy)]` or `#[precondition(x < 5)]`. - /// - /// - Meta::NameValue — attributes with an `=` sign after the path, - /// followed by a Rust expression. For example `#[path = - /// "sys/windows.rs"]`. - /// - /// All doc comments are represented in the NameValue style with a path of - /// "doc", as this is how they are processed by the compiler and by - /// `macro_rules!` macros. 
- /// - /// ```text - /// #[derive(Copy, Clone)] - /// ~~~~~~Path - /// ^^^^^^^^^^^^^^^^^^^Meta::List - /// - /// #[path = "sys/windows.rs"] - /// ~~~~Path - /// ^^^^^^^^^^^^^^^^^^^^^^^Meta::NameValue - /// - /// #[test] - /// ^^^^Meta::Path - /// ``` - /// - /// <br> - /// - /// # Parsing from tokens to Attribute - /// - /// This type does not implement the [`Parse`] trait and thus cannot be - /// parsed directly by [`ParseStream::parse`]. Instead use - /// [`ParseStream::call`] with one of the two parser functions - /// [`Attribute::parse_outer`] or [`Attribute::parse_inner`] depending on - /// which you intend to parse. - /// - /// [`Parse`]: crate::parse::Parse - /// [`ParseStream::parse`]: crate::parse::ParseBuffer::parse - /// [`ParseStream::call`]: crate::parse::ParseBuffer::call - /// - /// ``` - /// use syn::{Attribute, Ident, Result, Token}; - /// use syn::parse::{Parse, ParseStream}; - /// - /// // Parses a unit struct with attributes. - /// // - /// // #[path = "s.tmpl"] - /// // struct S; - /// struct UnitStruct { - /// attrs: Vec<Attribute>, - /// struct_token: Token![struct], - /// name: Ident, - /// semi_token: Token![;], - /// } - /// - /// impl Parse for UnitStruct { - /// fn parse(input: ParseStream) -> Result<Self> { - /// Ok(UnitStruct { - /// attrs: input.call(Attribute::parse_outer)?, - /// struct_token: input.parse()?, - /// name: input.parse()?, - /// semi_token: input.parse()?, - /// }) - /// } - /// } - /// ``` - /// - /// <p><br></p> - /// - /// # Parsing from Attribute to structured arguments - /// - /// The grammar of attributes in Rust is very flexible, which makes the - /// syntax tree not that useful on its own. In particular, arguments of the - /// `Meta::List` variety of attribute are held in an arbitrary `tokens: - /// TokenStream`. Macros are expected to check the `path` of the attribute, - /// decide whether they recognize it, and then parse the remaining tokens - /// according to whatever grammar they wish to require for that kind of - /// attribute. Use [`parse_args()`] to parse those tokens into the expected - /// data structure. - /// - /// [`parse_args()`]: Attribute::parse_args - /// - /// <p><br></p> - /// - /// # Doc comments - /// - /// The compiler transforms doc comments, such as `/// comment` and `/*! - /// comment */`, into attributes before macros are expanded. Each comment is - /// expanded into an attribute of the form `#[doc = r"comment"]`. - /// - /// As an example, the following `mod` items are expanded identically: - /// - /// ``` - /// # use syn::{ItemMod, parse_quote}; - /// let doc: ItemMod = parse_quote! { - /// /// Single line doc comments - /// /// We write so many! - /// /** - /// * Multi-line comments... - /// * May span many lines - /// */ - /// mod example { - /// //! Of course, they can be inner too - /// /*! And fit in a single line */ - /// } - /// }; - /// let attr: ItemMod = parse_quote! { - /// #[doc = r" Single line doc comments"] - /// #[doc = r" We write so many!"] - /// #[doc = r" - /// * Multi-line comments... 
- /// * May span many lines - /// "] - /// mod example { - /// #![doc = r" Of course, they can be inner too"] - /// #![doc = r" And fit in a single line "] - /// } - /// }; - /// assert_eq!(doc, attr); - /// ``` - #[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] - pub struct Attribute { - pub pound_token: Token![#], - pub style: AttrStyle, - pub bracket_token: token::Bracket, - pub meta: Meta, - } -} - -impl Attribute { - /// Returns the path that identifies the interpretation of this attribute. - /// - /// For example this would return the `test` in `#[test]`, the `derive` in - /// `#[derive(Copy)]`, and the `path` in `#[path = "sys/windows.rs"]`. - pub fn path(&self) -> &Path { - self.meta.path() - } - - /// Parse the arguments to the attribute as a syntax tree. - /// - /// This is similar to pulling out the `TokenStream` from `Meta::List` and - /// doing `syn::parse2::<T>(meta_list.tokens)`, except that using - /// `parse_args` the error message has a more useful span when `tokens` is - /// empty. - /// - /// The surrounding delimiters are *not* included in the input to the - /// parser. - /// - /// ```text - /// #[my_attr(value < 5)] - /// ^^^^^^^^^ what gets parsed - /// ``` - /// - /// # Example - /// - /// ``` - /// use syn::{parse_quote, Attribute, Expr}; - /// - /// let attr: Attribute = parse_quote! { - /// #[precondition(value < 5)] - /// }; - /// - /// if attr.path().is_ident("precondition") { - /// let precondition: Expr = attr.parse_args()?; - /// // ... - /// } - /// # anyhow::Ok(()) - /// ``` - #[cfg(feature = "parsing")] - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - pub fn parse_args<T: Parse>(&self) -> Result<T> { - self.parse_args_with(T::parse) - } - - /// Parse the arguments to the attribute using the given parser. - /// - /// # Example - /// - /// ``` - /// use syn::{parse_quote, Attribute}; - /// - /// let attr: Attribute = parse_quote! { - /// #[inception { #[brrrrrrraaaaawwwwrwrrrmrmrmmrmrmmmmm] }] - /// }; - /// - /// let bwom = attr.parse_args_with(Attribute::parse_outer)?; - /// - /// // Attribute does not have a Parse impl, so we couldn't directly do: - /// // let bwom: Attribute = attr.parse_args()?; - /// # anyhow::Ok(()) - /// ``` - #[cfg(feature = "parsing")] - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - pub fn parse_args_with<F: Parser>(&self, parser: F) -> Result<F::Output> { - match &self.meta { - Meta::Path(path) => Err(crate::error::new2( - path.segments.first().unwrap().ident.span(), - path.segments.last().unwrap().ident.span(), - format!( - "expected attribute arguments in parentheses: {}[{}(...)]", - parsing::DisplayAttrStyle(&self.style), - parsing::DisplayPath(path), - ), - )), - Meta::NameValue(meta) => Err(Error::new( - meta.eq_token.span, - format_args!( - "expected parentheses: {}[{}(...)]", - parsing::DisplayAttrStyle(&self.style), - parsing::DisplayPath(&meta.path), - ), - )), - Meta::List(meta) => meta.parse_args_with(parser), - } - } - - /// Parse the arguments to the attribute, expecting it to follow the - /// conventional structure used by most of Rust's built-in attributes. - /// - /// The [*Meta Item Attribute Syntax*][syntax] section in the Rust reference - /// explains the convention in more detail. Not all attributes follow this - /// convention, so [`parse_args()`][Self::parse_args] is available if you - /// need to parse arbitrarily goofy attribute syntax. 
- /// - /// [syntax]: https://doc.rust-lang.org/reference/attributes.html#meta-item-attribute-syntax - /// - /// # Example - /// - /// We'll parse a struct, and then parse some of Rust's `#[repr]` attribute - /// syntax. - /// - /// ``` - /// use syn::{parenthesized, parse_quote, token, ItemStruct, LitInt}; - /// - /// let input: ItemStruct = parse_quote! { - /// #[repr(C, align(4))] - /// pub struct MyStruct(u16, u32); - /// }; - /// - /// let mut repr_c = false; - /// let mut repr_transparent = false; - /// let mut repr_align = None::<usize>; - /// let mut repr_packed = None::<usize>; - /// for attr in &input.attrs { - /// if attr.path().is_ident("repr") { - /// attr.parse_nested_meta(|meta| { - /// // #[repr(C)] - /// if meta.path.is_ident("C") { - /// repr_c = true; - /// return Ok(()); - /// } - /// - /// // #[repr(transparent)] - /// if meta.path.is_ident("transparent") { - /// repr_transparent = true; - /// return Ok(()); - /// } - /// - /// // #[repr(align(N))] - /// if meta.path.is_ident("align") { - /// let content; - /// parenthesized!(content in meta.input); - /// let lit: LitInt = content.parse()?; - /// let n: usize = lit.base10_parse()?; - /// repr_align = Some(n); - /// return Ok(()); - /// } - /// - /// // #[repr(packed)] or #[repr(packed(N))], omitted N means 1 - /// if meta.path.is_ident("packed") { - /// if meta.input.peek(token::Paren) { - /// let content; - /// parenthesized!(content in meta.input); - /// let lit: LitInt = content.parse()?; - /// let n: usize = lit.base10_parse()?; - /// repr_packed = Some(n); - /// } else { - /// repr_packed = Some(1); - /// } - /// return Ok(()); - /// } - /// - /// Err(meta.error("unrecognized repr")) - /// })?; - /// } - /// } - /// # anyhow::Ok(()) - /// ``` - /// - /// # Alternatives - /// - /// In some cases, for attributes which have nested layers of structured - /// content, the following less flexible approach might be more convenient: - /// - /// ``` - /// # use syn::{parse_quote, ItemStruct}; - /// # - /// # let input: ItemStruct = parse_quote! { - /// # #[repr(C, align(4))] - /// # pub struct MyStruct(u16, u32); - /// # }; - /// # - /// use syn::punctuated::Punctuated; - /// use syn::{parenthesized, token, Error, LitInt, Meta, Token}; - /// - /// let mut repr_c = false; - /// let mut repr_transparent = false; - /// let mut repr_align = None::<usize>; - /// let mut repr_packed = None::<usize>; - /// for attr in &input.attrs { - /// if attr.path().is_ident("repr") { - /// let nested = attr.parse_args_with(Punctuated::<Meta, Token![,]>::parse_terminated)?; - /// for meta in nested { - /// match meta { - /// // #[repr(C)] - /// Meta::Path(path) if path.is_ident("C") => { - /// repr_c = true; - /// } - /// - /// // #[repr(align(N))] - /// Meta::List(meta) if meta.path.is_ident("align") => { - /// let lit: LitInt = meta.parse_args()?; - /// let n: usize = lit.base10_parse()?; - /// repr_align = Some(n); - /// } - /// - /// /* ... */ - /// - /// _ => { - /// return Err(Error::new_spanned(meta, "unrecognized repr")); - /// } - /// } - /// } - /// } - /// } - /// # Ok(()) - /// ``` - #[cfg(feature = "parsing")] - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - pub fn parse_nested_meta( - &self, - logic: impl FnMut(ParseNestedMeta) -> Result<()>, - ) -> Result<()> { - self.parse_args_with(meta::parser(logic)) - } - - /// Parses zero or more outer attributes from the stream. - /// - /// # Example - /// - /// See - /// [*Parsing from tokens to Attribute*](#parsing-from-tokens-to-attribute). 
- #[cfg(feature = "parsing")] - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - pub fn parse_outer(input: ParseStream) -> Result<Vec<Self>> { - let mut attrs = Vec::new(); - while input.peek(Token![#]) { - attrs.push(input.call(parsing::single_parse_outer)?); - } - Ok(attrs) - } - - /// Parses zero or more inner attributes from the stream. - /// - /// # Example - /// - /// See - /// [*Parsing from tokens to Attribute*](#parsing-from-tokens-to-attribute). - #[cfg(feature = "parsing")] - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - pub fn parse_inner(input: ParseStream) -> Result<Vec<Self>> { - let mut attrs = Vec::new(); - parsing::parse_inner(input, &mut attrs)?; - Ok(attrs) - } -} - -ast_enum! { - /// Distinguishes between attributes that decorate an item and attributes - /// that are contained within an item. - /// - /// # Outer attributes - /// - /// - `#[repr(transparent)]` - /// - `/// # Example` - /// - `/** Please file an issue */` - /// - /// # Inner attributes - /// - /// - `#![feature(proc_macro)]` - /// - `//! # Example` - /// - `/*! Please file an issue */` - #[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] - pub enum AttrStyle { - Outer, - Inner(Token![!]), - } -} - -ast_enum! { - /// Content of a compile-time structured attribute. - /// - /// ## Path - /// - /// A meta path is like the `test` in `#[test]`. - /// - /// ## List - /// - /// A meta list is like the `derive(Copy)` in `#[derive(Copy)]`. - /// - /// ## NameValue - /// - /// A name-value meta is like the `path = "..."` in `#[path = - /// "sys/windows.rs"]`. - /// - /// # Syntax tree enum - /// - /// This type is a [syntax tree enum]. - /// - /// [syntax tree enum]: crate::expr::Expr#syntax-tree-enums - #[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] - pub enum Meta { - Path(Path), - - /// A structured list within an attribute, like `derive(Copy, Clone)`. - List(MetaList), - - /// A name-value pair within an attribute, like `feature = "nightly"`. - NameValue(MetaNameValue), - } -} - -ast_struct! { - /// A structured list within an attribute, like `derive(Copy, Clone)`. - #[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] - pub struct MetaList { - pub path: Path, - pub delimiter: MacroDelimiter, - pub tokens: TokenStream, - } -} - -ast_struct! { - /// A name-value pair within an attribute, like `feature = "nightly"`. - #[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] - pub struct MetaNameValue { - pub path: Path, - pub eq_token: Token![=], - pub value: Expr, - } -} - -impl Meta { - /// Returns the path that begins this structured meta item. - /// - /// For example this would return the `test` in `#[test]`, the `derive` in - /// `#[derive(Copy)]`, and the `path` in `#[path = "sys/windows.rs"]`. - pub fn path(&self) -> &Path { - match self { - Meta::Path(path) => path, - Meta::List(meta) => &meta.path, - Meta::NameValue(meta) => &meta.path, - } - } - - /// Error if this is a `Meta::List` or `Meta::NameValue`. - #[cfg(feature = "parsing")] - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - pub fn require_path_only(&self) -> Result<&Path> { - let error_span = match self { - Meta::Path(path) => return Ok(path), - Meta::List(meta) => meta.delimiter.span().open(), - Meta::NameValue(meta) => meta.eq_token.span, - }; - Err(Error::new(error_span, "unexpected token in attribute")) - } - - /// Error if this is a `Meta::Path` or `Meta::NameValue`. 
- #[cfg(feature = "parsing")] - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - pub fn require_list(&self) -> Result<&MetaList> { - match self { - Meta::List(meta) => Ok(meta), - Meta::Path(path) => Err(crate::error::new2( - path.segments.first().unwrap().ident.span(), - path.segments.last().unwrap().ident.span(), - format!( - "expected attribute arguments in parentheses: `{}(...)`", - parsing::DisplayPath(path), - ), - )), - Meta::NameValue(meta) => Err(Error::new(meta.eq_token.span, "expected `(`")), - } - } - - /// Error if this is a `Meta::Path` or `Meta::List`. - #[cfg(feature = "parsing")] - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - pub fn require_name_value(&self) -> Result<&MetaNameValue> { - match self { - Meta::NameValue(meta) => Ok(meta), - Meta::Path(path) => Err(crate::error::new2( - path.segments.first().unwrap().ident.span(), - path.segments.last().unwrap().ident.span(), - format!( - "expected a value for this attribute: `{} = ...`", - parsing::DisplayPath(path), - ), - )), - Meta::List(meta) => Err(Error::new(meta.delimiter.span().open(), "expected `=`")), - } - } -} - -impl MetaList { - /// See [`Attribute::parse_args`]. - #[cfg(feature = "parsing")] - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - pub fn parse_args<T: Parse>(&self) -> Result<T> { - self.parse_args_with(T::parse) - } - - /// See [`Attribute::parse_args_with`]. - #[cfg(feature = "parsing")] - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - pub fn parse_args_with<F: Parser>(&self, parser: F) -> Result<F::Output> { - let scope = self.delimiter.span().close(); - crate::parse::parse_scoped(parser, scope, self.tokens.clone()) - } - - /// See [`Attribute::parse_nested_meta`]. - #[cfg(feature = "parsing")] - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - pub fn parse_nested_meta( - &self, - logic: impl FnMut(ParseNestedMeta) -> Result<()>, - ) -> Result<()> { - self.parse_args_with(meta::parser(logic)) - } -} - -#[cfg(feature = "printing")] -pub(crate) trait FilterAttrs<'a> { - type Ret: Iterator<Item = &'a Attribute>; - - fn outer(self) -> Self::Ret; - #[cfg(feature = "full")] - fn inner(self) -> Self::Ret; -} - -#[cfg(feature = "printing")] -impl<'a> FilterAttrs<'a> for &'a [Attribute] { - type Ret = iter::Filter<slice::Iter<'a, Attribute>, fn(&&Attribute) -> bool>; - - fn outer(self) -> Self::Ret { - fn is_outer(attr: &&Attribute) -> bool { - match attr.style { - AttrStyle::Outer => true, - AttrStyle::Inner(_) => false, - } - } - self.iter().filter(is_outer) - } - - #[cfg(feature = "full")] - fn inner(self) -> Self::Ret { - fn is_inner(attr: &&Attribute) -> bool { - match attr.style { - AttrStyle::Inner(_) => true, - AttrStyle::Outer => false, - } - } - self.iter().filter(is_inner) - } -} - -impl From<Path> for Meta { - fn from(meta: Path) -> Meta { - Meta::Path(meta) - } -} - -impl From<MetaList> for Meta { - fn from(meta: MetaList) -> Meta { - Meta::List(meta) - } -} - -impl From<MetaNameValue> for Meta { - fn from(meta: MetaNameValue) -> Meta { - Meta::NameValue(meta) - } -} - -#[cfg(feature = "parsing")] -pub(crate) mod parsing { - use crate::attr::{AttrStyle, Attribute, Meta, MetaList, MetaNameValue}; - use crate::error::Result; - use crate::expr::{Expr, ExprLit}; - use crate::lit::Lit; - use crate::parse::discouraged::Speculative as _; - use crate::parse::{Parse, ParseStream}; - use crate::path::Path; - use crate::{mac, token}; - use proc_macro2::Ident; - use std::fmt::{self, Display}; - - pub(crate) fn parse_inner(input: ParseStream, attrs: &mut Vec<Attribute>) -> 
Result<()> { - while input.peek(Token![#]) && input.peek2(Token![!]) { - attrs.push(input.call(single_parse_inner)?); - } - Ok(()) - } - - pub(crate) fn single_parse_inner(input: ParseStream) -> Result<Attribute> { - let content; - Ok(Attribute { - pound_token: input.parse()?, - style: AttrStyle::Inner(input.parse()?), - bracket_token: bracketed!(content in input), - meta: content.parse()?, - }) - } - - pub(crate) fn single_parse_outer(input: ParseStream) -> Result<Attribute> { - let content; - Ok(Attribute { - pound_token: input.parse()?, - style: AttrStyle::Outer, - bracket_token: bracketed!(content in input), - meta: content.parse()?, - }) - } - - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - impl Parse for Meta { - fn parse(input: ParseStream) -> Result<Self> { - let path = parse_outermost_meta_path(input)?; - parse_meta_after_path(path, input) - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - impl Parse for MetaList { - fn parse(input: ParseStream) -> Result<Self> { - let path = parse_outermost_meta_path(input)?; - parse_meta_list_after_path(path, input) - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - impl Parse for MetaNameValue { - fn parse(input: ParseStream) -> Result<Self> { - let path = parse_outermost_meta_path(input)?; - parse_meta_name_value_after_path(path, input) - } - } - - // Unlike meta::parse_meta_path which accepts arbitrary keywords in the path, - // only the `unsafe` keyword is accepted as an attribute's outermost path. - fn parse_outermost_meta_path(input: ParseStream) -> Result<Path> { - if input.peek(Token![unsafe]) { - let unsafe_token: Token![unsafe] = input.parse()?; - Ok(Path::from(Ident::new("unsafe", unsafe_token.span))) - } else { - Path::parse_mod_style(input) - } - } - - pub(crate) fn parse_meta_after_path(path: Path, input: ParseStream) -> Result<Meta> { - if input.peek(token::Paren) || input.peek(token::Bracket) || input.peek(token::Brace) { - parse_meta_list_after_path(path, input).map(Meta::List) - } else if input.peek(Token![=]) && !input.peek(Token![==]) && !input.peek(Token![=>]) { - parse_meta_name_value_after_path(path, input).map(Meta::NameValue) - } else { - Ok(Meta::Path(path)) - } - } - - fn parse_meta_list_after_path(path: Path, input: ParseStream) -> Result<MetaList> { - let (delimiter, tokens) = mac::parse_delimiter(input)?; - Ok(MetaList { - path, - delimiter, - tokens, - }) - } - - fn parse_meta_name_value_after_path(path: Path, input: ParseStream) -> Result<MetaNameValue> { - let eq_token: Token![=] = input.parse()?; - let ahead = input.fork(); - let lit: Option<Lit> = ahead.parse()?; - let value = if let (Some(lit), true) = (lit, ahead.is_empty()) { - input.advance_to(&ahead); - Expr::Lit(ExprLit { - attrs: Vec::new(), - lit, - }) - } else if input.peek(Token![#]) && input.peek2(token::Bracket) { - return Err(input.error("unexpected attribute inside of attribute")); - } else { - input.parse()? 
- }; - Ok(MetaNameValue { - path, - eq_token, - value, - }) - } - - pub(super) struct DisplayAttrStyle<'a>(pub &'a AttrStyle); - - impl<'a> Display for DisplayAttrStyle<'a> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str(match self.0 { - AttrStyle::Outer => "#", - AttrStyle::Inner(_) => "#!", - }) - } - } - - pub(super) struct DisplayPath<'a>(pub &'a Path); - - impl<'a> Display for DisplayPath<'a> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - for (i, segment) in self.0.segments.iter().enumerate() { - if i > 0 || self.0.leading_colon.is_some() { - formatter.write_str("::")?; - } - write!(formatter, "{}", segment.ident)?; - } - Ok(()) - } - } -} - -#[cfg(feature = "printing")] -mod printing { - use crate::attr::{AttrStyle, Attribute, Meta, MetaList, MetaNameValue}; - use crate::path; - use crate::path::printing::PathStyle; - use proc_macro2::TokenStream; - use quote::ToTokens; - - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for Attribute { - fn to_tokens(&self, tokens: &mut TokenStream) { - self.pound_token.to_tokens(tokens); - if let AttrStyle::Inner(b) = &self.style { - b.to_tokens(tokens); - } - self.bracket_token.surround(tokens, |tokens| { - self.meta.to_tokens(tokens); - }); - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for Meta { - fn to_tokens(&self, tokens: &mut TokenStream) { - match self { - Meta::Path(path) => path::printing::print_path(tokens, path, PathStyle::Mod), - Meta::List(meta_list) => meta_list.to_tokens(tokens), - Meta::NameValue(meta_name_value) => meta_name_value.to_tokens(tokens), - } - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for MetaList { - fn to_tokens(&self, tokens: &mut TokenStream) { - path::printing::print_path(tokens, &self.path, PathStyle::Mod); - self.delimiter.surround(tokens, self.tokens.clone()); - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for MetaNameValue { - fn to_tokens(&self, tokens: &mut TokenStream) { - path::printing::print_path(tokens, &self.path, PathStyle::Mod); - self.eq_token.to_tokens(tokens); - self.value.to_tokens(tokens); - } - } -} diff --git a/vendor/syn/src/bigint.rs b/vendor/syn/src/bigint.rs deleted file mode 100644 index 66aaa93725401a..00000000000000 --- a/vendor/syn/src/bigint.rs +++ /dev/null @@ -1,66 +0,0 @@ -use std::ops::{AddAssign, MulAssign}; - -// For implementing base10_digits() accessor on LitInt. -pub(crate) struct BigInt { - digits: Vec<u8>, -} - -impl BigInt { - pub(crate) fn new() -> Self { - BigInt { digits: Vec::new() } - } - - pub(crate) fn to_string(&self) -> String { - let mut repr = String::with_capacity(self.digits.len()); - - let mut has_nonzero = false; - for digit in self.digits.iter().rev() { - has_nonzero |= *digit != 0; - if has_nonzero { - repr.push((*digit + b'0') as char); - } - } - - if repr.is_empty() { - repr.push('0'); - } - - repr - } - - fn reserve_two_digits(&mut self) { - let len = self.digits.len(); - let desired = - len + !self.digits.ends_with(&[0, 0]) as usize + !self.digits.ends_with(&[0]) as usize; - self.digits.resize(desired, 0); - } -} - -impl AddAssign<u8> for BigInt { - // Assumes increment <16. - fn add_assign(&mut self, mut increment: u8) { - self.reserve_two_digits(); - - let mut i = 0; - while increment > 0 { - let sum = self.digits[i] + increment; - self.digits[i] = sum % 10; - increment = sum / 10; - i += 1; - } - } -} - -impl MulAssign<u8> for BigInt { - // Assumes base <=16. 
- fn mul_assign(&mut self, base: u8) { - self.reserve_two_digits(); - - let mut carry = 0; - for digit in &mut self.digits { - let prod = *digit * base + carry; - *digit = prod % 10; - carry = prod / 10; - } - } -} diff --git a/vendor/syn/src/buffer.rs b/vendor/syn/src/buffer.rs deleted file mode 100644 index b0f3148443d81c..00000000000000 --- a/vendor/syn/src/buffer.rs +++ /dev/null @@ -1,435 +0,0 @@ -//! A stably addressed token buffer supporting efficient traversal based on a -//! cheaply copyable cursor. - -// This module is heavily commented as it contains most of the unsafe code in -// Syn, and caution should be used when editing it. The public-facing interface -// is 100% safe but the implementation is fragile internally. - -use crate::ext::TokenStreamExt as _; -use crate::Lifetime; -use proc_macro2::extra::DelimSpan; -use proc_macro2::{Delimiter, Group, Ident, Literal, Punct, Spacing, Span, TokenStream, TokenTree}; -use std::cmp::Ordering; -use std::marker::PhantomData; -use std::ptr; - -/// Internal type which is used instead of `TokenTree` to represent a token tree -/// within a `TokenBuffer`. -enum Entry { - // Mimicking types from proc-macro. - // Group entries contain the offset to the matching End entry. - Group(Group, usize), - Ident(Ident), - Punct(Punct), - Literal(Literal), - // End entries contain the offset (negative) to the start of the buffer, and - // offset (negative) to the matching Group entry. - End(isize, isize), -} - -/// A buffer that can be efficiently traversed multiple times, unlike -/// `TokenStream` which requires a deep copy in order to traverse more than -/// once. -pub struct TokenBuffer { - // NOTE: Do not implement clone on this - while the current design could be - // cloned, other designs which could be desirable may not be cloneable. - entries: Box<[Entry]>, -} - -impl TokenBuffer { - fn recursive_new(entries: &mut Vec<Entry>, stream: TokenStream) { - for tt in stream { - match tt { - TokenTree::Ident(ident) => entries.push(Entry::Ident(ident)), - TokenTree::Punct(punct) => entries.push(Entry::Punct(punct)), - TokenTree::Literal(literal) => entries.push(Entry::Literal(literal)), - TokenTree::Group(group) => { - let group_start_index = entries.len(); - entries.push(Entry::End(0, 0)); // we replace this below - Self::recursive_new(entries, group.stream()); - let group_end_index = entries.len(); - let group_offset = group_end_index - group_start_index; - entries.push(Entry::End( - -(group_end_index as isize), - -(group_offset as isize), - )); - entries[group_start_index] = Entry::Group(group, group_offset); - } - } - } - } - - /// Creates a `TokenBuffer` containing all the tokens from the input - /// `proc_macro::TokenStream`. - #[cfg(feature = "proc-macro")] - #[cfg_attr(docsrs, doc(cfg(feature = "proc-macro")))] - pub fn new(stream: proc_macro::TokenStream) -> Self { - Self::new2(stream.into()) - } - - /// Creates a `TokenBuffer` containing all the tokens from the input - /// `proc_macro2::TokenStream`. - pub fn new2(stream: TokenStream) -> Self { - let mut entries = Vec::new(); - Self::recursive_new(&mut entries, stream); - entries.push(Entry::End(-(entries.len() as isize), 0)); - Self { - entries: entries.into_boxed_slice(), - } - } - - /// Creates a cursor referencing the first token in the buffer and able to - /// traverse until the end of the buffer. - pub fn begin(&self) -> Cursor { - let ptr = self.entries.as_ptr(); - unsafe { Cursor::create(ptr, ptr.add(self.entries.len() - 1)) } - } -} - -/// A cheaply copyable cursor into a `TokenBuffer`. 
-/// -/// This cursor holds a shared reference into the immutable data which is used -/// internally to represent a `TokenStream`, and can be efficiently manipulated -/// and copied around. -/// -/// An empty `Cursor` can be created directly, or one may create a `TokenBuffer` -/// object and get a cursor to its first token with `begin()`. -pub struct Cursor<'a> { - // The current entry which the `Cursor` is pointing at. - ptr: *const Entry, - // This is the only `Entry::End` object which this cursor is allowed to - // point at. All other `End` objects are skipped over in `Cursor::create`. - scope: *const Entry, - // Cursor is covariant in 'a. This field ensures that our pointers are still - // valid. - marker: PhantomData<&'a Entry>, -} - -impl<'a> Cursor<'a> { - /// Creates a cursor referencing a static empty TokenStream. - pub fn empty() -> Self { - // It's safe in this situation for us to put an `Entry` object in global - // storage, despite it not actually being safe to send across threads - // (`Ident` is a reference into a thread-local table). This is because - // this entry never includes a `Ident` object. - // - // This wrapper struct allows us to break the rules and put a `Sync` - // object in global storage. - struct UnsafeSyncEntry(Entry); - unsafe impl Sync for UnsafeSyncEntry {} - static EMPTY_ENTRY: UnsafeSyncEntry = UnsafeSyncEntry(Entry::End(0, 0)); - - Cursor { - ptr: &EMPTY_ENTRY.0, - scope: &EMPTY_ENTRY.0, - marker: PhantomData, - } - } - - /// This create method intelligently exits non-explicitly-entered - /// `None`-delimited scopes when the cursor reaches the end of them, - /// allowing for them to be treated transparently. - unsafe fn create(mut ptr: *const Entry, scope: *const Entry) -> Self { - // NOTE: If we're looking at a `End`, we want to advance the cursor - // past it, unless `ptr == scope`, which means that we're at the edge of - // our cursor's scope. We should only have `ptr != scope` at the exit - // from None-delimited groups entered with `ignore_none`. - while let Entry::End(..) = unsafe { &*ptr } { - if ptr::eq(ptr, scope) { - break; - } - ptr = unsafe { ptr.add(1) }; - } - - Cursor { - ptr, - scope, - marker: PhantomData, - } - } - - /// Get the current entry. - fn entry(self) -> &'a Entry { - unsafe { &*self.ptr } - } - - /// Bump the cursor to point at the next token after the current one. This - /// is undefined behavior if the cursor is currently looking at an - /// `Entry::End`. - /// - /// If the cursor is looking at an `Entry::Group`, the bumped cursor will - /// point at the first token in the group (with the same scope end). - unsafe fn bump_ignore_group(self) -> Cursor<'a> { - unsafe { Cursor::create(self.ptr.offset(1), self.scope) } - } - - /// While the cursor is looking at a `None`-delimited group, move it to look - /// at the first token inside instead. If the group is empty, this will move - /// the cursor past the `None`-delimited group. - /// - /// WARNING: This mutates its argument. - fn ignore_none(&mut self) { - while let Entry::Group(group, _) = self.entry() { - if group.delimiter() == Delimiter::None { - unsafe { *self = self.bump_ignore_group() }; - } else { - break; - } - } - } - - /// Checks whether the cursor is currently pointing at the end of its valid - /// scope. - pub fn eof(self) -> bool { - // We're at eof if we're at the end of our scope. - ptr::eq(self.ptr, self.scope) - } - - /// If the cursor is pointing at a `Ident`, returns it along with a cursor - /// pointing at the next `TokenTree`. 
- pub fn ident(mut self) -> Option<(Ident, Cursor<'a>)> { - self.ignore_none(); - match self.entry() { - Entry::Ident(ident) => Some((ident.clone(), unsafe { self.bump_ignore_group() })), - _ => None, - } - } - - /// If the cursor is pointing at a `Punct`, returns it along with a cursor - /// pointing at the next `TokenTree`. - pub fn punct(mut self) -> Option<(Punct, Cursor<'a>)> { - self.ignore_none(); - match self.entry() { - Entry::Punct(punct) if punct.as_char() != '\'' => { - Some((punct.clone(), unsafe { self.bump_ignore_group() })) - } - _ => None, - } - } - - /// If the cursor is pointing at a `Literal`, return it along with a cursor - /// pointing at the next `TokenTree`. - pub fn literal(mut self) -> Option<(Literal, Cursor<'a>)> { - self.ignore_none(); - match self.entry() { - Entry::Literal(literal) => Some((literal.clone(), unsafe { self.bump_ignore_group() })), - _ => None, - } - } - - /// If the cursor is pointing at a `Lifetime`, returns it along with a - /// cursor pointing at the next `TokenTree`. - pub fn lifetime(mut self) -> Option<(Lifetime, Cursor<'a>)> { - self.ignore_none(); - match self.entry() { - Entry::Punct(punct) if punct.as_char() == '\'' && punct.spacing() == Spacing::Joint => { - let next = unsafe { self.bump_ignore_group() }; - let (ident, rest) = next.ident()?; - let lifetime = Lifetime { - apostrophe: punct.span(), - ident, - }; - Some((lifetime, rest)) - } - _ => None, - } - } - - /// If the cursor is pointing at a `Group` with the given delimiter, returns - /// a cursor into that group and one pointing to the next `TokenTree`. - pub fn group(mut self, delim: Delimiter) -> Option<(Cursor<'a>, DelimSpan, Cursor<'a>)> { - // If we're not trying to enter a none-delimited group, we want to - // ignore them. We have to make sure to _not_ ignore them when we want - // to enter them, of course. For obvious reasons. - if delim != Delimiter::None { - self.ignore_none(); - } - - if let Entry::Group(group, end_offset) = self.entry() { - if group.delimiter() == delim { - let span = group.delim_span(); - let end_of_group = unsafe { self.ptr.add(*end_offset) }; - let inside_of_group = unsafe { Cursor::create(self.ptr.add(1), end_of_group) }; - let after_group = unsafe { Cursor::create(end_of_group, self.scope) }; - return Some((inside_of_group, span, after_group)); - } - } - - None - } - - /// If the cursor is pointing at a `Group`, returns a cursor into the group - /// and one pointing to the next `TokenTree`. - pub fn any_group(self) -> Option<(Cursor<'a>, Delimiter, DelimSpan, Cursor<'a>)> { - if let Entry::Group(group, end_offset) = self.entry() { - let delimiter = group.delimiter(); - let span = group.delim_span(); - let end_of_group = unsafe { self.ptr.add(*end_offset) }; - let inside_of_group = unsafe { Cursor::create(self.ptr.add(1), end_of_group) }; - let after_group = unsafe { Cursor::create(end_of_group, self.scope) }; - return Some((inside_of_group, delimiter, span, after_group)); - } - - None - } - - pub(crate) fn any_group_token(self) -> Option<(Group, Cursor<'a>)> { - if let Entry::Group(group, end_offset) = self.entry() { - let end_of_group = unsafe { self.ptr.add(*end_offset) }; - let after_group = unsafe { Cursor::create(end_of_group, self.scope) }; - return Some((group.clone(), after_group)); - } - - None - } - - /// Copies all remaining tokens visible from this cursor into a - /// `TokenStream`. 
- pub fn token_stream(self) -> TokenStream { - let mut tokens = TokenStream::new(); - let mut cursor = self; - while let Some((tt, rest)) = cursor.token_tree() { - tokens.append(tt); - cursor = rest; - } - tokens - } - - /// If the cursor is pointing at a `TokenTree`, returns it along with a - /// cursor pointing at the next `TokenTree`. - /// - /// Returns `None` if the cursor has reached the end of its stream. - /// - /// This method does not treat `None`-delimited groups as transparent, and - /// will return a `Group(None, ..)` if the cursor is looking at one. - pub fn token_tree(self) -> Option<(TokenTree, Cursor<'a>)> { - let (tree, len) = match self.entry() { - Entry::Group(group, end_offset) => (group.clone().into(), *end_offset), - Entry::Literal(literal) => (literal.clone().into(), 1), - Entry::Ident(ident) => (ident.clone().into(), 1), - Entry::Punct(punct) => (punct.clone().into(), 1), - Entry::End(..) => return None, - }; - - let rest = unsafe { Cursor::create(self.ptr.add(len), self.scope) }; - Some((tree, rest)) - } - - /// Returns the `Span` of the current token, or `Span::call_site()` if this - /// cursor points to eof. - pub fn span(mut self) -> Span { - match self.entry() { - Entry::Group(group, _) => group.span(), - Entry::Literal(literal) => literal.span(), - Entry::Ident(ident) => ident.span(), - Entry::Punct(punct) => punct.span(), - Entry::End(_, offset) => { - self.ptr = unsafe { self.ptr.offset(*offset) }; - if let Entry::Group(group, _) = self.entry() { - group.span_close() - } else { - Span::call_site() - } - } - } - } - - /// Returns the `Span` of the token immediately prior to the position of - /// this cursor, or of the current token if there is no previous one. - #[cfg(any(feature = "full", feature = "derive"))] - pub(crate) fn prev_span(mut self) -> Span { - if start_of_buffer(self) < self.ptr { - self.ptr = unsafe { self.ptr.offset(-1) }; - } - self.span() - } - - /// Skip over the next token that is not a None-delimited group, without - /// cloning it. Returns `None` if this cursor points to eof. - /// - /// This method treats `'lifetimes` as a single token. - pub(crate) fn skip(mut self) -> Option<Cursor<'a>> { - self.ignore_none(); - - let len = match self.entry() { - Entry::End(..) => return None, - - // Treat lifetimes as a single tt for the purposes of 'skip'. 
- Entry::Punct(punct) if punct.as_char() == '\'' && punct.spacing() == Spacing::Joint => { - match unsafe { &*self.ptr.add(1) } { - Entry::Ident(_) => 2, - _ => 1, - } - } - - Entry::Group(_, end_offset) => *end_offset, - _ => 1, - }; - - Some(unsafe { Cursor::create(self.ptr.add(len), self.scope) }) - } - - pub(crate) fn scope_delimiter(self) -> Delimiter { - match unsafe { &*self.scope } { - Entry::End(_, offset) => match unsafe { &*self.scope.offset(*offset) } { - Entry::Group(group, _) => group.delimiter(), - _ => Delimiter::None, - }, - _ => unreachable!(), - } - } -} - -impl<'a> Copy for Cursor<'a> {} - -impl<'a> Clone for Cursor<'a> { - fn clone(&self) -> Self { - *self - } -} - -impl<'a> Eq for Cursor<'a> {} - -impl<'a> PartialEq for Cursor<'a> { - fn eq(&self, other: &Self) -> bool { - ptr::eq(self.ptr, other.ptr) - } -} - -impl<'a> PartialOrd for Cursor<'a> { - fn partial_cmp(&self, other: &Self) -> Option<Ordering> { - if same_buffer(*self, *other) { - Some(cmp_assuming_same_buffer(*self, *other)) - } else { - None - } - } -} - -pub(crate) fn same_scope(a: Cursor, b: Cursor) -> bool { - ptr::eq(a.scope, b.scope) -} - -pub(crate) fn same_buffer(a: Cursor, b: Cursor) -> bool { - ptr::eq(start_of_buffer(a), start_of_buffer(b)) -} - -fn start_of_buffer(cursor: Cursor) -> *const Entry { - unsafe { - match &*cursor.scope { - Entry::End(offset, _) => cursor.scope.offset(*offset), - _ => unreachable!(), - } - } -} - -pub(crate) fn cmp_assuming_same_buffer(a: Cursor, b: Cursor) -> Ordering { - a.ptr.cmp(&b.ptr) -} - -pub(crate) fn open_span_of_group(cursor: Cursor) -> Span { - match cursor.entry() { - Entry::Group(group, _) => group.span_open(), - _ => cursor.span(), - } -} diff --git a/vendor/syn/src/classify.rs b/vendor/syn/src/classify.rs deleted file mode 100644 index 8eab19dbc37cac..00000000000000 --- a/vendor/syn/src/classify.rs +++ /dev/null @@ -1,311 +0,0 @@ -#[cfg(feature = "full")] -use crate::expr::Expr; -#[cfg(any(feature = "printing", feature = "full"))] -use crate::generics::TypeParamBound; -#[cfg(any(feature = "printing", feature = "full"))] -use crate::path::{Path, PathArguments}; -#[cfg(any(feature = "printing", feature = "full"))] -use crate::punctuated::Punctuated; -#[cfg(any(feature = "printing", feature = "full"))] -use crate::ty::{ReturnType, Type}; -#[cfg(feature = "full")] -use proc_macro2::{Delimiter, TokenStream, TokenTree}; -#[cfg(any(feature = "printing", feature = "full"))] -use std::ops::ControlFlow; - -#[cfg(feature = "full")] -pub(crate) fn requires_semi_to_be_stmt(expr: &Expr) -> bool { - match expr { - Expr::Macro(expr) => !expr.mac.delimiter.is_brace(), - _ => requires_comma_to_be_match_arm(expr), - } -} - -#[cfg(feature = "full")] -pub(crate) fn requires_comma_to_be_match_arm(expr: &Expr) -> bool { - match expr { - Expr::If(_) - | Expr::Match(_) - | Expr::Block(_) | Expr::Unsafe(_) // both under ExprKind::Block in rustc - | Expr::While(_) - | Expr::Loop(_) - | Expr::ForLoop(_) - | Expr::TryBlock(_) - | Expr::Const(_) => false, - - Expr::Array(_) - | Expr::Assign(_) - | Expr::Async(_) - | Expr::Await(_) - | Expr::Binary(_) - | Expr::Break(_) - | Expr::Call(_) - | Expr::Cast(_) - | Expr::Closure(_) - | Expr::Continue(_) - | Expr::Field(_) - | Expr::Group(_) - | Expr::Index(_) - | Expr::Infer(_) - | Expr::Let(_) - | Expr::Lit(_) - | Expr::Macro(_) - | Expr::MethodCall(_) - | Expr::Paren(_) - | Expr::Path(_) - | Expr::Range(_) - | Expr::RawAddr(_) - | Expr::Reference(_) - | Expr::Repeat(_) - | Expr::Return(_) - | Expr::Struct(_) - | Expr::Try(_) - | 
Expr::Tuple(_) - | Expr::Unary(_) - | Expr::Yield(_) - | Expr::Verbatim(_) => true, - } -} - -#[cfg(feature = "printing")] -pub(crate) fn trailing_unparameterized_path(mut ty: &Type) -> bool { - loop { - match ty { - Type::BareFn(t) => match &t.output { - ReturnType::Default => return false, - ReturnType::Type(_, ret) => ty = ret, - }, - Type::ImplTrait(t) => match last_type_in_bounds(&t.bounds) { - ControlFlow::Break(trailing_path) => return trailing_path, - ControlFlow::Continue(t) => ty = t, - }, - Type::Path(t) => match last_type_in_path(&t.path) { - ControlFlow::Break(trailing_path) => return trailing_path, - ControlFlow::Continue(t) => ty = t, - }, - Type::Ptr(t) => ty = &t.elem, - Type::Reference(t) => ty = &t.elem, - Type::TraitObject(t) => match last_type_in_bounds(&t.bounds) { - ControlFlow::Break(trailing_path) => return trailing_path, - ControlFlow::Continue(t) => ty = t, - }, - - Type::Array(_) - | Type::Group(_) - | Type::Infer(_) - | Type::Macro(_) - | Type::Never(_) - | Type::Paren(_) - | Type::Slice(_) - | Type::Tuple(_) - | Type::Verbatim(_) => return false, - } - } - - fn last_type_in_path(path: &Path) -> ControlFlow<bool, &Type> { - match &path.segments.last().unwrap().arguments { - PathArguments::None => ControlFlow::Break(true), - PathArguments::AngleBracketed(_) => ControlFlow::Break(false), - PathArguments::Parenthesized(arg) => match &arg.output { - ReturnType::Default => ControlFlow::Break(false), - ReturnType::Type(_, ret) => ControlFlow::Continue(ret), - }, - } - } - - fn last_type_in_bounds( - bounds: &Punctuated<TypeParamBound, Token![+]>, - ) -> ControlFlow<bool, &Type> { - match bounds.last().unwrap() { - TypeParamBound::Trait(t) => last_type_in_path(&t.path), - TypeParamBound::Lifetime(_) - | TypeParamBound::PreciseCapture(_) - | TypeParamBound::Verbatim(_) => ControlFlow::Break(false), - } - } -} - -/// Whether the expression's first token is the label of a loop/block. -#[cfg(all(feature = "printing", feature = "full"))] -pub(crate) fn expr_leading_label(mut expr: &Expr) -> bool { - loop { - match expr { - Expr::Block(e) => return e.label.is_some(), - Expr::ForLoop(e) => return e.label.is_some(), - Expr::Loop(e) => return e.label.is_some(), - Expr::While(e) => return e.label.is_some(), - - Expr::Assign(e) => expr = &e.left, - Expr::Await(e) => expr = &e.base, - Expr::Binary(e) => expr = &e.left, - Expr::Call(e) => expr = &e.func, - Expr::Cast(e) => expr = &e.expr, - Expr::Field(e) => expr = &e.base, - Expr::Index(e) => expr = &e.expr, - Expr::MethodCall(e) => expr = &e.receiver, - Expr::Range(e) => match &e.start { - Some(start) => expr = start, - None => return false, - }, - Expr::Try(e) => expr = &e.expr, - - Expr::Array(_) - | Expr::Async(_) - | Expr::Break(_) - | Expr::Closure(_) - | Expr::Const(_) - | Expr::Continue(_) - | Expr::Group(_) - | Expr::If(_) - | Expr::Infer(_) - | Expr::Let(_) - | Expr::Lit(_) - | Expr::Macro(_) - | Expr::Match(_) - | Expr::Paren(_) - | Expr::Path(_) - | Expr::RawAddr(_) - | Expr::Reference(_) - | Expr::Repeat(_) - | Expr::Return(_) - | Expr::Struct(_) - | Expr::TryBlock(_) - | Expr::Tuple(_) - | Expr::Unary(_) - | Expr::Unsafe(_) - | Expr::Verbatim(_) - | Expr::Yield(_) => return false, - } - } -} - -/// Whether the expression's last token is `}`. 
-#[cfg(feature = "full")] -pub(crate) fn expr_trailing_brace(mut expr: &Expr) -> bool { - loop { - match expr { - Expr::Async(_) - | Expr::Block(_) - | Expr::Const(_) - | Expr::ForLoop(_) - | Expr::If(_) - | Expr::Loop(_) - | Expr::Match(_) - | Expr::Struct(_) - | Expr::TryBlock(_) - | Expr::Unsafe(_) - | Expr::While(_) => return true, - - Expr::Assign(e) => expr = &e.right, - Expr::Binary(e) => expr = &e.right, - Expr::Break(e) => match &e.expr { - Some(e) => expr = e, - None => return false, - }, - Expr::Cast(e) => return type_trailing_brace(&e.ty), - Expr::Closure(e) => expr = &e.body, - Expr::Let(e) => expr = &e.expr, - Expr::Macro(e) => return e.mac.delimiter.is_brace(), - Expr::Range(e) => match &e.end { - Some(end) => expr = end, - None => return false, - }, - Expr::RawAddr(e) => expr = &e.expr, - Expr::Reference(e) => expr = &e.expr, - Expr::Return(e) => match &e.expr { - Some(e) => expr = e, - None => return false, - }, - Expr::Unary(e) => expr = &e.expr, - Expr::Verbatim(e) => return tokens_trailing_brace(e), - Expr::Yield(e) => match &e.expr { - Some(e) => expr = e, - None => return false, - }, - - Expr::Array(_) - | Expr::Await(_) - | Expr::Call(_) - | Expr::Continue(_) - | Expr::Field(_) - | Expr::Group(_) - | Expr::Index(_) - | Expr::Infer(_) - | Expr::Lit(_) - | Expr::MethodCall(_) - | Expr::Paren(_) - | Expr::Path(_) - | Expr::Repeat(_) - | Expr::Try(_) - | Expr::Tuple(_) => return false, - } - } - - fn type_trailing_brace(mut ty: &Type) -> bool { - loop { - match ty { - Type::BareFn(t) => match &t.output { - ReturnType::Default => return false, - ReturnType::Type(_, ret) => ty = ret, - }, - Type::ImplTrait(t) => match last_type_in_bounds(&t.bounds) { - ControlFlow::Break(trailing_brace) => return trailing_brace, - ControlFlow::Continue(t) => ty = t, - }, - Type::Macro(t) => return t.mac.delimiter.is_brace(), - Type::Path(t) => match last_type_in_path(&t.path) { - Some(t) => ty = t, - None => return false, - }, - Type::Ptr(t) => ty = &t.elem, - Type::Reference(t) => ty = &t.elem, - Type::TraitObject(t) => match last_type_in_bounds(&t.bounds) { - ControlFlow::Break(trailing_brace) => return trailing_brace, - ControlFlow::Continue(t) => ty = t, - }, - Type::Verbatim(t) => return tokens_trailing_brace(t), - - Type::Array(_) - | Type::Group(_) - | Type::Infer(_) - | Type::Never(_) - | Type::Paren(_) - | Type::Slice(_) - | Type::Tuple(_) => return false, - } - } - } - - fn last_type_in_path(path: &Path) -> Option<&Type> { - match &path.segments.last().unwrap().arguments { - PathArguments::None | PathArguments::AngleBracketed(_) => None, - PathArguments::Parenthesized(arg) => match &arg.output { - ReturnType::Default => None, - ReturnType::Type(_, ret) => Some(ret), - }, - } - } - - fn last_type_in_bounds( - bounds: &Punctuated<TypeParamBound, Token![+]>, - ) -> ControlFlow<bool, &Type> { - match bounds.last().unwrap() { - TypeParamBound::Trait(t) => match last_type_in_path(&t.path) { - Some(t) => ControlFlow::Continue(t), - None => ControlFlow::Break(false), - }, - TypeParamBound::Lifetime(_) | TypeParamBound::PreciseCapture(_) => { - ControlFlow::Break(false) - } - TypeParamBound::Verbatim(t) => ControlFlow::Break(tokens_trailing_brace(t)), - } - } - - fn tokens_trailing_brace(tokens: &TokenStream) -> bool { - if let Some(TokenTree::Group(last)) = tokens.clone().into_iter().last() { - last.delimiter() == Delimiter::Brace - } else { - false - } - } -} diff --git a/vendor/syn/src/custom_keyword.rs b/vendor/syn/src/custom_keyword.rs deleted file mode 100644 index 
cc4f632c981a97..00000000000000 --- a/vendor/syn/src/custom_keyword.rs +++ /dev/null @@ -1,260 +0,0 @@ -/// Define a type that supports parsing and printing a given identifier as if it -/// were a keyword. -/// -/// # Usage -/// -/// As a convention, it is recommended that this macro be invoked within a -/// module called `kw` or `keyword` and that the resulting parser be invoked -/// with a `kw::` or `keyword::` prefix. -/// -/// ``` -/// mod kw { -/// syn::custom_keyword!(whatever); -/// } -/// ``` -/// -/// The generated syntax tree node supports the following operations just like -/// any built-in keyword token. -/// -/// - [Peeking] — `input.peek(kw::whatever)` -/// -/// - [Parsing] — `input.parse::<kw::whatever>()?` -/// -/// - [Printing] — `quote!( ... #whatever_token ... )` -/// -/// - Construction from a [`Span`] — `let whatever_token = kw::whatever(sp)` -/// -/// - Field access to its span — `let sp = whatever_token.span` -/// -/// [Peeking]: crate::parse::ParseBuffer::peek -/// [Parsing]: crate::parse::ParseBuffer::parse -/// [Printing]: quote::ToTokens -/// [`Span`]: proc_macro2::Span -/// -/// # Example -/// -/// This example parses input that looks like `bool = true` or `str = "value"`. -/// The key must be either the identifier `bool` or the identifier `str`. If -/// `bool`, the value may be either `true` or `false`. If `str`, the value may -/// be any string literal. -/// -/// The symbols `bool` and `str` are not reserved keywords in Rust so these are -/// not considered keywords in the `syn::token` module. Like any other -/// identifier that is not a keyword, these can be declared as custom keywords -/// by crates that need to use them as such. -/// -/// ``` -/// use syn::{LitBool, LitStr, Result, Token}; -/// use syn::parse::{Parse, ParseStream}; -/// -/// mod kw { -/// syn::custom_keyword!(bool); -/// syn::custom_keyword!(str); -/// } -/// -/// enum Argument { -/// Bool { -/// bool_token: kw::bool, -/// eq_token: Token![=], -/// value: LitBool, -/// }, -/// Str { -/// str_token: kw::str, -/// eq_token: Token![=], -/// value: LitStr, -/// }, -/// } -/// -/// impl Parse for Argument { -/// fn parse(input: ParseStream) -> Result<Self> { -/// let lookahead = input.lookahead1(); -/// if lookahead.peek(kw::bool) { -/// Ok(Argument::Bool { -/// bool_token: input.parse::<kw::bool>()?, -/// eq_token: input.parse()?, -/// value: input.parse()?, -/// }) -/// } else if lookahead.peek(kw::str) { -/// Ok(Argument::Str { -/// str_token: input.parse::<kw::str>()?, -/// eq_token: input.parse()?, -/// value: input.parse()?, -/// }) -/// } else { -/// Err(lookahead.error()) -/// } -/// } -/// } -/// ``` -#[macro_export] -macro_rules! custom_keyword { - ($ident:ident) => { - #[allow(non_camel_case_types)] - pub struct $ident { - #[allow(dead_code)] - pub span: $crate::__private::Span, - } - - #[doc(hidden)] - #[allow(dead_code, non_snake_case)] - pub fn $ident<__S: $crate::__private::IntoSpans<$crate::__private::Span>>( - span: __S, - ) -> $ident { - $ident { - span: $crate::__private::IntoSpans::into_spans(span), - } - } - - const _: () = { - impl $crate::__private::Default for $ident { - fn default() -> Self { - $ident { - span: $crate::__private::Span::call_site(), - } - } - } - - $crate::impl_parse_for_custom_keyword!($ident); - $crate::impl_to_tokens_for_custom_keyword!($ident); - $crate::impl_clone_for_custom_keyword!($ident); - $crate::impl_extra_traits_for_custom_keyword!($ident); - }; - }; -} - -// Not public API. 
-#[cfg(feature = "parsing")] -#[doc(hidden)] -#[macro_export] -macro_rules! impl_parse_for_custom_keyword { - ($ident:ident) => { - // For peek. - impl $crate::__private::CustomToken for $ident { - fn peek(cursor: $crate::buffer::Cursor) -> $crate::__private::bool { - if let $crate::__private::Some((ident, _rest)) = cursor.ident() { - ident == $crate::__private::stringify!($ident) - } else { - false - } - } - - fn display() -> &'static $crate::__private::str { - $crate::__private::concat!("`", $crate::__private::stringify!($ident), "`") - } - } - - impl $crate::parse::Parse for $ident { - fn parse(input: $crate::parse::ParseStream) -> $crate::parse::Result<$ident> { - input.step(|cursor| { - if let $crate::__private::Some((ident, rest)) = cursor.ident() { - if ident == $crate::__private::stringify!($ident) { - return $crate::__private::Ok(($ident { span: ident.span() }, rest)); - } - } - $crate::__private::Err(cursor.error($crate::__private::concat!( - "expected `", - $crate::__private::stringify!($ident), - "`", - ))) - }) - } - } - }; -} - -// Not public API. -#[cfg(not(feature = "parsing"))] -#[doc(hidden)] -#[macro_export] -macro_rules! impl_parse_for_custom_keyword { - ($ident:ident) => {}; -} - -// Not public API. -#[cfg(feature = "printing")] -#[doc(hidden)] -#[macro_export] -macro_rules! impl_to_tokens_for_custom_keyword { - ($ident:ident) => { - impl $crate::__private::ToTokens for $ident { - fn to_tokens(&self, tokens: &mut $crate::__private::TokenStream2) { - let ident = $crate::Ident::new($crate::__private::stringify!($ident), self.span); - $crate::__private::TokenStreamExt::append(tokens, ident); - } - } - }; -} - -// Not public API. -#[cfg(not(feature = "printing"))] -#[doc(hidden)] -#[macro_export] -macro_rules! impl_to_tokens_for_custom_keyword { - ($ident:ident) => {}; -} - -// Not public API. -#[cfg(feature = "clone-impls")] -#[doc(hidden)] -#[macro_export] -macro_rules! impl_clone_for_custom_keyword { - ($ident:ident) => { - impl $crate::__private::Copy for $ident {} - - #[allow(clippy::expl_impl_clone_on_copy)] - impl $crate::__private::Clone for $ident { - fn clone(&self) -> Self { - *self - } - } - }; -} - -// Not public API. -#[cfg(not(feature = "clone-impls"))] -#[doc(hidden)] -#[macro_export] -macro_rules! impl_clone_for_custom_keyword { - ($ident:ident) => {}; -} - -// Not public API. -#[cfg(feature = "extra-traits")] -#[doc(hidden)] -#[macro_export] -macro_rules! impl_extra_traits_for_custom_keyword { - ($ident:ident) => { - impl $crate::__private::Debug for $ident { - fn fmt(&self, f: &mut $crate::__private::Formatter) -> $crate::__private::FmtResult { - $crate::__private::Formatter::write_str( - f, - $crate::__private::concat!( - "Keyword [", - $crate::__private::stringify!($ident), - "]", - ), - ) - } - } - - impl $crate::__private::Eq for $ident {} - - impl $crate::__private::PartialEq for $ident { - fn eq(&self, _other: &Self) -> $crate::__private::bool { - true - } - } - - impl $crate::__private::Hash for $ident { - fn hash<__H: $crate::__private::Hasher>(&self, _state: &mut __H) {} - } - }; -} - -// Not public API. -#[cfg(not(feature = "extra-traits"))] -#[doc(hidden)] -#[macro_export] -macro_rules! 
impl_extra_traits_for_custom_keyword { - ($ident:ident) => {}; -} diff --git a/vendor/syn/src/custom_punctuation.rs b/vendor/syn/src/custom_punctuation.rs deleted file mode 100644 index 568bc5d92ef848..00000000000000 --- a/vendor/syn/src/custom_punctuation.rs +++ /dev/null @@ -1,305 +0,0 @@ -/// Define a type that supports parsing and printing a multi-character symbol -/// as if it were a punctuation token. -/// -/// # Usage -/// -/// ``` -/// syn::custom_punctuation!(LeftRightArrow, <=>); -/// ``` -/// -/// The generated syntax tree node supports the following operations just like -/// any built-in punctuation token. -/// -/// - [Peeking] — `input.peek(LeftRightArrow)` -/// -/// - [Parsing] — `input.parse::<LeftRightArrow>()?` -/// -/// - [Printing] — `quote!( ... #lrarrow ... )` -/// -/// - Construction from a [`Span`] — `let lrarrow = LeftRightArrow(sp)` -/// -/// - Construction from multiple [`Span`] — `let lrarrow = LeftRightArrow([sp, sp, sp])` -/// -/// - Field access to its spans — `let spans = lrarrow.spans` -/// -/// [Peeking]: crate::parse::ParseBuffer::peek -/// [Parsing]: crate::parse::ParseBuffer::parse -/// [Printing]: quote::ToTokens -/// [`Span`]: proc_macro2::Span -/// -/// # Example -/// -/// ``` -/// use proc_macro2::{TokenStream, TokenTree}; -/// use std::iter; -/// use syn::parse::{Parse, ParseStream, Peek, Result}; -/// use syn::punctuated::Punctuated; -/// use syn::Expr; -/// -/// syn::custom_punctuation!(PathSeparator, </>); -/// -/// // expr </> expr </> expr ... -/// struct PathSegments { -/// segments: Punctuated<Expr, PathSeparator>, -/// } -/// -/// impl Parse for PathSegments { -/// fn parse(input: ParseStream) -> Result<Self> { -/// let mut segments = Punctuated::new(); -/// -/// let first = parse_until(input, PathSeparator)?; -/// segments.push_value(syn::parse2(first)?); -/// -/// while input.peek(PathSeparator) { -/// segments.push_punct(input.parse()?); -/// -/// let next = parse_until(input, PathSeparator)?; -/// segments.push_value(syn::parse2(next)?); -/// } -/// -/// Ok(PathSegments { segments }) -/// } -/// } -/// -/// fn parse_until<E: Peek>(input: ParseStream, end: E) -> Result<TokenStream> { -/// let mut tokens = TokenStream::new(); -/// while !input.is_empty() && !input.peek(end) { -/// let next: TokenTree = input.parse()?; -/// tokens.extend(iter::once(next)); -/// } -/// Ok(tokens) -/// } -/// -/// fn main() { -/// let input = r#" a::b </> c::d::e "#; -/// let _: PathSegments = syn::parse_str(input).unwrap(); -/// } -/// ``` -#[macro_export] -macro_rules! custom_punctuation { - ($ident:ident, $($tt:tt)+) => { - pub struct $ident { - #[allow(dead_code)] - pub spans: $crate::custom_punctuation_repr!($($tt)+), - } - - #[doc(hidden)] - #[allow(dead_code, non_snake_case)] - pub fn $ident<__S: $crate::__private::IntoSpans<$crate::custom_punctuation_repr!($($tt)+)>>( - spans: __S, - ) -> $ident { - let _validate_len = 0 $(+ $crate::custom_punctuation_len!(strict, $tt))*; - $ident { - spans: $crate::__private::IntoSpans::into_spans(spans) - } - } - - const _: () = { - impl $crate::__private::Default for $ident { - fn default() -> Self { - $ident($crate::__private::Span::call_site()) - } - } - - $crate::impl_parse_for_custom_punctuation!($ident, $($tt)+); - $crate::impl_to_tokens_for_custom_punctuation!($ident, $($tt)+); - $crate::impl_clone_for_custom_punctuation!($ident, $($tt)+); - $crate::impl_extra_traits_for_custom_punctuation!($ident, $($tt)+); - }; - }; -} - -// Not public API. 
-#[cfg(feature = "parsing")] -#[doc(hidden)] -#[macro_export] -macro_rules! impl_parse_for_custom_punctuation { - ($ident:ident, $($tt:tt)+) => { - impl $crate::__private::CustomToken for $ident { - fn peek(cursor: $crate::buffer::Cursor) -> $crate::__private::bool { - $crate::__private::peek_punct(cursor, $crate::stringify_punct!($($tt)+)) - } - - fn display() -> &'static $crate::__private::str { - $crate::__private::concat!("`", $crate::stringify_punct!($($tt)+), "`") - } - } - - impl $crate::parse::Parse for $ident { - fn parse(input: $crate::parse::ParseStream) -> $crate::parse::Result<$ident> { - let spans: $crate::custom_punctuation_repr!($($tt)+) = - $crate::__private::parse_punct(input, $crate::stringify_punct!($($tt)+))?; - Ok($ident(spans)) - } - } - }; -} - -// Not public API. -#[cfg(not(feature = "parsing"))] -#[doc(hidden)] -#[macro_export] -macro_rules! impl_parse_for_custom_punctuation { - ($ident:ident, $($tt:tt)+) => {}; -} - -// Not public API. -#[cfg(feature = "printing")] -#[doc(hidden)] -#[macro_export] -macro_rules! impl_to_tokens_for_custom_punctuation { - ($ident:ident, $($tt:tt)+) => { - impl $crate::__private::ToTokens for $ident { - fn to_tokens(&self, tokens: &mut $crate::__private::TokenStream2) { - $crate::__private::print_punct($crate::stringify_punct!($($tt)+), &self.spans, tokens) - } - } - }; -} - -// Not public API. -#[cfg(not(feature = "printing"))] -#[doc(hidden)] -#[macro_export] -macro_rules! impl_to_tokens_for_custom_punctuation { - ($ident:ident, $($tt:tt)+) => {}; -} - -// Not public API. -#[cfg(feature = "clone-impls")] -#[doc(hidden)] -#[macro_export] -macro_rules! impl_clone_for_custom_punctuation { - ($ident:ident, $($tt:tt)+) => { - impl $crate::__private::Copy for $ident {} - - #[allow(clippy::expl_impl_clone_on_copy)] - impl $crate::__private::Clone for $ident { - fn clone(&self) -> Self { - *self - } - } - }; -} - -// Not public API. -#[cfg(not(feature = "clone-impls"))] -#[doc(hidden)] -#[macro_export] -macro_rules! impl_clone_for_custom_punctuation { - ($ident:ident, $($tt:tt)+) => {}; -} - -// Not public API. -#[cfg(feature = "extra-traits")] -#[doc(hidden)] -#[macro_export] -macro_rules! impl_extra_traits_for_custom_punctuation { - ($ident:ident, $($tt:tt)+) => { - impl $crate::__private::Debug for $ident { - fn fmt(&self, f: &mut $crate::__private::Formatter) -> $crate::__private::FmtResult { - $crate::__private::Formatter::write_str(f, $crate::__private::stringify!($ident)) - } - } - - impl $crate::__private::Eq for $ident {} - - impl $crate::__private::PartialEq for $ident { - fn eq(&self, _other: &Self) -> $crate::__private::bool { - true - } - } - - impl $crate::__private::Hash for $ident { - fn hash<__H: $crate::__private::Hasher>(&self, _state: &mut __H) {} - } - }; -} - -// Not public API. -#[cfg(not(feature = "extra-traits"))] -#[doc(hidden)] -#[macro_export] -macro_rules! impl_extra_traits_for_custom_punctuation { - ($ident:ident, $($tt:tt)+) => {}; -} - -// Not public API. -#[doc(hidden)] -#[macro_export] -macro_rules! custom_punctuation_repr { - ($($tt:tt)+) => { - [$crate::__private::Span; 0 $(+ $crate::custom_punctuation_len!(lenient, $tt))+] - }; -} - -// Not public API. -#[doc(hidden)] -#[macro_export] -#[rustfmt::skip] -macro_rules! 
custom_punctuation_len { - ($mode:ident, &) => { 1 }; - ($mode:ident, &&) => { 2 }; - ($mode:ident, &=) => { 2 }; - ($mode:ident, @) => { 1 }; - ($mode:ident, ^) => { 1 }; - ($mode:ident, ^=) => { 2 }; - ($mode:ident, :) => { 1 }; - ($mode:ident, ,) => { 1 }; - ($mode:ident, $) => { 1 }; - ($mode:ident, .) => { 1 }; - ($mode:ident, ..) => { 2 }; - ($mode:ident, ...) => { 3 }; - ($mode:ident, ..=) => { 3 }; - ($mode:ident, =) => { 1 }; - ($mode:ident, ==) => { 2 }; - ($mode:ident, =>) => { 2 }; - ($mode:ident, >=) => { 2 }; - ($mode:ident, >) => { 1 }; - ($mode:ident, <-) => { 2 }; - ($mode:ident, <=) => { 2 }; - ($mode:ident, <) => { 1 }; - ($mode:ident, -) => { 1 }; - ($mode:ident, -=) => { 2 }; - ($mode:ident, !=) => { 2 }; - ($mode:ident, !) => { 1 }; - ($mode:ident, |) => { 1 }; - ($mode:ident, |=) => { 2 }; - ($mode:ident, ||) => { 2 }; - ($mode:ident, ::) => { 2 }; - ($mode:ident, %) => { 1 }; - ($mode:ident, %=) => { 2 }; - ($mode:ident, +) => { 1 }; - ($mode:ident, +=) => { 2 }; - ($mode:ident, #) => { 1 }; - ($mode:ident, ?) => { 1 }; - ($mode:ident, ->) => { 2 }; - ($mode:ident, ;) => { 1 }; - ($mode:ident, <<) => { 2 }; - ($mode:ident, <<=) => { 3 }; - ($mode:ident, >>) => { 2 }; - ($mode:ident, >>=) => { 3 }; - ($mode:ident, /) => { 1 }; - ($mode:ident, /=) => { 2 }; - ($mode:ident, *) => { 1 }; - ($mode:ident, *=) => { 2 }; - ($mode:ident, ~) => { 1 }; - (lenient, $tt:tt) => { 0 }; - (strict, $tt:tt) => {{ $crate::custom_punctuation_unexpected!($tt); 0 }}; -} - -// Not public API. -#[doc(hidden)] -#[macro_export] -macro_rules! custom_punctuation_unexpected { - () => {}; -} - -// Not public API. -#[doc(hidden)] -#[macro_export] -macro_rules! stringify_punct { - ($($tt:tt)+) => { - $crate::__private::concat!($($crate::__private::stringify!($tt)),+) - }; -} diff --git a/vendor/syn/src/data.rs b/vendor/syn/src/data.rs deleted file mode 100644 index f973004dc63f88..00000000000000 --- a/vendor/syn/src/data.rs +++ /dev/null @@ -1,424 +0,0 @@ -use crate::attr::Attribute; -use crate::expr::{Expr, Index, Member}; -use crate::ident::Ident; -use crate::punctuated::{self, Punctuated}; -use crate::restriction::{FieldMutability, Visibility}; -use crate::token; -use crate::ty::Type; - -ast_struct! { - /// An enum variant. - #[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] - pub struct Variant { - pub attrs: Vec<Attribute>, - - /// Name of the variant. - pub ident: Ident, - - /// Content stored in the variant. - pub fields: Fields, - - /// Explicit discriminant: `Variant = 1` - pub discriminant: Option<(Token![=], Expr)>, - } -} - -ast_enum_of_structs! { - /// Data stored within an enum variant or struct. - /// - /// # Syntax tree enum - /// - /// This type is a [syntax tree enum]. - /// - /// [syntax tree enum]: crate::expr::Expr#syntax-tree-enums - #[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] - pub enum Fields { - /// Named fields of a struct or struct variant such as `Point { x: f64, - /// y: f64 }`. - Named(FieldsNamed), - - /// Unnamed fields of a tuple struct or tuple variant such as `Some(T)`. - Unnamed(FieldsUnnamed), - - /// Unit struct or unit variant such as `None`. - Unit, - } -} - -ast_struct! { - /// Named fields of a struct or struct variant such as `Point { x: f64, - /// y: f64 }`. - #[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] - pub struct FieldsNamed { - pub brace_token: token::Brace, - pub named: Punctuated<Field, Token![,]>, - } -} - -ast_struct! 
{ - /// Unnamed fields of a tuple struct or tuple variant such as `Some(T)`. - #[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] - pub struct FieldsUnnamed { - pub paren_token: token::Paren, - pub unnamed: Punctuated<Field, Token![,]>, - } -} - -impl Fields { - /// Get an iterator over the borrowed [`Field`] items in this object. This - /// iterator can be used to iterate over a named or unnamed struct or - /// variant's fields uniformly. - pub fn iter(&self) -> punctuated::Iter<Field> { - match self { - Fields::Unit => crate::punctuated::empty_punctuated_iter(), - Fields::Named(f) => f.named.iter(), - Fields::Unnamed(f) => f.unnamed.iter(), - } - } - - /// Get an iterator over the mutably borrowed [`Field`] items in this - /// object. This iterator can be used to iterate over a named or unnamed - /// struct or variant's fields uniformly. - pub fn iter_mut(&mut self) -> punctuated::IterMut<Field> { - match self { - Fields::Unit => crate::punctuated::empty_punctuated_iter_mut(), - Fields::Named(f) => f.named.iter_mut(), - Fields::Unnamed(f) => f.unnamed.iter_mut(), - } - } - - /// Returns the number of fields. - pub fn len(&self) -> usize { - match self { - Fields::Unit => 0, - Fields::Named(f) => f.named.len(), - Fields::Unnamed(f) => f.unnamed.len(), - } - } - - /// Returns `true` if there are zero fields. - pub fn is_empty(&self) -> bool { - match self { - Fields::Unit => true, - Fields::Named(f) => f.named.is_empty(), - Fields::Unnamed(f) => f.unnamed.is_empty(), - } - } - - return_impl_trait! { - /// Get an iterator over the fields of a struct or variant as [`Member`]s. - /// This iterator can be used to iterate over a named or unnamed struct or - /// variant's fields uniformly. - /// - /// # Example - /// - /// The following is a simplistic [`Clone`] derive for structs. (A more - /// complete implementation would additionally want to infer trait bounds on - /// the generic type parameters.) - /// - /// ``` - /// # use quote::quote; - /// # - /// fn derive_clone(input: &syn::ItemStruct) -> proc_macro2::TokenStream { - /// let ident = &input.ident; - /// let members = input.fields.members(); - /// let (impl_generics, ty_generics, where_clause) = input.generics.split_for_impl(); - /// quote! { - /// impl #impl_generics Clone for #ident #ty_generics #where_clause { - /// fn clone(&self) -> Self { - /// Self { - /// #(#members: self.#members.clone()),* - /// } - /// } - /// } - /// } - /// } - /// ``` - /// - /// For structs with named fields, it produces an expression like `Self { a: - /// self.a.clone() }`. For structs with unnamed fields, `Self { 0: - /// self.0.clone() }`. And for unit structs, `Self {}`. - pub fn members(&self) -> impl Iterator<Item = Member> + Clone + '_ [Members] { - Members { - fields: self.iter(), - index: 0, - } - } - } -} - -impl IntoIterator for Fields { - type Item = Field; - type IntoIter = punctuated::IntoIter<Field>; - - fn into_iter(self) -> Self::IntoIter { - match self { - Fields::Unit => Punctuated::<Field, ()>::new().into_iter(), - Fields::Named(f) => f.named.into_iter(), - Fields::Unnamed(f) => f.unnamed.into_iter(), - } - } -} - -impl<'a> IntoIterator for &'a Fields { - type Item = &'a Field; - type IntoIter = punctuated::Iter<'a, Field>; - - fn into_iter(self) -> Self::IntoIter { - self.iter() - } -} - -impl<'a> IntoIterator for &'a mut Fields { - type Item = &'a mut Field; - type IntoIter = punctuated::IterMut<'a, Field>; - - fn into_iter(self) -> Self::IntoIter { - self.iter_mut() - } -} - -ast_struct! 
{ - /// A field of a struct or enum variant. - #[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] - pub struct Field { - pub attrs: Vec<Attribute>, - - pub vis: Visibility, - - pub mutability: FieldMutability, - - /// Name of the field, if any. - /// - /// Fields of tuple structs have no names. - pub ident: Option<Ident>, - - pub colon_token: Option<Token![:]>, - - pub ty: Type, - } -} - -pub struct Members<'a> { - fields: punctuated::Iter<'a, Field>, - index: u32, -} - -impl<'a> Iterator for Members<'a> { - type Item = Member; - - fn next(&mut self) -> Option<Self::Item> { - let field = self.fields.next()?; - let member = match &field.ident { - Some(ident) => Member::Named(ident.clone()), - None => { - #[cfg(all(feature = "parsing", feature = "printing"))] - let span = crate::spanned::Spanned::span(&field.ty); - #[cfg(not(all(feature = "parsing", feature = "printing")))] - let span = proc_macro2::Span::call_site(); - Member::Unnamed(Index { - index: self.index, - span, - }) - } - }; - self.index += 1; - Some(member) - } -} - -impl<'a> Clone for Members<'a> { - fn clone(&self) -> Self { - Members { - fields: self.fields.clone(), - index: self.index, - } - } -} - -#[cfg(feature = "parsing")] -pub(crate) mod parsing { - use crate::attr::Attribute; - use crate::data::{Field, Fields, FieldsNamed, FieldsUnnamed, Variant}; - use crate::error::Result; - use crate::expr::Expr; - use crate::ext::IdentExt as _; - use crate::ident::Ident; - #[cfg(not(feature = "full"))] - use crate::parse::discouraged::Speculative as _; - use crate::parse::{Parse, ParseStream}; - use crate::restriction::{FieldMutability, Visibility}; - #[cfg(not(feature = "full"))] - use crate::scan_expr::scan_expr; - use crate::token; - use crate::ty::Type; - use crate::verbatim; - - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - impl Parse for Variant { - fn parse(input: ParseStream) -> Result<Self> { - let attrs = input.call(Attribute::parse_outer)?; - let _visibility: Visibility = input.parse()?; - let ident: Ident = input.parse()?; - let fields = if input.peek(token::Brace) { - Fields::Named(input.parse()?) - } else if input.peek(token::Paren) { - Fields::Unnamed(input.parse()?) - } else { - Fields::Unit - }; - let discriminant = if input.peek(Token![=]) { - let eq_token: Token![=] = input.parse()?; - #[cfg(feature = "full")] - let discriminant: Expr = input.parse()?; - #[cfg(not(feature = "full"))] - let discriminant = { - let begin = input.fork(); - let ahead = input.fork(); - let mut discriminant: Result<Expr> = ahead.parse(); - if discriminant.is_ok() { - input.advance_to(&ahead); - } else if scan_expr(input).is_ok() { - discriminant = Ok(Expr::Verbatim(verbatim::between(&begin, input))); - } - discriminant? - }; - Some((eq_token, discriminant)) - } else { - None - }; - Ok(Variant { - attrs, - ident, - fields, - discriminant, - }) - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - impl Parse for FieldsNamed { - fn parse(input: ParseStream) -> Result<Self> { - let content; - Ok(FieldsNamed { - brace_token: braced!(content in input), - named: content.parse_terminated(Field::parse_named, Token![,])?, - }) - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - impl Parse for FieldsUnnamed { - fn parse(input: ParseStream) -> Result<Self> { - let content; - Ok(FieldsUnnamed { - paren_token: parenthesized!(content in input), - unnamed: content.parse_terminated(Field::parse_unnamed, Token![,])?, - }) - } - } - - impl Field { - /// Parses a named (braced struct) field. 
- #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - pub fn parse_named(input: ParseStream) -> Result<Self> { - let attrs = input.call(Attribute::parse_outer)?; - let vis: Visibility = input.parse()?; - - let unnamed_field = cfg!(feature = "full") && input.peek(Token![_]); - let ident = if unnamed_field { - input.call(Ident::parse_any) - } else { - input.parse() - }?; - - let colon_token: Token![:] = input.parse()?; - - let ty: Type = if unnamed_field - && (input.peek(Token![struct]) - || input.peek(Token![union]) && input.peek2(token::Brace)) - { - let begin = input.fork(); - input.call(Ident::parse_any)?; - input.parse::<FieldsNamed>()?; - Type::Verbatim(verbatim::between(&begin, input)) - } else { - input.parse()? - }; - - Ok(Field { - attrs, - vis, - mutability: FieldMutability::None, - ident: Some(ident), - colon_token: Some(colon_token), - ty, - }) - } - - /// Parses an unnamed (tuple struct) field. - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - pub fn parse_unnamed(input: ParseStream) -> Result<Self> { - Ok(Field { - attrs: input.call(Attribute::parse_outer)?, - vis: input.parse()?, - mutability: FieldMutability::None, - ident: None, - colon_token: None, - ty: input.parse()?, - }) - } - } -} - -#[cfg(feature = "printing")] -mod printing { - use crate::data::{Field, FieldsNamed, FieldsUnnamed, Variant}; - use crate::print::TokensOrDefault; - use proc_macro2::TokenStream; - use quote::{ToTokens, TokenStreamExt as _}; - - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for Variant { - fn to_tokens(&self, tokens: &mut TokenStream) { - tokens.append_all(&self.attrs); - self.ident.to_tokens(tokens); - self.fields.to_tokens(tokens); - if let Some((eq_token, disc)) = &self.discriminant { - eq_token.to_tokens(tokens); - disc.to_tokens(tokens); - } - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for FieldsNamed { - fn to_tokens(&self, tokens: &mut TokenStream) { - self.brace_token.surround(tokens, |tokens| { - self.named.to_tokens(tokens); - }); - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for FieldsUnnamed { - fn to_tokens(&self, tokens: &mut TokenStream) { - self.paren_token.surround(tokens, |tokens| { - self.unnamed.to_tokens(tokens); - }); - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for Field { - fn to_tokens(&self, tokens: &mut TokenStream) { - tokens.append_all(&self.attrs); - self.vis.to_tokens(tokens); - if let Some(ident) = &self.ident { - ident.to_tokens(tokens); - TokensOrDefault(&self.colon_token).to_tokens(tokens); - } - self.ty.to_tokens(tokens); - } - } -} diff --git a/vendor/syn/src/derive.rs b/vendor/syn/src/derive.rs deleted file mode 100644 index 3443ecfc05cba6..00000000000000 --- a/vendor/syn/src/derive.rs +++ /dev/null @@ -1,259 +0,0 @@ -use crate::attr::Attribute; -use crate::data::{Fields, FieldsNamed, Variant}; -use crate::generics::Generics; -use crate::ident::Ident; -use crate::punctuated::Punctuated; -use crate::restriction::Visibility; -use crate::token; - -ast_struct! { - /// Data structure sent to a `proc_macro_derive` macro. - #[cfg_attr(docsrs, doc(cfg(feature = "derive")))] - pub struct DeriveInput { - pub attrs: Vec<Attribute>, - pub vis: Visibility, - pub ident: Ident, - pub generics: Generics, - pub data: Data, - } -} - -ast_enum! { - /// The storage of a struct, enum or union data structure. - /// - /// # Syntax tree enum - /// - /// This type is a [syntax tree enum]. 
- /// - /// [syntax tree enum]: crate::expr::Expr#syntax-tree-enums - #[cfg_attr(docsrs, doc(cfg(feature = "derive")))] - pub enum Data { - Struct(DataStruct), - Enum(DataEnum), - Union(DataUnion), - } -} - -ast_struct! { - /// A struct input to a `proc_macro_derive` macro. - #[cfg_attr(docsrs, doc(cfg(feature = "derive")))] - pub struct DataStruct { - pub struct_token: Token![struct], - pub fields: Fields, - pub semi_token: Option<Token![;]>, - } -} - -ast_struct! { - /// An enum input to a `proc_macro_derive` macro. - #[cfg_attr(docsrs, doc(cfg(feature = "derive")))] - pub struct DataEnum { - pub enum_token: Token![enum], - pub brace_token: token::Brace, - pub variants: Punctuated<Variant, Token![,]>, - } -} - -ast_struct! { - /// An untagged union input to a `proc_macro_derive` macro. - #[cfg_attr(docsrs, doc(cfg(feature = "derive")))] - pub struct DataUnion { - pub union_token: Token![union], - pub fields: FieldsNamed, - } -} - -#[cfg(feature = "parsing")] -pub(crate) mod parsing { - use crate::attr::Attribute; - use crate::data::{Fields, FieldsNamed, Variant}; - use crate::derive::{Data, DataEnum, DataStruct, DataUnion, DeriveInput}; - use crate::error::Result; - use crate::generics::{Generics, WhereClause}; - use crate::ident::Ident; - use crate::parse::{Parse, ParseStream}; - use crate::punctuated::Punctuated; - use crate::restriction::Visibility; - use crate::token; - - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - impl Parse for DeriveInput { - fn parse(input: ParseStream) -> Result<Self> { - let attrs = input.call(Attribute::parse_outer)?; - let vis = input.parse::<Visibility>()?; - - let lookahead = input.lookahead1(); - if lookahead.peek(Token![struct]) { - let struct_token = input.parse::<Token![struct]>()?; - let ident = input.parse::<Ident>()?; - let generics = input.parse::<Generics>()?; - let (where_clause, fields, semi) = data_struct(input)?; - Ok(DeriveInput { - attrs, - vis, - ident, - generics: Generics { - where_clause, - ..generics - }, - data: Data::Struct(DataStruct { - struct_token, - fields, - semi_token: semi, - }), - }) - } else if lookahead.peek(Token![enum]) { - let enum_token = input.parse::<Token![enum]>()?; - let ident = input.parse::<Ident>()?; - let generics = input.parse::<Generics>()?; - let (where_clause, brace, variants) = data_enum(input)?; - Ok(DeriveInput { - attrs, - vis, - ident, - generics: Generics { - where_clause, - ..generics - }, - data: Data::Enum(DataEnum { - enum_token, - brace_token: brace, - variants, - }), - }) - } else if lookahead.peek(Token![union]) { - let union_token = input.parse::<Token![union]>()?; - let ident = input.parse::<Ident>()?; - let generics = input.parse::<Generics>()?; - let (where_clause, fields) = data_union(input)?; - Ok(DeriveInput { - attrs, - vis, - ident, - generics: Generics { - where_clause, - ..generics - }, - data: Data::Union(DataUnion { - union_token, - fields, - }), - }) - } else { - Err(lookahead.error()) - } - } - } - - pub(crate) fn data_struct( - input: ParseStream, - ) -> Result<(Option<WhereClause>, Fields, Option<Token![;]>)> { - let mut lookahead = input.lookahead1(); - let mut where_clause = None; - if lookahead.peek(Token![where]) { - where_clause = Some(input.parse()?); - lookahead = input.lookahead1(); - } - - if where_clause.is_none() && lookahead.peek(token::Paren) { - let fields = input.parse()?; - - lookahead = input.lookahead1(); - if lookahead.peek(Token![where]) { - where_clause = Some(input.parse()?); - lookahead = input.lookahead1(); - } - - if lookahead.peek(Token![;]) { 
- let semi = input.parse()?; - Ok((where_clause, Fields::Unnamed(fields), Some(semi))) - } else { - Err(lookahead.error()) - } - } else if lookahead.peek(token::Brace) { - let fields = input.parse()?; - Ok((where_clause, Fields::Named(fields), None)) - } else if lookahead.peek(Token![;]) { - let semi = input.parse()?; - Ok((where_clause, Fields::Unit, Some(semi))) - } else { - Err(lookahead.error()) - } - } - - pub(crate) fn data_enum( - input: ParseStream, - ) -> Result<( - Option<WhereClause>, - token::Brace, - Punctuated<Variant, Token![,]>, - )> { - let where_clause = input.parse()?; - - let content; - let brace = braced!(content in input); - let variants = content.parse_terminated(Variant::parse, Token![,])?; - - Ok((where_clause, brace, variants)) - } - - pub(crate) fn data_union(input: ParseStream) -> Result<(Option<WhereClause>, FieldsNamed)> { - let where_clause = input.parse()?; - let fields = input.parse()?; - Ok((where_clause, fields)) - } -} - -#[cfg(feature = "printing")] -mod printing { - use crate::attr::FilterAttrs; - use crate::data::Fields; - use crate::derive::{Data, DeriveInput}; - use crate::print::TokensOrDefault; - use proc_macro2::TokenStream; - use quote::ToTokens; - - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for DeriveInput { - fn to_tokens(&self, tokens: &mut TokenStream) { - for attr in self.attrs.outer() { - attr.to_tokens(tokens); - } - self.vis.to_tokens(tokens); - match &self.data { - Data::Struct(d) => d.struct_token.to_tokens(tokens), - Data::Enum(d) => d.enum_token.to_tokens(tokens), - Data::Union(d) => d.union_token.to_tokens(tokens), - } - self.ident.to_tokens(tokens); - self.generics.to_tokens(tokens); - match &self.data { - Data::Struct(data) => match &data.fields { - Fields::Named(fields) => { - self.generics.where_clause.to_tokens(tokens); - fields.to_tokens(tokens); - } - Fields::Unnamed(fields) => { - fields.to_tokens(tokens); - self.generics.where_clause.to_tokens(tokens); - TokensOrDefault(&data.semi_token).to_tokens(tokens); - } - Fields::Unit => { - self.generics.where_clause.to_tokens(tokens); - TokensOrDefault(&data.semi_token).to_tokens(tokens); - } - }, - Data::Enum(data) => { - self.generics.where_clause.to_tokens(tokens); - data.brace_token.surround(tokens, |tokens| { - data.variants.to_tokens(tokens); - }); - } - Data::Union(data) => { - self.generics.where_clause.to_tokens(tokens); - data.fields.to_tokens(tokens); - } - } - } - } -} diff --git a/vendor/syn/src/discouraged.rs b/vendor/syn/src/discouraged.rs deleted file mode 100644 index c8d6bfe89a14ad..00000000000000 --- a/vendor/syn/src/discouraged.rs +++ /dev/null @@ -1,225 +0,0 @@ -//! Extensions to the parsing API with niche applicability. - -use crate::buffer::Cursor; -use crate::error::Result; -use crate::parse::{inner_unexpected, ParseBuffer, Unexpected}; -use proc_macro2::extra::DelimSpan; -use proc_macro2::Delimiter; -use std::cell::Cell; -use std::mem; -use std::rc::Rc; - -/// Extensions to the `ParseStream` API to support speculative parsing. -pub trait Speculative { - /// Advance this parse stream to the position of a forked parse stream. - /// - /// This is the opposite operation to [`ParseStream::fork`]. You can fork a - /// parse stream, perform some speculative parsing, then join the original - /// stream to the fork to "commit" the parsing from the fork to the main - /// stream. - /// - /// If you can avoid doing this, you should, as it limits the ability to - /// generate useful errors. 
That said, it is often the only way to parse - /// syntax of the form `A* B*` for arbitrary syntax `A` and `B`. The problem - /// is that when the fork fails to parse an `A`, it's impossible to tell - /// whether that was because of a syntax error and the user meant to provide - /// an `A`, or that the `A`s are finished and it's time to start parsing - /// `B`s. Use with care. - /// - /// Also note that if `A` is a subset of `B`, `A* B*` can be parsed by - /// parsing `B*` and removing the leading members of `A` from the - /// repetition, bypassing the need to involve the downsides associated with - /// speculative parsing. - /// - /// [`ParseStream::fork`]: ParseBuffer::fork - /// - /// # Example - /// - /// There has been chatter about the possibility of making the colons in the - /// turbofish syntax like `path::to::<T>` no longer required by accepting - /// `path::to<T>` in expression position. Specifically, according to [RFC - /// 2544], [`PathSegment`] parsing should always try to consume a following - /// `<` token as the start of generic arguments, and reset to the `<` if - /// that fails (e.g. the token is acting as a less-than operator). - /// - /// This is the exact kind of parsing behavior which requires the "fork, - /// try, commit" behavior that [`ParseStream::fork`] discourages. With - /// `advance_to`, we can avoid having to parse the speculatively parsed - /// content a second time. - /// - /// This change in behavior can be implemented in syn by replacing just the - /// `Parse` implementation for `PathSegment`: - /// - /// ``` - /// # use syn::ext::IdentExt; - /// use syn::parse::discouraged::Speculative; - /// # use syn::parse::{Parse, ParseStream}; - /// # use syn::{Ident, PathArguments, Result, Token}; - /// - /// pub struct PathSegment { - /// pub ident: Ident, - /// pub arguments: PathArguments, - /// } - /// # - /// # impl<T> From<T> for PathSegment - /// # where - /// # T: Into<Ident>, - /// # { - /// # fn from(ident: T) -> Self { - /// # PathSegment { - /// # ident: ident.into(), - /// # arguments: PathArguments::None, - /// # } - /// # } - /// # } - /// - /// impl Parse for PathSegment { - /// fn parse(input: ParseStream) -> Result<Self> { - /// if input.peek(Token![super]) - /// || input.peek(Token![self]) - /// || input.peek(Token![Self]) - /// || input.peek(Token![crate]) - /// { - /// let ident = input.call(Ident::parse_any)?; - /// return Ok(PathSegment::from(ident)); - /// } - /// - /// let ident = input.parse()?; - /// if input.peek(Token![::]) && input.peek3(Token![<]) { - /// return Ok(PathSegment { - /// ident, - /// arguments: PathArguments::AngleBracketed(input.parse()?), - /// }); - /// } - /// if input.peek(Token![<]) && !input.peek(Token![<=]) { - /// let fork = input.fork(); - /// if let Ok(arguments) = fork.parse() { - /// input.advance_to(&fork); - /// return Ok(PathSegment { - /// ident, - /// arguments: PathArguments::AngleBracketed(arguments), - /// }); - /// } - /// } - /// Ok(PathSegment::from(ident)) - /// } - /// } - /// - /// # syn::parse_str::<PathSegment>("a<b,c>").unwrap(); - /// ``` - /// - /// # Drawbacks - /// - /// The main drawback of this style of speculative parsing is in error - /// presentation. Even if the lookahead is the "correct" parse, the error - /// that is shown is that of the "fallback" parse. 
To use the same example - /// as the turbofish above, take the following unfinished "turbofish": - /// - /// ```text - /// let _ = f<&'a fn(), for<'a> serde::>(); - /// ``` - /// - /// If this is parsed as generic arguments, we can provide the error message - /// - /// ```text - /// error: expected identifier - /// --> src.rs:L:C - /// | - /// L | let _ = f<&'a fn(), for<'a> serde::>(); - /// | ^ - /// ``` - /// - /// but if parsed using the above speculative parsing, it falls back to - /// assuming that the `<` is a less-than when it fails to parse the generic - /// arguments, and tries to interpret the `&'a` as the start of a labelled - /// loop, resulting in the much less helpful error - /// - /// ```text - /// error: expected `:` - /// --> src.rs:L:C - /// | - /// L | let _ = f<&'a fn(), for<'a> serde::>(); - /// | ^^ - /// ``` - /// - /// This can be mitigated with various heuristics (two examples: show both - /// forks' parse errors, or show the one that consumed more tokens), but - /// when you can control the grammar, sticking to something that can be - /// parsed LL(3) and without the LL(*) speculative parsing this makes - /// possible, displaying reasonable errors becomes much more simple. - /// - /// [RFC 2544]: https://github.com/rust-lang/rfcs/pull/2544 - /// [`PathSegment`]: crate::PathSegment - /// - /// # Performance - /// - /// This method performs a cheap fixed amount of work that does not depend - /// on how far apart the two streams are positioned. - /// - /// # Panics - /// - /// The forked stream in the argument of `advance_to` must have been - /// obtained by forking `self`. Attempting to advance to any other stream - /// will cause a panic. - fn advance_to(&self, fork: &Self); -} - -impl<'a> Speculative for ParseBuffer<'a> { - fn advance_to(&self, fork: &Self) { - if !crate::buffer::same_scope(self.cursor(), fork.cursor()) { - panic!("fork was not derived from the advancing parse stream"); - } - - let (self_unexp, self_sp) = inner_unexpected(self); - let (fork_unexp, fork_sp) = inner_unexpected(fork); - if !Rc::ptr_eq(&self_unexp, &fork_unexp) { - match (fork_sp, self_sp) { - // Unexpected set on the fork, but not on `self`, copy it over. - (Some((span, delimiter)), None) => { - self_unexp.set(Unexpected::Some(span, delimiter)); - } - // Unexpected unset. Use chain to propagate errors from fork. - (None, None) => { - fork_unexp.set(Unexpected::Chain(self_unexp)); - - // Ensure toplevel 'unexpected' tokens from the fork don't - // propagate up the chain by replacing the root `unexpected` - // pointer, only 'unexpected' tokens from existing group - // parsers should propagate. - fork.unexpected - .set(Some(Rc::new(Cell::new(Unexpected::None)))); - } - // Unexpected has been set on `self`. No changes needed. - (_, Some(_)) => {} - } - } - - // See comment on `cell` in the struct definition. - self.cell - .set(unsafe { mem::transmute::<Cursor, Cursor<'static>>(fork.cursor()) }); - } -} - -/// Extensions to the `ParseStream` API to support manipulating invisible -/// delimiters the same as if they were visible. -pub trait AnyDelimiter { - /// Returns the delimiter, the span of the delimiter token, and the nested - /// contents for further parsing. 
- fn parse_any_delimiter(&self) -> Result<(Delimiter, DelimSpan, ParseBuffer)>; -} - -impl<'a> AnyDelimiter for ParseBuffer<'a> { - fn parse_any_delimiter(&self) -> Result<(Delimiter, DelimSpan, ParseBuffer)> { - self.step(|cursor| { - if let Some((content, delimiter, span, rest)) = cursor.any_group() { - let scope = span.close(); - let nested = crate::parse::advance_step_cursor(cursor, content); - let unexpected = crate::parse::get_unexpected(self); - let content = crate::parse::new_parse_buffer(scope, nested, unexpected); - Ok(((delimiter, span, content), rest)) - } else { - Err(cursor.error("expected any delimiter")) - } - }) - } -} diff --git a/vendor/syn/src/drops.rs b/vendor/syn/src/drops.rs deleted file mode 100644 index c54308f02c13d3..00000000000000 --- a/vendor/syn/src/drops.rs +++ /dev/null @@ -1,58 +0,0 @@ -use std::iter; -use std::mem::ManuallyDrop; -use std::ops::{Deref, DerefMut}; -use std::option; -use std::slice; - -#[repr(transparent)] -pub(crate) struct NoDrop<T: ?Sized>(ManuallyDrop<T>); - -impl<T> NoDrop<T> { - pub(crate) fn new(value: T) -> Self - where - T: TrivialDrop, - { - NoDrop(ManuallyDrop::new(value)) - } -} - -impl<T: ?Sized> Deref for NoDrop<T> { - type Target = T; - fn deref(&self) -> &Self::Target { - &self.0 - } -} - -impl<T: ?Sized> DerefMut for NoDrop<T> { - fn deref_mut(&mut self) -> &mut Self::Target { - &mut self.0 - } -} - -pub(crate) trait TrivialDrop {} - -impl<T> TrivialDrop for iter::Empty<T> {} -impl<T> TrivialDrop for slice::Iter<'_, T> {} -impl<T> TrivialDrop for slice::IterMut<'_, T> {} -impl<T> TrivialDrop for option::IntoIter<&T> {} -impl<T> TrivialDrop for option::IntoIter<&mut T> {} - -#[test] -fn test_needs_drop() { - use std::mem::needs_drop; - - struct NeedsDrop; - - impl Drop for NeedsDrop { - fn drop(&mut self) {} - } - - assert!(needs_drop::<NeedsDrop>()); - - // Test each of the types with a handwritten TrivialDrop impl above. - assert!(!needs_drop::<iter::Empty<NeedsDrop>>()); - assert!(!needs_drop::<slice::Iter<NeedsDrop>>()); - assert!(!needs_drop::<slice::IterMut<NeedsDrop>>()); - assert!(!needs_drop::<option::IntoIter<&NeedsDrop>>()); - assert!(!needs_drop::<option::IntoIter<&mut NeedsDrop>>()); -} diff --git a/vendor/syn/src/error.rs b/vendor/syn/src/error.rs deleted file mode 100644 index f89278c26c40c2..00000000000000 --- a/vendor/syn/src/error.rs +++ /dev/null @@ -1,468 +0,0 @@ -#[cfg(feature = "parsing")] -use crate::buffer::Cursor; -use crate::ext::{PunctExt as _, TokenStreamExt as _}; -use crate::thread::ThreadBound; -use proc_macro2::{ - Delimiter, Group, Ident, LexError, Literal, Punct, Spacing, Span, TokenStream, TokenTree, -}; -#[cfg(feature = "printing")] -use quote::ToTokens; -use std::fmt::{self, Debug, Display}; -use std::slice; -use std::vec; - -/// The result of a Syn parser. -pub type Result<T> = std::result::Result<T, Error>; - -/// Error returned when a Syn parser cannot parse the input tokens. -/// -/// # Error reporting in proc macros -/// -/// The correct way to report errors back to the compiler from a procedural -/// macro is by emitting an appropriately spanned invocation of -/// [`compile_error!`] in the generated code. This produces a better diagnostic -/// message than simply panicking the macro. -/// -/// [`compile_error!`]: std::compile_error! -/// -/// When parsing macro input, the [`parse_macro_input!`] macro handles the -/// conversion to `compile_error!` automatically. -/// -/// [`parse_macro_input!`]: crate::parse_macro_input! 
-/// -/// ``` -/// # extern crate proc_macro; -/// # -/// use proc_macro::TokenStream; -/// use syn::parse::{Parse, ParseStream, Result}; -/// use syn::{parse_macro_input, ItemFn}; -/// -/// # const IGNORE: &str = stringify! { -/// #[proc_macro_attribute] -/// # }; -/// pub fn my_attr(args: TokenStream, input: TokenStream) -> TokenStream { -/// let args = parse_macro_input!(args as MyAttrArgs); -/// let input = parse_macro_input!(input as ItemFn); -/// -/// /* ... */ -/// # TokenStream::new() -/// } -/// -/// struct MyAttrArgs { -/// # _k: [(); { stringify! { -/// ... -/// # }; 0 }] -/// } -/// -/// impl Parse for MyAttrArgs { -/// fn parse(input: ParseStream) -> Result<Self> { -/// # stringify! { -/// ... -/// # }; -/// # unimplemented!() -/// } -/// } -/// ``` -/// -/// For errors that arise later than the initial parsing stage, the -/// [`.to_compile_error()`] or [`.into_compile_error()`] methods can be used to -/// perform an explicit conversion to `compile_error!`. -/// -/// [`.to_compile_error()`]: Error::to_compile_error -/// [`.into_compile_error()`]: Error::into_compile_error -/// -/// ``` -/// # extern crate proc_macro; -/// # -/// # use proc_macro::TokenStream; -/// # use syn::{parse_macro_input, DeriveInput}; -/// # -/// # const IGNORE: &str = stringify! { -/// #[proc_macro_derive(MyDerive)] -/// # }; -/// pub fn my_derive(input: TokenStream) -> TokenStream { -/// let input = parse_macro_input!(input as DeriveInput); -/// -/// // fn(DeriveInput) -> syn::Result<proc_macro2::TokenStream> -/// expand::my_derive(input) -/// .unwrap_or_else(syn::Error::into_compile_error) -/// .into() -/// } -/// # -/// # mod expand { -/// # use proc_macro2::TokenStream; -/// # use syn::{DeriveInput, Result}; -/// # -/// # pub fn my_derive(input: DeriveInput) -> Result<TokenStream> { -/// # unimplemented!() -/// # } -/// # } -/// ``` -pub struct Error { - messages: Vec<ErrorMessage>, -} - -struct ErrorMessage { - // Span is implemented as an index into a thread-local interner to keep the - // size small. It is not safe to access from a different thread. We want - // errors to be Send and Sync to play nicely with ecosystem crates for error - // handling, so pin the span we're given to its original thread and assume - // it is Span::call_site if accessed from any other thread. - span: ThreadBound<SpanRange>, - message: String, -} - -// Cannot use std::ops::Range<Span> because that does not implement Copy, -// whereas ThreadBound<T> requires a Copy impl as a way to ensure no Drop impls -// are involved. -struct SpanRange { - start: Span, - end: Span, -} - -#[cfg(test)] -struct _Test -where - Error: Send + Sync; - -impl Error { - /// Usually the [`ParseStream::error`] method will be used instead, which - /// automatically uses the correct span from the current position of the - /// parse stream. - /// - /// Use `Error::new` when the error needs to be triggered on some span other - /// than where the parse stream is currently positioned. - /// - /// [`ParseStream::error`]: crate::parse::ParseBuffer::error - /// - /// # Example - /// - /// ``` - /// use syn::{Error, Ident, LitStr, Result, Token}; - /// use syn::parse::ParseStream; - /// - /// // Parses input that looks like `name = "string"` where the key must be - /// // the identifier `name` and the value may be any string literal. - /// // Returns the string literal. 
- /// fn parse_name(input: ParseStream) -> Result<LitStr> { - /// let name_token: Ident = input.parse()?; - /// if name_token != "name" { - /// // Trigger an error not on the current position of the stream, - /// // but on the position of the unexpected identifier. - /// return Err(Error::new(name_token.span(), "expected `name`")); - /// } - /// input.parse::<Token![=]>()?; - /// let s: LitStr = input.parse()?; - /// Ok(s) - /// } - /// ``` - pub fn new<T: Display>(span: Span, message: T) -> Self { - return new(span, message.to_string()); - - fn new(span: Span, message: String) -> Error { - Error { - messages: vec![ErrorMessage { - span: ThreadBound::new(SpanRange { - start: span, - end: span, - }), - message, - }], - } - } - } - - /// Creates an error with the specified message spanning the given syntax - /// tree node. - /// - /// Unlike the `Error::new` constructor, this constructor takes an argument - /// `tokens` which is a syntax tree node. This allows the resulting `Error` - /// to attempt to span all tokens inside of `tokens`. While you would - /// typically be able to use the `Spanned` trait with the above `Error::new` - /// constructor, implementation limitations today mean that - /// `Error::new_spanned` may provide a higher-quality error message on - /// stable Rust. - /// - /// When in doubt it's recommended to stick to `Error::new` (or - /// `ParseStream::error`)! - #[cfg(feature = "printing")] - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - pub fn new_spanned<T: ToTokens, U: Display>(tokens: T, message: U) -> Self { - return new_spanned(tokens.into_token_stream(), message.to_string()); - - fn new_spanned(tokens: TokenStream, message: String) -> Error { - let mut iter = tokens.into_iter(); - let start = iter.next().map_or_else(Span::call_site, |t| t.span()); - let end = iter.last().map_or(start, |t| t.span()); - Error { - messages: vec![ErrorMessage { - span: ThreadBound::new(SpanRange { start, end }), - message, - }], - } - } - } - - /// The source location of the error. - /// - /// Spans are not thread-safe so this function returns `Span::call_site()` - /// if called from a different thread than the one on which the `Error` was - /// originally created. - pub fn span(&self) -> Span { - let SpanRange { start, end } = match self.messages[0].span.get() { - Some(span) => *span, - None => return Span::call_site(), - }; - start.join(end).unwrap_or(start) - } - - /// Render the error as an invocation of [`compile_error!`]. - /// - /// The [`parse_macro_input!`] macro provides a convenient way to invoke - /// this method correctly in a procedural macro. - /// - /// [`compile_error!`]: std::compile_error! - /// [`parse_macro_input!`]: crate::parse_macro_input! - pub fn to_compile_error(&self) -> TokenStream { - let mut tokens = TokenStream::new(); - for msg in &self.messages { - ErrorMessage::to_compile_error(msg, &mut tokens); - } - tokens - } - - /// Render the error as an invocation of [`compile_error!`]. - /// - /// [`compile_error!`]: std::compile_error! - /// - /// # Example - /// - /// ``` - /// # extern crate proc_macro; - /// # - /// use proc_macro::TokenStream; - /// use syn::{parse_macro_input, DeriveInput, Error}; - /// - /// # const _: &str = stringify! 
{ - /// #[proc_macro_derive(MyTrait)] - /// # }; - /// pub fn derive_my_trait(input: TokenStream) -> TokenStream { - /// let input = parse_macro_input!(input as DeriveInput); - /// my_trait::expand(input) - /// .unwrap_or_else(Error::into_compile_error) - /// .into() - /// } - /// - /// mod my_trait { - /// use proc_macro2::TokenStream; - /// use syn::{DeriveInput, Result}; - /// - /// pub(crate) fn expand(input: DeriveInput) -> Result<TokenStream> { - /// /* ... */ - /// # unimplemented!() - /// } - /// } - /// ``` - pub fn into_compile_error(self) -> TokenStream { - self.to_compile_error() - } - - /// Add another error message to self such that when `to_compile_error()` is - /// called, both errors will be emitted together. - pub fn combine(&mut self, another: Error) { - self.messages.extend(another.messages); - } -} - -impl ErrorMessage { - fn to_compile_error(&self, tokens: &mut TokenStream) { - let (start, end) = match self.span.get() { - Some(range) => (range.start, range.end), - None => (Span::call_site(), Span::call_site()), - }; - - // ::core::compile_error!($message) - tokens.append(TokenTree::Punct(Punct::new_spanned( - ':', - Spacing::Joint, - start, - ))); - tokens.append(TokenTree::Punct(Punct::new_spanned( - ':', - Spacing::Alone, - start, - ))); - tokens.append(TokenTree::Ident(Ident::new("core", start))); - tokens.append(TokenTree::Punct(Punct::new_spanned( - ':', - Spacing::Joint, - start, - ))); - tokens.append(TokenTree::Punct(Punct::new_spanned( - ':', - Spacing::Alone, - start, - ))); - tokens.append(TokenTree::Ident(Ident::new("compile_error", start))); - tokens.append(TokenTree::Punct(Punct::new_spanned( - '!', - Spacing::Alone, - start, - ))); - tokens.append(TokenTree::Group({ - let mut group = Group::new( - Delimiter::Brace, - TokenStream::from({ - let mut string = Literal::string(&self.message); - string.set_span(end); - TokenTree::Literal(string) - }), - ); - group.set_span(end); - group - })); - } -} - -#[cfg(feature = "parsing")] -pub(crate) fn new_at<T: Display>(scope: Span, cursor: Cursor, message: T) -> Error { - if cursor.eof() { - Error::new(scope, format!("unexpected end of input, {}", message)) - } else { - let span = crate::buffer::open_span_of_group(cursor); - Error::new(span, message) - } -} - -#[cfg(all(feature = "parsing", any(feature = "full", feature = "derive")))] -pub(crate) fn new2<T: Display>(start: Span, end: Span, message: T) -> Error { - return new2(start, end, message.to_string()); - - fn new2(start: Span, end: Span, message: String) -> Error { - Error { - messages: vec![ErrorMessage { - span: ThreadBound::new(SpanRange { start, end }), - message, - }], - } - } -} - -impl Debug for Error { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - if self.messages.len() == 1 { - formatter - .debug_tuple("Error") - .field(&self.messages[0]) - .finish() - } else { - formatter - .debug_tuple("Error") - .field(&self.messages) - .finish() - } - } -} - -impl Debug for ErrorMessage { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - Debug::fmt(&self.message, formatter) - } -} - -impl Display for Error { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str(&self.messages[0].message) - } -} - -impl Clone for Error { - fn clone(&self) -> Self { - Error { - messages: self.messages.clone(), - } - } -} - -impl Clone for ErrorMessage { - fn clone(&self) -> Self { - ErrorMessage { - span: self.span, - message: self.message.clone(), - } - } -} - -impl Clone for SpanRange { - fn clone(&self) -> 
Self { - *self - } -} - -impl Copy for SpanRange {} - -impl std::error::Error for Error {} - -impl From<LexError> for Error { - fn from(err: LexError) -> Self { - Error::new(err.span(), err) - } -} - -impl IntoIterator for Error { - type Item = Error; - type IntoIter = IntoIter; - - fn into_iter(self) -> Self::IntoIter { - IntoIter { - messages: self.messages.into_iter(), - } - } -} - -pub struct IntoIter { - messages: vec::IntoIter<ErrorMessage>, -} - -impl Iterator for IntoIter { - type Item = Error; - - fn next(&mut self) -> Option<Self::Item> { - Some(Error { - messages: vec![self.messages.next()?], - }) - } -} - -impl<'a> IntoIterator for &'a Error { - type Item = Error; - type IntoIter = Iter<'a>; - - fn into_iter(self) -> Self::IntoIter { - Iter { - messages: self.messages.iter(), - } - } -} - -pub struct Iter<'a> { - messages: slice::Iter<'a, ErrorMessage>, -} - -impl<'a> Iterator for Iter<'a> { - type Item = Error; - - fn next(&mut self) -> Option<Self::Item> { - Some(Error { - messages: vec![self.messages.next()?.clone()], - }) - } -} - -impl Extend<Error> for Error { - fn extend<T: IntoIterator<Item = Error>>(&mut self, iter: T) { - for err in iter { - self.combine(err); - } - } -} diff --git a/vendor/syn/src/export.rs b/vendor/syn/src/export.rs deleted file mode 100644 index b9ea5c747b75a7..00000000000000 --- a/vendor/syn/src/export.rs +++ /dev/null @@ -1,73 +0,0 @@ -#[doc(hidden)] -pub use std::clone::Clone; -#[doc(hidden)] -pub use std::cmp::{Eq, PartialEq}; -#[doc(hidden)] -pub use std::concat; -#[doc(hidden)] -pub use std::default::Default; -#[doc(hidden)] -pub use std::fmt::Debug; -#[doc(hidden)] -pub use std::hash::{Hash, Hasher}; -#[doc(hidden)] -pub use std::marker::Copy; -#[doc(hidden)] -pub use std::option::Option::{None, Some}; -#[doc(hidden)] -pub use std::result::Result::{Err, Ok}; -#[doc(hidden)] -pub use std::stringify; - -#[doc(hidden)] -pub type Formatter<'a> = std::fmt::Formatter<'a>; -#[doc(hidden)] -pub type FmtResult = std::fmt::Result; - -#[doc(hidden)] -pub type bool = std::primitive::bool; -#[doc(hidden)] -pub type str = std::primitive::str; - -#[cfg(feature = "printing")] -#[doc(hidden)] -pub use quote; - -#[doc(hidden)] -pub type Span = proc_macro2::Span; -#[doc(hidden)] -pub type TokenStream2 = proc_macro2::TokenStream; - -#[cfg(feature = "parsing")] -#[doc(hidden)] -pub use crate::group::{parse_braces, parse_brackets, parse_parens}; - -#[doc(hidden)] -pub use crate::span::IntoSpans; - -#[cfg(all(feature = "parsing", feature = "printing"))] -#[doc(hidden)] -pub use crate::parse_quote::parse as parse_quote; - -#[cfg(feature = "parsing")] -#[doc(hidden)] -pub use crate::token::parsing::{peek_punct, punct as parse_punct}; - -#[cfg(feature = "printing")] -#[doc(hidden)] -pub use crate::token::printing::punct as print_punct; - -#[cfg(feature = "parsing")] -#[doc(hidden)] -pub use crate::token::private::CustomToken; - -#[cfg(feature = "proc-macro")] -#[doc(hidden)] -pub type TokenStream = proc_macro::TokenStream; - -#[cfg(feature = "printing")] -#[doc(hidden)] -pub use quote::{ToTokens, TokenStreamExt}; - -#[doc(hidden)] -pub struct private(pub(crate) ()); diff --git a/vendor/syn/src/expr.rs b/vendor/syn/src/expr.rs deleted file mode 100644 index b1b16465fcdd91..00000000000000 --- a/vendor/syn/src/expr.rs +++ /dev/null @@ -1,4173 +0,0 @@ -use crate::attr::Attribute; -#[cfg(all(feature = "parsing", feature = "full"))] -use crate::error::Result; -#[cfg(feature = "parsing")] -use crate::ext::IdentExt as _; -#[cfg(feature = "full")] -use 
crate::generics::BoundLifetimes; -use crate::ident::Ident; -#[cfg(any(feature = "parsing", feature = "full"))] -use crate::lifetime::Lifetime; -use crate::lit::Lit; -use crate::mac::Macro; -use crate::op::{BinOp, UnOp}; -#[cfg(feature = "parsing")] -use crate::parse::ParseStream; -#[cfg(feature = "full")] -use crate::pat::Pat; -use crate::path::{AngleBracketedGenericArguments, Path, QSelf}; -use crate::punctuated::Punctuated; -#[cfg(feature = "full")] -use crate::stmt::Block; -use crate::token; -#[cfg(feature = "full")] -use crate::ty::ReturnType; -use crate::ty::Type; -use proc_macro2::{Span, TokenStream}; -#[cfg(feature = "printing")] -use quote::IdentFragment; -#[cfg(feature = "printing")] -use std::fmt::{self, Display}; -use std::hash::{Hash, Hasher}; -#[cfg(all(feature = "parsing", feature = "full"))] -use std::mem; - -ast_enum_of_structs! { - /// A Rust expression. - /// - /// *This type is available only if Syn is built with the `"derive"` or `"full"` - /// feature, but most of the variants are not available unless "full" is enabled.* - /// - /// # Syntax tree enums - /// - /// This type is a syntax tree enum. In Syn this and other syntax tree enums - /// are designed to be traversed using the following rebinding idiom. - /// - /// ``` - /// # use syn::Expr; - /// # - /// # fn example(expr: Expr) { - /// # const IGNORE: &str = stringify! { - /// let expr: Expr = /* ... */; - /// # }; - /// match expr { - /// Expr::MethodCall(expr) => { - /// /* ... */ - /// } - /// Expr::Cast(expr) => { - /// /* ... */ - /// } - /// Expr::If(expr) => { - /// /* ... */ - /// } - /// - /// /* ... */ - /// # _ => {} - /// # } - /// # } - /// ``` - /// - /// We begin with a variable `expr` of type `Expr` that has no fields - /// (because it is an enum), and by matching on it and rebinding a variable - /// with the same name `expr` we effectively imbue our variable with all of - /// the data fields provided by the variant that it turned out to be. So for - /// example above if we ended up in the `MethodCall` case then we get to use - /// `expr.receiver`, `expr.args` etc; if we ended up in the `If` case we get - /// to use `expr.cond`, `expr.then_branch`, `expr.else_branch`. - /// - /// This approach avoids repeating the variant names twice on every line. - /// - /// ``` - /// # use syn::{Expr, ExprMethodCall}; - /// # - /// # fn example(expr: Expr) { - /// // Repetitive; recommend not doing this. - /// match expr { - /// Expr::MethodCall(ExprMethodCall { method, args, .. }) => { - /// # } - /// # _ => {} - /// # } - /// # } - /// ``` - /// - /// In general, the name to which a syntax tree enum variant is bound should - /// be a suitable name for the complete syntax tree enum type. - /// - /// ``` - /// # use syn::{Expr, ExprField}; - /// # - /// # fn example(discriminant: ExprField) { - /// // Binding is called `base` which is the name I would use if I were - /// // assigning `*discriminant.base` without an `if let`. - /// if let Expr::Tuple(base) = *discriminant.base { - /// # } - /// # } - /// ``` - /// - /// A sign that you may not be choosing the right variable names is if you - /// see names getting repeated in your code, like accessing - /// `receiver.receiver` or `pat.pat` or `cond.cond`. - #[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] - #[non_exhaustive] - pub enum Expr { - /// A slice literal expression: `[a, b, c, d]`. - Array(ExprArray), - - /// An assignment expression: `a = compute()`. - Assign(ExprAssign), - - /// An async block: `async { ... }`. 
- Async(ExprAsync), - - /// An await expression: `fut.await`. - Await(ExprAwait), - - /// A binary operation: `a + b`, `a += b`. - Binary(ExprBinary), - - /// A blocked scope: `{ ... }`. - Block(ExprBlock), - - /// A `break`, with an optional label to break and an optional - /// expression. - Break(ExprBreak), - - /// A function call expression: `invoke(a, b)`. - Call(ExprCall), - - /// A cast expression: `foo as f64`. - Cast(ExprCast), - - /// A closure expression: `|a, b| a + b`. - Closure(ExprClosure), - - /// A const block: `const { ... }`. - Const(ExprConst), - - /// A `continue`, with an optional label. - Continue(ExprContinue), - - /// Access of a named struct field (`obj.k`) or unnamed tuple struct - /// field (`obj.0`). - Field(ExprField), - - /// A for loop: `for pat in expr { ... }`. - ForLoop(ExprForLoop), - - /// An expression contained within invisible delimiters. - /// - /// This variant is important for faithfully representing the precedence - /// of expressions and is related to `None`-delimited spans in a - /// `TokenStream`. - Group(ExprGroup), - - /// An `if` expression with an optional `else` block: `if expr { ... } - /// else { ... }`. - /// - /// The `else` branch expression may only be an `If` or `Block` - /// expression, not any of the other types of expression. - If(ExprIf), - - /// A square bracketed indexing expression: `vector[2]`. - Index(ExprIndex), - - /// The inferred value of a const generic argument, denoted `_`. - Infer(ExprInfer), - - /// A `let` guard: `let Some(x) = opt`. - Let(ExprLet), - - /// A literal in place of an expression: `1`, `"foo"`. - Lit(ExprLit), - - /// Conditionless loop: `loop { ... }`. - Loop(ExprLoop), - - /// A macro invocation expression: `format!("{}", q)`. - Macro(ExprMacro), - - /// A `match` expression: `match n { Some(n) => {}, None => {} }`. - Match(ExprMatch), - - /// A method call expression: `x.foo::<T>(a, b)`. - MethodCall(ExprMethodCall), - - /// A parenthesized expression: `(a + b)`. - Paren(ExprParen), - - /// A path like `std::mem::replace` possibly containing generic - /// parameters and a qualified self-type. - /// - /// A plain identifier like `x` is a path of length 1. - Path(ExprPath), - - /// A range expression: `1..2`, `1..`, `..2`, `1..=2`, `..=2`. - Range(ExprRange), - - /// Address-of operation: `&raw const place` or `&raw mut place`. - RawAddr(ExprRawAddr), - - /// A referencing operation: `&a` or `&mut a`. - Reference(ExprReference), - - /// An array literal constructed from one repeated element: `[0u8; N]`. - Repeat(ExprRepeat), - - /// A `return`, with an optional value to be returned. - Return(ExprReturn), - - /// A struct literal expression: `Point { x: 1, y: 1 }`. - /// - /// The `rest` provides the value of the remaining fields as in `S { a: - /// 1, b: 1, ..rest }`. - Struct(ExprStruct), - - /// A try-expression: `expr?`. - Try(ExprTry), - - /// A try block: `try { ... }`. - TryBlock(ExprTryBlock), - - /// A tuple expression: `(a, b, c, d)`. - Tuple(ExprTuple), - - /// A unary operation: `!x`, `*x`. - Unary(ExprUnary), - - /// An unsafe block: `unsafe { ... }`. - Unsafe(ExprUnsafe), - - /// Tokens in expression position not interpreted by Syn. - Verbatim(TokenStream), - - /// A while loop: `while expr { ... }`. - While(ExprWhile), - - /// A yield expression: `yield expr`. 
- Yield(ExprYield), - - // For testing exhaustiveness in downstream code, use the following idiom: - // - // match expr { - // #![cfg_attr(test, deny(non_exhaustive_omitted_patterns))] - // - // Expr::Array(expr) => {...} - // Expr::Assign(expr) => {...} - // ... - // Expr::Yield(expr) => {...} - // - // _ => { /* some sane fallback */ } - // } - // - // This way we fail your tests but don't break your library when adding - // a variant. You will be notified by a test failure when a variant is - // added, so that you can add code to handle it, but your library will - // continue to compile and work for downstream users in the interim. - } -} - -ast_struct! { - /// A slice literal expression: `[a, b, c, d]`. - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - pub struct ExprArray #full { - pub attrs: Vec<Attribute>, - pub bracket_token: token::Bracket, - pub elems: Punctuated<Expr, Token![,]>, - } -} - -ast_struct! { - /// An assignment expression: `a = compute()`. - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - pub struct ExprAssign #full { - pub attrs: Vec<Attribute>, - pub left: Box<Expr>, - pub eq_token: Token![=], - pub right: Box<Expr>, - } -} - -ast_struct! { - /// An async block: `async { ... }`. - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - pub struct ExprAsync #full { - pub attrs: Vec<Attribute>, - pub async_token: Token![async], - pub capture: Option<Token![move]>, - pub block: Block, - } -} - -ast_struct! { - /// An await expression: `fut.await`. - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - pub struct ExprAwait #full { - pub attrs: Vec<Attribute>, - pub base: Box<Expr>, - pub dot_token: Token![.], - pub await_token: Token![await], - } -} - -ast_struct! { - /// A binary operation: `a + b`, `a += b`. - #[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] - pub struct ExprBinary { - pub attrs: Vec<Attribute>, - pub left: Box<Expr>, - pub op: BinOp, - pub right: Box<Expr>, - } -} - -ast_struct! { - /// A blocked scope: `{ ... }`. - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - pub struct ExprBlock #full { - pub attrs: Vec<Attribute>, - pub label: Option<Label>, - pub block: Block, - } -} - -ast_struct! { - /// A `break`, with an optional label to break and an optional - /// expression. - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - pub struct ExprBreak #full { - pub attrs: Vec<Attribute>, - pub break_token: Token![break], - pub label: Option<Lifetime>, - pub expr: Option<Box<Expr>>, - } -} - -ast_struct! { - /// A function call expression: `invoke(a, b)`. - #[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] - pub struct ExprCall { - pub attrs: Vec<Attribute>, - pub func: Box<Expr>, - pub paren_token: token::Paren, - pub args: Punctuated<Expr, Token![,]>, - } -} - -ast_struct! { - /// A cast expression: `foo as f64`. - #[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] - pub struct ExprCast { - pub attrs: Vec<Attribute>, - pub expr: Box<Expr>, - pub as_token: Token![as], - pub ty: Box<Type>, - } -} - -ast_struct! { - /// A closure expression: `|a, b| a + b`. 
- #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - pub struct ExprClosure #full { - pub attrs: Vec<Attribute>, - pub lifetimes: Option<BoundLifetimes>, - pub constness: Option<Token![const]>, - pub movability: Option<Token![static]>, - pub asyncness: Option<Token![async]>, - pub capture: Option<Token![move]>, - pub or1_token: Token![|], - pub inputs: Punctuated<Pat, Token![,]>, - pub or2_token: Token![|], - pub output: ReturnType, - pub body: Box<Expr>, - } -} - -ast_struct! { - /// A const block: `const { ... }`. - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - pub struct ExprConst #full { - pub attrs: Vec<Attribute>, - pub const_token: Token![const], - pub block: Block, - } -} - -ast_struct! { - /// A `continue`, with an optional label. - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - pub struct ExprContinue #full { - pub attrs: Vec<Attribute>, - pub continue_token: Token![continue], - pub label: Option<Lifetime>, - } -} - -ast_struct! { - /// Access of a named struct field (`obj.k`) or unnamed tuple struct - /// field (`obj.0`). - #[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] - pub struct ExprField { - pub attrs: Vec<Attribute>, - pub base: Box<Expr>, - pub dot_token: Token![.], - pub member: Member, - } -} - -ast_struct! { - /// A for loop: `for pat in expr { ... }`. - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - pub struct ExprForLoop #full { - pub attrs: Vec<Attribute>, - pub label: Option<Label>, - pub for_token: Token![for], - pub pat: Box<Pat>, - pub in_token: Token![in], - pub expr: Box<Expr>, - pub body: Block, - } -} - -ast_struct! { - /// An expression contained within invisible delimiters. - /// - /// This variant is important for faithfully representing the precedence - /// of expressions and is related to `None`-delimited spans in a - /// `TokenStream`. - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - pub struct ExprGroup { - pub attrs: Vec<Attribute>, - pub group_token: token::Group, - pub expr: Box<Expr>, - } -} - -ast_struct! { - /// An `if` expression with an optional `else` block: `if expr { ... } - /// else { ... }`. - /// - /// The `else` branch expression may only be an `If` or `Block` - /// expression, not any of the other types of expression. - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - pub struct ExprIf #full { - pub attrs: Vec<Attribute>, - pub if_token: Token![if], - pub cond: Box<Expr>, - pub then_branch: Block, - pub else_branch: Option<(Token![else], Box<Expr>)>, - } -} - -ast_struct! { - /// A square bracketed indexing expression: `vector[2]`. - #[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] - pub struct ExprIndex { - pub attrs: Vec<Attribute>, - pub expr: Box<Expr>, - pub bracket_token: token::Bracket, - pub index: Box<Expr>, - } -} - -ast_struct! { - /// The inferred value of a const generic argument, denoted `_`. - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - pub struct ExprInfer #full { - pub attrs: Vec<Attribute>, - pub underscore_token: Token![_], - } -} - -ast_struct! { - /// A `let` guard: `let Some(x) = opt`. - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - pub struct ExprLet #full { - pub attrs: Vec<Attribute>, - pub let_token: Token![let], - pub pat: Box<Pat>, - pub eq_token: Token![=], - pub expr: Box<Expr>, - } -} - -ast_struct! { - /// A literal in place of an expression: `1`, `"foo"`. - #[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] - pub struct ExprLit { - pub attrs: Vec<Attribute>, - pub lit: Lit, - } -} - -ast_struct! 
{ - /// Conditionless loop: `loop { ... }`. - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - pub struct ExprLoop #full { - pub attrs: Vec<Attribute>, - pub label: Option<Label>, - pub loop_token: Token![loop], - pub body: Block, - } -} - -ast_struct! { - /// A macro invocation expression: `format!("{}", q)`. - #[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] - pub struct ExprMacro { - pub attrs: Vec<Attribute>, - pub mac: Macro, - } -} - -ast_struct! { - /// A `match` expression: `match n { Some(n) => {}, None => {} }`. - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - pub struct ExprMatch #full { - pub attrs: Vec<Attribute>, - pub match_token: Token![match], - pub expr: Box<Expr>, - pub brace_token: token::Brace, - pub arms: Vec<Arm>, - } -} - -ast_struct! { - /// A method call expression: `x.foo::<T>(a, b)`. - #[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] - pub struct ExprMethodCall { - pub attrs: Vec<Attribute>, - pub receiver: Box<Expr>, - pub dot_token: Token![.], - pub method: Ident, - pub turbofish: Option<AngleBracketedGenericArguments>, - pub paren_token: token::Paren, - pub args: Punctuated<Expr, Token![,]>, - } -} - -ast_struct! { - /// A parenthesized expression: `(a + b)`. - #[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] - pub struct ExprParen { - pub attrs: Vec<Attribute>, - pub paren_token: token::Paren, - pub expr: Box<Expr>, - } -} - -ast_struct! { - /// A path like `std::mem::replace` possibly containing generic - /// parameters and a qualified self-type. - /// - /// A plain identifier like `x` is a path of length 1. - #[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] - pub struct ExprPath { - pub attrs: Vec<Attribute>, - pub qself: Option<QSelf>, - pub path: Path, - } -} - -ast_struct! { - /// A range expression: `1..2`, `1..`, `..2`, `1..=2`, `..=2`. - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - pub struct ExprRange #full { - pub attrs: Vec<Attribute>, - pub start: Option<Box<Expr>>, - pub limits: RangeLimits, - pub end: Option<Box<Expr>>, - } -} - -ast_struct! { - /// Address-of operation: `&raw const place` or `&raw mut place`. - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - pub struct ExprRawAddr #full { - pub attrs: Vec<Attribute>, - pub and_token: Token![&], - pub raw: Token![raw], - pub mutability: PointerMutability, - pub expr: Box<Expr>, - } -} - -ast_struct! { - /// A referencing operation: `&a` or `&mut a`. - #[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] - pub struct ExprReference { - pub attrs: Vec<Attribute>, - pub and_token: Token![&], - pub mutability: Option<Token![mut]>, - pub expr: Box<Expr>, - } -} - -ast_struct! { - /// An array literal constructed from one repeated element: `[0u8; N]`. - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - pub struct ExprRepeat #full { - pub attrs: Vec<Attribute>, - pub bracket_token: token::Bracket, - pub expr: Box<Expr>, - pub semi_token: Token![;], - pub len: Box<Expr>, - } -} - -ast_struct! { - /// A `return`, with an optional value to be returned. - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - pub struct ExprReturn #full { - pub attrs: Vec<Attribute>, - pub return_token: Token![return], - pub expr: Option<Box<Expr>>, - } -} - -ast_struct! { - /// A struct literal expression: `Point { x: 1, y: 1 }`. - /// - /// The `rest` provides the value of the remaining fields as in `S { a: - /// 1, b: 1, ..rest }`. 
- #[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] - pub struct ExprStruct { - pub attrs: Vec<Attribute>, - pub qself: Option<QSelf>, - pub path: Path, - pub brace_token: token::Brace, - pub fields: Punctuated<FieldValue, Token![,]>, - pub dot2_token: Option<Token![..]>, - pub rest: Option<Box<Expr>>, - } -} - -ast_struct! { - /// A try-expression: `expr?`. - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - pub struct ExprTry #full { - pub attrs: Vec<Attribute>, - pub expr: Box<Expr>, - pub question_token: Token![?], - } -} - -ast_struct! { - /// A try block: `try { ... }`. - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - pub struct ExprTryBlock #full { - pub attrs: Vec<Attribute>, - pub try_token: Token![try], - pub block: Block, - } -} - -ast_struct! { - /// A tuple expression: `(a, b, c, d)`. - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - pub struct ExprTuple { - pub attrs: Vec<Attribute>, - pub paren_token: token::Paren, - pub elems: Punctuated<Expr, Token![,]>, - } -} - -ast_struct! { - /// A unary operation: `!x`, `*x`. - #[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] - pub struct ExprUnary { - pub attrs: Vec<Attribute>, - pub op: UnOp, - pub expr: Box<Expr>, - } -} - -ast_struct! { - /// An unsafe block: `unsafe { ... }`. - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - pub struct ExprUnsafe #full { - pub attrs: Vec<Attribute>, - pub unsafe_token: Token![unsafe], - pub block: Block, - } -} - -ast_struct! { - /// A while loop: `while expr { ... }`. - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - pub struct ExprWhile #full { - pub attrs: Vec<Attribute>, - pub label: Option<Label>, - pub while_token: Token![while], - pub cond: Box<Expr>, - pub body: Block, - } -} - -ast_struct! { - /// A yield expression: `yield expr`. - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - pub struct ExprYield #full { - pub attrs: Vec<Attribute>, - pub yield_token: Token![yield], - pub expr: Option<Box<Expr>>, - } -} - -impl Expr { - /// An unspecified invalid expression. - /// - /// ``` - /// use quote::ToTokens; - /// use std::mem; - /// use syn::{parse_quote, Expr}; - /// - /// fn unparenthesize(e: &mut Expr) { - /// while let Expr::Paren(paren) = e { - /// *e = mem::replace(&mut *paren.expr, Expr::PLACEHOLDER); - /// } - /// } - /// - /// fn main() { - /// let mut e: Expr = parse_quote! { ((1 + 1)) }; - /// unparenthesize(&mut e); - /// assert_eq!("1 + 1", e.to_token_stream().to_string()); - /// } - /// ``` - pub const PLACEHOLDER: Self = Expr::Path(ExprPath { - attrs: Vec::new(), - qself: None, - path: Path { - leading_colon: None, - segments: Punctuated::new(), - }, - }); - - /// An alternative to the primary `Expr::parse` parser (from the [`Parse`] - /// trait) for ambiguous syntactic positions in which a trailing brace - /// should not be taken as part of the expression. - /// - /// [`Parse`]: crate::parse::Parse - /// - /// Rust grammar has an ambiguity where braces sometimes turn a path - /// expression into a struct initialization and sometimes do not. In the - /// following code, the expression `S {}` is one expression. Presumably - /// there is an empty struct `struct S {}` defined somewhere which it is - /// instantiating. 
- /// - /// ``` - /// # struct S; - /// # impl std::ops::Deref for S { - /// # type Target = bool; - /// # fn deref(&self) -> &Self::Target { - /// # &true - /// # } - /// # } - /// let _ = *S {}; - /// - /// // parsed by rustc as: `*(S {})` - /// ``` - /// - /// We would want to parse the above using `Expr::parse` after the `=` - /// token. - /// - /// But in the following, `S {}` is *not* a struct init expression. - /// - /// ``` - /// # const S: &bool = &true; - /// if *S {} {} - /// - /// // parsed by rustc as: - /// // - /// // if (*S) { - /// // /* empty block */ - /// // } - /// // { - /// // /* another empty block */ - /// // } - /// ``` - /// - /// For that reason we would want to parse if-conditions using - /// `Expr::parse_without_eager_brace` after the `if` token. Same for similar - /// syntactic positions such as the condition expr after a `while` token or - /// the expr at the top of a `match`. - /// - /// The Rust grammar's choices around which way this ambiguity is resolved - /// at various syntactic positions is fairly arbitrary. Really either parse - /// behavior could work in most positions, and language designers just - /// decide each case based on which is more likely to be what the programmer - /// had in mind most of the time. - /// - /// ``` - /// # struct S; - /// # fn doc() -> S { - /// if return S {} {} - /// # unreachable!() - /// # } - /// - /// // parsed by rustc as: - /// // - /// // if (return (S {})) { - /// // } - /// // - /// // but could equally well have been this other arbitrary choice: - /// // - /// // if (return S) { - /// // } - /// // {} - /// ``` - /// - /// Note the grammar ambiguity on trailing braces is distinct from - /// precedence and is not captured by assigning a precedence level to the - /// braced struct init expr in relation to other operators. This can be - /// illustrated by `return 0..S {}` vs `match 0..S {}`. The former parses as - /// `return (0..(S {}))` implying tighter precedence for struct init than - /// `..`, while the latter parses as `match (0..S) {}` implying tighter - /// precedence for `..` than struct init, a contradiction. - #[cfg(all(feature = "full", feature = "parsing"))] - #[cfg_attr(docsrs, doc(cfg(all(feature = "full", feature = "parsing"))))] - pub fn parse_without_eager_brace(input: ParseStream) -> Result<Expr> { - parsing::ambiguous_expr(input, parsing::AllowStruct(false)) - } - - /// An alternative to the primary `Expr::parse` parser (from the [`Parse`] - /// trait) for syntactic positions in which expression boundaries are placed - /// more eagerly than done by the typical expression grammar. This includes - /// expressions at the head of a statement or in the right-hand side of a - /// `match` arm. - /// - /// [`Parse`]: crate::parse::Parse - /// - /// Compare the following cases: - /// - /// 1. - /// ``` - /// # let result = (); - /// # let guard = false; - /// # let cond = true; - /// # let f = true; - /// # let g = f; - /// # - /// let _ = match result { - /// () if guard => if cond { f } else { g } - /// () => false, - /// }; - /// ``` - /// - /// 2. - /// ``` - /// # let cond = true; - /// # let f = (); - /// # let g = f; - /// # - /// let _ = || { - /// if cond { f } else { g } - /// () - /// }; - /// ``` - /// - /// 3. - /// ``` - /// # let cond = true; - /// # let f = || (); - /// # let g = f; - /// # - /// let _ = [if cond { f } else { g } ()]; - /// ``` - /// - /// The same sequence of tokens `if cond { f } else { g } ()` appears in - /// expression position 3 times. 
The first two syntactic positions use eager - /// placement of expression boundaries, and parse as `Expr::If`, with the - /// adjacent `()` becoming `Pat::Tuple` or `Expr::Tuple`. In contrast, the - /// third case uses standard expression boundaries and parses as - /// `Expr::Call`. - /// - /// As with [`parse_without_eager_brace`], this ambiguity in the Rust - /// grammar is independent of precedence. - /// - /// [`parse_without_eager_brace`]: Self::parse_without_eager_brace - #[cfg(all(feature = "full", feature = "parsing"))] - #[cfg_attr(docsrs, doc(cfg(all(feature = "full", feature = "parsing"))))] - pub fn parse_with_earlier_boundary_rule(input: ParseStream) -> Result<Expr> { - parsing::parse_with_earlier_boundary_rule(input) - } - - /// Returns whether the next token in the parse stream is one that might - /// possibly form the beginning of an expr. - /// - /// This classification is a load-bearing part of the grammar of some Rust - /// expressions, notably `return` and `break`. For example `return < …` will - /// never parse `<` as a binary operator regardless of what comes after, - /// because `<` is a legal starting token for an expression and so it's - /// required to be continued as a return value, such as `return <Struct as - /// Trait>::CONST`. Meanwhile `return > …` treats the `>` as a binary - /// operator because it cannot be a starting token for any Rust expression. - #[cfg(feature = "parsing")] - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - pub fn peek(input: ParseStream) -> bool { - input.peek(Ident::peek_any) && !input.peek(Token![as]) // value name or keyword - || input.peek(token::Paren) // tuple - || input.peek(token::Bracket) // array - || input.peek(token::Brace) // block - || input.peek(Lit) // literal - || input.peek(Token![!]) && !input.peek(Token![!=]) // operator not - || input.peek(Token![-]) && !input.peek(Token![-=]) && !input.peek(Token![->]) // unary minus - || input.peek(Token![*]) && !input.peek(Token![*=]) // dereference - || input.peek(Token![|]) && !input.peek(Token![|=]) // closure - || input.peek(Token![&]) && !input.peek(Token![&=]) // reference - || input.peek(Token![..]) // range - || input.peek(Token![<]) && !input.peek(Token![<=]) && !input.peek(Token![<<=]) // associated path - || input.peek(Token![::]) // absolute path - || input.peek(Lifetime) // labeled loop - || input.peek(Token![#]) // expression attributes - } - - #[cfg(all(feature = "parsing", feature = "full"))] - pub(crate) fn replace_attrs(&mut self, new: Vec<Attribute>) -> Vec<Attribute> { - match self { - Expr::Array(ExprArray { attrs, .. }) - | Expr::Assign(ExprAssign { attrs, .. }) - | Expr::Async(ExprAsync { attrs, .. }) - | Expr::Await(ExprAwait { attrs, .. }) - | Expr::Binary(ExprBinary { attrs, .. }) - | Expr::Block(ExprBlock { attrs, .. }) - | Expr::Break(ExprBreak { attrs, .. }) - | Expr::Call(ExprCall { attrs, .. }) - | Expr::Cast(ExprCast { attrs, .. }) - | Expr::Closure(ExprClosure { attrs, .. }) - | Expr::Const(ExprConst { attrs, .. }) - | Expr::Continue(ExprContinue { attrs, .. }) - | Expr::Field(ExprField { attrs, .. }) - | Expr::ForLoop(ExprForLoop { attrs, .. }) - | Expr::Group(ExprGroup { attrs, .. }) - | Expr::If(ExprIf { attrs, .. }) - | Expr::Index(ExprIndex { attrs, .. }) - | Expr::Infer(ExprInfer { attrs, .. }) - | Expr::Let(ExprLet { attrs, .. }) - | Expr::Lit(ExprLit { attrs, .. }) - | Expr::Loop(ExprLoop { attrs, .. }) - | Expr::Macro(ExprMacro { attrs, .. }) - | Expr::Match(ExprMatch { attrs, .. }) - | Expr::MethodCall(ExprMethodCall { attrs, .. 
}) - | Expr::Paren(ExprParen { attrs, .. }) - | Expr::Path(ExprPath { attrs, .. }) - | Expr::Range(ExprRange { attrs, .. }) - | Expr::RawAddr(ExprRawAddr { attrs, .. }) - | Expr::Reference(ExprReference { attrs, .. }) - | Expr::Repeat(ExprRepeat { attrs, .. }) - | Expr::Return(ExprReturn { attrs, .. }) - | Expr::Struct(ExprStruct { attrs, .. }) - | Expr::Try(ExprTry { attrs, .. }) - | Expr::TryBlock(ExprTryBlock { attrs, .. }) - | Expr::Tuple(ExprTuple { attrs, .. }) - | Expr::Unary(ExprUnary { attrs, .. }) - | Expr::Unsafe(ExprUnsafe { attrs, .. }) - | Expr::While(ExprWhile { attrs, .. }) - | Expr::Yield(ExprYield { attrs, .. }) => mem::replace(attrs, new), - Expr::Verbatim(_) => Vec::new(), - } - } -} - -ast_enum! { - /// A struct or tuple struct field accessed in a struct literal or field - /// expression. - #[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] - pub enum Member { - /// A named field like `self.x`. - Named(Ident), - /// An unnamed field like `self.0`. - Unnamed(Index), - } -} - -impl From<Ident> for Member { - fn from(ident: Ident) -> Member { - Member::Named(ident) - } -} - -impl From<Index> for Member { - fn from(index: Index) -> Member { - Member::Unnamed(index) - } -} - -impl From<usize> for Member { - fn from(index: usize) -> Member { - Member::Unnamed(Index::from(index)) - } -} - -impl Eq for Member {} - -impl PartialEq for Member { - fn eq(&self, other: &Self) -> bool { - match (self, other) { - (Member::Named(this), Member::Named(other)) => this == other, - (Member::Unnamed(this), Member::Unnamed(other)) => this == other, - _ => false, - } - } -} - -impl Hash for Member { - fn hash<H: Hasher>(&self, state: &mut H) { - match self { - Member::Named(m) => m.hash(state), - Member::Unnamed(m) => m.hash(state), - } - } -} - -#[cfg(feature = "printing")] -impl IdentFragment for Member { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - match self { - Member::Named(m) => Display::fmt(m, formatter), - Member::Unnamed(m) => Display::fmt(&m.index, formatter), - } - } - - fn span(&self) -> Option<Span> { - match self { - Member::Named(m) => Some(m.span()), - Member::Unnamed(m) => Some(m.span), - } - } -} - -#[cfg(any(feature = "parsing", feature = "printing"))] -impl Member { - pub(crate) fn is_named(&self) -> bool { - match self { - Member::Named(_) => true, - Member::Unnamed(_) => false, - } - } -} - -ast_struct! { - /// The index of an unnamed tuple struct field. - #[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] - pub struct Index { - pub index: u32, - pub span: Span, - } -} - -impl From<usize> for Index { - fn from(index: usize) -> Index { - assert!(index < u32::MAX as usize); - Index { - index: index as u32, - span: Span::call_site(), - } - } -} - -impl Eq for Index {} - -impl PartialEq for Index { - fn eq(&self, other: &Self) -> bool { - self.index == other.index - } -} - -impl Hash for Index { - fn hash<H: Hasher>(&self, state: &mut H) { - self.index.hash(state); - } -} - -#[cfg(feature = "printing")] -impl IdentFragment for Index { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - Display::fmt(&self.index, formatter) - } - - fn span(&self) -> Option<Span> { - Some(self.span) - } -} - -ast_struct! { - /// A field-value pair in a struct literal. - #[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] - pub struct FieldValue { - pub attrs: Vec<Attribute>, - pub member: Member, - - /// The colon in `Struct { x: x }`. 
If written in shorthand like - /// `Struct { x }`, there is no colon. - pub colon_token: Option<Token![:]>, - - pub expr: Expr, - } -} - -#[cfg(feature = "full")] -ast_struct! { - /// A lifetime labeling a `for`, `while`, or `loop`. - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - pub struct Label { - pub name: Lifetime, - pub colon_token: Token![:], - } -} - -#[cfg(feature = "full")] -ast_struct! { - /// One arm of a `match` expression: `0..=10 => { return true; }`. - /// - /// As in: - /// - /// ``` - /// # fn f() -> bool { - /// # let n = 0; - /// match n { - /// 0..=10 => { - /// return true; - /// } - /// // ... - /// # _ => {} - /// } - /// # false - /// # } - /// ``` - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - pub struct Arm { - pub attrs: Vec<Attribute>, - pub pat: Pat, - pub guard: Option<(Token![if], Box<Expr>)>, - pub fat_arrow_token: Token![=>], - pub body: Box<Expr>, - pub comma: Option<Token![,]>, - } -} - -#[cfg(feature = "full")] -ast_enum! { - /// Limit types of a range, inclusive or exclusive. - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - pub enum RangeLimits { - /// Inclusive at the beginning, exclusive at the end. - HalfOpen(Token![..]), - /// Inclusive at the beginning and end. - Closed(Token![..=]), - } -} - -#[cfg(feature = "full")] -ast_enum! { - /// Mutability of a raw pointer (`*const T`, `*mut T`), in which non-mutable - /// isn't the implicit default. - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - pub enum PointerMutability { - Const(Token![const]), - Mut(Token![mut]), - } -} - -#[cfg(feature = "parsing")] -pub(crate) mod parsing { - #[cfg(feature = "full")] - use crate::attr; - use crate::attr::Attribute; - #[cfg(feature = "full")] - use crate::classify; - use crate::error::{Error, Result}; - #[cfg(feature = "full")] - use crate::expr::{ - Arm, ExprArray, ExprAssign, ExprAsync, ExprAwait, ExprBlock, ExprBreak, ExprClosure, - ExprConst, ExprContinue, ExprForLoop, ExprIf, ExprInfer, ExprLet, ExprLoop, ExprMatch, - ExprRange, ExprRawAddr, ExprRepeat, ExprReturn, ExprTry, ExprTryBlock, ExprUnsafe, - ExprWhile, ExprYield, Label, PointerMutability, RangeLimits, - }; - use crate::expr::{ - Expr, ExprBinary, ExprCall, ExprCast, ExprField, ExprGroup, ExprIndex, ExprLit, ExprMacro, - ExprMethodCall, ExprParen, ExprPath, ExprReference, ExprStruct, ExprTuple, ExprUnary, - FieldValue, Index, Member, - }; - #[cfg(feature = "full")] - use crate::generics::{self, BoundLifetimes}; - use crate::ident::Ident; - #[cfg(feature = "full")] - use crate::lifetime::Lifetime; - use crate::lit::{Lit, LitFloat, LitInt}; - use crate::mac::{self, Macro}; - use crate::op::BinOp; - use crate::parse::discouraged::Speculative as _; - #[cfg(feature = "full")] - use crate::parse::ParseBuffer; - use crate::parse::{Parse, ParseStream}; - #[cfg(feature = "full")] - use crate::pat::{Pat, PatType}; - use crate::path::{self, AngleBracketedGenericArguments, Path, QSelf}; - use crate::precedence::Precedence; - use crate::punctuated::Punctuated; - #[cfg(feature = "full")] - use crate::stmt::Block; - use crate::token; - use crate::ty; - #[cfg(feature = "full")] - use crate::ty::{ReturnType, Type}; - use crate::verbatim; - #[cfg(feature = "full")] - use proc_macro2::{Span, TokenStream}; - use std::mem; - - // When we're parsing expressions which occur before blocks, like in an if - // statement's condition, we cannot parse a struct literal. 
- // - // Struct literals are ambiguous in certain positions - // https://github.com/rust-lang/rfcs/pull/92 - #[cfg(feature = "full")] - pub(super) struct AllowStruct(pub bool); - - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - impl Parse for Expr { - fn parse(input: ParseStream) -> Result<Self> { - ambiguous_expr( - input, - #[cfg(feature = "full")] - AllowStruct(true), - ) - } - } - - #[cfg(feature = "full")] - pub(super) fn parse_with_earlier_boundary_rule(input: ParseStream) -> Result<Expr> { - let mut attrs = input.call(expr_attrs)?; - let mut expr = if input.peek(token::Group) { - let allow_struct = AllowStruct(true); - let atom = expr_group(input, allow_struct)?; - if continue_parsing_early(&atom) { - trailer_helper(input, atom)? - } else { - atom - } - } else if input.peek(Token![if]) { - Expr::If(input.parse()?) - } else if input.peek(Token![while]) { - Expr::While(input.parse()?) - } else if input.peek(Token![for]) - && !generics::parsing::choose_generics_over_qpath_after_keyword(input) - { - Expr::ForLoop(input.parse()?) - } else if input.peek(Token![loop]) { - Expr::Loop(input.parse()?) - } else if input.peek(Token![match]) { - Expr::Match(input.parse()?) - } else if input.peek(Token![try]) && input.peek2(token::Brace) { - Expr::TryBlock(input.parse()?) - } else if input.peek(Token![unsafe]) { - Expr::Unsafe(input.parse()?) - } else if input.peek(Token![const]) && input.peek2(token::Brace) { - Expr::Const(input.parse()?) - } else if input.peek(token::Brace) { - Expr::Block(input.parse()?) - } else if input.peek(Lifetime) { - atom_labeled(input)? - } else { - let allow_struct = AllowStruct(true); - unary_expr(input, allow_struct)? - }; - - if continue_parsing_early(&expr) { - attrs.extend(expr.replace_attrs(Vec::new())); - expr.replace_attrs(attrs); - - let allow_struct = AllowStruct(true); - return parse_expr(input, expr, allow_struct, Precedence::MIN); - } - - if input.peek(Token![.]) && !input.peek(Token![..]) || input.peek(Token![?]) { - expr = trailer_helper(input, expr)?; - - attrs.extend(expr.replace_attrs(Vec::new())); - expr.replace_attrs(attrs); - - let allow_struct = AllowStruct(true); - return parse_expr(input, expr, allow_struct, Precedence::MIN); - } - - attrs.extend(expr.replace_attrs(Vec::new())); - expr.replace_attrs(attrs); - Ok(expr) - } - - #[cfg(feature = "full")] - impl Copy for AllowStruct {} - - #[cfg(feature = "full")] - impl Clone for AllowStruct { - fn clone(&self) -> Self { - *self - } - } - - #[cfg(feature = "full")] - fn parse_expr( - input: ParseStream, - mut lhs: Expr, - allow_struct: AllowStruct, - base: Precedence, - ) -> Result<Expr> { - loop { - let ahead = input.fork(); - if let Expr::Range(_) = lhs { - // A range cannot be the left-hand side of another binary operator. 
- break; - } else if let Ok(op) = ahead.parse::<BinOp>() { - let precedence = Precedence::of_binop(&op); - if precedence < base { - break; - } - if precedence == Precedence::Assign { - if let Expr::Range(_) = lhs { - break; - } - } - if precedence == Precedence::Compare { - if let Expr::Binary(lhs) = &lhs { - if Precedence::of_binop(&lhs.op) == Precedence::Compare { - return Err(input.error("comparison operators cannot be chained")); - } - } - } - input.advance_to(&ahead); - let right = parse_binop_rhs(input, allow_struct, precedence)?; - lhs = Expr::Binary(ExprBinary { - attrs: Vec::new(), - left: Box::new(lhs), - op, - right, - }); - } else if Precedence::Assign >= base - && input.peek(Token![=]) - && !input.peek(Token![=>]) - && match lhs { - Expr::Range(_) => false, - _ => true, - } - { - let eq_token: Token![=] = input.parse()?; - let right = parse_binop_rhs(input, allow_struct, Precedence::Assign)?; - lhs = Expr::Assign(ExprAssign { - attrs: Vec::new(), - left: Box::new(lhs), - eq_token, - right, - }); - } else if Precedence::Range >= base && input.peek(Token![..]) { - let limits: RangeLimits = input.parse()?; - let end = parse_range_end(input, &limits, allow_struct)?; - lhs = Expr::Range(ExprRange { - attrs: Vec::new(), - start: Some(Box::new(lhs)), - limits, - end, - }); - } else if Precedence::Cast >= base && input.peek(Token![as]) { - let as_token: Token![as] = input.parse()?; - let allow_plus = false; - let allow_group_generic = false; - let ty = ty::parsing::ambig_ty(input, allow_plus, allow_group_generic)?; - check_cast(input)?; - lhs = Expr::Cast(ExprCast { - attrs: Vec::new(), - expr: Box::new(lhs), - as_token, - ty: Box::new(ty), - }); - } else { - break; - } - } - Ok(lhs) - } - - #[cfg(not(feature = "full"))] - fn parse_expr(input: ParseStream, mut lhs: Expr, base: Precedence) -> Result<Expr> { - loop { - let ahead = input.fork(); - if let Ok(op) = ahead.parse::<BinOp>() { - let precedence = Precedence::of_binop(&op); - if precedence < base { - break; - } - if precedence == Precedence::Compare { - if let Expr::Binary(lhs) = &lhs { - if Precedence::of_binop(&lhs.op) == Precedence::Compare { - return Err(input.error("comparison operators cannot be chained")); - } - } - } - input.advance_to(&ahead); - let right = parse_binop_rhs(input, precedence)?; - lhs = Expr::Binary(ExprBinary { - attrs: Vec::new(), - left: Box::new(lhs), - op, - right, - }); - } else if Precedence::Cast >= base && input.peek(Token![as]) { - let as_token: Token![as] = input.parse()?; - let allow_plus = false; - let allow_group_generic = false; - let ty = ty::parsing::ambig_ty(input, allow_plus, allow_group_generic)?; - check_cast(input)?; - lhs = Expr::Cast(ExprCast { - attrs: Vec::new(), - expr: Box::new(lhs), - as_token, - ty: Box::new(ty), - }); - } else { - break; - } - } - Ok(lhs) - } - - fn parse_binop_rhs( - input: ParseStream, - #[cfg(feature = "full")] allow_struct: AllowStruct, - precedence: Precedence, - ) -> Result<Box<Expr>> { - let mut rhs = unary_expr( - input, - #[cfg(feature = "full")] - allow_struct, - )?; - loop { - let next = peek_precedence(input); - if next > precedence || next == precedence && precedence == Precedence::Assign { - let cursor = input.cursor(); - rhs = parse_expr( - input, - rhs, - #[cfg(feature = "full")] - allow_struct, - next, - )?; - if cursor == input.cursor() { - // Bespoke grammar restrictions separate from precedence can - // cause parsing to not advance, such as `..a` being - // disallowed in the left-hand side of binary operators, - // even ones that have 
lower precedence than `..`. - break; - } - } else { - break; - } - } - Ok(Box::new(rhs)) - } - - fn peek_precedence(input: ParseStream) -> Precedence { - if let Ok(op) = input.fork().parse() { - Precedence::of_binop(&op) - } else if input.peek(Token![=]) && !input.peek(Token![=>]) { - Precedence::Assign - } else if input.peek(Token![..]) { - Precedence::Range - } else if input.peek(Token![as]) { - Precedence::Cast - } else { - Precedence::MIN - } - } - - // Parse an arbitrary expression. - pub(super) fn ambiguous_expr( - input: ParseStream, - #[cfg(feature = "full")] allow_struct: AllowStruct, - ) -> Result<Expr> { - let lhs = unary_expr( - input, - #[cfg(feature = "full")] - allow_struct, - )?; - parse_expr( - input, - lhs, - #[cfg(feature = "full")] - allow_struct, - Precedence::MIN, - ) - } - - #[cfg(feature = "full")] - fn expr_attrs(input: ParseStream) -> Result<Vec<Attribute>> { - let mut attrs = Vec::new(); - while !input.peek(token::Group) && input.peek(Token![#]) { - attrs.push(input.call(attr::parsing::single_parse_outer)?); - } - Ok(attrs) - } - - // <UnOp> <trailer> - // & <trailer> - // &mut <trailer> - // box <trailer> - #[cfg(feature = "full")] - fn unary_expr(input: ParseStream, allow_struct: AllowStruct) -> Result<Expr> { - let begin = input.fork(); - let attrs = input.call(expr_attrs)?; - if input.peek(token::Group) { - return trailer_expr(begin, attrs, input, allow_struct); - } - - if input.peek(Token![&]) { - let and_token: Token![&] = input.parse()?; - let raw: Option<Token![raw]> = if input.peek(Token![raw]) - && (input.peek2(Token![mut]) || input.peek2(Token![const])) - { - Some(input.parse()?) - } else { - None - }; - let mutability: Option<Token![mut]> = input.parse()?; - let const_token: Option<Token![const]> = if raw.is_some() && mutability.is_none() { - Some(input.parse()?) - } else { - None - }; - let expr = Box::new(unary_expr(input, allow_struct)?); - if let Some(raw) = raw { - Ok(Expr::RawAddr(ExprRawAddr { - attrs, - and_token, - raw, - mutability: match mutability { - Some(mut_token) => PointerMutability::Mut(mut_token), - None => PointerMutability::Const(const_token.unwrap()), - }, - expr, - })) - } else { - Ok(Expr::Reference(ExprReference { - attrs, - and_token, - mutability, - expr, - })) - } - } else if input.peek(Token![*]) || input.peek(Token![!]) || input.peek(Token![-]) { - expr_unary(input, attrs, allow_struct).map(Expr::Unary) - } else { - trailer_expr(begin, attrs, input, allow_struct) - } - } - - #[cfg(not(feature = "full"))] - fn unary_expr(input: ParseStream) -> Result<Expr> { - if input.peek(Token![&]) { - Ok(Expr::Reference(ExprReference { - attrs: Vec::new(), - and_token: input.parse()?, - mutability: input.parse()?, - expr: Box::new(unary_expr(input)?), - })) - } else if input.peek(Token![*]) || input.peek(Token![!]) || input.peek(Token![-]) { - Ok(Expr::Unary(ExprUnary { - attrs: Vec::new(), - op: input.parse()?, - expr: Box::new(unary_expr(input)?), - })) - } else { - trailer_expr(input) - } - } - - // <atom> (..<args>) ... - // <atom> . <ident> (..<args>) ... - // <atom> . <ident> ... - // <atom> . <lit> ... - // <atom> [ <expr> ] ... - // <atom> ? ... 
- #[cfg(feature = "full")] - fn trailer_expr( - begin: ParseBuffer, - mut attrs: Vec<Attribute>, - input: ParseStream, - allow_struct: AllowStruct, - ) -> Result<Expr> { - let atom = atom_expr(input, allow_struct)?; - let mut e = trailer_helper(input, atom)?; - - if let Expr::Verbatim(tokens) = &mut e { - *tokens = verbatim::between(&begin, input); - } else if !attrs.is_empty() { - if let Expr::Range(range) = e { - let spans: &[Span] = match &range.limits { - RangeLimits::HalfOpen(limits) => &limits.spans, - RangeLimits::Closed(limits) => &limits.spans, - }; - return Err(crate::error::new2( - spans[0], - *spans.last().unwrap(), - "attributes are not allowed on range expressions starting with `..`", - )); - } - let inner_attrs = e.replace_attrs(Vec::new()); - attrs.extend(inner_attrs); - e.replace_attrs(attrs); - } - - Ok(e) - } - - #[cfg(feature = "full")] - fn trailer_helper(input: ParseStream, mut e: Expr) -> Result<Expr> { - loop { - if input.peek(token::Paren) { - let content; - e = Expr::Call(ExprCall { - attrs: Vec::new(), - func: Box::new(e), - paren_token: parenthesized!(content in input), - args: content.parse_terminated(Expr::parse, Token![,])?, - }); - } else if input.peek(Token![.]) - && !input.peek(Token![..]) - && match e { - Expr::Range(_) => false, - _ => true, - } - { - let mut dot_token: Token![.] = input.parse()?; - - let float_token: Option<LitFloat> = input.parse()?; - if let Some(float_token) = float_token { - if multi_index(&mut e, &mut dot_token, float_token)? { - continue; - } - } - - let await_token: Option<Token![await]> = input.parse()?; - if let Some(await_token) = await_token { - e = Expr::Await(ExprAwait { - attrs: Vec::new(), - base: Box::new(e), - dot_token, - await_token, - }); - continue; - } - - let member: Member = input.parse()?; - let turbofish = if member.is_named() && input.peek(Token![::]) { - Some(AngleBracketedGenericArguments::parse_turbofish(input)?) - } else { - None - }; - - if turbofish.is_some() || input.peek(token::Paren) { - if let Member::Named(method) = member { - let content; - e = Expr::MethodCall(ExprMethodCall { - attrs: Vec::new(), - receiver: Box::new(e), - dot_token, - method, - turbofish, - paren_token: parenthesized!(content in input), - args: content.parse_terminated(Expr::parse, Token![,])?, - }); - continue; - } - } - - e = Expr::Field(ExprField { - attrs: Vec::new(), - base: Box::new(e), - dot_token, - member, - }); - } else if input.peek(token::Bracket) { - let content; - e = Expr::Index(ExprIndex { - attrs: Vec::new(), - expr: Box::new(e), - bracket_token: bracketed!(content in input), - index: content.parse()?, - }); - } else if input.peek(Token![?]) - && match e { - Expr::Range(_) => false, - _ => true, - } - { - e = Expr::Try(ExprTry { - attrs: Vec::new(), - expr: Box::new(e), - question_token: input.parse()?, - }); - } else { - break; - } - } - Ok(e) - } - - #[cfg(not(feature = "full"))] - fn trailer_expr(input: ParseStream) -> Result<Expr> { - let mut e = atom_expr(input)?; - - loop { - if input.peek(token::Paren) { - let content; - e = Expr::Call(ExprCall { - attrs: Vec::new(), - func: Box::new(e), - paren_token: parenthesized!(content in input), - args: content.parse_terminated(Expr::parse, Token![,])?, - }); - } else if input.peek(Token![.]) - && !input.peek(Token![..]) - && !input.peek2(Token![await]) - { - let mut dot_token: Token![.] = input.parse()?; - - let float_token: Option<LitFloat> = input.parse()?; - if let Some(float_token) = float_token { - if multi_index(&mut e, &mut dot_token, float_token)? 
{ - continue; - } - } - - let member: Member = input.parse()?; - let turbofish = if member.is_named() && input.peek(Token![::]) { - let colon2_token: Token![::] = input.parse()?; - let turbofish = - AngleBracketedGenericArguments::do_parse(Some(colon2_token), input)?; - Some(turbofish) - } else { - None - }; - - if turbofish.is_some() || input.peek(token::Paren) { - if let Member::Named(method) = member { - let content; - e = Expr::MethodCall(ExprMethodCall { - attrs: Vec::new(), - receiver: Box::new(e), - dot_token, - method, - turbofish, - paren_token: parenthesized!(content in input), - args: content.parse_terminated(Expr::parse, Token![,])?, - }); - continue; - } - } - - e = Expr::Field(ExprField { - attrs: Vec::new(), - base: Box::new(e), - dot_token, - member, - }); - } else if input.peek(token::Bracket) { - let content; - e = Expr::Index(ExprIndex { - attrs: Vec::new(), - expr: Box::new(e), - bracket_token: bracketed!(content in input), - index: content.parse()?, - }); - } else { - break; - } - } - - Ok(e) - } - - // Parse all atomic expressions which don't have to worry about precedence - // interactions, as they are fully contained. - #[cfg(feature = "full")] - fn atom_expr(input: ParseStream, allow_struct: AllowStruct) -> Result<Expr> { - if input.peek(token::Group) { - expr_group(input, allow_struct) - } else if input.peek(Lit) { - input.parse().map(Expr::Lit) - } else if input.peek(Token![async]) - && (input.peek2(token::Brace) || input.peek2(Token![move]) && input.peek3(token::Brace)) - { - input.parse().map(Expr::Async) - } else if input.peek(Token![try]) && input.peek2(token::Brace) { - input.parse().map(Expr::TryBlock) - } else if input.peek(Token![|]) - || input.peek(Token![move]) - || input.peek(Token![for]) - && generics::parsing::choose_generics_over_qpath_after_keyword(input) - || input.peek(Token![const]) && !input.peek2(token::Brace) - || input.peek(Token![static]) - || input.peek(Token![async]) && (input.peek2(Token![|]) || input.peek2(Token![move])) - { - expr_closure(input, allow_struct).map(Expr::Closure) - } else if token::parsing::peek_keyword(input.cursor(), "builtin") && input.peek2(Token![#]) - { - expr_builtin(input) - } else if input.peek(Ident) - || input.peek(Token![::]) - || input.peek(Token![<]) - || input.peek(Token![self]) - || input.peek(Token![Self]) - || input.peek(Token![super]) - || input.peek(Token![crate]) - || input.peek(Token![try]) && (input.peek2(Token![!]) || input.peek2(Token![::])) - { - path_or_macro_or_struct(input, allow_struct) - } else if input.peek(token::Paren) { - paren_or_tuple(input) - } else if input.peek(Token![break]) { - expr_break(input, allow_struct).map(Expr::Break) - } else if input.peek(Token![continue]) { - input.parse().map(Expr::Continue) - } else if input.peek(Token![return]) { - input.parse().map(Expr::Return) - } else if input.peek(Token![become]) { - expr_become(input) - } else if input.peek(token::Bracket) { - array_or_repeat(input) - } else if input.peek(Token![let]) { - expr_let(input, allow_struct).map(Expr::Let) - } else if input.peek(Token![if]) { - input.parse().map(Expr::If) - } else if input.peek(Token![while]) { - input.parse().map(Expr::While) - } else if input.peek(Token![for]) { - input.parse().map(Expr::ForLoop) - } else if input.peek(Token![loop]) { - input.parse().map(Expr::Loop) - } else if input.peek(Token![match]) { - input.parse().map(Expr::Match) - } else if input.peek(Token![yield]) { - input.parse().map(Expr::Yield) - } else if input.peek(Token![unsafe]) { - 
input.parse().map(Expr::Unsafe) - } else if input.peek(Token![const]) { - input.parse().map(Expr::Const) - } else if input.peek(token::Brace) { - input.parse().map(Expr::Block) - } else if input.peek(Token![..]) { - expr_range(input, allow_struct).map(Expr::Range) - } else if input.peek(Token![_]) { - input.parse().map(Expr::Infer) - } else if input.peek(Lifetime) { - atom_labeled(input) - } else { - Err(input.error("expected an expression")) - } - } - - #[cfg(feature = "full")] - fn atom_labeled(input: ParseStream) -> Result<Expr> { - let the_label: Label = input.parse()?; - let mut expr = if input.peek(Token![while]) { - Expr::While(input.parse()?) - } else if input.peek(Token![for]) { - Expr::ForLoop(input.parse()?) - } else if input.peek(Token![loop]) { - Expr::Loop(input.parse()?) - } else if input.peek(token::Brace) { - Expr::Block(input.parse()?) - } else { - return Err(input.error("expected loop or block expression")); - }; - match &mut expr { - Expr::While(ExprWhile { label, .. }) - | Expr::ForLoop(ExprForLoop { label, .. }) - | Expr::Loop(ExprLoop { label, .. }) - | Expr::Block(ExprBlock { label, .. }) => *label = Some(the_label), - _ => unreachable!(), - } - Ok(expr) - } - - #[cfg(not(feature = "full"))] - fn atom_expr(input: ParseStream) -> Result<Expr> { - if input.peek(token::Group) { - expr_group(input) - } else if input.peek(Lit) { - input.parse().map(Expr::Lit) - } else if input.peek(token::Paren) { - paren_or_tuple(input) - } else if input.peek(Ident) - || input.peek(Token![::]) - || input.peek(Token![<]) - || input.peek(Token![self]) - || input.peek(Token![Self]) - || input.peek(Token![super]) - || input.peek(Token![crate]) - { - path_or_macro_or_struct(input) - } else if input.is_empty() { - Err(input.error("expected an expression")) - } else { - if input.peek(token::Brace) { - let scan = input.fork(); - let content; - braced!(content in scan); - if content.parse::<Expr>().is_ok() && content.is_empty() { - let expr_block = verbatim::between(input, &scan); - input.advance_to(&scan); - return Ok(Expr::Verbatim(expr_block)); - } - } - Err(input.error("unsupported expression; enable syn's features=[\"full\"]")) - } - } - - #[cfg(feature = "full")] - fn expr_builtin(input: ParseStream) -> Result<Expr> { - let begin = input.fork(); - - token::parsing::keyword(input, "builtin")?; - input.parse::<Token![#]>()?; - input.parse::<Ident>()?; - - let args; - parenthesized!(args in input); - args.parse::<TokenStream>()?; - - Ok(Expr::Verbatim(verbatim::between(&begin, input))) - } - - fn path_or_macro_or_struct( - input: ParseStream, - #[cfg(feature = "full")] allow_struct: AllowStruct, - ) -> Result<Expr> { - let expr_style = true; - let (qself, path) = path::parsing::qpath(input, expr_style)?; - rest_of_path_or_macro_or_struct( - qself, - path, - input, - #[cfg(feature = "full")] - allow_struct, - ) - } - - fn rest_of_path_or_macro_or_struct( - qself: Option<QSelf>, - path: Path, - input: ParseStream, - #[cfg(feature = "full")] allow_struct: AllowStruct, - ) -> Result<Expr> { - if qself.is_none() - && input.peek(Token![!]) - && !input.peek(Token![!=]) - && path.is_mod_style() - { - let bang_token: Token![!] 
= input.parse()?; - let (delimiter, tokens) = mac::parse_delimiter(input)?; - return Ok(Expr::Macro(ExprMacro { - attrs: Vec::new(), - mac: Macro { - path, - bang_token, - delimiter, - tokens, - }, - })); - } - - #[cfg(not(feature = "full"))] - let allow_struct = (true,); - if allow_struct.0 && input.peek(token::Brace) { - return expr_struct_helper(input, qself, path).map(Expr::Struct); - } - - Ok(Expr::Path(ExprPath { - attrs: Vec::new(), - qself, - path, - })) - } - - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - impl Parse for ExprMacro { - fn parse(input: ParseStream) -> Result<Self> { - Ok(ExprMacro { - attrs: Vec::new(), - mac: input.parse()?, - }) - } - } - - fn paren_or_tuple(input: ParseStream) -> Result<Expr> { - let content; - let paren_token = parenthesized!(content in input); - if content.is_empty() { - return Ok(Expr::Tuple(ExprTuple { - attrs: Vec::new(), - paren_token, - elems: Punctuated::new(), - })); - } - - let first: Expr = content.parse()?; - if content.is_empty() { - return Ok(Expr::Paren(ExprParen { - attrs: Vec::new(), - paren_token, - expr: Box::new(first), - })); - } - - let mut elems = Punctuated::new(); - elems.push_value(first); - while !content.is_empty() { - let punct = content.parse()?; - elems.push_punct(punct); - if content.is_empty() { - break; - } - let value = content.parse()?; - elems.push_value(value); - } - Ok(Expr::Tuple(ExprTuple { - attrs: Vec::new(), - paren_token, - elems, - })) - } - - #[cfg(feature = "full")] - fn array_or_repeat(input: ParseStream) -> Result<Expr> { - let content; - let bracket_token = bracketed!(content in input); - if content.is_empty() { - return Ok(Expr::Array(ExprArray { - attrs: Vec::new(), - bracket_token, - elems: Punctuated::new(), - })); - } - - let first: Expr = content.parse()?; - if content.is_empty() || content.peek(Token![,]) { - let mut elems = Punctuated::new(); - elems.push_value(first); - while !content.is_empty() { - let punct = content.parse()?; - elems.push_punct(punct); - if content.is_empty() { - break; - } - let value = content.parse()?; - elems.push_value(value); - } - Ok(Expr::Array(ExprArray { - attrs: Vec::new(), - bracket_token, - elems, - })) - } else if content.peek(Token![;]) { - let semi_token: Token![;] = content.parse()?; - let len: Expr = content.parse()?; - Ok(Expr::Repeat(ExprRepeat { - attrs: Vec::new(), - bracket_token, - expr: Box::new(first), - semi_token, - len: Box::new(len), - })) - } else { - Err(content.error("expected `,` or `;`")) - } - } - - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - impl Parse for ExprArray { - fn parse(input: ParseStream) -> Result<Self> { - let content; - let bracket_token = bracketed!(content in input); - let mut elems = Punctuated::new(); - - while !content.is_empty() { - let first: Expr = content.parse()?; - elems.push_value(first); - if content.is_empty() { - break; - } - let punct = content.parse()?; - elems.push_punct(punct); - } - - Ok(ExprArray { - attrs: Vec::new(), - bracket_token, - elems, - }) - } - } - - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - impl Parse for ExprRepeat { - fn parse(input: ParseStream) -> Result<Self> { - let content; - Ok(ExprRepeat { - bracket_token: bracketed!(content in input), - attrs: Vec::new(), - expr: content.parse()?, - semi_token: content.parse()?, - len: content.parse()?, - }) - } - } - - #[cfg(feature = "full")] - fn continue_parsing_early(mut expr: &Expr) -> bool { - while let Expr::Group(group) = expr { - expr = &group.expr; - } 
- match expr { - Expr::If(_) - | Expr::While(_) - | Expr::ForLoop(_) - | Expr::Loop(_) - | Expr::Match(_) - | Expr::TryBlock(_) - | Expr::Unsafe(_) - | Expr::Const(_) - | Expr::Block(_) => false, - _ => true, - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - impl Parse for ExprLit { - fn parse(input: ParseStream) -> Result<Self> { - Ok(ExprLit { - attrs: Vec::new(), - lit: input.parse()?, - }) - } - } - - fn expr_group( - input: ParseStream, - #[cfg(feature = "full")] allow_struct: AllowStruct, - ) -> Result<Expr> { - let group = crate::group::parse_group(input)?; - let mut inner: Expr = group.content.parse()?; - - match inner { - Expr::Path(mut expr) if expr.attrs.is_empty() => { - let grouped_len = expr.path.segments.len(); - Path::parse_rest(input, &mut expr.path, true)?; - match rest_of_path_or_macro_or_struct( - expr.qself, - expr.path, - input, - #[cfg(feature = "full")] - allow_struct, - )? { - Expr::Path(expr) if expr.path.segments.len() == grouped_len => { - inner = Expr::Path(expr); - } - extended => return Ok(extended), - } - } - _ => {} - } - - Ok(Expr::Group(ExprGroup { - attrs: Vec::new(), - group_token: group.token, - expr: Box::new(inner), - })) - } - - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - impl Parse for ExprParen { - fn parse(input: ParseStream) -> Result<Self> { - let content; - Ok(ExprParen { - attrs: Vec::new(), - paren_token: parenthesized!(content in input), - expr: content.parse()?, - }) - } - } - - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - impl Parse for ExprLet { - fn parse(input: ParseStream) -> Result<Self> { - let allow_struct = AllowStruct(true); - expr_let(input, allow_struct) - } - } - - #[cfg(feature = "full")] - fn expr_let(input: ParseStream, allow_struct: AllowStruct) -> Result<ExprLet> { - Ok(ExprLet { - attrs: Vec::new(), - let_token: input.parse()?, - pat: Box::new(Pat::parse_multi_with_leading_vert(input)?), - eq_token: input.parse()?, - expr: Box::new({ - let lhs = unary_expr(input, allow_struct)?; - parse_expr(input, lhs, allow_struct, Precedence::Compare)? 
- }), - }) - } - - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - impl Parse for ExprIf { - fn parse(input: ParseStream) -> Result<Self> { - let attrs = input.call(Attribute::parse_outer)?; - - let mut clauses = Vec::new(); - let mut expr; - loop { - let if_token: Token![if] = input.parse()?; - let cond = input.call(Expr::parse_without_eager_brace)?; - let then_branch: Block = input.parse()?; - - expr = ExprIf { - attrs: Vec::new(), - if_token, - cond: Box::new(cond), - then_branch, - else_branch: None, - }; - - if !input.peek(Token![else]) { - break; - } - - let else_token: Token![else] = input.parse()?; - let lookahead = input.lookahead1(); - if lookahead.peek(Token![if]) { - expr.else_branch = Some((else_token, Box::new(Expr::PLACEHOLDER))); - clauses.push(expr); - } else if lookahead.peek(token::Brace) { - expr.else_branch = Some(( - else_token, - Box::new(Expr::Block(ExprBlock { - attrs: Vec::new(), - label: None, - block: input.parse()?, - })), - )); - break; - } else { - return Err(lookahead.error()); - } - } - - while let Some(mut prev) = clauses.pop() { - *prev.else_branch.as_mut().unwrap().1 = Expr::If(expr); - expr = prev; - } - expr.attrs = attrs; - Ok(expr) - } - } - - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - impl Parse for ExprInfer { - fn parse(input: ParseStream) -> Result<Self> { - Ok(ExprInfer { - attrs: input.call(Attribute::parse_outer)?, - underscore_token: input.parse()?, - }) - } - } - - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - impl Parse for ExprForLoop { - fn parse(input: ParseStream) -> Result<Self> { - let mut attrs = input.call(Attribute::parse_outer)?; - let label: Option<Label> = input.parse()?; - let for_token: Token![for] = input.parse()?; - - let pat = Pat::parse_multi_with_leading_vert(input)?; - - let in_token: Token![in] = input.parse()?; - let expr: Expr = input.call(Expr::parse_without_eager_brace)?; - - let content; - let brace_token = braced!(content in input); - attr::parsing::parse_inner(&content, &mut attrs)?; - let stmts = content.call(Block::parse_within)?; - - Ok(ExprForLoop { - attrs, - label, - for_token, - pat: Box::new(pat), - in_token, - expr: Box::new(expr), - body: Block { brace_token, stmts }, - }) - } - } - - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - impl Parse for ExprLoop { - fn parse(input: ParseStream) -> Result<Self> { - let mut attrs = input.call(Attribute::parse_outer)?; - let label: Option<Label> = input.parse()?; - let loop_token: Token![loop] = input.parse()?; - - let content; - let brace_token = braced!(content in input); - attr::parsing::parse_inner(&content, &mut attrs)?; - let stmts = content.call(Block::parse_within)?; - - Ok(ExprLoop { - attrs, - label, - loop_token, - body: Block { brace_token, stmts }, - }) - } - } - - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - impl Parse for ExprMatch { - fn parse(input: ParseStream) -> Result<Self> { - let mut attrs = input.call(Attribute::parse_outer)?; - let match_token: Token![match] = input.parse()?; - let expr = Expr::parse_without_eager_brace(input)?; - - let content; - let brace_token = braced!(content in input); - attr::parsing::parse_inner(&content, &mut attrs)?; - - let arms = Arm::parse_multiple(&content)?; - - Ok(ExprMatch { - attrs, - match_token, - expr: Box::new(expr), - brace_token, - arms, - }) - } - } - - macro_rules! 
impl_by_parsing_expr { - ( - $( - $expr_type:ty, $variant:ident, $msg:expr, - )* - ) => { - $( - #[cfg(all(feature = "full", feature = "printing"))] - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - impl Parse for $expr_type { - fn parse(input: ParseStream) -> Result<Self> { - let mut expr: Expr = input.parse()?; - loop { - match expr { - Expr::$variant(inner) => return Ok(inner), - Expr::Group(next) => expr = *next.expr, - _ => return Err(Error::new_spanned(expr, $msg)), - } - } - } - } - )* - }; - } - - impl_by_parsing_expr! { - ExprAssign, Assign, "expected assignment expression", - ExprAwait, Await, "expected await expression", - ExprBinary, Binary, "expected binary operation", - ExprCall, Call, "expected function call expression", - ExprCast, Cast, "expected cast expression", - ExprField, Field, "expected struct field access", - ExprIndex, Index, "expected indexing expression", - ExprMethodCall, MethodCall, "expected method call expression", - ExprRange, Range, "expected range expression", - ExprTry, Try, "expected try expression", - ExprTuple, Tuple, "expected tuple expression", - } - - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - impl Parse for ExprUnary { - fn parse(input: ParseStream) -> Result<Self> { - let attrs = Vec::new(); - let allow_struct = AllowStruct(true); - expr_unary(input, attrs, allow_struct) - } - } - - #[cfg(feature = "full")] - fn expr_unary( - input: ParseStream, - attrs: Vec<Attribute>, - allow_struct: AllowStruct, - ) -> Result<ExprUnary> { - Ok(ExprUnary { - attrs, - op: input.parse()?, - expr: Box::new(unary_expr(input, allow_struct)?), - }) - } - - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - impl Parse for ExprClosure { - fn parse(input: ParseStream) -> Result<Self> { - let allow_struct = AllowStruct(true); - expr_closure(input, allow_struct) - } - } - - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - impl Parse for ExprRawAddr { - fn parse(input: ParseStream) -> Result<Self> { - let allow_struct = AllowStruct(true); - Ok(ExprRawAddr { - attrs: Vec::new(), - and_token: input.parse()?, - raw: input.parse()?, - mutability: input.parse()?, - expr: Box::new(unary_expr(input, allow_struct)?), - }) - } - } - - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - impl Parse for ExprReference { - fn parse(input: ParseStream) -> Result<Self> { - let allow_struct = AllowStruct(true); - Ok(ExprReference { - attrs: Vec::new(), - and_token: input.parse()?, - mutability: input.parse()?, - expr: Box::new(unary_expr(input, allow_struct)?), - }) - } - } - - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - impl Parse for ExprBreak { - fn parse(input: ParseStream) -> Result<Self> { - let allow_struct = AllowStruct(true); - expr_break(input, allow_struct) - } - } - - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - impl Parse for ExprReturn { - fn parse(input: ParseStream) -> Result<Self> { - Ok(ExprReturn { - attrs: Vec::new(), - return_token: input.parse()?, - expr: { - if Expr::peek(input) { - Some(input.parse()?) 
- } else { - None - } - }, - }) - } - } - - #[cfg(feature = "full")] - fn expr_become(input: ParseStream) -> Result<Expr> { - let begin = input.fork(); - input.parse::<Token![become]>()?; - input.parse::<Expr>()?; - Ok(Expr::Verbatim(verbatim::between(&begin, input))) - } - - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - impl Parse for ExprTryBlock { - fn parse(input: ParseStream) -> Result<Self> { - Ok(ExprTryBlock { - attrs: Vec::new(), - try_token: input.parse()?, - block: input.parse()?, - }) - } - } - - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - impl Parse for ExprYield { - fn parse(input: ParseStream) -> Result<Self> { - Ok(ExprYield { - attrs: Vec::new(), - yield_token: input.parse()?, - expr: { - if Expr::peek(input) { - Some(input.parse()?) - } else { - None - } - }, - }) - } - } - - #[cfg(feature = "full")] - fn expr_closure(input: ParseStream, allow_struct: AllowStruct) -> Result<ExprClosure> { - let lifetimes: Option<BoundLifetimes> = input.parse()?; - let constness: Option<Token![const]> = input.parse()?; - let movability: Option<Token![static]> = input.parse()?; - let asyncness: Option<Token![async]> = input.parse()?; - let capture: Option<Token![move]> = input.parse()?; - let or1_token: Token![|] = input.parse()?; - - let mut inputs = Punctuated::new(); - loop { - if input.peek(Token![|]) { - break; - } - let value = closure_arg(input)?; - inputs.push_value(value); - if input.peek(Token![|]) { - break; - } - let punct: Token![,] = input.parse()?; - inputs.push_punct(punct); - } - - let or2_token: Token![|] = input.parse()?; - - let (output, body) = if input.peek(Token![->]) { - let arrow_token: Token![->] = input.parse()?; - let ty: Type = input.parse()?; - let body: Block = input.parse()?; - let output = ReturnType::Type(arrow_token, Box::new(ty)); - let block = Expr::Block(ExprBlock { - attrs: Vec::new(), - label: None, - block: body, - }); - (output, block) - } else { - let body = ambiguous_expr(input, allow_struct)?; - (ReturnType::Default, body) - }; - - Ok(ExprClosure { - attrs: Vec::new(), - lifetimes, - constness, - movability, - asyncness, - capture, - or1_token, - inputs, - or2_token, - output, - body: Box::new(body), - }) - } - - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - impl Parse for ExprAsync { - fn parse(input: ParseStream) -> Result<Self> { - Ok(ExprAsync { - attrs: Vec::new(), - async_token: input.parse()?, - capture: input.parse()?, - block: input.parse()?, - }) - } - } - - #[cfg(feature = "full")] - fn closure_arg(input: ParseStream) -> Result<Pat> { - let attrs = input.call(Attribute::parse_outer)?; - let mut pat = Pat::parse_single(input)?; - - if input.peek(Token![:]) { - Ok(Pat::Type(PatType { - attrs, - pat: Box::new(pat), - colon_token: input.parse()?, - ty: input.parse()?, - })) - } else { - match &mut pat { - Pat::Const(pat) => pat.attrs = attrs, - Pat::Ident(pat) => pat.attrs = attrs, - Pat::Lit(pat) => pat.attrs = attrs, - Pat::Macro(pat) => pat.attrs = attrs, - Pat::Or(pat) => pat.attrs = attrs, - Pat::Paren(pat) => pat.attrs = attrs, - Pat::Path(pat) => pat.attrs = attrs, - Pat::Range(pat) => pat.attrs = attrs, - Pat::Reference(pat) => pat.attrs = attrs, - Pat::Rest(pat) => pat.attrs = attrs, - Pat::Slice(pat) => pat.attrs = attrs, - Pat::Struct(pat) => pat.attrs = attrs, - Pat::Tuple(pat) => pat.attrs = attrs, - Pat::TupleStruct(pat) => pat.attrs = attrs, - Pat::Type(_) => unreachable!(), - Pat::Verbatim(_) => {} - Pat::Wild(pat) => 
pat.attrs = attrs, - } - Ok(pat) - } - } - - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - impl Parse for ExprWhile { - fn parse(input: ParseStream) -> Result<Self> { - let mut attrs = input.call(Attribute::parse_outer)?; - let label: Option<Label> = input.parse()?; - let while_token: Token![while] = input.parse()?; - let cond = Expr::parse_without_eager_brace(input)?; - - let content; - let brace_token = braced!(content in input); - attr::parsing::parse_inner(&content, &mut attrs)?; - let stmts = content.call(Block::parse_within)?; - - Ok(ExprWhile { - attrs, - label, - while_token, - cond: Box::new(cond), - body: Block { brace_token, stmts }, - }) - } - } - - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - impl Parse for ExprConst { - fn parse(input: ParseStream) -> Result<Self> { - let const_token: Token![const] = input.parse()?; - - let content; - let brace_token = braced!(content in input); - let inner_attrs = content.call(Attribute::parse_inner)?; - let stmts = content.call(Block::parse_within)?; - - Ok(ExprConst { - attrs: inner_attrs, - const_token, - block: Block { brace_token, stmts }, - }) - } - } - - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - impl Parse for Label { - fn parse(input: ParseStream) -> Result<Self> { - Ok(Label { - name: input.parse()?, - colon_token: input.parse()?, - }) - } - } - - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - impl Parse for Option<Label> { - fn parse(input: ParseStream) -> Result<Self> { - if input.peek(Lifetime) { - input.parse().map(Some) - } else { - Ok(None) - } - } - } - - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - impl Parse for ExprContinue { - fn parse(input: ParseStream) -> Result<Self> { - Ok(ExprContinue { - attrs: Vec::new(), - continue_token: input.parse()?, - label: input.parse()?, - }) - } - } - - #[cfg(feature = "full")] - fn expr_break(input: ParseStream, allow_struct: AllowStruct) -> Result<ExprBreak> { - let break_token: Token![break] = input.parse()?; - - let ahead = input.fork(); - let label: Option<Lifetime> = ahead.parse()?; - if label.is_some() && ahead.peek(Token![:]) { - // Not allowed: `break 'label: loop {...}` - // Parentheses are required. `break ('label: loop {...})` - let _: Expr = input.parse()?; - let start_span = label.unwrap().apostrophe; - let end_span = input.cursor().prev_span(); - return Err(crate::error::new2( - start_span, - end_span, - "parentheses required", - )); - } - - input.advance_to(&ahead); - let expr = if Expr::peek(input) && (allow_struct.0 || !input.peek(token::Brace)) { - Some(input.parse()?) 
- } else { - None - }; - - Ok(ExprBreak { - attrs: Vec::new(), - break_token, - label, - expr, - }) - } - - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - impl Parse for FieldValue { - fn parse(input: ParseStream) -> Result<Self> { - let attrs = input.call(Attribute::parse_outer)?; - let member: Member = input.parse()?; - let (colon_token, value) = if input.peek(Token![:]) || !member.is_named() { - let colon_token: Token![:] = input.parse()?; - let value: Expr = input.parse()?; - (Some(colon_token), value) - } else if let Member::Named(ident) = &member { - let value = Expr::Path(ExprPath { - attrs: Vec::new(), - qself: None, - path: Path::from(ident.clone()), - }); - (None, value) - } else { - unreachable!() - }; - - Ok(FieldValue { - attrs, - member, - colon_token, - expr: value, - }) - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - impl Parse for ExprStruct { - fn parse(input: ParseStream) -> Result<Self> { - let expr_style = true; - let (qself, path) = path::parsing::qpath(input, expr_style)?; - expr_struct_helper(input, qself, path) - } - } - - fn expr_struct_helper( - input: ParseStream, - qself: Option<QSelf>, - path: Path, - ) -> Result<ExprStruct> { - let content; - let brace_token = braced!(content in input); - - let mut fields = Punctuated::new(); - while !content.is_empty() { - if content.peek(Token![..]) { - return Ok(ExprStruct { - attrs: Vec::new(), - qself, - path, - brace_token, - fields, - dot2_token: Some(content.parse()?), - rest: if content.is_empty() { - None - } else { - Some(Box::new(content.parse()?)) - }, - }); - } - - fields.push(content.parse()?); - if content.is_empty() { - break; - } - let punct: Token![,] = content.parse()?; - fields.push_punct(punct); - } - - Ok(ExprStruct { - attrs: Vec::new(), - qself, - path, - brace_token, - fields, - dot2_token: None, - rest: None, - }) - } - - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - impl Parse for ExprUnsafe { - fn parse(input: ParseStream) -> Result<Self> { - let unsafe_token: Token![unsafe] = input.parse()?; - - let content; - let brace_token = braced!(content in input); - let inner_attrs = content.call(Attribute::parse_inner)?; - let stmts = content.call(Block::parse_within)?; - - Ok(ExprUnsafe { - attrs: inner_attrs, - unsafe_token, - block: Block { brace_token, stmts }, - }) - } - } - - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - impl Parse for ExprBlock { - fn parse(input: ParseStream) -> Result<Self> { - let mut attrs = input.call(Attribute::parse_outer)?; - let label: Option<Label> = input.parse()?; - - let content; - let brace_token = braced!(content in input); - attr::parsing::parse_inner(&content, &mut attrs)?; - let stmts = content.call(Block::parse_within)?; - - Ok(ExprBlock { - attrs, - label, - block: Block { brace_token, stmts }, - }) - } - } - - #[cfg(feature = "full")] - fn expr_range(input: ParseStream, allow_struct: AllowStruct) -> Result<ExprRange> { - let limits: RangeLimits = input.parse()?; - let end = parse_range_end(input, &limits, allow_struct)?; - Ok(ExprRange { - attrs: Vec::new(), - start: None, - limits, - end, - }) - } - - #[cfg(feature = "full")] - fn parse_range_end( - input: ParseStream, - limits: &RangeLimits, - allow_struct: AllowStruct, - ) -> Result<Option<Box<Expr>>> { - if matches!(limits, RangeLimits::HalfOpen(_)) - && (input.is_empty() - || input.peek(Token![,]) - || input.peek(Token![;]) - || input.peek(Token![.]) && !input.peek(Token![..]) - || input.peek(Token![?]) - || 
input.peek(Token![=>]) - || !allow_struct.0 && input.peek(token::Brace) - || input.peek(Token![=]) - || input.peek(Token![+]) - || input.peek(Token![/]) - || input.peek(Token![%]) - || input.peek(Token![^]) - || input.peek(Token![>]) - || input.peek(Token![<=]) - || input.peek(Token![!=]) - || input.peek(Token![-=]) - || input.peek(Token![*=]) - || input.peek(Token![&=]) - || input.peek(Token![|=]) - || input.peek(Token![<<=]) - || input.peek(Token![as])) - { - Ok(None) - } else { - let end = parse_binop_rhs(input, allow_struct, Precedence::Range)?; - Ok(Some(end)) - } - } - - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - impl Parse for RangeLimits { - fn parse(input: ParseStream) -> Result<Self> { - let lookahead = input.lookahead1(); - let dot_dot = lookahead.peek(Token![..]); - let dot_dot_eq = dot_dot && lookahead.peek(Token![..=]); - let dot_dot_dot = dot_dot && input.peek(Token![...]); - if dot_dot_eq { - input.parse().map(RangeLimits::Closed) - } else if dot_dot && !dot_dot_dot { - input.parse().map(RangeLimits::HalfOpen) - } else { - Err(lookahead.error()) - } - } - } - - #[cfg(feature = "full")] - impl RangeLimits { - pub(crate) fn parse_obsolete(input: ParseStream) -> Result<Self> { - let lookahead = input.lookahead1(); - let dot_dot = lookahead.peek(Token![..]); - let dot_dot_eq = dot_dot && lookahead.peek(Token![..=]); - let dot_dot_dot = dot_dot && input.peek(Token![...]); - if dot_dot_eq { - input.parse().map(RangeLimits::Closed) - } else if dot_dot_dot { - let dot3: Token![...] = input.parse()?; - Ok(RangeLimits::Closed(Token![..=](dot3.spans))) - } else if dot_dot { - input.parse().map(RangeLimits::HalfOpen) - } else { - Err(lookahead.error()) - } - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - impl Parse for ExprPath { - fn parse(input: ParseStream) -> Result<Self> { - #[cfg(not(feature = "full"))] - let attrs = Vec::new(); - #[cfg(feature = "full")] - let attrs = input.call(Attribute::parse_outer)?; - - let expr_style = true; - let (qself, path) = path::parsing::qpath(input, expr_style)?; - - Ok(ExprPath { attrs, qself, path }) - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - impl Parse for Member { - fn parse(input: ParseStream) -> Result<Self> { - if input.peek(Ident) { - input.parse().map(Member::Named) - } else if input.peek(LitInt) { - input.parse().map(Member::Unnamed) - } else { - Err(input.error("expected identifier or integer")) - } - } - } - - #[cfg(feature = "full")] - impl Arm { - pub(crate) fn parse_multiple(input: ParseStream) -> Result<Vec<Self>> { - let mut arms = Vec::new(); - while !input.is_empty() { - arms.push(input.call(Arm::parse)?); - } - Ok(arms) - } - } - - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - impl Parse for Arm { - fn parse(input: ParseStream) -> Result<Arm> { - let requires_comma; - Ok(Arm { - attrs: input.call(Attribute::parse_outer)?, - pat: Pat::parse_multi_with_leading_vert(input)?, - guard: { - if input.peek(Token![if]) { - let if_token: Token![if] = input.parse()?; - let guard: Expr = input.parse()?; - Some((if_token, Box::new(guard))) - } else { - None - } - }, - fat_arrow_token: input.parse()?, - body: { - let body = Expr::parse_with_earlier_boundary_rule(input)?; - requires_comma = classify::requires_comma_to_be_match_arm(&body); - Box::new(body) - }, - comma: { - if requires_comma && !input.is_empty() { - Some(input.parse()?) - } else { - input.parse()? 
- } - }, - }) - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - impl Parse for Index { - fn parse(input: ParseStream) -> Result<Self> { - let lit: LitInt = input.parse()?; - if lit.suffix().is_empty() { - Ok(Index { - index: lit - .base10_digits() - .parse() - .map_err(|err| Error::new(lit.span(), err))?, - span: lit.span(), - }) - } else { - Err(Error::new(lit.span(), "expected unsuffixed integer")) - } - } - } - - fn multi_index(e: &mut Expr, dot_token: &mut Token![.], float: LitFloat) -> Result<bool> { - let float_token = float.token(); - let float_span = float_token.span(); - let mut float_repr = float_token.to_string(); - let trailing_dot = float_repr.ends_with('.'); - if trailing_dot { - float_repr.truncate(float_repr.len() - 1); - } - - let mut offset = 0; - for part in float_repr.split('.') { - let mut index: Index = - crate::parse_str(part).map_err(|err| Error::new(float_span, err))?; - let part_end = offset + part.len(); - index.span = float_token.subspan(offset..part_end).unwrap_or(float_span); - - let base = mem::replace(e, Expr::PLACEHOLDER); - *e = Expr::Field(ExprField { - attrs: Vec::new(), - base: Box::new(base), - dot_token: Token![.](dot_token.span), - member: Member::Unnamed(index), - }); - - let dot_span = float_token - .subspan(part_end..part_end + 1) - .unwrap_or(float_span); - *dot_token = Token![.](dot_span); - offset = part_end + 1; - } - - Ok(!trailing_dot) - } - - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - impl Parse for PointerMutability { - fn parse(input: ParseStream) -> Result<Self> { - let lookahead = input.lookahead1(); - if lookahead.peek(Token![const]) { - Ok(PointerMutability::Const(input.parse()?)) - } else if lookahead.peek(Token![mut]) { - Ok(PointerMutability::Mut(input.parse()?)) - } else { - Err(lookahead.error()) - } - } - } - - fn check_cast(input: ParseStream) -> Result<()> { - let kind = if input.peek(Token![.]) && !input.peek(Token![..]) { - if input.peek2(Token![await]) { - "`.await`" - } else if input.peek2(Ident) && (input.peek3(token::Paren) || input.peek3(Token![::])) { - "a method call" - } else { - "a field access" - } - } else if input.peek(Token![?]) { - "`?`" - } else if input.peek(token::Bracket) { - "indexing" - } else if input.peek(token::Paren) { - "a function call" - } else { - return Ok(()); - }; - let msg = format!("casts cannot be followed by {}", kind); - Err(input.error(msg)) - } -} - -#[cfg(feature = "printing")] -pub(crate) mod printing { - use crate::attr::Attribute; - #[cfg(feature = "full")] - use crate::attr::FilterAttrs; - #[cfg(feature = "full")] - use crate::classify; - #[cfg(feature = "full")] - use crate::expr::{ - Arm, ExprArray, ExprAssign, ExprAsync, ExprAwait, ExprBlock, ExprBreak, ExprClosure, - ExprConst, ExprContinue, ExprForLoop, ExprIf, ExprInfer, ExprLet, ExprLoop, ExprMatch, - ExprRange, ExprRawAddr, ExprRepeat, ExprReturn, ExprTry, ExprTryBlock, ExprUnsafe, - ExprWhile, ExprYield, Label, PointerMutability, RangeLimits, - }; - use crate::expr::{ - Expr, ExprBinary, ExprCall, ExprCast, ExprField, ExprGroup, ExprIndex, ExprLit, ExprMacro, - ExprMethodCall, ExprParen, ExprPath, ExprReference, ExprStruct, ExprTuple, ExprUnary, - FieldValue, Index, Member, - }; - use crate::fixup::FixupContext; - use crate::op::BinOp; - use crate::path; - use crate::path::printing::PathStyle; - use crate::precedence::Precedence; - use crate::token; - #[cfg(feature = "full")] - use crate::ty::ReturnType; - use proc_macro2::{Literal, Span, TokenStream}; - use 
quote::{ToTokens, TokenStreamExt as _}; - - #[cfg(feature = "full")] - pub(crate) fn outer_attrs_to_tokens(attrs: &[Attribute], tokens: &mut TokenStream) { - tokens.append_all(attrs.outer()); - } - - #[cfg(feature = "full")] - fn inner_attrs_to_tokens(attrs: &[Attribute], tokens: &mut TokenStream) { - tokens.append_all(attrs.inner()); - } - - #[cfg(not(feature = "full"))] - pub(crate) fn outer_attrs_to_tokens(_attrs: &[Attribute], _tokens: &mut TokenStream) {} - - pub(crate) fn print_subexpression( - expr: &Expr, - needs_group: bool, - tokens: &mut TokenStream, - mut fixup: FixupContext, - ) { - if needs_group { - // If we are surrounding the whole cond in parentheses, such as: - // - // if (return Struct {}) {} - // - // then there is no need for parenthesizing the individual struct - // expressions within. On the other hand if the whole cond is not - // parenthesized, then print_expr must parenthesize exterior struct - // literals. - // - // if x == (Struct {}) {} - // - fixup = FixupContext::NONE; - } - - let do_print_expr = |tokens: &mut TokenStream| print_expr(expr, tokens, fixup); - - if needs_group { - token::Paren::default().surround(tokens, do_print_expr); - } else { - do_print_expr(tokens); - } - } - - pub(crate) fn print_expr(expr: &Expr, tokens: &mut TokenStream, mut fixup: FixupContext) { - #[cfg(feature = "full")] - let needs_group = fixup.parenthesize(expr); - #[cfg(not(feature = "full"))] - let needs_group = false; - - if needs_group { - fixup = FixupContext::NONE; - } - - let do_print_expr = |tokens: &mut TokenStream| match expr { - #[cfg(feature = "full")] - Expr::Array(e) => e.to_tokens(tokens), - #[cfg(feature = "full")] - Expr::Assign(e) => print_expr_assign(e, tokens, fixup), - #[cfg(feature = "full")] - Expr::Async(e) => e.to_tokens(tokens), - #[cfg(feature = "full")] - Expr::Await(e) => print_expr_await(e, tokens, fixup), - Expr::Binary(e) => print_expr_binary(e, tokens, fixup), - #[cfg(feature = "full")] - Expr::Block(e) => e.to_tokens(tokens), - #[cfg(feature = "full")] - Expr::Break(e) => print_expr_break(e, tokens, fixup), - Expr::Call(e) => print_expr_call(e, tokens, fixup), - Expr::Cast(e) => print_expr_cast(e, tokens, fixup), - #[cfg(feature = "full")] - Expr::Closure(e) => print_expr_closure(e, tokens, fixup), - #[cfg(feature = "full")] - Expr::Const(e) => e.to_tokens(tokens), - #[cfg(feature = "full")] - Expr::Continue(e) => e.to_tokens(tokens), - Expr::Field(e) => print_expr_field(e, tokens, fixup), - #[cfg(feature = "full")] - Expr::ForLoop(e) => e.to_tokens(tokens), - Expr::Group(e) => e.to_tokens(tokens), - #[cfg(feature = "full")] - Expr::If(e) => e.to_tokens(tokens), - Expr::Index(e) => print_expr_index(e, tokens, fixup), - #[cfg(feature = "full")] - Expr::Infer(e) => e.to_tokens(tokens), - #[cfg(feature = "full")] - Expr::Let(e) => print_expr_let(e, tokens, fixup), - Expr::Lit(e) => e.to_tokens(tokens), - #[cfg(feature = "full")] - Expr::Loop(e) => e.to_tokens(tokens), - Expr::Macro(e) => e.to_tokens(tokens), - #[cfg(feature = "full")] - Expr::Match(e) => e.to_tokens(tokens), - Expr::MethodCall(e) => print_expr_method_call(e, tokens, fixup), - Expr::Paren(e) => e.to_tokens(tokens), - Expr::Path(e) => e.to_tokens(tokens), - #[cfg(feature = "full")] - Expr::Range(e) => print_expr_range(e, tokens, fixup), - #[cfg(feature = "full")] - Expr::RawAddr(e) => print_expr_raw_addr(e, tokens, fixup), - Expr::Reference(e) => print_expr_reference(e, tokens, fixup), - #[cfg(feature = "full")] - Expr::Repeat(e) => e.to_tokens(tokens), - #[cfg(feature = "full")] - 
Expr::Return(e) => print_expr_return(e, tokens, fixup), - Expr::Struct(e) => e.to_tokens(tokens), - #[cfg(feature = "full")] - Expr::Try(e) => print_expr_try(e, tokens, fixup), - #[cfg(feature = "full")] - Expr::TryBlock(e) => e.to_tokens(tokens), - Expr::Tuple(e) => e.to_tokens(tokens), - Expr::Unary(e) => print_expr_unary(e, tokens, fixup), - #[cfg(feature = "full")] - Expr::Unsafe(e) => e.to_tokens(tokens), - Expr::Verbatim(e) => e.to_tokens(tokens), - #[cfg(feature = "full")] - Expr::While(e) => e.to_tokens(tokens), - #[cfg(feature = "full")] - Expr::Yield(e) => print_expr_yield(e, tokens, fixup), - - #[cfg(not(feature = "full"))] - _ => unreachable!(), - }; - - if needs_group { - token::Paren::default().surround(tokens, do_print_expr); - } else { - do_print_expr(tokens); - } - } - - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for ExprArray { - fn to_tokens(&self, tokens: &mut TokenStream) { - outer_attrs_to_tokens(&self.attrs, tokens); - self.bracket_token.surround(tokens, |tokens| { - self.elems.to_tokens(tokens); - }); - } - } - - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for ExprAssign { - fn to_tokens(&self, tokens: &mut TokenStream) { - print_expr_assign(self, tokens, FixupContext::NONE); - } - } - - #[cfg(feature = "full")] - fn print_expr_assign(e: &ExprAssign, tokens: &mut TokenStream, mut fixup: FixupContext) { - outer_attrs_to_tokens(&e.attrs, tokens); - - let needs_group = !e.attrs.is_empty(); - if needs_group { - fixup = FixupContext::NONE; - } - - let do_print_expr = |tokens: &mut TokenStream| { - let (left_prec, left_fixup) = fixup.leftmost_subexpression_with_operator( - &e.left, - false, - false, - Precedence::Assign, - ); - print_subexpression(&e.left, left_prec <= Precedence::Range, tokens, left_fixup); - e.eq_token.to_tokens(tokens); - print_expr( - &e.right, - tokens, - fixup.rightmost_subexpression_fixup(false, false, Precedence::Assign), - ); - }; - - if needs_group { - token::Paren::default().surround(tokens, do_print_expr); - } else { - do_print_expr(tokens); - } - } - - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for ExprAsync { - fn to_tokens(&self, tokens: &mut TokenStream) { - outer_attrs_to_tokens(&self.attrs, tokens); - self.async_token.to_tokens(tokens); - self.capture.to_tokens(tokens); - self.block.to_tokens(tokens); - } - } - - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for ExprAwait { - fn to_tokens(&self, tokens: &mut TokenStream) { - print_expr_await(self, tokens, FixupContext::NONE); - } - } - - #[cfg(feature = "full")] - fn print_expr_await(e: &ExprAwait, tokens: &mut TokenStream, fixup: FixupContext) { - outer_attrs_to_tokens(&e.attrs, tokens); - let (left_prec, left_fixup) = fixup.leftmost_subexpression_with_dot(&e.base); - print_subexpression( - &e.base, - left_prec < Precedence::Unambiguous, - tokens, - left_fixup, - ); - e.dot_token.to_tokens(tokens); - e.await_token.to_tokens(tokens); - } - - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for ExprBinary { - fn to_tokens(&self, tokens: &mut TokenStream) { - print_expr_binary(self, tokens, FixupContext::NONE); - } - } - - fn print_expr_binary(e: &ExprBinary, tokens: &mut TokenStream, mut fixup: FixupContext) { - outer_attrs_to_tokens(&e.attrs, tokens); - - let needs_group = !e.attrs.is_empty(); - if needs_group { - fixup = FixupContext::NONE; - } - - let do_print_expr = 
|tokens: &mut TokenStream| { - let binop_prec = Precedence::of_binop(&e.op); - let (left_prec, left_fixup) = fixup.leftmost_subexpression_with_operator( - &e.left, - #[cfg(feature = "full")] - match &e.op { - BinOp::Sub(_) - | BinOp::Mul(_) - | BinOp::And(_) - | BinOp::Or(_) - | BinOp::BitAnd(_) - | BinOp::BitOr(_) - | BinOp::Shl(_) - | BinOp::Lt(_) => true, - _ => false, - }, - match &e.op { - BinOp::Shl(_) | BinOp::Lt(_) => true, - _ => false, - }, - #[cfg(feature = "full")] - binop_prec, - ); - let left_needs_group = match binop_prec { - Precedence::Assign => left_prec <= Precedence::Range, - Precedence::Compare => left_prec <= binop_prec, - _ => left_prec < binop_prec, - }; - - let right_fixup = fixup.rightmost_subexpression_fixup( - #[cfg(feature = "full")] - false, - #[cfg(feature = "full")] - false, - #[cfg(feature = "full")] - binop_prec, - ); - let right_needs_group = binop_prec != Precedence::Assign - && right_fixup.rightmost_subexpression_precedence(&e.right) <= binop_prec; - - print_subexpression(&e.left, left_needs_group, tokens, left_fixup); - e.op.to_tokens(tokens); - print_subexpression(&e.right, right_needs_group, tokens, right_fixup); - }; - - if needs_group { - token::Paren::default().surround(tokens, do_print_expr); - } else { - do_print_expr(tokens); - } - } - - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for ExprBlock { - fn to_tokens(&self, tokens: &mut TokenStream) { - outer_attrs_to_tokens(&self.attrs, tokens); - self.label.to_tokens(tokens); - self.block.brace_token.surround(tokens, |tokens| { - inner_attrs_to_tokens(&self.attrs, tokens); - tokens.append_all(&self.block.stmts); - }); - } - } - - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for ExprBreak { - fn to_tokens(&self, tokens: &mut TokenStream) { - print_expr_break(self, tokens, FixupContext::NONE); - } - } - - #[cfg(feature = "full")] - fn print_expr_break(e: &ExprBreak, tokens: &mut TokenStream, fixup: FixupContext) { - outer_attrs_to_tokens(&e.attrs, tokens); - e.break_token.to_tokens(tokens); - e.label.to_tokens(tokens); - if let Some(value) = &e.expr { - print_subexpression( - value, - // Parenthesize `break 'inner: loop { break 'inner 1 } + 1` - // ^---------------------------------^ - e.label.is_none() && classify::expr_leading_label(value), - tokens, - fixup.rightmost_subexpression_fixup(true, true, Precedence::Jump), - ); - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for ExprCall { - fn to_tokens(&self, tokens: &mut TokenStream) { - print_expr_call(self, tokens, FixupContext::NONE); - } - } - - fn print_expr_call(e: &ExprCall, tokens: &mut TokenStream, fixup: FixupContext) { - outer_attrs_to_tokens(&e.attrs, tokens); - - let (left_prec, left_fixup) = fixup.leftmost_subexpression_with_operator( - &e.func, - #[cfg(feature = "full")] - true, - false, - #[cfg(feature = "full")] - Precedence::Unambiguous, - ); - let needs_group = if let Expr::Field(func) = &*e.func { - func.member.is_named() - } else { - left_prec < Precedence::Unambiguous - }; - print_subexpression(&e.func, needs_group, tokens, left_fixup); - - e.paren_token.surround(tokens, |tokens| { - e.args.to_tokens(tokens); - }); - } - - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for ExprCast { - fn to_tokens(&self, tokens: &mut TokenStream) { - print_expr_cast(self, tokens, FixupContext::NONE); - } - } - - fn print_expr_cast(e: &ExprCast, tokens: &mut TokenStream, mut fixup: 
FixupContext) { - outer_attrs_to_tokens(&e.attrs, tokens); - - let needs_group = !e.attrs.is_empty(); - if needs_group { - fixup = FixupContext::NONE; - } - - let do_print_expr = |tokens: &mut TokenStream| { - let (left_prec, left_fixup) = fixup.leftmost_subexpression_with_operator( - &e.expr, - #[cfg(feature = "full")] - false, - false, - #[cfg(feature = "full")] - Precedence::Cast, - ); - print_subexpression(&e.expr, left_prec < Precedence::Cast, tokens, left_fixup); - e.as_token.to_tokens(tokens); - e.ty.to_tokens(tokens); - }; - - if needs_group { - token::Paren::default().surround(tokens, do_print_expr); - } else { - do_print_expr(tokens); - } - } - - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for ExprClosure { - fn to_tokens(&self, tokens: &mut TokenStream) { - print_expr_closure(self, tokens, FixupContext::NONE); - } - } - - #[cfg(feature = "full")] - fn print_expr_closure(e: &ExprClosure, tokens: &mut TokenStream, fixup: FixupContext) { - outer_attrs_to_tokens(&e.attrs, tokens); - e.lifetimes.to_tokens(tokens); - e.constness.to_tokens(tokens); - e.movability.to_tokens(tokens); - e.asyncness.to_tokens(tokens); - e.capture.to_tokens(tokens); - e.or1_token.to_tokens(tokens); - e.inputs.to_tokens(tokens); - e.or2_token.to_tokens(tokens); - e.output.to_tokens(tokens); - if matches!(e.output, ReturnType::Default) - || matches!(&*e.body, Expr::Block(body) if body.attrs.is_empty() && body.label.is_none()) - { - print_expr( - &e.body, - tokens, - fixup.rightmost_subexpression_fixup(false, false, Precedence::Jump), - ); - } else { - token::Brace::default().surround(tokens, |tokens| { - print_expr(&e.body, tokens, FixupContext::new_stmt()); - }); - } - } - - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for ExprConst { - fn to_tokens(&self, tokens: &mut TokenStream) { - outer_attrs_to_tokens(&self.attrs, tokens); - self.const_token.to_tokens(tokens); - self.block.brace_token.surround(tokens, |tokens| { - inner_attrs_to_tokens(&self.attrs, tokens); - tokens.append_all(&self.block.stmts); - }); - } - } - - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for ExprContinue { - fn to_tokens(&self, tokens: &mut TokenStream) { - outer_attrs_to_tokens(&self.attrs, tokens); - self.continue_token.to_tokens(tokens); - self.label.to_tokens(tokens); - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for ExprField { - fn to_tokens(&self, tokens: &mut TokenStream) { - print_expr_field(self, tokens, FixupContext::NONE); - } - } - - fn print_expr_field(e: &ExprField, tokens: &mut TokenStream, fixup: FixupContext) { - outer_attrs_to_tokens(&e.attrs, tokens); - let (left_prec, left_fixup) = fixup.leftmost_subexpression_with_dot(&e.base); - print_subexpression( - &e.base, - left_prec < Precedence::Unambiguous, - tokens, - left_fixup, - ); - e.dot_token.to_tokens(tokens); - e.member.to_tokens(tokens); - } - - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for ExprForLoop { - fn to_tokens(&self, tokens: &mut TokenStream) { - outer_attrs_to_tokens(&self.attrs, tokens); - self.label.to_tokens(tokens); - self.for_token.to_tokens(tokens); - self.pat.to_tokens(tokens); - self.in_token.to_tokens(tokens); - print_expr(&self.expr, tokens, FixupContext::new_condition()); - self.body.brace_token.surround(tokens, |tokens| { - inner_attrs_to_tokens(&self.attrs, tokens); - 
tokens.append_all(&self.body.stmts); - }); - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for ExprGroup { - fn to_tokens(&self, tokens: &mut TokenStream) { - outer_attrs_to_tokens(&self.attrs, tokens); - self.group_token.surround(tokens, |tokens| { - self.expr.to_tokens(tokens); - }); - } - } - - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for ExprIf { - fn to_tokens(&self, tokens: &mut TokenStream) { - outer_attrs_to_tokens(&self.attrs, tokens); - - let mut expr = self; - loop { - expr.if_token.to_tokens(tokens); - print_expr(&expr.cond, tokens, FixupContext::new_condition()); - expr.then_branch.to_tokens(tokens); - - let (else_token, else_) = match &expr.else_branch { - Some(else_branch) => else_branch, - None => break, - }; - - else_token.to_tokens(tokens); - match &**else_ { - Expr::If(next) => { - expr = next; - } - Expr::Block(last) => { - last.to_tokens(tokens); - break; - } - // If this is not one of the valid expressions to exist in - // an else clause, wrap it in a block. - other => { - token::Brace::default().surround(tokens, |tokens| { - print_expr(other, tokens, FixupContext::new_stmt()); - }); - break; - } - } - } - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for ExprIndex { - fn to_tokens(&self, tokens: &mut TokenStream) { - print_expr_index(self, tokens, FixupContext::NONE); - } - } - - fn print_expr_index(e: &ExprIndex, tokens: &mut TokenStream, fixup: FixupContext) { - outer_attrs_to_tokens(&e.attrs, tokens); - let (left_prec, left_fixup) = fixup.leftmost_subexpression_with_operator( - &e.expr, - #[cfg(feature = "full")] - true, - false, - #[cfg(feature = "full")] - Precedence::Unambiguous, - ); - print_subexpression( - &e.expr, - left_prec < Precedence::Unambiguous, - tokens, - left_fixup, - ); - e.bracket_token.surround(tokens, |tokens| { - e.index.to_tokens(tokens); - }); - } - - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for ExprInfer { - fn to_tokens(&self, tokens: &mut TokenStream) { - outer_attrs_to_tokens(&self.attrs, tokens); - self.underscore_token.to_tokens(tokens); - } - } - - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for ExprLet { - fn to_tokens(&self, tokens: &mut TokenStream) { - print_expr_let(self, tokens, FixupContext::NONE); - } - } - - #[cfg(feature = "full")] - fn print_expr_let(e: &ExprLet, tokens: &mut TokenStream, fixup: FixupContext) { - outer_attrs_to_tokens(&e.attrs, tokens); - e.let_token.to_tokens(tokens); - e.pat.to_tokens(tokens); - e.eq_token.to_tokens(tokens); - let (right_prec, right_fixup) = fixup.rightmost_subexpression(&e.expr, Precedence::Let); - print_subexpression(&e.expr, right_prec < Precedence::Let, tokens, right_fixup); - } - - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for ExprLit { - fn to_tokens(&self, tokens: &mut TokenStream) { - outer_attrs_to_tokens(&self.attrs, tokens); - self.lit.to_tokens(tokens); - } - } - - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for ExprLoop { - fn to_tokens(&self, tokens: &mut TokenStream) { - outer_attrs_to_tokens(&self.attrs, tokens); - self.label.to_tokens(tokens); - self.loop_token.to_tokens(tokens); - self.body.brace_token.surround(tokens, |tokens| { - inner_attrs_to_tokens(&self.attrs, tokens); - tokens.append_all(&self.body.stmts); - }); - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = 
"printing")))] - impl ToTokens for ExprMacro { - fn to_tokens(&self, tokens: &mut TokenStream) { - outer_attrs_to_tokens(&self.attrs, tokens); - self.mac.to_tokens(tokens); - } - } - - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for ExprMatch { - fn to_tokens(&self, tokens: &mut TokenStream) { - outer_attrs_to_tokens(&self.attrs, tokens); - self.match_token.to_tokens(tokens); - print_expr(&self.expr, tokens, FixupContext::new_condition()); - self.brace_token.surround(tokens, |tokens| { - inner_attrs_to_tokens(&self.attrs, tokens); - for (i, arm) in self.arms.iter().enumerate() { - arm.to_tokens(tokens); - // Ensure that we have a comma after a non-block arm, except - // for the last one. - let is_last = i == self.arms.len() - 1; - if !is_last - && classify::requires_comma_to_be_match_arm(&arm.body) - && arm.comma.is_none() - { - <Token![,]>::default().to_tokens(tokens); - } - } - }); - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for ExprMethodCall { - fn to_tokens(&self, tokens: &mut TokenStream) { - print_expr_method_call(self, tokens, FixupContext::NONE); - } - } - - fn print_expr_method_call(e: &ExprMethodCall, tokens: &mut TokenStream, fixup: FixupContext) { - outer_attrs_to_tokens(&e.attrs, tokens); - let (left_prec, left_fixup) = fixup.leftmost_subexpression_with_dot(&e.receiver); - print_subexpression( - &e.receiver, - left_prec < Precedence::Unambiguous, - tokens, - left_fixup, - ); - e.dot_token.to_tokens(tokens); - e.method.to_tokens(tokens); - if let Some(turbofish) = &e.turbofish { - path::printing::print_angle_bracketed_generic_arguments( - tokens, - turbofish, - PathStyle::Expr, - ); - } - e.paren_token.surround(tokens, |tokens| { - e.args.to_tokens(tokens); - }); - } - - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for ExprParen { - fn to_tokens(&self, tokens: &mut TokenStream) { - outer_attrs_to_tokens(&self.attrs, tokens); - self.paren_token.surround(tokens, |tokens| { - self.expr.to_tokens(tokens); - }); - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for ExprPath { - fn to_tokens(&self, tokens: &mut TokenStream) { - outer_attrs_to_tokens(&self.attrs, tokens); - path::printing::print_qpath(tokens, &self.qself, &self.path, PathStyle::Expr); - } - } - - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for ExprRange { - fn to_tokens(&self, tokens: &mut TokenStream) { - print_expr_range(self, tokens, FixupContext::NONE); - } - } - - #[cfg(feature = "full")] - fn print_expr_range(e: &ExprRange, tokens: &mut TokenStream, mut fixup: FixupContext) { - outer_attrs_to_tokens(&e.attrs, tokens); - - let needs_group = !e.attrs.is_empty(); - if needs_group { - fixup = FixupContext::NONE; - } - - let do_print_expr = |tokens: &mut TokenStream| { - if let Some(start) = &e.start { - let (left_prec, left_fixup) = fixup.leftmost_subexpression_with_operator( - start, - true, - false, - Precedence::Range, - ); - print_subexpression(start, left_prec <= Precedence::Range, tokens, left_fixup); - } - e.limits.to_tokens(tokens); - if let Some(end) = &e.end { - let right_fixup = - fixup.rightmost_subexpression_fixup(false, true, Precedence::Range); - let right_prec = right_fixup.rightmost_subexpression_precedence(end); - print_subexpression(end, right_prec <= Precedence::Range, tokens, right_fixup); - } - }; - - if needs_group { - token::Paren::default().surround(tokens, do_print_expr); - } else { - 
do_print_expr(tokens); - } - } - - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for ExprRawAddr { - fn to_tokens(&self, tokens: &mut TokenStream) { - print_expr_raw_addr(self, tokens, FixupContext::NONE); - } - } - - #[cfg(feature = "full")] - fn print_expr_raw_addr(e: &ExprRawAddr, tokens: &mut TokenStream, fixup: FixupContext) { - outer_attrs_to_tokens(&e.attrs, tokens); - e.and_token.to_tokens(tokens); - e.raw.to_tokens(tokens); - e.mutability.to_tokens(tokens); - let (right_prec, right_fixup) = fixup.rightmost_subexpression(&e.expr, Precedence::Prefix); - print_subexpression( - &e.expr, - right_prec < Precedence::Prefix, - tokens, - right_fixup, - ); - } - - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for ExprReference { - fn to_tokens(&self, tokens: &mut TokenStream) { - print_expr_reference(self, tokens, FixupContext::NONE); - } - } - - fn print_expr_reference(e: &ExprReference, tokens: &mut TokenStream, fixup: FixupContext) { - outer_attrs_to_tokens(&e.attrs, tokens); - e.and_token.to_tokens(tokens); - e.mutability.to_tokens(tokens); - let (right_prec, right_fixup) = fixup.rightmost_subexpression( - &e.expr, - #[cfg(feature = "full")] - Precedence::Prefix, - ); - print_subexpression( - &e.expr, - right_prec < Precedence::Prefix, - tokens, - right_fixup, - ); - } - - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for ExprRepeat { - fn to_tokens(&self, tokens: &mut TokenStream) { - outer_attrs_to_tokens(&self.attrs, tokens); - self.bracket_token.surround(tokens, |tokens| { - self.expr.to_tokens(tokens); - self.semi_token.to_tokens(tokens); - self.len.to_tokens(tokens); - }); - } - } - - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for ExprReturn { - fn to_tokens(&self, tokens: &mut TokenStream) { - print_expr_return(self, tokens, FixupContext::NONE); - } - } - - #[cfg(feature = "full")] - fn print_expr_return(e: &ExprReturn, tokens: &mut TokenStream, fixup: FixupContext) { - outer_attrs_to_tokens(&e.attrs, tokens); - e.return_token.to_tokens(tokens); - if let Some(expr) = &e.expr { - print_expr( - expr, - tokens, - fixup.rightmost_subexpression_fixup(true, false, Precedence::Jump), - ); - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for ExprStruct { - fn to_tokens(&self, tokens: &mut TokenStream) { - outer_attrs_to_tokens(&self.attrs, tokens); - path::printing::print_qpath(tokens, &self.qself, &self.path, PathStyle::Expr); - self.brace_token.surround(tokens, |tokens| { - self.fields.to_tokens(tokens); - if let Some(dot2_token) = &self.dot2_token { - dot2_token.to_tokens(tokens); - } else if self.rest.is_some() { - Token![..](Span::call_site()).to_tokens(tokens); - } - self.rest.to_tokens(tokens); - }); - } - } - - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for ExprTry { - fn to_tokens(&self, tokens: &mut TokenStream) { - print_expr_try(self, tokens, FixupContext::NONE); - } - } - - #[cfg(feature = "full")] - fn print_expr_try(e: &ExprTry, tokens: &mut TokenStream, fixup: FixupContext) { - outer_attrs_to_tokens(&e.attrs, tokens); - let (left_prec, left_fixup) = fixup.leftmost_subexpression_with_dot(&e.expr); - print_subexpression( - &e.expr, - left_prec < Precedence::Unambiguous, - tokens, - left_fixup, - ); - e.question_token.to_tokens(tokens); - } - - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - 
impl ToTokens for ExprTryBlock { - fn to_tokens(&self, tokens: &mut TokenStream) { - outer_attrs_to_tokens(&self.attrs, tokens); - self.try_token.to_tokens(tokens); - self.block.to_tokens(tokens); - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for ExprTuple { - fn to_tokens(&self, tokens: &mut TokenStream) { - outer_attrs_to_tokens(&self.attrs, tokens); - self.paren_token.surround(tokens, |tokens| { - self.elems.to_tokens(tokens); - // If we only have one argument, we need a trailing comma to - // distinguish ExprTuple from ExprParen. - if self.elems.len() == 1 && !self.elems.trailing_punct() { - <Token![,]>::default().to_tokens(tokens); - } - }); - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for ExprUnary { - fn to_tokens(&self, tokens: &mut TokenStream) { - print_expr_unary(self, tokens, FixupContext::NONE); - } - } - - fn print_expr_unary(e: &ExprUnary, tokens: &mut TokenStream, fixup: FixupContext) { - outer_attrs_to_tokens(&e.attrs, tokens); - e.op.to_tokens(tokens); - let (right_prec, right_fixup) = fixup.rightmost_subexpression( - &e.expr, - #[cfg(feature = "full")] - Precedence::Prefix, - ); - print_subexpression( - &e.expr, - right_prec < Precedence::Prefix, - tokens, - right_fixup, - ); - } - - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for ExprUnsafe { - fn to_tokens(&self, tokens: &mut TokenStream) { - outer_attrs_to_tokens(&self.attrs, tokens); - self.unsafe_token.to_tokens(tokens); - self.block.brace_token.surround(tokens, |tokens| { - inner_attrs_to_tokens(&self.attrs, tokens); - tokens.append_all(&self.block.stmts); - }); - } - } - - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for ExprWhile { - fn to_tokens(&self, tokens: &mut TokenStream) { - outer_attrs_to_tokens(&self.attrs, tokens); - self.label.to_tokens(tokens); - self.while_token.to_tokens(tokens); - print_expr(&self.cond, tokens, FixupContext::new_condition()); - self.body.brace_token.surround(tokens, |tokens| { - inner_attrs_to_tokens(&self.attrs, tokens); - tokens.append_all(&self.body.stmts); - }); - } - } - - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for ExprYield { - fn to_tokens(&self, tokens: &mut TokenStream) { - print_expr_yield(self, tokens, FixupContext::NONE); - } - } - - #[cfg(feature = "full")] - fn print_expr_yield(e: &ExprYield, tokens: &mut TokenStream, fixup: FixupContext) { - outer_attrs_to_tokens(&e.attrs, tokens); - e.yield_token.to_tokens(tokens); - if let Some(expr) = &e.expr { - print_expr( - expr, - tokens, - fixup.rightmost_subexpression_fixup(true, false, Precedence::Jump), - ); - } - } - - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for Arm { - fn to_tokens(&self, tokens: &mut TokenStream) { - tokens.append_all(&self.attrs); - self.pat.to_tokens(tokens); - if let Some((if_token, guard)) = &self.guard { - if_token.to_tokens(tokens); - guard.to_tokens(tokens); - } - self.fat_arrow_token.to_tokens(tokens); - print_expr(&self.body, tokens, FixupContext::new_match_arm()); - self.comma.to_tokens(tokens); - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for FieldValue { - fn to_tokens(&self, tokens: &mut TokenStream) { - outer_attrs_to_tokens(&self.attrs, tokens); - self.member.to_tokens(tokens); - if let Some(colon_token) = &self.colon_token { - colon_token.to_tokens(tokens); - 
self.expr.to_tokens(tokens); - } - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for Index { - fn to_tokens(&self, tokens: &mut TokenStream) { - let mut lit = Literal::i64_unsuffixed(i64::from(self.index)); - lit.set_span(self.span); - tokens.append(lit); - } - } - - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for Label { - fn to_tokens(&self, tokens: &mut TokenStream) { - self.name.to_tokens(tokens); - self.colon_token.to_tokens(tokens); - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for Member { - fn to_tokens(&self, tokens: &mut TokenStream) { - match self { - Member::Named(ident) => ident.to_tokens(tokens), - Member::Unnamed(index) => index.to_tokens(tokens), - } - } - } - - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for RangeLimits { - fn to_tokens(&self, tokens: &mut TokenStream) { - match self { - RangeLimits::HalfOpen(t) => t.to_tokens(tokens), - RangeLimits::Closed(t) => t.to_tokens(tokens), - } - } - } - - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for PointerMutability { - fn to_tokens(&self, tokens: &mut TokenStream) { - match self { - PointerMutability::Const(const_token) => const_token.to_tokens(tokens), - PointerMutability::Mut(mut_token) => mut_token.to_tokens(tokens), - } - } - } -} diff --git a/vendor/syn/src/ext.rs b/vendor/syn/src/ext.rs deleted file mode 100644 index 7cf62bd45ef560..00000000000000 --- a/vendor/syn/src/ext.rs +++ /dev/null @@ -1,179 +0,0 @@ -//! Extension traits to provide parsing methods on foreign types. - -#[cfg(feature = "parsing")] -use crate::buffer::Cursor; -#[cfg(feature = "parsing")] -use crate::error::Result; -#[cfg(feature = "parsing")] -use crate::parse::ParseStream; -#[cfg(feature = "parsing")] -use crate::parse::Peek; -#[cfg(feature = "parsing")] -use crate::sealed::lookahead; -#[cfg(feature = "parsing")] -use crate::token::CustomToken; -use proc_macro2::{Ident, Punct, Spacing, Span, TokenStream, TokenTree}; -use std::iter; - -/// Additional methods for `Ident` not provided by proc-macro2 or libproc_macro. -/// -/// This trait is sealed and cannot be implemented for types outside of Syn. It -/// is implemented only for `proc_macro2::Ident`. -pub trait IdentExt: Sized + private::Sealed { - /// Parses any identifier including keywords. - /// - /// This is useful when parsing macro input which allows Rust keywords as - /// identifiers. - /// - /// # Example - /// - /// ``` - /// use syn::{Error, Ident, Result, Token}; - /// use syn::ext::IdentExt; - /// use syn::parse::ParseStream; - /// - /// mod kw { - /// syn::custom_keyword!(name); - /// } - /// - /// // Parses input that looks like `name = NAME` where `NAME` can be - /// // any identifier. - /// // - /// // Examples: - /// // - /// // name = anything - /// // name = impl - /// fn parse_dsl(input: ParseStream) -> Result<Ident> { - /// input.parse::<kw::name>()?; - /// input.parse::<Token![=]>()?; - /// let name = input.call(Ident::parse_any)?; - /// Ok(name) - /// } - /// ``` - #[cfg(feature = "parsing")] - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - fn parse_any(input: ParseStream) -> Result<Self>; - - /// Peeks any identifier including keywords. Usage: - /// `input.peek(Ident::peek_any)` - /// - /// This is different from `input.peek(Ident)` which only returns true in - /// the case of an ident which is not a Rust keyword. 
- #[cfg(feature = "parsing")] - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - #[allow(non_upper_case_globals)] - const peek_any: private::PeekFn = private::PeekFn; - - /// Strips the raw marker `r#`, if any, from the beginning of an ident. - /// - /// - unraw(`x`) = `x` - /// - unraw(`move`) = `move` - /// - unraw(`r#move`) = `move` - /// - /// # Example - /// - /// In the case of interop with other languages like Python that have a - /// different set of keywords than Rust, we might come across macro input - /// that involves raw identifiers to refer to ordinary variables in the - /// other language with a name that happens to be a Rust keyword. - /// - /// The function below appends an identifier from the caller's input onto a - /// fixed prefix. Without using `unraw()`, this would tend to produce - /// invalid identifiers like `__pyo3_get_r#move`. - /// - /// ``` - /// use proc_macro2::Span; - /// use syn::Ident; - /// use syn::ext::IdentExt; - /// - /// fn ident_for_getter(variable: &Ident) -> Ident { - /// let getter = format!("__pyo3_get_{}", variable.unraw()); - /// Ident::new(&getter, Span::call_site()) - /// } - /// ``` - fn unraw(&self) -> Ident; -} - -impl IdentExt for Ident { - #[cfg(feature = "parsing")] - fn parse_any(input: ParseStream) -> Result<Self> { - input.step(|cursor| match cursor.ident() { - Some((ident, rest)) => Ok((ident, rest)), - None => Err(cursor.error("expected ident")), - }) - } - - fn unraw(&self) -> Ident { - let string = self.to_string(); - if let Some(string) = string.strip_prefix("r#") { - Ident::new(string, self.span()) - } else { - self.clone() - } - } -} - -#[cfg(feature = "parsing")] -impl Peek for private::PeekFn { - type Token = private::IdentAny; -} - -#[cfg(feature = "parsing")] -impl CustomToken for private::IdentAny { - fn peek(cursor: Cursor) -> bool { - cursor.ident().is_some() - } - - fn display() -> &'static str { - "identifier" - } -} - -#[cfg(feature = "parsing")] -impl lookahead::Sealed for private::PeekFn {} - -pub(crate) trait TokenStreamExt { - fn append(&mut self, token: TokenTree); -} - -impl TokenStreamExt for TokenStream { - fn append(&mut self, token: TokenTree) { - self.extend(iter::once(token)); - } -} - -pub(crate) trait PunctExt { - fn new_spanned(ch: char, spacing: Spacing, span: Span) -> Self; -} - -impl PunctExt for Punct { - fn new_spanned(ch: char, spacing: Spacing, span: Span) -> Self { - let mut punct = Punct::new(ch, spacing); - punct.set_span(span); - punct - } -} - -mod private { - use proc_macro2::Ident; - - pub trait Sealed {} - - impl Sealed for Ident {} - - #[cfg(feature = "parsing")] - pub struct PeekFn; - - #[cfg(feature = "parsing")] - pub struct IdentAny; - - #[cfg(feature = "parsing")] - impl Copy for PeekFn {} - - #[cfg(feature = "parsing")] - impl Clone for PeekFn { - fn clone(&self) -> Self { - *self - } - } -} diff --git a/vendor/syn/src/file.rs b/vendor/syn/src/file.rs deleted file mode 100644 index 066f97b1a2bfb9..00000000000000 --- a/vendor/syn/src/file.rs +++ /dev/null @@ -1,125 +0,0 @@ -use crate::attr::Attribute; -use crate::item::Item; - -ast_struct! { - /// A complete file of Rust source code. - /// - /// Typically `File` objects are created with [`parse_file`]. - /// - /// [`parse_file`]: crate::parse_file - /// - /// # Example - /// - /// Parse a Rust source file into a `syn::File` and print out a debug - /// representation of the syntax tree. 
- /// - /// ``` - /// use std::env; - /// use std::fs; - /// use std::process; - /// - /// fn main() { - /// # } - /// # - /// # fn fake_main() { - /// let mut args = env::args(); - /// let _ = args.next(); // executable name - /// - /// let filename = match (args.next(), args.next()) { - /// (Some(filename), None) => filename, - /// _ => { - /// eprintln!("Usage: dump-syntax path/to/filename.rs"); - /// process::exit(1); - /// } - /// }; - /// - /// let src = fs::read_to_string(&filename).expect("unable to read file"); - /// let syntax = syn::parse_file(&src).expect("unable to parse file"); - /// - /// // Debug impl is available if Syn is built with "extra-traits" feature. - /// println!("{:#?}", syntax); - /// } - /// ``` - /// - /// Running with its own source code as input, this program prints output - /// that begins with: - /// - /// ```text - /// File { - /// shebang: None, - /// attrs: [], - /// items: [ - /// Use( - /// ItemUse { - /// attrs: [], - /// vis: Inherited, - /// use_token: Use, - /// leading_colon: None, - /// tree: Path( - /// UsePath { - /// ident: Ident( - /// std, - /// ), - /// colon2_token: Colon2, - /// tree: Name( - /// UseName { - /// ident: Ident( - /// env, - /// ), - /// }, - /// ), - /// }, - /// ), - /// semi_token: Semi, - /// }, - /// ), - /// ... - /// ``` - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - pub struct File { - pub shebang: Option<String>, - pub attrs: Vec<Attribute>, - pub items: Vec<Item>, - } -} - -#[cfg(feature = "parsing")] -pub(crate) mod parsing { - use crate::attr::Attribute; - use crate::error::Result; - use crate::file::File; - use crate::parse::{Parse, ParseStream}; - - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - impl Parse for File { - fn parse(input: ParseStream) -> Result<Self> { - Ok(File { - shebang: None, - attrs: input.call(Attribute::parse_inner)?, - items: { - let mut items = Vec::new(); - while !input.is_empty() { - items.push(input.parse()?); - } - items - }, - }) - } - } -} - -#[cfg(feature = "printing")] -mod printing { - use crate::attr::FilterAttrs; - use crate::file::File; - use proc_macro2::TokenStream; - use quote::{ToTokens, TokenStreamExt as _}; - - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for File { - fn to_tokens(&self, tokens: &mut TokenStream) { - tokens.append_all(self.attrs.inner()); - tokens.append_all(&self.items); - } - } -} diff --git a/vendor/syn/src/fixup.rs b/vendor/syn/src/fixup.rs deleted file mode 100644 index 6d2c3092d54838..00000000000000 --- a/vendor/syn/src/fixup.rs +++ /dev/null @@ -1,773 +0,0 @@ -use crate::classify; -use crate::expr::Expr; -#[cfg(feature = "full")] -use crate::expr::{ - ExprBreak, ExprRange, ExprRawAddr, ExprReference, ExprReturn, ExprUnary, ExprYield, -}; -use crate::precedence::Precedence; -#[cfg(feature = "full")] -use crate::ty::ReturnType; - -pub(crate) struct FixupContext { - #[cfg(feature = "full")] - previous_operator: Precedence, - #[cfg(feature = "full")] - next_operator: Precedence, - - // Print expression such that it can be parsed back as a statement - // consisting of the original expression. - // - // The effect of this is for binary operators in statement position to set - // `leftmost_subexpression_in_stmt` when printing their left-hand operand. 
- // - // (match x {}) - 1; // match needs parens when LHS of binary operator - // - // match x {}; // not when its own statement - // - #[cfg(feature = "full")] - stmt: bool, - - // This is the difference between: - // - // (match x {}) - 1; // subexpression needs parens - // - // let _ = match x {} - 1; // no parens - // - // There are 3 distinguishable contexts in which `print_expr` might be - // called with the expression `$match` as its argument, where `$match` - // represents an expression of kind `ExprKind::Match`: - // - // - stmt=false leftmost_subexpression_in_stmt=false - // - // Example: `let _ = $match - 1;` - // - // No parentheses required. - // - // - stmt=false leftmost_subexpression_in_stmt=true - // - // Example: `$match - 1;` - // - // Must parenthesize `($match)`, otherwise parsing back the output as a - // statement would terminate the statement after the closing brace of - // the match, parsing `-1;` as a separate statement. - // - // - stmt=true leftmost_subexpression_in_stmt=false - // - // Example: `$match;` - // - // No parentheses required. - #[cfg(feature = "full")] - leftmost_subexpression_in_stmt: bool, - - // Print expression such that it can be parsed as a match arm. - // - // This is almost equivalent to `stmt`, but the grammar diverges a tiny bit - // between statements and match arms when it comes to braced macro calls. - // Macro calls with brace delimiter terminate a statement without a - // semicolon, but do not terminate a match-arm without comma. - // - // m! {} - 1; // two statements: a macro call followed by -1 literal - // - // match () { - // _ => m! {} - 1, // binary subtraction operator - // } - // - #[cfg(feature = "full")] - match_arm: bool, - - // This is almost equivalent to `leftmost_subexpression_in_stmt`, other than - // for braced macro calls. - // - // If we have `m! {} - 1` as an expression, the leftmost subexpression - // `m! {}` will need to be parenthesized in the statement case but not the - // match-arm case. - // - // (m! {}) - 1; // subexpression needs parens - // - // match () { - // _ => m! {} - 1, // no parens - // } - // - #[cfg(feature = "full")] - leftmost_subexpression_in_match_arm: bool, - - // This is the difference between: - // - // if let _ = (Struct {}) {} // needs parens - // - // match () { - // () if let _ = Struct {} => {} // no parens - // } - // - #[cfg(feature = "full")] - condition: bool, - - // This is the difference between: - // - // if break Struct {} == (break) {} // needs parens - // - // if break break == Struct {} {} // no parens - // - #[cfg(feature = "full")] - rightmost_subexpression_in_condition: bool, - - // This is the difference between: - // - // if break ({ x }).field + 1 {} needs parens - // - // if break 1 + { x }.field {} // no parens - // - #[cfg(feature = "full")] - leftmost_subexpression_in_optional_operand: bool, - - // This is the difference between: - // - // let _ = (return) - 1; // without paren, this would return -1 - // - // let _ = return + 1; // no paren because '+' cannot begin expr - // - #[cfg(feature = "full")] - next_operator_can_begin_expr: bool, - - // This is the difference between: - // - // let _ = 1 + return 1; // no parens if rightmost subexpression - // - // let _ = 1 + (return 1) + 1; // needs parens - // - #[cfg(feature = "full")] - next_operator_can_continue_expr: bool, - - // This is the difference between: - // - // let _ = x as u8 + T; - // - // let _ = (x as u8) < T; - // - // Without parens, the latter would want to parse `u8<T...` as a type. 
- next_operator_can_begin_generics: bool, -} - -impl FixupContext { - /// The default amount of fixing is minimal fixing. Fixups should be turned - /// on in a targeted fashion where needed. - pub const NONE: Self = FixupContext { - #[cfg(feature = "full")] - previous_operator: Precedence::MIN, - #[cfg(feature = "full")] - next_operator: Precedence::MIN, - #[cfg(feature = "full")] - stmt: false, - #[cfg(feature = "full")] - leftmost_subexpression_in_stmt: false, - #[cfg(feature = "full")] - match_arm: false, - #[cfg(feature = "full")] - leftmost_subexpression_in_match_arm: false, - #[cfg(feature = "full")] - condition: false, - #[cfg(feature = "full")] - rightmost_subexpression_in_condition: false, - #[cfg(feature = "full")] - leftmost_subexpression_in_optional_operand: false, - #[cfg(feature = "full")] - next_operator_can_begin_expr: false, - #[cfg(feature = "full")] - next_operator_can_continue_expr: false, - next_operator_can_begin_generics: false, - }; - - /// Create the initial fixup for printing an expression in statement - /// position. - #[cfg(feature = "full")] - pub fn new_stmt() -> Self { - FixupContext { - stmt: true, - ..FixupContext::NONE - } - } - - /// Create the initial fixup for printing an expression as the right-hand - /// side of a match arm. - #[cfg(feature = "full")] - pub fn new_match_arm() -> Self { - FixupContext { - match_arm: true, - ..FixupContext::NONE - } - } - - /// Create the initial fixup for printing an expression as the "condition" - /// of an `if` or `while`. There are a few other positions which are - /// grammatically equivalent and also use this, such as the iterator - /// expression in `for` and the scrutinee in `match`. - #[cfg(feature = "full")] - pub fn new_condition() -> Self { - FixupContext { - condition: true, - rightmost_subexpression_in_condition: true, - ..FixupContext::NONE - } - } - - /// Transform this fixup into the one that should apply when printing the - /// leftmost subexpression of the current expression. - /// - /// The leftmost subexpression is any subexpression that has the same first - /// token as the current expression, but has a different last token. - /// - /// For example in `$a + $b` and `$a.method()`, the subexpression `$a` is a - /// leftmost subexpression. - /// - /// Not every expression has a leftmost subexpression. For example neither - /// `-$a` nor `[$a]` have one. 
- pub fn leftmost_subexpression_with_operator( - self, - expr: &Expr, - #[cfg(feature = "full")] next_operator_can_begin_expr: bool, - next_operator_can_begin_generics: bool, - #[cfg(feature = "full")] precedence: Precedence, - ) -> (Precedence, Self) { - let fixup = FixupContext { - #[cfg(feature = "full")] - next_operator: precedence, - #[cfg(feature = "full")] - stmt: false, - #[cfg(feature = "full")] - leftmost_subexpression_in_stmt: self.stmt || self.leftmost_subexpression_in_stmt, - #[cfg(feature = "full")] - match_arm: false, - #[cfg(feature = "full")] - leftmost_subexpression_in_match_arm: self.match_arm - || self.leftmost_subexpression_in_match_arm, - #[cfg(feature = "full")] - rightmost_subexpression_in_condition: false, - #[cfg(feature = "full")] - next_operator_can_begin_expr, - #[cfg(feature = "full")] - next_operator_can_continue_expr: true, - next_operator_can_begin_generics, - ..self - }; - - (fixup.leftmost_subexpression_precedence(expr), fixup) - } - - /// Transform this fixup into the one that should apply when printing a - /// leftmost subexpression followed by a `.` or `?` token, which confer - /// different statement boundary rules compared to other leftmost - /// subexpressions. - pub fn leftmost_subexpression_with_dot(self, expr: &Expr) -> (Precedence, Self) { - let fixup = FixupContext { - #[cfg(feature = "full")] - next_operator: Precedence::Unambiguous, - #[cfg(feature = "full")] - stmt: self.stmt || self.leftmost_subexpression_in_stmt, - #[cfg(feature = "full")] - leftmost_subexpression_in_stmt: false, - #[cfg(feature = "full")] - match_arm: self.match_arm || self.leftmost_subexpression_in_match_arm, - #[cfg(feature = "full")] - leftmost_subexpression_in_match_arm: false, - #[cfg(feature = "full")] - rightmost_subexpression_in_condition: false, - #[cfg(feature = "full")] - next_operator_can_begin_expr: false, - #[cfg(feature = "full")] - next_operator_can_continue_expr: true, - next_operator_can_begin_generics: false, - ..self - }; - - (fixup.leftmost_subexpression_precedence(expr), fixup) - } - - fn leftmost_subexpression_precedence(self, expr: &Expr) -> Precedence { - #[cfg(feature = "full")] - if !self.next_operator_can_begin_expr || self.next_operator == Precedence::Range { - if let Scan::Bailout = scan_right(expr, self, Precedence::MIN, 0, 0) { - if scan_left(expr, self) { - return Precedence::Unambiguous; - } - } - } - - self.precedence(expr) - } - - /// Transform this fixup into the one that should apply when printing the - /// rightmost subexpression of the current expression. - /// - /// The rightmost subexpression is any subexpression that has a different - /// first token than the current expression, but has the same last token. - /// - /// For example in `$a + $b` and `-$b`, the subexpression `$b` is a - /// rightmost subexpression. - /// - /// Not every expression has a rightmost subexpression. For example neither - /// `[$b]` nor `$a.f($b)` have one. 
- pub fn rightmost_subexpression( - self, - expr: &Expr, - #[cfg(feature = "full")] precedence: Precedence, - ) -> (Precedence, Self) { - let fixup = self.rightmost_subexpression_fixup( - #[cfg(feature = "full")] - false, - #[cfg(feature = "full")] - false, - #[cfg(feature = "full")] - precedence, - ); - (fixup.rightmost_subexpression_precedence(expr), fixup) - } - - pub fn rightmost_subexpression_fixup( - self, - #[cfg(feature = "full")] reset_allow_struct: bool, - #[cfg(feature = "full")] optional_operand: bool, - #[cfg(feature = "full")] precedence: Precedence, - ) -> Self { - FixupContext { - #[cfg(feature = "full")] - previous_operator: precedence, - #[cfg(feature = "full")] - stmt: false, - #[cfg(feature = "full")] - leftmost_subexpression_in_stmt: false, - #[cfg(feature = "full")] - match_arm: false, - #[cfg(feature = "full")] - leftmost_subexpression_in_match_arm: false, - #[cfg(feature = "full")] - condition: self.condition && !reset_allow_struct, - #[cfg(feature = "full")] - leftmost_subexpression_in_optional_operand: self.condition && optional_operand, - ..self - } - } - - pub fn rightmost_subexpression_precedence(self, expr: &Expr) -> Precedence { - let default_prec = self.precedence(expr); - - #[cfg(feature = "full")] - if match self.previous_operator { - Precedence::Assign | Precedence::Let | Precedence::Prefix => { - default_prec < self.previous_operator - } - _ => default_prec <= self.previous_operator, - } && match self.next_operator { - Precedence::Range | Precedence::Or | Precedence::And => true, - _ => !self.next_operator_can_begin_expr, - } { - if let Scan::Bailout | Scan::Fail = scan_right(expr, self, self.previous_operator, 1, 0) - { - if scan_left(expr, self) { - return Precedence::Prefix; - } - } - } - - default_prec - } - - /// Determine whether parentheses are needed around the given expression to - /// head off the early termination of a statement or condition. - #[cfg(feature = "full")] - pub fn parenthesize(self, expr: &Expr) -> bool { - (self.leftmost_subexpression_in_stmt && !classify::requires_semi_to_be_stmt(expr)) - || ((self.stmt || self.leftmost_subexpression_in_stmt) && matches!(expr, Expr::Let(_))) - || (self.leftmost_subexpression_in_match_arm - && !classify::requires_comma_to_be_match_arm(expr)) - || (self.condition && matches!(expr, Expr::Struct(_))) - || (self.rightmost_subexpression_in_condition - && matches!( - expr, - Expr::Return(ExprReturn { expr: None, .. }) - | Expr::Yield(ExprYield { expr: None, .. }) - )) - || (self.rightmost_subexpression_in_condition - && !self.condition - && matches!( - expr, - Expr::Break(ExprBreak { expr: None, .. }) - | Expr::Path(_) - | Expr::Range(ExprRange { end: None, .. }) - )) - || (self.leftmost_subexpression_in_optional_operand - && matches!(expr, Expr::Block(expr) if expr.attrs.is_empty() && expr.label.is_none())) - } - - /// Determines the effective precedence of a subexpression. Some expressions - /// have higher or lower precedence when adjacent to particular operators. - fn precedence(self, expr: &Expr) -> Precedence { - #[cfg(feature = "full")] - if self.next_operator_can_begin_expr { - // Decrease precedence of value-less jumps when followed by an - // operator that would otherwise get interpreted as beginning a - // value for the jump. - if let Expr::Break(ExprBreak { expr: None, .. }) - | Expr::Return(ExprReturn { expr: None, .. }) - | Expr::Yield(ExprYield { expr: None, .. 
}) = expr - { - return Precedence::Jump; - } - } - - #[cfg(feature = "full")] - if !self.next_operator_can_continue_expr { - match expr { - // Increase precedence of expressions that extend to the end of - // current statement or group. - Expr::Break(_) - | Expr::Closure(_) - | Expr::Let(_) - | Expr::Return(_) - | Expr::Yield(_) => { - return Precedence::Prefix; - } - Expr::Range(e) if e.start.is_none() => return Precedence::Prefix, - _ => {} - } - } - - if self.next_operator_can_begin_generics { - if let Expr::Cast(cast) = expr { - if classify::trailing_unparameterized_path(&cast.ty) { - return Precedence::MIN; - } - } - } - - Precedence::of(expr) - } -} - -impl Copy for FixupContext {} - -impl Clone for FixupContext { - fn clone(&self) -> Self { - *self - } -} - -#[cfg(feature = "full")] -enum Scan { - Fail, - Bailout, - Consume, -} - -#[cfg(feature = "full")] -impl Copy for Scan {} - -#[cfg(feature = "full")] -impl Clone for Scan { - fn clone(&self) -> Self { - *self - } -} - -#[cfg(feature = "full")] -impl PartialEq for Scan { - fn eq(&self, other: &Self) -> bool { - *self as u8 == *other as u8 - } -} - -#[cfg(feature = "full")] -fn scan_left(expr: &Expr, fixup: FixupContext) -> bool { - match expr { - Expr::Assign(_) => fixup.previous_operator <= Precedence::Assign, - Expr::Binary(e) => match Precedence::of_binop(&e.op) { - Precedence::Assign => fixup.previous_operator <= Precedence::Assign, - binop_prec => fixup.previous_operator < binop_prec, - }, - Expr::Cast(_) => fixup.previous_operator < Precedence::Cast, - Expr::Range(e) => e.start.is_none() || fixup.previous_operator < Precedence::Assign, - _ => true, - } -} - -#[cfg(feature = "full")] -fn scan_right( - expr: &Expr, - fixup: FixupContext, - precedence: Precedence, - fail_offset: u8, - bailout_offset: u8, -) -> Scan { - let consume_by_precedence = if match precedence { - Precedence::Assign | Precedence::Compare => precedence <= fixup.next_operator, - _ => precedence < fixup.next_operator, - } || fixup.next_operator == Precedence::MIN - { - Scan::Consume - } else { - Scan::Bailout - }; - if fixup.parenthesize(expr) { - return consume_by_precedence; - } - match expr { - Expr::Assign(e) if e.attrs.is_empty() => { - if match fixup.next_operator { - Precedence::Unambiguous => fail_offset >= 2, - _ => bailout_offset >= 1, - } { - return Scan::Consume; - } - let right_fixup = fixup.rightmost_subexpression_fixup(false, false, Precedence::Assign); - let scan = scan_right( - &e.right, - right_fixup, - Precedence::Assign, - match fixup.next_operator { - Precedence::Unambiguous => fail_offset, - _ => 1, - }, - 1, - ); - if let Scan::Bailout | Scan::Consume = scan { - Scan::Consume - } else if let Precedence::Unambiguous = fixup.next_operator { - Scan::Fail - } else { - Scan::Bailout - } - } - Expr::Binary(e) if e.attrs.is_empty() => { - if match fixup.next_operator { - Precedence::Unambiguous => { - fail_offset >= 2 - && (consume_by_precedence == Scan::Consume || bailout_offset >= 1) - } - _ => bailout_offset >= 1, - } { - return Scan::Consume; - } - let binop_prec = Precedence::of_binop(&e.op); - if binop_prec == Precedence::Compare && fixup.next_operator == Precedence::Compare { - return Scan::Consume; - } - let right_fixup = fixup.rightmost_subexpression_fixup(false, false, binop_prec); - let scan = scan_right( - &e.right, - right_fixup, - binop_prec, - match fixup.next_operator { - Precedence::Unambiguous => fail_offset, - _ => 1, - }, - consume_by_precedence as u8 - Scan::Bailout as u8, - ); - match scan { - Scan::Fail => {} - 
Scan::Bailout => return consume_by_precedence, - Scan::Consume => return Scan::Consume, - } - let right_needs_group = binop_prec != Precedence::Assign - && right_fixup.rightmost_subexpression_precedence(&e.right) <= binop_prec; - if right_needs_group { - consume_by_precedence - } else if let (Scan::Fail, Precedence::Unambiguous) = (scan, fixup.next_operator) { - Scan::Fail - } else { - Scan::Bailout - } - } - Expr::RawAddr(ExprRawAddr { expr, .. }) - | Expr::Reference(ExprReference { expr, .. }) - | Expr::Unary(ExprUnary { expr, .. }) => { - if match fixup.next_operator { - Precedence::Unambiguous => { - fail_offset >= 2 - && (consume_by_precedence == Scan::Consume || bailout_offset >= 1) - } - _ => bailout_offset >= 1, - } { - return Scan::Consume; - } - let right_fixup = fixup.rightmost_subexpression_fixup(false, false, Precedence::Prefix); - let scan = scan_right( - expr, - right_fixup, - precedence, - match fixup.next_operator { - Precedence::Unambiguous => fail_offset, - _ => 1, - }, - consume_by_precedence as u8 - Scan::Bailout as u8, - ); - match scan { - Scan::Fail => {} - Scan::Bailout => return consume_by_precedence, - Scan::Consume => return Scan::Consume, - } - if right_fixup.rightmost_subexpression_precedence(expr) < Precedence::Prefix { - consume_by_precedence - } else if let (Scan::Fail, Precedence::Unambiguous) = (scan, fixup.next_operator) { - Scan::Fail - } else { - Scan::Bailout - } - } - Expr::Range(e) if e.attrs.is_empty() => match &e.end { - Some(end) => { - if fail_offset >= 2 { - return Scan::Consume; - } - let right_fixup = - fixup.rightmost_subexpression_fixup(false, true, Precedence::Range); - let scan = scan_right( - end, - right_fixup, - Precedence::Range, - fail_offset, - match fixup.next_operator { - Precedence::Assign | Precedence::Range => 0, - _ => 1, - }, - ); - if match (scan, fixup.next_operator) { - (Scan::Fail, _) => false, - (Scan::Bailout, Precedence::Assign | Precedence::Range) => false, - (Scan::Bailout | Scan::Consume, _) => true, - } { - return Scan::Consume; - } - if right_fixup.rightmost_subexpression_precedence(end) <= Precedence::Range { - Scan::Consume - } else { - Scan::Fail - } - } - None => { - if fixup.next_operator_can_begin_expr { - Scan::Consume - } else { - Scan::Fail - } - } - }, - Expr::Break(e) => match &e.expr { - Some(value) => { - if bailout_offset >= 1 || e.label.is_none() && classify::expr_leading_label(value) { - return Scan::Consume; - } - let right_fixup = fixup.rightmost_subexpression_fixup(true, true, Precedence::Jump); - match scan_right(value, right_fixup, Precedence::Jump, 1, 1) { - Scan::Fail => Scan::Bailout, - Scan::Bailout | Scan::Consume => Scan::Consume, - } - } - None => match fixup.next_operator { - Precedence::Assign if precedence > Precedence::Assign => Scan::Fail, - _ => Scan::Consume, - }, - }, - Expr::Return(ExprReturn { expr, .. }) | Expr::Yield(ExprYield { expr, .. 
}) => match expr { - Some(e) => { - if bailout_offset >= 1 { - return Scan::Consume; - } - let right_fixup = - fixup.rightmost_subexpression_fixup(true, false, Precedence::Jump); - match scan_right(e, right_fixup, Precedence::Jump, 1, 1) { - Scan::Fail => Scan::Bailout, - Scan::Bailout | Scan::Consume => Scan::Consume, - } - } - None => match fixup.next_operator { - Precedence::Assign if precedence > Precedence::Assign => Scan::Fail, - _ => Scan::Consume, - }, - }, - Expr::Closure(e) => { - if matches!(e.output, ReturnType::Default) - || matches!(&*e.body, Expr::Block(body) if body.attrs.is_empty() && body.label.is_none()) - { - if bailout_offset >= 1 { - return Scan::Consume; - } - let right_fixup = - fixup.rightmost_subexpression_fixup(false, false, Precedence::Jump); - match scan_right(&e.body, right_fixup, Precedence::Jump, 1, 1) { - Scan::Fail => Scan::Bailout, - Scan::Bailout | Scan::Consume => Scan::Consume, - } - } else { - Scan::Consume - } - } - Expr::Let(e) => { - if bailout_offset >= 1 { - return Scan::Consume; - } - let right_fixup = fixup.rightmost_subexpression_fixup(false, false, Precedence::Let); - let scan = scan_right( - &e.expr, - right_fixup, - Precedence::Let, - 1, - if fixup.next_operator < Precedence::Let { - 0 - } else { - 1 - }, - ); - match scan { - Scan::Fail | Scan::Bailout if fixup.next_operator < Precedence::Let => { - return Scan::Bailout; - } - Scan::Consume => return Scan::Consume, - _ => {} - } - if right_fixup.rightmost_subexpression_precedence(&e.expr) < Precedence::Let { - Scan::Consume - } else if let Scan::Fail = scan { - Scan::Bailout - } else { - Scan::Consume - } - } - Expr::Array(_) - | Expr::Assign(_) - | Expr::Async(_) - | Expr::Await(_) - | Expr::Binary(_) - | Expr::Block(_) - | Expr::Call(_) - | Expr::Cast(_) - | Expr::Const(_) - | Expr::Continue(_) - | Expr::Field(_) - | Expr::ForLoop(_) - | Expr::Group(_) - | Expr::If(_) - | Expr::Index(_) - | Expr::Infer(_) - | Expr::Lit(_) - | Expr::Loop(_) - | Expr::Macro(_) - | Expr::Match(_) - | Expr::MethodCall(_) - | Expr::Paren(_) - | Expr::Path(_) - | Expr::Range(_) - | Expr::Repeat(_) - | Expr::Struct(_) - | Expr::Try(_) - | Expr::TryBlock(_) - | Expr::Tuple(_) - | Expr::Unsafe(_) - | Expr::Verbatim(_) - | Expr::While(_) => match fixup.next_operator { - Precedence::Assign | Precedence::Range if precedence == Precedence::Range => Scan::Fail, - _ if precedence == Precedence::Let && fixup.next_operator < Precedence::Let => { - Scan::Fail - } - _ => consume_by_precedence, - }, - } -} diff --git a/vendor/syn/src/gen/clone.rs b/vendor/syn/src/gen/clone.rs deleted file mode 100644 index be2b698422da91..00000000000000 --- a/vendor/syn/src/gen/clone.rs +++ /dev/null @@ -1,2267 +0,0 @@ -// This file is @generated by syn-internal-codegen. -// It is not intended for manual editing. 
- -#![allow(clippy::clone_on_copy, clippy::expl_impl_clone_on_copy)] -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::Abi { - fn clone(&self) -> Self { - crate::Abi { - extern_token: self.extern_token.clone(), - name: self.name.clone(), - } - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::AngleBracketedGenericArguments { - fn clone(&self) -> Self { - crate::AngleBracketedGenericArguments { - colon2_token: self.colon2_token.clone(), - lt_token: self.lt_token.clone(), - args: self.args.clone(), - gt_token: self.gt_token.clone(), - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::Arm { - fn clone(&self) -> Self { - crate::Arm { - attrs: self.attrs.clone(), - pat: self.pat.clone(), - guard: self.guard.clone(), - fat_arrow_token: self.fat_arrow_token.clone(), - body: self.body.clone(), - comma: self.comma.clone(), - } - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::AssocConst { - fn clone(&self) -> Self { - crate::AssocConst { - ident: self.ident.clone(), - generics: self.generics.clone(), - eq_token: self.eq_token.clone(), - value: self.value.clone(), - } - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::AssocType { - fn clone(&self) -> Self { - crate::AssocType { - ident: self.ident.clone(), - generics: self.generics.clone(), - eq_token: self.eq_token.clone(), - ty: self.ty.clone(), - } - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Copy for crate::AttrStyle {} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::AttrStyle { - fn clone(&self) -> Self { - *self - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::Attribute { - fn clone(&self) -> Self { - crate::Attribute { - pound_token: self.pound_token.clone(), - style: self.style.clone(), - bracket_token: self.bracket_token.clone(), - meta: self.meta.clone(), - } - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::BareFnArg { - fn clone(&self) -> Self { - crate::BareFnArg { - attrs: self.attrs.clone(), - name: self.name.clone(), - ty: self.ty.clone(), - } - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::BareVariadic { - fn clone(&self) -> Self { - crate::BareVariadic { - attrs: self.attrs.clone(), - name: self.name.clone(), - dots: self.dots.clone(), - comma: self.comma.clone(), - } - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Copy for crate::BinOp {} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::BinOp { - fn clone(&self) -> Self { - *self - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::Block { - fn clone(&self) -> Self { - crate::Block { - brace_token: self.brace_token.clone(), - stmts: self.stmts.clone(), - } - } -} 
-#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::BoundLifetimes { - fn clone(&self) -> Self { - crate::BoundLifetimes { - for_token: self.for_token.clone(), - lt_token: self.lt_token.clone(), - lifetimes: self.lifetimes.clone(), - gt_token: self.gt_token.clone(), - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::CapturedParam { - fn clone(&self) -> Self { - match self { - crate::CapturedParam::Lifetime(v0) => { - crate::CapturedParam::Lifetime(v0.clone()) - } - crate::CapturedParam::Ident(v0) => crate::CapturedParam::Ident(v0.clone()), - } - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::ConstParam { - fn clone(&self) -> Self { - crate::ConstParam { - attrs: self.attrs.clone(), - const_token: self.const_token.clone(), - ident: self.ident.clone(), - colon_token: self.colon_token.clone(), - ty: self.ty.clone(), - eq_token: self.eq_token.clone(), - default: self.default.clone(), - } - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::Constraint { - fn clone(&self) -> Self { - crate::Constraint { - ident: self.ident.clone(), - generics: self.generics.clone(), - colon_token: self.colon_token.clone(), - bounds: self.bounds.clone(), - } - } -} -#[cfg(feature = "derive")] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::Data { - fn clone(&self) -> Self { - match self { - crate::Data::Struct(v0) => crate::Data::Struct(v0.clone()), - crate::Data::Enum(v0) => crate::Data::Enum(v0.clone()), - crate::Data::Union(v0) => crate::Data::Union(v0.clone()), - } - } -} -#[cfg(feature = "derive")] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::DataEnum { - fn clone(&self) -> Self { - crate::DataEnum { - enum_token: self.enum_token.clone(), - brace_token: self.brace_token.clone(), - variants: self.variants.clone(), - } - } -} -#[cfg(feature = "derive")] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::DataStruct { - fn clone(&self) -> Self { - crate::DataStruct { - struct_token: self.struct_token.clone(), - fields: self.fields.clone(), - semi_token: self.semi_token.clone(), - } - } -} -#[cfg(feature = "derive")] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::DataUnion { - fn clone(&self) -> Self { - crate::DataUnion { - union_token: self.union_token.clone(), - fields: self.fields.clone(), - } - } -} -#[cfg(feature = "derive")] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::DeriveInput { - fn clone(&self) -> Self { - crate::DeriveInput { - attrs: self.attrs.clone(), - vis: self.vis.clone(), - ident: self.ident.clone(), - generics: self.generics.clone(), - data: self.data.clone(), - } - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::Expr { - fn clone(&self) -> Self { - match self { - #[cfg(feature = "full")] - crate::Expr::Array(v0) => crate::Expr::Array(v0.clone()), - #[cfg(feature = "full")] - crate::Expr::Assign(v0) => crate::Expr::Assign(v0.clone()), - #[cfg(feature = "full")] - crate::Expr::Async(v0) => crate::Expr::Async(v0.clone()), - #[cfg(feature = "full")] - crate::Expr::Await(v0) => crate::Expr::Await(v0.clone()), - crate::Expr::Binary(v0) => 
crate::Expr::Binary(v0.clone()), - #[cfg(feature = "full")] - crate::Expr::Block(v0) => crate::Expr::Block(v0.clone()), - #[cfg(feature = "full")] - crate::Expr::Break(v0) => crate::Expr::Break(v0.clone()), - crate::Expr::Call(v0) => crate::Expr::Call(v0.clone()), - crate::Expr::Cast(v0) => crate::Expr::Cast(v0.clone()), - #[cfg(feature = "full")] - crate::Expr::Closure(v0) => crate::Expr::Closure(v0.clone()), - #[cfg(feature = "full")] - crate::Expr::Const(v0) => crate::Expr::Const(v0.clone()), - #[cfg(feature = "full")] - crate::Expr::Continue(v0) => crate::Expr::Continue(v0.clone()), - crate::Expr::Field(v0) => crate::Expr::Field(v0.clone()), - #[cfg(feature = "full")] - crate::Expr::ForLoop(v0) => crate::Expr::ForLoop(v0.clone()), - crate::Expr::Group(v0) => crate::Expr::Group(v0.clone()), - #[cfg(feature = "full")] - crate::Expr::If(v0) => crate::Expr::If(v0.clone()), - crate::Expr::Index(v0) => crate::Expr::Index(v0.clone()), - #[cfg(feature = "full")] - crate::Expr::Infer(v0) => crate::Expr::Infer(v0.clone()), - #[cfg(feature = "full")] - crate::Expr::Let(v0) => crate::Expr::Let(v0.clone()), - crate::Expr::Lit(v0) => crate::Expr::Lit(v0.clone()), - #[cfg(feature = "full")] - crate::Expr::Loop(v0) => crate::Expr::Loop(v0.clone()), - crate::Expr::Macro(v0) => crate::Expr::Macro(v0.clone()), - #[cfg(feature = "full")] - crate::Expr::Match(v0) => crate::Expr::Match(v0.clone()), - crate::Expr::MethodCall(v0) => crate::Expr::MethodCall(v0.clone()), - crate::Expr::Paren(v0) => crate::Expr::Paren(v0.clone()), - crate::Expr::Path(v0) => crate::Expr::Path(v0.clone()), - #[cfg(feature = "full")] - crate::Expr::Range(v0) => crate::Expr::Range(v0.clone()), - #[cfg(feature = "full")] - crate::Expr::RawAddr(v0) => crate::Expr::RawAddr(v0.clone()), - crate::Expr::Reference(v0) => crate::Expr::Reference(v0.clone()), - #[cfg(feature = "full")] - crate::Expr::Repeat(v0) => crate::Expr::Repeat(v0.clone()), - #[cfg(feature = "full")] - crate::Expr::Return(v0) => crate::Expr::Return(v0.clone()), - crate::Expr::Struct(v0) => crate::Expr::Struct(v0.clone()), - #[cfg(feature = "full")] - crate::Expr::Try(v0) => crate::Expr::Try(v0.clone()), - #[cfg(feature = "full")] - crate::Expr::TryBlock(v0) => crate::Expr::TryBlock(v0.clone()), - crate::Expr::Tuple(v0) => crate::Expr::Tuple(v0.clone()), - crate::Expr::Unary(v0) => crate::Expr::Unary(v0.clone()), - #[cfg(feature = "full")] - crate::Expr::Unsafe(v0) => crate::Expr::Unsafe(v0.clone()), - crate::Expr::Verbatim(v0) => crate::Expr::Verbatim(v0.clone()), - #[cfg(feature = "full")] - crate::Expr::While(v0) => crate::Expr::While(v0.clone()), - #[cfg(feature = "full")] - crate::Expr::Yield(v0) => crate::Expr::Yield(v0.clone()), - #[cfg(not(feature = "full"))] - _ => unreachable!(), - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::ExprArray { - fn clone(&self) -> Self { - crate::ExprArray { - attrs: self.attrs.clone(), - bracket_token: self.bracket_token.clone(), - elems: self.elems.clone(), - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::ExprAssign { - fn clone(&self) -> Self { - crate::ExprAssign { - attrs: self.attrs.clone(), - left: self.left.clone(), - eq_token: self.eq_token.clone(), - right: self.right.clone(), - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::ExprAsync { - fn clone(&self) -> Self { - crate::ExprAsync { - attrs: self.attrs.clone(), - 
async_token: self.async_token.clone(), - capture: self.capture.clone(), - block: self.block.clone(), - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::ExprAwait { - fn clone(&self) -> Self { - crate::ExprAwait { - attrs: self.attrs.clone(), - base: self.base.clone(), - dot_token: self.dot_token.clone(), - await_token: self.await_token.clone(), - } - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::ExprBinary { - fn clone(&self) -> Self { - crate::ExprBinary { - attrs: self.attrs.clone(), - left: self.left.clone(), - op: self.op.clone(), - right: self.right.clone(), - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::ExprBlock { - fn clone(&self) -> Self { - crate::ExprBlock { - attrs: self.attrs.clone(), - label: self.label.clone(), - block: self.block.clone(), - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::ExprBreak { - fn clone(&self) -> Self { - crate::ExprBreak { - attrs: self.attrs.clone(), - break_token: self.break_token.clone(), - label: self.label.clone(), - expr: self.expr.clone(), - } - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::ExprCall { - fn clone(&self) -> Self { - crate::ExprCall { - attrs: self.attrs.clone(), - func: self.func.clone(), - paren_token: self.paren_token.clone(), - args: self.args.clone(), - } - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::ExprCast { - fn clone(&self) -> Self { - crate::ExprCast { - attrs: self.attrs.clone(), - expr: self.expr.clone(), - as_token: self.as_token.clone(), - ty: self.ty.clone(), - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::ExprClosure { - fn clone(&self) -> Self { - crate::ExprClosure { - attrs: self.attrs.clone(), - lifetimes: self.lifetimes.clone(), - constness: self.constness.clone(), - movability: self.movability.clone(), - asyncness: self.asyncness.clone(), - capture: self.capture.clone(), - or1_token: self.or1_token.clone(), - inputs: self.inputs.clone(), - or2_token: self.or2_token.clone(), - output: self.output.clone(), - body: self.body.clone(), - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::ExprConst { - fn clone(&self) -> Self { - crate::ExprConst { - attrs: self.attrs.clone(), - const_token: self.const_token.clone(), - block: self.block.clone(), - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::ExprContinue { - fn clone(&self) -> Self { - crate::ExprContinue { - attrs: self.attrs.clone(), - continue_token: self.continue_token.clone(), - label: self.label.clone(), - } - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::ExprField { - fn clone(&self) -> Self { - crate::ExprField { - attrs: self.attrs.clone(), - base: self.base.clone(), - dot_token: self.dot_token.clone(), - member: self.member.clone(), - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::ExprForLoop { - fn clone(&self) -> Self { - crate::ExprForLoop 
{ - attrs: self.attrs.clone(), - label: self.label.clone(), - for_token: self.for_token.clone(), - pat: self.pat.clone(), - in_token: self.in_token.clone(), - expr: self.expr.clone(), - body: self.body.clone(), - } - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::ExprGroup { - fn clone(&self) -> Self { - crate::ExprGroup { - attrs: self.attrs.clone(), - group_token: self.group_token.clone(), - expr: self.expr.clone(), - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::ExprIf { - fn clone(&self) -> Self { - crate::ExprIf { - attrs: self.attrs.clone(), - if_token: self.if_token.clone(), - cond: self.cond.clone(), - then_branch: self.then_branch.clone(), - else_branch: self.else_branch.clone(), - } - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::ExprIndex { - fn clone(&self) -> Self { - crate::ExprIndex { - attrs: self.attrs.clone(), - expr: self.expr.clone(), - bracket_token: self.bracket_token.clone(), - index: self.index.clone(), - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::ExprInfer { - fn clone(&self) -> Self { - crate::ExprInfer { - attrs: self.attrs.clone(), - underscore_token: self.underscore_token.clone(), - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::ExprLet { - fn clone(&self) -> Self { - crate::ExprLet { - attrs: self.attrs.clone(), - let_token: self.let_token.clone(), - pat: self.pat.clone(), - eq_token: self.eq_token.clone(), - expr: self.expr.clone(), - } - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::ExprLit { - fn clone(&self) -> Self { - crate::ExprLit { - attrs: self.attrs.clone(), - lit: self.lit.clone(), - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::ExprLoop { - fn clone(&self) -> Self { - crate::ExprLoop { - attrs: self.attrs.clone(), - label: self.label.clone(), - loop_token: self.loop_token.clone(), - body: self.body.clone(), - } - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::ExprMacro { - fn clone(&self) -> Self { - crate::ExprMacro { - attrs: self.attrs.clone(), - mac: self.mac.clone(), - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::ExprMatch { - fn clone(&self) -> Self { - crate::ExprMatch { - attrs: self.attrs.clone(), - match_token: self.match_token.clone(), - expr: self.expr.clone(), - brace_token: self.brace_token.clone(), - arms: self.arms.clone(), - } - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::ExprMethodCall { - fn clone(&self) -> Self { - crate::ExprMethodCall { - attrs: self.attrs.clone(), - receiver: self.receiver.clone(), - dot_token: self.dot_token.clone(), - method: self.method.clone(), - turbofish: self.turbofish.clone(), - paren_token: self.paren_token.clone(), - args: self.args.clone(), - } - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::ExprParen { - fn clone(&self) -> Self { - 
crate::ExprParen { - attrs: self.attrs.clone(), - paren_token: self.paren_token.clone(), - expr: self.expr.clone(), - } - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::ExprPath { - fn clone(&self) -> Self { - crate::ExprPath { - attrs: self.attrs.clone(), - qself: self.qself.clone(), - path: self.path.clone(), - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::ExprRange { - fn clone(&self) -> Self { - crate::ExprRange { - attrs: self.attrs.clone(), - start: self.start.clone(), - limits: self.limits.clone(), - end: self.end.clone(), - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::ExprRawAddr { - fn clone(&self) -> Self { - crate::ExprRawAddr { - attrs: self.attrs.clone(), - and_token: self.and_token.clone(), - raw: self.raw.clone(), - mutability: self.mutability.clone(), - expr: self.expr.clone(), - } - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::ExprReference { - fn clone(&self) -> Self { - crate::ExprReference { - attrs: self.attrs.clone(), - and_token: self.and_token.clone(), - mutability: self.mutability.clone(), - expr: self.expr.clone(), - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::ExprRepeat { - fn clone(&self) -> Self { - crate::ExprRepeat { - attrs: self.attrs.clone(), - bracket_token: self.bracket_token.clone(), - expr: self.expr.clone(), - semi_token: self.semi_token.clone(), - len: self.len.clone(), - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::ExprReturn { - fn clone(&self) -> Self { - crate::ExprReturn { - attrs: self.attrs.clone(), - return_token: self.return_token.clone(), - expr: self.expr.clone(), - } - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::ExprStruct { - fn clone(&self) -> Self { - crate::ExprStruct { - attrs: self.attrs.clone(), - qself: self.qself.clone(), - path: self.path.clone(), - brace_token: self.brace_token.clone(), - fields: self.fields.clone(), - dot2_token: self.dot2_token.clone(), - rest: self.rest.clone(), - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::ExprTry { - fn clone(&self) -> Self { - crate::ExprTry { - attrs: self.attrs.clone(), - expr: self.expr.clone(), - question_token: self.question_token.clone(), - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::ExprTryBlock { - fn clone(&self) -> Self { - crate::ExprTryBlock { - attrs: self.attrs.clone(), - try_token: self.try_token.clone(), - block: self.block.clone(), - } - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::ExprTuple { - fn clone(&self) -> Self { - crate::ExprTuple { - attrs: self.attrs.clone(), - paren_token: self.paren_token.clone(), - elems: self.elems.clone(), - } - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::ExprUnary { - fn clone(&self) -> Self { - crate::ExprUnary { - attrs: self.attrs.clone(), - op: self.op.clone(), - expr: 
self.expr.clone(), - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::ExprUnsafe { - fn clone(&self) -> Self { - crate::ExprUnsafe { - attrs: self.attrs.clone(), - unsafe_token: self.unsafe_token.clone(), - block: self.block.clone(), - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::ExprWhile { - fn clone(&self) -> Self { - crate::ExprWhile { - attrs: self.attrs.clone(), - label: self.label.clone(), - while_token: self.while_token.clone(), - cond: self.cond.clone(), - body: self.body.clone(), - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::ExprYield { - fn clone(&self) -> Self { - crate::ExprYield { - attrs: self.attrs.clone(), - yield_token: self.yield_token.clone(), - expr: self.expr.clone(), - } - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::Field { - fn clone(&self) -> Self { - crate::Field { - attrs: self.attrs.clone(), - vis: self.vis.clone(), - mutability: self.mutability.clone(), - ident: self.ident.clone(), - colon_token: self.colon_token.clone(), - ty: self.ty.clone(), - } - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::FieldMutability { - fn clone(&self) -> Self { - match self { - crate::FieldMutability::None => crate::FieldMutability::None, - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::FieldPat { - fn clone(&self) -> Self { - crate::FieldPat { - attrs: self.attrs.clone(), - member: self.member.clone(), - colon_token: self.colon_token.clone(), - pat: self.pat.clone(), - } - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::FieldValue { - fn clone(&self) -> Self { - crate::FieldValue { - attrs: self.attrs.clone(), - member: self.member.clone(), - colon_token: self.colon_token.clone(), - expr: self.expr.clone(), - } - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::Fields { - fn clone(&self) -> Self { - match self { - crate::Fields::Named(v0) => crate::Fields::Named(v0.clone()), - crate::Fields::Unnamed(v0) => crate::Fields::Unnamed(v0.clone()), - crate::Fields::Unit => crate::Fields::Unit, - } - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::FieldsNamed { - fn clone(&self) -> Self { - crate::FieldsNamed { - brace_token: self.brace_token.clone(), - named: self.named.clone(), - } - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::FieldsUnnamed { - fn clone(&self) -> Self { - crate::FieldsUnnamed { - paren_token: self.paren_token.clone(), - unnamed: self.unnamed.clone(), - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::File { - fn clone(&self) -> Self { - crate::File { - shebang: self.shebang.clone(), - attrs: self.attrs.clone(), - items: self.items.clone(), - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::FnArg { - fn clone(&self) -> Self { - match self { - 
crate::FnArg::Receiver(v0) => crate::FnArg::Receiver(v0.clone()), - crate::FnArg::Typed(v0) => crate::FnArg::Typed(v0.clone()), - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::ForeignItem { - fn clone(&self) -> Self { - match self { - crate::ForeignItem::Fn(v0) => crate::ForeignItem::Fn(v0.clone()), - crate::ForeignItem::Static(v0) => crate::ForeignItem::Static(v0.clone()), - crate::ForeignItem::Type(v0) => crate::ForeignItem::Type(v0.clone()), - crate::ForeignItem::Macro(v0) => crate::ForeignItem::Macro(v0.clone()), - crate::ForeignItem::Verbatim(v0) => crate::ForeignItem::Verbatim(v0.clone()), - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::ForeignItemFn { - fn clone(&self) -> Self { - crate::ForeignItemFn { - attrs: self.attrs.clone(), - vis: self.vis.clone(), - sig: self.sig.clone(), - semi_token: self.semi_token.clone(), - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::ForeignItemMacro { - fn clone(&self) -> Self { - crate::ForeignItemMacro { - attrs: self.attrs.clone(), - mac: self.mac.clone(), - semi_token: self.semi_token.clone(), - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::ForeignItemStatic { - fn clone(&self) -> Self { - crate::ForeignItemStatic { - attrs: self.attrs.clone(), - vis: self.vis.clone(), - static_token: self.static_token.clone(), - mutability: self.mutability.clone(), - ident: self.ident.clone(), - colon_token: self.colon_token.clone(), - ty: self.ty.clone(), - semi_token: self.semi_token.clone(), - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::ForeignItemType { - fn clone(&self) -> Self { - crate::ForeignItemType { - attrs: self.attrs.clone(), - vis: self.vis.clone(), - type_token: self.type_token.clone(), - ident: self.ident.clone(), - generics: self.generics.clone(), - semi_token: self.semi_token.clone(), - } - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::GenericArgument { - fn clone(&self) -> Self { - match self { - crate::GenericArgument::Lifetime(v0) => { - crate::GenericArgument::Lifetime(v0.clone()) - } - crate::GenericArgument::Type(v0) => crate::GenericArgument::Type(v0.clone()), - crate::GenericArgument::Const(v0) => { - crate::GenericArgument::Const(v0.clone()) - } - crate::GenericArgument::AssocType(v0) => { - crate::GenericArgument::AssocType(v0.clone()) - } - crate::GenericArgument::AssocConst(v0) => { - crate::GenericArgument::AssocConst(v0.clone()) - } - crate::GenericArgument::Constraint(v0) => { - crate::GenericArgument::Constraint(v0.clone()) - } - } - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::GenericParam { - fn clone(&self) -> Self { - match self { - crate::GenericParam::Lifetime(v0) => { - crate::GenericParam::Lifetime(v0.clone()) - } - crate::GenericParam::Type(v0) => crate::GenericParam::Type(v0.clone()), - crate::GenericParam::Const(v0) => crate::GenericParam::Const(v0.clone()), - } - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::Generics { - fn clone(&self) -> Self { - crate::Generics { - lt_token: self.lt_token.clone(), - params: 
self.params.clone(), - gt_token: self.gt_token.clone(), - where_clause: self.where_clause.clone(), - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::ImplItem { - fn clone(&self) -> Self { - match self { - crate::ImplItem::Const(v0) => crate::ImplItem::Const(v0.clone()), - crate::ImplItem::Fn(v0) => crate::ImplItem::Fn(v0.clone()), - crate::ImplItem::Type(v0) => crate::ImplItem::Type(v0.clone()), - crate::ImplItem::Macro(v0) => crate::ImplItem::Macro(v0.clone()), - crate::ImplItem::Verbatim(v0) => crate::ImplItem::Verbatim(v0.clone()), - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::ImplItemConst { - fn clone(&self) -> Self { - crate::ImplItemConst { - attrs: self.attrs.clone(), - vis: self.vis.clone(), - defaultness: self.defaultness.clone(), - const_token: self.const_token.clone(), - ident: self.ident.clone(), - generics: self.generics.clone(), - colon_token: self.colon_token.clone(), - ty: self.ty.clone(), - eq_token: self.eq_token.clone(), - expr: self.expr.clone(), - semi_token: self.semi_token.clone(), - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::ImplItemFn { - fn clone(&self) -> Self { - crate::ImplItemFn { - attrs: self.attrs.clone(), - vis: self.vis.clone(), - defaultness: self.defaultness.clone(), - sig: self.sig.clone(), - block: self.block.clone(), - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::ImplItemMacro { - fn clone(&self) -> Self { - crate::ImplItemMacro { - attrs: self.attrs.clone(), - mac: self.mac.clone(), - semi_token: self.semi_token.clone(), - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::ImplItemType { - fn clone(&self) -> Self { - crate::ImplItemType { - attrs: self.attrs.clone(), - vis: self.vis.clone(), - defaultness: self.defaultness.clone(), - type_token: self.type_token.clone(), - ident: self.ident.clone(), - generics: self.generics.clone(), - eq_token: self.eq_token.clone(), - ty: self.ty.clone(), - semi_token: self.semi_token.clone(), - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::ImplRestriction { - fn clone(&self) -> Self { - match *self {} - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::Index { - fn clone(&self) -> Self { - crate::Index { - index: self.index.clone(), - span: self.span.clone(), - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::Item { - fn clone(&self) -> Self { - match self { - crate::Item::Const(v0) => crate::Item::Const(v0.clone()), - crate::Item::Enum(v0) => crate::Item::Enum(v0.clone()), - crate::Item::ExternCrate(v0) => crate::Item::ExternCrate(v0.clone()), - crate::Item::Fn(v0) => crate::Item::Fn(v0.clone()), - crate::Item::ForeignMod(v0) => crate::Item::ForeignMod(v0.clone()), - crate::Item::Impl(v0) => crate::Item::Impl(v0.clone()), - crate::Item::Macro(v0) => crate::Item::Macro(v0.clone()), - crate::Item::Mod(v0) => crate::Item::Mod(v0.clone()), - crate::Item::Static(v0) => crate::Item::Static(v0.clone()), - crate::Item::Struct(v0) => crate::Item::Struct(v0.clone()), - crate::Item::Trait(v0) => crate::Item::Trait(v0.clone()), - crate::Item::TraitAlias(v0) => 
crate::Item::TraitAlias(v0.clone()), - crate::Item::Type(v0) => crate::Item::Type(v0.clone()), - crate::Item::Union(v0) => crate::Item::Union(v0.clone()), - crate::Item::Use(v0) => crate::Item::Use(v0.clone()), - crate::Item::Verbatim(v0) => crate::Item::Verbatim(v0.clone()), - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::ItemConst { - fn clone(&self) -> Self { - crate::ItemConst { - attrs: self.attrs.clone(), - vis: self.vis.clone(), - const_token: self.const_token.clone(), - ident: self.ident.clone(), - generics: self.generics.clone(), - colon_token: self.colon_token.clone(), - ty: self.ty.clone(), - eq_token: self.eq_token.clone(), - expr: self.expr.clone(), - semi_token: self.semi_token.clone(), - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::ItemEnum { - fn clone(&self) -> Self { - crate::ItemEnum { - attrs: self.attrs.clone(), - vis: self.vis.clone(), - enum_token: self.enum_token.clone(), - ident: self.ident.clone(), - generics: self.generics.clone(), - brace_token: self.brace_token.clone(), - variants: self.variants.clone(), - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::ItemExternCrate { - fn clone(&self) -> Self { - crate::ItemExternCrate { - attrs: self.attrs.clone(), - vis: self.vis.clone(), - extern_token: self.extern_token.clone(), - crate_token: self.crate_token.clone(), - ident: self.ident.clone(), - rename: self.rename.clone(), - semi_token: self.semi_token.clone(), - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::ItemFn { - fn clone(&self) -> Self { - crate::ItemFn { - attrs: self.attrs.clone(), - vis: self.vis.clone(), - sig: self.sig.clone(), - block: self.block.clone(), - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::ItemForeignMod { - fn clone(&self) -> Self { - crate::ItemForeignMod { - attrs: self.attrs.clone(), - unsafety: self.unsafety.clone(), - abi: self.abi.clone(), - brace_token: self.brace_token.clone(), - items: self.items.clone(), - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::ItemImpl { - fn clone(&self) -> Self { - crate::ItemImpl { - attrs: self.attrs.clone(), - defaultness: self.defaultness.clone(), - unsafety: self.unsafety.clone(), - impl_token: self.impl_token.clone(), - generics: self.generics.clone(), - trait_: self.trait_.clone(), - self_ty: self.self_ty.clone(), - brace_token: self.brace_token.clone(), - items: self.items.clone(), - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::ItemMacro { - fn clone(&self) -> Self { - crate::ItemMacro { - attrs: self.attrs.clone(), - ident: self.ident.clone(), - mac: self.mac.clone(), - semi_token: self.semi_token.clone(), - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::ItemMod { - fn clone(&self) -> Self { - crate::ItemMod { - attrs: self.attrs.clone(), - vis: self.vis.clone(), - unsafety: self.unsafety.clone(), - mod_token: self.mod_token.clone(), - ident: self.ident.clone(), - content: self.content.clone(), - semi: self.semi.clone(), - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::ItemStatic { - fn 
clone(&self) -> Self { - crate::ItemStatic { - attrs: self.attrs.clone(), - vis: self.vis.clone(), - static_token: self.static_token.clone(), - mutability: self.mutability.clone(), - ident: self.ident.clone(), - colon_token: self.colon_token.clone(), - ty: self.ty.clone(), - eq_token: self.eq_token.clone(), - expr: self.expr.clone(), - semi_token: self.semi_token.clone(), - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::ItemStruct { - fn clone(&self) -> Self { - crate::ItemStruct { - attrs: self.attrs.clone(), - vis: self.vis.clone(), - struct_token: self.struct_token.clone(), - ident: self.ident.clone(), - generics: self.generics.clone(), - fields: self.fields.clone(), - semi_token: self.semi_token.clone(), - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::ItemTrait { - fn clone(&self) -> Self { - crate::ItemTrait { - attrs: self.attrs.clone(), - vis: self.vis.clone(), - unsafety: self.unsafety.clone(), - auto_token: self.auto_token.clone(), - restriction: self.restriction.clone(), - trait_token: self.trait_token.clone(), - ident: self.ident.clone(), - generics: self.generics.clone(), - colon_token: self.colon_token.clone(), - supertraits: self.supertraits.clone(), - brace_token: self.brace_token.clone(), - items: self.items.clone(), - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::ItemTraitAlias { - fn clone(&self) -> Self { - crate::ItemTraitAlias { - attrs: self.attrs.clone(), - vis: self.vis.clone(), - trait_token: self.trait_token.clone(), - ident: self.ident.clone(), - generics: self.generics.clone(), - eq_token: self.eq_token.clone(), - bounds: self.bounds.clone(), - semi_token: self.semi_token.clone(), - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::ItemType { - fn clone(&self) -> Self { - crate::ItemType { - attrs: self.attrs.clone(), - vis: self.vis.clone(), - type_token: self.type_token.clone(), - ident: self.ident.clone(), - generics: self.generics.clone(), - eq_token: self.eq_token.clone(), - ty: self.ty.clone(), - semi_token: self.semi_token.clone(), - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::ItemUnion { - fn clone(&self) -> Self { - crate::ItemUnion { - attrs: self.attrs.clone(), - vis: self.vis.clone(), - union_token: self.union_token.clone(), - ident: self.ident.clone(), - generics: self.generics.clone(), - fields: self.fields.clone(), - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::ItemUse { - fn clone(&self) -> Self { - crate::ItemUse { - attrs: self.attrs.clone(), - vis: self.vis.clone(), - use_token: self.use_token.clone(), - leading_colon: self.leading_colon.clone(), - tree: self.tree.clone(), - semi_token: self.semi_token.clone(), - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::Label { - fn clone(&self) -> Self { - crate::Label { - name: self.name.clone(), - colon_token: self.colon_token.clone(), - } - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::LifetimeParam { - fn clone(&self) -> Self { - crate::LifetimeParam { - attrs: self.attrs.clone(), - lifetime: self.lifetime.clone(), - colon_token: 
self.colon_token.clone(), - bounds: self.bounds.clone(), - } - } -} -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::Lit { - fn clone(&self) -> Self { - match self { - crate::Lit::Str(v0) => crate::Lit::Str(v0.clone()), - crate::Lit::ByteStr(v0) => crate::Lit::ByteStr(v0.clone()), - crate::Lit::CStr(v0) => crate::Lit::CStr(v0.clone()), - crate::Lit::Byte(v0) => crate::Lit::Byte(v0.clone()), - crate::Lit::Char(v0) => crate::Lit::Char(v0.clone()), - crate::Lit::Int(v0) => crate::Lit::Int(v0.clone()), - crate::Lit::Float(v0) => crate::Lit::Float(v0.clone()), - crate::Lit::Bool(v0) => crate::Lit::Bool(v0.clone()), - crate::Lit::Verbatim(v0) => crate::Lit::Verbatim(v0.clone()), - } - } -} -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::LitBool { - fn clone(&self) -> Self { - crate::LitBool { - value: self.value.clone(), - span: self.span.clone(), - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::Local { - fn clone(&self) -> Self { - crate::Local { - attrs: self.attrs.clone(), - let_token: self.let_token.clone(), - pat: self.pat.clone(), - init: self.init.clone(), - semi_token: self.semi_token.clone(), - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::LocalInit { - fn clone(&self) -> Self { - crate::LocalInit { - eq_token: self.eq_token.clone(), - expr: self.expr.clone(), - diverge: self.diverge.clone(), - } - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::Macro { - fn clone(&self) -> Self { - crate::Macro { - path: self.path.clone(), - bang_token: self.bang_token.clone(), - delimiter: self.delimiter.clone(), - tokens: self.tokens.clone(), - } - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::MacroDelimiter { - fn clone(&self) -> Self { - match self { - crate::MacroDelimiter::Paren(v0) => crate::MacroDelimiter::Paren(v0.clone()), - crate::MacroDelimiter::Brace(v0) => crate::MacroDelimiter::Brace(v0.clone()), - crate::MacroDelimiter::Bracket(v0) => { - crate::MacroDelimiter::Bracket(v0.clone()) - } - } - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::Member { - fn clone(&self) -> Self { - match self { - crate::Member::Named(v0) => crate::Member::Named(v0.clone()), - crate::Member::Unnamed(v0) => crate::Member::Unnamed(v0.clone()), - } - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::Meta { - fn clone(&self) -> Self { - match self { - crate::Meta::Path(v0) => crate::Meta::Path(v0.clone()), - crate::Meta::List(v0) => crate::Meta::List(v0.clone()), - crate::Meta::NameValue(v0) => crate::Meta::NameValue(v0.clone()), - } - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::MetaList { - fn clone(&self) -> Self { - crate::MetaList { - path: self.path.clone(), - delimiter: self.delimiter.clone(), - tokens: self.tokens.clone(), - } - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::MetaNameValue { - fn clone(&self) -> Self { - crate::MetaNameValue { - path: self.path.clone(), - eq_token: 
self.eq_token.clone(), - value: self.value.clone(), - } - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::ParenthesizedGenericArguments { - fn clone(&self) -> Self { - crate::ParenthesizedGenericArguments { - paren_token: self.paren_token.clone(), - inputs: self.inputs.clone(), - output: self.output.clone(), - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::Pat { - fn clone(&self) -> Self { - match self { - crate::Pat::Const(v0) => crate::Pat::Const(v0.clone()), - crate::Pat::Ident(v0) => crate::Pat::Ident(v0.clone()), - crate::Pat::Lit(v0) => crate::Pat::Lit(v0.clone()), - crate::Pat::Macro(v0) => crate::Pat::Macro(v0.clone()), - crate::Pat::Or(v0) => crate::Pat::Or(v0.clone()), - crate::Pat::Paren(v0) => crate::Pat::Paren(v0.clone()), - crate::Pat::Path(v0) => crate::Pat::Path(v0.clone()), - crate::Pat::Range(v0) => crate::Pat::Range(v0.clone()), - crate::Pat::Reference(v0) => crate::Pat::Reference(v0.clone()), - crate::Pat::Rest(v0) => crate::Pat::Rest(v0.clone()), - crate::Pat::Slice(v0) => crate::Pat::Slice(v0.clone()), - crate::Pat::Struct(v0) => crate::Pat::Struct(v0.clone()), - crate::Pat::Tuple(v0) => crate::Pat::Tuple(v0.clone()), - crate::Pat::TupleStruct(v0) => crate::Pat::TupleStruct(v0.clone()), - crate::Pat::Type(v0) => crate::Pat::Type(v0.clone()), - crate::Pat::Verbatim(v0) => crate::Pat::Verbatim(v0.clone()), - crate::Pat::Wild(v0) => crate::Pat::Wild(v0.clone()), - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::PatIdent { - fn clone(&self) -> Self { - crate::PatIdent { - attrs: self.attrs.clone(), - by_ref: self.by_ref.clone(), - mutability: self.mutability.clone(), - ident: self.ident.clone(), - subpat: self.subpat.clone(), - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::PatOr { - fn clone(&self) -> Self { - crate::PatOr { - attrs: self.attrs.clone(), - leading_vert: self.leading_vert.clone(), - cases: self.cases.clone(), - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::PatParen { - fn clone(&self) -> Self { - crate::PatParen { - attrs: self.attrs.clone(), - paren_token: self.paren_token.clone(), - pat: self.pat.clone(), - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::PatReference { - fn clone(&self) -> Self { - crate::PatReference { - attrs: self.attrs.clone(), - and_token: self.and_token.clone(), - mutability: self.mutability.clone(), - pat: self.pat.clone(), - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::PatRest { - fn clone(&self) -> Self { - crate::PatRest { - attrs: self.attrs.clone(), - dot2_token: self.dot2_token.clone(), - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::PatSlice { - fn clone(&self) -> Self { - crate::PatSlice { - attrs: self.attrs.clone(), - bracket_token: self.bracket_token.clone(), - elems: self.elems.clone(), - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::PatStruct { - fn clone(&self) -> Self { - crate::PatStruct { - attrs: self.attrs.clone(), - qself: self.qself.clone(), - path: self.path.clone(), - brace_token: 
self.brace_token.clone(), - fields: self.fields.clone(), - rest: self.rest.clone(), - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::PatTuple { - fn clone(&self) -> Self { - crate::PatTuple { - attrs: self.attrs.clone(), - paren_token: self.paren_token.clone(), - elems: self.elems.clone(), - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::PatTupleStruct { - fn clone(&self) -> Self { - crate::PatTupleStruct { - attrs: self.attrs.clone(), - qself: self.qself.clone(), - path: self.path.clone(), - paren_token: self.paren_token.clone(), - elems: self.elems.clone(), - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::PatType { - fn clone(&self) -> Self { - crate::PatType { - attrs: self.attrs.clone(), - pat: self.pat.clone(), - colon_token: self.colon_token.clone(), - ty: self.ty.clone(), - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::PatWild { - fn clone(&self) -> Self { - crate::PatWild { - attrs: self.attrs.clone(), - underscore_token: self.underscore_token.clone(), - } - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::Path { - fn clone(&self) -> Self { - crate::Path { - leading_colon: self.leading_colon.clone(), - segments: self.segments.clone(), - } - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::PathArguments { - fn clone(&self) -> Self { - match self { - crate::PathArguments::None => crate::PathArguments::None, - crate::PathArguments::AngleBracketed(v0) => { - crate::PathArguments::AngleBracketed(v0.clone()) - } - crate::PathArguments::Parenthesized(v0) => { - crate::PathArguments::Parenthesized(v0.clone()) - } - } - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::PathSegment { - fn clone(&self) -> Self { - crate::PathSegment { - ident: self.ident.clone(), - arguments: self.arguments.clone(), - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::PointerMutability { - fn clone(&self) -> Self { - match self { - crate::PointerMutability::Const(v0) => { - crate::PointerMutability::Const(v0.clone()) - } - crate::PointerMutability::Mut(v0) => { - crate::PointerMutability::Mut(v0.clone()) - } - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::PreciseCapture { - fn clone(&self) -> Self { - crate::PreciseCapture { - use_token: self.use_token.clone(), - lt_token: self.lt_token.clone(), - params: self.params.clone(), - gt_token: self.gt_token.clone(), - } - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::PredicateLifetime { - fn clone(&self) -> Self { - crate::PredicateLifetime { - lifetime: self.lifetime.clone(), - colon_token: self.colon_token.clone(), - bounds: self.bounds.clone(), - } - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::PredicateType { - fn clone(&self) -> Self { - crate::PredicateType { - lifetimes: self.lifetimes.clone(), - bounded_ty: self.bounded_ty.clone(), - 
colon_token: self.colon_token.clone(), - bounds: self.bounds.clone(), - } - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::QSelf { - fn clone(&self) -> Self { - crate::QSelf { - lt_token: self.lt_token.clone(), - ty: self.ty.clone(), - position: self.position.clone(), - as_token: self.as_token.clone(), - gt_token: self.gt_token.clone(), - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Copy for crate::RangeLimits {} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::RangeLimits { - fn clone(&self) -> Self { - *self - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::Receiver { - fn clone(&self) -> Self { - crate::Receiver { - attrs: self.attrs.clone(), - reference: self.reference.clone(), - mutability: self.mutability.clone(), - self_token: self.self_token.clone(), - colon_token: self.colon_token.clone(), - ty: self.ty.clone(), - } - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::ReturnType { - fn clone(&self) -> Self { - match self { - crate::ReturnType::Default => crate::ReturnType::Default, - crate::ReturnType::Type(v0, v1) => { - crate::ReturnType::Type(v0.clone(), v1.clone()) - } - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::Signature { - fn clone(&self) -> Self { - crate::Signature { - constness: self.constness.clone(), - asyncness: self.asyncness.clone(), - unsafety: self.unsafety.clone(), - abi: self.abi.clone(), - fn_token: self.fn_token.clone(), - ident: self.ident.clone(), - generics: self.generics.clone(), - paren_token: self.paren_token.clone(), - inputs: self.inputs.clone(), - variadic: self.variadic.clone(), - output: self.output.clone(), - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::StaticMutability { - fn clone(&self) -> Self { - match self { - crate::StaticMutability::Mut(v0) => crate::StaticMutability::Mut(v0.clone()), - crate::StaticMutability::None => crate::StaticMutability::None, - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::Stmt { - fn clone(&self) -> Self { - match self { - crate::Stmt::Local(v0) => crate::Stmt::Local(v0.clone()), - crate::Stmt::Item(v0) => crate::Stmt::Item(v0.clone()), - crate::Stmt::Expr(v0, v1) => crate::Stmt::Expr(v0.clone(), v1.clone()), - crate::Stmt::Macro(v0) => crate::Stmt::Macro(v0.clone()), - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::StmtMacro { - fn clone(&self) -> Self { - crate::StmtMacro { - attrs: self.attrs.clone(), - mac: self.mac.clone(), - semi_token: self.semi_token.clone(), - } - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::TraitBound { - fn clone(&self) -> Self { - crate::TraitBound { - paren_token: self.paren_token.clone(), - modifier: self.modifier.clone(), - lifetimes: self.lifetimes.clone(), - path: self.path.clone(), - } - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Copy for crate::TraitBoundModifier {} -#[cfg(any(feature = "derive", feature = 
"full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::TraitBoundModifier { - fn clone(&self) -> Self { - *self - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::TraitItem { - fn clone(&self) -> Self { - match self { - crate::TraitItem::Const(v0) => crate::TraitItem::Const(v0.clone()), - crate::TraitItem::Fn(v0) => crate::TraitItem::Fn(v0.clone()), - crate::TraitItem::Type(v0) => crate::TraitItem::Type(v0.clone()), - crate::TraitItem::Macro(v0) => crate::TraitItem::Macro(v0.clone()), - crate::TraitItem::Verbatim(v0) => crate::TraitItem::Verbatim(v0.clone()), - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::TraitItemConst { - fn clone(&self) -> Self { - crate::TraitItemConst { - attrs: self.attrs.clone(), - const_token: self.const_token.clone(), - ident: self.ident.clone(), - generics: self.generics.clone(), - colon_token: self.colon_token.clone(), - ty: self.ty.clone(), - default: self.default.clone(), - semi_token: self.semi_token.clone(), - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::TraitItemFn { - fn clone(&self) -> Self { - crate::TraitItemFn { - attrs: self.attrs.clone(), - sig: self.sig.clone(), - default: self.default.clone(), - semi_token: self.semi_token.clone(), - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::TraitItemMacro { - fn clone(&self) -> Self { - crate::TraitItemMacro { - attrs: self.attrs.clone(), - mac: self.mac.clone(), - semi_token: self.semi_token.clone(), - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::TraitItemType { - fn clone(&self) -> Self { - crate::TraitItemType { - attrs: self.attrs.clone(), - type_token: self.type_token.clone(), - ident: self.ident.clone(), - generics: self.generics.clone(), - colon_token: self.colon_token.clone(), - bounds: self.bounds.clone(), - default: self.default.clone(), - semi_token: self.semi_token.clone(), - } - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::Type { - fn clone(&self) -> Self { - match self { - crate::Type::Array(v0) => crate::Type::Array(v0.clone()), - crate::Type::BareFn(v0) => crate::Type::BareFn(v0.clone()), - crate::Type::Group(v0) => crate::Type::Group(v0.clone()), - crate::Type::ImplTrait(v0) => crate::Type::ImplTrait(v0.clone()), - crate::Type::Infer(v0) => crate::Type::Infer(v0.clone()), - crate::Type::Macro(v0) => crate::Type::Macro(v0.clone()), - crate::Type::Never(v0) => crate::Type::Never(v0.clone()), - crate::Type::Paren(v0) => crate::Type::Paren(v0.clone()), - crate::Type::Path(v0) => crate::Type::Path(v0.clone()), - crate::Type::Ptr(v0) => crate::Type::Ptr(v0.clone()), - crate::Type::Reference(v0) => crate::Type::Reference(v0.clone()), - crate::Type::Slice(v0) => crate::Type::Slice(v0.clone()), - crate::Type::TraitObject(v0) => crate::Type::TraitObject(v0.clone()), - crate::Type::Tuple(v0) => crate::Type::Tuple(v0.clone()), - crate::Type::Verbatim(v0) => crate::Type::Verbatim(v0.clone()), - } - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::TypeArray { - fn clone(&self) -> Self { - crate::TypeArray { - bracket_token: self.bracket_token.clone(), - elem: 
self.elem.clone(), - semi_token: self.semi_token.clone(), - len: self.len.clone(), - } - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::TypeBareFn { - fn clone(&self) -> Self { - crate::TypeBareFn { - lifetimes: self.lifetimes.clone(), - unsafety: self.unsafety.clone(), - abi: self.abi.clone(), - fn_token: self.fn_token.clone(), - paren_token: self.paren_token.clone(), - inputs: self.inputs.clone(), - variadic: self.variadic.clone(), - output: self.output.clone(), - } - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::TypeGroup { - fn clone(&self) -> Self { - crate::TypeGroup { - group_token: self.group_token.clone(), - elem: self.elem.clone(), - } - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::TypeImplTrait { - fn clone(&self) -> Self { - crate::TypeImplTrait { - impl_token: self.impl_token.clone(), - bounds: self.bounds.clone(), - } - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::TypeInfer { - fn clone(&self) -> Self { - crate::TypeInfer { - underscore_token: self.underscore_token.clone(), - } - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::TypeMacro { - fn clone(&self) -> Self { - crate::TypeMacro { - mac: self.mac.clone(), - } - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::TypeNever { - fn clone(&self) -> Self { - crate::TypeNever { - bang_token: self.bang_token.clone(), - } - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::TypeParam { - fn clone(&self) -> Self { - crate::TypeParam { - attrs: self.attrs.clone(), - ident: self.ident.clone(), - colon_token: self.colon_token.clone(), - bounds: self.bounds.clone(), - eq_token: self.eq_token.clone(), - default: self.default.clone(), - } - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::TypeParamBound { - fn clone(&self) -> Self { - match self { - crate::TypeParamBound::Trait(v0) => crate::TypeParamBound::Trait(v0.clone()), - crate::TypeParamBound::Lifetime(v0) => { - crate::TypeParamBound::Lifetime(v0.clone()) - } - #[cfg(feature = "full")] - crate::TypeParamBound::PreciseCapture(v0) => { - crate::TypeParamBound::PreciseCapture(v0.clone()) - } - crate::TypeParamBound::Verbatim(v0) => { - crate::TypeParamBound::Verbatim(v0.clone()) - } - #[cfg(not(feature = "full"))] - _ => unreachable!(), - } - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::TypeParen { - fn clone(&self) -> Self { - crate::TypeParen { - paren_token: self.paren_token.clone(), - elem: self.elem.clone(), - } - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::TypePath { - fn clone(&self) -> Self { - crate::TypePath { - qself: self.qself.clone(), - path: self.path.clone(), - } - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for 
crate::TypePtr { - fn clone(&self) -> Self { - crate::TypePtr { - star_token: self.star_token.clone(), - const_token: self.const_token.clone(), - mutability: self.mutability.clone(), - elem: self.elem.clone(), - } - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::TypeReference { - fn clone(&self) -> Self { - crate::TypeReference { - and_token: self.and_token.clone(), - lifetime: self.lifetime.clone(), - mutability: self.mutability.clone(), - elem: self.elem.clone(), - } - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::TypeSlice { - fn clone(&self) -> Self { - crate::TypeSlice { - bracket_token: self.bracket_token.clone(), - elem: self.elem.clone(), - } - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::TypeTraitObject { - fn clone(&self) -> Self { - crate::TypeTraitObject { - dyn_token: self.dyn_token.clone(), - bounds: self.bounds.clone(), - } - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::TypeTuple { - fn clone(&self) -> Self { - crate::TypeTuple { - paren_token: self.paren_token.clone(), - elems: self.elems.clone(), - } - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Copy for crate::UnOp {} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::UnOp { - fn clone(&self) -> Self { - *self - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::UseGlob { - fn clone(&self) -> Self { - crate::UseGlob { - star_token: self.star_token.clone(), - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::UseGroup { - fn clone(&self) -> Self { - crate::UseGroup { - brace_token: self.brace_token.clone(), - items: self.items.clone(), - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::UseName { - fn clone(&self) -> Self { - crate::UseName { - ident: self.ident.clone(), - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::UsePath { - fn clone(&self) -> Self { - crate::UsePath { - ident: self.ident.clone(), - colon2_token: self.colon2_token.clone(), - tree: self.tree.clone(), - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::UseRename { - fn clone(&self) -> Self { - crate::UseRename { - ident: self.ident.clone(), - as_token: self.as_token.clone(), - rename: self.rename.clone(), - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::UseTree { - fn clone(&self) -> Self { - match self { - crate::UseTree::Path(v0) => crate::UseTree::Path(v0.clone()), - crate::UseTree::Name(v0) => crate::UseTree::Name(v0.clone()), - crate::UseTree::Rename(v0) => crate::UseTree::Rename(v0.clone()), - crate::UseTree::Glob(v0) => crate::UseTree::Glob(v0.clone()), - crate::UseTree::Group(v0) => crate::UseTree::Group(v0.clone()), - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::Variadic { - fn clone(&self) -> Self 
{ - crate::Variadic { - attrs: self.attrs.clone(), - pat: self.pat.clone(), - dots: self.dots.clone(), - comma: self.comma.clone(), - } - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::Variant { - fn clone(&self) -> Self { - crate::Variant { - attrs: self.attrs.clone(), - ident: self.ident.clone(), - fields: self.fields.clone(), - discriminant: self.discriminant.clone(), - } - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::VisRestricted { - fn clone(&self) -> Self { - crate::VisRestricted { - pub_token: self.pub_token.clone(), - paren_token: self.paren_token.clone(), - in_token: self.in_token.clone(), - path: self.path.clone(), - } - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::Visibility { - fn clone(&self) -> Self { - match self { - crate::Visibility::Public(v0) => crate::Visibility::Public(v0.clone()), - crate::Visibility::Restricted(v0) => { - crate::Visibility::Restricted(v0.clone()) - } - crate::Visibility::Inherited => crate::Visibility::Inherited, - } - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::WhereClause { - fn clone(&self) -> Self { - crate::WhereClause { - where_token: self.where_token.clone(), - predicates: self.predicates.clone(), - } - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for crate::WherePredicate { - fn clone(&self) -> Self { - match self { - crate::WherePredicate::Lifetime(v0) => { - crate::WherePredicate::Lifetime(v0.clone()) - } - crate::WherePredicate::Type(v0) => crate::WherePredicate::Type(v0.clone()), - } - } -}
diff --git a/vendor/syn/src/gen/debug.rs b/vendor/syn/src/gen/debug.rs
deleted file mode 100644
index aa42e32c60ede5..00000000000000
--- a/vendor/syn/src/gen/debug.rs
+++ /dev/null
@@ -1,3238 +0,0 @@
-// This file is @generated by syn-internal-codegen.
-// It is not intended for manual editing.
- -#![allow(unknown_lints, non_local_definitions)] -use std::fmt::{self, Debug}; -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::Abi { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("Abi"); - formatter.field("extern_token", &self.extern_token); - formatter.field("name", &self.name); - formatter.finish() - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::AngleBracketedGenericArguments { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - self.debug(formatter, "AngleBracketedGenericArguments") - } -} -#[cfg(any(feature = "derive", feature = "full"))] -impl crate::AngleBracketedGenericArguments { - fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { - let mut formatter = formatter.debug_struct(name); - formatter.field("colon2_token", &self.colon2_token); - formatter.field("lt_token", &self.lt_token); - formatter.field("args", &self.args); - formatter.field("gt_token", &self.gt_token); - formatter.finish() - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::Arm { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("Arm"); - formatter.field("attrs", &self.attrs); - formatter.field("pat", &self.pat); - formatter.field("guard", &self.guard); - formatter.field("fat_arrow_token", &self.fat_arrow_token); - formatter.field("body", &self.body); - formatter.field("comma", &self.comma); - formatter.finish() - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::AssocConst { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("AssocConst"); - formatter.field("ident", &self.ident); - formatter.field("generics", &self.generics); - formatter.field("eq_token", &self.eq_token); - formatter.field("value", &self.value); - formatter.finish() - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::AssocType { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("AssocType"); - formatter.field("ident", &self.ident); - formatter.field("generics", &self.generics); - formatter.field("eq_token", &self.eq_token); - formatter.field("ty", &self.ty); - formatter.finish() - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::AttrStyle { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("AttrStyle::")?; - match self { - crate::AttrStyle::Outer => formatter.write_str("Outer"), - crate::AttrStyle::Inner(v0) => { - let mut formatter = formatter.debug_tuple("Inner"); - formatter.field(v0); - formatter.finish() - } - } - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::Attribute { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("Attribute"); - formatter.field("pound_token", &self.pound_token); - formatter.field("style", &self.style); - formatter.field("bracket_token", &self.bracket_token); - 
formatter.field("meta", &self.meta); - formatter.finish() - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::BareFnArg { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("BareFnArg"); - formatter.field("attrs", &self.attrs); - formatter.field("name", &self.name); - formatter.field("ty", &self.ty); - formatter.finish() - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::BareVariadic { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("BareVariadic"); - formatter.field("attrs", &self.attrs); - formatter.field("name", &self.name); - formatter.field("dots", &self.dots); - formatter.field("comma", &self.comma); - formatter.finish() - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::BinOp { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("BinOp::")?; - match self { - crate::BinOp::Add(v0) => { - let mut formatter = formatter.debug_tuple("Add"); - formatter.field(v0); - formatter.finish() - } - crate::BinOp::Sub(v0) => { - let mut formatter = formatter.debug_tuple("Sub"); - formatter.field(v0); - formatter.finish() - } - crate::BinOp::Mul(v0) => { - let mut formatter = formatter.debug_tuple("Mul"); - formatter.field(v0); - formatter.finish() - } - crate::BinOp::Div(v0) => { - let mut formatter = formatter.debug_tuple("Div"); - formatter.field(v0); - formatter.finish() - } - crate::BinOp::Rem(v0) => { - let mut formatter = formatter.debug_tuple("Rem"); - formatter.field(v0); - formatter.finish() - } - crate::BinOp::And(v0) => { - let mut formatter = formatter.debug_tuple("And"); - formatter.field(v0); - formatter.finish() - } - crate::BinOp::Or(v0) => { - let mut formatter = formatter.debug_tuple("Or"); - formatter.field(v0); - formatter.finish() - } - crate::BinOp::BitXor(v0) => { - let mut formatter = formatter.debug_tuple("BitXor"); - formatter.field(v0); - formatter.finish() - } - crate::BinOp::BitAnd(v0) => { - let mut formatter = formatter.debug_tuple("BitAnd"); - formatter.field(v0); - formatter.finish() - } - crate::BinOp::BitOr(v0) => { - let mut formatter = formatter.debug_tuple("BitOr"); - formatter.field(v0); - formatter.finish() - } - crate::BinOp::Shl(v0) => { - let mut formatter = formatter.debug_tuple("Shl"); - formatter.field(v0); - formatter.finish() - } - crate::BinOp::Shr(v0) => { - let mut formatter = formatter.debug_tuple("Shr"); - formatter.field(v0); - formatter.finish() - } - crate::BinOp::Eq(v0) => { - let mut formatter = formatter.debug_tuple("Eq"); - formatter.field(v0); - formatter.finish() - } - crate::BinOp::Lt(v0) => { - let mut formatter = formatter.debug_tuple("Lt"); - formatter.field(v0); - formatter.finish() - } - crate::BinOp::Le(v0) => { - let mut formatter = formatter.debug_tuple("Le"); - formatter.field(v0); - formatter.finish() - } - crate::BinOp::Ne(v0) => { - let mut formatter = formatter.debug_tuple("Ne"); - formatter.field(v0); - formatter.finish() - } - crate::BinOp::Ge(v0) => { - let mut formatter = formatter.debug_tuple("Ge"); - formatter.field(v0); - formatter.finish() - } - crate::BinOp::Gt(v0) => { - let mut formatter = formatter.debug_tuple("Gt"); - formatter.field(v0); - formatter.finish() - } - crate::BinOp::AddAssign(v0) => { - let 
mut formatter = formatter.debug_tuple("AddAssign"); - formatter.field(v0); - formatter.finish() - } - crate::BinOp::SubAssign(v0) => { - let mut formatter = formatter.debug_tuple("SubAssign"); - formatter.field(v0); - formatter.finish() - } - crate::BinOp::MulAssign(v0) => { - let mut formatter = formatter.debug_tuple("MulAssign"); - formatter.field(v0); - formatter.finish() - } - crate::BinOp::DivAssign(v0) => { - let mut formatter = formatter.debug_tuple("DivAssign"); - formatter.field(v0); - formatter.finish() - } - crate::BinOp::RemAssign(v0) => { - let mut formatter = formatter.debug_tuple("RemAssign"); - formatter.field(v0); - formatter.finish() - } - crate::BinOp::BitXorAssign(v0) => { - let mut formatter = formatter.debug_tuple("BitXorAssign"); - formatter.field(v0); - formatter.finish() - } - crate::BinOp::BitAndAssign(v0) => { - let mut formatter = formatter.debug_tuple("BitAndAssign"); - formatter.field(v0); - formatter.finish() - } - crate::BinOp::BitOrAssign(v0) => { - let mut formatter = formatter.debug_tuple("BitOrAssign"); - formatter.field(v0); - formatter.finish() - } - crate::BinOp::ShlAssign(v0) => { - let mut formatter = formatter.debug_tuple("ShlAssign"); - formatter.field(v0); - formatter.finish() - } - crate::BinOp::ShrAssign(v0) => { - let mut formatter = formatter.debug_tuple("ShrAssign"); - formatter.field(v0); - formatter.finish() - } - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::Block { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("Block"); - formatter.field("brace_token", &self.brace_token); - formatter.field("stmts", &self.stmts); - formatter.finish() - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::BoundLifetimes { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("BoundLifetimes"); - formatter.field("for_token", &self.for_token); - formatter.field("lt_token", &self.lt_token); - formatter.field("lifetimes", &self.lifetimes); - formatter.field("gt_token", &self.gt_token); - formatter.finish() - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::CapturedParam { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("CapturedParam::")?; - match self { - crate::CapturedParam::Lifetime(v0) => { - let mut formatter = formatter.debug_tuple("Lifetime"); - formatter.field(v0); - formatter.finish() - } - crate::CapturedParam::Ident(v0) => { - let mut formatter = formatter.debug_tuple("Ident"); - formatter.field(v0); - formatter.finish() - } - } - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::ConstParam { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("ConstParam"); - formatter.field("attrs", &self.attrs); - formatter.field("const_token", &self.const_token); - formatter.field("ident", &self.ident); - formatter.field("colon_token", &self.colon_token); - formatter.field("ty", &self.ty); - formatter.field("eq_token", &self.eq_token); - formatter.field("default", &self.default); - formatter.finish() - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::Constraint { - fn 
fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("Constraint"); - formatter.field("ident", &self.ident); - formatter.field("generics", &self.generics); - formatter.field("colon_token", &self.colon_token); - formatter.field("bounds", &self.bounds); - formatter.finish() - } -} -#[cfg(feature = "derive")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::Data { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Data::")?; - match self { - crate::Data::Struct(v0) => v0.debug(formatter, "Struct"), - crate::Data::Enum(v0) => v0.debug(formatter, "Enum"), - crate::Data::Union(v0) => v0.debug(formatter, "Union"), - } - } -} -#[cfg(feature = "derive")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::DataEnum { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - self.debug(formatter, "DataEnum") - } -} -#[cfg(feature = "derive")] -impl crate::DataEnum { - fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { - let mut formatter = formatter.debug_struct(name); - formatter.field("enum_token", &self.enum_token); - formatter.field("brace_token", &self.brace_token); - formatter.field("variants", &self.variants); - formatter.finish() - } -} -#[cfg(feature = "derive")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::DataStruct { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - self.debug(formatter, "DataStruct") - } -} -#[cfg(feature = "derive")] -impl crate::DataStruct { - fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { - let mut formatter = formatter.debug_struct(name); - formatter.field("struct_token", &self.struct_token); - formatter.field("fields", &self.fields); - formatter.field("semi_token", &self.semi_token); - formatter.finish() - } -} -#[cfg(feature = "derive")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::DataUnion { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - self.debug(formatter, "DataUnion") - } -} -#[cfg(feature = "derive")] -impl crate::DataUnion { - fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { - let mut formatter = formatter.debug_struct(name); - formatter.field("union_token", &self.union_token); - formatter.field("fields", &self.fields); - formatter.finish() - } -} -#[cfg(feature = "derive")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::DeriveInput { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("DeriveInput"); - formatter.field("attrs", &self.attrs); - formatter.field("vis", &self.vis); - formatter.field("ident", &self.ident); - formatter.field("generics", &self.generics); - formatter.field("data", &self.data); - formatter.finish() - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::Expr { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Expr::")?; - match self { - #[cfg(feature = "full")] - crate::Expr::Array(v0) => v0.debug(formatter, "Array"), - #[cfg(feature = "full")] - crate::Expr::Assign(v0) => v0.debug(formatter, "Assign"), - #[cfg(feature = "full")] - crate::Expr::Async(v0) => v0.debug(formatter, "Async"), - #[cfg(feature = "full")] - crate::Expr::Await(v0) => v0.debug(formatter, "Await"), - 
crate::Expr::Binary(v0) => v0.debug(formatter, "Binary"), - #[cfg(feature = "full")] - crate::Expr::Block(v0) => v0.debug(formatter, "Block"), - #[cfg(feature = "full")] - crate::Expr::Break(v0) => v0.debug(formatter, "Break"), - crate::Expr::Call(v0) => v0.debug(formatter, "Call"), - crate::Expr::Cast(v0) => v0.debug(formatter, "Cast"), - #[cfg(feature = "full")] - crate::Expr::Closure(v0) => v0.debug(formatter, "Closure"), - #[cfg(feature = "full")] - crate::Expr::Const(v0) => v0.debug(formatter, "Const"), - #[cfg(feature = "full")] - crate::Expr::Continue(v0) => v0.debug(formatter, "Continue"), - crate::Expr::Field(v0) => v0.debug(formatter, "Field"), - #[cfg(feature = "full")] - crate::Expr::ForLoop(v0) => v0.debug(formatter, "ForLoop"), - crate::Expr::Group(v0) => v0.debug(formatter, "Group"), - #[cfg(feature = "full")] - crate::Expr::If(v0) => v0.debug(formatter, "If"), - crate::Expr::Index(v0) => v0.debug(formatter, "Index"), - #[cfg(feature = "full")] - crate::Expr::Infer(v0) => v0.debug(formatter, "Infer"), - #[cfg(feature = "full")] - crate::Expr::Let(v0) => v0.debug(formatter, "Let"), - crate::Expr::Lit(v0) => v0.debug(formatter, "Lit"), - #[cfg(feature = "full")] - crate::Expr::Loop(v0) => v0.debug(formatter, "Loop"), - crate::Expr::Macro(v0) => v0.debug(formatter, "Macro"), - #[cfg(feature = "full")] - crate::Expr::Match(v0) => v0.debug(formatter, "Match"), - crate::Expr::MethodCall(v0) => v0.debug(formatter, "MethodCall"), - crate::Expr::Paren(v0) => v0.debug(formatter, "Paren"), - crate::Expr::Path(v0) => v0.debug(formatter, "Path"), - #[cfg(feature = "full")] - crate::Expr::Range(v0) => v0.debug(formatter, "Range"), - #[cfg(feature = "full")] - crate::Expr::RawAddr(v0) => v0.debug(formatter, "RawAddr"), - crate::Expr::Reference(v0) => v0.debug(formatter, "Reference"), - #[cfg(feature = "full")] - crate::Expr::Repeat(v0) => v0.debug(formatter, "Repeat"), - #[cfg(feature = "full")] - crate::Expr::Return(v0) => v0.debug(formatter, "Return"), - crate::Expr::Struct(v0) => v0.debug(formatter, "Struct"), - #[cfg(feature = "full")] - crate::Expr::Try(v0) => v0.debug(formatter, "Try"), - #[cfg(feature = "full")] - crate::Expr::TryBlock(v0) => v0.debug(formatter, "TryBlock"), - crate::Expr::Tuple(v0) => v0.debug(formatter, "Tuple"), - crate::Expr::Unary(v0) => v0.debug(formatter, "Unary"), - #[cfg(feature = "full")] - crate::Expr::Unsafe(v0) => v0.debug(formatter, "Unsafe"), - crate::Expr::Verbatim(v0) => { - let mut formatter = formatter.debug_tuple("Verbatim"); - formatter.field(v0); - formatter.finish() - } - #[cfg(feature = "full")] - crate::Expr::While(v0) => v0.debug(formatter, "While"), - #[cfg(feature = "full")] - crate::Expr::Yield(v0) => v0.debug(formatter, "Yield"), - #[cfg(not(feature = "full"))] - _ => unreachable!(), - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::ExprArray { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - self.debug(formatter, "ExprArray") - } -} -#[cfg(feature = "full")] -impl crate::ExprArray { - fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { - let mut formatter = formatter.debug_struct(name); - formatter.field("attrs", &self.attrs); - formatter.field("bracket_token", &self.bracket_token); - formatter.field("elems", &self.elems); - formatter.finish() - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::ExprAssign { - fn fmt(&self, formatter: &mut fmt::Formatter) -> 
fmt::Result { - self.debug(formatter, "ExprAssign") - } -} -#[cfg(feature = "full")] -impl crate::ExprAssign { - fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { - let mut formatter = formatter.debug_struct(name); - formatter.field("attrs", &self.attrs); - formatter.field("left", &self.left); - formatter.field("eq_token", &self.eq_token); - formatter.field("right", &self.right); - formatter.finish() - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::ExprAsync { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - self.debug(formatter, "ExprAsync") - } -} -#[cfg(feature = "full")] -impl crate::ExprAsync { - fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { - let mut formatter = formatter.debug_struct(name); - formatter.field("attrs", &self.attrs); - formatter.field("async_token", &self.async_token); - formatter.field("capture", &self.capture); - formatter.field("block", &self.block); - formatter.finish() - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::ExprAwait { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - self.debug(formatter, "ExprAwait") - } -} -#[cfg(feature = "full")] -impl crate::ExprAwait { - fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { - let mut formatter = formatter.debug_struct(name); - formatter.field("attrs", &self.attrs); - formatter.field("base", &self.base); - formatter.field("dot_token", &self.dot_token); - formatter.field("await_token", &self.await_token); - formatter.finish() - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::ExprBinary { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - self.debug(formatter, "ExprBinary") - } -} -#[cfg(any(feature = "derive", feature = "full"))] -impl crate::ExprBinary { - fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { - let mut formatter = formatter.debug_struct(name); - formatter.field("attrs", &self.attrs); - formatter.field("left", &self.left); - formatter.field("op", &self.op); - formatter.field("right", &self.right); - formatter.finish() - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::ExprBlock { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - self.debug(formatter, "ExprBlock") - } -} -#[cfg(feature = "full")] -impl crate::ExprBlock { - fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { - let mut formatter = formatter.debug_struct(name); - formatter.field("attrs", &self.attrs); - formatter.field("label", &self.label); - formatter.field("block", &self.block); - formatter.finish() - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::ExprBreak { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - self.debug(formatter, "ExprBreak") - } -} -#[cfg(feature = "full")] -impl crate::ExprBreak { - fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { - let mut formatter = formatter.debug_struct(name); - formatter.field("attrs", &self.attrs); - formatter.field("break_token", &self.break_token); - formatter.field("label", &self.label); - formatter.field("expr", &self.expr); - formatter.finish() - } -} -#[cfg(any(feature = "derive", feature = "full"))] 
-#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::ExprCall { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - self.debug(formatter, "ExprCall") - } -} -#[cfg(any(feature = "derive", feature = "full"))] -impl crate::ExprCall { - fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { - let mut formatter = formatter.debug_struct(name); - formatter.field("attrs", &self.attrs); - formatter.field("func", &self.func); - formatter.field("paren_token", &self.paren_token); - formatter.field("args", &self.args); - formatter.finish() - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::ExprCast { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - self.debug(formatter, "ExprCast") - } -} -#[cfg(any(feature = "derive", feature = "full"))] -impl crate::ExprCast { - fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { - let mut formatter = formatter.debug_struct(name); - formatter.field("attrs", &self.attrs); - formatter.field("expr", &self.expr); - formatter.field("as_token", &self.as_token); - formatter.field("ty", &self.ty); - formatter.finish() - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::ExprClosure { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - self.debug(formatter, "ExprClosure") - } -} -#[cfg(feature = "full")] -impl crate::ExprClosure { - fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { - let mut formatter = formatter.debug_struct(name); - formatter.field("attrs", &self.attrs); - formatter.field("lifetimes", &self.lifetimes); - formatter.field("constness", &self.constness); - formatter.field("movability", &self.movability); - formatter.field("asyncness", &self.asyncness); - formatter.field("capture", &self.capture); - formatter.field("or1_token", &self.or1_token); - formatter.field("inputs", &self.inputs); - formatter.field("or2_token", &self.or2_token); - formatter.field("output", &self.output); - formatter.field("body", &self.body); - formatter.finish() - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::ExprConst { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - self.debug(formatter, "ExprConst") - } -} -#[cfg(feature = "full")] -impl crate::ExprConst { - fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { - let mut formatter = formatter.debug_struct(name); - formatter.field("attrs", &self.attrs); - formatter.field("const_token", &self.const_token); - formatter.field("block", &self.block); - formatter.finish() - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::ExprContinue { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - self.debug(formatter, "ExprContinue") - } -} -#[cfg(feature = "full")] -impl crate::ExprContinue { - fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { - let mut formatter = formatter.debug_struct(name); - formatter.field("attrs", &self.attrs); - formatter.field("continue_token", &self.continue_token); - formatter.field("label", &self.label); - formatter.finish() - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::ExprField { - fn fmt(&self, formatter: &mut fmt::Formatter) -> 
fmt::Result { - self.debug(formatter, "ExprField") - } -} -#[cfg(any(feature = "derive", feature = "full"))] -impl crate::ExprField { - fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { - let mut formatter = formatter.debug_struct(name); - formatter.field("attrs", &self.attrs); - formatter.field("base", &self.base); - formatter.field("dot_token", &self.dot_token); - formatter.field("member", &self.member); - formatter.finish() - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::ExprForLoop { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - self.debug(formatter, "ExprForLoop") - } -} -#[cfg(feature = "full")] -impl crate::ExprForLoop { - fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { - let mut formatter = formatter.debug_struct(name); - formatter.field("attrs", &self.attrs); - formatter.field("label", &self.label); - formatter.field("for_token", &self.for_token); - formatter.field("pat", &self.pat); - formatter.field("in_token", &self.in_token); - formatter.field("expr", &self.expr); - formatter.field("body", &self.body); - formatter.finish() - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::ExprGroup { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - self.debug(formatter, "ExprGroup") - } -} -#[cfg(any(feature = "derive", feature = "full"))] -impl crate::ExprGroup { - fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { - let mut formatter = formatter.debug_struct(name); - formatter.field("attrs", &self.attrs); - formatter.field("group_token", &self.group_token); - formatter.field("expr", &self.expr); - formatter.finish() - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::ExprIf { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - self.debug(formatter, "ExprIf") - } -} -#[cfg(feature = "full")] -impl crate::ExprIf { - fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { - let mut formatter = formatter.debug_struct(name); - formatter.field("attrs", &self.attrs); - formatter.field("if_token", &self.if_token); - formatter.field("cond", &self.cond); - formatter.field("then_branch", &self.then_branch); - formatter.field("else_branch", &self.else_branch); - formatter.finish() - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::ExprIndex { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - self.debug(formatter, "ExprIndex") - } -} -#[cfg(any(feature = "derive", feature = "full"))] -impl crate::ExprIndex { - fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { - let mut formatter = formatter.debug_struct(name); - formatter.field("attrs", &self.attrs); - formatter.field("expr", &self.expr); - formatter.field("bracket_token", &self.bracket_token); - formatter.field("index", &self.index); - formatter.finish() - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::ExprInfer { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - self.debug(formatter, "ExprInfer") - } -} -#[cfg(feature = "full")] -impl crate::ExprInfer { - fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { - let mut formatter = 
formatter.debug_struct(name); - formatter.field("attrs", &self.attrs); - formatter.field("underscore_token", &self.underscore_token); - formatter.finish() - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::ExprLet { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - self.debug(formatter, "ExprLet") - } -} -#[cfg(feature = "full")] -impl crate::ExprLet { - fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { - let mut formatter = formatter.debug_struct(name); - formatter.field("attrs", &self.attrs); - formatter.field("let_token", &self.let_token); - formatter.field("pat", &self.pat); - formatter.field("eq_token", &self.eq_token); - formatter.field("expr", &self.expr); - formatter.finish() - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::ExprLit { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - self.debug(formatter, "ExprLit") - } -} -#[cfg(any(feature = "derive", feature = "full"))] -impl crate::ExprLit { - fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { - let mut formatter = formatter.debug_struct(name); - formatter.field("attrs", &self.attrs); - formatter.field("lit", &self.lit); - formatter.finish() - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::ExprLoop { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - self.debug(formatter, "ExprLoop") - } -} -#[cfg(feature = "full")] -impl crate::ExprLoop { - fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { - let mut formatter = formatter.debug_struct(name); - formatter.field("attrs", &self.attrs); - formatter.field("label", &self.label); - formatter.field("loop_token", &self.loop_token); - formatter.field("body", &self.body); - formatter.finish() - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::ExprMacro { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - self.debug(formatter, "ExprMacro") - } -} -#[cfg(any(feature = "derive", feature = "full"))] -impl crate::ExprMacro { - fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { - let mut formatter = formatter.debug_struct(name); - formatter.field("attrs", &self.attrs); - formatter.field("mac", &self.mac); - formatter.finish() - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::ExprMatch { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - self.debug(formatter, "ExprMatch") - } -} -#[cfg(feature = "full")] -impl crate::ExprMatch { - fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { - let mut formatter = formatter.debug_struct(name); - formatter.field("attrs", &self.attrs); - formatter.field("match_token", &self.match_token); - formatter.field("expr", &self.expr); - formatter.field("brace_token", &self.brace_token); - formatter.field("arms", &self.arms); - formatter.finish() - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::ExprMethodCall { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - self.debug(formatter, "ExprMethodCall") - } -} -#[cfg(any(feature = "derive", feature = "full"))] -impl crate::ExprMethodCall { - fn 
debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { - let mut formatter = formatter.debug_struct(name); - formatter.field("attrs", &self.attrs); - formatter.field("receiver", &self.receiver); - formatter.field("dot_token", &self.dot_token); - formatter.field("method", &self.method); - formatter.field("turbofish", &self.turbofish); - formatter.field("paren_token", &self.paren_token); - formatter.field("args", &self.args); - formatter.finish() - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::ExprParen { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - self.debug(formatter, "ExprParen") - } -} -#[cfg(any(feature = "derive", feature = "full"))] -impl crate::ExprParen { - fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { - let mut formatter = formatter.debug_struct(name); - formatter.field("attrs", &self.attrs); - formatter.field("paren_token", &self.paren_token); - formatter.field("expr", &self.expr); - formatter.finish() - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::ExprPath { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - self.debug(formatter, "ExprPath") - } -} -#[cfg(any(feature = "derive", feature = "full"))] -impl crate::ExprPath { - fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { - let mut formatter = formatter.debug_struct(name); - formatter.field("attrs", &self.attrs); - formatter.field("qself", &self.qself); - formatter.field("path", &self.path); - formatter.finish() - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::ExprRange { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - self.debug(formatter, "ExprRange") - } -} -#[cfg(feature = "full")] -impl crate::ExprRange { - fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { - let mut formatter = formatter.debug_struct(name); - formatter.field("attrs", &self.attrs); - formatter.field("start", &self.start); - formatter.field("limits", &self.limits); - formatter.field("end", &self.end); - formatter.finish() - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::ExprRawAddr { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - self.debug(formatter, "ExprRawAddr") - } -} -#[cfg(feature = "full")] -impl crate::ExprRawAddr { - fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { - let mut formatter = formatter.debug_struct(name); - formatter.field("attrs", &self.attrs); - formatter.field("and_token", &self.and_token); - formatter.field("raw", &self.raw); - formatter.field("mutability", &self.mutability); - formatter.field("expr", &self.expr); - formatter.finish() - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::ExprReference { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - self.debug(formatter, "ExprReference") - } -} -#[cfg(any(feature = "derive", feature = "full"))] -impl crate::ExprReference { - fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { - let mut formatter = formatter.debug_struct(name); - formatter.field("attrs", &self.attrs); - formatter.field("and_token", &self.and_token); - formatter.field("mutability", 
&self.mutability); - formatter.field("expr", &self.expr); - formatter.finish() - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::ExprRepeat { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - self.debug(formatter, "ExprRepeat") - } -} -#[cfg(feature = "full")] -impl crate::ExprRepeat { - fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { - let mut formatter = formatter.debug_struct(name); - formatter.field("attrs", &self.attrs); - formatter.field("bracket_token", &self.bracket_token); - formatter.field("expr", &self.expr); - formatter.field("semi_token", &self.semi_token); - formatter.field("len", &self.len); - formatter.finish() - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::ExprReturn { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - self.debug(formatter, "ExprReturn") - } -} -#[cfg(feature = "full")] -impl crate::ExprReturn { - fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { - let mut formatter = formatter.debug_struct(name); - formatter.field("attrs", &self.attrs); - formatter.field("return_token", &self.return_token); - formatter.field("expr", &self.expr); - formatter.finish() - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::ExprStruct { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - self.debug(formatter, "ExprStruct") - } -} -#[cfg(any(feature = "derive", feature = "full"))] -impl crate::ExprStruct { - fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { - let mut formatter = formatter.debug_struct(name); - formatter.field("attrs", &self.attrs); - formatter.field("qself", &self.qself); - formatter.field("path", &self.path); - formatter.field("brace_token", &self.brace_token); - formatter.field("fields", &self.fields); - formatter.field("dot2_token", &self.dot2_token); - formatter.field("rest", &self.rest); - formatter.finish() - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::ExprTry { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - self.debug(formatter, "ExprTry") - } -} -#[cfg(feature = "full")] -impl crate::ExprTry { - fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { - let mut formatter = formatter.debug_struct(name); - formatter.field("attrs", &self.attrs); - formatter.field("expr", &self.expr); - formatter.field("question_token", &self.question_token); - formatter.finish() - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::ExprTryBlock { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - self.debug(formatter, "ExprTryBlock") - } -} -#[cfg(feature = "full")] -impl crate::ExprTryBlock { - fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { - let mut formatter = formatter.debug_struct(name); - formatter.field("attrs", &self.attrs); - formatter.field("try_token", &self.try_token); - formatter.field("block", &self.block); - formatter.finish() - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::ExprTuple { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - self.debug(formatter, "ExprTuple") - } -} -#[cfg(any(feature = "derive", 
feature = "full"))] -impl crate::ExprTuple { - fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { - let mut formatter = formatter.debug_struct(name); - formatter.field("attrs", &self.attrs); - formatter.field("paren_token", &self.paren_token); - formatter.field("elems", &self.elems); - formatter.finish() - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::ExprUnary { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - self.debug(formatter, "ExprUnary") - } -} -#[cfg(any(feature = "derive", feature = "full"))] -impl crate::ExprUnary { - fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { - let mut formatter = formatter.debug_struct(name); - formatter.field("attrs", &self.attrs); - formatter.field("op", &self.op); - formatter.field("expr", &self.expr); - formatter.finish() - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::ExprUnsafe { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - self.debug(formatter, "ExprUnsafe") - } -} -#[cfg(feature = "full")] -impl crate::ExprUnsafe { - fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { - let mut formatter = formatter.debug_struct(name); - formatter.field("attrs", &self.attrs); - formatter.field("unsafe_token", &self.unsafe_token); - formatter.field("block", &self.block); - formatter.finish() - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::ExprWhile { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - self.debug(formatter, "ExprWhile") - } -} -#[cfg(feature = "full")] -impl crate::ExprWhile { - fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { - let mut formatter = formatter.debug_struct(name); - formatter.field("attrs", &self.attrs); - formatter.field("label", &self.label); - formatter.field("while_token", &self.while_token); - formatter.field("cond", &self.cond); - formatter.field("body", &self.body); - formatter.finish() - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::ExprYield { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - self.debug(formatter, "ExprYield") - } -} -#[cfg(feature = "full")] -impl crate::ExprYield { - fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { - let mut formatter = formatter.debug_struct(name); - formatter.field("attrs", &self.attrs); - formatter.field("yield_token", &self.yield_token); - formatter.field("expr", &self.expr); - formatter.finish() - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::Field { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("Field"); - formatter.field("attrs", &self.attrs); - formatter.field("vis", &self.vis); - formatter.field("mutability", &self.mutability); - formatter.field("ident", &self.ident); - formatter.field("colon_token", &self.colon_token); - formatter.field("ty", &self.ty); - formatter.finish() - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::FieldMutability { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("FieldMutability::")?; - match self 
{ - crate::FieldMutability::None => formatter.write_str("None"), - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::FieldPat { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("FieldPat"); - formatter.field("attrs", &self.attrs); - formatter.field("member", &self.member); - formatter.field("colon_token", &self.colon_token); - formatter.field("pat", &self.pat); - formatter.finish() - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::FieldValue { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("FieldValue"); - formatter.field("attrs", &self.attrs); - formatter.field("member", &self.member); - formatter.field("colon_token", &self.colon_token); - formatter.field("expr", &self.expr); - formatter.finish() - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::Fields { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Fields::")?; - match self { - crate::Fields::Named(v0) => v0.debug(formatter, "Named"), - crate::Fields::Unnamed(v0) => v0.debug(formatter, "Unnamed"), - crate::Fields::Unit => formatter.write_str("Unit"), - } - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::FieldsNamed { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - self.debug(formatter, "FieldsNamed") - } -} -#[cfg(any(feature = "derive", feature = "full"))] -impl crate::FieldsNamed { - fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { - let mut formatter = formatter.debug_struct(name); - formatter.field("brace_token", &self.brace_token); - formatter.field("named", &self.named); - formatter.finish() - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::FieldsUnnamed { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - self.debug(formatter, "FieldsUnnamed") - } -} -#[cfg(any(feature = "derive", feature = "full"))] -impl crate::FieldsUnnamed { - fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { - let mut formatter = formatter.debug_struct(name); - formatter.field("paren_token", &self.paren_token); - formatter.field("unnamed", &self.unnamed); - formatter.finish() - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::File { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("File"); - formatter.field("shebang", &self.shebang); - formatter.field("attrs", &self.attrs); - formatter.field("items", &self.items); - formatter.finish() - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::FnArg { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("FnArg::")?; - match self { - crate::FnArg::Receiver(v0) => { - let mut formatter = formatter.debug_tuple("Receiver"); - formatter.field(v0); - formatter.finish() - } - crate::FnArg::Typed(v0) => { - let mut formatter = formatter.debug_tuple("Typed"); - formatter.field(v0); - formatter.finish() - } - } - } -} -#[cfg(feature = "full")] 
-#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::ForeignItem { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("ForeignItem::")?; - match self { - crate::ForeignItem::Fn(v0) => v0.debug(formatter, "Fn"), - crate::ForeignItem::Static(v0) => v0.debug(formatter, "Static"), - crate::ForeignItem::Type(v0) => v0.debug(formatter, "Type"), - crate::ForeignItem::Macro(v0) => v0.debug(formatter, "Macro"), - crate::ForeignItem::Verbatim(v0) => { - let mut formatter = formatter.debug_tuple("Verbatim"); - formatter.field(v0); - formatter.finish() - } - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::ForeignItemFn { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - self.debug(formatter, "ForeignItemFn") - } -} -#[cfg(feature = "full")] -impl crate::ForeignItemFn { - fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { - let mut formatter = formatter.debug_struct(name); - formatter.field("attrs", &self.attrs); - formatter.field("vis", &self.vis); - formatter.field("sig", &self.sig); - formatter.field("semi_token", &self.semi_token); - formatter.finish() - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::ForeignItemMacro { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - self.debug(formatter, "ForeignItemMacro") - } -} -#[cfg(feature = "full")] -impl crate::ForeignItemMacro { - fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { - let mut formatter = formatter.debug_struct(name); - formatter.field("attrs", &self.attrs); - formatter.field("mac", &self.mac); - formatter.field("semi_token", &self.semi_token); - formatter.finish() - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::ForeignItemStatic { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - self.debug(formatter, "ForeignItemStatic") - } -} -#[cfg(feature = "full")] -impl crate::ForeignItemStatic { - fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { - let mut formatter = formatter.debug_struct(name); - formatter.field("attrs", &self.attrs); - formatter.field("vis", &self.vis); - formatter.field("static_token", &self.static_token); - formatter.field("mutability", &self.mutability); - formatter.field("ident", &self.ident); - formatter.field("colon_token", &self.colon_token); - formatter.field("ty", &self.ty); - formatter.field("semi_token", &self.semi_token); - formatter.finish() - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::ForeignItemType { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - self.debug(formatter, "ForeignItemType") - } -} -#[cfg(feature = "full")] -impl crate::ForeignItemType { - fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { - let mut formatter = formatter.debug_struct(name); - formatter.field("attrs", &self.attrs); - formatter.field("vis", &self.vis); - formatter.field("type_token", &self.type_token); - formatter.field("ident", &self.ident); - formatter.field("generics", &self.generics); - formatter.field("semi_token", &self.semi_token); - formatter.finish() - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::GenericArgument { - fn fmt(&self, formatter: 
&mut fmt::Formatter) -> fmt::Result { - formatter.write_str("GenericArgument::")?; - match self { - crate::GenericArgument::Lifetime(v0) => { - let mut formatter = formatter.debug_tuple("Lifetime"); - formatter.field(v0); - formatter.finish() - } - crate::GenericArgument::Type(v0) => { - let mut formatter = formatter.debug_tuple("Type"); - formatter.field(v0); - formatter.finish() - } - crate::GenericArgument::Const(v0) => { - let mut formatter = formatter.debug_tuple("Const"); - formatter.field(v0); - formatter.finish() - } - crate::GenericArgument::AssocType(v0) => { - let mut formatter = formatter.debug_tuple("AssocType"); - formatter.field(v0); - formatter.finish() - } - crate::GenericArgument::AssocConst(v0) => { - let mut formatter = formatter.debug_tuple("AssocConst"); - formatter.field(v0); - formatter.finish() - } - crate::GenericArgument::Constraint(v0) => { - let mut formatter = formatter.debug_tuple("Constraint"); - formatter.field(v0); - formatter.finish() - } - } - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::GenericParam { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("GenericParam::")?; - match self { - crate::GenericParam::Lifetime(v0) => { - let mut formatter = formatter.debug_tuple("Lifetime"); - formatter.field(v0); - formatter.finish() - } - crate::GenericParam::Type(v0) => { - let mut formatter = formatter.debug_tuple("Type"); - formatter.field(v0); - formatter.finish() - } - crate::GenericParam::Const(v0) => { - let mut formatter = formatter.debug_tuple("Const"); - formatter.field(v0); - formatter.finish() - } - } - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::Generics { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("Generics"); - formatter.field("lt_token", &self.lt_token); - formatter.field("params", &self.params); - formatter.field("gt_token", &self.gt_token); - formatter.field("where_clause", &self.where_clause); - formatter.finish() - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::ImplItem { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("ImplItem::")?; - match self { - crate::ImplItem::Const(v0) => v0.debug(formatter, "Const"), - crate::ImplItem::Fn(v0) => v0.debug(formatter, "Fn"), - crate::ImplItem::Type(v0) => v0.debug(formatter, "Type"), - crate::ImplItem::Macro(v0) => v0.debug(formatter, "Macro"), - crate::ImplItem::Verbatim(v0) => { - let mut formatter = formatter.debug_tuple("Verbatim"); - formatter.field(v0); - formatter.finish() - } - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::ImplItemConst { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - self.debug(formatter, "ImplItemConst") - } -} -#[cfg(feature = "full")] -impl crate::ImplItemConst { - fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { - let mut formatter = formatter.debug_struct(name); - formatter.field("attrs", &self.attrs); - formatter.field("vis", &self.vis); - formatter.field("defaultness", &self.defaultness); - formatter.field("const_token", &self.const_token); - formatter.field("ident", &self.ident); - formatter.field("generics", &self.generics); - formatter.field("colon_token", 
&self.colon_token); - formatter.field("ty", &self.ty); - formatter.field("eq_token", &self.eq_token); - formatter.field("expr", &self.expr); - formatter.field("semi_token", &self.semi_token); - formatter.finish() - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::ImplItemFn { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - self.debug(formatter, "ImplItemFn") - } -} -#[cfg(feature = "full")] -impl crate::ImplItemFn { - fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { - let mut formatter = formatter.debug_struct(name); - formatter.field("attrs", &self.attrs); - formatter.field("vis", &self.vis); - formatter.field("defaultness", &self.defaultness); - formatter.field("sig", &self.sig); - formatter.field("block", &self.block); - formatter.finish() - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::ImplItemMacro { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - self.debug(formatter, "ImplItemMacro") - } -} -#[cfg(feature = "full")] -impl crate::ImplItemMacro { - fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { - let mut formatter = formatter.debug_struct(name); - formatter.field("attrs", &self.attrs); - formatter.field("mac", &self.mac); - formatter.field("semi_token", &self.semi_token); - formatter.finish() - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::ImplItemType { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - self.debug(formatter, "ImplItemType") - } -} -#[cfg(feature = "full")] -impl crate::ImplItemType { - fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { - let mut formatter = formatter.debug_struct(name); - formatter.field("attrs", &self.attrs); - formatter.field("vis", &self.vis); - formatter.field("defaultness", &self.defaultness); - formatter.field("type_token", &self.type_token); - formatter.field("ident", &self.ident); - formatter.field("generics", &self.generics); - formatter.field("eq_token", &self.eq_token); - formatter.field("ty", &self.ty); - formatter.field("semi_token", &self.semi_token); - formatter.finish() - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::ImplRestriction { - fn fmt(&self, _formatter: &mut fmt::Formatter) -> fmt::Result { - match *self {} - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::Index { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("Index"); - formatter.field("index", &self.index); - formatter.field("span", &self.span); - formatter.finish() - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::Item { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Item::")?; - match self { - crate::Item::Const(v0) => v0.debug(formatter, "Const"), - crate::Item::Enum(v0) => v0.debug(formatter, "Enum"), - crate::Item::ExternCrate(v0) => v0.debug(formatter, "ExternCrate"), - crate::Item::Fn(v0) => v0.debug(formatter, "Fn"), - crate::Item::ForeignMod(v0) => v0.debug(formatter, "ForeignMod"), - crate::Item::Impl(v0) => v0.debug(formatter, "Impl"), - crate::Item::Macro(v0) => v0.debug(formatter, "Macro"), - crate::Item::Mod(v0) => 
v0.debug(formatter, "Mod"), - crate::Item::Static(v0) => v0.debug(formatter, "Static"), - crate::Item::Struct(v0) => v0.debug(formatter, "Struct"), - crate::Item::Trait(v0) => v0.debug(formatter, "Trait"), - crate::Item::TraitAlias(v0) => v0.debug(formatter, "TraitAlias"), - crate::Item::Type(v0) => v0.debug(formatter, "Type"), - crate::Item::Union(v0) => v0.debug(formatter, "Union"), - crate::Item::Use(v0) => v0.debug(formatter, "Use"), - crate::Item::Verbatim(v0) => { - let mut formatter = formatter.debug_tuple("Verbatim"); - formatter.field(v0); - formatter.finish() - } - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::ItemConst { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - self.debug(formatter, "ItemConst") - } -} -#[cfg(feature = "full")] -impl crate::ItemConst { - fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { - let mut formatter = formatter.debug_struct(name); - formatter.field("attrs", &self.attrs); - formatter.field("vis", &self.vis); - formatter.field("const_token", &self.const_token); - formatter.field("ident", &self.ident); - formatter.field("generics", &self.generics); - formatter.field("colon_token", &self.colon_token); - formatter.field("ty", &self.ty); - formatter.field("eq_token", &self.eq_token); - formatter.field("expr", &self.expr); - formatter.field("semi_token", &self.semi_token); - formatter.finish() - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::ItemEnum { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - self.debug(formatter, "ItemEnum") - } -} -#[cfg(feature = "full")] -impl crate::ItemEnum { - fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { - let mut formatter = formatter.debug_struct(name); - formatter.field("attrs", &self.attrs); - formatter.field("vis", &self.vis); - formatter.field("enum_token", &self.enum_token); - formatter.field("ident", &self.ident); - formatter.field("generics", &self.generics); - formatter.field("brace_token", &self.brace_token); - formatter.field("variants", &self.variants); - formatter.finish() - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::ItemExternCrate { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - self.debug(formatter, "ItemExternCrate") - } -} -#[cfg(feature = "full")] -impl crate::ItemExternCrate { - fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { - let mut formatter = formatter.debug_struct(name); - formatter.field("attrs", &self.attrs); - formatter.field("vis", &self.vis); - formatter.field("extern_token", &self.extern_token); - formatter.field("crate_token", &self.crate_token); - formatter.field("ident", &self.ident); - formatter.field("rename", &self.rename); - formatter.field("semi_token", &self.semi_token); - formatter.finish() - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::ItemFn { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - self.debug(formatter, "ItemFn") - } -} -#[cfg(feature = "full")] -impl crate::ItemFn { - fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { - let mut formatter = formatter.debug_struct(name); - formatter.field("attrs", &self.attrs); - formatter.field("vis", &self.vis); - formatter.field("sig", &self.sig); - formatter.field("block", &self.block); 
- formatter.finish() - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::ItemForeignMod { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - self.debug(formatter, "ItemForeignMod") - } -} -#[cfg(feature = "full")] -impl crate::ItemForeignMod { - fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { - let mut formatter = formatter.debug_struct(name); - formatter.field("attrs", &self.attrs); - formatter.field("unsafety", &self.unsafety); - formatter.field("abi", &self.abi); - formatter.field("brace_token", &self.brace_token); - formatter.field("items", &self.items); - formatter.finish() - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::ItemImpl { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - self.debug(formatter, "ItemImpl") - } -} -#[cfg(feature = "full")] -impl crate::ItemImpl { - fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { - let mut formatter = formatter.debug_struct(name); - formatter.field("attrs", &self.attrs); - formatter.field("defaultness", &self.defaultness); - formatter.field("unsafety", &self.unsafety); - formatter.field("impl_token", &self.impl_token); - formatter.field("generics", &self.generics); - formatter.field("trait_", &self.trait_); - formatter.field("self_ty", &self.self_ty); - formatter.field("brace_token", &self.brace_token); - formatter.field("items", &self.items); - formatter.finish() - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::ItemMacro { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - self.debug(formatter, "ItemMacro") - } -} -#[cfg(feature = "full")] -impl crate::ItemMacro { - fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { - let mut formatter = formatter.debug_struct(name); - formatter.field("attrs", &self.attrs); - formatter.field("ident", &self.ident); - formatter.field("mac", &self.mac); - formatter.field("semi_token", &self.semi_token); - formatter.finish() - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::ItemMod { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - self.debug(formatter, "ItemMod") - } -} -#[cfg(feature = "full")] -impl crate::ItemMod { - fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { - let mut formatter = formatter.debug_struct(name); - formatter.field("attrs", &self.attrs); - formatter.field("vis", &self.vis); - formatter.field("unsafety", &self.unsafety); - formatter.field("mod_token", &self.mod_token); - formatter.field("ident", &self.ident); - formatter.field("content", &self.content); - formatter.field("semi", &self.semi); - formatter.finish() - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::ItemStatic { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - self.debug(formatter, "ItemStatic") - } -} -#[cfg(feature = "full")] -impl crate::ItemStatic { - fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { - let mut formatter = formatter.debug_struct(name); - formatter.field("attrs", &self.attrs); - formatter.field("vis", &self.vis); - formatter.field("static_token", &self.static_token); - formatter.field("mutability", &self.mutability); - formatter.field("ident", &self.ident); - 
formatter.field("colon_token", &self.colon_token); - formatter.field("ty", &self.ty); - formatter.field("eq_token", &self.eq_token); - formatter.field("expr", &self.expr); - formatter.field("semi_token", &self.semi_token); - formatter.finish() - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::ItemStruct { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - self.debug(formatter, "ItemStruct") - } -} -#[cfg(feature = "full")] -impl crate::ItemStruct { - fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { - let mut formatter = formatter.debug_struct(name); - formatter.field("attrs", &self.attrs); - formatter.field("vis", &self.vis); - formatter.field("struct_token", &self.struct_token); - formatter.field("ident", &self.ident); - formatter.field("generics", &self.generics); - formatter.field("fields", &self.fields); - formatter.field("semi_token", &self.semi_token); - formatter.finish() - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::ItemTrait { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - self.debug(formatter, "ItemTrait") - } -} -#[cfg(feature = "full")] -impl crate::ItemTrait { - fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { - let mut formatter = formatter.debug_struct(name); - formatter.field("attrs", &self.attrs); - formatter.field("vis", &self.vis); - formatter.field("unsafety", &self.unsafety); - formatter.field("auto_token", &self.auto_token); - formatter.field("restriction", &self.restriction); - formatter.field("trait_token", &self.trait_token); - formatter.field("ident", &self.ident); - formatter.field("generics", &self.generics); - formatter.field("colon_token", &self.colon_token); - formatter.field("supertraits", &self.supertraits); - formatter.field("brace_token", &self.brace_token); - formatter.field("items", &self.items); - formatter.finish() - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::ItemTraitAlias { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - self.debug(formatter, "ItemTraitAlias") - } -} -#[cfg(feature = "full")] -impl crate::ItemTraitAlias { - fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { - let mut formatter = formatter.debug_struct(name); - formatter.field("attrs", &self.attrs); - formatter.field("vis", &self.vis); - formatter.field("trait_token", &self.trait_token); - formatter.field("ident", &self.ident); - formatter.field("generics", &self.generics); - formatter.field("eq_token", &self.eq_token); - formatter.field("bounds", &self.bounds); - formatter.field("semi_token", &self.semi_token); - formatter.finish() - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::ItemType { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - self.debug(formatter, "ItemType") - } -} -#[cfg(feature = "full")] -impl crate::ItemType { - fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { - let mut formatter = formatter.debug_struct(name); - formatter.field("attrs", &self.attrs); - formatter.field("vis", &self.vis); - formatter.field("type_token", &self.type_token); - formatter.field("ident", &self.ident); - formatter.field("generics", &self.generics); - formatter.field("eq_token", &self.eq_token); - formatter.field("ty", &self.ty); - 
formatter.field("semi_token", &self.semi_token); - formatter.finish() - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::ItemUnion { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - self.debug(formatter, "ItemUnion") - } -} -#[cfg(feature = "full")] -impl crate::ItemUnion { - fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { - let mut formatter = formatter.debug_struct(name); - formatter.field("attrs", &self.attrs); - formatter.field("vis", &self.vis); - formatter.field("union_token", &self.union_token); - formatter.field("ident", &self.ident); - formatter.field("generics", &self.generics); - formatter.field("fields", &self.fields); - formatter.finish() - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::ItemUse { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - self.debug(formatter, "ItemUse") - } -} -#[cfg(feature = "full")] -impl crate::ItemUse { - fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { - let mut formatter = formatter.debug_struct(name); - formatter.field("attrs", &self.attrs); - formatter.field("vis", &self.vis); - formatter.field("use_token", &self.use_token); - formatter.field("leading_colon", &self.leading_colon); - formatter.field("tree", &self.tree); - formatter.field("semi_token", &self.semi_token); - formatter.finish() - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::Label { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("Label"); - formatter.field("name", &self.name); - formatter.field("colon_token", &self.colon_token); - formatter.finish() - } -} -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::Lifetime { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - self.debug(formatter, "Lifetime") - } -} -impl crate::Lifetime { - fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { - let mut formatter = formatter.debug_struct(name); - formatter.field("apostrophe", &self.apostrophe); - formatter.field("ident", &self.ident); - formatter.finish() - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::LifetimeParam { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("LifetimeParam"); - formatter.field("attrs", &self.attrs); - formatter.field("lifetime", &self.lifetime); - formatter.field("colon_token", &self.colon_token); - formatter.field("bounds", &self.bounds); - formatter.finish() - } -} -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::Lit { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Lit::")?; - match self { - crate::Lit::Str(v0) => v0.debug(formatter, "Str"), - crate::Lit::ByteStr(v0) => v0.debug(formatter, "ByteStr"), - crate::Lit::CStr(v0) => v0.debug(formatter, "CStr"), - crate::Lit::Byte(v0) => v0.debug(formatter, "Byte"), - crate::Lit::Char(v0) => v0.debug(formatter, "Char"), - crate::Lit::Int(v0) => v0.debug(formatter, "Int"), - crate::Lit::Float(v0) => v0.debug(formatter, "Float"), - crate::Lit::Bool(v0) => v0.debug(formatter, "Bool"), - crate::Lit::Verbatim(v0) => { - let mut formatter = formatter.debug_tuple("Verbatim"); - formatter.field(v0); - 
formatter.finish() - } - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::Local { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - self.debug(formatter, "Local") - } -} -#[cfg(feature = "full")] -impl crate::Local { - fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { - let mut formatter = formatter.debug_struct(name); - formatter.field("attrs", &self.attrs); - formatter.field("let_token", &self.let_token); - formatter.field("pat", &self.pat); - formatter.field("init", &self.init); - formatter.field("semi_token", &self.semi_token); - formatter.finish() - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::LocalInit { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("LocalInit"); - formatter.field("eq_token", &self.eq_token); - formatter.field("expr", &self.expr); - formatter.field("diverge", &self.diverge); - formatter.finish() - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::Macro { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("Macro"); - formatter.field("path", &self.path); - formatter.field("bang_token", &self.bang_token); - formatter.field("delimiter", &self.delimiter); - formatter.field("tokens", &self.tokens); - formatter.finish() - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::MacroDelimiter { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("MacroDelimiter::")?; - match self { - crate::MacroDelimiter::Paren(v0) => { - let mut formatter = formatter.debug_tuple("Paren"); - formatter.field(v0); - formatter.finish() - } - crate::MacroDelimiter::Brace(v0) => { - let mut formatter = formatter.debug_tuple("Brace"); - formatter.field(v0); - formatter.finish() - } - crate::MacroDelimiter::Bracket(v0) => { - let mut formatter = formatter.debug_tuple("Bracket"); - formatter.field(v0); - formatter.finish() - } - } - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::Member { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Member::")?; - match self { - crate::Member::Named(v0) => { - let mut formatter = formatter.debug_tuple("Named"); - formatter.field(v0); - formatter.finish() - } - crate::Member::Unnamed(v0) => { - let mut formatter = formatter.debug_tuple("Unnamed"); - formatter.field(v0); - formatter.finish() - } - } - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::Meta { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Meta::")?; - match self { - crate::Meta::Path(v0) => v0.debug(formatter, "Path"), - crate::Meta::List(v0) => v0.debug(formatter, "List"), - crate::Meta::NameValue(v0) => v0.debug(formatter, "NameValue"), - } - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::MetaList { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - self.debug(formatter, "MetaList") - } -} -#[cfg(any(feature = "derive", feature = "full"))] -impl 
crate::MetaList { - fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { - let mut formatter = formatter.debug_struct(name); - formatter.field("path", &self.path); - formatter.field("delimiter", &self.delimiter); - formatter.field("tokens", &self.tokens); - formatter.finish() - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::MetaNameValue { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - self.debug(formatter, "MetaNameValue") - } -} -#[cfg(any(feature = "derive", feature = "full"))] -impl crate::MetaNameValue { - fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { - let mut formatter = formatter.debug_struct(name); - formatter.field("path", &self.path); - formatter.field("eq_token", &self.eq_token); - formatter.field("value", &self.value); - formatter.finish() - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::ParenthesizedGenericArguments { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - self.debug(formatter, "ParenthesizedGenericArguments") - } -} -#[cfg(any(feature = "derive", feature = "full"))] -impl crate::ParenthesizedGenericArguments { - fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { - let mut formatter = formatter.debug_struct(name); - formatter.field("paren_token", &self.paren_token); - formatter.field("inputs", &self.inputs); - formatter.field("output", &self.output); - formatter.finish() - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::Pat { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Pat::")?; - match self { - crate::Pat::Const(v0) => v0.debug(formatter, "Const"), - crate::Pat::Ident(v0) => v0.debug(formatter, "Ident"), - crate::Pat::Lit(v0) => v0.debug(formatter, "Lit"), - crate::Pat::Macro(v0) => v0.debug(formatter, "Macro"), - crate::Pat::Or(v0) => v0.debug(formatter, "Or"), - crate::Pat::Paren(v0) => v0.debug(formatter, "Paren"), - crate::Pat::Path(v0) => v0.debug(formatter, "Path"), - crate::Pat::Range(v0) => v0.debug(formatter, "Range"), - crate::Pat::Reference(v0) => v0.debug(formatter, "Reference"), - crate::Pat::Rest(v0) => v0.debug(formatter, "Rest"), - crate::Pat::Slice(v0) => v0.debug(formatter, "Slice"), - crate::Pat::Struct(v0) => v0.debug(formatter, "Struct"), - crate::Pat::Tuple(v0) => v0.debug(formatter, "Tuple"), - crate::Pat::TupleStruct(v0) => v0.debug(formatter, "TupleStruct"), - crate::Pat::Type(v0) => v0.debug(formatter, "Type"), - crate::Pat::Verbatim(v0) => { - let mut formatter = formatter.debug_tuple("Verbatim"); - formatter.field(v0); - formatter.finish() - } - crate::Pat::Wild(v0) => v0.debug(formatter, "Wild"), - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::PatIdent { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - self.debug(formatter, "PatIdent") - } -} -#[cfg(feature = "full")] -impl crate::PatIdent { - fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { - let mut formatter = formatter.debug_struct(name); - formatter.field("attrs", &self.attrs); - formatter.field("by_ref", &self.by_ref); - formatter.field("mutability", &self.mutability); - formatter.field("ident", &self.ident); - formatter.field("subpat", &self.subpat); - formatter.finish() 
- } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::PatOr { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - self.debug(formatter, "PatOr") - } -} -#[cfg(feature = "full")] -impl crate::PatOr { - fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { - let mut formatter = formatter.debug_struct(name); - formatter.field("attrs", &self.attrs); - formatter.field("leading_vert", &self.leading_vert); - formatter.field("cases", &self.cases); - formatter.finish() - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::PatParen { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - self.debug(formatter, "PatParen") - } -} -#[cfg(feature = "full")] -impl crate::PatParen { - fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { - let mut formatter = formatter.debug_struct(name); - formatter.field("attrs", &self.attrs); - formatter.field("paren_token", &self.paren_token); - formatter.field("pat", &self.pat); - formatter.finish() - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::PatReference { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - self.debug(formatter, "PatReference") - } -} -#[cfg(feature = "full")] -impl crate::PatReference { - fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { - let mut formatter = formatter.debug_struct(name); - formatter.field("attrs", &self.attrs); - formatter.field("and_token", &self.and_token); - formatter.field("mutability", &self.mutability); - formatter.field("pat", &self.pat); - formatter.finish() - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::PatRest { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - self.debug(formatter, "PatRest") - } -} -#[cfg(feature = "full")] -impl crate::PatRest { - fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { - let mut formatter = formatter.debug_struct(name); - formatter.field("attrs", &self.attrs); - formatter.field("dot2_token", &self.dot2_token); - formatter.finish() - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::PatSlice { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - self.debug(formatter, "PatSlice") - } -} -#[cfg(feature = "full")] -impl crate::PatSlice { - fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { - let mut formatter = formatter.debug_struct(name); - formatter.field("attrs", &self.attrs); - formatter.field("bracket_token", &self.bracket_token); - formatter.field("elems", &self.elems); - formatter.finish() - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::PatStruct { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - self.debug(formatter, "PatStruct") - } -} -#[cfg(feature = "full")] -impl crate::PatStruct { - fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { - let mut formatter = formatter.debug_struct(name); - formatter.field("attrs", &self.attrs); - formatter.field("qself", &self.qself); - formatter.field("path", &self.path); - formatter.field("brace_token", &self.brace_token); - formatter.field("fields", &self.fields); - formatter.field("rest", &self.rest); - formatter.finish() - } -} 
-#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::PatTuple { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - self.debug(formatter, "PatTuple") - } -} -#[cfg(feature = "full")] -impl crate::PatTuple { - fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { - let mut formatter = formatter.debug_struct(name); - formatter.field("attrs", &self.attrs); - formatter.field("paren_token", &self.paren_token); - formatter.field("elems", &self.elems); - formatter.finish() - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::PatTupleStruct { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - self.debug(formatter, "PatTupleStruct") - } -} -#[cfg(feature = "full")] -impl crate::PatTupleStruct { - fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { - let mut formatter = formatter.debug_struct(name); - formatter.field("attrs", &self.attrs); - formatter.field("qself", &self.qself); - formatter.field("path", &self.path); - formatter.field("paren_token", &self.paren_token); - formatter.field("elems", &self.elems); - formatter.finish() - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::PatType { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - self.debug(formatter, "PatType") - } -} -#[cfg(feature = "full")] -impl crate::PatType { - fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { - let mut formatter = formatter.debug_struct(name); - formatter.field("attrs", &self.attrs); - formatter.field("pat", &self.pat); - formatter.field("colon_token", &self.colon_token); - formatter.field("ty", &self.ty); - formatter.finish() - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::PatWild { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - self.debug(formatter, "PatWild") - } -} -#[cfg(feature = "full")] -impl crate::PatWild { - fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { - let mut formatter = formatter.debug_struct(name); - formatter.field("attrs", &self.attrs); - formatter.field("underscore_token", &self.underscore_token); - formatter.finish() - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::Path { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - self.debug(formatter, "Path") - } -} -#[cfg(any(feature = "derive", feature = "full"))] -impl crate::Path { - fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { - let mut formatter = formatter.debug_struct(name); - formatter.field("leading_colon", &self.leading_colon); - formatter.field("segments", &self.segments); - formatter.finish() - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::PathArguments { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("PathArguments::")?; - match self { - crate::PathArguments::None => formatter.write_str("None"), - crate::PathArguments::AngleBracketed(v0) => { - v0.debug(formatter, "AngleBracketed") - } - crate::PathArguments::Parenthesized(v0) => { - v0.debug(formatter, "Parenthesized") - } - } - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature 
= "extra-traits")))] -impl Debug for crate::PathSegment { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("PathSegment"); - formatter.field("ident", &self.ident); - formatter.field("arguments", &self.arguments); - formatter.finish() - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::PointerMutability { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("PointerMutability::")?; - match self { - crate::PointerMutability::Const(v0) => { - let mut formatter = formatter.debug_tuple("Const"); - formatter.field(v0); - formatter.finish() - } - crate::PointerMutability::Mut(v0) => { - let mut formatter = formatter.debug_tuple("Mut"); - formatter.field(v0); - formatter.finish() - } - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::PreciseCapture { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("PreciseCapture"); - formatter.field("use_token", &self.use_token); - formatter.field("lt_token", &self.lt_token); - formatter.field("params", &self.params); - formatter.field("gt_token", &self.gt_token); - formatter.finish() - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::PredicateLifetime { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("PredicateLifetime"); - formatter.field("lifetime", &self.lifetime); - formatter.field("colon_token", &self.colon_token); - formatter.field("bounds", &self.bounds); - formatter.finish() - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::PredicateType { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("PredicateType"); - formatter.field("lifetimes", &self.lifetimes); - formatter.field("bounded_ty", &self.bounded_ty); - formatter.field("colon_token", &self.colon_token); - formatter.field("bounds", &self.bounds); - formatter.finish() - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::QSelf { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("QSelf"); - formatter.field("lt_token", &self.lt_token); - formatter.field("ty", &self.ty); - formatter.field("position", &self.position); - formatter.field("as_token", &self.as_token); - formatter.field("gt_token", &self.gt_token); - formatter.finish() - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::RangeLimits { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("RangeLimits::")?; - match self { - crate::RangeLimits::HalfOpen(v0) => { - let mut formatter = formatter.debug_tuple("HalfOpen"); - formatter.field(v0); - formatter.finish() - } - crate::RangeLimits::Closed(v0) => { - let mut formatter = formatter.debug_tuple("Closed"); - formatter.field(v0); - formatter.finish() - } - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::Receiver { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = 
formatter.debug_struct("Receiver"); - formatter.field("attrs", &self.attrs); - formatter.field("reference", &self.reference); - formatter.field("mutability", &self.mutability); - formatter.field("self_token", &self.self_token); - formatter.field("colon_token", &self.colon_token); - formatter.field("ty", &self.ty); - formatter.finish() - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::ReturnType { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("ReturnType::")?; - match self { - crate::ReturnType::Default => formatter.write_str("Default"), - crate::ReturnType::Type(v0, v1) => { - let mut formatter = formatter.debug_tuple("Type"); - formatter.field(v0); - formatter.field(v1); - formatter.finish() - } - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::Signature { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("Signature"); - formatter.field("constness", &self.constness); - formatter.field("asyncness", &self.asyncness); - formatter.field("unsafety", &self.unsafety); - formatter.field("abi", &self.abi); - formatter.field("fn_token", &self.fn_token); - formatter.field("ident", &self.ident); - formatter.field("generics", &self.generics); - formatter.field("paren_token", &self.paren_token); - formatter.field("inputs", &self.inputs); - formatter.field("variadic", &self.variadic); - formatter.field("output", &self.output); - formatter.finish() - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::StaticMutability { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("StaticMutability::")?; - match self { - crate::StaticMutability::Mut(v0) => { - let mut formatter = formatter.debug_tuple("Mut"); - formatter.field(v0); - formatter.finish() - } - crate::StaticMutability::None => formatter.write_str("None"), - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::Stmt { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Stmt::")?; - match self { - crate::Stmt::Local(v0) => v0.debug(formatter, "Local"), - crate::Stmt::Item(v0) => { - let mut formatter = formatter.debug_tuple("Item"); - formatter.field(v0); - formatter.finish() - } - crate::Stmt::Expr(v0, v1) => { - let mut formatter = formatter.debug_tuple("Expr"); - formatter.field(v0); - formatter.field(v1); - formatter.finish() - } - crate::Stmt::Macro(v0) => v0.debug(formatter, "Macro"), - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::StmtMacro { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - self.debug(formatter, "StmtMacro") - } -} -#[cfg(feature = "full")] -impl crate::StmtMacro { - fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { - let mut formatter = formatter.debug_struct(name); - formatter.field("attrs", &self.attrs); - formatter.field("mac", &self.mac); - formatter.field("semi_token", &self.semi_token); - formatter.finish() - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::TraitBound { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = 
formatter.debug_struct("TraitBound"); - formatter.field("paren_token", &self.paren_token); - formatter.field("modifier", &self.modifier); - formatter.field("lifetimes", &self.lifetimes); - formatter.field("path", &self.path); - formatter.finish() - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::TraitBoundModifier { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("TraitBoundModifier::")?; - match self { - crate::TraitBoundModifier::None => formatter.write_str("None"), - crate::TraitBoundModifier::Maybe(v0) => { - let mut formatter = formatter.debug_tuple("Maybe"); - formatter.field(v0); - formatter.finish() - } - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::TraitItem { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("TraitItem::")?; - match self { - crate::TraitItem::Const(v0) => v0.debug(formatter, "Const"), - crate::TraitItem::Fn(v0) => v0.debug(formatter, "Fn"), - crate::TraitItem::Type(v0) => v0.debug(formatter, "Type"), - crate::TraitItem::Macro(v0) => v0.debug(formatter, "Macro"), - crate::TraitItem::Verbatim(v0) => { - let mut formatter = formatter.debug_tuple("Verbatim"); - formatter.field(v0); - formatter.finish() - } - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::TraitItemConst { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - self.debug(formatter, "TraitItemConst") - } -} -#[cfg(feature = "full")] -impl crate::TraitItemConst { - fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { - let mut formatter = formatter.debug_struct(name); - formatter.field("attrs", &self.attrs); - formatter.field("const_token", &self.const_token); - formatter.field("ident", &self.ident); - formatter.field("generics", &self.generics); - formatter.field("colon_token", &self.colon_token); - formatter.field("ty", &self.ty); - formatter.field("default", &self.default); - formatter.field("semi_token", &self.semi_token); - formatter.finish() - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::TraitItemFn { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - self.debug(formatter, "TraitItemFn") - } -} -#[cfg(feature = "full")] -impl crate::TraitItemFn { - fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { - let mut formatter = formatter.debug_struct(name); - formatter.field("attrs", &self.attrs); - formatter.field("sig", &self.sig); - formatter.field("default", &self.default); - formatter.field("semi_token", &self.semi_token); - formatter.finish() - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::TraitItemMacro { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - self.debug(formatter, "TraitItemMacro") - } -} -#[cfg(feature = "full")] -impl crate::TraitItemMacro { - fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { - let mut formatter = formatter.debug_struct(name); - formatter.field("attrs", &self.attrs); - formatter.field("mac", &self.mac); - formatter.field("semi_token", &self.semi_token); - formatter.finish() - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::TraitItemType { - fn fmt(&self, 
formatter: &mut fmt::Formatter) -> fmt::Result { - self.debug(formatter, "TraitItemType") - } -} -#[cfg(feature = "full")] -impl crate::TraitItemType { - fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { - let mut formatter = formatter.debug_struct(name); - formatter.field("attrs", &self.attrs); - formatter.field("type_token", &self.type_token); - formatter.field("ident", &self.ident); - formatter.field("generics", &self.generics); - formatter.field("colon_token", &self.colon_token); - formatter.field("bounds", &self.bounds); - formatter.field("default", &self.default); - formatter.field("semi_token", &self.semi_token); - formatter.finish() - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::Type { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Type::")?; - match self { - crate::Type::Array(v0) => v0.debug(formatter, "Array"), - crate::Type::BareFn(v0) => v0.debug(formatter, "BareFn"), - crate::Type::Group(v0) => v0.debug(formatter, "Group"), - crate::Type::ImplTrait(v0) => v0.debug(formatter, "ImplTrait"), - crate::Type::Infer(v0) => v0.debug(formatter, "Infer"), - crate::Type::Macro(v0) => v0.debug(formatter, "Macro"), - crate::Type::Never(v0) => v0.debug(formatter, "Never"), - crate::Type::Paren(v0) => v0.debug(formatter, "Paren"), - crate::Type::Path(v0) => v0.debug(formatter, "Path"), - crate::Type::Ptr(v0) => v0.debug(formatter, "Ptr"), - crate::Type::Reference(v0) => v0.debug(formatter, "Reference"), - crate::Type::Slice(v0) => v0.debug(formatter, "Slice"), - crate::Type::TraitObject(v0) => v0.debug(formatter, "TraitObject"), - crate::Type::Tuple(v0) => v0.debug(formatter, "Tuple"), - crate::Type::Verbatim(v0) => { - let mut formatter = formatter.debug_tuple("Verbatim"); - formatter.field(v0); - formatter.finish() - } - } - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::TypeArray { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - self.debug(formatter, "TypeArray") - } -} -#[cfg(any(feature = "derive", feature = "full"))] -impl crate::TypeArray { - fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { - let mut formatter = formatter.debug_struct(name); - formatter.field("bracket_token", &self.bracket_token); - formatter.field("elem", &self.elem); - formatter.field("semi_token", &self.semi_token); - formatter.field("len", &self.len); - formatter.finish() - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::TypeBareFn { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - self.debug(formatter, "TypeBareFn") - } -} -#[cfg(any(feature = "derive", feature = "full"))] -impl crate::TypeBareFn { - fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { - let mut formatter = formatter.debug_struct(name); - formatter.field("lifetimes", &self.lifetimes); - formatter.field("unsafety", &self.unsafety); - formatter.field("abi", &self.abi); - formatter.field("fn_token", &self.fn_token); - formatter.field("paren_token", &self.paren_token); - formatter.field("inputs", &self.inputs); - formatter.field("variadic", &self.variadic); - formatter.field("output", &self.output); - formatter.finish() - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] 
-impl Debug for crate::TypeGroup { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - self.debug(formatter, "TypeGroup") - } -} -#[cfg(any(feature = "derive", feature = "full"))] -impl crate::TypeGroup { - fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { - let mut formatter = formatter.debug_struct(name); - formatter.field("group_token", &self.group_token); - formatter.field("elem", &self.elem); - formatter.finish() - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::TypeImplTrait { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - self.debug(formatter, "TypeImplTrait") - } -} -#[cfg(any(feature = "derive", feature = "full"))] -impl crate::TypeImplTrait { - fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { - let mut formatter = formatter.debug_struct(name); - formatter.field("impl_token", &self.impl_token); - formatter.field("bounds", &self.bounds); - formatter.finish() - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::TypeInfer { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - self.debug(formatter, "TypeInfer") - } -} -#[cfg(any(feature = "derive", feature = "full"))] -impl crate::TypeInfer { - fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { - let mut formatter = formatter.debug_struct(name); - formatter.field("underscore_token", &self.underscore_token); - formatter.finish() - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::TypeMacro { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - self.debug(formatter, "TypeMacro") - } -} -#[cfg(any(feature = "derive", feature = "full"))] -impl crate::TypeMacro { - fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { - let mut formatter = formatter.debug_struct(name); - formatter.field("mac", &self.mac); - formatter.finish() - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::TypeNever { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - self.debug(formatter, "TypeNever") - } -} -#[cfg(any(feature = "derive", feature = "full"))] -impl crate::TypeNever { - fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { - let mut formatter = formatter.debug_struct(name); - formatter.field("bang_token", &self.bang_token); - formatter.finish() - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::TypeParam { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("TypeParam"); - formatter.field("attrs", &self.attrs); - formatter.field("ident", &self.ident); - formatter.field("colon_token", &self.colon_token); - formatter.field("bounds", &self.bounds); - formatter.field("eq_token", &self.eq_token); - formatter.field("default", &self.default); - formatter.finish() - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::TypeParamBound { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("TypeParamBound::")?; - match self { - crate::TypeParamBound::Trait(v0) => { - 
let mut formatter = formatter.debug_tuple("Trait"); - formatter.field(v0); - formatter.finish() - } - crate::TypeParamBound::Lifetime(v0) => v0.debug(formatter, "Lifetime"), - #[cfg(feature = "full")] - crate::TypeParamBound::PreciseCapture(v0) => { - let mut formatter = formatter.debug_tuple("PreciseCapture"); - formatter.field(v0); - formatter.finish() - } - crate::TypeParamBound::Verbatim(v0) => { - let mut formatter = formatter.debug_tuple("Verbatim"); - formatter.field(v0); - formatter.finish() - } - #[cfg(not(feature = "full"))] - _ => unreachable!(), - } - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::TypeParen { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - self.debug(formatter, "TypeParen") - } -} -#[cfg(any(feature = "derive", feature = "full"))] -impl crate::TypeParen { - fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { - let mut formatter = formatter.debug_struct(name); - formatter.field("paren_token", &self.paren_token); - formatter.field("elem", &self.elem); - formatter.finish() - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::TypePath { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - self.debug(formatter, "TypePath") - } -} -#[cfg(any(feature = "derive", feature = "full"))] -impl crate::TypePath { - fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { - let mut formatter = formatter.debug_struct(name); - formatter.field("qself", &self.qself); - formatter.field("path", &self.path); - formatter.finish() - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::TypePtr { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - self.debug(formatter, "TypePtr") - } -} -#[cfg(any(feature = "derive", feature = "full"))] -impl crate::TypePtr { - fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { - let mut formatter = formatter.debug_struct(name); - formatter.field("star_token", &self.star_token); - formatter.field("const_token", &self.const_token); - formatter.field("mutability", &self.mutability); - formatter.field("elem", &self.elem); - formatter.finish() - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::TypeReference { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - self.debug(formatter, "TypeReference") - } -} -#[cfg(any(feature = "derive", feature = "full"))] -impl crate::TypeReference { - fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { - let mut formatter = formatter.debug_struct(name); - formatter.field("and_token", &self.and_token); - formatter.field("lifetime", &self.lifetime); - formatter.field("mutability", &self.mutability); - formatter.field("elem", &self.elem); - formatter.finish() - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::TypeSlice { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - self.debug(formatter, "TypeSlice") - } -} -#[cfg(any(feature = "derive", feature = "full"))] -impl crate::TypeSlice { - fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { - let mut formatter = formatter.debug_struct(name); - 
formatter.field("bracket_token", &self.bracket_token); - formatter.field("elem", &self.elem); - formatter.finish() - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::TypeTraitObject { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - self.debug(formatter, "TypeTraitObject") - } -} -#[cfg(any(feature = "derive", feature = "full"))] -impl crate::TypeTraitObject { - fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { - let mut formatter = formatter.debug_struct(name); - formatter.field("dyn_token", &self.dyn_token); - formatter.field("bounds", &self.bounds); - formatter.finish() - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::TypeTuple { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - self.debug(formatter, "TypeTuple") - } -} -#[cfg(any(feature = "derive", feature = "full"))] -impl crate::TypeTuple { - fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { - let mut formatter = formatter.debug_struct(name); - formatter.field("paren_token", &self.paren_token); - formatter.field("elems", &self.elems); - formatter.finish() - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::UnOp { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("UnOp::")?; - match self { - crate::UnOp::Deref(v0) => { - let mut formatter = formatter.debug_tuple("Deref"); - formatter.field(v0); - formatter.finish() - } - crate::UnOp::Not(v0) => { - let mut formatter = formatter.debug_tuple("Not"); - formatter.field(v0); - formatter.finish() - } - crate::UnOp::Neg(v0) => { - let mut formatter = formatter.debug_tuple("Neg"); - formatter.field(v0); - formatter.finish() - } - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::UseGlob { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("UseGlob"); - formatter.field("star_token", &self.star_token); - formatter.finish() - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::UseGroup { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("UseGroup"); - formatter.field("brace_token", &self.brace_token); - formatter.field("items", &self.items); - formatter.finish() - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::UseName { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("UseName"); - formatter.field("ident", &self.ident); - formatter.finish() - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::UsePath { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("UsePath"); - formatter.field("ident", &self.ident); - formatter.field("colon2_token", &self.colon2_token); - formatter.field("tree", &self.tree); - formatter.finish() - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::UseRename { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = 
formatter.debug_struct("UseRename"); - formatter.field("ident", &self.ident); - formatter.field("as_token", &self.as_token); - formatter.field("rename", &self.rename); - formatter.finish() - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::UseTree { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("UseTree::")?; - match self { - crate::UseTree::Path(v0) => { - let mut formatter = formatter.debug_tuple("Path"); - formatter.field(v0); - formatter.finish() - } - crate::UseTree::Name(v0) => { - let mut formatter = formatter.debug_tuple("Name"); - formatter.field(v0); - formatter.finish() - } - crate::UseTree::Rename(v0) => { - let mut formatter = formatter.debug_tuple("Rename"); - formatter.field(v0); - formatter.finish() - } - crate::UseTree::Glob(v0) => { - let mut formatter = formatter.debug_tuple("Glob"); - formatter.field(v0); - formatter.finish() - } - crate::UseTree::Group(v0) => { - let mut formatter = formatter.debug_tuple("Group"); - formatter.field(v0); - formatter.finish() - } - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::Variadic { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("Variadic"); - formatter.field("attrs", &self.attrs); - formatter.field("pat", &self.pat); - formatter.field("dots", &self.dots); - formatter.field("comma", &self.comma); - formatter.finish() - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::Variant { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("Variant"); - formatter.field("attrs", &self.attrs); - formatter.field("ident", &self.ident); - formatter.field("fields", &self.fields); - formatter.field("discriminant", &self.discriminant); - formatter.finish() - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::VisRestricted { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - self.debug(formatter, "VisRestricted") - } -} -#[cfg(any(feature = "derive", feature = "full"))] -impl crate::VisRestricted { - fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { - let mut formatter = formatter.debug_struct(name); - formatter.field("pub_token", &self.pub_token); - formatter.field("paren_token", &self.paren_token); - formatter.field("in_token", &self.in_token); - formatter.field("path", &self.path); - formatter.finish() - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::Visibility { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Visibility::")?; - match self { - crate::Visibility::Public(v0) => { - let mut formatter = formatter.debug_tuple("Public"); - formatter.field(v0); - formatter.finish() - } - crate::Visibility::Restricted(v0) => v0.debug(formatter, "Restricted"), - crate::Visibility::Inherited => formatter.write_str("Inherited"), - } - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::WhereClause { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("WhereClause"); - 
formatter.field("where_token", &self.where_token); - formatter.field("predicates", &self.predicates); - formatter.finish() - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for crate::WherePredicate { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("WherePredicate::")?; - match self { - crate::WherePredicate::Lifetime(v0) => { - let mut formatter = formatter.debug_tuple("Lifetime"); - formatter.field(v0); - formatter.finish() - } - crate::WherePredicate::Type(v0) => { - let mut formatter = formatter.debug_tuple("Type"); - formatter.field(v0); - formatter.finish() - } - } - } -} diff --git a/vendor/syn/src/gen/eq.rs b/vendor/syn/src/gen/eq.rs deleted file mode 100644 index 128e8991eeccfb..00000000000000 --- a/vendor/syn/src/gen/eq.rs +++ /dev/null @@ -1,2306 +0,0 @@ -// This file is @generated by syn-internal-codegen. -// It is not intended for manual editing. - -#[cfg(any(feature = "derive", feature = "full"))] -use crate::tt::TokenStreamHelper; -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::Abi {} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::Abi { - fn eq(&self, other: &Self) -> bool { - self.name == other.name - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::AngleBracketedGenericArguments {} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::AngleBracketedGenericArguments { - fn eq(&self, other: &Self) -> bool { - self.colon2_token == other.colon2_token && self.args == other.args - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::Arm {} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::Arm { - fn eq(&self, other: &Self) -> bool { - self.attrs == other.attrs && self.pat == other.pat && self.guard == other.guard - && self.body == other.body && self.comma == other.comma - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::AssocConst {} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::AssocConst { - fn eq(&self, other: &Self) -> bool { - self.ident == other.ident && self.generics == other.generics - && self.value == other.value - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::AssocType {} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::AssocType { - fn eq(&self, other: &Self) -> bool { - self.ident == other.ident && self.generics == other.generics - && self.ty == other.ty - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::AttrStyle {} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::AttrStyle { - fn eq(&self, other: &Self) -> bool { - match (self, other) { - (crate::AttrStyle::Outer, crate::AttrStyle::Outer) => true, - 
(crate::AttrStyle::Inner(_), crate::AttrStyle::Inner(_)) => true, - _ => false, - } - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::Attribute {} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::Attribute { - fn eq(&self, other: &Self) -> bool { - self.style == other.style && self.meta == other.meta - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::BareFnArg {} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::BareFnArg { - fn eq(&self, other: &Self) -> bool { - self.attrs == other.attrs && self.name == other.name && self.ty == other.ty - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::BareVariadic {} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::BareVariadic { - fn eq(&self, other: &Self) -> bool { - self.attrs == other.attrs && self.name == other.name && self.comma == other.comma - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::BinOp {} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::BinOp { - fn eq(&self, other: &Self) -> bool { - match (self, other) { - (crate::BinOp::Add(_), crate::BinOp::Add(_)) => true, - (crate::BinOp::Sub(_), crate::BinOp::Sub(_)) => true, - (crate::BinOp::Mul(_), crate::BinOp::Mul(_)) => true, - (crate::BinOp::Div(_), crate::BinOp::Div(_)) => true, - (crate::BinOp::Rem(_), crate::BinOp::Rem(_)) => true, - (crate::BinOp::And(_), crate::BinOp::And(_)) => true, - (crate::BinOp::Or(_), crate::BinOp::Or(_)) => true, - (crate::BinOp::BitXor(_), crate::BinOp::BitXor(_)) => true, - (crate::BinOp::BitAnd(_), crate::BinOp::BitAnd(_)) => true, - (crate::BinOp::BitOr(_), crate::BinOp::BitOr(_)) => true, - (crate::BinOp::Shl(_), crate::BinOp::Shl(_)) => true, - (crate::BinOp::Shr(_), crate::BinOp::Shr(_)) => true, - (crate::BinOp::Eq(_), crate::BinOp::Eq(_)) => true, - (crate::BinOp::Lt(_), crate::BinOp::Lt(_)) => true, - (crate::BinOp::Le(_), crate::BinOp::Le(_)) => true, - (crate::BinOp::Ne(_), crate::BinOp::Ne(_)) => true, - (crate::BinOp::Ge(_), crate::BinOp::Ge(_)) => true, - (crate::BinOp::Gt(_), crate::BinOp::Gt(_)) => true, - (crate::BinOp::AddAssign(_), crate::BinOp::AddAssign(_)) => true, - (crate::BinOp::SubAssign(_), crate::BinOp::SubAssign(_)) => true, - (crate::BinOp::MulAssign(_), crate::BinOp::MulAssign(_)) => true, - (crate::BinOp::DivAssign(_), crate::BinOp::DivAssign(_)) => true, - (crate::BinOp::RemAssign(_), crate::BinOp::RemAssign(_)) => true, - (crate::BinOp::BitXorAssign(_), crate::BinOp::BitXorAssign(_)) => true, - (crate::BinOp::BitAndAssign(_), crate::BinOp::BitAndAssign(_)) => true, - (crate::BinOp::BitOrAssign(_), crate::BinOp::BitOrAssign(_)) => true, - (crate::BinOp::ShlAssign(_), crate::BinOp::ShlAssign(_)) => true, - (crate::BinOp::ShrAssign(_), crate::BinOp::ShrAssign(_)) => true, - _ => false, - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::Block {} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, 
doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::Block { - fn eq(&self, other: &Self) -> bool { - self.stmts == other.stmts - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::BoundLifetimes {} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::BoundLifetimes { - fn eq(&self, other: &Self) -> bool { - self.lifetimes == other.lifetimes - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::CapturedParam {} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::CapturedParam { - fn eq(&self, other: &Self) -> bool { - match (self, other) { - ( - crate::CapturedParam::Lifetime(self0), - crate::CapturedParam::Lifetime(other0), - ) => self0 == other0, - (crate::CapturedParam::Ident(self0), crate::CapturedParam::Ident(other0)) => { - self0 == other0 - } - _ => false, - } - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::ConstParam {} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::ConstParam { - fn eq(&self, other: &Self) -> bool { - self.attrs == other.attrs && self.ident == other.ident && self.ty == other.ty - && self.eq_token == other.eq_token && self.default == other.default - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::Constraint {} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::Constraint { - fn eq(&self, other: &Self) -> bool { - self.ident == other.ident && self.generics == other.generics - && self.bounds == other.bounds - } -} -#[cfg(feature = "derive")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::Data {} -#[cfg(feature = "derive")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::Data { - fn eq(&self, other: &Self) -> bool { - match (self, other) { - (crate::Data::Struct(self0), crate::Data::Struct(other0)) => self0 == other0, - (crate::Data::Enum(self0), crate::Data::Enum(other0)) => self0 == other0, - (crate::Data::Union(self0), crate::Data::Union(other0)) => self0 == other0, - _ => false, - } - } -} -#[cfg(feature = "derive")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::DataEnum {} -#[cfg(feature = "derive")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::DataEnum { - fn eq(&self, other: &Self) -> bool { - self.variants == other.variants - } -} -#[cfg(feature = "derive")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::DataStruct {} -#[cfg(feature = "derive")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::DataStruct { - fn eq(&self, other: &Self) -> bool { - self.fields == other.fields && self.semi_token == other.semi_token - } -} -#[cfg(feature = "derive")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::DataUnion {} -#[cfg(feature = "derive")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::DataUnion { - fn eq(&self, other: &Self) -> bool { - self.fields == other.fields - } -} 
-#[cfg(feature = "derive")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::DeriveInput {} -#[cfg(feature = "derive")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::DeriveInput { - fn eq(&self, other: &Self) -> bool { - self.attrs == other.attrs && self.vis == other.vis && self.ident == other.ident - && self.generics == other.generics && self.data == other.data - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::Expr {} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::Expr { - fn eq(&self, other: &Self) -> bool { - match (self, other) { - #[cfg(feature = "full")] - (crate::Expr::Array(self0), crate::Expr::Array(other0)) => self0 == other0, - #[cfg(feature = "full")] - (crate::Expr::Assign(self0), crate::Expr::Assign(other0)) => self0 == other0, - #[cfg(feature = "full")] - (crate::Expr::Async(self0), crate::Expr::Async(other0)) => self0 == other0, - #[cfg(feature = "full")] - (crate::Expr::Await(self0), crate::Expr::Await(other0)) => self0 == other0, - (crate::Expr::Binary(self0), crate::Expr::Binary(other0)) => self0 == other0, - #[cfg(feature = "full")] - (crate::Expr::Block(self0), crate::Expr::Block(other0)) => self0 == other0, - #[cfg(feature = "full")] - (crate::Expr::Break(self0), crate::Expr::Break(other0)) => self0 == other0, - (crate::Expr::Call(self0), crate::Expr::Call(other0)) => self0 == other0, - (crate::Expr::Cast(self0), crate::Expr::Cast(other0)) => self0 == other0, - #[cfg(feature = "full")] - (crate::Expr::Closure(self0), crate::Expr::Closure(other0)) => { - self0 == other0 - } - #[cfg(feature = "full")] - (crate::Expr::Const(self0), crate::Expr::Const(other0)) => self0 == other0, - #[cfg(feature = "full")] - (crate::Expr::Continue(self0), crate::Expr::Continue(other0)) => { - self0 == other0 - } - (crate::Expr::Field(self0), crate::Expr::Field(other0)) => self0 == other0, - #[cfg(feature = "full")] - (crate::Expr::ForLoop(self0), crate::Expr::ForLoop(other0)) => { - self0 == other0 - } - (crate::Expr::Group(self0), crate::Expr::Group(other0)) => self0 == other0, - #[cfg(feature = "full")] - (crate::Expr::If(self0), crate::Expr::If(other0)) => self0 == other0, - (crate::Expr::Index(self0), crate::Expr::Index(other0)) => self0 == other0, - #[cfg(feature = "full")] - (crate::Expr::Infer(self0), crate::Expr::Infer(other0)) => self0 == other0, - #[cfg(feature = "full")] - (crate::Expr::Let(self0), crate::Expr::Let(other0)) => self0 == other0, - (crate::Expr::Lit(self0), crate::Expr::Lit(other0)) => self0 == other0, - #[cfg(feature = "full")] - (crate::Expr::Loop(self0), crate::Expr::Loop(other0)) => self0 == other0, - (crate::Expr::Macro(self0), crate::Expr::Macro(other0)) => self0 == other0, - #[cfg(feature = "full")] - (crate::Expr::Match(self0), crate::Expr::Match(other0)) => self0 == other0, - (crate::Expr::MethodCall(self0), crate::Expr::MethodCall(other0)) => { - self0 == other0 - } - (crate::Expr::Paren(self0), crate::Expr::Paren(other0)) => self0 == other0, - (crate::Expr::Path(self0), crate::Expr::Path(other0)) => self0 == other0, - #[cfg(feature = "full")] - (crate::Expr::Range(self0), crate::Expr::Range(other0)) => self0 == other0, - #[cfg(feature = "full")] - (crate::Expr::RawAddr(self0), crate::Expr::RawAddr(other0)) => { - self0 == other0 - } - (crate::Expr::Reference(self0), crate::Expr::Reference(other0)) => { - self0 == 
other0 - } - #[cfg(feature = "full")] - (crate::Expr::Repeat(self0), crate::Expr::Repeat(other0)) => self0 == other0, - #[cfg(feature = "full")] - (crate::Expr::Return(self0), crate::Expr::Return(other0)) => self0 == other0, - (crate::Expr::Struct(self0), crate::Expr::Struct(other0)) => self0 == other0, - #[cfg(feature = "full")] - (crate::Expr::Try(self0), crate::Expr::Try(other0)) => self0 == other0, - #[cfg(feature = "full")] - (crate::Expr::TryBlock(self0), crate::Expr::TryBlock(other0)) => { - self0 == other0 - } - (crate::Expr::Tuple(self0), crate::Expr::Tuple(other0)) => self0 == other0, - (crate::Expr::Unary(self0), crate::Expr::Unary(other0)) => self0 == other0, - #[cfg(feature = "full")] - (crate::Expr::Unsafe(self0), crate::Expr::Unsafe(other0)) => self0 == other0, - (crate::Expr::Verbatim(self0), crate::Expr::Verbatim(other0)) => { - TokenStreamHelper(self0) == TokenStreamHelper(other0) - } - #[cfg(feature = "full")] - (crate::Expr::While(self0), crate::Expr::While(other0)) => self0 == other0, - #[cfg(feature = "full")] - (crate::Expr::Yield(self0), crate::Expr::Yield(other0)) => self0 == other0, - _ => false, - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::ExprArray {} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::ExprArray { - fn eq(&self, other: &Self) -> bool { - self.attrs == other.attrs && self.elems == other.elems - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::ExprAssign {} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::ExprAssign { - fn eq(&self, other: &Self) -> bool { - self.attrs == other.attrs && self.left == other.left && self.right == other.right - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::ExprAsync {} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::ExprAsync { - fn eq(&self, other: &Self) -> bool { - self.attrs == other.attrs && self.capture == other.capture - && self.block == other.block - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::ExprAwait {} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::ExprAwait { - fn eq(&self, other: &Self) -> bool { - self.attrs == other.attrs && self.base == other.base - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::ExprBinary {} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::ExprBinary { - fn eq(&self, other: &Self) -> bool { - self.attrs == other.attrs && self.left == other.left && self.op == other.op - && self.right == other.right - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::ExprBlock {} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::ExprBlock { - fn eq(&self, other: &Self) -> bool { - self.attrs == other.attrs && self.label == other.label - && self.block == other.block - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::ExprBreak {} -#[cfg(feature = "full")] 
-#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::ExprBreak { - fn eq(&self, other: &Self) -> bool { - self.attrs == other.attrs && self.label == other.label && self.expr == other.expr - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::ExprCall {} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::ExprCall { - fn eq(&self, other: &Self) -> bool { - self.attrs == other.attrs && self.func == other.func && self.args == other.args - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::ExprCast {} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::ExprCast { - fn eq(&self, other: &Self) -> bool { - self.attrs == other.attrs && self.expr == other.expr && self.ty == other.ty - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::ExprClosure {} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::ExprClosure { - fn eq(&self, other: &Self) -> bool { - self.attrs == other.attrs && self.lifetimes == other.lifetimes - && self.constness == other.constness && self.movability == other.movability - && self.asyncness == other.asyncness && self.capture == other.capture - && self.inputs == other.inputs && self.output == other.output - && self.body == other.body - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::ExprConst {} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::ExprConst { - fn eq(&self, other: &Self) -> bool { - self.attrs == other.attrs && self.block == other.block - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::ExprContinue {} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::ExprContinue { - fn eq(&self, other: &Self) -> bool { - self.attrs == other.attrs && self.label == other.label - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::ExprField {} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::ExprField { - fn eq(&self, other: &Self) -> bool { - self.attrs == other.attrs && self.base == other.base - && self.member == other.member - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::ExprForLoop {} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::ExprForLoop { - fn eq(&self, other: &Self) -> bool { - self.attrs == other.attrs && self.label == other.label && self.pat == other.pat - && self.expr == other.expr && self.body == other.body - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::ExprGroup {} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::ExprGroup { - fn eq(&self, other: &Self) -> bool { - self.attrs == other.attrs && self.expr == 
other.expr - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::ExprIf {} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::ExprIf { - fn eq(&self, other: &Self) -> bool { - self.attrs == other.attrs && self.cond == other.cond - && self.then_branch == other.then_branch - && self.else_branch == other.else_branch - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::ExprIndex {} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::ExprIndex { - fn eq(&self, other: &Self) -> bool { - self.attrs == other.attrs && self.expr == other.expr && self.index == other.index - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::ExprInfer {} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::ExprInfer { - fn eq(&self, other: &Self) -> bool { - self.attrs == other.attrs - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::ExprLet {} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::ExprLet { - fn eq(&self, other: &Self) -> bool { - self.attrs == other.attrs && self.pat == other.pat && self.expr == other.expr - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::ExprLit {} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::ExprLit { - fn eq(&self, other: &Self) -> bool { - self.attrs == other.attrs && self.lit == other.lit - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::ExprLoop {} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::ExprLoop { - fn eq(&self, other: &Self) -> bool { - self.attrs == other.attrs && self.label == other.label && self.body == other.body - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::ExprMacro {} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::ExprMacro { - fn eq(&self, other: &Self) -> bool { - self.attrs == other.attrs && self.mac == other.mac - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::ExprMatch {} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::ExprMatch { - fn eq(&self, other: &Self) -> bool { - self.attrs == other.attrs && self.expr == other.expr && self.arms == other.arms - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::ExprMethodCall {} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::ExprMethodCall { - fn eq(&self, other: &Self) -> bool { - self.attrs == other.attrs && self.receiver == other.receiver - && self.method == other.method && self.turbofish == other.turbofish - && self.args == other.args - } -} -#[cfg(any(feature 
= "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::ExprParen {} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::ExprParen { - fn eq(&self, other: &Self) -> bool { - self.attrs == other.attrs && self.expr == other.expr - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::ExprPath {} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::ExprPath { - fn eq(&self, other: &Self) -> bool { - self.attrs == other.attrs && self.qself == other.qself && self.path == other.path - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::ExprRange {} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::ExprRange { - fn eq(&self, other: &Self) -> bool { - self.attrs == other.attrs && self.start == other.start - && self.limits == other.limits && self.end == other.end - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::ExprRawAddr {} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::ExprRawAddr { - fn eq(&self, other: &Self) -> bool { - self.attrs == other.attrs && self.mutability == other.mutability - && self.expr == other.expr - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::ExprReference {} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::ExprReference { - fn eq(&self, other: &Self) -> bool { - self.attrs == other.attrs && self.mutability == other.mutability - && self.expr == other.expr - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::ExprRepeat {} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::ExprRepeat { - fn eq(&self, other: &Self) -> bool { - self.attrs == other.attrs && self.expr == other.expr && self.len == other.len - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::ExprReturn {} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::ExprReturn { - fn eq(&self, other: &Self) -> bool { - self.attrs == other.attrs && self.expr == other.expr - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::ExprStruct {} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::ExprStruct { - fn eq(&self, other: &Self) -> bool { - self.attrs == other.attrs && self.qself == other.qself && self.path == other.path - && self.fields == other.fields && self.dot2_token == other.dot2_token - && self.rest == other.rest - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::ExprTry {} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::ExprTry { - fn eq(&self, other: &Self) -> bool { - self.attrs == other.attrs && self.expr == 
other.expr - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::ExprTryBlock {} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::ExprTryBlock { - fn eq(&self, other: &Self) -> bool { - self.attrs == other.attrs && self.block == other.block - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::ExprTuple {} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::ExprTuple { - fn eq(&self, other: &Self) -> bool { - self.attrs == other.attrs && self.elems == other.elems - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::ExprUnary {} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::ExprUnary { - fn eq(&self, other: &Self) -> bool { - self.attrs == other.attrs && self.op == other.op && self.expr == other.expr - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::ExprUnsafe {} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::ExprUnsafe { - fn eq(&self, other: &Self) -> bool { - self.attrs == other.attrs && self.block == other.block - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::ExprWhile {} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::ExprWhile { - fn eq(&self, other: &Self) -> bool { - self.attrs == other.attrs && self.label == other.label && self.cond == other.cond - && self.body == other.body - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::ExprYield {} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::ExprYield { - fn eq(&self, other: &Self) -> bool { - self.attrs == other.attrs && self.expr == other.expr - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::Field {} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::Field { - fn eq(&self, other: &Self) -> bool { - self.attrs == other.attrs && self.vis == other.vis - && self.mutability == other.mutability && self.ident == other.ident - && self.colon_token == other.colon_token && self.ty == other.ty - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::FieldMutability {} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::FieldMutability { - fn eq(&self, other: &Self) -> bool { - match (self, other) { - (crate::FieldMutability::None, crate::FieldMutability::None) => true, - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::FieldPat {} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::FieldPat { - fn eq(&self, other: &Self) -> bool { - self.attrs == other.attrs && self.member == other.member - && 
self.colon_token == other.colon_token && self.pat == other.pat - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::FieldValue {} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::FieldValue { - fn eq(&self, other: &Self) -> bool { - self.attrs == other.attrs && self.member == other.member - && self.colon_token == other.colon_token && self.expr == other.expr - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::Fields {} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::Fields { - fn eq(&self, other: &Self) -> bool { - match (self, other) { - (crate::Fields::Named(self0), crate::Fields::Named(other0)) => { - self0 == other0 - } - (crate::Fields::Unnamed(self0), crate::Fields::Unnamed(other0)) => { - self0 == other0 - } - (crate::Fields::Unit, crate::Fields::Unit) => true, - _ => false, - } - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::FieldsNamed {} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::FieldsNamed { - fn eq(&self, other: &Self) -> bool { - self.named == other.named - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::FieldsUnnamed {} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::FieldsUnnamed { - fn eq(&self, other: &Self) -> bool { - self.unnamed == other.unnamed - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::File {} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::File { - fn eq(&self, other: &Self) -> bool { - self.shebang == other.shebang && self.attrs == other.attrs - && self.items == other.items - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::FnArg {} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::FnArg { - fn eq(&self, other: &Self) -> bool { - match (self, other) { - (crate::FnArg::Receiver(self0), crate::FnArg::Receiver(other0)) => { - self0 == other0 - } - (crate::FnArg::Typed(self0), crate::FnArg::Typed(other0)) => self0 == other0, - _ => false, - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::ForeignItem {} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::ForeignItem { - fn eq(&self, other: &Self) -> bool { - match (self, other) { - (crate::ForeignItem::Fn(self0), crate::ForeignItem::Fn(other0)) => { - self0 == other0 - } - (crate::ForeignItem::Static(self0), crate::ForeignItem::Static(other0)) => { - self0 == other0 - } - (crate::ForeignItem::Type(self0), crate::ForeignItem::Type(other0)) => { - self0 == other0 - } - (crate::ForeignItem::Macro(self0), crate::ForeignItem::Macro(other0)) => { - self0 == other0 - } - ( - crate::ForeignItem::Verbatim(self0), - crate::ForeignItem::Verbatim(other0), - ) => TokenStreamHelper(self0) == 
TokenStreamHelper(other0), - _ => false, - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::ForeignItemFn {} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::ForeignItemFn { - fn eq(&self, other: &Self) -> bool { - self.attrs == other.attrs && self.vis == other.vis && self.sig == other.sig - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::ForeignItemMacro {} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::ForeignItemMacro { - fn eq(&self, other: &Self) -> bool { - self.attrs == other.attrs && self.mac == other.mac - && self.semi_token == other.semi_token - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::ForeignItemStatic {} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::ForeignItemStatic { - fn eq(&self, other: &Self) -> bool { - self.attrs == other.attrs && self.vis == other.vis - && self.mutability == other.mutability && self.ident == other.ident - && self.ty == other.ty - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::ForeignItemType {} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::ForeignItemType { - fn eq(&self, other: &Self) -> bool { - self.attrs == other.attrs && self.vis == other.vis && self.ident == other.ident - && self.generics == other.generics - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::GenericArgument {} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::GenericArgument { - fn eq(&self, other: &Self) -> bool { - match (self, other) { - ( - crate::GenericArgument::Lifetime(self0), - crate::GenericArgument::Lifetime(other0), - ) => self0 == other0, - ( - crate::GenericArgument::Type(self0), - crate::GenericArgument::Type(other0), - ) => self0 == other0, - ( - crate::GenericArgument::Const(self0), - crate::GenericArgument::Const(other0), - ) => self0 == other0, - ( - crate::GenericArgument::AssocType(self0), - crate::GenericArgument::AssocType(other0), - ) => self0 == other0, - ( - crate::GenericArgument::AssocConst(self0), - crate::GenericArgument::AssocConst(other0), - ) => self0 == other0, - ( - crate::GenericArgument::Constraint(self0), - crate::GenericArgument::Constraint(other0), - ) => self0 == other0, - _ => false, - } - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::GenericParam {} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::GenericParam { - fn eq(&self, other: &Self) -> bool { - match (self, other) { - ( - crate::GenericParam::Lifetime(self0), - crate::GenericParam::Lifetime(other0), - ) => self0 == other0, - (crate::GenericParam::Type(self0), crate::GenericParam::Type(other0)) => { - self0 == other0 - } - (crate::GenericParam::Const(self0), crate::GenericParam::Const(other0)) => { - self0 == other0 - } - _ => false, - } - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] 
-impl Eq for crate::Generics {} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::Generics { - fn eq(&self, other: &Self) -> bool { - self.lt_token == other.lt_token && self.params == other.params - && self.gt_token == other.gt_token && self.where_clause == other.where_clause - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::ImplItem {} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::ImplItem { - fn eq(&self, other: &Self) -> bool { - match (self, other) { - (crate::ImplItem::Const(self0), crate::ImplItem::Const(other0)) => { - self0 == other0 - } - (crate::ImplItem::Fn(self0), crate::ImplItem::Fn(other0)) => self0 == other0, - (crate::ImplItem::Type(self0), crate::ImplItem::Type(other0)) => { - self0 == other0 - } - (crate::ImplItem::Macro(self0), crate::ImplItem::Macro(other0)) => { - self0 == other0 - } - (crate::ImplItem::Verbatim(self0), crate::ImplItem::Verbatim(other0)) => { - TokenStreamHelper(self0) == TokenStreamHelper(other0) - } - _ => false, - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::ImplItemConst {} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::ImplItemConst { - fn eq(&self, other: &Self) -> bool { - self.attrs == other.attrs && self.vis == other.vis - && self.defaultness == other.defaultness && self.ident == other.ident - && self.generics == other.generics && self.ty == other.ty - && self.expr == other.expr - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::ImplItemFn {} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::ImplItemFn { - fn eq(&self, other: &Self) -> bool { - self.attrs == other.attrs && self.vis == other.vis - && self.defaultness == other.defaultness && self.sig == other.sig - && self.block == other.block - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::ImplItemMacro {} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::ImplItemMacro { - fn eq(&self, other: &Self) -> bool { - self.attrs == other.attrs && self.mac == other.mac - && self.semi_token == other.semi_token - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::ImplItemType {} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::ImplItemType { - fn eq(&self, other: &Self) -> bool { - self.attrs == other.attrs && self.vis == other.vis - && self.defaultness == other.defaultness && self.ident == other.ident - && self.generics == other.generics && self.ty == other.ty - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::ImplRestriction {} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::ImplRestriction { - fn eq(&self, _other: &Self) -> bool { - match *self {} - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::Item {} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::Item { - fn eq(&self, 
other: &Self) -> bool { - match (self, other) { - (crate::Item::Const(self0), crate::Item::Const(other0)) => self0 == other0, - (crate::Item::Enum(self0), crate::Item::Enum(other0)) => self0 == other0, - (crate::Item::ExternCrate(self0), crate::Item::ExternCrate(other0)) => { - self0 == other0 - } - (crate::Item::Fn(self0), crate::Item::Fn(other0)) => self0 == other0, - (crate::Item::ForeignMod(self0), crate::Item::ForeignMod(other0)) => { - self0 == other0 - } - (crate::Item::Impl(self0), crate::Item::Impl(other0)) => self0 == other0, - (crate::Item::Macro(self0), crate::Item::Macro(other0)) => self0 == other0, - (crate::Item::Mod(self0), crate::Item::Mod(other0)) => self0 == other0, - (crate::Item::Static(self0), crate::Item::Static(other0)) => self0 == other0, - (crate::Item::Struct(self0), crate::Item::Struct(other0)) => self0 == other0, - (crate::Item::Trait(self0), crate::Item::Trait(other0)) => self0 == other0, - (crate::Item::TraitAlias(self0), crate::Item::TraitAlias(other0)) => { - self0 == other0 - } - (crate::Item::Type(self0), crate::Item::Type(other0)) => self0 == other0, - (crate::Item::Union(self0), crate::Item::Union(other0)) => self0 == other0, - (crate::Item::Use(self0), crate::Item::Use(other0)) => self0 == other0, - (crate::Item::Verbatim(self0), crate::Item::Verbatim(other0)) => { - TokenStreamHelper(self0) == TokenStreamHelper(other0) - } - _ => false, - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::ItemConst {} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::ItemConst { - fn eq(&self, other: &Self) -> bool { - self.attrs == other.attrs && self.vis == other.vis && self.ident == other.ident - && self.generics == other.generics && self.ty == other.ty - && self.expr == other.expr - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::ItemEnum {} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::ItemEnum { - fn eq(&self, other: &Self) -> bool { - self.attrs == other.attrs && self.vis == other.vis && self.ident == other.ident - && self.generics == other.generics && self.variants == other.variants - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::ItemExternCrate {} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::ItemExternCrate { - fn eq(&self, other: &Self) -> bool { - self.attrs == other.attrs && self.vis == other.vis && self.ident == other.ident - && self.rename == other.rename - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::ItemFn {} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::ItemFn { - fn eq(&self, other: &Self) -> bool { - self.attrs == other.attrs && self.vis == other.vis && self.sig == other.sig - && self.block == other.block - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::ItemForeignMod {} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::ItemForeignMod { - fn eq(&self, other: &Self) -> bool { - self.attrs == other.attrs && self.unsafety == other.unsafety - && self.abi == other.abi && self.items == other.items - } -} -#[cfg(feature = "full")] 
-#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::ItemImpl {} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::ItemImpl { - fn eq(&self, other: &Self) -> bool { - self.attrs == other.attrs && self.defaultness == other.defaultness - && self.unsafety == other.unsafety && self.generics == other.generics - && self.trait_ == other.trait_ && self.self_ty == other.self_ty - && self.items == other.items - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::ItemMacro {} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::ItemMacro { - fn eq(&self, other: &Self) -> bool { - self.attrs == other.attrs && self.ident == other.ident && self.mac == other.mac - && self.semi_token == other.semi_token - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::ItemMod {} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::ItemMod { - fn eq(&self, other: &Self) -> bool { - self.attrs == other.attrs && self.vis == other.vis - && self.unsafety == other.unsafety && self.ident == other.ident - && self.content == other.content && self.semi == other.semi - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::ItemStatic {} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::ItemStatic { - fn eq(&self, other: &Self) -> bool { - self.attrs == other.attrs && self.vis == other.vis - && self.mutability == other.mutability && self.ident == other.ident - && self.ty == other.ty && self.expr == other.expr - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::ItemStruct {} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::ItemStruct { - fn eq(&self, other: &Self) -> bool { - self.attrs == other.attrs && self.vis == other.vis && self.ident == other.ident - && self.generics == other.generics && self.fields == other.fields - && self.semi_token == other.semi_token - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::ItemTrait {} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::ItemTrait { - fn eq(&self, other: &Self) -> bool { - self.attrs == other.attrs && self.vis == other.vis - && self.unsafety == other.unsafety && self.auto_token == other.auto_token - && self.restriction == other.restriction && self.ident == other.ident - && self.generics == other.generics && self.colon_token == other.colon_token - && self.supertraits == other.supertraits && self.items == other.items - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::ItemTraitAlias {} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::ItemTraitAlias { - fn eq(&self, other: &Self) -> bool { - self.attrs == other.attrs && self.vis == other.vis && self.ident == other.ident - && self.generics == other.generics && self.bounds == other.bounds - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::ItemType {} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, 
doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::ItemType { - fn eq(&self, other: &Self) -> bool { - self.attrs == other.attrs && self.vis == other.vis && self.ident == other.ident - && self.generics == other.generics && self.ty == other.ty - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::ItemUnion {} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::ItemUnion { - fn eq(&self, other: &Self) -> bool { - self.attrs == other.attrs && self.vis == other.vis && self.ident == other.ident - && self.generics == other.generics && self.fields == other.fields - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::ItemUse {} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::ItemUse { - fn eq(&self, other: &Self) -> bool { - self.attrs == other.attrs && self.vis == other.vis - && self.leading_colon == other.leading_colon && self.tree == other.tree - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::Label {} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::Label { - fn eq(&self, other: &Self) -> bool { - self.name == other.name - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::LifetimeParam {} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::LifetimeParam { - fn eq(&self, other: &Self) -> bool { - self.attrs == other.attrs && self.lifetime == other.lifetime - && self.colon_token == other.colon_token && self.bounds == other.bounds - } -} -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::Lit {} -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::Lit { - fn eq(&self, other: &Self) -> bool { - match (self, other) { - (crate::Lit::Str(self0), crate::Lit::Str(other0)) => self0 == other0, - (crate::Lit::ByteStr(self0), crate::Lit::ByteStr(other0)) => self0 == other0, - (crate::Lit::CStr(self0), crate::Lit::CStr(other0)) => self0 == other0, - (crate::Lit::Byte(self0), crate::Lit::Byte(other0)) => self0 == other0, - (crate::Lit::Char(self0), crate::Lit::Char(other0)) => self0 == other0, - (crate::Lit::Int(self0), crate::Lit::Int(other0)) => self0 == other0, - (crate::Lit::Float(self0), crate::Lit::Float(other0)) => self0 == other0, - (crate::Lit::Bool(self0), crate::Lit::Bool(other0)) => self0 == other0, - (crate::Lit::Verbatim(self0), crate::Lit::Verbatim(other0)) => { - self0.to_string() == other0.to_string() - } - _ => false, - } - } -} -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::LitBool {} -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::LitBool { - fn eq(&self, other: &Self) -> bool { - self.value == other.value - } -} -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::LitByte {} -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::LitByteStr {} -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::LitCStr {} -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::LitChar {} -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::LitFloat 
{} -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::LitInt {} -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::LitStr {} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::Local {} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::Local { - fn eq(&self, other: &Self) -> bool { - self.attrs == other.attrs && self.pat == other.pat && self.init == other.init - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::LocalInit {} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::LocalInit { - fn eq(&self, other: &Self) -> bool { - self.expr == other.expr && self.diverge == other.diverge - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::Macro {} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::Macro { - fn eq(&self, other: &Self) -> bool { - self.path == other.path && self.delimiter == other.delimiter - && TokenStreamHelper(&self.tokens) == TokenStreamHelper(&other.tokens) - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::MacroDelimiter {} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::MacroDelimiter { - fn eq(&self, other: &Self) -> bool { - match (self, other) { - (crate::MacroDelimiter::Paren(_), crate::MacroDelimiter::Paren(_)) => true, - (crate::MacroDelimiter::Brace(_), crate::MacroDelimiter::Brace(_)) => true, - (crate::MacroDelimiter::Bracket(_), crate::MacroDelimiter::Bracket(_)) => { - true - } - _ => false, - } - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::Meta {} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::Meta { - fn eq(&self, other: &Self) -> bool { - match (self, other) { - (crate::Meta::Path(self0), crate::Meta::Path(other0)) => self0 == other0, - (crate::Meta::List(self0), crate::Meta::List(other0)) => self0 == other0, - (crate::Meta::NameValue(self0), crate::Meta::NameValue(other0)) => { - self0 == other0 - } - _ => false, - } - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::MetaList {} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::MetaList { - fn eq(&self, other: &Self) -> bool { - self.path == other.path && self.delimiter == other.delimiter - && TokenStreamHelper(&self.tokens) == TokenStreamHelper(&other.tokens) - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::MetaNameValue {} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::MetaNameValue { - fn eq(&self, other: &Self) -> bool { - self.path == other.path && self.value == other.value - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] 
-impl Eq for crate::ParenthesizedGenericArguments {} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::ParenthesizedGenericArguments { - fn eq(&self, other: &Self) -> bool { - self.inputs == other.inputs && self.output == other.output - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::Pat {} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::Pat { - fn eq(&self, other: &Self) -> bool { - match (self, other) { - (crate::Pat::Const(self0), crate::Pat::Const(other0)) => self0 == other0, - (crate::Pat::Ident(self0), crate::Pat::Ident(other0)) => self0 == other0, - (crate::Pat::Lit(self0), crate::Pat::Lit(other0)) => self0 == other0, - (crate::Pat::Macro(self0), crate::Pat::Macro(other0)) => self0 == other0, - (crate::Pat::Or(self0), crate::Pat::Or(other0)) => self0 == other0, - (crate::Pat::Paren(self0), crate::Pat::Paren(other0)) => self0 == other0, - (crate::Pat::Path(self0), crate::Pat::Path(other0)) => self0 == other0, - (crate::Pat::Range(self0), crate::Pat::Range(other0)) => self0 == other0, - (crate::Pat::Reference(self0), crate::Pat::Reference(other0)) => { - self0 == other0 - } - (crate::Pat::Rest(self0), crate::Pat::Rest(other0)) => self0 == other0, - (crate::Pat::Slice(self0), crate::Pat::Slice(other0)) => self0 == other0, - (crate::Pat::Struct(self0), crate::Pat::Struct(other0)) => self0 == other0, - (crate::Pat::Tuple(self0), crate::Pat::Tuple(other0)) => self0 == other0, - (crate::Pat::TupleStruct(self0), crate::Pat::TupleStruct(other0)) => { - self0 == other0 - } - (crate::Pat::Type(self0), crate::Pat::Type(other0)) => self0 == other0, - (crate::Pat::Verbatim(self0), crate::Pat::Verbatim(other0)) => { - TokenStreamHelper(self0) == TokenStreamHelper(other0) - } - (crate::Pat::Wild(self0), crate::Pat::Wild(other0)) => self0 == other0, - _ => false, - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::PatIdent {} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::PatIdent { - fn eq(&self, other: &Self) -> bool { - self.attrs == other.attrs && self.by_ref == other.by_ref - && self.mutability == other.mutability && self.ident == other.ident - && self.subpat == other.subpat - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::PatOr {} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::PatOr { - fn eq(&self, other: &Self) -> bool { - self.attrs == other.attrs && self.leading_vert == other.leading_vert - && self.cases == other.cases - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::PatParen {} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::PatParen { - fn eq(&self, other: &Self) -> bool { - self.attrs == other.attrs && self.pat == other.pat - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::PatReference {} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::PatReference { - fn eq(&self, other: &Self) -> bool { - self.attrs == other.attrs && self.mutability == other.mutability - && self.pat == other.pat - } -} 
-#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::PatRest {} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::PatRest { - fn eq(&self, other: &Self) -> bool { - self.attrs == other.attrs - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::PatSlice {} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::PatSlice { - fn eq(&self, other: &Self) -> bool { - self.attrs == other.attrs && self.elems == other.elems - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::PatStruct {} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::PatStruct { - fn eq(&self, other: &Self) -> bool { - self.attrs == other.attrs && self.qself == other.qself && self.path == other.path - && self.fields == other.fields && self.rest == other.rest - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::PatTuple {} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::PatTuple { - fn eq(&self, other: &Self) -> bool { - self.attrs == other.attrs && self.elems == other.elems - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::PatTupleStruct {} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::PatTupleStruct { - fn eq(&self, other: &Self) -> bool { - self.attrs == other.attrs && self.qself == other.qself && self.path == other.path - && self.elems == other.elems - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::PatType {} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::PatType { - fn eq(&self, other: &Self) -> bool { - self.attrs == other.attrs && self.pat == other.pat && self.ty == other.ty - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::PatWild {} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::PatWild { - fn eq(&self, other: &Self) -> bool { - self.attrs == other.attrs - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::Path {} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::Path { - fn eq(&self, other: &Self) -> bool { - self.leading_colon == other.leading_colon && self.segments == other.segments - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::PathArguments {} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::PathArguments { - fn eq(&self, other: &Self) -> bool { - match (self, other) { - (crate::PathArguments::None, crate::PathArguments::None) => true, - ( - crate::PathArguments::AngleBracketed(self0), - crate::PathArguments::AngleBracketed(other0), - ) => self0 == other0, - ( - crate::PathArguments::Parenthesized(self0), - crate::PathArguments::Parenthesized(other0), - ) => self0 == 
other0, - _ => false, - } - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::PathSegment {} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::PathSegment { - fn eq(&self, other: &Self) -> bool { - self.ident == other.ident && self.arguments == other.arguments - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::PointerMutability {} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::PointerMutability { - fn eq(&self, other: &Self) -> bool { - match (self, other) { - (crate::PointerMutability::Const(_), crate::PointerMutability::Const(_)) => { - true - } - (crate::PointerMutability::Mut(_), crate::PointerMutability::Mut(_)) => true, - _ => false, - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::PreciseCapture {} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::PreciseCapture { - fn eq(&self, other: &Self) -> bool { - self.params == other.params - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::PredicateLifetime {} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::PredicateLifetime { - fn eq(&self, other: &Self) -> bool { - self.lifetime == other.lifetime && self.bounds == other.bounds - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::PredicateType {} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::PredicateType { - fn eq(&self, other: &Self) -> bool { - self.lifetimes == other.lifetimes && self.bounded_ty == other.bounded_ty - && self.bounds == other.bounds - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::QSelf {} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::QSelf { - fn eq(&self, other: &Self) -> bool { - self.ty == other.ty && self.position == other.position - && self.as_token == other.as_token - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::RangeLimits {} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::RangeLimits { - fn eq(&self, other: &Self) -> bool { - match (self, other) { - (crate::RangeLimits::HalfOpen(_), crate::RangeLimits::HalfOpen(_)) => true, - (crate::RangeLimits::Closed(_), crate::RangeLimits::Closed(_)) => true, - _ => false, - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::Receiver {} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::Receiver { - fn eq(&self, other: &Self) -> bool { - self.attrs == other.attrs && self.reference == other.reference - && self.mutability == other.mutability - && self.colon_token == other.colon_token && self.ty == other.ty - } -} -#[cfg(any(feature = "derive", feature = "full"))] 
-#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::ReturnType {} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::ReturnType { - fn eq(&self, other: &Self) -> bool { - match (self, other) { - (crate::ReturnType::Default, crate::ReturnType::Default) => true, - (crate::ReturnType::Type(_, self1), crate::ReturnType::Type(_, other1)) => { - self1 == other1 - } - _ => false, - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::Signature {} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::Signature { - fn eq(&self, other: &Self) -> bool { - self.constness == other.constness && self.asyncness == other.asyncness - && self.unsafety == other.unsafety && self.abi == other.abi - && self.ident == other.ident && self.generics == other.generics - && self.inputs == other.inputs && self.variadic == other.variadic - && self.output == other.output - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::StaticMutability {} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::StaticMutability { - fn eq(&self, other: &Self) -> bool { - match (self, other) { - (crate::StaticMutability::Mut(_), crate::StaticMutability::Mut(_)) => true, - (crate::StaticMutability::None, crate::StaticMutability::None) => true, - _ => false, - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::Stmt {} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::Stmt { - fn eq(&self, other: &Self) -> bool { - match (self, other) { - (crate::Stmt::Local(self0), crate::Stmt::Local(other0)) => self0 == other0, - (crate::Stmt::Item(self0), crate::Stmt::Item(other0)) => self0 == other0, - (crate::Stmt::Expr(self0, self1), crate::Stmt::Expr(other0, other1)) => { - self0 == other0 && self1 == other1 - } - (crate::Stmt::Macro(self0), crate::Stmt::Macro(other0)) => self0 == other0, - _ => false, - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::StmtMacro {} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::StmtMacro { - fn eq(&self, other: &Self) -> bool { - self.attrs == other.attrs && self.mac == other.mac - && self.semi_token == other.semi_token - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::TraitBound {} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::TraitBound { - fn eq(&self, other: &Self) -> bool { - self.paren_token == other.paren_token && self.modifier == other.modifier - && self.lifetimes == other.lifetimes && self.path == other.path - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::TraitBoundModifier {} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::TraitBoundModifier { - fn eq(&self, other: &Self) -> bool { - match (self, other) { - (crate::TraitBoundModifier::None, crate::TraitBoundModifier::None) => true, - ( - 
crate::TraitBoundModifier::Maybe(_), - crate::TraitBoundModifier::Maybe(_), - ) => true, - _ => false, - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::TraitItem {} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::TraitItem { - fn eq(&self, other: &Self) -> bool { - match (self, other) { - (crate::TraitItem::Const(self0), crate::TraitItem::Const(other0)) => { - self0 == other0 - } - (crate::TraitItem::Fn(self0), crate::TraitItem::Fn(other0)) => { - self0 == other0 - } - (crate::TraitItem::Type(self0), crate::TraitItem::Type(other0)) => { - self0 == other0 - } - (crate::TraitItem::Macro(self0), crate::TraitItem::Macro(other0)) => { - self0 == other0 - } - (crate::TraitItem::Verbatim(self0), crate::TraitItem::Verbatim(other0)) => { - TokenStreamHelper(self0) == TokenStreamHelper(other0) - } - _ => false, - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::TraitItemConst {} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::TraitItemConst { - fn eq(&self, other: &Self) -> bool { - self.attrs == other.attrs && self.ident == other.ident - && self.generics == other.generics && self.ty == other.ty - && self.default == other.default - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::TraitItemFn {} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::TraitItemFn { - fn eq(&self, other: &Self) -> bool { - self.attrs == other.attrs && self.sig == other.sig - && self.default == other.default && self.semi_token == other.semi_token - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::TraitItemMacro {} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::TraitItemMacro { - fn eq(&self, other: &Self) -> bool { - self.attrs == other.attrs && self.mac == other.mac - && self.semi_token == other.semi_token - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::TraitItemType {} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::TraitItemType { - fn eq(&self, other: &Self) -> bool { - self.attrs == other.attrs && self.ident == other.ident - && self.generics == other.generics && self.colon_token == other.colon_token - && self.bounds == other.bounds && self.default == other.default - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::Type {} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::Type { - fn eq(&self, other: &Self) -> bool { - match (self, other) { - (crate::Type::Array(self0), crate::Type::Array(other0)) => self0 == other0, - (crate::Type::BareFn(self0), crate::Type::BareFn(other0)) => self0 == other0, - (crate::Type::Group(self0), crate::Type::Group(other0)) => self0 == other0, - (crate::Type::ImplTrait(self0), crate::Type::ImplTrait(other0)) => { - self0 == other0 - } - (crate::Type::Infer(self0), crate::Type::Infer(other0)) => self0 == other0, - (crate::Type::Macro(self0), crate::Type::Macro(other0)) => self0 == other0, - (crate::Type::Never(self0), 
crate::Type::Never(other0)) => self0 == other0, - (crate::Type::Paren(self0), crate::Type::Paren(other0)) => self0 == other0, - (crate::Type::Path(self0), crate::Type::Path(other0)) => self0 == other0, - (crate::Type::Ptr(self0), crate::Type::Ptr(other0)) => self0 == other0, - (crate::Type::Reference(self0), crate::Type::Reference(other0)) => { - self0 == other0 - } - (crate::Type::Slice(self0), crate::Type::Slice(other0)) => self0 == other0, - (crate::Type::TraitObject(self0), crate::Type::TraitObject(other0)) => { - self0 == other0 - } - (crate::Type::Tuple(self0), crate::Type::Tuple(other0)) => self0 == other0, - (crate::Type::Verbatim(self0), crate::Type::Verbatim(other0)) => { - TokenStreamHelper(self0) == TokenStreamHelper(other0) - } - _ => false, - } - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::TypeArray {} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::TypeArray { - fn eq(&self, other: &Self) -> bool { - self.elem == other.elem && self.len == other.len - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::TypeBareFn {} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::TypeBareFn { - fn eq(&self, other: &Self) -> bool { - self.lifetimes == other.lifetimes && self.unsafety == other.unsafety - && self.abi == other.abi && self.inputs == other.inputs - && self.variadic == other.variadic && self.output == other.output - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::TypeGroup {} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::TypeGroup { - fn eq(&self, other: &Self) -> bool { - self.elem == other.elem - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::TypeImplTrait {} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::TypeImplTrait { - fn eq(&self, other: &Self) -> bool { - self.bounds == other.bounds - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::TypeInfer {} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::TypeInfer { - fn eq(&self, _other: &Self) -> bool { - true - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::TypeMacro {} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::TypeMacro { - fn eq(&self, other: &Self) -> bool { - self.mac == other.mac - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::TypeNever {} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::TypeNever { - fn eq(&self, _other: &Self) -> bool { - true - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature 
= "extra-traits")))] -impl Eq for crate::TypeParam {} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::TypeParam { - fn eq(&self, other: &Self) -> bool { - self.attrs == other.attrs && self.ident == other.ident - && self.colon_token == other.colon_token && self.bounds == other.bounds - && self.eq_token == other.eq_token && self.default == other.default - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::TypeParamBound {} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::TypeParamBound { - fn eq(&self, other: &Self) -> bool { - match (self, other) { - ( - crate::TypeParamBound::Trait(self0), - crate::TypeParamBound::Trait(other0), - ) => self0 == other0, - ( - crate::TypeParamBound::Lifetime(self0), - crate::TypeParamBound::Lifetime(other0), - ) => self0 == other0, - #[cfg(feature = "full")] - ( - crate::TypeParamBound::PreciseCapture(self0), - crate::TypeParamBound::PreciseCapture(other0), - ) => self0 == other0, - ( - crate::TypeParamBound::Verbatim(self0), - crate::TypeParamBound::Verbatim(other0), - ) => TokenStreamHelper(self0) == TokenStreamHelper(other0), - _ => false, - } - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::TypeParen {} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::TypeParen { - fn eq(&self, other: &Self) -> bool { - self.elem == other.elem - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::TypePath {} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::TypePath { - fn eq(&self, other: &Self) -> bool { - self.qself == other.qself && self.path == other.path - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::TypePtr {} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::TypePtr { - fn eq(&self, other: &Self) -> bool { - self.const_token == other.const_token && self.mutability == other.mutability - && self.elem == other.elem - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::TypeReference {} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::TypeReference { - fn eq(&self, other: &Self) -> bool { - self.lifetime == other.lifetime && self.mutability == other.mutability - && self.elem == other.elem - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::TypeSlice {} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::TypeSlice { - fn eq(&self, other: &Self) -> bool { - self.elem == other.elem - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::TypeTraitObject {} -#[cfg(any(feature = "derive", feature = "full"))] 
-#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::TypeTraitObject { - fn eq(&self, other: &Self) -> bool { - self.dyn_token == other.dyn_token && self.bounds == other.bounds - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::TypeTuple {} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::TypeTuple { - fn eq(&self, other: &Self) -> bool { - self.elems == other.elems - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::UnOp {} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::UnOp { - fn eq(&self, other: &Self) -> bool { - match (self, other) { - (crate::UnOp::Deref(_), crate::UnOp::Deref(_)) => true, - (crate::UnOp::Not(_), crate::UnOp::Not(_)) => true, - (crate::UnOp::Neg(_), crate::UnOp::Neg(_)) => true, - _ => false, - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::UseGlob {} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::UseGlob { - fn eq(&self, _other: &Self) -> bool { - true - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::UseGroup {} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::UseGroup { - fn eq(&self, other: &Self) -> bool { - self.items == other.items - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::UseName {} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::UseName { - fn eq(&self, other: &Self) -> bool { - self.ident == other.ident - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::UsePath {} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::UsePath { - fn eq(&self, other: &Self) -> bool { - self.ident == other.ident && self.tree == other.tree - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::UseRename {} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::UseRename { - fn eq(&self, other: &Self) -> bool { - self.ident == other.ident && self.rename == other.rename - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for crate::UseTree {} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for crate::UseTree { - fn eq(&self, other: &Self) -> bool { - match (self, other) { - (crate::UseTree::Path(self0), crate::UseTree::Path(other0)) => { - self0 == other0 - } - (crate::UseTree::Name(self0), crate::UseTree::Name(other0)) => { - self0 == other0 - } - (crate::UseTree::Rename(self0), crate::UseTree::Rename(other0)) => { - self0 == other0 - } - (crate::UseTree::Glob(self0), crate::UseTree::Glob(other0)) => { - self0 == other0 - } - (crate::UseTree::Group(self0), crate::UseTree::Group(other0)) => { - self0 == other0 - } - _ => false, - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl 
Eq for crate::Variadic {}
-#[cfg(feature = "full")]
-#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))]
-impl PartialEq for crate::Variadic {
-    fn eq(&self, other: &Self) -> bool {
-        self.attrs == other.attrs && self.pat == other.pat && self.comma == other.comma
-    }
-}
-#[cfg(any(feature = "derive", feature = "full"))]
-#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))]
-impl Eq for crate::Variant {}
-#[cfg(any(feature = "derive", feature = "full"))]
-#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))]
-impl PartialEq for crate::Variant {
-    fn eq(&self, other: &Self) -> bool {
-        self.attrs == other.attrs && self.ident == other.ident
-            && self.fields == other.fields && self.discriminant == other.discriminant
-    }
-}
-#[cfg(any(feature = "derive", feature = "full"))]
-#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))]
-impl Eq for crate::VisRestricted {}
-#[cfg(any(feature = "derive", feature = "full"))]
-#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))]
-impl PartialEq for crate::VisRestricted {
-    fn eq(&self, other: &Self) -> bool {
-        self.in_token == other.in_token && self.path == other.path
-    }
-}
-#[cfg(any(feature = "derive", feature = "full"))]
-#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))]
-impl Eq for crate::Visibility {}
-#[cfg(any(feature = "derive", feature = "full"))]
-#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))]
-impl PartialEq for crate::Visibility {
-    fn eq(&self, other: &Self) -> bool {
-        match (self, other) {
-            (crate::Visibility::Public(_), crate::Visibility::Public(_)) => true,
-            (
-                crate::Visibility::Restricted(self0),
-                crate::Visibility::Restricted(other0),
-            ) => self0 == other0,
-            (crate::Visibility::Inherited, crate::Visibility::Inherited) => true,
-            _ => false,
-        }
-    }
-}
-#[cfg(any(feature = "derive", feature = "full"))]
-#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))]
-impl Eq for crate::WhereClause {}
-#[cfg(any(feature = "derive", feature = "full"))]
-#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))]
-impl PartialEq for crate::WhereClause {
-    fn eq(&self, other: &Self) -> bool {
-        self.predicates == other.predicates
-    }
-}
-#[cfg(any(feature = "derive", feature = "full"))]
-#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))]
-impl Eq for crate::WherePredicate {}
-#[cfg(any(feature = "derive", feature = "full"))]
-#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))]
-impl PartialEq for crate::WherePredicate {
-    fn eq(&self, other: &Self) -> bool {
-        match (self, other) {
-            (
-                crate::WherePredicate::Lifetime(self0),
-                crate::WherePredicate::Lifetime(other0),
-            ) => self0 == other0,
-            (crate::WherePredicate::Type(self0), crate::WherePredicate::Type(other0)) => {
-                self0 == other0
-            }
-            _ => false,
-        }
-    }
-}
diff --git a/vendor/syn/src/gen/fold.rs b/vendor/syn/src/gen/fold.rs
deleted file mode 100644
index 1f0afd31919d9c..00000000000000
--- a/vendor/syn/src/gen/fold.rs
+++ /dev/null
@@ -1,3902 +0,0 @@
-// This file is @generated by syn-internal-codegen.
-// It is not intended for manual editing.
-
-#![allow(unreachable_code, unused_variables)]
-#![allow(
-    clippy::match_wildcard_for_single_variants,
-    clippy::needless_match,
-    clippy::needless_pass_by_ref_mut,
-)]
-#[cfg(feature = "full")]
-macro_rules! full {
-    ($e:expr) => {
-        $e
-    };
-}
-#[cfg(all(feature = "derive", not(feature = "full")))]
-macro_rules! full {
-    ($e:expr) => {
-        unreachable!()
-    };
-}
-/// Syntax tree traversal to transform the nodes of an owned syntax tree.
-///
-/// See the [module documentation] for details.
-/// -/// [module documentation]: self -pub trait Fold { - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn fold_abi(&mut self, i: crate::Abi) -> crate::Abi { - fold_abi(self, i) - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn fold_angle_bracketed_generic_arguments( - &mut self, - i: crate::AngleBracketedGenericArguments, - ) -> crate::AngleBracketedGenericArguments { - fold_angle_bracketed_generic_arguments(self, i) - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn fold_arm(&mut self, i: crate::Arm) -> crate::Arm { - fold_arm(self, i) - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn fold_assoc_const(&mut self, i: crate::AssocConst) -> crate::AssocConst { - fold_assoc_const(self, i) - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn fold_assoc_type(&mut self, i: crate::AssocType) -> crate::AssocType { - fold_assoc_type(self, i) - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn fold_attr_style(&mut self, i: crate::AttrStyle) -> crate::AttrStyle { - fold_attr_style(self, i) - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn fold_attribute(&mut self, i: crate::Attribute) -> crate::Attribute { - fold_attribute(self, i) - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn fold_attributes(&mut self, i: Vec<crate::Attribute>) -> Vec<crate::Attribute> { - fold_vec(i, self, Self::fold_attribute) - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn fold_bare_fn_arg(&mut self, i: crate::BareFnArg) -> crate::BareFnArg { - fold_bare_fn_arg(self, i) - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn fold_bare_variadic(&mut self, i: crate::BareVariadic) -> crate::BareVariadic { - fold_bare_variadic(self, i) - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn fold_bin_op(&mut self, i: crate::BinOp) -> crate::BinOp { - fold_bin_op(self, i) - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn fold_block(&mut self, i: crate::Block) -> crate::Block { - fold_block(self, i) - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn fold_bound_lifetimes( - &mut self, - i: crate::BoundLifetimes, - ) -> crate::BoundLifetimes { - fold_bound_lifetimes(self, i) - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn fold_captured_param(&mut self, i: crate::CapturedParam) -> crate::CapturedParam { - fold_captured_param(self, i) - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn fold_const_param(&mut self, i: crate::ConstParam) -> crate::ConstParam { - fold_const_param(self, i) - } - #[cfg(any(feature = "derive", feature = "full"))] 
- #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn fold_constraint(&mut self, i: crate::Constraint) -> crate::Constraint { - fold_constraint(self, i) - } - #[cfg(feature = "derive")] - #[cfg_attr(docsrs, doc(cfg(feature = "derive")))] - fn fold_data(&mut self, i: crate::Data) -> crate::Data { - fold_data(self, i) - } - #[cfg(feature = "derive")] - #[cfg_attr(docsrs, doc(cfg(feature = "derive")))] - fn fold_data_enum(&mut self, i: crate::DataEnum) -> crate::DataEnum { - fold_data_enum(self, i) - } - #[cfg(feature = "derive")] - #[cfg_attr(docsrs, doc(cfg(feature = "derive")))] - fn fold_data_struct(&mut self, i: crate::DataStruct) -> crate::DataStruct { - fold_data_struct(self, i) - } - #[cfg(feature = "derive")] - #[cfg_attr(docsrs, doc(cfg(feature = "derive")))] - fn fold_data_union(&mut self, i: crate::DataUnion) -> crate::DataUnion { - fold_data_union(self, i) - } - #[cfg(feature = "derive")] - #[cfg_attr(docsrs, doc(cfg(feature = "derive")))] - fn fold_derive_input(&mut self, i: crate::DeriveInput) -> crate::DeriveInput { - fold_derive_input(self, i) - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn fold_expr(&mut self, i: crate::Expr) -> crate::Expr { - fold_expr(self, i) - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn fold_expr_array(&mut self, i: crate::ExprArray) -> crate::ExprArray { - fold_expr_array(self, i) - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn fold_expr_assign(&mut self, i: crate::ExprAssign) -> crate::ExprAssign { - fold_expr_assign(self, i) - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn fold_expr_async(&mut self, i: crate::ExprAsync) -> crate::ExprAsync { - fold_expr_async(self, i) - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn fold_expr_await(&mut self, i: crate::ExprAwait) -> crate::ExprAwait { - fold_expr_await(self, i) - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn fold_expr_binary(&mut self, i: crate::ExprBinary) -> crate::ExprBinary { - fold_expr_binary(self, i) - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn fold_expr_block(&mut self, i: crate::ExprBlock) -> crate::ExprBlock { - fold_expr_block(self, i) - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn fold_expr_break(&mut self, i: crate::ExprBreak) -> crate::ExprBreak { - fold_expr_break(self, i) - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn fold_expr_call(&mut self, i: crate::ExprCall) -> crate::ExprCall { - fold_expr_call(self, i) - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn fold_expr_cast(&mut self, i: crate::ExprCast) -> crate::ExprCast { - fold_expr_cast(self, i) - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn fold_expr_closure(&mut self, i: crate::ExprClosure) -> crate::ExprClosure { - fold_expr_closure(self, i) - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn fold_expr_const(&mut self, i: crate::ExprConst) -> crate::ExprConst { - fold_expr_const(self, i) - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = 
"full")))] - fn fold_expr_continue(&mut self, i: crate::ExprContinue) -> crate::ExprContinue { - fold_expr_continue(self, i) - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn fold_expr_field(&mut self, i: crate::ExprField) -> crate::ExprField { - fold_expr_field(self, i) - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn fold_expr_for_loop(&mut self, i: crate::ExprForLoop) -> crate::ExprForLoop { - fold_expr_for_loop(self, i) - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn fold_expr_group(&mut self, i: crate::ExprGroup) -> crate::ExprGroup { - fold_expr_group(self, i) - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn fold_expr_if(&mut self, i: crate::ExprIf) -> crate::ExprIf { - fold_expr_if(self, i) - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn fold_expr_index(&mut self, i: crate::ExprIndex) -> crate::ExprIndex { - fold_expr_index(self, i) - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn fold_expr_infer(&mut self, i: crate::ExprInfer) -> crate::ExprInfer { - fold_expr_infer(self, i) - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn fold_expr_let(&mut self, i: crate::ExprLet) -> crate::ExprLet { - fold_expr_let(self, i) - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn fold_expr_lit(&mut self, i: crate::ExprLit) -> crate::ExprLit { - fold_expr_lit(self, i) - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn fold_expr_loop(&mut self, i: crate::ExprLoop) -> crate::ExprLoop { - fold_expr_loop(self, i) - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn fold_expr_macro(&mut self, i: crate::ExprMacro) -> crate::ExprMacro { - fold_expr_macro(self, i) - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn fold_expr_match(&mut self, i: crate::ExprMatch) -> crate::ExprMatch { - fold_expr_match(self, i) - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn fold_expr_method_call( - &mut self, - i: crate::ExprMethodCall, - ) -> crate::ExprMethodCall { - fold_expr_method_call(self, i) - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn fold_expr_paren(&mut self, i: crate::ExprParen) -> crate::ExprParen { - fold_expr_paren(self, i) - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn fold_expr_path(&mut self, i: crate::ExprPath) -> crate::ExprPath { - fold_expr_path(self, i) - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn fold_expr_range(&mut self, i: crate::ExprRange) -> crate::ExprRange { - fold_expr_range(self, i) - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn fold_expr_raw_addr(&mut self, i: crate::ExprRawAddr) -> crate::ExprRawAddr { - fold_expr_raw_addr(self, i) - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, 
doc(cfg(any(feature = "derive", feature = "full"))))] - fn fold_expr_reference(&mut self, i: crate::ExprReference) -> crate::ExprReference { - fold_expr_reference(self, i) - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn fold_expr_repeat(&mut self, i: crate::ExprRepeat) -> crate::ExprRepeat { - fold_expr_repeat(self, i) - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn fold_expr_return(&mut self, i: crate::ExprReturn) -> crate::ExprReturn { - fold_expr_return(self, i) - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn fold_expr_struct(&mut self, i: crate::ExprStruct) -> crate::ExprStruct { - fold_expr_struct(self, i) - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn fold_expr_try(&mut self, i: crate::ExprTry) -> crate::ExprTry { - fold_expr_try(self, i) - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn fold_expr_try_block(&mut self, i: crate::ExprTryBlock) -> crate::ExprTryBlock { - fold_expr_try_block(self, i) - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn fold_expr_tuple(&mut self, i: crate::ExprTuple) -> crate::ExprTuple { - fold_expr_tuple(self, i) - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn fold_expr_unary(&mut self, i: crate::ExprUnary) -> crate::ExprUnary { - fold_expr_unary(self, i) - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn fold_expr_unsafe(&mut self, i: crate::ExprUnsafe) -> crate::ExprUnsafe { - fold_expr_unsafe(self, i) - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn fold_expr_while(&mut self, i: crate::ExprWhile) -> crate::ExprWhile { - fold_expr_while(self, i) - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn fold_expr_yield(&mut self, i: crate::ExprYield) -> crate::ExprYield { - fold_expr_yield(self, i) - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn fold_field(&mut self, i: crate::Field) -> crate::Field { - fold_field(self, i) - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn fold_field_mutability( - &mut self, - i: crate::FieldMutability, - ) -> crate::FieldMutability { - fold_field_mutability(self, i) - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn fold_field_pat(&mut self, i: crate::FieldPat) -> crate::FieldPat { - fold_field_pat(self, i) - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn fold_field_value(&mut self, i: crate::FieldValue) -> crate::FieldValue { - fold_field_value(self, i) - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn fold_fields(&mut self, i: crate::Fields) -> crate::Fields { - fold_fields(self, i) - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn fold_fields_named(&mut self, i: crate::FieldsNamed) -> crate::FieldsNamed { - fold_fields_named(self, i) - } - #[cfg(any(feature 
= "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn fold_fields_unnamed(&mut self, i: crate::FieldsUnnamed) -> crate::FieldsUnnamed { - fold_fields_unnamed(self, i) - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn fold_file(&mut self, i: crate::File) -> crate::File { - fold_file(self, i) - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn fold_fn_arg(&mut self, i: crate::FnArg) -> crate::FnArg { - fold_fn_arg(self, i) - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn fold_foreign_item(&mut self, i: crate::ForeignItem) -> crate::ForeignItem { - fold_foreign_item(self, i) - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn fold_foreign_item_fn(&mut self, i: crate::ForeignItemFn) -> crate::ForeignItemFn { - fold_foreign_item_fn(self, i) - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn fold_foreign_item_macro( - &mut self, - i: crate::ForeignItemMacro, - ) -> crate::ForeignItemMacro { - fold_foreign_item_macro(self, i) - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn fold_foreign_item_static( - &mut self, - i: crate::ForeignItemStatic, - ) -> crate::ForeignItemStatic { - fold_foreign_item_static(self, i) - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn fold_foreign_item_type( - &mut self, - i: crate::ForeignItemType, - ) -> crate::ForeignItemType { - fold_foreign_item_type(self, i) - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn fold_generic_argument( - &mut self, - i: crate::GenericArgument, - ) -> crate::GenericArgument { - fold_generic_argument(self, i) - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn fold_generic_param(&mut self, i: crate::GenericParam) -> crate::GenericParam { - fold_generic_param(self, i) - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn fold_generics(&mut self, i: crate::Generics) -> crate::Generics { - fold_generics(self, i) - } - fn fold_ident(&mut self, i: proc_macro2::Ident) -> proc_macro2::Ident { - fold_ident(self, i) - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn fold_impl_item(&mut self, i: crate::ImplItem) -> crate::ImplItem { - fold_impl_item(self, i) - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn fold_impl_item_const(&mut self, i: crate::ImplItemConst) -> crate::ImplItemConst { - fold_impl_item_const(self, i) - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn fold_impl_item_fn(&mut self, i: crate::ImplItemFn) -> crate::ImplItemFn { - fold_impl_item_fn(self, i) - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn fold_impl_item_macro(&mut self, i: crate::ImplItemMacro) -> crate::ImplItemMacro { - fold_impl_item_macro(self, i) - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn fold_impl_item_type(&mut self, i: crate::ImplItemType) -> crate::ImplItemType { - fold_impl_item_type(self, i) - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn fold_impl_restriction( - &mut self, - i: 
crate::ImplRestriction, - ) -> crate::ImplRestriction { - fold_impl_restriction(self, i) - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn fold_index(&mut self, i: crate::Index) -> crate::Index { - fold_index(self, i) - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn fold_item(&mut self, i: crate::Item) -> crate::Item { - fold_item(self, i) - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn fold_item_const(&mut self, i: crate::ItemConst) -> crate::ItemConst { - fold_item_const(self, i) - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn fold_item_enum(&mut self, i: crate::ItemEnum) -> crate::ItemEnum { - fold_item_enum(self, i) - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn fold_item_extern_crate( - &mut self, - i: crate::ItemExternCrate, - ) -> crate::ItemExternCrate { - fold_item_extern_crate(self, i) - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn fold_item_fn(&mut self, i: crate::ItemFn) -> crate::ItemFn { - fold_item_fn(self, i) - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn fold_item_foreign_mod( - &mut self, - i: crate::ItemForeignMod, - ) -> crate::ItemForeignMod { - fold_item_foreign_mod(self, i) - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn fold_item_impl(&mut self, i: crate::ItemImpl) -> crate::ItemImpl { - fold_item_impl(self, i) - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn fold_item_macro(&mut self, i: crate::ItemMacro) -> crate::ItemMacro { - fold_item_macro(self, i) - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn fold_item_mod(&mut self, i: crate::ItemMod) -> crate::ItemMod { - fold_item_mod(self, i) - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn fold_item_static(&mut self, i: crate::ItemStatic) -> crate::ItemStatic { - fold_item_static(self, i) - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn fold_item_struct(&mut self, i: crate::ItemStruct) -> crate::ItemStruct { - fold_item_struct(self, i) - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn fold_item_trait(&mut self, i: crate::ItemTrait) -> crate::ItemTrait { - fold_item_trait(self, i) - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn fold_item_trait_alias( - &mut self, - i: crate::ItemTraitAlias, - ) -> crate::ItemTraitAlias { - fold_item_trait_alias(self, i) - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn fold_item_type(&mut self, i: crate::ItemType) -> crate::ItemType { - fold_item_type(self, i) - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn fold_item_union(&mut self, i: crate::ItemUnion) -> crate::ItemUnion { - fold_item_union(self, i) - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn fold_item_use(&mut self, i: crate::ItemUse) -> crate::ItemUse { - fold_item_use(self, i) - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn fold_label(&mut self, i: crate::Label) -> crate::Label { - fold_label(self, i) - } - fn fold_lifetime(&mut self, i: crate::Lifetime) -> crate::Lifetime { - fold_lifetime(self, i) - } - 
#[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn fold_lifetime_param(&mut self, i: crate::LifetimeParam) -> crate::LifetimeParam { - fold_lifetime_param(self, i) - } - fn fold_lit(&mut self, i: crate::Lit) -> crate::Lit { - fold_lit(self, i) - } - fn fold_lit_bool(&mut self, i: crate::LitBool) -> crate::LitBool { - fold_lit_bool(self, i) - } - fn fold_lit_byte(&mut self, i: crate::LitByte) -> crate::LitByte { - fold_lit_byte(self, i) - } - fn fold_lit_byte_str(&mut self, i: crate::LitByteStr) -> crate::LitByteStr { - fold_lit_byte_str(self, i) - } - fn fold_lit_cstr(&mut self, i: crate::LitCStr) -> crate::LitCStr { - fold_lit_cstr(self, i) - } - fn fold_lit_char(&mut self, i: crate::LitChar) -> crate::LitChar { - fold_lit_char(self, i) - } - fn fold_lit_float(&mut self, i: crate::LitFloat) -> crate::LitFloat { - fold_lit_float(self, i) - } - fn fold_lit_int(&mut self, i: crate::LitInt) -> crate::LitInt { - fold_lit_int(self, i) - } - fn fold_lit_str(&mut self, i: crate::LitStr) -> crate::LitStr { - fold_lit_str(self, i) - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn fold_local(&mut self, i: crate::Local) -> crate::Local { - fold_local(self, i) - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn fold_local_init(&mut self, i: crate::LocalInit) -> crate::LocalInit { - fold_local_init(self, i) - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn fold_macro(&mut self, i: crate::Macro) -> crate::Macro { - fold_macro(self, i) - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn fold_macro_delimiter( - &mut self, - i: crate::MacroDelimiter, - ) -> crate::MacroDelimiter { - fold_macro_delimiter(self, i) - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn fold_member(&mut self, i: crate::Member) -> crate::Member { - fold_member(self, i) - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn fold_meta(&mut self, i: crate::Meta) -> crate::Meta { - fold_meta(self, i) - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn fold_meta_list(&mut self, i: crate::MetaList) -> crate::MetaList { - fold_meta_list(self, i) - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn fold_meta_name_value(&mut self, i: crate::MetaNameValue) -> crate::MetaNameValue { - fold_meta_name_value(self, i) - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn fold_parenthesized_generic_arguments( - &mut self, - i: crate::ParenthesizedGenericArguments, - ) -> crate::ParenthesizedGenericArguments { - fold_parenthesized_generic_arguments(self, i) - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn fold_pat(&mut self, i: crate::Pat) -> crate::Pat { - fold_pat(self, i) - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn fold_pat_ident(&mut self, i: crate::PatIdent) -> crate::PatIdent { - fold_pat_ident(self, i) - } - #[cfg(feature = "full")] - 
#[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn fold_pat_or(&mut self, i: crate::PatOr) -> crate::PatOr { - fold_pat_or(self, i) - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn fold_pat_paren(&mut self, i: crate::PatParen) -> crate::PatParen { - fold_pat_paren(self, i) - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn fold_pat_reference(&mut self, i: crate::PatReference) -> crate::PatReference { - fold_pat_reference(self, i) - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn fold_pat_rest(&mut self, i: crate::PatRest) -> crate::PatRest { - fold_pat_rest(self, i) - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn fold_pat_slice(&mut self, i: crate::PatSlice) -> crate::PatSlice { - fold_pat_slice(self, i) - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn fold_pat_struct(&mut self, i: crate::PatStruct) -> crate::PatStruct { - fold_pat_struct(self, i) - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn fold_pat_tuple(&mut self, i: crate::PatTuple) -> crate::PatTuple { - fold_pat_tuple(self, i) - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn fold_pat_tuple_struct( - &mut self, - i: crate::PatTupleStruct, - ) -> crate::PatTupleStruct { - fold_pat_tuple_struct(self, i) - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn fold_pat_type(&mut self, i: crate::PatType) -> crate::PatType { - fold_pat_type(self, i) - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn fold_pat_wild(&mut self, i: crate::PatWild) -> crate::PatWild { - fold_pat_wild(self, i) - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn fold_path(&mut self, i: crate::Path) -> crate::Path { - fold_path(self, i) - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn fold_path_arguments(&mut self, i: crate::PathArguments) -> crate::PathArguments { - fold_path_arguments(self, i) - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn fold_path_segment(&mut self, i: crate::PathSegment) -> crate::PathSegment { - fold_path_segment(self, i) - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn fold_pointer_mutability( - &mut self, - i: crate::PointerMutability, - ) -> crate::PointerMutability { - fold_pointer_mutability(self, i) - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn fold_precise_capture( - &mut self, - i: crate::PreciseCapture, - ) -> crate::PreciseCapture { - fold_precise_capture(self, i) - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn fold_predicate_lifetime( - &mut self, - i: crate::PredicateLifetime, - ) -> crate::PredicateLifetime { - fold_predicate_lifetime(self, i) - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn fold_predicate_type(&mut self, i: crate::PredicateType) -> crate::PredicateType { - fold_predicate_type(self, i) - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = 
"derive", feature = "full"))))] - fn fold_qself(&mut self, i: crate::QSelf) -> crate::QSelf { - fold_qself(self, i) - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn fold_range_limits(&mut self, i: crate::RangeLimits) -> crate::RangeLimits { - fold_range_limits(self, i) - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn fold_receiver(&mut self, i: crate::Receiver) -> crate::Receiver { - fold_receiver(self, i) - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn fold_return_type(&mut self, i: crate::ReturnType) -> crate::ReturnType { - fold_return_type(self, i) - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn fold_signature(&mut self, i: crate::Signature) -> crate::Signature { - fold_signature(self, i) - } - fn fold_span(&mut self, i: proc_macro2::Span) -> proc_macro2::Span { - i - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn fold_static_mutability( - &mut self, - i: crate::StaticMutability, - ) -> crate::StaticMutability { - fold_static_mutability(self, i) - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn fold_stmt(&mut self, i: crate::Stmt) -> crate::Stmt { - fold_stmt(self, i) - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn fold_stmt_macro(&mut self, i: crate::StmtMacro) -> crate::StmtMacro { - fold_stmt_macro(self, i) - } - fn fold_token_stream( - &mut self, - i: proc_macro2::TokenStream, - ) -> proc_macro2::TokenStream { - i - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn fold_trait_bound(&mut self, i: crate::TraitBound) -> crate::TraitBound { - fold_trait_bound(self, i) - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn fold_trait_bound_modifier( - &mut self, - i: crate::TraitBoundModifier, - ) -> crate::TraitBoundModifier { - fold_trait_bound_modifier(self, i) - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn fold_trait_item(&mut self, i: crate::TraitItem) -> crate::TraitItem { - fold_trait_item(self, i) - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn fold_trait_item_const( - &mut self, - i: crate::TraitItemConst, - ) -> crate::TraitItemConst { - fold_trait_item_const(self, i) - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn fold_trait_item_fn(&mut self, i: crate::TraitItemFn) -> crate::TraitItemFn { - fold_trait_item_fn(self, i) - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn fold_trait_item_macro( - &mut self, - i: crate::TraitItemMacro, - ) -> crate::TraitItemMacro { - fold_trait_item_macro(self, i) - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn fold_trait_item_type(&mut self, i: crate::TraitItemType) -> crate::TraitItemType { - fold_trait_item_type(self, i) - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn fold_type(&mut self, i: crate::Type) -> crate::Type { - fold_type(self, i) - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn fold_type_array(&mut self, i: 
crate::TypeArray) -> crate::TypeArray { - fold_type_array(self, i) - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn fold_type_bare_fn(&mut self, i: crate::TypeBareFn) -> crate::TypeBareFn { - fold_type_bare_fn(self, i) - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn fold_type_group(&mut self, i: crate::TypeGroup) -> crate::TypeGroup { - fold_type_group(self, i) - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn fold_type_impl_trait(&mut self, i: crate::TypeImplTrait) -> crate::TypeImplTrait { - fold_type_impl_trait(self, i) - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn fold_type_infer(&mut self, i: crate::TypeInfer) -> crate::TypeInfer { - fold_type_infer(self, i) - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn fold_type_macro(&mut self, i: crate::TypeMacro) -> crate::TypeMacro { - fold_type_macro(self, i) - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn fold_type_never(&mut self, i: crate::TypeNever) -> crate::TypeNever { - fold_type_never(self, i) - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn fold_type_param(&mut self, i: crate::TypeParam) -> crate::TypeParam { - fold_type_param(self, i) - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn fold_type_param_bound( - &mut self, - i: crate::TypeParamBound, - ) -> crate::TypeParamBound { - fold_type_param_bound(self, i) - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn fold_type_paren(&mut self, i: crate::TypeParen) -> crate::TypeParen { - fold_type_paren(self, i) - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn fold_type_path(&mut self, i: crate::TypePath) -> crate::TypePath { - fold_type_path(self, i) - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn fold_type_ptr(&mut self, i: crate::TypePtr) -> crate::TypePtr { - fold_type_ptr(self, i) - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn fold_type_reference(&mut self, i: crate::TypeReference) -> crate::TypeReference { - fold_type_reference(self, i) - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn fold_type_slice(&mut self, i: crate::TypeSlice) -> crate::TypeSlice { - fold_type_slice(self, i) - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn fold_type_trait_object( - &mut self, - i: crate::TypeTraitObject, - ) -> crate::TypeTraitObject { - fold_type_trait_object(self, i) - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] 
- fn fold_type_tuple(&mut self, i: crate::TypeTuple) -> crate::TypeTuple { - fold_type_tuple(self, i) - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn fold_un_op(&mut self, i: crate::UnOp) -> crate::UnOp { - fold_un_op(self, i) - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn fold_use_glob(&mut self, i: crate::UseGlob) -> crate::UseGlob { - fold_use_glob(self, i) - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn fold_use_group(&mut self, i: crate::UseGroup) -> crate::UseGroup { - fold_use_group(self, i) - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn fold_use_name(&mut self, i: crate::UseName) -> crate::UseName { - fold_use_name(self, i) - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn fold_use_path(&mut self, i: crate::UsePath) -> crate::UsePath { - fold_use_path(self, i) - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn fold_use_rename(&mut self, i: crate::UseRename) -> crate::UseRename { - fold_use_rename(self, i) - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn fold_use_tree(&mut self, i: crate::UseTree) -> crate::UseTree { - fold_use_tree(self, i) - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn fold_variadic(&mut self, i: crate::Variadic) -> crate::Variadic { - fold_variadic(self, i) - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn fold_variant(&mut self, i: crate::Variant) -> crate::Variant { - fold_variant(self, i) - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn fold_vis_restricted(&mut self, i: crate::VisRestricted) -> crate::VisRestricted { - fold_vis_restricted(self, i) - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn fold_visibility(&mut self, i: crate::Visibility) -> crate::Visibility { - fold_visibility(self, i) - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn fold_where_clause(&mut self, i: crate::WhereClause) -> crate::WhereClause { - fold_where_clause(self, i) - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn fold_where_predicate( - &mut self, - i: crate::WherePredicate, - ) -> crate::WherePredicate { - fold_where_predicate(self, i) - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn fold_abi<F>(f: &mut F, node: crate::Abi) -> crate::Abi -where - F: Fold + ?Sized, -{ - crate::Abi { - extern_token: node.extern_token, - name: (node.name).map(|it| f.fold_lit_str(it)), - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn fold_angle_bracketed_generic_arguments<F>( - f: &mut F, - node: crate::AngleBracketedGenericArguments, -) -> crate::AngleBracketedGenericArguments -where - F: Fold + ?Sized, -{ - crate::AngleBracketedGenericArguments { - colon2_token: node.colon2_token, - lt_token: node.lt_token, - args: crate::punctuated::fold(node.args, 
f, F::fold_generic_argument), - gt_token: node.gt_token, - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn fold_arm<F>(f: &mut F, node: crate::Arm) -> crate::Arm -where - F: Fold + ?Sized, -{ - crate::Arm { - attrs: f.fold_attributes(node.attrs), - pat: f.fold_pat(node.pat), - guard: (node.guard).map(|it| ((it).0, Box::new(f.fold_expr(*(it).1)))), - fat_arrow_token: node.fat_arrow_token, - body: Box::new(f.fold_expr(*node.body)), - comma: node.comma, - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn fold_assoc_const<F>(f: &mut F, node: crate::AssocConst) -> crate::AssocConst -where - F: Fold + ?Sized, -{ - crate::AssocConst { - ident: f.fold_ident(node.ident), - generics: (node.generics).map(|it| f.fold_angle_bracketed_generic_arguments(it)), - eq_token: node.eq_token, - value: f.fold_expr(node.value), - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn fold_assoc_type<F>(f: &mut F, node: crate::AssocType) -> crate::AssocType -where - F: Fold + ?Sized, -{ - crate::AssocType { - ident: f.fold_ident(node.ident), - generics: (node.generics).map(|it| f.fold_angle_bracketed_generic_arguments(it)), - eq_token: node.eq_token, - ty: f.fold_type(node.ty), - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn fold_attr_style<F>(f: &mut F, node: crate::AttrStyle) -> crate::AttrStyle -where - F: Fold + ?Sized, -{ - match node { - crate::AttrStyle::Outer => crate::AttrStyle::Outer, - crate::AttrStyle::Inner(_binding_0) => crate::AttrStyle::Inner(_binding_0), - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn fold_attribute<F>(f: &mut F, node: crate::Attribute) -> crate::Attribute -where - F: Fold + ?Sized, -{ - crate::Attribute { - pound_token: node.pound_token, - style: f.fold_attr_style(node.style), - bracket_token: node.bracket_token, - meta: f.fold_meta(node.meta), - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn fold_bare_fn_arg<F>(f: &mut F, node: crate::BareFnArg) -> crate::BareFnArg -where - F: Fold + ?Sized, -{ - crate::BareFnArg { - attrs: f.fold_attributes(node.attrs), - name: (node.name).map(|it| (f.fold_ident((it).0), (it).1)), - ty: f.fold_type(node.ty), - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn fold_bare_variadic<F>(f: &mut F, node: crate::BareVariadic) -> crate::BareVariadic -where - F: Fold + ?Sized, -{ - crate::BareVariadic { - attrs: f.fold_attributes(node.attrs), - name: (node.name).map(|it| (f.fold_ident((it).0), (it).1)), - dots: node.dots, - comma: node.comma, - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn fold_bin_op<F>(f: &mut F, node: crate::BinOp) -> crate::BinOp -where - F: Fold + ?Sized, -{ - match node { - crate::BinOp::Add(_binding_0) => crate::BinOp::Add(_binding_0), - crate::BinOp::Sub(_binding_0) => crate::BinOp::Sub(_binding_0), - crate::BinOp::Mul(_binding_0) => crate::BinOp::Mul(_binding_0), - crate::BinOp::Div(_binding_0) => crate::BinOp::Div(_binding_0), - crate::BinOp::Rem(_binding_0) => 
crate::BinOp::Rem(_binding_0), - crate::BinOp::And(_binding_0) => crate::BinOp::And(_binding_0), - crate::BinOp::Or(_binding_0) => crate::BinOp::Or(_binding_0), - crate::BinOp::BitXor(_binding_0) => crate::BinOp::BitXor(_binding_0), - crate::BinOp::BitAnd(_binding_0) => crate::BinOp::BitAnd(_binding_0), - crate::BinOp::BitOr(_binding_0) => crate::BinOp::BitOr(_binding_0), - crate::BinOp::Shl(_binding_0) => crate::BinOp::Shl(_binding_0), - crate::BinOp::Shr(_binding_0) => crate::BinOp::Shr(_binding_0), - crate::BinOp::Eq(_binding_0) => crate::BinOp::Eq(_binding_0), - crate::BinOp::Lt(_binding_0) => crate::BinOp::Lt(_binding_0), - crate::BinOp::Le(_binding_0) => crate::BinOp::Le(_binding_0), - crate::BinOp::Ne(_binding_0) => crate::BinOp::Ne(_binding_0), - crate::BinOp::Ge(_binding_0) => crate::BinOp::Ge(_binding_0), - crate::BinOp::Gt(_binding_0) => crate::BinOp::Gt(_binding_0), - crate::BinOp::AddAssign(_binding_0) => crate::BinOp::AddAssign(_binding_0), - crate::BinOp::SubAssign(_binding_0) => crate::BinOp::SubAssign(_binding_0), - crate::BinOp::MulAssign(_binding_0) => crate::BinOp::MulAssign(_binding_0), - crate::BinOp::DivAssign(_binding_0) => crate::BinOp::DivAssign(_binding_0), - crate::BinOp::RemAssign(_binding_0) => crate::BinOp::RemAssign(_binding_0), - crate::BinOp::BitXorAssign(_binding_0) => crate::BinOp::BitXorAssign(_binding_0), - crate::BinOp::BitAndAssign(_binding_0) => crate::BinOp::BitAndAssign(_binding_0), - crate::BinOp::BitOrAssign(_binding_0) => crate::BinOp::BitOrAssign(_binding_0), - crate::BinOp::ShlAssign(_binding_0) => crate::BinOp::ShlAssign(_binding_0), - crate::BinOp::ShrAssign(_binding_0) => crate::BinOp::ShrAssign(_binding_0), - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn fold_block<F>(f: &mut F, node: crate::Block) -> crate::Block -where - F: Fold + ?Sized, -{ - crate::Block { - brace_token: node.brace_token, - stmts: fold_vec(node.stmts, f, F::fold_stmt), - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn fold_bound_lifetimes<F>( - f: &mut F, - node: crate::BoundLifetimes, -) -> crate::BoundLifetimes -where - F: Fold + ?Sized, -{ - crate::BoundLifetimes { - for_token: node.for_token, - lt_token: node.lt_token, - lifetimes: crate::punctuated::fold(node.lifetimes, f, F::fold_generic_param), - gt_token: node.gt_token, - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn fold_captured_param<F>( - f: &mut F, - node: crate::CapturedParam, -) -> crate::CapturedParam -where - F: Fold + ?Sized, -{ - match node { - crate::CapturedParam::Lifetime(_binding_0) => { - crate::CapturedParam::Lifetime(f.fold_lifetime(_binding_0)) - } - crate::CapturedParam::Ident(_binding_0) => { - crate::CapturedParam::Ident(f.fold_ident(_binding_0)) - } - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn fold_const_param<F>(f: &mut F, node: crate::ConstParam) -> crate::ConstParam -where - F: Fold + ?Sized, -{ - crate::ConstParam { - attrs: f.fold_attributes(node.attrs), - const_token: node.const_token, - ident: f.fold_ident(node.ident), - colon_token: node.colon_token, - ty: f.fold_type(node.ty), - eq_token: node.eq_token, - default: (node.default).map(|it| f.fold_expr(it)), - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn fold_constraint<F>(f: 
&mut F, node: crate::Constraint) -> crate::Constraint -where - F: Fold + ?Sized, -{ - crate::Constraint { - ident: f.fold_ident(node.ident), - generics: (node.generics).map(|it| f.fold_angle_bracketed_generic_arguments(it)), - colon_token: node.colon_token, - bounds: crate::punctuated::fold(node.bounds, f, F::fold_type_param_bound), - } -} -#[cfg(feature = "derive")] -#[cfg_attr(docsrs, doc(cfg(feature = "derive")))] -pub fn fold_data<F>(f: &mut F, node: crate::Data) -> crate::Data -where - F: Fold + ?Sized, -{ - match node { - crate::Data::Struct(_binding_0) => { - crate::Data::Struct(f.fold_data_struct(_binding_0)) - } - crate::Data::Enum(_binding_0) => crate::Data::Enum(f.fold_data_enum(_binding_0)), - crate::Data::Union(_binding_0) => { - crate::Data::Union(f.fold_data_union(_binding_0)) - } - } -} -#[cfg(feature = "derive")] -#[cfg_attr(docsrs, doc(cfg(feature = "derive")))] -pub fn fold_data_enum<F>(f: &mut F, node: crate::DataEnum) -> crate::DataEnum -where - F: Fold + ?Sized, -{ - crate::DataEnum { - enum_token: node.enum_token, - brace_token: node.brace_token, - variants: crate::punctuated::fold(node.variants, f, F::fold_variant), - } -} -#[cfg(feature = "derive")] -#[cfg_attr(docsrs, doc(cfg(feature = "derive")))] -pub fn fold_data_struct<F>(f: &mut F, node: crate::DataStruct) -> crate::DataStruct -where - F: Fold + ?Sized, -{ - crate::DataStruct { - struct_token: node.struct_token, - fields: f.fold_fields(node.fields), - semi_token: node.semi_token, - } -} -#[cfg(feature = "derive")] -#[cfg_attr(docsrs, doc(cfg(feature = "derive")))] -pub fn fold_data_union<F>(f: &mut F, node: crate::DataUnion) -> crate::DataUnion -where - F: Fold + ?Sized, -{ - crate::DataUnion { - union_token: node.union_token, - fields: f.fold_fields_named(node.fields), - } -} -#[cfg(feature = "derive")] -#[cfg_attr(docsrs, doc(cfg(feature = "derive")))] -pub fn fold_derive_input<F>(f: &mut F, node: crate::DeriveInput) -> crate::DeriveInput -where - F: Fold + ?Sized, -{ - crate::DeriveInput { - attrs: f.fold_attributes(node.attrs), - vis: f.fold_visibility(node.vis), - ident: f.fold_ident(node.ident), - generics: f.fold_generics(node.generics), - data: f.fold_data(node.data), - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn fold_expr<F>(f: &mut F, node: crate::Expr) -> crate::Expr -where - F: Fold + ?Sized, -{ - match node { - crate::Expr::Array(_binding_0) => { - crate::Expr::Array(full!(f.fold_expr_array(_binding_0))) - } - crate::Expr::Assign(_binding_0) => { - crate::Expr::Assign(full!(f.fold_expr_assign(_binding_0))) - } - crate::Expr::Async(_binding_0) => { - crate::Expr::Async(full!(f.fold_expr_async(_binding_0))) - } - crate::Expr::Await(_binding_0) => { - crate::Expr::Await(full!(f.fold_expr_await(_binding_0))) - } - crate::Expr::Binary(_binding_0) => { - crate::Expr::Binary(f.fold_expr_binary(_binding_0)) - } - crate::Expr::Block(_binding_0) => { - crate::Expr::Block(full!(f.fold_expr_block(_binding_0))) - } - crate::Expr::Break(_binding_0) => { - crate::Expr::Break(full!(f.fold_expr_break(_binding_0))) - } - crate::Expr::Call(_binding_0) => crate::Expr::Call(f.fold_expr_call(_binding_0)), - crate::Expr::Cast(_binding_0) => crate::Expr::Cast(f.fold_expr_cast(_binding_0)), - crate::Expr::Closure(_binding_0) => { - crate::Expr::Closure(full!(f.fold_expr_closure(_binding_0))) - } - crate::Expr::Const(_binding_0) => { - crate::Expr::Const(full!(f.fold_expr_const(_binding_0))) - } - 
crate::Expr::Continue(_binding_0) => { - crate::Expr::Continue(full!(f.fold_expr_continue(_binding_0))) - } - crate::Expr::Field(_binding_0) => { - crate::Expr::Field(f.fold_expr_field(_binding_0)) - } - crate::Expr::ForLoop(_binding_0) => { - crate::Expr::ForLoop(full!(f.fold_expr_for_loop(_binding_0))) - } - crate::Expr::Group(_binding_0) => { - crate::Expr::Group(f.fold_expr_group(_binding_0)) - } - crate::Expr::If(_binding_0) => crate::Expr::If(full!(f.fold_expr_if(_binding_0))), - crate::Expr::Index(_binding_0) => { - crate::Expr::Index(f.fold_expr_index(_binding_0)) - } - crate::Expr::Infer(_binding_0) => { - crate::Expr::Infer(full!(f.fold_expr_infer(_binding_0))) - } - crate::Expr::Let(_binding_0) => { - crate::Expr::Let(full!(f.fold_expr_let(_binding_0))) - } - crate::Expr::Lit(_binding_0) => crate::Expr::Lit(f.fold_expr_lit(_binding_0)), - crate::Expr::Loop(_binding_0) => { - crate::Expr::Loop(full!(f.fold_expr_loop(_binding_0))) - } - crate::Expr::Macro(_binding_0) => { - crate::Expr::Macro(f.fold_expr_macro(_binding_0)) - } - crate::Expr::Match(_binding_0) => { - crate::Expr::Match(full!(f.fold_expr_match(_binding_0))) - } - crate::Expr::MethodCall(_binding_0) => { - crate::Expr::MethodCall(f.fold_expr_method_call(_binding_0)) - } - crate::Expr::Paren(_binding_0) => { - crate::Expr::Paren(f.fold_expr_paren(_binding_0)) - } - crate::Expr::Path(_binding_0) => crate::Expr::Path(f.fold_expr_path(_binding_0)), - crate::Expr::Range(_binding_0) => { - crate::Expr::Range(full!(f.fold_expr_range(_binding_0))) - } - crate::Expr::RawAddr(_binding_0) => { - crate::Expr::RawAddr(full!(f.fold_expr_raw_addr(_binding_0))) - } - crate::Expr::Reference(_binding_0) => { - crate::Expr::Reference(f.fold_expr_reference(_binding_0)) - } - crate::Expr::Repeat(_binding_0) => { - crate::Expr::Repeat(full!(f.fold_expr_repeat(_binding_0))) - } - crate::Expr::Return(_binding_0) => { - crate::Expr::Return(full!(f.fold_expr_return(_binding_0))) - } - crate::Expr::Struct(_binding_0) => { - crate::Expr::Struct(f.fold_expr_struct(_binding_0)) - } - crate::Expr::Try(_binding_0) => { - crate::Expr::Try(full!(f.fold_expr_try(_binding_0))) - } - crate::Expr::TryBlock(_binding_0) => { - crate::Expr::TryBlock(full!(f.fold_expr_try_block(_binding_0))) - } - crate::Expr::Tuple(_binding_0) => { - crate::Expr::Tuple(f.fold_expr_tuple(_binding_0)) - } - crate::Expr::Unary(_binding_0) => { - crate::Expr::Unary(f.fold_expr_unary(_binding_0)) - } - crate::Expr::Unsafe(_binding_0) => { - crate::Expr::Unsafe(full!(f.fold_expr_unsafe(_binding_0))) - } - crate::Expr::Verbatim(_binding_0) => { - crate::Expr::Verbatim(f.fold_token_stream(_binding_0)) - } - crate::Expr::While(_binding_0) => { - crate::Expr::While(full!(f.fold_expr_while(_binding_0))) - } - crate::Expr::Yield(_binding_0) => { - crate::Expr::Yield(full!(f.fold_expr_yield(_binding_0))) - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn fold_expr_array<F>(f: &mut F, node: crate::ExprArray) -> crate::ExprArray -where - F: Fold + ?Sized, -{ - crate::ExprArray { - attrs: f.fold_attributes(node.attrs), - bracket_token: node.bracket_token, - elems: crate::punctuated::fold(node.elems, f, F::fold_expr), - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn fold_expr_assign<F>(f: &mut F, node: crate::ExprAssign) -> crate::ExprAssign -where - F: Fold + ?Sized, -{ - crate::ExprAssign { - attrs: f.fold_attributes(node.attrs), - left: Box::new(f.fold_expr(*node.left)), - eq_token: node.eq_token, - 
right: Box::new(f.fold_expr(*node.right)), - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn fold_expr_async<F>(f: &mut F, node: crate::ExprAsync) -> crate::ExprAsync -where - F: Fold + ?Sized, -{ - crate::ExprAsync { - attrs: f.fold_attributes(node.attrs), - async_token: node.async_token, - capture: node.capture, - block: f.fold_block(node.block), - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn fold_expr_await<F>(f: &mut F, node: crate::ExprAwait) -> crate::ExprAwait -where - F: Fold + ?Sized, -{ - crate::ExprAwait { - attrs: f.fold_attributes(node.attrs), - base: Box::new(f.fold_expr(*node.base)), - dot_token: node.dot_token, - await_token: node.await_token, - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn fold_expr_binary<F>(f: &mut F, node: crate::ExprBinary) -> crate::ExprBinary -where - F: Fold + ?Sized, -{ - crate::ExprBinary { - attrs: f.fold_attributes(node.attrs), - left: Box::new(f.fold_expr(*node.left)), - op: f.fold_bin_op(node.op), - right: Box::new(f.fold_expr(*node.right)), - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn fold_expr_block<F>(f: &mut F, node: crate::ExprBlock) -> crate::ExprBlock -where - F: Fold + ?Sized, -{ - crate::ExprBlock { - attrs: f.fold_attributes(node.attrs), - label: (node.label).map(|it| f.fold_label(it)), - block: f.fold_block(node.block), - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn fold_expr_break<F>(f: &mut F, node: crate::ExprBreak) -> crate::ExprBreak -where - F: Fold + ?Sized, -{ - crate::ExprBreak { - attrs: f.fold_attributes(node.attrs), - break_token: node.break_token, - label: (node.label).map(|it| f.fold_lifetime(it)), - expr: (node.expr).map(|it| Box::new(f.fold_expr(*it))), - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn fold_expr_call<F>(f: &mut F, node: crate::ExprCall) -> crate::ExprCall -where - F: Fold + ?Sized, -{ - crate::ExprCall { - attrs: f.fold_attributes(node.attrs), - func: Box::new(f.fold_expr(*node.func)), - paren_token: node.paren_token, - args: crate::punctuated::fold(node.args, f, F::fold_expr), - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn fold_expr_cast<F>(f: &mut F, node: crate::ExprCast) -> crate::ExprCast -where - F: Fold + ?Sized, -{ - crate::ExprCast { - attrs: f.fold_attributes(node.attrs), - expr: Box::new(f.fold_expr(*node.expr)), - as_token: node.as_token, - ty: Box::new(f.fold_type(*node.ty)), - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn fold_expr_closure<F>(f: &mut F, node: crate::ExprClosure) -> crate::ExprClosure -where - F: Fold + ?Sized, -{ - crate::ExprClosure { - attrs: f.fold_attributes(node.attrs), - lifetimes: (node.lifetimes).map(|it| f.fold_bound_lifetimes(it)), - constness: node.constness, - movability: node.movability, - asyncness: node.asyncness, - capture: node.capture, - or1_token: node.or1_token, - inputs: crate::punctuated::fold(node.inputs, f, F::fold_pat), - or2_token: node.or2_token, - output: f.fold_return_type(node.output), - body: Box::new(f.fold_expr(*node.body)), - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn fold_expr_const<F>(f: &mut F, node: 
crate::ExprConst) -> crate::ExprConst -where - F: Fold + ?Sized, -{ - crate::ExprConst { - attrs: f.fold_attributes(node.attrs), - const_token: node.const_token, - block: f.fold_block(node.block), - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn fold_expr_continue<F>(f: &mut F, node: crate::ExprContinue) -> crate::ExprContinue -where - F: Fold + ?Sized, -{ - crate::ExprContinue { - attrs: f.fold_attributes(node.attrs), - continue_token: node.continue_token, - label: (node.label).map(|it| f.fold_lifetime(it)), - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn fold_expr_field<F>(f: &mut F, node: crate::ExprField) -> crate::ExprField -where - F: Fold + ?Sized, -{ - crate::ExprField { - attrs: f.fold_attributes(node.attrs), - base: Box::new(f.fold_expr(*node.base)), - dot_token: node.dot_token, - member: f.fold_member(node.member), - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn fold_expr_for_loop<F>(f: &mut F, node: crate::ExprForLoop) -> crate::ExprForLoop -where - F: Fold + ?Sized, -{ - crate::ExprForLoop { - attrs: f.fold_attributes(node.attrs), - label: (node.label).map(|it| f.fold_label(it)), - for_token: node.for_token, - pat: Box::new(f.fold_pat(*node.pat)), - in_token: node.in_token, - expr: Box::new(f.fold_expr(*node.expr)), - body: f.fold_block(node.body), - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn fold_expr_group<F>(f: &mut F, node: crate::ExprGroup) -> crate::ExprGroup -where - F: Fold + ?Sized, -{ - crate::ExprGroup { - attrs: f.fold_attributes(node.attrs), - group_token: node.group_token, - expr: Box::new(f.fold_expr(*node.expr)), - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn fold_expr_if<F>(f: &mut F, node: crate::ExprIf) -> crate::ExprIf -where - F: Fold + ?Sized, -{ - crate::ExprIf { - attrs: f.fold_attributes(node.attrs), - if_token: node.if_token, - cond: Box::new(f.fold_expr(*node.cond)), - then_branch: f.fold_block(node.then_branch), - else_branch: (node.else_branch) - .map(|it| ((it).0, Box::new(f.fold_expr(*(it).1)))), - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn fold_expr_index<F>(f: &mut F, node: crate::ExprIndex) -> crate::ExprIndex -where - F: Fold + ?Sized, -{ - crate::ExprIndex { - attrs: f.fold_attributes(node.attrs), - expr: Box::new(f.fold_expr(*node.expr)), - bracket_token: node.bracket_token, - index: Box::new(f.fold_expr(*node.index)), - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn fold_expr_infer<F>(f: &mut F, node: crate::ExprInfer) -> crate::ExprInfer -where - F: Fold + ?Sized, -{ - crate::ExprInfer { - attrs: f.fold_attributes(node.attrs), - underscore_token: node.underscore_token, - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn fold_expr_let<F>(f: &mut F, node: crate::ExprLet) -> crate::ExprLet -where - F: Fold + ?Sized, -{ - crate::ExprLet { - attrs: f.fold_attributes(node.attrs), - let_token: node.let_token, - pat: Box::new(f.fold_pat(*node.pat)), - eq_token: node.eq_token, - expr: Box::new(f.fold_expr(*node.expr)), - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn 
fold_expr_lit<F>(f: &mut F, node: crate::ExprLit) -> crate::ExprLit -where - F: Fold + ?Sized, -{ - crate::ExprLit { - attrs: f.fold_attributes(node.attrs), - lit: f.fold_lit(node.lit), - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn fold_expr_loop<F>(f: &mut F, node: crate::ExprLoop) -> crate::ExprLoop -where - F: Fold + ?Sized, -{ - crate::ExprLoop { - attrs: f.fold_attributes(node.attrs), - label: (node.label).map(|it| f.fold_label(it)), - loop_token: node.loop_token, - body: f.fold_block(node.body), - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn fold_expr_macro<F>(f: &mut F, node: crate::ExprMacro) -> crate::ExprMacro -where - F: Fold + ?Sized, -{ - crate::ExprMacro { - attrs: f.fold_attributes(node.attrs), - mac: f.fold_macro(node.mac), - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn fold_expr_match<F>(f: &mut F, node: crate::ExprMatch) -> crate::ExprMatch -where - F: Fold + ?Sized, -{ - crate::ExprMatch { - attrs: f.fold_attributes(node.attrs), - match_token: node.match_token, - expr: Box::new(f.fold_expr(*node.expr)), - brace_token: node.brace_token, - arms: fold_vec(node.arms, f, F::fold_arm), - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn fold_expr_method_call<F>( - f: &mut F, - node: crate::ExprMethodCall, -) -> crate::ExprMethodCall -where - F: Fold + ?Sized, -{ - crate::ExprMethodCall { - attrs: f.fold_attributes(node.attrs), - receiver: Box::new(f.fold_expr(*node.receiver)), - dot_token: node.dot_token, - method: f.fold_ident(node.method), - turbofish: (node.turbofish) - .map(|it| f.fold_angle_bracketed_generic_arguments(it)), - paren_token: node.paren_token, - args: crate::punctuated::fold(node.args, f, F::fold_expr), - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn fold_expr_paren<F>(f: &mut F, node: crate::ExprParen) -> crate::ExprParen -where - F: Fold + ?Sized, -{ - crate::ExprParen { - attrs: f.fold_attributes(node.attrs), - paren_token: node.paren_token, - expr: Box::new(f.fold_expr(*node.expr)), - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn fold_expr_path<F>(f: &mut F, node: crate::ExprPath) -> crate::ExprPath -where - F: Fold + ?Sized, -{ - crate::ExprPath { - attrs: f.fold_attributes(node.attrs), - qself: (node.qself).map(|it| f.fold_qself(it)), - path: f.fold_path(node.path), - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn fold_expr_range<F>(f: &mut F, node: crate::ExprRange) -> crate::ExprRange -where - F: Fold + ?Sized, -{ - crate::ExprRange { - attrs: f.fold_attributes(node.attrs), - start: (node.start).map(|it| Box::new(f.fold_expr(*it))), - limits: f.fold_range_limits(node.limits), - end: (node.end).map(|it| Box::new(f.fold_expr(*it))), - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn fold_expr_raw_addr<F>(f: &mut F, node: crate::ExprRawAddr) -> crate::ExprRawAddr -where - F: Fold + ?Sized, -{ - crate::ExprRawAddr { - attrs: f.fold_attributes(node.attrs), - and_token: node.and_token, - raw: node.raw, - mutability: f.fold_pointer_mutability(node.mutability), - expr: Box::new(f.fold_expr(*node.expr)), - } -} -#[cfg(any(feature = 
"derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn fold_expr_reference<F>( - f: &mut F, - node: crate::ExprReference, -) -> crate::ExprReference -where - F: Fold + ?Sized, -{ - crate::ExprReference { - attrs: f.fold_attributes(node.attrs), - and_token: node.and_token, - mutability: node.mutability, - expr: Box::new(f.fold_expr(*node.expr)), - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn fold_expr_repeat<F>(f: &mut F, node: crate::ExprRepeat) -> crate::ExprRepeat -where - F: Fold + ?Sized, -{ - crate::ExprRepeat { - attrs: f.fold_attributes(node.attrs), - bracket_token: node.bracket_token, - expr: Box::new(f.fold_expr(*node.expr)), - semi_token: node.semi_token, - len: Box::new(f.fold_expr(*node.len)), - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn fold_expr_return<F>(f: &mut F, node: crate::ExprReturn) -> crate::ExprReturn -where - F: Fold + ?Sized, -{ - crate::ExprReturn { - attrs: f.fold_attributes(node.attrs), - return_token: node.return_token, - expr: (node.expr).map(|it| Box::new(f.fold_expr(*it))), - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn fold_expr_struct<F>(f: &mut F, node: crate::ExprStruct) -> crate::ExprStruct -where - F: Fold + ?Sized, -{ - crate::ExprStruct { - attrs: f.fold_attributes(node.attrs), - qself: (node.qself).map(|it| f.fold_qself(it)), - path: f.fold_path(node.path), - brace_token: node.brace_token, - fields: crate::punctuated::fold(node.fields, f, F::fold_field_value), - dot2_token: node.dot2_token, - rest: (node.rest).map(|it| Box::new(f.fold_expr(*it))), - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn fold_expr_try<F>(f: &mut F, node: crate::ExprTry) -> crate::ExprTry -where - F: Fold + ?Sized, -{ - crate::ExprTry { - attrs: f.fold_attributes(node.attrs), - expr: Box::new(f.fold_expr(*node.expr)), - question_token: node.question_token, - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn fold_expr_try_block<F>( - f: &mut F, - node: crate::ExprTryBlock, -) -> crate::ExprTryBlock -where - F: Fold + ?Sized, -{ - crate::ExprTryBlock { - attrs: f.fold_attributes(node.attrs), - try_token: node.try_token, - block: f.fold_block(node.block), - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn fold_expr_tuple<F>(f: &mut F, node: crate::ExprTuple) -> crate::ExprTuple -where - F: Fold + ?Sized, -{ - crate::ExprTuple { - attrs: f.fold_attributes(node.attrs), - paren_token: node.paren_token, - elems: crate::punctuated::fold(node.elems, f, F::fold_expr), - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn fold_expr_unary<F>(f: &mut F, node: crate::ExprUnary) -> crate::ExprUnary -where - F: Fold + ?Sized, -{ - crate::ExprUnary { - attrs: f.fold_attributes(node.attrs), - op: f.fold_un_op(node.op), - expr: Box::new(f.fold_expr(*node.expr)), - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn fold_expr_unsafe<F>(f: &mut F, node: crate::ExprUnsafe) -> crate::ExprUnsafe -where - F: Fold + ?Sized, -{ - crate::ExprUnsafe { - attrs: f.fold_attributes(node.attrs), - unsafe_token: node.unsafe_token, - block: f.fold_block(node.block), - } -} -#[cfg(feature = 
"full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn fold_expr_while<F>(f: &mut F, node: crate::ExprWhile) -> crate::ExprWhile -where - F: Fold + ?Sized, -{ - crate::ExprWhile { - attrs: f.fold_attributes(node.attrs), - label: (node.label).map(|it| f.fold_label(it)), - while_token: node.while_token, - cond: Box::new(f.fold_expr(*node.cond)), - body: f.fold_block(node.body), - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn fold_expr_yield<F>(f: &mut F, node: crate::ExprYield) -> crate::ExprYield -where - F: Fold + ?Sized, -{ - crate::ExprYield { - attrs: f.fold_attributes(node.attrs), - yield_token: node.yield_token, - expr: (node.expr).map(|it| Box::new(f.fold_expr(*it))), - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn fold_field<F>(f: &mut F, node: crate::Field) -> crate::Field -where - F: Fold + ?Sized, -{ - crate::Field { - attrs: f.fold_attributes(node.attrs), - vis: f.fold_visibility(node.vis), - mutability: f.fold_field_mutability(node.mutability), - ident: (node.ident).map(|it| f.fold_ident(it)), - colon_token: node.colon_token, - ty: f.fold_type(node.ty), - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn fold_field_mutability<F>( - f: &mut F, - node: crate::FieldMutability, -) -> crate::FieldMutability -where - F: Fold + ?Sized, -{ - match node { - crate::FieldMutability::None => crate::FieldMutability::None, - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn fold_field_pat<F>(f: &mut F, node: crate::FieldPat) -> crate::FieldPat -where - F: Fold + ?Sized, -{ - crate::FieldPat { - attrs: f.fold_attributes(node.attrs), - member: f.fold_member(node.member), - colon_token: node.colon_token, - pat: Box::new(f.fold_pat(*node.pat)), - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn fold_field_value<F>(f: &mut F, node: crate::FieldValue) -> crate::FieldValue -where - F: Fold + ?Sized, -{ - crate::FieldValue { - attrs: f.fold_attributes(node.attrs), - member: f.fold_member(node.member), - colon_token: node.colon_token, - expr: f.fold_expr(node.expr), - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn fold_fields<F>(f: &mut F, node: crate::Fields) -> crate::Fields -where - F: Fold + ?Sized, -{ - match node { - crate::Fields::Named(_binding_0) => { - crate::Fields::Named(f.fold_fields_named(_binding_0)) - } - crate::Fields::Unnamed(_binding_0) => { - crate::Fields::Unnamed(f.fold_fields_unnamed(_binding_0)) - } - crate::Fields::Unit => crate::Fields::Unit, - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn fold_fields_named<F>(f: &mut F, node: crate::FieldsNamed) -> crate::FieldsNamed -where - F: Fold + ?Sized, -{ - crate::FieldsNamed { - brace_token: node.brace_token, - named: crate::punctuated::fold(node.named, f, F::fold_field), - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn fold_fields_unnamed<F>( - f: &mut F, - node: crate::FieldsUnnamed, -) -> crate::FieldsUnnamed -where - F: Fold + ?Sized, -{ - crate::FieldsUnnamed { - paren_token: node.paren_token, - unnamed: 
crate::punctuated::fold(node.unnamed, f, F::fold_field), - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn fold_file<F>(f: &mut F, node: crate::File) -> crate::File -where - F: Fold + ?Sized, -{ - crate::File { - shebang: node.shebang, - attrs: f.fold_attributes(node.attrs), - items: fold_vec(node.items, f, F::fold_item), - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn fold_fn_arg<F>(f: &mut F, node: crate::FnArg) -> crate::FnArg -where - F: Fold + ?Sized, -{ - match node { - crate::FnArg::Receiver(_binding_0) => { - crate::FnArg::Receiver(f.fold_receiver(_binding_0)) - } - crate::FnArg::Typed(_binding_0) => { - crate::FnArg::Typed(f.fold_pat_type(_binding_0)) - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn fold_foreign_item<F>(f: &mut F, node: crate::ForeignItem) -> crate::ForeignItem -where - F: Fold + ?Sized, -{ - match node { - crate::ForeignItem::Fn(_binding_0) => { - crate::ForeignItem::Fn(f.fold_foreign_item_fn(_binding_0)) - } - crate::ForeignItem::Static(_binding_0) => { - crate::ForeignItem::Static(f.fold_foreign_item_static(_binding_0)) - } - crate::ForeignItem::Type(_binding_0) => { - crate::ForeignItem::Type(f.fold_foreign_item_type(_binding_0)) - } - crate::ForeignItem::Macro(_binding_0) => { - crate::ForeignItem::Macro(f.fold_foreign_item_macro(_binding_0)) - } - crate::ForeignItem::Verbatim(_binding_0) => { - crate::ForeignItem::Verbatim(f.fold_token_stream(_binding_0)) - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn fold_foreign_item_fn<F>( - f: &mut F, - node: crate::ForeignItemFn, -) -> crate::ForeignItemFn -where - F: Fold + ?Sized, -{ - crate::ForeignItemFn { - attrs: f.fold_attributes(node.attrs), - vis: f.fold_visibility(node.vis), - sig: f.fold_signature(node.sig), - semi_token: node.semi_token, - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn fold_foreign_item_macro<F>( - f: &mut F, - node: crate::ForeignItemMacro, -) -> crate::ForeignItemMacro -where - F: Fold + ?Sized, -{ - crate::ForeignItemMacro { - attrs: f.fold_attributes(node.attrs), - mac: f.fold_macro(node.mac), - semi_token: node.semi_token, - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn fold_foreign_item_static<F>( - f: &mut F, - node: crate::ForeignItemStatic, -) -> crate::ForeignItemStatic -where - F: Fold + ?Sized, -{ - crate::ForeignItemStatic { - attrs: f.fold_attributes(node.attrs), - vis: f.fold_visibility(node.vis), - static_token: node.static_token, - mutability: f.fold_static_mutability(node.mutability), - ident: f.fold_ident(node.ident), - colon_token: node.colon_token, - ty: Box::new(f.fold_type(*node.ty)), - semi_token: node.semi_token, - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn fold_foreign_item_type<F>( - f: &mut F, - node: crate::ForeignItemType, -) -> crate::ForeignItemType -where - F: Fold + ?Sized, -{ - crate::ForeignItemType { - attrs: f.fold_attributes(node.attrs), - vis: f.fold_visibility(node.vis), - type_token: node.type_token, - ident: f.fold_ident(node.ident), - generics: f.fold_generics(node.generics), - semi_token: node.semi_token, - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn fold_generic_argument<F>( - f: &mut F, - node: crate::GenericArgument, -) -> crate::GenericArgument -where - 
F: Fold + ?Sized, -{ - match node { - crate::GenericArgument::Lifetime(_binding_0) => { - crate::GenericArgument::Lifetime(f.fold_lifetime(_binding_0)) - } - crate::GenericArgument::Type(_binding_0) => { - crate::GenericArgument::Type(f.fold_type(_binding_0)) - } - crate::GenericArgument::Const(_binding_0) => { - crate::GenericArgument::Const(f.fold_expr(_binding_0)) - } - crate::GenericArgument::AssocType(_binding_0) => { - crate::GenericArgument::AssocType(f.fold_assoc_type(_binding_0)) - } - crate::GenericArgument::AssocConst(_binding_0) => { - crate::GenericArgument::AssocConst(f.fold_assoc_const(_binding_0)) - } - crate::GenericArgument::Constraint(_binding_0) => { - crate::GenericArgument::Constraint(f.fold_constraint(_binding_0)) - } - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn fold_generic_param<F>(f: &mut F, node: crate::GenericParam) -> crate::GenericParam -where - F: Fold + ?Sized, -{ - match node { - crate::GenericParam::Lifetime(_binding_0) => { - crate::GenericParam::Lifetime(f.fold_lifetime_param(_binding_0)) - } - crate::GenericParam::Type(_binding_0) => { - crate::GenericParam::Type(f.fold_type_param(_binding_0)) - } - crate::GenericParam::Const(_binding_0) => { - crate::GenericParam::Const(f.fold_const_param(_binding_0)) - } - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn fold_generics<F>(f: &mut F, node: crate::Generics) -> crate::Generics -where - F: Fold + ?Sized, -{ - crate::Generics { - lt_token: node.lt_token, - params: crate::punctuated::fold(node.params, f, F::fold_generic_param), - gt_token: node.gt_token, - where_clause: (node.where_clause).map(|it| f.fold_where_clause(it)), - } -} -pub fn fold_ident<F>(f: &mut F, node: proc_macro2::Ident) -> proc_macro2::Ident -where - F: Fold + ?Sized, -{ - let mut node = node; - let span = f.fold_span(node.span()); - node.set_span(span); - node -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn fold_impl_item<F>(f: &mut F, node: crate::ImplItem) -> crate::ImplItem -where - F: Fold + ?Sized, -{ - match node { - crate::ImplItem::Const(_binding_0) => { - crate::ImplItem::Const(f.fold_impl_item_const(_binding_0)) - } - crate::ImplItem::Fn(_binding_0) => { - crate::ImplItem::Fn(f.fold_impl_item_fn(_binding_0)) - } - crate::ImplItem::Type(_binding_0) => { - crate::ImplItem::Type(f.fold_impl_item_type(_binding_0)) - } - crate::ImplItem::Macro(_binding_0) => { - crate::ImplItem::Macro(f.fold_impl_item_macro(_binding_0)) - } - crate::ImplItem::Verbatim(_binding_0) => { - crate::ImplItem::Verbatim(f.fold_token_stream(_binding_0)) - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn fold_impl_item_const<F>( - f: &mut F, - node: crate::ImplItemConst, -) -> crate::ImplItemConst -where - F: Fold + ?Sized, -{ - crate::ImplItemConst { - attrs: f.fold_attributes(node.attrs), - vis: f.fold_visibility(node.vis), - defaultness: node.defaultness, - const_token: node.const_token, - ident: f.fold_ident(node.ident), - generics: f.fold_generics(node.generics), - colon_token: node.colon_token, - ty: f.fold_type(node.ty), - eq_token: node.eq_token, - expr: f.fold_expr(node.expr), - semi_token: node.semi_token, - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn fold_impl_item_fn<F>(f: &mut F, node: crate::ImplItemFn) -> crate::ImplItemFn -where - F: Fold + 
?Sized, -{ - crate::ImplItemFn { - attrs: f.fold_attributes(node.attrs), - vis: f.fold_visibility(node.vis), - defaultness: node.defaultness, - sig: f.fold_signature(node.sig), - block: f.fold_block(node.block), - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn fold_impl_item_macro<F>( - f: &mut F, - node: crate::ImplItemMacro, -) -> crate::ImplItemMacro -where - F: Fold + ?Sized, -{ - crate::ImplItemMacro { - attrs: f.fold_attributes(node.attrs), - mac: f.fold_macro(node.mac), - semi_token: node.semi_token, - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn fold_impl_item_type<F>( - f: &mut F, - node: crate::ImplItemType, -) -> crate::ImplItemType -where - F: Fold + ?Sized, -{ - crate::ImplItemType { - attrs: f.fold_attributes(node.attrs), - vis: f.fold_visibility(node.vis), - defaultness: node.defaultness, - type_token: node.type_token, - ident: f.fold_ident(node.ident), - generics: f.fold_generics(node.generics), - eq_token: node.eq_token, - ty: f.fold_type(node.ty), - semi_token: node.semi_token, - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn fold_impl_restriction<F>( - f: &mut F, - node: crate::ImplRestriction, -) -> crate::ImplRestriction -where - F: Fold + ?Sized, -{ - match node {} -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn fold_index<F>(f: &mut F, node: crate::Index) -> crate::Index -where - F: Fold + ?Sized, -{ - crate::Index { - index: node.index, - span: f.fold_span(node.span), - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn fold_item<F>(f: &mut F, node: crate::Item) -> crate::Item -where - F: Fold + ?Sized, -{ - match node { - crate::Item::Const(_binding_0) => { - crate::Item::Const(f.fold_item_const(_binding_0)) - } - crate::Item::Enum(_binding_0) => crate::Item::Enum(f.fold_item_enum(_binding_0)), - crate::Item::ExternCrate(_binding_0) => { - crate::Item::ExternCrate(f.fold_item_extern_crate(_binding_0)) - } - crate::Item::Fn(_binding_0) => crate::Item::Fn(f.fold_item_fn(_binding_0)), - crate::Item::ForeignMod(_binding_0) => { - crate::Item::ForeignMod(f.fold_item_foreign_mod(_binding_0)) - } - crate::Item::Impl(_binding_0) => crate::Item::Impl(f.fold_item_impl(_binding_0)), - crate::Item::Macro(_binding_0) => { - crate::Item::Macro(f.fold_item_macro(_binding_0)) - } - crate::Item::Mod(_binding_0) => crate::Item::Mod(f.fold_item_mod(_binding_0)), - crate::Item::Static(_binding_0) => { - crate::Item::Static(f.fold_item_static(_binding_0)) - } - crate::Item::Struct(_binding_0) => { - crate::Item::Struct(f.fold_item_struct(_binding_0)) - } - crate::Item::Trait(_binding_0) => { - crate::Item::Trait(f.fold_item_trait(_binding_0)) - } - crate::Item::TraitAlias(_binding_0) => { - crate::Item::TraitAlias(f.fold_item_trait_alias(_binding_0)) - } - crate::Item::Type(_binding_0) => crate::Item::Type(f.fold_item_type(_binding_0)), - crate::Item::Union(_binding_0) => { - crate::Item::Union(f.fold_item_union(_binding_0)) - } - crate::Item::Use(_binding_0) => crate::Item::Use(f.fold_item_use(_binding_0)), - crate::Item::Verbatim(_binding_0) => { - crate::Item::Verbatim(f.fold_token_stream(_binding_0)) - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn fold_item_const<F>(f: &mut F, node: crate::ItemConst) -> crate::ItemConst -where - F: Fold + ?Sized, -{ - crate::ItemConst { - attrs: 
f.fold_attributes(node.attrs), - vis: f.fold_visibility(node.vis), - const_token: node.const_token, - ident: f.fold_ident(node.ident), - generics: f.fold_generics(node.generics), - colon_token: node.colon_token, - ty: Box::new(f.fold_type(*node.ty)), - eq_token: node.eq_token, - expr: Box::new(f.fold_expr(*node.expr)), - semi_token: node.semi_token, - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn fold_item_enum<F>(f: &mut F, node: crate::ItemEnum) -> crate::ItemEnum -where - F: Fold + ?Sized, -{ - crate::ItemEnum { - attrs: f.fold_attributes(node.attrs), - vis: f.fold_visibility(node.vis), - enum_token: node.enum_token, - ident: f.fold_ident(node.ident), - generics: f.fold_generics(node.generics), - brace_token: node.brace_token, - variants: crate::punctuated::fold(node.variants, f, F::fold_variant), - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn fold_item_extern_crate<F>( - f: &mut F, - node: crate::ItemExternCrate, -) -> crate::ItemExternCrate -where - F: Fold + ?Sized, -{ - crate::ItemExternCrate { - attrs: f.fold_attributes(node.attrs), - vis: f.fold_visibility(node.vis), - extern_token: node.extern_token, - crate_token: node.crate_token, - ident: f.fold_ident(node.ident), - rename: (node.rename).map(|it| ((it).0, f.fold_ident((it).1))), - semi_token: node.semi_token, - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn fold_item_fn<F>(f: &mut F, node: crate::ItemFn) -> crate::ItemFn -where - F: Fold + ?Sized, -{ - crate::ItemFn { - attrs: f.fold_attributes(node.attrs), - vis: f.fold_visibility(node.vis), - sig: f.fold_signature(node.sig), - block: Box::new(f.fold_block(*node.block)), - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn fold_item_foreign_mod<F>( - f: &mut F, - node: crate::ItemForeignMod, -) -> crate::ItemForeignMod -where - F: Fold + ?Sized, -{ - crate::ItemForeignMod { - attrs: f.fold_attributes(node.attrs), - unsafety: node.unsafety, - abi: f.fold_abi(node.abi), - brace_token: node.brace_token, - items: fold_vec(node.items, f, F::fold_foreign_item), - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn fold_item_impl<F>(f: &mut F, node: crate::ItemImpl) -> crate::ItemImpl -where - F: Fold + ?Sized, -{ - crate::ItemImpl { - attrs: f.fold_attributes(node.attrs), - defaultness: node.defaultness, - unsafety: node.unsafety, - impl_token: node.impl_token, - generics: f.fold_generics(node.generics), - trait_: (node.trait_).map(|it| ((it).0, f.fold_path((it).1), (it).2)), - self_ty: Box::new(f.fold_type(*node.self_ty)), - brace_token: node.brace_token, - items: fold_vec(node.items, f, F::fold_impl_item), - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn fold_item_macro<F>(f: &mut F, node: crate::ItemMacro) -> crate::ItemMacro -where - F: Fold + ?Sized, -{ - crate::ItemMacro { - attrs: f.fold_attributes(node.attrs), - ident: (node.ident).map(|it| f.fold_ident(it)), - mac: f.fold_macro(node.mac), - semi_token: node.semi_token, - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn fold_item_mod<F>(f: &mut F, node: crate::ItemMod) -> crate::ItemMod -where - F: Fold + ?Sized, -{ - crate::ItemMod { - attrs: f.fold_attributes(node.attrs), - vis: f.fold_visibility(node.vis), - unsafety: node.unsafety, - mod_token: node.mod_token, - ident: f.fold_ident(node.ident), - content: (node.content).map(|it| ((it).0, 
fold_vec((it).1, f, F::fold_item))), - semi: node.semi, - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn fold_item_static<F>(f: &mut F, node: crate::ItemStatic) -> crate::ItemStatic -where - F: Fold + ?Sized, -{ - crate::ItemStatic { - attrs: f.fold_attributes(node.attrs), - vis: f.fold_visibility(node.vis), - static_token: node.static_token, - mutability: f.fold_static_mutability(node.mutability), - ident: f.fold_ident(node.ident), - colon_token: node.colon_token, - ty: Box::new(f.fold_type(*node.ty)), - eq_token: node.eq_token, - expr: Box::new(f.fold_expr(*node.expr)), - semi_token: node.semi_token, - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn fold_item_struct<F>(f: &mut F, node: crate::ItemStruct) -> crate::ItemStruct -where - F: Fold + ?Sized, -{ - crate::ItemStruct { - attrs: f.fold_attributes(node.attrs), - vis: f.fold_visibility(node.vis), - struct_token: node.struct_token, - ident: f.fold_ident(node.ident), - generics: f.fold_generics(node.generics), - fields: f.fold_fields(node.fields), - semi_token: node.semi_token, - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn fold_item_trait<F>(f: &mut F, node: crate::ItemTrait) -> crate::ItemTrait -where - F: Fold + ?Sized, -{ - crate::ItemTrait { - attrs: f.fold_attributes(node.attrs), - vis: f.fold_visibility(node.vis), - unsafety: node.unsafety, - auto_token: node.auto_token, - restriction: (node.restriction).map(|it| f.fold_impl_restriction(it)), - trait_token: node.trait_token, - ident: f.fold_ident(node.ident), - generics: f.fold_generics(node.generics), - colon_token: node.colon_token, - supertraits: crate::punctuated::fold( - node.supertraits, - f, - F::fold_type_param_bound, - ), - brace_token: node.brace_token, - items: fold_vec(node.items, f, F::fold_trait_item), - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn fold_item_trait_alias<F>( - f: &mut F, - node: crate::ItemTraitAlias, -) -> crate::ItemTraitAlias -where - F: Fold + ?Sized, -{ - crate::ItemTraitAlias { - attrs: f.fold_attributes(node.attrs), - vis: f.fold_visibility(node.vis), - trait_token: node.trait_token, - ident: f.fold_ident(node.ident), - generics: f.fold_generics(node.generics), - eq_token: node.eq_token, - bounds: crate::punctuated::fold(node.bounds, f, F::fold_type_param_bound), - semi_token: node.semi_token, - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn fold_item_type<F>(f: &mut F, node: crate::ItemType) -> crate::ItemType -where - F: Fold + ?Sized, -{ - crate::ItemType { - attrs: f.fold_attributes(node.attrs), - vis: f.fold_visibility(node.vis), - type_token: node.type_token, - ident: f.fold_ident(node.ident), - generics: f.fold_generics(node.generics), - eq_token: node.eq_token, - ty: Box::new(f.fold_type(*node.ty)), - semi_token: node.semi_token, - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn fold_item_union<F>(f: &mut F, node: crate::ItemUnion) -> crate::ItemUnion -where - F: Fold + ?Sized, -{ - crate::ItemUnion { - attrs: f.fold_attributes(node.attrs), - vis: f.fold_visibility(node.vis), - union_token: node.union_token, - ident: f.fold_ident(node.ident), - generics: f.fold_generics(node.generics), - fields: f.fold_fields_named(node.fields), - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn fold_item_use<F>(f: &mut F, node: crate::ItemUse) -> crate::ItemUse -where 
- F: Fold + ?Sized, -{ - crate::ItemUse { - attrs: f.fold_attributes(node.attrs), - vis: f.fold_visibility(node.vis), - use_token: node.use_token, - leading_colon: node.leading_colon, - tree: f.fold_use_tree(node.tree), - semi_token: node.semi_token, - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn fold_label<F>(f: &mut F, node: crate::Label) -> crate::Label -where - F: Fold + ?Sized, -{ - crate::Label { - name: f.fold_lifetime(node.name), - colon_token: node.colon_token, - } -} -pub fn fold_lifetime<F>(f: &mut F, node: crate::Lifetime) -> crate::Lifetime -where - F: Fold + ?Sized, -{ - crate::Lifetime { - apostrophe: f.fold_span(node.apostrophe), - ident: f.fold_ident(node.ident), - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn fold_lifetime_param<F>( - f: &mut F, - node: crate::LifetimeParam, -) -> crate::LifetimeParam -where - F: Fold + ?Sized, -{ - crate::LifetimeParam { - attrs: f.fold_attributes(node.attrs), - lifetime: f.fold_lifetime(node.lifetime), - colon_token: node.colon_token, - bounds: crate::punctuated::fold(node.bounds, f, F::fold_lifetime), - } -} -pub fn fold_lit<F>(f: &mut F, node: crate::Lit) -> crate::Lit -where - F: Fold + ?Sized, -{ - match node { - crate::Lit::Str(_binding_0) => crate::Lit::Str(f.fold_lit_str(_binding_0)), - crate::Lit::ByteStr(_binding_0) => { - crate::Lit::ByteStr(f.fold_lit_byte_str(_binding_0)) - } - crate::Lit::CStr(_binding_0) => crate::Lit::CStr(f.fold_lit_cstr(_binding_0)), - crate::Lit::Byte(_binding_0) => crate::Lit::Byte(f.fold_lit_byte(_binding_0)), - crate::Lit::Char(_binding_0) => crate::Lit::Char(f.fold_lit_char(_binding_0)), - crate::Lit::Int(_binding_0) => crate::Lit::Int(f.fold_lit_int(_binding_0)), - crate::Lit::Float(_binding_0) => crate::Lit::Float(f.fold_lit_float(_binding_0)), - crate::Lit::Bool(_binding_0) => crate::Lit::Bool(f.fold_lit_bool(_binding_0)), - crate::Lit::Verbatim(_binding_0) => crate::Lit::Verbatim(_binding_0), - } -} -pub fn fold_lit_bool<F>(f: &mut F, node: crate::LitBool) -> crate::LitBool -where - F: Fold + ?Sized, -{ - crate::LitBool { - value: node.value, - span: f.fold_span(node.span), - } -} -pub fn fold_lit_byte<F>(f: &mut F, node: crate::LitByte) -> crate::LitByte -where - F: Fold + ?Sized, -{ - let span = f.fold_span(node.span()); - let mut node = node; - node.set_span(span); - node -} -pub fn fold_lit_byte_str<F>(f: &mut F, node: crate::LitByteStr) -> crate::LitByteStr -where - F: Fold + ?Sized, -{ - let span = f.fold_span(node.span()); - let mut node = node; - node.set_span(span); - node -} -pub fn fold_lit_cstr<F>(f: &mut F, node: crate::LitCStr) -> crate::LitCStr -where - F: Fold + ?Sized, -{ - let span = f.fold_span(node.span()); - let mut node = node; - node.set_span(span); - node -} -pub fn fold_lit_char<F>(f: &mut F, node: crate::LitChar) -> crate::LitChar -where - F: Fold + ?Sized, -{ - let span = f.fold_span(node.span()); - let mut node = node; - node.set_span(span); - node -} -pub fn fold_lit_float<F>(f: &mut F, node: crate::LitFloat) -> crate::LitFloat -where - F: Fold + ?Sized, -{ - let span = f.fold_span(node.span()); - let mut node = node; - node.set_span(span); - node -} -pub fn fold_lit_int<F>(f: &mut F, node: crate::LitInt) -> crate::LitInt -where - F: Fold + ?Sized, -{ - let span = f.fold_span(node.span()); - let mut node = node; - node.set_span(span); - node -} -pub fn fold_lit_str<F>(f: &mut F, node: crate::LitStr) -> crate::LitStr -where - F: Fold 
+ ?Sized, -{ - let span = f.fold_span(node.span()); - let mut node = node; - node.set_span(span); - node -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn fold_local<F>(f: &mut F, node: crate::Local) -> crate::Local -where - F: Fold + ?Sized, -{ - crate::Local { - attrs: f.fold_attributes(node.attrs), - let_token: node.let_token, - pat: f.fold_pat(node.pat), - init: (node.init).map(|it| f.fold_local_init(it)), - semi_token: node.semi_token, - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn fold_local_init<F>(f: &mut F, node: crate::LocalInit) -> crate::LocalInit -where - F: Fold + ?Sized, -{ - crate::LocalInit { - eq_token: node.eq_token, - expr: Box::new(f.fold_expr(*node.expr)), - diverge: (node.diverge).map(|it| ((it).0, Box::new(f.fold_expr(*(it).1)))), - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn fold_macro<F>(f: &mut F, node: crate::Macro) -> crate::Macro -where - F: Fold + ?Sized, -{ - crate::Macro { - path: f.fold_path(node.path), - bang_token: node.bang_token, - delimiter: f.fold_macro_delimiter(node.delimiter), - tokens: f.fold_token_stream(node.tokens), - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn fold_macro_delimiter<F>( - f: &mut F, - node: crate::MacroDelimiter, -) -> crate::MacroDelimiter -where - F: Fold + ?Sized, -{ - match node { - crate::MacroDelimiter::Paren(_binding_0) => { - crate::MacroDelimiter::Paren(_binding_0) - } - crate::MacroDelimiter::Brace(_binding_0) => { - crate::MacroDelimiter::Brace(_binding_0) - } - crate::MacroDelimiter::Bracket(_binding_0) => { - crate::MacroDelimiter::Bracket(_binding_0) - } - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn fold_member<F>(f: &mut F, node: crate::Member) -> crate::Member -where - F: Fold + ?Sized, -{ - match node { - crate::Member::Named(_binding_0) => { - crate::Member::Named(f.fold_ident(_binding_0)) - } - crate::Member::Unnamed(_binding_0) => { - crate::Member::Unnamed(f.fold_index(_binding_0)) - } - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn fold_meta<F>(f: &mut F, node: crate::Meta) -> crate::Meta -where - F: Fold + ?Sized, -{ - match node { - crate::Meta::Path(_binding_0) => crate::Meta::Path(f.fold_path(_binding_0)), - crate::Meta::List(_binding_0) => crate::Meta::List(f.fold_meta_list(_binding_0)), - crate::Meta::NameValue(_binding_0) => { - crate::Meta::NameValue(f.fold_meta_name_value(_binding_0)) - } - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn fold_meta_list<F>(f: &mut F, node: crate::MetaList) -> crate::MetaList -where - F: Fold + ?Sized, -{ - crate::MetaList { - path: f.fold_path(node.path), - delimiter: f.fold_macro_delimiter(node.delimiter), - tokens: f.fold_token_stream(node.tokens), - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn fold_meta_name_value<F>( - f: &mut F, - node: crate::MetaNameValue, -) -> crate::MetaNameValue -where - F: Fold + ?Sized, -{ - crate::MetaNameValue { - path: f.fold_path(node.path), - eq_token: node.eq_token, - value: f.fold_expr(node.value), - } -} 
-#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn fold_parenthesized_generic_arguments<F>( - f: &mut F, - node: crate::ParenthesizedGenericArguments, -) -> crate::ParenthesizedGenericArguments -where - F: Fold + ?Sized, -{ - crate::ParenthesizedGenericArguments { - paren_token: node.paren_token, - inputs: crate::punctuated::fold(node.inputs, f, F::fold_type), - output: f.fold_return_type(node.output), - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn fold_pat<F>(f: &mut F, node: crate::Pat) -> crate::Pat -where - F: Fold + ?Sized, -{ - match node { - crate::Pat::Const(_binding_0) => crate::Pat::Const(f.fold_expr_const(_binding_0)), - crate::Pat::Ident(_binding_0) => crate::Pat::Ident(f.fold_pat_ident(_binding_0)), - crate::Pat::Lit(_binding_0) => crate::Pat::Lit(f.fold_expr_lit(_binding_0)), - crate::Pat::Macro(_binding_0) => crate::Pat::Macro(f.fold_expr_macro(_binding_0)), - crate::Pat::Or(_binding_0) => crate::Pat::Or(f.fold_pat_or(_binding_0)), - crate::Pat::Paren(_binding_0) => crate::Pat::Paren(f.fold_pat_paren(_binding_0)), - crate::Pat::Path(_binding_0) => crate::Pat::Path(f.fold_expr_path(_binding_0)), - crate::Pat::Range(_binding_0) => crate::Pat::Range(f.fold_expr_range(_binding_0)), - crate::Pat::Reference(_binding_0) => { - crate::Pat::Reference(f.fold_pat_reference(_binding_0)) - } - crate::Pat::Rest(_binding_0) => crate::Pat::Rest(f.fold_pat_rest(_binding_0)), - crate::Pat::Slice(_binding_0) => crate::Pat::Slice(f.fold_pat_slice(_binding_0)), - crate::Pat::Struct(_binding_0) => { - crate::Pat::Struct(f.fold_pat_struct(_binding_0)) - } - crate::Pat::Tuple(_binding_0) => crate::Pat::Tuple(f.fold_pat_tuple(_binding_0)), - crate::Pat::TupleStruct(_binding_0) => { - crate::Pat::TupleStruct(f.fold_pat_tuple_struct(_binding_0)) - } - crate::Pat::Type(_binding_0) => crate::Pat::Type(f.fold_pat_type(_binding_0)), - crate::Pat::Verbatim(_binding_0) => { - crate::Pat::Verbatim(f.fold_token_stream(_binding_0)) - } - crate::Pat::Wild(_binding_0) => crate::Pat::Wild(f.fold_pat_wild(_binding_0)), - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn fold_pat_ident<F>(f: &mut F, node: crate::PatIdent) -> crate::PatIdent -where - F: Fold + ?Sized, -{ - crate::PatIdent { - attrs: f.fold_attributes(node.attrs), - by_ref: node.by_ref, - mutability: node.mutability, - ident: f.fold_ident(node.ident), - subpat: (node.subpat).map(|it| ((it).0, Box::new(f.fold_pat(*(it).1)))), - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn fold_pat_or<F>(f: &mut F, node: crate::PatOr) -> crate::PatOr -where - F: Fold + ?Sized, -{ - crate::PatOr { - attrs: f.fold_attributes(node.attrs), - leading_vert: node.leading_vert, - cases: crate::punctuated::fold(node.cases, f, F::fold_pat), - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn fold_pat_paren<F>(f: &mut F, node: crate::PatParen) -> crate::PatParen -where - F: Fold + ?Sized, -{ - crate::PatParen { - attrs: f.fold_attributes(node.attrs), - paren_token: node.paren_token, - pat: Box::new(f.fold_pat(*node.pat)), - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn fold_pat_reference<F>(f: &mut F, node: crate::PatReference) -> crate::PatReference -where - F: Fold + ?Sized, -{ - crate::PatReference { - attrs: f.fold_attributes(node.attrs), - and_token: node.and_token, - mutability: 
node.mutability, - pat: Box::new(f.fold_pat(*node.pat)), - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn fold_pat_rest<F>(f: &mut F, node: crate::PatRest) -> crate::PatRest -where - F: Fold + ?Sized, -{ - crate::PatRest { - attrs: f.fold_attributes(node.attrs), - dot2_token: node.dot2_token, - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn fold_pat_slice<F>(f: &mut F, node: crate::PatSlice) -> crate::PatSlice -where - F: Fold + ?Sized, -{ - crate::PatSlice { - attrs: f.fold_attributes(node.attrs), - bracket_token: node.bracket_token, - elems: crate::punctuated::fold(node.elems, f, F::fold_pat), - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn fold_pat_struct<F>(f: &mut F, node: crate::PatStruct) -> crate::PatStruct -where - F: Fold + ?Sized, -{ - crate::PatStruct { - attrs: f.fold_attributes(node.attrs), - qself: (node.qself).map(|it| f.fold_qself(it)), - path: f.fold_path(node.path), - brace_token: node.brace_token, - fields: crate::punctuated::fold(node.fields, f, F::fold_field_pat), - rest: (node.rest).map(|it| f.fold_pat_rest(it)), - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn fold_pat_tuple<F>(f: &mut F, node: crate::PatTuple) -> crate::PatTuple -where - F: Fold + ?Sized, -{ - crate::PatTuple { - attrs: f.fold_attributes(node.attrs), - paren_token: node.paren_token, - elems: crate::punctuated::fold(node.elems, f, F::fold_pat), - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn fold_pat_tuple_struct<F>( - f: &mut F, - node: crate::PatTupleStruct, -) -> crate::PatTupleStruct -where - F: Fold + ?Sized, -{ - crate::PatTupleStruct { - attrs: f.fold_attributes(node.attrs), - qself: (node.qself).map(|it| f.fold_qself(it)), - path: f.fold_path(node.path), - paren_token: node.paren_token, - elems: crate::punctuated::fold(node.elems, f, F::fold_pat), - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn fold_pat_type<F>(f: &mut F, node: crate::PatType) -> crate::PatType -where - F: Fold + ?Sized, -{ - crate::PatType { - attrs: f.fold_attributes(node.attrs), - pat: Box::new(f.fold_pat(*node.pat)), - colon_token: node.colon_token, - ty: Box::new(f.fold_type(*node.ty)), - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn fold_pat_wild<F>(f: &mut F, node: crate::PatWild) -> crate::PatWild -where - F: Fold + ?Sized, -{ - crate::PatWild { - attrs: f.fold_attributes(node.attrs), - underscore_token: node.underscore_token, - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn fold_path<F>(f: &mut F, node: crate::Path) -> crate::Path -where - F: Fold + ?Sized, -{ - crate::Path { - leading_colon: node.leading_colon, - segments: crate::punctuated::fold(node.segments, f, F::fold_path_segment), - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn fold_path_arguments<F>( - f: &mut F, - node: crate::PathArguments, -) -> crate::PathArguments -where - F: Fold + ?Sized, -{ - match node { - crate::PathArguments::None => crate::PathArguments::None, - crate::PathArguments::AngleBracketed(_binding_0) => { - crate::PathArguments::AngleBracketed( - f.fold_angle_bracketed_generic_arguments(_binding_0), - ) - } - crate::PathArguments::Parenthesized(_binding_0) => { - 
 [... tail of the deleted auto-generated vendor/syn/src/gen/fold.rs hunk: the remaining Fold helpers (fold_path_arguments through fold_where_predicate and the private fold_vec helper), omitted here; the file is @generated by syn-internal-codegen ...]
diff --git a/vendor/syn/src/gen/hash.rs b/vendor/syn/src/gen/hash.rs
deleted file mode 100644
index 04f23453a11777..00000000000000
--- a/vendor/syn/src/gen/hash.rs
+++ /dev/null
@@ -1,2876 +0,0 @@
-// This file is @generated by syn-internal-codegen.
-// It is not intended for manual editing.
 [... deleted auto-generated Hash impls for the syn AST types (crate::Abi through crate::Stmt and onward), omitted here; the remainder of the removed file continues below ...]
{ - state.write_u8(1u8); - v0.hash(state); - } - crate::Stmt::Expr(v0, v1) => { - state.write_u8(2u8); - v0.hash(state); - v1.hash(state); - } - crate::Stmt::Macro(v0) => { - state.write_u8(3u8); - v0.hash(state); - } - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Hash for crate::StmtMacro { - fn hash<H>(&self, state: &mut H) - where - H: Hasher, - { - self.attrs.hash(state); - self.mac.hash(state); - self.semi_token.hash(state); - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Hash for crate::TraitBound { - fn hash<H>(&self, state: &mut H) - where - H: Hasher, - { - self.paren_token.hash(state); - self.modifier.hash(state); - self.lifetimes.hash(state); - self.path.hash(state); - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Hash for crate::TraitBoundModifier { - fn hash<H>(&self, state: &mut H) - where - H: Hasher, - { - match self { - crate::TraitBoundModifier::None => { - state.write_u8(0u8); - } - crate::TraitBoundModifier::Maybe(_) => { - state.write_u8(1u8); - } - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Hash for crate::TraitItem { - fn hash<H>(&self, state: &mut H) - where - H: Hasher, - { - match self { - crate::TraitItem::Const(v0) => { - state.write_u8(0u8); - v0.hash(state); - } - crate::TraitItem::Fn(v0) => { - state.write_u8(1u8); - v0.hash(state); - } - crate::TraitItem::Type(v0) => { - state.write_u8(2u8); - v0.hash(state); - } - crate::TraitItem::Macro(v0) => { - state.write_u8(3u8); - v0.hash(state); - } - crate::TraitItem::Verbatim(v0) => { - state.write_u8(4u8); - TokenStreamHelper(v0).hash(state); - } - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Hash for crate::TraitItemConst { - fn hash<H>(&self, state: &mut H) - where - H: Hasher, - { - self.attrs.hash(state); - self.ident.hash(state); - self.generics.hash(state); - self.ty.hash(state); - self.default.hash(state); - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Hash for crate::TraitItemFn { - fn hash<H>(&self, state: &mut H) - where - H: Hasher, - { - self.attrs.hash(state); - self.sig.hash(state); - self.default.hash(state); - self.semi_token.hash(state); - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Hash for crate::TraitItemMacro { - fn hash<H>(&self, state: &mut H) - where - H: Hasher, - { - self.attrs.hash(state); - self.mac.hash(state); - self.semi_token.hash(state); - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Hash for crate::TraitItemType { - fn hash<H>(&self, state: &mut H) - where - H: Hasher, - { - self.attrs.hash(state); - self.ident.hash(state); - self.generics.hash(state); - self.colon_token.hash(state); - self.bounds.hash(state); - self.default.hash(state); - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Hash for crate::Type { - fn hash<H>(&self, state: &mut H) - where - H: Hasher, - { - match self { - crate::Type::Array(v0) => { - state.write_u8(0u8); - v0.hash(state); - } - crate::Type::BareFn(v0) => { - state.write_u8(1u8); - v0.hash(state); - } - crate::Type::Group(v0) => { - state.write_u8(2u8); - v0.hash(state); - } - crate::Type::ImplTrait(v0) => { - state.write_u8(3u8); - 
v0.hash(state); - } - crate::Type::Infer(v0) => { - state.write_u8(4u8); - v0.hash(state); - } - crate::Type::Macro(v0) => { - state.write_u8(5u8); - v0.hash(state); - } - crate::Type::Never(v0) => { - state.write_u8(6u8); - v0.hash(state); - } - crate::Type::Paren(v0) => { - state.write_u8(7u8); - v0.hash(state); - } - crate::Type::Path(v0) => { - state.write_u8(8u8); - v0.hash(state); - } - crate::Type::Ptr(v0) => { - state.write_u8(9u8); - v0.hash(state); - } - crate::Type::Reference(v0) => { - state.write_u8(10u8); - v0.hash(state); - } - crate::Type::Slice(v0) => { - state.write_u8(11u8); - v0.hash(state); - } - crate::Type::TraitObject(v0) => { - state.write_u8(12u8); - v0.hash(state); - } - crate::Type::Tuple(v0) => { - state.write_u8(13u8); - v0.hash(state); - } - crate::Type::Verbatim(v0) => { - state.write_u8(14u8); - TokenStreamHelper(v0).hash(state); - } - } - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Hash for crate::TypeArray { - fn hash<H>(&self, state: &mut H) - where - H: Hasher, - { - self.elem.hash(state); - self.len.hash(state); - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Hash for crate::TypeBareFn { - fn hash<H>(&self, state: &mut H) - where - H: Hasher, - { - self.lifetimes.hash(state); - self.unsafety.hash(state); - self.abi.hash(state); - self.inputs.hash(state); - self.variadic.hash(state); - self.output.hash(state); - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Hash for crate::TypeGroup { - fn hash<H>(&self, state: &mut H) - where - H: Hasher, - { - self.elem.hash(state); - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Hash for crate::TypeImplTrait { - fn hash<H>(&self, state: &mut H) - where - H: Hasher, - { - self.bounds.hash(state); - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Hash for crate::TypeInfer { - fn hash<H>(&self, _state: &mut H) - where - H: Hasher, - {} -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Hash for crate::TypeMacro { - fn hash<H>(&self, state: &mut H) - where - H: Hasher, - { - self.mac.hash(state); - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Hash for crate::TypeNever { - fn hash<H>(&self, _state: &mut H) - where - H: Hasher, - {} -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Hash for crate::TypeParam { - fn hash<H>(&self, state: &mut H) - where - H: Hasher, - { - self.attrs.hash(state); - self.ident.hash(state); - self.colon_token.hash(state); - self.bounds.hash(state); - self.eq_token.hash(state); - self.default.hash(state); - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Hash for crate::TypeParamBound { - fn hash<H>(&self, state: &mut H) - where - H: Hasher, - { - match self { - crate::TypeParamBound::Trait(v0) => { - state.write_u8(0u8); - v0.hash(state); - } - crate::TypeParamBound::Lifetime(v0) => { - state.write_u8(1u8); - v0.hash(state); - } - #[cfg(feature = "full")] - crate::TypeParamBound::PreciseCapture(v0) => { - state.write_u8(2u8); - v0.hash(state); - } - 
crate::TypeParamBound::Verbatim(v0) => { - state.write_u8(3u8); - TokenStreamHelper(v0).hash(state); - } - #[cfg(not(feature = "full"))] - _ => unreachable!(), - } - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Hash for crate::TypeParen { - fn hash<H>(&self, state: &mut H) - where - H: Hasher, - { - self.elem.hash(state); - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Hash for crate::TypePath { - fn hash<H>(&self, state: &mut H) - where - H: Hasher, - { - self.qself.hash(state); - self.path.hash(state); - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Hash for crate::TypePtr { - fn hash<H>(&self, state: &mut H) - where - H: Hasher, - { - self.const_token.hash(state); - self.mutability.hash(state); - self.elem.hash(state); - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Hash for crate::TypeReference { - fn hash<H>(&self, state: &mut H) - where - H: Hasher, - { - self.lifetime.hash(state); - self.mutability.hash(state); - self.elem.hash(state); - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Hash for crate::TypeSlice { - fn hash<H>(&self, state: &mut H) - where - H: Hasher, - { - self.elem.hash(state); - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Hash for crate::TypeTraitObject { - fn hash<H>(&self, state: &mut H) - where - H: Hasher, - { - self.dyn_token.hash(state); - self.bounds.hash(state); - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Hash for crate::TypeTuple { - fn hash<H>(&self, state: &mut H) - where - H: Hasher, - { - self.elems.hash(state); - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Hash for crate::UnOp { - fn hash<H>(&self, state: &mut H) - where - H: Hasher, - { - match self { - crate::UnOp::Deref(_) => { - state.write_u8(0u8); - } - crate::UnOp::Not(_) => { - state.write_u8(1u8); - } - crate::UnOp::Neg(_) => { - state.write_u8(2u8); - } - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Hash for crate::UseGlob { - fn hash<H>(&self, _state: &mut H) - where - H: Hasher, - {} -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Hash for crate::UseGroup { - fn hash<H>(&self, state: &mut H) - where - H: Hasher, - { - self.items.hash(state); - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Hash for crate::UseName { - fn hash<H>(&self, state: &mut H) - where - H: Hasher, - { - self.ident.hash(state); - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Hash for crate::UsePath { - fn hash<H>(&self, state: &mut H) - where - H: Hasher, - { - self.ident.hash(state); - self.tree.hash(state); - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Hash for crate::UseRename { - fn hash<H>(&self, state: &mut H) - where - H: Hasher, - { - self.ident.hash(state); - self.rename.hash(state); - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Hash for 
crate::UseTree { - fn hash<H>(&self, state: &mut H) - where - H: Hasher, - { - match self { - crate::UseTree::Path(v0) => { - state.write_u8(0u8); - v0.hash(state); - } - crate::UseTree::Name(v0) => { - state.write_u8(1u8); - v0.hash(state); - } - crate::UseTree::Rename(v0) => { - state.write_u8(2u8); - v0.hash(state); - } - crate::UseTree::Glob(v0) => { - state.write_u8(3u8); - v0.hash(state); - } - crate::UseTree::Group(v0) => { - state.write_u8(4u8); - v0.hash(state); - } - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Hash for crate::Variadic { - fn hash<H>(&self, state: &mut H) - where - H: Hasher, - { - self.attrs.hash(state); - self.pat.hash(state); - self.comma.hash(state); - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Hash for crate::Variant { - fn hash<H>(&self, state: &mut H) - where - H: Hasher, - { - self.attrs.hash(state); - self.ident.hash(state); - self.fields.hash(state); - self.discriminant.hash(state); - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Hash for crate::VisRestricted { - fn hash<H>(&self, state: &mut H) - where - H: Hasher, - { - self.in_token.hash(state); - self.path.hash(state); - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Hash for crate::Visibility { - fn hash<H>(&self, state: &mut H) - where - H: Hasher, - { - match self { - crate::Visibility::Public(_) => { - state.write_u8(0u8); - } - crate::Visibility::Restricted(v0) => { - state.write_u8(1u8); - v0.hash(state); - } - crate::Visibility::Inherited => { - state.write_u8(2u8); - } - } - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Hash for crate::WhereClause { - fn hash<H>(&self, state: &mut H) - where - H: Hasher, - { - self.predicates.hash(state); - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Hash for crate::WherePredicate { - fn hash<H>(&self, state: &mut H) - where - H: Hasher, - { - match self { - crate::WherePredicate::Lifetime(v0) => { - state.write_u8(0u8); - v0.hash(state); - } - crate::WherePredicate::Type(v0) => { - state.write_u8(1u8); - v0.hash(state); - } - } - } -} diff --git a/vendor/syn/src/gen/token.css b/vendor/syn/src/gen/token.css deleted file mode 100644 index ed82ae1502b4b6..00000000000000 --- a/vendor/syn/src/gen/token.css +++ /dev/null @@ -1,737 +0,0 @@ -a.struct[title="struct syn::token::Abstract"], -a.struct[title="struct syn::token::And"], -a.struct[title="struct syn::token::AndAnd"], -a.struct[title="struct syn::token::AndEq"], -a.struct[title="struct syn::token::As"], -a.struct[title="struct syn::token::Async"], -a.struct[title="struct syn::token::At"], -a.struct[title="struct syn::token::Auto"], -a.struct[title="struct syn::token::Await"], -a.struct[title="struct syn::token::Become"], -a.struct[title="struct syn::token::Box"], -a.struct[title="struct syn::token::Break"], -a.struct[title="struct syn::token::Caret"], -a.struct[title="struct syn::token::CaretEq"], -a.struct[title="struct syn::token::Colon"], -a.struct[title="struct syn::token::Comma"], -a.struct[title="struct syn::token::Const"], -a.struct[title="struct syn::token::Continue"], -a.struct[title="struct syn::token::Crate"], -a.struct[title="struct syn::token::Default"], -a.struct[title="struct 
syn::token::Do"], -a.struct[title="struct syn::token::Dollar"], -a.struct[title="struct syn::token::Dot"], -a.struct[title="struct syn::token::DotDot"], -a.struct[title="struct syn::token::DotDotDot"], -a.struct[title="struct syn::token::DotDotEq"], -a.struct[title="struct syn::token::Dyn"], -a.struct[title="struct syn::token::Else"], -a.struct[title="struct syn::token::Enum"], -a.struct[title="struct syn::token::Eq"], -a.struct[title="struct syn::token::EqEq"], -a.struct[title="struct syn::token::Extern"], -a.struct[title="struct syn::token::FatArrow"], -a.struct[title="struct syn::token::Final"], -a.struct[title="struct syn::token::Fn"], -a.struct[title="struct syn::token::For"], -a.struct[title="struct syn::token::Ge"], -a.struct[title="struct syn::token::Gt"], -a.struct[title="struct syn::token::If"], -a.struct[title="struct syn::token::Impl"], -a.struct[title="struct syn::token::In"], -a.struct[title="struct syn::token::LArrow"], -a.struct[title="struct syn::token::Le"], -a.struct[title="struct syn::token::Let"], -a.struct[title="struct syn::token::Loop"], -a.struct[title="struct syn::token::Lt"], -a.struct[title="struct syn::token::Macro"], -a.struct[title="struct syn::token::Match"], -a.struct[title="struct syn::token::Minus"], -a.struct[title="struct syn::token::MinusEq"], -a.struct[title="struct syn::token::Mod"], -a.struct[title="struct syn::token::Move"], -a.struct[title="struct syn::token::Mut"], -a.struct[title="struct syn::token::Ne"], -a.struct[title="struct syn::token::Not"], -a.struct[title="struct syn::token::Or"], -a.struct[title="struct syn::token::OrEq"], -a.struct[title="struct syn::token::OrOr"], -a.struct[title="struct syn::token::Override"], -a.struct[title="struct syn::token::PathSep"], -a.struct[title="struct syn::token::Percent"], -a.struct[title="struct syn::token::PercentEq"], -a.struct[title="struct syn::token::Plus"], -a.struct[title="struct syn::token::PlusEq"], -a.struct[title="struct syn::token::Pound"], -a.struct[title="struct syn::token::Priv"], -a.struct[title="struct syn::token::Pub"], -a.struct[title="struct syn::token::Question"], -a.struct[title="struct syn::token::RArrow"], -a.struct[title="struct syn::token::Raw"], -a.struct[title="struct syn::token::Ref"], -a.struct[title="struct syn::token::Return"], -a.struct[title="struct syn::token::SelfType"], -a.struct[title="struct syn::token::SelfValue"], -a.struct[title="struct syn::token::Semi"], -a.struct[title="struct syn::token::Shl"], -a.struct[title="struct syn::token::ShlEq"], -a.struct[title="struct syn::token::Shr"], -a.struct[title="struct syn::token::ShrEq"], -a.struct[title="struct syn::token::Slash"], -a.struct[title="struct syn::token::SlashEq"], -a.struct[title="struct syn::token::Star"], -a.struct[title="struct syn::token::StarEq"], -a.struct[title="struct syn::token::Static"], -a.struct[title="struct syn::token::Struct"], -a.struct[title="struct syn::token::Super"], -a.struct[title="struct syn::token::Tilde"], -a.struct[title="struct syn::token::Trait"], -a.struct[title="struct syn::token::Try"], -a.struct[title="struct syn::token::Type"], -a.struct[title="struct syn::token::Typeof"], -a.struct[title="struct syn::token::Underscore"], -a.struct[title="struct syn::token::Union"], -a.struct[title="struct syn::token::Unsafe"], -a.struct[title="struct syn::token::Unsized"], -a.struct[title="struct syn::token::Use"], -a.struct[title="struct syn::token::Virtual"], -a.struct[title="struct syn::token::Where"], -a.struct[title="struct syn::token::While"], -a.struct[title="struct 
syn::token::Yield"] { - display: inline-block; - color: transparent; - white-space: nowrap; -} - -a.struct[title="struct syn::token::Abstract"]::before, -a.struct[title="struct syn::token::And"]::before, -a.struct[title="struct syn::token::AndAnd"]::before, -a.struct[title="struct syn::token::AndEq"]::before, -a.struct[title="struct syn::token::As"]::before, -a.struct[title="struct syn::token::Async"]::before, -a.struct[title="struct syn::token::At"]::before, -a.struct[title="struct syn::token::Auto"]::before, -a.struct[title="struct syn::token::Await"]::before, -a.struct[title="struct syn::token::Become"]::before, -a.struct[title="struct syn::token::Box"]::before, -a.struct[title="struct syn::token::Break"]::before, -a.struct[title="struct syn::token::Caret"]::before, -a.struct[title="struct syn::token::CaretEq"]::before, -a.struct[title="struct syn::token::Colon"]::before, -a.struct[title="struct syn::token::Comma"]::before, -a.struct[title="struct syn::token::Const"]::before, -a.struct[title="struct syn::token::Continue"]::before, -a.struct[title="struct syn::token::Crate"]::before, -a.struct[title="struct syn::token::Default"]::before, -a.struct[title="struct syn::token::Do"]::before, -a.struct[title="struct syn::token::Dollar"]::before, -a.struct[title="struct syn::token::Dot"]::before, -a.struct[title="struct syn::token::DotDot"]::before, -a.struct[title="struct syn::token::DotDotDot"]::before, -a.struct[title="struct syn::token::DotDotEq"]::before, -a.struct[title="struct syn::token::Dyn"]::before, -a.struct[title="struct syn::token::Else"]::before, -a.struct[title="struct syn::token::Enum"]::before, -a.struct[title="struct syn::token::Eq"]::before, -a.struct[title="struct syn::token::EqEq"]::before, -a.struct[title="struct syn::token::Extern"]::before, -a.struct[title="struct syn::token::FatArrow"]::before, -a.struct[title="struct syn::token::Final"]::before, -a.struct[title="struct syn::token::Fn"]::before, -a.struct[title="struct syn::token::For"]::before, -a.struct[title="struct syn::token::Ge"]::before, -a.struct[title="struct syn::token::Gt"]::before, -a.struct[title="struct syn::token::If"]::before, -a.struct[title="struct syn::token::Impl"]::before, -a.struct[title="struct syn::token::In"]::before, -a.struct[title="struct syn::token::LArrow"]::before, -a.struct[title="struct syn::token::Le"]::before, -a.struct[title="struct syn::token::Let"]::before, -a.struct[title="struct syn::token::Loop"]::before, -a.struct[title="struct syn::token::Lt"]::before, -a.struct[title="struct syn::token::Macro"]::before, -a.struct[title="struct syn::token::Match"]::before, -a.struct[title="struct syn::token::Minus"]::before, -a.struct[title="struct syn::token::MinusEq"]::before, -a.struct[title="struct syn::token::Mod"]::before, -a.struct[title="struct syn::token::Move"]::before, -a.struct[title="struct syn::token::Mut"]::before, -a.struct[title="struct syn::token::Ne"]::before, -a.struct[title="struct syn::token::Not"]::before, -a.struct[title="struct syn::token::Or"]::before, -a.struct[title="struct syn::token::OrEq"]::before, -a.struct[title="struct syn::token::OrOr"]::before, -a.struct[title="struct syn::token::Override"]::before, -a.struct[title="struct syn::token::PathSep"]::before, -a.struct[title="struct syn::token::Percent"]::before, -a.struct[title="struct syn::token::PercentEq"]::before, -a.struct[title="struct syn::token::Plus"]::before, -a.struct[title="struct syn::token::PlusEq"]::before, -a.struct[title="struct syn::token::Pound"]::before, -a.struct[title="struct 
syn::token::Priv"]::before, -a.struct[title="struct syn::token::Pub"]::before, -a.struct[title="struct syn::token::Question"]::before, -a.struct[title="struct syn::token::RArrow"]::before, -a.struct[title="struct syn::token::Raw"]::before, -a.struct[title="struct syn::token::Ref"]::before, -a.struct[title="struct syn::token::Return"]::before, -a.struct[title="struct syn::token::SelfType"]::before, -a.struct[title="struct syn::token::SelfValue"]::before, -a.struct[title="struct syn::token::Semi"]::before, -a.struct[title="struct syn::token::Shl"]::before, -a.struct[title="struct syn::token::ShlEq"]::before, -a.struct[title="struct syn::token::Shr"]::before, -a.struct[title="struct syn::token::ShrEq"]::before, -a.struct[title="struct syn::token::Slash"]::before, -a.struct[title="struct syn::token::SlashEq"]::before, -a.struct[title="struct syn::token::Star"]::before, -a.struct[title="struct syn::token::StarEq"]::before, -a.struct[title="struct syn::token::Static"]::before, -a.struct[title="struct syn::token::Struct"]::before, -a.struct[title="struct syn::token::Super"]::before, -a.struct[title="struct syn::token::Tilde"]::before, -a.struct[title="struct syn::token::Trait"]::before, -a.struct[title="struct syn::token::Try"]::before, -a.struct[title="struct syn::token::Type"]::before, -a.struct[title="struct syn::token::Typeof"]::before, -a.struct[title="struct syn::token::Underscore"]::before, -a.struct[title="struct syn::token::Union"]::before, -a.struct[title="struct syn::token::Unsafe"]::before, -a.struct[title="struct syn::token::Unsized"]::before, -a.struct[title="struct syn::token::Use"]::before, -a.struct[title="struct syn::token::Virtual"]::before, -a.struct[title="struct syn::token::Where"]::before, -a.struct[title="struct syn::token::While"]::before, -a.struct[title="struct syn::token::Yield"]::before { - display: inline-block; - color: var(--type-link-color); - width: 0; -} - -a.struct[title="struct syn::token::Abstract"]::before { - content: "Token![abstract]"; -} - -a.struct[title="struct syn::token::And"]::before { - content: "Token![&]"; -} - -a.struct[title="struct syn::token::AndAnd"]::before { - content: "Token![&&]"; -} - -a.struct[title="struct syn::token::AndEq"]::before { - content: "Token![&=]"; -} - -a.struct[title="struct syn::token::As"]::before { - content: "Token![as]"; -} - -a.struct[title="struct syn::token::Async"]::before { - content: "Token![async]"; -} - -a.struct[title="struct syn::token::At"]::before { - content: "Token![@]"; -} - -a.struct[title="struct syn::token::Auto"]::before { - content: "Token![auto]"; -} - -a.struct[title="struct syn::token::Await"]::before { - content: "Token![await]"; -} - -a.struct[title="struct syn::token::Become"]::before { - content: "Token![become]"; -} - -a.struct[title="struct syn::token::Box"]::before { - content: "Token![box]"; -} - -a.struct[title="struct syn::token::Break"]::before { - content: "Token![break]"; -} - -a.struct[title="struct syn::token::Caret"]::before { - content: "Token![^]"; -} - -a.struct[title="struct syn::token::CaretEq"]::before { - content: "Token![^=]"; -} - -a.struct[title="struct syn::token::Colon"]::before { - content: "Token![:]"; -} - -a.struct[title="struct syn::token::Comma"]::before { - content: "Token![,]"; -} - -a.struct[title="struct syn::token::Const"]::before { - content: "Token![const]"; -} - -a.struct[title="struct syn::token::Continue"]::before { - content: "Token![continue]"; -} - -a.struct[title="struct syn::token::Crate"]::before { - content: "Token![crate]"; -} - 
-a.struct[title="struct syn::token::Default"]::before { - content: "Token![default]"; -} - -a.struct[title="struct syn::token::Do"]::before { - content: "Token![do]"; -} - -a.struct[title="struct syn::token::Dollar"]::before { - content: "Token![$]"; -} - -a.struct[title="struct syn::token::Dot"]::before { - content: "Token![.]"; -} - -a.struct[title="struct syn::token::DotDot"]::before { - content: "Token![..]"; -} - -a.struct[title="struct syn::token::DotDotDot"]::before { - content: "Token![...]"; -} - -a.struct[title="struct syn::token::DotDotEq"]::before { - content: "Token![..=]"; -} - -a.struct[title="struct syn::token::Dyn"]::before { - content: "Token![dyn]"; -} - -a.struct[title="struct syn::token::Else"]::before { - content: "Token![else]"; -} - -a.struct[title="struct syn::token::Enum"]::before { - content: "Token![enum]"; -} - -a.struct[title="struct syn::token::Eq"]::before { - content: "Token![=]"; -} - -a.struct[title="struct syn::token::EqEq"]::before { - content: "Token![==]"; -} - -a.struct[title="struct syn::token::Extern"]::before { - content: "Token![extern]"; -} - -a.struct[title="struct syn::token::FatArrow"]::before { - content: "Token![=>]"; -} - -a.struct[title="struct syn::token::Final"]::before { - content: "Token![final]"; -} - -a.struct[title="struct syn::token::Fn"]::before { - content: "Token![fn]"; -} - -a.struct[title="struct syn::token::For"]::before { - content: "Token![for]"; -} - -a.struct[title="struct syn::token::Ge"]::before { - content: "Token![>=]"; -} - -a.struct[title="struct syn::token::Gt"]::before { - content: "Token![>]"; -} - -a.struct[title="struct syn::token::If"]::before { - content: "Token![if]"; -} - -a.struct[title="struct syn::token::Impl"]::before { - content: "Token![impl]"; -} - -a.struct[title="struct syn::token::In"]::before { - content: "Token![in]"; -} - -a.struct[title="struct syn::token::LArrow"]::before { - content: "Token![<-]"; -} - -a.struct[title="struct syn::token::Le"]::before { - content: "Token![<=]"; -} - -a.struct[title="struct syn::token::Let"]::before { - content: "Token![let]"; -} - -a.struct[title="struct syn::token::Loop"]::before { - content: "Token![loop]"; -} - -a.struct[title="struct syn::token::Lt"]::before { - content: "Token![<]"; -} - -a.struct[title="struct syn::token::Macro"]::before { - content: "Token![macro]"; -} - -a.struct[title="struct syn::token::Match"]::before { - content: "Token![match]"; -} - -a.struct[title="struct syn::token::Minus"]::before { - content: "Token![-]"; -} - -a.struct[title="struct syn::token::MinusEq"]::before { - content: "Token![-=]"; -} - -a.struct[title="struct syn::token::Mod"]::before { - content: "Token![mod]"; -} - -a.struct[title="struct syn::token::Move"]::before { - content: "Token![move]"; -} - -a.struct[title="struct syn::token::Mut"]::before { - content: "Token![mut]"; -} - -a.struct[title="struct syn::token::Ne"]::before { - content: "Token![!=]"; -} - -a.struct[title="struct syn::token::Not"]::before { - content: "Token![!]"; -} - -a.struct[title="struct syn::token::Or"]::before { - content: "Token![|]"; -} - -a.struct[title="struct syn::token::OrEq"]::before { - content: "Token![|=]"; -} - -a.struct[title="struct syn::token::OrOr"]::before { - content: "Token![||]"; -} - -a.struct[title="struct syn::token::Override"]::before { - content: "Token![override]"; -} - -a.struct[title="struct syn::token::PathSep"]::before { - content: "Token![::]"; -} - -a.struct[title="struct syn::token::Percent"]::before { - content: "Token![%]"; -} - -a.struct[title="struct 
syn::token::PercentEq"]::before { - content: "Token![%=]"; -} - -a.struct[title="struct syn::token::Plus"]::before { - content: "Token![+]"; -} - -a.struct[title="struct syn::token::PlusEq"]::before { - content: "Token![+=]"; -} - -a.struct[title="struct syn::token::Pound"]::before { - content: "Token![#]"; -} - -a.struct[title="struct syn::token::Priv"]::before { - content: "Token![priv]"; -} - -a.struct[title="struct syn::token::Pub"]::before { - content: "Token![pub]"; -} - -a.struct[title="struct syn::token::Question"]::before { - content: "Token![?]"; -} - -a.struct[title="struct syn::token::RArrow"]::before { - content: "Token![->]"; -} - -a.struct[title="struct syn::token::Raw"]::before { - content: "Token![raw]"; -} - -a.struct[title="struct syn::token::Ref"]::before { - content: "Token![ref]"; -} - -a.struct[title="struct syn::token::Return"]::before { - content: "Token![return]"; -} - -a.struct[title="struct syn::token::SelfType"]::before { - content: "Token![Self]"; -} - -a.struct[title="struct syn::token::SelfValue"]::before { - content: "Token![self]"; -} - -a.struct[title="struct syn::token::Semi"]::before { - content: "Token![;]"; -} - -a.struct[title="struct syn::token::Shl"]::before { - content: "Token![<<]"; -} - -a.struct[title="struct syn::token::ShlEq"]::before { - content: "Token![<<=]"; -} - -a.struct[title="struct syn::token::Shr"]::before { - content: "Token![>>]"; -} - -a.struct[title="struct syn::token::ShrEq"]::before { - content: "Token![>>=]"; -} - -a.struct[title="struct syn::token::Slash"]::before { - content: "Token![/]"; -} - -a.struct[title="struct syn::token::SlashEq"]::before { - content: "Token![/=]"; -} - -a.struct[title="struct syn::token::Star"]::before { - content: "Token![*]"; -} - -a.struct[title="struct syn::token::StarEq"]::before { - content: "Token![*=]"; -} - -a.struct[title="struct syn::token::Static"]::before { - content: "Token![static]"; -} - -a.struct[title="struct syn::token::Struct"]::before { - content: "Token![struct]"; -} - -a.struct[title="struct syn::token::Super"]::before { - content: "Token![super]"; -} - -a.struct[title="struct syn::token::Tilde"]::before { - content: "Token![~]"; -} - -a.struct[title="struct syn::token::Trait"]::before { - content: "Token![trait]"; -} - -a.struct[title="struct syn::token::Try"]::before { - content: "Token![try]"; -} - -a.struct[title="struct syn::token::Type"]::before { - content: "Token![type]"; -} - -a.struct[title="struct syn::token::Typeof"]::before { - content: "Token![typeof]"; -} - -a.struct[title="struct syn::token::Underscore"]::before { - content: "Token![_]"; - font-size: calc(100% * 10 / 9); -} - -a.struct[title="struct syn::token::Union"]::before { - content: "Token![union]"; -} - -a.struct[title="struct syn::token::Unsafe"]::before { - content: "Token![unsafe]"; -} - -a.struct[title="struct syn::token::Unsized"]::before { - content: "Token![unsized]"; -} - -a.struct[title="struct syn::token::Use"]::before { - content: "Token![use]"; -} - -a.struct[title="struct syn::token::Virtual"]::before { - content: "Token![virtual]"; -} - -a.struct[title="struct syn::token::Where"]::before { - content: "Token![where]"; -} - -a.struct[title="struct syn::token::While"]::before { - content: "Token![while]"; -} - -a.struct[title="struct syn::token::Yield"]::before { - content: "Token![yield]"; -} - -a.struct[title="struct syn::token::Underscore"] { - font-size: calc(100% * 9 / 10); -} - -a.struct[title="struct syn::token::PercentEq"]::after, -a.struct[title="struct syn::token::Question"]::after 
{ - content: "."; -} - -a.struct[title="struct syn::token::DotDotDot"]::after, -a.struct[title="struct syn::token::FatArrow"]::after, -a.struct[title="struct syn::token::Percent"]::after { - content: ".."; -} - -a.struct[title="struct syn::token::CaretEq"]::after, -a.struct[title="struct syn::token::Dollar"]::after, -a.struct[title="struct syn::token::DotDotEq"]::after, -a.struct[title="struct syn::token::MinusEq"]::after, -a.struct[title="struct syn::token::PathSep"]::after, -a.struct[title="struct syn::token::SelfValue"]::after, -a.struct[title="struct syn::token::SlashEq"]::after { - content: "..."; -} - -a.struct[title="struct syn::token::AndAnd"]::after, -a.struct[title="struct syn::token::Caret"]::after, -a.struct[title="struct syn::token::Colon"]::after, -a.struct[title="struct syn::token::Comma"]::after, -a.struct[title="struct syn::token::DotDot"]::after, -a.struct[title="struct syn::token::LArrow"]::after, -a.struct[title="struct syn::token::Minus"]::after, -a.struct[title="struct syn::token::PlusEq"]::after, -a.struct[title="struct syn::token::Pound"]::after, -a.struct[title="struct syn::token::RArrow"]::after, -a.struct[title="struct syn::token::SelfType"]::after, -a.struct[title="struct syn::token::Slash"]::after, -a.struct[title="struct syn::token::StarEq"]::after, -a.struct[title="struct syn::token::Tilde"]::after { - content: "...."; -} - -a.struct[title="struct syn::token::AndEq"]::after, -a.struct[title="struct syn::token::Plus"]::after, -a.struct[title="struct syn::token::Semi"]::after, -a.struct[title="struct syn::token::Star"]::after { - content: "....."; -} - -a.struct[title="struct syn::token::And"]::after, -a.struct[title="struct syn::token::Dot"]::after, -a.struct[title="struct syn::token::EqEq"]::after, -a.struct[title="struct syn::token::Not"]::after, -a.struct[title="struct syn::token::OrEq"]::after, -a.struct[title="struct syn::token::OrOr"]::after, -a.struct[title="struct syn::token::ShlEq"]::after, -a.struct[title="struct syn::token::ShrEq"]::after { - content: "......"; -} - -a.struct[title="struct syn::token::At"]::after, -a.struct[title="struct syn::token::Eq"]::after, -a.struct[title="struct syn::token::Gt"]::after, -a.struct[title="struct syn::token::Lt"]::after, -a.struct[title="struct syn::token::Or"]::after, -a.struct[title="struct syn::token::Shl"]::after, -a.struct[title="struct syn::token::Shr"]::after { - content: "......."; -} - -a.struct[title="struct syn::token::Abstract"]::after, -a.struct[title="struct syn::token::As"]::after, -a.struct[title="struct syn::token::Async"]::after, -a.struct[title="struct syn::token::Auto"]::after, -a.struct[title="struct syn::token::Await"]::after, -a.struct[title="struct syn::token::Become"]::after, -a.struct[title="struct syn::token::Box"]::after, -a.struct[title="struct syn::token::Break"]::after, -a.struct[title="struct syn::token::Const"]::after, -a.struct[title="struct syn::token::Continue"]::after, -a.struct[title="struct syn::token::Crate"]::after, -a.struct[title="struct syn::token::Default"]::after, -a.struct[title="struct syn::token::Do"]::after, -a.struct[title="struct syn::token::Dyn"]::after, -a.struct[title="struct syn::token::Else"]::after, -a.struct[title="struct syn::token::Enum"]::after, -a.struct[title="struct syn::token::Extern"]::after, -a.struct[title="struct syn::token::Final"]::after, -a.struct[title="struct syn::token::Fn"]::after, -a.struct[title="struct syn::token::For"]::after, -a.struct[title="struct syn::token::Ge"]::after, -a.struct[title="struct syn::token::If"]::after, 
-a.struct[title="struct syn::token::Impl"]::after, -a.struct[title="struct syn::token::In"]::after, -a.struct[title="struct syn::token::Le"]::after, -a.struct[title="struct syn::token::Let"]::after, -a.struct[title="struct syn::token::Loop"]::after, -a.struct[title="struct syn::token::Macro"]::after, -a.struct[title="struct syn::token::Match"]::after, -a.struct[title="struct syn::token::Mod"]::after, -a.struct[title="struct syn::token::Move"]::after, -a.struct[title="struct syn::token::Mut"]::after, -a.struct[title="struct syn::token::Ne"]::after, -a.struct[title="struct syn::token::Override"]::after, -a.struct[title="struct syn::token::Priv"]::after, -a.struct[title="struct syn::token::Pub"]::after, -a.struct[title="struct syn::token::Raw"]::after, -a.struct[title="struct syn::token::Ref"]::after, -a.struct[title="struct syn::token::Return"]::after, -a.struct[title="struct syn::token::Static"]::after, -a.struct[title="struct syn::token::Struct"]::after, -a.struct[title="struct syn::token::Super"]::after, -a.struct[title="struct syn::token::Trait"]::after, -a.struct[title="struct syn::token::Try"]::after, -a.struct[title="struct syn::token::Type"]::after, -a.struct[title="struct syn::token::Typeof"]::after, -a.struct[title="struct syn::token::Union"]::after, -a.struct[title="struct syn::token::Unsafe"]::after, -a.struct[title="struct syn::token::Unsized"]::after, -a.struct[title="struct syn::token::Use"]::after, -a.struct[title="struct syn::token::Virtual"]::after, -a.struct[title="struct syn::token::Where"]::after, -a.struct[title="struct syn::token::While"]::after, -a.struct[title="struct syn::token::Yield"]::after { - content: "........"; -} diff --git a/vendor/syn/src/gen/visit.rs b/vendor/syn/src/gen/visit.rs deleted file mode 100644 index cd258fcde120a9..00000000000000 --- a/vendor/syn/src/gen/visit.rs +++ /dev/null @@ -1,3941 +0,0 @@ -// This file is @generated by syn-internal-codegen. -// It is not intended for manual editing. - -#![allow(unused_variables)] -#![allow(clippy::needless_pass_by_ref_mut)] -#[cfg(any(feature = "full", feature = "derive"))] -use crate::punctuated::Punctuated; -#[cfg(feature = "full")] -macro_rules! full { - ($e:expr) => { - $e - }; -} -#[cfg(all(feature = "derive", not(feature = "full")))] -macro_rules! full { - ($e:expr) => { - unreachable!() - }; -} -macro_rules! skip { - ($($tt:tt)*) => {}; -} -/// Syntax tree traversal to walk a shared borrow of a syntax tree. -/// -/// See the [module documentation] for details. 
-/// -/// [module documentation]: self -pub trait Visit<'ast> { - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn visit_abi(&mut self, i: &'ast crate::Abi) { - visit_abi(self, i); - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn visit_angle_bracketed_generic_arguments( - &mut self, - i: &'ast crate::AngleBracketedGenericArguments, - ) { - visit_angle_bracketed_generic_arguments(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_arm(&mut self, i: &'ast crate::Arm) { - visit_arm(self, i); - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn visit_assoc_const(&mut self, i: &'ast crate::AssocConst) { - visit_assoc_const(self, i); - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn visit_assoc_type(&mut self, i: &'ast crate::AssocType) { - visit_assoc_type(self, i); - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn visit_attr_style(&mut self, i: &'ast crate::AttrStyle) { - visit_attr_style(self, i); - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn visit_attribute(&mut self, i: &'ast crate::Attribute) { - visit_attribute(self, i); - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn visit_bare_fn_arg(&mut self, i: &'ast crate::BareFnArg) { - visit_bare_fn_arg(self, i); - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn visit_bare_variadic(&mut self, i: &'ast crate::BareVariadic) { - visit_bare_variadic(self, i); - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn visit_bin_op(&mut self, i: &'ast crate::BinOp) { - visit_bin_op(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_block(&mut self, i: &'ast crate::Block) { - visit_block(self, i); - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn visit_bound_lifetimes(&mut self, i: &'ast crate::BoundLifetimes) { - visit_bound_lifetimes(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_captured_param(&mut self, i: &'ast crate::CapturedParam) { - visit_captured_param(self, i); - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn visit_const_param(&mut self, i: &'ast crate::ConstParam) { - visit_const_param(self, i); - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn visit_constraint(&mut self, i: &'ast crate::Constraint) { - visit_constraint(self, i); - } - #[cfg(feature = "derive")] - #[cfg_attr(docsrs, doc(cfg(feature = "derive")))] - fn visit_data(&mut self, i: &'ast crate::Data) { - visit_data(self, i); - } - #[cfg(feature = "derive")] - #[cfg_attr(docsrs, doc(cfg(feature = "derive")))] - fn 
visit_data_enum(&mut self, i: &'ast crate::DataEnum) { - visit_data_enum(self, i); - } - #[cfg(feature = "derive")] - #[cfg_attr(docsrs, doc(cfg(feature = "derive")))] - fn visit_data_struct(&mut self, i: &'ast crate::DataStruct) { - visit_data_struct(self, i); - } - #[cfg(feature = "derive")] - #[cfg_attr(docsrs, doc(cfg(feature = "derive")))] - fn visit_data_union(&mut self, i: &'ast crate::DataUnion) { - visit_data_union(self, i); - } - #[cfg(feature = "derive")] - #[cfg_attr(docsrs, doc(cfg(feature = "derive")))] - fn visit_derive_input(&mut self, i: &'ast crate::DeriveInput) { - visit_derive_input(self, i); - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn visit_expr(&mut self, i: &'ast crate::Expr) { - visit_expr(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_expr_array(&mut self, i: &'ast crate::ExprArray) { - visit_expr_array(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_expr_assign(&mut self, i: &'ast crate::ExprAssign) { - visit_expr_assign(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_expr_async(&mut self, i: &'ast crate::ExprAsync) { - visit_expr_async(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_expr_await(&mut self, i: &'ast crate::ExprAwait) { - visit_expr_await(self, i); - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn visit_expr_binary(&mut self, i: &'ast crate::ExprBinary) { - visit_expr_binary(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_expr_block(&mut self, i: &'ast crate::ExprBlock) { - visit_expr_block(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_expr_break(&mut self, i: &'ast crate::ExprBreak) { - visit_expr_break(self, i); - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn visit_expr_call(&mut self, i: &'ast crate::ExprCall) { - visit_expr_call(self, i); - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn visit_expr_cast(&mut self, i: &'ast crate::ExprCast) { - visit_expr_cast(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_expr_closure(&mut self, i: &'ast crate::ExprClosure) { - visit_expr_closure(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_expr_const(&mut self, i: &'ast crate::ExprConst) { - visit_expr_const(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_expr_continue(&mut self, i: &'ast crate::ExprContinue) { - visit_expr_continue(self, i); - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn visit_expr_field(&mut self, i: &'ast crate::ExprField) { - visit_expr_field(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_expr_for_loop(&mut self, i: &'ast crate::ExprForLoop) { - visit_expr_for_loop(self, i); - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = 
"derive", feature = "full"))))] - fn visit_expr_group(&mut self, i: &'ast crate::ExprGroup) { - visit_expr_group(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_expr_if(&mut self, i: &'ast crate::ExprIf) { - visit_expr_if(self, i); - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn visit_expr_index(&mut self, i: &'ast crate::ExprIndex) { - visit_expr_index(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_expr_infer(&mut self, i: &'ast crate::ExprInfer) { - visit_expr_infer(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_expr_let(&mut self, i: &'ast crate::ExprLet) { - visit_expr_let(self, i); - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn visit_expr_lit(&mut self, i: &'ast crate::ExprLit) { - visit_expr_lit(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_expr_loop(&mut self, i: &'ast crate::ExprLoop) { - visit_expr_loop(self, i); - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn visit_expr_macro(&mut self, i: &'ast crate::ExprMacro) { - visit_expr_macro(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_expr_match(&mut self, i: &'ast crate::ExprMatch) { - visit_expr_match(self, i); - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn visit_expr_method_call(&mut self, i: &'ast crate::ExprMethodCall) { - visit_expr_method_call(self, i); - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn visit_expr_paren(&mut self, i: &'ast crate::ExprParen) { - visit_expr_paren(self, i); - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn visit_expr_path(&mut self, i: &'ast crate::ExprPath) { - visit_expr_path(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_expr_range(&mut self, i: &'ast crate::ExprRange) { - visit_expr_range(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_expr_raw_addr(&mut self, i: &'ast crate::ExprRawAddr) { - visit_expr_raw_addr(self, i); - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn visit_expr_reference(&mut self, i: &'ast crate::ExprReference) { - visit_expr_reference(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_expr_repeat(&mut self, i: &'ast crate::ExprRepeat) { - visit_expr_repeat(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_expr_return(&mut self, i: &'ast crate::ExprReturn) { - visit_expr_return(self, i); - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn visit_expr_struct(&mut self, i: &'ast crate::ExprStruct) { - visit_expr_struct(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn 
visit_expr_try(&mut self, i: &'ast crate::ExprTry) { - visit_expr_try(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_expr_try_block(&mut self, i: &'ast crate::ExprTryBlock) { - visit_expr_try_block(self, i); - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn visit_expr_tuple(&mut self, i: &'ast crate::ExprTuple) { - visit_expr_tuple(self, i); - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn visit_expr_unary(&mut self, i: &'ast crate::ExprUnary) { - visit_expr_unary(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_expr_unsafe(&mut self, i: &'ast crate::ExprUnsafe) { - visit_expr_unsafe(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_expr_while(&mut self, i: &'ast crate::ExprWhile) { - visit_expr_while(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_expr_yield(&mut self, i: &'ast crate::ExprYield) { - visit_expr_yield(self, i); - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn visit_field(&mut self, i: &'ast crate::Field) { - visit_field(self, i); - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn visit_field_mutability(&mut self, i: &'ast crate::FieldMutability) { - visit_field_mutability(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_field_pat(&mut self, i: &'ast crate::FieldPat) { - visit_field_pat(self, i); - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn visit_field_value(&mut self, i: &'ast crate::FieldValue) { - visit_field_value(self, i); - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn visit_fields(&mut self, i: &'ast crate::Fields) { - visit_fields(self, i); - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn visit_fields_named(&mut self, i: &'ast crate::FieldsNamed) { - visit_fields_named(self, i); - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn visit_fields_unnamed(&mut self, i: &'ast crate::FieldsUnnamed) { - visit_fields_unnamed(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_file(&mut self, i: &'ast crate::File) { - visit_file(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_fn_arg(&mut self, i: &'ast crate::FnArg) { - visit_fn_arg(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_foreign_item(&mut self, i: &'ast crate::ForeignItem) { - visit_foreign_item(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_foreign_item_fn(&mut self, i: &'ast crate::ForeignItemFn) { - visit_foreign_item_fn(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_foreign_item_macro(&mut self, i: &'ast 
crate::ForeignItemMacro) { - visit_foreign_item_macro(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_foreign_item_static(&mut self, i: &'ast crate::ForeignItemStatic) { - visit_foreign_item_static(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_foreign_item_type(&mut self, i: &'ast crate::ForeignItemType) { - visit_foreign_item_type(self, i); - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn visit_generic_argument(&mut self, i: &'ast crate::GenericArgument) { - visit_generic_argument(self, i); - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn visit_generic_param(&mut self, i: &'ast crate::GenericParam) { - visit_generic_param(self, i); - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn visit_generics(&mut self, i: &'ast crate::Generics) { - visit_generics(self, i); - } - fn visit_ident(&mut self, i: &'ast proc_macro2::Ident) { - visit_ident(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_impl_item(&mut self, i: &'ast crate::ImplItem) { - visit_impl_item(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_impl_item_const(&mut self, i: &'ast crate::ImplItemConst) { - visit_impl_item_const(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_impl_item_fn(&mut self, i: &'ast crate::ImplItemFn) { - visit_impl_item_fn(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_impl_item_macro(&mut self, i: &'ast crate::ImplItemMacro) { - visit_impl_item_macro(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_impl_item_type(&mut self, i: &'ast crate::ImplItemType) { - visit_impl_item_type(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_impl_restriction(&mut self, i: &'ast crate::ImplRestriction) { - visit_impl_restriction(self, i); - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn visit_index(&mut self, i: &'ast crate::Index) { - visit_index(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_item(&mut self, i: &'ast crate::Item) { - visit_item(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_item_const(&mut self, i: &'ast crate::ItemConst) { - visit_item_const(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_item_enum(&mut self, i: &'ast crate::ItemEnum) { - visit_item_enum(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_item_extern_crate(&mut self, i: &'ast crate::ItemExternCrate) { - visit_item_extern_crate(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_item_fn(&mut self, i: &'ast crate::ItemFn) { - visit_item_fn(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_item_foreign_mod(&mut self, i: &'ast crate::ItemForeignMod) { - 
visit_item_foreign_mod(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_item_impl(&mut self, i: &'ast crate::ItemImpl) { - visit_item_impl(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_item_macro(&mut self, i: &'ast crate::ItemMacro) { - visit_item_macro(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_item_mod(&mut self, i: &'ast crate::ItemMod) { - visit_item_mod(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_item_static(&mut self, i: &'ast crate::ItemStatic) { - visit_item_static(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_item_struct(&mut self, i: &'ast crate::ItemStruct) { - visit_item_struct(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_item_trait(&mut self, i: &'ast crate::ItemTrait) { - visit_item_trait(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_item_trait_alias(&mut self, i: &'ast crate::ItemTraitAlias) { - visit_item_trait_alias(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_item_type(&mut self, i: &'ast crate::ItemType) { - visit_item_type(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_item_union(&mut self, i: &'ast crate::ItemUnion) { - visit_item_union(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_item_use(&mut self, i: &'ast crate::ItemUse) { - visit_item_use(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_label(&mut self, i: &'ast crate::Label) { - visit_label(self, i); - } - fn visit_lifetime(&mut self, i: &'ast crate::Lifetime) { - visit_lifetime(self, i); - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn visit_lifetime_param(&mut self, i: &'ast crate::LifetimeParam) { - visit_lifetime_param(self, i); - } - fn visit_lit(&mut self, i: &'ast crate::Lit) { - visit_lit(self, i); - } - fn visit_lit_bool(&mut self, i: &'ast crate::LitBool) { - visit_lit_bool(self, i); - } - fn visit_lit_byte(&mut self, i: &'ast crate::LitByte) { - visit_lit_byte(self, i); - } - fn visit_lit_byte_str(&mut self, i: &'ast crate::LitByteStr) { - visit_lit_byte_str(self, i); - } - fn visit_lit_cstr(&mut self, i: &'ast crate::LitCStr) { - visit_lit_cstr(self, i); - } - fn visit_lit_char(&mut self, i: &'ast crate::LitChar) { - visit_lit_char(self, i); - } - fn visit_lit_float(&mut self, i: &'ast crate::LitFloat) { - visit_lit_float(self, i); - } - fn visit_lit_int(&mut self, i: &'ast crate::LitInt) { - visit_lit_int(self, i); - } - fn visit_lit_str(&mut self, i: &'ast crate::LitStr) { - visit_lit_str(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_local(&mut self, i: &'ast crate::Local) { - visit_local(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_local_init(&mut self, i: &'ast crate::LocalInit) { - visit_local_init(self, i); - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn visit_macro(&mut self, i: &'ast crate::Macro) { - 
visit_macro(self, i); - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn visit_macro_delimiter(&mut self, i: &'ast crate::MacroDelimiter) { - visit_macro_delimiter(self, i); - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn visit_member(&mut self, i: &'ast crate::Member) { - visit_member(self, i); - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn visit_meta(&mut self, i: &'ast crate::Meta) { - visit_meta(self, i); - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn visit_meta_list(&mut self, i: &'ast crate::MetaList) { - visit_meta_list(self, i); - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn visit_meta_name_value(&mut self, i: &'ast crate::MetaNameValue) { - visit_meta_name_value(self, i); - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn visit_parenthesized_generic_arguments( - &mut self, - i: &'ast crate::ParenthesizedGenericArguments, - ) { - visit_parenthesized_generic_arguments(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_pat(&mut self, i: &'ast crate::Pat) { - visit_pat(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_pat_ident(&mut self, i: &'ast crate::PatIdent) { - visit_pat_ident(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_pat_or(&mut self, i: &'ast crate::PatOr) { - visit_pat_or(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_pat_paren(&mut self, i: &'ast crate::PatParen) { - visit_pat_paren(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_pat_reference(&mut self, i: &'ast crate::PatReference) { - visit_pat_reference(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_pat_rest(&mut self, i: &'ast crate::PatRest) { - visit_pat_rest(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_pat_slice(&mut self, i: &'ast crate::PatSlice) { - visit_pat_slice(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_pat_struct(&mut self, i: &'ast crate::PatStruct) { - visit_pat_struct(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_pat_tuple(&mut self, i: &'ast crate::PatTuple) { - visit_pat_tuple(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_pat_tuple_struct(&mut self, i: &'ast crate::PatTupleStruct) { - visit_pat_tuple_struct(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_pat_type(&mut self, i: &'ast crate::PatType) { - visit_pat_type(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_pat_wild(&mut self, i: &'ast crate::PatWild) { - visit_pat_wild(self, i); - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = 
"derive", feature = "full"))))] - fn visit_path(&mut self, i: &'ast crate::Path) { - visit_path(self, i); - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn visit_path_arguments(&mut self, i: &'ast crate::PathArguments) { - visit_path_arguments(self, i); - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn visit_path_segment(&mut self, i: &'ast crate::PathSegment) { - visit_path_segment(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_pointer_mutability(&mut self, i: &'ast crate::PointerMutability) { - visit_pointer_mutability(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_precise_capture(&mut self, i: &'ast crate::PreciseCapture) { - visit_precise_capture(self, i); - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn visit_predicate_lifetime(&mut self, i: &'ast crate::PredicateLifetime) { - visit_predicate_lifetime(self, i); - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn visit_predicate_type(&mut self, i: &'ast crate::PredicateType) { - visit_predicate_type(self, i); - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn visit_qself(&mut self, i: &'ast crate::QSelf) { - visit_qself(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_range_limits(&mut self, i: &'ast crate::RangeLimits) { - visit_range_limits(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_receiver(&mut self, i: &'ast crate::Receiver) { - visit_receiver(self, i); - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn visit_return_type(&mut self, i: &'ast crate::ReturnType) { - visit_return_type(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_signature(&mut self, i: &'ast crate::Signature) { - visit_signature(self, i); - } - fn visit_span(&mut self, i: &proc_macro2::Span) {} - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_static_mutability(&mut self, i: &'ast crate::StaticMutability) { - visit_static_mutability(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_stmt(&mut self, i: &'ast crate::Stmt) { - visit_stmt(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_stmt_macro(&mut self, i: &'ast crate::StmtMacro) { - visit_stmt_macro(self, i); - } - fn visit_token_stream(&mut self, i: &'ast proc_macro2::TokenStream) {} - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn visit_trait_bound(&mut self, i: &'ast crate::TraitBound) { - visit_trait_bound(self, i); - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn visit_trait_bound_modifier(&mut self, i: &'ast crate::TraitBoundModifier) { - visit_trait_bound_modifier(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, 
doc(cfg(feature = "full")))] - fn visit_trait_item(&mut self, i: &'ast crate::TraitItem) { - visit_trait_item(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_trait_item_const(&mut self, i: &'ast crate::TraitItemConst) { - visit_trait_item_const(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_trait_item_fn(&mut self, i: &'ast crate::TraitItemFn) { - visit_trait_item_fn(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_trait_item_macro(&mut self, i: &'ast crate::TraitItemMacro) { - visit_trait_item_macro(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_trait_item_type(&mut self, i: &'ast crate::TraitItemType) { - visit_trait_item_type(self, i); - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn visit_type(&mut self, i: &'ast crate::Type) { - visit_type(self, i); - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn visit_type_array(&mut self, i: &'ast crate::TypeArray) { - visit_type_array(self, i); - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn visit_type_bare_fn(&mut self, i: &'ast crate::TypeBareFn) { - visit_type_bare_fn(self, i); - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn visit_type_group(&mut self, i: &'ast crate::TypeGroup) { - visit_type_group(self, i); - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn visit_type_impl_trait(&mut self, i: &'ast crate::TypeImplTrait) { - visit_type_impl_trait(self, i); - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn visit_type_infer(&mut self, i: &'ast crate::TypeInfer) { - visit_type_infer(self, i); - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn visit_type_macro(&mut self, i: &'ast crate::TypeMacro) { - visit_type_macro(self, i); - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn visit_type_never(&mut self, i: &'ast crate::TypeNever) { - visit_type_never(self, i); - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn visit_type_param(&mut self, i: &'ast crate::TypeParam) { - visit_type_param(self, i); - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn visit_type_param_bound(&mut self, i: &'ast crate::TypeParamBound) { - visit_type_param_bound(self, i); - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn visit_type_paren(&mut self, i: &'ast crate::TypeParen) { - visit_type_paren(self, i); - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn visit_type_path(&mut self, i: &'ast crate::TypePath) { - visit_type_path(self, i); - } - 
#[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn visit_type_ptr(&mut self, i: &'ast crate::TypePtr) { - visit_type_ptr(self, i); - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn visit_type_reference(&mut self, i: &'ast crate::TypeReference) { - visit_type_reference(self, i); - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn visit_type_slice(&mut self, i: &'ast crate::TypeSlice) { - visit_type_slice(self, i); - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn visit_type_trait_object(&mut self, i: &'ast crate::TypeTraitObject) { - visit_type_trait_object(self, i); - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn visit_type_tuple(&mut self, i: &'ast crate::TypeTuple) { - visit_type_tuple(self, i); - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn visit_un_op(&mut self, i: &'ast crate::UnOp) { - visit_un_op(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_use_glob(&mut self, i: &'ast crate::UseGlob) { - visit_use_glob(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_use_group(&mut self, i: &'ast crate::UseGroup) { - visit_use_group(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_use_name(&mut self, i: &'ast crate::UseName) { - visit_use_name(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_use_path(&mut self, i: &'ast crate::UsePath) { - visit_use_path(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_use_rename(&mut self, i: &'ast crate::UseRename) { - visit_use_rename(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_use_tree(&mut self, i: &'ast crate::UseTree) { - visit_use_tree(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_variadic(&mut self, i: &'ast crate::Variadic) { - visit_variadic(self, i); - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn visit_variant(&mut self, i: &'ast crate::Variant) { - visit_variant(self, i); - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn visit_vis_restricted(&mut self, i: &'ast crate::VisRestricted) { - visit_vis_restricted(self, i); - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn visit_visibility(&mut self, i: &'ast crate::Visibility) { - visit_visibility(self, i); - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn visit_where_clause(&mut self, i: &'ast crate::WhereClause) { - visit_where_clause(self, i); - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn 
visit_where_predicate(&mut self, i: &'ast crate::WherePredicate) { - visit_where_predicate(self, i); - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn visit_abi<'ast, V>(v: &mut V, node: &'ast crate::Abi) -where - V: Visit<'ast> + ?Sized, -{ - skip!(node.extern_token); - if let Some(it) = &node.name { - v.visit_lit_str(it); - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn visit_angle_bracketed_generic_arguments<'ast, V>( - v: &mut V, - node: &'ast crate::AngleBracketedGenericArguments, -) -where - V: Visit<'ast> + ?Sized, -{ - skip!(node.colon2_token); - skip!(node.lt_token); - for el in Punctuated::pairs(&node.args) { - let it = el.value(); - v.visit_generic_argument(it); - } - skip!(node.gt_token); -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_arm<'ast, V>(v: &mut V, node: &'ast crate::Arm) -where - V: Visit<'ast> + ?Sized, -{ - for it in &node.attrs { - v.visit_attribute(it); - } - v.visit_pat(&node.pat); - if let Some(it) = &node.guard { - skip!((it).0); - v.visit_expr(&*(it).1); - } - skip!(node.fat_arrow_token); - v.visit_expr(&*node.body); - skip!(node.comma); -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn visit_assoc_const<'ast, V>(v: &mut V, node: &'ast crate::AssocConst) -where - V: Visit<'ast> + ?Sized, -{ - v.visit_ident(&node.ident); - if let Some(it) = &node.generics { - v.visit_angle_bracketed_generic_arguments(it); - } - skip!(node.eq_token); - v.visit_expr(&node.value); -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn visit_assoc_type<'ast, V>(v: &mut V, node: &'ast crate::AssocType) -where - V: Visit<'ast> + ?Sized, -{ - v.visit_ident(&node.ident); - if let Some(it) = &node.generics { - v.visit_angle_bracketed_generic_arguments(it); - } - skip!(node.eq_token); - v.visit_type(&node.ty); -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn visit_attr_style<'ast, V>(v: &mut V, node: &'ast crate::AttrStyle) -where - V: Visit<'ast> + ?Sized, -{ - match node { - crate::AttrStyle::Outer => {} - crate::AttrStyle::Inner(_binding_0) => { - skip!(_binding_0); - } - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn visit_attribute<'ast, V>(v: &mut V, node: &'ast crate::Attribute) -where - V: Visit<'ast> + ?Sized, -{ - skip!(node.pound_token); - v.visit_attr_style(&node.style); - skip!(node.bracket_token); - v.visit_meta(&node.meta); -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn visit_bare_fn_arg<'ast, V>(v: &mut V, node: &'ast crate::BareFnArg) -where - V: Visit<'ast> + ?Sized, -{ - for it in &node.attrs { - v.visit_attribute(it); - } - if let Some(it) = &node.name { - v.visit_ident(&(it).0); - skip!((it).1); - } - v.visit_type(&node.ty); -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn visit_bare_variadic<'ast, V>(v: &mut V, node: &'ast crate::BareVariadic) -where - V: Visit<'ast> + ?Sized, -{ - for it in &node.attrs { - v.visit_attribute(it); - 
} - if let Some(it) = &node.name { - v.visit_ident(&(it).0); - skip!((it).1); - } - skip!(node.dots); - skip!(node.comma); -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn visit_bin_op<'ast, V>(v: &mut V, node: &'ast crate::BinOp) -where - V: Visit<'ast> + ?Sized, -{ - match node { - crate::BinOp::Add(_binding_0) => { - skip!(_binding_0); - } - crate::BinOp::Sub(_binding_0) => { - skip!(_binding_0); - } - crate::BinOp::Mul(_binding_0) => { - skip!(_binding_0); - } - crate::BinOp::Div(_binding_0) => { - skip!(_binding_0); - } - crate::BinOp::Rem(_binding_0) => { - skip!(_binding_0); - } - crate::BinOp::And(_binding_0) => { - skip!(_binding_0); - } - crate::BinOp::Or(_binding_0) => { - skip!(_binding_0); - } - crate::BinOp::BitXor(_binding_0) => { - skip!(_binding_0); - } - crate::BinOp::BitAnd(_binding_0) => { - skip!(_binding_0); - } - crate::BinOp::BitOr(_binding_0) => { - skip!(_binding_0); - } - crate::BinOp::Shl(_binding_0) => { - skip!(_binding_0); - } - crate::BinOp::Shr(_binding_0) => { - skip!(_binding_0); - } - crate::BinOp::Eq(_binding_0) => { - skip!(_binding_0); - } - crate::BinOp::Lt(_binding_0) => { - skip!(_binding_0); - } - crate::BinOp::Le(_binding_0) => { - skip!(_binding_0); - } - crate::BinOp::Ne(_binding_0) => { - skip!(_binding_0); - } - crate::BinOp::Ge(_binding_0) => { - skip!(_binding_0); - } - crate::BinOp::Gt(_binding_0) => { - skip!(_binding_0); - } - crate::BinOp::AddAssign(_binding_0) => { - skip!(_binding_0); - } - crate::BinOp::SubAssign(_binding_0) => { - skip!(_binding_0); - } - crate::BinOp::MulAssign(_binding_0) => { - skip!(_binding_0); - } - crate::BinOp::DivAssign(_binding_0) => { - skip!(_binding_0); - } - crate::BinOp::RemAssign(_binding_0) => { - skip!(_binding_0); - } - crate::BinOp::BitXorAssign(_binding_0) => { - skip!(_binding_0); - } - crate::BinOp::BitAndAssign(_binding_0) => { - skip!(_binding_0); - } - crate::BinOp::BitOrAssign(_binding_0) => { - skip!(_binding_0); - } - crate::BinOp::ShlAssign(_binding_0) => { - skip!(_binding_0); - } - crate::BinOp::ShrAssign(_binding_0) => { - skip!(_binding_0); - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_block<'ast, V>(v: &mut V, node: &'ast crate::Block) -where - V: Visit<'ast> + ?Sized, -{ - skip!(node.brace_token); - for it in &node.stmts { - v.visit_stmt(it); - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn visit_bound_lifetimes<'ast, V>(v: &mut V, node: &'ast crate::BoundLifetimes) -where - V: Visit<'ast> + ?Sized, -{ - skip!(node.for_token); - skip!(node.lt_token); - for el in Punctuated::pairs(&node.lifetimes) { - let it = el.value(); - v.visit_generic_param(it); - } - skip!(node.gt_token); -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_captured_param<'ast, V>(v: &mut V, node: &'ast crate::CapturedParam) -where - V: Visit<'ast> + ?Sized, -{ - match node { - crate::CapturedParam::Lifetime(_binding_0) => { - v.visit_lifetime(_binding_0); - } - crate::CapturedParam::Ident(_binding_0) => { - v.visit_ident(_binding_0); - } - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn visit_const_param<'ast, V>(v: &mut V, node: &'ast crate::ConstParam) -where - V: Visit<'ast> + ?Sized, -{ - for it in &node.attrs { - v.visit_attribute(it); - } - 
skip!(node.const_token); - v.visit_ident(&node.ident); - skip!(node.colon_token); - v.visit_type(&node.ty); - skip!(node.eq_token); - if let Some(it) = &node.default { - v.visit_expr(it); - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn visit_constraint<'ast, V>(v: &mut V, node: &'ast crate::Constraint) -where - V: Visit<'ast> + ?Sized, -{ - v.visit_ident(&node.ident); - if let Some(it) = &node.generics { - v.visit_angle_bracketed_generic_arguments(it); - } - skip!(node.colon_token); - for el in Punctuated::pairs(&node.bounds) { - let it = el.value(); - v.visit_type_param_bound(it); - } -} -#[cfg(feature = "derive")] -#[cfg_attr(docsrs, doc(cfg(feature = "derive")))] -pub fn visit_data<'ast, V>(v: &mut V, node: &'ast crate::Data) -where - V: Visit<'ast> + ?Sized, -{ - match node { - crate::Data::Struct(_binding_0) => { - v.visit_data_struct(_binding_0); - } - crate::Data::Enum(_binding_0) => { - v.visit_data_enum(_binding_0); - } - crate::Data::Union(_binding_0) => { - v.visit_data_union(_binding_0); - } - } -} -#[cfg(feature = "derive")] -#[cfg_attr(docsrs, doc(cfg(feature = "derive")))] -pub fn visit_data_enum<'ast, V>(v: &mut V, node: &'ast crate::DataEnum) -where - V: Visit<'ast> + ?Sized, -{ - skip!(node.enum_token); - skip!(node.brace_token); - for el in Punctuated::pairs(&node.variants) { - let it = el.value(); - v.visit_variant(it); - } -} -#[cfg(feature = "derive")] -#[cfg_attr(docsrs, doc(cfg(feature = "derive")))] -pub fn visit_data_struct<'ast, V>(v: &mut V, node: &'ast crate::DataStruct) -where - V: Visit<'ast> + ?Sized, -{ - skip!(node.struct_token); - v.visit_fields(&node.fields); - skip!(node.semi_token); -} -#[cfg(feature = "derive")] -#[cfg_attr(docsrs, doc(cfg(feature = "derive")))] -pub fn visit_data_union<'ast, V>(v: &mut V, node: &'ast crate::DataUnion) -where - V: Visit<'ast> + ?Sized, -{ - skip!(node.union_token); - v.visit_fields_named(&node.fields); -} -#[cfg(feature = "derive")] -#[cfg_attr(docsrs, doc(cfg(feature = "derive")))] -pub fn visit_derive_input<'ast, V>(v: &mut V, node: &'ast crate::DeriveInput) -where - V: Visit<'ast> + ?Sized, -{ - for it in &node.attrs { - v.visit_attribute(it); - } - v.visit_visibility(&node.vis); - v.visit_ident(&node.ident); - v.visit_generics(&node.generics); - v.visit_data(&node.data); -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn visit_expr<'ast, V>(v: &mut V, node: &'ast crate::Expr) -where - V: Visit<'ast> + ?Sized, -{ - match node { - crate::Expr::Array(_binding_0) => { - full!(v.visit_expr_array(_binding_0)); - } - crate::Expr::Assign(_binding_0) => { - full!(v.visit_expr_assign(_binding_0)); - } - crate::Expr::Async(_binding_0) => { - full!(v.visit_expr_async(_binding_0)); - } - crate::Expr::Await(_binding_0) => { - full!(v.visit_expr_await(_binding_0)); - } - crate::Expr::Binary(_binding_0) => { - v.visit_expr_binary(_binding_0); - } - crate::Expr::Block(_binding_0) => { - full!(v.visit_expr_block(_binding_0)); - } - crate::Expr::Break(_binding_0) => { - full!(v.visit_expr_break(_binding_0)); - } - crate::Expr::Call(_binding_0) => { - v.visit_expr_call(_binding_0); - } - crate::Expr::Cast(_binding_0) => { - v.visit_expr_cast(_binding_0); - } - crate::Expr::Closure(_binding_0) => { - full!(v.visit_expr_closure(_binding_0)); - } - crate::Expr::Const(_binding_0) => { - full!(v.visit_expr_const(_binding_0)); - } - crate::Expr::Continue(_binding_0) 
=> { - full!(v.visit_expr_continue(_binding_0)); - } - crate::Expr::Field(_binding_0) => { - v.visit_expr_field(_binding_0); - } - crate::Expr::ForLoop(_binding_0) => { - full!(v.visit_expr_for_loop(_binding_0)); - } - crate::Expr::Group(_binding_0) => { - v.visit_expr_group(_binding_0); - } - crate::Expr::If(_binding_0) => { - full!(v.visit_expr_if(_binding_0)); - } - crate::Expr::Index(_binding_0) => { - v.visit_expr_index(_binding_0); - } - crate::Expr::Infer(_binding_0) => { - full!(v.visit_expr_infer(_binding_0)); - } - crate::Expr::Let(_binding_0) => { - full!(v.visit_expr_let(_binding_0)); - } - crate::Expr::Lit(_binding_0) => { - v.visit_expr_lit(_binding_0); - } - crate::Expr::Loop(_binding_0) => { - full!(v.visit_expr_loop(_binding_0)); - } - crate::Expr::Macro(_binding_0) => { - v.visit_expr_macro(_binding_0); - } - crate::Expr::Match(_binding_0) => { - full!(v.visit_expr_match(_binding_0)); - } - crate::Expr::MethodCall(_binding_0) => { - v.visit_expr_method_call(_binding_0); - } - crate::Expr::Paren(_binding_0) => { - v.visit_expr_paren(_binding_0); - } - crate::Expr::Path(_binding_0) => { - v.visit_expr_path(_binding_0); - } - crate::Expr::Range(_binding_0) => { - full!(v.visit_expr_range(_binding_0)); - } - crate::Expr::RawAddr(_binding_0) => { - full!(v.visit_expr_raw_addr(_binding_0)); - } - crate::Expr::Reference(_binding_0) => { - v.visit_expr_reference(_binding_0); - } - crate::Expr::Repeat(_binding_0) => { - full!(v.visit_expr_repeat(_binding_0)); - } - crate::Expr::Return(_binding_0) => { - full!(v.visit_expr_return(_binding_0)); - } - crate::Expr::Struct(_binding_0) => { - v.visit_expr_struct(_binding_0); - } - crate::Expr::Try(_binding_0) => { - full!(v.visit_expr_try(_binding_0)); - } - crate::Expr::TryBlock(_binding_0) => { - full!(v.visit_expr_try_block(_binding_0)); - } - crate::Expr::Tuple(_binding_0) => { - v.visit_expr_tuple(_binding_0); - } - crate::Expr::Unary(_binding_0) => { - v.visit_expr_unary(_binding_0); - } - crate::Expr::Unsafe(_binding_0) => { - full!(v.visit_expr_unsafe(_binding_0)); - } - crate::Expr::Verbatim(_binding_0) => { - v.visit_token_stream(_binding_0); - } - crate::Expr::While(_binding_0) => { - full!(v.visit_expr_while(_binding_0)); - } - crate::Expr::Yield(_binding_0) => { - full!(v.visit_expr_yield(_binding_0)); - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_expr_array<'ast, V>(v: &mut V, node: &'ast crate::ExprArray) -where - V: Visit<'ast> + ?Sized, -{ - for it in &node.attrs { - v.visit_attribute(it); - } - skip!(node.bracket_token); - for el in Punctuated::pairs(&node.elems) { - let it = el.value(); - v.visit_expr(it); - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_expr_assign<'ast, V>(v: &mut V, node: &'ast crate::ExprAssign) -where - V: Visit<'ast> + ?Sized, -{ - for it in &node.attrs { - v.visit_attribute(it); - } - v.visit_expr(&*node.left); - skip!(node.eq_token); - v.visit_expr(&*node.right); -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_expr_async<'ast, V>(v: &mut V, node: &'ast crate::ExprAsync) -where - V: Visit<'ast> + ?Sized, -{ - for it in &node.attrs { - v.visit_attribute(it); - } - skip!(node.async_token); - skip!(node.capture); - v.visit_block(&node.block); -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_expr_await<'ast, V>(v: &mut V, node: &'ast crate::ExprAwait) -where - V: Visit<'ast> + ?Sized, -{ - for it in &node.attrs { 
- v.visit_attribute(it); - } - v.visit_expr(&*node.base); - skip!(node.dot_token); - skip!(node.await_token); -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn visit_expr_binary<'ast, V>(v: &mut V, node: &'ast crate::ExprBinary) -where - V: Visit<'ast> + ?Sized, -{ - for it in &node.attrs { - v.visit_attribute(it); - } - v.visit_expr(&*node.left); - v.visit_bin_op(&node.op); - v.visit_expr(&*node.right); -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_expr_block<'ast, V>(v: &mut V, node: &'ast crate::ExprBlock) -where - V: Visit<'ast> + ?Sized, -{ - for it in &node.attrs { - v.visit_attribute(it); - } - if let Some(it) = &node.label { - v.visit_label(it); - } - v.visit_block(&node.block); -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_expr_break<'ast, V>(v: &mut V, node: &'ast crate::ExprBreak) -where - V: Visit<'ast> + ?Sized, -{ - for it in &node.attrs { - v.visit_attribute(it); - } - skip!(node.break_token); - if let Some(it) = &node.label { - v.visit_lifetime(it); - } - if let Some(it) = &node.expr { - v.visit_expr(&**it); - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn visit_expr_call<'ast, V>(v: &mut V, node: &'ast crate::ExprCall) -where - V: Visit<'ast> + ?Sized, -{ - for it in &node.attrs { - v.visit_attribute(it); - } - v.visit_expr(&*node.func); - skip!(node.paren_token); - for el in Punctuated::pairs(&node.args) { - let it = el.value(); - v.visit_expr(it); - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn visit_expr_cast<'ast, V>(v: &mut V, node: &'ast crate::ExprCast) -where - V: Visit<'ast> + ?Sized, -{ - for it in &node.attrs { - v.visit_attribute(it); - } - v.visit_expr(&*node.expr); - skip!(node.as_token); - v.visit_type(&*node.ty); -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_expr_closure<'ast, V>(v: &mut V, node: &'ast crate::ExprClosure) -where - V: Visit<'ast> + ?Sized, -{ - for it in &node.attrs { - v.visit_attribute(it); - } - if let Some(it) = &node.lifetimes { - v.visit_bound_lifetimes(it); - } - skip!(node.constness); - skip!(node.movability); - skip!(node.asyncness); - skip!(node.capture); - skip!(node.or1_token); - for el in Punctuated::pairs(&node.inputs) { - let it = el.value(); - v.visit_pat(it); - } - skip!(node.or2_token); - v.visit_return_type(&node.output); - v.visit_expr(&*node.body); -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_expr_const<'ast, V>(v: &mut V, node: &'ast crate::ExprConst) -where - V: Visit<'ast> + ?Sized, -{ - for it in &node.attrs { - v.visit_attribute(it); - } - skip!(node.const_token); - v.visit_block(&node.block); -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_expr_continue<'ast, V>(v: &mut V, node: &'ast crate::ExprContinue) -where - V: Visit<'ast> + ?Sized, -{ - for it in &node.attrs { - v.visit_attribute(it); - } - skip!(node.continue_token); - if let Some(it) = &node.label { - v.visit_lifetime(it); - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn visit_expr_field<'ast, V>(v: &mut V, node: &'ast crate::ExprField) -where - V: Visit<'ast> + ?Sized, -{ - for 
it in &node.attrs { - v.visit_attribute(it); - } - v.visit_expr(&*node.base); - skip!(node.dot_token); - v.visit_member(&node.member); -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_expr_for_loop<'ast, V>(v: &mut V, node: &'ast crate::ExprForLoop) -where - V: Visit<'ast> + ?Sized, -{ - for it in &node.attrs { - v.visit_attribute(it); - } - if let Some(it) = &node.label { - v.visit_label(it); - } - skip!(node.for_token); - v.visit_pat(&*node.pat); - skip!(node.in_token); - v.visit_expr(&*node.expr); - v.visit_block(&node.body); -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn visit_expr_group<'ast, V>(v: &mut V, node: &'ast crate::ExprGroup) -where - V: Visit<'ast> + ?Sized, -{ - for it in &node.attrs { - v.visit_attribute(it); - } - skip!(node.group_token); - v.visit_expr(&*node.expr); -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_expr_if<'ast, V>(v: &mut V, node: &'ast crate::ExprIf) -where - V: Visit<'ast> + ?Sized, -{ - for it in &node.attrs { - v.visit_attribute(it); - } - skip!(node.if_token); - v.visit_expr(&*node.cond); - v.visit_block(&node.then_branch); - if let Some(it) = &node.else_branch { - skip!((it).0); - v.visit_expr(&*(it).1); - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn visit_expr_index<'ast, V>(v: &mut V, node: &'ast crate::ExprIndex) -where - V: Visit<'ast> + ?Sized, -{ - for it in &node.attrs { - v.visit_attribute(it); - } - v.visit_expr(&*node.expr); - skip!(node.bracket_token); - v.visit_expr(&*node.index); -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_expr_infer<'ast, V>(v: &mut V, node: &'ast crate::ExprInfer) -where - V: Visit<'ast> + ?Sized, -{ - for it in &node.attrs { - v.visit_attribute(it); - } - skip!(node.underscore_token); -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_expr_let<'ast, V>(v: &mut V, node: &'ast crate::ExprLet) -where - V: Visit<'ast> + ?Sized, -{ - for it in &node.attrs { - v.visit_attribute(it); - } - skip!(node.let_token); - v.visit_pat(&*node.pat); - skip!(node.eq_token); - v.visit_expr(&*node.expr); -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn visit_expr_lit<'ast, V>(v: &mut V, node: &'ast crate::ExprLit) -where - V: Visit<'ast> + ?Sized, -{ - for it in &node.attrs { - v.visit_attribute(it); - } - v.visit_lit(&node.lit); -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_expr_loop<'ast, V>(v: &mut V, node: &'ast crate::ExprLoop) -where - V: Visit<'ast> + ?Sized, -{ - for it in &node.attrs { - v.visit_attribute(it); - } - if let Some(it) = &node.label { - v.visit_label(it); - } - skip!(node.loop_token); - v.visit_block(&node.body); -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn visit_expr_macro<'ast, V>(v: &mut V, node: &'ast crate::ExprMacro) -where - V: Visit<'ast> + ?Sized, -{ - for it in &node.attrs { - v.visit_attribute(it); - } - v.visit_macro(&node.mac); -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_expr_match<'ast, V>(v: &mut V, node: &'ast crate::ExprMatch) -where - V: Visit<'ast> + ?Sized, -{ - for it in 
&node.attrs { - v.visit_attribute(it); - } - skip!(node.match_token); - v.visit_expr(&*node.expr); - skip!(node.brace_token); - for it in &node.arms { - v.visit_arm(it); - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn visit_expr_method_call<'ast, V>(v: &mut V, node: &'ast crate::ExprMethodCall) -where - V: Visit<'ast> + ?Sized, -{ - for it in &node.attrs { - v.visit_attribute(it); - } - v.visit_expr(&*node.receiver); - skip!(node.dot_token); - v.visit_ident(&node.method); - if let Some(it) = &node.turbofish { - v.visit_angle_bracketed_generic_arguments(it); - } - skip!(node.paren_token); - for el in Punctuated::pairs(&node.args) { - let it = el.value(); - v.visit_expr(it); - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn visit_expr_paren<'ast, V>(v: &mut V, node: &'ast crate::ExprParen) -where - V: Visit<'ast> + ?Sized, -{ - for it in &node.attrs { - v.visit_attribute(it); - } - skip!(node.paren_token); - v.visit_expr(&*node.expr); -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn visit_expr_path<'ast, V>(v: &mut V, node: &'ast crate::ExprPath) -where - V: Visit<'ast> + ?Sized, -{ - for it in &node.attrs { - v.visit_attribute(it); - } - if let Some(it) = &node.qself { - v.visit_qself(it); - } - v.visit_path(&node.path); -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_expr_range<'ast, V>(v: &mut V, node: &'ast crate::ExprRange) -where - V: Visit<'ast> + ?Sized, -{ - for it in &node.attrs { - v.visit_attribute(it); - } - if let Some(it) = &node.start { - v.visit_expr(&**it); - } - v.visit_range_limits(&node.limits); - if let Some(it) = &node.end { - v.visit_expr(&**it); - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_expr_raw_addr<'ast, V>(v: &mut V, node: &'ast crate::ExprRawAddr) -where - V: Visit<'ast> + ?Sized, -{ - for it in &node.attrs { - v.visit_attribute(it); - } - skip!(node.and_token); - skip!(node.raw); - v.visit_pointer_mutability(&node.mutability); - v.visit_expr(&*node.expr); -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn visit_expr_reference<'ast, V>(v: &mut V, node: &'ast crate::ExprReference) -where - V: Visit<'ast> + ?Sized, -{ - for it in &node.attrs { - v.visit_attribute(it); - } - skip!(node.and_token); - skip!(node.mutability); - v.visit_expr(&*node.expr); -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_expr_repeat<'ast, V>(v: &mut V, node: &'ast crate::ExprRepeat) -where - V: Visit<'ast> + ?Sized, -{ - for it in &node.attrs { - v.visit_attribute(it); - } - skip!(node.bracket_token); - v.visit_expr(&*node.expr); - skip!(node.semi_token); - v.visit_expr(&*node.len); -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_expr_return<'ast, V>(v: &mut V, node: &'ast crate::ExprReturn) -where - V: Visit<'ast> + ?Sized, -{ - for it in &node.attrs { - v.visit_attribute(it); - } - skip!(node.return_token); - if let Some(it) = &node.expr { - v.visit_expr(&**it); - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn visit_expr_struct<'ast, V>(v: &mut V, node: &'ast 
crate::ExprStruct) -where - V: Visit<'ast> + ?Sized, -{ - for it in &node.attrs { - v.visit_attribute(it); - } - if let Some(it) = &node.qself { - v.visit_qself(it); - } - v.visit_path(&node.path); - skip!(node.brace_token); - for el in Punctuated::pairs(&node.fields) { - let it = el.value(); - v.visit_field_value(it); - } - skip!(node.dot2_token); - if let Some(it) = &node.rest { - v.visit_expr(&**it); - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_expr_try<'ast, V>(v: &mut V, node: &'ast crate::ExprTry) -where - V: Visit<'ast> + ?Sized, -{ - for it in &node.attrs { - v.visit_attribute(it); - } - v.visit_expr(&*node.expr); - skip!(node.question_token); -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_expr_try_block<'ast, V>(v: &mut V, node: &'ast crate::ExprTryBlock) -where - V: Visit<'ast> + ?Sized, -{ - for it in &node.attrs { - v.visit_attribute(it); - } - skip!(node.try_token); - v.visit_block(&node.block); -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn visit_expr_tuple<'ast, V>(v: &mut V, node: &'ast crate::ExprTuple) -where - V: Visit<'ast> + ?Sized, -{ - for it in &node.attrs { - v.visit_attribute(it); - } - skip!(node.paren_token); - for el in Punctuated::pairs(&node.elems) { - let it = el.value(); - v.visit_expr(it); - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn visit_expr_unary<'ast, V>(v: &mut V, node: &'ast crate::ExprUnary) -where - V: Visit<'ast> + ?Sized, -{ - for it in &node.attrs { - v.visit_attribute(it); - } - v.visit_un_op(&node.op); - v.visit_expr(&*node.expr); -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_expr_unsafe<'ast, V>(v: &mut V, node: &'ast crate::ExprUnsafe) -where - V: Visit<'ast> + ?Sized, -{ - for it in &node.attrs { - v.visit_attribute(it); - } - skip!(node.unsafe_token); - v.visit_block(&node.block); -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_expr_while<'ast, V>(v: &mut V, node: &'ast crate::ExprWhile) -where - V: Visit<'ast> + ?Sized, -{ - for it in &node.attrs { - v.visit_attribute(it); - } - if let Some(it) = &node.label { - v.visit_label(it); - } - skip!(node.while_token); - v.visit_expr(&*node.cond); - v.visit_block(&node.body); -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_expr_yield<'ast, V>(v: &mut V, node: &'ast crate::ExprYield) -where - V: Visit<'ast> + ?Sized, -{ - for it in &node.attrs { - v.visit_attribute(it); - } - skip!(node.yield_token); - if let Some(it) = &node.expr { - v.visit_expr(&**it); - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn visit_field<'ast, V>(v: &mut V, node: &'ast crate::Field) -where - V: Visit<'ast> + ?Sized, -{ - for it in &node.attrs { - v.visit_attribute(it); - } - v.visit_visibility(&node.vis); - v.visit_field_mutability(&node.mutability); - if let Some(it) = &node.ident { - v.visit_ident(it); - } - skip!(node.colon_token); - v.visit_type(&node.ty); -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn visit_field_mutability<'ast, V>(v: &mut V, node: &'ast crate::FieldMutability) -where - V: Visit<'ast> + ?Sized, -{ - match node { - 
crate::FieldMutability::None => {} - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_field_pat<'ast, V>(v: &mut V, node: &'ast crate::FieldPat) -where - V: Visit<'ast> + ?Sized, -{ - for it in &node.attrs { - v.visit_attribute(it); - } - v.visit_member(&node.member); - skip!(node.colon_token); - v.visit_pat(&*node.pat); -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn visit_field_value<'ast, V>(v: &mut V, node: &'ast crate::FieldValue) -where - V: Visit<'ast> + ?Sized, -{ - for it in &node.attrs { - v.visit_attribute(it); - } - v.visit_member(&node.member); - skip!(node.colon_token); - v.visit_expr(&node.expr); -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn visit_fields<'ast, V>(v: &mut V, node: &'ast crate::Fields) -where - V: Visit<'ast> + ?Sized, -{ - match node { - crate::Fields::Named(_binding_0) => { - v.visit_fields_named(_binding_0); - } - crate::Fields::Unnamed(_binding_0) => { - v.visit_fields_unnamed(_binding_0); - } - crate::Fields::Unit => {} - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn visit_fields_named<'ast, V>(v: &mut V, node: &'ast crate::FieldsNamed) -where - V: Visit<'ast> + ?Sized, -{ - skip!(node.brace_token); - for el in Punctuated::pairs(&node.named) { - let it = el.value(); - v.visit_field(it); - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn visit_fields_unnamed<'ast, V>(v: &mut V, node: &'ast crate::FieldsUnnamed) -where - V: Visit<'ast> + ?Sized, -{ - skip!(node.paren_token); - for el in Punctuated::pairs(&node.unnamed) { - let it = el.value(); - v.visit_field(it); - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_file<'ast, V>(v: &mut V, node: &'ast crate::File) -where - V: Visit<'ast> + ?Sized, -{ - skip!(node.shebang); - for it in &node.attrs { - v.visit_attribute(it); - } - for it in &node.items { - v.visit_item(it); - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_fn_arg<'ast, V>(v: &mut V, node: &'ast crate::FnArg) -where - V: Visit<'ast> + ?Sized, -{ - match node { - crate::FnArg::Receiver(_binding_0) => { - v.visit_receiver(_binding_0); - } - crate::FnArg::Typed(_binding_0) => { - v.visit_pat_type(_binding_0); - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_foreign_item<'ast, V>(v: &mut V, node: &'ast crate::ForeignItem) -where - V: Visit<'ast> + ?Sized, -{ - match node { - crate::ForeignItem::Fn(_binding_0) => { - v.visit_foreign_item_fn(_binding_0); - } - crate::ForeignItem::Static(_binding_0) => { - v.visit_foreign_item_static(_binding_0); - } - crate::ForeignItem::Type(_binding_0) => { - v.visit_foreign_item_type(_binding_0); - } - crate::ForeignItem::Macro(_binding_0) => { - v.visit_foreign_item_macro(_binding_0); - } - crate::ForeignItem::Verbatim(_binding_0) => { - v.visit_token_stream(_binding_0); - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_foreign_item_fn<'ast, V>(v: &mut V, node: &'ast crate::ForeignItemFn) -where - V: Visit<'ast> + ?Sized, -{ - for it in &node.attrs { - v.visit_attribute(it); - } - v.visit_visibility(&node.vis); - 
v.visit_signature(&node.sig); - skip!(node.semi_token); -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_foreign_item_macro<'ast, V>(v: &mut V, node: &'ast crate::ForeignItemMacro) -where - V: Visit<'ast> + ?Sized, -{ - for it in &node.attrs { - v.visit_attribute(it); - } - v.visit_macro(&node.mac); - skip!(node.semi_token); -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_foreign_item_static<'ast, V>( - v: &mut V, - node: &'ast crate::ForeignItemStatic, -) -where - V: Visit<'ast> + ?Sized, -{ - for it in &node.attrs { - v.visit_attribute(it); - } - v.visit_visibility(&node.vis); - skip!(node.static_token); - v.visit_static_mutability(&node.mutability); - v.visit_ident(&node.ident); - skip!(node.colon_token); - v.visit_type(&*node.ty); - skip!(node.semi_token); -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_foreign_item_type<'ast, V>(v: &mut V, node: &'ast crate::ForeignItemType) -where - V: Visit<'ast> + ?Sized, -{ - for it in &node.attrs { - v.visit_attribute(it); - } - v.visit_visibility(&node.vis); - skip!(node.type_token); - v.visit_ident(&node.ident); - v.visit_generics(&node.generics); - skip!(node.semi_token); -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn visit_generic_argument<'ast, V>(v: &mut V, node: &'ast crate::GenericArgument) -where - V: Visit<'ast> + ?Sized, -{ - match node { - crate::GenericArgument::Lifetime(_binding_0) => { - v.visit_lifetime(_binding_0); - } - crate::GenericArgument::Type(_binding_0) => { - v.visit_type(_binding_0); - } - crate::GenericArgument::Const(_binding_0) => { - v.visit_expr(_binding_0); - } - crate::GenericArgument::AssocType(_binding_0) => { - v.visit_assoc_type(_binding_0); - } - crate::GenericArgument::AssocConst(_binding_0) => { - v.visit_assoc_const(_binding_0); - } - crate::GenericArgument::Constraint(_binding_0) => { - v.visit_constraint(_binding_0); - } - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn visit_generic_param<'ast, V>(v: &mut V, node: &'ast crate::GenericParam) -where - V: Visit<'ast> + ?Sized, -{ - match node { - crate::GenericParam::Lifetime(_binding_0) => { - v.visit_lifetime_param(_binding_0); - } - crate::GenericParam::Type(_binding_0) => { - v.visit_type_param(_binding_0); - } - crate::GenericParam::Const(_binding_0) => { - v.visit_const_param(_binding_0); - } - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn visit_generics<'ast, V>(v: &mut V, node: &'ast crate::Generics) -where - V: Visit<'ast> + ?Sized, -{ - skip!(node.lt_token); - for el in Punctuated::pairs(&node.params) { - let it = el.value(); - v.visit_generic_param(it); - } - skip!(node.gt_token); - if let Some(it) = &node.where_clause { - v.visit_where_clause(it); - } -} -pub fn visit_ident<'ast, V>(v: &mut V, node: &'ast proc_macro2::Ident) -where - V: Visit<'ast> + ?Sized, -{ - v.visit_span(&node.span()); -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_impl_item<'ast, V>(v: &mut V, node: &'ast crate::ImplItem) -where - V: Visit<'ast> + ?Sized, -{ - match node { - crate::ImplItem::Const(_binding_0) => { - v.visit_impl_item_const(_binding_0); - } - crate::ImplItem::Fn(_binding_0) => { - v.visit_impl_item_fn(_binding_0); - 
} - crate::ImplItem::Type(_binding_0) => { - v.visit_impl_item_type(_binding_0); - } - crate::ImplItem::Macro(_binding_0) => { - v.visit_impl_item_macro(_binding_0); - } - crate::ImplItem::Verbatim(_binding_0) => { - v.visit_token_stream(_binding_0); - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_impl_item_const<'ast, V>(v: &mut V, node: &'ast crate::ImplItemConst) -where - V: Visit<'ast> + ?Sized, -{ - for it in &node.attrs { - v.visit_attribute(it); - } - v.visit_visibility(&node.vis); - skip!(node.defaultness); - skip!(node.const_token); - v.visit_ident(&node.ident); - v.visit_generics(&node.generics); - skip!(node.colon_token); - v.visit_type(&node.ty); - skip!(node.eq_token); - v.visit_expr(&node.expr); - skip!(node.semi_token); -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_impl_item_fn<'ast, V>(v: &mut V, node: &'ast crate::ImplItemFn) -where - V: Visit<'ast> + ?Sized, -{ - for it in &node.attrs { - v.visit_attribute(it); - } - v.visit_visibility(&node.vis); - skip!(node.defaultness); - v.visit_signature(&node.sig); - v.visit_block(&node.block); -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_impl_item_macro<'ast, V>(v: &mut V, node: &'ast crate::ImplItemMacro) -where - V: Visit<'ast> + ?Sized, -{ - for it in &node.attrs { - v.visit_attribute(it); - } - v.visit_macro(&node.mac); - skip!(node.semi_token); -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_impl_item_type<'ast, V>(v: &mut V, node: &'ast crate::ImplItemType) -where - V: Visit<'ast> + ?Sized, -{ - for it in &node.attrs { - v.visit_attribute(it); - } - v.visit_visibility(&node.vis); - skip!(node.defaultness); - skip!(node.type_token); - v.visit_ident(&node.ident); - v.visit_generics(&node.generics); - skip!(node.eq_token); - v.visit_type(&node.ty); - skip!(node.semi_token); -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_impl_restriction<'ast, V>(v: &mut V, node: &'ast crate::ImplRestriction) -where - V: Visit<'ast> + ?Sized, -{ - match *node {} -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn visit_index<'ast, V>(v: &mut V, node: &'ast crate::Index) -where - V: Visit<'ast> + ?Sized, -{ - skip!(node.index); - v.visit_span(&node.span); -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_item<'ast, V>(v: &mut V, node: &'ast crate::Item) -where - V: Visit<'ast> + ?Sized, -{ - match node { - crate::Item::Const(_binding_0) => { - v.visit_item_const(_binding_0); - } - crate::Item::Enum(_binding_0) => { - v.visit_item_enum(_binding_0); - } - crate::Item::ExternCrate(_binding_0) => { - v.visit_item_extern_crate(_binding_0); - } - crate::Item::Fn(_binding_0) => { - v.visit_item_fn(_binding_0); - } - crate::Item::ForeignMod(_binding_0) => { - v.visit_item_foreign_mod(_binding_0); - } - crate::Item::Impl(_binding_0) => { - v.visit_item_impl(_binding_0); - } - crate::Item::Macro(_binding_0) => { - v.visit_item_macro(_binding_0); - } - crate::Item::Mod(_binding_0) => { - v.visit_item_mod(_binding_0); - } - crate::Item::Static(_binding_0) => { - v.visit_item_static(_binding_0); - } - crate::Item::Struct(_binding_0) => { - v.visit_item_struct(_binding_0); - } - crate::Item::Trait(_binding_0) => { - v.visit_item_trait(_binding_0); - } - crate::Item::TraitAlias(_binding_0) => { - 
v.visit_item_trait_alias(_binding_0); - } - crate::Item::Type(_binding_0) => { - v.visit_item_type(_binding_0); - } - crate::Item::Union(_binding_0) => { - v.visit_item_union(_binding_0); - } - crate::Item::Use(_binding_0) => { - v.visit_item_use(_binding_0); - } - crate::Item::Verbatim(_binding_0) => { - v.visit_token_stream(_binding_0); - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_item_const<'ast, V>(v: &mut V, node: &'ast crate::ItemConst) -where - V: Visit<'ast> + ?Sized, -{ - for it in &node.attrs { - v.visit_attribute(it); - } - v.visit_visibility(&node.vis); - skip!(node.const_token); - v.visit_ident(&node.ident); - v.visit_generics(&node.generics); - skip!(node.colon_token); - v.visit_type(&*node.ty); - skip!(node.eq_token); - v.visit_expr(&*node.expr); - skip!(node.semi_token); -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_item_enum<'ast, V>(v: &mut V, node: &'ast crate::ItemEnum) -where - V: Visit<'ast> + ?Sized, -{ - for it in &node.attrs { - v.visit_attribute(it); - } - v.visit_visibility(&node.vis); - skip!(node.enum_token); - v.visit_ident(&node.ident); - v.visit_generics(&node.generics); - skip!(node.brace_token); - for el in Punctuated::pairs(&node.variants) { - let it = el.value(); - v.visit_variant(it); - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_item_extern_crate<'ast, V>(v: &mut V, node: &'ast crate::ItemExternCrate) -where - V: Visit<'ast> + ?Sized, -{ - for it in &node.attrs { - v.visit_attribute(it); - } - v.visit_visibility(&node.vis); - skip!(node.extern_token); - skip!(node.crate_token); - v.visit_ident(&node.ident); - if let Some(it) = &node.rename { - skip!((it).0); - v.visit_ident(&(it).1); - } - skip!(node.semi_token); -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_item_fn<'ast, V>(v: &mut V, node: &'ast crate::ItemFn) -where - V: Visit<'ast> + ?Sized, -{ - for it in &node.attrs { - v.visit_attribute(it); - } - v.visit_visibility(&node.vis); - v.visit_signature(&node.sig); - v.visit_block(&*node.block); -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_item_foreign_mod<'ast, V>(v: &mut V, node: &'ast crate::ItemForeignMod) -where - V: Visit<'ast> + ?Sized, -{ - for it in &node.attrs { - v.visit_attribute(it); - } - skip!(node.unsafety); - v.visit_abi(&node.abi); - skip!(node.brace_token); - for it in &node.items { - v.visit_foreign_item(it); - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_item_impl<'ast, V>(v: &mut V, node: &'ast crate::ItemImpl) -where - V: Visit<'ast> + ?Sized, -{ - for it in &node.attrs { - v.visit_attribute(it); - } - skip!(node.defaultness); - skip!(node.unsafety); - skip!(node.impl_token); - v.visit_generics(&node.generics); - if let Some(it) = &node.trait_ { - skip!((it).0); - v.visit_path(&(it).1); - skip!((it).2); - } - v.visit_type(&*node.self_ty); - skip!(node.brace_token); - for it in &node.items { - v.visit_impl_item(it); - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_item_macro<'ast, V>(v: &mut V, node: &'ast crate::ItemMacro) -where - V: Visit<'ast> + ?Sized, -{ - for it in &node.attrs { - v.visit_attribute(it); - } - if let Some(it) = &node.ident { - v.visit_ident(it); - } - v.visit_macro(&node.mac); - skip!(node.semi_token); -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = 
"full")))] -pub fn visit_item_mod<'ast, V>(v: &mut V, node: &'ast crate::ItemMod) -where - V: Visit<'ast> + ?Sized, -{ - for it in &node.attrs { - v.visit_attribute(it); - } - v.visit_visibility(&node.vis); - skip!(node.unsafety); - skip!(node.mod_token); - v.visit_ident(&node.ident); - if let Some(it) = &node.content { - skip!((it).0); - for it in &(it).1 { - v.visit_item(it); - } - } - skip!(node.semi); -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_item_static<'ast, V>(v: &mut V, node: &'ast crate::ItemStatic) -where - V: Visit<'ast> + ?Sized, -{ - for it in &node.attrs { - v.visit_attribute(it); - } - v.visit_visibility(&node.vis); - skip!(node.static_token); - v.visit_static_mutability(&node.mutability); - v.visit_ident(&node.ident); - skip!(node.colon_token); - v.visit_type(&*node.ty); - skip!(node.eq_token); - v.visit_expr(&*node.expr); - skip!(node.semi_token); -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_item_struct<'ast, V>(v: &mut V, node: &'ast crate::ItemStruct) -where - V: Visit<'ast> + ?Sized, -{ - for it in &node.attrs { - v.visit_attribute(it); - } - v.visit_visibility(&node.vis); - skip!(node.struct_token); - v.visit_ident(&node.ident); - v.visit_generics(&node.generics); - v.visit_fields(&node.fields); - skip!(node.semi_token); -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_item_trait<'ast, V>(v: &mut V, node: &'ast crate::ItemTrait) -where - V: Visit<'ast> + ?Sized, -{ - for it in &node.attrs { - v.visit_attribute(it); - } - v.visit_visibility(&node.vis); - skip!(node.unsafety); - skip!(node.auto_token); - if let Some(it) = &node.restriction { - v.visit_impl_restriction(it); - } - skip!(node.trait_token); - v.visit_ident(&node.ident); - v.visit_generics(&node.generics); - skip!(node.colon_token); - for el in Punctuated::pairs(&node.supertraits) { - let it = el.value(); - v.visit_type_param_bound(it); - } - skip!(node.brace_token); - for it in &node.items { - v.visit_trait_item(it); - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_item_trait_alias<'ast, V>(v: &mut V, node: &'ast crate::ItemTraitAlias) -where - V: Visit<'ast> + ?Sized, -{ - for it in &node.attrs { - v.visit_attribute(it); - } - v.visit_visibility(&node.vis); - skip!(node.trait_token); - v.visit_ident(&node.ident); - v.visit_generics(&node.generics); - skip!(node.eq_token); - for el in Punctuated::pairs(&node.bounds) { - let it = el.value(); - v.visit_type_param_bound(it); - } - skip!(node.semi_token); -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_item_type<'ast, V>(v: &mut V, node: &'ast crate::ItemType) -where - V: Visit<'ast> + ?Sized, -{ - for it in &node.attrs { - v.visit_attribute(it); - } - v.visit_visibility(&node.vis); - skip!(node.type_token); - v.visit_ident(&node.ident); - v.visit_generics(&node.generics); - skip!(node.eq_token); - v.visit_type(&*node.ty); - skip!(node.semi_token); -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_item_union<'ast, V>(v: &mut V, node: &'ast crate::ItemUnion) -where - V: Visit<'ast> + ?Sized, -{ - for it in &node.attrs { - v.visit_attribute(it); - } - v.visit_visibility(&node.vis); - skip!(node.union_token); - v.visit_ident(&node.ident); - v.visit_generics(&node.generics); - v.visit_fields_named(&node.fields); -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn 
visit_item_use<'ast, V>(v: &mut V, node: &'ast crate::ItemUse) -where - V: Visit<'ast> + ?Sized, -{ - for it in &node.attrs { - v.visit_attribute(it); - } - v.visit_visibility(&node.vis); - skip!(node.use_token); - skip!(node.leading_colon); - v.visit_use_tree(&node.tree); - skip!(node.semi_token); -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_label<'ast, V>(v: &mut V, node: &'ast crate::Label) -where - V: Visit<'ast> + ?Sized, -{ - v.visit_lifetime(&node.name); - skip!(node.colon_token); -} -pub fn visit_lifetime<'ast, V>(v: &mut V, node: &'ast crate::Lifetime) -where - V: Visit<'ast> + ?Sized, -{ - v.visit_span(&node.apostrophe); - v.visit_ident(&node.ident); -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn visit_lifetime_param<'ast, V>(v: &mut V, node: &'ast crate::LifetimeParam) -where - V: Visit<'ast> + ?Sized, -{ - for it in &node.attrs { - v.visit_attribute(it); - } - v.visit_lifetime(&node.lifetime); - skip!(node.colon_token); - for el in Punctuated::pairs(&node.bounds) { - let it = el.value(); - v.visit_lifetime(it); - } -} -pub fn visit_lit<'ast, V>(v: &mut V, node: &'ast crate::Lit) -where - V: Visit<'ast> + ?Sized, -{ - match node { - crate::Lit::Str(_binding_0) => { - v.visit_lit_str(_binding_0); - } - crate::Lit::ByteStr(_binding_0) => { - v.visit_lit_byte_str(_binding_0); - } - crate::Lit::CStr(_binding_0) => { - v.visit_lit_cstr(_binding_0); - } - crate::Lit::Byte(_binding_0) => { - v.visit_lit_byte(_binding_0); - } - crate::Lit::Char(_binding_0) => { - v.visit_lit_char(_binding_0); - } - crate::Lit::Int(_binding_0) => { - v.visit_lit_int(_binding_0); - } - crate::Lit::Float(_binding_0) => { - v.visit_lit_float(_binding_0); - } - crate::Lit::Bool(_binding_0) => { - v.visit_lit_bool(_binding_0); - } - crate::Lit::Verbatim(_binding_0) => { - skip!(_binding_0); - } - } -} -pub fn visit_lit_bool<'ast, V>(v: &mut V, node: &'ast crate::LitBool) -where - V: Visit<'ast> + ?Sized, -{ - skip!(node.value); - v.visit_span(&node.span); -} -pub fn visit_lit_byte<'ast, V>(v: &mut V, node: &'ast crate::LitByte) -where - V: Visit<'ast> + ?Sized, -{} -pub fn visit_lit_byte_str<'ast, V>(v: &mut V, node: &'ast crate::LitByteStr) -where - V: Visit<'ast> + ?Sized, -{} -pub fn visit_lit_cstr<'ast, V>(v: &mut V, node: &'ast crate::LitCStr) -where - V: Visit<'ast> + ?Sized, -{} -pub fn visit_lit_char<'ast, V>(v: &mut V, node: &'ast crate::LitChar) -where - V: Visit<'ast> + ?Sized, -{} -pub fn visit_lit_float<'ast, V>(v: &mut V, node: &'ast crate::LitFloat) -where - V: Visit<'ast> + ?Sized, -{} -pub fn visit_lit_int<'ast, V>(v: &mut V, node: &'ast crate::LitInt) -where - V: Visit<'ast> + ?Sized, -{} -pub fn visit_lit_str<'ast, V>(v: &mut V, node: &'ast crate::LitStr) -where - V: Visit<'ast> + ?Sized, -{} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_local<'ast, V>(v: &mut V, node: &'ast crate::Local) -where - V: Visit<'ast> + ?Sized, -{ - for it in &node.attrs { - v.visit_attribute(it); - } - skip!(node.let_token); - v.visit_pat(&node.pat); - if let Some(it) = &node.init { - v.visit_local_init(it); - } - skip!(node.semi_token); -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_local_init<'ast, V>(v: &mut V, node: &'ast crate::LocalInit) -where - V: Visit<'ast> + ?Sized, -{ - skip!(node.eq_token); - v.visit_expr(&*node.expr); - if let Some(it) = &node.diverge { - skip!((it).0); - 
v.visit_expr(&*(it).1); - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn visit_macro<'ast, V>(v: &mut V, node: &'ast crate::Macro) -where - V: Visit<'ast> + ?Sized, -{ - v.visit_path(&node.path); - skip!(node.bang_token); - v.visit_macro_delimiter(&node.delimiter); - v.visit_token_stream(&node.tokens); -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn visit_macro_delimiter<'ast, V>(v: &mut V, node: &'ast crate::MacroDelimiter) -where - V: Visit<'ast> + ?Sized, -{ - match node { - crate::MacroDelimiter::Paren(_binding_0) => { - skip!(_binding_0); - } - crate::MacroDelimiter::Brace(_binding_0) => { - skip!(_binding_0); - } - crate::MacroDelimiter::Bracket(_binding_0) => { - skip!(_binding_0); - } - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn visit_member<'ast, V>(v: &mut V, node: &'ast crate::Member) -where - V: Visit<'ast> + ?Sized, -{ - match node { - crate::Member::Named(_binding_0) => { - v.visit_ident(_binding_0); - } - crate::Member::Unnamed(_binding_0) => { - v.visit_index(_binding_0); - } - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn visit_meta<'ast, V>(v: &mut V, node: &'ast crate::Meta) -where - V: Visit<'ast> + ?Sized, -{ - match node { - crate::Meta::Path(_binding_0) => { - v.visit_path(_binding_0); - } - crate::Meta::List(_binding_0) => { - v.visit_meta_list(_binding_0); - } - crate::Meta::NameValue(_binding_0) => { - v.visit_meta_name_value(_binding_0); - } - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn visit_meta_list<'ast, V>(v: &mut V, node: &'ast crate::MetaList) -where - V: Visit<'ast> + ?Sized, -{ - v.visit_path(&node.path); - v.visit_macro_delimiter(&node.delimiter); - v.visit_token_stream(&node.tokens); -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn visit_meta_name_value<'ast, V>(v: &mut V, node: &'ast crate::MetaNameValue) -where - V: Visit<'ast> + ?Sized, -{ - v.visit_path(&node.path); - skip!(node.eq_token); - v.visit_expr(&node.value); -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn visit_parenthesized_generic_arguments<'ast, V>( - v: &mut V, - node: &'ast crate::ParenthesizedGenericArguments, -) -where - V: Visit<'ast> + ?Sized, -{ - skip!(node.paren_token); - for el in Punctuated::pairs(&node.inputs) { - let it = el.value(); - v.visit_type(it); - } - v.visit_return_type(&node.output); -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_pat<'ast, V>(v: &mut V, node: &'ast crate::Pat) -where - V: Visit<'ast> + ?Sized, -{ - match node { - crate::Pat::Const(_binding_0) => { - v.visit_expr_const(_binding_0); - } - crate::Pat::Ident(_binding_0) => { - v.visit_pat_ident(_binding_0); - } - crate::Pat::Lit(_binding_0) => { - v.visit_expr_lit(_binding_0); - } - crate::Pat::Macro(_binding_0) => { - v.visit_expr_macro(_binding_0); - } - crate::Pat::Or(_binding_0) => { - v.visit_pat_or(_binding_0); - } - crate::Pat::Paren(_binding_0) => { - v.visit_pat_paren(_binding_0); - } - crate::Pat::Path(_binding_0) => { - 
v.visit_expr_path(_binding_0); - } - crate::Pat::Range(_binding_0) => { - v.visit_expr_range(_binding_0); - } - crate::Pat::Reference(_binding_0) => { - v.visit_pat_reference(_binding_0); - } - crate::Pat::Rest(_binding_0) => { - v.visit_pat_rest(_binding_0); - } - crate::Pat::Slice(_binding_0) => { - v.visit_pat_slice(_binding_0); - } - crate::Pat::Struct(_binding_0) => { - v.visit_pat_struct(_binding_0); - } - crate::Pat::Tuple(_binding_0) => { - v.visit_pat_tuple(_binding_0); - } - crate::Pat::TupleStruct(_binding_0) => { - v.visit_pat_tuple_struct(_binding_0); - } - crate::Pat::Type(_binding_0) => { - v.visit_pat_type(_binding_0); - } - crate::Pat::Verbatim(_binding_0) => { - v.visit_token_stream(_binding_0); - } - crate::Pat::Wild(_binding_0) => { - v.visit_pat_wild(_binding_0); - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_pat_ident<'ast, V>(v: &mut V, node: &'ast crate::PatIdent) -where - V: Visit<'ast> + ?Sized, -{ - for it in &node.attrs { - v.visit_attribute(it); - } - skip!(node.by_ref); - skip!(node.mutability); - v.visit_ident(&node.ident); - if let Some(it) = &node.subpat { - skip!((it).0); - v.visit_pat(&*(it).1); - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_pat_or<'ast, V>(v: &mut V, node: &'ast crate::PatOr) -where - V: Visit<'ast> + ?Sized, -{ - for it in &node.attrs { - v.visit_attribute(it); - } - skip!(node.leading_vert); - for el in Punctuated::pairs(&node.cases) { - let it = el.value(); - v.visit_pat(it); - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_pat_paren<'ast, V>(v: &mut V, node: &'ast crate::PatParen) -where - V: Visit<'ast> + ?Sized, -{ - for it in &node.attrs { - v.visit_attribute(it); - } - skip!(node.paren_token); - v.visit_pat(&*node.pat); -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_pat_reference<'ast, V>(v: &mut V, node: &'ast crate::PatReference) -where - V: Visit<'ast> + ?Sized, -{ - for it in &node.attrs { - v.visit_attribute(it); - } - skip!(node.and_token); - skip!(node.mutability); - v.visit_pat(&*node.pat); -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_pat_rest<'ast, V>(v: &mut V, node: &'ast crate::PatRest) -where - V: Visit<'ast> + ?Sized, -{ - for it in &node.attrs { - v.visit_attribute(it); - } - skip!(node.dot2_token); -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_pat_slice<'ast, V>(v: &mut V, node: &'ast crate::PatSlice) -where - V: Visit<'ast> + ?Sized, -{ - for it in &node.attrs { - v.visit_attribute(it); - } - skip!(node.bracket_token); - for el in Punctuated::pairs(&node.elems) { - let it = el.value(); - v.visit_pat(it); - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_pat_struct<'ast, V>(v: &mut V, node: &'ast crate::PatStruct) -where - V: Visit<'ast> + ?Sized, -{ - for it in &node.attrs { - v.visit_attribute(it); - } - if let Some(it) = &node.qself { - v.visit_qself(it); - } - v.visit_path(&node.path); - skip!(node.brace_token); - for el in Punctuated::pairs(&node.fields) { - let it = el.value(); - v.visit_field_pat(it); - } - if let Some(it) = &node.rest { - v.visit_pat_rest(it); - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_pat_tuple<'ast, V>(v: &mut V, node: &'ast crate::PatTuple) -where - V: Visit<'ast> + ?Sized, -{ - for it in &node.attrs { - 
v.visit_attribute(it); - } - skip!(node.paren_token); - for el in Punctuated::pairs(&node.elems) { - let it = el.value(); - v.visit_pat(it); - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_pat_tuple_struct<'ast, V>(v: &mut V, node: &'ast crate::PatTupleStruct) -where - V: Visit<'ast> + ?Sized, -{ - for it in &node.attrs { - v.visit_attribute(it); - } - if let Some(it) = &node.qself { - v.visit_qself(it); - } - v.visit_path(&node.path); - skip!(node.paren_token); - for el in Punctuated::pairs(&node.elems) { - let it = el.value(); - v.visit_pat(it); - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_pat_type<'ast, V>(v: &mut V, node: &'ast crate::PatType) -where - V: Visit<'ast> + ?Sized, -{ - for it in &node.attrs { - v.visit_attribute(it); - } - v.visit_pat(&*node.pat); - skip!(node.colon_token); - v.visit_type(&*node.ty); -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_pat_wild<'ast, V>(v: &mut V, node: &'ast crate::PatWild) -where - V: Visit<'ast> + ?Sized, -{ - for it in &node.attrs { - v.visit_attribute(it); - } - skip!(node.underscore_token); -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn visit_path<'ast, V>(v: &mut V, node: &'ast crate::Path) -where - V: Visit<'ast> + ?Sized, -{ - skip!(node.leading_colon); - for el in Punctuated::pairs(&node.segments) { - let it = el.value(); - v.visit_path_segment(it); - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn visit_path_arguments<'ast, V>(v: &mut V, node: &'ast crate::PathArguments) -where - V: Visit<'ast> + ?Sized, -{ - match node { - crate::PathArguments::None => {} - crate::PathArguments::AngleBracketed(_binding_0) => { - v.visit_angle_bracketed_generic_arguments(_binding_0); - } - crate::PathArguments::Parenthesized(_binding_0) => { - v.visit_parenthesized_generic_arguments(_binding_0); - } - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn visit_path_segment<'ast, V>(v: &mut V, node: &'ast crate::PathSegment) -where - V: Visit<'ast> + ?Sized, -{ - v.visit_ident(&node.ident); - v.visit_path_arguments(&node.arguments); -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_pointer_mutability<'ast, V>(v: &mut V, node: &'ast crate::PointerMutability) -where - V: Visit<'ast> + ?Sized, -{ - match node { - crate::PointerMutability::Const(_binding_0) => { - skip!(_binding_0); - } - crate::PointerMutability::Mut(_binding_0) => { - skip!(_binding_0); - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_precise_capture<'ast, V>(v: &mut V, node: &'ast crate::PreciseCapture) -where - V: Visit<'ast> + ?Sized, -{ - skip!(node.use_token); - skip!(node.lt_token); - for el in Punctuated::pairs(&node.params) { - let it = el.value(); - v.visit_captured_param(it); - } - skip!(node.gt_token); -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn visit_predicate_lifetime<'ast, V>(v: &mut V, node: &'ast crate::PredicateLifetime) -where - V: Visit<'ast> + ?Sized, -{ - v.visit_lifetime(&node.lifetime); - skip!(node.colon_token); - for el in Punctuated::pairs(&node.bounds) { - let it = el.value(); - 
v.visit_lifetime(it); - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn visit_predicate_type<'ast, V>(v: &mut V, node: &'ast crate::PredicateType) -where - V: Visit<'ast> + ?Sized, -{ - if let Some(it) = &node.lifetimes { - v.visit_bound_lifetimes(it); - } - v.visit_type(&node.bounded_ty); - skip!(node.colon_token); - for el in Punctuated::pairs(&node.bounds) { - let it = el.value(); - v.visit_type_param_bound(it); - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn visit_qself<'ast, V>(v: &mut V, node: &'ast crate::QSelf) -where - V: Visit<'ast> + ?Sized, -{ - skip!(node.lt_token); - v.visit_type(&*node.ty); - skip!(node.position); - skip!(node.as_token); - skip!(node.gt_token); -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_range_limits<'ast, V>(v: &mut V, node: &'ast crate::RangeLimits) -where - V: Visit<'ast> + ?Sized, -{ - match node { - crate::RangeLimits::HalfOpen(_binding_0) => { - skip!(_binding_0); - } - crate::RangeLimits::Closed(_binding_0) => { - skip!(_binding_0); - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_receiver<'ast, V>(v: &mut V, node: &'ast crate::Receiver) -where - V: Visit<'ast> + ?Sized, -{ - for it in &node.attrs { - v.visit_attribute(it); - } - if let Some(it) = &node.reference { - skip!((it).0); - if let Some(it) = &(it).1 { - v.visit_lifetime(it); - } - } - skip!(node.mutability); - skip!(node.self_token); - skip!(node.colon_token); - v.visit_type(&*node.ty); -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn visit_return_type<'ast, V>(v: &mut V, node: &'ast crate::ReturnType) -where - V: Visit<'ast> + ?Sized, -{ - match node { - crate::ReturnType::Default => {} - crate::ReturnType::Type(_binding_0, _binding_1) => { - skip!(_binding_0); - v.visit_type(&**_binding_1); - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_signature<'ast, V>(v: &mut V, node: &'ast crate::Signature) -where - V: Visit<'ast> + ?Sized, -{ - skip!(node.constness); - skip!(node.asyncness); - skip!(node.unsafety); - if let Some(it) = &node.abi { - v.visit_abi(it); - } - skip!(node.fn_token); - v.visit_ident(&node.ident); - v.visit_generics(&node.generics); - skip!(node.paren_token); - for el in Punctuated::pairs(&node.inputs) { - let it = el.value(); - v.visit_fn_arg(it); - } - if let Some(it) = &node.variadic { - v.visit_variadic(it); - } - v.visit_return_type(&node.output); -} -pub fn visit_span<'ast, V>(v: &mut V, node: &proc_macro2::Span) -where - V: Visit<'ast> + ?Sized, -{} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_static_mutability<'ast, V>(v: &mut V, node: &'ast crate::StaticMutability) -where - V: Visit<'ast> + ?Sized, -{ - match node { - crate::StaticMutability::Mut(_binding_0) => { - skip!(_binding_0); - } - crate::StaticMutability::None => {} - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_stmt<'ast, V>(v: &mut V, node: &'ast crate::Stmt) -where - V: Visit<'ast> + ?Sized, -{ - match node { - crate::Stmt::Local(_binding_0) => { - v.visit_local(_binding_0); - } - crate::Stmt::Item(_binding_0) => { - v.visit_item(_binding_0); - } - crate::Stmt::Expr(_binding_0, _binding_1) => { - 
v.visit_expr(_binding_0); - skip!(_binding_1); - } - crate::Stmt::Macro(_binding_0) => { - v.visit_stmt_macro(_binding_0); - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_stmt_macro<'ast, V>(v: &mut V, node: &'ast crate::StmtMacro) -where - V: Visit<'ast> + ?Sized, -{ - for it in &node.attrs { - v.visit_attribute(it); - } - v.visit_macro(&node.mac); - skip!(node.semi_token); -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn visit_trait_bound<'ast, V>(v: &mut V, node: &'ast crate::TraitBound) -where - V: Visit<'ast> + ?Sized, -{ - skip!(node.paren_token); - v.visit_trait_bound_modifier(&node.modifier); - if let Some(it) = &node.lifetimes { - v.visit_bound_lifetimes(it); - } - v.visit_path(&node.path); -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn visit_trait_bound_modifier<'ast, V>( - v: &mut V, - node: &'ast crate::TraitBoundModifier, -) -where - V: Visit<'ast> + ?Sized, -{ - match node { - crate::TraitBoundModifier::None => {} - crate::TraitBoundModifier::Maybe(_binding_0) => { - skip!(_binding_0); - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_trait_item<'ast, V>(v: &mut V, node: &'ast crate::TraitItem) -where - V: Visit<'ast> + ?Sized, -{ - match node { - crate::TraitItem::Const(_binding_0) => { - v.visit_trait_item_const(_binding_0); - } - crate::TraitItem::Fn(_binding_0) => { - v.visit_trait_item_fn(_binding_0); - } - crate::TraitItem::Type(_binding_0) => { - v.visit_trait_item_type(_binding_0); - } - crate::TraitItem::Macro(_binding_0) => { - v.visit_trait_item_macro(_binding_0); - } - crate::TraitItem::Verbatim(_binding_0) => { - v.visit_token_stream(_binding_0); - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_trait_item_const<'ast, V>(v: &mut V, node: &'ast crate::TraitItemConst) -where - V: Visit<'ast> + ?Sized, -{ - for it in &node.attrs { - v.visit_attribute(it); - } - skip!(node.const_token); - v.visit_ident(&node.ident); - v.visit_generics(&node.generics); - skip!(node.colon_token); - v.visit_type(&node.ty); - if let Some(it) = &node.default { - skip!((it).0); - v.visit_expr(&(it).1); - } - skip!(node.semi_token); -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_trait_item_fn<'ast, V>(v: &mut V, node: &'ast crate::TraitItemFn) -where - V: Visit<'ast> + ?Sized, -{ - for it in &node.attrs { - v.visit_attribute(it); - } - v.visit_signature(&node.sig); - if let Some(it) = &node.default { - v.visit_block(it); - } - skip!(node.semi_token); -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_trait_item_macro<'ast, V>(v: &mut V, node: &'ast crate::TraitItemMacro) -where - V: Visit<'ast> + ?Sized, -{ - for it in &node.attrs { - v.visit_attribute(it); - } - v.visit_macro(&node.mac); - skip!(node.semi_token); -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_trait_item_type<'ast, V>(v: &mut V, node: &'ast crate::TraitItemType) -where - V: Visit<'ast> + ?Sized, -{ - for it in &node.attrs { - v.visit_attribute(it); - } - skip!(node.type_token); - v.visit_ident(&node.ident); - v.visit_generics(&node.generics); - skip!(node.colon_token); - for el in Punctuated::pairs(&node.bounds) { - let it = el.value(); - v.visit_type_param_bound(it); - } - 
if let Some(it) = &node.default { - skip!((it).0); - v.visit_type(&(it).1); - } - skip!(node.semi_token); -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn visit_type<'ast, V>(v: &mut V, node: &'ast crate::Type) -where - V: Visit<'ast> + ?Sized, -{ - match node { - crate::Type::Array(_binding_0) => { - v.visit_type_array(_binding_0); - } - crate::Type::BareFn(_binding_0) => { - v.visit_type_bare_fn(_binding_0); - } - crate::Type::Group(_binding_0) => { - v.visit_type_group(_binding_0); - } - crate::Type::ImplTrait(_binding_0) => { - v.visit_type_impl_trait(_binding_0); - } - crate::Type::Infer(_binding_0) => { - v.visit_type_infer(_binding_0); - } - crate::Type::Macro(_binding_0) => { - v.visit_type_macro(_binding_0); - } - crate::Type::Never(_binding_0) => { - v.visit_type_never(_binding_0); - } - crate::Type::Paren(_binding_0) => { - v.visit_type_paren(_binding_0); - } - crate::Type::Path(_binding_0) => { - v.visit_type_path(_binding_0); - } - crate::Type::Ptr(_binding_0) => { - v.visit_type_ptr(_binding_0); - } - crate::Type::Reference(_binding_0) => { - v.visit_type_reference(_binding_0); - } - crate::Type::Slice(_binding_0) => { - v.visit_type_slice(_binding_0); - } - crate::Type::TraitObject(_binding_0) => { - v.visit_type_trait_object(_binding_0); - } - crate::Type::Tuple(_binding_0) => { - v.visit_type_tuple(_binding_0); - } - crate::Type::Verbatim(_binding_0) => { - v.visit_token_stream(_binding_0); - } - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn visit_type_array<'ast, V>(v: &mut V, node: &'ast crate::TypeArray) -where - V: Visit<'ast> + ?Sized, -{ - skip!(node.bracket_token); - v.visit_type(&*node.elem); - skip!(node.semi_token); - v.visit_expr(&node.len); -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn visit_type_bare_fn<'ast, V>(v: &mut V, node: &'ast crate::TypeBareFn) -where - V: Visit<'ast> + ?Sized, -{ - if let Some(it) = &node.lifetimes { - v.visit_bound_lifetimes(it); - } - skip!(node.unsafety); - if let Some(it) = &node.abi { - v.visit_abi(it); - } - skip!(node.fn_token); - skip!(node.paren_token); - for el in Punctuated::pairs(&node.inputs) { - let it = el.value(); - v.visit_bare_fn_arg(it); - } - if let Some(it) = &node.variadic { - v.visit_bare_variadic(it); - } - v.visit_return_type(&node.output); -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn visit_type_group<'ast, V>(v: &mut V, node: &'ast crate::TypeGroup) -where - V: Visit<'ast> + ?Sized, -{ - skip!(node.group_token); - v.visit_type(&*node.elem); -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn visit_type_impl_trait<'ast, V>(v: &mut V, node: &'ast crate::TypeImplTrait) -where - V: Visit<'ast> + ?Sized, -{ - skip!(node.impl_token); - for el in Punctuated::pairs(&node.bounds) { - let it = el.value(); - v.visit_type_param_bound(it); - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn visit_type_infer<'ast, V>(v: &mut V, node: &'ast crate::TypeInfer) -where - V: Visit<'ast> + ?Sized, -{ - skip!(node.underscore_token); -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, 
doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn visit_type_macro<'ast, V>(v: &mut V, node: &'ast crate::TypeMacro) -where - V: Visit<'ast> + ?Sized, -{ - v.visit_macro(&node.mac); -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn visit_type_never<'ast, V>(v: &mut V, node: &'ast crate::TypeNever) -where - V: Visit<'ast> + ?Sized, -{ - skip!(node.bang_token); -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn visit_type_param<'ast, V>(v: &mut V, node: &'ast crate::TypeParam) -where - V: Visit<'ast> + ?Sized, -{ - for it in &node.attrs { - v.visit_attribute(it); - } - v.visit_ident(&node.ident); - skip!(node.colon_token); - for el in Punctuated::pairs(&node.bounds) { - let it = el.value(); - v.visit_type_param_bound(it); - } - skip!(node.eq_token); - if let Some(it) = &node.default { - v.visit_type(it); - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn visit_type_param_bound<'ast, V>(v: &mut V, node: &'ast crate::TypeParamBound) -where - V: Visit<'ast> + ?Sized, -{ - match node { - crate::TypeParamBound::Trait(_binding_0) => { - v.visit_trait_bound(_binding_0); - } - crate::TypeParamBound::Lifetime(_binding_0) => { - v.visit_lifetime(_binding_0); - } - crate::TypeParamBound::PreciseCapture(_binding_0) => { - full!(v.visit_precise_capture(_binding_0)); - } - crate::TypeParamBound::Verbatim(_binding_0) => { - v.visit_token_stream(_binding_0); - } - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn visit_type_paren<'ast, V>(v: &mut V, node: &'ast crate::TypeParen) -where - V: Visit<'ast> + ?Sized, -{ - skip!(node.paren_token); - v.visit_type(&*node.elem); -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn visit_type_path<'ast, V>(v: &mut V, node: &'ast crate::TypePath) -where - V: Visit<'ast> + ?Sized, -{ - if let Some(it) = &node.qself { - v.visit_qself(it); - } - v.visit_path(&node.path); -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn visit_type_ptr<'ast, V>(v: &mut V, node: &'ast crate::TypePtr) -where - V: Visit<'ast> + ?Sized, -{ - skip!(node.star_token); - skip!(node.const_token); - skip!(node.mutability); - v.visit_type(&*node.elem); -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn visit_type_reference<'ast, V>(v: &mut V, node: &'ast crate::TypeReference) -where - V: Visit<'ast> + ?Sized, -{ - skip!(node.and_token); - if let Some(it) = &node.lifetime { - v.visit_lifetime(it); - } - skip!(node.mutability); - v.visit_type(&*node.elem); -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn visit_type_slice<'ast, V>(v: &mut V, node: &'ast crate::TypeSlice) -where - V: Visit<'ast> + ?Sized, -{ - skip!(node.bracket_token); - v.visit_type(&*node.elem); -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn visit_type_trait_object<'ast, V>(v: &mut V, node: &'ast crate::TypeTraitObject) -where - V: Visit<'ast> + ?Sized, -{ - 
skip!(node.dyn_token); - for el in Punctuated::pairs(&node.bounds) { - let it = el.value(); - v.visit_type_param_bound(it); - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn visit_type_tuple<'ast, V>(v: &mut V, node: &'ast crate::TypeTuple) -where - V: Visit<'ast> + ?Sized, -{ - skip!(node.paren_token); - for el in Punctuated::pairs(&node.elems) { - let it = el.value(); - v.visit_type(it); - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn visit_un_op<'ast, V>(v: &mut V, node: &'ast crate::UnOp) -where - V: Visit<'ast> + ?Sized, -{ - match node { - crate::UnOp::Deref(_binding_0) => { - skip!(_binding_0); - } - crate::UnOp::Not(_binding_0) => { - skip!(_binding_0); - } - crate::UnOp::Neg(_binding_0) => { - skip!(_binding_0); - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_use_glob<'ast, V>(v: &mut V, node: &'ast crate::UseGlob) -where - V: Visit<'ast> + ?Sized, -{ - skip!(node.star_token); -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_use_group<'ast, V>(v: &mut V, node: &'ast crate::UseGroup) -where - V: Visit<'ast> + ?Sized, -{ - skip!(node.brace_token); - for el in Punctuated::pairs(&node.items) { - let it = el.value(); - v.visit_use_tree(it); - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_use_name<'ast, V>(v: &mut V, node: &'ast crate::UseName) -where - V: Visit<'ast> + ?Sized, -{ - v.visit_ident(&node.ident); -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_use_path<'ast, V>(v: &mut V, node: &'ast crate::UsePath) -where - V: Visit<'ast> + ?Sized, -{ - v.visit_ident(&node.ident); - skip!(node.colon2_token); - v.visit_use_tree(&*node.tree); -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_use_rename<'ast, V>(v: &mut V, node: &'ast crate::UseRename) -where - V: Visit<'ast> + ?Sized, -{ - v.visit_ident(&node.ident); - skip!(node.as_token); - v.visit_ident(&node.rename); -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_use_tree<'ast, V>(v: &mut V, node: &'ast crate::UseTree) -where - V: Visit<'ast> + ?Sized, -{ - match node { - crate::UseTree::Path(_binding_0) => { - v.visit_use_path(_binding_0); - } - crate::UseTree::Name(_binding_0) => { - v.visit_use_name(_binding_0); - } - crate::UseTree::Rename(_binding_0) => { - v.visit_use_rename(_binding_0); - } - crate::UseTree::Glob(_binding_0) => { - v.visit_use_glob(_binding_0); - } - crate::UseTree::Group(_binding_0) => { - v.visit_use_group(_binding_0); - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_variadic<'ast, V>(v: &mut V, node: &'ast crate::Variadic) -where - V: Visit<'ast> + ?Sized, -{ - for it in &node.attrs { - v.visit_attribute(it); - } - if let Some(it) = &node.pat { - v.visit_pat(&*(it).0); - skip!((it).1); - } - skip!(node.dots); - skip!(node.comma); -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn visit_variant<'ast, V>(v: &mut V, node: &'ast crate::Variant) -where - V: Visit<'ast> + ?Sized, -{ - for it in &node.attrs { - v.visit_attribute(it); - } - v.visit_ident(&node.ident); - v.visit_fields(&node.fields); - if let Some(it) = &node.discriminant 
{ - skip!((it).0); - v.visit_expr(&(it).1); - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn visit_vis_restricted<'ast, V>(v: &mut V, node: &'ast crate::VisRestricted) -where - V: Visit<'ast> + ?Sized, -{ - skip!(node.pub_token); - skip!(node.paren_token); - skip!(node.in_token); - v.visit_path(&*node.path); -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn visit_visibility<'ast, V>(v: &mut V, node: &'ast crate::Visibility) -where - V: Visit<'ast> + ?Sized, -{ - match node { - crate::Visibility::Public(_binding_0) => { - skip!(_binding_0); - } - crate::Visibility::Restricted(_binding_0) => { - v.visit_vis_restricted(_binding_0); - } - crate::Visibility::Inherited => {} - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn visit_where_clause<'ast, V>(v: &mut V, node: &'ast crate::WhereClause) -where - V: Visit<'ast> + ?Sized, -{ - skip!(node.where_token); - for el in Punctuated::pairs(&node.predicates) { - let it = el.value(); - v.visit_where_predicate(it); - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn visit_where_predicate<'ast, V>(v: &mut V, node: &'ast crate::WherePredicate) -where - V: Visit<'ast> + ?Sized, -{ - match node { - crate::WherePredicate::Lifetime(_binding_0) => { - v.visit_predicate_lifetime(_binding_0); - } - crate::WherePredicate::Type(_binding_0) => { - v.visit_predicate_type(_binding_0); - } - } -} diff --git a/vendor/syn/src/gen/visit_mut.rs b/vendor/syn/src/gen/visit_mut.rs deleted file mode 100644 index 2bbd6895db7573..00000000000000 --- a/vendor/syn/src/gen/visit_mut.rs +++ /dev/null @@ -1,3759 +0,0 @@ -// This file is @generated by syn-internal-codegen. -// It is not intended for manual editing. - -#![allow(unused_variables)] -#![allow(clippy::needless_pass_by_ref_mut)] -#[cfg(any(feature = "full", feature = "derive"))] -use crate::punctuated::Punctuated; -#[cfg(feature = "full")] -macro_rules! full { - ($e:expr) => { - $e - }; -} -#[cfg(all(feature = "derive", not(feature = "full")))] -macro_rules! full { - ($e:expr) => { - unreachable!() - }; -} -macro_rules! skip { - ($($tt:tt)*) => {}; -} -/// Syntax tree traversal to mutate an exclusive borrow of a syntax tree in -/// place. -/// -/// See the [module documentation] for details. 
-/// -/// [module documentation]: self -pub trait VisitMut { - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn visit_abi_mut(&mut self, i: &mut crate::Abi) { - visit_abi_mut(self, i); - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn visit_angle_bracketed_generic_arguments_mut( - &mut self, - i: &mut crate::AngleBracketedGenericArguments, - ) { - visit_angle_bracketed_generic_arguments_mut(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_arm_mut(&mut self, i: &mut crate::Arm) { - visit_arm_mut(self, i); - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn visit_assoc_const_mut(&mut self, i: &mut crate::AssocConst) { - visit_assoc_const_mut(self, i); - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn visit_assoc_type_mut(&mut self, i: &mut crate::AssocType) { - visit_assoc_type_mut(self, i); - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn visit_attr_style_mut(&mut self, i: &mut crate::AttrStyle) { - visit_attr_style_mut(self, i); - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn visit_attribute_mut(&mut self, i: &mut crate::Attribute) { - visit_attribute_mut(self, i); - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn visit_attributes_mut(&mut self, i: &mut Vec<crate::Attribute>) { - for attr in i { - self.visit_attribute_mut(attr); - } - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn visit_bare_fn_arg_mut(&mut self, i: &mut crate::BareFnArg) { - visit_bare_fn_arg_mut(self, i); - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn visit_bare_variadic_mut(&mut self, i: &mut crate::BareVariadic) { - visit_bare_variadic_mut(self, i); - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn visit_bin_op_mut(&mut self, i: &mut crate::BinOp) { - visit_bin_op_mut(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_block_mut(&mut self, i: &mut crate::Block) { - visit_block_mut(self, i); - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn visit_bound_lifetimes_mut(&mut self, i: &mut crate::BoundLifetimes) { - visit_bound_lifetimes_mut(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_captured_param_mut(&mut self, i: &mut crate::CapturedParam) { - visit_captured_param_mut(self, i); - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn visit_const_param_mut(&mut self, i: &mut crate::ConstParam) { - visit_const_param_mut(self, i); - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - 
fn visit_constraint_mut(&mut self, i: &mut crate::Constraint) { - visit_constraint_mut(self, i); - } - #[cfg(feature = "derive")] - #[cfg_attr(docsrs, doc(cfg(feature = "derive")))] - fn visit_data_mut(&mut self, i: &mut crate::Data) { - visit_data_mut(self, i); - } - #[cfg(feature = "derive")] - #[cfg_attr(docsrs, doc(cfg(feature = "derive")))] - fn visit_data_enum_mut(&mut self, i: &mut crate::DataEnum) { - visit_data_enum_mut(self, i); - } - #[cfg(feature = "derive")] - #[cfg_attr(docsrs, doc(cfg(feature = "derive")))] - fn visit_data_struct_mut(&mut self, i: &mut crate::DataStruct) { - visit_data_struct_mut(self, i); - } - #[cfg(feature = "derive")] - #[cfg_attr(docsrs, doc(cfg(feature = "derive")))] - fn visit_data_union_mut(&mut self, i: &mut crate::DataUnion) { - visit_data_union_mut(self, i); - } - #[cfg(feature = "derive")] - #[cfg_attr(docsrs, doc(cfg(feature = "derive")))] - fn visit_derive_input_mut(&mut self, i: &mut crate::DeriveInput) { - visit_derive_input_mut(self, i); - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn visit_expr_mut(&mut self, i: &mut crate::Expr) { - visit_expr_mut(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_expr_array_mut(&mut self, i: &mut crate::ExprArray) { - visit_expr_array_mut(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_expr_assign_mut(&mut self, i: &mut crate::ExprAssign) { - visit_expr_assign_mut(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_expr_async_mut(&mut self, i: &mut crate::ExprAsync) { - visit_expr_async_mut(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_expr_await_mut(&mut self, i: &mut crate::ExprAwait) { - visit_expr_await_mut(self, i); - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn visit_expr_binary_mut(&mut self, i: &mut crate::ExprBinary) { - visit_expr_binary_mut(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_expr_block_mut(&mut self, i: &mut crate::ExprBlock) { - visit_expr_block_mut(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_expr_break_mut(&mut self, i: &mut crate::ExprBreak) { - visit_expr_break_mut(self, i); - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn visit_expr_call_mut(&mut self, i: &mut crate::ExprCall) { - visit_expr_call_mut(self, i); - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn visit_expr_cast_mut(&mut self, i: &mut crate::ExprCast) { - visit_expr_cast_mut(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_expr_closure_mut(&mut self, i: &mut crate::ExprClosure) { - visit_expr_closure_mut(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_expr_const_mut(&mut self, i: &mut crate::ExprConst) { - visit_expr_const_mut(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_expr_continue_mut(&mut self, i: &mut crate::ExprContinue) { - visit_expr_continue_mut(self, i); - } - #[cfg(any(feature = 
"derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn visit_expr_field_mut(&mut self, i: &mut crate::ExprField) { - visit_expr_field_mut(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_expr_for_loop_mut(&mut self, i: &mut crate::ExprForLoop) { - visit_expr_for_loop_mut(self, i); - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn visit_expr_group_mut(&mut self, i: &mut crate::ExprGroup) { - visit_expr_group_mut(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_expr_if_mut(&mut self, i: &mut crate::ExprIf) { - visit_expr_if_mut(self, i); - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn visit_expr_index_mut(&mut self, i: &mut crate::ExprIndex) { - visit_expr_index_mut(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_expr_infer_mut(&mut self, i: &mut crate::ExprInfer) { - visit_expr_infer_mut(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_expr_let_mut(&mut self, i: &mut crate::ExprLet) { - visit_expr_let_mut(self, i); - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn visit_expr_lit_mut(&mut self, i: &mut crate::ExprLit) { - visit_expr_lit_mut(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_expr_loop_mut(&mut self, i: &mut crate::ExprLoop) { - visit_expr_loop_mut(self, i); - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn visit_expr_macro_mut(&mut self, i: &mut crate::ExprMacro) { - visit_expr_macro_mut(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_expr_match_mut(&mut self, i: &mut crate::ExprMatch) { - visit_expr_match_mut(self, i); - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn visit_expr_method_call_mut(&mut self, i: &mut crate::ExprMethodCall) { - visit_expr_method_call_mut(self, i); - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn visit_expr_paren_mut(&mut self, i: &mut crate::ExprParen) { - visit_expr_paren_mut(self, i); - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn visit_expr_path_mut(&mut self, i: &mut crate::ExprPath) { - visit_expr_path_mut(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_expr_range_mut(&mut self, i: &mut crate::ExprRange) { - visit_expr_range_mut(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_expr_raw_addr_mut(&mut self, i: &mut crate::ExprRawAddr) { - visit_expr_raw_addr_mut(self, i); - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn visit_expr_reference_mut(&mut self, i: &mut crate::ExprReference) { - visit_expr_reference_mut(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = 
"full")))] - fn visit_expr_repeat_mut(&mut self, i: &mut crate::ExprRepeat) { - visit_expr_repeat_mut(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_expr_return_mut(&mut self, i: &mut crate::ExprReturn) { - visit_expr_return_mut(self, i); - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn visit_expr_struct_mut(&mut self, i: &mut crate::ExprStruct) { - visit_expr_struct_mut(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_expr_try_mut(&mut self, i: &mut crate::ExprTry) { - visit_expr_try_mut(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_expr_try_block_mut(&mut self, i: &mut crate::ExprTryBlock) { - visit_expr_try_block_mut(self, i); - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn visit_expr_tuple_mut(&mut self, i: &mut crate::ExprTuple) { - visit_expr_tuple_mut(self, i); - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn visit_expr_unary_mut(&mut self, i: &mut crate::ExprUnary) { - visit_expr_unary_mut(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_expr_unsafe_mut(&mut self, i: &mut crate::ExprUnsafe) { - visit_expr_unsafe_mut(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_expr_while_mut(&mut self, i: &mut crate::ExprWhile) { - visit_expr_while_mut(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_expr_yield_mut(&mut self, i: &mut crate::ExprYield) { - visit_expr_yield_mut(self, i); - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn visit_field_mut(&mut self, i: &mut crate::Field) { - visit_field_mut(self, i); - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn visit_field_mutability_mut(&mut self, i: &mut crate::FieldMutability) { - visit_field_mutability_mut(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_field_pat_mut(&mut self, i: &mut crate::FieldPat) { - visit_field_pat_mut(self, i); - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn visit_field_value_mut(&mut self, i: &mut crate::FieldValue) { - visit_field_value_mut(self, i); - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn visit_fields_mut(&mut self, i: &mut crate::Fields) { - visit_fields_mut(self, i); - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn visit_fields_named_mut(&mut self, i: &mut crate::FieldsNamed) { - visit_fields_named_mut(self, i); - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn visit_fields_unnamed_mut(&mut self, i: &mut crate::FieldsUnnamed) { - visit_fields_unnamed_mut(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn 
visit_file_mut(&mut self, i: &mut crate::File) { - visit_file_mut(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_fn_arg_mut(&mut self, i: &mut crate::FnArg) { - visit_fn_arg_mut(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_foreign_item_mut(&mut self, i: &mut crate::ForeignItem) { - visit_foreign_item_mut(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_foreign_item_fn_mut(&mut self, i: &mut crate::ForeignItemFn) { - visit_foreign_item_fn_mut(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_foreign_item_macro_mut(&mut self, i: &mut crate::ForeignItemMacro) { - visit_foreign_item_macro_mut(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_foreign_item_static_mut(&mut self, i: &mut crate::ForeignItemStatic) { - visit_foreign_item_static_mut(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_foreign_item_type_mut(&mut self, i: &mut crate::ForeignItemType) { - visit_foreign_item_type_mut(self, i); - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn visit_generic_argument_mut(&mut self, i: &mut crate::GenericArgument) { - visit_generic_argument_mut(self, i); - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn visit_generic_param_mut(&mut self, i: &mut crate::GenericParam) { - visit_generic_param_mut(self, i); - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn visit_generics_mut(&mut self, i: &mut crate::Generics) { - visit_generics_mut(self, i); - } - fn visit_ident_mut(&mut self, i: &mut proc_macro2::Ident) { - visit_ident_mut(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_impl_item_mut(&mut self, i: &mut crate::ImplItem) { - visit_impl_item_mut(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_impl_item_const_mut(&mut self, i: &mut crate::ImplItemConst) { - visit_impl_item_const_mut(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_impl_item_fn_mut(&mut self, i: &mut crate::ImplItemFn) { - visit_impl_item_fn_mut(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_impl_item_macro_mut(&mut self, i: &mut crate::ImplItemMacro) { - visit_impl_item_macro_mut(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_impl_item_type_mut(&mut self, i: &mut crate::ImplItemType) { - visit_impl_item_type_mut(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_impl_restriction_mut(&mut self, i: &mut crate::ImplRestriction) { - visit_impl_restriction_mut(self, i); - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn visit_index_mut(&mut self, i: &mut crate::Index) { - visit_index_mut(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_item_mut(&mut self, i: &mut crate::Item) { - visit_item_mut(self, i); - } - 
#[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_item_const_mut(&mut self, i: &mut crate::ItemConst) { - visit_item_const_mut(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_item_enum_mut(&mut self, i: &mut crate::ItemEnum) { - visit_item_enum_mut(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_item_extern_crate_mut(&mut self, i: &mut crate::ItemExternCrate) { - visit_item_extern_crate_mut(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_item_fn_mut(&mut self, i: &mut crate::ItemFn) { - visit_item_fn_mut(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_item_foreign_mod_mut(&mut self, i: &mut crate::ItemForeignMod) { - visit_item_foreign_mod_mut(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_item_impl_mut(&mut self, i: &mut crate::ItemImpl) { - visit_item_impl_mut(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_item_macro_mut(&mut self, i: &mut crate::ItemMacro) { - visit_item_macro_mut(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_item_mod_mut(&mut self, i: &mut crate::ItemMod) { - visit_item_mod_mut(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_item_static_mut(&mut self, i: &mut crate::ItemStatic) { - visit_item_static_mut(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_item_struct_mut(&mut self, i: &mut crate::ItemStruct) { - visit_item_struct_mut(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_item_trait_mut(&mut self, i: &mut crate::ItemTrait) { - visit_item_trait_mut(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_item_trait_alias_mut(&mut self, i: &mut crate::ItemTraitAlias) { - visit_item_trait_alias_mut(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_item_type_mut(&mut self, i: &mut crate::ItemType) { - visit_item_type_mut(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_item_union_mut(&mut self, i: &mut crate::ItemUnion) { - visit_item_union_mut(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_item_use_mut(&mut self, i: &mut crate::ItemUse) { - visit_item_use_mut(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_label_mut(&mut self, i: &mut crate::Label) { - visit_label_mut(self, i); - } - fn visit_lifetime_mut(&mut self, i: &mut crate::Lifetime) { - visit_lifetime_mut(self, i); - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn visit_lifetime_param_mut(&mut self, i: &mut crate::LifetimeParam) { - visit_lifetime_param_mut(self, i); - } - fn visit_lit_mut(&mut self, i: &mut crate::Lit) { - visit_lit_mut(self, i); - } - fn visit_lit_bool_mut(&mut self, i: &mut crate::LitBool) { - visit_lit_bool_mut(self, i); - } - fn visit_lit_byte_mut(&mut self, i: &mut crate::LitByte) { - visit_lit_byte_mut(self, i); - } - fn visit_lit_byte_str_mut(&mut self, i: &mut 
crate::LitByteStr) { - visit_lit_byte_str_mut(self, i); - } - fn visit_lit_cstr_mut(&mut self, i: &mut crate::LitCStr) { - visit_lit_cstr_mut(self, i); - } - fn visit_lit_char_mut(&mut self, i: &mut crate::LitChar) { - visit_lit_char_mut(self, i); - } - fn visit_lit_float_mut(&mut self, i: &mut crate::LitFloat) { - visit_lit_float_mut(self, i); - } - fn visit_lit_int_mut(&mut self, i: &mut crate::LitInt) { - visit_lit_int_mut(self, i); - } - fn visit_lit_str_mut(&mut self, i: &mut crate::LitStr) { - visit_lit_str_mut(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_local_mut(&mut self, i: &mut crate::Local) { - visit_local_mut(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_local_init_mut(&mut self, i: &mut crate::LocalInit) { - visit_local_init_mut(self, i); - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn visit_macro_mut(&mut self, i: &mut crate::Macro) { - visit_macro_mut(self, i); - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn visit_macro_delimiter_mut(&mut self, i: &mut crate::MacroDelimiter) { - visit_macro_delimiter_mut(self, i); - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn visit_member_mut(&mut self, i: &mut crate::Member) { - visit_member_mut(self, i); - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn visit_meta_mut(&mut self, i: &mut crate::Meta) { - visit_meta_mut(self, i); - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn visit_meta_list_mut(&mut self, i: &mut crate::MetaList) { - visit_meta_list_mut(self, i); - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn visit_meta_name_value_mut(&mut self, i: &mut crate::MetaNameValue) { - visit_meta_name_value_mut(self, i); - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn visit_parenthesized_generic_arguments_mut( - &mut self, - i: &mut crate::ParenthesizedGenericArguments, - ) { - visit_parenthesized_generic_arguments_mut(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_pat_mut(&mut self, i: &mut crate::Pat) { - visit_pat_mut(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_pat_ident_mut(&mut self, i: &mut crate::PatIdent) { - visit_pat_ident_mut(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_pat_or_mut(&mut self, i: &mut crate::PatOr) { - visit_pat_or_mut(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_pat_paren_mut(&mut self, i: &mut crate::PatParen) { - visit_pat_paren_mut(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_pat_reference_mut(&mut self, i: &mut crate::PatReference) { - visit_pat_reference_mut(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_pat_rest_mut(&mut self, i: &mut crate::PatRest) { - 
visit_pat_rest_mut(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_pat_slice_mut(&mut self, i: &mut crate::PatSlice) { - visit_pat_slice_mut(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_pat_struct_mut(&mut self, i: &mut crate::PatStruct) { - visit_pat_struct_mut(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_pat_tuple_mut(&mut self, i: &mut crate::PatTuple) { - visit_pat_tuple_mut(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_pat_tuple_struct_mut(&mut self, i: &mut crate::PatTupleStruct) { - visit_pat_tuple_struct_mut(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_pat_type_mut(&mut self, i: &mut crate::PatType) { - visit_pat_type_mut(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_pat_wild_mut(&mut self, i: &mut crate::PatWild) { - visit_pat_wild_mut(self, i); - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn visit_path_mut(&mut self, i: &mut crate::Path) { - visit_path_mut(self, i); - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn visit_path_arguments_mut(&mut self, i: &mut crate::PathArguments) { - visit_path_arguments_mut(self, i); - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn visit_path_segment_mut(&mut self, i: &mut crate::PathSegment) { - visit_path_segment_mut(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_pointer_mutability_mut(&mut self, i: &mut crate::PointerMutability) { - visit_pointer_mutability_mut(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_precise_capture_mut(&mut self, i: &mut crate::PreciseCapture) { - visit_precise_capture_mut(self, i); - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn visit_predicate_lifetime_mut(&mut self, i: &mut crate::PredicateLifetime) { - visit_predicate_lifetime_mut(self, i); - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn visit_predicate_type_mut(&mut self, i: &mut crate::PredicateType) { - visit_predicate_type_mut(self, i); - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn visit_qself_mut(&mut self, i: &mut crate::QSelf) { - visit_qself_mut(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_range_limits_mut(&mut self, i: &mut crate::RangeLimits) { - visit_range_limits_mut(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_receiver_mut(&mut self, i: &mut crate::Receiver) { - visit_receiver_mut(self, i); - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn visit_return_type_mut(&mut self, i: &mut crate::ReturnType) { - visit_return_type_mut(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature 
= "full")))] - fn visit_signature_mut(&mut self, i: &mut crate::Signature) { - visit_signature_mut(self, i); - } - fn visit_span_mut(&mut self, i: &mut proc_macro2::Span) {} - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_static_mutability_mut(&mut self, i: &mut crate::StaticMutability) { - visit_static_mutability_mut(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_stmt_mut(&mut self, i: &mut crate::Stmt) { - visit_stmt_mut(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_stmt_macro_mut(&mut self, i: &mut crate::StmtMacro) { - visit_stmt_macro_mut(self, i); - } - fn visit_token_stream_mut(&mut self, i: &mut proc_macro2::TokenStream) {} - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn visit_trait_bound_mut(&mut self, i: &mut crate::TraitBound) { - visit_trait_bound_mut(self, i); - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn visit_trait_bound_modifier_mut(&mut self, i: &mut crate::TraitBoundModifier) { - visit_trait_bound_modifier_mut(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_trait_item_mut(&mut self, i: &mut crate::TraitItem) { - visit_trait_item_mut(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_trait_item_const_mut(&mut self, i: &mut crate::TraitItemConst) { - visit_trait_item_const_mut(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_trait_item_fn_mut(&mut self, i: &mut crate::TraitItemFn) { - visit_trait_item_fn_mut(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_trait_item_macro_mut(&mut self, i: &mut crate::TraitItemMacro) { - visit_trait_item_macro_mut(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_trait_item_type_mut(&mut self, i: &mut crate::TraitItemType) { - visit_trait_item_type_mut(self, i); - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn visit_type_mut(&mut self, i: &mut crate::Type) { - visit_type_mut(self, i); - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn visit_type_array_mut(&mut self, i: &mut crate::TypeArray) { - visit_type_array_mut(self, i); - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn visit_type_bare_fn_mut(&mut self, i: &mut crate::TypeBareFn) { - visit_type_bare_fn_mut(self, i); - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn visit_type_group_mut(&mut self, i: &mut crate::TypeGroup) { - visit_type_group_mut(self, i); - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn visit_type_impl_trait_mut(&mut self, i: &mut crate::TypeImplTrait) { - visit_type_impl_trait_mut(self, i); - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn visit_type_infer_mut(&mut self, i: &mut 
crate::TypeInfer) { - visit_type_infer_mut(self, i); - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn visit_type_macro_mut(&mut self, i: &mut crate::TypeMacro) { - visit_type_macro_mut(self, i); - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn visit_type_never_mut(&mut self, i: &mut crate::TypeNever) { - visit_type_never_mut(self, i); - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn visit_type_param_mut(&mut self, i: &mut crate::TypeParam) { - visit_type_param_mut(self, i); - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn visit_type_param_bound_mut(&mut self, i: &mut crate::TypeParamBound) { - visit_type_param_bound_mut(self, i); - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn visit_type_paren_mut(&mut self, i: &mut crate::TypeParen) { - visit_type_paren_mut(self, i); - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn visit_type_path_mut(&mut self, i: &mut crate::TypePath) { - visit_type_path_mut(self, i); - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn visit_type_ptr_mut(&mut self, i: &mut crate::TypePtr) { - visit_type_ptr_mut(self, i); - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn visit_type_reference_mut(&mut self, i: &mut crate::TypeReference) { - visit_type_reference_mut(self, i); - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn visit_type_slice_mut(&mut self, i: &mut crate::TypeSlice) { - visit_type_slice_mut(self, i); - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn visit_type_trait_object_mut(&mut self, i: &mut crate::TypeTraitObject) { - visit_type_trait_object_mut(self, i); - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn visit_type_tuple_mut(&mut self, i: &mut crate::TypeTuple) { - visit_type_tuple_mut(self, i); - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn visit_un_op_mut(&mut self, i: &mut crate::UnOp) { - visit_un_op_mut(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_use_glob_mut(&mut self, i: &mut crate::UseGlob) { - visit_use_glob_mut(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_use_group_mut(&mut self, i: &mut crate::UseGroup) { - visit_use_group_mut(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_use_name_mut(&mut self, i: &mut crate::UseName) { - visit_use_name_mut(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_use_path_mut(&mut self, i: &mut crate::UsePath) { - visit_use_path_mut(self, i); - } - #[cfg(feature = "full")] - 
#[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_use_rename_mut(&mut self, i: &mut crate::UseRename) { - visit_use_rename_mut(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_use_tree_mut(&mut self, i: &mut crate::UseTree) { - visit_use_tree_mut(self, i); - } - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - fn visit_variadic_mut(&mut self, i: &mut crate::Variadic) { - visit_variadic_mut(self, i); - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn visit_variant_mut(&mut self, i: &mut crate::Variant) { - visit_variant_mut(self, i); - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn visit_vis_restricted_mut(&mut self, i: &mut crate::VisRestricted) { - visit_vis_restricted_mut(self, i); - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn visit_visibility_mut(&mut self, i: &mut crate::Visibility) { - visit_visibility_mut(self, i); - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn visit_where_clause_mut(&mut self, i: &mut crate::WhereClause) { - visit_where_clause_mut(self, i); - } - #[cfg(any(feature = "derive", feature = "full"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] - fn visit_where_predicate_mut(&mut self, i: &mut crate::WherePredicate) { - visit_where_predicate_mut(self, i); - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn visit_abi_mut<V>(v: &mut V, node: &mut crate::Abi) -where - V: VisitMut + ?Sized, -{ - skip!(node.extern_token); - if let Some(it) = &mut node.name { - v.visit_lit_str_mut(it); - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn visit_angle_bracketed_generic_arguments_mut<V>( - v: &mut V, - node: &mut crate::AngleBracketedGenericArguments, -) -where - V: VisitMut + ?Sized, -{ - skip!(node.colon2_token); - skip!(node.lt_token); - for mut el in Punctuated::pairs_mut(&mut node.args) { - let it = el.value_mut(); - v.visit_generic_argument_mut(it); - } - skip!(node.gt_token); -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_arm_mut<V>(v: &mut V, node: &mut crate::Arm) -where - V: VisitMut + ?Sized, -{ - v.visit_attributes_mut(&mut node.attrs); - v.visit_pat_mut(&mut node.pat); - if let Some(it) = &mut node.guard { - skip!((it).0); - v.visit_expr_mut(&mut *(it).1); - } - skip!(node.fat_arrow_token); - v.visit_expr_mut(&mut *node.body); - skip!(node.comma); -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn visit_assoc_const_mut<V>(v: &mut V, node: &mut crate::AssocConst) -where - V: VisitMut + ?Sized, -{ - v.visit_ident_mut(&mut node.ident); - if let Some(it) = &mut node.generics { - v.visit_angle_bracketed_generic_arguments_mut(it); - } - skip!(node.eq_token); - v.visit_expr_mut(&mut node.value); -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn visit_assoc_type_mut<V>(v: &mut V, node: &mut crate::AssocType) -where - V: VisitMut 
+ ?Sized, -{ - v.visit_ident_mut(&mut node.ident); - if let Some(it) = &mut node.generics { - v.visit_angle_bracketed_generic_arguments_mut(it); - } - skip!(node.eq_token); - v.visit_type_mut(&mut node.ty); -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn visit_attr_style_mut<V>(v: &mut V, node: &mut crate::AttrStyle) -where - V: VisitMut + ?Sized, -{ - match node { - crate::AttrStyle::Outer => {} - crate::AttrStyle::Inner(_binding_0) => { - skip!(_binding_0); - } - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn visit_attribute_mut<V>(v: &mut V, node: &mut crate::Attribute) -where - V: VisitMut + ?Sized, -{ - skip!(node.pound_token); - v.visit_attr_style_mut(&mut node.style); - skip!(node.bracket_token); - v.visit_meta_mut(&mut node.meta); -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn visit_bare_fn_arg_mut<V>(v: &mut V, node: &mut crate::BareFnArg) -where - V: VisitMut + ?Sized, -{ - v.visit_attributes_mut(&mut node.attrs); - if let Some(it) = &mut node.name { - v.visit_ident_mut(&mut (it).0); - skip!((it).1); - } - v.visit_type_mut(&mut node.ty); -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn visit_bare_variadic_mut<V>(v: &mut V, node: &mut crate::BareVariadic) -where - V: VisitMut + ?Sized, -{ - v.visit_attributes_mut(&mut node.attrs); - if let Some(it) = &mut node.name { - v.visit_ident_mut(&mut (it).0); - skip!((it).1); - } - skip!(node.dots); - skip!(node.comma); -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn visit_bin_op_mut<V>(v: &mut V, node: &mut crate::BinOp) -where - V: VisitMut + ?Sized, -{ - match node { - crate::BinOp::Add(_binding_0) => { - skip!(_binding_0); - } - crate::BinOp::Sub(_binding_0) => { - skip!(_binding_0); - } - crate::BinOp::Mul(_binding_0) => { - skip!(_binding_0); - } - crate::BinOp::Div(_binding_0) => { - skip!(_binding_0); - } - crate::BinOp::Rem(_binding_0) => { - skip!(_binding_0); - } - crate::BinOp::And(_binding_0) => { - skip!(_binding_0); - } - crate::BinOp::Or(_binding_0) => { - skip!(_binding_0); - } - crate::BinOp::BitXor(_binding_0) => { - skip!(_binding_0); - } - crate::BinOp::BitAnd(_binding_0) => { - skip!(_binding_0); - } - crate::BinOp::BitOr(_binding_0) => { - skip!(_binding_0); - } - crate::BinOp::Shl(_binding_0) => { - skip!(_binding_0); - } - crate::BinOp::Shr(_binding_0) => { - skip!(_binding_0); - } - crate::BinOp::Eq(_binding_0) => { - skip!(_binding_0); - } - crate::BinOp::Lt(_binding_0) => { - skip!(_binding_0); - } - crate::BinOp::Le(_binding_0) => { - skip!(_binding_0); - } - crate::BinOp::Ne(_binding_0) => { - skip!(_binding_0); - } - crate::BinOp::Ge(_binding_0) => { - skip!(_binding_0); - } - crate::BinOp::Gt(_binding_0) => { - skip!(_binding_0); - } - crate::BinOp::AddAssign(_binding_0) => { - skip!(_binding_0); - } - crate::BinOp::SubAssign(_binding_0) => { - skip!(_binding_0); - } - crate::BinOp::MulAssign(_binding_0) => { - skip!(_binding_0); - } - crate::BinOp::DivAssign(_binding_0) => { - skip!(_binding_0); - } - crate::BinOp::RemAssign(_binding_0) => { - skip!(_binding_0); - } - crate::BinOp::BitXorAssign(_binding_0) => { - skip!(_binding_0); - } - 
crate::BinOp::BitAndAssign(_binding_0) => { - skip!(_binding_0); - } - crate::BinOp::BitOrAssign(_binding_0) => { - skip!(_binding_0); - } - crate::BinOp::ShlAssign(_binding_0) => { - skip!(_binding_0); - } - crate::BinOp::ShrAssign(_binding_0) => { - skip!(_binding_0); - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_block_mut<V>(v: &mut V, node: &mut crate::Block) -where - V: VisitMut + ?Sized, -{ - skip!(node.brace_token); - for it in &mut node.stmts { - v.visit_stmt_mut(it); - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn visit_bound_lifetimes_mut<V>(v: &mut V, node: &mut crate::BoundLifetimes) -where - V: VisitMut + ?Sized, -{ - skip!(node.for_token); - skip!(node.lt_token); - for mut el in Punctuated::pairs_mut(&mut node.lifetimes) { - let it = el.value_mut(); - v.visit_generic_param_mut(it); - } - skip!(node.gt_token); -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_captured_param_mut<V>(v: &mut V, node: &mut crate::CapturedParam) -where - V: VisitMut + ?Sized, -{ - match node { - crate::CapturedParam::Lifetime(_binding_0) => { - v.visit_lifetime_mut(_binding_0); - } - crate::CapturedParam::Ident(_binding_0) => { - v.visit_ident_mut(_binding_0); - } - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn visit_const_param_mut<V>(v: &mut V, node: &mut crate::ConstParam) -where - V: VisitMut + ?Sized, -{ - v.visit_attributes_mut(&mut node.attrs); - skip!(node.const_token); - v.visit_ident_mut(&mut node.ident); - skip!(node.colon_token); - v.visit_type_mut(&mut node.ty); - skip!(node.eq_token); - if let Some(it) = &mut node.default { - v.visit_expr_mut(it); - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn visit_constraint_mut<V>(v: &mut V, node: &mut crate::Constraint) -where - V: VisitMut + ?Sized, -{ - v.visit_ident_mut(&mut node.ident); - if let Some(it) = &mut node.generics { - v.visit_angle_bracketed_generic_arguments_mut(it); - } - skip!(node.colon_token); - for mut el in Punctuated::pairs_mut(&mut node.bounds) { - let it = el.value_mut(); - v.visit_type_param_bound_mut(it); - } -} -#[cfg(feature = "derive")] -#[cfg_attr(docsrs, doc(cfg(feature = "derive")))] -pub fn visit_data_mut<V>(v: &mut V, node: &mut crate::Data) -where - V: VisitMut + ?Sized, -{ - match node { - crate::Data::Struct(_binding_0) => { - v.visit_data_struct_mut(_binding_0); - } - crate::Data::Enum(_binding_0) => { - v.visit_data_enum_mut(_binding_0); - } - crate::Data::Union(_binding_0) => { - v.visit_data_union_mut(_binding_0); - } - } -} -#[cfg(feature = "derive")] -#[cfg_attr(docsrs, doc(cfg(feature = "derive")))] -pub fn visit_data_enum_mut<V>(v: &mut V, node: &mut crate::DataEnum) -where - V: VisitMut + ?Sized, -{ - skip!(node.enum_token); - skip!(node.brace_token); - for mut el in Punctuated::pairs_mut(&mut node.variants) { - let it = el.value_mut(); - v.visit_variant_mut(it); - } -} -#[cfg(feature = "derive")] -#[cfg_attr(docsrs, doc(cfg(feature = "derive")))] -pub fn visit_data_struct_mut<V>(v: &mut V, node: &mut crate::DataStruct) -where - V: VisitMut + ?Sized, -{ - skip!(node.struct_token); - v.visit_fields_mut(&mut node.fields); - skip!(node.semi_token); -} -#[cfg(feature = "derive")] -#[cfg_attr(docsrs, doc(cfg(feature = "derive")))] -pub fn 
visit_data_union_mut<V>(v: &mut V, node: &mut crate::DataUnion) -where - V: VisitMut + ?Sized, -{ - skip!(node.union_token); - v.visit_fields_named_mut(&mut node.fields); -} -#[cfg(feature = "derive")] -#[cfg_attr(docsrs, doc(cfg(feature = "derive")))] -pub fn visit_derive_input_mut<V>(v: &mut V, node: &mut crate::DeriveInput) -where - V: VisitMut + ?Sized, -{ - v.visit_attributes_mut(&mut node.attrs); - v.visit_visibility_mut(&mut node.vis); - v.visit_ident_mut(&mut node.ident); - v.visit_generics_mut(&mut node.generics); - v.visit_data_mut(&mut node.data); -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn visit_expr_mut<V>(v: &mut V, node: &mut crate::Expr) -where - V: VisitMut + ?Sized, -{ - match node { - crate::Expr::Array(_binding_0) => { - full!(v.visit_expr_array_mut(_binding_0)); - } - crate::Expr::Assign(_binding_0) => { - full!(v.visit_expr_assign_mut(_binding_0)); - } - crate::Expr::Async(_binding_0) => { - full!(v.visit_expr_async_mut(_binding_0)); - } - crate::Expr::Await(_binding_0) => { - full!(v.visit_expr_await_mut(_binding_0)); - } - crate::Expr::Binary(_binding_0) => { - v.visit_expr_binary_mut(_binding_0); - } - crate::Expr::Block(_binding_0) => { - full!(v.visit_expr_block_mut(_binding_0)); - } - crate::Expr::Break(_binding_0) => { - full!(v.visit_expr_break_mut(_binding_0)); - } - crate::Expr::Call(_binding_0) => { - v.visit_expr_call_mut(_binding_0); - } - crate::Expr::Cast(_binding_0) => { - v.visit_expr_cast_mut(_binding_0); - } - crate::Expr::Closure(_binding_0) => { - full!(v.visit_expr_closure_mut(_binding_0)); - } - crate::Expr::Const(_binding_0) => { - full!(v.visit_expr_const_mut(_binding_0)); - } - crate::Expr::Continue(_binding_0) => { - full!(v.visit_expr_continue_mut(_binding_0)); - } - crate::Expr::Field(_binding_0) => { - v.visit_expr_field_mut(_binding_0); - } - crate::Expr::ForLoop(_binding_0) => { - full!(v.visit_expr_for_loop_mut(_binding_0)); - } - crate::Expr::Group(_binding_0) => { - v.visit_expr_group_mut(_binding_0); - } - crate::Expr::If(_binding_0) => { - full!(v.visit_expr_if_mut(_binding_0)); - } - crate::Expr::Index(_binding_0) => { - v.visit_expr_index_mut(_binding_0); - } - crate::Expr::Infer(_binding_0) => { - full!(v.visit_expr_infer_mut(_binding_0)); - } - crate::Expr::Let(_binding_0) => { - full!(v.visit_expr_let_mut(_binding_0)); - } - crate::Expr::Lit(_binding_0) => { - v.visit_expr_lit_mut(_binding_0); - } - crate::Expr::Loop(_binding_0) => { - full!(v.visit_expr_loop_mut(_binding_0)); - } - crate::Expr::Macro(_binding_0) => { - v.visit_expr_macro_mut(_binding_0); - } - crate::Expr::Match(_binding_0) => { - full!(v.visit_expr_match_mut(_binding_0)); - } - crate::Expr::MethodCall(_binding_0) => { - v.visit_expr_method_call_mut(_binding_0); - } - crate::Expr::Paren(_binding_0) => { - v.visit_expr_paren_mut(_binding_0); - } - crate::Expr::Path(_binding_0) => { - v.visit_expr_path_mut(_binding_0); - } - crate::Expr::Range(_binding_0) => { - full!(v.visit_expr_range_mut(_binding_0)); - } - crate::Expr::RawAddr(_binding_0) => { - full!(v.visit_expr_raw_addr_mut(_binding_0)); - } - crate::Expr::Reference(_binding_0) => { - v.visit_expr_reference_mut(_binding_0); - } - crate::Expr::Repeat(_binding_0) => { - full!(v.visit_expr_repeat_mut(_binding_0)); - } - crate::Expr::Return(_binding_0) => { - full!(v.visit_expr_return_mut(_binding_0)); - } - crate::Expr::Struct(_binding_0) => { - v.visit_expr_struct_mut(_binding_0); - } - crate::Expr::Try(_binding_0) 
=> { - full!(v.visit_expr_try_mut(_binding_0)); - } - crate::Expr::TryBlock(_binding_0) => { - full!(v.visit_expr_try_block_mut(_binding_0)); - } - crate::Expr::Tuple(_binding_0) => { - v.visit_expr_tuple_mut(_binding_0); - } - crate::Expr::Unary(_binding_0) => { - v.visit_expr_unary_mut(_binding_0); - } - crate::Expr::Unsafe(_binding_0) => { - full!(v.visit_expr_unsafe_mut(_binding_0)); - } - crate::Expr::Verbatim(_binding_0) => { - v.visit_token_stream_mut(_binding_0); - } - crate::Expr::While(_binding_0) => { - full!(v.visit_expr_while_mut(_binding_0)); - } - crate::Expr::Yield(_binding_0) => { - full!(v.visit_expr_yield_mut(_binding_0)); - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_expr_array_mut<V>(v: &mut V, node: &mut crate::ExprArray) -where - V: VisitMut + ?Sized, -{ - v.visit_attributes_mut(&mut node.attrs); - skip!(node.bracket_token); - for mut el in Punctuated::pairs_mut(&mut node.elems) { - let it = el.value_mut(); - v.visit_expr_mut(it); - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_expr_assign_mut<V>(v: &mut V, node: &mut crate::ExprAssign) -where - V: VisitMut + ?Sized, -{ - v.visit_attributes_mut(&mut node.attrs); - v.visit_expr_mut(&mut *node.left); - skip!(node.eq_token); - v.visit_expr_mut(&mut *node.right); -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_expr_async_mut<V>(v: &mut V, node: &mut crate::ExprAsync) -where - V: VisitMut + ?Sized, -{ - v.visit_attributes_mut(&mut node.attrs); - skip!(node.async_token); - skip!(node.capture); - v.visit_block_mut(&mut node.block); -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_expr_await_mut<V>(v: &mut V, node: &mut crate::ExprAwait) -where - V: VisitMut + ?Sized, -{ - v.visit_attributes_mut(&mut node.attrs); - v.visit_expr_mut(&mut *node.base); - skip!(node.dot_token); - skip!(node.await_token); -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn visit_expr_binary_mut<V>(v: &mut V, node: &mut crate::ExprBinary) -where - V: VisitMut + ?Sized, -{ - v.visit_attributes_mut(&mut node.attrs); - v.visit_expr_mut(&mut *node.left); - v.visit_bin_op_mut(&mut node.op); - v.visit_expr_mut(&mut *node.right); -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_expr_block_mut<V>(v: &mut V, node: &mut crate::ExprBlock) -where - V: VisitMut + ?Sized, -{ - v.visit_attributes_mut(&mut node.attrs); - if let Some(it) = &mut node.label { - v.visit_label_mut(it); - } - v.visit_block_mut(&mut node.block); -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_expr_break_mut<V>(v: &mut V, node: &mut crate::ExprBreak) -where - V: VisitMut + ?Sized, -{ - v.visit_attributes_mut(&mut node.attrs); - skip!(node.break_token); - if let Some(it) = &mut node.label { - v.visit_lifetime_mut(it); - } - if let Some(it) = &mut node.expr { - v.visit_expr_mut(&mut **it); - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn visit_expr_call_mut<V>(v: &mut V, node: &mut crate::ExprCall) -where - V: VisitMut + ?Sized, -{ - v.visit_attributes_mut(&mut node.attrs); - v.visit_expr_mut(&mut *node.func); - skip!(node.paren_token); - for mut el in Punctuated::pairs_mut(&mut node.args) { - let it = el.value_mut(); - v.visit_expr_mut(it); - } 
-} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn visit_expr_cast_mut<V>(v: &mut V, node: &mut crate::ExprCast) -where - V: VisitMut + ?Sized, -{ - v.visit_attributes_mut(&mut node.attrs); - v.visit_expr_mut(&mut *node.expr); - skip!(node.as_token); - v.visit_type_mut(&mut *node.ty); -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_expr_closure_mut<V>(v: &mut V, node: &mut crate::ExprClosure) -where - V: VisitMut + ?Sized, -{ - v.visit_attributes_mut(&mut node.attrs); - if let Some(it) = &mut node.lifetimes { - v.visit_bound_lifetimes_mut(it); - } - skip!(node.constness); - skip!(node.movability); - skip!(node.asyncness); - skip!(node.capture); - skip!(node.or1_token); - for mut el in Punctuated::pairs_mut(&mut node.inputs) { - let it = el.value_mut(); - v.visit_pat_mut(it); - } - skip!(node.or2_token); - v.visit_return_type_mut(&mut node.output); - v.visit_expr_mut(&mut *node.body); -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_expr_const_mut<V>(v: &mut V, node: &mut crate::ExprConst) -where - V: VisitMut + ?Sized, -{ - v.visit_attributes_mut(&mut node.attrs); - skip!(node.const_token); - v.visit_block_mut(&mut node.block); -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_expr_continue_mut<V>(v: &mut V, node: &mut crate::ExprContinue) -where - V: VisitMut + ?Sized, -{ - v.visit_attributes_mut(&mut node.attrs); - skip!(node.continue_token); - if let Some(it) = &mut node.label { - v.visit_lifetime_mut(it); - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn visit_expr_field_mut<V>(v: &mut V, node: &mut crate::ExprField) -where - V: VisitMut + ?Sized, -{ - v.visit_attributes_mut(&mut node.attrs); - v.visit_expr_mut(&mut *node.base); - skip!(node.dot_token); - v.visit_member_mut(&mut node.member); -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_expr_for_loop_mut<V>(v: &mut V, node: &mut crate::ExprForLoop) -where - V: VisitMut + ?Sized, -{ - v.visit_attributes_mut(&mut node.attrs); - if let Some(it) = &mut node.label { - v.visit_label_mut(it); - } - skip!(node.for_token); - v.visit_pat_mut(&mut *node.pat); - skip!(node.in_token); - v.visit_expr_mut(&mut *node.expr); - v.visit_block_mut(&mut node.body); -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn visit_expr_group_mut<V>(v: &mut V, node: &mut crate::ExprGroup) -where - V: VisitMut + ?Sized, -{ - v.visit_attributes_mut(&mut node.attrs); - skip!(node.group_token); - v.visit_expr_mut(&mut *node.expr); -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_expr_if_mut<V>(v: &mut V, node: &mut crate::ExprIf) -where - V: VisitMut + ?Sized, -{ - v.visit_attributes_mut(&mut node.attrs); - skip!(node.if_token); - v.visit_expr_mut(&mut *node.cond); - v.visit_block_mut(&mut node.then_branch); - if let Some(it) = &mut node.else_branch { - skip!((it).0); - v.visit_expr_mut(&mut *(it).1); - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn visit_expr_index_mut<V>(v: &mut V, node: &mut crate::ExprIndex) -where - V: VisitMut + ?Sized, -{ - v.visit_attributes_mut(&mut node.attrs); - v.visit_expr_mut(&mut 
*node.expr); - skip!(node.bracket_token); - v.visit_expr_mut(&mut *node.index); -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_expr_infer_mut<V>(v: &mut V, node: &mut crate::ExprInfer) -where - V: VisitMut + ?Sized, -{ - v.visit_attributes_mut(&mut node.attrs); - skip!(node.underscore_token); -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_expr_let_mut<V>(v: &mut V, node: &mut crate::ExprLet) -where - V: VisitMut + ?Sized, -{ - v.visit_attributes_mut(&mut node.attrs); - skip!(node.let_token); - v.visit_pat_mut(&mut *node.pat); - skip!(node.eq_token); - v.visit_expr_mut(&mut *node.expr); -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn visit_expr_lit_mut<V>(v: &mut V, node: &mut crate::ExprLit) -where - V: VisitMut + ?Sized, -{ - v.visit_attributes_mut(&mut node.attrs); - v.visit_lit_mut(&mut node.lit); -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_expr_loop_mut<V>(v: &mut V, node: &mut crate::ExprLoop) -where - V: VisitMut + ?Sized, -{ - v.visit_attributes_mut(&mut node.attrs); - if let Some(it) = &mut node.label { - v.visit_label_mut(it); - } - skip!(node.loop_token); - v.visit_block_mut(&mut node.body); -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn visit_expr_macro_mut<V>(v: &mut V, node: &mut crate::ExprMacro) -where - V: VisitMut + ?Sized, -{ - v.visit_attributes_mut(&mut node.attrs); - v.visit_macro_mut(&mut node.mac); -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_expr_match_mut<V>(v: &mut V, node: &mut crate::ExprMatch) -where - V: VisitMut + ?Sized, -{ - v.visit_attributes_mut(&mut node.attrs); - skip!(node.match_token); - v.visit_expr_mut(&mut *node.expr); - skip!(node.brace_token); - for it in &mut node.arms { - v.visit_arm_mut(it); - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn visit_expr_method_call_mut<V>(v: &mut V, node: &mut crate::ExprMethodCall) -where - V: VisitMut + ?Sized, -{ - v.visit_attributes_mut(&mut node.attrs); - v.visit_expr_mut(&mut *node.receiver); - skip!(node.dot_token); - v.visit_ident_mut(&mut node.method); - if let Some(it) = &mut node.turbofish { - v.visit_angle_bracketed_generic_arguments_mut(it); - } - skip!(node.paren_token); - for mut el in Punctuated::pairs_mut(&mut node.args) { - let it = el.value_mut(); - v.visit_expr_mut(it); - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn visit_expr_paren_mut<V>(v: &mut V, node: &mut crate::ExprParen) -where - V: VisitMut + ?Sized, -{ - v.visit_attributes_mut(&mut node.attrs); - skip!(node.paren_token); - v.visit_expr_mut(&mut *node.expr); -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn visit_expr_path_mut<V>(v: &mut V, node: &mut crate::ExprPath) -where - V: VisitMut + ?Sized, -{ - v.visit_attributes_mut(&mut node.attrs); - if let Some(it) = &mut node.qself { - v.visit_qself_mut(it); - } - v.visit_path_mut(&mut node.path); -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_expr_range_mut<V>(v: &mut V, node: &mut crate::ExprRange) -where - V: VisitMut + 
?Sized, -{ - v.visit_attributes_mut(&mut node.attrs); - if let Some(it) = &mut node.start { - v.visit_expr_mut(&mut **it); - } - v.visit_range_limits_mut(&mut node.limits); - if let Some(it) = &mut node.end { - v.visit_expr_mut(&mut **it); - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_expr_raw_addr_mut<V>(v: &mut V, node: &mut crate::ExprRawAddr) -where - V: VisitMut + ?Sized, -{ - v.visit_attributes_mut(&mut node.attrs); - skip!(node.and_token); - skip!(node.raw); - v.visit_pointer_mutability_mut(&mut node.mutability); - v.visit_expr_mut(&mut *node.expr); -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn visit_expr_reference_mut<V>(v: &mut V, node: &mut crate::ExprReference) -where - V: VisitMut + ?Sized, -{ - v.visit_attributes_mut(&mut node.attrs); - skip!(node.and_token); - skip!(node.mutability); - v.visit_expr_mut(&mut *node.expr); -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_expr_repeat_mut<V>(v: &mut V, node: &mut crate::ExprRepeat) -where - V: VisitMut + ?Sized, -{ - v.visit_attributes_mut(&mut node.attrs); - skip!(node.bracket_token); - v.visit_expr_mut(&mut *node.expr); - skip!(node.semi_token); - v.visit_expr_mut(&mut *node.len); -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_expr_return_mut<V>(v: &mut V, node: &mut crate::ExprReturn) -where - V: VisitMut + ?Sized, -{ - v.visit_attributes_mut(&mut node.attrs); - skip!(node.return_token); - if let Some(it) = &mut node.expr { - v.visit_expr_mut(&mut **it); - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn visit_expr_struct_mut<V>(v: &mut V, node: &mut crate::ExprStruct) -where - V: VisitMut + ?Sized, -{ - v.visit_attributes_mut(&mut node.attrs); - if let Some(it) = &mut node.qself { - v.visit_qself_mut(it); - } - v.visit_path_mut(&mut node.path); - skip!(node.brace_token); - for mut el in Punctuated::pairs_mut(&mut node.fields) { - let it = el.value_mut(); - v.visit_field_value_mut(it); - } - skip!(node.dot2_token); - if let Some(it) = &mut node.rest { - v.visit_expr_mut(&mut **it); - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_expr_try_mut<V>(v: &mut V, node: &mut crate::ExprTry) -where - V: VisitMut + ?Sized, -{ - v.visit_attributes_mut(&mut node.attrs); - v.visit_expr_mut(&mut *node.expr); - skip!(node.question_token); -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_expr_try_block_mut<V>(v: &mut V, node: &mut crate::ExprTryBlock) -where - V: VisitMut + ?Sized, -{ - v.visit_attributes_mut(&mut node.attrs); - skip!(node.try_token); - v.visit_block_mut(&mut node.block); -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn visit_expr_tuple_mut<V>(v: &mut V, node: &mut crate::ExprTuple) -where - V: VisitMut + ?Sized, -{ - v.visit_attributes_mut(&mut node.attrs); - skip!(node.paren_token); - for mut el in Punctuated::pairs_mut(&mut node.elems) { - let it = el.value_mut(); - v.visit_expr_mut(it); - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn visit_expr_unary_mut<V>(v: &mut V, node: &mut crate::ExprUnary) -where - V: VisitMut + ?Sized, -{ - 
v.visit_attributes_mut(&mut node.attrs); - v.visit_un_op_mut(&mut node.op); - v.visit_expr_mut(&mut *node.expr); -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_expr_unsafe_mut<V>(v: &mut V, node: &mut crate::ExprUnsafe) -where - V: VisitMut + ?Sized, -{ - v.visit_attributes_mut(&mut node.attrs); - skip!(node.unsafe_token); - v.visit_block_mut(&mut node.block); -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_expr_while_mut<V>(v: &mut V, node: &mut crate::ExprWhile) -where - V: VisitMut + ?Sized, -{ - v.visit_attributes_mut(&mut node.attrs); - if let Some(it) = &mut node.label { - v.visit_label_mut(it); - } - skip!(node.while_token); - v.visit_expr_mut(&mut *node.cond); - v.visit_block_mut(&mut node.body); -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_expr_yield_mut<V>(v: &mut V, node: &mut crate::ExprYield) -where - V: VisitMut + ?Sized, -{ - v.visit_attributes_mut(&mut node.attrs); - skip!(node.yield_token); - if let Some(it) = &mut node.expr { - v.visit_expr_mut(&mut **it); - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn visit_field_mut<V>(v: &mut V, node: &mut crate::Field) -where - V: VisitMut + ?Sized, -{ - v.visit_attributes_mut(&mut node.attrs); - v.visit_visibility_mut(&mut node.vis); - v.visit_field_mutability_mut(&mut node.mutability); - if let Some(it) = &mut node.ident { - v.visit_ident_mut(it); - } - skip!(node.colon_token); - v.visit_type_mut(&mut node.ty); -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn visit_field_mutability_mut<V>(v: &mut V, node: &mut crate::FieldMutability) -where - V: VisitMut + ?Sized, -{ - match node { - crate::FieldMutability::None => {} - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_field_pat_mut<V>(v: &mut V, node: &mut crate::FieldPat) -where - V: VisitMut + ?Sized, -{ - v.visit_attributes_mut(&mut node.attrs); - v.visit_member_mut(&mut node.member); - skip!(node.colon_token); - v.visit_pat_mut(&mut *node.pat); -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn visit_field_value_mut<V>(v: &mut V, node: &mut crate::FieldValue) -where - V: VisitMut + ?Sized, -{ - v.visit_attributes_mut(&mut node.attrs); - v.visit_member_mut(&mut node.member); - skip!(node.colon_token); - v.visit_expr_mut(&mut node.expr); -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn visit_fields_mut<V>(v: &mut V, node: &mut crate::Fields) -where - V: VisitMut + ?Sized, -{ - match node { - crate::Fields::Named(_binding_0) => { - v.visit_fields_named_mut(_binding_0); - } - crate::Fields::Unnamed(_binding_0) => { - v.visit_fields_unnamed_mut(_binding_0); - } - crate::Fields::Unit => {} - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn visit_fields_named_mut<V>(v: &mut V, node: &mut crate::FieldsNamed) -where - V: VisitMut + ?Sized, -{ - skip!(node.brace_token); - for mut el in Punctuated::pairs_mut(&mut node.named) { - let it = el.value_mut(); - v.visit_field_mut(it); - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = 
"derive", feature = "full"))))] -pub fn visit_fields_unnamed_mut<V>(v: &mut V, node: &mut crate::FieldsUnnamed) -where - V: VisitMut + ?Sized, -{ - skip!(node.paren_token); - for mut el in Punctuated::pairs_mut(&mut node.unnamed) { - let it = el.value_mut(); - v.visit_field_mut(it); - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_file_mut<V>(v: &mut V, node: &mut crate::File) -where - V: VisitMut + ?Sized, -{ - skip!(node.shebang); - v.visit_attributes_mut(&mut node.attrs); - for it in &mut node.items { - v.visit_item_mut(it); - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_fn_arg_mut<V>(v: &mut V, node: &mut crate::FnArg) -where - V: VisitMut + ?Sized, -{ - match node { - crate::FnArg::Receiver(_binding_0) => { - v.visit_receiver_mut(_binding_0); - } - crate::FnArg::Typed(_binding_0) => { - v.visit_pat_type_mut(_binding_0); - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_foreign_item_mut<V>(v: &mut V, node: &mut crate::ForeignItem) -where - V: VisitMut + ?Sized, -{ - match node { - crate::ForeignItem::Fn(_binding_0) => { - v.visit_foreign_item_fn_mut(_binding_0); - } - crate::ForeignItem::Static(_binding_0) => { - v.visit_foreign_item_static_mut(_binding_0); - } - crate::ForeignItem::Type(_binding_0) => { - v.visit_foreign_item_type_mut(_binding_0); - } - crate::ForeignItem::Macro(_binding_0) => { - v.visit_foreign_item_macro_mut(_binding_0); - } - crate::ForeignItem::Verbatim(_binding_0) => { - v.visit_token_stream_mut(_binding_0); - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_foreign_item_fn_mut<V>(v: &mut V, node: &mut crate::ForeignItemFn) -where - V: VisitMut + ?Sized, -{ - v.visit_attributes_mut(&mut node.attrs); - v.visit_visibility_mut(&mut node.vis); - v.visit_signature_mut(&mut node.sig); - skip!(node.semi_token); -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_foreign_item_macro_mut<V>(v: &mut V, node: &mut crate::ForeignItemMacro) -where - V: VisitMut + ?Sized, -{ - v.visit_attributes_mut(&mut node.attrs); - v.visit_macro_mut(&mut node.mac); - skip!(node.semi_token); -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_foreign_item_static_mut<V>(v: &mut V, node: &mut crate::ForeignItemStatic) -where - V: VisitMut + ?Sized, -{ - v.visit_attributes_mut(&mut node.attrs); - v.visit_visibility_mut(&mut node.vis); - skip!(node.static_token); - v.visit_static_mutability_mut(&mut node.mutability); - v.visit_ident_mut(&mut node.ident); - skip!(node.colon_token); - v.visit_type_mut(&mut *node.ty); - skip!(node.semi_token); -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_foreign_item_type_mut<V>(v: &mut V, node: &mut crate::ForeignItemType) -where - V: VisitMut + ?Sized, -{ - v.visit_attributes_mut(&mut node.attrs); - v.visit_visibility_mut(&mut node.vis); - skip!(node.type_token); - v.visit_ident_mut(&mut node.ident); - v.visit_generics_mut(&mut node.generics); - skip!(node.semi_token); -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn visit_generic_argument_mut<V>(v: &mut V, node: &mut crate::GenericArgument) -where - V: VisitMut + ?Sized, -{ - match node { - crate::GenericArgument::Lifetime(_binding_0) => { - v.visit_lifetime_mut(_binding_0); - } - 
crate::GenericArgument::Type(_binding_0) => { - v.visit_type_mut(_binding_0); - } - crate::GenericArgument::Const(_binding_0) => { - v.visit_expr_mut(_binding_0); - } - crate::GenericArgument::AssocType(_binding_0) => { - v.visit_assoc_type_mut(_binding_0); - } - crate::GenericArgument::AssocConst(_binding_0) => { - v.visit_assoc_const_mut(_binding_0); - } - crate::GenericArgument::Constraint(_binding_0) => { - v.visit_constraint_mut(_binding_0); - } - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn visit_generic_param_mut<V>(v: &mut V, node: &mut crate::GenericParam) -where - V: VisitMut + ?Sized, -{ - match node { - crate::GenericParam::Lifetime(_binding_0) => { - v.visit_lifetime_param_mut(_binding_0); - } - crate::GenericParam::Type(_binding_0) => { - v.visit_type_param_mut(_binding_0); - } - crate::GenericParam::Const(_binding_0) => { - v.visit_const_param_mut(_binding_0); - } - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn visit_generics_mut<V>(v: &mut V, node: &mut crate::Generics) -where - V: VisitMut + ?Sized, -{ - skip!(node.lt_token); - for mut el in Punctuated::pairs_mut(&mut node.params) { - let it = el.value_mut(); - v.visit_generic_param_mut(it); - } - skip!(node.gt_token); - if let Some(it) = &mut node.where_clause { - v.visit_where_clause_mut(it); - } -} -pub fn visit_ident_mut<V>(v: &mut V, node: &mut proc_macro2::Ident) -where - V: VisitMut + ?Sized, -{ - let mut span = node.span(); - v.visit_span_mut(&mut span); - node.set_span(span); -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_impl_item_mut<V>(v: &mut V, node: &mut crate::ImplItem) -where - V: VisitMut + ?Sized, -{ - match node { - crate::ImplItem::Const(_binding_0) => { - v.visit_impl_item_const_mut(_binding_0); - } - crate::ImplItem::Fn(_binding_0) => { - v.visit_impl_item_fn_mut(_binding_0); - } - crate::ImplItem::Type(_binding_0) => { - v.visit_impl_item_type_mut(_binding_0); - } - crate::ImplItem::Macro(_binding_0) => { - v.visit_impl_item_macro_mut(_binding_0); - } - crate::ImplItem::Verbatim(_binding_0) => { - v.visit_token_stream_mut(_binding_0); - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_impl_item_const_mut<V>(v: &mut V, node: &mut crate::ImplItemConst) -where - V: VisitMut + ?Sized, -{ - v.visit_attributes_mut(&mut node.attrs); - v.visit_visibility_mut(&mut node.vis); - skip!(node.defaultness); - skip!(node.const_token); - v.visit_ident_mut(&mut node.ident); - v.visit_generics_mut(&mut node.generics); - skip!(node.colon_token); - v.visit_type_mut(&mut node.ty); - skip!(node.eq_token); - v.visit_expr_mut(&mut node.expr); - skip!(node.semi_token); -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_impl_item_fn_mut<V>(v: &mut V, node: &mut crate::ImplItemFn) -where - V: VisitMut + ?Sized, -{ - v.visit_attributes_mut(&mut node.attrs); - v.visit_visibility_mut(&mut node.vis); - skip!(node.defaultness); - v.visit_signature_mut(&mut node.sig); - v.visit_block_mut(&mut node.block); -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_impl_item_macro_mut<V>(v: &mut V, node: &mut crate::ImplItemMacro) -where - V: VisitMut + ?Sized, -{ - v.visit_attributes_mut(&mut node.attrs); - v.visit_macro_mut(&mut node.mac); - skip!(node.semi_token); -} -#[cfg(feature = 
"full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_impl_item_type_mut<V>(v: &mut V, node: &mut crate::ImplItemType) -where - V: VisitMut + ?Sized, -{ - v.visit_attributes_mut(&mut node.attrs); - v.visit_visibility_mut(&mut node.vis); - skip!(node.defaultness); - skip!(node.type_token); - v.visit_ident_mut(&mut node.ident); - v.visit_generics_mut(&mut node.generics); - skip!(node.eq_token); - v.visit_type_mut(&mut node.ty); - skip!(node.semi_token); -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_impl_restriction_mut<V>(v: &mut V, node: &mut crate::ImplRestriction) -where - V: VisitMut + ?Sized, -{ - match *node {} -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn visit_index_mut<V>(v: &mut V, node: &mut crate::Index) -where - V: VisitMut + ?Sized, -{ - skip!(node.index); - v.visit_span_mut(&mut node.span); -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_item_mut<V>(v: &mut V, node: &mut crate::Item) -where - V: VisitMut + ?Sized, -{ - match node { - crate::Item::Const(_binding_0) => { - v.visit_item_const_mut(_binding_0); - } - crate::Item::Enum(_binding_0) => { - v.visit_item_enum_mut(_binding_0); - } - crate::Item::ExternCrate(_binding_0) => { - v.visit_item_extern_crate_mut(_binding_0); - } - crate::Item::Fn(_binding_0) => { - v.visit_item_fn_mut(_binding_0); - } - crate::Item::ForeignMod(_binding_0) => { - v.visit_item_foreign_mod_mut(_binding_0); - } - crate::Item::Impl(_binding_0) => { - v.visit_item_impl_mut(_binding_0); - } - crate::Item::Macro(_binding_0) => { - v.visit_item_macro_mut(_binding_0); - } - crate::Item::Mod(_binding_0) => { - v.visit_item_mod_mut(_binding_0); - } - crate::Item::Static(_binding_0) => { - v.visit_item_static_mut(_binding_0); - } - crate::Item::Struct(_binding_0) => { - v.visit_item_struct_mut(_binding_0); - } - crate::Item::Trait(_binding_0) => { - v.visit_item_trait_mut(_binding_0); - } - crate::Item::TraitAlias(_binding_0) => { - v.visit_item_trait_alias_mut(_binding_0); - } - crate::Item::Type(_binding_0) => { - v.visit_item_type_mut(_binding_0); - } - crate::Item::Union(_binding_0) => { - v.visit_item_union_mut(_binding_0); - } - crate::Item::Use(_binding_0) => { - v.visit_item_use_mut(_binding_0); - } - crate::Item::Verbatim(_binding_0) => { - v.visit_token_stream_mut(_binding_0); - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_item_const_mut<V>(v: &mut V, node: &mut crate::ItemConst) -where - V: VisitMut + ?Sized, -{ - v.visit_attributes_mut(&mut node.attrs); - v.visit_visibility_mut(&mut node.vis); - skip!(node.const_token); - v.visit_ident_mut(&mut node.ident); - v.visit_generics_mut(&mut node.generics); - skip!(node.colon_token); - v.visit_type_mut(&mut *node.ty); - skip!(node.eq_token); - v.visit_expr_mut(&mut *node.expr); - skip!(node.semi_token); -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_item_enum_mut<V>(v: &mut V, node: &mut crate::ItemEnum) -where - V: VisitMut + ?Sized, -{ - v.visit_attributes_mut(&mut node.attrs); - v.visit_visibility_mut(&mut node.vis); - skip!(node.enum_token); - v.visit_ident_mut(&mut node.ident); - v.visit_generics_mut(&mut node.generics); - skip!(node.brace_token); - for mut el in Punctuated::pairs_mut(&mut node.variants) { - let it = el.value_mut(); - v.visit_variant_mut(it); - } -} -#[cfg(feature = "full")] 
-#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_item_extern_crate_mut<V>(v: &mut V, node: &mut crate::ItemExternCrate) -where - V: VisitMut + ?Sized, -{ - v.visit_attributes_mut(&mut node.attrs); - v.visit_visibility_mut(&mut node.vis); - skip!(node.extern_token); - skip!(node.crate_token); - v.visit_ident_mut(&mut node.ident); - if let Some(it) = &mut node.rename { - skip!((it).0); - v.visit_ident_mut(&mut (it).1); - } - skip!(node.semi_token); -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_item_fn_mut<V>(v: &mut V, node: &mut crate::ItemFn) -where - V: VisitMut + ?Sized, -{ - v.visit_attributes_mut(&mut node.attrs); - v.visit_visibility_mut(&mut node.vis); - v.visit_signature_mut(&mut node.sig); - v.visit_block_mut(&mut *node.block); -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_item_foreign_mod_mut<V>(v: &mut V, node: &mut crate::ItemForeignMod) -where - V: VisitMut + ?Sized, -{ - v.visit_attributes_mut(&mut node.attrs); - skip!(node.unsafety); - v.visit_abi_mut(&mut node.abi); - skip!(node.brace_token); - for it in &mut node.items { - v.visit_foreign_item_mut(it); - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_item_impl_mut<V>(v: &mut V, node: &mut crate::ItemImpl) -where - V: VisitMut + ?Sized, -{ - v.visit_attributes_mut(&mut node.attrs); - skip!(node.defaultness); - skip!(node.unsafety); - skip!(node.impl_token); - v.visit_generics_mut(&mut node.generics); - if let Some(it) = &mut node.trait_ { - skip!((it).0); - v.visit_path_mut(&mut (it).1); - skip!((it).2); - } - v.visit_type_mut(&mut *node.self_ty); - skip!(node.brace_token); - for it in &mut node.items { - v.visit_impl_item_mut(it); - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_item_macro_mut<V>(v: &mut V, node: &mut crate::ItemMacro) -where - V: VisitMut + ?Sized, -{ - v.visit_attributes_mut(&mut node.attrs); - if let Some(it) = &mut node.ident { - v.visit_ident_mut(it); - } - v.visit_macro_mut(&mut node.mac); - skip!(node.semi_token); -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_item_mod_mut<V>(v: &mut V, node: &mut crate::ItemMod) -where - V: VisitMut + ?Sized, -{ - v.visit_attributes_mut(&mut node.attrs); - v.visit_visibility_mut(&mut node.vis); - skip!(node.unsafety); - skip!(node.mod_token); - v.visit_ident_mut(&mut node.ident); - if let Some(it) = &mut node.content { - skip!((it).0); - for it in &mut (it).1 { - v.visit_item_mut(it); - } - } - skip!(node.semi); -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_item_static_mut<V>(v: &mut V, node: &mut crate::ItemStatic) -where - V: VisitMut + ?Sized, -{ - v.visit_attributes_mut(&mut node.attrs); - v.visit_visibility_mut(&mut node.vis); - skip!(node.static_token); - v.visit_static_mutability_mut(&mut node.mutability); - v.visit_ident_mut(&mut node.ident); - skip!(node.colon_token); - v.visit_type_mut(&mut *node.ty); - skip!(node.eq_token); - v.visit_expr_mut(&mut *node.expr); - skip!(node.semi_token); -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_item_struct_mut<V>(v: &mut V, node: &mut crate::ItemStruct) -where - V: VisitMut + ?Sized, -{ - v.visit_attributes_mut(&mut node.attrs); - v.visit_visibility_mut(&mut node.vis); - skip!(node.struct_token); - v.visit_ident_mut(&mut node.ident); - v.visit_generics_mut(&mut node.generics); - 
v.visit_fields_mut(&mut node.fields); - skip!(node.semi_token); -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_item_trait_mut<V>(v: &mut V, node: &mut crate::ItemTrait) -where - V: VisitMut + ?Sized, -{ - v.visit_attributes_mut(&mut node.attrs); - v.visit_visibility_mut(&mut node.vis); - skip!(node.unsafety); - skip!(node.auto_token); - if let Some(it) = &mut node.restriction { - v.visit_impl_restriction_mut(it); - } - skip!(node.trait_token); - v.visit_ident_mut(&mut node.ident); - v.visit_generics_mut(&mut node.generics); - skip!(node.colon_token); - for mut el in Punctuated::pairs_mut(&mut node.supertraits) { - let it = el.value_mut(); - v.visit_type_param_bound_mut(it); - } - skip!(node.brace_token); - for it in &mut node.items { - v.visit_trait_item_mut(it); - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_item_trait_alias_mut<V>(v: &mut V, node: &mut crate::ItemTraitAlias) -where - V: VisitMut + ?Sized, -{ - v.visit_attributes_mut(&mut node.attrs); - v.visit_visibility_mut(&mut node.vis); - skip!(node.trait_token); - v.visit_ident_mut(&mut node.ident); - v.visit_generics_mut(&mut node.generics); - skip!(node.eq_token); - for mut el in Punctuated::pairs_mut(&mut node.bounds) { - let it = el.value_mut(); - v.visit_type_param_bound_mut(it); - } - skip!(node.semi_token); -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_item_type_mut<V>(v: &mut V, node: &mut crate::ItemType) -where - V: VisitMut + ?Sized, -{ - v.visit_attributes_mut(&mut node.attrs); - v.visit_visibility_mut(&mut node.vis); - skip!(node.type_token); - v.visit_ident_mut(&mut node.ident); - v.visit_generics_mut(&mut node.generics); - skip!(node.eq_token); - v.visit_type_mut(&mut *node.ty); - skip!(node.semi_token); -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_item_union_mut<V>(v: &mut V, node: &mut crate::ItemUnion) -where - V: VisitMut + ?Sized, -{ - v.visit_attributes_mut(&mut node.attrs); - v.visit_visibility_mut(&mut node.vis); - skip!(node.union_token); - v.visit_ident_mut(&mut node.ident); - v.visit_generics_mut(&mut node.generics); - v.visit_fields_named_mut(&mut node.fields); -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_item_use_mut<V>(v: &mut V, node: &mut crate::ItemUse) -where - V: VisitMut + ?Sized, -{ - v.visit_attributes_mut(&mut node.attrs); - v.visit_visibility_mut(&mut node.vis); - skip!(node.use_token); - skip!(node.leading_colon); - v.visit_use_tree_mut(&mut node.tree); - skip!(node.semi_token); -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_label_mut<V>(v: &mut V, node: &mut crate::Label) -where - V: VisitMut + ?Sized, -{ - v.visit_lifetime_mut(&mut node.name); - skip!(node.colon_token); -} -pub fn visit_lifetime_mut<V>(v: &mut V, node: &mut crate::Lifetime) -where - V: VisitMut + ?Sized, -{ - v.visit_span_mut(&mut node.apostrophe); - v.visit_ident_mut(&mut node.ident); -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn visit_lifetime_param_mut<V>(v: &mut V, node: &mut crate::LifetimeParam) -where - V: VisitMut + ?Sized, -{ - v.visit_attributes_mut(&mut node.attrs); - v.visit_lifetime_mut(&mut node.lifetime); - skip!(node.colon_token); - for mut el in Punctuated::pairs_mut(&mut node.bounds) { - let it = el.value_mut(); - v.visit_lifetime_mut(it); - } 
-} -pub fn visit_lit_mut<V>(v: &mut V, node: &mut crate::Lit) -where - V: VisitMut + ?Sized, -{ - match node { - crate::Lit::Str(_binding_0) => { - v.visit_lit_str_mut(_binding_0); - } - crate::Lit::ByteStr(_binding_0) => { - v.visit_lit_byte_str_mut(_binding_0); - } - crate::Lit::CStr(_binding_0) => { - v.visit_lit_cstr_mut(_binding_0); - } - crate::Lit::Byte(_binding_0) => { - v.visit_lit_byte_mut(_binding_0); - } - crate::Lit::Char(_binding_0) => { - v.visit_lit_char_mut(_binding_0); - } - crate::Lit::Int(_binding_0) => { - v.visit_lit_int_mut(_binding_0); - } - crate::Lit::Float(_binding_0) => { - v.visit_lit_float_mut(_binding_0); - } - crate::Lit::Bool(_binding_0) => { - v.visit_lit_bool_mut(_binding_0); - } - crate::Lit::Verbatim(_binding_0) => { - skip!(_binding_0); - } - } -} -pub fn visit_lit_bool_mut<V>(v: &mut V, node: &mut crate::LitBool) -where - V: VisitMut + ?Sized, -{ - skip!(node.value); - v.visit_span_mut(&mut node.span); -} -pub fn visit_lit_byte_mut<V>(v: &mut V, node: &mut crate::LitByte) -where - V: VisitMut + ?Sized, -{} -pub fn visit_lit_byte_str_mut<V>(v: &mut V, node: &mut crate::LitByteStr) -where - V: VisitMut + ?Sized, -{} -pub fn visit_lit_cstr_mut<V>(v: &mut V, node: &mut crate::LitCStr) -where - V: VisitMut + ?Sized, -{} -pub fn visit_lit_char_mut<V>(v: &mut V, node: &mut crate::LitChar) -where - V: VisitMut + ?Sized, -{} -pub fn visit_lit_float_mut<V>(v: &mut V, node: &mut crate::LitFloat) -where - V: VisitMut + ?Sized, -{} -pub fn visit_lit_int_mut<V>(v: &mut V, node: &mut crate::LitInt) -where - V: VisitMut + ?Sized, -{} -pub fn visit_lit_str_mut<V>(v: &mut V, node: &mut crate::LitStr) -where - V: VisitMut + ?Sized, -{} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_local_mut<V>(v: &mut V, node: &mut crate::Local) -where - V: VisitMut + ?Sized, -{ - v.visit_attributes_mut(&mut node.attrs); - skip!(node.let_token); - v.visit_pat_mut(&mut node.pat); - if let Some(it) = &mut node.init { - v.visit_local_init_mut(it); - } - skip!(node.semi_token); -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_local_init_mut<V>(v: &mut V, node: &mut crate::LocalInit) -where - V: VisitMut + ?Sized, -{ - skip!(node.eq_token); - v.visit_expr_mut(&mut *node.expr); - if let Some(it) = &mut node.diverge { - skip!((it).0); - v.visit_expr_mut(&mut *(it).1); - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn visit_macro_mut<V>(v: &mut V, node: &mut crate::Macro) -where - V: VisitMut + ?Sized, -{ - v.visit_path_mut(&mut node.path); - skip!(node.bang_token); - v.visit_macro_delimiter_mut(&mut node.delimiter); - v.visit_token_stream_mut(&mut node.tokens); -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn visit_macro_delimiter_mut<V>(v: &mut V, node: &mut crate::MacroDelimiter) -where - V: VisitMut + ?Sized, -{ - match node { - crate::MacroDelimiter::Paren(_binding_0) => { - skip!(_binding_0); - } - crate::MacroDelimiter::Brace(_binding_0) => { - skip!(_binding_0); - } - crate::MacroDelimiter::Bracket(_binding_0) => { - skip!(_binding_0); - } - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn visit_member_mut<V>(v: &mut V, node: &mut crate::Member) -where - V: VisitMut + ?Sized, -{ - match node { - crate::Member::Named(_binding_0) => { - 
v.visit_ident_mut(_binding_0); - } - crate::Member::Unnamed(_binding_0) => { - v.visit_index_mut(_binding_0); - } - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn visit_meta_mut<V>(v: &mut V, node: &mut crate::Meta) -where - V: VisitMut + ?Sized, -{ - match node { - crate::Meta::Path(_binding_0) => { - v.visit_path_mut(_binding_0); - } - crate::Meta::List(_binding_0) => { - v.visit_meta_list_mut(_binding_0); - } - crate::Meta::NameValue(_binding_0) => { - v.visit_meta_name_value_mut(_binding_0); - } - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn visit_meta_list_mut<V>(v: &mut V, node: &mut crate::MetaList) -where - V: VisitMut + ?Sized, -{ - v.visit_path_mut(&mut node.path); - v.visit_macro_delimiter_mut(&mut node.delimiter); - v.visit_token_stream_mut(&mut node.tokens); -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn visit_meta_name_value_mut<V>(v: &mut V, node: &mut crate::MetaNameValue) -where - V: VisitMut + ?Sized, -{ - v.visit_path_mut(&mut node.path); - skip!(node.eq_token); - v.visit_expr_mut(&mut node.value); -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn visit_parenthesized_generic_arguments_mut<V>( - v: &mut V, - node: &mut crate::ParenthesizedGenericArguments, -) -where - V: VisitMut + ?Sized, -{ - skip!(node.paren_token); - for mut el in Punctuated::pairs_mut(&mut node.inputs) { - let it = el.value_mut(); - v.visit_type_mut(it); - } - v.visit_return_type_mut(&mut node.output); -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_pat_mut<V>(v: &mut V, node: &mut crate::Pat) -where - V: VisitMut + ?Sized, -{ - match node { - crate::Pat::Const(_binding_0) => { - v.visit_expr_const_mut(_binding_0); - } - crate::Pat::Ident(_binding_0) => { - v.visit_pat_ident_mut(_binding_0); - } - crate::Pat::Lit(_binding_0) => { - v.visit_expr_lit_mut(_binding_0); - } - crate::Pat::Macro(_binding_0) => { - v.visit_expr_macro_mut(_binding_0); - } - crate::Pat::Or(_binding_0) => { - v.visit_pat_or_mut(_binding_0); - } - crate::Pat::Paren(_binding_0) => { - v.visit_pat_paren_mut(_binding_0); - } - crate::Pat::Path(_binding_0) => { - v.visit_expr_path_mut(_binding_0); - } - crate::Pat::Range(_binding_0) => { - v.visit_expr_range_mut(_binding_0); - } - crate::Pat::Reference(_binding_0) => { - v.visit_pat_reference_mut(_binding_0); - } - crate::Pat::Rest(_binding_0) => { - v.visit_pat_rest_mut(_binding_0); - } - crate::Pat::Slice(_binding_0) => { - v.visit_pat_slice_mut(_binding_0); - } - crate::Pat::Struct(_binding_0) => { - v.visit_pat_struct_mut(_binding_0); - } - crate::Pat::Tuple(_binding_0) => { - v.visit_pat_tuple_mut(_binding_0); - } - crate::Pat::TupleStruct(_binding_0) => { - v.visit_pat_tuple_struct_mut(_binding_0); - } - crate::Pat::Type(_binding_0) => { - v.visit_pat_type_mut(_binding_0); - } - crate::Pat::Verbatim(_binding_0) => { - v.visit_token_stream_mut(_binding_0); - } - crate::Pat::Wild(_binding_0) => { - v.visit_pat_wild_mut(_binding_0); - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_pat_ident_mut<V>(v: &mut V, node: &mut crate::PatIdent) -where - V: VisitMut + ?Sized, -{ - v.visit_attributes_mut(&mut node.attrs); - skip!(node.by_ref); - 
skip!(node.mutability); - v.visit_ident_mut(&mut node.ident); - if let Some(it) = &mut node.subpat { - skip!((it).0); - v.visit_pat_mut(&mut *(it).1); - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_pat_or_mut<V>(v: &mut V, node: &mut crate::PatOr) -where - V: VisitMut + ?Sized, -{ - v.visit_attributes_mut(&mut node.attrs); - skip!(node.leading_vert); - for mut el in Punctuated::pairs_mut(&mut node.cases) { - let it = el.value_mut(); - v.visit_pat_mut(it); - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_pat_paren_mut<V>(v: &mut V, node: &mut crate::PatParen) -where - V: VisitMut + ?Sized, -{ - v.visit_attributes_mut(&mut node.attrs); - skip!(node.paren_token); - v.visit_pat_mut(&mut *node.pat); -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_pat_reference_mut<V>(v: &mut V, node: &mut crate::PatReference) -where - V: VisitMut + ?Sized, -{ - v.visit_attributes_mut(&mut node.attrs); - skip!(node.and_token); - skip!(node.mutability); - v.visit_pat_mut(&mut *node.pat); -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_pat_rest_mut<V>(v: &mut V, node: &mut crate::PatRest) -where - V: VisitMut + ?Sized, -{ - v.visit_attributes_mut(&mut node.attrs); - skip!(node.dot2_token); -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_pat_slice_mut<V>(v: &mut V, node: &mut crate::PatSlice) -where - V: VisitMut + ?Sized, -{ - v.visit_attributes_mut(&mut node.attrs); - skip!(node.bracket_token); - for mut el in Punctuated::pairs_mut(&mut node.elems) { - let it = el.value_mut(); - v.visit_pat_mut(it); - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_pat_struct_mut<V>(v: &mut V, node: &mut crate::PatStruct) -where - V: VisitMut + ?Sized, -{ - v.visit_attributes_mut(&mut node.attrs); - if let Some(it) = &mut node.qself { - v.visit_qself_mut(it); - } - v.visit_path_mut(&mut node.path); - skip!(node.brace_token); - for mut el in Punctuated::pairs_mut(&mut node.fields) { - let it = el.value_mut(); - v.visit_field_pat_mut(it); - } - if let Some(it) = &mut node.rest { - v.visit_pat_rest_mut(it); - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_pat_tuple_mut<V>(v: &mut V, node: &mut crate::PatTuple) -where - V: VisitMut + ?Sized, -{ - v.visit_attributes_mut(&mut node.attrs); - skip!(node.paren_token); - for mut el in Punctuated::pairs_mut(&mut node.elems) { - let it = el.value_mut(); - v.visit_pat_mut(it); - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_pat_tuple_struct_mut<V>(v: &mut V, node: &mut crate::PatTupleStruct) -where - V: VisitMut + ?Sized, -{ - v.visit_attributes_mut(&mut node.attrs); - if let Some(it) = &mut node.qself { - v.visit_qself_mut(it); - } - v.visit_path_mut(&mut node.path); - skip!(node.paren_token); - for mut el in Punctuated::pairs_mut(&mut node.elems) { - let it = el.value_mut(); - v.visit_pat_mut(it); - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_pat_type_mut<V>(v: &mut V, node: &mut crate::PatType) -where - V: VisitMut + ?Sized, -{ - v.visit_attributes_mut(&mut node.attrs); - v.visit_pat_mut(&mut *node.pat); - skip!(node.colon_token); - v.visit_type_mut(&mut *node.ty); -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_pat_wild_mut<V>(v: &mut 
V, node: &mut crate::PatWild) -where - V: VisitMut + ?Sized, -{ - v.visit_attributes_mut(&mut node.attrs); - skip!(node.underscore_token); -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn visit_path_mut<V>(v: &mut V, node: &mut crate::Path) -where - V: VisitMut + ?Sized, -{ - skip!(node.leading_colon); - for mut el in Punctuated::pairs_mut(&mut node.segments) { - let it = el.value_mut(); - v.visit_path_segment_mut(it); - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn visit_path_arguments_mut<V>(v: &mut V, node: &mut crate::PathArguments) -where - V: VisitMut + ?Sized, -{ - match node { - crate::PathArguments::None => {} - crate::PathArguments::AngleBracketed(_binding_0) => { - v.visit_angle_bracketed_generic_arguments_mut(_binding_0); - } - crate::PathArguments::Parenthesized(_binding_0) => { - v.visit_parenthesized_generic_arguments_mut(_binding_0); - } - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn visit_path_segment_mut<V>(v: &mut V, node: &mut crate::PathSegment) -where - V: VisitMut + ?Sized, -{ - v.visit_ident_mut(&mut node.ident); - v.visit_path_arguments_mut(&mut node.arguments); -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_pointer_mutability_mut<V>(v: &mut V, node: &mut crate::PointerMutability) -where - V: VisitMut + ?Sized, -{ - match node { - crate::PointerMutability::Const(_binding_0) => { - skip!(_binding_0); - } - crate::PointerMutability::Mut(_binding_0) => { - skip!(_binding_0); - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_precise_capture_mut<V>(v: &mut V, node: &mut crate::PreciseCapture) -where - V: VisitMut + ?Sized, -{ - skip!(node.use_token); - skip!(node.lt_token); - for mut el in Punctuated::pairs_mut(&mut node.params) { - let it = el.value_mut(); - v.visit_captured_param_mut(it); - } - skip!(node.gt_token); -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn visit_predicate_lifetime_mut<V>(v: &mut V, node: &mut crate::PredicateLifetime) -where - V: VisitMut + ?Sized, -{ - v.visit_lifetime_mut(&mut node.lifetime); - skip!(node.colon_token); - for mut el in Punctuated::pairs_mut(&mut node.bounds) { - let it = el.value_mut(); - v.visit_lifetime_mut(it); - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn visit_predicate_type_mut<V>(v: &mut V, node: &mut crate::PredicateType) -where - V: VisitMut + ?Sized, -{ - if let Some(it) = &mut node.lifetimes { - v.visit_bound_lifetimes_mut(it); - } - v.visit_type_mut(&mut node.bounded_ty); - skip!(node.colon_token); - for mut el in Punctuated::pairs_mut(&mut node.bounds) { - let it = el.value_mut(); - v.visit_type_param_bound_mut(it); - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn visit_qself_mut<V>(v: &mut V, node: &mut crate::QSelf) -where - V: VisitMut + ?Sized, -{ - skip!(node.lt_token); - v.visit_type_mut(&mut *node.ty); - skip!(node.position); - skip!(node.as_token); - skip!(node.gt_token); -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn 
visit_range_limits_mut<V>(v: &mut V, node: &mut crate::RangeLimits) -where - V: VisitMut + ?Sized, -{ - match node { - crate::RangeLimits::HalfOpen(_binding_0) => { - skip!(_binding_0); - } - crate::RangeLimits::Closed(_binding_0) => { - skip!(_binding_0); - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_receiver_mut<V>(v: &mut V, node: &mut crate::Receiver) -where - V: VisitMut + ?Sized, -{ - v.visit_attributes_mut(&mut node.attrs); - if let Some(it) = &mut node.reference { - skip!((it).0); - if let Some(it) = &mut (it).1 { - v.visit_lifetime_mut(it); - } - } - skip!(node.mutability); - skip!(node.self_token); - skip!(node.colon_token); - v.visit_type_mut(&mut *node.ty); -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn visit_return_type_mut<V>(v: &mut V, node: &mut crate::ReturnType) -where - V: VisitMut + ?Sized, -{ - match node { - crate::ReturnType::Default => {} - crate::ReturnType::Type(_binding_0, _binding_1) => { - skip!(_binding_0); - v.visit_type_mut(&mut **_binding_1); - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_signature_mut<V>(v: &mut V, node: &mut crate::Signature) -where - V: VisitMut + ?Sized, -{ - skip!(node.constness); - skip!(node.asyncness); - skip!(node.unsafety); - if let Some(it) = &mut node.abi { - v.visit_abi_mut(it); - } - skip!(node.fn_token); - v.visit_ident_mut(&mut node.ident); - v.visit_generics_mut(&mut node.generics); - skip!(node.paren_token); - for mut el in Punctuated::pairs_mut(&mut node.inputs) { - let it = el.value_mut(); - v.visit_fn_arg_mut(it); - } - if let Some(it) = &mut node.variadic { - v.visit_variadic_mut(it); - } - v.visit_return_type_mut(&mut node.output); -} -pub fn visit_span_mut<V>(v: &mut V, node: &mut proc_macro2::Span) -where - V: VisitMut + ?Sized, -{} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_static_mutability_mut<V>(v: &mut V, node: &mut crate::StaticMutability) -where - V: VisitMut + ?Sized, -{ - match node { - crate::StaticMutability::Mut(_binding_0) => { - skip!(_binding_0); - } - crate::StaticMutability::None => {} - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_stmt_mut<V>(v: &mut V, node: &mut crate::Stmt) -where - V: VisitMut + ?Sized, -{ - match node { - crate::Stmt::Local(_binding_0) => { - v.visit_local_mut(_binding_0); - } - crate::Stmt::Item(_binding_0) => { - v.visit_item_mut(_binding_0); - } - crate::Stmt::Expr(_binding_0, _binding_1) => { - v.visit_expr_mut(_binding_0); - skip!(_binding_1); - } - crate::Stmt::Macro(_binding_0) => { - v.visit_stmt_macro_mut(_binding_0); - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_stmt_macro_mut<V>(v: &mut V, node: &mut crate::StmtMacro) -where - V: VisitMut + ?Sized, -{ - v.visit_attributes_mut(&mut node.attrs); - v.visit_macro_mut(&mut node.mac); - skip!(node.semi_token); -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn visit_trait_bound_mut<V>(v: &mut V, node: &mut crate::TraitBound) -where - V: VisitMut + ?Sized, -{ - skip!(node.paren_token); - v.visit_trait_bound_modifier_mut(&mut node.modifier); - if let Some(it) = &mut node.lifetimes { - v.visit_bound_lifetimes_mut(it); - } - v.visit_path_mut(&mut node.path); -} -#[cfg(any(feature = "derive", feature = 
"full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn visit_trait_bound_modifier_mut<V>(v: &mut V, node: &mut crate::TraitBoundModifier) -where - V: VisitMut + ?Sized, -{ - match node { - crate::TraitBoundModifier::None => {} - crate::TraitBoundModifier::Maybe(_binding_0) => { - skip!(_binding_0); - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_trait_item_mut<V>(v: &mut V, node: &mut crate::TraitItem) -where - V: VisitMut + ?Sized, -{ - match node { - crate::TraitItem::Const(_binding_0) => { - v.visit_trait_item_const_mut(_binding_0); - } - crate::TraitItem::Fn(_binding_0) => { - v.visit_trait_item_fn_mut(_binding_0); - } - crate::TraitItem::Type(_binding_0) => { - v.visit_trait_item_type_mut(_binding_0); - } - crate::TraitItem::Macro(_binding_0) => { - v.visit_trait_item_macro_mut(_binding_0); - } - crate::TraitItem::Verbatim(_binding_0) => { - v.visit_token_stream_mut(_binding_0); - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_trait_item_const_mut<V>(v: &mut V, node: &mut crate::TraitItemConst) -where - V: VisitMut + ?Sized, -{ - v.visit_attributes_mut(&mut node.attrs); - skip!(node.const_token); - v.visit_ident_mut(&mut node.ident); - v.visit_generics_mut(&mut node.generics); - skip!(node.colon_token); - v.visit_type_mut(&mut node.ty); - if let Some(it) = &mut node.default { - skip!((it).0); - v.visit_expr_mut(&mut (it).1); - } - skip!(node.semi_token); -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_trait_item_fn_mut<V>(v: &mut V, node: &mut crate::TraitItemFn) -where - V: VisitMut + ?Sized, -{ - v.visit_attributes_mut(&mut node.attrs); - v.visit_signature_mut(&mut node.sig); - if let Some(it) = &mut node.default { - v.visit_block_mut(it); - } - skip!(node.semi_token); -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_trait_item_macro_mut<V>(v: &mut V, node: &mut crate::TraitItemMacro) -where - V: VisitMut + ?Sized, -{ - v.visit_attributes_mut(&mut node.attrs); - v.visit_macro_mut(&mut node.mac); - skip!(node.semi_token); -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_trait_item_type_mut<V>(v: &mut V, node: &mut crate::TraitItemType) -where - V: VisitMut + ?Sized, -{ - v.visit_attributes_mut(&mut node.attrs); - skip!(node.type_token); - v.visit_ident_mut(&mut node.ident); - v.visit_generics_mut(&mut node.generics); - skip!(node.colon_token); - for mut el in Punctuated::pairs_mut(&mut node.bounds) { - let it = el.value_mut(); - v.visit_type_param_bound_mut(it); - } - if let Some(it) = &mut node.default { - skip!((it).0); - v.visit_type_mut(&mut (it).1); - } - skip!(node.semi_token); -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn visit_type_mut<V>(v: &mut V, node: &mut crate::Type) -where - V: VisitMut + ?Sized, -{ - match node { - crate::Type::Array(_binding_0) => { - v.visit_type_array_mut(_binding_0); - } - crate::Type::BareFn(_binding_0) => { - v.visit_type_bare_fn_mut(_binding_0); - } - crate::Type::Group(_binding_0) => { - v.visit_type_group_mut(_binding_0); - } - crate::Type::ImplTrait(_binding_0) => { - v.visit_type_impl_trait_mut(_binding_0); - } - crate::Type::Infer(_binding_0) => { - v.visit_type_infer_mut(_binding_0); - } - crate::Type::Macro(_binding_0) => { - v.visit_type_macro_mut(_binding_0); - } - 
crate::Type::Never(_binding_0) => { - v.visit_type_never_mut(_binding_0); - } - crate::Type::Paren(_binding_0) => { - v.visit_type_paren_mut(_binding_0); - } - crate::Type::Path(_binding_0) => { - v.visit_type_path_mut(_binding_0); - } - crate::Type::Ptr(_binding_0) => { - v.visit_type_ptr_mut(_binding_0); - } - crate::Type::Reference(_binding_0) => { - v.visit_type_reference_mut(_binding_0); - } - crate::Type::Slice(_binding_0) => { - v.visit_type_slice_mut(_binding_0); - } - crate::Type::TraitObject(_binding_0) => { - v.visit_type_trait_object_mut(_binding_0); - } - crate::Type::Tuple(_binding_0) => { - v.visit_type_tuple_mut(_binding_0); - } - crate::Type::Verbatim(_binding_0) => { - v.visit_token_stream_mut(_binding_0); - } - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn visit_type_array_mut<V>(v: &mut V, node: &mut crate::TypeArray) -where - V: VisitMut + ?Sized, -{ - skip!(node.bracket_token); - v.visit_type_mut(&mut *node.elem); - skip!(node.semi_token); - v.visit_expr_mut(&mut node.len); -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn visit_type_bare_fn_mut<V>(v: &mut V, node: &mut crate::TypeBareFn) -where - V: VisitMut + ?Sized, -{ - if let Some(it) = &mut node.lifetimes { - v.visit_bound_lifetimes_mut(it); - } - skip!(node.unsafety); - if let Some(it) = &mut node.abi { - v.visit_abi_mut(it); - } - skip!(node.fn_token); - skip!(node.paren_token); - for mut el in Punctuated::pairs_mut(&mut node.inputs) { - let it = el.value_mut(); - v.visit_bare_fn_arg_mut(it); - } - if let Some(it) = &mut node.variadic { - v.visit_bare_variadic_mut(it); - } - v.visit_return_type_mut(&mut node.output); -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn visit_type_group_mut<V>(v: &mut V, node: &mut crate::TypeGroup) -where - V: VisitMut + ?Sized, -{ - skip!(node.group_token); - v.visit_type_mut(&mut *node.elem); -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn visit_type_impl_trait_mut<V>(v: &mut V, node: &mut crate::TypeImplTrait) -where - V: VisitMut + ?Sized, -{ - skip!(node.impl_token); - for mut el in Punctuated::pairs_mut(&mut node.bounds) { - let it = el.value_mut(); - v.visit_type_param_bound_mut(it); - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn visit_type_infer_mut<V>(v: &mut V, node: &mut crate::TypeInfer) -where - V: VisitMut + ?Sized, -{ - skip!(node.underscore_token); -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn visit_type_macro_mut<V>(v: &mut V, node: &mut crate::TypeMacro) -where - V: VisitMut + ?Sized, -{ - v.visit_macro_mut(&mut node.mac); -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn visit_type_never_mut<V>(v: &mut V, node: &mut crate::TypeNever) -where - V: VisitMut + ?Sized, -{ - skip!(node.bang_token); -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn visit_type_param_mut<V>(v: &mut V, node: &mut crate::TypeParam) -where - V: VisitMut + ?Sized, -{ - v.visit_attributes_mut(&mut 
node.attrs); - v.visit_ident_mut(&mut node.ident); - skip!(node.colon_token); - for mut el in Punctuated::pairs_mut(&mut node.bounds) { - let it = el.value_mut(); - v.visit_type_param_bound_mut(it); - } - skip!(node.eq_token); - if let Some(it) = &mut node.default { - v.visit_type_mut(it); - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn visit_type_param_bound_mut<V>(v: &mut V, node: &mut crate::TypeParamBound) -where - V: VisitMut + ?Sized, -{ - match node { - crate::TypeParamBound::Trait(_binding_0) => { - v.visit_trait_bound_mut(_binding_0); - } - crate::TypeParamBound::Lifetime(_binding_0) => { - v.visit_lifetime_mut(_binding_0); - } - crate::TypeParamBound::PreciseCapture(_binding_0) => { - full!(v.visit_precise_capture_mut(_binding_0)); - } - crate::TypeParamBound::Verbatim(_binding_0) => { - v.visit_token_stream_mut(_binding_0); - } - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn visit_type_paren_mut<V>(v: &mut V, node: &mut crate::TypeParen) -where - V: VisitMut + ?Sized, -{ - skip!(node.paren_token); - v.visit_type_mut(&mut *node.elem); -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn visit_type_path_mut<V>(v: &mut V, node: &mut crate::TypePath) -where - V: VisitMut + ?Sized, -{ - if let Some(it) = &mut node.qself { - v.visit_qself_mut(it); - } - v.visit_path_mut(&mut node.path); -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn visit_type_ptr_mut<V>(v: &mut V, node: &mut crate::TypePtr) -where - V: VisitMut + ?Sized, -{ - skip!(node.star_token); - skip!(node.const_token); - skip!(node.mutability); - v.visit_type_mut(&mut *node.elem); -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn visit_type_reference_mut<V>(v: &mut V, node: &mut crate::TypeReference) -where - V: VisitMut + ?Sized, -{ - skip!(node.and_token); - if let Some(it) = &mut node.lifetime { - v.visit_lifetime_mut(it); - } - skip!(node.mutability); - v.visit_type_mut(&mut *node.elem); -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn visit_type_slice_mut<V>(v: &mut V, node: &mut crate::TypeSlice) -where - V: VisitMut + ?Sized, -{ - skip!(node.bracket_token); - v.visit_type_mut(&mut *node.elem); -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn visit_type_trait_object_mut<V>(v: &mut V, node: &mut crate::TypeTraitObject) -where - V: VisitMut + ?Sized, -{ - skip!(node.dyn_token); - for mut el in Punctuated::pairs_mut(&mut node.bounds) { - let it = el.value_mut(); - v.visit_type_param_bound_mut(it); - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn visit_type_tuple_mut<V>(v: &mut V, node: &mut crate::TypeTuple) -where - V: VisitMut + ?Sized, -{ - skip!(node.paren_token); - for mut el in Punctuated::pairs_mut(&mut node.elems) { - let it = el.value_mut(); - v.visit_type_mut(it); - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn 
visit_un_op_mut<V>(v: &mut V, node: &mut crate::UnOp) -where - V: VisitMut + ?Sized, -{ - match node { - crate::UnOp::Deref(_binding_0) => { - skip!(_binding_0); - } - crate::UnOp::Not(_binding_0) => { - skip!(_binding_0); - } - crate::UnOp::Neg(_binding_0) => { - skip!(_binding_0); - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_use_glob_mut<V>(v: &mut V, node: &mut crate::UseGlob) -where - V: VisitMut + ?Sized, -{ - skip!(node.star_token); -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_use_group_mut<V>(v: &mut V, node: &mut crate::UseGroup) -where - V: VisitMut + ?Sized, -{ - skip!(node.brace_token); - for mut el in Punctuated::pairs_mut(&mut node.items) { - let it = el.value_mut(); - v.visit_use_tree_mut(it); - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_use_name_mut<V>(v: &mut V, node: &mut crate::UseName) -where - V: VisitMut + ?Sized, -{ - v.visit_ident_mut(&mut node.ident); -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_use_path_mut<V>(v: &mut V, node: &mut crate::UsePath) -where - V: VisitMut + ?Sized, -{ - v.visit_ident_mut(&mut node.ident); - skip!(node.colon2_token); - v.visit_use_tree_mut(&mut *node.tree); -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_use_rename_mut<V>(v: &mut V, node: &mut crate::UseRename) -where - V: VisitMut + ?Sized, -{ - v.visit_ident_mut(&mut node.ident); - skip!(node.as_token); - v.visit_ident_mut(&mut node.rename); -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_use_tree_mut<V>(v: &mut V, node: &mut crate::UseTree) -where - V: VisitMut + ?Sized, -{ - match node { - crate::UseTree::Path(_binding_0) => { - v.visit_use_path_mut(_binding_0); - } - crate::UseTree::Name(_binding_0) => { - v.visit_use_name_mut(_binding_0); - } - crate::UseTree::Rename(_binding_0) => { - v.visit_use_rename_mut(_binding_0); - } - crate::UseTree::Glob(_binding_0) => { - v.visit_use_glob_mut(_binding_0); - } - crate::UseTree::Group(_binding_0) => { - v.visit_use_group_mut(_binding_0); - } - } -} -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub fn visit_variadic_mut<V>(v: &mut V, node: &mut crate::Variadic) -where - V: VisitMut + ?Sized, -{ - v.visit_attributes_mut(&mut node.attrs); - if let Some(it) = &mut node.pat { - v.visit_pat_mut(&mut *(it).0); - skip!((it).1); - } - skip!(node.dots); - skip!(node.comma); -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn visit_variant_mut<V>(v: &mut V, node: &mut crate::Variant) -where - V: VisitMut + ?Sized, -{ - v.visit_attributes_mut(&mut node.attrs); - v.visit_ident_mut(&mut node.ident); - v.visit_fields_mut(&mut node.fields); - if let Some(it) = &mut node.discriminant { - skip!((it).0); - v.visit_expr_mut(&mut (it).1); - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn visit_vis_restricted_mut<V>(v: &mut V, node: &mut crate::VisRestricted) -where - V: VisitMut + ?Sized, -{ - skip!(node.pub_token); - skip!(node.paren_token); - skip!(node.in_token); - v.visit_path_mut(&mut *node.path); -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn visit_visibility_mut<V>(v: &mut V, node: 
&mut crate::Visibility) -where - V: VisitMut + ?Sized, -{ - match node { - crate::Visibility::Public(_binding_0) => { - skip!(_binding_0); - } - crate::Visibility::Restricted(_binding_0) => { - v.visit_vis_restricted_mut(_binding_0); - } - crate::Visibility::Inherited => {} - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn visit_where_clause_mut<V>(v: &mut V, node: &mut crate::WhereClause) -where - V: VisitMut + ?Sized, -{ - skip!(node.where_token); - for mut el in Punctuated::pairs_mut(&mut node.predicates) { - let it = el.value_mut(); - v.visit_where_predicate_mut(it); - } -} -#[cfg(any(feature = "derive", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "derive", feature = "full"))))] -pub fn visit_where_predicate_mut<V>(v: &mut V, node: &mut crate::WherePredicate) -where - V: VisitMut + ?Sized, -{ - match node { - crate::WherePredicate::Lifetime(_binding_0) => { - v.visit_predicate_lifetime_mut(_binding_0); - } - crate::WherePredicate::Type(_binding_0) => { - v.visit_predicate_type_mut(_binding_0); - } - } -} diff --git a/vendor/syn/src/generics.rs b/vendor/syn/src/generics.rs deleted file mode 100644 index de8e09151e3c4d..00000000000000 --- a/vendor/syn/src/generics.rs +++ /dev/null @@ -1,1477 +0,0 @@ -use crate::attr::Attribute; -use crate::expr::Expr; -use crate::ident::Ident; -use crate::lifetime::Lifetime; -use crate::path::Path; -use crate::punctuated::{Iter, IterMut, Punctuated}; -use crate::token; -use crate::ty::Type; -use proc_macro2::TokenStream; -#[cfg(all(feature = "printing", feature = "extra-traits"))] -use std::fmt::{self, Debug}; -#[cfg(all(feature = "printing", feature = "extra-traits"))] -use std::hash::{Hash, Hasher}; - -ast_struct! { - /// Lifetimes and type parameters attached to a declaration of a function, - /// enum, trait, etc. - /// - /// This struct represents two distinct optional syntactic elements, - /// [generic parameters] and [where clause]. In some locations of the - /// grammar, there may be other tokens in between these two things. - /// - /// [generic parameters]: https://doc.rust-lang.org/stable/reference/items/generics.html#generic-parameters - /// [where clause]: https://doc.rust-lang.org/stable/reference/items/generics.html#where-clauses - #[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] - pub struct Generics { - pub lt_token: Option<Token![<]>, - pub params: Punctuated<GenericParam, Token![,]>, - pub gt_token: Option<Token![>]>, - pub where_clause: Option<WhereClause>, - } -} - -ast_enum_of_structs! { - /// A generic type parameter, lifetime, or const generic: `T: Into<String>`, - /// `'a: 'b`, `const LEN: usize`. - /// - /// # Syntax tree enum - /// - /// This type is a [syntax tree enum]. - /// - /// [syntax tree enum]: crate::expr::Expr#syntax-tree-enums - #[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] - pub enum GenericParam { - /// A lifetime parameter: `'a: 'b + 'c + 'd`. - Lifetime(LifetimeParam), - - /// A generic type parameter: `T: Into<String>`. - Type(TypeParam), - - /// A const generic parameter: `const LENGTH: usize`. - Const(ConstParam), - } -} - -ast_struct! { - /// A lifetime definition: `'a: 'b + 'c + 'd`. - #[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] - pub struct LifetimeParam { - pub attrs: Vec<Attribute>, - pub lifetime: Lifetime, - pub colon_token: Option<Token![:]>, - pub bounds: Punctuated<Lifetime, Token![+]>, - } -} - -ast_struct! 
{ - /// A generic type parameter: `T: Into<String>`. - #[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] - pub struct TypeParam { - pub attrs: Vec<Attribute>, - pub ident: Ident, - pub colon_token: Option<Token![:]>, - pub bounds: Punctuated<TypeParamBound, Token![+]>, - pub eq_token: Option<Token![=]>, - pub default: Option<Type>, - } -} - -ast_struct! { - /// A const generic parameter: `const LENGTH: usize`. - #[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] - pub struct ConstParam { - pub attrs: Vec<Attribute>, - pub const_token: Token![const], - pub ident: Ident, - pub colon_token: Token![:], - pub ty: Type, - pub eq_token: Option<Token![=]>, - pub default: Option<Expr>, - } -} - -impl Default for Generics { - fn default() -> Self { - Generics { - lt_token: None, - params: Punctuated::new(), - gt_token: None, - where_clause: None, - } - } -} - -impl Generics { - return_impl_trait! { - /// Iterator over the lifetime parameters in `self.params`. - pub fn lifetimes(&self) -> impl Iterator<Item = &LifetimeParam> [Lifetimes] { - Lifetimes(self.params.iter()) - } - } - - return_impl_trait! { - /// Iterator over the lifetime parameters in `self.params`. - pub fn lifetimes_mut(&mut self) -> impl Iterator<Item = &mut LifetimeParam> [LifetimesMut] { - LifetimesMut(self.params.iter_mut()) - } - } - - return_impl_trait! { - /// Iterator over the type parameters in `self.params`. - pub fn type_params(&self) -> impl Iterator<Item = &TypeParam> [TypeParams] { - TypeParams(self.params.iter()) - } - } - - return_impl_trait! { - /// Iterator over the type parameters in `self.params`. - pub fn type_params_mut(&mut self) -> impl Iterator<Item = &mut TypeParam> [TypeParamsMut] { - TypeParamsMut(self.params.iter_mut()) - } - } - - return_impl_trait! { - /// Iterator over the constant parameters in `self.params`. - pub fn const_params(&self) -> impl Iterator<Item = &ConstParam> [ConstParams] { - ConstParams(self.params.iter()) - } - } - - return_impl_trait! { - /// Iterator over the constant parameters in `self.params`. - pub fn const_params_mut(&mut self) -> impl Iterator<Item = &mut ConstParam> [ConstParamsMut] { - ConstParamsMut(self.params.iter_mut()) - } - } - - /// Initializes an empty `where`-clause if there is not one present already. - pub fn make_where_clause(&mut self) -> &mut WhereClause { - self.where_clause.get_or_insert_with(|| WhereClause { - where_token: <Token![where]>::default(), - predicates: Punctuated::new(), - }) - } - - /// Split a type's generics into the pieces required for impl'ing a trait - /// for that type. - /// - /// ``` - /// # use proc_macro2::{Span, Ident}; - /// # use quote::quote; - /// # - /// # let generics: syn::Generics = Default::default(); - /// # let name = Ident::new("MyType", Span::call_site()); - /// # - /// let (impl_generics, ty_generics, where_clause) = generics.split_for_impl(); - /// quote! { - /// impl #impl_generics MyTrait for #name #ty_generics #where_clause { - /// // ... - /// } - /// } - /// # ; - /// ``` - #[cfg(feature = "printing")] - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - pub fn split_for_impl(&self) -> (ImplGenerics, TypeGenerics, Option<&WhereClause>) { - ( - ImplGenerics(self), - TypeGenerics(self), - self.where_clause.as_ref(), - ) - } -} - -pub struct Lifetimes<'a>(Iter<'a, GenericParam>); - -impl<'a> Iterator for Lifetimes<'a> { - type Item = &'a LifetimeParam; - - fn next(&mut self) -> Option<Self::Item> { - if let GenericParam::Lifetime(lifetime) = self.0.next()? 
{ - Some(lifetime) - } else { - self.next() - } - } -} - -pub struct LifetimesMut<'a>(IterMut<'a, GenericParam>); - -impl<'a> Iterator for LifetimesMut<'a> { - type Item = &'a mut LifetimeParam; - - fn next(&mut self) -> Option<Self::Item> { - if let GenericParam::Lifetime(lifetime) = self.0.next()? { - Some(lifetime) - } else { - self.next() - } - } -} - -pub struct TypeParams<'a>(Iter<'a, GenericParam>); - -impl<'a> Iterator for TypeParams<'a> { - type Item = &'a TypeParam; - - fn next(&mut self) -> Option<Self::Item> { - if let GenericParam::Type(type_param) = self.0.next()? { - Some(type_param) - } else { - self.next() - } - } -} - -pub struct TypeParamsMut<'a>(IterMut<'a, GenericParam>); - -impl<'a> Iterator for TypeParamsMut<'a> { - type Item = &'a mut TypeParam; - - fn next(&mut self) -> Option<Self::Item> { - if let GenericParam::Type(type_param) = self.0.next()? { - Some(type_param) - } else { - self.next() - } - } -} - -pub struct ConstParams<'a>(Iter<'a, GenericParam>); - -impl<'a> Iterator for ConstParams<'a> { - type Item = &'a ConstParam; - - fn next(&mut self) -> Option<Self::Item> { - if let GenericParam::Const(const_param) = self.0.next()? { - Some(const_param) - } else { - self.next() - } - } -} - -pub struct ConstParamsMut<'a>(IterMut<'a, GenericParam>); - -impl<'a> Iterator for ConstParamsMut<'a> { - type Item = &'a mut ConstParam; - - fn next(&mut self) -> Option<Self::Item> { - if let GenericParam::Const(const_param) = self.0.next()? { - Some(const_param) - } else { - self.next() - } - } -} - -/// Returned by `Generics::split_for_impl`. -#[cfg(feature = "printing")] -#[cfg_attr( - docsrs, - doc(cfg(all(any(feature = "full", feature = "derive"), feature = "printing"))) -)] -pub struct ImplGenerics<'a>(&'a Generics); - -/// Returned by `Generics::split_for_impl`. -#[cfg(feature = "printing")] -#[cfg_attr( - docsrs, - doc(cfg(all(any(feature = "full", feature = "derive"), feature = "printing"))) -)] -pub struct TypeGenerics<'a>(&'a Generics); - -/// Returned by `TypeGenerics::as_turbofish`. -#[cfg(feature = "printing")] -#[cfg_attr( - docsrs, - doc(cfg(all(any(feature = "full", feature = "derive"), feature = "printing"))) -)] -pub struct Turbofish<'a>(&'a Generics); - -#[cfg(feature = "printing")] -macro_rules! 
generics_wrapper_impls { - ($ty:ident) => { - #[cfg(feature = "clone-impls")] - #[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] - impl<'a> Clone for $ty<'a> { - fn clone(&self) -> Self { - $ty(self.0) - } - } - - #[cfg(feature = "extra-traits")] - #[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] - impl<'a> Debug for $ty<'a> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter - .debug_tuple(stringify!($ty)) - .field(self.0) - .finish() - } - } - - #[cfg(feature = "extra-traits")] - #[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] - impl<'a> Eq for $ty<'a> {} - - #[cfg(feature = "extra-traits")] - #[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] - impl<'a> PartialEq for $ty<'a> { - fn eq(&self, other: &Self) -> bool { - self.0 == other.0 - } - } - - #[cfg(feature = "extra-traits")] - #[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] - impl<'a> Hash for $ty<'a> { - fn hash<H: Hasher>(&self, state: &mut H) { - self.0.hash(state); - } - } - }; -} - -#[cfg(feature = "printing")] -generics_wrapper_impls!(ImplGenerics); -#[cfg(feature = "printing")] -generics_wrapper_impls!(TypeGenerics); -#[cfg(feature = "printing")] -generics_wrapper_impls!(Turbofish); - -#[cfg(feature = "printing")] -impl<'a> TypeGenerics<'a> { - /// Turn a type's generics like `<X, Y>` into a turbofish like `::<X, Y>`. - pub fn as_turbofish(&self) -> Turbofish<'a> { - Turbofish(self.0) - } -} - -ast_struct! { - /// A set of bound lifetimes: `for<'a, 'b, 'c>`. - #[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] - pub struct BoundLifetimes { - pub for_token: Token![for], - pub lt_token: Token![<], - pub lifetimes: Punctuated<GenericParam, Token![,]>, - pub gt_token: Token![>], - } -} - -impl Default for BoundLifetimes { - fn default() -> Self { - BoundLifetimes { - for_token: Default::default(), - lt_token: Default::default(), - lifetimes: Punctuated::new(), - gt_token: Default::default(), - } - } -} - -impl LifetimeParam { - pub fn new(lifetime: Lifetime) -> Self { - LifetimeParam { - attrs: Vec::new(), - lifetime, - colon_token: None, - bounds: Punctuated::new(), - } - } -} - -impl From<Ident> for TypeParam { - fn from(ident: Ident) -> Self { - TypeParam { - attrs: vec![], - ident, - colon_token: None, - bounds: Punctuated::new(), - eq_token: None, - default: None, - } - } -} - -ast_enum_of_structs! { - /// A trait or lifetime used as a bound on a type parameter. - #[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] - #[non_exhaustive] - pub enum TypeParamBound { - Trait(TraitBound), - Lifetime(Lifetime), - PreciseCapture(PreciseCapture), - Verbatim(TokenStream), - } -} - -ast_struct! { - /// A trait used as a bound on a type parameter. - #[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] - pub struct TraitBound { - pub paren_token: Option<token::Paren>, - pub modifier: TraitBoundModifier, - /// The `for<'a>` in `for<'a> Foo<&'a T>` - pub lifetimes: Option<BoundLifetimes>, - /// The `Foo<&'a T>` in `for<'a> Foo<&'a T>` - pub path: Path, - } -} - -ast_enum! { - /// A modifier on a trait bound, currently only used for the `?` in - /// `?Sized`. - #[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] - pub enum TraitBoundModifier { - None, - Maybe(Token![?]), - } -} - -ast_struct! { - /// Precise capturing bound: the 'use<…>' in `impl Trait + - /// use<'a, T>`. 
- #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - pub struct PreciseCapture #full { - pub use_token: Token![use], - pub lt_token: Token![<], - pub params: Punctuated<CapturedParam, Token![,]>, - pub gt_token: Token![>], - } -} - -#[cfg(feature = "full")] -ast_enum! { - /// Single parameter in a precise capturing bound. - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - #[non_exhaustive] - pub enum CapturedParam { - /// A lifetime parameter in precise capturing bound: `fn f<'a>() -> impl - /// Trait + use<'a>`. - Lifetime(Lifetime), - /// A type parameter or const generic parameter in precise capturing - /// bound: `fn f<T>() -> impl Trait + use<T>` or `fn f<const K: T>() -> - /// impl Trait + use<K>`. - Ident(Ident), - } -} - -ast_struct! { - /// A `where` clause in a definition: `where T: Deserialize<'de>, D: - /// 'static`. - #[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] - pub struct WhereClause { - pub where_token: Token![where], - pub predicates: Punctuated<WherePredicate, Token![,]>, - } -} - -ast_enum_of_structs! { - /// A single predicate in a `where` clause: `T: Deserialize<'de>`. - /// - /// # Syntax tree enum - /// - /// This type is a [syntax tree enum]. - /// - /// [syntax tree enum]: crate::expr::Expr#syntax-tree-enums - #[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] - #[non_exhaustive] - pub enum WherePredicate { - /// A lifetime predicate in a `where` clause: `'a: 'b + 'c`. - Lifetime(PredicateLifetime), - - /// A type predicate in a `where` clause: `for<'c> Foo<'c>: Trait<'c>`. - Type(PredicateType), - } -} - -ast_struct! { - /// A lifetime predicate in a `where` clause: `'a: 'b + 'c`. - #[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] - pub struct PredicateLifetime { - pub lifetime: Lifetime, - pub colon_token: Token![:], - pub bounds: Punctuated<Lifetime, Token![+]>, - } -} - -ast_struct! { - /// A type predicate in a `where` clause: `for<'c> Foo<'c>: Trait<'c>`. 
- #[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] - pub struct PredicateType { - /// Any lifetimes from a `for` binding - pub lifetimes: Option<BoundLifetimes>, - /// The type being bounded - pub bounded_ty: Type, - pub colon_token: Token![:], - /// Trait and lifetime bounds (`Clone+Send+'static`) - pub bounds: Punctuated<TypeParamBound, Token![+]>, - } -} - -#[cfg(feature = "parsing")] -pub(crate) mod parsing { - use crate::attr::Attribute; - #[cfg(feature = "full")] - use crate::error; - use crate::error::{Error, Result}; - use crate::ext::IdentExt as _; - use crate::generics::{ - BoundLifetimes, ConstParam, GenericParam, Generics, LifetimeParam, PredicateLifetime, - PredicateType, TraitBound, TraitBoundModifier, TypeParam, TypeParamBound, WhereClause, - WherePredicate, - }; - #[cfg(feature = "full")] - use crate::generics::{CapturedParam, PreciseCapture}; - use crate::ident::Ident; - use crate::lifetime::Lifetime; - use crate::parse::{Parse, ParseStream}; - use crate::path::{self, ParenthesizedGenericArguments, Path, PathArguments}; - use crate::punctuated::Punctuated; - use crate::token; - use crate::ty::Type; - use crate::verbatim; - - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - impl Parse for Generics { - fn parse(input: ParseStream) -> Result<Self> { - if !input.peek(Token![<]) { - return Ok(Generics::default()); - } - - let lt_token: Token![<] = input.parse()?; - - let mut params = Punctuated::new(); - loop { - if input.peek(Token![>]) { - break; - } - - let attrs = input.call(Attribute::parse_outer)?; - let lookahead = input.lookahead1(); - if lookahead.peek(Lifetime) { - params.push_value(GenericParam::Lifetime(LifetimeParam { - attrs, - ..input.parse()? - })); - } else if lookahead.peek(Ident) { - params.push_value(GenericParam::Type(TypeParam { - attrs, - ..input.parse()? - })); - } else if lookahead.peek(Token![const]) { - params.push_value(GenericParam::Const(ConstParam { - attrs, - ..input.parse()? - })); - } else if input.peek(Token![_]) { - params.push_value(GenericParam::Type(TypeParam { - attrs, - ident: input.call(Ident::parse_any)?, - colon_token: None, - bounds: Punctuated::new(), - eq_token: None, - default: None, - })); - } else { - return Err(lookahead.error()); - } - - if input.peek(Token![>]) { - break; - } - let punct = input.parse()?; - params.push_punct(punct); - } - - let gt_token: Token![>] = input.parse()?; - - Ok(Generics { - lt_token: Some(lt_token), - params, - gt_token: Some(gt_token), - where_clause: None, - }) - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - impl Parse for GenericParam { - fn parse(input: ParseStream) -> Result<Self> { - let attrs = input.call(Attribute::parse_outer)?; - - let lookahead = input.lookahead1(); - if lookahead.peek(Ident) { - Ok(GenericParam::Type(TypeParam { - attrs, - ..input.parse()? - })) - } else if lookahead.peek(Lifetime) { - Ok(GenericParam::Lifetime(LifetimeParam { - attrs, - ..input.parse()? - })) - } else if lookahead.peek(Token![const]) { - Ok(GenericParam::Const(ConstParam { - attrs, - ..input.parse()? - })) - } else { - Err(lookahead.error()) - } - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - impl Parse for LifetimeParam { - fn parse(input: ParseStream) -> Result<Self> { - let has_colon; - Ok(LifetimeParam { - attrs: input.call(Attribute::parse_outer)?, - lifetime: input.parse()?, - colon_token: { - if input.peek(Token![:]) { - has_colon = true; - Some(input.parse()?) 
- } else { - has_colon = false; - None - } - }, - bounds: { - let mut bounds = Punctuated::new(); - if has_colon { - loop { - if input.peek(Token![,]) || input.peek(Token![>]) { - break; - } - let value = input.parse()?; - bounds.push_value(value); - if !input.peek(Token![+]) { - break; - } - let punct = input.parse()?; - bounds.push_punct(punct); - } - } - bounds - }, - }) - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - impl Parse for BoundLifetimes { - fn parse(input: ParseStream) -> Result<Self> { - Ok(BoundLifetimes { - for_token: input.parse()?, - lt_token: input.parse()?, - lifetimes: { - let mut lifetimes = Punctuated::new(); - while !input.peek(Token![>]) { - lifetimes.push_value(input.parse()?); - if input.peek(Token![>]) { - break; - } - lifetimes.push_punct(input.parse()?); - } - lifetimes - }, - gt_token: input.parse()?, - }) - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - impl Parse for Option<BoundLifetimes> { - fn parse(input: ParseStream) -> Result<Self> { - if input.peek(Token![for]) { - input.parse().map(Some) - } else { - Ok(None) - } - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - impl Parse for TypeParam { - fn parse(input: ParseStream) -> Result<Self> { - let attrs = input.call(Attribute::parse_outer)?; - let ident: Ident = input.parse()?; - let colon_token: Option<Token![:]> = input.parse()?; - - let mut bounds = Punctuated::new(); - if colon_token.is_some() { - loop { - if input.peek(Token![,]) || input.peek(Token![>]) || input.peek(Token![=]) { - break; - } - bounds.push_value({ - let allow_precise_capture = false; - let allow_const = true; - TypeParamBound::parse_single(input, allow_precise_capture, allow_const)? - }); - if !input.peek(Token![+]) { - break; - } - let punct: Token![+] = input.parse()?; - bounds.push_punct(punct); - } - } - - let eq_token: Option<Token![=]> = input.parse()?; - let default = if eq_token.is_some() { - Some(input.parse::<Type>()?) - } else { - None - }; - - Ok(TypeParam { - attrs, - ident, - colon_token, - bounds, - eq_token, - default, - }) - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - impl Parse for TypeParamBound { - fn parse(input: ParseStream) -> Result<Self> { - let allow_precise_capture = true; - let allow_const = true; - Self::parse_single(input, allow_precise_capture, allow_const) - } - } - - impl TypeParamBound { - pub(crate) fn parse_single( - input: ParseStream, - #[cfg_attr(not(feature = "full"), allow(unused_variables))] allow_precise_capture: bool, - allow_const: bool, - ) -> Result<Self> { - if input.peek(Lifetime) { - return input.parse().map(TypeParamBound::Lifetime); - } - - #[cfg(feature = "full")] - { - if input.peek(Token![use]) { - let precise_capture: PreciseCapture = input.parse()?; - return if allow_precise_capture { - Ok(TypeParamBound::PreciseCapture(precise_capture)) - } else { - let msg = "`use<...>` precise capturing syntax is not allowed here"; - Err(error::new2( - precise_capture.use_token.span, - precise_capture.gt_token.span, - msg, - )) - }; - } - } - - let begin = input.fork(); - - let content; - let (paren_token, content) = if input.peek(token::Paren) { - (Some(parenthesized!(content in input)), &content) - } else { - (None, input) - }; - - if let Some(mut bound) = TraitBound::do_parse(content, allow_const)? 
{ - bound.paren_token = paren_token; - Ok(TypeParamBound::Trait(bound)) - } else { - Ok(TypeParamBound::Verbatim(verbatim::between(&begin, input))) - } - } - - pub(crate) fn parse_multiple( - input: ParseStream, - allow_plus: bool, - allow_precise_capture: bool, - allow_const: bool, - ) -> Result<Punctuated<Self, Token![+]>> { - let mut bounds = Punctuated::new(); - loop { - let bound = Self::parse_single(input, allow_precise_capture, allow_const)?; - bounds.push_value(bound); - if !(allow_plus && input.peek(Token![+])) { - break; - } - bounds.push_punct(input.parse()?); - if !(input.peek(Ident::peek_any) - || input.peek(Token![::]) - || input.peek(Token![?]) - || input.peek(Lifetime) - || input.peek(token::Paren) - || (allow_const && (input.peek(token::Bracket) || input.peek(Token![const])))) - { - break; - } - } - Ok(bounds) - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - impl Parse for TraitBound { - fn parse(input: ParseStream) -> Result<Self> { - let allow_const = false; - Self::do_parse(input, allow_const).map(Option::unwrap) - } - } - - impl TraitBound { - fn do_parse(input: ParseStream, allow_const: bool) -> Result<Option<Self>> { - let mut lifetimes: Option<BoundLifetimes> = input.parse()?; - - let is_conditionally_const = cfg!(feature = "full") && input.peek(token::Bracket); - let is_unconditionally_const = cfg!(feature = "full") && input.peek(Token![const]); - if is_conditionally_const { - let conditionally_const; - let bracket_token = bracketed!(conditionally_const in input); - conditionally_const.parse::<Token![const]>()?; - if !allow_const { - let msg = "`[const]` is not allowed here"; - return Err(Error::new(bracket_token.span.join(), msg)); - } - } else if is_unconditionally_const { - let const_token: Token![const] = input.parse()?; - if !allow_const { - let msg = "`const` is not allowed here"; - return Err(Error::new(const_token.span, msg)); - } - } - - let modifier: TraitBoundModifier = input.parse()?; - if lifetimes.is_none() && matches!(modifier, TraitBoundModifier::Maybe(_)) { - lifetimes = input.parse()?; - } - - let mut path: Path = input.parse()?; - if path.segments.last().unwrap().arguments.is_empty() - && (input.peek(token::Paren) || input.peek(Token![::]) && input.peek3(token::Paren)) - { - input.parse::<Option<Token![::]>>()?; - let args: ParenthesizedGenericArguments = input.parse()?; - let parenthesized = PathArguments::Parenthesized(args); - path.segments.last_mut().unwrap().arguments = parenthesized; - } - - if lifetimes.is_some() { - match modifier { - TraitBoundModifier::None => {} - TraitBoundModifier::Maybe(maybe) => { - let msg = "`for<...>` binder not allowed with `?` trait polarity modifier"; - return Err(Error::new(maybe.span, msg)); - } - } - } - - if is_conditionally_const || is_unconditionally_const { - Ok(None) - } else { - Ok(Some(TraitBound { - paren_token: None, - modifier, - lifetimes, - path, - })) - } - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - impl Parse for TraitBoundModifier { - fn parse(input: ParseStream) -> Result<Self> { - if input.peek(Token![?]) { - input.parse().map(TraitBoundModifier::Maybe) - } else { - Ok(TraitBoundModifier::None) - } - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - impl Parse for ConstParam { - fn parse(input: ParseStream) -> Result<Self> { - let mut default = None; - Ok(ConstParam { - attrs: input.call(Attribute::parse_outer)?, - const_token: input.parse()?, - ident: input.parse()?, - colon_token: input.parse()?, - ty: input.parse()?, - eq_token: { - if 
input.peek(Token![=]) { - let eq_token = input.parse()?; - default = Some(path::parsing::const_argument(input)?); - Some(eq_token) - } else { - None - } - }, - default, - }) - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - impl Parse for WhereClause { - fn parse(input: ParseStream) -> Result<Self> { - let where_token: Token![where] = input.parse()?; - - if choose_generics_over_qpath(input) { - return Err(input - .error("generic parameters on `where` clauses are reserved for future use")); - } - - Ok(WhereClause { - where_token, - predicates: { - let mut predicates = Punctuated::new(); - loop { - if input.is_empty() - || input.peek(token::Brace) - || input.peek(Token![,]) - || input.peek(Token![;]) - || input.peek(Token![:]) && !input.peek(Token![::]) - || input.peek(Token![=]) - { - break; - } - let value = input.parse()?; - predicates.push_value(value); - if !input.peek(Token![,]) { - break; - } - let punct = input.parse()?; - predicates.push_punct(punct); - } - predicates - }, - }) - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - impl Parse for Option<WhereClause> { - fn parse(input: ParseStream) -> Result<Self> { - if input.peek(Token![where]) { - input.parse().map(Some) - } else { - Ok(None) - } - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - impl Parse for WherePredicate { - fn parse(input: ParseStream) -> Result<Self> { - if input.peek(Lifetime) && input.peek2(Token![:]) { - Ok(WherePredicate::Lifetime(PredicateLifetime { - lifetime: input.parse()?, - colon_token: input.parse()?, - bounds: { - let mut bounds = Punctuated::new(); - loop { - if input.is_empty() - || input.peek(token::Brace) - || input.peek(Token![,]) - || input.peek(Token![;]) - || input.peek(Token![:]) - || input.peek(Token![=]) - { - break; - } - let value = input.parse()?; - bounds.push_value(value); - if !input.peek(Token![+]) { - break; - } - let punct = input.parse()?; - bounds.push_punct(punct); - } - bounds - }, - })) - } else { - Ok(WherePredicate::Type(PredicateType { - lifetimes: input.parse()?, - bounded_ty: input.parse()?, - colon_token: input.parse()?, - bounds: { - let mut bounds = Punctuated::new(); - loop { - if input.is_empty() - || input.peek(token::Brace) - || input.peek(Token![,]) - || input.peek(Token![;]) - || input.peek(Token![:]) && !input.peek(Token![::]) - || input.peek(Token![=]) - { - break; - } - bounds.push_value({ - let allow_precise_capture = false; - let allow_const = true; - TypeParamBound::parse_single( - input, - allow_precise_capture, - allow_const, - )? - }); - if !input.peek(Token![+]) { - break; - } - let punct = input.parse()?; - bounds.push_punct(punct); - } - bounds - }, - })) - } - } - } - - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - impl Parse for PreciseCapture { - fn parse(input: ParseStream) -> Result<Self> { - let use_token: Token![use] = input.parse()?; - let lt_token: Token![<] = input.parse()?; - let mut params = Punctuated::new(); - loop { - let lookahead = input.lookahead1(); - params.push_value( - if lookahead.peek(Lifetime) || lookahead.peek(Ident) || input.peek(Token![Self]) - { - input.parse::<CapturedParam>()? - } else if lookahead.peek(Token![>]) { - break; - } else { - return Err(lookahead.error()); - }, - ); - let lookahead = input.lookahead1(); - params.push_punct(if lookahead.peek(Token![,]) { - input.parse::<Token![,]>()? 
- } else if lookahead.peek(Token![>]) { - break; - } else { - return Err(lookahead.error()); - }); - } - let gt_token: Token![>] = input.parse()?; - Ok(PreciseCapture { - use_token, - lt_token, - params, - gt_token, - }) - } - } - - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - impl Parse for CapturedParam { - fn parse(input: ParseStream) -> Result<Self> { - let lookahead = input.lookahead1(); - if lookahead.peek(Lifetime) { - input.parse().map(CapturedParam::Lifetime) - } else if lookahead.peek(Ident) || input.peek(Token![Self]) { - input.call(Ident::parse_any).map(CapturedParam::Ident) - } else { - Err(lookahead.error()) - } - } - } - - pub(crate) fn choose_generics_over_qpath(input: ParseStream) -> bool { - // Rust syntax has an ambiguity between generic parameters and qualified - // paths. In `impl <T> :: Thing<T, U> {}` this may either be a generic - // inherent impl `impl<T> ::Thing<T, U>` or a non-generic inherent impl - // for an associated type `impl <T>::Thing<T, U>`. - // - // After `<` the following continuations can only begin generics, not a - // qualified path: - // - // `<` `>` - empty generic parameters - // `<` `#` - generic parameters with attribute - // `<` LIFETIME `>` - single lifetime parameter - // `<` (LIFETIME|IDENT) `,` - first generic parameter in a list - // `<` (LIFETIME|IDENT) `:` - generic parameter with bounds - // `<` (LIFETIME|IDENT) `=` - generic parameter with a default - // `<` const - generic const parameter - // - // The only truly ambiguous case is: - // - // `<` IDENT `>` `::` IDENT ... - // - // which we disambiguate in favor of generics because this is almost - // always the expected one in the context of real-world code. - input.peek(Token![<]) - && (input.peek2(Token![>]) - || input.peek2(Token![#]) - || (input.peek2(Lifetime) || input.peek2(Ident)) - && (input.peek3(Token![>]) - || input.peek3(Token![,]) - || input.peek3(Token![:]) && !input.peek3(Token![::]) - || input.peek3(Token![=])) - || input.peek2(Token![const])) - } - - #[cfg(feature = "full")] - pub(crate) fn choose_generics_over_qpath_after_keyword(input: ParseStream) -> bool { - let input = input.fork(); - input.call(Ident::parse_any).unwrap(); // `impl` or `for` or `where` - choose_generics_over_qpath(&input) - } -} - -#[cfg(feature = "printing")] -pub(crate) mod printing { - use crate::attr::FilterAttrs; - #[cfg(feature = "full")] - use crate::expr; - use crate::expr::Expr; - #[cfg(feature = "full")] - use crate::fixup::FixupContext; - use crate::generics::{ - BoundLifetimes, ConstParam, GenericParam, Generics, ImplGenerics, LifetimeParam, - PredicateLifetime, PredicateType, TraitBound, TraitBoundModifier, Turbofish, TypeGenerics, - TypeParam, WhereClause, - }; - #[cfg(feature = "full")] - use crate::generics::{CapturedParam, PreciseCapture}; - use crate::print::TokensOrDefault; - use crate::token; - use proc_macro2::TokenStream; - use quote::{ToTokens, TokenStreamExt as _}; - - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for Generics { - fn to_tokens(&self, tokens: &mut TokenStream) { - if self.params.is_empty() { - return; - } - - TokensOrDefault(&self.lt_token).to_tokens(tokens); - - // Print lifetimes before types and consts, regardless of their - // order in self.params. 
- let mut trailing_or_empty = true; - for param in self.params.pairs() { - if let GenericParam::Lifetime(_) = **param.value() { - param.to_tokens(tokens); - trailing_or_empty = param.punct().is_some(); - } - } - for param in self.params.pairs() { - match param.value() { - GenericParam::Type(_) | GenericParam::Const(_) => { - if !trailing_or_empty { - <Token![,]>::default().to_tokens(tokens); - trailing_or_empty = true; - } - param.to_tokens(tokens); - } - GenericParam::Lifetime(_) => {} - } - } - - TokensOrDefault(&self.gt_token).to_tokens(tokens); - } - } - - impl<'a> ToTokens for ImplGenerics<'a> { - fn to_tokens(&self, tokens: &mut TokenStream) { - if self.0.params.is_empty() { - return; - } - - TokensOrDefault(&self.0.lt_token).to_tokens(tokens); - - // Print lifetimes before types and consts, regardless of their - // order in self.params. - let mut trailing_or_empty = true; - for param in self.0.params.pairs() { - if let GenericParam::Lifetime(_) = **param.value() { - param.to_tokens(tokens); - trailing_or_empty = param.punct().is_some(); - } - } - for param in self.0.params.pairs() { - if let GenericParam::Lifetime(_) = **param.value() { - continue; - } - if !trailing_or_empty { - <Token![,]>::default().to_tokens(tokens); - trailing_or_empty = true; - } - match param.value() { - GenericParam::Lifetime(_) => unreachable!(), - GenericParam::Type(param) => { - // Leave off the type parameter defaults - tokens.append_all(param.attrs.outer()); - param.ident.to_tokens(tokens); - if !param.bounds.is_empty() { - TokensOrDefault(¶m.colon_token).to_tokens(tokens); - param.bounds.to_tokens(tokens); - } - } - GenericParam::Const(param) => { - // Leave off the const parameter defaults - tokens.append_all(param.attrs.outer()); - param.const_token.to_tokens(tokens); - param.ident.to_tokens(tokens); - param.colon_token.to_tokens(tokens); - param.ty.to_tokens(tokens); - } - } - param.punct().to_tokens(tokens); - } - - TokensOrDefault(&self.0.gt_token).to_tokens(tokens); - } - } - - impl<'a> ToTokens for TypeGenerics<'a> { - fn to_tokens(&self, tokens: &mut TokenStream) { - if self.0.params.is_empty() { - return; - } - - TokensOrDefault(&self.0.lt_token).to_tokens(tokens); - - // Print lifetimes before types and consts, regardless of their - // order in self.params. 
- let mut trailing_or_empty = true; - for param in self.0.params.pairs() { - if let GenericParam::Lifetime(def) = *param.value() { - // Leave off the lifetime bounds and attributes - def.lifetime.to_tokens(tokens); - param.punct().to_tokens(tokens); - trailing_or_empty = param.punct().is_some(); - } - } - for param in self.0.params.pairs() { - if let GenericParam::Lifetime(_) = **param.value() { - continue; - } - if !trailing_or_empty { - <Token![,]>::default().to_tokens(tokens); - trailing_or_empty = true; - } - match param.value() { - GenericParam::Lifetime(_) => unreachable!(), - GenericParam::Type(param) => { - // Leave off the type parameter defaults - param.ident.to_tokens(tokens); - } - GenericParam::Const(param) => { - // Leave off the const parameter defaults - param.ident.to_tokens(tokens); - } - } - param.punct().to_tokens(tokens); - } - - TokensOrDefault(&self.0.gt_token).to_tokens(tokens); - } - } - - impl<'a> ToTokens for Turbofish<'a> { - fn to_tokens(&self, tokens: &mut TokenStream) { - if !self.0.params.is_empty() { - <Token![::]>::default().to_tokens(tokens); - TypeGenerics(self.0).to_tokens(tokens); - } - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for BoundLifetimes { - fn to_tokens(&self, tokens: &mut TokenStream) { - self.for_token.to_tokens(tokens); - self.lt_token.to_tokens(tokens); - self.lifetimes.to_tokens(tokens); - self.gt_token.to_tokens(tokens); - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for LifetimeParam { - fn to_tokens(&self, tokens: &mut TokenStream) { - tokens.append_all(self.attrs.outer()); - self.lifetime.to_tokens(tokens); - if !self.bounds.is_empty() { - TokensOrDefault(&self.colon_token).to_tokens(tokens); - self.bounds.to_tokens(tokens); - } - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for TypeParam { - fn to_tokens(&self, tokens: &mut TokenStream) { - tokens.append_all(self.attrs.outer()); - self.ident.to_tokens(tokens); - if !self.bounds.is_empty() { - TokensOrDefault(&self.colon_token).to_tokens(tokens); - self.bounds.to_tokens(tokens); - } - if let Some(default) = &self.default { - TokensOrDefault(&self.eq_token).to_tokens(tokens); - default.to_tokens(tokens); - } - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for TraitBound { - fn to_tokens(&self, tokens: &mut TokenStream) { - let to_tokens = |tokens: &mut TokenStream| { - self.modifier.to_tokens(tokens); - self.lifetimes.to_tokens(tokens); - self.path.to_tokens(tokens); - }; - match &self.paren_token { - Some(paren) => paren.surround(tokens, to_tokens), - None => to_tokens(tokens), - } - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for TraitBoundModifier { - fn to_tokens(&self, tokens: &mut TokenStream) { - match self { - TraitBoundModifier::None => {} - TraitBoundModifier::Maybe(t) => t.to_tokens(tokens), - } - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for ConstParam { - fn to_tokens(&self, tokens: &mut TokenStream) { - tokens.append_all(self.attrs.outer()); - self.const_token.to_tokens(tokens); - self.ident.to_tokens(tokens); - self.colon_token.to_tokens(tokens); - self.ty.to_tokens(tokens); - if let Some(default) = &self.default { - TokensOrDefault(&self.eq_token).to_tokens(tokens); - print_const_argument(default, tokens); - } - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for WhereClause { - fn to_tokens(&self, tokens: &mut TokenStream) { - if 
!self.predicates.is_empty() { - self.where_token.to_tokens(tokens); - self.predicates.to_tokens(tokens); - } - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for PredicateLifetime { - fn to_tokens(&self, tokens: &mut TokenStream) { - self.lifetime.to_tokens(tokens); - self.colon_token.to_tokens(tokens); - self.bounds.to_tokens(tokens); - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for PredicateType { - fn to_tokens(&self, tokens: &mut TokenStream) { - self.lifetimes.to_tokens(tokens); - self.bounded_ty.to_tokens(tokens); - self.colon_token.to_tokens(tokens); - self.bounds.to_tokens(tokens); - } - } - - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for PreciseCapture { - fn to_tokens(&self, tokens: &mut TokenStream) { - self.use_token.to_tokens(tokens); - self.lt_token.to_tokens(tokens); - - // Print lifetimes before types and consts, regardless of their - // order in self.params. - let mut trailing_or_empty = true; - for param in self.params.pairs() { - if let CapturedParam::Lifetime(_) = **param.value() { - param.to_tokens(tokens); - trailing_or_empty = param.punct().is_some(); - } - } - for param in self.params.pairs() { - if let CapturedParam::Ident(_) = **param.value() { - if !trailing_or_empty { - <Token![,]>::default().to_tokens(tokens); - trailing_or_empty = true; - } - param.to_tokens(tokens); - } - } - - self.gt_token.to_tokens(tokens); - } - } - - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for CapturedParam { - fn to_tokens(&self, tokens: &mut TokenStream) { - match self { - CapturedParam::Lifetime(lifetime) => lifetime.to_tokens(tokens), - CapturedParam::Ident(ident) => ident.to_tokens(tokens), - } - } - } - - pub(crate) fn print_const_argument(expr: &Expr, tokens: &mut TokenStream) { - match expr { - Expr::Lit(expr) => expr.to_tokens(tokens), - - Expr::Path(expr) - if expr.attrs.is_empty() - && expr.qself.is_none() - && expr.path.get_ident().is_some() => - { - expr.to_tokens(tokens); - } - - #[cfg(feature = "full")] - Expr::Block(expr) => expr.to_tokens(tokens), - - #[cfg(not(feature = "full"))] - Expr::Verbatim(expr) => expr.to_tokens(tokens), - - // ERROR CORRECTION: Add braces to make sure that the - // generated code is valid. - _ => token::Brace::default().surround(tokens, |tokens| { - #[cfg(feature = "full")] - expr::printing::print_expr(expr, tokens, FixupContext::new_stmt()); - - #[cfg(not(feature = "full"))] - expr.to_tokens(tokens); - }), - } - } -} diff --git a/vendor/syn/src/group.rs b/vendor/syn/src/group.rs deleted file mode 100644 index 1534ae995dd5ab..00000000000000 --- a/vendor/syn/src/group.rs +++ /dev/null @@ -1,291 +0,0 @@ -use crate::error::Result; -use crate::parse::ParseBuffer; -use crate::token; -use proc_macro2::extra::DelimSpan; -use proc_macro2::Delimiter; - -// Not public API. -#[doc(hidden)] -pub struct Parens<'a> { - #[doc(hidden)] - pub token: token::Paren, - #[doc(hidden)] - pub content: ParseBuffer<'a>, -} - -// Not public API. -#[doc(hidden)] -pub struct Braces<'a> { - #[doc(hidden)] - pub token: token::Brace, - #[doc(hidden)] - pub content: ParseBuffer<'a>, -} - -// Not public API. -#[doc(hidden)] -pub struct Brackets<'a> { - #[doc(hidden)] - pub token: token::Bracket, - #[doc(hidden)] - pub content: ParseBuffer<'a>, -} - -// Not public API. 
-#[cfg(any(feature = "full", feature = "derive"))] -#[doc(hidden)] -pub struct Group<'a> { - #[doc(hidden)] - pub token: token::Group, - #[doc(hidden)] - pub content: ParseBuffer<'a>, -} - -// Not public API. -#[doc(hidden)] -pub fn parse_parens<'a>(input: &ParseBuffer<'a>) -> Result<Parens<'a>> { - parse_delimited(input, Delimiter::Parenthesis).map(|(span, content)| Parens { - token: token::Paren(span), - content, - }) -} - -// Not public API. -#[doc(hidden)] -pub fn parse_braces<'a>(input: &ParseBuffer<'a>) -> Result<Braces<'a>> { - parse_delimited(input, Delimiter::Brace).map(|(span, content)| Braces { - token: token::Brace(span), - content, - }) -} - -// Not public API. -#[doc(hidden)] -pub fn parse_brackets<'a>(input: &ParseBuffer<'a>) -> Result<Brackets<'a>> { - parse_delimited(input, Delimiter::Bracket).map(|(span, content)| Brackets { - token: token::Bracket(span), - content, - }) -} - -#[cfg(any(feature = "full", feature = "derive"))] -pub(crate) fn parse_group<'a>(input: &ParseBuffer<'a>) -> Result<Group<'a>> { - parse_delimited(input, Delimiter::None).map(|(span, content)| Group { - token: token::Group(span.join()), - content, - }) -} - -fn parse_delimited<'a>( - input: &ParseBuffer<'a>, - delimiter: Delimiter, -) -> Result<(DelimSpan, ParseBuffer<'a>)> { - input.step(|cursor| { - if let Some((content, span, rest)) = cursor.group(delimiter) { - let scope = span.close(); - let nested = crate::parse::advance_step_cursor(cursor, content); - let unexpected = crate::parse::get_unexpected(input); - let content = crate::parse::new_parse_buffer(scope, nested, unexpected); - Ok(((span, content), rest)) - } else { - let message = match delimiter { - Delimiter::Parenthesis => "expected parentheses", - Delimiter::Brace => "expected curly braces", - Delimiter::Bracket => "expected square brackets", - Delimiter::None => "expected invisible group", - }; - Err(cursor.error(message)) - } - }) -} - -/// Parse a set of parentheses and expose their content to subsequent parsers. -/// -/// # Example -/// -/// ``` -/// # use quote::quote; -/// # -/// use syn::{parenthesized, token, Ident, Result, Token, Type}; -/// use syn::parse::{Parse, ParseStream}; -/// use syn::punctuated::Punctuated; -/// -/// // Parse a simplified tuple struct syntax like: -/// // -/// // struct S(A, B); -/// struct TupleStruct { -/// struct_token: Token![struct], -/// ident: Ident, -/// paren_token: token::Paren, -/// fields: Punctuated<Type, Token![,]>, -/// semi_token: Token![;], -/// } -/// -/// impl Parse for TupleStruct { -/// fn parse(input: ParseStream) -> Result<Self> { -/// let content; -/// Ok(TupleStruct { -/// struct_token: input.parse()?, -/// ident: input.parse()?, -/// paren_token: parenthesized!(content in input), -/// fields: content.parse_terminated(Type::parse, Token![,])?, -/// semi_token: input.parse()?, -/// }) -/// } -/// } -/// # -/// # fn main() { -/// # let input = quote! { -/// # struct S(A, B); -/// # }; -/// # syn::parse2::<TupleStruct>(input).unwrap(); -/// # } -/// ``` -#[macro_export] -#[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] -macro_rules! parenthesized { - ($content:ident in $cursor:expr) => { - match $crate::__private::parse_parens(&$cursor) { - $crate::__private::Ok(parens) => { - $content = parens.content; - parens.token - } - $crate::__private::Err(error) => { - return $crate::__private::Err(error); - } - } - }; -} - -/// Parse a set of curly braces and expose their content to subsequent parsers. 
-/// -/// # Example -/// -/// ``` -/// # use quote::quote; -/// # -/// use syn::{braced, token, Ident, Result, Token, Type}; -/// use syn::parse::{Parse, ParseStream}; -/// use syn::punctuated::Punctuated; -/// -/// // Parse a simplified struct syntax like: -/// // -/// // struct S { -/// // a: A, -/// // b: B, -/// // } -/// struct Struct { -/// struct_token: Token![struct], -/// ident: Ident, -/// brace_token: token::Brace, -/// fields: Punctuated<Field, Token![,]>, -/// } -/// -/// struct Field { -/// name: Ident, -/// colon_token: Token![:], -/// ty: Type, -/// } -/// -/// impl Parse for Struct { -/// fn parse(input: ParseStream) -> Result<Self> { -/// let content; -/// Ok(Struct { -/// struct_token: input.parse()?, -/// ident: input.parse()?, -/// brace_token: braced!(content in input), -/// fields: content.parse_terminated(Field::parse, Token![,])?, -/// }) -/// } -/// } -/// -/// impl Parse for Field { -/// fn parse(input: ParseStream) -> Result<Self> { -/// Ok(Field { -/// name: input.parse()?, -/// colon_token: input.parse()?, -/// ty: input.parse()?, -/// }) -/// } -/// } -/// # -/// # fn main() { -/// # let input = quote! { -/// # struct S { -/// # a: A, -/// # b: B, -/// # } -/// # }; -/// # syn::parse2::<Struct>(input).unwrap(); -/// # } -/// ``` -#[macro_export] -#[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] -macro_rules! braced { - ($content:ident in $cursor:expr) => { - match $crate::__private::parse_braces(&$cursor) { - $crate::__private::Ok(braces) => { - $content = braces.content; - braces.token - } - $crate::__private::Err(error) => { - return $crate::__private::Err(error); - } - } - }; -} - -/// Parse a set of square brackets and expose their content to subsequent -/// parsers. -/// -/// # Example -/// -/// ``` -/// # use quote::quote; -/// # -/// use proc_macro2::TokenStream; -/// use syn::{bracketed, token, Result, Token}; -/// use syn::parse::{Parse, ParseStream}; -/// -/// // Parse an outer attribute like: -/// // -/// // #[repr(C, packed)] -/// struct OuterAttribute { -/// pound_token: Token![#], -/// bracket_token: token::Bracket, -/// content: TokenStream, -/// } -/// -/// impl Parse for OuterAttribute { -/// fn parse(input: ParseStream) -> Result<Self> { -/// let content; -/// Ok(OuterAttribute { -/// pound_token: input.parse()?, -/// bracket_token: bracketed!(content in input), -/// content: content.parse()?, -/// }) -/// } -/// } -/// # -/// # fn main() { -/// # let input = quote! { -/// # #[repr(C, packed)] -/// # }; -/// # syn::parse2::<OuterAttribute>(input).unwrap(); -/// # } -/// ``` -#[macro_export] -#[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] -macro_rules! bracketed { - ($content:ident in $cursor:expr) => { - match $crate::__private::parse_brackets(&$cursor) { - $crate::__private::Ok(brackets) => { - $content = brackets.content; - brackets.token - } - $crate::__private::Err(error) => { - return $crate::__private::Err(error); - } - } - }; -} diff --git a/vendor/syn/src/ident.rs b/vendor/syn/src/ident.rs deleted file mode 100644 index 8a8e8a50d9b0bf..00000000000000 --- a/vendor/syn/src/ident.rs +++ /dev/null @@ -1,108 +0,0 @@ -#[cfg(feature = "parsing")] -use crate::lookahead; - -pub use proc_macro2::Ident; - -#[cfg(feature = "parsing")] -pub_if_not_doc! { - #[doc(hidden)] - #[allow(non_snake_case)] - pub fn Ident(marker: lookahead::TokenMarker) -> Ident { - match marker {} - } -} - -macro_rules! 
ident_from_token { - ($token:ident) => { - impl From<Token![$token]> for Ident { - fn from(token: Token![$token]) -> Ident { - Ident::new(stringify!($token), token.span) - } - } - }; -} - -ident_from_token!(self); -ident_from_token!(Self); -ident_from_token!(super); -ident_from_token!(crate); -ident_from_token!(extern); - -impl From<Token![_]> for Ident { - fn from(token: Token![_]) -> Ident { - Ident::new("_", token.span) - } -} - -pub(crate) fn xid_ok(symbol: &str) -> bool { - let mut chars = symbol.chars(); - let first = chars.next().unwrap(); - if !(first == '_' || unicode_ident::is_xid_start(first)) { - return false; - } - for ch in chars { - if !unicode_ident::is_xid_continue(ch) { - return false; - } - } - true -} - -#[cfg(feature = "parsing")] -mod parsing { - use crate::buffer::Cursor; - use crate::error::Result; - use crate::parse::{Parse, ParseStream}; - use crate::token::Token; - use proc_macro2::Ident; - - fn accept_as_ident(ident: &Ident) -> bool { - match ident.to_string().as_str() { - "_" | - // Based on https://doc.rust-lang.org/1.65.0/reference/keywords.html - "abstract" | "as" | "async" | "await" | "become" | "box" | "break" | - "const" | "continue" | "crate" | "do" | "dyn" | "else" | "enum" | - "extern" | "false" | "final" | "fn" | "for" | "if" | "impl" | "in" | - "let" | "loop" | "macro" | "match" | "mod" | "move" | "mut" | - "override" | "priv" | "pub" | "ref" | "return" | "Self" | "self" | - "static" | "struct" | "super" | "trait" | "true" | "try" | "type" | - "typeof" | "unsafe" | "unsized" | "use" | "virtual" | "where" | - "while" | "yield" => false, - _ => true, - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - impl Parse for Ident { - fn parse(input: ParseStream) -> Result<Self> { - input.step(|cursor| { - if let Some((ident, rest)) = cursor.ident() { - if accept_as_ident(&ident) { - Ok((ident, rest)) - } else { - Err(cursor.error(format_args!( - "expected identifier, found keyword `{}`", - ident, - ))) - } - } else { - Err(cursor.error("expected identifier")) - } - }) - } - } - - impl Token for Ident { - fn peek(cursor: Cursor) -> bool { - if let Some((ident, _rest)) = cursor.ident() { - accept_as_ident(&ident) - } else { - false - } - } - - fn display() -> &'static str { - "identifier" - } - } -} diff --git a/vendor/syn/src/item.rs b/vendor/syn/src/item.rs deleted file mode 100644 index 00beb0d3686992..00000000000000 --- a/vendor/syn/src/item.rs +++ /dev/null @@ -1,3490 +0,0 @@ -use crate::attr::Attribute; -use crate::data::{Fields, FieldsNamed, Variant}; -use crate::derive::{Data, DataEnum, DataStruct, DataUnion, DeriveInput}; -use crate::expr::Expr; -use crate::generics::{Generics, TypeParamBound}; -use crate::ident::Ident; -use crate::lifetime::Lifetime; -use crate::mac::Macro; -use crate::pat::{Pat, PatType}; -use crate::path::Path; -use crate::punctuated::Punctuated; -use crate::restriction::Visibility; -use crate::stmt::Block; -use crate::token; -use crate::ty::{Abi, ReturnType, Type}; -use proc_macro2::TokenStream; -#[cfg(feature = "parsing")] -use std::mem; - -ast_enum_of_structs! { - /// Things that can appear directly inside of a module or scope. - /// - /// # Syntax tree enum - /// - /// This type is a [syntax tree enum]. - /// - /// [syntax tree enum]: crate::expr::Expr#syntax-tree-enums - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - #[non_exhaustive] - pub enum Item { - /// A constant item: `const MAX: u16 = 65535`. - Const(ItemConst), - - /// An enum definition: `enum Foo<A, B> { A(A), B(B) }`. 
- Enum(ItemEnum), - - /// An `extern crate` item: `extern crate serde`. - ExternCrate(ItemExternCrate), - - /// A free-standing function: `fn process(n: usize) -> Result<()> { ... - /// }`. - Fn(ItemFn), - - /// A block of foreign items: `extern "C" { ... }`. - ForeignMod(ItemForeignMod), - - /// An impl block providing trait or associated items: `impl<A> Trait - /// for Data<A> { ... }`. - Impl(ItemImpl), - - /// A macro invocation, which includes `macro_rules!` definitions. - Macro(ItemMacro), - - /// A module or module declaration: `mod m` or `mod m { ... }`. - Mod(ItemMod), - - /// A static item: `static BIKE: Shed = Shed(42)`. - Static(ItemStatic), - - /// A struct definition: `struct Foo<A> { x: A }`. - Struct(ItemStruct), - - /// A trait definition: `pub trait Iterator { ... }`. - Trait(ItemTrait), - - /// A trait alias: `pub trait SharableIterator = Iterator + Sync`. - TraitAlias(ItemTraitAlias), - - /// A type alias: `type Result<T> = std::result::Result<T, MyError>`. - Type(ItemType), - - /// A union definition: `union Foo<A, B> { x: A, y: B }`. - Union(ItemUnion), - - /// A use declaration: `use std::collections::HashMap`. - Use(ItemUse), - - /// Tokens forming an item not interpreted by Syn. - Verbatim(TokenStream), - - // For testing exhaustiveness in downstream code, use the following idiom: - // - // match item { - // #![cfg_attr(test, deny(non_exhaustive_omitted_patterns))] - // - // Item::Const(item) => {...} - // Item::Enum(item) => {...} - // ... - // Item::Verbatim(item) => {...} - // - // _ => { /* some sane fallback */ } - // } - // - // This way we fail your tests but don't break your library when adding - // a variant. You will be notified by a test failure when a variant is - // added, so that you can add code to handle it, but your library will - // continue to compile and work for downstream users in the interim. - } -} - -ast_struct! { - /// A constant item: `const MAX: u16 = 65535`. - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - pub struct ItemConst { - pub attrs: Vec<Attribute>, - pub vis: Visibility, - pub const_token: Token![const], - pub ident: Ident, - pub generics: Generics, - pub colon_token: Token![:], - pub ty: Box<Type>, - pub eq_token: Token![=], - pub expr: Box<Expr>, - pub semi_token: Token![;], - } -} - -ast_struct! { - /// An enum definition: `enum Foo<A, B> { A(A), B(B) }`. - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - pub struct ItemEnum { - pub attrs: Vec<Attribute>, - pub vis: Visibility, - pub enum_token: Token![enum], - pub ident: Ident, - pub generics: Generics, - pub brace_token: token::Brace, - pub variants: Punctuated<Variant, Token![,]>, - } -} - -ast_struct! { - /// An `extern crate` item: `extern crate serde`. - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - pub struct ItemExternCrate { - pub attrs: Vec<Attribute>, - pub vis: Visibility, - pub extern_token: Token![extern], - pub crate_token: Token![crate], - pub ident: Ident, - pub rename: Option<(Token![as], Ident)>, - pub semi_token: Token![;], - } -} - -ast_struct! { - /// A free-standing function: `fn process(n: usize) -> Result<()> { ... }`. - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - pub struct ItemFn { - pub attrs: Vec<Attribute>, - pub vis: Visibility, - pub sig: Signature, - pub block: Box<Block>, - } -} - -ast_struct! { - /// A block of foreign items: `extern "C" { ... }`. 
- #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - pub struct ItemForeignMod { - pub attrs: Vec<Attribute>, - pub unsafety: Option<Token![unsafe]>, - pub abi: Abi, - pub brace_token: token::Brace, - pub items: Vec<ForeignItem>, - } -} - -ast_struct! { - /// An impl block providing trait or associated items: `impl<A> Trait - /// for Data<A> { ... }`. - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - pub struct ItemImpl { - pub attrs: Vec<Attribute>, - pub defaultness: Option<Token![default]>, - pub unsafety: Option<Token![unsafe]>, - pub impl_token: Token![impl], - pub generics: Generics, - /// Trait this impl implements. - pub trait_: Option<(Option<Token![!]>, Path, Token![for])>, - /// The Self type of the impl. - pub self_ty: Box<Type>, - pub brace_token: token::Brace, - pub items: Vec<ImplItem>, - } -} - -ast_struct! { - /// A macro invocation, which includes `macro_rules!` definitions. - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - pub struct ItemMacro { - pub attrs: Vec<Attribute>, - /// The `example` in `macro_rules! example { ... }`. - pub ident: Option<Ident>, - pub mac: Macro, - pub semi_token: Option<Token![;]>, - } -} - -ast_struct! { - /// A module or module declaration: `mod m` or `mod m { ... }`. - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - pub struct ItemMod { - pub attrs: Vec<Attribute>, - pub vis: Visibility, - pub unsafety: Option<Token![unsafe]>, - pub mod_token: Token![mod], - pub ident: Ident, - pub content: Option<(token::Brace, Vec<Item>)>, - pub semi: Option<Token![;]>, - } -} - -ast_struct! { - /// A static item: `static BIKE: Shed = Shed(42)`. - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - pub struct ItemStatic { - pub attrs: Vec<Attribute>, - pub vis: Visibility, - pub static_token: Token![static], - pub mutability: StaticMutability, - pub ident: Ident, - pub colon_token: Token![:], - pub ty: Box<Type>, - pub eq_token: Token![=], - pub expr: Box<Expr>, - pub semi_token: Token![;], - } -} - -ast_struct! { - /// A struct definition: `struct Foo<A> { x: A }`. - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - pub struct ItemStruct { - pub attrs: Vec<Attribute>, - pub vis: Visibility, - pub struct_token: Token![struct], - pub ident: Ident, - pub generics: Generics, - pub fields: Fields, - pub semi_token: Option<Token![;]>, - } -} - -ast_struct! { - /// A trait definition: `pub trait Iterator { ... }`. - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - pub struct ItemTrait { - pub attrs: Vec<Attribute>, - pub vis: Visibility, - pub unsafety: Option<Token![unsafe]>, - pub auto_token: Option<Token![auto]>, - pub restriction: Option<ImplRestriction>, - pub trait_token: Token![trait], - pub ident: Ident, - pub generics: Generics, - pub colon_token: Option<Token![:]>, - pub supertraits: Punctuated<TypeParamBound, Token![+]>, - pub brace_token: token::Brace, - pub items: Vec<TraitItem>, - } -} - -ast_struct! { - /// A trait alias: `pub trait SharableIterator = Iterator + Sync`. - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - pub struct ItemTraitAlias { - pub attrs: Vec<Attribute>, - pub vis: Visibility, - pub trait_token: Token![trait], - pub ident: Ident, - pub generics: Generics, - pub eq_token: Token![=], - pub bounds: Punctuated<TypeParamBound, Token![+]>, - pub semi_token: Token![;], - } -} - -ast_struct! { - /// A type alias: `type Result<T> = std::result::Result<T, MyError>`. 
- #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - pub struct ItemType { - pub attrs: Vec<Attribute>, - pub vis: Visibility, - pub type_token: Token![type], - pub ident: Ident, - pub generics: Generics, - pub eq_token: Token![=], - pub ty: Box<Type>, - pub semi_token: Token![;], - } -} - -ast_struct! { - /// A union definition: `union Foo<A, B> { x: A, y: B }`. - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - pub struct ItemUnion { - pub attrs: Vec<Attribute>, - pub vis: Visibility, - pub union_token: Token![union], - pub ident: Ident, - pub generics: Generics, - pub fields: FieldsNamed, - } -} - -ast_struct! { - /// A use declaration: `use std::collections::HashMap`. - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - pub struct ItemUse { - pub attrs: Vec<Attribute>, - pub vis: Visibility, - pub use_token: Token![use], - pub leading_colon: Option<Token![::]>, - pub tree: UseTree, - pub semi_token: Token![;], - } -} - -impl Item { - #[cfg(feature = "parsing")] - pub(crate) fn replace_attrs(&mut self, new: Vec<Attribute>) -> Vec<Attribute> { - match self { - Item::Const(ItemConst { attrs, .. }) - | Item::Enum(ItemEnum { attrs, .. }) - | Item::ExternCrate(ItemExternCrate { attrs, .. }) - | Item::Fn(ItemFn { attrs, .. }) - | Item::ForeignMod(ItemForeignMod { attrs, .. }) - | Item::Impl(ItemImpl { attrs, .. }) - | Item::Macro(ItemMacro { attrs, .. }) - | Item::Mod(ItemMod { attrs, .. }) - | Item::Static(ItemStatic { attrs, .. }) - | Item::Struct(ItemStruct { attrs, .. }) - | Item::Trait(ItemTrait { attrs, .. }) - | Item::TraitAlias(ItemTraitAlias { attrs, .. }) - | Item::Type(ItemType { attrs, .. }) - | Item::Union(ItemUnion { attrs, .. }) - | Item::Use(ItemUse { attrs, .. }) => mem::replace(attrs, new), - Item::Verbatim(_) => Vec::new(), - } - } -} - -impl From<DeriveInput> for Item { - fn from(input: DeriveInput) -> Item { - match input.data { - Data::Struct(data) => Item::Struct(ItemStruct { - attrs: input.attrs, - vis: input.vis, - struct_token: data.struct_token, - ident: input.ident, - generics: input.generics, - fields: data.fields, - semi_token: data.semi_token, - }), - Data::Enum(data) => Item::Enum(ItemEnum { - attrs: input.attrs, - vis: input.vis, - enum_token: data.enum_token, - ident: input.ident, - generics: input.generics, - brace_token: data.brace_token, - variants: data.variants, - }), - Data::Union(data) => Item::Union(ItemUnion { - attrs: input.attrs, - vis: input.vis, - union_token: data.union_token, - ident: input.ident, - generics: input.generics, - fields: data.fields, - }), - } - } -} - -impl From<ItemStruct> for DeriveInput { - fn from(input: ItemStruct) -> DeriveInput { - DeriveInput { - attrs: input.attrs, - vis: input.vis, - ident: input.ident, - generics: input.generics, - data: Data::Struct(DataStruct { - struct_token: input.struct_token, - fields: input.fields, - semi_token: input.semi_token, - }), - } - } -} - -impl From<ItemEnum> for DeriveInput { - fn from(input: ItemEnum) -> DeriveInput { - DeriveInput { - attrs: input.attrs, - vis: input.vis, - ident: input.ident, - generics: input.generics, - data: Data::Enum(DataEnum { - enum_token: input.enum_token, - brace_token: input.brace_token, - variants: input.variants, - }), - } - } -} - -impl From<ItemUnion> for DeriveInput { - fn from(input: ItemUnion) -> DeriveInput { - DeriveInput { - attrs: input.attrs, - vis: input.vis, - ident: input.ident, - generics: input.generics, - data: Data::Union(DataUnion { - union_token: input.union_token, - fields: input.fields, - }), - } - } -} - -ast_enum_of_structs! 
{ - /// A suffix of an import tree in a `use` item: `Type as Renamed` or `*`. - /// - /// # Syntax tree enum - /// - /// This type is a [syntax tree enum]. - /// - /// [syntax tree enum]: crate::expr::Expr#syntax-tree-enums - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - pub enum UseTree { - /// A path prefix of imports in a `use` item: `std::...`. - Path(UsePath), - - /// An identifier imported by a `use` item: `HashMap`. - Name(UseName), - - /// An renamed identifier imported by a `use` item: `HashMap as Map`. - Rename(UseRename), - - /// A glob import in a `use` item: `*`. - Glob(UseGlob), - - /// A braced group of imports in a `use` item: `{A, B, C}`. - Group(UseGroup), - } -} - -ast_struct! { - /// A path prefix of imports in a `use` item: `std::...`. - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - pub struct UsePath { - pub ident: Ident, - pub colon2_token: Token![::], - pub tree: Box<UseTree>, - } -} - -ast_struct! { - /// An identifier imported by a `use` item: `HashMap`. - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - pub struct UseName { - pub ident: Ident, - } -} - -ast_struct! { - /// An renamed identifier imported by a `use` item: `HashMap as Map`. - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - pub struct UseRename { - pub ident: Ident, - pub as_token: Token![as], - pub rename: Ident, - } -} - -ast_struct! { - /// A glob import in a `use` item: `*`. - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - pub struct UseGlob { - pub star_token: Token![*], - } -} - -ast_struct! { - /// A braced group of imports in a `use` item: `{A, B, C}`. - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - pub struct UseGroup { - pub brace_token: token::Brace, - pub items: Punctuated<UseTree, Token![,]>, - } -} - -ast_enum_of_structs! { - /// An item within an `extern` block. - /// - /// # Syntax tree enum - /// - /// This type is a [syntax tree enum]. - /// - /// [syntax tree enum]: crate::expr::Expr#syntax-tree-enums - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - #[non_exhaustive] - pub enum ForeignItem { - /// A foreign function in an `extern` block. - Fn(ForeignItemFn), - - /// A foreign static item in an `extern` block: `static ext: u8`. - Static(ForeignItemStatic), - - /// A foreign type in an `extern` block: `type void`. - Type(ForeignItemType), - - /// A macro invocation within an extern block. - Macro(ForeignItemMacro), - - /// Tokens in an `extern` block not interpreted by Syn. - Verbatim(TokenStream), - - // For testing exhaustiveness in downstream code, use the following idiom: - // - // match item { - // #![cfg_attr(test, deny(non_exhaustive_omitted_patterns))] - // - // ForeignItem::Fn(item) => {...} - // ForeignItem::Static(item) => {...} - // ... - // ForeignItem::Verbatim(item) => {...} - // - // _ => { /* some sane fallback */ } - // } - // - // This way we fail your tests but don't break your library when adding - // a variant. You will be notified by a test failure when a variant is - // added, so that you can add code to handle it, but your library will - // continue to compile and work for downstream users in the interim. - } -} - -ast_struct! { - /// A foreign function in an `extern` block. - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - pub struct ForeignItemFn { - pub attrs: Vec<Attribute>, - pub vis: Visibility, - pub sig: Signature, - pub semi_token: Token![;], - } -} - -ast_struct! { - /// A foreign static item in an `extern` block: `static ext: u8`. 
- #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - pub struct ForeignItemStatic { - pub attrs: Vec<Attribute>, - pub vis: Visibility, - pub static_token: Token![static], - pub mutability: StaticMutability, - pub ident: Ident, - pub colon_token: Token![:], - pub ty: Box<Type>, - pub semi_token: Token![;], - } -} - -ast_struct! { - /// A foreign type in an `extern` block: `type void`. - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - pub struct ForeignItemType { - pub attrs: Vec<Attribute>, - pub vis: Visibility, - pub type_token: Token![type], - pub ident: Ident, - pub generics: Generics, - pub semi_token: Token![;], - } -} - -ast_struct! { - /// A macro invocation within an extern block. - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - pub struct ForeignItemMacro { - pub attrs: Vec<Attribute>, - pub mac: Macro, - pub semi_token: Option<Token![;]>, - } -} - -ast_enum_of_structs! { - /// An item declaration within the definition of a trait. - /// - /// # Syntax tree enum - /// - /// This type is a [syntax tree enum]. - /// - /// [syntax tree enum]: crate::expr::Expr#syntax-tree-enums - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - #[non_exhaustive] - pub enum TraitItem { - /// An associated constant within the definition of a trait. - Const(TraitItemConst), - - /// An associated function within the definition of a trait. - Fn(TraitItemFn), - - /// An associated type within the definition of a trait. - Type(TraitItemType), - - /// A macro invocation within the definition of a trait. - Macro(TraitItemMacro), - - /// Tokens within the definition of a trait not interpreted by Syn. - Verbatim(TokenStream), - - // For testing exhaustiveness in downstream code, use the following idiom: - // - // match item { - // #![cfg_attr(test, deny(non_exhaustive_omitted_patterns))] - // - // TraitItem::Const(item) => {...} - // TraitItem::Fn(item) => {...} - // ... - // TraitItem::Verbatim(item) => {...} - // - // _ => { /* some sane fallback */ } - // } - // - // This way we fail your tests but don't break your library when adding - // a variant. You will be notified by a test failure when a variant is - // added, so that you can add code to handle it, but your library will - // continue to compile and work for downstream users in the interim. - } -} - -ast_struct! { - /// An associated constant within the definition of a trait. - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - pub struct TraitItemConst { - pub attrs: Vec<Attribute>, - pub const_token: Token![const], - pub ident: Ident, - pub generics: Generics, - pub colon_token: Token![:], - pub ty: Type, - pub default: Option<(Token![=], Expr)>, - pub semi_token: Token![;], - } -} - -ast_struct! { - /// An associated function within the definition of a trait. - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - pub struct TraitItemFn { - pub attrs: Vec<Attribute>, - pub sig: Signature, - pub default: Option<Block>, - pub semi_token: Option<Token![;]>, - } -} - -ast_struct! { - /// An associated type within the definition of a trait. - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - pub struct TraitItemType { - pub attrs: Vec<Attribute>, - pub type_token: Token![type], - pub ident: Ident, - pub generics: Generics, - pub colon_token: Option<Token![:]>, - pub bounds: Punctuated<TypeParamBound, Token![+]>, - pub default: Option<(Token![=], Type)>, - pub semi_token: Token![;], - } -} - -ast_struct! { - /// A macro invocation within the definition of a trait. 
- #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - pub struct TraitItemMacro { - pub attrs: Vec<Attribute>, - pub mac: Macro, - pub semi_token: Option<Token![;]>, - } -} - -ast_enum_of_structs! { - /// An item within an impl block. - /// - /// # Syntax tree enum - /// - /// This type is a [syntax tree enum]. - /// - /// [syntax tree enum]: crate::expr::Expr#syntax-tree-enums - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - #[non_exhaustive] - pub enum ImplItem { - /// An associated constant within an impl block. - Const(ImplItemConst), - - /// An associated function within an impl block. - Fn(ImplItemFn), - - /// An associated type within an impl block. - Type(ImplItemType), - - /// A macro invocation within an impl block. - Macro(ImplItemMacro), - - /// Tokens within an impl block not interpreted by Syn. - Verbatim(TokenStream), - - // For testing exhaustiveness in downstream code, use the following idiom: - // - // match item { - // #![cfg_attr(test, deny(non_exhaustive_omitted_patterns))] - // - // ImplItem::Const(item) => {...} - // ImplItem::Fn(item) => {...} - // ... - // ImplItem::Verbatim(item) => {...} - // - // _ => { /* some sane fallback */ } - // } - // - // This way we fail your tests but don't break your library when adding - // a variant. You will be notified by a test failure when a variant is - // added, so that you can add code to handle it, but your library will - // continue to compile and work for downstream users in the interim. - } -} - -ast_struct! { - /// An associated constant within an impl block. - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - pub struct ImplItemConst { - pub attrs: Vec<Attribute>, - pub vis: Visibility, - pub defaultness: Option<Token![default]>, - pub const_token: Token![const], - pub ident: Ident, - pub generics: Generics, - pub colon_token: Token![:], - pub ty: Type, - pub eq_token: Token![=], - pub expr: Expr, - pub semi_token: Token![;], - } -} - -ast_struct! { - /// An associated function within an impl block. - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - pub struct ImplItemFn { - pub attrs: Vec<Attribute>, - pub vis: Visibility, - pub defaultness: Option<Token![default]>, - pub sig: Signature, - pub block: Block, - } -} - -ast_struct! { - /// An associated type within an impl block. - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - pub struct ImplItemType { - pub attrs: Vec<Attribute>, - pub vis: Visibility, - pub defaultness: Option<Token![default]>, - pub type_token: Token![type], - pub ident: Ident, - pub generics: Generics, - pub eq_token: Token![=], - pub ty: Type, - pub semi_token: Token![;], - } -} - -ast_struct! { - /// A macro invocation within an impl block. - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - pub struct ImplItemMacro { - pub attrs: Vec<Attribute>, - pub mac: Macro, - pub semi_token: Option<Token![;]>, - } -} - -ast_struct! { - /// A function signature in a trait or implementation: `unsafe fn - /// initialize(&self)`. - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - pub struct Signature { - pub constness: Option<Token![const]>, - pub asyncness: Option<Token![async]>, - pub unsafety: Option<Token![unsafe]>, - pub abi: Option<Abi>, - pub fn_token: Token![fn], - pub ident: Ident, - pub generics: Generics, - pub paren_token: token::Paren, - pub inputs: Punctuated<FnArg, Token![,]>, - pub variadic: Option<Variadic>, - pub output: ReturnType, - } -} - -impl Signature { - /// A method's `self` receiver, such as `&self` or `self: Box<Self>`. 
- pub fn receiver(&self) -> Option<&Receiver> { - let arg = self.inputs.first()?; - match arg { - FnArg::Receiver(receiver) => Some(receiver), - FnArg::Typed(_) => None, - } - } -} - -ast_enum_of_structs! { - /// An argument in a function signature: the `n: usize` in `fn f(n: usize)`. - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - pub enum FnArg { - /// The `self` argument of an associated method. - Receiver(Receiver), - - /// A function argument accepted by pattern and type. - Typed(PatType), - } -} - -ast_struct! { - /// The `self` argument of an associated method. - /// - /// If `colon_token` is present, the receiver is written with an explicit - /// type such as `self: Box<Self>`. If `colon_token` is absent, the receiver - /// is written in shorthand such as `self` or `&self` or `&mut self`. In the - /// shorthand case, the type in `ty` is reconstructed as one of `Self`, - /// `&Self`, or `&mut Self`. - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - pub struct Receiver { - pub attrs: Vec<Attribute>, - pub reference: Option<(Token![&], Option<Lifetime>)>, - pub mutability: Option<Token![mut]>, - pub self_token: Token![self], - pub colon_token: Option<Token![:]>, - pub ty: Box<Type>, - } -} - -impl Receiver { - pub fn lifetime(&self) -> Option<&Lifetime> { - self.reference.as_ref()?.1.as_ref() - } -} - -ast_struct! { - /// The variadic argument of a foreign function. - /// - /// ```rust - /// # struct c_char; - /// # struct c_int; - /// # - /// extern "C" { - /// fn printf(format: *const c_char, ...) -> c_int; - /// // ^^^ - /// } - /// ``` - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - pub struct Variadic { - pub attrs: Vec<Attribute>, - pub pat: Option<(Box<Pat>, Token![:])>, - pub dots: Token![...], - pub comma: Option<Token![,]>, - } -} - -ast_enum! { - /// The mutability of an `Item::Static` or `ForeignItem::Static`. - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - #[non_exhaustive] - pub enum StaticMutability { - Mut(Token![mut]), - None, - } -} - -ast_enum! { - /// Unused, but reserved for RFC 3323 restrictions. 
- #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - #[non_exhaustive] - pub enum ImplRestriction {} - - - // TODO: https://rust-lang.github.io/rfcs/3323-restrictions.html - // - // pub struct ImplRestriction { - // pub impl_token: Token![impl], - // pub paren_token: token::Paren, - // pub in_token: Option<Token![in]>, - // pub path: Box<Path>, - // } -} - -#[cfg(feature = "parsing")] -pub(crate) mod parsing { - use crate::attr::{self, Attribute}; - use crate::derive; - use crate::error::{Error, Result}; - use crate::expr::Expr; - use crate::ext::IdentExt as _; - use crate::generics::{self, Generics, TypeParamBound}; - use crate::ident::Ident; - use crate::item::{ - FnArg, ForeignItem, ForeignItemFn, ForeignItemMacro, ForeignItemStatic, ForeignItemType, - ImplItem, ImplItemConst, ImplItemFn, ImplItemMacro, ImplItemType, Item, ItemConst, - ItemEnum, ItemExternCrate, ItemFn, ItemForeignMod, ItemImpl, ItemMacro, ItemMod, - ItemStatic, ItemStruct, ItemTrait, ItemTraitAlias, ItemType, ItemUnion, ItemUse, Receiver, - Signature, StaticMutability, TraitItem, TraitItemConst, TraitItemFn, TraitItemMacro, - TraitItemType, UseGlob, UseGroup, UseName, UsePath, UseRename, UseTree, Variadic, - }; - use crate::lifetime::Lifetime; - use crate::lit::LitStr; - use crate::mac::{self, Macro}; - use crate::parse::discouraged::Speculative as _; - use crate::parse::{Parse, ParseBuffer, ParseStream}; - use crate::pat::{Pat, PatType, PatWild}; - use crate::path::Path; - use crate::punctuated::Punctuated; - use crate::restriction::Visibility; - use crate::stmt::Block; - use crate::token; - use crate::ty::{Abi, ReturnType, Type, TypePath, TypeReference}; - use crate::verbatim; - use proc_macro2::TokenStream; - - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - impl Parse for Item { - fn parse(input: ParseStream) -> Result<Self> { - let begin = input.fork(); - let attrs = input.call(Attribute::parse_outer)?; - parse_rest_of_item(begin, attrs, input) - } - } - - pub(crate) fn parse_rest_of_item( - begin: ParseBuffer, - mut attrs: Vec<Attribute>, - input: ParseStream, - ) -> Result<Item> { - let ahead = input.fork(); - let vis: Visibility = ahead.parse()?; - - let lookahead = ahead.lookahead1(); - let allow_safe = false; - let mut item = if lookahead.peek(Token![fn]) || peek_signature(&ahead, allow_safe) { - let vis: Visibility = input.parse()?; - let sig: Signature = input.parse()?; - if input.peek(Token![;]) { - input.parse::<Token![;]>()?; - Ok(Item::Verbatim(verbatim::between(&begin, input))) - } else { - parse_rest_of_fn(input, Vec::new(), vis, sig).map(Item::Fn) - } - } else if lookahead.peek(Token![extern]) { - ahead.parse::<Token![extern]>()?; - let lookahead = ahead.lookahead1(); - if lookahead.peek(Token![crate]) { - input.parse().map(Item::ExternCrate) - } else if lookahead.peek(token::Brace) { - input.parse().map(Item::ForeignMod) - } else if lookahead.peek(LitStr) { - ahead.parse::<LitStr>()?; - let lookahead = ahead.lookahead1(); - if lookahead.peek(token::Brace) { - input.parse().map(Item::ForeignMod) - } else { - Err(lookahead.error()) - } - } else { - Err(lookahead.error()) - } - } else if lookahead.peek(Token![use]) { - let allow_crate_root_in_path = true; - match parse_item_use(input, allow_crate_root_in_path)? 
{ - Some(item_use) => Ok(Item::Use(item_use)), - None => Ok(Item::Verbatim(verbatim::between(&begin, input))), - } - } else if lookahead.peek(Token![static]) { - let vis = input.parse()?; - let static_token = input.parse()?; - let mutability = input.parse()?; - let ident = input.parse()?; - if input.peek(Token![=]) { - input.parse::<Token![=]>()?; - input.parse::<Expr>()?; - input.parse::<Token![;]>()?; - Ok(Item::Verbatim(verbatim::between(&begin, input))) - } else { - let colon_token = input.parse()?; - let ty = input.parse()?; - if input.peek(Token![;]) { - input.parse::<Token![;]>()?; - Ok(Item::Verbatim(verbatim::between(&begin, input))) - } else { - Ok(Item::Static(ItemStatic { - attrs: Vec::new(), - vis, - static_token, - mutability, - ident, - colon_token, - ty, - eq_token: input.parse()?, - expr: input.parse()?, - semi_token: input.parse()?, - })) - } - } - } else if lookahead.peek(Token![const]) { - let vis = input.parse()?; - let const_token: Token![const] = input.parse()?; - let lookahead = input.lookahead1(); - let ident = if lookahead.peek(Ident) || lookahead.peek(Token![_]) { - input.call(Ident::parse_any)? - } else { - return Err(lookahead.error()); - }; - let mut generics: Generics = input.parse()?; - let colon_token = input.parse()?; - let ty = input.parse()?; - let value = if let Some(eq_token) = input.parse::<Option<Token![=]>>()? { - let expr: Expr = input.parse()?; - Some((eq_token, expr)) - } else { - None - }; - generics.where_clause = input.parse()?; - let semi_token: Token![;] = input.parse()?; - match value { - Some((eq_token, expr)) - if generics.lt_token.is_none() && generics.where_clause.is_none() => - { - Ok(Item::Const(ItemConst { - attrs: Vec::new(), - vis, - const_token, - ident, - generics, - colon_token, - ty, - eq_token, - expr: Box::new(expr), - semi_token, - })) - } - _ => Ok(Item::Verbatim(verbatim::between(&begin, input))), - } - } else if lookahead.peek(Token![unsafe]) { - ahead.parse::<Token![unsafe]>()?; - let lookahead = ahead.lookahead1(); - if lookahead.peek(Token![trait]) - || lookahead.peek(Token![auto]) && ahead.peek2(Token![trait]) - { - input.parse().map(Item::Trait) - } else if lookahead.peek(Token![impl]) { - let allow_verbatim_impl = true; - if let Some(item) = parse_impl(input, allow_verbatim_impl)? { - Ok(Item::Impl(item)) - } else { - Ok(Item::Verbatim(verbatim::between(&begin, input))) - } - } else if lookahead.peek(Token![extern]) { - input.parse().map(Item::ForeignMod) - } else if lookahead.peek(Token![mod]) { - input.parse().map(Item::Mod) - } else { - Err(lookahead.error()) - } - } else if lookahead.peek(Token![mod]) { - input.parse().map(Item::Mod) - } else if lookahead.peek(Token![type]) { - parse_item_type(begin, input) - } else if lookahead.peek(Token![struct]) { - input.parse().map(Item::Struct) - } else if lookahead.peek(Token![enum]) { - input.parse().map(Item::Enum) - } else if lookahead.peek(Token![union]) && ahead.peek2(Ident) { - input.parse().map(Item::Union) - } else if lookahead.peek(Token![trait]) { - input.call(parse_trait_or_trait_alias) - } else if lookahead.peek(Token![auto]) && ahead.peek2(Token![trait]) { - input.parse().map(Item::Trait) - } else if lookahead.peek(Token![impl]) - || lookahead.peek(Token![default]) && !ahead.peek2(Token![!]) - { - let allow_verbatim_impl = true; - if let Some(item) = parse_impl(input, allow_verbatim_impl)? 
{ - Ok(Item::Impl(item)) - } else { - Ok(Item::Verbatim(verbatim::between(&begin, input))) - } - } else if lookahead.peek(Token![macro]) { - input.advance_to(&ahead); - parse_macro2(begin, vis, input) - } else if vis.is_inherited() - && (lookahead.peek(Ident) - || lookahead.peek(Token![self]) - || lookahead.peek(Token![super]) - || lookahead.peek(Token![crate]) - || lookahead.peek(Token![::])) - { - input.parse().map(Item::Macro) - } else { - Err(lookahead.error()) - }?; - - attrs.extend(item.replace_attrs(Vec::new())); - item.replace_attrs(attrs); - Ok(item) - } - - struct FlexibleItemType { - vis: Visibility, - defaultness: Option<Token![default]>, - type_token: Token![type], - ident: Ident, - generics: Generics, - colon_token: Option<Token![:]>, - bounds: Punctuated<TypeParamBound, Token![+]>, - ty: Option<(Token![=], Type)>, - semi_token: Token![;], - } - - enum TypeDefaultness { - Optional, - Disallowed, - } - - enum WhereClauseLocation { - // type Ty<T> where T: 'static = T; - BeforeEq, - // type Ty<T> = T where T: 'static; - AfterEq, - // TODO: goes away once the migration period on rust-lang/rust#89122 is over - Both, - } - - impl FlexibleItemType { - fn parse( - input: ParseStream, - allow_defaultness: TypeDefaultness, - where_clause_location: WhereClauseLocation, - ) -> Result<Self> { - let vis: Visibility = input.parse()?; - let defaultness: Option<Token![default]> = match allow_defaultness { - TypeDefaultness::Optional => input.parse()?, - TypeDefaultness::Disallowed => None, - }; - let type_token: Token![type] = input.parse()?; - let ident: Ident = input.parse()?; - let mut generics: Generics = input.parse()?; - let (colon_token, bounds) = Self::parse_optional_bounds(input)?; - - match where_clause_location { - WhereClauseLocation::BeforeEq | WhereClauseLocation::Both => { - generics.where_clause = input.parse()?; - } - WhereClauseLocation::AfterEq => {} - } - - let ty = Self::parse_optional_definition(input)?; - - match where_clause_location { - WhereClauseLocation::AfterEq | WhereClauseLocation::Both - if generics.where_clause.is_none() => - { - generics.where_clause = input.parse()?; - } - _ => {} - } - - let semi_token: Token![;] = input.parse()?; - - Ok(FlexibleItemType { - vis, - defaultness, - type_token, - ident, - generics, - colon_token, - bounds, - ty, - semi_token, - }) - } - - fn parse_optional_bounds( - input: ParseStream, - ) -> Result<(Option<Token![:]>, Punctuated<TypeParamBound, Token![+]>)> { - let colon_token: Option<Token![:]> = input.parse()?; - - let mut bounds = Punctuated::new(); - if colon_token.is_some() { - loop { - if input.peek(Token![where]) || input.peek(Token![=]) || input.peek(Token![;]) { - break; - } - bounds.push_value({ - let allow_precise_capture = false; - let allow_const = true; - TypeParamBound::parse_single(input, allow_precise_capture, allow_const)? 
- }); - if input.peek(Token![where]) || input.peek(Token![=]) || input.peek(Token![;]) { - break; - } - bounds.push_punct(input.parse::<Token![+]>()?); - } - } - - Ok((colon_token, bounds)) - } - - fn parse_optional_definition(input: ParseStream) -> Result<Option<(Token![=], Type)>> { - let eq_token: Option<Token![=]> = input.parse()?; - if let Some(eq_token) = eq_token { - let definition: Type = input.parse()?; - Ok(Some((eq_token, definition))) - } else { - Ok(None) - } - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - impl Parse for ItemMacro { - fn parse(input: ParseStream) -> Result<Self> { - let attrs = input.call(Attribute::parse_outer)?; - let path = input.call(Path::parse_mod_style)?; - let bang_token: Token![!] = input.parse()?; - let ident: Option<Ident> = if input.peek(Token![try]) { - input.call(Ident::parse_any).map(Some) - } else { - input.parse() - }?; - let (delimiter, tokens) = input.call(mac::parse_delimiter)?; - let semi_token: Option<Token![;]> = if !delimiter.is_brace() { - Some(input.parse()?) - } else { - None - }; - Ok(ItemMacro { - attrs, - ident, - mac: Macro { - path, - bang_token, - delimiter, - tokens, - }, - semi_token, - }) - } - } - - fn parse_macro2(begin: ParseBuffer, _vis: Visibility, input: ParseStream) -> Result<Item> { - input.parse::<Token![macro]>()?; - input.parse::<Ident>()?; - - let mut lookahead = input.lookahead1(); - if lookahead.peek(token::Paren) { - let paren_content; - parenthesized!(paren_content in input); - paren_content.parse::<TokenStream>()?; - lookahead = input.lookahead1(); - } - - if lookahead.peek(token::Brace) { - let brace_content; - braced!(brace_content in input); - brace_content.parse::<TokenStream>()?; - } else { - return Err(lookahead.error()); - } - - Ok(Item::Verbatim(verbatim::between(&begin, input))) - } - - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - impl Parse for ItemExternCrate { - fn parse(input: ParseStream) -> Result<Self> { - Ok(ItemExternCrate { - attrs: input.call(Attribute::parse_outer)?, - vis: input.parse()?, - extern_token: input.parse()?, - crate_token: input.parse()?, - ident: { - if input.peek(Token![self]) { - input.call(Ident::parse_any)? - } else { - input.parse()? - } - }, - rename: { - if input.peek(Token![as]) { - let as_token: Token![as] = input.parse()?; - let rename: Ident = if input.peek(Token![_]) { - Ident::from(input.parse::<Token![_]>()?) - } else { - input.parse()? 
- }; - Some((as_token, rename)) - } else { - None - } - }, - semi_token: input.parse()?, - }) - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - impl Parse for ItemUse { - fn parse(input: ParseStream) -> Result<Self> { - let allow_crate_root_in_path = false; - parse_item_use(input, allow_crate_root_in_path).map(Option::unwrap) - } - } - - fn parse_item_use( - input: ParseStream, - allow_crate_root_in_path: bool, - ) -> Result<Option<ItemUse>> { - let attrs = input.call(Attribute::parse_outer)?; - let vis: Visibility = input.parse()?; - let use_token: Token![use] = input.parse()?; - let leading_colon: Option<Token![::]> = input.parse()?; - let tree = parse_use_tree(input, allow_crate_root_in_path && leading_colon.is_none())?; - let semi_token: Token![;] = input.parse()?; - - let tree = match tree { - Some(tree) => tree, - None => return Ok(None), - }; - - Ok(Some(ItemUse { - attrs, - vis, - use_token, - leading_colon, - tree, - semi_token, - })) - } - - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - impl Parse for UseTree { - fn parse(input: ParseStream) -> Result<UseTree> { - let allow_crate_root_in_path = false; - parse_use_tree(input, allow_crate_root_in_path).map(Option::unwrap) - } - } - - fn parse_use_tree( - input: ParseStream, - allow_crate_root_in_path: bool, - ) -> Result<Option<UseTree>> { - let lookahead = input.lookahead1(); - if lookahead.peek(Ident) - || lookahead.peek(Token![self]) - || lookahead.peek(Token![super]) - || lookahead.peek(Token![crate]) - || lookahead.peek(Token![try]) - { - let ident = input.call(Ident::parse_any)?; - if input.peek(Token![::]) { - Ok(Some(UseTree::Path(UsePath { - ident, - colon2_token: input.parse()?, - tree: Box::new(input.parse()?), - }))) - } else if input.peek(Token![as]) { - Ok(Some(UseTree::Rename(UseRename { - ident, - as_token: input.parse()?, - rename: { - if input.peek(Ident) { - input.parse()? - } else if input.peek(Token![_]) { - Ident::from(input.parse::<Token![_]>()?) - } else { - return Err(input.error("expected identifier or underscore")); - } - }, - }))) - } else { - Ok(Some(UseTree::Name(UseName { ident }))) - } - } else if lookahead.peek(Token![*]) { - Ok(Some(UseTree::Glob(UseGlob { - star_token: input.parse()?, - }))) - } else if lookahead.peek(token::Brace) { - let content; - let brace_token = braced!(content in input); - let mut items = Punctuated::new(); - let mut has_any_crate_root_in_path = false; - loop { - if content.is_empty() { - break; - } - let this_tree_starts_with_crate_root = - allow_crate_root_in_path && content.parse::<Option<Token![::]>>()?.is_some(); - has_any_crate_root_in_path |= this_tree_starts_with_crate_root; - match parse_use_tree( - &content, - allow_crate_root_in_path && !this_tree_starts_with_crate_root, - )? 
{ - Some(tree) if !has_any_crate_root_in_path => items.push_value(tree), - _ => has_any_crate_root_in_path = true, - } - if content.is_empty() { - break; - } - let comma: Token![,] = content.parse()?; - if !has_any_crate_root_in_path { - items.push_punct(comma); - } - } - if has_any_crate_root_in_path { - Ok(None) - } else { - Ok(Some(UseTree::Group(UseGroup { brace_token, items }))) - } - } else { - Err(lookahead.error()) - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - impl Parse for ItemStatic { - fn parse(input: ParseStream) -> Result<Self> { - Ok(ItemStatic { - attrs: input.call(Attribute::parse_outer)?, - vis: input.parse()?, - static_token: input.parse()?, - mutability: input.parse()?, - ident: input.parse()?, - colon_token: input.parse()?, - ty: input.parse()?, - eq_token: input.parse()?, - expr: input.parse()?, - semi_token: input.parse()?, - }) - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - impl Parse for ItemConst { - fn parse(input: ParseStream) -> Result<Self> { - let attrs = input.call(Attribute::parse_outer)?; - let vis: Visibility = input.parse()?; - let const_token: Token![const] = input.parse()?; - - let lookahead = input.lookahead1(); - let ident = if lookahead.peek(Ident) || lookahead.peek(Token![_]) { - input.call(Ident::parse_any)? - } else { - return Err(lookahead.error()); - }; - - let colon_token: Token![:] = input.parse()?; - let ty: Type = input.parse()?; - let eq_token: Token![=] = input.parse()?; - let expr: Expr = input.parse()?; - let semi_token: Token![;] = input.parse()?; - - Ok(ItemConst { - attrs, - vis, - const_token, - ident, - generics: Generics::default(), - colon_token, - ty: Box::new(ty), - eq_token, - expr: Box::new(expr), - semi_token, - }) - } - } - - fn peek_signature(input: ParseStream, allow_safe: bool) -> bool { - let fork = input.fork(); - fork.parse::<Option<Token![const]>>().is_ok() - && fork.parse::<Option<Token![async]>>().is_ok() - && ((allow_safe - && token::parsing::peek_keyword(fork.cursor(), "safe") - && token::parsing::keyword(&fork, "safe").is_ok()) - || fork.parse::<Option<Token![unsafe]>>().is_ok()) - && fork.parse::<Option<Abi>>().is_ok() - && fork.peek(Token![fn]) - } - - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - impl Parse for Signature { - fn parse(input: ParseStream) -> Result<Self> { - let allow_safe = false; - parse_signature(input, allow_safe).map(Option::unwrap) - } - } - - fn parse_signature(input: ParseStream, allow_safe: bool) -> Result<Option<Signature>> { - let constness: Option<Token![const]> = input.parse()?; - let asyncness: Option<Token![async]> = input.parse()?; - let unsafety: Option<Token![unsafe]> = input.parse()?; - let safe = allow_safe - && unsafety.is_none() - && token::parsing::peek_keyword(input.cursor(), "safe"); - if safe { - token::parsing::keyword(input, "safe")?; - } - let abi: Option<Abi> = input.parse()?; - let fn_token: Token![fn] = input.parse()?; - let ident: Ident = input.parse()?; - let mut generics: Generics = input.parse()?; - - let content; - let paren_token = parenthesized!(content in input); - let (inputs, variadic) = parse_fn_args(&content)?; - - let output: ReturnType = input.parse()?; - generics.where_clause = input.parse()?; - - Ok(if safe { - None - } else { - Some(Signature { - constness, - asyncness, - unsafety, - abi, - fn_token, - ident, - generics, - paren_token, - inputs, - variadic, - output, - }) - }) - } - - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - impl Parse for ItemFn { - fn parse(input: ParseStream) -> 
Result<Self> { - let outer_attrs = input.call(Attribute::parse_outer)?; - let vis: Visibility = input.parse()?; - let sig: Signature = input.parse()?; - parse_rest_of_fn(input, outer_attrs, vis, sig) - } - } - - fn parse_rest_of_fn( - input: ParseStream, - mut attrs: Vec<Attribute>, - vis: Visibility, - sig: Signature, - ) -> Result<ItemFn> { - let content; - let brace_token = braced!(content in input); - attr::parsing::parse_inner(&content, &mut attrs)?; - let stmts = content.call(Block::parse_within)?; - - Ok(ItemFn { - attrs, - vis, - sig, - block: Box::new(Block { brace_token, stmts }), - }) - } - - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - impl Parse for FnArg { - fn parse(input: ParseStream) -> Result<Self> { - let allow_variadic = false; - let attrs = input.call(Attribute::parse_outer)?; - match parse_fn_arg_or_variadic(input, attrs, allow_variadic)? { - FnArgOrVariadic::FnArg(arg) => Ok(arg), - FnArgOrVariadic::Variadic(_) => unreachable!(), - } - } - } - - enum FnArgOrVariadic { - FnArg(FnArg), - Variadic(Variadic), - } - - fn parse_fn_arg_or_variadic( - input: ParseStream, - attrs: Vec<Attribute>, - allow_variadic: bool, - ) -> Result<FnArgOrVariadic> { - let ahead = input.fork(); - if let Ok(mut receiver) = ahead.parse::<Receiver>() { - input.advance_to(&ahead); - receiver.attrs = attrs; - return Ok(FnArgOrVariadic::FnArg(FnArg::Receiver(receiver))); - } - - // Hack to parse pre-2018 syntax in - // test/ui/rfc-2565-param-attrs/param-attrs-pretty.rs - // because the rest of the test case is valuable. - if input.peek(Ident) && input.peek2(Token![<]) { - let span = input.span(); - return Ok(FnArgOrVariadic::FnArg(FnArg::Typed(PatType { - attrs, - pat: Box::new(Pat::Wild(PatWild { - attrs: Vec::new(), - underscore_token: Token![_](span), - })), - colon_token: Token![:](span), - ty: input.parse()?, - }))); - } - - let pat = Box::new(Pat::parse_single(input)?); - let colon_token: Token![:] = input.parse()?; - - if allow_variadic { - if let Some(dots) = input.parse::<Option<Token![...]>>()? { - return Ok(FnArgOrVariadic::Variadic(Variadic { - attrs, - pat: Some((pat, colon_token)), - dots, - comma: None, - })); - } - } - - Ok(FnArgOrVariadic::FnArg(FnArg::Typed(PatType { - attrs, - pat, - colon_token, - ty: input.parse()?, - }))) - } - - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - impl Parse for Receiver { - fn parse(input: ParseStream) -> Result<Self> { - let reference = if input.peek(Token![&]) { - let ampersand: Token![&] = input.parse()?; - let lifetime: Option<Lifetime> = input.parse()?; - Some((ampersand, lifetime)) - } else { - None - }; - let mutability: Option<Token![mut]> = input.parse()?; - let self_token: Token![self] = input.parse()?; - let colon_token: Option<Token![:]> = if reference.is_some() { - None - } else { - input.parse()? - }; - let ty: Type = if colon_token.is_some() { - input.parse()? 
- } else { - let mut ty = Type::Path(TypePath { - qself: None, - path: Path::from(Ident::new("Self", self_token.span)), - }); - if let Some((ampersand, lifetime)) = reference.as_ref() { - ty = Type::Reference(TypeReference { - and_token: Token![&](ampersand.span), - lifetime: lifetime.clone(), - mutability: mutability.as_ref().map(|m| Token![mut](m.span)), - elem: Box::new(ty), - }); - } - ty - }; - Ok(Receiver { - attrs: Vec::new(), - reference, - mutability, - self_token, - colon_token, - ty: Box::new(ty), - }) - } - } - - fn parse_fn_args( - input: ParseStream, - ) -> Result<(Punctuated<FnArg, Token![,]>, Option<Variadic>)> { - let mut args = Punctuated::new(); - let mut variadic = None; - let mut has_receiver = false; - - while !input.is_empty() { - let attrs = input.call(Attribute::parse_outer)?; - - if let Some(dots) = input.parse::<Option<Token![...]>>()? { - variadic = Some(Variadic { - attrs, - pat: None, - dots, - comma: if input.is_empty() { - None - } else { - Some(input.parse()?) - }, - }); - break; - } - - let allow_variadic = true; - let arg = match parse_fn_arg_or_variadic(input, attrs, allow_variadic)? { - FnArgOrVariadic::FnArg(arg) => arg, - FnArgOrVariadic::Variadic(arg) => { - variadic = Some(Variadic { - comma: if input.is_empty() { - None - } else { - Some(input.parse()?) - }, - ..arg - }); - break; - } - }; - - match &arg { - FnArg::Receiver(receiver) if has_receiver => { - return Err(Error::new( - receiver.self_token.span, - "unexpected second method receiver", - )); - } - FnArg::Receiver(receiver) if !args.is_empty() => { - return Err(Error::new( - receiver.self_token.span, - "unexpected method receiver", - )); - } - FnArg::Receiver(_) => has_receiver = true, - FnArg::Typed(_) => {} - } - args.push_value(arg); - - if input.is_empty() { - break; - } - - let comma: Token![,] = input.parse()?; - args.push_punct(comma); - } - - Ok((args, variadic)) - } - - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - impl Parse for ItemMod { - fn parse(input: ParseStream) -> Result<Self> { - let mut attrs = input.call(Attribute::parse_outer)?; - let vis: Visibility = input.parse()?; - let unsafety: Option<Token![unsafe]> = input.parse()?; - let mod_token: Token![mod] = input.parse()?; - let ident: Ident = if input.peek(Token![try]) { - input.call(Ident::parse_any) - } else { - input.parse() - }?; - - let lookahead = input.lookahead1(); - if lookahead.peek(Token![;]) { - Ok(ItemMod { - attrs, - vis, - unsafety, - mod_token, - ident, - content: None, - semi: Some(input.parse()?), - }) - } else if lookahead.peek(token::Brace) { - let content; - let brace_token = braced!(content in input); - attr::parsing::parse_inner(&content, &mut attrs)?; - - let mut items = Vec::new(); - while !content.is_empty() { - items.push(content.parse()?); - } - - Ok(ItemMod { - attrs, - vis, - unsafety, - mod_token, - ident, - content: Some((brace_token, items)), - semi: None, - }) - } else { - Err(lookahead.error()) - } - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - impl Parse for ItemForeignMod { - fn parse(input: ParseStream) -> Result<Self> { - let mut attrs = input.call(Attribute::parse_outer)?; - let unsafety: Option<Token![unsafe]> = input.parse()?; - let abi: Abi = input.parse()?; - - let content; - let brace_token = braced!(content in input); - attr::parsing::parse_inner(&content, &mut attrs)?; - let mut items = Vec::new(); - while !content.is_empty() { - items.push(content.parse()?); - } - - Ok(ItemForeignMod { - attrs, - unsafety, - abi, - brace_token, - items, - }) - } - } 
- - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - impl Parse for ForeignItem { - fn parse(input: ParseStream) -> Result<Self> { - let begin = input.fork(); - let mut attrs = input.call(Attribute::parse_outer)?; - let ahead = input.fork(); - let vis: Visibility = ahead.parse()?; - - let lookahead = ahead.lookahead1(); - let allow_safe = true; - let mut item = if lookahead.peek(Token![fn]) || peek_signature(&ahead, allow_safe) { - let vis: Visibility = input.parse()?; - let sig = parse_signature(input, allow_safe)?; - let has_safe = sig.is_none(); - let has_body = input.peek(token::Brace); - let semi_token: Option<Token![;]> = if has_body { - let content; - braced!(content in input); - content.call(Attribute::parse_inner)?; - content.call(Block::parse_within)?; - None - } else { - Some(input.parse()?) - }; - if has_safe || has_body { - Ok(ForeignItem::Verbatim(verbatim::between(&begin, input))) - } else { - Ok(ForeignItem::Fn(ForeignItemFn { - attrs: Vec::new(), - vis, - sig: sig.unwrap(), - semi_token: semi_token.unwrap(), - })) - } - } else if lookahead.peek(Token![static]) - || ((ahead.peek(Token![unsafe]) - || token::parsing::peek_keyword(ahead.cursor(), "safe")) - && ahead.peek2(Token![static])) - { - let vis = input.parse()?; - let unsafety: Option<Token![unsafe]> = input.parse()?; - let safe = - unsafety.is_none() && token::parsing::peek_keyword(input.cursor(), "safe"); - if safe { - token::parsing::keyword(input, "safe")?; - } - let static_token = input.parse()?; - let mutability = input.parse()?; - let ident = input.parse()?; - let colon_token = input.parse()?; - let ty = input.parse()?; - let has_value = input.peek(Token![=]); - if has_value { - input.parse::<Token![=]>()?; - input.parse::<Expr>()?; - } - let semi_token: Token![;] = input.parse()?; - if unsafety.is_some() || safe || has_value { - Ok(ForeignItem::Verbatim(verbatim::between(&begin, input))) - } else { - Ok(ForeignItem::Static(ForeignItemStatic { - attrs: Vec::new(), - vis, - static_token, - mutability, - ident, - colon_token, - ty, - semi_token, - })) - } - } else if lookahead.peek(Token![type]) { - parse_foreign_item_type(begin, input) - } else if vis.is_inherited() - && (lookahead.peek(Ident) - || lookahead.peek(Token![self]) - || lookahead.peek(Token![super]) - || lookahead.peek(Token![crate]) - || lookahead.peek(Token![::])) - { - input.parse().map(ForeignItem::Macro) - } else { - Err(lookahead.error()) - }?; - - let item_attrs = match &mut item { - ForeignItem::Fn(item) => &mut item.attrs, - ForeignItem::Static(item) => &mut item.attrs, - ForeignItem::Type(item) => &mut item.attrs, - ForeignItem::Macro(item) => &mut item.attrs, - ForeignItem::Verbatim(_) => return Ok(item), - }; - attrs.append(item_attrs); - *item_attrs = attrs; - - Ok(item) - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - impl Parse for ForeignItemFn { - fn parse(input: ParseStream) -> Result<Self> { - let attrs = input.call(Attribute::parse_outer)?; - let vis: Visibility = input.parse()?; - let sig: Signature = input.parse()?; - let semi_token: Token![;] = input.parse()?; - Ok(ForeignItemFn { - attrs, - vis, - sig, - semi_token, - }) - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - impl Parse for ForeignItemStatic { - fn parse(input: ParseStream) -> Result<Self> { - Ok(ForeignItemStatic { - attrs: input.call(Attribute::parse_outer)?, - vis: input.parse()?, - static_token: input.parse()?, - mutability: input.parse()?, - ident: input.parse()?, - colon_token: input.parse()?, - ty: input.parse()?, - 
semi_token: input.parse()?, - }) - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - impl Parse for ForeignItemType { - fn parse(input: ParseStream) -> Result<Self> { - Ok(ForeignItemType { - attrs: input.call(Attribute::parse_outer)?, - vis: input.parse()?, - type_token: input.parse()?, - ident: input.parse()?, - generics: { - let mut generics: Generics = input.parse()?; - generics.where_clause = input.parse()?; - generics - }, - semi_token: input.parse()?, - }) - } - } - - fn parse_foreign_item_type(begin: ParseBuffer, input: ParseStream) -> Result<ForeignItem> { - let FlexibleItemType { - vis, - defaultness: _, - type_token, - ident, - generics, - colon_token, - bounds: _, - ty, - semi_token, - } = FlexibleItemType::parse( - input, - TypeDefaultness::Disallowed, - WhereClauseLocation::Both, - )?; - - if colon_token.is_some() || ty.is_some() { - Ok(ForeignItem::Verbatim(verbatim::between(&begin, input))) - } else { - Ok(ForeignItem::Type(ForeignItemType { - attrs: Vec::new(), - vis, - type_token, - ident, - generics, - semi_token, - })) - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - impl Parse for ForeignItemMacro { - fn parse(input: ParseStream) -> Result<Self> { - let attrs = input.call(Attribute::parse_outer)?; - let mac: Macro = input.parse()?; - let semi_token: Option<Token![;]> = if mac.delimiter.is_brace() { - None - } else { - Some(input.parse()?) - }; - Ok(ForeignItemMacro { - attrs, - mac, - semi_token, - }) - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - impl Parse for ItemType { - fn parse(input: ParseStream) -> Result<Self> { - Ok(ItemType { - attrs: input.call(Attribute::parse_outer)?, - vis: input.parse()?, - type_token: input.parse()?, - ident: input.parse()?, - generics: { - let mut generics: Generics = input.parse()?; - generics.where_clause = input.parse()?; - generics - }, - eq_token: input.parse()?, - ty: input.parse()?, - semi_token: input.parse()?, - }) - } - } - - fn parse_item_type(begin: ParseBuffer, input: ParseStream) -> Result<Item> { - let FlexibleItemType { - vis, - defaultness: _, - type_token, - ident, - generics, - colon_token, - bounds: _, - ty, - semi_token, - } = FlexibleItemType::parse( - input, - TypeDefaultness::Disallowed, - WhereClauseLocation::BeforeEq, - )?; - - let (eq_token, ty) = match ty { - Some(ty) if colon_token.is_none() => ty, - _ => return Ok(Item::Verbatim(verbatim::between(&begin, input))), - }; - - Ok(Item::Type(ItemType { - attrs: Vec::new(), - vis, - type_token, - ident, - generics, - eq_token, - ty: Box::new(ty), - semi_token, - })) - } - - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - impl Parse for ItemStruct { - fn parse(input: ParseStream) -> Result<Self> { - let attrs = input.call(Attribute::parse_outer)?; - let vis = input.parse::<Visibility>()?; - let struct_token = input.parse::<Token![struct]>()?; - let ident = input.parse::<Ident>()?; - let generics = input.parse::<Generics>()?; - let (where_clause, fields, semi_token) = derive::parsing::data_struct(input)?; - Ok(ItemStruct { - attrs, - vis, - struct_token, - ident, - generics: Generics { - where_clause, - ..generics - }, - fields, - semi_token, - }) - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - impl Parse for ItemEnum { - fn parse(input: ParseStream) -> Result<Self> { - let attrs = input.call(Attribute::parse_outer)?; - let vis = input.parse::<Visibility>()?; - let enum_token = input.parse::<Token![enum]>()?; - let ident = input.parse::<Ident>()?; - let generics = input.parse::<Generics>()?; - 
let (where_clause, brace_token, variants) = derive::parsing::data_enum(input)?; - Ok(ItemEnum { - attrs, - vis, - enum_token, - ident, - generics: Generics { - where_clause, - ..generics - }, - brace_token, - variants, - }) - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - impl Parse for ItemUnion { - fn parse(input: ParseStream) -> Result<Self> { - let attrs = input.call(Attribute::parse_outer)?; - let vis = input.parse::<Visibility>()?; - let union_token = input.parse::<Token![union]>()?; - let ident = input.parse::<Ident>()?; - let generics = input.parse::<Generics>()?; - let (where_clause, fields) = derive::parsing::data_union(input)?; - Ok(ItemUnion { - attrs, - vis, - union_token, - ident, - generics: Generics { - where_clause, - ..generics - }, - fields, - }) - } - } - - fn parse_trait_or_trait_alias(input: ParseStream) -> Result<Item> { - let (attrs, vis, trait_token, ident, generics) = parse_start_of_trait_alias(input)?; - let lookahead = input.lookahead1(); - if lookahead.peek(token::Brace) - || lookahead.peek(Token![:]) - || lookahead.peek(Token![where]) - { - let unsafety = None; - let auto_token = None; - parse_rest_of_trait( - input, - attrs, - vis, - unsafety, - auto_token, - trait_token, - ident, - generics, - ) - .map(Item::Trait) - } else if lookahead.peek(Token![=]) { - parse_rest_of_trait_alias(input, attrs, vis, trait_token, ident, generics) - .map(Item::TraitAlias) - } else { - Err(lookahead.error()) - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - impl Parse for ItemTrait { - fn parse(input: ParseStream) -> Result<Self> { - let outer_attrs = input.call(Attribute::parse_outer)?; - let vis: Visibility = input.parse()?; - let unsafety: Option<Token![unsafe]> = input.parse()?; - let auto_token: Option<Token![auto]> = input.parse()?; - let trait_token: Token![trait] = input.parse()?; - let ident: Ident = input.parse()?; - let generics: Generics = input.parse()?; - parse_rest_of_trait( - input, - outer_attrs, - vis, - unsafety, - auto_token, - trait_token, - ident, - generics, - ) - } - } - - fn parse_rest_of_trait( - input: ParseStream, - mut attrs: Vec<Attribute>, - vis: Visibility, - unsafety: Option<Token![unsafe]>, - auto_token: Option<Token![auto]>, - trait_token: Token![trait], - ident: Ident, - mut generics: Generics, - ) -> Result<ItemTrait> { - let colon_token: Option<Token![:]> = input.parse()?; - - let mut supertraits = Punctuated::new(); - if colon_token.is_some() { - loop { - if input.peek(Token![where]) || input.peek(token::Brace) { - break; - } - supertraits.push_value({ - let allow_precise_capture = false; - let allow_const = true; - TypeParamBound::parse_single(input, allow_precise_capture, allow_const)? 
- }); - if input.peek(Token![where]) || input.peek(token::Brace) { - break; - } - supertraits.push_punct(input.parse()?); - } - } - - generics.where_clause = input.parse()?; - - let content; - let brace_token = braced!(content in input); - attr::parsing::parse_inner(&content, &mut attrs)?; - let mut items = Vec::new(); - while !content.is_empty() { - items.push(content.parse()?); - } - - Ok(ItemTrait { - attrs, - vis, - unsafety, - auto_token, - restriction: None, - trait_token, - ident, - generics, - colon_token, - supertraits, - brace_token, - items, - }) - } - - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - impl Parse for ItemTraitAlias { - fn parse(input: ParseStream) -> Result<Self> { - let (attrs, vis, trait_token, ident, generics) = parse_start_of_trait_alias(input)?; - parse_rest_of_trait_alias(input, attrs, vis, trait_token, ident, generics) - } - } - - fn parse_start_of_trait_alias( - input: ParseStream, - ) -> Result<(Vec<Attribute>, Visibility, Token![trait], Ident, Generics)> { - let attrs = input.call(Attribute::parse_outer)?; - let vis: Visibility = input.parse()?; - let trait_token: Token![trait] = input.parse()?; - let ident: Ident = input.parse()?; - let generics: Generics = input.parse()?; - Ok((attrs, vis, trait_token, ident, generics)) - } - - fn parse_rest_of_trait_alias( - input: ParseStream, - attrs: Vec<Attribute>, - vis: Visibility, - trait_token: Token![trait], - ident: Ident, - mut generics: Generics, - ) -> Result<ItemTraitAlias> { - let eq_token: Token![=] = input.parse()?; - - let mut bounds = Punctuated::new(); - loop { - if input.peek(Token![where]) || input.peek(Token![;]) { - break; - } - bounds.push_value({ - let allow_precise_capture = false; - let allow_const = false; - TypeParamBound::parse_single(input, allow_precise_capture, allow_const)? - }); - if input.peek(Token![where]) || input.peek(Token![;]) { - break; - } - bounds.push_punct(input.parse()?); - } - - generics.where_clause = input.parse()?; - let semi_token: Token![;] = input.parse()?; - - Ok(ItemTraitAlias { - attrs, - vis, - trait_token, - ident, - generics, - eq_token, - bounds, - semi_token, - }) - } - - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - impl Parse for TraitItem { - fn parse(input: ParseStream) -> Result<Self> { - let begin = input.fork(); - let mut attrs = input.call(Attribute::parse_outer)?; - let vis: Visibility = input.parse()?; - let defaultness: Option<Token![default]> = input.parse()?; - let ahead = input.fork(); - - let lookahead = ahead.lookahead1(); - let allow_safe = false; - let mut item = if lookahead.peek(Token![fn]) || peek_signature(&ahead, allow_safe) { - input.parse().map(TraitItem::Fn) - } else if lookahead.peek(Token![const]) { - let const_token: Token![const] = ahead.parse()?; - let lookahead = ahead.lookahead1(); - if lookahead.peek(Ident) || lookahead.peek(Token![_]) { - input.advance_to(&ahead); - let ident = input.call(Ident::parse_any)?; - let mut generics: Generics = input.parse()?; - let colon_token: Token![:] = input.parse()?; - let ty: Type = input.parse()?; - let default = if let Some(eq_token) = input.parse::<Option<Token![=]>>()? 
{ - let expr: Expr = input.parse()?; - Some((eq_token, expr)) - } else { - None - }; - generics.where_clause = input.parse()?; - let semi_token: Token![;] = input.parse()?; - if generics.lt_token.is_none() && generics.where_clause.is_none() { - Ok(TraitItem::Const(TraitItemConst { - attrs: Vec::new(), - const_token, - ident, - generics, - colon_token, - ty, - default, - semi_token, - })) - } else { - return Ok(TraitItem::Verbatim(verbatim::between(&begin, input))); - } - } else if lookahead.peek(Token![async]) - || lookahead.peek(Token![unsafe]) - || lookahead.peek(Token![extern]) - || lookahead.peek(Token![fn]) - { - input.parse().map(TraitItem::Fn) - } else { - Err(lookahead.error()) - } - } else if lookahead.peek(Token![type]) { - parse_trait_item_type(begin.fork(), input) - } else if vis.is_inherited() - && defaultness.is_none() - && (lookahead.peek(Ident) - || lookahead.peek(Token![self]) - || lookahead.peek(Token![super]) - || lookahead.peek(Token![crate]) - || lookahead.peek(Token![::])) - { - input.parse().map(TraitItem::Macro) - } else { - Err(lookahead.error()) - }?; - - match (vis, defaultness) { - (Visibility::Inherited, None) => {} - _ => return Ok(TraitItem::Verbatim(verbatim::between(&begin, input))), - } - - let item_attrs = match &mut item { - TraitItem::Const(item) => &mut item.attrs, - TraitItem::Fn(item) => &mut item.attrs, - TraitItem::Type(item) => &mut item.attrs, - TraitItem::Macro(item) => &mut item.attrs, - TraitItem::Verbatim(_) => unreachable!(), - }; - attrs.append(item_attrs); - *item_attrs = attrs; - Ok(item) - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - impl Parse for TraitItemConst { - fn parse(input: ParseStream) -> Result<Self> { - let attrs = input.call(Attribute::parse_outer)?; - let const_token: Token![const] = input.parse()?; - - let lookahead = input.lookahead1(); - let ident = if lookahead.peek(Ident) || lookahead.peek(Token![_]) { - input.call(Ident::parse_any)? 
- } else { - return Err(lookahead.error()); - }; - - let colon_token: Token![:] = input.parse()?; - let ty: Type = input.parse()?; - let default = if input.peek(Token![=]) { - let eq_token: Token![=] = input.parse()?; - let default: Expr = input.parse()?; - Some((eq_token, default)) - } else { - None - }; - let semi_token: Token![;] = input.parse()?; - - Ok(TraitItemConst { - attrs, - const_token, - ident, - generics: Generics::default(), - colon_token, - ty, - default, - semi_token, - }) - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - impl Parse for TraitItemFn { - fn parse(input: ParseStream) -> Result<Self> { - let mut attrs = input.call(Attribute::parse_outer)?; - let sig: Signature = input.parse()?; - - let lookahead = input.lookahead1(); - let (brace_token, stmts, semi_token) = if lookahead.peek(token::Brace) { - let content; - let brace_token = braced!(content in input); - attr::parsing::parse_inner(&content, &mut attrs)?; - let stmts = content.call(Block::parse_within)?; - (Some(brace_token), stmts, None) - } else if lookahead.peek(Token![;]) { - let semi_token: Token![;] = input.parse()?; - (None, Vec::new(), Some(semi_token)) - } else { - return Err(lookahead.error()); - }; - - Ok(TraitItemFn { - attrs, - sig, - default: brace_token.map(|brace_token| Block { brace_token, stmts }), - semi_token, - }) - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - impl Parse for TraitItemType { - fn parse(input: ParseStream) -> Result<Self> { - let attrs = input.call(Attribute::parse_outer)?; - let type_token: Token![type] = input.parse()?; - let ident: Ident = input.parse()?; - let mut generics: Generics = input.parse()?; - let (colon_token, bounds) = FlexibleItemType::parse_optional_bounds(input)?; - let default = FlexibleItemType::parse_optional_definition(input)?; - generics.where_clause = input.parse()?; - let semi_token: Token![;] = input.parse()?; - Ok(TraitItemType { - attrs, - type_token, - ident, - generics, - colon_token, - bounds, - default, - semi_token, - }) - } - } - - fn parse_trait_item_type(begin: ParseBuffer, input: ParseStream) -> Result<TraitItem> { - let FlexibleItemType { - vis, - defaultness: _, - type_token, - ident, - generics, - colon_token, - bounds, - ty, - semi_token, - } = FlexibleItemType::parse( - input, - TypeDefaultness::Disallowed, - WhereClauseLocation::AfterEq, - )?; - - if vis.is_some() { - Ok(TraitItem::Verbatim(verbatim::between(&begin, input))) - } else { - Ok(TraitItem::Type(TraitItemType { - attrs: Vec::new(), - type_token, - ident, - generics, - colon_token, - bounds, - default: ty, - semi_token, - })) - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - impl Parse for TraitItemMacro { - fn parse(input: ParseStream) -> Result<Self> { - let attrs = input.call(Attribute::parse_outer)?; - let mac: Macro = input.parse()?; - let semi_token: Option<Token![;]> = if mac.delimiter.is_brace() { - None - } else { - Some(input.parse()?) 
- }; - Ok(TraitItemMacro { - attrs, - mac, - semi_token, - }) - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - impl Parse for ItemImpl { - fn parse(input: ParseStream) -> Result<Self> { - let allow_verbatim_impl = false; - parse_impl(input, allow_verbatim_impl).map(Option::unwrap) - } - } - - fn parse_impl(input: ParseStream, allow_verbatim_impl: bool) -> Result<Option<ItemImpl>> { - let mut attrs = input.call(Attribute::parse_outer)?; - let has_visibility = allow_verbatim_impl && input.parse::<Visibility>()?.is_some(); - let defaultness: Option<Token![default]> = input.parse()?; - let unsafety: Option<Token![unsafe]> = input.parse()?; - let impl_token: Token![impl] = input.parse()?; - - let has_generics = generics::parsing::choose_generics_over_qpath(input); - let mut generics: Generics = if has_generics { - input.parse()? - } else { - Generics::default() - }; - - let is_const_impl = allow_verbatim_impl - && (input.peek(Token![const]) || input.peek(Token![?]) && input.peek2(Token![const])); - if is_const_impl { - input.parse::<Option<Token![?]>>()?; - input.parse::<Token![const]>()?; - } - - let polarity = if input.peek(Token![!]) && !input.peek2(token::Brace) { - Some(input.parse::<Token![!]>()?) - } else { - None - }; - - #[cfg(not(feature = "printing"))] - let first_ty_span = input.span(); - let mut first_ty: Type = input.parse()?; - let self_ty: Type; - let trait_; - - let is_impl_for = input.peek(Token![for]); - if is_impl_for { - let for_token: Token![for] = input.parse()?; - let mut first_ty_ref = &first_ty; - while let Type::Group(ty) = first_ty_ref { - first_ty_ref = &ty.elem; - } - if let Type::Path(TypePath { qself: None, .. }) = first_ty_ref { - while let Type::Group(ty) = first_ty { - first_ty = *ty.elem; - } - if let Type::Path(TypePath { qself: None, path }) = first_ty { - trait_ = Some((polarity, path, for_token)); - } else { - unreachable!(); - } - } else if !allow_verbatim_impl { - #[cfg(feature = "printing")] - return Err(Error::new_spanned(first_ty_ref, "expected trait path")); - #[cfg(not(feature = "printing"))] - return Err(Error::new(first_ty_span, "expected trait path")); - } else { - trait_ = None; - } - self_ty = input.parse()?; - } else if let Some(polarity) = polarity { - return Err(Error::new( - polarity.span, - "inherent impls cannot be negative", - )); - } else { - trait_ = None; - self_ty = first_ty; - } - - generics.where_clause = input.parse()?; - - let content; - let brace_token = braced!(content in input); - attr::parsing::parse_inner(&content, &mut attrs)?; - - let mut items = Vec::new(); - while !content.is_empty() { - items.push(content.parse()?); - } - - if has_visibility || is_const_impl || is_impl_for && trait_.is_none() { - Ok(None) - } else { - Ok(Some(ItemImpl { - attrs, - defaultness, - unsafety, - impl_token, - generics, - trait_, - self_ty: Box::new(self_ty), - brace_token, - items, - })) - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - impl Parse for ImplItem { - fn parse(input: ParseStream) -> Result<Self> { - let begin = input.fork(); - let mut attrs = input.call(Attribute::parse_outer)?; - let ahead = input.fork(); - let vis: Visibility = ahead.parse()?; - - let mut lookahead = ahead.lookahead1(); - let defaultness = if lookahead.peek(Token![default]) && !ahead.peek2(Token![!]) { - let defaultness: Token![default] = ahead.parse()?; - lookahead = ahead.lookahead1(); - Some(defaultness) - } else { - None - }; - - let allow_safe = false; - let mut item = if lookahead.peek(Token![fn]) || peek_signature(&ahead, 
allow_safe) { - let allow_omitted_body = true; - if let Some(item) = parse_impl_item_fn(input, allow_omitted_body)? { - Ok(ImplItem::Fn(item)) - } else { - Ok(ImplItem::Verbatim(verbatim::between(&begin, input))) - } - } else if lookahead.peek(Token![const]) { - input.advance_to(&ahead); - let const_token: Token![const] = input.parse()?; - let lookahead = input.lookahead1(); - let ident = if lookahead.peek(Ident) || lookahead.peek(Token![_]) { - input.call(Ident::parse_any)? - } else { - return Err(lookahead.error()); - }; - let mut generics: Generics = input.parse()?; - let colon_token: Token![:] = input.parse()?; - let ty: Type = input.parse()?; - let value = if let Some(eq_token) = input.parse::<Option<Token![=]>>()? { - let expr: Expr = input.parse()?; - Some((eq_token, expr)) - } else { - None - }; - generics.where_clause = input.parse()?; - let semi_token: Token![;] = input.parse()?; - return match value { - Some((eq_token, expr)) - if generics.lt_token.is_none() && generics.where_clause.is_none() => - { - Ok(ImplItem::Const(ImplItemConst { - attrs, - vis, - defaultness, - const_token, - ident, - generics, - colon_token, - ty, - eq_token, - expr, - semi_token, - })) - } - _ => Ok(ImplItem::Verbatim(verbatim::between(&begin, input))), - }; - } else if lookahead.peek(Token![type]) { - parse_impl_item_type(begin, input) - } else if vis.is_inherited() - && defaultness.is_none() - && (lookahead.peek(Ident) - || lookahead.peek(Token![self]) - || lookahead.peek(Token![super]) - || lookahead.peek(Token![crate]) - || lookahead.peek(Token![::])) - { - input.parse().map(ImplItem::Macro) - } else { - Err(lookahead.error()) - }?; - - { - let item_attrs = match &mut item { - ImplItem::Const(item) => &mut item.attrs, - ImplItem::Fn(item) => &mut item.attrs, - ImplItem::Type(item) => &mut item.attrs, - ImplItem::Macro(item) => &mut item.attrs, - ImplItem::Verbatim(_) => return Ok(item), - }; - attrs.append(item_attrs); - *item_attrs = attrs; - } - - Ok(item) - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - impl Parse for ImplItemConst { - fn parse(input: ParseStream) -> Result<Self> { - let attrs = input.call(Attribute::parse_outer)?; - let vis: Visibility = input.parse()?; - let defaultness: Option<Token![default]> = input.parse()?; - let const_token: Token![const] = input.parse()?; - - let lookahead = input.lookahead1(); - let ident = if lookahead.peek(Ident) || lookahead.peek(Token![_]) { - input.call(Ident::parse_any)? 
- } else { - return Err(lookahead.error()); - }; - - let colon_token: Token![:] = input.parse()?; - let ty: Type = input.parse()?; - let eq_token: Token![=] = input.parse()?; - let expr: Expr = input.parse()?; - let semi_token: Token![;] = input.parse()?; - - Ok(ImplItemConst { - attrs, - vis, - defaultness, - const_token, - ident, - generics: Generics::default(), - colon_token, - ty, - eq_token, - expr, - semi_token, - }) - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - impl Parse for ImplItemFn { - fn parse(input: ParseStream) -> Result<Self> { - let allow_omitted_body = false; - parse_impl_item_fn(input, allow_omitted_body).map(Option::unwrap) - } - } - - fn parse_impl_item_fn( - input: ParseStream, - allow_omitted_body: bool, - ) -> Result<Option<ImplItemFn>> { - let mut attrs = input.call(Attribute::parse_outer)?; - let vis: Visibility = input.parse()?; - let defaultness: Option<Token![default]> = input.parse()?; - let sig: Signature = input.parse()?; - - // Accept functions without a body in an impl block because rustc's - // *parser* does not reject them (the compilation error is emitted later - // than parsing) and it can be useful for macro DSLs. - if allow_omitted_body && input.parse::<Option<Token![;]>>()?.is_some() { - return Ok(None); - } - - let content; - let brace_token = braced!(content in input); - attrs.extend(content.call(Attribute::parse_inner)?); - let block = Block { - brace_token, - stmts: content.call(Block::parse_within)?, - }; - - Ok(Some(ImplItemFn { - attrs, - vis, - defaultness, - sig, - block, - })) - } - - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - impl Parse for ImplItemType { - fn parse(input: ParseStream) -> Result<Self> { - let attrs = input.call(Attribute::parse_outer)?; - let vis: Visibility = input.parse()?; - let defaultness: Option<Token![default]> = input.parse()?; - let type_token: Token![type] = input.parse()?; - let ident: Ident = input.parse()?; - let mut generics: Generics = input.parse()?; - let eq_token: Token![=] = input.parse()?; - let ty: Type = input.parse()?; - generics.where_clause = input.parse()?; - let semi_token: Token![;] = input.parse()?; - Ok(ImplItemType { - attrs, - vis, - defaultness, - type_token, - ident, - generics, - eq_token, - ty, - semi_token, - }) - } - } - - fn parse_impl_item_type(begin: ParseBuffer, input: ParseStream) -> Result<ImplItem> { - let FlexibleItemType { - vis, - defaultness, - type_token, - ident, - generics, - colon_token, - bounds: _, - ty, - semi_token, - } = FlexibleItemType::parse( - input, - TypeDefaultness::Optional, - WhereClauseLocation::AfterEq, - )?; - - let (eq_token, ty) = match ty { - Some(ty) if colon_token.is_none() => ty, - _ => return Ok(ImplItem::Verbatim(verbatim::between(&begin, input))), - }; - - Ok(ImplItem::Type(ImplItemType { - attrs: Vec::new(), - vis, - defaultness, - type_token, - ident, - generics, - eq_token, - ty, - semi_token, - })) - } - - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - impl Parse for ImplItemMacro { - fn parse(input: ParseStream) -> Result<Self> { - let attrs = input.call(Attribute::parse_outer)?; - let mac: Macro = input.parse()?; - let semi_token: Option<Token![;]> = if mac.delimiter.is_brace() { - None - } else { - Some(input.parse()?) 
- }; - Ok(ImplItemMacro { - attrs, - mac, - semi_token, - }) - } - } - - impl Visibility { - fn is_inherited(&self) -> bool { - match self { - Visibility::Inherited => true, - _ => false, - } - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - impl Parse for StaticMutability { - fn parse(input: ParseStream) -> Result<Self> { - let mut_token: Option<Token![mut]> = input.parse()?; - Ok(mut_token.map_or(StaticMutability::None, StaticMutability::Mut)) - } - } -} - -#[cfg(feature = "printing")] -mod printing { - use crate::attr::FilterAttrs; - use crate::data::Fields; - use crate::item::{ - ForeignItemFn, ForeignItemMacro, ForeignItemStatic, ForeignItemType, ImplItemConst, - ImplItemFn, ImplItemMacro, ImplItemType, ItemConst, ItemEnum, ItemExternCrate, ItemFn, - ItemForeignMod, ItemImpl, ItemMacro, ItemMod, ItemStatic, ItemStruct, ItemTrait, - ItemTraitAlias, ItemType, ItemUnion, ItemUse, Receiver, Signature, StaticMutability, - TraitItemConst, TraitItemFn, TraitItemMacro, TraitItemType, UseGlob, UseGroup, UseName, - UsePath, UseRename, Variadic, - }; - use crate::mac::MacroDelimiter; - use crate::path; - use crate::path::printing::PathStyle; - use crate::print::TokensOrDefault; - use crate::ty::Type; - use proc_macro2::TokenStream; - use quote::{ToTokens, TokenStreamExt as _}; - - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for ItemExternCrate { - fn to_tokens(&self, tokens: &mut TokenStream) { - tokens.append_all(self.attrs.outer()); - self.vis.to_tokens(tokens); - self.extern_token.to_tokens(tokens); - self.crate_token.to_tokens(tokens); - self.ident.to_tokens(tokens); - if let Some((as_token, rename)) = &self.rename { - as_token.to_tokens(tokens); - rename.to_tokens(tokens); - } - self.semi_token.to_tokens(tokens); - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for ItemUse { - fn to_tokens(&self, tokens: &mut TokenStream) { - tokens.append_all(self.attrs.outer()); - self.vis.to_tokens(tokens); - self.use_token.to_tokens(tokens); - self.leading_colon.to_tokens(tokens); - self.tree.to_tokens(tokens); - self.semi_token.to_tokens(tokens); - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for ItemStatic { - fn to_tokens(&self, tokens: &mut TokenStream) { - tokens.append_all(self.attrs.outer()); - self.vis.to_tokens(tokens); - self.static_token.to_tokens(tokens); - self.mutability.to_tokens(tokens); - self.ident.to_tokens(tokens); - self.colon_token.to_tokens(tokens); - self.ty.to_tokens(tokens); - self.eq_token.to_tokens(tokens); - self.expr.to_tokens(tokens); - self.semi_token.to_tokens(tokens); - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for ItemConst { - fn to_tokens(&self, tokens: &mut TokenStream) { - tokens.append_all(self.attrs.outer()); - self.vis.to_tokens(tokens); - self.const_token.to_tokens(tokens); - self.ident.to_tokens(tokens); - self.colon_token.to_tokens(tokens); - self.ty.to_tokens(tokens); - self.eq_token.to_tokens(tokens); - self.expr.to_tokens(tokens); - self.semi_token.to_tokens(tokens); - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for ItemFn { - fn to_tokens(&self, tokens: &mut TokenStream) { - tokens.append_all(self.attrs.outer()); - self.vis.to_tokens(tokens); - self.sig.to_tokens(tokens); - self.block.brace_token.surround(tokens, |tokens| { - tokens.append_all(self.attrs.inner()); - tokens.append_all(&self.block.stmts); - }); - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl 
ToTokens for ItemMod { - fn to_tokens(&self, tokens: &mut TokenStream) { - tokens.append_all(self.attrs.outer()); - self.vis.to_tokens(tokens); - self.unsafety.to_tokens(tokens); - self.mod_token.to_tokens(tokens); - self.ident.to_tokens(tokens); - if let Some((brace, items)) = &self.content { - brace.surround(tokens, |tokens| { - tokens.append_all(self.attrs.inner()); - tokens.append_all(items); - }); - } else { - TokensOrDefault(&self.semi).to_tokens(tokens); - } - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for ItemForeignMod { - fn to_tokens(&self, tokens: &mut TokenStream) { - tokens.append_all(self.attrs.outer()); - self.unsafety.to_tokens(tokens); - self.abi.to_tokens(tokens); - self.brace_token.surround(tokens, |tokens| { - tokens.append_all(self.attrs.inner()); - tokens.append_all(&self.items); - }); - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for ItemType { - fn to_tokens(&self, tokens: &mut TokenStream) { - tokens.append_all(self.attrs.outer()); - self.vis.to_tokens(tokens); - self.type_token.to_tokens(tokens); - self.ident.to_tokens(tokens); - self.generics.to_tokens(tokens); - self.generics.where_clause.to_tokens(tokens); - self.eq_token.to_tokens(tokens); - self.ty.to_tokens(tokens); - self.semi_token.to_tokens(tokens); - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for ItemEnum { - fn to_tokens(&self, tokens: &mut TokenStream) { - tokens.append_all(self.attrs.outer()); - self.vis.to_tokens(tokens); - self.enum_token.to_tokens(tokens); - self.ident.to_tokens(tokens); - self.generics.to_tokens(tokens); - self.generics.where_clause.to_tokens(tokens); - self.brace_token.surround(tokens, |tokens| { - self.variants.to_tokens(tokens); - }); - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for ItemStruct { - fn to_tokens(&self, tokens: &mut TokenStream) { - tokens.append_all(self.attrs.outer()); - self.vis.to_tokens(tokens); - self.struct_token.to_tokens(tokens); - self.ident.to_tokens(tokens); - self.generics.to_tokens(tokens); - match &self.fields { - Fields::Named(fields) => { - self.generics.where_clause.to_tokens(tokens); - fields.to_tokens(tokens); - } - Fields::Unnamed(fields) => { - fields.to_tokens(tokens); - self.generics.where_clause.to_tokens(tokens); - TokensOrDefault(&self.semi_token).to_tokens(tokens); - } - Fields::Unit => { - self.generics.where_clause.to_tokens(tokens); - TokensOrDefault(&self.semi_token).to_tokens(tokens); - } - } - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for ItemUnion { - fn to_tokens(&self, tokens: &mut TokenStream) { - tokens.append_all(self.attrs.outer()); - self.vis.to_tokens(tokens); - self.union_token.to_tokens(tokens); - self.ident.to_tokens(tokens); - self.generics.to_tokens(tokens); - self.generics.where_clause.to_tokens(tokens); - self.fields.to_tokens(tokens); - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for ItemTrait { - fn to_tokens(&self, tokens: &mut TokenStream) { - tokens.append_all(self.attrs.outer()); - self.vis.to_tokens(tokens); - self.unsafety.to_tokens(tokens); - self.auto_token.to_tokens(tokens); - self.trait_token.to_tokens(tokens); - self.ident.to_tokens(tokens); - self.generics.to_tokens(tokens); - if !self.supertraits.is_empty() { - TokensOrDefault(&self.colon_token).to_tokens(tokens); - self.supertraits.to_tokens(tokens); - } - self.generics.where_clause.to_tokens(tokens); - self.brace_token.surround(tokens, 
|tokens| { - tokens.append_all(self.attrs.inner()); - tokens.append_all(&self.items); - }); - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for ItemTraitAlias { - fn to_tokens(&self, tokens: &mut TokenStream) { - tokens.append_all(self.attrs.outer()); - self.vis.to_tokens(tokens); - self.trait_token.to_tokens(tokens); - self.ident.to_tokens(tokens); - self.generics.to_tokens(tokens); - self.eq_token.to_tokens(tokens); - self.bounds.to_tokens(tokens); - self.generics.where_clause.to_tokens(tokens); - self.semi_token.to_tokens(tokens); - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for ItemImpl { - fn to_tokens(&self, tokens: &mut TokenStream) { - tokens.append_all(self.attrs.outer()); - self.defaultness.to_tokens(tokens); - self.unsafety.to_tokens(tokens); - self.impl_token.to_tokens(tokens); - self.generics.to_tokens(tokens); - if let Some((polarity, path, for_token)) = &self.trait_ { - polarity.to_tokens(tokens); - path.to_tokens(tokens); - for_token.to_tokens(tokens); - } - self.self_ty.to_tokens(tokens); - self.generics.where_clause.to_tokens(tokens); - self.brace_token.surround(tokens, |tokens| { - tokens.append_all(self.attrs.inner()); - tokens.append_all(&self.items); - }); - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for ItemMacro { - fn to_tokens(&self, tokens: &mut TokenStream) { - tokens.append_all(self.attrs.outer()); - path::printing::print_path(tokens, &self.mac.path, PathStyle::Mod); - self.mac.bang_token.to_tokens(tokens); - self.ident.to_tokens(tokens); - match &self.mac.delimiter { - MacroDelimiter::Paren(paren) => { - paren.surround(tokens, |tokens| self.mac.tokens.to_tokens(tokens)); - } - MacroDelimiter::Brace(brace) => { - brace.surround(tokens, |tokens| self.mac.tokens.to_tokens(tokens)); - } - MacroDelimiter::Bracket(bracket) => { - bracket.surround(tokens, |tokens| self.mac.tokens.to_tokens(tokens)); - } - } - self.semi_token.to_tokens(tokens); - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for UsePath { - fn to_tokens(&self, tokens: &mut TokenStream) { - self.ident.to_tokens(tokens); - self.colon2_token.to_tokens(tokens); - self.tree.to_tokens(tokens); - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for UseName { - fn to_tokens(&self, tokens: &mut TokenStream) { - self.ident.to_tokens(tokens); - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for UseRename { - fn to_tokens(&self, tokens: &mut TokenStream) { - self.ident.to_tokens(tokens); - self.as_token.to_tokens(tokens); - self.rename.to_tokens(tokens); - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for UseGlob { - fn to_tokens(&self, tokens: &mut TokenStream) { - self.star_token.to_tokens(tokens); - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for UseGroup { - fn to_tokens(&self, tokens: &mut TokenStream) { - self.brace_token.surround(tokens, |tokens| { - self.items.to_tokens(tokens); - }); - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for TraitItemConst { - fn to_tokens(&self, tokens: &mut TokenStream) { - tokens.append_all(self.attrs.outer()); - self.const_token.to_tokens(tokens); - self.ident.to_tokens(tokens); - self.colon_token.to_tokens(tokens); - self.ty.to_tokens(tokens); - if let Some((eq_token, default)) = &self.default { - eq_token.to_tokens(tokens); - default.to_tokens(tokens); - } - 
self.semi_token.to_tokens(tokens); - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for TraitItemFn { - fn to_tokens(&self, tokens: &mut TokenStream) { - tokens.append_all(self.attrs.outer()); - self.sig.to_tokens(tokens); - match &self.default { - Some(block) => { - block.brace_token.surround(tokens, |tokens| { - tokens.append_all(self.attrs.inner()); - tokens.append_all(&block.stmts); - }); - } - None => { - TokensOrDefault(&self.semi_token).to_tokens(tokens); - } - } - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for TraitItemType { - fn to_tokens(&self, tokens: &mut TokenStream) { - tokens.append_all(self.attrs.outer()); - self.type_token.to_tokens(tokens); - self.ident.to_tokens(tokens); - self.generics.to_tokens(tokens); - if !self.bounds.is_empty() { - TokensOrDefault(&self.colon_token).to_tokens(tokens); - self.bounds.to_tokens(tokens); - } - if let Some((eq_token, default)) = &self.default { - eq_token.to_tokens(tokens); - default.to_tokens(tokens); - } - self.generics.where_clause.to_tokens(tokens); - self.semi_token.to_tokens(tokens); - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for TraitItemMacro { - fn to_tokens(&self, tokens: &mut TokenStream) { - tokens.append_all(self.attrs.outer()); - self.mac.to_tokens(tokens); - self.semi_token.to_tokens(tokens); - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for ImplItemConst { - fn to_tokens(&self, tokens: &mut TokenStream) { - tokens.append_all(self.attrs.outer()); - self.vis.to_tokens(tokens); - self.defaultness.to_tokens(tokens); - self.const_token.to_tokens(tokens); - self.ident.to_tokens(tokens); - self.colon_token.to_tokens(tokens); - self.ty.to_tokens(tokens); - self.eq_token.to_tokens(tokens); - self.expr.to_tokens(tokens); - self.semi_token.to_tokens(tokens); - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for ImplItemFn { - fn to_tokens(&self, tokens: &mut TokenStream) { - tokens.append_all(self.attrs.outer()); - self.vis.to_tokens(tokens); - self.defaultness.to_tokens(tokens); - self.sig.to_tokens(tokens); - self.block.brace_token.surround(tokens, |tokens| { - tokens.append_all(self.attrs.inner()); - tokens.append_all(&self.block.stmts); - }); - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for ImplItemType { - fn to_tokens(&self, tokens: &mut TokenStream) { - tokens.append_all(self.attrs.outer()); - self.vis.to_tokens(tokens); - self.defaultness.to_tokens(tokens); - self.type_token.to_tokens(tokens); - self.ident.to_tokens(tokens); - self.generics.to_tokens(tokens); - self.eq_token.to_tokens(tokens); - self.ty.to_tokens(tokens); - self.generics.where_clause.to_tokens(tokens); - self.semi_token.to_tokens(tokens); - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for ImplItemMacro { - fn to_tokens(&self, tokens: &mut TokenStream) { - tokens.append_all(self.attrs.outer()); - self.mac.to_tokens(tokens); - self.semi_token.to_tokens(tokens); - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for ForeignItemFn { - fn to_tokens(&self, tokens: &mut TokenStream) { - tokens.append_all(self.attrs.outer()); - self.vis.to_tokens(tokens); - self.sig.to_tokens(tokens); - self.semi_token.to_tokens(tokens); - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for ForeignItemStatic { - fn to_tokens(&self, tokens: &mut TokenStream) { - 
tokens.append_all(self.attrs.outer()); - self.vis.to_tokens(tokens); - self.static_token.to_tokens(tokens); - self.mutability.to_tokens(tokens); - self.ident.to_tokens(tokens); - self.colon_token.to_tokens(tokens); - self.ty.to_tokens(tokens); - self.semi_token.to_tokens(tokens); - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for ForeignItemType { - fn to_tokens(&self, tokens: &mut TokenStream) { - tokens.append_all(self.attrs.outer()); - self.vis.to_tokens(tokens); - self.type_token.to_tokens(tokens); - self.ident.to_tokens(tokens); - self.generics.to_tokens(tokens); - self.generics.where_clause.to_tokens(tokens); - self.semi_token.to_tokens(tokens); - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for ForeignItemMacro { - fn to_tokens(&self, tokens: &mut TokenStream) { - tokens.append_all(self.attrs.outer()); - self.mac.to_tokens(tokens); - self.semi_token.to_tokens(tokens); - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for Signature { - fn to_tokens(&self, tokens: &mut TokenStream) { - self.constness.to_tokens(tokens); - self.asyncness.to_tokens(tokens); - self.unsafety.to_tokens(tokens); - self.abi.to_tokens(tokens); - self.fn_token.to_tokens(tokens); - self.ident.to_tokens(tokens); - self.generics.to_tokens(tokens); - self.paren_token.surround(tokens, |tokens| { - self.inputs.to_tokens(tokens); - if let Some(variadic) = &self.variadic { - if !self.inputs.empty_or_trailing() { - <Token![,]>::default().to_tokens(tokens); - } - variadic.to_tokens(tokens); - } - }); - self.output.to_tokens(tokens); - self.generics.where_clause.to_tokens(tokens); - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for Receiver { - fn to_tokens(&self, tokens: &mut TokenStream) { - tokens.append_all(self.attrs.outer()); - if let Some((ampersand, lifetime)) = &self.reference { - ampersand.to_tokens(tokens); - lifetime.to_tokens(tokens); - } - self.mutability.to_tokens(tokens); - self.self_token.to_tokens(tokens); - if let Some(colon_token) = &self.colon_token { - colon_token.to_tokens(tokens); - self.ty.to_tokens(tokens); - } else { - let consistent = match (&self.reference, &self.mutability, &*self.ty) { - (Some(_), mutability, Type::Reference(ty)) => { - mutability.is_some() == ty.mutability.is_some() - && match &*ty.elem { - Type::Path(ty) => ty.qself.is_none() && ty.path.is_ident("Self"), - _ => false, - } - } - (None, _, Type::Path(ty)) => ty.qself.is_none() && ty.path.is_ident("Self"), - _ => false, - }; - if !consistent { - <Token![:]>::default().to_tokens(tokens); - self.ty.to_tokens(tokens); - } - } - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for Variadic { - fn to_tokens(&self, tokens: &mut TokenStream) { - tokens.append_all(self.attrs.outer()); - if let Some((pat, colon)) = &self.pat { - pat.to_tokens(tokens); - colon.to_tokens(tokens); - } - self.dots.to_tokens(tokens); - self.comma.to_tokens(tokens); - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for StaticMutability { - fn to_tokens(&self, tokens: &mut TokenStream) { - match self { - StaticMutability::None => {} - StaticMutability::Mut(mut_token) => mut_token.to_tokens(tokens), - } - } - } -} diff --git a/vendor/syn/src/lib.rs b/vendor/syn/src/lib.rs deleted file mode 100644 index cd7090515ac4f1..00000000000000 --- a/vendor/syn/src/lib.rs +++ /dev/null @@ -1,1009 +0,0 @@ -//! 
[![github]](https://github.com/dtolnay/syn) [![crates-io]](https://crates.io/crates/syn) [![docs-rs]](crate) -//! -//! [github]: https://img.shields.io/badge/github-8da0cb?style=for-the-badge&labelColor=555555&logo=github -//! [crates-io]: https://img.shields.io/badge/crates.io-fc8d62?style=for-the-badge&labelColor=555555&logo=rust -//! [docs-rs]: https://img.shields.io/badge/docs.rs-66c2a5?style=for-the-badge&labelColor=555555&logo=docs.rs -//! -//! <br> -//! -//! Syn is a parsing library for parsing a stream of Rust tokens into a syntax -//! tree of Rust source code. -//! -//! Currently this library is geared toward use in Rust procedural macros, but -//! contains some APIs that may be useful more generally. -//! -//! - **Data structures** — Syn provides a complete syntax tree that can -//! represent any valid Rust source code. The syntax tree is rooted at -//! [`syn::File`] which represents a full source file, but there are other -//! entry points that may be useful to procedural macros including -//! [`syn::Item`], [`syn::Expr`] and [`syn::Type`]. -//! -//! - **Derives** — Of particular interest to derive macros is -//! [`syn::DeriveInput`] which is any of the three legal input items to a -//! derive macro. An example below shows using this type in a library that can -//! derive implementations of a user-defined trait. -//! -//! - **Parsing** — Parsing in Syn is built around [parser functions] with the -//! signature `fn(ParseStream) -> Result<T>`. Every syntax tree node defined -//! by Syn is individually parsable and may be used as a building block for -//! custom syntaxes, or you may dream up your own brand new syntax without -//! involving any of our syntax tree types. -//! -//! - **Location information** — Every token parsed by Syn is associated with a -//! `Span` that tracks line and column information back to the source of that -//! token. These spans allow a procedural macro to display detailed error -//! messages pointing to all the right places in the user's code. There is an -//! example of this below. -//! -//! - **Feature flags** — Functionality is aggressively feature gated so your -//! procedural macros enable only what they need, and do not pay in compile -//! time for all the rest. -//! -//! [`syn::File`]: File -//! [`syn::Item`]: Item -//! [`syn::Expr`]: Expr -//! [`syn::Type`]: Type -//! [`syn::DeriveInput`]: DeriveInput -//! [parser functions]: mod@parse -//! -//! <br> -//! -//! # Example of a derive macro -//! -//! The canonical derive macro using Syn looks like this. We write an ordinary -//! Rust function tagged with a `proc_macro_derive` attribute and the name of -//! the trait we are deriving. Any time that derive appears in the user's code, -//! the Rust compiler passes their data structure as tokens into our macro. We -//! get to execute arbitrary Rust code to figure out what to do with those -//! tokens, then hand some tokens back to the compiler to compile into the -//! user's crate. -//! -//! [`TokenStream`]: proc_macro::TokenStream -//! -//! ```toml -//! [dependencies] -//! syn = "2.0" -//! quote = "1.0" -//! -//! [lib] -//! proc-macro = true -//! ``` -//! -//! ``` -//! # extern crate proc_macro; -//! # -//! use proc_macro::TokenStream; -//! use quote::quote; -//! use syn::{parse_macro_input, DeriveInput}; -//! -//! # const IGNORE_TOKENS: &str = stringify! { -//! #[proc_macro_derive(MyMacro)] -//! # }; -//! pub fn my_macro(input: TokenStream) -> TokenStream { -//! // Parse the input tokens into a syntax tree -//! 
let input = parse_macro_input!(input as DeriveInput); -//! -//! // Build the output, possibly using quasi-quotation -//! let expanded = quote! { -//! // ... -//! }; -//! -//! // Hand the output tokens back to the compiler -//! TokenStream::from(expanded) -//! } -//! ``` -//! -//! The [`heapsize`] example directory shows a complete working implementation -//! of a derive macro. The example derives a `HeapSize` trait which computes an -//! estimate of the amount of heap memory owned by a value. -//! -//! [`heapsize`]: https://github.com/dtolnay/syn/tree/master/examples/heapsize -//! -//! ``` -//! pub trait HeapSize { -//! /// Total number of bytes of heap memory owned by `self`. -//! fn heap_size_of_children(&self) -> usize; -//! } -//! ``` -//! -//! The derive macro allows users to write `#[derive(HeapSize)]` on data -//! structures in their program. -//! -//! ``` -//! # const IGNORE_TOKENS: &str = stringify! { -//! #[derive(HeapSize)] -//! # }; -//! struct Demo<'a, T: ?Sized> { -//! a: Box<T>, -//! b: u8, -//! c: &'a str, -//! d: String, -//! } -//! ``` -//! -//! <p><br></p> -//! -//! # Spans and error reporting -//! -//! The token-based procedural macro API provides great control over where the -//! compiler's error messages are displayed in user code. Consider the error the -//! user sees if one of their field types does not implement `HeapSize`. -//! -//! ``` -//! # const IGNORE_TOKENS: &str = stringify! { -//! #[derive(HeapSize)] -//! # }; -//! struct Broken { -//! ok: String, -//! bad: std::thread::Thread, -//! } -//! ``` -//! -//! By tracking span information all the way through the expansion of a -//! procedural macro as shown in the `heapsize` example, token-based macros in -//! Syn are able to trigger errors that directly pinpoint the source of the -//! problem. -//! -//! ```text -//! error[E0277]: the trait bound `std::thread::Thread: HeapSize` is not satisfied -//! --> src/main.rs:7:5 -//! | -//! 7 | bad: std::thread::Thread, -//! | ^^^^^^^^^^^^^^^^^^^^^^^^ the trait `HeapSize` is not implemented for `Thread` -//! ``` -//! -//! <br> -//! -//! # Parsing a custom syntax -//! -//! The [`lazy-static`] example directory shows the implementation of a -//! `functionlike!(...)` procedural macro in which the input tokens are parsed -//! using Syn's parsing API. -//! -//! [`lazy-static`]: https://github.com/dtolnay/syn/tree/master/examples/lazy-static -//! -//! The example reimplements the popular `lazy_static` crate from crates.io as a -//! procedural macro. -//! -//! ``` -//! # macro_rules! lazy_static { -//! # ($($tt:tt)*) => {} -//! # } -//! # -//! lazy_static! { -//! static ref USERNAME: Regex = Regex::new("^[a-z0-9_-]{3,16}$").unwrap(); -//! } -//! ``` -//! -//! The implementation shows how to trigger custom warnings and error messages -//! on the macro input. -//! -//! ```text -//! warning: come on, pick a more creative name -//! --> src/main.rs:10:16 -//! | -//! 10 | static ref FOO: String = "lazy_static".to_owned(); -//! | ^^^ -//! ``` -//! -//! <br> -//! -//! # Testing -//! -//! When testing macros, we often care not just that the macro can be used -//! successfully but also that when the macro is provided with invalid input it -//! produces maximally helpful error messages. Consider using the [`trybuild`] -//! crate to write tests for errors that are emitted by your macro or errors -//! detected by the Rust compiler in the expanded code following misuse of the -//! macro. Such tests help avoid regressions from later refactors that -//! 
mistakenly make an error no longer trigger or be less helpful than it used -//! to be. -//! -//! [`trybuild`]: https://github.com/dtolnay/trybuild -//! -//! <br> -//! -//! # Debugging -//! -//! When developing a procedural macro it can be helpful to look at what the -//! generated code looks like. Use `cargo rustc -- -Zunstable-options -//! --pretty=expanded` or the [`cargo expand`] subcommand. -//! -//! [`cargo expand`]: https://github.com/dtolnay/cargo-expand -//! -//! To show the expanded code for some crate that uses your procedural macro, -//! run `cargo expand` from that crate. To show the expanded code for one of -//! your own test cases, run `cargo expand --test the_test_case` where the last -//! argument is the name of the test file without the `.rs` extension. -//! -//! This write-up by Brandon W Maister discusses debugging in more detail: -//! [Debugging Rust's new Custom Derive system][debugging]. -//! -//! [debugging]: https://quodlibetor.github.io/posts/debugging-rusts-new-custom-derive-system/ -//! -//! <br> -//! -//! # Optional features -//! -//! Syn puts a lot of functionality behind optional features in order to -//! optimize compile time for the most common use cases. The following features -//! are available. -//! -//! - **`derive`** *(enabled by default)* — Data structures for representing the -//! possible input to a derive macro, including structs and enums and types. -//! - **`full`** — Data structures for representing the syntax tree of all valid -//! Rust source code, including items and expressions. -//! - **`parsing`** *(enabled by default)* — Ability to parse input tokens into -//! a syntax tree node of a chosen type. -//! - **`printing`** *(enabled by default)* — Ability to print a syntax tree -//! node as tokens of Rust source code. -//! - **`visit`** — Trait for traversing a syntax tree. -//! - **`visit-mut`** — Trait for traversing and mutating in place a syntax -//! tree. -//! - **`fold`** — Trait for transforming an owned syntax tree. -//! - **`clone-impls`** *(enabled by default)* — Clone impls for all syntax tree -//! types. -//! - **`extra-traits`** — Debug, Eq, PartialEq, Hash impls for all syntax tree -//! types. -//! - **`proc-macro`** *(enabled by default)* — Runtime dependency on the -//! dynamic library libproc_macro from rustc toolchain. - -// Syn types in rustdoc of other crates get linked to here. 
-#![doc(html_root_url = "https://docs.rs/syn/2.0.110")] -#![cfg_attr(docsrs, feature(doc_cfg), doc(auto_cfg = false))] -#![deny(unsafe_op_in_unsafe_fn)] -#![allow(non_camel_case_types)] -#![cfg_attr(not(check_cfg), allow(unexpected_cfgs))] -#![allow( - clippy::bool_to_int_with_if, - clippy::cast_lossless, - clippy::cast_possible_truncation, - clippy::cast_possible_wrap, - clippy::cast_ptr_alignment, - clippy::default_trait_access, - clippy::derivable_impls, - clippy::diverging_sub_expression, - clippy::doc_markdown, - clippy::elidable_lifetime_names, - clippy::enum_glob_use, - clippy::expl_impl_clone_on_copy, - clippy::explicit_auto_deref, - clippy::fn_params_excessive_bools, - clippy::if_not_else, - clippy::inherent_to_string, - clippy::into_iter_without_iter, - clippy::items_after_statements, - clippy::large_enum_variant, - clippy::let_underscore_untyped, // https://github.com/rust-lang/rust-clippy/issues/10410 - clippy::manual_assert, - clippy::manual_let_else, - clippy::manual_map, - clippy::match_like_matches_macro, - clippy::match_same_arms, - clippy::match_wildcard_for_single_variants, // clippy bug: https://github.com/rust-lang/rust-clippy/issues/6984 - clippy::missing_errors_doc, - clippy::missing_panics_doc, - clippy::module_name_repetitions, - clippy::must_use_candidate, - clippy::needless_doctest_main, - clippy::needless_lifetimes, - clippy::needless_pass_by_value, - clippy::needless_update, - clippy::never_loop, - clippy::range_plus_one, - clippy::redundant_else, - clippy::ref_option, - clippy::return_self_not_must_use, - clippy::similar_names, - clippy::single_match_else, - clippy::struct_excessive_bools, - clippy::too_many_arguments, - clippy::too_many_lines, - clippy::trivially_copy_pass_by_ref, - clippy::unconditional_recursion, // https://github.com/rust-lang/rust-clippy/issues/12133 - clippy::uninhabited_references, - clippy::uninlined_format_args, - clippy::unnecessary_box_returns, - clippy::unnecessary_unwrap, - clippy::used_underscore_binding, - clippy::wildcard_imports, -)] -#![allow(unknown_lints, mismatched_lifetime_syntaxes)] - -extern crate self as syn; - -#[cfg(feature = "proc-macro")] -extern crate proc_macro; - -#[macro_use] -mod macros; - -#[cfg(feature = "parsing")] -#[macro_use] -mod group; - -#[macro_use] -pub mod token; - -#[cfg(any(feature = "full", feature = "derive"))] -mod attr; -#[cfg(any(feature = "full", feature = "derive"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] -pub use crate::attr::{AttrStyle, Attribute, Meta, MetaList, MetaNameValue}; - -mod bigint; - -#[cfg(feature = "parsing")] -#[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] -pub mod buffer; - -#[cfg(any( - all(feature = "parsing", feature = "full"), - all(feature = "printing", any(feature = "full", feature = "derive")), -))] -mod classify; - -mod custom_keyword; - -mod custom_punctuation; - -#[cfg(any(feature = "full", feature = "derive"))] -mod data; -#[cfg(any(feature = "full", feature = "derive"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] -pub use crate::data::{Field, Fields, FieldsNamed, FieldsUnnamed, Variant}; - -#[cfg(any(feature = "full", feature = "derive"))] -mod derive; -#[cfg(feature = "derive")] -#[cfg_attr(docsrs, doc(cfg(feature = "derive")))] -pub use crate::derive::{Data, DataEnum, DataStruct, DataUnion, DeriveInput}; - -mod drops; - -mod error; -pub use crate::error::{Error, Result}; - -#[cfg(any(feature = "full", feature = "derive"))] -mod expr; -#[cfg(feature = "full")] -#[cfg_attr(docsrs, 
doc(cfg(feature = "full")))] -pub use crate::expr::{Arm, Label, PointerMutability, RangeLimits}; -#[cfg(any(feature = "full", feature = "derive"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] -pub use crate::expr::{ - Expr, ExprBinary, ExprCall, ExprCast, ExprField, ExprIndex, ExprLit, ExprMacro, ExprMethodCall, - ExprParen, ExprPath, ExprReference, ExprStruct, ExprUnary, FieldValue, Index, Member, -}; -#[cfg(any(feature = "full", feature = "derive"))] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub use crate::expr::{ - ExprArray, ExprAssign, ExprAsync, ExprAwait, ExprBlock, ExprBreak, ExprClosure, ExprConst, - ExprContinue, ExprForLoop, ExprGroup, ExprIf, ExprInfer, ExprLet, ExprLoop, ExprMatch, - ExprRange, ExprRawAddr, ExprRepeat, ExprReturn, ExprTry, ExprTryBlock, ExprTuple, ExprUnsafe, - ExprWhile, ExprYield, -}; - -pub mod ext; - -#[cfg(feature = "full")] -mod file; -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub use crate::file::File; - -#[cfg(all(any(feature = "full", feature = "derive"), feature = "printing"))] -mod fixup; - -#[cfg(any(feature = "full", feature = "derive"))] -mod generics; -#[cfg(any(feature = "full", feature = "derive"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] -pub use crate::generics::{ - BoundLifetimes, ConstParam, GenericParam, Generics, LifetimeParam, PredicateLifetime, - PredicateType, TraitBound, TraitBoundModifier, TypeParam, TypeParamBound, WhereClause, - WherePredicate, -}; -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub use crate::generics::{CapturedParam, PreciseCapture}; -#[cfg(all(any(feature = "full", feature = "derive"), feature = "printing"))] -#[cfg_attr( - docsrs, - doc(cfg(all(any(feature = "full", feature = "derive"), feature = "printing"))) -)] -pub use crate::generics::{ImplGenerics, Turbofish, TypeGenerics}; - -mod ident; -#[doc(inline)] -pub use crate::ident::Ident; - -#[cfg(feature = "full")] -mod item; -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub use crate::item::{ - FnArg, ForeignItem, ForeignItemFn, ForeignItemMacro, ForeignItemStatic, ForeignItemType, - ImplItem, ImplItemConst, ImplItemFn, ImplItemMacro, ImplItemType, ImplRestriction, Item, - ItemConst, ItemEnum, ItemExternCrate, ItemFn, ItemForeignMod, ItemImpl, ItemMacro, ItemMod, - ItemStatic, ItemStruct, ItemTrait, ItemTraitAlias, ItemType, ItemUnion, ItemUse, Receiver, - Signature, StaticMutability, TraitItem, TraitItemConst, TraitItemFn, TraitItemMacro, - TraitItemType, UseGlob, UseGroup, UseName, UsePath, UseRename, UseTree, Variadic, -}; - -mod lifetime; -#[doc(inline)] -pub use crate::lifetime::Lifetime; - -mod lit; -#[doc(hidden)] // https://github.com/dtolnay/syn/issues/1566 -pub use crate::lit::StrStyle; -#[doc(inline)] -pub use crate::lit::{ - Lit, LitBool, LitByte, LitByteStr, LitCStr, LitChar, LitFloat, LitInt, LitStr, -}; - -#[cfg(feature = "parsing")] -mod lookahead; - -#[cfg(any(feature = "full", feature = "derive"))] -mod mac; -#[cfg(any(feature = "full", feature = "derive"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] -pub use crate::mac::{Macro, MacroDelimiter}; - -#[cfg(all(feature = "parsing", any(feature = "full", feature = "derive")))] -#[cfg_attr( - docsrs, - doc(cfg(all(feature = "parsing", any(feature = "full", feature = "derive")))) -)] -pub mod meta; - -#[cfg(any(feature = "full", feature = "derive"))] -mod op; -#[cfg(any(feature = "full", feature = 
"derive"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] -pub use crate::op::{BinOp, UnOp}; - -#[cfg(feature = "parsing")] -#[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] -pub mod parse; - -#[cfg(all(feature = "parsing", feature = "proc-macro"))] -mod parse_macro_input; - -#[cfg(all(feature = "parsing", feature = "printing"))] -mod parse_quote; - -#[cfg(feature = "full")] -mod pat; -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub use crate::pat::{ - FieldPat, Pat, PatConst, PatIdent, PatLit, PatMacro, PatOr, PatParen, PatPath, PatRange, - PatReference, PatRest, PatSlice, PatStruct, PatTuple, PatTupleStruct, PatType, PatWild, -}; - -#[cfg(any(feature = "full", feature = "derive"))] -mod path; -#[cfg(any(feature = "full", feature = "derive"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] -pub use crate::path::{ - AngleBracketedGenericArguments, AssocConst, AssocType, Constraint, GenericArgument, - ParenthesizedGenericArguments, Path, PathArguments, PathSegment, QSelf, -}; - -#[cfg(all( - any(feature = "full", feature = "derive"), - any(feature = "parsing", feature = "printing") -))] -mod precedence; - -#[cfg(all(any(feature = "full", feature = "derive"), feature = "printing"))] -mod print; - -pub mod punctuated; - -#[cfg(any(feature = "full", feature = "derive"))] -mod restriction; -#[cfg(any(feature = "full", feature = "derive"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] -pub use crate::restriction::{FieldMutability, VisRestricted, Visibility}; - -mod sealed; - -#[cfg(all(feature = "parsing", feature = "derive", not(feature = "full")))] -mod scan_expr; - -mod span; - -#[cfg(all(feature = "parsing", feature = "printing"))] -#[cfg_attr(docsrs, doc(cfg(all(feature = "parsing", feature = "printing"))))] -pub mod spanned; - -#[cfg(feature = "full")] -mod stmt; -#[cfg(feature = "full")] -#[cfg_attr(docsrs, doc(cfg(feature = "full")))] -pub use crate::stmt::{Block, Local, LocalInit, Stmt, StmtMacro}; - -mod thread; - -#[cfg(all(any(feature = "full", feature = "derive"), feature = "extra-traits"))] -mod tt; - -#[cfg(any(feature = "full", feature = "derive"))] -mod ty; -#[cfg(any(feature = "full", feature = "derive"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] -pub use crate::ty::{ - Abi, BareFnArg, BareVariadic, ReturnType, Type, TypeArray, TypeBareFn, TypeGroup, - TypeImplTrait, TypeInfer, TypeMacro, TypeNever, TypeParen, TypePath, TypePtr, TypeReference, - TypeSlice, TypeTraitObject, TypeTuple, -}; - -#[cfg(all(any(feature = "full", feature = "derive"), feature = "parsing"))] -mod verbatim; - -#[cfg(all(feature = "parsing", feature = "full"))] -mod whitespace; - -#[rustfmt::skip] // https://github.com/rust-lang/rustfmt/issues/6176 -mod gen { - /// Syntax tree traversal to transform the nodes of an owned syntax tree. - /// - /// Each method of the [`Fold`] trait is a hook that can be overridden to - /// customize the behavior when transforming the corresponding type of node. - /// By default, every method recursively visits the substructure of the - /// input by invoking the right visitor method of each of its fields. - /// - /// [`Fold`]: fold::Fold - /// - /// ``` - /// # use syn::{Attribute, BinOp, Expr, ExprBinary}; - /// # - /// pub trait Fold { - /// /* ... */ - /// - /// fn fold_expr_binary(&mut self, node: ExprBinary) -> ExprBinary { - /// fold_expr_binary(self, node) - /// } - /// - /// /* ... 
*/ - /// # fn fold_attribute(&mut self, node: Attribute) -> Attribute; - /// # fn fold_expr(&mut self, node: Expr) -> Expr; - /// # fn fold_bin_op(&mut self, node: BinOp) -> BinOp; - /// } - /// - /// pub fn fold_expr_binary<V>(v: &mut V, node: ExprBinary) -> ExprBinary - /// where - /// V: Fold + ?Sized, - /// { - /// ExprBinary { - /// attrs: node - /// .attrs - /// .into_iter() - /// .map(|attr| v.fold_attribute(attr)) - /// .collect(), - /// left: Box::new(v.fold_expr(*node.left)), - /// op: v.fold_bin_op(node.op), - /// right: Box::new(v.fold_expr(*node.right)), - /// } - /// } - /// - /// /* ... */ - /// ``` - /// - /// <br> - /// - /// # Example - /// - /// This fold inserts parentheses to fully parenthesizes any expression. - /// - /// ``` - /// // [dependencies] - /// // quote = "1.0" - /// // syn = { version = "2.0", features = ["fold", "full"] } - /// - /// use quote::quote; - /// use syn::fold::{fold_expr, Fold}; - /// use syn::{token, Expr, ExprParen}; - /// - /// struct ParenthesizeEveryExpr; - /// - /// impl Fold for ParenthesizeEveryExpr { - /// fn fold_expr(&mut self, expr: Expr) -> Expr { - /// Expr::Paren(ExprParen { - /// attrs: Vec::new(), - /// expr: Box::new(fold_expr(self, expr)), - /// paren_token: token::Paren::default(), - /// }) - /// } - /// } - /// - /// fn main() { - /// let code = quote! { a() + b(1) * c.d }; - /// let expr: Expr = syn::parse2(code).unwrap(); - /// let parenthesized = ParenthesizeEveryExpr.fold_expr(expr); - /// println!("{}", quote!(#parenthesized)); - /// - /// // Output: (((a)()) + (((b)((1))) * ((c).d))) - /// } - /// ``` - #[cfg(feature = "fold")] - #[cfg_attr(docsrs, doc(cfg(feature = "fold")))] - #[rustfmt::skip] - pub mod fold; - - /// Syntax tree traversal to walk a shared borrow of a syntax tree. - /// - /// Each method of the [`Visit`] trait is a hook that can be overridden to - /// customize the behavior when visiting the corresponding type of node. By - /// default, every method recursively visits the substructure of the input - /// by invoking the right visitor method of each of its fields. - /// - /// [`Visit`]: visit::Visit - /// - /// ``` - /// # use syn::{Attribute, BinOp, Expr, ExprBinary}; - /// # - /// pub trait Visit<'ast> { - /// /* ... */ - /// - /// fn visit_expr_binary(&mut self, node: &'ast ExprBinary) { - /// visit_expr_binary(self, node); - /// } - /// - /// /* ... */ - /// # fn visit_attribute(&mut self, node: &'ast Attribute); - /// # fn visit_expr(&mut self, node: &'ast Expr); - /// # fn visit_bin_op(&mut self, node: &'ast BinOp); - /// } - /// - /// pub fn visit_expr_binary<'ast, V>(v: &mut V, node: &'ast ExprBinary) - /// where - /// V: Visit<'ast> + ?Sized, - /// { - /// for attr in &node.attrs { - /// v.visit_attribute(attr); - /// } - /// v.visit_expr(&*node.left); - /// v.visit_bin_op(&node.op); - /// v.visit_expr(&*node.right); - /// } - /// - /// /* ... */ - /// ``` - /// - /// <br> - /// - /// # Example - /// - /// This visitor will print the name of every freestanding function in the - /// syntax tree, including nested functions. 
- /// - /// ``` - /// // [dependencies] - /// // quote = "1.0" - /// // syn = { version = "2.0", features = ["full", "visit"] } - /// - /// use quote::quote; - /// use syn::visit::{self, Visit}; - /// use syn::{File, ItemFn}; - /// - /// struct FnVisitor; - /// - /// impl<'ast> Visit<'ast> for FnVisitor { - /// fn visit_item_fn(&mut self, node: &'ast ItemFn) { - /// println!("Function with name={}", node.sig.ident); - /// - /// // Delegate to the default impl to visit any nested functions. - /// visit::visit_item_fn(self, node); - /// } - /// } - /// - /// fn main() { - /// let code = quote! { - /// pub fn f() { - /// fn g() {} - /// } - /// }; - /// - /// let syntax_tree: File = syn::parse2(code).unwrap(); - /// FnVisitor.visit_file(&syntax_tree); - /// } - /// ``` - /// - /// The `'ast` lifetime on the input references means that the syntax tree - /// outlives the complete recursive visit call, so the visitor is allowed to - /// hold on to references into the syntax tree. - /// - /// ``` - /// use quote::quote; - /// use syn::visit::{self, Visit}; - /// use syn::{File, ItemFn}; - /// - /// struct FnVisitor<'ast> { - /// functions: Vec<&'ast ItemFn>, - /// } - /// - /// impl<'ast> Visit<'ast> for FnVisitor<'ast> { - /// fn visit_item_fn(&mut self, node: &'ast ItemFn) { - /// self.functions.push(node); - /// visit::visit_item_fn(self, node); - /// } - /// } - /// - /// fn main() { - /// let code = quote! { - /// pub fn f() { - /// fn g() {} - /// } - /// }; - /// - /// let syntax_tree: File = syn::parse2(code).unwrap(); - /// let mut visitor = FnVisitor { functions: Vec::new() }; - /// visitor.visit_file(&syntax_tree); - /// for f in visitor.functions { - /// println!("Function with name={}", f.sig.ident); - /// } - /// } - /// ``` - #[cfg(feature = "visit")] - #[cfg_attr(docsrs, doc(cfg(feature = "visit")))] - #[rustfmt::skip] - pub mod visit; - - /// Syntax tree traversal to mutate an exclusive borrow of a syntax tree in - /// place. - /// - /// Each method of the [`VisitMut`] trait is a hook that can be overridden - /// to customize the behavior when mutating the corresponding type of node. - /// By default, every method recursively visits the substructure of the - /// input by invoking the right visitor method of each of its fields. - /// - /// [`VisitMut`]: visit_mut::VisitMut - /// - /// ``` - /// # use syn::{Attribute, BinOp, Expr, ExprBinary}; - /// # - /// pub trait VisitMut { - /// /* ... */ - /// - /// fn visit_expr_binary_mut(&mut self, node: &mut ExprBinary) { - /// visit_expr_binary_mut(self, node); - /// } - /// - /// /* ... */ - /// # fn visit_attribute_mut(&mut self, node: &mut Attribute); - /// # fn visit_expr_mut(&mut self, node: &mut Expr); - /// # fn visit_bin_op_mut(&mut self, node: &mut BinOp); - /// } - /// - /// pub fn visit_expr_binary_mut<V>(v: &mut V, node: &mut ExprBinary) - /// where - /// V: VisitMut + ?Sized, - /// { - /// for attr in &mut node.attrs { - /// v.visit_attribute_mut(attr); - /// } - /// v.visit_expr_mut(&mut *node.left); - /// v.visit_bin_op_mut(&mut node.op); - /// v.visit_expr_mut(&mut *node.right); - /// } - /// - /// /* ... */ - /// ``` - /// - /// <br> - /// - /// # Example - /// - /// This mut visitor replace occurrences of u256 suffixed integer literals - /// like `999u256` with a macro invocation `bigint::u256!(999)`. 
- /// - /// ``` - /// // [dependencies] - /// // quote = "1.0" - /// // syn = { version = "2.0", features = ["full", "visit-mut"] } - /// - /// use quote::quote; - /// use syn::visit_mut::{self, VisitMut}; - /// use syn::{parse_quote, Expr, File, Lit, LitInt}; - /// - /// struct BigintReplace; - /// - /// impl VisitMut for BigintReplace { - /// fn visit_expr_mut(&mut self, node: &mut Expr) { - /// if let Expr::Lit(expr) = &node { - /// if let Lit::Int(int) = &expr.lit { - /// if int.suffix() == "u256" { - /// let digits = int.base10_digits(); - /// let unsuffixed: LitInt = syn::parse_str(digits).unwrap(); - /// *node = parse_quote!(bigint::u256!(#unsuffixed)); - /// return; - /// } - /// } - /// } - /// - /// // Delegate to the default impl to visit nested expressions. - /// visit_mut::visit_expr_mut(self, node); - /// } - /// } - /// - /// fn main() { - /// let code = quote! { - /// fn main() { - /// let _ = 999u256; - /// } - /// }; - /// - /// let mut syntax_tree: File = syn::parse2(code).unwrap(); - /// BigintReplace.visit_file_mut(&mut syntax_tree); - /// println!("{}", quote!(#syntax_tree)); - /// } - /// ``` - #[cfg(feature = "visit-mut")] - #[cfg_attr(docsrs, doc(cfg(feature = "visit-mut")))] - #[rustfmt::skip] - pub mod visit_mut; - - #[cfg(feature = "clone-impls")] - #[rustfmt::skip] - mod clone; - - #[cfg(feature = "extra-traits")] - #[rustfmt::skip] - mod debug; - - #[cfg(feature = "extra-traits")] - #[rustfmt::skip] - mod eq; - - #[cfg(feature = "extra-traits")] - #[rustfmt::skip] - mod hash; -} - -#[cfg(feature = "fold")] -#[cfg_attr(docsrs, doc(cfg(feature = "fold")))] -pub use crate::gen::fold; - -#[cfg(feature = "visit")] -#[cfg_attr(docsrs, doc(cfg(feature = "visit")))] -pub use crate::gen::visit; - -#[cfg(feature = "visit-mut")] -#[cfg_attr(docsrs, doc(cfg(feature = "visit-mut")))] -pub use crate::gen::visit_mut; - -// Not public API. -#[doc(hidden)] -#[path = "export.rs"] -pub mod __private; - -/// Parse tokens of source code into the chosen syntax tree node. -/// -/// This is preferred over parsing a string because tokens are able to preserve -/// information about where in the user's code they were originally written (the -/// "span" of the token), possibly allowing the compiler to produce better error -/// messages. -/// -/// This function parses a `proc_macro::TokenStream` which is the type used for -/// interop with the compiler in a procedural macro. To parse a -/// `proc_macro2::TokenStream`, use [`syn::parse2`] instead. -/// -/// [`syn::parse2`]: parse2 -/// -/// This function enforces that the input is fully parsed. If there are any -/// unparsed tokens at the end of the stream, an error is returned. -#[cfg(all(feature = "parsing", feature = "proc-macro"))] -#[cfg_attr(docsrs, doc(cfg(all(feature = "parsing", feature = "proc-macro"))))] -pub fn parse<T: parse::Parse>(tokens: proc_macro::TokenStream) -> Result<T> { - parse::Parser::parse(T::parse, tokens) -} - -/// Parse a proc-macro2 token stream into the chosen syntax tree node. -/// -/// This function parses a `proc_macro2::TokenStream` which is commonly useful -/// when the input comes from a node of the Syn syntax tree, for example the -/// body tokens of a [`Macro`] node. When in a procedural macro parsing the -/// `proc_macro::TokenStream` provided by the compiler, use [`syn::parse`] -/// instead. -/// -/// [`syn::parse`]: parse() -/// -/// This function enforces that the input is fully parsed. If there are any -/// unparsed tokens at the end of the stream, an error is returned. 
-#[cfg(feature = "parsing")] -#[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] -pub fn parse2<T: parse::Parse>(tokens: proc_macro2::TokenStream) -> Result<T> { - parse::Parser::parse2(T::parse, tokens) -} - -/// Parse a string of Rust code into the chosen syntax tree node. -/// -/// This function enforces that the input is fully parsed. If there are any -/// unparsed tokens at the end of the stream, an error is returned. -/// -/// # Hygiene -/// -/// Every span in the resulting syntax tree will be set to resolve at the macro -/// call site. -/// -/// # Examples -/// -/// ``` -/// use syn::{Expr, Result}; -/// -/// fn run() -> Result<()> { -/// let code = "assert_eq!(u8::max_value(), 255)"; -/// let expr = syn::parse_str::<Expr>(code)?; -/// println!("{:#?}", expr); -/// Ok(()) -/// } -/// # -/// # run().unwrap(); -/// ``` -#[cfg(feature = "parsing")] -#[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] -pub fn parse_str<T: parse::Parse>(s: &str) -> Result<T> { - parse::Parser::parse_str(T::parse, s) -} - -/// Parse the content of a file of Rust code. -/// -/// This is different from `syn::parse_str::<File>(content)` in two ways: -/// -/// - It discards a leading byte order mark `\u{FEFF}` if the file has one. -/// - It preserves the shebang line of the file, such as `#!/usr/bin/env rustx`. -/// -/// If present, either of these would be an error using `from_str`. -/// -/// # Examples -/// -/// ```no_run -/// use std::error::Error; -/// use std::fs; -/// use std::io::Read; -/// -/// fn run() -> Result<(), Box<dyn Error>> { -/// let content = fs::read_to_string("path/to/code.rs")?; -/// let ast = syn::parse_file(&content)?; -/// if let Some(shebang) = ast.shebang { -/// println!("{}", shebang); -/// } -/// println!("{} items", ast.items.len()); -/// -/// Ok(()) -/// } -/// # -/// # run().unwrap(); -/// ``` -#[cfg(all(feature = "parsing", feature = "full"))] -#[cfg_attr(docsrs, doc(cfg(all(feature = "parsing", feature = "full"))))] -pub fn parse_file(mut content: &str) -> Result<File> { - // Strip the BOM if it is present - const BOM: &str = "\u{feff}"; - if content.starts_with(BOM) { - content = &content[BOM.len()..]; - } - - let mut shebang = None; - if content.starts_with("#!") { - let rest = whitespace::skip(&content[2..]); - if !rest.starts_with('[') { - if let Some(idx) = content.find('\n') { - shebang = Some(content[..idx].to_string()); - content = &content[idx..]; - } else { - shebang = Some(content.to_string()); - content = ""; - } - } - } - - let mut file: File = parse_str(content)?; - file.shebang = shebang; - Ok(file) -} diff --git a/vendor/syn/src/lifetime.rs b/vendor/syn/src/lifetime.rs deleted file mode 100644 index 248af5aaad54f8..00000000000000 --- a/vendor/syn/src/lifetime.rs +++ /dev/null @@ -1,155 +0,0 @@ -#[cfg(feature = "parsing")] -use crate::lookahead; -use proc_macro2::{Ident, Span}; -use std::cmp::Ordering; -use std::fmt::{self, Display}; -use std::hash::{Hash, Hasher}; - -/// A Rust lifetime: `'a`. -/// -/// Lifetime names must conform to the following rules: -/// -/// - Must start with an apostrophe. -/// - Must not consist of just an apostrophe: `'`. -/// - Character after the apostrophe must be `_` or a Unicode code point with -/// the XID_Start property. -/// - All following characters must be Unicode code points with the XID_Continue -/// property. -pub struct Lifetime { - pub apostrophe: Span, - pub ident: Ident, -} - -impl Lifetime { - /// # Panics - /// - /// Panics if the lifetime does not conform to the bulleted rules above. 
- /// - /// # Invocation - /// - /// ``` - /// # use proc_macro2::Span; - /// # use syn::Lifetime; - /// # - /// # fn f() -> Lifetime { - /// Lifetime::new("'a", Span::call_site()) - /// # } - /// ``` - pub fn new(symbol: &str, span: Span) -> Self { - if !symbol.starts_with('\'') { - panic!( - "lifetime name must start with apostrophe as in \"'a\", got {:?}", - symbol - ); - } - - if symbol == "'" { - panic!("lifetime name must not be empty"); - } - - if !crate::ident::xid_ok(&symbol[1..]) { - panic!("{:?} is not a valid lifetime name", symbol); - } - - Lifetime { - apostrophe: span, - ident: Ident::new(&symbol[1..], span), - } - } - - pub fn span(&self) -> Span { - self.apostrophe - .join(self.ident.span()) - .unwrap_or(self.apostrophe) - } - - pub fn set_span(&mut self, span: Span) { - self.apostrophe = span; - self.ident.set_span(span); - } -} - -impl Display for Lifetime { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - "'".fmt(formatter)?; - self.ident.fmt(formatter) - } -} - -impl Clone for Lifetime { - fn clone(&self) -> Self { - Lifetime { - apostrophe: self.apostrophe, - ident: self.ident.clone(), - } - } -} - -impl PartialEq for Lifetime { - fn eq(&self, other: &Lifetime) -> bool { - self.ident.eq(&other.ident) - } -} - -impl Eq for Lifetime {} - -impl PartialOrd for Lifetime { - fn partial_cmp(&self, other: &Lifetime) -> Option<Ordering> { - Some(self.cmp(other)) - } -} - -impl Ord for Lifetime { - fn cmp(&self, other: &Lifetime) -> Ordering { - self.ident.cmp(&other.ident) - } -} - -impl Hash for Lifetime { - fn hash<H: Hasher>(&self, h: &mut H) { - self.ident.hash(h); - } -} - -#[cfg(feature = "parsing")] -pub_if_not_doc! { - #[doc(hidden)] - #[allow(non_snake_case)] - pub fn Lifetime(marker: lookahead::TokenMarker) -> Lifetime { - match marker {} - } -} - -#[cfg(feature = "parsing")] -pub(crate) mod parsing { - use crate::error::Result; - use crate::lifetime::Lifetime; - use crate::parse::{Parse, ParseStream}; - - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - impl Parse for Lifetime { - fn parse(input: ParseStream) -> Result<Self> { - input.step(|cursor| { - cursor - .lifetime() - .ok_or_else(|| cursor.error("expected lifetime")) - }) - } - } -} - -#[cfg(feature = "printing")] -mod printing { - use crate::ext::PunctExt as _; - use crate::lifetime::Lifetime; - use proc_macro2::{Punct, Spacing, TokenStream}; - use quote::{ToTokens, TokenStreamExt as _}; - - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for Lifetime { - fn to_tokens(&self, tokens: &mut TokenStream) { - tokens.append(Punct::new_spanned('\'', Spacing::Joint, self.apostrophe)); - self.ident.to_tokens(tokens); - } - } -} diff --git a/vendor/syn/src/lit.rs b/vendor/syn/src/lit.rs deleted file mode 100644 index 369c3a12d1e982..00000000000000 --- a/vendor/syn/src/lit.rs +++ /dev/null @@ -1,1918 +0,0 @@ -#[cfg(feature = "parsing")] -use crate::ext::TokenStreamExt as _; -#[cfg(feature = "parsing")] -use crate::lookahead; -#[cfg(feature = "parsing")] -use crate::parse::{Parse, Parser}; -use crate::{Error, Result}; -use proc_macro2::{Ident, Literal, Span}; -#[cfg(feature = "parsing")] -use proc_macro2::{TokenStream, TokenTree}; -use std::ffi::{CStr, CString}; -use std::fmt::{self, Display}; -#[cfg(feature = "extra-traits")] -use std::hash::{Hash, Hasher}; -use std::str::{self, FromStr}; - -ast_enum_of_structs! { - /// A Rust literal such as a string or integer or boolean. - /// - /// # Syntax tree enum - /// - /// This type is a [syntax tree enum]. 
- /// - /// [syntax tree enum]: crate::expr::Expr#syntax-tree-enums - #[non_exhaustive] - pub enum Lit { - /// A UTF-8 string literal: `"foo"`. - Str(LitStr), - - /// A byte string literal: `b"foo"`. - ByteStr(LitByteStr), - - /// A nul-terminated C-string literal: `c"foo"`. - CStr(LitCStr), - - /// A byte literal: `b'f'`. - Byte(LitByte), - - /// A character literal: `'a'`. - Char(LitChar), - - /// An integer literal: `1` or `1u16`. - Int(LitInt), - - /// A floating point literal: `1f64` or `1.0e10f64`. - /// - /// Must be finite. May not be infinite or NaN. - Float(LitFloat), - - /// A boolean literal: `true` or `false`. - Bool(LitBool), - - /// A raw token literal not interpreted by Syn. - Verbatim(Literal), - } -} - -ast_struct! { - /// A UTF-8 string literal: `"foo"`. - pub struct LitStr { - repr: Box<LitRepr>, - } -} - -ast_struct! { - /// A byte string literal: `b"foo"`. - pub struct LitByteStr { - repr: Box<LitRepr>, - } -} - -ast_struct! { - /// A nul-terminated C-string literal: `c"foo"`. - pub struct LitCStr { - repr: Box<LitRepr>, - } -} - -ast_struct! { - /// A byte literal: `b'f'`. - pub struct LitByte { - repr: Box<LitRepr>, - } -} - -ast_struct! { - /// A character literal: `'a'`. - pub struct LitChar { - repr: Box<LitRepr>, - } -} - -struct LitRepr { - token: Literal, - suffix: Box<str>, -} - -ast_struct! { - /// An integer literal: `1` or `1u16`. - pub struct LitInt { - repr: Box<LitIntRepr>, - } -} - -struct LitIntRepr { - token: Literal, - digits: Box<str>, - suffix: Box<str>, -} - -ast_struct! { - /// A floating point literal: `1f64` or `1.0e10f64`. - /// - /// Must be finite. May not be infinite or NaN. - pub struct LitFloat { - repr: Box<LitFloatRepr>, - } -} - -struct LitFloatRepr { - token: Literal, - digits: Box<str>, - suffix: Box<str>, -} - -ast_struct! { - /// A boolean literal: `true` or `false`. - pub struct LitBool { - pub value: bool, - pub span: Span, - } -} - -impl LitStr { - pub fn new(value: &str, span: Span) -> Self { - let mut token = Literal::string(value); - token.set_span(span); - LitStr { - repr: Box::new(LitRepr { - token, - suffix: Box::<str>::default(), - }), - } - } - - pub fn value(&self) -> String { - let repr = self.repr.token.to_string(); - let (value, _suffix) = value::parse_lit_str(&repr).unwrap(); - String::from(value) - } - - /// Parse a syntax tree node from the content of this string literal. - /// - /// All spans in the syntax tree will point to the span of this `LitStr`. - /// - /// # Example - /// - /// ``` - /// use syn::{Attribute, Error, Expr, Lit, Meta, Path, Result}; - /// - /// // Parses the path from an attribute that looks like: - /// // - /// // #[path = "a::b::c"] - /// // - /// // or returns `None` if the input is some other attribute. - /// fn get_path(attr: &Attribute) -> Result<Option<Path>> { - /// if !attr.path().is_ident("path") { - /// return Ok(None); - /// } - /// - /// if let Meta::NameValue(meta) = &attr.meta { - /// if let Expr::Lit(expr) = &meta.value { - /// if let Lit::Str(lit_str) = &expr.lit { - /// return lit_str.parse().map(Some); - /// } - /// } - /// } - /// - /// let message = "expected #[path = \"...\"]"; - /// Err(Error::new_spanned(attr, message)) - /// } - /// ``` - #[cfg(feature = "parsing")] - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - pub fn parse<T: Parse>(&self) -> Result<T> { - self.parse_with(T::parse) - } - - /// Invoke parser on the content of this string literal. - /// - /// All spans in the syntax tree will point to the span of this `LitStr`. 
- /// - /// # Example - /// - /// ``` - /// # use proc_macro2::Span; - /// # use syn::{LitStr, Result}; - /// # - /// # fn main() -> Result<()> { - /// # let lit_str = LitStr::new("a::b::c", Span::call_site()); - /// # - /// # const IGNORE: &str = stringify! { - /// let lit_str: LitStr = /* ... */; - /// # }; - /// - /// // Parse a string literal like "a::b::c" into a Path, not allowing - /// // generic arguments on any of the path segments. - /// let basic_path = lit_str.parse_with(syn::Path::parse_mod_style)?; - /// # - /// # Ok(()) - /// # } - /// ``` - #[cfg(feature = "parsing")] - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - pub fn parse_with<F: Parser>(&self, parser: F) -> Result<F::Output> { - use proc_macro2::Group; - - // Token stream with every span replaced by the given one. - fn respan_token_stream(stream: TokenStream, span: Span) -> TokenStream { - let mut tokens = TokenStream::new(); - for token in stream { - tokens.append(respan_token_tree(token, span)); - } - tokens - } - - // Token tree with every span replaced by the given one. - fn respan_token_tree(mut token: TokenTree, span: Span) -> TokenTree { - match &mut token { - TokenTree::Group(g) => { - let stream = respan_token_stream(g.stream(), span); - *g = Group::new(g.delimiter(), stream); - g.set_span(span); - } - other => other.set_span(span), - } - token - } - - // Parse string literal into a token stream with every span equal to the - // original literal's span. - let span = self.span(); - let mut tokens = TokenStream::from_str(&self.value())?; - tokens = respan_token_stream(tokens, span); - - let result = crate::parse::parse_scoped(parser, span, tokens)?; - - let suffix = self.suffix(); - if !suffix.is_empty() { - return Err(Error::new( - self.span(), - format!("unexpected suffix `{}` on string literal", suffix), - )); - } - - Ok(result) - } - - pub fn span(&self) -> Span { - self.repr.token.span() - } - - pub fn set_span(&mut self, span: Span) { - self.repr.token.set_span(span); - } - - pub fn suffix(&self) -> &str { - &self.repr.suffix - } - - pub fn token(&self) -> Literal { - self.repr.token.clone() - } -} - -impl LitByteStr { - pub fn new(value: &[u8], span: Span) -> Self { - let mut token = Literal::byte_string(value); - token.set_span(span); - LitByteStr { - repr: Box::new(LitRepr { - token, - suffix: Box::<str>::default(), - }), - } - } - - pub fn value(&self) -> Vec<u8> { - let repr = self.repr.token.to_string(); - let (value, _suffix) = value::parse_lit_byte_str(&repr).unwrap(); - value - } - - pub fn span(&self) -> Span { - self.repr.token.span() - } - - pub fn set_span(&mut self, span: Span) { - self.repr.token.set_span(span); - } - - pub fn suffix(&self) -> &str { - &self.repr.suffix - } - - pub fn token(&self) -> Literal { - self.repr.token.clone() - } -} - -impl LitCStr { - pub fn new(value: &CStr, span: Span) -> Self { - let mut token = Literal::c_string(value); - token.set_span(span); - LitCStr { - repr: Box::new(LitRepr { - token, - suffix: Box::<str>::default(), - }), - } - } - - pub fn value(&self) -> CString { - let repr = self.repr.token.to_string(); - let (value, _suffix) = value::parse_lit_c_str(&repr).unwrap(); - value - } - - pub fn span(&self) -> Span { - self.repr.token.span() - } - - pub fn set_span(&mut self, span: Span) { - self.repr.token.set_span(span); - } - - pub fn suffix(&self) -> &str { - &self.repr.suffix - } - - pub fn token(&self) -> Literal { - self.repr.token.clone() - } -} - -impl LitByte { - pub fn new(value: u8, span: Span) -> Self { - let mut token = 
Literal::u8_suffixed(value); - token.set_span(span); - LitByte { - repr: Box::new(LitRepr { - token, - suffix: Box::<str>::default(), - }), - } - } - - pub fn value(&self) -> u8 { - let repr = self.repr.token.to_string(); - let (value, _suffix) = value::parse_lit_byte(&repr).unwrap(); - value - } - - pub fn span(&self) -> Span { - self.repr.token.span() - } - - pub fn set_span(&mut self, span: Span) { - self.repr.token.set_span(span); - } - - pub fn suffix(&self) -> &str { - &self.repr.suffix - } - - pub fn token(&self) -> Literal { - self.repr.token.clone() - } -} - -impl LitChar { - pub fn new(value: char, span: Span) -> Self { - let mut token = Literal::character(value); - token.set_span(span); - LitChar { - repr: Box::new(LitRepr { - token, - suffix: Box::<str>::default(), - }), - } - } - - pub fn value(&self) -> char { - let repr = self.repr.token.to_string(); - let (value, _suffix) = value::parse_lit_char(&repr).unwrap(); - value - } - - pub fn span(&self) -> Span { - self.repr.token.span() - } - - pub fn set_span(&mut self, span: Span) { - self.repr.token.set_span(span); - } - - pub fn suffix(&self) -> &str { - &self.repr.suffix - } - - pub fn token(&self) -> Literal { - self.repr.token.clone() - } -} - -impl LitInt { - #[track_caller] - pub fn new(repr: &str, span: Span) -> Self { - let (digits, suffix) = match value::parse_lit_int(repr) { - Some(parse) => parse, - None => panic!("not an integer literal: `{}`", repr), - }; - - let mut token: Literal = repr.parse().unwrap(); - token.set_span(span); - LitInt { - repr: Box::new(LitIntRepr { - token, - digits, - suffix, - }), - } - } - - pub fn base10_digits(&self) -> &str { - &self.repr.digits - } - - /// Parses the literal into a selected number type. - /// - /// This is equivalent to `lit.base10_digits().parse()` except that the - /// resulting errors will be correctly spanned to point to the literal token - /// in the macro input. 
- /// - /// ``` - /// use syn::LitInt; - /// use syn::parse::{Parse, ParseStream, Result}; - /// - /// struct Port { - /// value: u16, - /// } - /// - /// impl Parse for Port { - /// fn parse(input: ParseStream) -> Result<Self> { - /// let lit: LitInt = input.parse()?; - /// let value = lit.base10_parse::<u16>()?; - /// Ok(Port { value }) - /// } - /// } - /// ``` - pub fn base10_parse<N>(&self) -> Result<N> - where - N: FromStr, - N::Err: Display, - { - self.base10_digits() - .parse() - .map_err(|err| Error::new(self.span(), err)) - } - - pub fn suffix(&self) -> &str { - &self.repr.suffix - } - - pub fn span(&self) -> Span { - self.repr.token.span() - } - - pub fn set_span(&mut self, span: Span) { - self.repr.token.set_span(span); - } - - pub fn token(&self) -> Literal { - self.repr.token.clone() - } -} - -impl From<Literal> for LitInt { - #[track_caller] - fn from(token: Literal) -> Self { - let repr = token.to_string(); - if let Some((digits, suffix)) = value::parse_lit_int(&repr) { - LitInt { - repr: Box::new(LitIntRepr { - token, - digits, - suffix, - }), - } - } else { - panic!("not an integer literal: `{}`", repr); - } - } -} - -impl Display for LitInt { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - self.repr.token.fmt(formatter) - } -} - -impl LitFloat { - #[track_caller] - pub fn new(repr: &str, span: Span) -> Self { - let (digits, suffix) = match value::parse_lit_float(repr) { - Some(parse) => parse, - None => panic!("not a float literal: `{}`", repr), - }; - - let mut token: Literal = repr.parse().unwrap(); - token.set_span(span); - LitFloat { - repr: Box::new(LitFloatRepr { - token, - digits, - suffix, - }), - } - } - - pub fn base10_digits(&self) -> &str { - &self.repr.digits - } - - pub fn base10_parse<N>(&self) -> Result<N> - where - N: FromStr, - N::Err: Display, - { - self.base10_digits() - .parse() - .map_err(|err| Error::new(self.span(), err)) - } - - pub fn suffix(&self) -> &str { - &self.repr.suffix - } - - pub fn span(&self) -> Span { - self.repr.token.span() - } - - pub fn set_span(&mut self, span: Span) { - self.repr.token.set_span(span); - } - - pub fn token(&self) -> Literal { - self.repr.token.clone() - } -} - -impl From<Literal> for LitFloat { - #[track_caller] - fn from(token: Literal) -> Self { - let repr = token.to_string(); - if let Some((digits, suffix)) = value::parse_lit_float(&repr) { - LitFloat { - repr: Box::new(LitFloatRepr { - token, - digits, - suffix, - }), - } - } else { - panic!("not a float literal: `{}`", repr); - } - } -} - -impl Display for LitFloat { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - self.repr.token.fmt(formatter) - } -} - -impl LitBool { - pub fn new(value: bool, span: Span) -> Self { - LitBool { value, span } - } - - pub fn value(&self) -> bool { - self.value - } - - pub fn span(&self) -> Span { - self.span - } - - pub fn set_span(&mut self, span: Span) { - self.span = span; - } - - pub fn token(&self) -> Ident { - let s = if self.value { "true" } else { "false" }; - Ident::new(s, self.span) - } -} - -#[cfg(feature = "extra-traits")] -mod debug_impls { - use crate::lit::{LitBool, LitByte, LitByteStr, LitCStr, LitChar, LitFloat, LitInt, LitStr}; - use std::fmt::{self, Debug}; - - #[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] - impl Debug for LitStr { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - self.debug(formatter, "LitStr") - } - } - - impl LitStr { - pub(crate) fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { - formatter - 
.debug_struct(name) - .field("token", &format_args!("{}", self.repr.token)) - .finish() - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] - impl Debug for LitByteStr { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - self.debug(formatter, "LitByteStr") - } - } - - impl LitByteStr { - pub(crate) fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { - formatter - .debug_struct(name) - .field("token", &format_args!("{}", self.repr.token)) - .finish() - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] - impl Debug for LitCStr { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - self.debug(formatter, "LitCStr") - } - } - - impl LitCStr { - pub(crate) fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { - formatter - .debug_struct(name) - .field("token", &format_args!("{}", self.repr.token)) - .finish() - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] - impl Debug for LitByte { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - self.debug(formatter, "LitByte") - } - } - - impl LitByte { - pub(crate) fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { - formatter - .debug_struct(name) - .field("token", &format_args!("{}", self.repr.token)) - .finish() - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] - impl Debug for LitChar { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - self.debug(formatter, "LitChar") - } - } - - impl LitChar { - pub(crate) fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { - formatter - .debug_struct(name) - .field("token", &format_args!("{}", self.repr.token)) - .finish() - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] - impl Debug for LitInt { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - self.debug(formatter, "LitInt") - } - } - - impl LitInt { - pub(crate) fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { - formatter - .debug_struct(name) - .field("token", &format_args!("{}", self.repr.token)) - .finish() - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] - impl Debug for LitFloat { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - self.debug(formatter, "LitFloat") - } - } - - impl LitFloat { - pub(crate) fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { - formatter - .debug_struct(name) - .field("token", &format_args!("{}", self.repr.token)) - .finish() - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] - impl Debug for LitBool { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - self.debug(formatter, "LitBool") - } - } - - impl LitBool { - pub(crate) fn debug(&self, formatter: &mut fmt::Formatter, name: &str) -> fmt::Result { - formatter - .debug_struct(name) - .field("value", &self.value) - .finish() - } - } -} - -#[cfg(feature = "clone-impls")] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for LitRepr { - fn clone(&self) -> Self { - LitRepr { - token: self.token.clone(), - suffix: self.suffix.clone(), - } - } -} - -#[cfg(feature = "clone-impls")] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for LitIntRepr { - fn clone(&self) -> Self { - LitIntRepr { - token: self.token.clone(), - digits: self.digits.clone(), - suffix: self.suffix.clone(), - } - } -} - -#[cfg(feature = "clone-impls")] -#[cfg_attr(docsrs, doc(cfg(feature = 
"clone-impls")))] -impl Clone for LitFloatRepr { - fn clone(&self) -> Self { - LitFloatRepr { - token: self.token.clone(), - digits: self.digits.clone(), - suffix: self.suffix.clone(), - } - } -} - -macro_rules! lit_extra_traits { - ($ty:ident) => { - #[cfg(feature = "clone-impls")] - #[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] - impl Clone for $ty { - fn clone(&self) -> Self { - $ty { - repr: self.repr.clone(), - } - } - } - - #[cfg(feature = "extra-traits")] - #[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] - impl PartialEq for $ty { - fn eq(&self, other: &Self) -> bool { - self.repr.token.to_string() == other.repr.token.to_string() - } - } - - #[cfg(feature = "extra-traits")] - #[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] - impl Hash for $ty { - fn hash<H>(&self, state: &mut H) - where - H: Hasher, - { - self.repr.token.to_string().hash(state); - } - } - - #[cfg(feature = "parsing")] - pub_if_not_doc! { - #[doc(hidden)] - #[allow(non_snake_case)] - pub fn $ty(marker: lookahead::TokenMarker) -> $ty { - match marker {} - } - } - }; -} - -lit_extra_traits!(LitStr); -lit_extra_traits!(LitByteStr); -lit_extra_traits!(LitCStr); -lit_extra_traits!(LitByte); -lit_extra_traits!(LitChar); -lit_extra_traits!(LitInt); -lit_extra_traits!(LitFloat); - -#[cfg(feature = "parsing")] -pub_if_not_doc! { - #[doc(hidden)] - #[allow(non_snake_case)] - pub fn LitBool(marker: lookahead::TokenMarker) -> LitBool { - match marker {} - } -} - -/// The style of a string literal, either plain quoted or a raw string like -/// `r##"data"##`. -#[doc(hidden)] // https://github.com/dtolnay/syn/issues/1566 -pub enum StrStyle { - /// An ordinary string like `"data"`. - Cooked, - /// A raw string like `r##"data"##`. - /// - /// The unsigned integer is the number of `#` symbols used. - Raw(usize), -} - -#[cfg(feature = "parsing")] -pub_if_not_doc! 
{ - #[doc(hidden)] - #[allow(non_snake_case)] - pub fn Lit(marker: lookahead::TokenMarker) -> Lit { - match marker {} - } -} - -#[cfg(feature = "parsing")] -pub(crate) mod parsing { - use crate::buffer::Cursor; - use crate::error::Result; - use crate::lit::{ - value, Lit, LitBool, LitByte, LitByteStr, LitCStr, LitChar, LitFloat, LitFloatRepr, LitInt, - LitIntRepr, LitStr, - }; - use crate::parse::{Parse, ParseStream, Unexpected}; - use crate::token::{self, Token}; - use proc_macro2::{Literal, Punct, Span}; - use std::cell::Cell; - use std::rc::Rc; - - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - impl Parse for Lit { - fn parse(input: ParseStream) -> Result<Self> { - input.step(|cursor| { - if let Some((lit, rest)) = cursor.literal() { - return Ok((Lit::new(lit), rest)); - } - - if let Some((ident, rest)) = cursor.ident() { - let value = ident == "true"; - if value || ident == "false" { - let lit_bool = LitBool { - value, - span: ident.span(), - }; - return Ok((Lit::Bool(lit_bool), rest)); - } - } - - if let Some((punct, rest)) = cursor.punct() { - if punct.as_char() == '-' { - if let Some((lit, rest)) = parse_negative_lit(punct, rest) { - return Ok((lit, rest)); - } - } - } - - Err(cursor.error("expected literal")) - }) - } - } - - fn parse_negative_lit(neg: Punct, cursor: Cursor) -> Option<(Lit, Cursor)> { - let (lit, rest) = cursor.literal()?; - - let mut span = neg.span(); - span = span.join(lit.span()).unwrap_or(span); - - let mut repr = lit.to_string(); - repr.insert(0, '-'); - - if let Some((digits, suffix)) = value::parse_lit_int(&repr) { - let mut token: Literal = repr.parse().unwrap(); - token.set_span(span); - return Some(( - Lit::Int(LitInt { - repr: Box::new(LitIntRepr { - token, - digits, - suffix, - }), - }), - rest, - )); - } - - let (digits, suffix) = value::parse_lit_float(&repr)?; - let mut token: Literal = repr.parse().unwrap(); - token.set_span(span); - Some(( - Lit::Float(LitFloat { - repr: Box::new(LitFloatRepr { - token, - digits, - suffix, - }), - }), - rest, - )) - } - - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - impl Parse for LitStr { - fn parse(input: ParseStream) -> Result<Self> { - let head = input.fork(); - match input.parse() { - Ok(Lit::Str(lit)) => Ok(lit), - _ => Err(head.error("expected string literal")), - } - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - impl Parse for LitByteStr { - fn parse(input: ParseStream) -> Result<Self> { - let head = input.fork(); - match input.parse() { - Ok(Lit::ByteStr(lit)) => Ok(lit), - _ => Err(head.error("expected byte string literal")), - } - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - impl Parse for LitCStr { - fn parse(input: ParseStream) -> Result<Self> { - let head = input.fork(); - match input.parse() { - Ok(Lit::CStr(lit)) => Ok(lit), - _ => Err(head.error("expected C string literal")), - } - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - impl Parse for LitByte { - fn parse(input: ParseStream) -> Result<Self> { - let head = input.fork(); - match input.parse() { - Ok(Lit::Byte(lit)) => Ok(lit), - _ => Err(head.error("expected byte literal")), - } - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - impl Parse for LitChar { - fn parse(input: ParseStream) -> Result<Self> { - let head = input.fork(); - match input.parse() { - Ok(Lit::Char(lit)) => Ok(lit), - _ => Err(head.error("expected character literal")), - } - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - impl Parse for LitInt { - fn parse(input: ParseStream) -> 
Result<Self> { - let head = input.fork(); - match input.parse() { - Ok(Lit::Int(lit)) => Ok(lit), - _ => Err(head.error("expected integer literal")), - } - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - impl Parse for LitFloat { - fn parse(input: ParseStream) -> Result<Self> { - let head = input.fork(); - match input.parse() { - Ok(Lit::Float(lit)) => Ok(lit), - _ => Err(head.error("expected floating point literal")), - } - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - impl Parse for LitBool { - fn parse(input: ParseStream) -> Result<Self> { - let head = input.fork(); - match input.parse() { - Ok(Lit::Bool(lit)) => Ok(lit), - _ => Err(head.error("expected boolean literal")), - } - } - } - - fn peek_impl(cursor: Cursor, peek: fn(ParseStream) -> bool) -> bool { - let scope = Span::call_site(); - let unexpected = Rc::new(Cell::new(Unexpected::None)); - let buffer = crate::parse::new_parse_buffer(scope, cursor, unexpected); - peek(&buffer) - } - - macro_rules! impl_token { - ($display:literal $name:ty) => { - impl Token for $name { - fn peek(cursor: Cursor) -> bool { - fn peek(input: ParseStream) -> bool { - <$name as Parse>::parse(input).is_ok() - } - peek_impl(cursor, peek) - } - - fn display() -> &'static str { - $display - } - } - - impl token::private::Sealed for $name {} - }; - } - - impl_token!("literal" Lit); - impl_token!("string literal" LitStr); - impl_token!("byte string literal" LitByteStr); - impl_token!("C-string literal" LitCStr); - impl_token!("byte literal" LitByte); - impl_token!("character literal" LitChar); - impl_token!("integer literal" LitInt); - impl_token!("floating point literal" LitFloat); - impl_token!("boolean literal" LitBool); -} - -#[cfg(feature = "printing")] -mod printing { - use crate::lit::{LitBool, LitByte, LitByteStr, LitCStr, LitChar, LitFloat, LitInt, LitStr}; - use proc_macro2::TokenStream; - use quote::{ToTokens, TokenStreamExt as _}; - - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for LitStr { - fn to_tokens(&self, tokens: &mut TokenStream) { - self.repr.token.to_tokens(tokens); - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for LitByteStr { - fn to_tokens(&self, tokens: &mut TokenStream) { - self.repr.token.to_tokens(tokens); - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for LitCStr { - fn to_tokens(&self, tokens: &mut TokenStream) { - self.repr.token.to_tokens(tokens); - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for LitByte { - fn to_tokens(&self, tokens: &mut TokenStream) { - self.repr.token.to_tokens(tokens); - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for LitChar { - fn to_tokens(&self, tokens: &mut TokenStream) { - self.repr.token.to_tokens(tokens); - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for LitInt { - fn to_tokens(&self, tokens: &mut TokenStream) { - self.repr.token.to_tokens(tokens); - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for LitFloat { - fn to_tokens(&self, tokens: &mut TokenStream) { - self.repr.token.to_tokens(tokens); - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for LitBool { - fn to_tokens(&self, tokens: &mut TokenStream) { - tokens.append(self.token()); - } - } -} - -mod value { - use crate::bigint::BigInt; - use crate::lit::{ - Lit, LitBool, LitByte, LitByteStr, LitCStr, LitChar, LitFloat, LitFloatRepr, LitInt, - LitIntRepr, 
LitRepr, LitStr, - }; - use proc_macro2::{Literal, Span}; - use std::char; - use std::ffi::CString; - use std::ops::{Index, RangeFrom}; - - impl Lit { - /// Interpret a Syn literal from a proc-macro2 literal. - pub fn new(token: Literal) -> Self { - let repr = token.to_string(); - Lit::from_str(token, &repr) - } - - #[cfg(fuzzing)] - #[doc(hidden)] - pub fn from_str_for_fuzzing(repr: &str) -> Self { - let token = Literal::u8_unsuffixed(0); - Lit::from_str(token, repr) - } - - fn from_str(token: Literal, repr: &str) -> Self { - match byte(repr, 0) { - // "...", r"...", r#"..."# - b'"' | b'r' => { - if let Some((_, suffix)) = parse_lit_str(repr) { - return Lit::Str(LitStr { - repr: Box::new(LitRepr { token, suffix }), - }); - } - } - b'b' => match byte(repr, 1) { - // b"...", br"...", br#"...#" - b'"' | b'r' => { - if let Some((_, suffix)) = parse_lit_byte_str(repr) { - return Lit::ByteStr(LitByteStr { - repr: Box::new(LitRepr { token, suffix }), - }); - } - } - // b'...' - b'\'' => { - if let Some((_, suffix)) = parse_lit_byte(repr) { - return Lit::Byte(LitByte { - repr: Box::new(LitRepr { token, suffix }), - }); - } - } - _ => {} - }, - b'c' => match byte(repr, 1) { - // c"...", cr"...", cr#"..."# - b'"' | b'r' => { - if let Some((_, suffix)) = parse_lit_c_str(repr) { - return Lit::CStr(LitCStr { - repr: Box::new(LitRepr { token, suffix }), - }); - } - } - _ => {} - }, - // '...' - b'\'' => { - if let Some((_, suffix)) = parse_lit_char(repr) { - return Lit::Char(LitChar { - repr: Box::new(LitRepr { token, suffix }), - }); - } - } - b'0'..=b'9' | b'-' => { - // 0, 123, 0xFF, 0o77, 0b11 - if let Some((digits, suffix)) = parse_lit_int(repr) { - return Lit::Int(LitInt { - repr: Box::new(LitIntRepr { - token, - digits, - suffix, - }), - }); - } - // 1.0, 1e-1, 1e+1 - if let Some((digits, suffix)) = parse_lit_float(repr) { - return Lit::Float(LitFloat { - repr: Box::new(LitFloatRepr { - token, - digits, - suffix, - }), - }); - } - } - // true, false - b't' | b'f' => { - if repr == "true" || repr == "false" { - return Lit::Bool(LitBool { - value: repr == "true", - span: token.span(), - }); - } - } - b'(' if repr == "(/*ERROR*/)" => return Lit::Verbatim(token), - _ => {} - } - - Lit::Verbatim(token) - } - - pub fn suffix(&self) -> &str { - match self { - Lit::Str(lit) => lit.suffix(), - Lit::ByteStr(lit) => lit.suffix(), - Lit::CStr(lit) => lit.suffix(), - Lit::Byte(lit) => lit.suffix(), - Lit::Char(lit) => lit.suffix(), - Lit::Int(lit) => lit.suffix(), - Lit::Float(lit) => lit.suffix(), - Lit::Bool(_) | Lit::Verbatim(_) => "", - } - } - - pub fn span(&self) -> Span { - match self { - Lit::Str(lit) => lit.span(), - Lit::ByteStr(lit) => lit.span(), - Lit::CStr(lit) => lit.span(), - Lit::Byte(lit) => lit.span(), - Lit::Char(lit) => lit.span(), - Lit::Int(lit) => lit.span(), - Lit::Float(lit) => lit.span(), - Lit::Bool(lit) => lit.span, - Lit::Verbatim(lit) => lit.span(), - } - } - - pub fn set_span(&mut self, span: Span) { - match self { - Lit::Str(lit) => lit.set_span(span), - Lit::ByteStr(lit) => lit.set_span(span), - Lit::CStr(lit) => lit.set_span(span), - Lit::Byte(lit) => lit.set_span(span), - Lit::Char(lit) => lit.set_span(span), - Lit::Int(lit) => lit.set_span(span), - Lit::Float(lit) => lit.set_span(span), - Lit::Bool(lit) => lit.span = span, - Lit::Verbatim(lit) => lit.set_span(span), - } - } - } - - /// Get the byte at offset idx, or a default of `b'\0'` if we're looking - /// past the end of the input buffer. 
- pub(crate) fn byte<S: AsRef<[u8]> + ?Sized>(s: &S, idx: usize) -> u8 { - let s = s.as_ref(); - if idx < s.len() { - s[idx] - } else { - 0 - } - } - - fn next_chr(s: &str) -> char { - s.chars().next().unwrap_or('\0') - } - - // Returns (content, suffix). - pub(crate) fn parse_lit_str(s: &str) -> Option<(Box<str>, Box<str>)> { - match byte(s, 0) { - b'"' => parse_lit_str_cooked(s), - b'r' => parse_lit_str_raw(s), - _ => unreachable!(), - } - } - - fn parse_lit_str_cooked(mut s: &str) -> Option<(Box<str>, Box<str>)> { - assert_eq!(byte(s, 0), b'"'); - s = &s[1..]; - - let mut content = String::new(); - 'outer: loop { - let ch = match byte(s, 0) { - b'"' => break, - b'\\' => { - let b = byte(s, 1); - s = s.get(2..)?; - match b { - b'x' => { - let (byte, rest) = backslash_x(s)?; - s = rest; - if byte > 0x7F { - // invalid \x byte in string literal - return None; - } - char::from(byte) - } - b'u' => { - let (ch, rest) = backslash_u(s)?; - s = rest; - ch - } - b'n' => '\n', - b'r' => '\r', - b't' => '\t', - b'\\' => '\\', - b'0' => '\0', - b'\'' => '\'', - b'"' => '"', - b'\r' | b'\n' => loop { - let b = byte(s, 0); - match b { - b' ' | b'\t' | b'\n' | b'\r' => s = &s[1..], - _ => continue 'outer, - } - }, - _ => { - // unexpected byte after backslash - return None; - } - } - } - b'\r' => { - if byte(s, 1) != b'\n' { - // bare carriage return not allowed in string - return None; - } - s = &s[2..]; - '\n' - } - _ => { - let ch = next_chr(s); - s = s.get(ch.len_utf8()..)?; - ch - } - }; - content.push(ch); - } - - assert!(s.starts_with('"')); - let content = content.into_boxed_str(); - let suffix = s[1..].to_owned().into_boxed_str(); - Some((content, suffix)) - } - - fn parse_lit_str_raw(mut s: &str) -> Option<(Box<str>, Box<str>)> { - assert_eq!(byte(s, 0), b'r'); - s = &s[1..]; - - let mut pounds = 0; - loop { - match byte(s, pounds) { - b'#' => pounds += 1, - b'"' => break, - _ => return None, - } - } - let close = s.rfind('"').unwrap(); - for end in s.get(close + 1..close + 1 + pounds)?.bytes() { - if end != b'#' { - return None; - } - } - - let content = s.get(pounds + 1..close)?.to_owned().into_boxed_str(); - let suffix = s[close + 1 + pounds..].to_owned().into_boxed_str(); - Some((content, suffix)) - } - - // Returns (content, suffix). - pub(crate) fn parse_lit_byte_str(s: &str) -> Option<(Vec<u8>, Box<str>)> { - assert_eq!(byte(s, 0), b'b'); - match byte(s, 1) { - b'"' => parse_lit_byte_str_cooked(s), - b'r' => parse_lit_byte_str_raw(s), - _ => unreachable!(), - } - } - - fn parse_lit_byte_str_cooked(mut s: &str) -> Option<(Vec<u8>, Box<str>)> { - assert_eq!(byte(s, 0), b'b'); - assert_eq!(byte(s, 1), b'"'); - s = &s[2..]; - - // We're going to want to have slices which don't respect codepoint boundaries. 
- let mut v = s.as_bytes(); - - let mut out = Vec::new(); - 'outer: loop { - let byte = match byte(v, 0) { - b'"' => break, - b'\\' => { - let b = byte(v, 1); - v = v.get(2..)?; - match b { - b'x' => { - let (b, rest) = backslash_x(v)?; - v = rest; - b - } - b'n' => b'\n', - b'r' => b'\r', - b't' => b'\t', - b'\\' => b'\\', - b'0' => b'\0', - b'\'' => b'\'', - b'"' => b'"', - b'\r' | b'\n' => loop { - let byte = byte(v, 0); - if matches!(byte, b' ' | b'\t' | b'\n' | b'\r') { - v = &v[1..]; - } else { - continue 'outer; - } - }, - _ => { - // unexpected byte after backslash - return None; - } - } - } - b'\r' => { - if byte(v, 1) != b'\n' { - // bare carriage return not allowed in string - return None; - } - v = &v[2..]; - b'\n' - } - b => { - v = v.get(1..)?; - b - } - }; - out.push(byte); - } - - assert_eq!(byte(v, 0), b'"'); - let suffix = s[s.len() - v.len() + 1..].to_owned().into_boxed_str(); - Some((out, suffix)) - } - - fn parse_lit_byte_str_raw(s: &str) -> Option<(Vec<u8>, Box<str>)> { - assert_eq!(byte(s, 0), b'b'); - let (value, suffix) = parse_lit_str_raw(&s[1..])?; - Some((String::from(value).into_bytes(), suffix)) - } - - // Returns (content, suffix). - pub(crate) fn parse_lit_c_str(s: &str) -> Option<(CString, Box<str>)> { - assert_eq!(byte(s, 0), b'c'); - match byte(s, 1) { - b'"' => parse_lit_c_str_cooked(s), - b'r' => parse_lit_c_str_raw(s), - _ => unreachable!(), - } - } - - fn parse_lit_c_str_cooked(mut s: &str) -> Option<(CString, Box<str>)> { - assert_eq!(byte(s, 0), b'c'); - assert_eq!(byte(s, 1), b'"'); - s = &s[2..]; - - // We're going to want to have slices which don't respect codepoint boundaries. - let mut v = s.as_bytes(); - - let mut out = Vec::new(); - 'outer: loop { - let byte = match byte(v, 0) { - b'"' => break, - b'\\' => { - let b = byte(v, 1); - v = v.get(2..)?; - match b { - b'x' => { - let (b, rest) = backslash_x(v)?; - if b == 0 { - // \x00 is not allowed in C-string literal - return None; - } - v = rest; - b - } - b'u' => { - let (ch, rest) = backslash_u(v)?; - if ch == '\0' { - // \u{0} is not allowed in C-string literal - return None; - } - v = rest; - out.extend_from_slice(ch.encode_utf8(&mut [0u8; 4]).as_bytes()); - continue 'outer; - } - b'n' => b'\n', - b'r' => b'\r', - b't' => b'\t', - b'\\' => b'\\', - b'\'' => b'\'', - b'"' => b'"', - b'\r' | b'\n' => loop { - let byte = byte(v, 0); - if matches!(byte, b' ' | b'\t' | b'\n' | b'\r') { - v = &v[1..]; - } else { - continue 'outer; - } - }, - _ => { - // unexpected byte after backslash - return None; - } - } - } - b'\r' => { - if byte(v, 1) != b'\n' { - // bare carriage return not allowed in string - return None; - } - v = &v[2..]; - b'\n' - } - b => { - v = v.get(1..)?; - b - } - }; - out.push(byte); - } - - assert_eq!(byte(v, 0), b'"'); - let suffix = s[s.len() - v.len() + 1..].to_owned().into_boxed_str(); - let cstring = CString::new(out).ok()?; - Some((cstring, suffix)) - } - - fn parse_lit_c_str_raw(s: &str) -> Option<(CString, Box<str>)> { - assert_eq!(byte(s, 0), b'c'); - let (value, suffix) = parse_lit_str_raw(&s[1..])?; - let cstring = CString::new(String::from(value)).ok()?; - Some((cstring, suffix)) - } - - // Returns (value, suffix). - pub(crate) fn parse_lit_byte(s: &str) -> Option<(u8, Box<str>)> { - assert_eq!(byte(s, 0), b'b'); - assert_eq!(byte(s, 1), b'\''); - - // We're going to want to have slices which don't respect codepoint boundaries. 
- let mut v = &s.as_bytes()[2..]; - - let b = match byte(v, 0) { - b'\\' => { - let b = byte(v, 1); - v = v.get(2..)?; - match b { - b'x' => { - let (b, rest) = backslash_x(v)?; - v = rest; - b - } - b'n' => b'\n', - b'r' => b'\r', - b't' => b'\t', - b'\\' => b'\\', - b'0' => b'\0', - b'\'' => b'\'', - b'"' => b'"', - _ => { - // unexpected byte after backslash - return None; - } - } - } - b => { - v = v.get(1..)?; - b - } - }; - - if byte(v, 0) != b'\'' { - return None; - } - - let suffix = s[s.len() - v.len() + 1..].to_owned().into_boxed_str(); - Some((b, suffix)) - } - - // Returns (value, suffix). - pub(crate) fn parse_lit_char(mut s: &str) -> Option<(char, Box<str>)> { - assert_eq!(byte(s, 0), b'\''); - s = &s[1..]; - - let ch = match byte(s, 0) { - b'\\' => { - let b = byte(s, 1); - s = s.get(2..)?; - match b { - b'x' => { - let (byte, rest) = backslash_x(s)?; - s = rest; - if byte > 0x7F { - // invalid \x byte in character literal - return None; - } - char::from(byte) - } - b'u' => { - let (ch, rest) = backslash_u(s)?; - s = rest; - ch - } - b'n' => '\n', - b'r' => '\r', - b't' => '\t', - b'\\' => '\\', - b'0' => '\0', - b'\'' => '\'', - b'"' => '"', - _ => { - // unexpected byte after backslash - return None; - } - } - } - _ => { - let ch = next_chr(s); - s = s.get(ch.len_utf8()..)?; - ch - } - }; - - if byte(s, 0) != b'\'' { - return None; - } - - let suffix = s[1..].to_owned().into_boxed_str(); - Some((ch, suffix)) - } - - fn backslash_x<S>(s: &S) -> Option<(u8, &S)> - where - S: Index<RangeFrom<usize>, Output = S> + AsRef<[u8]> + ?Sized, - { - let mut ch = 0; - let b0 = byte(s, 0); - let b1 = byte(s, 1); - ch += 0x10 - * match b0 { - b'0'..=b'9' => b0 - b'0', - b'a'..=b'f' => 10 + (b0 - b'a'), - b'A'..=b'F' => 10 + (b0 - b'A'), - _ => return None, - }; - ch += match b1 { - b'0'..=b'9' => b1 - b'0', - b'a'..=b'f' => 10 + (b1 - b'a'), - b'A'..=b'F' => 10 + (b1 - b'A'), - _ => return None, - }; - Some((ch, &s[2..])) - } - - fn backslash_u<S>(mut s: &S) -> Option<(char, &S)> - where - S: Index<RangeFrom<usize>, Output = S> + AsRef<[u8]> + ?Sized, - { - if byte(s, 0) != b'{' { - return None; - } - s = &s[1..]; - - let mut ch = 0; - let mut digits = 0; - loop { - let b = byte(s, 0); - let digit = match b { - b'0'..=b'9' => b - b'0', - b'a'..=b'f' => 10 + b - b'a', - b'A'..=b'F' => 10 + b - b'A', - b'_' if digits > 0 => { - s = &s[1..]; - continue; - } - b'}' if digits == 0 => return None, - b'}' => break, - _ => return None, - }; - if digits == 6 { - return None; - } - ch *= 0x10; - ch += u32::from(digit); - digits += 1; - s = &s[1..]; - } - if byte(s, 0) != b'}' { - return None; - } - s = &s[1..]; - - let ch = char::from_u32(ch)?; - Some((ch, s)) - } - - // Returns base 10 digits and suffix. - pub(crate) fn parse_lit_int(mut s: &str) -> Option<(Box<str>, Box<str>)> { - let negative = byte(s, 0) == b'-'; - if negative { - s = &s[1..]; - } - - let base = match (byte(s, 0), byte(s, 1)) { - (b'0', b'x') => { - s = &s[2..]; - 16 - } - (b'0', b'o') => { - s = &s[2..]; - 8 - } - (b'0', b'b') => { - s = &s[2..]; - 2 - } - (b'0'..=b'9', _) => 10, - _ => return None, - }; - - let mut value = BigInt::new(); - let mut has_digit = false; - 'outer: loop { - let b = byte(s, 0); - let digit = match b { - b'0'..=b'9' => b - b'0', - b'a'..=b'f' if base > 10 => b - b'a' + 10, - b'A'..=b'F' if base > 10 => b - b'A' + 10, - b'_' => { - s = &s[1..]; - continue; - } - // If looking at a floating point literal, we don't want to - // consider it an integer. - b'.' 
if base == 10 => return None, - b'e' | b'E' if base == 10 => { - let mut has_exp = false; - for (i, b) in s[1..].bytes().enumerate() { - match b { - b'_' => {} - b'-' | b'+' => return None, - b'0'..=b'9' => has_exp = true, - _ => { - let suffix = &s[1 + i..]; - if has_exp && crate::ident::xid_ok(suffix) { - return None; - } else { - break 'outer; - } - } - } - } - if has_exp { - return None; - } else { - break; - } - } - _ => break, - }; - - if digit >= base { - return None; - } - - has_digit = true; - value *= base; - value += digit; - s = &s[1..]; - } - - if !has_digit { - return None; - } - - let suffix = s; - if suffix.is_empty() || crate::ident::xid_ok(suffix) { - let mut repr = value.to_string(); - if negative { - repr.insert(0, '-'); - } - Some((repr.into_boxed_str(), suffix.to_owned().into_boxed_str())) - } else { - None - } - } - - // Returns base 10 digits and suffix. - pub(crate) fn parse_lit_float(input: &str) -> Option<(Box<str>, Box<str>)> { - // Rust's floating point literals are very similar to the ones parsed by - // the standard library, except that rust's literals can contain - // ignorable underscores. Let's remove those underscores. - - let mut bytes = input.to_owned().into_bytes(); - - let start = (*bytes.first()? == b'-') as usize; - match bytes.get(start)? { - b'0'..=b'9' => {} - _ => return None, - } - - let mut read = start; - let mut write = start; - let mut has_dot = false; - let mut has_e = false; - let mut has_sign = false; - let mut has_exponent = false; - while read < bytes.len() { - match bytes[read] { - b'_' => { - // Don't increase write - read += 1; - continue; - } - b'0'..=b'9' => { - if has_e { - has_exponent = true; - } - bytes[write] = bytes[read]; - } - b'.' => { - if has_e || has_dot { - return None; - } - has_dot = true; - bytes[write] = b'.'; - } - b'e' | b'E' => { - match bytes[read + 1..] - .iter() - .find(|b| **b != b'_') - .unwrap_or(&b'\0') - { - b'-' | b'+' | b'0'..=b'9' => {} - _ => break, - } - if has_e { - if has_exponent { - break; - } else { - return None; - } - } - has_e = true; - bytes[write] = b'e'; - } - b'-' | b'+' => { - if has_sign || has_exponent || !has_e { - return None; - } - has_sign = true; - if bytes[read] == b'-' { - bytes[write] = bytes[read]; - } else { - // Omit '+' - read += 1; - continue; - } - } - _ => break, - } - read += 1; - write += 1; - } - - if has_e && !has_exponent { - return None; - } - - let mut digits = String::from_utf8(bytes).unwrap(); - let suffix = digits.split_off(read); - digits.truncate(write); - if suffix.is_empty() || crate::ident::xid_ok(&suffix) { - Some((digits.into_boxed_str(), suffix.into_boxed_str())) - } else { - None - } - } -} diff --git a/vendor/syn/src/lookahead.rs b/vendor/syn/src/lookahead.rs deleted file mode 100644 index 10b4566135c9e9..00000000000000 --- a/vendor/syn/src/lookahead.rs +++ /dev/null @@ -1,348 +0,0 @@ -use crate::buffer::Cursor; -use crate::error::{self, Error}; -use crate::sealed::lookahead::Sealed; -use crate::span::IntoSpans; -use crate::token::{CustomToken, Token}; -use proc_macro2::{Delimiter, Span}; -use std::cell::RefCell; -use std::fmt::{self, Display}; - -/// Support for checking the next token in a stream to decide how to parse. -/// -/// An important advantage over [`ParseStream::peek`] is that here we -/// automatically construct an appropriate error message based on the token -/// alternatives that get peeked. If you are producing your own error message, -/// go ahead and use `ParseStream::peek` instead. 
-/// -/// Use [`ParseStream::lookahead1`] to construct this object. -/// -/// [`ParseStream::peek`]: crate::parse::ParseBuffer::peek -/// [`ParseStream::lookahead1`]: crate::parse::ParseBuffer::lookahead1 -/// -/// Consuming tokens from the source stream after constructing a lookahead -/// object does not also advance the lookahead object. -/// -/// # Example -/// -/// ``` -/// use syn::{ConstParam, Ident, Lifetime, LifetimeParam, Result, Token, TypeParam}; -/// use syn::parse::{Parse, ParseStream}; -/// -/// // A generic parameter, a single one of the comma-separated elements inside -/// // angle brackets in: -/// // -/// // fn f<T: Clone, 'a, 'b: 'a, const N: usize>() { ... } -/// // -/// // On invalid input, lookahead gives us a reasonable error message. -/// // -/// // error: expected one of: identifier, lifetime, `const` -/// // | -/// // 5 | fn f<!Sized>() {} -/// // | ^ -/// enum GenericParam { -/// Type(TypeParam), -/// Lifetime(LifetimeParam), -/// Const(ConstParam), -/// } -/// -/// impl Parse for GenericParam { -/// fn parse(input: ParseStream) -> Result<Self> { -/// let lookahead = input.lookahead1(); -/// if lookahead.peek(Ident) { -/// input.parse().map(GenericParam::Type) -/// } else if lookahead.peek(Lifetime) { -/// input.parse().map(GenericParam::Lifetime) -/// } else if lookahead.peek(Token![const]) { -/// input.parse().map(GenericParam::Const) -/// } else { -/// Err(lookahead.error()) -/// } -/// } -/// } -/// ``` -pub struct Lookahead1<'a> { - scope: Span, - cursor: Cursor<'a>, - comparisons: RefCell<Vec<&'static str>>, -} - -pub(crate) fn new(scope: Span, cursor: Cursor) -> Lookahead1 { - Lookahead1 { - scope, - cursor, - comparisons: RefCell::new(Vec::new()), - } -} - -fn peek_impl( - lookahead: &Lookahead1, - peek: fn(Cursor) -> bool, - display: fn() -> &'static str, -) -> bool { - if peek(lookahead.cursor) { - return true; - } - lookahead.comparisons.borrow_mut().push(display()); - false -} - -impl<'a> Lookahead1<'a> { - /// Looks at the next token in the parse stream to determine whether it - /// matches the requested type of token. - /// - /// # Syntax - /// - /// Note that this method does not use turbofish syntax. Pass the peek type - /// inside of parentheses. - /// - /// - `input.peek(Token![struct])` - /// - `input.peek(Token![==])` - /// - `input.peek(Ident)` *(does not accept keywords)* - /// - `input.peek(Ident::peek_any)` - /// - `input.peek(Lifetime)` - /// - `input.peek(token::Brace)` - pub fn peek<T: Peek>(&self, token: T) -> bool { - let _ = token; - peek_impl(self, T::Token::peek, T::Token::display) - } - - /// Triggers an error at the current position of the parse stream. - /// - /// The error message will identify all of the expected token types that - /// have been peeked against this lookahead instance. 
- pub fn error(self) -> Error { - let mut comparisons = self.comparisons.into_inner(); - comparisons.retain_mut(|display| { - if *display == "`)`" { - *display = match self.cursor.scope_delimiter() { - Delimiter::Parenthesis => "`)`", - Delimiter::Brace => "`}`", - Delimiter::Bracket => "`]`", - Delimiter::None => return false, - } - } - true - }); - match comparisons.len() { - 0 => { - if self.cursor.eof() { - Error::new(self.scope, "unexpected end of input") - } else { - Error::new(self.cursor.span(), "unexpected token") - } - } - 1 => { - let message = format!("expected {}", comparisons[0]); - error::new_at(self.scope, self.cursor, message) - } - 2 => { - let message = format!("expected {} or {}", comparisons[0], comparisons[1]); - error::new_at(self.scope, self.cursor, message) - } - _ => { - let message = format!("expected one of: {}", CommaSeparated(&comparisons)); - error::new_at(self.scope, self.cursor, message) - } - } - } -} - -struct CommaSeparated<'a>(&'a [&'a str]); - -impl<'a> Display for CommaSeparated<'a> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - let mut first = true; - for &s in self.0 { - if !first { - f.write_str(", ")?; - } - f.write_str(s)?; - first = false; - } - Ok(()) - } -} - -/// Types that can be parsed by looking at just one token. -/// -/// Use [`ParseStream::peek`] to peek one of these types in a parse stream -/// without consuming it from the stream. -/// -/// This trait is sealed and cannot be implemented for types outside of Syn. -/// -/// [`ParseStream::peek`]: crate::parse::ParseBuffer::peek -pub trait Peek: Sealed { - // Not public API. - #[doc(hidden)] - type Token: Token; -} - -/// Pseudo-token used for peeking the end of a parse stream. -/// -/// This type is only useful as an argument to one of the following functions: -/// -/// - [`ParseStream::peek`][crate::parse::ParseBuffer::peek] -/// - [`ParseStream::peek2`][crate::parse::ParseBuffer::peek2] -/// - [`ParseStream::peek3`][crate::parse::ParseBuffer::peek3] -/// - [`Lookahead1::peek`] -/// -/// The peek will return `true` if there are no remaining tokens after that -/// point in the parse stream. -/// -/// # Example -/// -/// Suppose we are parsing attributes containing core::fmt inspired formatting -/// arguments: -/// -/// - `#[fmt("simple example")]` -/// - `#[fmt("interpolation e{}ample", self.x)]` -/// - `#[fmt("interpolation e{x}ample")]` -/// -/// and we want to recognize the cases where no interpolation occurs so that -/// more efficient code can be generated. -/// -/// The following implementation uses `input.peek(Token![,]) && -/// input.peek2(End)` to recognize the case of a trailing comma without -/// consuming the comma from the parse stream, because if it isn't a trailing -/// comma, that same comma needs to be parsed as part of `args`. -/// -/// ``` -/// use proc_macro2::TokenStream; -/// use quote::quote; -/// use syn::parse::{End, Parse, ParseStream, Result}; -/// use syn::{parse_quote, Attribute, LitStr, Token}; -/// -/// struct FormatArgs { -/// template: LitStr, // "...{}..." -/// args: TokenStream, // , self.x -/// } -/// -/// impl Parse for FormatArgs { -/// fn parse(input: ParseStream) -> Result<Self> { -/// let template: LitStr = input.parse()?; -/// -/// let args = if input.is_empty() -/// || input.peek(Token![,]) && input.peek2(End) -/// { -/// input.parse::<Option<Token![,]>>()?; -/// TokenStream::new() -/// } else { -/// input.parse()? 
-/// }; -/// -/// Ok(FormatArgs { -/// template, -/// args, -/// }) -/// } -/// } -/// -/// fn main() -> Result<()> { -/// let attrs: Vec<Attribute> = parse_quote! { -/// #[fmt("simple example")] -/// #[fmt("interpolation e{}ample", self.x)] -/// #[fmt("interpolation e{x}ample")] -/// }; -/// -/// for attr in &attrs { -/// let FormatArgs { template, args } = attr.parse_args()?; -/// let requires_fmt_machinery = -/// !args.is_empty() || template.value().contains(['{', '}']); -/// let out = if requires_fmt_machinery { -/// quote! { -/// ::core::write!(__formatter, #template #args) -/// } -/// } else { -/// quote! { -/// __formatter.write_str(#template) -/// } -/// }; -/// println!("{}", out); -/// } -/// Ok(()) -/// } -/// ``` -/// -/// Implementing this parsing logic without `peek2(End)` is more clumsy because -/// we'd need a parse stream actually advanced past the comma before being able -/// to find out whether there is anything after it. It would look something -/// like: -/// -/// ``` -/// # use proc_macro2::TokenStream; -/// # use syn::parse::{ParseStream, Result}; -/// # use syn::Token; -/// # -/// # fn parse(input: ParseStream) -> Result<()> { -/// use syn::parse::discouraged::Speculative as _; -/// -/// let ahead = input.fork(); -/// ahead.parse::<Option<Token![,]>>()?; -/// let args = if ahead.is_empty() { -/// input.advance_to(&ahead); -/// TokenStream::new() -/// } else { -/// input.parse()? -/// }; -/// # Ok(()) -/// # } -/// ``` -/// -/// or: -/// -/// ``` -/// # use proc_macro2::TokenStream; -/// # use syn::parse::{ParseStream, Result}; -/// # use syn::Token; -/// # -/// # fn parse(input: ParseStream) -> Result<()> { -/// use quote::ToTokens as _; -/// -/// let comma: Option<Token![,]> = input.parse()?; -/// let mut args = TokenStream::new(); -/// if !input.is_empty() { -/// comma.to_tokens(&mut args); -/// input.parse::<TokenStream>()?.to_tokens(&mut args); -/// } -/// # Ok(()) -/// # } -/// ``` -pub struct End; - -impl Copy for End {} - -impl Clone for End { - fn clone(&self) -> Self { - *self - } -} - -impl Peek for End { - type Token = Self; -} - -impl CustomToken for End { - fn peek(cursor: Cursor) -> bool { - cursor.eof() - } - - fn display() -> &'static str { - "`)`" // Lookahead1 error message will fill in the expected close delimiter - } -} - -impl<F: Copy + FnOnce(TokenMarker) -> T, T: Token> Peek for F { - type Token = T; -} - -pub enum TokenMarker {} - -impl<S> IntoSpans<S> for TokenMarker { - fn into_spans(self) -> S { - match self {} - } -} - -impl<F: Copy + FnOnce(TokenMarker) -> T, T: Token> Sealed for F {} - -impl Sealed for End {} diff --git a/vendor/syn/src/mac.rs b/vendor/syn/src/mac.rs deleted file mode 100644 index 15107801cfee02..00000000000000 --- a/vendor/syn/src/mac.rs +++ /dev/null @@ -1,225 +0,0 @@ -#[cfg(feature = "parsing")] -use crate::error::Result; -#[cfg(feature = "parsing")] -use crate::parse::{Parse, ParseStream, Parser}; -use crate::path::Path; -use crate::token::{Brace, Bracket, Paren}; -use proc_macro2::extra::DelimSpan; -#[cfg(feature = "parsing")] -use proc_macro2::Delimiter; -use proc_macro2::TokenStream; -#[cfg(feature = "parsing")] -use proc_macro2::TokenTree; - -ast_struct! { - /// A macro invocation: `println!("{}", mac)`. - #[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] - pub struct Macro { - pub path: Path, - pub bang_token: Token![!], - pub delimiter: MacroDelimiter, - pub tokens: TokenStream, - } -} - -ast_enum! { - /// A grouping token that surrounds a macro body: `m!(...)` or `m!{...}` or `m![...]`. 
- #[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] - pub enum MacroDelimiter { - Paren(Paren), - Brace(Brace), - Bracket(Bracket), - } -} - -impl MacroDelimiter { - pub fn span(&self) -> &DelimSpan { - match self { - MacroDelimiter::Paren(token) => &token.span, - MacroDelimiter::Brace(token) => &token.span, - MacroDelimiter::Bracket(token) => &token.span, - } - } - - #[cfg(all(feature = "full", any(feature = "parsing", feature = "printing")))] - pub(crate) fn is_brace(&self) -> bool { - match self { - MacroDelimiter::Brace(_) => true, - MacroDelimiter::Paren(_) | MacroDelimiter::Bracket(_) => false, - } - } -} - -impl Macro { - /// Parse the tokens within the macro invocation's delimiters into a syntax - /// tree. - /// - /// This is equivalent to `syn::parse2::<T>(mac.tokens)` except that it - /// produces a more useful span when `tokens` is empty. - /// - /// # Example - /// - /// ``` - /// use syn::{parse_quote, Expr, ExprLit, Ident, Lit, LitStr, Macro, Token}; - /// use syn::ext::IdentExt; - /// use syn::parse::{Error, Parse, ParseStream, Result}; - /// use syn::punctuated::Punctuated; - /// - /// // The arguments expected by libcore's format_args macro, and as a - /// // result most other formatting and printing macros like println. - /// // - /// // println!("{} is {number:.prec$}", "x", prec=5, number=0.01) - /// struct FormatArgs { - /// format_string: Expr, - /// positional_args: Vec<Expr>, - /// named_args: Vec<(Ident, Expr)>, - /// } - /// - /// impl Parse for FormatArgs { - /// fn parse(input: ParseStream) -> Result<Self> { - /// let format_string: Expr; - /// let mut positional_args = Vec::new(); - /// let mut named_args = Vec::new(); - /// - /// format_string = input.parse()?; - /// while !input.is_empty() { - /// input.parse::<Token![,]>()?; - /// if input.is_empty() { - /// break; - /// } - /// if input.peek(Ident::peek_any) && input.peek2(Token![=]) { - /// while !input.is_empty() { - /// let name: Ident = input.call(Ident::parse_any)?; - /// input.parse::<Token![=]>()?; - /// let value: Expr = input.parse()?; - /// named_args.push((name, value)); - /// if input.is_empty() { - /// break; - /// } - /// input.parse::<Token![,]>()?; - /// } - /// break; - /// } - /// positional_args.push(input.parse()?); - /// } - /// - /// Ok(FormatArgs { - /// format_string, - /// positional_args, - /// named_args, - /// }) - /// } - /// } - /// - /// // Extract the first argument, the format string literal, from an - /// // invocation of a formatting or printing macro. - /// fn get_format_string(m: &Macro) -> Result<LitStr> { - /// let args: FormatArgs = m.parse_body()?; - /// match args.format_string { - /// Expr::Lit(ExprLit { lit: Lit::Str(lit), .. }) => Ok(lit), - /// other => { - /// // First argument was not a string literal expression. - /// // Maybe something like: println!(concat!(...), ...) - /// Err(Error::new_spanned(other, "format string must be a string literal")) - /// } - /// } - /// } - /// - /// fn main() { - /// let invocation = parse_quote! { - /// println!("{:?}", Instant::now()) - /// }; - /// let lit = get_format_string(&invocation).unwrap(); - /// assert_eq!(lit.value(), "{:?}"); - /// } - /// ``` - #[cfg(feature = "parsing")] - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - pub fn parse_body<T: Parse>(&self) -> Result<T> { - self.parse_body_with(T::parse) - } - - /// Parse the tokens within the macro invocation's delimiters using the - /// given parser. 
- #[cfg(feature = "parsing")] - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - pub fn parse_body_with<F: Parser>(&self, parser: F) -> Result<F::Output> { - let scope = self.delimiter.span().close(); - crate::parse::parse_scoped(parser, scope, self.tokens.clone()) - } -} - -#[cfg(feature = "parsing")] -pub(crate) fn parse_delimiter(input: ParseStream) -> Result<(MacroDelimiter, TokenStream)> { - input.step(|cursor| { - if let Some((TokenTree::Group(g), rest)) = cursor.token_tree() { - let span = g.delim_span(); - let delimiter = match g.delimiter() { - Delimiter::Parenthesis => MacroDelimiter::Paren(Paren(span)), - Delimiter::Brace => MacroDelimiter::Brace(Brace(span)), - Delimiter::Bracket => MacroDelimiter::Bracket(Bracket(span)), - Delimiter::None => { - return Err(cursor.error("expected delimiter")); - } - }; - Ok(((delimiter, g.stream()), rest)) - } else { - Err(cursor.error("expected delimiter")) - } - }) -} - -#[cfg(feature = "parsing")] -pub(crate) mod parsing { - use crate::error::Result; - use crate::mac::{parse_delimiter, Macro}; - use crate::parse::{Parse, ParseStream}; - use crate::path::Path; - - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - impl Parse for Macro { - fn parse(input: ParseStream) -> Result<Self> { - let tokens; - Ok(Macro { - path: input.call(Path::parse_mod_style)?, - bang_token: input.parse()?, - delimiter: { - let (delimiter, content) = parse_delimiter(input)?; - tokens = content; - delimiter - }, - tokens, - }) - } - } -} - -#[cfg(feature = "printing")] -mod printing { - use crate::mac::{Macro, MacroDelimiter}; - use crate::path; - use crate::path::printing::PathStyle; - use crate::token; - use proc_macro2::{Delimiter, TokenStream}; - use quote::ToTokens; - - impl MacroDelimiter { - pub(crate) fn surround(&self, tokens: &mut TokenStream, inner: TokenStream) { - let (delim, span) = match self { - MacroDelimiter::Paren(paren) => (Delimiter::Parenthesis, paren.span), - MacroDelimiter::Brace(brace) => (Delimiter::Brace, brace.span), - MacroDelimiter::Bracket(bracket) => (Delimiter::Bracket, bracket.span), - }; - token::printing::delim(delim, span.join(), tokens, inner); - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for Macro { - fn to_tokens(&self, tokens: &mut TokenStream) { - path::printing::print_path(tokens, &self.path, PathStyle::Mod); - self.bang_token.to_tokens(tokens); - self.delimiter.surround(tokens, self.tokens.clone()); - } - } -} diff --git a/vendor/syn/src/macros.rs b/vendor/syn/src/macros.rs deleted file mode 100644 index 167f2cf260a7c7..00000000000000 --- a/vendor/syn/src/macros.rs +++ /dev/null @@ -1,182 +0,0 @@ -#[cfg_attr( - not(any(feature = "full", feature = "derive")), - allow(unknown_lints, unused_macro_rules) -)] -macro_rules! 
ast_struct { - ( - $(#[$attr:meta])* - $pub:ident $struct:ident $name:ident #full $body:tt - ) => { - check_keyword_matches!(pub $pub); - check_keyword_matches!(struct $struct); - - #[cfg(feature = "full")] - $(#[$attr])* $pub $struct $name $body - - #[cfg(not(feature = "full"))] - $(#[$attr])* $pub $struct $name { - _noconstruct: ::std::marker::PhantomData<::proc_macro2::Span>, - } - - #[cfg(all(not(feature = "full"), feature = "printing"))] - impl ::quote::ToTokens for $name { - fn to_tokens(&self, _: &mut ::proc_macro2::TokenStream) { - unreachable!() - } - } - }; - - ( - $(#[$attr:meta])* - $pub:ident $struct:ident $name:ident $body:tt - ) => { - check_keyword_matches!(pub $pub); - check_keyword_matches!(struct $struct); - - $(#[$attr])* $pub $struct $name $body - }; -} - -#[cfg(any(feature = "full", feature = "derive"))] -macro_rules! ast_enum { - ( - $(#[$enum_attr:meta])* - $pub:ident $enum:ident $name:ident $body:tt - ) => { - check_keyword_matches!(pub $pub); - check_keyword_matches!(enum $enum); - - $(#[$enum_attr])* $pub $enum $name $body - }; -} - -macro_rules! ast_enum_of_structs { - ( - $(#[$enum_attr:meta])* - $pub:ident $enum:ident $name:ident $body:tt - ) => { - check_keyword_matches!(pub $pub); - check_keyword_matches!(enum $enum); - - $(#[$enum_attr])* $pub $enum $name $body - - ast_enum_of_structs_impl!($name $body); - - #[cfg(feature = "printing")] - generate_to_tokens!(() tokens $name $body); - }; -} - -macro_rules! ast_enum_of_structs_impl { - ( - $name:ident { - $( - $(#[cfg $cfg_attr:tt])* - $(#[doc $($doc_attr:tt)*])* - $variant:ident $( ($member:ident) )*, - )* - } - ) => { - $($( - ast_enum_from_struct!($name::$variant, $member); - )*)* - }; -} - -macro_rules! ast_enum_from_struct { - // No From<TokenStream> for verbatim variants. - ($name:ident::Verbatim, $member:ident) => {}; - - ($name:ident::$variant:ident, $member:ident) => { - impl From<$member> for $name { - fn from(e: $member) -> $name { - $name::$variant(e) - } - } - }; -} - -#[cfg(feature = "printing")] -macro_rules! generate_to_tokens { - ( - ($($arms:tt)*) $tokens:ident $name:ident { - $(#[cfg $cfg_attr:tt])* - $(#[doc $($doc_attr:tt)*])* - $variant:ident, - $($next:tt)* - } - ) => { - generate_to_tokens!( - ($($arms)* $(#[cfg $cfg_attr])* $name::$variant => {}) - $tokens $name { $($next)* } - ); - }; - - ( - ($($arms:tt)*) $tokens:ident $name:ident { - $(#[cfg $cfg_attr:tt])* - $(#[doc $($doc_attr:tt)*])* - $variant:ident($member:ident), - $($next:tt)* - } - ) => { - generate_to_tokens!( - ($($arms)* $(#[cfg $cfg_attr])* $name::$variant(_e) => _e.to_tokens($tokens),) - $tokens $name { $($next)* } - ); - }; - - (($($arms:tt)*) $tokens:ident $name:ident {}) => { - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ::quote::ToTokens for $name { - fn to_tokens(&self, $tokens: &mut ::proc_macro2::TokenStream) { - match self { - $($arms)* - } - } - } - }; -} - -// Rustdoc bug: does not respect the doc(hidden) on some items. -#[cfg(all(doc, feature = "parsing"))] -macro_rules! pub_if_not_doc { - ($(#[$m:meta])* $pub:ident $($item:tt)*) => { - check_keyword_matches!(pub $pub); - - $(#[$m])* - $pub(crate) $($item)* - }; -} - -#[cfg(all(not(doc), feature = "parsing"))] -macro_rules! pub_if_not_doc { - ($(#[$m:meta])* $pub:ident $($item:tt)*) => { - check_keyword_matches!(pub $pub); - - $(#[$m])* - $pub $($item)* - }; -} - -macro_rules! check_keyword_matches { - (enum enum) => {}; - (pub pub) => {}; - (struct struct) => {}; -} - -#[cfg(any(feature = "full", feature = "derive"))] -macro_rules! 
return_impl_trait { - ( - $(#[$attr:meta])* - $vis:vis fn $name:ident $args:tt -> $impl_trait:ty [$concrete:ty] $body:block - ) => { - #[cfg(not(docsrs))] - $(#[$attr])* - $vis fn $name $args -> $concrete $body - - #[cfg(docsrs)] - $(#[$attr])* - $vis fn $name $args -> $impl_trait $body - }; -} diff --git a/vendor/syn/src/meta.rs b/vendor/syn/src/meta.rs deleted file mode 100644 index ffeeb2629f4f78..00000000000000 --- a/vendor/syn/src/meta.rs +++ /dev/null @@ -1,427 +0,0 @@ -//! Facility for interpreting structured content inside of an `Attribute`. - -use crate::error::{Error, Result}; -use crate::ext::IdentExt as _; -use crate::lit::Lit; -use crate::parse::{ParseStream, Parser}; -use crate::path::{Path, PathSegment}; -use crate::punctuated::Punctuated; -use proc_macro2::Ident; -use std::fmt::Display; - -/// Make a parser that is usable with `parse_macro_input!` in a -/// `#[proc_macro_attribute]` macro. -/// -/// *Warning:* When parsing attribute args **other than** the -/// `proc_macro::TokenStream` input of a `proc_macro_attribute`, you do **not** -/// need this function. In several cases your callers will get worse error -/// messages if you use this function, because the surrounding delimiter's span -/// is concealed from attribute macros by rustc. Use -/// [`Attribute::parse_nested_meta`] instead. -/// -/// [`Attribute::parse_nested_meta`]: crate::Attribute::parse_nested_meta -/// -/// # Example -/// -/// This example implements an attribute macro whose invocations look like this: -/// -/// ``` -/// # const IGNORE: &str = stringify! { -/// #[tea(kind = "EarlGrey", hot)] -/// struct Picard {...} -/// # }; -/// ``` -/// -/// The "parameters" supported by the attribute are: -/// -/// - `kind = "..."` -/// - `hot` -/// - `with(sugar, milk, ...)`, a comma-separated list of ingredients -/// -/// ``` -/// # extern crate proc_macro; -/// # -/// use proc_macro::TokenStream; -/// use syn::{parse_macro_input, LitStr, Path}; -/// -/// # const IGNORE: &str = stringify! { -/// #[proc_macro_attribute] -/// # }; -/// pub fn tea(args: TokenStream, input: TokenStream) -> TokenStream { -/// let mut kind: Option<LitStr> = None; -/// let mut hot: bool = false; -/// let mut with: Vec<Path> = Vec::new(); -/// let tea_parser = syn::meta::parser(|meta| { -/// if meta.path.is_ident("kind") { -/// kind = Some(meta.value()?.parse()?); -/// Ok(()) -/// } else if meta.path.is_ident("hot") { -/// hot = true; -/// Ok(()) -/// } else if meta.path.is_ident("with") { -/// meta.parse_nested_meta(|meta| { -/// with.push(meta.path); -/// Ok(()) -/// }) -/// } else { -/// Err(meta.error("unsupported tea property")) -/// } -/// }); -/// -/// parse_macro_input!(args with tea_parser); -/// eprintln!("kind={kind:?} hot={hot} with={with:?}"); -/// -/// /* ... */ -/// # TokenStream::new() -/// } -/// ``` -/// -/// The `syn::meta` library will take care of dealing with the commas including -/// trailing commas, and producing sensible error messages on unexpected input. -/// -/// ```console -/// error: expected `,` -/// --> src/main.rs:3:37 -/// | -/// 3 | #[tea(kind = "EarlGrey", with(sugar = "lol", milk))] -/// | ^ -/// ``` -/// -/// # Example -/// -/// Same as above but we factor out most of the logic into a separate function. -/// -/// ``` -/// # extern crate proc_macro; -/// # -/// use proc_macro::TokenStream; -/// use syn::meta::ParseNestedMeta; -/// use syn::parse::{Parser, Result}; -/// use syn::{parse_macro_input, LitStr, Path}; -/// -/// # const IGNORE: &str = stringify! 
{ -/// #[proc_macro_attribute] -/// # }; -/// pub fn tea(args: TokenStream, input: TokenStream) -> TokenStream { -/// let mut attrs = TeaAttributes::default(); -/// let tea_parser = syn::meta::parser(|meta| attrs.parse(meta)); -/// parse_macro_input!(args with tea_parser); -/// -/// /* ... */ -/// # TokenStream::new() -/// } -/// -/// #[derive(Default)] -/// struct TeaAttributes { -/// kind: Option<LitStr>, -/// hot: bool, -/// with: Vec<Path>, -/// } -/// -/// impl TeaAttributes { -/// fn parse(&mut self, meta: ParseNestedMeta) -> Result<()> { -/// if meta.path.is_ident("kind") { -/// self.kind = Some(meta.value()?.parse()?); -/// Ok(()) -/// } else /* just like in last example */ -/// # { unimplemented!() } -/// -/// } -/// } -/// ``` -pub fn parser(logic: impl FnMut(ParseNestedMeta) -> Result<()>) -> impl Parser<Output = ()> { - |input: ParseStream| { - if input.is_empty() { - Ok(()) - } else { - parse_nested_meta(input, logic) - } - } -} - -/// Context for parsing a single property in the conventional syntax for -/// structured attributes. -/// -/// # Examples -/// -/// Refer to usage examples on the following two entry-points: -/// -/// - [`Attribute::parse_nested_meta`] if you have an entire `Attribute` to -/// parse. Always use this if possible. Generally this is able to produce -/// better error messages because `Attribute` holds span information for all -/// of the delimiters therein. -/// -/// - [`syn::meta::parser`] if you are implementing a `proc_macro_attribute` -/// macro and parsing the arguments to the attribute macro, i.e. the ones -/// written in the same attribute that dispatched the macro invocation. Rustc -/// does not pass span information for the surrounding delimiters into the -/// attribute macro invocation in this situation, so error messages might be -/// less precise. -/// -/// [`Attribute::parse_nested_meta`]: crate::Attribute::parse_nested_meta -/// [`syn::meta::parser`]: crate::meta::parser -#[non_exhaustive] -pub struct ParseNestedMeta<'a> { - pub path: Path, - pub input: ParseStream<'a>, -} - -impl<'a> ParseNestedMeta<'a> { - /// Used when parsing `key = "value"` syntax. - /// - /// All it does is advance `meta.input` past the `=` sign in the input. You - /// could accomplish the same effect by writing - /// `meta.parse::<Token![=]>()?`, so at most it is a minor convenience to - /// use `meta.value()?`. - /// - /// # Example - /// - /// ``` - /// use syn::{parse_quote, Attribute, LitStr}; - /// - /// let attr: Attribute = parse_quote! { - /// #[tea(kind = "EarlGrey")] - /// }; - /// // conceptually: - /// if attr.path().is_ident("tea") { // this parses the `tea` - /// attr.parse_nested_meta(|meta| { // this parses the `(` - /// if meta.path.is_ident("kind") { // this parses the `kind` - /// let value = meta.value()?; // this parses the `=` - /// let s: LitStr = value.parse()?; // this parses `"EarlGrey"` - /// if s.value() == "EarlGrey" { - /// // ... - /// } - /// Ok(()) - /// } else { - /// Err(meta.error("unsupported attribute")) - /// } - /// })?; - /// } - /// # anyhow::Ok(()) - /// ``` - pub fn value(&self) -> Result<ParseStream<'a>> { - self.input.parse::<Token![=]>()?; - Ok(self.input) - } - - /// Used when parsing `list(...)` syntax **if** the content inside the - /// nested parentheses is also expected to conform to Rust's structured - /// attribute convention. - /// - /// # Example - /// - /// ``` - /// use syn::{parse_quote, Attribute}; - /// - /// let attr: Attribute = parse_quote! 
{ - /// #[tea(with(sugar, milk))] - /// }; - /// - /// if attr.path().is_ident("tea") { - /// attr.parse_nested_meta(|meta| { - /// if meta.path.is_ident("with") { - /// meta.parse_nested_meta(|meta| { // <--- - /// if meta.path.is_ident("sugar") { - /// // Here we can go even deeper if needed. - /// Ok(()) - /// } else if meta.path.is_ident("milk") { - /// Ok(()) - /// } else { - /// Err(meta.error("unsupported ingredient")) - /// } - /// }) - /// } else { - /// Err(meta.error("unsupported tea property")) - /// } - /// })?; - /// } - /// # anyhow::Ok(()) - /// ``` - /// - /// # Counterexample - /// - /// If you don't need `parse_nested_meta`'s help in parsing the content - /// written within the nested parentheses, keep in mind that you can always - /// just parse it yourself from the exposed ParseStream. Rust syntax permits - /// arbitrary tokens within those parentheses so for the crazier stuff, - /// `parse_nested_meta` is not what you want. - /// - /// ``` - /// use syn::{parenthesized, parse_quote, Attribute, LitInt}; - /// - /// let attr: Attribute = parse_quote! { - /// #[repr(align(32))] - /// }; - /// - /// let mut align: Option<LitInt> = None; - /// if attr.path().is_ident("repr") { - /// attr.parse_nested_meta(|meta| { - /// if meta.path.is_ident("align") { - /// let content; - /// parenthesized!(content in meta.input); - /// align = Some(content.parse()?); - /// Ok(()) - /// } else { - /// Err(meta.error("unsupported repr")) - /// } - /// })?; - /// } - /// # anyhow::Ok(()) - /// ``` - pub fn parse_nested_meta( - &self, - logic: impl FnMut(ParseNestedMeta) -> Result<()>, - ) -> Result<()> { - let content; - parenthesized!(content in self.input); - parse_nested_meta(&content, logic) - } - - /// Report that the attribute's content did not conform to expectations. - /// - /// The span of the resulting error will cover `meta.path` *and* everything - /// that has been parsed so far since it. - /// - /// There are 2 ways you might call this. First, if `meta.path` is not - /// something you recognize: - /// - /// ``` - /// # use syn::Attribute; - /// # - /// # fn example(attr: &Attribute) -> syn::Result<()> { - /// attr.parse_nested_meta(|meta| { - /// if meta.path.is_ident("kind") { - /// // ... - /// Ok(()) - /// } else { - /// Err(meta.error("unsupported tea property")) - /// } - /// })?; - /// # Ok(()) - /// # } - /// ``` - /// - /// In this case, it behaves exactly like - /// `syn::Error::new_spanned(&meta.path, "message...")`. - /// - /// ```console - /// error: unsupported tea property - /// --> src/main.rs:3:26 - /// | - /// 3 | #[tea(kind = "EarlGrey", wat = "foo")] - /// | ^^^ - /// ``` - /// - /// More usefully, the second place is if you've already parsed a value but - /// have decided not to accept the value: - /// - /// ``` - /// # use syn::Attribute; - /// # - /// # fn example(attr: &Attribute) -> syn::Result<()> { - /// use syn::Expr; - /// - /// attr.parse_nested_meta(|meta| { - /// if meta.path.is_ident("kind") { - /// let expr: Expr = meta.value()?.parse()?; - /// match expr { - /// Expr::Lit(expr) => /* ... */ - /// # unimplemented!(), - /// Expr::Path(expr) => /* ... */ - /// # unimplemented!(), - /// Expr::Macro(expr) => /* ... 
*/ - /// # unimplemented!(), - /// _ => Err(meta.error("tea kind must be a string literal, path, or macro")), - /// } - /// } else /* as above */ - /// # { unimplemented!() } - /// - /// })?; - /// # Ok(()) - /// # } - /// ``` - /// - /// ```console - /// error: tea kind must be a string literal, path, or macro - /// --> src/main.rs:3:7 - /// | - /// 3 | #[tea(kind = async { replicator.await })] - /// | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - /// ``` - /// - /// Often you may want to use `syn::Error::new_spanned` even in this - /// situation. In the above code, that would be: - /// - /// ``` - /// # use syn::{Error, Expr}; - /// # - /// # fn example(expr: Expr) -> syn::Result<()> { - /// match expr { - /// Expr::Lit(expr) => /* ... */ - /// # unimplemented!(), - /// Expr::Path(expr) => /* ... */ - /// # unimplemented!(), - /// Expr::Macro(expr) => /* ... */ - /// # unimplemented!(), - /// _ => Err(Error::new_spanned(expr, "unsupported expression type for `kind`")), - /// } - /// # } - /// ``` - /// - /// ```console - /// error: unsupported expression type for `kind` - /// --> src/main.rs:3:14 - /// | - /// 3 | #[tea(kind = async { replicator.await })] - /// | ^^^^^^^^^^^^^^^^^^^^^^^^^^ - /// ``` - pub fn error(&self, msg: impl Display) -> Error { - let start_span = self.path.segments[0].ident.span(); - let end_span = self.input.cursor().prev_span(); - crate::error::new2(start_span, end_span, msg) - } -} - -pub(crate) fn parse_nested_meta( - input: ParseStream, - mut logic: impl FnMut(ParseNestedMeta) -> Result<()>, -) -> Result<()> { - loop { - let path = input.call(parse_meta_path)?; - logic(ParseNestedMeta { path, input })?; - if input.is_empty() { - return Ok(()); - } - input.parse::<Token![,]>()?; - if input.is_empty() { - return Ok(()); - } - } -} - -// Like Path::parse_mod_style, but accepts keywords in the path. -fn parse_meta_path(input: ParseStream) -> Result<Path> { - Ok(Path { - leading_colon: input.parse()?, - segments: { - let mut segments = Punctuated::new(); - if input.peek(Ident::peek_any) { - let ident = Ident::parse_any(input)?; - segments.push_value(PathSegment::from(ident)); - } else if input.is_empty() { - return Err(input.error("expected nested attribute")); - } else if input.peek(Lit) { - return Err(input.error("unexpected literal in nested attribute, expected ident")); - } else { - return Err(input.error("unexpected token in nested attribute, expected ident")); - } - while input.peek(Token![::]) { - let punct = input.parse()?; - segments.push_punct(punct); - let ident = Ident::parse_any(input)?; - segments.push_value(PathSegment::from(ident)); - } - segments - }, - }) -} diff --git a/vendor/syn/src/op.rs b/vendor/syn/src/op.rs deleted file mode 100644 index 575d9faa1273ad..00000000000000 --- a/vendor/syn/src/op.rs +++ /dev/null @@ -1,219 +0,0 @@ -ast_enum! { - /// A binary operator: `+`, `+=`, `&`. 
- #[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] - #[non_exhaustive] - pub enum BinOp { - /// The `+` operator (addition) - Add(Token![+]), - /// The `-` operator (subtraction) - Sub(Token![-]), - /// The `*` operator (multiplication) - Mul(Token![*]), - /// The `/` operator (division) - Div(Token![/]), - /// The `%` operator (modulus) - Rem(Token![%]), - /// The `&&` operator (logical and) - And(Token![&&]), - /// The `||` operator (logical or) - Or(Token![||]), - /// The `^` operator (bitwise xor) - BitXor(Token![^]), - /// The `&` operator (bitwise and) - BitAnd(Token![&]), - /// The `|` operator (bitwise or) - BitOr(Token![|]), - /// The `<<` operator (shift left) - Shl(Token![<<]), - /// The `>>` operator (shift right) - Shr(Token![>>]), - /// The `==` operator (equality) - Eq(Token![==]), - /// The `<` operator (less than) - Lt(Token![<]), - /// The `<=` operator (less than or equal to) - Le(Token![<=]), - /// The `!=` operator (not equal to) - Ne(Token![!=]), - /// The `>=` operator (greater than or equal to) - Ge(Token![>=]), - /// The `>` operator (greater than) - Gt(Token![>]), - /// The `+=` operator - AddAssign(Token![+=]), - /// The `-=` operator - SubAssign(Token![-=]), - /// The `*=` operator - MulAssign(Token![*=]), - /// The `/=` operator - DivAssign(Token![/=]), - /// The `%=` operator - RemAssign(Token![%=]), - /// The `^=` operator - BitXorAssign(Token![^=]), - /// The `&=` operator - BitAndAssign(Token![&=]), - /// The `|=` operator - BitOrAssign(Token![|=]), - /// The `<<=` operator - ShlAssign(Token![<<=]), - /// The `>>=` operator - ShrAssign(Token![>>=]), - } -} - -ast_enum! { - /// A unary operator: `*`, `!`, `-`. - #[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] - #[non_exhaustive] - pub enum UnOp { - /// The `*` operator for dereferencing - Deref(Token![*]), - /// The `!` operator for logical inversion - Not(Token![!]), - /// The `-` operator for negation - Neg(Token![-]), - } -} - -#[cfg(feature = "parsing")] -pub(crate) mod parsing { - use crate::error::Result; - use crate::op::{BinOp, UnOp}; - use crate::parse::{Parse, ParseStream}; - - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - impl Parse for BinOp { - fn parse(input: ParseStream) -> Result<Self> { - if input.peek(Token![+=]) { - input.parse().map(BinOp::AddAssign) - } else if input.peek(Token![-=]) { - input.parse().map(BinOp::SubAssign) - } else if input.peek(Token![*=]) { - input.parse().map(BinOp::MulAssign) - } else if input.peek(Token![/=]) { - input.parse().map(BinOp::DivAssign) - } else if input.peek(Token![%=]) { - input.parse().map(BinOp::RemAssign) - } else if input.peek(Token![^=]) { - input.parse().map(BinOp::BitXorAssign) - } else if input.peek(Token![&=]) { - input.parse().map(BinOp::BitAndAssign) - } else if input.peek(Token![|=]) { - input.parse().map(BinOp::BitOrAssign) - } else if input.peek(Token![<<=]) { - input.parse().map(BinOp::ShlAssign) - } else if input.peek(Token![>>=]) { - input.parse().map(BinOp::ShrAssign) - } else if input.peek(Token![&&]) { - input.parse().map(BinOp::And) - } else if input.peek(Token![||]) { - input.parse().map(BinOp::Or) - } else if input.peek(Token![<<]) { - input.parse().map(BinOp::Shl) - } else if input.peek(Token![>>]) { - input.parse().map(BinOp::Shr) - } else if input.peek(Token![==]) { - input.parse().map(BinOp::Eq) - } else if input.peek(Token![<=]) { - input.parse().map(BinOp::Le) - } else if input.peek(Token![!=]) { - input.parse().map(BinOp::Ne) - } else if input.peek(Token![>=]) { - 
input.parse().map(BinOp::Ge) - } else if input.peek(Token![+]) { - input.parse().map(BinOp::Add) - } else if input.peek(Token![-]) { - input.parse().map(BinOp::Sub) - } else if input.peek(Token![*]) { - input.parse().map(BinOp::Mul) - } else if input.peek(Token![/]) { - input.parse().map(BinOp::Div) - } else if input.peek(Token![%]) { - input.parse().map(BinOp::Rem) - } else if input.peek(Token![^]) { - input.parse().map(BinOp::BitXor) - } else if input.peek(Token![&]) { - input.parse().map(BinOp::BitAnd) - } else if input.peek(Token![|]) { - input.parse().map(BinOp::BitOr) - } else if input.peek(Token![<]) { - input.parse().map(BinOp::Lt) - } else if input.peek(Token![>]) { - input.parse().map(BinOp::Gt) - } else { - Err(input.error("expected binary operator")) - } - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - impl Parse for UnOp { - fn parse(input: ParseStream) -> Result<Self> { - let lookahead = input.lookahead1(); - if lookahead.peek(Token![*]) { - input.parse().map(UnOp::Deref) - } else if lookahead.peek(Token![!]) { - input.parse().map(UnOp::Not) - } else if lookahead.peek(Token![-]) { - input.parse().map(UnOp::Neg) - } else { - Err(lookahead.error()) - } - } - } -} - -#[cfg(feature = "printing")] -mod printing { - use crate::op::{BinOp, UnOp}; - use proc_macro2::TokenStream; - use quote::ToTokens; - - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for BinOp { - fn to_tokens(&self, tokens: &mut TokenStream) { - match self { - BinOp::Add(t) => t.to_tokens(tokens), - BinOp::Sub(t) => t.to_tokens(tokens), - BinOp::Mul(t) => t.to_tokens(tokens), - BinOp::Div(t) => t.to_tokens(tokens), - BinOp::Rem(t) => t.to_tokens(tokens), - BinOp::And(t) => t.to_tokens(tokens), - BinOp::Or(t) => t.to_tokens(tokens), - BinOp::BitXor(t) => t.to_tokens(tokens), - BinOp::BitAnd(t) => t.to_tokens(tokens), - BinOp::BitOr(t) => t.to_tokens(tokens), - BinOp::Shl(t) => t.to_tokens(tokens), - BinOp::Shr(t) => t.to_tokens(tokens), - BinOp::Eq(t) => t.to_tokens(tokens), - BinOp::Lt(t) => t.to_tokens(tokens), - BinOp::Le(t) => t.to_tokens(tokens), - BinOp::Ne(t) => t.to_tokens(tokens), - BinOp::Ge(t) => t.to_tokens(tokens), - BinOp::Gt(t) => t.to_tokens(tokens), - BinOp::AddAssign(t) => t.to_tokens(tokens), - BinOp::SubAssign(t) => t.to_tokens(tokens), - BinOp::MulAssign(t) => t.to_tokens(tokens), - BinOp::DivAssign(t) => t.to_tokens(tokens), - BinOp::RemAssign(t) => t.to_tokens(tokens), - BinOp::BitXorAssign(t) => t.to_tokens(tokens), - BinOp::BitAndAssign(t) => t.to_tokens(tokens), - BinOp::BitOrAssign(t) => t.to_tokens(tokens), - BinOp::ShlAssign(t) => t.to_tokens(tokens), - BinOp::ShrAssign(t) => t.to_tokens(tokens), - } - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for UnOp { - fn to_tokens(&self, tokens: &mut TokenStream) { - match self { - UnOp::Deref(t) => t.to_tokens(tokens), - UnOp::Not(t) => t.to_tokens(tokens), - UnOp::Neg(t) => t.to_tokens(tokens), - } - } - } -} diff --git a/vendor/syn/src/parse.rs b/vendor/syn/src/parse.rs deleted file mode 100644 index 57531005ac4eb2..00000000000000 --- a/vendor/syn/src/parse.rs +++ /dev/null @@ -1,1419 +0,0 @@ -//! Parsing interface for parsing a token stream into a syntax tree node. -//! -//! Parsing in Syn is built on parser functions that take in a [`ParseStream`] -//! and produce a [`Result<T>`] where `T` is some syntax tree node. Underlying -//! these parser functions is a lower level mechanism built around the -//! [`Cursor`] type. 
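
The removed `op.rs` above is what gives `BinOp` and `UnOp` their `Parse` implementations. A minimal sketch of what those impls provide to downstream users, assuming the upstream syn 2.x crate with its default features (this snippet is illustrative only and is not part of the patch):

```rust
// Sketch only: exercises the Parse impls defined in the deleted op.rs,
// assuming upstream syn 2.x ("derive" + "parsing" features, both on by default).
use syn::{BinOp, UnOp};

fn main() -> syn::Result<()> {
    // The impl peeks longer tokens first, so `<<=` parses as ShlAssign rather
    // than a `<<` followed by a stray `=`.
    let op: BinOp = syn::parse_str("<<=")?;
    assert!(matches!(op, BinOp::ShlAssign(_)));

    // Unary operators parse the same way.
    let neg: UnOp = syn::parse_str("-")?;
    assert!(matches!(neg, UnOp::Neg(_)));
    Ok(())
}
```
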
`Cursor` is a cheaply copyable cursor over a range of -//! tokens in a token stream. -//! -//! [`Result<T>`]: Result -//! [`Cursor`]: crate::buffer::Cursor -//! -//! # Example -//! -//! Here is a snippet of parsing code to get a feel for the style of the -//! library. We define data structures for a subset of Rust syntax including -//! enums (not shown) and structs, then provide implementations of the [`Parse`] -//! trait to parse these syntax tree data structures from a token stream. -//! -//! Once `Parse` impls have been defined, they can be called conveniently from a -//! procedural macro through [`parse_macro_input!`] as shown at the bottom of -//! the snippet. If the caller provides syntactically invalid input to the -//! procedural macro, they will receive a helpful compiler error message -//! pointing out the exact token that triggered the failure to parse. -//! -//! [`parse_macro_input!`]: crate::parse_macro_input! -//! -//! ``` -//! # extern crate proc_macro; -//! # -//! use proc_macro::TokenStream; -//! use syn::{braced, parse_macro_input, token, Field, Ident, Result, Token}; -//! use syn::parse::{Parse, ParseStream}; -//! use syn::punctuated::Punctuated; -//! -//! enum Item { -//! Struct(ItemStruct), -//! Enum(ItemEnum), -//! } -//! -//! struct ItemStruct { -//! struct_token: Token![struct], -//! ident: Ident, -//! brace_token: token::Brace, -//! fields: Punctuated<Field, Token![,]>, -//! } -//! # -//! # enum ItemEnum {} -//! -//! impl Parse for Item { -//! fn parse(input: ParseStream) -> Result<Self> { -//! let lookahead = input.lookahead1(); -//! if lookahead.peek(Token![struct]) { -//! input.parse().map(Item::Struct) -//! } else if lookahead.peek(Token![enum]) { -//! input.parse().map(Item::Enum) -//! } else { -//! Err(lookahead.error()) -//! } -//! } -//! } -//! -//! impl Parse for ItemStruct { -//! fn parse(input: ParseStream) -> Result<Self> { -//! let content; -//! Ok(ItemStruct { -//! struct_token: input.parse()?, -//! ident: input.parse()?, -//! brace_token: braced!(content in input), -//! fields: content.parse_terminated(Field::parse_named, Token![,])?, -//! }) -//! } -//! } -//! # -//! # impl Parse for ItemEnum { -//! # fn parse(input: ParseStream) -> Result<Self> { -//! # unimplemented!() -//! # } -//! # } -//! -//! # const IGNORE: &str = stringify! { -//! #[proc_macro] -//! # }; -//! pub fn my_macro(tokens: TokenStream) -> TokenStream { -//! let input = parse_macro_input!(tokens as Item); -//! -//! /* ... */ -//! # TokenStream::new() -//! } -//! ``` -//! -//! # The `syn::parse*` functions -//! -//! The [`syn::parse`], [`syn::parse2`], and [`syn::parse_str`] functions serve -//! as an entry point for parsing syntax tree nodes that can be parsed in an -//! obvious default way. These functions can return any syntax tree node that -//! implements the [`Parse`] trait, which includes most types in Syn. -//! -//! [`syn::parse`]: crate::parse() -//! [`syn::parse2`]: crate::parse2() -//! [`syn::parse_str`]: crate::parse_str() -//! -//! ``` -//! use syn::Type; -//! -//! # fn run_parser() -> syn::Result<()> { -//! let t: Type = syn::parse_str("std::collections::HashMap<String, Value>")?; -//! # Ok(()) -//! # } -//! # -//! # run_parser().unwrap(); -//! ``` -//! -//! The [`parse_quote!`] macro also uses this approach. -//! -//! [`parse_quote!`]: crate::parse_quote! -//! -//! # The `Parser` trait -//! -//! Some types can be parsed in several ways depending on context. For example -//! an [`Attribute`] can be either "outer" like `#[...]` or "inner" like -//! 
`#![...]` and parsing the wrong one would be a bug. Similarly [`Punctuated`] -//! may or may not allow trailing punctuation, and parsing it the wrong way -//! would either reject valid input or accept invalid input. -//! -//! [`Attribute`]: crate::Attribute -//! [`Punctuated`]: crate::punctuated -//! -//! The `Parse` trait is not implemented in these cases because there is no good -//! behavior to consider the default. -//! -//! ```compile_fail -//! # extern crate proc_macro; -//! # -//! # use syn::punctuated::Punctuated; -//! # use syn::{PathSegment, Result, Token}; -//! # -//! # fn f(tokens: proc_macro::TokenStream) -> Result<()> { -//! # -//! // Can't parse `Punctuated` without knowing whether trailing punctuation -//! // should be allowed in this context. -//! let path: Punctuated<PathSegment, Token![::]> = syn::parse(tokens)?; -//! # -//! # Ok(()) -//! # } -//! ``` -//! -//! In these cases the types provide a choice of parser functions rather than a -//! single `Parse` implementation, and those parser functions can be invoked -//! through the [`Parser`] trait. -//! -//! -//! ``` -//! # extern crate proc_macro; -//! # -//! use proc_macro::TokenStream; -//! use syn::parse::Parser; -//! use syn::punctuated::Punctuated; -//! use syn::{Attribute, Expr, PathSegment, Result, Token}; -//! -//! fn call_some_parser_methods(input: TokenStream) -> Result<()> { -//! // Parse a nonempty sequence of path segments separated by `::` punctuation -//! // with no trailing punctuation. -//! let tokens = input.clone(); -//! let parser = Punctuated::<PathSegment, Token![::]>::parse_separated_nonempty; -//! let _path = parser.parse(tokens)?; -//! -//! // Parse a possibly empty sequence of expressions terminated by commas with -//! // an optional trailing punctuation. -//! let tokens = input.clone(); -//! let parser = Punctuated::<Expr, Token![,]>::parse_terminated; -//! let _args = parser.parse(tokens)?; -//! -//! // Parse zero or more outer attributes but not inner attributes. -//! let tokens = input.clone(); -//! let parser = Attribute::parse_outer; -//! let _attrs = parser.parse(tokens)?; -//! -//! Ok(()) -//! } -//! ``` - -#[path = "discouraged.rs"] -pub mod discouraged; - -use crate::buffer::{Cursor, TokenBuffer}; -use crate::error; -use crate::lookahead; -use crate::punctuated::Punctuated; -use crate::token::Token; -use proc_macro2::{Delimiter, Group, Literal, Punct, Span, TokenStream, TokenTree}; -#[cfg(feature = "printing")] -use quote::ToTokens; -use std::cell::Cell; -use std::fmt::{self, Debug, Display}; -#[cfg(feature = "extra-traits")] -use std::hash::{Hash, Hasher}; -use std::marker::PhantomData; -use std::mem; -use std::ops::Deref; -use std::panic::{RefUnwindSafe, UnwindSafe}; -use std::rc::Rc; -use std::str::FromStr; - -pub use crate::error::{Error, Result}; -pub use crate::lookahead::{End, Lookahead1, Peek}; - -/// Parsing interface implemented by all types that can be parsed in a default -/// way from a token stream. -/// -/// Refer to the [module documentation] for details about implementing and using -/// the `Parse` trait. -/// -/// [module documentation]: self -pub trait Parse: Sized { - fn parse(input: ParseStream) -> Result<Self>; -} - -/// Input to a Syn parser function. -/// -/// See the methods of this type under the documentation of [`ParseBuffer`]. For -/// an overview of parsing in Syn, refer to the [module documentation]. -/// -/// [module documentation]: self -pub type ParseStream<'a> = &'a ParseBuffer<'a>; - -/// Cursor position within a buffered token stream. 
-/// -/// This type is more commonly used through the type alias [`ParseStream`] which -/// is an alias for `&ParseBuffer`. -/// -/// `ParseStream` is the input type for all parser functions in Syn. They have -/// the signature `fn(ParseStream) -> Result<T>`. -/// -/// ## Calling a parser function -/// -/// There is no public way to construct a `ParseBuffer`. Instead, if you are -/// looking to invoke a parser function that requires `ParseStream` as input, -/// you will need to go through one of the public parsing entry points. -/// -/// - The [`parse_macro_input!`] macro if parsing input of a procedural macro; -/// - One of [the `syn::parse*` functions][syn-parse]; or -/// - A method of the [`Parser`] trait. -/// -/// [`parse_macro_input!`]: crate::parse_macro_input! -/// [syn-parse]: self#the-synparse-functions -pub struct ParseBuffer<'a> { - scope: Span, - // Instead of Cell<Cursor<'a>> so that ParseBuffer<'a> is covariant in 'a. - // The rest of the code in this module needs to be careful that only a - // cursor derived from this `cell` is ever assigned to this `cell`. - // - // Cell<Cursor<'a>> cannot be covariant in 'a because then we could take a - // ParseBuffer<'a>, upcast to ParseBuffer<'short> for some lifetime shorter - // than 'a, and then assign a Cursor<'short> into the Cell. - // - // By extension, it would not be safe to expose an API that accepts a - // Cursor<'a> and trusts that it lives as long as the cursor currently in - // the cell. - cell: Cell<Cursor<'static>>, - marker: PhantomData<Cursor<'a>>, - unexpected: Cell<Option<Rc<Cell<Unexpected>>>>, -} - -impl<'a> Drop for ParseBuffer<'a> { - fn drop(&mut self) { - if let Some((unexpected_span, delimiter)) = span_of_unexpected_ignoring_nones(self.cursor()) - { - let (inner, old_span) = inner_unexpected(self); - if old_span.is_none() { - inner.set(Unexpected::Some(unexpected_span, delimiter)); - } - } - } -} - -impl<'a> Display for ParseBuffer<'a> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - Display::fmt(&self.cursor().token_stream(), f) - } -} - -impl<'a> Debug for ParseBuffer<'a> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - Debug::fmt(&self.cursor().token_stream(), f) - } -} - -impl<'a> UnwindSafe for ParseBuffer<'a> {} -impl<'a> RefUnwindSafe for ParseBuffer<'a> {} - -/// Cursor state associated with speculative parsing. -/// -/// This type is the input of the closure provided to [`ParseStream::step`]. -/// -/// [`ParseStream::step`]: ParseBuffer::step -/// -/// # Example -/// -/// ``` -/// use proc_macro2::TokenTree; -/// use syn::Result; -/// use syn::parse::ParseStream; -/// -/// // This function advances the stream past the next occurrence of `@`. If -/// // no `@` is present in the stream, the stream position is unchanged and -/// // an error is returned. 
-/// fn skip_past_next_at(input: ParseStream) -> Result<()> { -/// input.step(|cursor| { -/// let mut rest = *cursor; -/// while let Some((tt, next)) = rest.token_tree() { -/// match &tt { -/// TokenTree::Punct(punct) if punct.as_char() == '@' => { -/// return Ok(((), next)); -/// } -/// _ => rest = next, -/// } -/// } -/// Err(cursor.error("no `@` was found after this point")) -/// }) -/// } -/// # -/// # fn remainder_after_skipping_past_next_at( -/// # input: ParseStream, -/// # ) -> Result<proc_macro2::TokenStream> { -/// # skip_past_next_at(input)?; -/// # input.parse() -/// # } -/// # -/// # use syn::parse::Parser; -/// # let remainder = remainder_after_skipping_past_next_at -/// # .parse_str("a @ b c") -/// # .unwrap(); -/// # assert_eq!(remainder.to_string(), "b c"); -/// ``` -pub struct StepCursor<'c, 'a> { - scope: Span, - // This field is covariant in 'c. - cursor: Cursor<'c>, - // This field is contravariant in 'c. Together these make StepCursor - // invariant in 'c. Also covariant in 'a. The user cannot cast 'c to a - // different lifetime but can upcast into a StepCursor with a shorter - // lifetime 'a. - // - // As long as we only ever construct a StepCursor for which 'c outlives 'a, - // this means if ever a StepCursor<'c, 'a> exists we are guaranteed that 'c - // outlives 'a. - marker: PhantomData<fn(Cursor<'c>) -> Cursor<'a>>, -} - -impl<'c, 'a> Deref for StepCursor<'c, 'a> { - type Target = Cursor<'c>; - - fn deref(&self) -> &Self::Target { - &self.cursor - } -} - -impl<'c, 'a> Copy for StepCursor<'c, 'a> {} - -impl<'c, 'a> Clone for StepCursor<'c, 'a> { - fn clone(&self) -> Self { - *self - } -} - -impl<'c, 'a> StepCursor<'c, 'a> { - /// Triggers an error at the current position of the parse stream. - /// - /// The `ParseStream::step` invocation will return this same error without - /// advancing the stream state. - pub fn error<T: Display>(self, message: T) -> Error { - error::new_at(self.scope, self.cursor, message) - } -} - -pub(crate) fn advance_step_cursor<'c, 'a>(proof: StepCursor<'c, 'a>, to: Cursor<'c>) -> Cursor<'a> { - // Refer to the comments within the StepCursor definition. We use the - // fact that a StepCursor<'c, 'a> exists as proof that 'c outlives 'a. - // Cursor is covariant in its lifetime parameter so we can cast a - // Cursor<'c> to one with the shorter lifetime Cursor<'a>. - let _ = proof; - unsafe { mem::transmute::<Cursor<'c>, Cursor<'a>>(to) } -} - -pub(crate) fn new_parse_buffer( - scope: Span, - cursor: Cursor, - unexpected: Rc<Cell<Unexpected>>, -) -> ParseBuffer { - ParseBuffer { - scope, - // See comment on `cell` in the struct definition. - cell: Cell::new(unsafe { mem::transmute::<Cursor, Cursor<'static>>(cursor) }), - marker: PhantomData, - unexpected: Cell::new(Some(unexpected)), - } -} - -pub(crate) enum Unexpected { - None, - Some(Span, Delimiter), - Chain(Rc<Cell<Unexpected>>), -} - -impl Default for Unexpected { - fn default() -> Self { - Unexpected::None - } -} - -impl Clone for Unexpected { - fn clone(&self) -> Self { - match self { - Unexpected::None => Unexpected::None, - Unexpected::Some(span, delimiter) => Unexpected::Some(*span, *delimiter), - Unexpected::Chain(next) => Unexpected::Chain(next.clone()), - } - } -} - -// We call this on Cell<Unexpected> and Cell<Option<T>> where temporarily -// swapping in a None is cheap. 
-fn cell_clone<T: Default + Clone>(cell: &Cell<T>) -> T { - let prev = cell.take(); - let ret = prev.clone(); - cell.set(prev); - ret -} - -fn inner_unexpected(buffer: &ParseBuffer) -> (Rc<Cell<Unexpected>>, Option<(Span, Delimiter)>) { - let mut unexpected = get_unexpected(buffer); - loop { - match cell_clone(&unexpected) { - Unexpected::None => return (unexpected, None), - Unexpected::Some(span, delimiter) => return (unexpected, Some((span, delimiter))), - Unexpected::Chain(next) => unexpected = next, - } - } -} - -pub(crate) fn get_unexpected(buffer: &ParseBuffer) -> Rc<Cell<Unexpected>> { - cell_clone(&buffer.unexpected).unwrap() -} - -fn span_of_unexpected_ignoring_nones(mut cursor: Cursor) -> Option<(Span, Delimiter)> { - if cursor.eof() { - return None; - } - while let Some((inner, _span, rest)) = cursor.group(Delimiter::None) { - if let Some(unexpected) = span_of_unexpected_ignoring_nones(inner) { - return Some(unexpected); - } - cursor = rest; - } - if cursor.eof() { - None - } else { - Some((cursor.span(), cursor.scope_delimiter())) - } -} - -impl<'a> ParseBuffer<'a> { - /// Parses a syntax tree node of type `T`, advancing the position of our - /// parse stream past it. - pub fn parse<T: Parse>(&self) -> Result<T> { - T::parse(self) - } - - /// Calls the given parser function to parse a syntax tree node of type `T` - /// from this stream. - /// - /// # Example - /// - /// The parser below invokes [`Attribute::parse_outer`] to parse a vector of - /// zero or more outer attributes. - /// - /// [`Attribute::parse_outer`]: crate::Attribute::parse_outer - /// - /// ``` - /// use syn::{Attribute, Ident, Result, Token}; - /// use syn::parse::{Parse, ParseStream}; - /// - /// // Parses a unit struct with attributes. - /// // - /// // #[path = "s.tmpl"] - /// // struct S; - /// struct UnitStruct { - /// attrs: Vec<Attribute>, - /// struct_token: Token![struct], - /// name: Ident, - /// semi_token: Token![;], - /// } - /// - /// impl Parse for UnitStruct { - /// fn parse(input: ParseStream) -> Result<Self> { - /// Ok(UnitStruct { - /// attrs: input.call(Attribute::parse_outer)?, - /// struct_token: input.parse()?, - /// name: input.parse()?, - /// semi_token: input.parse()?, - /// }) - /// } - /// } - /// ``` - pub fn call<T>(&'a self, function: fn(ParseStream<'a>) -> Result<T>) -> Result<T> { - function(self) - } - - /// Looks at the next token in the parse stream to determine whether it - /// matches the requested type of token. - /// - /// Does not advance the position of the parse stream. - /// - /// # Syntax - /// - /// Note that this method does not use turbofish syntax. Pass the peek type - /// inside of parentheses. - /// - /// - `input.peek(Token![struct])` - /// - `input.peek(Token![==])` - /// - `input.peek(syn::Ident)` *(does not accept keywords)* - /// - `input.peek(syn::Ident::peek_any)` - /// - `input.peek(Lifetime)` - /// - `input.peek(token::Brace)` - /// - /// # Example - /// - /// In this example we finish parsing the list of supertraits when the next - /// token in the input is either `where` or an opening curly brace. - /// - /// ``` - /// use syn::{braced, token, Generics, Ident, Result, Token, TypeParamBound}; - /// use syn::parse::{Parse, ParseStream}; - /// use syn::punctuated::Punctuated; - /// - /// // Parses a trait definition containing no associated items. 
- /// // - /// // trait Marker<'de, T>: A + B<'de> where Box<T>: Clone {} - /// struct MarkerTrait { - /// trait_token: Token![trait], - /// ident: Ident, - /// generics: Generics, - /// colon_token: Option<Token![:]>, - /// supertraits: Punctuated<TypeParamBound, Token![+]>, - /// brace_token: token::Brace, - /// } - /// - /// impl Parse for MarkerTrait { - /// fn parse(input: ParseStream) -> Result<Self> { - /// let trait_token: Token![trait] = input.parse()?; - /// let ident: Ident = input.parse()?; - /// let mut generics: Generics = input.parse()?; - /// let colon_token: Option<Token![:]> = input.parse()?; - /// - /// let mut supertraits = Punctuated::new(); - /// if colon_token.is_some() { - /// loop { - /// supertraits.push_value(input.parse()?); - /// if input.peek(Token![where]) || input.peek(token::Brace) { - /// break; - /// } - /// supertraits.push_punct(input.parse()?); - /// } - /// } - /// - /// generics.where_clause = input.parse()?; - /// let content; - /// let empty_brace_token = braced!(content in input); - /// - /// Ok(MarkerTrait { - /// trait_token, - /// ident, - /// generics, - /// colon_token, - /// supertraits, - /// brace_token: empty_brace_token, - /// }) - /// } - /// } - /// ``` - pub fn peek<T: Peek>(&self, token: T) -> bool { - let _ = token; - T::Token::peek(self.cursor()) - } - - /// Looks at the second-next token in the parse stream. - /// - /// This is commonly useful as a way to implement contextual keywords. - /// - /// # Example - /// - /// This example needs to use `peek2` because the symbol `union` is not a - /// keyword in Rust. We can't use just `peek` and decide to parse a union if - /// the very next token is `union`, because someone is free to write a `mod - /// union` and a macro invocation that looks like `union::some_macro! { ... - /// }`. In other words `union` is a contextual keyword. - /// - /// ``` - /// use syn::{Ident, ItemUnion, Macro, Result, Token}; - /// use syn::parse::{Parse, ParseStream}; - /// - /// // Parses either a union or a macro invocation. - /// enum UnionOrMacro { - /// // union MaybeUninit<T> { uninit: (), value: T } - /// Union(ItemUnion), - /// // lazy_static! { ... } - /// Macro(Macro), - /// } - /// - /// impl Parse for UnionOrMacro { - /// fn parse(input: ParseStream) -> Result<Self> { - /// if input.peek(Token![union]) && input.peek2(Ident) { - /// input.parse().map(UnionOrMacro::Union) - /// } else { - /// input.parse().map(UnionOrMacro::Macro) - /// } - /// } - /// } - /// ``` - pub fn peek2<T: Peek>(&self, token: T) -> bool { - fn peek2(buffer: &ParseBuffer, peek: fn(Cursor) -> bool) -> bool { - buffer.cursor().skip().map_or(false, peek) - } - - let _ = token; - peek2(self, T::Token::peek) - } - - /// Looks at the third-next token in the parse stream. - pub fn peek3<T: Peek>(&self, token: T) -> bool { - fn peek3(buffer: &ParseBuffer, peek: fn(Cursor) -> bool) -> bool { - buffer - .cursor() - .skip() - .and_then(Cursor::skip) - .map_or(false, peek) - } - - let _ = token; - peek3(self, T::Token::peek) - } - - /// Parses zero or more occurrences of `T` separated by punctuation of type - /// `P`, with optional trailing punctuation. - /// - /// Parsing continues until the end of this parse stream. The entire content - /// of this parse stream must consist of `T` and `P`. 
- /// - /// # Example - /// - /// ``` - /// # use quote::quote; - /// # - /// use syn::{parenthesized, token, Ident, Result, Token, Type}; - /// use syn::parse::{Parse, ParseStream}; - /// use syn::punctuated::Punctuated; - /// - /// // Parse a simplified tuple struct syntax like: - /// // - /// // struct S(A, B); - /// struct TupleStruct { - /// struct_token: Token![struct], - /// ident: Ident, - /// paren_token: token::Paren, - /// fields: Punctuated<Type, Token![,]>, - /// semi_token: Token![;], - /// } - /// - /// impl Parse for TupleStruct { - /// fn parse(input: ParseStream) -> Result<Self> { - /// let content; - /// Ok(TupleStruct { - /// struct_token: input.parse()?, - /// ident: input.parse()?, - /// paren_token: parenthesized!(content in input), - /// fields: content.parse_terminated(Type::parse, Token![,])?, - /// semi_token: input.parse()?, - /// }) - /// } - /// } - /// # - /// # let input = quote! { - /// # struct S(A, B); - /// # }; - /// # syn::parse2::<TupleStruct>(input).unwrap(); - /// ``` - /// - /// # See also - /// - /// If your separator is anything more complicated than an invocation of the - /// `Token!` macro, this method won't be applicable and you can instead - /// directly use `Punctuated`'s parser functions: [`parse_terminated`], - /// [`parse_separated_nonempty`] etc. - /// - /// [`parse_terminated`]: Punctuated::parse_terminated - /// [`parse_separated_nonempty`]: Punctuated::parse_separated_nonempty - /// - /// ``` - /// use syn::{custom_keyword, Expr, Result, Token}; - /// use syn::parse::{Parse, ParseStream}; - /// use syn::punctuated::Punctuated; - /// - /// mod kw { - /// syn::custom_keyword!(fin); - /// } - /// - /// struct Fin(kw::fin, Token![;]); - /// - /// impl Parse for Fin { - /// fn parse(input: ParseStream) -> Result<Self> { - /// Ok(Self(input.parse()?, input.parse()?)) - /// } - /// } - /// - /// struct Thing { - /// steps: Punctuated<Expr, Fin>, - /// } - /// - /// impl Parse for Thing { - /// fn parse(input: ParseStream) -> Result<Self> { - /// # if true { - /// Ok(Thing { - /// steps: Punctuated::parse_terminated(input)?, - /// }) - /// # } else { - /// // or equivalently, this means the same thing: - /// # Ok(Thing { - /// steps: input.call(Punctuated::parse_terminated)?, - /// # }) - /// # } - /// } - /// } - /// ``` - pub fn parse_terminated<T, P>( - &'a self, - parser: fn(ParseStream<'a>) -> Result<T>, - separator: P, - ) -> Result<Punctuated<T, P::Token>> - where - P: Peek, - P::Token: Parse, - { - let _ = separator; - Punctuated::parse_terminated_with(self, parser) - } - - /// Returns whether there are no more tokens remaining to be parsed from - /// this stream. - /// - /// This method returns true upon reaching the end of the content within a - /// set of delimiters, as well as at the end of the tokens provided to the - /// outermost parsing entry point. - /// - /// This is equivalent to - /// <code>.<a href="#method.peek">peek</a>(<a href="struct.End.html">syn::parse::End</a>)</code>. - /// Use `.peek2(End)` or `.peek3(End)` to look for the end of a parse stream - /// further ahead than the current position. - /// - /// # Example - /// - /// ``` - /// use syn::{braced, token, Ident, Item, Result, Token}; - /// use syn::parse::{Parse, ParseStream}; - /// - /// // Parses a Rust `mod m { ... }` containing zero or more items. 
- /// struct Mod { - /// mod_token: Token![mod], - /// name: Ident, - /// brace_token: token::Brace, - /// items: Vec<Item>, - /// } - /// - /// impl Parse for Mod { - /// fn parse(input: ParseStream) -> Result<Self> { - /// let content; - /// Ok(Mod { - /// mod_token: input.parse()?, - /// name: input.parse()?, - /// brace_token: braced!(content in input), - /// items: { - /// let mut items = Vec::new(); - /// while !content.is_empty() { - /// items.push(content.parse()?); - /// } - /// items - /// }, - /// }) - /// } - /// } - /// ``` - pub fn is_empty(&self) -> bool { - self.cursor().eof() - } - - /// Constructs a helper for peeking at the next token in this stream and - /// building an error message if it is not one of a set of expected tokens. - /// - /// # Example - /// - /// ``` - /// use syn::{ConstParam, Ident, Lifetime, LifetimeParam, Result, Token, TypeParam}; - /// use syn::parse::{Parse, ParseStream}; - /// - /// // A generic parameter, a single one of the comma-separated elements inside - /// // angle brackets in: - /// // - /// // fn f<T: Clone, 'a, 'b: 'a, const N: usize>() { ... } - /// // - /// // On invalid input, lookahead gives us a reasonable error message. - /// // - /// // error: expected one of: identifier, lifetime, `const` - /// // | - /// // 5 | fn f<!Sized>() {} - /// // | ^ - /// enum GenericParam { - /// Type(TypeParam), - /// Lifetime(LifetimeParam), - /// Const(ConstParam), - /// } - /// - /// impl Parse for GenericParam { - /// fn parse(input: ParseStream) -> Result<Self> { - /// let lookahead = input.lookahead1(); - /// if lookahead.peek(Ident) { - /// input.parse().map(GenericParam::Type) - /// } else if lookahead.peek(Lifetime) { - /// input.parse().map(GenericParam::Lifetime) - /// } else if lookahead.peek(Token![const]) { - /// input.parse().map(GenericParam::Const) - /// } else { - /// Err(lookahead.error()) - /// } - /// } - /// } - /// ``` - pub fn lookahead1(&self) -> Lookahead1<'a> { - lookahead::new(self.scope, self.cursor()) - } - - /// Forks a parse stream so that parsing tokens out of either the original - /// or the fork does not advance the position of the other. - /// - /// # Performance - /// - /// Forking a parse stream is a cheap fixed amount of work and does not - /// involve copying token buffers. Where you might hit performance problems - /// is if your macro ends up parsing a large amount of content more than - /// once. - /// - /// ``` - /// # use syn::{Expr, Result}; - /// # use syn::parse::ParseStream; - /// # - /// # fn bad(input: ParseStream) -> Result<Expr> { - /// // Do not do this. - /// if input.fork().parse::<Expr>().is_ok() { - /// return input.parse::<Expr>(); - /// } - /// # unimplemented!() - /// # } - /// ``` - /// - /// As a rule, avoid parsing an unbounded amount of tokens out of a forked - /// parse stream. Only use a fork when the amount of work performed against - /// the fork is small and bounded. - /// - /// When complex speculative parsing against the forked stream is - /// unavoidable, use [`parse::discouraged::Speculative`] to advance the - /// original stream once the fork's parse is determined to have been - /// successful. - /// - /// For a lower level way to perform speculative parsing at the token level, - /// consider using [`ParseStream::step`] instead. - /// - /// [`parse::discouraged::Speculative`]: discouraged::Speculative - /// [`ParseStream::step`]: ParseBuffer::step - /// - /// # Example - /// - /// The parse implementation shown here parses possibly restricted `pub` - /// visibilities. 
- /// - /// - `pub` - /// - `pub(crate)` - /// - `pub(self)` - /// - `pub(super)` - /// - `pub(in some::path)` - /// - /// To handle the case of visibilities inside of tuple structs, the parser - /// needs to distinguish parentheses that specify visibility restrictions - /// from parentheses that form part of a tuple type. - /// - /// ``` - /// # struct A; - /// # struct B; - /// # struct C; - /// # - /// struct S(pub(crate) A, pub (B, C)); - /// ``` - /// - /// In this example input the first tuple struct element of `S` has - /// `pub(crate)` visibility while the second tuple struct element has `pub` - /// visibility; the parentheses around `(B, C)` are part of the type rather - /// than part of a visibility restriction. - /// - /// The parser uses a forked parse stream to check the first token inside of - /// parentheses after the `pub` keyword. This is a small bounded amount of - /// work performed against the forked parse stream. - /// - /// ``` - /// use syn::{parenthesized, token, Ident, Path, Result, Token}; - /// use syn::ext::IdentExt; - /// use syn::parse::{Parse, ParseStream}; - /// - /// struct PubVisibility { - /// pub_token: Token![pub], - /// restricted: Option<Restricted>, - /// } - /// - /// struct Restricted { - /// paren_token: token::Paren, - /// in_token: Option<Token![in]>, - /// path: Path, - /// } - /// - /// impl Parse for PubVisibility { - /// fn parse(input: ParseStream) -> Result<Self> { - /// let pub_token: Token![pub] = input.parse()?; - /// - /// if input.peek(token::Paren) { - /// let ahead = input.fork(); - /// let mut content; - /// parenthesized!(content in ahead); - /// - /// if content.peek(Token![crate]) - /// || content.peek(Token![self]) - /// || content.peek(Token![super]) - /// { - /// return Ok(PubVisibility { - /// pub_token, - /// restricted: Some(Restricted { - /// paren_token: parenthesized!(content in input), - /// in_token: None, - /// path: Path::from(content.call(Ident::parse_any)?), - /// }), - /// }); - /// } else if content.peek(Token![in]) { - /// return Ok(PubVisibility { - /// pub_token, - /// restricted: Some(Restricted { - /// paren_token: parenthesized!(content in input), - /// in_token: Some(content.parse()?), - /// path: content.call(Path::parse_mod_style)?, - /// }), - /// }); - /// } - /// } - /// - /// Ok(PubVisibility { - /// pub_token, - /// restricted: None, - /// }) - /// } - /// } - /// ``` - pub fn fork(&self) -> Self { - ParseBuffer { - scope: self.scope, - cell: self.cell.clone(), - marker: PhantomData, - // Not the parent's unexpected. Nothing cares whether the clone - // parses all the way unless we `advance_to`. - unexpected: Cell::new(Some(Rc::new(Cell::new(Unexpected::None)))), - } - } - - /// Triggers an error at the current position of the parse stream. - /// - /// # Example - /// - /// ``` - /// use syn::{Expr, Result, Token}; - /// use syn::parse::{Parse, ParseStream}; - /// - /// // Some kind of loop: `while` or `for` or `loop`. 
- /// struct Loop { - /// expr: Expr, - /// } - /// - /// impl Parse for Loop { - /// fn parse(input: ParseStream) -> Result<Self> { - /// if input.peek(Token![while]) - /// || input.peek(Token![for]) - /// || input.peek(Token![loop]) - /// { - /// Ok(Loop { - /// expr: input.parse()?, - /// }) - /// } else { - /// Err(input.error("expected some kind of loop")) - /// } - /// } - /// } - /// ``` - pub fn error<T: Display>(&self, message: T) -> Error { - error::new_at(self.scope, self.cursor(), message) - } - - /// Speculatively parses tokens from this parse stream, advancing the - /// position of this stream only if parsing succeeds. - /// - /// This is a powerful low-level API used for defining the `Parse` impls of - /// the basic built-in token types. It is not something that will be used - /// widely outside of the Syn codebase. - /// - /// # Example - /// - /// ``` - /// use proc_macro2::TokenTree; - /// use syn::Result; - /// use syn::parse::ParseStream; - /// - /// // This function advances the stream past the next occurrence of `@`. If - /// // no `@` is present in the stream, the stream position is unchanged and - /// // an error is returned. - /// fn skip_past_next_at(input: ParseStream) -> Result<()> { - /// input.step(|cursor| { - /// let mut rest = *cursor; - /// while let Some((tt, next)) = rest.token_tree() { - /// match &tt { - /// TokenTree::Punct(punct) if punct.as_char() == '@' => { - /// return Ok(((), next)); - /// } - /// _ => rest = next, - /// } - /// } - /// Err(cursor.error("no `@` was found after this point")) - /// }) - /// } - /// # - /// # fn remainder_after_skipping_past_next_at( - /// # input: ParseStream, - /// # ) -> Result<proc_macro2::TokenStream> { - /// # skip_past_next_at(input)?; - /// # input.parse() - /// # } - /// # - /// # use syn::parse::Parser; - /// # let remainder = remainder_after_skipping_past_next_at - /// # .parse_str("a @ b c") - /// # .unwrap(); - /// # assert_eq!(remainder.to_string(), "b c"); - /// ``` - pub fn step<F, R>(&self, function: F) -> Result<R> - where - F: for<'c> FnOnce(StepCursor<'c, 'a>) -> Result<(R, Cursor<'c>)>, - { - // Since the user's function is required to work for any 'c, we know - // that the Cursor<'c> they return is either derived from the input - // StepCursor<'c, 'a> or from a Cursor<'static>. - // - // It would not be legal to write this function without the invariant - // lifetime 'c in StepCursor<'c, 'a>. If this function were written only - // in terms of 'a, the user could take our ParseBuffer<'a>, upcast it to - // a ParseBuffer<'short> which some shorter lifetime than 'a, invoke - // `step` on their ParseBuffer<'short> with a closure that returns - // Cursor<'short>, and we would wrongly write that Cursor<'short> into - // the Cell intended to hold Cursor<'a>. - // - // In some cases it may be necessary for R to contain a Cursor<'a>. - // Within Syn we solve this using `advance_step_cursor` which uses the - // existence of a StepCursor<'c, 'a> as proof that it is safe to cast - // from Cursor<'c> to Cursor<'a>. If needed outside of Syn, it would be - // safe to expose that API as a method on StepCursor. - let (node, rest) = function(StepCursor { - scope: self.scope, - cursor: self.cell.get(), - marker: PhantomData, - })?; - self.cell.set(rest); - Ok(node) - } - - /// Returns the `Span` of the next token in the parse stream, or - /// `Span::call_site()` if this parse stream has completely exhausted its - /// input `TokenStream`. 
- pub fn span(&self) -> Span { - let cursor = self.cursor(); - if cursor.eof() { - self.scope - } else { - crate::buffer::open_span_of_group(cursor) - } - } - - /// Provides low-level access to the token representation underlying this - /// parse stream. - /// - /// Cursors are immutable so no operations you perform against the cursor - /// will affect the state of this parse stream. - /// - /// # Example - /// - /// ``` - /// use proc_macro2::TokenStream; - /// use syn::buffer::Cursor; - /// use syn::parse::{ParseStream, Result}; - /// - /// // Run a parser that returns T, but get its output as TokenStream instead of T. - /// // This works without T needing to implement ToTokens. - /// fn recognize_token_stream<T>( - /// recognizer: fn(ParseStream) -> Result<T>, - /// ) -> impl Fn(ParseStream) -> Result<TokenStream> { - /// move |input| { - /// let begin = input.cursor(); - /// recognizer(input)?; - /// let end = input.cursor(); - /// Ok(tokens_between(begin, end)) - /// } - /// } - /// - /// // Collect tokens between two cursors as a TokenStream. - /// fn tokens_between(begin: Cursor, end: Cursor) -> TokenStream { - /// assert!(begin <= end); - /// - /// let mut cursor = begin; - /// let mut tokens = TokenStream::new(); - /// while cursor < end { - /// let (token, next) = cursor.token_tree().unwrap(); - /// tokens.extend(std::iter::once(token)); - /// cursor = next; - /// } - /// tokens - /// } - /// - /// fn main() { - /// use quote::quote; - /// use syn::parse::{Parse, Parser}; - /// use syn::Token; - /// - /// // Parse syn::Type as a TokenStream, surrounded by angle brackets. - /// fn example(input: ParseStream) -> Result<TokenStream> { - /// let _langle: Token![<] = input.parse()?; - /// let ty = recognize_token_stream(syn::Type::parse)(input)?; - /// let _rangle: Token![>] = input.parse()?; - /// Ok(ty) - /// } - /// - /// let tokens = quote! 
{ <fn() -> u8> }; - /// println!("{}", example.parse2(tokens).unwrap()); - /// } - /// ``` - pub fn cursor(&self) -> Cursor<'a> { - self.cell.get() - } - - fn check_unexpected(&self) -> Result<()> { - match inner_unexpected(self).1 { - Some((span, delimiter)) => Err(err_unexpected_token(span, delimiter)), - None => Ok(()), - } - } -} - -#[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] -impl<T: Parse> Parse for Box<T> { - fn parse(input: ParseStream) -> Result<Self> { - input.parse().map(Box::new) - } -} - -#[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] -impl<T: Parse + Token> Parse for Option<T> { - fn parse(input: ParseStream) -> Result<Self> { - if T::peek(input.cursor()) { - Ok(Some(input.parse()?)) - } else { - Ok(None) - } - } -} - -#[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] -impl Parse for TokenStream { - fn parse(input: ParseStream) -> Result<Self> { - input.step(|cursor| Ok((cursor.token_stream(), Cursor::empty()))) - } -} - -#[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] -impl Parse for TokenTree { - fn parse(input: ParseStream) -> Result<Self> { - input.step(|cursor| match cursor.token_tree() { - Some((tt, rest)) => Ok((tt, rest)), - None => Err(cursor.error("expected token tree")), - }) - } -} - -#[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] -impl Parse for Group { - fn parse(input: ParseStream) -> Result<Self> { - input.step(|cursor| { - if let Some((group, rest)) = cursor.any_group_token() { - if group.delimiter() != Delimiter::None { - return Ok((group, rest)); - } - } - Err(cursor.error("expected group token")) - }) - } -} - -#[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] -impl Parse for Punct { - fn parse(input: ParseStream) -> Result<Self> { - input.step(|cursor| match cursor.punct() { - Some((punct, rest)) => Ok((punct, rest)), - None => Err(cursor.error("expected punctuation token")), - }) - } -} - -#[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] -impl Parse for Literal { - fn parse(input: ParseStream) -> Result<Self> { - input.step(|cursor| match cursor.literal() { - Some((literal, rest)) => Ok((literal, rest)), - None => Err(cursor.error("expected literal token")), - }) - } -} - -/// Parser that can parse Rust tokens into a particular syntax tree node. -/// -/// Refer to the [module documentation] for details about parsing in Syn. -/// -/// [module documentation]: self -pub trait Parser: Sized { - type Output; - - /// Parse a proc-macro2 token stream into the chosen syntax tree node. - /// - /// This function enforces that the input is fully parsed. If there are any - /// unparsed tokens at the end of the stream, an error is returned. - fn parse2(self, tokens: TokenStream) -> Result<Self::Output>; - - /// Parse tokens of source code into the chosen syntax tree node. - /// - /// This function enforces that the input is fully parsed. If there are any - /// unparsed tokens at the end of the stream, an error is returned. - #[cfg(feature = "proc-macro")] - #[cfg_attr(docsrs, doc(cfg(feature = "proc-macro")))] - fn parse(self, tokens: proc_macro::TokenStream) -> Result<Self::Output> { - self.parse2(proc_macro2::TokenStream::from(tokens)) - } - - /// Parse a string of Rust code into the chosen syntax tree node. - /// - /// This function enforces that the input is fully parsed. If there are any - /// unparsed tokens at the end of the string, an error is returned. - /// - /// # Hygiene - /// - /// Every span in the resulting syntax tree will be set to resolve at the - /// macro call site. 
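
For context on the `Parser` trait being deleted here: any `fn(ParseStream) -> Result<T>` receives a blanket `Parser` impl, so standalone parser functions can be driven directly through `parse_str`/`parse2`. A minimal sketch assuming upstream syn 2.x with default features; the `idents` helper name is purely illustrative:

```rust
// Sketch only: a plain function acts as a Parser via the blanket impl shown above.
use syn::parse::{Parse, ParseStream, Parser};
use syn::punctuated::Punctuated;
use syn::{Ident, Result, Token};

// Comma-separated identifiers, optional trailing comma, nothing else permitted.
fn idents(input: ParseStream) -> Result<Punctuated<Ident, Token![,]>> {
    input.parse_terminated(Ident::parse, Token![,])
}

fn main() -> Result<()> {
    // parse_str enforces that the whole input is consumed.
    let parsed = idents.parse_str("a, b, c")?;
    assert_eq!(parsed.len(), 3);
    Ok(())
}
```
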
- fn parse_str(self, s: &str) -> Result<Self::Output> { - self.parse2(proc_macro2::TokenStream::from_str(s)?) - } - - // Not public API. - #[doc(hidden)] - fn __parse_scoped(self, scope: Span, tokens: TokenStream) -> Result<Self::Output> { - let _ = scope; - self.parse2(tokens) - } -} - -fn tokens_to_parse_buffer(tokens: &TokenBuffer) -> ParseBuffer { - let scope = Span::call_site(); - let cursor = tokens.begin(); - let unexpected = Rc::new(Cell::new(Unexpected::None)); - new_parse_buffer(scope, cursor, unexpected) -} - -impl<F, T> Parser for F -where - F: FnOnce(ParseStream) -> Result<T>, -{ - type Output = T; - - fn parse2(self, tokens: TokenStream) -> Result<T> { - let buf = TokenBuffer::new2(tokens); - let state = tokens_to_parse_buffer(&buf); - let node = self(&state)?; - state.check_unexpected()?; - if let Some((unexpected_span, delimiter)) = - span_of_unexpected_ignoring_nones(state.cursor()) - { - Err(err_unexpected_token(unexpected_span, delimiter)) - } else { - Ok(node) - } - } - - fn __parse_scoped(self, scope: Span, tokens: TokenStream) -> Result<Self::Output> { - let buf = TokenBuffer::new2(tokens); - let cursor = buf.begin(); - let unexpected = Rc::new(Cell::new(Unexpected::None)); - let state = new_parse_buffer(scope, cursor, unexpected); - let node = self(&state)?; - state.check_unexpected()?; - if let Some((unexpected_span, delimiter)) = - span_of_unexpected_ignoring_nones(state.cursor()) - { - Err(err_unexpected_token(unexpected_span, delimiter)) - } else { - Ok(node) - } - } -} - -pub(crate) fn parse_scoped<F: Parser>(f: F, scope: Span, tokens: TokenStream) -> Result<F::Output> { - f.__parse_scoped(scope, tokens) -} - -fn err_unexpected_token(span: Span, delimiter: Delimiter) -> Error { - let msg = match delimiter { - Delimiter::Parenthesis => "unexpected token, expected `)`", - Delimiter::Brace => "unexpected token, expected `}`", - Delimiter::Bracket => "unexpected token, expected `]`", - Delimiter::None => "unexpected token", - }; - Error::new(span, msg) -} - -/// An empty syntax tree node that consumes no tokens when parsed. -/// -/// This is useful for attribute macros that want to ensure they are not -/// provided any attribute args. -/// -/// ``` -/// # extern crate proc_macro; -/// # -/// use proc_macro::TokenStream; -/// use syn::parse_macro_input; -/// use syn::parse::Nothing; -/// -/// # const IGNORE: &str = stringify! { -/// #[proc_macro_attribute] -/// # }; -/// pub fn my_attr(args: TokenStream, input: TokenStream) -> TokenStream { -/// parse_macro_input!(args as Nothing); -/// -/// /* ... 
*/ -/// # TokenStream::new() -/// } -/// ``` -/// -/// ```text -/// error: unexpected token -/// --> src/main.rs:3:19 -/// | -/// 3 | #[my_attr(asdf)] -/// | ^^^^ -/// ``` -pub struct Nothing; - -impl Parse for Nothing { - fn parse(_input: ParseStream) -> Result<Self> { - Ok(Nothing) - } -} - -#[cfg(feature = "printing")] -#[cfg_attr(docsrs, doc(cfg(feature = "printing")))] -impl ToTokens for Nothing { - fn to_tokens(&self, tokens: &mut TokenStream) { - let _ = tokens; - } -} - -#[cfg(feature = "clone-impls")] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for Nothing { - fn clone(&self) -> Self { - *self - } -} - -#[cfg(feature = "clone-impls")] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Copy for Nothing {} - -#[cfg(feature = "extra-traits")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for Nothing { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.write_str("Nothing") - } -} - -#[cfg(feature = "extra-traits")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Eq for Nothing {} - -#[cfg(feature = "extra-traits")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for Nothing { - fn eq(&self, _other: &Self) -> bool { - true - } -} - -#[cfg(feature = "extra-traits")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Hash for Nothing { - fn hash<H: Hasher>(&self, _state: &mut H) {} -} diff --git a/vendor/syn/src/parse_macro_input.rs b/vendor/syn/src/parse_macro_input.rs deleted file mode 100644 index f0660aedd7dff9..00000000000000 --- a/vendor/syn/src/parse_macro_input.rs +++ /dev/null @@ -1,128 +0,0 @@ -/// Parse the input TokenStream of a macro, triggering a compile error if the -/// tokens fail to parse. -/// -/// Refer to the [`parse` module] documentation for more details about parsing -/// in Syn. -/// -/// [`parse` module]: mod@crate::parse -/// -/// <br> -/// -/// # Intended usage -/// -/// This macro must be called from a function that returns -/// `proc_macro::TokenStream`. Usually this will be your proc macro entry point, -/// the function that has the #\[proc_macro\] / #\[proc_macro_derive\] / -/// #\[proc_macro_attribute\] attribute. -/// -/// ``` -/// # extern crate proc_macro; -/// # -/// use proc_macro::TokenStream; -/// use syn::{parse_macro_input, Result}; -/// use syn::parse::{Parse, ParseStream}; -/// -/// struct MyMacroInput { -/// /* ... */ -/// } -/// -/// impl Parse for MyMacroInput { -/// fn parse(input: ParseStream) -> Result<Self> { -/// /* ... */ -/// # Ok(MyMacroInput {}) -/// } -/// } -/// -/// # const IGNORE: &str = stringify! { -/// #[proc_macro] -/// # }; -/// pub fn my_macro(tokens: TokenStream) -> TokenStream { -/// let input = parse_macro_input!(tokens as MyMacroInput); -/// -/// /* ... */ -/// # TokenStream::new() -/// } -/// ``` -/// -/// <br> -/// -/// # Usage with Parser -/// -/// This macro can also be used with the [`Parser` trait] for types that have -/// multiple ways that they can be parsed. -/// -/// [`Parser` trait]: crate::parse::Parser -/// -/// ``` -/// # extern crate proc_macro; -/// # -/// # use proc_macro::TokenStream; -/// # use syn::{parse_macro_input, Result}; -/// # use syn::parse::ParseStream; -/// # -/// # struct MyMacroInput {} -/// # -/// impl MyMacroInput { -/// fn parse_alternate(input: ParseStream) -> Result<Self> { -/// /* ... */ -/// # Ok(MyMacroInput {}) -/// } -/// } -/// -/// # const IGNORE: &str = stringify! 
{ -/// #[proc_macro] -/// # }; -/// pub fn my_macro(tokens: TokenStream) -> TokenStream { -/// let input = parse_macro_input!(tokens with MyMacroInput::parse_alternate); -/// -/// /* ... */ -/// # TokenStream::new() -/// } -/// ``` -/// -/// <br> -/// -/// # Expansion -/// -/// `parse_macro_input!($variable as $Type)` expands to something like: -/// -/// ```no_run -/// # extern crate proc_macro; -/// # -/// # macro_rules! doc_test { -/// # ($variable:ident as $Type:ty) => { -/// match syn::parse::<$Type>($variable) { -/// Ok(syntax_tree) => syntax_tree, -/// Err(err) => return proc_macro::TokenStream::from(err.to_compile_error()), -/// } -/// # }; -/// # } -/// # -/// # fn test(input: proc_macro::TokenStream) -> proc_macro::TokenStream { -/// # let _ = doc_test!(input as syn::Ident); -/// # proc_macro::TokenStream::new() -/// # } -/// ``` -#[macro_export] -#[cfg_attr(docsrs, doc(cfg(all(feature = "parsing", feature = "proc-macro"))))] -macro_rules! parse_macro_input { - ($tokenstream:ident as $ty:ty) => { - match $crate::parse::<$ty>($tokenstream) { - $crate::__private::Ok(data) => data, - $crate::__private::Err(err) => { - return $crate::__private::TokenStream::from(err.to_compile_error()); - } - } - }; - ($tokenstream:ident with $parser:path) => { - match $crate::parse::Parser::parse($parser, $tokenstream) { - $crate::__private::Ok(data) => data, - $crate::__private::Err(err) => { - return $crate::__private::TokenStream::from(err.to_compile_error()); - } - } - }; - ($tokenstream:ident) => { - $crate::parse_macro_input!($tokenstream as _) - }; -} diff --git a/vendor/syn/src/parse_quote.rs b/vendor/syn/src/parse_quote.rs deleted file mode 100644 index 2db20597c43682..00000000000000 --- a/vendor/syn/src/parse_quote.rs +++ /dev/null @@ -1,240 +0,0 @@ -/// Quasi-quotation macro that accepts input like the [`quote!`] macro but uses -/// type inference to figure out a return type for those tokens. -/// -/// [`quote!`]: https://docs.rs/quote/1.0/quote/index.html -/// -/// The return type can be any syntax tree node that implements the [`Parse`] -/// trait. -/// -/// [`Parse`]: crate::parse::Parse -/// -/// ``` -/// use quote::quote; -/// use syn::{parse_quote, Stmt}; -/// -/// fn main() { -/// let name = quote!(v); -/// let ty = quote!(u8); -/// -/// let stmt: Stmt = parse_quote! { -/// let #name: #ty = Default::default(); -/// }; -/// -/// println!("{:#?}", stmt); -/// } -/// ``` -/// -/// *This macro is available only if Syn is built with both the `"parsing"` and -/// `"printing"` features.* -/// -/// # Example -/// -/// The following helper function adds a bound `T: HeapSize` to every type -/// parameter `T` in the input generics. -/// -/// ``` -/// use syn::{parse_quote, Generics, GenericParam}; -/// -/// // Add a bound `T: HeapSize` to every type parameter T. -/// fn add_trait_bounds(mut generics: Generics) -> Generics { -/// for param in &mut generics.params { -/// if let GenericParam::Type(type_param) = param { -/// type_param.bounds.push(parse_quote!(HeapSize)); -/// } -/// } -/// generics -/// } -/// ``` -/// -/// # Special cases -/// -/// This macro can parse the following additional types as a special case even -/// though they do not implement the `Parse` trait. 
-/// -/// - [`Attribute`] — parses one attribute, allowing either outer like `#[...]` -/// or inner like `#![...]` -/// - [`Vec<Attribute>`] — parses multiple attributes, including mixed kinds in -/// any order -/// - [`Punctuated<T, P>`] — parses zero or more `T` separated by punctuation -/// `P` with optional trailing punctuation -/// - [`Vec<Arm>`] — parses arms separated by optional commas according to the -/// same grammar as the inside of a `match` expression -/// - [`Vec<Stmt>`] — parses the same as `Block::parse_within` -/// - [`Pat`], [`Box<Pat>`] — parses the same as -/// `Pat::parse_multi_with_leading_vert` -/// - [`Field`] — parses a named or unnamed struct field -/// -/// [`Vec<Attribute>`]: Attribute -/// [`Vec<Arm>`]: Arm -/// [`Vec<Stmt>`]: Block::parse_within -/// [`Pat`]: Pat::parse_multi_with_leading_vert -/// [`Box<Pat>`]: Pat::parse_multi_with_leading_vert -/// -/// # Panics -/// -/// Panics if the tokens fail to parse as the expected syntax tree type. The -/// caller is responsible for ensuring that the input tokens are syntactically -/// valid. -#[cfg_attr(docsrs, doc(cfg(all(feature = "parsing", feature = "printing"))))] -#[macro_export] -macro_rules! parse_quote { - ($($tt:tt)*) => { - $crate::__private::parse_quote($crate::__private::quote::quote!($($tt)*)) - }; -} - -/// This macro is [`parse_quote!`] + [`quote_spanned!`][quote::quote_spanned]. -/// -/// Please refer to each of their documentation. -/// -/// # Example -/// -/// ``` -/// use quote::{quote, quote_spanned}; -/// use syn::spanned::Spanned; -/// use syn::{parse_quote_spanned, ReturnType, Signature}; -/// -/// // Changes `fn()` to `fn() -> Pin<Box<dyn Future<Output = ()>>>`, -/// // and `fn() -> T` to `fn() -> Pin<Box<dyn Future<Output = T>>>`, -/// // without introducing any call_site() spans. -/// fn make_ret_pinned_future(sig: &mut Signature) { -/// let ret = match &sig.output { -/// ReturnType::Default => quote_spanned!(sig.paren_token.span=> ()), -/// ReturnType::Type(_, ret) => quote!(#ret), -/// }; -/// sig.output = parse_quote_spanned! {ret.span()=> -/// -> ::std::pin::Pin<::std::boxed::Box<dyn ::std::future::Future<Output = #ret>>> -/// }; -/// } -/// ``` -#[cfg_attr(docsrs, doc(cfg(all(feature = "parsing", feature = "printing"))))] -#[macro_export] -macro_rules! parse_quote_spanned { - ($span:expr=> $($tt:tt)*) => { - $crate::__private::parse_quote($crate::__private::quote::quote_spanned!($span=> $($tt)*)) - }; -} - -//////////////////////////////////////////////////////////////////////////////// -// Can parse any type that implements Parse. - -use crate::error::Result; -use crate::parse::{Parse, ParseStream, Parser}; -use proc_macro2::TokenStream; - -// Not public API. -#[doc(hidden)] -#[track_caller] -pub fn parse<T: ParseQuote>(token_stream: TokenStream) -> T { - let parser = T::parse; - match parser.parse2(token_stream) { - Ok(t) => t, - Err(err) => panic!("{}", err), - } -} - -#[doc(hidden)] -pub trait ParseQuote: Sized { - fn parse(input: ParseStream) -> Result<Self>; -} - -impl<T: Parse> ParseQuote for T { - fn parse(input: ParseStream) -> Result<Self> { - <T as Parse>::parse(input) - } -} - -//////////////////////////////////////////////////////////////////////////////// -// Any other types that we want `parse_quote!` to be able to parse. 
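
The removed `parse_quote.rs` wires up the `ParseQuote` special cases listed above. A small illustration of the resulting behavior, again assuming upstream syn 2.x with default features and not part of the patch itself:

```rust
// Sketch only: parse_quote! infers its output type, including the special
// cases handled by the deleted ParseQuote impls (Vec<Attribute>, Punctuated, ...).
use syn::punctuated::Punctuated;
use syn::{parse_quote, Attribute, Token, Type};

fn main() {
    // Outer and inner attributes may be mixed in any order here; there is no
    // ordinary `Parse` impl for Vec<Attribute>, only the ParseQuote special case.
    let attrs: Vec<Attribute> = parse_quote! {
        #[inline]
        #![allow(dead_code)]
    };
    assert_eq!(attrs.len(), 2);

    // Punctuated with optional trailing punctuation, via ParseQuote.
    let tys: Punctuated<Type, Token![,]> = parse_quote!(u8, Vec<String>, ());
    assert_eq!(tys.len(), 3);
}
```
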
- -use crate::punctuated::Punctuated; -#[cfg(any(feature = "full", feature = "derive"))] -use crate::{attr, Attribute, Field, FieldMutability, Ident, Type, Visibility}; -#[cfg(feature = "full")] -use crate::{Arm, Block, Pat, Stmt}; - -#[cfg(any(feature = "full", feature = "derive"))] -impl ParseQuote for Attribute { - fn parse(input: ParseStream) -> Result<Self> { - if input.peek(Token![#]) && input.peek2(Token![!]) { - attr::parsing::single_parse_inner(input) - } else { - attr::parsing::single_parse_outer(input) - } - } -} - -#[cfg(any(feature = "full", feature = "derive"))] -impl ParseQuote for Vec<Attribute> { - fn parse(input: ParseStream) -> Result<Self> { - let mut attrs = Vec::new(); - while !input.is_empty() { - attrs.push(ParseQuote::parse(input)?); - } - Ok(attrs) - } -} - -#[cfg(any(feature = "full", feature = "derive"))] -impl ParseQuote for Field { - fn parse(input: ParseStream) -> Result<Self> { - let attrs = input.call(Attribute::parse_outer)?; - let vis: Visibility = input.parse()?; - - let ident: Option<Ident>; - let colon_token: Option<Token![:]>; - let is_named = input.peek(Ident) && input.peek2(Token![:]) && !input.peek2(Token![::]); - if is_named { - ident = Some(input.parse()?); - colon_token = Some(input.parse()?); - } else { - ident = None; - colon_token = None; - } - - let ty: Type = input.parse()?; - - Ok(Field { - attrs, - vis, - mutability: FieldMutability::None, - ident, - colon_token, - ty, - }) - } -} - -#[cfg(feature = "full")] -impl ParseQuote for Pat { - fn parse(input: ParseStream) -> Result<Self> { - Pat::parse_multi_with_leading_vert(input) - } -} - -#[cfg(feature = "full")] -impl ParseQuote for Box<Pat> { - fn parse(input: ParseStream) -> Result<Self> { - <Pat as ParseQuote>::parse(input).map(Box::new) - } -} - -impl<T: Parse, P: Parse> ParseQuote for Punctuated<T, P> { - fn parse(input: ParseStream) -> Result<Self> { - Self::parse_terminated(input) - } -} - -#[cfg(feature = "full")] -impl ParseQuote for Vec<Stmt> { - fn parse(input: ParseStream) -> Result<Self> { - Block::parse_within(input) - } -} - -#[cfg(feature = "full")] -impl ParseQuote for Vec<Arm> { - fn parse(input: ParseStream) -> Result<Self> { - Arm::parse_multiple(input) - } -} diff --git a/vendor/syn/src/pat.rs b/vendor/syn/src/pat.rs deleted file mode 100644 index 5cc3ff9081a8ad..00000000000000 --- a/vendor/syn/src/pat.rs +++ /dev/null @@ -1,955 +0,0 @@ -use crate::attr::Attribute; -use crate::expr::Member; -use crate::ident::Ident; -use crate::path::{Path, QSelf}; -use crate::punctuated::Punctuated; -use crate::token; -use crate::ty::Type; -use proc_macro2::TokenStream; - -pub use crate::expr::{ - ExprConst as PatConst, ExprLit as PatLit, ExprMacro as PatMacro, ExprPath as PatPath, - ExprRange as PatRange, -}; - -ast_enum_of_structs! { - /// A pattern in a local binding, function signature, match expression, or - /// various other places. - /// - /// # Syntax tree enum - /// - /// This type is a [syntax tree enum]. - /// - /// [syntax tree enum]: crate::expr::Expr#syntax-tree-enums - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - #[non_exhaustive] - pub enum Pat { - /// A const block: `const { ... }`. - Const(PatConst), - - /// A pattern that binds a new variable: `ref mut binding @ SUBPATTERN`. - Ident(PatIdent), - - /// A literal pattern: `0`. - Lit(PatLit), - - /// A macro in pattern position. - Macro(PatMacro), - - /// A pattern that matches any one of a set of cases. - Or(PatOr), - - /// A parenthesized pattern: `(A | B)`. 
- Paren(PatParen), - - /// A path pattern like `Color::Red`, optionally qualified with a - /// self-type. - /// - /// Unqualified path patterns can legally refer to variants, structs, - /// constants or associated constants. Qualified path patterns like - /// `<A>::B::C` and `<A as Trait>::B::C` can only legally refer to - /// associated constants. - Path(PatPath), - - /// A range pattern: `1..=2`. - Range(PatRange), - - /// A reference pattern: `&mut var`. - Reference(PatReference), - - /// The dots in a tuple or slice pattern: `[0, 1, ..]`. - Rest(PatRest), - - /// A dynamically sized slice pattern: `[a, b, ref i @ .., y, z]`. - Slice(PatSlice), - - /// A struct or struct variant pattern: `Variant { x, y, .. }`. - Struct(PatStruct), - - /// A tuple pattern: `(a, b)`. - Tuple(PatTuple), - - /// A tuple struct or tuple variant pattern: `Variant(x, y, .., z)`. - TupleStruct(PatTupleStruct), - - /// A type ascription pattern: `foo: f64`. - Type(PatType), - - /// Tokens in pattern position not interpreted by Syn. - Verbatim(TokenStream), - - /// A pattern that matches any value: `_`. - Wild(PatWild), - - // For testing exhaustiveness in downstream code, use the following idiom: - // - // match pat { - // #![cfg_attr(test, deny(non_exhaustive_omitted_patterns))] - // - // Pat::Box(pat) => {...} - // Pat::Ident(pat) => {...} - // ... - // Pat::Wild(pat) => {...} - // - // _ => { /* some sane fallback */ } - // } - // - // This way we fail your tests but don't break your library when adding - // a variant. You will be notified by a test failure when a variant is - // added, so that you can add code to handle it, but your library will - // continue to compile and work for downstream users in the interim. - } -} - -ast_struct! { - /// A pattern that binds a new variable: `ref mut binding @ SUBPATTERN`. - /// - /// It may also be a unit struct or struct variant (e.g. `None`), or a - /// constant; these cannot be distinguished syntactically. - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - pub struct PatIdent { - pub attrs: Vec<Attribute>, - pub by_ref: Option<Token![ref]>, - pub mutability: Option<Token![mut]>, - pub ident: Ident, - pub subpat: Option<(Token![@], Box<Pat>)>, - } -} - -ast_struct! { - /// A pattern that matches any one of a set of cases. - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - pub struct PatOr { - pub attrs: Vec<Attribute>, - pub leading_vert: Option<Token![|]>, - pub cases: Punctuated<Pat, Token![|]>, - } -} - -ast_struct! { - /// A parenthesized pattern: `(A | B)`. - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - pub struct PatParen { - pub attrs: Vec<Attribute>, - pub paren_token: token::Paren, - pub pat: Box<Pat>, - } -} - -ast_struct! { - /// A reference pattern: `&mut var`. - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - pub struct PatReference { - pub attrs: Vec<Attribute>, - pub and_token: Token![&], - pub mutability: Option<Token![mut]>, - pub pat: Box<Pat>, - } -} - -ast_struct! { - /// The dots in a tuple or slice pattern: `[0, 1, ..]`. - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - pub struct PatRest { - pub attrs: Vec<Attribute>, - pub dot2_token: Token![..], - } -} - -ast_struct! { - /// A dynamically sized slice pattern: `[a, b, ref i @ .., y, z]`. - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - pub struct PatSlice { - pub attrs: Vec<Attribute>, - pub bracket_token: token::Bracket, - pub elems: Punctuated<Pat, Token![,]>, - } -} - -ast_struct! { - /// A struct or struct variant pattern: `Variant { x, y, .. }`. 
- #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - pub struct PatStruct { - pub attrs: Vec<Attribute>, - pub qself: Option<QSelf>, - pub path: Path, - pub brace_token: token::Brace, - pub fields: Punctuated<FieldPat, Token![,]>, - pub rest: Option<PatRest>, - } -} - -ast_struct! { - /// A tuple pattern: `(a, b)`. - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - pub struct PatTuple { - pub attrs: Vec<Attribute>, - pub paren_token: token::Paren, - pub elems: Punctuated<Pat, Token![,]>, - } -} - -ast_struct! { - /// A tuple struct or tuple variant pattern: `Variant(x, y, .., z)`. - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - pub struct PatTupleStruct { - pub attrs: Vec<Attribute>, - pub qself: Option<QSelf>, - pub path: Path, - pub paren_token: token::Paren, - pub elems: Punctuated<Pat, Token![,]>, - } -} - -ast_struct! { - /// A type ascription pattern: `foo: f64`. - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - pub struct PatType { - pub attrs: Vec<Attribute>, - pub pat: Box<Pat>, - pub colon_token: Token![:], - pub ty: Box<Type>, - } -} - -ast_struct! { - /// A pattern that matches any value: `_`. - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - pub struct PatWild { - pub attrs: Vec<Attribute>, - pub underscore_token: Token![_], - } -} - -ast_struct! { - /// A single field in a struct pattern. - /// - /// Patterns like the fields of Foo `{ x, ref y, ref mut z }` are treated - /// the same as `x: x, y: ref y, z: ref mut z` but there is no colon token. - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - pub struct FieldPat { - pub attrs: Vec<Attribute>, - pub member: Member, - pub colon_token: Option<Token![:]>, - pub pat: Box<Pat>, - } -} - -#[cfg(feature = "parsing")] -pub(crate) mod parsing { - use crate::attr::Attribute; - use crate::error::{self, Result}; - use crate::expr::{ - Expr, ExprConst, ExprLit, ExprMacro, ExprPath, ExprRange, Member, RangeLimits, - }; - use crate::ext::IdentExt as _; - use crate::ident::Ident; - use crate::lit::Lit; - use crate::mac::{self, Macro}; - use crate::parse::{Parse, ParseBuffer, ParseStream}; - use crate::pat::{ - FieldPat, Pat, PatIdent, PatOr, PatParen, PatReference, PatRest, PatSlice, PatStruct, - PatTuple, PatTupleStruct, PatType, PatWild, - }; - use crate::path::{self, Path, QSelf}; - use crate::punctuated::Punctuated; - use crate::stmt::Block; - use crate::token; - use crate::verbatim; - use proc_macro2::TokenStream; - - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - impl Pat { - /// Parse a pattern that does _not_ involve `|` at the top level. - /// - /// This parser matches the behavior of the `$:pat_param` macro_rules - /// matcher, and on editions prior to Rust 2021, the behavior of - /// `$:pat`. - /// - /// In Rust syntax, some examples of where this syntax would occur are - /// in the argument pattern of functions and closures. Patterns using - /// `|` are not allowed to occur in these positions. - /// - /// ```compile_fail - /// fn f(Some(_) | None: Option<T>) { - /// let _ = |Some(_) | None: Option<T>| {}; - /// // ^^^^^^^^^^^^^^^^^^^^^^^^^??? 
:( - /// } - /// ``` - /// - /// ```console - /// error: top-level or-patterns are not allowed in function parameters - /// --> src/main.rs:1:6 - /// | - /// 1 | fn f(Some(_) | None: Option<T>) { - /// | ^^^^^^^^^^^^^^ help: wrap the pattern in parentheses: `(Some(_) | None)` - /// ``` - pub fn parse_single(input: ParseStream) -> Result<Self> { - let begin = input.fork(); - let lookahead = input.lookahead1(); - if lookahead.peek(Ident) - && (input.peek2(Token![::]) - || input.peek2(Token![!]) - || input.peek2(token::Brace) - || input.peek2(token::Paren) - || input.peek2(Token![..])) - || input.peek(Token![self]) && input.peek2(Token![::]) - || lookahead.peek(Token![::]) - || lookahead.peek(Token![<]) - || input.peek(Token![Self]) - || input.peek(Token![super]) - || input.peek(Token![crate]) - { - pat_path_or_macro_or_struct_or_range(input) - } else if lookahead.peek(Token![_]) { - input.call(pat_wild).map(Pat::Wild) - } else if input.peek(Token![box]) { - pat_box(begin, input) - } else if input.peek(Token![-]) || lookahead.peek(Lit) || lookahead.peek(Token![const]) - { - pat_lit_or_range(input) - } else if lookahead.peek(Token![ref]) - || lookahead.peek(Token![mut]) - || input.peek(Token![self]) - || input.peek(Ident) - { - input.call(pat_ident).map(Pat::Ident) - } else if lookahead.peek(Token![&]) { - input.call(pat_reference).map(Pat::Reference) - } else if lookahead.peek(token::Paren) { - input.call(pat_paren_or_tuple) - } else if lookahead.peek(token::Bracket) { - input.call(pat_slice).map(Pat::Slice) - } else if lookahead.peek(Token![..]) && !input.peek(Token![...]) { - pat_range_half_open(input) - } else if lookahead.peek(Token![const]) { - input.call(pat_const).map(Pat::Verbatim) - } else { - Err(lookahead.error()) - } - } - - /// Parse a pattern, possibly involving `|`, but not a leading `|`. - pub fn parse_multi(input: ParseStream) -> Result<Self> { - multi_pat_impl(input, None) - } - - /// Parse a pattern, possibly involving `|`, possibly including a - /// leading `|`. - /// - /// This parser matches the behavior of the Rust 2021 edition's `$:pat` - /// macro_rules matcher. - /// - /// In Rust syntax, an example of where this syntax would occur is in - /// the pattern of a `match` arm, where the language permits an optional - /// leading `|`, although it is not idiomatic to write one there in - /// handwritten code. - /// - /// ``` - /// # let wat = None; - /// match wat { - /// | None | Some(false) => {} - /// | Some(true) => {} - /// } - /// ``` - /// - /// The compiler accepts it only to facilitate some situations in - /// macro-generated code where a macro author might need to write: - /// - /// ``` - /// # macro_rules! doc { - /// # ($value:expr, ($($conditions1:pat),*), ($($conditions2:pat),*), $then:expr) => { - /// match $value { - /// $(| $conditions1)* $(| $conditions2)* => $then - /// } - /// # }; - /// # } - /// # - /// # doc!(true, (true), (false), {}); - /// # doc!(true, (), (true, false), {}); - /// # doc!(true, (true, false), (), {}); - /// ``` - /// - /// Expressing the same thing correctly in the case that either one (but - /// not both) of `$conditions1` and `$conditions2` might be empty, - /// without leading `|`, is complex. - /// - /// Use [`Pat::parse_multi`] instead if you are not intending to support - /// macro-generated macro input. 
- pub fn parse_multi_with_leading_vert(input: ParseStream) -> Result<Self> { - let leading_vert: Option<Token![|]> = input.parse()?; - multi_pat_impl(input, leading_vert) - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - impl Parse for PatType { - fn parse(input: ParseStream) -> Result<Self> { - Ok(PatType { - attrs: Vec::new(), - pat: Box::new(Pat::parse_single(input)?), - colon_token: input.parse()?, - ty: input.parse()?, - }) - } - } - - fn multi_pat_impl(input: ParseStream, leading_vert: Option<Token![|]>) -> Result<Pat> { - let mut pat = Pat::parse_single(input)?; - if leading_vert.is_some() - || input.peek(Token![|]) && !input.peek(Token![||]) && !input.peek(Token![|=]) - { - let mut cases = Punctuated::new(); - cases.push_value(pat); - while input.peek(Token![|]) && !input.peek(Token![||]) && !input.peek(Token![|=]) { - let punct = input.parse()?; - cases.push_punct(punct); - let pat = Pat::parse_single(input)?; - cases.push_value(pat); - } - pat = Pat::Or(PatOr { - attrs: Vec::new(), - leading_vert, - cases, - }); - } - Ok(pat) - } - - fn pat_path_or_macro_or_struct_or_range(input: ParseStream) -> Result<Pat> { - let expr_style = true; - let (qself, path) = path::parsing::qpath(input, expr_style)?; - - if qself.is_none() - && input.peek(Token![!]) - && !input.peek(Token![!=]) - && path.is_mod_style() - { - let bang_token: Token![!] = input.parse()?; - let (delimiter, tokens) = mac::parse_delimiter(input)?; - return Ok(Pat::Macro(ExprMacro { - attrs: Vec::new(), - mac: Macro { - path, - bang_token, - delimiter, - tokens, - }, - })); - } - - if input.peek(token::Brace) { - pat_struct(input, qself, path).map(Pat::Struct) - } else if input.peek(token::Paren) { - pat_tuple_struct(input, qself, path).map(Pat::TupleStruct) - } else if input.peek(Token![..]) { - pat_range(input, qself, path) - } else { - Ok(Pat::Path(ExprPath { - attrs: Vec::new(), - qself, - path, - })) - } - } - - fn pat_wild(input: ParseStream) -> Result<PatWild> { - Ok(PatWild { - attrs: Vec::new(), - underscore_token: input.parse()?, - }) - } - - fn pat_box(begin: ParseBuffer, input: ParseStream) -> Result<Pat> { - input.parse::<Token![box]>()?; - Pat::parse_single(input)?; - Ok(Pat::Verbatim(verbatim::between(&begin, input))) - } - - fn pat_ident(input: ParseStream) -> Result<PatIdent> { - Ok(PatIdent { - attrs: Vec::new(), - by_ref: input.parse()?, - mutability: input.parse()?, - ident: { - if input.peek(Token![self]) { - input.call(Ident::parse_any)? - } else { - input.parse()? 
- } - }, - subpat: { - if input.peek(Token![@]) { - let at_token: Token![@] = input.parse()?; - let subpat = Pat::parse_single(input)?; - Some((at_token, Box::new(subpat))) - } else { - None - } - }, - }) - } - - fn pat_tuple_struct( - input: ParseStream, - qself: Option<QSelf>, - path: Path, - ) -> Result<PatTupleStruct> { - let content; - let paren_token = parenthesized!(content in input); - - let mut elems = Punctuated::new(); - while !content.is_empty() { - let value = Pat::parse_multi_with_leading_vert(&content)?; - elems.push_value(value); - if content.is_empty() { - break; - } - let punct = content.parse()?; - elems.push_punct(punct); - } - - Ok(PatTupleStruct { - attrs: Vec::new(), - qself, - path, - paren_token, - elems, - }) - } - - fn pat_struct(input: ParseStream, qself: Option<QSelf>, path: Path) -> Result<PatStruct> { - let content; - let brace_token = braced!(content in input); - - let mut fields = Punctuated::new(); - let mut rest = None; - while !content.is_empty() { - let attrs = content.call(Attribute::parse_outer)?; - if content.peek(Token![..]) { - rest = Some(PatRest { - attrs, - dot2_token: content.parse()?, - }); - break; - } - let mut value = content.call(field_pat)?; - value.attrs = attrs; - fields.push_value(value); - if content.is_empty() { - break; - } - let punct: Token![,] = content.parse()?; - fields.push_punct(punct); - } - - Ok(PatStruct { - attrs: Vec::new(), - qself, - path, - brace_token, - fields, - rest, - }) - } - - fn field_pat(input: ParseStream) -> Result<FieldPat> { - let begin = input.fork(); - let boxed: Option<Token![box]> = input.parse()?; - let by_ref: Option<Token![ref]> = input.parse()?; - let mutability: Option<Token![mut]> = input.parse()?; - - let member = if boxed.is_some() || by_ref.is_some() || mutability.is_some() { - input.parse().map(Member::Named) - } else { - input.parse() - }?; - - if boxed.is_none() && by_ref.is_none() && mutability.is_none() && input.peek(Token![:]) - || !member.is_named() - { - return Ok(FieldPat { - attrs: Vec::new(), - member, - colon_token: Some(input.parse()?), - pat: Box::new(Pat::parse_multi_with_leading_vert(input)?), - }); - } - - let ident = match member { - Member::Named(ident) => ident, - Member::Unnamed(_) => unreachable!(), - }; - - let pat = if boxed.is_some() { - Pat::Verbatim(verbatim::between(&begin, input)) - } else { - Pat::Ident(PatIdent { - attrs: Vec::new(), - by_ref, - mutability, - ident: ident.clone(), - subpat: None, - }) - }; - - Ok(FieldPat { - attrs: Vec::new(), - member: Member::Named(ident), - colon_token: None, - pat: Box::new(pat), - }) - } - - fn pat_range(input: ParseStream, qself: Option<QSelf>, path: Path) -> Result<Pat> { - let limits = RangeLimits::parse_obsolete(input)?; - let end = input.call(pat_range_bound)?; - if let (RangeLimits::Closed(_), None) = (&limits, &end) { - return Err(input.error("expected range upper bound")); - } - Ok(Pat::Range(ExprRange { - attrs: Vec::new(), - start: Some(Box::new(Expr::Path(ExprPath { - attrs: Vec::new(), - qself, - path, - }))), - limits, - end: end.map(PatRangeBound::into_expr), - })) - } - - fn pat_range_half_open(input: ParseStream) -> Result<Pat> { - let limits: RangeLimits = input.parse()?; - let end = input.call(pat_range_bound)?; - if end.is_some() { - Ok(Pat::Range(ExprRange { - attrs: Vec::new(), - start: None, - limits, - end: end.map(PatRangeBound::into_expr), - })) - } else { - match limits { - RangeLimits::HalfOpen(dot2_token) => Ok(Pat::Rest(PatRest { - attrs: Vec::new(), - dot2_token, - })), - 
RangeLimits::Closed(_) => Err(input.error("expected range upper bound")), - } - } - } - - fn pat_paren_or_tuple(input: ParseStream) -> Result<Pat> { - let content; - let paren_token = parenthesized!(content in input); - - let mut elems = Punctuated::new(); - while !content.is_empty() { - let value = Pat::parse_multi_with_leading_vert(&content)?; - if content.is_empty() { - if elems.is_empty() && !matches!(value, Pat::Rest(_)) { - return Ok(Pat::Paren(PatParen { - attrs: Vec::new(), - paren_token, - pat: Box::new(value), - })); - } - elems.push_value(value); - break; - } - elems.push_value(value); - let punct = content.parse()?; - elems.push_punct(punct); - } - - Ok(Pat::Tuple(PatTuple { - attrs: Vec::new(), - paren_token, - elems, - })) - } - - fn pat_reference(input: ParseStream) -> Result<PatReference> { - Ok(PatReference { - attrs: Vec::new(), - and_token: input.parse()?, - mutability: input.parse()?, - pat: Box::new(Pat::parse_single(input)?), - }) - } - - fn pat_lit_or_range(input: ParseStream) -> Result<Pat> { - let start = input.call(pat_range_bound)?.unwrap(); - if input.peek(Token![..]) { - let limits = RangeLimits::parse_obsolete(input)?; - let end = input.call(pat_range_bound)?; - if let (RangeLimits::Closed(_), None) = (&limits, &end) { - return Err(input.error("expected range upper bound")); - } - Ok(Pat::Range(ExprRange { - attrs: Vec::new(), - start: Some(start.into_expr()), - limits, - end: end.map(PatRangeBound::into_expr), - })) - } else { - Ok(start.into_pat()) - } - } - - // Patterns that can appear on either side of a range pattern. - enum PatRangeBound { - Const(ExprConst), - Lit(ExprLit), - Path(ExprPath), - } - - impl PatRangeBound { - fn into_expr(self) -> Box<Expr> { - Box::new(match self { - PatRangeBound::Const(pat) => Expr::Const(pat), - PatRangeBound::Lit(pat) => Expr::Lit(pat), - PatRangeBound::Path(pat) => Expr::Path(pat), - }) - } - - fn into_pat(self) -> Pat { - match self { - PatRangeBound::Const(pat) => Pat::Const(pat), - PatRangeBound::Lit(pat) => Pat::Lit(pat), - PatRangeBound::Path(pat) => Pat::Path(pat), - } - } - } - - fn pat_range_bound(input: ParseStream) -> Result<Option<PatRangeBound>> { - if input.is_empty() - || input.peek(Token![|]) - || input.peek(Token![=]) - || input.peek(Token![:]) && !input.peek(Token![::]) - || input.peek(Token![,]) - || input.peek(Token![;]) - || input.peek(Token![if]) - { - return Ok(None); - } - - let lookahead = input.lookahead1(); - let expr = if lookahead.peek(Lit) { - PatRangeBound::Lit(input.parse()?) - } else if lookahead.peek(Ident) - || lookahead.peek(Token![::]) - || lookahead.peek(Token![<]) - || lookahead.peek(Token![self]) - || lookahead.peek(Token![Self]) - || lookahead.peek(Token![super]) - || lookahead.peek(Token![crate]) - { - PatRangeBound::Path(input.parse()?) - } else if lookahead.peek(Token![const]) { - PatRangeBound::Const(input.parse()?) 
- } else { - return Err(lookahead.error()); - }; - - Ok(Some(expr)) - } - - fn pat_slice(input: ParseStream) -> Result<PatSlice> { - let content; - let bracket_token = bracketed!(content in input); - - let mut elems = Punctuated::new(); - while !content.is_empty() { - let value = Pat::parse_multi_with_leading_vert(&content)?; - match value { - Pat::Range(pat) if pat.start.is_none() || pat.end.is_none() => { - let (start, end) = match pat.limits { - RangeLimits::HalfOpen(dot_dot) => (dot_dot.spans[0], dot_dot.spans[1]), - RangeLimits::Closed(dot_dot_eq) => { - (dot_dot_eq.spans[0], dot_dot_eq.spans[2]) - } - }; - let msg = "range pattern is not allowed unparenthesized inside slice pattern"; - return Err(error::new2(start, end, msg)); - } - _ => {} - } - elems.push_value(value); - if content.is_empty() { - break; - } - let punct = content.parse()?; - elems.push_punct(punct); - } - - Ok(PatSlice { - attrs: Vec::new(), - bracket_token, - elems, - }) - } - - fn pat_const(input: ParseStream) -> Result<TokenStream> { - let begin = input.fork(); - input.parse::<Token![const]>()?; - - let content; - braced!(content in input); - content.call(Attribute::parse_inner)?; - content.call(Block::parse_within)?; - - Ok(verbatim::between(&begin, input)) - } -} - -#[cfg(feature = "printing")] -mod printing { - use crate::attr::FilterAttrs; - use crate::pat::{ - FieldPat, Pat, PatIdent, PatOr, PatParen, PatReference, PatRest, PatSlice, PatStruct, - PatTuple, PatTupleStruct, PatType, PatWild, - }; - use crate::path; - use crate::path::printing::PathStyle; - use proc_macro2::TokenStream; - use quote::{ToTokens, TokenStreamExt as _}; - - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for PatIdent { - fn to_tokens(&self, tokens: &mut TokenStream) { - tokens.append_all(self.attrs.outer()); - self.by_ref.to_tokens(tokens); - self.mutability.to_tokens(tokens); - self.ident.to_tokens(tokens); - if let Some((at_token, subpat)) = &self.subpat { - at_token.to_tokens(tokens); - subpat.to_tokens(tokens); - } - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for PatOr { - fn to_tokens(&self, tokens: &mut TokenStream) { - tokens.append_all(self.attrs.outer()); - self.leading_vert.to_tokens(tokens); - self.cases.to_tokens(tokens); - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for PatParen { - fn to_tokens(&self, tokens: &mut TokenStream) { - tokens.append_all(self.attrs.outer()); - self.paren_token.surround(tokens, |tokens| { - self.pat.to_tokens(tokens); - }); - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for PatReference { - fn to_tokens(&self, tokens: &mut TokenStream) { - tokens.append_all(self.attrs.outer()); - self.and_token.to_tokens(tokens); - self.mutability.to_tokens(tokens); - self.pat.to_tokens(tokens); - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for PatRest { - fn to_tokens(&self, tokens: &mut TokenStream) { - tokens.append_all(self.attrs.outer()); - self.dot2_token.to_tokens(tokens); - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for PatSlice { - fn to_tokens(&self, tokens: &mut TokenStream) { - tokens.append_all(self.attrs.outer()); - self.bracket_token.surround(tokens, |tokens| { - self.elems.to_tokens(tokens); - }); - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for PatStruct { - fn to_tokens(&self, tokens: &mut TokenStream) { - tokens.append_all(self.attrs.outer()); - 
path::printing::print_qpath(tokens, &self.qself, &self.path, PathStyle::Expr); - self.brace_token.surround(tokens, |tokens| { - self.fields.to_tokens(tokens); - // NOTE: We need a comma before the dot2 token if it is present. - if !self.fields.empty_or_trailing() && self.rest.is_some() { - <Token![,]>::default().to_tokens(tokens); - } - self.rest.to_tokens(tokens); - }); - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for PatTuple { - fn to_tokens(&self, tokens: &mut TokenStream) { - tokens.append_all(self.attrs.outer()); - self.paren_token.surround(tokens, |tokens| { - self.elems.to_tokens(tokens); - // If there is only one element, a trailing comma is needed to - // distinguish PatTuple from PatParen, unless this is `(..)` - // which is a tuple pattern even without comma. - if self.elems.len() == 1 - && !self.elems.trailing_punct() - && !matches!(self.elems[0], Pat::Rest { .. }) - { - <Token![,]>::default().to_tokens(tokens); - } - }); - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for PatTupleStruct { - fn to_tokens(&self, tokens: &mut TokenStream) { - tokens.append_all(self.attrs.outer()); - path::printing::print_qpath(tokens, &self.qself, &self.path, PathStyle::Expr); - self.paren_token.surround(tokens, |tokens| { - self.elems.to_tokens(tokens); - }); - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for PatType { - fn to_tokens(&self, tokens: &mut TokenStream) { - tokens.append_all(self.attrs.outer()); - self.pat.to_tokens(tokens); - self.colon_token.to_tokens(tokens); - self.ty.to_tokens(tokens); - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for PatWild { - fn to_tokens(&self, tokens: &mut TokenStream) { - tokens.append_all(self.attrs.outer()); - self.underscore_token.to_tokens(tokens); - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for FieldPat { - fn to_tokens(&self, tokens: &mut TokenStream) { - tokens.append_all(self.attrs.outer()); - if let Some(colon_token) = &self.colon_token { - self.member.to_tokens(tokens); - colon_token.to_tokens(tokens); - } - self.pat.to_tokens(tokens); - } - } -} diff --git a/vendor/syn/src/path.rs b/vendor/syn/src/path.rs deleted file mode 100644 index d2fcb9bc5d28b4..00000000000000 --- a/vendor/syn/src/path.rs +++ /dev/null @@ -1,966 +0,0 @@ -#[cfg(feature = "parsing")] -use crate::error::Result; -use crate::expr::Expr; -use crate::generics::TypeParamBound; -use crate::ident::Ident; -use crate::lifetime::Lifetime; -use crate::punctuated::Punctuated; -use crate::token; -use crate::ty::{ReturnType, Type}; - -ast_struct! { - /// A path at which a named item is exported (e.g. `std::collections::HashMap`). - #[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] - pub struct Path { - pub leading_colon: Option<Token![::]>, - pub segments: Punctuated<PathSegment, Token![::]>, - } -} - -impl<T> From<T> for Path -where - T: Into<PathSegment>, -{ - fn from(segment: T) -> Self { - let mut path = Path { - leading_colon: None, - segments: Punctuated::new(), - }; - path.segments.push_value(segment.into()); - path - } -} - -impl Path { - /// Determines whether this is a path of length 1 equal to the given - /// ident. 
- /// - /// For them to compare equal, it must be the case that: - /// - /// - the path has no leading colon, - /// - the number of path segments is 1, - /// - the first path segment has no angle bracketed or parenthesized - /// path arguments, and - /// - the ident of the first path segment is equal to the given one. - /// - /// # Example - /// - /// ``` - /// use proc_macro2::TokenStream; - /// use syn::{Attribute, Error, Meta, Result}; - /// - /// fn get_serde_meta_item(attr: &Attribute) -> Result<Option<&TokenStream>> { - /// if attr.path().is_ident("serde") { - /// match &attr.meta { - /// Meta::List(meta) => Ok(Some(&meta.tokens)), - /// bad => Err(Error::new_spanned(bad, "unrecognized attribute")), - /// } - /// } else { - /// Ok(None) - /// } - /// } - /// ``` - pub fn is_ident<I>(&self, ident: &I) -> bool - where - I: ?Sized, - Ident: PartialEq<I>, - { - match self.get_ident() { - Some(id) => id == ident, - None => false, - } - } - - /// If this path consists of a single ident, returns the ident. - /// - /// A path is considered an ident if: - /// - /// - the path has no leading colon, - /// - the number of path segments is 1, and - /// - the first path segment has no angle bracketed or parenthesized - /// path arguments. - pub fn get_ident(&self) -> Option<&Ident> { - if self.leading_colon.is_none() - && self.segments.len() == 1 - && self.segments[0].arguments.is_none() - { - Some(&self.segments[0].ident) - } else { - None - } - } - - /// An error if this path is not a single ident, as defined in `get_ident`. - #[cfg(feature = "parsing")] - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - pub fn require_ident(&self) -> Result<&Ident> { - self.get_ident().ok_or_else(|| { - crate::error::new2( - self.segments.first().unwrap().ident.span(), - self.segments.last().unwrap().ident.span(), - "expected this path to be an identifier", - ) - }) - } -} - -ast_struct! { - /// A segment of a path together with any path arguments on that segment. - #[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] - pub struct PathSegment { - pub ident: Ident, - pub arguments: PathArguments, - } -} - -impl<T> From<T> for PathSegment -where - T: Into<Ident>, -{ - fn from(ident: T) -> Self { - PathSegment { - ident: ident.into(), - arguments: PathArguments::None, - } - } -} - -ast_enum! { - /// Angle bracketed or parenthesized arguments of a path segment. - /// - /// ## Angle bracketed - /// - /// The `<'a, T>` in `std::slice::iter<'a, T>`. - /// - /// ## Parenthesized - /// - /// The `(A, B) -> C` in `Fn(A, B) -> C`. - #[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] - pub enum PathArguments { - None, - /// The `<'a, T>` in `std::slice::iter<'a, T>`. - AngleBracketed(AngleBracketedGenericArguments), - /// The `(A, B) -> C` in `Fn(A, B) -> C`. - Parenthesized(ParenthesizedGenericArguments), - } -} - -impl Default for PathArguments { - fn default() -> Self { - PathArguments::None - } -} - -impl PathArguments { - pub fn is_empty(&self) -> bool { - match self { - PathArguments::None => true, - PathArguments::AngleBracketed(bracketed) => bracketed.args.is_empty(), - PathArguments::Parenthesized(_) => false, - } - } - - pub fn is_none(&self) -> bool { - match self { - PathArguments::None => true, - PathArguments::AngleBracketed(_) | PathArguments::Parenthesized(_) => false, - } - } -} - -ast_enum! { - /// An individual generic argument, like `'a`, `T`, or `Item = T`. 
- #[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] - #[non_exhaustive] - pub enum GenericArgument { - /// A lifetime argument. - Lifetime(Lifetime), - /// A type argument. - Type(Type), - /// A const expression. Must be inside of a block. - /// - /// NOTE: Identity expressions are represented as Type arguments, as - /// they are indistinguishable syntactically. - Const(Expr), - /// A binding (equality constraint) on an associated type: the `Item = - /// u8` in `Iterator<Item = u8>`. - AssocType(AssocType), - /// An equality constraint on an associated constant: the `PANIC = - /// false` in `Trait<PANIC = false>`. - AssocConst(AssocConst), - /// An associated type bound: `Iterator<Item: Display>`. - Constraint(Constraint), - } -} - -ast_struct! { - /// Angle bracketed arguments of a path segment: the `<K, V>` in `HashMap<K, - /// V>`. - #[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] - pub struct AngleBracketedGenericArguments { - pub colon2_token: Option<Token![::]>, - pub lt_token: Token![<], - pub args: Punctuated<GenericArgument, Token![,]>, - pub gt_token: Token![>], - } -} - -ast_struct! { - /// A binding (equality constraint) on an associated type: the `Item = u8` - /// in `Iterator<Item = u8>`. - #[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] - pub struct AssocType { - pub ident: Ident, - pub generics: Option<AngleBracketedGenericArguments>, - pub eq_token: Token![=], - pub ty: Type, - } -} - -ast_struct! { - /// An equality constraint on an associated constant: the `PANIC = false` in - /// `Trait<PANIC = false>`. - #[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] - pub struct AssocConst { - pub ident: Ident, - pub generics: Option<AngleBracketedGenericArguments>, - pub eq_token: Token![=], - pub value: Expr, - } -} - -ast_struct! { - /// An associated type bound: `Iterator<Item: Display>`. - #[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] - pub struct Constraint { - pub ident: Ident, - pub generics: Option<AngleBracketedGenericArguments>, - pub colon_token: Token![:], - pub bounds: Punctuated<TypeParamBound, Token![+]>, - } -} - -ast_struct! { - /// Arguments of a function path segment: the `(A, B) -> C` in `Fn(A,B) -> - /// C`. - #[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] - pub struct ParenthesizedGenericArguments { - pub paren_token: token::Paren, - /// `(A, B)` - pub inputs: Punctuated<Type, Token![,]>, - /// `C` - pub output: ReturnType, - } -} - -ast_struct! { - /// The explicit Self type in a qualified path: the `T` in `<T as - /// Display>::fmt`. - /// - /// The actual path, including the trait and the associated item, is stored - /// separately. The `position` field represents the index of the associated - /// item qualified with this Self type. 
- /// - /// ```text - /// <Vec<T> as a::b::Trait>::AssociatedItem - /// ^~~~~~ ~~~~~~~~~~~~~~^ - /// ty position = 3 - /// - /// <Vec<T>>::AssociatedItem - /// ^~~~~~ ^ - /// ty position = 0 - /// ``` - #[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] - pub struct QSelf { - pub lt_token: Token![<], - pub ty: Box<Type>, - pub position: usize, - pub as_token: Option<Token![as]>, - pub gt_token: Token![>], - } -} - -#[cfg(feature = "parsing")] -pub(crate) mod parsing { - use crate::error::Result; - #[cfg(feature = "full")] - use crate::expr::ExprBlock; - use crate::expr::{Expr, ExprPath}; - use crate::ext::IdentExt as _; - #[cfg(feature = "full")] - use crate::generics::TypeParamBound; - use crate::ident::Ident; - use crate::lifetime::Lifetime; - use crate::lit::Lit; - use crate::parse::{Parse, ParseStream}; - #[cfg(feature = "full")] - use crate::path::Constraint; - use crate::path::{ - AngleBracketedGenericArguments, AssocConst, AssocType, GenericArgument, - ParenthesizedGenericArguments, Path, PathArguments, PathSegment, QSelf, - }; - use crate::punctuated::Punctuated; - use crate::token; - use crate::ty::{ReturnType, Type}; - #[cfg(not(feature = "full"))] - use crate::verbatim; - - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - impl Parse for Path { - fn parse(input: ParseStream) -> Result<Self> { - Self::parse_helper(input, false) - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - impl Parse for GenericArgument { - fn parse(input: ParseStream) -> Result<Self> { - if input.peek(Lifetime) && !input.peek2(Token![+]) { - return Ok(GenericArgument::Lifetime(input.parse()?)); - } - - if input.peek(Lit) || input.peek(token::Brace) { - return const_argument(input).map(GenericArgument::Const); - } - - let mut argument: Type = input.parse()?; - - match argument { - Type::Path(mut ty) - if ty.qself.is_none() - && ty.path.leading_colon.is_none() - && ty.path.segments.len() == 1 - && match &ty.path.segments[0].arguments { - PathArguments::None | PathArguments::AngleBracketed(_) => true, - PathArguments::Parenthesized(_) => false, - } => - { - if let Some(eq_token) = input.parse::<Option<Token![=]>>()? { - let segment = ty.path.segments.pop().unwrap().into_value(); - let ident = segment.ident; - let generics = match segment.arguments { - PathArguments::None => None, - PathArguments::AngleBracketed(arguments) => Some(arguments), - PathArguments::Parenthesized(_) => unreachable!(), - }; - return if input.peek(Lit) || input.peek(token::Brace) { - Ok(GenericArgument::AssocConst(AssocConst { - ident, - generics, - eq_token, - value: const_argument(input)?, - })) - } else { - Ok(GenericArgument::AssocType(AssocType { - ident, - generics, - eq_token, - ty: input.parse()?, - })) - }; - } - - #[cfg(feature = "full")] - if let Some(colon_token) = input.parse::<Option<Token![:]>>()? { - let segment = ty.path.segments.pop().unwrap().into_value(); - return Ok(GenericArgument::Constraint(Constraint { - ident: segment.ident, - generics: match segment.arguments { - PathArguments::None => None, - PathArguments::AngleBracketed(arguments) => Some(arguments), - PathArguments::Parenthesized(_) => unreachable!(), - }, - colon_token, - bounds: { - let mut bounds = Punctuated::new(); - loop { - if input.peek(Token![,]) || input.peek(Token![>]) { - break; - } - bounds.push_value({ - let allow_precise_capture = false; - let allow_const = true; - TypeParamBound::parse_single( - input, - allow_precise_capture, - allow_const, - )? 
- }); - if !input.peek(Token![+]) { - break; - } - let punct: Token![+] = input.parse()?; - bounds.push_punct(punct); - } - bounds - }, - })); - } - - argument = Type::Path(ty); - } - _ => {} - } - - Ok(GenericArgument::Type(argument)) - } - } - - pub(crate) fn const_argument(input: ParseStream) -> Result<Expr> { - let lookahead = input.lookahead1(); - - if input.peek(Lit) { - let lit = input.parse()?; - return Ok(Expr::Lit(lit)); - } - - if input.peek(Ident) { - let ident: Ident = input.parse()?; - return Ok(Expr::Path(ExprPath { - attrs: Vec::new(), - qself: None, - path: Path::from(ident), - })); - } - - if input.peek(token::Brace) { - #[cfg(feature = "full")] - { - let block: ExprBlock = input.parse()?; - return Ok(Expr::Block(block)); - } - - #[cfg(not(feature = "full"))] - { - let begin = input.fork(); - let content; - braced!(content in input); - content.parse::<Expr>()?; - let verbatim = verbatim::between(&begin, input); - return Ok(Expr::Verbatim(verbatim)); - } - } - - Err(lookahead.error()) - } - - impl AngleBracketedGenericArguments { - /// Parse `::<…>` with mandatory leading `::`. - /// - /// The ordinary [`Parse`] impl for `AngleBracketedGenericArguments` - /// parses optional leading `::`. - #[cfg(feature = "full")] - #[cfg_attr(docsrs, doc(cfg(all(feature = "parsing", feature = "full"))))] - pub fn parse_turbofish(input: ParseStream) -> Result<Self> { - let colon2_token: Token![::] = input.parse()?; - Self::do_parse(Some(colon2_token), input) - } - - pub(crate) fn do_parse( - colon2_token: Option<Token![::]>, - input: ParseStream, - ) -> Result<Self> { - Ok(AngleBracketedGenericArguments { - colon2_token, - lt_token: input.parse()?, - args: { - let mut args = Punctuated::new(); - loop { - if input.peek(Token![>]) { - break; - } - let value: GenericArgument = input.parse()?; - args.push_value(value); - if input.peek(Token![>]) { - break; - } - let punct: Token![,] = input.parse()?; - args.push_punct(punct); - } - args - }, - gt_token: input.parse()?, - }) - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - impl Parse for AngleBracketedGenericArguments { - fn parse(input: ParseStream) -> Result<Self> { - let colon2_token: Option<Token![::]> = input.parse()?; - Self::do_parse(colon2_token, input) - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - impl Parse for ParenthesizedGenericArguments { - fn parse(input: ParseStream) -> Result<Self> { - let content; - Ok(ParenthesizedGenericArguments { - paren_token: parenthesized!(content in input), - inputs: content.parse_terminated(Type::parse, Token![,])?, - output: input.call(ReturnType::without_plus)?, - }) - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - impl Parse for PathSegment { - fn parse(input: ParseStream) -> Result<Self> { - Self::parse_helper(input, false) - } - } - - impl PathSegment { - fn parse_helper(input: ParseStream, expr_style: bool) -> Result<Self> { - if input.peek(Token![super]) - || input.peek(Token![self]) - || input.peek(Token![crate]) - || cfg!(feature = "full") && input.peek(Token![try]) - { - let ident = input.call(Ident::parse_any)?; - return Ok(PathSegment::from(ident)); - } - - let ident = if input.peek(Token![Self]) { - input.call(Ident::parse_any)? - } else { - input.parse()? 
- }; - - if !expr_style - && input.peek(Token![<]) - && !input.peek(Token![<=]) - && !input.peek(Token![<<=]) - || input.peek(Token![::]) && input.peek3(Token![<]) - { - Ok(PathSegment { - ident, - arguments: PathArguments::AngleBracketed(input.parse()?), - }) - } else { - Ok(PathSegment::from(ident)) - } - } - } - - impl Path { - /// Parse a `Path` containing no path arguments on any of its segments. - /// - /// # Example - /// - /// ``` - /// use syn::{Path, Result, Token}; - /// use syn::parse::{Parse, ParseStream}; - /// - /// // A simplified single `use` statement like: - /// // - /// // use std::collections::HashMap; - /// // - /// // Note that generic parameters are not allowed in a `use` statement - /// // so the following must not be accepted. - /// // - /// // use a::<b>::c; - /// struct SingleUse { - /// use_token: Token![use], - /// path: Path, - /// } - /// - /// impl Parse for SingleUse { - /// fn parse(input: ParseStream) -> Result<Self> { - /// Ok(SingleUse { - /// use_token: input.parse()?, - /// path: input.call(Path::parse_mod_style)?, - /// }) - /// } - /// } - /// ``` - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - pub fn parse_mod_style(input: ParseStream) -> Result<Self> { - Ok(Path { - leading_colon: input.parse()?, - segments: { - let mut segments = Punctuated::new(); - loop { - if !input.peek(Ident) - && !input.peek(Token![super]) - && !input.peek(Token![self]) - && !input.peek(Token![Self]) - && !input.peek(Token![crate]) - { - break; - } - let ident = Ident::parse_any(input)?; - segments.push_value(PathSegment::from(ident)); - if !input.peek(Token![::]) { - break; - } - let punct = input.parse()?; - segments.push_punct(punct); - } - if segments.is_empty() { - return Err(input.parse::<Ident>().unwrap_err()); - } else if segments.trailing_punct() { - return Err(input.error("expected path segment after `::`")); - } - segments - }, - }) - } - - pub(crate) fn parse_helper(input: ParseStream, expr_style: bool) -> Result<Self> { - let mut path = Path { - leading_colon: input.parse()?, - segments: { - let mut segments = Punctuated::new(); - let value = PathSegment::parse_helper(input, expr_style)?; - segments.push_value(value); - segments - }, - }; - Path::parse_rest(input, &mut path, expr_style)?; - Ok(path) - } - - pub(crate) fn parse_rest( - input: ParseStream, - path: &mut Self, - expr_style: bool, - ) -> Result<()> { - while input.peek(Token![::]) && !input.peek3(token::Paren) { - let punct: Token![::] = input.parse()?; - path.segments.push_punct(punct); - let value = PathSegment::parse_helper(input, expr_style)?; - path.segments.push_value(value); - } - Ok(()) - } - - pub(crate) fn is_mod_style(&self) -> bool { - self.segments - .iter() - .all(|segment| segment.arguments.is_none()) - } - } - - pub(crate) fn qpath(input: ParseStream, expr_style: bool) -> Result<(Option<QSelf>, Path)> { - if input.peek(Token![<]) { - let lt_token: Token![<] = input.parse()?; - let this: Type = input.parse()?; - let path = if input.peek(Token![as]) { - let as_token: Token![as] = input.parse()?; - let path: Path = input.parse()?; - Some((as_token, path)) - } else { - None - }; - let gt_token: Token![>] = input.parse()?; - let colon2_token: Token![::] = input.parse()?; - let mut rest = Punctuated::new(); - loop { - let path = PathSegment::parse_helper(input, expr_style)?; - rest.push_value(path); - if !input.peek(Token![::]) { - break; - } - let punct: Token![::] = input.parse()?; - rest.push_punct(punct); - } - let (position, as_token, path) = match path { - Some((as_token, mut 
path)) => { - let pos = path.segments.len(); - path.segments.push_punct(colon2_token); - path.segments.extend(rest.into_pairs()); - (pos, Some(as_token), path) - } - None => { - let path = Path { - leading_colon: Some(colon2_token), - segments: rest, - }; - (0, None, path) - } - }; - let qself = QSelf { - lt_token, - ty: Box::new(this), - position, - as_token, - gt_token, - }; - Ok((Some(qself), path)) - } else { - let path = Path::parse_helper(input, expr_style)?; - Ok((None, path)) - } - } -} - -#[cfg(feature = "printing")] -pub(crate) mod printing { - use crate::generics; - use crate::path::{ - AngleBracketedGenericArguments, AssocConst, AssocType, Constraint, GenericArgument, - ParenthesizedGenericArguments, Path, PathArguments, PathSegment, QSelf, - }; - use crate::print::TokensOrDefault; - #[cfg(feature = "parsing")] - use crate::spanned::Spanned; - #[cfg(feature = "parsing")] - use proc_macro2::Span; - use proc_macro2::TokenStream; - use quote::ToTokens; - use std::cmp; - - pub(crate) enum PathStyle { - Expr, - Mod, - AsWritten, - } - - impl Copy for PathStyle {} - - impl Clone for PathStyle { - fn clone(&self) -> Self { - *self - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for Path { - fn to_tokens(&self, tokens: &mut TokenStream) { - print_path(tokens, self, PathStyle::AsWritten); - } - } - - pub(crate) fn print_path(tokens: &mut TokenStream, path: &Path, style: PathStyle) { - path.leading_colon.to_tokens(tokens); - for segment in path.segments.pairs() { - print_path_segment(tokens, segment.value(), style); - segment.punct().to_tokens(tokens); - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for PathSegment { - fn to_tokens(&self, tokens: &mut TokenStream) { - print_path_segment(tokens, self, PathStyle::AsWritten); - } - } - - fn print_path_segment(tokens: &mut TokenStream, segment: &PathSegment, style: PathStyle) { - segment.ident.to_tokens(tokens); - print_path_arguments(tokens, &segment.arguments, style); - } - - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for PathArguments { - fn to_tokens(&self, tokens: &mut TokenStream) { - print_path_arguments(tokens, self, PathStyle::AsWritten); - } - } - - fn print_path_arguments(tokens: &mut TokenStream, arguments: &PathArguments, style: PathStyle) { - match arguments { - PathArguments::None => {} - PathArguments::AngleBracketed(arguments) => { - print_angle_bracketed_generic_arguments(tokens, arguments, style); - } - PathArguments::Parenthesized(arguments) => { - print_parenthesized_generic_arguments(tokens, arguments, style); - } - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for GenericArgument { - #[allow(clippy::match_same_arms)] - fn to_tokens(&self, tokens: &mut TokenStream) { - match self { - GenericArgument::Lifetime(lt) => lt.to_tokens(tokens), - GenericArgument::Type(ty) => ty.to_tokens(tokens), - GenericArgument::Const(expr) => { - generics::printing::print_const_argument(expr, tokens); - } - GenericArgument::AssocType(assoc) => assoc.to_tokens(tokens), - GenericArgument::AssocConst(assoc) => assoc.to_tokens(tokens), - GenericArgument::Constraint(constraint) => constraint.to_tokens(tokens), - } - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for AngleBracketedGenericArguments { - fn to_tokens(&self, tokens: &mut TokenStream) { - print_angle_bracketed_generic_arguments(tokens, self, PathStyle::AsWritten); - } - } - - pub(crate) fn print_angle_bracketed_generic_arguments( - 
tokens: &mut TokenStream, - arguments: &AngleBracketedGenericArguments, - style: PathStyle, - ) { - if let PathStyle::Mod = style { - return; - } - - conditionally_print_turbofish(tokens, &arguments.colon2_token, style); - arguments.lt_token.to_tokens(tokens); - - // Print lifetimes before types/consts/bindings, regardless of their - // order in args. - let mut trailing_or_empty = true; - for param in arguments.args.pairs() { - match param.value() { - GenericArgument::Lifetime(_) => { - param.to_tokens(tokens); - trailing_or_empty = param.punct().is_some(); - } - GenericArgument::Type(_) - | GenericArgument::Const(_) - | GenericArgument::AssocType(_) - | GenericArgument::AssocConst(_) - | GenericArgument::Constraint(_) => {} - } - } - for param in arguments.args.pairs() { - match param.value() { - GenericArgument::Type(_) - | GenericArgument::Const(_) - | GenericArgument::AssocType(_) - | GenericArgument::AssocConst(_) - | GenericArgument::Constraint(_) => { - if !trailing_or_empty { - <Token![,]>::default().to_tokens(tokens); - } - param.to_tokens(tokens); - trailing_or_empty = param.punct().is_some(); - } - GenericArgument::Lifetime(_) => {} - } - } - - arguments.gt_token.to_tokens(tokens); - } - - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for AssocType { - fn to_tokens(&self, tokens: &mut TokenStream) { - self.ident.to_tokens(tokens); - self.generics.to_tokens(tokens); - self.eq_token.to_tokens(tokens); - self.ty.to_tokens(tokens); - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for AssocConst { - fn to_tokens(&self, tokens: &mut TokenStream) { - self.ident.to_tokens(tokens); - self.generics.to_tokens(tokens); - self.eq_token.to_tokens(tokens); - generics::printing::print_const_argument(&self.value, tokens); - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for Constraint { - fn to_tokens(&self, tokens: &mut TokenStream) { - self.ident.to_tokens(tokens); - self.generics.to_tokens(tokens); - self.colon_token.to_tokens(tokens); - self.bounds.to_tokens(tokens); - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for ParenthesizedGenericArguments { - fn to_tokens(&self, tokens: &mut TokenStream) { - print_parenthesized_generic_arguments(tokens, self, PathStyle::AsWritten); - } - } - - fn print_parenthesized_generic_arguments( - tokens: &mut TokenStream, - arguments: &ParenthesizedGenericArguments, - style: PathStyle, - ) { - if let PathStyle::Mod = style { - return; - } - - conditionally_print_turbofish(tokens, &None, style); - arguments.paren_token.surround(tokens, |tokens| { - arguments.inputs.to_tokens(tokens); - }); - arguments.output.to_tokens(tokens); - } - - pub(crate) fn print_qpath( - tokens: &mut TokenStream, - qself: &Option<QSelf>, - path: &Path, - style: PathStyle, - ) { - let qself = match qself { - Some(qself) => qself, - None => { - print_path(tokens, path, style); - return; - } - }; - qself.lt_token.to_tokens(tokens); - qself.ty.to_tokens(tokens); - - let pos = cmp::min(qself.position, path.segments.len()); - let mut segments = path.segments.pairs(); - if pos > 0 { - TokensOrDefault(&qself.as_token).to_tokens(tokens); - path.leading_colon.to_tokens(tokens); - for (i, segment) in segments.by_ref().take(pos).enumerate() { - print_path_segment(tokens, segment.value(), PathStyle::AsWritten); - if i + 1 == pos { - qself.gt_token.to_tokens(tokens); - } - segment.punct().to_tokens(tokens); - } - } else { - qself.gt_token.to_tokens(tokens); - 
path.leading_colon.to_tokens(tokens); - } - for segment in segments { - print_path_segment(tokens, segment.value(), style); - segment.punct().to_tokens(tokens); - } - } - - fn conditionally_print_turbofish( - tokens: &mut TokenStream, - colon2_token: &Option<Token![::]>, - style: PathStyle, - ) { - match style { - PathStyle::Expr => TokensOrDefault(colon2_token).to_tokens(tokens), - PathStyle::Mod => unreachable!(), - PathStyle::AsWritten => colon2_token.to_tokens(tokens), - } - } - - #[cfg(feature = "parsing")] - #[cfg_attr(docsrs, doc(cfg(all(feature = "parsing", feature = "printing"))))] - impl Spanned for QSelf { - fn span(&self) -> Span { - struct QSelfDelimiters<'a>(&'a QSelf); - - impl<'a> ToTokens for QSelfDelimiters<'a> { - fn to_tokens(&self, tokens: &mut TokenStream) { - self.0.lt_token.to_tokens(tokens); - self.0.gt_token.to_tokens(tokens); - } - } - - QSelfDelimiters(self).span() - } - } -} diff --git a/vendor/syn/src/precedence.rs b/vendor/syn/src/precedence.rs deleted file mode 100644 index 1891bfc202fda5..00000000000000 --- a/vendor/syn/src/precedence.rs +++ /dev/null @@ -1,210 +0,0 @@ -#[cfg(all(feature = "printing", feature = "full"))] -use crate::attr::{AttrStyle, Attribute}; -#[cfg(feature = "printing")] -use crate::expr::Expr; -#[cfg(all(feature = "printing", feature = "full"))] -use crate::expr::{ - ExprArray, ExprAsync, ExprAwait, ExprBlock, ExprBreak, ExprCall, ExprConst, ExprContinue, - ExprField, ExprForLoop, ExprGroup, ExprIf, ExprIndex, ExprInfer, ExprLit, ExprLoop, ExprMacro, - ExprMatch, ExprMethodCall, ExprParen, ExprPath, ExprRepeat, ExprReturn, ExprStruct, ExprTry, - ExprTryBlock, ExprTuple, ExprUnsafe, ExprWhile, ExprYield, -}; -use crate::op::BinOp; -#[cfg(all(feature = "printing", feature = "full"))] -use crate::ty::ReturnType; -use std::cmp::Ordering; - -// Reference: https://doc.rust-lang.org/reference/expressions.html#expression-precedence -pub(crate) enum Precedence { - // return, break, closures - Jump, - // = += -= *= /= %= &= |= ^= <<= >>= - Assign, - // .. ..= - Range, - // || - Or, - // && - And, - // let - #[cfg(feature = "printing")] - Let, - // == != < > <= >= - Compare, - // | - BitOr, - // ^ - BitXor, - // & - BitAnd, - // << >> - Shift, - // + - - Sum, - // * / % - Product, - // as - Cast, - // unary - * ! 
& &mut - #[cfg(feature = "printing")] - Prefix, - // paths, loops, function calls, array indexing, field expressions, method calls - #[cfg(feature = "printing")] - Unambiguous, -} - -impl Precedence { - pub(crate) const MIN: Self = Precedence::Jump; - - pub(crate) fn of_binop(op: &BinOp) -> Self { - match op { - BinOp::Add(_) | BinOp::Sub(_) => Precedence::Sum, - BinOp::Mul(_) | BinOp::Div(_) | BinOp::Rem(_) => Precedence::Product, - BinOp::And(_) => Precedence::And, - BinOp::Or(_) => Precedence::Or, - BinOp::BitXor(_) => Precedence::BitXor, - BinOp::BitAnd(_) => Precedence::BitAnd, - BinOp::BitOr(_) => Precedence::BitOr, - BinOp::Shl(_) | BinOp::Shr(_) => Precedence::Shift, - - BinOp::Eq(_) - | BinOp::Lt(_) - | BinOp::Le(_) - | BinOp::Ne(_) - | BinOp::Ge(_) - | BinOp::Gt(_) => Precedence::Compare, - - BinOp::AddAssign(_) - | BinOp::SubAssign(_) - | BinOp::MulAssign(_) - | BinOp::DivAssign(_) - | BinOp::RemAssign(_) - | BinOp::BitXorAssign(_) - | BinOp::BitAndAssign(_) - | BinOp::BitOrAssign(_) - | BinOp::ShlAssign(_) - | BinOp::ShrAssign(_) => Precedence::Assign, - } - } - - #[cfg(feature = "printing")] - pub(crate) fn of(e: &Expr) -> Self { - #[cfg(feature = "full")] - fn prefix_attrs(attrs: &[Attribute]) -> Precedence { - for attr in attrs { - if let AttrStyle::Outer = attr.style { - return Precedence::Prefix; - } - } - Precedence::Unambiguous - } - - match e { - #[cfg(feature = "full")] - Expr::Closure(e) => match e.output { - ReturnType::Default => Precedence::Jump, - ReturnType::Type(..) => prefix_attrs(&e.attrs), - }, - - #[cfg(feature = "full")] - Expr::Break(ExprBreak { expr, .. }) - | Expr::Return(ExprReturn { expr, .. }) - | Expr::Yield(ExprYield { expr, .. }) => match expr { - Some(_) => Precedence::Jump, - None => Precedence::Unambiguous, - }, - - Expr::Assign(_) => Precedence::Assign, - Expr::Range(_) => Precedence::Range, - Expr::Binary(e) => Precedence::of_binop(&e.op), - Expr::Let(_) => Precedence::Let, - Expr::Cast(_) => Precedence::Cast, - Expr::RawAddr(_) | Expr::Reference(_) | Expr::Unary(_) => Precedence::Prefix, - - #[cfg(feature = "full")] - Expr::Array(ExprArray { attrs, .. }) - | Expr::Async(ExprAsync { attrs, .. }) - | Expr::Await(ExprAwait { attrs, .. }) - | Expr::Block(ExprBlock { attrs, .. }) - | Expr::Call(ExprCall { attrs, .. }) - | Expr::Const(ExprConst { attrs, .. }) - | Expr::Continue(ExprContinue { attrs, .. }) - | Expr::Field(ExprField { attrs, .. }) - | Expr::ForLoop(ExprForLoop { attrs, .. }) - | Expr::Group(ExprGroup { attrs, .. }) - | Expr::If(ExprIf { attrs, .. }) - | Expr::Index(ExprIndex { attrs, .. }) - | Expr::Infer(ExprInfer { attrs, .. }) - | Expr::Lit(ExprLit { attrs, .. }) - | Expr::Loop(ExprLoop { attrs, .. }) - | Expr::Macro(ExprMacro { attrs, .. }) - | Expr::Match(ExprMatch { attrs, .. }) - | Expr::MethodCall(ExprMethodCall { attrs, .. }) - | Expr::Paren(ExprParen { attrs, .. }) - | Expr::Path(ExprPath { attrs, .. }) - | Expr::Repeat(ExprRepeat { attrs, .. }) - | Expr::Struct(ExprStruct { attrs, .. }) - | Expr::Try(ExprTry { attrs, .. }) - | Expr::TryBlock(ExprTryBlock { attrs, .. }) - | Expr::Tuple(ExprTuple { attrs, .. }) - | Expr::Unsafe(ExprUnsafe { attrs, .. }) - | Expr::While(ExprWhile { attrs, .. 
}) => prefix_attrs(attrs), - - #[cfg(not(feature = "full"))] - Expr::Array(_) - | Expr::Async(_) - | Expr::Await(_) - | Expr::Block(_) - | Expr::Call(_) - | Expr::Const(_) - | Expr::Continue(_) - | Expr::Field(_) - | Expr::ForLoop(_) - | Expr::Group(_) - | Expr::If(_) - | Expr::Index(_) - | Expr::Infer(_) - | Expr::Lit(_) - | Expr::Loop(_) - | Expr::Macro(_) - | Expr::Match(_) - | Expr::MethodCall(_) - | Expr::Paren(_) - | Expr::Path(_) - | Expr::Repeat(_) - | Expr::Struct(_) - | Expr::Try(_) - | Expr::TryBlock(_) - | Expr::Tuple(_) - | Expr::Unsafe(_) - | Expr::While(_) => Precedence::Unambiguous, - - Expr::Verbatim(_) => Precedence::Unambiguous, - - #[cfg(not(feature = "full"))] - Expr::Break(_) | Expr::Closure(_) | Expr::Return(_) | Expr::Yield(_) => unreachable!(), - } - } -} - -impl Copy for Precedence {} - -impl Clone for Precedence { - fn clone(&self) -> Self { - *self - } -} - -impl PartialEq for Precedence { - fn eq(&self, other: &Self) -> bool { - *self as u8 == *other as u8 - } -} - -impl PartialOrd for Precedence { - fn partial_cmp(&self, other: &Self) -> Option<Ordering> { - let this = *self as u8; - let other = *other as u8; - Some(this.cmp(&other)) - } -} diff --git a/vendor/syn/src/print.rs b/vendor/syn/src/print.rs deleted file mode 100644 index 07409932677bba..00000000000000 --- a/vendor/syn/src/print.rs +++ /dev/null @@ -1,16 +0,0 @@ -use proc_macro2::TokenStream; -use quote::ToTokens; - -pub(crate) struct TokensOrDefault<'a, T: 'a>(pub &'a Option<T>); - -impl<'a, T> ToTokens for TokensOrDefault<'a, T> -where - T: ToTokens + Default, -{ - fn to_tokens(&self, tokens: &mut TokenStream) { - match self.0 { - Some(t) => t.to_tokens(tokens), - None => T::default().to_tokens(tokens), - } - } -} diff --git a/vendor/syn/src/punctuated.rs b/vendor/syn/src/punctuated.rs deleted file mode 100644 index fdefc7d24bed93..00000000000000 --- a/vendor/syn/src/punctuated.rs +++ /dev/null @@ -1,1169 +0,0 @@ -//! A punctuated sequence of syntax tree nodes separated by punctuation. -//! -//! Lots of things in Rust are punctuated sequences. -//! -//! - The fields of a struct are `Punctuated<Field, Token![,]>`. -//! - The segments of a path are `Punctuated<PathSegment, Token![::]>`. -//! - The bounds on a generic parameter are `Punctuated<TypeParamBound, -//! Token![+]>`. -//! - The arguments to a function call are `Punctuated<Expr, Token![,]>`. -//! -//! This module provides a common representation for these punctuated sequences -//! in the form of the [`Punctuated<T, P>`] type. We store a vector of pairs of -//! syntax tree node + punctuation, where every node in the sequence is followed -//! by punctuation except for possibly the final one. -//! -//! [`Punctuated<T, P>`]: Punctuated -//! -//! ```text -//! a_function_call(arg1, arg2, arg3); -//! ~~~~^ ~~~~^ ~~~~ -//! 
``` - -use crate::drops::{NoDrop, TrivialDrop}; -#[cfg(feature = "parsing")] -use crate::error::Result; -#[cfg(feature = "parsing")] -use crate::parse::{Parse, ParseStream}; -#[cfg(feature = "parsing")] -use crate::token::Token; -#[cfg(all(feature = "fold", any(feature = "full", feature = "derive")))] -use std::collections::VecDeque; -#[cfg(feature = "extra-traits")] -use std::fmt::{self, Debug}; -#[cfg(feature = "extra-traits")] -use std::hash::{Hash, Hasher}; -#[cfg(any(feature = "full", feature = "derive"))] -use std::iter; -use std::ops::{Index, IndexMut}; -use std::option; -use std::slice; -use std::vec; - -/// **A punctuated sequence of syntax tree nodes of type `T` separated by -/// punctuation of type `P`.** -/// -/// Refer to the [module documentation] for details about punctuated sequences. -/// -/// [module documentation]: self -pub struct Punctuated<T, P> { - inner: Vec<(T, P)>, - last: Option<Box<T>>, -} - -impl<T, P> Punctuated<T, P> { - /// Creates an empty punctuated sequence. - pub const fn new() -> Self { - Punctuated { - inner: Vec::new(), - last: None, - } - } - - /// Determines whether this punctuated sequence is empty, meaning it - /// contains no syntax tree nodes or punctuation. - pub fn is_empty(&self) -> bool { - self.inner.len() == 0 && self.last.is_none() - } - - /// Returns the number of syntax tree nodes in this punctuated sequence. - /// - /// This is the number of nodes of type `T`, not counting the punctuation of - /// type `P`. - pub fn len(&self) -> usize { - self.inner.len() + if self.last.is_some() { 1 } else { 0 } - } - - /// Borrows the first element in this sequence. - pub fn first(&self) -> Option<&T> { - self.iter().next() - } - - /// Mutably borrows the first element in this sequence. - pub fn first_mut(&mut self) -> Option<&mut T> { - self.iter_mut().next() - } - - /// Borrows the last element in this sequence. - pub fn last(&self) -> Option<&T> { - self.iter().next_back() - } - - /// Mutably borrows the last element in this sequence. - pub fn last_mut(&mut self) -> Option<&mut T> { - self.iter_mut().next_back() - } - - /// Borrows the element at the given index. - pub fn get(&self, index: usize) -> Option<&T> { - if let Some((value, _punct)) = self.inner.get(index) { - Some(value) - } else if index == self.inner.len() { - self.last.as_deref() - } else { - None - } - } - - /// Mutably borrows the element at the given index. - pub fn get_mut(&mut self, index: usize) -> Option<&mut T> { - let inner_len = self.inner.len(); - if let Some((value, _punct)) = self.inner.get_mut(index) { - Some(value) - } else if index == inner_len { - self.last.as_deref_mut() - } else { - None - } - } - - /// Returns an iterator over borrowed syntax tree nodes of type `&T`. - pub fn iter(&self) -> Iter<T> { - Iter { - inner: Box::new(NoDrop::new(PrivateIter { - inner: self.inner.iter(), - last: self.last.as_ref().map(Box::as_ref).into_iter(), - })), - } - } - - /// Returns an iterator over mutably borrowed syntax tree nodes of type - /// `&mut T`. - pub fn iter_mut(&mut self) -> IterMut<T> { - IterMut { - inner: Box::new(NoDrop::new(PrivateIterMut { - inner: self.inner.iter_mut(), - last: self.last.as_mut().map(Box::as_mut).into_iter(), - })), - } - } - - /// Returns an iterator over the contents of this sequence as borrowed - /// punctuated pairs. 
- pub fn pairs(&self) -> Pairs<T, P> { - Pairs { - inner: self.inner.iter(), - last: self.last.as_ref().map(Box::as_ref).into_iter(), - } - } - - /// Returns an iterator over the contents of this sequence as mutably - /// borrowed punctuated pairs. - pub fn pairs_mut(&mut self) -> PairsMut<T, P> { - PairsMut { - inner: self.inner.iter_mut(), - last: self.last.as_mut().map(Box::as_mut).into_iter(), - } - } - - /// Returns an iterator over the contents of this sequence as owned - /// punctuated pairs. - pub fn into_pairs(self) -> IntoPairs<T, P> { - IntoPairs { - inner: self.inner.into_iter(), - last: self.last.map(|t| *t).into_iter(), - } - } - - /// Appends a syntax tree node onto the end of this punctuated sequence. The - /// sequence must already have a trailing punctuation, or be empty. - /// - /// Use [`push`] instead if the punctuated sequence may or may not already - /// have trailing punctuation. - /// - /// [`push`]: Punctuated::push - /// - /// # Panics - /// - /// Panics if the sequence is nonempty and does not already have a trailing - /// punctuation. - pub fn push_value(&mut self, value: T) { - assert!( - self.empty_or_trailing(), - "Punctuated::push_value: cannot push value if Punctuated is missing trailing punctuation", - ); - - self.last = Some(Box::new(value)); - } - - /// Appends a trailing punctuation onto the end of this punctuated sequence. - /// The sequence must be non-empty and must not already have trailing - /// punctuation. - /// - /// # Panics - /// - /// Panics if the sequence is empty or already has a trailing punctuation. - pub fn push_punct(&mut self, punctuation: P) { - assert!( - self.last.is_some(), - "Punctuated::push_punct: cannot push punctuation if Punctuated is empty or already has trailing punctuation", - ); - - let last = self.last.take().unwrap(); - self.inner.push((*last, punctuation)); - } - - /// Removes the last punctuated pair from this sequence, or `None` if the - /// sequence is empty. - pub fn pop(&mut self) -> Option<Pair<T, P>> { - if self.last.is_some() { - self.last.take().map(|t| Pair::End(*t)) - } else { - self.inner.pop().map(|(t, p)| Pair::Punctuated(t, p)) - } - } - - /// Removes the trailing punctuation from this punctuated sequence, or - /// `None` if there isn't any. - pub fn pop_punct(&mut self) -> Option<P> { - if self.last.is_some() { - None - } else { - let (t, p) = self.inner.pop()?; - self.last = Some(Box::new(t)); - Some(p) - } - } - - /// Determines whether this punctuated sequence ends with a trailing - /// punctuation. - pub fn trailing_punct(&self) -> bool { - self.last.is_none() && !self.is_empty() - } - - /// Returns true if either this `Punctuated` is empty, or it has a trailing - /// punctuation. - /// - /// Equivalent to `punctuated.is_empty() || punctuated.trailing_punct()`. - pub fn empty_or_trailing(&self) -> bool { - self.last.is_none() - } - - /// Appends a syntax tree node onto the end of this punctuated sequence. - /// - /// If there is not a trailing punctuation in this sequence when this method - /// is called, the default value of punctuation type `P` is inserted before - /// the given value of type `T`. - pub fn push(&mut self, value: T) - where - P: Default, - { - if !self.empty_or_trailing() { - self.push_punct(Default::default()); - } - self.push_value(value); - } - - /// Inserts an element at position `index`. - /// - /// # Panics - /// - /// Panics if `index` is greater than the number of elements previously in - /// this punctuated sequence. 
- pub fn insert(&mut self, index: usize, value: T) - where - P: Default, - { - assert!( - index <= self.len(), - "Punctuated::insert: index out of range", - ); - - if index == self.len() { - self.push(value); - } else { - self.inner.insert(index, (value, Default::default())); - } - } - - /// Clears the sequence of all values and punctuation, making it empty. - pub fn clear(&mut self) { - self.inner.clear(); - self.last = None; - } - - /// Parses zero or more occurrences of `T` separated by punctuation of type - /// `P`, with optional trailing punctuation. - /// - /// Parsing continues until the end of this parse stream. The entire content - /// of this parse stream must consist of `T` and `P`. - #[cfg(feature = "parsing")] - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - pub fn parse_terminated(input: ParseStream) -> Result<Self> - where - T: Parse, - P: Parse, - { - Self::parse_terminated_with(input, T::parse) - } - - /// Parses zero or more occurrences of `T` using the given parse function, - /// separated by punctuation of type `P`, with optional trailing - /// punctuation. - /// - /// Like [`parse_terminated`], the entire content of this stream is expected - /// to be parsed. - /// - /// [`parse_terminated`]: Punctuated::parse_terminated - #[cfg(feature = "parsing")] - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - pub fn parse_terminated_with<'a>( - input: ParseStream<'a>, - parser: fn(ParseStream<'a>) -> Result<T>, - ) -> Result<Self> - where - P: Parse, - { - let mut punctuated = Punctuated::new(); - - loop { - if input.is_empty() { - break; - } - let value = parser(input)?; - punctuated.push_value(value); - if input.is_empty() { - break; - } - let punct = input.parse()?; - punctuated.push_punct(punct); - } - - Ok(punctuated) - } - - /// Parses one or more occurrences of `T` separated by punctuation of type - /// `P`, not accepting trailing punctuation. - /// - /// Parsing continues as long as punctuation `P` is present at the head of - /// the stream. This method returns upon parsing a `T` and observing that it - /// is not followed by a `P`, even if there are remaining tokens in the - /// stream. - #[cfg(feature = "parsing")] - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - pub fn parse_separated_nonempty(input: ParseStream) -> Result<Self> - where - T: Parse, - P: Token + Parse, - { - Self::parse_separated_nonempty_with(input, T::parse) - } - - /// Parses one or more occurrences of `T` using the given parse function, - /// separated by punctuation of type `P`, not accepting trailing - /// punctuation. - /// - /// Like [`parse_separated_nonempty`], may complete early without parsing - /// the entire content of this stream. 
- /// - /// [`parse_separated_nonempty`]: Punctuated::parse_separated_nonempty - #[cfg(feature = "parsing")] - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - pub fn parse_separated_nonempty_with<'a>( - input: ParseStream<'a>, - parser: fn(ParseStream<'a>) -> Result<T>, - ) -> Result<Self> - where - P: Token + Parse, - { - let mut punctuated = Punctuated::new(); - - loop { - let value = parser(input)?; - punctuated.push_value(value); - if !P::peek(input.cursor()) { - break; - } - let punct = input.parse()?; - punctuated.push_punct(punct); - } - - Ok(punctuated) - } -} - -#[cfg(feature = "clone-impls")] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl<T, P> Clone for Punctuated<T, P> -where - T: Clone, - P: Clone, -{ - fn clone(&self) -> Self { - Punctuated { - inner: self.inner.clone(), - last: self.last.clone(), - } - } - - fn clone_from(&mut self, other: &Self) { - self.inner.clone_from(&other.inner); - self.last.clone_from(&other.last); - } -} - -#[cfg(feature = "extra-traits")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl<T, P> Eq for Punctuated<T, P> -where - T: Eq, - P: Eq, -{ -} - -#[cfg(feature = "extra-traits")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl<T, P> PartialEq for Punctuated<T, P> -where - T: PartialEq, - P: PartialEq, -{ - fn eq(&self, other: &Self) -> bool { - let Punctuated { inner, last } = self; - *inner == other.inner && *last == other.last - } -} - -#[cfg(feature = "extra-traits")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl<T, P> Hash for Punctuated<T, P> -where - T: Hash, - P: Hash, -{ - fn hash<H: Hasher>(&self, state: &mut H) { - let Punctuated { inner, last } = self; - inner.hash(state); - last.hash(state); - } -} - -#[cfg(feature = "extra-traits")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl<T: Debug, P: Debug> Debug for Punctuated<T, P> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - let mut list = f.debug_list(); - for (t, p) in &self.inner { - list.entry(t); - list.entry(p); - } - if let Some(last) = &self.last { - list.entry(last); - } - list.finish() - } -} - -impl<T, P> FromIterator<T> for Punctuated<T, P> -where - P: Default, -{ - fn from_iter<I: IntoIterator<Item = T>>(i: I) -> Self { - let mut ret = Punctuated::new(); - ret.extend(i); - ret - } -} - -impl<T, P> Extend<T> for Punctuated<T, P> -where - P: Default, -{ - fn extend<I: IntoIterator<Item = T>>(&mut self, i: I) { - for value in i { - self.push(value); - } - } -} - -impl<T, P> FromIterator<Pair<T, P>> for Punctuated<T, P> { - fn from_iter<I: IntoIterator<Item = Pair<T, P>>>(i: I) -> Self { - let mut ret = Punctuated::new(); - do_extend(&mut ret, i.into_iter()); - ret - } -} - -impl<T, P> Extend<Pair<T, P>> for Punctuated<T, P> -where - P: Default, -{ - fn extend<I: IntoIterator<Item = Pair<T, P>>>(&mut self, i: I) { - if !self.empty_or_trailing() { - self.push_punct(P::default()); - } - do_extend(self, i.into_iter()); - } -} - -fn do_extend<T, P, I>(punctuated: &mut Punctuated<T, P>, i: I) -where - I: Iterator<Item = Pair<T, P>>, -{ - let mut nomore = false; - for pair in i { - if nomore { - panic!("punctuated extended with items after a Pair::End"); - } - match pair { - Pair::Punctuated(a, b) => punctuated.inner.push((a, b)), - Pair::End(a) => { - punctuated.last = Some(Box::new(a)); - nomore = true; - } - } - } -} - -impl<T, P> IntoIterator for Punctuated<T, P> { - type Item = T; - type IntoIter = IntoIter<T>; - - fn into_iter(self) -> Self::IntoIter { - let mut elements = 
Vec::with_capacity(self.len()); - - for (t, _) in self.inner { - elements.push(t); - } - if let Some(t) = self.last { - elements.push(*t); - } - - IntoIter { - inner: elements.into_iter(), - } - } -} - -impl<'a, T, P> IntoIterator for &'a Punctuated<T, P> { - type Item = &'a T; - type IntoIter = Iter<'a, T>; - - fn into_iter(self) -> Self::IntoIter { - Punctuated::iter(self) - } -} - -impl<'a, T, P> IntoIterator for &'a mut Punctuated<T, P> { - type Item = &'a mut T; - type IntoIter = IterMut<'a, T>; - - fn into_iter(self) -> Self::IntoIter { - Punctuated::iter_mut(self) - } -} - -impl<T, P> Default for Punctuated<T, P> { - fn default() -> Self { - Punctuated::new() - } -} - -/// An iterator over borrowed pairs of type `Pair<&T, &P>`. -/// -/// Refer to the [module documentation] for details about punctuated sequences. -/// -/// [module documentation]: self -pub struct Pairs<'a, T: 'a, P: 'a> { - inner: slice::Iter<'a, (T, P)>, - last: option::IntoIter<&'a T>, -} - -impl<'a, T, P> Iterator for Pairs<'a, T, P> { - type Item = Pair<&'a T, &'a P>; - - fn next(&mut self) -> Option<Self::Item> { - self.inner - .next() - .map(|(t, p)| Pair::Punctuated(t, p)) - .or_else(|| self.last.next().map(Pair::End)) - } - - fn size_hint(&self) -> (usize, Option<usize>) { - (self.len(), Some(self.len())) - } -} - -impl<'a, T, P> DoubleEndedIterator for Pairs<'a, T, P> { - fn next_back(&mut self) -> Option<Self::Item> { - self.last - .next() - .map(Pair::End) - .or_else(|| self.inner.next_back().map(|(t, p)| Pair::Punctuated(t, p))) - } -} - -impl<'a, T, P> ExactSizeIterator for Pairs<'a, T, P> { - fn len(&self) -> usize { - self.inner.len() + self.last.len() - } -} - -// No Clone bound on T or P. -impl<'a, T, P> Clone for Pairs<'a, T, P> { - fn clone(&self) -> Self { - Pairs { - inner: self.inner.clone(), - last: self.last.clone(), - } - } -} - -/// An iterator over mutably borrowed pairs of type `Pair<&mut T, &mut P>`. -/// -/// Refer to the [module documentation] for details about punctuated sequences. -/// -/// [module documentation]: self -pub struct PairsMut<'a, T: 'a, P: 'a> { - inner: slice::IterMut<'a, (T, P)>, - last: option::IntoIter<&'a mut T>, -} - -impl<'a, T, P> Iterator for PairsMut<'a, T, P> { - type Item = Pair<&'a mut T, &'a mut P>; - - fn next(&mut self) -> Option<Self::Item> { - self.inner - .next() - .map(|(t, p)| Pair::Punctuated(t, p)) - .or_else(|| self.last.next().map(Pair::End)) - } - - fn size_hint(&self) -> (usize, Option<usize>) { - (self.len(), Some(self.len())) - } -} - -impl<'a, T, P> DoubleEndedIterator for PairsMut<'a, T, P> { - fn next_back(&mut self) -> Option<Self::Item> { - self.last - .next() - .map(Pair::End) - .or_else(|| self.inner.next_back().map(|(t, p)| Pair::Punctuated(t, p))) - } -} - -impl<'a, T, P> ExactSizeIterator for PairsMut<'a, T, P> { - fn len(&self) -> usize { - self.inner.len() + self.last.len() - } -} - -/// An iterator over owned pairs of type `Pair<T, P>`. -/// -/// Refer to the [module documentation] for details about punctuated sequences. 
-/// -/// [module documentation]: self -pub struct IntoPairs<T, P> { - inner: vec::IntoIter<(T, P)>, - last: option::IntoIter<T>, -} - -impl<T, P> Iterator for IntoPairs<T, P> { - type Item = Pair<T, P>; - - fn next(&mut self) -> Option<Self::Item> { - self.inner - .next() - .map(|(t, p)| Pair::Punctuated(t, p)) - .or_else(|| self.last.next().map(Pair::End)) - } - - fn size_hint(&self) -> (usize, Option<usize>) { - (self.len(), Some(self.len())) - } -} - -impl<T, P> DoubleEndedIterator for IntoPairs<T, P> { - fn next_back(&mut self) -> Option<Self::Item> { - self.last - .next() - .map(Pair::End) - .or_else(|| self.inner.next_back().map(|(t, p)| Pair::Punctuated(t, p))) - } -} - -impl<T, P> ExactSizeIterator for IntoPairs<T, P> { - fn len(&self) -> usize { - self.inner.len() + self.last.len() - } -} - -impl<T, P> Clone for IntoPairs<T, P> -where - T: Clone, - P: Clone, -{ - fn clone(&self) -> Self { - IntoPairs { - inner: self.inner.clone(), - last: self.last.clone(), - } - } -} - -/// An iterator over owned values of type `T`. -/// -/// Refer to the [module documentation] for details about punctuated sequences. -/// -/// [module documentation]: self -pub struct IntoIter<T> { - inner: vec::IntoIter<T>, -} - -impl<T> Iterator for IntoIter<T> { - type Item = T; - - fn next(&mut self) -> Option<Self::Item> { - self.inner.next() - } - - fn size_hint(&self) -> (usize, Option<usize>) { - (self.len(), Some(self.len())) - } -} - -impl<T> DoubleEndedIterator for IntoIter<T> { - fn next_back(&mut self) -> Option<Self::Item> { - self.inner.next_back() - } -} - -impl<T> ExactSizeIterator for IntoIter<T> { - fn len(&self) -> usize { - self.inner.len() - } -} - -impl<T> Clone for IntoIter<T> -where - T: Clone, -{ - fn clone(&self) -> Self { - IntoIter { - inner: self.inner.clone(), - } - } -} - -/// An iterator over borrowed values of type `&T`. -/// -/// Refer to the [module documentation] for details about punctuated sequences. -/// -/// [module documentation]: self -pub struct Iter<'a, T: 'a> { - inner: Box<NoDrop<dyn IterTrait<'a, T> + 'a>>, -} - -trait IterTrait<'a, T: 'a>: Iterator<Item = &'a T> + DoubleEndedIterator + ExactSizeIterator { - fn clone_box(&self) -> Box<NoDrop<dyn IterTrait<'a, T> + 'a>>; -} - -struct PrivateIter<'a, T: 'a, P: 'a> { - inner: slice::Iter<'a, (T, P)>, - last: option::IntoIter<&'a T>, -} - -impl<'a, T, P> TrivialDrop for PrivateIter<'a, T, P> -where - slice::Iter<'a, (T, P)>: TrivialDrop, - option::IntoIter<&'a T>: TrivialDrop, -{ -} - -#[cfg(any(feature = "full", feature = "derive"))] -pub(crate) fn empty_punctuated_iter<'a, T>() -> Iter<'a, T> { - Iter { - inner: Box::new(NoDrop::new(iter::empty())), - } -} - -// No Clone bound on T. 
-impl<'a, T> Clone for Iter<'a, T> { - fn clone(&self) -> Self { - Iter { - inner: self.inner.clone_box(), - } - } -} - -impl<'a, T> Iterator for Iter<'a, T> { - type Item = &'a T; - - fn next(&mut self) -> Option<Self::Item> { - self.inner.next() - } - - fn size_hint(&self) -> (usize, Option<usize>) { - (self.len(), Some(self.len())) - } -} - -impl<'a, T> DoubleEndedIterator for Iter<'a, T> { - fn next_back(&mut self) -> Option<Self::Item> { - self.inner.next_back() - } -} - -impl<'a, T> ExactSizeIterator for Iter<'a, T> { - fn len(&self) -> usize { - self.inner.len() - } -} - -impl<'a, T, P> Iterator for PrivateIter<'a, T, P> { - type Item = &'a T; - - fn next(&mut self) -> Option<Self::Item> { - self.inner - .next() - .map(|pair| &pair.0) - .or_else(|| self.last.next()) - } -} - -impl<'a, T, P> DoubleEndedIterator for PrivateIter<'a, T, P> { - fn next_back(&mut self) -> Option<Self::Item> { - self.last - .next() - .or_else(|| self.inner.next_back().map(|pair| &pair.0)) - } -} - -impl<'a, T, P> ExactSizeIterator for PrivateIter<'a, T, P> { - fn len(&self) -> usize { - self.inner.len() + self.last.len() - } -} - -// No Clone bound on T or P. -impl<'a, T, P> Clone for PrivateIter<'a, T, P> { - fn clone(&self) -> Self { - PrivateIter { - inner: self.inner.clone(), - last: self.last.clone(), - } - } -} - -impl<'a, T, I> IterTrait<'a, T> for I -where - T: 'a, - I: DoubleEndedIterator<Item = &'a T> - + ExactSizeIterator<Item = &'a T> - + Clone - + TrivialDrop - + 'a, -{ - fn clone_box(&self) -> Box<NoDrop<dyn IterTrait<'a, T> + 'a>> { - Box::new(NoDrop::new(self.clone())) - } -} - -/// An iterator over mutably borrowed values of type `&mut T`. -/// -/// Refer to the [module documentation] for details about punctuated sequences. -/// -/// [module documentation]: self -pub struct IterMut<'a, T: 'a> { - inner: Box<NoDrop<dyn IterMutTrait<'a, T, Item = &'a mut T> + 'a>>, -} - -trait IterMutTrait<'a, T: 'a>: - DoubleEndedIterator<Item = &'a mut T> + ExactSizeIterator<Item = &'a mut T> -{ -} - -struct PrivateIterMut<'a, T: 'a, P: 'a> { - inner: slice::IterMut<'a, (T, P)>, - last: option::IntoIter<&'a mut T>, -} - -impl<'a, T, P> TrivialDrop for PrivateIterMut<'a, T, P> -where - slice::IterMut<'a, (T, P)>: TrivialDrop, - option::IntoIter<&'a mut T>: TrivialDrop, -{ -} - -#[cfg(any(feature = "full", feature = "derive"))] -pub(crate) fn empty_punctuated_iter_mut<'a, T>() -> IterMut<'a, T> { - IterMut { - inner: Box::new(NoDrop::new(iter::empty())), - } -} - -impl<'a, T> Iterator for IterMut<'a, T> { - type Item = &'a mut T; - - fn next(&mut self) -> Option<Self::Item> { - self.inner.next() - } - - fn size_hint(&self) -> (usize, Option<usize>) { - (self.len(), Some(self.len())) - } -} - -impl<'a, T> DoubleEndedIterator for IterMut<'a, T> { - fn next_back(&mut self) -> Option<Self::Item> { - self.inner.next_back() - } -} - -impl<'a, T> ExactSizeIterator for IterMut<'a, T> { - fn len(&self) -> usize { - self.inner.len() - } -} - -impl<'a, T, P> Iterator for PrivateIterMut<'a, T, P> { - type Item = &'a mut T; - - fn next(&mut self) -> Option<Self::Item> { - self.inner - .next() - .map(|pair| &mut pair.0) - .or_else(|| self.last.next()) - } -} - -impl<'a, T, P> DoubleEndedIterator for PrivateIterMut<'a, T, P> { - fn next_back(&mut self) -> Option<Self::Item> { - self.last - .next() - .or_else(|| self.inner.next_back().map(|pair| &mut pair.0)) - } -} - -impl<'a, T, P> ExactSizeIterator for PrivateIterMut<'a, T, P> { - fn len(&self) -> usize { - self.inner.len() + self.last.len() - } -} - -impl<'a, T, I> 
IterMutTrait<'a, T> for I -where - T: 'a, - I: DoubleEndedIterator<Item = &'a mut T> + ExactSizeIterator<Item = &'a mut T> + 'a, -{ -} - -/// A single syntax tree node of type `T` followed by its trailing punctuation -/// of type `P` if any. -/// -/// Refer to the [module documentation] for details about punctuated sequences. -/// -/// [module documentation]: self -pub enum Pair<T, P> { - Punctuated(T, P), - End(T), -} - -impl<T, P> Pair<T, P> { - /// Extracts the syntax tree node from this punctuated pair, discarding the - /// following punctuation. - pub fn into_value(self) -> T { - match self { - Pair::Punctuated(t, _) | Pair::End(t) => t, - } - } - - /// Borrows the syntax tree node from this punctuated pair. - pub fn value(&self) -> &T { - match self { - Pair::Punctuated(t, _) | Pair::End(t) => t, - } - } - - /// Mutably borrows the syntax tree node from this punctuated pair. - pub fn value_mut(&mut self) -> &mut T { - match self { - Pair::Punctuated(t, _) | Pair::End(t) => t, - } - } - - /// Borrows the punctuation from this punctuated pair, unless this pair is - /// the final one and there is no trailing punctuation. - pub fn punct(&self) -> Option<&P> { - match self { - Pair::Punctuated(_, p) => Some(p), - Pair::End(_) => None, - } - } - - /// Mutably borrows the punctuation from this punctuated pair, unless the - /// pair is the final one and there is no trailing punctuation. - /// - /// # Example - /// - /// ``` - /// # use proc_macro2::Span; - /// # use syn::punctuated::Punctuated; - /// # use syn::{parse_quote, Token, TypeParamBound}; - /// # - /// # let mut punctuated = Punctuated::<TypeParamBound, Token![+]>::new(); - /// # let span = Span::call_site(); - /// # - /// punctuated.insert(0, parse_quote!('lifetime)); - /// if let Some(punct) = punctuated.pairs_mut().next().unwrap().punct_mut() { - /// punct.span = span; - /// } - /// ``` - pub fn punct_mut(&mut self) -> Option<&mut P> { - match self { - Pair::Punctuated(_, p) => Some(p), - Pair::End(_) => None, - } - } - - /// Creates a punctuated pair out of a syntax tree node and an optional - /// following punctuation. - pub fn new(t: T, p: Option<P>) -> Self { - match p { - Some(p) => Pair::Punctuated(t, p), - None => Pair::End(t), - } - } - - /// Produces this punctuated pair as a tuple of syntax tree node and - /// optional following punctuation. 
- pub fn into_tuple(self) -> (T, Option<P>) { - match self { - Pair::Punctuated(t, p) => (t, Some(p)), - Pair::End(t) => (t, None), - } - } -} - -#[cfg(feature = "clone-impls")] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl<T, P> Pair<&T, &P> { - pub fn cloned(self) -> Pair<T, P> - where - T: Clone, - P: Clone, - { - match self { - Pair::Punctuated(t, p) => Pair::Punctuated(t.clone(), p.clone()), - Pair::End(t) => Pair::End(t.clone()), - } - } -} - -#[cfg(feature = "clone-impls")] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl<T, P> Clone for Pair<T, P> -where - T: Clone, - P: Clone, -{ - fn clone(&self) -> Self { - match self { - Pair::Punctuated(t, p) => Pair::Punctuated(t.clone(), p.clone()), - Pair::End(t) => Pair::End(t.clone()), - } - } -} - -#[cfg(feature = "clone-impls")] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl<T, P> Copy for Pair<T, P> -where - T: Copy, - P: Copy, -{ -} - -impl<T, P> Index<usize> for Punctuated<T, P> { - type Output = T; - - fn index(&self, index: usize) -> &Self::Output { - if index.checked_add(1) == Some(self.len()) { - match &self.last { - Some(t) => t, - None => &self.inner[index].0, - } - } else { - &self.inner[index].0 - } - } -} - -impl<T, P> IndexMut<usize> for Punctuated<T, P> { - fn index_mut(&mut self, index: usize) -> &mut Self::Output { - if index.checked_add(1) == Some(self.len()) { - match &mut self.last { - Some(t) => t, - None => &mut self.inner[index].0, - } - } else { - &mut self.inner[index].0 - } - } -} - -#[cfg(all(feature = "fold", any(feature = "full", feature = "derive")))] -pub(crate) fn fold<T, P, V, F>( - punctuated: Punctuated<T, P>, - fold: &mut V, - mut f: F, -) -> Punctuated<T, P> -where - V: ?Sized, - F: FnMut(&mut V, T) -> T, -{ - let Punctuated { inner, last } = punctuated; - - // Convert into VecDeque to prevent needing to allocate a new Vec<(T, P)> - // for the folded elements. - let mut inner = VecDeque::from(inner); - for _ in 0..inner.len() { - if let Some((t, p)) = inner.pop_front() { - inner.push_back((f(fold, t), p)); - } - } - - Punctuated { - inner: Vec::from(inner), - last: match last { - Some(t) => Some(Box::new(f(fold, *t))), - None => None, - }, - } -} - -#[cfg(feature = "printing")] -mod printing { - use crate::punctuated::{Pair, Punctuated}; - use proc_macro2::TokenStream; - use quote::{ToTokens, TokenStreamExt as _}; - - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl<T, P> ToTokens for Punctuated<T, P> - where - T: ToTokens, - P: ToTokens, - { - fn to_tokens(&self, tokens: &mut TokenStream) { - tokens.append_all(self.pairs()); - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl<T, P> ToTokens for Pair<T, P> - where - T: ToTokens, - P: ToTokens, - { - fn to_tokens(&self, tokens: &mut TokenStream) { - match self { - Pair::Punctuated(a, b) => { - a.to_tokens(tokens); - b.to_tokens(tokens); - } - Pair::End(a) => a.to_tokens(tokens), - } - } - } -} diff --git a/vendor/syn/src/restriction.rs b/vendor/syn/src/restriction.rs deleted file mode 100644 index 6e6758f3cd7369..00000000000000 --- a/vendor/syn/src/restriction.rs +++ /dev/null @@ -1,178 +0,0 @@ -use crate::path::Path; -use crate::token; - -ast_enum! { - /// The visibility level of an item: inherited or `pub` or - /// `pub(restricted)`. - /// - /// # Syntax tree enum - /// - /// This type is a [syntax tree enum]. 
- /// - /// [syntax tree enum]: crate::expr::Expr#syntax-tree-enums - #[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] - pub enum Visibility { - /// A public visibility level: `pub`. - Public(Token![pub]), - - /// A visibility level restricted to some path: `pub(self)` or - /// `pub(super)` or `pub(crate)` or `pub(in some::module)`. - Restricted(VisRestricted), - - /// An inherited visibility, which usually means private. - Inherited, - } -} - -ast_struct! { - /// A visibility level restricted to some path: `pub(self)` or - /// `pub(super)` or `pub(crate)` or `pub(in some::module)`. - #[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] - pub struct VisRestricted { - pub pub_token: Token![pub], - pub paren_token: token::Paren, - pub in_token: Option<Token![in]>, - pub path: Box<Path>, - } -} - -ast_enum! { - /// Unused, but reserved for RFC 3323 restrictions. - #[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] - #[non_exhaustive] - pub enum FieldMutability { - None, - - // TODO: https://rust-lang.github.io/rfcs/3323-restrictions.html - // - // FieldMutability::Restricted(MutRestricted) - // - // pub struct MutRestricted { - // pub mut_token: Token![mut], - // pub paren_token: token::Paren, - // pub in_token: Option<Token![in]>, - // pub path: Box<Path>, - // } - } -} - -#[cfg(feature = "parsing")] -pub(crate) mod parsing { - use crate::error::Result; - use crate::ext::IdentExt as _; - use crate::ident::Ident; - use crate::parse::discouraged::Speculative as _; - use crate::parse::{Parse, ParseStream}; - use crate::path::Path; - use crate::restriction::{VisRestricted, Visibility}; - use crate::token; - - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - impl Parse for Visibility { - fn parse(input: ParseStream) -> Result<Self> { - // Recognize an empty None-delimited group, as produced by a $:vis - // matcher that matched no tokens. - if input.peek(token::Group) { - let ahead = input.fork(); - let group = crate::group::parse_group(&ahead)?; - if group.content.is_empty() { - input.advance_to(&ahead); - return Ok(Visibility::Inherited); - } - } - - if input.peek(Token![pub]) { - Self::parse_pub(input) - } else { - Ok(Visibility::Inherited) - } - } - } - - impl Visibility { - fn parse_pub(input: ParseStream) -> Result<Self> { - let pub_token = input.parse::<Token![pub]>()?; - - if input.peek(token::Paren) { - let ahead = input.fork(); - - let content; - let paren_token = parenthesized!(content in ahead); - if content.peek(Token![crate]) - || content.peek(Token![self]) - || content.peek(Token![super]) - { - let path = content.call(Ident::parse_any)?; - - // Ensure there are no additional tokens within `content`. - // Without explicitly checking, we may misinterpret a tuple - // field as a restricted visibility, causing a parse error. - // e.g. `pub (crate::A, crate::B)` (Issue #720). 
- if content.is_empty() { - input.advance_to(&ahead); - return Ok(Visibility::Restricted(VisRestricted { - pub_token, - paren_token, - in_token: None, - path: Box::new(Path::from(path)), - })); - } - } else if content.peek(Token![in]) { - let in_token: Token![in] = content.parse()?; - let path = content.call(Path::parse_mod_style)?; - - input.advance_to(&ahead); - return Ok(Visibility::Restricted(VisRestricted { - pub_token, - paren_token, - in_token: Some(in_token), - path: Box::new(path), - })); - } - } - - Ok(Visibility::Public(pub_token)) - } - - #[cfg(feature = "full")] - pub(crate) fn is_some(&self) -> bool { - match self { - Visibility::Inherited => false, - _ => true, - } - } - } -} - -#[cfg(feature = "printing")] -mod printing { - use crate::path; - use crate::path::printing::PathStyle; - use crate::restriction::{VisRestricted, Visibility}; - use proc_macro2::TokenStream; - use quote::ToTokens; - - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for Visibility { - fn to_tokens(&self, tokens: &mut TokenStream) { - match self { - Visibility::Public(pub_token) => pub_token.to_tokens(tokens), - Visibility::Restricted(vis_restricted) => vis_restricted.to_tokens(tokens), - Visibility::Inherited => {} - } - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for VisRestricted { - fn to_tokens(&self, tokens: &mut TokenStream) { - self.pub_token.to_tokens(tokens); - self.paren_token.surround(tokens, |tokens| { - // TODO: If we have a path which is not "self" or "super" or - // "crate", automatically add the "in" token. - self.in_token.to_tokens(tokens); - path::printing::print_path(tokens, &self.path, PathStyle::Mod); - }); - } - } -} diff --git a/vendor/syn/src/scan_expr.rs b/vendor/syn/src/scan_expr.rs deleted file mode 100644 index a3a0416cb09731..00000000000000 --- a/vendor/syn/src/scan_expr.rs +++ /dev/null @@ -1,268 +0,0 @@ -use self::{Action::*, Input::*}; -use proc_macro2::{Delimiter, Ident, Spacing, TokenTree}; -use syn::parse::{ParseStream, Result}; -#[allow(unused_imports)] -//#[cfg_attr(not(test), expect(unused_imports))] // Rust 1.81+ -use syn::Token; -use syn::{AngleBracketedGenericArguments, BinOp, Expr, ExprPath, Lifetime, Lit, Type}; - -enum Input { - Keyword(&'static str), - Punct(&'static str), - ConsumeAny, - ConsumeBinOp, - ConsumeBrace, - ConsumeDelimiter, - ConsumeIdent, - ConsumeLifetime, - ConsumeLiteral, - ConsumeNestedBrace, - ExpectPath, - ExpectTurbofish, - ExpectType, - CanBeginExpr, - Otherwise, - Empty, -} - -enum Action { - SetState(&'static [(Input, Action)]), - IncDepth, - DecDepth, - Finish, -} - -static INIT: [(Input, Action); 28] = [ - (ConsumeDelimiter, SetState(&POSTFIX)), - (Keyword("async"), SetState(&ASYNC)), - (Keyword("break"), SetState(&BREAK_LABEL)), - (Keyword("const"), SetState(&CONST)), - (Keyword("continue"), SetState(&CONTINUE)), - (Keyword("for"), SetState(&FOR)), - (Keyword("if"), IncDepth), - (Keyword("let"), SetState(&PATTERN)), - (Keyword("loop"), SetState(&BLOCK)), - (Keyword("match"), IncDepth), - (Keyword("move"), SetState(&CLOSURE)), - (Keyword("return"), SetState(&RETURN)), - (Keyword("static"), SetState(&CLOSURE)), - (Keyword("unsafe"), SetState(&BLOCK)), - (Keyword("while"), IncDepth), - (Keyword("yield"), SetState(&RETURN)), - (Keyword("_"), SetState(&POSTFIX)), - (Punct("!"), SetState(&INIT)), - (Punct("#"), SetState(&[(ConsumeDelimiter, SetState(&INIT))])), - (Punct("&"), SetState(&REFERENCE)), - (Punct("*"), SetState(&INIT)), - (Punct("-"), SetState(&INIT)), - (Punct("..="), 
SetState(&INIT)), - (Punct(".."), SetState(&RANGE)), - (Punct("|"), SetState(&CLOSURE_ARGS)), - (ConsumeLifetime, SetState(&[(Punct(":"), SetState(&INIT))])), - (ConsumeLiteral, SetState(&POSTFIX)), - (ExpectPath, SetState(&PATH)), -]; - -static POSTFIX: [(Input, Action); 10] = [ - (Keyword("as"), SetState(&[(ExpectType, SetState(&POSTFIX))])), - (Punct("..="), SetState(&INIT)), - (Punct(".."), SetState(&RANGE)), - (Punct("."), SetState(&DOT)), - (Punct("?"), SetState(&POSTFIX)), - (ConsumeBinOp, SetState(&INIT)), - (Punct("="), SetState(&INIT)), - (ConsumeNestedBrace, SetState(&IF_THEN)), - (ConsumeDelimiter, SetState(&POSTFIX)), - (Empty, Finish), -]; - -static ASYNC: [(Input, Action); 3] = [ - (Keyword("move"), SetState(&ASYNC)), - (Punct("|"), SetState(&CLOSURE_ARGS)), - (ConsumeBrace, SetState(&POSTFIX)), -]; - -static BLOCK: [(Input, Action); 1] = [(ConsumeBrace, SetState(&POSTFIX))]; - -static BREAK_LABEL: [(Input, Action); 2] = [ - (ConsumeLifetime, SetState(&BREAK_VALUE)), - (Otherwise, SetState(&BREAK_VALUE)), -]; - -static BREAK_VALUE: [(Input, Action); 3] = [ - (ConsumeNestedBrace, SetState(&IF_THEN)), - (CanBeginExpr, SetState(&INIT)), - (Otherwise, SetState(&POSTFIX)), -]; - -static CLOSURE: [(Input, Action); 7] = [ - (Keyword("async"), SetState(&CLOSURE)), - (Keyword("move"), SetState(&CLOSURE)), - (Punct(","), SetState(&CLOSURE)), - (Punct(">"), SetState(&CLOSURE)), - (Punct("|"), SetState(&CLOSURE_ARGS)), - (ConsumeLifetime, SetState(&CLOSURE)), - (ConsumeIdent, SetState(&CLOSURE)), -]; - -static CLOSURE_ARGS: [(Input, Action); 2] = [ - (Punct("|"), SetState(&CLOSURE_RET)), - (ConsumeAny, SetState(&CLOSURE_ARGS)), -]; - -static CLOSURE_RET: [(Input, Action); 2] = [ - (Punct("->"), SetState(&[(ExpectType, SetState(&BLOCK))])), - (Otherwise, SetState(&INIT)), -]; - -static CONST: [(Input, Action); 2] = [ - (Punct("|"), SetState(&CLOSURE_ARGS)), - (ConsumeBrace, SetState(&POSTFIX)), -]; - -static CONTINUE: [(Input, Action); 2] = [ - (ConsumeLifetime, SetState(&POSTFIX)), - (Otherwise, SetState(&POSTFIX)), -]; - -static DOT: [(Input, Action); 3] = [ - (Keyword("await"), SetState(&POSTFIX)), - (ConsumeIdent, SetState(&METHOD)), - (ConsumeLiteral, SetState(&POSTFIX)), -]; - -static FOR: [(Input, Action); 2] = [ - (Punct("<"), SetState(&CLOSURE)), - (Otherwise, SetState(&PATTERN)), -]; - -static IF_ELSE: [(Input, Action); 2] = [(Keyword("if"), SetState(&INIT)), (ConsumeBrace, DecDepth)]; -static IF_THEN: [(Input, Action); 2] = - [(Keyword("else"), SetState(&IF_ELSE)), (Otherwise, DecDepth)]; - -static METHOD: [(Input, Action); 1] = [(ExpectTurbofish, SetState(&POSTFIX))]; - -static PATH: [(Input, Action); 4] = [ - (Punct("!="), SetState(&INIT)), - (Punct("!"), SetState(&INIT)), - (ConsumeNestedBrace, SetState(&IF_THEN)), - (Otherwise, SetState(&POSTFIX)), -]; - -static PATTERN: [(Input, Action); 15] = [ - (ConsumeDelimiter, SetState(&PATTERN)), - (Keyword("box"), SetState(&PATTERN)), - (Keyword("in"), IncDepth), - (Keyword("mut"), SetState(&PATTERN)), - (Keyword("ref"), SetState(&PATTERN)), - (Keyword("_"), SetState(&PATTERN)), - (Punct("!"), SetState(&PATTERN)), - (Punct("&"), SetState(&PATTERN)), - (Punct("..="), SetState(&PATTERN)), - (Punct(".."), SetState(&PATTERN)), - (Punct("="), SetState(&INIT)), - (Punct("@"), SetState(&PATTERN)), - (Punct("|"), SetState(&PATTERN)), - (ConsumeLiteral, SetState(&PATTERN)), - (ExpectPath, SetState(&PATTERN)), -]; - -static RANGE: [(Input, Action); 6] = [ - (Punct("..="), SetState(&INIT)), - (Punct(".."), SetState(&RANGE)), - (Punct("."), 
SetState(&DOT)), - (ConsumeNestedBrace, SetState(&IF_THEN)), - (Empty, Finish), - (Otherwise, SetState(&INIT)), -]; - -static RAW: [(Input, Action); 3] = [ - (Keyword("const"), SetState(&INIT)), - (Keyword("mut"), SetState(&INIT)), - (Otherwise, SetState(&POSTFIX)), -]; - -static REFERENCE: [(Input, Action); 3] = [ - (Keyword("mut"), SetState(&INIT)), - (Keyword("raw"), SetState(&RAW)), - (Otherwise, SetState(&INIT)), -]; - -static RETURN: [(Input, Action); 2] = [ - (CanBeginExpr, SetState(&INIT)), - (Otherwise, SetState(&POSTFIX)), -]; - -pub(crate) fn scan_expr(input: ParseStream) -> Result<()> { - let mut state = INIT.as_slice(); - let mut depth = 0usize; - 'table: loop { - for rule in state { - if match rule.0 { - Input::Keyword(expected) => input.step(|cursor| match cursor.ident() { - Some((ident, rest)) if ident == expected => Ok((true, rest)), - _ => Ok((false, *cursor)), - })?, - Input::Punct(expected) => input.step(|cursor| { - let begin = *cursor; - let mut cursor = begin; - for (i, ch) in expected.chars().enumerate() { - match cursor.punct() { - Some((punct, _)) if punct.as_char() != ch => break, - Some((_, rest)) if i == expected.len() - 1 => { - return Ok((true, rest)); - } - Some((punct, rest)) if punct.spacing() == Spacing::Joint => { - cursor = rest; - } - _ => break, - } - } - Ok((false, begin)) - })?, - Input::ConsumeAny => input.parse::<Option<TokenTree>>()?.is_some(), - Input::ConsumeBinOp => input.parse::<BinOp>().is_ok(), - Input::ConsumeBrace | Input::ConsumeNestedBrace => { - (matches!(rule.0, Input::ConsumeBrace) || depth > 0) - && input.step(|cursor| match cursor.group(Delimiter::Brace) { - Some((_inside, _span, rest)) => Ok((true, rest)), - None => Ok((false, *cursor)), - })? - } - Input::ConsumeDelimiter => input.step(|cursor| match cursor.any_group() { - Some((_inside, _delimiter, _span, rest)) => Ok((true, rest)), - None => Ok((false, *cursor)), - })?, - Input::ConsumeIdent => input.parse::<Option<Ident>>()?.is_some(), - Input::ConsumeLifetime => input.parse::<Option<Lifetime>>()?.is_some(), - Input::ConsumeLiteral => input.parse::<Option<Lit>>()?.is_some(), - Input::ExpectPath => { - input.parse::<ExprPath>()?; - true - } - Input::ExpectTurbofish => { - if input.peek(Token![::]) { - input.parse::<AngleBracketedGenericArguments>()?; - } - true - } - Input::ExpectType => { - Type::without_plus(input)?; - true - } - Input::CanBeginExpr => Expr::peek(input), - Input::Otherwise => true, - Input::Empty => input.is_empty() || input.peek(Token![,]), - } { - state = match rule.1 { - Action::SetState(next) => next, - Action::IncDepth => (depth += 1, &INIT).1, - Action::DecDepth => (depth -= 1, &POSTFIX).1, - Action::Finish => return if depth == 0 { Ok(()) } else { break }, - }; - continue 'table; - } - } - return Err(input.error("unsupported expression")); - } -} diff --git a/vendor/syn/src/sealed.rs b/vendor/syn/src/sealed.rs deleted file mode 100644 index dc804742d12db0..00000000000000 --- a/vendor/syn/src/sealed.rs +++ /dev/null @@ -1,4 +0,0 @@ -#[cfg(feature = "parsing")] -pub(crate) mod lookahead { - pub trait Sealed: Copy {} -} diff --git a/vendor/syn/src/span.rs b/vendor/syn/src/span.rs deleted file mode 100644 index eb2779479aaac5..00000000000000 --- a/vendor/syn/src/span.rs +++ /dev/null @@ -1,63 +0,0 @@ -use proc_macro2::extra::DelimSpan; -use proc_macro2::{Delimiter, Group, Span, TokenStream}; - -#[doc(hidden)] -pub trait IntoSpans<S> { - fn into_spans(self) -> S; -} - -impl IntoSpans<Span> for Span { - fn into_spans(self) -> Span { - self - } -} - -impl 
IntoSpans<[Span; 1]> for Span { - fn into_spans(self) -> [Span; 1] { - [self] - } -} - -impl IntoSpans<[Span; 2]> for Span { - fn into_spans(self) -> [Span; 2] { - [self, self] - } -} - -impl IntoSpans<[Span; 3]> for Span { - fn into_spans(self) -> [Span; 3] { - [self, self, self] - } -} - -impl IntoSpans<[Span; 1]> for [Span; 1] { - fn into_spans(self) -> [Span; 1] { - self - } -} - -impl IntoSpans<[Span; 2]> for [Span; 2] { - fn into_spans(self) -> [Span; 2] { - self - } -} - -impl IntoSpans<[Span; 3]> for [Span; 3] { - fn into_spans(self) -> [Span; 3] { - self - } -} - -impl IntoSpans<DelimSpan> for Span { - fn into_spans(self) -> DelimSpan { - let mut group = Group::new(Delimiter::None, TokenStream::new()); - group.set_span(self); - group.delim_span() - } -} - -impl IntoSpans<DelimSpan> for DelimSpan { - fn into_spans(self) -> DelimSpan { - self - } -} diff --git a/vendor/syn/src/spanned.rs b/vendor/syn/src/spanned.rs deleted file mode 100644 index 17b69e9f5b2847..00000000000000 --- a/vendor/syn/src/spanned.rs +++ /dev/null @@ -1,118 +0,0 @@ -//! A trait that can provide the `Span` of the complete contents of a syntax -//! tree node. -//! -//! <br> -//! -//! # Example -//! -//! Suppose in a procedural macro we have a [`Type`] that we want to assert -//! implements the [`Sync`] trait. Maybe this is the type of one of the fields -//! of a struct for which we are deriving a trait implementation, and we need to -//! be able to pass a reference to one of those fields across threads. -//! -//! [`Type`]: crate::Type -//! [`Sync`]: std::marker::Sync -//! -//! If the field type does *not* implement `Sync` as required, we want the -//! compiler to report an error pointing out exactly which type it was. -//! -//! The following macro code takes a variable `ty` of type `Type` and produces a -//! static assertion that `Sync` is implemented for that type. -//! -//! ``` -//! # extern crate proc_macro; -//! # -//! use proc_macro::TokenStream; -//! use proc_macro2::Span; -//! use quote::quote_spanned; -//! use syn::Type; -//! use syn::spanned::Spanned; -//! -//! # const IGNORE_TOKENS: &str = stringify! { -//! #[proc_macro_derive(MyMacro)] -//! # }; -//! pub fn my_macro(input: TokenStream) -> TokenStream { -//! # let ty = get_a_type(); -//! /* ... */ -//! -//! let assert_sync = quote_spanned! {ty.span()=> -//! struct _AssertSync where #ty: Sync; -//! }; -//! -//! /* ... */ -//! # input -//! } -//! # -//! # fn get_a_type() -> Type { -//! # unimplemented!() -//! # } -//! ``` -//! -//! By inserting this `assert_sync` fragment into the output code generated by -//! our macro, the user's code will fail to compile if `ty` does not implement -//! `Sync`. The errors they would see look like the following. -//! -//! ```text -//! error[E0277]: the trait bound `*const i32: std::marker::Sync` is not satisfied -//! --> src/main.rs:10:21 -//! | -//! 10 | bad_field: *const i32, -//! | ^^^^^^^^^^ `*const i32` cannot be shared between threads safely -//! ``` -//! -//! In this technique, using the `Type`'s span for the error message makes the -//! error appear in the correct place underlining the right type. -//! -//! <br> -//! -//! # Limitations -//! -//! The underlying [`proc_macro::Span::join`] method is nightly-only. When -//! called from within a procedural macro in a nightly compiler, `Spanned` will -//! use `join` to produce the intended span. When not using a nightly compiler, -//! only the span of the *first token* of the syntax tree node is returned. -//! -//! 
In the common case of wanting to use the joined span as the span of a -//! `syn::Error`, consider instead using [`syn::Error::new_spanned`] which is -//! able to span the error correctly under the complete syntax tree node without -//! needing the unstable `join`. -//! -//! [`syn::Error::new_spanned`]: crate::Error::new_spanned - -use proc_macro2::Span; -use quote::spanned::Spanned as ToTokens; - -/// A trait that can provide the `Span` of the complete contents of a syntax -/// tree node. -/// -/// This trait is automatically implemented for all types that implement -/// [`ToTokens`] from the `quote` crate, as well as for `Span` itself. -/// -/// [`ToTokens`]: quote::ToTokens -/// -/// See the [module documentation] for an example. -/// -/// [module documentation]: self -pub trait Spanned: private::Sealed { - /// Returns a `Span` covering the complete contents of this syntax tree - /// node, or [`Span::call_site()`] if this node is empty. - /// - /// [`Span::call_site()`]: proc_macro2::Span::call_site - fn span(&self) -> Span; -} - -impl<T: ?Sized + ToTokens> Spanned for T { - fn span(&self) -> Span { - self.__span() - } -} - -mod private { - use crate::spanned::ToTokens; - - pub trait Sealed {} - impl<T: ?Sized + ToTokens> Sealed for T {} - - #[cfg(any(feature = "full", feature = "derive"))] - impl Sealed for crate::QSelf {} -} diff --git a/vendor/syn/src/stmt.rs b/vendor/syn/src/stmt.rs deleted file mode 100644 index 970bc13dc25a1d..00000000000000 --- a/vendor/syn/src/stmt.rs +++ /dev/null @@ -1,484 +0,0 @@ -use crate::attr::Attribute; -use crate::expr::Expr; -use crate::item::Item; -use crate::mac::Macro; -use crate::pat::Pat; -use crate::token; - -ast_struct! { - /// A braced block containing Rust statements. - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - pub struct Block { - pub brace_token: token::Brace, - /// Statements in a block - pub stmts: Vec<Stmt>, - } -} - -ast_enum! { - /// A statement, usually ending in a semicolon. - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - pub enum Stmt { - /// A local (let) binding. - Local(Local), - - /// An item definition. - Item(Item), - - /// Expression, with or without trailing semicolon. - Expr(Expr, Option<Token![;]>), - - /// A macro invocation in statement position. - /// - /// Syntactically it's ambiguous which other kind of statement this - /// macro would expand to. It can be any of local variable (`let`), - /// item, or expression. - Macro(StmtMacro), - } -} - -ast_struct! { - /// A local `let` binding: `let x: u64 = s.parse()?;`. - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - pub struct Local { - pub attrs: Vec<Attribute>, - pub let_token: Token![let], - pub pat: Pat, - pub init: Option<LocalInit>, - pub semi_token: Token![;], - } -} - -ast_struct! { - /// The expression assigned in a local `let` binding, including optional - /// diverging `else` block. - /// - /// `LocalInit` represents `= s.parse()?` in `let x: u64 = s.parse()?` and - /// `= r else { return }` in `let Ok(x) = r else { return }`. - #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - pub struct LocalInit { - pub eq_token: Token![=], - pub expr: Box<Expr>, - pub diverge: Option<(Token![else], Box<Expr>)>, - } -} - -ast_struct! { - /// A macro invocation in statement position. - /// - /// Syntactically it's ambiguous which other kind of statement this macro - /// would expand to. It can be any of local variable (`let`), item, or - /// expression. 
- #[cfg_attr(docsrs, doc(cfg(feature = "full")))] - pub struct StmtMacro { - pub attrs: Vec<Attribute>, - pub mac: Macro, - pub semi_token: Option<Token![;]>, - } -} - -#[cfg(feature = "parsing")] -pub(crate) mod parsing { - use crate::attr::Attribute; - use crate::classify; - use crate::error::Result; - use crate::expr::{Expr, ExprBlock, ExprMacro}; - use crate::ident::Ident; - use crate::item; - use crate::mac::{self, Macro}; - use crate::parse::discouraged::Speculative as _; - use crate::parse::{Parse, ParseStream}; - use crate::pat::{Pat, PatType}; - use crate::path::Path; - use crate::stmt::{Block, Local, LocalInit, Stmt, StmtMacro}; - use crate::token; - use crate::ty::Type; - use proc_macro2::TokenStream; - - struct AllowNoSemi(bool); - - impl Block { - /// Parse the body of a block as zero or more statements, possibly - /// including one trailing expression. - /// - /// # Example - /// - /// ``` - /// use syn::{braced, token, Attribute, Block, Ident, Result, Stmt, Token}; - /// use syn::parse::{Parse, ParseStream}; - /// - /// // Parse a function with no generics or parameter list. - /// // - /// // fn playground { - /// // let mut x = 1; - /// // x += 1; - /// // println!("{}", x); - /// // } - /// struct MiniFunction { - /// attrs: Vec<Attribute>, - /// fn_token: Token![fn], - /// name: Ident, - /// brace_token: token::Brace, - /// stmts: Vec<Stmt>, - /// } - /// - /// impl Parse for MiniFunction { - /// fn parse(input: ParseStream) -> Result<Self> { - /// let outer_attrs = input.call(Attribute::parse_outer)?; - /// let fn_token: Token![fn] = input.parse()?; - /// let name: Ident = input.parse()?; - /// - /// let content; - /// let brace_token = braced!(content in input); - /// let inner_attrs = content.call(Attribute::parse_inner)?; - /// let stmts = content.call(Block::parse_within)?; - /// - /// Ok(MiniFunction { - /// attrs: { - /// let mut attrs = outer_attrs; - /// attrs.extend(inner_attrs); - /// attrs - /// }, - /// fn_token, - /// name, - /// brace_token, - /// stmts, - /// }) - /// } - /// } - /// ``` - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - pub fn parse_within(input: ParseStream) -> Result<Vec<Stmt>> { - let mut stmts = Vec::new(); - loop { - while let semi @ Some(_) = input.parse()? 
{ - stmts.push(Stmt::Expr(Expr::Verbatim(TokenStream::new()), semi)); - } - if input.is_empty() { - break; - } - let stmt = parse_stmt(input, AllowNoSemi(true))?; - let requires_semicolon = match &stmt { - Stmt::Expr(stmt, None) => classify::requires_semi_to_be_stmt(stmt), - Stmt::Macro(stmt) => { - stmt.semi_token.is_none() && !stmt.mac.delimiter.is_brace() - } - Stmt::Local(_) | Stmt::Item(_) | Stmt::Expr(_, Some(_)) => false, - }; - stmts.push(stmt); - if input.is_empty() { - break; - } else if requires_semicolon { - return Err(input.error("unexpected token, expected `;`")); - } - } - Ok(stmts) - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - impl Parse for Block { - fn parse(input: ParseStream) -> Result<Self> { - let content; - Ok(Block { - brace_token: braced!(content in input), - stmts: content.call(Block::parse_within)?, - }) - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - impl Parse for Stmt { - fn parse(input: ParseStream) -> Result<Self> { - let allow_nosemi = AllowNoSemi(false); - parse_stmt(input, allow_nosemi) - } - } - - fn parse_stmt(input: ParseStream, allow_nosemi: AllowNoSemi) -> Result<Stmt> { - let begin = input.fork(); - let attrs = input.call(Attribute::parse_outer)?; - - // brace-style macros; paren and bracket macros get parsed as - // expression statements. - let ahead = input.fork(); - let mut is_item_macro = false; - if let Ok(path) = ahead.call(Path::parse_mod_style) { - if ahead.peek(Token![!]) { - if ahead.peek2(Ident) || ahead.peek2(Token![try]) { - is_item_macro = true; - } else if ahead.peek2(token::Brace) - && !(ahead.peek3(Token![.]) && !ahead.peek3(Token![..]) - || ahead.peek3(Token![?])) - { - input.advance_to(&ahead); - return stmt_mac(input, attrs, path).map(Stmt::Macro); - } - } - } - - if input.peek(Token![let]) && !input.peek(token::Group) { - stmt_local(input, attrs).map(Stmt::Local) - } else if input.peek(Token![pub]) - || input.peek(Token![crate]) && !input.peek2(Token![::]) - || input.peek(Token![extern]) - || input.peek(Token![use]) - || input.peek(Token![static]) - && (input.peek2(Token![mut]) - || input.peek2(Ident) - && !(input.peek2(Token![async]) - && (input.peek3(Token![move]) || input.peek3(Token![|])))) - || input.peek(Token![const]) - && !(input.peek2(token::Brace) - || input.peek2(Token![static]) - || input.peek2(Token![async]) - && !(input.peek3(Token![unsafe]) - || input.peek3(Token![extern]) - || input.peek3(Token![fn])) - || input.peek2(Token![move]) - || input.peek2(Token![|])) - || input.peek(Token![unsafe]) && !input.peek2(token::Brace) - || input.peek(Token![async]) - && (input.peek2(Token![unsafe]) - || input.peek2(Token![extern]) - || input.peek2(Token![fn])) - || input.peek(Token![fn]) - || input.peek(Token![mod]) - || input.peek(Token![type]) - || input.peek(Token![struct]) - || input.peek(Token![enum]) - || input.peek(Token![union]) && input.peek2(Ident) - || input.peek(Token![auto]) && input.peek2(Token![trait]) - || input.peek(Token![trait]) - || input.peek(Token![default]) - && (input.peek2(Token![unsafe]) || input.peek2(Token![impl])) - || input.peek(Token![impl]) - || input.peek(Token![macro]) - || is_item_macro - { - let item = item::parsing::parse_rest_of_item(begin, attrs, input)?; - Ok(Stmt::Item(item)) - } else { - stmt_expr(input, allow_nosemi, attrs) - } - } - - fn stmt_mac(input: ParseStream, attrs: Vec<Attribute>, path: Path) -> Result<StmtMacro> { - let bang_token: Token![!] 
= input.parse()?; - let (delimiter, tokens) = mac::parse_delimiter(input)?; - let semi_token: Option<Token![;]> = input.parse()?; - - Ok(StmtMacro { - attrs, - mac: Macro { - path, - bang_token, - delimiter, - tokens, - }, - semi_token, - }) - } - - fn stmt_local(input: ParseStream, attrs: Vec<Attribute>) -> Result<Local> { - let let_token: Token![let] = input.parse()?; - - let mut pat = Pat::parse_single(input)?; - if input.peek(Token![:]) { - let colon_token: Token![:] = input.parse()?; - let ty: Type = input.parse()?; - pat = Pat::Type(PatType { - attrs: Vec::new(), - pat: Box::new(pat), - colon_token, - ty: Box::new(ty), - }); - } - - let init = if let Some(eq_token) = input.parse()? { - let eq_token: Token![=] = eq_token; - let expr: Expr = input.parse()?; - - let diverge = if !classify::expr_trailing_brace(&expr) && input.peek(Token![else]) { - let else_token: Token![else] = input.parse()?; - let diverge = ExprBlock { - attrs: Vec::new(), - label: None, - block: input.parse()?, - }; - Some((else_token, Box::new(Expr::Block(diverge)))) - } else { - None - }; - - Some(LocalInit { - eq_token, - expr: Box::new(expr), - diverge, - }) - } else { - None - }; - - let semi_token: Token![;] = input.parse()?; - - Ok(Local { - attrs, - let_token, - pat, - init, - semi_token, - }) - } - - fn stmt_expr( - input: ParseStream, - allow_nosemi: AllowNoSemi, - mut attrs: Vec<Attribute>, - ) -> Result<Stmt> { - let mut e = Expr::parse_with_earlier_boundary_rule(input)?; - - let mut attr_target = &mut e; - loop { - attr_target = match attr_target { - Expr::Assign(e) => &mut e.left, - Expr::Binary(e) => &mut e.left, - Expr::Cast(e) => &mut e.expr, - Expr::Array(_) - | Expr::Async(_) - | Expr::Await(_) - | Expr::Block(_) - | Expr::Break(_) - | Expr::Call(_) - | Expr::Closure(_) - | Expr::Const(_) - | Expr::Continue(_) - | Expr::Field(_) - | Expr::ForLoop(_) - | Expr::Group(_) - | Expr::If(_) - | Expr::Index(_) - | Expr::Infer(_) - | Expr::Let(_) - | Expr::Lit(_) - | Expr::Loop(_) - | Expr::Macro(_) - | Expr::Match(_) - | Expr::MethodCall(_) - | Expr::Paren(_) - | Expr::Path(_) - | Expr::Range(_) - | Expr::RawAddr(_) - | Expr::Reference(_) - | Expr::Repeat(_) - | Expr::Return(_) - | Expr::Struct(_) - | Expr::Try(_) - | Expr::TryBlock(_) - | Expr::Tuple(_) - | Expr::Unary(_) - | Expr::Unsafe(_) - | Expr::While(_) - | Expr::Yield(_) - | Expr::Verbatim(_) => break, - }; - } - attrs.extend(attr_target.replace_attrs(Vec::new())); - attr_target.replace_attrs(attrs); - - let semi_token: Option<Token![;]> = input.parse()?; - - match e { - Expr::Macro(ExprMacro { attrs, mac }) - if semi_token.is_some() || mac.delimiter.is_brace() => - { - return Ok(Stmt::Macro(StmtMacro { - attrs, - mac, - semi_token, - })); - } - _ => {} - } - - if semi_token.is_some() { - Ok(Stmt::Expr(e, semi_token)) - } else if allow_nosemi.0 || !classify::requires_semi_to_be_stmt(&e) { - Ok(Stmt::Expr(e, None)) - } else { - Err(input.error("expected semicolon")) - } - } -} - -#[cfg(feature = "printing")] -pub(crate) mod printing { - use crate::classify; - use crate::expr::{self, Expr}; - use crate::fixup::FixupContext; - use crate::stmt::{Block, Local, Stmt, StmtMacro}; - use crate::token; - use proc_macro2::TokenStream; - use quote::{ToTokens, TokenStreamExt as _}; - - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for Block { - fn to_tokens(&self, tokens: &mut TokenStream) { - self.brace_token.surround(tokens, |tokens| { - tokens.append_all(&self.stmts); - }); - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = 
"printing")))] - impl ToTokens for Stmt { - fn to_tokens(&self, tokens: &mut TokenStream) { - match self { - Stmt::Local(local) => local.to_tokens(tokens), - Stmt::Item(item) => item.to_tokens(tokens), - Stmt::Expr(expr, semi) => { - expr::printing::print_expr(expr, tokens, FixupContext::new_stmt()); - semi.to_tokens(tokens); - } - Stmt::Macro(mac) => mac.to_tokens(tokens), - } - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for Local { - fn to_tokens(&self, tokens: &mut TokenStream) { - expr::printing::outer_attrs_to_tokens(&self.attrs, tokens); - self.let_token.to_tokens(tokens); - self.pat.to_tokens(tokens); - if let Some(init) = &self.init { - init.eq_token.to_tokens(tokens); - expr::printing::print_subexpression( - &init.expr, - init.diverge.is_some() && classify::expr_trailing_brace(&init.expr), - tokens, - FixupContext::NONE, - ); - if let Some((else_token, diverge)) = &init.diverge { - else_token.to_tokens(tokens); - match &**diverge { - Expr::Block(diverge) => diverge.to_tokens(tokens), - _ => token::Brace::default().surround(tokens, |tokens| { - expr::printing::print_expr(diverge, tokens, FixupContext::new_stmt()); - }), - } - } - } - self.semi_token.to_tokens(tokens); - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for StmtMacro { - fn to_tokens(&self, tokens: &mut TokenStream) { - expr::printing::outer_attrs_to_tokens(&self.attrs, tokens); - self.mac.to_tokens(tokens); - self.semi_token.to_tokens(tokens); - } - } -} diff --git a/vendor/syn/src/thread.rs b/vendor/syn/src/thread.rs deleted file mode 100644 index b33d248afc6063..00000000000000 --- a/vendor/syn/src/thread.rs +++ /dev/null @@ -1,60 +0,0 @@ -use std::fmt::{self, Debug}; -use std::thread::{self, ThreadId}; - -/// ThreadBound is a Sync-maker and Send-maker that allows accessing a value -/// of type T only from the original thread on which the ThreadBound was -/// constructed. -pub(crate) struct ThreadBound<T> { - value: T, - thread_id: ThreadId, -} - -unsafe impl<T> Sync for ThreadBound<T> {} - -// Send bound requires Copy, as otherwise Drop could run in the wrong place. -// -// Today Copy and Drop are mutually exclusive so `T: Copy` implies `T: !Drop`. -// This impl needs to be revisited if that restriction is relaxed in the future. -unsafe impl<T: Copy> Send for ThreadBound<T> {} - -impl<T> ThreadBound<T> { - pub(crate) fn new(value: T) -> Self { - ThreadBound { - value, - thread_id: thread::current().id(), - } - } - - pub(crate) fn get(&self) -> Option<&T> { - if thread::current().id() == self.thread_id { - Some(&self.value) - } else { - None - } - } -} - -impl<T: Debug> Debug for ThreadBound<T> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - match self.get() { - Some(value) => Debug::fmt(value, formatter), - None => formatter.write_str("unknown"), - } - } -} - -// Copy the bytes of T, even if the currently running thread is the "wrong" -// thread. This is fine as long as the original thread is not simultaneously -// mutating this value via interior mutability, which would be a data race. -// -// Currently `T: Copy` is sufficient to guarantee that T contains no interior -// mutability, because _all_ interior mutability in Rust is built on -// std::cell::UnsafeCell, which has no Copy impl. This impl needs to be -// revisited if that restriction is relaxed in the future. 
-impl<T: Copy> Copy for ThreadBound<T> {} - -impl<T: Copy> Clone for ThreadBound<T> { - fn clone(&self) -> Self { - *self - } -} diff --git a/vendor/syn/src/token.rs b/vendor/syn/src/token.rs deleted file mode 100644 index 52321fc6c70611..00000000000000 --- a/vendor/syn/src/token.rs +++ /dev/null @@ -1,1093 +0,0 @@ -//! Tokens representing Rust punctuation, keywords, and delimiters. -//! -//! The type names in this module can be difficult to keep straight, so we -//! prefer to use the [`Token!`] macro instead. This is a type-macro that -//! expands to the token type of the given token. -//! -//! [`Token!`]: crate::Token -//! -//! # Example -//! -//! The [`ItemStatic`] syntax tree node is defined like this. -//! -//! [`ItemStatic`]: crate::ItemStatic -//! -//! ``` -//! # use syn::{Attribute, Expr, Ident, Token, Type, Visibility}; -//! # -//! pub struct ItemStatic { -//! pub attrs: Vec<Attribute>, -//! pub vis: Visibility, -//! pub static_token: Token![static], -//! pub mutability: Option<Token![mut]>, -//! pub ident: Ident, -//! pub colon_token: Token![:], -//! pub ty: Box<Type>, -//! pub eq_token: Token![=], -//! pub expr: Box<Expr>, -//! pub semi_token: Token![;], -//! } -//! ``` -//! -//! # Parsing -//! -//! Keywords and punctuation can be parsed through the [`ParseStream::parse`] -//! method. Delimiter tokens are parsed using the [`parenthesized!`], -//! [`bracketed!`] and [`braced!`] macros. -//! -//! [`ParseStream::parse`]: crate::parse::ParseBuffer::parse() -//! [`parenthesized!`]: crate::parenthesized! -//! [`bracketed!`]: crate::bracketed! -//! [`braced!`]: crate::braced! -//! -//! ``` -//! use syn::{Attribute, Result}; -//! use syn::parse::{Parse, ParseStream}; -//! # -//! # enum ItemStatic {} -//! -//! // Parse the ItemStatic struct shown above. -//! impl Parse for ItemStatic { -//! fn parse(input: ParseStream) -> Result<Self> { -//! # use syn::ItemStatic; -//! # fn parse(input: ParseStream) -> Result<ItemStatic> { -//! Ok(ItemStatic { -//! attrs: input.call(Attribute::parse_outer)?, -//! vis: input.parse()?, -//! static_token: input.parse()?, -//! mutability: input.parse()?, -//! ident: input.parse()?, -//! colon_token: input.parse()?, -//! ty: input.parse()?, -//! eq_token: input.parse()?, -//! expr: input.parse()?, -//! semi_token: input.parse()?, -//! }) -//! # } -//! # unimplemented!() -//! } -//! } -//! ``` -//! -//! # Other operations -//! -//! Every keyword and punctuation token supports the following operations. -//! -//! - [Peeking] — `input.peek(Token![...])` -//! -//! - [Parsing] — `input.parse::<Token![...]>()?` -//! -//! - [Printing] — `quote!( ... #the_token ... )` -//! -//! - Construction from a [`Span`] — `let the_token = Token![...](sp)` -//! -//! - Field access to its span — `let sp = the_token.span` -//! -//! [Peeking]: crate::parse::ParseBuffer::peek() -//! [Parsing]: crate::parse::ParseBuffer::parse() -//! [Printing]: https://docs.rs/quote/1.0/quote/trait.ToTokens.html -//! 
[`Span`]: https://docs.rs/proc-macro2/1.0/proc_macro2/struct.Span.html - -#[cfg(feature = "parsing")] -pub(crate) use self::private::CustomToken; -use self::private::WithSpan; -#[cfg(feature = "parsing")] -use crate::buffer::Cursor; -#[cfg(feature = "parsing")] -use crate::error::Result; -#[cfg(feature = "parsing")] -use crate::lifetime::Lifetime; -#[cfg(feature = "parsing")] -use crate::parse::{Parse, ParseStream}; -use crate::span::IntoSpans; -use proc_macro2::extra::DelimSpan; -use proc_macro2::Span; -#[cfg(feature = "printing")] -use proc_macro2::TokenStream; -#[cfg(any(feature = "parsing", feature = "printing"))] -use proc_macro2::{Delimiter, Ident}; -#[cfg(feature = "parsing")] -use proc_macro2::{Literal, Punct, TokenTree}; -#[cfg(feature = "printing")] -use quote::{ToTokens, TokenStreamExt as _}; -#[cfg(feature = "extra-traits")] -use std::cmp; -#[cfg(feature = "extra-traits")] -use std::fmt::{self, Debug}; -#[cfg(feature = "extra-traits")] -use std::hash::{Hash, Hasher}; -use std::ops::{Deref, DerefMut}; - -/// Marker trait for types that represent single tokens. -/// -/// This trait is sealed and cannot be implemented for types outside of Syn. -#[cfg(feature = "parsing")] -pub trait Token: private::Sealed { - // Not public API. - #[doc(hidden)] - fn peek(cursor: Cursor) -> bool; - - // Not public API. - #[doc(hidden)] - fn display() -> &'static str; -} - -pub(crate) mod private { - #[cfg(feature = "parsing")] - use crate::buffer::Cursor; - use proc_macro2::Span; - - #[cfg(feature = "parsing")] - pub trait Sealed {} - - /// Support writing `token.span` rather than `token.spans[0]` on tokens that - /// hold a single span. - #[repr(transparent)] - #[allow(unknown_lints, repr_transparent_non_zst_fields)] // False positive: https://github.com/rust-lang/rust/issues/115922 - pub struct WithSpan { - pub span: Span, - } - - // Not public API. - #[doc(hidden)] - #[cfg(feature = "parsing")] - pub trait CustomToken { - fn peek(cursor: Cursor) -> bool; - fn display() -> &'static str; - } -} - -#[cfg(feature = "parsing")] -impl private::Sealed for Ident {} - -macro_rules! impl_low_level_token { - ($display:literal $($path:ident)::+ $get:ident) => { - #[cfg(feature = "parsing")] - impl Token for $($path)::+ { - fn peek(cursor: Cursor) -> bool { - cursor.$get().is_some() - } - - fn display() -> &'static str { - $display - } - } - - #[cfg(feature = "parsing")] - impl private::Sealed for $($path)::+ {} - }; -} - -impl_low_level_token!("punctuation token" Punct punct); -impl_low_level_token!("literal" Literal literal); -impl_low_level_token!("token" TokenTree token_tree); -impl_low_level_token!("group token" proc_macro2::Group any_group); -impl_low_level_token!("lifetime" Lifetime lifetime); - -#[cfg(feature = "parsing")] -impl<T: CustomToken> private::Sealed for T {} - -#[cfg(feature = "parsing")] -impl<T: CustomToken> Token for T { - fn peek(cursor: Cursor) -> bool { - <Self as CustomToken>::peek(cursor) - } - - fn display() -> &'static str { - <Self as CustomToken>::display() - } -} - -macro_rules! define_keywords { - ($($token:literal pub struct $name:ident)*) => { - $( - #[doc = concat!('`', $token, '`')] - /// - /// Don't try to remember the name of this type — use the - /// [`Token!`] macro instead. 
- /// - /// [`Token!`]: crate::token - pub struct $name { - pub span: Span, - } - - #[doc(hidden)] - #[allow(non_snake_case)] - pub fn $name<S: IntoSpans<Span>>(span: S) -> $name { - $name { - span: span.into_spans(), - } - } - - impl std::default::Default for $name { - fn default() -> Self { - $name { - span: Span::call_site(), - } - } - } - - #[cfg(feature = "clone-impls")] - #[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] - impl Copy for $name {} - - #[cfg(feature = "clone-impls")] - #[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] - impl Clone for $name { - fn clone(&self) -> Self { - *self - } - } - - #[cfg(feature = "extra-traits")] - #[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] - impl Debug for $name { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.write_str(stringify!($name)) - } - } - - #[cfg(feature = "extra-traits")] - #[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] - impl cmp::Eq for $name {} - - #[cfg(feature = "extra-traits")] - #[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] - impl PartialEq for $name { - fn eq(&self, _other: &$name) -> bool { - true - } - } - - #[cfg(feature = "extra-traits")] - #[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] - impl Hash for $name { - fn hash<H: Hasher>(&self, _state: &mut H) {} - } - - #[cfg(feature = "printing")] - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for $name { - fn to_tokens(&self, tokens: &mut TokenStream) { - printing::keyword($token, self.span, tokens); - } - } - - #[cfg(feature = "parsing")] - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - impl Parse for $name { - fn parse(input: ParseStream) -> Result<Self> { - Ok($name { - span: parsing::keyword(input, $token)?, - }) - } - } - - #[cfg(feature = "parsing")] - impl Token for $name { - fn peek(cursor: Cursor) -> bool { - parsing::peek_keyword(cursor, $token) - } - - fn display() -> &'static str { - concat!("`", $token, "`") - } - } - - #[cfg(feature = "parsing")] - impl private::Sealed for $name {} - )* - }; -} - -macro_rules! impl_deref_if_len_is_1 { - ($name:ident/1) => { - impl Deref for $name { - type Target = WithSpan; - - fn deref(&self) -> &Self::Target { - unsafe { &*(self as *const Self).cast::<WithSpan>() } - } - } - - impl DerefMut for $name { - fn deref_mut(&mut self) -> &mut Self::Target { - unsafe { &mut *(self as *mut Self).cast::<WithSpan>() } - } - } - }; - - ($name:ident/$len:literal) => {}; -} - -macro_rules! define_punctuation_structs { - ($($token:literal pub struct $name:ident/$len:tt #[doc = $usage:literal])*) => { - $( - #[cfg_attr(not(doc), repr(transparent))] - #[allow(unknown_lints, repr_transparent_non_zst_fields)] // False positive: https://github.com/rust-lang/rust/issues/115922 - #[doc = concat!('`', $token, '`')] - /// - /// Usage: - #[doc = concat!($usage, '.')] - /// - /// Don't try to remember the name of this type — use the - /// [`Token!`] macro instead. 
- /// - /// [`Token!`]: crate::token - pub struct $name { - pub spans: [Span; $len], - } - - #[doc(hidden)] - #[allow(non_snake_case)] - pub fn $name<S: IntoSpans<[Span; $len]>>(spans: S) -> $name { - $name { - spans: spans.into_spans(), - } - } - - impl std::default::Default for $name { - fn default() -> Self { - $name { - spans: [Span::call_site(); $len], - } - } - } - - #[cfg(feature = "clone-impls")] - #[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] - impl Copy for $name {} - - #[cfg(feature = "clone-impls")] - #[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] - impl Clone for $name { - fn clone(&self) -> Self { - *self - } - } - - #[cfg(feature = "extra-traits")] - #[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] - impl Debug for $name { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.write_str(stringify!($name)) - } - } - - #[cfg(feature = "extra-traits")] - #[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] - impl cmp::Eq for $name {} - - #[cfg(feature = "extra-traits")] - #[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] - impl PartialEq for $name { - fn eq(&self, _other: &$name) -> bool { - true - } - } - - #[cfg(feature = "extra-traits")] - #[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] - impl Hash for $name { - fn hash<H: Hasher>(&self, _state: &mut H) {} - } - - impl_deref_if_len_is_1!($name/$len); - )* - }; -} - -macro_rules! define_punctuation { - ($($token:literal pub struct $name:ident/$len:tt #[doc = $usage:literal])*) => { - $( - define_punctuation_structs! { - $token pub struct $name/$len #[doc = $usage] - } - - #[cfg(feature = "printing")] - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for $name { - fn to_tokens(&self, tokens: &mut TokenStream) { - printing::punct($token, &self.spans, tokens); - } - } - - #[cfg(feature = "parsing")] - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - impl Parse for $name { - fn parse(input: ParseStream) -> Result<Self> { - Ok($name { - spans: parsing::punct(input, $token)?, - }) - } - } - - #[cfg(feature = "parsing")] - impl Token for $name { - fn peek(cursor: Cursor) -> bool { - parsing::peek_punct(cursor, $token) - } - - fn display() -> &'static str { - concat!("`", $token, "`") - } - } - - #[cfg(feature = "parsing")] - impl private::Sealed for $name {} - )* - }; -} - -macro_rules! 
define_delimiters { - ($($delim:ident pub struct $name:ident #[$doc:meta])*) => { - $( - #[$doc] - pub struct $name { - pub span: DelimSpan, - } - - #[doc(hidden)] - #[allow(non_snake_case)] - pub fn $name<S: IntoSpans<DelimSpan>>(span: S) -> $name { - $name { - span: span.into_spans(), - } - } - - impl std::default::Default for $name { - fn default() -> Self { - $name(Span::call_site()) - } - } - - #[cfg(feature = "clone-impls")] - #[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] - impl Copy for $name {} - - #[cfg(feature = "clone-impls")] - #[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] - impl Clone for $name { - fn clone(&self) -> Self { - *self - } - } - - #[cfg(feature = "extra-traits")] - #[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] - impl Debug for $name { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.write_str(stringify!($name)) - } - } - - #[cfg(feature = "extra-traits")] - #[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] - impl cmp::Eq for $name {} - - #[cfg(feature = "extra-traits")] - #[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] - impl PartialEq for $name { - fn eq(&self, _other: &$name) -> bool { - true - } - } - - #[cfg(feature = "extra-traits")] - #[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] - impl Hash for $name { - fn hash<H: Hasher>(&self, _state: &mut H) {} - } - - impl $name { - #[cfg(feature = "printing")] - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - pub fn surround<F>(&self, tokens: &mut TokenStream, f: F) - where - F: FnOnce(&mut TokenStream), - { - let mut inner = TokenStream::new(); - f(&mut inner); - printing::delim(Delimiter::$delim, self.span.join(), tokens, inner); - } - } - - #[cfg(feature = "parsing")] - impl private::Sealed for $name {} - )* - }; -} - -define_punctuation_structs! 
{ - "_" pub struct Underscore/1 /// wildcard patterns, inferred types, unnamed items in constants, extern crates, use declarations, and destructuring assignment -} - -#[cfg(feature = "printing")] -#[cfg_attr(docsrs, doc(cfg(feature = "printing")))] -impl ToTokens for Underscore { - fn to_tokens(&self, tokens: &mut TokenStream) { - tokens.append(Ident::new("_", self.span)); - } -} - -#[cfg(feature = "parsing")] -#[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] -impl Parse for Underscore { - fn parse(input: ParseStream) -> Result<Self> { - input.step(|cursor| { - if let Some((ident, rest)) = cursor.ident() { - if ident == "_" { - return Ok((Underscore(ident.span()), rest)); - } - } - if let Some((punct, rest)) = cursor.punct() { - if punct.as_char() == '_' { - return Ok((Underscore(punct.span()), rest)); - } - } - Err(cursor.error("expected `_`")) - }) - } -} - -#[cfg(feature = "parsing")] -impl Token for Underscore { - fn peek(cursor: Cursor) -> bool { - if let Some((ident, _rest)) = cursor.ident() { - return ident == "_"; - } - if let Some((punct, _rest)) = cursor.punct() { - return punct.as_char() == '_'; - } - false - } - - fn display() -> &'static str { - "`_`" - } -} - -#[cfg(feature = "parsing")] -impl private::Sealed for Underscore {} - -/// None-delimited group -pub struct Group { - pub span: Span, -} - -#[doc(hidden)] -#[allow(non_snake_case)] -pub fn Group<S: IntoSpans<Span>>(span: S) -> Group { - Group { - span: span.into_spans(), - } -} - -impl std::default::Default for Group { - fn default() -> Self { - Group { - span: Span::call_site(), - } - } -} - -#[cfg(feature = "clone-impls")] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Copy for Group {} - -#[cfg(feature = "clone-impls")] -#[cfg_attr(docsrs, doc(cfg(feature = "clone-impls")))] -impl Clone for Group { - fn clone(&self) -> Self { - *self - } -} - -#[cfg(feature = "extra-traits")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Debug for Group { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.write_str("Group") - } -} - -#[cfg(feature = "extra-traits")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl cmp::Eq for Group {} - -#[cfg(feature = "extra-traits")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl PartialEq for Group { - fn eq(&self, _other: &Group) -> bool { - true - } -} - -#[cfg(feature = "extra-traits")] -#[cfg_attr(docsrs, doc(cfg(feature = "extra-traits")))] -impl Hash for Group { - fn hash<H: Hasher>(&self, _state: &mut H) {} -} - -impl Group { - #[cfg(feature = "printing")] - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - pub fn surround<F>(&self, tokens: &mut TokenStream, f: F) - where - F: FnOnce(&mut TokenStream), - { - let mut inner = TokenStream::new(); - f(&mut inner); - printing::delim(Delimiter::None, self.span, tokens, inner); - } -} - -#[cfg(feature = "parsing")] -impl private::Sealed for Group {} - -#[cfg(feature = "parsing")] -impl Token for Paren { - fn peek(cursor: Cursor) -> bool { - cursor.group(Delimiter::Parenthesis).is_some() - } - - fn display() -> &'static str { - "parentheses" - } -} - -#[cfg(feature = "parsing")] -impl Token for Brace { - fn peek(cursor: Cursor) -> bool { - cursor.group(Delimiter::Brace).is_some() - } - - fn display() -> &'static str { - "curly braces" - } -} - -#[cfg(feature = "parsing")] -impl Token for Bracket { - fn peek(cursor: Cursor) -> bool { - cursor.group(Delimiter::Bracket).is_some() - } - - fn display() -> &'static str { - "square brackets" - } -} - -#[cfg(feature = 
"parsing")] -impl Token for Group { - fn peek(cursor: Cursor) -> bool { - cursor.group(Delimiter::None).is_some() - } - - fn display() -> &'static str { - "invisible group" - } -} - -define_keywords! { - "abstract" pub struct Abstract - "as" pub struct As - "async" pub struct Async - "auto" pub struct Auto - "await" pub struct Await - "become" pub struct Become - "box" pub struct Box - "break" pub struct Break - "const" pub struct Const - "continue" pub struct Continue - "crate" pub struct Crate - "default" pub struct Default - "do" pub struct Do - "dyn" pub struct Dyn - "else" pub struct Else - "enum" pub struct Enum - "extern" pub struct Extern - "final" pub struct Final - "fn" pub struct Fn - "for" pub struct For - "if" pub struct If - "impl" pub struct Impl - "in" pub struct In - "let" pub struct Let - "loop" pub struct Loop - "macro" pub struct Macro - "match" pub struct Match - "mod" pub struct Mod - "move" pub struct Move - "mut" pub struct Mut - "override" pub struct Override - "priv" pub struct Priv - "pub" pub struct Pub - "raw" pub struct Raw - "ref" pub struct Ref - "return" pub struct Return - "Self" pub struct SelfType - "self" pub struct SelfValue - "static" pub struct Static - "struct" pub struct Struct - "super" pub struct Super - "trait" pub struct Trait - "try" pub struct Try - "type" pub struct Type - "typeof" pub struct Typeof - "union" pub struct Union - "unsafe" pub struct Unsafe - "unsized" pub struct Unsized - "use" pub struct Use - "virtual" pub struct Virtual - "where" pub struct Where - "while" pub struct While - "yield" pub struct Yield -} - -define_punctuation! { - "&" pub struct And/1 /// bitwise and logical AND, borrow, references, reference patterns - "&&" pub struct AndAnd/2 /// lazy AND, borrow, references, reference patterns - "&=" pub struct AndEq/2 /// bitwise AND assignment - "@" pub struct At/1 /// subpattern binding - "^" pub struct Caret/1 /// bitwise and logical XOR - "^=" pub struct CaretEq/2 /// bitwise XOR assignment - ":" pub struct Colon/1 /// various separators - "," pub struct Comma/1 /// various separators - "$" pub struct Dollar/1 /// macros - "." pub struct Dot/1 /// field access, tuple index - ".." pub struct DotDot/2 /// range, struct expressions, patterns, range patterns - "..." pub struct DotDotDot/3 /// variadic functions, range patterns - "..=" pub struct DotDotEq/3 /// inclusive range, range patterns - "=" pub struct Eq/1 /// assignment, attributes, various type definitions - "==" pub struct EqEq/2 /// equal - "=>" pub struct FatArrow/2 /// match arms, macros - ">=" pub struct Ge/2 /// greater than or equal to, generics - ">" pub struct Gt/1 /// greater than, generics, paths - "<-" pub struct LArrow/2 /// unused - "<=" pub struct Le/2 /// less than or equal to - "<" pub struct Lt/1 /// less than, generics, paths - "-" pub struct Minus/1 /// subtraction, negation - "-=" pub struct MinusEq/2 /// subtraction assignment - "!=" pub struct Ne/2 /// not equal - "!" 
pub struct Not/1 /// bitwise and logical NOT, macro calls, inner attributes, never type, negative impls - "|" pub struct Or/1 /// bitwise and logical OR, closures, patterns in match, if let, and while let - "|=" pub struct OrEq/2 /// bitwise OR assignment - "||" pub struct OrOr/2 /// lazy OR, closures - "::" pub struct PathSep/2 /// path separator - "%" pub struct Percent/1 /// remainder - "%=" pub struct PercentEq/2 /// remainder assignment - "+" pub struct Plus/1 /// addition, trait bounds, macro Kleene matcher - "+=" pub struct PlusEq/2 /// addition assignment - "#" pub struct Pound/1 /// attributes - "?" pub struct Question/1 /// question mark operator, questionably sized, macro Kleene matcher - "->" pub struct RArrow/2 /// function return type, closure return type, function pointer type - ";" pub struct Semi/1 /// terminator for various items and statements, array types - "<<" pub struct Shl/2 /// shift left, nested generics - "<<=" pub struct ShlEq/3 /// shift left assignment - ">>" pub struct Shr/2 /// shift right, nested generics - ">>=" pub struct ShrEq/3 /// shift right assignment, nested generics - "/" pub struct Slash/1 /// division - "/=" pub struct SlashEq/2 /// division assignment - "*" pub struct Star/1 /// multiplication, dereference, raw pointers, macro Kleene matcher, use wildcards - "*=" pub struct StarEq/2 /// multiplication assignment - "~" pub struct Tilde/1 /// unused since before Rust 1.0 -} - -define_delimiters! { - Brace pub struct Brace /// `{`…`}` - Bracket pub struct Bracket /// `[`…`]` - Parenthesis pub struct Paren /// `(`…`)` -} - -/// A type-macro that expands to the name of the Rust type representation of a -/// given token. -/// -/// As a type, `Token!` is commonly used in the type of struct fields, the type -/// of a `let` statement, or in turbofish for a `parse` function. -/// -/// ``` -/// use syn::{Ident, Token}; -/// use syn::parse::{Parse, ParseStream, Result}; -/// -/// // `struct Foo;` -/// pub struct UnitStruct { -/// struct_token: Token![struct], -/// ident: Ident, -/// semi_token: Token![;], -/// } -/// -/// impl Parse for UnitStruct { -/// fn parse(input: ParseStream) -> Result<Self> { -/// let struct_token: Token![struct] = input.parse()?; -/// let ident: Ident = input.parse()?; -/// let semi_token = input.parse::<Token![;]>()?; -/// Ok(UnitStruct { struct_token, ident, semi_token }) -/// } -/// } -/// ``` -/// -/// As an expression, `Token!` is used for peeking tokens or instantiating -/// tokens from a span. -/// -/// ``` -/// # use syn::{Ident, Token}; -/// # use syn::parse::{Parse, ParseStream, Result}; -/// # -/// # struct UnitStruct { -/// # struct_token: Token![struct], -/// # ident: Ident, -/// # semi_token: Token![;], -/// # } -/// # -/// # impl Parse for UnitStruct { -/// # fn parse(input: ParseStream) -> Result<Self> { -/// # unimplemented!() -/// # } -/// # } -/// # -/// fn make_unit_struct(name: Ident) -> UnitStruct { -/// let span = name.span(); -/// UnitStruct { -/// struct_token: Token![struct](span), -/// ident: name, -/// semi_token: Token![;](span), -/// } -/// } -/// -/// # fn parse(input: ParseStream) -> Result<()> { -/// if input.peek(Token![struct]) { -/// let unit_struct: UnitStruct = input.parse()?; -/// /* ... */ -/// } -/// # Ok(()) -/// # } -/// ``` -/// -/// See the [token module] documentation for details and examples. -/// -/// [token module]: crate::token -#[macro_export] -macro_rules! 
Token { - [abstract] => { $crate::token::Abstract }; - [as] => { $crate::token::As }; - [async] => { $crate::token::Async }; - [auto] => { $crate::token::Auto }; - [await] => { $crate::token::Await }; - [become] => { $crate::token::Become }; - [box] => { $crate::token::Box }; - [break] => { $crate::token::Break }; - [const] => { $crate::token::Const }; - [continue] => { $crate::token::Continue }; - [crate] => { $crate::token::Crate }; - [default] => { $crate::token::Default }; - [do] => { $crate::token::Do }; - [dyn] => { $crate::token::Dyn }; - [else] => { $crate::token::Else }; - [enum] => { $crate::token::Enum }; - [extern] => { $crate::token::Extern }; - [final] => { $crate::token::Final }; - [fn] => { $crate::token::Fn }; - [for] => { $crate::token::For }; - [if] => { $crate::token::If }; - [impl] => { $crate::token::Impl }; - [in] => { $crate::token::In }; - [let] => { $crate::token::Let }; - [loop] => { $crate::token::Loop }; - [macro] => { $crate::token::Macro }; - [match] => { $crate::token::Match }; - [mod] => { $crate::token::Mod }; - [move] => { $crate::token::Move }; - [mut] => { $crate::token::Mut }; - [override] => { $crate::token::Override }; - [priv] => { $crate::token::Priv }; - [pub] => { $crate::token::Pub }; - [raw] => { $crate::token::Raw }; - [ref] => { $crate::token::Ref }; - [return] => { $crate::token::Return }; - [Self] => { $crate::token::SelfType }; - [self] => { $crate::token::SelfValue }; - [static] => { $crate::token::Static }; - [struct] => { $crate::token::Struct }; - [super] => { $crate::token::Super }; - [trait] => { $crate::token::Trait }; - [try] => { $crate::token::Try }; - [type] => { $crate::token::Type }; - [typeof] => { $crate::token::Typeof }; - [union] => { $crate::token::Union }; - [unsafe] => { $crate::token::Unsafe }; - [unsized] => { $crate::token::Unsized }; - [use] => { $crate::token::Use }; - [virtual] => { $crate::token::Virtual }; - [where] => { $crate::token::Where }; - [while] => { $crate::token::While }; - [yield] => { $crate::token::Yield }; - [&] => { $crate::token::And }; - [&&] => { $crate::token::AndAnd }; - [&=] => { $crate::token::AndEq }; - [@] => { $crate::token::At }; - [^] => { $crate::token::Caret }; - [^=] => { $crate::token::CaretEq }; - [:] => { $crate::token::Colon }; - [,] => { $crate::token::Comma }; - [$] => { $crate::token::Dollar }; - [.] => { $crate::token::Dot }; - [..] => { $crate::token::DotDot }; - [...] => { $crate::token::DotDotDot }; - [..=] => { $crate::token::DotDotEq }; - [=] => { $crate::token::Eq }; - [==] => { $crate::token::EqEq }; - [=>] => { $crate::token::FatArrow }; - [>=] => { $crate::token::Ge }; - [>] => { $crate::token::Gt }; - [<-] => { $crate::token::LArrow }; - [<=] => { $crate::token::Le }; - [<] => { $crate::token::Lt }; - [-] => { $crate::token::Minus }; - [-=] => { $crate::token::MinusEq }; - [!=] => { $crate::token::Ne }; - [!] => { $crate::token::Not }; - [|] => { $crate::token::Or }; - [|=] => { $crate::token::OrEq }; - [||] => { $crate::token::OrOr }; - [::] => { $crate::token::PathSep }; - [%] => { $crate::token::Percent }; - [%=] => { $crate::token::PercentEq }; - [+] => { $crate::token::Plus }; - [+=] => { $crate::token::PlusEq }; - [#] => { $crate::token::Pound }; - [?] 
=> { $crate::token::Question }; - [->] => { $crate::token::RArrow }; - [;] => { $crate::token::Semi }; - [<<] => { $crate::token::Shl }; - [<<=] => { $crate::token::ShlEq }; - [>>] => { $crate::token::Shr }; - [>>=] => { $crate::token::ShrEq }; - [/] => { $crate::token::Slash }; - [/=] => { $crate::token::SlashEq }; - [*] => { $crate::token::Star }; - [*=] => { $crate::token::StarEq }; - [~] => { $crate::token::Tilde }; - [_] => { $crate::token::Underscore }; -} - -// Not public API. -#[doc(hidden)] -#[cfg(feature = "parsing")] -pub(crate) mod parsing { - use crate::buffer::Cursor; - use crate::error::{Error, Result}; - use crate::parse::ParseStream; - use proc_macro2::{Spacing, Span}; - - pub(crate) fn keyword(input: ParseStream, token: &str) -> Result<Span> { - input.step(|cursor| { - if let Some((ident, rest)) = cursor.ident() { - if ident == token { - return Ok((ident.span(), rest)); - } - } - Err(cursor.error(format!("expected `{}`", token))) - }) - } - - pub(crate) fn peek_keyword(cursor: Cursor, token: &str) -> bool { - if let Some((ident, _rest)) = cursor.ident() { - ident == token - } else { - false - } - } - - #[doc(hidden)] - pub fn punct<const N: usize>(input: ParseStream, token: &str) -> Result<[Span; N]> { - let mut spans = [input.span(); N]; - punct_helper(input, token, &mut spans)?; - Ok(spans) - } - - fn punct_helper(input: ParseStream, token: &str, spans: &mut [Span]) -> Result<()> { - input.step(|cursor| { - let mut cursor = *cursor; - assert_eq!(token.len(), spans.len()); - - for (i, ch) in token.chars().enumerate() { - match cursor.punct() { - Some((punct, rest)) => { - spans[i] = punct.span(); - if punct.as_char() != ch { - break; - } else if i == token.len() - 1 { - return Ok(((), rest)); - } else if punct.spacing() != Spacing::Joint { - break; - } - cursor = rest; - } - None => break, - } - } - - Err(Error::new(spans[0], format!("expected `{}`", token))) - }) - } - - #[doc(hidden)] - pub fn peek_punct(mut cursor: Cursor, token: &str) -> bool { - for (i, ch) in token.chars().enumerate() { - match cursor.punct() { - Some((punct, rest)) => { - if punct.as_char() != ch { - break; - } else if i == token.len() - 1 { - return true; - } else if punct.spacing() != Spacing::Joint { - break; - } - cursor = rest; - } - None => break, - } - } - false - } -} - -// Not public API. 
-#[doc(hidden)] -#[cfg(feature = "printing")] -pub(crate) mod printing { - use crate::ext::PunctExt as _; - use proc_macro2::{Delimiter, Group, Ident, Punct, Spacing, Span, TokenStream}; - use quote::TokenStreamExt as _; - - #[doc(hidden)] - pub fn punct(s: &str, spans: &[Span], tokens: &mut TokenStream) { - assert_eq!(s.len(), spans.len()); - - let mut chars = s.chars(); - let mut spans = spans.iter(); - let ch = chars.next_back().unwrap(); - let span = spans.next_back().unwrap(); - for (ch, span) in chars.zip(spans) { - tokens.append(Punct::new_spanned(ch, Spacing::Joint, *span)); - } - - tokens.append(Punct::new_spanned(ch, Spacing::Alone, *span)); - } - - pub(crate) fn keyword(s: &str, span: Span, tokens: &mut TokenStream) { - tokens.append(Ident::new(s, span)); - } - - pub(crate) fn delim( - delim: Delimiter, - span: Span, - tokens: &mut TokenStream, - inner: TokenStream, - ) { - let mut g = Group::new(delim, inner); - g.set_span(span); - tokens.append(g); - } -} diff --git a/vendor/syn/src/tt.rs b/vendor/syn/src/tt.rs deleted file mode 100644 index 2a9843e1a1c8ad..00000000000000 --- a/vendor/syn/src/tt.rs +++ /dev/null @@ -1,96 +0,0 @@ -use proc_macro2::{Delimiter, Spacing, TokenStream, TokenTree}; -use std::hash::{Hash, Hasher}; - -pub(crate) struct TokenTreeHelper<'a>(pub &'a TokenTree); - -impl<'a> PartialEq for TokenTreeHelper<'a> { - fn eq(&self, other: &Self) -> bool { - match (self.0, other.0) { - (TokenTree::Group(g1), TokenTree::Group(g2)) => { - match (g1.delimiter(), g2.delimiter()) { - (Delimiter::Parenthesis, Delimiter::Parenthesis) - | (Delimiter::Brace, Delimiter::Brace) - | (Delimiter::Bracket, Delimiter::Bracket) - | (Delimiter::None, Delimiter::None) => {} - _ => return false, - } - - TokenStreamHelper(&g1.stream()) == TokenStreamHelper(&g2.stream()) - } - (TokenTree::Punct(o1), TokenTree::Punct(o2)) => { - o1.as_char() == o2.as_char() - && match (o1.spacing(), o2.spacing()) { - (Spacing::Alone, Spacing::Alone) | (Spacing::Joint, Spacing::Joint) => true, - _ => false, - } - } - (TokenTree::Literal(l1), TokenTree::Literal(l2)) => l1.to_string() == l2.to_string(), - (TokenTree::Ident(s1), TokenTree::Ident(s2)) => s1 == s2, - _ => false, - } - } -} - -impl<'a> Hash for TokenTreeHelper<'a> { - fn hash<H: Hasher>(&self, h: &mut H) { - match self.0 { - TokenTree::Group(g) => { - 0u8.hash(h); - match g.delimiter() { - Delimiter::Parenthesis => 0u8.hash(h), - Delimiter::Brace => 1u8.hash(h), - Delimiter::Bracket => 2u8.hash(h), - Delimiter::None => 3u8.hash(h), - } - - for item in g.stream() { - TokenTreeHelper(&item).hash(h); - } - 0xFFu8.hash(h); // terminator w/ a variant we don't normally hash - } - TokenTree::Punct(op) => { - 1u8.hash(h); - op.as_char().hash(h); - match op.spacing() { - Spacing::Alone => 0u8.hash(h), - Spacing::Joint => 1u8.hash(h), - } - } - TokenTree::Literal(lit) => (2u8, lit.to_string()).hash(h), - TokenTree::Ident(word) => (3u8, word).hash(h), - } - } -} - -pub(crate) struct TokenStreamHelper<'a>(pub &'a TokenStream); - -impl<'a> PartialEq for TokenStreamHelper<'a> { - fn eq(&self, other: &Self) -> bool { - let left = self.0.clone().into_iter(); - let mut right = other.0.clone().into_iter(); - - for item1 in left { - let item2 = match right.next() { - Some(item) => item, - None => return false, - }; - if TokenTreeHelper(&item1) != TokenTreeHelper(&item2) { - return false; - } - } - - right.next().is_none() - } -} - -impl<'a> Hash for TokenStreamHelper<'a> { - fn hash<H: Hasher>(&self, state: &mut H) { - let tokens = self.0.clone().into_iter(); - - 
tokens.clone().count().hash(state); - - for tt in tokens { - TokenTreeHelper(&tt).hash(state); - } - } -} diff --git a/vendor/syn/src/ty.rs b/vendor/syn/src/ty.rs deleted file mode 100644 index 5b4177f6875418..00000000000000 --- a/vendor/syn/src/ty.rs +++ /dev/null @@ -1,1271 +0,0 @@ -use crate::attr::Attribute; -use crate::expr::Expr; -use crate::generics::{BoundLifetimes, TypeParamBound}; -use crate::ident::Ident; -use crate::lifetime::Lifetime; -use crate::lit::LitStr; -use crate::mac::Macro; -use crate::path::{Path, QSelf}; -use crate::punctuated::Punctuated; -use crate::token; -use proc_macro2::TokenStream; - -ast_enum_of_structs! { - /// The possible types that a Rust value could have. - /// - /// # Syntax tree enum - /// - /// This type is a [syntax tree enum]. - /// - /// [syntax tree enum]: crate::expr::Expr#syntax-tree-enums - #[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] - #[non_exhaustive] - pub enum Type { - /// A fixed size array type: `[T; n]`. - Array(TypeArray), - - /// A bare function type: `fn(usize) -> bool`. - BareFn(TypeBareFn), - - /// A type contained within invisible delimiters. - Group(TypeGroup), - - /// An `impl Bound1 + Bound2 + Bound3` type where `Bound` is a trait or - /// a lifetime. - ImplTrait(TypeImplTrait), - - /// Indication that a type should be inferred by the compiler: `_`. - Infer(TypeInfer), - - /// A macro in the type position. - Macro(TypeMacro), - - /// The never type: `!`. - Never(TypeNever), - - /// A parenthesized type equivalent to the inner type. - Paren(TypeParen), - - /// A path like `std::slice::Iter`, optionally qualified with a - /// self-type as in `<Vec<T> as SomeTrait>::Associated`. - Path(TypePath), - - /// A raw pointer type: `*const T` or `*mut T`. - Ptr(TypePtr), - - /// A reference type: `&'a T` or `&'a mut T`. - Reference(TypeReference), - - /// A dynamically sized slice type: `[T]`. - Slice(TypeSlice), - - /// A trait object type `dyn Bound1 + Bound2 + Bound3` where `Bound` is a - /// trait or a lifetime. - TraitObject(TypeTraitObject), - - /// A tuple type: `(A, B, C, String)`. - Tuple(TypeTuple), - - /// Tokens in type position not interpreted by Syn. - Verbatim(TokenStream), - - // For testing exhaustiveness in downstream code, use the following idiom: - // - // match ty { - // #![cfg_attr(test, deny(non_exhaustive_omitted_patterns))] - // - // Type::Array(ty) => {...} - // Type::BareFn(ty) => {...} - // ... - // Type::Verbatim(ty) => {...} - // - // _ => { /* some sane fallback */ } - // } - // - // This way we fail your tests but don't break your library when adding - // a variant. You will be notified by a test failure when a variant is - // added, so that you can add code to handle it, but your library will - // continue to compile and work for downstream users in the interim. - } -} - -ast_struct! { - /// A fixed size array type: `[T; n]`. - #[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] - pub struct TypeArray { - pub bracket_token: token::Bracket, - pub elem: Box<Type>, - pub semi_token: Token![;], - pub len: Expr, - } -} - -ast_struct! { - /// A bare function type: `fn(usize) -> bool`. 
- #[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] - pub struct TypeBareFn { - pub lifetimes: Option<BoundLifetimes>, - pub unsafety: Option<Token![unsafe]>, - pub abi: Option<Abi>, - pub fn_token: Token![fn], - pub paren_token: token::Paren, - pub inputs: Punctuated<BareFnArg, Token![,]>, - pub variadic: Option<BareVariadic>, - pub output: ReturnType, - } -} - -ast_struct! { - /// A type contained within invisible delimiters. - #[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] - pub struct TypeGroup { - pub group_token: token::Group, - pub elem: Box<Type>, - } -} - -ast_struct! { - /// An `impl Bound1 + Bound2 + Bound3` type where `Bound` is a trait or - /// a lifetime. - #[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] - pub struct TypeImplTrait { - pub impl_token: Token![impl], - pub bounds: Punctuated<TypeParamBound, Token![+]>, - } -} - -ast_struct! { - /// Indication that a type should be inferred by the compiler: `_`. - #[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] - pub struct TypeInfer { - pub underscore_token: Token![_], - } -} - -ast_struct! { - /// A macro in the type position. - #[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] - pub struct TypeMacro { - pub mac: Macro, - } -} - -ast_struct! { - /// The never type: `!`. - #[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] - pub struct TypeNever { - pub bang_token: Token![!], - } -} - -ast_struct! { - /// A parenthesized type equivalent to the inner type. - #[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] - pub struct TypeParen { - pub paren_token: token::Paren, - pub elem: Box<Type>, - } -} - -ast_struct! { - /// A path like `std::slice::Iter`, optionally qualified with a - /// self-type as in `<Vec<T> as SomeTrait>::Associated`. - #[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] - pub struct TypePath { - pub qself: Option<QSelf>, - pub path: Path, - } -} - -ast_struct! { - /// A raw pointer type: `*const T` or `*mut T`. - #[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] - pub struct TypePtr { - pub star_token: Token![*], - pub const_token: Option<Token![const]>, - pub mutability: Option<Token![mut]>, - pub elem: Box<Type>, - } -} - -ast_struct! { - /// A reference type: `&'a T` or `&'a mut T`. - #[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] - pub struct TypeReference { - pub and_token: Token![&], - pub lifetime: Option<Lifetime>, - pub mutability: Option<Token![mut]>, - pub elem: Box<Type>, - } -} - -ast_struct! { - /// A dynamically sized slice type: `[T]`. - #[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] - pub struct TypeSlice { - pub bracket_token: token::Bracket, - pub elem: Box<Type>, - } -} - -ast_struct! { - /// A trait object type `dyn Bound1 + Bound2 + Bound3` where `Bound` is a - /// trait or a lifetime. - #[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] - pub struct TypeTraitObject { - pub dyn_token: Option<Token![dyn]>, - pub bounds: Punctuated<TypeParamBound, Token![+]>, - } -} - -ast_struct! { - /// A tuple type: `(A, B, C, String)`. - #[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] - pub struct TypeTuple { - pub paren_token: token::Paren, - pub elems: Punctuated<Type, Token![,]>, - } -} - -ast_struct! { - /// The binary interface of a function: `extern "C"`. 
- #[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] - pub struct Abi { - pub extern_token: Token![extern], - pub name: Option<LitStr>, - } -} - -ast_struct! { - /// An argument in a function type: the `usize` in `fn(usize) -> bool`. - #[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] - pub struct BareFnArg { - pub attrs: Vec<Attribute>, - pub name: Option<(Ident, Token![:])>, - pub ty: Type, - } -} - -ast_struct! { - /// The variadic argument of a function pointer like `fn(usize, ...)`. - #[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] - pub struct BareVariadic { - pub attrs: Vec<Attribute>, - pub name: Option<(Ident, Token![:])>, - pub dots: Token![...], - pub comma: Option<Token![,]>, - } -} - -ast_enum! { - /// Return type of a function signature. - #[cfg_attr(docsrs, doc(cfg(any(feature = "full", feature = "derive"))))] - pub enum ReturnType { - /// Return type is not specified. - /// - /// Functions default to `()` and closures default to type inference. - Default, - /// A particular type is returned. - Type(Token![->], Box<Type>), - } -} - -#[cfg(feature = "parsing")] -pub(crate) mod parsing { - use crate::attr::Attribute; - use crate::error::{self, Result}; - use crate::ext::IdentExt as _; - use crate::generics::{BoundLifetimes, TraitBound, TraitBoundModifier, TypeParamBound}; - use crate::ident::Ident; - use crate::lifetime::Lifetime; - use crate::mac::{self, Macro}; - use crate::parse::{Parse, ParseStream}; - use crate::path; - use crate::path::{Path, PathArguments, QSelf}; - use crate::punctuated::Punctuated; - use crate::token; - use crate::ty::{ - Abi, BareFnArg, BareVariadic, ReturnType, Type, TypeArray, TypeBareFn, TypeGroup, - TypeImplTrait, TypeInfer, TypeMacro, TypeNever, TypeParen, TypePath, TypePtr, - TypeReference, TypeSlice, TypeTraitObject, TypeTuple, - }; - use crate::verbatim; - use proc_macro2::Span; - - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - impl Parse for Type { - fn parse(input: ParseStream) -> Result<Self> { - let allow_plus = true; - let allow_group_generic = true; - ambig_ty(input, allow_plus, allow_group_generic) - } - } - - impl Type { - /// In some positions, types may not contain the `+` character, to - /// disambiguate them. For example in the expression `1 as T`, T may not - /// contain a `+` character. - /// - /// This parser does not allow a `+`, while the default parser does. 
- #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - pub fn without_plus(input: ParseStream) -> Result<Self> { - let allow_plus = false; - let allow_group_generic = true; - ambig_ty(input, allow_plus, allow_group_generic) - } - } - - pub(crate) fn ambig_ty( - input: ParseStream, - allow_plus: bool, - allow_group_generic: bool, - ) -> Result<Type> { - let begin = input.fork(); - - if input.peek(token::Group) { - let mut group: TypeGroup = input.parse()?; - if input.peek(Token![::]) && input.peek3(Ident::peek_any) { - if let Type::Path(mut ty) = *group.elem { - Path::parse_rest(input, &mut ty.path, false)?; - return Ok(Type::Path(ty)); - } else { - return Ok(Type::Path(TypePath { - qself: Some(QSelf { - lt_token: Token![<](group.group_token.span), - position: 0, - as_token: None, - gt_token: Token![>](group.group_token.span), - ty: group.elem, - }), - path: Path::parse_helper(input, false)?, - })); - } - } else if input.peek(Token![<]) && allow_group_generic - || input.peek(Token![::]) && input.peek3(Token![<]) - { - if let Type::Path(mut ty) = *group.elem { - let arguments = &mut ty.path.segments.last_mut().unwrap().arguments; - if arguments.is_none() { - *arguments = PathArguments::AngleBracketed(input.parse()?); - Path::parse_rest(input, &mut ty.path, false)?; - return Ok(Type::Path(ty)); - } else { - *group.elem = Type::Path(ty); - } - } - } - return Ok(Type::Group(group)); - } - - let mut lifetimes = None::<BoundLifetimes>; - let mut lookahead = input.lookahead1(); - if lookahead.peek(Token![for]) { - lifetimes = input.parse()?; - lookahead = input.lookahead1(); - if !lookahead.peek(Ident) - && !lookahead.peek(Token![fn]) - && !lookahead.peek(Token![unsafe]) - && !lookahead.peek(Token![extern]) - && !lookahead.peek(Token![super]) - && !lookahead.peek(Token![self]) - && !lookahead.peek(Token![Self]) - && !lookahead.peek(Token![crate]) - || input.peek(Token![dyn]) - { - return Err(lookahead.error()); - } - } - - if lookahead.peek(token::Paren) { - let content; - let paren_token = parenthesized!(content in input); - if content.is_empty() { - return Ok(Type::Tuple(TypeTuple { - paren_token, - elems: Punctuated::new(), - })); - } - if content.peek(Lifetime) { - return Ok(Type::Paren(TypeParen { - paren_token, - elem: Box::new(Type::TraitObject(content.parse()?)), - })); - } - if content.peek(Token![?]) { - return Ok(Type::TraitObject(TypeTraitObject { - dyn_token: None, - bounds: { - let mut bounds = Punctuated::new(); - bounds.push_value(TypeParamBound::Trait(TraitBound { - paren_token: Some(paren_token), - ..content.parse()? - })); - while let Some(plus) = input.parse()? { - bounds.push_punct(plus); - bounds.push_value({ - let allow_precise_capture = false; - let allow_const = false; - TypeParamBound::parse_single( - input, - allow_precise_capture, - allow_const, - )? 
- }); - } - bounds - }, - })); - } - let mut first: Type = content.parse()?; - if content.peek(Token![,]) { - return Ok(Type::Tuple(TypeTuple { - paren_token, - elems: { - let mut elems = Punctuated::new(); - elems.push_value(first); - elems.push_punct(content.parse()?); - while !content.is_empty() { - elems.push_value(content.parse()?); - if content.is_empty() { - break; - } - elems.push_punct(content.parse()?); - } - elems - }, - })); - } - if allow_plus && input.peek(Token![+]) { - loop { - let first = match first { - Type::Path(TypePath { qself: None, path }) => { - TypeParamBound::Trait(TraitBound { - paren_token: Some(paren_token), - modifier: TraitBoundModifier::None, - lifetimes: None, - path, - }) - } - Type::TraitObject(TypeTraitObject { - dyn_token: None, - bounds, - }) => { - if bounds.len() > 1 || bounds.trailing_punct() { - first = Type::TraitObject(TypeTraitObject { - dyn_token: None, - bounds, - }); - break; - } - match bounds.into_iter().next().unwrap() { - TypeParamBound::Trait(trait_bound) => { - TypeParamBound::Trait(TraitBound { - paren_token: Some(paren_token), - ..trait_bound - }) - } - other @ (TypeParamBound::Lifetime(_) - | TypeParamBound::PreciseCapture(_) - | TypeParamBound::Verbatim(_)) => other, - } - } - _ => break, - }; - return Ok(Type::TraitObject(TypeTraitObject { - dyn_token: None, - bounds: { - let mut bounds = Punctuated::new(); - bounds.push_value(first); - while let Some(plus) = input.parse()? { - bounds.push_punct(plus); - bounds.push_value({ - let allow_precise_capture = false; - let allow_const = false; - TypeParamBound::parse_single( - input, - allow_precise_capture, - allow_const, - )? - }); - } - bounds - }, - })); - } - } - Ok(Type::Paren(TypeParen { - paren_token, - elem: Box::new(first), - })) - } else if lookahead.peek(Token![fn]) - || lookahead.peek(Token![unsafe]) - || lookahead.peek(Token![extern]) - { - let mut bare_fn: TypeBareFn = input.parse()?; - bare_fn.lifetimes = lifetimes; - Ok(Type::BareFn(bare_fn)) - } else if lookahead.peek(Ident) - || input.peek(Token![super]) - || input.peek(Token![self]) - || input.peek(Token![Self]) - || input.peek(Token![crate]) - || lookahead.peek(Token![::]) - || lookahead.peek(Token![<]) - { - let ty: TypePath = input.parse()?; - if ty.qself.is_some() { - return Ok(Type::Path(ty)); - } - - if input.peek(Token![!]) && !input.peek(Token![!=]) && ty.path.is_mod_style() { - let bang_token: Token![!] = input.parse()?; - let (delimiter, tokens) = mac::parse_delimiter(input)?; - return Ok(Type::Macro(TypeMacro { - mac: Macro { - path: ty.path, - bang_token, - delimiter, - tokens, - }, - })); - } - - if lifetimes.is_some() || allow_plus && input.peek(Token![+]) { - let mut bounds = Punctuated::new(); - bounds.push_value(TypeParamBound::Trait(TraitBound { - paren_token: None, - modifier: TraitBoundModifier::None, - lifetimes, - path: ty.path, - })); - if allow_plus { - while input.peek(Token![+]) { - bounds.push_punct(input.parse()?); - if !(input.peek(Ident::peek_any) - || input.peek(Token![::]) - || input.peek(Token![?]) - || input.peek(Lifetime) - || input.peek(token::Paren)) - { - break; - } - bounds.push_value({ - let allow_precise_capture = false; - let allow_const = false; - TypeParamBound::parse_single(input, allow_precise_capture, allow_const)? 
- }); - } - } - return Ok(Type::TraitObject(TypeTraitObject { - dyn_token: None, - bounds, - })); - } - - Ok(Type::Path(ty)) - } else if lookahead.peek(Token![dyn]) { - let dyn_token: Token![dyn] = input.parse()?; - let dyn_span = dyn_token.span; - let star_token: Option<Token![*]> = input.parse()?; - let bounds = TypeTraitObject::parse_bounds(dyn_span, input, allow_plus)?; - Ok(if star_token.is_some() { - Type::Verbatim(verbatim::between(&begin, input)) - } else { - Type::TraitObject(TypeTraitObject { - dyn_token: Some(dyn_token), - bounds, - }) - }) - } else if lookahead.peek(token::Bracket) { - let content; - let bracket_token = bracketed!(content in input); - let elem: Type = content.parse()?; - if content.peek(Token![;]) { - Ok(Type::Array(TypeArray { - bracket_token, - elem: Box::new(elem), - semi_token: content.parse()?, - len: content.parse()?, - })) - } else { - Ok(Type::Slice(TypeSlice { - bracket_token, - elem: Box::new(elem), - })) - } - } else if lookahead.peek(Token![*]) { - input.parse().map(Type::Ptr) - } else if lookahead.peek(Token![&]) { - input.parse().map(Type::Reference) - } else if lookahead.peek(Token![!]) && !input.peek(Token![=]) { - input.parse().map(Type::Never) - } else if lookahead.peek(Token![impl]) { - TypeImplTrait::parse(input, allow_plus).map(Type::ImplTrait) - } else if lookahead.peek(Token![_]) { - input.parse().map(Type::Infer) - } else if lookahead.peek(Lifetime) { - input.parse().map(Type::TraitObject) - } else { - Err(lookahead.error()) - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - impl Parse for TypeSlice { - fn parse(input: ParseStream) -> Result<Self> { - let content; - Ok(TypeSlice { - bracket_token: bracketed!(content in input), - elem: content.parse()?, - }) - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - impl Parse for TypeArray { - fn parse(input: ParseStream) -> Result<Self> { - let content; - Ok(TypeArray { - bracket_token: bracketed!(content in input), - elem: content.parse()?, - semi_token: content.parse()?, - len: content.parse()?, - }) - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - impl Parse for TypePtr { - fn parse(input: ParseStream) -> Result<Self> { - let star_token: Token![*] = input.parse()?; - - let lookahead = input.lookahead1(); - let (const_token, mutability) = if lookahead.peek(Token![const]) { - (Some(input.parse()?), None) - } else if lookahead.peek(Token![mut]) { - (None, Some(input.parse()?)) - } else { - return Err(lookahead.error()); - }; - - Ok(TypePtr { - star_token, - const_token, - mutability, - elem: Box::new(input.call(Type::without_plus)?), - }) - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - impl Parse for TypeReference { - fn parse(input: ParseStream) -> Result<Self> { - Ok(TypeReference { - and_token: input.parse()?, - lifetime: input.parse()?, - mutability: input.parse()?, - // & binds tighter than +, so we don't allow + here. 
- elem: Box::new(input.call(Type::without_plus)?), - }) - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - impl Parse for TypeBareFn { - fn parse(input: ParseStream) -> Result<Self> { - let args; - let mut variadic = None; - - Ok(TypeBareFn { - lifetimes: input.parse()?, - unsafety: input.parse()?, - abi: input.parse()?, - fn_token: input.parse()?, - paren_token: parenthesized!(args in input), - inputs: { - let mut inputs = Punctuated::new(); - - while !args.is_empty() { - let attrs = args.call(Attribute::parse_outer)?; - - if inputs.empty_or_trailing() - && (args.peek(Token![...]) - || (args.peek(Ident) || args.peek(Token![_])) - && args.peek2(Token![:]) - && args.peek3(Token![...])) - { - variadic = Some(parse_bare_variadic(&args, attrs)?); - break; - } - - let allow_self = inputs.is_empty(); - let arg = parse_bare_fn_arg(&args, allow_self)?; - inputs.push_value(BareFnArg { attrs, ..arg }); - if args.is_empty() { - break; - } - - let comma = args.parse()?; - inputs.push_punct(comma); - } - - inputs - }, - variadic, - output: input.call(ReturnType::without_plus)?, - }) - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - impl Parse for TypeNever { - fn parse(input: ParseStream) -> Result<Self> { - Ok(TypeNever { - bang_token: input.parse()?, - }) - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - impl Parse for TypeInfer { - fn parse(input: ParseStream) -> Result<Self> { - Ok(TypeInfer { - underscore_token: input.parse()?, - }) - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - impl Parse for TypeTuple { - fn parse(input: ParseStream) -> Result<Self> { - let content; - let paren_token = parenthesized!(content in input); - - if content.is_empty() { - return Ok(TypeTuple { - paren_token, - elems: Punctuated::new(), - }); - } - - let first: Type = content.parse()?; - Ok(TypeTuple { - paren_token, - elems: { - let mut elems = Punctuated::new(); - elems.push_value(first); - elems.push_punct(content.parse()?); - while !content.is_empty() { - elems.push_value(content.parse()?); - if content.is_empty() { - break; - } - elems.push_punct(content.parse()?); - } - elems - }, - }) - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - impl Parse for TypeMacro { - fn parse(input: ParseStream) -> Result<Self> { - Ok(TypeMacro { - mac: input.parse()?, - }) - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - impl Parse for TypePath { - fn parse(input: ParseStream) -> Result<Self> { - let expr_style = false; - let (qself, path) = path::parsing::qpath(input, expr_style)?; - Ok(TypePath { qself, path }) - } - } - - impl ReturnType { - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - pub fn without_plus(input: ParseStream) -> Result<Self> { - let allow_plus = false; - Self::parse(input, allow_plus) - } - - pub(crate) fn parse(input: ParseStream, allow_plus: bool) -> Result<Self> { - if input.peek(Token![->]) { - let arrow = input.parse()?; - let allow_group_generic = true; - let ty = ambig_ty(input, allow_plus, allow_group_generic)?; - Ok(ReturnType::Type(arrow, Box::new(ty))) - } else { - Ok(ReturnType::Default) - } - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - impl Parse for ReturnType { - fn parse(input: ParseStream) -> Result<Self> { - let allow_plus = true; - Self::parse(input, allow_plus) - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - impl Parse for TypeTraitObject { - fn parse(input: ParseStream) -> Result<Self> { - let allow_plus = true; - Self::parse(input, allow_plus) - } - } - - 
impl TypeTraitObject { - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - pub fn without_plus(input: ParseStream) -> Result<Self> { - let allow_plus = false; - Self::parse(input, allow_plus) - } - - // Only allow multiple trait references if allow_plus is true. - pub(crate) fn parse(input: ParseStream, allow_plus: bool) -> Result<Self> { - let dyn_token: Option<Token![dyn]> = input.parse()?; - let dyn_span = match &dyn_token { - Some(token) => token.span, - None => input.span(), - }; - let bounds = Self::parse_bounds(dyn_span, input, allow_plus)?; - Ok(TypeTraitObject { dyn_token, bounds }) - } - - fn parse_bounds( - dyn_span: Span, - input: ParseStream, - allow_plus: bool, - ) -> Result<Punctuated<TypeParamBound, Token![+]>> { - let allow_precise_capture = false; - let allow_const = false; - let bounds = TypeParamBound::parse_multiple( - input, - allow_plus, - allow_precise_capture, - allow_const, - )?; - let mut last_lifetime_span = None; - let mut at_least_one_trait = false; - for bound in &bounds { - match bound { - TypeParamBound::Trait(_) => { - at_least_one_trait = true; - break; - } - TypeParamBound::Lifetime(lifetime) => { - last_lifetime_span = Some(lifetime.ident.span()); - } - TypeParamBound::PreciseCapture(_) | TypeParamBound::Verbatim(_) => { - unreachable!() - } - } - } - // Just lifetimes like `'a + 'b` is not a TraitObject. - if !at_least_one_trait { - let msg = "at least one trait is required for an object type"; - return Err(error::new2(dyn_span, last_lifetime_span.unwrap(), msg)); - } - Ok(bounds) - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - impl Parse for TypeImplTrait { - fn parse(input: ParseStream) -> Result<Self> { - let allow_plus = true; - Self::parse(input, allow_plus) - } - } - - impl TypeImplTrait { - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - pub fn without_plus(input: ParseStream) -> Result<Self> { - let allow_plus = false; - Self::parse(input, allow_plus) - } - - pub(crate) fn parse(input: ParseStream, allow_plus: bool) -> Result<Self> { - let impl_token: Token![impl] = input.parse()?; - let allow_precise_capture = true; - let allow_const = true; - let bounds = TypeParamBound::parse_multiple( - input, - allow_plus, - allow_precise_capture, - allow_const, - )?; - let mut last_nontrait_span = None; - let mut at_least_one_trait = false; - for bound in &bounds { - match bound { - TypeParamBound::Trait(_) => { - at_least_one_trait = true; - break; - } - TypeParamBound::Lifetime(lifetime) => { - last_nontrait_span = Some(lifetime.ident.span()); - } - TypeParamBound::PreciseCapture(precise_capture) => { - #[cfg(feature = "full")] - { - last_nontrait_span = Some(precise_capture.gt_token.span); - } - #[cfg(not(feature = "full"))] - { - _ = precise_capture; - unreachable!(); - } - } - TypeParamBound::Verbatim(_) => { - // `[const] Trait` - at_least_one_trait = true; - break; - } - } - } - if !at_least_one_trait { - let msg = "at least one trait must be specified"; - return Err(error::new2( - impl_token.span, - last_nontrait_span.unwrap(), - msg, - )); - } - Ok(TypeImplTrait { impl_token, bounds }) - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - impl Parse for TypeGroup { - fn parse(input: ParseStream) -> Result<Self> { - let group = crate::group::parse_group(input)?; - Ok(TypeGroup { - group_token: group.token, - elem: group.content.parse()?, - }) - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - impl Parse for TypeParen { - fn parse(input: ParseStream) -> Result<Self> { - let allow_plus = false; - 
Self::parse(input, allow_plus) - } - } - - impl TypeParen { - fn parse(input: ParseStream, allow_plus: bool) -> Result<Self> { - let content; - Ok(TypeParen { - paren_token: parenthesized!(content in input), - elem: Box::new({ - let allow_group_generic = true; - ambig_ty(&content, allow_plus, allow_group_generic)? - }), - }) - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - impl Parse for BareFnArg { - fn parse(input: ParseStream) -> Result<Self> { - let allow_self = false; - parse_bare_fn_arg(input, allow_self) - } - } - - fn parse_bare_fn_arg(input: ParseStream, allow_self: bool) -> Result<BareFnArg> { - let attrs = input.call(Attribute::parse_outer)?; - - let begin = input.fork(); - - let has_mut_self = allow_self && input.peek(Token![mut]) && input.peek2(Token![self]); - if has_mut_self { - input.parse::<Token![mut]>()?; - } - - let mut has_self = false; - let mut name = if (input.peek(Ident) || input.peek(Token![_]) || { - has_self = allow_self && input.peek(Token![self]); - has_self - }) && input.peek2(Token![:]) - && !input.peek2(Token![::]) - { - let name = input.call(Ident::parse_any)?; - let colon: Token![:] = input.parse()?; - Some((name, colon)) - } else { - has_self = false; - None - }; - - let ty = if allow_self && !has_self && input.peek(Token![mut]) && input.peek2(Token![self]) - { - input.parse::<Token![mut]>()?; - input.parse::<Token![self]>()?; - None - } else if has_mut_self && name.is_none() { - input.parse::<Token![self]>()?; - None - } else { - Some(input.parse()?) - }; - - let ty = match ty { - Some(ty) if !has_mut_self => ty, - _ => { - name = None; - Type::Verbatim(verbatim::between(&begin, input)) - } - }; - - Ok(BareFnArg { attrs, name, ty }) - } - - fn parse_bare_variadic(input: ParseStream, attrs: Vec<Attribute>) -> Result<BareVariadic> { - Ok(BareVariadic { - attrs, - name: if input.peek(Ident) || input.peek(Token![_]) { - let name = input.call(Ident::parse_any)?; - let colon: Token![:] = input.parse()?; - Some((name, colon)) - } else { - None - }, - dots: input.parse()?, - comma: input.parse()?, - }) - } - - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - impl Parse for Abi { - fn parse(input: ParseStream) -> Result<Self> { - Ok(Abi { - extern_token: input.parse()?, - name: input.parse()?, - }) - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "parsing")))] - impl Parse for Option<Abi> { - fn parse(input: ParseStream) -> Result<Self> { - if input.peek(Token![extern]) { - input.parse().map(Some) - } else { - Ok(None) - } - } - } -} - -#[cfg(feature = "printing")] -mod printing { - use crate::attr::FilterAttrs; - use crate::path; - use crate::path::printing::PathStyle; - use crate::print::TokensOrDefault; - use crate::ty::{ - Abi, BareFnArg, BareVariadic, ReturnType, TypeArray, TypeBareFn, TypeGroup, TypeImplTrait, - TypeInfer, TypeMacro, TypeNever, TypeParen, TypePath, TypePtr, TypeReference, TypeSlice, - TypeTraitObject, TypeTuple, - }; - use proc_macro2::TokenStream; - use quote::{ToTokens, TokenStreamExt as _}; - - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for TypeSlice { - fn to_tokens(&self, tokens: &mut TokenStream) { - self.bracket_token.surround(tokens, |tokens| { - self.elem.to_tokens(tokens); - }); - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for TypeArray { - fn to_tokens(&self, tokens: &mut TokenStream) { - self.bracket_token.surround(tokens, |tokens| { - self.elem.to_tokens(tokens); - self.semi_token.to_tokens(tokens); - self.len.to_tokens(tokens); - }); - } - } - - 
#[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for TypePtr { - fn to_tokens(&self, tokens: &mut TokenStream) { - self.star_token.to_tokens(tokens); - match &self.mutability { - Some(tok) => tok.to_tokens(tokens), - None => { - TokensOrDefault(&self.const_token).to_tokens(tokens); - } - } - self.elem.to_tokens(tokens); - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for TypeReference { - fn to_tokens(&self, tokens: &mut TokenStream) { - self.and_token.to_tokens(tokens); - self.lifetime.to_tokens(tokens); - self.mutability.to_tokens(tokens); - self.elem.to_tokens(tokens); - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for TypeBareFn { - fn to_tokens(&self, tokens: &mut TokenStream) { - self.lifetimes.to_tokens(tokens); - self.unsafety.to_tokens(tokens); - self.abi.to_tokens(tokens); - self.fn_token.to_tokens(tokens); - self.paren_token.surround(tokens, |tokens| { - self.inputs.to_tokens(tokens); - if let Some(variadic) = &self.variadic { - if !self.inputs.empty_or_trailing() { - let span = variadic.dots.spans[0]; - Token![,](span).to_tokens(tokens); - } - variadic.to_tokens(tokens); - } - }); - self.output.to_tokens(tokens); - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for TypeNever { - fn to_tokens(&self, tokens: &mut TokenStream) { - self.bang_token.to_tokens(tokens); - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for TypeTuple { - fn to_tokens(&self, tokens: &mut TokenStream) { - self.paren_token.surround(tokens, |tokens| { - self.elems.to_tokens(tokens); - // If we only have one argument, we need a trailing comma to - // distinguish TypeTuple from TypeParen. - if self.elems.len() == 1 && !self.elems.trailing_punct() { - <Token![,]>::default().to_tokens(tokens); - } - }); - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for TypePath { - fn to_tokens(&self, tokens: &mut TokenStream) { - path::printing::print_qpath(tokens, &self.qself, &self.path, PathStyle::AsWritten); - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for TypeTraitObject { - fn to_tokens(&self, tokens: &mut TokenStream) { - self.dyn_token.to_tokens(tokens); - self.bounds.to_tokens(tokens); - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for TypeImplTrait { - fn to_tokens(&self, tokens: &mut TokenStream) { - self.impl_token.to_tokens(tokens); - self.bounds.to_tokens(tokens); - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for TypeGroup { - fn to_tokens(&self, tokens: &mut TokenStream) { - self.group_token.surround(tokens, |tokens| { - self.elem.to_tokens(tokens); - }); - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for TypeParen { - fn to_tokens(&self, tokens: &mut TokenStream) { - self.paren_token.surround(tokens, |tokens| { - self.elem.to_tokens(tokens); - }); - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for TypeInfer { - fn to_tokens(&self, tokens: &mut TokenStream) { - self.underscore_token.to_tokens(tokens); - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for TypeMacro { - fn to_tokens(&self, tokens: &mut TokenStream) { - self.mac.to_tokens(tokens); - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for ReturnType { - fn to_tokens(&self, tokens: &mut TokenStream) { - match self { - ReturnType::Default => {} - 
ReturnType::Type(arrow, ty) => { - arrow.to_tokens(tokens); - ty.to_tokens(tokens); - } - } - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for BareFnArg { - fn to_tokens(&self, tokens: &mut TokenStream) { - tokens.append_all(self.attrs.outer()); - if let Some((name, colon)) = &self.name { - name.to_tokens(tokens); - colon.to_tokens(tokens); - } - self.ty.to_tokens(tokens); - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for BareVariadic { - fn to_tokens(&self, tokens: &mut TokenStream) { - tokens.append_all(self.attrs.outer()); - if let Some((name, colon)) = &self.name { - name.to_tokens(tokens); - colon.to_tokens(tokens); - } - self.dots.to_tokens(tokens); - self.comma.to_tokens(tokens); - } - } - - #[cfg_attr(docsrs, doc(cfg(feature = "printing")))] - impl ToTokens for Abi { - fn to_tokens(&self, tokens: &mut TokenStream) { - self.extern_token.to_tokens(tokens); - self.name.to_tokens(tokens); - } - } -} diff --git a/vendor/syn/src/verbatim.rs b/vendor/syn/src/verbatim.rs deleted file mode 100644 index 4a7ea2e1bb2388..00000000000000 --- a/vendor/syn/src/verbatim.rs +++ /dev/null @@ -1,33 +0,0 @@ -use crate::ext::TokenStreamExt as _; -use crate::parse::ParseStream; -use proc_macro2::{Delimiter, TokenStream}; -use std::cmp::Ordering; - -pub(crate) fn between<'a>(begin: ParseStream<'a>, end: ParseStream<'a>) -> TokenStream { - let end = end.cursor(); - let mut cursor = begin.cursor(); - assert!(crate::buffer::same_buffer(end, cursor)); - - let mut tokens = TokenStream::new(); - while cursor != end { - let (tt, next) = cursor.token_tree().unwrap(); - - if crate::buffer::cmp_assuming_same_buffer(end, next) == Ordering::Less { - // A syntax node can cross the boundary of a None-delimited group - // due to such groups being transparent to the parser in most cases. - // Any time this occurs the group is known to be semantically - // irrelevant. 
https://github.com/dtolnay/syn/issues/1235 - if let Some((inside, _span, after)) = cursor.group(Delimiter::None) { - assert!(next == after); - cursor = inside; - continue; - } else { - panic!("verbatim end must not be inside a delimited group"); - } - } - - tokens.append(tt); - cursor = next; - } - tokens -} diff --git a/vendor/syn/src/whitespace.rs b/vendor/syn/src/whitespace.rs deleted file mode 100644 index a50b5069a68b92..00000000000000 --- a/vendor/syn/src/whitespace.rs +++ /dev/null @@ -1,65 +0,0 @@ -pub(crate) fn skip(mut s: &str) -> &str { - 'skip: while !s.is_empty() { - let byte = s.as_bytes()[0]; - if byte == b'/' { - if s.starts_with("//") - && (!s.starts_with("///") || s.starts_with("////")) - && !s.starts_with("//!") - { - if let Some(i) = s.find('\n') { - s = &s[i + 1..]; - continue; - } else { - return ""; - } - } else if s.starts_with("/**/") { - s = &s[4..]; - continue; - } else if s.starts_with("/*") - && (!s.starts_with("/**") || s.starts_with("/***")) - && !s.starts_with("/*!") - { - let mut depth = 0; - let bytes = s.as_bytes(); - let mut i = 0; - let upper = bytes.len() - 1; - while i < upper { - if bytes[i] == b'/' && bytes[i + 1] == b'*' { - depth += 1; - i += 1; // eat '*' - } else if bytes[i] == b'*' && bytes[i + 1] == b'/' { - depth -= 1; - if depth == 0 { - s = &s[i + 2..]; - continue 'skip; - } - i += 1; // eat '/' - } - i += 1; - } - return s; - } - } - match byte { - b' ' | 0x09..=0x0D => { - s = &s[1..]; - continue; - } - b if b <= 0x7F => {} - _ => { - let ch = s.chars().next().unwrap(); - if is_whitespace(ch) { - s = &s[ch.len_utf8()..]; - continue; - } - } - } - return s; - } - s -} - -fn is_whitespace(ch: char) -> bool { - // Rust treats left-to-right mark and right-to-left mark as whitespace - ch.is_whitespace() || ch == '\u{200e}' || ch == '\u{200f}' -} diff --git a/vendor/syn/tests/common/eq.rs b/vendor/syn/tests/common/eq.rs deleted file mode 100644 index 6bf4a58169fb73..00000000000000 --- a/vendor/syn/tests/common/eq.rs +++ /dev/null @@ -1,898 +0,0 @@ -#![allow(unused_macro_rules)] - -extern crate rustc_ast; -extern crate rustc_data_structures; -extern crate rustc_driver; -extern crate rustc_span; -extern crate thin_vec; - -use rustc_ast::ast::AngleBracketedArg; -use rustc_ast::ast::AngleBracketedArgs; -use rustc_ast::ast::AnonConst; -use rustc_ast::ast::Arm; -use rustc_ast::ast::AsmMacro; -use rustc_ast::ast::AssignOpKind; -use rustc_ast::ast::AssocItemConstraint; -use rustc_ast::ast::AssocItemConstraintKind; -use rustc_ast::ast::AssocItemKind; -use rustc_ast::ast::AttrArgs; -use rustc_ast::ast::AttrId; -use rustc_ast::ast::AttrItem; -use rustc_ast::ast::AttrKind; -use rustc_ast::ast::AttrStyle; -use rustc_ast::ast::Attribute; -use rustc_ast::ast::BinOpKind; -use rustc_ast::ast::BindingMode; -use rustc_ast::ast::Block; -use rustc_ast::ast::BlockCheckMode; -use rustc_ast::ast::BorrowKind; -use rustc_ast::ast::BoundAsyncness; -use rustc_ast::ast::BoundConstness; -use rustc_ast::ast::BoundPolarity; -use rustc_ast::ast::ByRef; -use rustc_ast::ast::CaptureBy; -use rustc_ast::ast::Closure; -use rustc_ast::ast::ClosureBinder; -use rustc_ast::ast::Const; -use rustc_ast::ast::ConstItem; -use rustc_ast::ast::ConstItemRhs; -use rustc_ast::ast::CoroutineKind; -use rustc_ast::ast::Crate; -use rustc_ast::ast::Defaultness; -use rustc_ast::ast::Delegation; -use rustc_ast::ast::DelegationMac; -use rustc_ast::ast::DelimArgs; -use rustc_ast::ast::EnumDef; -use rustc_ast::ast::Expr; -use rustc_ast::ast::ExprField; -use rustc_ast::ast::ExprKind; -use 
rustc_ast::ast::Extern; -use rustc_ast::ast::FieldDef; -use rustc_ast::ast::FloatTy; -use rustc_ast::ast::Fn; -use rustc_ast::ast::FnContract; -use rustc_ast::ast::FnDecl; -use rustc_ast::ast::FnHeader; -use rustc_ast::ast::FnPtrTy; -use rustc_ast::ast::FnRetTy; -use rustc_ast::ast::FnSig; -use rustc_ast::ast::ForLoopKind; -use rustc_ast::ast::ForeignItemKind; -use rustc_ast::ast::ForeignMod; -use rustc_ast::ast::FormatAlignment; -use rustc_ast::ast::FormatArgPosition; -use rustc_ast::ast::FormatArgPositionKind; -use rustc_ast::ast::FormatArgs; -use rustc_ast::ast::FormatArgsPiece; -use rustc_ast::ast::FormatArgument; -use rustc_ast::ast::FormatArgumentKind; -use rustc_ast::ast::FormatArguments; -use rustc_ast::ast::FormatCount; -use rustc_ast::ast::FormatDebugHex; -use rustc_ast::ast::FormatOptions; -use rustc_ast::ast::FormatPlaceholder; -use rustc_ast::ast::FormatSign; -use rustc_ast::ast::FormatTrait; -use rustc_ast::ast::GenBlockKind; -use rustc_ast::ast::GenericArg; -use rustc_ast::ast::GenericArgs; -use rustc_ast::ast::GenericBound; -use rustc_ast::ast::GenericParam; -use rustc_ast::ast::GenericParamKind; -use rustc_ast::ast::Generics; -use rustc_ast::ast::Impl; -use rustc_ast::ast::ImplPolarity; -use rustc_ast::ast::Inline; -use rustc_ast::ast::InlineAsm; -use rustc_ast::ast::InlineAsmOperand; -use rustc_ast::ast::InlineAsmOptions; -use rustc_ast::ast::InlineAsmRegOrRegClass; -use rustc_ast::ast::InlineAsmSym; -use rustc_ast::ast::InlineAsmTemplatePiece; -use rustc_ast::ast::IntTy; -use rustc_ast::ast::IsAuto; -use rustc_ast::ast::Item; -use rustc_ast::ast::ItemKind; -use rustc_ast::ast::Label; -use rustc_ast::ast::Lifetime; -use rustc_ast::ast::LitFloatType; -use rustc_ast::ast::LitIntType; -use rustc_ast::ast::LitKind; -use rustc_ast::ast::Local; -use rustc_ast::ast::LocalKind; -use rustc_ast::ast::MacCall; -use rustc_ast::ast::MacCallStmt; -use rustc_ast::ast::MacStmtStyle; -use rustc_ast::ast::MacroDef; -use rustc_ast::ast::MatchKind; -use rustc_ast::ast::MetaItem; -use rustc_ast::ast::MetaItemInner; -use rustc_ast::ast::MetaItemKind; -use rustc_ast::ast::MetaItemLit; -use rustc_ast::ast::MethodCall; -use rustc_ast::ast::ModKind; -use rustc_ast::ast::ModSpans; -use rustc_ast::ast::Movability; -use rustc_ast::ast::MutTy; -use rustc_ast::ast::Mutability; -use rustc_ast::ast::NodeId; -use rustc_ast::ast::NormalAttr; -use rustc_ast::ast::Param; -use rustc_ast::ast::Parens; -use rustc_ast::ast::ParenthesizedArgs; -use rustc_ast::ast::Pat; -use rustc_ast::ast::PatField; -use rustc_ast::ast::PatFieldsRest; -use rustc_ast::ast::PatKind; -use rustc_ast::ast::Path; -use rustc_ast::ast::PathSegment; -use rustc_ast::ast::Pinnedness; -use rustc_ast::ast::PolyTraitRef; -use rustc_ast::ast::PreciseCapturingArg; -use rustc_ast::ast::QSelf; -use rustc_ast::ast::RangeEnd; -use rustc_ast::ast::RangeLimits; -use rustc_ast::ast::RangeSyntax; -use rustc_ast::ast::Recovered; -use rustc_ast::ast::Safety; -use rustc_ast::ast::StaticItem; -use rustc_ast::ast::Stmt; -use rustc_ast::ast::StmtKind; -use rustc_ast::ast::StrLit; -use rustc_ast::ast::StrStyle; -use rustc_ast::ast::StructExpr; -use rustc_ast::ast::StructRest; -use rustc_ast::ast::Term; -use rustc_ast::ast::Trait; -use rustc_ast::ast::TraitAlias; -use rustc_ast::ast::TraitBoundModifiers; -use rustc_ast::ast::TraitImplHeader; -use rustc_ast::ast::TraitObjectSyntax; -use rustc_ast::ast::TraitRef; -use rustc_ast::ast::Ty; -use rustc_ast::ast::TyAlias; -use rustc_ast::ast::TyKind; -use rustc_ast::ast::TyPat; -use rustc_ast::ast::TyPatKind; -use 
rustc_ast::ast::UintTy; -use rustc_ast::ast::UnOp; -use rustc_ast::ast::UnsafeBinderCastKind; -use rustc_ast::ast::UnsafeBinderTy; -use rustc_ast::ast::UnsafeSource; -use rustc_ast::ast::UseTree; -use rustc_ast::ast::UseTreeKind; -use rustc_ast::ast::Variant; -use rustc_ast::ast::VariantData; -use rustc_ast::ast::Visibility; -use rustc_ast::ast::VisibilityKind; -use rustc_ast::ast::WhereBoundPredicate; -use rustc_ast::ast::WhereClause; -use rustc_ast::ast::WhereEqPredicate; -use rustc_ast::ast::WherePredicate; -use rustc_ast::ast::WherePredicateKind; -use rustc_ast::ast::WhereRegionPredicate; -use rustc_ast::ast::YieldKind; -use rustc_ast::token::{self, CommentKind, Delimiter, IdentIsRaw, Lit, Token, TokenKind}; -use rustc_ast::tokenstream::{ - AttrTokenStream, AttrTokenTree, AttrsTarget, DelimSpacing, DelimSpan, LazyAttrTokenStream, - Spacing, TokenStream, TokenTree, -}; -use rustc_data_structures::packed::Pu128; -use rustc_span::source_map::Spanned; -use rustc_span::symbol::{sym, ByteSymbol, Ident, Symbol}; -use rustc_span::{ErrorGuaranteed, Span, SyntaxContext, DUMMY_SP}; -use std::borrow::Cow; -use std::collections::HashMap; -use std::hash::{BuildHasher, Hash}; -use std::sync::Arc; -use thin_vec::ThinVec; - -pub trait SpanlessEq { - fn eq(&self, other: &Self) -> bool; -} - -impl<T: ?Sized + SpanlessEq> SpanlessEq for Box<T> { - fn eq(&self, other: &Self) -> bool { - SpanlessEq::eq(&**self, &**other) - } -} - -impl<T: ?Sized + SpanlessEq> SpanlessEq for Arc<T> { - fn eq(&self, other: &Self) -> bool { - SpanlessEq::eq(&**self, &**other) - } -} - -impl<T: SpanlessEq> SpanlessEq for Option<T> { - fn eq(&self, other: &Self) -> bool { - match (self, other) { - (None, None) => true, - (Some(this), Some(other)) => SpanlessEq::eq(this, other), - _ => false, - } - } -} - -impl<T: SpanlessEq, E: SpanlessEq> SpanlessEq for Result<T, E> { - fn eq(&self, other: &Self) -> bool { - match (self, other) { - (Ok(this), Ok(other)) => SpanlessEq::eq(this, other), - (Err(this), Err(other)) => SpanlessEq::eq(this, other), - _ => false, - } - } -} - -impl<T: SpanlessEq> SpanlessEq for [T] { - fn eq(&self, other: &Self) -> bool { - self.len() == other.len() && self.iter().zip(other).all(|(a, b)| SpanlessEq::eq(a, b)) - } -} - -impl<T: SpanlessEq> SpanlessEq for Vec<T> { - fn eq(&self, other: &Self) -> bool { - <[T] as SpanlessEq>::eq(self, other) - } -} - -impl<T: SpanlessEq> SpanlessEq for ThinVec<T> { - fn eq(&self, other: &Self) -> bool { - self.len() == other.len() - && self - .iter() - .zip(other.iter()) - .all(|(a, b)| SpanlessEq::eq(a, b)) - } -} - -impl<K: Eq + Hash, V: SpanlessEq, S: BuildHasher> SpanlessEq for HashMap<K, V, S> { - fn eq(&self, other: &Self) -> bool { - self.len() == other.len() - && self.iter().all(|(key, this_v)| { - other - .get(key) - .map_or(false, |other_v| SpanlessEq::eq(this_v, other_v)) - }) - } -} - -impl<'a, T: ?Sized + ToOwned + SpanlessEq> SpanlessEq for Cow<'a, T> { - fn eq(&self, other: &Self) -> bool { - <T as SpanlessEq>::eq(self, other) - } -} - -impl<T: SpanlessEq> SpanlessEq for Spanned<T> { - fn eq(&self, other: &Self) -> bool { - SpanlessEq::eq(&self.node, &other.node) - } -} - -impl<A: SpanlessEq, B: SpanlessEq> SpanlessEq for (A, B) { - fn eq(&self, other: &Self) -> bool { - SpanlessEq::eq(&self.0, &other.0) && SpanlessEq::eq(&self.1, &other.1) - } -} - -impl<A: SpanlessEq, B: SpanlessEq, C: SpanlessEq> SpanlessEq for (A, B, C) { - fn eq(&self, other: &Self) -> bool { - SpanlessEq::eq(&self.0, &other.0) - && SpanlessEq::eq(&self.1, &other.1) - && 
SpanlessEq::eq(&self.2, &other.2) - } -} - -macro_rules! spanless_eq_true { - ($name:ty) => { - impl SpanlessEq for $name { - fn eq(&self, _other: &Self) -> bool { - true - } - } - }; -} - -spanless_eq_true!(Span); -spanless_eq_true!(DelimSpan); -spanless_eq_true!(AttrId); -spanless_eq_true!(NodeId); -spanless_eq_true!(SyntaxContext); -spanless_eq_true!(Spacing); - -macro_rules! spanless_eq_partial_eq { - ($name:ty) => { - impl SpanlessEq for $name { - fn eq(&self, other: &Self) -> bool { - PartialEq::eq(self, other) - } - } - }; -} - -spanless_eq_partial_eq!(()); -spanless_eq_partial_eq!(bool); -spanless_eq_partial_eq!(u8); -spanless_eq_partial_eq!(u16); -spanless_eq_partial_eq!(u32); -spanless_eq_partial_eq!(u128); -spanless_eq_partial_eq!(usize); -spanless_eq_partial_eq!(char); -spanless_eq_partial_eq!(str); -spanless_eq_partial_eq!(String); -spanless_eq_partial_eq!(Pu128); -spanless_eq_partial_eq!(Symbol); -spanless_eq_partial_eq!(ByteSymbol); -spanless_eq_partial_eq!(CommentKind); -spanless_eq_partial_eq!(Delimiter); -spanless_eq_partial_eq!(InlineAsmOptions); -spanless_eq_partial_eq!(token::LitKind); -spanless_eq_partial_eq!(ErrorGuaranteed); - -macro_rules! spanless_eq_struct { - { - $($name:ident)::+ $(<$param:ident>)? - $([$field:tt $this:ident $other:ident])* - $(![$ignore:tt])*; - } => { - impl $(<$param: SpanlessEq>)* SpanlessEq for $($name)::+ $(<$param>)* { - fn eq(&self, other: &Self) -> bool { - let $($name)::+ { $($field: $this,)* $($ignore: _,)* } = self; - let $($name)::+ { $($field: $other,)* $($ignore: _,)* } = other; - true $(&& SpanlessEq::eq($this, $other))* - } - } - }; - - { - $($name:ident)::+ $(<$param:ident>)? - $([$field:tt $this:ident $other:ident])* - $(![$ignore:tt])*; - !$next:tt - $($rest:tt)* - } => { - spanless_eq_struct! { - $($name)::+ $(<$param>)* - $([$field $this $other])* - $(![$ignore])* - ![$next]; - $($rest)* - } - }; - - { - $($name:ident)::+ $(<$param:ident>)? - $([$field:tt $this:ident $other:ident])* - $(![$ignore:tt])*; - $next:tt - $($rest:tt)* - } => { - spanless_eq_struct! { - $($name)::+ $(<$param>)* - $([$field $this $other])* - [$next this other] - $(![$ignore])*; - $($rest)* - } - }; -} - -macro_rules! spanless_eq_enum { - { - $($name:ident)::+; - $([$($variant:ident)::+; $([$field:tt $this:ident $other:ident])* $(![$ignore:tt])*])* - } => { - impl SpanlessEq for $($name)::+ { - fn eq(&self, other: &Self) -> bool { - match self { - $( - $($variant)::+ { .. } => {} - )* - } - #[allow(unreachable_patterns)] - match (self, other) { - $( - ( - $($variant)::+ { $($field: $this,)* $($ignore: _,)* }, - $($variant)::+ { $($field: $other,)* $($ignore: _,)* }, - ) => { - true $(&& SpanlessEq::eq($this, $other))* - } - )* - _ => false, - } - } - } - }; - - { - $($name:ident)::+; - $([$($variant:ident)::+; $($fields:tt)*])* - $next:ident [$([$($named:tt)*])* $(![$ignore:tt])*] (!$i:tt $($field:tt)*) - $($rest:tt)* - } => { - spanless_eq_enum! { - $($name)::+; - $([$($variant)::+; $($fields)*])* - $next [$([$($named)*])* $(![$ignore])* ![$i]] ($($field)*) - $($rest)* - } - }; - - { - $($name:ident)::+; - $([$($variant:ident)::+; $($fields:tt)*])* - $next:ident [$([$($named:tt)*])* $(![$ignore:tt])*] ($i:tt $($field:tt)*) - $($rest:tt)* - } => { - spanless_eq_enum! 
{ - $($name)::+; - $([$($variant)::+; $($fields)*])* - $next [$([$($named)*])* [$i this other] $(![$ignore])*] ($($field)*) - $($rest)* - } - }; - - { - $($name:ident)::+; - $([$($variant:ident)::+; $($fields:tt)*])* - $next:ident [$($named:tt)*] () - $($rest:tt)* - } => { - spanless_eq_enum! { - $($name)::+; - $([$($variant)::+; $($fields)*])* - [$($name)::+::$next; $($named)*] - $($rest)* - } - }; - - { - $($name:ident)::+; - $([$($variant:ident)::+; $($fields:tt)*])* - $next:ident ($($field:tt)*) - $($rest:tt)* - } => { - spanless_eq_enum! { - $($name)::+; - $([$($variant)::+; $($fields)*])* - $next [] ($($field)*) - $($rest)* - } - }; - - { - $($name:ident)::+; - $([$($variant:ident)::+; $($fields:tt)*])* - $next:ident - $($rest:tt)* - } => { - spanless_eq_enum! { - $($name)::+; - $([$($variant)::+; $($fields)*])* - [$($name)::+::$next;] - $($rest)* - } - }; -} - -spanless_eq_struct!(AngleBracketedArgs; span args); -spanless_eq_struct!(AnonConst; id value); -spanless_eq_struct!(Arm; attrs pat guard body span id is_placeholder); -spanless_eq_struct!(AssocItemConstraint; id ident gen_args kind span); -spanless_eq_struct!(AttrItem; unsafety path args tokens); -spanless_eq_struct!(AttrTokenStream; 0); -spanless_eq_struct!(Attribute; kind id style span); -spanless_eq_struct!(AttrsTarget; attrs tokens); -spanless_eq_struct!(BindingMode; 0 1); -spanless_eq_struct!(Block; stmts id rules span tokens); -spanless_eq_struct!(Closure; binder capture_clause constness coroutine_kind movability fn_decl body !fn_decl_span !fn_arg_span); -spanless_eq_struct!(ConstItem; defaultness ident generics ty rhs define_opaque); -spanless_eq_struct!(Crate; attrs items spans id is_placeholder); -spanless_eq_struct!(Delegation; id qself path ident rename body from_glob); -spanless_eq_struct!(DelegationMac; qself prefix suffixes body); -spanless_eq_struct!(DelimArgs; dspan delim tokens); -spanless_eq_struct!(DelimSpacing; open close); -spanless_eq_struct!(EnumDef; variants); -spanless_eq_struct!(Expr; id kind span attrs !tokens); -spanless_eq_struct!(ExprField; attrs id span ident expr is_shorthand is_placeholder); -spanless_eq_struct!(FieldDef; attrs id span vis safety ident ty default is_placeholder); -spanless_eq_struct!(Fn; defaultness ident generics sig contract define_opaque body); -spanless_eq_struct!(FnContract; declarations requires ensures); -spanless_eq_struct!(FnDecl; inputs output); -spanless_eq_struct!(FnHeader; constness coroutine_kind safety ext); -spanless_eq_struct!(FnPtrTy; safety ext generic_params decl decl_span); -spanless_eq_struct!(FnSig; header decl span); -spanless_eq_struct!(ForeignMod; extern_span safety abi items); -spanless_eq_struct!(FormatArgPosition; index kind span); -spanless_eq_struct!(FormatArgs; span template arguments uncooked_fmt_str is_source_literal); -spanless_eq_struct!(FormatArgument; kind expr); -spanless_eq_struct!(FormatOptions; width precision alignment fill sign alternate zero_pad debug_hex); -spanless_eq_struct!(FormatPlaceholder; argument span format_trait format_options); -spanless_eq_struct!(GenericParam; id ident attrs bounds is_placeholder kind !colon_span); -spanless_eq_struct!(Generics; params where_clause span); -spanless_eq_struct!(Impl; generics of_trait self_ty items); -spanless_eq_struct!(InlineAsm; asm_macro template template_strs operands clobber_abis options line_spans); -spanless_eq_struct!(InlineAsmSym; id qself path); -spanless_eq_struct!(Item<K>; attrs id span vis kind !tokens); -spanless_eq_struct!(Label; ident); -spanless_eq_struct!(Lifetime; id 
ident); -spanless_eq_struct!(Lit; kind symbol suffix); -spanless_eq_struct!(Local; id super_ pat ty kind span colon_sp attrs !tokens); -spanless_eq_struct!(MacCall; path args); -spanless_eq_struct!(MacCallStmt; mac style attrs tokens); -spanless_eq_struct!(MacroDef; body macro_rules); -spanless_eq_struct!(MetaItem; unsafety path kind span); -spanless_eq_struct!(MetaItemLit; symbol suffix kind span); -spanless_eq_struct!(MethodCall; seg receiver args !span); -spanless_eq_struct!(ModSpans; !inner_span !inject_use_span); -spanless_eq_struct!(MutTy; ty mutbl); -spanless_eq_struct!(NormalAttr; item tokens); -spanless_eq_struct!(ParenthesizedArgs; span inputs inputs_span output); -spanless_eq_struct!(Pat; id kind span tokens); -spanless_eq_struct!(PatField; ident pat is_shorthand attrs id span is_placeholder); -spanless_eq_struct!(Path; span segments tokens); -spanless_eq_struct!(PathSegment; ident id args); -spanless_eq_struct!(PolyTraitRef; bound_generic_params modifiers trait_ref span parens); -spanless_eq_struct!(QSelf; ty path_span position); -spanless_eq_struct!(StaticItem; ident ty safety mutability expr define_opaque); -spanless_eq_struct!(Stmt; id kind span); -spanless_eq_struct!(StrLit; symbol suffix symbol_unescaped style span); -spanless_eq_struct!(StructExpr; qself path fields rest); -spanless_eq_struct!(Token; kind span); -spanless_eq_struct!(Trait; constness safety is_auto ident generics bounds items); -spanless_eq_struct!(TraitAlias; constness ident generics bounds); -spanless_eq_struct!(TraitBoundModifiers; constness asyncness polarity); -spanless_eq_struct!(TraitImplHeader; defaultness safety constness polarity trait_ref); -spanless_eq_struct!(TraitRef; path ref_id); -spanless_eq_struct!(Ty; id kind span tokens); -spanless_eq_struct!(TyAlias; defaultness ident generics after_where_clause bounds ty); -spanless_eq_struct!(TyPat; id kind span tokens); -spanless_eq_struct!(UnsafeBinderTy; generic_params inner_ty); -spanless_eq_struct!(UseTree; prefix kind span); -spanless_eq_struct!(Variant; attrs id span !vis ident data disr_expr is_placeholder); -spanless_eq_struct!(Visibility; kind span tokens); -spanless_eq_struct!(WhereBoundPredicate; bound_generic_params bounded_ty bounds); -spanless_eq_struct!(WhereClause; has_where_token predicates span); -spanless_eq_struct!(WhereEqPredicate; lhs_ty rhs_ty); -spanless_eq_struct!(WherePredicate; attrs kind id span is_placeholder); -spanless_eq_struct!(WhereRegionPredicate; lifetime bounds); -spanless_eq_enum!(AngleBracketedArg; Arg(0) Constraint(0)); -spanless_eq_enum!(AsmMacro; Asm GlobalAsm NakedAsm); -spanless_eq_enum!(AssocItemConstraintKind; Equality(term) Bound(bounds)); -spanless_eq_enum!(AssocItemKind; Const(0) Fn(0) Type(0) MacCall(0) Delegation(0) DelegationMac(0)); -spanless_eq_enum!(AttrArgs; Empty Delimited(0) Eq(eq_span expr)); -spanless_eq_enum!(AttrStyle; Outer Inner); -spanless_eq_enum!(AttrTokenTree; Token(0 1) Delimited(0 1 2 3) AttrsTarget(0)); -spanless_eq_enum!(BinOpKind; Add Sub Mul Div Rem And Or BitXor BitAnd BitOr Shl Shr Eq Lt Le Ne Ge Gt); -spanless_eq_enum!(BlockCheckMode; Default Unsafe(0)); -spanless_eq_enum!(BorrowKind; Ref Raw Pin); -spanless_eq_enum!(BoundAsyncness; Normal Async(0)); -spanless_eq_enum!(BoundConstness; Never Always(0) Maybe(0)); -spanless_eq_enum!(BoundPolarity; Positive Negative(0) Maybe(0)); -spanless_eq_enum!(ByRef; Yes(0 1) No); -spanless_eq_enum!(CaptureBy; Value(move_kw) Ref Use(use_kw)); -spanless_eq_enum!(ClosureBinder; NotPresent For(span generic_params)); -spanless_eq_enum!(Const; 
Yes(0) No); -spanless_eq_enum!(ConstItemRhs; TypeConst(0) Body(0)); -spanless_eq_enum!(Defaultness; Default(0) Final); -spanless_eq_enum!(Extern; None Implicit(0) Explicit(0 1)); -spanless_eq_enum!(FloatTy; F16 F32 F64 F128); -spanless_eq_enum!(FnRetTy; Default(0) Ty(0)); -spanless_eq_enum!(ForLoopKind; For ForAwait); -spanless_eq_enum!(ForeignItemKind; Static(0) Fn(0) TyAlias(0) MacCall(0)); -spanless_eq_enum!(FormatAlignment; Left Right Center); -spanless_eq_enum!(FormatArgPositionKind; Implicit Number Named); -spanless_eq_enum!(FormatArgsPiece; Literal(0) Placeholder(0)); -spanless_eq_enum!(FormatArgumentKind; Normal Named(0) Captured(0)); -spanless_eq_enum!(FormatCount; Literal(0) Argument(0)); -spanless_eq_enum!(FormatDebugHex; Lower Upper); -spanless_eq_enum!(FormatSign; Plus Minus); -spanless_eq_enum!(FormatTrait; Display Debug LowerExp UpperExp Octal Pointer Binary LowerHex UpperHex); -spanless_eq_enum!(GenBlockKind; Async Gen AsyncGen); -spanless_eq_enum!(GenericArg; Lifetime(0) Type(0) Const(0)); -spanless_eq_enum!(GenericArgs; AngleBracketed(0) Parenthesized(0) ParenthesizedElided(0)); -spanless_eq_enum!(GenericBound; Trait(0) Outlives(0) Use(0 1)); -spanless_eq_enum!(GenericParamKind; Lifetime Type(default) Const(ty span default)); -spanless_eq_enum!(ImplPolarity; Positive Negative(0)); -spanless_eq_enum!(Inline; Yes No(had_parse_error)); -spanless_eq_enum!(InlineAsmRegOrRegClass; Reg(0) RegClass(0)); -spanless_eq_enum!(InlineAsmTemplatePiece; String(0) Placeholder(operand_idx modifier span)); -spanless_eq_enum!(IntTy; Isize I8 I16 I32 I64 I128); -spanless_eq_enum!(IsAuto; Yes No); -spanless_eq_enum!(LitFloatType; Suffixed(0) Unsuffixed); -spanless_eq_enum!(LitIntType; Signed(0) Unsigned(0) Unsuffixed); -spanless_eq_enum!(LocalKind; Decl Init(0) InitElse(0 1)); -spanless_eq_enum!(MacStmtStyle; Semicolon Braces NoBraces); -spanless_eq_enum!(MatchKind; Prefix Postfix); -spanless_eq_enum!(MetaItemKind; Word List(0) NameValue(0)); -spanless_eq_enum!(MetaItemInner; MetaItem(0) Lit(0)); -spanless_eq_enum!(ModKind; Loaded(0 1 2) Unloaded); -spanless_eq_enum!(Movability; Static Movable); -spanless_eq_enum!(Mutability; Mut Not); -spanless_eq_enum!(Parens; Yes No); -spanless_eq_enum!(PatFieldsRest; Rest(0) Recovered(0) None); -spanless_eq_enum!(Pinnedness; Not Pinned); -spanless_eq_enum!(PreciseCapturingArg; Lifetime(0) Arg(0 1)); -spanless_eq_enum!(RangeEnd; Included(0) Excluded); -spanless_eq_enum!(RangeLimits; HalfOpen Closed); -spanless_eq_enum!(Recovered; No Yes(0)); -spanless_eq_enum!(Safety; Unsafe(0) Safe(0) Default); -spanless_eq_enum!(StmtKind; Let(0) Item(0) Expr(0) Semi(0) Empty MacCall(0)); -spanless_eq_enum!(StrStyle; Cooked Raw(0)); -spanless_eq_enum!(StructRest; Base(0) Rest(0) None); -spanless_eq_enum!(Term; Ty(0) Const(0)); -spanless_eq_enum!(TokenTree; Token(0 1) Delimited(0 1 2 3)); -spanless_eq_enum!(TraitObjectSyntax; Dyn None); -spanless_eq_enum!(TyPatKind; Range(0 1 2) NotNull Or(0) Err(0)); -spanless_eq_enum!(UintTy; Usize U8 U16 U32 U64 U128); -spanless_eq_enum!(UnOp; Deref Not Neg); -spanless_eq_enum!(UnsafeBinderCastKind; Wrap Unwrap); -spanless_eq_enum!(UnsafeSource; CompilerGenerated UserProvided); -spanless_eq_enum!(UseTreeKind; Simple(0) Nested(items span) Glob); -spanless_eq_enum!(VariantData; Struct(fields recovered) Tuple(0 1) Unit(0)); -spanless_eq_enum!(VisibilityKind; Public Restricted(path id shorthand) Inherited); -spanless_eq_enum!(WherePredicateKind; BoundPredicate(0) RegionPredicate(0) EqPredicate(0)); -spanless_eq_enum!(YieldKind; Prefix(0) 
Postfix(0)); -spanless_eq_enum!(AssignOpKind; AddAssign SubAssign MulAssign DivAssign - RemAssign BitXorAssign BitAndAssign BitOrAssign ShlAssign ShrAssign); -spanless_eq_enum!(CoroutineKind; Async(span closure_id return_impl_trait_id) - Gen(span closure_id return_impl_trait_id) - AsyncGen(span closure_id return_impl_trait_id)); -spanless_eq_enum!(ExprKind; Array(0) ConstBlock(0) Call(0 1) MethodCall(0) - Tup(0) Binary(0 1 2) Unary(0 1) Lit(0) Cast(0 1) Type(0 1) Let(0 1 2 3) - If(0 1 2) While(0 1 2) ForLoop(pat iter body label kind) Loop(0 1 2) - Match(0 1 2) Closure(0) Block(0 1) Gen(0 1 2 3) Await(0 1) Use(0 1) - TryBlock(0) Assign(0 1 2) AssignOp(0 1 2) Field(0 1) Index(0 1 2) Underscore - Range(0 1 2) Path(0 1) AddrOf(0 1 2) Break(0 1) Continue(0) Ret(0) - InlineAsm(0) OffsetOf(0 1) MacCall(0) Struct(0) Repeat(0 1) Paren(0) Try(0) - Yield(0) Yeet(0) Become(0) IncludedBytes(0) FormatArgs(0) - UnsafeBinderCast(0 1 2) Err(0) Dummy); -spanless_eq_enum!(InlineAsmOperand; In(reg expr) Out(reg late expr) - InOut(reg late expr) SplitInOut(reg late in_expr out_expr) Const(anon_const) - Sym(sym) Label(block)); -spanless_eq_enum!(ItemKind; ExternCrate(0 1) Use(0) Static(0) Const(0) Fn(0) - Mod(0 1 2) ForeignMod(0) GlobalAsm(0) TyAlias(0) Enum(0 1 2) Struct(0 1 2) - Union(0 1 2) Trait(0) TraitAlias(0) Impl(0) MacCall(0) MacroDef(0 1) - Delegation(0) DelegationMac(0)); -spanless_eq_enum!(LitKind; Str(0 1) ByteStr(0 1) CStr(0 1) Byte(0) Char(0) - Int(0 1) Float(0 1) Bool(0) Err(0)); -spanless_eq_enum!(PatKind; Missing Wild Ident(0 1 2) Struct(0 1 2 3) - TupleStruct(0 1 2) Or(0) Path(0 1) Tuple(0) Box(0) Deref(0) Ref(0 1) Expr(0) - Range(0 1 2) Slice(0) Rest Never Guard(0 1) Paren(0) MacCall(0) Err(0)); -spanless_eq_enum!(TyKind; Slice(0) Array(0 1) Ptr(0) Ref(0 1) PinnedRef(0 1) - FnPtr(0) UnsafeBinder(0) Never Tup(0) Path(0 1) TraitObject(0 1) - ImplTrait(0 1) Paren(0) Typeof(0) Infer ImplicitSelf MacCall(0) CVarArgs - Pat(0 1) Dummy Err(0)); - -impl SpanlessEq for Ident { - fn eq(&self, other: &Self) -> bool { - self.as_str() == other.as_str() - } -} - -impl SpanlessEq for RangeSyntax { - fn eq(&self, _other: &Self) -> bool { - match self { - RangeSyntax::DotDotDot | RangeSyntax::DotDotEq => true, - } - } -} - -impl SpanlessEq for Param { - fn eq(&self, other: &Self) -> bool { - let Param { - attrs, - ty, - pat, - id, - span: _, - is_placeholder, - } = self; - let Param { - attrs: attrs2, - ty: ty2, - pat: pat2, - id: id2, - span: _, - is_placeholder: is_placeholder2, - } = other; - SpanlessEq::eq(id, id2) - && SpanlessEq::eq(is_placeholder, is_placeholder2) - && (matches!(ty.kind, TyKind::Err(_)) - || matches!(ty2.kind, TyKind::Err(_)) - || SpanlessEq::eq(attrs, attrs2) - && SpanlessEq::eq(ty, ty2) - && SpanlessEq::eq(pat, pat2)) - } -} - -impl SpanlessEq for TokenKind { - fn eq(&self, other: &Self) -> bool { - match (self, other) { - (TokenKind::Literal(this), TokenKind::Literal(other)) => SpanlessEq::eq(this, other), - (TokenKind::DotDotEq | TokenKind::DotDotDot, _) => match other { - TokenKind::DotDotEq | TokenKind::DotDotDot => true, - _ => false, - }, - _ => self == other, - } - } -} - -impl SpanlessEq for TokenStream { - fn eq(&self, other: &Self) -> bool { - let mut this_trees = self.iter(); - let mut other_trees = other.iter(); - loop { - let Some(this) = this_trees.next() else { - return other_trees.next().is_none(); - }; - let Some(other) = other_trees.next() else { - return false; - }; - if SpanlessEq::eq(this, other) { - continue; - } - if let (TokenTree::Token(this, _), 
TokenTree::Token(other, _)) = (this, other) { - if match (&this.kind, &other.kind) { - (TokenKind::Literal(this), TokenKind::Literal(other)) => { - SpanlessEq::eq(this, other) - } - (TokenKind::DocComment(_kind, style, symbol), TokenKind::Pound) => { - doc_comment(*style, *symbol, &mut other_trees) - } - (TokenKind::Pound, TokenKind::DocComment(_kind, style, symbol)) => { - doc_comment(*style, *symbol, &mut this_trees) - } - _ => false, - } { - continue; - } - } - return false; - } - } -} - -fn doc_comment<'a>( - style: AttrStyle, - unescaped: Symbol, - trees: &mut impl Iterator<Item = &'a TokenTree>, -) -> bool { - if match style { - AttrStyle::Outer => false, - AttrStyle::Inner => true, - } { - match trees.next() { - Some(TokenTree::Token( - Token { - kind: TokenKind::Bang, - span: _, - }, - _spacing, - )) => {} - _ => return false, - } - } - let Some(TokenTree::Delimited(_span, _spacing, Delimiter::Bracket, stream)) = trees.next() - else { - return false; - }; - let mut trees = stream.iter(); - match trees.next() { - Some(TokenTree::Token( - Token { - kind: TokenKind::Ident(symbol, IdentIsRaw::No), - span: _, - }, - _spacing, - )) if *symbol == sym::doc => {} - _ => return false, - } - match trees.next() { - Some(TokenTree::Token( - Token { - kind: TokenKind::Eq, - span: _, - }, - _spacing, - )) => {} - _ => return false, - } - match trees.next() { - Some(TokenTree::Token(token, _spacing)) => { - is_escaped_literal_token(token, unescaped) && trees.next().is_none() - } - _ => false, - } -} - -fn is_escaped_literal_token(token: &Token, unescaped: Symbol) -> bool { - match token { - Token { - kind: TokenKind::Literal(lit), - span: _, - } => match MetaItemLit::from_token_lit(*lit, DUMMY_SP) { - Ok(lit) => is_escaped_literal_meta_item_lit(&lit, unescaped), - Err(_) => false, - }, - _ => false, - } -} - -fn is_escaped_literal_meta_item_lit(lit: &MetaItemLit, unescaped: Symbol) -> bool { - match lit { - MetaItemLit { - symbol: _, - suffix: None, - kind, - span: _, - } => is_escaped_lit_kind(kind, unescaped), - _ => false, - } -} - -fn is_escaped_lit(lit: &Lit, unescaped: Symbol) -> bool { - match lit { - Lit { - kind: token::LitKind::Str, - symbol: _, - suffix: None, - } => match LitKind::from_token_lit(*lit) { - Ok(lit_kind) => is_escaped_lit_kind(&lit_kind, unescaped), - _ => false, - }, - _ => false, - } -} - -fn is_escaped_lit_kind(kind: &LitKind, unescaped: Symbol) -> bool { - match kind { - LitKind::Str(symbol, StrStyle::Cooked) => { - symbol.as_str().replace('\r', "") == unescaped.as_str().replace('\r', "") - } - _ => false, - } -} - -impl SpanlessEq for LazyAttrTokenStream { - fn eq(&self, other: &Self) -> bool { - let this = self.to_attr_token_stream(); - let other = other.to_attr_token_stream(); - SpanlessEq::eq(&this, &other) - } -} - -impl SpanlessEq for AttrKind { - fn eq(&self, other: &Self) -> bool { - match (self, other) { - (AttrKind::Normal(normal), AttrKind::Normal(normal2)) => { - SpanlessEq::eq(normal, normal2) - } - (AttrKind::DocComment(kind, symbol), AttrKind::DocComment(kind2, symbol2)) => { - SpanlessEq::eq(kind, kind2) && SpanlessEq::eq(symbol, symbol2) - } - (AttrKind::DocComment(kind, unescaped), AttrKind::Normal(normal2)) => { - match kind { - CommentKind::Line | CommentKind::Block => {} - } - let path = Path::from_ident(Ident::with_dummy_span(sym::doc)); - SpanlessEq::eq(&path, &normal2.item.path) - && match &normal2.item.args { - AttrArgs::Empty | AttrArgs::Delimited(_) => false, - AttrArgs::Eq { eq_span: _, expr } => match &expr.kind { - ExprKind::Lit(lit) => 
is_escaped_lit(lit, *unescaped), - _ => false, - }, - } - } - (AttrKind::Normal(_), AttrKind::DocComment(..)) => SpanlessEq::eq(other, self), - } - } -} - -impl SpanlessEq for FormatArguments { - fn eq(&self, other: &Self) -> bool { - SpanlessEq::eq(self.all_args(), other.all_args()) - } -} diff --git a/vendor/syn/tests/common/mod.rs b/vendor/syn/tests/common/mod.rs deleted file mode 100644 index ead830f811656a..00000000000000 --- a/vendor/syn/tests/common/mod.rs +++ /dev/null @@ -1,6 +0,0 @@ -#![allow(dead_code)] -#![allow(clippy::module_name_repetitions, clippy::shadow_unrelated)] - -pub mod eq; -pub mod parse; -pub mod visit; diff --git a/vendor/syn/tests/common/parse.rs b/vendor/syn/tests/common/parse.rs deleted file mode 100644 index 81ae357c1d1e9e..00000000000000 --- a/vendor/syn/tests/common/parse.rs +++ /dev/null @@ -1,52 +0,0 @@ -extern crate rustc_ast; -extern crate rustc_driver; -extern crate rustc_expand; -extern crate rustc_parse; -extern crate rustc_session; -extern crate rustc_span; - -use rustc_ast::ast; -use rustc_parse::lexer::StripTokens; -use rustc_session::parse::ParseSess; -use rustc_span::FileName; -use std::panic; - -pub fn librustc_expr(input: &str) -> Option<Box<ast::Expr>> { - match panic::catch_unwind(|| { - let locale_resources = rustc_driver::DEFAULT_LOCALE_RESOURCES.to_vec(); - let sess = ParseSess::new(locale_resources); - let name = FileName::Custom("test_precedence".to_string()); - let mut parser = rustc_parse::new_parser_from_source_str( - &sess, - name, - input.to_string(), - StripTokens::ShebangAndFrontmatter, - ) - .unwrap(); - let presult = parser.parse_expr(); - match presult { - Ok(expr) => Some(expr), - Err(diagnostic) => { - diagnostic.emit(); - None - } - } - }) { - Ok(Some(e)) => Some(e), - Ok(None) => None, - Err(_) => { - errorf!("librustc panicked\n"); - None - } - } -} - -pub fn syn_expr(input: &str) -> Option<syn::Expr> { - match syn::parse_str(input) { - Ok(e) => Some(e), - Err(msg) => { - errorf!("syn failed to parse\n{:?}\n", msg); - None - } - } -} diff --git a/vendor/syn/tests/common/visit.rs b/vendor/syn/tests/common/visit.rs deleted file mode 100644 index 2d2a6c5382d53b..00000000000000 --- a/vendor/syn/tests/common/visit.rs +++ /dev/null @@ -1,119 +0,0 @@ -use proc_macro2::{Delimiter, Group, TokenStream, TokenTree}; -use std::mem; -use syn::visit_mut::{self, VisitMut}; -use syn::{Expr, File, Generics, LifetimeParam, MacroDelimiter, Stmt, StmtMacro, TypeParam}; - -pub struct FlattenParens { - discard_paren_attrs: bool, -} - -impl FlattenParens { - pub fn discard_attrs() -> Self { - FlattenParens { - discard_paren_attrs: true, - } - } - - pub fn combine_attrs() -> Self { - FlattenParens { - discard_paren_attrs: false, - } - } - - pub fn visit_token_stream_mut(tokens: &mut TokenStream) { - *tokens = mem::take(tokens) - .into_iter() - .flat_map(|tt| { - if let TokenTree::Group(group) = tt { - let delimiter = group.delimiter(); - let mut content = group.stream(); - Self::visit_token_stream_mut(&mut content); - if let Delimiter::Parenthesis = delimiter { - content - } else { - TokenStream::from(TokenTree::Group(Group::new(delimiter, content))) - } - } else { - TokenStream::from(tt) - } - }) - .collect(); - } -} - -impl VisitMut for FlattenParens { - fn visit_expr_mut(&mut self, e: &mut Expr) { - while let Expr::Paren(paren) = e { - let paren_attrs = mem::take(&mut paren.attrs); - *e = mem::replace(&mut *paren.expr, Expr::PLACEHOLDER); - if !paren_attrs.is_empty() && !self.discard_paren_attrs { - let nested_attrs = match e { - 
Expr::Assign(e) => &mut e.attrs, - Expr::Binary(e) => &mut e.attrs, - Expr::Cast(e) => &mut e.attrs, - _ => unimplemented!(), - }; - assert!(nested_attrs.is_empty()); - *nested_attrs = paren_attrs; - } - } - visit_mut::visit_expr_mut(self, e); - } -} - -pub struct AsIfPrinted; - -impl VisitMut for AsIfPrinted { - fn visit_file_mut(&mut self, file: &mut File) { - file.shebang = None; - visit_mut::visit_file_mut(self, file); - } - - fn visit_generics_mut(&mut self, generics: &mut Generics) { - if generics.params.is_empty() { - generics.lt_token = None; - generics.gt_token = None; - } - if let Some(where_clause) = &generics.where_clause { - if where_clause.predicates.is_empty() { - generics.where_clause = None; - } - } - visit_mut::visit_generics_mut(self, generics); - } - - fn visit_lifetime_param_mut(&mut self, param: &mut LifetimeParam) { - if param.bounds.is_empty() { - param.colon_token = None; - } - visit_mut::visit_lifetime_param_mut(self, param); - } - - fn visit_stmt_mut(&mut self, stmt: &mut Stmt) { - if let Stmt::Expr(expr, semi) = stmt { - if let Expr::Macro(e) = expr { - if match e.mac.delimiter { - MacroDelimiter::Brace(_) => true, - MacroDelimiter::Paren(_) | MacroDelimiter::Bracket(_) => semi.is_some(), - } { - let Expr::Macro(expr) = mem::replace(expr, Expr::PLACEHOLDER) else { - unreachable!(); - }; - *stmt = Stmt::Macro(StmtMacro { - attrs: expr.attrs, - mac: expr.mac, - semi_token: *semi, - }); - } - } - } - visit_mut::visit_stmt_mut(self, stmt); - } - - fn visit_type_param_mut(&mut self, param: &mut TypeParam) { - if param.bounds.is_empty() { - param.colon_token = None; - } - visit_mut::visit_type_param_mut(self, param); - } -} diff --git a/vendor/syn/tests/debug/gen.rs b/vendor/syn/tests/debug/gen.rs deleted file mode 100644 index f91977a6769dac..00000000000000 --- a/vendor/syn/tests/debug/gen.rs +++ /dev/null @@ -1,5239 +0,0 @@ -// This file is @generated by syn-internal-codegen. -// It is not intended for manual editing. 
- -#![allow(repr_transparent_non_zst_fields)] -#![allow(clippy::match_wildcard_for_single_variants)] -use super::{Lite, Present}; -use ref_cast::RefCast; -use std::fmt::{self, Debug, Display}; -impl Debug for Lite<syn::Abi> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("Abi"); - if let Some(val) = &self.value.name { - #[derive(RefCast)] - #[repr(transparent)] - struct Print(syn::LitStr); - impl Debug for Print { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Some(")?; - Debug::fmt(Lite(&self.0), formatter)?; - formatter.write_str(")")?; - Ok(()) - } - } - formatter.field("name", Print::ref_cast(val)); - } - formatter.finish() - } -} -impl Debug for Lite<syn::AngleBracketedGenericArguments> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("AngleBracketedGenericArguments"); - if self.value.colon2_token.is_some() { - formatter.field("colon2_token", &Present); - } - if !self.value.args.is_empty() { - formatter.field("args", Lite(&self.value.args)); - } - formatter.finish() - } -} -impl Debug for Lite<syn::Arm> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("Arm"); - if !self.value.attrs.is_empty() { - formatter.field("attrs", Lite(&self.value.attrs)); - } - formatter.field("pat", Lite(&self.value.pat)); - if let Some(val) = &self.value.guard { - #[derive(RefCast)] - #[repr(transparent)] - struct Print((syn::token::If, Box<syn::Expr>)); - impl Debug for Print { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Some(")?; - Debug::fmt(Lite(&self.0.1), formatter)?; - formatter.write_str(")")?; - Ok(()) - } - } - formatter.field("guard", Print::ref_cast(val)); - } - formatter.field("body", Lite(&self.value.body)); - if self.value.comma.is_some() { - formatter.field("comma", &Present); - } - formatter.finish() - } -} -impl Debug for Lite<syn::AssocConst> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("AssocConst"); - formatter.field("ident", Lite(&self.value.ident)); - if let Some(val) = &self.value.generics { - #[derive(RefCast)] - #[repr(transparent)] - struct Print(syn::AngleBracketedGenericArguments); - impl Debug for Print { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Some(")?; - Debug::fmt(Lite(&self.0), formatter)?; - formatter.write_str(")")?; - Ok(()) - } - } - formatter.field("generics", Print::ref_cast(val)); - } - formatter.field("value", Lite(&self.value.value)); - formatter.finish() - } -} -impl Debug for Lite<syn::AssocType> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("AssocType"); - formatter.field("ident", Lite(&self.value.ident)); - if let Some(val) = &self.value.generics { - #[derive(RefCast)] - #[repr(transparent)] - struct Print(syn::AngleBracketedGenericArguments); - impl Debug for Print { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Some(")?; - Debug::fmt(Lite(&self.0), formatter)?; - formatter.write_str(")")?; - Ok(()) - } - } - formatter.field("generics", Print::ref_cast(val)); - } - formatter.field("ty", Lite(&self.value.ty)); - formatter.finish() - } -} -impl Debug for Lite<syn::AttrStyle> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - match &self.value { - 
syn::AttrStyle::Outer => formatter.write_str("AttrStyle::Outer"), - syn::AttrStyle::Inner(_val) => { - formatter.write_str("AttrStyle::Inner")?; - Ok(()) - } - } - } -} -impl Debug for Lite<syn::Attribute> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("Attribute"); - formatter.field("style", Lite(&self.value.style)); - formatter.field("meta", Lite(&self.value.meta)); - formatter.finish() - } -} -impl Debug for Lite<syn::BareFnArg> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("BareFnArg"); - if !self.value.attrs.is_empty() { - formatter.field("attrs", Lite(&self.value.attrs)); - } - if let Some(val) = &self.value.name { - #[derive(RefCast)] - #[repr(transparent)] - struct Print((proc_macro2::Ident, syn::token::Colon)); - impl Debug for Print { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Some(")?; - Debug::fmt(Lite(&self.0.0), formatter)?; - formatter.write_str(")")?; - Ok(()) - } - } - formatter.field("name", Print::ref_cast(val)); - } - formatter.field("ty", Lite(&self.value.ty)); - formatter.finish() - } -} -impl Debug for Lite<syn::BareVariadic> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("BareVariadic"); - if !self.value.attrs.is_empty() { - formatter.field("attrs", Lite(&self.value.attrs)); - } - if let Some(val) = &self.value.name { - #[derive(RefCast)] - #[repr(transparent)] - struct Print((proc_macro2::Ident, syn::token::Colon)); - impl Debug for Print { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Some(")?; - Debug::fmt(Lite(&self.0.0), formatter)?; - formatter.write_str(")")?; - Ok(()) - } - } - formatter.field("name", Print::ref_cast(val)); - } - if self.value.comma.is_some() { - formatter.field("comma", &Present); - } - formatter.finish() - } -} -impl Debug for Lite<syn::BinOp> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - match &self.value { - syn::BinOp::Add(_val) => { - formatter.write_str("BinOp::Add")?; - Ok(()) - } - syn::BinOp::Sub(_val) => { - formatter.write_str("BinOp::Sub")?; - Ok(()) - } - syn::BinOp::Mul(_val) => { - formatter.write_str("BinOp::Mul")?; - Ok(()) - } - syn::BinOp::Div(_val) => { - formatter.write_str("BinOp::Div")?; - Ok(()) - } - syn::BinOp::Rem(_val) => { - formatter.write_str("BinOp::Rem")?; - Ok(()) - } - syn::BinOp::And(_val) => { - formatter.write_str("BinOp::And")?; - Ok(()) - } - syn::BinOp::Or(_val) => { - formatter.write_str("BinOp::Or")?; - Ok(()) - } - syn::BinOp::BitXor(_val) => { - formatter.write_str("BinOp::BitXor")?; - Ok(()) - } - syn::BinOp::BitAnd(_val) => { - formatter.write_str("BinOp::BitAnd")?; - Ok(()) - } - syn::BinOp::BitOr(_val) => { - formatter.write_str("BinOp::BitOr")?; - Ok(()) - } - syn::BinOp::Shl(_val) => { - formatter.write_str("BinOp::Shl")?; - Ok(()) - } - syn::BinOp::Shr(_val) => { - formatter.write_str("BinOp::Shr")?; - Ok(()) - } - syn::BinOp::Eq(_val) => { - formatter.write_str("BinOp::Eq")?; - Ok(()) - } - syn::BinOp::Lt(_val) => { - formatter.write_str("BinOp::Lt")?; - Ok(()) - } - syn::BinOp::Le(_val) => { - formatter.write_str("BinOp::Le")?; - Ok(()) - } - syn::BinOp::Ne(_val) => { - formatter.write_str("BinOp::Ne")?; - Ok(()) - } - syn::BinOp::Ge(_val) => { - formatter.write_str("BinOp::Ge")?; - Ok(()) - } - syn::BinOp::Gt(_val) => { - formatter.write_str("BinOp::Gt")?; - Ok(()) - } - 
syn::BinOp::AddAssign(_val) => { - formatter.write_str("BinOp::AddAssign")?; - Ok(()) - } - syn::BinOp::SubAssign(_val) => { - formatter.write_str("BinOp::SubAssign")?; - Ok(()) - } - syn::BinOp::MulAssign(_val) => { - formatter.write_str("BinOp::MulAssign")?; - Ok(()) - } - syn::BinOp::DivAssign(_val) => { - formatter.write_str("BinOp::DivAssign")?; - Ok(()) - } - syn::BinOp::RemAssign(_val) => { - formatter.write_str("BinOp::RemAssign")?; - Ok(()) - } - syn::BinOp::BitXorAssign(_val) => { - formatter.write_str("BinOp::BitXorAssign")?; - Ok(()) - } - syn::BinOp::BitAndAssign(_val) => { - formatter.write_str("BinOp::BitAndAssign")?; - Ok(()) - } - syn::BinOp::BitOrAssign(_val) => { - formatter.write_str("BinOp::BitOrAssign")?; - Ok(()) - } - syn::BinOp::ShlAssign(_val) => { - formatter.write_str("BinOp::ShlAssign")?; - Ok(()) - } - syn::BinOp::ShrAssign(_val) => { - formatter.write_str("BinOp::ShrAssign")?; - Ok(()) - } - _ => unreachable!(), - } - } -} -impl Debug for Lite<syn::Block> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("Block"); - formatter.field("stmts", Lite(&self.value.stmts)); - formatter.finish() - } -} -impl Debug for Lite<syn::BoundLifetimes> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("BoundLifetimes"); - if !self.value.lifetimes.is_empty() { - formatter.field("lifetimes", Lite(&self.value.lifetimes)); - } - formatter.finish() - } -} -impl Debug for Lite<syn::CapturedParam> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - match &self.value { - syn::CapturedParam::Lifetime(_val) => { - formatter.write_str("CapturedParam::Lifetime")?; - formatter.write_str("(")?; - Debug::fmt(Lite(_val), formatter)?; - formatter.write_str(")")?; - Ok(()) - } - syn::CapturedParam::Ident(_val) => { - formatter.write_str("CapturedParam::Ident")?; - formatter.write_str("(")?; - Debug::fmt(Lite(_val), formatter)?; - formatter.write_str(")")?; - Ok(()) - } - _ => unreachable!(), - } - } -} -impl Debug for Lite<syn::ConstParam> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("ConstParam"); - if !self.value.attrs.is_empty() { - formatter.field("attrs", Lite(&self.value.attrs)); - } - formatter.field("ident", Lite(&self.value.ident)); - formatter.field("ty", Lite(&self.value.ty)); - if self.value.eq_token.is_some() { - formatter.field("eq_token", &Present); - } - if let Some(val) = &self.value.default { - #[derive(RefCast)] - #[repr(transparent)] - struct Print(syn::Expr); - impl Debug for Print { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Some(")?; - Debug::fmt(Lite(&self.0), formatter)?; - formatter.write_str(")")?; - Ok(()) - } - } - formatter.field("default", Print::ref_cast(val)); - } - formatter.finish() - } -} -impl Debug for Lite<syn::Constraint> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("Constraint"); - formatter.field("ident", Lite(&self.value.ident)); - if let Some(val) = &self.value.generics { - #[derive(RefCast)] - #[repr(transparent)] - struct Print(syn::AngleBracketedGenericArguments); - impl Debug for Print { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Some(")?; - Debug::fmt(Lite(&self.0), formatter)?; - formatter.write_str(")")?; - Ok(()) - } - } - formatter.field("generics", Print::ref_cast(val)); - } - if 
!self.value.bounds.is_empty() { - formatter.field("bounds", Lite(&self.value.bounds)); - } - formatter.finish() - } -} -impl Debug for Lite<syn::Data> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - match &self.value { - syn::Data::Struct(_val) => { - let mut formatter = formatter.debug_struct("Data::Struct"); - formatter.field("fields", Lite(&_val.fields)); - if _val.semi_token.is_some() { - formatter.field("semi_token", &Present); - } - formatter.finish() - } - syn::Data::Enum(_val) => { - let mut formatter = formatter.debug_struct("Data::Enum"); - if !_val.variants.is_empty() { - formatter.field("variants", Lite(&_val.variants)); - } - formatter.finish() - } - syn::Data::Union(_val) => { - let mut formatter = formatter.debug_struct("Data::Union"); - formatter.field("fields", Lite(&_val.fields)); - formatter.finish() - } - } - } -} -impl Debug for Lite<syn::DataEnum> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("DataEnum"); - if !self.value.variants.is_empty() { - formatter.field("variants", Lite(&self.value.variants)); - } - formatter.finish() - } -} -impl Debug for Lite<syn::DataStruct> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("DataStruct"); - formatter.field("fields", Lite(&self.value.fields)); - if self.value.semi_token.is_some() { - formatter.field("semi_token", &Present); - } - formatter.finish() - } -} -impl Debug for Lite<syn::DataUnion> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("DataUnion"); - formatter.field("fields", Lite(&self.value.fields)); - formatter.finish() - } -} -impl Debug for Lite<syn::DeriveInput> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("DeriveInput"); - if !self.value.attrs.is_empty() { - formatter.field("attrs", Lite(&self.value.attrs)); - } - formatter.field("vis", Lite(&self.value.vis)); - formatter.field("ident", Lite(&self.value.ident)); - formatter.field("generics", Lite(&self.value.generics)); - formatter.field("data", Lite(&self.value.data)); - formatter.finish() - } -} -impl Debug for Lite<syn::Expr> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - match &self.value { - syn::Expr::Array(_val) => { - let mut formatter = formatter.debug_struct("Expr::Array"); - if !_val.attrs.is_empty() { - formatter.field("attrs", Lite(&_val.attrs)); - } - if !_val.elems.is_empty() { - formatter.field("elems", Lite(&_val.elems)); - } - formatter.finish() - } - syn::Expr::Assign(_val) => { - let mut formatter = formatter.debug_struct("Expr::Assign"); - if !_val.attrs.is_empty() { - formatter.field("attrs", Lite(&_val.attrs)); - } - formatter.field("left", Lite(&_val.left)); - formatter.field("right", Lite(&_val.right)); - formatter.finish() - } - syn::Expr::Async(_val) => { - let mut formatter = formatter.debug_struct("Expr::Async"); - if !_val.attrs.is_empty() { - formatter.field("attrs", Lite(&_val.attrs)); - } - if _val.capture.is_some() { - formatter.field("capture", &Present); - } - formatter.field("block", Lite(&_val.block)); - formatter.finish() - } - syn::Expr::Await(_val) => { - let mut formatter = formatter.debug_struct("Expr::Await"); - if !_val.attrs.is_empty() { - formatter.field("attrs", Lite(&_val.attrs)); - } - formatter.field("base", Lite(&_val.base)); - formatter.finish() - } - syn::Expr::Binary(_val) => { - let mut formatter = 
formatter.debug_struct("Expr::Binary"); - if !_val.attrs.is_empty() { - formatter.field("attrs", Lite(&_val.attrs)); - } - formatter.field("left", Lite(&_val.left)); - formatter.field("op", Lite(&_val.op)); - formatter.field("right", Lite(&_val.right)); - formatter.finish() - } - syn::Expr::Block(_val) => { - let mut formatter = formatter.debug_struct("Expr::Block"); - if !_val.attrs.is_empty() { - formatter.field("attrs", Lite(&_val.attrs)); - } - if let Some(val) = &_val.label { - #[derive(RefCast)] - #[repr(transparent)] - struct Print(syn::Label); - impl Debug for Print { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Some(")?; - Debug::fmt(Lite(&self.0), formatter)?; - formatter.write_str(")")?; - Ok(()) - } - } - formatter.field("label", Print::ref_cast(val)); - } - formatter.field("block", Lite(&_val.block)); - formatter.finish() - } - syn::Expr::Break(_val) => { - let mut formatter = formatter.debug_struct("Expr::Break"); - if !_val.attrs.is_empty() { - formatter.field("attrs", Lite(&_val.attrs)); - } - if let Some(val) = &_val.label { - #[derive(RefCast)] - #[repr(transparent)] - struct Print(syn::Lifetime); - impl Debug for Print { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Some(")?; - Debug::fmt(Lite(&self.0), formatter)?; - formatter.write_str(")")?; - Ok(()) - } - } - formatter.field("label", Print::ref_cast(val)); - } - if let Some(val) = &_val.expr { - #[derive(RefCast)] - #[repr(transparent)] - struct Print(Box<syn::Expr>); - impl Debug for Print { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Some(")?; - Debug::fmt(Lite(&self.0), formatter)?; - formatter.write_str(")")?; - Ok(()) - } - } - formatter.field("expr", Print::ref_cast(val)); - } - formatter.finish() - } - syn::Expr::Call(_val) => { - let mut formatter = formatter.debug_struct("Expr::Call"); - if !_val.attrs.is_empty() { - formatter.field("attrs", Lite(&_val.attrs)); - } - formatter.field("func", Lite(&_val.func)); - if !_val.args.is_empty() { - formatter.field("args", Lite(&_val.args)); - } - formatter.finish() - } - syn::Expr::Cast(_val) => { - let mut formatter = formatter.debug_struct("Expr::Cast"); - if !_val.attrs.is_empty() { - formatter.field("attrs", Lite(&_val.attrs)); - } - formatter.field("expr", Lite(&_val.expr)); - formatter.field("ty", Lite(&_val.ty)); - formatter.finish() - } - syn::Expr::Closure(_val) => { - let mut formatter = formatter.debug_struct("Expr::Closure"); - if !_val.attrs.is_empty() { - formatter.field("attrs", Lite(&_val.attrs)); - } - if let Some(val) = &_val.lifetimes { - #[derive(RefCast)] - #[repr(transparent)] - struct Print(syn::BoundLifetimes); - impl Debug for Print { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Some(")?; - Debug::fmt(Lite(&self.0), formatter)?; - formatter.write_str(")")?; - Ok(()) - } - } - formatter.field("lifetimes", Print::ref_cast(val)); - } - if _val.constness.is_some() { - formatter.field("constness", &Present); - } - if _val.movability.is_some() { - formatter.field("movability", &Present); - } - if _val.asyncness.is_some() { - formatter.field("asyncness", &Present); - } - if _val.capture.is_some() { - formatter.field("capture", &Present); - } - if !_val.inputs.is_empty() { - formatter.field("inputs", Lite(&_val.inputs)); - } - formatter.field("output", Lite(&_val.output)); - formatter.field("body", Lite(&_val.body)); - formatter.finish() - } - syn::Expr::Const(_val) => { - let mut 
formatter = formatter.debug_struct("Expr::Const"); - if !_val.attrs.is_empty() { - formatter.field("attrs", Lite(&_val.attrs)); - } - formatter.field("block", Lite(&_val.block)); - formatter.finish() - } - syn::Expr::Continue(_val) => { - let mut formatter = formatter.debug_struct("Expr::Continue"); - if !_val.attrs.is_empty() { - formatter.field("attrs", Lite(&_val.attrs)); - } - if let Some(val) = &_val.label { - #[derive(RefCast)] - #[repr(transparent)] - struct Print(syn::Lifetime); - impl Debug for Print { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Some(")?; - Debug::fmt(Lite(&self.0), formatter)?; - formatter.write_str(")")?; - Ok(()) - } - } - formatter.field("label", Print::ref_cast(val)); - } - formatter.finish() - } - syn::Expr::Field(_val) => { - let mut formatter = formatter.debug_struct("Expr::Field"); - if !_val.attrs.is_empty() { - formatter.field("attrs", Lite(&_val.attrs)); - } - formatter.field("base", Lite(&_val.base)); - formatter.field("member", Lite(&_val.member)); - formatter.finish() - } - syn::Expr::ForLoop(_val) => { - let mut formatter = formatter.debug_struct("Expr::ForLoop"); - if !_val.attrs.is_empty() { - formatter.field("attrs", Lite(&_val.attrs)); - } - if let Some(val) = &_val.label { - #[derive(RefCast)] - #[repr(transparent)] - struct Print(syn::Label); - impl Debug for Print { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Some(")?; - Debug::fmt(Lite(&self.0), formatter)?; - formatter.write_str(")")?; - Ok(()) - } - } - formatter.field("label", Print::ref_cast(val)); - } - formatter.field("pat", Lite(&_val.pat)); - formatter.field("expr", Lite(&_val.expr)); - formatter.field("body", Lite(&_val.body)); - formatter.finish() - } - syn::Expr::Group(_val) => { - let mut formatter = formatter.debug_struct("Expr::Group"); - if !_val.attrs.is_empty() { - formatter.field("attrs", Lite(&_val.attrs)); - } - formatter.field("expr", Lite(&_val.expr)); - formatter.finish() - } - syn::Expr::If(_val) => { - let mut formatter = formatter.debug_struct("Expr::If"); - if !_val.attrs.is_empty() { - formatter.field("attrs", Lite(&_val.attrs)); - } - formatter.field("cond", Lite(&_val.cond)); - formatter.field("then_branch", Lite(&_val.then_branch)); - if let Some(val) = &_val.else_branch { - #[derive(RefCast)] - #[repr(transparent)] - struct Print((syn::token::Else, Box<syn::Expr>)); - impl Debug for Print { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Some(")?; - Debug::fmt(Lite(&self.0.1), formatter)?; - formatter.write_str(")")?; - Ok(()) - } - } - formatter.field("else_branch", Print::ref_cast(val)); - } - formatter.finish() - } - syn::Expr::Index(_val) => { - let mut formatter = formatter.debug_struct("Expr::Index"); - if !_val.attrs.is_empty() { - formatter.field("attrs", Lite(&_val.attrs)); - } - formatter.field("expr", Lite(&_val.expr)); - formatter.field("index", Lite(&_val.index)); - formatter.finish() - } - syn::Expr::Infer(_val) => { - let mut formatter = formatter.debug_struct("Expr::Infer"); - if !_val.attrs.is_empty() { - formatter.field("attrs", Lite(&_val.attrs)); - } - formatter.finish() - } - syn::Expr::Let(_val) => { - let mut formatter = formatter.debug_struct("Expr::Let"); - if !_val.attrs.is_empty() { - formatter.field("attrs", Lite(&_val.attrs)); - } - formatter.field("pat", Lite(&_val.pat)); - formatter.field("expr", Lite(&_val.expr)); - formatter.finish() - } - syn::Expr::Lit(_val) => { - let mut formatter = 
formatter.debug_struct("Expr::Lit"); - if !_val.attrs.is_empty() { - formatter.field("attrs", Lite(&_val.attrs)); - } - formatter.field("lit", Lite(&_val.lit)); - formatter.finish() - } - syn::Expr::Loop(_val) => { - let mut formatter = formatter.debug_struct("Expr::Loop"); - if !_val.attrs.is_empty() { - formatter.field("attrs", Lite(&_val.attrs)); - } - if let Some(val) = &_val.label { - #[derive(RefCast)] - #[repr(transparent)] - struct Print(syn::Label); - impl Debug for Print { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Some(")?; - Debug::fmt(Lite(&self.0), formatter)?; - formatter.write_str(")")?; - Ok(()) - } - } - formatter.field("label", Print::ref_cast(val)); - } - formatter.field("body", Lite(&_val.body)); - formatter.finish() - } - syn::Expr::Macro(_val) => { - let mut formatter = formatter.debug_struct("Expr::Macro"); - if !_val.attrs.is_empty() { - formatter.field("attrs", Lite(&_val.attrs)); - } - formatter.field("mac", Lite(&_val.mac)); - formatter.finish() - } - syn::Expr::Match(_val) => { - let mut formatter = formatter.debug_struct("Expr::Match"); - if !_val.attrs.is_empty() { - formatter.field("attrs", Lite(&_val.attrs)); - } - formatter.field("expr", Lite(&_val.expr)); - if !_val.arms.is_empty() { - formatter.field("arms", Lite(&_val.arms)); - } - formatter.finish() - } - syn::Expr::MethodCall(_val) => { - let mut formatter = formatter.debug_struct("Expr::MethodCall"); - if !_val.attrs.is_empty() { - formatter.field("attrs", Lite(&_val.attrs)); - } - formatter.field("receiver", Lite(&_val.receiver)); - formatter.field("method", Lite(&_val.method)); - if let Some(val) = &_val.turbofish { - #[derive(RefCast)] - #[repr(transparent)] - struct Print(syn::AngleBracketedGenericArguments); - impl Debug for Print { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Some(")?; - Debug::fmt(Lite(&self.0), formatter)?; - formatter.write_str(")")?; - Ok(()) - } - } - formatter.field("turbofish", Print::ref_cast(val)); - } - if !_val.args.is_empty() { - formatter.field("args", Lite(&_val.args)); - } - formatter.finish() - } - syn::Expr::Paren(_val) => { - let mut formatter = formatter.debug_struct("Expr::Paren"); - if !_val.attrs.is_empty() { - formatter.field("attrs", Lite(&_val.attrs)); - } - formatter.field("expr", Lite(&_val.expr)); - formatter.finish() - } - syn::Expr::Path(_val) => { - let mut formatter = formatter.debug_struct("Expr::Path"); - if !_val.attrs.is_empty() { - formatter.field("attrs", Lite(&_val.attrs)); - } - if let Some(val) = &_val.qself { - #[derive(RefCast)] - #[repr(transparent)] - struct Print(syn::QSelf); - impl Debug for Print { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Some(")?; - Debug::fmt(Lite(&self.0), formatter)?; - formatter.write_str(")")?; - Ok(()) - } - } - formatter.field("qself", Print::ref_cast(val)); - } - formatter.field("path", Lite(&_val.path)); - formatter.finish() - } - syn::Expr::Range(_val) => { - let mut formatter = formatter.debug_struct("Expr::Range"); - if !_val.attrs.is_empty() { - formatter.field("attrs", Lite(&_val.attrs)); - } - if let Some(val) = &_val.start { - #[derive(RefCast)] - #[repr(transparent)] - struct Print(Box<syn::Expr>); - impl Debug for Print { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Some(")?; - Debug::fmt(Lite(&self.0), formatter)?; - formatter.write_str(")")?; - Ok(()) - } - } - formatter.field("start", Print::ref_cast(val)); - } - 
formatter.field("limits", Lite(&_val.limits)); - if let Some(val) = &_val.end { - #[derive(RefCast)] - #[repr(transparent)] - struct Print(Box<syn::Expr>); - impl Debug for Print { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Some(")?; - Debug::fmt(Lite(&self.0), formatter)?; - formatter.write_str(")")?; - Ok(()) - } - } - formatter.field("end", Print::ref_cast(val)); - } - formatter.finish() - } - syn::Expr::RawAddr(_val) => { - let mut formatter = formatter.debug_struct("Expr::RawAddr"); - if !_val.attrs.is_empty() { - formatter.field("attrs", Lite(&_val.attrs)); - } - formatter.field("mutability", Lite(&_val.mutability)); - formatter.field("expr", Lite(&_val.expr)); - formatter.finish() - } - syn::Expr::Reference(_val) => { - let mut formatter = formatter.debug_struct("Expr::Reference"); - if !_val.attrs.is_empty() { - formatter.field("attrs", Lite(&_val.attrs)); - } - if _val.mutability.is_some() { - formatter.field("mutability", &Present); - } - formatter.field("expr", Lite(&_val.expr)); - formatter.finish() - } - syn::Expr::Repeat(_val) => { - let mut formatter = formatter.debug_struct("Expr::Repeat"); - if !_val.attrs.is_empty() { - formatter.field("attrs", Lite(&_val.attrs)); - } - formatter.field("expr", Lite(&_val.expr)); - formatter.field("len", Lite(&_val.len)); - formatter.finish() - } - syn::Expr::Return(_val) => { - let mut formatter = formatter.debug_struct("Expr::Return"); - if !_val.attrs.is_empty() { - formatter.field("attrs", Lite(&_val.attrs)); - } - if let Some(val) = &_val.expr { - #[derive(RefCast)] - #[repr(transparent)] - struct Print(Box<syn::Expr>); - impl Debug for Print { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Some(")?; - Debug::fmt(Lite(&self.0), formatter)?; - formatter.write_str(")")?; - Ok(()) - } - } - formatter.field("expr", Print::ref_cast(val)); - } - formatter.finish() - } - syn::Expr::Struct(_val) => { - let mut formatter = formatter.debug_struct("Expr::Struct"); - if !_val.attrs.is_empty() { - formatter.field("attrs", Lite(&_val.attrs)); - } - if let Some(val) = &_val.qself { - #[derive(RefCast)] - #[repr(transparent)] - struct Print(syn::QSelf); - impl Debug for Print { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Some(")?; - Debug::fmt(Lite(&self.0), formatter)?; - formatter.write_str(")")?; - Ok(()) - } - } - formatter.field("qself", Print::ref_cast(val)); - } - formatter.field("path", Lite(&_val.path)); - if !_val.fields.is_empty() { - formatter.field("fields", Lite(&_val.fields)); - } - if _val.dot2_token.is_some() { - formatter.field("dot2_token", &Present); - } - if let Some(val) = &_val.rest { - #[derive(RefCast)] - #[repr(transparent)] - struct Print(Box<syn::Expr>); - impl Debug for Print { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Some(")?; - Debug::fmt(Lite(&self.0), formatter)?; - formatter.write_str(")")?; - Ok(()) - } - } - formatter.field("rest", Print::ref_cast(val)); - } - formatter.finish() - } - syn::Expr::Try(_val) => { - let mut formatter = formatter.debug_struct("Expr::Try"); - if !_val.attrs.is_empty() { - formatter.field("attrs", Lite(&_val.attrs)); - } - formatter.field("expr", Lite(&_val.expr)); - formatter.finish() - } - syn::Expr::TryBlock(_val) => { - let mut formatter = formatter.debug_struct("Expr::TryBlock"); - if !_val.attrs.is_empty() { - formatter.field("attrs", Lite(&_val.attrs)); - } - formatter.field("block", Lite(&_val.block)); - 
formatter.finish() - } - syn::Expr::Tuple(_val) => { - let mut formatter = formatter.debug_struct("Expr::Tuple"); - if !_val.attrs.is_empty() { - formatter.field("attrs", Lite(&_val.attrs)); - } - if !_val.elems.is_empty() { - formatter.field("elems", Lite(&_val.elems)); - } - formatter.finish() - } - syn::Expr::Unary(_val) => { - let mut formatter = formatter.debug_struct("Expr::Unary"); - if !_val.attrs.is_empty() { - formatter.field("attrs", Lite(&_val.attrs)); - } - formatter.field("op", Lite(&_val.op)); - formatter.field("expr", Lite(&_val.expr)); - formatter.finish() - } - syn::Expr::Unsafe(_val) => { - let mut formatter = formatter.debug_struct("Expr::Unsafe"); - if !_val.attrs.is_empty() { - formatter.field("attrs", Lite(&_val.attrs)); - } - formatter.field("block", Lite(&_val.block)); - formatter.finish() - } - syn::Expr::Verbatim(_val) => { - formatter.write_str("Expr::Verbatim")?; - formatter.write_str("(`")?; - Display::fmt(_val, formatter)?; - formatter.write_str("`)")?; - Ok(()) - } - syn::Expr::While(_val) => { - let mut formatter = formatter.debug_struct("Expr::While"); - if !_val.attrs.is_empty() { - formatter.field("attrs", Lite(&_val.attrs)); - } - if let Some(val) = &_val.label { - #[derive(RefCast)] - #[repr(transparent)] - struct Print(syn::Label); - impl Debug for Print { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Some(")?; - Debug::fmt(Lite(&self.0), formatter)?; - formatter.write_str(")")?; - Ok(()) - } - } - formatter.field("label", Print::ref_cast(val)); - } - formatter.field("cond", Lite(&_val.cond)); - formatter.field("body", Lite(&_val.body)); - formatter.finish() - } - syn::Expr::Yield(_val) => { - let mut formatter = formatter.debug_struct("Expr::Yield"); - if !_val.attrs.is_empty() { - formatter.field("attrs", Lite(&_val.attrs)); - } - if let Some(val) = &_val.expr { - #[derive(RefCast)] - #[repr(transparent)] - struct Print(Box<syn::Expr>); - impl Debug for Print { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Some(")?; - Debug::fmt(Lite(&self.0), formatter)?; - formatter.write_str(")")?; - Ok(()) - } - } - formatter.field("expr", Print::ref_cast(val)); - } - formatter.finish() - } - _ => unreachable!(), - } - } -} -impl Debug for Lite<syn::ExprArray> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("ExprArray"); - if !self.value.attrs.is_empty() { - formatter.field("attrs", Lite(&self.value.attrs)); - } - if !self.value.elems.is_empty() { - formatter.field("elems", Lite(&self.value.elems)); - } - formatter.finish() - } -} -impl Debug for Lite<syn::ExprAssign> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("ExprAssign"); - if !self.value.attrs.is_empty() { - formatter.field("attrs", Lite(&self.value.attrs)); - } - formatter.field("left", Lite(&self.value.left)); - formatter.field("right", Lite(&self.value.right)); - formatter.finish() - } -} -impl Debug for Lite<syn::ExprAsync> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("ExprAsync"); - if !self.value.attrs.is_empty() { - formatter.field("attrs", Lite(&self.value.attrs)); - } - if self.value.capture.is_some() { - formatter.field("capture", &Present); - } - formatter.field("block", Lite(&self.value.block)); - formatter.finish() - } -} -impl Debug for Lite<syn::ExprAwait> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> 
fmt::Result { - let mut formatter = formatter.debug_struct("ExprAwait"); - if !self.value.attrs.is_empty() { - formatter.field("attrs", Lite(&self.value.attrs)); - } - formatter.field("base", Lite(&self.value.base)); - formatter.finish() - } -} -impl Debug for Lite<syn::ExprBinary> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("ExprBinary"); - if !self.value.attrs.is_empty() { - formatter.field("attrs", Lite(&self.value.attrs)); - } - formatter.field("left", Lite(&self.value.left)); - formatter.field("op", Lite(&self.value.op)); - formatter.field("right", Lite(&self.value.right)); - formatter.finish() - } -} -impl Debug for Lite<syn::ExprBlock> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("ExprBlock"); - if !self.value.attrs.is_empty() { - formatter.field("attrs", Lite(&self.value.attrs)); - } - if let Some(val) = &self.value.label { - #[derive(RefCast)] - #[repr(transparent)] - struct Print(syn::Label); - impl Debug for Print { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Some(")?; - Debug::fmt(Lite(&self.0), formatter)?; - formatter.write_str(")")?; - Ok(()) - } - } - formatter.field("label", Print::ref_cast(val)); - } - formatter.field("block", Lite(&self.value.block)); - formatter.finish() - } -} -impl Debug for Lite<syn::ExprBreak> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("ExprBreak"); - if !self.value.attrs.is_empty() { - formatter.field("attrs", Lite(&self.value.attrs)); - } - if let Some(val) = &self.value.label { - #[derive(RefCast)] - #[repr(transparent)] - struct Print(syn::Lifetime); - impl Debug for Print { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Some(")?; - Debug::fmt(Lite(&self.0), formatter)?; - formatter.write_str(")")?; - Ok(()) - } - } - formatter.field("label", Print::ref_cast(val)); - } - if let Some(val) = &self.value.expr { - #[derive(RefCast)] - #[repr(transparent)] - struct Print(Box<syn::Expr>); - impl Debug for Print { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Some(")?; - Debug::fmt(Lite(&self.0), formatter)?; - formatter.write_str(")")?; - Ok(()) - } - } - formatter.field("expr", Print::ref_cast(val)); - } - formatter.finish() - } -} -impl Debug for Lite<syn::ExprCall> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("ExprCall"); - if !self.value.attrs.is_empty() { - formatter.field("attrs", Lite(&self.value.attrs)); - } - formatter.field("func", Lite(&self.value.func)); - if !self.value.args.is_empty() { - formatter.field("args", Lite(&self.value.args)); - } - formatter.finish() - } -} -impl Debug for Lite<syn::ExprCast> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("ExprCast"); - if !self.value.attrs.is_empty() { - formatter.field("attrs", Lite(&self.value.attrs)); - } - formatter.field("expr", Lite(&self.value.expr)); - formatter.field("ty", Lite(&self.value.ty)); - formatter.finish() - } -} -impl Debug for Lite<syn::ExprClosure> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("ExprClosure"); - if !self.value.attrs.is_empty() { - formatter.field("attrs", Lite(&self.value.attrs)); - } - if let Some(val) = &self.value.lifetimes { - 
#[derive(RefCast)] - #[repr(transparent)] - struct Print(syn::BoundLifetimes); - impl Debug for Print { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Some(")?; - Debug::fmt(Lite(&self.0), formatter)?; - formatter.write_str(")")?; - Ok(()) - } - } - formatter.field("lifetimes", Print::ref_cast(val)); - } - if self.value.constness.is_some() { - formatter.field("constness", &Present); - } - if self.value.movability.is_some() { - formatter.field("movability", &Present); - } - if self.value.asyncness.is_some() { - formatter.field("asyncness", &Present); - } - if self.value.capture.is_some() { - formatter.field("capture", &Present); - } - if !self.value.inputs.is_empty() { - formatter.field("inputs", Lite(&self.value.inputs)); - } - formatter.field("output", Lite(&self.value.output)); - formatter.field("body", Lite(&self.value.body)); - formatter.finish() - } -} -impl Debug for Lite<syn::ExprConst> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("ExprConst"); - if !self.value.attrs.is_empty() { - formatter.field("attrs", Lite(&self.value.attrs)); - } - formatter.field("block", Lite(&self.value.block)); - formatter.finish() - } -} -impl Debug for Lite<syn::ExprContinue> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("ExprContinue"); - if !self.value.attrs.is_empty() { - formatter.field("attrs", Lite(&self.value.attrs)); - } - if let Some(val) = &self.value.label { - #[derive(RefCast)] - #[repr(transparent)] - struct Print(syn::Lifetime); - impl Debug for Print { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Some(")?; - Debug::fmt(Lite(&self.0), formatter)?; - formatter.write_str(")")?; - Ok(()) - } - } - formatter.field("label", Print::ref_cast(val)); - } - formatter.finish() - } -} -impl Debug for Lite<syn::ExprField> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("ExprField"); - if !self.value.attrs.is_empty() { - formatter.field("attrs", Lite(&self.value.attrs)); - } - formatter.field("base", Lite(&self.value.base)); - formatter.field("member", Lite(&self.value.member)); - formatter.finish() - } -} -impl Debug for Lite<syn::ExprForLoop> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("ExprForLoop"); - if !self.value.attrs.is_empty() { - formatter.field("attrs", Lite(&self.value.attrs)); - } - if let Some(val) = &self.value.label { - #[derive(RefCast)] - #[repr(transparent)] - struct Print(syn::Label); - impl Debug for Print { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Some(")?; - Debug::fmt(Lite(&self.0), formatter)?; - formatter.write_str(")")?; - Ok(()) - } - } - formatter.field("label", Print::ref_cast(val)); - } - formatter.field("pat", Lite(&self.value.pat)); - formatter.field("expr", Lite(&self.value.expr)); - formatter.field("body", Lite(&self.value.body)); - formatter.finish() - } -} -impl Debug for Lite<syn::ExprGroup> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("ExprGroup"); - if !self.value.attrs.is_empty() { - formatter.field("attrs", Lite(&self.value.attrs)); - } - formatter.field("expr", Lite(&self.value.expr)); - formatter.finish() - } -} -impl Debug for Lite<syn::ExprIf> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> 
fmt::Result { - let mut formatter = formatter.debug_struct("ExprIf"); - if !self.value.attrs.is_empty() { - formatter.field("attrs", Lite(&self.value.attrs)); - } - formatter.field("cond", Lite(&self.value.cond)); - formatter.field("then_branch", Lite(&self.value.then_branch)); - if let Some(val) = &self.value.else_branch { - #[derive(RefCast)] - #[repr(transparent)] - struct Print((syn::token::Else, Box<syn::Expr>)); - impl Debug for Print { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Some(")?; - Debug::fmt(Lite(&self.0.1), formatter)?; - formatter.write_str(")")?; - Ok(()) - } - } - formatter.field("else_branch", Print::ref_cast(val)); - } - formatter.finish() - } -} -impl Debug for Lite<syn::ExprIndex> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("ExprIndex"); - if !self.value.attrs.is_empty() { - formatter.field("attrs", Lite(&self.value.attrs)); - } - formatter.field("expr", Lite(&self.value.expr)); - formatter.field("index", Lite(&self.value.index)); - formatter.finish() - } -} -impl Debug for Lite<syn::ExprInfer> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("ExprInfer"); - if !self.value.attrs.is_empty() { - formatter.field("attrs", Lite(&self.value.attrs)); - } - formatter.finish() - } -} -impl Debug for Lite<syn::ExprLet> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("ExprLet"); - if !self.value.attrs.is_empty() { - formatter.field("attrs", Lite(&self.value.attrs)); - } - formatter.field("pat", Lite(&self.value.pat)); - formatter.field("expr", Lite(&self.value.expr)); - formatter.finish() - } -} -impl Debug for Lite<syn::ExprLit> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("ExprLit"); - if !self.value.attrs.is_empty() { - formatter.field("attrs", Lite(&self.value.attrs)); - } - formatter.field("lit", Lite(&self.value.lit)); - formatter.finish() - } -} -impl Debug for Lite<syn::ExprLoop> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("ExprLoop"); - if !self.value.attrs.is_empty() { - formatter.field("attrs", Lite(&self.value.attrs)); - } - if let Some(val) = &self.value.label { - #[derive(RefCast)] - #[repr(transparent)] - struct Print(syn::Label); - impl Debug for Print { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Some(")?; - Debug::fmt(Lite(&self.0), formatter)?; - formatter.write_str(")")?; - Ok(()) - } - } - formatter.field("label", Print::ref_cast(val)); - } - formatter.field("body", Lite(&self.value.body)); - formatter.finish() - } -} -impl Debug for Lite<syn::ExprMacro> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("ExprMacro"); - if !self.value.attrs.is_empty() { - formatter.field("attrs", Lite(&self.value.attrs)); - } - formatter.field("mac", Lite(&self.value.mac)); - formatter.finish() - } -} -impl Debug for Lite<syn::ExprMatch> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("ExprMatch"); - if !self.value.attrs.is_empty() { - formatter.field("attrs", Lite(&self.value.attrs)); - } - formatter.field("expr", Lite(&self.value.expr)); - if !self.value.arms.is_empty() { - formatter.field("arms", Lite(&self.value.arms)); - } - 
formatter.finish() - } -} -impl Debug for Lite<syn::ExprMethodCall> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("ExprMethodCall"); - if !self.value.attrs.is_empty() { - formatter.field("attrs", Lite(&self.value.attrs)); - } - formatter.field("receiver", Lite(&self.value.receiver)); - formatter.field("method", Lite(&self.value.method)); - if let Some(val) = &self.value.turbofish { - #[derive(RefCast)] - #[repr(transparent)] - struct Print(syn::AngleBracketedGenericArguments); - impl Debug for Print { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Some(")?; - Debug::fmt(Lite(&self.0), formatter)?; - formatter.write_str(")")?; - Ok(()) - } - } - formatter.field("turbofish", Print::ref_cast(val)); - } - if !self.value.args.is_empty() { - formatter.field("args", Lite(&self.value.args)); - } - formatter.finish() - } -} -impl Debug for Lite<syn::ExprParen> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("ExprParen"); - if !self.value.attrs.is_empty() { - formatter.field("attrs", Lite(&self.value.attrs)); - } - formatter.field("expr", Lite(&self.value.expr)); - formatter.finish() - } -} -impl Debug for Lite<syn::ExprPath> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("ExprPath"); - if !self.value.attrs.is_empty() { - formatter.field("attrs", Lite(&self.value.attrs)); - } - if let Some(val) = &self.value.qself { - #[derive(RefCast)] - #[repr(transparent)] - struct Print(syn::QSelf); - impl Debug for Print { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Some(")?; - Debug::fmt(Lite(&self.0), formatter)?; - formatter.write_str(")")?; - Ok(()) - } - } - formatter.field("qself", Print::ref_cast(val)); - } - formatter.field("path", Lite(&self.value.path)); - formatter.finish() - } -} -impl Debug for Lite<syn::ExprRange> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("ExprRange"); - if !self.value.attrs.is_empty() { - formatter.field("attrs", Lite(&self.value.attrs)); - } - if let Some(val) = &self.value.start { - #[derive(RefCast)] - #[repr(transparent)] - struct Print(Box<syn::Expr>); - impl Debug for Print { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Some(")?; - Debug::fmt(Lite(&self.0), formatter)?; - formatter.write_str(")")?; - Ok(()) - } - } - formatter.field("start", Print::ref_cast(val)); - } - formatter.field("limits", Lite(&self.value.limits)); - if let Some(val) = &self.value.end { - #[derive(RefCast)] - #[repr(transparent)] - struct Print(Box<syn::Expr>); - impl Debug for Print { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Some(")?; - Debug::fmt(Lite(&self.0), formatter)?; - formatter.write_str(")")?; - Ok(()) - } - } - formatter.field("end", Print::ref_cast(val)); - } - formatter.finish() - } -} -impl Debug for Lite<syn::ExprRawAddr> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("ExprRawAddr"); - if !self.value.attrs.is_empty() { - formatter.field("attrs", Lite(&self.value.attrs)); - } - formatter.field("mutability", Lite(&self.value.mutability)); - formatter.field("expr", Lite(&self.value.expr)); - formatter.finish() - } -} -impl Debug for Lite<syn::ExprReference> { - fn fmt(&self, formatter: &mut 
fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("ExprReference"); - if !self.value.attrs.is_empty() { - formatter.field("attrs", Lite(&self.value.attrs)); - } - if self.value.mutability.is_some() { - formatter.field("mutability", &Present); - } - formatter.field("expr", Lite(&self.value.expr)); - formatter.finish() - } -} -impl Debug for Lite<syn::ExprRepeat> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("ExprRepeat"); - if !self.value.attrs.is_empty() { - formatter.field("attrs", Lite(&self.value.attrs)); - } - formatter.field("expr", Lite(&self.value.expr)); - formatter.field("len", Lite(&self.value.len)); - formatter.finish() - } -} -impl Debug for Lite<syn::ExprReturn> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("ExprReturn"); - if !self.value.attrs.is_empty() { - formatter.field("attrs", Lite(&self.value.attrs)); - } - if let Some(val) = &self.value.expr { - #[derive(RefCast)] - #[repr(transparent)] - struct Print(Box<syn::Expr>); - impl Debug for Print { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Some(")?; - Debug::fmt(Lite(&self.0), formatter)?; - formatter.write_str(")")?; - Ok(()) - } - } - formatter.field("expr", Print::ref_cast(val)); - } - formatter.finish() - } -} -impl Debug for Lite<syn::ExprStruct> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("ExprStruct"); - if !self.value.attrs.is_empty() { - formatter.field("attrs", Lite(&self.value.attrs)); - } - if let Some(val) = &self.value.qself { - #[derive(RefCast)] - #[repr(transparent)] - struct Print(syn::QSelf); - impl Debug for Print { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Some(")?; - Debug::fmt(Lite(&self.0), formatter)?; - formatter.write_str(")")?; - Ok(()) - } - } - formatter.field("qself", Print::ref_cast(val)); - } - formatter.field("path", Lite(&self.value.path)); - if !self.value.fields.is_empty() { - formatter.field("fields", Lite(&self.value.fields)); - } - if self.value.dot2_token.is_some() { - formatter.field("dot2_token", &Present); - } - if let Some(val) = &self.value.rest { - #[derive(RefCast)] - #[repr(transparent)] - struct Print(Box<syn::Expr>); - impl Debug for Print { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Some(")?; - Debug::fmt(Lite(&self.0), formatter)?; - formatter.write_str(")")?; - Ok(()) - } - } - formatter.field("rest", Print::ref_cast(val)); - } - formatter.finish() - } -} -impl Debug for Lite<syn::ExprTry> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("ExprTry"); - if !self.value.attrs.is_empty() { - formatter.field("attrs", Lite(&self.value.attrs)); - } - formatter.field("expr", Lite(&self.value.expr)); - formatter.finish() - } -} -impl Debug for Lite<syn::ExprTryBlock> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("ExprTryBlock"); - if !self.value.attrs.is_empty() { - formatter.field("attrs", Lite(&self.value.attrs)); - } - formatter.field("block", Lite(&self.value.block)); - formatter.finish() - } -} -impl Debug for Lite<syn::ExprTuple> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("ExprTuple"); - if !self.value.attrs.is_empty() { - 
formatter.field("attrs", Lite(&self.value.attrs)); - } - if !self.value.elems.is_empty() { - formatter.field("elems", Lite(&self.value.elems)); - } - formatter.finish() - } -} -impl Debug for Lite<syn::ExprUnary> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("ExprUnary"); - if !self.value.attrs.is_empty() { - formatter.field("attrs", Lite(&self.value.attrs)); - } - formatter.field("op", Lite(&self.value.op)); - formatter.field("expr", Lite(&self.value.expr)); - formatter.finish() - } -} -impl Debug for Lite<syn::ExprUnsafe> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("ExprUnsafe"); - if !self.value.attrs.is_empty() { - formatter.field("attrs", Lite(&self.value.attrs)); - } - formatter.field("block", Lite(&self.value.block)); - formatter.finish() - } -} -impl Debug for Lite<syn::ExprWhile> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("ExprWhile"); - if !self.value.attrs.is_empty() { - formatter.field("attrs", Lite(&self.value.attrs)); - } - if let Some(val) = &self.value.label { - #[derive(RefCast)] - #[repr(transparent)] - struct Print(syn::Label); - impl Debug for Print { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Some(")?; - Debug::fmt(Lite(&self.0), formatter)?; - formatter.write_str(")")?; - Ok(()) - } - } - formatter.field("label", Print::ref_cast(val)); - } - formatter.field("cond", Lite(&self.value.cond)); - formatter.field("body", Lite(&self.value.body)); - formatter.finish() - } -} -impl Debug for Lite<syn::ExprYield> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("ExprYield"); - if !self.value.attrs.is_empty() { - formatter.field("attrs", Lite(&self.value.attrs)); - } - if let Some(val) = &self.value.expr { - #[derive(RefCast)] - #[repr(transparent)] - struct Print(Box<syn::Expr>); - impl Debug for Print { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Some(")?; - Debug::fmt(Lite(&self.0), formatter)?; - formatter.write_str(")")?; - Ok(()) - } - } - formatter.field("expr", Print::ref_cast(val)); - } - formatter.finish() - } -} -impl Debug for Lite<syn::Field> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("Field"); - if !self.value.attrs.is_empty() { - formatter.field("attrs", Lite(&self.value.attrs)); - } - formatter.field("vis", Lite(&self.value.vis)); - match self.value.mutability { - syn::FieldMutability::None => {} - _ => { - formatter.field("mutability", Lite(&self.value.mutability)); - } - } - if let Some(val) = &self.value.ident { - #[derive(RefCast)] - #[repr(transparent)] - struct Print(proc_macro2::Ident); - impl Debug for Print { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Some(")?; - Debug::fmt(Lite(&self.0), formatter)?; - formatter.write_str(")")?; - Ok(()) - } - } - formatter.field("ident", Print::ref_cast(val)); - } - if self.value.colon_token.is_some() { - formatter.field("colon_token", &Present); - } - formatter.field("ty", Lite(&self.value.ty)); - formatter.finish() - } -} -impl Debug for Lite<syn::FieldMutability> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - match &self.value { - syn::FieldMutability::None => formatter.write_str("FieldMutability::None"), - _ => unreachable!(), - } - 
} -} -impl Debug for Lite<syn::FieldPat> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("FieldPat"); - if !self.value.attrs.is_empty() { - formatter.field("attrs", Lite(&self.value.attrs)); - } - formatter.field("member", Lite(&self.value.member)); - if self.value.colon_token.is_some() { - formatter.field("colon_token", &Present); - } - formatter.field("pat", Lite(&self.value.pat)); - formatter.finish() - } -} -impl Debug for Lite<syn::FieldValue> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("FieldValue"); - if !self.value.attrs.is_empty() { - formatter.field("attrs", Lite(&self.value.attrs)); - } - formatter.field("member", Lite(&self.value.member)); - if self.value.colon_token.is_some() { - formatter.field("colon_token", &Present); - } - formatter.field("expr", Lite(&self.value.expr)); - formatter.finish() - } -} -impl Debug for Lite<syn::Fields> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - match &self.value { - syn::Fields::Named(_val) => { - let mut formatter = formatter.debug_struct("Fields::Named"); - if !_val.named.is_empty() { - formatter.field("named", Lite(&_val.named)); - } - formatter.finish() - } - syn::Fields::Unnamed(_val) => { - let mut formatter = formatter.debug_struct("Fields::Unnamed"); - if !_val.unnamed.is_empty() { - formatter.field("unnamed", Lite(&_val.unnamed)); - } - formatter.finish() - } - syn::Fields::Unit => formatter.write_str("Fields::Unit"), - } - } -} -impl Debug for Lite<syn::FieldsNamed> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("FieldsNamed"); - if !self.value.named.is_empty() { - formatter.field("named", Lite(&self.value.named)); - } - formatter.finish() - } -} -impl Debug for Lite<syn::FieldsUnnamed> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("FieldsUnnamed"); - if !self.value.unnamed.is_empty() { - formatter.field("unnamed", Lite(&self.value.unnamed)); - } - formatter.finish() - } -} -impl Debug for Lite<syn::File> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("File"); - if let Some(val) = &self.value.shebang { - #[derive(RefCast)] - #[repr(transparent)] - struct Print(String); - impl Debug for Print { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Some(")?; - Debug::fmt(Lite(&self.0), formatter)?; - formatter.write_str(")")?; - Ok(()) - } - } - formatter.field("shebang", Print::ref_cast(val)); - } - if !self.value.attrs.is_empty() { - formatter.field("attrs", Lite(&self.value.attrs)); - } - if !self.value.items.is_empty() { - formatter.field("items", Lite(&self.value.items)); - } - formatter.finish() - } -} -impl Debug for Lite<syn::FnArg> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - match &self.value { - syn::FnArg::Receiver(_val) => { - formatter.write_str("FnArg::Receiver")?; - formatter.write_str("(")?; - Debug::fmt(Lite(_val), formatter)?; - formatter.write_str(")")?; - Ok(()) - } - syn::FnArg::Typed(_val) => { - formatter.write_str("FnArg::Typed")?; - formatter.write_str("(")?; - Debug::fmt(Lite(_val), formatter)?; - formatter.write_str(")")?; - Ok(()) - } - } - } -} -impl Debug for Lite<syn::ForeignItem> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - match &self.value { - syn::ForeignItem::Fn(_val) => 
{ - let mut formatter = formatter.debug_struct("ForeignItem::Fn"); - if !_val.attrs.is_empty() { - formatter.field("attrs", Lite(&_val.attrs)); - } - formatter.field("vis", Lite(&_val.vis)); - formatter.field("sig", Lite(&_val.sig)); - formatter.finish() - } - syn::ForeignItem::Static(_val) => { - let mut formatter = formatter.debug_struct("ForeignItem::Static"); - if !_val.attrs.is_empty() { - formatter.field("attrs", Lite(&_val.attrs)); - } - formatter.field("vis", Lite(&_val.vis)); - match _val.mutability { - syn::StaticMutability::None => {} - _ => { - formatter.field("mutability", Lite(&_val.mutability)); - } - } - formatter.field("ident", Lite(&_val.ident)); - formatter.field("ty", Lite(&_val.ty)); - formatter.finish() - } - syn::ForeignItem::Type(_val) => { - let mut formatter = formatter.debug_struct("ForeignItem::Type"); - if !_val.attrs.is_empty() { - formatter.field("attrs", Lite(&_val.attrs)); - } - formatter.field("vis", Lite(&_val.vis)); - formatter.field("ident", Lite(&_val.ident)); - formatter.field("generics", Lite(&_val.generics)); - formatter.finish() - } - syn::ForeignItem::Macro(_val) => { - let mut formatter = formatter.debug_struct("ForeignItem::Macro"); - if !_val.attrs.is_empty() { - formatter.field("attrs", Lite(&_val.attrs)); - } - formatter.field("mac", Lite(&_val.mac)); - if _val.semi_token.is_some() { - formatter.field("semi_token", &Present); - } - formatter.finish() - } - syn::ForeignItem::Verbatim(_val) => { - formatter.write_str("ForeignItem::Verbatim")?; - formatter.write_str("(`")?; - Display::fmt(_val, formatter)?; - formatter.write_str("`)")?; - Ok(()) - } - _ => unreachable!(), - } - } -} -impl Debug for Lite<syn::ForeignItemFn> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("ForeignItemFn"); - if !self.value.attrs.is_empty() { - formatter.field("attrs", Lite(&self.value.attrs)); - } - formatter.field("vis", Lite(&self.value.vis)); - formatter.field("sig", Lite(&self.value.sig)); - formatter.finish() - } -} -impl Debug for Lite<syn::ForeignItemMacro> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("ForeignItemMacro"); - if !self.value.attrs.is_empty() { - formatter.field("attrs", Lite(&self.value.attrs)); - } - formatter.field("mac", Lite(&self.value.mac)); - if self.value.semi_token.is_some() { - formatter.field("semi_token", &Present); - } - formatter.finish() - } -} -impl Debug for Lite<syn::ForeignItemStatic> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("ForeignItemStatic"); - if !self.value.attrs.is_empty() { - formatter.field("attrs", Lite(&self.value.attrs)); - } - formatter.field("vis", Lite(&self.value.vis)); - match self.value.mutability { - syn::StaticMutability::None => {} - _ => { - formatter.field("mutability", Lite(&self.value.mutability)); - } - } - formatter.field("ident", Lite(&self.value.ident)); - formatter.field("ty", Lite(&self.value.ty)); - formatter.finish() - } -} -impl Debug for Lite<syn::ForeignItemType> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("ForeignItemType"); - if !self.value.attrs.is_empty() { - formatter.field("attrs", Lite(&self.value.attrs)); - } - formatter.field("vis", Lite(&self.value.vis)); - formatter.field("ident", Lite(&self.value.ident)); - formatter.field("generics", Lite(&self.value.generics)); - formatter.finish() - } -} -impl Debug for 
Lite<syn::GenericArgument> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - match &self.value { - syn::GenericArgument::Lifetime(_val) => { - formatter.write_str("GenericArgument::Lifetime")?; - formatter.write_str("(")?; - Debug::fmt(Lite(_val), formatter)?; - formatter.write_str(")")?; - Ok(()) - } - syn::GenericArgument::Type(_val) => { - formatter.write_str("GenericArgument::Type")?; - formatter.write_str("(")?; - Debug::fmt(Lite(_val), formatter)?; - formatter.write_str(")")?; - Ok(()) - } - syn::GenericArgument::Const(_val) => { - formatter.write_str("GenericArgument::Const")?; - formatter.write_str("(")?; - Debug::fmt(Lite(_val), formatter)?; - formatter.write_str(")")?; - Ok(()) - } - syn::GenericArgument::AssocType(_val) => { - formatter.write_str("GenericArgument::AssocType")?; - formatter.write_str("(")?; - Debug::fmt(Lite(_val), formatter)?; - formatter.write_str(")")?; - Ok(()) - } - syn::GenericArgument::AssocConst(_val) => { - formatter.write_str("GenericArgument::AssocConst")?; - formatter.write_str("(")?; - Debug::fmt(Lite(_val), formatter)?; - formatter.write_str(")")?; - Ok(()) - } - syn::GenericArgument::Constraint(_val) => { - formatter.write_str("GenericArgument::Constraint")?; - formatter.write_str("(")?; - Debug::fmt(Lite(_val), formatter)?; - formatter.write_str(")")?; - Ok(()) - } - _ => unreachable!(), - } - } -} -impl Debug for Lite<syn::GenericParam> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - match &self.value { - syn::GenericParam::Lifetime(_val) => { - formatter.write_str("GenericParam::Lifetime")?; - formatter.write_str("(")?; - Debug::fmt(Lite(_val), formatter)?; - formatter.write_str(")")?; - Ok(()) - } - syn::GenericParam::Type(_val) => { - formatter.write_str("GenericParam::Type")?; - formatter.write_str("(")?; - Debug::fmt(Lite(_val), formatter)?; - formatter.write_str(")")?; - Ok(()) - } - syn::GenericParam::Const(_val) => { - formatter.write_str("GenericParam::Const")?; - formatter.write_str("(")?; - Debug::fmt(Lite(_val), formatter)?; - formatter.write_str(")")?; - Ok(()) - } - } - } -} -impl Debug for Lite<syn::Generics> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("Generics"); - if self.value.lt_token.is_some() { - formatter.field("lt_token", &Present); - } - if !self.value.params.is_empty() { - formatter.field("params", Lite(&self.value.params)); - } - if self.value.gt_token.is_some() { - formatter.field("gt_token", &Present); - } - if let Some(val) = &self.value.where_clause { - #[derive(RefCast)] - #[repr(transparent)] - struct Print(syn::WhereClause); - impl Debug for Print { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Some(")?; - Debug::fmt(Lite(&self.0), formatter)?; - formatter.write_str(")")?; - Ok(()) - } - } - formatter.field("where_clause", Print::ref_cast(val)); - } - formatter.finish() - } -} -impl Debug for Lite<syn::ImplItem> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - match &self.value { - syn::ImplItem::Const(_val) => { - let mut formatter = formatter.debug_struct("ImplItem::Const"); - if !_val.attrs.is_empty() { - formatter.field("attrs", Lite(&_val.attrs)); - } - formatter.field("vis", Lite(&_val.vis)); - if _val.defaultness.is_some() { - formatter.field("defaultness", &Present); - } - formatter.field("ident", Lite(&_val.ident)); - formatter.field("generics", Lite(&_val.generics)); - formatter.field("ty", Lite(&_val.ty)); - formatter.field("expr", 
Lite(&_val.expr)); - formatter.finish() - } - syn::ImplItem::Fn(_val) => { - let mut formatter = formatter.debug_struct("ImplItem::Fn"); - if !_val.attrs.is_empty() { - formatter.field("attrs", Lite(&_val.attrs)); - } - formatter.field("vis", Lite(&_val.vis)); - if _val.defaultness.is_some() { - formatter.field("defaultness", &Present); - } - formatter.field("sig", Lite(&_val.sig)); - formatter.field("block", Lite(&_val.block)); - formatter.finish() - } - syn::ImplItem::Type(_val) => { - let mut formatter = formatter.debug_struct("ImplItem::Type"); - if !_val.attrs.is_empty() { - formatter.field("attrs", Lite(&_val.attrs)); - } - formatter.field("vis", Lite(&_val.vis)); - if _val.defaultness.is_some() { - formatter.field("defaultness", &Present); - } - formatter.field("ident", Lite(&_val.ident)); - formatter.field("generics", Lite(&_val.generics)); - formatter.field("ty", Lite(&_val.ty)); - formatter.finish() - } - syn::ImplItem::Macro(_val) => { - let mut formatter = formatter.debug_struct("ImplItem::Macro"); - if !_val.attrs.is_empty() { - formatter.field("attrs", Lite(&_val.attrs)); - } - formatter.field("mac", Lite(&_val.mac)); - if _val.semi_token.is_some() { - formatter.field("semi_token", &Present); - } - formatter.finish() - } - syn::ImplItem::Verbatim(_val) => { - formatter.write_str("ImplItem::Verbatim")?; - formatter.write_str("(`")?; - Display::fmt(_val, formatter)?; - formatter.write_str("`)")?; - Ok(()) - } - _ => unreachable!(), - } - } -} -impl Debug for Lite<syn::ImplItemConst> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("ImplItemConst"); - if !self.value.attrs.is_empty() { - formatter.field("attrs", Lite(&self.value.attrs)); - } - formatter.field("vis", Lite(&self.value.vis)); - if self.value.defaultness.is_some() { - formatter.field("defaultness", &Present); - } - formatter.field("ident", Lite(&self.value.ident)); - formatter.field("generics", Lite(&self.value.generics)); - formatter.field("ty", Lite(&self.value.ty)); - formatter.field("expr", Lite(&self.value.expr)); - formatter.finish() - } -} -impl Debug for Lite<syn::ImplItemFn> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("ImplItemFn"); - if !self.value.attrs.is_empty() { - formatter.field("attrs", Lite(&self.value.attrs)); - } - formatter.field("vis", Lite(&self.value.vis)); - if self.value.defaultness.is_some() { - formatter.field("defaultness", &Present); - } - formatter.field("sig", Lite(&self.value.sig)); - formatter.field("block", Lite(&self.value.block)); - formatter.finish() - } -} -impl Debug for Lite<syn::ImplItemMacro> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("ImplItemMacro"); - if !self.value.attrs.is_empty() { - formatter.field("attrs", Lite(&self.value.attrs)); - } - formatter.field("mac", Lite(&self.value.mac)); - if self.value.semi_token.is_some() { - formatter.field("semi_token", &Present); - } - formatter.finish() - } -} -impl Debug for Lite<syn::ImplItemType> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("ImplItemType"); - if !self.value.attrs.is_empty() { - formatter.field("attrs", Lite(&self.value.attrs)); - } - formatter.field("vis", Lite(&self.value.vis)); - if self.value.defaultness.is_some() { - formatter.field("defaultness", &Present); - } - formatter.field("ident", Lite(&self.value.ident)); - formatter.field("generics", 
Lite(&self.value.generics)); - formatter.field("ty", Lite(&self.value.ty)); - formatter.finish() - } -} -impl Debug for Lite<syn::ImplRestriction> { - fn fmt(&self, _formatter: &mut fmt::Formatter) -> fmt::Result { - unreachable!() - } -} -impl Debug for Lite<syn::Index> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("Index"); - formatter.field("index", Lite(&self.value.index)); - formatter.finish() - } -} -impl Debug for Lite<syn::Item> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - match &self.value { - syn::Item::Const(_val) => { - let mut formatter = formatter.debug_struct("Item::Const"); - if !_val.attrs.is_empty() { - formatter.field("attrs", Lite(&_val.attrs)); - } - formatter.field("vis", Lite(&_val.vis)); - formatter.field("ident", Lite(&_val.ident)); - formatter.field("generics", Lite(&_val.generics)); - formatter.field("ty", Lite(&_val.ty)); - formatter.field("expr", Lite(&_val.expr)); - formatter.finish() - } - syn::Item::Enum(_val) => { - let mut formatter = formatter.debug_struct("Item::Enum"); - if !_val.attrs.is_empty() { - formatter.field("attrs", Lite(&_val.attrs)); - } - formatter.field("vis", Lite(&_val.vis)); - formatter.field("ident", Lite(&_val.ident)); - formatter.field("generics", Lite(&_val.generics)); - if !_val.variants.is_empty() { - formatter.field("variants", Lite(&_val.variants)); - } - formatter.finish() - } - syn::Item::ExternCrate(_val) => { - let mut formatter = formatter.debug_struct("Item::ExternCrate"); - if !_val.attrs.is_empty() { - formatter.field("attrs", Lite(&_val.attrs)); - } - formatter.field("vis", Lite(&_val.vis)); - formatter.field("ident", Lite(&_val.ident)); - if let Some(val) = &_val.rename { - #[derive(RefCast)] - #[repr(transparent)] - struct Print((syn::token::As, proc_macro2::Ident)); - impl Debug for Print { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Some(")?; - Debug::fmt(Lite(&self.0.1), formatter)?; - formatter.write_str(")")?; - Ok(()) - } - } - formatter.field("rename", Print::ref_cast(val)); - } - formatter.finish() - } - syn::Item::Fn(_val) => { - let mut formatter = formatter.debug_struct("Item::Fn"); - if !_val.attrs.is_empty() { - formatter.field("attrs", Lite(&_val.attrs)); - } - formatter.field("vis", Lite(&_val.vis)); - formatter.field("sig", Lite(&_val.sig)); - formatter.field("block", Lite(&_val.block)); - formatter.finish() - } - syn::Item::ForeignMod(_val) => { - let mut formatter = formatter.debug_struct("Item::ForeignMod"); - if !_val.attrs.is_empty() { - formatter.field("attrs", Lite(&_val.attrs)); - } - if _val.unsafety.is_some() { - formatter.field("unsafety", &Present); - } - formatter.field("abi", Lite(&_val.abi)); - if !_val.items.is_empty() { - formatter.field("items", Lite(&_val.items)); - } - formatter.finish() - } - syn::Item::Impl(_val) => { - let mut formatter = formatter.debug_struct("Item::Impl"); - if !_val.attrs.is_empty() { - formatter.field("attrs", Lite(&_val.attrs)); - } - if _val.defaultness.is_some() { - formatter.field("defaultness", &Present); - } - if _val.unsafety.is_some() { - formatter.field("unsafety", &Present); - } - formatter.field("generics", Lite(&_val.generics)); - if let Some(val) = &_val.trait_ { - #[derive(RefCast)] - #[repr(transparent)] - struct Print((Option<syn::token::Not>, syn::Path, syn::token::For)); - impl Debug for Print { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Some(")?; - Debug::fmt( - &( 
- &super::Option { - present: self.0.0.is_some(), - }, - Lite(&self.0.1), - ), - formatter, - )?; - formatter.write_str(")")?; - Ok(()) - } - } - formatter.field("trait_", Print::ref_cast(val)); - } - formatter.field("self_ty", Lite(&_val.self_ty)); - if !_val.items.is_empty() { - formatter.field("items", Lite(&_val.items)); - } - formatter.finish() - } - syn::Item::Macro(_val) => { - let mut formatter = formatter.debug_struct("Item::Macro"); - if !_val.attrs.is_empty() { - formatter.field("attrs", Lite(&_val.attrs)); - } - if let Some(val) = &_val.ident { - #[derive(RefCast)] - #[repr(transparent)] - struct Print(proc_macro2::Ident); - impl Debug for Print { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Some(")?; - Debug::fmt(Lite(&self.0), formatter)?; - formatter.write_str(")")?; - Ok(()) - } - } - formatter.field("ident", Print::ref_cast(val)); - } - formatter.field("mac", Lite(&_val.mac)); - if _val.semi_token.is_some() { - formatter.field("semi_token", &Present); - } - formatter.finish() - } - syn::Item::Mod(_val) => { - let mut formatter = formatter.debug_struct("Item::Mod"); - if !_val.attrs.is_empty() { - formatter.field("attrs", Lite(&_val.attrs)); - } - formatter.field("vis", Lite(&_val.vis)); - if _val.unsafety.is_some() { - formatter.field("unsafety", &Present); - } - formatter.field("ident", Lite(&_val.ident)); - if let Some(val) = &_val.content { - #[derive(RefCast)] - #[repr(transparent)] - struct Print((syn::token::Brace, Vec<syn::Item>)); - impl Debug for Print { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Some(")?; - Debug::fmt(Lite(&self.0.1), formatter)?; - formatter.write_str(")")?; - Ok(()) - } - } - formatter.field("content", Print::ref_cast(val)); - } - if _val.semi.is_some() { - formatter.field("semi", &Present); - } - formatter.finish() - } - syn::Item::Static(_val) => { - let mut formatter = formatter.debug_struct("Item::Static"); - if !_val.attrs.is_empty() { - formatter.field("attrs", Lite(&_val.attrs)); - } - formatter.field("vis", Lite(&_val.vis)); - match _val.mutability { - syn::StaticMutability::None => {} - _ => { - formatter.field("mutability", Lite(&_val.mutability)); - } - } - formatter.field("ident", Lite(&_val.ident)); - formatter.field("ty", Lite(&_val.ty)); - formatter.field("expr", Lite(&_val.expr)); - formatter.finish() - } - syn::Item::Struct(_val) => { - let mut formatter = formatter.debug_struct("Item::Struct"); - if !_val.attrs.is_empty() { - formatter.field("attrs", Lite(&_val.attrs)); - } - formatter.field("vis", Lite(&_val.vis)); - formatter.field("ident", Lite(&_val.ident)); - formatter.field("generics", Lite(&_val.generics)); - formatter.field("fields", Lite(&_val.fields)); - if _val.semi_token.is_some() { - formatter.field("semi_token", &Present); - } - formatter.finish() - } - syn::Item::Trait(_val) => { - let mut formatter = formatter.debug_struct("Item::Trait"); - if !_val.attrs.is_empty() { - formatter.field("attrs", Lite(&_val.attrs)); - } - formatter.field("vis", Lite(&_val.vis)); - if _val.unsafety.is_some() { - formatter.field("unsafety", &Present); - } - if _val.auto_token.is_some() { - formatter.field("auto_token", &Present); - } - if let Some(val) = &_val.restriction { - #[derive(RefCast)] - #[repr(transparent)] - struct Print(syn::ImplRestriction); - impl Debug for Print { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Some(")?; - Debug::fmt(Lite(&self.0), formatter)?; - formatter.write_str(")")?; - 
Ok(()) - } - } - formatter.field("restriction", Print::ref_cast(val)); - } - formatter.field("ident", Lite(&_val.ident)); - formatter.field("generics", Lite(&_val.generics)); - if _val.colon_token.is_some() { - formatter.field("colon_token", &Present); - } - if !_val.supertraits.is_empty() { - formatter.field("supertraits", Lite(&_val.supertraits)); - } - if !_val.items.is_empty() { - formatter.field("items", Lite(&_val.items)); - } - formatter.finish() - } - syn::Item::TraitAlias(_val) => { - let mut formatter = formatter.debug_struct("Item::TraitAlias"); - if !_val.attrs.is_empty() { - formatter.field("attrs", Lite(&_val.attrs)); - } - formatter.field("vis", Lite(&_val.vis)); - formatter.field("ident", Lite(&_val.ident)); - formatter.field("generics", Lite(&_val.generics)); - if !_val.bounds.is_empty() { - formatter.field("bounds", Lite(&_val.bounds)); - } - formatter.finish() - } - syn::Item::Type(_val) => { - let mut formatter = formatter.debug_struct("Item::Type"); - if !_val.attrs.is_empty() { - formatter.field("attrs", Lite(&_val.attrs)); - } - formatter.field("vis", Lite(&_val.vis)); - formatter.field("ident", Lite(&_val.ident)); - formatter.field("generics", Lite(&_val.generics)); - formatter.field("ty", Lite(&_val.ty)); - formatter.finish() - } - syn::Item::Union(_val) => { - let mut formatter = formatter.debug_struct("Item::Union"); - if !_val.attrs.is_empty() { - formatter.field("attrs", Lite(&_val.attrs)); - } - formatter.field("vis", Lite(&_val.vis)); - formatter.field("ident", Lite(&_val.ident)); - formatter.field("generics", Lite(&_val.generics)); - formatter.field("fields", Lite(&_val.fields)); - formatter.finish() - } - syn::Item::Use(_val) => { - let mut formatter = formatter.debug_struct("Item::Use"); - if !_val.attrs.is_empty() { - formatter.field("attrs", Lite(&_val.attrs)); - } - formatter.field("vis", Lite(&_val.vis)); - if _val.leading_colon.is_some() { - formatter.field("leading_colon", &Present); - } - formatter.field("tree", Lite(&_val.tree)); - formatter.finish() - } - syn::Item::Verbatim(_val) => { - formatter.write_str("Item::Verbatim")?; - formatter.write_str("(`")?; - Display::fmt(_val, formatter)?; - formatter.write_str("`)")?; - Ok(()) - } - _ => unreachable!(), - } - } -} -impl Debug for Lite<syn::ItemConst> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("ItemConst"); - if !self.value.attrs.is_empty() { - formatter.field("attrs", Lite(&self.value.attrs)); - } - formatter.field("vis", Lite(&self.value.vis)); - formatter.field("ident", Lite(&self.value.ident)); - formatter.field("generics", Lite(&self.value.generics)); - formatter.field("ty", Lite(&self.value.ty)); - formatter.field("expr", Lite(&self.value.expr)); - formatter.finish() - } -} -impl Debug for Lite<syn::ItemEnum> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("ItemEnum"); - if !self.value.attrs.is_empty() { - formatter.field("attrs", Lite(&self.value.attrs)); - } - formatter.field("vis", Lite(&self.value.vis)); - formatter.field("ident", Lite(&self.value.ident)); - formatter.field("generics", Lite(&self.value.generics)); - if !self.value.variants.is_empty() { - formatter.field("variants", Lite(&self.value.variants)); - } - formatter.finish() - } -} -impl Debug for Lite<syn::ItemExternCrate> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("ItemExternCrate"); - if !self.value.attrs.is_empty() { - 
formatter.field("attrs", Lite(&self.value.attrs)); - } - formatter.field("vis", Lite(&self.value.vis)); - formatter.field("ident", Lite(&self.value.ident)); - if let Some(val) = &self.value.rename { - #[derive(RefCast)] - #[repr(transparent)] - struct Print((syn::token::As, proc_macro2::Ident)); - impl Debug for Print { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Some(")?; - Debug::fmt(Lite(&self.0.1), formatter)?; - formatter.write_str(")")?; - Ok(()) - } - } - formatter.field("rename", Print::ref_cast(val)); - } - formatter.finish() - } -} -impl Debug for Lite<syn::ItemFn> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("ItemFn"); - if !self.value.attrs.is_empty() { - formatter.field("attrs", Lite(&self.value.attrs)); - } - formatter.field("vis", Lite(&self.value.vis)); - formatter.field("sig", Lite(&self.value.sig)); - formatter.field("block", Lite(&self.value.block)); - formatter.finish() - } -} -impl Debug for Lite<syn::ItemForeignMod> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("ItemForeignMod"); - if !self.value.attrs.is_empty() { - formatter.field("attrs", Lite(&self.value.attrs)); - } - if self.value.unsafety.is_some() { - formatter.field("unsafety", &Present); - } - formatter.field("abi", Lite(&self.value.abi)); - if !self.value.items.is_empty() { - formatter.field("items", Lite(&self.value.items)); - } - formatter.finish() - } -} -impl Debug for Lite<syn::ItemImpl> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("ItemImpl"); - if !self.value.attrs.is_empty() { - formatter.field("attrs", Lite(&self.value.attrs)); - } - if self.value.defaultness.is_some() { - formatter.field("defaultness", &Present); - } - if self.value.unsafety.is_some() { - formatter.field("unsafety", &Present); - } - formatter.field("generics", Lite(&self.value.generics)); - if let Some(val) = &self.value.trait_ { - #[derive(RefCast)] - #[repr(transparent)] - struct Print((Option<syn::token::Not>, syn::Path, syn::token::For)); - impl Debug for Print { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Some(")?; - Debug::fmt( - &( - &super::Option { - present: self.0.0.is_some(), - }, - Lite(&self.0.1), - ), - formatter, - )?; - formatter.write_str(")")?; - Ok(()) - } - } - formatter.field("trait_", Print::ref_cast(val)); - } - formatter.field("self_ty", Lite(&self.value.self_ty)); - if !self.value.items.is_empty() { - formatter.field("items", Lite(&self.value.items)); - } - formatter.finish() - } -} -impl Debug for Lite<syn::ItemMacro> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("ItemMacro"); - if !self.value.attrs.is_empty() { - formatter.field("attrs", Lite(&self.value.attrs)); - } - if let Some(val) = &self.value.ident { - #[derive(RefCast)] - #[repr(transparent)] - struct Print(proc_macro2::Ident); - impl Debug for Print { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Some(")?; - Debug::fmt(Lite(&self.0), formatter)?; - formatter.write_str(")")?; - Ok(()) - } - } - formatter.field("ident", Print::ref_cast(val)); - } - formatter.field("mac", Lite(&self.value.mac)); - if self.value.semi_token.is_some() { - formatter.field("semi_token", &Present); - } - formatter.finish() - } -} -impl Debug for Lite<syn::ItemMod> { - fn fmt(&self, 
formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("ItemMod"); - if !self.value.attrs.is_empty() { - formatter.field("attrs", Lite(&self.value.attrs)); - } - formatter.field("vis", Lite(&self.value.vis)); - if self.value.unsafety.is_some() { - formatter.field("unsafety", &Present); - } - formatter.field("ident", Lite(&self.value.ident)); - if let Some(val) = &self.value.content { - #[derive(RefCast)] - #[repr(transparent)] - struct Print((syn::token::Brace, Vec<syn::Item>)); - impl Debug for Print { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Some(")?; - Debug::fmt(Lite(&self.0.1), formatter)?; - formatter.write_str(")")?; - Ok(()) - } - } - formatter.field("content", Print::ref_cast(val)); - } - if self.value.semi.is_some() { - formatter.field("semi", &Present); - } - formatter.finish() - } -} -impl Debug for Lite<syn::ItemStatic> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("ItemStatic"); - if !self.value.attrs.is_empty() { - formatter.field("attrs", Lite(&self.value.attrs)); - } - formatter.field("vis", Lite(&self.value.vis)); - match self.value.mutability { - syn::StaticMutability::None => {} - _ => { - formatter.field("mutability", Lite(&self.value.mutability)); - } - } - formatter.field("ident", Lite(&self.value.ident)); - formatter.field("ty", Lite(&self.value.ty)); - formatter.field("expr", Lite(&self.value.expr)); - formatter.finish() - } -} -impl Debug for Lite<syn::ItemStruct> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("ItemStruct"); - if !self.value.attrs.is_empty() { - formatter.field("attrs", Lite(&self.value.attrs)); - } - formatter.field("vis", Lite(&self.value.vis)); - formatter.field("ident", Lite(&self.value.ident)); - formatter.field("generics", Lite(&self.value.generics)); - formatter.field("fields", Lite(&self.value.fields)); - if self.value.semi_token.is_some() { - formatter.field("semi_token", &Present); - } - formatter.finish() - } -} -impl Debug for Lite<syn::ItemTrait> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("ItemTrait"); - if !self.value.attrs.is_empty() { - formatter.field("attrs", Lite(&self.value.attrs)); - } - formatter.field("vis", Lite(&self.value.vis)); - if self.value.unsafety.is_some() { - formatter.field("unsafety", &Present); - } - if self.value.auto_token.is_some() { - formatter.field("auto_token", &Present); - } - if let Some(val) = &self.value.restriction { - #[derive(RefCast)] - #[repr(transparent)] - struct Print(syn::ImplRestriction); - impl Debug for Print { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Some(")?; - Debug::fmt(Lite(&self.0), formatter)?; - formatter.write_str(")")?; - Ok(()) - } - } - formatter.field("restriction", Print::ref_cast(val)); - } - formatter.field("ident", Lite(&self.value.ident)); - formatter.field("generics", Lite(&self.value.generics)); - if self.value.colon_token.is_some() { - formatter.field("colon_token", &Present); - } - if !self.value.supertraits.is_empty() { - formatter.field("supertraits", Lite(&self.value.supertraits)); - } - if !self.value.items.is_empty() { - formatter.field("items", Lite(&self.value.items)); - } - formatter.finish() - } -} -impl Debug for Lite<syn::ItemTraitAlias> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = 
formatter.debug_struct("ItemTraitAlias"); - if !self.value.attrs.is_empty() { - formatter.field("attrs", Lite(&self.value.attrs)); - } - formatter.field("vis", Lite(&self.value.vis)); - formatter.field("ident", Lite(&self.value.ident)); - formatter.field("generics", Lite(&self.value.generics)); - if !self.value.bounds.is_empty() { - formatter.field("bounds", Lite(&self.value.bounds)); - } - formatter.finish() - } -} -impl Debug for Lite<syn::ItemType> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("ItemType"); - if !self.value.attrs.is_empty() { - formatter.field("attrs", Lite(&self.value.attrs)); - } - formatter.field("vis", Lite(&self.value.vis)); - formatter.field("ident", Lite(&self.value.ident)); - formatter.field("generics", Lite(&self.value.generics)); - formatter.field("ty", Lite(&self.value.ty)); - formatter.finish() - } -} -impl Debug for Lite<syn::ItemUnion> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("ItemUnion"); - if !self.value.attrs.is_empty() { - formatter.field("attrs", Lite(&self.value.attrs)); - } - formatter.field("vis", Lite(&self.value.vis)); - formatter.field("ident", Lite(&self.value.ident)); - formatter.field("generics", Lite(&self.value.generics)); - formatter.field("fields", Lite(&self.value.fields)); - formatter.finish() - } -} -impl Debug for Lite<syn::ItemUse> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("ItemUse"); - if !self.value.attrs.is_empty() { - formatter.field("attrs", Lite(&self.value.attrs)); - } - formatter.field("vis", Lite(&self.value.vis)); - if self.value.leading_colon.is_some() { - formatter.field("leading_colon", &Present); - } - formatter.field("tree", Lite(&self.value.tree)); - formatter.finish() - } -} -impl Debug for Lite<syn::Label> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("Label"); - formatter.field("name", Lite(&self.value.name)); - formatter.finish() - } -} -impl Debug for Lite<syn::Lifetime> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("Lifetime"); - formatter.field("ident", Lite(&self.value.ident)); - formatter.finish() - } -} -impl Debug for Lite<syn::LifetimeParam> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("LifetimeParam"); - if !self.value.attrs.is_empty() { - formatter.field("attrs", Lite(&self.value.attrs)); - } - formatter.field("lifetime", Lite(&self.value.lifetime)); - if self.value.colon_token.is_some() { - formatter.field("colon_token", &Present); - } - if !self.value.bounds.is_empty() { - formatter.field("bounds", Lite(&self.value.bounds)); - } - formatter.finish() - } -} -impl Debug for Lite<syn::Lit> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - match &self.value { - syn::Lit::Str(_val) => write!(formatter, "{:?}", _val.value()), - syn::Lit::ByteStr(_val) => write!(formatter, "{:?}", _val.value()), - syn::Lit::CStr(_val) => write!(formatter, "{:?}", _val.value()), - syn::Lit::Byte(_val) => write!(formatter, "{:?}", _val.value()), - syn::Lit::Char(_val) => write!(formatter, "{:?}", _val.value()), - syn::Lit::Int(_val) => write!(formatter, "{}", _val), - syn::Lit::Float(_val) => write!(formatter, "{}", _val), - syn::Lit::Bool(_val) => { - let mut formatter = formatter.debug_struct("Lit::Bool"); - 
formatter.field("value", Lite(&_val.value)); - formatter.finish() - } - syn::Lit::Verbatim(_val) => { - formatter.write_str("Lit::Verbatim")?; - formatter.write_str("(`")?; - Display::fmt(_val, formatter)?; - formatter.write_str("`)")?; - Ok(()) - } - _ => unreachable!(), - } - } -} -impl Debug for Lite<syn::LitBool> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("LitBool"); - formatter.field("value", Lite(&self.value.value)); - formatter.finish() - } -} -impl Debug for Lite<syn::LitByte> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - write!(formatter, "{:?}", self.value.value()) - } -} -impl Debug for Lite<syn::LitByteStr> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - write!(formatter, "{:?}", self.value.value()) - } -} -impl Debug for Lite<syn::LitCStr> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - write!(formatter, "{:?}", self.value.value()) - } -} -impl Debug for Lite<syn::LitChar> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - write!(formatter, "{:?}", self.value.value()) - } -} -impl Debug for Lite<syn::LitFloat> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - write!(formatter, "{}", & self.value) - } -} -impl Debug for Lite<syn::LitInt> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - write!(formatter, "{}", & self.value) - } -} -impl Debug for Lite<syn::LitStr> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - write!(formatter, "{:?}", self.value.value()) - } -} -impl Debug for Lite<syn::Local> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("Local"); - if !self.value.attrs.is_empty() { - formatter.field("attrs", Lite(&self.value.attrs)); - } - formatter.field("pat", Lite(&self.value.pat)); - if let Some(val) = &self.value.init { - #[derive(RefCast)] - #[repr(transparent)] - struct Print(syn::LocalInit); - impl Debug for Print { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Some(")?; - Debug::fmt(Lite(&self.0), formatter)?; - formatter.write_str(")")?; - Ok(()) - } - } - formatter.field("init", Print::ref_cast(val)); - } - formatter.finish() - } -} -impl Debug for Lite<syn::LocalInit> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("LocalInit"); - formatter.field("expr", Lite(&self.value.expr)); - if let Some(val) = &self.value.diverge { - #[derive(RefCast)] - #[repr(transparent)] - struct Print((syn::token::Else, Box<syn::Expr>)); - impl Debug for Print { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Some(")?; - Debug::fmt(Lite(&self.0.1), formatter)?; - formatter.write_str(")")?; - Ok(()) - } - } - formatter.field("diverge", Print::ref_cast(val)); - } - formatter.finish() - } -} -impl Debug for Lite<syn::Macro> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("Macro"); - formatter.field("path", Lite(&self.value.path)); - formatter.field("delimiter", Lite(&self.value.delimiter)); - formatter.field("tokens", Lite(&self.value.tokens)); - formatter.finish() - } -} -impl Debug for Lite<syn::MacroDelimiter> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - match &self.value { - syn::MacroDelimiter::Paren(_val) => { - formatter.write_str("MacroDelimiter::Paren")?; - Ok(()) - } - 
syn::MacroDelimiter::Brace(_val) => { - formatter.write_str("MacroDelimiter::Brace")?; - Ok(()) - } - syn::MacroDelimiter::Bracket(_val) => { - formatter.write_str("MacroDelimiter::Bracket")?; - Ok(()) - } - } - } -} -impl Debug for Lite<syn::Member> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - match &self.value { - syn::Member::Named(_val) => { - formatter.write_str("Member::Named")?; - formatter.write_str("(")?; - Debug::fmt(Lite(_val), formatter)?; - formatter.write_str(")")?; - Ok(()) - } - syn::Member::Unnamed(_val) => { - formatter.write_str("Member::Unnamed")?; - formatter.write_str("(")?; - Debug::fmt(Lite(_val), formatter)?; - formatter.write_str(")")?; - Ok(()) - } - } - } -} -impl Debug for Lite<syn::Meta> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - match &self.value { - syn::Meta::Path(_val) => { - let mut formatter = formatter.debug_struct("Meta::Path"); - if _val.leading_colon.is_some() { - formatter.field("leading_colon", &Present); - } - if !_val.segments.is_empty() { - formatter.field("segments", Lite(&_val.segments)); - } - formatter.finish() - } - syn::Meta::List(_val) => { - let mut formatter = formatter.debug_struct("Meta::List"); - formatter.field("path", Lite(&_val.path)); - formatter.field("delimiter", Lite(&_val.delimiter)); - formatter.field("tokens", Lite(&_val.tokens)); - formatter.finish() - } - syn::Meta::NameValue(_val) => { - let mut formatter = formatter.debug_struct("Meta::NameValue"); - formatter.field("path", Lite(&_val.path)); - formatter.field("value", Lite(&_val.value)); - formatter.finish() - } - } - } -} -impl Debug for Lite<syn::MetaList> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("MetaList"); - formatter.field("path", Lite(&self.value.path)); - formatter.field("delimiter", Lite(&self.value.delimiter)); - formatter.field("tokens", Lite(&self.value.tokens)); - formatter.finish() - } -} -impl Debug for Lite<syn::MetaNameValue> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("MetaNameValue"); - formatter.field("path", Lite(&self.value.path)); - formatter.field("value", Lite(&self.value.value)); - formatter.finish() - } -} -impl Debug for Lite<syn::ParenthesizedGenericArguments> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("ParenthesizedGenericArguments"); - if !self.value.inputs.is_empty() { - formatter.field("inputs", Lite(&self.value.inputs)); - } - formatter.field("output", Lite(&self.value.output)); - formatter.finish() - } -} -impl Debug for Lite<syn::Pat> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - match &self.value { - syn::Pat::Const(_val) => { - formatter.write_str("Pat::Const")?; - formatter.write_str("(")?; - Debug::fmt(Lite(_val), formatter)?; - formatter.write_str(")")?; - Ok(()) - } - syn::Pat::Ident(_val) => { - let mut formatter = formatter.debug_struct("Pat::Ident"); - if !_val.attrs.is_empty() { - formatter.field("attrs", Lite(&_val.attrs)); - } - if _val.by_ref.is_some() { - formatter.field("by_ref", &Present); - } - if _val.mutability.is_some() { - formatter.field("mutability", &Present); - } - formatter.field("ident", Lite(&_val.ident)); - if let Some(val) = &_val.subpat { - #[derive(RefCast)] - #[repr(transparent)] - struct Print((syn::token::At, Box<syn::Pat>)); - impl Debug for Print { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - 
formatter.write_str("Some(")?; - Debug::fmt(Lite(&self.0.1), formatter)?; - formatter.write_str(")")?; - Ok(()) - } - } - formatter.field("subpat", Print::ref_cast(val)); - } - formatter.finish() - } - syn::Pat::Lit(_val) => { - formatter.write_str("Pat::Lit")?; - formatter.write_str("(")?; - Debug::fmt(Lite(_val), formatter)?; - formatter.write_str(")")?; - Ok(()) - } - syn::Pat::Macro(_val) => { - formatter.write_str("Pat::Macro")?; - formatter.write_str("(")?; - Debug::fmt(Lite(_val), formatter)?; - formatter.write_str(")")?; - Ok(()) - } - syn::Pat::Or(_val) => { - let mut formatter = formatter.debug_struct("Pat::Or"); - if !_val.attrs.is_empty() { - formatter.field("attrs", Lite(&_val.attrs)); - } - if _val.leading_vert.is_some() { - formatter.field("leading_vert", &Present); - } - if !_val.cases.is_empty() { - formatter.field("cases", Lite(&_val.cases)); - } - formatter.finish() - } - syn::Pat::Paren(_val) => { - let mut formatter = formatter.debug_struct("Pat::Paren"); - if !_val.attrs.is_empty() { - formatter.field("attrs", Lite(&_val.attrs)); - } - formatter.field("pat", Lite(&_val.pat)); - formatter.finish() - } - syn::Pat::Path(_val) => { - formatter.write_str("Pat::Path")?; - formatter.write_str("(")?; - Debug::fmt(Lite(_val), formatter)?; - formatter.write_str(")")?; - Ok(()) - } - syn::Pat::Range(_val) => { - formatter.write_str("Pat::Range")?; - formatter.write_str("(")?; - Debug::fmt(Lite(_val), formatter)?; - formatter.write_str(")")?; - Ok(()) - } - syn::Pat::Reference(_val) => { - let mut formatter = formatter.debug_struct("Pat::Reference"); - if !_val.attrs.is_empty() { - formatter.field("attrs", Lite(&_val.attrs)); - } - if _val.mutability.is_some() { - formatter.field("mutability", &Present); - } - formatter.field("pat", Lite(&_val.pat)); - formatter.finish() - } - syn::Pat::Rest(_val) => { - let mut formatter = formatter.debug_struct("Pat::Rest"); - if !_val.attrs.is_empty() { - formatter.field("attrs", Lite(&_val.attrs)); - } - formatter.finish() - } - syn::Pat::Slice(_val) => { - let mut formatter = formatter.debug_struct("Pat::Slice"); - if !_val.attrs.is_empty() { - formatter.field("attrs", Lite(&_val.attrs)); - } - if !_val.elems.is_empty() { - formatter.field("elems", Lite(&_val.elems)); - } - formatter.finish() - } - syn::Pat::Struct(_val) => { - let mut formatter = formatter.debug_struct("Pat::Struct"); - if !_val.attrs.is_empty() { - formatter.field("attrs", Lite(&_val.attrs)); - } - if let Some(val) = &_val.qself { - #[derive(RefCast)] - #[repr(transparent)] - struct Print(syn::QSelf); - impl Debug for Print { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Some(")?; - Debug::fmt(Lite(&self.0), formatter)?; - formatter.write_str(")")?; - Ok(()) - } - } - formatter.field("qself", Print::ref_cast(val)); - } - formatter.field("path", Lite(&_val.path)); - if !_val.fields.is_empty() { - formatter.field("fields", Lite(&_val.fields)); - } - if let Some(val) = &_val.rest { - #[derive(RefCast)] - #[repr(transparent)] - struct Print(syn::PatRest); - impl Debug for Print { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Some(")?; - Debug::fmt(Lite(&self.0), formatter)?; - formatter.write_str(")")?; - Ok(()) - } - } - formatter.field("rest", Print::ref_cast(val)); - } - formatter.finish() - } - syn::Pat::Tuple(_val) => { - let mut formatter = formatter.debug_struct("Pat::Tuple"); - if !_val.attrs.is_empty() { - formatter.field("attrs", Lite(&_val.attrs)); - } - if !_val.elems.is_empty() { - 
formatter.field("elems", Lite(&_val.elems)); - } - formatter.finish() - } - syn::Pat::TupleStruct(_val) => { - let mut formatter = formatter.debug_struct("Pat::TupleStruct"); - if !_val.attrs.is_empty() { - formatter.field("attrs", Lite(&_val.attrs)); - } - if let Some(val) = &_val.qself { - #[derive(RefCast)] - #[repr(transparent)] - struct Print(syn::QSelf); - impl Debug for Print { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Some(")?; - Debug::fmt(Lite(&self.0), formatter)?; - formatter.write_str(")")?; - Ok(()) - } - } - formatter.field("qself", Print::ref_cast(val)); - } - formatter.field("path", Lite(&_val.path)); - if !_val.elems.is_empty() { - formatter.field("elems", Lite(&_val.elems)); - } - formatter.finish() - } - syn::Pat::Type(_val) => { - let mut formatter = formatter.debug_struct("Pat::Type"); - if !_val.attrs.is_empty() { - formatter.field("attrs", Lite(&_val.attrs)); - } - formatter.field("pat", Lite(&_val.pat)); - formatter.field("ty", Lite(&_val.ty)); - formatter.finish() - } - syn::Pat::Verbatim(_val) => { - formatter.write_str("Pat::Verbatim")?; - formatter.write_str("(`")?; - Display::fmt(_val, formatter)?; - formatter.write_str("`)")?; - Ok(()) - } - syn::Pat::Wild(_val) => { - let mut formatter = formatter.debug_struct("Pat::Wild"); - if !_val.attrs.is_empty() { - formatter.field("attrs", Lite(&_val.attrs)); - } - formatter.finish() - } - _ => unreachable!(), - } - } -} -impl Debug for Lite<syn::PatIdent> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("PatIdent"); - if !self.value.attrs.is_empty() { - formatter.field("attrs", Lite(&self.value.attrs)); - } - if self.value.by_ref.is_some() { - formatter.field("by_ref", &Present); - } - if self.value.mutability.is_some() { - formatter.field("mutability", &Present); - } - formatter.field("ident", Lite(&self.value.ident)); - if let Some(val) = &self.value.subpat { - #[derive(RefCast)] - #[repr(transparent)] - struct Print((syn::token::At, Box<syn::Pat>)); - impl Debug for Print { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Some(")?; - Debug::fmt(Lite(&self.0.1), formatter)?; - formatter.write_str(")")?; - Ok(()) - } - } - formatter.field("subpat", Print::ref_cast(val)); - } - formatter.finish() - } -} -impl Debug for Lite<syn::PatOr> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("PatOr"); - if !self.value.attrs.is_empty() { - formatter.field("attrs", Lite(&self.value.attrs)); - } - if self.value.leading_vert.is_some() { - formatter.field("leading_vert", &Present); - } - if !self.value.cases.is_empty() { - formatter.field("cases", Lite(&self.value.cases)); - } - formatter.finish() - } -} -impl Debug for Lite<syn::PatParen> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("PatParen"); - if !self.value.attrs.is_empty() { - formatter.field("attrs", Lite(&self.value.attrs)); - } - formatter.field("pat", Lite(&self.value.pat)); - formatter.finish() - } -} -impl Debug for Lite<syn::PatReference> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("PatReference"); - if !self.value.attrs.is_empty() { - formatter.field("attrs", Lite(&self.value.attrs)); - } - if self.value.mutability.is_some() { - formatter.field("mutability", &Present); - } - formatter.field("pat", Lite(&self.value.pat)); - 
formatter.finish() - } -} -impl Debug for Lite<syn::PatRest> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("PatRest"); - if !self.value.attrs.is_empty() { - formatter.field("attrs", Lite(&self.value.attrs)); - } - formatter.finish() - } -} -impl Debug for Lite<syn::PatSlice> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("PatSlice"); - if !self.value.attrs.is_empty() { - formatter.field("attrs", Lite(&self.value.attrs)); - } - if !self.value.elems.is_empty() { - formatter.field("elems", Lite(&self.value.elems)); - } - formatter.finish() - } -} -impl Debug for Lite<syn::PatStruct> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("PatStruct"); - if !self.value.attrs.is_empty() { - formatter.field("attrs", Lite(&self.value.attrs)); - } - if let Some(val) = &self.value.qself { - #[derive(RefCast)] - #[repr(transparent)] - struct Print(syn::QSelf); - impl Debug for Print { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Some(")?; - Debug::fmt(Lite(&self.0), formatter)?; - formatter.write_str(")")?; - Ok(()) - } - } - formatter.field("qself", Print::ref_cast(val)); - } - formatter.field("path", Lite(&self.value.path)); - if !self.value.fields.is_empty() { - formatter.field("fields", Lite(&self.value.fields)); - } - if let Some(val) = &self.value.rest { - #[derive(RefCast)] - #[repr(transparent)] - struct Print(syn::PatRest); - impl Debug for Print { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Some(")?; - Debug::fmt(Lite(&self.0), formatter)?; - formatter.write_str(")")?; - Ok(()) - } - } - formatter.field("rest", Print::ref_cast(val)); - } - formatter.finish() - } -} -impl Debug for Lite<syn::PatTuple> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("PatTuple"); - if !self.value.attrs.is_empty() { - formatter.field("attrs", Lite(&self.value.attrs)); - } - if !self.value.elems.is_empty() { - formatter.field("elems", Lite(&self.value.elems)); - } - formatter.finish() - } -} -impl Debug for Lite<syn::PatTupleStruct> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("PatTupleStruct"); - if !self.value.attrs.is_empty() { - formatter.field("attrs", Lite(&self.value.attrs)); - } - if let Some(val) = &self.value.qself { - #[derive(RefCast)] - #[repr(transparent)] - struct Print(syn::QSelf); - impl Debug for Print { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Some(")?; - Debug::fmt(Lite(&self.0), formatter)?; - formatter.write_str(")")?; - Ok(()) - } - } - formatter.field("qself", Print::ref_cast(val)); - } - formatter.field("path", Lite(&self.value.path)); - if !self.value.elems.is_empty() { - formatter.field("elems", Lite(&self.value.elems)); - } - formatter.finish() - } -} -impl Debug for Lite<syn::PatType> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("PatType"); - if !self.value.attrs.is_empty() { - formatter.field("attrs", Lite(&self.value.attrs)); - } - formatter.field("pat", Lite(&self.value.pat)); - formatter.field("ty", Lite(&self.value.ty)); - formatter.finish() - } -} -impl Debug for Lite<syn::PatWild> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter 
= formatter.debug_struct("PatWild"); - if !self.value.attrs.is_empty() { - formatter.field("attrs", Lite(&self.value.attrs)); - } - formatter.finish() - } -} -impl Debug for Lite<syn::Path> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("Path"); - if self.value.leading_colon.is_some() { - formatter.field("leading_colon", &Present); - } - if !self.value.segments.is_empty() { - formatter.field("segments", Lite(&self.value.segments)); - } - formatter.finish() - } -} -impl Debug for Lite<syn::PathArguments> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - match &self.value { - syn::PathArguments::None => formatter.write_str("PathArguments::None"), - syn::PathArguments::AngleBracketed(_val) => { - let mut formatter = formatter - .debug_struct("PathArguments::AngleBracketed"); - if _val.colon2_token.is_some() { - formatter.field("colon2_token", &Present); - } - if !_val.args.is_empty() { - formatter.field("args", Lite(&_val.args)); - } - formatter.finish() - } - syn::PathArguments::Parenthesized(_val) => { - let mut formatter = formatter - .debug_struct("PathArguments::Parenthesized"); - if !_val.inputs.is_empty() { - formatter.field("inputs", Lite(&_val.inputs)); - } - formatter.field("output", Lite(&_val.output)); - formatter.finish() - } - } - } -} -impl Debug for Lite<syn::PathSegment> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("PathSegment"); - formatter.field("ident", Lite(&self.value.ident)); - match self.value.arguments { - syn::PathArguments::None => {} - _ => { - formatter.field("arguments", Lite(&self.value.arguments)); - } - } - formatter.finish() - } -} -impl Debug for Lite<syn::PointerMutability> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - match &self.value { - syn::PointerMutability::Const(_val) => { - formatter.write_str("PointerMutability::Const")?; - Ok(()) - } - syn::PointerMutability::Mut(_val) => { - formatter.write_str("PointerMutability::Mut")?; - Ok(()) - } - } - } -} -impl Debug for Lite<syn::PreciseCapture> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("PreciseCapture"); - if !self.value.params.is_empty() { - formatter.field("params", Lite(&self.value.params)); - } - formatter.finish() - } -} -impl Debug for Lite<syn::PredicateLifetime> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("PredicateLifetime"); - formatter.field("lifetime", Lite(&self.value.lifetime)); - if !self.value.bounds.is_empty() { - formatter.field("bounds", Lite(&self.value.bounds)); - } - formatter.finish() - } -} -impl Debug for Lite<syn::PredicateType> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("PredicateType"); - if let Some(val) = &self.value.lifetimes { - #[derive(RefCast)] - #[repr(transparent)] - struct Print(syn::BoundLifetimes); - impl Debug for Print { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Some(")?; - Debug::fmt(Lite(&self.0), formatter)?; - formatter.write_str(")")?; - Ok(()) - } - } - formatter.field("lifetimes", Print::ref_cast(val)); - } - formatter.field("bounded_ty", Lite(&self.value.bounded_ty)); - if !self.value.bounds.is_empty() { - formatter.field("bounds", Lite(&self.value.bounds)); - } - formatter.finish() - } -} -impl Debug for Lite<syn::QSelf> { - fn 
fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("QSelf"); - formatter.field("ty", Lite(&self.value.ty)); - formatter.field("position", Lite(&self.value.position)); - if self.value.as_token.is_some() { - formatter.field("as_token", &Present); - } - formatter.finish() - } -} -impl Debug for Lite<syn::RangeLimits> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - match &self.value { - syn::RangeLimits::HalfOpen(_val) => { - formatter.write_str("RangeLimits::HalfOpen")?; - Ok(()) - } - syn::RangeLimits::Closed(_val) => { - formatter.write_str("RangeLimits::Closed")?; - Ok(()) - } - } - } -} -impl Debug for Lite<syn::Receiver> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("Receiver"); - if !self.value.attrs.is_empty() { - formatter.field("attrs", Lite(&self.value.attrs)); - } - if let Some(val) = &self.value.reference { - #[derive(RefCast)] - #[repr(transparent)] - struct Print((syn::token::And, Option<syn::Lifetime>)); - impl Debug for Print { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Some(")?; - Debug::fmt( - { - #[derive(RefCast)] - #[repr(transparent)] - struct Print(Option<syn::Lifetime>); - impl Debug for Print { - fn fmt( - &self, - formatter: &mut fmt::Formatter, - ) -> fmt::Result { - match &self.0 { - Some(_val) => { - formatter.write_str("Some(")?; - Debug::fmt(Lite(_val), formatter)?; - formatter.write_str(")")?; - Ok(()) - } - None => formatter.write_str("None"), - } - } - } - Print::ref_cast(&self.0.1) - }, - formatter, - )?; - formatter.write_str(")")?; - Ok(()) - } - } - formatter.field("reference", Print::ref_cast(val)); - } - if self.value.mutability.is_some() { - formatter.field("mutability", &Present); - } - if self.value.colon_token.is_some() { - formatter.field("colon_token", &Present); - } - formatter.field("ty", Lite(&self.value.ty)); - formatter.finish() - } -} -impl Debug for Lite<syn::ReturnType> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - match &self.value { - syn::ReturnType::Default => formatter.write_str("ReturnType::Default"), - syn::ReturnType::Type(_v0, _v1) => { - let mut formatter = formatter.debug_tuple("ReturnType::Type"); - formatter.field(Lite(_v1)); - formatter.finish() - } - } - } -} -impl Debug for Lite<syn::Signature> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("Signature"); - if self.value.constness.is_some() { - formatter.field("constness", &Present); - } - if self.value.asyncness.is_some() { - formatter.field("asyncness", &Present); - } - if self.value.unsafety.is_some() { - formatter.field("unsafety", &Present); - } - if let Some(val) = &self.value.abi { - #[derive(RefCast)] - #[repr(transparent)] - struct Print(syn::Abi); - impl Debug for Print { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Some(")?; - Debug::fmt(Lite(&self.0), formatter)?; - formatter.write_str(")")?; - Ok(()) - } - } - formatter.field("abi", Print::ref_cast(val)); - } - formatter.field("ident", Lite(&self.value.ident)); - formatter.field("generics", Lite(&self.value.generics)); - if !self.value.inputs.is_empty() { - formatter.field("inputs", Lite(&self.value.inputs)); - } - if let Some(val) = &self.value.variadic { - #[derive(RefCast)] - #[repr(transparent)] - struct Print(syn::Variadic); - impl Debug for Print { - fn fmt(&self, formatter: &mut fmt::Formatter) 
-> fmt::Result { - formatter.write_str("Some(")?; - Debug::fmt(Lite(&self.0), formatter)?; - formatter.write_str(")")?; - Ok(()) - } - } - formatter.field("variadic", Print::ref_cast(val)); - } - formatter.field("output", Lite(&self.value.output)); - formatter.finish() - } -} -impl Debug for Lite<syn::StaticMutability> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - match &self.value { - syn::StaticMutability::Mut(_val) => { - formatter.write_str("StaticMutability::Mut")?; - Ok(()) - } - syn::StaticMutability::None => formatter.write_str("StaticMutability::None"), - _ => unreachable!(), - } - } -} -impl Debug for Lite<syn::Stmt> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - match &self.value { - syn::Stmt::Local(_val) => { - let mut formatter = formatter.debug_struct("Stmt::Local"); - if !_val.attrs.is_empty() { - formatter.field("attrs", Lite(&_val.attrs)); - } - formatter.field("pat", Lite(&_val.pat)); - if let Some(val) = &_val.init { - #[derive(RefCast)] - #[repr(transparent)] - struct Print(syn::LocalInit); - impl Debug for Print { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Some(")?; - Debug::fmt(Lite(&self.0), formatter)?; - formatter.write_str(")")?; - Ok(()) - } - } - formatter.field("init", Print::ref_cast(val)); - } - formatter.finish() - } - syn::Stmt::Item(_val) => { - formatter.write_str("Stmt::Item")?; - formatter.write_str("(")?; - Debug::fmt(Lite(_val), formatter)?; - formatter.write_str(")")?; - Ok(()) - } - syn::Stmt::Expr(_v0, _v1) => { - let mut formatter = formatter.debug_tuple("Stmt::Expr"); - formatter.field(Lite(_v0)); - formatter - .field( - &super::Option { - present: _v1.is_some(), - }, - ); - formatter.finish() - } - syn::Stmt::Macro(_val) => { - let mut formatter = formatter.debug_struct("Stmt::Macro"); - if !_val.attrs.is_empty() { - formatter.field("attrs", Lite(&_val.attrs)); - } - formatter.field("mac", Lite(&_val.mac)); - if _val.semi_token.is_some() { - formatter.field("semi_token", &Present); - } - formatter.finish() - } - } - } -} -impl Debug for Lite<syn::StmtMacro> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("StmtMacro"); - if !self.value.attrs.is_empty() { - formatter.field("attrs", Lite(&self.value.attrs)); - } - formatter.field("mac", Lite(&self.value.mac)); - if self.value.semi_token.is_some() { - formatter.field("semi_token", &Present); - } - formatter.finish() - } -} -impl Debug for Lite<syn::TraitBound> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("TraitBound"); - if self.value.paren_token.is_some() { - formatter.field("paren_token", &Present); - } - match self.value.modifier { - syn::TraitBoundModifier::None => {} - _ => { - formatter.field("modifier", Lite(&self.value.modifier)); - } - } - if let Some(val) = &self.value.lifetimes { - #[derive(RefCast)] - #[repr(transparent)] - struct Print(syn::BoundLifetimes); - impl Debug for Print { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Some(")?; - Debug::fmt(Lite(&self.0), formatter)?; - formatter.write_str(")")?; - Ok(()) - } - } - formatter.field("lifetimes", Print::ref_cast(val)); - } - formatter.field("path", Lite(&self.value.path)); - formatter.finish() - } -} -impl Debug for Lite<syn::TraitBoundModifier> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - match &self.value { - syn::TraitBoundModifier::None => { - 
formatter.write_str("TraitBoundModifier::None") - } - syn::TraitBoundModifier::Maybe(_val) => { - formatter.write_str("TraitBoundModifier::Maybe")?; - Ok(()) - } - } - } -} -impl Debug for Lite<syn::TraitItem> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - match &self.value { - syn::TraitItem::Const(_val) => { - let mut formatter = formatter.debug_struct("TraitItem::Const"); - if !_val.attrs.is_empty() { - formatter.field("attrs", Lite(&_val.attrs)); - } - formatter.field("ident", Lite(&_val.ident)); - formatter.field("generics", Lite(&_val.generics)); - formatter.field("ty", Lite(&_val.ty)); - if let Some(val) = &_val.default { - #[derive(RefCast)] - #[repr(transparent)] - struct Print((syn::token::Eq, syn::Expr)); - impl Debug for Print { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Some(")?; - Debug::fmt(Lite(&self.0.1), formatter)?; - formatter.write_str(")")?; - Ok(()) - } - } - formatter.field("default", Print::ref_cast(val)); - } - formatter.finish() - } - syn::TraitItem::Fn(_val) => { - let mut formatter = formatter.debug_struct("TraitItem::Fn"); - if !_val.attrs.is_empty() { - formatter.field("attrs", Lite(&_val.attrs)); - } - formatter.field("sig", Lite(&_val.sig)); - if let Some(val) = &_val.default { - #[derive(RefCast)] - #[repr(transparent)] - struct Print(syn::Block); - impl Debug for Print { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Some(")?; - Debug::fmt(Lite(&self.0), formatter)?; - formatter.write_str(")")?; - Ok(()) - } - } - formatter.field("default", Print::ref_cast(val)); - } - if _val.semi_token.is_some() { - formatter.field("semi_token", &Present); - } - formatter.finish() - } - syn::TraitItem::Type(_val) => { - let mut formatter = formatter.debug_struct("TraitItem::Type"); - if !_val.attrs.is_empty() { - formatter.field("attrs", Lite(&_val.attrs)); - } - formatter.field("ident", Lite(&_val.ident)); - formatter.field("generics", Lite(&_val.generics)); - if _val.colon_token.is_some() { - formatter.field("colon_token", &Present); - } - if !_val.bounds.is_empty() { - formatter.field("bounds", Lite(&_val.bounds)); - } - if let Some(val) = &_val.default { - #[derive(RefCast)] - #[repr(transparent)] - struct Print((syn::token::Eq, syn::Type)); - impl Debug for Print { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Some(")?; - Debug::fmt(Lite(&self.0.1), formatter)?; - formatter.write_str(")")?; - Ok(()) - } - } - formatter.field("default", Print::ref_cast(val)); - } - formatter.finish() - } - syn::TraitItem::Macro(_val) => { - let mut formatter = formatter.debug_struct("TraitItem::Macro"); - if !_val.attrs.is_empty() { - formatter.field("attrs", Lite(&_val.attrs)); - } - formatter.field("mac", Lite(&_val.mac)); - if _val.semi_token.is_some() { - formatter.field("semi_token", &Present); - } - formatter.finish() - } - syn::TraitItem::Verbatim(_val) => { - formatter.write_str("TraitItem::Verbatim")?; - formatter.write_str("(`")?; - Display::fmt(_val, formatter)?; - formatter.write_str("`)")?; - Ok(()) - } - _ => unreachable!(), - } - } -} -impl Debug for Lite<syn::TraitItemConst> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("TraitItemConst"); - if !self.value.attrs.is_empty() { - formatter.field("attrs", Lite(&self.value.attrs)); - } - formatter.field("ident", Lite(&self.value.ident)); - formatter.field("generics", Lite(&self.value.generics)); - 
formatter.field("ty", Lite(&self.value.ty)); - if let Some(val) = &self.value.default { - #[derive(RefCast)] - #[repr(transparent)] - struct Print((syn::token::Eq, syn::Expr)); - impl Debug for Print { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Some(")?; - Debug::fmt(Lite(&self.0.1), formatter)?; - formatter.write_str(")")?; - Ok(()) - } - } - formatter.field("default", Print::ref_cast(val)); - } - formatter.finish() - } -} -impl Debug for Lite<syn::TraitItemFn> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("TraitItemFn"); - if !self.value.attrs.is_empty() { - formatter.field("attrs", Lite(&self.value.attrs)); - } - formatter.field("sig", Lite(&self.value.sig)); - if let Some(val) = &self.value.default { - #[derive(RefCast)] - #[repr(transparent)] - struct Print(syn::Block); - impl Debug for Print { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Some(")?; - Debug::fmt(Lite(&self.0), formatter)?; - formatter.write_str(")")?; - Ok(()) - } - } - formatter.field("default", Print::ref_cast(val)); - } - if self.value.semi_token.is_some() { - formatter.field("semi_token", &Present); - } - formatter.finish() - } -} -impl Debug for Lite<syn::TraitItemMacro> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("TraitItemMacro"); - if !self.value.attrs.is_empty() { - formatter.field("attrs", Lite(&self.value.attrs)); - } - formatter.field("mac", Lite(&self.value.mac)); - if self.value.semi_token.is_some() { - formatter.field("semi_token", &Present); - } - formatter.finish() - } -} -impl Debug for Lite<syn::TraitItemType> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("TraitItemType"); - if !self.value.attrs.is_empty() { - formatter.field("attrs", Lite(&self.value.attrs)); - } - formatter.field("ident", Lite(&self.value.ident)); - formatter.field("generics", Lite(&self.value.generics)); - if self.value.colon_token.is_some() { - formatter.field("colon_token", &Present); - } - if !self.value.bounds.is_empty() { - formatter.field("bounds", Lite(&self.value.bounds)); - } - if let Some(val) = &self.value.default { - #[derive(RefCast)] - #[repr(transparent)] - struct Print((syn::token::Eq, syn::Type)); - impl Debug for Print { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Some(")?; - Debug::fmt(Lite(&self.0.1), formatter)?; - formatter.write_str(")")?; - Ok(()) - } - } - formatter.field("default", Print::ref_cast(val)); - } - formatter.finish() - } -} -impl Debug for Lite<syn::Type> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - match &self.value { - syn::Type::Array(_val) => { - let mut formatter = formatter.debug_struct("Type::Array"); - formatter.field("elem", Lite(&_val.elem)); - formatter.field("len", Lite(&_val.len)); - formatter.finish() - } - syn::Type::BareFn(_val) => { - let mut formatter = formatter.debug_struct("Type::BareFn"); - if let Some(val) = &_val.lifetimes { - #[derive(RefCast)] - #[repr(transparent)] - struct Print(syn::BoundLifetimes); - impl Debug for Print { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Some(")?; - Debug::fmt(Lite(&self.0), formatter)?; - formatter.write_str(")")?; - Ok(()) - } - } - formatter.field("lifetimes", Print::ref_cast(val)); - } - if _val.unsafety.is_some() { - 
formatter.field("unsafety", &Present); - } - if let Some(val) = &_val.abi { - #[derive(RefCast)] - #[repr(transparent)] - struct Print(syn::Abi); - impl Debug for Print { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Some(")?; - Debug::fmt(Lite(&self.0), formatter)?; - formatter.write_str(")")?; - Ok(()) - } - } - formatter.field("abi", Print::ref_cast(val)); - } - if !_val.inputs.is_empty() { - formatter.field("inputs", Lite(&_val.inputs)); - } - if let Some(val) = &_val.variadic { - #[derive(RefCast)] - #[repr(transparent)] - struct Print(syn::BareVariadic); - impl Debug for Print { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Some(")?; - Debug::fmt(Lite(&self.0), formatter)?; - formatter.write_str(")")?; - Ok(()) - } - } - formatter.field("variadic", Print::ref_cast(val)); - } - formatter.field("output", Lite(&_val.output)); - formatter.finish() - } - syn::Type::Group(_val) => { - let mut formatter = formatter.debug_struct("Type::Group"); - formatter.field("elem", Lite(&_val.elem)); - formatter.finish() - } - syn::Type::ImplTrait(_val) => { - let mut formatter = formatter.debug_struct("Type::ImplTrait"); - if !_val.bounds.is_empty() { - formatter.field("bounds", Lite(&_val.bounds)); - } - formatter.finish() - } - syn::Type::Infer(_val) => { - let mut formatter = formatter.debug_struct("Type::Infer"); - formatter.finish() - } - syn::Type::Macro(_val) => { - let mut formatter = formatter.debug_struct("Type::Macro"); - formatter.field("mac", Lite(&_val.mac)); - formatter.finish() - } - syn::Type::Never(_val) => { - let mut formatter = formatter.debug_struct("Type::Never"); - formatter.finish() - } - syn::Type::Paren(_val) => { - let mut formatter = formatter.debug_struct("Type::Paren"); - formatter.field("elem", Lite(&_val.elem)); - formatter.finish() - } - syn::Type::Path(_val) => { - let mut formatter = formatter.debug_struct("Type::Path"); - if let Some(val) = &_val.qself { - #[derive(RefCast)] - #[repr(transparent)] - struct Print(syn::QSelf); - impl Debug for Print { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Some(")?; - Debug::fmt(Lite(&self.0), formatter)?; - formatter.write_str(")")?; - Ok(()) - } - } - formatter.field("qself", Print::ref_cast(val)); - } - formatter.field("path", Lite(&_val.path)); - formatter.finish() - } - syn::Type::Ptr(_val) => { - let mut formatter = formatter.debug_struct("Type::Ptr"); - if _val.const_token.is_some() { - formatter.field("const_token", &Present); - } - if _val.mutability.is_some() { - formatter.field("mutability", &Present); - } - formatter.field("elem", Lite(&_val.elem)); - formatter.finish() - } - syn::Type::Reference(_val) => { - let mut formatter = formatter.debug_struct("Type::Reference"); - if let Some(val) = &_val.lifetime { - #[derive(RefCast)] - #[repr(transparent)] - struct Print(syn::Lifetime); - impl Debug for Print { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Some(")?; - Debug::fmt(Lite(&self.0), formatter)?; - formatter.write_str(")")?; - Ok(()) - } - } - formatter.field("lifetime", Print::ref_cast(val)); - } - if _val.mutability.is_some() { - formatter.field("mutability", &Present); - } - formatter.field("elem", Lite(&_val.elem)); - formatter.finish() - } - syn::Type::Slice(_val) => { - let mut formatter = formatter.debug_struct("Type::Slice"); - formatter.field("elem", Lite(&_val.elem)); - formatter.finish() - } - syn::Type::TraitObject(_val) => { - let 
mut formatter = formatter.debug_struct("Type::TraitObject"); - if _val.dyn_token.is_some() { - formatter.field("dyn_token", &Present); - } - if !_val.bounds.is_empty() { - formatter.field("bounds", Lite(&_val.bounds)); - } - formatter.finish() - } - syn::Type::Tuple(_val) => { - let mut formatter = formatter.debug_struct("Type::Tuple"); - if !_val.elems.is_empty() { - formatter.field("elems", Lite(&_val.elems)); - } - formatter.finish() - } - syn::Type::Verbatim(_val) => { - formatter.write_str("Type::Verbatim")?; - formatter.write_str("(`")?; - Display::fmt(_val, formatter)?; - formatter.write_str("`)")?; - Ok(()) - } - _ => unreachable!(), - } - } -} -impl Debug for Lite<syn::TypeArray> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("TypeArray"); - formatter.field("elem", Lite(&self.value.elem)); - formatter.field("len", Lite(&self.value.len)); - formatter.finish() - } -} -impl Debug for Lite<syn::TypeBareFn> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("TypeBareFn"); - if let Some(val) = &self.value.lifetimes { - #[derive(RefCast)] - #[repr(transparent)] - struct Print(syn::BoundLifetimes); - impl Debug for Print { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Some(")?; - Debug::fmt(Lite(&self.0), formatter)?; - formatter.write_str(")")?; - Ok(()) - } - } - formatter.field("lifetimes", Print::ref_cast(val)); - } - if self.value.unsafety.is_some() { - formatter.field("unsafety", &Present); - } - if let Some(val) = &self.value.abi { - #[derive(RefCast)] - #[repr(transparent)] - struct Print(syn::Abi); - impl Debug for Print { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Some(")?; - Debug::fmt(Lite(&self.0), formatter)?; - formatter.write_str(")")?; - Ok(()) - } - } - formatter.field("abi", Print::ref_cast(val)); - } - if !self.value.inputs.is_empty() { - formatter.field("inputs", Lite(&self.value.inputs)); - } - if let Some(val) = &self.value.variadic { - #[derive(RefCast)] - #[repr(transparent)] - struct Print(syn::BareVariadic); - impl Debug for Print { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Some(")?; - Debug::fmt(Lite(&self.0), formatter)?; - formatter.write_str(")")?; - Ok(()) - } - } - formatter.field("variadic", Print::ref_cast(val)); - } - formatter.field("output", Lite(&self.value.output)); - formatter.finish() - } -} -impl Debug for Lite<syn::TypeGroup> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("TypeGroup"); - formatter.field("elem", Lite(&self.value.elem)); - formatter.finish() - } -} -impl Debug for Lite<syn::TypeImplTrait> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("TypeImplTrait"); - if !self.value.bounds.is_empty() { - formatter.field("bounds", Lite(&self.value.bounds)); - } - formatter.finish() - } -} -impl Debug for Lite<syn::TypeInfer> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("TypeInfer"); - formatter.finish() - } -} -impl Debug for Lite<syn::TypeMacro> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("TypeMacro"); - formatter.field("mac", Lite(&self.value.mac)); - formatter.finish() - } -} -impl Debug for Lite<syn::TypeNever> { - fn 
fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("TypeNever"); - formatter.finish() - } -} -impl Debug for Lite<syn::TypeParam> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("TypeParam"); - if !self.value.attrs.is_empty() { - formatter.field("attrs", Lite(&self.value.attrs)); - } - formatter.field("ident", Lite(&self.value.ident)); - if self.value.colon_token.is_some() { - formatter.field("colon_token", &Present); - } - if !self.value.bounds.is_empty() { - formatter.field("bounds", Lite(&self.value.bounds)); - } - if self.value.eq_token.is_some() { - formatter.field("eq_token", &Present); - } - if let Some(val) = &self.value.default { - #[derive(RefCast)] - #[repr(transparent)] - struct Print(syn::Type); - impl Debug for Print { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Some(")?; - Debug::fmt(Lite(&self.0), formatter)?; - formatter.write_str(")")?; - Ok(()) - } - } - formatter.field("default", Print::ref_cast(val)); - } - formatter.finish() - } -} -impl Debug for Lite<syn::TypeParamBound> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - match &self.value { - syn::TypeParamBound::Trait(_val) => { - formatter.write_str("TypeParamBound::Trait")?; - formatter.write_str("(")?; - Debug::fmt(Lite(_val), formatter)?; - formatter.write_str(")")?; - Ok(()) - } - syn::TypeParamBound::Lifetime(_val) => { - let mut formatter = formatter.debug_struct("TypeParamBound::Lifetime"); - formatter.field("ident", Lite(&_val.ident)); - formatter.finish() - } - syn::TypeParamBound::PreciseCapture(_val) => { - formatter.write_str("TypeParamBound::PreciseCapture")?; - formatter.write_str("(")?; - Debug::fmt(Lite(_val), formatter)?; - formatter.write_str(")")?; - Ok(()) - } - syn::TypeParamBound::Verbatim(_val) => { - formatter.write_str("TypeParamBound::Verbatim")?; - formatter.write_str("(`")?; - Display::fmt(_val, formatter)?; - formatter.write_str("`)")?; - Ok(()) - } - _ => unreachable!(), - } - } -} -impl Debug for Lite<syn::TypeParen> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("TypeParen"); - formatter.field("elem", Lite(&self.value.elem)); - formatter.finish() - } -} -impl Debug for Lite<syn::TypePath> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("TypePath"); - if let Some(val) = &self.value.qself { - #[derive(RefCast)] - #[repr(transparent)] - struct Print(syn::QSelf); - impl Debug for Print { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Some(")?; - Debug::fmt(Lite(&self.0), formatter)?; - formatter.write_str(")")?; - Ok(()) - } - } - formatter.field("qself", Print::ref_cast(val)); - } - formatter.field("path", Lite(&self.value.path)); - formatter.finish() - } -} -impl Debug for Lite<syn::TypePtr> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("TypePtr"); - if self.value.const_token.is_some() { - formatter.field("const_token", &Present); - } - if self.value.mutability.is_some() { - formatter.field("mutability", &Present); - } - formatter.field("elem", Lite(&self.value.elem)); - formatter.finish() - } -} -impl Debug for Lite<syn::TypeReference> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("TypeReference"); - if let 
Some(val) = &self.value.lifetime { - #[derive(RefCast)] - #[repr(transparent)] - struct Print(syn::Lifetime); - impl Debug for Print { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Some(")?; - Debug::fmt(Lite(&self.0), formatter)?; - formatter.write_str(")")?; - Ok(()) - } - } - formatter.field("lifetime", Print::ref_cast(val)); - } - if self.value.mutability.is_some() { - formatter.field("mutability", &Present); - } - formatter.field("elem", Lite(&self.value.elem)); - formatter.finish() - } -} -impl Debug for Lite<syn::TypeSlice> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("TypeSlice"); - formatter.field("elem", Lite(&self.value.elem)); - formatter.finish() - } -} -impl Debug for Lite<syn::TypeTraitObject> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("TypeTraitObject"); - if self.value.dyn_token.is_some() { - formatter.field("dyn_token", &Present); - } - if !self.value.bounds.is_empty() { - formatter.field("bounds", Lite(&self.value.bounds)); - } - formatter.finish() - } -} -impl Debug for Lite<syn::TypeTuple> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("TypeTuple"); - if !self.value.elems.is_empty() { - formatter.field("elems", Lite(&self.value.elems)); - } - formatter.finish() - } -} -impl Debug for Lite<syn::UnOp> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - match &self.value { - syn::UnOp::Deref(_val) => { - formatter.write_str("UnOp::Deref")?; - Ok(()) - } - syn::UnOp::Not(_val) => { - formatter.write_str("UnOp::Not")?; - Ok(()) - } - syn::UnOp::Neg(_val) => { - formatter.write_str("UnOp::Neg")?; - Ok(()) - } - _ => unreachable!(), - } - } -} -impl Debug for Lite<syn::UseGlob> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("UseGlob"); - formatter.finish() - } -} -impl Debug for Lite<syn::UseGroup> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("UseGroup"); - if !self.value.items.is_empty() { - formatter.field("items", Lite(&self.value.items)); - } - formatter.finish() - } -} -impl Debug for Lite<syn::UseName> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("UseName"); - formatter.field("ident", Lite(&self.value.ident)); - formatter.finish() - } -} -impl Debug for Lite<syn::UsePath> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("UsePath"); - formatter.field("ident", Lite(&self.value.ident)); - formatter.field("tree", Lite(&self.value.tree)); - formatter.finish() - } -} -impl Debug for Lite<syn::UseRename> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("UseRename"); - formatter.field("ident", Lite(&self.value.ident)); - formatter.field("rename", Lite(&self.value.rename)); - formatter.finish() - } -} -impl Debug for Lite<syn::UseTree> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - match &self.value { - syn::UseTree::Path(_val) => { - formatter.write_str("UseTree::Path")?; - formatter.write_str("(")?; - Debug::fmt(Lite(_val), formatter)?; - formatter.write_str(")")?; - Ok(()) - } - syn::UseTree::Name(_val) => { - formatter.write_str("UseTree::Name")?; - formatter.write_str("(")?; - 
Debug::fmt(Lite(_val), formatter)?; - formatter.write_str(")")?; - Ok(()) - } - syn::UseTree::Rename(_val) => { - formatter.write_str("UseTree::Rename")?; - formatter.write_str("(")?; - Debug::fmt(Lite(_val), formatter)?; - formatter.write_str(")")?; - Ok(()) - } - syn::UseTree::Glob(_val) => { - formatter.write_str("UseTree::Glob")?; - formatter.write_str("(")?; - Debug::fmt(Lite(_val), formatter)?; - formatter.write_str(")")?; - Ok(()) - } - syn::UseTree::Group(_val) => { - formatter.write_str("UseTree::Group")?; - formatter.write_str("(")?; - Debug::fmt(Lite(_val), formatter)?; - formatter.write_str(")")?; - Ok(()) - } - } - } -} -impl Debug for Lite<syn::Variadic> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("Variadic"); - if !self.value.attrs.is_empty() { - formatter.field("attrs", Lite(&self.value.attrs)); - } - if let Some(val) = &self.value.pat { - #[derive(RefCast)] - #[repr(transparent)] - struct Print((Box<syn::Pat>, syn::token::Colon)); - impl Debug for Print { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Some(")?; - Debug::fmt(Lite(&self.0.0), formatter)?; - formatter.write_str(")")?; - Ok(()) - } - } - formatter.field("pat", Print::ref_cast(val)); - } - if self.value.comma.is_some() { - formatter.field("comma", &Present); - } - formatter.finish() - } -} -impl Debug for Lite<syn::Variant> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("Variant"); - if !self.value.attrs.is_empty() { - formatter.field("attrs", Lite(&self.value.attrs)); - } - formatter.field("ident", Lite(&self.value.ident)); - formatter.field("fields", Lite(&self.value.fields)); - if let Some(val) = &self.value.discriminant { - #[derive(RefCast)] - #[repr(transparent)] - struct Print((syn::token::Eq, syn::Expr)); - impl Debug for Print { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Some(")?; - Debug::fmt(Lite(&self.0.1), formatter)?; - formatter.write_str(")")?; - Ok(()) - } - } - formatter.field("discriminant", Print::ref_cast(val)); - } - formatter.finish() - } -} -impl Debug for Lite<syn::VisRestricted> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("VisRestricted"); - if self.value.in_token.is_some() { - formatter.field("in_token", &Present); - } - formatter.field("path", Lite(&self.value.path)); - formatter.finish() - } -} -impl Debug for Lite<syn::Visibility> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - match &self.value { - syn::Visibility::Public(_val) => { - formatter.write_str("Visibility::Public")?; - Ok(()) - } - syn::Visibility::Restricted(_val) => { - let mut formatter = formatter.debug_struct("Visibility::Restricted"); - if _val.in_token.is_some() { - formatter.field("in_token", &Present); - } - formatter.field("path", Lite(&_val.path)); - formatter.finish() - } - syn::Visibility::Inherited => formatter.write_str("Visibility::Inherited"), - } - } -} -impl Debug for Lite<syn::WhereClause> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut formatter = formatter.debug_struct("WhereClause"); - if !self.value.predicates.is_empty() { - formatter.field("predicates", Lite(&self.value.predicates)); - } - formatter.finish() - } -} -impl Debug for Lite<syn::WherePredicate> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - match &self.value { - 
syn::WherePredicate::Lifetime(_val) => { - formatter.write_str("WherePredicate::Lifetime")?; - formatter.write_str("(")?; - Debug::fmt(Lite(_val), formatter)?; - formatter.write_str(")")?; - Ok(()) - } - syn::WherePredicate::Type(_val) => { - formatter.write_str("WherePredicate::Type")?; - formatter.write_str("(")?; - Debug::fmt(Lite(_val), formatter)?; - formatter.write_str(")")?; - Ok(()) - } - _ => unreachable!(), - } - } -} -impl Debug for Lite<syn::token::Abstract> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Token![abstract]") - } -} -impl Debug for Lite<syn::token::And> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Token![&]") - } -} -impl Debug for Lite<syn::token::AndAnd> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Token![&&]") - } -} -impl Debug for Lite<syn::token::AndEq> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Token![&=]") - } -} -impl Debug for Lite<syn::token::As> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Token![as]") - } -} -impl Debug for Lite<syn::token::Async> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Token![async]") - } -} -impl Debug for Lite<syn::token::At> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Token![@]") - } -} -impl Debug for Lite<syn::token::Auto> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Token![auto]") - } -} -impl Debug for Lite<syn::token::Await> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Token![await]") - } -} -impl Debug for Lite<syn::token::Become> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Token![become]") - } -} -impl Debug for Lite<syn::token::Box> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Token![box]") - } -} -impl Debug for Lite<syn::token::Break> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Token![break]") - } -} -impl Debug for Lite<syn::token::Caret> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Token![^]") - } -} -impl Debug for Lite<syn::token::CaretEq> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Token![^=]") - } -} -impl Debug for Lite<syn::token::Colon> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Token![:]") - } -} -impl Debug for Lite<syn::token::Comma> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Token![,]") - } -} -impl Debug for Lite<syn::token::Const> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Token![const]") - } -} -impl Debug for Lite<syn::token::Continue> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Token![continue]") - } -} -impl Debug for Lite<syn::token::Crate> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Token![crate]") - } -} -impl Debug for Lite<syn::token::Default> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Token![default]") - } -} -impl Debug for Lite<syn::token::Do> { - fn fmt(&self, formatter: &mut fmt::Formatter) 
-> fmt::Result { - formatter.write_str("Token![do]") - } -} -impl Debug for Lite<syn::token::Dollar> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Token![$]") - } -} -impl Debug for Lite<syn::token::Dot> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Token![.]") - } -} -impl Debug for Lite<syn::token::DotDot> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Token![..]") - } -} -impl Debug for Lite<syn::token::DotDotDot> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Token![...]") - } -} -impl Debug for Lite<syn::token::DotDotEq> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Token![..=]") - } -} -impl Debug for Lite<syn::token::Dyn> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Token![dyn]") - } -} -impl Debug for Lite<syn::token::Else> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Token![else]") - } -} -impl Debug for Lite<syn::token::Enum> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Token![enum]") - } -} -impl Debug for Lite<syn::token::Eq> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Token![=]") - } -} -impl Debug for Lite<syn::token::EqEq> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Token![==]") - } -} -impl Debug for Lite<syn::token::Extern> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Token![extern]") - } -} -impl Debug for Lite<syn::token::FatArrow> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Token![=>]") - } -} -impl Debug for Lite<syn::token::Final> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Token![final]") - } -} -impl Debug for Lite<syn::token::Fn> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Token![fn]") - } -} -impl Debug for Lite<syn::token::For> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Token![for]") - } -} -impl Debug for Lite<syn::token::Ge> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Token![>=]") - } -} -impl Debug for Lite<syn::token::Gt> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Token![>]") - } -} -impl Debug for Lite<syn::token::If> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Token![if]") - } -} -impl Debug for Lite<syn::token::Impl> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Token![impl]") - } -} -impl Debug for Lite<syn::token::In> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Token![in]") - } -} -impl Debug for Lite<syn::token::LArrow> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Token![<-]") - } -} -impl Debug for Lite<syn::token::Le> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Token![<=]") - } -} -impl Debug for Lite<syn::token::Let> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Token![let]") - } -} -impl Debug for Lite<syn::token::Loop> { - fn 
fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Token![loop]") - } -} -impl Debug for Lite<syn::token::Lt> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Token![<]") - } -} -impl Debug for Lite<syn::token::Macro> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Token![macro]") - } -} -impl Debug for Lite<syn::token::Match> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Token![match]") - } -} -impl Debug for Lite<syn::token::Minus> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Token![-]") - } -} -impl Debug for Lite<syn::token::MinusEq> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Token![-=]") - } -} -impl Debug for Lite<syn::token::Mod> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Token![mod]") - } -} -impl Debug for Lite<syn::token::Move> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Token![move]") - } -} -impl Debug for Lite<syn::token::Mut> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Token![mut]") - } -} -impl Debug for Lite<syn::token::Ne> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Token![!=]") - } -} -impl Debug for Lite<syn::token::Not> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Token![!]") - } -} -impl Debug for Lite<syn::token::Or> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Token![|]") - } -} -impl Debug for Lite<syn::token::OrEq> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Token![|=]") - } -} -impl Debug for Lite<syn::token::OrOr> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Token![||]") - } -} -impl Debug for Lite<syn::token::Override> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Token![override]") - } -} -impl Debug for Lite<syn::token::PathSep> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Token![::]") - } -} -impl Debug for Lite<syn::token::Percent> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Token![%]") - } -} -impl Debug for Lite<syn::token::PercentEq> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Token![%=]") - } -} -impl Debug for Lite<syn::token::Plus> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Token![+]") - } -} -impl Debug for Lite<syn::token::PlusEq> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Token![+=]") - } -} -impl Debug for Lite<syn::token::Pound> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Token![#]") - } -} -impl Debug for Lite<syn::token::Priv> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Token![priv]") - } -} -impl Debug for Lite<syn::token::Pub> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Token![pub]") - } -} -impl Debug for Lite<syn::token::Question> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Token![?]") 
- } -} -impl Debug for Lite<syn::token::RArrow> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Token![->]") - } -} -impl Debug for Lite<syn::token::Raw> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Token![raw]") - } -} -impl Debug for Lite<syn::token::Ref> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Token![ref]") - } -} -impl Debug for Lite<syn::token::Return> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Token![return]") - } -} -impl Debug for Lite<syn::token::SelfType> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Token![Self]") - } -} -impl Debug for Lite<syn::token::SelfValue> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Token![self]") - } -} -impl Debug for Lite<syn::token::Semi> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Token![;]") - } -} -impl Debug for Lite<syn::token::Shl> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Token![<<]") - } -} -impl Debug for Lite<syn::token::ShlEq> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Token![<<=]") - } -} -impl Debug for Lite<syn::token::Shr> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Token![>>]") - } -} -impl Debug for Lite<syn::token::ShrEq> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Token![>>=]") - } -} -impl Debug for Lite<syn::token::Slash> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Token![/]") - } -} -impl Debug for Lite<syn::token::SlashEq> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Token![/=]") - } -} -impl Debug for Lite<syn::token::Star> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Token![*]") - } -} -impl Debug for Lite<syn::token::StarEq> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Token![*=]") - } -} -impl Debug for Lite<syn::token::Static> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Token![static]") - } -} -impl Debug for Lite<syn::token::Struct> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Token![struct]") - } -} -impl Debug for Lite<syn::token::Super> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Token![super]") - } -} -impl Debug for Lite<syn::token::Tilde> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Token![~]") - } -} -impl Debug for Lite<syn::token::Trait> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Token![trait]") - } -} -impl Debug for Lite<syn::token::Try> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Token![try]") - } -} -impl Debug for Lite<syn::token::Type> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Token![type]") - } -} -impl Debug for Lite<syn::token::Typeof> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Token![typeof]") - } -} -impl Debug for Lite<syn::token::Underscore> { - fn fmt(&self, 
formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Token![_]") - } -} -impl Debug for Lite<syn::token::Union> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Token![union]") - } -} -impl Debug for Lite<syn::token::Unsafe> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Token![unsafe]") - } -} -impl Debug for Lite<syn::token::Unsized> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Token![unsized]") - } -} -impl Debug for Lite<syn::token::Use> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Token![use]") - } -} -impl Debug for Lite<syn::token::Virtual> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Token![virtual]") - } -} -impl Debug for Lite<syn::token::Where> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Token![where]") - } -} -impl Debug for Lite<syn::token::While> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Token![while]") - } -} -impl Debug for Lite<syn::token::Yield> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Token![yield]") - } -} diff --git a/vendor/syn/tests/debug/mod.rs b/vendor/syn/tests/debug/mod.rs deleted file mode 100644 index 7ab2b795d5d350..00000000000000 --- a/vendor/syn/tests/debug/mod.rs +++ /dev/null @@ -1,147 +0,0 @@ -#![allow( - clippy::no_effect_underscore_binding, - clippy::too_many_lines, - clippy::used_underscore_binding -)] - -#[rustfmt::skip] -mod gen; - -use proc_macro2::{Ident, Literal, TokenStream}; -use ref_cast::RefCast; -use std::fmt::{self, Debug}; -use std::ops::Deref; -use syn::punctuated::Punctuated; - -#[derive(RefCast)] -#[repr(transparent)] -pub struct Lite<T: ?Sized> { - value: T, -} - -#[allow(non_snake_case)] -pub fn Lite<T: ?Sized>(value: &T) -> &Lite<T> { - Lite::ref_cast(value) -} - -impl<T: ?Sized> Deref for Lite<T> { - type Target = T; - - fn deref(&self) -> &Self::Target { - &self.value - } -} - -impl Debug for Lite<bool> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - write!(formatter, "{}", self.value) - } -} - -impl Debug for Lite<u32> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - write!(formatter, "{}", self.value) - } -} - -impl Debug for Lite<usize> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - write!(formatter, "{}", self.value) - } -} - -impl Debug for Lite<String> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - write!(formatter, "{:?}", self.value) - } -} - -impl Debug for Lite<Ident> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - write!(formatter, "{:?}", self.value.to_string()) - } -} - -impl Debug for Lite<Literal> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - write!(formatter, "{}", self.value) - } -} - -impl Debug for Lite<TokenStream> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let string = self.value.to_string(); - if string.len() <= 80 { - write!(formatter, "TokenStream(`{}`)", self.value) - } else { - formatter - .debug_tuple("TokenStream") - .field(&format_args!("`{}`", string)) - .finish() - } - } -} - -impl<T> Debug for Lite<&T> -where - Lite<T>: Debug, -{ - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - Debug::fmt(Lite(self.value), formatter) - } -} - -impl<T> Debug for Lite<Box<T>> 
-where - Lite<T>: Debug, -{ - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - Debug::fmt(Lite(&*self.value), formatter) - } -} - -impl<T> Debug for Lite<Vec<T>> -where - Lite<T>: Debug, -{ - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter - .debug_list() - .entries(self.value.iter().map(Lite)) - .finish() - } -} - -impl<T, P> Debug for Lite<Punctuated<T, P>> -where - Lite<T>: Debug, - Lite<P>: Debug, -{ - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - let mut list = formatter.debug_list(); - for pair in self.pairs() { - let (node, punct) = pair.into_tuple(); - list.entry(Lite(node)); - list.entries(punct.map(Lite)); - } - list.finish() - } -} - -struct Present; - -impl Debug for Present { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Some") - } -} - -struct Option { - present: bool, -} - -impl Debug for Option { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str(if self.present { "Some" } else { "None" }) - } -} diff --git a/vendor/syn/tests/macros/mod.rs b/vendor/syn/tests/macros/mod.rs deleted file mode 100644 index 9c9a957f71c27f..00000000000000 --- a/vendor/syn/tests/macros/mod.rs +++ /dev/null @@ -1,7 +0,0 @@ -macro_rules! errorf { - ($($tt:tt)*) => {{ - use ::std::io::Write; - let stderr = ::std::io::stderr(); - write!(stderr.lock(), $($tt)*).unwrap(); - }}; -} diff --git a/vendor/syn/tests/regression.rs b/vendor/syn/tests/regression.rs deleted file mode 100644 index 5c7fcddc8da9a6..00000000000000 --- a/vendor/syn/tests/regression.rs +++ /dev/null @@ -1,5 +0,0 @@ -#![allow(clippy::let_underscore_untyped, clippy::uninlined_format_args)] - -mod regression { - automod::dir!("tests/regression"); -} diff --git a/vendor/syn/tests/regression/issue1108.rs b/vendor/syn/tests/regression/issue1108.rs deleted file mode 100644 index 11a82adaadb0e7..00000000000000 --- a/vendor/syn/tests/regression/issue1108.rs +++ /dev/null @@ -1,5 +0,0 @@ -#[test] -fn issue1108() { - let data = "impl<x<>>::x for"; - let _ = syn::parse_file(data); -} diff --git a/vendor/syn/tests/regression/issue1235.rs b/vendor/syn/tests/regression/issue1235.rs deleted file mode 100644 index 8836030664b8b7..00000000000000 --- a/vendor/syn/tests/regression/issue1235.rs +++ /dev/null @@ -1,32 +0,0 @@ -use proc_macro2::{Delimiter, Group}; -use quote::quote; - -#[test] -fn main() { - // Okay. Rustc allows top-level `static` with no value syntactically, but - // not semantically. Syn parses as Item::Verbatim. - let tokens = quote! { - pub static FOO: usize; - pub static BAR: usize; - }; - let file = syn::parse2::<syn::File>(tokens).unwrap(); - println!("{:#?}", file); - - // Okay. - let inner = Group::new( - Delimiter::None, - quote!(static FOO: usize = 0; pub static BAR: usize = 0), - ); - let tokens = quote!(pub #inner;); - let file = syn::parse2::<syn::File>(tokens).unwrap(); - println!("{:#?}", file); - - // Formerly parser crash. 
- let inner = Group::new( - Delimiter::None, - quote!(static FOO: usize; pub static BAR: usize), - ); - let tokens = quote!(pub #inner;); - let file = syn::parse2::<syn::File>(tokens).unwrap(); - println!("{:#?}", file); -} diff --git a/vendor/syn/tests/repo/mod.rs b/vendor/syn/tests/repo/mod.rs deleted file mode 100644 index 8cbb83bf8e79fd..00000000000000 --- a/vendor/syn/tests/repo/mod.rs +++ /dev/null @@ -1,630 +0,0 @@ -#![allow(clippy::manual_assert)] - -mod progress; - -use self::progress::Progress; -use anyhow::Result; -use flate2::read::GzDecoder; -use rayon::iter::{IntoParallelRefIterator, ParallelIterator}; -use rayon::ThreadPoolBuilder; -use std::collections::BTreeSet; -use std::env; -use std::ffi::OsStr; -use std::fs; -use std::path::{Path, PathBuf}; -use tar::Archive; -use walkdir::{DirEntry, WalkDir}; - -// nightly-2025-08-14 -const REVISION: &str = "3672a55b7cfd0a12e7097197b6242872473ffaa7"; - -#[rustfmt::skip] -static EXCLUDE_FILES: &[&str] = &[ - // TODO: const traits: `pub const trait Trait {}` - // https://github.com/dtolnay/syn/issues/1887 - "src/tools/clippy/tests/ui/assign_ops.rs", - "src/tools/clippy/tests/ui/missing_const_for_fn/const_trait.rs", - "src/tools/clippy/tests/ui/trait_duplication_in_bounds.rs", - "src/tools/rust-analyzer/crates/test-utils/src/minicore.rs", - - // TODO: unsafe binders: `unsafe<'a> &'a T` - // https://github.com/dtolnay/syn/issues/1791 - "src/tools/rustfmt/tests/source/unsafe-binders.rs", - "src/tools/rustfmt/tests/target/unsafe-binders.rs", - "tests/mir-opt/gvn_on_unsafe_binder.rs", - "tests/rustdoc/auxiliary/unsafe-binder-dep.rs", - "tests/rustdoc/unsafe-binder.rs", - "tests/ui/unsafe-binders/cat-projection.rs", - - // TODO: unsafe fields: `struct S { unsafe field: T }` - // https://github.com/dtolnay/syn/issues/1792 - "src/tools/clippy/tests/ui/derive.rs", - "src/tools/rust-analyzer/crates/parser/test_data/parser/inline/ok/record_field_list.rs", - "src/tools/rustfmt/tests/source/unsafe-field.rs", - "src/tools/rustfmt/tests/target/unsafe-field.rs", - "tests/ui/unsafe-fields/auxiliary/unsafe-fields-crate-dep.rs", - - // TODO: guard patterns: `match expr { (A if f()) | (B if g()) => {} }` - // https://github.com/dtolnay/syn/issues/1793 - "src/tools/rustfmt/tests/target/guard_patterns.rs", - "tests/ui/pattern/rfc-3637-guard-patterns/only-gather-locals-once.rs", - - // TODO: struct field default: `struct S { field: i32 = 1 }` - // https://github.com/dtolnay/syn/issues/1774 - "compiler/rustc_errors/src/markdown/parse.rs", - "compiler/rustc_session/src/config.rs", - "src/tools/clippy/tests/ui/exhaustive_items.rs", - "src/tools/rust-analyzer/crates/parser/test_data/parser/inline/ok/record_field_default_values.rs", - "src/tools/rustfmt/tests/source/default-field-values.rs", - "src/tools/rustfmt/tests/target/default-field-values.rs", - "tests/ui/structs/default-field-values/auxiliary/struct_field_default.rs", - "tests/ui/structs/default-field-values/const-trait-default-field-value.rs", - "tests/ui/structs/default-field-values/field-references-param.rs", - "tests/ui/structs/default-field-values/support.rs", - "tests/ui/structs/default-field-values/use-normalized-ty-for-default-struct-value.rs", - - // TODO: return type notation: `where T: Trait<method(): Send>` and `where T::method(..): Send` - // https://github.com/dtolnay/syn/issues/1434 - "src/tools/rust-analyzer/crates/parser/test_data/parser/inline/ok/return_type_syntax_in_path.rs", - "src/tools/rustfmt/tests/target/return-type-notation.rs", - "tests/rustdoc-json/return-type-notation.rs", - 
"tests/rustdoc/return-type-notation.rs", - "tests/ui/associated-type-bounds/all-generics-lookup.rs", - "tests/ui/associated-type-bounds/implied-from-self-where-clause.rs", - "tests/ui/associated-type-bounds/return-type-notation/basic.rs", - "tests/ui/associated-type-bounds/return-type-notation/higher-ranked-bound-works.rs", - "tests/ui/associated-type-bounds/return-type-notation/namespace-conflict.rs", - "tests/ui/associated-type-bounds/return-type-notation/path-constrained-in-method.rs", - "tests/ui/associated-type-bounds/return-type-notation/path-self-qself.rs", - "tests/ui/associated-type-bounds/return-type-notation/path-works.rs", - "tests/ui/associated-type-bounds/return-type-notation/unpretty-parenthesized.rs", - "tests/ui/async-await/return-type-notation/issue-110963-late.rs", - "tests/ui/async-await/return-type-notation/normalizing-self-auto-trait-issue-109924.rs", - "tests/ui/async-await/return-type-notation/rtn-implied-in-supertrait.rs", - "tests/ui/async-await/return-type-notation/super-method-bound.rs", - "tests/ui/async-await/return-type-notation/supertrait-bound.rs", - "tests/ui/borrowck/alias-liveness/rtn-static.rs", - "tests/ui/feature-gates/feature-gate-return_type_notation.rs", - - // TODO: lazy type alias syntax with where-clause in trailing position - // https://github.com/dtolnay/syn/issues/1525 - "src/tools/rust-analyzer/crates/parser/test_data/parser/inline/ok/type_item_where_clause.rs", - "src/tools/rustfmt/tests/source/type-alias-where-clauses-with-comments.rs", - "src/tools/rustfmt/tests/source/type-alias-where-clauses.rs", - "src/tools/rustfmt/tests/target/type-alias-where-clauses-with-comments.rs", - "src/tools/rustfmt/tests/target/type-alias-where-clauses.rs", - "tests/rustdoc/typedef-inner-variants-lazy_type_alias.rs", - - // TODO: gen blocks and functions - // https://github.com/dtolnay/syn/issues/1526 - "compiler/rustc_codegen_cranelift/example/gen_block_iterate.rs", - "compiler/rustc_hir_analysis/src/collect/resolve_bound_vars.rs", - "compiler/rustc_metadata/src/rmeta/decoder.rs", - "compiler/rustc_middle/src/ty/closure.rs", - "compiler/rustc_middle/src/ty/context.rs", - "src/tools/rust-analyzer/crates/parser/test_data/parser/inline/ok/gen_blocks.rs", - "tests/ui/async-await/async-drop/assign-incompatible-types.rs", - "tests/ui/coroutine/async-gen-deduce-yield.rs", - "tests/ui/coroutine/async-gen-yield-ty-is-unit.rs", - "tests/ui/coroutine/async_gen_fn_iter.rs", - "tests/ui/coroutine/gen_block_is_fused_iter.rs", - "tests/ui/coroutine/gen_block_is_iter.rs", - "tests/ui/coroutine/gen_block_iterate.rs", - "tests/ui/coroutine/gen_fn_iter.rs", - "tests/ui/coroutine/gen_fn_lifetime_capture.rs", - "tests/ui/coroutine/other-attribute-on-gen.rs", - "tests/ui/coroutine/return-types-diverge.rs", - "tests/ui/higher-ranked/builtin-closure-like-bounds.rs", - "tests/ui/sanitizer/cfi/coroutine.rs", - - // TODO: postfix yield - // https://github.com/dtolnay/syn/issues/1890 - "tests/pretty/postfix-yield.rs", - "tests/ui/coroutine/postfix-yield.rs", - - // TODO: `!` as a pattern - // https://github.com/dtolnay/syn/issues/1546 - "tests/mir-opt/building/match/never_patterns.rs", - "tests/pretty/never-pattern.rs", - "tests/ui/rfcs/rfc-0000-never_patterns/always-read-in-closure-capture.rs", - "tests/ui/rfcs/rfc-0000-never_patterns/diverges.rs", - "tests/ui/rfcs/rfc-0000-never_patterns/use-bindings.rs", - - // TODO: async trait bounds: `impl async Fn()` - // https://github.com/dtolnay/syn/issues/1628 - "src/tools/miri/tests/pass/async-closure-captures.rs", - 
"src/tools/miri/tests/pass/async-closure-drop.rs", - "src/tools/rust-analyzer/crates/parser/test_data/parser/inline/ok/async_trait_bound.rs", - "src/tools/rust-analyzer/crates/parser/test_data/parser/inline/ok/for_binder_bound.rs", - "src/tools/rustfmt/tests/target/asyncness.rs", - "tests/coverage/async_closure.rs", - "tests/ui/async-await/async-closures/async-fn-mut-for-async-fn.rs", - "tests/ui/async-await/async-closures/async-fn-once-for-async-fn.rs", - "tests/ui/async-await/async-closures/auxiliary/foreign.rs", - "tests/ui/async-await/async-closures/body-check-on-non-fnmut.rs", - "tests/ui/async-await/async-closures/box-deref-in-debuginfo.rs", - "tests/ui/async-await/async-closures/brand.rs", - "tests/ui/async-await/async-closures/captures.rs", - "tests/ui/async-await/async-closures/clone-closure.rs", - "tests/ui/async-await/async-closures/constrained-but-no-upvars-yet.rs", - "tests/ui/async-await/async-closures/debuginfo-by-move-body.rs", - "tests/ui/async-await/async-closures/drop.rs", - "tests/ui/async-await/async-closures/force-move-due-to-inferred-kind.rs", - "tests/ui/async-await/async-closures/foreign.rs", - "tests/ui/async-await/async-closures/inline-body.rs", - "tests/ui/async-await/async-closures/mangle.rs", - "tests/ui/async-await/async-closures/moro-example.rs", - "tests/ui/async-await/async-closures/move-is-async-fn.rs", - "tests/ui/async-await/async-closures/mut-ref-reborrow.rs", - "tests/ui/async-await/async-closures/no-borrow-from-env.rs", - "tests/ui/async-await/async-closures/non-copy-arg-does-not-force-inner-move.rs", - "tests/ui/async-await/async-closures/overlapping-projs.rs", - "tests/ui/async-await/async-closures/precise-captures.rs", - "tests/ui/async-await/async-closures/refd.rs", - "tests/ui/async-await/async-closures/signature-deduction.rs", - "tests/ui/async-await/async-fn/edition-2015-not-async-bound.rs", - "tests/ui/async-await/async-fn/higher-ranked-async-fn.rs", - "tests/ui/async-await/async-fn/impl-trait.rs", - "tests/ui/async-await/async-fn/project.rs", - "tests/ui/async-await/async-fn/sugar.rs", - - // TODO: mutable by-reference bindings (mut ref) - // https://github.com/dtolnay/syn/issues/1629 - "src/tools/rustfmt/tests/source/mut_ref.rs", - "src/tools/rustfmt/tests/target/mut_ref.rs", - "tests/ui/mut/mut-ref.rs", - - // TODO: postfix match - // https://github.com/dtolnay/syn/issues/1630 - "src/tools/clippy/tests/ui/unnecessary_semicolon.rs", - "src/tools/rustfmt/tests/source/postfix-match/pf-match.rs", - "src/tools/rustfmt/tests/target/postfix-match/pf-match.rs", - "tests/pretty/postfix-match/simple-matches.rs", - "tests/ui/match/postfix-match/no-unused-parens.rs", - "tests/ui/match/postfix-match/pf-match-chain.rs", - "tests/ui/match/postfix-match/postfix-match.rs", - - // TODO: delegation: `reuse Trait::bar { Box::new(self.0) }` - // https://github.com/dtolnay/syn/issues/1580 - "tests/pretty/delegation.rs", - "tests/pretty/hir-delegation.rs", - "tests/ui/delegation/body-identity-glob.rs", - "tests/ui/delegation/body-identity-list.rs", - "tests/ui/delegation/explicit-paths-in-traits-pass.rs", - "tests/ui/delegation/explicit-paths-pass.rs", - "tests/ui/delegation/explicit-paths-signature-pass.rs", - "tests/ui/delegation/fn-header.rs", - "tests/ui/delegation/generics/free-fn-to-free-fn-pass.rs", - "tests/ui/delegation/generics/free-fn-to-trait-method-pass.rs", - "tests/ui/delegation/generics/impl-to-free-fn-pass.rs", - "tests/ui/delegation/generics/impl-trait-to-trait-method-pass.rs", - 
"tests/ui/delegation/generics/inherent-impl-to-trait-method-pass.rs", - "tests/ui/delegation/generics/trait-method-to-other-pass.rs", - "tests/ui/delegation/glob-glob.rs", - "tests/ui/delegation/glob-override.rs", - "tests/ui/delegation/glob.rs", - "tests/ui/delegation/impl-trait.rs", - "tests/ui/delegation/list.rs", - "tests/ui/delegation/macro-inside-glob.rs", - "tests/ui/delegation/macro-inside-list.rs", - "tests/ui/delegation/method-call-priority.rs", - "tests/ui/delegation/parse.rs", - "tests/ui/delegation/rename.rs", - "tests/ui/delegation/self-coercion.rs", - - // TODO: for await - // https://github.com/dtolnay/syn/issues/1631 - "tests/ui/async-await/for-await-2015.rs", - "tests/ui/async-await/for-await-passthrough.rs", - "tests/ui/async-await/for-await.rs", - - // TODO: unparenthesized half-open range pattern inside slice pattern: `[1..]` - // https://github.com/dtolnay/syn/issues/1769 - "src/tools/rust-analyzer/crates/parser/test_data/parser/inline/ok/range_pat.rs", - "tests/ui/consts/miri_unleashed/const_refers_to_static_cross_crate.rs", - - // TODO: pinned type sugar: `&pin const Self` - // https://github.com/dtolnay/syn/issues/1770 - "src/tools/rustfmt/tests/source/pin_sugar.rs", - "src/tools/rustfmt/tests/target/pin_sugar.rs", - "tests/pretty/pin-ergonomics-hir.rs", - "tests/pretty/pin-ergonomics.rs", - "tests/ui/pin-ergonomics/borrow.rs", - "tests/ui/pin-ergonomics/sugar-self.rs", - "tests/ui/pin-ergonomics/sugar.rs", - - // TODO: attributes on where-predicates - // https://github.com/dtolnay/syn/issues/1705 - "src/tools/rustfmt/tests/target/cfg_attribute_in_where.rs", - - // TODO: super let - // https://github.com/dtolnay/syn/issues/1889 - "src/tools/rust-analyzer/crates/parser/test_data/parser/inline/ok/let_stmt.rs", - - // TODO: "ergonomic clones": `f(obj.use)`, `thread::spawn(use || f(obj))`, `async use` - // https://github.com/dtolnay/syn/issues/1802 - "tests/codegen-llvm/ergonomic-clones/closure.rs", - "tests/mir-opt/ergonomic-clones/closure.rs", - "tests/ui/ergonomic-clones/async/basic.rs", - "tests/ui/ergonomic-clones/closure/basic.rs", - "tests/ui/ergonomic-clones/closure/const-closure.rs", - "tests/ui/ergonomic-clones/closure/mutation.rs", - "tests/ui/ergonomic-clones/closure/nested.rs", - "tests/ui/ergonomic-clones/closure/once-move-out-on-heap.rs", - "tests/ui/ergonomic-clones/closure/with-binders.rs", - "tests/ui/ergonomic-clones/dotuse/basic.rs", - "tests/ui/ergonomic-clones/dotuse/block.rs", - - // TODO: contracts - // https://github.com/dtolnay/syn/issues/1892 - "tests/ui/contracts/internal_machinery/contract-ast-extensions-nest.rs", - "tests/ui/contracts/internal_machinery/contract-ast-extensions-tail.rs", - "tests/ui/contracts/internal_machinery/contracts-lowering-ensures-is-not-inherited-when-nesting.rs", - "tests/ui/contracts/internal_machinery/contracts-lowering-requires-is-not-inherited-when-nesting.rs", - - // TODO: frontmatter - // https://github.com/dtolnay/syn/issues/1893 - "tests/ui/frontmatter/auxiliary/lib.rs", - "tests/ui/frontmatter/dot-in-infostring-non-leading.rs", - "tests/ui/frontmatter/escape.rs", - "tests/ui/frontmatter/frontmatter-inner-hyphens-1.rs", - "tests/ui/frontmatter/frontmatter-inner-hyphens-2.rs", - "tests/ui/frontmatter/frontmatter-non-lexible-tokens.rs", - "tests/ui/frontmatter/frontmatter-whitespace-3.rs", - "tests/ui/frontmatter/frontmatter-whitespace-4.rs", - "tests/ui/frontmatter/shebang.rs", - "tests/ui/unpretty/frontmatter.rs", - - // TODO: `|| .. 
.method()` - "src/tools/rust-analyzer/crates/parser/test_data/parser/inline/ok/closure_range_method_call.rs", - "src/tools/rustfmt/tests/source/issue-4808.rs", - - // Negative inherent impl: `impl !Box<JoinHandle> {}` - "src/tools/rustfmt/tests/source/negative-impl.rs", - "src/tools/rustfmt/tests/target/negative-impl.rs", - - // Compile-fail expr parameter in const generic position: `f::<1 + 2>()` - "tests/ui/const-generics/early/closing-args-token.rs", - "tests/ui/const-generics/early/const-expression-parameter.rs", - - // Compile-fail variadics in not the last position of a function parameter list - "src/tools/rust-analyzer/crates/parser/test_data/parser/inline/ok/fn_def_param.rs", - "src/tools/rust-analyzer/crates/parser/test_data/parser/inline/ok/param_list_vararg.rs", - "tests/ui/parser/variadic-ffi-syntactic-pass.rs", - - // Need at least one trait in impl Trait, no such type as impl 'static - "tests/ui/type-alias-impl-trait/generic_type_does_not_live_long_enough.rs", - - // Negative polarity trait bound: `where T: !Copy` - "src/tools/rustfmt/tests/target/negative-bounds.rs", - "tests/ui/traits/negative-bounds/supertrait.rs", - - // Const impl that is not a trait impl: `impl ~const T {}` - "tests/ui/traits/const-traits/syntax.rs", - - // Lifetimes and types out of order in angle bracketed path arguments - "tests/ui/parser/constraints-before-generic-args-syntactic-pass.rs", - - // Deprecated anonymous parameter syntax in traits - "src/tools/rustfmt/tests/source/trait.rs", - "src/tools/rustfmt/tests/target/trait.rs", - "tests/pretty/hir-fn-params.rs", - "tests/rustdoc/anon-fn-params.rs", - "tests/rustdoc/auxiliary/ext-anon-fn-params.rs", - "tests/ui/fn/anonymous-parameters-trait-13105.rs", - "tests/ui/issues/issue-34074.rs", - "tests/ui/proc-macro/trait-fn-args-2015.rs", - "tests/ui/trait-bounds/anonymous-parameters-13775.rs", - - // Deprecated where-clause location - "src/tools/rustfmt/tests/source/issue_4257.rs", - "src/tools/rustfmt/tests/source/issue_4911.rs", - "src/tools/rustfmt/tests/target/issue_4257.rs", - "src/tools/rustfmt/tests/target/issue_4911.rs", - "tests/pretty/gat-bounds.rs", - "tests/rustdoc/generic-associated-types/gats.rs", - - // Deprecated trait object syntax with parenthesized generic arguments and no dyn keyword - "src/tools/rust-analyzer/crates/parser/test_data/parser/inline/ok/path_fn_trait_args.rs", - "src/tools/rust-analyzer/crates/parser/test_data/parser/inline/ok/typepathfn_with_coloncolon.rs", - "src/tools/rustfmt/tests/source/attrib.rs", - "src/tools/rustfmt/tests/source/closure.rs", - "src/tools/rustfmt/tests/source/existential_type.rs", - "src/tools/rustfmt/tests/source/fn-simple.rs", - "src/tools/rustfmt/tests/source/fn_args_layout-vertical.rs", - "src/tools/rustfmt/tests/source/issue-4689/one.rs", - "src/tools/rustfmt/tests/source/issue-4689/two.rs", - "src/tools/rustfmt/tests/source/paths.rs", - "src/tools/rustfmt/tests/source/structs.rs", - "src/tools/rustfmt/tests/target/attrib.rs", - "src/tools/rustfmt/tests/target/closure.rs", - "src/tools/rustfmt/tests/target/existential_type.rs", - "src/tools/rustfmt/tests/target/fn-simple.rs", - "src/tools/rustfmt/tests/target/fn.rs", - "src/tools/rustfmt/tests/target/fn_args_layout-vertical.rs", - "src/tools/rustfmt/tests/target/issue-4689/one.rs", - "src/tools/rustfmt/tests/target/issue-4689/two.rs", - "src/tools/rustfmt/tests/target/paths.rs", - "src/tools/rustfmt/tests/target/structs.rs", - "tests/codegen-units/item-collection/non-generic-closures.rs", - "tests/debuginfo/recursive-enum.rs", - 
"tests/pretty/closure-reform-pretty.rs", - "tests/run-make/reproducible-build-2/reproducible-build.rs", - "tests/run-make/reproducible-build/reproducible-build.rs", - "tests/ui/impl-trait/generic-with-implicit-hrtb-without-dyn.rs", - "tests/ui/lifetimes/auxiliary/lifetime_bound_will_change_warning_lib.rs", - "tests/ui/lifetimes/bare-trait-object-borrowck.rs", - "tests/ui/lifetimes/bare-trait-object.rs", - "tests/ui/parser/bounds-obj-parens.rs", - - // Various extensions to Rust syntax made up by rust-analyzer - "src/tools/rust-analyzer/crates/parser/test_data/parser/inline/ok/assoc_type_bound.rs", - "src/tools/rust-analyzer/crates/parser/test_data/parser/inline/ok/const_param_default_path.rs", - "src/tools/rust-analyzer/crates/parser/test_data/parser/inline/ok/field_expr.rs", - "src/tools/rust-analyzer/crates/parser/test_data/parser/inline/ok/generic_arg_bounds.rs", - "src/tools/rust-analyzer/crates/parser/test_data/parser/inline/ok/global_asm.rs", - "src/tools/rust-analyzer/crates/parser/test_data/parser/inline/ok/question_for_type_trait_bound.rs", - "src/tools/rust-analyzer/crates/parser/test_data/parser/inline/ok/ref_expr.rs", - "src/tools/rust-analyzer/crates/parser/test_data/parser/inline/ok/use_tree_abs_star.rs", - "src/tools/rust-analyzer/crates/parser/test_data/parser/ok/0015_use_tree.rs", - "src/tools/rust-analyzer/crates/parser/test_data/parser/ok/0029_range_forms.rs", - "src/tools/rust-analyzer/crates/parser/test_data/parser/ok/0051_parameter_attrs.rs", - "src/tools/rust-analyzer/crates/parser/test_data/parser/ok/0055_dot_dot_dot.rs", - "src/tools/rust-analyzer/crates/parser/test_data/parser/ok/0068_item_modifiers.rs", - "src/tools/rust-analyzer/crates/syntax/test_data/parser/validation/0031_block_inner_attrs.rs", - "src/tools/rust-analyzer/crates/syntax/test_data/parser/validation/0038_endless_inclusive_range.rs", - "src/tools/rust-analyzer/crates/syntax/test_data/parser/validation/0045_ambiguous_trait_object.rs", - "src/tools/rust-analyzer/crates/syntax/test_data/parser/validation/0046_mutable_const_item.rs", - "src/tools/rust-analyzer/crates/syntax/test_data/parser/validation/0224_dangling_dyn.rs", - "src/tools/rust-analyzer/crates/syntax/test_data/parser/validation/0261_dangling_impl_undeclared_lifetime.rs", - "src/tools/rust-analyzer/crates/syntax/test_data/parser/validation/dangling_impl.rs", - "src/tools/rust-analyzer/crates/syntax/test_data/parser/validation/dangling_impl_reference.rs", - "src/tools/rust-analyzer/crates/syntax/test_data/parser/validation/impl_trait_lifetime_only.rs", - - // Placeholder syntax for "throw expressions" - "compiler/rustc_errors/src/translation.rs", - "compiler/rustc_expand/src/module.rs", - "compiler/rustc_trait_selection/src/error_reporting/infer/need_type_info.rs", - "src/tools/clippy/tests/ui/needless_return.rs", - "src/tools/rust-analyzer/crates/parser/test_data/parser/inline/ok/yeet_expr.rs", - "tests/pretty/yeet-expr.rs", - "tests/ui/try-trait/yeet-for-option.rs", - "tests/ui/try-trait/yeet-for-result.rs", - - // Edition 2015 code using identifiers that are now keywords - // TODO: some of these we should probably parse - "src/tools/rust-analyzer/crates/parser/test_data/parser/inline/ok/dyn_trait_type_weak.rs", - "src/tools/rustfmt/tests/source/configs/indent_style/block_call.rs", - "src/tools/rustfmt/tests/source/configs/use_try_shorthand/false.rs", - "src/tools/rustfmt/tests/source/configs/use_try_shorthand/true.rs", - "src/tools/rustfmt/tests/source/issue_1306.rs", - "src/tools/rustfmt/tests/source/try-conversion.rs", - 
"src/tools/rustfmt/tests/target/configs/indent_style/block_call.rs", - "src/tools/rustfmt/tests/target/configs/use_try_shorthand/false.rs", - "src/tools/rustfmt/tests/target/issue-1681.rs", - "src/tools/rustfmt/tests/target/issue_1306.rs", - "tests/ui/dyn-keyword/dyn-2015-no-warnings-without-lints.rs", - "tests/ui/editions/edition-keywords-2015-2015.rs", - "tests/ui/editions/edition-keywords-2015-2018.rs", - "tests/ui/lint/keyword-idents/auxiliary/multi_file_submod.rs", - "tests/ui/lint/lint_pre_expansion_extern_module_aux.rs", - "tests/ui/macros/macro-comma-support-rpass.rs", - "tests/ui/macros/try-macro.rs", - "tests/ui/parser/extern-crate-async.rs", - "tests/ui/try-block/try-is-identifier-edition2015.rs", - - // Excessive nesting - "tests/ui/issues/issue-74564-if-expr-stack-overflow.rs", - - // Testing tools on invalid syntax - "src/tools/clippy/tests/ui/non_expressive_names_error_recovery.rs", - "src/tools/rustfmt/tests/coverage/target/comments.rs", - "src/tools/rustfmt/tests/parser/issue-4126/invalid.rs", - "src/tools/rustfmt/tests/parser/issue_4418.rs", - "src/tools/rustfmt/tests/parser/stashed-diag.rs", - "src/tools/rustfmt/tests/parser/stashed-diag2.rs", - "src/tools/rustfmt/tests/parser/unclosed-delims/issue_4466.rs", - "src/tools/rustfmt/tests/source/configs/disable_all_formatting/true.rs", - "src/tools/rustfmt/tests/source/configs/spaces_around_ranges/false.rs", - "src/tools/rustfmt/tests/source/configs/spaces_around_ranges/true.rs", - "src/tools/rustfmt/tests/source/type.rs", - "src/tools/rustfmt/tests/target/configs/spaces_around_ranges/false.rs", - "src/tools/rustfmt/tests/target/configs/spaces_around_ranges/true.rs", - "src/tools/rustfmt/tests/target/type.rs", - "src/tools/rustfmt/tests/target/unsafe_extern_blocks.rs", - "tests/run-make/translation/test.rs", - "tests/ui/generics/issue-94432-garbage-ice.rs", - - // Generated file containing a top-level expression, used with `include!` - "compiler/rustc_codegen_gcc/src/intrinsic/archs.rs", - - // Not actually test cases - "tests/ui/lint/expansion-time-include.rs", - "tests/ui/macros/auxiliary/macro-comma-support.rs", - "tests/ui/macros/auxiliary/macro-include-items-expr.rs", - "tests/ui/macros/include-single-expr-helper.rs", - "tests/ui/macros/include-single-expr-helper-1.rs", - "tests/ui/parser/issues/auxiliary/issue-21146-inc.rs", -]; - -#[rustfmt::skip] -static EXCLUDE_DIRS: &[&str] = &[ - // Inputs that intentionally do not parse - "src/tools/rust-analyzer/crates/parser/test_data/parser/err", - "src/tools/rust-analyzer/crates/parser/test_data/parser/inline/err", - - // Inputs that lex but do not necessarily parse - "src/tools/rust-analyzer/crates/parser/test_data/lexer", - - // Inputs that used to crash rust-analyzer, but aren't necessarily supposed to parse - "src/tools/rust-analyzer/crates/syntax/test_data/parser/fuzz-failures", - "src/tools/rust-analyzer/crates/syntax/test_data/reparse/fuzz-failures", - - // Inputs that crash rustc, making no claim about whether they are valid Rust - "tests/crashes", -]; - -// Directories in which a .stderr implies the corresponding .rs is not expected -// to work. 
-static UI_TEST_DIRS: &[&str] = &["tests/ui", "tests/rustdoc-ui"]; - -pub fn for_each_rust_file(for_each: impl Fn(&Path) + Sync + Send) { - let mut rs_files = BTreeSet::new(); - - let repo_dir = Path::new("tests/rust"); - for entry in WalkDir::new(repo_dir) - .into_iter() - .filter_entry(base_dir_filter) - { - let entry = entry.unwrap(); - if !entry.file_type().is_dir() { - rs_files.insert(entry.into_path()); - } - } - - for ui_test_dir in UI_TEST_DIRS { - for entry in WalkDir::new(repo_dir.join(ui_test_dir)) { - let mut path = entry.unwrap().into_path(); - if path.extension() == Some(OsStr::new("stderr")) { - loop { - rs_files.remove(&path.with_extension("rs")); - path = path.with_extension(""); - if path.extension().is_none() { - break; - } - } - } - } - } - - rs_files.par_iter().map(PathBuf::as_path).for_each(for_each); -} - -pub fn base_dir_filter(entry: &DirEntry) -> bool { - let path = entry.path(); - - let mut path_string = path.to_string_lossy(); - if cfg!(windows) { - path_string = path_string.replace('\\', "/").into(); - } - let path_string = if path_string == "tests/rust" { - return true; - } else if let Some(path) = path_string.strip_prefix("tests/rust/") { - path - } else { - panic!("unexpected path in Rust dist: {}", path_string); - }; - - if path.is_dir() { - return !EXCLUDE_DIRS.contains(&path_string); - } - - if path.extension() != Some(OsStr::new("rs")) { - return false; - } - - !EXCLUDE_FILES.contains(&path_string) -} - -#[allow(dead_code)] -pub fn edition(path: &Path) -> &'static str { - if path.ends_with("dyn-2015-no-warnings-without-lints.rs") { - "2015" - } else { - "2021" - } -} - -#[allow(dead_code)] -pub fn abort_after() -> usize { - match env::var("ABORT_AFTER_FAILURE") { - Ok(s) => s.parse().expect("failed to parse ABORT_AFTER_FAILURE"), - Err(_) => usize::MAX, - } -} - -pub fn rayon_init() { - let stack_size = match env::var("RUST_MIN_STACK") { - Ok(s) => s.parse().expect("failed to parse RUST_MIN_STACK"), - Err(_) => 1024 * 1024 * if cfg!(debug_assertions) { 40 } else { 20 }, - }; - ThreadPoolBuilder::new() - .stack_size(stack_size) - .build_global() - .unwrap(); -} - -pub fn clone_rust() { - let needs_clone = match fs::read_to_string("tests/rust/COMMIT") { - Err(_) => true, - Ok(contents) => contents.trim() != REVISION, - }; - if needs_clone { - download_and_unpack().unwrap(); - } - - let mut missing = String::new(); - let test_src = Path::new("tests/rust"); - - let mut exclude_files_set = BTreeSet::new(); - for exclude in EXCLUDE_FILES { - if !exclude_files_set.insert(exclude) { - panic!("duplicate path in EXCLUDE_FILES: {}", exclude); - } - for dir in EXCLUDE_DIRS { - if Path::new(exclude).starts_with(dir) { - panic!("excluded file {} is inside an excluded dir", exclude); - } - } - if !test_src.join(exclude).is_file() { - missing += "\ntests/rust/"; - missing += exclude; - } - } - - let mut exclude_dirs_set = BTreeSet::new(); - for exclude in EXCLUDE_DIRS { - if !exclude_dirs_set.insert(exclude) { - panic!("duplicate path in EXCLUDE_DIRS: {}", exclude); - } - if !test_src.join(exclude).is_dir() { - missing += "\ntests/rust/"; - missing += exclude; - missing += "/"; - } - } - - if !missing.is_empty() { - panic!("excluded test file does not exist:{}\n", missing); - } -} - -fn download_and_unpack() -> Result<()> { - let url = format!("https://github.com/rust-lang/rust/archive/{REVISION}.tar.gz"); - errorf!("downloading {url}\n"); - - let response = reqwest::blocking::get(url)?.error_for_status()?; - let progress = Progress::new(response); - let decoder = 
GzDecoder::new(progress); - let mut archive = Archive::new(decoder); - let prefix = format!("rust-{}", REVISION); - - let tests_rust = Path::new("tests/rust"); - if tests_rust.exists() { - fs::remove_dir_all(tests_rust)?; - } - - for entry in archive.entries()? { - let mut entry = entry?; - let path = entry.path()?; - if path == Path::new("pax_global_header") { - continue; - } - let relative = path.strip_prefix(&prefix)?; - let out = tests_rust.join(relative); - entry.unpack(&out)?; - } - - fs::write("tests/rust/COMMIT", REVISION)?; - Ok(()) -} diff --git a/vendor/syn/tests/repo/progress.rs b/vendor/syn/tests/repo/progress.rs deleted file mode 100644 index 28c8a44b1298a8..00000000000000 --- a/vendor/syn/tests/repo/progress.rs +++ /dev/null @@ -1,37 +0,0 @@ -use std::io::{Read, Result}; -use std::time::{Duration, Instant}; - -pub struct Progress<R> { - bytes: usize, - tick: Instant, - stream: R, -} - -impl<R> Progress<R> { - pub fn new(stream: R) -> Self { - Progress { - bytes: 0, - tick: Instant::now() + Duration::from_millis(2000), - stream, - } - } -} - -impl<R: Read> Read for Progress<R> { - fn read(&mut self, buf: &mut [u8]) -> Result<usize> { - let num = self.stream.read(buf)?; - self.bytes += num; - let now = Instant::now(); - if now > self.tick { - self.tick = now + Duration::from_millis(500); - errorf!("downloading... {} bytes\n", self.bytes); - } - Ok(num) - } -} - -impl<R> Drop for Progress<R> { - fn drop(&mut self) { - errorf!("done ({} bytes)\n", self.bytes); - } -} diff --git a/vendor/syn/tests/snapshot/mod.rs b/vendor/syn/tests/snapshot/mod.rs deleted file mode 100644 index 98d2aebc9d303e..00000000000000 --- a/vendor/syn/tests/snapshot/mod.rs +++ /dev/null @@ -1,68 +0,0 @@ -#![allow(unused_macros, unused_macro_rules)] - -use std::str::FromStr; -use syn::parse::Result; - -macro_rules! snapshot { - ($($args:tt)*) => { - snapshot_impl!(() $($args)*) - }; -} - -macro_rules! 
snapshot_impl { - (($expr:ident) as $t:ty, @$snapshot:literal) => { - let tokens = crate::snapshot::TryIntoTokens::try_into_tokens($expr).unwrap(); - let $expr: $t = syn::parse_quote!(#tokens); - let debug = crate::debug::Lite(&$expr); - if !cfg!(miri) { - #[allow(clippy::needless_raw_string_hashes)] // https://github.com/mitsuhiko/insta/issues/389 - { - insta::assert_debug_snapshot!(debug, @$snapshot); - } - } - }; - (($($expr:tt)*) as $t:ty, @$snapshot:literal) => {{ - let tokens = crate::snapshot::TryIntoTokens::try_into_tokens($($expr)*).unwrap(); - let syntax_tree: $t = syn::parse_quote!(#tokens); - let debug = crate::debug::Lite(&syntax_tree); - if !cfg!(miri) { - #[allow(clippy::needless_raw_string_hashes)] - { - insta::assert_debug_snapshot!(debug, @$snapshot); - } - } - syntax_tree - }}; - (($($expr:tt)*) , @$snapshot:literal) => {{ - let syntax_tree = $($expr)*; - let debug = crate::debug::Lite(&syntax_tree); - if !cfg!(miri) { - #[allow(clippy::needless_raw_string_hashes)] - { - insta::assert_debug_snapshot!(debug, @$snapshot); - } - } - syntax_tree - }}; - (($($expr:tt)*) $next:tt $($rest:tt)*) => { - snapshot_impl!(($($expr)* $next) $($rest)*) - }; -} - -pub trait TryIntoTokens { - #[allow(dead_code)] - fn try_into_tokens(self) -> Result<proc_macro2::TokenStream>; -} - -impl TryIntoTokens for &str { - fn try_into_tokens(self) -> Result<proc_macro2::TokenStream> { - let tokens = proc_macro2::TokenStream::from_str(self)?; - Ok(tokens) - } -} - -impl TryIntoTokens for proc_macro2::TokenStream { - fn try_into_tokens(self) -> Result<proc_macro2::TokenStream> { - Ok(self) - } -} diff --git a/vendor/syn/tests/test_asyncness.rs b/vendor/syn/tests/test_asyncness.rs deleted file mode 100644 index c7aee3285bb29a..00000000000000 --- a/vendor/syn/tests/test_asyncness.rs +++ /dev/null @@ -1,49 +0,0 @@ -#![allow( - clippy::elidable_lifetime_names, - clippy::needless_lifetimes, - clippy::uninlined_format_args -)] - -#[macro_use] -mod snapshot; - -mod debug; - -use syn::{Expr, Item}; - -#[test] -fn test_async_fn() { - let input = "async fn process() {}"; - - snapshot!(input as Item, @r#" - Item::Fn { - vis: Visibility::Inherited, - sig: Signature { - asyncness: Some, - ident: "process", - generics: Generics, - output: ReturnType::Default, - }, - block: Block { - stmts: [], - }, - } - "#); -} - -#[test] -fn test_async_closure() { - let input = "async || {}"; - - snapshot!(input as Expr, @r#" - Expr::Closure { - asyncness: Some, - output: ReturnType::Default, - body: Expr::Block { - block: Block { - stmts: [], - }, - }, - } - "#); -} diff --git a/vendor/syn/tests/test_attribute.rs b/vendor/syn/tests/test_attribute.rs deleted file mode 100644 index 81c485e6b28fc7..00000000000000 --- a/vendor/syn/tests/test_attribute.rs +++ /dev/null @@ -1,231 +0,0 @@ -#![allow( - clippy::elidable_lifetime_names, - clippy::needless_lifetimes, - clippy::uninlined_format_args -)] - -#[macro_use] -mod snapshot; - -mod debug; - -use syn::parse::Parser; -use syn::{Attribute, Meta}; - -#[test] -fn test_meta_item_word() { - let meta = test("#[foo]"); - - snapshot!(meta, @r#" - Meta::Path { - segments: [ - PathSegment { - ident: "foo", - }, - ], - } - "#); -} - -#[test] -fn test_meta_item_name_value() { - let meta = test("#[foo = 5]"); - - snapshot!(meta, @r#" - Meta::NameValue { - path: Path { - segments: [ - PathSegment { - ident: "foo", - }, - ], - }, - value: Expr::Lit { - lit: 5, - }, - } - "#); -} - -#[test] -fn test_meta_item_bool_value() { - let meta = test("#[foo = true]"); - - snapshot!(meta, @r#" - 
Meta::NameValue { - path: Path { - segments: [ - PathSegment { - ident: "foo", - }, - ], - }, - value: Expr::Lit { - lit: Lit::Bool { - value: true, - }, - }, - } - "#); - - let meta = test("#[foo = false]"); - - snapshot!(meta, @r#" - Meta::NameValue { - path: Path { - segments: [ - PathSegment { - ident: "foo", - }, - ], - }, - value: Expr::Lit { - lit: Lit::Bool { - value: false, - }, - }, - } - "#); -} - -#[test] -fn test_meta_item_list_lit() { - let meta = test("#[foo(5)]"); - - snapshot!(meta, @r#" - Meta::List { - path: Path { - segments: [ - PathSegment { - ident: "foo", - }, - ], - }, - delimiter: MacroDelimiter::Paren, - tokens: TokenStream(`5`), - } - "#); -} - -#[test] -fn test_meta_item_list_word() { - let meta = test("#[foo(bar)]"); - - snapshot!(meta, @r#" - Meta::List { - path: Path { - segments: [ - PathSegment { - ident: "foo", - }, - ], - }, - delimiter: MacroDelimiter::Paren, - tokens: TokenStream(`bar`), - } - "#); -} - -#[test] -fn test_meta_item_list_name_value() { - let meta = test("#[foo(bar = 5)]"); - - snapshot!(meta, @r#" - Meta::List { - path: Path { - segments: [ - PathSegment { - ident: "foo", - }, - ], - }, - delimiter: MacroDelimiter::Paren, - tokens: TokenStream(`bar = 5`), - } - "#); -} - -#[test] -fn test_meta_item_list_bool_value() { - let meta = test("#[foo(bar = true)]"); - - snapshot!(meta, @r#" - Meta::List { - path: Path { - segments: [ - PathSegment { - ident: "foo", - }, - ], - }, - delimiter: MacroDelimiter::Paren, - tokens: TokenStream(`bar = true`), - } - "#); -} - -#[test] -fn test_meta_item_multiple() { - let meta = test("#[foo(word, name = 5, list(name2 = 6), word2)]"); - - snapshot!(meta, @r#" - Meta::List { - path: Path { - segments: [ - PathSegment { - ident: "foo", - }, - ], - }, - delimiter: MacroDelimiter::Paren, - tokens: TokenStream(`word , name = 5 , list (name2 = 6) , word2`), - } - "#); -} - -#[test] -fn test_bool_lit() { - let meta = test("#[foo(true)]"); - - snapshot!(meta, @r#" - Meta::List { - path: Path { - segments: [ - PathSegment { - ident: "foo", - }, - ], - }, - delimiter: MacroDelimiter::Paren, - tokens: TokenStream(`true`), - } - "#); -} - -#[test] -fn test_negative_lit() { - let meta = test("#[form(min = -1, max = 200)]"); - - snapshot!(meta, @r#" - Meta::List { - path: Path { - segments: [ - PathSegment { - ident: "form", - }, - ], - }, - delimiter: MacroDelimiter::Paren, - tokens: TokenStream(`min = - 1 , max = 200`), - } - "#); -} - -fn test(input: &str) -> Meta { - let attrs = Attribute::parse_outer.parse_str(input).unwrap(); - - assert_eq!(attrs.len(), 1); - let attr = attrs.into_iter().next().unwrap(); - - attr.meta -} diff --git a/vendor/syn/tests/test_derive_input.rs b/vendor/syn/tests/test_derive_input.rs deleted file mode 100644 index 790e2792adb3a7..00000000000000 --- a/vendor/syn/tests/test_derive_input.rs +++ /dev/null @@ -1,785 +0,0 @@ -#![allow( - clippy::assertions_on_result_states, - clippy::elidable_lifetime_names, - clippy::manual_let_else, - clippy::needless_lifetimes, - clippy::too_many_lines, - clippy::uninlined_format_args -)] - -#[macro_use] -mod snapshot; - -mod debug; - -use quote::quote; -use syn::{Data, DeriveInput}; - -#[test] -fn test_unit() { - let input = quote! { - struct Unit; - }; - - snapshot!(input as DeriveInput, @r#" - DeriveInput { - vis: Visibility::Inherited, - ident: "Unit", - generics: Generics, - data: Data::Struct { - fields: Fields::Unit, - semi_token: Some, - }, - } - "#); -} - -#[test] -fn test_struct() { - let input = quote! 
{ - #[derive(Debug, Clone)] - pub struct Item { - pub ident: Ident, - pub attrs: Vec<Attribute> - } - }; - - snapshot!(input as DeriveInput, @r#" - DeriveInput { - attrs: [ - Attribute { - style: AttrStyle::Outer, - meta: Meta::List { - path: Path { - segments: [ - PathSegment { - ident: "derive", - }, - ], - }, - delimiter: MacroDelimiter::Paren, - tokens: TokenStream(`Debug , Clone`), - }, - }, - ], - vis: Visibility::Public, - ident: "Item", - generics: Generics, - data: Data::Struct { - fields: Fields::Named { - named: [ - Field { - vis: Visibility::Public, - ident: Some("ident"), - colon_token: Some, - ty: Type::Path { - path: Path { - segments: [ - PathSegment { - ident: "Ident", - }, - ], - }, - }, - }, - Token![,], - Field { - vis: Visibility::Public, - ident: Some("attrs"), - colon_token: Some, - ty: Type::Path { - path: Path { - segments: [ - PathSegment { - ident: "Vec", - arguments: PathArguments::AngleBracketed { - args: [ - GenericArgument::Type(Type::Path { - path: Path { - segments: [ - PathSegment { - ident: "Attribute", - }, - ], - }, - }), - ], - }, - }, - ], - }, - }, - }, - ], - }, - }, - } - "#); - - snapshot!(&input.attrs[0].meta, @r#" - Meta::List { - path: Path { - segments: [ - PathSegment { - ident: "derive", - }, - ], - }, - delimiter: MacroDelimiter::Paren, - tokens: TokenStream(`Debug , Clone`), - } - "#); -} - -#[test] -fn test_union() { - let input = quote! { - union MaybeUninit<T> { - uninit: (), - value: T - } - }; - - snapshot!(input as DeriveInput, @r#" - DeriveInput { - vis: Visibility::Inherited, - ident: "MaybeUninit", - generics: Generics { - lt_token: Some, - params: [ - GenericParam::Type(TypeParam { - ident: "T", - }), - ], - gt_token: Some, - }, - data: Data::Union { - fields: FieldsNamed { - named: [ - Field { - vis: Visibility::Inherited, - ident: Some("uninit"), - colon_token: Some, - ty: Type::Tuple, - }, - Token![,], - Field { - vis: Visibility::Inherited, - ident: Some("value"), - colon_token: Some, - ty: Type::Path { - path: Path { - segments: [ - PathSegment { - ident: "T", - }, - ], - }, - }, - }, - ], - }, - }, - } - "#); -} - -#[test] -#[cfg(feature = "full")] -fn test_enum() { - let input = quote! { - /// See the std::result module documentation for details. 
- #[must_use] - pub enum Result<T, E> { - Ok(T), - Err(E), - Surprise = 0isize, - - // Smuggling data into a proc_macro_derive, - // in the style of https://github.com/dtolnay/proc-macro-hack - ProcMacroHack = (0, "data").0 - } - }; - - snapshot!(input as DeriveInput, @r#" - DeriveInput { - attrs: [ - Attribute { - style: AttrStyle::Outer, - meta: Meta::NameValue { - path: Path { - segments: [ - PathSegment { - ident: "doc", - }, - ], - }, - value: Expr::Lit { - lit: " See the std::result module documentation for details.", - }, - }, - }, - Attribute { - style: AttrStyle::Outer, - meta: Meta::Path { - segments: [ - PathSegment { - ident: "must_use", - }, - ], - }, - }, - ], - vis: Visibility::Public, - ident: "Result", - generics: Generics { - lt_token: Some, - params: [ - GenericParam::Type(TypeParam { - ident: "T", - }), - Token![,], - GenericParam::Type(TypeParam { - ident: "E", - }), - ], - gt_token: Some, - }, - data: Data::Enum { - variants: [ - Variant { - ident: "Ok", - fields: Fields::Unnamed { - unnamed: [ - Field { - vis: Visibility::Inherited, - ty: Type::Path { - path: Path { - segments: [ - PathSegment { - ident: "T", - }, - ], - }, - }, - }, - ], - }, - }, - Token![,], - Variant { - ident: "Err", - fields: Fields::Unnamed { - unnamed: [ - Field { - vis: Visibility::Inherited, - ty: Type::Path { - path: Path { - segments: [ - PathSegment { - ident: "E", - }, - ], - }, - }, - }, - ], - }, - }, - Token![,], - Variant { - ident: "Surprise", - fields: Fields::Unit, - discriminant: Some(Expr::Lit { - lit: 0isize, - }), - }, - Token![,], - Variant { - ident: "ProcMacroHack", - fields: Fields::Unit, - discriminant: Some(Expr::Field { - base: Expr::Tuple { - elems: [ - Expr::Lit { - lit: 0, - }, - Token![,], - Expr::Lit { - lit: "data", - }, - ], - }, - member: Member::Unnamed(Index { - index: 0, - }), - }), - }, - ], - }, - } - "#); - - let meta_items: Vec<_> = input.attrs.into_iter().map(|attr| attr.meta).collect(); - - snapshot!(meta_items, @r#" - [ - Meta::NameValue { - path: Path { - segments: [ - PathSegment { - ident: "doc", - }, - ], - }, - value: Expr::Lit { - lit: " See the std::result module documentation for details.", - }, - }, - Meta::Path { - segments: [ - PathSegment { - ident: "must_use", - }, - ], - }, - ] - "#); -} - -#[test] -fn test_attr_with_non_mod_style_path() { - let input = quote! { - #[inert <T>] - struct S; - }; - - syn::parse2::<DeriveInput>(input).unwrap_err(); -} - -#[test] -fn test_attr_with_mod_style_path_with_self() { - let input = quote! { - #[foo::self] - struct S; - }; - - snapshot!(input as DeriveInput, @r#" - DeriveInput { - attrs: [ - Attribute { - style: AttrStyle::Outer, - meta: Meta::Path { - segments: [ - PathSegment { - ident: "foo", - }, - Token![::], - PathSegment { - ident: "self", - }, - ], - }, - }, - ], - vis: Visibility::Inherited, - ident: "S", - generics: Generics, - data: Data::Struct { - fields: Fields::Unit, - semi_token: Some, - }, - } - "#); - - snapshot!(&input.attrs[0].meta, @r#" - Meta::Path { - segments: [ - PathSegment { - ident: "foo", - }, - Token![::], - PathSegment { - ident: "self", - }, - ], - } - "#); -} - -#[test] -fn test_pub_restricted() { - // Taken from tests/rust/src/test/ui/resolve/auxiliary/privacy-struct-ctor.rs - let input = quote! 
{ - pub(in m) struct Z(pub(in m::n) u8); - }; - - snapshot!(input as DeriveInput, @r#" - DeriveInput { - vis: Visibility::Restricted { - in_token: Some, - path: Path { - segments: [ - PathSegment { - ident: "m", - }, - ], - }, - }, - ident: "Z", - generics: Generics, - data: Data::Struct { - fields: Fields::Unnamed { - unnamed: [ - Field { - vis: Visibility::Restricted { - in_token: Some, - path: Path { - segments: [ - PathSegment { - ident: "m", - }, - Token![::], - PathSegment { - ident: "n", - }, - ], - }, - }, - ty: Type::Path { - path: Path { - segments: [ - PathSegment { - ident: "u8", - }, - ], - }, - }, - }, - ], - }, - semi_token: Some, - }, - } - "#); -} - -#[test] -fn test_pub_restricted_crate() { - let input = quote! { - pub(crate) struct S; - }; - - snapshot!(input as DeriveInput, @r#" - DeriveInput { - vis: Visibility::Restricted { - path: Path { - segments: [ - PathSegment { - ident: "crate", - }, - ], - }, - }, - ident: "S", - generics: Generics, - data: Data::Struct { - fields: Fields::Unit, - semi_token: Some, - }, - } - "#); -} - -#[test] -fn test_pub_restricted_super() { - let input = quote! { - pub(super) struct S; - }; - - snapshot!(input as DeriveInput, @r#" - DeriveInput { - vis: Visibility::Restricted { - path: Path { - segments: [ - PathSegment { - ident: "super", - }, - ], - }, - }, - ident: "S", - generics: Generics, - data: Data::Struct { - fields: Fields::Unit, - semi_token: Some, - }, - } - "#); -} - -#[test] -fn test_pub_restricted_in_super() { - let input = quote! { - pub(in super) struct S; - }; - - snapshot!(input as DeriveInput, @r#" - DeriveInput { - vis: Visibility::Restricted { - in_token: Some, - path: Path { - segments: [ - PathSegment { - ident: "super", - }, - ], - }, - }, - ident: "S", - generics: Generics, - data: Data::Struct { - fields: Fields::Unit, - semi_token: Some, - }, - } - "#); -} - -#[test] -fn test_fields_on_unit_struct() { - let input = quote! { - struct S; - }; - - snapshot!(input as DeriveInput, @r#" - DeriveInput { - vis: Visibility::Inherited, - ident: "S", - generics: Generics, - data: Data::Struct { - fields: Fields::Unit, - semi_token: Some, - }, - } - "#); - - let data = match input.data { - Data::Struct(data) => data, - _ => panic!("expected a struct"), - }; - - assert_eq!(0, data.fields.iter().count()); -} - -#[test] -fn test_fields_on_named_struct() { - let input = quote! 
{ - struct S { - foo: i32, - pub bar: String, - } - }; - - snapshot!(input as DeriveInput, @r#" - DeriveInput { - vis: Visibility::Inherited, - ident: "S", - generics: Generics, - data: Data::Struct { - fields: Fields::Named { - named: [ - Field { - vis: Visibility::Inherited, - ident: Some("foo"), - colon_token: Some, - ty: Type::Path { - path: Path { - segments: [ - PathSegment { - ident: "i32", - }, - ], - }, - }, - }, - Token![,], - Field { - vis: Visibility::Public, - ident: Some("bar"), - colon_token: Some, - ty: Type::Path { - path: Path { - segments: [ - PathSegment { - ident: "String", - }, - ], - }, - }, - }, - Token![,], - ], - }, - }, - } - "#); - - let data = match input.data { - Data::Struct(data) => data, - _ => panic!("expected a struct"), - }; - - snapshot!(data.fields.into_iter().collect::<Vec<_>>(), @r#" - [ - Field { - vis: Visibility::Inherited, - ident: Some("foo"), - colon_token: Some, - ty: Type::Path { - path: Path { - segments: [ - PathSegment { - ident: "i32", - }, - ], - }, - }, - }, - Field { - vis: Visibility::Public, - ident: Some("bar"), - colon_token: Some, - ty: Type::Path { - path: Path { - segments: [ - PathSegment { - ident: "String", - }, - ], - }, - }, - }, - ] - "#); -} - -#[test] -fn test_fields_on_tuple_struct() { - let input = quote! { - struct S(i32, pub String); - }; - - snapshot!(input as DeriveInput, @r#" - DeriveInput { - vis: Visibility::Inherited, - ident: "S", - generics: Generics, - data: Data::Struct { - fields: Fields::Unnamed { - unnamed: [ - Field { - vis: Visibility::Inherited, - ty: Type::Path { - path: Path { - segments: [ - PathSegment { - ident: "i32", - }, - ], - }, - }, - }, - Token![,], - Field { - vis: Visibility::Public, - ty: Type::Path { - path: Path { - segments: [ - PathSegment { - ident: "String", - }, - ], - }, - }, - }, - ], - }, - semi_token: Some, - }, - } - "#); - - let data = match input.data { - Data::Struct(data) => data, - _ => panic!("expected a struct"), - }; - - snapshot!(data.fields.iter().collect::<Vec<_>>(), @r#" - [ - Field { - vis: Visibility::Inherited, - ty: Type::Path { - path: Path { - segments: [ - PathSegment { - ident: "i32", - }, - ], - }, - }, - }, - Field { - vis: Visibility::Public, - ty: Type::Path { - path: Path { - segments: [ - PathSegment { - ident: "String", - }, - ], - }, - }, - }, - ] - "#); -} - -#[test] -fn test_ambiguous_crate() { - let input = quote! { - // The field type is `(crate::X)` not `crate (::X)`. 
- struct S(crate::X); - }; - - snapshot!(input as DeriveInput, @r#" - DeriveInput { - vis: Visibility::Inherited, - ident: "S", - generics: Generics, - data: Data::Struct { - fields: Fields::Unnamed { - unnamed: [ - Field { - vis: Visibility::Inherited, - ty: Type::Path { - path: Path { - segments: [ - PathSegment { - ident: "crate", - }, - Token![::], - PathSegment { - ident: "X", - }, - ], - }, - }, - }, - ], - }, - semi_token: Some, - }, - } - "#); -} diff --git a/vendor/syn/tests/test_expr.rs b/vendor/syn/tests/test_expr.rs deleted file mode 100644 index e21373cf96d84c..00000000000000 --- a/vendor/syn/tests/test_expr.rs +++ /dev/null @@ -1,1702 +0,0 @@ -#![cfg(not(miri))] -#![recursion_limit = "1024"] -#![feature(rustc_private)] -#![allow( - clippy::elidable_lifetime_names, - clippy::match_like_matches_macro, - clippy::needless_lifetimes, - clippy::single_element_loop, - clippy::too_many_lines, - clippy::uninlined_format_args, - clippy::unreadable_literal -)] - -#[macro_use] -mod macros; -#[macro_use] -mod snapshot; - -mod common; -mod debug; - -use crate::common::visit::{AsIfPrinted, FlattenParens}; -use proc_macro2::{Delimiter, Group, Ident, Span, TokenStream}; -use quote::{quote, ToTokens as _}; -use std::process::ExitCode; -use syn::punctuated::Punctuated; -use syn::visit_mut::VisitMut as _; -use syn::{ - parse_quote, token, AngleBracketedGenericArguments, Arm, BinOp, Block, Expr, ExprArray, - ExprAssign, ExprAsync, ExprAwait, ExprBinary, ExprBlock, ExprBreak, ExprCall, ExprCast, - ExprClosure, ExprConst, ExprContinue, ExprField, ExprForLoop, ExprIf, ExprIndex, ExprLet, - ExprLit, ExprLoop, ExprMacro, ExprMatch, ExprMethodCall, ExprPath, ExprRange, ExprRawAddr, - ExprReference, ExprReturn, ExprStruct, ExprTry, ExprTryBlock, ExprTuple, ExprUnary, ExprUnsafe, - ExprWhile, ExprYield, GenericArgument, Label, Lifetime, Lit, LitInt, Macro, MacroDelimiter, - Member, Pat, PatWild, Path, PathArguments, PathSegment, PointerMutability, QSelf, RangeLimits, - ReturnType, Stmt, Token, Type, TypePath, UnOp, -}; - -#[test] -fn test_expr_parse() { - let tokens = quote!(..100u32); - snapshot!(tokens as Expr, @r#" - Expr::Range { - limits: RangeLimits::HalfOpen, - end: Some(Expr::Lit { - lit: 100u32, - }), - } - "#); - - let tokens = quote!(..100u32); - snapshot!(tokens as ExprRange, @r#" - ExprRange { - limits: RangeLimits::HalfOpen, - end: Some(Expr::Lit { - lit: 100u32, - }), - } - "#); -} - -#[test] -fn test_await() { - // Must not parse as Expr::Field. - let tokens = quote!(fut.await); - - snapshot!(tokens as Expr, @r#" - Expr::Await { - base: Expr::Path { - path: Path { - segments: [ - PathSegment { - ident: "fut", - }, - ], - }, - }, - } - "#); -} - -#[rustfmt::skip] -#[test] -fn test_tuple_multi_index() { - let expected = snapshot!("tuple.0.0" as Expr, @r#" - Expr::Field { - base: Expr::Field { - base: Expr::Path { - path: Path { - segments: [ - PathSegment { - ident: "tuple", - }, - ], - }, - }, - member: Member::Unnamed(Index { - index: 0, - }), - }, - member: Member::Unnamed(Index { - index: 0, - }), - } - "#); - - for &input in &[ - "tuple .0.0", - "tuple. 0.0", - "tuple.0 .0", - "tuple.0. 0", - "tuple . 0 . 0", - ] { - assert_eq!(expected, syn::parse_str(input).unwrap()); - } - - for tokens in [ - quote!(tuple.0.0), - quote!(tuple .0.0), - quote!(tuple. 0.0), - quote!(tuple.0 .0), - quote!(tuple.0. 0), - quote!(tuple . 0 . 
0), - ] { - assert_eq!(expected, syn::parse2(tokens).unwrap()); - } -} - -#[test] -fn test_macro_variable_func() { - // mimics the token stream corresponding to `$fn()` - let path = Group::new(Delimiter::None, quote!(f)); - let tokens = quote!(#path()); - - snapshot!(tokens as Expr, @r#" - Expr::Call { - func: Expr::Group { - expr: Expr::Path { - path: Path { - segments: [ - PathSegment { - ident: "f", - }, - ], - }, - }, - }, - } - "#); - - let path = Group::new(Delimiter::None, quote! { #[inside] f }); - let tokens = quote!(#[outside] #path()); - - snapshot!(tokens as Expr, @r#" - Expr::Call { - attrs: [ - Attribute { - style: AttrStyle::Outer, - meta: Meta::Path { - segments: [ - PathSegment { - ident: "outside", - }, - ], - }, - }, - ], - func: Expr::Group { - expr: Expr::Path { - attrs: [ - Attribute { - style: AttrStyle::Outer, - meta: Meta::Path { - segments: [ - PathSegment { - ident: "inside", - }, - ], - }, - }, - ], - path: Path { - segments: [ - PathSegment { - ident: "f", - }, - ], - }, - }, - }, - } - "#); -} - -#[test] -fn test_macro_variable_macro() { - // mimics the token stream corresponding to `$macro!()` - let mac = Group::new(Delimiter::None, quote!(m)); - let tokens = quote!(#mac!()); - - snapshot!(tokens as Expr, @r#" - Expr::Macro { - mac: Macro { - path: Path { - segments: [ - PathSegment { - ident: "m", - }, - ], - }, - delimiter: MacroDelimiter::Paren, - tokens: TokenStream(``), - }, - } - "#); -} - -#[test] -fn test_macro_variable_struct() { - // mimics the token stream corresponding to `$struct {}` - let s = Group::new(Delimiter::None, quote! { S }); - let tokens = quote!(#s {}); - - snapshot!(tokens as Expr, @r#" - Expr::Struct { - path: Path { - segments: [ - PathSegment { - ident: "S", - }, - ], - }, - } - "#); -} - -#[test] -fn test_macro_variable_unary() { - // mimics the token stream corresponding to `$expr.method()` where expr is `&self` - let inner = Group::new(Delimiter::None, quote!(&self)); - let tokens = quote!(#inner.method()); - snapshot!(tokens as Expr, @r#" - Expr::MethodCall { - receiver: Expr::Group { - expr: Expr::Reference { - expr: Expr::Path { - path: Path { - segments: [ - PathSegment { - ident: "self", - }, - ], - }, - }, - }, - }, - method: "method", - } - "#); -} - -#[test] -fn test_macro_variable_match_arm() { - // mimics the token stream corresponding to `match v { _ => $expr }` - let expr = Group::new(Delimiter::None, quote! 
{ #[a] () }); - let tokens = quote!(match v { _ => #expr }); - snapshot!(tokens as Expr, @r#" - Expr::Match { - expr: Expr::Path { - path: Path { - segments: [ - PathSegment { - ident: "v", - }, - ], - }, - }, - arms: [ - Arm { - pat: Pat::Wild, - body: Expr::Group { - expr: Expr::Tuple { - attrs: [ - Attribute { - style: AttrStyle::Outer, - meta: Meta::Path { - segments: [ - PathSegment { - ident: "a", - }, - ], - }, - }, - ], - }, - }, - }, - ], - } - "#); - - let expr = Group::new(Delimiter::None, quote!(loop {} + 1)); - let tokens = quote!(match v { _ => #expr }); - snapshot!(tokens as Expr, @r#" - Expr::Match { - expr: Expr::Path { - path: Path { - segments: [ - PathSegment { - ident: "v", - }, - ], - }, - }, - arms: [ - Arm { - pat: Pat::Wild, - body: Expr::Group { - expr: Expr::Binary { - left: Expr::Loop { - body: Block { - stmts: [], - }, - }, - op: BinOp::Add, - right: Expr::Lit { - lit: 1, - }, - }, - }, - }, - ], - } - "#); -} - -// https://github.com/dtolnay/syn/issues/1019 -#[test] -fn test_closure_vs_rangefull() { - #[rustfmt::skip] // rustfmt bug: https://github.com/rust-lang/rustfmt/issues/4808 - let tokens = quote!(|| .. .method()); - snapshot!(tokens as Expr, @r#" - Expr::MethodCall { - receiver: Expr::Closure { - output: ReturnType::Default, - body: Expr::Range { - limits: RangeLimits::HalfOpen, - }, - }, - method: "method", - } - "#); -} - -#[test] -fn test_postfix_operator_after_cast() { - syn::parse_str::<Expr>("|| &x as T[0]").unwrap_err(); - syn::parse_str::<Expr>("|| () as ()()").unwrap_err(); -} - -#[test] -fn test_range_kinds() { - syn::parse_str::<Expr>("..").unwrap(); - syn::parse_str::<Expr>("..hi").unwrap(); - syn::parse_str::<Expr>("lo..").unwrap(); - syn::parse_str::<Expr>("lo..hi").unwrap(); - - syn::parse_str::<Expr>("..=").unwrap_err(); - syn::parse_str::<Expr>("..=hi").unwrap(); - syn::parse_str::<Expr>("lo..=").unwrap_err(); - syn::parse_str::<Expr>("lo..=hi").unwrap(); - - syn::parse_str::<Expr>("...").unwrap_err(); - syn::parse_str::<Expr>("...hi").unwrap_err(); - syn::parse_str::<Expr>("lo...").unwrap_err(); - syn::parse_str::<Expr>("lo...hi").unwrap_err(); -} - -#[test] -fn test_range_precedence() { - snapshot!(".. .." as Expr, @r#" - Expr::Range { - limits: RangeLimits::HalfOpen, - end: Some(Expr::Range { - limits: RangeLimits::HalfOpen, - }), - } - "#); - - snapshot!(".. .. ()" as Expr, @r#" - Expr::Range { - limits: RangeLimits::HalfOpen, - end: Some(Expr::Range { - limits: RangeLimits::HalfOpen, - end: Some(Expr::Tuple), - }), - } - "#); - - snapshot!("() .. .." as Expr, @r#" - Expr::Range { - start: Some(Expr::Tuple), - limits: RangeLimits::HalfOpen, - end: Some(Expr::Range { - limits: RangeLimits::HalfOpen, - }), - } - "#); - - snapshot!("() = .. + ()" as Expr, @r" - Expr::Binary { - left: Expr::Assign { - left: Expr::Tuple, - right: Expr::Range { - limits: RangeLimits::HalfOpen, - }, - }, - op: BinOp::Add, - right: Expr::Tuple, - } - "); - - // A range with a lower bound cannot be the upper bound of another range, - // and a range with an upper bound cannot be the lower bound of another - // range. - syn::parse_str::<Expr>(".. x ..").unwrap_err(); - syn::parse_str::<Expr>("x .. x ..").unwrap_err(); -} - -#[test] -fn test_range_attrs() { - // Attributes are not allowed on range expressions starting with `..` - syn::parse_str::<Expr>("#[allow()] ..").unwrap_err(); - syn::parse_str::<Expr>("#[allow()] .. hi").unwrap_err(); - - snapshot!("#[allow()] lo .. 
hi" as Expr, @r#" - Expr::Range { - start: Some(Expr::Path { - attrs: [ - Attribute { - style: AttrStyle::Outer, - meta: Meta::List { - path: Path { - segments: [ - PathSegment { - ident: "allow", - }, - ], - }, - delimiter: MacroDelimiter::Paren, - tokens: TokenStream(``), - }, - }, - ], - path: Path { - segments: [ - PathSegment { - ident: "lo", - }, - ], - }, - }), - limits: RangeLimits::HalfOpen, - end: Some(Expr::Path { - path: Path { - segments: [ - PathSegment { - ident: "hi", - }, - ], - }, - }), - } - "#); -} - -#[test] -fn test_ranges_bailout() { - syn::parse_str::<Expr>(".. ?").unwrap_err(); - syn::parse_str::<Expr>(".. .field").unwrap_err(); - - snapshot!("return .. ?" as Expr, @r" - Expr::Try { - expr: Expr::Return { - expr: Some(Expr::Range { - limits: RangeLimits::HalfOpen, - }), - }, - } - "); - - snapshot!("break .. ?" as Expr, @r" - Expr::Try { - expr: Expr::Break { - expr: Some(Expr::Range { - limits: RangeLimits::HalfOpen, - }), - }, - } - "); - - snapshot!("|| .. ?" as Expr, @r" - Expr::Try { - expr: Expr::Closure { - output: ReturnType::Default, - body: Expr::Range { - limits: RangeLimits::HalfOpen, - }, - }, - } - "); - - snapshot!("return .. .field" as Expr, @r#" - Expr::Field { - base: Expr::Return { - expr: Some(Expr::Range { - limits: RangeLimits::HalfOpen, - }), - }, - member: Member::Named("field"), - } - "#); - - snapshot!("break .. .field" as Expr, @r#" - Expr::Field { - base: Expr::Break { - expr: Some(Expr::Range { - limits: RangeLimits::HalfOpen, - }), - }, - member: Member::Named("field"), - } - "#); - - snapshot!("|| .. .field" as Expr, @r#" - Expr::Field { - base: Expr::Closure { - output: ReturnType::Default, - body: Expr::Range { - limits: RangeLimits::HalfOpen, - }, - }, - member: Member::Named("field"), - } - "#); - - snapshot!("return .. = ()" as Expr, @r" - Expr::Assign { - left: Expr::Return { - expr: Some(Expr::Range { - limits: RangeLimits::HalfOpen, - }), - }, - right: Expr::Tuple, - } - "); - - snapshot!("return .. += ()" as Expr, @r" - Expr::Binary { - left: Expr::Return { - expr: Some(Expr::Range { - limits: RangeLimits::HalfOpen, - }), - }, - op: BinOp::AddAssign, - right: Expr::Tuple, - } - "); -} - -#[test] -fn test_ambiguous_label() { - for stmt in [ - quote! { - return 'label: loop { break 'label 42; }; - }, - quote! { - break ('label: loop { break 'label 42; }); - }, - quote! { - break 1 + 'label: loop { break 'label 42; }; - }, - quote! { - break 'outer 'inner: loop { break 'inner 42; }; - }, - ] { - syn::parse2::<Stmt>(stmt).unwrap(); - } - - for stmt in [ - // Parentheses required. See https://github.com/rust-lang/rust/pull/87026. - quote! 
{ - break 'label: loop { break 'label 42; }; - }, - ] { - syn::parse2::<Stmt>(stmt).unwrap_err(); - } -} - -#[test] -fn test_extended_interpolated_path() { - let path = Group::new(Delimiter::None, quote!(a::b)); - - let tokens = quote!(if #path {}); - snapshot!(tokens as Expr, @r#" - Expr::If { - cond: Expr::Group { - expr: Expr::Path { - path: Path { - segments: [ - PathSegment { - ident: "a", - }, - Token![::], - PathSegment { - ident: "b", - }, - ], - }, - }, - }, - then_branch: Block { - stmts: [], - }, - } - "#); - - let tokens = quote!(#path {}); - snapshot!(tokens as Expr, @r#" - Expr::Struct { - path: Path { - segments: [ - PathSegment { - ident: "a", - }, - Token![::], - PathSegment { - ident: "b", - }, - ], - }, - } - "#); - - let tokens = quote!(#path :: c); - snapshot!(tokens as Expr, @r#" - Expr::Path { - path: Path { - segments: [ - PathSegment { - ident: "a", - }, - Token![::], - PathSegment { - ident: "b", - }, - Token![::], - PathSegment { - ident: "c", - }, - ], - }, - } - "#); - - let nested = Group::new(Delimiter::None, quote!(a::b || true)); - let tokens = quote!(if #nested && false {}); - snapshot!(tokens as Expr, @r#" - Expr::If { - cond: Expr::Binary { - left: Expr::Group { - expr: Expr::Binary { - left: Expr::Path { - path: Path { - segments: [ - PathSegment { - ident: "a", - }, - Token![::], - PathSegment { - ident: "b", - }, - ], - }, - }, - op: BinOp::Or, - right: Expr::Lit { - lit: Lit::Bool { - value: true, - }, - }, - }, - }, - op: BinOp::And, - right: Expr::Lit { - lit: Lit::Bool { - value: false, - }, - }, - }, - then_branch: Block { - stmts: [], - }, - } - "#); -} - -#[test] -fn test_tuple_comma() { - let mut expr = ExprTuple { - attrs: Vec::new(), - paren_token: token::Paren::default(), - elems: Punctuated::new(), - }; - snapshot!(expr.to_token_stream() as Expr, @"Expr::Tuple"); - - expr.elems.push_value(parse_quote!(continue)); - // Must not parse to Expr::Paren - snapshot!(expr.to_token_stream() as Expr, @r#" - Expr::Tuple { - elems: [ - Expr::Continue, - Token![,], - ], - } - "#); - - expr.elems.push_punct(<Token![,]>::default()); - snapshot!(expr.to_token_stream() as Expr, @r#" - Expr::Tuple { - elems: [ - Expr::Continue, - Token![,], - ], - } - "#); - - expr.elems.push_value(parse_quote!(continue)); - snapshot!(expr.to_token_stream() as Expr, @r#" - Expr::Tuple { - elems: [ - Expr::Continue, - Token![,], - Expr::Continue, - ], - } - "#); - - expr.elems.push_punct(<Token![,]>::default()); - snapshot!(expr.to_token_stream() as Expr, @r#" - Expr::Tuple { - elems: [ - Expr::Continue, - Token![,], - Expr::Continue, - Token![,], - ], - } - "#); -} - -#[test] -fn test_binop_associativity() { - // Left to right. - snapshot!("() + () + ()" as Expr, @r#" - Expr::Binary { - left: Expr::Binary { - left: Expr::Tuple, - op: BinOp::Add, - right: Expr::Tuple, - }, - op: BinOp::Add, - right: Expr::Tuple, - } - "#); - - // Right to left. - snapshot!("() += () += ()" as Expr, @r#" - Expr::Binary { - left: Expr::Tuple, - op: BinOp::AddAssign, - right: Expr::Binary { - left: Expr::Tuple, - op: BinOp::AddAssign, - right: Expr::Tuple, - }, - } - "#); - - // Parenthesization is required. - syn::parse_str::<Expr>("() == () == ()").unwrap_err(); -} - -#[test] -fn test_assign_range_precedence() { - // Range has higher precedence as the right-hand of an assignment, but - // ambiguous precedence as the left-hand of an assignment. - snapshot!("() = () .. 
()" as Expr, @r#" - Expr::Assign { - left: Expr::Tuple, - right: Expr::Range { - start: Some(Expr::Tuple), - limits: RangeLimits::HalfOpen, - end: Some(Expr::Tuple), - }, - } - "#); - - snapshot!("() += () .. ()" as Expr, @r#" - Expr::Binary { - left: Expr::Tuple, - op: BinOp::AddAssign, - right: Expr::Range { - start: Some(Expr::Tuple), - limits: RangeLimits::HalfOpen, - end: Some(Expr::Tuple), - }, - } - "#); - - syn::parse_str::<Expr>("() .. () = ()").unwrap_err(); - syn::parse_str::<Expr>("() .. () += ()").unwrap_err(); -} - -#[test] -fn test_chained_comparison() { - // https://github.com/dtolnay/syn/issues/1738 - let _ = syn::parse_str::<Expr>("a = a < a <"); - let _ = syn::parse_str::<Expr>("a = a .. a .."); - let _ = syn::parse_str::<Expr>("a = a .. a +="); - - let err = syn::parse_str::<Expr>("a < a < a").unwrap_err(); - assert_eq!("comparison operators cannot be chained", err.to_string()); - - let err = syn::parse_str::<Expr>("a .. a .. a").unwrap_err(); - assert_eq!("unexpected token", err.to_string()); - - let err = syn::parse_str::<Expr>("a .. a += a").unwrap_err(); - assert_eq!("unexpected token", err.to_string()); -} - -#[test] -fn test_fixup() { - for tokens in [ - quote! { 2 * (1 + 1) }, - quote! { 0 + (0 + 0) }, - quote! { (a = b) = c }, - quote! { (x as i32) < 0 }, - quote! { 1 + (x as i32) < 0 }, - quote! { (1 + 1).abs() }, - quote! { (lo..hi)[..] }, - quote! { (a..b)..(c..d) }, - quote! { (x > ..) > x }, - quote! { (&mut fut).await }, - quote! { &mut (x as i32) }, - quote! { -(x as i32) }, - quote! { if (S {}) == 1 {} }, - quote! { { (m! {}) - 1 } }, - quote! { match m { _ => ({}) - 1 } }, - quote! { if let _ = (a && b) && c {} }, - quote! { if let _ = (S {}) {} }, - quote! { if (S {}) == 0 && let Some(_) = x {} }, - quote! { break ('a: loop { break 'a 1 } + 1) }, - quote! { a + (|| b) + c }, - quote! { if let _ = ((break) - 1 || true) {} }, - quote! { if let _ = (break + 1 || true) {} }, - quote! { if break (break) {} }, - quote! { if break break {} {} }, - quote! { if return (..) {} }, - quote! { if return .. {} {} }, - quote! { if || (Struct {}) {} }, - quote! { if || (Struct {}).await {} }, - quote! { if break || Struct {}.await {} }, - quote! { if break 'outer 'block: {} {} }, - quote! { if ..'block: {} {} }, - quote! { if break ({}).await {} }, - quote! { (break)() }, - quote! { (..) = () }, - quote! { (..) += () }, - quote! { (1 < 2) == (3 < 4) }, - quote! { { (let _ = ()) } }, - quote! { (#[attr] thing).field }, - quote! { #[attr] (1 + 1) }, - quote! { #[attr] (x = 1) }, - quote! { #[attr] (x += 1) }, - quote! { #[attr] (1 as T) }, - quote! { (return #[attr] (x + ..)).field }, - quote! { (self.f)() }, - quote! { (return)..=return }, - quote! { 1 + (return)..=1 + return }, - quote! { .. .. .. .. .. .. .. .. .. .. .. .. .. .. .. .. .. .. .. .. .. .. .. .. .. .. .. .. .. .. .. 
}, - ] { - let original: Expr = syn::parse2(tokens).unwrap(); - - let mut flat = original.clone(); - FlattenParens::combine_attrs().visit_expr_mut(&mut flat); - let reconstructed: Expr = match syn::parse2(flat.to_token_stream()) { - Ok(reconstructed) => reconstructed, - Err(err) => panic!("failed to parse `{}`: {}", flat.to_token_stream(), err), - }; - - assert!( - original == reconstructed, - "original: {}\n{:#?}\nreconstructed: {}\n{:#?}", - original.to_token_stream(), - crate::debug::Lite(&original), - reconstructed.to_token_stream(), - crate::debug::Lite(&reconstructed), - ); - } -} - -#[test] -fn test_permutations() -> ExitCode { - fn iter(depth: usize, f: &mut dyn FnMut(Expr)) { - let span = Span::call_site(); - - // Expr::Path - f(Expr::Path(ExprPath { - // `x` - attrs: Vec::new(), - qself: None, - path: Path::from(Ident::new("x", span)), - })); - if false { - f(Expr::Path(ExprPath { - // `x::<T>` - attrs: Vec::new(), - qself: None, - path: Path { - leading_colon: None, - segments: Punctuated::from_iter([PathSegment { - ident: Ident::new("x", span), - arguments: PathArguments::AngleBracketed(AngleBracketedGenericArguments { - colon2_token: Some(Token![::](span)), - lt_token: Token![<](span), - args: Punctuated::from_iter([GenericArgument::Type(Type::Path( - TypePath { - qself: None, - path: Path::from(Ident::new("T", span)), - }, - ))]), - gt_token: Token![>](span), - }), - }]), - }, - })); - f(Expr::Path(ExprPath { - // `<T as Trait>::CONST` - attrs: Vec::new(), - qself: Some(QSelf { - lt_token: Token![<](span), - ty: Box::new(Type::Path(TypePath { - qself: None, - path: Path::from(Ident::new("T", span)), - })), - position: 1, - as_token: Some(Token![as](span)), - gt_token: Token![>](span), - }), - path: Path { - leading_colon: None, - segments: Punctuated::from_iter([ - PathSegment::from(Ident::new("Trait", span)), - PathSegment::from(Ident::new("CONST", span)), - ]), - }, - })); - } - - let Some(depth) = depth.checked_sub(1) else { - return; - }; - - // Expr::Assign - iter(depth, &mut |expr| { - iter(0, &mut |simple| { - f(Expr::Assign(ExprAssign { - // `x = $expr` - attrs: Vec::new(), - left: Box::new(simple.clone()), - eq_token: Token![=](span), - right: Box::new(expr.clone()), - })); - f(Expr::Assign(ExprAssign { - // `$expr = x` - attrs: Vec::new(), - left: Box::new(expr.clone()), - eq_token: Token![=](span), - right: Box::new(simple), - })); - }); - }); - - // Expr::Binary - iter(depth, &mut |expr| { - iter(0, &mut |simple| { - for op in [ - BinOp::Add(Token![+](span)), - //BinOp::Sub(Token![-](span)), - //BinOp::Mul(Token![*](span)), - //BinOp::Div(Token![/](span)), - //BinOp::Rem(Token![%](span)), - //BinOp::And(Token![&&](span)), - //BinOp::Or(Token![||](span)), - //BinOp::BitXor(Token![^](span)), - //BinOp::BitAnd(Token![&](span)), - //BinOp::BitOr(Token![|](span)), - //BinOp::Shl(Token![<<](span)), - //BinOp::Shr(Token![>>](span)), - //BinOp::Eq(Token![==](span)), - BinOp::Lt(Token![<](span)), - //BinOp::Le(Token![<=](span)), - //BinOp::Ne(Token![!=](span)), - //BinOp::Ge(Token![>=](span)), - //BinOp::Gt(Token![>](span)), - BinOp::ShlAssign(Token![<<=](span)), - ] { - f(Expr::Binary(ExprBinary { - // `x + $expr` - attrs: Vec::new(), - left: Box::new(simple.clone()), - op, - right: Box::new(expr.clone()), - })); - f(Expr::Binary(ExprBinary { - // `$expr + x` - attrs: Vec::new(), - left: Box::new(expr.clone()), - op, - right: Box::new(simple.clone()), - })); - } - }); - }); - - // Expr::Block - f(Expr::Block(ExprBlock { - // `{}` - attrs: Vec::new(), - label: None, - 
block: Block { - brace_token: token::Brace(span), - stmts: Vec::new(), - }, - })); - - // Expr::Break - f(Expr::Break(ExprBreak { - // `break` - attrs: Vec::new(), - break_token: Token![break](span), - label: None, - expr: None, - })); - iter(depth, &mut |expr| { - f(Expr::Break(ExprBreak { - // `break $expr` - attrs: Vec::new(), - break_token: Token![break](span), - label: None, - expr: Some(Box::new(expr)), - })); - }); - - // Expr::Call - iter(depth, &mut |expr| { - f(Expr::Call(ExprCall { - // `$expr()` - attrs: Vec::new(), - func: Box::new(expr), - paren_token: token::Paren(span), - args: Punctuated::new(), - })); - }); - - // Expr::Cast - iter(depth, &mut |expr| { - f(Expr::Cast(ExprCast { - // `$expr as T` - attrs: Vec::new(), - expr: Box::new(expr), - as_token: Token![as](span), - ty: Box::new(Type::Path(TypePath { - qself: None, - path: Path::from(Ident::new("T", span)), - })), - })); - }); - - // Expr::Closure - iter(depth, &mut |expr| { - f(Expr::Closure(ExprClosure { - // `|| $expr` - attrs: Vec::new(), - lifetimes: None, - constness: None, - movability: None, - asyncness: None, - capture: None, - or1_token: Token![|](span), - inputs: Punctuated::new(), - or2_token: Token![|](span), - output: ReturnType::Default, - body: Box::new(expr), - })); - }); - - // Expr::Field - iter(depth, &mut |expr| { - f(Expr::Field(ExprField { - // `$expr.field` - attrs: Vec::new(), - base: Box::new(expr), - dot_token: Token![.](span), - member: Member::Named(Ident::new("field", span)), - })); - }); - - // Expr::If - iter(depth, &mut |expr| { - f(Expr::If(ExprIf { - // `if $expr {}` - attrs: Vec::new(), - if_token: Token![if](span), - cond: Box::new(expr), - then_branch: Block { - brace_token: token::Brace(span), - stmts: Vec::new(), - }, - else_branch: None, - })); - }); - - // Expr::Let - iter(depth, &mut |expr| { - f(Expr::Let(ExprLet { - attrs: Vec::new(), - let_token: Token![let](span), - pat: Box::new(Pat::Wild(PatWild { - attrs: Vec::new(), - underscore_token: Token![_](span), - })), - eq_token: Token![=](span), - expr: Box::new(expr), - })); - }); - - // Expr::Range - f(Expr::Range(ExprRange { - // `..` - attrs: Vec::new(), - start: None, - limits: RangeLimits::HalfOpen(Token![..](span)), - end: None, - })); - iter(depth, &mut |expr| { - f(Expr::Range(ExprRange { - // `..$expr` - attrs: Vec::new(), - start: None, - limits: RangeLimits::HalfOpen(Token![..](span)), - end: Some(Box::new(expr.clone())), - })); - f(Expr::Range(ExprRange { - // `$expr..` - attrs: Vec::new(), - start: Some(Box::new(expr)), - limits: RangeLimits::HalfOpen(Token![..](span)), - end: None, - })); - }); - - // Expr::Reference - iter(depth, &mut |expr| { - f(Expr::Reference(ExprReference { - // `&$expr` - attrs: Vec::new(), - and_token: Token![&](span), - mutability: None, - expr: Box::new(expr), - })); - }); - - // Expr::Return - f(Expr::Return(ExprReturn { - // `return` - attrs: Vec::new(), - return_token: Token![return](span), - expr: None, - })); - iter(depth, &mut |expr| { - f(Expr::Return(ExprReturn { - // `return $expr` - attrs: Vec::new(), - return_token: Token![return](span), - expr: Some(Box::new(expr)), - })); - }); - - // Expr::Try - iter(depth, &mut |expr| { - f(Expr::Try(ExprTry { - // `$expr?` - attrs: Vec::new(), - expr: Box::new(expr), - question_token: Token![?](span), - })); - }); - - // Expr::Unary - iter(depth, &mut |expr| { - for op in [ - UnOp::Deref(Token![*](span)), - //UnOp::Not(Token![!](span)), - //UnOp::Neg(Token![-](span)), - ] { - f(Expr::Unary(ExprUnary { - // `*$expr` - attrs: 
Vec::new(), - op, - expr: Box::new(expr.clone()), - })); - } - }); - - if false { - // Expr::Array - f(Expr::Array(ExprArray { - // `[]` - attrs: Vec::new(), - bracket_token: token::Bracket(span), - elems: Punctuated::new(), - })); - - // Expr::Async - f(Expr::Async(ExprAsync { - // `async {}` - attrs: Vec::new(), - async_token: Token![async](span), - capture: None, - block: Block { - brace_token: token::Brace(span), - stmts: Vec::new(), - }, - })); - - // Expr::Await - iter(depth, &mut |expr| { - f(Expr::Await(ExprAwait { - // `$expr.await` - attrs: Vec::new(), - base: Box::new(expr), - dot_token: Token![.](span), - await_token: Token![await](span), - })); - }); - - // Expr::Block - f(Expr::Block(ExprBlock { - // `'a: {}` - attrs: Vec::new(), - label: Some(Label { - name: Lifetime::new("'a", span), - colon_token: Token![:](span), - }), - block: Block { - brace_token: token::Brace(span), - stmts: Vec::new(), - }, - })); - iter(depth, &mut |expr| { - f(Expr::Block(ExprBlock { - // `{ $expr }` - attrs: Vec::new(), - label: None, - block: Block { - brace_token: token::Brace(span), - stmts: Vec::from([Stmt::Expr(expr.clone(), None)]), - }, - })); - f(Expr::Block(ExprBlock { - // `{ $expr; }` - attrs: Vec::new(), - label: None, - block: Block { - brace_token: token::Brace(span), - stmts: Vec::from([Stmt::Expr(expr, Some(Token![;](span)))]), - }, - })); - }); - - // Expr::Break - f(Expr::Break(ExprBreak { - // `break 'a` - attrs: Vec::new(), - break_token: Token![break](span), - label: Some(Lifetime::new("'a", span)), - expr: None, - })); - iter(depth, &mut |expr| { - f(Expr::Break(ExprBreak { - // `break 'a $expr` - attrs: Vec::new(), - break_token: Token![break](span), - label: Some(Lifetime::new("'a", span)), - expr: Some(Box::new(expr)), - })); - }); - - // Expr::Closure - f(Expr::Closure(ExprClosure { - // `|| -> T {}` - attrs: Vec::new(), - lifetimes: None, - constness: None, - movability: None, - asyncness: None, - capture: None, - or1_token: Token![|](span), - inputs: Punctuated::new(), - or2_token: Token![|](span), - output: ReturnType::Type( - Token![->](span), - Box::new(Type::Path(TypePath { - qself: None, - path: Path::from(Ident::new("T", span)), - })), - ), - body: Box::new(Expr::Block(ExprBlock { - attrs: Vec::new(), - label: None, - block: Block { - brace_token: token::Brace(span), - stmts: Vec::new(), - }, - })), - })); - - // Expr::Const - f(Expr::Const(ExprConst { - // `const {}` - attrs: Vec::new(), - const_token: Token![const](span), - block: Block { - brace_token: token::Brace(span), - stmts: Vec::new(), - }, - })); - - // Expr::Continue - f(Expr::Continue(ExprContinue { - // `continue` - attrs: Vec::new(), - continue_token: Token![continue](span), - label: None, - })); - f(Expr::Continue(ExprContinue { - // `continue 'a` - attrs: Vec::new(), - continue_token: Token![continue](span), - label: Some(Lifetime::new("'a", span)), - })); - - // Expr::ForLoop - iter(depth, &mut |expr| { - f(Expr::ForLoop(ExprForLoop { - // `for _ in $expr {}` - attrs: Vec::new(), - label: None, - for_token: Token![for](span), - pat: Box::new(Pat::Wild(PatWild { - attrs: Vec::new(), - underscore_token: Token![_](span), - })), - in_token: Token![in](span), - expr: Box::new(expr.clone()), - body: Block { - brace_token: token::Brace(span), - stmts: Vec::new(), - }, - })); - f(Expr::ForLoop(ExprForLoop { - // `'a: for _ in $expr {}` - attrs: Vec::new(), - label: Some(Label { - name: Lifetime::new("'a", span), - colon_token: Token![:](span), - }), - for_token: Token![for](span), - pat: 
Box::new(Pat::Wild(PatWild { - attrs: Vec::new(), - underscore_token: Token![_](span), - })), - in_token: Token![in](span), - expr: Box::new(expr), - body: Block { - brace_token: token::Brace(span), - stmts: Vec::new(), - }, - })); - }); - - // Expr::Index - iter(depth, &mut |expr| { - f(Expr::Index(ExprIndex { - // `$expr[0]` - attrs: Vec::new(), - expr: Box::new(expr), - bracket_token: token::Bracket(span), - index: Box::new(Expr::Lit(ExprLit { - attrs: Vec::new(), - lit: Lit::Int(LitInt::new("0", span)), - })), - })); - }); - - // Expr::Loop - f(Expr::Loop(ExprLoop { - // `loop {}` - attrs: Vec::new(), - label: None, - loop_token: Token![loop](span), - body: Block { - brace_token: token::Brace(span), - stmts: Vec::new(), - }, - })); - f(Expr::Loop(ExprLoop { - // `'a: loop {}` - attrs: Vec::new(), - label: Some(Label { - name: Lifetime::new("'a", span), - colon_token: Token![:](span), - }), - loop_token: Token![loop](span), - body: Block { - brace_token: token::Brace(span), - stmts: Vec::new(), - }, - })); - - // Expr::Macro - f(Expr::Macro(ExprMacro { - // `m!()` - attrs: Vec::new(), - mac: Macro { - path: Path::from(Ident::new("m", span)), - bang_token: Token![!](span), - delimiter: MacroDelimiter::Paren(token::Paren(span)), - tokens: TokenStream::new(), - }, - })); - f(Expr::Macro(ExprMacro { - // `m! {}` - attrs: Vec::new(), - mac: Macro { - path: Path::from(Ident::new("m", span)), - bang_token: Token![!](span), - delimiter: MacroDelimiter::Brace(token::Brace(span)), - tokens: TokenStream::new(), - }, - })); - - // Expr::Match - iter(depth, &mut |expr| { - f(Expr::Match(ExprMatch { - // `match $expr {}` - attrs: Vec::new(), - match_token: Token![match](span), - expr: Box::new(expr.clone()), - brace_token: token::Brace(span), - arms: Vec::new(), - })); - f(Expr::Match(ExprMatch { - // `match x { _ => $expr }` - attrs: Vec::new(), - match_token: Token![match](span), - expr: Box::new(Expr::Path(ExprPath { - attrs: Vec::new(), - qself: None, - path: Path::from(Ident::new("x", span)), - })), - brace_token: token::Brace(span), - arms: Vec::from([Arm { - attrs: Vec::new(), - pat: Pat::Wild(PatWild { - attrs: Vec::new(), - underscore_token: Token![_](span), - }), - guard: None, - fat_arrow_token: Token![=>](span), - body: Box::new(expr.clone()), - comma: None, - }]), - })); - f(Expr::Match(ExprMatch { - // `match x { _ if $expr => {} }` - attrs: Vec::new(), - match_token: Token![match](span), - expr: Box::new(Expr::Path(ExprPath { - attrs: Vec::new(), - qself: None, - path: Path::from(Ident::new("x", span)), - })), - brace_token: token::Brace(span), - arms: Vec::from([Arm { - attrs: Vec::new(), - pat: Pat::Wild(PatWild { - attrs: Vec::new(), - underscore_token: Token![_](span), - }), - guard: Some((Token![if](span), Box::new(expr))), - fat_arrow_token: Token![=>](span), - body: Box::new(Expr::Block(ExprBlock { - attrs: Vec::new(), - label: None, - block: Block { - brace_token: token::Brace(span), - stmts: Vec::new(), - }, - })), - comma: None, - }]), - })); - }); - - // Expr::MethodCall - iter(depth, &mut |expr| { - f(Expr::MethodCall(ExprMethodCall { - // `$expr.method()` - attrs: Vec::new(), - receiver: Box::new(expr.clone()), - dot_token: Token![.](span), - method: Ident::new("method", span), - turbofish: None, - paren_token: token::Paren(span), - args: Punctuated::new(), - })); - f(Expr::MethodCall(ExprMethodCall { - // `$expr.method::<T>()` - attrs: Vec::new(), - receiver: Box::new(expr), - dot_token: Token![.](span), - method: Ident::new("method", span), - turbofish: 
Some(AngleBracketedGenericArguments { - colon2_token: Some(Token![::](span)), - lt_token: Token![<](span), - args: Punctuated::from_iter([GenericArgument::Type(Type::Path( - TypePath { - qself: None, - path: Path::from(Ident::new("T", span)), - }, - ))]), - gt_token: Token![>](span), - }), - paren_token: token::Paren(span), - args: Punctuated::new(), - })); - }); - - // Expr::RawAddr - iter(depth, &mut |expr| { - f(Expr::RawAddr(ExprRawAddr { - // `&raw const $expr` - attrs: Vec::new(), - and_token: Token![&](span), - raw: Token![raw](span), - mutability: PointerMutability::Const(Token![const](span)), - expr: Box::new(expr), - })); - }); - - // Expr::Struct - f(Expr::Struct(ExprStruct { - // `Struct {}` - attrs: Vec::new(), - qself: None, - path: Path::from(Ident::new("Struct", span)), - brace_token: token::Brace(span), - fields: Punctuated::new(), - dot2_token: None, - rest: None, - })); - - // Expr::TryBlock - f(Expr::TryBlock(ExprTryBlock { - // `try {}` - attrs: Vec::new(), - try_token: Token![try](span), - block: Block { - brace_token: token::Brace(span), - stmts: Vec::new(), - }, - })); - - // Expr::Unsafe - f(Expr::Unsafe(ExprUnsafe { - // `unsafe {}` - attrs: Vec::new(), - unsafe_token: Token![unsafe](span), - block: Block { - brace_token: token::Brace(span), - stmts: Vec::new(), - }, - })); - - // Expr::While - iter(depth, &mut |expr| { - f(Expr::While(ExprWhile { - // `while $expr {}` - attrs: Vec::new(), - label: None, - while_token: Token![while](span), - cond: Box::new(expr.clone()), - body: Block { - brace_token: token::Brace(span), - stmts: Vec::new(), - }, - })); - f(Expr::While(ExprWhile { - // `'a: while $expr {}` - attrs: Vec::new(), - label: Some(Label { - name: Lifetime::new("'a", span), - colon_token: Token![:](span), - }), - while_token: Token![while](span), - cond: Box::new(expr), - body: Block { - brace_token: token::Brace(span), - stmts: Vec::new(), - }, - })); - }); - - // Expr::Yield - f(Expr::Yield(ExprYield { - // `yield` - attrs: Vec::new(), - yield_token: Token![yield](span), - expr: None, - })); - iter(depth, &mut |expr| { - f(Expr::Yield(ExprYield { - // `yield $expr` - attrs: Vec::new(), - yield_token: Token![yield](span), - expr: Some(Box::new(expr)), - })); - }); - } - } - - let mut failures = 0; - macro_rules! 
fail { - ($($message:tt)*) => {{ - eprintln!($($message)*); - failures += 1; - return; - }}; - } - let mut assert = |mut original: Expr| { - let tokens = original.to_token_stream(); - let Ok(mut parsed) = syn::parse2::<Expr>(tokens.clone()) else { - fail!( - "failed to parse: {}\n{:#?}", - tokens, - crate::debug::Lite(&original), - ); - }; - AsIfPrinted.visit_expr_mut(&mut original); - FlattenParens::combine_attrs().visit_expr_mut(&mut parsed); - if original != parsed { - fail!( - "before: {}\n{:#?}\nafter: {}\n{:#?}", - tokens, - crate::debug::Lite(&original), - parsed.to_token_stream(), - crate::debug::Lite(&parsed), - ); - } - let mut tokens_no_paren = tokens.clone(); - FlattenParens::visit_token_stream_mut(&mut tokens_no_paren); - if tokens.to_string() != tokens_no_paren.to_string() { - if let Ok(mut parsed2) = syn::parse2::<Expr>(tokens_no_paren) { - FlattenParens::combine_attrs().visit_expr_mut(&mut parsed2); - if original == parsed2 { - fail!("redundant parens: {}", tokens); - } - } - } - }; - - iter(4, &mut assert); - if failures > 0 { - eprintln!("FAILURES: {failures}"); - ExitCode::FAILURE - } else { - ExitCode::SUCCESS - } -} diff --git a/vendor/syn/tests/test_generics.rs b/vendor/syn/tests/test_generics.rs deleted file mode 100644 index 2cb05251c16e8c..00000000000000 --- a/vendor/syn/tests/test_generics.rs +++ /dev/null @@ -1,345 +0,0 @@ -#![allow( - clippy::elidable_lifetime_names, - clippy::manual_let_else, - clippy::needless_lifetimes, - clippy::too_many_lines, - clippy::uninlined_format_args -)] - -#[macro_use] -mod snapshot; - -mod debug; - -use quote::quote; -use syn::{ - parse_quote, DeriveInput, GenericParam, Generics, ItemFn, Lifetime, LifetimeParam, - TypeParamBound, WhereClause, WherePredicate, -}; - -#[test] -fn test_split_for_impl() { - let input = quote! { - struct S<'a, 'b: 'a, #[may_dangle] T: 'a = ()> where T: Debug; - }; - - snapshot!(input as DeriveInput, @r#" - DeriveInput { - vis: Visibility::Inherited, - ident: "S", - generics: Generics { - lt_token: Some, - params: [ - GenericParam::Lifetime(LifetimeParam { - lifetime: Lifetime { - ident: "a", - }, - }), - Token![,], - GenericParam::Lifetime(LifetimeParam { - lifetime: Lifetime { - ident: "b", - }, - colon_token: Some, - bounds: [ - Lifetime { - ident: "a", - }, - ], - }), - Token![,], - GenericParam::Type(TypeParam { - attrs: [ - Attribute { - style: AttrStyle::Outer, - meta: Meta::Path { - segments: [ - PathSegment { - ident: "may_dangle", - }, - ], - }, - }, - ], - ident: "T", - colon_token: Some, - bounds: [ - TypeParamBound::Lifetime { - ident: "a", - }, - ], - eq_token: Some, - default: Some(Type::Tuple), - }), - ], - gt_token: Some, - where_clause: Some(WhereClause { - predicates: [ - WherePredicate::Type(PredicateType { - bounded_ty: Type::Path { - path: Path { - segments: [ - PathSegment { - ident: "T", - }, - ], - }, - }, - bounds: [ - TypeParamBound::Trait(TraitBound { - path: Path { - segments: [ - PathSegment { - ident: "Debug", - }, - ], - }, - }), - ], - }), - ], - }), - }, - data: Data::Struct { - fields: Fields::Unit, - semi_token: Some, - }, - } - "#); - - let generics = input.generics; - let (impl_generics, ty_generics, where_clause) = generics.split_for_impl(); - - let generated = quote! { - impl #impl_generics MyTrait for Test #ty_generics #where_clause {} - }; - let expected = quote! 
{ - impl<'a, 'b: 'a, #[may_dangle] T: 'a> MyTrait - for Test<'a, 'b, T> - where - T: Debug - {} - }; - assert_eq!(generated.to_string(), expected.to_string()); - - let turbofish = ty_generics.as_turbofish(); - let generated = quote! { - Test #turbofish - }; - let expected = quote! { - Test::<'a, 'b, T> - }; - assert_eq!(generated.to_string(), expected.to_string()); -} - -#[test] -fn test_type_param_bound() { - let tokens = quote!('a); - snapshot!(tokens as TypeParamBound, @r#" - TypeParamBound::Lifetime { - ident: "a", - } - "#); - - let tokens = quote!('_); - snapshot!(tokens as TypeParamBound, @r#" - TypeParamBound::Lifetime { - ident: "_", - } - "#); - - let tokens = quote!(Debug); - snapshot!(tokens as TypeParamBound, @r#" - TypeParamBound::Trait(TraitBound { - path: Path { - segments: [ - PathSegment { - ident: "Debug", - }, - ], - }, - }) - "#); - - let tokens = quote!(?Sized); - snapshot!(tokens as TypeParamBound, @r#" - TypeParamBound::Trait(TraitBound { - modifier: TraitBoundModifier::Maybe, - path: Path { - segments: [ - PathSegment { - ident: "Sized", - }, - ], - }, - }) - "#); - - let tokens = quote!(for<'a> Trait); - snapshot!(tokens as TypeParamBound, @r#" - TypeParamBound::Trait(TraitBound { - lifetimes: Some(BoundLifetimes { - lifetimes: [ - GenericParam::Lifetime(LifetimeParam { - lifetime: Lifetime { - ident: "a", - }, - }), - ], - }), - path: Path { - segments: [ - PathSegment { - ident: "Trait", - }, - ], - }, - }) - "#); - - let tokens = quote!(for<> ?Trait); - let err = syn::parse2::<TypeParamBound>(tokens).unwrap_err(); - assert_eq!( - "`for<...>` binder not allowed with `?` trait polarity modifier", - err.to_string(), - ); - - let tokens = quote!(?for<> Trait); - let err = syn::parse2::<TypeParamBound>(tokens).unwrap_err(); - assert_eq!( - "`for<...>` binder not allowed with `?` trait polarity modifier", - err.to_string(), - ); -} - -#[test] -fn test_fn_precedence_in_where_clause() { - // This should parse as two separate bounds, `FnOnce() -> i32` and `Send` - not - // `FnOnce() -> (i32 + Send)`. - let input = quote! 
{ - fn f<G>() - where - G: FnOnce() -> i32 + Send, - { - } - }; - - snapshot!(input as ItemFn, @r#" - ItemFn { - vis: Visibility::Inherited, - sig: Signature { - ident: "f", - generics: Generics { - lt_token: Some, - params: [ - GenericParam::Type(TypeParam { - ident: "G", - }), - ], - gt_token: Some, - where_clause: Some(WhereClause { - predicates: [ - WherePredicate::Type(PredicateType { - bounded_ty: Type::Path { - path: Path { - segments: [ - PathSegment { - ident: "G", - }, - ], - }, - }, - bounds: [ - TypeParamBound::Trait(TraitBound { - path: Path { - segments: [ - PathSegment { - ident: "FnOnce", - arguments: PathArguments::Parenthesized { - output: ReturnType::Type( - Type::Path { - path: Path { - segments: [ - PathSegment { - ident: "i32", - }, - ], - }, - }, - ), - }, - }, - ], - }, - }), - Token![+], - TypeParamBound::Trait(TraitBound { - path: Path { - segments: [ - PathSegment { - ident: "Send", - }, - ], - }, - }), - ], - }), - Token![,], - ], - }), - }, - output: ReturnType::Default, - }, - block: Block { - stmts: [], - }, - } - "#); - - let where_clause = input.sig.generics.where_clause.as_ref().unwrap(); - assert_eq!(where_clause.predicates.len(), 1); - - let predicate = match &where_clause.predicates[0] { - WherePredicate::Type(pred) => pred, - _ => panic!("wrong predicate kind"), - }; - - assert_eq!(predicate.bounds.len(), 2, "{:#?}", predicate.bounds); - - let first_bound = &predicate.bounds[0]; - assert_eq!(quote!(#first_bound).to_string(), "FnOnce () -> i32"); - - let second_bound = &predicate.bounds[1]; - assert_eq!(quote!(#second_bound).to_string(), "Send"); -} - -#[test] -fn test_where_clause_at_end_of_input() { - let input = quote! { - where - }; - - snapshot!(input as WhereClause, @"WhereClause"); - - assert_eq!(input.predicates.len(), 0); -} - -// Regression test for https://github.com/dtolnay/syn/issues/1718 -#[test] -#[allow(clippy::map_unwrap_or)] -fn no_opaque_drop() { - let mut generics = Generics::default(); - - let _ = generics - .lifetimes() - .next() - .map(|param| param.lifetime.clone()) - .unwrap_or_else(|| { - let lifetime: Lifetime = parse_quote!('a); - generics.params.insert( - 0, - GenericParam::Lifetime(LifetimeParam::new(lifetime.clone())), - ); - lifetime - }); -} diff --git a/vendor/syn/tests/test_grouping.rs b/vendor/syn/tests/test_grouping.rs deleted file mode 100644 index b466c7e7217e09..00000000000000 --- a/vendor/syn/tests/test_grouping.rs +++ /dev/null @@ -1,59 +0,0 @@ -#![allow( - clippy::elidable_lifetime_names, - clippy::needless_lifetimes, - clippy::uninlined_format_args -)] - -#[macro_use] -mod snapshot; - -mod debug; - -use proc_macro2::{Delimiter, Group, Literal, Punct, Spacing, TokenStream, TokenTree}; -use syn::Expr; - -#[test] -fn test_grouping() { - let tokens: TokenStream = TokenStream::from_iter([ - TokenTree::Literal(Literal::i32_suffixed(1)), - TokenTree::Punct(Punct::new('+', Spacing::Alone)), - TokenTree::Group(Group::new( - Delimiter::None, - TokenStream::from_iter([ - TokenTree::Literal(Literal::i32_suffixed(2)), - TokenTree::Punct(Punct::new('+', Spacing::Alone)), - TokenTree::Literal(Literal::i32_suffixed(3)), - ]), - )), - TokenTree::Punct(Punct::new('*', Spacing::Alone)), - TokenTree::Literal(Literal::i32_suffixed(4)), - ]); - - assert_eq!(tokens.to_string(), "1i32 + 2i32 + 3i32 * 4i32"); - - snapshot!(tokens as Expr, @r#" - Expr::Binary { - left: Expr::Lit { - lit: 1i32, - }, - op: BinOp::Add, - right: Expr::Binary { - left: Expr::Group { - expr: Expr::Binary { - left: Expr::Lit { - lit: 2i32, - }, - op: 
BinOp::Add, - right: Expr::Lit { - lit: 3i32, - }, - }, - }, - op: BinOp::Mul, - right: Expr::Lit { - lit: 4i32, - }, - }, - } - "#); -} diff --git a/vendor/syn/tests/test_ident.rs b/vendor/syn/tests/test_ident.rs deleted file mode 100644 index 10df0ad56c2ad6..00000000000000 --- a/vendor/syn/tests/test_ident.rs +++ /dev/null @@ -1,87 +0,0 @@ -use proc_macro2::{Ident, Span, TokenStream}; -use std::str::FromStr; -use syn::Result; - -#[track_caller] -fn parse(s: &str) -> Result<Ident> { - syn::parse2(TokenStream::from_str(s).unwrap()) -} - -#[track_caller] -fn new(s: &str) -> Ident { - Ident::new(s, Span::call_site()) -} - -#[test] -fn ident_parse() { - parse("String").unwrap(); -} - -#[test] -fn ident_parse_keyword() { - parse("abstract").unwrap_err(); -} - -#[test] -fn ident_parse_empty() { - parse("").unwrap_err(); -} - -#[test] -fn ident_parse_lifetime() { - parse("'static").unwrap_err(); -} - -#[test] -fn ident_parse_underscore() { - parse("_").unwrap_err(); -} - -#[test] -fn ident_parse_number() { - parse("255").unwrap_err(); -} - -#[test] -fn ident_parse_invalid() { - parse("a#").unwrap_err(); -} - -#[test] -fn ident_new() { - new("String"); -} - -#[test] -fn ident_new_keyword() { - new("abstract"); -} - -#[test] -#[should_panic(expected = "use Option<Ident>")] -fn ident_new_empty() { - new(""); -} - -#[test] -#[should_panic(expected = "not a valid Ident")] -fn ident_new_lifetime() { - new("'static"); -} - -#[test] -fn ident_new_underscore() { - new("_"); -} - -#[test] -#[should_panic(expected = "use Literal instead")] -fn ident_new_number() { - new("255"); -} - -#[test] -#[should_panic(expected = "\"a#\" is not a valid Ident")] -fn ident_new_invalid() { - new("a#"); -} diff --git a/vendor/syn/tests/test_item.rs b/vendor/syn/tests/test_item.rs deleted file mode 100644 index d9a7b5b6b08b60..00000000000000 --- a/vendor/syn/tests/test_item.rs +++ /dev/null @@ -1,316 +0,0 @@ -#![allow( - clippy::elidable_lifetime_names, - clippy::needless_lifetimes, - clippy::uninlined_format_args -)] - -#[macro_use] -mod snapshot; - -mod debug; - -use proc_macro2::{Delimiter, Group, Ident, Span, TokenStream, TokenTree}; -use quote::quote; -use syn::{Item, ItemTrait}; - -#[test] -fn test_macro_variable_attr() { - // mimics the token stream corresponding to `$attr fn f() {}` - let tokens = TokenStream::from_iter([ - TokenTree::Group(Group::new(Delimiter::None, quote! { #[test] })), - TokenTree::Ident(Ident::new("fn", Span::call_site())), - TokenTree::Ident(Ident::new("f", Span::call_site())), - TokenTree::Group(Group::new(Delimiter::Parenthesis, TokenStream::new())), - TokenTree::Group(Group::new(Delimiter::Brace, TokenStream::new())), - ]); - - snapshot!(tokens as Item, @r#" - Item::Fn { - attrs: [ - Attribute { - style: AttrStyle::Outer, - meta: Meta::Path { - segments: [ - PathSegment { - ident: "test", - }, - ], - }, - }, - ], - vis: Visibility::Inherited, - sig: Signature { - ident: "f", - generics: Generics, - output: ReturnType::Default, - }, - block: Block { - stmts: [], - }, - } - "#); -} - -#[test] -fn test_negative_impl() { - #[cfg(any())] - impl ! {} - let tokens = quote! { - impl ! {} - }; - snapshot!(tokens as Item, @r#" - Item::Impl { - generics: Generics, - self_ty: Type::Never, - } - "#); - - let tokens = quote! { - impl !Trait {} - }; - let err = syn::parse2::<Item>(tokens).unwrap_err(); - assert_eq!(err.to_string(), "inherent impls cannot be negative"); - - #[cfg(any())] - impl !Trait for T {} - let tokens = quote! 
{ - impl !Trait for T {} - }; - snapshot!(tokens as Item, @r#" - Item::Impl { - generics: Generics, - trait_: Some(( - Some, - Path { - segments: [ - PathSegment { - ident: "Trait", - }, - ], - }, - )), - self_ty: Type::Path { - path: Path { - segments: [ - PathSegment { - ident: "T", - }, - ], - }, - }, - } - "#); -} - -#[test] -fn test_macro_variable_impl() { - // mimics the token stream corresponding to `impl $trait for $ty {}` - let tokens = TokenStream::from_iter([ - TokenTree::Ident(Ident::new("impl", Span::call_site())), - TokenTree::Group(Group::new(Delimiter::None, quote!(Trait))), - TokenTree::Ident(Ident::new("for", Span::call_site())), - TokenTree::Group(Group::new(Delimiter::None, quote!(Type))), - TokenTree::Group(Group::new(Delimiter::Brace, TokenStream::new())), - ]); - - snapshot!(tokens as Item, @r#" - Item::Impl { - generics: Generics, - trait_: Some(( - None, - Path { - segments: [ - PathSegment { - ident: "Trait", - }, - ], - }, - )), - self_ty: Type::Group { - elem: Type::Path { - path: Path { - segments: [ - PathSegment { - ident: "Type", - }, - ], - }, - }, - }, - } - "#); -} - -#[test] -fn test_supertraits() { - // Rustc parses all of the following. - - #[rustfmt::skip] - let tokens = quote!(trait Trait where {}); - snapshot!(tokens as ItemTrait, @r#" - ItemTrait { - vis: Visibility::Inherited, - ident: "Trait", - generics: Generics { - where_clause: Some(WhereClause), - }, - } - "#); - - #[rustfmt::skip] - let tokens = quote!(trait Trait: where {}); - snapshot!(tokens as ItemTrait, @r#" - ItemTrait { - vis: Visibility::Inherited, - ident: "Trait", - generics: Generics { - where_clause: Some(WhereClause), - }, - colon_token: Some, - } - "#); - - #[rustfmt::skip] - let tokens = quote!(trait Trait: Sized where {}); - snapshot!(tokens as ItemTrait, @r#" - ItemTrait { - vis: Visibility::Inherited, - ident: "Trait", - generics: Generics { - where_clause: Some(WhereClause), - }, - colon_token: Some, - supertraits: [ - TypeParamBound::Trait(TraitBound { - path: Path { - segments: [ - PathSegment { - ident: "Sized", - }, - ], - }, - }), - ], - } - "#); - - #[rustfmt::skip] - let tokens = quote!(trait Trait: Sized + where {}); - snapshot!(tokens as ItemTrait, @r#" - ItemTrait { - vis: Visibility::Inherited, - ident: "Trait", - generics: Generics { - where_clause: Some(WhereClause), - }, - colon_token: Some, - supertraits: [ - TypeParamBound::Trait(TraitBound { - path: Path { - segments: [ - PathSegment { - ident: "Sized", - }, - ], - }, - }), - Token![+], - ], - } - "#); -} - -#[test] -fn test_type_empty_bounds() { - #[rustfmt::skip] - let tokens = quote! { - trait Foo { - type Bar: ; - } - }; - - snapshot!(tokens as ItemTrait, @r#" - ItemTrait { - vis: Visibility::Inherited, - ident: "Foo", - generics: Generics, - items: [ - TraitItem::Type { - ident: "Bar", - generics: Generics, - colon_token: Some, - }, - ], - } - "#); -} - -#[test] -fn test_impl_visibility() { - let tokens = quote! { - pub default unsafe impl union {} - }; - - snapshot!(tokens as Item, @"Item::Verbatim(`pub default unsafe impl union { }`)"); -} - -#[test] -fn test_impl_type_parameter_defaults() { - #[cfg(any())] - impl<T = ()> () {} - let tokens = quote! 
{ - impl<T = ()> () {} - }; - snapshot!(tokens as Item, @r#" - Item::Impl { - generics: Generics { - lt_token: Some, - params: [ - GenericParam::Type(TypeParam { - ident: "T", - eq_token: Some, - default: Some(Type::Tuple), - }), - ], - gt_token: Some, - }, - self_ty: Type::Tuple, - } - "#); -} - -#[test] -fn test_impl_trait_trailing_plus() { - let tokens = quote! { - fn f() -> impl Sized + {} - }; - - snapshot!(tokens as Item, @r#" - Item::Fn { - vis: Visibility::Inherited, - sig: Signature { - ident: "f", - generics: Generics, - output: ReturnType::Type( - Type::ImplTrait { - bounds: [ - TypeParamBound::Trait(TraitBound { - path: Path { - segments: [ - PathSegment { - ident: "Sized", - }, - ], - }, - }), - Token![+], - ], - }, - ), - }, - block: Block { - stmts: [], - }, - } - "#); -} diff --git a/vendor/syn/tests/test_lit.rs b/vendor/syn/tests/test_lit.rs deleted file mode 100644 index f2367b44165daf..00000000000000 --- a/vendor/syn/tests/test_lit.rs +++ /dev/null @@ -1,335 +0,0 @@ -#![allow( - clippy::elidable_lifetime_names, - clippy::float_cmp, - clippy::needless_lifetimes, - clippy::needless_raw_string_hashes, - clippy::non_ascii_literal, - clippy::single_match_else, - clippy::uninlined_format_args -)] - -#[macro_use] -mod snapshot; - -mod debug; - -use proc_macro2::{Delimiter, Group, Literal, Span, TokenStream, TokenTree}; -use quote::ToTokens; -use std::ffi::CStr; -use std::str::FromStr; -use syn::{Lit, LitFloat, LitInt, LitStr}; - -#[track_caller] -fn lit(s: &str) -> Lit { - let mut tokens = TokenStream::from_str(s).unwrap().into_iter(); - match tokens.next().unwrap() { - TokenTree::Literal(lit) => { - assert!(tokens.next().is_none()); - Lit::new(lit) - } - wrong => panic!("{:?}", wrong), - } -} - -#[test] -fn strings() { - #[track_caller] - fn test_string(s: &str, value: &str) { - let s = s.trim(); - match lit(s) { - Lit::Str(lit) => { - assert_eq!(lit.value(), value); - let again = lit.into_token_stream().to_string(); - if again != s { - test_string(&again, value); - } - } - wrong => panic!("{:?}", wrong), - } - } - - test_string(r#" "" "#, ""); - test_string(r#" "a" "#, "a"); - test_string(r#" "\n" "#, "\n"); - test_string(r#" "\r" "#, "\r"); - test_string(r#" "\t" "#, "\t"); - test_string(r#" "🐕" "#, "🐕"); // NOTE: This is an emoji - test_string(r#" "\"" "#, "\""); - test_string(r#" "'" "#, "'"); - test_string(r#" "\u{1F415}" "#, "\u{1F415}"); - test_string(r#" "\u{1_2__3_}" "#, "\u{123}"); - test_string( - "\"contains\nnewlines\\\nescaped newlines\"", - "contains\nnewlinesescaped newlines", - ); - test_string( - "\"escaped newline\\\n \x0C unsupported whitespace\"", - "escaped newline\x0C unsupported whitespace", - ); - test_string("r\"raw\nstring\\\nhere\"", "raw\nstring\\\nhere"); - test_string("\"...\"q", "..."); - test_string("r\"...\"q", "..."); - test_string("r##\"...\"##q", "..."); -} - -#[test] -fn byte_strings() { - #[track_caller] - fn test_byte_string(s: &str, value: &[u8]) { - let s = s.trim(); - match lit(s) { - Lit::ByteStr(lit) => { - assert_eq!(lit.value(), value); - let again = lit.into_token_stream().to_string(); - if again != s { - test_byte_string(&again, value); - } - } - wrong => panic!("{:?}", wrong), - } - } - - test_byte_string(r#" b"" "#, b""); - test_byte_string(r#" b"a" "#, b"a"); - test_byte_string(r#" b"\n" "#, b"\n"); - test_byte_string(r#" b"\r" "#, b"\r"); - test_byte_string(r#" b"\t" "#, b"\t"); - test_byte_string(r#" b"\"" "#, b"\""); - test_byte_string(r#" b"'" "#, b"'"); - test_byte_string( - "b\"contains\nnewlines\\\nescaped newlines\"", 
- b"contains\nnewlinesescaped newlines", - ); - test_byte_string("br\"raw\nstring\\\nhere\"", b"raw\nstring\\\nhere"); - test_byte_string("b\"...\"q", b"..."); - test_byte_string("br\"...\"q", b"..."); - test_byte_string("br##\"...\"##q", b"..."); -} - -#[test] -fn c_strings() { - #[track_caller] - fn test_c_string(s: &str, value: &CStr) { - let s = s.trim(); - match lit(s) { - Lit::CStr(lit) => { - assert_eq!(*lit.value(), *value); - let again = lit.into_token_stream().to_string(); - if again != s { - test_c_string(&again, value); - } - } - wrong => panic!("{:?}", wrong), - } - } - - test_c_string(r#" c"" "#, c""); - test_c_string(r#" c"a" "#, c"a"); - test_c_string(r#" c"\n" "#, c"\n"); - test_c_string(r#" c"\r" "#, c"\r"); - test_c_string(r#" c"\t" "#, c"\t"); - test_c_string(r#" c"\\" "#, c"\\"); - test_c_string(r#" c"\'" "#, c"'"); - test_c_string(r#" c"\"" "#, c"\""); - test_c_string( - "c\"contains\nnewlines\\\nescaped newlines\"", - c"contains\nnewlinesescaped newlines", - ); - test_c_string("cr\"raw\nstring\\\nhere\"", c"raw\nstring\\\nhere"); - test_c_string("c\"...\"q", c"..."); - test_c_string("cr\"...\"", c"..."); - test_c_string("cr##\"...\"##", c"..."); - test_c_string( - r#" c"hello\x80我叫\u{1F980}" "#, // from the RFC - c"hello\x80我叫\u{1F980}", - ); -} - -#[test] -fn bytes() { - #[track_caller] - fn test_byte(s: &str, value: u8) { - let s = s.trim(); - match lit(s) { - Lit::Byte(lit) => { - assert_eq!(lit.value(), value); - let again = lit.into_token_stream().to_string(); - assert_eq!(again, s); - } - wrong => panic!("{:?}", wrong), - } - } - - test_byte(r#" b'a' "#, b'a'); - test_byte(r#" b'\n' "#, b'\n'); - test_byte(r#" b'\r' "#, b'\r'); - test_byte(r#" b'\t' "#, b'\t'); - test_byte(r#" b'\'' "#, b'\''); - test_byte(r#" b'"' "#, b'"'); - test_byte(r#" b'a'q "#, b'a'); -} - -#[test] -fn chars() { - #[track_caller] - fn test_char(s: &str, value: char) { - let s = s.trim(); - match lit(s) { - Lit::Char(lit) => { - assert_eq!(lit.value(), value); - let again = lit.into_token_stream().to_string(); - if again != s { - test_char(&again, value); - } - } - wrong => panic!("{:?}", wrong), - } - } - - test_char(r#" 'a' "#, 'a'); - test_char(r#" '\n' "#, '\n'); - test_char(r#" '\r' "#, '\r'); - test_char(r#" '\t' "#, '\t'); - test_char(r#" '🐕' "#, '🐕'); // NOTE: This is an emoji - test_char(r#" '\'' "#, '\''); - test_char(r#" '"' "#, '"'); - test_char(r#" '\u{1F415}' "#, '\u{1F415}'); - test_char(r#" 'a'q "#, 'a'); -} - -#[test] -fn ints() { - #[track_caller] - fn test_int(s: &str, value: u64, suffix: &str) { - match lit(s) { - Lit::Int(lit) => { - assert_eq!(lit.base10_digits().parse::<u64>().unwrap(), value); - assert_eq!(lit.suffix(), suffix); - let again = lit.into_token_stream().to_string(); - if again != s { - test_int(&again, value, suffix); - } - } - wrong => panic!("{:?}", wrong), - } - } - - test_int("5", 5, ""); - test_int("5u32", 5, "u32"); - test_int("0E", 0, "E"); - test_int("0ECMA", 0, "ECMA"); - test_int("0o0A", 0, "A"); - test_int("5_0", 50, ""); - test_int("5_____0_____", 50, ""); - test_int("0x7f", 127, ""); - test_int("0x7F", 127, ""); - test_int("0b1001", 9, ""); - test_int("0o73", 59, ""); - test_int("0x7Fu8", 127, "u8"); - test_int("0b1001i8", 9, "i8"); - test_int("0o73u32", 59, "u32"); - test_int("0x__7___f_", 127, ""); - test_int("0x__7___F_", 127, ""); - test_int("0b_1_0__01", 9, ""); - test_int("0o_7__3", 59, ""); - test_int("0x_7F__u8", 127, "u8"); - test_int("0b__10__0_1i8", 9, "i8"); - test_int("0o__7__________________3u32", 59, "u32"); - 
test_int("0e1\u{5c5}", 0, "e1\u{5c5}"); -} - -#[test] -fn floats() { - #[track_caller] - fn test_float(s: &str, value: f64, suffix: &str) { - match lit(s) { - Lit::Float(lit) => { - assert_eq!(lit.base10_digits().parse::<f64>().unwrap(), value); - assert_eq!(lit.suffix(), suffix); - let again = lit.into_token_stream().to_string(); - if again != s { - test_float(&again, value, suffix); - } - } - wrong => panic!("{:?}", wrong), - } - } - - test_float("5.5", 5.5, ""); - test_float("5.5E12", 5.5e12, ""); - test_float("5.5e12", 5.5e12, ""); - test_float("1.0__3e-12", 1.03e-12, ""); - test_float("1.03e+12", 1.03e12, ""); - test_float("9e99e99", 9e99, "e99"); - test_float("1e_0", 1.0, ""); - test_float("0.0ECMA", 0.0, "ECMA"); -} - -#[test] -fn negative() { - let span = Span::call_site(); - assert_eq!("-1", LitInt::new("-1", span).to_string()); - assert_eq!("-1i8", LitInt::new("-1i8", span).to_string()); - assert_eq!("-1i16", LitInt::new("-1i16", span).to_string()); - assert_eq!("-1i32", LitInt::new("-1i32", span).to_string()); - assert_eq!("-1i64", LitInt::new("-1i64", span).to_string()); - assert_eq!("-1.5", LitFloat::new("-1.5", span).to_string()); - assert_eq!("-1.5f32", LitFloat::new("-1.5f32", span).to_string()); - assert_eq!("-1.5f64", LitFloat::new("-1.5f64", span).to_string()); -} - -#[test] -fn suffix() { - #[track_caller] - fn get_suffix(token: &str) -> String { - let lit = syn::parse_str::<Lit>(token).unwrap(); - match lit { - Lit::Str(lit) => lit.suffix().to_owned(), - Lit::ByteStr(lit) => lit.suffix().to_owned(), - Lit::CStr(lit) => lit.suffix().to_owned(), - Lit::Byte(lit) => lit.suffix().to_owned(), - Lit::Char(lit) => lit.suffix().to_owned(), - Lit::Int(lit) => lit.suffix().to_owned(), - Lit::Float(lit) => lit.suffix().to_owned(), - _ => unimplemented!(), - } - } - - assert_eq!(get_suffix("\"\"s"), "s"); - assert_eq!(get_suffix("r\"\"r"), "r"); - assert_eq!(get_suffix("r#\"\"#r"), "r"); - assert_eq!(get_suffix("b\"\"b"), "b"); - assert_eq!(get_suffix("br\"\"br"), "br"); - assert_eq!(get_suffix("br#\"\"#br"), "br"); - assert_eq!(get_suffix("c\"\"c"), "c"); - assert_eq!(get_suffix("cr\"\"cr"), "cr"); - assert_eq!(get_suffix("cr#\"\"#cr"), "cr"); - assert_eq!(get_suffix("'c'c"), "c"); - assert_eq!(get_suffix("b'b'b"), "b"); - assert_eq!(get_suffix("1i32"), "i32"); - assert_eq!(get_suffix("1_i32"), "i32"); - assert_eq!(get_suffix("1.0f32"), "f32"); - assert_eq!(get_suffix("1.0_f32"), "f32"); -} - -#[test] -fn test_deep_group_empty() { - let tokens = TokenStream::from_iter([TokenTree::Group(Group::new( - Delimiter::None, - TokenStream::from_iter([TokenTree::Group(Group::new( - Delimiter::None, - TokenStream::from_iter([TokenTree::Literal(Literal::string("hi"))]), - ))]), - ))]); - - snapshot!(tokens as Lit, @r#""hi""# ); -} - -#[test] -fn test_error() { - let err = syn::parse_str::<LitStr>("...").unwrap_err(); - assert_eq!("expected string literal", err.to_string()); - - let err = syn::parse_str::<LitStr>("5").unwrap_err(); - assert_eq!("expected string literal", err.to_string()); -} diff --git a/vendor/syn/tests/test_meta.rs b/vendor/syn/tests/test_meta.rs deleted file mode 100644 index 4e1f9caf38e0c1..00000000000000 --- a/vendor/syn/tests/test_meta.rs +++ /dev/null @@ -1,180 +0,0 @@ -#![allow( - clippy::elidable_lifetime_names, - clippy::needless_lifetimes, - clippy::shadow_unrelated, - clippy::too_many_lines, - clippy::uninlined_format_args -)] - -#[macro_use] -mod snapshot; - -mod debug; - -use quote::quote; -use syn::parse::{ParseStream, Parser as _, Result}; -use syn::{Meta, 
MetaList, MetaNameValue, Token}; - -#[test] -fn test_parse_meta_item_word() { - let input = "hello"; - - snapshot!(input as Meta, @r#" - Meta::Path { - segments: [ - PathSegment { - ident: "hello", - }, - ], - } - "#); -} - -#[test] -fn test_parse_meta_name_value() { - let input = "foo = 5"; - let (inner, meta) = (input, input); - - snapshot!(inner as MetaNameValue, @r#" - MetaNameValue { - path: Path { - segments: [ - PathSegment { - ident: "foo", - }, - ], - }, - value: Expr::Lit { - lit: 5, - }, - } - "#); - - snapshot!(meta as Meta, @r#" - Meta::NameValue { - path: Path { - segments: [ - PathSegment { - ident: "foo", - }, - ], - }, - value: Expr::Lit { - lit: 5, - }, - } - "#); - - assert_eq!(meta, Meta::NameValue(inner)); -} - -#[test] -fn test_parse_meta_item_list_lit() { - let input = "foo(5)"; - let (inner, meta) = (input, input); - - snapshot!(inner as MetaList, @r#" - MetaList { - path: Path { - segments: [ - PathSegment { - ident: "foo", - }, - ], - }, - delimiter: MacroDelimiter::Paren, - tokens: TokenStream(`5`), - } - "#); - - snapshot!(meta as Meta, @r#" - Meta::List { - path: Path { - segments: [ - PathSegment { - ident: "foo", - }, - ], - }, - delimiter: MacroDelimiter::Paren, - tokens: TokenStream(`5`), - } - "#); - - assert_eq!(meta, Meta::List(inner)); -} - -#[test] -fn test_parse_meta_item_multiple() { - let input = "foo(word, name = 5, list(name2 = 6), word2)"; - let (inner, meta) = (input, input); - - snapshot!(inner as MetaList, @r#" - MetaList { - path: Path { - segments: [ - PathSegment { - ident: "foo", - }, - ], - }, - delimiter: MacroDelimiter::Paren, - tokens: TokenStream(`word , name = 5 , list (name2 = 6) , word2`), - } - "#); - - snapshot!(meta as Meta, @r#" - Meta::List { - path: Path { - segments: [ - PathSegment { - ident: "foo", - }, - ], - }, - delimiter: MacroDelimiter::Paren, - tokens: TokenStream(`word , name = 5 , list (name2 = 6) , word2`), - } - "#); - - assert_eq!(meta, Meta::List(inner)); -} - -#[test] -fn test_parse_path() { - let input = "::serde::Serialize"; - snapshot!(input as Meta, @r#" - Meta::Path { - leading_colon: Some, - segments: [ - PathSegment { - ident: "serde", - }, - Token![::], - PathSegment { - ident: "Serialize", - }, - ], - } - "#); -} - -#[test] -fn test_fat_arrow_after_meta() { - fn parse(input: ParseStream) -> Result<()> { - while !input.is_empty() { - let _: Meta = input.parse()?; - let _: Token![=>] = input.parse()?; - let brace; - syn::braced!(brace in input); - } - Ok(()) - } - - let input = quote! 
{ - target_os = "linux" => {} - windows => {} - }; - - parse.parse2(input).unwrap(); -} diff --git a/vendor/syn/tests/test_parse_buffer.rs b/vendor/syn/tests/test_parse_buffer.rs deleted file mode 100644 index 62abc6d2825407..00000000000000 --- a/vendor/syn/tests/test_parse_buffer.rs +++ /dev/null @@ -1,103 +0,0 @@ -#![allow(clippy::non_ascii_literal)] - -use proc_macro2::{Delimiter, Group, Ident, Punct, Spacing, TokenStream, TokenTree}; -use std::panic; -use syn::parse::discouraged::Speculative as _; -use syn::parse::{Parse, ParseStream, Parser, Result}; -use syn::{parenthesized, Token}; - -#[test] -#[should_panic(expected = "fork was not derived from the advancing parse stream")] -fn smuggled_speculative_cursor_between_sources() { - struct BreakRules; - impl Parse for BreakRules { - fn parse(input1: ParseStream) -> Result<Self> { - let nested = |input2: ParseStream| { - input1.advance_to(input2); - Ok(Self) - }; - nested.parse_str("") - } - } - - syn::parse_str::<BreakRules>("").unwrap(); -} - -#[test] -#[should_panic(expected = "fork was not derived from the advancing parse stream")] -fn smuggled_speculative_cursor_between_brackets() { - struct BreakRules; - impl Parse for BreakRules { - fn parse(input: ParseStream) -> Result<Self> { - let a; - let b; - parenthesized!(a in input); - parenthesized!(b in input); - a.advance_to(&b); - Ok(Self) - } - } - - syn::parse_str::<BreakRules>("()()").unwrap(); -} - -#[test] -#[should_panic(expected = "fork was not derived from the advancing parse stream")] -fn smuggled_speculative_cursor_into_brackets() { - struct BreakRules; - impl Parse for BreakRules { - fn parse(input: ParseStream) -> Result<Self> { - let a; - parenthesized!(a in input); - input.advance_to(&a); - Ok(Self) - } - } - - syn::parse_str::<BreakRules>("()").unwrap(); -} - -#[test] -fn trailing_empty_none_group() { - fn parse(input: ParseStream) -> Result<()> { - input.parse::<Token![+]>()?; - - let content; - parenthesized!(content in input); - content.parse::<Token![+]>()?; - - Ok(()) - } - - // `+ ( + «∅ ∅» ) «∅ «∅ ∅» ∅»` - let tokens = TokenStream::from_iter([ - TokenTree::Punct(Punct::new('+', Spacing::Alone)), - TokenTree::Group(Group::new( - Delimiter::Parenthesis, - TokenStream::from_iter([ - TokenTree::Punct(Punct::new('+', Spacing::Alone)), - TokenTree::Group(Group::new(Delimiter::None, TokenStream::new())), - ]), - )), - TokenTree::Group(Group::new(Delimiter::None, TokenStream::new())), - TokenTree::Group(Group::new( - Delimiter::None, - TokenStream::from_iter([TokenTree::Group(Group::new( - Delimiter::None, - TokenStream::new(), - ))]), - )), - ]); - - parse.parse2(tokens).unwrap(); -} - -#[test] -fn test_unwind_safe() { - fn parse(input: ParseStream) -> Result<Ident> { - let thread_result = panic::catch_unwind(|| input.parse()); - thread_result.unwrap() - } - - parse.parse_str("throw").unwrap(); -} diff --git a/vendor/syn/tests/test_parse_quote.rs b/vendor/syn/tests/test_parse_quote.rs deleted file mode 100644 index 600870bab58a43..00000000000000 --- a/vendor/syn/tests/test_parse_quote.rs +++ /dev/null @@ -1,172 +0,0 @@ -#![allow( - clippy::elidable_lifetime_names, - clippy::needless_lifetimes, - clippy::uninlined_format_args -)] - -#[macro_use] -mod snapshot; - -mod debug; - -use syn::punctuated::Punctuated; -use syn::{parse_quote, Attribute, Field, Lit, Pat, Stmt, Token}; - -#[test] -fn test_attribute() { - let attr: Attribute = parse_quote!(#[test]); - snapshot!(attr, @r#" - Attribute { - style: AttrStyle::Outer, - meta: Meta::Path { - segments: [ - PathSegment { - 
ident: "test", - }, - ], - }, - } - "#); - - let attr: Attribute = parse_quote!(#![no_std]); - snapshot!(attr, @r#" - Attribute { - style: AttrStyle::Inner, - meta: Meta::Path { - segments: [ - PathSegment { - ident: "no_std", - }, - ], - }, - } - "#); -} - -#[test] -fn test_field() { - let field: Field = parse_quote!(pub enabled: bool); - snapshot!(field, @r#" - Field { - vis: Visibility::Public, - ident: Some("enabled"), - colon_token: Some, - ty: Type::Path { - path: Path { - segments: [ - PathSegment { - ident: "bool", - }, - ], - }, - }, - } - "#); - - let field: Field = parse_quote!(primitive::bool); - snapshot!(field, @r#" - Field { - vis: Visibility::Inherited, - ty: Type::Path { - path: Path { - segments: [ - PathSegment { - ident: "primitive", - }, - Token![::], - PathSegment { - ident: "bool", - }, - ], - }, - }, - } - "#); -} - -#[test] -fn test_pat() { - let pat: Pat = parse_quote!(Some(false) | None); - snapshot!(&pat, @r#" - Pat::Or { - cases: [ - Pat::TupleStruct { - path: Path { - segments: [ - PathSegment { - ident: "Some", - }, - ], - }, - elems: [ - Pat::Lit(ExprLit { - lit: Lit::Bool { - value: false, - }, - }), - ], - }, - Token![|], - Pat::Ident { - ident: "None", - }, - ], - } - "#); - - let boxed_pat: Box<Pat> = parse_quote!(Some(false) | None); - assert_eq!(*boxed_pat, pat); -} - -#[test] -fn test_punctuated() { - let punctuated: Punctuated<Lit, Token![|]> = parse_quote!(true | true); - snapshot!(punctuated, @r#" - [ - Lit::Bool { - value: true, - }, - Token![|], - Lit::Bool { - value: true, - }, - ] - "#); - - let punctuated: Punctuated<Lit, Token![|]> = parse_quote!(true | true |); - snapshot!(punctuated, @r#" - [ - Lit::Bool { - value: true, - }, - Token![|], - Lit::Bool { - value: true, - }, - Token![|], - ] - "#); -} - -#[test] -fn test_vec_stmt() { - let stmts: Vec<Stmt> = parse_quote! 
{ - let _; - true - }; - snapshot!(stmts, @r#" - [ - Stmt::Local { - pat: Pat::Wild, - }, - Stmt::Expr( - Expr::Lit { - lit: Lit::Bool { - value: true, - }, - }, - None, - ), - ] - "#); -} diff --git a/vendor/syn/tests/test_parse_stream.rs b/vendor/syn/tests/test_parse_stream.rs deleted file mode 100644 index a650fc85346c25..00000000000000 --- a/vendor/syn/tests/test_parse_stream.rs +++ /dev/null @@ -1,187 +0,0 @@ -#![allow(clippy::items_after_statements, clippy::let_underscore_untyped)] - -use proc_macro2::{Delimiter, Group, Punct, Spacing, Span, TokenStream, TokenTree}; -use quote::quote; -use syn::ext::IdentExt as _; -use syn::parse::discouraged::AnyDelimiter; -use syn::parse::{ParseStream, Parser as _, Result}; -use syn::{parenthesized, token, Ident, Lifetime, Token}; - -#[test] -fn test_peek_punct() { - let tokens = quote!(+= + =); - - fn assert(input: ParseStream) -> Result<()> { - assert!(input.peek(Token![+])); - assert!(input.peek(Token![+=])); - - let _: Token![+] = input.parse()?; - - assert!(input.peek(Token![=])); - assert!(!input.peek(Token![==])); - assert!(!input.peek(Token![+])); - - let _: Token![=] = input.parse()?; - - assert!(input.peek(Token![+])); - assert!(!input.peek(Token![+=])); - - let _: Token![+] = input.parse()?; - let _: Token![=] = input.parse()?; - Ok(()) - } - - assert.parse2(tokens).unwrap(); -} - -#[test] -fn test_peek_lifetime() { - // 'static ; - let tokens = TokenStream::from_iter([ - TokenTree::Punct(Punct::new('\'', Spacing::Joint)), - TokenTree::Ident(Ident::new("static", Span::call_site())), - TokenTree::Punct(Punct::new(';', Spacing::Alone)), - ]); - - fn assert(input: ParseStream) -> Result<()> { - assert!(input.peek(Lifetime)); - assert!(input.peek2(Token![;])); - assert!(!input.peek2(Token![static])); - - let _: Lifetime = input.parse()?; - - assert!(input.peek(Token![;])); - - let _: Token![;] = input.parse()?; - Ok(()) - } - - assert.parse2(tokens).unwrap(); -} - -#[test] -fn test_peek_not_lifetime() { - // ' static - let tokens = TokenStream::from_iter([ - TokenTree::Punct(Punct::new('\'', Spacing::Alone)), - TokenTree::Ident(Ident::new("static", Span::call_site())), - ]); - - fn assert(input: ParseStream) -> Result<()> { - assert!(!input.peek(Lifetime)); - assert!(input.parse::<Option<Punct>>()?.is_none()); - - let _: TokenTree = input.parse()?; - - assert!(input.peek(Token![static])); - - let _: Token![static] = input.parse()?; - Ok(()) - } - - assert.parse2(tokens).unwrap(); -} - -#[test] -fn test_peek_ident() { - let tokens = quote!(static var); - - fn assert(input: ParseStream) -> Result<()> { - assert!(!input.peek(Ident)); - assert!(input.peek(Ident::peek_any)); - assert!(input.peek(Token![static])); - - let _: Token![static] = input.parse()?; - - assert!(input.peek(Ident)); - assert!(input.peek(Ident::peek_any)); - - let _: Ident = input.parse()?; - Ok(()) - } - - assert.parse2(tokens).unwrap(); -} - -#[test] -fn test_peek_groups() { - // pub ( :: ) «∅ ! 
= ∅» static - let tokens = TokenStream::from_iter([ - TokenTree::Ident(Ident::new("pub", Span::call_site())), - TokenTree::Group(Group::new( - Delimiter::Parenthesis, - TokenStream::from_iter([ - TokenTree::Punct(Punct::new(':', Spacing::Joint)), - TokenTree::Punct(Punct::new(':', Spacing::Alone)), - ]), - )), - TokenTree::Group(Group::new( - Delimiter::None, - TokenStream::from_iter([ - TokenTree::Punct(Punct::new('!', Spacing::Alone)), - TokenTree::Punct(Punct::new('=', Spacing::Alone)), - ]), - )), - TokenTree::Ident(Ident::new("static", Span::call_site())), - ]); - - fn assert(input: ParseStream) -> Result<()> { - assert!(input.peek2(token::Paren)); - assert!(input.peek3(token::Group)); - assert!(input.peek3(Token![!])); - - let _: Token![pub] = input.parse()?; - - assert!(input.peek(token::Paren)); - assert!(!input.peek(Token![::])); - assert!(!input.peek2(Token![::])); - assert!(input.peek2(Token![!])); - assert!(input.peek2(token::Group)); - assert!(input.peek3(Token![=])); - assert!(!input.peek3(Token![static])); - - let content; - parenthesized!(content in input); - - assert!(content.peek(Token![::])); - assert!(content.peek2(Token![:])); - assert!(!content.peek3(token::Group)); - assert!(!content.peek3(Token![!])); - - assert!(input.peek(token::Group)); - assert!(input.peek(Token![!])); - - let _: Token![::] = content.parse()?; - - assert!(input.peek(token::Group)); - assert!(input.peek(Token![!])); - assert!(input.peek2(Token![=])); - assert!(input.peek3(Token![static])); - assert!(!input.peek2(Token![static])); - - let implicit = input.fork(); - let explicit = input.fork(); - - let _: Token![!] = implicit.parse()?; - assert!(implicit.peek(Token![=])); - assert!(implicit.peek2(Token![static])); - let _: Token![=] = implicit.parse()?; - assert!(implicit.peek(Token![static])); - - let (delimiter, _span, grouped) = explicit.parse_any_delimiter()?; - assert_eq!(delimiter, Delimiter::None); - assert!(grouped.peek(Token![!])); - assert!(grouped.peek2(Token![=])); - assert!(!grouped.peek3(Token![static])); - let _: Token![!] 
= grouped.parse()?; - assert!(grouped.peek(Token![=])); - assert!(!grouped.peek2(Token![static])); - let _: Token![=] = grouped.parse()?; - assert!(!grouped.peek(Token![static])); - - let _: TokenStream = input.parse()?; - Ok(()) - } - - assert.parse2(tokens).unwrap(); -} diff --git a/vendor/syn/tests/test_pat.rs b/vendor/syn/tests/test_pat.rs deleted file mode 100644 index f778928bc99341..00000000000000 --- a/vendor/syn/tests/test_pat.rs +++ /dev/null @@ -1,158 +0,0 @@ -#![allow( - clippy::elidable_lifetime_names, - clippy::needless_lifetimes, - clippy::uninlined_format_args -)] - -#[macro_use] -mod snapshot; - -mod debug; - -use proc_macro2::{Delimiter, Group, TokenStream, TokenTree}; -use quote::{quote, ToTokens as _}; -use syn::parse::Parser; -use syn::punctuated::Punctuated; -use syn::{parse_quote, token, Item, Pat, PatTuple, Stmt, Token}; - -#[test] -fn test_pat_ident() { - match Pat::parse_single.parse2(quote!(self)).unwrap() { - Pat::Ident(_) => (), - value => panic!("expected PatIdent, got {:?}", value), - } -} - -#[test] -fn test_pat_path() { - match Pat::parse_single.parse2(quote!(self::CONST)).unwrap() { - Pat::Path(_) => (), - value => panic!("expected PatPath, got {:?}", value), - } -} - -#[test] -fn test_leading_vert() { - // https://github.com/rust-lang/rust/blob/1.43.0/src/test/ui/or-patterns/remove-leading-vert.rs - - syn::parse_str::<Item>("fn f() {}").unwrap(); - syn::parse_str::<Item>("fn fun1(| A: E) {}").unwrap_err(); - syn::parse_str::<Item>("fn fun2(|| A: E) {}").unwrap_err(); - - syn::parse_str::<Stmt>("let | () = ();").unwrap_err(); - syn::parse_str::<Stmt>("let (| A): E;").unwrap(); - syn::parse_str::<Stmt>("let (|| A): (E);").unwrap_err(); - syn::parse_str::<Stmt>("let (| A,): (E,);").unwrap(); - syn::parse_str::<Stmt>("let [| A]: [E; 1];").unwrap(); - syn::parse_str::<Stmt>("let [|| A]: [E; 1];").unwrap_err(); - syn::parse_str::<Stmt>("let TS(| A): TS;").unwrap(); - syn::parse_str::<Stmt>("let TS(|| A): TS;").unwrap_err(); - syn::parse_str::<Stmt>("let NS { f: | A }: NS;").unwrap(); - syn::parse_str::<Stmt>("let NS { f: || A }: NS;").unwrap_err(); -} - -#[test] -fn test_group() { - let group = Group::new(Delimiter::None, quote!(Some(_))); - let tokens = TokenStream::from_iter([TokenTree::Group(group)]); - let pat = Pat::parse_single.parse2(tokens).unwrap(); - - snapshot!(pat, @r#" - Pat::TupleStruct { - path: Path { - segments: [ - PathSegment { - ident: "Some", - }, - ], - }, - elems: [ - Pat::Wild, - ], - } - "#); -} - -#[test] -fn test_ranges() { - Pat::parse_single.parse_str("..").unwrap(); - Pat::parse_single.parse_str("..hi").unwrap(); - Pat::parse_single.parse_str("lo..").unwrap(); - Pat::parse_single.parse_str("lo..hi").unwrap(); - - Pat::parse_single.parse_str("..=").unwrap_err(); - Pat::parse_single.parse_str("..=hi").unwrap(); - Pat::parse_single.parse_str("lo..=").unwrap_err(); - Pat::parse_single.parse_str("lo..=hi").unwrap(); - - Pat::parse_single.parse_str("...").unwrap_err(); - Pat::parse_single.parse_str("...hi").unwrap_err(); - Pat::parse_single.parse_str("lo...").unwrap_err(); - Pat::parse_single.parse_str("lo...hi").unwrap(); - - Pat::parse_single.parse_str("[lo..]").unwrap_err(); - Pat::parse_single.parse_str("[..=hi]").unwrap_err(); - Pat::parse_single.parse_str("[(lo..)]").unwrap(); - Pat::parse_single.parse_str("[(..=hi)]").unwrap(); - Pat::parse_single.parse_str("[lo..=hi]").unwrap(); - - Pat::parse_single.parse_str("[_, lo.., _]").unwrap_err(); - Pat::parse_single.parse_str("[_, ..=hi, _]").unwrap_err(); - 
Pat::parse_single.parse_str("[_, (lo..), _]").unwrap(); - Pat::parse_single.parse_str("[_, (..=hi), _]").unwrap(); - Pat::parse_single.parse_str("[_, lo..=hi, _]").unwrap(); -} - -#[test] -fn test_tuple_comma() { - let mut expr = PatTuple { - attrs: Vec::new(), - paren_token: token::Paren::default(), - elems: Punctuated::new(), - }; - snapshot!(expr.to_token_stream() as Pat, @"Pat::Tuple"); - - expr.elems.push_value(parse_quote!(_)); - // Must not parse to Pat::Paren - snapshot!(expr.to_token_stream() as Pat, @r#" - Pat::Tuple { - elems: [ - Pat::Wild, - Token![,], - ], - } - "#); - - expr.elems.push_punct(<Token![,]>::default()); - snapshot!(expr.to_token_stream() as Pat, @r#" - Pat::Tuple { - elems: [ - Pat::Wild, - Token![,], - ], - } - "#); - - expr.elems.push_value(parse_quote!(_)); - snapshot!(expr.to_token_stream() as Pat, @r#" - Pat::Tuple { - elems: [ - Pat::Wild, - Token![,], - Pat::Wild, - ], - } - "#); - - expr.elems.push_punct(<Token![,]>::default()); - snapshot!(expr.to_token_stream() as Pat, @r#" - Pat::Tuple { - elems: [ - Pat::Wild, - Token![,], - Pat::Wild, - Token![,], - ], - } - "#); -} diff --git a/vendor/syn/tests/test_path.rs b/vendor/syn/tests/test_path.rs deleted file mode 100644 index 7f9e515d26963e..00000000000000 --- a/vendor/syn/tests/test_path.rs +++ /dev/null @@ -1,116 +0,0 @@ -#![allow( - clippy::elidable_lifetime_names, - clippy::needless_lifetimes, - clippy::uninlined_format_args -)] - -#[macro_use] -mod snapshot; - -mod debug; - -use proc_macro2::{Delimiter, Group, Ident, Punct, Spacing, Span, TokenStream, TokenTree}; -use quote::{quote, ToTokens}; -use syn::{parse_quote, Expr, Type, TypePath}; - -#[test] -fn parse_interpolated_leading_component() { - // mimics the token stream corresponding to `$mod::rest` - let tokens = TokenStream::from_iter([ - TokenTree::Group(Group::new(Delimiter::None, quote! 
{ first })), - TokenTree::Punct(Punct::new(':', Spacing::Joint)), - TokenTree::Punct(Punct::new(':', Spacing::Alone)), - TokenTree::Ident(Ident::new("rest", Span::call_site())), - ]); - - snapshot!(tokens.clone() as Expr, @r#" - Expr::Path { - path: Path { - segments: [ - PathSegment { - ident: "first", - }, - Token![::], - PathSegment { - ident: "rest", - }, - ], - }, - } - "#); - - snapshot!(tokens as Type, @r#" - Type::Path { - path: Path { - segments: [ - PathSegment { - ident: "first", - }, - Token![::], - PathSegment { - ident: "rest", - }, - ], - }, - } - "#); -} - -#[test] -fn print_incomplete_qpath() { - // qpath with `as` token - let mut ty: TypePath = parse_quote!(<Self as A>::Q); - snapshot!(ty.to_token_stream(), @"TokenStream(`< Self as A > :: Q`)"); - assert!(ty.path.segments.pop().is_some()); - snapshot!(ty.to_token_stream(), @"TokenStream(`< Self as A > ::`)"); - assert!(ty.path.segments.pop().is_some()); - snapshot!(ty.to_token_stream(), @"TokenStream(`< Self >`)"); - assert!(ty.path.segments.pop().is_none()); - - // qpath without `as` token - let mut ty: TypePath = parse_quote!(<Self>::A::B); - snapshot!(ty.to_token_stream(), @"TokenStream(`< Self > :: A :: B`)"); - assert!(ty.path.segments.pop().is_some()); - snapshot!(ty.to_token_stream(), @"TokenStream(`< Self > :: A ::`)"); - assert!(ty.path.segments.pop().is_some()); - snapshot!(ty.to_token_stream(), @"TokenStream(`< Self > ::`)"); - assert!(ty.path.segments.pop().is_none()); - - // normal path - let mut ty: TypePath = parse_quote!(Self::A::B); - snapshot!(ty.to_token_stream(), @"TokenStream(`Self :: A :: B`)"); - assert!(ty.path.segments.pop().is_some()); - snapshot!(ty.to_token_stream(), @"TokenStream(`Self :: A ::`)"); - assert!(ty.path.segments.pop().is_some()); - snapshot!(ty.to_token_stream(), @"TokenStream(`Self ::`)"); - assert!(ty.path.segments.pop().is_some()); - snapshot!(ty.to_token_stream(), @"TokenStream(``)"); - assert!(ty.path.segments.pop().is_none()); -} - -#[test] -fn parse_parenthesized_path_arguments_with_disambiguator() { - #[rustfmt::skip] - let tokens = quote!(dyn FnOnce::() -> !); - snapshot!(tokens as Type, @r#" - Type::TraitObject { - dyn_token: Some, - bounds: [ - TypeParamBound::Trait(TraitBound { - path: Path { - segments: [ - PathSegment { - ident: "FnOnce", - arguments: PathArguments::Parenthesized { - output: ReturnType::Type( - Type::Never, - ), - }, - }, - ], - }, - }), - ], - } - "#); -} diff --git a/vendor/syn/tests/test_precedence.rs b/vendor/syn/tests/test_precedence.rs deleted file mode 100644 index eb193a5aef0db9..00000000000000 --- a/vendor/syn/tests/test_precedence.rs +++ /dev/null @@ -1,558 +0,0 @@ -// This test does the following for every file in the rust-lang/rust repo: -// -// 1. Parse the file using syn into a syn::File. -// 2. Extract every syn::Expr from the file. -// 3. Print each expr to a string of source code. -// 4. Parse the source code using librustc_parse into a rustc_ast::Expr. -// 5. For both the syn::Expr and rustc_ast::Expr, crawl the syntax tree to -// insert parentheses surrounding every subexpression. -// 6. Serialize the fully parenthesized syn::Expr to a string of source code. -// 7. Parse the fully parenthesized source code using librustc_parse. -// 8. Compare the rustc_ast::Expr resulting from parenthesizing using rustc data -// structures vs syn data structures, ignoring spans. If they agree, rustc's -// parser and syn's parser have identical handling of expression precedence. 
- -#![cfg(not(syn_disable_nightly_tests))] -#![cfg(not(miri))] -#![recursion_limit = "1024"] -#![feature(rustc_private)] -#![allow( - clippy::blocks_in_conditions, - clippy::doc_markdown, - clippy::elidable_lifetime_names, - clippy::explicit_deref_methods, - clippy::let_underscore_untyped, - clippy::manual_assert, - clippy::manual_let_else, - clippy::match_like_matches_macro, - clippy::match_wildcard_for_single_variants, - clippy::needless_lifetimes, - clippy::too_many_lines, - clippy::uninlined_format_args, - clippy::unnecessary_box_returns -)] - -extern crate rustc_ast; -extern crate rustc_ast_pretty; -extern crate rustc_data_structures; -extern crate rustc_driver; -extern crate rustc_span; -extern crate smallvec; -extern crate thin_vec; - -use crate::common::eq::SpanlessEq; -use crate::common::parse; -use quote::ToTokens; -use rustc_ast::ast; -use rustc_ast_pretty::pprust; -use rustc_span::edition::Edition; -use std::fs; -use std::mem; -use std::path::Path; -use std::process; -use std::sync::atomic::{AtomicUsize, Ordering}; -use syn::parse::Parser as _; - -#[macro_use] -mod macros; - -mod common; -mod repo; - -#[path = "../src/scan_expr.rs"] -mod scan_expr; - -#[test] -fn test_rustc_precedence() { - repo::rayon_init(); - repo::clone_rust(); - let abort_after = repo::abort_after(); - if abort_after == 0 { - panic!("skipping all precedence tests"); - } - - let passed = AtomicUsize::new(0); - let failed = AtomicUsize::new(0); - - repo::for_each_rust_file(|path| { - let content = fs::read_to_string(path).unwrap(); - - let (l_passed, l_failed) = match syn::parse_file(&content) { - Ok(file) => { - let edition = repo::edition(path).parse().unwrap(); - let exprs = collect_exprs(file); - let (l_passed, l_failed) = test_expressions(path, edition, exprs); - errorf!( - "=== {}: {} passed | {} failed\n", - path.display(), - l_passed, - l_failed, - ); - (l_passed, l_failed) - } - Err(msg) => { - errorf!("\nFAIL {} - syn failed to parse: {}\n", path.display(), msg); - (0, 1) - } - }; - - passed.fetch_add(l_passed, Ordering::Relaxed); - let prev_failed = failed.fetch_add(l_failed, Ordering::Relaxed); - - if prev_failed + l_failed >= abort_after { - process::exit(1); - } - }); - - let passed = passed.into_inner(); - let failed = failed.into_inner(); - - errorf!("\n===== Precedence Test Results =====\n"); - errorf!("{} passed | {} failed\n", passed, failed); - - if failed > 0 { - panic!("{} failures", failed); - } -} - -fn test_expressions(path: &Path, edition: Edition, exprs: Vec<syn::Expr>) -> (usize, usize) { - let mut passed = 0; - let mut failed = 0; - - rustc_span::create_session_if_not_set_then(edition, |_| { - for expr in exprs { - let expr_tokens = expr.to_token_stream(); - let source_code = expr_tokens.to_string(); - let librustc_ast = if let Some(e) = librustc_parse_and_rewrite(&source_code) { - e - } else { - failed += 1; - errorf!( - "\nFAIL {} - librustc failed to parse original\n", - path.display(), - ); - continue; - }; - - let syn_parenthesized_code = - syn_parenthesize(expr.clone()).to_token_stream().to_string(); - let syn_ast = if let Some(e) = parse::librustc_expr(&syn_parenthesized_code) { - e - } else { - failed += 1; - errorf!( - "\nFAIL {} - librustc failed to parse parenthesized\n", - path.display(), - ); - continue; - }; - - if !SpanlessEq::eq(&syn_ast, &librustc_ast) { - failed += 1; - let syn_pretty = pprust::expr_to_string(&syn_ast); - let librustc_pretty = pprust::expr_to_string(&librustc_ast); - errorf!( - "\nFAIL {}\n{}\nsyn != rustc\n{}\n", - path.display(), - syn_pretty, 
- librustc_pretty, - ); - continue; - } - - let expr_invisible = make_parens_invisible(expr); - let Ok(reparsed_expr_invisible) = syn::parse2(expr_invisible.to_token_stream()) else { - failed += 1; - errorf!( - "\nFAIL {} - syn failed to parse invisible delimiters\n{}\n", - path.display(), - source_code, - ); - continue; - }; - if expr_invisible != reparsed_expr_invisible { - failed += 1; - errorf!( - "\nFAIL {} - mismatch after parsing invisible delimiters\n{}\n", - path.display(), - source_code, - ); - continue; - } - - if scan_expr::scan_expr.parse2(expr_tokens).is_err() { - failed += 1; - errorf!( - "\nFAIL {} - failed to scan expr\n{}\n", - path.display(), - source_code, - ); - continue; - } - - passed += 1; - } - }); - - (passed, failed) -} - -fn librustc_parse_and_rewrite(input: &str) -> Option<Box<ast::Expr>> { - parse::librustc_expr(input).map(librustc_parenthesize) -} - -fn librustc_parenthesize(mut librustc_expr: Box<ast::Expr>) -> Box<ast::Expr> { - use rustc_ast::ast::{ - AssocItem, AssocItemKind, Attribute, BinOpKind, Block, BoundConstness, Expr, ExprField, - ExprKind, GenericArg, GenericBound, Local, LocalKind, Pat, PolyTraitRef, Stmt, StmtKind, - StructExpr, StructRest, TraitBoundModifiers, Ty, - }; - use rustc_ast::mut_visit::{walk_flat_map_assoc_item, MutVisitor}; - use rustc_ast::visit::{AssocCtxt, BoundKind}; - use rustc_data_structures::flat_map_in_place::FlatMapInPlace; - use rustc_span::DUMMY_SP; - use smallvec::SmallVec; - use std::ops::DerefMut; - use thin_vec::ThinVec; - - struct FullyParenthesize; - - fn contains_let_chain(expr: &Expr) -> bool { - match &expr.kind { - ExprKind::Let(..) => true, - ExprKind::Binary(binop, left, right) => { - binop.node == BinOpKind::And - && (contains_let_chain(left) || contains_let_chain(right)) - } - _ => false, - } - } - - fn flat_map_field<T: MutVisitor>(mut f: ExprField, vis: &mut T) -> Vec<ExprField> { - if f.is_shorthand { - noop_visit_expr(&mut f.expr, vis); - } else { - vis.visit_expr(&mut f.expr); - } - vec![f] - } - - fn flat_map_stmt<T: MutVisitor>(stmt: Stmt, vis: &mut T) -> Vec<Stmt> { - let kind = match stmt.kind { - // Don't wrap toplevel expressions in statements. - StmtKind::Expr(mut e) => { - noop_visit_expr(&mut e, vis); - StmtKind::Expr(e) - } - StmtKind::Semi(mut e) => { - noop_visit_expr(&mut e, vis); - StmtKind::Semi(e) - } - s => s, - }; - - vec![Stmt { kind, ..stmt }] - } - - fn noop_visit_expr<T: MutVisitor>(e: &mut Expr, vis: &mut T) { - match &mut e.kind { - ExprKind::Become(..) => {} - ExprKind::Struct(expr) => { - let StructExpr { - qself, - path, - fields, - rest, - } = expr.deref_mut(); - if let Some(qself) = qself { - vis.visit_qself(qself); - } - vis.visit_path(path); - fields.flat_map_in_place(|field| flat_map_field(field, vis)); - if let StructRest::Base(rest) = rest { - vis.visit_expr(rest); - } - } - _ => rustc_ast::mut_visit::walk_expr(vis, e), - } - } - - impl MutVisitor for FullyParenthesize { - fn visit_expr(&mut self, e: &mut Expr) { - noop_visit_expr(e, self); - match e.kind { - ExprKind::Block(..) | ExprKind::If(..) | ExprKind::Let(..) => {} - ExprKind::Binary(..) 
if contains_let_chain(e) => {} - _ => { - let inner = mem::replace(e, Expr::dummy()); - *e = Expr { - id: ast::DUMMY_NODE_ID, - kind: ExprKind::Paren(Box::new(inner)), - span: DUMMY_SP, - attrs: ThinVec::new(), - tokens: None, - }; - } - } - } - - fn visit_generic_arg(&mut self, arg: &mut GenericArg) { - match arg { - GenericArg::Lifetime(_lifetime) => {} - GenericArg::Type(arg) => self.visit_ty(arg), - // Don't wrap unbraced const generic arg as that's invalid syntax. - GenericArg::Const(anon_const) => { - if let ExprKind::Block(..) = &mut anon_const.value.kind { - noop_visit_expr(&mut anon_const.value, self); - } - } - } - } - - fn visit_param_bound(&mut self, bound: &mut GenericBound, _ctxt: BoundKind) { - match bound { - GenericBound::Trait(PolyTraitRef { - modifiers: - TraitBoundModifiers { - constness: BoundConstness::Maybe(_), - .. - }, - .. - }) - | GenericBound::Outlives(..) - | GenericBound::Use(..) => {} - GenericBound::Trait(ty) => self.visit_poly_trait_ref(ty), - } - } - - fn visit_block(&mut self, block: &mut Block) { - self.visit_id(&mut block.id); - block - .stmts - .flat_map_in_place(|stmt| flat_map_stmt(stmt, self)); - self.visit_span(&mut block.span); - } - - fn visit_local(&mut self, local: &mut Local) { - match &mut local.kind { - LocalKind::Decl => {} - LocalKind::Init(init) => { - self.visit_expr(init); - } - LocalKind::InitElse(init, els) => { - self.visit_expr(init); - self.visit_block(els); - } - } - } - - fn flat_map_assoc_item( - &mut self, - item: Box<AssocItem>, - ctxt: AssocCtxt, - ) -> SmallVec<[Box<AssocItem>; 1]> { - match &item.kind { - AssocItemKind::Const(const_item) - if !const_item.generics.params.is_empty() - || !const_item.generics.where_clause.predicates.is_empty() => - { - SmallVec::from([item]) - } - _ => walk_flat_map_assoc_item(self, item, ctxt), - } - } - - // We don't want to look at expressions that might appear in patterns or - // types yet. We'll look into comparing those in the future. For now - // focus on expressions appearing in other places. 
- fn visit_pat(&mut self, pat: &mut Pat) { - let _ = pat; - } - - fn visit_ty(&mut self, ty: &mut Ty) { - let _ = ty; - } - - fn visit_attribute(&mut self, attr: &mut Attribute) { - let _ = attr; - } - } - - let mut folder = FullyParenthesize; - folder.visit_expr(&mut librustc_expr); - librustc_expr -} - -fn syn_parenthesize(syn_expr: syn::Expr) -> syn::Expr { - use syn::fold::{fold_expr, fold_generic_argument, Fold}; - use syn::{ - token, BinOp, Expr, ExprParen, GenericArgument, Lit, MetaNameValue, Pat, Stmt, Type, - }; - - struct FullyParenthesize; - - fn parenthesize(expr: Expr) -> Expr { - Expr::Paren(ExprParen { - attrs: Vec::new(), - expr: Box::new(expr), - paren_token: token::Paren::default(), - }) - } - - fn needs_paren(expr: &Expr) -> bool { - match expr { - Expr::Group(_) => unreachable!(), - Expr::If(_) | Expr::Unsafe(_) | Expr::Block(_) | Expr::Let(_) => false, - Expr::Binary(_) => !contains_let_chain(expr), - _ => true, - } - } - - fn contains_let_chain(expr: &Expr) -> bool { - match expr { - Expr::Let(_) => true, - Expr::Binary(expr) => { - matches!(expr.op, BinOp::And(_)) - && (contains_let_chain(&expr.left) || contains_let_chain(&expr.right)) - } - _ => false, - } - } - - impl Fold for FullyParenthesize { - fn fold_expr(&mut self, expr: Expr) -> Expr { - let needs_paren = needs_paren(&expr); - let folded = fold_expr(self, expr); - if needs_paren { - parenthesize(folded) - } else { - folded - } - } - - fn fold_generic_argument(&mut self, arg: GenericArgument) -> GenericArgument { - match arg { - GenericArgument::Const(arg) => GenericArgument::Const(match arg { - Expr::Block(_) => fold_expr(self, arg), - // Don't wrap unbraced const generic arg as that's invalid syntax. - _ => arg, - }), - _ => fold_generic_argument(self, arg), - } - } - - fn fold_stmt(&mut self, stmt: Stmt) -> Stmt { - match stmt { - // Don't wrap toplevel expressions in statements. - Stmt::Expr(Expr::Verbatim(_), Some(_)) => stmt, - Stmt::Expr(e, semi) => Stmt::Expr(fold_expr(self, e), semi), - s => s, - } - } - - fn fold_meta_name_value(&mut self, meta: MetaNameValue) -> MetaNameValue { - // Don't turn #[p = "..."] into #[p = ("...")]. - meta - } - - // We don't want to look at expressions that might appear in patterns or - // types yet. We'll look into comparing those in the future. For now - // focus on expressions appearing in other places. 
- fn fold_pat(&mut self, pat: Pat) -> Pat { - pat - } - - fn fold_type(&mut self, ty: Type) -> Type { - ty - } - - fn fold_lit(&mut self, lit: Lit) -> Lit { - if let Lit::Verbatim(lit) = &lit { - panic!("unexpected verbatim literal: {lit}"); - } - lit - } - } - - let mut folder = FullyParenthesize; - folder.fold_expr(syn_expr) -} - -fn make_parens_invisible(expr: syn::Expr) -> syn::Expr { - use syn::fold::{fold_expr, fold_stmt, Fold}; - use syn::{token, Expr, ExprGroup, ExprParen, Stmt}; - - struct MakeParensInvisible; - - impl Fold for MakeParensInvisible { - fn fold_expr(&mut self, mut expr: Expr) -> Expr { - if let Expr::Paren(paren) = expr { - expr = Expr::Group(ExprGroup { - attrs: paren.attrs, - group_token: token::Group(paren.paren_token.span.join()), - expr: paren.expr, - }); - } - fold_expr(self, expr) - } - - fn fold_stmt(&mut self, stmt: Stmt) -> Stmt { - if let Stmt::Expr(expr @ (Expr::Binary(_) | Expr::Call(_) | Expr::Cast(_)), None) = stmt - { - Stmt::Expr( - Expr::Paren(ExprParen { - attrs: Vec::new(), - paren_token: token::Paren::default(), - expr: Box::new(fold_expr(self, expr)), - }), - None, - ) - } else { - fold_stmt(self, stmt) - } - } - } - - let mut folder = MakeParensInvisible; - folder.fold_expr(expr) -} - -/// Walk through a crate collecting all expressions we can find in it. -fn collect_exprs(file: syn::File) -> Vec<syn::Expr> { - use syn::fold::Fold; - use syn::punctuated::Punctuated; - use syn::{token, ConstParam, Expr, ExprTuple, Pat, Path}; - - struct CollectExprs(Vec<Expr>); - impl Fold for CollectExprs { - fn fold_expr(&mut self, expr: Expr) -> Expr { - match expr { - Expr::Verbatim(_) => {} - _ => self.0.push(expr), - } - - Expr::Tuple(ExprTuple { - attrs: vec![], - elems: Punctuated::new(), - paren_token: token::Paren::default(), - }) - } - - fn fold_pat(&mut self, pat: Pat) -> Pat { - pat - } - - fn fold_path(&mut self, path: Path) -> Path { - // Skip traversing into const generic path arguments - path - } - - fn fold_const_param(&mut self, const_param: ConstParam) -> ConstParam { - const_param - } - } - - let mut folder = CollectExprs(vec![]); - folder.fold_file(file); - folder.0 -} diff --git a/vendor/syn/tests/test_punctuated.rs b/vendor/syn/tests/test_punctuated.rs deleted file mode 100644 index 14ea96c7717221..00000000000000 --- a/vendor/syn/tests/test_punctuated.rs +++ /dev/null @@ -1,92 +0,0 @@ -#![allow( - clippy::elidable_lifetime_names, - clippy::needless_lifetimes, - clippy::uninlined_format_args -)] - -use syn::punctuated::{Pair, Punctuated}; -use syn::Token; - -macro_rules! punctuated { - ($($e:expr,)+) => {{ - let mut seq = ::syn::punctuated::Punctuated::new(); - $( - seq.push($e); - )+ - seq - }}; - - ($($e:expr),+) => { - punctuated!($($e,)+) - }; -} - -macro_rules! 
check_exact_size_iterator { - ($iter:expr) => {{ - let iter = $iter; - let size_hint = iter.size_hint(); - let len = iter.len(); - let count = iter.count(); - assert_eq!(len, count); - assert_eq!(size_hint, (count, Some(count))); - }}; -} - -#[test] -fn pairs() { - let mut p: Punctuated<_, Token![,]> = punctuated!(2, 3, 4); - - check_exact_size_iterator!(p.pairs()); - check_exact_size_iterator!(p.pairs_mut()); - check_exact_size_iterator!(p.into_pairs()); - - let mut p: Punctuated<_, Token![,]> = punctuated!(2, 3, 4); - - assert_eq!(p.pairs().next_back().map(Pair::into_value), Some(&4)); - assert_eq!( - p.pairs_mut().next_back().map(Pair::into_value), - Some(&mut 4) - ); - assert_eq!(p.into_pairs().next_back().map(Pair::into_value), Some(4)); -} - -#[test] -fn iter() { - let mut p: Punctuated<_, Token![,]> = punctuated!(2, 3, 4); - - check_exact_size_iterator!(p.iter()); - check_exact_size_iterator!(p.iter_mut()); - check_exact_size_iterator!(p.into_iter()); - - let mut p: Punctuated<_, Token![,]> = punctuated!(2, 3, 4); - - assert_eq!(p.iter().next_back(), Some(&4)); - assert_eq!(p.iter_mut().next_back(), Some(&mut 4)); - assert_eq!(p.into_iter().next_back(), Some(4)); -} - -#[test] -fn may_dangle() { - let p: Punctuated<_, Token![,]> = punctuated!(2, 3, 4); - for element in &p { - if *element == 2 { - drop(p); - break; - } - } - - let mut p: Punctuated<_, Token![,]> = punctuated!(2, 3, 4); - for element in &mut p { - if *element == 2 { - drop(p); - break; - } - } -} - -#[test] -#[should_panic = "index out of bounds: the len is 0 but the index is 0"] -fn index_out_of_bounds() { - let p = Punctuated::<syn::Ident, Token![,]>::new(); - let _ = p[0].clone(); -} diff --git a/vendor/syn/tests/test_receiver.rs b/vendor/syn/tests/test_receiver.rs deleted file mode 100644 index 98194101fdac0d..00000000000000 --- a/vendor/syn/tests/test_receiver.rs +++ /dev/null @@ -1,327 +0,0 @@ -#![allow( - clippy::elidable_lifetime_names, - clippy::needless_lifetimes, - clippy::uninlined_format_args -)] - -#[macro_use] -mod snapshot; - -mod debug; - -use syn::{parse_quote, TraitItemFn}; - -#[test] -fn test_by_value() { - let TraitItemFn { sig, .. } = parse_quote! { - fn by_value(self: Self); - }; - snapshot!(&sig.inputs[0], @r#" - FnArg::Receiver(Receiver { - colon_token: Some, - ty: Type::Path { - path: Path { - segments: [ - PathSegment { - ident: "Self", - }, - ], - }, - }, - }) - "#); -} - -#[test] -fn test_by_mut_value() { - let TraitItemFn { sig, .. } = parse_quote! { - fn by_mut(mut self: Self); - }; - snapshot!(&sig.inputs[0], @r#" - FnArg::Receiver(Receiver { - mutability: Some, - colon_token: Some, - ty: Type::Path { - path: Path { - segments: [ - PathSegment { - ident: "Self", - }, - ], - }, - }, - }) - "#); -} - -#[test] -fn test_by_ref() { - let TraitItemFn { sig, .. } = parse_quote! { - fn by_ref(self: &Self); - }; - snapshot!(&sig.inputs[0], @r#" - FnArg::Receiver(Receiver { - colon_token: Some, - ty: Type::Reference { - elem: Type::Path { - path: Path { - segments: [ - PathSegment { - ident: "Self", - }, - ], - }, - }, - }, - }) - "#); -} - -#[test] -fn test_by_box() { - let TraitItemFn { sig, .. } = parse_quote! 
{ - fn by_box(self: Box<Self>); - }; - snapshot!(&sig.inputs[0], @r#" - FnArg::Receiver(Receiver { - colon_token: Some, - ty: Type::Path { - path: Path { - segments: [ - PathSegment { - ident: "Box", - arguments: PathArguments::AngleBracketed { - args: [ - GenericArgument::Type(Type::Path { - path: Path { - segments: [ - PathSegment { - ident: "Self", - }, - ], - }, - }), - ], - }, - }, - ], - }, - }, - }) - "#); -} - -#[test] -fn test_by_pin() { - let TraitItemFn { sig, .. } = parse_quote! { - fn by_pin(self: Pin<Self>); - }; - snapshot!(&sig.inputs[0], @r#" - FnArg::Receiver(Receiver { - colon_token: Some, - ty: Type::Path { - path: Path { - segments: [ - PathSegment { - ident: "Pin", - arguments: PathArguments::AngleBracketed { - args: [ - GenericArgument::Type(Type::Path { - path: Path { - segments: [ - PathSegment { - ident: "Self", - }, - ], - }, - }), - ], - }, - }, - ], - }, - }, - }) - "#); -} - -#[test] -fn test_explicit_type() { - let TraitItemFn { sig, .. } = parse_quote! { - fn explicit_type(self: Pin<MyType>); - }; - snapshot!(&sig.inputs[0], @r#" - FnArg::Receiver(Receiver { - colon_token: Some, - ty: Type::Path { - path: Path { - segments: [ - PathSegment { - ident: "Pin", - arguments: PathArguments::AngleBracketed { - args: [ - GenericArgument::Type(Type::Path { - path: Path { - segments: [ - PathSegment { - ident: "MyType", - }, - ], - }, - }), - ], - }, - }, - ], - }, - }, - }) - "#); -} - -#[test] -fn test_value_shorthand() { - let TraitItemFn { sig, .. } = parse_quote! { - fn value_shorthand(self); - }; - snapshot!(&sig.inputs[0], @r#" - FnArg::Receiver(Receiver { - ty: Type::Path { - path: Path { - segments: [ - PathSegment { - ident: "Self", - }, - ], - }, - }, - }) - "#); -} - -#[test] -fn test_mut_value_shorthand() { - let TraitItemFn { sig, .. } = parse_quote! { - fn mut_value_shorthand(mut self); - }; - snapshot!(&sig.inputs[0], @r#" - FnArg::Receiver(Receiver { - mutability: Some, - ty: Type::Path { - path: Path { - segments: [ - PathSegment { - ident: "Self", - }, - ], - }, - }, - }) - "#); -} - -#[test] -fn test_ref_shorthand() { - let TraitItemFn { sig, .. } = parse_quote! { - fn ref_shorthand(&self); - }; - snapshot!(&sig.inputs[0], @r#" - FnArg::Receiver(Receiver { - reference: Some(None), - ty: Type::Reference { - elem: Type::Path { - path: Path { - segments: [ - PathSegment { - ident: "Self", - }, - ], - }, - }, - }, - }) - "#); -} - -#[test] -fn test_ref_shorthand_with_lifetime() { - let TraitItemFn { sig, .. } = parse_quote! { - fn ref_shorthand(&'a self); - }; - snapshot!(&sig.inputs[0], @r#" - FnArg::Receiver(Receiver { - reference: Some(Some(Lifetime { - ident: "a", - })), - ty: Type::Reference { - lifetime: Some(Lifetime { - ident: "a", - }), - elem: Type::Path { - path: Path { - segments: [ - PathSegment { - ident: "Self", - }, - ], - }, - }, - }, - }) - "#); -} - -#[test] -fn test_ref_mut_shorthand() { - let TraitItemFn { sig, .. } = parse_quote! { - fn ref_mut_shorthand(&mut self); - }; - snapshot!(&sig.inputs[0], @r#" - FnArg::Receiver(Receiver { - reference: Some(None), - mutability: Some, - ty: Type::Reference { - mutability: Some, - elem: Type::Path { - path: Path { - segments: [ - PathSegment { - ident: "Self", - }, - ], - }, - }, - }, - }) - "#); -} - -#[test] -fn test_ref_mut_shorthand_with_lifetime() { - let TraitItemFn { sig, .. } = parse_quote! 
{ - fn ref_mut_shorthand(&'a mut self); - }; - snapshot!(&sig.inputs[0], @r#" - FnArg::Receiver(Receiver { - reference: Some(Some(Lifetime { - ident: "a", - })), - mutability: Some, - ty: Type::Reference { - lifetime: Some(Lifetime { - ident: "a", - }), - mutability: Some, - elem: Type::Path { - path: Path { - segments: [ - PathSegment { - ident: "Self", - }, - ], - }, - }, - }, - }) - "#); -} diff --git a/vendor/syn/tests/test_round_trip.rs b/vendor/syn/tests/test_round_trip.rs deleted file mode 100644 index 5b1b833a6a95a4..00000000000000 --- a/vendor/syn/tests/test_round_trip.rs +++ /dev/null @@ -1,256 +0,0 @@ -#![cfg(not(syn_disable_nightly_tests))] -#![cfg(not(miri))] -#![recursion_limit = "1024"] -#![feature(rustc_private)] -#![allow( - clippy::blocks_in_conditions, - clippy::elidable_lifetime_names, - clippy::manual_assert, - clippy::manual_let_else, - clippy::match_like_matches_macro, - clippy::needless_lifetimes, - clippy::uninlined_format_args -)] -#![allow(mismatched_lifetime_syntaxes)] - -extern crate rustc_ast; -extern crate rustc_ast_pretty; -extern crate rustc_data_structures; -extern crate rustc_driver; -extern crate rustc_error_messages; -extern crate rustc_errors; -extern crate rustc_expand; -extern crate rustc_parse; -extern crate rustc_session; -extern crate rustc_span; - -use crate::common::eq::SpanlessEq; -use quote::quote; -use rustc_ast::ast::{ - AngleBracketedArg, Crate, GenericArg, GenericArgs, GenericParamKind, Generics, -}; -use rustc_ast::mut_visit::{self, MutVisitor}; -use rustc_ast_pretty::pprust; -use rustc_data_structures::flat_map_in_place::FlatMapInPlace; -use rustc_error_messages::{DiagMessage, LazyFallbackBundle}; -use rustc_errors::{translation, Diag, PResult}; -use rustc_parse::lexer::StripTokens; -use rustc_session::parse::ParseSess; -use rustc_span::FileName; -use std::borrow::Cow; -use std::fs; -use std::panic; -use std::path::Path; -use std::process; -use std::sync::atomic::{AtomicUsize, Ordering}; -use std::time::Instant; - -#[macro_use] -mod macros; - -mod common; -mod repo; - -#[test] -fn test_round_trip() { - repo::rayon_init(); - repo::clone_rust(); - let abort_after = repo::abort_after(); - if abort_after == 0 { - panic!("skipping all round_trip tests"); - } - - let failed = AtomicUsize::new(0); - - repo::for_each_rust_file(|path| test(path, &failed, abort_after)); - - let failed = failed.into_inner(); - if failed > 0 { - panic!("{} failures", failed); - } -} - -fn test(path: &Path, failed: &AtomicUsize, abort_after: usize) { - let failed = || { - let prev_failed = failed.fetch_add(1, Ordering::Relaxed); - if prev_failed + 1 >= abort_after { - process::exit(1); - } - }; - - let content = fs::read_to_string(path).unwrap(); - - let (back, elapsed) = match panic::catch_unwind(|| { - let start = Instant::now(); - let result = syn::parse_file(&content); - let elapsed = start.elapsed(); - result.map(|krate| (quote!(#krate).to_string(), elapsed)) - }) { - Err(_) => { - errorf!("=== {}: syn panic\n", path.display()); - failed(); - return; - } - Ok(Err(msg)) => { - errorf!("=== {}: syn failed to parse\n{:?}\n", path.display(), msg); - failed(); - return; - } - Ok(Ok(result)) => result, - }; - - let edition = repo::edition(path).parse().unwrap(); - - rustc_span::create_session_if_not_set_then(edition, |_| { - let equal = match panic::catch_unwind(|| { - let locale_resources = rustc_driver::DEFAULT_LOCALE_RESOURCES.to_vec(); - let sess = ParseSess::new(locale_resources); - let before = match librustc_parse(content, &sess) { - Ok(before) => before, - 
Err(diagnostic) => { - errorf!( - "=== {}: ignore - librustc failed to parse original content: {}\n", - path.display(), - translate_message(&diagnostic), - ); - diagnostic.cancel(); - return Err(true); - } - }; - let after = match librustc_parse(back, &sess) { - Ok(after) => after, - Err(diagnostic) => { - errorf!("=== {}: librustc failed to parse", path.display()); - diagnostic.emit(); - return Err(false); - } - }; - Ok((before, after)) - }) { - Err(_) => { - errorf!("=== {}: ignoring librustc panic\n", path.display()); - true - } - Ok(Err(equal)) => equal, - Ok(Ok((mut before, mut after))) => { - normalize(&mut before); - normalize(&mut after); - if SpanlessEq::eq(&before, &after) { - errorf!( - "=== {}: pass in {}ms\n", - path.display(), - elapsed.as_secs() * 1000 + u64::from(elapsed.subsec_nanos()) / 1_000_000 - ); - true - } else { - errorf!( - "=== {}: FAIL\n{}\n!=\n{}\n", - path.display(), - pprust::crate_to_string_for_macros(&before), - pprust::crate_to_string_for_macros(&after), - ); - false - } - } - }; - if !equal { - failed(); - } - }); -} - -fn librustc_parse(content: String, sess: &ParseSess) -> PResult<Crate> { - static COUNTER: AtomicUsize = AtomicUsize::new(0); - let counter = COUNTER.fetch_add(1, Ordering::Relaxed); - let name = FileName::Custom(format!("test_round_trip{}", counter)); - let mut parser = rustc_parse::new_parser_from_source_str( - sess, - name, - content, - StripTokens::ShebangAndFrontmatter, - ) - .unwrap(); - parser.parse_crate_mod() -} - -fn translate_message(diagnostic: &Diag) -> Cow<'static, str> { - thread_local! { - static FLUENT_BUNDLE: LazyFallbackBundle = { - let locale_resources = rustc_driver::DEFAULT_LOCALE_RESOURCES.to_vec(); - let with_directionality_markers = false; - rustc_error_messages::fallback_fluent_bundle(locale_resources, with_directionality_markers) - }; - } - - let message = &diagnostic.messages[0].0; - let args = translation::to_fluent_args(diagnostic.args.iter()); - - let (identifier, attr) = match message { - DiagMessage::Str(msg) | DiagMessage::Translated(msg) => return msg.clone(), - DiagMessage::FluentIdentifier(identifier, attr) => (identifier, attr), - }; - - FLUENT_BUNDLE.with(|fluent_bundle| { - let message = fluent_bundle - .get_message(identifier) - .expect("missing diagnostic in fluent bundle"); - let value = match attr { - Some(attr) => message - .get_attribute(attr) - .expect("missing attribute in fluent message") - .value(), - None => message.value().expect("missing value in fluent message"), - }; - - let mut err = Vec::new(); - let translated = fluent_bundle.format_pattern(value, Some(&args), &mut err); - assert!(err.is_empty()); - Cow::Owned(translated.into_owned()) - }) -} - -fn normalize(krate: &mut Crate) { - struct NormalizeVisitor; - - impl MutVisitor for NormalizeVisitor { - fn visit_generic_args(&mut self, e: &mut GenericArgs) { - if let GenericArgs::AngleBracketed(e) = e { - #[derive(Ord, PartialOrd, Eq, PartialEq)] - enum Group { - Lifetimes, - TypesAndConsts, - Constraints, - } - e.args.sort_by_key(|arg| match arg { - AngleBracketedArg::Arg(arg) => match arg { - GenericArg::Lifetime(_) => Group::Lifetimes, - GenericArg::Type(_) | GenericArg::Const(_) => Group::TypesAndConsts, - }, - AngleBracketedArg::Constraint(_) => Group::Constraints, - }); - } - mut_visit::walk_generic_args(self, e); - } - - fn visit_generics(&mut self, e: &mut Generics) { - #[derive(Ord, PartialOrd, Eq, PartialEq)] - enum Group { - Lifetimes, - TypesAndConsts, - } - e.params.sort_by_key(|param| match param.kind { - 
GenericParamKind::Lifetime => Group::Lifetimes, - GenericParamKind::Type { .. } | GenericParamKind::Const { .. } => { - Group::TypesAndConsts - } - }); - e.params - .flat_map_in_place(|param| self.flat_map_generic_param(param)); - if e.where_clause.predicates.is_empty() { - e.where_clause.has_where_token = false; - } - } - } - - NormalizeVisitor.visit_crate(krate); -} diff --git a/vendor/syn/tests/test_shebang.rs b/vendor/syn/tests/test_shebang.rs deleted file mode 100644 index 3b55ddfdd59d03..00000000000000 --- a/vendor/syn/tests/test_shebang.rs +++ /dev/null @@ -1,73 +0,0 @@ -#![allow( - clippy::elidable_lifetime_names, - clippy::needless_lifetimes, - clippy::uninlined_format_args -)] - -#[macro_use] -mod snapshot; - -mod debug; - -#[test] -fn test_basic() { - let content = "#!/usr/bin/env rustx\nfn main() {}"; - let file = syn::parse_file(content).unwrap(); - snapshot!(file, @r##" - File { - shebang: Some("#!/usr/bin/env rustx"), - items: [ - Item::Fn { - vis: Visibility::Inherited, - sig: Signature { - ident: "main", - generics: Generics, - output: ReturnType::Default, - }, - block: Block { - stmts: [], - }, - }, - ], - } - "##); -} - -#[test] -fn test_comment() { - let content = "#!//am/i/a/comment\n[allow(dead_code)] fn main() {}"; - let file = syn::parse_file(content).unwrap(); - snapshot!(file, @r#" - File { - attrs: [ - Attribute { - style: AttrStyle::Inner, - meta: Meta::List { - path: Path { - segments: [ - PathSegment { - ident: "allow", - }, - ], - }, - delimiter: MacroDelimiter::Paren, - tokens: TokenStream(`dead_code`), - }, - }, - ], - items: [ - Item::Fn { - vis: Visibility::Inherited, - sig: Signature { - ident: "main", - generics: Generics, - output: ReturnType::Default, - }, - block: Block { - stmts: [], - }, - }, - ], - } - "#); -} diff --git a/vendor/syn/tests/test_size.rs b/vendor/syn/tests/test_size.rs deleted file mode 100644 index 29fd43589d427a..00000000000000 --- a/vendor/syn/tests/test_size.rs +++ /dev/null @@ -1,54 +0,0 @@ -// Assumes proc-macro2's "span-locations" feature is off. 
- -use std::mem; -use syn::{Expr, Item, Lit, Pat, Type}; - -#[rustversion::attr(before(2022-11-24), ignore = "requires nightly-2022-11-24 or newer")] -#[rustversion::attr( - since(2022-11-24), - cfg_attr(not(target_pointer_width = "64"), ignore = "only applicable to 64-bit") -)] -#[test] -fn test_expr_size() { - assert_eq!(mem::size_of::<Expr>(), 176); -} - -#[rustversion::attr(before(2022-09-09), ignore = "requires nightly-2022-09-09 or newer")] -#[rustversion::attr( - since(2022-09-09), - cfg_attr(not(target_pointer_width = "64"), ignore = "only applicable to 64-bit") -)] -#[test] -fn test_item_size() { - assert_eq!(mem::size_of::<Item>(), 352); -} - -#[rustversion::attr(before(2023-04-29), ignore = "requires nightly-2023-04-29 or newer")] -#[rustversion::attr( - since(2023-04-29), - cfg_attr(not(target_pointer_width = "64"), ignore = "only applicable to 64-bit") -)] -#[test] -fn test_type_size() { - assert_eq!(mem::size_of::<Type>(), 224); -} - -#[rustversion::attr(before(2023-04-29), ignore = "requires nightly-2023-04-29 or newer")] -#[rustversion::attr( - since(2023-04-29), - cfg_attr(not(target_pointer_width = "64"), ignore = "only applicable to 64-bit") -)] -#[test] -fn test_pat_size() { - assert_eq!(mem::size_of::<Pat>(), 184); -} - -#[rustversion::attr(before(2023-12-20), ignore = "requires nightly-2023-12-20 or newer")] -#[rustversion::attr( - since(2023-12-20), - cfg_attr(not(target_pointer_width = "64"), ignore = "only applicable to 64-bit") -)] -#[test] -fn test_lit_size() { - assert_eq!(mem::size_of::<Lit>(), 24); -} diff --git a/vendor/syn/tests/test_stmt.rs b/vendor/syn/tests/test_stmt.rs deleted file mode 100644 index 101c1b1c906d2f..00000000000000 --- a/vendor/syn/tests/test_stmt.rs +++ /dev/null @@ -1,337 +0,0 @@ -#![allow( - clippy::assertions_on_result_states, - clippy::elidable_lifetime_names, - clippy::needless_lifetimes, - clippy::non_ascii_literal, - clippy::uninlined_format_args -)] - -#[macro_use] -mod snapshot; - -mod debug; - -use proc_macro2::{Delimiter, Group, Ident, Span, TokenStream, TokenTree}; -use quote::{quote, ToTokens as _}; -use syn::parse::Parser as _; -use syn::{Block, Stmt}; - -#[test] -fn test_raw_operator() { - let stmt = syn::parse_str::<Stmt>("let _ = &raw const x;").unwrap(); - - snapshot!(stmt, @r#" - Stmt::Local { - pat: Pat::Wild, - init: Some(LocalInit { - expr: Expr::RawAddr { - mutability: PointerMutability::Const, - expr: Expr::Path { - path: Path { - segments: [ - PathSegment { - ident: "x", - }, - ], - }, - }, - }, - }), - } - "#); -} - -#[test] -fn test_raw_variable() { - let stmt = syn::parse_str::<Stmt>("let _ = &raw;").unwrap(); - - snapshot!(stmt, @r#" - Stmt::Local { - pat: Pat::Wild, - init: Some(LocalInit { - expr: Expr::Reference { - expr: Expr::Path { - path: Path { - segments: [ - PathSegment { - ident: "raw", - }, - ], - }, - }, - }, - }), - } - "#); -} - -#[test] -fn test_raw_invalid() { - assert!(syn::parse_str::<Stmt>("let _ = &raw x;").is_err()); -} - -#[test] -fn test_none_group() { - // «∅ async fn f() {} ∅» - let tokens = TokenStream::from_iter([TokenTree::Group(Group::new( - Delimiter::None, - TokenStream::from_iter([ - TokenTree::Ident(Ident::new("async", Span::call_site())), - TokenTree::Ident(Ident::new("fn", Span::call_site())), - TokenTree::Ident(Ident::new("f", Span::call_site())), - TokenTree::Group(Group::new(Delimiter::Parenthesis, TokenStream::new())), - TokenTree::Group(Group::new(Delimiter::Brace, TokenStream::new())), - ]), - ))]); - snapshot!(tokens as Stmt, @r#" - Stmt::Item(Item::Fn { - vis: 
Visibility::Inherited, - sig: Signature { - asyncness: Some, - ident: "f", - generics: Generics, - output: ReturnType::Default, - }, - block: Block { - stmts: [], - }, - }) - "#); - - let tokens = Group::new(Delimiter::None, quote!(let None = None)).to_token_stream(); - let stmts = Block::parse_within.parse2(tokens).unwrap(); - snapshot!(stmts, @r#" - [ - Stmt::Expr( - Expr::Group { - expr: Expr::Let { - pat: Pat::Ident { - ident: "None", - }, - expr: Expr::Path { - path: Path { - segments: [ - PathSegment { - ident: "None", - }, - ], - }, - }, - }, - }, - None, - ), - ] - "#); -} - -#[test] -fn test_let_dot_dot() { - let tokens = quote! { - let .. = 10; - }; - - snapshot!(tokens as Stmt, @r#" - Stmt::Local { - pat: Pat::Rest, - init: Some(LocalInit { - expr: Expr::Lit { - lit: 10, - }, - }), - } - "#); -} - -#[test] -fn test_let_else() { - let tokens = quote! { - let Some(x) = None else { return 0; }; - }; - - snapshot!(tokens as Stmt, @r#" - Stmt::Local { - pat: Pat::TupleStruct { - path: Path { - segments: [ - PathSegment { - ident: "Some", - }, - ], - }, - elems: [ - Pat::Ident { - ident: "x", - }, - ], - }, - init: Some(LocalInit { - expr: Expr::Path { - path: Path { - segments: [ - PathSegment { - ident: "None", - }, - ], - }, - }, - diverge: Some(Expr::Block { - block: Block { - stmts: [ - Stmt::Expr( - Expr::Return { - expr: Some(Expr::Lit { - lit: 0, - }), - }, - Some, - ), - ], - }, - }), - }), - } - "#); -} - -#[test] -fn test_macros() { - let tokens = quote! { - fn main() { - macro_rules! mac {} - thread_local! { static FOO } - println!(""); - vec![] - } - }; - - snapshot!(tokens as Stmt, @r#" - Stmt::Item(Item::Fn { - vis: Visibility::Inherited, - sig: Signature { - ident: "main", - generics: Generics, - output: ReturnType::Default, - }, - block: Block { - stmts: [ - Stmt::Item(Item::Macro { - ident: Some("mac"), - mac: Macro { - path: Path { - segments: [ - PathSegment { - ident: "macro_rules", - }, - ], - }, - delimiter: MacroDelimiter::Brace, - tokens: TokenStream(``), - }, - }), - Stmt::Macro { - mac: Macro { - path: Path { - segments: [ - PathSegment { - ident: "thread_local", - }, - ], - }, - delimiter: MacroDelimiter::Brace, - tokens: TokenStream(`static FOO`), - }, - }, - Stmt::Macro { - mac: Macro { - path: Path { - segments: [ - PathSegment { - ident: "println", - }, - ], - }, - delimiter: MacroDelimiter::Paren, - tokens: TokenStream(`""`), - }, - semi_token: Some, - }, - Stmt::Expr( - Expr::Macro { - mac: Macro { - path: Path { - segments: [ - PathSegment { - ident: "vec", - }, - ], - }, - delimiter: MacroDelimiter::Bracket, - tokens: TokenStream(``), - }, - }, - None, - ), - ], - }, - }) - "#); -} - -#[test] -fn test_early_parse_loop() { - // The following is an Expr::Loop followed by Expr::Tuple. It is not an - // Expr::Call. - let tokens = quote! { - loop {} - () - }; - - let stmts = Block::parse_within.parse2(tokens).unwrap(); - - snapshot!(stmts, @r#" - [ - Stmt::Expr( - Expr::Loop { - body: Block { - stmts: [], - }, - }, - None, - ), - Stmt::Expr( - Expr::Tuple, - None, - ), - ] - "#); - - let tokens = quote! 
{ - 'a: loop {} - () - }; - - let stmts = Block::parse_within.parse2(tokens).unwrap(); - - snapshot!(stmts, @r#" - [ - Stmt::Expr( - Expr::Loop { - label: Some(Label { - name: Lifetime { - ident: "a", - }, - }), - body: Block { - stmts: [], - }, - }, - None, - ), - Stmt::Expr( - Expr::Tuple, - None, - ), - ] - "#); -} diff --git a/vendor/syn/tests/test_token_trees.rs b/vendor/syn/tests/test_token_trees.rs deleted file mode 100644 index 1b473858cd1b28..00000000000000 --- a/vendor/syn/tests/test_token_trees.rs +++ /dev/null @@ -1,38 +0,0 @@ -#![allow( - clippy::elidable_lifetime_names, - clippy::needless_lifetimes, - clippy::uninlined_format_args -)] - -#[macro_use] -mod snapshot; - -mod debug; - -use proc_macro2::TokenStream; -use quote::quote; -use syn::Lit; - -#[test] -fn test_struct() { - let input = " - #[derive(Debug, Clone)] - pub struct Item { - pub ident: Ident, - pub attrs: Vec<Attribute>, - } - "; - - snapshot!(input as TokenStream, @r##" - TokenStream( - `# [derive (Debug , Clone)] pub struct Item { pub ident : Ident , pub attrs : Vec < Attribute >, }`, - ) - "##); -} - -#[test] -fn test_literal_mangling() { - let code = "0_4"; - let parsed: Lit = syn::parse_str(code).unwrap(); - assert_eq!(code, quote!(#parsed).to_string()); -} diff --git a/vendor/syn/tests/test_ty.rs b/vendor/syn/tests/test_ty.rs deleted file mode 100644 index 5f29220114781a..00000000000000 --- a/vendor/syn/tests/test_ty.rs +++ /dev/null @@ -1,471 +0,0 @@ -#![allow( - clippy::elidable_lifetime_names, - clippy::needless_lifetimes, - clippy::uninlined_format_args -)] - -#[macro_use] -mod snapshot; - -mod debug; - -use proc_macro2::{Delimiter, Group, Ident, Punct, Spacing, Span, TokenStream, TokenTree}; -use quote::{quote, ToTokens as _}; -use syn::punctuated::Punctuated; -use syn::{parse_quote, token, Token, Type, TypeTuple}; - -#[test] -fn test_mut_self() { - syn::parse_str::<Type>("fn(mut self)").unwrap(); - syn::parse_str::<Type>("fn(mut self,)").unwrap(); - syn::parse_str::<Type>("fn(mut self: ())").unwrap(); - syn::parse_str::<Type>("fn(mut self: ...)").unwrap_err(); - syn::parse_str::<Type>("fn(mut self: mut self)").unwrap_err(); - syn::parse_str::<Type>("fn(mut self::T)").unwrap_err(); -} - -#[test] -fn test_macro_variable_type() { - // mimics the token stream corresponding to `$ty<T>` - let tokens = TokenStream::from_iter([ - TokenTree::Group(Group::new(Delimiter::None, quote! { ty })), - TokenTree::Punct(Punct::new('<', Spacing::Alone)), - TokenTree::Ident(Ident::new("T", Span::call_site())), - TokenTree::Punct(Punct::new('>', Spacing::Alone)), - ]); - - snapshot!(tokens as Type, @r#" - Type::Path { - path: Path { - segments: [ - PathSegment { - ident: "ty", - arguments: PathArguments::AngleBracketed { - args: [ - GenericArgument::Type(Type::Path { - path: Path { - segments: [ - PathSegment { - ident: "T", - }, - ], - }, - }), - ], - }, - }, - ], - }, - } - "#); - - // mimics the token stream corresponding to `$ty::<T>` - let tokens = TokenStream::from_iter([ - TokenTree::Group(Group::new(Delimiter::None, quote! 
{ ty })), - TokenTree::Punct(Punct::new(':', Spacing::Joint)), - TokenTree::Punct(Punct::new(':', Spacing::Alone)), - TokenTree::Punct(Punct::new('<', Spacing::Alone)), - TokenTree::Ident(Ident::new("T", Span::call_site())), - TokenTree::Punct(Punct::new('>', Spacing::Alone)), - ]); - - snapshot!(tokens as Type, @r#" - Type::Path { - path: Path { - segments: [ - PathSegment { - ident: "ty", - arguments: PathArguments::AngleBracketed { - colon2_token: Some, - args: [ - GenericArgument::Type(Type::Path { - path: Path { - segments: [ - PathSegment { - ident: "T", - }, - ], - }, - }), - ], - }, - }, - ], - }, - } - "#); -} - -#[test] -fn test_group_angle_brackets() { - // mimics the token stream corresponding to `Option<$ty>` - let tokens = TokenStream::from_iter([ - TokenTree::Ident(Ident::new("Option", Span::call_site())), - TokenTree::Punct(Punct::new('<', Spacing::Alone)), - TokenTree::Group(Group::new(Delimiter::None, quote! { Vec<u8> })), - TokenTree::Punct(Punct::new('>', Spacing::Alone)), - ]); - - snapshot!(tokens as Type, @r#" - Type::Path { - path: Path { - segments: [ - PathSegment { - ident: "Option", - arguments: PathArguments::AngleBracketed { - args: [ - GenericArgument::Type(Type::Group { - elem: Type::Path { - path: Path { - segments: [ - PathSegment { - ident: "Vec", - arguments: PathArguments::AngleBracketed { - args: [ - GenericArgument::Type(Type::Path { - path: Path { - segments: [ - PathSegment { - ident: "u8", - }, - ], - }, - }), - ], - }, - }, - ], - }, - }, - }), - ], - }, - }, - ], - }, - } - "#); -} - -#[test] -fn test_group_colons() { - // mimics the token stream corresponding to `$ty::Item` - let tokens = TokenStream::from_iter([ - TokenTree::Group(Group::new(Delimiter::None, quote! { Vec<u8> })), - TokenTree::Punct(Punct::new(':', Spacing::Joint)), - TokenTree::Punct(Punct::new(':', Spacing::Alone)), - TokenTree::Ident(Ident::new("Item", Span::call_site())), - ]); - - snapshot!(tokens as Type, @r#" - Type::Path { - path: Path { - segments: [ - PathSegment { - ident: "Vec", - arguments: PathArguments::AngleBracketed { - args: [ - GenericArgument::Type(Type::Path { - path: Path { - segments: [ - PathSegment { - ident: "u8", - }, - ], - }, - }), - ], - }, - }, - Token![::], - PathSegment { - ident: "Item", - }, - ], - }, - } - "#); - - let tokens = TokenStream::from_iter([ - TokenTree::Group(Group::new(Delimiter::None, quote! 
{ [T] })), - TokenTree::Punct(Punct::new(':', Spacing::Joint)), - TokenTree::Punct(Punct::new(':', Spacing::Alone)), - TokenTree::Ident(Ident::new("Element", Span::call_site())), - ]); - - snapshot!(tokens as Type, @r#" - Type::Path { - qself: Some(QSelf { - ty: Type::Slice { - elem: Type::Path { - path: Path { - segments: [ - PathSegment { - ident: "T", - }, - ], - }, - }, - }, - position: 0, - }), - path: Path { - leading_colon: Some, - segments: [ - PathSegment { - ident: "Element", - }, - ], - }, - } - "#); -} - -#[test] -fn test_trait_object() { - let tokens = quote!(dyn for<'a> Trait<'a> + 'static); - snapshot!(tokens as Type, @r#" - Type::TraitObject { - dyn_token: Some, - bounds: [ - TypeParamBound::Trait(TraitBound { - lifetimes: Some(BoundLifetimes { - lifetimes: [ - GenericParam::Lifetime(LifetimeParam { - lifetime: Lifetime { - ident: "a", - }, - }), - ], - }), - path: Path { - segments: [ - PathSegment { - ident: "Trait", - arguments: PathArguments::AngleBracketed { - args: [ - GenericArgument::Lifetime(Lifetime { - ident: "a", - }), - ], - }, - }, - ], - }, - }), - Token![+], - TypeParamBound::Lifetime { - ident: "static", - }, - ], - } - "#); - - let tokens = quote!(dyn 'a + Trait); - snapshot!(tokens as Type, @r#" - Type::TraitObject { - dyn_token: Some, - bounds: [ - TypeParamBound::Lifetime { - ident: "a", - }, - Token![+], - TypeParamBound::Trait(TraitBound { - path: Path { - segments: [ - PathSegment { - ident: "Trait", - }, - ], - }, - }), - ], - } - "#); - - // None of the following are valid Rust types. - syn::parse_str::<Type>("for<'a> dyn Trait<'a>").unwrap_err(); - syn::parse_str::<Type>("dyn for<'a> 'a + Trait").unwrap_err(); -} - -#[test] -fn test_trailing_plus() { - #[rustfmt::skip] - let tokens = quote!(impl Trait +); - snapshot!(tokens as Type, @r#" - Type::ImplTrait { - bounds: [ - TypeParamBound::Trait(TraitBound { - path: Path { - segments: [ - PathSegment { - ident: "Trait", - }, - ], - }, - }), - Token![+], - ], - } - "#); - - #[rustfmt::skip] - let tokens = quote!(dyn Trait +); - snapshot!(tokens as Type, @r#" - Type::TraitObject { - dyn_token: Some, - bounds: [ - TypeParamBound::Trait(TraitBound { - path: Path { - segments: [ - PathSegment { - ident: "Trait", - }, - ], - }, - }), - Token![+], - ], - } - "#); - - #[rustfmt::skip] - let tokens = quote!(Trait +); - snapshot!(tokens as Type, @r#" - Type::TraitObject { - bounds: [ - TypeParamBound::Trait(TraitBound { - path: Path { - segments: [ - PathSegment { - ident: "Trait", - }, - ], - }, - }), - Token![+], - ], - } - "#); -} - -#[test] -fn test_tuple_comma() { - let mut expr = TypeTuple { - paren_token: token::Paren::default(), - elems: Punctuated::new(), - }; - snapshot!(expr.to_token_stream() as Type, @"Type::Tuple"); - - expr.elems.push_value(parse_quote!(_)); - // Must not parse to Type::Paren - snapshot!(expr.to_token_stream() as Type, @r#" - Type::Tuple { - elems: [ - Type::Infer, - Token![,], - ], - } - "#); - - expr.elems.push_punct(<Token![,]>::default()); - snapshot!(expr.to_token_stream() as Type, @r#" - Type::Tuple { - elems: [ - Type::Infer, - Token![,], - ], - } - "#); - - expr.elems.push_value(parse_quote!(_)); - snapshot!(expr.to_token_stream() as Type, @r#" - Type::Tuple { - elems: [ - Type::Infer, - Token![,], - Type::Infer, - ], - } - "#); - - expr.elems.push_punct(<Token![,]>::default()); - snapshot!(expr.to_token_stream() as Type, @r#" - Type::Tuple { - elems: [ - Type::Infer, - Token![,], - Type::Infer, - Token![,], - ], - } - "#); -} - -#[test] -fn test_impl_trait_use() { - let 
tokens = quote! { - impl Sized + use<'_, 'a, A, Test> - }; - - snapshot!(tokens as Type, @r#" - Type::ImplTrait { - bounds: [ - TypeParamBound::Trait(TraitBound { - path: Path { - segments: [ - PathSegment { - ident: "Sized", - }, - ], - }, - }), - Token![+], - TypeParamBound::PreciseCapture(PreciseCapture { - params: [ - CapturedParam::Lifetime(Lifetime { - ident: "_", - }), - Token![,], - CapturedParam::Lifetime(Lifetime { - ident: "a", - }), - Token![,], - CapturedParam::Ident("A"), - Token![,], - CapturedParam::Ident("Test"), - ], - }), - ], - } - "#); - - let trailing = quote! { - impl Sized + use<'_,> - }; - - snapshot!(trailing as Type, @r#" - Type::ImplTrait { - bounds: [ - TypeParamBound::Trait(TraitBound { - path: Path { - segments: [ - PathSegment { - ident: "Sized", - }, - ], - }, - }), - Token![+], - TypeParamBound::PreciseCapture(PreciseCapture { - params: [ - CapturedParam::Lifetime(Lifetime { - ident: "_", - }), - Token![,], - ], - }), - ], - } - "#); -} diff --git a/vendor/syn/tests/test_unparenthesize.rs b/vendor/syn/tests/test_unparenthesize.rs deleted file mode 100644 index 5fa2e59e14be63..00000000000000 --- a/vendor/syn/tests/test_unparenthesize.rs +++ /dev/null @@ -1,70 +0,0 @@ -#![cfg(not(miri))] -#![recursion_limit = "1024"] -#![feature(rustc_private)] -#![allow( - clippy::elidable_lifetime_names, - clippy::manual_assert, - clippy::match_like_matches_macro, - clippy::needless_lifetimes, - clippy::uninlined_format_args -)] - -use crate::common::visit::{AsIfPrinted, FlattenParens}; -use quote::ToTokens as _; -use std::fs; -use std::panic; -use std::path::Path; -use std::sync::atomic::{AtomicUsize, Ordering}; -use syn::visit_mut::VisitMut as _; - -#[macro_use] -mod macros; - -mod common; -mod repo; - -#[test] -fn test_unparenthesize() { - repo::rayon_init(); - repo::clone_rust(); - - let failed = AtomicUsize::new(0); - - repo::for_each_rust_file(|path| test(path, &failed)); - - let failed = failed.into_inner(); - if failed > 0 { - panic!("{} failures", failed); - } -} - -fn test(path: &Path, failed: &AtomicUsize) { - let content = fs::read_to_string(path).unwrap(); - - match panic::catch_unwind(|| -> syn::Result<()> { - let mut before = syn::parse_file(&content)?; - FlattenParens::discard_attrs().visit_file_mut(&mut before); - let printed = before.to_token_stream(); - let mut after = syn::parse2::<syn::File>(printed.clone())?; - FlattenParens::discard_attrs().visit_file_mut(&mut after); - // Normalize features that we expect Syn not to print. 
- AsIfPrinted.visit_file_mut(&mut before); - if before != after { - errorf!("=== {}\n", path.display()); - if failed.fetch_add(1, Ordering::Relaxed) == 0 { - errorf!("BEFORE:\n{:#?}\nAFTER:\n{:#?}\n", before, after); - } - } - Ok(()) - }) { - Err(_) => { - errorf!("=== {}: syn panic\n", path.display()); - failed.fetch_add(1, Ordering::Relaxed); - } - Ok(Err(msg)) => { - errorf!("=== {}: syn failed to parse\n{:?}\n", path.display(), msg); - failed.fetch_add(1, Ordering::Relaxed); - } - Ok(Ok(())) => {} - } -} diff --git a/vendor/syn/tests/test_visibility.rs b/vendor/syn/tests/test_visibility.rs deleted file mode 100644 index cf15574b510299..00000000000000 --- a/vendor/syn/tests/test_visibility.rs +++ /dev/null @@ -1,191 +0,0 @@ -#![allow( - clippy::elidable_lifetime_names, - clippy::needless_lifetimes, - clippy::uninlined_format_args -)] - -#[macro_use] -mod snapshot; - -mod debug; - -use proc_macro2::{Delimiter, Group, Ident, Punct, Spacing, Span, TokenStream, TokenTree}; -use quote::quote; -use syn::parse::{Parse, ParseStream}; -use syn::{DeriveInput, Result, Visibility}; - -#[derive(Debug)] -struct VisRest { - vis: Visibility, - rest: TokenStream, -} - -impl Parse for VisRest { - fn parse(input: ParseStream) -> Result<Self> { - Ok(VisRest { - vis: input.parse()?, - rest: input.parse()?, - }) - } -} - -macro_rules! assert_vis_parse { - ($input:expr, Ok($p:pat)) => { - assert_vis_parse!($input, Ok($p) + ""); - }; - - ($input:expr, Ok($p:pat) + $rest:expr) => { - let expected = $rest.parse::<TokenStream>().unwrap(); - let parse: VisRest = syn::parse_str($input).unwrap(); - - match parse.vis { - $p => {} - _ => panic!("expected {}, got {:?}", stringify!($p), parse.vis), - } - - // NOTE: Round-trips through `to_string` to avoid potential whitespace - // diffs. 
- assert_eq!(parse.rest.to_string(), expected.to_string()); - }; - - ($input:expr, Err) => { - syn::parse2::<VisRest>($input.parse().unwrap()).unwrap_err(); - }; -} - -#[test] -fn test_pub() { - assert_vis_parse!("pub", Ok(Visibility::Public(_))); -} - -#[test] -fn test_inherited() { - assert_vis_parse!("", Ok(Visibility::Inherited)); -} - -#[test] -fn test_in() { - assert_vis_parse!("pub(in foo::bar)", Ok(Visibility::Restricted(_))); -} - -#[test] -fn test_pub_crate() { - assert_vis_parse!("pub(crate)", Ok(Visibility::Restricted(_))); -} - -#[test] -fn test_pub_self() { - assert_vis_parse!("pub(self)", Ok(Visibility::Restricted(_))); -} - -#[test] -fn test_pub_super() { - assert_vis_parse!("pub(super)", Ok(Visibility::Restricted(_))); -} - -#[test] -fn test_missing_in() { - assert_vis_parse!("pub(foo::bar)", Ok(Visibility::Public(_)) + "(foo::bar)"); -} - -#[test] -fn test_missing_in_path() { - assert_vis_parse!("pub(in)", Err); -} - -#[test] -fn test_crate_path() { - assert_vis_parse!( - "pub(crate::A, crate::B)", - Ok(Visibility::Public(_)) + "(crate::A, crate::B)" - ); -} - -#[test] -fn test_junk_after_in() { - assert_vis_parse!("pub(in some::path @@garbage)", Err); -} - -#[test] -fn test_inherited_vis_named_field() { - // mimics `struct S { $vis $field: () }` where $vis is empty - let tokens = TokenStream::from_iter([ - TokenTree::Ident(Ident::new("struct", Span::call_site())), - TokenTree::Ident(Ident::new("S", Span::call_site())), - TokenTree::Group(Group::new( - Delimiter::Brace, - TokenStream::from_iter([ - TokenTree::Group(Group::new(Delimiter::None, TokenStream::new())), - TokenTree::Group(Group::new(Delimiter::None, quote!(f))), - TokenTree::Punct(Punct::new(':', Spacing::Alone)), - TokenTree::Group(Group::new(Delimiter::Parenthesis, TokenStream::new())), - ]), - )), - ]); - - snapshot!(tokens as DeriveInput, @r#" - DeriveInput { - vis: Visibility::Inherited, - ident: "S", - generics: Generics, - data: Data::Struct { - fields: Fields::Named { - named: [ - Field { - vis: Visibility::Inherited, - ident: Some("f"), - colon_token: Some, - ty: Type::Tuple, - }, - ], - }, - }, - } - "#); -} - -#[test] -fn test_inherited_vis_unnamed_field() { - // mimics `struct S($vis $ty);` where $vis is empty - let tokens = TokenStream::from_iter([ - TokenTree::Ident(Ident::new("struct", Span::call_site())), - TokenTree::Ident(Ident::new("S", Span::call_site())), - TokenTree::Group(Group::new( - Delimiter::Parenthesis, - TokenStream::from_iter([ - TokenTree::Group(Group::new(Delimiter::None, TokenStream::new())), - TokenTree::Group(Group::new(Delimiter::None, quote!(str))), - ]), - )), - TokenTree::Punct(Punct::new(';', Spacing::Alone)), - ]); - - snapshot!(tokens as DeriveInput, @r#" - DeriveInput { - vis: Visibility::Inherited, - ident: "S", - generics: Generics, - data: Data::Struct { - fields: Fields::Unnamed { - unnamed: [ - Field { - vis: Visibility::Inherited, - ty: Type::Group { - elem: Type::Path { - path: Path { - segments: [ - PathSegment { - ident: "str", - }, - ], - }, - }, - }, - }, - ], - }, - semi_token: Some, - }, - } - "#); -} diff --git a/vendor/syn/tests/zzz_stable.rs b/vendor/syn/tests/zzz_stable.rs deleted file mode 100644 index a1a670d9edeea1..00000000000000 --- a/vendor/syn/tests/zzz_stable.rs +++ /dev/null @@ -1,33 +0,0 @@ -#![cfg(syn_disable_nightly_tests)] - -use std::io::{self, Write}; -use termcolor::{Color, ColorChoice, ColorSpec, StandardStream, WriteColor}; - -const MSG: &str = "\ -‖ -‖ WARNING: -‖ This is not a nightly compiler so not all tests were able to -‖ run. 
Syn includes tests that compare Syn's parser against the -‖ compiler's parser, which requires access to unstable librustc -‖ data structures and a nightly compiler. -‖ -"; - -#[test] -fn notice() -> io::Result<()> { - let header = "WARNING"; - let index_of_header = MSG.find(header).unwrap(); - let before = &MSG[..index_of_header]; - let after = &MSG[index_of_header + header.len()..]; - - let mut stderr = StandardStream::stderr(ColorChoice::Auto); - stderr.set_color(ColorSpec::new().set_fg(Some(Color::Yellow)))?; - write!(&mut stderr, "{}", before)?; - stderr.set_color(ColorSpec::new().set_bold(true).set_fg(Some(Color::Yellow)))?; - write!(&mut stderr, "{}", header)?; - stderr.set_color(ColorSpec::new().set_fg(Some(Color::Yellow)))?; - write!(&mut stderr, "{}", after)?; - stderr.reset()?; - - Ok(()) -} diff --git a/vendor/unicode-ident/.cargo-checksum.json b/vendor/unicode-ident/.cargo-checksum.json deleted file mode 100644 index 1eb9ea6340947c..00000000000000 --- a/vendor/unicode-ident/.cargo-checksum.json +++ /dev/null @@ -1 +0,0 @@ -{"files":{".cargo_vcs_info.json":"776471b17558966a9555dd89a2b08f260fe63b13f6da824632175db9f6522ae8",".github/FUNDING.yml":"b017158736b3c9751a2d21edfce7fe61c8954e2fced8da8dd3013c2f3e295bd9",".github/workflows/ci.yml":"173314c15e4d92ca66030916da40438384de3125400464d2e1957826bf5c01a6","Cargo.lock":"9e475b8f7b444cf44cfe12774442b59716d5c16ea03914340ea3b43500d80aa0","Cargo.toml":"ed14e5fd17333842d830900782e2b4e38aaf3967f540b7635a45c09451fffa01","Cargo.toml.orig":"88664595aa541de6ba77d30d7d71b7f4a533c15c2bb9da5d4ab9bc5e46ef0d56","LICENSE-APACHE":"62c7a1e35f56406896d7aa7ca52d0cc0d272ac022b5d2796e7d6905db8a3636a","LICENSE-MIT":"23f18e03dc49df91622fe2a76176497404e46ced8a715d9d2b67a7446571cca3","LICENSE-UNICODE":"f7db81051789b729fea528a63ec4c938fdcb93d9d61d97dc8cc2e9df6d47f2a1","README.md":"2fd3a0c6c9abd2c042fb319221f9ef2c95728a6e3725b0f067e4d315c5dbfe5a","benches/xid.rs":"7eb058c1140a253f7177af8868a95aabb1b92c52dc7eee5abbfeadb507d7845d","src/lib.rs":"3254e755eaf22e9e7f347439df7e3fc882e582a874cd5832e7f49478dd973799","src/tables.rs":"96d345fb3df2dc6718a6fe15cac1fcd21b7f006d5fb531dac7a5d0a0711e16bb","tests/compare.rs":"f2311271aa1db7380e5bf153ef83ee99777e14579e4f28c2b1a3e21877ffe715","tests/fst/.gitignore":"2cd419079c0a08bb15766520880998651dd1c72c55347a31f43357595b16ac10","tests/fst/mod.rs":"69a3aaf59acd8bca962ecc6234be56be8c0934ab79b253162f10eb881523901f","tests/fst/xid_continue.fst":"b58be4f0c498253e7a5ac664046096f15f249da66131347d4b822097623549a2","tests/fst/xid_start.fst":"aec7eecdacfce308d2e6210f47d28ed3aad5c8b048efbfbc11fc22e60fbf435b","tests/roaring/mod.rs":"f5c6d55463a7f53e92a493cf046d717149250fbafc0e0fe94bdb531377bf8b11","tests/static_size.rs":"52763dc203f211561d2ef150e8f970650459820bdc20ae18c6c046876b99cd2a","tests/tables/mod.rs":"e6949172d10fc4b2431ce7546269bfd4f9146454c8c3e31faf5e5d80c16a8ab6","tests/tables/tables.rs":"302d87306100b6280f8db93e167dc70c47f724045cf1312b7354683656c3f36b","tests/trie/mod.rs":"d4acbb716bcbaf80660039797f45e138ed8bbd66749fa3b19b1a971574679cc9","tests/trie/trie.rs":"f7b9edc1e8a98e3be42b653bba27bb4eb5fc48a559d6d8d1c6a4db4b5425b0d5"},"package":"9312f7c4f6ff9069b165498234ce8be658059c6728633667c526e27dc2cf1df5"} \ No newline at end of file diff --git a/vendor/unicode-ident/.cargo_vcs_info.json b/vendor/unicode-ident/.cargo_vcs_info.json deleted file mode 100644 index b80f22af9dd1ad..00000000000000 --- a/vendor/unicode-ident/.cargo_vcs_info.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "git": { - "sha1": "10d5e534c9e06fffcdc6896d4779ffb25641659b" 
- }, - "path_in_vcs": "" -} \ No newline at end of file diff --git a/vendor/unicode-ident/.github/FUNDING.yml b/vendor/unicode-ident/.github/FUNDING.yml deleted file mode 100644 index 750707701cdae9..00000000000000 --- a/vendor/unicode-ident/.github/FUNDING.yml +++ /dev/null @@ -1 +0,0 @@ -github: dtolnay diff --git a/vendor/unicode-ident/.github/workflows/ci.yml b/vendor/unicode-ident/.github/workflows/ci.yml deleted file mode 100644 index dc92f2ebafa3df..00000000000000 --- a/vendor/unicode-ident/.github/workflows/ci.yml +++ /dev/null @@ -1,110 +0,0 @@ -name: CI - -on: - push: - pull_request: - workflow_dispatch: - schedule: [cron: "40 1 * * *"] - -permissions: - contents: read - -env: - RUSTFLAGS: -Dwarnings - -jobs: - pre_ci: - uses: dtolnay/.github/.github/workflows/pre_ci.yml@master - - unicode: - name: latest Unicode - runs-on: ubuntu-latest - if: github.event_name != 'pull_request' - timeout-minutes: 45 - steps: - - uses: actions/checkout@v5 - - uses: dtolnay/rust-toolchain@stable - - id: ucd-generate - run: echo "version=$(grep 'ucd-generate [0-9]\+\.[0-9]\+\.[0-9]\+' tests/tables/tables.rs --only-matching)" >> $GITHUB_OUTPUT - - run: cargo install ucd-generate - - run: curl https://www.unicode.org/Public/latest/ucd/UCD.zip --location --remote-name --silent --show-error --fail --retry 2 - - run: unzip UCD.zip -d UCD - - run: ucd-generate property-bool UCD --include XID_Start,XID_Continue > tests/tables/tables.rs - - run: ucd-generate property-bool UCD --include XID_Start,XID_Continue --fst-dir tests/fst - - run: ucd-generate property-bool UCD --include XID_Start,XID_Continue --trie-set > tests/trie/trie.rs - - run: cargo run --manifest-path generate/Cargo.toml - - run: sed --in-place 's/ucd-generate [0-9]\+\.[0-9]\+\.[0-9]\+/${{steps.ucd-generate.outputs.version}}/' tests/tables/tables.rs tests/trie/trie.rs - - run: git diff --exit-code - - test: - name: Rust ${{matrix.rust}} - needs: pre_ci - if: needs.pre_ci.outputs.continue - runs-on: ubuntu-latest - strategy: - fail-fast: false - matrix: - rust: [nightly, beta, stable, 1.81.0] - timeout-minutes: 45 - steps: - - uses: actions/checkout@v5 - - uses: dtolnay/rust-toolchain@master - with: - toolchain: ${{matrix.rust}} - - name: Enable type layout randomization - run: echo RUSTFLAGS=${RUSTFLAGS}\ -Zrandomize-layout >> $GITHUB_ENV - if: matrix.rust == 'nightly' - - run: cargo test - - run: cargo check --benches - - uses: actions/upload-artifact@v4 - if: matrix.rust == 'nightly' && always() - with: - name: Cargo.lock - path: Cargo.lock - continue-on-error: true - - msrv: - name: Rust 1.31.0 - needs: pre_ci - if: needs.pre_ci.outputs.continue - runs-on: ubuntu-latest - timeout-minutes: 45 - steps: - - uses: actions/checkout@v5 - - uses: dtolnay/rust-toolchain@1.31.0 - - run: cargo check --manifest-path tests/crate/Cargo.toml - - doc: - name: Documentation - needs: pre_ci - if: needs.pre_ci.outputs.continue - runs-on: ubuntu-latest - timeout-minutes: 45 - env: - RUSTDOCFLAGS: -Dwarnings - steps: - - uses: actions/checkout@v5 - - uses: dtolnay/rust-toolchain@nightly - - uses: dtolnay/install@cargo-docs-rs - - run: cargo docs-rs - - clippy: - name: Clippy - runs-on: ubuntu-latest - if: github.event_name != 'pull_request' - timeout-minutes: 45 - steps: - - uses: actions/checkout@v5 - - uses: dtolnay/rust-toolchain@clippy - - run: cargo clippy --tests --benches --workspace -- -Dclippy::all -Dclippy::pedantic - - outdated: - name: Outdated - runs-on: ubuntu-latest - if: github.event_name != 'pull_request' - timeout-minutes: 45 - steps: - - 
uses: actions/checkout@v5 - - uses: dtolnay/rust-toolchain@stable - - uses: dtolnay/install@cargo-outdated - - run: cargo outdated --workspace --exit-code 1 diff --git a/vendor/unicode-ident/Cargo.lock b/vendor/unicode-ident/Cargo.lock deleted file mode 100644 index e0bdfda23dd9e2..00000000000000 --- a/vendor/unicode-ident/Cargo.lock +++ /dev/null @@ -1,499 +0,0 @@ -# This file is automatically @generated by Cargo. -# It is not intended for manual editing. -[[package]] -name = "aho-corasick" -version = "1.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "memchr 2.7.6 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "anes" -version = "0.1.6" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "anstyle" -version = "1.0.13" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "autocfg" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "bytemuck" -version = "1.24.0" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "byteorder" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "cast" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "cfg-if" -version = "1.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "ciborium" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "ciborium-io 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", - "ciborium-ll 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.228 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "ciborium-io" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "ciborium-ll" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "ciborium-io 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", - "half 2.7.1 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "clap" -version = "4.5.51" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "clap_builder 4.5.51 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "clap_builder" -version = "4.5.51" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "anstyle 1.0.13 (registry+https://github.com/rust-lang/crates.io-index)", - "clap_lex 0.7.6 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "clap_lex" -version = "0.7.6" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "criterion" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "anes 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", - "cast 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", - "ciborium 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", - "clap 4.5.51 (registry+https://github.com/rust-lang/crates.io-index)", - "criterion-plot 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)", - "itertools 0.13.0 (registry+https://github.com/rust-lang/crates.io-index)", - "num-traits 0.2.19 
(registry+https://github.com/rust-lang/crates.io-index)", - "oorandom 11.1.5 (registry+https://github.com/rust-lang/crates.io-index)", - "regex 1.12.2 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.228 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_json 1.0.145 (registry+https://github.com/rust-lang/crates.io-index)", - "tinytemplate 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)", - "walkdir 2.5.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "criterion-plot" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "cast 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", - "itertools 0.13.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "crunchy" -version = "0.2.4" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "either" -version = "1.15.0" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "fst" -version = "0.4.7" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "getrandom" -version = "0.3.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "cfg-if 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.177 (registry+https://github.com/rust-lang/crates.io-index)", - "r-efi 5.3.0 (registry+https://github.com/rust-lang/crates.io-index)", - "wasip2 1.0.1+wasi-0.2.4 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "half" -version = "2.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "cfg-if 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", - "crunchy 0.2.4 (registry+https://github.com/rust-lang/crates.io-index)", - "zerocopy 0.8.27 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "itertools" -version = "0.13.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "either 1.15.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "itoa" -version = "1.0.15" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "libc" -version = "0.2.177" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "memchr" -version = "2.7.6" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "num-traits" -version = "0.2.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "autocfg 1.5.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "oorandom" -version = "11.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "ppv-lite86" -version = "0.2.21" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "zerocopy 0.8.27 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "proc-macro2" -version = "1.0.103" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "unicode-ident 1.0.20 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "quote" -version = "1.0.41" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "proc-macro2 1.0.103 (registry+https://github.com/rust-lang/crates.io-index)", -] - 
-[[package]] -name = "r-efi" -version = "5.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "rand" -version = "0.9.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "rand_chacha 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", - "rand_core 0.9.3 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "rand_chacha" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "ppv-lite86 0.2.21 (registry+https://github.com/rust-lang/crates.io-index)", - "rand_core 0.9.3 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "rand_core" -version = "0.9.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "getrandom 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "regex" -version = "1.12.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "aho-corasick 1.1.4 (registry+https://github.com/rust-lang/crates.io-index)", - "memchr 2.7.6 (registry+https://github.com/rust-lang/crates.io-index)", - "regex-automata 0.4.13 (registry+https://github.com/rust-lang/crates.io-index)", - "regex-syntax 0.8.8 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "regex-automata" -version = "0.4.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "aho-corasick 1.1.4 (registry+https://github.com/rust-lang/crates.io-index)", - "memchr 2.7.6 (registry+https://github.com/rust-lang/crates.io-index)", - "regex-syntax 0.8.8 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "regex-syntax" -version = "0.8.8" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "roaring" -version = "0.11.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "bytemuck 1.24.0 (registry+https://github.com/rust-lang/crates.io-index)", - "byteorder 1.5.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "ryu" -version = "1.0.20" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "same-file" -version = "1.0.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "winapi-util 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "serde" -version = "1.0.228" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "serde_core 1.0.228 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_derive 1.0.228 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "serde_core" -version = "1.0.228" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "serde_derive 1.0.228 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "serde_derive" -version = "1.0.228" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "proc-macro2 1.0.103 (registry+https://github.com/rust-lang/crates.io-index)", - "quote 1.0.41 (registry+https://github.com/rust-lang/crates.io-index)", - "syn 2.0.108 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "serde_json" -version = "1.0.145" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ 
- "itoa 1.0.15 (registry+https://github.com/rust-lang/crates.io-index)", - "memchr 2.7.6 (registry+https://github.com/rust-lang/crates.io-index)", - "ryu 1.0.20 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.228 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_core 1.0.228 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "syn" -version = "2.0.108" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "proc-macro2 1.0.103 (registry+https://github.com/rust-lang/crates.io-index)", - "quote 1.0.41 (registry+https://github.com/rust-lang/crates.io-index)", - "unicode-ident 1.0.20 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "tinytemplate" -version = "1.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "serde 1.0.228 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_json 1.0.145 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "ucd-trie" -version = "0.1.7" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "unicode-ident" -version = "1.0.20" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "unicode-ident" -version = "1.0.22" -dependencies = [ - "criterion 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", - "fst 0.4.7 (registry+https://github.com/rust-lang/crates.io-index)", - "rand 0.9.2 (registry+https://github.com/rust-lang/crates.io-index)", - "roaring 0.11.2 (registry+https://github.com/rust-lang/crates.io-index)", - "ucd-trie 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", - "unicode-xid 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "unicode-xid" -version = "0.2.6" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "walkdir" -version = "2.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "same-file 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi-util 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "wasip2" -version = "1.0.1+wasi-0.2.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "wit-bindgen 0.46.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "winapi-util" -version = "0.1.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "windows-sys 0.61.2 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "windows-link" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "windows-sys" -version = "0.61.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "windows-link 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "wit-bindgen" -version = "0.46.0" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "zerocopy" -version = "0.8.27" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "zerocopy-derive 0.8.27 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "zerocopy-derive" -version = "0.8.27" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "proc-macro2 
1.0.103 (registry+https://github.com/rust-lang/crates.io-index)", - "quote 1.0.41 (registry+https://github.com/rust-lang/crates.io-index)", - "syn 2.0.108 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[metadata] -"checksum aho-corasick 1.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "ddd31a130427c27518df266943a5308ed92d4b226cc639f5a8f1002816174301" -"checksum anes 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)" = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299" -"checksum anstyle 1.0.13 (registry+https://github.com/rust-lang/crates.io-index)" = "5192cca8006f1fd4f7237516f40fa183bb07f8fbdfedaa0036de5ea9b0b45e78" -"checksum autocfg 1.5.0 (registry+https://github.com/rust-lang/crates.io-index)" = "c08606f8c3cbf4ce6ec8e28fb0014a2c086708fe954eaa885384a6165172e7e8" -"checksum bytemuck 1.24.0 (registry+https://github.com/rust-lang/crates.io-index)" = "1fbdf580320f38b612e485521afda1ee26d10cc9884efaaa750d383e13e3c5f4" -"checksum byteorder 1.5.0 (registry+https://github.com/rust-lang/crates.io-index)" = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" -"checksum cast 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" -"checksum cfg-if 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)" = "9330f8b2ff13f34540b44e946ef35111825727b38d33286ef986142615121801" -"checksum ciborium 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "42e69ffd6f0917f5c029256a24d0161db17cea3997d185db0d35926308770f0e" -"checksum ciborium-io 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "05afea1e0a06c9be33d539b876f1ce3692f4afea2cb41f740e7743225ed1c757" -"checksum ciborium-ll 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "57663b653d948a338bfb3eeba9bb2fd5fcfaecb9e199e87e1eda4d9e8b240fd9" -"checksum clap 4.5.51 (registry+https://github.com/rust-lang/crates.io-index)" = "4c26d721170e0295f191a69bd9a1f93efcdb0aff38684b61ab5750468972e5f5" -"checksum clap_builder 4.5.51 (registry+https://github.com/rust-lang/crates.io-index)" = "75835f0c7bf681bfd05abe44e965760fea999a5286c6eb2d59883634fd02011a" -"checksum clap_lex 0.7.6 (registry+https://github.com/rust-lang/crates.io-index)" = "a1d728cc89cf3aee9ff92b05e62b19ee65a02b5702cff7d5a377e32c6ae29d8d" -"checksum criterion 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e1c047a62b0cc3e145fa84415a3191f628e980b194c2755aa12300a4e6cbd928" -"checksum criterion-plot 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = "9b1bcc0dc7dfae599d84ad0b1a55f80cde8af3725da8313b528da95ef783e338" -"checksum crunchy 0.2.4 (registry+https://github.com/rust-lang/crates.io-index)" = "460fbee9c2c2f33933d720630a6a0bac33ba7053db5344fac858d4b8952d77d5" -"checksum either 1.15.0 (registry+https://github.com/rust-lang/crates.io-index)" = "48c757948c5ede0e46177b7add2e67155f70e33c07fea8284df6576da70b3719" -"checksum fst 0.4.7 (registry+https://github.com/rust-lang/crates.io-index)" = "7ab85b9b05e3978cc9a9cf8fea7f01b494e1a09ed3037e16ba39edc7a29eb61a" -"checksum getrandom 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "899def5c37c4fd7b2664648c28120ecec138e4d395b459e5ca34f9cce2dd77fd" -"checksum half 2.7.1 (registry+https://github.com/rust-lang/crates.io-index)" = "6ea2d84b969582b4b1864a92dc5d27cd2b77b622a8d79306834f1be5ba20d84b" -"checksum itertools 0.13.0 (registry+https://github.com/rust-lang/crates.io-index)" = 
"413ee7dfc52ee1a4949ceeb7dbc8a33f2d6c088194d9f922fb8318faf1f01186" -"checksum itoa 1.0.15 (registry+https://github.com/rust-lang/crates.io-index)" = "4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c" -"checksum libc 0.2.177 (registry+https://github.com/rust-lang/crates.io-index)" = "2874a2af47a2325c2001a6e6fad9b16a53b802102b528163885171cf92b15976" -"checksum memchr 2.7.6 (registry+https://github.com/rust-lang/crates.io-index)" = "f52b00d39961fc5b2736ea853c9cc86238e165017a493d1d5c8eac6bdc4cc273" -"checksum num-traits 0.2.19 (registry+https://github.com/rust-lang/crates.io-index)" = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841" -"checksum oorandom 11.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "d6790f58c7ff633d8771f42965289203411a5e5c68388703c06e14f24770b41e" -"checksum ppv-lite86 0.2.21 (registry+https://github.com/rust-lang/crates.io-index)" = "85eae3c4ed2f50dcfe72643da4befc30deadb458a9b590d720cde2f2b1e97da9" -"checksum proc-macro2 1.0.103 (registry+https://github.com/rust-lang/crates.io-index)" = "5ee95bc4ef87b8d5ba32e8b7714ccc834865276eab0aed5c9958d00ec45f49e8" -"checksum quote 1.0.41 (registry+https://github.com/rust-lang/crates.io-index)" = "ce25767e7b499d1b604768e7cde645d14cc8584231ea6b295e9c9eb22c02e1d1" -"checksum r-efi 5.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "69cdb34c158ceb288df11e18b4bd39de994f6657d83847bdffdbd7f346754b0f" -"checksum rand 0.9.2 (registry+https://github.com/rust-lang/crates.io-index)" = "6db2770f06117d490610c7488547d543617b21bfa07796d7a12f6f1bd53850d1" -"checksum rand_chacha 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d3022b5f1df60f26e1ffddd6c66e8aa15de382ae63b3a0c1bfc0e4d3e3f325cb" -"checksum rand_core 0.9.3 (registry+https://github.com/rust-lang/crates.io-index)" = "99d9a13982dcf210057a8a78572b2217b667c3beacbf3a0d8b454f6f82837d38" -"checksum regex 1.12.2 (registry+https://github.com/rust-lang/crates.io-index)" = "843bc0191f75f3e22651ae5f1e72939ab2f72a4bc30fa80a066bd66edefc24d4" -"checksum regex-automata 0.4.13 (registry+https://github.com/rust-lang/crates.io-index)" = "5276caf25ac86c8d810222b3dbb938e512c55c6831a10f3e6ed1c93b84041f1c" -"checksum regex-syntax 0.8.8 (registry+https://github.com/rust-lang/crates.io-index)" = "7a2d987857b319362043e95f5353c0535c1f58eec5336fdfcf626430af7def58" -"checksum roaring 0.11.2 (registry+https://github.com/rust-lang/crates.io-index)" = "f08d6a905edb32d74a5d5737a0c9d7e950c312f3c46cb0ca0a2ca09ea11878a0" -"checksum ryu 1.0.20 (registry+https://github.com/rust-lang/crates.io-index)" = "28d3b2b1366ec20994f1fd18c3c594f05c5dd4bc44d8bb0c1c632c8d6829481f" -"checksum same-file 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)" = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502" -"checksum serde 1.0.228 (registry+https://github.com/rust-lang/crates.io-index)" = "9a8e94ea7f378bd32cbbd37198a4a91436180c5bb472411e48b5ec2e2124ae9e" -"checksum serde_core 1.0.228 (registry+https://github.com/rust-lang/crates.io-index)" = "41d385c7d4ca58e59fc732af25c3983b67ac852c1a25000afe1175de458b67ad" -"checksum serde_derive 1.0.228 (registry+https://github.com/rust-lang/crates.io-index)" = "d540f220d3187173da220f885ab66608367b6574e925011a9353e4badda91d79" -"checksum serde_json 1.0.145 (registry+https://github.com/rust-lang/crates.io-index)" = "402a6f66d8c709116cf22f558eab210f5a50187f702eb4d7e5ef38d9a7f1c79c" -"checksum syn 2.0.108 (registry+https://github.com/rust-lang/crates.io-index)" = 
"da58917d35242480a05c2897064da0a80589a2a0476c9a3f2fdc83b53502e917" -"checksum tinytemplate 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "be4d6b5f19ff7664e8c98d03e2139cb510db9b0a60b55f8e8709b689d939b6bc" -"checksum ucd-trie 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)" = "2896d95c02a80c6d6a5d6e953d479f5ddf2dfdb6a244441010e373ac0fb88971" -"checksum unicode-ident 1.0.20 (registry+https://github.com/rust-lang/crates.io-index)" = "462eeb75aeb73aea900253ce739c8e18a67423fadf006037cd3ff27e82748a06" -"checksum unicode-xid 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)" = "ebc1c04c71510c7f702b52b7c350734c9ff1295c464a03335b00bb84fc54f853" -"checksum walkdir 2.5.0 (registry+https://github.com/rust-lang/crates.io-index)" = "29790946404f91d9c5d06f9874efddea1dc06c5efe94541a7d6863108e3a5e4b" -"checksum wasip2 1.0.1+wasi-0.2.4 (registry+https://github.com/rust-lang/crates.io-index)" = "0562428422c63773dad2c345a1882263bbf4d65cf3f42e90921f787ef5ad58e7" -"checksum winapi-util 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)" = "c2a7b1c03c876122aa43f3020e6c3c3ee5c05081c9a00739faf7503aeba10d22" -"checksum windows-link 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "f0805222e57f7521d6a62e36fa9163bc891acd422f971defe97d64e70d0a4fe5" -"checksum windows-sys 0.61.2 (registry+https://github.com/rust-lang/crates.io-index)" = "ae137229bcbd6cdf0f7b80a31df61766145077ddf49416a728b02cb3921ff3fc" -"checksum wit-bindgen 0.46.0 (registry+https://github.com/rust-lang/crates.io-index)" = "f17a85883d4e6d00e8a97c586de764dabcc06133f7f1d55dce5cdc070ad7fe59" -"checksum zerocopy 0.8.27 (registry+https://github.com/rust-lang/crates.io-index)" = "0894878a5fa3edfd6da3f88c4805f4c8558e2b996227a3d864f47fe11e38282c" -"checksum zerocopy-derive 0.8.27 (registry+https://github.com/rust-lang/crates.io-index)" = "88d2b8d9c68ad2b9e4340d7832716a4d21a22a1154777ad56ea55c51a9cf3831" diff --git a/vendor/unicode-ident/Cargo.toml b/vendor/unicode-ident/Cargo.toml deleted file mode 100644 index 3bdaced0399275..00000000000000 --- a/vendor/unicode-ident/Cargo.toml +++ /dev/null @@ -1,84 +0,0 @@ -# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO -# -# When uploading crates to the registry Cargo will automatically -# "normalize" Cargo.toml files for maximal compatibility -# with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g., crates.io) dependencies. -# -# If you are reading this file be aware that the original Cargo.toml -# will likely look very different (and much more reasonable). -# See Cargo.toml.orig for the original contents. 
- -[package] -edition = "2018" -rust-version = "1.31" -name = "unicode-ident" -version = "1.0.22" -authors = ["David Tolnay <dtolnay@gmail.com>"] -build = false -autolib = false -autobins = false -autoexamples = false -autotests = false -autobenches = false -description = "Determine whether characters have the XID_Start or XID_Continue properties according to Unicode Standard Annex #31" -documentation = "https://docs.rs/unicode-ident" -readme = "README.md" -keywords = [ - "unicode", - "xid", -] -categories = [ - "development-tools::procedural-macro-helpers", - "no-std", - "no-std::no-alloc", -] -license = "(MIT OR Apache-2.0) AND Unicode-3.0" -repository = "https://github.com/dtolnay/unicode-ident" - -[package.metadata.docs.rs] -targets = ["x86_64-unknown-linux-gnu"] -rustdoc-args = [ - "--generate-link-to-definition", - "--generate-macro-expansion", - "--extern-html-root-url=core=https://doc.rust-lang.org", - "--extern-html-root-url=alloc=https://doc.rust-lang.org", - "--extern-html-root-url=std=https://doc.rust-lang.org", -] - -[lib] -name = "unicode_ident" -path = "src/lib.rs" - -[[test]] -name = "compare" -path = "tests/compare.rs" - -[[test]] -name = "static_size" -path = "tests/static_size.rs" - -[[bench]] -name = "xid" -path = "benches/xid.rs" -harness = false - -[dev-dependencies.criterion] -version = "0.7" -default-features = false - -[dev-dependencies.fst] -version = "0.4" - -[dev-dependencies.rand] -version = "0.9" - -[dev-dependencies.roaring] -version = "0.11" - -[dev-dependencies.ucd-trie] -version = "0.1" -default-features = false - -[dev-dependencies.unicode-xid] -version = "0.2.6" diff --git a/vendor/unicode-ident/LICENSE-APACHE b/vendor/unicode-ident/LICENSE-APACHE deleted file mode 100644 index 1b5ec8b78e237b..00000000000000 --- a/vendor/unicode-ident/LICENSE-APACHE +++ /dev/null @@ -1,176 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). 
- - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - -2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - -3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - -4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - -5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - -6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - -8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - -END OF TERMS AND CONDITIONS diff --git a/vendor/unicode-ident/LICENSE-MIT b/vendor/unicode-ident/LICENSE-MIT deleted file mode 100644 index 31aa79387f27e7..00000000000000 --- a/vendor/unicode-ident/LICENSE-MIT +++ /dev/null @@ -1,23 +0,0 @@ -Permission is hereby granted, free of charge, to any -person obtaining a copy of this software and associated -documentation files (the "Software"), to deal in the -Software without restriction, including without -limitation the rights to use, copy, modify, merge, -publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software -is furnished to do so, subject to the following -conditions: - -The above copyright notice and this permission notice -shall be included in all copies or substantial portions -of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -DEALINGS IN THE SOFTWARE. diff --git a/vendor/unicode-ident/LICENSE-UNICODE b/vendor/unicode-ident/LICENSE-UNICODE deleted file mode 100644 index 11f2842a303a79..00000000000000 --- a/vendor/unicode-ident/LICENSE-UNICODE +++ /dev/null @@ -1,39 +0,0 @@ -UNICODE LICENSE V3 - -COPYRIGHT AND PERMISSION NOTICE - -Copyright © 1991-2023 Unicode, Inc. - -NOTICE TO USER: Carefully read the following legal agreement. BY -DOWNLOADING, INSTALLING, COPYING OR OTHERWISE USING DATA FILES, AND/OR -SOFTWARE, YOU UNEQUIVOCALLY ACCEPT, AND AGREE TO BE BOUND BY, ALL OF THE -TERMS AND CONDITIONS OF THIS AGREEMENT. IF YOU DO NOT AGREE, DO NOT -DOWNLOAD, INSTALL, COPY, DISTRIBUTE OR USE THE DATA FILES OR SOFTWARE. 
- -Permission is hereby granted, free of charge, to any person obtaining a -copy of data files and any associated documentation (the "Data Files") or -software and any associated documentation (the "Software") to deal in the -Data Files or Software without restriction, including without limitation -the rights to use, copy, modify, merge, publish, distribute, and/or sell -copies of the Data Files or Software, and to permit persons to whom the -Data Files or Software are furnished to do so, provided that either (a) -this copyright and permission notice appear with all copies of the Data -Files or Software, or (b) this copyright and permission notice appear in -associated Documentation. - -THE DATA FILES AND SOFTWARE ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY -KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF -THIRD PARTY RIGHTS. - -IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS INCLUDED IN THIS NOTICE -BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, -OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, -WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, -ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THE DATA -FILES OR SOFTWARE. - -Except as contained in this notice, the name of a copyright holder shall -not be used in advertising or otherwise to promote the sale, use or other -dealings in these Data Files or Software without prior written -authorization of the copyright holder. diff --git a/vendor/unicode-ident/README.md b/vendor/unicode-ident/README.md deleted file mode 100644 index 2e4668ef62528c..00000000000000 --- a/vendor/unicode-ident/README.md +++ /dev/null @@ -1,274 +0,0 @@ -Unicode ident -============= - -[<img alt="github" src="https://img.shields.io/badge/github-dtolnay/unicode--ident-8da0cb?style=for-the-badge&labelColor=555555&logo=github" height="20">](https://github.com/dtolnay/unicode-ident) -[<img alt="crates.io" src="https://img.shields.io/crates/v/unicode-ident.svg?style=for-the-badge&color=fc8d62&logo=rust" height="20">](https://crates.io/crates/unicode-ident) -[<img alt="docs.rs" src="https://img.shields.io/badge/docs.rs-unicode--ident-66c2a5?style=for-the-badge&labelColor=555555&logo=docs.rs" height="20">](https://docs.rs/unicode-ident) -[<img alt="build status" src="https://img.shields.io/github/actions/workflow/status/dtolnay/unicode-ident/ci.yml?branch=master&style=for-the-badge" height="20">](https://github.com/dtolnay/unicode-ident/actions?query=branch%3Amaster) - -Implementation of [Unicode Standard Annex #31][tr31] for determining which -`char` values are valid in programming language identifiers. - -[tr31]: https://www.unicode.org/reports/tr31/ - -This crate is a better optimized implementation of the older `unicode-xid` -crate. This crate uses less static storage, and is able to classify both ASCII -and non-ASCII codepoints with better performance, 6× faster than -`unicode-xid`. - -<br> - -## Comparison of performance - -The following table shows a comparison between five Unicode identifier -implementations. - -- `unicode-ident` is this crate; -- [`unicode-xid`] is a widely used crate run by the "unicode-rs" org; -- `ucd-trie` and `fst` are two data structures supported by the [`ucd-generate`] tool; -- [`roaring`] is a Rust implementation of Roaring bitmap. 
- -The *static storage* column shows the total size of `static` tables that the -crate bakes into your binary, measured in 1000s of bytes. - -The remaining columns show the **cost per call** to evaluate whether a single -`char` has the XID\_Start or XID\_Continue Unicode property, comparing across -different ratios of ASCII to non-ASCII codepoints in the input data. - -[`unicode-xid`]: https://github.com/unicode-rs/unicode-xid -[`ucd-generate`]: https://github.com/BurntSushi/ucd-generate -[`roaring`]: https://github.com/RoaringBitmap/roaring-rs - -| | static storage | 0% nonascii | 1% | 10% | 100% nonascii | -|---|---|---|---|---|---| -| **`unicode-ident`** | 10.3 K | 0.41 ns | 0.44 ns | 0.44 ns | 0.93 ns | -| **`unicode-xid`** | 12.0 K | 2.43 ns | 2.50 ns | 2.85 ns | 8.65 ns | -| **`ucd-trie`** | 10.4 K | 1.28 ns | 1.25 ns | 1.20 ns | 1.97 ns | -| **`fst`** | 144 K | 50.9 ns | 51.0 ns | 48.5 ns | 26.7 ns | -| **`roaring`** | 66.1 K | 4.28 ns | 4.22 ns | 4.25 ns | 4.61 ns | - -Source code for the benchmark is provided in the *bench* directory of this repo -and may be repeated by running `cargo criterion`. - -<br> - -## Comparison of data structures - -#### unicode-xid - -They use a sorted array of character ranges, and do a binary search to look up -whether a given character lands inside one of those ranges. - -```rust -static XID_Continue_table: [(char, char); 763] = [ - ('\u{30}', '\u{39}'), // 0-9 - ('\u{41}', '\u{5a}'), // A-Z - … - ('\u{e0100}', '\u{e01ef}'), -]; -``` - -The static storage used by this data structure scales with the number of -contiguous ranges of identifier codepoints in Unicode. Every table entry -consumes 8 bytes, because it consists of a pair of 32-bit `char` values. - -In some ranges of the Unicode codepoint space, this is quite a sparse -representation – there are some ranges where tens of thousands of adjacent -codepoints are all valid identifier characters. In other places, the -representation is quite inefficient. A characater like `µ` (U+00B5) which is -surrounded by non-identifier codepoints consumes 64 bits in the table, while it -would be just 1 bit in a dense bitmap. - -On a system with 64-byte cache lines, binary searching the table touches 7 cache -lines on average. Each cache line fits only 8 table entries. Additionally, the -branching performed during the binary search is probably mostly unpredictable to -the branch predictor. - -Overall, the crate ends up being about 6× slower on non-ASCII input -compared to the fastest crate. - -A potential improvement would be to pack the table entries more compactly. -Rust's `char` type is a 21-bit integer padded to 32 bits, which means every -table entry is holding 22 bits of wasted space, adding up to 3.9 K. They could -instead fit every table entry into 6 bytes, leaving out some of the padding, for -a 25% improvement in space used. With some cleverness it may be possible to fit -in 5 bytes or even 4 bytes by storing a low char and an extent, instead of low -char and high char. I don't expect that performance would improve much but this -could be the most efficient for space across all the libraries, needing only -about 7 K to store. - -#### ucd-trie - -Their data structure is a compressed trie set specifically tailored for Unicode -codepoints. The design is credited to Raph Levien in [rust-lang/rust#33098]. 
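
The range-table approach described above for `unicode-xid` boils down to a binary search over sorted `(low, high)` pairs. A minimal sketch of that lookup strategy, using a small hypothetical table rather than the real 763-entry `XID_Continue_table`:

```rust
// Illustrative sketch only: a tiny range table in the spirit of the
// unicode-xid layout described above. The entries are a hand-picked
// subset, not the crate's actual tables.
const RANGES: &[(char, char)] = &[
    ('\u{30}', '\u{39}'), // 0-9
    ('\u{41}', '\u{5a}'), // A-Z
    ('\u{5f}', '\u{5f}'), // _
    ('\u{61}', '\u{7a}'), // a-z
    ('\u{b5}', '\u{b5}'), // µ: an isolated codepoint still costs a full 8-byte entry
];

/// Binary search over sorted, non-overlapping (low, high) ranges.
/// Each probe compares the query character against a whole range.
fn in_ranges(ch: char) -> bool {
    RANGES
        .binary_search_by(|&(lo, hi)| {
            if ch < lo {
                std::cmp::Ordering::Greater // probed range lies above ch
            } else if ch > hi {
                std::cmp::Ordering::Less // probed range lies below ch
            } else {
                std::cmp::Ordering::Equal // ch falls inside this range
            }
        })
        .is_ok()
}

fn main() {
    assert!(in_ranges('A'));
    assert!(in_ranges('µ'));
    assert!(!in_ranges(' '));
    println!("range-table lookup works");
}
```

The cost per call is O(log n) in the number of ranges, which is what drives the cache-line and branch-prediction behaviour discussed above.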
- -[rust-lang/rust#33098]: https://github.com/rust-lang/rust/pull/33098 - -```rust -pub struct TrieSet { - tree1_level1: &'static [u64; 32], - tree2_level1: &'static [u8; 992], - tree2_level2: &'static [u64], - tree3_level1: &'static [u8; 256], - tree3_level2: &'static [u8], - tree3_level3: &'static [u64], -} -``` - -It represents codepoint sets using a trie to achieve prefix compression. The -final states of the trie are embedded in leaves or "chunks", where each chunk is -a 64-bit integer. Each bit position of the integer corresponds to whether a -particular codepoint is in the set or not. These chunks are not just a compact -representation of the final states of the trie, but are also a form of suffix -compression. In particular, if multiple ranges of 64 contiguous codepoints have -the same Unicode properties, then they all map to the same chunk in the final -level of the trie. - -Being tailored for Unicode codepoints, this trie is partitioned into three -disjoint sets: tree1, tree2, tree3. The first set corresponds to codepoints \[0, -0x800), the second \[0x800, 0x10000) and the third \[0x10000, 0x110000). These -partitions conveniently correspond to the space of 1 or 2 byte UTF-8 encoded -codepoints, 3 byte UTF-8 encoded codepoints and 4 byte UTF-8 encoded codepoints, -respectively. - -Lookups in this data structure are significantly more efficient than binary -search. A lookup touches either 1, 2, or 3 cache lines based on which of the -trie partitions is being accessed. - -One possible performance improvement would be for this crate to expose a way to -query based on a UTF-8 encoded string, returning the Unicode property -corresponding to the first character in the string. Without such an API, the -caller is required to tokenize their UTF-8 encoded input data into `char`, hand -the `char` into `ucd-trie`, only for `ucd-trie` to undo that work by converting -back into the variable-length representation for trie traversal. - -#### fst - -Uses a [finite state transducer][fst]. This representation is built into -[ucd-generate] but I am not aware of any advantage over the `ucd-trie` -representation. In particular `ucd-trie` is optimized for storing Unicode -properties while `fst` is not. - -[fst]: https://github.com/BurntSushi/fst -[ucd-generate]: https://github.com/BurntSushi/ucd-generate - -As far as I can tell, the main thing that causes `fst` to have large size and -slow lookups for this use case relative to `ucd-trie` is that it does not -specialize for the fact that only 21 of the 32 bits in a `char` are meaningful. -There are some dense arrays in the structure with large ranges that could never -possibly be used. - -#### roaring - -This crate is a pure-Rust implementation of [Roaring Bitmap], a data structure -designed for storing sets of 32-bit unsigned integers. - -[Roaring Bitmap]: https://roaringbitmap.org/about/ - -Roaring bitmaps are compressed bitmaps which tend to outperform conventional -compressed bitmaps such as WAH, EWAH or Concise. In some instances, they can be -hundreds of times faster and they often offer significantly better compression. - -In this use case the performance was reasonably competitive but still -substantially slower than the Unicode-optimized crates. Meanwhile the -compression was significantly worse, requiring 6× as much storage for the -data structure. - -I also benchmarked the [`croaring`] crate which is an FFI wrapper around the C -reference implementation of Roaring Bitmap. 
This crate was consistently about -15% slower than pure-Rust `roaring`, which could just be FFI overhead. I did not -investigate further. - -[`croaring`]: https://crates.io/crates/croaring - -#### unicode-ident - -This crate is most similar to the `ucd-trie` library, in that it's based on -bitmaps stored in the leafs of a trie representation, achieving both prefix -compression and suffix compression. - -The key differences are: - -- Uses a single 2-level trie, rather than 3 disjoint partitions of different - depth each. -- Uses significantly larger chunks: 512 bits rather than 64 bits. -- Compresses the XID\_Start and XID\_Continue properties together - simultaneously, rather than duplicating identical trie leaf chunks across the - two. - -The following diagram show the XID\_Start and XID\_Continue Unicode boolean -properties in uncompressed form, in row-major order: - -<table> -<tr><th>XID_Start</th><th>XID_Continue</th></tr> -<tr> -<td><img alt="XID_Start bitmap" width="256" src="https://user-images.githubusercontent.com/1940490/168647353-c6eeb922-afec-49b2-9ef5-c03e9d1e0760.png"></td> -<td><img alt="XID_Continue bitmap" width="256" src="https://user-images.githubusercontent.com/1940490/168647367-f447cca7-2362-4d7d-8cd7-d21c011d329b.png"></td> -</tr> -</table> - -Uncompressed, these would take 140 K to store, which is beyond what would be -reasonable. However, as you can see there is a large degree of similarity -between the two bitmaps and across the rows, which lends well to compression. - -This crate stores one 512-bit "row" of the above bitmaps in the leaf level of a -trie, and a single additional level to index into the leafs. It turns out there -are 124 unique 512-bit chunks across the two bitmaps so 7 bits are sufficient to -index them. - -The chunk size of 512 bits is selected as the size that minimizes the total size -of the data structure. A smaller chunk, like 256 or 128 bits, would achieve -better deduplication but require a larger index. A larger chunk would increase -redundancy in the leaf bitmaps. 512 bit chunks are the optimum for total size of -the index plus leaf bitmaps. - -In fact since there are only 124 unique chunks, we can use an 8-bit index with a -spare bit to index at the half-chunk level. This achieves an additional 8.5% -compression by eliminating redundancies between the second half of any chunk and -the first half of any other chunk. Note that this is not the same as using -chunks which are half the size, because it does not necessitate raising the size -of the trie's first level. - -In contrast to binary search or the `ucd-trie` crate, performing lookups in this -data structure is straight-line code with no need for branching. - -```asm -is_xid_start: - mov eax, edi - mov ecx, offset unicode_ident::ZERO - shr eax, 9 - cmp edi, 210432 - lea rax, [rax + unicode_ident::tables::TRIE_START] - cmovb rcx, rax - movzx eax, byte ptr [rcx] - mov ecx, 1539 - bextr ecx, edi, ecx - and edi, 7 - shl eax, 5 - movzx eax, byte ptr [rax + rcx + unicode_ident::tables::LEAF] - bt eax, edi - setb al - ret -``` - -<br> - -## License - -Use of the Unicode Character Database, as this crate does, is governed by the <a -href="LICENSE-UNICODE">Unicode license</a>. - -All intellectual property within this crate that is **not generated** using the -Unicode Character Database as input is licensed under either of <a -href="LICENSE-APACHE">Apache License, Version 2.0</a> or <a -href="LICENSE-MIT">MIT license</a> at your option. 
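
As a companion to the `unicode-ident` layout described above (one index level selecting deduplicated 512-bit leaf chunks), here is a deliberately simplified sketch of that kind of two-level chunked-bitmap lookup. The `INDEX` and `LEAVES` tables are made up for illustration; they are not the crate's generated `TRIE_START`/`LEAF` data, and the ASCII fast path and the half-chunk indexing trick are left out.

```rust
// Toy model of the two-level lookup: a one-byte index per 512-bit block of
// the codepoint space selects a (possibly shared) leaf chunk, and a bit
// test inside that chunk answers the membership query.
const CHUNK_BITS: usize = 512;

// Level 1: one index byte per 512-codepoint block (toy table with 4 blocks).
static INDEX: [u8; 4] = [0, 1, 1, 0]; // blocks 1 and 2 share leaf chunk 1

// Level 2: deduplicated 512-bit leaf chunks, stored as eight u64 words each.
static LEAVES: [[u64; 8]; 2] = [
    [0; 8],                    // chunk 0: no codepoints set
    [!0, 0, 0, 0, 0, 0, 0, 0], // chunk 1: first 64 bits of the block set
];

fn lookup(cp: u32) -> bool {
    let block = cp as usize / CHUNK_BITS;
    // Out-of-range blocks fall back to the all-zero chunk, mirroring the
    // `unwrap_or(&ZERO)` fallback style used by the crate.
    let chunk = *INDEX.get(block).unwrap_or(&0) as usize;
    let bit = cp as usize % CHUNK_BITS;
    (LEAVES[chunk][bit / 64] >> (bit % 64)) & 1 != 0
}

fn main() {
    assert!(!lookup(5));         // block 0 -> empty chunk
    assert!(lookup(512));        // block 1 -> chunk 1, bit 0
    assert!(lookup(1024 + 63));  // block 2 reuses chunk 1
    assert!(!lookup(1024 + 64)); // bit 64 of chunk 1 is clear
    println!("two-level chunk lookup works");
}
```

Sharing one leaf chunk between several index entries is what gives the suffix compression; the index stays small because only the number of *unique* chunks matters.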
- -The **generated** files incorporate tabular data derived from the Unicode -Character Database, together with intellectual property from the original source -code content of the crate. One must comply with the terms of both the Unicode -License Agreement and either of the Apache license or MIT license when those -generated files are involved. - -Unless you explicitly state otherwise, any contribution intentionally submitted -for inclusion in this crate by you, as defined in the Apache-2.0 license, shall -be licensed as just described, without any additional terms or conditions. diff --git a/vendor/unicode-ident/benches/xid.rs b/vendor/unicode-ident/benches/xid.rs deleted file mode 100644 index fc7b48e44b7334..00000000000000 --- a/vendor/unicode-ident/benches/xid.rs +++ /dev/null @@ -1,126 +0,0 @@ -// To run: `cargo criterion` -// -// This benchmarks each of the different libraries at several ratios of ASCII to -// non-ASCII content. There is one additional benchmark labeled "baseline" which -// just iterates over characters in a string, converting UTF-8 to 32-bit chars. -// -// Criterion will show a time in milliseconds. The non-baseline bench functions -// each make one million function calls (2 calls per character, 500K characters -// in the strings created by gen_string). The "time per call" listed in our -// readme is computed by subtracting this baseline from the other bench -// functions' time, then dividing by one million (ms -> ns). - -#![allow( - clippy::incompatible_msrv, // https://github.com/rust-lang/rust-clippy/issues/12257 - clippy::needless_pass_by_value, -)] - -#[path = "../tests/fst/mod.rs"] -mod fst; -#[path = "../tests/roaring/mod.rs"] -mod roaring; -#[path = "../tests/trie/mod.rs"] -mod trie; - -use criterion::{criterion_group, criterion_main, Criterion}; -use rand::distr::{Bernoulli, Distribution, Uniform}; -use rand::rngs::SmallRng; -use rand::SeedableRng; -use std::hint::black_box; -use std::time::Duration; - -fn gen_string(p_nonascii: u32) -> String { - let mut rng = SmallRng::from_seed([b'!'; 32]); - let pick_nonascii = Bernoulli::from_ratio(p_nonascii, 100).unwrap(); - let ascii = Uniform::new_inclusive('\0', '\x7f').unwrap(); - let nonascii = Uniform::new_inclusive(0x80 as char, char::MAX).unwrap(); - - let mut string = String::new(); - for _ in 0..500_000 { - let distribution = if pick_nonascii.sample(&mut rng) { - nonascii - } else { - ascii - }; - string.push(distribution.sample(&mut rng)); - } - - string -} - -fn bench(c: &mut Criterion, group_name: &str, string: String) { - let mut group = c.benchmark_group(group_name); - group.measurement_time(Duration::from_secs(10)); - group.bench_function("baseline", |b| { - b.iter(|| { - for ch in string.chars() { - black_box(ch); - } - }); - }); - group.bench_function("unicode-ident", |b| { - b.iter(|| { - for ch in string.chars() { - black_box(unicode_ident::is_xid_start(ch)); - black_box(unicode_ident::is_xid_continue(ch)); - } - }); - }); - group.bench_function("unicode-xid", |b| { - b.iter(|| { - for ch in string.chars() { - black_box(unicode_xid::UnicodeXID::is_xid_start(ch)); - black_box(unicode_xid::UnicodeXID::is_xid_continue(ch)); - } - }); - }); - group.bench_function("ucd-trie", |b| { - b.iter(|| { - for ch in string.chars() { - black_box(trie::XID_START.contains_char(ch)); - black_box(trie::XID_CONTINUE.contains_char(ch)); - } - }); - }); - group.bench_function("fst", |b| { - let xid_start_fst = fst::xid_start_fst(); - let xid_continue_fst = fst::xid_continue_fst(); - b.iter(|| { - for ch in 
string.chars() { - let ch_bytes = (ch as u32).to_be_bytes(); - black_box(xid_start_fst.contains(ch_bytes)); - black_box(xid_continue_fst.contains(ch_bytes)); - } - }); - }); - group.bench_function("roaring", |b| { - let xid_start_bitmap = roaring::xid_start_bitmap(); - let xid_continue_bitmap = roaring::xid_continue_bitmap(); - b.iter(|| { - for ch in string.chars() { - black_box(xid_start_bitmap.contains(ch as u32)); - black_box(xid_continue_bitmap.contains(ch as u32)); - } - }); - }); - group.finish(); -} - -fn bench0(c: &mut Criterion) { - bench(c, "0%-nonascii", gen_string(0)); -} - -fn bench1(c: &mut Criterion) { - bench(c, "1%-nonascii", gen_string(1)); -} - -fn bench10(c: &mut Criterion) { - bench(c, "10%-nonascii", gen_string(10)); -} - -fn bench100(c: &mut Criterion) { - bench(c, "100%-nonascii", gen_string(100)); -} - -criterion_group!(benches, bench0, bench1, bench10, bench100); -criterion_main!(benches); diff --git a/vendor/unicode-ident/src/lib.rs b/vendor/unicode-ident/src/lib.rs deleted file mode 100644 index 2df5244f86981d..00000000000000 --- a/vendor/unicode-ident/src/lib.rs +++ /dev/null @@ -1,281 +0,0 @@ -//! [![github]](https://github.com/dtolnay/unicode-ident) [![crates-io]](https://crates.io/crates/unicode-ident) [![docs-rs]](https://docs.rs/unicode-ident) -//! -//! [github]: https://img.shields.io/badge/github-8da0cb?style=for-the-badge&labelColor=555555&logo=github -//! [crates-io]: https://img.shields.io/badge/crates.io-fc8d62?style=for-the-badge&labelColor=555555&logo=rust -//! [docs-rs]: https://img.shields.io/badge/docs.rs-66c2a5?style=for-the-badge&labelColor=555555&logo=docs.rs -//! -//! <br> -//! -//! Implementation of [Unicode Standard Annex #31][tr31] for determining which -//! `char` values are valid in programming language identifiers. -//! -//! [tr31]: https://www.unicode.org/reports/tr31/ -//! -//! This crate is a better optimized implementation of the older `unicode-xid` -//! crate. This crate uses less static storage, and is able to classify both -//! ASCII and non-ASCII codepoints with better performance, 6× faster than -//! `unicode-xid`. -//! -//! <br> -//! -//! ## Comparison of performance -//! -//! The following table shows a comparison between five Unicode identifier -//! implementations. -//! -//! - `unicode-ident` is this crate; -//! - [`unicode-xid`] is a widely used crate run by the "unicode-rs" org; -//! - `ucd-trie` and `fst` are two data structures supported by the -//! [`ucd-generate`] tool; -//! - [`roaring`] is a Rust implementation of Roaring bitmap. -//! -//! The *static storage* column shows the total size of `static` tables that the -//! crate bakes into your binary, measured in 1000s of bytes. -//! -//! The remaining columns show the **cost per call** to evaluate whether a -//! single `char` has the XID\_Start or XID\_Continue Unicode property, -//! comparing across different ratios of ASCII to non-ASCII codepoints in the -//! input data. -//! -//! [`unicode-xid`]: https://github.com/unicode-rs/unicode-xid -//! [`ucd-generate`]: https://github.com/BurntSushi/ucd-generate -//! [`roaring`]: https://github.com/RoaringBitmap/roaring-rs -//! -//! | | static storage | 0% nonascii | 1% | 10% | 100% nonascii | -//! |---|---|---|---|---|---| -//! | **`unicode-ident`** | 10.3 K | 0.41 ns | 0.44 ns | 0.44 ns | 0.93 ns | -//! | **`unicode-xid`** | 12.0 K | 2.43 ns | 2.50 ns | 2.85 ns | 8.65 ns | -//! | **`ucd-trie`** | 10.4 K | 1.28 ns | 1.25 ns | 1.20 ns | 1.97 ns | -//! | **`fst`** | 144 K | 50.9 ns | 51.0 ns | 48.5 ns | 26.7 ns | -//! 
| **`roaring`** | 66.1 K | 4.28 ns | 4.22 ns | 4.25 ns | 4.61 ns | -//! -//! Source code for the benchmark is provided in the *bench* directory of this -//! repo and may be repeated by running `cargo criterion`. -//! -//! <br> -//! -//! ## Comparison of data structures -//! -//! #### unicode-xid -//! -//! They use a sorted array of character ranges, and do a binary search to look -//! up whether a given character lands inside one of those ranges. -//! -//! ```rust -//! # const _: &str = stringify! { -//! static XID_Continue_table: [(char, char); 763] = [ -//! ('\u{30}', '\u{39}'), // 0-9 -//! ('\u{41}', '\u{5a}'), // A-Z -//! # " -//! … -//! # " -//! ('\u{e0100}', '\u{e01ef}'), -//! ]; -//! # }; -//! ``` -//! -//! The static storage used by this data structure scales with the number of -//! contiguous ranges of identifier codepoints in Unicode. Every table entry -//! consumes 8 bytes, because it consists of a pair of 32-bit `char` values. -//! -//! In some ranges of the Unicode codepoint space, this is quite a sparse -//! representation – there are some ranges where tens of thousands of -//! adjacent codepoints are all valid identifier characters. In other places, -//! the representation is quite inefficient. A characater like `µ` (U+00B5) -//! which is surrounded by non-identifier codepoints consumes 64 bits in the -//! table, while it would be just 1 bit in a dense bitmap. -//! -//! On a system with 64-byte cache lines, binary searching the table touches 7 -//! cache lines on average. Each cache line fits only 8 table entries. -//! Additionally, the branching performed during the binary search is probably -//! mostly unpredictable to the branch predictor. -//! -//! Overall, the crate ends up being about 6× slower on non-ASCII input -//! compared to the fastest crate. -//! -//! A potential improvement would be to pack the table entries more compactly. -//! Rust's `char` type is a 21-bit integer padded to 32 bits, which means every -//! table entry is holding 22 bits of wasted space, adding up to 3.9 K. They -//! could instead fit every table entry into 6 bytes, leaving out some of the -//! padding, for a 25% improvement in space used. With some cleverness it may be -//! possible to fit in 5 bytes or even 4 bytes by storing a low char and an -//! extent, instead of low char and high char. I don't expect that performance -//! would improve much but this could be the most efficient for space across all -//! the libraries, needing only about 7 K to store. -//! -//! #### ucd-trie -//! -//! Their data structure is a compressed trie set specifically tailored for -//! Unicode codepoints. The design is credited to Raph Levien in -//! [rust-lang/rust#33098]. -//! -//! [rust-lang/rust#33098]: https://github.com/rust-lang/rust/pull/33098 -//! -//! ```rust -//! pub struct TrieSet { -//! tree1_level1: &'static [u64; 32], -//! tree2_level1: &'static [u8; 992], -//! tree2_level2: &'static [u64], -//! tree3_level1: &'static [u8; 256], -//! tree3_level2: &'static [u8], -//! tree3_level3: &'static [u64], -//! } -//! ``` -//! -//! It represents codepoint sets using a trie to achieve prefix compression. The -//! final states of the trie are embedded in leaves or "chunks", where each -//! chunk is a 64-bit integer. Each bit position of the integer corresponds to -//! whether a particular codepoint is in the set or not. These chunks are not -//! just a compact representation of the final states of the trie, but are also -//! a form of suffix compression. In particular, if multiple ranges of 64 -//! 
contiguous codepoints have the same Unicode properties, then they all map to -//! the same chunk in the final level of the trie. -//! -//! Being tailored for Unicode codepoints, this trie is partitioned into three -//! disjoint sets: tree1, tree2, tree3. The first set corresponds to codepoints -//! \[0, 0x800), the second \[0x800, 0x10000) and the third \[0x10000, -//! 0x110000). These partitions conveniently correspond to the space of 1 or 2 -//! byte UTF-8 encoded codepoints, 3 byte UTF-8 encoded codepoints and 4 byte -//! UTF-8 encoded codepoints, respectively. -//! -//! Lookups in this data structure are significantly more efficient than binary -//! search. A lookup touches either 1, 2, or 3 cache lines based on which of the -//! trie partitions is being accessed. -//! -//! One possible performance improvement would be for this crate to expose a way -//! to query based on a UTF-8 encoded string, returning the Unicode property -//! corresponding to the first character in the string. Without such an API, the -//! caller is required to tokenize their UTF-8 encoded input data into `char`, -//! hand the `char` into `ucd-trie`, only for `ucd-trie` to undo that work by -//! converting back into the variable-length representation for trie traversal. -//! -//! #### fst -//! -//! Uses a [finite state transducer][fst]. This representation is built into -//! [ucd-generate] but I am not aware of any advantage over the `ucd-trie` -//! representation. In particular `ucd-trie` is optimized for storing Unicode -//! properties while `fst` is not. -//! -//! [fst]: https://github.com/BurntSushi/fst -//! [ucd-generate]: https://github.com/BurntSushi/ucd-generate -//! -//! As far as I can tell, the main thing that causes `fst` to have large size -//! and slow lookups for this use case relative to `ucd-trie` is that it does -//! not specialize for the fact that only 21 of the 32 bits in a `char` are -//! meaningful. There are some dense arrays in the structure with large ranges -//! that could never possibly be used. -//! -//! #### roaring -//! -//! This crate is a pure-Rust implementation of [Roaring Bitmap], a data -//! structure designed for storing sets of 32-bit unsigned integers. -//! -//! [Roaring Bitmap]: https://roaringbitmap.org/about/ -//! -//! Roaring bitmaps are compressed bitmaps which tend to outperform conventional -//! compressed bitmaps such as WAH, EWAH or Concise. In some instances, they can -//! be hundreds of times faster and they often offer significantly better -//! compression. -//! -//! In this use case the performance was reasonably competitive but still -//! substantially slower than the Unicode-optimized crates. Meanwhile the -//! compression was significantly worse, requiring 6× as much storage for -//! the data structure. -//! -//! I also benchmarked the [`croaring`] crate which is an FFI wrapper around the -//! C reference implementation of Roaring Bitmap. This crate was consistently -//! about 15% slower than pure-Rust `roaring`, which could just be FFI overhead. -//! I did not investigate further. -//! -//! [`croaring`]: https://crates.io/crates/croaring -//! -//! #### unicode-ident -//! -//! This crate is most similar to the `ucd-trie` library, in that it's based on -//! bitmaps stored in the leafs of a trie representation, achieving both prefix -//! compression and suffix compression. -//! -//! The key differences are: -//! -//! - Uses a single 2-level trie, rather than 3 disjoint partitions of different -//! depth each. -//! 
- Uses significantly larger chunks: 512 bits rather than 64 bits. -//! - Compresses the XID\_Start and XID\_Continue properties together -//! simultaneously, rather than duplicating identical trie leaf chunks across -//! the two. -//! -//! The following diagram show the XID\_Start and XID\_Continue Unicode boolean -//! properties in uncompressed form, in row-major order: -//! -//! <table> -//! <tr><th>XID_Start</th><th>XID_Continue</th></tr> -//! <tr> -//! <td><img alt="XID_Start bitmap" width="256" src="https://user-images.githubusercontent.com/1940490/168647353-c6eeb922-afec-49b2-9ef5-c03e9d1e0760.png"></td> -//! <td><img alt="XID_Continue bitmap" width="256" src="https://user-images.githubusercontent.com/1940490/168647367-f447cca7-2362-4d7d-8cd7-d21c011d329b.png"></td> -//! </tr> -//! </table> -//! -//! Uncompressed, these would take 140 K to store, which is beyond what would be -//! reasonable. However, as you can see there is a large degree of similarity -//! between the two bitmaps and across the rows, which lends well to -//! compression. -//! -//! This crate stores one 512-bit "row" of the above bitmaps in the leaf level -//! of a trie, and a single additional level to index into the leafs. It turns -//! out there are 124 unique 512-bit chunks across the two bitmaps so 7 bits are -//! sufficient to index them. -//! -//! The chunk size of 512 bits is selected as the size that minimizes the total -//! size of the data structure. A smaller chunk, like 256 or 128 bits, would -//! achieve better deduplication but require a larger index. A larger chunk -//! would increase redundancy in the leaf bitmaps. 512 bit chunks are the -//! optimum for total size of the index plus leaf bitmaps. -//! -//! In fact since there are only 124 unique chunks, we can use an 8-bit index -//! with a spare bit to index at the half-chunk level. This achieves an -//! additional 8.5% compression by eliminating redundancies between the second -//! half of any chunk and the first half of any other chunk. Note that this is -//! not the same as using chunks which are half the size, because it does not -//! necessitate raising the size of the trie's first level. -//! -//! In contrast to binary search or the `ucd-trie` crate, performing lookups in -//! this data structure is straight-line code with no need for branching. - -#![no_std] -#![doc(html_root_url = "https://docs.rs/unicode-ident/1.0.22")] -#![allow( - clippy::doc_markdown, - clippy::must_use_candidate, - clippy::unreadable_literal -)] - -#[rustfmt::skip] -mod tables; - -pub use crate::tables::UNICODE_VERSION; -use crate::tables::{ASCII_CONTINUE, ASCII_START, CHUNK, LEAF, TRIE_CONTINUE, TRIE_START}; - -static ZERO: u8 = 0; - -/// Whether the character has the Unicode property XID\_Start. -pub fn is_xid_start(ch: char) -> bool { - if ch.is_ascii() { - return ASCII_START & (1 << ch as u128) != 0; - } - let chunk = *TRIE_START.0.get(ch as usize / 8 / CHUNK).unwrap_or(&ZERO); - let offset = chunk as usize * CHUNK / 2 + ch as usize / 8 % CHUNK; - unsafe { LEAF.0.get_unchecked(offset) }.wrapping_shr(ch as u32 % 8) & 1 != 0 -} - -/// Whether the character has the Unicode property XID\_Continue. 
-pub fn is_xid_continue(ch: char) -> bool { - if ch.is_ascii() { - return ASCII_CONTINUE & (1 << ch as u128) != 0; - } - let chunk = *TRIE_CONTINUE - .0 - .get(ch as usize / 8 / CHUNK) - .unwrap_or(&ZERO); - let offset = chunk as usize * CHUNK / 2 + ch as usize / 8 % CHUNK; - unsafe { LEAF.0.get_unchecked(offset) }.wrapping_shr(ch as u32 % 8) & 1 != 0 -} diff --git a/vendor/unicode-ident/src/tables.rs b/vendor/unicode-ident/src/tables.rs deleted file mode 100644 index 59634efe78b82b..00000000000000 --- a/vendor/unicode-ident/src/tables.rs +++ /dev/null @@ -1,663 +0,0 @@ -// @generated by ../generate. To regenerate, run the following in the repo root: -// -// $ curl -LO https://www.unicode.org/Public/17.0.0/ucd/UCD.zip -// $ unzip UCD.zip -d UCD -// $ cargo run --manifest-path generate/Cargo.toml - -#[repr(C, align(8))] -pub(crate) struct Align8<T>(pub(crate) T); -#[repr(C, align(64))] -pub(crate) struct Align64<T>(pub(crate) T); - -pub const UNICODE_VERSION: (u8, u8, u8) = (17, 0, 0); - -pub(crate) const ASCII_START: u128 = 0x7fffffe07fffffe0000000000000000; -pub(crate) const ASCII_CONTINUE: u128 = 0x7fffffe87fffffe03ff000000000000; - -pub(crate) const CHUNK: usize = 64; - -pub(crate) static TRIE_START: Align8<[u8; 411]> = Align8([ - 0x04, 0x0B, 0x0F, 0x13, 0x17, 0x1B, 0x1F, 0x23, 0x27, 0x2D, 0x31, 0x34, 0x38, 0x3C, 0x40, 0x02, - 0x45, 0x00, 0x00, 0x00, 0x00, 0x00, 0x49, 0x00, 0x4D, 0x00, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, - 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x06, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, - 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, - 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, - 0x05, 0x05, 0x51, 0x54, 0x58, 0x5C, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, - 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x09, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x60, 0x64, 0x66, - 0x6A, 0x6E, 0x72, 0x28, 0x76, 0x78, 0x7C, 0x80, 0x84, 0x88, 0x8C, 0x90, 0x94, 0x98, 0x9C, 0xA0, - 0x05, 0x2B, 0xA4, 0x00, 0x00, 0x00, 0x00, 0xA6, 0x05, 0x05, 0xA8, 0x05, 0x05, 0x05, 0x05, 0x05, - 0x05, 0x32, 0x05, 0xAD, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0xAE, 0x00, 0x00, 0x00, 0x05, 0xB2, 0xB6, 0xBA, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, - 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0xBE, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xC0, 0x43, 0xC2, 0x00, 0x00, 0x00, 0x00, 0xC5, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xD1, 0xD3, 0x00, 0x00, 0x00, 0xC9, - 0xD9, 0xDD, 0xE1, 0xE5, 0xE9, 0x00, 0x00, 0xED, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, - 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, - 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, - 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, - 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, - 0x05, 0x05, 0x05, 0xEF, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0xF1, 0x05, 0x05, 0x05, - 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0xF3, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, - 0x05, 0x05, 0x05, 0x05, 0x05, 0x52, 0x05, 
0xF5, 0x00, 0x00, 0x00, 0x00, 0x05, 0xAF, 0x00, 0x00, - 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0xA9, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, - 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0xF7, -]); - -pub(crate) static TRIE_CONTINUE: Align8<[u8; 1793]> = Align8([ - 0x08, 0x0D, 0x11, 0x15, 0x19, 0x1D, 0x21, 0x25, 0x2A, 0x2F, 0x31, 0x36, 0x3A, 0x3E, 0x42, 0x02, - 0x47, 0x00, 0x00, 0x00, 0x00, 0x00, 0x4B, 0x00, 0x4F, 0x00, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, - 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x06, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, - 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, - 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, - 0x05, 0x05, 0x51, 0x56, 0x5A, 0x5E, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, - 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x09, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x62, 0x64, 0x68, - 0x6C, 0x70, 0x74, 0x28, 0x76, 0x7A, 0x7E, 0x82, 0x86, 0x8A, 0x8E, 0x92, 0x96, 0x9A, 0x9E, 0xA2, - 0x05, 0x2B, 0xA4, 0x00, 0x00, 0x00, 0x00, 0xA6, 0x05, 0x05, 0xAB, 0x05, 0x05, 0x05, 0x05, 0x05, - 0x05, 0x32, 0x05, 0xAD, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0xB0, 0x00, 0x00, 0x00, 0x05, 0xB4, 0xB8, 0xBC, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, - 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0xBE, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xC0, 0x43, 0xC2, 0x00, 0x00, 0x00, 0x00, 0xC8, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xCB, 0xC3, 0xC6, 0xCE, 0xD1, 0xD5, 0x00, 0xD7, 0x00, 0xC9, - 0xDB, 0xDF, 0xE3, 0xE7, 0xEB, 0x00, 0x00, 0xED, 0x00, 0x00, 0x00, 0x00, 0x00, 0xCC, 0x00, 0x00, - 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, - 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, - 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, - 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, - 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, - 0x05, 0x05, 0x05, 0xEF, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0xF1, 0x05, 0x05, 0x05, - 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0xF3, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, - 0x05, 0x05, 0x05, 0x05, 0x05, 0x52, 0x05, 0xF5, 0x00, 0x00, 0x00, 0x00, 0x05, 0xAF, 0x00, 0x00, - 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0xA9, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, - 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0xF7, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0xCF, -]); - -pub(crate) static LEAF: Align64<[u8; 7968]> = Align64([ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0x3F, 0x3F, 0xFF, 0xFF, 0xFF, 0xFF, 0x3F, 0x3F, 0xFF, 0xAA, 0xFF, 0xFF, 0xFF, 0x3F, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xDF, 0x5F, 0xDC, 0x1F, 0xCF, 0x0F, 0xFF, 0x1F, 0xDC, 0x1F, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x20, 0x04, 0xFF, 0xFF, 0x7F, 0xFF, 0xFF, 0xFF, 0x7F, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0xA0, 0x04, 0xFF, 0xFF, 0x7F, 0xFF, 0xFF, 0xFF, 0x7F, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0x0F, 0x00, 0xFF, 0xFF, 0x7F, 0xF8, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0F, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xC3, 0xFF, 0x03, 0x00, 0x1F, 0x50, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xDF, 0xB8, - 0x40, 0xD7, 0xFF, 0xFF, 0xFB, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xBF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xC3, 0xFF, 0x03, 0x00, 0x1F, 0x50, 0x00, 0x00, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xDF, 0xB8, - 0xC0, 0xD7, 0xFF, 0xFF, 
0xFB, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xBF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0x03, 0xFC, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFE, 0xFF, 0xFF, 0xFF, 0x7F, 0x02, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0x87, 0x07, 0x00, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFB, 0xFC, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFE, 0xFF, 0xFF, 0xFF, 0x7F, 0x02, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0x01, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xBF, 0xB6, 0x00, 0xFF, 0xFF, 0xFF, 0x87, 0x07, 0x00, - 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x07, 0x00, 0x00, 0x00, 0xC0, 0xFE, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x2F, 0x00, 0x60, 0xC0, 0x00, 0x9C, - 0x00, 0x00, 0xFD, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0xE0, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0x3F, 0x00, 0x02, 0x00, 0x00, 0xFC, 0xFF, 0xFF, 0xFF, 0x07, 0x30, 0x04, - 0x00, 0x00, 0xFF, 0x07, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xC3, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xEF, 0x9F, 0xFF, 0xFD, 0xFF, 0x9F, - 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xE7, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x03, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x3F, 0x24, - 0xFF, 0xFF, 0x3F, 0x04, 0x10, 0x01, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0x01, 0xFF, 0x07, 0xFF, 0xFF, - 0xFF, 0xFE, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0xF0, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x23, 0x00, 0x00, 0x01, 0xFF, 0x03, 0x00, 0xFE, 0xFF, - 0xE1, 0x9F, 0xF9, 0xFF, 0xFF, 0xFD, 0xC5, 0x23, 0x00, 0x40, 0x00, 0xB0, 0x03, 0x00, 0x03, 0x10, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x3F, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0x0F, 0xFF, 0x07, 0xFF, 0xFF, - 0xFF, 0xFE, 0x80, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFB, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xCF, 0xFF, 0xFE, 0xFF, - 0xEF, 0x9F, 0xF9, 0xFF, 0xFF, 0xFD, 0xC5, 0xF3, 0x9F, 0x79, 0x80, 0xB0, 0xCF, 0xFF, 0x03, 0x50, - 0xE0, 0x87, 0xF9, 0xFF, 0xFF, 0xFD, 0x6D, 0x03, 0x00, 0x00, 0x00, 0x5E, 0x00, 0x00, 0x1C, 0x00, - 0xE0, 0xBF, 0xFB, 0xFF, 0xFF, 0xFD, 0xED, 0x23, 0x00, 0x00, 0x01, 0x00, 0x03, 0x00, 0x00, 0x02, - 0xE0, 0x9F, 0xF9, 0xFF, 0xFF, 0xFD, 0xED, 0x23, 0x00, 0x00, 0x00, 0xB0, 0x03, 0x00, 0x02, 0x00, - 0xE8, 0xC7, 0x3D, 0xD6, 0x18, 0xC7, 0xFF, 0x03, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, - 0xEE, 0x87, 0xF9, 0xFF, 0xFF, 0xFD, 0x6D, 0xD3, 0x87, 0x39, 0x02, 0x5E, 0xC0, 0xFF, 0x3F, 0x00, - 0xEE, 0xBF, 0xFB, 0xFF, 0xFF, 0xFD, 0xED, 0xF3, 0xBF, 0x3B, 0x01, 0x00, 0xCF, 0xFF, 0x00, 0xFE, - 0xEE, 0x9F, 0xF9, 0xFF, 0xFF, 0xFD, 0xED, 0xF3, 0x9F, 0x39, 0xE0, 0xB0, 0xCF, 0xFF, 0x02, 0x00, - 0xEC, 0xC7, 0x3D, 0xD6, 0x18, 0xC7, 0xFF, 0xC3, 0xC7, 0x3D, 0x81, 0x00, 0xC0, 0xFF, 0x00, 0x00, - 0xE0, 0xDF, 0xFD, 0xFF, 0xFF, 0xFD, 0xFF, 0x23, 0x00, 0x00, 0x00, 0x37, 0x03, 0x00, 0x00, 0x00, - 0xE1, 0xDF, 0xFD, 0xFF, 0xFF, 0xFD, 0xEF, 0x23, 0x00, 0x00, 0x00, 0x70, 0x03, 0x00, 0x06, 0x00, - 0xF0, 0xDF, 0xFD, 0xFF, 0xFF, 0xFF, 0xFF, 0x27, 0x00, 0x40, 0x70, 0x80, 0x03, 0x00, 0x00, 0xFC, - 0xE0, 0xFF, 0x7F, 0xFC, 0xFF, 0xFF, 0xFB, 0x2F, 
0x7F, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0xFF, 0xDF, 0xFD, 0xFF, 0xFF, 0xFD, 0xFF, 0xF3, 0xDF, 0x3D, 0x60, 0x37, 0xCF, 0xFF, 0x00, 0x00, - 0xEF, 0xDF, 0xFD, 0xFF, 0xFF, 0xFD, 0xEF, 0xF3, 0xDF, 0x3D, 0x60, 0x70, 0xCF, 0xFF, 0x0E, 0x00, - 0xFF, 0xDF, 0xFD, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xDF, 0x7D, 0xF0, 0x80, 0xCF, 0xFF, 0x00, 0xFC, - 0xEE, 0xFF, 0x7F, 0xFC, 0xFF, 0xFF, 0xFB, 0x2F, 0x7F, 0x84, 0x5F, 0xFF, 0xC0, 0xFF, 0x0C, 0x00, - 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x05, 0x00, 0x7F, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0xD6, 0xF7, 0xFF, 0xFF, 0xAF, 0xFF, 0x05, 0x20, 0x5F, 0x00, 0x00, 0xF0, 0x00, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFE, 0xFF, 0xFF, 0xFF, 0x1F, 0x00, 0x00, - 0x00, 0x1F, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x07, 0xFF, 0x7F, 0xFF, 0x03, 0x00, 0x00, 0x00, 0x00, - 0xD6, 0xF7, 0xFF, 0xFF, 0xAF, 0xFF, 0xFF, 0x3F, 0x5F, 0x7F, 0xFF, 0xF3, 0x00, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x03, 0xFF, 0x03, 0xA0, 0xC2, 0xFF, 0xFE, 0xFF, 0xFF, 0xFF, 0x1F, 0xFE, 0xFF, - 0xDF, 0xFF, 0xFF, 0xFE, 0xFF, 0xFF, 0xFF, 0x1F, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x07, 0x00, 0x80, 0x00, 0x00, 0x3F, 0x3C, 0x62, 0xC0, 0xE1, 0xFF, - 0x03, 0x40, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xBF, 0x20, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xF7, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x7F, 0x00, 0xFF, 0xFF, 0x3F, 0x00, 0xFF, 0x00, 0x00, 0x00, - 0xBF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFD, 0x07, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x03, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0x3F, 0xFF, 0xFF, 0xFF, 0xFF, 0xBF, 0x20, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xF7, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x3D, 0x7F, 0x3D, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0x3D, 0xFF, 0xFF, 0xFF, 0xFF, 0x3D, 0x7F, 0x3D, 0xFF, 0x7F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0x3D, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x07, 0x00, 0x00, 0x00, 0x00, - 0xFF, 0xFF, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x3F, 0x3F, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x3D, 0x7F, 0x3D, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0x3D, 0xFF, 0xFF, 0xFF, 0xFF, 0x3D, 0x7F, 0x3D, 0xFF, 0x7F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0x3D, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xE7, 0x00, 0xFE, 0x03, 0x00, - 0xFF, 0xFF, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x3F, 0x3F, - 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 
0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x07, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x9F, 0xFF, 0xFF, - 0xFE, 0xFF, 0xFF, 0x07, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xC7, 0xFF, 0x01, - 0xFF, 0xFF, 0x03, 0x80, 0xFF, 0xFF, 0x03, 0x00, 0xFF, 0xFF, 0x03, 0x00, 0xFF, 0xDF, 0x01, 0x00, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0F, 0x00, 0x00, 0x00, 0x80, 0x10, 0x00, 0x00, 0x00, 0x00, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x9F, 0xFF, 0xFF, - 0xFE, 0xFF, 0xFF, 0x07, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xC7, 0xFF, 0x01, - 0xFF, 0xFF, 0x3F, 0x80, 0xFF, 0xFF, 0x1F, 0x00, 0xFF, 0xFF, 0x0F, 0x00, 0xFF, 0xDF, 0x0D, 0x00, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x8F, 0x30, 0xFF, 0x03, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x01, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x05, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x3F, 0x00, - 0xFF, 0xFF, 0xFF, 0x7F, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0x3F, 0x1F, 0x00, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0F, 0xFF, 0xFF, 0xFF, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0xB8, 0xFF, 0x03, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x01, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x07, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x3F, 0x00, - 0xFF, 0xFF, 0xFF, 0x7F, 0xFF, 0x0F, 0xFF, 0x0F, 0xC0, 0xFF, 0xFF, 0xFF, 0xFF, 0x3F, 0x1F, 0x00, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0F, 0xFF, 0xFF, 0xFF, 0x03, 0xFF, 0x07, 0x00, 0x00, 0x00, 0x00, - 0xFF, 0xFF, 0x7F, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x1F, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0xE0, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0F, 0x00, 0xE0, 0x1F, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0xF8, 0xFF, 0xFF, 0xFF, 0x01, 0xC0, 0x00, 0xFC, 0xFF, 0xFF, 0xFF, 0xFF, 0x3F, 0x00, 0x00, 0x00, - 0xFF, 0xFF, 0xFF, 0x0F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x7F, 0xFF, 0xFF, 0xFF, 0x9F, - 0xFF, 0x03, 0xFF, 0x03, 0x80, 0x00, 0xFF, 0xBF, 0xFF, 0xFF, 0xFF, 0x3F, 0xFF, 0x0F, 0x00, 0x00, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x1F, 0xFF, 0x03, 0x00, 0xF8, 0x0F, 0x00, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0F, 0x00, - 0xFF, 0xFF, 0xFF, 0xFF, 0x0F, 0x00, 0x00, 0x00, 0x00, 0xE0, 0x00, 0xFC, 0xFF, 0xFF, 0xFF, 0x3F, - 0xFF, 0x07, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xE7, 0x00, 0x00, 0x00, 0x00, 0x00, 0xDE, 0x6F, 0x04, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0xFF, 0xE3, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x3F, - 0xFF, 0x07, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xE7, 0x00, 0x00, 0xF7, 0xFF, 0xFF, 0xFF, 0xFF, 0x07, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0x07, 0x00, 0x04, 0x00, 0x00, 0x00, 0x27, 0x00, 0xF0, 0x00, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x80, - 0x00, 0x00, 0xFF, 0x1F, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x84, 0xFC, 0x2F, 0x3F, 0x50, 0xFD, 0xFF, 0xF3, 0xE0, 0x43, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x30, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x01, 0x00, 0x10, 0x00, 0x00, 0x00, 0x02, 0x80, - 0x00, 0x00, 0xFF, 0x1F, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0x1F, 0xE2, 0xFF, 0x01, 0x00, - 0x84, 0xFC, 0x2F, 0x3F, 0x50, 0xFD, 0xFF, 0xF3, 0xE0, 0x43, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x1F, 0x78, 0x0C, 0x00, - 0xFF, 0xFF, 0xFF, 0xFF, 0xBF, 0x20, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x80, 0x00, 0x00, - 0xFF, 0xFF, 0x7F, 0x00, 0x7F, 0x7F, 0x7F, 0x7F, 0x7F, 0x7F, 0x7F, 0x7F, 0x00, 0x00, 0x00, 0x00, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x1F, 0xF8, 0x0F, 0x00, - 0xFF, 0xFF, 0xFF, 0xFF, 0xBF, 0x20, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x80, 0x00, 0x80, - 0xFF, 0xFF, 0x7F, 0x00, 0x7F, 0x7F, 0x7F, 0x7F, 0x7F, 0x7F, 0x7F, 0x7F, 0xFF, 0xFF, 0xFF, 0xFF, - 0xE0, 0x00, 0x00, 0x00, 0xFE, 0x03, 0x3E, 0x1F, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0x7F, 0xE0, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xF7, - 0xE0, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0x7F, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, - 0xE0, 0x00, 0x00, 0x00, 0xFE, 0xFF, 0x3E, 0x1F, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0x7F, 0xE6, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xE0, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0x7F, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0x1F, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x3F, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x01, 0x00, 0xFF, 0xFF, - 0xFF, 0x1F, 0xFF, 0xFF, 0x00, 0x0C, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x7F, 0x00, 0x80, - 0xFF, 0xFF, 0xFF, 0x3F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, - 0x00, 0x00, 0x80, 0xFF, 0xFC, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xF9, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x1F, 0x00, 0x00, 0xFE, 0xFF, - 0xFF, 0x1F, 0xFF, 0xFF, 0xFF, 0x0F, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xF0, 0xBF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x03, 0x00, - 0x00, 0x00, 0x80, 0xFF, 
0xFC, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xF9, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x1F, 0x00, 0x00, 0xFE, 0xFF, - 0xBB, 0xF7, 0xFF, 0xFF, 0x07, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0F, 0x00, - 0xFC, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0F, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFC, 0x68, - 0x00, 0xFC, 0xFF, 0xFF, 0x3F, 0x00, 0xFF, 0xFF, 0x7F, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0x1F, - 0xF0, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x07, 0x00, 0x00, 0x80, 0x00, 0x00, 0xDF, 0xFF, 0x00, 0x7C, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x10, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0F, 0x00, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x3F, 0x00, 0xFF, 0x03, 0xFF, 0xFF, 0xFF, 0xE8, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x3F, 0xFF, 0xFF, 0xFF, 0xFF, 0x0F, 0x00, 0xFF, 0xFF, 0xFF, 0x1F, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x01, 0x80, 0xFF, 0x03, 0xFF, 0xFF, 0xFF, 0x7F, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x01, 0x00, 0x00, 0xF7, 0x0F, 0x00, 0x00, 0xFF, 0xFF, 0x7F, 0xC4, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x62, 0x3E, 0x05, 0x00, 0x00, 0x38, 0xFF, 0x07, 0x1C, 0x00, - 0x7E, 0x7E, 0x7E, 0x00, 0x7F, 0x7F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xF7, 0xFF, 0x03, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x07, 0x00, 0x00, 0x00, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x7F, 0x00, 0xFF, 0x3F, 0xFF, 0x03, 0xFF, 0xFF, 0x7F, 0xFC, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x07, 0x00, 0x00, 0x38, 0xFF, 0xFF, 0x7C, 0x00, - 0x7E, 0x7E, 0x7E, 0x00, 0x7F, 0x7F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xF7, 0xFF, 0x03, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x37, 0xFF, 0x03, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x3F, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x03, 0x00, 0x00, 0x00, 0x00, - 0x7F, 0x00, 0xF8, 0xA0, 0xFF, 0xFD, 0x7F, 0x5F, 0xDB, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x03, 0x00, 0x00, 0x00, 0xF8, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x3F, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x03, 0x00, 0x00, 0x00, 0x00, - 0x7F, 0x00, 0xF8, 0xE0, 0xFF, 0xFD, 0x7F, 0x5F, 0xDB, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x03, 0x00, 0x00, 0x00, 0xF8, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x3F, 0xF0, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x3F, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFC, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0x03, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8A, 0xAA, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x1F, - 0x00, 0x00, 0x00, 0x00, 0xFE, 0xFF, 0xFF, 0x07, 0xFE, 0xFF, 0xFF, 0x07, 0xC0, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0x3F, 0xFF, 0xFF, 0xFF, 0x7F, 0xFC, 0xFC, 0xFC, 0x1C, 0x00, 0x00, 0x00, 0x00, - 0xFF, 0xFF, 0x00, 0x00, 0xFF, 0xFF, 0x18, 0x00, 0x00, 0xE0, 0x00, 0x00, 0x00, 0x00, 0x8A, 0xAA, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x1F, - 0x00, 0x00, 0xFF, 0x03, 0xFE, 0xFF, 0xFF, 0x87, 
0xFE, 0xFF, 0xFF, 0x07, 0xE0, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x7F, 0xFC, 0xFC, 0xFC, 0x1C, 0x00, 0x00, 0x00, 0x00, - 0xFF, 0xEF, 0xFF, 0xFF, 0x7F, 0xFF, 0xFF, 0xB7, 0xFF, 0x3F, 0xFF, 0x3F, 0x00, 0x00, 0x00, 0x00, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x07, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x1F, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0xFF, 0xEF, 0xFF, 0xFF, 0x7F, 0xFF, 0xFF, 0xB7, 0xFF, 0x3F, 0xFF, 0x3F, 0x00, 0x00, 0x00, 0x00, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x07, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x1F, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0xFF, 0xFF, 0xFF, 0x1F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, - 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0xE0, 0xFF, 0xFF, 0xFF, 0x07, 0xFF, 0xFF, 0xFF, 0xFF, 0x3F, 0x00, - 0xFF, 0xFF, 0xFF, 0x3F, 0xFF, 0xFF, 0xFF, 0xFF, 0x0F, 0xFF, 0x3E, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0xFF, 0xFF, 0xFF, 0x1F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x01, 0x00, 0x01, 0x00, 0x00, 0x00, - 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0xE0, 0xFF, 0xFF, 0xFF, 0x07, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x07, - 0xFF, 0xFF, 0xFF, 0x3F, 0xFF, 0xFF, 0xFF, 0xFF, 0x0F, 0xFF, 0x3E, 0x00, 0x00, 0x00, 0x00, 0x00, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0x3F, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0x0F, 0xFF, 0xFF, 0xFF, 0xFF, 0x0F, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0F, 0x00, 0xFF, 0xF7, - 0xFF, 0xF7, 0xB7, 0xFF, 0xFB, 0xFF, 0xFB, 0x1B, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0F, 0x00, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0x3F, 0xFF, 0x03, 0xFF, 0xFF, 0xFF, 0xFF, 0x0F, 0xFF, 0xFF, 0xFF, 0xFF, 0x0F, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0F, 0x00, 0xFF, 0xF7, - 0xFF, 0xF7, 0xB7, 0xFF, 0xFB, 0xFF, 0xFB, 0x1B, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0F, 0x00, - 0x3F, 0xFD, 0xFF, 0xFF, 0xFF, 0xFF, 0xBF, 0x91, 0xFF, 0xFF, 0x3F, 0x00, 0xFF, 0xFF, 0x7F, 0x00, - 0xFF, 0xFF, 0xFF, 0x7F, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0x37, 0x00, - 0xFF, 0xFF, 0x3F, 0x00, 0xFF, 0xFF, 0xFF, 0x03, 0xFF, 0xFF, 0xFF, 0x03, 0x00, 0x00, 0x00, 0x00, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xC0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x01, 0x00, 0xEF, 0xFE, 0xFF, 0xFF, 0x3F, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0x1F, - 0xFF, 0xFF, 0xFF, 0x1F, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFE, 0xFF, 0xFF, 0x1F, 0x00, 0x00, 0x00, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x3F, 0x00, 0xFF, 0xFF, 0x3F, 0x00, 0xFF, 0xFF, 0x07, 0x00, - 0xFF, 0xFF, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x6F, 0xF0, 0xEF, 0xFE, 0xFF, 0xFF, 0x3F, 0x87, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0x1F, - 0xFF, 0xFF, 0xFF, 0x1F, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFE, 0xFF, 0xFF, 0x7F, 0x00, 0x00, 0x00, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x3F, 0x00, 0xFF, 0xFF, 0x3F, 0x00, 
0xFF, 0xFF, 0x07, 0x00, - 0xFF, 0xFF, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x07, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x07, 0x00, - 0xFF, 0xFF, 0xFF, 0xFF, 0x0F, 0x00, 0x00, 0x00, 0x00, 0xFC, 0xFF, 0xFF, 0x3F, 0x80, 0xFF, 0xFF, - 0x3F, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x07, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x07, 0x00, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0xFF, 0x03, 0xFF, 0xFF, 0xFF, 0xFF, 0x3F, 0xBE, 0xFF, 0xFF, - 0x3F, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x03, 0x03, 0x00, 0xFC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0xFF, 0xFF, 0xFF, 0x1F, 0x80, 0x00, 0xFF, 0xFF, 0x3F, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, - 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0x1F, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0x7F, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x1B, 0x03, 0x00, 0xFC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFC, - 0xFF, 0xFF, 0xFF, 0x1F, 0x80, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0x01, 0x00, 0x00, 0x00, 0xFF, 0xFF, - 0x3F, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0x1F, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0x7F, 0x00, - 0xF8, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x26, 0x00, - 0xF8, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0x01, 0x00, 0x00, - 0xF8, 0xFF, 0xFF, 0xFF, 0x7F, 0x00, 0x00, 0x00, 0x90, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0x47, 0x00, - 0xF8, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x07, 0x00, 0x1E, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x00, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x7F, 0x00, 0x00, 0x00, 0xC0, 0xFF, 0x3F, 0x80, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x07, 0x04, 0x00, 0xFF, 0xFF, 0xFF, 0x01, 0xFF, 0x03, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xDF, 0xFF, 0xF0, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0x4F, 0x00, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x1F, 0xDE, 0xFF, 0x17, 0x00, 0x00, 0x00, 0x00, - 0xFF, 0xFF, 0xFB, 0xFF, 0xFF, 0x0F, 0x00, 0x80, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x7F, 0xBD, 0xFF, 0xBF, 0xFF, 0x01, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x7F, 0x00, 0x00, 0x00, 0x00, - 0xE0, 0x9F, 0xF9, 0xFF, 0xFF, 0xFD, 0xED, 0x23, 0x00, 0x00, 0x01, 0xE0, 0x03, 0x00, 0x00, 0x00, - 0xFF, 0x4B, 0xFF, 0xFF, 0xFF, 0xFF, 0xBF, 0x00, 0x00, 0x00, 0x0A, 0x00, 0x00, 0x00, 0x00, 0x00, - 0xFF, 0xFF, 0xFB, 0xFF, 0xFF, 0xFF, 0xFF, 0xC0, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x7F, 0xBD, 0xFF, 0xBF, 0xFF, 0x01, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x07, 0xFF, 0x03, - 0xEF, 0x9F, 0xF9, 0xFF, 0xFF, 0xFD, 0xED, 0xFB, 0x9F, 0x39, 0x81, 0xE0, 0xCF, 0x1F, 0x1F, 0x00, - 0xFF, 0x4B, 0xFF, 0xFF, 0xFF, 0xFF, 0xBF, 0xFF, 0xA5, 0xF7, 0x0F, 0x00, 0x06, 0x00, 0x00, 0x00, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x1F, 0x00, 0x80, 0x07, 0x00, 0x80, 0x03, 0x00, 0x00, 0x00, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0xB0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x7F, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0F, 0x00, 0x00, 0x00, 0x00, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x07, 0xFF, 0xC3, 0x03, 0x00, 0x00, 0x00, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xBF, 0x00, 0xFF, 0x03, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x3F, 0xFF, 0x01, 0x00, 0x00, 0x3F, 0x00, 0x00, 0x00, 0x00, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x07, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0xFF, 0xFF, 0xFF, 0x07, 0x00, 0x00, 0x00, 0x00, 0x7F, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x11, 0x00, 0xFF, 0x03, 0x00, 0x00, 0x00, 0x00, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x01, 0xFF, 0x03, 0xFF, 0xFF, 0x0F, 0x00, 0x00, 0x00, - 0xFF, 0xFF, 0xFF, 0xE7, 0xFF, 0x0F, 0xFF, 0x03, 0x7F, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0F, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x80, - 0x7F, 0xF2, 0x6F, 0xFF, 0xFF, 0xFF, 0x00, 0x80, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFC, 0xFF, 0xFF, 0xFF, 0xFF, 0x01, 0x00, 0x0A, 0x00, 0x00, 0x00, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x07, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x03, 0x00, 0x80, - 0x7F, 0xF2, 0x6F, 0xFF, 0xFF, 0xFF, 0xBF, 0xF9, 0x0F, 0x00, 0xFF, 0x03, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFC, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFC, 0x1B, 0x00, 0x00, 0x00, - 0x01, 0xF8, 0xFF, 0xFF, 0xFF, 0xFF, 0x07, 0x04, 0x00, 0x00, 0x01, 0xF0, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0x03, 0x00, 0x20, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x01, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0x01, 0x00, 0x00, 0x00, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x7F, 0x80, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0x23, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x01, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0x01, 0x00, 0xFF, 0x03, - 0xFF, 0xFD, 0xFF, 0xFF, 0xFF, 0x7F, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFC, 0xFF, - 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x7F, 0xFB, 0xFF, 0xFF, 0xFF, 0xFF, 0x01, 0x00, 0x40, 0x00, 0x00, 0x00, 0xBF, 0xFD, 0xFF, 0xFF, - 0xFF, 0x03, 0x00, 0x01, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0F, 0x00, 0x00, 0x00, 0x00, - 0xFF, 0xFD, 0xFF, 0xFF, 0xFF, 0xFF, 0x7F, 0xFF, 0x01, 0x00, 0xFF, 0x03, 0x00, 0x00, 0xFC, 0xFF, - 0xFF, 0xFF, 0xFC, 0xFF, 0xFF, 0xFE, 0x7F, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x7F, 0xFB, 0xFF, 0xFF, 0xFF, 0xFF, 0x7F, 0xB4, 0xFF, 0x00, 0xFF, 0x03, 0xBF, 0xFD, 0xFF, 0xFF, - 0xFF, 0x7F, 0xFB, 0x01, 
0xFF, 0x03, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0F, 0xFF, 0x03, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0x07, 0x00, - 0xF4, 0xFF, 0xFD, 0xFF, 0xFF, 0xFF, 0x0F, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0x7F, 0x00, - 0xFF, 0xFF, 0xFD, 0xFF, 0xFF, 0xFF, 0xFF, 0xC7, 0x07, 0x00, 0xFF, 0x07, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x7F, 0x00, 0x00, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0F, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x01, 0x00, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x7E, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x07, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0xFF, 0xFF, 0x3F, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x7F, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0xFF, 0xFF, 0xFF, 0x3F, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x01, 0xFF, 0xFF, 0xFF, 0x7F, 0x00, 0x00, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x7F, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0x3F, 0x00, 0x00, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x0F, 0x00, 0x00, 0x00, 0xF8, 0xFF, 0xFF, 0xE0, - 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x01, 0xFF, 0xFF, 0xFF, 0x7F, 0xFF, 0x03, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x7F, 0xFF, 0x03, 0xFF, 0xFF, 0xFF, 0x3F, 0x1F, 0x00, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x7F, 0x00, 0x0F, 0x00, 0xFF, 0x03, 0xF8, 0xFF, 0xFF, 0xE0, - 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x1F, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x1F, 0xFF, 0x03, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xF9, 0xFF, 0xFF, 0x0F, 0x00, 0x00, 0x00, 0x00, 0x00, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x07, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0xF8, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0B, 0x00, 0x7C, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xF9, 0xFF, 0xFF, 0x0F, 0x00, 0x00, 0x00, 0x00, 0x00, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x87, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0x80, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1B, 0x00, 0x7F, 0x00, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x3F, 0x00, 0x00, 0x00, 0x00, 0x80, - 0xFF, 0xFF, 0xFF, 0x7F, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x07, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xEF, 0x6F, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0F, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x3F, 0xFF, 0xFF, 0x7F, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x07, 0xFF, 0x1F, - 0xFF, 0x01, 0xFF, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE0, 0xE3, 0x07, 0xF8, - 0xE7, 0x0F, 0x00, 0x00, 0x00, 0x3C, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x07, 0xFF, 0x1F, - 0xFF, 0x01, 0xFF, 0x63, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0xFF, 0xFF, 0xFF, 0x7F, 0xE0, 0x07, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0x03, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0x03, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1C, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xDF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xDF, 0x64, 0xDE, 0xFF, 0xEB, 0xEF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xBF, 0xE7, 0xDF, 0xDF, 0xFF, 0xFF, 0xFF, 0x7B, 0x5F, 0xFC, 0xFD, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0x3F, 0xFF, 0xFF, 0xFF, 0xFD, 0xFF, 0xFF, 0xF7, 0xFF, 0xFF, 0xFF, 0xF7, - 0xFF, 0xFF, 0xDF, 0xFF, 0xFF, 0xFF, 0xDF, 0xFF, 0xFF, 0x7F, 0xFF, 0xFF, 0xFF, 0x7F, 0xFF, 0xFF, - 0xFF, 0xFD, 0xFF, 0xFF, 0xFF, 0xFD, 0xFF, 0xFF, 0xF7, 0x0F, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0x3F, 0xFF, 0xFF, 0xFF, 0xFD, 0xFF, 0xFF, 0xF7, 0xFF, 0xFF, 0xFF, 0xF7, - 
0xFF, 0xFF, 0xDF, 0xFF, 0xFF, 0xFF, 0xDF, 0xFF, 0xFF, 0x7F, 0xFF, 0xFF, 0xFF, 0x7F, 0xFF, 0xFF, - 0xFF, 0xFD, 0xFF, 0xFF, 0xFF, 0xFD, 0xFF, 0xFF, 0xF7, 0xCF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x7F, 0xF8, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x1F, 0x20, 0x00, - 0x10, 0x00, 0x00, 0xF8, 0xFE, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x3F, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x1F, 0x80, 0x3F, 0x00, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x7F, 0xFF, 0xFF, 0xF9, 0xDB, 0x07, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x3F, 0x00, 0x00, - 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x1F, 0xFF, 0x3F, 0xFF, 0x43, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0x3F, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0F, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0x7F, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x03, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0x0F, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0x3F, 0x01, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x03, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x07, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0x7F, 0xB7, 0x3F, 0x1F, 0xC0, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7F, 0x6F, 0xFF, 0x7F, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0x7F, 0xFF, 0xFF, 0x3F, 0xC0, - 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7F, 0x6F, 0xFF, 0x7F, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x1F, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0F, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x1F, 0x00, 0x7F, 0x00, 0x00, 0x00, 0x00, 0x00, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0F, 0xFF, 0x03, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0xEF, 0xFF, 0xFF, 0xFF, 0x96, 0xFE, 0xF7, 0x0A, 0x84, 0xEA, 0x96, 0xAA, 0x96, 0xF7, 0xF7, 0x5E, - 0xFF, 0xFB, 0xFF, 0x0F, 0xEE, 0xFB, 0xFF, 0x0F, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0x3F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x3F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x3F, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x03, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -]); diff --git a/vendor/unicode-ident/tests/compare.rs b/vendor/unicode-ident/tests/compare.rs deleted file mode 100644 index 7ed13fa9999c7e..00000000000000 --- a/vendor/unicode-ident/tests/compare.rs +++ /dev/null @@ -1,68 +0,0 @@ -#![allow( - 
clippy::incompatible_msrv, // https://github.com/rust-lang/rust-clippy/issues/12257 -)] - -mod fst; -mod roaring; -mod trie; - -#[test] -fn compare_all_implementations() { - let xid_start_fst = fst::xid_start_fst(); - let xid_continue_fst = fst::xid_continue_fst(); - let xid_start_roaring = roaring::xid_start_bitmap(); - let xid_continue_roaring = roaring::xid_continue_bitmap(); - - for ch in '\0'..=char::MAX { - let thought_to_be_start = unicode_ident::is_xid_start(ch); - let thought_to_be_continue = unicode_ident::is_xid_continue(ch); - - // unicode-xid - assert_eq!( - thought_to_be_start, - unicode_xid::UnicodeXID::is_xid_start(ch), - "{ch:?}", - ); - assert_eq!( - thought_to_be_continue, - unicode_xid::UnicodeXID::is_xid_continue(ch), - "{ch:?}", - ); - - // ucd-trie - assert_eq!( - thought_to_be_start, - trie::XID_START.contains_char(ch), - "{ch:?}", - ); - assert_eq!( - thought_to_be_continue, - trie::XID_CONTINUE.contains_char(ch), - "{ch:?}", - ); - - // fst - assert_eq!( - thought_to_be_start, - xid_start_fst.contains((ch as u32).to_be_bytes()), - "{ch:?}", - ); - assert_eq!( - thought_to_be_continue, - xid_continue_fst.contains((ch as u32).to_be_bytes()), - "{ch:?}", - ); - - // roaring - assert_eq!( - thought_to_be_start, - xid_start_roaring.contains(ch as u32), - "{ch:?}", - ); - assert_eq!( - thought_to_be_continue, - xid_continue_roaring.contains(ch as u32), - "{ch:?}", - ); - } -} diff --git a/vendor/unicode-ident/tests/fst/.gitignore b/vendor/unicode-ident/tests/fst/.gitignore deleted file mode 100644 index 0ebd2add95d418..00000000000000 --- a/vendor/unicode-ident/tests/fst/.gitignore +++ /dev/null @@ -1 +0,0 @@ -/prop_list.rs diff --git a/vendor/unicode-ident/tests/fst/mod.rs b/vendor/unicode-ident/tests/fst/mod.rs deleted file mode 100644 index 5195efb13d2106..00000000000000 --- a/vendor/unicode-ident/tests/fst/mod.rs +++ /dev/null @@ -1,11 +0,0 @@ -#![allow(clippy::module_name_repetitions)] - -pub fn xid_start_fst() -> fst::Set<&'static [u8]> { - let data = include_bytes!("xid_start.fst"); - fst::Set::from(fst::raw::Fst::new(data.as_slice()).unwrap()) -} - -pub fn xid_continue_fst() -> fst::Set<&'static [u8]> { - let data = include_bytes!("xid_continue.fst"); - fst::Set::from(fst::raw::Fst::new(data.as_slice()).unwrap()) -} diff --git a/vendor/unicode-ident/tests/fst/xid_continue.fst b/vendor/unicode-ident/tests/fst/xid_continue.fst deleted file mode 100644 index cc15f2d5dc8d10036bcc13cc6785f161bef921d3..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 76143 zcmeHQ2cQ+j6<v^SVQsO|L=Z&?T|^=90RcM-7E};oLkub)AlLxGh6U^ryJ8nx3>py7 z1W}{0gBpW?iY6j9j7G(qJAHO`X4~8M|9f9v3~%e~{+;_~@7#0lxtn<<_JLnE{`}LA z|NG&;-);Ezo3Fq6a{U+UKL5|ZKl^m;Cm(;bX7#EMSN`jRf4=|TyUX8s`>i+Mc>T41 zy!y)DUw-MuWiKrK+h3o5ZpmNfKlR|`N#{(QF#hauW5=9z<{6{^aQdjzMxJ`g$tRuo z`w=G`JLH(7k2-Sjpd$_+c-Wzb9DLA$2Mp-nZ~wl1dRKJYr|aHbI(O>Wp*^Jhcr|Kn zR;zZMy7lVw6B}&4#g<z&^taw-+wHdh%?>;6wDT^F8aHX$Y}e+ykpo&l%iUY;v1jYO z+O+*`yY?MAcIw<^@2>lF+jqb2J${$+%01Qby`Tc1H(l@3cmIC<2OMzVK?fgl=wSm7 zKVs0}Bab@zm?3cNamNoGcEa!xzd!M$lTSHy<Y}W$|HJ4r&OB?(*l}l%pD^(pm^As^ z^UlBE!YTgL60?ZPza3$N-^Uo?3WpUgUi9q31%G+w>G^+t>Q7HT@%Uqp&U@tHhaP<3 z{`>Bod(Yi>-Fe6Dx7~WnALrbB(~UP=f8Dj$%${}iRaagy^YY6sz2xE<(=VDfb;^Yo zoPXZAZZjT!!my#oALq8^UOj);qx*jQDwA&a+qP}?YQ5(kt#)tOVz=hIHf!3Xaid*! 
z-f71jezW~{+itV9-*Brfx7fTv{d#rl)ULHz%^IFEX}2x6aGP<@UKPC^Rv2T1!Um^K zyJ-51i!ZtKvdd>)aphH4&zgPBwbxyL!;LrHJm-(M+<M#Xcief`-S^DB_rCidc<`Zz zADQ>)V~;=a<e#4U^Zciu`OAWZ&n{ZL<hken`nRPoEPL^#m;e6CtN(cI^*7#p>+N@z zuXy*p_y769zgB*@YW13rKK^9wr=R`%KcBDrV*Qt2ef`b18@~JR_dopa$De-Q_zU>> z2&&^}{W>~^>ci%9OEwv6RU0cAwsnJ5!#Y`_=F2y|1#bs;;+p?Xbu`uJzex_z(N039 z@!2*!UqJ`w3vpDQIKk#vRL>C^#!o=|nt0A6n9Qt>ItVZYY{N4YVSENTK>J@u8$@;A zD-FI<Jre4VY;s4YOa<zX091`f;%}^m=1t~ymU*2RvX?FW+w;#YPUS`?cAtmXr+}v0 zTx_oyjM<;VZ3Z}sjlMXEOXfba!vUt)#%b3u4UfRQN8z!@;R$higiB2hA%2NClz#hf zRzov2{v@EWiQv3Y#U^NE0xj4UnaJPGmSK&8y$@|F80b-4(WZiQmov7JdOk>+qmhYZ zMxDx-gL6U<lR^MBEd<NF5MY(R+UW7>v%2xeY~FIE#4l7N&K!o@$Bo0(F13F&lTB-_ z03;0O7>G#}EgxIeZWb4_MKeen%K=v41n}MJW22V8B+~U&0b$`dm=WQ`<1P=vxngiC ziab0j^j6G3ThMICS}207nJ(F{1%b(`bG!u}38n`D(1370u{ekf#`zyuI=tLB{qnO8 zTTsyAUmvU>K*fv8C`ds}k_3mJ;DKR#TzJ;iA|`AJ2`gX&VU2>{|D_NQ22ijBXt;zh zLm6}cxa@KOdt4ZJU>NR4OD4f#0O<+<{CPeA2@!KEhlOO}*+tAm0rZHmI0WEzcmvAF zv*n|ug3XBiy^gkr7#V}eg!Mi3WVHPuEFn<|v_KP38%7X_!x2Lp(7$&DzO%b`+h^}C zojUTjwH<2k8#0gqF$UsT0>jP7v##F&!2YrZ#Lb9r3481bt@na9ZE@5C3Y?MiI(Gp; z_zai2kyZQ<APoePrVWR5C}fAwe-Ro3fsGvth>d|A920{foO>P+cx)=LU;qsT0HPCs z@C3j$TGM2a;1vK0v5>pDh5GlG-Y}<(W(BdBml0J(NOkcd9mqly>z=!BzwMSeHxuXz zkt+sUT{LZsF(eH=9+6gR9t2*cP8|iLVogLKQ9&bN1_EG5R)Ly`un>GJ0!U<)m;``A zSJ5U&XY}p^6etByP$>+O-NY9L9|_Fu!*zsDf#9dOc;ZQLGMsWME{!}5|3N4f{|%t2 zxK2kd{6?dpgBQRIT*J@B>`VNsf=XhG`1WU)o{=QGBZTgiHIX3*Vf*FpaH9&m1aTM= z6Ab%%kQkZ%CJfry(BlyYrG|DOLis^cqi{Z(J=n*iZ4mhs<`xD%iLHSR#>e3ZOb8!9 zfcQND;ZGp=C-27o1Bi*i!G{14KpxbvbDZdC@l6nV5D-1u)+h*q^x|d61CnwKx_TCX z#7F@_QXnWPZ}rZ*=mH|9Kp;~91)2h)rsCS<+n=ATdrS7HZ@*DyC#fQgDe^L>h&(Hi zd0}@ZFds(c@%_gz`JnNgK*%JNN21$aa^yX;`I*)yZI7ECPzDI?SwaKhT};^^0E`bp zX0m)cezA8Salybs4dG)MdHWf_yl!;>9Gu7YsL3&m9v+Ye$F8TadVqz`s(V25TU4@! z6}&NGTJOCprktFE^a35%93ktuF5U-ye@>q^RWXx-#H3!?c^rE1&hA`s-zrQ;qP;>n zNy>%NUzB>Uxt9kG2LApe4wluwf^ovZV(cSjVBE)tS;EFZ(Ld0@$Z!_(@$U`xT2-#S zkc1Ev<^$R2Ksuw%HTC1HHgvnXb?u^vKRfN{d5(vM?l}E$E*lbmv;((<t)L-USFb}* zv^IC>mz-h?r)jT1VHeu;&ocYtW<lDcpG(C;>TmzDsx*VDxHg#*BB3$}T7(&uVl;bI zn<vUwrS#F%yE0!2n<QeU4*wR9yvA%xi=pKJSAaH(MUmKG5xx3ifD~jlAZ#}O%c?N4 zD!gW7fY?nLCee^3oTy1?nz1ZR5)Da=F%yC&&PyoOm=KKx&$x8Vo4(EgI7t}rRgE)U zfW#074TIohR!v{!3?#tP1VYmU;6zP?mkF*6V30K-DO=NDUxlVtoJ%vpdp2+`4&E@S z#gY?`9O4`p#DqT_Pag)tg$FR$@NT!k<}$EY9ROl*@|`c8O1XtDs6aCFB!YW$g*P;G z1Avi@(Mfu}ICfG#ttK6kh0l>0v<pwwZ633k2jcRY^BSZJn0Og;E)|>PX&m_g(Agm8 z!~}XD%Ewf5xl_%#j^MPVwQ_fcNGL%h$(%4R>#p4NkYHS$#ndf<DDHdEp<cxaqCD(G zX(HI3mMOwtB<1OXs2Lxk(Zyh>jT?*OY`_pLd3L?DpuCDpG)<F-Bwy#j<T~L_$lZWo zVVEGH?IwYGm8V2JVIbjo^^y3XbTmxYB0W<ATfdiF;XZ_@Rr->;roQAqnak)@ijCys z?WV}7bLE5<><egVbeKh_b)Q1G>ytD(w#S|rDs0zgumK|tq;v*+0e<0d^j=~{M(7?v zYy9MsSjR7pU*$?h!<1s-jH+y6b8?j?hm)oW1+tGLEKP@u^hC|V@-%@a9dp|vU8Lt6 zt1i>QBQ+d`Ee$C8eWE%sQOH*0aFWy9d7P|K1x)@)yNY|!f2tIRwk-Fgr?|5OcVUeo zr!fdH-KVmMwfy18?BN2FhqERQlWr7=@EONDCyGQ<*a6N_V;wUZl|>vdqASH(Ftr$B za*II{$q9{Iw4s0d%d&KS$)6J?#Nf2k0(0P{e1if;b-jktiKCJ!I@*95FLi^U%aS*) z?-4AIkPw5iVP(BJ<-qIL_unpgv&(ja385|&55d=a3Xuj)pCPq6gF?52%1B&NLtdzs z=yI+w!pLvhtVfxxv{H*@0x*#fHvJ}Tx~Jfw**Bf%i2D%VDBA!DbsOj@{_<?rP!?zL zq%st|325CAotqWQ$yA+d+#q7Li1P?C>59bI?a<?f2z4B$jYE}jVn<r0C6aCi&!+B4 z9RkAEBxFqhp=$#J0qMIC6BGsD;x_5VREk(3UIp{e^w3CwGnR0T2#gTUIEnicyHxpP zm`+AE0BVf@7aPGf|NWE(s0$vg7Wssfz!p%9xp2(jUOl(eDH<4arq@sNPtDCNN7u4! 
zlbdSGC9+T_u!burf}|8<%$*6=1A<B9_+9b;jQ`CzN=6(0Z&N<r<cB9ldJm<8JCWQm z1$Xw?y#@bmD3P7INVB079PBNj8`WTQz@B8xX7&ee3){i=V8*uqh;kG?g33pcrFQHD z?cZcs#T4t+&Z@jce@OC9s1`>`?>Qh~RxH0c1Pufw&?15m$wvE9$Q3F)3R|Fd6#KPE z1u6Z<Q=Z^J^bai#&JAkv2|6rGH{#Pku7t*g(6|no3%xy)>-D8J*0!zk6gY$3X~(t# z%ssN|^(k(w1E=MNF<dtgTy|>kiQ~AxA`~2i?g;RBh0xj&0Sn-2Xo6@Bae8R}8_^wJ zwtM3&RpbO@9DPx^Z_dpwt1V2lj|%ot1xjOTh@yPdEC`(7`Y6&a^&)^uPXbW!Npens zaW@D2Mcix3rXd<pTs>0px9u64nUwp=dGcpkq=_k*+K|PSf;4O^N(qwMS_&<N0J@MA zQ*i5%ng$gz{WxvN&<Qac6d22dZ~SM<b`%sh9%pNtbVwayxTf*N*_vrPT!%Eq;$nCE zOGHnW<2R$VCQUmKDW0Xk->Y1Plb$zhGrU+ci9aA6et==C1?6aP3GP`}<_HRkXY>f; zwVYMZRV9y7HS*Zmjm`uxr`-{0%OaE%k{_q2tIb%_&TdE>Do+5j;cbHAHd9V#heEb1 z5M*Uq&cVN;oc%+W(HeeZAQT+p1Z2Tl69+g8QyZ6t8|*T<COu3d0+KRIH5*wPK#Jby zW~1`Y(H3+`XB11UPIGQF&YxT_c15I8dNWaZgmF%&Bm!s(TyZAZCU2#C(ye=FeJd8z zj+|uF6z7s}ds4IpwkE<^Rf{|jM3s-oR;hkuh~wj~r6lFbf5|oeRD<ojl1RLvzE-dZ zek9?_MuRp>G!$1iE=+?Cvr3f_=?WzxJzIOl<AJJdNX8LVzZjtEtki^HJ0jlvBnc%w ztXi_xO!d5#W(o62kpW1Bmx5G3Gg1^ca55ywUI}NoVpTDyUkC>9A~-4HL}B3^yG9e2 zXbJ?<r5Oa{MF6UV4Ld|DWTUg5ai^t}O*Wg9_hKv%0)Suu$oe9@V1qogniBu?pN&!w z_+nj-NlwYESe0}Nsvf6EVdXV&cnKU<`G(cKVO4K<(jKxJD2oArRdJGC%*#0eujd4$ zpi@E~D+FsXu_o07s?z6A@b)_>9xKb?u>0{>>AhX;mr0E=muFx_iMV`}8B=xX`fRG_ zghj<tjKuPHh6|3`PUMgn&mdgiiJauGtG4E>W?Q)g1vHYYNhC>{%TkUs60j-G76r;& zj)L7uKINK6W%;2Qku06z7-mGdH2GATi>YW~+G+AAYu`Q<y?#e=_kh={YemuudBLi9 zdhc;h0XOL-A3?AA$m=~qYs5#PEtjs<YCgttNGx>hr11g$R;|mas_FSJS~~aFvMH4j zTMCn!ZbP=ooN|M8D=+6_kjyNt(tt^=Je3irFA3@YSmrja{9I8bH=QvSI^b4iEz<C) z=hhZVR^202>Wjt;2;Yf)i>hPF_Lq$&D*j3=#gUZigmqG-m2OUFl$N5o=bt-i;<zzb z*ST{iGkg`7O=Wh4lDT9mm$<(fNky=JFbrVbu5WBfxuhs!52#{aWTsRcSq1OQf1u39 zG=x7HK+k45Y#_^mdPYoev{$M-tklyj$E$=O^qwcn664IXM0oKWN4#TJXc>T&S_WXn zmZ^%L`Nvt-{dryuZ+Ok8YIUx=SA#`cR7@Jvuwqg%{&(;;4Y$8zhog61*JyUrib=hi z7vmbmYV(;E>09pJ>hV2?x2fIk;|@1>`o1$ce_q$2`_}7mbI&@xpXuAYf5oI42i81f z?qL;^rXO+gk&};pX~=-%pBc7tc!v|;J^9*^{Z8Nfj3>@|bli0lDkk+kk9)L5<GmC{ zhYn;Nc4*(WO&(Rtm8mx04WLC!mQWQyB!WPMf*~9LL~iIqL@unlYsV5Va^9?3lKRtz zX>;@_g7c~%PS&$WcgvfAgA0=L!cYbxkhwXld=mo80)c}-+yGsMu*^Sn+t(~n7pU@u z8m<l_(KaV@bQ3WT6yLzbG*}uCcBOaWzm?JZ*qEvIw)U52T2ycE6#S%2S1Q6uSRcvX zJLBVaown-rw6W$u?y@<bEW%%%s>#Uy#g-a3(RGw`4JF6R!nnF9zhP{atNvlG2VL^p z-yxal!WjOVfU^)S8b8?{-?Ejsi>?=H8Gzk{Sqoi3_6=Ex272kCDS`_0_|vdrl$-XQ ztj;)Th>L17O>L}hm-OR5UGk=t_YEe&dy}YQ7OMaP#w|)$-zb?%Op1Ul0M@4x#Tu#A zqDZ{3W@q7Xa^NeLy3MOruhCRm*MCWl^S{d5f=Z-4DJPJEgtKq~ui<CFYj>*jh3l`o zhQ-v32#Gc(+s5HIvn>V@mvnEIjBIkA_-MRFv*HzS;sTa?1g+u~TprA5<uQ`mv3@f) znnh_?+za_GxfBMej1^lKL;7<wXzrHC-DkBgO#28Rt(pLEVM>=S2TeX<yQe01OZ7Ij zCAh>YzeMW~2G}kzLlnjcHk5JAzp6w=pYUybx8a*`R@H6Mj-G>*YB!ml-S_EA6XRnm z!Lu-bz1rF=n}`$zREYu-LtzRZbn1*<#Hz}6v*eek{&Tz&<pCOF+D4a_$7Nlxr~|?c zH>%eBo5Fon1zQ#`n$NXA(n8*fqHGQ#rqGfaN7AW~iHBthEvdi@JHjogBwC36&RRl> zwL0yh=+@)+`qQhR<Kr&5HZuQGK-H0vgUGB4g49F~8_4Yux?FW?W3^$L^$WF_+6E&5 zqiQz_cGL1T&f+y#kDAmQ#)`vn9nrB<1n&n=j*KzKHIDnml+smEk^3*smi%kt`Yiwb zVH|g?EKxKv+Legx+#M-%Yn!d(ixZLBM9$6?i8Jb%VYP9~dqM5Ta;s)Ps-3tLR=f!8 zF8Yc;CjU+>Zi^Q8B92R7DOVVmXHa_8Q8Jo>Ly9VjMi{u#d9=#CSu7y3PC(#%|Iy-_ zgQIme`3R9gDQh`dy1nu$ia%*(Pb~6<AV(2AmLfK;WYNIWuhbR)Y)5A#+05`~_MU(I zv4<a0fwj1wYNh^-mWxFK1_87HTJMGDw`prO!d@%p*7$YiKluA5dA>zGhNdlsr+u^n zwn^l&|1t-fPQ95C+p1C4N&}75gKR7oV=)|YpyS)lws9P6*=#KxYdKDAasUKkH~3SF z+P_6?T+vZK`_wT9cwPpdjCkc;TENmJ!#CaliTjmQLW=A_`$B&qa6RyFT6{_#6{XG| z7kB54Tq_$R3)fwL!;K-Sb`pc<M=3m?ZSdz8(IzUT=?}CTKd=3RNUB?htF&Q?2}uMh zNP4vz;OsTfiV*^ZE^@v?x{-`xyni98)Q^~?0Guighg=F(kk*K01r<tpv1@rL>w>?y zQmmxli>UD}1wdoIGw;8TOXS{e1X5`c>WkScwE{_UiRwutnFTY=P^uUjp%%(|hD?F2 zeUy0Pe%*Tz%p7`CGX!JU15_vH>CwE!sM$-*goU=Cazt2z{@jB5x%dsNSuFr*LTX$3 zyizG4^~&R4jaSW7C86j`kr!v=INs3%5pf>Szi*%3#E>vcgHC`tb>v}oJq=E|VtzwG 
zq|hQ}Dq|i<yv!0&L9!(cs<I@wcI`WWn)?8B?*V}308qhthmBEufRg+++(%!E>=^p5 zW+$kzJOYx*IfAH3HfuIq18^;U7lL>hYRv<~_5xc1EB;-@w}RZRpi+gG<)h3r0u}63 zkUpA`(<VyHE-pR8W>HC{7f638`317Z>fDnh*C!uo2Dn|cFPIMZKOnY5fBO&S)9I{| zoE$cijT#cp<!_njTP^<H<g;y^$PEy1d@Q%gW^|Sv#wLINd^)4Jt$eg*^(r$estQ>? zT{s5UH^9j6)UiWCG`4;{6foBSSplrr-xHC)YU~*Pf}mZbs#qC4=2*2v=7padT{V@r zClURt*oLbu<t!bSrknk{3o!vcja5NPV?f<mr5~jjP62iadVAzf)$fS?IUKcOD`ohv zzh5B_KEHDzzhp>D?m^x`YYW4wQu>}x4b-o@(BEzFu4nic=hvf!wDj>uV#H3Df+c2# z%JY&Ad>l_k0p9h4V=`9m0uZ&kz~4G%G6?9uD+gYGzN+)|t=X4F0+wE1Q_c+|CWQd! z@@XMlG(F<-{)|Gpuw>6or)(9GGIeWyx-0$36til^En=0z2zrWzumUEHxF)eKVOyMn zg)Lq#T2<dcwkS)em7M1!Y-hoA=1pcR*s7vYA@@+^Z3wE3U|Xm^p*Tyzp)XGBY8-RU z8jTAp4PUmy?^Hs+>mYYdjQv4`)SR&eL2&h+kRE(05^mUkPbbvvoQNq%&FB!h8ck)u z;IuU+jl6A*wr%-k%J$PO_%ln$F0$Q<azI&aS(?8Uo6V<?`I~MesN3vWiHa1BJeBb- zlmQBnS&}@P#s=%xGu)NB^-|d&_H!3BLy*iZ(eV}1%Vb+vpa{c^ag}9RU0EzR53KGL zZ*Yp#jAI*&l!3^X069acbn^y{FLyy02O#h7kj+r6XFaCLV$hgz;<tG`3on{uDQI{k zrh|`!^0EVuMvWy01h(h3a$sH!@pk-!tWUT!Hs<H^#c@1Lot{GTHltHDQ)FpICum-x z00hB{%h^60&G~am!$K<WE~;90uX<O{4VjrOQ^k^K;-HQw&dB_yrBu362IY-6d3E%# zRPVYk*85-PVp=jHlK2gk1R_sD6ql$2eItdQZy9ay`{vRWk&Q%a!R8>NS!&N)%zZ_y z%{fO^lhUZ95y=r2bP2Q4us<U5`B$HvRsEg_&bgDOuoT<>G0oqe$fl0!XAp4|zak>m zYDB}*%z4y&XUTI(-V^@X>SM9#|B@<{!iqERxPn}Y`&gM-TfvucUjZhBGqjT7zzP7i zLBDCGu%g_cFTeWwn{PLK_uub-_}`E8*XjS1-dyZwlocIXjZCGs<oCBf-b7m!h+pg% zfEw^i?Ez-HpAtxYNz}tx586P~UA|cNc?5M>v3hdWzg3l$h6R-m%_z=3iAcw}<=)~b z&W**Ac&n2GSehIysmM-9Mh6WCbLJ?zFcZFI%0wDzowXlbj0WI6GRM^n-Cvp2F0!3x z0fr{pq&HNAo=lw6MCKi;mU=aeps1yiR^UUEd^{$p+(IR=zQAb%dQmv8$toMJqy>L; zwodXqrFzE++R5@&3;nxN%|-EIZDka`&@1P2zTY+L1J8b*Amn56(2D{$r@0$WhGn6i zbD_^FRWGY9mMRBv*LTt=^b+Alg}b;m_5J?X?7dY<XL9U~5W0fEJc?Uf!dcX*&7gGY z^omVyO=@*yt|BtZyLGxbkJ7t4%kuR%z>PO)CvuooZt?G^Btxk5%Rn*!KW$25YGeIX zX;&)c$}}u4QgSolXSu(k(k&_ef*E93%&f+=c34<OY{I9V{V<p6pnP{a-;J3Qi~=`n z>NWkck&E1tNu<AJe3zE)5v3N+C)~&*k2ri_$L4v|p=ciHMgS~E5%IyC7=Tv60|+@^ zY-wZh+VZzNL3D7;ja3Er1^&X)wW~_Fvm`>(=@|cte;0Bu6A|%LU}sa9K8sGTs+4UC zO>1HLpUBVA|G5$jEX5^m)A;+Bim6CIHE~q6G!8m61|;@zi`3_(djTX@lTd_bUct8( z@5{eZ>p8{C_Zw)NvoAVuowh|xyr#;R9aJJe({w;eZSkIAkz0<<^?l^whaTjW-KR~R zLgz_Luaf?Z{3h6csHfm>mc1X#_sjx(HeNbJsMJ$!3w+WK+&lNK+i#1MfK5JC9XtsA zHxtSzNnDyL6l9(vC`S!$xfSaykRpBnq}Bqj;wQiic!CvLfNWGfic$L;%GX{Cxbp%` zJr~WX<W^54)w$L8qe?GQ=*1yI#id@h<I>0k;L+3ff#mN4h)P%@Q51G!g;K2qY$`&9 zmCF#(0g{Ku$@)<0Y@@ZGe)jMGe2%QtU$d$6UkS1J82(W}UhP$)uf`z~DL`VXP|UsO z?j(wSIaZRa65R@wWt2H|9*UWV)cXMLz0VPkgvBm)9lT2+>XjclCN@9_NcnM(3K(&# z<ka-TyfAuaDKsVQW-XB+Cu!ygACrgR48S`^`NqaDFGRiELM;%Zkq%C6ds$q?0T)T# z`m@LL$~z)~Qx#FU%F4Kddvtn+7J^y`&n{v{4-hOsnS^!ce_timrd`wsl+&=!^zo_K z0o1dtd@4u!*teL{ohBC8_o(2iSfi7AIBN_`z~obUu`{o4|3a-VA{JZI!XtU06bwqp z>;6ls9lvG76U=Yd)l}(SmW)Un>7-rRVmBV$4^@u{<sa7<9xzJ^0^l?!Xxa>DRUz63 zA+}ShpGRyro)mjT&IkA>=hR_`9w6&21XV$b7p}rb5d=y=tE5bx)BZU>tl~H4_sgj} zEX}9@<(KQD38|)z$UG7xb7L08e=O;)5JQj(HI^OqWli}*EKl3DDS0o1?z7A{#B__+ z3;eORc0EJO_L!NhE{;*=k1=%n$)9U$dal2h<@qJq1iySo+3e=<v688hx**Wd4VT%5 znCFZBLAdB3tvN{l&MtdLvsy6<qvOl#zaW_cyo!R~C`q@J%Q4FtwXF)mrDcK=IA+tL z2wZ3~ZL%&y5hQ2)NX(?=FOg5F<$BzL|4NapP9-iJRGNI*gUF*%IVy1vsx<cdM-{2N zGB&%wEC)lKOAH<VagHHXqx9}CAS?%x?83_l5}w0JzaMeJ(Bp<2J@|;j5eGczfd0Mr z>qb(Ul$LR;7R`5!(4YxP-dplq=mZd;IqW92jOn}s%R_c#-P?(LbtZ~bfYU(~GlK2_ z1R3cGy-?t|cOO6ious+|*FOVN3<d_{4<3#Ig$shgsvkR!wvRu2m`DO65Gstou3y-{ z$YOZBnTN$Po7D7P-OL+V$GfYJw@p27RDEwr18@8mUahUXHyV14xAvNB>oxq1S8pfp z+g-esO}w>DycSK#xyzb)A2;_tZ0R+wUE|n2ya{`Fx3(tBx&L=)TW@wd@0<?aXC1wz zo%toEclCbUhcD0EH^p_Q_Xr$3sF$~6Z}09t-t>NI`+@=9-~+ws2YPi6@tPgxT{_U~ zG|209q<7fS-m+sn|2Xf~<Gl^Ty!}Ram!9a=KgF9o(z|e!_uc8<s?lD>S>Enry+h9S zj+x-iJcqRT;AC&e`Cj)a-lNmJfz!Q3GrXVHGX*yy_WeHbpx%7rwO1ArxSZ4lQPyup 
zkIy_~^y&Bu7~vp^v}@U7w|t0=X!O#&T)_`{Kxl!BbP{|pzC2CnY;yxCz$~|oluK*@ zqm85B^gqDpGtN8<U<}zvLW;chJ6S!C{3AZ+lZEydWE{ZC=QLA7-kjSP%ILNjX8Ndh z6^r_mvW|eTSJ$_{s(fs*D0iA3($kUb=bTWsU70mp-MH9400Hc!DTFS{p992JNz`-} zvlcZ^Ray4Y$8=OrfO_d6J%VHZ^z%mlm!cR|`81lh`A3I(E&1mG@-ez4n?`@<JRATE z<!v(m0)Jel#mCB^Z*p;@C~^_$N-@Evs1e@qZC0=O=;Kc`s-FLKfff^YdbBu7NivFM ztk8INCAjJji+TRh9**AxLlX<OuYv#d6brx9;9J=Cy#D>DR-w(Vshg#R)eW+omDgXr zshui$0*F|qgyA>6$4dM?7JPBZp%}^Lbh-UEF#}<>-fg;VCW>5YfvVDkpQs^~rWY5q zeqHdgO8MqWi|f}jIh8|Cl`i;`392-qX^o=(_d|{Bmx$rgX9>AE0&Z2>5&2%ceV1ze zp0fOLrT(faiyT-vV;xp<+RWzf!~e>wv80B#w&uh2cW!j<eOnQhK(pH3l27{8q}Fll Oo;a@IXYPW<qyG>8?~OSC diff --git a/vendor/unicode-ident/tests/fst/xid_start.fst b/vendor/unicode-ident/tests/fst/xid_start.fst deleted file mode 100644 index 771dc459c6868e86a080eb6c08c4502a0df1dbf7..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 67370 zcmeHQ37}3z8@?o4%1ER{$kw&*OSo4qk_ctX63LQ_qEO1#qMa7~zbGoHKc!IGNsH9q zo}xvelvKYhTPp8A+c{@u&dgc9?|fU`Pu*{3&di+i&Aeykop*WOW3tlV14sY*^N%CH zA3k_s|8M*D?%BQTf4~0n^Uj}k{P@H6@4wr&b<4NkZ2tPIFTdFM`DYtG{p911KK$VQ z_x}6tJL}(G_tu+lyuSA3RZFK_bM@p&6R)~*!uTsLA2)VP{^-j_jT|xj(qWejy{Lb` zzI}RMctNk8J<jjm?Yyq%b~&eWr;d3Y&TbF!ezFP`KBmaAMT-?LQS!J_$DdFdl{vBO zN#)9)T;UYf^Qje2t5o^)DpjkUQN2dZTD8y2u2Z*O{RRyiHEt5}&P|>A%}BC2&9`XT zs`XiI+P2HdZGU!$ypEkZpVQ^suIF`w?&tUD+3SJ}d-v(vum42@1`Zm0@sOdH47+sr zh>@c%8=XG}#*VxEit!V!yb4WBgPBk5BUp5I$cOMY87S|yHLG8J<>i-NeBt?3&po?x z#qwvKUiQ?JOP~19<Bu(Qbnzn(KeTA!gAXj2fB(FH|LeZF_ug~&U2|sNdB^Ry-FnN- zvu?WahU;g}m_BXlb=R6{cgV$q2Mruxrr_M1c5U07)w)&77R{SAZPK_=!v^*1)vc3# zX6;%vYg9j@TGcA2SFUtg#ZynIaB}%_CzU<13@UxX@uiL{S)zEcqQ@3Frf{JwGX<Mz zw`sHH2FmjxJd5roPMUo6HB+v=ZtAq@GiF|Y!;Lr1y7`t{Z@c}DJ7>?i>+XB*oqONE z{yp#h`3oL+aN(kd9)4u;qe~up{69}Dee$VgPd~GK#mZ-&TlM@4FTV8hE3d9z^V-_i z-+1$_b#JeK=iUFl_x=YTe)RDtpKkc<^NpLn`0}f-H-Gc(maW^q`+oZmKkoQx=g+_V z`oCSf_w3#G+x`Ow4;}vf$RB_HbrcXDg1_Tu*ROQl6qm}^*A@_~xJ;&kSjn<25-eTD z?LU-!d2R!IW|vzD^m)oj)m|^X_vx6HOWkUD2VN^@O~-5G<VkdIw}v8F-*kP1V~gVZ z;awg|vilwfc(;er;OzI#Y6<FAAHi}7_IinCq?`m&^#C^MC3UMV*rI35v|hH<Y}M#+ zswgh#eaV4Eb6Bw!!NAO3fBi8<W%3fu;{iFK_^(<3eHdZrPZ=6!&!VF^zdd+H(lg;3 zr8}TSK_@c#^#GrGcklY~htGA#+KBnJ%f~rKUzMg@2M1{cHJK0x2Tg0Ws7_tr2V;M_ zGn{HQ^rjLD+@;+2?t6H_y>p!_`1AoV58a=9VhD4`BM0_-EICX-vLaz;Q12xOIfKbk zSzr{DP{v8EFXgrJlPJhUxXaZ-T1xsJQ|;RG7m(xwG>;w(GnEOf>xjtw))Fq5Fv%qx zaoP}*G%dGw^;5$L8OTVLr^7}5oQ(gkVCUq-DGqlnGuOPDuv^g*mqd&o^?*yn<lX^X z5;EgjqP7f{N3#+*#o@aCbMGE3P5ox`hpUOqY9*If>BUud&9OvPs+el1Etl%iHa6K4 z8MqKzh%Gm5O2nT``YDZl3!ZZ%qS9pm_2f@3TQX9Biw2me0I-~t$gAKQ+<?fe@Ygib zSo};EGk_Up%qdGnaMLU&e=|;^Tf)s-9JPPnw)JB=+pC`So^LFjw1t!SusxUv9$U3+ z-mFPu&K>Bbil{V`C(<9t=9DCh76Wk0#mSotTqF)PYC^5raAr1bY5qed9~x+`V?I?a zwfR=Sa&b@u<LWXna1c<@IPPSa+7ZB|O40BUN`aLf$r5N(9FyeN^kY%PjOELZE?%_o zf%*3np)!^!GlB9A<MDerXdrtdeJ9Nl1Jj4MUPUrx0ID*BEx*9^7;Mh)q~MnItufGl zH)sfMU5B8{;H{bg6DZ@XHETH8ArCr2r_MNaP8a+KYc}xTZi3VfXEmSxnli5<bdlfK zC*=CRz1y~8rIz(#TABm>N=)Y1UW<ng88i@U1Y8TR{_Q*wi~RLdjbBbsv{u$W3#!5j z51{K+a|RW^ba(~<TDEe^bg9;)&{A08`6biKR93>Q5{(r&Dl4EmE8K33u1BW~Z`p9g zcqdO&6RwmB&{YYU%HW-Hb>(VXV`NRcsxWeEi<qJazu;I#(7kP7jgK(<MaWoKFM4sn zDoY~86m*u>?5mTH*((65b`(d`RzX^YSQTj{)o;d-(NaNAX)VWK@ntSP%+^#+gh^lt zLDkL!=h3v_GZjruqPBjUYN%IRTQ8pJB__onUB+*`ekRjS=UUrtU~Qf=yDojSer}<m zG<aO>Bb9~9;02&Hd%;0gNjvX1^aDpHEpEGbyQK<h7_Gf!{+O}jv=@Xzba@QZM%Pu* zUclyU6{`VmG?1H>CEPNWq;(c1ub}T6=y3xoG4Z&bs7t7E48xq<CPY_&tx1Av^*>n8 z>`43$$+IPG05&ukF?=Y|&ok?ioW$teb<<c!Eo62jq?8<n@fTU1TeKtv_=C+v$d_a) zLJF)8;jiQmu_JL_TCEug-EnCvyzTN+lCQV~YxRMs_0hx0C5V*YEvWWNu`?lbxt80Q z5DODu0~2zB3#@P;wA+s>9f)O=Wf<iv9Zs5A2@s1Wo9%}SDXRd&7UGySX@xfABz_Zs 
zuYm~=3ljitV*)FISeXDQb|w(i&;;#DIl)Eo0L&&Y0jhp%8Kqi6MPTGaq>ug<PC$(G z0H~Rso-GQd|LW6MUNQ|ib?sEEQIk9^?Z%^S1z@&OSkm-X6qJz3eFR;4Eq$$OUFydb zq`8e>=u<&Z@>eN4F_av)jUR$cCf*pHU0{2CRfNG6L}nvamkn!>(^UGg8q{{$hvx-H zB4OFEg4u|bF7**wvf};(V^+SAq?*;(yeSsDXMA>&sKjNPpR2KD4r0D6ZLgL!zOc<p z*RcjdvdYI=W%;^f=M31yUcL-^@uefZWCr*0?yhv9%U(+guhU&o%Fxsp)`v805)GTV z)F453o8FuJMNIOaz$=zAN3N_bgAF4831b$+@8rnWd=E#oMKlKZ&d)^erVWLex3@4( z8r1Sw@+orBNPKP<&e#P^dE3MQP=UM9m#Y1?G~XKUps}PGcM#GzY6zW%NJg#heq86y z<tw!a=L2K^JnYUTY#{C}L)s)PH!Qn#WC^1FOvndyONaMWcJ%W;i#_wSw_}y%<QLK- zijHo`)!8*8)V<HeNVuvQ3beFji(;ilz56)Xz$UoZB+_Z6rqB)PKnU_P965M^IfN2_ zP<#>(;cF+j*#&X3!_J{mdCaK;Ys_PFHs_kkit1<qKoHE+rXkE>0kF57b3dOHamxed zmj|=w;L6N;7TqmRPzL5QT%eQjPMbc2=X)8g%|N0H`pDD`{?I|}1}}NQ6Yuv;8;O?y z_k72Sp*7s~I3~|ypCxh7fc}>LoB7OBk9l7fi+LM|GXp06@~2h=F!{>Mw%(R0Cov0S zT1w~vJ)swv9p35rJCu&n@jRxAA*>dHr!iS#_haDx$2dn^kO7zzGND<?5t$SK_C^No zj*N5YE0qQ$*YN4%QE9b8Wck1<!R{0tfw+~chP}0GR_C+pIFc7Dj7^HjFD(S%Kp{XW z0A^W4E&-H<lb{^1X?7+VfCyh`1&WJ6Qf@2Hz`|(SQS$$w{*ld526Xh>J_5`kL2`I= z>Y~|d0WyLSl0%jdCJz~by)OhG0jGBUEWSSImkgjs`4MH17>*IcZ7#VD1&8V2C><Q6 z!)<Lzu{kd105UFNj{+m(+7#>6iV}PS*b$)?fCwNXZ;IaX9X_o?#OWnz5fnzCK)V_T z=>U$>VOdcC$LRnL)ImKiF;WL`s1D#*9d<va1X?c7U#90mTAhW|Gh;QgflyrvI2i!R zkA5kD426%eEln{z44pJA3^7d}*vLn`1~p{_YF$x|6sh~YOVy$~-wLN&{lS*qj2MG5 z3?q$*GHxsxAYd}+gUlsI)#AbLz?)c5B5!9=NvTI?H-rQZ1jn!hfD$oc6aM<46bV%W zB~?@-^it*$T~-z|7Yk~1YdN$O;9#jZSb<2#yHYqPEqpe8C6fs+E6W5J80+c82emES z2Af+aTQ!)qT(aCCY<ON(uN-HpD^+&VPRd&w=<<ek8nRS)n-{amOCW0(^#Rc8%qBkF z(<@srMX!vN7_|n5fh9WeK?vG}t4<ov+nY56a%Tx+&S0=(0PN=%!rj0T<#~Vz>VBMv z(}t1R&?mZSPG#BAzy4r*ZFmDL4NR)vDIwQ@CP|#XF_t8Ok|ZKPjFnc5Iv46Zn{>l$ zm+4?q{Z6@dW10vrUXj~}%`vY6-0BW|BnC7mmBe7YHwD&pwnr+v`zRz*sV77!_f}Mq zNWDZL&OqDChsGAlspSZbr9-4sK}p#t0FuX(r7mEVJt1-hX^15RQEnk-r;8bqroclO z^TC2Ob*@u@Q$TZ#)0OfISn(uS4P)5lH`Y>!+viuU2oG*MMGwh5!~$VHyM3;~ZCyGR zy168kMG|3_oza$37=0PM;Fk?+4ep~W{@=F2Frg~DNQT0MOrJrO?$iefZkC=$$Y6;D zfPjC?FJTy5p;EddQiY^k0o6NFzDTh8P~~DaBskscLO2|8P-Tv(%*@CbkTQc&<}MZl z^n^g30C%jnCy(191GgiykTe1re8>_p)}?_nik>fWcp&??=o0AtkdvR;)d^62P$>r$ z@^T%7Duy%R02Ba(Th0~14w@oK8o<IymC%6Uq!IcxvuV}i>4`y!KAwU~sNpw$*}BBr zcG^#!7D9d;j!_WeF&H*~dGXaQyI#i9acd$JRC_{@{vY&($E0V}B}Is03=2Z(*o38% zh~Nu(9opx%qk1zb?t;V1R-yqH<RA;;%yB*e7nKskeIS{dGbC|16DZ>fEO1Edgle|w zcay33sM>U>=nQCmMp0d~4;zYxHL-1?UW>)(IGG$<gps8Z+QxMV3&ubgn1;{{gB)Q5 z9f_8blHQxc<ywK3!V#ki@t~f^L{h|onp_gdRED;3uX<{jB_Fkkixe?#XiVX5+)tt} zal+!T>u62V2}XBAZqFQzcV}oubn3O&OukZu1=NpUl%*K(8h2M1jsYwX=TZTkKs*+i zcUUPmeh_q{Zf4S|3@gakX@prtu?5E`xR#HS))-pLEcr3xD*94~S(43~nq3Hmu4Coy zFg&Jfy^&TMd=DmH1Yi-i09cGIAaGp56PS|dp{zn#d4<m}@?p_+CHj=g&M#3qJ3kTs zwwIZH(sSjvRLH8hq*8W%wJM3YN22<i*eHC@3#y?S%W9$Qopl%1UsFFje`2G<O)537 z+49X+8_&vbH!3$fzjKFkI&C;7JAY-@L)`|Q->zrd3u^b?(r0DAbr*d$u*}7shTc4E z%803>vh(YW<Gg0Fj`e=~x7WeGQNwz5qX<vu&}+3b0H2tcSAi2~G1fQA=Y0}{^|1u2 zbDr9e;?eA?q@B_^icKS|_*97W8IA#UgFeV79w{1VZcf`an)TltNYNBW(F0bOy1XdF ziP0HtP-KpdOVO?c{6zzi2*oMHVxmuhHpE64x`3^&bAY;z2){d~q<O10Z_;km@|bQN zNh8!Ow!t>Fa>dj2d&?xhHT9Gi$xn=<6VKVj{fesMKoa1P&w{#}#yOqKsUpDgNFXtl z{b<0O`PN2acD>l%@u+Omvv9Z_whUF8H}8`cts3lr@APU1RMxWIK%qx%6A(jWThf~q z-4<wgTc>}&6nZwD2`a*83!-lg5<ktGWW!3Q)Llt6fJ$$*RTymm5%#-@F-UM`(#o+R zFb8NR_0W$Il1w^v<rUE0bX0rw74A8!XtjhGx%bWG@x&F27a^LsPN?cfwoyk8CsRv4 zu{bZXmh7((9D>CJaZYH!%>`iUG|N|!th=Cu6bBwt9I&e*Sk8*72aac4aq)T3#&n`F z{46#yjxR>I&9Si<^p=||L$O!sLsD?s4uXjaEN_pn-Z{{|YzG2zO5_wX(}Mv5+33I+ z9an3|TvP5S#vGcRX5`^K&Vl_TU@dXv7YscYdT)*fJ5D{x<g{tog8GxXqKfKSy5iV= zyf85!ClC_Yh>nEX4iFlH`4Rw$PD8@e82ta7`4bM*;&Iuo;LXo@;AYu9cqcSCi$G$H zS|2penWGe@UR(15_mU5*ZRu|e4XI*@IO|9_s2FRg%)Z5}h8Z=)>cRUvSojOKy(Q~4 z1jv@*G15a}<Aw8KX@osR0iJ#Xo+|`4pCKqSu2?%{i6AZ}hR}jmWI@BQx{E#09C39| 
zU<G$~u9Ay~Fpve6WswXVU9FsgR6z2%rKS_cW4vbBh)=)rd(LSEY7>QLwKTJxE{;(K zgoU5kP6yYLI)kKN`JL+;+kNLEPFa)WFbLA+glEwB6fXBM-CDyykXUC7q)cy98q($H zhU;g}2#mO_XDhH{T3iICA*&ckAqcd*B1Dqzbe-xU?(g0$wMWmyG|K@Ru@i(#bTa%> z^U*W8Ae(Z)XdzNGc8j*h$Plc~zMk|1>KkC})hcXDUa?#aCTTW+P5SRrZs;khm*n)U zAvT0nK2F>-<wBrrDk9k()LTN)D3PknT?#ixRb_e<##*OUJoS{=wpVAIDp^U+L@BJ1 zb=Cjdt#bEoBi7Icg%v9(X~P-cu7xJ3Z%UH$KkfKYk8U06fHiT#_!hAUWAHcxf6oNn zv^{=d6C{X+6_1^@b)vF+^3#kD^Sd355;~g1162^93=NYX_SHpRo4+EjcUTT+l7{V8 z!5lj0ne7uCj1fFCK}f(bfULMW05+~+AIS?_?`(}LN>L@lo~W%`vF|BZPA8Y}92WR$ z%jH5e!!;#IFYsZH)urDFW1<M=Xn@CAjPJ_Lsbh*c8swA(n)o<u)t85l=IYed6ox6{ z&>zqgnqkhG#3U8H^KCTW9zA)dUNI`vS0CFt`aTMpnZ!BY@W~4#T&JSF!C?<oESK50 z-)bYdPz(4TEU<eMEBdqJQzXV1L8Yy?oBnR=?FK9t0np1%Ew5v!RU4??18!O0J!%pJ zFO{RY>xd;}GTB^04nQun53s}!9|0r9KBV+0xJ=6XWYFj&PAk0?4i+40<HjdhJhQ~s zp=ro~6tPQ3+=E76I(M-Z8(4FN7m07F1-b17o}ue$%vkV^-mPm#%3_M@okc_u9Od<K z4)dzMdbS#jucoF4NLIP^5(iX%1b!?Mkt9fE)Fh#A(u+%jA58NSZ9JhHkA}L!fMtzj z$$~+kK*O|1Jr0y|v1ibr0CUHYdSW{&270!N45=qj-E^#kss$Lv=&FF9&{X9L)qTgv z{>3R}PsQ9=*gS6WfmR^kI`#WJ^B6QXx{1^E^!>JgcC^-h)IP_w$}!~^QInLACD;JP z2#d!UJV0fP9K13Z+2E3#DK+BET9XG5aQf8y4FJ!jFwpv@o0X)Ng+qn$aBFzbITW~L z>xv?dNCbMcZmbEHDxMdt^*~gpQge#{S~osbEL4X&Z`ndYI51|Rp_S}!C7f?vghI{f z4+pj)@v7WN1~yOu?-DKcr--e7%d+!$nVItCG<|*+s_H+8A!?|MxJ>f79DU2%Fww2+ z3x-GP`lXs9v8#?;U`711&cQhn_b1m)d@enrIcCuVn8~_TWozd7Eq9|vGL2%VU>0R1 ziYwcu8In}-3((GwBodKoFgqh`dIo+XZmAjcle2bCi!fwZ?(9Y$33QY0HPmSDQCVF@ z&$8wI!VJmMgNBNd_8d5&G%911LDJpC<F_UJQatBD-NT_qp5vL)i)J0MycoL;fmDrz zYCrt<b&Jr=ar9+qeo6@=1vgJMVv>fz9$<!ya7BMI9qOXBezT?SGPhx{aTZLQT@<Ql zQ6<=jINjX27yBol@*s#`eZBdcZ?|mS_TBf}NzAdGKmURelhE!+XsiRq5>AJ;#x=ef zRDkj#nF`NNT%HGhhb@&^RApeQSU=sy+xM_lwb~h8R%+5|Lic_vKNt=59RJL{nCgAO zj}v0z^+YfK%4`GhGl;947sR|#BxzqrJ3qRlz91=tLh&Q#e=cKh%M2GM1{m@PeKpY^ zQBj&90!N}(BDV0j))&)4l=kJQv(JRE@Yg0j9#!^fo*9>Aw@{Ti^FicOc`<Lv?)WRP z?!xP6)iNf7SKE~|fNnztfZ;@{7^kUbFD_CR{YizhR;*j7tACHsRTy5uRUOc}gc6%( zS|{r~gvpsK(W{X9<H@R|mMM9yijvmRIO-^E6P`r_!%f|o{Lyk;6R#I4-lq?BmWlUf z322Y%g&W)`Y8#PhUNrex$IF?;J8+?9SrgIE@BYRUEr3x0;@78mJsA!k)Q#u+ZCj-s zuZK6FRoz6H>sZzo+J&vlxpZtu5UV&pk{@lHec_pg0dgM{s>3e&+ln!=K<^+MR?el_ zggyQ33(j+o#BiRo>CrE-5p9ZMw}`k8jjC5==`GMd3(+B7I@FmRkszV6O5OzV;y283 zUk@29IV{S*PJJRhGrC8fVjRVl34sVq7&>0Qm|3}W6VMW1tKPkO_GoMfU26jHHDFLk z(+Gg!raeAbq7P6@u0SBrtP-CsJ3!zmgIS2T6`XMeM6YHRtwsE_K_DiBS~K}6s0udj zsKa*$fta-Q&Aa#P-S^x60|yTs{{6@w<aI=UModg`b>~>(*nWW$TNmQApAPEqypDma z_vx}I`E=tF9(vjK?R1QMY4wEvLAS>>lbPB$j%}O+M_b^f)c^IVCzn3)*pf#UE%by6 z3oi4x@B+N0XM~j*1dbK0;~MX|ckX>GwgeFP-Gu;)9s+n6A7WsM1sRPIqhSTCe3r(3 z0#EGHjhnvsQfq%h*F&^FQ>its=Z=HawSX;&-TFDzcgiDj^>#Qqni*E{^vWC(<|G26 zhdqhN2~0#=qd}=0S8>ac$O=)dm_;)acfoNV+Tqogo1Q_S6<%80?UmhG(vx9o_D?2t z{+<wu_c1!ap3{0(*BBFE$)g3KtfPuV>BIw|;Q-ju&Q^AKZSCuNRu{b1cFgMnVRoZ4 zIEZV_jl1!C+;Pqrq{6{Y(V`mLe6`AR_!$0nSHcC66_^GL@_}`*mS799!;I<x<;=jg zRIh0e@OM4W#=|zS9On3N4(|1AZHx*q+Q7i-HH;u*VZ?)Ks2RRZ6f#97_>1LP)4mO= z5#LKCL%W7m6oeg~_|%JG@0R%bs9j8>-<>ZXiPH)?Ki$G!a!P0_oq}2^k7me58Tv|u zzxPv$EF-5_<Gx~e*pwA}zhUmj(jz|VHyd|rEI~Wh^9}GCXmpxq)2ysL-Ueytbf5N` z%W!l*QMT}of7t%rwyi$sY`PzvNoC#yQ5o>w2snNwcS8V>eJ>j}@&-!9M+#AXzUzNv z!a3xn@BC`!Ck=~IaN5rhA(w&Sbi;Fc%eWrEVTd0h!Yxs@wSr5wM8Q@ZuU$}{(~thT zrkSr<UL0eRvZkn`-!Lcl`YU$)Bo5k0w|gwtSTNsF$xzju16DANXl97zD@ST2x|vHa z8G7-c0sZ@4*sCWNQg`jr`Rub=5i@27p`}*!YE?ZD>#a~YPz1-hu&{m<KY6@hLsfuk z;CPM!;qjmmaHuzG1B;<Hp*dh&3TS;6v;k;~#dbLaqY=lo08BMRHv`<}6$qvukl^wd zoxm}}gJG&YGo_fTCQgFMM2ZJ4Z2BlD3f+mLjaM%G<S{LaPAK|Cu?od|72jE+_i-Ca zoqoboXi3?-%3piR+=_E5&)ZS5YR#(M&X~6&wtq)!yjg4MnMLcoUYE4*-r$Y~<hxQM zuinZxwXUvfep}0Otq-<g`Fc5b=f0Ht_SwVo9`CrP^Rh0tb^Z9fJG&n^e`C*O7p&~P zs_%3C7YrygXyxFtLl0k4Xn3iSIhW1bF*yJDahtE`c;)u1dQ952lfpz}UStcAzP(Cx 
zCj{%&p*B@qfkSc-+uxxk_m3LYqk`HeVYT_k1pRAWcYhN}G5!!?G4S}tW-wq@*W)jt zM*H$8J0&MB!|>3&grG#jc&czFC6ncfI9KKB<gPO*xSQYMCSHp#4Pl0|O3xrYbxO1t zE}B&e(AE?XRXW`=v($Lu8GCQXGw;J4K$z3Ld1Y4F0L%eU2Eeud`^TnvX1R{BU-$O< zcis(z7$I5k{P`C;nohKsf@g`E{Jn);1?Fq<n6Ckg@-3?Mj9NV(r5-h4`)88BO7Xsk zjRh=DAXt7U0v=<TlefYvER75iw0jTQo8WcT+YvdvIaFor(lCn#XI0J_b!gO~LRsYR z+Ct=SEdQ1i3Et|_V*Yg(m-MewP$TFFV8bFMtaQQAGfr?P7Ctt0>$1EM+e-^(!D+h6 zz67t!Y2CfbhNwZ+(~bJm**#fuE_US4F7b1koI6u$On1%ITDXl%XaFfsM3wYrWV~!B z?WHHB2{Z*OCbAl36-wxThYQ7i3m;>|a>1<;eT)$^&v?j8ISrc?%35Fa*b1ZWCEB+t ZMY7iZ(5f)qddd~U^Nl|x?|**F{{g3x<S_sM diff --git a/vendor/unicode-ident/tests/roaring/mod.rs b/vendor/unicode-ident/tests/roaring/mod.rs deleted file mode 100644 index 9396996b746b34..00000000000000 --- a/vendor/unicode-ident/tests/roaring/mod.rs +++ /dev/null @@ -1,23 +0,0 @@ -#![allow(clippy::incompatible_msrv)] - -use roaring::RoaringBitmap; - -pub fn xid_start_bitmap() -> RoaringBitmap { - let mut bitmap = RoaringBitmap::new(); - for ch in '\0'..=char::MAX { - if unicode_ident::is_xid_start(ch) { - bitmap.insert(ch as u32); - } - } - bitmap -} - -pub fn xid_continue_bitmap() -> RoaringBitmap { - let mut bitmap = RoaringBitmap::new(); - for ch in '\0'..=char::MAX { - if unicode_ident::is_xid_continue(ch) { - bitmap.insert(ch as u32); - } - } - bitmap -} diff --git a/vendor/unicode-ident/tests/static_size.rs b/vendor/unicode-ident/tests/static_size.rs deleted file mode 100644 index 4b6f16f1981095..00000000000000 --- a/vendor/unicode-ident/tests/static_size.rs +++ /dev/null @@ -1,95 +0,0 @@ -#![allow(clippy::let_underscore_untyped, clippy::unreadable_literal)] - -use std::mem::size_of_val; - -#[test] -fn test_size() { - #[allow(dead_code)] - #[path = "../src/tables.rs"] - mod tables; - - let size = size_of_val(&tables::ASCII_START) - + size_of_val(&tables::ASCII_CONTINUE) - + size_of_val(&tables::TRIE_START) - + size_of_val(&tables::TRIE_CONTINUE) - + size_of_val(&tables::LEAF); - assert_eq!(10248, size); -} - -#[test] -fn test_xid_size() { - #[deny(dead_code)] - #[path = "tables/mod.rs"] - mod tables; - - let size = size_of_val(tables::XID_START) + size_of_val(tables::XID_CONTINUE); - assert_eq!(11976, size); - - let _ = tables::BY_NAME; -} - -#[cfg(target_pointer_width = "64")] -#[test] -fn test_trieset_size() { - #[deny(dead_code)] - #[allow(clippy::redundant_static_lifetimes)] - #[path = "trie/trie.rs"] - mod trie; - - let ucd_trie::TrieSet { - tree1_level1, - tree2_level1, - tree2_level2, - tree3_level1, - tree3_level2, - tree3_level3, - } = *trie::XID_START; - - let start_size = size_of_val(trie::XID_START) - + size_of_val(tree1_level1) - + size_of_val(tree2_level1) - + size_of_val(tree2_level2) - + size_of_val(tree3_level1) - + size_of_val(tree3_level2) - + size_of_val(tree3_level3); - - let ucd_trie::TrieSet { - tree1_level1, - tree2_level1, - tree2_level2, - tree3_level1, - tree3_level2, - tree3_level3, - } = *trie::XID_CONTINUE; - - let continue_size = size_of_val(trie::XID_CONTINUE) - + size_of_val(tree1_level1) - + size_of_val(tree2_level1) - + size_of_val(tree2_level2) - + size_of_val(tree3_level1) - + size_of_val(tree3_level2) - + size_of_val(tree3_level3); - - assert_eq!(10392, start_size + continue_size); - - let _ = trie::BY_NAME; -} - -#[test] -fn test_fst_size() { - let xid_start_fst = include_bytes!("fst/xid_start.fst"); - let xid_continue_fst = include_bytes!("fst/xid_continue.fst"); - let size = xid_start_fst.len() + xid_continue_fst.len(); - assert_eq!(143513, size); -} - -#[test] -fn test_roaring_size() { - #[path = "roaring/mod.rs"] - mod 
roaring; - - let xid_start_bitmap = roaring::xid_start_bitmap(); - let xid_continue_bitmap = roaring::xid_continue_bitmap(); - let size = xid_start_bitmap.serialized_size() + xid_continue_bitmap.serialized_size(); - assert_eq!(66104, size); -} diff --git a/vendor/unicode-ident/tests/tables/mod.rs b/vendor/unicode-ident/tests/tables/mod.rs deleted file mode 100644 index 72bfd8bd7b9507..00000000000000 --- a/vendor/unicode-ident/tests/tables/mod.rs +++ /dev/null @@ -1,7 +0,0 @@ -#![allow(clippy::module_inception)] - -#[allow(clippy::redundant_static_lifetimes)] -#[rustfmt::skip] -mod tables; - -pub(crate) use self::tables::*; diff --git a/vendor/unicode-ident/tests/tables/tables.rs b/vendor/unicode-ident/tests/tables/tables.rs deleted file mode 100644 index 9db6fe9589160b..00000000000000 --- a/vendor/unicode-ident/tests/tables/tables.rs +++ /dev/null @@ -1,361 +0,0 @@ -// DO NOT EDIT THIS FILE. IT WAS AUTOMATICALLY GENERATED BY: -// -// ucd-generate property-bool UCD --include XID_Start,XID_Continue -// -// Unicode version: 17.0.0. -// -// ucd-generate 0.3.1 is available on crates.io. - -pub const BY_NAME: &'static [(&'static str, &'static [(u32, u32)])] = &[ - ("XID_Continue", XID_CONTINUE), ("XID_Start", XID_START), -]; - -pub const XID_CONTINUE: &'static [(u32, u32)] = &[ - (48, 57), (65, 90), (95, 95), (97, 122), (170, 170), (181, 181), (183, 183), - (186, 186), (192, 214), (216, 246), (248, 705), (710, 721), (736, 740), - (748, 748), (750, 750), (768, 884), (886, 887), (891, 893), (895, 895), - (902, 906), (908, 908), (910, 929), (931, 1013), (1015, 1153), (1155, 1159), - (1162, 1327), (1329, 1366), (1369, 1369), (1376, 1416), (1425, 1469), - (1471, 1471), (1473, 1474), (1476, 1477), (1479, 1479), (1488, 1514), - (1519, 1522), (1552, 1562), (1568, 1641), (1646, 1747), (1749, 1756), - (1759, 1768), (1770, 1788), (1791, 1791), (1808, 1866), (1869, 1969), - (1984, 2037), (2042, 2042), (2045, 2045), (2048, 2093), (2112, 2139), - (2144, 2154), (2160, 2183), (2185, 2191), (2199, 2273), (2275, 2403), - (2406, 2415), (2417, 2435), (2437, 2444), (2447, 2448), (2451, 2472), - (2474, 2480), (2482, 2482), (2486, 2489), (2492, 2500), (2503, 2504), - (2507, 2510), (2519, 2519), (2524, 2525), (2527, 2531), (2534, 2545), - (2556, 2556), (2558, 2558), (2561, 2563), (2565, 2570), (2575, 2576), - (2579, 2600), (2602, 2608), (2610, 2611), (2613, 2614), (2616, 2617), - (2620, 2620), (2622, 2626), (2631, 2632), (2635, 2637), (2641, 2641), - (2649, 2652), (2654, 2654), (2662, 2677), (2689, 2691), (2693, 2701), - (2703, 2705), (2707, 2728), (2730, 2736), (2738, 2739), (2741, 2745), - (2748, 2757), (2759, 2761), (2763, 2765), (2768, 2768), (2784, 2787), - (2790, 2799), (2809, 2815), (2817, 2819), (2821, 2828), (2831, 2832), - (2835, 2856), (2858, 2864), (2866, 2867), (2869, 2873), (2876, 2884), - (2887, 2888), (2891, 2893), (2901, 2903), (2908, 2909), (2911, 2915), - (2918, 2927), (2929, 2929), (2946, 2947), (2949, 2954), (2958, 2960), - (2962, 2965), (2969, 2970), (2972, 2972), (2974, 2975), (2979, 2980), - (2984, 2986), (2990, 3001), (3006, 3010), (3014, 3016), (3018, 3021), - (3024, 3024), (3031, 3031), (3046, 3055), (3072, 3084), (3086, 3088), - (3090, 3112), (3114, 3129), (3132, 3140), (3142, 3144), (3146, 3149), - (3157, 3158), (3160, 3162), (3164, 3165), (3168, 3171), (3174, 3183), - (3200, 3203), (3205, 3212), (3214, 3216), (3218, 3240), (3242, 3251), - (3253, 3257), (3260, 3268), (3270, 3272), (3274, 3277), (3285, 3286), - (3292, 3294), (3296, 3299), (3302, 3311), (3313, 3315), (3328, 3340), - (3342, 
3344), (3346, 3396), (3398, 3400), (3402, 3406), (3412, 3415), - (3423, 3427), (3430, 3439), (3450, 3455), (3457, 3459), (3461, 3478), - (3482, 3505), (3507, 3515), (3517, 3517), (3520, 3526), (3530, 3530), - (3535, 3540), (3542, 3542), (3544, 3551), (3558, 3567), (3570, 3571), - (3585, 3642), (3648, 3662), (3664, 3673), (3713, 3714), (3716, 3716), - (3718, 3722), (3724, 3747), (3749, 3749), (3751, 3773), (3776, 3780), - (3782, 3782), (3784, 3790), (3792, 3801), (3804, 3807), (3840, 3840), - (3864, 3865), (3872, 3881), (3893, 3893), (3895, 3895), (3897, 3897), - (3902, 3911), (3913, 3948), (3953, 3972), (3974, 3991), (3993, 4028), - (4038, 4038), (4096, 4169), (4176, 4253), (4256, 4293), (4295, 4295), - (4301, 4301), (4304, 4346), (4348, 4680), (4682, 4685), (4688, 4694), - (4696, 4696), (4698, 4701), (4704, 4744), (4746, 4749), (4752, 4784), - (4786, 4789), (4792, 4798), (4800, 4800), (4802, 4805), (4808, 4822), - (4824, 4880), (4882, 4885), (4888, 4954), (4957, 4959), (4969, 4977), - (4992, 5007), (5024, 5109), (5112, 5117), (5121, 5740), (5743, 5759), - (5761, 5786), (5792, 5866), (5870, 5880), (5888, 5909), (5919, 5940), - (5952, 5971), (5984, 5996), (5998, 6000), (6002, 6003), (6016, 6099), - (6103, 6103), (6108, 6109), (6112, 6121), (6155, 6157), (6159, 6169), - (6176, 6264), (6272, 6314), (6320, 6389), (6400, 6430), (6432, 6443), - (6448, 6459), (6470, 6509), (6512, 6516), (6528, 6571), (6576, 6601), - (6608, 6618), (6656, 6683), (6688, 6750), (6752, 6780), (6783, 6793), - (6800, 6809), (6823, 6823), (6832, 6845), (6847, 6877), (6880, 6891), - (6912, 6988), (6992, 7001), (7019, 7027), (7040, 7155), (7168, 7223), - (7232, 7241), (7245, 7293), (7296, 7306), (7312, 7354), (7357, 7359), - (7376, 7378), (7380, 7418), (7424, 7957), (7960, 7965), (7968, 8005), - (8008, 8013), (8016, 8023), (8025, 8025), (8027, 8027), (8029, 8029), - (8031, 8061), (8064, 8116), (8118, 8124), (8126, 8126), (8130, 8132), - (8134, 8140), (8144, 8147), (8150, 8155), (8160, 8172), (8178, 8180), - (8182, 8188), (8204, 8205), (8255, 8256), (8276, 8276), (8305, 8305), - (8319, 8319), (8336, 8348), (8400, 8412), (8417, 8417), (8421, 8432), - (8450, 8450), (8455, 8455), (8458, 8467), (8469, 8469), (8472, 8477), - (8484, 8484), (8486, 8486), (8488, 8488), (8490, 8505), (8508, 8511), - (8517, 8521), (8526, 8526), (8544, 8584), (11264, 11492), (11499, 11507), - (11520, 11557), (11559, 11559), (11565, 11565), (11568, 11623), - (11631, 11631), (11647, 11670), (11680, 11686), (11688, 11694), - (11696, 11702), (11704, 11710), (11712, 11718), (11720, 11726), - (11728, 11734), (11736, 11742), (11744, 11775), (12293, 12295), - (12321, 12335), (12337, 12341), (12344, 12348), (12353, 12438), - (12441, 12442), (12445, 12447), (12449, 12543), (12549, 12591), - (12593, 12686), (12704, 12735), (12784, 12799), (13312, 19903), - (19968, 42124), (42192, 42237), (42240, 42508), (42512, 42539), - (42560, 42607), (42612, 42621), (42623, 42737), (42775, 42783), - (42786, 42888), (42891, 42972), (42993, 43047), (43052, 43052), - (43072, 43123), (43136, 43205), (43216, 43225), (43232, 43255), - (43259, 43259), (43261, 43309), (43312, 43347), (43360, 43388), - (43392, 43456), (43471, 43481), (43488, 43518), (43520, 43574), - (43584, 43597), (43600, 43609), (43616, 43638), (43642, 43714), - (43739, 43741), (43744, 43759), (43762, 43766), (43777, 43782), - (43785, 43790), (43793, 43798), (43808, 43814), (43816, 43822), - (43824, 43866), (43868, 43881), (43888, 44010), (44012, 44013), - (44016, 44025), (44032, 55203), (55216, 55238), (55243, 
55291), - (63744, 64109), (64112, 64217), (64256, 64262), (64275, 64279), - (64285, 64296), (64298, 64310), (64312, 64316), (64318, 64318), - (64320, 64321), (64323, 64324), (64326, 64433), (64467, 64605), - (64612, 64829), (64848, 64911), (64914, 64967), (65008, 65017), - (65024, 65039), (65056, 65071), (65075, 65076), (65101, 65103), - (65137, 65137), (65139, 65139), (65143, 65143), (65145, 65145), - (65147, 65147), (65149, 65149), (65151, 65276), (65296, 65305), - (65313, 65338), (65343, 65343), (65345, 65370), (65381, 65470), - (65474, 65479), (65482, 65487), (65490, 65495), (65498, 65500), - (65536, 65547), (65549, 65574), (65576, 65594), (65596, 65597), - (65599, 65613), (65616, 65629), (65664, 65786), (65856, 65908), - (66045, 66045), (66176, 66204), (66208, 66256), (66272, 66272), - (66304, 66335), (66349, 66378), (66384, 66426), (66432, 66461), - (66464, 66499), (66504, 66511), (66513, 66517), (66560, 66717), - (66720, 66729), (66736, 66771), (66776, 66811), (66816, 66855), - (66864, 66915), (66928, 66938), (66940, 66954), (66956, 66962), - (66964, 66965), (66967, 66977), (66979, 66993), (66995, 67001), - (67003, 67004), (67008, 67059), (67072, 67382), (67392, 67413), - (67424, 67431), (67456, 67461), (67463, 67504), (67506, 67514), - (67584, 67589), (67592, 67592), (67594, 67637), (67639, 67640), - (67644, 67644), (67647, 67669), (67680, 67702), (67712, 67742), - (67808, 67826), (67828, 67829), (67840, 67861), (67872, 67897), - (67904, 67929), (67968, 68023), (68030, 68031), (68096, 68099), - (68101, 68102), (68108, 68115), (68117, 68119), (68121, 68149), - (68152, 68154), (68159, 68159), (68192, 68220), (68224, 68252), - (68288, 68295), (68297, 68326), (68352, 68405), (68416, 68437), - (68448, 68466), (68480, 68497), (68608, 68680), (68736, 68786), - (68800, 68850), (68864, 68903), (68912, 68921), (68928, 68965), - (68969, 68973), (68975, 68997), (69248, 69289), (69291, 69292), - (69296, 69297), (69314, 69319), (69370, 69404), (69415, 69415), - (69424, 69456), (69488, 69509), (69552, 69572), (69600, 69622), - (69632, 69702), (69734, 69749), (69759, 69818), (69826, 69826), - (69840, 69864), (69872, 69881), (69888, 69940), (69942, 69951), - (69956, 69959), (69968, 70003), (70006, 70006), (70016, 70084), - (70089, 70092), (70094, 70106), (70108, 70108), (70144, 70161), - (70163, 70199), (70206, 70209), (70272, 70278), (70280, 70280), - (70282, 70285), (70287, 70301), (70303, 70312), (70320, 70378), - (70384, 70393), (70400, 70403), (70405, 70412), (70415, 70416), - (70419, 70440), (70442, 70448), (70450, 70451), (70453, 70457), - (70459, 70468), (70471, 70472), (70475, 70477), (70480, 70480), - (70487, 70487), (70493, 70499), (70502, 70508), (70512, 70516), - (70528, 70537), (70539, 70539), (70542, 70542), (70544, 70581), - (70583, 70592), (70594, 70594), (70597, 70597), (70599, 70602), - (70604, 70611), (70625, 70626), (70656, 70730), (70736, 70745), - (70750, 70753), (70784, 70853), (70855, 70855), (70864, 70873), - (71040, 71093), (71096, 71104), (71128, 71133), (71168, 71232), - (71236, 71236), (71248, 71257), (71296, 71352), (71360, 71369), - (71376, 71395), (71424, 71450), (71453, 71467), (71472, 71481), - (71488, 71494), (71680, 71738), (71840, 71913), (71935, 71942), - (71945, 71945), (71948, 71955), (71957, 71958), (71960, 71989), - (71991, 71992), (71995, 72003), (72016, 72025), (72096, 72103), - (72106, 72151), (72154, 72161), (72163, 72164), (72192, 72254), - (72263, 72263), (72272, 72345), (72349, 72349), (72368, 72440), - (72544, 72551), (72640, 72672), (72688, 
72697), (72704, 72712), - (72714, 72758), (72760, 72768), (72784, 72793), (72818, 72847), - (72850, 72871), (72873, 72886), (72960, 72966), (72968, 72969), - (72971, 73014), (73018, 73018), (73020, 73021), (73023, 73031), - (73040, 73049), (73056, 73061), (73063, 73064), (73066, 73102), - (73104, 73105), (73107, 73112), (73120, 73129), (73136, 73179), - (73184, 73193), (73440, 73462), (73472, 73488), (73490, 73530), - (73534, 73538), (73552, 73562), (73648, 73648), (73728, 74649), - (74752, 74862), (74880, 75075), (77712, 77808), (77824, 78895), - (78912, 78933), (78944, 82938), (82944, 83526), (90368, 90425), - (92160, 92728), (92736, 92766), (92768, 92777), (92784, 92862), - (92864, 92873), (92880, 92909), (92912, 92916), (92928, 92982), - (92992, 92995), (93008, 93017), (93027, 93047), (93053, 93071), - (93504, 93548), (93552, 93561), (93760, 93823), (93856, 93880), - (93883, 93907), (93952, 94026), (94031, 94087), (94095, 94111), - (94176, 94177), (94179, 94180), (94192, 94198), (94208, 101589), - (101631, 101662), (101760, 101874), (110576, 110579), (110581, 110587), - (110589, 110590), (110592, 110882), (110898, 110898), (110928, 110930), - (110933, 110933), (110948, 110951), (110960, 111355), (113664, 113770), - (113776, 113788), (113792, 113800), (113808, 113817), (113821, 113822), - (118000, 118009), (118528, 118573), (118576, 118598), (119141, 119145), - (119149, 119154), (119163, 119170), (119173, 119179), (119210, 119213), - (119362, 119364), (119808, 119892), (119894, 119964), (119966, 119967), - (119970, 119970), (119973, 119974), (119977, 119980), (119982, 119993), - (119995, 119995), (119997, 120003), (120005, 120069), (120071, 120074), - (120077, 120084), (120086, 120092), (120094, 120121), (120123, 120126), - (120128, 120132), (120134, 120134), (120138, 120144), (120146, 120485), - (120488, 120512), (120514, 120538), (120540, 120570), (120572, 120596), - (120598, 120628), (120630, 120654), (120656, 120686), (120688, 120712), - (120714, 120744), (120746, 120770), (120772, 120779), (120782, 120831), - (121344, 121398), (121403, 121452), (121461, 121461), (121476, 121476), - (121499, 121503), (121505, 121519), (122624, 122654), (122661, 122666), - (122880, 122886), (122888, 122904), (122907, 122913), (122915, 122916), - (122918, 122922), (122928, 122989), (123023, 123023), (123136, 123180), - (123184, 123197), (123200, 123209), (123214, 123214), (123536, 123566), - (123584, 123641), (124112, 124153), (124368, 124410), (124608, 124638), - (124640, 124661), (124670, 124671), (124896, 124902), (124904, 124907), - (124909, 124910), (124912, 124926), (124928, 125124), (125136, 125142), - (125184, 125259), (125264, 125273), (126464, 126467), (126469, 126495), - (126497, 126498), (126500, 126500), (126503, 126503), (126505, 126514), - (126516, 126519), (126521, 126521), (126523, 126523), (126530, 126530), - (126535, 126535), (126537, 126537), (126539, 126539), (126541, 126543), - (126545, 126546), (126548, 126548), (126551, 126551), (126553, 126553), - (126555, 126555), (126557, 126557), (126559, 126559), (126561, 126562), - (126564, 126564), (126567, 126570), (126572, 126578), (126580, 126583), - (126585, 126588), (126590, 126590), (126592, 126601), (126603, 126619), - (126625, 126627), (126629, 126633), (126635, 126651), (130032, 130041), - (131072, 173791), (173824, 178205), (178208, 183981), (183984, 191456), - (191472, 192093), (194560, 195101), (196608, 201546), (201552, 210041), - (917760, 917999), -]; - -pub const XID_START: &'static [(u32, u32)] = &[ - (65, 90), (97, 122), 
(170, 170), (181, 181), (186, 186), (192, 214), - (216, 246), (248, 705), (710, 721), (736, 740), (748, 748), (750, 750), - (880, 884), (886, 887), (891, 893), (895, 895), (902, 902), (904, 906), - (908, 908), (910, 929), (931, 1013), (1015, 1153), (1162, 1327), - (1329, 1366), (1369, 1369), (1376, 1416), (1488, 1514), (1519, 1522), - (1568, 1610), (1646, 1647), (1649, 1747), (1749, 1749), (1765, 1766), - (1774, 1775), (1786, 1788), (1791, 1791), (1808, 1808), (1810, 1839), - (1869, 1957), (1969, 1969), (1994, 2026), (2036, 2037), (2042, 2042), - (2048, 2069), (2074, 2074), (2084, 2084), (2088, 2088), (2112, 2136), - (2144, 2154), (2160, 2183), (2185, 2191), (2208, 2249), (2308, 2361), - (2365, 2365), (2384, 2384), (2392, 2401), (2417, 2432), (2437, 2444), - (2447, 2448), (2451, 2472), (2474, 2480), (2482, 2482), (2486, 2489), - (2493, 2493), (2510, 2510), (2524, 2525), (2527, 2529), (2544, 2545), - (2556, 2556), (2565, 2570), (2575, 2576), (2579, 2600), (2602, 2608), - (2610, 2611), (2613, 2614), (2616, 2617), (2649, 2652), (2654, 2654), - (2674, 2676), (2693, 2701), (2703, 2705), (2707, 2728), (2730, 2736), - (2738, 2739), (2741, 2745), (2749, 2749), (2768, 2768), (2784, 2785), - (2809, 2809), (2821, 2828), (2831, 2832), (2835, 2856), (2858, 2864), - (2866, 2867), (2869, 2873), (2877, 2877), (2908, 2909), (2911, 2913), - (2929, 2929), (2947, 2947), (2949, 2954), (2958, 2960), (2962, 2965), - (2969, 2970), (2972, 2972), (2974, 2975), (2979, 2980), (2984, 2986), - (2990, 3001), (3024, 3024), (3077, 3084), (3086, 3088), (3090, 3112), - (3114, 3129), (3133, 3133), (3160, 3162), (3164, 3165), (3168, 3169), - (3200, 3200), (3205, 3212), (3214, 3216), (3218, 3240), (3242, 3251), - (3253, 3257), (3261, 3261), (3292, 3294), (3296, 3297), (3313, 3314), - (3332, 3340), (3342, 3344), (3346, 3386), (3389, 3389), (3406, 3406), - (3412, 3414), (3423, 3425), (3450, 3455), (3461, 3478), (3482, 3505), - (3507, 3515), (3517, 3517), (3520, 3526), (3585, 3632), (3634, 3634), - (3648, 3654), (3713, 3714), (3716, 3716), (3718, 3722), (3724, 3747), - (3749, 3749), (3751, 3760), (3762, 3762), (3773, 3773), (3776, 3780), - (3782, 3782), (3804, 3807), (3840, 3840), (3904, 3911), (3913, 3948), - (3976, 3980), (4096, 4138), (4159, 4159), (4176, 4181), (4186, 4189), - (4193, 4193), (4197, 4198), (4206, 4208), (4213, 4225), (4238, 4238), - (4256, 4293), (4295, 4295), (4301, 4301), (4304, 4346), (4348, 4680), - (4682, 4685), (4688, 4694), (4696, 4696), (4698, 4701), (4704, 4744), - (4746, 4749), (4752, 4784), (4786, 4789), (4792, 4798), (4800, 4800), - (4802, 4805), (4808, 4822), (4824, 4880), (4882, 4885), (4888, 4954), - (4992, 5007), (5024, 5109), (5112, 5117), (5121, 5740), (5743, 5759), - (5761, 5786), (5792, 5866), (5870, 5880), (5888, 5905), (5919, 5937), - (5952, 5969), (5984, 5996), (5998, 6000), (6016, 6067), (6103, 6103), - (6108, 6108), (6176, 6264), (6272, 6312), (6314, 6314), (6320, 6389), - (6400, 6430), (6480, 6509), (6512, 6516), (6528, 6571), (6576, 6601), - (6656, 6678), (6688, 6740), (6823, 6823), (6917, 6963), (6981, 6988), - (7043, 7072), (7086, 7087), (7098, 7141), (7168, 7203), (7245, 7247), - (7258, 7293), (7296, 7306), (7312, 7354), (7357, 7359), (7401, 7404), - (7406, 7411), (7413, 7414), (7418, 7418), (7424, 7615), (7680, 7957), - (7960, 7965), (7968, 8005), (8008, 8013), (8016, 8023), (8025, 8025), - (8027, 8027), (8029, 8029), (8031, 8061), (8064, 8116), (8118, 8124), - (8126, 8126), (8130, 8132), (8134, 8140), (8144, 8147), (8150, 8155), - (8160, 8172), (8178, 8180), (8182, 8188), (8305, 
8305), (8319, 8319), - (8336, 8348), (8450, 8450), (8455, 8455), (8458, 8467), (8469, 8469), - (8472, 8477), (8484, 8484), (8486, 8486), (8488, 8488), (8490, 8505), - (8508, 8511), (8517, 8521), (8526, 8526), (8544, 8584), (11264, 11492), - (11499, 11502), (11506, 11507), (11520, 11557), (11559, 11559), - (11565, 11565), (11568, 11623), (11631, 11631), (11648, 11670), - (11680, 11686), (11688, 11694), (11696, 11702), (11704, 11710), - (11712, 11718), (11720, 11726), (11728, 11734), (11736, 11742), - (12293, 12295), (12321, 12329), (12337, 12341), (12344, 12348), - (12353, 12438), (12445, 12447), (12449, 12538), (12540, 12543), - (12549, 12591), (12593, 12686), (12704, 12735), (12784, 12799), - (13312, 19903), (19968, 42124), (42192, 42237), (42240, 42508), - (42512, 42527), (42538, 42539), (42560, 42606), (42623, 42653), - (42656, 42735), (42775, 42783), (42786, 42888), (42891, 42972), - (42993, 43009), (43011, 43013), (43015, 43018), (43020, 43042), - (43072, 43123), (43138, 43187), (43250, 43255), (43259, 43259), - (43261, 43262), (43274, 43301), (43312, 43334), (43360, 43388), - (43396, 43442), (43471, 43471), (43488, 43492), (43494, 43503), - (43514, 43518), (43520, 43560), (43584, 43586), (43588, 43595), - (43616, 43638), (43642, 43642), (43646, 43695), (43697, 43697), - (43701, 43702), (43705, 43709), (43712, 43712), (43714, 43714), - (43739, 43741), (43744, 43754), (43762, 43764), (43777, 43782), - (43785, 43790), (43793, 43798), (43808, 43814), (43816, 43822), - (43824, 43866), (43868, 43881), (43888, 44002), (44032, 55203), - (55216, 55238), (55243, 55291), (63744, 64109), (64112, 64217), - (64256, 64262), (64275, 64279), (64285, 64285), (64287, 64296), - (64298, 64310), (64312, 64316), (64318, 64318), (64320, 64321), - (64323, 64324), (64326, 64433), (64467, 64605), (64612, 64829), - (64848, 64911), (64914, 64967), (65008, 65017), (65137, 65137), - (65139, 65139), (65143, 65143), (65145, 65145), (65147, 65147), - (65149, 65149), (65151, 65276), (65313, 65338), (65345, 65370), - (65382, 65437), (65440, 65470), (65474, 65479), (65482, 65487), - (65490, 65495), (65498, 65500), (65536, 65547), (65549, 65574), - (65576, 65594), (65596, 65597), (65599, 65613), (65616, 65629), - (65664, 65786), (65856, 65908), (66176, 66204), (66208, 66256), - (66304, 66335), (66349, 66378), (66384, 66421), (66432, 66461), - (66464, 66499), (66504, 66511), (66513, 66517), (66560, 66717), - (66736, 66771), (66776, 66811), (66816, 66855), (66864, 66915), - (66928, 66938), (66940, 66954), (66956, 66962), (66964, 66965), - (66967, 66977), (66979, 66993), (66995, 67001), (67003, 67004), - (67008, 67059), (67072, 67382), (67392, 67413), (67424, 67431), - (67456, 67461), (67463, 67504), (67506, 67514), (67584, 67589), - (67592, 67592), (67594, 67637), (67639, 67640), (67644, 67644), - (67647, 67669), (67680, 67702), (67712, 67742), (67808, 67826), - (67828, 67829), (67840, 67861), (67872, 67897), (67904, 67929), - (67968, 68023), (68030, 68031), (68096, 68096), (68112, 68115), - (68117, 68119), (68121, 68149), (68192, 68220), (68224, 68252), - (68288, 68295), (68297, 68324), (68352, 68405), (68416, 68437), - (68448, 68466), (68480, 68497), (68608, 68680), (68736, 68786), - (68800, 68850), (68864, 68899), (68938, 68965), (68975, 68997), - (69248, 69289), (69296, 69297), (69314, 69319), (69376, 69404), - (69415, 69415), (69424, 69445), (69488, 69505), (69552, 69572), - (69600, 69622), (69635, 69687), (69745, 69746), (69749, 69749), - (69763, 69807), (69840, 69864), (69891, 69926), (69956, 69956), - (69959, 
69959), (69968, 70002), (70006, 70006), (70019, 70066), - (70081, 70084), (70106, 70106), (70108, 70108), (70144, 70161), - (70163, 70187), (70207, 70208), (70272, 70278), (70280, 70280), - (70282, 70285), (70287, 70301), (70303, 70312), (70320, 70366), - (70405, 70412), (70415, 70416), (70419, 70440), (70442, 70448), - (70450, 70451), (70453, 70457), (70461, 70461), (70480, 70480), - (70493, 70497), (70528, 70537), (70539, 70539), (70542, 70542), - (70544, 70581), (70583, 70583), (70609, 70609), (70611, 70611), - (70656, 70708), (70727, 70730), (70751, 70753), (70784, 70831), - (70852, 70853), (70855, 70855), (71040, 71086), (71128, 71131), - (71168, 71215), (71236, 71236), (71296, 71338), (71352, 71352), - (71424, 71450), (71488, 71494), (71680, 71723), (71840, 71903), - (71935, 71942), (71945, 71945), (71948, 71955), (71957, 71958), - (71960, 71983), (71999, 71999), (72001, 72001), (72096, 72103), - (72106, 72144), (72161, 72161), (72163, 72163), (72192, 72192), - (72203, 72242), (72250, 72250), (72272, 72272), (72284, 72329), - (72349, 72349), (72368, 72440), (72640, 72672), (72704, 72712), - (72714, 72750), (72768, 72768), (72818, 72847), (72960, 72966), - (72968, 72969), (72971, 73008), (73030, 73030), (73056, 73061), - (73063, 73064), (73066, 73097), (73112, 73112), (73136, 73179), - (73440, 73458), (73474, 73474), (73476, 73488), (73490, 73523), - (73648, 73648), (73728, 74649), (74752, 74862), (74880, 75075), - (77712, 77808), (77824, 78895), (78913, 78918), (78944, 82938), - (82944, 83526), (90368, 90397), (92160, 92728), (92736, 92766), - (92784, 92862), (92880, 92909), (92928, 92975), (92992, 92995), - (93027, 93047), (93053, 93071), (93504, 93548), (93760, 93823), - (93856, 93880), (93883, 93907), (93952, 94026), (94032, 94032), - (94099, 94111), (94176, 94177), (94179, 94179), (94194, 94198), - (94208, 101589), (101631, 101662), (101760, 101874), (110576, 110579), - (110581, 110587), (110589, 110590), (110592, 110882), (110898, 110898), - (110928, 110930), (110933, 110933), (110948, 110951), (110960, 111355), - (113664, 113770), (113776, 113788), (113792, 113800), (113808, 113817), - (119808, 119892), (119894, 119964), (119966, 119967), (119970, 119970), - (119973, 119974), (119977, 119980), (119982, 119993), (119995, 119995), - (119997, 120003), (120005, 120069), (120071, 120074), (120077, 120084), - (120086, 120092), (120094, 120121), (120123, 120126), (120128, 120132), - (120134, 120134), (120138, 120144), (120146, 120485), (120488, 120512), - (120514, 120538), (120540, 120570), (120572, 120596), (120598, 120628), - (120630, 120654), (120656, 120686), (120688, 120712), (120714, 120744), - (120746, 120770), (120772, 120779), (122624, 122654), (122661, 122666), - (122928, 122989), (123136, 123180), (123191, 123197), (123214, 123214), - (123536, 123565), (123584, 123627), (124112, 124139), (124368, 124397), - (124400, 124400), (124608, 124638), (124640, 124642), (124644, 124645), - (124647, 124653), (124656, 124660), (124670, 124671), (124896, 124902), - (124904, 124907), (124909, 124910), (124912, 124926), (124928, 125124), - (125184, 125251), (125259, 125259), (126464, 126467), (126469, 126495), - (126497, 126498), (126500, 126500), (126503, 126503), (126505, 126514), - (126516, 126519), (126521, 126521), (126523, 126523), (126530, 126530), - (126535, 126535), (126537, 126537), (126539, 126539), (126541, 126543), - (126545, 126546), (126548, 126548), (126551, 126551), (126553, 126553), - (126555, 126555), (126557, 126557), (126559, 126559), (126561, 126562), - (126564, 
126564), (126567, 126570), (126572, 126578), (126580, 126583), - (126585, 126588), (126590, 126590), (126592, 126601), (126603, 126619), - (126625, 126627), (126629, 126633), (126635, 126651), (131072, 173791), - (173824, 178205), (178208, 183981), (183984, 191456), (191472, 192093), - (194560, 195101), (196608, 201546), (201552, 210041), -]; diff --git a/vendor/unicode-ident/tests/trie/mod.rs b/vendor/unicode-ident/tests/trie/mod.rs deleted file mode 100644 index 3e31c5cc5789c8..00000000000000 --- a/vendor/unicode-ident/tests/trie/mod.rs +++ /dev/null @@ -1,7 +0,0 @@ -#![allow(clippy::module_inception)] - -#[allow(dead_code, clippy::redundant_static_lifetimes, clippy::unreadable_literal)] -#[rustfmt::skip] -mod trie; - -pub(crate) use self::trie::*; diff --git a/vendor/unicode-ident/tests/trie/trie.rs b/vendor/unicode-ident/tests/trie/trie.rs deleted file mode 100644 index 0cca9ecce648a8..00000000000000 --- a/vendor/unicode-ident/tests/trie/trie.rs +++ /dev/null @@ -1,453 +0,0 @@ -// DO NOT EDIT THIS FILE. IT WAS AUTOMATICALLY GENERATED BY: -// -// ucd-generate property-bool UCD --include XID_Start,XID_Continue --trie-set -// -// Unicode version: 17.0.0. -// -// ucd-generate 0.3.1 is available on crates.io. - -pub const BY_NAME: &'static [(&'static str, &'static ::ucd_trie::TrieSet)] = &[ - ("XID_Continue", XID_CONTINUE), ("XID_Start", XID_START), -]; - -pub const XID_CONTINUE: &'static ::ucd_trie::TrieSet = &::ucd_trie::TrieSet { - tree1_level1: &[ - 0x3FF000000000000, 0x7FFFFFE87FFFFFE, 0x4A0040000000000, - 0xFF7FFFFFFF7FFFFF, 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF, - 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF, - 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF, 0x501F0003FFC3, - 0xFFFFFFFFFFFFFFFF, 0xB8DFFFFFFFFFFFFF, 0xFFFFFFFBFFFFD7C0, - 0xFFBFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF, - 0xFFFFFFFFFFFFFCFB, 0xFFFFFFFFFFFFFFFF, 0xFFFEFFFFFFFFFFFF, - 0xFFFFFFFF027FFFFF, 0xBFFFFFFFFFFE01FF, 0x787FFFFFF00B6, - 0xFFFFFFFF07FF0000, 0xFFFFC3FFFFFFFFFF, 0xFFFFFFFFFFFFFFFF, - 0x9FFFFDFF9FEFFFFF, 0xFFFFFFFFFFFF0000, 0xFFFFFFFFFFFFE7FF, - 0x3FFFFFFFFFFFF, 0x243FFFFFFFFFFFFF, - ], - tree2_level1: &[ - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, - 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 4, 32, 33, 34, 4, 4, 4, 4, 4, - 35, 36, 37, 38, 39, 40, 41, 42, 4, 4, 4, 4, 4, 4, 4, 4, 43, 44, 45, 46, - 47, 4, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 4, 61, 4, 62, - 63, 64, 65, 66, 4, 4, 4, 4, 4, 4, 4, 4, 67, 68, 69, 70, 71, 72, 73, 74, - 75, 76, 77, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, - 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, - 78, 78, 78, 78, 78, 78, 78, 78, 4, 4, 4, 79, 80, 81, 82, 83, 78, 78, 78, - 78, 78, 78, 78, 78, 84, 42, 85, 4, 86, 4, 87, 88, 78, 78, 78, 78, 78, 78, - 78, 78, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, - 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, - 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, - 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, - 4, 4, 4, 4, 4, 4, 78, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, - 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, - 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, - 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, - 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, - 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, - 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, - 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, - 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, - 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, - 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, - 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, - 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, - 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, - 4, 4, 4, 4, 89, 90, 4, 4, 4, 4, 91, 92, 4, 93, 94, 4, 95, 96, 97, 62, 4, - 98, 99, 100, 4, 101, 102, 103, 4, 104, 105, 106, 4, 107, 4, 4, 4, 4, 4, 4, - 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, - 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, - 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, - 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, - 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, - 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, - 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 108, 109, 78, 78, - 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, - 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, - 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, - 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, - 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, - 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, - 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, - 78, 78, 78, 78, 4, 4, 4, 4, 4, 99, 4, 110, 111, 112, 93, 113, 4, 114, 4, - 4, 115, 116, 117, 118, 119, 120, 4, 121, 122, 123, 124, 125, - ], - tree2_level2: &[ - 0x3FFFFFFFFFFF, 0xFFFF07FF0FFFFFFF, 0xFFFFFFFFFF80FEFF, - 0xFFFFFFFBFFFFFFFF, 0xFFFFFFFFFFFFFFFF, 0xFFFEFFCFFFFFFFFF, - 0xF3C5FDFFFFF99FEF, 0x5003FFCFB080799F, 0xD36DFDFFFFF987EE, - 0x3FFFC05E023987, 0xF3EDFDFFFFFBBFEE, 0xFE00FFCF00013BBF, - 0xF3EDFDFFFFF99FEE, 0x2FFCFB0E0399F, 0xC3FFC718D63DC7EC, 0xFFC000813DC7, - 0xF3FFFDFFFFFDDFFF, 0xFFCF37603DDF, 0xF3EFFDFFFFFDDFEF, 0xEFFCF70603DDF, - 0xFFFFFFFFFFFDDFFF, 0xFC00FFCF80F07DDF, 0x2FFBFFFFFC7FFFEE, - 0xCFFC0FF5F847F, 0x7FFFFFFFFFFFFFE, 0x3FF7FFF, 0x3FFFFFAFFFFFF7D6, - 0xF3FF7F5F, 0xC2A003FF03000001, 0xFFFE1FFFFFFFFEFF, 0x1FFFFFFFFEFFFFDF, - 0x40, 0xFFFFFFFFFFFF03FF, 0xFFFFFFFF3FFFFFFF, 0xF7FFFFFFFFFF20BF, - 0xFFFFFFFF3D7F3DFF, 0x7F3DFFFFFFFF3DFF, 0xFFFFFFFFFF7FFF3D, - 0xFFFFFFFFFF3DFFFF, 0x3FE00E7FFFFFF, 0xFFFFFFFF0000FFFF, - 0x3F3FFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFE, 0xFFFF9FFFFFFFFFFF, - 0xFFFFFFFF07FFFFFE, 0x1FFC7FFFFFFFFFF, 0x1FFFFF803FFFFF, 0xDDFFF000FFFFF, - 0x3FF308FFFFF, 0xFFFFFFFF03FFB800, 0x1FFFFFFFFFFFFFF, 0xFFFF07FFFFFFFFFF, - 0x3FFFFFFFFFFFFF, 0xFFF0FFF7FFFFFFF, 0x1F3FFFFFFFFFC0, 0xFFFF0FFFFFFFFFFF, - 0x7FF03FF, 0xFFFFFFFF0FFFFFFF, 0x9FFFFFFF7FFFFFFF, 0xBFFF008003FF03FF, - 0xFFF3FFFFFFF, 0xFF80003FF1FFF, 0xFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFF, - 0x3FFFFFFFFFFFE3FF, 0xE7FFFFFFFFFF07FF, 0x7FFFFFFFFF70000, - 0xFFFFFFFF3F3FFFFF, 0x3FFFFFFFAAFF3F3F, 0x5FDFFFFFFFFFFFFF, - 0x1FDC1FFF0FCF1FDC, 0x8000000000003000, 0x8002000000100001, 0x1FFF0000, - 0x1FFE21FFF0000, 0xF3FFFD503F2FFC84, 0xFFFFFFFF000043E0, 0x1FF, 0, - 0xFF81FFFFFFFFF, 0xFFFF20BFFFFFFFFF, 
0x800080FFFFFFFFFF, - 0x7F7F7F7F007FFFFF, 0xFFFFFFFF7F7F7F7F, 0x1F3EFFFE000000E0, - 0xFFFFFFFEE67FFFFF, 0xFFFEFFFFFFFFFFE0, 0xFFFFFFFF00007FFF, - 0xFFFF000000000000, 0x1FFF, 0x3FFFFFFFFFFF0000, 0xFFFFFFF1FFF, - 0xBFF0FFFFFFFFFFFF, 0x3FFFFFFFFFFFF, 0xFFFFFFFCFF800000, - 0xFFFFFFFFFFFFF9FF, 0xFFFE00001FFFFFFF, 0x10FFFFFFFFFF, - 0xE8FFFFFF03FF003F, 0xFFFF3FFFFFFFFFFF, 0x1FFFFFFF000FFFFF, - 0x7FFFFFFF03FF8001, 0x7FFFFFFFFFFFFF, 0xFC7FFFFF03FF3FFF, - 0x7CFFFF38000007, 0xFFFF7F7F007E7E7E, 0xFFFF03FFF7FFFFFF, - 0x3FF37FFFFFFFFFF, 0xFFFF000FFFFFFFFF, 0xFFFFFFFFFFFF87F, 0x3FFFFFF, - 0x5F7FFDFFE0F8007F, 0xFFFFFFFFFFFFFFDB, 0xFFFFFFFFFFF80000, - 0xFFFFFFF03FFFFFFF, 0x3FFFFFFFFFFFFFFF, 0xFFFFFFFFFFFF0000, - 0xFFFFFFFFFFFCFFFF, 0x3FF0000000000FF, 0x18FFFF0000FFFF, - 0xAA8A00000000E000, 0x1FFFFFFFFFFFFFFF, 0x87FFFFFE03FF0000, - 0xFFFFFFE007FFFFFE, 0x7FFFFFFFFFFFFFFF, 0x1CFCFCFC, - ], - tree3_level1: &[ - 0, 1, 2, 3, 4, 5, 6, 7, 8, 5, 9, 10, 11, 12, 13, 14, 7, 7, 7, 7, 7, 7, 7, - 7, 7, 7, 15, 16, 17, 7, 18, 19, 7, 20, 7, 21, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 22, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - ], - tree3_level2: &[ - 0, 1, 2, 3, 4, 5, 4, 6, 4, 4, 7, 8, 9, 10, 11, 12, 2, 2, 13, 14, 15, 16, - 17, 18, 2, 2, 2, 2, 19, 20, 21, 4, 22, 23, 24, 25, 26, 27, 28, 4, 29, 30, - 31, 32, 33, 34, 35, 4, 2, 36, 37, 37, 38, 39, 40, 4, 4, 4, 41, 42, 43, 44, - 45, 46, 2, 47, 3, 48, 49, 50, 2, 51, 52, 53, 54, 55, 56, 57, 58, 59, 2, - 60, 2, 61, 4, 4, 62, 63, 2, 64, 65, 66, 67, 68, 4, 4, 3, 4, 69, 70, 71, - 72, 73, 74, 75, 76, 77, 65, 4, 78, 4, 79, 80, 81, 82, 4, 83, 84, 85, 86, - 4, 4, 4, 87, 88, 89, 90, 4, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 27, - 4, 2, 91, 2, 2, 2, 92, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, - 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 93, - 94, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 95, 96, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 68, 4, 4, 4, 4, 4, 4, 4, - 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, - 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, - 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, - 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, - 97, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, - 4, 4, 4, 4, 2, 2, 2, 2, 2, 2, 2, 2, 65, 98, 75, 99, 19, 100, 101, 4, 4, 4, - 4, 4, 4, 102, 4, 4, 4, 2, 103, 104, 2, 105, 106, 107, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 
- 2, 2, 2, 2, 2, 2, 2, 2, 108, 24, 4, 2, 37, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, - 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, - 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, - 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 109, 2, 2, 2, 2, 110, 111, 2, 2, 2, 2, 2, - 112, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, - 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 2, 113, 114, 4, 4, 4, 4, 4, 4, 4, - 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, - 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, - 4, 4, 4, 4, 4, 4, 4, 115, 4, 4, 4, 4, 4, 4, 4, 4, 116, 68, 4, 4, 4, 4, 4, - 4, 4, 117, 118, 4, 4, 119, 4, 4, 4, 4, 4, 4, 2, 120, 121, 122, 123, 124, - 2, 2, 2, 2, 125, 126, 127, 128, 129, 130, 4, 4, 4, 4, 4, 4, 4, 4, 131, - 132, 133, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 134, 4, 4, 4, - 135, 136, 137, 4, 138, 139, 4, 4, 4, 4, 140, 97, 4, 4, 4, 4, 4, 4, 4, 141, - 4, 4, 4, 142, 4, 4, 4, 143, 4, 4, 4, 144, 2, 2, 2, 145, 2, 146, 4, 4, 4, - 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 147, 148, 149, 4, 4, 4, 4, 4, - 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, - 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 115, 4, - 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 150, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 11, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 116, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 151, 2, 2, 2, 2, 2, 2, 2, 2, 2, 152, 4, 4, 4, 4, 4, - 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, - 4, 4, 4, 4, 4, 4, 4, 4, 2, 2, 2, 2, 2, 2, 2, 2, 152, 4, 4, 4, 4, 4, 4, 4, - 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 153, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 97, - 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, - 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, - 2, 2, 2, 95, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, - 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, - 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, - ], - tree3_level3: &[ - 0xB7FFFF7FFFFFEFFF, 0x3FFF3FFF, 0xFFFFFFFFFFFFFFFF, 0x7FFFFFFFFFFFFFF, 0, - 0x1FFFFFFFFFFFFF, 0x2000000000000000, 0xFFFFFFFF1FFFFFFF, 0x10001FFFF, - 0xFFFFE000FFFFFFFF, 0x7FFFFFFFFFF07FF, 0xFFFFFFFF3FFFFFFF, 0x3EFF0F, - 0xFFFF03FF3FFFFFFF, 0xFFFFFFFFF0FFFFF, 0xFFFF00FFFFFFFFFF, - 0xF7FF000FFFFFFFFF, 0x1BFBFFFBFFB7F7FF, 0xFFFFFFFFFFFFF, 0x7FFFFFFFFFFFFF, - 0xFF003FFFFF, 0x7FDFFFFFFFFFFBF, 0x91BFFFFFFFFFFD3F, 0x7FFFFF003FFFFF, - 0x7FFFFFFF, 0x37FFFF00000000, 0x3FFFFFF003FFFFF, 0x3FFFFFF, - 0xC0FFFFFFFFFFFFFF, 0x873FFFFFFEEFF06F, 0x1FFFFFFF00000000, 0x1FFFFFFF, - 0x7FFFFFFEFF, 
0x3FFFFFFFFFFFFF, 0x7FFFF003FFFFF, 0x3FFFF, 0x1FF, - 0x7FFFFFFFFFFFF, 0x3FF00FFFFFFFFFF, 0xFFFFBE3FFFFFFFFF, 0x3F, - 0x31BFFFFFFFFFF, 0xFC000000000000FC, 0xFFFF00801FFFFFFF, - 0xFFFF00000001FFFF, 0xFFFF00000000003F, 0x7FFFFF0000001F, - 0x803FFFC00000007F, 0x3FF01FFFFFF0004, 0xFFDFFFFFFFFFFFFF, - 0x4FFFFFFFFF00F0, 0x17FFDE1F, 0xC0FFFFFFFFFBFFFF, 0x3, 0xFFFF01FFBFFFBD7F, - 0x3FF07FFFFFFFFFF, 0xFBEDFDFFFFF99FEF, 0x1F1FCFE081399F, - 0xFFBFFFFFFFFF4BFF, 0x6000FF7A5, 0x3C3FF07FF, 0x3FF00BF, - 0xFF3FFFFFFFFFFFFF, 0x3F000001, 0x3FF0011, 0x1FFFFFFFFFFFFFF, 0xFFFFF03FF, - 0x3FF0FFFE7FFFFFF, 0x7F, 0xFFFFFFFF00000000, 0x800003FFFFFFFFFF, - 0xF9BFFFFFFF6FF27F, 0x3FF000F, 0xFFFFFCFF00000000, 0x1BFCFFFFFF, - 0x7FFFFFFFFFFFFFFF, 0xFFFFFFFFFFFF0080, 0xFFFF000023FFFFFF, 0xFF00000000, - 0x3FF0001FFFFFFFF, 0xFF7FFFFFFFFFFDFF, 0xFFFC000003FF0001, - 0x7FFEFFFFFCFFFF, 0xB47FFFFFFFFFFB7F, 0xFFFFFDBF03FF00FF, - 0xFFFF03FF01FB7FFF, 0x3FF0FFFFFFF, 0x7FFFFF00000000, 0xC7FFFFFFFFFDFFFF, - 0x7FF0007, 0x1000000000000, 0x7FFFFFFFFFFF, 0xF, 0xFFFFFFFFFFFF0000, - 0x1FFFFFFFFFFFF, 0xFFFFFFFFFFFF, 0xFFFFFFFF003FFFFF, 0x3FFFFFFFFFFFFFF, - 0xFFFF03FF7FFFFFFF, 0x1F3FFFFFFF03FF, 0xE0FFFFF803FF000F, 0xFFFF, - 0x3FF1FFFFFFFFFFF, 0xF9FFFFFF00000000, 0xFFFFF, 0xFFFFFFFFFFFF87FF, - 0xFFFF80FF, 0x7F001B00000000, 0x80000000003FFFFF, 0x6FEF000000000000, - 0x40007FFFFFFFF, 0xFFFF00F000270000, 0xFFFFFFFFFFFFFFF, - 0x1FFF07FFFFFFFFFF, 0x63FF01FF, 0x3FF000000000000, 0xFFFF3FFFFFFFFFFF, - 0xF807E3E000000000, 0x3C0000000FE7, 0x1C, 0xFFFFFFFFFFDFFFFF, - 0xEBFFDE64DFFFFFFF, 0xFFFFFFFFFFFFFFEF, 0x7BFFFFFFDFDFE7BF, - 0xFFFFFFFFFFFDFC5F, 0xFFFFFF3FFFFFFFFF, 0xF7FFFFFFF7FFFFFD, - 0xFFDFFFFFFFDFFFFF, 0xFFFF7FFFFFFF7FFF, 0xFFFFFDFFFFFFFDFF, - 0xFFFFFFFFFFFFCFF7, 0xF87FFFFFFFFFFFFF, 0x201FFFFFFFFFFF, 0xFFFEF8000010, - 0x7E07FFFFFFF, 0xFFFF07DBF9FFFF7F, 0x3FFFFFFFFFFF, 0x8000, - 0x3FFF1FFFFFFFFFFF, 0x43FF, 0x7FFFFFFF0000, 0x3FFFFFFFFFF0000, - 0x7FFFFFFFFFF0000, 0xC03FFFFF7FFFFFFF, 0x7FFF6F7F00000000, 0x7F001F, - 0x3FF0FFF, 0xAF7FE96FFFFFFEF, 0x5EF7F796AA96EA84, 0xFFFFBEE0FFFFBFF, - 0xFFFFFFFF, 0xFFFF0001FFFFFFFF, 0x3FFFFFFF, 0xFFFFFFFFFFFF07FF, - ], -}; - -pub const XID_START: &'static ::ucd_trie::TrieSet = &::ucd_trie::TrieSet { - tree1_level1: &[ - 0, 0x7FFFFFE07FFFFFE, 0x420040000000000, 0xFF7FFFFFFF7FFFFF, - 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF, - 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF, - 0xFFFFFFFFFFFFFFFF, 0x501F0003FFC3, 0, 0xB8DF000000000000, - 0xFFFFFFFBFFFFD740, 0xFFBFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF, - 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFC03, 0xFFFFFFFFFFFFFFFF, - 0xFFFEFFFFFFFFFFFF, 0xFFFFFFFF027FFFFF, 0x1FF, 0x787FFFFFF0000, - 0xFFFFFFFF00000000, 0xFFFEC000000007FF, 0xFFFFFFFFFFFFFFFF, - 0x9C00C060002FFFFF, 0xFFFFFFFD0000, 0xFFFFFFFFFFFFE000, 0x2003FFFFFFFFF, - 0x43007FFFFFFFC00, - ], - tree2_level1: &[ - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, - 21, 22, 23, 24, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 35, 35, - 35, 35, 36, 37, 38, 39, 40, 41, 42, 43, 35, 35, 35, 35, 35, 35, 35, 35, - 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 3, 58, 59, 60, 30, - 61, 62, 63, 64, 65, 66, 67, 68, 35, 35, 35, 30, 35, 35, 35, 35, 69, 70, - 71, 72, 30, 73, 74, 30, 75, 76, 77, 30, 30, 30, 30, 30, 30, 30, 30, 30, - 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, - 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 35, 35, 35, 78, - 79, 80, 81, 82, 30, 30, 30, 30, 30, 30, 30, 30, 83, 43, 84, 85, 86, 35, - 87, 88, 30, 30, 30, 30, 30, 30, 30, 
30, 35, 35, 35, 35, 35, 35, 35, 35, - 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, - 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, - 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, - 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, - 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, - 35, 35, 35, 35, 35, 30, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, - 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, - 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, - 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, - 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, - 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, - 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, - 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, - 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, - 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, - 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, - 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, - 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, - 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, - 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, - 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, - 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, - 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, - 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, - 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 89, 90, 35, 35, 35, 35, 91, 92, - 93, 94, 95, 35, 96, 97, 98, 49, 99, 100, 101, 102, 103, 104, 105, 106, - 107, 108, 109, 110, 35, 111, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, - 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, - 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, - 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, - 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, - 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, - 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, - 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, - 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, - 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, - 35, 112, 113, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, - 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, - 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, - 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, - 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, - 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, - 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, - 30, 30, 30, 30, 30, 30, 30, 30, 30, 35, 35, 35, 35, 35, 114, 35, 115, 116, - 117, 118, 119, 35, 120, 35, 35, 121, 122, 123, 124, 30, 125, 35, 126, 127, - 128, 129, 130, - ], - tree2_level2: &[ - 0x110043FFFFF, 0xFFFF07FF01FFFFFF, 0xFFFFFFFF0000FEFF, 0x3FF, - 0x23FFFFFFFFFFFFF0, 0xFFFE0003FF010000, 0x23C5FDFFFFF99FE1, - 
0x10030003B0004000, 0x36DFDFFFFF987E0, 0x1C00005E000000, - 0x23EDFDFFFFFBBFE0, 0x200000300010000, 0x23EDFDFFFFF99FE0, - 0x20003B0000000, 0x3FFC718D63DC7E8, 0x10000, 0x23FFFDFFFFFDDFE0, - 0x337000000, 0x23EFFDFFFFFDDFE1, 0x6000370000000, 0x27FFFFFFFFFDDFF0, - 0xFC00000380704000, 0x2FFBFFFFFC7FFFE0, 0x7F, 0x5FFFFFFFFFFFE, - 0x2005FFAFFFFFF7D6, 0xF000005F, 0x1, 0x1FFFFFFFFEFF, 0x1F00, 0, - 0x800007FFFFFFFFFF, 0xFFE1C0623C3F0000, 0xFFFFFFFF00004003, - 0xF7FFFFFFFFFF20BF, 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFF3D7F3DFF, - 0x7F3DFFFFFFFF3DFF, 0xFFFFFFFFFF7FFF3D, 0xFFFFFFFFFF3DFFFF, 0x7FFFFFF, - 0xFFFFFFFF0000FFFF, 0x3F3FFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFE, - 0xFFFF9FFFFFFFFFFF, 0xFFFFFFFF07FFFFFE, 0x1FFC7FFFFFFFFFF, - 0x3FFFF8003FFFF, 0x1DFFF0003FFFF, 0xFFFFFFFFFFFFF, 0x10800000, - 0xFFFFFFFF00000000, 0x1FFFFFFFFFFFFFF, 0xFFFF05FFFFFFFFFF, - 0x3FFFFFFFFFFFFF, 0x7FFFFFFF, 0x1F3FFFFFFF0000, 0xFFFF0FFFFFFFFFFF, - 0xFFFFFFFF007FFFFF, 0x1FFFFF, 0x8000000000, 0xFFFFFFFFFFFE0, 0x1FE0, - 0xFC00C001FFFFFFF8, 0x3FFFFFFFFF, 0xFFFFFFFFF, 0x3FFFFFFFFC00E000, - 0xE7FFFFFFFFFF07FF, 0x46FDE0000000000, 0xFFFFFFFF3F3FFFFF, - 0x3FFFFFFFAAFF3F3F, 0x5FDFFFFFFFFFFFFF, 0x1FDC1FFF0FCF1FDC, - 0x8002000000000000, 0x1FFF0000, 0xF3FFFD503F2FFC84, 0xFFFFFFFF000043E0, - 0x1FF, 0xC781FFFFFFFFF, 0xFFFF20BFFFFFFFFF, 0x80FFFFFFFFFF, - 0x7F7F7F7F007FFFFF, 0x7F7F7F7F, 0x1F3E03FE000000E0, 0xFFFFFFFEE07FFFFF, - 0xF7FFFFFFFFFFFFFF, 0xFFFEFFFFFFFFFFE0, 0xFFFFFFFF00007FFF, - 0xFFFF000000000000, 0x1FFF, 0x3FFFFFFFFFFF0000, 0xC00FFFF1FFF, - 0x80007FFFFFFFFFFF, 0xFFFFFFFF3FFFFFFF, 0xFFFFFFFFFFFF, - 0xFFFFFFFCFF800000, 0xFFFFFFFFFFFFF9FF, 0xFFFE00001FFFFFFF, 0x7FFFFF7BB, - 0xFFFFFFFFFFFFC, 0x68FC000000000000, 0xFFFF003FFFFFFC00, - 0x1FFFFFFF0000007F, 0x7FFFFFFFFFFF0, 0x7C00FFDF00008000, 0x1FFFFFFFFFF, - 0xC47FFFFF00000FF7, 0x3E62FFFFFFFFFFFF, 0x1C07FF38000005, - 0xFFFF7F7F007E7E7E, 0xFFFF03FFF7FFFFFF, 0x7FFFFFFFF, 0xFFFF000FFFFFFFFF, - 0xFFFFFFFFFFFF87F, 0xFFFF3FFFFFFFFFFF, 0x3FFFFFF, 0x5F7FFDFFA0F8007F, - 0xFFFFFFFFFFFFFFDB, 0x3FFFFFFFFFFFF, 0xFFFFFFFFFFF80000, - 0xFFFFFFF03FFFFFFF, 0x3FFFFFFFFFFFFFFF, 0xFFFFFFFFFFFF0000, - 0xFFFFFFFFFFFCFFFF, 0x3FF0000000000FF, 0xAA8A000000000000, - 0x1FFFFFFFFFFFFFFF, 0x7FFFFFE00000000, 0xFFFFFFC007FFFFFE, - 0x7FFFFFFF3FFFFFFF, 0x1CFCFCFC, - ], - tree3_level1: &[ - 0, 1, 2, 3, 4, 5, 6, 7, 8, 5, 9, 10, 5, 11, 12, 5, 7, 7, 7, 7, 7, 7, 7, 7, - 7, 7, 13, 14, 15, 7, 16, 17, 7, 18, 7, 19, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - ], - tree3_level2: &[ - 0, 1, 2, 3, 4, 5, 4, 4, 4, 4, 6, 7, 8, 9, 10, 11, 2, 2, 12, 13, 14, 15, - 16, 17, 2, 2, 2, 2, 18, 19, 20, 4, 21, 22, 23, 24, 25, 26, 27, 4, 28, 29, - 30, 31, 32, 33, 34, 4, 2, 35, 36, 36, 37, 38, 39, 4, 4, 4, 40, 41, 42, 43, - 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 23, 57, 58, 59, 60, 5, - 61, 62, 63, 4, 4, 64, 65, 62, 66, 67, 4, 68, 69, 4, 4, 70, 4, 71, 72, 73, - 74, 75, 76, 77, 78, 79, 80, 4, 4, 4, 81, 82, 83, 84, 4, 85, 86, 87, 
88, 4, - 4, 4, 89, 90, 4, 91, 4, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 26, 4, - 2, 64, 2, 2, 2, 92, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, - 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 93, 94, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 62, 95, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 69, 4, 4, 4, 4, 4, 4, 4, 4, - 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, - 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, - 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, - 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 96, - 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, - 4, 4, 2, 2, 2, 2, 2, 2, 2, 2, 80, 97, 98, 99, 62, 100, 84, 4, 4, 4, 4, 4, - 4, 101, 4, 4, 4, 2, 102, 103, 2, 104, 105, 106, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 107, 23, 4, 2, 36, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, - 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, - 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, - 4, 4, 4, 4, 4, 4, 4, 4, 4, 108, 2, 2, 2, 2, 109, 110, 2, 2, 2, 2, 2, 111, - 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, - 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 2, 112, 113, 4, 4, 4, 4, 4, 4, 4, 4, 4, - 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 2, 114, 115, - 116, 117, 118, 2, 2, 2, 2, 119, 120, 121, 122, 123, 124, 4, 4, 4, 4, 4, 4, - 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 125, 4, - 4, 4, 126, 127, 4, 4, 128, 129, 4, 4, 4, 4, 99, 70, 4, 4, 4, 4, 4, 4, 4, - 130, 4, 4, 4, 131, 4, 4, 4, 132, 4, 4, 4, 133, 2, 2, 2, 134, 2, 135, 4, 4, - 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 136, 137, 138, 4, 4, 4, 4, - 4, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 139, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 10, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 140, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 141, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 96, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, - 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 2, 2, 2, 2, 2, 2, - 2, 2, 96, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, - 4, 4, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 142, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 143, 
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, - 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, - 4, 4, 4, 4, 4, 4, 4, - ], - tree3_level3: &[ - 0xB7FFFF7FFFFFEFFF, 0x3FFF3FFF, 0xFFFFFFFFFFFFFFFF, 0x7FFFFFFFFFFFFFF, 0, - 0x1FFFFFFFFFFFFF, 0xFFFFFFFF1FFFFFFF, 0x1FFFF, 0xFFFFE000FFFFFFFF, - 0x3FFFFFFFFF07FF, 0xFFFFFFFF3FFFFFFF, 0x3EFF0F, 0xFFFF00003FFFFFFF, - 0xFFFFFFFFF0FFFFF, 0xFFFF00FFFFFFFFFF, 0xF7FF000FFFFFFFFF, - 0x1BFBFFFBFFB7F7FF, 0xFFFFFFFFFFFFF, 0x7FFFFFFFFFFFFF, 0xFF003FFFFF, - 0x7FDFFFFFFFFFFBF, 0x91BFFFFFFFFFFD3F, 0x7FFFFF003FFFFF, 0x7FFFFFFF, - 0x37FFFF00000000, 0x3FFFFFF003FFFFF, 0x3FFFFFF, 0xC0FFFFFFFFFFFFFF, - 0x3FFFFFFEEF0001, 0x1FFFFFFF00000000, 0x1FFFFFFF, 0x1FFFFFFEFF, - 0x3FFFFFFFFFFFFF, 0x7FFFF003FFFFF, 0x3FFFF, 0x1FF, 0x7FFFFFFFFFFFF, - 0xFFFFFFFFF, 0xFFFF803FFFFFFC00, 0x3F, 0x303FFFFFFFFFF, 0xFC, - 0xFFFF00801FFFFFFF, 0xFFFF00000000003F, 0xFFFF000000000003, - 0x7FFFFF0000001F, 0xFFFFFFFFFFFFF8, 0x26000000000000, 0xFFFFFFFFFFF8, - 0x1FFFFFF0000, 0x7FFFFFFFF8, 0x47FFFFFFFF0090, 0x7FFFFFFFFFFF8, - 0x1400001E, 0x80000FFFFFFBFFFF, 0x1, 0xFFFF01FFBFFFBD7F, - 0x23EDFDFFFFF99FE0, 0x3E0010000, 0xBFFFFFFFFF4BFF, 0xA0000, 0x380000780, - 0xFFFFFFFFFFFF, 0xB0, 0x7FFFFFFFFFFF, 0xF000000, 0x10, 0x10007FFFFFFFFFF, - 0x7FFFFFF, 0x7F, 0xFFFFFFFFFFF, 0xFFFFFFFF00000000, 0x80000000FFFFFFFF, - 0x8000FFFFFF6FF27F, 0x2, 0xFFFFFCFF00000000, 0xA0001FFFF, - 0x407FFFFFFFFF801, 0xFFFFFFFFF0010000, 0xFFFF0000200003FF, - 0x1FFFFFFFFFFFFFF, 0x1FFFFFFFF, 0x7FFFFFFFFDFF, 0xFFFC000000000001, - 0xFFFF, 0x1FFFFFFFFFB7F, 0xFFFFFDBF00000040, 0xFFFF0000010003FF, - 0xFFFFFFF, 0x7FFFF00000000, 0xFFFFFFFFDFFF4, 0x1000000000000, 0xF, - 0xFFFFFFFFFFFF0000, 0x1FFFFFFFFFFFF, 0xFFFFFFFF0000007E, 0x3FFFFFFF, - 0xFFFF00007FFFFFFF, 0x7FFFFFFFFFFFFFFF, 0x3FFFFFFF0000, - 0xE0FFFFF80000000F, 0x1FFFFFFFFFFF, 0xF9FFFFFF00000000, 0xFFFFF, 0x107FF, - 0xFFF80000, 0x7C000B00000000, 0x80000000003FFFFF, 0x6FEF000000000000, - 0x40007FFFFFFFF, 0xFFFF00F000270000, 0xFFFFFFFFFFFFFFF, - 0x1FFF07FFFFFFFFFF, 0x3FF01FF, 0xFFFFFFFFFFDFFFFF, 0xEBFFDE64DFFFFFFF, - 0xFFFFFFFFFFFFFFEF, 0x7BFFFFFFDFDFE7BF, 0xFFFFFFFFFFFDFC5F, - 0xFFFFFF3FFFFFFFFF, 0xF7FFFFFFF7FFFFFD, 0xFFDFFFFFFFDFFFFF, - 0xFFFF7FFFFFFF7FFF, 0xFFFFFDFFFFFFFDFF, 0xFF7, 0x7E07FFFFFFF, - 0xFFFF000000000000, 0x3FFFFFFFFFFF, 0x3F801FFFFFFFFFFF, 0x4000, - 0xFFFFFFF0000, 0x13FFFFFFF0000, 0xC01F3FB77FFFFFFF, 0x7FFF6F7F00000000, - 0x1F, 0x80F, 0xAF7FE96FFFFFFEF, 0x5EF7F796AA96EA84, 0xFFFFBEE0FFFFBFF, - 0xFFFFFFFF, 0xFFFF3FFFFFFFFFFF, 0xFFFF0001FFFFFFFF, 0xFFFFFFFFFFFF07FF, - 0x3FFFFFFFFFFFFFF, - ], -}; diff --git a/vendor/windows-link/.cargo-checksum.json b/vendor/windows-link/.cargo-checksum.json deleted file mode 100644 index 4b8b7b4079bbef..00000000000000 --- a/vendor/windows-link/.cargo-checksum.json +++ /dev/null @@ -1 +0,0 @@ 
-{"files":{".cargo_vcs_info.json":"02f3a04d4359721839ae43d0bc753d834717cb0bf600ee0b9d88d5299b94c121","Cargo.lock":"f33a3dccb85342cd5cb58d165dc6c0421e93aeaca9ea1cd82b81f0c204d316a8","Cargo.toml":"abf0b74b168ec7d7c600f44eb90502c47e44480a199b9adc9ec74ea990605707","Cargo.toml.orig":"4a4fb4a85656696687cf1f2a8725309930e39378616aa687737c38c0e28dfad1","license-apache-2.0":"c16f8dcf1a368b83be78d826ea23de4079fe1b4469a0ab9ee20563f37ff3d44b","license-mit":"c2cfccb812fe482101a8f04597dfc5a9991a6b2748266c47ac91b6a5aae15383","readme.md":"4bbe7714285567006b5b068dfc93cb3b633afae20766c9bf1fce2444874261fb","src/lib.rs":"ca9cf5a2a97cf72d855c677c936355b6d29e41682e5abfa505f28b3d216b5333"},"package":"f0805222e57f7521d6a62e36fa9163bc891acd422f971defe97d64e70d0a4fe5"} \ No newline at end of file diff --git a/vendor/windows-link/.cargo_vcs_info.json b/vendor/windows-link/.cargo_vcs_info.json deleted file mode 100644 index 0ca517613e6a9c..00000000000000 --- a/vendor/windows-link/.cargo_vcs_info.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "git": { - "sha1": "d468916ac27a36fb8a12bafc1bf5c0ec2fe92238" - }, - "path_in_vcs": "crates/libs/link" -} \ No newline at end of file diff --git a/vendor/windows-link/Cargo.lock b/vendor/windows-link/Cargo.lock deleted file mode 100644 index 1fc750d4f4c9b9..00000000000000 --- a/vendor/windows-link/Cargo.lock +++ /dev/null @@ -1,7 +0,0 @@ -# This file is automatically @generated by Cargo. -# It is not intended for manual editing. -version = 3 - -[[package]] -name = "windows-link" -version = "0.2.1" diff --git a/vendor/windows-link/Cargo.toml b/vendor/windows-link/Cargo.toml deleted file mode 100644 index 6b29fd6b128c76..00000000000000 --- a/vendor/windows-link/Cargo.toml +++ /dev/null @@ -1,39 +0,0 @@ -# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO -# -# When uploading crates to the registry Cargo will automatically -# "normalize" Cargo.toml files for maximal compatibility -# with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g., crates.io) dependencies. -# -# If you are reading this file be aware that the original Cargo.toml -# will likely look very different (and much more reasonable). -# See Cargo.toml.orig for the original contents. - -[package] -edition = "2021" -rust-version = "1.71" -name = "windows-link" -version = "0.2.1" -build = false -autolib = false -autobins = false -autoexamples = false -autotests = false -autobenches = false -description = "Linking for Windows" -readme = "readme.md" -categories = ["os::windows-apis"] -license = "MIT OR Apache-2.0" -repository = "https://github.com/microsoft/windows-rs" - -[lib] -name = "windows_link" -path = "src/lib.rs" - -[lints.rust] -missing_unsafe_on_extern = "warn" - -[lints.rust.unexpected_cfgs] -level = "warn" -priority = 0 -check-cfg = ["cfg(windows_raw_dylib, windows_slim_errors)"] diff --git a/vendor/windows-link/license-apache-2.0 b/vendor/windows-link/license-apache-2.0 deleted file mode 100644 index b5ed4ecec27b39..00000000000000 --- a/vendor/windows-link/license-apache-2.0 +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. 
- - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright (c) Microsoft Corporation. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/vendor/windows-link/license-mit b/vendor/windows-link/license-mit deleted file mode 100644 index 9e841e7a26e4eb..00000000000000 --- a/vendor/windows-link/license-mit +++ /dev/null @@ -1,21 +0,0 @@ - MIT License - - Copyright (c) Microsoft Corporation. - - Permission is hereby granted, free of charge, to any person obtaining a copy - of this software and associated documentation files (the "Software"), to deal - in the Software without restriction, including without limitation the rights - to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - copies of the Software, and to permit persons to whom the Software is - furnished to do so, subject to the following conditions: - - The above copyright notice and this permission notice shall be included in all - copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - SOFTWARE diff --git a/vendor/windows-link/readme.md b/vendor/windows-link/readme.md deleted file mode 100644 index f6c343f667968a..00000000000000 --- a/vendor/windows-link/readme.md +++ /dev/null @@ -1,26 +0,0 @@ -## Linking for Windows - -The [windows-link](https://crates.io/crates/windows-link) crate provides the `link` macro that simplifies linking. The `link` macro is much the same as the one provided by [windows-targets](https://crates.io/crates/windows-targets) but uses `raw-dylib` and thus does not require import lib files. - -* [Getting started](https://kennykerr.ca/rust-getting-started/) -* [Samples](https://github.com/microsoft/windows-rs/tree/master/crates/samples) -* [Releases](https://github.com/microsoft/windows-rs/releases) - -Start by adding the following to your Cargo.toml file: - -```toml -[dependencies.windows-link] -version = "0.2" -``` - -Use the `link` macro to define the external functions you wish to call: - -```rust -windows_link::link!("kernel32.dll" "system" fn SetLastError(code: u32)); -windows_link::link!("kernel32.dll" "system" fn GetLastError() -> u32); - -unsafe { - SetLastError(1234); - assert_eq!(GetLastError(), 1234); -} -``` diff --git a/vendor/windows-link/src/lib.rs b/vendor/windows-link/src/lib.rs deleted file mode 100644 index dbecf9f3b5e4fe..00000000000000 --- a/vendor/windows-link/src/lib.rs +++ /dev/null @@ -1,39 +0,0 @@ -#![doc = include_str!("../readme.md")] -#![no_std] - -/// Defines an external function to import. -#[cfg(all(windows, target_arch = "x86"))] -#[macro_export] -macro_rules! link { - ($library:literal $abi:literal $($link_name:literal)? fn $($function:tt)*) => ( - #[link(name = $library, kind = "raw-dylib", modifiers = "+verbatim", import_name_type = "undecorated")] - extern $abi { - $(#[link_name=$link_name])? - pub fn $($function)*; - } - ) -} - -/// Defines an external function to import. -#[cfg(all(windows, not(target_arch = "x86")))] -#[macro_export] -macro_rules! link { - ($library:literal $abi:literal $($link_name:literal)? fn $($function:tt)*) => ( - #[link(name = $library, kind = "raw-dylib", modifiers = "+verbatim")] - extern $abi { - $(#[link_name=$link_name])? 
- pub fn $($function)*; - } - ) -} - -/// Defines an external function to import. -#[cfg(not(windows))] -#[macro_export] -macro_rules! link { - ($library:literal $abi:literal $($link_name:literal)? fn $($function:tt)*) => ( - extern $abi { - pub fn $($function)*; - } - ) -} From 35cdedb5d6a0ddae375d61f07a3d7c740c0fda6a Mon Sep 17 00:00:00 2001 From: Kirill Podoprigora <kirill.bast9@mail.ru> Date: Mon, 17 Nov 2025 00:45:31 +0200 Subject: [PATCH 07/20] Restore name to standard_b64encode (#14) --- Modules/_base64/src/lib.rs | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/Modules/_base64/src/lib.rs b/Modules/_base64/src/lib.rs index 330e00de3d2525..f308a61daf301e 100644 --- a/Modules/_base64/src/lib.rs +++ b/Modules/_base64/src/lib.rs @@ -111,7 +111,7 @@ impl Drop for BorrowedBuffer { } #[unsafe(no_mangle)] -pub unsafe extern "C" fn b64encode( +pub unsafe extern "C" fn standard_b64encode( _module: *mut PyObject, args: *mut *mut PyObject, nargs: Py_ssize_t, @@ -120,7 +120,7 @@ pub unsafe extern "C" fn b64encode( unsafe { PyErr_SetString( PyExc_TypeError, - c"b64encode() takes exactly one argument".as_ptr(), + c"standard_b64encode() takes exactly one argument".as_ptr(), ); } return ptr::null_mut(); @@ -137,7 +137,7 @@ pub unsafe extern "C" fn b64encode( unsafe { PyErr_SetString( PyExc_TypeError, - c"b64encode() argument has negative length".as_ptr(), + c"standard_b64encode() argument has negative length".as_ptr(), ); } return ptr::null_mut(); @@ -206,9 +206,9 @@ unsafe impl Sync for ModuleDef {} pub static _BASE64_MODULE_METHODS: [PyMethodDef; 2] = { [ PyMethodDef { - ml_name: c"b64encode".as_ptr() as *mut c_char, + ml_name: c"standard_b64encode".as_ptr() as *mut c_char, ml_meth: PyMethodDefFuncPointer { - PyCFunctionFast: b64encode, + PyCFunctionFast: standard_b64encode, }, ml_flags: METH_FASTCALL, ml_doc: c"Demo for the _base64 module".as_ptr() as *mut c_char, From 6f5bc87ab5dd1f225a5e4bf7034e1e81cbb44862 Mon Sep 17 00:00:00 2001 From: Emma Smith <emma@emmatyping.dev> Date: Sun, 16 Nov 2025 23:59:12 -0800 Subject: [PATCH 08/20] Update Rust detection and binding generation (#15) This commit updates the build system to automatically detect cargo and enable/disable _base64 without needing to pass a flag. If cargo is unavailable, _base64 is disabled. It also updates cpython-sys to use a hand written header (which is what Linux seems to do) and splits off the parser bindings to be handled in the future (since the files are included differently). --- Makefile.pre.in | 7 +- Modules/cpython-sys/.gitignore | 1 - Modules/cpython-sys/build.rs | 24 ++-- Modules/cpython-sys/parser.h | 11 ++ Modules/cpython-sys/src/lib.rs | 5 +- Modules/cpython-sys/wrapper.h | 177 ++++++++++++++++++++++++++++ Modules/makesetup | 2 +- Python/remote_debug.h | 1 - Tools/build/regen-rust-wrapper-h.py | 34 ------ configure | 84 ++++++------- configure.ac | 59 ++++------ 11 files changed, 268 insertions(+), 137 deletions(-) delete mode 100644 Modules/cpython-sys/.gitignore create mode 100644 Modules/cpython-sys/parser.h create mode 100644 Modules/cpython-sys/wrapper.h delete mode 100644 Tools/build/regen-rust-wrapper-h.py diff --git a/Makefile.pre.in b/Makefile.pre.in index 6d113d14f31630..a27a9709330772 100644 --- a/Makefile.pre.in +++ b/Makefile.pre.in @@ -1652,10 +1652,6 @@ Makefile Modules/config.c: Makefile.pre \ @mv config.c Modules @echo "The Makefile was updated, you may need to re-run make." 
-.PHONY: regen-rust-wrapper-h -regen-rust-wrapper-h: $(PYTHON_HEADERS) - PYTHON_HEADERS="$(PYTHON_HEADERS)" $(PYTHON_FOR_REGEN) $(srcdir)/Tools/build/regen-rust-wrapper-h.py - .PHONY: regen-test-frozenmain regen-test-frozenmain: $(BUILDPYTHON) # Regenerate Programs/test_frozenmain.h @@ -3376,6 +3372,9 @@ Python/thread.o: @THREADHEADERS@ $(srcdir)/Python/condvar.h ########################################################################## # Module dependencies and platform-specific files +cpython-sys: Modules/cpython-sys/Cargo.toml Modules/cpython-sys/build.rs Modules/cpython-sys/wrapper.h Modules/cpython-sys/parser.h + cargo build --lib --locked --package cpython-sys --profile $(CARGO_PROFILE) + # force rebuild when header file or module build flavor (static/shared) is changed MODULE_DEPS_STATIC=Modules/config.c MODULE_DEPS_SHARED=@MODULE_DEPS_SHARED@ diff --git a/Modules/cpython-sys/.gitignore b/Modules/cpython-sys/.gitignore deleted file mode 100644 index 3536502b83a7c0..00000000000000 --- a/Modules/cpython-sys/.gitignore +++ /dev/null @@ -1 +0,0 @@ -/wrapper.h \ No newline at end of file diff --git a/Modules/cpython-sys/build.rs b/Modules/cpython-sys/build.rs index c45ccc0b2684c7..71b73b477fd34d 100644 --- a/Modules/cpython-sys/build.rs +++ b/Modules/cpython-sys/build.rs @@ -4,25 +4,29 @@ use std::path::{Path, PathBuf}; fn main() { let curdir = std::env::current_dir().unwrap(); let srcdir = curdir.parent().and_then(Path::parent).unwrap(); + let out_path = PathBuf::from(env::var("OUT_DIR").unwrap()); + generate_c_api_bindings(srcdir, &out_path.as_path()); + // TODO(emmatyping): generate bindings to the internal parser API + // The parser includes things slightly differently, so we should generate + // it's bindings independently + //generate_parser_bindings(srcdir, &out_path.as_path()); +} + +fn generate_c_api_bindings(srcdir: &Path, out_path: &Path) { let bindings = bindgen::Builder::default() .header("wrapper.h") .clang_arg(format!("-I{}", srcdir.as_os_str().to_str().unwrap())) .clang_arg(format!("-I{}/Include", srcdir.as_os_str().to_str().unwrap())) - .clang_arg(format!("-I{}/Include/internal", srcdir.as_os_str().to_str().unwrap())) - .allowlist_function("Py.*") - .allowlist_function("_Py.*") - .allowlist_type("Py.*") - .allowlist_type("_Py.*") - .allowlist_var("Py.*") - .allowlist_var("_Py.*") + .allowlist_function("_?Py.*") + .allowlist_type("_?Py.*") + .allowlist_var("_?Py.*") .blocklist_type("^PyMethodDef$") .parse_callbacks(Box::new(bindgen::CargoCallbacks::new())) .generate() .expect("Unable to generate bindings"); - // Write the bindings to the $OUT_DIR/bindings.rs file. - let out_path = PathBuf::from(env::var("OUT_DIR").unwrap()); + // Write the bindings to the $OUT_DIR/c_api.rs file. 
bindings - .write_to_file(out_path.join("bindings.rs")) + .write_to_file(out_path.join("c_api.rs")) .expect("Couldn't write bindings!"); } diff --git a/Modules/cpython-sys/parser.h b/Modules/cpython-sys/parser.h new file mode 100644 index 00000000000000..e58539b7236611 --- /dev/null +++ b/Modules/cpython-sys/parser.h @@ -0,0 +1,11 @@ +/* Private APIs */ +#define Py_BUILD_CORE + +// Parser +#include "Parser/pegen.h" +#include "Parser/string_parser.h" +#include "Parser/lexer/buffer.h" +#include "Parser/lexer/lexer.h" +#include "Parser/lexer/state.h" +#include "Parser/tokenizer/tokenizer.h" +#include "Parser/tokenizer/helpers.h" diff --git a/Modules/cpython-sys/src/lib.rs b/Modules/cpython-sys/src/lib.rs index 6c0b84d70ebc87..d3e21662a68472 100644 --- a/Modules/cpython-sys/src/lib.rs +++ b/Modules/cpython-sys/src/lib.rs @@ -6,7 +6,10 @@ use std::ffi::{c_char, c_int, c_void}; -include!(concat!(env!("OUT_DIR"), "/bindings.rs")); +include!(concat!(env!("OUT_DIR"), "/c_api.rs")); + +// TODO(emmatyping): include parser bindings (see build.rs) +//include!(concat!(env!("OUT_DIR"), "/parser.rs")); /* Flag passed to newmethodobject */ /* #define METH_OLDARGS 0x0000 -- unsupported now */ pub const METH_VARARGS: c_int = 0x0001; diff --git a/Modules/cpython-sys/wrapper.h b/Modules/cpython-sys/wrapper.h new file mode 100644 index 00000000000000..f79a44f0580633 --- /dev/null +++ b/Modules/cpython-sys/wrapper.h @@ -0,0 +1,177 @@ +/* Public APIs */ +#include "Python.h" + +// Misc + +// OS macros used in C, not necessary in Rust +//#include "osdefs.h" + +// Valgrind / analysis tools macros and functions +#include "dynamic_annotations.h" +// Macros for error codes +// #include "errcode.h" +// Macros to define symbol visibility +// #include "exports.h" +// Includes pyframe.h and cpython/frameobject.h +#include "frameobject.h" +// Includes cpython/marshal.h +#include "marshal.h" +// Macros defining opcodes +#include "opcode.h" +// More macros defining opcodes +#include "opcode_ids.h" +// Dtrace probes +#include "pydtrace.h" +//New code should use descrobject.h +//#include "structmember.h" + +// List of all stdlib names, autogenerated +#include "Python/stdlib_module_names.h" + +/* Private APIs */ +#define Py_BUILD_CORE + +// Internal +#include "internal/pycore_parser.h" +#include "internal/pycore_mimalloc.h" +#include "internal/mimalloc/mimalloc.h" +#include "internal/mimalloc/mimalloc/atomic.h" +#include "internal/mimalloc/mimalloc/internal.h" +#include "internal/mimalloc/mimalloc/prim.h" +#include "internal/mimalloc/mimalloc/track.h" +#include "internal/mimalloc/mimalloc/types.h" +#include "internal/pycore_abstract.h" +#include "internal/pycore_asdl.h" +#include "internal/pycore_ast.h" +#include "internal/pycore_ast_state.h" +#include "internal/pycore_atexit.h" +#include "internal/pycore_audit.h" +#include "internal/pycore_backoff.h" +#include "internal/pycore_bitutils.h" +#include "internal/pycore_blocks_output_buffer.h" +#include "internal/pycore_brc.h" +#include "internal/pycore_bytes_methods.h" +#include "internal/pycore_bytesobject.h" +#include "internal/pycore_call.h" +#include "internal/pycore_capsule.h" +#include "internal/pycore_cell.h" +#include "internal/pycore_ceval.h" +#include "internal/pycore_ceval_state.h" +#include "internal/pycore_code.h" +#include "internal/pycore_codecs.h" +#include "internal/pycore_compile.h" +#include "internal/pycore_complexobject.h" +#include "internal/pycore_condvar.h" +#include "internal/pycore_context.h" +#include "internal/pycore_critical_section.h" +#include 
"internal/pycore_crossinterp.h" +#include "internal/pycore_debug_offsets.h" +#include "internal/pycore_descrobject.h" +#include "internal/pycore_dict.h" +#include "internal/pycore_dict_state.h" +#include "internal/pycore_dtoa.h" +#include "internal/pycore_exceptions.h" +#include "internal/pycore_faulthandler.h" +#include "internal/pycore_fileutils.h" +#include "internal/pycore_floatobject.h" +#include "internal/pycore_flowgraph.h" +#include "internal/pycore_format.h" +#include "internal/pycore_frame.h" +#include "internal/pycore_freelist.h" +#include "internal/pycore_freelist_state.h" +#include "internal/pycore_function.h" +#include "internal/pycore_gc.h" +#include "internal/pycore_genobject.h" +#include "internal/pycore_getopt.h" +#include "internal/pycore_gil.h" +#include "internal/pycore_global_objects.h" +#include "internal/pycore_global_objects_fini_generated.h" +#include "internal/pycore_global_strings.h" +#include "internal/pycore_hamt.h" +#include "internal/pycore_hashtable.h" +#include "internal/pycore_import.h" +#include "internal/pycore_importdl.h" +#include "internal/pycore_index_pool.h" +#include "internal/pycore_initconfig.h" +#include "internal/pycore_instruments.h" +#include "internal/pycore_instruction_sequence.h" +#include "internal/pycore_interp.h" +#include "internal/pycore_interp_structs.h" +#include "internal/pycore_interpframe.h" +#include "internal/pycore_interpframe_structs.h" +#include "internal/pycore_interpolation.h" +#include "internal/pycore_intrinsics.h" +#include "internal/pycore_jit.h" +#include "internal/pycore_list.h" +#include "internal/pycore_llist.h" +#include "internal/pycore_lock.h" +#include "internal/pycore_long.h" +#include "internal/pycore_memoryobject.h" +#include "internal/pycore_mimalloc.h" +#include "internal/pycore_modsupport.h" +#include "internal/pycore_moduleobject.h" +#include "internal/pycore_namespace.h" +#include "internal/pycore_object.h" +#include "internal/pycore_object_alloc.h" +#include "internal/pycore_object_deferred.h" +#include "internal/pycore_object_stack.h" +#include "internal/pycore_object_state.h" +#include "internal/pycore_obmalloc.h" +#include "internal/pycore_obmalloc_init.h" +#include "internal/pycore_opcode_metadata.h" +#include "internal/pycore_opcode_utils.h" +#include "internal/pycore_optimizer.h" +#include "internal/pycore_parking_lot.h" +#include "internal/pycore_parser.h" +#include "internal/pycore_pathconfig.h" +#include "internal/pycore_pyarena.h" +#include "internal/pycore_pyatomic_ft_wrappers.h" +#include "internal/pycore_pybuffer.h" +#include "internal/pycore_pyerrors.h" +#include "internal/pycore_pyhash.h" +#include "internal/pycore_pylifecycle.h" +#include "internal/pycore_pymath.h" +#include "internal/pycore_pymem.h" +#include "internal/pycore_pymem_init.h" +#include "internal/pycore_pystate.h" +#include "internal/pycore_pystats.h" +#include "internal/pycore_pythonrun.h" +#include "internal/pycore_pythread.h" +#include "internal/pycore_qsbr.h" +#include "internal/pycore_range.h" +#include "internal/pycore_runtime.h" +#include "internal/pycore_runtime_init.h" +#include "internal/pycore_runtime_init_generated.h" +#include "internal/pycore_runtime_structs.h" +#include "internal/pycore_semaphore.h" +#include "internal/pycore_setobject.h" +#include "internal/pycore_signal.h" +#include "internal/pycore_sliceobject.h" +#include "internal/pycore_stats.h" +#include "internal/pycore_strhex.h" +#include "internal/pycore_stackref.h" +#include "internal/pycore_structs.h" +#include "internal/pycore_structseq.h" 
+#include "internal/pycore_symtable.h" +#include "internal/pycore_sysmodule.h" +#include "internal/pycore_template.h" +#include "internal/pycore_time.h" +#include "internal/pycore_token.h" +#include "internal/pycore_traceback.h" +#include "internal/pycore_tracemalloc.h" +#include "internal/pycore_tstate.h" +#include "internal/pycore_tuple.h" +#include "internal/pycore_typedefs.h" +#include "internal/pycore_typeobject.h" +#include "internal/pycore_typevarobject.h" +#include "internal/pycore_ucnhash.h" +#include "internal/pycore_unicodectype.h" +#include "internal/pycore_unicodeobject.h" +#include "internal/pycore_unicodeobject_generated.h" +#include "internal/pycore_unionobject.h" +#include "internal/pycore_uniqueid.h" +#include "internal/pycore_uop.h" +#include "internal/pycore_uop_ids.h" +#include "internal/pycore_uop_metadata.h" +#include "internal/pycore_warnings.h" +#include "internal/pycore_weakref.h" diff --git a/Modules/makesetup b/Modules/makesetup index 773de9117f4a22..b5fb40994a010b 100755 --- a/Modules/makesetup +++ b/Modules/makesetup @@ -286,7 +286,7 @@ sed -e 's/[ ]*#.*//' -e '/^[ ]*$/d' | done libs= # depends on the headers through cpython-sys - rule="$objs: \$(srcdir)/Cargo.toml \$(srcdir)/Cargo.lock \$(srcdir)/$srcdir/$manifest Modules/cpython-sys/wrapper.h $prefixed_srcs \$(PYTHON_HEADERS)" + rule="$objs: cpython-sys \$(srcdir)/Cargo.toml \$(srcdir)/Cargo.lock \$(srcdir)/$srcdir/$manifest $prefixed_srcs \$(PYTHON_HEADERS)" rule="$rule; cargo build --lib --locked --package ${mods} --profile \$(CARGO_PROFILE)" echo "$rule" >>$rulesf for mod in $mods diff --git a/Python/remote_debug.h b/Python/remote_debug.h index eac7f2aee132eb..e7676013197fa9 100644 --- a/Python/remote_debug.h +++ b/Python/remote_debug.h @@ -29,7 +29,6 @@ extern "C" { #include "pyconfig.h" #include "internal/pycore_ceval.h" -#include "internal/pycore_debug_offsets.h" #ifdef __linux__ # include <elf.h> diff --git a/Tools/build/regen-rust-wrapper-h.py b/Tools/build/regen-rust-wrapper-h.py deleted file mode 100644 index 998d808ea40ac2..00000000000000 --- a/Tools/build/regen-rust-wrapper-h.py +++ /dev/null @@ -1,34 +0,0 @@ -import os -import re -from pathlib import Path - -ROOT = Path(__file__).resolve().parents[2] -WRAPPER_H = ROOT / "Modules" / "cpython-sys" / "wrapper.h" -SKIP_PREFIXES = ("cpython/",) -SKIP_EXACT = { - "internal/pycore_crossinterp_data_registry.h", -} - -def normalize_path(header: str) -> str: - return re.sub(r'(:?\.\/)(:?Include\/)?', '', header) - -def main(output: str = WRAPPER_H) -> None: - headers = os.environ.get("PYTHON_HEADERS") - if headers is None: - raise RuntimeError("Unable to read $PYTHON_HEADERS!") - with open(output, "w") as f: - f.write("#define Py_BUILD_CORE\n") - f.write("#include \"Modules/expat/expat.h\"\n") - for header in headers.split(): - normalized_path = normalize_path(header) - if normalized_path.startswith(SKIP_PREFIXES): - continue - if normalized_path in SKIP_EXACT: - continue - f.write(f"#include \"{normalized_path}\"\n") - if normalized_path == "Python/remote_debug.h": - f.write("#undef UNUSED\n") - -if __name__ == "__main__": - import sys - main(*sys.argv[1:]) diff --git a/configure b/configure index 0a114e20c00c30..fb959ad79bf6cb 100755 --- a/configure +++ b/configure @@ -890,7 +890,7 @@ LIBSQLITE3_CFLAGS CARGO_PROFILE CARGO_TARGET_DIR CARGO_HOME -HAS_CARGO +HAVE_CARGO LIBMPDEC_INTERNAL LIBMPDEC_LIBS LIBMPDEC_CFLAGS @@ -1122,7 +1122,6 @@ with_libs with_system_expat with_system_libmpdec with_decimal_contextvar -with_rust_base64 
enable_loadable_sqlite_extensions with_dbmliborder enable_ipv6 @@ -1924,8 +1923,6 @@ Optional Packages: --with-decimal-contextvar build _decimal module using a coroutine-local rather than a thread-local context (default is yes) - --with-rust-base64 build _base64 module using the SIMD accelerated Rust - implementation --with-dbmliborder=db1:db2:... override order to check db backends for dbm; a valid value is a colon separated string with the backend @@ -16043,40 +16040,26 @@ fi -{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for --with-rust-base64" >&5 -printf %s "checking for --with-rust-base64... " >&6; } - -# Check whether --with-rust_base64 was given. -if test ${with_rust_base64+y} -then : - withval=$with_rust_base64; rust_base64="yes" -else case e in #( - e) rust_base64="no" ;; -esac +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for Rust" >&5 +printf %s "checking for Rust... " >&6; } +CARGO_TARGET_DIR= +CARGO_PROFILE= +if test "x$CARGO_HOME" = "x"; then + CARGO_HOME="$HOME/.cargo" fi - -{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $rust_base64" >&5 -printf "%s\n" "$rust_base64" >&6; } - -if test "x$rust_base64" = xyes -then : - - if test "$CARGO_HOME+set" != "set"; then - CARGO_HOME="$HOME/.cargo" - fi - # Extract the first word of "cargo", so it can be a program name with args. +# Extract the first word of "cargo", so it can be a program name with args. set dummy cargo; ac_word=$2 { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 printf %s "checking for $ac_word... " >&6; } -if test ${ac_cv_prog_HAS_CARGO+y} +if test ${ac_cv_prog_HAVE_CARGO+y} then : printf %s "(cached) " >&6 else case e in #( - e) if test -n "$HAS_CARGO"; then - ac_cv_prog_HAS_CARGO="$HAS_CARGO" # Let the user override the test. + e) if test -n "$HAVE_CARGO"; then + ac_cv_prog_HAVE_CARGO="$HAVE_CARGO" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in not-found +for as_dir in no do IFS=$as_save_IFS case $as_dir in #((( @@ -16086,7 +16069,7 @@ do esac for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then - ac_cv_prog_HAS_CARGO=""$CARGO_HOME"" + ac_cv_prog_HAVE_CARGO=""$CARGO_HOME"" printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 break 2 fi @@ -16094,36 +16077,37 @@ done done IFS=$as_save_IFS - test -z "$ac_cv_prog_HAS_CARGO" && ac_cv_prog_HAS_CARGO="found" + test -z "$ac_cv_prog_HAVE_CARGO" && ac_cv_prog_HAVE_CARGO="yes" fi ;; esac fi -HAS_CARGO=$ac_cv_prog_HAS_CARGO -if test -n "$HAS_CARGO"; then - { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $HAS_CARGO" >&5 -printf "%s\n" "$HAS_CARGO" >&6; } +HAVE_CARGO=$ac_cv_prog_HAVE_CARGO +if test -n "$HAVE_CARGO"; then + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $HAVE_CARGO" >&5 +printf "%s\n" "$HAVE_CARGO" >&6; } else { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 printf "%s\n" "no" >&6; } fi - if test $HAS_CARGO = "not-found"; then - as_fn_error $? "Could not find cargo. Please re-run configure with \$CARGO_HOME set" "$LINENO" 5 - fi - if test "$Py_OPT" = 'true'; then - CARGO_TARGET_DIR='release' - CARGO_PROFILE='release' - else - CARGO_TARGET_DIR='debug' - CARGO_PROFILE='dev' - fi - - +if test $HAVE_CARGO = "no"; then + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: n/a" >&5 +printf "%s\n" "n/a" >&6; } + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: Could not find the cargo executable. 
It can be installed via rustup" >&5 +printf "%s\n" "$as_me: WARNING: Could not find the cargo executable. It can be installed via rustup" >&2;} +else + if test "$Py_OPT" = 'true'; then + CARGO_TARGET_DIR='release' + CARGO_PROFILE='release' + else + CARGO_TARGET_DIR='debug' + CARGO_PROFILE='dev' + fi +fi -fi @@ -33575,9 +33559,9 @@ printf %s "checking for stdlib extension module _base64... " >&6; } if test "$py_cv_module__base64" != "n/a" then : - if true + if test "$HAVE_CARGO" = yes then : - if test "$rust_base64" = "yes" + if test "$HAVE_CARGO" = yes then : py_cv_module__base64=yes else case e in #( diff --git a/configure.ac b/configure.ac index 9b1a035f0ccc2f..83a4c006c4feca 100644 --- a/configure.ac +++ b/configure.ac @@ -4309,40 +4309,29 @@ AC_SUBST([LIBMPDEC_INTERNAL]) dnl Try to detect cargo in the environment. Cargo and rustup dnl install into CARGO_HOME and RUSTUP_HOME, so check for those initially -AC_MSG_CHECKING([for --with-rust-base64]) -AC_ARG_WITH( - [rust_base64], - [AS_HELP_STRING( - [--with-rust-base64], - [build _base64 module using the SIMD accelerated Rust implementation] - )], - [rust_base64="yes"], - [rust_base64="no"]) -AC_MSG_RESULT([$rust_base64]) - -AS_VAR_IF( - [rust_base64], [yes], - [ - if test "$CARGO_HOME+set" != "set"; then - dnl try to guess the default UNIX value of ~/.cargo - CARGO_HOME="$HOME/.cargo" - fi - AC_CHECK_PROG(HAS_CARGO, [cargo], ["$CARGO_HOME"], [found], [not-found]) - if test $HAS_CARGO = "not-found"; then - AC_MSG_ERROR([Could not find cargo. Please re-run configure with \$CARGO_HOME set]) - fi - if test "$Py_OPT" = 'true'; then - CARGO_TARGET_DIR='release' - CARGO_PROFILE='release' - else - CARGO_TARGET_DIR='debug' - CARGO_PROFILE='dev' - fi - AC_SUBST([CARGO_HOME]) - AC_SUBST([CARGO_TARGET_DIR]) - AC_SUBST([CARGO_PROFILE]) - ] -) +AC_MSG_CHECKING([for Rust]) +CARGO_TARGET_DIR= +CARGO_PROFILE= +if test "x$CARGO_HOME" = "x"; then + dnl try to guess the default UNIX value of ~/.cargo + CARGO_HOME="$HOME/.cargo" +fi +AC_CHECK_PROG(HAVE_CARGO, [cargo], ["$CARGO_HOME"], [yes], [no]) +if test $HAVE_CARGO = "no"; then + AC_MSG_RESULT([n/a]) + AC_MSG_WARN([Could not find the cargo executable. 
It can be installed via rustup]) +else + if test "$Py_OPT" = 'true'; then + CARGO_TARGET_DIR='release' + CARGO_PROFILE='release' + else + CARGO_TARGET_DIR='debug' + CARGO_PROFILE='dev' + fi +fi +AC_SUBST([CARGO_HOME]) +AC_SUBST([CARGO_TARGET_DIR]) +AC_SUBST([CARGO_PROFILE]) dnl detect sqlite3 from Emscripten emport @@ -8194,7 +8183,7 @@ PY_STDLIB_MOD([_uuid], [$LIBUUID_CFLAGS], [$LIBUUID_LIBS]) PY_STDLIB_MOD([_base64], - [], [test "$rust_base64" = "yes"], + [test "$HAVE_CARGO" = yes], [test "$HAVE_CARGO" = yes], [], []) dnl compression libs From c9deee600d60509c5da6ef538a9b530f7ba12e05 Mon Sep 17 00:00:00 2001 From: Kirill Podoprigora <kirill.bast9@mail.ru> Date: Mon, 17 Nov 2025 17:20:06 +0200 Subject: [PATCH 09/20] Make Rust build work on more platforms (#16) --- Makefile.pre.in | 2 +- Modules/cpython-sys/build.rs | 50 +++++++++++++++++---- Modules/cpython-sys/src/lib.rs | 8 ++++ Modules/makesetup | 2 +- Python/stdlib_module_names.h | 1 + Tools/build/generate_stdlib_module_names.py | 16 +++++++ 6 files changed, 69 insertions(+), 10 deletions(-) diff --git a/Makefile.pre.in b/Makefile.pre.in index a27a9709330772..0db27b6c46fa68 100644 --- a/Makefile.pre.in +++ b/Makefile.pre.in @@ -3373,7 +3373,7 @@ Python/thread.o: @THREADHEADERS@ $(srcdir)/Python/condvar.h # Module dependencies and platform-specific files cpython-sys: Modules/cpython-sys/Cargo.toml Modules/cpython-sys/build.rs Modules/cpython-sys/wrapper.h Modules/cpython-sys/parser.h - cargo build --lib --locked --package cpython-sys --profile $(CARGO_PROFILE) + CARGO_TARGET_DIR=$(abs_builddir)/target PYTHON_BUILD_DIR=$(abs_builddir) cargo build --lib --locked --package cpython-sys --profile $(CARGO_PROFILE) --manifest-path $(srcdir)/Cargo.toml # force rebuild when header file or module build flavor (static/shared) is changed MODULE_DEPS_STATIC=Modules/config.c diff --git a/Modules/cpython-sys/build.rs b/Modules/cpython-sys/build.rs index 71b73b477fd34d..680066c4fd5e9d 100644 --- a/Modules/cpython-sys/build.rs +++ b/Modules/cpython-sys/build.rs @@ -2,21 +2,55 @@ use std::env; use std::path::{Path, PathBuf}; fn main() { - let curdir = std::env::current_dir().unwrap(); - let srcdir = curdir.parent().and_then(Path::parent).unwrap(); + let manifest_dir = PathBuf::from(env::var("CARGO_MANIFEST_DIR").unwrap()); + let srcdir = manifest_dir + .parent() + .and_then(Path::parent) + .expect("expected Modules/cpython-sys to live under the source tree"); let out_path = PathBuf::from(env::var("OUT_DIR").unwrap()); - generate_c_api_bindings(srcdir, &out_path.as_path()); + let builddir = env::var("PYTHON_BUILD_DIR").ok(); + if gil_disabled(&srcdir, builddir.as_deref()) { + println!("cargo:rustc-cfg=py_gil_disabled"); + } + generate_c_api_bindings(srcdir, builddir.as_deref(), &out_path.as_path()); // TODO(emmatyping): generate bindings to the internal parser API // The parser includes things slightly differently, so we should generate // it's bindings independently //generate_parser_bindings(srcdir, &out_path.as_path()); } -fn generate_c_api_bindings(srcdir: &Path, out_path: &Path) { - let bindings = bindgen::Builder::default() - .header("wrapper.h") - .clang_arg(format!("-I{}", srcdir.as_os_str().to_str().unwrap())) - .clang_arg(format!("-I{}/Include", srcdir.as_os_str().to_str().unwrap())) +fn gil_disabled(srcdir: &Path, builddir: Option<&str>) -> bool { + let mut candidates = Vec::new(); + if let Some(build) = builddir { + candidates.push(PathBuf::from(build)); + } + candidates.push(srcdir.to_path_buf()); + for base in candidates { + let path = 
base.join("pyconfig.h"); + if let Ok(contents) = std::fs::read_to_string(&path) { + if contents.contains("Py_GIL_DISABLED 1") { + return true; + } + } + } + false +} + +fn generate_c_api_bindings(srcdir: &Path, builddir: Option<&str>, out_path: &Path) { + let mut builder = bindgen::Builder::default().header("wrapper.h"); + + // Always search the source dir and the public headers. + let mut include_dirs = vec![srcdir.to_path_buf(), srcdir.join("Include")]; + // Include the build directory if provided; out-of-tree builds place + // the generated pyconfig.h there. + if let Some(build) = builddir { + include_dirs.push(PathBuf::from(build)); + } + for dir in include_dirs { + builder = builder.clang_arg(format!("-I{}", dir.display())); + } + + let bindings = builder .allowlist_function("_?Py.*") .allowlist_type("_?Py.*") .allowlist_var("_?Py.*") diff --git a/Modules/cpython-sys/src/lib.rs b/Modules/cpython-sys/src/lib.rs index d3e21662a68472..ed1d68eedd600a 100644 --- a/Modules/cpython-sys/src/lib.rs +++ b/Modules/cpython-sys/src/lib.rs @@ -111,6 +111,14 @@ impl PyMethodDef { unsafe impl Sync for PyMethodDef {} unsafe impl Send for PyMethodDef {} +#[cfg(py_gil_disabled)] +pub const PyObject_HEAD_INIT: PyObject = { + let mut obj: PyObject = unsafe { std::mem::MaybeUninit::zeroed().assume_init() }; + obj.ob_flags = _Py_STATICALLY_ALLOCATED_FLAG as _; + obj +}; + +#[cfg(not(py_gil_disabled))] pub const PyObject_HEAD_INIT: PyObject = PyObject { __bindgen_anon_1: _object__bindgen_ty_1 { ob_refcnt_full: _Py_STATIC_IMMORTAL_INITIAL_REFCNT as i64, diff --git a/Modules/makesetup b/Modules/makesetup index b5fb40994a010b..0368f1d017ac87 100755 --- a/Modules/makesetup +++ b/Modules/makesetup @@ -287,7 +287,7 @@ sed -e 's/[ ]*#.*//' -e '/^[ ]*$/d' | libs= # depends on the headers through cpython-sys rule="$objs: cpython-sys \$(srcdir)/Cargo.toml \$(srcdir)/Cargo.lock \$(srcdir)/$srcdir/$manifest $prefixed_srcs \$(PYTHON_HEADERS)" - rule="$rule; cargo build --lib --locked --package ${mods} --profile \$(CARGO_PROFILE)" + rule="$rule; CARGO_TARGET_DIR=\$(abs_builddir)/target PYTHON_BUILD_DIR=\$(abs_builddir) cargo build --lib --locked --package ${mods} --profile \$(CARGO_PROFILE) --manifest-path \$(srcdir)/Cargo.toml" echo "$rule" >>$rulesf for mod in $mods do diff --git a/Python/stdlib_module_names.h b/Python/stdlib_module_names.h index 8937e666bbbdd5..54cbc7106069bc 100644 --- a/Python/stdlib_module_names.h +++ b/Python/stdlib_module_names.h @@ -10,6 +10,7 @@ static const char* _Py_stdlib_module_names[] = { "_ast", "_ast_unparse", "_asyncio", +"_base64", "_bisect", "_blake2", "_bz2", diff --git a/Tools/build/generate_stdlib_module_names.py b/Tools/build/generate_stdlib_module_names.py index bda72539640611..646f31b49761c1 100644 --- a/Tools/build/generate_stdlib_module_names.py +++ b/Tools/build/generate_stdlib_module_names.py @@ -3,6 +3,7 @@ from __future__ import annotations import _imp +import os import os.path import sys import sysconfig @@ -14,6 +15,7 @@ SRC_DIR = os.path.dirname(os.path.dirname(os.path.dirname(__file__))) STDLIB_PATH = os.path.join(SRC_DIR, 'Lib') +MODULES_PATH = os.path.join(SRC_DIR, 'Modules') IGNORE = { '__init__', @@ -84,6 +86,19 @@ def list_modules_setup_extensions(names: set[str]) -> None: names.update(checker.list_module_names(all=True)) +def list_rust_modules(names: set[str]) -> None: + if not os.path.isdir(MODULES_PATH): + return + for entry in os.scandir(MODULES_PATH): + if not entry.is_dir(): + continue + if entry.name == "cpython-sys": + continue + cargo_toml = 
os.path.join(entry.path, "Cargo.toml") + if os.path.isfile(cargo_toml): + names.add(entry.name) + + # List frozen modules of the PyImport_FrozenModules list (Python/frozen.c). # Use the "./Programs/_testembed list_frozen" command. def list_frozen(names: set[str]) -> None: @@ -109,6 +124,7 @@ def list_modules() -> set[str]: list_builtin_modules(names) list_modules_setup_extensions(names) + list_rust_modules(names) list_packages(names) list_python_modules(names) list_frozen(names) From 071b7f1d795ec5df613bf1782daf2e96b1883168 Mon Sep 17 00:00:00 2001 From: "Jeong, YunWon" <69878+youknowone@users.noreply.github.com> Date: Wed, 19 Nov 2025 14:52:40 +0900 Subject: [PATCH 10/20] Fix cargo path and add CARGO_TARGET for cross compile (#19) There are still some issues with compilation, but those can be sorted out in a future PR. --- Makefile.pre.in | 3 ++- Modules/makesetup | 2 +- configure | 11 +++++++++++ configure.ac | 10 ++++++++++ 4 files changed, 24 insertions(+), 2 deletions(-) diff --git a/Makefile.pre.in b/Makefile.pre.in index 0db27b6c46fa68..10f299819cd40f 100644 --- a/Makefile.pre.in +++ b/Makefile.pre.in @@ -61,6 +61,7 @@ DSYMUTIL_PATH= @DSYMUTIL_PATH@ CARGO_HOME=@CARGO_HOME@ CARGO_TARGET_DIR=@CARGO_TARGET_DIR@ CARGO_PROFILE=@CARGO_PROFILE@ +CARGO_TARGET=@CARGO_TARGET@ GNULD= @GNULD@ @@ -3373,7 +3374,7 @@ Python/thread.o: @THREADHEADERS@ $(srcdir)/Python/condvar.h # Module dependencies and platform-specific files cpython-sys: Modules/cpython-sys/Cargo.toml Modules/cpython-sys/build.rs Modules/cpython-sys/wrapper.h Modules/cpython-sys/parser.h - CARGO_TARGET_DIR=$(abs_builddir)/target PYTHON_BUILD_DIR=$(abs_builddir) cargo build --lib --locked --package cpython-sys --profile $(CARGO_PROFILE) --manifest-path $(srcdir)/Cargo.toml + CARGO_TARGET_DIR=$(abs_builddir)/target PYTHON_BUILD_DIR=$(abs_builddir) \$(CARGO_HOME)/bin/cargo build --lib --locked --package cpython-sys --profile $(CARGO_PROFILE) $(if $(CARGO_TARGET),--target=$(CARGO_TARGET)) --manifest-path $(srcdir)/Cargo.toml # force rebuild when header file or module build flavor (static/shared) is changed MODULE_DEPS_STATIC=Modules/config.c diff --git a/Modules/makesetup b/Modules/makesetup index 0368f1d017ac87..586e26dd5891b0 100755 --- a/Modules/makesetup +++ b/Modules/makesetup @@ -287,7 +287,7 @@ sed -e 's/[ ]*#.*//' -e '/^[ ]*$/d' | libs= # depends on the headers through cpython-sys rule="$objs: cpython-sys \$(srcdir)/Cargo.toml \$(srcdir)/Cargo.lock \$(srcdir)/$srcdir/$manifest $prefixed_srcs \$(PYTHON_HEADERS)" - rule="$rule; CARGO_TARGET_DIR=\$(abs_builddir)/target PYTHON_BUILD_DIR=\$(abs_builddir) cargo build --lib --locked --package ${mods} --profile \$(CARGO_PROFILE) --manifest-path \$(srcdir)/Cargo.toml" + rule="$rule; CARGO_TARGET_DIR=\$(abs_builddir)/target PYTHON_BUILD_DIR=\$(abs_builddir) \$(CARGO_HOME)/bin/cargo build --lib --locked --package ${mods} --profile \$(CARGO_PROFILE) \$(if \$(CARGO_TARGET),--target=\$(CARGO_TARGET)) --manifest-path \$(srcdir)/Cargo.toml" echo "$rule" >>$rulesf for mod in $mods do diff --git a/configure b/configure index fb959ad79bf6cb..8af7ee0e042e81 100755 --- a/configure +++ b/configure @@ -887,6 +887,7 @@ TCLTK_LIBS TCLTK_CFLAGS LIBSQLITE3_LIBS LIBSQLITE3_CFLAGS +CARGO_TARGET CARGO_PROFILE CARGO_TARGET_DIR CARGO_HOME @@ -16104,6 +16105,15 @@ else CARGO_TARGET_DIR='debug' CARGO_PROFILE='dev' fi + # Set CARGO_TARGET for cross-compilation + case "$host" in + aarch64-apple-ios-simulator) + CARGO_TARGET="aarch64-apple-ios-sim" + ;; + *) + CARGO_TARGET="$host" + ;; + esac fi @@ -16113,6 
+16123,7 @@ fi + if test "$ac_sys_system" = "Emscripten" -a -z "$LIBSQLITE3_CFLAGS" -a -z "$LIBSQLITE3_LIBS" then : diff --git a/configure.ac b/configure.ac index 83a4c006c4feca..a3cc99c8add8c3 100644 --- a/configure.ac +++ b/configure.ac @@ -4328,10 +4328,20 @@ else CARGO_TARGET_DIR='debug' CARGO_PROFILE='dev' fi + # Set CARGO_TARGET for cross-compilation + case "$host" in + aarch64-apple-ios-simulator) + CARGO_TARGET="aarch64-apple-ios-sim" + ;; + *) + CARGO_TARGET="$host" + ;; + esac fi AC_SUBST([CARGO_HOME]) AC_SUBST([CARGO_TARGET_DIR]) AC_SUBST([CARGO_PROFILE]) +AC_SUBST([CARGO_TARGET]) dnl detect sqlite3 from Emscripten emport From c66a76e829bc609a371f34e905b1b03736f109a9 Mon Sep 17 00:00:00 2001 From: "Jeong, YunWon" <69878+youknowone@users.noreply.github.com> Date: Tue, 25 Nov 2025 05:53:43 +0900 Subject: [PATCH 11/20] Split standard_b64encode_impl (#17) Also make PyObject an UnsafeCell<ffi::_object> so it can be passed around by & reference --- Modules/_base64/src/lib.rs | 39 ++++++++++++++++++++++------------ Modules/cpython-sys/build.rs | 1 + Modules/cpython-sys/src/lib.rs | 19 +++++++++++++---- 3 files changed, 41 insertions(+), 18 deletions(-) diff --git a/Modules/_base64/src/lib.rs b/Modules/_base64/src/lib.rs index f308a61daf301e..49fd7930045c0b 100644 --- a/Modules/_base64/src/lib.rs +++ b/Modules/_base64/src/lib.rs @@ -83,9 +83,9 @@ struct BorrowedBuffer { } impl BorrowedBuffer { - unsafe fn from_object(obj: *mut PyObject) -> Result<Self, ()> { + fn from_object(obj: &PyObject) -> Result<Self, ()> { let mut view = MaybeUninit::<Py_buffer>::uninit(); - if unsafe { PyObject_GetBuffer(obj, view.as_mut_ptr(), PYBUF_SIMPLE) } != 0 { + if unsafe { PyObject_GetBuffer(obj.as_raw(), view.as_mut_ptr(), PYBUF_SIMPLE) } != 0 { return Err(()); } Ok(Self { @@ -110,6 +110,9 @@ impl Drop for BorrowedBuffer { } } +/// # Safety +/// `module` must be a valid pointer of PyObject representing the module. +/// `args` must be a valid pointer to an array of valid PyObject pointers with length `nargs`. 
#[unsafe(no_mangle)] pub unsafe extern "C" fn standard_b64encode( _module: *mut PyObject, @@ -126,10 +129,19 @@ pub unsafe extern "C" fn standard_b64encode( return ptr::null_mut(); } - let source = unsafe { *args }; - let buffer = match unsafe { BorrowedBuffer::from_object(source) } { + let source = unsafe { &**args }; + + // Safe cast by Safety + match standard_b64encode_impl(source) { + Ok(result) => result, + Err(_) => ptr::null_mut(), + } +} + +fn standard_b64encode_impl(source: &PyObject) -> Result<*mut PyObject, ()> { + let buffer = match BorrowedBuffer::from_object(source) { Ok(buf) => buf, - Err(_) => return ptr::null_mut(), + Err(_) => return Err(()), }; let view_len = buffer.len(); @@ -140,8 +152,9 @@ pub unsafe extern "C" fn standard_b64encode( c"standard_b64encode() argument has negative length".as_ptr(), ); } - return ptr::null_mut(); + return Err(()); } + let input_len = view_len as usize; let input = unsafe { slice::from_raw_parts(buffer.as_ptr(), input_len) }; @@ -149,21 +162,19 @@ pub unsafe extern "C" fn standard_b64encode( unsafe { PyErr_NoMemory(); } - return ptr::null_mut(); + return Err(()); }; if output_len > isize::MAX as usize { unsafe { PyErr_NoMemory(); } - return ptr::null_mut(); + return Err(()); } - let result = unsafe { - PyBytes_FromStringAndSize(ptr::null(), output_len as Py_ssize_t) - }; + let result = unsafe { PyBytes_FromStringAndSize(ptr::null(), output_len as Py_ssize_t) }; if result.is_null() { - return ptr::null_mut(); + return Err(()); } let dest_ptr = unsafe { PyBytes_AsString(result) }; @@ -171,13 +182,13 @@ pub unsafe extern "C" fn standard_b64encode( unsafe { Py_DecRef(result); } - return ptr::null_mut(); + return Err(()); } let dest = unsafe { slice::from_raw_parts_mut(dest_ptr.cast::<u8>(), output_len) }; let written = encode_into(input, dest); debug_assert_eq!(written, output_len); - result + Ok(result) } #[unsafe(no_mangle)] diff --git a/Modules/cpython-sys/build.rs b/Modules/cpython-sys/build.rs index 680066c4fd5e9d..8256e2fc93cd03 100644 --- a/Modules/cpython-sys/build.rs +++ b/Modules/cpython-sys/build.rs @@ -55,6 +55,7 @@ fn generate_c_api_bindings(srcdir: &Path, builddir: Option<&str>, out_path: &Pat .allowlist_type("_?Py.*") .allowlist_var("_?Py.*") .blocklist_type("^PyMethodDef$") + .blocklist_type("PyObject") .parse_callbacks(Box::new(bindgen::CargoCallbacks::new())) .generate() .expect("Unable to generate bindings"); diff --git a/Modules/cpython-sys/src/lib.rs b/Modules/cpython-sys/src/lib.rs index ed1d68eedd600a..9a3c46b34d8c36 100644 --- a/Modules/cpython-sys/src/lib.rs +++ b/Modules/cpython-sys/src/lib.rs @@ -56,6 +56,17 @@ pub const _Py_STATIC_IMMORTAL_INITIAL_REFCNT: Py_ssize_t = #[cfg(not(target_pointer_width = "64"))] pub const _Py_STATIC_IMMORTAL_INITIAL_REFCNT: Py_ssize_t = 7u32 << 28; +#[repr(transparent)] +pub struct PyObject(std::cell::UnsafeCell<_object>); + +impl PyObject { + #[inline] + pub fn as_raw(&self) -> *mut Self { + self.0.get() as *mut Self + } +} + + #[repr(C)] pub union PyMethodDefFuncPointer { pub PyCFunction: unsafe extern "C" fn(slf: *mut PyObject, args: *mut PyObject) -> *mut PyObject, @@ -113,18 +124,18 @@ unsafe impl Send for PyMethodDef {} #[cfg(py_gil_disabled)] pub const PyObject_HEAD_INIT: PyObject = { - let mut obj: PyObject = unsafe { std::mem::MaybeUninit::zeroed().assume_init() }; + let mut obj: _object = unsafe { std::mem::MaybeUninit::zeroed().assume_init() }; obj.ob_flags = _Py_STATICALLY_ALLOCATED_FLAG as _; - obj + PyObject(std::cell::UnsafeCell::new(obj)) }; #[cfg(not(py_gil_disabled))] 
-pub const PyObject_HEAD_INIT: PyObject = PyObject { +pub const PyObject_HEAD_INIT: PyObject = PyObject(std::cell::UnsafeCell::new(_object { __bindgen_anon_1: _object__bindgen_ty_1 { ob_refcnt_full: _Py_STATIC_IMMORTAL_INITIAL_REFCNT as i64, }, ob_type: std::ptr::null_mut(), -}; +})); pub const PyModuleDef_HEAD_INIT: PyModuleDef_Base = PyModuleDef_Base { ob_base: PyObject_HEAD_INIT, From c913bd2e4a5d679486ffa31006d53e497f1aa335 Mon Sep 17 00:00:00 2001 From: "Jeong, YunWon" <69878+youknowone@users.noreply.github.com> Date: Tue, 25 Nov 2025 13:48:37 +0900 Subject: [PATCH 12/20] Group unsafe BorrowedBuffer initialization (#22) --- Modules/_base64/src/lib.rs | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/Modules/_base64/src/lib.rs b/Modules/_base64/src/lib.rs index 49fd7930045c0b..d612c20d26b9e5 100644 --- a/Modules/_base64/src/lib.rs +++ b/Modules/_base64/src/lib.rs @@ -85,12 +85,15 @@ struct BorrowedBuffer { impl BorrowedBuffer { fn from_object(obj: &PyObject) -> Result<Self, ()> { let mut view = MaybeUninit::<Py_buffer>::uninit(); - if unsafe { PyObject_GetBuffer(obj.as_raw(), view.as_mut_ptr(), PYBUF_SIMPLE) } != 0 { - return Err(()); - } - Ok(Self { - view: unsafe { view.assume_init() }, - }) + let buffer = unsafe { + if PyObject_GetBuffer(obj.as_raw(), view.as_mut_ptr(), PYBUF_SIMPLE) != 0 { + return Err(()); + } + Self { + view: view.assume_init(), + } + }; + Ok(buffer) } fn len(&self) -> Py_ssize_t { From e7fe182b817960bdc57330acc98b7a8e5bfbbec6 Mon Sep 17 00:00:00 2001 From: "Jeong, YunWon" <69878+youknowone@users.noreply.github.com> Date: Tue, 25 Nov 2025 13:49:20 +0900 Subject: [PATCH 13/20] Run rustfmt (#21) --- Modules/_base64/src/lib.rs | 21 ++++++++++----------- Modules/cpython-sys/src/lib.rs | 1 - 2 files changed, 10 insertions(+), 12 deletions(-) diff --git a/Modules/_base64/src/lib.rs b/Modules/_base64/src/lib.rs index d612c20d26b9e5..7e13438ca21c6f 100644 --- a/Modules/_base64/src/lib.rs +++ b/Modules/_base64/src/lib.rs @@ -5,9 +5,15 @@ use std::ptr; use std::slice; use cpython_sys::METH_FASTCALL; +use cpython_sys::Py_DecRef; +use cpython_sys::Py_buffer; +use cpython_sys::Py_ssize_t; +use cpython_sys::PyBuffer_Release; use cpython_sys::PyBytes_AsString; use cpython_sys::PyBytes_FromStringAndSize; -use cpython_sys::PyBuffer_Release; +use cpython_sys::PyErr_NoMemory; +use cpython_sys::PyErr_SetString; +use cpython_sys::PyExc_TypeError; use cpython_sys::PyMethodDef; use cpython_sys::PyMethodDefFuncPointer; use cpython_sys::PyModuleDef; @@ -15,17 +21,10 @@ use cpython_sys::PyModuleDef_HEAD_INIT; use cpython_sys::PyModuleDef_Init; use cpython_sys::PyObject; use cpython_sys::PyObject_GetBuffer; -use cpython_sys::Py_DecRef; -use cpython_sys::PyErr_NoMemory; -use cpython_sys::PyErr_SetString; -use cpython_sys::PyExc_TypeError; -use cpython_sys::Py_buffer; -use cpython_sys::Py_ssize_t; const PYBUF_SIMPLE: c_int = 0; const PAD_BYTE: u8 = b'='; -const ENCODE_TABLE: [u8; 64] = - *b"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"; +const ENCODE_TABLE: [u8; 64] = *b"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"; #[inline] fn encoded_output_len(input_len: usize) -> Option<usize> { @@ -64,8 +63,8 @@ fn encode_into(input: &[u8], output: &mut [u8]) -> usize { dst_index += 4; } 2 => { - let chunk = (u32::from(input[src_index]) << 16) - | (u32::from(input[src_index + 1]) << 8); + let chunk = + (u32::from(input[src_index]) << 16) | (u32::from(input[src_index + 1]) << 8); output[dst_index] = ENCODE_TABLE[((chunk >> 18) & 
0x3f) as usize]; output[dst_index + 1] = ENCODE_TABLE[((chunk >> 12) & 0x3f) as usize]; output[dst_index + 2] = ENCODE_TABLE[((chunk >> 6) & 0x3f) as usize]; diff --git a/Modules/cpython-sys/src/lib.rs b/Modules/cpython-sys/src/lib.rs index 9a3c46b34d8c36..27765abd4b7fe2 100644 --- a/Modules/cpython-sys/src/lib.rs +++ b/Modules/cpython-sys/src/lib.rs @@ -66,7 +66,6 @@ impl PyObject { } } - #[repr(C)] pub union PyMethodDefFuncPointer { pub PyCFunction: unsafe extern "C" fn(slf: *mut PyObject, args: *mut PyObject) -> *mut PyObject, From 688968d3d1453603503d25cc093e911b7c9a1eb6 Mon Sep 17 00:00:00 2001 From: Kirill Podoprigora <kirill.bast9@mail.ru> Date: Mon, 15 Dec 2025 20:22:21 +0200 Subject: [PATCH 14/20] fix build --- configure | 10 +++++++++- configure.ac | 9 +++++++++ 2 files changed, 18 insertions(+), 1 deletion(-) diff --git a/configure b/configure index 8af7ee0e042e81..98175608ffa45a 100755 --- a/configure +++ b/configure @@ -16110,6 +16110,15 @@ else aarch64-apple-ios-simulator) CARGO_TARGET="aarch64-apple-ios-sim" ;; + *-apple-darwin*) + cargo_host="$host" + case "$cargo_host" in + arm64-apple-*) + cargo_host="aarch64${cargo_host#arm64}" + ;; + esac + CARGO_TARGET="${cargo_host%%-apple-darwin*}-apple-darwin" + ;; *) CARGO_TARGET="$host" ;; @@ -36175,4 +36184,3 @@ if test "$ac_cv_header_stdatomic_h" != "yes"; then { printf "%s\n" "$as_me:${as_lineno-$LINENO}: Your compiler or platform does have a working C11 stdatomic.h. A future version of Python may require stdatomic.h." >&5 printf "%s\n" "$as_me: Your compiler or platform does have a working C11 stdatomic.h. A future version of Python may require stdatomic.h." >&6;} fi - diff --git a/configure.ac b/configure.ac index a3cc99c8add8c3..91633ebbf342ae 100644 --- a/configure.ac +++ b/configure.ac @@ -4333,6 +4333,15 @@ else aarch64-apple-ios-simulator) CARGO_TARGET="aarch64-apple-ios-sim" ;; + *-apple-darwin*) + cargo_host="$host" + case "$cargo_host" in + arm64-apple-*) + cargo_host="aarch64${cargo_host#arm64}" + ;; + esac + CARGO_TARGET="${cargo_host%%-apple-darwin*}-apple-darwin" + ;; *) CARGO_TARGET="$host" ;; From 8f3946ab251e8453f92e0a2f1a9f7af112545452 Mon Sep 17 00:00:00 2001 From: Kirill Podoprigora <kirill.bast9@mail.ru> Date: Mon, 15 Dec 2025 20:27:37 +0200 Subject: [PATCH 15/20] Adjust makesetup --- Modules/makesetup | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Modules/makesetup b/Modules/makesetup index 586e26dd5891b0..bd33c8fb2804c7 100755 --- a/Modules/makesetup +++ b/Modules/makesetup @@ -282,7 +282,7 @@ sed -e 's/[ ]*#.*//' -e '/^[ ]*$/d' | # there's actually only one obj, so just set it to the lib for lib in $libs do - objs="target/\$(CARGO_TARGET_DIR)/$lib" + objs="target/\$(if \$(CARGO_TARGET),\$(CARGO_TARGET)/\$(CARGO_TARGET_DIR),\$(CARGO_TARGET_DIR))/$lib" done libs= # depends on the headers through cpython-sys From dea8c3d11a9c4ee038809f739bb91fca02dd4b62 Mon Sep 17 00:00:00 2001 From: Kirill Podoprigora <kirill.bast9@mail.ru> Date: Mon, 15 Dec 2025 20:38:22 +0200 Subject: [PATCH 16/20] Check if this helps for android --- configure | 4 ++++ configure.ac | 4 ++++ 2 files changed, 8 insertions(+) diff --git a/configure b/configure index 98175608ffa45a..56a85d9b813e55 100755 --- a/configure +++ b/configure @@ -16119,6 +16119,10 @@ else esac CARGO_TARGET="${cargo_host%%-apple-darwin*}-apple-darwin" ;; + *-linux-android*) + cargo_host="${host/-unknown-/-}" + CARGO_TARGET="$cargo_host" + ;; *) CARGO_TARGET="$host" ;; diff --git a/configure.ac b/configure.ac index 91633ebbf342ae..6d26503afb075f 
100644 --- a/configure.ac +++ b/configure.ac @@ -4342,6 +4342,10 @@ else esac CARGO_TARGET="${cargo_host%%-apple-darwin*}-apple-darwin" ;; + *-linux-android*) + cargo_host="${host/-unknown-/-}" + CARGO_TARGET="$cargo_host" + ;; *) CARGO_TARGET="$host" ;; From 640feb00b2582bb40b86875b989411549a92d48a Mon Sep 17 00:00:00 2001 From: Kirill Podoprigora <kirill.bast9@mail.ru> Date: Tue, 16 Dec 2025 19:18:44 +0200 Subject: [PATCH 17/20] Remove android workaround & we'll deal with that later --- configure | 4 ---- configure.ac | 4 ---- 2 files changed, 8 deletions(-) diff --git a/configure b/configure index 56a85d9b813e55..98175608ffa45a 100755 --- a/configure +++ b/configure @@ -16119,10 +16119,6 @@ else esac CARGO_TARGET="${cargo_host%%-apple-darwin*}-apple-darwin" ;; - *-linux-android*) - cargo_host="${host/-unknown-/-}" - CARGO_TARGET="$cargo_host" - ;; *) CARGO_TARGET="$host" ;; diff --git a/configure.ac b/configure.ac index 6d26503afb075f..91633ebbf342ae 100644 --- a/configure.ac +++ b/configure.ac @@ -4342,10 +4342,6 @@ else esac CARGO_TARGET="${cargo_host%%-apple-darwin*}-apple-darwin" ;; - *-linux-android*) - cargo_host="${host/-unknown-/-}" - CARGO_TARGET="$cargo_host" - ;; *) CARGO_TARGET="$host" ;; From 2f79ddeb03e112025d4933b43537ce34a1146458 Mon Sep 17 00:00:00 2001 From: alexey semenyuk <alexsemenyuk88@gmail.com> Date: Sat, 27 Dec 2025 04:21:51 +0500 Subject: [PATCH 18/20] Add a specific case for Linux targets to convert the GNU triplet format (#14) --- configure | 3 +++ configure.ac | 3 +++ 2 files changed, 6 insertions(+) diff --git a/configure b/configure index 98175608ffa45a..5b09de36f9a0b8 100755 --- a/configure +++ b/configure @@ -16119,6 +16119,9 @@ else esac CARGO_TARGET="${cargo_host%%-apple-darwin*}-apple-darwin" ;; + *-pc-linux-*) + CARGO_TARGET=$(echo "$host" | sed 's/-pc-linux-/-unknown-linux-/') + ;; *) CARGO_TARGET="$host" ;; diff --git a/configure.ac b/configure.ac index 91633ebbf342ae..1561c6f9b2e99f 100644 --- a/configure.ac +++ b/configure.ac @@ -4342,6 +4342,9 @@ else esac CARGO_TARGET="${cargo_host%%-apple-darwin*}-apple-darwin" ;; + *-pc-linux-*) + CARGO_TARGET=$(echo "$host" | sed 's/-pc-linux-/-unknown-linux-/') + ;; *) CARGO_TARGET="$host" ;; From 15c8e5831efd96f1dfd71371fc8a60c477730d07 Mon Sep 17 00:00:00 2001 From: alexey semenyuk <alexsemenyuk88@gmail.com> Date: Sat, 27 Dec 2025 14:31:00 +0500 Subject: [PATCH 19/20] Introduce rustfmt, clippy and fix their errors (#15) Co-authored-by: Emma Smith <emma@emmatyping.dev> --- Modules/cpython-sys/build.rs | 12 ++++++------ Modules/cpython-sys/src/lib.rs | 1 + rust-toolchain.toml | 3 ++- 3 files changed, 9 insertions(+), 7 deletions(-) diff --git a/Modules/cpython-sys/build.rs b/Modules/cpython-sys/build.rs index 8256e2fc93cd03..248b141bfd6d99 100644 --- a/Modules/cpython-sys/build.rs +++ b/Modules/cpython-sys/build.rs @@ -9,10 +9,10 @@ fn main() { .expect("expected Modules/cpython-sys to live under the source tree"); let out_path = PathBuf::from(env::var("OUT_DIR").unwrap()); let builddir = env::var("PYTHON_BUILD_DIR").ok(); - if gil_disabled(&srcdir, builddir.as_deref()) { + if gil_disabled(srcdir, builddir.as_deref()) { println!("cargo:rustc-cfg=py_gil_disabled"); } - generate_c_api_bindings(srcdir, builddir.as_deref(), &out_path.as_path()); + generate_c_api_bindings(srcdir, builddir.as_deref(), out_path.as_path()); // TODO(emmatyping): generate bindings to the internal parser API // The parser includes things slightly differently, so we should generate // it's bindings independently @@ -27,10 +27,10 @@ fn 
gil_disabled(srcdir: &Path, builddir: Option<&str>) -> bool { candidates.push(srcdir.to_path_buf()); for base in candidates { let path = base.join("pyconfig.h"); - if let Ok(contents) = std::fs::read_to_string(&path) { - if contents.contains("Py_GIL_DISABLED 1") { - return true; - } + if let Ok(contents) = std::fs::read_to_string(&path) + && contents.contains("Py_GIL_DISABLED 1") + { + return true; } } false diff --git a/Modules/cpython-sys/src/lib.rs b/Modules/cpython-sys/src/lib.rs index 27765abd4b7fe2..5180762fbef6bf 100644 --- a/Modules/cpython-sys/src/lib.rs +++ b/Modules/cpython-sys/src/lib.rs @@ -3,6 +3,7 @@ #![allow(non_snake_case)] #![allow(unsafe_op_in_unsafe_fn)] #![allow(unnecessary_transmutes)] +#![allow(clippy::approx_constant)] use std::ffi::{c_char, c_int, c_void}; diff --git a/rust-toolchain.toml b/rust-toolchain.toml index da064b583d29e7..4f2204701d7903 100644 --- a/rust-toolchain.toml +++ b/rust-toolchain.toml @@ -1,2 +1,3 @@ [toolchain] -channel = "1.91.1" \ No newline at end of file +channel = "1.91.1" +components = ["rustfmt", "clippy"] From e77670053ac994024f24cf5167ac1761c0210bae Mon Sep 17 00:00:00 2001 From: alexey semenyuk <alexsemenyuk88@gmail.com> Date: Wed, 31 Dec 2025 01:17:24 +0500 Subject: [PATCH 20/20] Add cargo workflow (#18) --- .github/workflows/cargo.yml | 27 +++++++++++++++++++++++++++ Modules/cpython-sys/build.rs | 1 + 2 files changed, 28 insertions(+) create mode 100644 .github/workflows/cargo.yml diff --git a/.github/workflows/cargo.yml b/.github/workflows/cargo.yml new file mode 100644 index 00000000000000..c9fb50043d33a5 --- /dev/null +++ b/.github/workflows/cargo.yml @@ -0,0 +1,27 @@ +name: cargo + +on: + push: + branches: + - main + pull_request: + workflow_dispatch: + +permissions: + contents: read + +jobs: + build: + timeout-minutes: 15 + runs-on: ubuntu-24.04 + steps: + - uses: actions/checkout@v6 + with: + persist-credentials: false + - uses: dtolnay/rust-toolchain@stable + with: + toolchain: stable + - run: ./configure + - run: cargo test + - run: cargo fmt --check + - run: cargo clippy diff --git a/Modules/cpython-sys/build.rs b/Modules/cpython-sys/build.rs index 248b141bfd6d99..a681e498171675 100644 --- a/Modules/cpython-sys/build.rs +++ b/Modules/cpython-sys/build.rs @@ -46,6 +46,7 @@ fn generate_c_api_bindings(srcdir: &Path, builddir: Option<&str>, out_path: &Pat if let Some(build) = builddir { include_dirs.push(PathBuf::from(build)); } + for dir in include_dirs { builder = builder.clang_arg(format!("-I{}", dir.display())); }
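
Editor's note on the `PyObject` wrapper introduced in Modules/cpython-sys/src/lib.rs (patches 11-13): wrapping the bindgen-generated `_object` in an `UnsafeCell` behind `#[repr(transparent)]` lets safe code pass objects around as `&PyObject` while FFI calls still receive a mutable raw pointer, without ever materializing an aliasing `&mut`. The following is a minimal standalone sketch of that pattern, not the in-tree code: the `_object` layout here is a placeholder, and the real `as_raw` additionally casts the pointer back to `*mut PyObject`.

```rust
use std::cell::UnsafeCell;

// Placeholder for the bindgen-generated `_object` struct (hypothetical field;
// the real layout comes from the generated c_api.rs bindings).
#[allow(non_camel_case_types)]
#[repr(C)]
struct _object {
    ob_refcnt: isize,
}

// Same shape as the cpython-sys wrapper: repr(transparent) keeps the C layout,
// and UnsafeCell tells the compiler the pointee may be mutated behind a shared
// reference, so `&PyObject` can hand out a `*mut` for FFI calls.
#[repr(transparent)]
struct PyObject(UnsafeCell<_object>);

impl PyObject {
    fn as_raw(&self) -> *mut _object {
        self.0.get()
    }
}

fn main() {
    let obj = PyObject(UnsafeCell::new(_object { ob_refcnt: 1 }));
    let raw = obj.as_raw();
    // A real C API call would take `raw`; here we only demonstrate that the
    // shared reference still yields a usable mutable pointer.
    unsafe { (*raw).ob_refcnt += 1 };
    assert_eq!(unsafe { (*raw).ob_refcnt }, 2);
}
```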

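Editor's note on the `_base64` encoder: the arithmetic behind `encoded_output_len` and `encode_into` is standard padded base64, where every 3 input bytes become 4 output characters and the output length is `4 * ceil(len / 3)`. Below is a minimal standalone sketch of that calculation and the 24-bit chunking; it is an illustration only, not the module's code verbatim (the real implementation writes into a preallocated `PyBytes` buffer via `encode_into` rather than returning a `Vec`).

```rust
// Standard padded base64: 3 input bytes -> 4 output characters, with '='
// padding the final group. Table and pad byte mirror the module's constants.
const ENCODE_TABLE: &[u8; 64] =
    b"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
const PAD_BYTE: u8 = b'=';

// Output length is 4 * ceil(input_len / 3); checked arithmetic so oversized
// inputs report failure instead of wrapping.
fn encoded_output_len(input_len: usize) -> Option<usize> {
    (input_len.checked_add(2)? / 3).checked_mul(4)
}

fn encode(input: &[u8]) -> Vec<u8> {
    let mut out = Vec::with_capacity(encoded_output_len(input.len()).unwrap());
    for chunk in input.chunks(3) {
        // Pack up to three bytes into a 24-bit value, then emit four 6-bit
        // indices into the table; missing bytes become '=' padding.
        let b1 = *chunk.get(1).unwrap_or(&0);
        let b2 = *chunk.get(2).unwrap_or(&0);
        let v = (u32::from(chunk[0]) << 16) | (u32::from(b1) << 8) | u32::from(b2);
        out.push(ENCODE_TABLE[(v >> 18) as usize & 0x3f]);
        out.push(ENCODE_TABLE[(v >> 12) as usize & 0x3f]);
        out.push(if chunk.len() > 1 { ENCODE_TABLE[(v >> 6) as usize & 0x3f] } else { PAD_BYTE });
        out.push(if chunk.len() > 2 { ENCODE_TABLE[v as usize & 0x3f] } else { PAD_BYTE });
    }
    out
}

fn main() {
    assert_eq!(encoded_output_len(5), Some(8));
    assert_eq!(encode(b"hi"), b"aGk=".to_vec());
    assert_eq!(encode(b"foo"), b"Zm9v".to_vec());
}
```
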
{ - #[cfg_attr(feature = "perf-inline", inline(always))] - fn find(&self, haystack: &[u8], span: Span) -> Option { - (**self).find(haystack, span) - } - - #[cfg_attr(feature = "perf-inline", inline(always))] - fn prefix(&self, haystack: &[u8], span: Span) -> Option { - (**self).prefix(haystack, span) - } - - #[cfg_attr(feature = "perf-inline", inline(always))] - fn memory_usage(&self) -> usize { - (**self).memory_usage() - } - - #[cfg_attr(feature = "perf-inline", inline(always))] - fn is_fast(&self) -> bool { - (&**self).is_fast() - } -} - -/// A type that encapsulates the selection of a prefilter algorithm from a -/// sequence of needles. -/// -/// The existence of this type is a little tricky, because we don't (currently) -/// use it for performing a search. Instead, we really only consume it by -/// converting the underlying prefilter into a trait object, whether that be -/// `dyn PrefilterI` or `dyn Strategy` (for the meta regex engine). In order -/// to avoid re-copying the prefilter selection logic, we isolate it here, and -/// then force anything downstream that wants to convert it to a trait object -/// to do trivial case analysis on it. -/// -/// One wonders whether we *should* use an enum instead of a trait object. -/// At time of writing, I chose trait objects based on instinct because 1) I -/// knew I wasn't going to inline anything and 2) there would potentially be -/// many different choices. However, as of time of writing, I haven't actually -/// compared the trait object approach to the enum approach. That probably -/// should be litigated, but I ran out of steam. -/// -/// Note that if the `alloc` feature is disabled, then values of this type -/// are (and should) never be constructed. Also, in practice, for any of the -/// prefilters to be selected, you'll need at least one of the `perf-literal-*` -/// features enabled. -#[derive(Clone, Debug)] -pub(crate) enum Choice { - Memchr(Memchr), - Memchr2(Memchr2), - Memchr3(Memchr3), - Memmem(Memmem), - Teddy(Teddy), - ByteSet(ByteSet), - AhoCorasick(AhoCorasick), -} - -impl Choice { - /// Select what is believed to be the best prefilter algorithm for the - /// match semantics and sequence of needles given. - /// - /// This selection algorithm uses the needles as given without any - /// modification. For example, if `[bar]` is given, then this doesn't - /// try to select `memchr` for `b`. Instead, it would select `memmem` - /// for `bar`. If callers would want `memchr` selected for `[bar]`, then - /// callers should massages the literals themselves. That is, callers are - /// responsible for heuristics surrounding which sequence of literals is - /// best. - /// - /// What this selection algorithm does is attempt to use the fastest - /// prefilter that works for the literals given. So if `[a, b]`, is given, - /// then `memchr2` is selected. - /// - /// Of course, which prefilter is selected is also subject to what - /// is available. For example, if `alloc` isn't enabled, then - /// that limits which prefilters can be selected. Similarly, if - /// `perf-literal-substring` isn't enabled, then nothing from the `memchr` - /// crate can be returned. - pub(crate) fn new>( - kind: MatchKind, - needles: &[B], - ) -> Option { - // An empty set means the regex matches nothing, so no sense in - // building a prefilter. 
- if needles.len() == 0 { - debug!("prefilter building failed: found empty set of literals"); - return None; - } - // If the regex can match the empty string, then the prefilter - // will by definition match at every position. This is obviously - // completely ineffective. - if needles.iter().any(|n| n.as_ref().is_empty()) { - debug!("prefilter building failed: literals match empty string"); - return None; - } - // BREADCRUMBS: Perhaps the literal optimizer should special case - // sequences of length two or three if the leading bytes of each are - // "rare"? Or perhaps, if there are two or three total possible leading - // bytes, regardless of the number of literals, and all are rare... - // Then well, perhaps we should use memchr2 or memchr3 in those cases? - if let Some(pre) = Memchr::new(kind, needles) { - debug!("prefilter built: memchr"); - return Some(Choice::Memchr(pre)); - } - if let Some(pre) = Memchr2::new(kind, needles) { - debug!("prefilter built: memchr2"); - return Some(Choice::Memchr2(pre)); - } - if let Some(pre) = Memchr3::new(kind, needles) { - debug!("prefilter built: memchr3"); - return Some(Choice::Memchr3(pre)); - } - if let Some(pre) = Memmem::new(kind, needles) { - debug!("prefilter built: memmem"); - return Some(Choice::Memmem(pre)); - } - if let Some(pre) = Teddy::new(kind, needles) { - debug!("prefilter built: teddy"); - return Some(Choice::Teddy(pre)); - } - if let Some(pre) = ByteSet::new(kind, needles) { - debug!("prefilter built: byteset"); - return Some(Choice::ByteSet(pre)); - } - if let Some(pre) = AhoCorasick::new(kind, needles) { - debug!("prefilter built: aho-corasick"); - return Some(Choice::AhoCorasick(pre)); - } - debug!("prefilter building failed: no strategy could be found"); - None - } -} - -/// Extracts all of the prefix literals from the given HIR expressions into a -/// single `Seq`. The literals in the sequence are ordered with respect to the -/// order of the given HIR expressions and consistent with the match semantics -/// given. -/// -/// The sequence returned is "optimized." That is, they may be shrunk or even -/// truncated according to heuristics with the intent of making them more -/// useful as a prefilter. (Which translates to both using faster algorithms -/// and minimizing the false positive rate.) -/// -/// Note that this erases any connection between the literals and which pattern -/// (or patterns) they came from. -/// -/// The match kind given must correspond to the match semantics of the regex -/// that is represented by the HIRs given. The match semantics may change the -/// literal sequence returned. -#[cfg(feature = "syntax")] -pub(crate) fn prefixes(kind: MatchKind, hirs: &[H]) -> literal::Seq -where - H: core::borrow::Borrow, -{ - let mut extractor = literal::Extractor::new(); - extractor.kind(literal::ExtractKind::Prefix); - - let mut prefixes = literal::Seq::empty(); - for hir in hirs { - prefixes.union(&mut extractor.extract(hir.borrow())); - } - debug!( - "prefixes (len={:?}, exact={:?}) extracted before optimization: {:?}", - prefixes.len(), - prefixes.is_exact(), - prefixes - ); - match kind { - MatchKind::All => { - prefixes.sort(); - prefixes.dedup(); - } - MatchKind::LeftmostFirst => { - prefixes.optimize_for_prefix_by_preference(); - } - } - debug!( - "prefixes (len={:?}, exact={:?}) extracted after optimization: {:?}", - prefixes.len(), - prefixes.is_exact(), - prefixes - ); - prefixes -} - -/// Like `prefixes`, but for all suffixes of all matches for the given HIRs. 
-#[cfg(feature = "syntax")] -pub(crate) fn suffixes(kind: MatchKind, hirs: &[H]) -> literal::Seq -where - H: core::borrow::Borrow, -{ - let mut extractor = literal::Extractor::new(); - extractor.kind(literal::ExtractKind::Suffix); - - let mut suffixes = literal::Seq::empty(); - for hir in hirs { - suffixes.union(&mut extractor.extract(hir.borrow())); - } - debug!( - "suffixes (len={:?}, exact={:?}) extracted before optimization: {:?}", - suffixes.len(), - suffixes.is_exact(), - suffixes - ); - match kind { - MatchKind::All => { - suffixes.sort(); - suffixes.dedup(); - } - MatchKind::LeftmostFirst => { - suffixes.optimize_for_suffix_by_preference(); - } - } - debug!( - "suffixes (len={:?}, exact={:?}) extracted after optimization: {:?}", - suffixes.len(), - suffixes.is_exact(), - suffixes - ); - suffixes -} diff --git a/vendor/regex-automata/src/util/prefilter/teddy.rs b/vendor/regex-automata/src/util/prefilter/teddy.rs deleted file mode 100644 index fc79f2b2f3f1d4..00000000000000 --- a/vendor/regex-automata/src/util/prefilter/teddy.rs +++ /dev/null @@ -1,160 +0,0 @@ -use crate::util::{ - prefilter::PrefilterI, - search::{MatchKind, Span}, -}; - -#[derive(Clone, Debug)] -pub(crate) struct Teddy { - #[cfg(not(feature = "perf-literal-multisubstring"))] - _unused: (), - /// The actual Teddy searcher. - /// - /// Technically, it's possible that Teddy doesn't actually get used, since - /// Teddy does require its haystack to at least be of a certain size - /// (usually around the size of whatever vector is being used, so ~16 - /// or ~32 bytes). For haystacks shorter than that, the implementation - /// currently uses Rabin-Karp. - #[cfg(feature = "perf-literal-multisubstring")] - searcher: aho_corasick::packed::Searcher, - /// When running an anchored search, the packed searcher can't handle it so - /// we defer to Aho-Corasick itself. Kind of sad, but changing the packed - /// searchers to support anchored search would be difficult at worst and - /// annoying at best. Since packed searchers only apply to small numbers of - /// literals, we content ourselves that this is not much of an added cost. - /// (That packed searchers only work with a small number of literals is - /// also why we use a DFA here. Otherwise, the memory usage of a DFA would - /// likely be unacceptable.) - #[cfg(feature = "perf-literal-multisubstring")] - anchored_ac: aho_corasick::dfa::DFA, - /// The length of the smallest literal we look for. - /// - /// We use this as a heuristic to figure out whether this will be "fast" or - /// not. Generally, the longer the better, because longer needles are more - /// discriminating and thus reduce false positive rate. - #[cfg(feature = "perf-literal-multisubstring")] - minimum_len: usize, -} - -impl Teddy { - pub(crate) fn new>( - kind: MatchKind, - needles: &[B], - ) -> Option { - #[cfg(not(feature = "perf-literal-multisubstring"))] - { - None - } - #[cfg(feature = "perf-literal-multisubstring")] - { - // We only really support leftmost-first semantics. In - // theory we could at least support leftmost-longest, as the - // aho-corasick crate does, but regex-automata doesn't know about - // leftmost-longest currently. - // - // And like the aho-corasick prefilter, if we're using `All` - // semantics, then we can still use leftmost semantics for a - // prefilter. (This might be a suspicious choice for the literal - // engine, which uses a prefilter as a regex engine directly, but - // that only happens when using leftmost-first semantics.) 
- let (packed_match_kind, ac_match_kind) = match kind { - MatchKind::LeftmostFirst | MatchKind::All => ( - aho_corasick::packed::MatchKind::LeftmostFirst, - aho_corasick::MatchKind::LeftmostFirst, - ), - }; - let minimum_len = - needles.iter().map(|n| n.as_ref().len()).min().unwrap_or(0); - let packed = aho_corasick::packed::Config::new() - .match_kind(packed_match_kind) - .builder() - .extend(needles) - .build()?; - let anchored_ac = aho_corasick::dfa::DFA::builder() - .match_kind(ac_match_kind) - .start_kind(aho_corasick::StartKind::Anchored) - .prefilter(false) - .build(needles) - .ok()?; - Some(Teddy { searcher: packed, anchored_ac, minimum_len }) - } - } -} - -impl PrefilterI for Teddy { - fn find(&self, haystack: &[u8], span: Span) -> Option { - #[cfg(not(feature = "perf-literal-multisubstring"))] - { - unreachable!() - } - #[cfg(feature = "perf-literal-multisubstring")] - { - let ac_span = - aho_corasick::Span { start: span.start, end: span.end }; - self.searcher - .find_in(haystack, ac_span) - .map(|m| Span { start: m.start(), end: m.end() }) - } - } - - fn prefix(&self, haystack: &[u8], span: Span) -> Option { - #[cfg(not(feature = "perf-literal-multisubstring"))] - { - unreachable!() - } - #[cfg(feature = "perf-literal-multisubstring")] - { - use aho_corasick::automaton::Automaton; - let input = aho_corasick::Input::new(haystack) - .anchored(aho_corasick::Anchored::Yes) - .span(span.start..span.end); - self.anchored_ac - .try_find(&input) - // OK because we build the DFA with anchored support. - .expect("aho-corasick DFA should never fail") - .map(|m| Span { start: m.start(), end: m.end() }) - } - } - - fn memory_usage(&self) -> usize { - #[cfg(not(feature = "perf-literal-multisubstring"))] - { - unreachable!() - } - #[cfg(feature = "perf-literal-multisubstring")] - { - use aho_corasick::automaton::Automaton; - self.searcher.memory_usage() + self.anchored_ac.memory_usage() - } - } - - fn is_fast(&self) -> bool { - #[cfg(not(feature = "perf-literal-multisubstring"))] - { - unreachable!() - } - #[cfg(feature = "perf-literal-multisubstring")] - { - // Teddy is usually quite fast, but I have seen some cases where - // a large number of literals can overwhelm it and make it not so - // fast. We make an educated but conservative guess at a limit, at - // which point, we're not so comfortable thinking Teddy is "fast." - // - // Well... this used to incorporate a "limit" on the *number* - // of literals, but I have since changed it to a minimum on the - // *smallest* literal. Namely, when there is a very small literal - // (1 or 2 bytes), it is far more likely that it leads to a higher - // false positive rate. (Although, of course, not always. For - // example, 'zq' is likely to have a very low false positive rate.) - // But when we have 3 bytes, we have a really good chance of being - // quite discriminatory and thus fast. - // - // We may still want to add some kind of limit on the number of - // literals here, but keep in mind that Teddy already has its own - // somewhat small limit (64 at time of writing). The main issue - // here is that if 'is_fast' is false, it opens the door for the - // reverse inner optimization to kick in. We really only want to - // resort to the reverse inner optimization if we absolutely must. 
- self.minimum_len >= 3 - } - } -} diff --git a/vendor/regex-automata/src/util/primitives.rs b/vendor/regex-automata/src/util/primitives.rs deleted file mode 100644 index 5c5d187b0e6ab8..00000000000000 --- a/vendor/regex-automata/src/util/primitives.rs +++ /dev/null @@ -1,776 +0,0 @@ -/*! -Lower level primitive types that are useful in a variety of circumstances. - -# Overview - -This list represents the principle types in this module and briefly describes -when you might want to use them. - -* [`PatternID`] - A type that represents the identifier of a regex pattern. -This is probably the most widely used type in this module (which is why it's -also re-exported in the crate root). -* [`StateID`] - A type the represents the identifier of a finite automaton -state. This is used for both NFAs and DFAs, with the notable exception of -the hybrid NFA/DFA. (The hybrid NFA/DFA uses a special purpose "lazy" state -identifier.) -* [`SmallIndex`] - The internal representation of both a `PatternID` and a -`StateID`. Its purpose is to serve as a type that can index memory without -being as big as a `usize` on 64-bit targets. The main idea behind this type -is that there are many things in regex engines that will, in practice, never -overflow a 32-bit integer. (For example, like the number of patterns in a regex -or the number of states in an NFA.) Thus, a `SmallIndex` can be used to index -memory without peppering `as` casts everywhere. Moreover, it forces callers -to handle errors in the case where, somehow, the value would otherwise overflow -either a 32-bit integer or a `usize` (e.g., on 16-bit targets). -* [`NonMaxUsize`] - Represents a `usize` that cannot be `usize::MAX`. As a -result, `Option` has the same size in memory as a `usize`. This -useful, for example, when representing the offsets of submatches since it -reduces memory usage by a factor of 2. It is a legal optimization since Rust -guarantees that slices never have a length that exceeds `isize::MAX`. -*/ - -use core::num::NonZeroUsize; - -#[cfg(feature = "alloc")] -use alloc::vec::Vec; - -use crate::util::int::{Usize, U16, U32, U64}; - -/// A `usize` that can never be `usize::MAX`. -/// -/// This is similar to `core::num::NonZeroUsize`, but instead of not permitting -/// a zero value, this does not permit a max value. -/// -/// This is useful in certain contexts where one wants to optimize the memory -/// usage of things that contain match offsets. Namely, since Rust slices -/// are guaranteed to never have a length exceeding `isize::MAX`, we can use -/// `usize::MAX` as a sentinel to indicate that no match was found. Indeed, -/// types like `Option` have exactly the same size in memory as a -/// `usize`. -/// -/// This type is defined to be `repr(transparent)` for -/// `core::num::NonZeroUsize`, which is in turn defined to be -/// `repr(transparent)` for `usize`. -#[derive(Clone, Copy, Eq, Hash, PartialEq, PartialOrd, Ord)] -#[repr(transparent)] -pub struct NonMaxUsize(NonZeroUsize); - -impl NonMaxUsize { - /// Create a new `NonMaxUsize` from the given value. - /// - /// This returns `None` only when the given value is equal to `usize::MAX`. - #[inline] - pub fn new(value: usize) -> Option { - NonZeroUsize::new(value.wrapping_add(1)).map(NonMaxUsize) - } - - /// Return the underlying `usize` value. The returned value is guaranteed - /// to not equal `usize::MAX`. 
- #[inline] - pub fn get(self) -> usize { - self.0.get().wrapping_sub(1) - } -} - -// We provide our own Debug impl because seeing the internal repr can be quite -// surprising if you aren't expecting it. e.g., 'NonMaxUsize(5)' vs just '5'. -impl core::fmt::Debug for NonMaxUsize { - fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { - write!(f, "{:?}", self.get()) - } -} - -/// A type that represents a "small" index. -/// -/// The main idea of this type is to provide something that can index memory, -/// but uses less memory than `usize` on 64-bit systems. Specifically, its -/// representation is always a `u32` and has `repr(transparent)` enabled. (So -/// it is safe to transmute between a `u32` and a `SmallIndex`.) -/// -/// A small index is typically useful in cases where there is no practical way -/// that the index will overflow a 32-bit integer. A good example of this is -/// an NFA state. If you could somehow build an NFA with `2^30` states, its -/// memory usage would be exorbitant and its runtime execution would be so -/// slow as to be completely worthless. Therefore, this crate generally deems -/// it acceptable to return an error if it would otherwise build an NFA that -/// requires a slice longer than what a 32-bit integer can index. In exchange, -/// we can use 32-bit indices instead of 64-bit indices in various places. -/// -/// This type ensures this by providing a constructor that will return an error -/// if its argument cannot fit into the type. This makes it much easier to -/// handle these sorts of boundary cases that are otherwise extremely subtle. -/// -/// On all targets, this type guarantees that its value will fit in a `u32`, -/// `i32`, `usize` and an `isize`. This means that on 16-bit targets, for -/// example, this type's maximum value will never overflow an `isize`, -/// which means it will never overflow a `i16` even though its internal -/// representation is still a `u32`. -/// -/// The purpose for making the type fit into even signed integer types like -/// `isize` is to guarantee that the difference between any two small indices -/// is itself also a small index. This is useful in certain contexts, e.g., -/// for delta encoding. -/// -/// # Other types -/// -/// The following types wrap `SmallIndex` to provide a more focused use case: -/// -/// * [`PatternID`] is for representing the identifiers of patterns. -/// * [`StateID`] is for representing the identifiers of states in finite -/// automata. It is used for both NFAs and DFAs. -/// -/// # Representation -/// -/// This type is always represented internally by a `u32` and is marked as -/// `repr(transparent)`. Thus, this type always has the same representation as -/// a `u32`. It is thus safe to transmute between a `u32` and a `SmallIndex`. -/// -/// # Indexing -/// -/// For convenience, callers may use a `SmallIndex` to index slices. -/// -/// # Safety -/// -/// While a `SmallIndex` is meant to guarantee that its value fits into `usize` -/// without using as much space as a `usize` on all targets, callers must -/// not rely on this property for safety. Callers may choose to rely on this -/// property for correctness however. For example, creating a `SmallIndex` with -/// an invalid value can be done in entirely safe code. This may in turn result -/// in panics or silent logical errors. -#[derive( - Clone, Copy, Debug, Default, Eq, Hash, PartialEq, PartialOrd, Ord, -)] -#[repr(transparent)] -pub struct SmallIndex(u32); - -impl SmallIndex { - /// The maximum index value. 
- #[cfg(any(target_pointer_width = "32", target_pointer_width = "64"))] - pub const MAX: SmallIndex = - // FIXME: Use as_usize() once const functions in traits are stable. - SmallIndex::new_unchecked(core::i32::MAX as usize - 1); - - /// The maximum index value. - #[cfg(target_pointer_width = "16")] - pub const MAX: SmallIndex = - SmallIndex::new_unchecked(core::isize::MAX - 1); - - /// The total number of values that can be represented as a small index. - pub const LIMIT: usize = SmallIndex::MAX.as_usize() + 1; - - /// The zero index value. - pub const ZERO: SmallIndex = SmallIndex::new_unchecked(0); - - /// The number of bytes that a single small index uses in memory. - pub const SIZE: usize = core::mem::size_of::(); - - /// Create a new small index. - /// - /// If the given index exceeds [`SmallIndex::MAX`], then this returns - /// an error. - #[inline] - pub fn new(index: usize) -> Result { - SmallIndex::try_from(index) - } - - /// Create a new small index without checking whether the given value - /// exceeds [`SmallIndex::MAX`]. - /// - /// Using this routine with an invalid index value will result in - /// unspecified behavior, but *not* undefined behavior. In particular, an - /// invalid index value is likely to cause panics or possibly even silent - /// logical errors. - /// - /// Callers must never rely on a `SmallIndex` to be within a certain range - /// for memory safety. - #[inline] - pub const fn new_unchecked(index: usize) -> SmallIndex { - // FIXME: Use as_u32() once const functions in traits are stable. - SmallIndex(index as u32) - } - - /// Like [`SmallIndex::new`], but panics if the given index is not valid. - #[inline] - pub fn must(index: usize) -> SmallIndex { - SmallIndex::new(index).expect("invalid small index") - } - - /// Return this small index as a `usize`. This is guaranteed to never - /// overflow `usize`. - #[inline] - pub const fn as_usize(&self) -> usize { - // FIXME: Use as_usize() once const functions in traits are stable. - self.0 as usize - } - - /// Return this small index as a `u64`. This is guaranteed to never - /// overflow. - #[inline] - pub const fn as_u64(&self) -> u64 { - // FIXME: Use u64::from() once const functions in traits are stable. - self.0 as u64 - } - - /// Return the internal `u32` of this small index. This is guaranteed to - /// never overflow `u32`. - #[inline] - pub const fn as_u32(&self) -> u32 { - self.0 - } - - /// Return the internal `u32` of this small index represented as an `i32`. - /// This is guaranteed to never overflow an `i32`. - #[inline] - pub const fn as_i32(&self) -> i32 { - // This is OK because we guarantee that our max value is <= i32::MAX. - self.0 as i32 - } - - /// Returns one more than this small index as a usize. - /// - /// Since a small index has constraints on its maximum value, adding `1` to - /// it will always fit in a `usize`, `u32` and a `i32`. - #[inline] - pub fn one_more(&self) -> usize { - self.as_usize() + 1 - } - - /// Decode this small index from the bytes given using the native endian - /// byte order for the current target. - /// - /// If the decoded integer is not representable as a small index for the - /// current target, then this returns an error. 
- #[inline] - pub fn from_ne_bytes( - bytes: [u8; 4], - ) -> Result { - let id = u32::from_ne_bytes(bytes); - if id > SmallIndex::MAX.as_u32() { - return Err(SmallIndexError { attempted: u64::from(id) }); - } - Ok(SmallIndex::new_unchecked(id.as_usize())) - } - - /// Decode this small index from the bytes given using the native endian - /// byte order for the current target. - /// - /// This is analogous to [`SmallIndex::new_unchecked`] in that is does not - /// check whether the decoded integer is representable as a small index. - #[inline] - pub fn from_ne_bytes_unchecked(bytes: [u8; 4]) -> SmallIndex { - SmallIndex::new_unchecked(u32::from_ne_bytes(bytes).as_usize()) - } - - /// Return the underlying small index integer as raw bytes in native endian - /// format. - #[inline] - pub fn to_ne_bytes(&self) -> [u8; 4] { - self.0.to_ne_bytes() - } -} - -impl core::ops::Index for [T] { - type Output = T; - - #[inline] - fn index(&self, index: SmallIndex) -> &T { - &self[index.as_usize()] - } -} - -impl core::ops::IndexMut for [T] { - #[inline] - fn index_mut(&mut self, index: SmallIndex) -> &mut T { - &mut self[index.as_usize()] - } -} - -#[cfg(feature = "alloc")] -impl core::ops::Index for Vec { - type Output = T; - - #[inline] - fn index(&self, index: SmallIndex) -> &T { - &self[index.as_usize()] - } -} - -#[cfg(feature = "alloc")] -impl core::ops::IndexMut for Vec { - #[inline] - fn index_mut(&mut self, index: SmallIndex) -> &mut T { - &mut self[index.as_usize()] - } -} - -impl From for SmallIndex { - fn from(index: u8) -> SmallIndex { - SmallIndex::new_unchecked(usize::from(index)) - } -} - -impl TryFrom for SmallIndex { - type Error = SmallIndexError; - - fn try_from(index: u16) -> Result { - if u32::from(index) > SmallIndex::MAX.as_u32() { - return Err(SmallIndexError { attempted: u64::from(index) }); - } - Ok(SmallIndex::new_unchecked(index.as_usize())) - } -} - -impl TryFrom for SmallIndex { - type Error = SmallIndexError; - - fn try_from(index: u32) -> Result { - if index > SmallIndex::MAX.as_u32() { - return Err(SmallIndexError { attempted: u64::from(index) }); - } - Ok(SmallIndex::new_unchecked(index.as_usize())) - } -} - -impl TryFrom for SmallIndex { - type Error = SmallIndexError; - - fn try_from(index: u64) -> Result { - if index > SmallIndex::MAX.as_u64() { - return Err(SmallIndexError { attempted: index }); - } - Ok(SmallIndex::new_unchecked(index.as_usize())) - } -} - -impl TryFrom for SmallIndex { - type Error = SmallIndexError; - - fn try_from(index: usize) -> Result { - if index > SmallIndex::MAX.as_usize() { - return Err(SmallIndexError { attempted: index.as_u64() }); - } - Ok(SmallIndex::new_unchecked(index)) - } -} - -#[cfg(test)] -impl quickcheck::Arbitrary for SmallIndex { - fn arbitrary(gen: &mut quickcheck::Gen) -> SmallIndex { - use core::cmp::max; - - let id = max(i32::MIN + 1, i32::arbitrary(gen)).abs(); - if id > SmallIndex::MAX.as_i32() { - SmallIndex::MAX - } else { - SmallIndex::new(usize::try_from(id).unwrap()).unwrap() - } - } -} - -/// This error occurs when a small index could not be constructed. -/// -/// This occurs when given an integer exceeding the maximum small index value. -/// -/// When the `std` feature is enabled, this implements the `Error` trait. -#[derive(Clone, Debug, Eq, PartialEq)] -pub struct SmallIndexError { - attempted: u64, -} - -impl SmallIndexError { - /// Returns the value that could not be converted to a small index. 
- pub fn attempted(&self) -> u64 { - self.attempted - } -} - -#[cfg(feature = "std")] -impl std::error::Error for SmallIndexError {} - -impl core::fmt::Display for SmallIndexError { - fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { - write!( - f, - "failed to create small index from {:?}, which exceeds {:?}", - self.attempted(), - SmallIndex::MAX, - ) - } -} - -#[derive(Clone, Debug)] -pub(crate) struct SmallIndexIter { - rng: core::ops::Range, -} - -impl Iterator for SmallIndexIter { - type Item = SmallIndex; - - fn next(&mut self) -> Option { - if self.rng.start >= self.rng.end { - return None; - } - let next_id = self.rng.start + 1; - let id = core::mem::replace(&mut self.rng.start, next_id); - // new_unchecked is OK since we asserted that the number of - // elements in this iterator will fit in an ID at construction. - Some(SmallIndex::new_unchecked(id)) - } -} - -macro_rules! index_type_impls { - ($name:ident, $err:ident, $iter:ident, $withiter:ident) => { - impl $name { - /// The maximum value. - pub const MAX: $name = $name(SmallIndex::MAX); - - /// The total number of values that can be represented. - pub const LIMIT: usize = SmallIndex::LIMIT; - - /// The zero value. - pub const ZERO: $name = $name(SmallIndex::ZERO); - - /// The number of bytes that a single value uses in memory. - pub const SIZE: usize = SmallIndex::SIZE; - - /// Create a new value that is represented by a "small index." - /// - /// If the given index exceeds the maximum allowed value, then this - /// returns an error. - #[inline] - pub fn new(value: usize) -> Result<$name, $err> { - SmallIndex::new(value).map($name).map_err($err) - } - - /// Create a new value without checking whether the given argument - /// exceeds the maximum. - /// - /// Using this routine with an invalid value will result in - /// unspecified behavior, but *not* undefined behavior. In - /// particular, an invalid ID value is likely to cause panics or - /// possibly even silent logical errors. - /// - /// Callers must never rely on this type to be within a certain - /// range for memory safety. - #[inline] - pub const fn new_unchecked(value: usize) -> $name { - $name(SmallIndex::new_unchecked(value)) - } - - /// Like `new`, but panics if the given value is not valid. - #[inline] - pub fn must(value: usize) -> $name { - $name::new(value).expect(concat!( - "invalid ", - stringify!($name), - " value" - )) - } - - /// Return the internal value as a `usize`. This is guaranteed to - /// never overflow `usize`. - #[inline] - pub const fn as_usize(&self) -> usize { - self.0.as_usize() - } - - /// Return the internal value as a `u64`. This is guaranteed to - /// never overflow. - #[inline] - pub const fn as_u64(&self) -> u64 { - self.0.as_u64() - } - - /// Return the internal value as a `u32`. This is guaranteed to - /// never overflow `u32`. - #[inline] - pub const fn as_u32(&self) -> u32 { - self.0.as_u32() - } - - /// Return the internal value as a i32`. This is guaranteed to - /// never overflow an `i32`. - #[inline] - pub const fn as_i32(&self) -> i32 { - self.0.as_i32() - } - - /// Returns one more than this value as a usize. - /// - /// Since values represented by a "small index" have constraints - /// on their maximum value, adding `1` to it will always fit in a - /// `usize`, `u32` and a `i32`. - #[inline] - pub fn one_more(&self) -> usize { - self.0.one_more() - } - - /// Decode this value from the bytes given using the native endian - /// byte order for the current target. 
- /// - /// If the decoded integer is not representable as a small index - /// for the current target, then this returns an error. - #[inline] - pub fn from_ne_bytes(bytes: [u8; 4]) -> Result<$name, $err> { - SmallIndex::from_ne_bytes(bytes).map($name).map_err($err) - } - - /// Decode this value from the bytes given using the native endian - /// byte order for the current target. - /// - /// This is analogous to `new_unchecked` in that is does not check - /// whether the decoded integer is representable as a small index. - #[inline] - pub fn from_ne_bytes_unchecked(bytes: [u8; 4]) -> $name { - $name(SmallIndex::from_ne_bytes_unchecked(bytes)) - } - - /// Return the underlying integer as raw bytes in native endian - /// format. - #[inline] - pub fn to_ne_bytes(&self) -> [u8; 4] { - self.0.to_ne_bytes() - } - - /// Returns an iterator over all values from 0 up to and not - /// including the given length. - /// - /// If the given length exceeds this type's limit, then this - /// panics. - pub(crate) fn iter(len: usize) -> $iter { - $iter::new(len) - } - } - - // We write our own Debug impl so that we get things like PatternID(5) - // instead of PatternID(SmallIndex(5)). - impl core::fmt::Debug for $name { - fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { - f.debug_tuple(stringify!($name)).field(&self.as_u32()).finish() - } - } - - impl core::ops::Index<$name> for [T] { - type Output = T; - - #[inline] - fn index(&self, index: $name) -> &T { - &self[index.as_usize()] - } - } - - impl core::ops::IndexMut<$name> for [T] { - #[inline] - fn index_mut(&mut self, index: $name) -> &mut T { - &mut self[index.as_usize()] - } - } - - #[cfg(feature = "alloc")] - impl core::ops::Index<$name> for Vec { - type Output = T; - - #[inline] - fn index(&self, index: $name) -> &T { - &self[index.as_usize()] - } - } - - #[cfg(feature = "alloc")] - impl core::ops::IndexMut<$name> for Vec { - #[inline] - fn index_mut(&mut self, index: $name) -> &mut T { - &mut self[index.as_usize()] - } - } - - impl From for $name { - fn from(value: u8) -> $name { - $name(SmallIndex::from(value)) - } - } - - impl TryFrom for $name { - type Error = $err; - - fn try_from(value: u16) -> Result<$name, $err> { - SmallIndex::try_from(value).map($name).map_err($err) - } - } - - impl TryFrom for $name { - type Error = $err; - - fn try_from(value: u32) -> Result<$name, $err> { - SmallIndex::try_from(value).map($name).map_err($err) - } - } - - impl TryFrom for $name { - type Error = $err; - - fn try_from(value: u64) -> Result<$name, $err> { - SmallIndex::try_from(value).map($name).map_err($err) - } - } - - impl TryFrom for $name { - type Error = $err; - - fn try_from(value: usize) -> Result<$name, $err> { - SmallIndex::try_from(value).map($name).map_err($err) - } - } - - #[cfg(test)] - impl quickcheck::Arbitrary for $name { - fn arbitrary(gen: &mut quickcheck::Gen) -> $name { - $name(SmallIndex::arbitrary(gen)) - } - } - - /// This error occurs when a value could not be constructed. - /// - /// This occurs when given an integer exceeding the maximum allowed - /// value. - /// - /// When the `std` feature is enabled, this implements the `Error` - /// trait. - #[derive(Clone, Debug, Eq, PartialEq)] - pub struct $err(SmallIndexError); - - impl $err { - /// Returns the value that could not be converted to an ID. 
- pub fn attempted(&self) -> u64 { - self.0.attempted() - } - } - - #[cfg(feature = "std")] - impl std::error::Error for $err {} - - impl core::fmt::Display for $err { - fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { - write!( - f, - "failed to create {} from {:?}, which exceeds {:?}", - stringify!($name), - self.attempted(), - $name::MAX, - ) - } - } - - #[derive(Clone, Debug)] - pub(crate) struct $iter(SmallIndexIter); - - impl $iter { - fn new(len: usize) -> $iter { - assert!( - len <= $name::LIMIT, - "cannot create iterator for {} when number of \ - elements exceed {:?}", - stringify!($name), - $name::LIMIT, - ); - $iter(SmallIndexIter { rng: 0..len }) - } - } - - impl Iterator for $iter { - type Item = $name; - - fn next(&mut self) -> Option<$name> { - self.0.next().map($name) - } - } - - /// An iterator adapter that is like std::iter::Enumerate, but attaches - /// small index values instead. It requires `ExactSizeIterator`. At - /// construction, it ensures that the index of each element in the - /// iterator is representable in the corresponding small index type. - #[derive(Clone, Debug)] - pub(crate) struct $withiter { - it: I, - ids: $iter, - } - - impl $withiter { - fn new(it: I) -> $withiter { - let ids = $name::iter(it.len()); - $withiter { it, ids } - } - } - - impl Iterator for $withiter { - type Item = ($name, I::Item); - - fn next(&mut self) -> Option<($name, I::Item)> { - let item = self.it.next()?; - // Number of elements in this iterator must match, according - // to contract of ExactSizeIterator. - let id = self.ids.next().unwrap(); - Some((id, item)) - } - } - }; -} - -/// The identifier of a regex pattern, represented by a [`SmallIndex`]. -/// -/// The identifier for a pattern corresponds to its relative position among -/// other patterns in a single finite state machine. Namely, when building -/// a multi-pattern regex engine, one must supply a sequence of patterns to -/// match. The position (starting at 0) of each pattern in that sequence -/// represents its identifier. This identifier is in turn used to identify and -/// report matches of that pattern in various APIs. -/// -/// See the [`SmallIndex`] type for more information about what it means for -/// a pattern ID to be a "small index." -/// -/// Note that this type is defined in the -/// [`util::primitives`](crate::util::primitives) module, but it is also -/// re-exported at the crate root due to how common it is. -#[derive(Clone, Copy, Default, Eq, Hash, PartialEq, PartialOrd, Ord)] -#[repr(transparent)] -pub struct PatternID(SmallIndex); - -/// The identifier of a finite automaton state, represented by a -/// [`SmallIndex`]. -/// -/// Most regex engines in this crate are built on top of finite automata. Each -/// state in a finite automaton defines transitions from its state to another. -/// Those transitions point to other states via their identifiers, i.e., a -/// `StateID`. Since finite automata tend to contain many transitions, it is -/// much more memory efficient to define state IDs as small indices. -/// -/// See the [`SmallIndex`] type for more information about what it means for -/// a state ID to be a "small index." 
-#[derive(Clone, Copy, Default, Eq, Hash, PartialEq, PartialOrd, Ord)] -#[repr(transparent)] -pub struct StateID(SmallIndex); - -index_type_impls!(PatternID, PatternIDError, PatternIDIter, WithPatternIDIter); -index_type_impls!(StateID, StateIDError, StateIDIter, WithStateIDIter); - -/// A utility trait that defines a couple of adapters for making it convenient -/// to access indices as "small index" types. We require ExactSizeIterator so -/// that iterator construction can do a single check to make sure the index of -/// each element is representable by its small index type. -pub(crate) trait IteratorIndexExt: Iterator { - fn with_pattern_ids(self) -> WithPatternIDIter - where - Self: Sized + ExactSizeIterator, - { - WithPatternIDIter::new(self) - } - - fn with_state_ids(self) -> WithStateIDIter - where - Self: Sized + ExactSizeIterator, - { - WithStateIDIter::new(self) - } -} - -impl IteratorIndexExt for I {} diff --git a/vendor/regex-automata/src/util/search.rs b/vendor/regex-automata/src/util/search.rs deleted file mode 100644 index 3ece11d155411e..00000000000000 --- a/vendor/regex-automata/src/util/search.rs +++ /dev/null @@ -1,1988 +0,0 @@ -/*! -Types and routines that support the search APIs of most regex engines. - -This sub-module isn't exposed directly, but rather, its contents are exported -at the crate root due to the universality of most of the types and routines in -this module. -*/ - -use core::ops::{Range, RangeBounds}; - -use crate::util::{escape::DebugByte, primitives::PatternID, utf8}; - -/// The parameters for a regex search including the haystack to search. -/// -/// It turns out that regex searches have a few parameters, and in most cases, -/// those parameters have defaults that work in the vast majority of cases. -/// This `Input` type exists to make that common case seamless while also -/// providing an avenue for changing the parameters of a search. In particular, -/// this type enables doing so without a combinatorial explosion of different -/// methods and/or superfluous parameters in the common cases. -/// -/// An `Input` permits configuring the following things: -/// -/// * Search only a substring of a haystack, while taking the broader context -/// into account for resolving look-around assertions. -/// * Indicating whether to search for all patterns in a regex, or to -/// only search for one pattern in particular. -/// * Whether to perform an anchored on unanchored search. -/// * Whether to report a match as early as possible. -/// -/// All of these parameters, except for the haystack, have sensible default -/// values. This means that the minimal search configuration is simply a call -/// to [`Input::new`] with your haystack. Setting any other parameter is -/// optional. -/// -/// Moreover, for any `H` that implements `AsRef<[u8]>`, there exists a -/// `From for Input` implementation. This is useful because many of the -/// search APIs in this crate accept an `Into`. This means you can -/// provide string or byte strings to these routines directly, and they'll -/// automatically get converted into an `Input` for you. -/// -/// The lifetime parameter `'h` refers to the lifetime of the haystack. -/// -/// # Organization -/// -/// The API of `Input` is split into a few different parts: -/// -/// * A builder-like API that transforms a `Input` by value. Examples: -/// [`Input::span`] and [`Input::anchored`]. -/// * A setter API that permits mutating parameters in place. Examples: -/// [`Input::set_span`] and [`Input::set_anchored`]. 
-/// * A getter API that permits retrieving any of the search parameters. -/// Examples: [`Input::get_span`] and [`Input::get_anchored`]. -/// * A few convenience getter routines that don't conform to the above naming -/// pattern due to how common they are. Examples: [`Input::haystack`], -/// [`Input::start`] and [`Input::end`]. -/// * Miscellaneous predicates and other helper routines that are useful -/// in some contexts. Examples: [`Input::is_char_boundary`]. -/// -/// A `Input` exposes so much because it is meant to be used by both callers of -/// regex engines _and_ implementors of regex engines. A constraining factor is -/// that regex engines should accept a `&Input` as its lowest level API, which -/// means that implementors should only use the "getter" APIs of a `Input`. -/// -/// # Valid bounds and search termination -/// -/// An `Input` permits setting the bounds of a search via either -/// [`Input::span`] or [`Input::range`]. The bounds set must be valid, or -/// else a panic will occur. Bounds are valid if and only if: -/// -/// * The bounds represent a valid range into the input's haystack. -/// * **or** the end bound is a valid ending bound for the haystack *and* -/// the start bound is exactly one greater than the start bound. -/// -/// In the latter case, [`Input::is_done`] will return true and indicates any -/// search receiving such an input should immediately return with no match. -/// -/// Note that while `Input` is used for reverse searches in this crate, the -/// `Input::is_done` predicate assumes a forward search. Because unsigned -/// offsets are used internally, there is no way to tell from only the offsets -/// whether a reverse search is done or not. -/// -/// # Regex engine support -/// -/// Any regex engine accepting an `Input` must support at least the following -/// things: -/// -/// * Searching a `&[u8]` for matches. -/// * Searching a substring of `&[u8]` for a match, such that any match -/// reported must appear entirely within that substring. -/// * For a forwards search, a match should never be reported when -/// [`Input::is_done`] returns true. (For reverse searches, termination should -/// be handled outside of `Input`.) -/// -/// Supporting other aspects of an `Input` are optional, but regex engines -/// should handle aspects they don't support gracefully. How this is done is -/// generally up to the regex engine. This crate generally treats unsupported -/// anchored modes as an error to report for example, but for simplicity, in -/// the meta regex engine, trying to search with an invalid pattern ID just -/// results in no match being reported. -#[derive(Clone)] -pub struct Input<'h> { - haystack: &'h [u8], - span: Span, - anchored: Anchored, - earliest: bool, -} - -impl<'h> Input<'h> { - /// Create a new search configuration for the given haystack. - #[inline] - pub fn new>(haystack: &'h H) -> Input<'h> { - // Perform only one call to `haystack.as_ref()` to protect from incorrect - // implementations that return different values from multiple calls. - // This is important because there's code that relies on `span` not being - // out of bounds with respect to the stored `haystack`. - let haystack = haystack.as_ref(); - Input { - haystack, - span: Span { start: 0, end: haystack.len() }, - anchored: Anchored::No, - earliest: false, - } - } - - /// Set the span for this search. - /// - /// This routine does not panic if the span given is not a valid range for - /// this search's haystack. 
If this search is run with an invalid range, - /// then the most likely outcome is that the actual search execution will - /// panic. - /// - /// This routine is generic over how a span is provided. While - /// a [`Span`] may be given directly, one may also provide a - /// `std::ops::Range`. To provide anything supported by range - /// syntax, use the [`Input::range`] method. - /// - /// The default span is the entire haystack. - /// - /// Note that [`Input::range`] overrides this method and vice versa. - /// - /// # Panics - /// - /// This panics if the given span does not correspond to valid bounds in - /// the haystack or the termination of a search. - /// - /// # Example - /// - /// This example shows how the span of the search can impact whether a - /// match is reported or not. This is particularly relevant for look-around - /// operators, which might take things outside of the span into account - /// when determining whether they match. - /// - /// ``` - /// # if cfg!(miri) { return Ok(()); } // miri takes too long - /// use regex_automata::{ - /// nfa::thompson::pikevm::PikeVM, - /// Match, Input, - /// }; - /// - /// // Look for 'at', but as a distinct word. - /// let re = PikeVM::new(r"\bat\b")?; - /// let mut cache = re.create_cache(); - /// let mut caps = re.create_captures(); - /// - /// // Our haystack contains 'at', but not as a distinct word. - /// let haystack = "batter"; - /// - /// // A standard search finds nothing, as expected. - /// let input = Input::new(haystack); - /// re.search(&mut cache, &input, &mut caps); - /// assert_eq!(None, caps.get_match()); - /// - /// // But if we wanted to search starting at position '1', we might - /// // slice the haystack. If we do this, it's impossible for the \b - /// // anchors to take the surrounding context into account! And thus, - /// // a match is produced. - /// let input = Input::new(&haystack[1..3]); - /// re.search(&mut cache, &input, &mut caps); - /// assert_eq!(Some(Match::must(0, 0..2)), caps.get_match()); - /// - /// // But if we specify the span of the search instead of slicing the - /// // haystack, then the regex engine can "see" outside of the span - /// // and resolve the anchors correctly. - /// let input = Input::new(haystack).span(1..3); - /// re.search(&mut cache, &input, &mut caps); - /// assert_eq!(None, caps.get_match()); - /// - /// # Ok::<(), Box>(()) - /// ``` - /// - /// This may seem a little ham-fisted, but this scenario tends to come up - /// if some other regex engine found the match span and now you need to - /// re-process that span to look for capturing groups. (e.g., Run a faster - /// DFA first, find a match, then run the PikeVM on just the match span to - /// resolve capturing groups.) In order to implement that sort of logic - /// correctly, you need to set the span on the search instead of slicing - /// the haystack directly. - /// - /// The other advantage of using this routine to specify the bounds of the - /// search is that the match offsets are still reported in terms of the - /// original haystack. For example, the second search in the example above - /// reported a match at position `0`, even though `at` starts at offset - /// `1` because we sliced the haystack. - #[inline] - pub fn span>(mut self, span: S) -> Input<'h> { - self.set_span(span); - self - } - - /// Like `Input::span`, but accepts any range instead. - /// - /// This routine does not panic if the range given is not a valid range for - /// this search's haystack. 
If this search is run with an invalid range, - /// then the most likely outcome is that the actual search execution will - /// panic. - /// - /// The default range is the entire haystack. - /// - /// Note that [`Input::span`] overrides this method and vice versa. - /// - /// # Panics - /// - /// This routine will panic if the given range could not be converted - /// to a valid [`Range`]. For example, this would panic when given - /// `0..=usize::MAX` since it cannot be represented using a half-open - /// interval in terms of `usize`. - /// - /// This also panics if the given range does not correspond to valid bounds - /// in the haystack or the termination of a search. - /// - /// # Example - /// - /// ``` - /// use regex_automata::Input; - /// - /// let input = Input::new("foobar"); - /// assert_eq!(0..6, input.get_range()); - /// - /// let input = Input::new("foobar").range(2..=4); - /// assert_eq!(2..5, input.get_range()); - /// ``` - #[inline] - pub fn range>(mut self, range: R) -> Input<'h> { - self.set_range(range); - self - } - - /// Sets the anchor mode of a search. - /// - /// When a search is anchored (so that's [`Anchored::Yes`] or - /// [`Anchored::Pattern`]), a match must begin at the start of a search. - /// When a search is not anchored (that's [`Anchored::No`]), regex engines - /// will behave as if the pattern started with a `(?s-u:.)*?`. This prefix - /// permits a match to appear anywhere. - /// - /// By default, the anchored mode is [`Anchored::No`]. - /// - /// **WARNING:** this is subtly different than using a `^` at the start of - /// your regex. A `^` forces a regex to match exclusively at the start of - /// a haystack, regardless of where you begin your search. In contrast, - /// anchoring a search will allow your regex to match anywhere in your - /// haystack, but the match must start at the beginning of a search. - /// - /// For example, consider the haystack `aba` and the following searches: - /// - /// 1. The regex `^a` is compiled with `Anchored::No` and searches `aba` - /// starting at position `2`. Since `^` requires the match to start at - /// the beginning of the haystack and `2 > 0`, no match is found. - /// 2. The regex `a` is compiled with `Anchored::Yes` and searches `aba` - /// starting at position `2`. This reports a match at `[2, 3]` since - /// the match starts where the search started. Since there is no `^`, - /// there is no requirement for the match to start at the beginning of - /// the haystack. - /// 3. The regex `a` is compiled with `Anchored::Yes` and searches `aba` - /// starting at position `1`. Since `b` corresponds to position `1` and - /// since the search is anchored, it finds no match. While the regex - /// matches at other positions, configuring the search to be anchored - /// requires that it only report a match that begins at the same offset - /// as the beginning of the search. - /// 4. The regex `a` is compiled with `Anchored::No` and searches `aba` - /// starting at position `1`. Since the search is not anchored and - /// the regex does not start with `^`, the search executes as if there - /// is a `(?s:.)*?` prefix that permits it to match anywhere. Thus, it - /// reports a match at `[2, 3]`. - /// - /// Note that the [`Anchored::Pattern`] mode is like `Anchored::Yes`, - /// except it only reports matches for a particular pattern. - /// - /// # Example - /// - /// This demonstrates the differences between an anchored search and - /// a pattern that begins with `^` (as described in the above warning - /// message). 
- /// - /// ``` - /// use regex_automata::{ - /// nfa::thompson::pikevm::PikeVM, - /// Anchored, Match, Input, - /// }; - /// - /// let haystack = "aba"; - /// - /// let re = PikeVM::new(r"^a")?; - /// let (mut cache, mut caps) = (re.create_cache(), re.create_captures()); - /// let input = Input::new(haystack).span(2..3).anchored(Anchored::No); - /// re.search(&mut cache, &input, &mut caps); - /// // No match is found because 2 is not the beginning of the haystack, - /// // which is what ^ requires. - /// assert_eq!(None, caps.get_match()); - /// - /// let re = PikeVM::new(r"a")?; - /// let (mut cache, mut caps) = (re.create_cache(), re.create_captures()); - /// let input = Input::new(haystack).span(2..3).anchored(Anchored::Yes); - /// re.search(&mut cache, &input, &mut caps); - /// // An anchored search can still match anywhere in the haystack, it just - /// // must begin at the start of the search which is '2' in this case. - /// assert_eq!(Some(Match::must(0, 2..3)), caps.get_match()); - /// - /// let re = PikeVM::new(r"a")?; - /// let (mut cache, mut caps) = (re.create_cache(), re.create_captures()); - /// let input = Input::new(haystack).span(1..3).anchored(Anchored::Yes); - /// re.search(&mut cache, &input, &mut caps); - /// // No match is found since we start searching at offset 1 which - /// // corresponds to 'b'. Since there is no '(?s:.)*?' prefix, no match - /// // is found. - /// assert_eq!(None, caps.get_match()); - /// - /// let re = PikeVM::new(r"a")?; - /// let (mut cache, mut caps) = (re.create_cache(), re.create_captures()); - /// let input = Input::new(haystack).span(1..3).anchored(Anchored::No); - /// re.search(&mut cache, &input, &mut caps); - /// // Since anchored=no, an implicit '(?s:.)*?' prefix was added to the - /// // pattern. Even though the search starts at 'b', the 'match anything' - /// // prefix allows the search to match 'a'. - /// let expected = Some(Match::must(0, 2..3)); - /// assert_eq!(expected, caps.get_match()); - /// - /// # Ok::<(), Box>(()) - /// ``` - #[inline] - pub fn anchored(mut self, mode: Anchored) -> Input<'h> { - self.set_anchored(mode); - self - } - - /// Whether to execute an "earliest" search or not. - /// - /// When running a non-overlapping search, an "earliest" search will return - /// the match location as early as possible. For example, given a pattern - /// of `foo[0-9]+` and a haystack of `foo12345`, a normal leftmost search - /// will return `foo12345` as a match. But an "earliest" search for regex - /// engines that support "earliest" semantics will return `foo1` as a - /// match, since as soon as the first digit following `foo` is seen, it is - /// known to have found a match. - /// - /// Note that "earliest" semantics generally depend on the regex engine. - /// Different regex engines may determine there is a match at different - /// points. So there is no guarantee that "earliest" matches will always - /// return the same offsets for all regex engines. The "earliest" notion - /// is really about when the particular regex engine determines there is - /// a match rather than a consistent semantic unto itself. This is often - /// useful for implementing "did a match occur or not" predicates, but - /// sometimes the offset is useful as well. - /// - /// This is disabled by default. - /// - /// # Example - /// - /// This example shows the difference between "earliest" searching and - /// normal searching. 
- /// - /// ``` - /// use regex_automata::{nfa::thompson::pikevm::PikeVM, Match, Input}; - /// - /// let re = PikeVM::new(r"foo[0-9]+")?; - /// let mut cache = re.create_cache(); - /// let mut caps = re.create_captures(); - /// - /// // A normal search implements greediness like you expect. - /// let input = Input::new("foo12345"); - /// re.search(&mut cache, &input, &mut caps); - /// assert_eq!(Some(Match::must(0, 0..8)), caps.get_match()); - /// - /// // When 'earliest' is enabled and the regex engine supports - /// // it, the search will bail once it knows a match has been - /// // found. - /// let input = Input::new("foo12345").earliest(true); - /// re.search(&mut cache, &input, &mut caps); - /// assert_eq!(Some(Match::must(0, 0..4)), caps.get_match()); - /// # Ok::<(), Box>(()) - /// ``` - #[inline] - pub fn earliest(mut self, yes: bool) -> Input<'h> { - self.set_earliest(yes); - self - } - - /// Set the span for this search configuration. - /// - /// This is like the [`Input::span`] method, except this mutates the - /// span in place. - /// - /// This routine is generic over how a span is provided. While - /// a [`Span`] may be given directly, one may also provide a - /// `std::ops::Range`. - /// - /// # Panics - /// - /// This panics if the given span does not correspond to valid bounds in - /// the haystack or the termination of a search. - /// - /// # Example - /// - /// ``` - /// use regex_automata::Input; - /// - /// let mut input = Input::new("foobar"); - /// assert_eq!(0..6, input.get_range()); - /// input.set_span(2..4); - /// assert_eq!(2..4, input.get_range()); - /// ``` - #[inline] - pub fn set_span>(&mut self, span: S) { - let span = span.into(); - assert!( - span.end <= self.haystack.len() - && span.start <= span.end.wrapping_add(1), - "invalid span {:?} for haystack of length {}", - span, - self.haystack.len(), - ); - self.span = span; - } - - /// Set the span for this search configuration given any range. - /// - /// This is like the [`Input::range`] method, except this mutates the - /// span in place. - /// - /// This routine does not panic if the range given is not a valid range for - /// this search's haystack. If this search is run with an invalid range, - /// then the most likely outcome is that the actual search execution will - /// panic. - /// - /// # Panics - /// - /// This routine will panic if the given range could not be converted - /// to a valid [`Range`]. For example, this would panic when given - /// `0..=usize::MAX` since it cannot be represented using a half-open - /// interval in terms of `usize`. - /// - /// This also panics if the given span does not correspond to valid bounds - /// in the haystack or the termination of a search. - /// - /// # Example - /// - /// ``` - /// use regex_automata::Input; - /// - /// let mut input = Input::new("foobar"); - /// assert_eq!(0..6, input.get_range()); - /// input.set_range(2..=4); - /// assert_eq!(2..5, input.get_range()); - /// ``` - #[inline] - pub fn set_range>(&mut self, range: R) { - use core::ops::Bound; - - // It's a little weird to convert ranges into spans, and then spans - // back into ranges when we actually slice the haystack. Because - // of that process, we always represent everything as a half-open - // internal. Therefore, handling things like m..=n is a little awkward. - let start = match range.start_bound() { - Bound::Included(&i) => i, - // Can this case ever happen? Range syntax doesn't support it... 
- Bound::Excluded(&i) => i.checked_add(1).unwrap(), - Bound::Unbounded => 0, - }; - let end = match range.end_bound() { - Bound::Included(&i) => i.checked_add(1).unwrap(), - Bound::Excluded(&i) => i, - Bound::Unbounded => self.haystack().len(), - }; - self.set_span(Span { start, end }); - } - - /// Set the starting offset for the span for this search configuration. - /// - /// This is a convenience routine for only mutating the start of a span - /// without having to set the entire span. - /// - /// # Panics - /// - /// This panics if the span resulting from the new start position does not - /// correspond to valid bounds in the haystack or the termination of a - /// search. - /// - /// # Example - /// - /// ``` - /// use regex_automata::Input; - /// - /// let mut input = Input::new("foobar"); - /// assert_eq!(0..6, input.get_range()); - /// input.set_start(5); - /// assert_eq!(5..6, input.get_range()); - /// ``` - #[inline] - pub fn set_start(&mut self, start: usize) { - self.set_span(Span { start, ..self.get_span() }); - } - - /// Set the ending offset for the span for this search configuration. - /// - /// This is a convenience routine for only mutating the end of a span - /// without having to set the entire span. - /// - /// # Panics - /// - /// This panics if the span resulting from the new end position does not - /// correspond to valid bounds in the haystack or the termination of a - /// search. - /// - /// # Example - /// - /// ``` - /// use regex_automata::Input; - /// - /// let mut input = Input::new("foobar"); - /// assert_eq!(0..6, input.get_range()); - /// input.set_end(5); - /// assert_eq!(0..5, input.get_range()); - /// ``` - #[inline] - pub fn set_end(&mut self, end: usize) { - self.set_span(Span { end, ..self.get_span() }); - } - - /// Set the anchor mode of a search. - /// - /// This is like [`Input::anchored`], except it mutates the search - /// configuration in place. - /// - /// # Example - /// - /// ``` - /// use regex_automata::{Anchored, Input, PatternID}; - /// - /// let mut input = Input::new("foobar"); - /// assert_eq!(Anchored::No, input.get_anchored()); - /// - /// let pid = PatternID::must(5); - /// input.set_anchored(Anchored::Pattern(pid)); - /// assert_eq!(Anchored::Pattern(pid), input.get_anchored()); - /// ``` - #[inline] - pub fn set_anchored(&mut self, mode: Anchored) { - self.anchored = mode; - } - - /// Set whether the search should execute in "earliest" mode or not. - /// - /// This is like [`Input::earliest`], except it mutates the search - /// configuration in place. - /// - /// # Example - /// - /// ``` - /// use regex_automata::Input; - /// - /// let mut input = Input::new("foobar"); - /// assert!(!input.get_earliest()); - /// input.set_earliest(true); - /// assert!(input.get_earliest()); - /// ``` - #[inline] - pub fn set_earliest(&mut self, yes: bool) { - self.earliest = yes; - } - - /// Return a borrow of the underlying haystack as a slice of bytes. - /// - /// # Example - /// - /// ``` - /// use regex_automata::Input; - /// - /// let input = Input::new("foobar"); - /// assert_eq!(b"foobar", input.haystack()); - /// ``` - #[inline] - pub fn haystack(&self) -> &'h [u8] { - self.haystack - } - - /// Return the start position of this search. - /// - /// This is a convenience routine for `search.get_span().start()`. - /// - /// When [`Input::is_done`] is `false`, this is guaranteed to return - /// an offset that is less than or equal to [`Input::end`]. Otherwise, - /// the offset is one greater than [`Input::end`]. 
- /// - /// # Example - /// - /// ``` - /// use regex_automata::Input; - /// - /// let input = Input::new("foobar"); - /// assert_eq!(0, input.start()); - /// - /// let input = Input::new("foobar").span(2..4); - /// assert_eq!(2, input.start()); - /// ``` - #[inline] - pub fn start(&self) -> usize { - self.get_span().start - } - - /// Return the end position of this search. - /// - /// This is a convenience routine for `search.get_span().end()`. - /// - /// This is guaranteed to return an offset that is a valid exclusive end - /// bound for this input's haystack. - /// - /// # Example - /// - /// ``` - /// use regex_automata::Input; - /// - /// let input = Input::new("foobar"); - /// assert_eq!(6, input.end()); - /// - /// let input = Input::new("foobar").span(2..4); - /// assert_eq!(4, input.end()); - /// ``` - #[inline] - pub fn end(&self) -> usize { - self.get_span().end - } - - /// Return the span for this search configuration. - /// - /// If one was not explicitly set, then the span corresponds to the entire - /// range of the haystack. - /// - /// When [`Input::is_done`] is `false`, the span returned is guaranteed - /// to correspond to valid bounds for this input's haystack. - /// - /// # Example - /// - /// ``` - /// use regex_automata::{Input, Span}; - /// - /// let input = Input::new("foobar"); - /// assert_eq!(Span { start: 0, end: 6 }, input.get_span()); - /// ``` - #[inline] - pub fn get_span(&self) -> Span { - self.span - } - - /// Return the span as a range for this search configuration. - /// - /// If one was not explicitly set, then the span corresponds to the entire - /// range of the haystack. - /// - /// When [`Input::is_done`] is `false`, the range returned is guaranteed - /// to correspond to valid bounds for this input's haystack. - /// - /// # Example - /// - /// ``` - /// use regex_automata::Input; - /// - /// let input = Input::new("foobar"); - /// assert_eq!(0..6, input.get_range()); - /// ``` - #[inline] - pub fn get_range(&self) -> Range { - self.get_span().range() - } - - /// Return the anchored mode for this search configuration. - /// - /// If no anchored mode was set, then it defaults to [`Anchored::No`]. - /// - /// # Example - /// - /// ``` - /// use regex_automata::{Anchored, Input, PatternID}; - /// - /// let mut input = Input::new("foobar"); - /// assert_eq!(Anchored::No, input.get_anchored()); - /// - /// let pid = PatternID::must(5); - /// input.set_anchored(Anchored::Pattern(pid)); - /// assert_eq!(Anchored::Pattern(pid), input.get_anchored()); - /// ``` - #[inline] - pub fn get_anchored(&self) -> Anchored { - self.anchored - } - - /// Return whether this search should execute in "earliest" mode. - /// - /// # Example - /// - /// ``` - /// use regex_automata::Input; - /// - /// let input = Input::new("foobar"); - /// assert!(!input.get_earliest()); - /// ``` - #[inline] - pub fn get_earliest(&self) -> bool { - self.earliest - } - - /// Return true if and only if this search can never return any other - /// matches. - /// - /// This occurs when the start position of this search is greater than the - /// end position of the search. 
- /// - /// # Example - /// - /// ``` - /// use regex_automata::Input; - /// - /// let mut input = Input::new("foobar"); - /// assert!(!input.is_done()); - /// input.set_start(6); - /// assert!(!input.is_done()); - /// input.set_start(7); - /// assert!(input.is_done()); - /// ``` - #[inline] - pub fn is_done(&self) -> bool { - self.get_span().start > self.get_span().end - } - - /// Returns true if and only if the given offset in this search's haystack - /// falls on a valid UTF-8 encoded codepoint boundary. - /// - /// If the haystack is not valid UTF-8, then the behavior of this routine - /// is unspecified. - /// - /// # Example - /// - /// This shows where codepoint boundaries do and don't exist in valid - /// UTF-8. - /// - /// ``` - /// use regex_automata::Input; - /// - /// let input = Input::new("☃"); - /// assert!(input.is_char_boundary(0)); - /// assert!(!input.is_char_boundary(1)); - /// assert!(!input.is_char_boundary(2)); - /// assert!(input.is_char_boundary(3)); - /// assert!(!input.is_char_boundary(4)); - /// ``` - #[inline] - pub fn is_char_boundary(&self, offset: usize) -> bool { - utf8::is_boundary(self.haystack(), offset) - } -} - -impl<'h> core::fmt::Debug for Input<'h> { - fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { - use crate::util::escape::DebugHaystack; - - f.debug_struct("Input") - .field("haystack", &DebugHaystack(self.haystack())) - .field("span", &self.span) - .field("anchored", &self.anchored) - .field("earliest", &self.earliest) - .finish() - } -} - -impl<'h, H: ?Sized + AsRef<[u8]>> From<&'h H> for Input<'h> { - fn from(haystack: &'h H) -> Input<'h> { - Input::new(haystack) - } -} - -/// A representation of a span reported by a regex engine. -/// -/// A span corresponds to the starting and ending _byte offsets_ of a -/// contiguous region of bytes. The starting offset is inclusive while the -/// ending offset is exclusive. That is, a span is a half-open interval. -/// -/// A span is used to report the offsets of a match, but it is also used to -/// convey which region of a haystack should be searched via routines like -/// [`Input::span`]. -/// -/// This is basically equivalent to a `std::ops::Range`, except this -/// type implements `Copy` which makes it more ergonomic to use in the context -/// of this crate. Like a range, this implements `Index` for `[u8]` and `str`, -/// and `IndexMut` for `[u8]`. For convenience, this also impls `From`, -/// which means things like `Span::from(5..10)` work. -#[derive(Clone, Copy, Eq, Hash, PartialEq)] -pub struct Span { - /// The start offset of the span, inclusive. - pub start: usize, - /// The end offset of the span, exclusive. - pub end: usize, -} - -impl Span { - /// Returns this span as a range. - #[inline] - pub fn range(&self) -> Range { - Range::from(*self) - } - - /// Returns true when this span is empty. That is, when `start >= end`. - #[inline] - pub fn is_empty(&self) -> bool { - self.start >= self.end - } - - /// Returns the length of this span. - /// - /// This returns `0` in precisely the cases that `is_empty` returns `true`. - #[inline] - pub fn len(&self) -> usize { - self.end.saturating_sub(self.start) - } - - /// Returns true when the given offset is contained within this span. - /// - /// Note that an empty span contains no offsets and will always return - /// false. 
- #[inline] - pub fn contains(&self, offset: usize) -> bool { - !self.is_empty() && self.start <= offset && offset <= self.end - } - - /// Returns a new span with `offset` added to this span's `start` and `end` - /// values. - #[inline] - pub fn offset(&self, offset: usize) -> Span { - Span { start: self.start + offset, end: self.end + offset } - } -} - -impl core::fmt::Debug for Span { - fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { - write!(f, "{}..{}", self.start, self.end) - } -} - -impl core::ops::Index for [u8] { - type Output = [u8]; - - #[inline] - fn index(&self, index: Span) -> &[u8] { - &self[index.range()] - } -} - -impl core::ops::IndexMut for [u8] { - #[inline] - fn index_mut(&mut self, index: Span) -> &mut [u8] { - &mut self[index.range()] - } -} - -impl core::ops::Index for str { - type Output = str; - - #[inline] - fn index(&self, index: Span) -> &str { - &self[index.range()] - } -} - -impl From> for Span { - #[inline] - fn from(range: Range) -> Span { - Span { start: range.start, end: range.end } - } -} - -impl From for Range { - #[inline] - fn from(span: Span) -> Range { - Range { start: span.start, end: span.end } - } -} - -impl PartialEq> for Span { - #[inline] - fn eq(&self, range: &Range) -> bool { - self.start == range.start && self.end == range.end - } -} - -impl PartialEq for Range { - #[inline] - fn eq(&self, span: &Span) -> bool { - self.start == span.start && self.end == span.end - } -} - -/// A representation of "half" of a match reported by a DFA. -/// -/// This is called a "half" match because it only includes the end location (or -/// start location for a reverse search) of a match. This corresponds to the -/// information that a single DFA scan can report. Getting the other half of -/// the match requires a second scan with a reversed DFA. -/// -/// A half match also includes the pattern that matched. The pattern is -/// identified by an ID, which corresponds to its position (starting from `0`) -/// relative to other patterns used to construct the corresponding DFA. If only -/// a single pattern is provided to the DFA, then all matches are guaranteed to -/// have a pattern ID of `0`. -#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)] -pub struct HalfMatch { - /// The pattern ID. - pattern: PatternID, - /// The offset of the match. - /// - /// For forward searches, the offset is exclusive. For reverse searches, - /// the offset is inclusive. - offset: usize, -} - -impl HalfMatch { - /// Create a new half match from a pattern ID and a byte offset. - #[inline] - pub fn new(pattern: PatternID, offset: usize) -> HalfMatch { - HalfMatch { pattern, offset } - } - - /// Create a new half match from a pattern ID and a byte offset. - /// - /// This is like [`HalfMatch::new`], but accepts a `usize` instead of a - /// [`PatternID`]. This panics if the given `usize` is not representable - /// as a `PatternID`. - #[inline] - pub fn must(pattern: usize, offset: usize) -> HalfMatch { - HalfMatch::new(PatternID::new(pattern).unwrap(), offset) - } - - /// Returns the ID of the pattern that matched. - /// - /// The ID of a pattern is derived from the position in which it was - /// originally inserted into the corresponding DFA. The first pattern has - /// identifier `0`, and each subsequent pattern is `1`, `2` and so on. - #[inline] - pub fn pattern(&self) -> PatternID { - self.pattern - } - - /// The position of the match. - /// - /// If this match was produced by a forward search, then the offset is - /// exclusive. 
If this match was produced by a reverse search, then the - /// offset is inclusive. - #[inline] - pub fn offset(&self) -> usize { - self.offset - } -} - -/// A representation of a match reported by a regex engine. -/// -/// A match has two essential pieces of information: the [`PatternID`] that -/// matches, and the [`Span`] of the match in a haystack. -/// -/// The pattern is identified by an ID, which corresponds to its position -/// (starting from `0`) relative to other patterns used to construct the -/// corresponding regex engine. If only a single pattern is provided, then all -/// matches are guaranteed to have a pattern ID of `0`. -/// -/// Every match reported by a regex engine guarantees that its span has its -/// start offset as less than or equal to its end offset. -#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)] -pub struct Match { - /// The pattern ID. - pattern: PatternID, - /// The underlying match span. - span: Span, -} - -impl Match { - /// Create a new match from a pattern ID and a span. - /// - /// This constructor is generic over how a span is provided. While - /// a [`Span`] may be given directly, one may also provide a - /// `std::ops::Range`. - /// - /// # Panics - /// - /// This panics if `end < start`. - /// - /// # Example - /// - /// This shows how to create a match for the first pattern in a regex - /// object using convenient range syntax. - /// - /// ``` - /// use regex_automata::{Match, PatternID}; - /// - /// let m = Match::new(PatternID::ZERO, 5..10); - /// assert_eq!(0, m.pattern().as_usize()); - /// assert_eq!(5, m.start()); - /// assert_eq!(10, m.end()); - /// ``` - #[inline] - pub fn new>(pattern: PatternID, span: S) -> Match { - let span: Span = span.into(); - assert!(span.start <= span.end, "invalid match span"); - Match { pattern, span } - } - - /// Create a new match from a pattern ID and a byte offset span. - /// - /// This constructor is generic over how a span is provided. While - /// a [`Span`] may be given directly, one may also provide a - /// `std::ops::Range`. - /// - /// This is like [`Match::new`], but accepts a `usize` instead of a - /// [`PatternID`]. This panics if the given `usize` is not representable - /// as a `PatternID`. - /// - /// # Panics - /// - /// This panics if `end < start` or if `pattern > PatternID::MAX`. - /// - /// # Example - /// - /// This shows how to create a match for the third pattern in a regex - /// object using convenient range syntax. - /// - /// ``` - /// use regex_automata::Match; - /// - /// let m = Match::must(3, 5..10); - /// assert_eq!(3, m.pattern().as_usize()); - /// assert_eq!(5, m.start()); - /// assert_eq!(10, m.end()); - /// ``` - #[inline] - pub fn must>(pattern: usize, span: S) -> Match { - Match::new(PatternID::must(pattern), span) - } - - /// Returns the ID of the pattern that matched. - /// - /// The ID of a pattern is derived from the position in which it was - /// originally inserted into the corresponding regex engine. The first - /// pattern has identifier `0`, and each subsequent pattern is `1`, `2` and - /// so on. - #[inline] - pub fn pattern(&self) -> PatternID { - self.pattern - } - - /// The starting position of the match. - /// - /// This is a convenience routine for `Match::span().start`. - #[inline] - pub fn start(&self) -> usize { - self.span().start - } - - /// The ending position of the match. - /// - /// This is a convenience routine for `Match::span().end`. - #[inline] - pub fn end(&self) -> usize { - self.span().end - } - - /// Returns the match span as a range. 
- /// - /// This is a convenience routine for `Match::span().range()`. - #[inline] - pub fn range(&self) -> core::ops::Range { - self.span().range() - } - - /// Returns the span for this match. - #[inline] - pub fn span(&self) -> Span { - self.span - } - - /// Returns true when the span in this match is empty. - /// - /// An empty match can only be returned when the regex itself can match - /// the empty string. - #[inline] - pub fn is_empty(&self) -> bool { - self.span().is_empty() - } - - /// Returns the length of this match. - /// - /// This returns `0` in precisely the cases that `is_empty` returns `true`. - #[inline] - pub fn len(&self) -> usize { - self.span().len() - } -} - -/// A set of `PatternID`s. -/// -/// A set of pattern identifiers is useful for recording which patterns have -/// matched a particular haystack. A pattern set _only_ includes pattern -/// identifiers. It does not include offset information. -/// -/// # Example -/// -/// This shows basic usage of a set. -/// -/// ``` -/// use regex_automata::{PatternID, PatternSet}; -/// -/// let pid1 = PatternID::must(5); -/// let pid2 = PatternID::must(8); -/// // Create a new empty set. -/// let mut set = PatternSet::new(10); -/// // Insert pattern IDs. -/// set.insert(pid1); -/// set.insert(pid2); -/// // Test membership. -/// assert!(set.contains(pid1)); -/// assert!(set.contains(pid2)); -/// // Get all members. -/// assert_eq!( -/// vec![5, 8], -/// set.iter().map(|p| p.as_usize()).collect::>(), -/// ); -/// // Clear the set. -/// set.clear(); -/// // Test that it is indeed empty. -/// assert!(set.is_empty()); -/// ``` -#[cfg(feature = "alloc")] -#[derive(Clone, Debug, Eq, PartialEq)] -pub struct PatternSet { - /// The number of patterns set to 'true' in this set. - len: usize, - /// A map from PatternID to boolean of whether a pattern matches or not. - /// - /// This should probably be a bitset, but it's probably unlikely to matter - /// much in practice. - /// - /// The main downside of this representation (and similarly for a bitset) - /// is that iteration scales with the capacity of the set instead of - /// the length of the set. This doesn't seem likely to be a problem in - /// practice. - /// - /// Another alternative is to just use a 'SparseSet' for this. It does use - /// more memory (quite a bit more), but that seems fine I think compared - /// to the memory being used by the regex engine. The real hiccup with - /// it is that it yields pattern IDs in the order they were inserted. - /// Which is actually kind of nice, but at the time of writing, pattern - /// IDs are yielded in ascending order in the regex crate RegexSet API. - /// If we did change to 'SparseSet', we could provide an additional - /// 'iter_match_order' iterator, but keep the ascending order one for - /// compatibility. - which: alloc::boxed::Box<[bool]>, -} - -#[cfg(feature = "alloc")] -impl PatternSet { - /// Create a new set of pattern identifiers with the given capacity. - /// - /// The given capacity typically corresponds to (at least) the number of - /// patterns in a compiled regex object. - /// - /// # Panics - /// - /// This panics if the given capacity exceeds [`PatternID::LIMIT`]. This is - /// impossible if you use the `pattern_len()` method as defined on any of - /// the regex engines in this crate. Namely, a regex will fail to build by - /// returning an error if the number of patterns given to it exceeds the - /// limit. Therefore, the number of patterns in a valid regex is always - /// a correct capacity to provide here. 
- pub fn new(capacity: usize) -> PatternSet { - assert!( - capacity <= PatternID::LIMIT, - "pattern set capacity exceeds limit of {}", - PatternID::LIMIT, - ); - PatternSet { - len: 0, - which: alloc::vec![false; capacity].into_boxed_slice(), - } - } - - /// Clear this set such that it contains no pattern IDs. - pub fn clear(&mut self) { - self.len = 0; - for matched in self.which.iter_mut() { - *matched = false; - } - } - - /// Return true if and only if the given pattern identifier is in this set. - pub fn contains(&self, pid: PatternID) -> bool { - pid.as_usize() < self.capacity() && self.which[pid] - } - - /// Insert the given pattern identifier into this set and return `true` if - /// the given pattern ID was not previously in this set. - /// - /// If the pattern identifier is already in this set, then this is a no-op. - /// - /// Use [`PatternSet::try_insert`] for a fallible version of this routine. - /// - /// # Panics - /// - /// This panics if this pattern set has insufficient capacity to - /// store the given pattern ID. - pub fn insert(&mut self, pid: PatternID) -> bool { - self.try_insert(pid) - .expect("PatternSet should have sufficient capacity") - } - - /// Insert the given pattern identifier into this set and return `true` if - /// the given pattern ID was not previously in this set. - /// - /// If the pattern identifier is already in this set, then this is a no-op. - /// - /// # Errors - /// - /// This returns an error if this pattern set has insufficient capacity to - /// store the given pattern ID. - pub fn try_insert( - &mut self, - pid: PatternID, - ) -> Result { - if pid.as_usize() >= self.capacity() { - return Err(PatternSetInsertError { - attempted: pid, - capacity: self.capacity(), - }); - } - if self.which[pid] { - return Ok(false); - } - self.len += 1; - self.which[pid] = true; - Ok(true) - } - - /* - // This is currently commented out because it is unused and it is unclear - // whether it's useful or not. What's the harm in having it? When, if - // we ever wanted to change our representation to a 'SparseSet', then - // supporting this method would be a bit tricky. So in order to keep some - // API evolution flexibility, we leave it out for now. - - /// Remove the given pattern identifier from this set. - /// - /// If the pattern identifier was not previously in this set, then this - /// does not change the set and returns `false`. - /// - /// # Panics - /// - /// This panics if `pid` exceeds the capacity of this set. - pub fn remove(&mut self, pid: PatternID) -> bool { - if !self.which[pid] { - return false; - } - self.len -= 1; - self.which[pid] = false; - true - } - */ - - /// Return true if and only if this set has no pattern identifiers in it. - pub fn is_empty(&self) -> bool { - self.len() == 0 - } - - /// Return true if and only if this set has the maximum number of pattern - /// identifiers in the set. This occurs precisely when `PatternSet::len() - /// == PatternSet::capacity()`. - /// - /// This particular property is useful to test because it may allow one to - /// stop a search earlier than you might otherwise. Namely, if a search is - /// only reporting which patterns match a haystack and if you know all of - /// the patterns match at a given point, then there's no new information - /// that can be learned by continuing the search. (Because a pattern set - /// does not keep track of offset information.) - pub fn is_full(&self) -> bool { - self.len() == self.capacity() - } - - /// Returns the total number of pattern identifiers in this set. 
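As described above, `is_full` lets a caller stop early once every pattern has been seen. A minimal sketch of that idea, assuming the `regex-automata` crate as a dependency and a hypothetical two-pattern regex (the loop input is invented for illustration):

```
use regex_automata::{PatternID, PatternSet};

fn main() {
    // Capacity matches the number of patterns in the hypothetical regex.
    let mut set = PatternSet::new(2);
    for pid in [PatternID::must(0), PatternID::must(1), PatternID::must(0)] {
        set.insert(pid);
        if set.is_full() {
            // Every pattern has matched; a caller could stop searching here,
            // since a PatternSet records no offset information anyway.
            break;
        }
    }
    assert!(set.is_full());
    assert_eq!(2, set.len());
}
```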
- pub fn len(&self) -> usize { - self.len - } - - /// Returns the total number of pattern identifiers that may be stored - /// in this set. - /// - /// This is guaranteed to be less than or equal to [`PatternID::LIMIT`]. - /// - /// Typically, the capacity of a pattern set matches the number of patterns - /// in a regex object with which you are searching. - pub fn capacity(&self) -> usize { - self.which.len() - } - - /// Returns an iterator over all pattern identifiers in this set. - /// - /// The iterator yields pattern identifiers in ascending order, starting - /// at zero. - pub fn iter(&self) -> PatternSetIter<'_> { - PatternSetIter { it: self.which.iter().enumerate() } - } -} - -/// An error that occurs when a `PatternID` failed to insert into a -/// `PatternSet`. -/// -/// An insert fails when the given `PatternID` exceeds the configured capacity -/// of the `PatternSet`. -/// -/// This error is created by the [`PatternSet::try_insert`] routine. -#[cfg(feature = "alloc")] -#[derive(Clone, Debug)] -pub struct PatternSetInsertError { - attempted: PatternID, - capacity: usize, -} - -#[cfg(feature = "std")] -impl std::error::Error for PatternSetInsertError {} - -#[cfg(feature = "alloc")] -impl core::fmt::Display for PatternSetInsertError { - fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { - write!( - f, - "failed to insert pattern ID {} into pattern set \ - with insufficient capacity of {}", - self.attempted.as_usize(), - self.capacity, - ) - } -} - -/// An iterator over all pattern identifiers in a [`PatternSet`]. -/// -/// The lifetime parameter `'a` refers to the lifetime of the pattern set being -/// iterated over. -/// -/// This iterator is created by the [`PatternSet::iter`] method. -#[cfg(feature = "alloc")] -#[derive(Clone, Debug)] -pub struct PatternSetIter<'a> { - it: core::iter::Enumerate<core::slice::Iter<'a, bool>>, -} - -#[cfg(feature = "alloc")] -impl<'a> Iterator for PatternSetIter<'a> { - type Item = PatternID; - - fn next(&mut self) -> Option<PatternID> { - while let Some((index, &yes)) = self.it.next() { - if yes { - // Only valid 'PatternID' values can be inserted into the set - // and construction of the set panics if the capacity would - // permit storing invalid pattern IDs. Thus, 'yes' is only true - // precisely when 'index' corresponds to a valid 'PatternID'. - return Some(PatternID::new_unchecked(index)); - } - } - None - } - - fn size_hint(&self) -> (usize, Option<usize>) { - self.it.size_hint() - } -} - -#[cfg(feature = "alloc")] -impl<'a> DoubleEndedIterator for PatternSetIter<'a> { - fn next_back(&mut self) -> Option<PatternID> { - while let Some((index, &yes)) = self.it.next_back() { - if yes { - // Only valid 'PatternID' values can be inserted into the set - // and construction of the set panics if the capacity would - // permit storing invalid pattern IDs. Thus, 'yes' is only true - // precisely when 'index' corresponds to a valid 'PatternID'. - return Some(PatternID::new_unchecked(index)); - } - } - None - } -} - -/// The type of anchored search to perform. -/// -/// This is *almost* a boolean option. That is, you can either do an unanchored -/// search for any pattern in a regex, or you can do an anchored search for any -/// pattern in a regex. -/// -/// A third option exists that, assuming the regex engine supports it, permits -/// you to do an anchored search for a specific pattern. -/// -/// Note that there is no way to run an unanchored search for a specific -/// pattern. If you need that, you'll need to build separate regexes for each -/// pattern.
-/// -/// # Errors -/// -/// If a regex engine does not support the anchored mode selected, then the -/// regex engine will return an error. While any non-trivial regex engine -/// should support at least one of the available anchored modes, there is no -/// singular mode that is guaranteed to be universally supported. Some regex -/// engines might only support unanchored searches (DFAs compiled without -/// anchored starting states) and some regex engines might only support -/// anchored searches (like the one-pass DFA). -/// -/// The specific error returned is a [`MatchError`] with a -/// [`MatchErrorKind::UnsupportedAnchored`] kind. The kind includes the -/// `Anchored` value given that is unsupported. -/// -/// Note that regex engines should report "no match" if, for example, an -/// `Anchored::Pattern` is provided with an invalid pattern ID _but_ where -/// anchored searches for a specific pattern are supported. This is smooths out -/// behavior such that it's possible to guarantee that an error never occurs -/// based on how the regex engine is configured. All regex engines in this -/// crate report "no match" when searching for an invalid pattern ID, but where -/// searching for a valid pattern ID is otherwise supported. -/// -/// # Example -/// -/// This example shows how to use the various `Anchored` modes to run a -/// search. We use the [`PikeVM`](crate::nfa::thompson::pikevm::PikeVM) -/// because it supports all modes unconditionally. Some regex engines, like -/// the [`onepass::DFA`](crate::dfa::onepass::DFA) cannot support unanchored -/// searches. -/// -/// ``` -/// # if cfg!(miri) { return Ok(()); } // miri takes too long -/// use regex_automata::{ -/// nfa::thompson::pikevm::PikeVM, -/// Anchored, Input, Match, PatternID, -/// }; -/// -/// let re = PikeVM::new_many(&[ -/// r"Mrs. \w+", -/// r"Miss \w+", -/// r"Mr. \w+", -/// r"Ms. \w+", -/// ])?; -/// let mut cache = re.create_cache(); -/// let hay = "Hello Mr. Springsteen!"; -/// -/// // The default is to do an unanchored search. -/// assert_eq!(Some(Match::must(2, 6..21)), re.find(&mut cache, hay)); -/// // Explicitly ask for an unanchored search. Same as above. -/// let input = Input::new(hay).anchored(Anchored::No); -/// assert_eq!(Some(Match::must(2, 6..21)), re.find(&mut cache, hay)); -/// -/// // Now try an anchored search. Since the match doesn't start at the -/// // beginning of the haystack, no match is found! -/// let input = Input::new(hay).anchored(Anchored::Yes); -/// assert_eq!(None, re.find(&mut cache, input)); -/// -/// // We can try an anchored search again, but move the location of where -/// // we start the search. Note that the offsets reported are still in -/// // terms of the overall haystack and not relative to where we started -/// // the search. -/// let input = Input::new(hay).anchored(Anchored::Yes).range(6..); -/// assert_eq!(Some(Match::must(2, 6..21)), re.find(&mut cache, input)); -/// -/// // Now try an anchored search for a specific pattern. We specifically -/// // choose a pattern that we know doesn't match to prove that the search -/// // only looks for the pattern we provide. -/// let input = Input::new(hay) -/// .anchored(Anchored::Pattern(PatternID::must(1))) -/// .range(6..); -/// assert_eq!(None, re.find(&mut cache, input)); -/// -/// // But if we switch it to the pattern that we know matches, then we find -/// // the match. 
-/// let input = Input::new(hay) -/// .anchored(Anchored::Pattern(PatternID::must(2))) -/// .range(6..); -/// assert_eq!(Some(Match::must(2, 6..21)), re.find(&mut cache, input)); -/// -/// # Ok::<(), Box>(()) -/// ``` -#[derive(Clone, Copy, Debug, Eq, PartialEq)] -pub enum Anchored { - /// Run an unanchored search. This means a match may occur anywhere at or - /// after the start position of the search. - /// - /// This search can return a match for any pattern in the regex. - No, - /// Run an anchored search. This means that a match must begin at the - /// start position of the search. - /// - /// This search can return a match for any pattern in the regex. - Yes, - /// Run an anchored search for a specific pattern. This means that a match - /// must be for the given pattern and must begin at the start position of - /// the search. - Pattern(PatternID), -} - -impl Anchored { - /// Returns true if and only if this anchor mode corresponds to any kind of - /// anchored search. - /// - /// # Example - /// - /// This examples shows that both `Anchored::Yes` and `Anchored::Pattern` - /// are considered anchored searches. - /// - /// ``` - /// use regex_automata::{Anchored, PatternID}; - /// - /// assert!(!Anchored::No.is_anchored()); - /// assert!(Anchored::Yes.is_anchored()); - /// assert!(Anchored::Pattern(PatternID::ZERO).is_anchored()); - /// ``` - #[inline] - pub fn is_anchored(&self) -> bool { - matches!(*self, Anchored::Yes | Anchored::Pattern(_)) - } - - /// Returns the pattern ID associated with this configuration if it is an - /// anchored search for a specific pattern. Otherwise `None` is returned. - /// - /// # Example - /// - /// ``` - /// use regex_automata::{Anchored, PatternID}; - /// - /// assert_eq!(None, Anchored::No.pattern()); - /// assert_eq!(None, Anchored::Yes.pattern()); - /// - /// let pid = PatternID::must(5); - /// assert_eq!(Some(pid), Anchored::Pattern(pid).pattern()); - /// ``` - #[inline] - pub fn pattern(&self) -> Option { - match *self { - Anchored::Pattern(pid) => Some(pid), - _ => None, - } - } -} - -/// The kind of match semantics to use for a regex pattern. -/// -/// The default match kind is `LeftmostFirst`, and this corresponds to the -/// match semantics used by most backtracking engines, such as Perl. -/// -/// # Leftmost first or "preference order" match semantics -/// -/// Leftmost-first semantics determine which match to report when there are -/// multiple paths through a regex that match at the same position. The tie is -/// essentially broken by how a backtracker would behave. For example, consider -/// running the regex `foofoofoo|foofoo|foo` on the haystack `foofoo`. In this -/// case, both the `foofoo` and `foo` branches match at position `0`. So should -/// the end of the match be `3` or `6`? -/// -/// A backtracker will conceptually work by trying `foofoofoo` and failing. -/// Then it will try `foofoo`, find the match and stop there. Thus, the -/// leftmost-first match position is `6`. This is called "leftmost-first" or -/// "preference order" because the order of the branches as written in the -/// regex pattern is what determines how to break the tie. -/// -/// (Note that leftmost-longest match semantics, which break ties by always -/// taking the longest matching string, are not currently supported by this -/// crate. These match semantics tend to be found in POSIX regex engines.) 
-/// -/// This example shows how leftmost-first semantics work, and how it even -/// applies to multi-pattern regexes: -/// -/// ``` -/// use regex_automata::{ -/// nfa::thompson::pikevm::PikeVM, -/// Match, -/// }; -/// -/// let re = PikeVM::new_many(&[ -/// r"foofoofoo", -/// r"foofoo", -/// r"foo", -/// ])?; -/// let mut cache = re.create_cache(); -/// let got: Vec = re.find_iter(&mut cache, "foofoo").collect(); -/// let expected = vec![Match::must(1, 0..6)]; -/// assert_eq!(expected, got); -/// -/// # Ok::<(), Box>(()) -/// ``` -/// -/// # All matches -/// -/// The `All` match semantics report any and all matches, and generally will -/// attempt to match as much as possible. It doesn't respect any sort of match -/// priority at all, so things like non-greedy matching don't work in this -/// mode. -/// -/// The fact that non-greedy matching doesn't work generally makes most forms -/// of unanchored non-overlapping searches have unintuitive behavior. Namely, -/// unanchored searches behave as if there is a `(?s-u:.)*?` prefix at the -/// beginning of the pattern, which is specifically non-greedy. Since it will -/// be treated as greedy in `All` match semantics, this generally means that -/// it will first attempt to consume all of the haystack and is likely to wind -/// up skipping matches. -/// -/// Generally speaking, `All` should only be used in two circumstances: -/// -/// * When running an anchored search and there is a desire to match as much as -/// possible. For example, when building a reverse regex matcher to find the -/// start of a match after finding the end. In this case, the reverse search -/// is anchored to the end of the match found by the forward search. -/// * When running overlapping searches. Since `All` encodes all possible -/// matches, this is generally what you want for an overlapping search. If you -/// try to use leftmost-first in an overlapping search, it is likely to produce -/// counter-intuitive results since leftmost-first specifically excludes some -/// matches from its underlying finite state machine. -/// -/// This example demonstrates the counter-intuitive behavior of `All` semantics -/// when using a standard leftmost unanchored search: -/// -/// ``` -/// use regex_automata::{ -/// nfa::thompson::pikevm::PikeVM, -/// Match, MatchKind, -/// }; -/// -/// let re = PikeVM::builder() -/// .configure(PikeVM::config().match_kind(MatchKind::All)) -/// .build("foo")?; -/// let hay = "first foo second foo wat"; -/// let mut cache = re.create_cache(); -/// let got: Vec = re.find_iter(&mut cache, hay).collect(); -/// // Notice that it completely skips the first 'foo'! -/// let expected = vec![Match::must(0, 17..20)]; -/// assert_eq!(expected, got); -/// -/// # Ok::<(), Box>(()) -/// ``` -/// -/// This second example shows how `All` semantics are useful for an overlapping -/// search. Note that we use lower level lazy DFA APIs here since the NFA -/// engines only currently support a very limited form of overlapping search. -/// -/// ``` -/// use regex_automata::{ -/// hybrid::dfa::{DFA, OverlappingState}, -/// HalfMatch, Input, MatchKind, -/// }; -/// -/// let re = DFA::builder() -/// // If we didn't set 'All' semantics here, then the regex would only -/// // match 'foo' at offset 3 and nothing else. Why? Because the state -/// // machine implements preference order and knows that the 'foofoo' and -/// // 'foofoofoo' branches can never match since 'foo' will always match -/// // when they match and take priority. 
-/// .configure(DFA::config().match_kind(MatchKind::All)) -/// .build(r"foo|foofoo|foofoofoo")?; -/// let mut cache = re.create_cache(); -/// let mut state = OverlappingState::start(); -/// let input = Input::new("foofoofoo"); -/// let mut got = vec![]; -/// loop { -/// re.try_search_overlapping_fwd(&mut cache, &input, &mut state)?; -/// let m = match state.get_match() { -/// None => break, -/// Some(m) => m, -/// }; -/// got.push(m); -/// } -/// let expected = vec![ -/// HalfMatch::must(0, 3), -/// HalfMatch::must(0, 6), -/// HalfMatch::must(0, 9), -/// ]; -/// assert_eq!(expected, got); -/// -/// # Ok::<(), Box>(()) -/// ``` -#[non_exhaustive] -#[derive(Clone, Copy, Default, Debug, Eq, PartialEq)] -pub enum MatchKind { - /// Report all possible matches. - All, - /// Report only the leftmost matches. When multiple leftmost matches exist, - /// report the match corresponding to the part of the regex that appears - /// first in the syntax. - #[default] - LeftmostFirst, - // There is prior art in RE2 that shows that we should be able to add - // LeftmostLongest too. The tricky part of it is supporting ungreedy - // repetitions. Instead of treating all NFA states as having equivalent - // priority (as in 'All') or treating all NFA states as having distinct - // priority based on order (as in 'LeftmostFirst'), we instead group NFA - // states into sets, and treat members of each set as having equivalent - // priority, but having greater priority than all following members - // of different sets. - // - // However, it's not clear whether it's really worth adding this. After - // all, leftmost-longest can be emulated when using literals by using - // leftmost-first and sorting the literals by length in descending order. - // However, this won't work for arbitrary regexes. e.g., `\w|\w\w` will - // always match `a` in `ab` when using leftmost-first, but leftmost-longest - // would match `ab`. -} - -impl MatchKind { - #[cfg(feature = "alloc")] - pub(crate) fn continue_past_first_match(&self) -> bool { - *self == MatchKind::All - } -} - -/// An error indicating that a search stopped before reporting whether a -/// match exists or not. -/// -/// To be very clear, this error type implies that one cannot assume that no -/// matches occur, since the search stopped before completing. That is, if -/// you're looking for information about where a search determined that no -/// match can occur, then this error type does *not* give you that. (Indeed, at -/// the time of writing, if you need such a thing, you have to write your own -/// search routine.) -/// -/// Normally, when one searches for something, the response is either an -/// affirmative "it was found at this location" or a negative "not found at -/// all." However, in some cases, a regex engine can be configured to stop its -/// search before concluding whether a match exists or not. When this happens, -/// it may be important for the caller to know why the regex engine gave up and -/// where in the input it gave up at. This error type exposes the 'why' and the -/// 'where.' -/// -/// For example, the DFAs provided by this library generally cannot correctly -/// implement Unicode word boundaries. Instead, they provide an option to -/// eagerly support them on ASCII text (since Unicode word boundaries are -/// equivalent to ASCII word boundaries when searching ASCII text), but will -/// "give up" if a non-ASCII byte is seen. 
In such cases, one is usually -/// required to either report the failure to the caller (unergonomic) or -/// otherwise fall back to some other regex engine (ergonomic, but potentially -/// costly). -/// -/// More generally, some regex engines offer the ability for callers to specify -/// certain bytes that will trigger the regex engine to automatically quit if -/// they are seen. -/// -/// Still yet, there may be other reasons for a failed match. For example, -/// the hybrid DFA provided by this crate can be configured to give up if it -/// believes that it is not efficient. This in turn permits callers to choose a -/// different regex engine. -/// -/// (Note that DFAs are configured by default to never quit or give up in this -/// fashion. For example, by default, a DFA will fail to build if the regex -/// pattern contains a Unicode word boundary. One needs to opt into the "quit" -/// behavior via options, like -/// [`hybrid::dfa::Config::unicode_word_boundary`](crate::hybrid::dfa::Config::unicode_word_boundary).) -/// -/// There are a couple other ways a search -/// can fail. For example, when using the -/// [`BoundedBacktracker`](crate::nfa::thompson::backtrack::BoundedBacktracker) -/// with a haystack that is too long, or trying to run an unanchored search -/// with a [one-pass DFA](crate::dfa::onepass). -#[derive(Clone, Debug, Eq, PartialEq)] -pub struct MatchError( - #[cfg(feature = "alloc")] alloc::boxed::Box, - #[cfg(not(feature = "alloc"))] MatchErrorKind, -); - -impl MatchError { - /// Create a new error value with the given kind. - /// - /// This is a more verbose version of the kind-specific constructors, - /// e.g., `MatchError::quit`. - pub fn new(kind: MatchErrorKind) -> MatchError { - #[cfg(feature = "alloc")] - { - MatchError(alloc::boxed::Box::new(kind)) - } - #[cfg(not(feature = "alloc"))] - { - MatchError(kind) - } - } - - /// Returns a reference to the underlying error kind. - pub fn kind(&self) -> &MatchErrorKind { - &self.0 - } - - /// Create a new "quit" error. The given `byte` corresponds to the value - /// that tripped a search's quit condition, and `offset` corresponds to the - /// location in the haystack at which the search quit. - /// - /// This is the same as calling `MatchError::new` with a - /// [`MatchErrorKind::Quit`] kind. - pub fn quit(byte: u8, offset: usize) -> MatchError { - MatchError::new(MatchErrorKind::Quit { byte, offset }) - } - - /// Create a new "gave up" error. The given `offset` corresponds to the - /// location in the haystack at which the search gave up. - /// - /// This is the same as calling `MatchError::new` with a - /// [`MatchErrorKind::GaveUp`] kind. - pub fn gave_up(offset: usize) -> MatchError { - MatchError::new(MatchErrorKind::GaveUp { offset }) - } - - /// Create a new "haystack too long" error. The given `len` corresponds to - /// the length of the haystack that was problematic. - /// - /// This is the same as calling `MatchError::new` with a - /// [`MatchErrorKind::HaystackTooLong`] kind. - pub fn haystack_too_long(len: usize) -> MatchError { - MatchError::new(MatchErrorKind::HaystackTooLong { len }) - } - - /// Create a new "unsupported anchored" error. This occurs when the caller - /// requests a search with an anchor mode that is not supported by the - /// regex engine. - /// - /// This is the same as calling `MatchError::new` with a - /// [`MatchErrorKind::UnsupportedAnchored`] kind. 
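The constructors above pair with `kind()` when a caller needs to branch on why a search stopped. A minimal sketch, assuming the `regex-automata` crate as a dependency; the byte and offset values here are invented for illustration:

```
use regex_automata::{MatchError, MatchErrorKind};

fn main() {
    // A DFA configured with quit bytes might report an error like this one.
    let err = MatchError::quit(0xFF, 42);
    match *err.kind() {
        MatchErrorKind::Quit { byte, offset } => {
            println!("quit on byte {byte:#x} at offset {offset}");
        }
        MatchErrorKind::GaveUp { offset } => {
            println!("gave up at offset {offset}");
        }
        // Other kinds (haystack too long, unsupported anchored mode, ...)
        // fall back to the Display impl shown later in this module.
        _ => println!("other search failure: {err}"),
    }
}
```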
- pub fn unsupported_anchored(mode: Anchored) -> MatchError { - MatchError::new(MatchErrorKind::UnsupportedAnchored { mode }) - } -} - -/// The underlying kind of a [`MatchError`]. -/// -/// This is a **non-exhaustive** enum. That means new variants may be added in -/// a semver-compatible release. -#[non_exhaustive] -#[derive(Clone, Debug, Eq, PartialEq)] -pub enum MatchErrorKind { - /// The search saw a "quit" byte at which it was instructed to stop - /// searching. - Quit { - /// The "quit" byte that was observed that caused the search to stop. - byte: u8, - /// The offset at which the quit byte was observed. - offset: usize, - }, - /// The search, based on heuristics, determined that it would be better - /// to stop, typically to provide the caller an opportunity to use an - /// alternative regex engine. - /// - /// Currently, the only way for this to occur is via the lazy DFA and - /// only when it is configured to do so (it will not return this error by - /// default). - GaveUp { - /// The offset at which the search stopped. This corresponds to the - /// position immediately following the last byte scanned. - offset: usize, - }, - /// This error occurs if the haystack given to the regex engine was too - /// long to be searched. This occurs, for example, with regex engines - /// like the bounded backtracker that have a configurable fixed amount of - /// capacity that is tied to the length of the haystack. Anything beyond - /// that configured limit will result in an error at search time. - HaystackTooLong { - /// The length of the haystack that exceeded the limit. - len: usize, - }, - /// An error indicating that a particular type of anchored search was - /// requested, but that the regex engine does not support it. - /// - /// Note that this error should not be returned by a regex engine simply - /// because the pattern ID is invalid (i.e., equal to or exceeds the number - /// of patterns in the regex). In that case, the regex engine should report - /// a non-match. - UnsupportedAnchored { - /// The anchored mode given that is unsupported. - mode: Anchored, - }, -} - -#[cfg(feature = "std")] -impl std::error::Error for MatchError {} - -impl core::fmt::Display for MatchError { - fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { - match *self.kind() { - MatchErrorKind::Quit { byte, offset } => write!( - f, - "quit search after observing byte {:?} at offset {}", - DebugByte(byte), - offset, - ), - MatchErrorKind::GaveUp { offset } => { - write!(f, "gave up searching at offset {offset}") - } - MatchErrorKind::HaystackTooLong { len } => { - write!(f, "haystack of length {len} is too long") - } - MatchErrorKind::UnsupportedAnchored { mode: Anchored::Yes } => { - write!(f, "anchored searches are not supported or enabled") - } - MatchErrorKind::UnsupportedAnchored { mode: Anchored::No } => { - write!(f, "unanchored searches are not supported or enabled") - } - MatchErrorKind::UnsupportedAnchored { - mode: Anchored::Pattern(pid), - } => { - write!( - f, - "anchored searches for a specific pattern ({}) are \ - not supported or enabled", - pid.as_usize(), - ) - } - } - } -} - -#[cfg(test)] -mod tests { - use super::*; - - // We test that our 'MatchError' type is the size we expect. This isn't an - // API guarantee, but if the size increases, we really want to make sure we - // decide to do that intentionally. So this should be a speed bump. And in - // general, we should not increase the size without a very good reason. - // - // Why? 
Because low level search APIs return Result<.., MatchError>. When - // MatchError gets bigger, so too does the Result type. - // - // Now, when 'alloc' is enabled, we do box the error, which de-emphasizes - // the importance of keeping a small error type. But without 'alloc', we - // still want things to be small. - #[test] - fn match_error_size() { - let expected_size = if cfg!(feature = "alloc") { - core::mem::size_of::<usize>() - } else { - 2 * core::mem::size_of::<usize>() - }; - assert_eq!(expected_size, core::mem::size_of::<MatchError>()); - } - - // Same as above, but for the underlying match error kind. - #[cfg(target_pointer_width = "64")] - #[test] - fn match_error_kind_size() { - let expected_size = 2 * core::mem::size_of::<usize>(); - assert_eq!(expected_size, core::mem::size_of::<MatchErrorKind>()); - } - - #[cfg(target_pointer_width = "32")] - #[test] - fn match_error_kind_size() { - let expected_size = 3 * core::mem::size_of::<usize>(); - assert_eq!(expected_size, core::mem::size_of::<MatchErrorKind>()); - } - - #[test] - fn incorrect_asref_guard() { - struct Bad(std::cell::Cell<bool>); - - impl AsRef<[u8]> for Bad { - fn as_ref(&self) -> &[u8] { - if self.0.replace(false) { - &[] - } else { - &[0; 1000] - } - } - } - - let bad = Bad(std::cell::Cell::new(true)); - let input = Input::new(&bad); - assert!(input.end() <= input.haystack().len()); - } -} diff --git a/vendor/regex-automata/src/util/sparse_set.rs b/vendor/regex-automata/src/util/sparse_set.rs deleted file mode 100644 index e30d5b9b7f3fe6..00000000000000 --- a/vendor/regex-automata/src/util/sparse_set.rs +++ /dev/null @@ -1,239 +0,0 @@ -/*! -This module defines a sparse set data structure. Its most interesting -properties are: - -* They preserve insertion order. -* Set membership testing is done in constant time. -* Set insertion is done in constant time. -* Clearing the set is done in constant time. - -The cost for doing this is that the capacity of the set needs to be known up -front, and the elements in the set are limited to state identifiers. - -These sets are principally used when traversing an NFA state graph. This -happens at search time, for example, in the PikeVM. It also happens during DFA -determinization. -*/ - -use alloc::{vec, vec::Vec}; - -use crate::util::primitives::StateID; - -/// A pair of sparse sets. -/// -/// This is useful when one needs to compute NFA epsilon closures from a -/// previous set of states derived from an epsilon closure. One set can be the -/// starting states whereas the other set can be the destination states after -/// following the transitions for a particular byte of input. -/// -/// There is no significance to 'set1' or 'set2'. They are both sparse sets of -/// the same size. -/// -/// The members of this struct are exposed so that callers may borrow 'set1' -/// and 'set2' individually without being forced to borrow both at the same -/// time. -#[derive(Clone, Debug)] -pub(crate) struct SparseSets { - pub(crate) set1: SparseSet, - pub(crate) set2: SparseSet, -} - -impl SparseSets { - /// Create a new pair of sparse sets where each set has the given capacity. - /// - /// This panics if the capacity given is bigger than `StateID::LIMIT`. - pub(crate) fn new(capacity: usize) -> SparseSets { - SparseSets { - set1: SparseSet::new(capacity), - set2: SparseSet::new(capacity), - } - } - - /// Resizes these sparse sets to have the new capacity given. - /// - /// The sets are automatically cleared. - /// - /// This panics if the capacity given is bigger than `StateID::LIMIT`.
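The module documentation above describes the classic sparse-set layout (see research.swtch.com/sparse). A minimal self-contained sketch of that technique, using plain `usize` ids and a made-up `TinySparseSet` type rather than the crate's `StateID`-based implementation, may make the dense/sparse invariant easier to see:

```
// Standalone illustration of the sparse-set idea: constant-time insert,
// membership test, and clear, with insertion-ordered iteration via `dense`.
struct TinySparseSet {
    len: usize,
    dense: Vec<usize>,  // ids in insertion order
    sparse: Vec<usize>, // maps id -> position in `dense`
}

impl TinySparseSet {
    fn new(capacity: usize) -> TinySparseSet {
        TinySparseSet { len: 0, dense: vec![0; capacity], sparse: vec![0; capacity] }
    }

    fn contains(&self, id: usize) -> bool {
        // The invariant from the docs: sparse[id] < len && dense[sparse[id]] == id.
        self.sparse[id] < self.len && self.dense[self.sparse[id]] == id
    }

    fn insert(&mut self, id: usize) -> bool {
        if self.contains(id) {
            return false;
        }
        self.dense[self.len] = id;
        self.sparse[id] = self.len;
        self.len += 1;
        true
    }

    fn clear(&mut self) {
        // No need to touch `dense` or `sparse`; stale entries fail `contains`.
        self.len = 0;
    }
}

fn main() {
    let mut set = TinySparseSet::new(8);
    assert!(set.insert(5));
    assert!(set.insert(2));
    assert!(!set.insert(5));
    assert!(set.contains(2));
    set.clear();
    assert!(!set.contains(5));
}
```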
- #[inline] - pub(crate) fn resize(&mut self, new_capacity: usize) { - self.set1.resize(new_capacity); - self.set2.resize(new_capacity); - } - - /// Clear both sparse sets. - pub(crate) fn clear(&mut self) { - self.set1.clear(); - self.set2.clear(); - } - - /// Swap set1 with set2. - pub(crate) fn swap(&mut self) { - core::mem::swap(&mut self.set1, &mut self.set2); - } - - /// Returns the memory usage, in bytes, used by this pair of sparse sets. - pub(crate) fn memory_usage(&self) -> usize { - self.set1.memory_usage() + self.set2.memory_usage() - } -} - -/// A sparse set used for representing ordered NFA states. -/// -/// This supports constant time addition and membership testing. Clearing an -/// entire set can also be done in constant time. Iteration yields elements -/// in the order in which they were inserted. -/// -/// The data structure is based on: https://research.swtch.com/sparse -/// Note though that we don't actually use uninitialized memory. We generally -/// reuse sparse sets, so the initial allocation cost is bearable. However, its -/// other properties listed above are extremely useful. -#[derive(Clone)] -pub(crate) struct SparseSet { - /// The number of elements currently in this set. - len: usize, - /// Dense contains the ids in the order in which they were inserted. - dense: Vec, - /// Sparse maps ids to their location in dense. - /// - /// A state ID is in the set if and only if - /// sparse[id] < len && id == dense[sparse[id]]. - /// - /// Note that these are indices into 'dense'. It's a little weird to use - /// StateID here, but we know our length can never exceed the bounds of - /// StateID (enforced by 'resize') and StateID will be at most 4 bytes - /// where as a usize is likely double that in most cases. - sparse: Vec, -} - -impl SparseSet { - /// Create a new sparse set with the given capacity. - /// - /// Sparse sets have a fixed size and they cannot grow. Attempting to - /// insert more distinct elements than the total capacity of the set will - /// result in a panic. - /// - /// This panics if the capacity given is bigger than `StateID::LIMIT`. - #[inline] - pub(crate) fn new(capacity: usize) -> SparseSet { - let mut set = SparseSet { len: 0, dense: vec![], sparse: vec![] }; - set.resize(capacity); - set - } - - /// Resizes this sparse set to have the new capacity given. - /// - /// This set is automatically cleared. - /// - /// This panics if the capacity given is bigger than `StateID::LIMIT`. - #[inline] - pub(crate) fn resize(&mut self, new_capacity: usize) { - assert!( - new_capacity <= StateID::LIMIT, - "sparse set capacity cannot exceed {:?}", - StateID::LIMIT - ); - self.clear(); - self.dense.resize(new_capacity, StateID::ZERO); - self.sparse.resize(new_capacity, StateID::ZERO); - } - - /// Returns the capacity of this set. - /// - /// The capacity represents a fixed limit on the number of distinct - /// elements that are allowed in this set. The capacity cannot be changed. - #[inline] - pub(crate) fn capacity(&self) -> usize { - self.dense.len() - } - - /// Returns the number of elements in this set. - #[inline] - pub(crate) fn len(&self) -> usize { - self.len - } - - /// Returns true if and only if this set is empty. - #[inline] - pub(crate) fn is_empty(&self) -> bool { - self.len() == 0 - } - - /// Insert the state ID value into this set and return true if the given - /// state ID was not previously in this set. - /// - /// This operation is idempotent. If the given value is already in this - /// set, then this is a no-op. 
- /// - /// If more than `capacity` ids are inserted, then this panics. - /// - /// This is marked as inline(always) since the compiler won't inline it - /// otherwise, and it's a fairly hot piece of code in DFA determinization. - #[cfg_attr(feature = "perf-inline", inline(always))] - pub(crate) fn insert(&mut self, id: StateID) -> bool { - if self.contains(id) { - return false; - } - - let i = self.len(); - assert!( - i < self.capacity(), - "{:?} exceeds capacity of {:?} when inserting {:?}", - i, - self.capacity(), - id, - ); - // OK since i < self.capacity() and self.capacity() is guaranteed to - // be <= StateID::LIMIT. - let index = StateID::new_unchecked(i); - self.dense[index] = id; - self.sparse[id] = index; - self.len += 1; - true - } - - /// Returns true if and only if this set contains the given value. - #[inline] - pub(crate) fn contains(&self, id: StateID) -> bool { - let index = self.sparse[id]; - index.as_usize() < self.len() && self.dense[index] == id - } - - /// Clear this set such that it has no members. - #[inline] - pub(crate) fn clear(&mut self) { - self.len = 0; - } - - #[inline] - pub(crate) fn iter(&self) -> SparseSetIter<'_> { - SparseSetIter(self.dense[..self.len()].iter()) - } - - /// Returns the heap memory usage, in bytes, used by this sparse set. - #[inline] - pub(crate) fn memory_usage(&self) -> usize { - self.dense.len() * StateID::SIZE + self.sparse.len() * StateID::SIZE - } -} - -impl core::fmt::Debug for SparseSet { - fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { - let elements: Vec = self.iter().collect(); - f.debug_tuple("SparseSet").field(&elements).finish() - } -} - -/// An iterator over all elements in a sparse set. -/// -/// The lifetime `'a` refers to the lifetime of the set being iterated over. -#[derive(Debug)] -pub(crate) struct SparseSetIter<'a>(core::slice::Iter<'a, StateID>); - -impl<'a> Iterator for SparseSetIter<'a> { - type Item = StateID; - - #[cfg_attr(feature = "perf-inline", inline(always))] - fn next(&mut self) -> Option { - self.0.next().copied() - } -} diff --git a/vendor/regex-automata/src/util/start.rs b/vendor/regex-automata/src/util/start.rs deleted file mode 100644 index c34b376a6405b0..00000000000000 --- a/vendor/regex-automata/src/util/start.rs +++ /dev/null @@ -1,479 +0,0 @@ -/*! -Provides helpers for dealing with start state configurations in DFAs. -*/ - -use crate::util::{ - look::LookMatcher, - search::{Anchored, Input}, - wire::{self, DeserializeError, SerializeError}, -}; - -/// The configuration used to determine a DFA's start state for a search. -/// -/// A DFA has a single starting state in the typical textbook description. That -/// is, it corresponds to the set of all starting states for the NFA that built -/// it, along with their epsilon closures. In this crate, however, DFAs have -/// many possible start states due to a few factors: -/// -/// * DFAs support the ability to run either anchored or unanchored searches. -/// Each type of search needs its own start state. For example, an unanchored -/// search requires starting at a state corresponding to a regex with a -/// `(?s-u:.)*?` prefix, which will match through anything. -/// * DFAs also optionally support starting an anchored search for any one -/// specific pattern. Each such pattern requires its own start state. -/// * If a look-behind assertion like `^` or `\b` is used in the regex, then -/// the DFA will need to inspect a single byte immediately before the start of -/// the search to choose the correct start state. 
-/// -/// Indeed, this configuration precisely encapsulates all of the above factors. -/// The [`Config::anchored`] method sets which kind of anchored search to -/// perform while the [`Config::look_behind`] method provides a way to set -/// the byte that occurs immediately before the start of the search. -/// -/// Generally speaking, this type is only useful when you want to run searches -/// without using an [`Input`]. In particular, an `Input` wants a haystack -/// slice, but callers may not have a contiguous sequence of bytes as a -/// haystack in all cases. This type provides a lower level of control such -/// that callers can provide their own anchored configuration and look-behind -/// byte explicitly. -/// -/// # Example -/// -/// This shows basic usage that permits running a search with a DFA without -/// using the `Input` abstraction. -/// -/// ``` -/// use regex_automata::{ -/// dfa::{Automaton, dense}, -/// util::start, -/// Anchored, -/// }; -/// -/// let dfa = dense::DFA::new(r"(?-u)\b\w+\b")?; -/// let haystack = "quartz"; -/// -/// let config = start::Config::new().anchored(Anchored::Yes); -/// let mut state = dfa.start_state(&config)?; -/// for &b in haystack.as_bytes().iter() { -/// state = dfa.next_state(state, b); -/// } -/// state = dfa.next_eoi_state(state); -/// assert!(dfa.is_match_state(state)); -/// -/// # Ok::<(), Box>(()) -/// ``` -/// -/// This example shows how to correctly run a search that doesn't begin at -/// the start of a haystack. Notice how we set the look-behind byte, and as -/// a result, the `\b` assertion does not match. -/// -/// ``` -/// use regex_automata::{ -/// dfa::{Automaton, dense}, -/// util::start, -/// Anchored, -/// }; -/// -/// let dfa = dense::DFA::new(r"(?-u)\b\w+\b")?; -/// let haystack = "quartz"; -/// -/// let config = start::Config::new() -/// .anchored(Anchored::Yes) -/// .look_behind(Some(b'q')); -/// let mut state = dfa.start_state(&config)?; -/// for &b in haystack.as_bytes().iter().skip(1) { -/// state = dfa.next_state(state, b); -/// } -/// state = dfa.next_eoi_state(state); -/// // No match! -/// assert!(!dfa.is_match_state(state)); -/// -/// # Ok::<(), Box>(()) -/// ``` -/// -/// If we had instead not set a look-behind byte, then the DFA would assume -/// that it was starting at the beginning of the haystack, and thus `\b` should -/// match. This in turn would result in erroneously reporting a match: -/// -/// ``` -/// use regex_automata::{ -/// dfa::{Automaton, dense}, -/// util::start, -/// Anchored, -/// }; -/// -/// let dfa = dense::DFA::new(r"(?-u)\b\w+\b")?; -/// let haystack = "quartz"; -/// -/// // Whoops, forgot the look-behind byte... -/// let config = start::Config::new().anchored(Anchored::Yes); -/// let mut state = dfa.start_state(&config)?; -/// for &b in haystack.as_bytes().iter().skip(1) { -/// state = dfa.next_state(state, b); -/// } -/// state = dfa.next_eoi_state(state); -/// // And now we get a match unexpectedly. -/// assert!(dfa.is_match_state(state)); -/// -/// # Ok::<(), Box>(()) -/// ``` -#[derive(Clone, Debug)] -pub struct Config { - look_behind: Option, - anchored: Anchored, -} - -impl Config { - /// Create a new default start configuration. - /// - /// The default is an unanchored search that starts at the beginning of the - /// haystack. - pub fn new() -> Config { - Config { anchored: Anchored::No, look_behind: None } - } - - /// A convenience routine for building a start configuration from an - /// [`Input`] for a forward search. 
- /// - /// This automatically sets the look-behind byte to the byte immediately - /// preceding the start of the search. If the start of the search is at - /// offset `0`, then no look-behind byte is set. - pub fn from_input_forward(input: &Input<'_>) -> Config { - let look_behind = input - .start() - .checked_sub(1) - .and_then(|i| input.haystack().get(i).copied()); - Config { look_behind, anchored: input.get_anchored() } - } - - /// A convenience routine for building a start configuration from an - /// [`Input`] for a reverse search. - /// - /// This automatically sets the look-behind byte to the byte immediately - /// following the end of the search. If the end of the search is at - /// offset `haystack.len()`, then no look-behind byte is set. - pub fn from_input_reverse(input: &Input<'_>) -> Config { - let look_behind = input.haystack().get(input.end()).copied(); - Config { look_behind, anchored: input.get_anchored() } - } - - /// Set the look-behind byte at the start of a search. - /// - /// Unless the search is intended to logically start at the beginning of a - /// haystack, this should _always_ be set to the byte immediately preceding - /// the start of the search. If no look-behind byte is set, then the start - /// configuration will assume it is at the beginning of the haystack. For - /// example, the anchor `^` will match. - /// - /// The default is that no look-behind byte is set. - pub fn look_behind(mut self, byte: Option) -> Config { - self.look_behind = byte; - self - } - - /// Set the anchored mode of a search. - /// - /// The default is an unanchored search. - pub fn anchored(mut self, mode: Anchored) -> Config { - self.anchored = mode; - self - } - - /// Return the look-behind byte in this configuration, if one exists. - pub fn get_look_behind(&self) -> Option { - self.look_behind - } - - /// Return the anchored mode in this configuration. - pub fn get_anchored(&self) -> Anchored { - self.anchored - } -} - -/// A map from every possible byte value to its corresponding starting -/// configuration. -/// -/// This map is used in order to lookup the start configuration for a particular -/// position in a haystack. This start configuration is then used in -/// combination with things like the anchored mode and pattern ID to fully -/// determine the start state. -/// -/// Generally speaking, this map is only used for fully compiled DFAs and lazy -/// DFAs. For NFAs (including the one-pass DFA), the start state is generally -/// selected by virtue of traversing the NFA state graph. DFAs do the same -/// thing, but at build time and not search time. (Well, technically the lazy -/// DFA does it at search time, but it does enough work to cache the full -/// result of the epsilon closure that the NFA engines tend to need to do.) -#[derive(Clone)] -pub(crate) struct StartByteMap { - map: [Start; 256], -} - -impl StartByteMap { - /// Create a new map from byte values to their corresponding starting - /// configurations. The map is determined, in part, by how look-around - /// assertions are matched via the matcher given. 
- pub(crate) fn new(lookm: &LookMatcher) -> StartByteMap { - let mut map = [Start::NonWordByte; 256]; - map[usize::from(b'\n')] = Start::LineLF; - map[usize::from(b'\r')] = Start::LineCR; - map[usize::from(b'_')] = Start::WordByte; - - let mut byte = b'0'; - while byte <= b'9' { - map[usize::from(byte)] = Start::WordByte; - byte += 1; - } - byte = b'A'; - while byte <= b'Z' { - map[usize::from(byte)] = Start::WordByte; - byte += 1; - } - byte = b'a'; - while byte <= b'z' { - map[usize::from(byte)] = Start::WordByte; - byte += 1; - } - - let lineterm = lookm.get_line_terminator(); - // If our line terminator is normal, then it is already handled by - // the LineLF and LineCR configurations. But if it's weird, then we - // overwrite whatever was there before for that terminator with a - // special configuration. The trick here is that if the terminator - // is, say, a word byte like `a`, then callers seeing this start - // configuration need to account for that and build their DFA state as - // if it *also* came from a word byte. - if lineterm != b'\r' && lineterm != b'\n' { - map[usize::from(lineterm)] = Start::CustomLineTerminator; - } - StartByteMap { map } - } - - /// Return the starting configuration for the given look-behind byte. - /// - /// If no look-behind exists, callers should use `Start::Text`. - #[cfg_attr(feature = "perf-inline", inline(always))] - pub(crate) fn get(&self, byte: u8) -> Start { - self.map[usize::from(byte)] - } - - /// Deserializes a byte class map from the given slice. If the slice is of - /// insufficient length or otherwise contains an impossible mapping, then - /// an error is returned. Upon success, the number of bytes read along with - /// the map are returned. The number of bytes read is always a multiple of - /// 8. - pub(crate) fn from_bytes( - slice: &[u8], - ) -> Result<(StartByteMap, usize), DeserializeError> { - wire::check_slice_len(slice, 256, "start byte map")?; - let mut map = [Start::NonWordByte; 256]; - for (i, &repr) in slice[..256].iter().enumerate() { - map[i] = match Start::from_usize(usize::from(repr)) { - Some(start) => start, - None => { - return Err(DeserializeError::generic( - "found invalid starting configuration", - )) - } - }; - } - Ok((StartByteMap { map }, 256)) - } - - /// Writes this map to the given byte buffer. if the given buffer is too - /// small, then an error is returned. Upon success, the total number of - /// bytes written is returned. The number of bytes written is guaranteed to - /// be a multiple of 8. - pub(crate) fn write_to( - &self, - dst: &mut [u8], - ) -> Result { - let nwrite = self.write_to_len(); - if dst.len() < nwrite { - return Err(SerializeError::buffer_too_small("start byte map")); - } - for (i, &start) in self.map.iter().enumerate() { - dst[i] = start.as_u8(); - } - Ok(nwrite) - } - - /// Returns the total number of bytes written by `write_to`. - pub(crate) fn write_to_len(&self) -> usize { - 256 - } -} - -impl core::fmt::Debug for StartByteMap { - fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { - use crate::util::escape::DebugByte; - - write!(f, "StartByteMap{{")?; - for byte in 0..=255 { - if byte > 0 { - write!(f, ", ")?; - } - let start = self.map[usize::from(byte)]; - write!(f, "{:?} => {:?}", DebugByte(byte), start)?; - } - write!(f, "}}")?; - Ok(()) - } -} - -/// Represents the six possible starting configurations of a DFA search. -/// -/// The starting configuration is determined by inspecting the beginning -/// of the haystack (up to 1 byte). 
Ultimately, this along with a pattern ID -/// (if specified) and the type of search (anchored or not) is what selects the -/// start state to use in a DFA. -/// -/// As one example, if a DFA only supports unanchored searches and does not -/// support anchored searches for each pattern, then it will have at most 6 -/// distinct start states. (Some start states may be reused if determinization -/// can determine that they will be equivalent.) If the DFA supports both -/// anchored and unanchored searches, then it will have a maximum of 12 -/// distinct start states. Finally, if the DFA also supports anchored searches -/// for each pattern, then it can have up to `12 + (N * 6)` start states, where -/// `N` is the number of patterns. -/// -/// Handling each of these starting configurations in the context of DFA -/// determinization can be *quite* tricky and subtle. But the code is small -/// and can be found at `crate::util::determinize::set_lookbehind_from_start`. -#[derive(Clone, Copy, Debug, Eq, PartialEq)] -pub(crate) enum Start { - /// This occurs when the starting position is not any of the ones below. - NonWordByte = 0, - /// This occurs when the byte immediately preceding the start of the search - /// is an ASCII word byte. - WordByte = 1, - /// This occurs when the starting position of the search corresponds to the - /// beginning of the haystack. - Text = 2, - /// This occurs when the byte immediately preceding the start of the search - /// is a line terminator. Specifically, `\n`. - LineLF = 3, - /// This occurs when the byte immediately preceding the start of the search - /// is a line terminator. Specifically, `\r`. - LineCR = 4, - /// This occurs when a custom line terminator has been set via a - /// `LookMatcher`, and when that line terminator is neither a `\r` or a - /// `\n`. - /// - /// If the custom line terminator is a word byte, then this start - /// configuration is still selected. DFAs that implement word boundary - /// assertions will likely need to check whether the custom line terminator - /// is a word byte, in which case, it should behave as if the byte - /// satisfies `\b` in addition to multi-line anchors. - CustomLineTerminator = 5, -} - -impl Start { - /// Return the starting state corresponding to the given integer. If no - /// starting state exists for the given integer, then None is returned. - pub(crate) fn from_usize(n: usize) -> Option { - match n { - 0 => Some(Start::NonWordByte), - 1 => Some(Start::WordByte), - 2 => Some(Start::Text), - 3 => Some(Start::LineLF), - 4 => Some(Start::LineCR), - 5 => Some(Start::CustomLineTerminator), - _ => None, - } - } - - /// Returns the total number of starting state configurations. - pub(crate) fn len() -> usize { - 6 - } - - /// Return this starting configuration as `u8` integer. It is guaranteed to - /// be less than `Start::len()`. - #[cfg_attr(feature = "perf-inline", inline(always))] - pub(crate) fn as_u8(&self) -> u8 { - // AFAIK, 'as' is the only way to zero-cost convert an int enum to an - // actual int. - *self as u8 - } - - /// Return this starting configuration as a `usize` integer. It is - /// guaranteed to be less than `Start::len()`. 
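The byte-to-configuration table built by `StartByteMap::new` above can be illustrated with a tiny standalone classifier. This sketch uses hypothetical `StartKind`/`classify` names (not the crate's API), assumes the default `\n` line terminator, and mirrors the ASCII word-byte handling described above:

```
// Hypothetical stand-in for the Start enum above, for illustration only.
#[derive(Debug, PartialEq)]
enum StartKind { NonWordByte, WordByte, Text, LineLF, LineCR }

// Classify the byte immediately before the search start, if any.
fn classify(look_behind: Option<u8>) -> StartKind {
    match look_behind {
        None => StartKind::Text,
        Some(b'\n') => StartKind::LineLF,
        Some(b'\r') => StartKind::LineCR,
        Some(b) if b == b'_' || b.is_ascii_alphanumeric() => StartKind::WordByte,
        Some(_) => StartKind::NonWordByte,
    }
}

fn main() {
    let haystack = b"\nabc";
    // Searching from offset 1 means the look-behind byte is the preceding '\n'.
    let start: usize = 1;
    let look_behind = start.checked_sub(1).and_then(|i| haystack.get(i).copied());
    assert_eq!(StartKind::LineLF, classify(look_behind));
    // No look-behind byte means the search starts at the beginning of the text.
    assert_eq!(StartKind::Text, classify(None));
}
```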
- #[cfg_attr(feature = "perf-inline", inline(always))] - pub(crate) fn as_usize(&self) -> usize { - usize::from(self.as_u8()) - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn start_fwd_done_range() { - let smap = StartByteMap::new(&LookMatcher::default()); - let input = Input::new("").range(1..0); - let config = Config::from_input_forward(&input); - let start = - config.get_look_behind().map_or(Start::Text, |b| smap.get(b)); - assert_eq!(Start::Text, start); - } - - #[test] - fn start_rev_done_range() { - let smap = StartByteMap::new(&LookMatcher::default()); - let input = Input::new("").range(1..0); - let config = Config::from_input_reverse(&input); - let start = - config.get_look_behind().map_or(Start::Text, |b| smap.get(b)); - assert_eq!(Start::Text, start); - } - - #[test] - fn start_fwd() { - let f = |haystack, start, end| { - let smap = StartByteMap::new(&LookMatcher::default()); - let input = Input::new(haystack).range(start..end); - let config = Config::from_input_forward(&input); - let start = - config.get_look_behind().map_or(Start::Text, |b| smap.get(b)); - start - }; - - assert_eq!(Start::Text, f("", 0, 0)); - assert_eq!(Start::Text, f("abc", 0, 3)); - assert_eq!(Start::Text, f("\nabc", 0, 3)); - - assert_eq!(Start::LineLF, f("\nabc", 1, 3)); - - assert_eq!(Start::LineCR, f("\rabc", 1, 3)); - - assert_eq!(Start::WordByte, f("abc", 1, 3)); - - assert_eq!(Start::NonWordByte, f(" abc", 1, 3)); - } - - #[test] - fn start_rev() { - let f = |haystack, start, end| { - let smap = StartByteMap::new(&LookMatcher::default()); - let input = Input::new(haystack).range(start..end); - let config = Config::from_input_reverse(&input); - let start = - config.get_look_behind().map_or(Start::Text, |b| smap.get(b)); - start - }; - - assert_eq!(Start::Text, f("", 0, 0)); - assert_eq!(Start::Text, f("abc", 0, 3)); - assert_eq!(Start::Text, f("abc\n", 0, 4)); - - assert_eq!(Start::LineLF, f("abc\nz", 0, 3)); - - assert_eq!(Start::LineCR, f("abc\rz", 0, 3)); - - assert_eq!(Start::WordByte, f("abc", 0, 2)); - - assert_eq!(Start::NonWordByte, f("abc ", 0, 3)); - } -} diff --git a/vendor/regex-automata/src/util/syntax.rs b/vendor/regex-automata/src/util/syntax.rs deleted file mode 100644 index 3be07bc807581b..00000000000000 --- a/vendor/regex-automata/src/util/syntax.rs +++ /dev/null @@ -1,482 +0,0 @@ -/*! -Utilities for dealing with the syntax of a regular expression. - -This module currently only exposes a [`Config`] type that -itself represents a wrapper around the configuration for a -[`regex-syntax::ParserBuilder`](regex_syntax::ParserBuilder). The purpose of -this wrapper is to make configuring syntax options very similar to how other -configuration is done throughout this crate. Namely, instead of duplicating -syntax options across every builder (of which there are many), we instead -create small config objects like this one that can be passed around and -composed. -*/ - -use alloc::{vec, vec::Vec}; - -use regex_syntax::{ - ast, - hir::{self, Hir}, - Error, ParserBuilder, -}; - -/// A convenience routine for parsing a pattern into an HIR value with the -/// default configuration. 
-/// -/// # Example -/// -/// This shows how to parse a pattern into an HIR value: -/// -/// ``` -/// use regex_automata::util::syntax; -/// -/// let hir = syntax::parse(r"([a-z]+)|([0-9]+)")?; -/// assert_eq!(Some(1), hir.properties().static_explicit_captures_len()); -/// -/// # Ok::<(), Box<dyn std::error::Error>>(()) -/// ``` -pub fn parse(pattern: &str) -> Result<Hir, Error> { - parse_with(pattern, &Config::default()) -} - -/// A convenience routine for parsing many patterns into HIR values with the -/// default configuration. -/// -/// # Example -/// -/// This shows how to parse many patterns into corresponding HIR values: -/// -/// ``` -/// use { -/// regex_automata::util::syntax, -/// regex_syntax::hir::Properties, -/// }; -/// -/// let hirs = syntax::parse_many(&[ -/// r"([a-z]+)|([0-9]+)", -/// r"foo(A-Z]+)bar", -/// ])?; -/// let props = Properties::union(hirs.iter().map(|h| h.properties())); -/// assert_eq!(Some(1), props.static_explicit_captures_len()); -/// -/// # Ok::<(), Box<dyn std::error::Error>>(()) -/// ``` -pub fn parse_many<P: AsRef<str>>(patterns: &[P]) -> Result<Vec<Hir>, Error> { - parse_many_with(patterns, &Config::default()) -} - -/// A convenience routine for parsing a pattern into an HIR value using a -/// `Config`. -/// -/// # Example -/// -/// This shows how to parse a pattern into an HIR value with a non-default -/// configuration: -/// -/// ``` -/// use regex_automata::util::syntax; -/// -/// let hir = syntax::parse_with( -/// r"^[a-z]+$", -/// &syntax::Config::new().multi_line(true).crlf(true), -/// )?; -/// assert!(hir.properties().look_set().contains_anchor_crlf()); -/// -/// # Ok::<(), Box<dyn std::error::Error>>(()) -/// ``` -pub fn parse_with(pattern: &str, config: &Config) -> Result<Hir, Error> { - let mut builder = ParserBuilder::new(); - config.apply(&mut builder); - builder.build().parse(pattern) -} - -/// A convenience routine for parsing many patterns into HIR values using a -/// `Config`. -/// -/// # Example -/// -/// This shows how to parse many patterns into corresponding HIR values -/// with a non-default configuration: -/// -/// ``` -/// use { -/// regex_automata::util::syntax, -/// regex_syntax::hir::Properties, -/// }; -/// -/// let patterns = &[ -/// r"([a-z]+)|([0-9]+)", -/// r"\W", -/// r"foo(A-Z]+)bar", -/// ]; -/// let config = syntax::Config::new().unicode(false).utf8(false); -/// let hirs = syntax::parse_many_with(patterns, &config)?; -/// let props = Properties::union(hirs.iter().map(|h| h.properties())); -/// assert!(!props.is_utf8()); -/// -/// # Ok::<(), Box<dyn std::error::Error>>(()) -/// ``` -pub fn parse_many_with<P: AsRef<str>>( - patterns: &[P], - config: &Config, -) -> Result<Vec<Hir>, Error> { - let mut builder = ParserBuilder::new(); - config.apply(&mut builder); - let mut hirs = vec![]; - for p in patterns.iter() { - hirs.push(builder.build().parse(p.as_ref())?); - } - Ok(hirs) -} - -/// A common set of configuration options that apply to the syntax of a regex. -/// -/// This represents a group of configuration options that specifically apply -/// to how the concrete syntax of a regular expression is interpreted. In -/// particular, they are generally forwarded to the -/// [`ParserBuilder`](https://docs.rs/regex-syntax/*/regex_syntax/struct.ParserBuilder.html) -/// in the -/// [`regex-syntax`](https://docs.rs/regex-syntax) -/// crate when building a regex from its concrete syntax directly. -/// -/// These options are defined as a group since they apply to every regex engine -/// in this crate. Instead of re-defining them on every engine's builder, they -/// are instead provided here as one cohesive unit.
-#[derive(Clone, Copy, Debug)] -pub struct Config { - case_insensitive: bool, - multi_line: bool, - dot_matches_new_line: bool, - crlf: bool, - line_terminator: u8, - swap_greed: bool, - ignore_whitespace: bool, - unicode: bool, - utf8: bool, - nest_limit: u32, - octal: bool, -} - -impl Config { - /// Return a new default syntax configuration. - pub fn new() -> Config { - // These defaults match the ones used in regex-syntax. - Config { - case_insensitive: false, - multi_line: false, - dot_matches_new_line: false, - crlf: false, - line_terminator: b'\n', - swap_greed: false, - ignore_whitespace: false, - unicode: true, - utf8: true, - nest_limit: 250, - octal: false, - } - } - - /// Enable or disable the case insensitive flag by default. - /// - /// When Unicode mode is enabled, case insensitivity is Unicode-aware. - /// Specifically, it will apply the "simple" case folding rules as - /// specified by Unicode. - /// - /// By default this is disabled. It may alternatively be selectively - /// enabled in the regular expression itself via the `i` flag. - pub fn case_insensitive(mut self, yes: bool) -> Config { - self.case_insensitive = yes; - self - } - - /// Enable or disable the multi-line matching flag by default. - /// - /// When this is enabled, the `^` and `$` look-around assertions will - /// match immediately after and immediately before a new line character, - /// respectively. Note that the `\A` and `\z` look-around assertions are - /// unaffected by this setting and always correspond to matching at the - /// beginning and end of the input. - /// - /// By default this is disabled. It may alternatively be selectively - /// enabled in the regular expression itself via the `m` flag. - pub fn multi_line(mut self, yes: bool) -> Config { - self.multi_line = yes; - self - } - - /// Enable or disable the "dot matches any character" flag by default. - /// - /// When this is enabled, `.` will match any character. When it's disabled, - /// then `.` will match any character except for a new line character. - /// - /// Note that `.` is impacted by whether the "unicode" setting is enabled - /// or not. When Unicode is enabled (the default), `.` will match any UTF-8 - /// encoding of any Unicode scalar value (sans a new line, depending on - /// whether this "dot matches new line" option is enabled). When Unicode - /// mode is disabled, `.` will match any byte instead. Because of this, - /// when Unicode mode is disabled, `.` can only be used when the "allow - /// invalid UTF-8" option is enabled, since `.` could otherwise match - /// invalid UTF-8. - /// - /// By default this is disabled. It may alternatively be selectively - /// enabled in the regular expression itself via the `s` flag. - pub fn dot_matches_new_line(mut self, yes: bool) -> Config { - self.dot_matches_new_line = yes; - self - } - - /// Enable or disable the "CRLF mode" flag by default. - /// - /// By default this is disabled. It may alternatively be selectively - /// enabled in the regular expression itself via the `R` flag. - /// - /// When CRLF mode is enabled, the following happens: - /// - /// * Unless `dot_matches_new_line` is enabled, `.` will match any character - /// except for `\r` and `\n`. - /// * When `multi_line` mode is enabled, `^` and `$` will treat `\r\n`, - /// `\r` and `\n` as line terminators. And in particular, neither will - /// match between a `\r` and a `\n`. - pub fn crlf(mut self, yes: bool) -> Config { - self.crlf = yes; - self - } - - /// Sets the line terminator for use with `(?u-s:.)` and `(?-us:.)`. 
- /// - /// Namely, instead of `.` (by default) matching everything except for `\n`, - /// this will cause `.` to match everything except for the byte given. - /// - /// If `.` is used in a context where Unicode mode is enabled and this byte - /// isn't ASCII, then an error will be returned. When Unicode mode is - /// disabled, then any byte is permitted, but will return an error if UTF-8 - /// mode is enabled and it is a non-ASCII byte. - /// - /// In short, any ASCII value for a line terminator is always okay. But a - /// non-ASCII byte might result in an error depending on whether Unicode - /// mode or UTF-8 mode are enabled. - /// - /// Note that if `R` mode is enabled then it always takes precedence and - /// the line terminator will be treated as `\r` and `\n` simultaneously. - /// - /// Note also that this *doesn't* impact the look-around assertions - /// `(?m:^)` and `(?m:$)`. That's usually controlled by additional - /// configuration in the regex engine itself. - pub fn line_terminator(mut self, byte: u8) -> Config { - self.line_terminator = byte; - self - } - - /// Enable or disable the "swap greed" flag by default. - /// - /// When this is enabled, `.*` (for example) will become ungreedy and `.*?` - /// will become greedy. - /// - /// By default this is disabled. It may alternatively be selectively - /// enabled in the regular expression itself via the `U` flag. - pub fn swap_greed(mut self, yes: bool) -> Config { - self.swap_greed = yes; - self - } - - /// Enable verbose mode in the regular expression. - /// - /// When enabled, verbose mode permits insignificant whitespace in many - /// places in the regular expression, as well as comments. Comments are - /// started using `#` and continue until the end of the line. - /// - /// By default, this is disabled. It may be selectively enabled in the - /// regular expression by using the `x` flag regardless of this setting. - pub fn ignore_whitespace(mut self, yes: bool) -> Config { - self.ignore_whitespace = yes; - self - } - - /// Enable or disable the Unicode flag (`u`) by default. - /// - /// By default this is **enabled**. It may alternatively be selectively - /// disabled in the regular expression itself via the `u` flag. - /// - /// Note that unless "allow invalid UTF-8" is enabled (it's disabled by - /// default), a regular expression will fail to parse if Unicode mode is - /// disabled and a sub-expression could possibly match invalid UTF-8. - /// - /// **WARNING**: Unicode mode can greatly increase the size of the compiled - /// DFA, which can noticeably impact both memory usage and compilation - /// time. This is especially noticeable if your regex contains character - /// classes like `\w` that are impacted by whether Unicode is enabled or - /// not. If Unicode is not necessary, you are encouraged to disable it. - pub fn unicode(mut self, yes: bool) -> Config { - self.unicode = yes; - self - } - - /// When disabled, the builder will permit the construction of a regular - /// expression that may match invalid UTF-8. - /// - /// For example, when [`Config::unicode`] is disabled, then - /// expressions like `[^a]` may match invalid UTF-8 since they can match - /// any single byte that is not `a`. By default, these sub-expressions - /// are disallowed to avoid returning offsets that split a UTF-8 - /// encoded codepoint. However, in cases where matching at arbitrary - /// locations is desired, this option can be disabled to permit all such - /// sub-expressions. 
- /// - /// When enabled (the default), the builder is guaranteed to produce a - /// regex that will only ever match valid UTF-8 (otherwise, the builder - /// will return an error). - pub fn utf8(mut self, yes: bool) -> Config { - self.utf8 = yes; - self - } - - /// Set the nesting limit used for the regular expression parser. - /// - /// The nesting limit controls how deep the abstract syntax tree is allowed - /// to be. If the AST exceeds the given limit (e.g., with too many nested - /// groups), then an error is returned by the parser. - /// - /// The purpose of this limit is to act as a heuristic to prevent stack - /// overflow when building a finite automaton from a regular expression's - /// abstract syntax tree. In particular, construction currently uses - /// recursion. In the future, the implementation may stop using recursion - /// and this option will no longer be necessary. - /// - /// This limit is not checked until the entire AST is parsed. Therefore, - /// if callers want to put a limit on the amount of heap space used, then - /// they should impose a limit on the length, in bytes, of the concrete - /// pattern string. In particular, this is viable since the parser will - /// limit itself to heap space proportional to the length of the pattern - /// string. - /// - /// Note that a nest limit of `0` will return a nest limit error for most - /// patterns but not all. For example, a nest limit of `0` permits `a` but - /// not `ab`, since `ab` requires a concatenation AST item, which results - /// in a nest depth of `1`. In general, a nest limit is not something that - /// manifests in an obvious way in the concrete syntax, therefore, it - /// should not be used in a granular way. - pub fn nest_limit(mut self, limit: u32) -> Config { - self.nest_limit = limit; - self - } - - /// Whether to support octal syntax or not. - /// - /// Octal syntax is a little-known way of uttering Unicode codepoints in - /// a regular expression. For example, `a`, `\x61`, `\u0061` and - /// `\141` are all equivalent regular expressions, where the last example - /// shows octal syntax. - /// - /// While supporting octal syntax isn't in and of itself a problem, it does - /// make good error messages harder. That is, in PCRE based regex engines, - /// syntax like `\1` invokes a backreference, which is explicitly - /// unsupported in Rust's regex engine. However, many users expect it to - /// be supported. Therefore, when octal support is disabled, the error - /// message will explicitly mention that backreferences aren't supported. - /// - /// Octal syntax is disabled by default. - pub fn octal(mut self, yes: bool) -> Config { - self.octal = yes; - self - } - - /// Returns whether "unicode" mode is enabled. - pub fn get_unicode(&self) -> bool { - self.unicode - } - - /// Returns whether "case insensitive" mode is enabled. - pub fn get_case_insensitive(&self) -> bool { - self.case_insensitive - } - - /// Returns whether "multi line" mode is enabled. - pub fn get_multi_line(&self) -> bool { - self.multi_line - } - - /// Returns whether "dot matches new line" mode is enabled. - pub fn get_dot_matches_new_line(&self) -> bool { - self.dot_matches_new_line - } - - /// Returns whether "CRLF" mode is enabled. - pub fn get_crlf(&self) -> bool { - self.crlf - } - - /// Returns the line terminator in this syntax configuration. - pub fn get_line_terminator(&self) -> u8 { - self.line_terminator - } - - /// Returns whether "swap greed" mode is enabled. 
- pub fn get_swap_greed(&self) -> bool { - self.swap_greed - } - - /// Returns whether "ignore whitespace" mode is enabled. - pub fn get_ignore_whitespace(&self) -> bool { - self.ignore_whitespace - } - - /// Returns whether UTF-8 mode is enabled. - pub fn get_utf8(&self) -> bool { - self.utf8 - } - - /// Returns the "nest limit" setting. - pub fn get_nest_limit(&self) -> u32 { - self.nest_limit - } - - /// Returns whether "octal" mode is enabled. - pub fn get_octal(&self) -> bool { - self.octal - } - - /// Applies this configuration to the given parser. - pub(crate) fn apply(&self, builder: &mut ParserBuilder) { - builder - .unicode(self.unicode) - .case_insensitive(self.case_insensitive) - .multi_line(self.multi_line) - .dot_matches_new_line(self.dot_matches_new_line) - .crlf(self.crlf) - .line_terminator(self.line_terminator) - .swap_greed(self.swap_greed) - .ignore_whitespace(self.ignore_whitespace) - .utf8(self.utf8) - .nest_limit(self.nest_limit) - .octal(self.octal); - } - - /// Applies this configuration to the given AST parser. - pub(crate) fn apply_ast(&self, builder: &mut ast::parse::ParserBuilder) { - builder - .ignore_whitespace(self.ignore_whitespace) - .nest_limit(self.nest_limit) - .octal(self.octal); - } - - /// Applies this configuration to the given AST-to-HIR translator. - pub(crate) fn apply_hir( - &self, - builder: &mut hir::translate::TranslatorBuilder, - ) { - builder - .unicode(self.unicode) - .case_insensitive(self.case_insensitive) - .multi_line(self.multi_line) - .crlf(self.crlf) - .dot_matches_new_line(self.dot_matches_new_line) - .line_terminator(self.line_terminator) - .swap_greed(self.swap_greed) - .utf8(self.utf8); - } -} - -impl Default for Config { - fn default() -> Config { - Config::new() - } -} diff --git a/vendor/regex-automata/src/util/unicode_data/mod.rs b/vendor/regex-automata/src/util/unicode_data/mod.rs deleted file mode 100644 index fc7b1c738ab3a1..00000000000000 --- a/vendor/regex-automata/src/util/unicode_data/mod.rs +++ /dev/null @@ -1,17 +0,0 @@ -// This cfg should match the one in src/util/look.rs that uses perl_word. -#[cfg(all( - // We have to explicitly want to support Unicode word boundaries. - feature = "unicode-word-boundary", - not(all( - // If we don't have regex-syntax at all, then we definitely need to - // bring our own \w data table. - feature = "syntax", - // If unicode-perl is enabled, then regex-syntax/unicode-perl is - // also enabled, which in turn means we can use regex-syntax's - // is_word_character routine (and thus use its data tables). But if - // unicode-perl is not enabled, even if syntax is, then we need to - // bring our own. - feature = "unicode-perl", - )), -))] -pub(crate) mod perl_word; diff --git a/vendor/regex-automata/src/util/unicode_data/perl_word.rs b/vendor/regex-automata/src/util/unicode_data/perl_word.rs deleted file mode 100644 index 21c8c0f9c839c8..00000000000000 --- a/vendor/regex-automata/src/util/unicode_data/perl_word.rs +++ /dev/null @@ -1,806 +0,0 @@ -// DO NOT EDIT THIS FILE. IT WAS AUTOMATICALLY GENERATED BY: -// -// ucd-generate perl-word ucd-16.0.0 --chars -// -// Unicode version: 16.0.0. -// -// ucd-generate 0.3.1 is available on crates.io. 
- -pub const PERL_WORD: &'static [(char, char)] = &[ - ('0', '9'), - ('A', 'Z'), - ('_', '_'), - ('a', 'z'), - ('ª', 'ª'), - ('µ', 'µ'), - ('º', 'º'), - ('À', 'Ö'), - ('Ø', 'ö'), - ('ø', 'ˁ'), - ('ˆ', 'ˑ'), - ('ˠ', 'ˤ'), - ('ˬ', 'ˬ'), - ('ˮ', 'ˮ'), - ('\u{300}', 'ʹ'), - ('Ͷ', 'ͷ'), - ('ͺ', 'ͽ'), - ('Ϳ', 'Ϳ'), - ('Ά', 'Ά'), - ('Έ', 'Ί'), - ('Ό', 'Ό'), - ('Ύ', 'Ρ'), - ('Σ', 'ϵ'), - ('Ϸ', 'ҁ'), - ('\u{483}', 'ԯ'), - ('Ա', 'Ֆ'), - ('ՙ', 'ՙ'), - ('ՠ', 'ֈ'), - ('\u{591}', '\u{5bd}'), - ('\u{5bf}', '\u{5bf}'), - ('\u{5c1}', '\u{5c2}'), - ('\u{5c4}', '\u{5c5}'), - ('\u{5c7}', '\u{5c7}'), - ('א', 'ת'), - ('ׯ', 'ײ'), - ('\u{610}', '\u{61a}'), - ('ؠ', '٩'), - ('ٮ', 'ۓ'), - ('ە', '\u{6dc}'), - ('\u{6df}', '\u{6e8}'), - ('\u{6ea}', 'ۼ'), - ('ۿ', 'ۿ'), - ('ܐ', '\u{74a}'), - ('ݍ', 'ޱ'), - ('߀', 'ߵ'), - ('ߺ', 'ߺ'), - ('\u{7fd}', '\u{7fd}'), - ('ࠀ', '\u{82d}'), - ('ࡀ', '\u{85b}'), - ('ࡠ', 'ࡪ'), - ('ࡰ', 'ࢇ'), - ('ࢉ', 'ࢎ'), - ('\u{897}', '\u{8e1}'), - ('\u{8e3}', '\u{963}'), - ('०', '९'), - ('ॱ', 'ঃ'), - ('অ', 'ঌ'), - ('এ', 'ঐ'), - ('ও', 'ন'), - ('প', 'র'), - ('ল', 'ল'), - ('শ', 'হ'), - ('\u{9bc}', '\u{9c4}'), - ('ে', 'ৈ'), - ('ো', 'ৎ'), - ('\u{9d7}', '\u{9d7}'), - ('ড়', 'ঢ়'), - ('য়', '\u{9e3}'), - ('০', 'ৱ'), - ('ৼ', 'ৼ'), - ('\u{9fe}', '\u{9fe}'), - ('\u{a01}', 'ਃ'), - ('ਅ', 'ਊ'), - ('ਏ', 'ਐ'), - ('ਓ', 'ਨ'), - ('ਪ', 'ਰ'), - ('ਲ', 'ਲ਼'), - ('ਵ', 'ਸ਼'), - ('ਸ', 'ਹ'), - ('\u{a3c}', '\u{a3c}'), - ('ਾ', '\u{a42}'), - ('\u{a47}', '\u{a48}'), - ('\u{a4b}', '\u{a4d}'), - ('\u{a51}', '\u{a51}'), - ('ਖ਼', 'ੜ'), - ('ਫ਼', 'ਫ਼'), - ('੦', '\u{a75}'), - ('\u{a81}', 'ઃ'), - ('અ', 'ઍ'), - ('એ', 'ઑ'), - ('ઓ', 'ન'), - ('પ', 'ર'), - ('લ', 'ળ'), - ('વ', 'હ'), - ('\u{abc}', '\u{ac5}'), - ('\u{ac7}', 'ૉ'), - ('ો', '\u{acd}'), - ('ૐ', 'ૐ'), - ('ૠ', '\u{ae3}'), - ('૦', '૯'), - ('ૹ', '\u{aff}'), - ('\u{b01}', 'ଃ'), - ('ଅ', 'ଌ'), - ('ଏ', 'ଐ'), - ('ଓ', 'ନ'), - ('ପ', 'ର'), - ('ଲ', 'ଳ'), - ('ଵ', 'ହ'), - ('\u{b3c}', '\u{b44}'), - ('େ', 'ୈ'), - ('ୋ', '\u{b4d}'), - ('\u{b55}', '\u{b57}'), - ('ଡ଼', 'ଢ଼'), - ('ୟ', '\u{b63}'), - ('୦', '୯'), - ('ୱ', 'ୱ'), - ('\u{b82}', 'ஃ'), - ('அ', 'ஊ'), - ('எ', 'ஐ'), - ('ஒ', 'க'), - ('ங', 'ச'), - ('ஜ', 'ஜ'), - ('ஞ', 'ட'), - ('ண', 'த'), - ('ந', 'ப'), - ('ம', 'ஹ'), - ('\u{bbe}', 'ூ'), - ('ெ', 'ை'), - ('ொ', '\u{bcd}'), - ('ௐ', 'ௐ'), - ('\u{bd7}', '\u{bd7}'), - ('௦', '௯'), - ('\u{c00}', 'ఌ'), - ('ఎ', 'ఐ'), - ('ఒ', 'న'), - ('ప', 'హ'), - ('\u{c3c}', 'ౄ'), - ('\u{c46}', '\u{c48}'), - ('\u{c4a}', '\u{c4d}'), - ('\u{c55}', '\u{c56}'), - ('ౘ', 'ౚ'), - ('ౝ', 'ౝ'), - ('ౠ', '\u{c63}'), - ('౦', '౯'), - ('ಀ', 'ಃ'), - ('ಅ', 'ಌ'), - ('ಎ', 'ಐ'), - ('ಒ', 'ನ'), - ('ಪ', 'ಳ'), - ('ವ', 'ಹ'), - ('\u{cbc}', 'ೄ'), - ('\u{cc6}', '\u{cc8}'), - ('\u{cca}', '\u{ccd}'), - ('\u{cd5}', '\u{cd6}'), - ('ೝ', 'ೞ'), - ('ೠ', '\u{ce3}'), - ('೦', '೯'), - ('ೱ', 'ೳ'), - ('\u{d00}', 'ഌ'), - ('എ', 'ഐ'), - ('ഒ', '\u{d44}'), - ('െ', 'ൈ'), - ('ൊ', 'ൎ'), - ('ൔ', '\u{d57}'), - ('ൟ', '\u{d63}'), - ('൦', '൯'), - ('ൺ', 'ൿ'), - ('\u{d81}', 'ඃ'), - ('අ', 'ඖ'), - ('ක', 'න'), - ('ඳ', 'ර'), - ('ල', 'ල'), - ('ව', 'ෆ'), - ('\u{dca}', '\u{dca}'), - ('\u{dcf}', '\u{dd4}'), - ('\u{dd6}', '\u{dd6}'), - ('ෘ', '\u{ddf}'), - ('෦', '෯'), - ('ෲ', 'ෳ'), - ('ก', '\u{e3a}'), - ('เ', '\u{e4e}'), - ('๐', '๙'), - ('ກ', 'ຂ'), - ('ຄ', 'ຄ'), - ('ຆ', 'ຊ'), - ('ຌ', 'ຣ'), - ('ລ', 'ລ'), - ('ວ', 'ຽ'), - ('ເ', 'ໄ'), - ('ໆ', 'ໆ'), - ('\u{ec8}', '\u{ece}'), - ('໐', '໙'), - ('ໜ', 'ໟ'), - ('ༀ', 'ༀ'), - ('\u{f18}', '\u{f19}'), - ('༠', '༩'), - ('\u{f35}', '\u{f35}'), - ('\u{f37}', '\u{f37}'), - ('\u{f39}', '\u{f39}'), - ('༾', 'ཇ'), - ('ཉ', 'ཬ'), - ('\u{f71}', '\u{f84}'), - ('\u{f86}', 
'\u{f97}'), - ('\u{f99}', '\u{fbc}'), - ('\u{fc6}', '\u{fc6}'), - ('က', '၉'), - ('ၐ', '\u{109d}'), - ('Ⴀ', 'Ⴥ'), - ('Ⴧ', 'Ⴧ'), - ('Ⴭ', 'Ⴭ'), - ('ა', 'ჺ'), - ('ჼ', 'ቈ'), - ('ቊ', 'ቍ'), - ('ቐ', 'ቖ'), - ('ቘ', 'ቘ'), - ('ቚ', 'ቝ'), - ('በ', 'ኈ'), - ('ኊ', 'ኍ'), - ('ነ', 'ኰ'), - ('ኲ', 'ኵ'), - ('ኸ', 'ኾ'), - ('ዀ', 'ዀ'), - ('ዂ', 'ዅ'), - ('ወ', 'ዖ'), - ('ዘ', 'ጐ'), - ('ጒ', 'ጕ'), - ('ጘ', 'ፚ'), - ('\u{135d}', '\u{135f}'), - ('ᎀ', 'ᎏ'), - ('Ꭰ', 'Ᏽ'), - ('ᏸ', 'ᏽ'), - ('ᐁ', 'ᙬ'), - ('ᙯ', 'ᙿ'), - ('ᚁ', 'ᚚ'), - ('ᚠ', 'ᛪ'), - ('ᛮ', 'ᛸ'), - ('ᜀ', '\u{1715}'), - ('ᜟ', '\u{1734}'), - ('ᝀ', '\u{1753}'), - ('ᝠ', 'ᝬ'), - ('ᝮ', 'ᝰ'), - ('\u{1772}', '\u{1773}'), - ('ក', '\u{17d3}'), - ('ៗ', 'ៗ'), - ('ៜ', '\u{17dd}'), - ('០', '៩'), - ('\u{180b}', '\u{180d}'), - ('\u{180f}', '᠙'), - ('ᠠ', 'ᡸ'), - ('ᢀ', 'ᢪ'), - ('ᢰ', 'ᣵ'), - ('ᤀ', 'ᤞ'), - ('\u{1920}', 'ᤫ'), - ('ᤰ', '\u{193b}'), - ('᥆', 'ᥭ'), - ('ᥰ', 'ᥴ'), - ('ᦀ', 'ᦫ'), - ('ᦰ', 'ᧉ'), - ('᧐', '᧙'), - ('ᨀ', '\u{1a1b}'), - ('ᨠ', '\u{1a5e}'), - ('\u{1a60}', '\u{1a7c}'), - ('\u{1a7f}', '᪉'), - ('᪐', '᪙'), - ('ᪧ', 'ᪧ'), - ('\u{1ab0}', '\u{1ace}'), - ('\u{1b00}', 'ᭌ'), - ('᭐', '᭙'), - ('\u{1b6b}', '\u{1b73}'), - ('\u{1b80}', '\u{1bf3}'), - ('ᰀ', '\u{1c37}'), - ('᱀', '᱉'), - ('ᱍ', 'ᱽ'), - ('ᲀ', 'ᲊ'), - ('Ა', 'Ჺ'), - ('Ჽ', 'Ჿ'), - ('\u{1cd0}', '\u{1cd2}'), - ('\u{1cd4}', 'ᳺ'), - ('ᴀ', 'ἕ'), - ('Ἐ', 'Ἕ'), - ('ἠ', 'ὅ'), - ('Ὀ', 'Ὅ'), - ('ὐ', 'ὗ'), - ('Ὑ', 'Ὑ'), - ('Ὓ', 'Ὓ'), - ('Ὕ', 'Ὕ'), - ('Ὗ', 'ώ'), - ('ᾀ', 'ᾴ'), - ('ᾶ', 'ᾼ'), - ('ι', 'ι'), - ('ῂ', 'ῄ'), - ('ῆ', 'ῌ'), - ('ῐ', 'ΐ'), - ('ῖ', 'Ί'), - ('ῠ', 'Ῥ'), - ('ῲ', 'ῴ'), - ('ῶ', 'ῼ'), - ('\u{200c}', '\u{200d}'), - ('‿', '⁀'), - ('⁔', '⁔'), - ('ⁱ', 'ⁱ'), - ('ⁿ', 'ⁿ'), - ('ₐ', 'ₜ'), - ('\u{20d0}', '\u{20f0}'), - ('ℂ', 'ℂ'), - ('ℇ', 'ℇ'), - ('ℊ', 'ℓ'), - ('ℕ', 'ℕ'), - ('ℙ', 'ℝ'), - ('ℤ', 'ℤ'), - ('Ω', 'Ω'), - ('ℨ', 'ℨ'), - ('K', 'ℭ'), - ('ℯ', 'ℹ'), - ('ℼ', 'ℿ'), - ('ⅅ', 'ⅉ'), - ('ⅎ', 'ⅎ'), - ('Ⅰ', 'ↈ'), - ('Ⓐ', 'ⓩ'), - ('Ⰰ', 'ⳤ'), - ('Ⳬ', 'ⳳ'), - ('ⴀ', 'ⴥ'), - ('ⴧ', 'ⴧ'), - ('ⴭ', 'ⴭ'), - ('ⴰ', 'ⵧ'), - ('ⵯ', 'ⵯ'), - ('\u{2d7f}', 'ⶖ'), - ('ⶠ', 'ⶦ'), - ('ⶨ', 'ⶮ'), - ('ⶰ', 'ⶶ'), - ('ⶸ', 'ⶾ'), - ('ⷀ', 'ⷆ'), - ('ⷈ', 'ⷎ'), - ('ⷐ', 'ⷖ'), - ('ⷘ', 'ⷞ'), - ('\u{2de0}', '\u{2dff}'), - ('ⸯ', 'ⸯ'), - ('々', '〇'), - ('〡', '\u{302f}'), - ('〱', '〵'), - ('〸', '〼'), - ('ぁ', 'ゖ'), - ('\u{3099}', '\u{309a}'), - ('ゝ', 'ゟ'), - ('ァ', 'ヺ'), - ('ー', 'ヿ'), - ('ㄅ', 'ㄯ'), - ('ㄱ', 'ㆎ'), - ('ㆠ', 'ㆿ'), - ('ㇰ', 'ㇿ'), - ('㐀', '䶿'), - ('一', 'ꒌ'), - ('ꓐ', 'ꓽ'), - ('ꔀ', 'ꘌ'), - ('ꘐ', 'ꘫ'), - ('Ꙁ', '\u{a672}'), - ('\u{a674}', '\u{a67d}'), - ('ꙿ', '\u{a6f1}'), - ('ꜗ', 'ꜟ'), - ('Ꜣ', 'ꞈ'), - ('Ꞌ', 'ꟍ'), - ('Ꟑ', 'ꟑ'), - ('ꟓ', 'ꟓ'), - ('ꟕ', 'Ƛ'), - ('ꟲ', 'ꠧ'), - ('\u{a82c}', '\u{a82c}'), - ('ꡀ', 'ꡳ'), - ('ꢀ', '\u{a8c5}'), - ('꣐', '꣙'), - ('\u{a8e0}', 'ꣷ'), - ('ꣻ', 'ꣻ'), - ('ꣽ', '\u{a92d}'), - ('ꤰ', '\u{a953}'), - ('ꥠ', 'ꥼ'), - ('\u{a980}', '\u{a9c0}'), - ('ꧏ', '꧙'), - ('ꧠ', 'ꧾ'), - ('ꨀ', '\u{aa36}'), - ('ꩀ', 'ꩍ'), - ('꩐', '꩙'), - ('ꩠ', 'ꩶ'), - ('ꩺ', 'ꫂ'), - ('ꫛ', 'ꫝ'), - ('ꫠ', 'ꫯ'), - ('ꫲ', '\u{aaf6}'), - ('ꬁ', 'ꬆ'), - ('ꬉ', 'ꬎ'), - ('ꬑ', 'ꬖ'), - ('ꬠ', 'ꬦ'), - ('ꬨ', 'ꬮ'), - ('ꬰ', 'ꭚ'), - ('ꭜ', 'ꭩ'), - ('ꭰ', 'ꯪ'), - ('꯬', '\u{abed}'), - ('꯰', '꯹'), - ('가', '힣'), - ('ힰ', 'ퟆ'), - ('ퟋ', 'ퟻ'), - ('豈', '舘'), - ('並', '龎'), - ('ff', 'st'), - ('ﬓ', 'ﬗ'), - ('יִ', 'ﬨ'), - ('שׁ', 'זּ'), - ('טּ', 'לּ'), - ('מּ', 'מּ'), - ('נּ', 'סּ'), - ('ףּ', 'פּ'), - ('צּ', 'ﮱ'), - ('ﯓ', 'ﴽ'), - ('ﵐ', 'ﶏ'), - ('ﶒ', 'ﷇ'), - ('ﷰ', 'ﷻ'), - ('\u{fe00}', '\u{fe0f}'), - ('\u{fe20}', '\u{fe2f}'), - ('︳', '︴'), - ('﹍', '﹏'), - ('ﹰ', 'ﹴ'), - ('ﹶ', 'ﻼ'), - ('0', '9'), - ('A', 'Z'), - ('_', '_'), - ('a', 'z'), - 
('ヲ', 'ᄒ'), - ('ᅡ', 'ᅦ'), - ('ᅧ', 'ᅬ'), - ('ᅭ', 'ᅲ'), - ('ᅳ', 'ᅵ'), - ('𐀀', '𐀋'), - ('𐀍', '𐀦'), - ('𐀨', '𐀺'), - ('𐀼', '𐀽'), - ('𐀿', '𐁍'), - ('𐁐', '𐁝'), - ('𐂀', '𐃺'), - ('𐅀', '𐅴'), - ('\u{101fd}', '\u{101fd}'), - ('𐊀', '𐊜'), - ('𐊠', '𐋐'), - ('\u{102e0}', '\u{102e0}'), - ('𐌀', '𐌟'), - ('𐌭', '𐍊'), - ('𐍐', '\u{1037a}'), - ('𐎀', '𐎝'), - ('𐎠', '𐏃'), - ('𐏈', '𐏏'), - ('𐏑', '𐏕'), - ('𐐀', '𐒝'), - ('𐒠', '𐒩'), - ('𐒰', '𐓓'), - ('𐓘', '𐓻'), - ('𐔀', '𐔧'), - ('𐔰', '𐕣'), - ('𐕰', '𐕺'), - ('𐕼', '𐖊'), - ('𐖌', '𐖒'), - ('𐖔', '𐖕'), - ('𐖗', '𐖡'), - ('𐖣', '𐖱'), - ('𐖳', '𐖹'), - ('𐖻', '𐖼'), - ('𐗀', '𐗳'), - ('𐘀', '𐜶'), - ('𐝀', '𐝕'), - ('𐝠', '𐝧'), - ('𐞀', '𐞅'), - ('𐞇', '𐞰'), - ('𐞲', '𐞺'), - ('𐠀', '𐠅'), - ('𐠈', '𐠈'), - ('𐠊', '𐠵'), - ('𐠷', '𐠸'), - ('𐠼', '𐠼'), - ('𐠿', '𐡕'), - ('𐡠', '𐡶'), - ('𐢀', '𐢞'), - ('𐣠', '𐣲'), - ('𐣴', '𐣵'), - ('𐤀', '𐤕'), - ('𐤠', '𐤹'), - ('𐦀', '𐦷'), - ('𐦾', '𐦿'), - ('𐨀', '\u{10a03}'), - ('\u{10a05}', '\u{10a06}'), - ('\u{10a0c}', '𐨓'), - ('𐨕', '𐨗'), - ('𐨙', '𐨵'), - ('\u{10a38}', '\u{10a3a}'), - ('\u{10a3f}', '\u{10a3f}'), - ('𐩠', '𐩼'), - ('𐪀', '𐪜'), - ('𐫀', '𐫇'), - ('𐫉', '\u{10ae6}'), - ('𐬀', '𐬵'), - ('𐭀', '𐭕'), - ('𐭠', '𐭲'), - ('𐮀', '𐮑'), - ('𐰀', '𐱈'), - ('𐲀', '𐲲'), - ('𐳀', '𐳲'), - ('𐴀', '\u{10d27}'), - ('𐴰', '𐴹'), - ('𐵀', '𐵥'), - ('\u{10d69}', '\u{10d6d}'), - ('𐵯', '𐶅'), - ('𐺀', '𐺩'), - ('\u{10eab}', '\u{10eac}'), - ('𐺰', '𐺱'), - ('𐻂', '𐻄'), - ('\u{10efc}', '𐼜'), - ('𐼧', '𐼧'), - ('𐼰', '\u{10f50}'), - ('𐽰', '\u{10f85}'), - ('𐾰', '𐿄'), - ('𐿠', '𐿶'), - ('𑀀', '\u{11046}'), - ('𑁦', '𑁵'), - ('\u{1107f}', '\u{110ba}'), - ('\u{110c2}', '\u{110c2}'), - ('𑃐', '𑃨'), - ('𑃰', '𑃹'), - ('\u{11100}', '\u{11134}'), - ('𑄶', '𑄿'), - ('𑅄', '𑅇'), - ('𑅐', '\u{11173}'), - ('𑅶', '𑅶'), - ('\u{11180}', '𑇄'), - ('\u{111c9}', '\u{111cc}'), - ('𑇎', '𑇚'), - ('𑇜', '𑇜'), - ('𑈀', '𑈑'), - ('𑈓', '\u{11237}'), - ('\u{1123e}', '\u{11241}'), - ('𑊀', '𑊆'), - ('𑊈', '𑊈'), - ('𑊊', '𑊍'), - ('𑊏', '𑊝'), - ('𑊟', '𑊨'), - ('𑊰', '\u{112ea}'), - ('𑋰', '𑋹'), - ('\u{11300}', '𑌃'), - ('𑌅', '𑌌'), - ('𑌏', '𑌐'), - ('𑌓', '𑌨'), - ('𑌪', '𑌰'), - ('𑌲', '𑌳'), - ('𑌵', '𑌹'), - ('\u{1133b}', '𑍄'), - ('𑍇', '𑍈'), - ('𑍋', '\u{1134d}'), - ('𑍐', '𑍐'), - ('\u{11357}', '\u{11357}'), - ('𑍝', '𑍣'), - ('\u{11366}', '\u{1136c}'), - ('\u{11370}', '\u{11374}'), - ('𑎀', '𑎉'), - ('𑎋', '𑎋'), - ('𑎎', '𑎎'), - ('𑎐', '𑎵'), - ('𑎷', '\u{113c0}'), - ('\u{113c2}', '\u{113c2}'), - ('\u{113c5}', '\u{113c5}'), - ('\u{113c7}', '𑏊'), - ('𑏌', '𑏓'), - ('\u{113e1}', '\u{113e2}'), - ('𑐀', '𑑊'), - ('𑑐', '𑑙'), - ('\u{1145e}', '𑑡'), - ('𑒀', '𑓅'), - ('𑓇', '𑓇'), - ('𑓐', '𑓙'), - ('𑖀', '\u{115b5}'), - ('𑖸', '\u{115c0}'), - ('𑗘', '\u{115dd}'), - ('𑘀', '\u{11640}'), - ('𑙄', '𑙄'), - ('𑙐', '𑙙'), - ('𑚀', '𑚸'), - ('𑛀', '𑛉'), - ('𑛐', '𑛣'), - ('𑜀', '𑜚'), - ('\u{1171d}', '\u{1172b}'), - ('𑜰', '𑜹'), - ('𑝀', '𑝆'), - ('𑠀', '\u{1183a}'), - ('𑢠', '𑣩'), - ('𑣿', '𑤆'), - ('𑤉', '𑤉'), - ('𑤌', '𑤓'), - ('𑤕', '𑤖'), - ('𑤘', '𑤵'), - ('𑤷', '𑤸'), - ('\u{1193b}', '\u{11943}'), - ('𑥐', '𑥙'), - ('𑦠', '𑦧'), - ('𑦪', '\u{119d7}'), - ('\u{119da}', '𑧡'), - ('𑧣', '𑧤'), - ('𑨀', '\u{11a3e}'), - ('\u{11a47}', '\u{11a47}'), - ('𑩐', '\u{11a99}'), - ('𑪝', '𑪝'), - ('𑪰', '𑫸'), - ('𑯀', '𑯠'), - ('𑯰', '𑯹'), - ('𑰀', '𑰈'), - ('𑰊', '\u{11c36}'), - ('\u{11c38}', '𑱀'), - ('𑱐', '𑱙'), - ('𑱲', '𑲏'), - ('\u{11c92}', '\u{11ca7}'), - ('𑲩', '\u{11cb6}'), - ('𑴀', '𑴆'), - ('𑴈', '𑴉'), - ('𑴋', '\u{11d36}'), - ('\u{11d3a}', '\u{11d3a}'), - ('\u{11d3c}', '\u{11d3d}'), - ('\u{11d3f}', '\u{11d47}'), - ('𑵐', '𑵙'), - ('𑵠', '𑵥'), - ('𑵧', '𑵨'), - ('𑵪', '𑶎'), - ('\u{11d90}', '\u{11d91}'), - ('𑶓', '𑶘'), - ('𑶠', '𑶩'), - ('𑻠', '𑻶'), - ('\u{11f00}', '𑼐'), - ('𑼒', 
'\u{11f3a}'), - ('𑼾', '\u{11f42}'), - ('𑽐', '\u{11f5a}'), - ('𑾰', '𑾰'), - ('𒀀', '𒎙'), - ('𒐀', '𒑮'), - ('𒒀', '𒕃'), - ('𒾐', '𒿰'), - ('𓀀', '𓐯'), - ('\u{13440}', '\u{13455}'), - ('𓑠', '𔏺'), - ('𔐀', '𔙆'), - ('𖄀', '𖄹'), - ('𖠀', '𖨸'), - ('𖩀', '𖩞'), - ('𖩠', '𖩩'), - ('𖩰', '𖪾'), - ('𖫀', '𖫉'), - ('𖫐', '𖫭'), - ('\u{16af0}', '\u{16af4}'), - ('𖬀', '\u{16b36}'), - ('𖭀', '𖭃'), - ('𖭐', '𖭙'), - ('𖭣', '𖭷'), - ('𖭽', '𖮏'), - ('𖵀', '𖵬'), - ('𖵰', '𖵹'), - ('𖹀', '𖹿'), - ('𖼀', '𖽊'), - ('\u{16f4f}', '𖾇'), - ('\u{16f8f}', '𖾟'), - ('𖿠', '𖿡'), - ('𖿣', '\u{16fe4}'), - ('\u{16ff0}', '\u{16ff1}'), - ('𗀀', '𘟷'), - ('𘠀', '𘳕'), - ('𘳿', '𘴈'), - ('𚿰', '𚿳'), - ('𚿵', '𚿻'), - ('𚿽', '𚿾'), - ('𛀀', '𛄢'), - ('𛄲', '𛄲'), - ('𛅐', '𛅒'), - ('𛅕', '𛅕'), - ('𛅤', '𛅧'), - ('𛅰', '𛋻'), - ('𛰀', '𛱪'), - ('𛱰', '𛱼'), - ('𛲀', '𛲈'), - ('𛲐', '𛲙'), - ('\u{1bc9d}', '\u{1bc9e}'), - ('𜳰', '𜳹'), - ('\u{1cf00}', '\u{1cf2d}'), - ('\u{1cf30}', '\u{1cf46}'), - ('\u{1d165}', '\u{1d169}'), - ('\u{1d16d}', '\u{1d172}'), - ('\u{1d17b}', '\u{1d182}'), - ('\u{1d185}', '\u{1d18b}'), - ('\u{1d1aa}', '\u{1d1ad}'), - ('\u{1d242}', '\u{1d244}'), - ('𝐀', '𝑔'), - ('𝑖', '𝒜'), - ('𝒞', '𝒟'), - ('𝒢', '𝒢'), - ('𝒥', '𝒦'), - ('𝒩', '𝒬'), - ('𝒮', '𝒹'), - ('𝒻', '𝒻'), - ('𝒽', '𝓃'), - ('𝓅', '𝔅'), - ('𝔇', '𝔊'), - ('𝔍', '𝔔'), - ('𝔖', '𝔜'), - ('𝔞', '𝔹'), - ('𝔻', '𝔾'), - ('𝕀', '𝕄'), - ('𝕆', '𝕆'), - ('𝕊', '𝕐'), - ('𝕒', '𝚥'), - ('𝚨', '𝛀'), - ('𝛂', '𝛚'), - ('𝛜', '𝛺'), - ('𝛼', '𝜔'), - ('𝜖', '𝜴'), - ('𝜶', '𝝎'), - ('𝝐', '𝝮'), - ('𝝰', '𝞈'), - ('𝞊', '𝞨'), - ('𝞪', '𝟂'), - ('𝟄', '𝟋'), - ('𝟎', '𝟿'), - ('\u{1da00}', '\u{1da36}'), - ('\u{1da3b}', '\u{1da6c}'), - ('\u{1da75}', '\u{1da75}'), - ('\u{1da84}', '\u{1da84}'), - ('\u{1da9b}', '\u{1da9f}'), - ('\u{1daa1}', '\u{1daaf}'), - ('𝼀', '𝼞'), - ('𝼥', '𝼪'), - ('\u{1e000}', '\u{1e006}'), - ('\u{1e008}', '\u{1e018}'), - ('\u{1e01b}', '\u{1e021}'), - ('\u{1e023}', '\u{1e024}'), - ('\u{1e026}', '\u{1e02a}'), - ('𞀰', '𞁭'), - ('\u{1e08f}', '\u{1e08f}'), - ('𞄀', '𞄬'), - ('\u{1e130}', '𞄽'), - ('𞅀', '𞅉'), - ('𞅎', '𞅎'), - ('𞊐', '\u{1e2ae}'), - ('𞋀', '𞋹'), - ('𞓐', '𞓹'), - ('𞗐', '𞗺'), - ('𞟠', '𞟦'), - ('𞟨', '𞟫'), - ('𞟭', '𞟮'), - ('𞟰', '𞟾'), - ('𞠀', '𞣄'), - ('\u{1e8d0}', '\u{1e8d6}'), - ('𞤀', '𞥋'), - ('𞥐', '𞥙'), - ('𞸀', '𞸃'), - ('𞸅', '𞸟'), - ('𞸡', '𞸢'), - ('𞸤', '𞸤'), - ('𞸧', '𞸧'), - ('𞸩', '𞸲'), - ('𞸴', '𞸷'), - ('𞸹', '𞸹'), - ('𞸻', '𞸻'), - ('𞹂', '𞹂'), - ('𞹇', '𞹇'), - ('𞹉', '𞹉'), - ('𞹋', '𞹋'), - ('𞹍', '𞹏'), - ('𞹑', '𞹒'), - ('𞹔', '𞹔'), - ('𞹗', '𞹗'), - ('𞹙', '𞹙'), - ('𞹛', '𞹛'), - ('𞹝', '𞹝'), - ('𞹟', '𞹟'), - ('𞹡', '𞹢'), - ('𞹤', '𞹤'), - ('𞹧', '𞹪'), - ('𞹬', '𞹲'), - ('𞹴', '𞹷'), - ('𞹹', '𞹼'), - ('𞹾', '𞹾'), - ('𞺀', '𞺉'), - ('𞺋', '𞺛'), - ('𞺡', '𞺣'), - ('𞺥', '𞺩'), - ('𞺫', '𞺻'), - ('🄰', '🅉'), - ('🅐', '🅩'), - ('🅰', '🆉'), - ('🯰', '🯹'), - ('𠀀', '𪛟'), - ('𪜀', '𫜹'), - ('𫝀', '𫠝'), - ('𫠠', '𬺡'), - ('𬺰', '𮯠'), - ('𮯰', '𮹝'), - ('丽', '𪘀'), - ('𰀀', '𱍊'), - ('𱍐', '𲎯'), - ('\u{e0100}', '\u{e01ef}'), -]; diff --git a/vendor/regex-automata/src/util/utf8.rs b/vendor/regex-automata/src/util/utf8.rs deleted file mode 100644 index 6c86e8d5fd8808..00000000000000 --- a/vendor/regex-automata/src/util/utf8.rs +++ /dev/null @@ -1,191 +0,0 @@ -/*! -Utilities for dealing with UTF-8. - -This module provides some UTF-8 related helper routines, including an -incremental decoder. -*/ - -/// Returns true if and only if the given byte is considered a word character. -/// This only applies to ASCII. -/// -/// This was copied from regex-syntax so that we can use it to determine the -/// starting DFA state while searching without depending on regex-syntax. 
The -/// definition is never going to change, so there's no maintenance/bit-rot -/// hazard here. -#[cfg_attr(feature = "perf-inline", inline(always))] -pub(crate) fn is_word_byte(b: u8) -> bool { - const fn mkwordset() -> [bool; 256] { - // FIXME: Use as_usize() once const functions in traits are stable. - let mut set = [false; 256]; - set[b'_' as usize] = true; - - let mut byte = b'0'; - while byte <= b'9' { - set[byte as usize] = true; - byte += 1; - } - byte = b'A'; - while byte <= b'Z' { - set[byte as usize] = true; - byte += 1; - } - byte = b'a'; - while byte <= b'z' { - set[byte as usize] = true; - byte += 1; - } - set - } - const WORD: [bool; 256] = mkwordset(); - WORD[b as usize] -} - -/// Decodes the next UTF-8 encoded codepoint from the given byte slice. -/// -/// If no valid encoding of a codepoint exists at the beginning of the given -/// byte slice, then the first byte is returned instead. -/// -/// This returns `None` if and only if `bytes` is empty. -/// -/// This never panics. -/// -/// *WARNING*: This is not designed for performance. If you're looking for a -/// fast UTF-8 decoder, this is not it. If you feel like you need one in this -/// crate, then please file an issue and discuss your use case. -#[cfg_attr(feature = "perf-inline", inline(always))] -pub(crate) fn decode(bytes: &[u8]) -> Option> { - if bytes.is_empty() { - return None; - } - let len = match len(bytes[0]) { - None => return Some(Err(bytes[0])), - Some(len) if len > bytes.len() => return Some(Err(bytes[0])), - Some(1) => return Some(Ok(char::from(bytes[0]))), - Some(len) => len, - }; - match core::str::from_utf8(&bytes[..len]) { - Ok(s) => Some(Ok(s.chars().next().unwrap())), - Err(_) => Some(Err(bytes[0])), - } -} - -/// Decodes the last UTF-8 encoded codepoint from the given byte slice. -/// -/// If no valid encoding of a codepoint exists at the end of the given byte -/// slice, then the last byte is returned instead. -/// -/// This returns `None` if and only if `bytes` is empty. -#[cfg_attr(feature = "perf-inline", inline(always))] -pub(crate) fn decode_last(bytes: &[u8]) -> Option> { - if bytes.is_empty() { - return None; - } - let mut start = bytes.len() - 1; - let limit = bytes.len().saturating_sub(4); - while start > limit && !is_leading_or_invalid_byte(bytes[start]) { - start -= 1; - } - match decode(&bytes[start..]) { - None => None, - Some(Ok(ch)) => Some(Ok(ch)), - Some(Err(_)) => Some(Err(bytes[bytes.len() - 1])), - } -} - -/// Given a UTF-8 leading byte, this returns the total number of code units -/// in the following encoded codepoint. -/// -/// If the given byte is not a valid UTF-8 leading byte, then this returns -/// `None`. -#[cfg_attr(feature = "perf-inline", inline(always))] -fn len(byte: u8) -> Option { - match byte { - 0b0000_0000..=0b0111_1111 => Some(1), - 0b1000_0000..=0b1011_1111 => None, - 0b1100_0000..=0b1101_1111 => Some(2), - 0b1110_0000..=0b1110_1111 => Some(3), - 0b1111_0000..=0b1111_0111 => Some(4), - _ => None, - } -} - -/// Returns true if and only if the given offset in the given bytes falls on a -/// valid UTF-8 encoded codepoint boundary. -/// -/// If `bytes` is not valid UTF-8, then the behavior of this routine is -/// unspecified. -#[cfg_attr(feature = "perf-inline", inline(always))] -pub(crate) fn is_boundary(bytes: &[u8], i: usize) -> bool { - match bytes.get(i) { - // The position at the end of the bytes always represents an empty - // string, which is a valid boundary. But anything after that doesn't - // make much sense to call valid a boundary. 
- None => i == bytes.len(), - // Other than ASCII (where the most significant bit is never set), - // valid starting bytes always have their most significant two bits - // set, where as continuation bytes never have their second most - // significant bit set. Therefore, this only returns true when bytes[i] - // corresponds to a byte that begins a valid UTF-8 encoding of a - // Unicode scalar value. - Some(&b) => b <= 0b0111_1111 || b >= 0b1100_0000, - } -} - -/// Returns true if and only if the given byte is either a valid leading UTF-8 -/// byte, or is otherwise an invalid byte that can never appear anywhere in a -/// valid UTF-8 sequence. -#[cfg_attr(feature = "perf-inline", inline(always))] -fn is_leading_or_invalid_byte(b: u8) -> bool { - // In the ASCII case, the most significant bit is never set. The leading - // byte of a 2/3/4-byte sequence always has the top two most significant - // bits set. For bytes that can never appear anywhere in valid UTF-8, this - // also returns true, since every such byte has its two most significant - // bits set: - // - // \xC0 :: 11000000 - // \xC1 :: 11000001 - // \xF5 :: 11110101 - // \xF6 :: 11110110 - // \xF7 :: 11110111 - // \xF8 :: 11111000 - // \xF9 :: 11111001 - // \xFA :: 11111010 - // \xFB :: 11111011 - // \xFC :: 11111100 - // \xFD :: 11111101 - // \xFE :: 11111110 - // \xFF :: 11111111 - (b & 0b1100_0000) != 0b1000_0000 -} - -/* -/// Returns the smallest possible index of the next valid UTF-8 sequence -/// starting after `i`. -/// -/// For all inputs, including invalid UTF-8 and any value of `i`, the return -/// value is guaranteed to be greater than `i`. (If there is no value greater -/// than `i` that fits in `usize`, then this panics.) -/// -/// Generally speaking, this should only be called on `text` when it is -/// permitted to assume that it is valid UTF-8 and where either `i >= -/// text.len()` or where `text[i]` is a leading byte of a UTF-8 sequence. -/// -/// NOTE: This method was used in a previous conception of iterators where we -/// specifically tried to skip over empty matches that split a codepoint by -/// simply requiring that our next search begin at the beginning of codepoint. -/// But we ended up changing that technique to always advance by 1 byte and -/// then filter out matches that split a codepoint after-the-fact. Thus, we no -/// longer use this method. But I've kept it around in case we want to switch -/// back to this approach. Its guarantees are a little subtle, so I'd prefer -/// not to rebuild it from whole cloth. -pub(crate) fn next(text: &[u8], i: usize) -> usize { - let b = match text.get(i) { - None => return i.checked_add(1).unwrap(), - Some(&b) => b, - }; - // For cases where we see an invalid UTF-8 byte, there isn't much we can do - // other than just start at the next byte. - let inc = len(b).unwrap_or(1); - i.checked_add(inc).unwrap() -} -*/ diff --git a/vendor/regex-automata/src/util/wire.rs b/vendor/regex-automata/src/util/wire.rs deleted file mode 100644 index 210ab6f4b24e17..00000000000000 --- a/vendor/regex-automata/src/util/wire.rs +++ /dev/null @@ -1,947 +0,0 @@ -/*! -Types and routines that support the wire format of finite automata. - -Currently, this module just exports a few error types and some small helpers -for deserializing [dense DFAs](crate::dfa::dense::DFA) using correct alignment. -*/ - -/* -A collection of helper functions, types and traits for serializing automata. 
- -This crate defines its own bespoke serialization mechanism for some structures -provided in the public API, namely, DFAs. A bespoke mechanism was developed -primarily because structures like automata demand a specific binary format. -Attempting to encode their rich structure in an existing serialization -format is just not feasible. Moreover, the format for each structure is -generally designed such that deserialization is cheap. More specifically, that -deserialization can be done in constant time. (The idea being that you can -embed it into your binary or mmap it, and then use it immediately.) - -In order to achieve this, the dense and sparse DFAs in this crate use an -in-memory representation that very closely corresponds to its binary serialized -form. This pervades and complicates everything, and in some cases, requires -dealing with alignment and reasoning about safety. - -This technique does have major advantages. In particular, it permits doing -the potentially costly work of compiling a finite state machine in an offline -manner, and then loading it at runtime not only without having to re-compile -the regex, but even without the code required to do the compilation. This, for -example, permits one to use a pre-compiled DFA not only in environments without -Rust's standard library, but also in environments without a heap. - -In the code below, whenever we insert some kind of padding, it's to enforce a -4-byte alignment, unless otherwise noted. Namely, u32 is the only state ID type -supported. (In a previous version of this library, DFAs were generic over the -state ID representation.) - -Also, serialization generally requires the caller to specify endianness, -where as deserialization always assumes native endianness (otherwise cheap -deserialization would be impossible). This implies that serializing a structure -generally requires serializing both its big-endian and little-endian variants, -and then loading the correct one based on the target's endianness. -*/ - -use core::{cmp, mem::size_of}; - -#[cfg(feature = "alloc")] -use alloc::{vec, vec::Vec}; - -use crate::util::{ - int::Pointer, - primitives::{PatternID, PatternIDError, StateID, StateIDError}, -}; - -/// A hack to align a smaller type `B` with a bigger type `T`. -/// -/// The usual use of this is with `B = [u8]` and `T = u32`. That is, -/// it permits aligning a sequence of bytes on a 4-byte boundary. This -/// is useful in contexts where one wants to embed a serialized [dense -/// DFA](crate::dfa::dense::DFA) into a Rust a program while guaranteeing the -/// alignment required for the DFA. -/// -/// See [`dense::DFA::from_bytes`](crate::dfa::dense::DFA::from_bytes) for an -/// example of how to use this type. -#[repr(C)] -#[derive(Debug)] -pub struct AlignAs { - /// A zero-sized field indicating the alignment we want. - pub _align: [T; 0], - /// A possibly non-sized field containing a sequence of bytes. - pub bytes: B, -} - -/// An error that occurs when serializing an object from this crate. -/// -/// Serialization, as used in this crate, universally refers to the process -/// of transforming a structure (like a DFA) into a custom binary format -/// represented by `&[u8]`. To this end, serialization is generally infallible. -/// However, it can fail when caller provided buffer sizes are too small. When -/// that occurs, a serialization error is reported. -/// -/// A `SerializeError` provides no introspection capabilities. Its only -/// supported operation is conversion to a human readable error message. 
-/// -/// This error type implements the `std::error::Error` trait only when the -/// `std` feature is enabled. Otherwise, this type is defined in all -/// configurations. -#[derive(Debug)] -pub struct SerializeError { - /// The name of the thing that a buffer is too small for. - /// - /// Currently, the only kind of serialization error is one that is - /// committed by a caller: providing a destination buffer that is too - /// small to fit the serialized object. This makes sense conceptually, - /// since every valid inhabitant of a type should be serializable. - /// - /// This is somewhat exposed in the public API of this crate. For example, - /// the `to_bytes_{big,little}_endian` APIs return a `Vec` and are - /// guaranteed to never panic or error. This is only possible because the - /// implementation guarantees that it will allocate a `Vec` that is - /// big enough. - /// - /// In summary, if a new serialization error kind needs to be added, then - /// it will need careful consideration. - what: &'static str, -} - -impl SerializeError { - pub(crate) fn buffer_too_small(what: &'static str) -> SerializeError { - SerializeError { what } - } -} - -impl core::fmt::Display for SerializeError { - fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { - write!(f, "destination buffer is too small to write {}", self.what) - } -} - -#[cfg(feature = "std")] -impl std::error::Error for SerializeError {} - -/// An error that occurs when deserializing an object defined in this crate. -/// -/// Serialization, as used in this crate, universally refers to the process -/// of transforming a structure (like a DFA) into a custom binary format -/// represented by `&[u8]`. Deserialization, then, refers to the process of -/// cheaply converting this binary format back to the object's in-memory -/// representation as defined in this crate. To the extent possible, -/// deserialization will report this error whenever this process fails. -/// -/// A `DeserializeError` provides no introspection capabilities. Its only -/// supported operation is conversion to a human readable error message. -/// -/// This error type implements the `std::error::Error` trait only when the -/// `std` feature is enabled. Otherwise, this type is defined in all -/// configurations. 
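These error types back the dense DFA's serialize/deserialize round trip, which is the user-facing path the helpers below exist for. A sketch of that round trip, assuming the vendored regex-automata 0.4 `dfa` APIs (`to_bytes_native_endian`, `from_bytes`, `try_search_fwd`) are compiled in:

```
use regex_automata::{
    dfa::{dense::DFA, Automaton},
    HalfMatch, Input,
};

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let original = DFA::new("foo[0-9]+")?;

    // Serialize in native endianness; deserialization always assumes the
    // target's endianness, so portable embedders keep both LE and BE copies.
    let (buf, pad) = original.to_bytes_native_endian();

    // `buf[pad..]` is the aligned, serialized DFA; from_bytes borrows it
    // without copying, which is why alignment and endianness are validated.
    let dfa: DFA<&[u32]> = DFA::from_bytes(&buf[pad..])?.0;

    let expected = HalfMatch::must(0, 8);
    assert_eq!(Some(expected), dfa.try_search_fwd(&Input::new("foo12345"))?);
    Ok(())
}
```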
-#[derive(Debug)] -pub struct DeserializeError(DeserializeErrorKind); - -#[derive(Debug)] -enum DeserializeErrorKind { - Generic { msg: &'static str }, - BufferTooSmall { what: &'static str }, - InvalidUsize { what: &'static str }, - VersionMismatch { expected: u32, found: u32 }, - EndianMismatch { expected: u32, found: u32 }, - AlignmentMismatch { alignment: usize, address: usize }, - LabelMismatch { expected: &'static str }, - ArithmeticOverflow { what: &'static str }, - PatternID { err: PatternIDError, what: &'static str }, - StateID { err: StateIDError, what: &'static str }, -} - -impl DeserializeError { - pub(crate) fn generic(msg: &'static str) -> DeserializeError { - DeserializeError(DeserializeErrorKind::Generic { msg }) - } - - pub(crate) fn buffer_too_small(what: &'static str) -> DeserializeError { - DeserializeError(DeserializeErrorKind::BufferTooSmall { what }) - } - - fn invalid_usize(what: &'static str) -> DeserializeError { - DeserializeError(DeserializeErrorKind::InvalidUsize { what }) - } - - fn version_mismatch(expected: u32, found: u32) -> DeserializeError { - DeserializeError(DeserializeErrorKind::VersionMismatch { - expected, - found, - }) - } - - fn endian_mismatch(expected: u32, found: u32) -> DeserializeError { - DeserializeError(DeserializeErrorKind::EndianMismatch { - expected, - found, - }) - } - - fn alignment_mismatch( - alignment: usize, - address: usize, - ) -> DeserializeError { - DeserializeError(DeserializeErrorKind::AlignmentMismatch { - alignment, - address, - }) - } - - fn label_mismatch(expected: &'static str) -> DeserializeError { - DeserializeError(DeserializeErrorKind::LabelMismatch { expected }) - } - - fn arithmetic_overflow(what: &'static str) -> DeserializeError { - DeserializeError(DeserializeErrorKind::ArithmeticOverflow { what }) - } - - fn pattern_id_error( - err: PatternIDError, - what: &'static str, - ) -> DeserializeError { - DeserializeError(DeserializeErrorKind::PatternID { err, what }) - } - - pub(crate) fn state_id_error( - err: StateIDError, - what: &'static str, - ) -> DeserializeError { - DeserializeError(DeserializeErrorKind::StateID { err, what }) - } -} - -#[cfg(feature = "std")] -impl std::error::Error for DeserializeError {} - -impl core::fmt::Display for DeserializeError { - fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { - use self::DeserializeErrorKind::*; - - match self.0 { - Generic { msg } => write!(f, "{msg}"), - BufferTooSmall { what } => { - write!(f, "buffer is too small to read {what}") - } - InvalidUsize { what } => { - write!(f, "{what} is too big to fit in a usize") - } - VersionMismatch { expected, found } => write!( - f, - "unsupported version: \ - expected version {expected} but found version {found}", - ), - EndianMismatch { expected, found } => write!( - f, - "endianness mismatch: expected 0x{expected:X} but \ - got 0x{found:X}. 
(Are you trying to load an object \ - serialized with a different endianness?)", - ), - AlignmentMismatch { alignment, address } => write!( - f, - "alignment mismatch: slice starts at address 0x{address:X}, \ - which is not aligned to a {alignment} byte boundary", - ), - LabelMismatch { expected } => write!( - f, - "label mismatch: start of serialized object should \ - contain a NUL terminated {expected:?} label, but a different \ - label was found", - ), - ArithmeticOverflow { what } => { - write!(f, "arithmetic overflow for {what}") - } - PatternID { ref err, what } => { - write!(f, "failed to read pattern ID for {what}: {err}") - } - StateID { ref err, what } => { - write!(f, "failed to read state ID for {what}: {err}") - } - } - } -} - -/// Safely converts a `&[u32]` to `&[StateID]` with zero cost. -#[cfg_attr(feature = "perf-inline", inline(always))] -pub(crate) fn u32s_to_state_ids(slice: &[u32]) -> &[StateID] { - // SAFETY: This is safe because StateID is defined to have the same memory - // representation as a u32 (it is repr(transparent)). While not every u32 - // is a "valid" StateID, callers are not permitted to rely on the validity - // of StateIDs for memory safety. It can only lead to logical errors. (This - // is why StateID::new_unchecked is safe.) - unsafe { - core::slice::from_raw_parts( - slice.as_ptr().cast::(), - slice.len(), - ) - } -} - -/// Safely converts a `&mut [u32]` to `&mut [StateID]` with zero cost. -pub(crate) fn u32s_to_state_ids_mut(slice: &mut [u32]) -> &mut [StateID] { - // SAFETY: This is safe because StateID is defined to have the same memory - // representation as a u32 (it is repr(transparent)). While not every u32 - // is a "valid" StateID, callers are not permitted to rely on the validity - // of StateIDs for memory safety. It can only lead to logical errors. (This - // is why StateID::new_unchecked is safe.) - unsafe { - core::slice::from_raw_parts_mut( - slice.as_mut_ptr().cast::(), - slice.len(), - ) - } -} - -/// Safely converts a `&[u32]` to `&[PatternID]` with zero cost. -#[cfg_attr(feature = "perf-inline", inline(always))] -pub(crate) fn u32s_to_pattern_ids(slice: &[u32]) -> &[PatternID] { - // SAFETY: This is safe because PatternID is defined to have the same - // memory representation as a u32 (it is repr(transparent)). While not - // every u32 is a "valid" PatternID, callers are not permitted to rely - // on the validity of PatternIDs for memory safety. It can only lead to - // logical errors. (This is why PatternID::new_unchecked is safe.) - unsafe { - core::slice::from_raw_parts( - slice.as_ptr().cast::(), - slice.len(), - ) - } -} - -/// Checks that the given slice has an alignment that matches `T`. -/// -/// This is useful for checking that a slice has an appropriate alignment -/// before casting it to a &[T]. Note though that alignment is not itself -/// sufficient to perform the cast for any `T`. -pub(crate) fn check_alignment( - slice: &[u8], -) -> Result<(), DeserializeError> { - let alignment = core::mem::align_of::(); - let address = slice.as_ptr().as_usize(); - if address % alignment == 0 { - return Ok(()); - } - Err(DeserializeError::alignment_mismatch(alignment, address)) -} - -/// Reads a possibly empty amount of padding, up to 7 bytes, from the beginning -/// of the given slice. All padding bytes must be NUL bytes. -/// -/// This is useful because it can be theoretically necessary to pad the -/// beginning of a serialized object with NUL bytes to ensure that it starts -/// at a correctly aligned address. 
These padding bytes should come immediately -/// before the label. -/// -/// This returns the number of bytes read from the given slice. -pub(crate) fn skip_initial_padding(slice: &[u8]) -> usize { - let mut nread = 0; - while nread < 7 && nread < slice.len() && slice[nread] == 0 { - nread += 1; - } - nread -} - -/// Allocate a byte buffer of the given size, along with some initial padding -/// such that `buf[padding..]` has the same alignment as `T`, where the -/// alignment of `T` must be at most `8`. In particular, callers should treat -/// the first N bytes (second return value) as padding bytes that must not be -/// overwritten. In all cases, the following identity holds: -/// -/// ```ignore -/// let (buf, padding) = alloc_aligned_buffer::(SIZE); -/// assert_eq!(SIZE, buf[padding..].len()); -/// ``` -/// -/// In practice, padding is often zero. -/// -/// The requirement for `8` as a maximum here is somewhat arbitrary. In -/// practice, we never need anything bigger in this crate, and so this function -/// does some sanity asserts under the assumption of a max alignment of `8`. -#[cfg(feature = "alloc")] -pub(crate) fn alloc_aligned_buffer(size: usize) -> (Vec, usize) { - // NOTE: This is a kludge because there's no easy way to allocate a Vec - // with an alignment guaranteed to be greater than 1. We could create a - // Vec, but this cannot be safely transmuted to a Vec without - // concern, since reallocing or dropping the Vec is UB (different - // alignment than the initial allocation). We could define a wrapper type - // to manage this for us, but it seems like more machinery than it's worth. - let buf = vec![0; size]; - let align = core::mem::align_of::(); - let address = buf.as_ptr().as_usize(); - if address % align == 0 { - return (buf, 0); - } - // Let's try this again. We have to create a totally new alloc with - // the maximum amount of bytes we might need. We can't just extend our - // pre-existing 'buf' because that might create a new alloc with a - // different alignment. - let extra = align - 1; - let mut buf = vec![0; size + extra]; - let address = buf.as_ptr().as_usize(); - // The code below handles the case where 'address' is aligned to T, so if - // we got lucky and 'address' is now aligned to T (when it previously - // wasn't), then we're done. - if address % align == 0 { - buf.truncate(size); - return (buf, 0); - } - let padding = ((address & !(align - 1)).checked_add(align).unwrap()) - .checked_sub(address) - .unwrap(); - assert!(padding <= 7, "padding of {padding} is bigger than 7"); - assert!( - padding <= extra, - "padding of {padding} is bigger than extra {extra} bytes", - ); - buf.truncate(size + padding); - assert_eq!(size + padding, buf.len()); - assert_eq!( - 0, - buf[padding..].as_ptr().as_usize() % align, - "expected end of initial padding to be aligned to {align}", - ); - (buf, padding) -} - -/// Reads a NUL terminated label starting at the beginning of the given slice. -/// -/// If a NUL terminated label could not be found, then an error is returned. -/// Similarly, if a label is found but doesn't match the expected label, then -/// an error is returned. -/// -/// Upon success, the total number of bytes read (including padding bytes) is -/// returned. -pub(crate) fn read_label( - slice: &[u8], - expected_label: &'static str, -) -> Result { - // Set an upper bound on how many bytes we scan for a NUL. Since no label - // in this crate is longer than 256 bytes, if we can't find one within that - // range, then we have corrupted data. 
- let first_nul = - slice[..cmp::min(slice.len(), 256)].iter().position(|&b| b == 0); - let first_nul = match first_nul { - Some(first_nul) => first_nul, - None => { - return Err(DeserializeError::generic( - "could not find NUL terminated label \ - at start of serialized object", - )); - } - }; - let len = first_nul + padding_len(first_nul); - if slice.len() < len { - return Err(DeserializeError::generic( - "could not find properly sized label at start of serialized object" - )); - } - if expected_label.as_bytes() != &slice[..first_nul] { - return Err(DeserializeError::label_mismatch(expected_label)); - } - Ok(len) -} - -/// Writes the given label to the buffer as a NUL terminated string. The label -/// given must not contain NUL, otherwise this will panic. Similarly, the label -/// must not be longer than 255 bytes, otherwise this will panic. -/// -/// Additional NUL bytes are written as necessary to ensure that the number of -/// bytes written is always a multiple of 4. -/// -/// Upon success, the total number of bytes written (including padding) is -/// returned. -pub(crate) fn write_label( - label: &str, - dst: &mut [u8], -) -> Result { - let nwrite = write_label_len(label); - if dst.len() < nwrite { - return Err(SerializeError::buffer_too_small("label")); - } - dst[..label.len()].copy_from_slice(label.as_bytes()); - for i in 0..(nwrite - label.len()) { - dst[label.len() + i] = 0; - } - assert_eq!(nwrite % 4, 0); - Ok(nwrite) -} - -/// Returns the total number of bytes (including padding) that would be written -/// for the given label. This panics if the given label contains a NUL byte or -/// is longer than 255 bytes. (The size restriction exists so that searching -/// for a label during deserialization can be done in small bounded space.) -pub(crate) fn write_label_len(label: &str) -> usize { - assert!(label.len() <= 255, "label must not be longer than 255 bytes"); - assert!(label.bytes().all(|b| b != 0), "label must not contain NUL bytes"); - let label_len = label.len() + 1; // +1 for the NUL terminator - label_len + padding_len(label_len) -} - -/// Reads the endianness check from the beginning of the given slice and -/// confirms that the endianness of the serialized object matches the expected -/// endianness. If the slice is too small or if the endianness check fails, -/// this returns an error. -/// -/// Upon success, the total number of bytes read is returned. -pub(crate) fn read_endianness_check( - slice: &[u8], -) -> Result { - let (n, nr) = try_read_u32(slice, "endianness check")?; - assert_eq!(nr, write_endianness_check_len()); - if n != 0xFEFF { - return Err(DeserializeError::endian_mismatch(0xFEFF, n)); - } - Ok(nr) -} - -/// Writes 0xFEFF as an integer using the given endianness. -/// -/// This is useful for writing into the header of a serialized object. It can -/// be read during deserialization as a sanity check to ensure the proper -/// endianness is used. -/// -/// Upon success, the total number of bytes written is returned. -pub(crate) fn write_endianness_check( - dst: &mut [u8], -) -> Result { - let nwrite = write_endianness_check_len(); - if dst.len() < nwrite { - return Err(SerializeError::buffer_too_small("endianness check")); - } - E::write_u32(0xFEFF, dst); - Ok(nwrite) -} - -/// Returns the number of bytes written by the endianness check. -pub(crate) fn write_endianness_check_len() -> usize { - size_of::() -} - -/// Reads a version number from the beginning of the given slice and confirms -/// that is matches the expected version number given. 
If the slice is too -/// small or if the version numbers aren't equivalent, this returns an error. -/// -/// Upon success, the total number of bytes read is returned. -/// -/// N.B. Currently, we require that the version number is exactly equivalent. -/// In the future, if we bump the version number without a semver bump, then -/// we'll need to relax this a bit and support older versions. -pub(crate) fn read_version( - slice: &[u8], - expected_version: u32, -) -> Result { - let (n, nr) = try_read_u32(slice, "version")?; - assert_eq!(nr, write_version_len()); - if n != expected_version { - return Err(DeserializeError::version_mismatch(expected_version, n)); - } - Ok(nr) -} - -/// Writes the given version number to the beginning of the given slice. -/// -/// This is useful for writing into the header of a serialized object. It can -/// be read during deserialization as a sanity check to ensure that the library -/// code supports the format of the serialized object. -/// -/// Upon success, the total number of bytes written is returned. -pub(crate) fn write_version( - version: u32, - dst: &mut [u8], -) -> Result { - let nwrite = write_version_len(); - if dst.len() < nwrite { - return Err(SerializeError::buffer_too_small("version number")); - } - E::write_u32(version, dst); - Ok(nwrite) -} - -/// Returns the number of bytes written by writing the version number. -pub(crate) fn write_version_len() -> usize { - size_of::() -} - -/// Reads a pattern ID from the given slice. If the slice has insufficient -/// length, then this panics. If the deserialized integer exceeds the pattern -/// ID limit for the current target, then this returns an error. -/// -/// Upon success, this also returns the number of bytes read. -pub(crate) fn read_pattern_id( - slice: &[u8], - what: &'static str, -) -> Result<(PatternID, usize), DeserializeError> { - let bytes: [u8; PatternID::SIZE] = - slice[..PatternID::SIZE].try_into().unwrap(); - let pid = PatternID::from_ne_bytes(bytes) - .map_err(|err| DeserializeError::pattern_id_error(err, what))?; - Ok((pid, PatternID::SIZE)) -} - -/// Reads a pattern ID from the given slice. If the slice has insufficient -/// length, then this panics. Otherwise, the deserialized integer is assumed -/// to be a valid pattern ID. -/// -/// This also returns the number of bytes read. -pub(crate) fn read_pattern_id_unchecked(slice: &[u8]) -> (PatternID, usize) { - let pid = PatternID::from_ne_bytes_unchecked( - slice[..PatternID::SIZE].try_into().unwrap(), - ); - (pid, PatternID::SIZE) -} - -/// Write the given pattern ID to the beginning of the given slice of bytes -/// using the specified endianness. The given slice must have length at least -/// `PatternID::SIZE`, or else this panics. Upon success, the total number of -/// bytes written is returned. -pub(crate) fn write_pattern_id( - pid: PatternID, - dst: &mut [u8], -) -> usize { - E::write_u32(pid.as_u32(), dst); - PatternID::SIZE -} - -/// Attempts to read a state ID from the given slice. If the slice has an -/// insufficient number of bytes or if the state ID exceeds the limit for -/// the current target, then this returns an error. -/// -/// Upon success, this also returns the number of bytes read. -pub(crate) fn try_read_state_id( - slice: &[u8], - what: &'static str, -) -> Result<(StateID, usize), DeserializeError> { - if slice.len() < StateID::SIZE { - return Err(DeserializeError::buffer_too_small(what)); - } - read_state_id(slice, what) -} - -/// Reads a state ID from the given slice. 
If the slice has insufficient -/// length, then this panics. If the deserialized integer exceeds the state ID -/// limit for the current target, then this returns an error. -/// -/// Upon success, this also returns the number of bytes read. -pub(crate) fn read_state_id( - slice: &[u8], - what: &'static str, -) -> Result<(StateID, usize), DeserializeError> { - let bytes: [u8; StateID::SIZE] = - slice[..StateID::SIZE].try_into().unwrap(); - let sid = StateID::from_ne_bytes(bytes) - .map_err(|err| DeserializeError::state_id_error(err, what))?; - Ok((sid, StateID::SIZE)) -} - -/// Reads a state ID from the given slice. If the slice has insufficient -/// length, then this panics. Otherwise, the deserialized integer is assumed -/// to be a valid state ID. -/// -/// This also returns the number of bytes read. -pub(crate) fn read_state_id_unchecked(slice: &[u8]) -> (StateID, usize) { - let sid = StateID::from_ne_bytes_unchecked( - slice[..StateID::SIZE].try_into().unwrap(), - ); - (sid, StateID::SIZE) -} - -/// Write the given state ID to the beginning of the given slice of bytes -/// using the specified endianness. The given slice must have length at least -/// `StateID::SIZE`, or else this panics. Upon success, the total number of -/// bytes written is returned. -pub(crate) fn write_state_id( - sid: StateID, - dst: &mut [u8], -) -> usize { - E::write_u32(sid.as_u32(), dst); - StateID::SIZE -} - -/// Try to read a u16 as a usize from the beginning of the given slice in -/// native endian format. If the slice has fewer than 2 bytes or if the -/// deserialized number cannot be represented by usize, then this returns an -/// error. The error message will include the `what` description of what is -/// being deserialized, for better error messages. `what` should be a noun in -/// singular form. -/// -/// Upon success, this also returns the number of bytes read. -pub(crate) fn try_read_u16_as_usize( - slice: &[u8], - what: &'static str, -) -> Result<(usize, usize), DeserializeError> { - try_read_u16(slice, what).and_then(|(n, nr)| { - usize::try_from(n) - .map(|n| (n, nr)) - .map_err(|_| DeserializeError::invalid_usize(what)) - }) -} - -/// Try to read a u32 as a usize from the beginning of the given slice in -/// native endian format. If the slice has fewer than 4 bytes or if the -/// deserialized number cannot be represented by usize, then this returns an -/// error. The error message will include the `what` description of what is -/// being deserialized, for better error messages. `what` should be a noun in -/// singular form. -/// -/// Upon success, this also returns the number of bytes read. -pub(crate) fn try_read_u32_as_usize( - slice: &[u8], - what: &'static str, -) -> Result<(usize, usize), DeserializeError> { - try_read_u32(slice, what).and_then(|(n, nr)| { - usize::try_from(n) - .map(|n| (n, nr)) - .map_err(|_| DeserializeError::invalid_usize(what)) - }) -} - -/// Try to read a u16 from the beginning of the given slice in native endian -/// format. If the slice has fewer than 2 bytes, then this returns an error. -/// The error message will include the `what` description of what is being -/// deserialized, for better error messages. `what` should be a noun in -/// singular form. -/// -/// Upon success, this also returns the number of bytes read. 
-pub(crate) fn try_read_u16( - slice: &[u8], - what: &'static str, -) -> Result<(u16, usize), DeserializeError> { - check_slice_len(slice, size_of::(), what)?; - Ok((read_u16(slice), size_of::())) -} - -/// Try to read a u32 from the beginning of the given slice in native endian -/// format. If the slice has fewer than 4 bytes, then this returns an error. -/// The error message will include the `what` description of what is being -/// deserialized, for better error messages. `what` should be a noun in -/// singular form. -/// -/// Upon success, this also returns the number of bytes read. -pub(crate) fn try_read_u32( - slice: &[u8], - what: &'static str, -) -> Result<(u32, usize), DeserializeError> { - check_slice_len(slice, size_of::(), what)?; - Ok((read_u32(slice), size_of::())) -} - -/// Try to read a u128 from the beginning of the given slice in native endian -/// format. If the slice has fewer than 16 bytes, then this returns an error. -/// The error message will include the `what` description of what is being -/// deserialized, for better error messages. `what` should be a noun in -/// singular form. -/// -/// Upon success, this also returns the number of bytes read. -pub(crate) fn try_read_u128( - slice: &[u8], - what: &'static str, -) -> Result<(u128, usize), DeserializeError> { - check_slice_len(slice, size_of::(), what)?; - Ok((read_u128(slice), size_of::())) -} - -/// Read a u16 from the beginning of the given slice in native endian format. -/// If the slice has fewer than 2 bytes, then this panics. -/// -/// Marked as inline to speed up sparse searching which decodes integers from -/// its automaton at search time. -#[cfg_attr(feature = "perf-inline", inline(always))] -pub(crate) fn read_u16(slice: &[u8]) -> u16 { - let bytes: [u8; 2] = slice[..size_of::()].try_into().unwrap(); - u16::from_ne_bytes(bytes) -} - -/// Read a u32 from the beginning of the given slice in native endian format. -/// If the slice has fewer than 4 bytes, then this panics. -/// -/// Marked as inline to speed up sparse searching which decodes integers from -/// its automaton at search time. -#[cfg_attr(feature = "perf-inline", inline(always))] -pub(crate) fn read_u32(slice: &[u8]) -> u32 { - let bytes: [u8; 4] = slice[..size_of::()].try_into().unwrap(); - u32::from_ne_bytes(bytes) -} - -/// Read a u128 from the beginning of the given slice in native endian format. -/// If the slice has fewer than 16 bytes, then this panics. -pub(crate) fn read_u128(slice: &[u8]) -> u128 { - let bytes: [u8; 16] = slice[..size_of::()].try_into().unwrap(); - u128::from_ne_bytes(bytes) -} - -/// Checks that the given slice has some minimal length. If it's smaller than -/// the bound given, then a "buffer too small" error is returned with `what` -/// describing what the buffer represents. -pub(crate) fn check_slice_len( - slice: &[T], - at_least_len: usize, - what: &'static str, -) -> Result<(), DeserializeError> { - if slice.len() < at_least_len { - return Err(DeserializeError::buffer_too_small(what)); - } - Ok(()) -} - -/// Multiply the given numbers, and on overflow, return an error that includes -/// 'what' in the error message. -/// -/// This is useful when doing arithmetic with untrusted data. -pub(crate) fn mul( - a: usize, - b: usize, - what: &'static str, -) -> Result { - match a.checked_mul(b) { - Some(c) => Ok(c), - None => Err(DeserializeError::arithmetic_overflow(what)), - } -} - -/// Add the given numbers, and on overflow, return an error that includes -/// 'what' in the error message. 
-/// -/// This is useful when doing arithmetic with untrusted data. -pub(crate) fn add( - a: usize, - b: usize, - what: &'static str, -) -> Result { - match a.checked_add(b) { - Some(c) => Ok(c), - None => Err(DeserializeError::arithmetic_overflow(what)), - } -} - -/// Shift `a` left by `b`, and on overflow, return an error that includes -/// 'what' in the error message. -/// -/// This is useful when doing arithmetic with untrusted data. -pub(crate) fn shl( - a: usize, - b: usize, - what: &'static str, -) -> Result { - let amount = u32::try_from(b) - .map_err(|_| DeserializeError::arithmetic_overflow(what))?; - match a.checked_shl(amount) { - Some(c) => Ok(c), - None => Err(DeserializeError::arithmetic_overflow(what)), - } -} - -/// Returns the number of additional bytes required to add to the given length -/// in order to make the total length a multiple of 4. The return value is -/// always less than 4. -pub(crate) fn padding_len(non_padding_len: usize) -> usize { - (4 - (non_padding_len & 0b11)) & 0b11 -} - -/// A simple trait for writing code generic over endianness. -/// -/// This is similar to what byteorder provides, but we only need a very small -/// subset. -pub(crate) trait Endian { - /// Writes a u16 to the given destination buffer in a particular - /// endianness. If the destination buffer has a length smaller than 2, then - /// this panics. - fn write_u16(n: u16, dst: &mut [u8]); - - /// Writes a u32 to the given destination buffer in a particular - /// endianness. If the destination buffer has a length smaller than 4, then - /// this panics. - fn write_u32(n: u32, dst: &mut [u8]); - - /// Writes a u128 to the given destination buffer in a particular - /// endianness. If the destination buffer has a length smaller than 16, - /// then this panics. - fn write_u128(n: u128, dst: &mut [u8]); -} - -/// Little endian writing. -pub(crate) enum LE {} -/// Big endian writing. 
-pub(crate) enum BE {} - -#[cfg(target_endian = "little")] -pub(crate) type NE = LE; -#[cfg(target_endian = "big")] -pub(crate) type NE = BE; - -impl Endian for LE { - fn write_u16(n: u16, dst: &mut [u8]) { - dst[..2].copy_from_slice(&n.to_le_bytes()); - } - - fn write_u32(n: u32, dst: &mut [u8]) { - dst[..4].copy_from_slice(&n.to_le_bytes()); - } - - fn write_u128(n: u128, dst: &mut [u8]) { - dst[..16].copy_from_slice(&n.to_le_bytes()); - } -} - -impl Endian for BE { - fn write_u16(n: u16, dst: &mut [u8]) { - dst[..2].copy_from_slice(&n.to_be_bytes()); - } - - fn write_u32(n: u32, dst: &mut [u8]) { - dst[..4].copy_from_slice(&n.to_be_bytes()); - } - - fn write_u128(n: u128, dst: &mut [u8]) { - dst[..16].copy_from_slice(&n.to_be_bytes()); - } -} - -#[cfg(all(test, feature = "alloc"))] -mod tests { - use super::*; - - #[test] - fn labels() { - let mut buf = [0; 1024]; - - let nwrite = write_label("fooba", &mut buf).unwrap(); - assert_eq!(nwrite, 8); - assert_eq!(&buf[..nwrite], b"fooba\x00\x00\x00"); - - let nread = read_label(&buf, "fooba").unwrap(); - assert_eq!(nread, 8); - } - - #[test] - #[should_panic] - fn bad_label_interior_nul() { - // interior NULs are not allowed - write_label("foo\x00bar", &mut [0; 1024]).unwrap(); - } - - #[test] - fn bad_label_almost_too_long() { - // ok - write_label(&"z".repeat(255), &mut [0; 1024]).unwrap(); - } - - #[test] - #[should_panic] - fn bad_label_too_long() { - // labels longer than 255 bytes are banned - write_label(&"z".repeat(256), &mut [0; 1024]).unwrap(); - } - - #[test] - fn padding() { - assert_eq!(0, padding_len(8)); - assert_eq!(3, padding_len(9)); - assert_eq!(2, padding_len(10)); - assert_eq!(1, padding_len(11)); - assert_eq!(0, padding_len(12)); - assert_eq!(3, padding_len(13)); - assert_eq!(2, padding_len(14)); - assert_eq!(1, padding_len(15)); - assert_eq!(0, padding_len(16)); - } -} diff --git a/vendor/regex-automata/test b/vendor/regex-automata/test deleted file mode 100755 index df3e5ae98dea47..00000000000000 --- a/vendor/regex-automata/test +++ /dev/null @@ -1,95 +0,0 @@ -#!/bin/bash - -# This is a script that attempts to *approximately* exhaustively run the test -# suite for regex-automata. The main reason for why 'cargo test' isn't enough -# is because of crate features. regex-automata has a ton of them. This script -# tests many of those feature combinations (although not all) to try to get -# decent coverage in a finite amount of time. - -set -e - -# cd to the directory containing this crate's Cargo.toml so that we don't need -# to pass --manifest-path to every `cargo` command. -cd "$(dirname "$0")" - -echo "===== ALL FEATURES TEST ===" -cargo test --all-features - -# Man I don't *want* to have this many crate features, but... I really want -# folks to be able to slim the crate down to just the things they want. But -# the main downside is that I just can't feasibly test every combination of -# features because there are too many of them. Sad, but I'm not sure if there -# is a better alternative. 
-features=( - "" - "unicode-word-boundary" - "unicode-word-boundary,syntax,unicode-perl" - "unicode-word-boundary,syntax,dfa-build" - "nfa" - "dfa" - "hybrid" - "nfa,dfa" - "nfa,hybrid" - "dfa,hybrid" - "dfa-onepass" - "nfa-pikevm" - "nfa-backtrack" - "std" - "alloc" - "syntax" - "syntax,nfa-pikevm" - "syntax,hybrid" - "perf-literal-substring" - "perf-literal-multisubstring" - "meta" - "meta,nfa-backtrack" - "meta,hybrid" - "meta,dfa-build" - "meta,dfa-onepass" - "meta,nfa,dfa,hybrid,nfa-backtrack" - "meta,nfa,dfa,hybrid,nfa-backtrack,perf-literal-substring" - "meta,nfa,dfa,hybrid,nfa-backtrack,perf-literal-multisubstring" -) -for f in "${features[@]}"; do - echo "===== LIB FEATURES: $f ===" - # It's actually important to do a standard 'cargo build' in addition to a - # 'cargo test'. In particular, in the latter case, the dev-dependencies may - # wind up enabling features in dependencies (like memchr) that make it look - # like everything is well, but actually isn't. For example, the 'regex-test' - # dev-dependency uses 'bstr' and enables its 'std' feature, which in turn - # unconditionally enables 'memchr's 'std' feature. Since we're specifically - # looking to test that certain feature combinations work as expected, this - # can lead to things testing okay, but would actually fail to build. Yikes. - cargo build --no-default-features --lib --features "$f" - cargo test --no-default-features --lib --features "$f" -done - -# We can also run the integration test suite on stripped down features too. -# But the test suite doesn't do well with things like 'std' and 'unicode' -# disabled, so we always enable them. -features=( - "std,unicode,syntax,nfa-pikevm" - "std,unicode,syntax,nfa-backtrack" - "std,unicode,syntax,hybrid" - "std,unicode,syntax,dfa-onepass" - "std,unicode,syntax,dfa-search" - "std,unicode,syntax,dfa-build" - "std,unicode,meta" - # This one is a little tricky because it causes the backtracker to get used - # in more instances and results in failing tests for the 'earliest' tests. - # The actual results are semantically consistent with the API guarantee - # (the backtracker tends to report greater offsets because it isn't an FSM), - # but our tests are less flexible than the API guarantee and demand offsets - # reported by FSM regex engines. (Which is... all of them except for the - # backtracker.) - # "std,unicode,meta,nfa-backtrack" - "std,unicode,meta,hybrid" - "std,unicode,meta,dfa-onepass" - "std,unicode,meta,dfa-build" - "std,unicode,meta,nfa,dfa-onepass,hybrid" -) -for f in "${features[@]}"; do - echo "===== INTEGRATION FEATURES: $f ===" - cargo build --no-default-features --lib --features "$f" - cargo test --no-default-features --test integration --features "$f" -done diff --git a/vendor/regex-automata/tests/dfa/api.rs b/vendor/regex-automata/tests/dfa/api.rs deleted file mode 100644 index 8a015ad0fb8852..00000000000000 --- a/vendor/regex-automata/tests/dfa/api.rs +++ /dev/null @@ -1,162 +0,0 @@ -use std::error::Error; - -use regex_automata::{ - dfa::{dense, Automaton, OverlappingState}, - nfa::thompson, - Anchored, HalfMatch, Input, MatchError, -}; - -// Tests that quit bytes in the forward direction work correctly. 
-#[test] -fn quit_fwd() -> Result<(), Box> { - let dfa = dense::Builder::new() - .configure(dense::Config::new().quit(b'x', true)) - .build("[[:word:]]+$")?; - - assert_eq!( - Err(MatchError::quit(b'x', 3)), - dfa.try_search_fwd(&Input::new(b"abcxyz")) - ); - assert_eq!( - dfa.try_search_overlapping_fwd( - &Input::new(b"abcxyz"), - &mut OverlappingState::start() - ), - Err(MatchError::quit(b'x', 3)), - ); - - Ok(()) -} - -// Tests that quit bytes in the reverse direction work correctly. -#[test] -fn quit_rev() -> Result<(), Box> { - let dfa = dense::Builder::new() - .configure(dense::Config::new().quit(b'x', true)) - .thompson(thompson::Config::new().reverse(true)) - .build("^[[:word:]]+")?; - - assert_eq!( - Err(MatchError::quit(b'x', 3)), - dfa.try_search_rev(&Input::new(b"abcxyz")) - ); - - Ok(()) -} - -// Tests that if we heuristically enable Unicode word boundaries but then -// instruct that a non-ASCII byte should NOT be a quit byte, then the builder -// will panic. -#[test] -#[should_panic] -fn quit_panics() { - dense::Config::new().unicode_word_boundary(true).quit(b'\xFF', false); -} - -// This tests an intesting case where even if the Unicode word boundary option -// is disabled, setting all non-ASCII bytes to be quit bytes will cause Unicode -// word boundaries to be enabled. -#[test] -fn unicode_word_implicitly_works() -> Result<(), Box> { - let mut config = dense::Config::new(); - for b in 0x80..=0xFF { - config = config.quit(b, true); - } - let dfa = dense::Builder::new().configure(config).build(r"\b")?; - let expected = HalfMatch::must(0, 1); - assert_eq!(Ok(Some(expected)), dfa.try_search_fwd(&Input::new(b" a"))); - Ok(()) -} - -// A variant of [`Automaton::is_special_state`]'s doctest, but with universal -// start states. -// -// See: https://github.com/rust-lang/regex/pull/1195 -#[test] -fn universal_start_search() -> Result<(), Box> { - fn find( - dfa: &A, - haystack: &[u8], - ) -> Result, MatchError> { - let mut state = dfa - .universal_start_state(Anchored::No) - .expect("regex should not require lookbehind"); - let mut last_match = None; - // Walk all the bytes in the haystack. We can quit early if we see - // a dead or a quit state. The former means the automaton will - // never transition to any other state. The latter means that the - // automaton entered a condition in which its search failed. - for (i, &b) in haystack.iter().enumerate() { - state = dfa.next_state(state, b); - if dfa.is_special_state(state) { - if dfa.is_match_state(state) { - last_match = - Some(HalfMatch::new(dfa.match_pattern(state, 0), i)); - } else if dfa.is_dead_state(state) { - return Ok(last_match); - } else if dfa.is_quit_state(state) { - // It is possible to enter into a quit state after - // observing a match has occurred. In that case, we - // should return the match instead of an error. - if last_match.is_some() { - return Ok(last_match); - } - return Err(MatchError::quit(b, i)); - } - // Implementors may also want to check for start or accel - // states and handle them differently for performance - // reasons. But it is not necessary for correctness. - } - } - // Matches are always delayed by 1 byte, so we must explicitly walk - // the special "EOI" transition at the end of the search. 
- state = dfa.next_eoi_state(state); - if dfa.is_match_state(state) { - last_match = Some(HalfMatch::new( - dfa.match_pattern(state, 0), - haystack.len(), - )); - } - Ok(last_match) - } - - fn check_impl( - dfa: impl Automaton, - haystack: &str, - pat: usize, - offset: usize, - ) -> Result<(), Box> { - let haystack = haystack.as_bytes(); - let mat = find(&dfa, haystack)?.unwrap(); - assert_eq!(mat.pattern().as_usize(), pat); - assert_eq!(mat.offset(), offset); - Ok(()) - } - - fn check( - dfa: &dense::DFA>, - haystack: &str, - pat: usize, - offset: usize, - ) -> Result<(), Box> { - check_impl(dfa, haystack, pat, offset)?; - check_impl(dfa.to_sparse()?, haystack, pat, offset)?; - Ok(()) - } - - let dfa = dense::DFA::new(r"[a-z]+")?; - let haystack = "123 foobar 4567"; - check(&dfa, haystack, 0, 10)?; - - let dfa = dense::DFA::new(r"[0-9]{4}")?; - let haystack = "123 foobar 4567"; - check(&dfa, haystack, 0, 15)?; - - let dfa = dense::DFA::new_many(&[r"[a-z]+", r"[0-9]+"])?; - let haystack = "123 foobar 4567"; - check(&dfa, haystack, 1, 3)?; - check(&dfa, &haystack[3..], 0, 7)?; - check(&dfa, &haystack[10..], 1, 5)?; - - Ok(()) -} diff --git a/vendor/regex-automata/tests/dfa/mod.rs b/vendor/regex-automata/tests/dfa/mod.rs deleted file mode 100644 index 0d8f539db63938..00000000000000 --- a/vendor/regex-automata/tests/dfa/mod.rs +++ /dev/null @@ -1,8 +0,0 @@ -#[cfg(all(feature = "dfa-build", feature = "dfa-search"))] -mod api; -#[cfg(feature = "dfa-onepass")] -mod onepass; -#[cfg(all(feature = "dfa-build", feature = "dfa-search"))] -mod regression; -#[cfg(all(not(miri), feature = "dfa-build", feature = "dfa-search"))] -mod suite; diff --git a/vendor/regex-automata/tests/dfa/onepass/mod.rs b/vendor/regex-automata/tests/dfa/onepass/mod.rs deleted file mode 100644 index 9d6ab475efef12..00000000000000 --- a/vendor/regex-automata/tests/dfa/onepass/mod.rs +++ /dev/null @@ -1,2 +0,0 @@ -#[cfg(not(miri))] -mod suite; diff --git a/vendor/regex-automata/tests/dfa/onepass/suite.rs b/vendor/regex-automata/tests/dfa/onepass/suite.rs deleted file mode 100644 index aba46c86d1bf55..00000000000000 --- a/vendor/regex-automata/tests/dfa/onepass/suite.rs +++ /dev/null @@ -1,197 +0,0 @@ -use { - anyhow::Result, - regex_automata::{ - dfa::onepass::{self, DFA}, - nfa::thompson, - util::{iter, syntax}, - }, - regex_test::{ - CompiledRegex, Match, RegexTest, SearchKind, Span, TestResult, - TestRunner, - }, -}; - -use crate::{create_input, suite, testify_captures, untestify_kind}; - -const EXPANSIONS: &[&str] = &["is_match", "find", "captures"]; - -/// Tests the default configuration of the hybrid NFA/DFA. -#[test] -fn default() -> Result<()> { - let builder = DFA::builder(); - TestRunner::new()? - .expand(EXPANSIONS, |t| t.compiles()) - .test_iter(suite()?.iter(), compiler(builder)) - .assert(); - Ok(()) -} - -/// Tests the hybrid NFA/DFA when 'starts_for_each_pattern' is enabled for all -/// tests. -#[test] -fn starts_for_each_pattern() -> Result<()> { - let mut builder = DFA::builder(); - builder.configure(DFA::config().starts_for_each_pattern(true)); - TestRunner::new()? - .expand(EXPANSIONS, |t| t.compiles()) - .test_iter(suite()?.iter(), compiler(builder)) - .assert(); - Ok(()) -} - -/// Tests the hybrid NFA/DFA when byte classes are disabled. -/// -/// N.B. Disabling byte classes doesn't avoid any indirection at search time. -/// All it does is cause every byte value to be its own distinct equivalence -/// class. 
-#[test] -fn no_byte_classes() -> Result<()> { - let mut builder = DFA::builder(); - builder.configure(DFA::config().byte_classes(false)); - TestRunner::new()? - .expand(EXPANSIONS, |t| t.compiles()) - .test_iter(suite()?.iter(), compiler(builder)) - .assert(); - Ok(()) -} - -fn compiler( - mut builder: onepass::Builder, -) -> impl FnMut(&RegexTest, &[String]) -> Result { - move |test, regexes| { - // Check if our regex contains things that aren't supported by DFAs. - // That is, Unicode word boundaries when searching non-ASCII text. - if !configure_onepass_builder(test, &mut builder) { - return Ok(CompiledRegex::skip()); - } - let re = match builder.build_many(®exes) { - Ok(re) => re, - Err(err) => { - let msg = err.to_string(); - // This is pretty gross, but when a regex fails to compile as - // a one-pass regex, then we want to be OK with that and just - // skip the test. But we have to be careful to only skip it - // when the expected result is that the regex compiles. If - // the test is specifically checking that the regex does not - // compile, then we should bubble up that error and allow the - // test to pass. - // - // Since our error types are all generally opaque, we just - // look for an error string. Not great, but not the end of the - // world. - if test.compiles() && msg.contains("not one-pass") { - return Ok(CompiledRegex::skip()); - } - return Err(err.into()); - } - }; - let mut cache = re.create_cache(); - Ok(CompiledRegex::compiled(move |test| -> TestResult { - run_test(&re, &mut cache, test) - })) - } -} - -fn run_test( - re: &DFA, - cache: &mut onepass::Cache, - test: &RegexTest, -) -> TestResult { - let input = create_input(test); - match test.additional_name() { - "is_match" => { - TestResult::matched(re.is_match(cache, input.earliest(true))) - } - "find" => match test.search_kind() { - SearchKind::Earliest | SearchKind::Leftmost => { - let input = - input.earliest(test.search_kind() == SearchKind::Earliest); - let mut caps = re.create_captures(); - let it = iter::Searcher::new(input) - .into_matches_iter(|input| { - re.try_search(cache, input, &mut caps)?; - Ok(caps.get_match()) - }) - .infallible() - .take(test.match_limit().unwrap_or(std::usize::MAX)) - .map(|m| Match { - id: m.pattern().as_usize(), - span: Span { start: m.start(), end: m.end() }, - }); - TestResult::matches(it) - } - SearchKind::Overlapping => { - // The one-pass DFA does not support any kind of overlapping - // search. This is not just a matter of not having the API. - // It's fundamentally incompatible with the one-pass concept. - // If overlapping matches were possible, then the one-pass DFA - // would fail to build. - TestResult::skip() - } - }, - "captures" => match test.search_kind() { - SearchKind::Earliest | SearchKind::Leftmost => { - let input = - input.earliest(test.search_kind() == SearchKind::Earliest); - let it = iter::Searcher::new(input) - .into_captures_iter(re.create_captures(), |input, caps| { - re.try_search(cache, input, caps) - }) - .infallible() - .take(test.match_limit().unwrap_or(std::usize::MAX)) - .map(|caps| testify_captures(&caps)); - TestResult::captures(it) - } - SearchKind::Overlapping => { - // The one-pass DFA does not support any kind of overlapping - // search. This is not just a matter of not having the API. - // It's fundamentally incompatible with the one-pass concept. - // If overlapping matches were possible, then the one-pass DFA - // would fail to build. 
- TestResult::skip() - } - }, - name => TestResult::fail(&format!("unrecognized test name: {name}")), - } -} - -/// Configures the given regex builder with all relevant settings on the given -/// regex test. -/// -/// If the regex test has a setting that is unsupported, then this returns -/// false (implying the test should be skipped). -fn configure_onepass_builder( - test: &RegexTest, - builder: &mut onepass::Builder, -) -> bool { - if !test.anchored() { - return false; - } - let match_kind = match untestify_kind(test.match_kind()) { - None => return false, - Some(k) => k, - }; - - let config = DFA::config().match_kind(match_kind); - builder - .configure(config) - .syntax(config_syntax(test)) - .thompson(config_thompson(test)); - true -} - -/// Configuration of a Thompson NFA compiler from a regex test. -fn config_thompson(test: &RegexTest) -> thompson::Config { - let mut lookm = regex_automata::util::look::LookMatcher::new(); - lookm.set_line_terminator(test.line_terminator()); - thompson::Config::new().utf8(test.utf8()).look_matcher(lookm) -} - -/// Configuration of the regex parser from a regex test. -fn config_syntax(test: &RegexTest) -> syntax::Config { - syntax::Config::new() - .case_insensitive(test.case_insensitive()) - .unicode(test.unicode()) - .utf8(test.utf8()) - .line_terminator(test.line_terminator()) -} diff --git a/vendor/regex-automata/tests/dfa/regression.rs b/vendor/regex-automata/tests/dfa/regression.rs deleted file mode 100644 index 09caffabcb1f16..00000000000000 --- a/vendor/regex-automata/tests/dfa/regression.rs +++ /dev/null @@ -1,48 +0,0 @@ -// A regression test for checking that minimization correctly translates -// whether a state is a match state or not. Previously, it was possible for -// minimization to mark a non-matching state as matching. -#[test] -#[cfg(not(miri))] -fn minimize_sets_correct_match_states() { - use regex_automata::{ - dfa::{dense::DFA, Automaton, StartKind}, - Anchored, Input, - }; - - let pattern = - // This is a subset of the grapheme matching regex. I couldn't seem - // to get a repro any smaller than this unfortunately. - r"(?x) - (?: - \p{gcb=Prepend}* - (?: - (?: - (?: - \p{gcb=L}* - (?:\p{gcb=V}+|\p{gcb=LV}\p{gcb=V}*|\p{gcb=LVT}) - \p{gcb=T}* - ) - | - \p{gcb=L}+ - | - \p{gcb=T}+ - ) - | - \p{Extended_Pictographic} - (?:\p{gcb=Extend}*\p{gcb=ZWJ}\p{Extended_Pictographic})* - | - [^\p{gcb=Control}\p{gcb=CR}\p{gcb=LF}] - ) - [\p{gcb=Extend}\p{gcb=ZWJ}\p{gcb=SpacingMark}]* - ) - "; - - let dfa = DFA::builder() - .configure( - DFA::config().start_kind(StartKind::Anchored).minimize(true), - ) - .build(pattern) - .unwrap(); - let input = Input::new(b"\xE2").anchored(Anchored::Yes); - assert_eq!(Ok(None), dfa.try_search_fwd(&input)); -} diff --git a/vendor/regex-automata/tests/dfa/suite.rs b/vendor/regex-automata/tests/dfa/suite.rs deleted file mode 100644 index 8368ffef49367c..00000000000000 --- a/vendor/regex-automata/tests/dfa/suite.rs +++ /dev/null @@ -1,443 +0,0 @@ -use { - anyhow::Result, - regex_automata::{ - dfa::{ - self, dense, regex::Regex, sparse, Automaton, OverlappingState, - StartKind, - }, - nfa::thompson, - util::{prefilter::Prefilter, syntax}, - Anchored, Input, PatternSet, - }, - regex_test::{ - CompiledRegex, Match, RegexTest, SearchKind, Span, TestResult, - TestRunner, - }, -}; - -use crate::{create_input, suite, untestify_kind}; - -const EXPANSIONS: &[&str] = &["is_match", "find", "which"]; - -/// Runs the test suite with the default configuration. 
-#[test] -fn unminimized_default() -> Result<()> { - let builder = Regex::builder(); - TestRunner::new()? - .expand(EXPANSIONS, |t| t.compiles()) - .blacklist("expensive") - .test_iter(suite()?.iter(), dense_compiler(builder)) - .assert(); - Ok(()) -} - -/// Runs the test suite with the default configuration and a prefilter enabled, -/// if one can be built. -#[test] -fn unminimized_prefilter() -> Result<()> { - let my_compiler = |test: &RegexTest, regexes: &[String]| { - // Parse regexes as HIRs so we can get literals to build a prefilter. - let mut hirs = vec![]; - for pattern in regexes.iter() { - hirs.push(syntax::parse_with(pattern, &config_syntax(test))?); - } - let kind = match untestify_kind(test.match_kind()) { - None => return Ok(CompiledRegex::skip()), - Some(kind) => kind, - }; - let pre = Prefilter::from_hirs_prefix(kind, &hirs); - let mut builder = Regex::builder(); - builder.dense(dense::DFA::config().prefilter(pre)); - compiler(builder, |_, _, re| { - Ok(CompiledRegex::compiled(move |test| -> TestResult { - run_test(&re, test) - })) - })(test, regexes) - }; - TestRunner::new()? - .expand(EXPANSIONS, |t| t.compiles()) - .blacklist("expensive") - .test_iter(suite()?.iter(), my_compiler) - .assert(); - Ok(()) -} - -/// Runs the test suite with start states specialized. -#[test] -fn unminimized_specialized_start_states() -> Result<()> { - let mut builder = Regex::builder(); - builder.dense(dense::Config::new().specialize_start_states(true)); - - TestRunner::new()? - .expand(EXPANSIONS, |t| t.compiles()) - .blacklist("expensive") - .test_iter(suite()?.iter(), dense_compiler(builder)) - .assert(); - Ok(()) -} - -/// Runs the test suite with byte classes disabled. -#[test] -fn unminimized_no_byte_class() -> Result<()> { - let mut builder = Regex::builder(); - builder.dense(dense::Config::new().byte_classes(false)); - - TestRunner::new()? - .expand(EXPANSIONS, |t| t.compiles()) - .blacklist("expensive") - .test_iter(suite()?.iter(), dense_compiler(builder)) - .assert(); - Ok(()) -} - -/// Runs the test suite with NFA shrinking enabled. -#[test] -fn unminimized_nfa_shrink() -> Result<()> { - let mut builder = Regex::builder(); - builder.thompson(thompson::Config::new().shrink(true)); - - TestRunner::new()? - .expand(EXPANSIONS, |t| t.compiles()) - .blacklist("expensive") - .test_iter(suite()?.iter(), dense_compiler(builder)) - .assert(); - Ok(()) -} - -/// Runs the test suite on a minimized DFA with an otherwise default -/// configuration. -#[test] -fn minimized_default() -> Result<()> { - let mut builder = Regex::builder(); - builder.dense(dense::Config::new().minimize(true)); - TestRunner::new()? - .expand(EXPANSIONS, |t| t.compiles()) - .blacklist("expensive") - .test_iter(suite()?.iter(), dense_compiler(builder)) - .assert(); - Ok(()) -} - -/// Runs the test suite on a minimized DFA with byte classes disabled. -#[test] -fn minimized_no_byte_class() -> Result<()> { - let mut builder = Regex::builder(); - builder.dense(dense::Config::new().minimize(true).byte_classes(false)); - - TestRunner::new()? - .expand(EXPANSIONS, |t| t.compiles()) - .blacklist("expensive") - .test_iter(suite()?.iter(), dense_compiler(builder)) - .assert(); - Ok(()) -} - -/// Runs the test suite on a sparse unminimized DFA. -#[test] -fn sparse_unminimized_default() -> Result<()> { - let builder = Regex::builder(); - TestRunner::new()? 
- .expand(EXPANSIONS, |t| t.compiles()) - .blacklist("expensive") - .test_iter(suite()?.iter(), sparse_compiler(builder)) - .assert(); - Ok(()) -} - -/// Runs the test suite on a sparse unminimized DFA with prefilters enabled. -#[test] -fn sparse_unminimized_prefilter() -> Result<()> { - let my_compiler = |test: &RegexTest, regexes: &[String]| { - // Parse regexes as HIRs so we can get literals to build a prefilter. - let mut hirs = vec![]; - for pattern in regexes.iter() { - hirs.push(syntax::parse_with(pattern, &config_syntax(test))?); - } - let kind = match untestify_kind(test.match_kind()) { - None => return Ok(CompiledRegex::skip()), - Some(kind) => kind, - }; - let pre = Prefilter::from_hirs_prefix(kind, &hirs); - let mut builder = Regex::builder(); - builder.dense(dense::DFA::config().prefilter(pre)); - compiler(builder, |builder, _, re| { - let fwd = re.forward().to_sparse()?; - let rev = re.reverse().to_sparse()?; - let re = builder.build_from_dfas(fwd, rev); - Ok(CompiledRegex::compiled(move |test| -> TestResult { - run_test(&re, test) - })) - })(test, regexes) - }; - TestRunner::new()? - .expand(EXPANSIONS, |t| t.compiles()) - .blacklist("expensive") - .test_iter(suite()?.iter(), my_compiler) - .assert(); - Ok(()) -} - -/// Another basic sanity test that checks we can serialize and then deserialize -/// a regex, and that the resulting regex can be used for searching correctly. -#[test] -fn serialization_unminimized_default() -> Result<()> { - let builder = Regex::builder(); - let my_compiler = |builder| { - compiler(builder, |builder, _, re| { - let builder = builder.clone(); - let (fwd_bytes, _) = re.forward().to_bytes_native_endian(); - let (rev_bytes, _) = re.reverse().to_bytes_native_endian(); - Ok(CompiledRegex::compiled(move |test| -> TestResult { - let fwd: dense::DFA<&[u32]> = - dense::DFA::from_bytes(&fwd_bytes).unwrap().0; - let rev: dense::DFA<&[u32]> = - dense::DFA::from_bytes(&rev_bytes).unwrap().0; - let re = builder.build_from_dfas(fwd, rev); - - run_test(&re, test) - })) - }) - }; - TestRunner::new()? - .expand(EXPANSIONS, |t| t.compiles()) - .blacklist("expensive") - .test_iter(suite()?.iter(), my_compiler(builder)) - .assert(); - Ok(()) -} - -/// A basic sanity test that checks we can serialize and then deserialize a -/// regex using sparse DFAs, and that the resulting regex can be used for -/// searching correctly. -#[test] -fn sparse_serialization_unminimized_default() -> Result<()> { - let builder = Regex::builder(); - let my_compiler = |builder| { - compiler(builder, |builder, _, re| { - let builder = builder.clone(); - let fwd_bytes = re.forward().to_sparse()?.to_bytes_native_endian(); - let rev_bytes = re.reverse().to_sparse()?.to_bytes_native_endian(); - Ok(CompiledRegex::compiled(move |test| -> TestResult { - let fwd: sparse::DFA<&[u8]> = - sparse::DFA::from_bytes(&fwd_bytes).unwrap().0; - let rev: sparse::DFA<&[u8]> = - sparse::DFA::from_bytes(&rev_bytes).unwrap().0; - let re = builder.build_from_dfas(fwd, rev); - run_test(&re, test) - })) - }) - }; - TestRunner::new()? 
- .expand(EXPANSIONS, |t| t.compiles()) - .blacklist("expensive") - .test_iter(suite()?.iter(), my_compiler(builder)) - .assert(); - Ok(()) -} - -fn dense_compiler( - builder: dfa::regex::Builder, -) -> impl FnMut(&RegexTest, &[String]) -> Result { - compiler(builder, |_, _, re| { - Ok(CompiledRegex::compiled(move |test| -> TestResult { - run_test(&re, test) - })) - }) -} - -fn sparse_compiler( - builder: dfa::regex::Builder, -) -> impl FnMut(&RegexTest, &[String]) -> Result { - compiler(builder, |builder, _, re| { - let fwd = re.forward().to_sparse()?; - let rev = re.reverse().to_sparse()?; - let re = builder.build_from_dfas(fwd, rev); - Ok(CompiledRegex::compiled(move |test| -> TestResult { - run_test(&re, test) - })) - }) -} - -fn compiler( - mut builder: dfa::regex::Builder, - mut create_matcher: impl FnMut( - &dfa::regex::Builder, - Option, - Regex, - ) -> Result, -) -> impl FnMut(&RegexTest, &[String]) -> Result { - move |test, regexes| { - // Parse regexes as HIRs for some analysis below. - let mut hirs = vec![]; - for pattern in regexes.iter() { - hirs.push(syntax::parse_with(pattern, &config_syntax(test))?); - } - - // Get a prefilter in case the test wants it. - let kind = match untestify_kind(test.match_kind()) { - None => return Ok(CompiledRegex::skip()), - Some(kind) => kind, - }; - let pre = Prefilter::from_hirs_prefix(kind, &hirs); - - // Check if our regex contains things that aren't supported by DFAs. - // That is, Unicode word boundaries when searching non-ASCII text. - if !test.haystack().is_ascii() { - for hir in hirs.iter() { - if hir.properties().look_set().contains_word_unicode() { - return Ok(CompiledRegex::skip()); - } - } - } - if !configure_regex_builder(test, &mut builder) { - return Ok(CompiledRegex::skip()); - } - create_matcher(&builder, pre, builder.build_many(®exes)?) - } -} - -fn run_test(re: &Regex, test: &RegexTest) -> TestResult { - let input = create_input(test); - match test.additional_name() { - "is_match" => TestResult::matched(re.is_match(input.earliest(true))), - "find" => match test.search_kind() { - SearchKind::Earliest | SearchKind::Leftmost => { - let input = - input.earliest(test.search_kind() == SearchKind::Earliest); - TestResult::matches( - re.find_iter(input) - .take(test.match_limit().unwrap_or(std::usize::MAX)) - .map(|m| Match { - id: m.pattern().as_usize(), - span: Span { start: m.start(), end: m.end() }, - }), - ) - } - SearchKind::Overlapping => { - try_search_overlapping(re, &input).unwrap() - } - }, - "which" => match test.search_kind() { - SearchKind::Earliest | SearchKind::Leftmost => { - // There are no "which" APIs for standard searches. - TestResult::skip() - } - SearchKind::Overlapping => { - let dfa = re.forward(); - let mut patset = PatternSet::new(dfa.pattern_len()); - dfa.try_which_overlapping_matches(&input, &mut patset) - .unwrap(); - TestResult::which(patset.iter().map(|p| p.as_usize())) - } - }, - name => TestResult::fail(&format!("unrecognized test name: {name}")), - } -} - -/// Configures the given regex builder with all relevant settings on the given -/// regex test. -/// -/// If the regex test has a setting that is unsupported, then this returns -/// false (implying the test should be skipped). 
-fn configure_regex_builder( - test: &RegexTest, - builder: &mut dfa::regex::Builder, -) -> bool { - let match_kind = match untestify_kind(test.match_kind()) { - None => return false, - Some(k) => k, - }; - - let starts = if test.anchored() { - StartKind::Anchored - } else { - StartKind::Unanchored - }; - let mut dense_config = dense::Config::new() - .start_kind(starts) - .match_kind(match_kind) - .unicode_word_boundary(true); - // When doing an overlapping search, we might try to find the start of each - // match with a custom search routine. In that case, we need to tell the - // reverse search (for the start offset) which pattern to look for. The - // only way that API works is when anchored starting states are compiled - // for each pattern. This does technically also enable it for the forward - // DFA, but we're okay with that. - if test.search_kind() == SearchKind::Overlapping { - dense_config = dense_config.starts_for_each_pattern(true); - } - - builder - .syntax(config_syntax(test)) - .thompson(config_thompson(test)) - .dense(dense_config); - true -} - -/// Configuration of a Thompson NFA compiler from a regex test. -fn config_thompson(test: &RegexTest) -> thompson::Config { - let mut lookm = regex_automata::util::look::LookMatcher::new(); - lookm.set_line_terminator(test.line_terminator()); - thompson::Config::new().utf8(test.utf8()).look_matcher(lookm) -} - -/// Configuration of the regex syntax from a regex test. -fn config_syntax(test: &RegexTest) -> syntax::Config { - syntax::Config::new() - .case_insensitive(test.case_insensitive()) - .unicode(test.unicode()) - .utf8(test.utf8()) - .line_terminator(test.line_terminator()) -} - -/// Execute an overlapping search, and for each match found, also find its -/// overlapping starting positions. -/// -/// N.B. This routine used to be part of the crate API, but 1) it wasn't clear -/// to me how useful it was and 2) it wasn't clear to me what its semantics -/// should be. In particular, a potentially surprising footgun of this routine -/// that it is worst case *quadratic* in the size of the haystack. Namely, it's -/// possible to report a match at every position, and for every such position, -/// scan all the way to the beginning of the haystack to find the starting -/// position. Typical leftmost non-overlapping searches don't suffer from this -/// because, well, matches can't overlap. So subsequent searches after a match -/// is found don't revisit previously scanned parts of the haystack. -/// -/// Its semantics can be strange for other reasons too. For example, given -/// the regex '.*' and the haystack 'zz', the full set of overlapping matches -/// is: [0, 0], [1, 1], [0, 1], [2, 2], [1, 2], [0, 2]. The ordering of -/// those matches is quite strange, but makes sense when you think about the -/// implementation: an end offset is found left-to-right, and then one or more -/// starting offsets are found right-to-left. -/// -/// Nevertheless, we provide this routine in our test suite because it's -/// useful to test the low level DFA overlapping search and our test suite -/// is written in a way that requires starting offsets. 
-fn try_search_overlapping( - re: &Regex, - input: &Input<'_>, -) -> Result { - let mut matches = vec![]; - let mut fwd_state = OverlappingState::start(); - let (fwd_dfa, rev_dfa) = (re.forward(), re.reverse()); - while let Some(end) = { - fwd_dfa.try_search_overlapping_fwd(input, &mut fwd_state)?; - fwd_state.get_match() - } { - let revsearch = input - .clone() - .range(input.start()..end.offset()) - .anchored(Anchored::Pattern(end.pattern())) - .earliest(false); - let mut rev_state = OverlappingState::start(); - while let Some(start) = { - rev_dfa.try_search_overlapping_rev(&revsearch, &mut rev_state)?; - rev_state.get_match() - } { - let span = Span { start: start.offset(), end: end.offset() }; - let mat = Match { id: end.pattern().as_usize(), span }; - matches.push(mat); - } - } - Ok(TestResult::matches(matches)) -} diff --git a/vendor/regex-automata/tests/fuzz/dense.rs b/vendor/regex-automata/tests/fuzz/dense.rs deleted file mode 100644 index 213891b3e8b563..00000000000000 --- a/vendor/regex-automata/tests/fuzz/dense.rs +++ /dev/null @@ -1,52 +0,0 @@ -// This test was found by a fuzzer input that crafted a way to provide -// an invalid serialization of ByteClasses that passed our verification. -// Specifically, the verification step in the deserialization of ByteClasses -// used an iterator that depends on part of the serialized bytes being correct. -// (Specifically, the encoding of the number of classes.) -#[test] -fn invalid_byte_classes() { - let data = include_bytes!( - "testdata/deserialize_dense_crash-9486fb7c8a93b12c12a62166b43d31640c0208a9", - ); - let _ = fuzz_run(data); -} - -#[test] -fn invalid_byte_classes_min() { - let data = include_bytes!( - "testdata/deserialize_dense_minimized-from-9486fb7c8a93b12c12a62166b43d31640c0208a9", - ); - let _ = fuzz_run(data); -} - -// This is the code from the fuzz target. Kind of sucks to duplicate it here, -// but this is fundamentally how we interpret the date. -fn fuzz_run(given_data: &[u8]) -> Option<()> { - use regex_automata::dfa::Automaton; - - if given_data.len() < 2 { - return None; - } - let haystack_len = usize::from(given_data[0]); - let haystack = given_data.get(1..1 + haystack_len)?; - let given_dfa_bytes = given_data.get(1 + haystack_len..)?; - - // We help the fuzzer along by adding a preamble to the bytes that should - // at least make these first parts valid. The preamble expects a very - // specific sequence of bytes, so it makes sense to just force this. - let label = "rust-regex-automata-dfa-dense\x00\x00\x00"; - assert_eq!(0, label.len() % 4); - let endianness_check = 0xFEFFu32.to_ne_bytes().to_vec(); - let version_check = 2u32.to_ne_bytes().to_vec(); - let mut dfa_bytes: Vec = vec![]; - dfa_bytes.extend(label.as_bytes()); - dfa_bytes.extend(&endianness_check); - dfa_bytes.extend(&version_check); - dfa_bytes.extend(given_dfa_bytes); - // This is the real test: checking that any input we give to - // DFA::from_bytes will never result in a panic. 
- let (dfa, _) = - regex_automata::dfa::dense::DFA::from_bytes(&dfa_bytes).ok()?; - let _ = dfa.try_search_fwd(®ex_automata::Input::new(haystack)); - Some(()) -} diff --git a/vendor/regex-automata/tests/fuzz/mod.rs b/vendor/regex-automata/tests/fuzz/mod.rs deleted file mode 100644 index 960cb4251ab007..00000000000000 --- a/vendor/regex-automata/tests/fuzz/mod.rs +++ /dev/null @@ -1,2 +0,0 @@ -mod dense; -mod sparse; diff --git a/vendor/regex-automata/tests/fuzz/sparse.rs b/vendor/regex-automata/tests/fuzz/sparse.rs deleted file mode 100644 index 837ad10147c016..00000000000000 --- a/vendor/regex-automata/tests/fuzz/sparse.rs +++ /dev/null @@ -1,132 +0,0 @@ -// This is a regression test for a bug in how special states are handled. The -// fuzzer found a case where a state returned true for 'is_special_state' but -// *didn't* return true for 'is_dead_state', 'is_quit_state', 'is_match_state', -// 'is_start_state' or 'is_accel_state'. This in turn tripped a debug assertion -// in the core matching loop that requires 'is_special_state' being true to -// imply that one of the other routines returns true. -// -// We fixed this by adding some validation to both dense and sparse DFAs that -// checks that this property is true for every state ID in the DFA. -#[test] -fn invalid_special_state() { - let data = include_bytes!( - "testdata/deserialize_sparse_crash-a1b839d899ced76d5d7d0f78f9edb7a421505838", - ); - let _ = fuzz_run(data); -} - -// This is an interesting case where a fuzzer generated a DFA with -// a transition to a state ID that decoded as a valid state, but -// where the ID itself did not point to one of the two existing -// states for this particular DFA. This combined with marking this -// transition's state ID as special but without actually making one of the -// 'is_{dead,quit,match,start,accel}_state' predicates return true ended up -// tripping the 'debug_assert(dfa.is_quit_state(sid))' code in the search -// routine. -// -// We fixed this in alloc mode by checking that every transition points to a -// valid state ID. Technically this bug still exists in core-only mode, but -// it's not clear how to fix it. And it's worth pointing out that the search -// routine won't panic in production. It will just provide invalid results. And -// that's acceptable within the contract of DFA::from_bytes. -#[test] -fn transition_to_invalid_but_valid_state() { - let data = include_bytes!( - "testdata/deserialize_sparse_crash-dbb8172d3984e7e7d03f4b5f8bb86ecd1460eff9", - ); - let _ = fuzz_run(data); -} - -// Another one caught by the fuzzer where it generated a DFA that reported a -// start state as a match state. Since matches are always delayed by one byte, -// start states specifically cannot be match states. And indeed, the search -// code relies on this. -#[test] -fn start_state_is_not_match_state() { - let data = include_bytes!( - "testdata/deserialize_sparse_crash-0da59c0434eaf35e5a6b470fa9244bb79c72b000", - ); - let _ = fuzz_run(data); -} - -// This is variation on 'transition_to_invalid_but_valid_state', but happens -// to a start state. Namely, the fuzz data here builds a DFA with a start -// state ID that is incorrect but points to a sequence of bytes that satisfies -// state decoding validation. This errant state in turn has a non-zero number -// of transitions, and its those transitions that point to a state that does -// *not* satisfy state decoding validation. But we never checked those. So the -// fix here was to add validation of the transitions off of the start state. 
-#[test] -fn start_state_has_valid_transitions() { - let data = include_bytes!( - "testdata/deserialize_sparse_crash-61fd8e3003bf9d99f6c1e5a8488727eefd234b98", - ); - let _ = fuzz_run(data); -} - -// This fuzz input generated a DFA with a state whose ID was in the match state -// ID range, but where the state itself was encoded with zero pattern IDs. We -// added validation code to check this case. -#[test] -fn match_state_inconsistency() { - let data = include_bytes!( - "testdata/deserialize_sparse_crash-c383ae07ec5e191422eadc492117439011816570", - ); - let _ = fuzz_run(data); -} - -// This fuzz input generated a DFA with a state whose ID was in the accelerator -// range, but who didn't have any accelerators. This violated an invariant that -// assumes that if 'dfa.is_accel_state(sid)' returns true, then the state must -// have some accelerators. -#[test] -fn invalid_accelerators() { - let data = include_bytes!( - "testdata/deserialize_sparse_crash-d07703ceb94b10dcd9e4acb809f2051420449e2b", - ); - let _ = fuzz_run(data); -} - -// This fuzz input generated a DFA with a state whose EOI transition led to -// a quit state, which is generally considered illegal. Why? Because the EOI -// transition is defined over a special sentinel alphabet element and one -// cannot configure a DFA to "quit" on that sentinel. -#[test] -fn eoi_transition_to_quit_state() { - let data = include_bytes!( - "testdata/deserialize_sparse_crash-18cfc246f2ddfc3dfc92b0c7893178c7cf65efa9", - ); - let _ = fuzz_run(data); -} - -// This is the code from the fuzz target. Kind of sucks to duplicate it here, -// but this is fundamentally how we interpret the date. -fn fuzz_run(given_data: &[u8]) -> Option<()> { - use regex_automata::dfa::Automaton; - - if given_data.len() < 2 { - return None; - } - let haystack_len = usize::from(given_data[0]); - let haystack = given_data.get(1..1 + haystack_len)?; - let given_dfa_bytes = given_data.get(1 + haystack_len..)?; - - // We help the fuzzer along by adding a preamble to the bytes that should - // at least make these first parts valid. The preamble expects a very - // specific sequence of bytes, so it makes sense to just force this. - let label = "rust-regex-automata-dfa-sparse\x00\x00"; - assert_eq!(0, label.len() % 4); - let endianness_check = 0xFEFFu32.to_ne_bytes().to_vec(); - let version_check = 2u32.to_ne_bytes().to_vec(); - let mut dfa_bytes: Vec = vec![]; - dfa_bytes.extend(label.as_bytes()); - dfa_bytes.extend(&endianness_check); - dfa_bytes.extend(&version_check); - dfa_bytes.extend(given_dfa_bytes); - // This is the real test: checking that any input we give to - // DFA::from_bytes will never result in a panic. 
- let (dfa, _) = - regex_automata::dfa::sparse::DFA::from_bytes(&dfa_bytes).ok()?; - let _ = dfa.try_search_fwd(®ex_automata::Input::new(haystack)); - Some(()) -} diff --git a/vendor/regex-automata/tests/fuzz/testdata/deserialize_dense_crash-9486fb7c8a93b12c12a62166b43d31640c0208a9 b/vendor/regex-automata/tests/fuzz/testdata/deserialize_dense_crash-9486fb7c8a93b12c12a62166b43d31640c0208a9 deleted file mode 100644 index 972bfb2cd405c68babf906df77a2ab452c01d10a..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 1894 zcmd^AOHKnZ41F#IbqO~ZiPh}Nif!6ku;3W%D#2~I23M%WhQmN=JL5VtnTSlq4omsH z_<8Ku$q?`u4}IT*0BpEDbBo;e+*AZq>3}y}so)cM8erW2#Q189@%WvVQ3Uk<;cVF; z(v51j6F-tSXD1X1xY04|W^EA*=;(VKxKGl>OpT%n{KWpm0VPFt{`)FS*u{WJy~0kWN^slEmL_7mdmek zEHOE9VH{E@?O1(Tap^dB|JTBux&L~^Glt7g)e*U_mx(9%GJcIUI>ee%fj8A;!TN`Q Q7L&y7j}re3CkH_|0iGi2$p8QV diff --git a/vendor/regex-automata/tests/fuzz/testdata/deserialize_dense_minimized-from-9486fb7c8a93b12c12a62166b43d31640c0208a9 b/vendor/regex-automata/tests/fuzz/testdata/deserialize_dense_minimized-from-9486fb7c8a93b12c12a62166b43d31640c0208a9 deleted file mode 100644 index 72dbdad825d233306b86959937492c336281016e..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 1882 zcmd^AOHKnZ41F#IbqO~ZiPh}Nif!6ku;3W%D#2~I23M%WhQmN=JL5VtnTSlq4omsH z_<8Ku$q?`u4}IT*0BpEDbBo;e+*AZq>3}y}so)cM8erW2#Q189@%WvVQ3Uk<;cVF; z(v51j6F-tSXD1X1xY04|W^EA*=;(VKxKGl>OpT%n{KWpm0VPFt{`)FS*u{WJy~0kWN^slEmL_7mdmek zEHOE9VH{E@?O1(Tap^dB|JTBux&L~^Glt7g)e*U_mx(9%GJcIUI>ee%fj8A;!TKkF P7Ldg14-x+WC&3d=%n|Cz diff --git a/vendor/regex-automata/tests/fuzz/testdata/deserialize_sparse_crash-0da59c0434eaf35e5a6b470fa9244bb79c72b000 b/vendor/regex-automata/tests/fuzz/testdata/deserialize_sparse_crash-0da59c0434eaf35e5a6b470fa9244bb79c72b000 deleted file mode 100644 index 5ce508803ef45322bdbd969c864237e04105a6bc..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 941 zcmWe-WdH+S1||s2QqNQY5dxAda25l@GBEqke<1h?7Y6D!G%z%%g9~6~ur^~AW`Np> zLy}@)Mu-Xj5eBV+F&#i&(}nR+X+}s$FaSOJ4I~5dF_2Z{}%MVF5EZXrn9#@Q_ggzeqKcI4&K>9Wwbx2a#3=HpqTv5^g|KBqJ E0N%YJX#fBK diff --git a/vendor/regex-automata/tests/fuzz/testdata/deserialize_sparse_crash-18cfc246f2ddfc3dfc92b0c7893178c7cf65efa9 b/vendor/regex-automata/tests/fuzz/testdata/deserialize_sparse_crash-18cfc246f2ddfc3dfc92b0c7893178c7cf65efa9 deleted file mode 100644 index 4fa13fbed47013e22bd6e9c9209ca0e8a61937fb..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 924 zcmZSHK6fTF1H=FS{|_(#X$D3h1_B^IBR&HN3{ilfp`n2)k_boz7-Yj389<$IF!4Xw zr2ou7^C~MUA)pyZ0mVUp$-1)A7Ag#(fP!p5@nEPCXF)WIg31;sSAPPOQCC^nURha# zPzBZd2TU`9yaECsh5>~6|NpW741Ymvu)uGyTR_DB|At@{K&AkQ4+;!c5QD)1Otv#* zHB1Mya1d>1;>l=y5Em}S(g5T+Fq{CAPymyHMgju^cf;pzFiCQ09+EUNp{PdXfzrod w08n=@!h?x6jshk`oB)z4a7u$EegH|3Is+iS4Hm&e025X{RGOs$$Z!B+0PByNd;kCd diff --git a/vendor/regex-automata/tests/fuzz/testdata/deserialize_sparse_crash-61fd8e3003bf9d99f6c1e5a8488727eefd234b98 b/vendor/regex-automata/tests/fuzz/testdata/deserialize_sparse_crash-61fd8e3003bf9d99f6c1e5a8488727eefd234b98 deleted file mode 100644 index 0f809f33f44d17fb4e2d04c100226208e407d109..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 933 zcmd;OVEFSN2tNI8V5> zA;$bin6?JSbO14QVLVit5$Y;nXnX_87=nTYNHaD-l*vOV<_rd?`5-=eKrsWw*dPj_ zB&G<8G?oD*FHbdj03yoGBN@)!T4Zv@p-1Nn_%68BTLxF@#eAJ4{9t1YjH-Gz(mO8k{xCfQJAZoJAhv2T+?1lGbD(`wJ2u U$^>R12#YBgn)-p11H*d;040QR*8l(j diff --git 
a/vendor/regex-automata/tests/fuzz/testdata/deserialize_sparse_crash-dbb8172d3984e7e7d03f4b5f8bb86ecd1460eff9 b/vendor/regex-automata/tests/fuzz/testdata/deserialize_sparse_crash-dbb8172d3984e7e7d03f4b5f8bb86ecd1460eff9 deleted file mode 100644 index aa72eb1dd6123880d4b062ec55f9adc2c0d2539d..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 728 zcmZQ-V5qG8_y2!=<$opy1_nz91_K}k1pogV{)d4K6fk2t1Y}ggKy$lFTU!l`2hop2 zsvxm(Fd=#;fGI`>1_21a31(y^NT$8*KWhS11}*_}BAhmWvyd3?;cQVLZ~ze?AR&tE zoLDHw2uQPl*bX291f(Gpgk)-9Fl1oRgbD#2%b{_y2zY8Pa!; diff --git a/vendor/regex-automata/tests/gen/README.md b/vendor/regex-automata/tests/gen/README.md deleted file mode 100644 index 4b7ac1bc90f164..00000000000000 --- a/vendor/regex-automata/tests/gen/README.md +++ /dev/null @@ -1,65 +0,0 @@ -This directory contains tests for serialized objects from the regex-automata -crate. Currently, there are only two supported such objects: dense and sparse -DFAs. - -The idea behind these tests is to commit some serialized objects and run some -basic tests by deserializing them and running searches and ensuring they are -correct. We also make sure these are run under Miri, since deserialization is -one of the biggest places where undefined behavior might occur in this crate -(at the time of writing). - -The main thing we're testing is that the *current* code can still deserialize -*old* objects correctly. Generally speaking, compatibility extends to semver -compatible releases of this crate. Beyond that, no promises are made, although -in practice callers can at least depend on errors occurring. (The serialized -format always includes a version number, and incompatible changes increment -that version number such that an error will occur if an unsupported version is -detected.) - -To generate the dense DFAs, I used this command: - -``` -$ regex-cli generate serialize dense regex \ - MULTI_PATTERN_V2 \ - tests/gen/dense/ \ - --rustfmt \ - --safe \ - --starts-for-each-pattern \ - --specialize-start-states \ - --start-kind both \ - --unicode-word-boundary \ - --minimize \ - '\b[a-zA-Z]+\b' \ - '(?m)^\S+$' \ - '(?Rm)^\S+$' -``` - -And to generate the sparse DFAs, I used this command, which is the same as -above, but with `s/dense/sparse/g`. - -``` -$ regex-cli generate serialize sparse regex \ - MULTI_PATTERN_V2 \ - tests/gen/sparse/ \ - --rustfmt \ - --safe \ - --starts-for-each-pattern \ - --specialize-start-states \ - --start-kind both \ - --unicode-word-boundary \ - --minimize \ - '\b[a-zA-Z]+\b' \ - '(?m)^\S+$' \ - '(?Rm)^\S+$' -``` - -The idea is to try to enable as many of the DFA's options as possible in order -to test that serialization works for all of them. - -Arguably we should increase test coverage here, but this is a start. Note -that in particular, this does not need to test that serialization and -deserialization correctly round-trips on its own. Indeed, the normal regex test -suite has a test that does a serialization round trip for every test supported -by DFAs. So that has very good coverage. What we're interested in testing here -is our compatibility promise: do DFAs generated with an older revision of the -code still deserialize correctly? 
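For context on the serialized format these deleted tests exercise: the wire helpers removed above lay every artifact out as a NUL-terminated, zero-padded label, a native-endian 0xFEFF endianness check, and a u32 format version, and the fuzz targets rebuild exactly that preamble by hand before appending attacker-controlled bytes. The compatibility promise described in the deleted README rests on that version field. Below is a minimal, self-contained sketch of the header layout; the `header` helper and its hard-coded values are illustrative only and are not part of the vendored crate or of this patch.

```
// Illustrative sketch of the wire header built by the deleted helpers
// (write_label / write_endianness_check / write_version) and reassembled
// by hand in the fuzz targets above. Not part of the vendored crate.
fn padding_len(non_padding_len: usize) -> usize {
    // Same formula as the deleted helper: pad up to a multiple of 4.
    (4 - (non_padding_len & 0b11)) & 0b11
}

fn header(label: &str, version: u32) -> Vec<u8> {
    assert!(label.len() <= 255, "label must not be longer than 255 bytes");
    assert!(label.bytes().all(|b| b != 0), "label must not contain NUL bytes");
    let mut out = Vec::new();
    // NUL-terminated label, zero-padded so the block is a multiple of 4.
    out.extend_from_slice(label.as_bytes());
    out.extend(std::iter::repeat(0u8).take(1 + padding_len(label.len() + 1)));
    // Endianness check: 0xFEFF written in native endianness.
    out.extend_from_slice(&0xFEFFu32.to_ne_bytes());
    // Format version; the fuzz targets above use 2.
    out.extend_from_slice(&version.to_ne_bytes());
    out
}

fn main() {
    // "rust-regex-automata-dfa-dense" is 29 bytes, so the label block is
    // padded to 32 bytes, followed by 4 + 4 bytes of checks.
    let h = header("rust-regex-automata-dfa-dense", 2);
    assert_eq!(h.len(), 40);
    assert_eq!(h.len() % 4, 0);
}
```

Every field is kept to a multiple of 4 bytes, presumably so the dense DFA's u32-based representation can be loaded from such a buffer without re-alignment (compare the AlignAs<[u8], u32> wrapper in the generated multi_pattern_v2.rs below).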
diff --git a/vendor/regex-automata/tests/gen/dense/mod.rs b/vendor/regex-automata/tests/gen/dense/mod.rs deleted file mode 100644 index b4365d4e19d2d1..00000000000000 --- a/vendor/regex-automata/tests/gen/dense/mod.rs +++ /dev/null @@ -1,22 +0,0 @@ -use regex_automata::{Input, Match}; - -mod multi_pattern_v2; - -#[test] -fn multi_pattern_v2() { - use multi_pattern_v2::MULTI_PATTERN_V2 as RE; - - assert_eq!(Some(Match::must(0, 0..4)), RE.find("abcd")); - assert_eq!(Some(Match::must(0, 2..6)), RE.find("@ abcd @")); - assert_eq!(Some(Match::must(1, 0..6)), RE.find("@abcd@")); - assert_eq!(Some(Match::must(0, 1..5)), RE.find("\nabcd\n")); - assert_eq!(Some(Match::must(0, 1..5)), RE.find("\nabcd wxyz\n")); - assert_eq!(Some(Match::must(1, 1..7)), RE.find("\n@abcd@\n")); - assert_eq!(Some(Match::must(2, 0..6)), RE.find("@abcd@\r\n")); - assert_eq!(Some(Match::must(1, 2..8)), RE.find("\r\n@abcd@")); - assert_eq!(Some(Match::must(2, 2..8)), RE.find("\r\n@abcd@\r\n")); - - // Fails because we have heuristic support for Unicode word boundaries - // enabled. - assert!(RE.try_search(&Input::new(b"\xFF@abcd@\xFF")).is_err()); -} diff --git a/vendor/regex-automata/tests/gen/dense/multi_pattern_v2.rs b/vendor/regex-automata/tests/gen/dense/multi_pattern_v2.rs deleted file mode 100644 index a95fd204b5ef87..00000000000000 --- a/vendor/regex-automata/tests/gen/dense/multi_pattern_v2.rs +++ /dev/null @@ -1,43 +0,0 @@ -// DO NOT EDIT THIS FILE. IT WAS AUTOMATICALLY GENERATED BY: -// -// regex-cli generate serialize dense regex MULTI_PATTERN_V2 tests/gen/dense/ --rustfmt --safe --starts-for-each-pattern --specialize-start-states --start-kind both --unicode-word-boundary --minimize \b[a-zA-Z]+\b (?m)^\S+$ (?Rm)^\S+$ -// -// regex-cli 0.0.1 is available on crates.io. - -use regex_automata::{ - dfa::{dense::DFA, regex::Regex}, - util::{lazy::Lazy, wire::AlignAs}, -}; - -pub static MULTI_PATTERN_V2: Lazy>> = - Lazy::new(|| { - let dfafwd = { - static ALIGNED: &AlignAs<[u8], u32> = &AlignAs { - _align: [], - #[cfg(target_endian = "big")] - bytes: *include_bytes!("multi_pattern_v2_fwd.bigendian.dfa"), - #[cfg(target_endian = "little")] - bytes: *include_bytes!( - "multi_pattern_v2_fwd.littleendian.dfa" - ), - }; - DFA::from_bytes(&ALIGNED.bytes) - .expect("serialized forward DFA should be valid") - .0 - }; - let dfarev = { - static ALIGNED: &AlignAs<[u8], u32> = &AlignAs { - _align: [], - #[cfg(target_endian = "big")] - bytes: *include_bytes!("multi_pattern_v2_rev.bigendian.dfa"), - #[cfg(target_endian = "little")] - bytes: *include_bytes!( - "multi_pattern_v2_rev.littleendian.dfa" - ), - }; - DFA::from_bytes(&ALIGNED.bytes) - .expect("serialized reverse DFA should be valid") - .0 - }; - Regex::builder().build_from_dfas(dfafwd, dfarev) - }); diff --git a/vendor/regex-automata/tests/gen/dense/multi_pattern_v2_fwd.bigendian.dfa b/vendor/regex-automata/tests/gen/dense/multi_pattern_v2_fwd.bigendian.dfa deleted file mode 100644 index 6d6e040c36f08157a233d1fa08614f9856fbfa87..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 11100 zcmeHN+invv5cTG=n{X%Gp+LE}g#3elB@%%rKx$KoM}L~Xj55cbadw@{N~ytJwMUvB zpYg_?V>{VR)RVK*P5w%O!N)^3+ ze=uA_Sv4^V7<=5YuBlc_kpM%@gP0Xd4%`;m{7b!D_P!Jz0W6PTR*wbfYm03DrCu)kU(+y} zAZSe;IU=ejo6O-Ib0$JK|F!WX*Est+dM~ifviCH1D#&MDeLTrEKKpF+S;)HU{^ed* z_b<8ny>Dom_d?NE#eaP*>g}%km$P2qf5|n@zmCp7>#qCnTcr>K^5fWe4teGEn67dC zk-jSa2kblMuKOR!zUtNfOAb6H-;Sw&lkU3ziaf^WUvl6P`FBkIH|ehXFU`7t$$?8t zL0l=%u~wQoas*AA0tF4SD=-K8hs8rWBBSoiaQcvE(lG{ZH!UviGH3dyhUm 
z>$v@Aj&+W`$DFD5zs*0+fb(yeBi(iX@?B4nKL_K#=b_#B{;7BMzxC(7Qhfd+ zoy&i!^IvH13;Q3|f^+CAE#--E{x|7d{!^X*$|w)ZOe8sQTXfz(`NroSp8td^&Vj@C zAL`$vv-zibQb4@6`lrbn{#ke3e?dNW1@<5PBh4J?T>ep03KYis!v4E4!Q2Kr|6tC? zI+uUcn*#D%kKF$;uH+iO_i#S^9>hAEf2)!}ytewM*&6G?y6gUTz4y_F{dTOGBc02? zd#_S07e!(DeJ^ZCa~tgZgE=4TT>eu%|FrG@K=HoQ8sOSk=klLw|H)E7-`fj%bE630 z{-D5$kw!T-;eG-dys!QX=^HP9S}*!q7P?NP2r+6Cok%~TJenbDR&j-t;Y zZLn(_>f?ZRKe!ONgC++PgQ1Oyz;XbGwLI#V>!?}Bz>?AwhF)%o8Mxf_Ng}YUgYaL~ Ca~hcd diff --git a/vendor/regex-automata/tests/gen/dense/multi_pattern_v2_fwd.littleendian.dfa b/vendor/regex-automata/tests/gen/dense/multi_pattern_v2_fwd.littleendian.dfa deleted file mode 100644 index a1f4b3da157c729f7146d7e479d5be9e99564c0d..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 11100 zcmeHL*=`gu5OnT@n{Y#ba3_TQhyImFEIa{H4v9y9n!k)th$z8#(%4nBVYhu@EfA%wGEAuK})>K?dG!?3ixva-51 zf%T1XY))8;Q9E<~D-bNigRSb18>2|6 z2xvZP9|iL~n2@&xWNj2BYA~w#sJ-NP2S(&<0{sX`8Z1)n!+_dD!4MB7cuYRk$R>49%*sT7|Dc4_p2^x6ap`u#b9A+Une9f(K$5UjiZ%?Gvr_r&V|KV}PtcrYPv3!Jxp z+lN{Oi|}AV-X_rV&$B<3xbF0eD(f$8f$71#R^(n3N$XAwgm#2#{2rn|+4Zk%LAM9=eOw1o9!0pb%_Y&#Wl-(k cvGjAEjYD$`T(`gz!NiFMOkm$M*gly425W{InE(I) diff --git a/vendor/regex-automata/tests/gen/dense/multi_pattern_v2_rev.bigendian.dfa b/vendor/regex-automata/tests/gen/dense/multi_pattern_v2_rev.bigendian.dfa deleted file mode 100644 index 74f74ec2a95568ad24c31c70bafe2721122d6e14..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 7584 zcmeHL?QRn>5cFLxDJ_&j_$~#?Hznj9{3?+M`~svlmH6pLyCN0i zy{f;0!PX+~L~J+MX~kXO-tPTY>=6$h4u``>kBKM5(}-u!ffxHP8ypg`~7nkBQ=;de2G}jx^xPurDq@iRkAKu@ov*@FBi+ZPG^zY!u!Vq>+>3d~brq7sZ6YP`p9-Yf5~ z$yL0gC-$2I^ylH`Dm7{swfysUN)GUyUqr6K`+e8wN^Vh6AbZLIIqTx@DF?50Gjn?0 zwZ$6Vxn}IQX?5dkiMhC!r3=c~h8RlUC6wnsbvak_?@SgH@I?QG3(8n((5h~7o_DM- z=UV>RZx)pES$eQH&)T9QB0LfEm4FWvW~CepZA<*`IzR5-SwQ>Z*dNog%_ec9D#i{q z)6~5(7TQpEP?EVQ<2oU7vjY^+xiD+&nfX@8w?oc3`0Q?|Gu{g^pHGqI d$#>xAm35(&!ypSn%NiQMsVj%e3S=)u&jI9TR@eXl diff --git a/vendor/regex-automata/tests/gen/dense/multi_pattern_v2_rev.littleendian.dfa b/vendor/regex-automata/tests/gen/dense/multi_pattern_v2_rev.littleendian.dfa deleted file mode 100644 index 663bdb9ead53e60f1c085c2a66b7ba12bd773aed..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 7584 zcmeHL+foxj5S=7k1Vu335f#N7#{5J7N~(-LsZ|iFe4J04FB8w%ogN0V$(G4TM~hQ+ zINd#$?w;8Vfs?b-$?#d#EX|#aCrFY z^*kKCdHe3Y;=}Rr$H8FmX?Ou`|3+_4V389LEVAGtwf>DY7QHj)sz6YP1xxiG*6N)? 
zwqc54jP^VV)>$wh?8v2 z9vu@bv|wKEGPYcKuDO@SsIBCD10rP+goge#MpZievqbG`Jo3?j74wjJJ3)L8v z&wAxYo$|3v3|Q5Ryvw3`m$8YD8&NezZ6)U$&?9FN=>5-9Cc&7;Sx_rvz93-1R7+LR z;y(iW0s^0Zz=J8qXTF7k`4-fAk$x!%`{sQ{bx@5_*?;BZ^S{X#EVrO%y#*kArrCc~ z+8Uz<{_bOp{Wti6ofv;{kCcW7}9x@M@L3AKDnMl<62_&5UnGe=GXjgL8Lc hoc3>> = - Lazy::new(|| { - let dfafwd = { - #[cfg(target_endian = "big")] - static BYTES: &'static [u8] = - include_bytes!("multi_pattern_v2_fwd.bigendian.dfa"); - #[cfg(target_endian = "little")] - static BYTES: &'static [u8] = - include_bytes!("multi_pattern_v2_fwd.littleendian.dfa"); - DFA::from_bytes(BYTES) - .expect("serialized forward DFA should be valid") - .0 - }; - let dfarev = { - #[cfg(target_endian = "big")] - static BYTES: &'static [u8] = - include_bytes!("multi_pattern_v2_rev.bigendian.dfa"); - #[cfg(target_endian = "little")] - static BYTES: &'static [u8] = - include_bytes!("multi_pattern_v2_rev.littleendian.dfa"); - DFA::from_bytes(BYTES) - .expect("serialized reverse DFA should be valid") - .0 - }; - Regex::builder().build_from_dfas(dfafwd, dfarev) - }); diff --git a/vendor/regex-automata/tests/gen/sparse/multi_pattern_v2_fwd.bigendian.dfa b/vendor/regex-automata/tests/gen/sparse/multi_pattern_v2_fwd.bigendian.dfa deleted file mode 100644 index aa04f63162709f12d140017f42fdad986ecb4162..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 3476 zcmcguy>1gh5Z*iAkMEo~iTMwK1QH;@SmF_=LPtYEZ7G2Y5had9>53{9JOEEYfv6~W z2Rb@x9snhL-|WuWUf&%T5i!#G+nL?@*_oX^A08i#28a8v_TLVM$D=o|hoj+O@8xiC zbTB+T+BcxnGyH^{fr#4}Eiytu*lI=b93|;2wF}BD<(1R{Ei85`X^FJF((CmuT_#;2 zT`lO^b9koG5LuJiX^znI9wjc zG;;#(2u-OtC>I+uPM{OUak^xUCBI>0#~7l_X&k(BKTf$lLJd?QdWyxK9h?hKCCY5f zB}%7Qq*ZK+Fy}C{o=lYI-L4HP>S7CqYyo%&ha%$1^1OpWqFRy_HA{^9)`ggEMN38>mJx` zyd8}jxMM1cL`(8K0C~sDcnKBjwMOJ>(+E0DLdOjibi5jw54+|GsrN`lc;sUso;+SX zHoUG{JU(aIaw?lCD6VT6FYcgw!S3|l_^9q`Q$!`EhL=!M=p54ku3CP)tDKCjU>{75y-p|&@3&XN&THq+v}UduaPqJqu^yVIA(M|D?QGAglD ze5Qy#uO(|3vw3ID@QgGks4}3K2ex;yR>Et{4c+Pcu1!RDDw}!pzgv0n4RE};Yu-d^ z`mgTv)8e7JtE~uQWa={BZKB7Yw<2GxnWBwY8I=LWJP|3O+~*s`%uhFC<^Ket&uh$B zoe=(>M-#4U78K-m6#U$T8v^`SsG=$t|688GVe8D1eM;oN5C6De@XuH4Bli5laXJ{Q qZafE`Oypdj9#jEfQz6)uHRT00Ud2DC#r@p? 
diff --git a/vendor/regex-automata/tests/gen/sparse/multi_pattern_v2_fwd.littleendian.dfa b/vendor/regex-automata/tests/gen/sparse/multi_pattern_v2_fwd.littleendian.dfa deleted file mode 100644 index c27d92abe1d7bccfc30bcfdd12208ec2a27532a3..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 3476 zcmchay>1gh5XaY!?ej-sC+0f@2qZv^vBV>|A{`9{wWS0qM3guZWh$yvrqAA3ZkO>h+%V`~9bbb2@H1SF(NZi}KO( zz_~`MM;-6oq&oJPRL73BBe|8U)u=x+NbA3-+stZ0O^#2p#Pq zz%Sw~7CPFNfnzyl(L1UVGWxwXr(A<z*9)u8_jAaU>I$0T{|Y<9y?4&7&4Oa;D2R z@FZxc$)v+KxiDv*4huaioy}2*K3!2$a2l#4h7b&8f}nRR;u1CMbujHbzmm;ROvRwd zAb~+PBTx3#>i0QbQgiO19d8;;%Rtj*kNET8$eT=6_*`;2yJ55v0(@-UGO@X0I z5cG4_{2?{xF^8#_=|IARY)PK%YbHQEA-u1q&WPRbJy}~vOqad$e<*IAOgen$d^!s~ zE1kVsLiFj1I)c+sCGnmDLzy7x=dAf_YTiqcjWr!ec#tj1lSzkf;oZoqdng;e6Tna= z2>O8Zm7z;@7L`1umR_D%U{KQBOru$;{>$$*W#PZcSxO8p8^(tV%*WR!@o(9*^eMcW n@=M{d*z{nQv4y{qf{^{) diff --git a/vendor/regex-automata/tests/gen/sparse/multi_pattern_v2_rev.bigendian.dfa b/vendor/regex-automata/tests/gen/sparse/multi_pattern_v2_rev.bigendian.dfa deleted file mode 100644 index 89867d30f605af4c8ac4294e76d6a9a8023cbfbf..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 1920 zcmcgs$!^p@5bbVz7JIg2*h7G@1%|;tzyamLH*m!g11AKUnTSYSa^k>8pm63_IP)L; z0O7srF2^%oloL-XS9Q7Tb=P7qk5}t#xp==g$@1g%#}D~B&)&Yvv(=}3xmpQch z7C^lRiMSA07e#TWf^KgU{Q`+c+K2%#9E}?>A$E4B)9IC~#5Ll2fg3l0TYI-X_V@1) zckgZC;Nbp)HavXv`0$D1>1_6FKA%6&E?`W2Vr)@_V4S+0Ow1%WxWk!Rx2<;@bJ|6W z>vVb(Pv$;@WCkC!*d)g~9A6>VUg3ZqnPRj@xuOx`re0{vntb&6bQ<}PN0o1duvdW( zb_6lt6JGX zO$Qh~%$M|$=@fDn?4b1~^?{+bJi&cOU`e+ImhT=?c z3G*8ahoe%C<_EGrg8VLN^+8LKpqU2uZm;E5h^@{Ba&DTn#Ps}KHg0m{ZyfXP{-Msn77fX3YvPb~hBfz1*+EAzy>mHM9&YG#hSe)>3N5 z$$yv{z~>ZutLI6E^2>|EufV;8t$ymV ia4+Rs5Vh6+6Mhx=$R&;8wDDIMKu&q54$*pF)&BrJ9)}?S diff --git a/vendor/regex-automata/tests/gen/sparse/multi_pattern_v2_rev.littleendian.dfa b/vendor/regex-automata/tests/gen/sparse/multi_pattern_v2_rev.littleendian.dfa deleted file mode 100644 index c0ca807f8947442d59554983659dd07ccee99898..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 1920 zcmchXOKuZE5QclkkMY}yd4~Yu6)j-s;j!{@9L_anV!t%@pSTjauz2i^N$~rc@n>Um&CJA$#gbx?)n%8y1M$1XWj5gvX!TEJA*cv7yeCO6PF!Ww}Odk3HAUFDl8|CJKS zXN|yzuY^w#9&CHgi%r0lZ&h8Z)$7rO7fM4Q_Xu6F(XJ+^UD6t5&GPZZ0K2)}-Uq-e zhGwhP*3;Ab(y4YO^mj>7=apEHL+NGUoQ;Xbe>pOsk`4tI@BQK^V9>V z>w|1Gn%uA5>2&iu{t($%^1GnTho*Ei zaRsO>CG0|wBa6)KwFLozZc>I_NBx(5J_6m`oo6UJ^o=I-S@J8Bnws}gVt#Z{h(*3; bVr%pNl-~p&TnvU^AP><1Cwr@~dQJZW?U;ul diff --git a/vendor/regex-automata/tests/hybrid/api.rs b/vendor/regex-automata/tests/hybrid/api.rs deleted file mode 100644 index 4b04c4f8fd2337..00000000000000 --- a/vendor/regex-automata/tests/hybrid/api.rs +++ /dev/null @@ -1,171 +0,0 @@ -use std::error::Error; - -use regex_automata::{ - hybrid::dfa::{OverlappingState, DFA}, - nfa::thompson, - HalfMatch, Input, MatchError, -}; - -// Tests that too many cache resets cause the lazy DFA to quit. -// -// We only test this on 64-bit because the test is gingerly crafted based on -// implementation details of cache sizes. It's not a great test because of -// that, but it does check some interesting properties around how positions are -// reported when a search "gives up." -// -// NOTE: If you change something in lazy DFA implementation that causes this -// test to fail by reporting different "gave up" positions, then it's generally -// okay to update the positions in the test below as long as you're sure your -// changes are correct. 
Namely, it is expected that if there are changes in the -// cache size (or changes in how big things are inside the cache), then its -// utilization may change slightly and thus impact where a search gives up. -// Precisely where a search gives up is not an API guarantee, so changing the -// offsets here is OK. -#[test] -#[cfg(target_pointer_width = "64")] -#[cfg(not(miri))] -fn too_many_cache_resets_cause_quit() -> Result<(), Box> { - // This is a carefully chosen regex. The idea is to pick one that requires - // some decent number of states (hence the bounded repetition). But we - // specifically choose to create a class with an ASCII letter and a - // non-ASCII letter so that we can check that no new states are created - // once the cache is full. Namely, if we fill up the cache on a haystack - // of 'a's, then in order to match one 'β', a new state will need to be - // created since a 'β' is encoded with multiple bytes. - // - // So we proceed by "filling" up the cache by searching a haystack of just - // 'a's. The cache won't have enough room to add enough states to find the - // match (because of the bounded repetition), which should result in it - // giving up before it finds a match. - // - // Since there's now no more room to create states, we search a haystack - // of 'β' and confirm that it gives up immediately. - let pattern = r"[aβ]{99}"; - let dfa = DFA::builder() - .configure( - // Configure it so that we have the minimum cache capacity - // possible. And that if any resets occur, the search quits. - DFA::config() - .skip_cache_capacity_check(true) - .cache_capacity(0) - .minimum_cache_clear_count(Some(0)), - ) - .thompson(thompson::NFA::config()) - .build(pattern)?; - let mut cache = dfa.create_cache(); - - let haystack = "a".repeat(101).into_bytes(); - let err = MatchError::gave_up(24); - // Notice that we make the same amount of progress in each search! That's - // because the cache is reused and already has states to handle the first - // N bytes. - assert_eq!( - Err(err.clone()), - dfa.try_search_fwd(&mut cache, &Input::new(&haystack)) - ); - assert_eq!( - Err(err.clone()), - dfa.try_search_overlapping_fwd( - &mut cache, - &Input::new(&haystack), - &mut OverlappingState::start() - ), - ); - - let haystack = "β".repeat(101).into_bytes(); - let err = MatchError::gave_up(2); - assert_eq!( - Err(err), - dfa.try_search_fwd(&mut cache, &Input::new(&haystack)) - ); - // no need to test that other find routines quit, since we did that above - - // OK, if we reset the cache, then we should be able to create more states - // and make more progress with searching for betas. - cache.reset(&dfa); - let err = MatchError::gave_up(26); - assert_eq!( - Err(err), - dfa.try_search_fwd(&mut cache, &Input::new(&haystack)) - ); - - // ... switching back to ASCII still makes progress since it just needs to - // set transitions on existing states! - let haystack = "a".repeat(101).into_bytes(); - let err = MatchError::gave_up(13); - assert_eq!( - Err(err), - dfa.try_search_fwd(&mut cache, &Input::new(&haystack)) - ); - - Ok(()) -} - -// Tests that quit bytes in the forward direction work correctly. 
-#[test] -fn quit_fwd() -> Result<(), Box> { - let dfa = DFA::builder() - .configure(DFA::config().quit(b'x', true)) - .build("[[:word:]]+$")?; - let mut cache = dfa.create_cache(); - - assert_eq!( - dfa.try_search_fwd(&mut cache, &Input::new("abcxyz")), - Err(MatchError::quit(b'x', 3)), - ); - assert_eq!( - dfa.try_search_overlapping_fwd( - &mut cache, - &Input::new(b"abcxyz"), - &mut OverlappingState::start() - ), - Err(MatchError::quit(b'x', 3)), - ); - - Ok(()) -} - -// Tests that quit bytes in the reverse direction work correctly. -#[test] -fn quit_rev() -> Result<(), Box> { - let dfa = DFA::builder() - .configure(DFA::config().quit(b'x', true)) - .thompson(thompson::Config::new().reverse(true)) - .build("^[[:word:]]+")?; - let mut cache = dfa.create_cache(); - - assert_eq!( - dfa.try_search_rev(&mut cache, &Input::new("abcxyz")), - Err(MatchError::quit(b'x', 3)), - ); - - Ok(()) -} - -// Tests that if we heuristically enable Unicode word boundaries but then -// instruct that a non-ASCII byte should NOT be a quit byte, then the builder -// will panic. -#[test] -#[should_panic] -fn quit_panics() { - DFA::config().unicode_word_boundary(true).quit(b'\xFF', false); -} - -// This tests an intesting case where even if the Unicode word boundary option -// is disabled, setting all non-ASCII bytes to be quit bytes will cause Unicode -// word boundaries to be enabled. -#[test] -fn unicode_word_implicitly_works() -> Result<(), Box> { - let mut config = DFA::config(); - for b in 0x80..=0xFF { - config = config.quit(b, true); - } - let dfa = DFA::builder().configure(config).build(r"\b")?; - let mut cache = dfa.create_cache(); - let expected = HalfMatch::must(0, 1); - assert_eq!( - Ok(Some(expected)), - dfa.try_search_fwd(&mut cache, &Input::new(" a")), - ); - Ok(()) -} diff --git a/vendor/regex-automata/tests/hybrid/mod.rs b/vendor/regex-automata/tests/hybrid/mod.rs deleted file mode 100644 index 36667d09ccc37b..00000000000000 --- a/vendor/regex-automata/tests/hybrid/mod.rs +++ /dev/null @@ -1,3 +0,0 @@ -mod api; -#[cfg(not(miri))] -mod suite; diff --git a/vendor/regex-automata/tests/hybrid/suite.rs b/vendor/regex-automata/tests/hybrid/suite.rs deleted file mode 100644 index f0c3ebdbcaba7c..00000000000000 --- a/vendor/regex-automata/tests/hybrid/suite.rs +++ /dev/null @@ -1,347 +0,0 @@ -use { - anyhow::Result, - regex_automata::{ - hybrid::{ - dfa::{OverlappingState, DFA}, - regex::{self, Regex}, - }, - nfa::thompson, - util::{prefilter::Prefilter, syntax}, - Anchored, Input, PatternSet, - }, - regex_test::{ - CompiledRegex, Match, RegexTest, SearchKind, Span, TestResult, - TestRunner, - }, -}; - -use crate::{create_input, suite, untestify_kind}; - -const EXPANSIONS: &[&str] = &["is_match", "find", "which"]; - -/// Tests the default configuration of the hybrid NFA/DFA. -#[test] -fn default() -> Result<()> { - let builder = Regex::builder(); - TestRunner::new()? - .expand(EXPANSIONS, |t| t.compiles()) - // Without NFA shrinking, this test blows the default cache capacity. - .blacklist("expensive/regression-many-repeat-no-stack-overflow") - .test_iter(suite()?.iter(), compiler(builder)) - .assert(); - Ok(()) -} - -/// Tests the hybrid NFA/DFA with prefilters enabled. -#[test] -fn prefilter() -> Result<()> { - let my_compiler = |test: &RegexTest, regexes: &[String]| { - // Parse regexes as HIRs so we can get literals to build a prefilter. 
- let mut hirs = vec![]; - for pattern in regexes.iter() { - hirs.push(syntax::parse_with(pattern, &config_syntax(test))?); - } - let kind = match untestify_kind(test.match_kind()) { - None => return Ok(CompiledRegex::skip()), - Some(kind) => kind, - }; - let pre = Prefilter::from_hirs_prefix(kind, &hirs); - let mut builder = Regex::builder(); - builder.dfa(DFA::config().prefilter(pre)); - compiler(builder)(test, regexes) - }; - TestRunner::new()? - .expand(EXPANSIONS, |t| t.compiles()) - // Without NFA shrinking, this test blows the default cache capacity. - .blacklist("expensive/regression-many-repeat-no-stack-overflow") - .test_iter(suite()?.iter(), my_compiler) - .assert(); - Ok(()) -} - -/// Tests the hybrid NFA/DFA with NFA shrinking enabled. -/// -/// This is *usually* not the configuration one wants for a lazy DFA. NFA -/// shrinking is mostly only advantageous when building a full DFA since it -/// can sharply decrease the amount of time determinization takes. But NFA -/// shrinking is itself otherwise fairly expensive currently. Since a lazy DFA -/// has no compilation time (other than for building the NFA of course) before -/// executing a search, it's usually worth it to forgo NFA shrinking. -/// -/// Nevertheless, we test to make sure everything is OK with NFA shrinking. As -/// a bonus, there are some tests we don't need to skip because they now fit in -/// the default cache capacity. -#[test] -fn nfa_shrink() -> Result<()> { - let mut builder = Regex::builder(); - builder.thompson(thompson::Config::new().shrink(true)); - TestRunner::new()? - .expand(EXPANSIONS, |t| t.compiles()) - .test_iter(suite()?.iter(), compiler(builder)) - .assert(); - Ok(()) -} - -/// Tests the hybrid NFA/DFA when 'starts_for_each_pattern' is enabled for all -/// tests. -#[test] -fn starts_for_each_pattern() -> Result<()> { - let mut builder = Regex::builder(); - builder.dfa(DFA::config().starts_for_each_pattern(true)); - TestRunner::new()? - .expand(EXPANSIONS, |t| t.compiles()) - // Without NFA shrinking, this test blows the default cache capacity. - .blacklist("expensive/regression-many-repeat-no-stack-overflow") - .test_iter(suite()?.iter(), compiler(builder)) - .assert(); - Ok(()) -} - -/// Tests the hybrid NFA/DFA when 'specialize_start_states' is enabled. -#[test] -fn specialize_start_states() -> Result<()> { - let mut builder = Regex::builder(); - builder.dfa(DFA::config().specialize_start_states(true)); - TestRunner::new()? - .expand(EXPANSIONS, |t| t.compiles()) - // Without NFA shrinking, this test blows the default cache capacity. - .blacklist("expensive/regression-many-repeat-no-stack-overflow") - .test_iter(suite()?.iter(), compiler(builder)) - .assert(); - Ok(()) -} - -/// Tests the hybrid NFA/DFA when byte classes are disabled. -/// -/// N.B. Disabling byte classes doesn't avoid any indirection at search time. -/// All it does is cause every byte value to be its own distinct equivalence -/// class. -#[test] -fn no_byte_classes() -> Result<()> { - let mut builder = Regex::builder(); - builder.dfa(DFA::config().byte_classes(false)); - TestRunner::new()? - .expand(EXPANSIONS, |t| t.compiles()) - // Without NFA shrinking, this test blows the default cache capacity. - .blacklist("expensive/regression-many-repeat-no-stack-overflow") - .test_iter(suite()?.iter(), compiler(builder)) - .assert(); - Ok(()) -} - -/// Tests that hybrid NFA/DFA never clears its cache for any test with the -/// default capacity. -/// -/// N.B. 
If a regex suite test is added that causes the cache to be cleared, -/// then this should just skip that test. (Which can be done by calling the -/// 'blacklist' method on 'TestRunner'.) -#[test] -fn no_cache_clearing() -> Result<()> { - let mut builder = Regex::builder(); - builder.dfa(DFA::config().minimum_cache_clear_count(Some(0))); - TestRunner::new()? - .expand(EXPANSIONS, |t| t.compiles()) - // Without NFA shrinking, this test blows the default cache capacity. - .blacklist("expensive/regression-many-repeat-no-stack-overflow") - .test_iter(suite()?.iter(), compiler(builder)) - .assert(); - Ok(()) -} - -/// Tests the hybrid NFA/DFA when the minimum cache capacity is set. -#[test] -fn min_cache_capacity() -> Result<()> { - let mut builder = Regex::builder(); - builder - .dfa(DFA::config().cache_capacity(0).skip_cache_capacity_check(true)); - TestRunner::new()? - .expand(EXPANSIONS, |t| t.compiles()) - .test_iter(suite()?.iter(), compiler(builder)) - .assert(); - Ok(()) -} - -fn compiler( - mut builder: regex::Builder, -) -> impl FnMut(&RegexTest, &[String]) -> Result { - move |test, regexes| { - // Parse regexes as HIRs for some analysis below. - let mut hirs = vec![]; - for pattern in regexes.iter() { - hirs.push(syntax::parse_with(pattern, &config_syntax(test))?); - } - - // Check if our regex contains things that aren't supported by DFAs. - // That is, Unicode word boundaries when searching non-ASCII text. - if !test.haystack().is_ascii() { - for hir in hirs.iter() { - if hir.properties().look_set().contains_word_unicode() { - return Ok(CompiledRegex::skip()); - } - } - } - if !configure_regex_builder(test, &mut builder) { - return Ok(CompiledRegex::skip()); - } - let re = builder.build_many(®exes)?; - let mut cache = re.create_cache(); - Ok(CompiledRegex::compiled(move |test| -> TestResult { - run_test(&re, &mut cache, test) - })) - } -} - -fn run_test( - re: &Regex, - cache: &mut regex::Cache, - test: &RegexTest, -) -> TestResult { - let input = create_input(test); - match test.additional_name() { - "is_match" => { - TestResult::matched(re.is_match(cache, input.earliest(true))) - } - "find" => match test.search_kind() { - SearchKind::Earliest | SearchKind::Leftmost => { - let input = - input.earliest(test.search_kind() == SearchKind::Earliest); - TestResult::matches( - re.find_iter(cache, input) - .take(test.match_limit().unwrap_or(std::usize::MAX)) - .map(|m| Match { - id: m.pattern().as_usize(), - span: Span { start: m.start(), end: m.end() }, - }), - ) - } - SearchKind::Overlapping => { - try_search_overlapping(re, cache, &input).unwrap() - } - }, - "which" => match test.search_kind() { - SearchKind::Earliest | SearchKind::Leftmost => { - // There are no "which" APIs for standard searches. - TestResult::skip() - } - SearchKind::Overlapping => { - let dfa = re.forward(); - let cache = cache.as_parts_mut().0; - let mut patset = PatternSet::new(dfa.pattern_len()); - dfa.try_which_overlapping_matches(cache, &input, &mut patset) - .unwrap(); - TestResult::which(patset.iter().map(|p| p.as_usize())) - } - }, - name => TestResult::fail(&format!("unrecognized test name: {name}")), - } -} - -/// Configures the given regex builder with all relevant settings on the given -/// regex test. -/// -/// If the regex test has a setting that is unsupported, then this returns -/// false (implying the test should be skipped). 
-fn configure_regex_builder( - test: &RegexTest, - builder: &mut regex::Builder, -) -> bool { - let match_kind = match untestify_kind(test.match_kind()) { - None => return false, - Some(k) => k, - }; - - let mut dfa_config = - DFA::config().match_kind(match_kind).unicode_word_boundary(true); - // When doing an overlapping search, we might try to find the start of each - // match with a custom search routine. In that case, we need to tell the - // reverse search (for the start offset) which pattern to look for. The - // only way that API works is when anchored starting states are compiled - // for each pattern. This does technically also enable it for the forward - // DFA, but we're okay with that. - if test.search_kind() == SearchKind::Overlapping { - dfa_config = dfa_config.starts_for_each_pattern(true); - } - builder - .syntax(config_syntax(test)) - .thompson(config_thompson(test)) - .dfa(dfa_config); - true -} - -/// Configuration of a Thompson NFA compiler from a regex test. -fn config_thompson(test: &RegexTest) -> thompson::Config { - let mut lookm = regex_automata::util::look::LookMatcher::new(); - lookm.set_line_terminator(test.line_terminator()); - thompson::Config::new().utf8(test.utf8()).look_matcher(lookm) -} - -/// Configuration of the regex parser from a regex test. -fn config_syntax(test: &RegexTest) -> syntax::Config { - syntax::Config::new() - .case_insensitive(test.case_insensitive()) - .unicode(test.unicode()) - .utf8(test.utf8()) - .line_terminator(test.line_terminator()) -} - -/// Execute an overlapping search, and for each match found, also find its -/// overlapping starting positions. -/// -/// N.B. This routine used to be part of the crate API, but 1) it wasn't clear -/// to me how useful it was and 2) it wasn't clear to me what its semantics -/// should be. In particular, a potentially surprising footgun of this routine -/// that it is worst case *quadratic* in the size of the haystack. Namely, it's -/// possible to report a match at every position, and for every such position, -/// scan all the way to the beginning of the haystack to find the starting -/// position. Typical leftmost non-overlapping searches don't suffer from this -/// because, well, matches can't overlap. So subsequent searches after a match -/// is found don't revisit previously scanned parts of the haystack. -/// -/// Its semantics can be strange for other reasons too. For example, given -/// the regex '.*' and the haystack 'zz', the full set of overlapping matches -/// is: [0, 0], [1, 1], [0, 1], [2, 2], [1, 2], [0, 2]. The ordering of -/// those matches is quite strange, but makes sense when you think about the -/// implementation: an end offset is found left-to-right, and then one or more -/// starting offsets are found right-to-left. -/// -/// Nevertheless, we provide this routine in our test suite because it's -/// useful to test the low level DFA overlapping search and our test suite -/// is written in a way that requires starting offsets. 
-fn try_search_overlapping( - re: &Regex, - cache: &mut regex::Cache, - input: &Input<'_>, -) -> Result { - let mut matches = vec![]; - let mut fwd_state = OverlappingState::start(); - let (fwd_dfa, rev_dfa) = (re.forward(), re.reverse()); - let (fwd_cache, rev_cache) = cache.as_parts_mut(); - while let Some(end) = { - fwd_dfa.try_search_overlapping_fwd( - fwd_cache, - input, - &mut fwd_state, - )?; - fwd_state.get_match() - } { - let revsearch = input - .clone() - .range(input.start()..end.offset()) - .anchored(Anchored::Pattern(end.pattern())) - .earliest(false); - let mut rev_state = OverlappingState::start(); - while let Some(start) = { - rev_dfa.try_search_overlapping_rev( - rev_cache, - &revsearch, - &mut rev_state, - )?; - rev_state.get_match() - } { - let span = Span { start: start.offset(), end: end.offset() }; - let mat = Match { id: end.pattern().as_usize(), span }; - matches.push(mat); - } - } - Ok(TestResult::matches(matches)) -} diff --git a/vendor/regex-automata/tests/lib.rs b/vendor/regex-automata/tests/lib.rs deleted file mode 100644 index 67c979aa8dc7c7..00000000000000 --- a/vendor/regex-automata/tests/lib.rs +++ /dev/null @@ -1,115 +0,0 @@ -// We have a similar config in the regex-automata crate root. Basically, it is -// just too annoying to deal with dead code when a subset of features is -// enabled. -#![cfg_attr( - not(all( - feature = "std", - feature = "nfa", - feature = "dfa", - feature = "hybrid", - feature = "perf-literal-substring", - feature = "perf-literal-multisubstring", - )), - allow(dead_code, unused_imports, unused_variables) -)] -// Similar deal with Miri. Just let dead code warnings be. -#![cfg_attr(miri, allow(dead_code, unused_imports, unused_variables))] - -#[cfg(any(feature = "dfa-search", feature = "dfa-onepass"))] -mod dfa; -#[cfg(feature = "dfa-search")] -mod fuzz; -#[cfg(feature = "dfa-search")] -mod gen; -#[cfg(feature = "hybrid")] -mod hybrid; -#[cfg(feature = "meta")] -mod meta; -#[cfg(any(feature = "nfa-backtrack", feature = "nfa-pikevm"))] -mod nfa; - -fn suite() -> anyhow::Result { - let _ = env_logger::try_init(); - - let mut tests = regex_test::RegexTests::new(); - macro_rules! load { - ($name:expr) => {{ - const DATA: &[u8] = - include_bytes!(concat!("../../testdata/", $name, ".toml")); - tests.load_slice($name, DATA)?; - }}; - } - - load!("anchored"); - load!("bytes"); - load!("crazy"); - load!("crlf"); - load!("earliest"); - load!("empty"); - load!("expensive"); - load!("flags"); - load!("iter"); - load!("leftmost-all"); - load!("line-terminator"); - load!("misc"); - load!("multiline"); - load!("no-unicode"); - load!("overlapping"); - load!("regression"); - load!("set"); - load!("substring"); - load!("unicode"); - load!("utf8"); - load!("word-boundary"); - load!("word-boundary-special"); - load!("fowler/basic"); - load!("fowler/nullsubexpr"); - load!("fowler/repetition"); - - Ok(tests) -} - -/// Configure a regex_automata::Input with the given test configuration. -fn create_input<'h>( - test: &'h regex_test::RegexTest, -) -> regex_automata::Input<'h> { - use regex_automata::Anchored; - - let bounds = test.bounds(); - let anchored = if test.anchored() { Anchored::Yes } else { Anchored::No }; - regex_automata::Input::new(test.haystack()) - .range(bounds.start..bounds.end) - .anchored(anchored) -} - -/// Convert capture matches into the test suite's capture values. -/// -/// The given captures must represent a valid match, where the first capturing -/// group has a non-None span. Otherwise this panics. 
-fn testify_captures( - caps: ®ex_automata::util::captures::Captures, -) -> regex_test::Captures { - assert!(caps.is_match(), "expected captures to represent a match"); - let spans = caps.iter().map(|group| { - group.map(|m| regex_test::Span { start: m.start, end: m.end }) - }); - // These unwraps are OK because we assume our 'caps' represents a match, - // and a match always gives a non-zero number of groups with the first - // group being non-None. - regex_test::Captures::new(caps.pattern().unwrap().as_usize(), spans) - .unwrap() -} - -/// Convert a test harness match kind to a regex-automata match kind. If -/// regex-automata doesn't support the harness kind, then `None` is returned. -fn untestify_kind( - kind: regex_test::MatchKind, -) -> Option { - match kind { - regex_test::MatchKind::All => Some(regex_automata::MatchKind::All), - regex_test::MatchKind::LeftmostFirst => { - Some(regex_automata::MatchKind::LeftmostFirst) - } - regex_test::MatchKind::LeftmostLongest => None, - } -} diff --git a/vendor/regex-automata/tests/meta/mod.rs b/vendor/regex-automata/tests/meta/mod.rs deleted file mode 100644 index 9d6ab475efef12..00000000000000 --- a/vendor/regex-automata/tests/meta/mod.rs +++ /dev/null @@ -1,2 +0,0 @@ -#[cfg(not(miri))] -mod suite; diff --git a/vendor/regex-automata/tests/meta/suite.rs b/vendor/regex-automata/tests/meta/suite.rs deleted file mode 100644 index 2c3de64fb95663..00000000000000 --- a/vendor/regex-automata/tests/meta/suite.rs +++ /dev/null @@ -1,200 +0,0 @@ -use { - anyhow::Result, - regex_automata::{ - meta::{self, Regex}, - util::syntax, - MatchKind, PatternSet, - }, - regex_test::{ - CompiledRegex, Match, RegexTest, SearchKind, Span, TestResult, - TestRunner, - }, -}; - -use crate::{create_input, suite, testify_captures}; - -const BLACKLIST: &[&str] = &[ - // These 'earliest' tests are blacklisted because the meta searcher doesn't - // give the same offsets that the test expects. This is legal because the - // 'earliest' routines don't guarantee a particular match offset other - // than "the earliest the regex engine can report a match." Some regex - // engines will quit earlier than others. The backtracker, for example, - // can't really quit before finding the full leftmost-first match. Many of - // the literal searchers also don't have the ability to quit fully or it's - // otherwise not worth doing. (A literal searcher not quitting as early as - // possible usually means looking at a few more bytes. That's no biggie.) - "earliest/", -]; - -/// Tests the default configuration of the meta regex engine. -#[test] -fn default() -> Result<()> { - let builder = Regex::builder(); - let mut runner = TestRunner::new()?; - runner - .expand(&["is_match", "find", "captures"], |test| test.compiles()) - .blacklist_iter(BLACKLIST) - .test_iter(suite()?.iter(), compiler(builder)) - .assert(); - Ok(()) -} - -/// Tests the default configuration minus the full DFA. -#[test] -fn no_dfa() -> Result<()> { - let mut builder = Regex::builder(); - builder.configure(Regex::config().dfa(false)); - let mut runner = TestRunner::new()?; - runner - .expand(&["is_match", "find", "captures"], |test| test.compiles()) - .blacklist_iter(BLACKLIST) - .test_iter(suite()?.iter(), compiler(builder)) - .assert(); - Ok(()) -} - -/// Tests the default configuration minus the full DFA and lazy DFA. 
-#[test] -fn no_dfa_hybrid() -> Result<()> { - let mut builder = Regex::builder(); - builder.configure(Regex::config().dfa(false).hybrid(false)); - let mut runner = TestRunner::new()?; - runner - .expand(&["is_match", "find", "captures"], |test| test.compiles()) - .blacklist_iter(BLACKLIST) - .test_iter(suite()?.iter(), compiler(builder)) - .assert(); - Ok(()) -} - -/// Tests the default configuration minus the full DFA, lazy DFA and one-pass -/// DFA. -#[test] -fn no_dfa_hybrid_onepass() -> Result<()> { - let mut builder = Regex::builder(); - builder.configure(Regex::config().dfa(false).hybrid(false).onepass(false)); - let mut runner = TestRunner::new()?; - runner - .expand(&["is_match", "find", "captures"], |test| test.compiles()) - .blacklist_iter(BLACKLIST) - .test_iter(suite()?.iter(), compiler(builder)) - .assert(); - Ok(()) -} - -/// Tests the default configuration minus the full DFA, lazy DFA, one-pass -/// DFA and backtracker. -#[test] -fn no_dfa_hybrid_onepass_backtrack() -> Result<()> { - let mut builder = Regex::builder(); - builder.configure( - Regex::config() - .dfa(false) - .hybrid(false) - .onepass(false) - .backtrack(false), - ); - let mut runner = TestRunner::new()?; - runner - .expand(&["is_match", "find", "captures"], |test| test.compiles()) - .blacklist_iter(BLACKLIST) - .test_iter(suite()?.iter(), compiler(builder)) - .assert(); - Ok(()) -} - -fn compiler( - mut builder: meta::Builder, -) -> impl FnMut(&RegexTest, &[String]) -> Result { - move |test, regexes| { - if !configure_meta_builder(test, &mut builder) { - return Ok(CompiledRegex::skip()); - } - let re = builder.build_many(®exes)?; - Ok(CompiledRegex::compiled(move |test| -> TestResult { - run_test(&re, test) - })) - } -} - -fn run_test(re: &Regex, test: &RegexTest) -> TestResult { - let input = create_input(test); - match test.additional_name() { - "is_match" => TestResult::matched(re.is_match(input)), - "find" => match test.search_kind() { - SearchKind::Earliest => TestResult::matches( - re.find_iter(input.earliest(true)) - .take(test.match_limit().unwrap_or(std::usize::MAX)) - .map(|m| Match { - id: m.pattern().as_usize(), - span: Span { start: m.start(), end: m.end() }, - }), - ), - SearchKind::Leftmost => TestResult::matches( - re.find_iter(input) - .take(test.match_limit().unwrap_or(std::usize::MAX)) - .map(|m| Match { - id: m.pattern().as_usize(), - span: Span { start: m.start(), end: m.end() }, - }), - ), - SearchKind::Overlapping => { - let mut patset = PatternSet::new(re.pattern_len()); - re.which_overlapping_matches(&input, &mut patset); - TestResult::which(patset.iter().map(|p| p.as_usize())) - } - }, - "captures" => match test.search_kind() { - SearchKind::Earliest => { - let it = re - .captures_iter(input.earliest(true)) - .take(test.match_limit().unwrap_or(std::usize::MAX)) - .map(|caps| testify_captures(&caps)); - TestResult::captures(it) - } - SearchKind::Leftmost => { - let it = re - .captures_iter(input) - .take(test.match_limit().unwrap_or(std::usize::MAX)) - .map(|caps| testify_captures(&caps)); - TestResult::captures(it) - } - SearchKind::Overlapping => { - // There is no overlapping regex API that supports captures. - TestResult::skip() - } - }, - name => TestResult::fail(&format!("unrecognized test name: {name}")), - } -} - -/// Configures the given regex builder with all relevant settings on the given -/// regex test. -/// -/// If the regex test has a setting that is unsupported, then this returns -/// false (implying the test should be skipped). 
-fn configure_meta_builder( - test: &RegexTest, - builder: &mut meta::Builder, -) -> bool { - let match_kind = match test.match_kind() { - regex_test::MatchKind::All => MatchKind::All, - regex_test::MatchKind::LeftmostFirst => MatchKind::LeftmostFirst, - regex_test::MatchKind::LeftmostLongest => return false, - }; - let meta_config = Regex::config() - .match_kind(match_kind) - .utf8_empty(test.utf8()) - .line_terminator(test.line_terminator()); - builder.configure(meta_config).syntax(config_syntax(test)); - true -} - -/// Configuration of the regex parser from a regex test. -fn config_syntax(test: &RegexTest) -> syntax::Config { - syntax::Config::new() - .case_insensitive(test.case_insensitive()) - .unicode(test.unicode()) - .utf8(test.utf8()) - .line_terminator(test.line_terminator()) -} diff --git a/vendor/regex-automata/tests/nfa/mod.rs b/vendor/regex-automata/tests/nfa/mod.rs deleted file mode 100644 index 32686214737f4d..00000000000000 --- a/vendor/regex-automata/tests/nfa/mod.rs +++ /dev/null @@ -1 +0,0 @@ -mod thompson; diff --git a/vendor/regex-automata/tests/nfa/thompson/backtrack/mod.rs b/vendor/regex-automata/tests/nfa/thompson/backtrack/mod.rs deleted file mode 100644 index 9d6ab475efef12..00000000000000 --- a/vendor/regex-automata/tests/nfa/thompson/backtrack/mod.rs +++ /dev/null @@ -1,2 +0,0 @@ -#[cfg(not(miri))] -mod suite; diff --git a/vendor/regex-automata/tests/nfa/thompson/backtrack/suite.rs b/vendor/regex-automata/tests/nfa/thompson/backtrack/suite.rs deleted file mode 100644 index c6f3b9f1fc054c..00000000000000 --- a/vendor/regex-automata/tests/nfa/thompson/backtrack/suite.rs +++ /dev/null @@ -1,213 +0,0 @@ -use { - anyhow::Result, - regex_automata::{ - nfa::thompson::{ - self, - backtrack::{self, BoundedBacktracker}, - NFA, - }, - util::{prefilter::Prefilter, syntax}, - Input, - }, - regex_test::{ - CompiledRegex, Match, MatchKind, RegexTest, SearchKind, Span, - TestResult, TestRunner, - }, -}; - -use crate::{create_input, suite, testify_captures}; - -/// Tests the default configuration of the bounded backtracker. -#[test] -fn default() -> Result<()> { - let builder = BoundedBacktracker::builder(); - let mut runner = TestRunner::new()?; - runner.expand(&["is_match", "find", "captures"], |test| test.compiles()); - // At the time of writing, every regex search in the test suite fits - // into the backtracker's default visited capacity (except for the - // blacklisted tests below). If regexes are added that blow that capacity, - // then they should be blacklisted here. A tempting alternative is to - // automatically skip them by checking the haystack length against - // BoundedBacktracker::max_haystack_len, but that could wind up hiding - // interesting failure modes. e.g., If the visited capacity is somehow - // wrong or smaller than it should be. - runner.blacklist("expensive/backtrack-blow-visited-capacity"); - runner.test_iter(suite()?.iter(), compiler(builder)).assert(); - Ok(()) -} - -/// Tests the backtracker with prefilters enabled. -#[test] -fn prefilter() -> Result<()> { - let my_compiler = |test: &RegexTest, regexes: &[String]| { - // Parse regexes as HIRs so we can get literals to build a prefilter. - let mut hirs = vec![]; - for pattern in regexes.iter() { - hirs.push(syntax::parse_with(pattern, &config_syntax(test))?); - } - // We can always select leftmost-first here because the backtracker - // only supports leftmost-first matching. 
- let pre = Prefilter::from_hirs_prefix( - regex_automata::MatchKind::LeftmostFirst, - &hirs, - ); - let mut builder = BoundedBacktracker::builder(); - builder.configure(BoundedBacktracker::config().prefilter(pre)); - compiler(builder)(test, regexes) - }; - let mut runner = TestRunner::new()?; - runner.expand(&["is_match", "find", "captures"], |test| test.compiles()); - runner.blacklist("expensive/backtrack-blow-visited-capacity"); - runner.test_iter(suite()?.iter(), my_compiler).assert(); - Ok(()) -} - -/// Tests the bounded backtracker when its visited capacity is set to its -/// minimum amount. -#[test] -fn min_visited_capacity() -> Result<()> { - let mut runner = TestRunner::new()?; - runner.expand(&["is_match", "find", "captures"], |test| test.compiles()); - runner - .test_iter(suite()?.iter(), move |test, regexes| { - let nfa = NFA::compiler() - .configure(config_thompson(test)) - .syntax(config_syntax(test)) - .build_many(®exes)?; - let mut builder = BoundedBacktracker::builder(); - if !configure_backtrack_builder(test, &mut builder) { - return Ok(CompiledRegex::skip()); - } - // Setup the bounded backtracker so that its visited capacity is - // the absolute minimum required for the test's haystack. - builder.configure(BoundedBacktracker::config().visited_capacity( - backtrack::min_visited_capacity( - &nfa, - &Input::new(test.haystack()), - ), - )); - - let re = builder.build_from_nfa(nfa)?; - let mut cache = re.create_cache(); - Ok(CompiledRegex::compiled(move |test| -> TestResult { - run_test(&re, &mut cache, test) - })) - }) - .assert(); - Ok(()) -} - -fn compiler( - mut builder: backtrack::Builder, -) -> impl FnMut(&RegexTest, &[String]) -> Result { - move |test, regexes| { - if !configure_backtrack_builder(test, &mut builder) { - return Ok(CompiledRegex::skip()); - } - let re = builder.build_many(®exes)?; - let mut cache = re.create_cache(); - Ok(CompiledRegex::compiled(move |test| -> TestResult { - run_test(&re, &mut cache, test) - })) - } -} - -fn run_test( - re: &BoundedBacktracker, - cache: &mut backtrack::Cache, - test: &RegexTest, -) -> TestResult { - let input = create_input(test); - match test.additional_name() { - "is_match" => match test.search_kind() { - SearchKind::Earliest | SearchKind::Overlapping => { - TestResult::skip() - } - SearchKind::Leftmost => { - let input = input.earliest(true); - TestResult::matched(re.try_is_match(cache, input).unwrap()) - } - }, - "find" => match test.search_kind() { - SearchKind::Earliest | SearchKind::Overlapping => { - TestResult::skip() - } - SearchKind::Leftmost => TestResult::matches( - re.try_find_iter(cache, input) - .take(test.match_limit().unwrap_or(std::usize::MAX)) - .map(|result| result.unwrap()) - .map(|m| Match { - id: m.pattern().as_usize(), - span: Span { start: m.start(), end: m.end() }, - }), - ), - }, - "captures" => match test.search_kind() { - SearchKind::Earliest | SearchKind::Overlapping => { - TestResult::skip() - } - SearchKind::Leftmost => TestResult::captures( - re.try_captures_iter(cache, input) - .take(test.match_limit().unwrap_or(std::usize::MAX)) - .map(|result| result.unwrap()) - .map(|caps| testify_captures(&caps)), - ), - }, - name => TestResult::fail(&format!("unrecognized test name: {name}")), - } -} - -/// Configures the given regex builder with all relevant settings on the given -/// regex test. -/// -/// If the regex test has a setting that is unsupported, then this returns -/// false (implying the test should be skipped). 
-fn configure_backtrack_builder( - test: &RegexTest, - builder: &mut backtrack::Builder, -) -> bool { - match (test.search_kind(), test.match_kind()) { - // For testing the standard search APIs. This is the only supported - // configuration for the backtracker. - (SearchKind::Leftmost, MatchKind::LeftmostFirst) => {} - // Overlapping APIs not supported at all for backtracker. - (SearchKind::Overlapping, _) => return false, - // Backtracking doesn't really support the notion of 'earliest'. - // Namely, backtracking already works by returning as soon as it knows - // it has found a match. It just so happens that this corresponds to - // the standard 'leftmost' formulation. - // - // The 'earliest' definition in this crate does indeed permit this - // behavior, so this is "fine," but our test suite specifically looks - // for the earliest position at which a match is known, which our - // finite automata based regex engines have no problem providing. So - // for backtracking, we just skip these tests. - (SearchKind::Earliest, _) => return false, - // For backtracking, 'all' semantics don't really make sense. - (_, MatchKind::All) => return false, - // Not supported at all in regex-automata. - (_, MatchKind::LeftmostLongest) => return false, - }; - let backtrack_config = BoundedBacktracker::config(); - builder - .configure(backtrack_config) - .syntax(config_syntax(test)) - .thompson(config_thompson(test)); - true -} - -/// Configuration of a Thompson NFA compiler from a regex test. -fn config_thompson(test: &RegexTest) -> thompson::Config { - let mut lookm = regex_automata::util::look::LookMatcher::new(); - lookm.set_line_terminator(test.line_terminator()); - thompson::Config::new().utf8(test.utf8()).look_matcher(lookm) -} - -/// Configuration of the regex parser from a regex test. -fn config_syntax(test: &RegexTest) -> syntax::Config { - syntax::Config::new() - .case_insensitive(test.case_insensitive()) - .unicode(test.unicode()) - .utf8(test.utf8()) - .line_terminator(test.line_terminator()) -} diff --git a/vendor/regex-automata/tests/nfa/thompson/mod.rs b/vendor/regex-automata/tests/nfa/thompson/mod.rs deleted file mode 100644 index b2558f7049c37d..00000000000000 --- a/vendor/regex-automata/tests/nfa/thompson/mod.rs +++ /dev/null @@ -1,4 +0,0 @@ -#[cfg(feature = "nfa-backtrack")] -mod backtrack; -#[cfg(feature = "nfa-pikevm")] -mod pikevm; diff --git a/vendor/regex-automata/tests/nfa/thompson/pikevm/mod.rs b/vendor/regex-automata/tests/nfa/thompson/pikevm/mod.rs deleted file mode 100644 index 9d6ab475efef12..00000000000000 --- a/vendor/regex-automata/tests/nfa/thompson/pikevm/mod.rs +++ /dev/null @@ -1,2 +0,0 @@ -#[cfg(not(miri))] -mod suite; diff --git a/vendor/regex-automata/tests/nfa/thompson/pikevm/suite.rs b/vendor/regex-automata/tests/nfa/thompson/pikevm/suite.rs deleted file mode 100644 index 1fb3fec9f24464..00000000000000 --- a/vendor/regex-automata/tests/nfa/thompson/pikevm/suite.rs +++ /dev/null @@ -1,162 +0,0 @@ -use { - anyhow::Result, - regex_automata::{ - nfa::thompson::{ - self, - pikevm::{self, PikeVM}, - }, - util::{prefilter::Prefilter, syntax}, - PatternSet, - }, - regex_test::{ - CompiledRegex, Match, RegexTest, SearchKind, Span, TestResult, - TestRunner, - }, -}; - -use crate::{create_input, suite, testify_captures, untestify_kind}; - -/// Tests the default configuration of the hybrid NFA/DFA. 
-#[test] -fn default() -> Result<()> { - let builder = PikeVM::builder(); - let mut runner = TestRunner::new()?; - runner.expand(&["is_match", "find", "captures"], |test| test.compiles()); - runner.test_iter(suite()?.iter(), compiler(builder)).assert(); - Ok(()) -} - -/// Tests the PikeVM with prefilters enabled. -#[test] -fn prefilter() -> Result<()> { - let my_compiler = |test: &RegexTest, regexes: &[String]| { - // Parse regexes as HIRs so we can get literals to build a prefilter. - let mut hirs = vec![]; - for pattern in regexes.iter() { - hirs.push(syntax::parse_with(pattern, &config_syntax(test))?); - } - let kind = match untestify_kind(test.match_kind()) { - None => return Ok(CompiledRegex::skip()), - Some(kind) => kind, - }; - let pre = Prefilter::from_hirs_prefix(kind, &hirs); - let mut builder = PikeVM::builder(); - builder.configure(PikeVM::config().prefilter(pre)); - compiler(builder)(test, regexes) - }; - let mut runner = TestRunner::new()?; - runner.expand(&["is_match", "find", "captures"], |test| test.compiles()); - runner.test_iter(suite()?.iter(), my_compiler).assert(); - Ok(()) -} - -fn compiler( - mut builder: pikevm::Builder, -) -> impl FnMut(&RegexTest, &[String]) -> Result { - move |test, regexes| { - if !configure_pikevm_builder(test, &mut builder) { - return Ok(CompiledRegex::skip()); - } - let re = builder.build_many(®exes)?; - let mut cache = re.create_cache(); - Ok(CompiledRegex::compiled(move |test| -> TestResult { - run_test(&re, &mut cache, test) - })) - } -} - -fn run_test( - re: &PikeVM, - cache: &mut pikevm::Cache, - test: &RegexTest, -) -> TestResult { - let input = create_input(test); - match test.additional_name() { - "is_match" => TestResult::matched(re.is_match(cache, input)), - "find" => match test.search_kind() { - SearchKind::Earliest => { - let it = re - .find_iter(cache, input.earliest(true)) - .take(test.match_limit().unwrap_or(std::usize::MAX)) - .map(|m| Match { - id: m.pattern().as_usize(), - span: Span { start: m.start(), end: m.end() }, - }); - TestResult::matches(it) - } - SearchKind::Leftmost => { - let it = re - .find_iter(cache, input) - .take(test.match_limit().unwrap_or(std::usize::MAX)) - .map(|m| Match { - id: m.pattern().as_usize(), - span: Span { start: m.start(), end: m.end() }, - }); - TestResult::matches(it) - } - SearchKind::Overlapping => { - let mut patset = PatternSet::new(re.get_nfa().pattern_len()); - re.which_overlapping_matches(cache, &input, &mut patset); - TestResult::which(patset.iter().map(|p| p.as_usize())) - } - }, - "captures" => match test.search_kind() { - SearchKind::Earliest => { - let it = re - .captures_iter(cache, input.earliest(true)) - .take(test.match_limit().unwrap_or(std::usize::MAX)) - .map(|caps| testify_captures(&caps)); - TestResult::captures(it) - } - SearchKind::Leftmost => { - let it = re - .captures_iter(cache, input) - .take(test.match_limit().unwrap_or(std::usize::MAX)) - .map(|caps| testify_captures(&caps)); - TestResult::captures(it) - } - SearchKind::Overlapping => { - // There is no overlapping PikeVM API that supports captures. - TestResult::skip() - } - }, - name => TestResult::fail(&format!("unrecognized test name: {name}")), - } -} - -/// Configures the given regex builder with all relevant settings on the given -/// regex test. -/// -/// If the regex test has a setting that is unsupported, then this returns -/// false (implying the test should be skipped). 
-fn configure_pikevm_builder( - test: &RegexTest, - builder: &mut pikevm::Builder, -) -> bool { - let match_kind = match untestify_kind(test.match_kind()) { - None => return false, - Some(k) => k, - }; - let pikevm_config = PikeVM::config().match_kind(match_kind); - builder - .configure(pikevm_config) - .syntax(config_syntax(test)) - .thompson(config_thompson(test)); - true -} - -/// Configuration of a Thompson NFA compiler from a regex test. -fn config_thompson(test: &RegexTest) -> thompson::Config { - let mut lookm = regex_automata::util::look::LookMatcher::new(); - lookm.set_line_terminator(test.line_terminator()); - thompson::Config::new().utf8(test.utf8()).look_matcher(lookm) -} - -/// Configuration of the regex parser from a regex test. -fn config_syntax(test: &RegexTest) -> syntax::Config { - syntax::Config::new() - .case_insensitive(test.case_insensitive()) - .unicode(test.unicode()) - .utf8(test.utf8()) - .line_terminator(test.line_terminator()) -} diff --git a/vendor/regex-syntax/.cargo-checksum.json b/vendor/regex-syntax/.cargo-checksum.json deleted file mode 100644 index 8ddc619c981e6d..00000000000000 --- a/vendor/regex-syntax/.cargo-checksum.json +++ /dev/null @@ -1 +0,0 @@ -{"files":{".cargo_vcs_info.json":"4d9740cca04e3e6eb255422e93538de6fde4a9723c3f3bd15562f6e502c246cc","Cargo.lock":"3285efe6948658ea24ca1cc194a2d56dac8422f57a72459daecfe38b2672dff5","Cargo.toml":"2633ef92fd0a0373037e587f23836288e2f965d578b1e02d01288b607252bc57","Cargo.toml.orig":"dc2b090e6ecd06b0ac9aad1a25b3d645e07b5d69d601ec7e8a48670ac0c4d568","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"6485b8ed310d3f0340bf1ad1f47645069ce4069dcc6bb46c7d5c6faf41de1fdb","README.md":"b2484aa7e66fb92d1378e9a7ce7605af18f77cb12c179866eaf92ba28cfec1d9","benches/bench.rs":"d2b6ae5b939abd6093064f144b981b7739d7f474ec0698a1268052fc92406635","src/ast/mod.rs":"38df06574a3816eae2796c757d9d268b7b42ce3fb1feee86750e7d04de7649c7","src/ast/parse.rs":"af6d82e62e97379a91840e99c10be1279c0ae1298a0d8e5da429e9e7bc3ec339","src/ast/print.rs":"6a681ce021a384c47dda04c71e2868b5dd61c633bb5b1a67628b9a8df16f0413","src/ast/visitor.rs":"2af2efd77727803b8d15a6244af92267e96edbfc1a211bcbea2b32e1b5483918","src/debug.rs":"91b2492394de05bb11ee75329115b33fb378c19e076370d1a1ae665ce1682777","src/either.rs":"1758e3edd056884eccadd995708d1e374ba9aa65846bd0e13b1aae852607c560","src/error.rs":"e308b3ccad0bea927f4e3957170302e9cfa743bfdf9376f3a5f4137b44ca6cfc","src/hir/interval.rs":"74d75837d24ab9a3cff33b375b70694cdd3b9a4610c799137533f365755ba604","src/hir/literal.rs":"61e9f54103c671694dd017c23c5c9263e032735921ef77527940e83b29ced540","src/hir/mod.rs":"13ee5b65fac1f2c9780ce48a500b1e9d198cb0bc07c0d7f4a4391aab87424563","src/hir/print.rs":"e1e1dfa71983c8fea64f500a0b9dfcbd258b4060e12b95d432468015a247a5cb","src/hir/translate.rs":"73bd3e27fe117a92abfaa0ce47fe86b70a9b456e2635e19efe099b94830b947a","src/hir/visitor.rs":"71ca9c93aa48a5ed445399659fa6455093a1bbd9ef44b66bc7095c1b08b2ec1f","src/lib.rs":"c51d1e55a8b6c4608e21a278ed0ef9480f73ab5b814b6ca6127f4a049c4d5007","src/parser.rs":"6b2f4f27e3331a01a25b87c89368dd2e54396bd425dac57941f9c1ebfd238ac8","src/rank.rs":"ff3d58b0cc5ffa69e2e8c56fc7d9ef41dd399d59a639a253a51551b858cb5bbd","src/unicode.rs":"b2084dcbd4331501b9a895fd7e7575d93ff96eb661c6e6adbc8c66bb72685cde","src/unicode_tables/LICENSE-UNICODE":"74db5baf44a41b1000312c673544b3374e4198af5605c7f9080a402cec42cfa3","src/unicode_tables/age.rs":"71b7cf52acdb4aa98b44145303b8efbfa94913235493521941ef1e0092a0ffe2","src/unicode_tables/case_foldi
ng_simple.rs":"7622c7f7f03ac0dc2f2bcd51c81a217d64de0cc912f62f1add5f676603a02456","src/unicode_tables/general_category.rs":"9488e3721f7c2ae20e1b77fcff9a59b4ed8f22954b8645ea6d8592eac1856423","src/unicode_tables/grapheme_cluster_break.rs":"0dd9d66bad598f4ec3451b6699f05c17c52079e37d463baf6385bbe51aa218f1","src/unicode_tables/mod.rs":"26c837099cd934c8062e24bc9a0aaecf15fe1de03f9c6da3f3e1e5ac3ca24bee","src/unicode_tables/perl_decimal.rs":"6a59143db81a0bcaf0e8d0af265e711d1a6472e1f091ee9ee4377da5d5d0cd1f","src/unicode_tables/perl_space.rs":"ec9bb22ed7e99feef292249c7e6f4673ee0af9635d4d158f93923494c14cd5ed","src/unicode_tables/perl_word.rs":"30f073baae28ea34c373c7778c00f20c1621c3e644404eff031f7d1cc8e9c9e2","src/unicode_tables/property_bool.rs":"66cf5bd2a1438bf9694152f077a285cf014fbd50b9dd63a97233b2ea61d64962","src/unicode_tables/property_names.rs":"8c93985d1bcb01735667a3c4cb92f7e260d267326bde9d7f048bc77cd7e07855","src/unicode_tables/property_values.rs":"ef9131ce0a575c7327ec6d466aafd8b7c25600d80c232b5a4110bbf0a5a59136","src/unicode_tables/script.rs":"41bd424f1e3a03290cf4995ced678dcf24c94b38c905c62f6819bf67e098a2ec","src/unicode_tables/script_extension.rs":"a314099ddbf50a07fe350bb0835bf2fe494ed5ad278b30e171e21506eb557906","src/unicode_tables/sentence_break.rs":"be84fbe8c5c67e761b16fe6c27f16664dbb145357835cd6b92bc2a4a4c52ee79","src/unicode_tables/word_break.rs":"c551681ad49ec28c7ae32bab1371945821c736ca8f0de410cb89f28066ec2ecf","src/utf8.rs":"193f280f3b48116ed6ca8b5fe80b9d6401b6e733dfa138caf64ec1b017b6f175","test":"c7de5fbc0010d9b5b758cd49956375a64b88601c068167fd366808950257f108"},"package":"7a2d987857b319362043e95f5353c0535c1f58eec5336fdfcf626430af7def58"} \ No newline at end of file diff --git a/vendor/regex-syntax/.cargo_vcs_info.json b/vendor/regex-syntax/.cargo_vcs_info.json deleted file mode 100644 index 2d47b3d9eaea88..00000000000000 --- a/vendor/regex-syntax/.cargo_vcs_info.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "git": { - "sha1": "691d51457db276bbdf9ca3de2cafe285c662c59f" - }, - "path_in_vcs": "regex-syntax" -} \ No newline at end of file diff --git a/vendor/regex-syntax/Cargo.lock b/vendor/regex-syntax/Cargo.lock deleted file mode 100644 index a6c29c8389f76a..00000000000000 --- a/vendor/regex-syntax/Cargo.lock +++ /dev/null @@ -1,65 +0,0 @@ -# This file is automatically @generated by Cargo. -# It is not intended for manual editing. 
-version = 3 - -[[package]] -name = "arbitrary" -version = "1.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3d036a3c4ab069c7b410a2ce876bd74808d2d0888a82667669f8e783a898bf1" -dependencies = [ - "derive_arbitrary", -] - -[[package]] -name = "derive_arbitrary" -version = "1.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e567bd82dcff979e4b03460c307b3cdc9e96fde3d73bed1496d2bc75d9dd62a" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "proc-macro2" -version = "1.0.101" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89ae43fd86e4158d6db51ad8e2b80f313af9cc74f5c0e03ccb87de09998732de" -dependencies = [ - "unicode-ident", -] - -[[package]] -name = "quote" -version = "1.0.41" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce25767e7b499d1b604768e7cde645d14cc8584231ea6b295e9c9eb22c02e1d1" -dependencies = [ - "proc-macro2", -] - -[[package]] -name = "regex-syntax" -version = "0.8.8" -dependencies = [ - "arbitrary", -] - -[[package]] -name = "syn" -version = "2.0.106" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ede7c438028d4436d71104916910f5bb611972c5cfd7f89b8300a8186e6fada6" -dependencies = [ - "proc-macro2", - "quote", - "unicode-ident", -] - -[[package]] -name = "unicode-ident" -version = "1.0.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f63a545481291138910575129486daeaf8ac54aee4387fe7906919f7830c7d9d" diff --git a/vendor/regex-syntax/Cargo.toml b/vendor/regex-syntax/Cargo.toml deleted file mode 100644 index 02277a31b9e171..00000000000000 --- a/vendor/regex-syntax/Cargo.toml +++ /dev/null @@ -1,81 +0,0 @@ -# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO -# -# When uploading crates to the registry Cargo will automatically -# "normalize" Cargo.toml files for maximal compatibility -# with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g., crates.io) dependencies. -# -# If you are reading this file be aware that the original Cargo.toml -# will likely look very different (and much more reasonable). -# See Cargo.toml.orig for the original contents. - -[package] -edition = "2021" -rust-version = "1.65" -name = "regex-syntax" -version = "0.8.8" -authors = [ - "The Rust Project Developers", - "Andrew Gallant ", -] -build = false -autolib = false -autobins = false -autoexamples = false -autotests = false -autobenches = false -description = "A regular expression parser." 
-homepage = "https://github.com/rust-lang/regex/tree/master/regex-syntax" -documentation = "https://docs.rs/regex-syntax" -readme = "README.md" -license = "MIT OR Apache-2.0" -repository = "https://github.com/rust-lang/regex" - -[package.metadata.docs.rs] -all-features = true -rustdoc-args = [ - "--cfg", - "docsrs_regex", -] - -[features] -arbitrary = ["dep:arbitrary"] -default = [ - "std", - "unicode", -] -std = [] -unicode = [ - "unicode-age", - "unicode-bool", - "unicode-case", - "unicode-gencat", - "unicode-perl", - "unicode-script", - "unicode-segment", -] -unicode-age = [] -unicode-bool = [] -unicode-case = [] -unicode-gencat = [] -unicode-perl = [] -unicode-script = [] -unicode-segment = [] - -[lib] -name = "regex_syntax" -path = "src/lib.rs" - -[[bench]] -name = "bench" -path = "benches/bench.rs" - -[dependencies.arbitrary] -version = "1.3.0" -features = ["derive"] -optional = true - -[lints.rust.unexpected_cfgs] -level = "allow" -priority = 0 -check-cfg = ["cfg(docsrs_regex)"] diff --git a/vendor/regex-syntax/LICENSE-APACHE b/vendor/regex-syntax/LICENSE-APACHE deleted file mode 100644 index 16fe87b06e802f..00000000000000 --- a/vendor/regex-syntax/LICENSE-APACHE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - -2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - -3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - -4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - -5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - -6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - -8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - -Copyright [yyyy] [name of copyright owner] - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. diff --git a/vendor/regex-syntax/LICENSE-MIT b/vendor/regex-syntax/LICENSE-MIT deleted file mode 100644 index 39d4bdb5acd313..00000000000000 --- a/vendor/regex-syntax/LICENSE-MIT +++ /dev/null @@ -1,25 +0,0 @@ -Copyright (c) 2014 The Rust Project Developers - -Permission is hereby granted, free of charge, to any -person obtaining a copy of this software and associated -documentation files (the "Software"), to deal in the -Software without restriction, including without -limitation the rights to use, copy, modify, merge, -publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software -is furnished to do so, subject to the following -conditions: - -The above copyright notice and this permission notice -shall be included in all copies or substantial portions -of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -DEALINGS IN THE SOFTWARE. diff --git a/vendor/regex-syntax/README.md b/vendor/regex-syntax/README.md deleted file mode 100644 index 529513b0c8e979..00000000000000 --- a/vendor/regex-syntax/README.md +++ /dev/null @@ -1,96 +0,0 @@ -regex-syntax -============ -This crate provides a robust regular expression parser. - -[![Build status](https://github.com/rust-lang/regex/workflows/ci/badge.svg)](https://github.com/rust-lang/regex/actions) -[![Crates.io](https://img.shields.io/crates/v/regex-syntax.svg)](https://crates.io/crates/regex-syntax) - - -### Documentation - -https://docs.rs/regex-syntax - - -### Overview - -There are two primary types exported by this crate: `Ast` and `Hir`. The former -is a faithful abstract syntax of a regular expression, and can convert regular -expressions back to their concrete syntax while mostly preserving its original -form. The latter type is a high level intermediate representation of a regular -expression that is amenable to analysis and compilation into byte codes or -automata. An `Hir` achieves this by drastically simplifying the syntactic -structure of the regular expression. While an `Hir` can be converted back to -its equivalent concrete syntax, the result is unlikely to resemble the original -concrete syntax that produced the `Hir`. - - -### Example - -This example shows how to parse a pattern string into its HIR: - -```rust -use regex_syntax::{hir::Hir, parse}; - -let hir = parse("a|b").unwrap(); -assert_eq!(hir, Hir::alternation(vec![ - Hir::literal("a".as_bytes()), - Hir::literal("b".as_bytes()), -])); -``` - - -### Safety - -This crate has no `unsafe` code and sets `forbid(unsafe_code)`. While it's -possible this crate could use `unsafe` code in the future, the standard -for doing so is extremely high. In general, most code in this crate is not -performance critical, since it tends to be dwarfed by the time it takes to -compile a regular expression into an automaton. Therefore, there is little need -for extreme optimization, and therefore, use of `unsafe`. - -The standard for using `unsafe` in this crate is extremely high because this -crate is intended to be reasonably safe to use with user supplied regular -expressions. Therefore, while there may be bugs in the regex parser itself, -they should _never_ result in memory unsafety unless there is either a bug -in the compiler or the standard library. (Since `regex-syntax` has zero -dependencies.) - - -### Crate features - -By default, this crate bundles a fairly large amount of Unicode data tables -(a source size of ~750KB). Because of their large size, one can disable some -or all of these data tables. If a regular expression attempts to use Unicode -data that is not available, then an error will occur when translating the `Ast` -to the `Hir`. - -The full set of features one can disable are -[in the "Crate features" section of the documentation](https://docs.rs/regex-syntax/*/#crate-features). - - -### Testing - -Simply running `cargo test` will give you very good coverage. 
However, because -of the large number of features exposed by this crate, a `test` script is -included in this directory which will test several feature combinations. This -is the same script that is run in CI. - - -### Motivation - -The primary purpose of this crate is to provide the parser used by `regex`. -Specifically, this crate is treated as an implementation detail of the `regex`, -and is primarily developed for the needs of `regex`. - -Since this crate is an implementation detail of `regex`, it may experience -breaking change releases at a different cadence from `regex`. This is only -possible because this crate is _not_ a public dependency of `regex`. - -Another consequence of this de-coupling is that there is no direct way to -compile a `regex::Regex` from a `regex_syntax::hir::Hir`. Instead, one must -first convert the `Hir` to a string (via its `std::fmt::Display`) and then -compile that via `Regex::new`. While this does repeat some work, compilation -typically takes much longer than parsing. - -Stated differently, the coupling between `regex` and `regex-syntax` exists only -at the level of the concrete syntax. diff --git a/vendor/regex-syntax/benches/bench.rs b/vendor/regex-syntax/benches/bench.rs deleted file mode 100644 index d4703d4fc1ebf0..00000000000000 --- a/vendor/regex-syntax/benches/bench.rs +++ /dev/null @@ -1,63 +0,0 @@ -#![feature(test)] - -extern crate test; - -use regex_syntax::Parser; -use test::Bencher; - -#[bench] -fn parse_simple1(b: &mut Bencher) { - b.iter(|| { - let re = r"^bc(d|e)*$"; - Parser::new().parse(re).unwrap() - }); -} - -#[bench] -fn parse_simple2(b: &mut Bencher) { - b.iter(|| { - let re = r"'[a-zA-Z_][a-zA-Z0-9_]*(')\b"; - Parser::new().parse(re).unwrap() - }); -} - -#[bench] -fn parse_small1(b: &mut Bencher) { - b.iter(|| { - let re = r"\p{L}|\p{N}|\s|.|\d"; - Parser::new().parse(re).unwrap() - }); -} - -#[bench] -fn parse_medium1(b: &mut Bencher) { - b.iter(|| { - let re = r"\pL\p{Greek}\p{Hiragana}\p{Alphabetic}\p{Hebrew}\p{Arabic}"; - Parser::new().parse(re).unwrap() - }); -} - -#[bench] -fn parse_medium2(b: &mut Bencher) { - b.iter(|| { - let re = r"\s\S\w\W\d\D"; - Parser::new().parse(re).unwrap() - }); -} - -#[bench] -fn parse_medium3(b: &mut Bencher) { - b.iter(|| { - let re = - r"\p{age:3.2}\p{hira}\p{scx:hira}\p{alphabetic}\p{sc:Greek}\pL"; - Parser::new().parse(re).unwrap() - }); -} - -#[bench] -fn parse_huge(b: &mut Bencher) { - b.iter(|| { - let re = r"\p{L}{100}"; - Parser::new().parse(re).unwrap() - }); -} diff --git a/vendor/regex-syntax/src/ast/mod.rs b/vendor/regex-syntax/src/ast/mod.rs deleted file mode 100644 index 7e2426dc78fdca..00000000000000 --- a/vendor/regex-syntax/src/ast/mod.rs +++ /dev/null @@ -1,1807 +0,0 @@ -/*! -Defines an abstract syntax for regular expressions. -*/ - -use core::cmp::Ordering; - -use alloc::{boxed::Box, string::String, vec, vec::Vec}; - -pub use crate::ast::visitor::{visit, Visitor}; - -pub mod parse; -pub mod print; -mod visitor; - -/// An error that occurred while parsing a regular expression into an abstract -/// syntax tree. -/// -/// Note that not all ASTs represents a valid regular expression. For example, -/// an AST is constructed without error for `\p{Quux}`, but `Quux` is not a -/// valid Unicode property name. That particular error is reported when -/// translating an AST to the high-level intermediate representation (`HIR`). -#[derive(Clone, Debug, Eq, PartialEq)] -#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] -pub struct Error { - /// The kind of error. 
- kind: ErrorKind, - /// The original pattern that the parser generated the error from. Every - /// span in an error is a valid range into this string. - pattern: String, - /// The span of this error. - span: Span, -} - -impl Error { - /// Return the type of this error. - pub fn kind(&self) -> &ErrorKind { - &self.kind - } - - /// The original pattern string in which this error occurred. - /// - /// Every span reported by this error is reported in terms of this string. - pub fn pattern(&self) -> &str { - &self.pattern - } - - /// Return the span at which this error occurred. - pub fn span(&self) -> &Span { - &self.span - } - - /// Return an auxiliary span. This span exists only for some errors that - /// benefit from being able to point to two locations in the original - /// regular expression. For example, "duplicate" errors will have the - /// main error position set to the duplicate occurrence while its - /// auxiliary span will be set to the initial occurrence. - pub fn auxiliary_span(&self) -> Option<&Span> { - use self::ErrorKind::*; - match self.kind { - FlagDuplicate { ref original } => Some(original), - FlagRepeatedNegation { ref original, .. } => Some(original), - GroupNameDuplicate { ref original, .. } => Some(original), - _ => None, - } - } -} - -/// The type of an error that occurred while building an AST. -/// -/// This error type is marked as `non_exhaustive`. This means that adding a -/// new variant is not considered a breaking change. -#[non_exhaustive] -#[derive(Clone, Debug, Eq, PartialEq)] -#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] -pub enum ErrorKind { - /// The capturing group limit was exceeded. - /// - /// Note that this represents a limit on the total number of capturing - /// groups in a regex and not necessarily the number of nested capturing - /// groups. That is, the nest limit can be low and it is still possible for - /// this error to occur. - CaptureLimitExceeded, - /// An invalid escape sequence was found in a character class set. - ClassEscapeInvalid, - /// An invalid character class range was found. An invalid range is any - /// range where the start is greater than the end. - ClassRangeInvalid, - /// An invalid range boundary was found in a character class. Range - /// boundaries must be a single literal codepoint, but this error indicates - /// that something else was found, such as a nested class. - ClassRangeLiteral, - /// An opening `[` was found with no corresponding closing `]`. - ClassUnclosed, - /// Note that this error variant is no longer used. Namely, a decimal - /// number can only appear as a repetition quantifier. When the number - /// in a repetition quantifier is empty, then it gets its own specialized - /// error, `RepetitionCountDecimalEmpty`. - DecimalEmpty, - /// An invalid decimal number was given where one was expected. - DecimalInvalid, - /// A bracketed hex literal was empty. - EscapeHexEmpty, - /// A bracketed hex literal did not correspond to a Unicode scalar value. - EscapeHexInvalid, - /// An invalid hexadecimal digit was found. - EscapeHexInvalidDigit, - /// EOF was found before an escape sequence was completed. - EscapeUnexpectedEof, - /// An unrecognized escape sequence. - EscapeUnrecognized, - /// A dangling negation was used when setting flags, e.g., `i-`. - FlagDanglingNegation, - /// A flag was used twice, e.g., `i-i`. - FlagDuplicate { - /// The position of the original flag. The error position - /// points to the duplicate flag. 
- original: Span, - }, - /// The negation operator was used twice, e.g., `-i-s`. - FlagRepeatedNegation { - /// The position of the original negation operator. The error position - /// points to the duplicate negation operator. - original: Span, - }, - /// Expected a flag but got EOF, e.g., `(?`. - FlagUnexpectedEof, - /// Unrecognized flag, e.g., `a`. - FlagUnrecognized, - /// A duplicate capture name was found. - GroupNameDuplicate { - /// The position of the initial occurrence of the capture name. The - /// error position itself points to the duplicate occurrence. - original: Span, - }, - /// A capture group name is empty, e.g., `(?P<>abc)`. - GroupNameEmpty, - /// An invalid character was seen for a capture group name. This includes - /// errors where the first character is a digit (even though subsequent - /// characters are allowed to be digits). - GroupNameInvalid, - /// A closing `>` could not be found for a capture group name. - GroupNameUnexpectedEof, - /// An unclosed group, e.g., `(ab`. - /// - /// The span of this error corresponds to the unclosed parenthesis. - GroupUnclosed, - /// An unopened group, e.g., `ab)`. - GroupUnopened, - /// The nest limit was exceeded. The limit stored here is the limit - /// configured in the parser. - NestLimitExceeded(u32), - /// The range provided in a counted repetition operator is invalid. The - /// range is invalid if the start is greater than the end. - RepetitionCountInvalid, - /// An opening `{` was not followed by a valid decimal value. - /// For example, `x{}` or `x{]}` would fail. - RepetitionCountDecimalEmpty, - /// An opening `{` was found with no corresponding closing `}`. - RepetitionCountUnclosed, - /// A repetition operator was applied to a missing sub-expression. This - /// occurs, for example, in the regex consisting of just a `*` or even - /// `(?i)*`. It is, however, possible to create a repetition operating on - /// an empty sub-expression. For example, `()*` is still considered valid. - RepetitionMissing, - /// The special word boundary syntax, `\b{something}`, was used, but - /// either EOF without `}` was seen, or an invalid character in the - /// braces was seen. - SpecialWordBoundaryUnclosed, - /// The special word boundary syntax, `\b{something}`, was used, but - /// `something` was not recognized as a valid word boundary kind. - SpecialWordBoundaryUnrecognized, - /// The syntax `\b{` was observed, but afterwards the end of the pattern - /// was observed without being able to tell whether it was meant to be a - /// bounded repetition on the `\b` or the beginning of a special word - /// boundary assertion. - SpecialWordOrRepetitionUnexpectedEof, - /// The Unicode class is not valid. This typically occurs when a `\p` is - /// followed by something other than a `{`. - UnicodeClassInvalid, - /// When octal support is disabled, this error is produced when an octal - /// escape is used. The octal escape is assumed to be an invocation of - /// a backreference, which is the common case. - UnsupportedBackreference, - /// When syntax similar to PCRE's look-around is used, this error is - /// returned. Some example syntaxes that are rejected include, but are - /// not necessarily limited to, `(?=re)`, `(?!re)`, `(?<=re)` and - /// `(?) 
-> core::fmt::Result { - crate::error::Formatter::from(self).fmt(f) - } -} - -impl core::fmt::Display for ErrorKind { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - use self::ErrorKind::*; - match *self { - CaptureLimitExceeded => write!( - f, - "exceeded the maximum number of \ - capturing groups ({})", - u32::MAX - ), - ClassEscapeInvalid => { - write!(f, "invalid escape sequence found in character class") - } - ClassRangeInvalid => write!( - f, - "invalid character class range, \ - the start must be <= the end" - ), - ClassRangeLiteral => { - write!(f, "invalid range boundary, must be a literal") - } - ClassUnclosed => write!(f, "unclosed character class"), - DecimalEmpty => write!(f, "decimal literal empty"), - DecimalInvalid => write!(f, "decimal literal invalid"), - EscapeHexEmpty => write!(f, "hexadecimal literal empty"), - EscapeHexInvalid => { - write!(f, "hexadecimal literal is not a Unicode scalar value") - } - EscapeHexInvalidDigit => write!(f, "invalid hexadecimal digit"), - EscapeUnexpectedEof => write!( - f, - "incomplete escape sequence, \ - reached end of pattern prematurely" - ), - EscapeUnrecognized => write!(f, "unrecognized escape sequence"), - FlagDanglingNegation => { - write!(f, "dangling flag negation operator") - } - FlagDuplicate { .. } => write!(f, "duplicate flag"), - FlagRepeatedNegation { .. } => { - write!(f, "flag negation operator repeated") - } - FlagUnexpectedEof => { - write!(f, "expected flag but got end of regex") - } - FlagUnrecognized => write!(f, "unrecognized flag"), - GroupNameDuplicate { .. } => { - write!(f, "duplicate capture group name") - } - GroupNameEmpty => write!(f, "empty capture group name"), - GroupNameInvalid => write!(f, "invalid capture group character"), - GroupNameUnexpectedEof => write!(f, "unclosed capture group name"), - GroupUnclosed => write!(f, "unclosed group"), - GroupUnopened => write!(f, "unopened group"), - NestLimitExceeded(limit) => write!( - f, - "exceed the maximum number of \ - nested parentheses/brackets ({})", - limit - ), - RepetitionCountInvalid => write!( - f, - "invalid repetition count range, \ - the start must be <= the end" - ), - RepetitionCountDecimalEmpty => { - write!(f, "repetition quantifier expects a valid decimal") - } - RepetitionCountUnclosed => { - write!(f, "unclosed counted repetition") - } - RepetitionMissing => { - write!(f, "repetition operator missing expression") - } - SpecialWordBoundaryUnclosed => { - write!( - f, - "special word boundary assertion is either \ - unclosed or contains an invalid character", - ) - } - SpecialWordBoundaryUnrecognized => { - write!( - f, - "unrecognized special word boundary assertion, \ - valid choices are: start, end, start-half \ - or end-half", - ) - } - SpecialWordOrRepetitionUnexpectedEof => { - write!( - f, - "found either the beginning of a special word \ - boundary or a bounded repetition on a \\b with \ - an opening brace, but no closing brace", - ) - } - UnicodeClassInvalid => { - write!(f, "invalid Unicode character class") - } - UnsupportedBackreference => { - write!(f, "backreferences are not supported") - } - UnsupportedLookAround => write!( - f, - "look-around, including look-ahead and look-behind, \ - is not supported" - ), - } - } -} - -/// Span represents the position information of a single AST item. -/// -/// All span positions are absolute byte offsets that can be used on the -/// original regular expression that was parsed. 
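The `Error` accessors documented above (`kind`, `pattern`, `span`, and `auxiliary_span`) are the whole diagnostic surface of the AST parser. A minimal illustrative sketch of how a caller might report a parse failure, assuming the vendored `regex-syntax` crate is available as an ordinary dependency:

```rust
use regex_syntax::ast::parse::Parser;

fn main() {
    // `(ab` is missing its closing parenthesis, so AST parsing fails.
    match Parser::new().parse("(ab") {
        Ok(ast) => println!("parsed, span = {:?}", ast.span()),
        Err(err) => {
            // The accessors documented above: kind, pattern and span.
            println!("kind:    {:?}", err.kind());
            println!("pattern: {:?}", err.pattern());
            println!("span:    {:?}", err.span());
            // Only "duplicate"-style errors carry an auxiliary span.
            if let Some(original) = err.auxiliary_span() {
                println!("original occurrence: {:?}", original);
            }
        }
    }
}
```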
-#[derive(Clone, Copy, Eq, PartialEq)] -#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] -pub struct Span { - /// The start byte offset. - pub start: Position, - /// The end byte offset. - pub end: Position, -} - -impl core::fmt::Debug for Span { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - write!(f, "Span({:?}, {:?})", self.start, self.end) - } -} - -impl Ord for Span { - fn cmp(&self, other: &Span) -> Ordering { - (&self.start, &self.end).cmp(&(&other.start, &other.end)) - } -} - -impl PartialOrd for Span { - fn partial_cmp(&self, other: &Span) -> Option { - Some(self.cmp(other)) - } -} - -/// A single position in a regular expression. -/// -/// A position encodes one half of a span, and include the byte offset, line -/// number and column number. -#[derive(Clone, Copy, Eq, PartialEq)] -#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] -pub struct Position { - /// The absolute offset of this position, starting at `0` from the - /// beginning of the regular expression pattern string. - pub offset: usize, - /// The line number, starting at `1`. - pub line: usize, - /// The approximate column number, starting at `1`. - pub column: usize, -} - -impl core::fmt::Debug for Position { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - write!( - f, - "Position(o: {:?}, l: {:?}, c: {:?})", - self.offset, self.line, self.column - ) - } -} - -impl Ord for Position { - fn cmp(&self, other: &Position) -> Ordering { - self.offset.cmp(&other.offset) - } -} - -impl PartialOrd for Position { - fn partial_cmp(&self, other: &Position) -> Option { - Some(self.cmp(other)) - } -} - -impl Span { - /// Create a new span with the given positions. - pub fn new(start: Position, end: Position) -> Span { - Span { start, end } - } - - /// Create a new span using the given position as the start and end. - pub fn splat(pos: Position) -> Span { - Span::new(pos, pos) - } - - /// Create a new span by replacing the starting the position with the one - /// given. - pub fn with_start(self, pos: Position) -> Span { - Span { start: pos, ..self } - } - - /// Create a new span by replacing the ending the position with the one - /// given. - pub fn with_end(self, pos: Position) -> Span { - Span { end: pos, ..self } - } - - /// Returns true if and only if this span occurs on a single line. - pub fn is_one_line(&self) -> bool { - self.start.line == self.end.line - } - - /// Returns true if and only if this span is empty. That is, it points to - /// a single position in the concrete syntax of a regular expression. - pub fn is_empty(&self) -> bool { - self.start.offset == self.end.offset - } -} - -impl Position { - /// Create a new position with the given information. - /// - /// `offset` is the absolute offset of the position, starting at `0` from - /// the beginning of the regular expression pattern string. - /// - /// `line` is the line number, starting at `1`. - /// - /// `column` is the approximate column number, starting at `1`. - pub fn new(offset: usize, line: usize, column: usize) -> Position { - Position { offset, line, column } - } -} - -/// An abstract syntax tree for a singular expression along with comments -/// found. -/// -/// Comments are not stored in the tree itself to avoid complexity. Each -/// comment contains a span of precisely where it occurred in the original -/// regular expression. 
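Because `Position` and `Span` expose public fields and simple constructors, the behaviour described above can be exercised directly. A small illustrative sketch (again assuming `regex-syntax` as a dependency):

```rust
use regex_syntax::ast::{Position, Span};

fn main() {
    // Offsets start at 0, line and column numbers start at 1.
    let start = Position::new(0, 1, 1);
    let end = Position::new(3, 1, 4);

    let span = Span::new(start, end);
    assert!(span.is_one_line()); // both endpoints are on line 1
    assert!(!span.is_empty());   // the byte offsets differ

    // `splat` collapses a span onto a single position.
    let empty = Span::splat(start);
    assert!(empty.is_empty());

    // `with_end` rebuilds a span with a replaced endpoint.
    let widened = empty.with_end(end);
    assert_eq!(widened, span);
}
```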
-#[derive(Clone, Debug, Eq, PartialEq)] -#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] -pub struct WithComments { - /// The actual ast. - pub ast: Ast, - /// All comments found in the original regular expression. - pub comments: Vec, -} - -/// A comment from a regular expression with an associated span. -/// -/// A regular expression can only contain comments when the `x` flag is -/// enabled. -#[derive(Clone, Debug, Eq, PartialEq)] -#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] -pub struct Comment { - /// The span of this comment, including the beginning `#` and ending `\n`. - pub span: Span, - /// The comment text, starting with the first character following the `#` - /// and ending with the last character preceding the `\n`. - pub comment: String, -} - -/// An abstract syntax tree for a single regular expression. -/// -/// An `Ast`'s `fmt::Display` implementation uses constant stack space and heap -/// space proportional to the size of the `Ast`. -/// -/// This type defines its own destructor that uses constant stack space and -/// heap space proportional to the size of the `Ast`. -#[derive(Clone, Debug, Eq, PartialEq)] -#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] -pub enum Ast { - /// An empty regex that matches everything. - Empty(Box), - /// A set of flags, e.g., `(?is)`. - Flags(Box), - /// A single character literal, which includes escape sequences. - Literal(Box), - /// The "any character" class. - Dot(Box), - /// A single zero-width assertion. - Assertion(Box), - /// A single Unicode character class, e.g., `\pL` or `\p{Greek}`. - ClassUnicode(Box), - /// A single perl character class, e.g., `\d` or `\W`. - ClassPerl(Box), - /// A single bracketed character class set, which may contain zero or more - /// character ranges and/or zero or more nested classes. e.g., - /// `[a-zA-Z\pL]`. - ClassBracketed(Box), - /// A repetition operator applied to an arbitrary regular expression. - Repetition(Box), - /// A grouped regular expression. - Group(Box), - /// An alternation of regular expressions. - Alternation(Box), - /// A concatenation of regular expressions. - Concat(Box), -} - -impl Ast { - /// Create an "empty" AST item. - pub fn empty(span: Span) -> Ast { - Ast::Empty(Box::new(span)) - } - - /// Create a "flags" AST item. - pub fn flags(e: SetFlags) -> Ast { - Ast::Flags(Box::new(e)) - } - - /// Create a "literal" AST item. - pub fn literal(e: Literal) -> Ast { - Ast::Literal(Box::new(e)) - } - - /// Create a "dot" AST item. - pub fn dot(span: Span) -> Ast { - Ast::Dot(Box::new(span)) - } - - /// Create a "assertion" AST item. - pub fn assertion(e: Assertion) -> Ast { - Ast::Assertion(Box::new(e)) - } - - /// Create a "Unicode class" AST item. - pub fn class_unicode(e: ClassUnicode) -> Ast { - Ast::ClassUnicode(Box::new(e)) - } - - /// Create a "Perl class" AST item. - pub fn class_perl(e: ClassPerl) -> Ast { - Ast::ClassPerl(Box::new(e)) - } - - /// Create a "bracketed class" AST item. - pub fn class_bracketed(e: ClassBracketed) -> Ast { - Ast::ClassBracketed(Box::new(e)) - } - - /// Create a "repetition" AST item. - pub fn repetition(e: Repetition) -> Ast { - Ast::Repetition(Box::new(e)) - } - - /// Create a "group" AST item. - pub fn group(e: Group) -> Ast { - Ast::Group(Box::new(e)) - } - - /// Create a "alternation" AST item. - pub fn alternation(e: Alternation) -> Ast { - Ast::Alternation(Box::new(e)) - } - - /// Create a "concat" AST item. 
- pub fn concat(e: Concat) -> Ast { - Ast::Concat(Box::new(e)) - } - - /// Return the span of this abstract syntax tree. - pub fn span(&self) -> &Span { - match *self { - Ast::Empty(ref span) => span, - Ast::Flags(ref x) => &x.span, - Ast::Literal(ref x) => &x.span, - Ast::Dot(ref span) => span, - Ast::Assertion(ref x) => &x.span, - Ast::ClassUnicode(ref x) => &x.span, - Ast::ClassPerl(ref x) => &x.span, - Ast::ClassBracketed(ref x) => &x.span, - Ast::Repetition(ref x) => &x.span, - Ast::Group(ref x) => &x.span, - Ast::Alternation(ref x) => &x.span, - Ast::Concat(ref x) => &x.span, - } - } - - /// Return true if and only if this Ast is empty. - pub fn is_empty(&self) -> bool { - match *self { - Ast::Empty(_) => true, - _ => false, - } - } - - /// Returns true if and only if this AST has any (including possibly empty) - /// subexpressions. - fn has_subexprs(&self) -> bool { - match *self { - Ast::Empty(_) - | Ast::Flags(_) - | Ast::Literal(_) - | Ast::Dot(_) - | Ast::Assertion(_) - | Ast::ClassUnicode(_) - | Ast::ClassPerl(_) => false, - Ast::ClassBracketed(_) - | Ast::Repetition(_) - | Ast::Group(_) - | Ast::Alternation(_) - | Ast::Concat(_) => true, - } - } -} - -/// Print a display representation of this Ast. -/// -/// This does not preserve any of the original whitespace formatting that may -/// have originally been present in the concrete syntax from which this Ast -/// was generated. -/// -/// This implementation uses constant stack space and heap space proportional -/// to the size of the `Ast`. -impl core::fmt::Display for Ast { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - use crate::ast::print::Printer; - Printer::new().print(self, f) - } -} - -/// An alternation of regular expressions. -#[derive(Clone, Debug, Eq, PartialEq)] -#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] -pub struct Alternation { - /// The span of this alternation. - pub span: Span, - /// The alternate regular expressions. - pub asts: Vec, -} - -impl Alternation { - /// Return this alternation as an AST. - /// - /// If this alternation contains zero ASTs, then `Ast::empty` is returned. - /// If this alternation contains exactly 1 AST, then the corresponding AST - /// is returned. Otherwise, `Ast::alternation` is returned. - pub fn into_ast(mut self) -> Ast { - match self.asts.len() { - 0 => Ast::empty(self.span), - 1 => self.asts.pop().unwrap(), - _ => Ast::alternation(self), - } - } -} - -/// A concatenation of regular expressions. -#[derive(Clone, Debug, Eq, PartialEq)] -#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] -pub struct Concat { - /// The span of this concatenation. - pub span: Span, - /// The concatenation regular expressions. - pub asts: Vec, -} - -impl Concat { - /// Return this concatenation as an AST. - /// - /// If this alternation contains zero ASTs, then `Ast::empty` is returned. - /// If this alternation contains exactly 1 AST, then the corresponding AST - /// is returned. Otherwise, `Ast::concat` is returned. - pub fn into_ast(mut self) -> Ast { - match self.asts.len() { - 0 => Ast::empty(self.span), - 1 => self.asts.pop().unwrap(), - _ => Ast::concat(self), - } - } -} - -/// A single literal expression. -/// -/// A literal corresponds to a single Unicode scalar value. Literals may be -/// represented in their literal form, e.g., `a` or in their escaped form, -/// e.g., `\x61`. 
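The smart constructors and the `into_ast` helpers above collapse degenerate cases instead of building single-child nodes. A short illustrative sketch of that behaviour using the public fields of `Concat` and `Alternation`:

```rust
use regex_syntax::ast::{self, Ast, Position, Span};

fn main() {
    let span = Span::splat(Position::new(0, 1, 1));

    // An empty concatenation collapses to `Ast::Empty` ...
    let empty = ast::Concat { span, asts: vec![] }.into_ast();
    assert!(empty.is_empty());

    // ... while a single-element alternation yields that element unchanged.
    let dot = Ast::dot(span);
    let single = ast::Alternation { span, asts: vec![dot] }.into_ast();
    assert!(matches!(single, Ast::Dot(_)));
}
```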
-#[derive(Clone, Debug, Eq, PartialEq)] -#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] -pub struct Literal { - /// The span of this literal. - pub span: Span, - /// The kind of this literal. - pub kind: LiteralKind, - /// The Unicode scalar value corresponding to this literal. - pub c: char, -} - -impl Literal { - /// If this literal was written as a `\x` hex escape, then this returns - /// the corresponding byte value. Otherwise, this returns `None`. - pub fn byte(&self) -> Option { - match self.kind { - LiteralKind::HexFixed(HexLiteralKind::X) => { - u8::try_from(self.c).ok() - } - _ => None, - } - } -} - -/// The kind of a single literal expression. -#[derive(Clone, Debug, Eq, PartialEq)] -#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] -pub enum LiteralKind { - /// The literal is written verbatim, e.g., `a` or `☃`. - Verbatim, - /// The literal is written as an escape because it is otherwise a special - /// regex meta character, e.g., `\*` or `\[`. - Meta, - /// The literal is written as an escape despite the fact that the escape is - /// unnecessary, e.g., `\%` or `\/`. - Superfluous, - /// The literal is written as an octal escape, e.g., `\141`. - Octal, - /// The literal is written as a hex code with a fixed number of digits - /// depending on the type of the escape, e.g., `\x61` or `\u0061` or - /// `\U00000061`. - HexFixed(HexLiteralKind), - /// The literal is written as a hex code with a bracketed number of - /// digits. The only restriction is that the bracketed hex code must refer - /// to a valid Unicode scalar value. - HexBrace(HexLiteralKind), - /// The literal is written as a specially recognized escape, e.g., `\f` - /// or `\n`. - Special(SpecialLiteralKind), -} - -/// The type of a special literal. -/// -/// A special literal is a special escape sequence recognized by the regex -/// parser, e.g., `\f` or `\n`. -#[derive(Clone, Debug, Eq, PartialEq)] -#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] -pub enum SpecialLiteralKind { - /// Bell, spelled `\a` (`\x07`). - Bell, - /// Form feed, spelled `\f` (`\x0C`). - FormFeed, - /// Tab, spelled `\t` (`\x09`). - Tab, - /// Line feed, spelled `\n` (`\x0A`). - LineFeed, - /// Carriage return, spelled `\r` (`\x0D`). - CarriageReturn, - /// Vertical tab, spelled `\v` (`\x0B`). - VerticalTab, - /// Space, spelled `\ ` (`\x20`). Note that this can only appear when - /// parsing in verbose mode. - Space, -} - -/// The type of a Unicode hex literal. -/// -/// Note that all variants behave the same when used with brackets. They only -/// differ when used without brackets in the number of hex digits that must -/// follow. -#[derive(Clone, Debug, Eq, PartialEq)] -#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] -pub enum HexLiteralKind { - /// A `\x` prefix. When used without brackets, this form is limited to - /// two digits. - X, - /// A `\u` prefix. When used without brackets, this form is limited to - /// four digits. - UnicodeShort, - /// A `\U` prefix. When used without brackets, this form is limited to - /// eight digits. - UnicodeLong, -} - -impl HexLiteralKind { - /// The number of digits that must be used with this literal form when - /// used without brackets. When used with brackets, there is no - /// restriction on the number of digits. - pub fn digits(&self) -> u32 { - match *self { - HexLiteralKind::X => 2, - HexLiteralKind::UnicodeShort => 4, - HexLiteralKind::UnicodeLong => 8, - } - } -} - -/// A Perl character class. 
-#[derive(Clone, Debug, Eq, PartialEq)] -#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] -pub struct ClassPerl { - /// The span of this class. - pub span: Span, - /// The kind of Perl class. - pub kind: ClassPerlKind, - /// Whether the class is negated or not. e.g., `\d` is not negated but - /// `\D` is. - pub negated: bool, -} - -/// The available Perl character classes. -#[derive(Clone, Debug, Eq, PartialEq)] -#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] -pub enum ClassPerlKind { - /// Decimal numbers. - Digit, - /// Whitespace. - Space, - /// Word characters. - Word, -} - -/// An ASCII character class. -#[derive(Clone, Debug, Eq, PartialEq)] -#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] -pub struct ClassAscii { - /// The span of this class. - pub span: Span, - /// The kind of ASCII class. - pub kind: ClassAsciiKind, - /// Whether the class is negated or not. e.g., `[[:alpha:]]` is not negated - /// but `[[:^alpha:]]` is. - pub negated: bool, -} - -/// The available ASCII character classes. -#[derive(Clone, Debug, Eq, PartialEq)] -#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] -pub enum ClassAsciiKind { - /// `[0-9A-Za-z]` - Alnum, - /// `[A-Za-z]` - Alpha, - /// `[\x00-\x7F]` - Ascii, - /// `[ \t]` - Blank, - /// `[\x00-\x1F\x7F]` - Cntrl, - /// `[0-9]` - Digit, - /// `[!-~]` - Graph, - /// `[a-z]` - Lower, - /// `[ -~]` - Print, - /// ``[!-/:-@\[-`{-~]`` - Punct, - /// `[\t\n\v\f\r ]` - Space, - /// `[A-Z]` - Upper, - /// `[0-9A-Za-z_]` - Word, - /// `[0-9A-Fa-f]` - Xdigit, -} - -impl ClassAsciiKind { - /// Return the corresponding ClassAsciiKind variant for the given name. - /// - /// The name given should correspond to the lowercase version of the - /// variant name. e.g., `cntrl` is the name for `ClassAsciiKind::Cntrl`. - /// - /// If no variant with the corresponding name exists, then `None` is - /// returned. - pub fn from_name(name: &str) -> Option { - use self::ClassAsciiKind::*; - match name { - "alnum" => Some(Alnum), - "alpha" => Some(Alpha), - "ascii" => Some(Ascii), - "blank" => Some(Blank), - "cntrl" => Some(Cntrl), - "digit" => Some(Digit), - "graph" => Some(Graph), - "lower" => Some(Lower), - "print" => Some(Print), - "punct" => Some(Punct), - "space" => Some(Space), - "upper" => Some(Upper), - "word" => Some(Word), - "xdigit" => Some(Xdigit), - _ => None, - } - } -} - -/// A Unicode character class. -#[derive(Clone, Debug, Eq, PartialEq)] -#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] -pub struct ClassUnicode { - /// The span of this class. - pub span: Span, - /// Whether this class is negated or not. - /// - /// Note: be careful when using this attribute. This specifically refers - /// to whether the class is written as `\p` or `\P`, where the latter - /// is `negated = true`. However, it also possible to write something like - /// `\P{scx!=Katakana}` which is actually equivalent to - /// `\p{scx=Katakana}` and is therefore not actually negated even though - /// `negated = true` here. To test whether this class is truly negated - /// or not, use the `is_negated` method. - pub negated: bool, - /// The kind of Unicode class. - pub kind: ClassUnicodeKind, -} - -impl ClassUnicode { - /// Returns true if this class has been negated. - /// - /// Note that this takes the Unicode op into account, if it's present. - /// e.g., `is_negated` for `\P{scx!=Katakana}` will return `false`. 
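For reference, `ClassAsciiKind::from_name` is a plain lookup keyed on the lowercase POSIX class names; a tiny illustrative sketch:

```rust
use regex_syntax::ast::ClassAsciiKind;

fn main() {
    // Names are the lowercase POSIX class names, e.g. `[:alpha:]` -> "alpha".
    assert_eq!(ClassAsciiKind::from_name("alpha"), Some(ClassAsciiKind::Alpha));
    assert_eq!(ClassAsciiKind::from_name("xdigit"), Some(ClassAsciiKind::Xdigit));
    // Unknown names simply yield `None`.
    assert_eq!(ClassAsciiKind::from_name("not-a-class"), None);
}
```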
- pub fn is_negated(&self) -> bool { - match self.kind { - ClassUnicodeKind::NamedValue { - op: ClassUnicodeOpKind::NotEqual, - .. - } => !self.negated, - _ => self.negated, - } - } -} - -/// The available forms of Unicode character classes. -#[derive(Clone, Debug, Eq, PartialEq)] -pub enum ClassUnicodeKind { - /// A one letter abbreviated class, e.g., `\pN`. - OneLetter(char), - /// A binary property, general category or script. The string may be - /// empty. - Named(String), - /// A property name and an associated value. - NamedValue { - /// The type of Unicode op used to associate `name` with `value`. - op: ClassUnicodeOpKind, - /// The property name (which may be empty). - name: String, - /// The property value (which may be empty). - value: String, - }, -} - -#[cfg(feature = "arbitrary")] -impl arbitrary::Arbitrary<'_> for ClassUnicodeKind { - fn arbitrary( - u: &mut arbitrary::Unstructured, - ) -> arbitrary::Result { - #[cfg(any( - feature = "unicode-age", - feature = "unicode-bool", - feature = "unicode-gencat", - feature = "unicode-perl", - feature = "unicode-script", - feature = "unicode-segment", - ))] - { - use alloc::string::ToString; - - use super::unicode_tables::{ - property_names::PROPERTY_NAMES, - property_values::PROPERTY_VALUES, - }; - - match u.choose_index(3)? { - 0 => { - let all = PROPERTY_VALUES - .iter() - .flat_map(|e| e.1.iter()) - .filter(|(name, _)| name.len() == 1) - .count(); - let idx = u.choose_index(all)?; - let value = PROPERTY_VALUES - .iter() - .flat_map(|e| e.1.iter()) - .take(idx + 1) - .last() - .unwrap() - .0 - .chars() - .next() - .unwrap(); - Ok(ClassUnicodeKind::OneLetter(value)) - } - 1 => { - let all = PROPERTY_VALUES - .iter() - .map(|e| e.1.len()) - .sum::() - + PROPERTY_NAMES.len(); - let idx = u.choose_index(all)?; - let name = PROPERTY_VALUES - .iter() - .flat_map(|e| e.1.iter()) - .chain(PROPERTY_NAMES) - .map(|(_, e)| e) - .take(idx + 1) - .last() - .unwrap(); - Ok(ClassUnicodeKind::Named(name.to_string())) - } - 2 => { - let all = PROPERTY_VALUES - .iter() - .map(|e| e.1.len()) - .sum::(); - let idx = u.choose_index(all)?; - let (prop, value) = PROPERTY_VALUES - .iter() - .flat_map(|e| { - e.1.iter().map(|(_, value)| (e.0, value)) - }) - .take(idx + 1) - .last() - .unwrap(); - Ok(ClassUnicodeKind::NamedValue { - op: u.arbitrary()?, - name: prop.to_string(), - value: value.to_string(), - }) - } - _ => unreachable!("index chosen is impossible"), - } - } - #[cfg(not(any( - feature = "unicode-age", - feature = "unicode-bool", - feature = "unicode-gencat", - feature = "unicode-perl", - feature = "unicode-script", - feature = "unicode-segment", - )))] - { - match u.choose_index(3)? 
{ - 0 => Ok(ClassUnicodeKind::OneLetter(u.arbitrary()?)), - 1 => Ok(ClassUnicodeKind::Named(u.arbitrary()?)), - 2 => Ok(ClassUnicodeKind::NamedValue { - op: u.arbitrary()?, - name: u.arbitrary()?, - value: u.arbitrary()?, - }), - _ => unreachable!("index chosen is impossible"), - } - } - } - - fn size_hint(depth: usize) -> (usize, Option) { - #[cfg(any( - feature = "unicode-age", - feature = "unicode-bool", - feature = "unicode-gencat", - feature = "unicode-perl", - feature = "unicode-script", - feature = "unicode-segment", - ))] - { - arbitrary::size_hint::and_all(&[ - usize::size_hint(depth), - usize::size_hint(depth), - arbitrary::size_hint::or( - (0, Some(0)), - ClassUnicodeOpKind::size_hint(depth), - ), - ]) - } - #[cfg(not(any( - feature = "unicode-age", - feature = "unicode-bool", - feature = "unicode-gencat", - feature = "unicode-perl", - feature = "unicode-script", - feature = "unicode-segment", - )))] - { - arbitrary::size_hint::and( - usize::size_hint(depth), - arbitrary::size_hint::or_all(&[ - char::size_hint(depth), - String::size_hint(depth), - arbitrary::size_hint::and_all(&[ - String::size_hint(depth), - String::size_hint(depth), - ClassUnicodeOpKind::size_hint(depth), - ]), - ]), - ) - } - } -} - -/// The type of op used in a Unicode character class. -#[derive(Clone, Debug, Eq, PartialEq)] -#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] -pub enum ClassUnicodeOpKind { - /// A property set to a specific value, e.g., `\p{scx=Katakana}`. - Equal, - /// A property set to a specific value using a colon, e.g., - /// `\p{scx:Katakana}`. - Colon, - /// A property that isn't a particular value, e.g., `\p{scx!=Katakana}`. - NotEqual, -} - -impl ClassUnicodeOpKind { - /// Whether the op is an equality op or not. - pub fn is_equal(&self) -> bool { - match *self { - ClassUnicodeOpKind::Equal | ClassUnicodeOpKind::Colon => true, - _ => false, - } - } -} - -/// A bracketed character class, e.g., `[a-z0-9]`. -#[derive(Clone, Debug, Eq, PartialEq)] -#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] -pub struct ClassBracketed { - /// The span of this class. - pub span: Span, - /// Whether this class is negated or not. e.g., `[a]` is not negated but - /// `[^a]` is. - pub negated: bool, - /// The type of this set. A set is either a normal union of things, e.g., - /// `[abc]` or a result of applying set operations, e.g., `[\pL--c]`. - pub kind: ClassSet, -} - -/// A character class set. -/// -/// This type corresponds to the internal structure of a bracketed character -/// class. That is, every bracketed character is one of two types: a union of -/// items (literals, ranges, other bracketed classes) or a tree of binary set -/// operations. -#[derive(Clone, Debug, Eq, PartialEq)] -#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] -pub enum ClassSet { - /// An item, which can be a single literal, range, nested character class - /// or a union of items. - Item(ClassSetItem), - /// A single binary operation (i.e., &&, -- or ~~). - BinaryOp(ClassSetBinaryOp), -} - -impl ClassSet { - /// Build a set from a union. - pub fn union(ast: ClassSetUnion) -> ClassSet { - ClassSet::Item(ClassSetItem::Union(ast)) - } - - /// Return the span of this character class set. - pub fn span(&self) -> &Span { - match *self { - ClassSet::Item(ref x) => x.span(), - ClassSet::BinaryOp(ref x) => &x.span, - } - } - - /// Return true if and only if this class set is empty. 
- fn is_empty(&self) -> bool { - match *self { - ClassSet::Item(ClassSetItem::Empty(_)) => true, - _ => false, - } - } -} - -/// A single component of a character class set. -#[derive(Clone, Debug, Eq, PartialEq)] -#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] -pub enum ClassSetItem { - /// An empty item. - /// - /// Note that a bracketed character class cannot contain a single empty - /// item. Empty items can appear when using one of the binary operators. - /// For example, `[&&]` is the intersection of two empty classes. - Empty(Span), - /// A single literal. - Literal(Literal), - /// A range between two literals. - Range(ClassSetRange), - /// An ASCII character class, e.g., `[:alnum:]` or `[:punct:]`. - Ascii(ClassAscii), - /// A Unicode character class, e.g., `\pL` or `\p{Greek}`. - Unicode(ClassUnicode), - /// A perl character class, e.g., `\d` or `\W`. - Perl(ClassPerl), - /// A bracketed character class set, which may contain zero or more - /// character ranges and/or zero or more nested classes. e.g., - /// `[a-zA-Z\pL]`. - Bracketed(Box), - /// A union of items. - Union(ClassSetUnion), -} - -impl ClassSetItem { - /// Return the span of this character class set item. - pub fn span(&self) -> &Span { - match *self { - ClassSetItem::Empty(ref span) => span, - ClassSetItem::Literal(ref x) => &x.span, - ClassSetItem::Range(ref x) => &x.span, - ClassSetItem::Ascii(ref x) => &x.span, - ClassSetItem::Perl(ref x) => &x.span, - ClassSetItem::Unicode(ref x) => &x.span, - ClassSetItem::Bracketed(ref x) => &x.span, - ClassSetItem::Union(ref x) => &x.span, - } - } -} - -/// A single character class range in a set. -#[derive(Clone, Debug, Eq, PartialEq)] -#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] -pub struct ClassSetRange { - /// The span of this range. - pub span: Span, - /// The start of this range. - pub start: Literal, - /// The end of this range. - pub end: Literal, -} - -impl ClassSetRange { - /// Returns true if and only if this character class range is valid. - /// - /// The only case where a range is invalid is if its start is greater than - /// its end. - pub fn is_valid(&self) -> bool { - self.start.c <= self.end.c - } -} - -/// A union of items inside a character class set. -#[derive(Clone, Debug, Eq, PartialEq)] -#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] -pub struct ClassSetUnion { - /// The span of the items in this operation. e.g., the `a-z0-9` in - /// `[^a-z0-9]` - pub span: Span, - /// The sequence of items that make up this union. - pub items: Vec, -} - -impl ClassSetUnion { - /// Push a new item in this union. - /// - /// The ending position of this union's span is updated to the ending - /// position of the span of the item given. If the union is empty, then - /// the starting position of this union is set to the starting position - /// of this item. - /// - /// In other words, if you only use this method to add items to a union - /// and you set the spans on each item correctly, then you should never - /// need to adjust the span of the union directly. - pub fn push(&mut self, item: ClassSetItem) { - if self.items.is_empty() { - self.span.start = item.span().start; - } - self.span.end = item.span().end; - self.items.push(item); - } - - /// Return this union as a character class set item. - /// - /// If this union contains zero items, then an empty union is - /// returned. If this concatenation contains exactly 1 item, then the - /// corresponding item is returned. 
Otherwise, ClassSetItem::Union is - /// returned. - pub fn into_item(mut self) -> ClassSetItem { - match self.items.len() { - 0 => ClassSetItem::Empty(self.span), - 1 => self.items.pop().unwrap(), - _ => ClassSetItem::Union(self), - } - } -} - -/// A Unicode character class set operation. -#[derive(Clone, Debug, Eq, PartialEq)] -#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] -pub struct ClassSetBinaryOp { - /// The span of this operation. e.g., the `a-z--[h-p]` in `[a-z--h-p]`. - pub span: Span, - /// The type of this set operation. - pub kind: ClassSetBinaryOpKind, - /// The left hand side of the operation. - pub lhs: Box, - /// The right hand side of the operation. - pub rhs: Box, -} - -/// The type of a Unicode character class set operation. -/// -/// Note that this doesn't explicitly represent union since there is no -/// explicit union operator. Concatenation inside a character class corresponds -/// to the union operation. -#[derive(Clone, Copy, Debug, Eq, PartialEq)] -#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] -pub enum ClassSetBinaryOpKind { - /// The intersection of two sets, e.g., `\pN&&[a-z]`. - Intersection, - /// The difference of two sets, e.g., `\pN--[0-9]`. - Difference, - /// The symmetric difference of two sets. The symmetric difference is the - /// set of elements belonging to one but not both sets. - /// e.g., `[\pL~~[:ascii:]]`. - SymmetricDifference, -} - -/// A single zero-width assertion. -#[derive(Clone, Debug, Eq, PartialEq)] -#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] -pub struct Assertion { - /// The span of this assertion. - pub span: Span, - /// The assertion kind, e.g., `\b` or `^`. - pub kind: AssertionKind, -} - -/// An assertion kind. -#[derive(Clone, Debug, Eq, PartialEq)] -#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] -pub enum AssertionKind { - /// `^` - StartLine, - /// `$` - EndLine, - /// `\A` - StartText, - /// `\z` - EndText, - /// `\b` - WordBoundary, - /// `\B` - NotWordBoundary, - /// `\b{start}` - WordBoundaryStart, - /// `\b{end}` - WordBoundaryEnd, - /// `\<` (alias for `\b{start}`) - WordBoundaryStartAngle, - /// `\>` (alias for `\b{end}`) - WordBoundaryEndAngle, - /// `\b{start-half}` - WordBoundaryStartHalf, - /// `\b{end-half}` - WordBoundaryEndHalf, -} - -/// A repetition operation applied to a regular expression. -#[derive(Clone, Debug, Eq, PartialEq)] -#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] -pub struct Repetition { - /// The span of this operation. - pub span: Span, - /// The actual operation. - pub op: RepetitionOp, - /// Whether this operation was applied greedily or not. - pub greedy: bool, - /// The regular expression under repetition. - pub ast: Box, -} - -/// The repetition operator itself. -#[derive(Clone, Debug, Eq, PartialEq)] -#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] -pub struct RepetitionOp { - /// The span of this operator. This includes things like `+`, `*?` and - /// `{m,n}`. - pub span: Span, - /// The type of operation. - pub kind: RepetitionKind, -} - -/// The kind of a repetition operator. -#[derive(Clone, Debug, Eq, PartialEq)] -#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] -pub enum RepetitionKind { - /// `?` - ZeroOrOne, - /// `*` - ZeroOrMore, - /// `+` - OneOrMore, - /// `{m,n}` - Range(RepetitionRange), -} - -/// A range repetition operator. 
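Note for readers of this removed vendored code: a minimal sketch (not part of the vendored file or of this patch) of how the class-set types above surface through the crate's public parser. It assumes the usual public paths regex_syntax::ast::parse::Parser and regex_syntax::ast::{Ast, ClassSet, ClassSetBinaryOpKind}; the pattern and function name are illustrative only.

use regex_syntax::ast::{parse::Parser, Ast, ClassSet, ClassSetBinaryOpKind};

fn class_difference_sketch() {
    // `--` inside a bracketed class is the set-difference operator described
    // above, so `[a-z--m]` should read as "a through z, minus m".
    let ast = Parser::new().parse("[a-z--m]").expect("pattern should parse");
    if let Ast::ClassBracketed(class) = &ast {
        assert!(!class.negated);
        match &class.kind {
            ClassSet::BinaryOp(op) => {
                assert_eq!(op.kind, ClassSetBinaryOpKind::Difference);
            }
            ClassSet::Item(_) => panic!("expected a set operation"),
        }
    }
}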
-#[derive(Clone, Debug, Eq, PartialEq)] -#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] -pub enum RepetitionRange { - /// `{m}` - Exactly(u32), - /// `{m,}` - AtLeast(u32), - /// `{m,n}` - Bounded(u32, u32), -} - -impl RepetitionRange { - /// Returns true if and only if this repetition range is valid. - /// - /// The only case where a repetition range is invalid is if it is bounded - /// and its start is greater than its end. - pub fn is_valid(&self) -> bool { - match *self { - RepetitionRange::Bounded(s, e) if s > e => false, - _ => true, - } - } -} - -/// A grouped regular expression. -/// -/// This includes both capturing and non-capturing groups. This does **not** -/// include flag-only groups like `(?is)`, but does contain any group that -/// contains a sub-expression, e.g., `(a)`, `(?Pa)`, `(?:a)` and -/// `(?is:a)`. -#[derive(Clone, Debug, Eq, PartialEq)] -#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] -pub struct Group { - /// The span of this group. - pub span: Span, - /// The kind of this group. - pub kind: GroupKind, - /// The regular expression in this group. - pub ast: Box, -} - -impl Group { - /// If this group is non-capturing, then this returns the (possibly empty) - /// set of flags. Otherwise, `None` is returned. - pub fn flags(&self) -> Option<&Flags> { - match self.kind { - GroupKind::NonCapturing(ref flags) => Some(flags), - _ => None, - } - } - - /// Returns true if and only if this group is capturing. - pub fn is_capturing(&self) -> bool { - match self.kind { - GroupKind::CaptureIndex(_) | GroupKind::CaptureName { .. } => true, - GroupKind::NonCapturing(_) => false, - } - } - - /// Returns the capture index of this group, if this is a capturing group. - /// - /// This returns a capture index precisely when `is_capturing` is `true`. - pub fn capture_index(&self) -> Option { - match self.kind { - GroupKind::CaptureIndex(i) => Some(i), - GroupKind::CaptureName { ref name, .. } => Some(name.index), - GroupKind::NonCapturing(_) => None, - } - } -} - -/// The kind of a group. -#[derive(Clone, Debug, Eq, PartialEq)] -#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] -pub enum GroupKind { - /// `(a)` - CaptureIndex(u32), - /// `(?a)` or `(?Pa)` - CaptureName { - /// True if the `?P<` syntax is used and false if the `?<` syntax is used. - starts_with_p: bool, - /// The capture name. - name: CaptureName, - }, - /// `(?:a)` and `(?i:a)` - NonCapturing(Flags), -} - -/// A capture name. -/// -/// This corresponds to the name itself between the angle brackets in, e.g., -/// `(?Pexpr)`. -#[derive(Clone, Debug, Eq, PartialEq)] -pub struct CaptureName { - /// The span of this capture name. - pub span: Span, - /// The capture name. - pub name: String, - /// The capture index. - pub index: u32, -} - -#[cfg(feature = "arbitrary")] -impl arbitrary::Arbitrary<'_> for CaptureName { - fn arbitrary( - u: &mut arbitrary::Unstructured, - ) -> arbitrary::Result { - let len = u.arbitrary_len::()?; - if len == 0 { - return Err(arbitrary::Error::NotEnoughData); - } - let mut name: String = String::new(); - for _ in 0..len { - let ch: char = u.arbitrary()?; - let cp = u32::from(ch); - let ascii_letter_offset = u8::try_from(cp % 26).unwrap(); - let ascii_letter = b'a' + ascii_letter_offset; - name.push(char::from(ascii_letter)); - } - Ok(CaptureName { span: u.arbitrary()?, name, index: u.arbitrary()? 
}) - } - - fn size_hint(depth: usize) -> (usize, Option) { - arbitrary::size_hint::and_all(&[ - Span::size_hint(depth), - usize::size_hint(depth), - u32::size_hint(depth), - ]) - } -} - -/// A group of flags that is not applied to a particular regular expression. -#[derive(Clone, Debug, Eq, PartialEq)] -#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] -pub struct SetFlags { - /// The span of these flags, including the grouping parentheses. - pub span: Span, - /// The actual sequence of flags. - pub flags: Flags, -} - -/// A group of flags. -/// -/// This corresponds only to the sequence of flags themselves, e.g., `is-u`. -#[derive(Clone, Debug, Eq, PartialEq)] -#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] -pub struct Flags { - /// The span of this group of flags. - pub span: Span, - /// A sequence of flag items. Each item is either a flag or a negation - /// operator. - pub items: Vec, -} - -impl Flags { - /// Add the given item to this sequence of flags. - /// - /// If the item was added successfully, then `None` is returned. If the - /// given item is a duplicate, then `Some(i)` is returned, where - /// `items[i].kind == item.kind`. - pub fn add_item(&mut self, item: FlagsItem) -> Option { - for (i, x) in self.items.iter().enumerate() { - if x.kind == item.kind { - return Some(i); - } - } - self.items.push(item); - None - } - - /// Returns the state of the given flag in this set. - /// - /// If the given flag is in the set but is negated, then `Some(false)` is - /// returned. - /// - /// If the given flag is in the set and is not negated, then `Some(true)` - /// is returned. - /// - /// Otherwise, `None` is returned. - pub fn flag_state(&self, flag: Flag) -> Option { - let mut negated = false; - for x in &self.items { - match x.kind { - FlagsItemKind::Negation => { - negated = true; - } - FlagsItemKind::Flag(ref xflag) if xflag == &flag => { - return Some(!negated); - } - _ => {} - } - } - None - } -} - -/// A single item in a group of flags. -#[derive(Clone, Debug, Eq, PartialEq)] -#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] -pub struct FlagsItem { - /// The span of this item. - pub span: Span, - /// The kind of this item. - pub kind: FlagsItemKind, -} - -/// The kind of an item in a group of flags. -#[derive(Clone, Debug, Eq, PartialEq)] -#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] -pub enum FlagsItemKind { - /// A negation operator applied to all subsequent flags in the enclosing - /// group. - Negation, - /// A single flag in a group. - Flag(Flag), -} - -impl FlagsItemKind { - /// Returns true if and only if this item is a negation operator. - pub fn is_negation(&self) -> bool { - match *self { - FlagsItemKind::Negation => true, - _ => false, - } - } -} - -/// A single flag. -#[derive(Clone, Copy, Debug, Eq, PartialEq)] -#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] -pub enum Flag { - /// `i` - CaseInsensitive, - /// `m` - MultiLine, - /// `s` - DotMatchesNewLine, - /// `U` - SwapGreed, - /// `u` - Unicode, - /// `R` - CRLF, - /// `x` - IgnoreWhitespace, -} - -/// A custom `Drop` impl is used for `Ast` such that it uses constant stack -/// space but heap space proportional to the depth of the `Ast`. -impl Drop for Ast { - fn drop(&mut self) { - use core::mem; - - match *self { - Ast::Empty(_) - | Ast::Flags(_) - | Ast::Literal(_) - | Ast::Dot(_) - | Ast::Assertion(_) - | Ast::ClassUnicode(_) - | Ast::ClassPerl(_) - // Bracketed classes are recursive, they get their own Drop impl. 
- | Ast::ClassBracketed(_) => return, - Ast::Repetition(ref x) if !x.ast.has_subexprs() => return, - Ast::Group(ref x) if !x.ast.has_subexprs() => return, - Ast::Alternation(ref x) if x.asts.is_empty() => return, - Ast::Concat(ref x) if x.asts.is_empty() => return, - _ => {} - } - - let empty_span = || Span::splat(Position::new(0, 0, 0)); - let empty_ast = || Ast::empty(empty_span()); - let mut stack = vec![mem::replace(self, empty_ast())]; - while let Some(mut ast) = stack.pop() { - match ast { - Ast::Empty(_) - | Ast::Flags(_) - | Ast::Literal(_) - | Ast::Dot(_) - | Ast::Assertion(_) - | Ast::ClassUnicode(_) - | Ast::ClassPerl(_) - // Bracketed classes are recursive, so they get their own Drop - // impl. - | Ast::ClassBracketed(_) => {} - Ast::Repetition(ref mut x) => { - stack.push(mem::replace(&mut x.ast, empty_ast())); - } - Ast::Group(ref mut x) => { - stack.push(mem::replace(&mut x.ast, empty_ast())); - } - Ast::Alternation(ref mut x) => { - stack.extend(x.asts.drain(..)); - } - Ast::Concat(ref mut x) => { - stack.extend(x.asts.drain(..)); - } - } - } - } -} - -/// A custom `Drop` impl is used for `ClassSet` such that it uses constant -/// stack space but heap space proportional to the depth of the `ClassSet`. -impl Drop for ClassSet { - fn drop(&mut self) { - use core::mem; - - match *self { - ClassSet::Item(ref item) => match *item { - ClassSetItem::Empty(_) - | ClassSetItem::Literal(_) - | ClassSetItem::Range(_) - | ClassSetItem::Ascii(_) - | ClassSetItem::Unicode(_) - | ClassSetItem::Perl(_) => return, - ClassSetItem::Bracketed(ref x) => { - if x.kind.is_empty() { - return; - } - } - ClassSetItem::Union(ref x) => { - if x.items.is_empty() { - return; - } - } - }, - ClassSet::BinaryOp(ref op) => { - if op.lhs.is_empty() && op.rhs.is_empty() { - return; - } - } - } - - let empty_span = || Span::splat(Position::new(0, 0, 0)); - let empty_set = || ClassSet::Item(ClassSetItem::Empty(empty_span())); - let mut stack = vec![mem::replace(self, empty_set())]; - while let Some(mut set) = stack.pop() { - match set { - ClassSet::Item(ref mut item) => match *item { - ClassSetItem::Empty(_) - | ClassSetItem::Literal(_) - | ClassSetItem::Range(_) - | ClassSetItem::Ascii(_) - | ClassSetItem::Unicode(_) - | ClassSetItem::Perl(_) => {} - ClassSetItem::Bracketed(ref mut x) => { - stack.push(mem::replace(&mut x.kind, empty_set())); - } - ClassSetItem::Union(ref mut x) => { - stack.extend(x.items.drain(..).map(ClassSet::Item)); - } - }, - ClassSet::BinaryOp(ref mut op) => { - stack.push(mem::replace(&mut op.lhs, empty_set())); - stack.push(mem::replace(&mut op.rhs, empty_set())); - } - } - } - } -} - -#[cfg(test)] -mod tests { - use super::*; - - // We use a thread with an explicit stack size to test that our destructor - // for Ast can handle arbitrarily sized expressions in constant stack - // space. In case we run on a platform without threads (WASM?), we limit - // this test to Windows/Unix. - #[test] - #[cfg(any(unix, windows))] - fn no_stack_overflow_on_drop() { - use std::thread; - - let run = || { - let span = || Span::splat(Position::new(0, 0, 0)); - let mut ast = Ast::empty(span()); - for i in 0..200 { - ast = Ast::group(Group { - span: span(), - kind: GroupKind::CaptureIndex(i), - ast: Box::new(ast), - }); - } - assert!(!ast.is_empty()); - }; - - // We run our test on a thread with a small stack size so we can - // force the issue more easily. - // - // NOTE(2023-03-21): It turns out that some platforms (like FreeBSD) - // will just barf with very small stack sizes. 
So we bump this up a bit - // to give more room to breath. When I did this, I confirmed that if - // I remove the custom `Drop` impl for `Ast`, then this test does - // indeed still fail with a stack overflow. (At the time of writing, I - // had to bump it all the way up to 32K before the test would pass even - // without the custom `Drop` impl. So 16K seems like a safe number - // here.) - // - // See: https://github.com/rust-lang/regex/issues/967 - thread::Builder::new() - .stack_size(16 << 10) - .spawn(run) - .unwrap() - .join() - .unwrap(); - } - - // This tests that our `Ast` has a reasonable size. This isn't a hard rule - // and it can be increased if given a good enough reason. But this test - // exists because the size of `Ast` was at one point over 200 bytes on a - // 64-bit target. Wow. - #[test] - fn ast_size() { - let max = 2 * core::mem::size_of::(); - let size = core::mem::size_of::(); - assert!( - size <= max, - "Ast size of {size} bytes is bigger than suggested max {max}", - ); - } -} diff --git a/vendor/regex-syntax/src/ast/parse.rs b/vendor/regex-syntax/src/ast/parse.rs deleted file mode 100644 index bdaab72283857a..00000000000000 --- a/vendor/regex-syntax/src/ast/parse.rs +++ /dev/null @@ -1,6377 +0,0 @@ -/*! -This module provides a regular expression parser. -*/ - -use core::{ - borrow::Borrow, - cell::{Cell, RefCell}, - mem, -}; - -use alloc::{ - boxed::Box, - string::{String, ToString}, - vec, - vec::Vec, -}; - -use crate::{ - ast::{self, Ast, Position, Span}, - either::Either, - is_escapeable_character, is_meta_character, -}; - -type Result = core::result::Result; - -/// A primitive is an expression with no sub-expressions. This includes -/// literals, assertions and non-set character classes. This representation -/// is used as intermediate state in the parser. -/// -/// This does not include ASCII character classes, since they can only appear -/// within a set character class. -#[derive(Clone, Debug, Eq, PartialEq)] -enum Primitive { - Literal(ast::Literal), - Assertion(ast::Assertion), - Dot(Span), - Perl(ast::ClassPerl), - Unicode(ast::ClassUnicode), -} - -impl Primitive { - /// Return the span of this primitive. - fn span(&self) -> &Span { - match *self { - Primitive::Literal(ref x) => &x.span, - Primitive::Assertion(ref x) => &x.span, - Primitive::Dot(ref span) => span, - Primitive::Perl(ref x) => &x.span, - Primitive::Unicode(ref x) => &x.span, - } - } - - /// Convert this primitive into a proper AST. - fn into_ast(self) -> Ast { - match self { - Primitive::Literal(lit) => Ast::literal(lit), - Primitive::Assertion(assert) => Ast::assertion(assert), - Primitive::Dot(span) => Ast::dot(span), - Primitive::Perl(cls) => Ast::class_perl(cls), - Primitive::Unicode(cls) => Ast::class_unicode(cls), - } - } - - /// Convert this primitive into an item in a character class. - /// - /// If this primitive is not a legal item (i.e., an assertion or a dot), - /// then return an error. - fn into_class_set_item>( - self, - p: &ParserI<'_, P>, - ) -> Result { - use self::Primitive::*; - use crate::ast::ClassSetItem; - - match self { - Literal(lit) => Ok(ClassSetItem::Literal(lit)), - Perl(cls) => Ok(ClassSetItem::Perl(cls)), - Unicode(cls) => Ok(ClassSetItem::Unicode(cls)), - x => Err(p.error(*x.span(), ast::ErrorKind::ClassEscapeInvalid)), - } - } - - /// Convert this primitive into a literal in a character class. In - /// particular, literals are the only valid items that can appear in - /// ranges. 
- /// - /// If this primitive is not a legal item (i.e., a class, assertion or a - /// dot), then return an error. - fn into_class_literal>( - self, - p: &ParserI<'_, P>, - ) -> Result { - use self::Primitive::*; - - match self { - Literal(lit) => Ok(lit), - x => Err(p.error(*x.span(), ast::ErrorKind::ClassRangeLiteral)), - } - } -} - -/// Returns true if the given character is a hexadecimal digit. -fn is_hex(c: char) -> bool { - ('0' <= c && c <= '9') || ('a' <= c && c <= 'f') || ('A' <= c && c <= 'F') -} - -/// Returns true if the given character is a valid in a capture group name. -/// -/// If `first` is true, then `c` is treated as the first character in the -/// group name (which must be alphabetic or underscore). -fn is_capture_char(c: char, first: bool) -> bool { - if first { - c == '_' || c.is_alphabetic() - } else { - c == '_' || c == '.' || c == '[' || c == ']' || c.is_alphanumeric() - } -} - -/// A builder for a regular expression parser. -/// -/// This builder permits modifying configuration options for the parser. -#[derive(Clone, Debug)] -pub struct ParserBuilder { - ignore_whitespace: bool, - nest_limit: u32, - octal: bool, - empty_min_range: bool, -} - -impl Default for ParserBuilder { - fn default() -> ParserBuilder { - ParserBuilder::new() - } -} - -impl ParserBuilder { - /// Create a new parser builder with a default configuration. - pub fn new() -> ParserBuilder { - ParserBuilder { - ignore_whitespace: false, - nest_limit: 250, - octal: false, - empty_min_range: false, - } - } - - /// Build a parser from this configuration with the given pattern. - pub fn build(&self) -> Parser { - Parser { - pos: Cell::new(Position { offset: 0, line: 1, column: 1 }), - capture_index: Cell::new(0), - nest_limit: self.nest_limit, - octal: self.octal, - empty_min_range: self.empty_min_range, - initial_ignore_whitespace: self.ignore_whitespace, - ignore_whitespace: Cell::new(self.ignore_whitespace), - comments: RefCell::new(vec![]), - stack_group: RefCell::new(vec![]), - stack_class: RefCell::new(vec![]), - capture_names: RefCell::new(vec![]), - scratch: RefCell::new(String::new()), - } - } - - /// Set the nesting limit for this parser. - /// - /// The nesting limit controls how deep the abstract syntax tree is allowed - /// to be. If the AST exceeds the given limit (e.g., with too many nested - /// groups), then an error is returned by the parser. - /// - /// The purpose of this limit is to act as a heuristic to prevent stack - /// overflow for consumers that do structural induction on an `Ast` using - /// explicit recursion. While this crate never does this (instead using - /// constant stack space and moving the call stack to the heap), other - /// crates may. - /// - /// This limit is not checked until the entire AST is parsed. Therefore, - /// if callers want to put a limit on the amount of heap space used, then - /// they should impose a limit on the length, in bytes, of the concrete - /// pattern string. In particular, this is viable since this parser - /// implementation will limit itself to heap space proportional to the - /// length of the pattern string. - /// - /// Note that a nest limit of `0` will return a nest limit error for most - /// patterns but not all. For example, a nest limit of `0` permits `a` but - /// not `ab`, since `ab` requires a concatenation, which results in a nest - /// depth of `1`. In general, a nest limit is not something that manifests - /// in an obvious way in the concrete syntax, therefore, it should not be - /// used in a granular way. 
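As an aside (not part of the vendored file): an illustrative use of the builder options documented above, assuming the public path regex_syntax::ast::parse::ParserBuilder; the pattern and function name are made up for the sketch.

use regex_syntax::ast::parse::ParserBuilder;

fn builder_sketch() {
    // Enable verbose mode and tighten the nesting limit before building.
    let mut parser = ParserBuilder::new()
        .ignore_whitespace(true)
        .nest_limit(50)
        .build();
    // With whitespace insensitivity enabled, spaces and `#` comments in the
    // pattern itself are ignored.
    let ast = parser
        .parse(r"(?P<word> \w+ )   # one word")
        .expect("pattern should parse");
    println!("{ast:?}");
}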
- pub fn nest_limit(&mut self, limit: u32) -> &mut ParserBuilder { - self.nest_limit = limit; - self - } - - /// Whether to support octal syntax or not. - /// - /// Octal syntax is a little-known way of uttering Unicode codepoints in - /// a regular expression. For example, `a`, `\x61`, `\u0061` and - /// `\141` are all equivalent regular expressions, where the last example - /// shows octal syntax. - /// - /// While supporting octal syntax isn't in and of itself a problem, it does - /// make good error messages harder. That is, in PCRE based regex engines, - /// syntax like `\0` invokes a backreference, which is explicitly - /// unsupported in Rust's regex engine. However, many users expect it to - /// be supported. Therefore, when octal support is disabled, the error - /// message will explicitly mention that backreferences aren't supported. - /// - /// Octal syntax is disabled by default. - pub fn octal(&mut self, yes: bool) -> &mut ParserBuilder { - self.octal = yes; - self - } - - /// Enable verbose mode in the regular expression. - /// - /// When enabled, verbose mode permits insignificant whitespace in many - /// places in the regular expression, as well as comments. Comments are - /// started using `#` and continue until the end of the line. - /// - /// By default, this is disabled. It may be selectively enabled in the - /// regular expression by using the `x` flag regardless of this setting. - pub fn ignore_whitespace(&mut self, yes: bool) -> &mut ParserBuilder { - self.ignore_whitespace = yes; - self - } - - /// Allow using `{,n}` as an equivalent to `{0,n}`. - /// - /// When enabled, the parser accepts `{,n}` as valid syntax for `{0,n}`. - /// Most regular expression engines don't support the `{,n}` syntax, but - /// some others do it, namely Python's `re` library. - /// - /// This is disabled by default. - pub fn empty_min_range(&mut self, yes: bool) -> &mut ParserBuilder { - self.empty_min_range = yes; - self - } -} - -/// A regular expression parser. -/// -/// This parses a string representation of a regular expression into an -/// abstract syntax tree. The size of the tree is proportional to the length -/// of the regular expression pattern. -/// -/// A `Parser` can be configured in more detail via a [`ParserBuilder`]. -#[derive(Clone, Debug)] -pub struct Parser { - /// The current position of the parser. - pos: Cell, - /// The current capture index. - capture_index: Cell, - /// The maximum number of open parens/brackets allowed. If the parser - /// exceeds this number, then an error is returned. - nest_limit: u32, - /// Whether to support octal syntax or not. When `false`, the parser will - /// return an error helpfully pointing out that backreferences are not - /// supported. - octal: bool, - /// The initial setting for `ignore_whitespace` as provided by - /// `ParserBuilder`. It is used when resetting the parser's state. - initial_ignore_whitespace: bool, - /// Whether the parser supports `{,n}` repetitions as an equivalent to - /// `{0,n}.` - empty_min_range: bool, - /// Whether whitespace should be ignored. When enabled, comments are - /// also permitted. - ignore_whitespace: Cell, - /// A list of comments, in order of appearance. - comments: RefCell>, - /// A stack of grouped sub-expressions, including alternations. - stack_group: RefCell>, - /// A stack of nested character classes. This is only non-empty when - /// parsing a class. - stack_class: RefCell>, - /// A sorted sequence of capture names. 
This is used to detect duplicate - /// capture names and report an error if one is detected. - capture_names: RefCell>, - /// A scratch buffer used in various places. Mostly this is used to - /// accumulate relevant characters from parts of a pattern. - scratch: RefCell, -} - -/// ParserI is the internal parser implementation. -/// -/// We use this separate type so that we can carry the provided pattern string -/// along with us. In particular, a `Parser` internal state is not tied to any -/// one pattern, but `ParserI` is. -/// -/// This type also lets us use `ParserI<&Parser>` in production code while -/// retaining the convenience of `ParserI` for tests, which sometimes -/// work against the internal interface of the parser. -#[derive(Clone, Debug)] -struct ParserI<'s, P> { - /// The parser state/configuration. - parser: P, - /// The full regular expression provided by the user. - pattern: &'s str, -} - -/// GroupState represents a single stack frame while parsing nested groups -/// and alternations. Each frame records the state up to an opening parenthesis -/// or a alternating bracket `|`. -#[derive(Clone, Debug)] -enum GroupState { - /// This state is pushed whenever an opening group is found. - Group { - /// The concatenation immediately preceding the opening group. - concat: ast::Concat, - /// The group that has been opened. Its sub-AST is always empty. - group: ast::Group, - /// Whether this group has the `x` flag enabled or not. - ignore_whitespace: bool, - }, - /// This state is pushed whenever a new alternation branch is found. If - /// an alternation branch is found and this state is at the top of the - /// stack, then this state should be modified to include the new - /// alternation. - Alternation(ast::Alternation), -} - -/// ClassState represents a single stack frame while parsing character classes. -/// Each frame records the state up to an intersection, difference, symmetric -/// difference or nested class. -/// -/// Note that a parser's character class stack is only non-empty when parsing -/// a character class. In all other cases, it is empty. -#[derive(Clone, Debug)] -enum ClassState { - /// This state is pushed whenever an opening bracket is found. - Open { - /// The union of class items immediately preceding this class. - union: ast::ClassSetUnion, - /// The class that has been opened. Typically this just corresponds - /// to the `[`, but it can also include `[^` since `^` indicates - /// negation of the class. - set: ast::ClassBracketed, - }, - /// This state is pushed when a operator is seen. When popped, the stored - /// set becomes the left hand side of the operator. - Op { - /// The type of the operation, i.e., &&, -- or ~~. - kind: ast::ClassSetBinaryOpKind, - /// The left-hand side of the operator. - lhs: ast::ClassSet, - }, -} - -impl Parser { - /// Create a new parser with a default configuration. - /// - /// The parser can be run with either the `parse` or `parse_with_comments` - /// methods. The parse methods return an abstract syntax tree. - /// - /// To set configuration options on the parser, use [`ParserBuilder`]. - pub fn new() -> Parser { - ParserBuilder::new().build() - } - - /// Parse the regular expression into an abstract syntax tree. - pub fn parse(&mut self, pattern: &str) -> Result { - ParserI::new(self, pattern).parse() - } - - /// Parse the regular expression and return an abstract syntax tree with - /// all of the comments found in the pattern. 
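Aside (not part of the vendored file): a small sketch of driving the parser described above through its public entry points, assuming regex_syntax::ast::parse::Parser and the Group accessors shown earlier in this diff; the pattern is illustrative.

use regex_syntax::ast::{parse::Parser, Ast};

fn named_group_sketch() {
    // A single named group at the top level parses to `Ast::Group`, and the
    // first capturing group receives index 1.
    let ast = Parser::new()
        .parse(r"(?P<year>[0-9]{4})")
        .expect("pattern should parse");
    if let Ast::Group(group) = &ast {
        assert!(group.is_capturing());
        assert_eq!(group.capture_index(), Some(1));
    }
}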
- pub fn parse_with_comments( - &mut self, - pattern: &str, - ) -> Result { - ParserI::new(self, pattern).parse_with_comments() - } - - /// Reset the internal state of a parser. - /// - /// This is called at the beginning of every parse. This prevents the - /// parser from running with inconsistent state (say, if a previous - /// invocation returned an error and the parser is reused). - fn reset(&self) { - // These settings should be in line with the construction - // in `ParserBuilder::build`. - self.pos.set(Position { offset: 0, line: 1, column: 1 }); - self.ignore_whitespace.set(self.initial_ignore_whitespace); - self.comments.borrow_mut().clear(); - self.stack_group.borrow_mut().clear(); - self.stack_class.borrow_mut().clear(); - } -} - -impl<'s, P: Borrow> ParserI<'s, P> { - /// Build an internal parser from a parser configuration and a pattern. - fn new(parser: P, pattern: &'s str) -> ParserI<'s, P> { - ParserI { parser, pattern } - } - - /// Return a reference to the parser state. - fn parser(&self) -> &Parser { - self.parser.borrow() - } - - /// Return a reference to the pattern being parsed. - fn pattern(&self) -> &str { - self.pattern - } - - /// Create a new error with the given span and error type. - fn error(&self, span: Span, kind: ast::ErrorKind) -> ast::Error { - ast::Error { kind, pattern: self.pattern().to_string(), span } - } - - /// Return the current offset of the parser. - /// - /// The offset starts at `0` from the beginning of the regular expression - /// pattern string. - fn offset(&self) -> usize { - self.parser().pos.get().offset - } - - /// Return the current line number of the parser. - /// - /// The line number starts at `1`. - fn line(&self) -> usize { - self.parser().pos.get().line - } - - /// Return the current column of the parser. - /// - /// The column number starts at `1` and is reset whenever a `\n` is seen. - fn column(&self) -> usize { - self.parser().pos.get().column - } - - /// Return the next capturing index. Each subsequent call increments the - /// internal index. - /// - /// The span given should correspond to the location of the opening - /// parenthesis. - /// - /// If the capture limit is exceeded, then an error is returned. - fn next_capture_index(&self, span: Span) -> Result { - let current = self.parser().capture_index.get(); - let i = current.checked_add(1).ok_or_else(|| { - self.error(span, ast::ErrorKind::CaptureLimitExceeded) - })?; - self.parser().capture_index.set(i); - Ok(i) - } - - /// Adds the given capture name to this parser. If this capture name has - /// already been used, then an error is returned. - fn add_capture_name(&self, cap: &ast::CaptureName) -> Result<()> { - let mut names = self.parser().capture_names.borrow_mut(); - match names - .binary_search_by_key(&cap.name.as_str(), |c| c.name.as_str()) - { - Err(i) => { - names.insert(i, cap.clone()); - Ok(()) - } - Ok(i) => Err(self.error( - cap.span, - ast::ErrorKind::GroupNameDuplicate { original: names[i].span }, - )), - } - } - - /// Return whether the parser should ignore whitespace or not. - fn ignore_whitespace(&self) -> bool { - self.parser().ignore_whitespace.get() - } - - /// Return the character at the current position of the parser. - /// - /// This panics if the current position does not point to a valid char. - fn char(&self) -> char { - self.char_at(self.offset()) - } - - /// Return the character at the given position. - /// - /// This panics if the given position does not point to a valid char. - fn char_at(&self, i: usize) -> char { - self.pattern()[i..] 
- .chars() - .next() - .unwrap_or_else(|| panic!("expected char at offset {i}")) - } - - /// Bump the parser to the next Unicode scalar value. - /// - /// If the end of the input has been reached, then `false` is returned. - fn bump(&self) -> bool { - if self.is_eof() { - return false; - } - let Position { mut offset, mut line, mut column } = self.pos(); - if self.char() == '\n' { - line = line.checked_add(1).unwrap(); - column = 1; - } else { - column = column.checked_add(1).unwrap(); - } - offset += self.char().len_utf8(); - self.parser().pos.set(Position { offset, line, column }); - self.pattern()[self.offset()..].chars().next().is_some() - } - - /// If the substring starting at the current position of the parser has - /// the given prefix, then bump the parser to the character immediately - /// following the prefix and return true. Otherwise, don't bump the parser - /// and return false. - fn bump_if(&self, prefix: &str) -> bool { - if self.pattern()[self.offset()..].starts_with(prefix) { - for _ in 0..prefix.chars().count() { - self.bump(); - } - true - } else { - false - } - } - - /// Returns true if and only if the parser is positioned at a look-around - /// prefix. The conditions under which this returns true must always - /// correspond to a regular expression that would otherwise be consider - /// invalid. - /// - /// This should only be called immediately after parsing the opening of - /// a group or a set of flags. - fn is_lookaround_prefix(&self) -> bool { - self.bump_if("?=") - || self.bump_if("?!") - || self.bump_if("?<=") - || self.bump_if("? bool { - if !self.bump() { - return false; - } - self.bump_space(); - !self.is_eof() - } - - /// If the `x` flag is enabled (i.e., whitespace insensitivity with - /// comments), then this will advance the parser through all whitespace - /// and comments to the next non-whitespace non-comment byte. - /// - /// If the `x` flag is disabled, then this is a no-op. - /// - /// This should be used selectively throughout the parser where - /// arbitrary whitespace is permitted when the `x` flag is enabled. For - /// example, `{ 5 , 6}` is equivalent to `{5,6}`. - fn bump_space(&self) { - if !self.ignore_whitespace() { - return; - } - while !self.is_eof() { - if self.char().is_whitespace() { - self.bump(); - } else if self.char() == '#' { - let start = self.pos(); - let mut comment_text = String::new(); - self.bump(); - while !self.is_eof() { - let c = self.char(); - self.bump(); - if c == '\n' { - break; - } - comment_text.push(c); - } - let comment = ast::Comment { - span: Span::new(start, self.pos()), - comment: comment_text, - }; - self.parser().comments.borrow_mut().push(comment); - } else { - break; - } - } - } - - /// Peek at the next character in the input without advancing the parser. - /// - /// If the input has been exhausted, then this returns `None`. - fn peek(&self) -> Option { - if self.is_eof() { - return None; - } - self.pattern()[self.offset() + self.char().len_utf8()..].chars().next() - } - - /// Like peek, but will ignore spaces when the parser is in whitespace - /// insensitive mode. 
- fn peek_space(&self) -> Option { - if !self.ignore_whitespace() { - return self.peek(); - } - if self.is_eof() { - return None; - } - let mut start = self.offset() + self.char().len_utf8(); - let mut in_comment = false; - for (i, c) in self.pattern()[start..].char_indices() { - if c.is_whitespace() { - continue; - } else if !in_comment && c == '#' { - in_comment = true; - } else if in_comment && c == '\n' { - in_comment = false; - } else { - start += i; - break; - } - } - self.pattern()[start..].chars().next() - } - - /// Returns true if the next call to `bump` would return false. - fn is_eof(&self) -> bool { - self.offset() == self.pattern().len() - } - - /// Return the current position of the parser, which includes the offset, - /// line and column. - fn pos(&self) -> Position { - self.parser().pos.get() - } - - /// Create a span at the current position of the parser. Both the start - /// and end of the span are set. - fn span(&self) -> Span { - Span::splat(self.pos()) - } - - /// Create a span that covers the current character. - fn span_char(&self) -> Span { - let mut next = Position { - offset: self.offset().checked_add(self.char().len_utf8()).unwrap(), - line: self.line(), - column: self.column().checked_add(1).unwrap(), - }; - if self.char() == '\n' { - next.line += 1; - next.column = 1; - } - Span::new(self.pos(), next) - } - - /// Parse and push a single alternation on to the parser's internal stack. - /// If the top of the stack already has an alternation, then add to that - /// instead of pushing a new one. - /// - /// The concatenation given corresponds to a single alternation branch. - /// The concatenation returned starts the next branch and is empty. - /// - /// This assumes the parser is currently positioned at `|` and will advance - /// the parser to the character following `|`. - #[inline(never)] - fn push_alternate(&self, mut concat: ast::Concat) -> Result { - assert_eq!(self.char(), '|'); - concat.span.end = self.pos(); - self.push_or_add_alternation(concat); - self.bump(); - Ok(ast::Concat { span: self.span(), asts: vec![] }) - } - - /// Pushes or adds the given branch of an alternation to the parser's - /// internal stack of state. - fn push_or_add_alternation(&self, concat: ast::Concat) { - use self::GroupState::*; - - let mut stack = self.parser().stack_group.borrow_mut(); - if let Some(&mut Alternation(ref mut alts)) = stack.last_mut() { - alts.asts.push(concat.into_ast()); - return; - } - stack.push(Alternation(ast::Alternation { - span: Span::new(concat.span.start, self.pos()), - asts: vec![concat.into_ast()], - })); - } - - /// Parse and push a group AST (and its parent concatenation) on to the - /// parser's internal stack. Return a fresh concatenation corresponding - /// to the group's sub-AST. - /// - /// If a set of flags was found (with no group), then the concatenation - /// is returned with that set of flags added. - /// - /// This assumes that the parser is currently positioned on the opening - /// parenthesis. It advances the parser to the character at the start - /// of the sub-expression (or adjoining expression). - /// - /// If there was a problem parsing the start of the group, then an error - /// is returned. - #[inline(never)] - fn push_group(&self, mut concat: ast::Concat) -> Result { - assert_eq!(self.char(), '('); - match self.parse_group()? 
{ - Either::Left(set) => { - let ignore = set.flags.flag_state(ast::Flag::IgnoreWhitespace); - if let Some(v) = ignore { - self.parser().ignore_whitespace.set(v); - } - - concat.asts.push(Ast::flags(set)); - Ok(concat) - } - Either::Right(group) => { - let old_ignore_whitespace = self.ignore_whitespace(); - let new_ignore_whitespace = group - .flags() - .and_then(|f| f.flag_state(ast::Flag::IgnoreWhitespace)) - .unwrap_or(old_ignore_whitespace); - self.parser().stack_group.borrow_mut().push( - GroupState::Group { - concat, - group, - ignore_whitespace: old_ignore_whitespace, - }, - ); - self.parser().ignore_whitespace.set(new_ignore_whitespace); - Ok(ast::Concat { span: self.span(), asts: vec![] }) - } - } - } - - /// Pop a group AST from the parser's internal stack and set the group's - /// AST to the given concatenation. Return the concatenation containing - /// the group. - /// - /// This assumes that the parser is currently positioned on the closing - /// parenthesis and advances the parser to the character following the `)`. - /// - /// If no such group could be popped, then an unopened group error is - /// returned. - #[inline(never)] - fn pop_group(&self, mut group_concat: ast::Concat) -> Result { - use self::GroupState::*; - - assert_eq!(self.char(), ')'); - let mut stack = self.parser().stack_group.borrow_mut(); - let (mut prior_concat, mut group, ignore_whitespace, alt) = match stack - .pop() - { - Some(Group { concat, group, ignore_whitespace }) => { - (concat, group, ignore_whitespace, None) - } - Some(Alternation(alt)) => match stack.pop() { - Some(Group { concat, group, ignore_whitespace }) => { - (concat, group, ignore_whitespace, Some(alt)) - } - None | Some(Alternation(_)) => { - return Err(self.error( - self.span_char(), - ast::ErrorKind::GroupUnopened, - )); - } - }, - None => { - return Err(self - .error(self.span_char(), ast::ErrorKind::GroupUnopened)); - } - }; - self.parser().ignore_whitespace.set(ignore_whitespace); - group_concat.span.end = self.pos(); - self.bump(); - group.span.end = self.pos(); - match alt { - Some(mut alt) => { - alt.span.end = group_concat.span.end; - alt.asts.push(group_concat.into_ast()); - group.ast = Box::new(alt.into_ast()); - } - None => { - group.ast = Box::new(group_concat.into_ast()); - } - } - prior_concat.asts.push(Ast::group(group)); - Ok(prior_concat) - } - - /// Pop the last state from the parser's internal stack, if it exists, and - /// add the given concatenation to it. There either must be no state or a - /// single alternation item on the stack. Any other scenario produces an - /// error. - /// - /// This assumes that the parser has advanced to the end. - #[inline(never)] - fn pop_group_end(&self, mut concat: ast::Concat) -> Result { - concat.span.end = self.pos(); - let mut stack = self.parser().stack_group.borrow_mut(); - let ast = match stack.pop() { - None => Ok(concat.into_ast()), - Some(GroupState::Alternation(mut alt)) => { - alt.span.end = self.pos(); - alt.asts.push(concat.into_ast()); - Ok(Ast::alternation(alt)) - } - Some(GroupState::Group { group, .. }) => { - return Err( - self.error(group.span, ast::ErrorKind::GroupUnclosed) - ); - } - }; - // If we try to pop again, there should be nothing. - match stack.pop() { - None => ast, - Some(GroupState::Alternation(_)) => { - // This unreachable is unfortunate. 
This case can't happen - // because the only way we can be here is if there were two - // `GroupState::Alternation`s adjacent in the parser's stack, - // which we guarantee to never happen because we never push a - // `GroupState::Alternation` if one is already at the top of - // the stack. - unreachable!() - } - Some(GroupState::Group { group, .. }) => { - Err(self.error(group.span, ast::ErrorKind::GroupUnclosed)) - } - } - } - - /// Parse the opening of a character class and push the current class - /// parsing context onto the parser's stack. This assumes that the parser - /// is positioned at an opening `[`. The given union should correspond to - /// the union of set items built up before seeing the `[`. - /// - /// If there was a problem parsing the opening of the class, then an error - /// is returned. Otherwise, a new union of set items for the class is - /// returned (which may be populated with either a `]` or a `-`). - #[inline(never)] - fn push_class_open( - &self, - parent_union: ast::ClassSetUnion, - ) -> Result { - assert_eq!(self.char(), '['); - - let (nested_set, nested_union) = self.parse_set_class_open()?; - self.parser() - .stack_class - .borrow_mut() - .push(ClassState::Open { union: parent_union, set: nested_set }); - Ok(nested_union) - } - - /// Parse the end of a character class set and pop the character class - /// parser stack. The union given corresponds to the last union built - /// before seeing the closing `]`. The union returned corresponds to the - /// parent character class set with the nested class added to it. - /// - /// This assumes that the parser is positioned at a `]` and will advance - /// the parser to the byte immediately following the `]`. - /// - /// If the stack is empty after popping, then this returns the final - /// "top-level" character class AST (where a "top-level" character class - /// is one that is not nested inside any other character class). - /// - /// If there is no corresponding opening bracket on the parser's stack, - /// then an error is returned. - #[inline(never)] - fn pop_class( - &self, - nested_union: ast::ClassSetUnion, - ) -> Result> { - assert_eq!(self.char(), ']'); - - let item = ast::ClassSet::Item(nested_union.into_item()); - let prevset = self.pop_class_op(item); - let mut stack = self.parser().stack_class.borrow_mut(); - match stack.pop() { - None => { - // We can never observe an empty stack: - // - // 1) We are guaranteed to start with a non-empty stack since - // the character class parser is only initiated when it sees - // a `[`. - // 2) If we ever observe an empty stack while popping after - // seeing a `]`, then we signal the character class parser - // to terminate. - panic!("unexpected empty character class stack") - } - Some(ClassState::Op { .. }) => { - // This panic is unfortunate, but this case is impossible - // since we already popped the Op state if one exists above. - // Namely, every push to the class parser stack is guarded by - // whether an existing Op is already on the top of the stack. - // If it is, the existing Op is modified. That is, the stack - // can never have consecutive Op states. 
- panic!("unexpected ClassState::Op") - } - Some(ClassState::Open { mut union, mut set }) => { - self.bump(); - set.span.end = self.pos(); - set.kind = prevset; - if stack.is_empty() { - Ok(Either::Right(set)) - } else { - union.push(ast::ClassSetItem::Bracketed(Box::new(set))); - Ok(Either::Left(union)) - } - } - } - } - - /// Return an "unclosed class" error whose span points to the most - /// recently opened class. - /// - /// This should only be called while parsing a character class. - #[inline(never)] - fn unclosed_class_error(&self) -> ast::Error { - for state in self.parser().stack_class.borrow().iter().rev() { - if let ClassState::Open { ref set, .. } = *state { - return self.error(set.span, ast::ErrorKind::ClassUnclosed); - } - } - // We are guaranteed to have a non-empty stack with at least - // one open bracket, so we should never get here. - panic!("no open character class found") - } - - /// Push the current set of class items on to the class parser's stack as - /// the left hand side of the given operator. - /// - /// A fresh set union is returned, which should be used to build the right - /// hand side of this operator. - #[inline(never)] - fn push_class_op( - &self, - next_kind: ast::ClassSetBinaryOpKind, - next_union: ast::ClassSetUnion, - ) -> ast::ClassSetUnion { - let item = ast::ClassSet::Item(next_union.into_item()); - let new_lhs = self.pop_class_op(item); - self.parser() - .stack_class - .borrow_mut() - .push(ClassState::Op { kind: next_kind, lhs: new_lhs }); - ast::ClassSetUnion { span: self.span(), items: vec![] } - } - - /// Pop a character class set from the character class parser stack. If the - /// top of the stack is just an item (not an operation), then return the - /// given set unchanged. If the top of the stack is an operation, then the - /// given set will be used as the rhs of the operation on the top of the - /// stack. In that case, the binary operation is returned as a set. - #[inline(never)] - fn pop_class_op(&self, rhs: ast::ClassSet) -> ast::ClassSet { - let mut stack = self.parser().stack_class.borrow_mut(); - let (kind, lhs) = match stack.pop() { - Some(ClassState::Op { kind, lhs }) => (kind, lhs), - Some(state @ ClassState::Open { .. }) => { - stack.push(state); - return rhs; - } - None => unreachable!(), - }; - let span = Span::new(lhs.span().start, rhs.span().end); - ast::ClassSet::BinaryOp(ast::ClassSetBinaryOp { - span, - kind, - lhs: Box::new(lhs), - rhs: Box::new(rhs), - }) - } -} - -impl<'s, P: Borrow> ParserI<'s, P> { - /// Parse the regular expression into an abstract syntax tree. - fn parse(&self) -> Result { - self.parse_with_comments().map(|astc| astc.ast) - } - - /// Parse the regular expression and return an abstract syntax tree with - /// all of the comments found in the pattern. - fn parse_with_comments(&self) -> Result { - assert_eq!(self.offset(), 0, "parser can only be used once"); - self.parser().reset(); - let mut concat = ast::Concat { span: self.span(), asts: vec![] }; - loop { - self.bump_space(); - if self.is_eof() { - break; - } - match self.char() { - '(' => concat = self.push_group(concat)?, - ')' => concat = self.pop_group(concat)?, - '|' => concat = self.push_alternate(concat)?, - '[' => { - let class = self.parse_set_class()?; - concat.asts.push(Ast::class_bracketed(class)); - } - '?' 
=> { - concat = self.parse_uncounted_repetition( - concat, - ast::RepetitionKind::ZeroOrOne, - )?; - } - '*' => { - concat = self.parse_uncounted_repetition( - concat, - ast::RepetitionKind::ZeroOrMore, - )?; - } - '+' => { - concat = self.parse_uncounted_repetition( - concat, - ast::RepetitionKind::OneOrMore, - )?; - } - '{' => { - concat = self.parse_counted_repetition(concat)?; - } - _ => concat.asts.push(self.parse_primitive()?.into_ast()), - } - } - let ast = self.pop_group_end(concat)?; - NestLimiter::new(self).check(&ast)?; - Ok(ast::WithComments { - ast, - comments: mem::replace( - &mut *self.parser().comments.borrow_mut(), - vec![], - ), - }) - } - - /// Parses an uncounted repetition operation. An uncounted repetition - /// operator includes ?, * and +, but does not include the {m,n} syntax. - /// The given `kind` should correspond to the operator observed by the - /// caller. - /// - /// This assumes that the parser is currently positioned at the repetition - /// operator and advances the parser to the first character after the - /// operator. (Note that the operator may include a single additional `?`, - /// which makes the operator ungreedy.) - /// - /// The caller should include the concatenation that is being built. The - /// concatenation returned includes the repetition operator applied to the - /// last expression in the given concatenation. - #[inline(never)] - fn parse_uncounted_repetition( - &self, - mut concat: ast::Concat, - kind: ast::RepetitionKind, - ) -> Result { - assert!( - self.char() == '?' || self.char() == '*' || self.char() == '+' - ); - let op_start = self.pos(); - let ast = match concat.asts.pop() { - Some(ast) => ast, - None => { - return Err( - self.error(self.span(), ast::ErrorKind::RepetitionMissing) - ) - } - }; - match ast { - Ast::Empty(_) | Ast::Flags(_) => { - return Err( - self.error(self.span(), ast::ErrorKind::RepetitionMissing) - ) - } - _ => {} - } - let mut greedy = true; - if self.bump() && self.char() == '?' { - greedy = false; - self.bump(); - } - concat.asts.push(Ast::repetition(ast::Repetition { - span: ast.span().with_end(self.pos()), - op: ast::RepetitionOp { - span: Span::new(op_start, self.pos()), - kind, - }, - greedy, - ast: Box::new(ast), - })); - Ok(concat) - } - - /// Parses a counted repetition operation. A counted repetition operator - /// corresponds to the {m,n} syntax, and does not include the ?, * or + - /// operators. - /// - /// This assumes that the parser is currently positioned at the opening `{` - /// and advances the parser to the first character after the operator. - /// (Note that the operator may include a single additional `?`, which - /// makes the operator ungreedy.) - /// - /// The caller should include the concatenation that is being built. The - /// concatenation returned includes the repetition operator applied to the - /// last expression in the given concatenation. 
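Aside (not part of the vendored file): the trailing `?` handling in parse_uncounted_repetition above can be observed through the public API. A sketch assuming regex_syntax::ast::{parse::Parser, Ast}.

use regex_syntax::ast::{parse::Parser, Ast};

fn ungreedy_sketch() {
    // `+?` is the non-greedy form of `+`; the extra `?` clears `greedy`.
    let ast = Parser::new().parse("a+?").expect("pattern should parse");
    if let Ast::Repetition(rep) = &ast {
        assert!(!rep.greedy);
    }
}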
- #[inline(never)] - fn parse_counted_repetition( - &self, - mut concat: ast::Concat, - ) -> Result { - assert!(self.char() == '{'); - let start = self.pos(); - let ast = match concat.asts.pop() { - Some(ast) => ast, - None => { - return Err( - self.error(self.span(), ast::ErrorKind::RepetitionMissing) - ) - } - }; - match ast { - Ast::Empty(_) | Ast::Flags(_) => { - return Err( - self.error(self.span(), ast::ErrorKind::RepetitionMissing) - ) - } - _ => {} - } - if !self.bump_and_bump_space() { - return Err(self.error( - Span::new(start, self.pos()), - ast::ErrorKind::RepetitionCountUnclosed, - )); - } - let count_start = specialize_err( - self.parse_decimal(), - ast::ErrorKind::DecimalEmpty, - ast::ErrorKind::RepetitionCountDecimalEmpty, - ); - if self.is_eof() { - return Err(self.error( - Span::new(start, self.pos()), - ast::ErrorKind::RepetitionCountUnclosed, - )); - } - let range = if self.char() == ',' { - if !self.bump_and_bump_space() { - return Err(self.error( - Span::new(start, self.pos()), - ast::ErrorKind::RepetitionCountUnclosed, - )); - } - if self.char() != '}' { - let count_start = match count_start { - Ok(c) => c, - Err(err) - if err.kind - == ast::ErrorKind::RepetitionCountDecimalEmpty => - { - if self.parser().empty_min_range { - 0 - } else { - return Err(err); - } - } - err => err?, - }; - let count_end = specialize_err( - self.parse_decimal(), - ast::ErrorKind::DecimalEmpty, - ast::ErrorKind::RepetitionCountDecimalEmpty, - )?; - ast::RepetitionRange::Bounded(count_start, count_end) - } else { - ast::RepetitionRange::AtLeast(count_start?) - } - } else { - ast::RepetitionRange::Exactly(count_start?) - }; - - if self.is_eof() || self.char() != '}' { - return Err(self.error( - Span::new(start, self.pos()), - ast::ErrorKind::RepetitionCountUnclosed, - )); - } - - let mut greedy = true; - if self.bump_and_bump_space() && self.char() == '?' { - greedy = false; - self.bump(); - } - - let op_span = Span::new(start, self.pos()); - if !range.is_valid() { - return Err( - self.error(op_span, ast::ErrorKind::RepetitionCountInvalid) - ); - } - concat.asts.push(Ast::repetition(ast::Repetition { - span: ast.span().with_end(self.pos()), - op: ast::RepetitionOp { - span: op_span, - kind: ast::RepetitionKind::Range(range), - }, - greedy, - ast: Box::new(ast), - })); - Ok(concat) - } - - /// Parse a group (which contains a sub-expression) or a set of flags. - /// - /// If a group was found, then it is returned with an empty AST. If a set - /// of flags is found, then that set is returned. - /// - /// The parser should be positioned at the opening parenthesis. - /// - /// This advances the parser to the character before the start of the - /// sub-expression (in the case of a group) or to the closing parenthesis - /// immediately following the set of flags. - /// - /// # Errors - /// - /// If flags are given and incorrectly specified, then a corresponding - /// error is returned. - /// - /// If a capture name is given and it is incorrectly specified, then a - /// corresponding error is returned. 
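Aside (not part of the vendored file): a sketch of the `{,n}` behaviour handled in parse_counted_repetition above, assuming regex_syntax::ast::parse::ParserBuilder.

use regex_syntax::ast::parse::ParserBuilder;

fn empty_min_range_sketch() {
    // By default the missing lower bound in `{,3}` is rejected...
    assert!(ParserBuilder::new().build().parse("a{,3}").is_err());
    // ...but with empty_min_range enabled it is read as `{0,3}`.
    assert!(ParserBuilder::new()
        .empty_min_range(true)
        .build()
        .parse("a{,3}")
        .is_ok());
}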
- #[inline(never)] - fn parse_group(&self) -> Result> { - assert_eq!(self.char(), '('); - let open_span = self.span_char(); - self.bump(); - self.bump_space(); - if self.is_lookaround_prefix() { - return Err(self.error( - Span::new(open_span.start, self.span().end), - ast::ErrorKind::UnsupportedLookAround, - )); - } - let inner_span = self.span(); - let mut starts_with_p = true; - if self.bump_if("?P<") || { - starts_with_p = false; - self.bump_if("?<") - } { - let capture_index = self.next_capture_index(open_span)?; - let name = self.parse_capture_name(capture_index)?; - Ok(Either::Right(ast::Group { - span: open_span, - kind: ast::GroupKind::CaptureName { starts_with_p, name }, - ast: Box::new(Ast::empty(self.span())), - })) - } else if self.bump_if("?") { - if self.is_eof() { - return Err( - self.error(open_span, ast::ErrorKind::GroupUnclosed) - ); - } - let flags = self.parse_flags()?; - let char_end = self.char(); - self.bump(); - if char_end == ')' { - // We don't allow empty flags, e.g., `(?)`. We instead - // interpret it as a repetition operator missing its argument. - if flags.items.is_empty() { - return Err(self.error( - inner_span, - ast::ErrorKind::RepetitionMissing, - )); - } - Ok(Either::Left(ast::SetFlags { - span: Span { end: self.pos(), ..open_span }, - flags, - })) - } else { - assert_eq!(char_end, ':'); - Ok(Either::Right(ast::Group { - span: open_span, - kind: ast::GroupKind::NonCapturing(flags), - ast: Box::new(Ast::empty(self.span())), - })) - } - } else { - let capture_index = self.next_capture_index(open_span)?; - Ok(Either::Right(ast::Group { - span: open_span, - kind: ast::GroupKind::CaptureIndex(capture_index), - ast: Box::new(Ast::empty(self.span())), - })) - } - } - - /// Parses a capture group name. Assumes that the parser is positioned at - /// the first character in the name following the opening `<` (and may - /// possibly be EOF). This advances the parser to the first character - /// following the closing `>`. - /// - /// The caller must provide the capture index of the group for this name. - #[inline(never)] - fn parse_capture_name( - &self, - capture_index: u32, - ) -> Result { - if self.is_eof() { - return Err(self - .error(self.span(), ast::ErrorKind::GroupNameUnexpectedEof)); - } - let start = self.pos(); - loop { - if self.char() == '>' { - break; - } - if !is_capture_char(self.char(), self.pos() == start) { - return Err(self.error( - self.span_char(), - ast::ErrorKind::GroupNameInvalid, - )); - } - if !self.bump() { - break; - } - } - let end = self.pos(); - if self.is_eof() { - return Err(self - .error(self.span(), ast::ErrorKind::GroupNameUnexpectedEof)); - } - assert_eq!(self.char(), '>'); - self.bump(); - let name = &self.pattern()[start.offset..end.offset]; - if name.is_empty() { - return Err(self.error( - Span::new(start, start), - ast::ErrorKind::GroupNameEmpty, - )); - } - let capname = ast::CaptureName { - span: Span::new(start, end), - name: name.to_string(), - index: capture_index, - }; - self.add_capture_name(&capname)?; - Ok(capname) - } - - /// Parse a sequence of flags starting at the current character. - /// - /// This advances the parser to the character immediately following the - /// flags, which is guaranteed to be either `:` or `)`. - /// - /// # Errors - /// - /// If any flags are duplicated, then an error is returned. - /// - /// If the negation operator is used more than once, then an error is - /// returned. 
-    ///
-    /// If no flags could be found or if the negation operation is not followed
-    /// by any flags, then an error is returned.
-    #[inline(never)]
-    fn parse_flags(&self) -> Result<ast::Flags> {
-        let mut flags = ast::Flags { span: self.span(), items: vec![] };
-        let mut last_was_negation = None;
-        while self.char() != ':' && self.char() != ')' {
-            if self.char() == '-' {
-                last_was_negation = Some(self.span_char());
-                let item = ast::FlagsItem {
-                    span: self.span_char(),
-                    kind: ast::FlagsItemKind::Negation,
-                };
-                if let Some(i) = flags.add_item(item) {
-                    return Err(self.error(
-                        self.span_char(),
-                        ast::ErrorKind::FlagRepeatedNegation {
-                            original: flags.items[i].span,
-                        },
-                    ));
-                }
-            } else {
-                last_was_negation = None;
-                let item = ast::FlagsItem {
-                    span: self.span_char(),
-                    kind: ast::FlagsItemKind::Flag(self.parse_flag()?),
-                };
-                if let Some(i) = flags.add_item(item) {
-                    return Err(self.error(
-                        self.span_char(),
-                        ast::ErrorKind::FlagDuplicate {
-                            original: flags.items[i].span,
-                        },
-                    ));
-                }
-            }
-            if !self.bump() {
-                return Err(
-                    self.error(self.span(), ast::ErrorKind::FlagUnexpectedEof)
-                );
-            }
-        }
-        if let Some(span) = last_was_negation {
-            return Err(self.error(span, ast::ErrorKind::FlagDanglingNegation));
-        }
-        flags.span.end = self.pos();
-        Ok(flags)
-    }
-
-    /// Parse the current character as a flag. Do not advance the parser.
-    ///
-    /// # Errors
-    ///
-    /// If the flag is not recognized, then an error is returned.
-    #[inline(never)]
-    fn parse_flag(&self) -> Result<ast::Flag> {
-        match self.char() {
-            'i' => Ok(ast::Flag::CaseInsensitive),
-            'm' => Ok(ast::Flag::MultiLine),
-            's' => Ok(ast::Flag::DotMatchesNewLine),
-            'U' => Ok(ast::Flag::SwapGreed),
-            'u' => Ok(ast::Flag::Unicode),
-            'R' => Ok(ast::Flag::CRLF),
-            'x' => Ok(ast::Flag::IgnoreWhitespace),
-            _ => {
-                Err(self
-                    .error(self.span_char(), ast::ErrorKind::FlagUnrecognized))
-            }
-        }
-    }
-
-    /// Parse a primitive AST. e.g., A literal, non-set character class or
-    /// assertion.
-    ///
-    /// This assumes that the parser expects a primitive at the current
-    /// location. i.e., All other non-primitive cases have been handled.
-    /// For example, if the parser's position is at `|`, then `|` will be
-    /// treated as a literal (e.g., inside a character class).
-    ///
-    /// This advances the parser to the first character immediately following
-    /// the primitive.
-    fn parse_primitive(&self) -> Result<Primitive> {
-        match self.char() {
-            '\\' => self.parse_escape(),
-            '.' => {
-                let ast = Primitive::Dot(self.span_char());
-                self.bump();
-                Ok(ast)
-            }
-            '^' => {
-                let ast = Primitive::Assertion(ast::Assertion {
-                    span: self.span_char(),
-                    kind: ast::AssertionKind::StartLine,
-                });
-                self.bump();
-                Ok(ast)
-            }
-            '$' => {
-                let ast = Primitive::Assertion(ast::Assertion {
-                    span: self.span_char(),
-                    kind: ast::AssertionKind::EndLine,
-                });
-                self.bump();
-                Ok(ast)
-            }
-            c => {
-                let ast = Primitive::Literal(ast::Literal {
-                    span: self.span_char(),
-                    kind: ast::LiteralKind::Verbatim,
-                    c,
-                });
-                self.bump();
-                Ok(ast)
-            }
-        }
-    }
-
-    /// Parse an escape sequence as a primitive AST.
-    ///
-    /// This assumes the parser is positioned at the start of the escape
-    /// sequence, i.e., `\`. It advances the parser to the first position
-    /// immediately following the escape sequence.
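// Illustrative sketch, not part of the diff hunk above: escape handling as
// described by parse_escape/parse_octal. Assumes the same
// `regex_syntax::ast::parse::ParserBuilder` API used by the tests in this
// file; octal support is off by default.
use regex_syntax::ast::parse::ParserBuilder;

fn escape_parsing_examples() {
    let mut p = ParserBuilder::new().build();
    // Simple escapes, hex escapes and class escapes all parse as primitives.
    assert!(p.parse(r"\n\x7F\p{Greek}\d").is_ok());
    // `\1` looks like a backreference, which is unsupported, so it is an
    // error unless octal literals are explicitly enabled.
    assert!(p.parse(r"\1").is_err());
    let mut octal = ParserBuilder::new().octal(true).build();
    assert!(octal.parse(r"\1").is_ok()); // parsed as the octal literal \x01
}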
- #[inline(never)] - fn parse_escape(&self) -> Result { - assert_eq!(self.char(), '\\'); - let start = self.pos(); - if !self.bump() { - return Err(self.error( - Span::new(start, self.pos()), - ast::ErrorKind::EscapeUnexpectedEof, - )); - } - let c = self.char(); - // Put some of the more complicated routines into helpers. - match c { - '0'..='7' => { - if !self.parser().octal { - return Err(self.error( - Span::new(start, self.span_char().end), - ast::ErrorKind::UnsupportedBackreference, - )); - } - let mut lit = self.parse_octal(); - lit.span.start = start; - return Ok(Primitive::Literal(lit)); - } - '8'..='9' if !self.parser().octal => { - return Err(self.error( - Span::new(start, self.span_char().end), - ast::ErrorKind::UnsupportedBackreference, - )); - } - 'x' | 'u' | 'U' => { - let mut lit = self.parse_hex()?; - lit.span.start = start; - return Ok(Primitive::Literal(lit)); - } - 'p' | 'P' => { - let mut cls = self.parse_unicode_class()?; - cls.span.start = start; - return Ok(Primitive::Unicode(cls)); - } - 'd' | 's' | 'w' | 'D' | 'S' | 'W' => { - let mut cls = self.parse_perl_class(); - cls.span.start = start; - return Ok(Primitive::Perl(cls)); - } - _ => {} - } - - // Handle all of the one letter sequences inline. - self.bump(); - let span = Span::new(start, self.pos()); - if is_meta_character(c) { - return Ok(Primitive::Literal(ast::Literal { - span, - kind: ast::LiteralKind::Meta, - c, - })); - } - if is_escapeable_character(c) { - return Ok(Primitive::Literal(ast::Literal { - span, - kind: ast::LiteralKind::Superfluous, - c, - })); - } - let special = |kind, c| { - Ok(Primitive::Literal(ast::Literal { - span, - kind: ast::LiteralKind::Special(kind), - c, - })) - }; - match c { - 'a' => special(ast::SpecialLiteralKind::Bell, '\x07'), - 'f' => special(ast::SpecialLiteralKind::FormFeed, '\x0C'), - 't' => special(ast::SpecialLiteralKind::Tab, '\t'), - 'n' => special(ast::SpecialLiteralKind::LineFeed, '\n'), - 'r' => special(ast::SpecialLiteralKind::CarriageReturn, '\r'), - 'v' => special(ast::SpecialLiteralKind::VerticalTab, '\x0B'), - 'A' => Ok(Primitive::Assertion(ast::Assertion { - span, - kind: ast::AssertionKind::StartText, - })), - 'z' => Ok(Primitive::Assertion(ast::Assertion { - span, - kind: ast::AssertionKind::EndText, - })), - 'b' => { - let mut wb = ast::Assertion { - span, - kind: ast::AssertionKind::WordBoundary, - }; - // After a \b, we "try" to parse things like \b{start} for - // special word boundary assertions. - if !self.is_eof() && self.char() == '{' { - if let Some(kind) = - self.maybe_parse_special_word_boundary(start)? - { - wb.kind = kind; - wb.span.end = self.pos(); - } - } - Ok(Primitive::Assertion(wb)) - } - 'B' => Ok(Primitive::Assertion(ast::Assertion { - span, - kind: ast::AssertionKind::NotWordBoundary, - })), - '<' => Ok(Primitive::Assertion(ast::Assertion { - span, - kind: ast::AssertionKind::WordBoundaryStartAngle, - })), - '>' => Ok(Primitive::Assertion(ast::Assertion { - span, - kind: ast::AssertionKind::WordBoundaryEndAngle, - })), - _ => Err(self.error(span, ast::ErrorKind::EscapeUnrecognized)), - } - } - - /// Attempt to parse a specialty word boundary. That is, `\b{start}`, - /// `\b{end}`, `\b{start-half}` or `\b{end-half}`. - /// - /// This is similar to `maybe_parse_ascii_class` in that, in most cases, - /// if it fails it will just return `None` with no error. This is done - /// because `\b{5}` is a valid expression and we want to let that be parsed - /// by the existing counted repetition parsing code. 
(I thought about just - /// invoking the counted repetition code from here, but it seemed a little - /// ham-fisted.) - /// - /// Unlike `maybe_parse_ascii_class` though, this can return an error. - /// Namely, if we definitely know it isn't a counted repetition, then we - /// return an error specific to the specialty word boundaries. - /// - /// This assumes the parser is positioned at a `{` immediately following - /// a `\b`. When `None` is returned, the parser is returned to the position - /// at which it started: pointing at a `{`. - /// - /// The position given should correspond to the start of the `\b`. - fn maybe_parse_special_word_boundary( - &self, - wb_start: Position, - ) -> Result> { - assert_eq!(self.char(), '{'); - - let is_valid_char = |c| match c { - 'A'..='Z' | 'a'..='z' | '-' => true, - _ => false, - }; - let start = self.pos(); - if !self.bump_and_bump_space() { - return Err(self.error( - Span::new(wb_start, self.pos()), - ast::ErrorKind::SpecialWordOrRepetitionUnexpectedEof, - )); - } - let start_contents = self.pos(); - // This is one of the critical bits: if the first non-whitespace - // character isn't in [-A-Za-z] (i.e., this can't be a special word - // boundary), then we bail and let the counted repetition parser deal - // with this. - if !is_valid_char(self.char()) { - self.parser().pos.set(start); - return Ok(None); - } - - // Now collect up our chars until we see a '}'. - let mut scratch = self.parser().scratch.borrow_mut(); - scratch.clear(); - while !self.is_eof() && is_valid_char(self.char()) { - scratch.push(self.char()); - self.bump_and_bump_space(); - } - if self.is_eof() || self.char() != '}' { - return Err(self.error( - Span::new(start, self.pos()), - ast::ErrorKind::SpecialWordBoundaryUnclosed, - )); - } - let end = self.pos(); - self.bump(); - let kind = match scratch.as_str() { - "start" => ast::AssertionKind::WordBoundaryStart, - "end" => ast::AssertionKind::WordBoundaryEnd, - "start-half" => ast::AssertionKind::WordBoundaryStartHalf, - "end-half" => ast::AssertionKind::WordBoundaryEndHalf, - _ => { - return Err(self.error( - Span::new(start_contents, end), - ast::ErrorKind::SpecialWordBoundaryUnrecognized, - )) - } - }; - Ok(Some(kind)) - } - - /// Parse an octal representation of a Unicode codepoint up to 3 digits - /// long. This expects the parser to be positioned at the first octal - /// digit and advances the parser to the first character immediately - /// following the octal number. This also assumes that parsing octal - /// escapes is enabled. - /// - /// Assuming the preconditions are met, this routine can never fail. - #[inline(never)] - fn parse_octal(&self) -> ast::Literal { - assert!(self.parser().octal); - assert!('0' <= self.char() && self.char() <= '7'); - let start = self.pos(); - // Parse up to two more digits. - while self.bump() - && '0' <= self.char() - && self.char() <= '7' - && self.pos().offset - start.offset <= 2 - {} - let end = self.pos(); - let octal = &self.pattern()[start.offset..end.offset]; - // Parsing the octal should never fail since the above guarantees a - // valid number. - let codepoint = - u32::from_str_radix(octal, 8).expect("valid octal number"); - // The max value for 3 digit octal is 0777 = 511 and [0, 511] has no - // invalid Unicode scalar values. - let c = char::from_u32(codepoint).expect("Unicode scalar value"); - ast::Literal { - span: Span::new(start, end), - kind: ast::LiteralKind::Octal, - c, - } - } - - /// Parse a hex representation of a Unicode codepoint. 
This handles both - /// hex notations, i.e., `\xFF` and `\x{FFFF}`. This expects the parser to - /// be positioned at the `x`, `u` or `U` prefix. The parser is advanced to - /// the first character immediately following the hexadecimal literal. - #[inline(never)] - fn parse_hex(&self) -> Result { - assert!( - self.char() == 'x' || self.char() == 'u' || self.char() == 'U' - ); - - let hex_kind = match self.char() { - 'x' => ast::HexLiteralKind::X, - 'u' => ast::HexLiteralKind::UnicodeShort, - _ => ast::HexLiteralKind::UnicodeLong, - }; - if !self.bump_and_bump_space() { - return Err( - self.error(self.span(), ast::ErrorKind::EscapeUnexpectedEof) - ); - } - if self.char() == '{' { - self.parse_hex_brace(hex_kind) - } else { - self.parse_hex_digits(hex_kind) - } - } - - /// Parse an N-digit hex representation of a Unicode codepoint. This - /// expects the parser to be positioned at the first digit and will advance - /// the parser to the first character immediately following the escape - /// sequence. - /// - /// The number of digits given must be 2 (for `\xNN`), 4 (for `\uNNNN`) - /// or 8 (for `\UNNNNNNNN`). - #[inline(never)] - fn parse_hex_digits( - &self, - kind: ast::HexLiteralKind, - ) -> Result { - let mut scratch = self.parser().scratch.borrow_mut(); - scratch.clear(); - - let start = self.pos(); - for i in 0..kind.digits() { - if i > 0 && !self.bump_and_bump_space() { - return Err(self - .error(self.span(), ast::ErrorKind::EscapeUnexpectedEof)); - } - if !is_hex(self.char()) { - return Err(self.error( - self.span_char(), - ast::ErrorKind::EscapeHexInvalidDigit, - )); - } - scratch.push(self.char()); - } - // The final bump just moves the parser past the literal, which may - // be EOF. - self.bump_and_bump_space(); - let end = self.pos(); - let hex = scratch.as_str(); - match u32::from_str_radix(hex, 16).ok().and_then(char::from_u32) { - None => Err(self.error( - Span::new(start, end), - ast::ErrorKind::EscapeHexInvalid, - )), - Some(c) => Ok(ast::Literal { - span: Span::new(start, end), - kind: ast::LiteralKind::HexFixed(kind), - c, - }), - } - } - - /// Parse a hex representation of any Unicode scalar value. This expects - /// the parser to be positioned at the opening brace `{` and will advance - /// the parser to the first character following the closing brace `}`. - #[inline(never)] - fn parse_hex_brace( - &self, - kind: ast::HexLiteralKind, - ) -> Result { - let mut scratch = self.parser().scratch.borrow_mut(); - scratch.clear(); - - let brace_pos = self.pos(); - let start = self.span_char().end; - while self.bump_and_bump_space() && self.char() != '}' { - if !is_hex(self.char()) { - return Err(self.error( - self.span_char(), - ast::ErrorKind::EscapeHexInvalidDigit, - )); - } - scratch.push(self.char()); - } - if self.is_eof() { - return Err(self.error( - Span::new(brace_pos, self.pos()), - ast::ErrorKind::EscapeUnexpectedEof, - )); - } - let end = self.pos(); - let hex = scratch.as_str(); - assert_eq!(self.char(), '}'); - self.bump_and_bump_space(); - - if hex.is_empty() { - return Err(self.error( - Span::new(brace_pos, self.pos()), - ast::ErrorKind::EscapeHexEmpty, - )); - } - match u32::from_str_radix(hex, 16).ok().and_then(char::from_u32) { - None => Err(self.error( - Span::new(start, end), - ast::ErrorKind::EscapeHexInvalid, - )), - Some(c) => Ok(ast::Literal { - span: Span::new(start, self.pos()), - kind: ast::LiteralKind::HexBrace(kind), - c, - }), - } - } - - /// Parse a decimal number into a u32 while trimming leading and trailing - /// whitespace. 
- /// - /// This expects the parser to be positioned at the first position where - /// a decimal digit could occur. This will advance the parser to the byte - /// immediately following the last contiguous decimal digit. - /// - /// If no decimal digit could be found or if there was a problem parsing - /// the complete set of digits into a u32, then an error is returned. - fn parse_decimal(&self) -> Result { - let mut scratch = self.parser().scratch.borrow_mut(); - scratch.clear(); - - while !self.is_eof() && self.char().is_whitespace() { - self.bump(); - } - let start = self.pos(); - while !self.is_eof() && '0' <= self.char() && self.char() <= '9' { - scratch.push(self.char()); - self.bump_and_bump_space(); - } - let span = Span::new(start, self.pos()); - while !self.is_eof() && self.char().is_whitespace() { - self.bump_and_bump_space(); - } - let digits = scratch.as_str(); - if digits.is_empty() { - return Err(self.error(span, ast::ErrorKind::DecimalEmpty)); - } - match u32::from_str_radix(digits, 10).ok() { - Some(n) => Ok(n), - None => Err(self.error(span, ast::ErrorKind::DecimalInvalid)), - } - } - - /// Parse a standard character class consisting primarily of characters or - /// character ranges, but can also contain nested character classes of - /// any type (sans `.`). - /// - /// This assumes the parser is positioned at the opening `[`. If parsing - /// is successful, then the parser is advanced to the position immediately - /// following the closing `]`. - #[inline(never)] - fn parse_set_class(&self) -> Result { - assert_eq!(self.char(), '['); - - let mut union = - ast::ClassSetUnion { span: self.span(), items: vec![] }; - loop { - self.bump_space(); - if self.is_eof() { - return Err(self.unclosed_class_error()); - } - match self.char() { - '[' => { - // If we've already parsed the opening bracket, then - // attempt to treat this as the beginning of an ASCII - // class. If ASCII class parsing fails, then the parser - // backs up to `[`. - if !self.parser().stack_class.borrow().is_empty() { - if let Some(cls) = self.maybe_parse_ascii_class() { - union.push(ast::ClassSetItem::Ascii(cls)); - continue; - } - } - union = self.push_class_open(union)?; - } - ']' => match self.pop_class(union)? { - Either::Left(nested_union) => { - union = nested_union; - } - Either::Right(class) => return Ok(class), - }, - '&' if self.peek() == Some('&') => { - assert!(self.bump_if("&&")); - union = self.push_class_op( - ast::ClassSetBinaryOpKind::Intersection, - union, - ); - } - '-' if self.peek() == Some('-') => { - assert!(self.bump_if("--")); - union = self.push_class_op( - ast::ClassSetBinaryOpKind::Difference, - union, - ); - } - '~' if self.peek() == Some('~') => { - assert!(self.bump_if("~~")); - union = self.push_class_op( - ast::ClassSetBinaryOpKind::SymmetricDifference, - union, - ); - } - _ => { - union.push(self.parse_set_class_range()?); - } - } - } - } - - /// Parse a single primitive item in a character class set. The item to - /// be parsed can either be one of a simple literal character, a range - /// between two simple literal characters or a "primitive" character - /// class like \w or \p{Greek}. - /// - /// If an invalid escape is found, or if a character class is found where - /// a simple literal is expected (e.g., in a range), then an error is - /// returned. 
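// Illustrative sketch, not part of the diff hunk above: the bracketed class
// syntax accepted by parse_set_class/parse_set_class_range, exercised
// through the assumed `regex_syntax::ast::parse::ParserBuilder` API.
use regex_syntax::ast::parse::ParserBuilder;

fn class_set_examples() {
    let mut p = ParserBuilder::new().build();
    // Ranges, nested ASCII classes and the set operators all parse.
    assert!(p.parse(r"[a-z0-9_]").is_ok());
    assert!(p.parse(r"[[:alnum:]&&[:^digit:]]").is_ok());
    assert!(p.parse(r"[\w--\d]").is_ok());
    // A range whose start is greater than its end is rejected
    // (ClassRangeInvalid), and an unclosed class is an error as well.
    assert!(p.parse(r"[z-a]").is_err());
    assert!(p.parse(r"[abc").is_err());
}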
- #[inline(never)] - fn parse_set_class_range(&self) -> Result { - let prim1 = self.parse_set_class_item()?; - self.bump_space(); - if self.is_eof() { - return Err(self.unclosed_class_error()); - } - // If the next char isn't a `-`, then we don't have a range. - // There are two exceptions. If the char after a `-` is a `]`, then - // `-` is interpreted as a literal `-`. Alternatively, if the char - // after a `-` is a `-`, then `--` corresponds to a "difference" - // operation. - if self.char() != '-' - || self.peek_space() == Some(']') - || self.peek_space() == Some('-') - { - return prim1.into_class_set_item(self); - } - // OK, now we're parsing a range, so bump past the `-` and parse the - // second half of the range. - if !self.bump_and_bump_space() { - return Err(self.unclosed_class_error()); - } - let prim2 = self.parse_set_class_item()?; - let range = ast::ClassSetRange { - span: Span::new(prim1.span().start, prim2.span().end), - start: prim1.into_class_literal(self)?, - end: prim2.into_class_literal(self)?, - }; - if !range.is_valid() { - return Err( - self.error(range.span, ast::ErrorKind::ClassRangeInvalid) - ); - } - Ok(ast::ClassSetItem::Range(range)) - } - - /// Parse a single item in a character class as a primitive, where the - /// primitive either consists of a verbatim literal or a single escape - /// sequence. - /// - /// This assumes the parser is positioned at the beginning of a primitive, - /// and advances the parser to the first position after the primitive if - /// successful. - /// - /// Note that it is the caller's responsibility to report an error if an - /// illegal primitive was parsed. - #[inline(never)] - fn parse_set_class_item(&self) -> Result { - if self.char() == '\\' { - self.parse_escape() - } else { - let x = Primitive::Literal(ast::Literal { - span: self.span_char(), - kind: ast::LiteralKind::Verbatim, - c: self.char(), - }); - self.bump(); - Ok(x) - } - } - - /// Parses the opening of a character class set. This includes the opening - /// bracket along with `^` if present to indicate negation. This also - /// starts parsing the opening set of unioned items if applicable, since - /// there are special rules applied to certain characters in the opening - /// of a character class. For example, `[^]]` is the class of all - /// characters not equal to `]`. (`]` would need to be escaped in any other - /// position.) Similarly for `-`. - /// - /// In all cases, the op inside the returned `ast::ClassBracketed` is an - /// empty union. This empty union should be replaced with the actual item - /// when it is popped from the parser's stack. - /// - /// This assumes the parser is positioned at the opening `[` and advances - /// the parser to the first non-special byte of the character class. - /// - /// An error is returned if EOF is found. - #[inline(never)] - fn parse_set_class_open( - &self, - ) -> Result<(ast::ClassBracketed, ast::ClassSetUnion)> { - assert_eq!(self.char(), '['); - let start = self.pos(); - if !self.bump_and_bump_space() { - return Err(self.error( - Span::new(start, self.pos()), - ast::ErrorKind::ClassUnclosed, - )); - } - - let negated = if self.char() != '^' { - false - } else { - if !self.bump_and_bump_space() { - return Err(self.error( - Span::new(start, self.pos()), - ast::ErrorKind::ClassUnclosed, - )); - } - true - }; - // Accept any number of `-` as literal `-`. 
-        let mut union =
-            ast::ClassSetUnion { span: self.span(), items: vec![] };
-        while self.char() == '-' {
-            union.push(ast::ClassSetItem::Literal(ast::Literal {
-                span: self.span_char(),
-                kind: ast::LiteralKind::Verbatim,
-                c: '-',
-            }));
-            if !self.bump_and_bump_space() {
-                return Err(self.error(
-                    Span::new(start, start),
-                    ast::ErrorKind::ClassUnclosed,
-                ));
-            }
-        }
-        // If `]` is the *first* char in a set, then interpret it as a literal
-        // `]`. That is, an empty class is impossible to write.
-        if union.items.is_empty() && self.char() == ']' {
-            union.push(ast::ClassSetItem::Literal(ast::Literal {
-                span: self.span_char(),
-                kind: ast::LiteralKind::Verbatim,
-                c: ']',
-            }));
-            if !self.bump_and_bump_space() {
-                return Err(self.error(
-                    Span::new(start, self.pos()),
-                    ast::ErrorKind::ClassUnclosed,
-                ));
-            }
-        }
-        let set = ast::ClassBracketed {
-            span: Span::new(start, self.pos()),
-            negated,
-            kind: ast::ClassSet::union(ast::ClassSetUnion {
-                span: Span::new(union.span.start, union.span.start),
-                items: vec![],
-            }),
-        };
-        Ok((set, union))
-    }
-
-    /// Attempt to parse an ASCII character class, e.g., `[:alnum:]`.
-    ///
-    /// This assumes the parser is positioned at the opening `[`.
-    ///
-    /// If no valid ASCII character class could be found, then this does not
-    /// advance the parser and `None` is returned. Otherwise, the parser is
-    /// advanced to the first byte following the closing `]` and the
-    /// corresponding ASCII class is returned.
-    #[inline(never)]
-    fn maybe_parse_ascii_class(&self) -> Option<ast::ClassAscii> {
-        // ASCII character classes are interesting from a parsing perspective
-        // because parsing cannot fail with any interesting error. For example,
-        // in order to use an ASCII character class, it must be enclosed in
-        // double brackets, e.g., `[[:alnum:]]`. Alternatively, you might think
-        // of it as "ASCII character classes have the syntax `[:NAME:]` which
-        // can only appear within character brackets." This means that things
-        // like `[[:lower:]A]` are legal constructs.
-        //
-        // However, if one types an incorrect ASCII character class, e.g.,
-        // `[[:loower:]]`, then we treat that as a normal nested character
-        // class containing the characters `:elorw`. One might argue that we
-        // should return an error instead since the repeated colons give away
-        // the intent to write an ASCII class. But what if the user typed
-        // `[[:lower]]` instead? How can we tell that was intended to be an
-        // ASCII class and not just a normal nested class?
-        //
-        // Reasonable people can probably disagree over this, but for better
-        // or worse, we implement semantics that never fails at the expense
-        // of better failure modes.
-        assert_eq!(self.char(), '[');
-        // If parsing fails, then we back up the parser to this starting point.
- let start = self.pos(); - let mut negated = false; - if !self.bump() || self.char() != ':' { - self.parser().pos.set(start); - return None; - } - if !self.bump() { - self.parser().pos.set(start); - return None; - } - if self.char() == '^' { - negated = true; - if !self.bump() { - self.parser().pos.set(start); - return None; - } - } - let name_start = self.offset(); - while self.char() != ':' && self.bump() {} - if self.is_eof() { - self.parser().pos.set(start); - return None; - } - let name = &self.pattern()[name_start..self.offset()]; - if !self.bump_if(":]") { - self.parser().pos.set(start); - return None; - } - let kind = match ast::ClassAsciiKind::from_name(name) { - Some(kind) => kind, - None => { - self.parser().pos.set(start); - return None; - } - }; - Some(ast::ClassAscii { - span: Span::new(start, self.pos()), - kind, - negated, - }) - } - - /// Parse a Unicode class in either the single character notation, `\pN` - /// or the multi-character bracketed notation, `\p{Greek}`. This assumes - /// the parser is positioned at the `p` (or `P` for negation) and will - /// advance the parser to the character immediately following the class. - /// - /// Note that this does not check whether the class name is valid or not. - #[inline(never)] - fn parse_unicode_class(&self) -> Result { - assert!(self.char() == 'p' || self.char() == 'P'); - - let mut scratch = self.parser().scratch.borrow_mut(); - scratch.clear(); - - let negated = self.char() == 'P'; - if !self.bump_and_bump_space() { - return Err( - self.error(self.span(), ast::ErrorKind::EscapeUnexpectedEof) - ); - } - let (start, kind) = if self.char() == '{' { - let start = self.span_char().end; - while self.bump_and_bump_space() && self.char() != '}' { - scratch.push(self.char()); - } - if self.is_eof() { - return Err(self - .error(self.span(), ast::ErrorKind::EscapeUnexpectedEof)); - } - assert_eq!(self.char(), '}'); - self.bump(); - - let name = scratch.as_str(); - if let Some(i) = name.find("!=") { - ( - start, - ast::ClassUnicodeKind::NamedValue { - op: ast::ClassUnicodeOpKind::NotEqual, - name: name[..i].to_string(), - value: name[i + 2..].to_string(), - }, - ) - } else if let Some(i) = name.find(':') { - ( - start, - ast::ClassUnicodeKind::NamedValue { - op: ast::ClassUnicodeOpKind::Colon, - name: name[..i].to_string(), - value: name[i + 1..].to_string(), - }, - ) - } else if let Some(i) = name.find('=') { - ( - start, - ast::ClassUnicodeKind::NamedValue { - op: ast::ClassUnicodeOpKind::Equal, - name: name[..i].to_string(), - value: name[i + 1..].to_string(), - }, - ) - } else { - (start, ast::ClassUnicodeKind::Named(name.to_string())) - } - } else { - let start = self.pos(); - let c = self.char(); - if c == '\\' { - return Err(self.error( - self.span_char(), - ast::ErrorKind::UnicodeClassInvalid, - )); - } - self.bump_and_bump_space(); - let kind = ast::ClassUnicodeKind::OneLetter(c); - (start, kind) - }; - Ok(ast::ClassUnicode { - span: Span::new(start, self.pos()), - negated, - kind, - }) - } - - /// Parse a Perl character class, e.g., `\d` or `\W`. This assumes the - /// parser is currently at a valid character class name and will be - /// advanced to the character immediately following the class. 
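// Illustrative sketch, not part of the diff hunk above: Perl and Unicode
// class escapes as documented here, plus the nesting cap enforced by the
// NestLimiter defined next. The `regex_syntax::ast::parse::ParserBuilder`
// path is assumed; `nest_limit` is the builder method the tests below use.
use regex_syntax::ast::parse::ParserBuilder;

fn class_and_nest_limit_examples() {
    let mut p = ParserBuilder::new().build();
    // `\d`/`\W` are Perl classes; `\p{...}`/`\P{...}` are Unicode classes.
    // Note the AST parser does not validate the Unicode class name itself.
    assert!(p.parse(r"\d\W\p{Greek}\P{Lu}").is_ok());
    // The nest limit bounds how deeply groups, repetitions and classes may
    // nest; exceeding it is reported as NestLimitExceeded.
    let mut shallow = ParserBuilder::new().nest_limit(1).build();
    assert!(shallow.parse(r"a").is_ok());
    assert!(shallow.parse(r"((a))").is_err());
}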
- #[inline(never)] - fn parse_perl_class(&self) -> ast::ClassPerl { - let c = self.char(); - let span = self.span_char(); - self.bump(); - let (negated, kind) = match c { - 'd' => (false, ast::ClassPerlKind::Digit), - 'D' => (true, ast::ClassPerlKind::Digit), - 's' => (false, ast::ClassPerlKind::Space), - 'S' => (true, ast::ClassPerlKind::Space), - 'w' => (false, ast::ClassPerlKind::Word), - 'W' => (true, ast::ClassPerlKind::Word), - c => panic!("expected valid Perl class but got '{c}'"), - }; - ast::ClassPerl { span, kind, negated } - } -} - -/// A type that traverses a fully parsed Ast and checks whether its depth -/// exceeds the specified nesting limit. If it does, then an error is returned. -#[derive(Debug)] -struct NestLimiter<'p, 's, P> { - /// The parser that is checking the nest limit. - p: &'p ParserI<'s, P>, - /// The current depth while walking an Ast. - depth: u32, -} - -impl<'p, 's, P: Borrow> NestLimiter<'p, 's, P> { - fn new(p: &'p ParserI<'s, P>) -> NestLimiter<'p, 's, P> { - NestLimiter { p, depth: 0 } - } - - #[inline(never)] - fn check(self, ast: &Ast) -> Result<()> { - ast::visit(ast, self) - } - - fn increment_depth(&mut self, span: &Span) -> Result<()> { - let new = self.depth.checked_add(1).ok_or_else(|| { - self.p.error( - span.clone(), - ast::ErrorKind::NestLimitExceeded(u32::MAX), - ) - })?; - let limit = self.p.parser().nest_limit; - if new > limit { - return Err(self.p.error( - span.clone(), - ast::ErrorKind::NestLimitExceeded(limit), - )); - } - self.depth = new; - Ok(()) - } - - fn decrement_depth(&mut self) { - // Assuming the correctness of the visitor, this should never drop - // below 0. - self.depth = self.depth.checked_sub(1).unwrap(); - } -} - -impl<'p, 's, P: Borrow> ast::Visitor for NestLimiter<'p, 's, P> { - type Output = (); - type Err = ast::Error; - - fn finish(self) -> Result<()> { - Ok(()) - } - - fn visit_pre(&mut self, ast: &Ast) -> Result<()> { - let span = match *ast { - Ast::Empty(_) - | Ast::Flags(_) - | Ast::Literal(_) - | Ast::Dot(_) - | Ast::Assertion(_) - | Ast::ClassUnicode(_) - | Ast::ClassPerl(_) => { - // These are all base cases, so we don't increment depth. - return Ok(()); - } - Ast::ClassBracketed(ref x) => &x.span, - Ast::Repetition(ref x) => &x.span, - Ast::Group(ref x) => &x.span, - Ast::Alternation(ref x) => &x.span, - Ast::Concat(ref x) => &x.span, - }; - self.increment_depth(span) - } - - fn visit_post(&mut self, ast: &Ast) -> Result<()> { - match *ast { - Ast::Empty(_) - | Ast::Flags(_) - | Ast::Literal(_) - | Ast::Dot(_) - | Ast::Assertion(_) - | Ast::ClassUnicode(_) - | Ast::ClassPerl(_) => { - // These are all base cases, so we don't decrement depth. - Ok(()) - } - Ast::ClassBracketed(_) - | Ast::Repetition(_) - | Ast::Group(_) - | Ast::Alternation(_) - | Ast::Concat(_) => { - self.decrement_depth(); - Ok(()) - } - } - } - - fn visit_class_set_item_pre( - &mut self, - ast: &ast::ClassSetItem, - ) -> Result<()> { - let span = match *ast { - ast::ClassSetItem::Empty(_) - | ast::ClassSetItem::Literal(_) - | ast::ClassSetItem::Range(_) - | ast::ClassSetItem::Ascii(_) - | ast::ClassSetItem::Unicode(_) - | ast::ClassSetItem::Perl(_) => { - // These are all base cases, so we don't increment depth. 
- return Ok(()); - } - ast::ClassSetItem::Bracketed(ref x) => &x.span, - ast::ClassSetItem::Union(ref x) => &x.span, - }; - self.increment_depth(span) - } - - fn visit_class_set_item_post( - &mut self, - ast: &ast::ClassSetItem, - ) -> Result<()> { - match *ast { - ast::ClassSetItem::Empty(_) - | ast::ClassSetItem::Literal(_) - | ast::ClassSetItem::Range(_) - | ast::ClassSetItem::Ascii(_) - | ast::ClassSetItem::Unicode(_) - | ast::ClassSetItem::Perl(_) => { - // These are all base cases, so we don't decrement depth. - Ok(()) - } - ast::ClassSetItem::Bracketed(_) | ast::ClassSetItem::Union(_) => { - self.decrement_depth(); - Ok(()) - } - } - } - - fn visit_class_set_binary_op_pre( - &mut self, - ast: &ast::ClassSetBinaryOp, - ) -> Result<()> { - self.increment_depth(&ast.span) - } - - fn visit_class_set_binary_op_post( - &mut self, - _ast: &ast::ClassSetBinaryOp, - ) -> Result<()> { - self.decrement_depth(); - Ok(()) - } -} - -/// When the result is an error, transforms the ast::ErrorKind from the source -/// Result into another one. This function is used to return clearer error -/// messages when possible. -fn specialize_err( - result: Result, - from: ast::ErrorKind, - to: ast::ErrorKind, -) -> Result { - if let Err(e) = result { - if e.kind == from { - Err(ast::Error { kind: to, pattern: e.pattern, span: e.span }) - } else { - Err(e) - } - } else { - result - } -} - -#[cfg(test)] -mod tests { - use core::ops::Range; - - use alloc::format; - - use super::*; - - // Our own assert_eq, which has slightly better formatting (but honestly - // still kind of crappy). - macro_rules! assert_eq { - ($left:expr, $right:expr) => {{ - match (&$left, &$right) { - (left_val, right_val) => { - if !(*left_val == *right_val) { - panic!( - "assertion failed: `(left == right)`\n\n\ - left: `{:?}`\nright: `{:?}`\n\n", - left_val, right_val - ) - } - } - } - }}; - } - - // We create these errors to compare with real ast::Errors in the tests. - // We define equality between TestError and ast::Error to disregard the - // pattern string in ast::Error, which is annoying to provide in tests. - #[derive(Clone, Debug)] - struct TestError { - span: Span, - kind: ast::ErrorKind, - } - - impl PartialEq for TestError { - fn eq(&self, other: &ast::Error) -> bool { - self.span == other.span && self.kind == other.kind - } - } - - impl PartialEq for ast::Error { - fn eq(&self, other: &TestError) -> bool { - self.span == other.span && self.kind == other.kind - } - } - - fn s(str: &str) -> String { - str.to_string() - } - - fn parser(pattern: &str) -> ParserI<'_, Parser> { - ParserI::new(Parser::new(), pattern) - } - - fn parser_octal(pattern: &str) -> ParserI<'_, Parser> { - let parser = ParserBuilder::new().octal(true).build(); - ParserI::new(parser, pattern) - } - - fn parser_empty_min_range(pattern: &str) -> ParserI<'_, Parser> { - let parser = ParserBuilder::new().empty_min_range(true).build(); - ParserI::new(parser, pattern) - } - - fn parser_nest_limit( - pattern: &str, - nest_limit: u32, - ) -> ParserI<'_, Parser> { - let p = ParserBuilder::new().nest_limit(nest_limit).build(); - ParserI::new(p, pattern) - } - - fn parser_ignore_whitespace(pattern: &str) -> ParserI<'_, Parser> { - let p = ParserBuilder::new().ignore_whitespace(true).build(); - ParserI::new(p, pattern) - } - - /// Short alias for creating a new span. - fn nspan(start: Position, end: Position) -> Span { - Span::new(start, end) - } - - /// Short alias for creating a new position. 
- fn npos(offset: usize, line: usize, column: usize) -> Position { - Position::new(offset, line, column) - } - - /// Create a new span from the given offset range. This assumes a single - /// line and sets the columns based on the offsets. i.e., This only works - /// out of the box for ASCII, which is fine for most tests. - fn span(range: Range) -> Span { - let start = Position::new(range.start, 1, range.start + 1); - let end = Position::new(range.end, 1, range.end + 1); - Span::new(start, end) - } - - /// Create a new span for the corresponding byte range in the given string. - fn span_range(subject: &str, range: Range) -> Span { - let start = Position { - offset: range.start, - line: 1 + subject[..range.start].matches('\n').count(), - column: 1 + subject[..range.start] - .chars() - .rev() - .position(|c| c == '\n') - .unwrap_or(subject[..range.start].chars().count()), - }; - let end = Position { - offset: range.end, - line: 1 + subject[..range.end].matches('\n').count(), - column: 1 + subject[..range.end] - .chars() - .rev() - .position(|c| c == '\n') - .unwrap_or(subject[..range.end].chars().count()), - }; - Span::new(start, end) - } - - /// Create a verbatim literal starting at the given position. - fn lit(c: char, start: usize) -> Ast { - lit_with(c, span(start..start + c.len_utf8())) - } - - /// Create a meta literal starting at the given position. - fn meta_lit(c: char, span: Span) -> Ast { - Ast::literal(ast::Literal { span, kind: ast::LiteralKind::Meta, c }) - } - - /// Create a verbatim literal with the given span. - fn lit_with(c: char, span: Span) -> Ast { - Ast::literal(ast::Literal { - span, - kind: ast::LiteralKind::Verbatim, - c, - }) - } - - /// Create a concatenation with the given range. - fn concat(range: Range, asts: Vec) -> Ast { - concat_with(span(range), asts) - } - - /// Create a concatenation with the given span. - fn concat_with(span: Span, asts: Vec) -> Ast { - Ast::concat(ast::Concat { span, asts }) - } - - /// Create an alternation with the given span. - fn alt(range: Range, asts: Vec) -> Ast { - Ast::alternation(ast::Alternation { span: span(range), asts }) - } - - /// Create a capturing group with the given span. - fn group(range: Range, index: u32, ast: Ast) -> Ast { - Ast::group(ast::Group { - span: span(range), - kind: ast::GroupKind::CaptureIndex(index), - ast: Box::new(ast), - }) - } - - /// Create an ast::SetFlags. - /// - /// The given pattern should be the full pattern string. The range given - /// should correspond to the byte offsets where the flag set occurs. - /// - /// If negated is true, then the set is interpreted as beginning with a - /// negation. - fn flag_set( - pat: &str, - range: Range, - flag: ast::Flag, - negated: bool, - ) -> Ast { - let mut items = vec![ast::FlagsItem { - span: span_range(pat, (range.end - 2)..(range.end - 1)), - kind: ast::FlagsItemKind::Flag(flag), - }]; - if negated { - items.insert( - 0, - ast::FlagsItem { - span: span_range(pat, (range.start + 2)..(range.end - 2)), - kind: ast::FlagsItemKind::Negation, - }, - ); - } - Ast::flags(ast::SetFlags { - span: span_range(pat, range.clone()), - flags: ast::Flags { - span: span_range(pat, (range.start + 2)..(range.end - 1)), - items, - }, - }) - } - - #[test] - fn parse_nest_limit() { - // A nest limit of 0 still allows some types of regexes. - assert_eq!( - parser_nest_limit("", 0).parse(), - Ok(Ast::empty(span(0..0))) - ); - assert_eq!(parser_nest_limit("a", 0).parse(), Ok(lit('a', 0))); - - // Test repetition operations, which require one level of nesting. 
- assert_eq!( - parser_nest_limit("a+", 0).parse().unwrap_err(), - TestError { - span: span(0..2), - kind: ast::ErrorKind::NestLimitExceeded(0), - } - ); - assert_eq!( - parser_nest_limit("a+", 1).parse(), - Ok(Ast::repetition(ast::Repetition { - span: span(0..2), - op: ast::RepetitionOp { - span: span(1..2), - kind: ast::RepetitionKind::OneOrMore, - }, - greedy: true, - ast: Box::new(lit('a', 0)), - })) - ); - assert_eq!( - parser_nest_limit("(a)+", 1).parse().unwrap_err(), - TestError { - span: span(0..3), - kind: ast::ErrorKind::NestLimitExceeded(1), - } - ); - assert_eq!( - parser_nest_limit("a+*", 1).parse().unwrap_err(), - TestError { - span: span(0..2), - kind: ast::ErrorKind::NestLimitExceeded(1), - } - ); - assert_eq!( - parser_nest_limit("a+*", 2).parse(), - Ok(Ast::repetition(ast::Repetition { - span: span(0..3), - op: ast::RepetitionOp { - span: span(2..3), - kind: ast::RepetitionKind::ZeroOrMore, - }, - greedy: true, - ast: Box::new(Ast::repetition(ast::Repetition { - span: span(0..2), - op: ast::RepetitionOp { - span: span(1..2), - kind: ast::RepetitionKind::OneOrMore, - }, - greedy: true, - ast: Box::new(lit('a', 0)), - })), - })) - ); - - // Test concatenations. A concatenation requires one level of nesting. - assert_eq!( - parser_nest_limit("ab", 0).parse().unwrap_err(), - TestError { - span: span(0..2), - kind: ast::ErrorKind::NestLimitExceeded(0), - } - ); - assert_eq!( - parser_nest_limit("ab", 1).parse(), - Ok(concat(0..2, vec![lit('a', 0), lit('b', 1)])) - ); - assert_eq!( - parser_nest_limit("abc", 1).parse(), - Ok(concat(0..3, vec![lit('a', 0), lit('b', 1), lit('c', 2)])) - ); - - // Test alternations. An alternation requires one level of nesting. - assert_eq!( - parser_nest_limit("a|b", 0).parse().unwrap_err(), - TestError { - span: span(0..3), - kind: ast::ErrorKind::NestLimitExceeded(0), - } - ); - assert_eq!( - parser_nest_limit("a|b", 1).parse(), - Ok(alt(0..3, vec![lit('a', 0), lit('b', 2)])) - ); - assert_eq!( - parser_nest_limit("a|b|c", 1).parse(), - Ok(alt(0..5, vec![lit('a', 0), lit('b', 2), lit('c', 4)])) - ); - - // Test character classes. Classes form their own mini-recursive - // syntax! - assert_eq!( - parser_nest_limit("[a]", 0).parse().unwrap_err(), - TestError { - span: span(0..3), - kind: ast::ErrorKind::NestLimitExceeded(0), - } - ); - assert_eq!( - parser_nest_limit("[a]", 1).parse(), - Ok(Ast::class_bracketed(ast::ClassBracketed { - span: span(0..3), - negated: false, - kind: ast::ClassSet::Item(ast::ClassSetItem::Literal( - ast::Literal { - span: span(1..2), - kind: ast::LiteralKind::Verbatim, - c: 'a', - } - )), - })) - ); - assert_eq!( - parser_nest_limit("[ab]", 1).parse().unwrap_err(), - TestError { - span: span(1..3), - kind: ast::ErrorKind::NestLimitExceeded(1), - } - ); - assert_eq!( - parser_nest_limit("[ab[cd]]", 2).parse().unwrap_err(), - TestError { - span: span(3..7), - kind: ast::ErrorKind::NestLimitExceeded(2), - } - ); - assert_eq!( - parser_nest_limit("[ab[cd]]", 3).parse().unwrap_err(), - TestError { - span: span(4..6), - kind: ast::ErrorKind::NestLimitExceeded(3), - } - ); - assert_eq!( - parser_nest_limit("[a--b]", 1).parse().unwrap_err(), - TestError { - span: span(1..5), - kind: ast::ErrorKind::NestLimitExceeded(1), - } - ); - assert_eq!( - parser_nest_limit("[a--bc]", 2).parse().unwrap_err(), - TestError { - span: span(4..6), - kind: ast::ErrorKind::NestLimitExceeded(2), - } - ); - } - - #[test] - fn parse_comments() { - let pat = "(?x) -# This is comment 1. -foo # This is comment 2. - # This is comment 3. 
-bar -# This is comment 4."; - let astc = parser(pat).parse_with_comments().unwrap(); - assert_eq!( - astc.ast, - concat_with( - span_range(pat, 0..pat.len()), - vec![ - flag_set(pat, 0..4, ast::Flag::IgnoreWhitespace, false), - lit_with('f', span_range(pat, 26..27)), - lit_with('o', span_range(pat, 27..28)), - lit_with('o', span_range(pat, 28..29)), - lit_with('b', span_range(pat, 74..75)), - lit_with('a', span_range(pat, 75..76)), - lit_with('r', span_range(pat, 76..77)), - ] - ) - ); - assert_eq!( - astc.comments, - vec![ - ast::Comment { - span: span_range(pat, 5..26), - comment: s(" This is comment 1."), - }, - ast::Comment { - span: span_range(pat, 30..51), - comment: s(" This is comment 2."), - }, - ast::Comment { - span: span_range(pat, 53..74), - comment: s(" This is comment 3."), - }, - ast::Comment { - span: span_range(pat, 78..98), - comment: s(" This is comment 4."), - }, - ] - ); - } - - #[test] - fn parse_holistic() { - assert_eq!(parser("]").parse(), Ok(lit(']', 0))); - assert_eq!( - parser(r"\\\.\+\*\?\(\)\|\[\]\{\}\^\$\#\&\-\~").parse(), - Ok(concat( - 0..36, - vec![ - meta_lit('\\', span(0..2)), - meta_lit('.', span(2..4)), - meta_lit('+', span(4..6)), - meta_lit('*', span(6..8)), - meta_lit('?', span(8..10)), - meta_lit('(', span(10..12)), - meta_lit(')', span(12..14)), - meta_lit('|', span(14..16)), - meta_lit('[', span(16..18)), - meta_lit(']', span(18..20)), - meta_lit('{', span(20..22)), - meta_lit('}', span(22..24)), - meta_lit('^', span(24..26)), - meta_lit('$', span(26..28)), - meta_lit('#', span(28..30)), - meta_lit('&', span(30..32)), - meta_lit('-', span(32..34)), - meta_lit('~', span(34..36)), - ] - )) - ); - } - - #[test] - fn parse_ignore_whitespace() { - // Test that basic whitespace insensitivity works. - let pat = "(?x)a b"; - assert_eq!( - parser(pat).parse(), - Ok(concat_with( - nspan(npos(0, 1, 1), npos(7, 1, 8)), - vec![ - flag_set(pat, 0..4, ast::Flag::IgnoreWhitespace, false), - lit_with('a', nspan(npos(4, 1, 5), npos(5, 1, 6))), - lit_with('b', nspan(npos(6, 1, 7), npos(7, 1, 8))), - ] - )) - ); - - // Test that we can toggle whitespace insensitivity. - let pat = "(?x)a b(?-x)a b"; - assert_eq!( - parser(pat).parse(), - Ok(concat_with( - nspan(npos(0, 1, 1), npos(15, 1, 16)), - vec![ - flag_set(pat, 0..4, ast::Flag::IgnoreWhitespace, false), - lit_with('a', nspan(npos(4, 1, 5), npos(5, 1, 6))), - lit_with('b', nspan(npos(6, 1, 7), npos(7, 1, 8))), - flag_set(pat, 7..12, ast::Flag::IgnoreWhitespace, true), - lit_with('a', nspan(npos(12, 1, 13), npos(13, 1, 14))), - lit_with(' ', nspan(npos(13, 1, 14), npos(14, 1, 15))), - lit_with('b', nspan(npos(14, 1, 15), npos(15, 1, 16))), - ] - )) - ); - - // Test that nesting whitespace insensitive flags works. - let pat = "a (?x:a )a "; - assert_eq!( - parser(pat).parse(), - Ok(concat_with( - span_range(pat, 0..11), - vec![ - lit_with('a', span_range(pat, 0..1)), - lit_with(' ', span_range(pat, 1..2)), - Ast::group(ast::Group { - span: span_range(pat, 2..9), - kind: ast::GroupKind::NonCapturing(ast::Flags { - span: span_range(pat, 4..5), - items: vec![ast::FlagsItem { - span: span_range(pat, 4..5), - kind: ast::FlagsItemKind::Flag( - ast::Flag::IgnoreWhitespace - ), - },], - }), - ast: Box::new(lit_with('a', span_range(pat, 6..7))), - }), - lit_with('a', span_range(pat, 9..10)), - lit_with(' ', span_range(pat, 10..11)), - ] - )) - ); - - // Test that whitespace after an opening paren is insignificant. 
- let pat = "(?x)( ?P a )"; - assert_eq!( - parser(pat).parse(), - Ok(concat_with( - span_range(pat, 0..pat.len()), - vec![ - flag_set(pat, 0..4, ast::Flag::IgnoreWhitespace, false), - Ast::group(ast::Group { - span: span_range(pat, 4..pat.len()), - kind: ast::GroupKind::CaptureName { - starts_with_p: true, - name: ast::CaptureName { - span: span_range(pat, 9..12), - name: s("foo"), - index: 1, - } - }, - ast: Box::new(lit_with('a', span_range(pat, 14..15))), - }), - ] - )) - ); - let pat = "(?x)( a )"; - assert_eq!( - parser(pat).parse(), - Ok(concat_with( - span_range(pat, 0..pat.len()), - vec![ - flag_set(pat, 0..4, ast::Flag::IgnoreWhitespace, false), - Ast::group(ast::Group { - span: span_range(pat, 4..pat.len()), - kind: ast::GroupKind::CaptureIndex(1), - ast: Box::new(lit_with('a', span_range(pat, 7..8))), - }), - ] - )) - ); - let pat = "(?x)( ?: a )"; - assert_eq!( - parser(pat).parse(), - Ok(concat_with( - span_range(pat, 0..pat.len()), - vec![ - flag_set(pat, 0..4, ast::Flag::IgnoreWhitespace, false), - Ast::group(ast::Group { - span: span_range(pat, 4..pat.len()), - kind: ast::GroupKind::NonCapturing(ast::Flags { - span: span_range(pat, 8..8), - items: vec![], - }), - ast: Box::new(lit_with('a', span_range(pat, 11..12))), - }), - ] - )) - ); - let pat = r"(?x)\x { 53 }"; - assert_eq!( - parser(pat).parse(), - Ok(concat_with( - span_range(pat, 0..pat.len()), - vec![ - flag_set(pat, 0..4, ast::Flag::IgnoreWhitespace, false), - Ast::literal(ast::Literal { - span: span(4..13), - kind: ast::LiteralKind::HexBrace( - ast::HexLiteralKind::X - ), - c: 'S', - }), - ] - )) - ); - - // Test that whitespace after an escape is OK. - let pat = r"(?x)\ "; - assert_eq!( - parser(pat).parse(), - Ok(concat_with( - span_range(pat, 0..pat.len()), - vec![ - flag_set(pat, 0..4, ast::Flag::IgnoreWhitespace, false), - Ast::literal(ast::Literal { - span: span_range(pat, 4..6), - kind: ast::LiteralKind::Superfluous, - c: ' ', - }), - ] - )) - ); - } - - #[test] - fn parse_newlines() { - let pat = ".\n."; - assert_eq!( - parser(pat).parse(), - Ok(concat_with( - span_range(pat, 0..3), - vec![ - Ast::dot(span_range(pat, 0..1)), - lit_with('\n', span_range(pat, 1..2)), - Ast::dot(span_range(pat, 2..3)), - ] - )) - ); - - let pat = "foobar\nbaz\nquux\n"; - assert_eq!( - parser(pat).parse(), - Ok(concat_with( - span_range(pat, 0..pat.len()), - vec![ - lit_with('f', nspan(npos(0, 1, 1), npos(1, 1, 2))), - lit_with('o', nspan(npos(1, 1, 2), npos(2, 1, 3))), - lit_with('o', nspan(npos(2, 1, 3), npos(3, 1, 4))), - lit_with('b', nspan(npos(3, 1, 4), npos(4, 1, 5))), - lit_with('a', nspan(npos(4, 1, 5), npos(5, 1, 6))), - lit_with('r', nspan(npos(5, 1, 6), npos(6, 1, 7))), - lit_with('\n', nspan(npos(6, 1, 7), npos(7, 2, 1))), - lit_with('b', nspan(npos(7, 2, 1), npos(8, 2, 2))), - lit_with('a', nspan(npos(8, 2, 2), npos(9, 2, 3))), - lit_with('z', nspan(npos(9, 2, 3), npos(10, 2, 4))), - lit_with('\n', nspan(npos(10, 2, 4), npos(11, 3, 1))), - lit_with('q', nspan(npos(11, 3, 1), npos(12, 3, 2))), - lit_with('u', nspan(npos(12, 3, 2), npos(13, 3, 3))), - lit_with('u', nspan(npos(13, 3, 3), npos(14, 3, 4))), - lit_with('x', nspan(npos(14, 3, 4), npos(15, 3, 5))), - lit_with('\n', nspan(npos(15, 3, 5), npos(16, 4, 1))), - ] - )) - ); - } - - #[test] - fn parse_uncounted_repetition() { - assert_eq!( - parser(r"a*").parse(), - Ok(Ast::repetition(ast::Repetition { - span: span(0..2), - op: ast::RepetitionOp { - span: span(1..2), - kind: ast::RepetitionKind::ZeroOrMore, - }, - greedy: true, - ast: Box::new(lit('a', 0)), - 
})) - ); - assert_eq!( - parser(r"a+").parse(), - Ok(Ast::repetition(ast::Repetition { - span: span(0..2), - op: ast::RepetitionOp { - span: span(1..2), - kind: ast::RepetitionKind::OneOrMore, - }, - greedy: true, - ast: Box::new(lit('a', 0)), - })) - ); - - assert_eq!( - parser(r"a?").parse(), - Ok(Ast::repetition(ast::Repetition { - span: span(0..2), - op: ast::RepetitionOp { - span: span(1..2), - kind: ast::RepetitionKind::ZeroOrOne, - }, - greedy: true, - ast: Box::new(lit('a', 0)), - })) - ); - assert_eq!( - parser(r"a??").parse(), - Ok(Ast::repetition(ast::Repetition { - span: span(0..3), - op: ast::RepetitionOp { - span: span(1..3), - kind: ast::RepetitionKind::ZeroOrOne, - }, - greedy: false, - ast: Box::new(lit('a', 0)), - })) - ); - assert_eq!( - parser(r"a?").parse(), - Ok(Ast::repetition(ast::Repetition { - span: span(0..2), - op: ast::RepetitionOp { - span: span(1..2), - kind: ast::RepetitionKind::ZeroOrOne, - }, - greedy: true, - ast: Box::new(lit('a', 0)), - })) - ); - assert_eq!( - parser(r"a?b").parse(), - Ok(concat( - 0..3, - vec![ - Ast::repetition(ast::Repetition { - span: span(0..2), - op: ast::RepetitionOp { - span: span(1..2), - kind: ast::RepetitionKind::ZeroOrOne, - }, - greedy: true, - ast: Box::new(lit('a', 0)), - }), - lit('b', 2), - ] - )) - ); - assert_eq!( - parser(r"a??b").parse(), - Ok(concat( - 0..4, - vec![ - Ast::repetition(ast::Repetition { - span: span(0..3), - op: ast::RepetitionOp { - span: span(1..3), - kind: ast::RepetitionKind::ZeroOrOne, - }, - greedy: false, - ast: Box::new(lit('a', 0)), - }), - lit('b', 3), - ] - )) - ); - assert_eq!( - parser(r"ab?").parse(), - Ok(concat( - 0..3, - vec![ - lit('a', 0), - Ast::repetition(ast::Repetition { - span: span(1..3), - op: ast::RepetitionOp { - span: span(2..3), - kind: ast::RepetitionKind::ZeroOrOne, - }, - greedy: true, - ast: Box::new(lit('b', 1)), - }), - ] - )) - ); - assert_eq!( - parser(r"(ab)?").parse(), - Ok(Ast::repetition(ast::Repetition { - span: span(0..5), - op: ast::RepetitionOp { - span: span(4..5), - kind: ast::RepetitionKind::ZeroOrOne, - }, - greedy: true, - ast: Box::new(group( - 0..4, - 1, - concat(1..3, vec![lit('a', 1), lit('b', 2),]) - )), - })) - ); - assert_eq!( - parser(r"|a?").parse(), - Ok(alt( - 0..3, - vec![ - Ast::empty(span(0..0)), - Ast::repetition(ast::Repetition { - span: span(1..3), - op: ast::RepetitionOp { - span: span(2..3), - kind: ast::RepetitionKind::ZeroOrOne, - }, - greedy: true, - ast: Box::new(lit('a', 1)), - }), - ] - )) - ); - - assert_eq!( - parser(r"*").parse().unwrap_err(), - TestError { - span: span(0..0), - kind: ast::ErrorKind::RepetitionMissing, - } - ); - assert_eq!( - parser(r"(?i)*").parse().unwrap_err(), - TestError { - span: span(4..4), - kind: ast::ErrorKind::RepetitionMissing, - } - ); - assert_eq!( - parser(r"(*)").parse().unwrap_err(), - TestError { - span: span(1..1), - kind: ast::ErrorKind::RepetitionMissing, - } - ); - assert_eq!( - parser(r"(?:?)").parse().unwrap_err(), - TestError { - span: span(3..3), - kind: ast::ErrorKind::RepetitionMissing, - } - ); - assert_eq!( - parser(r"+").parse().unwrap_err(), - TestError { - span: span(0..0), - kind: ast::ErrorKind::RepetitionMissing, - } - ); - assert_eq!( - parser(r"?").parse().unwrap_err(), - TestError { - span: span(0..0), - kind: ast::ErrorKind::RepetitionMissing, - } - ); - assert_eq!( - parser(r"(?)").parse().unwrap_err(), - TestError { - span: span(1..1), - kind: ast::ErrorKind::RepetitionMissing, - } - ); - assert_eq!( - parser(r"|*").parse().unwrap_err(), - TestError { - span: 
span(1..1), - kind: ast::ErrorKind::RepetitionMissing, - } - ); - assert_eq!( - parser(r"|+").parse().unwrap_err(), - TestError { - span: span(1..1), - kind: ast::ErrorKind::RepetitionMissing, - } - ); - assert_eq!( - parser(r"|?").parse().unwrap_err(), - TestError { - span: span(1..1), - kind: ast::ErrorKind::RepetitionMissing, - } - ); - } - - #[test] - fn parse_counted_repetition() { - assert_eq!( - parser(r"a{5}").parse(), - Ok(Ast::repetition(ast::Repetition { - span: span(0..4), - op: ast::RepetitionOp { - span: span(1..4), - kind: ast::RepetitionKind::Range( - ast::RepetitionRange::Exactly(5) - ), - }, - greedy: true, - ast: Box::new(lit('a', 0)), - })) - ); - assert_eq!( - parser(r"a{5,}").parse(), - Ok(Ast::repetition(ast::Repetition { - span: span(0..5), - op: ast::RepetitionOp { - span: span(1..5), - kind: ast::RepetitionKind::Range( - ast::RepetitionRange::AtLeast(5) - ), - }, - greedy: true, - ast: Box::new(lit('a', 0)), - })) - ); - assert_eq!( - parser(r"a{5,9}").parse(), - Ok(Ast::repetition(ast::Repetition { - span: span(0..6), - op: ast::RepetitionOp { - span: span(1..6), - kind: ast::RepetitionKind::Range( - ast::RepetitionRange::Bounded(5, 9) - ), - }, - greedy: true, - ast: Box::new(lit('a', 0)), - })) - ); - assert_eq!( - parser(r"a{5}?").parse(), - Ok(Ast::repetition(ast::Repetition { - span: span(0..5), - op: ast::RepetitionOp { - span: span(1..5), - kind: ast::RepetitionKind::Range( - ast::RepetitionRange::Exactly(5) - ), - }, - greedy: false, - ast: Box::new(lit('a', 0)), - })) - ); - assert_eq!( - parser(r"ab{5}").parse(), - Ok(concat( - 0..5, - vec![ - lit('a', 0), - Ast::repetition(ast::Repetition { - span: span(1..5), - op: ast::RepetitionOp { - span: span(2..5), - kind: ast::RepetitionKind::Range( - ast::RepetitionRange::Exactly(5) - ), - }, - greedy: true, - ast: Box::new(lit('b', 1)), - }), - ] - )) - ); - assert_eq!( - parser(r"ab{5}c").parse(), - Ok(concat( - 0..6, - vec![ - lit('a', 0), - Ast::repetition(ast::Repetition { - span: span(1..5), - op: ast::RepetitionOp { - span: span(2..5), - kind: ast::RepetitionKind::Range( - ast::RepetitionRange::Exactly(5) - ), - }, - greedy: true, - ast: Box::new(lit('b', 1)), - }), - lit('c', 5), - ] - )) - ); - - assert_eq!( - parser(r"a{ 5 }").parse(), - Ok(Ast::repetition(ast::Repetition { - span: span(0..6), - op: ast::RepetitionOp { - span: span(1..6), - kind: ast::RepetitionKind::Range( - ast::RepetitionRange::Exactly(5) - ), - }, - greedy: true, - ast: Box::new(lit('a', 0)), - })) - ); - assert_eq!( - parser(r"a{ 5 , 9 }").parse(), - Ok(Ast::repetition(ast::Repetition { - span: span(0..10), - op: ast::RepetitionOp { - span: span(1..10), - kind: ast::RepetitionKind::Range( - ast::RepetitionRange::Bounded(5, 9) - ), - }, - greedy: true, - ast: Box::new(lit('a', 0)), - })) - ); - assert_eq!( - parser_empty_min_range(r"a{,9}").parse(), - Ok(Ast::repetition(ast::Repetition { - span: span(0..5), - op: ast::RepetitionOp { - span: span(1..5), - kind: ast::RepetitionKind::Range( - ast::RepetitionRange::Bounded(0, 9) - ), - }, - greedy: true, - ast: Box::new(lit('a', 0)), - })) - ); - assert_eq!( - parser_ignore_whitespace(r"a{5,9} ?").parse(), - Ok(Ast::repetition(ast::Repetition { - span: span(0..8), - op: ast::RepetitionOp { - span: span(1..8), - kind: ast::RepetitionKind::Range( - ast::RepetitionRange::Bounded(5, 9) - ), - }, - greedy: false, - ast: Box::new(lit('a', 0)), - })) - ); - assert_eq!( - parser(r"\b{5,9}").parse(), - Ok(Ast::repetition(ast::Repetition { - span: span(0..7), - op: ast::RepetitionOp { - span: 
span(2..7), - kind: ast::RepetitionKind::Range( - ast::RepetitionRange::Bounded(5, 9) - ), - }, - greedy: true, - ast: Box::new(Ast::assertion(ast::Assertion { - span: span(0..2), - kind: ast::AssertionKind::WordBoundary, - })), - })) - ); - - assert_eq!( - parser(r"(?i){0}").parse().unwrap_err(), - TestError { - span: span(4..4), - kind: ast::ErrorKind::RepetitionMissing, - } - ); - assert_eq!( - parser(r"(?m){1,1}").parse().unwrap_err(), - TestError { - span: span(4..4), - kind: ast::ErrorKind::RepetitionMissing, - } - ); - assert_eq!( - parser(r"a{]}").parse().unwrap_err(), - TestError { - span: span(2..2), - kind: ast::ErrorKind::RepetitionCountDecimalEmpty, - } - ); - assert_eq!( - parser(r"a{1,]}").parse().unwrap_err(), - TestError { - span: span(4..4), - kind: ast::ErrorKind::RepetitionCountDecimalEmpty, - } - ); - assert_eq!( - parser(r"a{").parse().unwrap_err(), - TestError { - span: span(1..2), - kind: ast::ErrorKind::RepetitionCountUnclosed, - } - ); - assert_eq!( - parser(r"a{}").parse().unwrap_err(), - TestError { - span: span(2..2), - kind: ast::ErrorKind::RepetitionCountDecimalEmpty, - } - ); - assert_eq!( - parser(r"a{a").parse().unwrap_err(), - TestError { - span: span(2..2), - kind: ast::ErrorKind::RepetitionCountDecimalEmpty, - } - ); - assert_eq!( - parser(r"a{9999999999}").parse().unwrap_err(), - TestError { - span: span(2..12), - kind: ast::ErrorKind::DecimalInvalid, - } - ); - assert_eq!( - parser(r"a{9").parse().unwrap_err(), - TestError { - span: span(1..3), - kind: ast::ErrorKind::RepetitionCountUnclosed, - } - ); - assert_eq!( - parser(r"a{9,a").parse().unwrap_err(), - TestError { - span: span(4..4), - kind: ast::ErrorKind::RepetitionCountDecimalEmpty, - } - ); - assert_eq!( - parser(r"a{9,9999999999}").parse().unwrap_err(), - TestError { - span: span(4..14), - kind: ast::ErrorKind::DecimalInvalid, - } - ); - assert_eq!( - parser(r"a{9,").parse().unwrap_err(), - TestError { - span: span(1..4), - kind: ast::ErrorKind::RepetitionCountUnclosed, - } - ); - assert_eq!( - parser(r"a{9,11").parse().unwrap_err(), - TestError { - span: span(1..6), - kind: ast::ErrorKind::RepetitionCountUnclosed, - } - ); - assert_eq!( - parser(r"a{2,1}").parse().unwrap_err(), - TestError { - span: span(1..6), - kind: ast::ErrorKind::RepetitionCountInvalid, - } - ); - assert_eq!( - parser(r"{5}").parse().unwrap_err(), - TestError { - span: span(0..0), - kind: ast::ErrorKind::RepetitionMissing, - } - ); - assert_eq!( - parser(r"|{5}").parse().unwrap_err(), - TestError { - span: span(1..1), - kind: ast::ErrorKind::RepetitionMissing, - } - ); - } - - #[test] - fn parse_alternate() { - assert_eq!( - parser(r"a|b").parse(), - Ok(Ast::alternation(ast::Alternation { - span: span(0..3), - asts: vec![lit('a', 0), lit('b', 2)], - })) - ); - assert_eq!( - parser(r"(a|b)").parse(), - Ok(group( - 0..5, - 1, - Ast::alternation(ast::Alternation { - span: span(1..4), - asts: vec![lit('a', 1), lit('b', 3)], - }) - )) - ); - - assert_eq!( - parser(r"a|b|c").parse(), - Ok(Ast::alternation(ast::Alternation { - span: span(0..5), - asts: vec![lit('a', 0), lit('b', 2), lit('c', 4)], - })) - ); - assert_eq!( - parser(r"ax|by|cz").parse(), - Ok(Ast::alternation(ast::Alternation { - span: span(0..8), - asts: vec![ - concat(0..2, vec![lit('a', 0), lit('x', 1)]), - concat(3..5, vec![lit('b', 3), lit('y', 4)]), - concat(6..8, vec![lit('c', 6), lit('z', 7)]), - ], - })) - ); - assert_eq!( - parser(r"(ax|by|cz)").parse(), - Ok(group( - 0..10, - 1, - Ast::alternation(ast::Alternation { - span: span(1..9), - asts: vec![ - 
concat(1..3, vec![lit('a', 1), lit('x', 2)]), - concat(4..6, vec![lit('b', 4), lit('y', 5)]), - concat(7..9, vec![lit('c', 7), lit('z', 8)]), - ], - }) - )) - ); - assert_eq!( - parser(r"(ax|(by|(cz)))").parse(), - Ok(group( - 0..14, - 1, - alt( - 1..13, - vec![ - concat(1..3, vec![lit('a', 1), lit('x', 2)]), - group( - 4..13, - 2, - alt( - 5..12, - vec![ - concat( - 5..7, - vec![lit('b', 5), lit('y', 6)] - ), - group( - 8..12, - 3, - concat( - 9..11, - vec![lit('c', 9), lit('z', 10),] - ) - ), - ] - ) - ), - ] - ) - )) - ); - - assert_eq!( - parser(r"|").parse(), - Ok(alt( - 0..1, - vec![Ast::empty(span(0..0)), Ast::empty(span(1..1)),] - )) - ); - assert_eq!( - parser(r"||").parse(), - Ok(alt( - 0..2, - vec![ - Ast::empty(span(0..0)), - Ast::empty(span(1..1)), - Ast::empty(span(2..2)), - ] - )) - ); - assert_eq!( - parser(r"a|").parse(), - Ok(alt(0..2, vec![lit('a', 0), Ast::empty(span(2..2)),])) - ); - assert_eq!( - parser(r"|a").parse(), - Ok(alt(0..2, vec![Ast::empty(span(0..0)), lit('a', 1),])) - ); - - assert_eq!( - parser(r"(|)").parse(), - Ok(group( - 0..3, - 1, - alt( - 1..2, - vec![Ast::empty(span(1..1)), Ast::empty(span(2..2)),] - ) - )) - ); - assert_eq!( - parser(r"(a|)").parse(), - Ok(group( - 0..4, - 1, - alt(1..3, vec![lit('a', 1), Ast::empty(span(3..3)),]) - )) - ); - assert_eq!( - parser(r"(|a)").parse(), - Ok(group( - 0..4, - 1, - alt(1..3, vec![Ast::empty(span(1..1)), lit('a', 2),]) - )) - ); - - assert_eq!( - parser(r"a|b)").parse().unwrap_err(), - TestError { - span: span(3..4), - kind: ast::ErrorKind::GroupUnopened, - } - ); - assert_eq!( - parser(r"(a|b").parse().unwrap_err(), - TestError { - span: span(0..1), - kind: ast::ErrorKind::GroupUnclosed, - } - ); - } - - #[test] - fn parse_unsupported_lookaround() { - assert_eq!( - parser(r"(?=a)").parse().unwrap_err(), - TestError { - span: span(0..3), - kind: ast::ErrorKind::UnsupportedLookAround, - } - ); - assert_eq!( - parser(r"(?!a)").parse().unwrap_err(), - TestError { - span: span(0..3), - kind: ast::ErrorKind::UnsupportedLookAround, - } - ); - assert_eq!( - parser(r"(?<=a)").parse().unwrap_err(), - TestError { - span: span(0..4), - kind: ast::ErrorKind::UnsupportedLookAround, - } - ); - assert_eq!( - parser(r"(?<a>z)").parse(), - Ok(Ast::group(ast::Group { - span: span(0..7), - kind: ast::GroupKind::CaptureName { - starts_with_p: false, - name: ast::CaptureName { - span: span(3..4), - name: s("a"), - index: 1, - } - }, - ast: Box::new(lit('z', 5)), - })) - ); - assert_eq!( - parser("(?P<a>z)").parse(), - Ok(Ast::group(ast::Group { - span: span(0..8), - kind: ast::GroupKind::CaptureName { - starts_with_p: true, - name: ast::CaptureName { - span: span(4..5), - name: s("a"), - index: 1, - } - }, - ast: Box::new(lit('z', 6)), - })) - ); - assert_eq!( - parser("(?P<abc>z)").parse(), - Ok(Ast::group(ast::Group { - span: span(0..10), - kind: ast::GroupKind::CaptureName { - starts_with_p: true, - name: ast::CaptureName { - span: span(4..7), - name: s("abc"), - index: 1, - } - }, - ast: Box::new(lit('z', 8)), - })) - ); - - assert_eq!( - parser("(?P<a_1>z)").parse(), - Ok(Ast::group(ast::Group { - span: span(0..10), - kind: ast::GroupKind::CaptureName { - starts_with_p: true, - name: ast::CaptureName { - span: span(4..7), - name: s("a_1"), - index: 1, - } - }, - ast: Box::new(lit('z', 8)), - })) - ); - - assert_eq!( - parser("(?P<a.1>z)").parse(), - Ok(Ast::group(ast::Group { - span: span(0..10), - kind: ast::GroupKind::CaptureName { - starts_with_p: true, - name: ast::CaptureName { - span: span(4..7), - name: s("a.1"), - index: 1, - } - }, -
ast: Box::new(lit('z', 8)), - })) - ); - - assert_eq!( - parser("(?P<a[1]>z)").parse(), - Ok(Ast::group(ast::Group { - span: span(0..11), - kind: ast::GroupKind::CaptureName { - starts_with_p: true, - name: ast::CaptureName { - span: span(4..8), - name: s("a[1]"), - index: 1, - } - }, - ast: Box::new(lit('z', 9)), - })) - ); - - assert_eq!( - parser("(?P<a¾>)").parse(), - Ok(Ast::group(ast::Group { - span: Span::new( - Position::new(0, 1, 1), - Position::new(9, 1, 9), - ), - kind: ast::GroupKind::CaptureName { - starts_with_p: true, - name: ast::CaptureName { - span: Span::new( - Position::new(4, 1, 5), - Position::new(7, 1, 7), - ), - name: s("a¾"), - index: 1, - } - }, - ast: Box::new(Ast::empty(Span::new( - Position::new(8, 1, 8), - Position::new(8, 1, 8), - ))), - })) - ); - assert_eq!( - parser("(?P<名字>)").parse(), - Ok(Ast::group(ast::Group { - span: Span::new( - Position::new(0, 1, 1), - Position::new(12, 1, 9), - ), - kind: ast::GroupKind::CaptureName { - starts_with_p: true, - name: ast::CaptureName { - span: Span::new( - Position::new(4, 1, 5), - Position::new(10, 1, 7), - ), - name: s("名字"), - index: 1, - } - }, - ast: Box::new(Ast::empty(Span::new( - Position::new(11, 1, 8), - Position::new(11, 1, 8), - ))), - })) - ); - - assert_eq!( - parser("(?P<").parse().unwrap_err(), - TestError { - span: span(4..4), - kind: ast::ErrorKind::GroupNameUnexpectedEof, - } - ); - assert_eq!( - parser("(?P<>z)").parse().unwrap_err(), - TestError { - span: span(4..4), - kind: ast::ErrorKind::GroupNameEmpty, - } - ); - assert_eq!( - parser("(?P<a>y)(?P<a>z)").parse().unwrap_err(), - TestError { - span: span(12..13), - kind: ast::ErrorKind::GroupNameDuplicate { - original: span(4..5), - }, - } - ); - assert_eq!( - parser("(?P<5>)").parse().unwrap_err(), - TestError { - span: span(4..5), - kind: ast::ErrorKind::GroupNameInvalid, - } - ); - assert_eq!( - parser("(?P<5a>)").parse().unwrap_err(), - TestError { - span: span(4..5), - kind: ast::ErrorKind::GroupNameInvalid, - } - ); - assert_eq!( - parser("(?P<¾>)").parse().unwrap_err(), - TestError { - span: Span::new( - Position::new(4, 1, 5), - Position::new(6, 1, 6), - ), - kind: ast::ErrorKind::GroupNameInvalid, - } - ); - assert_eq!( - parser("(?P<¾a>)").parse().unwrap_err(), - TestError { - span: Span::new( - Position::new(4, 1, 5), - Position::new(6, 1, 6), - ), - kind: ast::ErrorKind::GroupNameInvalid, - } - ); - assert_eq!( - parser("(?P<☃>)").parse().unwrap_err(), - TestError { - span: Span::new( - Position::new(4, 1, 5), - Position::new(7, 1, 6), - ), - kind: ast::ErrorKind::GroupNameInvalid, - } - ); - assert_eq!( - parser("(?P<a☃>)").parse().unwrap_err(), - TestError { - span: Span::new( - Position::new(5, 1, 6), - Position::new(8, 1, 7), - ), - kind: ast::ErrorKind::GroupNameInvalid, - } - ); - } - - #[test] - fn parse_flags() { - assert_eq!( - parser("i:").parse_flags(), - Ok(ast::Flags { - span: span(0..1), - items: vec![ast::FlagsItem { - span: span(0..1), - kind: ast::FlagsItemKind::Flag(ast::Flag::CaseInsensitive), - }], - }) - ); - assert_eq!( - parser("i)").parse_flags(), - Ok(ast::Flags { - span: span(0..1), - items: vec![ast::FlagsItem { - span: span(0..1), - kind: ast::FlagsItemKind::Flag(ast::Flag::CaseInsensitive), - }], - }) - ); - - assert_eq!( - parser("isU:").parse_flags(), - Ok(ast::Flags { - span: span(0..3), - items: vec![ - ast::FlagsItem { - span: span(0..1), - kind: ast::FlagsItemKind::Flag( - ast::Flag::CaseInsensitive - ), - }, - ast::FlagsItem { - span: span(1..2), - kind: ast::FlagsItemKind::Flag( - ast::Flag::DotMatchesNewLine - ),
- }, - ast::FlagsItem { - span: span(2..3), - kind: ast::FlagsItemKind::Flag(ast::Flag::SwapGreed), - }, - ], - }) - ); - - assert_eq!( - parser("-isU:").parse_flags(), - Ok(ast::Flags { - span: span(0..4), - items: vec![ - ast::FlagsItem { - span: span(0..1), - kind: ast::FlagsItemKind::Negation, - }, - ast::FlagsItem { - span: span(1..2), - kind: ast::FlagsItemKind::Flag( - ast::Flag::CaseInsensitive - ), - }, - ast::FlagsItem { - span: span(2..3), - kind: ast::FlagsItemKind::Flag( - ast::Flag::DotMatchesNewLine - ), - }, - ast::FlagsItem { - span: span(3..4), - kind: ast::FlagsItemKind::Flag(ast::Flag::SwapGreed), - }, - ], - }) - ); - assert_eq!( - parser("i-sU:").parse_flags(), - Ok(ast::Flags { - span: span(0..4), - items: vec![ - ast::FlagsItem { - span: span(0..1), - kind: ast::FlagsItemKind::Flag( - ast::Flag::CaseInsensitive - ), - }, - ast::FlagsItem { - span: span(1..2), - kind: ast::FlagsItemKind::Negation, - }, - ast::FlagsItem { - span: span(2..3), - kind: ast::FlagsItemKind::Flag( - ast::Flag::DotMatchesNewLine - ), - }, - ast::FlagsItem { - span: span(3..4), - kind: ast::FlagsItemKind::Flag(ast::Flag::SwapGreed), - }, - ], - }) - ); - assert_eq!( - parser("i-sR:").parse_flags(), - Ok(ast::Flags { - span: span(0..4), - items: vec![ - ast::FlagsItem { - span: span(0..1), - kind: ast::FlagsItemKind::Flag( - ast::Flag::CaseInsensitive - ), - }, - ast::FlagsItem { - span: span(1..2), - kind: ast::FlagsItemKind::Negation, - }, - ast::FlagsItem { - span: span(2..3), - kind: ast::FlagsItemKind::Flag( - ast::Flag::DotMatchesNewLine - ), - }, - ast::FlagsItem { - span: span(3..4), - kind: ast::FlagsItemKind::Flag(ast::Flag::CRLF), - }, - ], - }) - ); - - assert_eq!( - parser("isU").parse_flags().unwrap_err(), - TestError { - span: span(3..3), - kind: ast::ErrorKind::FlagUnexpectedEof, - } - ); - assert_eq!( - parser("isUa:").parse_flags().unwrap_err(), - TestError { - span: span(3..4), - kind: ast::ErrorKind::FlagUnrecognized, - } - ); - assert_eq!( - parser("isUi:").parse_flags().unwrap_err(), - TestError { - span: span(3..4), - kind: ast::ErrorKind::FlagDuplicate { original: span(0..1) }, - } - ); - assert_eq!( - parser("i-sU-i:").parse_flags().unwrap_err(), - TestError { - span: span(4..5), - kind: ast::ErrorKind::FlagRepeatedNegation { - original: span(1..2), - }, - } - ); - assert_eq!( - parser("-)").parse_flags().unwrap_err(), - TestError { - span: span(0..1), - kind: ast::ErrorKind::FlagDanglingNegation, - } - ); - assert_eq!( - parser("i-)").parse_flags().unwrap_err(), - TestError { - span: span(1..2), - kind: ast::ErrorKind::FlagDanglingNegation, - } - ); - assert_eq!( - parser("iU-)").parse_flags().unwrap_err(), - TestError { - span: span(2..3), - kind: ast::ErrorKind::FlagDanglingNegation, - } - ); - } - - #[test] - fn parse_flag() { - assert_eq!(parser("i").parse_flag(), Ok(ast::Flag::CaseInsensitive)); - assert_eq!(parser("m").parse_flag(), Ok(ast::Flag::MultiLine)); - assert_eq!(parser("s").parse_flag(), Ok(ast::Flag::DotMatchesNewLine)); - assert_eq!(parser("U").parse_flag(), Ok(ast::Flag::SwapGreed)); - assert_eq!(parser("u").parse_flag(), Ok(ast::Flag::Unicode)); - assert_eq!(parser("R").parse_flag(), Ok(ast::Flag::CRLF)); - assert_eq!(parser("x").parse_flag(), Ok(ast::Flag::IgnoreWhitespace)); - - assert_eq!( - parser("a").parse_flag().unwrap_err(), - TestError { - span: span(0..1), - kind: ast::ErrorKind::FlagUnrecognized, - } - ); - assert_eq!( - parser("☃").parse_flag().unwrap_err(), - TestError { - span: span_range("☃", 0..3), - kind: 
ast::ErrorKind::FlagUnrecognized, - } - ); - } - - #[test] - fn parse_primitive_non_escape() { - assert_eq!( - parser(r".").parse_primitive(), - Ok(Primitive::Dot(span(0..1))) - ); - assert_eq!( - parser(r"^").parse_primitive(), - Ok(Primitive::Assertion(ast::Assertion { - span: span(0..1), - kind: ast::AssertionKind::StartLine, - })) - ); - assert_eq!( - parser(r"$").parse_primitive(), - Ok(Primitive::Assertion(ast::Assertion { - span: span(0..1), - kind: ast::AssertionKind::EndLine, - })) - ); - - assert_eq!( - parser(r"a").parse_primitive(), - Ok(Primitive::Literal(ast::Literal { - span: span(0..1), - kind: ast::LiteralKind::Verbatim, - c: 'a', - })) - ); - assert_eq!( - parser(r"|").parse_primitive(), - Ok(Primitive::Literal(ast::Literal { - span: span(0..1), - kind: ast::LiteralKind::Verbatim, - c: '|', - })) - ); - assert_eq!( - parser(r"☃").parse_primitive(), - Ok(Primitive::Literal(ast::Literal { - span: span_range("☃", 0..3), - kind: ast::LiteralKind::Verbatim, - c: '☃', - })) - ); - } - - #[test] - fn parse_escape() { - assert_eq!( - parser(r"\|").parse_primitive(), - Ok(Primitive::Literal(ast::Literal { - span: span(0..2), - kind: ast::LiteralKind::Meta, - c: '|', - })) - ); - let specials = &[ - (r"\a", '\x07', ast::SpecialLiteralKind::Bell), - (r"\f", '\x0C', ast::SpecialLiteralKind::FormFeed), - (r"\t", '\t', ast::SpecialLiteralKind::Tab), - (r"\n", '\n', ast::SpecialLiteralKind::LineFeed), - (r"\r", '\r', ast::SpecialLiteralKind::CarriageReturn), - (r"\v", '\x0B', ast::SpecialLiteralKind::VerticalTab), - ]; - for &(pat, c, ref kind) in specials { - assert_eq!( - parser(pat).parse_primitive(), - Ok(Primitive::Literal(ast::Literal { - span: span(0..2), - kind: ast::LiteralKind::Special(kind.clone()), - c, - })) - ); - } - assert_eq!( - parser(r"\A").parse_primitive(), - Ok(Primitive::Assertion(ast::Assertion { - span: span(0..2), - kind: ast::AssertionKind::StartText, - })) - ); - assert_eq!( - parser(r"\z").parse_primitive(), - Ok(Primitive::Assertion(ast::Assertion { - span: span(0..2), - kind: ast::AssertionKind::EndText, - })) - ); - assert_eq!( - parser(r"\b").parse_primitive(), - Ok(Primitive::Assertion(ast::Assertion { - span: span(0..2), - kind: ast::AssertionKind::WordBoundary, - })) - ); - assert_eq!( - parser(r"\b{start}").parse_primitive(), - Ok(Primitive::Assertion(ast::Assertion { - span: span(0..9), - kind: ast::AssertionKind::WordBoundaryStart, - })) - ); - assert_eq!( - parser(r"\b{end}").parse_primitive(), - Ok(Primitive::Assertion(ast::Assertion { - span: span(0..7), - kind: ast::AssertionKind::WordBoundaryEnd, - })) - ); - assert_eq!( - parser(r"\b{start-half}").parse_primitive(), - Ok(Primitive::Assertion(ast::Assertion { - span: span(0..14), - kind: ast::AssertionKind::WordBoundaryStartHalf, - })) - ); - assert_eq!( - parser(r"\b{end-half}").parse_primitive(), - Ok(Primitive::Assertion(ast::Assertion { - span: span(0..12), - kind: ast::AssertionKind::WordBoundaryEndHalf, - })) - ); - assert_eq!( - parser(r"\<").parse_primitive(), - Ok(Primitive::Assertion(ast::Assertion { - span: span(0..2), - kind: ast::AssertionKind::WordBoundaryStartAngle, - })) - ); - assert_eq!( - parser(r"\>").parse_primitive(), - Ok(Primitive::Assertion(ast::Assertion { - span: span(0..2), - kind: ast::AssertionKind::WordBoundaryEndAngle, - })) - ); - assert_eq!( - parser(r"\B").parse_primitive(), - Ok(Primitive::Assertion(ast::Assertion { - span: span(0..2), - kind: ast::AssertionKind::NotWordBoundary, - })) - ); - - // We also support superfluous escapes in most cases now too. 
- for c in ['!', '@', '%', '"', '\'', '/', ' '] { - let pat = format!(r"\{c}"); - assert_eq!( - parser(&pat).parse_primitive(), - Ok(Primitive::Literal(ast::Literal { - span: span(0..2), - kind: ast::LiteralKind::Superfluous, - c, - })) - ); - } - - // Some superfluous escapes, namely [0-9A-Za-z], are still banned. This - // gives flexibility for future evolution. - assert_eq!( - parser(r"\e").parse_escape().unwrap_err(), - TestError { - span: span(0..2), - kind: ast::ErrorKind::EscapeUnrecognized, - } - ); - assert_eq!( - parser(r"\y").parse_escape().unwrap_err(), - TestError { - span: span(0..2), - kind: ast::ErrorKind::EscapeUnrecognized, - } - ); - - // Starting a special word boundary without any non-whitespace chars - // after the brace makes it ambiguous whether the user meant to write - // a counted repetition (probably not?) or an actual special word - // boundary assertion. - assert_eq!( - parser(r"\b{").parse_escape().unwrap_err(), - TestError { - span: span(0..3), - kind: ast::ErrorKind::SpecialWordOrRepetitionUnexpectedEof, - } - ); - assert_eq!( - parser_ignore_whitespace(r"\b{ ").parse_escape().unwrap_err(), - TestError { - span: span(0..4), - kind: ast::ErrorKind::SpecialWordOrRepetitionUnexpectedEof, - } - ); - // When 'x' is not enabled, the space is seen as a non-[-A-Za-z] char, - // and thus causes the parser to treat it as a counted repetition. - assert_eq!( - parser(r"\b{ ").parse().unwrap_err(), - TestError { - span: span(2..4), - kind: ast::ErrorKind::RepetitionCountUnclosed, - } - ); - // In this case, we got some valid chars that makes it look like the - // user is writing one of the special word boundary assertions, but - // we forget to close the brace. - assert_eq!( - parser(r"\b{foo").parse_escape().unwrap_err(), - TestError { - span: span(2..6), - kind: ast::ErrorKind::SpecialWordBoundaryUnclosed, - } - ); - // We get the same error as above, except it is provoked by seeing a - // char that we know is invalid before seeing a closing brace. - assert_eq!( - parser(r"\b{foo!}").parse_escape().unwrap_err(), - TestError { - span: span(2..6), - kind: ast::ErrorKind::SpecialWordBoundaryUnclosed, - } - ); - // And this one occurs when, syntactically, everything looks okay, but - // we don't use a valid spelling of a word boundary assertion. - assert_eq!( - parser(r"\b{foo}").parse_escape().unwrap_err(), - TestError { - span: span(3..6), - kind: ast::ErrorKind::SpecialWordBoundaryUnrecognized, - } - ); - - // An unfinished escape is illegal. 
- assert_eq!( - parser(r"\").parse_escape().unwrap_err(), - TestError { - span: span(0..1), - kind: ast::ErrorKind::EscapeUnexpectedEof, - } - ); - } - - #[test] - fn parse_unsupported_backreference() { - assert_eq!( - parser(r"\0").parse_escape().unwrap_err(), - TestError { - span: span(0..2), - kind: ast::ErrorKind::UnsupportedBackreference, - } - ); - assert_eq!( - parser(r"\9").parse_escape().unwrap_err(), - TestError { - span: span(0..2), - kind: ast::ErrorKind::UnsupportedBackreference, - } - ); - } - - #[test] - fn parse_octal() { - for i in 0..511 { - let pat = format!(r"\{i:o}"); - assert_eq!( - parser_octal(&pat).parse_escape(), - Ok(Primitive::Literal(ast::Literal { - span: span(0..pat.len()), - kind: ast::LiteralKind::Octal, - c: char::from_u32(i).unwrap(), - })) - ); - } - assert_eq!( - parser_octal(r"\778").parse_escape(), - Ok(Primitive::Literal(ast::Literal { - span: span(0..3), - kind: ast::LiteralKind::Octal, - c: '?', - })) - ); - assert_eq!( - parser_octal(r"\7777").parse_escape(), - Ok(Primitive::Literal(ast::Literal { - span: span(0..4), - kind: ast::LiteralKind::Octal, - c: '\u{01FF}', - })) - ); - assert_eq!( - parser_octal(r"\778").parse(), - Ok(Ast::concat(ast::Concat { - span: span(0..4), - asts: vec![ - Ast::literal(ast::Literal { - span: span(0..3), - kind: ast::LiteralKind::Octal, - c: '?', - }), - Ast::literal(ast::Literal { - span: span(3..4), - kind: ast::LiteralKind::Verbatim, - c: '8', - }), - ], - })) - ); - assert_eq!( - parser_octal(r"\7777").parse(), - Ok(Ast::concat(ast::Concat { - span: span(0..5), - asts: vec![ - Ast::literal(ast::Literal { - span: span(0..4), - kind: ast::LiteralKind::Octal, - c: '\u{01FF}', - }), - Ast::literal(ast::Literal { - span: span(4..5), - kind: ast::LiteralKind::Verbatim, - c: '7', - }), - ], - })) - ); - - assert_eq!( - parser_octal(r"\8").parse_escape().unwrap_err(), - TestError { - span: span(0..2), - kind: ast::ErrorKind::EscapeUnrecognized, - } - ); - } - - #[test] - fn parse_hex_two() { - for i in 0..256 { - let pat = format!(r"\x{i:02x}"); - assert_eq!( - parser(&pat).parse_escape(), - Ok(Primitive::Literal(ast::Literal { - span: span(0..pat.len()), - kind: ast::LiteralKind::HexFixed(ast::HexLiteralKind::X), - c: char::from_u32(i).unwrap(), - })) - ); - } - - assert_eq!( - parser(r"\xF").parse_escape().unwrap_err(), - TestError { - span: span(3..3), - kind: ast::ErrorKind::EscapeUnexpectedEof, - } - ); - assert_eq!( - parser(r"\xG").parse_escape().unwrap_err(), - TestError { - span: span(2..3), - kind: ast::ErrorKind::EscapeHexInvalidDigit, - } - ); - assert_eq!( - parser(r"\xFG").parse_escape().unwrap_err(), - TestError { - span: span(3..4), - kind: ast::ErrorKind::EscapeHexInvalidDigit, - } - ); - } - - #[test] - fn parse_hex_four() { - for i in 0..65536 { - let c = match char::from_u32(i) { - None => continue, - Some(c) => c, - }; - let pat = format!(r"\u{i:04x}"); - assert_eq!( - parser(&pat).parse_escape(), - Ok(Primitive::Literal(ast::Literal { - span: span(0..pat.len()), - kind: ast::LiteralKind::HexFixed( - ast::HexLiteralKind::UnicodeShort - ), - c, - })) - ); - } - - assert_eq!( - parser(r"\uF").parse_escape().unwrap_err(), - TestError { - span: span(3..3), - kind: ast::ErrorKind::EscapeUnexpectedEof, - } - ); - assert_eq!( - parser(r"\uG").parse_escape().unwrap_err(), - TestError { - span: span(2..3), - kind: ast::ErrorKind::EscapeHexInvalidDigit, - } - ); - assert_eq!( - parser(r"\uFG").parse_escape().unwrap_err(), - TestError { - span: span(3..4), - kind: ast::ErrorKind::EscapeHexInvalidDigit, - } - 
); - assert_eq!( - parser(r"\uFFG").parse_escape().unwrap_err(), - TestError { - span: span(4..5), - kind: ast::ErrorKind::EscapeHexInvalidDigit, - } - ); - assert_eq!( - parser(r"\uFFFG").parse_escape().unwrap_err(), - TestError { - span: span(5..6), - kind: ast::ErrorKind::EscapeHexInvalidDigit, - } - ); - assert_eq!( - parser(r"\uD800").parse_escape().unwrap_err(), - TestError { - span: span(2..6), - kind: ast::ErrorKind::EscapeHexInvalid, - } - ); - } - - #[test] - fn parse_hex_eight() { - for i in 0..65536 { - let c = match char::from_u32(i) { - None => continue, - Some(c) => c, - }; - let pat = format!(r"\U{i:08x}"); - assert_eq!( - parser(&pat).parse_escape(), - Ok(Primitive::Literal(ast::Literal { - span: span(0..pat.len()), - kind: ast::LiteralKind::HexFixed( - ast::HexLiteralKind::UnicodeLong - ), - c, - })) - ); - } - - assert_eq!( - parser(r"\UF").parse_escape().unwrap_err(), - TestError { - span: span(3..3), - kind: ast::ErrorKind::EscapeUnexpectedEof, - } - ); - assert_eq!( - parser(r"\UG").parse_escape().unwrap_err(), - TestError { - span: span(2..3), - kind: ast::ErrorKind::EscapeHexInvalidDigit, - } - ); - assert_eq!( - parser(r"\UFG").parse_escape().unwrap_err(), - TestError { - span: span(3..4), - kind: ast::ErrorKind::EscapeHexInvalidDigit, - } - ); - assert_eq!( - parser(r"\UFFG").parse_escape().unwrap_err(), - TestError { - span: span(4..5), - kind: ast::ErrorKind::EscapeHexInvalidDigit, - } - ); - assert_eq!( - parser(r"\UFFFG").parse_escape().unwrap_err(), - TestError { - span: span(5..6), - kind: ast::ErrorKind::EscapeHexInvalidDigit, - } - ); - assert_eq!( - parser(r"\UFFFFG").parse_escape().unwrap_err(), - TestError { - span: span(6..7), - kind: ast::ErrorKind::EscapeHexInvalidDigit, - } - ); - assert_eq!( - parser(r"\UFFFFFG").parse_escape().unwrap_err(), - TestError { - span: span(7..8), - kind: ast::ErrorKind::EscapeHexInvalidDigit, - } - ); - assert_eq!( - parser(r"\UFFFFFFG").parse_escape().unwrap_err(), - TestError { - span: span(8..9), - kind: ast::ErrorKind::EscapeHexInvalidDigit, - } - ); - assert_eq!( - parser(r"\UFFFFFFFG").parse_escape().unwrap_err(), - TestError { - span: span(9..10), - kind: ast::ErrorKind::EscapeHexInvalidDigit, - } - ); - } - - #[test] - fn parse_hex_brace() { - assert_eq!( - parser(r"\u{26c4}").parse_escape(), - Ok(Primitive::Literal(ast::Literal { - span: span(0..8), - kind: ast::LiteralKind::HexBrace( - ast::HexLiteralKind::UnicodeShort - ), - c: '⛄', - })) - ); - assert_eq!( - parser(r"\U{26c4}").parse_escape(), - Ok(Primitive::Literal(ast::Literal { - span: span(0..8), - kind: ast::LiteralKind::HexBrace( - ast::HexLiteralKind::UnicodeLong - ), - c: '⛄', - })) - ); - assert_eq!( - parser(r"\x{26c4}").parse_escape(), - Ok(Primitive::Literal(ast::Literal { - span: span(0..8), - kind: ast::LiteralKind::HexBrace(ast::HexLiteralKind::X), - c: '⛄', - })) - ); - assert_eq!( - parser(r"\x{26C4}").parse_escape(), - Ok(Primitive::Literal(ast::Literal { - span: span(0..8), - kind: ast::LiteralKind::HexBrace(ast::HexLiteralKind::X), - c: '⛄', - })) - ); - assert_eq!( - parser(r"\x{10fFfF}").parse_escape(), - Ok(Primitive::Literal(ast::Literal { - span: span(0..10), - kind: ast::LiteralKind::HexBrace(ast::HexLiteralKind::X), - c: '\u{10FFFF}', - })) - ); - - assert_eq!( - parser(r"\x").parse_escape().unwrap_err(), - TestError { - span: span(2..2), - kind: ast::ErrorKind::EscapeUnexpectedEof, - } - ); - assert_eq!( - parser(r"\x{").parse_escape().unwrap_err(), - TestError { - span: span(2..3), - kind: ast::ErrorKind::EscapeUnexpectedEof, - 
} - ); - assert_eq!( - parser(r"\x{FF").parse_escape().unwrap_err(), - TestError { - span: span(2..5), - kind: ast::ErrorKind::EscapeUnexpectedEof, - } - ); - assert_eq!( - parser(r"\x{}").parse_escape().unwrap_err(), - TestError { - span: span(2..4), - kind: ast::ErrorKind::EscapeHexEmpty, - } - ); - assert_eq!( - parser(r"\x{FGF}").parse_escape().unwrap_err(), - TestError { - span: span(4..5), - kind: ast::ErrorKind::EscapeHexInvalidDigit, - } - ); - assert_eq!( - parser(r"\x{FFFFFF}").parse_escape().unwrap_err(), - TestError { - span: span(3..9), - kind: ast::ErrorKind::EscapeHexInvalid, - } - ); - assert_eq!( - parser(r"\x{D800}").parse_escape().unwrap_err(), - TestError { - span: span(3..7), - kind: ast::ErrorKind::EscapeHexInvalid, - } - ); - assert_eq!( - parser(r"\x{FFFFFFFFF}").parse_escape().unwrap_err(), - TestError { - span: span(3..12), - kind: ast::ErrorKind::EscapeHexInvalid, - } - ); - } - - #[test] - fn parse_decimal() { - assert_eq!(parser("123").parse_decimal(), Ok(123)); - assert_eq!(parser("0").parse_decimal(), Ok(0)); - assert_eq!(parser("01").parse_decimal(), Ok(1)); - - assert_eq!( - parser("-1").parse_decimal().unwrap_err(), - TestError { span: span(0..0), kind: ast::ErrorKind::DecimalEmpty } - ); - assert_eq!( - parser("").parse_decimal().unwrap_err(), - TestError { span: span(0..0), kind: ast::ErrorKind::DecimalEmpty } - ); - assert_eq!( - parser("9999999999").parse_decimal().unwrap_err(), - TestError { - span: span(0..10), - kind: ast::ErrorKind::DecimalInvalid, - } - ); - } - - #[test] - fn parse_set_class() { - fn union(span: Span, items: Vec) -> ast::ClassSet { - ast::ClassSet::union(ast::ClassSetUnion { span, items }) - } - - fn intersection( - span: Span, - lhs: ast::ClassSet, - rhs: ast::ClassSet, - ) -> ast::ClassSet { - ast::ClassSet::BinaryOp(ast::ClassSetBinaryOp { - span, - kind: ast::ClassSetBinaryOpKind::Intersection, - lhs: Box::new(lhs), - rhs: Box::new(rhs), - }) - } - - fn difference( - span: Span, - lhs: ast::ClassSet, - rhs: ast::ClassSet, - ) -> ast::ClassSet { - ast::ClassSet::BinaryOp(ast::ClassSetBinaryOp { - span, - kind: ast::ClassSetBinaryOpKind::Difference, - lhs: Box::new(lhs), - rhs: Box::new(rhs), - }) - } - - fn symdifference( - span: Span, - lhs: ast::ClassSet, - rhs: ast::ClassSet, - ) -> ast::ClassSet { - ast::ClassSet::BinaryOp(ast::ClassSetBinaryOp { - span, - kind: ast::ClassSetBinaryOpKind::SymmetricDifference, - lhs: Box::new(lhs), - rhs: Box::new(rhs), - }) - } - - fn itemset(item: ast::ClassSetItem) -> ast::ClassSet { - ast::ClassSet::Item(item) - } - - fn item_ascii(cls: ast::ClassAscii) -> ast::ClassSetItem { - ast::ClassSetItem::Ascii(cls) - } - - fn item_unicode(cls: ast::ClassUnicode) -> ast::ClassSetItem { - ast::ClassSetItem::Unicode(cls) - } - - fn item_perl(cls: ast::ClassPerl) -> ast::ClassSetItem { - ast::ClassSetItem::Perl(cls) - } - - fn item_bracket(cls: ast::ClassBracketed) -> ast::ClassSetItem { - ast::ClassSetItem::Bracketed(Box::new(cls)) - } - - fn lit(span: Span, c: char) -> ast::ClassSetItem { - ast::ClassSetItem::Literal(ast::Literal { - span, - kind: ast::LiteralKind::Verbatim, - c, - }) - } - - fn empty(span: Span) -> ast::ClassSetItem { - ast::ClassSetItem::Empty(span) - } - - fn range(span: Span, start: char, end: char) -> ast::ClassSetItem { - let pos1 = Position { - offset: span.start.offset + start.len_utf8(), - column: span.start.column + 1, - ..span.start - }; - let pos2 = Position { - offset: span.end.offset - end.len_utf8(), - column: span.end.column - 1, - ..span.end - }; - 
ast::ClassSetItem::Range(ast::ClassSetRange { - span, - start: ast::Literal { - span: Span { end: pos1, ..span }, - kind: ast::LiteralKind::Verbatim, - c: start, - }, - end: ast::Literal { - span: Span { start: pos2, ..span }, - kind: ast::LiteralKind::Verbatim, - c: end, - }, - }) - } - - fn alnum(span: Span, negated: bool) -> ast::ClassAscii { - ast::ClassAscii { span, kind: ast::ClassAsciiKind::Alnum, negated } - } - - fn lower(span: Span, negated: bool) -> ast::ClassAscii { - ast::ClassAscii { span, kind: ast::ClassAsciiKind::Lower, negated } - } - - assert_eq!( - parser("[[:alnum:]]").parse(), - Ok(Ast::class_bracketed(ast::ClassBracketed { - span: span(0..11), - negated: false, - kind: itemset(item_ascii(alnum(span(1..10), false))), - })) - ); - assert_eq!( - parser("[[[:alnum:]]]").parse(), - Ok(Ast::class_bracketed(ast::ClassBracketed { - span: span(0..13), - negated: false, - kind: itemset(item_bracket(ast::ClassBracketed { - span: span(1..12), - negated: false, - kind: itemset(item_ascii(alnum(span(2..11), false))), - })), - })) - ); - assert_eq!( - parser("[[:alnum:]&&[:lower:]]").parse(), - Ok(Ast::class_bracketed(ast::ClassBracketed { - span: span(0..22), - negated: false, - kind: intersection( - span(1..21), - itemset(item_ascii(alnum(span(1..10), false))), - itemset(item_ascii(lower(span(12..21), false))), - ), - })) - ); - assert_eq!( - parser("[[:alnum:]--[:lower:]]").parse(), - Ok(Ast::class_bracketed(ast::ClassBracketed { - span: span(0..22), - negated: false, - kind: difference( - span(1..21), - itemset(item_ascii(alnum(span(1..10), false))), - itemset(item_ascii(lower(span(12..21), false))), - ), - })) - ); - assert_eq!( - parser("[[:alnum:]~~[:lower:]]").parse(), - Ok(Ast::class_bracketed(ast::ClassBracketed { - span: span(0..22), - negated: false, - kind: symdifference( - span(1..21), - itemset(item_ascii(alnum(span(1..10), false))), - itemset(item_ascii(lower(span(12..21), false))), - ), - })) - ); - - assert_eq!( - parser("[a]").parse(), - Ok(Ast::class_bracketed(ast::ClassBracketed { - span: span(0..3), - negated: false, - kind: itemset(lit(span(1..2), 'a')), - })) - ); - assert_eq!( - parser(r"[a\]]").parse(), - Ok(Ast::class_bracketed(ast::ClassBracketed { - span: span(0..5), - negated: false, - kind: union( - span(1..4), - vec![ - lit(span(1..2), 'a'), - ast::ClassSetItem::Literal(ast::Literal { - span: span(2..4), - kind: ast::LiteralKind::Meta, - c: ']', - }), - ] - ), - })) - ); - assert_eq!( - parser(r"[a\-z]").parse(), - Ok(Ast::class_bracketed(ast::ClassBracketed { - span: span(0..6), - negated: false, - kind: union( - span(1..5), - vec![ - lit(span(1..2), 'a'), - ast::ClassSetItem::Literal(ast::Literal { - span: span(2..4), - kind: ast::LiteralKind::Meta, - c: '-', - }), - lit(span(4..5), 'z'), - ] - ), - })) - ); - assert_eq!( - parser("[ab]").parse(), - Ok(Ast::class_bracketed(ast::ClassBracketed { - span: span(0..4), - negated: false, - kind: union( - span(1..3), - vec![lit(span(1..2), 'a'), lit(span(2..3), 'b'),] - ), - })) - ); - assert_eq!( - parser("[a-]").parse(), - Ok(Ast::class_bracketed(ast::ClassBracketed { - span: span(0..4), - negated: false, - kind: union( - span(1..3), - vec![lit(span(1..2), 'a'), lit(span(2..3), '-'),] - ), - })) - ); - assert_eq!( - parser("[-a]").parse(), - Ok(Ast::class_bracketed(ast::ClassBracketed { - span: span(0..4), - negated: false, - kind: union( - span(1..3), - vec![lit(span(1..2), '-'), lit(span(2..3), 'a'),] - ), - })) - ); - assert_eq!( - parser(r"[\pL]").parse(), - 
Ok(Ast::class_bracketed(ast::ClassBracketed { - span: span(0..5), - negated: false, - kind: itemset(item_unicode(ast::ClassUnicode { - span: span(1..4), - negated: false, - kind: ast::ClassUnicodeKind::OneLetter('L'), - })), - })) - ); - assert_eq!( - parser(r"[\w]").parse(), - Ok(Ast::class_bracketed(ast::ClassBracketed { - span: span(0..4), - negated: false, - kind: itemset(item_perl(ast::ClassPerl { - span: span(1..3), - kind: ast::ClassPerlKind::Word, - negated: false, - })), - })) - ); - assert_eq!( - parser(r"[a\wz]").parse(), - Ok(Ast::class_bracketed(ast::ClassBracketed { - span: span(0..6), - negated: false, - kind: union( - span(1..5), - vec![ - lit(span(1..2), 'a'), - item_perl(ast::ClassPerl { - span: span(2..4), - kind: ast::ClassPerlKind::Word, - negated: false, - }), - lit(span(4..5), 'z'), - ] - ), - })) - ); - - assert_eq!( - parser("[a-z]").parse(), - Ok(Ast::class_bracketed(ast::ClassBracketed { - span: span(0..5), - negated: false, - kind: itemset(range(span(1..4), 'a', 'z')), - })) - ); - assert_eq!( - parser("[a-cx-z]").parse(), - Ok(Ast::class_bracketed(ast::ClassBracketed { - span: span(0..8), - negated: false, - kind: union( - span(1..7), - vec![ - range(span(1..4), 'a', 'c'), - range(span(4..7), 'x', 'z'), - ] - ), - })) - ); - assert_eq!( - parser(r"[\w&&a-cx-z]").parse(), - Ok(Ast::class_bracketed(ast::ClassBracketed { - span: span(0..12), - negated: false, - kind: intersection( - span(1..11), - itemset(item_perl(ast::ClassPerl { - span: span(1..3), - kind: ast::ClassPerlKind::Word, - negated: false, - })), - union( - span(5..11), - vec![ - range(span(5..8), 'a', 'c'), - range(span(8..11), 'x', 'z'), - ] - ), - ), - })) - ); - assert_eq!( - parser(r"[a-cx-z&&\w]").parse(), - Ok(Ast::class_bracketed(ast::ClassBracketed { - span: span(0..12), - negated: false, - kind: intersection( - span(1..11), - union( - span(1..7), - vec![ - range(span(1..4), 'a', 'c'), - range(span(4..7), 'x', 'z'), - ] - ), - itemset(item_perl(ast::ClassPerl { - span: span(9..11), - kind: ast::ClassPerlKind::Word, - negated: false, - })), - ), - })) - ); - assert_eq!( - parser(r"[a--b--c]").parse(), - Ok(Ast::class_bracketed(ast::ClassBracketed { - span: span(0..9), - negated: false, - kind: difference( - span(1..8), - difference( - span(1..5), - itemset(lit(span(1..2), 'a')), - itemset(lit(span(4..5), 'b')), - ), - itemset(lit(span(7..8), 'c')), - ), - })) - ); - assert_eq!( - parser(r"[a~~b~~c]").parse(), - Ok(Ast::class_bracketed(ast::ClassBracketed { - span: span(0..9), - negated: false, - kind: symdifference( - span(1..8), - symdifference( - span(1..5), - itemset(lit(span(1..2), 'a')), - itemset(lit(span(4..5), 'b')), - ), - itemset(lit(span(7..8), 'c')), - ), - })) - ); - assert_eq!( - parser(r"[\^&&^]").parse(), - Ok(Ast::class_bracketed(ast::ClassBracketed { - span: span(0..7), - negated: false, - kind: intersection( - span(1..6), - itemset(ast::ClassSetItem::Literal(ast::Literal { - span: span(1..3), - kind: ast::LiteralKind::Meta, - c: '^', - })), - itemset(lit(span(5..6), '^')), - ), - })) - ); - assert_eq!( - parser(r"[\&&&&]").parse(), - Ok(Ast::class_bracketed(ast::ClassBracketed { - span: span(0..7), - negated: false, - kind: intersection( - span(1..6), - itemset(ast::ClassSetItem::Literal(ast::Literal { - span: span(1..3), - kind: ast::LiteralKind::Meta, - c: '&', - })), - itemset(lit(span(5..6), '&')), - ), - })) - ); - assert_eq!( - parser(r"[&&&&]").parse(), - Ok(Ast::class_bracketed(ast::ClassBracketed { - span: span(0..6), - negated: false, - kind: intersection( - 
span(1..5), - intersection( - span(1..3), - itemset(empty(span(1..1))), - itemset(empty(span(3..3))), - ), - itemset(empty(span(5..5))), - ), - })) - ); - - let pat = "[☃-⛄]"; - assert_eq!( - parser(pat).parse(), - Ok(Ast::class_bracketed(ast::ClassBracketed { - span: span_range(pat, 0..9), - negated: false, - kind: itemset(ast::ClassSetItem::Range(ast::ClassSetRange { - span: span_range(pat, 1..8), - start: ast::Literal { - span: span_range(pat, 1..4), - kind: ast::LiteralKind::Verbatim, - c: '☃', - }, - end: ast::Literal { - span: span_range(pat, 5..8), - kind: ast::LiteralKind::Verbatim, - c: '⛄', - }, - })), - })) - ); - - assert_eq!( - parser(r"[]]").parse(), - Ok(Ast::class_bracketed(ast::ClassBracketed { - span: span(0..3), - negated: false, - kind: itemset(lit(span(1..2), ']')), - })) - ); - assert_eq!( - parser(r"[]\[]").parse(), - Ok(Ast::class_bracketed(ast::ClassBracketed { - span: span(0..5), - negated: false, - kind: union( - span(1..4), - vec![ - lit(span(1..2), ']'), - ast::ClassSetItem::Literal(ast::Literal { - span: span(2..4), - kind: ast::LiteralKind::Meta, - c: '[', - }), - ] - ), - })) - ); - assert_eq!( - parser(r"[\[]]").parse(), - Ok(concat( - 0..5, - vec![ - Ast::class_bracketed(ast::ClassBracketed { - span: span(0..4), - negated: false, - kind: itemset(ast::ClassSetItem::Literal( - ast::Literal { - span: span(1..3), - kind: ast::LiteralKind::Meta, - c: '[', - } - )), - }), - Ast::literal(ast::Literal { - span: span(4..5), - kind: ast::LiteralKind::Verbatim, - c: ']', - }), - ] - )) - ); - - assert_eq!( - parser("[").parse().unwrap_err(), - TestError { - span: span(0..1), - kind: ast::ErrorKind::ClassUnclosed, - } - ); - assert_eq!( - parser("[[").parse().unwrap_err(), - TestError { - span: span(1..2), - kind: ast::ErrorKind::ClassUnclosed, - } - ); - assert_eq!( - parser("[[-]").parse().unwrap_err(), - TestError { - span: span(0..1), - kind: ast::ErrorKind::ClassUnclosed, - } - ); - assert_eq!( - parser("[[[:alnum:]").parse().unwrap_err(), - TestError { - span: span(1..2), - kind: ast::ErrorKind::ClassUnclosed, - } - ); - assert_eq!( - parser(r"[\b]").parse().unwrap_err(), - TestError { - span: span(1..3), - kind: ast::ErrorKind::ClassEscapeInvalid, - } - ); - assert_eq!( - parser(r"[\w-a]").parse().unwrap_err(), - TestError { - span: span(1..3), - kind: ast::ErrorKind::ClassRangeLiteral, - } - ); - assert_eq!( - parser(r"[a-\w]").parse().unwrap_err(), - TestError { - span: span(3..5), - kind: ast::ErrorKind::ClassRangeLiteral, - } - ); - assert_eq!( - parser(r"[z-a]").parse().unwrap_err(), - TestError { - span: span(1..4), - kind: ast::ErrorKind::ClassRangeInvalid, - } - ); - - assert_eq!( - parser_ignore_whitespace("[a ").parse().unwrap_err(), - TestError { - span: span(0..1), - kind: ast::ErrorKind::ClassUnclosed, - } - ); - assert_eq!( - parser_ignore_whitespace("[a- ").parse().unwrap_err(), - TestError { - span: span(0..1), - kind: ast::ErrorKind::ClassUnclosed, - } - ); - } - - #[test] - fn parse_set_class_open() { - assert_eq!(parser("[a]").parse_set_class_open(), { - let set = ast::ClassBracketed { - span: span(0..1), - negated: false, - kind: ast::ClassSet::union(ast::ClassSetUnion { - span: span(1..1), - items: vec![], - }), - }; - let union = ast::ClassSetUnion { span: span(1..1), items: vec![] }; - Ok((set, union)) - }); - assert_eq!( - parser_ignore_whitespace("[ a]").parse_set_class_open(), - { - let set = ast::ClassBracketed { - span: span(0..4), - negated: false, - kind: ast::ClassSet::union(ast::ClassSetUnion { - span: span(4..4), - items: 
vec![], - }), - }; - let union = - ast::ClassSetUnion { span: span(4..4), items: vec![] }; - Ok((set, union)) - } - ); - assert_eq!(parser("[^a]").parse_set_class_open(), { - let set = ast::ClassBracketed { - span: span(0..2), - negated: true, - kind: ast::ClassSet::union(ast::ClassSetUnion { - span: span(2..2), - items: vec![], - }), - }; - let union = ast::ClassSetUnion { span: span(2..2), items: vec![] }; - Ok((set, union)) - }); - assert_eq!( - parser_ignore_whitespace("[ ^ a]").parse_set_class_open(), - { - let set = ast::ClassBracketed { - span: span(0..4), - negated: true, - kind: ast::ClassSet::union(ast::ClassSetUnion { - span: span(4..4), - items: vec![], - }), - }; - let union = - ast::ClassSetUnion { span: span(4..4), items: vec![] }; - Ok((set, union)) - } - ); - assert_eq!(parser("[-a]").parse_set_class_open(), { - let set = ast::ClassBracketed { - span: span(0..2), - negated: false, - kind: ast::ClassSet::union(ast::ClassSetUnion { - span: span(1..1), - items: vec![], - }), - }; - let union = ast::ClassSetUnion { - span: span(1..2), - items: vec![ast::ClassSetItem::Literal(ast::Literal { - span: span(1..2), - kind: ast::LiteralKind::Verbatim, - c: '-', - })], - }; - Ok((set, union)) - }); - assert_eq!( - parser_ignore_whitespace("[ - a]").parse_set_class_open(), - { - let set = ast::ClassBracketed { - span: span(0..4), - negated: false, - kind: ast::ClassSet::union(ast::ClassSetUnion { - span: span(2..2), - items: vec![], - }), - }; - let union = ast::ClassSetUnion { - span: span(2..3), - items: vec![ast::ClassSetItem::Literal(ast::Literal { - span: span(2..3), - kind: ast::LiteralKind::Verbatim, - c: '-', - })], - }; - Ok((set, union)) - } - ); - assert_eq!(parser("[^-a]").parse_set_class_open(), { - let set = ast::ClassBracketed { - span: span(0..3), - negated: true, - kind: ast::ClassSet::union(ast::ClassSetUnion { - span: span(2..2), - items: vec![], - }), - }; - let union = ast::ClassSetUnion { - span: span(2..3), - items: vec![ast::ClassSetItem::Literal(ast::Literal { - span: span(2..3), - kind: ast::LiteralKind::Verbatim, - c: '-', - })], - }; - Ok((set, union)) - }); - assert_eq!(parser("[--a]").parse_set_class_open(), { - let set = ast::ClassBracketed { - span: span(0..3), - negated: false, - kind: ast::ClassSet::union(ast::ClassSetUnion { - span: span(1..1), - items: vec![], - }), - }; - let union = ast::ClassSetUnion { - span: span(1..3), - items: vec![ - ast::ClassSetItem::Literal(ast::Literal { - span: span(1..2), - kind: ast::LiteralKind::Verbatim, - c: '-', - }), - ast::ClassSetItem::Literal(ast::Literal { - span: span(2..3), - kind: ast::LiteralKind::Verbatim, - c: '-', - }), - ], - }; - Ok((set, union)) - }); - assert_eq!(parser("[]a]").parse_set_class_open(), { - let set = ast::ClassBracketed { - span: span(0..2), - negated: false, - kind: ast::ClassSet::union(ast::ClassSetUnion { - span: span(1..1), - items: vec![], - }), - }; - let union = ast::ClassSetUnion { - span: span(1..2), - items: vec![ast::ClassSetItem::Literal(ast::Literal { - span: span(1..2), - kind: ast::LiteralKind::Verbatim, - c: ']', - })], - }; - Ok((set, union)) - }); - assert_eq!( - parser_ignore_whitespace("[ ] a]").parse_set_class_open(), - { - let set = ast::ClassBracketed { - span: span(0..4), - negated: false, - kind: ast::ClassSet::union(ast::ClassSetUnion { - span: span(2..2), - items: vec![], - }), - }; - let union = ast::ClassSetUnion { - span: span(2..3), - items: vec![ast::ClassSetItem::Literal(ast::Literal { - span: span(2..3), - kind: ast::LiteralKind::Verbatim, - c: ']', - 
})], - }; - Ok((set, union)) - } - ); - assert_eq!(parser("[^]a]").parse_set_class_open(), { - let set = ast::ClassBracketed { - span: span(0..3), - negated: true, - kind: ast::ClassSet::union(ast::ClassSetUnion { - span: span(2..2), - items: vec![], - }), - }; - let union = ast::ClassSetUnion { - span: span(2..3), - items: vec![ast::ClassSetItem::Literal(ast::Literal { - span: span(2..3), - kind: ast::LiteralKind::Verbatim, - c: ']', - })], - }; - Ok((set, union)) - }); - assert_eq!(parser("[-]a]").parse_set_class_open(), { - let set = ast::ClassBracketed { - span: span(0..2), - negated: false, - kind: ast::ClassSet::union(ast::ClassSetUnion { - span: span(1..1), - items: vec![], - }), - }; - let union = ast::ClassSetUnion { - span: span(1..2), - items: vec![ast::ClassSetItem::Literal(ast::Literal { - span: span(1..2), - kind: ast::LiteralKind::Verbatim, - c: '-', - })], - }; - Ok((set, union)) - }); - - assert_eq!( - parser("[").parse_set_class_open().unwrap_err(), - TestError { - span: span(0..1), - kind: ast::ErrorKind::ClassUnclosed, - } - ); - assert_eq!( - parser_ignore_whitespace("[ ") - .parse_set_class_open() - .unwrap_err(), - TestError { - span: span(0..5), - kind: ast::ErrorKind::ClassUnclosed, - } - ); - assert_eq!( - parser("[^").parse_set_class_open().unwrap_err(), - TestError { - span: span(0..2), - kind: ast::ErrorKind::ClassUnclosed, - } - ); - assert_eq!( - parser("[]").parse_set_class_open().unwrap_err(), - TestError { - span: span(0..2), - kind: ast::ErrorKind::ClassUnclosed, - } - ); - assert_eq!( - parser("[-").parse_set_class_open().unwrap_err(), - TestError { - span: span(0..0), - kind: ast::ErrorKind::ClassUnclosed, - } - ); - assert_eq!( - parser("[--").parse_set_class_open().unwrap_err(), - TestError { - span: span(0..0), - kind: ast::ErrorKind::ClassUnclosed, - } - ); - - // See: https://github.com/rust-lang/regex/issues/792 - assert_eq!( - parser("(?x)[-#]").parse_with_comments().unwrap_err(), - TestError { - span: span(4..4), - kind: ast::ErrorKind::ClassUnclosed, - } - ); - } - - #[test] - fn maybe_parse_ascii_class() { - assert_eq!( - parser(r"[:alnum:]").maybe_parse_ascii_class(), - Some(ast::ClassAscii { - span: span(0..9), - kind: ast::ClassAsciiKind::Alnum, - negated: false, - }) - ); - assert_eq!( - parser(r"[:alnum:]A").maybe_parse_ascii_class(), - Some(ast::ClassAscii { - span: span(0..9), - kind: ast::ClassAsciiKind::Alnum, - negated: false, - }) - ); - assert_eq!( - parser(r"[:^alnum:]").maybe_parse_ascii_class(), - Some(ast::ClassAscii { - span: span(0..10), - kind: ast::ClassAsciiKind::Alnum, - negated: true, - }) - ); - - let p = parser(r"[:"); - assert_eq!(p.maybe_parse_ascii_class(), None); - assert_eq!(p.offset(), 0); - - let p = parser(r"[:^"); - assert_eq!(p.maybe_parse_ascii_class(), None); - assert_eq!(p.offset(), 0); - - let p = parser(r"[^:alnum:]"); - assert_eq!(p.maybe_parse_ascii_class(), None); - assert_eq!(p.offset(), 0); - - let p = parser(r"[:alnnum:]"); - assert_eq!(p.maybe_parse_ascii_class(), None); - assert_eq!(p.offset(), 0); - - let p = parser(r"[:alnum]"); - assert_eq!(p.maybe_parse_ascii_class(), None); - assert_eq!(p.offset(), 0); - - let p = parser(r"[:alnum:"); - assert_eq!(p.maybe_parse_ascii_class(), None); - assert_eq!(p.offset(), 0); - } - - #[test] - fn parse_unicode_class() { - assert_eq!( - parser(r"\pN").parse_escape(), - Ok(Primitive::Unicode(ast::ClassUnicode { - span: span(0..3), - negated: false, - kind: ast::ClassUnicodeKind::OneLetter('N'), - })) - ); - assert_eq!( - parser(r"\PN").parse_escape(), - 
Ok(Primitive::Unicode(ast::ClassUnicode { - span: span(0..3), - negated: true, - kind: ast::ClassUnicodeKind::OneLetter('N'), - })) - ); - assert_eq!( - parser(r"\p{N}").parse_escape(), - Ok(Primitive::Unicode(ast::ClassUnicode { - span: span(0..5), - negated: false, - kind: ast::ClassUnicodeKind::Named(s("N")), - })) - ); - assert_eq!( - parser(r"\P{N}").parse_escape(), - Ok(Primitive::Unicode(ast::ClassUnicode { - span: span(0..5), - negated: true, - kind: ast::ClassUnicodeKind::Named(s("N")), - })) - ); - assert_eq!( - parser(r"\p{Greek}").parse_escape(), - Ok(Primitive::Unicode(ast::ClassUnicode { - span: span(0..9), - negated: false, - kind: ast::ClassUnicodeKind::Named(s("Greek")), - })) - ); - - assert_eq!( - parser(r"\p{scx:Katakana}").parse_escape(), - Ok(Primitive::Unicode(ast::ClassUnicode { - span: span(0..16), - negated: false, - kind: ast::ClassUnicodeKind::NamedValue { - op: ast::ClassUnicodeOpKind::Colon, - name: s("scx"), - value: s("Katakana"), - }, - })) - ); - assert_eq!( - parser(r"\p{scx=Katakana}").parse_escape(), - Ok(Primitive::Unicode(ast::ClassUnicode { - span: span(0..16), - negated: false, - kind: ast::ClassUnicodeKind::NamedValue { - op: ast::ClassUnicodeOpKind::Equal, - name: s("scx"), - value: s("Katakana"), - }, - })) - ); - assert_eq!( - parser(r"\p{scx!=Katakana}").parse_escape(), - Ok(Primitive::Unicode(ast::ClassUnicode { - span: span(0..17), - negated: false, - kind: ast::ClassUnicodeKind::NamedValue { - op: ast::ClassUnicodeOpKind::NotEqual, - name: s("scx"), - value: s("Katakana"), - }, - })) - ); - - assert_eq!( - parser(r"\p{:}").parse_escape(), - Ok(Primitive::Unicode(ast::ClassUnicode { - span: span(0..5), - negated: false, - kind: ast::ClassUnicodeKind::NamedValue { - op: ast::ClassUnicodeOpKind::Colon, - name: s(""), - value: s(""), - }, - })) - ); - assert_eq!( - parser(r"\p{=}").parse_escape(), - Ok(Primitive::Unicode(ast::ClassUnicode { - span: span(0..5), - negated: false, - kind: ast::ClassUnicodeKind::NamedValue { - op: ast::ClassUnicodeOpKind::Equal, - name: s(""), - value: s(""), - }, - })) - ); - assert_eq!( - parser(r"\p{!=}").parse_escape(), - Ok(Primitive::Unicode(ast::ClassUnicode { - span: span(0..6), - negated: false, - kind: ast::ClassUnicodeKind::NamedValue { - op: ast::ClassUnicodeOpKind::NotEqual, - name: s(""), - value: s(""), - }, - })) - ); - - assert_eq!( - parser(r"\p").parse_escape().unwrap_err(), - TestError { - span: span(2..2), - kind: ast::ErrorKind::EscapeUnexpectedEof, - } - ); - assert_eq!( - parser(r"\p{").parse_escape().unwrap_err(), - TestError { - span: span(3..3), - kind: ast::ErrorKind::EscapeUnexpectedEof, - } - ); - assert_eq!( - parser(r"\p{N").parse_escape().unwrap_err(), - TestError { - span: span(4..4), - kind: ast::ErrorKind::EscapeUnexpectedEof, - } - ); - assert_eq!( - parser(r"\p{Greek").parse_escape().unwrap_err(), - TestError { - span: span(8..8), - kind: ast::ErrorKind::EscapeUnexpectedEof, - } - ); - - assert_eq!( - parser(r"\pNz").parse(), - Ok(Ast::concat(ast::Concat { - span: span(0..4), - asts: vec![ - Ast::class_unicode(ast::ClassUnicode { - span: span(0..3), - negated: false, - kind: ast::ClassUnicodeKind::OneLetter('N'), - }), - Ast::literal(ast::Literal { - span: span(3..4), - kind: ast::LiteralKind::Verbatim, - c: 'z', - }), - ], - })) - ); - assert_eq!( - parser(r"\p{Greek}z").parse(), - Ok(Ast::concat(ast::Concat { - span: span(0..10), - asts: vec![ - Ast::class_unicode(ast::ClassUnicode { - span: span(0..9), - negated: false, - kind: ast::ClassUnicodeKind::Named(s("Greek")), - }), 
- Ast::literal(ast::Literal { - span: span(9..10), - kind: ast::LiteralKind::Verbatim, - c: 'z', - }), - ], - })) - ); - assert_eq!( - parser(r"\p\{").parse().unwrap_err(), - TestError { - span: span(2..3), - kind: ast::ErrorKind::UnicodeClassInvalid, - } - ); - assert_eq!( - parser(r"\P\{").parse().unwrap_err(), - TestError { - span: span(2..3), - kind: ast::ErrorKind::UnicodeClassInvalid, - } - ); - } - - #[test] - fn parse_perl_class() { - assert_eq!( - parser(r"\d").parse_escape(), - Ok(Primitive::Perl(ast::ClassPerl { - span: span(0..2), - kind: ast::ClassPerlKind::Digit, - negated: false, - })) - ); - assert_eq!( - parser(r"\D").parse_escape(), - Ok(Primitive::Perl(ast::ClassPerl { - span: span(0..2), - kind: ast::ClassPerlKind::Digit, - negated: true, - })) - ); - assert_eq!( - parser(r"\s").parse_escape(), - Ok(Primitive::Perl(ast::ClassPerl { - span: span(0..2), - kind: ast::ClassPerlKind::Space, - negated: false, - })) - ); - assert_eq!( - parser(r"\S").parse_escape(), - Ok(Primitive::Perl(ast::ClassPerl { - span: span(0..2), - kind: ast::ClassPerlKind::Space, - negated: true, - })) - ); - assert_eq!( - parser(r"\w").parse_escape(), - Ok(Primitive::Perl(ast::ClassPerl { - span: span(0..2), - kind: ast::ClassPerlKind::Word, - negated: false, - })) - ); - assert_eq!( - parser(r"\W").parse_escape(), - Ok(Primitive::Perl(ast::ClassPerl { - span: span(0..2), - kind: ast::ClassPerlKind::Word, - negated: true, - })) - ); - - assert_eq!( - parser(r"\d").parse(), - Ok(Ast::class_perl(ast::ClassPerl { - span: span(0..2), - kind: ast::ClassPerlKind::Digit, - negated: false, - })) - ); - assert_eq!( - parser(r"\dz").parse(), - Ok(Ast::concat(ast::Concat { - span: span(0..3), - asts: vec![ - Ast::class_perl(ast::ClassPerl { - span: span(0..2), - kind: ast::ClassPerlKind::Digit, - negated: false, - }), - Ast::literal(ast::Literal { - span: span(2..3), - kind: ast::LiteralKind::Verbatim, - c: 'z', - }), - ], - })) - ); - } - - // This tests a bug fix where the nest limit checker wasn't decrementing - // its depth during post-traversal, which causes long regexes to trip - // the default limit too aggressively. - #[test] - fn regression_454_nest_too_big() { - let pattern = r#" - 2(?: - [45]\d{3}| - 7(?: - 1[0-267]| - 2[0-289]| - 3[0-29]| - 4[01]| - 5[1-3]| - 6[013]| - 7[0178]| - 91 - )| - 8(?: - 0[125]| - [139][1-6]| - 2[0157-9]| - 41| - 6[1-35]| - 7[1-5]| - 8[1-8]| - 90 - )| - 9(?: - 0[0-2]| - 1[0-4]| - 2[568]| - 3[3-6]| - 5[5-7]| - 6[0167]| - 7[15]| - 8[0146-9] - ) - )\d{4} - "#; - assert!(parser_nest_limit(pattern, 50).parse().is_ok()); - } - - // This tests that we treat a trailing `-` in a character class as a - // literal `-` even when whitespace mode is enabled and there is whitespace - // after the trailing `-`. - #[test] - fn regression_455_trailing_dash_ignore_whitespace() { - assert!(parser("(?x)[ / - ]").parse().is_ok()); - assert!(parser("(?x)[ a - ]").parse().is_ok()); - assert!(parser( - "(?x)[ - a - - ] - " - ) - .parse() - .is_ok()); - assert!(parser( - "(?x)[ - a # wat - - ] - " - ) - .parse() - .is_ok()); - - assert!(parser("(?x)[ / -").parse().is_err()); - assert!(parser("(?x)[ / - ").parse().is_err()); - assert!(parser( - "(?x)[ - / - - " - ) - .parse() - .is_err()); - assert!(parser( - "(?x)[ - / - # wat - " - ) - .parse() - .is_err()); - } -} diff --git a/vendor/regex-syntax/src/ast/print.rs b/vendor/regex-syntax/src/ast/print.rs deleted file mode 100644 index 556d91f4a0087a..00000000000000 --- a/vendor/regex-syntax/src/ast/print.rs +++ /dev/null @@ -1,577 +0,0 @@ -/*! 
-This module provides a regular expression printer for `Ast`. -*/ - -use core::fmt; - -use crate::ast::{ - self, - visitor::{self, Visitor}, - Ast, -}; - -/// A builder for constructing a printer. -/// -/// Note that since a printer doesn't have any configuration knobs, this type -/// remains unexported. -#[derive(Clone, Debug)] -struct PrinterBuilder { - _priv: (), -} - -impl Default for PrinterBuilder { - fn default() -> PrinterBuilder { - PrinterBuilder::new() - } -} - -impl PrinterBuilder { - fn new() -> PrinterBuilder { - PrinterBuilder { _priv: () } - } - - fn build(&self) -> Printer { - Printer { _priv: () } - } -} - -/// A printer for a regular expression abstract syntax tree. -/// -/// A printer converts an abstract syntax tree (AST) to a regular expression -/// pattern string. This particular printer uses constant stack space and heap -/// space proportional to the size of the AST. -/// -/// This printer will not necessarily preserve the original formatting of the -/// regular expression pattern string. For example, all whitespace and comments -/// are ignored. -#[derive(Debug)] -pub struct Printer { - _priv: (), -} - -impl Printer { - /// Create a new printer. - pub fn new() -> Printer { - PrinterBuilder::new().build() - } - - /// Print the given `Ast` to the given writer. The writer must implement - /// `fmt::Write`. Typical implementations of `fmt::Write` that can be used - /// here are a `fmt::Formatter` (which is available in `fmt::Display` - /// implementations) or a `&mut String`. - pub fn print(&mut self, ast: &Ast, wtr: W) -> fmt::Result { - visitor::visit(ast, Writer { wtr }) - } -} - -#[derive(Debug)] -struct Writer { - wtr: W, -} - -impl Visitor for Writer { - type Output = (); - type Err = fmt::Error; - - fn finish(self) -> fmt::Result { - Ok(()) - } - - fn visit_pre(&mut self, ast: &Ast) -> fmt::Result { - match *ast { - Ast::Group(ref x) => self.fmt_group_pre(x), - Ast::ClassBracketed(ref x) => self.fmt_class_bracketed_pre(x), - _ => Ok(()), - } - } - - fn visit_post(&mut self, ast: &Ast) -> fmt::Result { - match *ast { - Ast::Empty(_) => Ok(()), - Ast::Flags(ref x) => self.fmt_set_flags(x), - Ast::Literal(ref x) => self.fmt_literal(x), - Ast::Dot(_) => self.wtr.write_str("."), - Ast::Assertion(ref x) => self.fmt_assertion(x), - Ast::ClassPerl(ref x) => self.fmt_class_perl(x), - Ast::ClassUnicode(ref x) => self.fmt_class_unicode(x), - Ast::ClassBracketed(ref x) => self.fmt_class_bracketed_post(x), - Ast::Repetition(ref x) => self.fmt_repetition(x), - Ast::Group(ref x) => self.fmt_group_post(x), - Ast::Alternation(_) => Ok(()), - Ast::Concat(_) => Ok(()), - } - } - - fn visit_alternation_in(&mut self) -> fmt::Result { - self.wtr.write_str("|") - } - - fn visit_class_set_item_pre( - &mut self, - ast: &ast::ClassSetItem, - ) -> Result<(), Self::Err> { - match *ast { - ast::ClassSetItem::Bracketed(ref x) => { - self.fmt_class_bracketed_pre(x) - } - _ => Ok(()), - } - } - - fn visit_class_set_item_post( - &mut self, - ast: &ast::ClassSetItem, - ) -> Result<(), Self::Err> { - use crate::ast::ClassSetItem::*; - - match *ast { - Empty(_) => Ok(()), - Literal(ref x) => self.fmt_literal(x), - Range(ref x) => { - self.fmt_literal(&x.start)?; - self.wtr.write_str("-")?; - self.fmt_literal(&x.end)?; - Ok(()) - } - Ascii(ref x) => self.fmt_class_ascii(x), - Unicode(ref x) => self.fmt_class_unicode(x), - Perl(ref x) => self.fmt_class_perl(x), - Bracketed(ref x) => self.fmt_class_bracketed_post(x), - Union(_) => Ok(()), - } - } - - fn visit_class_set_binary_op_in( - &mut self, - 
ast: &ast::ClassSetBinaryOp, - ) -> Result<(), Self::Err> { - self.fmt_class_set_binary_op_kind(&ast.kind) - } -} - -impl Writer { - fn fmt_group_pre(&mut self, ast: &ast::Group) -> fmt::Result { - use crate::ast::GroupKind::*; - match ast.kind { - CaptureIndex(_) => self.wtr.write_str("("), - CaptureName { ref name, starts_with_p } => { - let start = if starts_with_p { "(?P<" } else { "(?<" }; - self.wtr.write_str(start)?; - self.wtr.write_str(&name.name)?; - self.wtr.write_str(">")?; - Ok(()) - } - NonCapturing(ref flags) => { - self.wtr.write_str("(?")?; - self.fmt_flags(flags)?; - self.wtr.write_str(":")?; - Ok(()) - } - } - } - - fn fmt_group_post(&mut self, _ast: &ast::Group) -> fmt::Result { - self.wtr.write_str(")") - } - - fn fmt_repetition(&mut self, ast: &ast::Repetition) -> fmt::Result { - use crate::ast::RepetitionKind::*; - match ast.op.kind { - ZeroOrOne if ast.greedy => self.wtr.write_str("?"), - ZeroOrOne => self.wtr.write_str("??"), - ZeroOrMore if ast.greedy => self.wtr.write_str("*"), - ZeroOrMore => self.wtr.write_str("*?"), - OneOrMore if ast.greedy => self.wtr.write_str("+"), - OneOrMore => self.wtr.write_str("+?"), - Range(ref x) => { - self.fmt_repetition_range(x)?; - if !ast.greedy { - self.wtr.write_str("?")?; - } - Ok(()) - } - } - } - - fn fmt_repetition_range( - &mut self, - ast: &ast::RepetitionRange, - ) -> fmt::Result { - use crate::ast::RepetitionRange::*; - match *ast { - Exactly(x) => write!(self.wtr, "{{{x}}}"), - AtLeast(x) => write!(self.wtr, "{{{x},}}"), - Bounded(x, y) => write!(self.wtr, "{{{x},{y}}}"), - } - } - - fn fmt_literal(&mut self, ast: &ast::Literal) -> fmt::Result { - use crate::ast::LiteralKind::*; - - match ast.kind { - Verbatim => self.wtr.write_char(ast.c), - Meta | Superfluous => write!(self.wtr, r"\{}", ast.c), - Octal => write!(self.wtr, r"\{:o}", u32::from(ast.c)), - HexFixed(ast::HexLiteralKind::X) => { - write!(self.wtr, r"\x{:02X}", u32::from(ast.c)) - } - HexFixed(ast::HexLiteralKind::UnicodeShort) => { - write!(self.wtr, r"\u{:04X}", u32::from(ast.c)) - } - HexFixed(ast::HexLiteralKind::UnicodeLong) => { - write!(self.wtr, r"\U{:08X}", u32::from(ast.c)) - } - HexBrace(ast::HexLiteralKind::X) => { - write!(self.wtr, r"\x{{{:X}}}", u32::from(ast.c)) - } - HexBrace(ast::HexLiteralKind::UnicodeShort) => { - write!(self.wtr, r"\u{{{:X}}}", u32::from(ast.c)) - } - HexBrace(ast::HexLiteralKind::UnicodeLong) => { - write!(self.wtr, r"\U{{{:X}}}", u32::from(ast.c)) - } - Special(ast::SpecialLiteralKind::Bell) => { - self.wtr.write_str(r"\a") - } - Special(ast::SpecialLiteralKind::FormFeed) => { - self.wtr.write_str(r"\f") - } - Special(ast::SpecialLiteralKind::Tab) => self.wtr.write_str(r"\t"), - Special(ast::SpecialLiteralKind::LineFeed) => { - self.wtr.write_str(r"\n") - } - Special(ast::SpecialLiteralKind::CarriageReturn) => { - self.wtr.write_str(r"\r") - } - Special(ast::SpecialLiteralKind::VerticalTab) => { - self.wtr.write_str(r"\v") - } - Special(ast::SpecialLiteralKind::Space) => { - self.wtr.write_str(r"\ ") - } - } - } - - fn fmt_assertion(&mut self, ast: &ast::Assertion) -> fmt::Result { - use crate::ast::AssertionKind::*; - match ast.kind { - StartLine => self.wtr.write_str("^"), - EndLine => self.wtr.write_str("$"), - StartText => self.wtr.write_str(r"\A"), - EndText => self.wtr.write_str(r"\z"), - WordBoundary => self.wtr.write_str(r"\b"), - NotWordBoundary => self.wtr.write_str(r"\B"), - WordBoundaryStart => self.wtr.write_str(r"\b{start}"), - WordBoundaryEnd => self.wtr.write_str(r"\b{end}"), - WordBoundaryStartAngle 
=> self.wtr.write_str(r"\<"), - WordBoundaryEndAngle => self.wtr.write_str(r"\>"), - WordBoundaryStartHalf => self.wtr.write_str(r"\b{start-half}"), - WordBoundaryEndHalf => self.wtr.write_str(r"\b{end-half}"), - } - } - - fn fmt_set_flags(&mut self, ast: &ast::SetFlags) -> fmt::Result { - self.wtr.write_str("(?")?; - self.fmt_flags(&ast.flags)?; - self.wtr.write_str(")")?; - Ok(()) - } - - fn fmt_flags(&mut self, ast: &ast::Flags) -> fmt::Result { - use crate::ast::{Flag, FlagsItemKind}; - - for item in &ast.items { - match item.kind { - FlagsItemKind::Negation => self.wtr.write_str("-"), - FlagsItemKind::Flag(ref flag) => match *flag { - Flag::CaseInsensitive => self.wtr.write_str("i"), - Flag::MultiLine => self.wtr.write_str("m"), - Flag::DotMatchesNewLine => self.wtr.write_str("s"), - Flag::SwapGreed => self.wtr.write_str("U"), - Flag::Unicode => self.wtr.write_str("u"), - Flag::CRLF => self.wtr.write_str("R"), - Flag::IgnoreWhitespace => self.wtr.write_str("x"), - }, - }?; - } - Ok(()) - } - - fn fmt_class_bracketed_pre( - &mut self, - ast: &ast::ClassBracketed, - ) -> fmt::Result { - if ast.negated { - self.wtr.write_str("[^") - } else { - self.wtr.write_str("[") - } - } - - fn fmt_class_bracketed_post( - &mut self, - _ast: &ast::ClassBracketed, - ) -> fmt::Result { - self.wtr.write_str("]") - } - - fn fmt_class_set_binary_op_kind( - &mut self, - ast: &ast::ClassSetBinaryOpKind, - ) -> fmt::Result { - use crate::ast::ClassSetBinaryOpKind::*; - match *ast { - Intersection => self.wtr.write_str("&&"), - Difference => self.wtr.write_str("--"), - SymmetricDifference => self.wtr.write_str("~~"), - } - } - - fn fmt_class_perl(&mut self, ast: &ast::ClassPerl) -> fmt::Result { - use crate::ast::ClassPerlKind::*; - match ast.kind { - Digit if ast.negated => self.wtr.write_str(r"\D"), - Digit => self.wtr.write_str(r"\d"), - Space if ast.negated => self.wtr.write_str(r"\S"), - Space => self.wtr.write_str(r"\s"), - Word if ast.negated => self.wtr.write_str(r"\W"), - Word => self.wtr.write_str(r"\w"), - } - } - - fn fmt_class_ascii(&mut self, ast: &ast::ClassAscii) -> fmt::Result { - use crate::ast::ClassAsciiKind::*; - match ast.kind { - Alnum if ast.negated => self.wtr.write_str("[:^alnum:]"), - Alnum => self.wtr.write_str("[:alnum:]"), - Alpha if ast.negated => self.wtr.write_str("[:^alpha:]"), - Alpha => self.wtr.write_str("[:alpha:]"), - Ascii if ast.negated => self.wtr.write_str("[:^ascii:]"), - Ascii => self.wtr.write_str("[:ascii:]"), - Blank if ast.negated => self.wtr.write_str("[:^blank:]"), - Blank => self.wtr.write_str("[:blank:]"), - Cntrl if ast.negated => self.wtr.write_str("[:^cntrl:]"), - Cntrl => self.wtr.write_str("[:cntrl:]"), - Digit if ast.negated => self.wtr.write_str("[:^digit:]"), - Digit => self.wtr.write_str("[:digit:]"), - Graph if ast.negated => self.wtr.write_str("[:^graph:]"), - Graph => self.wtr.write_str("[:graph:]"), - Lower if ast.negated => self.wtr.write_str("[:^lower:]"), - Lower => self.wtr.write_str("[:lower:]"), - Print if ast.negated => self.wtr.write_str("[:^print:]"), - Print => self.wtr.write_str("[:print:]"), - Punct if ast.negated => self.wtr.write_str("[:^punct:]"), - Punct => self.wtr.write_str("[:punct:]"), - Space if ast.negated => self.wtr.write_str("[:^space:]"), - Space => self.wtr.write_str("[:space:]"), - Upper if ast.negated => self.wtr.write_str("[:^upper:]"), - Upper => self.wtr.write_str("[:upper:]"), - Word if ast.negated => self.wtr.write_str("[:^word:]"), - Word => self.wtr.write_str("[:word:]"), - Xdigit if ast.negated => 
self.wtr.write_str("[:^xdigit:]"), - Xdigit => self.wtr.write_str("[:xdigit:]"), - } - } - - fn fmt_class_unicode(&mut self, ast: &ast::ClassUnicode) -> fmt::Result { - use crate::ast::ClassUnicodeKind::*; - use crate::ast::ClassUnicodeOpKind::*; - - if ast.negated { - self.wtr.write_str(r"\P")?; - } else { - self.wtr.write_str(r"\p")?; - } - match ast.kind { - OneLetter(c) => self.wtr.write_char(c), - Named(ref x) => write!(self.wtr, "{{{}}}", x), - NamedValue { op: Equal, ref name, ref value } => { - write!(self.wtr, "{{{}={}}}", name, value) - } - NamedValue { op: Colon, ref name, ref value } => { - write!(self.wtr, "{{{}:{}}}", name, value) - } - NamedValue { op: NotEqual, ref name, ref value } => { - write!(self.wtr, "{{{}!={}}}", name, value) - } - } - } -} - -#[cfg(test)] -mod tests { - use alloc::string::String; - - use crate::ast::parse::ParserBuilder; - - use super::*; - - fn roundtrip(given: &str) { - roundtrip_with(|b| b, given); - } - - fn roundtrip_with(mut f: F, given: &str) - where - F: FnMut(&mut ParserBuilder) -> &mut ParserBuilder, - { - let mut builder = ParserBuilder::new(); - f(&mut builder); - let ast = builder.build().parse(given).unwrap(); - - let mut printer = Printer::new(); - let mut dst = String::new(); - printer.print(&ast, &mut dst).unwrap(); - assert_eq!(given, dst); - } - - #[test] - fn print_literal() { - roundtrip("a"); - roundtrip(r"\["); - roundtrip_with(|b| b.octal(true), r"\141"); - roundtrip(r"\x61"); - roundtrip(r"\x7F"); - roundtrip(r"\u0061"); - roundtrip(r"\U00000061"); - roundtrip(r"\x{61}"); - roundtrip(r"\x{7F}"); - roundtrip(r"\u{61}"); - roundtrip(r"\U{61}"); - - roundtrip(r"\a"); - roundtrip(r"\f"); - roundtrip(r"\t"); - roundtrip(r"\n"); - roundtrip(r"\r"); - roundtrip(r"\v"); - roundtrip(r"(?x)\ "); - } - - #[test] - fn print_dot() { - roundtrip("."); - } - - #[test] - fn print_concat() { - roundtrip("ab"); - roundtrip("abcde"); - roundtrip("a(bcd)ef"); - } - - #[test] - fn print_alternation() { - roundtrip("a|b"); - roundtrip("a|b|c|d|e"); - roundtrip("|a|b|c|d|e"); - roundtrip("|a|b|c|d|e|"); - roundtrip("a(b|c|d)|e|f"); - } - - #[test] - fn print_assertion() { - roundtrip(r"^"); - roundtrip(r"$"); - roundtrip(r"\A"); - roundtrip(r"\z"); - roundtrip(r"\b"); - roundtrip(r"\B"); - } - - #[test] - fn print_repetition() { - roundtrip("a?"); - roundtrip("a??"); - roundtrip("a*"); - roundtrip("a*?"); - roundtrip("a+"); - roundtrip("a+?"); - roundtrip("a{5}"); - roundtrip("a{5}?"); - roundtrip("a{5,}"); - roundtrip("a{5,}?"); - roundtrip("a{5,10}"); - roundtrip("a{5,10}?"); - } - - #[test] - fn print_flags() { - roundtrip("(?i)"); - roundtrip("(?-i)"); - roundtrip("(?s-i)"); - roundtrip("(?-si)"); - roundtrip("(?siUmux)"); - } - - #[test] - fn print_group() { - roundtrip("(?i:a)"); - roundtrip("(?Pa)"); - roundtrip("(?a)"); - roundtrip("(a)"); - } - - #[test] - fn print_class() { - roundtrip(r"[abc]"); - roundtrip(r"[a-z]"); - roundtrip(r"[^a-z]"); - roundtrip(r"[a-z0-9]"); - roundtrip(r"[-a-z0-9]"); - roundtrip(r"[-a-z0-9]"); - roundtrip(r"[a-z0-9---]"); - roundtrip(r"[a-z&&m-n]"); - roundtrip(r"[[a-z&&m-n]]"); - roundtrip(r"[a-z--m-n]"); - roundtrip(r"[a-z~~m-n]"); - roundtrip(r"[a-z[0-9]]"); - roundtrip(r"[a-z[^0-9]]"); - - roundtrip(r"\d"); - roundtrip(r"\D"); - roundtrip(r"\s"); - roundtrip(r"\S"); - roundtrip(r"\w"); - roundtrip(r"\W"); - - roundtrip(r"[[:alnum:]]"); - roundtrip(r"[[:^alnum:]]"); - roundtrip(r"[[:alpha:]]"); - roundtrip(r"[[:^alpha:]]"); - roundtrip(r"[[:ascii:]]"); - roundtrip(r"[[:^ascii:]]"); - 
roundtrip(r"[[:blank:]]"); - roundtrip(r"[[:^blank:]]"); - roundtrip(r"[[:cntrl:]]"); - roundtrip(r"[[:^cntrl:]]"); - roundtrip(r"[[:digit:]]"); - roundtrip(r"[[:^digit:]]"); - roundtrip(r"[[:graph:]]"); - roundtrip(r"[[:^graph:]]"); - roundtrip(r"[[:lower:]]"); - roundtrip(r"[[:^lower:]]"); - roundtrip(r"[[:print:]]"); - roundtrip(r"[[:^print:]]"); - roundtrip(r"[[:punct:]]"); - roundtrip(r"[[:^punct:]]"); - roundtrip(r"[[:space:]]"); - roundtrip(r"[[:^space:]]"); - roundtrip(r"[[:upper:]]"); - roundtrip(r"[[:^upper:]]"); - roundtrip(r"[[:word:]]"); - roundtrip(r"[[:^word:]]"); - roundtrip(r"[[:xdigit:]]"); - roundtrip(r"[[:^xdigit:]]"); - - roundtrip(r"\pL"); - roundtrip(r"\PL"); - roundtrip(r"\p{L}"); - roundtrip(r"\P{L}"); - roundtrip(r"\p{X=Y}"); - roundtrip(r"\P{X=Y}"); - roundtrip(r"\p{X:Y}"); - roundtrip(r"\P{X:Y}"); - roundtrip(r"\p{X!=Y}"); - roundtrip(r"\P{X!=Y}"); - } -} diff --git a/vendor/regex-syntax/src/ast/visitor.rs b/vendor/regex-syntax/src/ast/visitor.rs deleted file mode 100644 index 36cd713c0f3dc6..00000000000000 --- a/vendor/regex-syntax/src/ast/visitor.rs +++ /dev/null @@ -1,522 +0,0 @@ -use alloc::{vec, vec::Vec}; - -use crate::ast::{self, Ast}; - -/// A trait for visiting an abstract syntax tree (AST) in depth first order. -/// -/// The principle aim of this trait is to enable callers to perform case -/// analysis on an abstract syntax tree without necessarily using recursion. -/// In particular, this permits callers to do case analysis with constant stack -/// usage, which can be important since the size of an abstract syntax tree -/// may be proportional to end user input. -/// -/// Typical usage of this trait involves providing an implementation and then -/// running it using the [`visit`] function. -/// -/// Note that the abstract syntax tree for a regular expression is quite -/// complex. Unless you specifically need it, you might be able to use the much -/// simpler [high-level intermediate representation](crate::hir::Hir) and its -/// [corresponding `Visitor` trait](crate::hir::Visitor) instead. -pub trait Visitor { - /// The result of visiting an AST. - type Output; - /// An error that visiting an AST might return. - type Err; - - /// All implementors of `Visitor` must provide a `finish` method, which - /// yields the result of visiting the AST or an error. - fn finish(self) -> Result; - - /// This method is called before beginning traversal of the AST. - fn start(&mut self) {} - - /// This method is called on an `Ast` before descending into child `Ast` - /// nodes. - fn visit_pre(&mut self, _ast: &Ast) -> Result<(), Self::Err> { - Ok(()) - } - - /// This method is called on an `Ast` after descending all of its child - /// `Ast` nodes. - fn visit_post(&mut self, _ast: &Ast) -> Result<(), Self::Err> { - Ok(()) - } - - /// This method is called between child nodes of an - /// [`Alternation`](ast::Alternation). - fn visit_alternation_in(&mut self) -> Result<(), Self::Err> { - Ok(()) - } - - /// This method is called between child nodes of a concatenation. - fn visit_concat_in(&mut self) -> Result<(), Self::Err> { - Ok(()) - } - - /// This method is called on every [`ClassSetItem`](ast::ClassSetItem) - /// before descending into child nodes. - fn visit_class_set_item_pre( - &mut self, - _ast: &ast::ClassSetItem, - ) -> Result<(), Self::Err> { - Ok(()) - } - - /// This method is called on every [`ClassSetItem`](ast::ClassSetItem) - /// after descending into child nodes. 
- fn visit_class_set_item_post( - &mut self, - _ast: &ast::ClassSetItem, - ) -> Result<(), Self::Err> { - Ok(()) - } - - /// This method is called on every - /// [`ClassSetBinaryOp`](ast::ClassSetBinaryOp) before descending into - /// child nodes. - fn visit_class_set_binary_op_pre( - &mut self, - _ast: &ast::ClassSetBinaryOp, - ) -> Result<(), Self::Err> { - Ok(()) - } - - /// This method is called on every - /// [`ClassSetBinaryOp`](ast::ClassSetBinaryOp) after descending into child - /// nodes. - fn visit_class_set_binary_op_post( - &mut self, - _ast: &ast::ClassSetBinaryOp, - ) -> Result<(), Self::Err> { - Ok(()) - } - - /// This method is called between the left hand and right hand child nodes - /// of a [`ClassSetBinaryOp`](ast::ClassSetBinaryOp). - fn visit_class_set_binary_op_in( - &mut self, - _ast: &ast::ClassSetBinaryOp, - ) -> Result<(), Self::Err> { - Ok(()) - } -} - -/// Executes an implementation of `Visitor` in constant stack space. -/// -/// This function will visit every node in the given `Ast` while calling the -/// appropriate methods provided by the [`Visitor`] trait. -/// -/// The primary use case for this method is when one wants to perform case -/// analysis over an `Ast` without using a stack size proportional to the depth -/// of the `Ast`. Namely, this method will instead use constant stack size, but -/// will use heap space proportional to the size of the `Ast`. This may be -/// desirable in cases where the size of `Ast` is proportional to end user -/// input. -/// -/// If the visitor returns an error at any point, then visiting is stopped and -/// the error is returned. -pub fn visit(ast: &Ast, visitor: V) -> Result { - HeapVisitor::new().visit(ast, visitor) -} - -/// HeapVisitor visits every item in an `Ast` recursively using constant stack -/// size and a heap size proportional to the size of the `Ast`. -struct HeapVisitor<'a> { - /// A stack of `Ast` nodes. This is roughly analogous to the call stack - /// used in a typical recursive visitor. - stack: Vec<(&'a Ast, Frame<'a>)>, - /// Similar to the `Ast` stack above, but is used only for character - /// classes. In particular, character classes embed their own mini - /// recursive syntax. - stack_class: Vec<(ClassInduct<'a>, ClassFrame<'a>)>, -} - -/// Represents a single stack frame while performing structural induction over -/// an `Ast`. -enum Frame<'a> { - /// A stack frame allocated just before descending into a repetition - /// operator's child node. - Repetition(&'a ast::Repetition), - /// A stack frame allocated just before descending into a group's child - /// node. - Group(&'a ast::Group), - /// The stack frame used while visiting every child node of a concatenation - /// of expressions. - Concat { - /// The child node we are currently visiting. - head: &'a Ast, - /// The remaining child nodes to visit (which may be empty). - tail: &'a [Ast], - }, - /// The stack frame used while visiting every child node of an alternation - /// of expressions. - Alternation { - /// The child node we are currently visiting. - head: &'a Ast, - /// The remaining child nodes to visit (which may be empty). - tail: &'a [Ast], - }, -} - -/// Represents a single stack frame while performing structural induction over -/// a character class. -enum ClassFrame<'a> { - /// The stack frame used while visiting every child node of a union of - /// character class items. - Union { - /// The child node we are currently visiting. - head: &'a ast::ClassSetItem, - /// The remaining child nodes to visit (which may be empty). 
- tail: &'a [ast::ClassSetItem], - }, - /// The stack frame used while a binary class operation. - Binary { op: &'a ast::ClassSetBinaryOp }, - /// A stack frame allocated just before descending into a binary operator's - /// left hand child node. - BinaryLHS { - op: &'a ast::ClassSetBinaryOp, - lhs: &'a ast::ClassSet, - rhs: &'a ast::ClassSet, - }, - /// A stack frame allocated just before descending into a binary operator's - /// right hand child node. - BinaryRHS { op: &'a ast::ClassSetBinaryOp, rhs: &'a ast::ClassSet }, -} - -/// A representation of the inductive step when performing structural induction -/// over a character class. -/// -/// Note that there is no analogous explicit type for the inductive step for -/// `Ast` nodes because the inductive step is just an `Ast`. For character -/// classes, the inductive step can produce one of two possible child nodes: -/// an item or a binary operation. (An item cannot be a binary operation -/// because that would imply binary operations can be unioned in the concrete -/// syntax, which is not possible.) -enum ClassInduct<'a> { - Item(&'a ast::ClassSetItem), - BinaryOp(&'a ast::ClassSetBinaryOp), -} - -impl<'a> HeapVisitor<'a> { - fn new() -> HeapVisitor<'a> { - HeapVisitor { stack: vec![], stack_class: vec![] } - } - - fn visit( - &mut self, - mut ast: &'a Ast, - mut visitor: V, - ) -> Result { - self.stack.clear(); - self.stack_class.clear(); - - visitor.start(); - loop { - visitor.visit_pre(ast)?; - if let Some(x) = self.induct(ast, &mut visitor)? { - let child = x.child(); - self.stack.push((ast, x)); - ast = child; - continue; - } - // No induction means we have a base case, so we can post visit - // it now. - visitor.visit_post(ast)?; - - // At this point, we now try to pop our call stack until it is - // either empty or we hit another inductive case. - loop { - let (post_ast, frame) = match self.stack.pop() { - None => return visitor.finish(), - Some((post_ast, frame)) => (post_ast, frame), - }; - // If this is a concat/alternate, then we might have additional - // inductive steps to process. - if let Some(x) = self.pop(frame) { - match x { - Frame::Alternation { .. } => { - visitor.visit_alternation_in()?; - } - Frame::Concat { .. } => { - visitor.visit_concat_in()?; - } - _ => {} - } - ast = x.child(); - self.stack.push((post_ast, x)); - break; - } - // Otherwise, we've finished visiting all the child nodes for - // this AST, so we can post visit it now. - visitor.visit_post(post_ast)?; - } - } - } - - /// Build a stack frame for the given AST if one is needed (which occurs if - /// and only if there are child nodes in the AST). Otherwise, return None. - /// - /// If this visits a class, then the underlying visitor implementation may - /// return an error which will be passed on here. - fn induct( - &mut self, - ast: &'a Ast, - visitor: &mut V, - ) -> Result>, V::Err> { - Ok(match *ast { - Ast::ClassBracketed(ref x) => { - self.visit_class(x, visitor)?; - None - } - Ast::Repetition(ref x) => Some(Frame::Repetition(x)), - Ast::Group(ref x) => Some(Frame::Group(x)), - Ast::Concat(ref x) if x.asts.is_empty() => None, - Ast::Concat(ref x) => { - Some(Frame::Concat { head: &x.asts[0], tail: &x.asts[1..] }) - } - Ast::Alternation(ref x) if x.asts.is_empty() => None, - Ast::Alternation(ref x) => Some(Frame::Alternation { - head: &x.asts[0], - tail: &x.asts[1..], - }), - _ => None, - }) - } - - /// Pops the given frame. If the frame has an additional inductive step, - /// then return it, otherwise return `None`. 
- fn pop(&self, induct: Frame<'a>) -> Option> { - match induct { - Frame::Repetition(_) => None, - Frame::Group(_) => None, - Frame::Concat { tail, .. } => { - if tail.is_empty() { - None - } else { - Some(Frame::Concat { head: &tail[0], tail: &tail[1..] }) - } - } - Frame::Alternation { tail, .. } => { - if tail.is_empty() { - None - } else { - Some(Frame::Alternation { - head: &tail[0], - tail: &tail[1..], - }) - } - } - } - } - - fn visit_class( - &mut self, - ast: &'a ast::ClassBracketed, - visitor: &mut V, - ) -> Result<(), V::Err> { - let mut ast = ClassInduct::from_bracketed(ast); - loop { - self.visit_class_pre(&ast, visitor)?; - if let Some(x) = self.induct_class(&ast) { - let child = x.child(); - self.stack_class.push((ast, x)); - ast = child; - continue; - } - self.visit_class_post(&ast, visitor)?; - - // At this point, we now try to pop our call stack until it is - // either empty or we hit another inductive case. - loop { - let (post_ast, frame) = match self.stack_class.pop() { - None => return Ok(()), - Some((post_ast, frame)) => (post_ast, frame), - }; - // If this is a union or a binary op, then we might have - // additional inductive steps to process. - if let Some(x) = self.pop_class(frame) { - if let ClassFrame::BinaryRHS { ref op, .. } = x { - visitor.visit_class_set_binary_op_in(op)?; - } - ast = x.child(); - self.stack_class.push((post_ast, x)); - break; - } - // Otherwise, we've finished visiting all the child nodes for - // this class node, so we can post visit it now. - self.visit_class_post(&post_ast, visitor)?; - } - } - } - - /// Call the appropriate `Visitor` methods given an inductive step. - fn visit_class_pre( - &self, - ast: &ClassInduct<'a>, - visitor: &mut V, - ) -> Result<(), V::Err> { - match *ast { - ClassInduct::Item(item) => { - visitor.visit_class_set_item_pre(item)?; - } - ClassInduct::BinaryOp(op) => { - visitor.visit_class_set_binary_op_pre(op)?; - } - } - Ok(()) - } - - /// Call the appropriate `Visitor` methods given an inductive step. - fn visit_class_post( - &self, - ast: &ClassInduct<'a>, - visitor: &mut V, - ) -> Result<(), V::Err> { - match *ast { - ClassInduct::Item(item) => { - visitor.visit_class_set_item_post(item)?; - } - ClassInduct::BinaryOp(op) => { - visitor.visit_class_set_binary_op_post(op)?; - } - } - Ok(()) - } - - /// Build a stack frame for the given class node if one is needed (which - /// occurs if and only if there are child nodes). Otherwise, return None. - fn induct_class(&self, ast: &ClassInduct<'a>) -> Option> { - match *ast { - ClassInduct::Item(&ast::ClassSetItem::Bracketed(ref x)) => { - match x.kind { - ast::ClassSet::Item(ref item) => { - Some(ClassFrame::Union { head: item, tail: &[] }) - } - ast::ClassSet::BinaryOp(ref op) => { - Some(ClassFrame::Binary { op }) - } - } - } - ClassInduct::Item(&ast::ClassSetItem::Union(ref x)) => { - if x.items.is_empty() { - None - } else { - Some(ClassFrame::Union { - head: &x.items[0], - tail: &x.items[1..], - }) - } - } - ClassInduct::BinaryOp(op) => { - Some(ClassFrame::BinaryLHS { op, lhs: &op.lhs, rhs: &op.rhs }) - } - _ => None, - } - } - - /// Pops the given frame. If the frame has an additional inductive step, - /// then return it, otherwise return `None`. - fn pop_class(&self, induct: ClassFrame<'a>) -> Option> { - match induct { - ClassFrame::Union { tail, .. } => { - if tail.is_empty() { - None - } else { - Some(ClassFrame::Union { - head: &tail[0], - tail: &tail[1..], - }) - } - } - ClassFrame::Binary { .. } => None, - ClassFrame::BinaryLHS { op, rhs, .. 
} => { - Some(ClassFrame::BinaryRHS { op, rhs }) - } - ClassFrame::BinaryRHS { .. } => None, - } - } -} - -impl<'a> Frame<'a> { - /// Perform the next inductive step on this frame and return the next - /// child AST node to visit. - fn child(&self) -> &'a Ast { - match *self { - Frame::Repetition(rep) => &rep.ast, - Frame::Group(group) => &group.ast, - Frame::Concat { head, .. } => head, - Frame::Alternation { head, .. } => head, - } - } -} - -impl<'a> ClassFrame<'a> { - /// Perform the next inductive step on this frame and return the next - /// child class node to visit. - fn child(&self) -> ClassInduct<'a> { - match *self { - ClassFrame::Union { head, .. } => ClassInduct::Item(head), - ClassFrame::Binary { op, .. } => ClassInduct::BinaryOp(op), - ClassFrame::BinaryLHS { ref lhs, .. } => { - ClassInduct::from_set(lhs) - } - ClassFrame::BinaryRHS { ref rhs, .. } => { - ClassInduct::from_set(rhs) - } - } - } -} - -impl<'a> ClassInduct<'a> { - fn from_bracketed(ast: &'a ast::ClassBracketed) -> ClassInduct<'a> { - ClassInduct::from_set(&ast.kind) - } - - fn from_set(ast: &'a ast::ClassSet) -> ClassInduct<'a> { - match *ast { - ast::ClassSet::Item(ref item) => ClassInduct::Item(item), - ast::ClassSet::BinaryOp(ref op) => ClassInduct::BinaryOp(op), - } - } -} - -impl<'a> core::fmt::Debug for ClassFrame<'a> { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - let x = match *self { - ClassFrame::Union { .. } => "Union", - ClassFrame::Binary { .. } => "Binary", - ClassFrame::BinaryLHS { .. } => "BinaryLHS", - ClassFrame::BinaryRHS { .. } => "BinaryRHS", - }; - write!(f, "{x}") - } -} - -impl<'a> core::fmt::Debug for ClassInduct<'a> { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - let x = match *self { - ClassInduct::Item(it) => match *it { - ast::ClassSetItem::Empty(_) => "Item(Empty)", - ast::ClassSetItem::Literal(_) => "Item(Literal)", - ast::ClassSetItem::Range(_) => "Item(Range)", - ast::ClassSetItem::Ascii(_) => "Item(Ascii)", - ast::ClassSetItem::Perl(_) => "Item(Perl)", - ast::ClassSetItem::Unicode(_) => "Item(Unicode)", - ast::ClassSetItem::Bracketed(_) => "Item(Bracketed)", - ast::ClassSetItem::Union(_) => "Item(Union)", - }, - ClassInduct::BinaryOp(it) => match it.kind { - ast::ClassSetBinaryOpKind::Intersection => { - "BinaryOp(Intersection)" - } - ast::ClassSetBinaryOpKind::Difference => { - "BinaryOp(Difference)" - } - ast::ClassSetBinaryOpKind::SymmetricDifference => { - "BinaryOp(SymmetricDifference)" - } - }, - }; - write!(f, "{x}") - } -} diff --git a/vendor/regex-syntax/src/debug.rs b/vendor/regex-syntax/src/debug.rs deleted file mode 100644 index 7a47d9de8eb339..00000000000000 --- a/vendor/regex-syntax/src/debug.rs +++ /dev/null @@ -1,107 +0,0 @@ -/// A type that wraps a single byte with a convenient fmt::Debug impl that -/// escapes the byte. -pub(crate) struct Byte(pub(crate) u8); - -impl core::fmt::Debug for Byte { - fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { - // Special case ASCII space. It's too hard to read otherwise, so - // put quotes around it. I sometimes wonder whether just '\x20' would - // be better... - if self.0 == b' ' { - return write!(f, "' '"); - } - // 10 bytes is enough to cover any output from ascii::escape_default. 
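// For reference, `core::ascii::escape_default` already yields the escaped
// form one byte at a time; the loop below only post-processes it to
// upper-case the hex digits (so 0xFF renders as \xFF rather than \xff).
// A hypothetical illustration of the raw output:
//
//     let escaped: String =
//         core::ascii::escape_default(0xFF).map(char::from).collect();
//     assert_eq!(escaped, r"\xff");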
- let mut bytes = [0u8; 10]; - let mut len = 0; - for (i, mut b) in core::ascii::escape_default(self.0).enumerate() { - // capitalize \xab to \xAB - if i >= 2 && b'a' <= b && b <= b'f' { - b -= 32; - } - bytes[len] = b; - len += 1; - } - write!(f, "{}", core::str::from_utf8(&bytes[..len]).unwrap()) - } -} - -/// A type that provides a human readable debug impl for arbitrary bytes. -/// -/// This generally works best when the bytes are presumed to be mostly UTF-8, -/// but will work for anything. -/// -/// N.B. This is copied nearly verbatim from regex-automata. Sigh. -pub(crate) struct Bytes<'a>(pub(crate) &'a [u8]); - -impl<'a> core::fmt::Debug for Bytes<'a> { - fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { - write!(f, "\"")?; - // This is a sad re-implementation of a similar impl found in bstr. - let mut bytes = self.0; - while let Some(result) = utf8_decode(bytes) { - let ch = match result { - Ok(ch) => ch, - Err(byte) => { - write!(f, r"\x{byte:02x}")?; - bytes = &bytes[1..]; - continue; - } - }; - bytes = &bytes[ch.len_utf8()..]; - match ch { - '\0' => write!(f, "\\0")?, - // ASCII control characters except \0, \n, \r, \t - '\x01'..='\x08' - | '\x0b' - | '\x0c' - | '\x0e'..='\x19' - | '\x7f' => { - write!(f, "\\x{:02x}", u32::from(ch))?; - } - '\n' | '\r' | '\t' | _ => { - write!(f, "{}", ch.escape_debug())?; - } - } - } - write!(f, "\"")?; - Ok(()) - } -} - -/// Decodes the next UTF-8 encoded codepoint from the given byte slice. -/// -/// If no valid encoding of a codepoint exists at the beginning of the given -/// byte slice, then the first byte is returned instead. -/// -/// This returns `None` if and only if `bytes` is empty. -pub(crate) fn utf8_decode(bytes: &[u8]) -> Option> { - fn len(byte: u8) -> Option { - if byte <= 0x7F { - return Some(1); - } else if byte & 0b1100_0000 == 0b1000_0000 { - return None; - } else if byte <= 0b1101_1111 { - Some(2) - } else if byte <= 0b1110_1111 { - Some(3) - } else if byte <= 0b1111_0111 { - Some(4) - } else { - None - } - } - - if bytes.is_empty() { - return None; - } - let len = match len(bytes[0]) { - None => return Some(Err(bytes[0])), - Some(len) if len > bytes.len() => return Some(Err(bytes[0])), - Some(1) => return Some(Ok(char::from(bytes[0]))), - Some(len) => len, - }; - match core::str::from_utf8(&bytes[..len]) { - Ok(s) => Some(Ok(s.chars().next().unwrap())), - Err(_) => Some(Err(bytes[0])), - } -} diff --git a/vendor/regex-syntax/src/either.rs b/vendor/regex-syntax/src/either.rs deleted file mode 100644 index 7ae41e4ced7460..00000000000000 --- a/vendor/regex-syntax/src/either.rs +++ /dev/null @@ -1,8 +0,0 @@ -/// A simple binary sum type. -/// -/// This is occasionally useful in an ad hoc fashion. -#[derive(Clone, Debug, Eq, PartialEq)] -pub enum Either { - Left(Left), - Right(Right), -} diff --git a/vendor/regex-syntax/src/error.rs b/vendor/regex-syntax/src/error.rs deleted file mode 100644 index 21e484df96dcd9..00000000000000 --- a/vendor/regex-syntax/src/error.rs +++ /dev/null @@ -1,311 +0,0 @@ -use alloc::{ - format, - string::{String, ToString}, - vec, - vec::Vec, -}; - -use crate::{ast, hir}; - -/// This error type encompasses any error that can be returned by this crate. -/// -/// This error type is marked as `non_exhaustive`. This means that adding a -/// new variant is not considered a breaking change. -#[non_exhaustive] -#[derive(Clone, Debug, Eq, PartialEq)] -pub enum Error { - /// An error that occurred while translating concrete syntax into abstract - /// syntax (AST). 
- Parse(ast::Error), - /// An error that occurred while translating abstract syntax into a high - /// level intermediate representation (HIR). - Translate(hir::Error), -} - -impl From for Error { - fn from(err: ast::Error) -> Error { - Error::Parse(err) - } -} - -impl From for Error { - fn from(err: hir::Error) -> Error { - Error::Translate(err) - } -} - -#[cfg(feature = "std")] -impl std::error::Error for Error {} - -impl core::fmt::Display for Error { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - match *self { - Error::Parse(ref x) => x.fmt(f), - Error::Translate(ref x) => x.fmt(f), - } - } -} - -/// A helper type for formatting nice error messages. -/// -/// This type is responsible for reporting regex parse errors in a nice human -/// readable format. Most of its complexity is from interspersing notational -/// markers pointing out the position where an error occurred. -#[derive(Debug)] -pub struct Formatter<'e, E> { - /// The original regex pattern in which the error occurred. - pattern: &'e str, - /// The error kind. It must impl fmt::Display. - err: &'e E, - /// The primary span of the error. - span: &'e ast::Span, - /// An auxiliary and optional span, in case the error needs to point to - /// two locations (e.g., when reporting a duplicate capture group name). - aux_span: Option<&'e ast::Span>, -} - -impl<'e> From<&'e ast::Error> for Formatter<'e, ast::ErrorKind> { - fn from(err: &'e ast::Error) -> Self { - Formatter { - pattern: err.pattern(), - err: err.kind(), - span: err.span(), - aux_span: err.auxiliary_span(), - } - } -} - -impl<'e> From<&'e hir::Error> for Formatter<'e, hir::ErrorKind> { - fn from(err: &'e hir::Error) -> Self { - Formatter { - pattern: err.pattern(), - err: err.kind(), - span: err.span(), - aux_span: None, - } - } -} - -impl<'e, E: core::fmt::Display> core::fmt::Display for Formatter<'e, E> { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - let spans = Spans::from_formatter(self); - if self.pattern.contains('\n') { - let divider = repeat_char('~', 79); - - writeln!(f, "regex parse error:")?; - writeln!(f, "{divider}")?; - let notated = spans.notate(); - write!(f, "{notated}")?; - writeln!(f, "{divider}")?; - // If we have error spans that cover multiple lines, then we just - // note the line numbers. - if !spans.multi_line.is_empty() { - let mut notes = vec![]; - for span in &spans.multi_line { - notes.push(format!( - "on line {} (column {}) through line {} (column {})", - span.start.line, - span.start.column, - span.end.line, - span.end.column - 1 - )); - } - writeln!(f, "{}", notes.join("\n"))?; - } - write!(f, "error: {}", self.err)?; - } else { - writeln!(f, "regex parse error:")?; - let notated = Spans::from_formatter(self).notate(); - write!(f, "{notated}")?; - write!(f, "error: {}", self.err)?; - } - Ok(()) - } -} - -/// This type represents an arbitrary number of error spans in a way that makes -/// it convenient to notate the regex pattern. ("Notate" means "point out -/// exactly where the error occurred in the regex pattern.") -/// -/// Technically, we can only ever have two spans given our current error -/// structure. However, after toiling with a specific algorithm for handling -/// two spans, it became obvious that an algorithm to handle an arbitrary -/// number of spans was actually much simpler. -struct Spans<'p> { - /// The original regex pattern string. - pattern: &'p str, - /// The total width that should be used for line numbers. 
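// A sketch of how the two `From` impls above are used in practice: both the
// AST parse phase and the AST-to-HIR translation phase can be chained with
// `?` behind the single top-level `Error`. The `ast::parse::Parser` and
// `hir::translate::Translator` entry points are assumed from the crate's
// public API.
use regex_syntax::{ast, hir, Error};

fn compile_to_hir(pattern: &str) -> Result<hir::Hir, Error> {
    // ast::Error converts into Error::Parse via From.
    let ast = ast::parse::Parser::new().parse(pattern)?;
    // hir::Error converts into Error::Translate via From.
    let hir = hir::translate::Translator::new().translate(pattern, &ast)?;
    Ok(hir)
}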
The width is - /// used for left padding the line numbers for alignment. - /// - /// A value of `0` means line numbers should not be displayed. That is, - /// the pattern is itself only one line. - line_number_width: usize, - /// All error spans that occur on a single line. This sequence always has - /// length equivalent to the number of lines in `pattern`, where the index - /// of the sequence represents a line number, starting at `0`. The spans - /// in each line are sorted in ascending order. - by_line: Vec>, - /// All error spans that occur over one or more lines. That is, the start - /// and end position of the span have different line numbers. The spans are - /// sorted in ascending order. - multi_line: Vec, -} - -impl<'p> Spans<'p> { - /// Build a sequence of spans from a formatter. - fn from_formatter<'e, E: core::fmt::Display>( - fmter: &'p Formatter<'e, E>, - ) -> Spans<'p> { - let mut line_count = fmter.pattern.lines().count(); - // If the pattern ends with a `\n` literal, then our line count is - // off by one, since a span can occur immediately after the last `\n`, - // which is consider to be an additional line. - if fmter.pattern.ends_with('\n') { - line_count += 1; - } - let line_number_width = - if line_count <= 1 { 0 } else { line_count.to_string().len() }; - let mut spans = Spans { - pattern: &fmter.pattern, - line_number_width, - by_line: vec![vec![]; line_count], - multi_line: vec![], - }; - spans.add(fmter.span.clone()); - if let Some(span) = fmter.aux_span { - spans.add(span.clone()); - } - spans - } - - /// Add the given span to this sequence, putting it in the right place. - fn add(&mut self, span: ast::Span) { - // This is grossly inefficient since we sort after each add, but right - // now, we only ever add two spans at most. - if span.is_one_line() { - let i = span.start.line - 1; // because lines are 1-indexed - self.by_line[i].push(span); - self.by_line[i].sort(); - } else { - self.multi_line.push(span); - self.multi_line.sort(); - } - } - - /// Notate the pattern string with carets (`^`) pointing at each span - /// location. This only applies to spans that occur within a single line. - fn notate(&self) -> String { - let mut notated = String::new(); - for (i, line) in self.pattern.lines().enumerate() { - if self.line_number_width > 0 { - notated.push_str(&self.left_pad_line_number(i + 1)); - notated.push_str(": "); - } else { - notated.push_str(" "); - } - notated.push_str(line); - notated.push('\n'); - if let Some(notes) = self.notate_line(i) { - notated.push_str(¬es); - notated.push('\n'); - } - } - notated - } - - /// Return notes for the line indexed at `i` (zero-based). If there are no - /// spans for the given line, then `None` is returned. Otherwise, an - /// appropriately space padded string with correctly positioned `^` is - /// returned, accounting for line numbers. - fn notate_line(&self, i: usize) -> Option { - let spans = &self.by_line[i]; - if spans.is_empty() { - return None; - } - let mut notes = String::new(); - for _ in 0..self.line_number_padding() { - notes.push(' '); - } - let mut pos = 0; - for span in spans { - for _ in pos..(span.start.column - 1) { - notes.push(' '); - pos += 1; - } - let note_len = span.end.column.saturating_sub(span.start.column); - for _ in 0..core::cmp::max(1, note_len) { - notes.push('^'); - pos += 1; - } - } - Some(notes) - } - - /// Left pad the given line number with spaces such that it is aligned with - /// other line numbers. 
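// What the span-notation machinery above produces in practice: parsing an
// invalid pattern and formatting the error yields the caret-annotated output
// exercised by the tests below.
use regex_syntax::ast::parse::Parser;

fn show_parse_error() {
    let err = Parser::new().parse(r"\\u{[^}]*}").unwrap_err();
    // The Display output notates the pattern with a caret under the offending
    // span and ends with:
    //   error: repetition quantifier expects a valid decimal
    println!("{err}");
}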
- fn left_pad_line_number(&self, n: usize) -> String { - let n = n.to_string(); - let pad = self.line_number_width.checked_sub(n.len()).unwrap(); - let mut result = repeat_char(' ', pad); - result.push_str(&n); - result - } - - /// Return the line number padding beginning at the start of each line of - /// the pattern. - /// - /// If the pattern is only one line, then this returns a fixed padding - /// for visual indentation. - fn line_number_padding(&self) -> usize { - if self.line_number_width == 0 { - 4 - } else { - 2 + self.line_number_width - } - } -} - -fn repeat_char(c: char, count: usize) -> String { - core::iter::repeat(c).take(count).collect() -} - -#[cfg(test)] -mod tests { - use alloc::string::ToString; - - use crate::ast::parse::Parser; - - fn assert_panic_message(pattern: &str, expected_msg: &str) { - let result = Parser::new().parse(pattern); - match result { - Ok(_) => { - panic!("regex should not have parsed"); - } - Err(err) => { - assert_eq!(err.to_string(), expected_msg.trim()); - } - } - } - - // See: https://github.com/rust-lang/regex/issues/464 - #[test] - fn regression_464() { - let err = Parser::new().parse("a{\n").unwrap_err(); - // This test checks that the error formatter doesn't panic. - assert!(!err.to_string().is_empty()); - } - - // See: https://github.com/rust-lang/regex/issues/545 - #[test] - fn repetition_quantifier_expects_a_valid_decimal() { - assert_panic_message( - r"\\u{[^}]*}", - r#" -regex parse error: - \\u{[^}]*} - ^ -error: repetition quantifier expects a valid decimal -"#, - ); - } -} diff --git a/vendor/regex-syntax/src/hir/interval.rs b/vendor/regex-syntax/src/hir/interval.rs deleted file mode 100644 index d507ee724d3918..00000000000000 --- a/vendor/regex-syntax/src/hir/interval.rs +++ /dev/null @@ -1,564 +0,0 @@ -use core::{char, cmp, fmt::Debug, slice}; - -use alloc::vec::Vec; - -use crate::unicode; - -// This module contains an *internal* implementation of interval sets. -// -// The primary invariant that interval sets guards is canonical ordering. That -// is, every interval set contains an ordered sequence of intervals where -// no two intervals are overlapping or adjacent. While this invariant is -// occasionally broken within the implementation, it should be impossible for -// callers to observe it. -// -// Since case folding (as implemented below) breaks that invariant, we roll -// that into this API even though it is a little out of place in an otherwise -// generic interval set. (Hence the reason why the `unicode` module is imported -// here.) -// -// Some of the implementation complexity here is a result of me wanting to -// preserve the sequential representation without using additional memory. -// In many cases, we do use linear extra memory, but it is at most 2x and it -// is amortized. If we relaxed the memory requirements, this implementation -// could become much simpler. The extra memory is honestly probably OK, but -// character classes (especially of the Unicode variety) can become quite -// large, and it would be nice to keep regex compilation snappy even in debug -// builds. (In the past, I have been careless with this area of code and it has -// caused slow regex compilations in debug mode, so this isn't entirely -// unwarranted.) -// -// Tests on this are relegated to the public API of HIR in src/hir.rs. - -#[derive(Clone, Debug)] -pub struct IntervalSet { - /// A sorted set of non-overlapping ranges. 
- ranges: Vec, - /// While not required at all for correctness, we keep track of whether an - /// interval set has been case folded or not. This helps us avoid doing - /// redundant work if, for example, a set has already been cased folded. - /// And note that whether a set is folded or not is preserved through - /// all of the pairwise set operations. That is, if both interval sets - /// have been case folded, then any of difference, union, intersection or - /// symmetric difference all produce a case folded set. - /// - /// Note that when this is true, it *must* be the case that the set is case - /// folded. But when it's false, the set *may* be case folded. In other - /// words, we only set this to true when we know it to be case, but we're - /// okay with it being false if it would otherwise be costly to determine - /// whether it should be true. This means code cannot assume that a false - /// value necessarily indicates that the set is not case folded. - /// - /// Bottom line: this is a performance optimization. - folded: bool, -} - -impl Eq for IntervalSet {} - -// We implement PartialEq manually so that we don't consider the set's internal -// 'folded' property to be part of its identity. The 'folded' property is -// strictly an optimization. -impl PartialEq for IntervalSet { - fn eq(&self, other: &IntervalSet) -> bool { - self.ranges.eq(&other.ranges) - } -} - -impl IntervalSet { - /// Create a new set from a sequence of intervals. Each interval is - /// specified as a pair of bounds, where both bounds are inclusive. - /// - /// The given ranges do not need to be in any specific order, and ranges - /// may overlap. - pub fn new>(intervals: T) -> IntervalSet { - let ranges: Vec = intervals.into_iter().collect(); - // An empty set is case folded. - let folded = ranges.is_empty(); - let mut set = IntervalSet { ranges, folded }; - set.canonicalize(); - set - } - - /// Add a new interval to this set. - pub fn push(&mut self, interval: I) { - // TODO: This could be faster. e.g., Push the interval such that - // it preserves canonicalization. - self.ranges.push(interval); - self.canonicalize(); - // We don't know whether the new interval added here is considered - // case folded, so we conservatively assume that the entire set is - // no longer case folded if it was previously. - self.folded = false; - } - - /// Return an iterator over all intervals in this set. - /// - /// The iterator yields intervals in ascending order. - pub fn iter(&self) -> IntervalSetIter<'_, I> { - IntervalSetIter(self.ranges.iter()) - } - - /// Return an immutable slice of intervals in this set. - /// - /// The sequence returned is in canonical ordering. - pub fn intervals(&self) -> &[I] { - &self.ranges - } - - /// Expand this interval set such that it contains all case folded - /// characters. For example, if this class consists of the range `a-z`, - /// then applying case folding will result in the class containing both the - /// ranges `a-z` and `A-Z`. - /// - /// This returns an error if the necessary case mapping data is not - /// available. - pub fn case_fold_simple(&mut self) -> Result<(), unicode::CaseFoldError> { - if self.folded { - return Ok(()); - } - let len = self.ranges.len(); - for i in 0..len { - let range = self.ranges[i]; - if let Err(err) = range.case_fold_simple(&mut self.ranges) { - self.canonicalize(); - return Err(err); - } - } - self.canonicalize(); - self.folded = true; - Ok(()) - } - - /// Union this set with the given set, in place. 
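// A sketch of the case-folding behaviour described above, observed through
// the public `hir::ClassUnicode` wrapper around this interval set (the
// wrapper's method names are assumed from the crate's public API).
use regex_syntax::hir::{ClassUnicode, ClassUnicodeRange};

fn fold_ascii_lowercase() {
    let mut cls = ClassUnicode::new([ClassUnicodeRange::new('a', 'z')]);
    cls.case_fold_simple();
    // The folded class now also covers A-Z (plus a couple of non-ASCII
    // equivalents such as the Kelvin sign, which folds to 'k').
    assert!(cls
        .ranges()
        .iter()
        .any(|r| r.start() == 'A' && r.end() == 'Z'));
}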
- pub fn union(&mut self, other: &IntervalSet) { - if other.ranges.is_empty() || self.ranges == other.ranges { - return; - } - // This could almost certainly be done more efficiently. - self.ranges.extend(&other.ranges); - self.canonicalize(); - self.folded = self.folded && other.folded; - } - - /// Intersect this set with the given set, in place. - pub fn intersect(&mut self, other: &IntervalSet) { - if self.ranges.is_empty() { - return; - } - if other.ranges.is_empty() { - self.ranges.clear(); - // An empty set is case folded. - self.folded = true; - return; - } - - // There should be a way to do this in-place with constant memory, - // but I couldn't figure out a simple way to do it. So just append - // the intersection to the end of this range, and then drain it before - // we're done. - let drain_end = self.ranges.len(); - - let mut ita = 0..drain_end; - let mut itb = 0..other.ranges.len(); - let mut a = ita.next().unwrap(); - let mut b = itb.next().unwrap(); - loop { - if let Some(ab) = self.ranges[a].intersect(&other.ranges[b]) { - self.ranges.push(ab); - } - let (it, aorb) = - if self.ranges[a].upper() < other.ranges[b].upper() { - (&mut ita, &mut a) - } else { - (&mut itb, &mut b) - }; - match it.next() { - Some(v) => *aorb = v, - None => break, - } - } - self.ranges.drain(..drain_end); - self.folded = self.folded && other.folded; - } - - /// Subtract the given set from this set, in place. - pub fn difference(&mut self, other: &IntervalSet) { - if self.ranges.is_empty() || other.ranges.is_empty() { - return; - } - - // This algorithm is (to me) surprisingly complex. A search of the - // interwebs indicate that this is a potentially interesting problem. - // Folks seem to suggest interval or segment trees, but I'd like to - // avoid the overhead (both runtime and conceptual) of that. - // - // The following is basically my Shitty First Draft. Therefore, in - // order to grok it, you probably need to read each line carefully. - // Simplifications are most welcome! - // - // Remember, we can assume the canonical format invariant here, which - // says that all ranges are sorted, not overlapping and not adjacent in - // each class. - let drain_end = self.ranges.len(); - let (mut a, mut b) = (0, 0); - 'LOOP: while a < drain_end && b < other.ranges.len() { - // Basically, the easy cases are when neither range overlaps with - // each other. If the `b` range is less than our current `a` - // range, then we can skip it and move on. - if other.ranges[b].upper() < self.ranges[a].lower() { - b += 1; - continue; - } - // ... similarly for the `a` range. If it's less than the smallest - // `b` range, then we can add it as-is. - if self.ranges[a].upper() < other.ranges[b].lower() { - let range = self.ranges[a]; - self.ranges.push(range); - a += 1; - continue; - } - // Otherwise, we have overlapping ranges. - assert!(!self.ranges[a].is_intersection_empty(&other.ranges[b])); - - // This part is tricky and was non-obvious to me without looking - // at explicit examples (see the tests). The trickiness stems from - // two things: 1) subtracting a range from another range could - // yield two ranges and 2) after subtracting a range, it's possible - // that future ranges can have an impact. The loop below advances - // the `b` ranges until they can't possible impact the current - // range. - // - // For example, if our `a` range is `a-t` and our next three `b` - // ranges are `a-c`, `g-i`, `r-t` and `x-z`, then we need to apply - // subtraction three times before moving on to the next `a` range. 
- let mut range = self.ranges[a]; - while b < other.ranges.len() - && !range.is_intersection_empty(&other.ranges[b]) - { - let old_range = range; - range = match range.difference(&other.ranges[b]) { - (None, None) => { - // We lost the entire range, so move on to the next - // without adding this one. - a += 1; - continue 'LOOP; - } - (Some(range1), None) | (None, Some(range1)) => range1, - (Some(range1), Some(range2)) => { - self.ranges.push(range1); - range2 - } - }; - // It's possible that the `b` range has more to contribute - // here. In particular, if it is greater than the original - // range, then it might impact the next `a` range *and* it - // has impacted the current `a` range as much as possible, - // so we can quit. We don't bump `b` so that the next `a` - // range can apply it. - if other.ranges[b].upper() > old_range.upper() { - break; - } - // Otherwise, the next `b` range might apply to the current - // `a` range. - b += 1; - } - self.ranges.push(range); - a += 1; - } - while a < drain_end { - let range = self.ranges[a]; - self.ranges.push(range); - a += 1; - } - self.ranges.drain(..drain_end); - self.folded = self.folded && other.folded; - } - - /// Compute the symmetric difference of the two sets, in place. - /// - /// This computes the symmetric difference of two interval sets. This - /// removes all elements in this set that are also in the given set, - /// but also adds all elements from the given set that aren't in this - /// set. That is, the set will contain all elements in either set, - /// but will not contain any elements that are in both sets. - pub fn symmetric_difference(&mut self, other: &IntervalSet) { - // TODO(burntsushi): Fix this so that it amortizes allocation. - let mut intersection = self.clone(); - intersection.intersect(other); - self.union(other); - self.difference(&intersection); - } - - /// Negate this interval set. - /// - /// For all `x` where `x` is any element, if `x` was in this set, then it - /// will not be in this set after negation. - pub fn negate(&mut self) { - if self.ranges.is_empty() { - let (min, max) = (I::Bound::min_value(), I::Bound::max_value()); - self.ranges.push(I::create(min, max)); - // The set containing everything must case folded. - self.folded = true; - return; - } - - // There should be a way to do this in-place with constant memory, - // but I couldn't figure out a simple way to do it. So just append - // the negation to the end of this range, and then drain it before - // we're done. - let drain_end = self.ranges.len(); - - // We do checked arithmetic below because of the canonical ordering - // invariant. - if self.ranges[0].lower() > I::Bound::min_value() { - let upper = self.ranges[0].lower().decrement(); - self.ranges.push(I::create(I::Bound::min_value(), upper)); - } - for i in 1..drain_end { - let lower = self.ranges[i - 1].upper().increment(); - let upper = self.ranges[i].lower().decrement(); - self.ranges.push(I::create(lower, upper)); - } - if self.ranges[drain_end - 1].upper() < I::Bound::max_value() { - let lower = self.ranges[drain_end - 1].upper().increment(); - self.ranges.push(I::create(lower, I::Bound::max_value())); - } - self.ranges.drain(..drain_end); - // We don't need to update whether this set is folded or not, because - // it is conservatively preserved through negation. Namely, if a set - // is not folded, then it is possible that its negation is folded, for - // example, [^☃]. But we're fine with assuming that the set is not - // folded in that case. 
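// The subtraction traced above, exercised through the public
// `hir::ClassUnicode` wrapper (method names assumed from the crate's public
// API), together with the `[^☃]`-style negation mentioned in the comment.
use regex_syntax::hir::{ClassUnicode, ClassUnicodeRange};

fn set_ops() {
    let range = |a, b| ClassUnicodeRange::new(a, b);

    // a-t minus (a-c, g-i, r-t) leaves d-f and j-q.
    let mut lhs = ClassUnicode::new([range('a', 't')]);
    let rhs =
        ClassUnicode::new([range('a', 'c'), range('g', 'i'), range('r', 't')]);
    lhs.difference(&rhs);
    assert_eq!(lhs.ranges(), &[range('d', 'f'), range('j', 'q')]);

    // Negating a single-codepoint class yields the two surrounding ranges.
    let mut snowman = ClassUnicode::new([range('☃', '☃')]);
    snowman.negate();
    assert_eq!(
        snowman.ranges(),
        &[range('\0', '\u{2602}'), range('\u{2604}', '\u{10FFFF}')],
    );
}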
(`folded` permits false negatives but not false - // positives.) - // - // But what about when a set is folded, is its negation also - // necessarily folded? Yes. Because if a set is folded, then for every - // character in the set, it necessarily included its equivalence class - // of case folded characters. Negating it in turn means that all - // equivalence classes in the set are negated, and any equivalence - // class that was previously not in the set is now entirely in the set. - } - - /// Converts this set into a canonical ordering. - fn canonicalize(&mut self) { - if self.is_canonical() { - return; - } - self.ranges.sort(); - assert!(!self.ranges.is_empty()); - - // Is there a way to do this in-place with constant memory? I couldn't - // figure out a way to do it. So just append the canonicalization to - // the end of this range, and then drain it before we're done. - let drain_end = self.ranges.len(); - for oldi in 0..drain_end { - // If we've added at least one new range, then check if we can - // merge this range in the previously added range. - if self.ranges.len() > drain_end { - let (last, rest) = self.ranges.split_last_mut().unwrap(); - if let Some(union) = last.union(&rest[oldi]) { - *last = union; - continue; - } - } - let range = self.ranges[oldi]; - self.ranges.push(range); - } - self.ranges.drain(..drain_end); - } - - /// Returns true if and only if this class is in a canonical ordering. - fn is_canonical(&self) -> bool { - for pair in self.ranges.windows(2) { - if pair[0] >= pair[1] { - return false; - } - if pair[0].is_contiguous(&pair[1]) { - return false; - } - } - true - } -} - -/// An iterator over intervals. -#[derive(Debug)] -pub struct IntervalSetIter<'a, I>(slice::Iter<'a, I>); - -impl<'a, I> Iterator for IntervalSetIter<'a, I> { - type Item = &'a I; - - fn next(&mut self) -> Option<&'a I> { - self.0.next() - } -} - -pub trait Interval: - Clone + Copy + Debug + Default + Eq + PartialEq + PartialOrd + Ord -{ - type Bound: Bound; - - fn lower(&self) -> Self::Bound; - fn upper(&self) -> Self::Bound; - fn set_lower(&mut self, bound: Self::Bound); - fn set_upper(&mut self, bound: Self::Bound); - fn case_fold_simple( - &self, - intervals: &mut Vec, - ) -> Result<(), unicode::CaseFoldError>; - - /// Create a new interval. - fn create(lower: Self::Bound, upper: Self::Bound) -> Self { - let mut int = Self::default(); - if lower <= upper { - int.set_lower(lower); - int.set_upper(upper); - } else { - int.set_lower(upper); - int.set_upper(lower); - } - int - } - - /// Union the given overlapping range into this range. - /// - /// If the two ranges aren't contiguous, then this returns `None`. - fn union(&self, other: &Self) -> Option { - if !self.is_contiguous(other) { - return None; - } - let lower = cmp::min(self.lower(), other.lower()); - let upper = cmp::max(self.upper(), other.upper()); - Some(Self::create(lower, upper)) - } - - /// Intersect this range with the given range and return the result. - /// - /// If the intersection is empty, then this returns `None`. - fn intersect(&self, other: &Self) -> Option { - let lower = cmp::max(self.lower(), other.lower()); - let upper = cmp::min(self.upper(), other.upper()); - if lower <= upper { - Some(Self::create(lower, upper)) - } else { - None - } - } - - /// Subtract the given range from this range and return the resulting - /// ranges. - /// - /// If subtraction would result in an empty range, then no ranges are - /// returned. 
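// Canonicalization as described above, observed through the public
// `hir::ClassUnicode` constructor (assumed public API): overlapping and
// adjacent input ranges are merged into sorted, non-adjacent form.
use regex_syntax::hir::{ClassUnicode, ClassUnicodeRange};

fn canonical_on_construction() {
    let range = |a, b| ClassUnicodeRange::new(a, b);
    let cls =
        ClassUnicode::new([range('m', 'z'), range('a', 'c'), range('d', 'n')]);
    assert_eq!(cls.ranges(), &[range('a', 'z')]);
}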
- fn difference(&self, other: &Self) -> (Option, Option) { - if self.is_subset(other) { - return (None, None); - } - if self.is_intersection_empty(other) { - return (Some(self.clone()), None); - } - let add_lower = other.lower() > self.lower(); - let add_upper = other.upper() < self.upper(); - // We know this because !self.is_subset(other) and the ranges have - // a non-empty intersection. - assert!(add_lower || add_upper); - let mut ret = (None, None); - if add_lower { - let upper = other.lower().decrement(); - ret.0 = Some(Self::create(self.lower(), upper)); - } - if add_upper { - let lower = other.upper().increment(); - let range = Self::create(lower, self.upper()); - if ret.0.is_none() { - ret.0 = Some(range); - } else { - ret.1 = Some(range); - } - } - ret - } - - /// Returns true if and only if the two ranges are contiguous. Two ranges - /// are contiguous if and only if the ranges are either overlapping or - /// adjacent. - fn is_contiguous(&self, other: &Self) -> bool { - let lower1 = self.lower().as_u32(); - let upper1 = self.upper().as_u32(); - let lower2 = other.lower().as_u32(); - let upper2 = other.upper().as_u32(); - cmp::max(lower1, lower2) <= cmp::min(upper1, upper2).saturating_add(1) - } - - /// Returns true if and only if the intersection of this range and the - /// other range is empty. - fn is_intersection_empty(&self, other: &Self) -> bool { - let (lower1, upper1) = (self.lower(), self.upper()); - let (lower2, upper2) = (other.lower(), other.upper()); - cmp::max(lower1, lower2) > cmp::min(upper1, upper2) - } - - /// Returns true if and only if this range is a subset of the other range. - fn is_subset(&self, other: &Self) -> bool { - let (lower1, upper1) = (self.lower(), self.upper()); - let (lower2, upper2) = (other.lower(), other.upper()); - (lower2 <= lower1 && lower1 <= upper2) - && (lower2 <= upper1 && upper1 <= upper2) - } -} - -pub trait Bound: - Copy + Clone + Debug + Eq + PartialEq + PartialOrd + Ord -{ - fn min_value() -> Self; - fn max_value() -> Self; - fn as_u32(self) -> u32; - fn increment(self) -> Self; - fn decrement(self) -> Self; -} - -impl Bound for u8 { - fn min_value() -> Self { - u8::MIN - } - fn max_value() -> Self { - u8::MAX - } - fn as_u32(self) -> u32 { - u32::from(self) - } - fn increment(self) -> Self { - self.checked_add(1).unwrap() - } - fn decrement(self) -> Self { - self.checked_sub(1).unwrap() - } -} - -impl Bound for char { - fn min_value() -> Self { - '\x00' - } - fn max_value() -> Self { - '\u{10FFFF}' - } - fn as_u32(self) -> u32 { - u32::from(self) - } - - fn increment(self) -> Self { - match self { - '\u{D7FF}' => '\u{E000}', - c => char::from_u32(u32::from(c).checked_add(1).unwrap()).unwrap(), - } - } - - fn decrement(self) -> Self { - match self { - '\u{E000}' => '\u{D7FF}', - c => char::from_u32(u32::from(c).checked_sub(1).unwrap()).unwrap(), - } - } -} - -// Tests for interval sets are written in src/hir.rs against the public API. diff --git a/vendor/regex-syntax/src/hir/literal.rs b/vendor/regex-syntax/src/hir/literal.rs deleted file mode 100644 index 2a6350e64663ce..00000000000000 --- a/vendor/regex-syntax/src/hir/literal.rs +++ /dev/null @@ -1,3214 +0,0 @@ -/*! -Provides literal extraction from `Hir` expressions. - -An [`Extractor`] pulls literals out of [`Hir`] expressions and returns a -[`Seq`] of [`Literal`]s. - -The purpose of literal extraction is generally to provide avenues for -optimizing regex searches. The main idea is that substring searches can be an -order of magnitude faster than a regex search. 
Therefore, if one can execute -a substring search to find candidate match locations and only run the regex -search at those locations, then it is possible for huge improvements in -performance to be realized. - -With that said, literal optimizations are generally a black art because even -though substring search is generally faster, if the number of candidates -produced is high, then it can create a lot of overhead by ping-ponging between -the substring search and the regex search. - -Here are some heuristics that might be used to help increase the chances of -effective literal optimizations: - -* Stick to small [`Seq`]s. If you search for too many literals, it's likely -to lead to substring search that is only a little faster than a regex search, -and thus the overhead of using literal optimizations in the first place might -make things slower overall. -* The literals in your [`Seq`] shouldn't be too short. In general, longer is -better. A sequence corresponding to single bytes that occur frequently in the -haystack, for example, is probably a bad literal optimization because it's -likely to produce many false positive candidates. Longer literals are less -likely to match, and thus probably produce fewer false positives. -* If it's possible to estimate the approximate frequency of each byte according -to some pre-computed background distribution, it is possible to compute a score -of how "good" a `Seq` is. If a `Seq` isn't good enough, you might consider -skipping the literal optimization and just use the regex engine. - -(It should be noted that there are always pathological cases that can make -any kind of literal optimization be a net slower result. This is why it -might be a good idea to be conservative, or to even provide a means for -literal optimizations to be dynamically disabled if they are determined to be -ineffective according to some measure.) - -You're encouraged to explore the methods on [`Seq`], which permit shrinking -the size of sequences in a preference-order preserving fashion. - -Finally, note that it isn't strictly necessary to use an [`Extractor`]. Namely, -an `Extractor` only uses public APIs of the [`Seq`] and [`Literal`] types, -so it is possible to implement your own extractor. For example, for n-grams -or "inner" literals (i.e., not prefix or suffix literals). The `Extractor` -is mostly responsible for the case analysis over `Hir` expressions. Much of -the "trickier" parts are how to combine literal sequences, and that is all -implemented on [`Seq`]. -*/ - -use core::{cmp, mem, num::NonZeroUsize}; - -use alloc::{vec, vec::Vec}; - -use crate::hir::{self, Hir}; - -/// Extracts prefix or suffix literal sequences from [`Hir`] expressions. -/// -/// Literal extraction is based on the following observations: -/// -/// * Many regexes start with one or a small number of literals. -/// * Substring search for literals is often much faster (sometimes by an order -/// of magnitude) than a regex search. -/// -/// Thus, in many cases, one can search for literals to find candidate starting -/// locations of a match, and then only run the full regex engine at each such -/// location instead of over the full haystack. -/// -/// The main downside of literal extraction is that it can wind up causing a -/// search to be slower overall. For example, if there are many matches or if -/// there are many candidates that don't ultimately lead to a match, then a -/// lot of overhead will be spent in shuffling back-and-forth between substring -/// search and the regex engine. 
This is the fundamental reason why literal -/// optimizations for regex patterns is sometimes considered a "black art." -/// -/// # Look-around assertions -/// -/// Literal extraction treats all look-around assertions as-if they match every -/// empty string. So for example, the regex `\bquux\b` will yield a sequence -/// containing a single exact literal `quux`. However, not all occurrences -/// of `quux` correspond to a match a of the regex. For example, `\bquux\b` -/// does not match `ZquuxZ` anywhere because `quux` does not fall on a word -/// boundary. -/// -/// In effect, if your regex contains look-around assertions, then a match of -/// an exact literal does not necessarily mean the regex overall matches. So -/// you may still need to run the regex engine in such cases to confirm the -/// match. -/// -/// The precise guarantee you get from a literal sequence is: if every literal -/// in the sequence is exact and the original regex contains zero look-around -/// assertions, then a preference-order multi-substring search of those -/// literals will precisely match a preference-order search of the original -/// regex. -/// -/// # Example -/// -/// This shows how to extract prefixes: -/// -/// ``` -/// use regex_syntax::{hir::literal::{Extractor, Literal, Seq}, parse}; -/// -/// let hir = parse(r"(a|b|c)(x|y|z)[A-Z]+foo")?; -/// let got = Extractor::new().extract(&hir); -/// // All literals returned are "inexact" because none of them reach the -/// // match state. -/// let expected = Seq::from_iter([ -/// Literal::inexact("ax"), -/// Literal::inexact("ay"), -/// Literal::inexact("az"), -/// Literal::inexact("bx"), -/// Literal::inexact("by"), -/// Literal::inexact("bz"), -/// Literal::inexact("cx"), -/// Literal::inexact("cy"), -/// Literal::inexact("cz"), -/// ]); -/// assert_eq!(expected, got); -/// -/// # Ok::<(), Box>(()) -/// ``` -/// -/// This shows how to extract suffixes: -/// -/// ``` -/// use regex_syntax::{ -/// hir::literal::{Extractor, ExtractKind, Literal, Seq}, -/// parse, -/// }; -/// -/// let hir = parse(r"foo|[A-Z]+bar")?; -/// let got = Extractor::new().kind(ExtractKind::Suffix).extract(&hir); -/// // Since 'foo' gets to a match state, it is considered exact. But 'bar' -/// // does not because of the '[A-Z]+', and thus is marked inexact. -/// let expected = Seq::from_iter([ -/// Literal::exact("foo"), -/// Literal::inexact("bar"), -/// ]); -/// assert_eq!(expected, got); -/// -/// # Ok::<(), Box>(()) -/// ``` -#[derive(Clone, Debug)] -pub struct Extractor { - kind: ExtractKind, - limit_class: usize, - limit_repeat: usize, - limit_literal_len: usize, - limit_total: usize, -} - -impl Extractor { - /// Create a new extractor with a default configuration. - /// - /// The extractor can be optionally configured before calling - /// [`Extractor::extract`] to get a literal sequence. - pub fn new() -> Extractor { - Extractor { - kind: ExtractKind::Prefix, - limit_class: 10, - limit_repeat: 10, - limit_literal_len: 100, - limit_total: 250, - } - } - - /// Execute the extractor and return a sequence of literals. 
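A small sketch of the look-around caveat described above, assuming extraction behaves as these comments document: `\bquux\b` yields a single exact literal, but a hit on it still has to be confirmed by the regex because of the word boundaries.

```
use regex_syntax::{hir::literal::Extractor, parse};

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Word boundaries are treated as matching the empty string, so the
    // extractor reports one exact literal "quux"; a substring hit on it
    // is still only a candidate that the full regex must confirm.
    let hir = parse(r"\bquux\b")?;
    let seq = Extractor::new().extract(&hir);
    assert!(seq.is_exact());
    assert_eq!(Some(1), seq.len());
    Ok(())
}
```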
- pub fn extract(&self, hir: &Hir) -> Seq { - use crate::hir::HirKind::*; - - match *hir.kind() { - Empty | Look(_) => Seq::singleton(self::Literal::exact(vec![])), - Literal(hir::Literal(ref bytes)) => { - let mut seq = - Seq::singleton(self::Literal::exact(bytes.to_vec())); - self.enforce_literal_len(&mut seq); - seq - } - Class(hir::Class::Unicode(ref cls)) => { - self.extract_class_unicode(cls) - } - Class(hir::Class::Bytes(ref cls)) => self.extract_class_bytes(cls), - Repetition(ref rep) => self.extract_repetition(rep), - Capture(hir::Capture { ref sub, .. }) => self.extract(sub), - Concat(ref hirs) => match self.kind { - ExtractKind::Prefix => self.extract_concat(hirs.iter()), - ExtractKind::Suffix => self.extract_concat(hirs.iter().rev()), - }, - Alternation(ref hirs) => { - // Unlike concat, we always union starting from the beginning, - // since the beginning corresponds to the highest preference, - // which doesn't change based on forwards vs reverse. - self.extract_alternation(hirs.iter()) - } - } - } - - /// Set the kind of literal sequence to extract from an [`Hir`] expression. - /// - /// The default is to extract prefixes, but suffixes can be selected - /// instead. The contract for prefixes is that every match of the - /// corresponding `Hir` must start with one of the literals in the sequence - /// returned. Moreover, the _order_ of the sequence returned corresponds to - /// the preference order. - /// - /// Suffixes satisfy a similar contract in that every match of the - /// corresponding `Hir` must end with one of the literals in the sequence - /// returned. However, there is no guarantee that the literals are in - /// preference order. - /// - /// Remember that a sequence can be infinite. For example, unless the - /// limits are configured to be impractically large, attempting to extract - /// prefixes (or suffixes) for the pattern `[A-Z]` will return an infinite - /// sequence. Generally speaking, if the sequence returned is infinite, - /// then it is presumed to be unwise to do prefix (or suffix) optimizations - /// for the pattern. - pub fn kind(&mut self, kind: ExtractKind) -> &mut Extractor { - self.kind = kind; - self - } - - /// Configure a limit on the length of the sequence that is permitted for - /// a character class. If a character class exceeds this limit, then the - /// sequence returned for it is infinite. - /// - /// This prevents classes like `[A-Z]` or `\pL` from getting turned into - /// huge and likely unproductive sequences of literals. - /// - /// # Example - /// - /// This example shows how this limit can be lowered to decrease the tolerance - /// for character classes being turned into literal sequences. - /// - /// ``` - /// use regex_syntax::{hir::literal::{Extractor, Seq}, parse}; - /// - /// let hir = parse(r"[0-9]")?; - /// - /// let got = Extractor::new().extract(&hir); - /// let expected = Seq::new([ - /// "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", - /// ]); - /// assert_eq!(expected, got); - /// - /// // Now let's shrink the limit and see how that changes things. - /// let got = Extractor::new().limit_class(4).extract(&hir); - /// let expected = Seq::infinite(); - /// assert_eq!(expected, got); - /// - /// # Ok::<(), Box>(()) - /// ``` - pub fn limit_class(&mut self, limit: usize) -> &mut Extractor { - self.limit_class = limit; - self - } - - /// Configure a limit on the total number of repetitions that is permitted - /// before literal extraction is stopped. 
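A sketch of how the limit knobs compose, using the builder-style configuration shown in the doc examples above; the pattern and limit values are illustrative assumptions, and the infinite result assumes the class and repetition handling implemented in this file.

```
use regex_syntax::{hir::literal::Extractor, parse};

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // With the class limit lowered to 4, the five-element class [a-e] is
    // already "too big", so the repetition can only produce an infinite
    // (i.e. unusable) sequence.
    let hir = parse(r"[a-e]{4}")?;
    let got = Extractor::new()
        .limit_class(4)
        .limit_repeat(2)
        .limit_total(20)
        .extract(&hir);
    assert!(!got.is_finite());
    Ok(())
}
```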
- /// - /// This is useful for limiting things like `(abcde){50}`, or more - /// insidiously, `(?:){1000000000}`. This limit prevents any one single - /// repetition from adding too much to a literal sequence. - /// - /// With this limit set, repetitions that exceed it will be stopped and any - /// literals extracted up to that point will be made inexact. - /// - /// # Example - /// - /// This shows how to decrease the limit and compares it with the default. - /// - /// ``` - /// use regex_syntax::{hir::literal::{Extractor, Literal, Seq}, parse}; - /// - /// let hir = parse(r"(abc){8}")?; - /// - /// let got = Extractor::new().extract(&hir); - /// let expected = Seq::new(["abcabcabcabcabcabcabcabc"]); - /// assert_eq!(expected, got); - /// - /// // Now let's shrink the limit and see how that changes things. - /// let got = Extractor::new().limit_repeat(4).extract(&hir); - /// let expected = Seq::from_iter([ - /// Literal::inexact("abcabcabcabc"), - /// ]); - /// assert_eq!(expected, got); - /// - /// # Ok::<(), Box>(()) - /// ``` - pub fn limit_repeat(&mut self, limit: usize) -> &mut Extractor { - self.limit_repeat = limit; - self - } - - /// Configure a limit on the maximum length of any literal in a sequence. - /// - /// This is useful for limiting things like `(abcde){5}{5}{5}{5}`. While - /// each repetition or literal in that regex is small, when all the - /// repetitions are applied, one ends up with a literal of length `5^4 = - /// 625`. - /// - /// With this limit set, literals that exceed it will be made inexact and - /// thus prevented from growing. - /// - /// # Example - /// - /// This shows how to decrease the limit and compares it with the default. - /// - /// ``` - /// use regex_syntax::{hir::literal::{Extractor, Literal, Seq}, parse}; - /// - /// let hir = parse(r"(abc){2}{2}{2}")?; - /// - /// let got = Extractor::new().extract(&hir); - /// let expected = Seq::new(["abcabcabcabcabcabcabcabc"]); - /// assert_eq!(expected, got); - /// - /// // Now let's shrink the limit and see how that changes things. - /// let got = Extractor::new().limit_literal_len(14).extract(&hir); - /// let expected = Seq::from_iter([ - /// Literal::inexact("abcabcabcabcab"), - /// ]); - /// assert_eq!(expected, got); - /// - /// # Ok::<(), Box>(()) - /// ``` - pub fn limit_literal_len(&mut self, limit: usize) -> &mut Extractor { - self.limit_literal_len = limit; - self - } - - /// Configure a limit on the total number of literals that will be - /// returned. - /// - /// This is useful as a practical measure for avoiding the creation of - /// large sequences of literals. While the extractor will automatically - /// handle local creations of large sequences (for example, `[A-Z]` yields - /// an infinite sequence by default), large sequences can be created - /// through non-local means as well. - /// - /// For example, `[ab]{3}{3}` would yield a sequence of length `512 = 2^9` - /// despite each of the repetitions being small on their own. This limit - /// thus represents a "catch all" for avoiding locally small sequences from - /// combining into large sequences. - /// - /// # Example - /// - /// This example shows how reducing the limit will change the literal - /// sequence returned. 
- /// - /// ``` - /// use regex_syntax::{hir::literal::{Extractor, Literal, Seq}, parse}; - /// - /// let hir = parse(r"[ab]{2}{2}")?; - /// - /// let got = Extractor::new().extract(&hir); - /// let expected = Seq::new([ - /// "aaaa", "aaab", "aaba", "aabb", - /// "abaa", "abab", "abba", "abbb", - /// "baaa", "baab", "baba", "babb", - /// "bbaa", "bbab", "bbba", "bbbb", - /// ]); - /// assert_eq!(expected, got); - /// - /// // The default limit is not too big, but big enough to extract all - /// // literals from '[ab]{2}{2}'. If we shrink the limit to less than 16, - /// // then we'll get a truncated set. Notice that it returns a sequence of - /// // length 4 even though our limit was 10. This is because the sequence - /// // is difficult to increase without blowing the limit. Notice also - /// // that every literal in the sequence is now inexact because they were - /// // stripped of some suffix. - /// let got = Extractor::new().limit_total(10).extract(&hir); - /// let expected = Seq::from_iter([ - /// Literal::inexact("aa"), - /// Literal::inexact("ab"), - /// Literal::inexact("ba"), - /// Literal::inexact("bb"), - /// ]); - /// assert_eq!(expected, got); - /// - /// # Ok::<(), Box>(()) - /// ``` - pub fn limit_total(&mut self, limit: usize) -> &mut Extractor { - self.limit_total = limit; - self - } - - /// Extract a sequence from the given concatenation. Sequences from each of - /// the child HIR expressions are combined via cross product. - /// - /// This short circuits once the cross product turns into a sequence - /// containing only inexact literals. - fn extract_concat<'a, I: Iterator>(&self, it: I) -> Seq { - let mut seq = Seq::singleton(self::Literal::exact(vec![])); - for hir in it { - // If every element in the sequence is inexact, then a cross - // product will always be a no-op. Thus, there is nothing else we - // can add to it and can quit early. Note that this also includes - // infinite sequences. - if seq.is_inexact() { - break; - } - // Note that 'cross' also dispatches based on whether we're - // extracting prefixes or suffixes. - seq = self.cross(seq, &mut self.extract(hir)); - } - seq - } - - /// Extract a sequence from the given alternation. - /// - /// This short circuits once the union turns into an infinite sequence. - fn extract_alternation<'a, I: Iterator>( - &self, - it: I, - ) -> Seq { - let mut seq = Seq::empty(); - for hir in it { - // Once our 'seq' is infinite, every subsequent union - // operation on it will itself always result in an - // infinite sequence. Thus, it can never change and we can - // short-circuit. - if !seq.is_finite() { - break; - } - seq = self.union(seq, &mut self.extract(hir)); - } - seq - } - - /// Extract a sequence of literals from the given repetition. We do our - /// best, Some examples: - /// - /// 'a*' => [inexact(a), exact("")] - /// 'a*?' => [exact(""), inexact(a)] - /// 'a+' => [inexact(a)] - /// 'a{3}' => [exact(aaa)] - /// 'a{3,5} => [inexact(aaa)] - /// - /// The key here really is making sure we get the 'inexact' vs 'exact' - /// attributes correct on each of the literals we add. For example, the - /// fact that 'a*' gives us an inexact 'a' and an exact empty string means - /// that a regex like 'ab*c' will result in [inexact(ab), exact(ac)] - /// literals being extracted, which might actually be a better prefilter - /// than just 'a'. - fn extract_repetition(&self, rep: &hir::Repetition) -> Seq { - let mut subseq = self.extract(&rep.sub); - match *rep { - hir::Repetition { min: 0, max, greedy, .. 
} => { - // When 'max=1', we can retain exactness, since 'a?' is - // equivalent to 'a|'. Similarly below, 'a??' is equivalent to - // '|a'. - if max != Some(1) { - subseq.make_inexact(); - } - let mut empty = Seq::singleton(Literal::exact(vec![])); - if !greedy { - mem::swap(&mut subseq, &mut empty); - } - self.union(subseq, &mut empty) - } - hir::Repetition { min, max: Some(max), .. } if min == max => { - assert!(min > 0); // handled above - let limit = - u32::try_from(self.limit_repeat).unwrap_or(u32::MAX); - let mut seq = Seq::singleton(Literal::exact(vec![])); - for _ in 0..cmp::min(min, limit) { - if seq.is_inexact() { - break; - } - seq = self.cross(seq, &mut subseq.clone()); - } - if usize::try_from(min).is_err() || min > limit { - seq.make_inexact(); - } - seq - } - hir::Repetition { min, .. } => { - assert!(min > 0); // handled above - let limit = - u32::try_from(self.limit_repeat).unwrap_or(u32::MAX); - let mut seq = Seq::singleton(Literal::exact(vec![])); - for _ in 0..cmp::min(min, limit) { - if seq.is_inexact() { - break; - } - seq = self.cross(seq, &mut subseq.clone()); - } - seq.make_inexact(); - seq - } - } - } - - /// Convert the given Unicode class into a sequence of literals if the - /// class is small enough. If the class is too big, return an infinite - /// sequence. - fn extract_class_unicode(&self, cls: &hir::ClassUnicode) -> Seq { - if self.class_over_limit_unicode(cls) { - return Seq::infinite(); - } - let mut seq = Seq::empty(); - for r in cls.iter() { - for ch in r.start()..=r.end() { - seq.push(Literal::from(ch)); - } - } - self.enforce_literal_len(&mut seq); - seq - } - - /// Convert the given byte class into a sequence of literals if the class - /// is small enough. If the class is too big, return an infinite sequence. - fn extract_class_bytes(&self, cls: &hir::ClassBytes) -> Seq { - if self.class_over_limit_bytes(cls) { - return Seq::infinite(); - } - let mut seq = Seq::empty(); - for r in cls.iter() { - for b in r.start()..=r.end() { - seq.push(Literal::from(b)); - } - } - self.enforce_literal_len(&mut seq); - seq - } - - /// Returns true if the given Unicode class exceeds the configured limits - /// on this extractor. - fn class_over_limit_unicode(&self, cls: &hir::ClassUnicode) -> bool { - let mut count = 0; - for r in cls.iter() { - if count > self.limit_class { - return true; - } - count += r.len(); - } - count > self.limit_class - } - - /// Returns true if the given byte class exceeds the configured limits on - /// this extractor. - fn class_over_limit_bytes(&self, cls: &hir::ClassBytes) -> bool { - let mut count = 0; - for r in cls.iter() { - if count > self.limit_class { - return true; - } - count += r.len(); - } - count > self.limit_class - } - - /// Compute the cross product of the two sequences if the result would be - /// within configured limits. Otherwise, make `seq2` infinite and cross the - /// infinite sequence with `seq1`. - fn cross(&self, mut seq1: Seq, seq2: &mut Seq) -> Seq { - if seq1.max_cross_len(seq2).map_or(false, |len| len > self.limit_total) - { - seq2.make_infinite(); - } - if let ExtractKind::Suffix = self.kind { - seq1.cross_reverse(seq2); - } else { - seq1.cross_forward(seq2); - } - assert!(seq1.len().map_or(true, |x| x <= self.limit_total)); - self.enforce_literal_len(&mut seq1); - seq1 - } - - /// Union the two sequences if the result would be within configured - /// limits. Otherwise, make `seq2` infinite and union the infinite sequence - /// with `seq1`. 
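A sketch of the repetition rules spelled out above for `extract_repetition`, assuming extraction behaves as those comments describe for `ab*c`:

```
use regex_syntax::{hir::literal::{Extractor, Literal, Seq}, parse};

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // 'b*' contributes an inexact "b" and an exact empty string, so the
    // concatenation keeps an inexact "ab" and an exact "ac".
    let hir = parse(r"ab*c")?;
    let got = Extractor::new().extract(&hir);
    let expected = Seq::from_iter([
        Literal::inexact("ab"),
        Literal::exact("ac"),
    ]);
    assert_eq!(expected, got);
    Ok(())
}
```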
- fn union(&self, mut seq1: Seq, seq2: &mut Seq) -> Seq { - if seq1.max_union_len(seq2).map_or(false, |len| len > self.limit_total) - { - // We try to trim our literal sequences to see if we can make - // room for more literals. The idea is that we'd rather trim down - // literals already in our sequence if it means we can add a few - // more and retain a finite sequence. Otherwise, we'll union with - // an infinite sequence and that infects everything and effectively - // stops literal extraction in its tracks. - // - // We do we keep 4 bytes here? Well, it's a bit of an abstraction - // leakage. Downstream, the literals may wind up getting fed to - // the Teddy algorithm, which supports searching literals up to - // length 4. So that's why we pick that number here. Arguably this - // should be a tunable parameter, but it seems a little tricky to - // describe. And I'm still unsure if this is the right way to go - // about culling literal sequences. - match self.kind { - ExtractKind::Prefix => { - seq1.keep_first_bytes(4); - seq2.keep_first_bytes(4); - } - ExtractKind::Suffix => { - seq1.keep_last_bytes(4); - seq2.keep_last_bytes(4); - } - } - seq1.dedup(); - seq2.dedup(); - if seq1 - .max_union_len(seq2) - .map_or(false, |len| len > self.limit_total) - { - seq2.make_infinite(); - } - } - seq1.union(seq2); - assert!(seq1.len().map_or(true, |x| x <= self.limit_total)); - seq1 - } - - /// Applies the literal length limit to the given sequence. If none of the - /// literals in the sequence exceed the limit, then this is a no-op. - fn enforce_literal_len(&self, seq: &mut Seq) { - let len = self.limit_literal_len; - match self.kind { - ExtractKind::Prefix => seq.keep_first_bytes(len), - ExtractKind::Suffix => seq.keep_last_bytes(len), - } - } -} - -impl Default for Extractor { - fn default() -> Extractor { - Extractor::new() - } -} - -/// The kind of literals to extract from an [`Hir`] expression. -/// -/// The default extraction kind is `Prefix`. -#[non_exhaustive] -#[derive(Clone, Debug)] -pub enum ExtractKind { - /// Extracts only prefix literals from a regex. - Prefix, - /// Extracts only suffix literals from a regex. - /// - /// Note that the sequence returned by suffix literals currently may - /// not correctly represent leftmost-first or "preference" order match - /// semantics. - Suffix, -} - -impl ExtractKind { - /// Returns true if this kind is the `Prefix` variant. - pub fn is_prefix(&self) -> bool { - matches!(*self, ExtractKind::Prefix) - } - - /// Returns true if this kind is the `Suffix` variant. - pub fn is_suffix(&self) -> bool { - matches!(*self, ExtractKind::Suffix) - } -} - -impl Default for ExtractKind { - fn default() -> ExtractKind { - ExtractKind::Prefix - } -} - -/// A sequence of literals. -/// -/// A `Seq` is very much like a set in that it represents a union of its -/// members. That is, it corresponds to a set of literals where at least one -/// must match in order for a particular [`Hir`] expression to match. (Whether -/// this corresponds to the entire `Hir` expression, a prefix of it or a suffix -/// of it depends on how the `Seq` was extracted from the `Hir`.) -/// -/// It is also unlike a set in that multiple identical literals may appear, -/// and that the order of the literals in the `Seq` matters. For example, if -/// the sequence is `[sam, samwise]` and leftmost-first matching is used, then -/// `samwise` can never match and the sequence is equivalent to `[sam]`. 
-/// -/// # States of a sequence -/// -/// A `Seq` has a few different logical states to consider: -/// -/// * The sequence can represent "any" literal. When this happens, the set does -/// not have a finite size. The purpose of this state is to inhibit callers -/// from making assumptions about what literals are required in order to match -/// a particular [`Hir`] expression. Generally speaking, when a set is in this -/// state, literal optimizations are inhibited. A good example of a regex that -/// will cause this sort of set to appear is `[A-Za-z]`. The character class -/// is just too big (and also too narrow) to be usefully expanded into 52 -/// different literals. (Note that the decision for when a seq should become -/// infinite is determined by the caller. A seq itself has no hard-coded -/// limits.) -/// * The sequence can be empty, in which case, it is an affirmative statement -/// that there are no literals that can match the corresponding `Hir`. -/// Consequently, the `Hir` never matches any input. For example, `[a&&b]`. -/// * The sequence can be non-empty, in which case, at least one of the -/// literals must match in order for the corresponding `Hir` to match. -/// -/// # Example -/// -/// This example shows how literal sequences can be simplified by stripping -/// suffixes and minimizing while maintaining preference order. -/// -/// ``` -/// use regex_syntax::hir::literal::{Literal, Seq}; -/// -/// let mut seq = Seq::new(&[ -/// "farm", -/// "appliance", -/// "faraway", -/// "apple", -/// "fare", -/// "gap", -/// "applicant", -/// "applaud", -/// ]); -/// seq.keep_first_bytes(3); -/// seq.minimize_by_preference(); -/// // Notice that 'far' comes before 'app', which matches the order in the -/// // original sequence. This guarantees that leftmost-first semantics are -/// // not altered by simplifying the set. -/// let expected = Seq::from_iter([ -/// Literal::inexact("far"), -/// Literal::inexact("app"), -/// Literal::exact("gap"), -/// ]); -/// assert_eq!(expected, seq); -/// ``` -#[derive(Clone, Eq, PartialEq)] -pub struct Seq { - /// The members of this seq. - /// - /// When `None`, the seq represents all possible literals. That is, it - /// prevents one from making assumptions about specific literals in the - /// seq, and forces one to treat it as if any literal might be in the seq. - /// - /// Note that `Some(vec![])` is valid and corresponds to the empty seq of - /// literals, i.e., a regex that can never match. For example, `[a&&b]`. - /// It is distinct from `Some(vec![""])`, which corresponds to the seq - /// containing an empty string, which matches at every position. - literals: Option>, -} - -impl Seq { - /// Returns an empty sequence. - /// - /// An empty sequence matches zero literals, and thus corresponds to a - /// regex that itself can never match. - #[inline] - pub fn empty() -> Seq { - Seq { literals: Some(vec![]) } - } - - /// Returns a sequence of literals without a finite size and may contain - /// any literal. - /// - /// A sequence without finite size does not reveal anything about the - /// characteristics of the literals in its set. There are no fixed prefixes - /// or suffixes, nor are lower or upper bounds on the length of the literals - /// in the set known. - /// - /// This is useful to represent constructs in a regex that are "too big" - /// to useful represent as a sequence of literals. For example, `[A-Za-z]`. 
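A sketch of how an oversized class degrades into an infinite sequence, assuming the default `limit_class` of 10 set in `Extractor::new`; the pattern is an illustrative choice.

```
use regex_syntax::{hir::literal::{Extractor, Seq}, parse};

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // 62 alternatives is far past the default class limit of 10, so the
    // extractor refuses to enumerate them and reports an infinite sequence.
    let hir = parse(r"[0-9A-Za-z]")?;
    let got = Extractor::new().extract(&hir);
    assert_eq!(Seq::infinite(), got);
    Ok(())
}
```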
- /// When sequences get too big, they lose their discriminating nature and - /// are more likely to produce false positives, which in turn makes them - /// less likely to speed up searches. - /// - /// More pragmatically, for many regexes, enumerating all possible literals - /// is itself not possible or might otherwise use too many resources. So - /// constraining the size of sets during extraction is a practical trade - /// off to make. - #[inline] - pub fn infinite() -> Seq { - Seq { literals: None } - } - - /// Returns a sequence containing a single literal. - #[inline] - pub fn singleton(lit: Literal) -> Seq { - Seq { literals: Some(vec![lit]) } - } - - /// Returns a sequence of exact literals from the given byte strings. - #[inline] - pub fn new(it: I) -> Seq - where - I: IntoIterator, - B: AsRef<[u8]>, - { - it.into_iter().map(|b| Literal::exact(b.as_ref())).collect() - } - - /// If this is a finite sequence, return its members as a slice of - /// literals. - /// - /// The slice returned may be empty, in which case, there are no literals - /// that can match this sequence. - #[inline] - pub fn literals(&self) -> Option<&[Literal]> { - self.literals.as_deref() - } - - /// Push a literal to the end of this sequence. - /// - /// If this sequence is not finite, then this is a no-op. - /// - /// Similarly, if the most recently added item of this sequence is - /// equivalent to the literal given, then it is not added. This reflects - /// a `Seq`'s "set like" behavior, and represents a practical trade off. - /// Namely, there is never any need to have two adjacent and equivalent - /// literals in the same sequence, _and_ it is easy to detect in some - /// cases. - #[inline] - pub fn push(&mut self, lit: Literal) { - let lits = match self.literals { - None => return, - Some(ref mut lits) => lits, - }; - if lits.last().map_or(false, |m| m == &lit) { - return; - } - lits.push(lit); - } - - /// Make all of the literals in this sequence inexact. - /// - /// This is a no-op if this sequence is not finite. - #[inline] - pub fn make_inexact(&mut self) { - let lits = match self.literals { - None => return, - Some(ref mut lits) => lits, - }; - for lit in lits.iter_mut() { - lit.make_inexact(); - } - } - - /// Converts this sequence to an infinite sequence. - /// - /// This is a no-op if the sequence is already infinite. - #[inline] - pub fn make_infinite(&mut self) { - self.literals = None; - } - - /// Modify this sequence to contain the cross product between it and the - /// sequence given. - /// - /// The cross product only considers literals in this sequence that are - /// exact. That is, inexact literals are not extended. - /// - /// The literals are always drained from `other`, even if none are used. - /// This permits callers to reuse the sequence allocation elsewhere. - /// - /// If this sequence is infinite, then this is a no-op, regardless of what - /// `other` contains (and in this case, the literals are still drained from - /// `other`). If `other` is infinite and this sequence is finite, then this - /// is a no-op, unless this sequence contains a zero-length literal. In - /// which case, the infiniteness of `other` infects this sequence, and this - /// sequence is itself made infinite. - /// - /// Like [`Seq::union`], this may attempt to deduplicate literals. See - /// [`Seq::dedup`] for how deduplication deals with exact and inexact - /// literals. - /// - /// # Example - /// - /// This example shows basic usage and how exact and inexact literals - /// interact. 
- /// - /// ``` - /// use regex_syntax::hir::literal::{Literal, Seq}; - /// - /// let mut seq1 = Seq::from_iter([ - /// Literal::exact("foo"), - /// Literal::inexact("bar"), - /// ]); - /// let mut seq2 = Seq::from_iter([ - /// Literal::inexact("quux"), - /// Literal::exact("baz"), - /// ]); - /// seq1.cross_forward(&mut seq2); - /// - /// // The literals are pulled out of seq2. - /// assert_eq!(Some(0), seq2.len()); - /// - /// let expected = Seq::from_iter([ - /// Literal::inexact("fooquux"), - /// Literal::exact("foobaz"), - /// Literal::inexact("bar"), - /// ]); - /// assert_eq!(expected, seq1); - /// ``` - /// - /// This example shows the behavior of when `other` is an infinite - /// sequence. - /// - /// ``` - /// use regex_syntax::hir::literal::{Literal, Seq}; - /// - /// let mut seq1 = Seq::from_iter([ - /// Literal::exact("foo"), - /// Literal::inexact("bar"), - /// ]); - /// let mut seq2 = Seq::infinite(); - /// seq1.cross_forward(&mut seq2); - /// - /// // When seq2 is infinite, cross product doesn't add anything, but - /// // ensures all members of seq1 are inexact. - /// let expected = Seq::from_iter([ - /// Literal::inexact("foo"), - /// Literal::inexact("bar"), - /// ]); - /// assert_eq!(expected, seq1); - /// ``` - /// - /// This example is like the one above, but shows what happens when this - /// sequence contains an empty string. In this case, an infinite `other` - /// sequence infects this sequence (because the empty string means that - /// there are no finite prefixes): - /// - /// ``` - /// use regex_syntax::hir::literal::{Literal, Seq}; - /// - /// let mut seq1 = Seq::from_iter([ - /// Literal::exact("foo"), - /// Literal::exact(""), // inexact provokes same behavior - /// Literal::inexact("bar"), - /// ]); - /// let mut seq2 = Seq::infinite(); - /// seq1.cross_forward(&mut seq2); - /// - /// // seq1 is now infinite! - /// assert!(!seq1.is_finite()); - /// ``` - /// - /// This example shows the behavior of this sequence is infinite. - /// - /// ``` - /// use regex_syntax::hir::literal::{Literal, Seq}; - /// - /// let mut seq1 = Seq::infinite(); - /// let mut seq2 = Seq::from_iter([ - /// Literal::exact("foo"), - /// Literal::inexact("bar"), - /// ]); - /// seq1.cross_forward(&mut seq2); - /// - /// // seq1 remains unchanged. - /// assert!(!seq1.is_finite()); - /// // Even though the literals in seq2 weren't used, it was still drained. - /// assert_eq!(Some(0), seq2.len()); - /// ``` - #[inline] - pub fn cross_forward(&mut self, other: &mut Seq) { - let (lits1, lits2) = match self.cross_preamble(other) { - None => return, - Some((lits1, lits2)) => (lits1, lits2), - }; - let newcap = lits1.len().saturating_mul(lits2.len()); - for selflit in mem::replace(lits1, Vec::with_capacity(newcap)) { - if !selflit.is_exact() { - lits1.push(selflit); - continue; - } - for otherlit in lits2.iter() { - let mut newlit = Literal::exact(Vec::with_capacity( - selflit.len() + otherlit.len(), - )); - newlit.extend(&selflit); - newlit.extend(&otherlit); - if !otherlit.is_exact() { - newlit.make_inexact(); - } - lits1.push(newlit); - } - } - lits2.drain(..); - self.dedup(); - } - - /// Modify this sequence to contain the cross product between it and - /// the sequence given, where the sequences are treated as suffixes - /// instead of prefixes. Namely, the sequence `other` is *prepended* - /// to `self` (as opposed to `other` being *appended* to `self` in - /// [`Seq::cross_forward`]). - /// - /// The cross product only considers literals in this sequence that are - /// exact. 
That is, inexact literals are not extended. - /// - /// The literals are always drained from `other`, even if none are used. - /// This permits callers to reuse the sequence allocation elsewhere. - /// - /// If this sequence is infinite, then this is a no-op, regardless of what - /// `other` contains (and in this case, the literals are still drained from - /// `other`). If `other` is infinite and this sequence is finite, then this - /// is a no-op, unless this sequence contains a zero-length literal. In - /// which case, the infiniteness of `other` infects this sequence, and this - /// sequence is itself made infinite. - /// - /// Like [`Seq::union`], this may attempt to deduplicate literals. See - /// [`Seq::dedup`] for how deduplication deals with exact and inexact - /// literals. - /// - /// # Example - /// - /// This example shows basic usage and how exact and inexact literals - /// interact. - /// - /// ``` - /// use regex_syntax::hir::literal::{Literal, Seq}; - /// - /// let mut seq1 = Seq::from_iter([ - /// Literal::exact("foo"), - /// Literal::inexact("bar"), - /// ]); - /// let mut seq2 = Seq::from_iter([ - /// Literal::inexact("quux"), - /// Literal::exact("baz"), - /// ]); - /// seq1.cross_reverse(&mut seq2); - /// - /// // The literals are pulled out of seq2. - /// assert_eq!(Some(0), seq2.len()); - /// - /// let expected = Seq::from_iter([ - /// Literal::inexact("quuxfoo"), - /// Literal::inexact("bar"), - /// Literal::exact("bazfoo"), - /// ]); - /// assert_eq!(expected, seq1); - /// ``` - /// - /// This example shows the behavior of when `other` is an infinite - /// sequence. - /// - /// ``` - /// use regex_syntax::hir::literal::{Literal, Seq}; - /// - /// let mut seq1 = Seq::from_iter([ - /// Literal::exact("foo"), - /// Literal::inexact("bar"), - /// ]); - /// let mut seq2 = Seq::infinite(); - /// seq1.cross_reverse(&mut seq2); - /// - /// // When seq2 is infinite, cross product doesn't add anything, but - /// // ensures all members of seq1 are inexact. - /// let expected = Seq::from_iter([ - /// Literal::inexact("foo"), - /// Literal::inexact("bar"), - /// ]); - /// assert_eq!(expected, seq1); - /// ``` - /// - /// This example is like the one above, but shows what happens when this - /// sequence contains an empty string. In this case, an infinite `other` - /// sequence infects this sequence (because the empty string means that - /// there are no finite suffixes): - /// - /// ``` - /// use regex_syntax::hir::literal::{Literal, Seq}; - /// - /// let mut seq1 = Seq::from_iter([ - /// Literal::exact("foo"), - /// Literal::exact(""), // inexact provokes same behavior - /// Literal::inexact("bar"), - /// ]); - /// let mut seq2 = Seq::infinite(); - /// seq1.cross_reverse(&mut seq2); - /// - /// // seq1 is now infinite! - /// assert!(!seq1.is_finite()); - /// ``` - /// - /// This example shows the behavior when this sequence is infinite. - /// - /// ``` - /// use regex_syntax::hir::literal::{Literal, Seq}; - /// - /// let mut seq1 = Seq::infinite(); - /// let mut seq2 = Seq::from_iter([ - /// Literal::exact("foo"), - /// Literal::inexact("bar"), - /// ]); - /// seq1.cross_reverse(&mut seq2); - /// - /// // seq1 remains unchanged. - /// assert!(!seq1.is_finite()); - /// // Even though the literals in seq2 weren't used, it was still drained. 
- /// assert_eq!(Some(0), seq2.len()); - /// ``` - #[inline] - pub fn cross_reverse(&mut self, other: &mut Seq) { - let (lits1, lits2) = match self.cross_preamble(other) { - None => return, - Some((lits1, lits2)) => (lits1, lits2), - }; - // We basically proceed as we do in 'cross_forward' at this point, - // except that the outer loop is now 'other' and the inner loop is now - // 'self'. That's because 'self' corresponds to suffixes and 'other' - // corresponds to the sequence we want to *prepend* to the suffixes. - let newcap = lits1.len().saturating_mul(lits2.len()); - let selflits = mem::replace(lits1, Vec::with_capacity(newcap)); - for (i, otherlit) in lits2.drain(..).enumerate() { - for selflit in selflits.iter() { - if !selflit.is_exact() { - // If the suffix isn't exact, then we can't prepend - // anything to it. However, we still want to keep it. But - // we only want to keep one of them, to avoid duplication. - // (The duplication is okay from a correctness perspective, - // but wasteful.) - if i == 0 { - lits1.push(selflit.clone()); - } - continue; - } - let mut newlit = Literal::exact(Vec::with_capacity( - otherlit.len() + selflit.len(), - )); - newlit.extend(&otherlit); - newlit.extend(&selflit); - if !otherlit.is_exact() { - newlit.make_inexact(); - } - lits1.push(newlit); - } - } - self.dedup(); - } - - /// A helper function the corresponds to the subtle preamble for both - /// `cross_forward` and `cross_reverse`. In effect, it handles the cases - /// of infinite sequences for both `self` and `other`, as well as ensuring - /// that literals from `other` are drained even if they aren't used. - fn cross_preamble<'a>( - &'a mut self, - other: &'a mut Seq, - ) -> Option<(&'a mut Vec, &'a mut Vec)> { - let lits2 = match other.literals { - None => { - // If our current seq contains the empty string and the seq - // we're adding matches any literal, then it follows that the - // current seq must now also match any literal. - // - // Otherwise, we just have to make sure everything in this - // sequence is inexact. - if self.min_literal_len() == Some(0) { - *self = Seq::infinite(); - } else { - self.make_inexact(); - } - return None; - } - Some(ref mut lits) => lits, - }; - let lits1 = match self.literals { - None => { - // If we aren't going to make it to the end of this routine - // where lits2 is drained, then we need to do it now. - lits2.drain(..); - return None; - } - Some(ref mut lits) => lits, - }; - Some((lits1, lits2)) - } - - /// Unions the `other` sequence into this one. - /// - /// The literals are always drained out of the given `other` sequence, - /// even if they are being unioned into an infinite sequence. This permits - /// the caller to reuse the `other` sequence in another context. - /// - /// Some literal deduping may be performed. If any deduping happens, - /// any leftmost-first or "preference" order match semantics will be - /// preserved. - /// - /// # Example - /// - /// This example shows basic usage. - /// - /// ``` - /// use regex_syntax::hir::literal::Seq; - /// - /// let mut seq1 = Seq::new(&["foo", "bar"]); - /// let mut seq2 = Seq::new(&["bar", "quux", "foo"]); - /// seq1.union(&mut seq2); - /// - /// // The literals are pulled out of seq2. - /// assert_eq!(Some(0), seq2.len()); - /// - /// // Adjacent literals are deduped, but non-adjacent literals may not be. - /// assert_eq!(Seq::new(&["foo", "bar", "quux", "foo"]), seq1); - /// ``` - /// - /// This example shows that literals are drained from `other` even when - /// they aren't necessarily used. 
- /// - /// ``` - /// use regex_syntax::hir::literal::Seq; - /// - /// let mut seq1 = Seq::infinite(); - /// // Infinite sequences have no finite length. - /// assert_eq!(None, seq1.len()); - /// - /// let mut seq2 = Seq::new(&["bar", "quux", "foo"]); - /// seq1.union(&mut seq2); - /// - /// // seq1 is still infinite and seq2 has been drained. - /// assert_eq!(None, seq1.len()); - /// assert_eq!(Some(0), seq2.len()); - /// ``` - #[inline] - pub fn union(&mut self, other: &mut Seq) { - let lits2 = match other.literals { - None => { - // Unioning with an infinite sequence always results in an - // infinite sequence. - self.make_infinite(); - return; - } - Some(ref mut lits) => lits.drain(..), - }; - let lits1 = match self.literals { - None => return, - Some(ref mut lits) => lits, - }; - lits1.extend(lits2); - self.dedup(); - } - - /// Unions the `other` sequence into this one by splice the `other` - /// sequence at the position of the first zero-length literal. - /// - /// This is useful for preserving preference order semantics when combining - /// two literal sequences. For example, in the regex `(a||f)+foo`, the - /// correct preference order prefix sequence is `[a, foo, f]`. - /// - /// The literals are always drained out of the given `other` sequence, - /// even if they are being unioned into an infinite sequence. This permits - /// the caller to reuse the `other` sequence in another context. Note that - /// the literals are drained even if no union is performed as well, i.e., - /// when this sequence does not contain a zero-length literal. - /// - /// Some literal deduping may be performed. If any deduping happens, - /// any leftmost-first or "preference" order match semantics will be - /// preserved. - /// - /// # Example - /// - /// This example shows basic usage. - /// - /// ``` - /// use regex_syntax::hir::literal::Seq; - /// - /// let mut seq1 = Seq::new(&["a", "", "f", ""]); - /// let mut seq2 = Seq::new(&["foo"]); - /// seq1.union_into_empty(&mut seq2); - /// - /// // The literals are pulled out of seq2. - /// assert_eq!(Some(0), seq2.len()); - /// // 'foo' gets spliced into seq1 where the first empty string occurs. - /// assert_eq!(Seq::new(&["a", "foo", "f"]), seq1); - /// ``` - /// - /// This example shows that literals are drained from `other` even when - /// they aren't necessarily used. - /// - /// ``` - /// use regex_syntax::hir::literal::Seq; - /// - /// let mut seq1 = Seq::new(&["foo", "bar"]); - /// let mut seq2 = Seq::new(&["bar", "quux", "foo"]); - /// seq1.union_into_empty(&mut seq2); - /// - /// // seq1 has no zero length literals, so no splicing happens. - /// assert_eq!(Seq::new(&["foo", "bar"]), seq1); - /// // Even though no splicing happens, seq2 is still drained. - /// assert_eq!(Some(0), seq2.len()); - /// ``` - #[inline] - pub fn union_into_empty(&mut self, other: &mut Seq) { - let lits2 = other.literals.as_mut().map(|lits| lits.drain(..)); - let lits1 = match self.literals { - None => return, - Some(ref mut lits) => lits, - }; - let first_empty = match lits1.iter().position(|m| m.is_empty()) { - None => return, - Some(i) => i, - }; - let lits2 = match lits2 { - None => { - // Note that we are only here if we've found an empty literal, - // which implies that an infinite sequence infects this seq and - // also turns it into an infinite sequence. - self.literals = None; - return; - } - Some(lits) => lits, - }; - // Clearing out the empties needs to come before the splice because - // the splice might add more empties that we don't want to get rid - // of. 
Since we're splicing into the position of the first empty, the - // 'first_empty' position computed above is still correct. - lits1.retain(|m| !m.is_empty()); - lits1.splice(first_empty..first_empty, lits2); - self.dedup(); - } - - /// Deduplicate adjacent equivalent literals in this sequence. - /// - /// If adjacent literals are equivalent strings but one is exact and the - /// other inexact, the inexact literal is kept and the exact one is - /// removed. - /// - /// Deduping an infinite sequence is a no-op. - /// - /// # Example - /// - /// This example shows how literals that are duplicate byte strings but - /// are not equivalent with respect to exactness are resolved. - /// - /// ``` - /// use regex_syntax::hir::literal::{Literal, Seq}; - /// - /// let mut seq = Seq::from_iter([ - /// Literal::exact("foo"), - /// Literal::inexact("foo"), - /// ]); - /// seq.dedup(); - /// - /// assert_eq!(Seq::from_iter([Literal::inexact("foo")]), seq); - /// ``` - #[inline] - pub fn dedup(&mut self) { - if let Some(ref mut lits) = self.literals { - lits.dedup_by(|lit1, lit2| { - if lit1.as_bytes() != lit2.as_bytes() { - return false; - } - if lit1.is_exact() != lit2.is_exact() { - lit1.make_inexact(); - lit2.make_inexact(); - } - true - }); - } - } - - /// Sorts this sequence of literals lexicographically. - /// - /// Note that if, before sorting, if a literal that is a prefix of another - /// literal appears after it, then after sorting, the sequence will not - /// represent the same preference order match semantics. For example, - /// sorting the sequence `[samwise, sam]` yields the sequence `[sam, - /// samwise]`. Under preference order semantics, the latter sequence will - /// never match `samwise` where as the first sequence can. - /// - /// # Example - /// - /// This example shows basic usage. - /// - /// ``` - /// use regex_syntax::hir::literal::Seq; - /// - /// let mut seq = Seq::new(&["foo", "quux", "bar"]); - /// seq.sort(); - /// - /// assert_eq!(Seq::new(&["bar", "foo", "quux"]), seq); - /// ``` - #[inline] - pub fn sort(&mut self) { - if let Some(ref mut lits) = self.literals { - lits.sort(); - } - } - - /// Reverses all of the literals in this sequence. - /// - /// The order of the sequence itself is preserved. - /// - /// # Example - /// - /// This example shows basic usage. - /// - /// ``` - /// use regex_syntax::hir::literal::Seq; - /// - /// let mut seq = Seq::new(&["oof", "rab"]); - /// seq.reverse_literals(); - /// assert_eq!(Seq::new(&["foo", "bar"]), seq); - /// ``` - #[inline] - pub fn reverse_literals(&mut self) { - if let Some(ref mut lits) = self.literals { - for lit in lits.iter_mut() { - lit.reverse(); - } - } - } - - /// Shrinks this seq to its minimal size while respecting the preference - /// order of its literals. - /// - /// While this routine will remove duplicate literals from this seq, it - /// will also remove literals that can never match in a leftmost-first or - /// "preference order" search. Similar to [`Seq::dedup`], if a literal is - /// deduped, then the one that remains is made inexact. - /// - /// This is a no-op on seqs that are empty or not finite. - /// - /// # Example - /// - /// This example shows the difference between `{sam, samwise}` and - /// `{samwise, sam}`. - /// - /// ``` - /// use regex_syntax::hir::literal::{Literal, Seq}; - /// - /// // If 'sam' comes before 'samwise' and a preference order search is - /// // executed, then 'samwise' can never match. 
- /// let mut seq = Seq::new(&["sam", "samwise"]); - /// seq.minimize_by_preference(); - /// assert_eq!(Seq::from_iter([Literal::inexact("sam")]), seq); - /// - /// // But if they are reversed, then it's possible for 'samwise' to match - /// // since it is given higher preference. - /// let mut seq = Seq::new(&["samwise", "sam"]); - /// seq.minimize_by_preference(); - /// assert_eq!(Seq::new(&["samwise", "sam"]), seq); - /// ``` - /// - /// This example shows that if an empty string is in this seq, then - /// anything that comes after it can never match. - /// - /// ``` - /// use regex_syntax::hir::literal::{Literal, Seq}; - /// - /// // An empty string is a prefix of all strings, so it automatically - /// // inhibits any subsequent strings from matching. - /// let mut seq = Seq::new(&["foo", "bar", "", "quux", "fox"]); - /// seq.minimize_by_preference(); - /// let expected = Seq::from_iter([ - /// Literal::exact("foo"), - /// Literal::exact("bar"), - /// Literal::inexact(""), - /// ]); - /// assert_eq!(expected, seq); - /// - /// // And of course, if it's at the beginning, then it makes it impossible - /// // for anything else to match. - /// let mut seq = Seq::new(&["", "foo", "quux", "fox"]); - /// seq.minimize_by_preference(); - /// assert_eq!(Seq::from_iter([Literal::inexact("")]), seq); - /// ``` - #[inline] - pub fn minimize_by_preference(&mut self) { - if let Some(ref mut lits) = self.literals { - PreferenceTrie::minimize(lits, false); - } - } - - /// Trims all literals in this seq such that only the first `len` bytes - /// remain. If a literal has less than or equal to `len` bytes, then it - /// remains unchanged. Otherwise, it is trimmed and made inexact. - /// - /// # Example - /// - /// ``` - /// use regex_syntax::hir::literal::{Literal, Seq}; - /// - /// let mut seq = Seq::new(&["a", "foo", "quux"]); - /// seq.keep_first_bytes(2); - /// - /// let expected = Seq::from_iter([ - /// Literal::exact("a"), - /// Literal::inexact("fo"), - /// Literal::inexact("qu"), - /// ]); - /// assert_eq!(expected, seq); - /// ``` - #[inline] - pub fn keep_first_bytes(&mut self, len: usize) { - if let Some(ref mut lits) = self.literals { - for m in lits.iter_mut() { - m.keep_first_bytes(len); - } - } - } - - /// Trims all literals in this seq such that only the last `len` bytes - /// remain. If a literal has less than or equal to `len` bytes, then it - /// remains unchanged. Otherwise, it is trimmed and made inexact. - /// - /// # Example - /// - /// ``` - /// use regex_syntax::hir::literal::{Literal, Seq}; - /// - /// let mut seq = Seq::new(&["a", "foo", "quux"]); - /// seq.keep_last_bytes(2); - /// - /// let expected = Seq::from_iter([ - /// Literal::exact("a"), - /// Literal::inexact("oo"), - /// Literal::inexact("ux"), - /// ]); - /// assert_eq!(expected, seq); - /// ``` - #[inline] - pub fn keep_last_bytes(&mut self, len: usize) { - if let Some(ref mut lits) = self.literals { - for m in lits.iter_mut() { - m.keep_last_bytes(len); - } - } - } - - /// Returns true if this sequence is finite. - /// - /// When false, this sequence is infinite and must be treated as if it - /// contains every possible literal. - #[inline] - pub fn is_finite(&self) -> bool { - self.literals.is_some() - } - - /// Returns true if and only if this sequence is finite and empty. - /// - /// An empty sequence never matches anything. It can only be produced by - /// literal extraction when the corresponding regex itself cannot match. 
- #[inline] - pub fn is_empty(&self) -> bool { - self.len() == Some(0) - } - - /// Returns the number of literals in this sequence if the sequence is - /// finite. If the sequence is infinite, then `None` is returned. - #[inline] - pub fn len(&self) -> Option { - self.literals.as_ref().map(|lits| lits.len()) - } - - /// Returns true if and only if all literals in this sequence are exact. - /// - /// This returns false if the sequence is infinite. - #[inline] - pub fn is_exact(&self) -> bool { - self.literals().map_or(false, |lits| lits.iter().all(|x| x.is_exact())) - } - - /// Returns true if and only if all literals in this sequence are inexact. - /// - /// This returns true if the sequence is infinite. - #[inline] - pub fn is_inexact(&self) -> bool { - self.literals().map_or(true, |lits| lits.iter().all(|x| !x.is_exact())) - } - - /// Return the maximum length of the sequence that would result from - /// unioning `self` with `other`. If either set is infinite, then this - /// returns `None`. - #[inline] - pub fn max_union_len(&self, other: &Seq) -> Option { - let len1 = self.len()?; - let len2 = other.len()?; - Some(len1.saturating_add(len2)) - } - - /// Return the maximum length of the sequence that would result from the - /// cross product of `self` with `other`. If either set is infinite, then - /// this returns `None`. - #[inline] - pub fn max_cross_len(&self, other: &Seq) -> Option { - let len1 = self.len()?; - let len2 = other.len()?; - Some(len1.saturating_mul(len2)) - } - - /// Returns the length of the shortest literal in this sequence. - /// - /// If the sequence is infinite or empty, then this returns `None`. - #[inline] - pub fn min_literal_len(&self) -> Option { - self.literals.as_ref()?.iter().map(|x| x.len()).min() - } - - /// Returns the length of the longest literal in this sequence. - /// - /// If the sequence is infinite or empty, then this returns `None`. - #[inline] - pub fn max_literal_len(&self) -> Option { - self.literals.as_ref()?.iter().map(|x| x.len()).max() - } - - /// Returns the longest common prefix from this seq. - /// - /// If the seq matches any literal or other contains no literals, then - /// there is no meaningful prefix and this returns `None`. - /// - /// # Example - /// - /// This shows some example seqs and their longest common prefix. - /// - /// ``` - /// use regex_syntax::hir::literal::Seq; - /// - /// let seq = Seq::new(&["foo", "foobar", "fo"]); - /// assert_eq!(Some(&b"fo"[..]), seq.longest_common_prefix()); - /// let seq = Seq::new(&["foo", "foo"]); - /// assert_eq!(Some(&b"foo"[..]), seq.longest_common_prefix()); - /// let seq = Seq::new(&["foo", "bar"]); - /// assert_eq!(Some(&b""[..]), seq.longest_common_prefix()); - /// let seq = Seq::new(&[""]); - /// assert_eq!(Some(&b""[..]), seq.longest_common_prefix()); - /// - /// let seq = Seq::infinite(); - /// assert_eq!(None, seq.longest_common_prefix()); - /// let seq = Seq::empty(); - /// assert_eq!(None, seq.longest_common_prefix()); - /// ``` - #[inline] - pub fn longest_common_prefix(&self) -> Option<&[u8]> { - // If we match everything or match nothing, then there's no meaningful - // longest common prefix. 
- let lits = match self.literals { - None => return None, - Some(ref lits) => lits, - }; - if lits.len() == 0 { - return None; - } - let base = lits[0].as_bytes(); - let mut len = base.len(); - for m in lits.iter().skip(1) { - len = m - .as_bytes() - .iter() - .zip(base[..len].iter()) - .take_while(|&(a, b)| a == b) - .count(); - if len == 0 { - return Some(&[]); - } - } - Some(&base[..len]) - } - - /// Returns the longest common suffix from this seq. - /// - /// If the seq matches any literal or other contains no literals, then - /// there is no meaningful suffix and this returns `None`. - /// - /// # Example - /// - /// This shows some example seqs and their longest common suffix. - /// - /// ``` - /// use regex_syntax::hir::literal::Seq; - /// - /// let seq = Seq::new(&["oof", "raboof", "of"]); - /// assert_eq!(Some(&b"of"[..]), seq.longest_common_suffix()); - /// let seq = Seq::new(&["foo", "foo"]); - /// assert_eq!(Some(&b"foo"[..]), seq.longest_common_suffix()); - /// let seq = Seq::new(&["foo", "bar"]); - /// assert_eq!(Some(&b""[..]), seq.longest_common_suffix()); - /// let seq = Seq::new(&[""]); - /// assert_eq!(Some(&b""[..]), seq.longest_common_suffix()); - /// - /// let seq = Seq::infinite(); - /// assert_eq!(None, seq.longest_common_suffix()); - /// let seq = Seq::empty(); - /// assert_eq!(None, seq.longest_common_suffix()); - /// ``` - #[inline] - pub fn longest_common_suffix(&self) -> Option<&[u8]> { - // If we match everything or match nothing, then there's no meaningful - // longest common suffix. - let lits = match self.literals { - None => return None, - Some(ref lits) => lits, - }; - if lits.len() == 0 { - return None; - } - let base = lits[0].as_bytes(); - let mut len = base.len(); - for m in lits.iter().skip(1) { - len = m - .as_bytes() - .iter() - .rev() - .zip(base[base.len() - len..].iter().rev()) - .take_while(|&(a, b)| a == b) - .count(); - if len == 0 { - return Some(&[]); - } - } - Some(&base[base.len() - len..]) - } - - /// Optimizes this seq while treating its literals as prefixes and - /// respecting the preference order of its literals. - /// - /// The specific way "optimization" works is meant to be an implementation - /// detail, as it essentially represents a set of heuristics. The goal - /// that optimization tries to accomplish is to make the literals in this - /// set reflect inputs that will result in a more effective prefilter. - /// Principally by reducing the false positive rate of candidates found by - /// the literals in this sequence. That is, when a match of a literal is - /// found, we would like it to be a strong predictor of the overall match - /// of the regex. If it isn't, then much time will be spent starting and - /// stopping the prefilter search and attempting to confirm the match only - /// to have it fail. - /// - /// Some of those heuristics might be: - /// - /// * Identifying a common prefix from a larger sequence of literals, and - /// shrinking the sequence down to that single common prefix. - /// * Rejecting the sequence entirely if it is believed to result in very - /// high false positive rate. When this happens, the sequence is made - /// infinite. - /// * Shrinking the sequence to a smaller number of literals representing - /// prefixes, but not shrinking it so much as to make literals too short. - /// (A sequence with very short literals, of 1 or 2 bytes, will typically - /// result in a higher false positive rate.) - /// - /// Optimization should only be run once extraction is complete. 
Namely, - /// optimization may make assumptions that do not compose with other - /// operations in the middle of extraction. For example, optimization will - /// reduce `[E(sam), E(samwise)]` to `[E(sam)]`, but such a transformation - /// is only valid if no other extraction will occur. If other extraction - /// may occur, then the correct transformation would be to `[I(sam)]`. - /// - /// The [`Seq::optimize_for_suffix_by_preference`] does the same thing, but - /// for suffixes. - /// - /// # Example - /// - /// This shows how optimization might transform a sequence. Note that - /// the specific behavior is not a documented guarantee. The heuristics - /// used are an implementation detail and may change over time in semver - /// compatible releases. - /// - /// ``` - /// use regex_syntax::hir::literal::{Seq, Literal}; - /// - /// let mut seq = Seq::new(&[ - /// "samantha", - /// "sam", - /// "samwise", - /// "frodo", - /// ]); - /// seq.optimize_for_prefix_by_preference(); - /// assert_eq!(Seq::from_iter([ - /// Literal::exact("samantha"), - /// // Kept exact even though 'samwise' got pruned - /// // because optimization assumes literal extraction - /// // has finished. - /// Literal::exact("sam"), - /// Literal::exact("frodo"), - /// ]), seq); - /// ``` - /// - /// # Example: optimization may make the sequence infinite - /// - /// If the heuristics deem that the sequence could cause a very high false - /// positive rate, then it may make the sequence infinite, effectively - /// disabling its use as a prefilter. - /// - /// ``` - /// use regex_syntax::hir::literal::{Seq, Literal}; - /// - /// let mut seq = Seq::new(&[ - /// "samantha", - /// // An empty string matches at every position, - /// // thus rendering the prefilter completely - /// // ineffective. - /// "", - /// "sam", - /// "samwise", - /// "frodo", - /// ]); - /// seq.optimize_for_prefix_by_preference(); - /// assert!(!seq.is_finite()); - /// ``` - /// - /// Do note that just because there is a `" "` in the sequence, that - /// doesn't mean the sequence will always be made infinite after it is - /// optimized. Namely, if the sequence is considered exact (any match - /// corresponds to an overall match of the original regex), then any match - /// is an overall match, and so the false positive rate is always `0`. - /// - /// To demonstrate this, we remove `samwise` from our sequence. This - /// results in no optimization happening and all literals remain exact. - /// Thus the entire sequence is exact, and it is kept as-is, even though - /// one is an ASCII space: - /// - /// ``` - /// use regex_syntax::hir::literal::{Seq, Literal}; - /// - /// let mut seq = Seq::new(&[ - /// "samantha", - /// " ", - /// "sam", - /// "frodo", - /// ]); - /// seq.optimize_for_prefix_by_preference(); - /// assert!(seq.is_finite()); - /// ``` - #[inline] - pub fn optimize_for_prefix_by_preference(&mut self) { - self.optimize_by_preference(true); - } - - /// Optimizes this seq while treating its literals as suffixes and - /// respecting the preference order of its literals. - /// - /// Optimization should only be run once extraction is complete. - /// - /// The [`Seq::optimize_for_prefix_by_preference`] does the same thing, but - /// for prefixes. See its documentation for more explanation. 
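A sketch of the end-to-end flow implied by these comments: extract first, optimize only once extraction is complete, then decide whether the result is usable as a prefilter. The pattern and the printed decisions are illustrative assumptions.

```
use regex_syntax::{hir::literal::Extractor, parse};

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Optimization may prune literals in ways that don't compose with
    // further extraction, so it runs last.
    let hir = parse(r"(samantha|sam|frodo) baggins")?;
    let mut seq = Extractor::new().extract(&hir);
    seq.optimize_for_prefix_by_preference();
    if seq.is_finite() {
        println!("prefilter candidates: {:?}", seq.len());
    } else {
        println!("no usable prefilter; run the regex engine directly");
    }
    Ok(())
}
```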
- #[inline] - pub fn optimize_for_suffix_by_preference(&mut self) { - self.optimize_by_preference(false); - } - - fn optimize_by_preference(&mut self, prefix: bool) { - let origlen = match self.len() { - None => return, - Some(len) => len, - }; - // Just give up now if our sequence contains an empty string. - if self.min_literal_len().map_or(false, |len| len == 0) { - // We squash the sequence so that nobody else gets any bright - // ideas to try and use it. An empty string implies a match at - // every position. A prefilter cannot help you here. - self.make_infinite(); - return; - } - // Make sure we start with the smallest sequence possible. We use a - // special version of preference minimization that retains exactness. - // This is legal because optimization is only expected to occur once - // extraction is complete. - if prefix { - if let Some(ref mut lits) = self.literals { - PreferenceTrie::minimize(lits, true); - } - } - - // Look for a common prefix (or suffix). If we found one of those and - // it's long enough, then it's a good bet that it will be our fastest - // possible prefilter since single-substring search is so fast. - let fix = if prefix { - self.longest_common_prefix() - } else { - self.longest_common_suffix() - }; - if let Some(fix) = fix { - // As a special case, if we have a common prefix and the leading - // byte of that prefix is one that we think probably occurs rarely, - // then strip everything down to just that single byte. This should - // promote the use of memchr. - // - // ... we only do this though if our sequence has more than one - // literal. Otherwise, we'd rather just stick with a single literal - // scan. That is, using memchr is probably better than looking - // for 2 or more literals, but probably not as good as a straight - // memmem search. - // - // ... and also only do this when the prefix is short and probably - // not too discriminatory anyway. If it's longer, then it's - // probably quite discriminatory and thus is likely to have a low - // false positive rate. - if prefix - && origlen > 1 - && fix.len() >= 1 - && fix.len() <= 3 - && rank(fix[0]) < 200 - { - self.keep_first_bytes(1); - self.dedup(); - return; - } - // We only strip down to the common prefix/suffix if we think - // the existing set of literals isn't great, or if the common - // prefix/suffix is expected to be particularly discriminatory. - let isfast = - self.is_exact() && self.len().map_or(false, |len| len <= 16); - let usefix = fix.len() > 4 || (fix.len() > 1 && !isfast); - if usefix { - // If we keep exactly the number of bytes equal to the length - // of the prefix (or suffix), then by the definition of a - // prefix, every literal in the sequence will be equivalent. - // Thus, 'dedup' will leave us with one literal. - // - // We do it this way to avoid an alloc, but also to make sure - // the exactness of literals is kept (or not). - if prefix { - self.keep_first_bytes(fix.len()); - } else { - self.keep_last_bytes(fix.len()); - } - self.dedup(); - assert_eq!(Some(1), self.len()); - // We still fall through here. In particular, we want our - // longest common prefix to be subject to the poison check. - } - } - // If we have an exact sequence, we *probably* just want to keep it - // as-is. But there are some cases where we don't. So we save a copy of - // the exact sequence now, and then try to do some more optimizations - // below. If those don't work out, we go back to this exact sequence. 
- // - // The specific motivation for this is that we sometimes wind up with - // an exact sequence with a hefty number of literals. Say, 100. If we - // stuck with that, it would be too big for Teddy and would result in - // using Aho-Corasick. Which is fine... but the lazy DFA is plenty - // suitable in such cases. The real issue is that we will wind up not - // using a fast prefilter at all. So in cases like this, even though - // we have an exact sequence, it would be better to try and shrink the - // sequence (which we do below) and use it as a prefilter that can - // produce false positive matches. - // - // But if the shrinking below results in a sequence that "sucks," then - // we don't want to use that because we already have an exact sequence - // in hand. - let exact: Option = - if self.is_exact() { Some(self.clone()) } else { None }; - // Now we attempt to shorten the sequence. The idea here is that we - // don't want to look for too many literals, but we want to shorten - // our sequence enough to improve our odds of using better algorithms - // downstream (such as Teddy). - // - // The pair of numbers in this list corresponds to the maximal prefix - // (in bytes) to keep for all literals and the length of the sequence - // at which to do it. - // - // So for example, the pair (3, 500) would mean, "if we have more than - // 500 literals in our sequence, then truncate all of our literals - // such that they are at most 3 bytes in length and the minimize the - // sequence." - const ATTEMPTS: [(usize, usize); 5] = - [(5, 10), (4, 10), (3, 64), (2, 64), (1, 10)]; - for (keep, limit) in ATTEMPTS { - let len = match self.len() { - None => break, - Some(len) => len, - }; - if len <= limit { - break; - } - if prefix { - self.keep_first_bytes(keep); - } else { - self.keep_last_bytes(keep); - } - if prefix { - if let Some(ref mut lits) = self.literals { - PreferenceTrie::minimize(lits, true); - } - } - } - // Check for a poison literal. A poison literal is one that is short - // and is believed to have a very high match count. These poisons - // generally lead to a prefilter with a very high false positive rate, - // and thus overall worse performance. - // - // We do this last because we could have gone from a non-poisonous - // sequence to a poisonous one. Perhaps we should add some code to - // prevent such transitions in the first place, but then again, we - // likely only made the transition in the first place if the sequence - // was itself huge. And huge sequences are themselves poisonous. So... - if let Some(lits) = self.literals() { - if lits.iter().any(|lit| lit.is_poisonous()) { - self.make_infinite(); - } - } - // OK, if we had an exact sequence before attempting more optimizations - // above and our post-optimized sequence sucks for some reason or - // another, then we go back to the exact sequence. - if let Some(exact) = exact { - // If optimizing resulted in dropping our literals, then certainly - // backup and use the exact sequence that we had. - if !self.is_finite() { - *self = exact; - return; - } - // If our optimized sequence contains a short literal, then it's - // *probably* not so great. So throw it away and revert to the - // exact sequence. - if self.min_literal_len().map_or(true, |len| len <= 2) { - *self = exact; - return; - } - // Finally, if our optimized sequence is "big" (i.e., can't use - // Teddy), then also don't use it and rely on the exact sequence. 
- if self.len().map_or(true, |len| len > 64) { - *self = exact; - return; - } - } - } -} - -impl core::fmt::Debug for Seq { - fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { - write!(f, "Seq")?; - if let Some(lits) = self.literals() { - f.debug_list().entries(lits.iter()).finish() - } else { - write!(f, "[∞]") - } - } -} - -impl FromIterator for Seq { - fn from_iter>(it: T) -> Seq { - let mut seq = Seq::empty(); - for literal in it { - seq.push(literal); - } - seq - } -} - -/// A single literal extracted from an [`Hir`] expression. -/// -/// A literal is composed of two things: -/// -/// * A sequence of bytes. No guarantees with respect to UTF-8 are provided. -/// In particular, even if the regex a literal is extracted from is UTF-8, the -/// literal extracted may not be valid UTF-8. (For example, if an [`Extractor`] -/// limit resulted in trimming a literal in a way that splits a codepoint.) -/// * Whether the literal is "exact" or not. An "exact" literal means that it -/// has not been trimmed, and may continue to be extended. If a literal is -/// "exact" after visiting the entire `Hir` expression, then this implies that -/// the literal leads to a match state. (Although it doesn't necessarily imply -/// all occurrences of the literal correspond to a match of the regex, since -/// literal extraction ignores look-around assertions.) -#[derive(Clone, Eq, PartialEq, PartialOrd, Ord)] -pub struct Literal { - bytes: Vec, - exact: bool, -} - -impl Literal { - /// Returns a new exact literal containing the bytes given. - #[inline] - pub fn exact>>(bytes: B) -> Literal { - Literal { bytes: bytes.into(), exact: true } - } - - /// Returns a new inexact literal containing the bytes given. - #[inline] - pub fn inexact>>(bytes: B) -> Literal { - Literal { bytes: bytes.into(), exact: false } - } - - /// Returns the bytes in this literal. - #[inline] - pub fn as_bytes(&self) -> &[u8] { - &self.bytes - } - - /// Yields ownership of the bytes inside this literal. - /// - /// Note that this throws away whether the literal is "exact" or not. - #[inline] - pub fn into_bytes(self) -> Vec { - self.bytes - } - - /// Returns the length of this literal in bytes. - #[inline] - pub fn len(&self) -> usize { - self.as_bytes().len() - } - - /// Returns true if and only if this literal has zero bytes. - #[inline] - pub fn is_empty(&self) -> bool { - self.len() == 0 - } - - /// Returns true if and only if this literal is exact. - #[inline] - pub fn is_exact(&self) -> bool { - self.exact - } - - /// Marks this literal as inexact. - /// - /// Inexact literals can never be extended. For example, - /// [`Seq::cross_forward`] will not extend inexact literals. - #[inline] - pub fn make_inexact(&mut self) { - self.exact = false; - } - - /// Reverse the bytes in this literal. - #[inline] - pub fn reverse(&mut self) { - self.bytes.reverse(); - } - - /// Extend this literal with the literal given. - /// - /// If this literal is inexact, then this is a no-op. - #[inline] - pub fn extend(&mut self, lit: &Literal) { - if !self.is_exact() { - return; - } - self.bytes.extend_from_slice(&lit.bytes); - } - - /// Trims this literal such that only the first `len` bytes remain. If - /// this literal has fewer than `len` bytes, then it remains unchanged. - /// Otherwise, the literal is marked as inexact. 
- #[inline] - pub fn keep_first_bytes(&mut self, len: usize) { - if len >= self.len() { - return; - } - self.make_inexact(); - self.bytes.truncate(len); - } - - /// Trims this literal such that only the last `len` bytes remain. If this - /// literal has fewer than `len` bytes, then it remains unchanged. - /// Otherwise, the literal is marked as inexact. - #[inline] - pub fn keep_last_bytes(&mut self, len: usize) { - if len >= self.len() { - return; - } - self.make_inexact(); - self.bytes.drain(..self.len() - len); - } - - /// Returns true if it is believe that this literal is likely to match very - /// frequently, and is thus not a good candidate for a prefilter. - fn is_poisonous(&self) -> bool { - self.is_empty() || (self.len() == 1 && rank(self.as_bytes()[0]) >= 250) - } -} - -impl From for Literal { - fn from(byte: u8) -> Literal { - Literal::exact(vec![byte]) - } -} - -impl From for Literal { - fn from(ch: char) -> Literal { - use alloc::string::ToString; - Literal::exact(ch.encode_utf8(&mut [0; 4]).to_string()) - } -} - -impl AsRef<[u8]> for Literal { - fn as_ref(&self) -> &[u8] { - self.as_bytes() - } -} - -impl core::fmt::Debug for Literal { - fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { - let tag = if self.exact { "E" } else { "I" }; - f.debug_tuple(tag) - .field(&crate::debug::Bytes(self.as_bytes())) - .finish() - } -} - -/// A "preference" trie that rejects literals that will never match when -/// executing a leftmost first or "preference" search. -/// -/// For example, if 'sam' is inserted, then trying to insert 'samwise' will be -/// rejected because 'samwise' can never match since 'sam' will always take -/// priority. However, if 'samwise' is inserted first, then inserting 'sam' -/// after it is accepted. In this case, either 'samwise' or 'sam' can match in -/// a "preference" search. -/// -/// Note that we only use this trie as a "set." That is, given a sequence of -/// literals, we insert each one in order. An `insert` will reject a literal -/// if a prefix of that literal already exists in the trie. Thus, to rebuild -/// the "minimal" sequence, we simply only keep literals that were successfully -/// inserted. (Since we don't need traversal, one wonders whether we can make -/// some simplifications here, but I haven't given it a ton of thought and I've -/// never seen this show up on a profile. Because of the heuristic limits -/// imposed on literal extractions, the size of the inputs here is usually -/// very small.) -#[derive(Debug)] -struct PreferenceTrie { - /// The states in this trie. The index of a state in this vector is its ID. - states: Vec, - /// This vec indicates which states are match states. It always has - /// the same length as `states` and is indexed by the same state ID. - /// A state with identifier `sid` is a match state if and only if - /// `matches[sid].is_some()`. The option contains the index of the literal - /// corresponding to the match. The index is offset by 1 so that it fits in - /// a NonZeroUsize. - matches: Vec>, - /// The index to allocate to the next literal added to this trie. Starts at - /// 1 and increments by 1 for every literal successfully added to the trie. - next_literal_index: usize, -} - -/// A single state in a trie. Uses a sparse representation for its transitions. -#[derive(Debug, Default)] -struct State { - /// Sparse representation of the transitions out of this state. Transitions - /// are sorted by byte. There is at most one such transition for any - /// particular byte. 
- trans: Vec<(u8, usize)>, -} - -impl PreferenceTrie { - /// Minimizes the given sequence of literals while preserving preference - /// order semantics. - /// - /// When `keep_exact` is true, the exactness of every literal retained is - /// kept. This is useful when dealing with a fully extracted `Seq` that - /// only contains exact literals. In that case, we can keep all retained - /// literals as exact because we know we'll never need to match anything - /// after them and because any removed literals are guaranteed to never - /// match. - fn minimize(literals: &mut Vec, keep_exact: bool) { - let mut trie = PreferenceTrie { - states: vec![], - matches: vec![], - next_literal_index: 1, - }; - let mut make_inexact = vec![]; - literals.retain_mut(|lit| match trie.insert(lit.as_bytes()) { - Ok(_) => true, - Err(i) => { - if !keep_exact { - make_inexact.push(i.checked_sub(1).unwrap()); - } - false - } - }); - for i in make_inexact { - literals[i].make_inexact(); - } - } - - /// Returns `Ok` if the given byte string is accepted into this trie and - /// `Err` otherwise. The index for the success case corresponds to the - /// index of the literal added. The index for the error case corresponds to - /// the index of the literal already in the trie that prevented the given - /// byte string from being added. (Which implies it is a prefix of the one - /// given.) - /// - /// In short, the byte string given is accepted into the trie if and only - /// if it is possible for it to match when executing a preference order - /// search. - fn insert(&mut self, bytes: &[u8]) -> Result { - let mut prev = self.root(); - if let Some(idx) = self.matches[prev] { - return Err(idx.get()); - } - for &b in bytes.iter() { - match self.states[prev].trans.binary_search_by_key(&b, |t| t.0) { - Ok(i) => { - prev = self.states[prev].trans[i].1; - if let Some(idx) = self.matches[prev] { - return Err(idx.get()); - } - } - Err(i) => { - let next = self.create_state(); - self.states[prev].trans.insert(i, (b, next)); - prev = next; - } - } - } - let idx = self.next_literal_index; - self.next_literal_index += 1; - self.matches[prev] = NonZeroUsize::new(idx); - Ok(idx) - } - - /// Returns the root state ID, and if it doesn't exist, creates it. - fn root(&mut self) -> usize { - if !self.states.is_empty() { - 0 - } else { - self.create_state() - } - } - - /// Creates a new empty state and returns its ID. - fn create_state(&mut self) -> usize { - let id = self.states.len(); - self.states.push(State::default()); - self.matches.push(None); - id - } -} - -/// Returns the "rank" of the given byte. -/// -/// The minimum rank value is `0` and the maximum rank value is `255`. -/// -/// The rank of a byte is derived from a heuristic background distribution of -/// relative frequencies of bytes. The heuristic says that lower the rank of a -/// byte, the less likely that byte is to appear in any arbitrary haystack. 
-pub fn rank(byte: u8) -> u8 { - crate::rank::BYTE_FREQUENCIES[usize::from(byte)] -} - -#[cfg(test)] -mod tests { - use super::*; - - fn parse(pattern: &str) -> Hir { - crate::ParserBuilder::new().utf8(false).build().parse(pattern).unwrap() - } - - fn prefixes(pattern: &str) -> Seq { - Extractor::new().kind(ExtractKind::Prefix).extract(&parse(pattern)) - } - - fn suffixes(pattern: &str) -> Seq { - Extractor::new().kind(ExtractKind::Suffix).extract(&parse(pattern)) - } - - fn e(pattern: &str) -> (Seq, Seq) { - (prefixes(pattern), suffixes(pattern)) - } - - #[allow(non_snake_case)] - fn E(x: &str) -> Literal { - Literal::exact(x.as_bytes()) - } - - #[allow(non_snake_case)] - fn I(x: &str) -> Literal { - Literal::inexact(x.as_bytes()) - } - - fn seq>(it: I) -> Seq { - Seq::from_iter(it) - } - - fn infinite() -> (Seq, Seq) { - (Seq::infinite(), Seq::infinite()) - } - - fn inexact(it1: I1, it2: I2) -> (Seq, Seq) - where - I1: IntoIterator, - I2: IntoIterator, - { - (Seq::from_iter(it1), Seq::from_iter(it2)) - } - - fn exact, I: IntoIterator>(it: I) -> (Seq, Seq) { - let s1 = Seq::new(it); - let s2 = s1.clone(); - (s1, s2) - } - - fn opt, I: IntoIterator>(it: I) -> (Seq, Seq) { - let (mut p, mut s) = exact(it); - p.optimize_for_prefix_by_preference(); - s.optimize_for_suffix_by_preference(); - (p, s) - } - - #[test] - fn literal() { - assert_eq!(exact(["a"]), e("a")); - assert_eq!(exact(["aaaaa"]), e("aaaaa")); - assert_eq!(exact(["A", "a"]), e("(?i-u)a")); - assert_eq!(exact(["AB", "Ab", "aB", "ab"]), e("(?i-u)ab")); - assert_eq!(exact(["abC", "abc"]), e("ab(?i-u)c")); - - assert_eq!(exact([b"\xFF"]), e(r"(?-u:\xFF)")); - - #[cfg(feature = "unicode-case")] - { - assert_eq!(exact(["☃"]), e("☃")); - assert_eq!(exact(["☃"]), e("(?i)☃")); - assert_eq!(exact(["☃☃☃☃☃"]), e("☃☃☃☃☃")); - - assert_eq!(exact(["Δ"]), e("Δ")); - assert_eq!(exact(["δ"]), e("δ")); - assert_eq!(exact(["Δ", "δ"]), e("(?i)Δ")); - assert_eq!(exact(["Δ", "δ"]), e("(?i)δ")); - - assert_eq!(exact(["S", "s", "ſ"]), e("(?i)S")); - assert_eq!(exact(["S", "s", "ſ"]), e("(?i)s")); - assert_eq!(exact(["S", "s", "ſ"]), e("(?i)ſ")); - } - - let letters = "ͱͳͷΐάέήίΰαβγδεζηθικλμνξοπρςστυφχψωϊϋ"; - assert_eq!(exact([letters]), e(letters)); - } - - #[test] - fn class() { - assert_eq!(exact(["a", "b", "c"]), e("[abc]")); - assert_eq!(exact(["a1b", "a2b", "a3b"]), e("a[123]b")); - assert_eq!(exact(["δ", "ε"]), e("[εδ]")); - #[cfg(feature = "unicode-case")] - { - assert_eq!(exact(["Δ", "Ε", "δ", "ε", "ϵ"]), e(r"(?i)[εδ]")); - } - } - - #[test] - fn look() { - assert_eq!(exact(["ab"]), e(r"a\Ab")); - assert_eq!(exact(["ab"]), e(r"a\zb")); - assert_eq!(exact(["ab"]), e(r"a(?m:^)b")); - assert_eq!(exact(["ab"]), e(r"a(?m:$)b")); - assert_eq!(exact(["ab"]), e(r"a\bb")); - assert_eq!(exact(["ab"]), e(r"a\Bb")); - assert_eq!(exact(["ab"]), e(r"a(?-u:\b)b")); - assert_eq!(exact(["ab"]), e(r"a(?-u:\B)b")); - - assert_eq!(exact(["ab"]), e(r"^ab")); - assert_eq!(exact(["ab"]), e(r"$ab")); - assert_eq!(exact(["ab"]), e(r"(?m:^)ab")); - assert_eq!(exact(["ab"]), e(r"(?m:$)ab")); - assert_eq!(exact(["ab"]), e(r"\bab")); - assert_eq!(exact(["ab"]), e(r"\Bab")); - assert_eq!(exact(["ab"]), e(r"(?-u:\b)ab")); - assert_eq!(exact(["ab"]), e(r"(?-u:\B)ab")); - - assert_eq!(exact(["ab"]), e(r"ab^")); - assert_eq!(exact(["ab"]), e(r"ab$")); - assert_eq!(exact(["ab"]), e(r"ab(?m:^)")); - assert_eq!(exact(["ab"]), e(r"ab(?m:$)")); - assert_eq!(exact(["ab"]), e(r"ab\b")); - assert_eq!(exact(["ab"]), e(r"ab\B")); - assert_eq!(exact(["ab"]), e(r"ab(?-u:\b)")); - 
assert_eq!(exact(["ab"]), e(r"ab(?-u:\B)")); - - let expected = (seq([I("aZ"), E("ab")]), seq([I("Zb"), E("ab")])); - assert_eq!(expected, e(r"^aZ*b")); - } - - #[test] - fn repetition() { - assert_eq!(exact(["a", ""]), e(r"a?")); - assert_eq!(exact(["", "a"]), e(r"a??")); - assert_eq!(inexact([I("a"), E("")], [I("a"), E("")]), e(r"a*")); - assert_eq!(inexact([E(""), I("a")], [E(""), I("a")]), e(r"a*?")); - assert_eq!(inexact([I("a")], [I("a")]), e(r"a+")); - assert_eq!(inexact([I("a")], [I("a")]), e(r"(a+)+")); - - assert_eq!(exact(["ab"]), e(r"aZ{0}b")); - assert_eq!(exact(["aZb", "ab"]), e(r"aZ?b")); - assert_eq!(exact(["ab", "aZb"]), e(r"aZ??b")); - assert_eq!( - inexact([I("aZ"), E("ab")], [I("Zb"), E("ab")]), - e(r"aZ*b") - ); - assert_eq!( - inexact([E("ab"), I("aZ")], [E("ab"), I("Zb")]), - e(r"aZ*?b") - ); - assert_eq!(inexact([I("aZ")], [I("Zb")]), e(r"aZ+b")); - assert_eq!(inexact([I("aZ")], [I("Zb")]), e(r"aZ+?b")); - - assert_eq!(exact(["aZZb"]), e(r"aZ{2}b")); - assert_eq!(inexact([I("aZZ")], [I("ZZb")]), e(r"aZ{2,3}b")); - - assert_eq!(exact(["abc", ""]), e(r"(abc)?")); - assert_eq!(exact(["", "abc"]), e(r"(abc)??")); - - assert_eq!(inexact([I("a"), E("b")], [I("ab"), E("b")]), e(r"a*b")); - assert_eq!(inexact([E("b"), I("a")], [E("b"), I("ab")]), e(r"a*?b")); - assert_eq!(inexact([I("ab")], [I("b")]), e(r"ab+")); - assert_eq!(inexact([I("a"), I("b")], [I("b")]), e(r"a*b+")); - - // FIXME: The suffixes for this don't look quite right to me. I think - // the right suffixes would be: [I(ac), I(bc), E(c)]. The main issue I - // think is that suffixes are computed by iterating over concatenations - // in reverse, and then [bc, ac, c] ordering is indeed correct from - // that perspective. We also test a few more equivalent regexes, and - // we get the same result, so it is consistent at least I suppose. - // - // The reason why this isn't an issue is that it only messes up - // preference order, and currently, suffixes are never used in a - // context where preference order matters. For prefixes it matters - // because we sometimes want to use prefilters without confirmation - // when all of the literals are exact (and there's no look-around). But - // we never do that for suffixes. Any time we use suffixes, we always - // include a confirmation step. If that ever changes, then it's likely - // this bug will need to be fixed, but last time I looked, it appears - // hard to do so. - assert_eq!( - inexact([I("a"), I("b"), E("c")], [I("bc"), I("ac"), E("c")]), - e(r"a*b*c") - ); - assert_eq!( - inexact([I("a"), I("b"), E("c")], [I("bc"), I("ac"), E("c")]), - e(r"(a+)?(b+)?c") - ); - assert_eq!( - inexact([I("a"), I("b"), E("c")], [I("bc"), I("ac"), E("c")]), - e(r"(a+|)(b+|)c") - ); - // A few more similarish but not identical regexes. These may have a - // similar problem as above. 
- assert_eq!( - inexact( - [I("a"), I("b"), I("c"), E("")], - [I("c"), I("b"), I("a"), E("")] - ), - e(r"a*b*c*") - ); - assert_eq!(inexact([I("a"), I("b"), I("c")], [I("c")]), e(r"a*b*c+")); - assert_eq!(inexact([I("a"), I("b")], [I("bc")]), e(r"a*b+c")); - assert_eq!(inexact([I("a"), I("b")], [I("c"), I("b")]), e(r"a*b+c*")); - assert_eq!(inexact([I("ab"), E("a")], [I("b"), E("a")]), e(r"ab*")); - assert_eq!( - inexact([I("ab"), E("ac")], [I("bc"), E("ac")]), - e(r"ab*c") - ); - assert_eq!(inexact([I("ab")], [I("b")]), e(r"ab+")); - assert_eq!(inexact([I("ab")], [I("bc")]), e(r"ab+c")); - - assert_eq!( - inexact([I("z"), E("azb")], [I("zazb"), E("azb")]), - e(r"z*azb") - ); - - let expected = - exact(["aaa", "aab", "aba", "abb", "baa", "bab", "bba", "bbb"]); - assert_eq!(expected, e(r"[ab]{3}")); - let expected = inexact( - [ - I("aaa"), - I("aab"), - I("aba"), - I("abb"), - I("baa"), - I("bab"), - I("bba"), - I("bbb"), - ], - [ - I("aaa"), - I("aab"), - I("aba"), - I("abb"), - I("baa"), - I("bab"), - I("bba"), - I("bbb"), - ], - ); - assert_eq!(expected, e(r"[ab]{3,4}")); - } - - #[test] - fn concat() { - let empty: [&str; 0] = []; - - assert_eq!(exact(["abcxyz"]), e(r"abc()xyz")); - assert_eq!(exact(["abcxyz"]), e(r"(abc)(xyz)")); - assert_eq!(exact(["abcmnoxyz"]), e(r"abc()mno()xyz")); - assert_eq!(exact(empty), e(r"abc[a&&b]xyz")); - assert_eq!(exact(["abcxyz"]), e(r"abc[a&&b]*xyz")); - } - - #[test] - fn alternation() { - assert_eq!(exact(["abc", "mno", "xyz"]), e(r"abc|mno|xyz")); - assert_eq!( - inexact( - [E("abc"), I("mZ"), E("mo"), E("xyz")], - [E("abc"), I("Zo"), E("mo"), E("xyz")] - ), - e(r"abc|mZ*o|xyz") - ); - assert_eq!(exact(["abc", "xyz"]), e(r"abc|M[a&&b]N|xyz")); - assert_eq!(exact(["abc", "MN", "xyz"]), e(r"abc|M[a&&b]*N|xyz")); - - assert_eq!(exact(["aaa", "aaaaa"]), e(r"(?:|aa)aaa")); - assert_eq!( - inexact( - [I("aaa"), E(""), I("aaaaa"), E("aa")], - [I("aaa"), E(""), E("aa")] - ), - e(r"(?:|aa)(?:aaa)*") - ); - assert_eq!( - inexact( - [E(""), I("aaa"), E("aa"), I("aaaaa")], - [E(""), I("aaa"), E("aa")] - ), - e(r"(?:|aa)(?:aaa)*?") - ); - - assert_eq!( - inexact([E("a"), I("b"), E("")], [E("a"), I("b"), E("")]), - e(r"a|b*") - ); - assert_eq!(inexact([E("a"), I("b")], [E("a"), I("b")]), e(r"a|b+")); - - assert_eq!( - inexact([I("a"), E("b"), E("c")], [I("ab"), E("b"), E("c")]), - e(r"a*b|c") - ); - - assert_eq!( - inexact( - [E("a"), E("b"), I("c"), E("")], - [E("a"), E("b"), I("c"), E("")] - ), - e(r"a|(?:b|c*)") - ); - - assert_eq!( - inexact( - [I("a"), I("b"), E("c"), I("a"), I("ab"), E("c")], - [I("ac"), I("bc"), E("c"), I("ac"), I("abc"), E("c")], - ), - e(r"(a|b)*c|(a|ab)*c") - ); - - assert_eq!( - exact(["abef", "abgh", "cdef", "cdgh"]), - e(r"(ab|cd)(ef|gh)") - ); - assert_eq!( - exact([ - "abefij", "abefkl", "abghij", "abghkl", "cdefij", "cdefkl", - "cdghij", "cdghkl", - ]), - e(r"(ab|cd)(ef|gh)(ij|kl)") - ); - - assert_eq!(inexact([E("abab")], [E("abab")]), e(r"(ab){2}")); - - assert_eq!(inexact([I("abab")], [I("abab")]), e(r"(ab){2,3}")); - - assert_eq!(inexact([I("abab")], [I("abab")]), e(r"(ab){2,}")); - } - - #[test] - fn impossible() { - let empty: [&str; 0] = []; - - assert_eq!(exact(empty), e(r"[a&&b]")); - assert_eq!(exact(empty), e(r"a[a&&b]")); - assert_eq!(exact(empty), e(r"[a&&b]b")); - assert_eq!(exact(empty), e(r"a[a&&b]b")); - assert_eq!(exact(["a", "b"]), e(r"a|[a&&b]|b")); - assert_eq!(exact(["a", "b"]), e(r"a|c[a&&b]|b")); - assert_eq!(exact(["a", "b"]), e(r"a|[a&&b]d|b")); - assert_eq!(exact(["a", "b"]), e(r"a|c[a&&b]d|b")); - 
assert_eq!(exact([""]), e(r"[a&&b]*")); - assert_eq!(exact(["MN"]), e(r"M[a&&b]*N")); - } - - // This tests patterns that contain something that defeats literal - // detection, usually because it would blow some limit on the total number - // of literals that can be returned. - // - // The main idea is that when literal extraction sees something that - // it knows will blow a limit, it replaces it with a marker that says - // "any literal will match here." While not necessarily true, the - // over-estimation is just fine for the purposes of literal extraction, - // because the imprecision doesn't matter: too big is too big. - // - // This is one of the trickier parts of literal extraction, since we need - // to make sure all of our literal extraction operations correctly compose - // with the markers. - #[test] - fn anything() { - assert_eq!(infinite(), e(r".")); - assert_eq!(infinite(), e(r"(?s).")); - assert_eq!(infinite(), e(r"[A-Za-z]")); - assert_eq!(infinite(), e(r"[A-Z]")); - assert_eq!(exact([""]), e(r"[A-Z]{0}")); - assert_eq!(infinite(), e(r"[A-Z]?")); - assert_eq!(infinite(), e(r"[A-Z]*")); - assert_eq!(infinite(), e(r"[A-Z]+")); - assert_eq!((seq([I("1")]), Seq::infinite()), e(r"1[A-Z]")); - assert_eq!((seq([I("1")]), seq([I("2")])), e(r"1[A-Z]2")); - assert_eq!((Seq::infinite(), seq([I("123")])), e(r"[A-Z]+123")); - assert_eq!(infinite(), e(r"[A-Z]+123[A-Z]+")); - assert_eq!(infinite(), e(r"1|[A-Z]|3")); - assert_eq!( - (seq([E("1"), I("2"), E("3")]), Seq::infinite()), - e(r"1|2[A-Z]|3"), - ); - assert_eq!( - (Seq::infinite(), seq([E("1"), I("2"), E("3")])), - e(r"1|[A-Z]2|3"), - ); - assert_eq!( - (seq([E("1"), I("2"), E("4")]), seq([E("1"), I("3"), E("4")])), - e(r"1|2[A-Z]3|4"), - ); - assert_eq!((Seq::infinite(), seq([I("2")])), e(r"(?:|1)[A-Z]2")); - assert_eq!(inexact([I("a")], [I("z")]), e(r"a.z")); - } - - // Like the 'anything' test, but it uses smaller limits in order to test - // the logic for effectively aborting literal extraction when the seqs get - // too big. 
- #[test] - fn anything_small_limits() { - fn prefixes(pattern: &str) -> Seq { - Extractor::new() - .kind(ExtractKind::Prefix) - .limit_total(10) - .extract(&parse(pattern)) - } - - fn suffixes(pattern: &str) -> Seq { - Extractor::new() - .kind(ExtractKind::Suffix) - .limit_total(10) - .extract(&parse(pattern)) - } - - fn e(pattern: &str) -> (Seq, Seq) { - (prefixes(pattern), suffixes(pattern)) - } - - assert_eq!( - ( - seq([ - I("aaa"), - I("aab"), - I("aba"), - I("abb"), - I("baa"), - I("bab"), - I("bba"), - I("bbb") - ]), - seq([ - I("aaa"), - I("aab"), - I("aba"), - I("abb"), - I("baa"), - I("bab"), - I("bba"), - I("bbb") - ]) - ), - e(r"[ab]{3}{3}") - ); - - assert_eq!(infinite(), e(r"ab|cd|ef|gh|ij|kl|mn|op|qr|st|uv|wx|yz")); - } - - #[test] - fn empty() { - assert_eq!(exact([""]), e(r"")); - assert_eq!(exact([""]), e(r"^")); - assert_eq!(exact([""]), e(r"$")); - assert_eq!(exact([""]), e(r"(?m:^)")); - assert_eq!(exact([""]), e(r"(?m:$)")); - assert_eq!(exact([""]), e(r"\b")); - assert_eq!(exact([""]), e(r"\B")); - assert_eq!(exact([""]), e(r"(?-u:\b)")); - assert_eq!(exact([""]), e(r"(?-u:\B)")); - } - - #[test] - fn odds_and_ends() { - assert_eq!((Seq::infinite(), seq([I("a")])), e(r".a")); - assert_eq!((seq([I("a")]), Seq::infinite()), e(r"a.")); - assert_eq!(infinite(), e(r"a|.")); - assert_eq!(infinite(), e(r".|a")); - - let pat = r"M[ou]'?am+[ae]r .*([AEae]l[- ])?[GKQ]h?[aeu]+([dtz][dhz]?)+af[iy]"; - let expected = inexact( - ["Mo'am", "Moam", "Mu'am", "Muam"].map(I), - [ - "ddafi", "ddafy", "dhafi", "dhafy", "dzafi", "dzafy", "dafi", - "dafy", "tdafi", "tdafy", "thafi", "thafy", "tzafi", "tzafy", - "tafi", "tafy", "zdafi", "zdafy", "zhafi", "zhafy", "zzafi", - "zzafy", "zafi", "zafy", - ] - .map(I), - ); - assert_eq!(expected, e(pat)); - - assert_eq!( - (seq(["fn is_", "fn as_"].map(I)), Seq::infinite()), - e(r"fn is_([A-Z]+)|fn as_([A-Z]+)"), - ); - assert_eq!( - inexact([I("foo")], [I("quux")]), - e(r"foo[A-Z]+bar[A-Z]+quux") - ); - assert_eq!(infinite(), e(r"[A-Z]+bar[A-Z]+")); - assert_eq!( - exact(["Sherlock Holmes"]), - e(r"(?m)^Sherlock Holmes|Sherlock Holmes$") - ); - - assert_eq!(exact(["sa", "sb"]), e(r"\bs(?:[ab])")); - } - - // This tests a specific regex along with some heuristic steps to reduce - // the sequences extracted. This is meant to roughly correspond to the - // types of heuristics used to shrink literal sets in practice. (Shrinking - // is done because you want to balance "spend too much work looking for - // too many literals" and "spend too much work processing false positive - // matches from short literals.") - #[test] - #[cfg(feature = "unicode-case")] - fn holmes() { - let expected = inexact( - ["HOL", "HOl", "HoL", "Hol", "hOL", "hOl", "hoL", "hol"].map(I), - [ - "MES", "MEs", "Eſ", "MeS", "Mes", "eſ", "mES", "mEs", "meS", - "mes", - ] - .map(I), - ); - let (mut prefixes, mut suffixes) = e(r"(?i)Holmes"); - prefixes.keep_first_bytes(3); - suffixes.keep_last_bytes(3); - prefixes.minimize_by_preference(); - suffixes.minimize_by_preference(); - assert_eq!(expected, (prefixes, suffixes)); - } - - // This tests that we get some kind of literals extracted for a beefier - // alternation with case insensitive mode enabled. At one point during - // development, this returned nothing, and motivated some special case - // code in Extractor::union to try and trim down the literal sequences - // if the union would blow the limits set. 
- #[test] - #[cfg(feature = "unicode-case")] - fn holmes_alt() { - let mut pre = - prefixes(r"(?i)Sherlock|Holmes|Watson|Irene|Adler|John|Baker"); - assert!(pre.len().unwrap() > 0); - pre.optimize_for_prefix_by_preference(); - assert!(pre.len().unwrap() > 0); - } - - // See: https://github.com/rust-lang/regex/security/advisories/GHSA-m5pq-gvj9-9vr8 - // See: CVE-2022-24713 - // - // We test this here to ensure literal extraction completes in reasonable - // time and isn't materially impacted by these sorts of pathological - // repeats. - #[test] - fn crazy_repeats() { - assert_eq!(inexact([E("")], [E("")]), e(r"(?:){4294967295}")); - assert_eq!( - inexact([E("")], [E("")]), - e(r"(?:){64}{64}{64}{64}{64}{64}") - ); - assert_eq!(inexact([E("")], [E("")]), e(r"x{0}{4294967295}")); - assert_eq!(inexact([E("")], [E("")]), e(r"(?:|){4294967295}")); - - assert_eq!( - inexact([E("")], [E("")]), - e(r"(?:){8}{8}{8}{8}{8}{8}{8}{8}{8}{8}{8}{8}{8}{8}") - ); - let repa = "a".repeat(100); - assert_eq!( - inexact([I(&repa)], [I(&repa)]), - e(r"a{8}{8}{8}{8}{8}{8}{8}{8}{8}{8}{8}{8}{8}{8}") - ); - } - - #[test] - fn huge() { - let pat = r#"(?-u) - 2(?: - [45]\d{3}| - 7(?: - 1[0-267]| - 2[0-289]| - 3[0-29]| - 4[01]| - 5[1-3]| - 6[013]| - 7[0178]| - 91 - )| - 8(?: - 0[125]| - [139][1-6]| - 2[0157-9]| - 41| - 6[1-35]| - 7[1-5]| - 8[1-8]| - 90 - )| - 9(?: - 0[0-2]| - 1[0-4]| - 2[568]| - 3[3-6]| - 5[5-7]| - 6[0167]| - 7[15]| - 8[0146-9] - ) - )\d{4}| - 3(?: - 12?[5-7]\d{2}| - 0(?: - 2(?: - [025-79]\d| - [348]\d{1,2} - )| - 3(?: - [2-4]\d| - [56]\d? - ) - )| - 2(?: - 1\d{2}| - 2(?: - [12]\d| - [35]\d{1,2}| - 4\d? - ) - )| - 3(?: - 1\d{2}| - 2(?: - [2356]\d| - 4\d{1,2} - ) - )| - 4(?: - 1\d{2}| - 2(?: - 2\d{1,2}| - [47]| - 5\d{2} - ) - )| - 5(?: - 1\d{2}| - 29 - )| - [67]1\d{2}| - 8(?: - 1\d{2}| - 2(?: - 2\d{2}| - 3| - 4\d - ) - ) - )\d{3}| - 4(?: - 0(?: - 2(?: - [09]\d| - 7 - )| - 33\d{2} - )| - 1\d{3}| - 2(?: - 1\d{2}| - 2(?: - [25]\d?| - [348]\d| - [67]\d{1,2} - ) - )| - 3(?: - 1\d{2}(?: - \d{2} - )?| - 2(?: - [045]\d| - [236-9]\d{1,2} - )| - 32\d{2} - )| - 4(?: - [18]\d{2}| - 2(?: - [2-46]\d{2}| - 3 - )| - 5[25]\d{2} - )| - 5(?: - 1\d{2}| - 2(?: - 3\d| - 5 - ) - )| - 6(?: - [18]\d{2}| - 2(?: - 3(?: - \d{2} - )?| - [46]\d{1,2}| - 5\d{2}| - 7\d - )| - 5(?: - 3\d?| - 4\d| - [57]\d{1,2}| - 6\d{2}| - 8 - ) - )| - 71\d{2}| - 8(?: - [18]\d{2}| - 23\d{2}| - 54\d{2} - )| - 9(?: - [18]\d{2}| - 2[2-5]\d{2}| - 53\d{1,2} - ) - )\d{3}| - 5(?: - 02[03489]\d{2}| - 1\d{2}| - 2(?: - 1\d{2}| - 2(?: - 2(?: - \d{2} - )?| - [457]\d{2} - ) - )| - 3(?: - 1\d{2}| - 2(?: - [37](?: - \d{2} - )?| - [569]\d{2} - ) - )| - 4(?: - 1\d{2}| - 2[46]\d{2} - )| - 5(?: - 1\d{2}| - 26\d{1,2} - )| - 6(?: - [18]\d{2}| - 2| - 53\d{2} - )| - 7(?: - 1| - 24 - )\d{2}| - 8(?: - 1| - 26 - )\d{2}| - 91\d{2} - )\d{3}| - 6(?: - 0(?: - 1\d{2}| - 2(?: - 3\d{2}| - 4\d{1,2} - ) - )| - 2(?: - 2[2-5]\d{2}| - 5(?: - [3-5]\d{2}| - 7 - )| - 8\d{2} - )| - 3(?: - 1| - 2[3478] - )\d{2}| - 4(?: - 1| - 2[34] - )\d{2}| - 5(?: - 1| - 2[47] - )\d{2}| - 6(?: - [18]\d{2}| - 6(?: - 2(?: - 2\d| - [34]\d{2} - )| - 5(?: - [24]\d{2}| - 3\d| - 5\d{1,2} - ) - ) - )| - 72[2-5]\d{2}| - 8(?: - 1\d{2}| - 2[2-5]\d{2} - )| - 9(?: - 1\d{2}| - 2[2-6]\d{2} - ) - )\d{3}| - 7(?: - (?: - 02| - [3-589]1| - 6[12]| - 72[24] - )\d{2}| - 21\d{3}| - 32 - )\d{3}| - 8(?: - (?: - 4[12]| - [5-7]2| - 1\d? 
- )| - (?: - 0| - 3[12]| - [5-7]1| - 217 - )\d - )\d{4}| - 9(?: - [35]1| - (?: - [024]2| - 81 - )\d| - (?: - 1| - [24]1 - )\d{2} - )\d{3} - "#; - // TODO: This is a good candidate of a seq of literals that could be - // shrunk quite a bit and still be very productive with respect to - // literal optimizations. - let (prefixes, suffixes) = e(pat); - assert!(!suffixes.is_finite()); - assert_eq!(Some(243), prefixes.len()); - } - - #[test] - fn optimize() { - // This gets a common prefix that isn't too short. - let (p, s) = - opt(["foobarfoobar", "foobar", "foobarzfoobar", "foobarfoobar"]); - assert_eq!(seq([I("foobar")]), p); - assert_eq!(seq([I("foobar")]), s); - - // This also finds a common prefix, but since it's only one byte, it - // prefers the multiple literals. - let (p, s) = opt(["abba", "akka", "abccba"]); - assert_eq!(exact(["abba", "akka", "abccba"]), (p, s)); - - let (p, s) = opt(["sam", "samwise"]); - assert_eq!((seq([E("sam")]), seq([E("sam"), E("samwise")])), (p, s)); - - // The empty string is poisonous, so our seq becomes infinite, even - // though all literals are exact. - let (p, s) = opt(["foobarfoo", "foo", "", "foozfoo", "foofoo"]); - assert!(!p.is_finite()); - assert!(!s.is_finite()); - - // A space is also poisonous, so our seq becomes infinite. But this - // only gets triggered when we don't have a completely exact sequence. - // When the sequence is exact, spaces are okay, since we presume that - // any prefilter will match a space more quickly than the regex engine. - // (When the sequence is exact, there's a chance of the prefilter being - // used without needing the regex engine at all.) - let mut p = seq([E("foobarfoo"), I("foo"), E(" "), E("foofoo")]); - p.optimize_for_prefix_by_preference(); - assert!(!p.is_finite()); - } -} diff --git a/vendor/regex-syntax/src/hir/mod.rs b/vendor/regex-syntax/src/hir/mod.rs deleted file mode 100644 index 6d57fe3fd537c4..00000000000000 --- a/vendor/regex-syntax/src/hir/mod.rs +++ /dev/null @@ -1,3873 +0,0 @@ -/*! -Defines a high-level intermediate (HIR) representation for regular expressions. - -The HIR is represented by the [`Hir`] type, and it principally constructed via -[translation](translate) from an [`Ast`](crate::ast::Ast). Alternatively, users -may use the smart constructors defined on `Hir` to build their own by hand. The -smart constructors simultaneously simplify and "optimize" the HIR, and are also -the same routines used by translation. - -Most regex engines only have an HIR like this, and usually construct it -directly from the concrete syntax. This crate however first parses the -concrete syntax into an `Ast`, and only then creates the HIR from the `Ast`, -as mentioned above. It's done this way to facilitate better error reporting, -and to have a structured representation of a regex that faithfully represents -its concrete syntax. Namely, while an `Hir` value can be converted back to an -equivalent regex pattern string, it is unlikely to look like the original due -to its simplified structure. -*/ - -use core::{char, cmp}; - -use alloc::{ - boxed::Box, - format, - string::{String, ToString}, - vec, - vec::Vec, -}; - -use crate::{ - ast::Span, - hir::interval::{Interval, IntervalSet, IntervalSetIter}, - unicode, -}; - -pub use crate::{ - hir::visitor::{visit, Visitor}, - unicode::CaseFoldError, -}; - -mod interval; -pub mod literal; -pub mod print; -pub mod translate; -mod visitor; - -/// An error that can occur while translating an `Ast` to a `Hir`. 
-#[derive(Clone, Debug, Eq, PartialEq)] -pub struct Error { - /// The kind of error. - kind: ErrorKind, - /// The original pattern that the translator's Ast was parsed from. Every - /// span in an error is a valid range into this string. - pattern: String, - /// The span of this error, derived from the Ast given to the translator. - span: Span, -} - -impl Error { - /// Return the type of this error. - pub fn kind(&self) -> &ErrorKind { - &self.kind - } - - /// The original pattern string in which this error occurred. - /// - /// Every span reported by this error is reported in terms of this string. - pub fn pattern(&self) -> &str { - &self.pattern - } - - /// Return the span at which this error occurred. - pub fn span(&self) -> &Span { - &self.span - } -} - -/// The type of an error that occurred while building an `Hir`. -/// -/// This error type is marked as `non_exhaustive`. This means that adding a -/// new variant is not considered a breaking change. -#[non_exhaustive] -#[derive(Clone, Debug, Eq, PartialEq)] -pub enum ErrorKind { - /// This error occurs when a Unicode feature is used when Unicode - /// support is disabled. For example `(?-u:\pL)` would trigger this error. - UnicodeNotAllowed, - /// This error occurs when translating a pattern that could match a byte - /// sequence that isn't UTF-8 and `utf8` was enabled. - InvalidUtf8, - /// This error occurs when one uses a non-ASCII byte for a line terminator, - /// but where Unicode mode is enabled and UTF-8 mode is disabled. - InvalidLineTerminator, - /// This occurs when an unrecognized Unicode property name could not - /// be found. - UnicodePropertyNotFound, - /// This occurs when an unrecognized Unicode property value could not - /// be found. - UnicodePropertyValueNotFound, - /// This occurs when a Unicode-aware Perl character class (`\w`, `\s` or - /// `\d`) could not be found. This can occur when the `unicode-perl` - /// crate feature is not enabled. - UnicodePerlClassNotFound, - /// This occurs when the Unicode simple case mapping tables are not - /// available, and the regular expression required Unicode aware case - /// insensitivity. - UnicodeCaseUnavailable, -} - -#[cfg(feature = "std")] -impl std::error::Error for Error {} - -impl core::fmt::Display for Error { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - crate::error::Formatter::from(self).fmt(f) - } -} - -impl core::fmt::Display for ErrorKind { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - use self::ErrorKind::*; - - let msg = match *self { - UnicodeNotAllowed => "Unicode not allowed here", - InvalidUtf8 => "pattern can match invalid UTF-8", - InvalidLineTerminator => "invalid line terminator, must be ASCII", - UnicodePropertyNotFound => "Unicode property not found", - UnicodePropertyValueNotFound => "Unicode property value not found", - UnicodePerlClassNotFound => { - "Unicode-aware Perl class not found \ - (make sure the unicode-perl feature is enabled)" - } - UnicodeCaseUnavailable => { - "Unicode-aware case insensitivity matching is not available \ - (make sure the unicode-case feature is enabled)" - } - }; - f.write_str(msg) - } -} - -/// A high-level intermediate representation (HIR) for a regular expression. -/// -/// An HIR value is a combination of a [`HirKind`] and a set of [`Properties`]. -/// An `HirKind` indicates what kind of regular expression it is (a literal, -/// a repetition, a look-around assertion, etc.), where as a `Properties` -/// describes various facts about the regular expression. 
For example, whether -/// it matches UTF-8 or if it matches the empty string. -/// -/// The HIR of a regular expression represents an intermediate step between -/// its abstract syntax (a structured description of the concrete syntax) and -/// an actual regex matcher. The purpose of HIR is to make regular expressions -/// easier to analyze. In particular, the AST is much more complex than the -/// HIR. For example, while an AST supports arbitrarily nested character -/// classes, the HIR will flatten all nested classes into a single set. The HIR -/// will also "compile away" every flag present in the concrete syntax. For -/// example, users of HIR expressions never need to worry about case folding; -/// it is handled automatically by the translator (e.g., by translating -/// `(?i:A)` to `[aA]`). -/// -/// The specific type of an HIR expression can be accessed via its `kind` -/// or `into_kind` methods. This extra level of indirection exists for two -/// reasons: -/// -/// 1. Construction of an HIR expression *must* use the constructor methods on -/// this `Hir` type instead of building the `HirKind` values directly. This -/// permits construction to enforce invariants like "concatenations always -/// consist of two or more sub-expressions." -/// 2. Every HIR expression contains attributes that are defined inductively, -/// and can be computed cheaply during the construction process. For example, -/// one such attribute is whether the expression must match at the beginning of -/// the haystack. -/// -/// In particular, if you have an `HirKind` value, then there is intentionally -/// no way to build an `Hir` value from it. You instead need to do case -/// analysis on the `HirKind` value and build the `Hir` value using its smart -/// constructors. -/// -/// # UTF-8 -/// -/// If the HIR was produced by a translator with -/// [`TranslatorBuilder::utf8`](translate::TranslatorBuilder::utf8) enabled, -/// then the HIR is guaranteed to match UTF-8 exclusively for all non-empty -/// matches. -/// -/// For empty matches, those can occur at any position. It is the -/// responsibility of the regex engine to determine whether empty matches are -/// permitted between the code units of a single codepoint. -/// -/// # Stack space -/// -/// This type defines its own destructor that uses constant stack space and -/// heap space proportional to the size of the HIR. -/// -/// Also, an `Hir`'s `fmt::Display` implementation prints an HIR as a regular -/// expression pattern string, and uses constant stack space and heap space -/// proportional to the size of the `Hir`. The regex it prints is guaranteed to -/// be _semantically_ equivalent to the original concrete syntax, but it may -/// look very different. (And potentially not practically readable by a human.) -/// -/// An `Hir`'s `fmt::Debug` implementation currently does not use constant -/// stack space. The implementation will also suppress some details (such as -/// the `Properties` inlined into every `Hir` value to make it less noisy). -#[derive(Clone, Eq, PartialEq)] -pub struct Hir { - /// The underlying HIR kind. - kind: HirKind, - /// Analysis info about this HIR, computed during construction. - props: Properties, -} - -/// Methods for accessing the underlying `HirKind` and `Properties`. -impl Hir { - /// Returns a reference to the underlying HIR kind. - pub fn kind(&self) -> &HirKind { - &self.kind - } - - /// Consumes ownership of this HIR expression and returns its underlying - /// `HirKind`. 
- pub fn into_kind(mut self) -> HirKind { - core::mem::replace(&mut self.kind, HirKind::Empty) - } - - /// Returns the properties computed for this `Hir`. - pub fn properties(&self) -> &Properties { - &self.props - } - - /// Splits this HIR into its constituent parts. - /// - /// This is useful because `let Hir { kind, props } = hir;` does not work - /// because of `Hir`'s custom `Drop` implementation. - fn into_parts(mut self) -> (HirKind, Properties) { - ( - core::mem::replace(&mut self.kind, HirKind::Empty), - core::mem::replace(&mut self.props, Properties::empty()), - ) - } -} - -/// Smart constructors for HIR values. -/// -/// These constructors are called "smart" because they do inductive work or -/// simplifications. For example, calling `Hir::repetition` with a repetition -/// like `a{0}` will actually return a `Hir` with a `HirKind::Empty` kind -/// since it is equivalent to an empty regex. Another example is calling -/// `Hir::concat(vec![expr])`. Instead of getting a `HirKind::Concat`, you'll -/// just get back the original `expr` since it's precisely equivalent. -/// -/// Smart constructors enable maintaining invariants about the HIR data type -/// while also simultaneously keeping the representation as simple as possible. -impl Hir { - /// Returns an empty HIR expression. - /// - /// An empty HIR expression always matches, including the empty string. - #[inline] - pub fn empty() -> Hir { - let props = Properties::empty(); - Hir { kind: HirKind::Empty, props } - } - - /// Returns an HIR expression that can never match anything. That is, - /// the size of the set of strings in the language described by the HIR - /// returned is `0`. - /// - /// This is distinct from [`Hir::empty`] in that the empty string matches - /// the HIR returned by `Hir::empty`. That is, the set of strings in the - /// language describe described by `Hir::empty` is non-empty. - /// - /// Note that currently, the HIR returned uses an empty character class to - /// indicate that nothing can match. An equivalent expression that cannot - /// match is an empty alternation, but all such "fail" expressions are - /// normalized (via smart constructors) to empty character classes. This is - /// because empty character classes can be spelled in the concrete syntax - /// of a regex (e.g., `\P{any}` or `(?-u:[^\x00-\xFF])` or `[a&&b]`), but - /// empty alternations cannot. - #[inline] - pub fn fail() -> Hir { - let class = Class::Bytes(ClassBytes::empty()); - let props = Properties::class(&class); - // We can't just call Hir::class here because it defers to Hir::fail - // in order to canonicalize the Hir value used to represent "cannot - // match." - Hir { kind: HirKind::Class(class), props } - } - - /// Creates a literal HIR expression. - /// - /// This accepts anything that can be converted into a `Box<[u8]>`. - /// - /// Note that there is no mechanism for storing a `char` or a `Box` - /// in an HIR. Everything is "just bytes." Whether a `Literal` (or - /// any HIR node) matches valid UTF-8 exclusively can be queried via - /// [`Properties::is_utf8`]. - /// - /// # Example - /// - /// This example shows that concatenations of `Literal` HIR values will - /// automatically get flattened and combined together. So for example, even - /// if you concat multiple `Literal` values that are themselves not valid - /// UTF-8, they might add up to valid UTF-8. This also demonstrates just - /// how "smart" Hir's smart constructors are. 
- /// - /// ``` - /// use regex_syntax::hir::{Hir, HirKind, Literal}; - /// - /// let literals = vec![ - /// Hir::literal([0xE2]), - /// Hir::literal([0x98]), - /// Hir::literal([0x83]), - /// ]; - /// // Each literal, on its own, is invalid UTF-8. - /// assert!(literals.iter().all(|hir| !hir.properties().is_utf8())); - /// - /// let concat = Hir::concat(literals); - /// // But the concatenation is valid UTF-8! - /// assert!(concat.properties().is_utf8()); - /// - /// // And also notice that the literals have been concatenated into a - /// // single `Literal`, to the point where there is no explicit `Concat`! - /// let expected = HirKind::Literal(Literal(Box::from("☃".as_bytes()))); - /// assert_eq!(&expected, concat.kind()); - /// ``` - /// - /// # Example: building a literal from a `char` - /// - /// This example shows how to build a single `Hir` literal from a `char` - /// value. Since a [`Literal`] is just bytes, we just need to UTF-8 - /// encode a `char` value: - /// - /// ``` - /// use regex_syntax::hir::{Hir, HirKind, Literal}; - /// - /// let ch = '☃'; - /// let got = Hir::literal(ch.encode_utf8(&mut [0; 4]).as_bytes()); - /// - /// let expected = HirKind::Literal(Literal(Box::from("☃".as_bytes()))); - /// assert_eq!(&expected, got.kind()); - /// ``` - #[inline] - pub fn literal>>(lit: B) -> Hir { - let bytes = lit.into(); - if bytes.is_empty() { - return Hir::empty(); - } - - let lit = Literal(bytes); - let props = Properties::literal(&lit); - Hir { kind: HirKind::Literal(lit), props } - } - - /// Creates a class HIR expression. The class may either be defined over - /// ranges of Unicode codepoints or ranges of raw byte values. - /// - /// Note that an empty class is permitted. An empty class is equivalent to - /// `Hir::fail()`. - #[inline] - pub fn class(class: Class) -> Hir { - if class.is_empty() { - return Hir::fail(); - } else if let Some(bytes) = class.literal() { - return Hir::literal(bytes); - } - let props = Properties::class(&class); - Hir { kind: HirKind::Class(class), props } - } - - /// Creates a look-around assertion HIR expression. - #[inline] - pub fn look(look: Look) -> Hir { - let props = Properties::look(look); - Hir { kind: HirKind::Look(look), props } - } - - /// Creates a repetition HIR expression. - #[inline] - pub fn repetition(mut rep: Repetition) -> Hir { - // If the sub-expression of a repetition can only match the empty - // string, then we force its maximum to be at most 1. - if rep.sub.properties().maximum_len() == Some(0) { - rep.min = cmp::min(rep.min, 1); - rep.max = rep.max.map(|n| cmp::min(n, 1)).or(Some(1)); - } - // The regex 'a{0}' is always equivalent to the empty regex. This is - // true even when 'a' is an expression that never matches anything - // (like '\P{any}'). - // - // Additionally, the regex 'a{1}' is always equivalent to 'a'. - if rep.min == 0 && rep.max == Some(0) { - return Hir::empty(); - } else if rep.min == 1 && rep.max == Some(1) { - return *rep.sub; - } - let props = Properties::repetition(&rep); - Hir { kind: HirKind::Repetition(rep), props } - } - - /// Creates a capture HIR expression. - /// - /// Note that there is no explicit HIR value for a non-capturing group. - /// Since a non-capturing group only exists to override precedence in the - /// concrete syntax and since an HIR already does its own grouping based on - /// what is parsed, there is no need to explicitly represent non-capturing - /// groups in the HIR. 
- #[inline] - pub fn capture(capture: Capture) -> Hir { - let props = Properties::capture(&capture); - Hir { kind: HirKind::Capture(capture), props } - } - - /// Returns the concatenation of the given expressions. - /// - /// This attempts to flatten and simplify the concatenation as appropriate. - /// - /// # Example - /// - /// This shows a simple example of basic flattening of both concatenations - /// and literals. - /// - /// ``` - /// use regex_syntax::hir::Hir; - /// - /// let hir = Hir::concat(vec![ - /// Hir::concat(vec![ - /// Hir::literal([b'a']), - /// Hir::literal([b'b']), - /// Hir::literal([b'c']), - /// ]), - /// Hir::concat(vec![ - /// Hir::literal([b'x']), - /// Hir::literal([b'y']), - /// Hir::literal([b'z']), - /// ]), - /// ]); - /// let expected = Hir::literal("abcxyz".as_bytes()); - /// assert_eq!(expected, hir); - /// ``` - pub fn concat(subs: Vec) -> Hir { - // We rebuild the concatenation by simplifying it. Would be nice to do - // it in place, but that seems a little tricky? - let mut new = vec![]; - // This gobbles up any adjacent literals in a concatenation and smushes - // them together. Basically, when we see a literal, we add its bytes - // to 'prior_lit', and whenever we see anything else, we first take - // any bytes in 'prior_lit' and add it to the 'new' concatenation. - let mut prior_lit: Option> = None; - for sub in subs { - let (kind, props) = sub.into_parts(); - match kind { - HirKind::Literal(Literal(bytes)) => { - if let Some(ref mut prior_bytes) = prior_lit { - prior_bytes.extend_from_slice(&bytes); - } else { - prior_lit = Some(bytes.to_vec()); - } - } - // We also flatten concats that are direct children of another - // concat. We only need to do this one level deep since - // Hir::concat is the only way to build concatenations, and so - // flattening happens inductively. - HirKind::Concat(subs2) => { - for sub2 in subs2 { - let (kind2, props2) = sub2.into_parts(); - match kind2 { - HirKind::Literal(Literal(bytes)) => { - if let Some(ref mut prior_bytes) = prior_lit { - prior_bytes.extend_from_slice(&bytes); - } else { - prior_lit = Some(bytes.to_vec()); - } - } - kind2 => { - if let Some(prior_bytes) = prior_lit.take() { - new.push(Hir::literal(prior_bytes)); - } - new.push(Hir { kind: kind2, props: props2 }); - } - } - } - } - // We can just skip empty HIRs. - HirKind::Empty => {} - kind => { - if let Some(prior_bytes) = prior_lit.take() { - new.push(Hir::literal(prior_bytes)); - } - new.push(Hir { kind, props }); - } - } - } - if let Some(prior_bytes) = prior_lit.take() { - new.push(Hir::literal(prior_bytes)); - } - if new.is_empty() { - return Hir::empty(); - } else if new.len() == 1 { - return new.pop().unwrap(); - } - let props = Properties::concat(&new); - Hir { kind: HirKind::Concat(new), props } - } - - /// Returns the alternation of the given expressions. - /// - /// This flattens and simplifies the alternation as appropriate. This may - /// include factoring out common prefixes or even rewriting the alternation - /// as a character class. - /// - /// Note that an empty alternation is equivalent to `Hir::fail()`. (It - /// is not possible for one to write an empty alternation, or even an - /// alternation with a single sub-expression, in the concrete syntax of a - /// regex.) - /// - /// # Example - /// - /// This is a simple example showing how an alternation might get - /// simplified. 
- /// - /// ``` - /// use regex_syntax::hir::{Hir, Class, ClassUnicode, ClassUnicodeRange}; - /// - /// let hir = Hir::alternation(vec![ - /// Hir::literal([b'a']), - /// Hir::literal([b'b']), - /// Hir::literal([b'c']), - /// Hir::literal([b'd']), - /// Hir::literal([b'e']), - /// Hir::literal([b'f']), - /// ]); - /// let expected = Hir::class(Class::Unicode(ClassUnicode::new([ - /// ClassUnicodeRange::new('a', 'f'), - /// ]))); - /// assert_eq!(expected, hir); - /// ``` - /// - /// And another example showing how common prefixes might get factored - /// out. - /// - /// ``` - /// use regex_syntax::hir::{Hir, Class, ClassUnicode, ClassUnicodeRange}; - /// - /// let hir = Hir::alternation(vec![ - /// Hir::concat(vec![ - /// Hir::literal("abc".as_bytes()), - /// Hir::class(Class::Unicode(ClassUnicode::new([ - /// ClassUnicodeRange::new('A', 'Z'), - /// ]))), - /// ]), - /// Hir::concat(vec![ - /// Hir::literal("abc".as_bytes()), - /// Hir::class(Class::Unicode(ClassUnicode::new([ - /// ClassUnicodeRange::new('a', 'z'), - /// ]))), - /// ]), - /// ]); - /// let expected = Hir::concat(vec![ - /// Hir::literal("abc".as_bytes()), - /// Hir::alternation(vec![ - /// Hir::class(Class::Unicode(ClassUnicode::new([ - /// ClassUnicodeRange::new('A', 'Z'), - /// ]))), - /// Hir::class(Class::Unicode(ClassUnicode::new([ - /// ClassUnicodeRange::new('a', 'z'), - /// ]))), - /// ]), - /// ]); - /// assert_eq!(expected, hir); - /// ``` - /// - /// Note that these sorts of simplifications are not guaranteed. - pub fn alternation(subs: Vec) -> Hir { - // We rebuild the alternation by simplifying it. We proceed similarly - // as the concatenation case. But in this case, there's no literal - // simplification happening. We're just flattening alternations. - let mut new = Vec::with_capacity(subs.len()); - for sub in subs { - let (kind, props) = sub.into_parts(); - match kind { - HirKind::Alternation(subs2) => { - new.extend(subs2); - } - kind => { - new.push(Hir { kind, props }); - } - } - } - if new.is_empty() { - return Hir::fail(); - } else if new.len() == 1 { - return new.pop().unwrap(); - } - // Now that it's completely flattened, look for the special case of - // 'char1|char2|...|charN' and collapse that into a class. Note that - // we look for 'char' first and then bytes. The issue here is that if - // we find both non-ASCII codepoints and non-ASCII singleton bytes, - // then it isn't actually possible to smush them into a single class. - // (Because classes are either "all codepoints" or "all bytes." You - // can have a class that both matches non-ASCII but valid UTF-8 and - // invalid UTF-8.) So we look for all chars and then all bytes, and - // don't handle anything else. - if let Some(singletons) = singleton_chars(&new) { - let it = singletons - .into_iter() - .map(|ch| ClassUnicodeRange { start: ch, end: ch }); - return Hir::class(Class::Unicode(ClassUnicode::new(it))); - } - if let Some(singletons) = singleton_bytes(&new) { - let it = singletons - .into_iter() - .map(|b| ClassBytesRange { start: b, end: b }); - return Hir::class(Class::Bytes(ClassBytes::new(it))); - } - // Similar to singleton chars, we can also look for alternations of - // classes. Those can be smushed into a single class. - if let Some(cls) = class_chars(&new) { - return Hir::class(cls); - } - if let Some(cls) = class_bytes(&new) { - return Hir::class(cls); - } - // Factor out a common prefix if we can, which might potentially - // simplify the expression and unlock other optimizations downstream. 
- // It also might generally make NFA matching and DFA construction - // faster by reducing the scope of branching in the regex. - new = match lift_common_prefix(new) { - Ok(hir) => return hir, - Err(unchanged) => unchanged, - }; - let props = Properties::alternation(&new); - Hir { kind: HirKind::Alternation(new), props } - } - - /// Returns an HIR expression for `.`. - /// - /// * [`Dot::AnyChar`] maps to `(?su-R:.)`. - /// * [`Dot::AnyByte`] maps to `(?s-Ru:.)`. - /// * [`Dot::AnyCharExceptLF`] maps to `(?u-Rs:.)`. - /// * [`Dot::AnyCharExceptCRLF`] maps to `(?Ru-s:.)`. - /// * [`Dot::AnyByteExceptLF`] maps to `(?-Rsu:.)`. - /// * [`Dot::AnyByteExceptCRLF`] maps to `(?R-su:.)`. - /// - /// # Example - /// - /// Note that this is a convenience routine for constructing the correct - /// character class based on the value of `Dot`. There is no explicit "dot" - /// HIR value. It is just an abbreviation for a common character class. - /// - /// ``` - /// use regex_syntax::hir::{Hir, Dot, Class, ClassBytes, ClassBytesRange}; - /// - /// let hir = Hir::dot(Dot::AnyByte); - /// let expected = Hir::class(Class::Bytes(ClassBytes::new([ - /// ClassBytesRange::new(0x00, 0xFF), - /// ]))); - /// assert_eq!(expected, hir); - /// ``` - #[inline] - pub fn dot(dot: Dot) -> Hir { - match dot { - Dot::AnyChar => Hir::class(Class::Unicode(ClassUnicode::new([ - ClassUnicodeRange::new('\0', '\u{10FFFF}'), - ]))), - Dot::AnyByte => Hir::class(Class::Bytes(ClassBytes::new([ - ClassBytesRange::new(b'\0', b'\xFF'), - ]))), - Dot::AnyCharExcept(ch) => { - let mut cls = - ClassUnicode::new([ClassUnicodeRange::new(ch, ch)]); - cls.negate(); - Hir::class(Class::Unicode(cls)) - } - Dot::AnyCharExceptLF => { - Hir::class(Class::Unicode(ClassUnicode::new([ - ClassUnicodeRange::new('\0', '\x09'), - ClassUnicodeRange::new('\x0B', '\u{10FFFF}'), - ]))) - } - Dot::AnyCharExceptCRLF => { - Hir::class(Class::Unicode(ClassUnicode::new([ - ClassUnicodeRange::new('\0', '\x09'), - ClassUnicodeRange::new('\x0B', '\x0C'), - ClassUnicodeRange::new('\x0E', '\u{10FFFF}'), - ]))) - } - Dot::AnyByteExcept(byte) => { - let mut cls = - ClassBytes::new([ClassBytesRange::new(byte, byte)]); - cls.negate(); - Hir::class(Class::Bytes(cls)) - } - Dot::AnyByteExceptLF => { - Hir::class(Class::Bytes(ClassBytes::new([ - ClassBytesRange::new(b'\0', b'\x09'), - ClassBytesRange::new(b'\x0B', b'\xFF'), - ]))) - } - Dot::AnyByteExceptCRLF => { - Hir::class(Class::Bytes(ClassBytes::new([ - ClassBytesRange::new(b'\0', b'\x09'), - ClassBytesRange::new(b'\x0B', b'\x0C'), - ClassBytesRange::new(b'\x0E', b'\xFF'), - ]))) - } - } - } -} - -/// The underlying kind of an arbitrary [`Hir`] expression. -/// -/// An `HirKind` is principally useful for doing case analysis on the type -/// of a regular expression. If you're looking to build new `Hir` values, -/// then you _must_ use the smart constructors defined on `Hir`, like -/// [`Hir::repetition`], to build new `Hir` values. The API intentionally does -/// not expose any way of building an `Hir` directly from an `HirKind`. -#[derive(Clone, Debug, Eq, PartialEq)] -pub enum HirKind { - /// The empty regular expression, which matches everything, including the - /// empty string. - Empty, - /// A literal string that matches exactly these bytes. - Literal(Literal), - /// A single character class that matches any of the characters in the - /// class. A class can either consist of Unicode scalar values as - /// characters, or it can use bytes. - /// - /// A class may be empty. In which case, it matches nothing. 
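Reviewer aside: `Hir::dot` above is only an abbreviation for a character class. A minimal sketch, not part of this patch, assuming a standalone regex-syntax (~0.8) dependency:

```rust
use regex_syntax::hir::{Class, ClassUnicode, ClassUnicodeRange, Dot, Hir};

fn main() {
    // Dot::AnyCharExceptLF is shorthand for a two-range Unicode class
    // covering every scalar value other than '\n'.
    let hir = Hir::dot(Dot::AnyCharExceptLF);
    let expected = Hir::class(Class::Unicode(ClassUnicode::new([
        ClassUnicodeRange::new('\0', '\x09'),
        ClassUnicodeRange::new('\x0B', '\u{10FFFF}'),
    ])));
    assert_eq!(expected, hir);
}
```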
- Class(Class), - /// A look-around assertion. A look-around match always has zero length. - Look(Look), - /// A repetition operation applied to a sub-expression. - Repetition(Repetition), - /// A capturing group, which contains a sub-expression. - Capture(Capture), - /// A concatenation of expressions. - /// - /// A concatenation matches only if each of its sub-expressions match one - /// after the other. - /// - /// Concatenations are guaranteed by `Hir`'s smart constructors to always - /// have at least two sub-expressions. - Concat(Vec), - /// An alternation of expressions. - /// - /// An alternation matches only if at least one of its sub-expressions - /// match. If multiple sub-expressions match, then the leftmost is - /// preferred. - /// - /// Alternations are guaranteed by `Hir`'s smart constructors to always - /// have at least two sub-expressions. - Alternation(Vec), -} - -impl HirKind { - /// Returns a slice of this kind's sub-expressions, if any. - pub fn subs(&self) -> &[Hir] { - use core::slice::from_ref; - - match *self { - HirKind::Empty - | HirKind::Literal(_) - | HirKind::Class(_) - | HirKind::Look(_) => &[], - HirKind::Repetition(Repetition { ref sub, .. }) => from_ref(sub), - HirKind::Capture(Capture { ref sub, .. }) => from_ref(sub), - HirKind::Concat(ref subs) => subs, - HirKind::Alternation(ref subs) => subs, - } - } -} - -impl core::fmt::Debug for Hir { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - self.kind.fmt(f) - } -} - -/// Print a display representation of this Hir. -/// -/// The result of this is a valid regular expression pattern string. -/// -/// This implementation uses constant stack space and heap space proportional -/// to the size of the `Hir`. -impl core::fmt::Display for Hir { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - crate::hir::print::Printer::new().print(self, f) - } -} - -/// The high-level intermediate representation of a literal. -/// -/// A literal corresponds to `0` or more bytes that should be matched -/// literally. The smart constructors defined on `Hir` will automatically -/// concatenate adjacent literals into one literal, and will even automatically -/// replace empty literals with `Hir::empty()`. -/// -/// Note that despite a literal being represented by a sequence of bytes, its -/// `Debug` implementation will attempt to print it as a normal string. (That -/// is, not a sequence of decimal numbers.) -#[derive(Clone, Eq, PartialEq)] -pub struct Literal(pub Box<[u8]>); - -impl core::fmt::Debug for Literal { - fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { - crate::debug::Bytes(&self.0).fmt(f) - } -} - -/// The high-level intermediate representation of a character class. -/// -/// A character class corresponds to a set of characters. A character is either -/// defined by a Unicode scalar value or a byte. -/// -/// A character class, regardless of its character type, is represented by a -/// sequence of non-overlapping non-adjacent ranges of characters. -/// -/// There are no guarantees about which class variant is used. Generally -/// speaking, the Unicode variant is used whenever a class needs to contain -/// non-ASCII Unicode scalar values. But the Unicode variant can be used even -/// when Unicode mode is disabled. For example, at the time of writing, the -/// regex `(?-u:a|\xc2\xa0)` will compile down to HIR for the Unicode class -/// `[a\u00A0]` due to optimizations. 
-/// -/// Note that `Bytes` variant may be produced even when it exclusively matches -/// valid UTF-8. This is because a `Bytes` variant represents an intention by -/// the author of the regular expression to disable Unicode mode, which in turn -/// impacts the semantics of case insensitive matching. For example, `(?i)k` -/// and `(?i-u)k` will not match the same set of strings. -#[derive(Clone, Eq, PartialEq)] -pub enum Class { - /// A set of characters represented by Unicode scalar values. - Unicode(ClassUnicode), - /// A set of characters represented by arbitrary bytes (one byte per - /// character). - Bytes(ClassBytes), -} - -impl Class { - /// Apply Unicode simple case folding to this character class, in place. - /// The character class will be expanded to include all simple case folded - /// character variants. - /// - /// If this is a byte oriented character class, then this will be limited - /// to the ASCII ranges `A-Z` and `a-z`. - /// - /// # Panics - /// - /// This routine panics when the case mapping data necessary for this - /// routine to complete is unavailable. This occurs when the `unicode-case` - /// feature is not enabled and the underlying class is Unicode oriented. - /// - /// Callers should prefer using `try_case_fold_simple` instead, which will - /// return an error instead of panicking. - pub fn case_fold_simple(&mut self) { - match *self { - Class::Unicode(ref mut x) => x.case_fold_simple(), - Class::Bytes(ref mut x) => x.case_fold_simple(), - } - } - - /// Apply Unicode simple case folding to this character class, in place. - /// The character class will be expanded to include all simple case folded - /// character variants. - /// - /// If this is a byte oriented character class, then this will be limited - /// to the ASCII ranges `A-Z` and `a-z`. - /// - /// # Error - /// - /// This routine returns an error when the case mapping data necessary - /// for this routine to complete is unavailable. This occurs when the - /// `unicode-case` feature is not enabled and the underlying class is - /// Unicode oriented. - pub fn try_case_fold_simple( - &mut self, - ) -> core::result::Result<(), CaseFoldError> { - match *self { - Class::Unicode(ref mut x) => x.try_case_fold_simple()?, - Class::Bytes(ref mut x) => x.case_fold_simple(), - } - Ok(()) - } - - /// Negate this character class in place. - /// - /// After completion, this character class will contain precisely the - /// characters that weren't previously in the class. - pub fn negate(&mut self) { - match *self { - Class::Unicode(ref mut x) => x.negate(), - Class::Bytes(ref mut x) => x.negate(), - } - } - - /// Returns true if and only if this character class will only ever match - /// valid UTF-8. - /// - /// A character class can match invalid UTF-8 only when the following - /// conditions are met: - /// - /// 1. The translator was configured to permit generating an expression - /// that can match invalid UTF-8. (By default, this is disabled.) - /// 2. Unicode mode (via the `u` flag) was disabled either in the concrete - /// syntax or in the parser builder. By default, Unicode mode is - /// enabled. - pub fn is_utf8(&self) -> bool { - match *self { - Class::Unicode(_) => true, - Class::Bytes(ref x) => x.is_ascii(), - } - } - - /// Returns the length, in bytes, of the smallest string matched by this - /// character class. - /// - /// For non-empty byte oriented classes, this always returns `1`. For - /// non-empty Unicode oriented classes, this can return `1`, `2`, `3` or - /// `4`. 
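Reviewer aside: the case-folding and UTF-8 guarantees documented above can be demonstrated briefly. Illustrative sketch, not part of this patch; it assumes a standalone regex-syntax (~0.8) dependency with the default `unicode-case` feature enabled.

```rust
use regex_syntax::hir::{Class, ClassUnicode, ClassUnicodeRange};

fn main() {
    // Simple case folding of `k` pulls in `K` and the Kelvin sign U+212A,
    // which both fold to `k`.
    let mut cls = ClassUnicode::new([ClassUnicodeRange::new('k', 'k')]);
    cls.try_case_fold_simple().unwrap();
    let folded: Vec<_> = cls.iter().map(|r| (r.start(), r.end())).collect();
    assert!(folded.contains(&('K', 'K')));
    assert!(folded.contains(&('\u{212A}', '\u{212A}')));
    // A Unicode-oriented class only ever matches valid UTF-8, even after
    // negation.
    let mut class = Class::Unicode(cls);
    class.negate();
    assert!(class.is_utf8());
}
```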
For empty classes, `None` is returned. It is impossible for `0` to - /// be returned. - /// - /// # Example - /// - /// This example shows some examples of regexes and their corresponding - /// minimum length, if any. - /// - /// ``` - /// use regex_syntax::{hir::Properties, parse}; - /// - /// // The empty string has a min length of 0. - /// let hir = parse(r"")?; - /// assert_eq!(Some(0), hir.properties().minimum_len()); - /// // As do other types of regexes that only match the empty string. - /// let hir = parse(r"^$\b\B")?; - /// assert_eq!(Some(0), hir.properties().minimum_len()); - /// // A regex that can match the empty string but match more is still 0. - /// let hir = parse(r"a*")?; - /// assert_eq!(Some(0), hir.properties().minimum_len()); - /// // A regex that matches nothing has no minimum defined. - /// let hir = parse(r"[a&&b]")?; - /// assert_eq!(None, hir.properties().minimum_len()); - /// // Character classes usually have a minimum length of 1. - /// let hir = parse(r"\w")?; - /// assert_eq!(Some(1), hir.properties().minimum_len()); - /// // But sometimes Unicode classes might be bigger! - /// let hir = parse(r"\p{Cyrillic}")?; - /// assert_eq!(Some(2), hir.properties().minimum_len()); - /// - /// # Ok::<(), Box>(()) - /// ``` - pub fn minimum_len(&self) -> Option { - match *self { - Class::Unicode(ref x) => x.minimum_len(), - Class::Bytes(ref x) => x.minimum_len(), - } - } - - /// Returns the length, in bytes, of the longest string matched by this - /// character class. - /// - /// For non-empty byte oriented classes, this always returns `1`. For - /// non-empty Unicode oriented classes, this can return `1`, `2`, `3` or - /// `4`. For empty classes, `None` is returned. It is impossible for `0` to - /// be returned. - /// - /// # Example - /// - /// This example shows some examples of regexes and their corresponding - /// maximum length, if any. - /// - /// ``` - /// use regex_syntax::{hir::Properties, parse}; - /// - /// // The empty string has a max length of 0. - /// let hir = parse(r"")?; - /// assert_eq!(Some(0), hir.properties().maximum_len()); - /// // As do other types of regexes that only match the empty string. - /// let hir = parse(r"^$\b\B")?; - /// assert_eq!(Some(0), hir.properties().maximum_len()); - /// // A regex that matches nothing has no maximum defined. - /// let hir = parse(r"[a&&b]")?; - /// assert_eq!(None, hir.properties().maximum_len()); - /// // Bounded repeats work as you expect. - /// let hir = parse(r"x{2,10}")?; - /// assert_eq!(Some(10), hir.properties().maximum_len()); - /// // An unbounded repeat means there is no maximum. - /// let hir = parse(r"x{2,}")?; - /// assert_eq!(None, hir.properties().maximum_len()); - /// // With Unicode enabled, \w can match up to 4 bytes! - /// let hir = parse(r"\w")?; - /// assert_eq!(Some(4), hir.properties().maximum_len()); - /// // Without Unicode enabled, \w matches at most 1 byte. - /// let hir = parse(r"(?-u)\w")?; - /// assert_eq!(Some(1), hir.properties().maximum_len()); - /// - /// # Ok::<(), Box>(()) - /// ``` - pub fn maximum_len(&self) -> Option { - match *self { - Class::Unicode(ref x) => x.maximum_len(), - Class::Bytes(ref x) => x.maximum_len(), - } - } - - /// Returns true if and only if this character class is empty. That is, - /// it has no elements. - /// - /// An empty character can never match anything, including an empty string. 
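Reviewer aside: the minimum/maximum length rules for classes stated above follow directly from the UTF-8 lengths of the smallest and largest codepoints. A short sketch, not part of this patch, assuming a standalone regex-syntax (~0.8) dependency:

```rust
use regex_syntax::hir::{Class, ClassUnicode, ClassUnicodeRange};

fn main() {
    // 'a' encodes to 1 byte, '☃' (U+2603) encodes to 3 bytes.
    let cls = Class::Unicode(ClassUnicode::new([
        ClassUnicodeRange::new('a', 'z'),
        ClassUnicodeRange::new('☃', '☃'),
    ]));
    assert_eq!(Some(1), cls.minimum_len());
    assert_eq!(Some(3), cls.maximum_len());
    // An empty class matches nothing, so it has no minimum or maximum.
    let empty = Class::Unicode(ClassUnicode::empty());
    assert_eq!(None, empty.minimum_len());
    assert_eq!(None, empty.maximum_len());
}
```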
- pub fn is_empty(&self) -> bool { - match *self { - Class::Unicode(ref x) => x.ranges().is_empty(), - Class::Bytes(ref x) => x.ranges().is_empty(), - } - } - - /// If this class consists of exactly one element (whether a codepoint or a - /// byte), then return it as a literal byte string. - /// - /// If this class is empty or contains more than one element, then `None` - /// is returned. - pub fn literal(&self) -> Option> { - match *self { - Class::Unicode(ref x) => x.literal(), - Class::Bytes(ref x) => x.literal(), - } - } -} - -impl core::fmt::Debug for Class { - fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { - use crate::debug::Byte; - - let mut fmter = f.debug_set(); - match *self { - Class::Unicode(ref cls) => { - for r in cls.ranges().iter() { - fmter.entry(&(r.start..=r.end)); - } - } - Class::Bytes(ref cls) => { - for r in cls.ranges().iter() { - fmter.entry(&(Byte(r.start)..=Byte(r.end))); - } - } - } - fmter.finish() - } -} - -/// A set of characters represented by Unicode scalar values. -#[derive(Clone, Debug, Eq, PartialEq)] -pub struct ClassUnicode { - set: IntervalSet, -} - -impl ClassUnicode { - /// Create a new class from a sequence of ranges. - /// - /// The given ranges do not need to be in any specific order, and ranges - /// may overlap. Ranges will automatically be sorted into a canonical - /// non-overlapping order. - pub fn new(ranges: I) -> ClassUnicode - where - I: IntoIterator, - { - ClassUnicode { set: IntervalSet::new(ranges) } - } - - /// Create a new class with no ranges. - /// - /// An empty class matches nothing. That is, it is equivalent to - /// [`Hir::fail`]. - pub fn empty() -> ClassUnicode { - ClassUnicode::new(vec![]) - } - - /// Add a new range to this set. - pub fn push(&mut self, range: ClassUnicodeRange) { - self.set.push(range); - } - - /// Return an iterator over all ranges in this class. - /// - /// The iterator yields ranges in ascending order. - pub fn iter(&self) -> ClassUnicodeIter<'_> { - ClassUnicodeIter(self.set.iter()) - } - - /// Return the underlying ranges as a slice. - pub fn ranges(&self) -> &[ClassUnicodeRange] { - self.set.intervals() - } - - /// Expand this character class such that it contains all case folded - /// characters, according to Unicode's "simple" mapping. For example, if - /// this class consists of the range `a-z`, then applying case folding will - /// result in the class containing both the ranges `a-z` and `A-Z`. - /// - /// # Panics - /// - /// This routine panics when the case mapping data necessary for this - /// routine to complete is unavailable. This occurs when the `unicode-case` - /// feature is not enabled. - /// - /// Callers should prefer using `try_case_fold_simple` instead, which will - /// return an error instead of panicking. - pub fn case_fold_simple(&mut self) { - self.set - .case_fold_simple() - .expect("unicode-case feature must be enabled"); - } - - /// Expand this character class such that it contains all case folded - /// characters, according to Unicode's "simple" mapping. For example, if - /// this class consists of the range `a-z`, then applying case folding will - /// result in the class containing both the ranges `a-z` and `A-Z`. - /// - /// # Error - /// - /// This routine returns an error when the case mapping data necessary - /// for this routine to complete is unavailable. This occurs when the - /// `unicode-case` feature is not enabled. 
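Reviewer aside: `Class::literal` above only succeeds for a single-element class, in which case it returns the UTF-8 encoding of that element. Illustrative sketch, not part of this patch, assuming a standalone regex-syntax (~0.8) dependency:

```rust
use regex_syntax::hir::{Class, ClassUnicode, ClassUnicodeRange};

fn main() {
    // A class holding exactly one codepoint round-trips to a literal byte
    // string: the UTF-8 encoding of that codepoint.
    let snowman = Class::Unicode(ClassUnicode::new([
        ClassUnicodeRange::new('☃', '☃'),
    ]));
    assert_eq!(Some("☃".as_bytes().to_vec()), snowman.literal());
    // Anything empty or wider than one element yields None.
    let empty = Class::Unicode(ClassUnicode::empty());
    assert!(empty.literal().is_none());
}
```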
- pub fn try_case_fold_simple( - &mut self, - ) -> core::result::Result<(), CaseFoldError> { - self.set.case_fold_simple() - } - - /// Negate this character class. - /// - /// For all `c` where `c` is a Unicode scalar value, if `c` was in this - /// set, then it will not be in this set after negation. - pub fn negate(&mut self) { - self.set.negate(); - } - - /// Union this character class with the given character class, in place. - pub fn union(&mut self, other: &ClassUnicode) { - self.set.union(&other.set); - } - - /// Intersect this character class with the given character class, in - /// place. - pub fn intersect(&mut self, other: &ClassUnicode) { - self.set.intersect(&other.set); - } - - /// Subtract the given character class from this character class, in place. - pub fn difference(&mut self, other: &ClassUnicode) { - self.set.difference(&other.set); - } - - /// Compute the symmetric difference of the given character classes, in - /// place. - /// - /// This computes the symmetric difference of two character classes. This - /// removes all elements in this class that are also in the given class, - /// but all adds all elements from the given class that aren't in this - /// class. That is, the class will contain all elements in either class, - /// but will not contain any elements that are in both classes. - pub fn symmetric_difference(&mut self, other: &ClassUnicode) { - self.set.symmetric_difference(&other.set); - } - - /// Returns true if and only if this character class will either match - /// nothing or only ASCII bytes. Stated differently, this returns false - /// if and only if this class contains a non-ASCII codepoint. - pub fn is_ascii(&self) -> bool { - self.set.intervals().last().map_or(true, |r| r.end <= '\x7F') - } - - /// Returns the length, in bytes, of the smallest string matched by this - /// character class. - /// - /// Returns `None` when the class is empty. - pub fn minimum_len(&self) -> Option { - let first = self.ranges().get(0)?; - // Correct because c1 < c2 implies c1.len_utf8() < c2.len_utf8(). - Some(first.start.len_utf8()) - } - - /// Returns the length, in bytes, of the longest string matched by this - /// character class. - /// - /// Returns `None` when the class is empty. - pub fn maximum_len(&self) -> Option { - let last = self.ranges().last()?; - // Correct because c1 < c2 implies c1.len_utf8() < c2.len_utf8(). - Some(last.end.len_utf8()) - } - - /// If this class consists of exactly one codepoint, then return it as - /// a literal byte string. - /// - /// If this class is empty or contains more than one codepoint, then `None` - /// is returned. - pub fn literal(&self) -> Option> { - let rs = self.ranges(); - if rs.len() == 1 && rs[0].start == rs[0].end { - Some(rs[0].start.encode_utf8(&mut [0; 4]).to_string().into_bytes()) - } else { - None - } - } - - /// If this class consists of only ASCII ranges, then return its - /// corresponding and equivalent byte class. - pub fn to_byte_class(&self) -> Option { - if !self.is_ascii() { - return None; - } - Some(ClassBytes::new(self.ranges().iter().map(|r| { - // Since we are guaranteed that our codepoint range is ASCII, the - // 'u8::try_from' calls below are guaranteed to be correct. - ClassBytesRange { - start: u8::try_from(r.start).unwrap(), - end: u8::try_from(r.end).unwrap(), - } - }))) - } -} - -/// An iterator over all ranges in a Unicode character class. -/// -/// The lifetime `'a` refers to the lifetime of the underlying class. 
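Reviewer aside: the in-place set operations and the ASCII-to-byte-class conversion documented above compose as one would expect. A minimal sketch, not part of this patch, assuming a standalone regex-syntax (~0.8) dependency:

```rust
use regex_syntax::hir::{ClassUnicode, ClassUnicodeRange};

fn main() {
    let mut lower = ClassUnicode::new([ClassUnicodeRange::new('a', 'z')]);
    let upper = ClassUnicode::new([ClassUnicodeRange::new('A', 'Z')]);
    // In-place union: `lower` now covers both ASCII letter ranges, kept as
    // two non-adjacent ranges in ascending order.
    lower.union(&upper);
    let ranges: Vec<_> = lower.iter().map(|r| (r.start(), r.end())).collect();
    assert_eq!(vec![('A', 'Z'), ('a', 'z')], ranges);
    // Everything here is ASCII, so an equivalent byte class exists.
    assert!(lower.is_ascii());
    assert!(lower.to_byte_class().is_some());
}
```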
-#[derive(Debug)] -pub struct ClassUnicodeIter<'a>(IntervalSetIter<'a, ClassUnicodeRange>); - -impl<'a> Iterator for ClassUnicodeIter<'a> { - type Item = &'a ClassUnicodeRange; - - fn next(&mut self) -> Option<&'a ClassUnicodeRange> { - self.0.next() - } -} - -/// A single range of characters represented by Unicode scalar values. -/// -/// The range is closed. That is, the start and end of the range are included -/// in the range. -#[derive(Clone, Copy, Default, Eq, PartialEq, PartialOrd, Ord)] -pub struct ClassUnicodeRange { - start: char, - end: char, -} - -impl core::fmt::Debug for ClassUnicodeRange { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - let start = if !self.start.is_whitespace() && !self.start.is_control() - { - self.start.to_string() - } else { - format!("0x{:X}", u32::from(self.start)) - }; - let end = if !self.end.is_whitespace() && !self.end.is_control() { - self.end.to_string() - } else { - format!("0x{:X}", u32::from(self.end)) - }; - f.debug_struct("ClassUnicodeRange") - .field("start", &start) - .field("end", &end) - .finish() - } -} - -impl Interval for ClassUnicodeRange { - type Bound = char; - - #[inline] - fn lower(&self) -> char { - self.start - } - #[inline] - fn upper(&self) -> char { - self.end - } - #[inline] - fn set_lower(&mut self, bound: char) { - self.start = bound; - } - #[inline] - fn set_upper(&mut self, bound: char) { - self.end = bound; - } - - /// Apply simple case folding to this Unicode scalar value range. - /// - /// Additional ranges are appended to the given vector. Canonical ordering - /// is *not* maintained in the given vector. - fn case_fold_simple( - &self, - ranges: &mut Vec, - ) -> Result<(), unicode::CaseFoldError> { - let mut folder = unicode::SimpleCaseFolder::new()?; - if !folder.overlaps(self.start, self.end) { - return Ok(()); - } - let (start, end) = (u32::from(self.start), u32::from(self.end)); - for cp in (start..=end).filter_map(char::from_u32) { - for &cp_folded in folder.mapping(cp) { - ranges.push(ClassUnicodeRange::new(cp_folded, cp_folded)); - } - } - Ok(()) - } -} - -impl ClassUnicodeRange { - /// Create a new Unicode scalar value range for a character class. - /// - /// The returned range is always in a canonical form. That is, the range - /// returned always satisfies the invariant that `start <= end`. - pub fn new(start: char, end: char) -> ClassUnicodeRange { - ClassUnicodeRange::create(start, end) - } - - /// Return the start of this range. - /// - /// The start of a range is always less than or equal to the end of the - /// range. - pub fn start(&self) -> char { - self.start - } - - /// Return the end of this range. - /// - /// The end of a range is always greater than or equal to the start of the - /// range. - pub fn end(&self) -> char { - self.end - } - - /// Returns the number of codepoints in this range. - pub fn len(&self) -> usize { - let diff = 1 + u32::from(self.end) - u32::from(self.start); - // This is likely to panic in 16-bit targets since a usize can only fit - // 2^16. It's not clear what to do here, other than to return an error - // when building a Unicode class that contains a range whose length - // overflows usize. (Which, to be honest, is probably quite common on - // 16-bit targets. For example, this would imply that '.' and '\p{any}' - // would be impossible to build.) - usize::try_from(diff).expect("char class len fits in usize") - } -} - -/// A set of characters represented by arbitrary bytes. -/// -/// Each byte corresponds to one character. 
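Reviewer aside: ranges are closed on both ends, which is why `len` counts both endpoints. A tiny sketch, not part of this patch, assuming a standalone regex-syntax (~0.8) dependency:

```rust
use regex_syntax::hir::ClassUnicodeRange;

fn main() {
    // Both endpoints are included, so `a-z` spans 26 codepoints.
    let range = ClassUnicodeRange::new('a', 'z');
    assert_eq!('a', range.start());
    assert_eq!('z', range.end());
    assert_eq!(26, range.len());
}
```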
-#[derive(Clone, Debug, Eq, PartialEq)] -pub struct ClassBytes { - set: IntervalSet, -} - -impl ClassBytes { - /// Create a new class from a sequence of ranges. - /// - /// The given ranges do not need to be in any specific order, and ranges - /// may overlap. Ranges will automatically be sorted into a canonical - /// non-overlapping order. - pub fn new(ranges: I) -> ClassBytes - where - I: IntoIterator, - { - ClassBytes { set: IntervalSet::new(ranges) } - } - - /// Create a new class with no ranges. - /// - /// An empty class matches nothing. That is, it is equivalent to - /// [`Hir::fail`]. - pub fn empty() -> ClassBytes { - ClassBytes::new(vec![]) - } - - /// Add a new range to this set. - pub fn push(&mut self, range: ClassBytesRange) { - self.set.push(range); - } - - /// Return an iterator over all ranges in this class. - /// - /// The iterator yields ranges in ascending order. - pub fn iter(&self) -> ClassBytesIter<'_> { - ClassBytesIter(self.set.iter()) - } - - /// Return the underlying ranges as a slice. - pub fn ranges(&self) -> &[ClassBytesRange] { - self.set.intervals() - } - - /// Expand this character class such that it contains all case folded - /// characters. For example, if this class consists of the range `a-z`, - /// then applying case folding will result in the class containing both the - /// ranges `a-z` and `A-Z`. - /// - /// Note that this only applies ASCII case folding, which is limited to the - /// characters `a-z` and `A-Z`. - pub fn case_fold_simple(&mut self) { - self.set.case_fold_simple().expect("ASCII case folding never fails"); - } - - /// Negate this byte class. - /// - /// For all `b` where `b` is a any byte, if `b` was in this set, then it - /// will not be in this set after negation. - pub fn negate(&mut self) { - self.set.negate(); - } - - /// Union this byte class with the given byte class, in place. - pub fn union(&mut self, other: &ClassBytes) { - self.set.union(&other.set); - } - - /// Intersect this byte class with the given byte class, in place. - pub fn intersect(&mut self, other: &ClassBytes) { - self.set.intersect(&other.set); - } - - /// Subtract the given byte class from this byte class, in place. - pub fn difference(&mut self, other: &ClassBytes) { - self.set.difference(&other.set); - } - - /// Compute the symmetric difference of the given byte classes, in place. - /// - /// This computes the symmetric difference of two byte classes. This - /// removes all elements in this class that are also in the given class, - /// but all adds all elements from the given class that aren't in this - /// class. That is, the class will contain all elements in either class, - /// but will not contain any elements that are in both classes. - pub fn symmetric_difference(&mut self, other: &ClassBytes) { - self.set.symmetric_difference(&other.set); - } - - /// Returns true if and only if this character class will either match - /// nothing or only ASCII bytes. Stated differently, this returns false - /// if and only if this class contains a non-ASCII byte. - pub fn is_ascii(&self) -> bool { - self.set.intervals().last().map_or(true, |r| r.end <= 0x7F) - } - - /// Returns the length, in bytes, of the smallest string matched by this - /// character class. - /// - /// Returns `None` when the class is empty. - pub fn minimum_len(&self) -> Option { - if self.ranges().is_empty() { - None - } else { - Some(1) - } - } - - /// Returns the length, in bytes, of the longest string matched by this - /// character class. - /// - /// Returns `None` when the class is empty. 
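Reviewer aside: for byte-oriented classes, case folding is limited to ASCII, as documented above. Illustrative sketch, not part of this patch, assuming a standalone regex-syntax (~0.8) dependency:

```rust
use regex_syntax::hir::{ClassBytes, ClassBytesRange};

fn main() {
    // ASCII-only case folding: folding `a-z` adds `A-Z` and nothing else.
    let mut cls = ClassBytes::new([ClassBytesRange::new(b'a', b'z')]);
    cls.case_fold_simple();
    let ranges: Vec<_> = cls.iter().map(|r| (r.start(), r.end())).collect();
    assert_eq!(vec![(b'A', b'Z'), (b'a', b'z')], ranges);
    // A class confined to ASCII bytes can only ever match valid UTF-8.
    assert!(cls.is_ascii());
}
```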
- pub fn maximum_len(&self) -> Option { - if self.ranges().is_empty() { - None - } else { - Some(1) - } - } - - /// If this class consists of exactly one byte, then return it as - /// a literal byte string. - /// - /// If this class is empty or contains more than one byte, then `None` - /// is returned. - pub fn literal(&self) -> Option> { - let rs = self.ranges(); - if rs.len() == 1 && rs[0].start == rs[0].end { - Some(vec![rs[0].start]) - } else { - None - } - } - - /// If this class consists of only ASCII ranges, then return its - /// corresponding and equivalent Unicode class. - pub fn to_unicode_class(&self) -> Option { - if !self.is_ascii() { - return None; - } - Some(ClassUnicode::new(self.ranges().iter().map(|r| { - // Since we are guaranteed that our byte range is ASCII, the - // 'char::from' calls below are correct and will not erroneously - // convert a raw byte value into its corresponding codepoint. - ClassUnicodeRange { - start: char::from(r.start), - end: char::from(r.end), - } - }))) - } -} - -/// An iterator over all ranges in a byte character class. -/// -/// The lifetime `'a` refers to the lifetime of the underlying class. -#[derive(Debug)] -pub struct ClassBytesIter<'a>(IntervalSetIter<'a, ClassBytesRange>); - -impl<'a> Iterator for ClassBytesIter<'a> { - type Item = &'a ClassBytesRange; - - fn next(&mut self) -> Option<&'a ClassBytesRange> { - self.0.next() - } -} - -/// A single range of characters represented by arbitrary bytes. -/// -/// The range is closed. That is, the start and end of the range are included -/// in the range. -#[derive(Clone, Copy, Default, Eq, PartialEq, PartialOrd, Ord)] -pub struct ClassBytesRange { - start: u8, - end: u8, -} - -impl Interval for ClassBytesRange { - type Bound = u8; - - #[inline] - fn lower(&self) -> u8 { - self.start - } - #[inline] - fn upper(&self) -> u8 { - self.end - } - #[inline] - fn set_lower(&mut self, bound: u8) { - self.start = bound; - } - #[inline] - fn set_upper(&mut self, bound: u8) { - self.end = bound; - } - - /// Apply simple case folding to this byte range. Only ASCII case mappings - /// (for a-z) are applied. - /// - /// Additional ranges are appended to the given vector. Canonical ordering - /// is *not* maintained in the given vector. - fn case_fold_simple( - &self, - ranges: &mut Vec, - ) -> Result<(), unicode::CaseFoldError> { - if !ClassBytesRange::new(b'a', b'z').is_intersection_empty(self) { - let lower = cmp::max(self.start, b'a'); - let upper = cmp::min(self.end, b'z'); - ranges.push(ClassBytesRange::new(lower - 32, upper - 32)); - } - if !ClassBytesRange::new(b'A', b'Z').is_intersection_empty(self) { - let lower = cmp::max(self.start, b'A'); - let upper = cmp::min(self.end, b'Z'); - ranges.push(ClassBytesRange::new(lower + 32, upper + 32)); - } - Ok(()) - } -} - -impl ClassBytesRange { - /// Create a new byte range for a character class. - /// - /// The returned range is always in a canonical form. That is, the range - /// returned always satisfies the invariant that `start <= end`. - pub fn new(start: u8, end: u8) -> ClassBytesRange { - ClassBytesRange::create(start, end) - } - - /// Return the start of this range. - /// - /// The start of a range is always less than or equal to the end of the - /// range. - pub fn start(&self) -> u8 { - self.start - } - - /// Return the end of this range. - /// - /// The end of a range is always greater than or equal to the start of the - /// range. - pub fn end(&self) -> u8 { - self.end - } - - /// Returns the number of bytes in this range. 
- pub fn len(&self) -> usize { - usize::from(self.end.checked_sub(self.start).unwrap()) - .checked_add(1) - .unwrap() - } -} - -impl core::fmt::Debug for ClassBytesRange { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - f.debug_struct("ClassBytesRange") - .field("start", &crate::debug::Byte(self.start)) - .field("end", &crate::debug::Byte(self.end)) - .finish() - } -} - -/// The high-level intermediate representation for a look-around assertion. -/// -/// An assertion match is always zero-length. Also called an "empty match." -#[derive(Clone, Copy, Debug, Eq, PartialEq)] -pub enum Look { - /// Match the beginning of text. Specifically, this matches at the starting - /// position of the input. - Start = 1 << 0, - /// Match the end of text. Specifically, this matches at the ending - /// position of the input. - End = 1 << 1, - /// Match the beginning of a line or the beginning of text. Specifically, - /// this matches at the starting position of the input, or at the position - /// immediately following a `\n` character. - StartLF = 1 << 2, - /// Match the end of a line or the end of text. Specifically, this matches - /// at the end position of the input, or at the position immediately - /// preceding a `\n` character. - EndLF = 1 << 3, - /// Match the beginning of a line or the beginning of text. Specifically, - /// this matches at the starting position of the input, or at the position - /// immediately following either a `\r` or `\n` character, but never after - /// a `\r` when a `\n` follows. - StartCRLF = 1 << 4, - /// Match the end of a line or the end of text. Specifically, this matches - /// at the end position of the input, or at the position immediately - /// preceding a `\r` or `\n` character, but never before a `\n` when a `\r` - /// precedes it. - EndCRLF = 1 << 5, - /// Match an ASCII-only word boundary. That is, this matches a position - /// where the left adjacent character and right adjacent character - /// correspond to a word and non-word or a non-word and word character. - WordAscii = 1 << 6, - /// Match an ASCII-only negation of a word boundary. - WordAsciiNegate = 1 << 7, - /// Match a Unicode-aware word boundary. That is, this matches a position - /// where the left adjacent character and right adjacent character - /// correspond to a word and non-word or a non-word and word character. - WordUnicode = 1 << 8, - /// Match a Unicode-aware negation of a word boundary. - WordUnicodeNegate = 1 << 9, - /// Match the start of an ASCII-only word boundary. That is, this matches a - /// position at either the beginning of the haystack or where the previous - /// character is not a word character and the following character is a word - /// character. - WordStartAscii = 1 << 10, - /// Match the end of an ASCII-only word boundary. That is, this matches - /// a position at either the end of the haystack or where the previous - /// character is a word character and the following character is not a word - /// character. - WordEndAscii = 1 << 11, - /// Match the start of a Unicode word boundary. That is, this matches a - /// position at either the beginning of the haystack or where the previous - /// character is not a word character and the following character is a word - /// character. - WordStartUnicode = 1 << 12, - /// Match the end of a Unicode word boundary. That is, this matches a - /// position at either the end of the haystack or where the previous - /// character is a word character and the following character is not a word - /// character. 
- WordEndUnicode = 1 << 13, - /// Match the start half of an ASCII-only word boundary. That is, this - /// matches a position at either the beginning of the haystack or where the - /// previous character is not a word character. - WordStartHalfAscii = 1 << 14, - /// Match the end half of an ASCII-only word boundary. That is, this - /// matches a position at either the end of the haystack or where the - /// following character is not a word character. - WordEndHalfAscii = 1 << 15, - /// Match the start half of a Unicode word boundary. That is, this matches - /// a position at either the beginning of the haystack or where the - /// previous character is not a word character. - WordStartHalfUnicode = 1 << 16, - /// Match the end half of a Unicode word boundary. That is, this matches - /// a position at either the end of the haystack or where the following - /// character is not a word character. - WordEndHalfUnicode = 1 << 17, -} - -impl Look { - /// Flip the look-around assertion to its equivalent for reverse searches. - /// For example, `StartLF` gets translated to `EndLF`. - /// - /// Some assertions, such as `WordUnicode`, remain the same since they - /// match the same positions regardless of the direction of the search. - #[inline] - pub const fn reversed(self) -> Look { - match self { - Look::Start => Look::End, - Look::End => Look::Start, - Look::StartLF => Look::EndLF, - Look::EndLF => Look::StartLF, - Look::StartCRLF => Look::EndCRLF, - Look::EndCRLF => Look::StartCRLF, - Look::WordAscii => Look::WordAscii, - Look::WordAsciiNegate => Look::WordAsciiNegate, - Look::WordUnicode => Look::WordUnicode, - Look::WordUnicodeNegate => Look::WordUnicodeNegate, - Look::WordStartAscii => Look::WordEndAscii, - Look::WordEndAscii => Look::WordStartAscii, - Look::WordStartUnicode => Look::WordEndUnicode, - Look::WordEndUnicode => Look::WordStartUnicode, - Look::WordStartHalfAscii => Look::WordEndHalfAscii, - Look::WordEndHalfAscii => Look::WordStartHalfAscii, - Look::WordStartHalfUnicode => Look::WordEndHalfUnicode, - Look::WordEndHalfUnicode => Look::WordStartHalfUnicode, - } - } - - /// Return the underlying representation of this look-around enumeration - /// as an integer. Giving the return value to the [`Look::from_repr`] - /// constructor is guaranteed to return the same look-around variant that - /// one started with within a semver compatible release of this crate. - #[inline] - pub const fn as_repr(self) -> u32 { - // AFAIK, 'as' is the only way to zero-cost convert an int enum to an - // actual int. - self as u32 - } - - /// Given the underlying representation of a `Look` value, return the - /// corresponding `Look` value if the representation is valid. Otherwise - /// `None` is returned. 
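Reviewer aside: `Look::reversed` above swaps direction-sensitive assertions and leaves symmetric ones alone. A short sketch, not part of this patch, assuming a standalone regex-syntax (~0.8) dependency:

```rust
use regex_syntax::hir::Look;

fn main() {
    // Start/end style assertions flip for reverse searches...
    assert_eq!(Look::EndLF, Look::StartLF.reversed());
    assert_eq!(Look::WordStartAscii, Look::WordEndAscii.reversed());
    // ...while symmetric assertions map to themselves.
    assert_eq!(Look::WordUnicode, Look::WordUnicode.reversed());
}
```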
- #[inline] - pub const fn from_repr(repr: u32) -> Option { - match repr { - 0b00_0000_0000_0000_0001 => Some(Look::Start), - 0b00_0000_0000_0000_0010 => Some(Look::End), - 0b00_0000_0000_0000_0100 => Some(Look::StartLF), - 0b00_0000_0000_0000_1000 => Some(Look::EndLF), - 0b00_0000_0000_0001_0000 => Some(Look::StartCRLF), - 0b00_0000_0000_0010_0000 => Some(Look::EndCRLF), - 0b00_0000_0000_0100_0000 => Some(Look::WordAscii), - 0b00_0000_0000_1000_0000 => Some(Look::WordAsciiNegate), - 0b00_0000_0001_0000_0000 => Some(Look::WordUnicode), - 0b00_0000_0010_0000_0000 => Some(Look::WordUnicodeNegate), - 0b00_0000_0100_0000_0000 => Some(Look::WordStartAscii), - 0b00_0000_1000_0000_0000 => Some(Look::WordEndAscii), - 0b00_0001_0000_0000_0000 => Some(Look::WordStartUnicode), - 0b00_0010_0000_0000_0000 => Some(Look::WordEndUnicode), - 0b00_0100_0000_0000_0000 => Some(Look::WordStartHalfAscii), - 0b00_1000_0000_0000_0000 => Some(Look::WordEndHalfAscii), - 0b01_0000_0000_0000_0000 => Some(Look::WordStartHalfUnicode), - 0b10_0000_0000_0000_0000 => Some(Look::WordEndHalfUnicode), - _ => None, - } - } - - /// Returns a convenient single codepoint representation of this - /// look-around assertion. Each assertion is guaranteed to be represented - /// by a distinct character. - /// - /// This is useful for succinctly representing a look-around assertion in - /// human friendly but succinct output intended for a programmer working on - /// regex internals. - #[inline] - pub const fn as_char(self) -> char { - match self { - Look::Start => 'A', - Look::End => 'z', - Look::StartLF => '^', - Look::EndLF => '$', - Look::StartCRLF => 'r', - Look::EndCRLF => 'R', - Look::WordAscii => 'b', - Look::WordAsciiNegate => 'B', - Look::WordUnicode => '𝛃', - Look::WordUnicodeNegate => '𝚩', - Look::WordStartAscii => '<', - Look::WordEndAscii => '>', - Look::WordStartUnicode => '〈', - Look::WordEndUnicode => '〉', - Look::WordStartHalfAscii => '◁', - Look::WordEndHalfAscii => '▷', - Look::WordStartHalfUnicode => '◀', - Look::WordEndHalfUnicode => '▶', - } - } -} - -/// The high-level intermediate representation for a capturing group. -/// -/// A capturing group always has an index and a child expression. It may -/// also have a name associated with it (e.g., `(?P\w)`), but it's not -/// necessary. -/// -/// Note that there is no explicit representation of a non-capturing group -/// in a `Hir`. Instead, non-capturing grouping is handled automatically by -/// the recursive structure of the `Hir` itself. -#[derive(Clone, Debug, Eq, PartialEq)] -pub struct Capture { - /// The capture index of the capture. - pub index: u32, - /// The name of the capture, if it exists. - pub name: Option>, - /// The expression inside the capturing group, which may be empty. - pub sub: Box, -} - -/// The high-level intermediate representation of a repetition operator. -/// -/// A repetition operator permits the repetition of an arbitrary -/// sub-expression. -#[derive(Clone, Debug, Eq, PartialEq)] -pub struct Repetition { - /// The minimum range of the repetition. - /// - /// Note that special cases like `?`, `+` and `*` all get translated into - /// the ranges `{0,1}`, `{1,}` and `{0,}`, respectively. - /// - /// When `min` is zero, this expression can match the empty string - /// regardless of what its sub-expression is. - pub min: u32, - /// The maximum range of the repetition. - /// - /// Note that when `max` is `None`, `min` acts as a lower bound but where - /// there is no upper bound. 
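Reviewer aside: the integer representation of `Look` round-trips through `from_repr`, and unknown bit patterns are rejected rather than coerced. Illustrative sketch, not part of this patch, assuming a standalone regex-syntax (~0.8) dependency:

```rust
use regex_syntax::hir::Look;

fn main() {
    // as_repr/from_repr round-trip within a semver-compatible release.
    let look = Look::WordStartHalfUnicode;
    assert_eq!(Some(look), Look::from_repr(look.as_repr()));
    // A bit that doesn't correspond to any variant yields None.
    assert_eq!(None, Look::from_repr(1 << 31));
    // Each assertion also has a distinct single-character debug name.
    assert_eq!('b', Look::WordAscii.as_char());
}
```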
For something like `x{5}` where the min and - /// max are equivalent, `min` will be set to `5` and `max` will be set to - /// `Some(5)`. - pub max: Option, - /// Whether this repetition operator is greedy or not. A greedy operator - /// will match as much as it can. A non-greedy operator will match as - /// little as it can. - /// - /// Typically, operators are greedy by default and are only non-greedy when - /// a `?` suffix is used, e.g., `(expr)*` is greedy while `(expr)*?` is - /// not. However, this can be inverted via the `U` "ungreedy" flag. - pub greedy: bool, - /// The expression being repeated. - pub sub: Box, -} - -impl Repetition { - /// Returns a new repetition with the same `min`, `max` and `greedy` - /// values, but with its sub-expression replaced with the one given. - pub fn with(&self, sub: Hir) -> Repetition { - Repetition { - min: self.min, - max: self.max, - greedy: self.greedy, - sub: Box::new(sub), - } - } -} - -/// A type describing the different flavors of `.`. -/// -/// This type is meant to be used with [`Hir::dot`], which is a convenience -/// routine for building HIR values derived from the `.` regex. -#[non_exhaustive] -#[derive(Clone, Copy, Debug, Eq, PartialEq)] -pub enum Dot { - /// Matches the UTF-8 encoding of any Unicode scalar value. - /// - /// This is equivalent to `(?su:.)` and also `\p{any}`. - AnyChar, - /// Matches any byte value. - /// - /// This is equivalent to `(?s-u:.)` and also `(?-u:[\x00-\xFF])`. - AnyByte, - /// Matches the UTF-8 encoding of any Unicode scalar value except for the - /// `char` given. - /// - /// This is equivalent to using `(?u-s:.)` with the line terminator set - /// to a particular ASCII byte. (Because of peculiarities in the regex - /// engines, a line terminator must be a single byte. It follows that when - /// UTF-8 mode is enabled, this single byte must also be a Unicode scalar - /// value. That is, ti must be ASCII.) - /// - /// (This and `AnyCharExceptLF` both exist because of legacy reasons. - /// `AnyCharExceptLF` will be dropped in the next breaking change release.) - AnyCharExcept(char), - /// Matches the UTF-8 encoding of any Unicode scalar value except for `\n`. - /// - /// This is equivalent to `(?u-s:.)` and also `[\p{any}--\n]`. - AnyCharExceptLF, - /// Matches the UTF-8 encoding of any Unicode scalar value except for `\r` - /// and `\n`. - /// - /// This is equivalent to `(?uR-s:.)` and also `[\p{any}--\r\n]`. - AnyCharExceptCRLF, - /// Matches any byte value except for the `u8` given. - /// - /// This is equivalent to using `(?-us:.)` with the line terminator set - /// to a particular ASCII byte. (Because of peculiarities in the regex - /// engines, a line terminator must be a single byte. It follows that when - /// UTF-8 mode is enabled, this single byte must also be a Unicode scalar - /// value. That is, ti must be ASCII.) - /// - /// (This and `AnyByteExceptLF` both exist because of legacy reasons. - /// `AnyByteExceptLF` will be dropped in the next breaking change release.) - AnyByteExcept(u8), - /// Matches any byte value except for `\n`. - /// - /// This is equivalent to `(?-su:.)` and also `(?-u:[[\x00-\xFF]--\n])`. - AnyByteExceptLF, - /// Matches any byte value except for `\r` and `\n`. - /// - /// This is equivalent to `(?R-su:.)` and also `(?-u:[[\x00-\xFF]--\r\n])`. - AnyByteExceptCRLF, -} - -/// A custom `Drop` impl is used for `HirKind` such that it uses constant stack -/// space but heap space proportional to the depth of the total `Hir`. 
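Reviewer aside: a `Repetition` built by hand and passed to the `Hir::repetition` smart constructor carries its bounds through to the computed properties. A minimal sketch, not part of this patch, assuming a standalone regex-syntax (~0.8) dependency:

```rust
use regex_syntax::hir::{Hir, Repetition};

fn main() {
    // `a{2,}?` as HIR: min = 2, no upper bound, non-greedy.
    let rep = Repetition {
        min: 2,
        max: None,
        greedy: false,
        sub: Box::new(Hir::literal("a".as_bytes())),
    };
    let hir = Hir::repetition(rep);
    assert_eq!(Some(2), hir.properties().minimum_len());
    // No upper bound on the repetition means no maximum match length either.
    assert_eq!(None, hir.properties().maximum_len());
}
```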
-impl Drop for Hir { - fn drop(&mut self) { - use core::mem; - - match *self.kind() { - HirKind::Empty - | HirKind::Literal(_) - | HirKind::Class(_) - | HirKind::Look(_) => return, - HirKind::Capture(ref x) if x.sub.kind.subs().is_empty() => return, - HirKind::Repetition(ref x) if x.sub.kind.subs().is_empty() => { - return - } - HirKind::Concat(ref x) if x.is_empty() => return, - HirKind::Alternation(ref x) if x.is_empty() => return, - _ => {} - } - - let mut stack = vec![mem::replace(self, Hir::empty())]; - while let Some(mut expr) = stack.pop() { - match expr.kind { - HirKind::Empty - | HirKind::Literal(_) - | HirKind::Class(_) - | HirKind::Look(_) => {} - HirKind::Capture(ref mut x) => { - stack.push(mem::replace(&mut x.sub, Hir::empty())); - } - HirKind::Repetition(ref mut x) => { - stack.push(mem::replace(&mut x.sub, Hir::empty())); - } - HirKind::Concat(ref mut x) => { - stack.extend(x.drain(..)); - } - HirKind::Alternation(ref mut x) => { - stack.extend(x.drain(..)); - } - } - } - } -} - -/// A type that collects various properties of an HIR value. -/// -/// Properties are always scalar values and represent meta data that is -/// computed inductively on an HIR value. Properties are defined for all -/// HIR values. -/// -/// All methods on a `Properties` value take constant time and are meant to -/// be cheap to call. -#[derive(Clone, Debug, Eq, PartialEq)] -pub struct Properties(Box); - -/// The property definition. It is split out so that we can box it, and -/// there by make `Properties` use less stack size. This is kind-of important -/// because every HIR value has a `Properties` attached to it. -/// -/// This does have the unfortunate consequence that creating any HIR value -/// always leads to at least one alloc for properties, but this is generally -/// true anyway (for pretty much all HirKinds except for look-arounds). -#[derive(Clone, Debug, Eq, PartialEq)] -struct PropertiesI { - minimum_len: Option, - maximum_len: Option, - look_set: LookSet, - look_set_prefix: LookSet, - look_set_suffix: LookSet, - look_set_prefix_any: LookSet, - look_set_suffix_any: LookSet, - utf8: bool, - explicit_captures_len: usize, - static_explicit_captures_len: Option, - literal: bool, - alternation_literal: bool, -} - -impl Properties { - /// Returns the length (in bytes) of the smallest string matched by this - /// HIR. - /// - /// A return value of `0` is possible and occurs when the HIR can match an - /// empty string. - /// - /// `None` is returned when there is no minimum length. This occurs in - /// precisely the cases where the HIR matches nothing. i.e., The language - /// the regex matches is empty. An example of such a regex is `\P{any}`. - #[inline] - pub fn minimum_len(&self) -> Option { - self.0.minimum_len - } - - /// Returns the length (in bytes) of the longest string matched by this - /// HIR. - /// - /// A return value of `0` is possible and occurs when nothing longer than - /// the empty string is in the language described by this HIR. - /// - /// `None` is returned when there is no longest matching string. This - /// occurs when the HIR matches nothing or when there is no upper bound on - /// the length of matching strings. Example of such regexes are `\P{any}` - /// (matches nothing) and `a+` (has no upper bound). - #[inline] - pub fn maximum_len(&self) -> Option { - self.0.maximum_len - } - - /// Returns a set of all look-around assertions that appear at least once - /// in this HIR value. 
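Reviewer aside: because properties are computed inductively while the HIR is built, the accessors above are constant-time field reads. A short sketch, not part of this patch, assuming a standalone regex-syntax (~0.8) dependency:

```rust
use regex_syntax::parse;

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let hir = parse(r"(foo|ba)[0-9]{2,4}")?;
    let props = hir.properties();
    // Shortest match: "ba" plus two digits; longest: "foo" plus four digits.
    assert_eq!(Some(4), props.minimum_len());
    assert_eq!(Some(7), props.maximum_len());
    // One explicit capture group (the implicit whole-match group is not
    // counted).
    assert_eq!(1, props.explicit_captures_len());
    Ok(())
}
```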
- #[inline] - pub fn look_set(&self) -> LookSet { - self.0.look_set - } - - /// Returns a set of all look-around assertions that appear as a prefix for - /// this HIR value. That is, the set returned corresponds to the set of - /// assertions that must be passed before matching any bytes in a haystack. - /// - /// For example, `hir.look_set_prefix().contains(Look::Start)` returns true - /// if and only if the HIR is fully anchored at the start. - #[inline] - pub fn look_set_prefix(&self) -> LookSet { - self.0.look_set_prefix - } - - /// Returns a set of all look-around assertions that appear as a _possible_ - /// prefix for this HIR value. That is, the set returned corresponds to the - /// set of assertions that _may_ be passed before matching any bytes in a - /// haystack. - /// - /// For example, `hir.look_set_prefix_any().contains(Look::Start)` returns - /// true if and only if it's possible for the regex to match through a - /// anchored assertion before consuming any input. - #[inline] - pub fn look_set_prefix_any(&self) -> LookSet { - self.0.look_set_prefix_any - } - - /// Returns a set of all look-around assertions that appear as a suffix for - /// this HIR value. That is, the set returned corresponds to the set of - /// assertions that must be passed in order to be considered a match after - /// all other consuming HIR expressions. - /// - /// For example, `hir.look_set_suffix().contains(Look::End)` returns true - /// if and only if the HIR is fully anchored at the end. - #[inline] - pub fn look_set_suffix(&self) -> LookSet { - self.0.look_set_suffix - } - - /// Returns a set of all look-around assertions that appear as a _possible_ - /// suffix for this HIR value. That is, the set returned corresponds to the - /// set of assertions that _may_ be passed before matching any bytes in a - /// haystack. - /// - /// For example, `hir.look_set_suffix_any().contains(Look::End)` returns - /// true if and only if it's possible for the regex to match through a - /// anchored assertion at the end of a match without consuming any input. - #[inline] - pub fn look_set_suffix_any(&self) -> LookSet { - self.0.look_set_suffix_any - } - - /// Return true if and only if the corresponding HIR will always match - /// valid UTF-8. - /// - /// When this returns false, then it is possible for this HIR expression to - /// match invalid UTF-8, including by matching between the code units of - /// a single UTF-8 encoded codepoint. - /// - /// Note that this returns true even when the corresponding HIR can match - /// the empty string. Since an empty string can technically appear between - /// UTF-8 code units, it is possible for a match to be reported that splits - /// a codepoint which could in turn be considered matching invalid UTF-8. - /// However, it is generally assumed that such empty matches are handled - /// specially by the search routine if it is absolutely required that - /// matches not split a codepoint. - /// - /// # Example - /// - /// This code example shows the UTF-8 property of a variety of patterns. - /// - /// ``` - /// use regex_syntax::{ParserBuilder, parse}; - /// - /// // Examples of 'is_utf8() == true'. 
- /// assert!(parse(r"a")?.properties().is_utf8()); - /// assert!(parse(r"[^a]")?.properties().is_utf8()); - /// assert!(parse(r".")?.properties().is_utf8()); - /// assert!(parse(r"\W")?.properties().is_utf8()); - /// assert!(parse(r"\b")?.properties().is_utf8()); - /// assert!(parse(r"\B")?.properties().is_utf8()); - /// assert!(parse(r"(?-u)\b")?.properties().is_utf8()); - /// assert!(parse(r"(?-u)\B")?.properties().is_utf8()); - /// // Unicode mode is enabled by default, and in - /// // that mode, all \x hex escapes are treated as - /// // codepoints. So this actually matches the UTF-8 - /// // encoding of U+00FF. - /// assert!(parse(r"\xFF")?.properties().is_utf8()); - /// - /// // Now we show examples of 'is_utf8() == false'. - /// // The only way to do this is to force the parser - /// // to permit invalid UTF-8, otherwise all of these - /// // would fail to parse! - /// let parse = |pattern| { - /// ParserBuilder::new().utf8(false).build().parse(pattern) - /// }; - /// assert!(!parse(r"(?-u)[^a]")?.properties().is_utf8()); - /// assert!(!parse(r"(?-u).")?.properties().is_utf8()); - /// assert!(!parse(r"(?-u)\W")?.properties().is_utf8()); - /// // Conversely to the equivalent example above, - /// // when Unicode mode is disabled, \x hex escapes - /// // are treated as their raw byte values. - /// assert!(!parse(r"(?-u)\xFF")?.properties().is_utf8()); - /// // Note that just because we disabled UTF-8 in the - /// // parser doesn't mean we still can't use Unicode. - /// // It is enabled by default, so \xFF is still - /// // equivalent to matching the UTF-8 encoding of - /// // U+00FF by default. - /// assert!(parse(r"\xFF")?.properties().is_utf8()); - /// // Even though we use raw bytes that individually - /// // are not valid UTF-8, when combined together, the - /// // overall expression *does* match valid UTF-8! - /// assert!(parse(r"(?-u)\xE2\x98\x83")?.properties().is_utf8()); - /// - /// # Ok::<(), Box>(()) - /// ``` - #[inline] - pub fn is_utf8(&self) -> bool { - self.0.utf8 - } - - /// Returns the total number of explicit capturing groups in the - /// corresponding HIR. - /// - /// Note that this does not include the implicit capturing group - /// corresponding to the entire match that is typically included by regex - /// engines. - /// - /// # Example - /// - /// This method will return `0` for `a` and `1` for `(a)`: - /// - /// ``` - /// use regex_syntax::parse; - /// - /// assert_eq!(0, parse("a")?.properties().explicit_captures_len()); - /// assert_eq!(1, parse("(a)")?.properties().explicit_captures_len()); - /// - /// # Ok::<(), Box>(()) - /// ``` - #[inline] - pub fn explicit_captures_len(&self) -> usize { - self.0.explicit_captures_len - } - - /// Returns the total number of explicit capturing groups that appear in - /// every possible match. - /// - /// If the number of capture groups can vary depending on the match, then - /// this returns `None`. That is, a value is only returned when the number - /// of matching groups is invariant or "static." - /// - /// Note that this does not include the implicit capturing group - /// corresponding to the entire match. - /// - /// # Example - /// - /// This shows a few cases where a static number of capture groups is - /// available and a few cases where it is not. 
- /// - /// ``` - /// use regex_syntax::parse; - /// - /// let len = |pattern| { - /// parse(pattern).map(|h| { - /// h.properties().static_explicit_captures_len() - /// }) - /// }; - /// - /// assert_eq!(Some(0), len("a")?); - /// assert_eq!(Some(1), len("(a)")?); - /// assert_eq!(Some(1), len("(a)|(b)")?); - /// assert_eq!(Some(2), len("(a)(b)|(c)(d)")?); - /// assert_eq!(None, len("(a)|b")?); - /// assert_eq!(None, len("a|(b)")?); - /// assert_eq!(None, len("(b)*")?); - /// assert_eq!(Some(1), len("(b)+")?); - /// - /// # Ok::<(), Box>(()) - /// ``` - #[inline] - pub fn static_explicit_captures_len(&self) -> Option { - self.0.static_explicit_captures_len - } - - /// Return true if and only if this HIR is a simple literal. This is - /// only true when this HIR expression is either itself a `Literal` or a - /// concatenation of only `Literal`s. - /// - /// For example, `f` and `foo` are literals, but `f+`, `(foo)`, `foo()` and - /// the empty string are not (even though they contain sub-expressions that - /// are literals). - #[inline] - pub fn is_literal(&self) -> bool { - self.0.literal - } - - /// Return true if and only if this HIR is either a simple literal or an - /// alternation of simple literals. This is only - /// true when this HIR expression is either itself a `Literal` or a - /// concatenation of only `Literal`s or an alternation of only `Literal`s. - /// - /// For example, `f`, `foo`, `a|b|c`, and `foo|bar|baz` are alternation - /// literals, but `f+`, `(foo)`, `foo()`, and the empty pattern are not - /// (even though that contain sub-expressions that are literals). - #[inline] - pub fn is_alternation_literal(&self) -> bool { - self.0.alternation_literal - } - - /// Returns the total amount of heap memory usage, in bytes, used by this - /// `Properties` value. - #[inline] - pub fn memory_usage(&self) -> usize { - core::mem::size_of::() - } - - /// Returns a new set of properties that corresponds to the union of the - /// iterator of properties given. - /// - /// This is useful when one has multiple `Hir` expressions and wants - /// to combine them into a single alternation without constructing the - /// corresponding `Hir`. This routine provides a way of combining the - /// properties of each `Hir` expression into one set of properties - /// representing the union of those expressions. - /// - /// # Example: union with HIRs that never match - /// - /// This example shows that unioning properties together with one that - /// represents a regex that never matches will "poison" certain attributes, - /// like the minimum and maximum lengths. - /// - /// ``` - /// use regex_syntax::{hir::Properties, parse}; - /// - /// let hir1 = parse("ab?c?")?; - /// assert_eq!(Some(1), hir1.properties().minimum_len()); - /// assert_eq!(Some(3), hir1.properties().maximum_len()); - /// - /// let hir2 = parse(r"[a&&b]")?; - /// assert_eq!(None, hir2.properties().minimum_len()); - /// assert_eq!(None, hir2.properties().maximum_len()); - /// - /// let hir3 = parse(r"wxy?z?")?; - /// assert_eq!(Some(2), hir3.properties().minimum_len()); - /// assert_eq!(Some(4), hir3.properties().maximum_len()); - /// - /// let unioned = Properties::union([ - /// hir1.properties(), - /// hir2.properties(), - /// hir3.properties(), - /// ]); - /// assert_eq!(None, unioned.minimum_len()); - /// assert_eq!(None, unioned.maximum_len()); - /// - /// # Ok::<(), Box>(()) - /// ``` - /// - /// The maximum length can also be "poisoned" by a pattern that has no - /// upper bound on the length of a match. 
The minimum length remains
-    /// unaffected:
-    ///
-    /// ```
-    /// use regex_syntax::{hir::Properties, parse};
-    ///
-    /// let hir1 = parse("ab?c?")?;
-    /// assert_eq!(Some(1), hir1.properties().minimum_len());
-    /// assert_eq!(Some(3), hir1.properties().maximum_len());
-    ///
-    /// let hir2 = parse(r"a+")?;
-    /// assert_eq!(Some(1), hir2.properties().minimum_len());
-    /// assert_eq!(None, hir2.properties().maximum_len());
-    ///
-    /// let hir3 = parse(r"wxy?z?")?;
-    /// assert_eq!(Some(2), hir3.properties().minimum_len());
-    /// assert_eq!(Some(4), hir3.properties().maximum_len());
-    ///
-    /// let unioned = Properties::union([
-    ///     hir1.properties(),
-    ///     hir2.properties(),
-    ///     hir3.properties(),
-    /// ]);
-    /// assert_eq!(Some(1), unioned.minimum_len());
-    /// assert_eq!(None, unioned.maximum_len());
-    ///
-    /// # Ok::<(), Box<dyn std::error::Error>>(())
-    /// ```
-    pub fn union<I, P>(props: I) -> Properties
-    where
-        I: IntoIterator<Item = P>,
-        P: core::borrow::Borrow<Properties>,
-    {
-        let mut it = props.into_iter().peekable();
-        // While empty alternations aren't possible, we still behave as if they
-        // are. When we have an empty alternate, then clearly the look-around
-        // prefix and suffix is empty. Otherwise, it is the intersection of all
-        // prefixes and suffixes (respectively) of the branches.
-        let fix = if it.peek().is_none() {
-            LookSet::empty()
-        } else {
-            LookSet::full()
-        };
-        // And also, an empty alternate means we have 0 static capture groups,
-        // but we otherwise start with the number corresponding to the first
-        // alternate. If any subsequent alternate has a different number of
-        // static capture groups, then we overall have a variation and not a
-        // static number of groups.
-        let static_explicit_captures_len =
-            it.peek().and_then(|p| p.borrow().static_explicit_captures_len());
-        // The base case is an empty alternation, which matches nothing.
-        // Note though that empty alternations aren't possible, because the
-        // Hir::alternation smart constructor rewrites those as empty character
-        // classes.
-        let mut props = PropertiesI {
-            minimum_len: None,
-            maximum_len: None,
-            look_set: LookSet::empty(),
-            look_set_prefix: fix,
-            look_set_suffix: fix,
-            look_set_prefix_any: LookSet::empty(),
-            look_set_suffix_any: LookSet::empty(),
-            utf8: true,
-            explicit_captures_len: 0,
-            static_explicit_captures_len,
-            literal: false,
-            alternation_literal: true,
-        };
-        let (mut min_poisoned, mut max_poisoned) = (false, false);
-        // Handle properties that need to visit every child hir.
- for prop in it { - let p = prop.borrow(); - props.look_set.set_union(p.look_set()); - props.look_set_prefix.set_intersect(p.look_set_prefix()); - props.look_set_suffix.set_intersect(p.look_set_suffix()); - props.look_set_prefix_any.set_union(p.look_set_prefix_any()); - props.look_set_suffix_any.set_union(p.look_set_suffix_any()); - props.utf8 = props.utf8 && p.is_utf8(); - props.explicit_captures_len = props - .explicit_captures_len - .saturating_add(p.explicit_captures_len()); - if props.static_explicit_captures_len - != p.static_explicit_captures_len() - { - props.static_explicit_captures_len = None; - } - props.alternation_literal = - props.alternation_literal && p.is_literal(); - if !min_poisoned { - if let Some(xmin) = p.minimum_len() { - if props.minimum_len.map_or(true, |pmin| xmin < pmin) { - props.minimum_len = Some(xmin); - } - } else { - props.minimum_len = None; - min_poisoned = true; - } - } - if !max_poisoned { - if let Some(xmax) = p.maximum_len() { - if props.maximum_len.map_or(true, |pmax| xmax > pmax) { - props.maximum_len = Some(xmax); - } - } else { - props.maximum_len = None; - max_poisoned = true; - } - } - } - Properties(Box::new(props)) - } -} - -impl Properties { - /// Create a new set of HIR properties for an empty regex. - fn empty() -> Properties { - let inner = PropertiesI { - minimum_len: Some(0), - maximum_len: Some(0), - look_set: LookSet::empty(), - look_set_prefix: LookSet::empty(), - look_set_suffix: LookSet::empty(), - look_set_prefix_any: LookSet::empty(), - look_set_suffix_any: LookSet::empty(), - // It is debatable whether an empty regex always matches at valid - // UTF-8 boundaries. Strictly speaking, at a byte oriented view, - // it is clearly false. There are, for example, many empty strings - // between the bytes encoding a '☃'. - // - // However, when Unicode mode is enabled, the fundamental atom - // of matching is really a codepoint. And in that scenario, an - // empty regex is defined to only match at valid UTF-8 boundaries - // and to never split a codepoint. It just so happens that this - // enforcement is somewhat tricky to do for regexes that match - // the empty string inside regex engines themselves. It usually - // requires some layer above the regex engine to filter out such - // matches. - // - // In any case, 'true' is really the only coherent option. If it - // were false, for example, then 'a*' would also need to be false - // since it too can match the empty string. - utf8: true, - explicit_captures_len: 0, - static_explicit_captures_len: Some(0), - literal: false, - alternation_literal: false, - }; - Properties(Box::new(inner)) - } - - /// Create a new set of HIR properties for a literal regex. - fn literal(lit: &Literal) -> Properties { - let inner = PropertiesI { - minimum_len: Some(lit.0.len()), - maximum_len: Some(lit.0.len()), - look_set: LookSet::empty(), - look_set_prefix: LookSet::empty(), - look_set_suffix: LookSet::empty(), - look_set_prefix_any: LookSet::empty(), - look_set_suffix_any: LookSet::empty(), - utf8: core::str::from_utf8(&lit.0).is_ok(), - explicit_captures_len: 0, - static_explicit_captures_len: Some(0), - literal: true, - alternation_literal: true, - }; - Properties(Box::new(inner)) - } - - /// Create a new set of HIR properties for a character class. 
- fn class(class: &Class) -> Properties { - let inner = PropertiesI { - minimum_len: class.minimum_len(), - maximum_len: class.maximum_len(), - look_set: LookSet::empty(), - look_set_prefix: LookSet::empty(), - look_set_suffix: LookSet::empty(), - look_set_prefix_any: LookSet::empty(), - look_set_suffix_any: LookSet::empty(), - utf8: class.is_utf8(), - explicit_captures_len: 0, - static_explicit_captures_len: Some(0), - literal: false, - alternation_literal: false, - }; - Properties(Box::new(inner)) - } - - /// Create a new set of HIR properties for a look-around assertion. - fn look(look: Look) -> Properties { - let inner = PropertiesI { - minimum_len: Some(0), - maximum_len: Some(0), - look_set: LookSet::singleton(look), - look_set_prefix: LookSet::singleton(look), - look_set_suffix: LookSet::singleton(look), - look_set_prefix_any: LookSet::singleton(look), - look_set_suffix_any: LookSet::singleton(look), - // This requires a little explanation. Basically, we don't consider - // matching an empty string to be equivalent to matching invalid - // UTF-8, even though technically matching every empty string will - // split the UTF-8 encoding of a single codepoint when treating a - // UTF-8 encoded string as a sequence of bytes. Our defense here is - // that in such a case, a codepoint should logically be treated as - // the fundamental atom for matching, and thus the only valid match - // points are between codepoints and not bytes. - // - // More practically, this is true here because it's also true - // for 'Hir::empty()', otherwise something like 'a*' would be - // considered to match invalid UTF-8. That in turn makes this - // property borderline useless. - utf8: true, - explicit_captures_len: 0, - static_explicit_captures_len: Some(0), - literal: false, - alternation_literal: false, - }; - Properties(Box::new(inner)) - } - - /// Create a new set of HIR properties for a repetition. - fn repetition(rep: &Repetition) -> Properties { - let p = rep.sub.properties(); - let minimum_len = p.minimum_len().map(|child_min| { - let rep_min = usize::try_from(rep.min).unwrap_or(usize::MAX); - child_min.saturating_mul(rep_min) - }); - let maximum_len = rep.max.and_then(|rep_max| { - let rep_max = usize::try_from(rep_max).ok()?; - let child_max = p.maximum_len()?; - child_max.checked_mul(rep_max) - }); - - let mut inner = PropertiesI { - minimum_len, - maximum_len, - look_set: p.look_set(), - look_set_prefix: LookSet::empty(), - look_set_suffix: LookSet::empty(), - look_set_prefix_any: p.look_set_prefix_any(), - look_set_suffix_any: p.look_set_suffix_any(), - utf8: p.is_utf8(), - explicit_captures_len: p.explicit_captures_len(), - static_explicit_captures_len: p.static_explicit_captures_len(), - literal: false, - alternation_literal: false, - }; - // If the repetition operator can match the empty string, then its - // lookset prefix and suffixes themselves remain empty since they are - // no longer required to match. - if rep.min > 0 { - inner.look_set_prefix = p.look_set_prefix(); - inner.look_set_suffix = p.look_set_suffix(); - } - // If the static captures len of the sub-expression is not known or - // is greater than zero, then it automatically propagates to the - // repetition, regardless of the repetition. Otherwise, it might - // change, but only when the repetition can match 0 times. - if rep.min == 0 - && inner.static_explicit_captures_len.map_or(false, |len| len > 0) - { - // If we require a match 0 times, then our captures len is - // guaranteed to be zero. 
Otherwise, if we *can* match the empty - // string, then it's impossible to know how many captures will be - // in the resulting match. - if rep.max == Some(0) { - inner.static_explicit_captures_len = Some(0); - } else { - inner.static_explicit_captures_len = None; - } - } - Properties(Box::new(inner)) - } - - /// Create a new set of HIR properties for a capture. - fn capture(capture: &Capture) -> Properties { - let p = capture.sub.properties(); - Properties(Box::new(PropertiesI { - explicit_captures_len: p.explicit_captures_len().saturating_add(1), - static_explicit_captures_len: p - .static_explicit_captures_len() - .map(|len| len.saturating_add(1)), - literal: false, - alternation_literal: false, - ..*p.0.clone() - })) - } - - /// Create a new set of HIR properties for a concatenation. - fn concat(concat: &[Hir]) -> Properties { - // The base case is an empty concatenation, which matches the empty - // string. Note though that empty concatenations aren't possible, - // because the Hir::concat smart constructor rewrites those as - // Hir::empty. - let mut props = PropertiesI { - minimum_len: Some(0), - maximum_len: Some(0), - look_set: LookSet::empty(), - look_set_prefix: LookSet::empty(), - look_set_suffix: LookSet::empty(), - look_set_prefix_any: LookSet::empty(), - look_set_suffix_any: LookSet::empty(), - utf8: true, - explicit_captures_len: 0, - static_explicit_captures_len: Some(0), - literal: true, - alternation_literal: true, - }; - // Handle properties that need to visit every child hir. - for x in concat.iter() { - let p = x.properties(); - props.look_set.set_union(p.look_set()); - props.utf8 = props.utf8 && p.is_utf8(); - props.explicit_captures_len = props - .explicit_captures_len - .saturating_add(p.explicit_captures_len()); - props.static_explicit_captures_len = p - .static_explicit_captures_len() - .and_then(|len1| { - Some((len1, props.static_explicit_captures_len?)) - }) - .and_then(|(len1, len2)| Some(len1.saturating_add(len2))); - props.literal = props.literal && p.is_literal(); - props.alternation_literal = - props.alternation_literal && p.is_alternation_literal(); - if let Some(minimum_len) = props.minimum_len { - match p.minimum_len() { - None => props.minimum_len = None, - Some(len) => { - // We use saturating arithmetic here because the - // minimum is just a lower bound. We can't go any - // higher than what our number types permit. - props.minimum_len = - Some(minimum_len.saturating_add(len)); - } - } - } - if let Some(maximum_len) = props.maximum_len { - match p.maximum_len() { - None => props.maximum_len = None, - Some(len) => { - props.maximum_len = maximum_len.checked_add(len) - } - } - } - } - // Handle the prefix properties, which only requires visiting - // child exprs until one matches more than the empty string. - let mut it = concat.iter(); - while let Some(x) = it.next() { - props.look_set_prefix.set_union(x.properties().look_set_prefix()); - props - .look_set_prefix_any - .set_union(x.properties().look_set_prefix_any()); - if x.properties().maximum_len().map_or(true, |x| x > 0) { - break; - } - } - // Same thing for the suffix properties, but in reverse. - let mut it = concat.iter().rev(); - while let Some(x) = it.next() { - props.look_set_suffix.set_union(x.properties().look_set_suffix()); - props - .look_set_suffix_any - .set_union(x.properties().look_set_suffix_any()); - if x.properties().maximum_len().map_or(true, |x| x > 0) { - break; - } - } - Properties(Box::new(props)) - } - - /// Create a new set of HIR properties for a concatenation. 
- fn alternation(alts: &[Hir]) -> Properties { - Properties::union(alts.iter().map(|hir| hir.properties())) - } -} - -/// A set of look-around assertions. -/// -/// This is useful for efficiently tracking look-around assertions. For -/// example, an [`Hir`] provides properties that return `LookSet`s. -#[derive(Clone, Copy, Default, Eq, PartialEq)] -pub struct LookSet { - /// The underlying representation this set is exposed to make it possible - /// to store it somewhere efficiently. The representation is that - /// of a bitset, where each assertion occupies bit `i` where `i = - /// Look::as_repr()`. - /// - /// Note that users of this internal representation must permit the full - /// range of `u16` values to be represented. For example, even if the - /// current implementation only makes use of the 10 least significant bits, - /// it may use more bits in a future semver compatible release. - pub bits: u32, -} - -impl LookSet { - /// Create an empty set of look-around assertions. - #[inline] - pub fn empty() -> LookSet { - LookSet { bits: 0 } - } - - /// Create a full set of look-around assertions. - /// - /// This set contains all possible look-around assertions. - #[inline] - pub fn full() -> LookSet { - LookSet { bits: !0 } - } - - /// Create a look-around set containing the look-around assertion given. - /// - /// This is a convenience routine for creating an empty set and inserting - /// one look-around assertions. - #[inline] - pub fn singleton(look: Look) -> LookSet { - LookSet::empty().insert(look) - } - - /// Returns the total number of look-around assertions in this set. - #[inline] - pub fn len(self) -> usize { - // OK because max value always fits in a u8, which in turn always - // fits in a usize, regardless of target. - usize::try_from(self.bits.count_ones()).unwrap() - } - - /// Returns true if and only if this set is empty. - #[inline] - pub fn is_empty(self) -> bool { - self.len() == 0 - } - - /// Returns true if and only if the given look-around assertion is in this - /// set. - #[inline] - pub fn contains(self, look: Look) -> bool { - self.bits & look.as_repr() != 0 - } - - /// Returns true if and only if this set contains any anchor assertions. - /// This includes both "start/end of haystack" and "start/end of line." - #[inline] - pub fn contains_anchor(&self) -> bool { - self.contains_anchor_haystack() || self.contains_anchor_line() - } - - /// Returns true if and only if this set contains any "start/end of - /// haystack" anchors. This doesn't include "start/end of line" anchors. - #[inline] - pub fn contains_anchor_haystack(&self) -> bool { - self.contains(Look::Start) || self.contains(Look::End) - } - - /// Returns true if and only if this set contains any "start/end of line" - /// anchors. This doesn't include "start/end of haystack" anchors. This - /// includes both `\n` line anchors and CRLF (`\r\n`) aware line anchors. - #[inline] - pub fn contains_anchor_line(&self) -> bool { - self.contains(Look::StartLF) - || self.contains(Look::EndLF) - || self.contains(Look::StartCRLF) - || self.contains(Look::EndCRLF) - } - - /// Returns true if and only if this set contains any "start/end of line" - /// anchors that only treat `\n` as line terminators. This does not include - /// haystack anchors or CRLF aware line anchors. - #[inline] - pub fn contains_anchor_lf(&self) -> bool { - self.contains(Look::StartLF) || self.contains(Look::EndLF) - } - - /// Returns true if and only if this set contains any "start/end of line" - /// anchors that are CRLF-aware. 
This doesn't include "start/end of - /// haystack" or "start/end of line-feed" anchors. - #[inline] - pub fn contains_anchor_crlf(&self) -> bool { - self.contains(Look::StartCRLF) || self.contains(Look::EndCRLF) - } - - /// Returns true if and only if this set contains any word boundary or - /// negated word boundary assertions. This include both Unicode and ASCII - /// word boundaries. - #[inline] - pub fn contains_word(self) -> bool { - self.contains_word_unicode() || self.contains_word_ascii() - } - - /// Returns true if and only if this set contains any Unicode word boundary - /// or negated Unicode word boundary assertions. - #[inline] - pub fn contains_word_unicode(self) -> bool { - self.contains(Look::WordUnicode) - || self.contains(Look::WordUnicodeNegate) - || self.contains(Look::WordStartUnicode) - || self.contains(Look::WordEndUnicode) - || self.contains(Look::WordStartHalfUnicode) - || self.contains(Look::WordEndHalfUnicode) - } - - /// Returns true if and only if this set contains any ASCII word boundary - /// or negated ASCII word boundary assertions. - #[inline] - pub fn contains_word_ascii(self) -> bool { - self.contains(Look::WordAscii) - || self.contains(Look::WordAsciiNegate) - || self.contains(Look::WordStartAscii) - || self.contains(Look::WordEndAscii) - || self.contains(Look::WordStartHalfAscii) - || self.contains(Look::WordEndHalfAscii) - } - - /// Returns an iterator over all of the look-around assertions in this set. - #[inline] - pub fn iter(self) -> LookSetIter { - LookSetIter { set: self } - } - - /// Return a new set that is equivalent to the original, but with the given - /// assertion added to it. If the assertion is already in the set, then the - /// returned set is equivalent to the original. - #[inline] - pub fn insert(self, look: Look) -> LookSet { - LookSet { bits: self.bits | look.as_repr() } - } - - /// Updates this set in place with the result of inserting the given - /// assertion into this set. - #[inline] - pub fn set_insert(&mut self, look: Look) { - *self = self.insert(look); - } - - /// Return a new set that is equivalent to the original, but with the given - /// assertion removed from it. If the assertion is not in the set, then the - /// returned set is equivalent to the original. - #[inline] - pub fn remove(self, look: Look) -> LookSet { - LookSet { bits: self.bits & !look.as_repr() } - } - - /// Updates this set in place with the result of removing the given - /// assertion from this set. - #[inline] - pub fn set_remove(&mut self, look: Look) { - *self = self.remove(look); - } - - /// Returns a new set that is the result of subtracting the given set from - /// this set. - #[inline] - pub fn subtract(self, other: LookSet) -> LookSet { - LookSet { bits: self.bits & !other.bits } - } - - /// Updates this set in place with the result of subtracting the given set - /// from this set. - #[inline] - pub fn set_subtract(&mut self, other: LookSet) { - *self = self.subtract(other); - } - - /// Returns a new set that is the union of this and the one given. - #[inline] - pub fn union(self, other: LookSet) -> LookSet { - LookSet { bits: self.bits | other.bits } - } - - /// Updates this set in place with the result of unioning it with the one - /// given. - #[inline] - pub fn set_union(&mut self, other: LookSet) { - *self = self.union(other); - } - - /// Returns a new set that is the intersection of this and the one given. 
- #[inline] - pub fn intersect(self, other: LookSet) -> LookSet { - LookSet { bits: self.bits & other.bits } - } - - /// Updates this set in place with the result of intersecting it with the - /// one given. - #[inline] - pub fn set_intersect(&mut self, other: LookSet) { - *self = self.intersect(other); - } - - /// Return a `LookSet` from the slice given as a native endian 32-bit - /// integer. - /// - /// # Panics - /// - /// This panics if `slice.len() < 4`. - #[inline] - pub fn read_repr(slice: &[u8]) -> LookSet { - let bits = u32::from_ne_bytes(slice[..4].try_into().unwrap()); - LookSet { bits } - } - - /// Write a `LookSet` as a native endian 32-bit integer to the beginning - /// of the slice given. - /// - /// # Panics - /// - /// This panics if `slice.len() < 4`. - #[inline] - pub fn write_repr(self, slice: &mut [u8]) { - let raw = self.bits.to_ne_bytes(); - slice[0] = raw[0]; - slice[1] = raw[1]; - slice[2] = raw[2]; - slice[3] = raw[3]; - } -} - -impl core::fmt::Debug for LookSet { - fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { - if self.is_empty() { - return write!(f, "∅"); - } - for look in self.iter() { - write!(f, "{}", look.as_char())?; - } - Ok(()) - } -} - -/// An iterator over all look-around assertions in a [`LookSet`]. -/// -/// This iterator is created by [`LookSet::iter`]. -#[derive(Clone, Debug)] -pub struct LookSetIter { - set: LookSet, -} - -impl Iterator for LookSetIter { - type Item = Look; - - #[inline] - fn next(&mut self) -> Option { - if self.set.is_empty() { - return None; - } - // We'll never have more than u8::MAX distinct look-around assertions, - // so 'bit' will always fit into a u16. - let bit = u16::try_from(self.set.bits.trailing_zeros()).unwrap(); - let look = Look::from_repr(1 << bit)?; - self.set = self.set.remove(look); - Some(look) - } -} - -/// Given a sequence of HIR values where each value corresponds to a Unicode -/// class (or an all-ASCII byte class), return a single Unicode class -/// corresponding to the union of the classes found. -fn class_chars(hirs: &[Hir]) -> Option { - let mut cls = ClassUnicode::new(vec![]); - for hir in hirs.iter() { - match *hir.kind() { - HirKind::Class(Class::Unicode(ref cls2)) => { - cls.union(cls2); - } - HirKind::Class(Class::Bytes(ref cls2)) => { - cls.union(&cls2.to_unicode_class()?); - } - _ => return None, - }; - } - Some(Class::Unicode(cls)) -} - -/// Given a sequence of HIR values where each value corresponds to a byte class -/// (or an all-ASCII Unicode class), return a single byte class corresponding -/// to the union of the classes found. -fn class_bytes(hirs: &[Hir]) -> Option { - let mut cls = ClassBytes::new(vec![]); - for hir in hirs.iter() { - match *hir.kind() { - HirKind::Class(Class::Unicode(ref cls2)) => { - cls.union(&cls2.to_byte_class()?); - } - HirKind::Class(Class::Bytes(ref cls2)) => { - cls.union(cls2); - } - _ => return None, - }; - } - Some(Class::Bytes(cls)) -} - -/// Given a sequence of HIR values where each value corresponds to a literal -/// that is a single `char`, return that sequence of `char`s. Otherwise return -/// None. No deduplication is done. 
-fn singleton_chars(hirs: &[Hir]) -> Option<Vec<char>> {
-    let mut singletons = vec![];
-    for hir in hirs.iter() {
-        let literal = match *hir.kind() {
-            HirKind::Literal(Literal(ref bytes)) => bytes,
-            _ => return None,
-        };
-        let ch = match crate::debug::utf8_decode(literal) {
-            None => return None,
-            Some(Err(_)) => return None,
-            Some(Ok(ch)) => ch,
-        };
-        if literal.len() != ch.len_utf8() {
-            return None;
-        }
-        singletons.push(ch);
-    }
-    Some(singletons)
-}
-
-/// Given a sequence of HIR values where each value corresponds to a literal
-/// that is a single byte, return that sequence of bytes. Otherwise return
-/// None. No deduplication is done.
-fn singleton_bytes(hirs: &[Hir]) -> Option<Vec<u8>> {
-    let mut singletons = vec![];
-    for hir in hirs.iter() {
-        let literal = match *hir.kind() {
-            HirKind::Literal(Literal(ref bytes)) => bytes,
-            _ => return None,
-        };
-        if literal.len() != 1 {
-            return None;
-        }
-        singletons.push(literal[0]);
-    }
-    Some(singletons)
-}
-
-/// Looks for a common prefix in the list of alternation branches given. If one
-/// is found, then an equivalent but (hopefully) simplified Hir is returned.
-/// Otherwise, the original given list of branches is returned unmodified.
-///
-/// This is not quite as good as it could be. Right now, it requires that
-/// all branches are 'Concat' expressions. It also doesn't do well with
-/// literals. For example, given 'foofoo|foobar', it will not refactor it to
-/// 'foo(?:foo|bar)' because literals are flattened into their own special
-/// concatenation. (One wonders if perhaps 'Literal' should be a single atom
-/// instead of a string of bytes because of this. Otherwise, handling the
-/// current representation in this routine will be pretty gnarly. Sigh.)
-fn lift_common_prefix(hirs: Vec<Hir>) -> Result<Hir, Vec<Hir>> {
-    if hirs.len() <= 1 {
-        return Err(hirs);
-    }
-    let mut prefix = match hirs[0].kind() {
-        HirKind::Concat(ref xs) => &**xs,
-        _ => return Err(hirs),
-    };
-    if prefix.is_empty() {
-        return Err(hirs);
-    }
-    for h in hirs.iter().skip(1) {
-        let concat = match h.kind() {
-            HirKind::Concat(ref xs) => xs,
-            _ => return Err(hirs),
-        };
-        let common_len = prefix
-            .iter()
-            .zip(concat.iter())
-            .take_while(|(x, y)| x == y)
-            .count();
-        prefix = &prefix[..common_len];
-        if prefix.is_empty() {
-            return Err(hirs);
-        }
-    }
-    let len = prefix.len();
-    assert_ne!(0, len);
-    let mut prefix_concat = vec![];
-    let mut suffix_alts = vec![];
-    for h in hirs {
-        let mut concat = match h.into_kind() {
-            HirKind::Concat(xs) => xs,
-            // We required all sub-expressions to be
-            // concats above, so we're only here if we
-            // have a concat.
- _ => unreachable!(), - }; - suffix_alts.push(Hir::concat(concat.split_off(len))); - if prefix_concat.is_empty() { - prefix_concat = concat; - } - } - let mut concat = prefix_concat; - concat.push(Hir::alternation(suffix_alts)); - Ok(Hir::concat(concat)) -} - -#[cfg(test)] -mod tests { - use super::*; - - fn uclass(ranges: &[(char, char)]) -> ClassUnicode { - let ranges: Vec = ranges - .iter() - .map(|&(s, e)| ClassUnicodeRange::new(s, e)) - .collect(); - ClassUnicode::new(ranges) - } - - fn bclass(ranges: &[(u8, u8)]) -> ClassBytes { - let ranges: Vec = - ranges.iter().map(|&(s, e)| ClassBytesRange::new(s, e)).collect(); - ClassBytes::new(ranges) - } - - fn uranges(cls: &ClassUnicode) -> Vec<(char, char)> { - cls.iter().map(|x| (x.start(), x.end())).collect() - } - - #[cfg(feature = "unicode-case")] - fn ucasefold(cls: &ClassUnicode) -> ClassUnicode { - let mut cls_ = cls.clone(); - cls_.case_fold_simple(); - cls_ - } - - fn uunion(cls1: &ClassUnicode, cls2: &ClassUnicode) -> ClassUnicode { - let mut cls_ = cls1.clone(); - cls_.union(cls2); - cls_ - } - - fn uintersect(cls1: &ClassUnicode, cls2: &ClassUnicode) -> ClassUnicode { - let mut cls_ = cls1.clone(); - cls_.intersect(cls2); - cls_ - } - - fn udifference(cls1: &ClassUnicode, cls2: &ClassUnicode) -> ClassUnicode { - let mut cls_ = cls1.clone(); - cls_.difference(cls2); - cls_ - } - - fn usymdifference( - cls1: &ClassUnicode, - cls2: &ClassUnicode, - ) -> ClassUnicode { - let mut cls_ = cls1.clone(); - cls_.symmetric_difference(cls2); - cls_ - } - - fn unegate(cls: &ClassUnicode) -> ClassUnicode { - let mut cls_ = cls.clone(); - cls_.negate(); - cls_ - } - - fn branges(cls: &ClassBytes) -> Vec<(u8, u8)> { - cls.iter().map(|x| (x.start(), x.end())).collect() - } - - fn bcasefold(cls: &ClassBytes) -> ClassBytes { - let mut cls_ = cls.clone(); - cls_.case_fold_simple(); - cls_ - } - - fn bunion(cls1: &ClassBytes, cls2: &ClassBytes) -> ClassBytes { - let mut cls_ = cls1.clone(); - cls_.union(cls2); - cls_ - } - - fn bintersect(cls1: &ClassBytes, cls2: &ClassBytes) -> ClassBytes { - let mut cls_ = cls1.clone(); - cls_.intersect(cls2); - cls_ - } - - fn bdifference(cls1: &ClassBytes, cls2: &ClassBytes) -> ClassBytes { - let mut cls_ = cls1.clone(); - cls_.difference(cls2); - cls_ - } - - fn bsymdifference(cls1: &ClassBytes, cls2: &ClassBytes) -> ClassBytes { - let mut cls_ = cls1.clone(); - cls_.symmetric_difference(cls2); - cls_ - } - - fn bnegate(cls: &ClassBytes) -> ClassBytes { - let mut cls_ = cls.clone(); - cls_.negate(); - cls_ - } - - #[test] - fn class_range_canonical_unicode() { - let range = ClassUnicodeRange::new('\u{00FF}', '\0'); - assert_eq!('\0', range.start()); - assert_eq!('\u{00FF}', range.end()); - } - - #[test] - fn class_range_canonical_bytes() { - let range = ClassBytesRange::new(b'\xFF', b'\0'); - assert_eq!(b'\0', range.start()); - assert_eq!(b'\xFF', range.end()); - } - - #[test] - fn class_canonicalize_unicode() { - let cls = uclass(&[('a', 'c'), ('x', 'z')]); - let expected = vec![('a', 'c'), ('x', 'z')]; - assert_eq!(expected, uranges(&cls)); - - let cls = uclass(&[('x', 'z'), ('a', 'c')]); - let expected = vec![('a', 'c'), ('x', 'z')]; - assert_eq!(expected, uranges(&cls)); - - let cls = uclass(&[('x', 'z'), ('w', 'y')]); - let expected = vec![('w', 'z')]; - assert_eq!(expected, uranges(&cls)); - - let cls = uclass(&[ - ('c', 'f'), - ('a', 'g'), - ('d', 'j'), - ('a', 'c'), - ('m', 'p'), - ('l', 's'), - ]); - let expected = vec![('a', 'j'), ('l', 's')]; - assert_eq!(expected, uranges(&cls)); - - let cls = 
uclass(&[('x', 'z'), ('u', 'w')]); - let expected = vec![('u', 'z')]; - assert_eq!(expected, uranges(&cls)); - - let cls = uclass(&[('\x00', '\u{10FFFF}'), ('\x00', '\u{10FFFF}')]); - let expected = vec![('\x00', '\u{10FFFF}')]; - assert_eq!(expected, uranges(&cls)); - - let cls = uclass(&[('a', 'a'), ('b', 'b')]); - let expected = vec![('a', 'b')]; - assert_eq!(expected, uranges(&cls)); - } - - #[test] - fn class_canonicalize_bytes() { - let cls = bclass(&[(b'a', b'c'), (b'x', b'z')]); - let expected = vec![(b'a', b'c'), (b'x', b'z')]; - assert_eq!(expected, branges(&cls)); - - let cls = bclass(&[(b'x', b'z'), (b'a', b'c')]); - let expected = vec![(b'a', b'c'), (b'x', b'z')]; - assert_eq!(expected, branges(&cls)); - - let cls = bclass(&[(b'x', b'z'), (b'w', b'y')]); - let expected = vec![(b'w', b'z')]; - assert_eq!(expected, branges(&cls)); - - let cls = bclass(&[ - (b'c', b'f'), - (b'a', b'g'), - (b'd', b'j'), - (b'a', b'c'), - (b'm', b'p'), - (b'l', b's'), - ]); - let expected = vec![(b'a', b'j'), (b'l', b's')]; - assert_eq!(expected, branges(&cls)); - - let cls = bclass(&[(b'x', b'z'), (b'u', b'w')]); - let expected = vec![(b'u', b'z')]; - assert_eq!(expected, branges(&cls)); - - let cls = bclass(&[(b'\x00', b'\xFF'), (b'\x00', b'\xFF')]); - let expected = vec![(b'\x00', b'\xFF')]; - assert_eq!(expected, branges(&cls)); - - let cls = bclass(&[(b'a', b'a'), (b'b', b'b')]); - let expected = vec![(b'a', b'b')]; - assert_eq!(expected, branges(&cls)); - } - - #[test] - #[cfg(feature = "unicode-case")] - fn class_case_fold_unicode() { - let cls = uclass(&[ - ('C', 'F'), - ('A', 'G'), - ('D', 'J'), - ('A', 'C'), - ('M', 'P'), - ('L', 'S'), - ('c', 'f'), - ]); - let expected = uclass(&[ - ('A', 'J'), - ('L', 'S'), - ('a', 'j'), - ('l', 's'), - ('\u{17F}', '\u{17F}'), - ]); - assert_eq!(expected, ucasefold(&cls)); - - let cls = uclass(&[('A', 'Z')]); - let expected = uclass(&[ - ('A', 'Z'), - ('a', 'z'), - ('\u{17F}', '\u{17F}'), - ('\u{212A}', '\u{212A}'), - ]); - assert_eq!(expected, ucasefold(&cls)); - - let cls = uclass(&[('a', 'z')]); - let expected = uclass(&[ - ('A', 'Z'), - ('a', 'z'), - ('\u{17F}', '\u{17F}'), - ('\u{212A}', '\u{212A}'), - ]); - assert_eq!(expected, ucasefold(&cls)); - - let cls = uclass(&[('A', 'A'), ('_', '_')]); - let expected = uclass(&[('A', 'A'), ('_', '_'), ('a', 'a')]); - assert_eq!(expected, ucasefold(&cls)); - - let cls = uclass(&[('A', 'A'), ('=', '=')]); - let expected = uclass(&[('=', '='), ('A', 'A'), ('a', 'a')]); - assert_eq!(expected, ucasefold(&cls)); - - let cls = uclass(&[('\x00', '\x10')]); - assert_eq!(cls, ucasefold(&cls)); - - let cls = uclass(&[('k', 'k')]); - let expected = - uclass(&[('K', 'K'), ('k', 'k'), ('\u{212A}', '\u{212A}')]); - assert_eq!(expected, ucasefold(&cls)); - - let cls = uclass(&[('@', '@')]); - assert_eq!(cls, ucasefold(&cls)); - } - - #[test] - #[cfg(not(feature = "unicode-case"))] - fn class_case_fold_unicode_disabled() { - let mut cls = uclass(&[ - ('C', 'F'), - ('A', 'G'), - ('D', 'J'), - ('A', 'C'), - ('M', 'P'), - ('L', 'S'), - ('c', 'f'), - ]); - assert!(cls.try_case_fold_simple().is_err()); - } - - #[test] - #[should_panic] - #[cfg(not(feature = "unicode-case"))] - fn class_case_fold_unicode_disabled_panics() { - let mut cls = uclass(&[ - ('C', 'F'), - ('A', 'G'), - ('D', 'J'), - ('A', 'C'), - ('M', 'P'), - ('L', 'S'), - ('c', 'f'), - ]); - cls.case_fold_simple(); - } - - #[test] - fn class_case_fold_bytes() { - let cls = bclass(&[ - (b'C', b'F'), - (b'A', b'G'), - (b'D', b'J'), - (b'A', b'C'), - (b'M', b'P'), - 
(b'L', b'S'), - (b'c', b'f'), - ]); - let expected = - bclass(&[(b'A', b'J'), (b'L', b'S'), (b'a', b'j'), (b'l', b's')]); - assert_eq!(expected, bcasefold(&cls)); - - let cls = bclass(&[(b'A', b'Z')]); - let expected = bclass(&[(b'A', b'Z'), (b'a', b'z')]); - assert_eq!(expected, bcasefold(&cls)); - - let cls = bclass(&[(b'a', b'z')]); - let expected = bclass(&[(b'A', b'Z'), (b'a', b'z')]); - assert_eq!(expected, bcasefold(&cls)); - - let cls = bclass(&[(b'A', b'A'), (b'_', b'_')]); - let expected = bclass(&[(b'A', b'A'), (b'_', b'_'), (b'a', b'a')]); - assert_eq!(expected, bcasefold(&cls)); - - let cls = bclass(&[(b'A', b'A'), (b'=', b'=')]); - let expected = bclass(&[(b'=', b'='), (b'A', b'A'), (b'a', b'a')]); - assert_eq!(expected, bcasefold(&cls)); - - let cls = bclass(&[(b'\x00', b'\x10')]); - assert_eq!(cls, bcasefold(&cls)); - - let cls = bclass(&[(b'k', b'k')]); - let expected = bclass(&[(b'K', b'K'), (b'k', b'k')]); - assert_eq!(expected, bcasefold(&cls)); - - let cls = bclass(&[(b'@', b'@')]); - assert_eq!(cls, bcasefold(&cls)); - } - - #[test] - fn class_negate_unicode() { - let cls = uclass(&[('a', 'a')]); - let expected = uclass(&[('\x00', '\x60'), ('\x62', '\u{10FFFF}')]); - assert_eq!(expected, unegate(&cls)); - - let cls = uclass(&[('a', 'a'), ('b', 'b')]); - let expected = uclass(&[('\x00', '\x60'), ('\x63', '\u{10FFFF}')]); - assert_eq!(expected, unegate(&cls)); - - let cls = uclass(&[('a', 'c'), ('x', 'z')]); - let expected = uclass(&[ - ('\x00', '\x60'), - ('\x64', '\x77'), - ('\x7B', '\u{10FFFF}'), - ]); - assert_eq!(expected, unegate(&cls)); - - let cls = uclass(&[('\x00', 'a')]); - let expected = uclass(&[('\x62', '\u{10FFFF}')]); - assert_eq!(expected, unegate(&cls)); - - let cls = uclass(&[('a', '\u{10FFFF}')]); - let expected = uclass(&[('\x00', '\x60')]); - assert_eq!(expected, unegate(&cls)); - - let cls = uclass(&[('\x00', '\u{10FFFF}')]); - let expected = uclass(&[]); - assert_eq!(expected, unegate(&cls)); - - let cls = uclass(&[]); - let expected = uclass(&[('\x00', '\u{10FFFF}')]); - assert_eq!(expected, unegate(&cls)); - - let cls = - uclass(&[('\x00', '\u{10FFFD}'), ('\u{10FFFF}', '\u{10FFFF}')]); - let expected = uclass(&[('\u{10FFFE}', '\u{10FFFE}')]); - assert_eq!(expected, unegate(&cls)); - - let cls = uclass(&[('\x00', '\u{D7FF}')]); - let expected = uclass(&[('\u{E000}', '\u{10FFFF}')]); - assert_eq!(expected, unegate(&cls)); - - let cls = uclass(&[('\x00', '\u{D7FE}')]); - let expected = uclass(&[('\u{D7FF}', '\u{10FFFF}')]); - assert_eq!(expected, unegate(&cls)); - - let cls = uclass(&[('\u{E000}', '\u{10FFFF}')]); - let expected = uclass(&[('\x00', '\u{D7FF}')]); - assert_eq!(expected, unegate(&cls)); - - let cls = uclass(&[('\u{E001}', '\u{10FFFF}')]); - let expected = uclass(&[('\x00', '\u{E000}')]); - assert_eq!(expected, unegate(&cls)); - } - - #[test] - fn class_negate_bytes() { - let cls = bclass(&[(b'a', b'a')]); - let expected = bclass(&[(b'\x00', b'\x60'), (b'\x62', b'\xFF')]); - assert_eq!(expected, bnegate(&cls)); - - let cls = bclass(&[(b'a', b'a'), (b'b', b'b')]); - let expected = bclass(&[(b'\x00', b'\x60'), (b'\x63', b'\xFF')]); - assert_eq!(expected, bnegate(&cls)); - - let cls = bclass(&[(b'a', b'c'), (b'x', b'z')]); - let expected = bclass(&[ - (b'\x00', b'\x60'), - (b'\x64', b'\x77'), - (b'\x7B', b'\xFF'), - ]); - assert_eq!(expected, bnegate(&cls)); - - let cls = bclass(&[(b'\x00', b'a')]); - let expected = bclass(&[(b'\x62', b'\xFF')]); - assert_eq!(expected, bnegate(&cls)); - - let cls = bclass(&[(b'a', b'\xFF')]); - let 
expected = bclass(&[(b'\x00', b'\x60')]); - assert_eq!(expected, bnegate(&cls)); - - let cls = bclass(&[(b'\x00', b'\xFF')]); - let expected = bclass(&[]); - assert_eq!(expected, bnegate(&cls)); - - let cls = bclass(&[]); - let expected = bclass(&[(b'\x00', b'\xFF')]); - assert_eq!(expected, bnegate(&cls)); - - let cls = bclass(&[(b'\x00', b'\xFD'), (b'\xFF', b'\xFF')]); - let expected = bclass(&[(b'\xFE', b'\xFE')]); - assert_eq!(expected, bnegate(&cls)); - } - - #[test] - fn class_union_unicode() { - let cls1 = uclass(&[('a', 'g'), ('m', 't'), ('A', 'C')]); - let cls2 = uclass(&[('a', 'z')]); - let expected = uclass(&[('a', 'z'), ('A', 'C')]); - assert_eq!(expected, uunion(&cls1, &cls2)); - } - - #[test] - fn class_union_bytes() { - let cls1 = bclass(&[(b'a', b'g'), (b'm', b't'), (b'A', b'C')]); - let cls2 = bclass(&[(b'a', b'z')]); - let expected = bclass(&[(b'a', b'z'), (b'A', b'C')]); - assert_eq!(expected, bunion(&cls1, &cls2)); - } - - #[test] - fn class_intersect_unicode() { - let cls1 = uclass(&[]); - let cls2 = uclass(&[('a', 'a')]); - let expected = uclass(&[]); - assert_eq!(expected, uintersect(&cls1, &cls2)); - - let cls1 = uclass(&[('a', 'a')]); - let cls2 = uclass(&[('a', 'a')]); - let expected = uclass(&[('a', 'a')]); - assert_eq!(expected, uintersect(&cls1, &cls2)); - - let cls1 = uclass(&[('a', 'a')]); - let cls2 = uclass(&[('b', 'b')]); - let expected = uclass(&[]); - assert_eq!(expected, uintersect(&cls1, &cls2)); - - let cls1 = uclass(&[('a', 'a')]); - let cls2 = uclass(&[('a', 'c')]); - let expected = uclass(&[('a', 'a')]); - assert_eq!(expected, uintersect(&cls1, &cls2)); - - let cls1 = uclass(&[('a', 'b')]); - let cls2 = uclass(&[('a', 'c')]); - let expected = uclass(&[('a', 'b')]); - assert_eq!(expected, uintersect(&cls1, &cls2)); - - let cls1 = uclass(&[('a', 'b')]); - let cls2 = uclass(&[('b', 'c')]); - let expected = uclass(&[('b', 'b')]); - assert_eq!(expected, uintersect(&cls1, &cls2)); - - let cls1 = uclass(&[('a', 'b')]); - let cls2 = uclass(&[('c', 'd')]); - let expected = uclass(&[]); - assert_eq!(expected, uintersect(&cls1, &cls2)); - - let cls1 = uclass(&[('b', 'c')]); - let cls2 = uclass(&[('a', 'd')]); - let expected = uclass(&[('b', 'c')]); - assert_eq!(expected, uintersect(&cls1, &cls2)); - - let cls1 = uclass(&[('a', 'b'), ('d', 'e'), ('g', 'h')]); - let cls2 = uclass(&[('a', 'h')]); - let expected = uclass(&[('a', 'b'), ('d', 'e'), ('g', 'h')]); - assert_eq!(expected, uintersect(&cls1, &cls2)); - - let cls1 = uclass(&[('a', 'b'), ('d', 'e'), ('g', 'h')]); - let cls2 = uclass(&[('a', 'b'), ('d', 'e'), ('g', 'h')]); - let expected = uclass(&[('a', 'b'), ('d', 'e'), ('g', 'h')]); - assert_eq!(expected, uintersect(&cls1, &cls2)); - - let cls1 = uclass(&[('a', 'b'), ('g', 'h')]); - let cls2 = uclass(&[('d', 'e'), ('k', 'l')]); - let expected = uclass(&[]); - assert_eq!(expected, uintersect(&cls1, &cls2)); - - let cls1 = uclass(&[('a', 'b'), ('d', 'e'), ('g', 'h')]); - let cls2 = uclass(&[('h', 'h')]); - let expected = uclass(&[('h', 'h')]); - assert_eq!(expected, uintersect(&cls1, &cls2)); - - let cls1 = uclass(&[('a', 'b'), ('e', 'f'), ('i', 'j')]); - let cls2 = uclass(&[('c', 'd'), ('g', 'h'), ('k', 'l')]); - let expected = uclass(&[]); - assert_eq!(expected, uintersect(&cls1, &cls2)); - - let cls1 = uclass(&[('a', 'b'), ('c', 'd'), ('e', 'f')]); - let cls2 = uclass(&[('b', 'c'), ('d', 'e'), ('f', 'g')]); - let expected = uclass(&[('b', 'f')]); - assert_eq!(expected, uintersect(&cls1, &cls2)); - } - - #[test] - fn class_intersect_bytes() { - let cls1 
= bclass(&[]); - let cls2 = bclass(&[(b'a', b'a')]); - let expected = bclass(&[]); - assert_eq!(expected, bintersect(&cls1, &cls2)); - - let cls1 = bclass(&[(b'a', b'a')]); - let cls2 = bclass(&[(b'a', b'a')]); - let expected = bclass(&[(b'a', b'a')]); - assert_eq!(expected, bintersect(&cls1, &cls2)); - - let cls1 = bclass(&[(b'a', b'a')]); - let cls2 = bclass(&[(b'b', b'b')]); - let expected = bclass(&[]); - assert_eq!(expected, bintersect(&cls1, &cls2)); - - let cls1 = bclass(&[(b'a', b'a')]); - let cls2 = bclass(&[(b'a', b'c')]); - let expected = bclass(&[(b'a', b'a')]); - assert_eq!(expected, bintersect(&cls1, &cls2)); - - let cls1 = bclass(&[(b'a', b'b')]); - let cls2 = bclass(&[(b'a', b'c')]); - let expected = bclass(&[(b'a', b'b')]); - assert_eq!(expected, bintersect(&cls1, &cls2)); - - let cls1 = bclass(&[(b'a', b'b')]); - let cls2 = bclass(&[(b'b', b'c')]); - let expected = bclass(&[(b'b', b'b')]); - assert_eq!(expected, bintersect(&cls1, &cls2)); - - let cls1 = bclass(&[(b'a', b'b')]); - let cls2 = bclass(&[(b'c', b'd')]); - let expected = bclass(&[]); - assert_eq!(expected, bintersect(&cls1, &cls2)); - - let cls1 = bclass(&[(b'b', b'c')]); - let cls2 = bclass(&[(b'a', b'd')]); - let expected = bclass(&[(b'b', b'c')]); - assert_eq!(expected, bintersect(&cls1, &cls2)); - - let cls1 = bclass(&[(b'a', b'b'), (b'd', b'e'), (b'g', b'h')]); - let cls2 = bclass(&[(b'a', b'h')]); - let expected = bclass(&[(b'a', b'b'), (b'd', b'e'), (b'g', b'h')]); - assert_eq!(expected, bintersect(&cls1, &cls2)); - - let cls1 = bclass(&[(b'a', b'b'), (b'd', b'e'), (b'g', b'h')]); - let cls2 = bclass(&[(b'a', b'b'), (b'd', b'e'), (b'g', b'h')]); - let expected = bclass(&[(b'a', b'b'), (b'd', b'e'), (b'g', b'h')]); - assert_eq!(expected, bintersect(&cls1, &cls2)); - - let cls1 = bclass(&[(b'a', b'b'), (b'g', b'h')]); - let cls2 = bclass(&[(b'd', b'e'), (b'k', b'l')]); - let expected = bclass(&[]); - assert_eq!(expected, bintersect(&cls1, &cls2)); - - let cls1 = bclass(&[(b'a', b'b'), (b'd', b'e'), (b'g', b'h')]); - let cls2 = bclass(&[(b'h', b'h')]); - let expected = bclass(&[(b'h', b'h')]); - assert_eq!(expected, bintersect(&cls1, &cls2)); - - let cls1 = bclass(&[(b'a', b'b'), (b'e', b'f'), (b'i', b'j')]); - let cls2 = bclass(&[(b'c', b'd'), (b'g', b'h'), (b'k', b'l')]); - let expected = bclass(&[]); - assert_eq!(expected, bintersect(&cls1, &cls2)); - - let cls1 = bclass(&[(b'a', b'b'), (b'c', b'd'), (b'e', b'f')]); - let cls2 = bclass(&[(b'b', b'c'), (b'd', b'e'), (b'f', b'g')]); - let expected = bclass(&[(b'b', b'f')]); - assert_eq!(expected, bintersect(&cls1, &cls2)); - } - - #[test] - fn class_difference_unicode() { - let cls1 = uclass(&[('a', 'a')]); - let cls2 = uclass(&[('a', 'a')]); - let expected = uclass(&[]); - assert_eq!(expected, udifference(&cls1, &cls2)); - - let cls1 = uclass(&[('a', 'a')]); - let cls2 = uclass(&[]); - let expected = uclass(&[('a', 'a')]); - assert_eq!(expected, udifference(&cls1, &cls2)); - - let cls1 = uclass(&[]); - let cls2 = uclass(&[('a', 'a')]); - let expected = uclass(&[]); - assert_eq!(expected, udifference(&cls1, &cls2)); - - let cls1 = uclass(&[('a', 'z')]); - let cls2 = uclass(&[('a', 'a')]); - let expected = uclass(&[('b', 'z')]); - assert_eq!(expected, udifference(&cls1, &cls2)); - - let cls1 = uclass(&[('a', 'z')]); - let cls2 = uclass(&[('z', 'z')]); - let expected = uclass(&[('a', 'y')]); - assert_eq!(expected, udifference(&cls1, &cls2)); - - let cls1 = uclass(&[('a', 'z')]); - let cls2 = uclass(&[('m', 'm')]); - let expected = uclass(&[('a', 'l'), ('n', 
'z')]); - assert_eq!(expected, udifference(&cls1, &cls2)); - - let cls1 = uclass(&[('a', 'c'), ('g', 'i'), ('r', 't')]); - let cls2 = uclass(&[('a', 'z')]); - let expected = uclass(&[]); - assert_eq!(expected, udifference(&cls1, &cls2)); - - let cls1 = uclass(&[('a', 'c'), ('g', 'i'), ('r', 't')]); - let cls2 = uclass(&[('d', 'v')]); - let expected = uclass(&[('a', 'c')]); - assert_eq!(expected, udifference(&cls1, &cls2)); - - let cls1 = uclass(&[('a', 'c'), ('g', 'i'), ('r', 't')]); - let cls2 = uclass(&[('b', 'g'), ('s', 'u')]); - let expected = uclass(&[('a', 'a'), ('h', 'i'), ('r', 'r')]); - assert_eq!(expected, udifference(&cls1, &cls2)); - - let cls1 = uclass(&[('a', 'c'), ('g', 'i'), ('r', 't')]); - let cls2 = uclass(&[('b', 'd'), ('e', 'g'), ('s', 'u')]); - let expected = uclass(&[('a', 'a'), ('h', 'i'), ('r', 'r')]); - assert_eq!(expected, udifference(&cls1, &cls2)); - - let cls1 = uclass(&[('x', 'z')]); - let cls2 = uclass(&[('a', 'c'), ('e', 'g'), ('s', 'u')]); - let expected = uclass(&[('x', 'z')]); - assert_eq!(expected, udifference(&cls1, &cls2)); - - let cls1 = uclass(&[('a', 'z')]); - let cls2 = uclass(&[('a', 'c'), ('e', 'g'), ('s', 'u')]); - let expected = uclass(&[('d', 'd'), ('h', 'r'), ('v', 'z')]); - assert_eq!(expected, udifference(&cls1, &cls2)); - } - - #[test] - fn class_difference_bytes() { - let cls1 = bclass(&[(b'a', b'a')]); - let cls2 = bclass(&[(b'a', b'a')]); - let expected = bclass(&[]); - assert_eq!(expected, bdifference(&cls1, &cls2)); - - let cls1 = bclass(&[(b'a', b'a')]); - let cls2 = bclass(&[]); - let expected = bclass(&[(b'a', b'a')]); - assert_eq!(expected, bdifference(&cls1, &cls2)); - - let cls1 = bclass(&[]); - let cls2 = bclass(&[(b'a', b'a')]); - let expected = bclass(&[]); - assert_eq!(expected, bdifference(&cls1, &cls2)); - - let cls1 = bclass(&[(b'a', b'z')]); - let cls2 = bclass(&[(b'a', b'a')]); - let expected = bclass(&[(b'b', b'z')]); - assert_eq!(expected, bdifference(&cls1, &cls2)); - - let cls1 = bclass(&[(b'a', b'z')]); - let cls2 = bclass(&[(b'z', b'z')]); - let expected = bclass(&[(b'a', b'y')]); - assert_eq!(expected, bdifference(&cls1, &cls2)); - - let cls1 = bclass(&[(b'a', b'z')]); - let cls2 = bclass(&[(b'm', b'm')]); - let expected = bclass(&[(b'a', b'l'), (b'n', b'z')]); - assert_eq!(expected, bdifference(&cls1, &cls2)); - - let cls1 = bclass(&[(b'a', b'c'), (b'g', b'i'), (b'r', b't')]); - let cls2 = bclass(&[(b'a', b'z')]); - let expected = bclass(&[]); - assert_eq!(expected, bdifference(&cls1, &cls2)); - - let cls1 = bclass(&[(b'a', b'c'), (b'g', b'i'), (b'r', b't')]); - let cls2 = bclass(&[(b'd', b'v')]); - let expected = bclass(&[(b'a', b'c')]); - assert_eq!(expected, bdifference(&cls1, &cls2)); - - let cls1 = bclass(&[(b'a', b'c'), (b'g', b'i'), (b'r', b't')]); - let cls2 = bclass(&[(b'b', b'g'), (b's', b'u')]); - let expected = bclass(&[(b'a', b'a'), (b'h', b'i'), (b'r', b'r')]); - assert_eq!(expected, bdifference(&cls1, &cls2)); - - let cls1 = bclass(&[(b'a', b'c'), (b'g', b'i'), (b'r', b't')]); - let cls2 = bclass(&[(b'b', b'd'), (b'e', b'g'), (b's', b'u')]); - let expected = bclass(&[(b'a', b'a'), (b'h', b'i'), (b'r', b'r')]); - assert_eq!(expected, bdifference(&cls1, &cls2)); - - let cls1 = bclass(&[(b'x', b'z')]); - let cls2 = bclass(&[(b'a', b'c'), (b'e', b'g'), (b's', b'u')]); - let expected = bclass(&[(b'x', b'z')]); - assert_eq!(expected, bdifference(&cls1, &cls2)); - - let cls1 = bclass(&[(b'a', b'z')]); - let cls2 = bclass(&[(b'a', b'c'), (b'e', b'g'), (b's', b'u')]); - let expected = bclass(&[(b'd', 
b'd'), (b'h', b'r'), (b'v', b'z')]); - assert_eq!(expected, bdifference(&cls1, &cls2)); - } - - #[test] - fn class_symmetric_difference_unicode() { - let cls1 = uclass(&[('a', 'm')]); - let cls2 = uclass(&[('g', 't')]); - let expected = uclass(&[('a', 'f'), ('n', 't')]); - assert_eq!(expected, usymdifference(&cls1, &cls2)); - } - - #[test] - fn class_symmetric_difference_bytes() { - let cls1 = bclass(&[(b'a', b'm')]); - let cls2 = bclass(&[(b'g', b't')]); - let expected = bclass(&[(b'a', b'f'), (b'n', b't')]); - assert_eq!(expected, bsymdifference(&cls1, &cls2)); - } - - // We use a thread with an explicit stack size to test that our destructor - // for Hir can handle arbitrarily sized expressions in constant stack - // space. In case we run on a platform without threads (WASM?), we limit - // this test to Windows/Unix. - #[test] - #[cfg(any(unix, windows))] - fn no_stack_overflow_on_drop() { - use std::thread; - - let run = || { - let mut expr = Hir::empty(); - for _ in 0..100 { - expr = Hir::capture(Capture { - index: 1, - name: None, - sub: Box::new(expr), - }); - expr = Hir::repetition(Repetition { - min: 0, - max: Some(1), - greedy: true, - sub: Box::new(expr), - }); - - expr = Hir { - kind: HirKind::Concat(vec![expr]), - props: Properties::empty(), - }; - expr = Hir { - kind: HirKind::Alternation(vec![expr]), - props: Properties::empty(), - }; - } - assert!(!matches!(*expr.kind(), HirKind::Empty)); - }; - - // We run our test on a thread with a small stack size so we can - // force the issue more easily. - // - // NOTE(2023-03-21): See the corresponding test in 'crate::ast::tests' - // for context on the specific stack size chosen here. - thread::Builder::new() - .stack_size(16 << 10) - .spawn(run) - .unwrap() - .join() - .unwrap(); - } - - #[test] - fn look_set_iter() { - let set = LookSet::empty(); - assert_eq!(0, set.iter().count()); - - let set = LookSet::full(); - assert_eq!(18, set.iter().count()); - - let set = - LookSet::empty().insert(Look::StartLF).insert(Look::WordUnicode); - assert_eq!(2, set.iter().count()); - - let set = LookSet::empty().insert(Look::StartLF); - assert_eq!(1, set.iter().count()); - - let set = LookSet::empty().insert(Look::WordAsciiNegate); - assert_eq!(1, set.iter().count()); - } - - #[test] - fn look_set_debug() { - let res = format!("{:?}", LookSet::empty()); - assert_eq!("∅", res); - let res = format!("{:?}", LookSet::full()); - assert_eq!("Az^$rRbB𝛃𝚩<>〈〉◁▷◀▶", res); - } -} diff --git a/vendor/regex-syntax/src/hir/print.rs b/vendor/regex-syntax/src/hir/print.rs deleted file mode 100644 index 89db08c25bfaf8..00000000000000 --- a/vendor/regex-syntax/src/hir/print.rs +++ /dev/null @@ -1,608 +0,0 @@ -/*! -This module provides a regular expression printer for `Hir`. -*/ - -use core::fmt; - -use crate::{ - hir::{ - self, - visitor::{self, Visitor}, - Hir, HirKind, - }, - is_meta_character, -}; - -/// A builder for constructing a printer. -/// -/// Note that since a printer doesn't have any configuration knobs, this type -/// remains unexported. -#[derive(Clone, Debug)] -struct PrinterBuilder { - _priv: (), -} - -impl Default for PrinterBuilder { - fn default() -> PrinterBuilder { - PrinterBuilder::new() - } -} - -impl PrinterBuilder { - fn new() -> PrinterBuilder { - PrinterBuilder { _priv: () } - } - - fn build(&self) -> Printer { - Printer { _priv: () } - } -} - -/// A printer for a regular expression's high-level intermediate -/// representation. 
-/// -/// A printer converts a high-level intermediate representation (HIR) to a -/// regular expression pattern string. This particular printer uses constant -/// stack space and heap space proportional to the size of the HIR. -/// -/// Since this printer is only using the HIR, the pattern it prints will likely -/// not resemble the original pattern at all. For example, a pattern like -/// `\pL` will have its entire class written out. -/// -/// The purpose of this printer is to provide a means to mutate an HIR and then -/// build a regular expression from the result of that mutation. (A regex -/// library could provide a constructor from this HIR explicitly, but that -/// creates an unnecessary public coupling between the regex library and this -/// specific HIR representation.) -#[derive(Debug)] -pub struct Printer { - _priv: (), -} - -impl Printer { - /// Create a new printer. - pub fn new() -> Printer { - PrinterBuilder::new().build() - } - - /// Print the given `Ast` to the given writer. The writer must implement - /// `fmt::Write`. Typical implementations of `fmt::Write` that can be used - /// here are a `fmt::Formatter` (which is available in `fmt::Display` - /// implementations) or a `&mut String`. - pub fn print(&mut self, hir: &Hir, wtr: W) -> fmt::Result { - visitor::visit(hir, Writer { wtr }) - } -} - -#[derive(Debug)] -struct Writer { - wtr: W, -} - -impl Visitor for Writer { - type Output = (); - type Err = fmt::Error; - - fn finish(self) -> fmt::Result { - Ok(()) - } - - fn visit_pre(&mut self, hir: &Hir) -> fmt::Result { - match *hir.kind() { - HirKind::Empty => { - // Technically an empty sub-expression could be "printed" by - // just ignoring it, but in practice, you could have a - // repetition operator attached to an empty expression, and you - // really need something in the concrete syntax to make that - // work as you'd expect. - self.wtr.write_str(r"(?:)")?; - } - // Repetition operators are strictly suffix oriented. - HirKind::Repetition(_) => {} - HirKind::Literal(hir::Literal(ref bytes)) => { - // See the comment on the 'Concat' and 'Alternation' case below - // for why we put parens here. Literals are, conceptually, - // a special case of concatenation where each element is a - // character. The HIR flattens this into a Box<[u8]>, but we - // still need to treat it like a concatenation for correct - // printing. As a special case, we don't write parens if there - // is only one character. One character means there is no - // concat so we don't need parens. Adding parens would still be - // correct, but we drop them here because it tends to create - // rather noisy regexes even in simple cases. 
- let result = core::str::from_utf8(bytes); - let len = result.map_or(bytes.len(), |s| s.chars().count()); - if len > 1 { - self.wtr.write_str(r"(?:")?; - } - match result { - Ok(string) => { - for c in string.chars() { - self.write_literal_char(c)?; - } - } - Err(_) => { - for &b in bytes.iter() { - self.write_literal_byte(b)?; - } - } - } - if len > 1 { - self.wtr.write_str(r")")?; - } - } - HirKind::Class(hir::Class::Unicode(ref cls)) => { - if cls.ranges().is_empty() { - return self.wtr.write_str("[a&&b]"); - } - self.wtr.write_str("[")?; - for range in cls.iter() { - if range.start() == range.end() { - self.write_literal_char(range.start())?; - } else if u32::from(range.start()) + 1 - == u32::from(range.end()) - { - self.write_literal_char(range.start())?; - self.write_literal_char(range.end())?; - } else { - self.write_literal_char(range.start())?; - self.wtr.write_str("-")?; - self.write_literal_char(range.end())?; - } - } - self.wtr.write_str("]")?; - } - HirKind::Class(hir::Class::Bytes(ref cls)) => { - if cls.ranges().is_empty() { - return self.wtr.write_str("[a&&b]"); - } - self.wtr.write_str("(?-u:[")?; - for range in cls.iter() { - if range.start() == range.end() { - self.write_literal_class_byte(range.start())?; - } else if range.start() + 1 == range.end() { - self.write_literal_class_byte(range.start())?; - self.write_literal_class_byte(range.end())?; - } else { - self.write_literal_class_byte(range.start())?; - self.wtr.write_str("-")?; - self.write_literal_class_byte(range.end())?; - } - } - self.wtr.write_str("])")?; - } - HirKind::Look(ref look) => match *look { - hir::Look::Start => { - self.wtr.write_str(r"\A")?; - } - hir::Look::End => { - self.wtr.write_str(r"\z")?; - } - hir::Look::StartLF => { - self.wtr.write_str("(?m:^)")?; - } - hir::Look::EndLF => { - self.wtr.write_str("(?m:$)")?; - } - hir::Look::StartCRLF => { - self.wtr.write_str("(?mR:^)")?; - } - hir::Look::EndCRLF => { - self.wtr.write_str("(?mR:$)")?; - } - hir::Look::WordAscii => { - self.wtr.write_str(r"(?-u:\b)")?; - } - hir::Look::WordAsciiNegate => { - self.wtr.write_str(r"(?-u:\B)")?; - } - hir::Look::WordUnicode => { - self.wtr.write_str(r"\b")?; - } - hir::Look::WordUnicodeNegate => { - self.wtr.write_str(r"\B")?; - } - hir::Look::WordStartAscii => { - self.wtr.write_str(r"(?-u:\b{start})")?; - } - hir::Look::WordEndAscii => { - self.wtr.write_str(r"(?-u:\b{end})")?; - } - hir::Look::WordStartUnicode => { - self.wtr.write_str(r"\b{start}")?; - } - hir::Look::WordEndUnicode => { - self.wtr.write_str(r"\b{end}")?; - } - hir::Look::WordStartHalfAscii => { - self.wtr.write_str(r"(?-u:\b{start-half})")?; - } - hir::Look::WordEndHalfAscii => { - self.wtr.write_str(r"(?-u:\b{end-half})")?; - } - hir::Look::WordStartHalfUnicode => { - self.wtr.write_str(r"\b{start-half}")?; - } - hir::Look::WordEndHalfUnicode => { - self.wtr.write_str(r"\b{end-half}")?; - } - }, - HirKind::Capture(hir::Capture { ref name, .. }) => { - self.wtr.write_str("(")?; - if let Some(ref name) = *name { - write!(self.wtr, "?P<{name}>")?; - } - } - // Why do this? Wrapping concats and alts in non-capturing groups - // is not *always* necessary, but is sometimes necessary. For - // example, 'concat(a, alt(b, c))' should be written as 'a(?:b|c)' - // and not 'ab|c'. The former is clearly the intended meaning, but - // the latter is actually 'alt(concat(a, b), c)'. - // - // It would be possible to only group these things in cases where - // it's strictly necessary, but it requires knowing the parent - // expression. 
And since this technique is simpler and always - // correct, we take this route. More to the point, it is a non-goal - // of an HIR printer to show a nice easy-to-read regex. Indeed, - // its construction forbids it from doing so. Therefore, inserting - // extra groups where they aren't necessary is perfectly okay. - HirKind::Concat(_) | HirKind::Alternation(_) => { - self.wtr.write_str(r"(?:")?; - } - } - Ok(()) - } - - fn visit_post(&mut self, hir: &Hir) -> fmt::Result { - match *hir.kind() { - // Handled during visit_pre - HirKind::Empty - | HirKind::Literal(_) - | HirKind::Class(_) - | HirKind::Look(_) => {} - HirKind::Repetition(ref x) => { - match (x.min, x.max) { - (0, Some(1)) => { - self.wtr.write_str("?")?; - } - (0, None) => { - self.wtr.write_str("*")?; - } - (1, None) => { - self.wtr.write_str("+")?; - } - (1, Some(1)) => { - // 'a{1}' and 'a{1}?' are exactly equivalent to 'a'. - return Ok(()); - } - (m, None) => { - write!(self.wtr, "{{{m},}}")?; - } - (m, Some(n)) if m == n => { - write!(self.wtr, "{{{m}}}")?; - // a{m} and a{m}? are always exactly equivalent. - return Ok(()); - } - (m, Some(n)) => { - write!(self.wtr, "{{{m},{n}}}")?; - } - } - if !x.greedy { - self.wtr.write_str("?")?; - } - } - HirKind::Capture(_) - | HirKind::Concat(_) - | HirKind::Alternation(_) => { - self.wtr.write_str(r")")?; - } - } - Ok(()) - } - - fn visit_alternation_in(&mut self) -> fmt::Result { - self.wtr.write_str("|") - } -} - -impl Writer { - fn write_literal_char(&mut self, c: char) -> fmt::Result { - if is_meta_character(c) { - self.wtr.write_str("\\")?; - } - self.wtr.write_char(c) - } - - fn write_literal_byte(&mut self, b: u8) -> fmt::Result { - if b <= 0x7F && !b.is_ascii_control() && !b.is_ascii_whitespace() { - self.write_literal_char(char::try_from(b).unwrap()) - } else { - write!(self.wtr, "(?-u:\\x{b:02X})") - } - } - - fn write_literal_class_byte(&mut self, b: u8) -> fmt::Result { - if b <= 0x7F && !b.is_ascii_control() && !b.is_ascii_whitespace() { - self.write_literal_char(char::try_from(b).unwrap()) - } else { - write!(self.wtr, "\\x{b:02X}") - } - } -} - -#[cfg(test)] -mod tests { - use alloc::{ - boxed::Box, - string::{String, ToString}, - }; - - use crate::ParserBuilder; - - use super::*; - - fn roundtrip(given: &str, expected: &str) { - roundtrip_with(|b| b, given, expected); - } - - fn roundtrip_bytes(given: &str, expected: &str) { - roundtrip_with(|b| b.utf8(false), given, expected); - } - - fn roundtrip_with(mut f: F, given: &str, expected: &str) - where - F: FnMut(&mut ParserBuilder) -> &mut ParserBuilder, - { - let mut builder = ParserBuilder::new(); - f(&mut builder); - let hir = builder.build().parse(given).unwrap(); - - let mut printer = Printer::new(); - let mut dst = String::new(); - printer.print(&hir, &mut dst).unwrap(); - - // Check that the result is actually valid. 
- builder.build().parse(&dst).unwrap(); - - assert_eq!(expected, dst); - } - - #[test] - fn print_literal() { - roundtrip("a", "a"); - roundtrip(r"\xff", "\u{FF}"); - roundtrip_bytes(r"\xff", "\u{FF}"); - roundtrip_bytes(r"(?-u)\xff", r"(?-u:\xFF)"); - roundtrip("☃", "☃"); - } - - #[test] - fn print_class() { - roundtrip(r"[a]", r"a"); - roundtrip(r"[ab]", r"[ab]"); - roundtrip(r"[a-z]", r"[a-z]"); - roundtrip(r"[a-z--b-c--x-y]", r"[ad-wz]"); - roundtrip(r"[^\x01-\u{10FFFF}]", "\u{0}"); - roundtrip(r"[-]", r"\-"); - roundtrip(r"[☃-⛄]", r"[☃-⛄]"); - - roundtrip(r"(?-u)[a]", r"a"); - roundtrip(r"(?-u)[ab]", r"(?-u:[ab])"); - roundtrip(r"(?-u)[a-z]", r"(?-u:[a-z])"); - roundtrip_bytes(r"(?-u)[a-\xFF]", r"(?-u:[a-\xFF])"); - - // The following test that the printer escapes meta characters - // in character classes. - roundtrip(r"[\[]", r"\["); - roundtrip(r"[Z-_]", r"[Z-_]"); - roundtrip(r"[Z-_--Z]", r"[\[-_]"); - - // The following test that the printer escapes meta characters - // in byte oriented character classes. - roundtrip_bytes(r"(?-u)[\[]", r"\["); - roundtrip_bytes(r"(?-u)[Z-_]", r"(?-u:[Z-_])"); - roundtrip_bytes(r"(?-u)[Z-_--Z]", r"(?-u:[\[-_])"); - - // This tests that an empty character class is correctly roundtripped. - #[cfg(feature = "unicode-gencat")] - roundtrip(r"\P{any}", r"[a&&b]"); - roundtrip_bytes(r"(?-u)[^\x00-\xFF]", r"[a&&b]"); - } - - #[test] - fn print_anchor() { - roundtrip(r"^", r"\A"); - roundtrip(r"$", r"\z"); - roundtrip(r"(?m)^", r"(?m:^)"); - roundtrip(r"(?m)$", r"(?m:$)"); - } - - #[test] - fn print_word_boundary() { - roundtrip(r"\b", r"\b"); - roundtrip(r"\B", r"\B"); - roundtrip(r"(?-u)\b", r"(?-u:\b)"); - roundtrip_bytes(r"(?-u)\B", r"(?-u:\B)"); - } - - #[test] - fn print_repetition() { - roundtrip("a?", "a?"); - roundtrip("a??", "a??"); - roundtrip("(?U)a?", "a??"); - - roundtrip("a*", "a*"); - roundtrip("a*?", "a*?"); - roundtrip("(?U)a*", "a*?"); - - roundtrip("a+", "a+"); - roundtrip("a+?", "a+?"); - roundtrip("(?U)a+", "a+?"); - - roundtrip("a{1}", "a"); - roundtrip("a{2}", "a{2}"); - roundtrip("a{1,}", "a+"); - roundtrip("a{1,5}", "a{1,5}"); - roundtrip("a{1}?", "a"); - roundtrip("a{2}?", "a{2}"); - roundtrip("a{1,}?", "a+?"); - roundtrip("a{1,5}?", "a{1,5}?"); - roundtrip("(?U)a{1}", "a"); - roundtrip("(?U)a{2}", "a{2}"); - roundtrip("(?U)a{1,}", "a+?"); - roundtrip("(?U)a{1,5}", "a{1,5}?"); - - // Test that various zero-length repetitions always translate to an - // empty regex. This is more a property of HIR's smart constructors - // than the printer though. - roundtrip("a{0}", "(?:)"); - roundtrip("(?:ab){0}", "(?:)"); - #[cfg(feature = "unicode-gencat")] - { - roundtrip(r"\p{any}{0}", "(?:)"); - roundtrip(r"\P{any}{0}", "(?:)"); - } - } - - #[test] - fn print_group() { - roundtrip("()", "((?:))"); - roundtrip("(?P)", "(?P(?:))"); - roundtrip("(?:)", "(?:)"); - - roundtrip("(a)", "(a)"); - roundtrip("(?Pa)", "(?Pa)"); - roundtrip("(?:a)", "a"); - - roundtrip("((((a))))", "((((a))))"); - } - - #[test] - fn print_alternation() { - roundtrip("|", "(?:(?:)|(?:))"); - roundtrip("||", "(?:(?:)|(?:)|(?:))"); - - roundtrip("a|b", "[ab]"); - roundtrip("ab|cd", "(?:(?:ab)|(?:cd))"); - roundtrip("a|b|c", "[a-c]"); - roundtrip("ab|cd|ef", "(?:(?:ab)|(?:cd)|(?:ef))"); - roundtrip("foo|bar|quux", "(?:(?:foo)|(?:bar)|(?:quux))"); - } - - // This is a regression test that stresses a peculiarity of how the HIR - // is both constructed and printed. Namely, it is legal for a repetition - // to directly contain a concatenation. 
This particular construct isn't - // really possible to build from the concrete syntax directly, since you'd - // be forced to put the concatenation into (at least) a non-capturing - // group. Concurrently, the printer doesn't consider this case and just - // kind of naively prints the child expression and tacks on the repetition - // operator. - // - // As a result, if you attached '+' to a 'concat(a, b)', the printer gives - // you 'ab+', but clearly it really should be '(?:ab)+'. - // - // This bug isn't easy to surface because most ways of building an HIR - // come directly from the concrete syntax, and as mentioned above, it just - // isn't possible to build this kind of HIR from the concrete syntax. - // Nevertheless, this is definitely a bug. - // - // See: https://github.com/rust-lang/regex/issues/731 - #[test] - fn regression_repetition_concat() { - let expr = Hir::concat(alloc::vec![ - Hir::literal("x".as_bytes()), - Hir::repetition(hir::Repetition { - min: 1, - max: None, - greedy: true, - sub: Box::new(Hir::literal("ab".as_bytes())), - }), - Hir::literal("y".as_bytes()), - ]); - assert_eq!(r"(?:x(?:ab)+y)", expr.to_string()); - - let expr = Hir::concat(alloc::vec![ - Hir::look(hir::Look::Start), - Hir::repetition(hir::Repetition { - min: 1, - max: None, - greedy: true, - sub: Box::new(Hir::concat(alloc::vec![ - Hir::look(hir::Look::Start), - Hir::look(hir::Look::End), - ])), - }), - Hir::look(hir::Look::End), - ]); - assert_eq!(r"(?:\A\A\z\z)", expr.to_string()); - } - - // Just like regression_repetition_concat, but with the repetition using - // an alternation as a child expression instead. - // - // See: https://github.com/rust-lang/regex/issues/731 - #[test] - fn regression_repetition_alternation() { - let expr = Hir::concat(alloc::vec![ - Hir::literal("ab".as_bytes()), - Hir::repetition(hir::Repetition { - min: 1, - max: None, - greedy: true, - sub: Box::new(Hir::alternation(alloc::vec![ - Hir::literal("cd".as_bytes()), - Hir::literal("ef".as_bytes()), - ])), - }), - Hir::literal("gh".as_bytes()), - ]); - assert_eq!(r"(?:(?:ab)(?:(?:cd)|(?:ef))+(?:gh))", expr.to_string()); - - let expr = Hir::concat(alloc::vec![ - Hir::look(hir::Look::Start), - Hir::repetition(hir::Repetition { - min: 1, - max: None, - greedy: true, - sub: Box::new(Hir::alternation(alloc::vec![ - Hir::look(hir::Look::Start), - Hir::look(hir::Look::End), - ])), - }), - Hir::look(hir::Look::End), - ]); - assert_eq!(r"(?:\A(?:\A|\z)\z)", expr.to_string()); - } - - // This regression test is very similar in flavor to - // regression_repetition_concat in that the root of the issue lies in a - // peculiarity of how the HIR is represented and how the printer writes it - // out. Like the other regression, this one is also rooted in the fact that - // you can't produce the peculiar HIR from the concrete syntax. Namely, you - // just can't have a 'concat(a, alt(b, c))' because the 'alt' will normally - // be in (at least) a non-capturing group. Why? Because the '|' has very - // low precedence (lower that concatenation), and so something like 'ab|c' - // is actually 'alt(ab, c)'. 
- // - // See: https://github.com/rust-lang/regex/issues/516 - #[test] - fn regression_alternation_concat() { - let expr = Hir::concat(alloc::vec![ - Hir::literal("ab".as_bytes()), - Hir::alternation(alloc::vec![ - Hir::literal("mn".as_bytes()), - Hir::literal("xy".as_bytes()), - ]), - ]); - assert_eq!(r"(?:(?:ab)(?:(?:mn)|(?:xy)))", expr.to_string()); - - let expr = Hir::concat(alloc::vec![ - Hir::look(hir::Look::Start), - Hir::alternation(alloc::vec![ - Hir::look(hir::Look::Start), - Hir::look(hir::Look::End), - ]), - ]); - assert_eq!(r"(?:\A(?:\A|\z))", expr.to_string()); - } -} diff --git a/vendor/regex-syntax/src/hir/translate.rs b/vendor/regex-syntax/src/hir/translate.rs deleted file mode 100644 index 48469f9e1615d0..00000000000000 --- a/vendor/regex-syntax/src/hir/translate.rs +++ /dev/null @@ -1,3740 +0,0 @@ -/*! -Defines a translator that converts an `Ast` to an `Hir`. -*/ - -use core::cell::{Cell, RefCell}; - -use alloc::{boxed::Box, string::ToString, vec, vec::Vec}; - -use crate::{ - ast::{self, Ast, Span, Visitor}, - either::Either, - hir::{self, Error, ErrorKind, Hir, HirKind}, - unicode::{self, ClassQuery}, -}; - -type Result = core::result::Result; - -/// A builder for constructing an AST->HIR translator. -#[derive(Clone, Debug)] -pub struct TranslatorBuilder { - utf8: bool, - line_terminator: u8, - flags: Flags, -} - -impl Default for TranslatorBuilder { - fn default() -> TranslatorBuilder { - TranslatorBuilder::new() - } -} - -impl TranslatorBuilder { - /// Create a new translator builder with a default configuration. - pub fn new() -> TranslatorBuilder { - TranslatorBuilder { - utf8: true, - line_terminator: b'\n', - flags: Flags::default(), - } - } - - /// Build a translator using the current configuration. - pub fn build(&self) -> Translator { - Translator { - stack: RefCell::new(vec![]), - flags: Cell::new(self.flags), - utf8: self.utf8, - line_terminator: self.line_terminator, - } - } - - /// When disabled, translation will permit the construction of a regular - /// expression that may match invalid UTF-8. - /// - /// When enabled (the default), the translator is guaranteed to produce an - /// expression that, for non-empty matches, will only ever produce spans - /// that are entirely valid UTF-8 (otherwise, the translator will return an - /// error). - /// - /// Perhaps surprisingly, when UTF-8 is enabled, an empty regex or even - /// a negated ASCII word boundary (uttered as `(?-u:\B)` in the concrete - /// syntax) will be allowed even though they can produce matches that split - /// a UTF-8 encoded codepoint. This only applies to zero-width or "empty" - /// matches, and it is expected that the regex engine itself must handle - /// these cases if necessary (perhaps by suppressing any zero-width matches - /// that split a codepoint). - pub fn utf8(&mut self, yes: bool) -> &mut TranslatorBuilder { - self.utf8 = yes; - self - } - - /// Sets the line terminator for use with `(?u-s:.)` and `(?-us:.)`. - /// - /// Namely, instead of `.` (by default) matching everything except for `\n`, - /// this will cause `.` to match everything except for the byte given. - /// - /// If `.` is used in a context where Unicode mode is enabled and this byte - /// isn't ASCII, then an error will be returned. When Unicode mode is - /// disabled, then any byte is permitted, but will return an error if UTF-8 - /// mode is enabled and it is a non-ASCII byte. - /// - /// In short, any ASCII value for a line terminator is always okay. 
But a - /// non-ASCII byte might result in an error depending on whether Unicode - /// mode or UTF-8 mode are enabled. - /// - /// Note that if `R` mode is enabled then it always takes precedence and - /// the line terminator will be treated as `\r` and `\n` simultaneously. - /// - /// Note also that this *doesn't* impact the look-around assertions - /// `(?m:^)` and `(?m:$)`. That's usually controlled by additional - /// configuration in the regex engine itself. - pub fn line_terminator(&mut self, byte: u8) -> &mut TranslatorBuilder { - self.line_terminator = byte; - self - } - - /// Enable or disable the case insensitive flag (`i`) by default. - pub fn case_insensitive(&mut self, yes: bool) -> &mut TranslatorBuilder { - self.flags.case_insensitive = if yes { Some(true) } else { None }; - self - } - - /// Enable or disable the multi-line matching flag (`m`) by default. - pub fn multi_line(&mut self, yes: bool) -> &mut TranslatorBuilder { - self.flags.multi_line = if yes { Some(true) } else { None }; - self - } - - /// Enable or disable the "dot matches any character" flag (`s`) by - /// default. - pub fn dot_matches_new_line( - &mut self, - yes: bool, - ) -> &mut TranslatorBuilder { - self.flags.dot_matches_new_line = if yes { Some(true) } else { None }; - self - } - - /// Enable or disable the CRLF mode flag (`R`) by default. - pub fn crlf(&mut self, yes: bool) -> &mut TranslatorBuilder { - self.flags.crlf = if yes { Some(true) } else { None }; - self - } - - /// Enable or disable the "swap greed" flag (`U`) by default. - pub fn swap_greed(&mut self, yes: bool) -> &mut TranslatorBuilder { - self.flags.swap_greed = if yes { Some(true) } else { None }; - self - } - - /// Enable or disable the Unicode flag (`u`) by default. - pub fn unicode(&mut self, yes: bool) -> &mut TranslatorBuilder { - self.flags.unicode = if yes { None } else { Some(false) }; - self - } -} - -/// A translator maps abstract syntax to a high level intermediate -/// representation. -/// -/// A translator may be benefit from reuse. That is, a translator can translate -/// many abstract syntax trees. -/// -/// A `Translator` can be configured in more detail via a -/// [`TranslatorBuilder`]. -#[derive(Clone, Debug)] -pub struct Translator { - /// Our call stack, but on the heap. - stack: RefCell>, - /// The current flag settings. - flags: Cell, - /// Whether we're allowed to produce HIR that can match arbitrary bytes. - utf8: bool, - /// The line terminator to use for `.`. - line_terminator: u8, -} - -impl Translator { - /// Create a new translator using the default configuration. - pub fn new() -> Translator { - TranslatorBuilder::new().build() - } - - /// Translate the given abstract syntax tree (AST) into a high level - /// intermediate representation (HIR). - /// - /// If there was a problem doing the translation, then an HIR-specific - /// error is returned. - /// - /// The original pattern string used to produce the `Ast` *must* also be - /// provided. The translator does not use the pattern string during any - /// correct translation, but is used for error reporting. - pub fn translate(&mut self, pattern: &str, ast: &Ast) -> Result { - ast::visit(ast, TranslatorI::new(self, pattern)) - } -} - -/// An HirFrame is a single stack frame, represented explicitly, which is -/// created for each item in the Ast that we traverse. -/// -/// Note that technically, this type doesn't represent our entire stack -/// frame. In particular, the Ast visitor represents any state associated with -/// traversing the Ast itself. 
-#[derive(Clone, Debug)] -enum HirFrame { - /// An arbitrary HIR expression. These get pushed whenever we hit a base - /// case in the Ast. They get popped after an inductive (i.e., recursive) - /// step is complete. - Expr(Hir), - /// A literal that is being constructed, character by character, from the - /// AST. We need this because the AST gives each individual character its - /// own node. So as we see characters, we peek at the top-most HirFrame. - /// If it's a literal, then we add to it. Otherwise, we push a new literal. - /// When it comes time to pop it, we convert it to an Hir via Hir::literal. - Literal(Vec), - /// A Unicode character class. This frame is mutated as we descend into - /// the Ast of a character class (which is itself its own mini recursive - /// structure). - ClassUnicode(hir::ClassUnicode), - /// A byte-oriented character class. This frame is mutated as we descend - /// into the Ast of a character class (which is itself its own mini - /// recursive structure). - /// - /// Byte character classes are created when Unicode mode (`u`) is disabled. - /// If `utf8` is enabled (the default), then a byte character is only - /// permitted to match ASCII text. - ClassBytes(hir::ClassBytes), - /// This is pushed whenever a repetition is observed. After visiting every - /// sub-expression in the repetition, the translator's stack is expected to - /// have this sentinel at the top. - /// - /// This sentinel only exists to stop other things (like flattening - /// literals) from reaching across repetition operators. - Repetition, - /// This is pushed on to the stack upon first seeing any kind of capture, - /// indicated by parentheses (including non-capturing groups). It is popped - /// upon leaving a group. - Group { - /// The old active flags when this group was opened. - /// - /// If this group sets flags, then the new active flags are set to the - /// result of merging the old flags with the flags introduced by this - /// group. If the group doesn't set any flags, then this is simply - /// equivalent to whatever flags were set when the group was opened. - /// - /// When this group is popped, the active flags should be restored to - /// the flags set here. - /// - /// The "active" flags correspond to whatever flags are set in the - /// Translator. - old_flags: Flags, - }, - /// This is pushed whenever a concatenation is observed. After visiting - /// every sub-expression in the concatenation, the translator's stack is - /// popped until it sees a Concat frame. - Concat, - /// This is pushed whenever an alternation is observed. After visiting - /// every sub-expression in the alternation, the translator's stack is - /// popped until it sees an Alternation frame. - Alternation, - /// This is pushed immediately before each sub-expression in an - /// alternation. This separates the branches of an alternation on the - /// stack and prevents literal flattening from reaching across alternation - /// branches. - /// - /// It is popped after each expression in a branch until an 'Alternation' - /// frame is observed when doing a post visit on an alternation. - AlternationBranch, -} - -impl HirFrame { - /// Assert that the current stack frame is an Hir expression and return it. - fn unwrap_expr(self) -> Hir { - match self { - HirFrame::Expr(expr) => expr, - HirFrame::Literal(lit) => Hir::literal(lit), - _ => panic!("tried to unwrap expr from HirFrame, got: {self:?}"), - } - } - - /// Assert that the current stack frame is a Unicode class expression and - /// return it. 
- fn unwrap_class_unicode(self) -> hir::ClassUnicode { - match self { - HirFrame::ClassUnicode(cls) => cls, - _ => panic!( - "tried to unwrap Unicode class \ - from HirFrame, got: {:?}", - self - ), - } - } - - /// Assert that the current stack frame is a byte class expression and - /// return it. - fn unwrap_class_bytes(self) -> hir::ClassBytes { - match self { - HirFrame::ClassBytes(cls) => cls, - _ => panic!( - "tried to unwrap byte class \ - from HirFrame, got: {:?}", - self - ), - } - } - - /// Assert that the current stack frame is a repetition sentinel. If it - /// isn't, then panic. - fn unwrap_repetition(self) { - match self { - HirFrame::Repetition => {} - _ => { - panic!( - "tried to unwrap repetition from HirFrame, got: {self:?}" - ) - } - } - } - - /// Assert that the current stack frame is a group indicator and return - /// its corresponding flags (the flags that were active at the time the - /// group was entered). - fn unwrap_group(self) -> Flags { - match self { - HirFrame::Group { old_flags } => old_flags, - _ => { - panic!("tried to unwrap group from HirFrame, got: {self:?}") - } - } - } - - /// Assert that the current stack frame is an alternation pipe sentinel. If - /// it isn't, then panic. - fn unwrap_alternation_pipe(self) { - match self { - HirFrame::AlternationBranch => {} - _ => { - panic!("tried to unwrap alt pipe from HirFrame, got: {self:?}") - } - } - } -} - -impl<'t, 'p> Visitor for TranslatorI<'t, 'p> { - type Output = Hir; - type Err = Error; - - fn finish(self) -> Result { - // ... otherwise, we should have exactly one HIR on the stack. - assert_eq!(self.trans().stack.borrow().len(), 1); - Ok(self.pop().unwrap().unwrap_expr()) - } - - fn visit_pre(&mut self, ast: &Ast) -> Result<()> { - match *ast { - Ast::ClassBracketed(_) => { - if self.flags().unicode() { - let cls = hir::ClassUnicode::empty(); - self.push(HirFrame::ClassUnicode(cls)); - } else { - let cls = hir::ClassBytes::empty(); - self.push(HirFrame::ClassBytes(cls)); - } - } - Ast::Repetition(_) => self.push(HirFrame::Repetition), - Ast::Group(ref x) => { - let old_flags = x - .flags() - .map(|ast| self.set_flags(ast)) - .unwrap_or_else(|| self.flags()); - self.push(HirFrame::Group { old_flags }); - } - Ast::Concat(_) => { - self.push(HirFrame::Concat); - } - Ast::Alternation(ref x) => { - self.push(HirFrame::Alternation); - if !x.asts.is_empty() { - self.push(HirFrame::AlternationBranch); - } - } - _ => {} - } - Ok(()) - } - - fn visit_post(&mut self, ast: &Ast) -> Result<()> { - match *ast { - Ast::Empty(_) => { - self.push(HirFrame::Expr(Hir::empty())); - } - Ast::Flags(ref x) => { - self.set_flags(&x.flags); - // Flags in the AST are generally considered directives and - // not actual sub-expressions. However, they can be used in - // the concrete syntax like `((?i))`, and we need some kind of - // indication of an expression there, and Empty is the correct - // choice. - // - // There can also be things like `(?i)+`, but we rule those out - // in the parser. In the future, we might allow them for - // consistency sake. - self.push(HirFrame::Expr(Hir::empty())); - } - Ast::Literal(ref x) => match self.ast_literal_to_scalar(x)? { - Either::Right(byte) => self.push_byte(byte), - Either::Left(ch) => match self.case_fold_char(x.span, ch)? 
{ - None => self.push_char(ch), - Some(expr) => self.push(HirFrame::Expr(expr)), - }, - }, - Ast::Dot(ref span) => { - self.push(HirFrame::Expr(self.hir_dot(**span)?)); - } - Ast::Assertion(ref x) => { - self.push(HirFrame::Expr(self.hir_assertion(x)?)); - } - Ast::ClassPerl(ref x) => { - if self.flags().unicode() { - let cls = self.hir_perl_unicode_class(x)?; - let hcls = hir::Class::Unicode(cls); - self.push(HirFrame::Expr(Hir::class(hcls))); - } else { - let cls = self.hir_perl_byte_class(x)?; - let hcls = hir::Class::Bytes(cls); - self.push(HirFrame::Expr(Hir::class(hcls))); - } - } - Ast::ClassUnicode(ref x) => { - let cls = hir::Class::Unicode(self.hir_unicode_class(x)?); - self.push(HirFrame::Expr(Hir::class(cls))); - } - Ast::ClassBracketed(ref ast) => { - if self.flags().unicode() { - let mut cls = self.pop().unwrap().unwrap_class_unicode(); - self.unicode_fold_and_negate( - &ast.span, - ast.negated, - &mut cls, - )?; - let expr = Hir::class(hir::Class::Unicode(cls)); - self.push(HirFrame::Expr(expr)); - } else { - let mut cls = self.pop().unwrap().unwrap_class_bytes(); - self.bytes_fold_and_negate( - &ast.span, - ast.negated, - &mut cls, - )?; - let expr = Hir::class(hir::Class::Bytes(cls)); - self.push(HirFrame::Expr(expr)); - } - } - Ast::Repetition(ref x) => { - let expr = self.pop().unwrap().unwrap_expr(); - self.pop().unwrap().unwrap_repetition(); - self.push(HirFrame::Expr(self.hir_repetition(x, expr))); - } - Ast::Group(ref x) => { - let expr = self.pop().unwrap().unwrap_expr(); - let old_flags = self.pop().unwrap().unwrap_group(); - self.trans().flags.set(old_flags); - self.push(HirFrame::Expr(self.hir_capture(x, expr))); - } - Ast::Concat(_) => { - let mut exprs = vec![]; - while let Some(expr) = self.pop_concat_expr() { - if !matches!(*expr.kind(), HirKind::Empty) { - exprs.push(expr); - } - } - exprs.reverse(); - self.push(HirFrame::Expr(Hir::concat(exprs))); - } - Ast::Alternation(_) => { - let mut exprs = vec![]; - while let Some(expr) = self.pop_alt_expr() { - self.pop().unwrap().unwrap_alternation_pipe(); - exprs.push(expr); - } - exprs.reverse(); - self.push(HirFrame::Expr(Hir::alternation(exprs))); - } - } - Ok(()) - } - - fn visit_alternation_in(&mut self) -> Result<()> { - self.push(HirFrame::AlternationBranch); - Ok(()) - } - - fn visit_class_set_item_pre( - &mut self, - ast: &ast::ClassSetItem, - ) -> Result<()> { - match *ast { - ast::ClassSetItem::Bracketed(_) => { - if self.flags().unicode() { - let cls = hir::ClassUnicode::empty(); - self.push(HirFrame::ClassUnicode(cls)); - } else { - let cls = hir::ClassBytes::empty(); - self.push(HirFrame::ClassBytes(cls)); - } - } - // We needn't handle the Union case here since the visitor will - // do it for us. 
- _ => {} - } - Ok(()) - } - - fn visit_class_set_item_post( - &mut self, - ast: &ast::ClassSetItem, - ) -> Result<()> { - match *ast { - ast::ClassSetItem::Empty(_) => {} - ast::ClassSetItem::Literal(ref x) => { - if self.flags().unicode() { - let mut cls = self.pop().unwrap().unwrap_class_unicode(); - cls.push(hir::ClassUnicodeRange::new(x.c, x.c)); - self.push(HirFrame::ClassUnicode(cls)); - } else { - let mut cls = self.pop().unwrap().unwrap_class_bytes(); - let byte = self.class_literal_byte(x)?; - cls.push(hir::ClassBytesRange::new(byte, byte)); - self.push(HirFrame::ClassBytes(cls)); - } - } - ast::ClassSetItem::Range(ref x) => { - if self.flags().unicode() { - let mut cls = self.pop().unwrap().unwrap_class_unicode(); - cls.push(hir::ClassUnicodeRange::new(x.start.c, x.end.c)); - self.push(HirFrame::ClassUnicode(cls)); - } else { - let mut cls = self.pop().unwrap().unwrap_class_bytes(); - let start = self.class_literal_byte(&x.start)?; - let end = self.class_literal_byte(&x.end)?; - cls.push(hir::ClassBytesRange::new(start, end)); - self.push(HirFrame::ClassBytes(cls)); - } - } - ast::ClassSetItem::Ascii(ref x) => { - if self.flags().unicode() { - let xcls = self.hir_ascii_unicode_class(x)?; - let mut cls = self.pop().unwrap().unwrap_class_unicode(); - cls.union(&xcls); - self.push(HirFrame::ClassUnicode(cls)); - } else { - let xcls = self.hir_ascii_byte_class(x)?; - let mut cls = self.pop().unwrap().unwrap_class_bytes(); - cls.union(&xcls); - self.push(HirFrame::ClassBytes(cls)); - } - } - ast::ClassSetItem::Unicode(ref x) => { - let xcls = self.hir_unicode_class(x)?; - let mut cls = self.pop().unwrap().unwrap_class_unicode(); - cls.union(&xcls); - self.push(HirFrame::ClassUnicode(cls)); - } - ast::ClassSetItem::Perl(ref x) => { - if self.flags().unicode() { - let xcls = self.hir_perl_unicode_class(x)?; - let mut cls = self.pop().unwrap().unwrap_class_unicode(); - cls.union(&xcls); - self.push(HirFrame::ClassUnicode(cls)); - } else { - let xcls = self.hir_perl_byte_class(x)?; - let mut cls = self.pop().unwrap().unwrap_class_bytes(); - cls.union(&xcls); - self.push(HirFrame::ClassBytes(cls)); - } - } - ast::ClassSetItem::Bracketed(ref ast) => { - if self.flags().unicode() { - let mut cls1 = self.pop().unwrap().unwrap_class_unicode(); - self.unicode_fold_and_negate( - &ast.span, - ast.negated, - &mut cls1, - )?; - - let mut cls2 = self.pop().unwrap().unwrap_class_unicode(); - cls2.union(&cls1); - self.push(HirFrame::ClassUnicode(cls2)); - } else { - let mut cls1 = self.pop().unwrap().unwrap_class_bytes(); - self.bytes_fold_and_negate( - &ast.span, - ast.negated, - &mut cls1, - )?; - - let mut cls2 = self.pop().unwrap().unwrap_class_bytes(); - cls2.union(&cls1); - self.push(HirFrame::ClassBytes(cls2)); - } - } - // This is handled automatically by the visitor. 
- ast::ClassSetItem::Union(_) => {} - } - Ok(()) - } - - fn visit_class_set_binary_op_pre( - &mut self, - _op: &ast::ClassSetBinaryOp, - ) -> Result<()> { - if self.flags().unicode() { - let cls = hir::ClassUnicode::empty(); - self.push(HirFrame::ClassUnicode(cls)); - } else { - let cls = hir::ClassBytes::empty(); - self.push(HirFrame::ClassBytes(cls)); - } - Ok(()) - } - - fn visit_class_set_binary_op_in( - &mut self, - _op: &ast::ClassSetBinaryOp, - ) -> Result<()> { - if self.flags().unicode() { - let cls = hir::ClassUnicode::empty(); - self.push(HirFrame::ClassUnicode(cls)); - } else { - let cls = hir::ClassBytes::empty(); - self.push(HirFrame::ClassBytes(cls)); - } - Ok(()) - } - - fn visit_class_set_binary_op_post( - &mut self, - op: &ast::ClassSetBinaryOp, - ) -> Result<()> { - use crate::ast::ClassSetBinaryOpKind::*; - - if self.flags().unicode() { - let mut rhs = self.pop().unwrap().unwrap_class_unicode(); - let mut lhs = self.pop().unwrap().unwrap_class_unicode(); - let mut cls = self.pop().unwrap().unwrap_class_unicode(); - if self.flags().case_insensitive() { - rhs.try_case_fold_simple().map_err(|_| { - self.error( - op.rhs.span().clone(), - ErrorKind::UnicodeCaseUnavailable, - ) - })?; - lhs.try_case_fold_simple().map_err(|_| { - self.error( - op.lhs.span().clone(), - ErrorKind::UnicodeCaseUnavailable, - ) - })?; - } - match op.kind { - Intersection => lhs.intersect(&rhs), - Difference => lhs.difference(&rhs), - SymmetricDifference => lhs.symmetric_difference(&rhs), - } - cls.union(&lhs); - self.push(HirFrame::ClassUnicode(cls)); - } else { - let mut rhs = self.pop().unwrap().unwrap_class_bytes(); - let mut lhs = self.pop().unwrap().unwrap_class_bytes(); - let mut cls = self.pop().unwrap().unwrap_class_bytes(); - if self.flags().case_insensitive() { - rhs.case_fold_simple(); - lhs.case_fold_simple(); - } - match op.kind { - Intersection => lhs.intersect(&rhs), - Difference => lhs.difference(&rhs), - SymmetricDifference => lhs.symmetric_difference(&rhs), - } - cls.union(&lhs); - self.push(HirFrame::ClassBytes(cls)); - } - Ok(()) - } -} - -/// The internal implementation of a translator. -/// -/// This type is responsible for carrying around the original pattern string, -/// which is not tied to the internal state of a translator. -/// -/// A TranslatorI exists for the time it takes to translate a single Ast. -#[derive(Clone, Debug)] -struct TranslatorI<'t, 'p> { - trans: &'t Translator, - pattern: &'p str, -} - -impl<'t, 'p> TranslatorI<'t, 'p> { - /// Build a new internal translator. - fn new(trans: &'t Translator, pattern: &'p str) -> TranslatorI<'t, 'p> { - TranslatorI { trans, pattern } - } - - /// Return a reference to the underlying translator. - fn trans(&self) -> &Translator { - &self.trans - } - - /// Push the given frame on to the call stack. - fn push(&self, frame: HirFrame) { - self.trans().stack.borrow_mut().push(frame); - } - - /// Push the given literal char on to the call stack. - /// - /// If the top-most element of the stack is a literal, then the char - /// is appended to the end of that literal. Otherwise, a new literal - /// containing just the given char is pushed to the top of the stack. 
- fn push_char(&self, ch: char) { - let mut buf = [0; 4]; - let bytes = ch.encode_utf8(&mut buf).as_bytes(); - let mut stack = self.trans().stack.borrow_mut(); - if let Some(HirFrame::Literal(ref mut literal)) = stack.last_mut() { - literal.extend_from_slice(bytes); - } else { - stack.push(HirFrame::Literal(bytes.to_vec())); - } - } - - /// Push the given literal byte on to the call stack. - /// - /// If the top-most element of the stack is a literal, then the byte - /// is appended to the end of that literal. Otherwise, a new literal - /// containing just the given byte is pushed to the top of the stack. - fn push_byte(&self, byte: u8) { - let mut stack = self.trans().stack.borrow_mut(); - if let Some(HirFrame::Literal(ref mut literal)) = stack.last_mut() { - literal.push(byte); - } else { - stack.push(HirFrame::Literal(vec![byte])); - } - } - - /// Pop the top of the call stack. If the call stack is empty, return None. - fn pop(&self) -> Option { - self.trans().stack.borrow_mut().pop() - } - - /// Pop an HIR expression from the top of the stack for a concatenation. - /// - /// This returns None if the stack is empty or when a concat frame is seen. - /// Otherwise, it panics if it could not find an HIR expression. - fn pop_concat_expr(&self) -> Option { - let frame = self.pop()?; - match frame { - HirFrame::Concat => None, - HirFrame::Expr(expr) => Some(expr), - HirFrame::Literal(lit) => Some(Hir::literal(lit)), - HirFrame::ClassUnicode(_) => { - unreachable!("expected expr or concat, got Unicode class") - } - HirFrame::ClassBytes(_) => { - unreachable!("expected expr or concat, got byte class") - } - HirFrame::Repetition => { - unreachable!("expected expr or concat, got repetition") - } - HirFrame::Group { .. } => { - unreachable!("expected expr or concat, got group") - } - HirFrame::Alternation => { - unreachable!("expected expr or concat, got alt marker") - } - HirFrame::AlternationBranch => { - unreachable!("expected expr or concat, got alt branch marker") - } - } - } - - /// Pop an HIR expression from the top of the stack for an alternation. - /// - /// This returns None if the stack is empty or when an alternation frame is - /// seen. Otherwise, it panics if it could not find an HIR expression. - fn pop_alt_expr(&self) -> Option { - let frame = self.pop()?; - match frame { - HirFrame::Alternation => None, - HirFrame::Expr(expr) => Some(expr), - HirFrame::Literal(lit) => Some(Hir::literal(lit)), - HirFrame::ClassUnicode(_) => { - unreachable!("expected expr or alt, got Unicode class") - } - HirFrame::ClassBytes(_) => { - unreachable!("expected expr or alt, got byte class") - } - HirFrame::Repetition => { - unreachable!("expected expr or alt, got repetition") - } - HirFrame::Group { .. } => { - unreachable!("expected expr or alt, got group") - } - HirFrame::Concat => { - unreachable!("expected expr or alt, got concat marker") - } - HirFrame::AlternationBranch => { - unreachable!("expected expr or alt, got alt branch marker") - } - } - } - - /// Create a new error with the given span and error type. - fn error(&self, span: Span, kind: ErrorKind) -> Error { - Error { kind, pattern: self.pattern.to_string(), span } - } - - /// Return a copy of the active flags. - fn flags(&self) -> Flags { - self.trans().flags.get() - } - - /// Set the flags of this translator from the flags set in the given AST. - /// Then, return the old flags. 
- fn set_flags(&self, ast_flags: &ast::Flags) -> Flags { - let old_flags = self.flags(); - let mut new_flags = Flags::from_ast(ast_flags); - new_flags.merge(&old_flags); - self.trans().flags.set(new_flags); - old_flags - } - - /// Convert an Ast literal to its scalar representation. - /// - /// When Unicode mode is enabled, then this always succeeds and returns a - /// `char` (Unicode scalar value). - /// - /// When Unicode mode is disabled, then a `char` will still be returned - /// whenever possible. A byte is returned only when invalid UTF-8 is - /// allowed and when the byte is not ASCII. Otherwise, a non-ASCII byte - /// will result in an error when invalid UTF-8 is not allowed. - fn ast_literal_to_scalar( - &self, - lit: &ast::Literal, - ) -> Result> { - if self.flags().unicode() { - return Ok(Either::Left(lit.c)); - } - let byte = match lit.byte() { - None => return Ok(Either::Left(lit.c)), - Some(byte) => byte, - }; - if byte <= 0x7F { - return Ok(Either::Left(char::try_from(byte).unwrap())); - } - if self.trans().utf8 { - return Err(self.error(lit.span, ErrorKind::InvalidUtf8)); - } - Ok(Either::Right(byte)) - } - - fn case_fold_char(&self, span: Span, c: char) -> Result> { - if !self.flags().case_insensitive() { - return Ok(None); - } - if self.flags().unicode() { - // If case folding won't do anything, then don't bother trying. - let map = unicode::SimpleCaseFolder::new() - .map(|f| f.overlaps(c, c)) - .map_err(|_| { - self.error(span, ErrorKind::UnicodeCaseUnavailable) - })?; - if !map { - return Ok(None); - } - let mut cls = - hir::ClassUnicode::new(vec![hir::ClassUnicodeRange::new( - c, c, - )]); - cls.try_case_fold_simple().map_err(|_| { - self.error(span, ErrorKind::UnicodeCaseUnavailable) - })?; - Ok(Some(Hir::class(hir::Class::Unicode(cls)))) - } else { - if !c.is_ascii() { - return Ok(None); - } - // If case folding won't do anything, then don't bother trying. - match c { - 'A'..='Z' | 'a'..='z' => {} - _ => return Ok(None), - } - let mut cls = - hir::ClassBytes::new(vec![hir::ClassBytesRange::new( - // OK because 'c.len_utf8() == 1' which in turn implies - // that 'c' is ASCII. 
- u8::try_from(c).unwrap(), - u8::try_from(c).unwrap(), - )]); - cls.case_fold_simple(); - Ok(Some(Hir::class(hir::Class::Bytes(cls)))) - } - } - - fn hir_dot(&self, span: Span) -> Result { - let (utf8, lineterm, flags) = - (self.trans().utf8, self.trans().line_terminator, self.flags()); - if utf8 && (!flags.unicode() || !lineterm.is_ascii()) { - return Err(self.error(span, ErrorKind::InvalidUtf8)); - } - let dot = if flags.dot_matches_new_line() { - if flags.unicode() { - hir::Dot::AnyChar - } else { - hir::Dot::AnyByte - } - } else { - if flags.unicode() { - if flags.crlf() { - hir::Dot::AnyCharExceptCRLF - } else { - if !lineterm.is_ascii() { - return Err( - self.error(span, ErrorKind::InvalidLineTerminator) - ); - } - hir::Dot::AnyCharExcept(char::from(lineterm)) - } - } else { - if flags.crlf() { - hir::Dot::AnyByteExceptCRLF - } else { - hir::Dot::AnyByteExcept(lineterm) - } - } - }; - Ok(Hir::dot(dot)) - } - - fn hir_assertion(&self, asst: &ast::Assertion) -> Result { - let unicode = self.flags().unicode(); - let multi_line = self.flags().multi_line(); - let crlf = self.flags().crlf(); - Ok(match asst.kind { - ast::AssertionKind::StartLine => Hir::look(if multi_line { - if crlf { - hir::Look::StartCRLF - } else { - hir::Look::StartLF - } - } else { - hir::Look::Start - }), - ast::AssertionKind::EndLine => Hir::look(if multi_line { - if crlf { - hir::Look::EndCRLF - } else { - hir::Look::EndLF - } - } else { - hir::Look::End - }), - ast::AssertionKind::StartText => Hir::look(hir::Look::Start), - ast::AssertionKind::EndText => Hir::look(hir::Look::End), - ast::AssertionKind::WordBoundary => Hir::look(if unicode { - hir::Look::WordUnicode - } else { - hir::Look::WordAscii - }), - ast::AssertionKind::NotWordBoundary => Hir::look(if unicode { - hir::Look::WordUnicodeNegate - } else { - hir::Look::WordAsciiNegate - }), - ast::AssertionKind::WordBoundaryStart - | ast::AssertionKind::WordBoundaryStartAngle => { - Hir::look(if unicode { - hir::Look::WordStartUnicode - } else { - hir::Look::WordStartAscii - }) - } - ast::AssertionKind::WordBoundaryEnd - | ast::AssertionKind::WordBoundaryEndAngle => { - Hir::look(if unicode { - hir::Look::WordEndUnicode - } else { - hir::Look::WordEndAscii - }) - } - ast::AssertionKind::WordBoundaryStartHalf => { - Hir::look(if unicode { - hir::Look::WordStartHalfUnicode - } else { - hir::Look::WordStartHalfAscii - }) - } - ast::AssertionKind::WordBoundaryEndHalf => Hir::look(if unicode { - hir::Look::WordEndHalfUnicode - } else { - hir::Look::WordEndHalfAscii - }), - }) - } - - fn hir_capture(&self, group: &ast::Group, expr: Hir) -> Hir { - let (index, name) = match group.kind { - ast::GroupKind::CaptureIndex(index) => (index, None), - ast::GroupKind::CaptureName { ref name, .. } => { - (name.index, Some(name.name.clone().into_boxed_str())) - } - // The HIR doesn't need to use non-capturing groups, since the way - // in which the data type is defined handles this automatically. 
- ast::GroupKind::NonCapturing(_) => return expr, - }; - Hir::capture(hir::Capture { index, name, sub: Box::new(expr) }) - } - - fn hir_repetition(&self, rep: &ast::Repetition, expr: Hir) -> Hir { - let (min, max) = match rep.op.kind { - ast::RepetitionKind::ZeroOrOne => (0, Some(1)), - ast::RepetitionKind::ZeroOrMore => (0, None), - ast::RepetitionKind::OneOrMore => (1, None), - ast::RepetitionKind::Range(ast::RepetitionRange::Exactly(m)) => { - (m, Some(m)) - } - ast::RepetitionKind::Range(ast::RepetitionRange::AtLeast(m)) => { - (m, None) - } - ast::RepetitionKind::Range(ast::RepetitionRange::Bounded( - m, - n, - )) => (m, Some(n)), - }; - let greedy = - if self.flags().swap_greed() { !rep.greedy } else { rep.greedy }; - Hir::repetition(hir::Repetition { - min, - max, - greedy, - sub: Box::new(expr), - }) - } - - fn hir_unicode_class( - &self, - ast_class: &ast::ClassUnicode, - ) -> Result { - use crate::ast::ClassUnicodeKind::*; - - if !self.flags().unicode() { - return Err( - self.error(ast_class.span, ErrorKind::UnicodeNotAllowed) - ); - } - let query = match ast_class.kind { - OneLetter(name) => ClassQuery::OneLetter(name), - Named(ref name) => ClassQuery::Binary(name), - NamedValue { ref name, ref value, .. } => ClassQuery::ByValue { - property_name: name, - property_value: value, - }, - }; - let mut result = self.convert_unicode_class_error( - &ast_class.span, - unicode::class(query), - ); - if let Ok(ref mut class) = result { - self.unicode_fold_and_negate( - &ast_class.span, - ast_class.negated, - class, - )?; - } - result - } - - fn hir_ascii_unicode_class( - &self, - ast: &ast::ClassAscii, - ) -> Result { - let mut cls = hir::ClassUnicode::new( - ascii_class_as_chars(&ast.kind) - .map(|(s, e)| hir::ClassUnicodeRange::new(s, e)), - ); - self.unicode_fold_and_negate(&ast.span, ast.negated, &mut cls)?; - Ok(cls) - } - - fn hir_ascii_byte_class( - &self, - ast: &ast::ClassAscii, - ) -> Result { - let mut cls = hir::ClassBytes::new( - ascii_class(&ast.kind) - .map(|(s, e)| hir::ClassBytesRange::new(s, e)), - ); - self.bytes_fold_and_negate(&ast.span, ast.negated, &mut cls)?; - Ok(cls) - } - - fn hir_perl_unicode_class( - &self, - ast_class: &ast::ClassPerl, - ) -> Result { - use crate::ast::ClassPerlKind::*; - - assert!(self.flags().unicode()); - let result = match ast_class.kind { - Digit => unicode::perl_digit(), - Space => unicode::perl_space(), - Word => unicode::perl_word(), - }; - let mut class = - self.convert_unicode_class_error(&ast_class.span, result)?; - // We needn't apply case folding here because the Perl Unicode classes - // are already closed under Unicode simple case folding. - if ast_class.negated { - class.negate(); - } - Ok(class) - } - - fn hir_perl_byte_class( - &self, - ast_class: &ast::ClassPerl, - ) -> Result { - use crate::ast::ClassPerlKind::*; - - assert!(!self.flags().unicode()); - let mut class = match ast_class.kind { - Digit => hir_ascii_class_bytes(&ast::ClassAsciiKind::Digit), - Space => hir_ascii_class_bytes(&ast::ClassAsciiKind::Space), - Word => hir_ascii_class_bytes(&ast::ClassAsciiKind::Word), - }; - // We needn't apply case folding here because the Perl ASCII classes - // are already closed (under ASCII case folding). - if ast_class.negated { - class.negate(); - } - // Negating a Perl byte class is likely to cause it to match invalid - // UTF-8. That's only OK if the translator is configured to allow such - // things. 
- if self.trans().utf8 && !class.is_ascii() { - return Err(self.error(ast_class.span, ErrorKind::InvalidUtf8)); - } - Ok(class) - } - - /// Converts the given Unicode specific error to an HIR translation error. - /// - /// The span given should approximate the position at which an error would - /// occur. - fn convert_unicode_class_error( - &self, - span: &Span, - result: core::result::Result, - ) -> Result { - result.map_err(|err| { - let sp = span.clone(); - match err { - unicode::Error::PropertyNotFound => { - self.error(sp, ErrorKind::UnicodePropertyNotFound) - } - unicode::Error::PropertyValueNotFound => { - self.error(sp, ErrorKind::UnicodePropertyValueNotFound) - } - unicode::Error::PerlClassNotFound => { - self.error(sp, ErrorKind::UnicodePerlClassNotFound) - } - } - }) - } - - fn unicode_fold_and_negate( - &self, - span: &Span, - negated: bool, - class: &mut hir::ClassUnicode, - ) -> Result<()> { - // Note that we must apply case folding before negation! - // Consider `(?i)[^x]`. If we applied negation first, then - // the result would be the character class that matched any - // Unicode scalar value. - if self.flags().case_insensitive() { - class.try_case_fold_simple().map_err(|_| { - self.error(span.clone(), ErrorKind::UnicodeCaseUnavailable) - })?; - } - if negated { - class.negate(); - } - Ok(()) - } - - fn bytes_fold_and_negate( - &self, - span: &Span, - negated: bool, - class: &mut hir::ClassBytes, - ) -> Result<()> { - // Note that we must apply case folding before negation! - // Consider `(?i)[^x]`. If we applied negation first, then - // the result would be the character class that matched any - // Unicode scalar value. - if self.flags().case_insensitive() { - class.case_fold_simple(); - } - if negated { - class.negate(); - } - if self.trans().utf8 && !class.is_ascii() { - return Err(self.error(span.clone(), ErrorKind::InvalidUtf8)); - } - Ok(()) - } - - /// Return a scalar byte value suitable for use as a literal in a byte - /// character class. - fn class_literal_byte(&self, ast: &ast::Literal) -> Result { - match self.ast_literal_to_scalar(ast)? { - Either::Right(byte) => Ok(byte), - Either::Left(ch) => { - if ch.is_ascii() { - Ok(u8::try_from(ch).unwrap()) - } else { - // We can't feasibly support Unicode in - // byte oriented classes. Byte classes don't - // do Unicode case folding. - Err(self.error(ast.span, ErrorKind::UnicodeNotAllowed)) - } - } - } - } -} - -/// A translator's representation of a regular expression's flags at any given -/// moment in time. -/// -/// Each flag can be in one of three states: absent, present but disabled or -/// present but enabled. -#[derive(Clone, Copy, Debug, Default)] -struct Flags { - case_insensitive: Option, - multi_line: Option, - dot_matches_new_line: Option, - swap_greed: Option, - unicode: Option, - crlf: Option, - // Note that `ignore_whitespace` is omitted here because it is handled - // entirely in the parser. 
-} - -impl Flags { - fn from_ast(ast: &ast::Flags) -> Flags { - let mut flags = Flags::default(); - let mut enable = true; - for item in &ast.items { - match item.kind { - ast::FlagsItemKind::Negation => { - enable = false; - } - ast::FlagsItemKind::Flag(ast::Flag::CaseInsensitive) => { - flags.case_insensitive = Some(enable); - } - ast::FlagsItemKind::Flag(ast::Flag::MultiLine) => { - flags.multi_line = Some(enable); - } - ast::FlagsItemKind::Flag(ast::Flag::DotMatchesNewLine) => { - flags.dot_matches_new_line = Some(enable); - } - ast::FlagsItemKind::Flag(ast::Flag::SwapGreed) => { - flags.swap_greed = Some(enable); - } - ast::FlagsItemKind::Flag(ast::Flag::Unicode) => { - flags.unicode = Some(enable); - } - ast::FlagsItemKind::Flag(ast::Flag::CRLF) => { - flags.crlf = Some(enable); - } - ast::FlagsItemKind::Flag(ast::Flag::IgnoreWhitespace) => {} - } - } - flags - } - - fn merge(&mut self, previous: &Flags) { - if self.case_insensitive.is_none() { - self.case_insensitive = previous.case_insensitive; - } - if self.multi_line.is_none() { - self.multi_line = previous.multi_line; - } - if self.dot_matches_new_line.is_none() { - self.dot_matches_new_line = previous.dot_matches_new_line; - } - if self.swap_greed.is_none() { - self.swap_greed = previous.swap_greed; - } - if self.unicode.is_none() { - self.unicode = previous.unicode; - } - if self.crlf.is_none() { - self.crlf = previous.crlf; - } - } - - fn case_insensitive(&self) -> bool { - self.case_insensitive.unwrap_or(false) - } - - fn multi_line(&self) -> bool { - self.multi_line.unwrap_or(false) - } - - fn dot_matches_new_line(&self) -> bool { - self.dot_matches_new_line.unwrap_or(false) - } - - fn swap_greed(&self) -> bool { - self.swap_greed.unwrap_or(false) - } - - fn unicode(&self) -> bool { - self.unicode.unwrap_or(true) - } - - fn crlf(&self) -> bool { - self.crlf.unwrap_or(false) - } -} - -fn hir_ascii_class_bytes(kind: &ast::ClassAsciiKind) -> hir::ClassBytes { - let ranges: Vec<_> = ascii_class(kind) - .map(|(s, e)| hir::ClassBytesRange::new(s, e)) - .collect(); - hir::ClassBytes::new(ranges) -} - -fn ascii_class(kind: &ast::ClassAsciiKind) -> impl Iterator { - use crate::ast::ClassAsciiKind::*; - - let slice: &'static [(u8, u8)] = match *kind { - Alnum => &[(b'0', b'9'), (b'A', b'Z'), (b'a', b'z')], - Alpha => &[(b'A', b'Z'), (b'a', b'z')], - Ascii => &[(b'\x00', b'\x7F')], - Blank => &[(b'\t', b'\t'), (b' ', b' ')], - Cntrl => &[(b'\x00', b'\x1F'), (b'\x7F', b'\x7F')], - Digit => &[(b'0', b'9')], - Graph => &[(b'!', b'~')], - Lower => &[(b'a', b'z')], - Print => &[(b' ', b'~')], - Punct => &[(b'!', b'/'), (b':', b'@'), (b'[', b'`'), (b'{', b'~')], - Space => &[ - (b'\t', b'\t'), - (b'\n', b'\n'), - (b'\x0B', b'\x0B'), - (b'\x0C', b'\x0C'), - (b'\r', b'\r'), - (b' ', b' '), - ], - Upper => &[(b'A', b'Z')], - Word => &[(b'0', b'9'), (b'A', b'Z'), (b'_', b'_'), (b'a', b'z')], - Xdigit => &[(b'0', b'9'), (b'A', b'F'), (b'a', b'f')], - }; - slice.iter().copied() -} - -fn ascii_class_as_chars( - kind: &ast::ClassAsciiKind, -) -> impl Iterator { - ascii_class(kind).map(|(s, e)| (char::from(s), char::from(e))) -} - -#[cfg(test)] -mod tests { - use crate::{ - ast::{parse::ParserBuilder, Position}, - hir::{Look, Properties}, - }; - - use super::*; - - // We create these errors to compare with real hir::Errors in the tests. - // We define equality between TestError and hir::Error to disregard the - // pattern string in hir::Error, which is annoying to provide in tests. 
- #[derive(Clone, Debug)] - struct TestError { - span: Span, - kind: hir::ErrorKind, - } - - impl PartialEq for TestError { - fn eq(&self, other: &hir::Error) -> bool { - self.span == other.span && self.kind == other.kind - } - } - - impl PartialEq for hir::Error { - fn eq(&self, other: &TestError) -> bool { - self.span == other.span && self.kind == other.kind - } - } - - fn parse(pattern: &str) -> Ast { - ParserBuilder::new().octal(true).build().parse(pattern).unwrap() - } - - fn t(pattern: &str) -> Hir { - TranslatorBuilder::new() - .utf8(true) - .build() - .translate(pattern, &parse(pattern)) - .unwrap() - } - - fn t_err(pattern: &str) -> hir::Error { - TranslatorBuilder::new() - .utf8(true) - .build() - .translate(pattern, &parse(pattern)) - .unwrap_err() - } - - fn t_bytes(pattern: &str) -> Hir { - TranslatorBuilder::new() - .utf8(false) - .build() - .translate(pattern, &parse(pattern)) - .unwrap() - } - - fn props(pattern: &str) -> Properties { - t(pattern).properties().clone() - } - - fn props_bytes(pattern: &str) -> Properties { - t_bytes(pattern).properties().clone() - } - - fn hir_lit(s: &str) -> Hir { - hir_blit(s.as_bytes()) - } - - fn hir_blit(s: &[u8]) -> Hir { - Hir::literal(s) - } - - fn hir_capture(index: u32, expr: Hir) -> Hir { - Hir::capture(hir::Capture { index, name: None, sub: Box::new(expr) }) - } - - fn hir_capture_name(index: u32, name: &str, expr: Hir) -> Hir { - Hir::capture(hir::Capture { - index, - name: Some(name.into()), - sub: Box::new(expr), - }) - } - - fn hir_quest(greedy: bool, expr: Hir) -> Hir { - Hir::repetition(hir::Repetition { - min: 0, - max: Some(1), - greedy, - sub: Box::new(expr), - }) - } - - fn hir_star(greedy: bool, expr: Hir) -> Hir { - Hir::repetition(hir::Repetition { - min: 0, - max: None, - greedy, - sub: Box::new(expr), - }) - } - - fn hir_plus(greedy: bool, expr: Hir) -> Hir { - Hir::repetition(hir::Repetition { - min: 1, - max: None, - greedy, - sub: Box::new(expr), - }) - } - - fn hir_range(greedy: bool, min: u32, max: Option, expr: Hir) -> Hir { - Hir::repetition(hir::Repetition { - min, - max, - greedy, - sub: Box::new(expr), - }) - } - - fn hir_alt(alts: Vec) -> Hir { - Hir::alternation(alts) - } - - fn hir_cat(exprs: Vec) -> Hir { - Hir::concat(exprs) - } - - #[allow(dead_code)] - fn hir_uclass_query(query: ClassQuery<'_>) -> Hir { - Hir::class(hir::Class::Unicode(unicode::class(query).unwrap())) - } - - #[allow(dead_code)] - fn hir_uclass_perl_word() -> Hir { - Hir::class(hir::Class::Unicode(unicode::perl_word().unwrap())) - } - - fn hir_ascii_uclass(kind: &ast::ClassAsciiKind) -> Hir { - Hir::class(hir::Class::Unicode(hir::ClassUnicode::new( - ascii_class_as_chars(kind) - .map(|(s, e)| hir::ClassUnicodeRange::new(s, e)), - ))) - } - - fn hir_ascii_bclass(kind: &ast::ClassAsciiKind) -> Hir { - Hir::class(hir::Class::Bytes(hir::ClassBytes::new( - ascii_class(kind).map(|(s, e)| hir::ClassBytesRange::new(s, e)), - ))) - } - - fn hir_uclass(ranges: &[(char, char)]) -> Hir { - Hir::class(uclass(ranges)) - } - - fn hir_bclass(ranges: &[(u8, u8)]) -> Hir { - Hir::class(bclass(ranges)) - } - - fn hir_case_fold(expr: Hir) -> Hir { - match expr.into_kind() { - HirKind::Class(mut cls) => { - cls.case_fold_simple(); - Hir::class(cls) - } - _ => panic!("cannot case fold non-class Hir expr"), - } - } - - fn hir_negate(expr: Hir) -> Hir { - match expr.into_kind() { - HirKind::Class(mut cls) => { - cls.negate(); - Hir::class(cls) - } - _ => panic!("cannot negate non-class Hir expr"), - } - } - - fn uclass(ranges: &[(char, char)]) -> 
hir::Class { - let ranges: Vec = ranges - .iter() - .map(|&(s, e)| hir::ClassUnicodeRange::new(s, e)) - .collect(); - hir::Class::Unicode(hir::ClassUnicode::new(ranges)) - } - - fn bclass(ranges: &[(u8, u8)]) -> hir::Class { - let ranges: Vec = ranges - .iter() - .map(|&(s, e)| hir::ClassBytesRange::new(s, e)) - .collect(); - hir::Class::Bytes(hir::ClassBytes::new(ranges)) - } - - #[cfg(feature = "unicode-case")] - fn class_case_fold(mut cls: hir::Class) -> Hir { - cls.case_fold_simple(); - Hir::class(cls) - } - - fn class_negate(mut cls: hir::Class) -> Hir { - cls.negate(); - Hir::class(cls) - } - - #[allow(dead_code)] - fn hir_union(expr1: Hir, expr2: Hir) -> Hir { - use crate::hir::Class::{Bytes, Unicode}; - - match (expr1.into_kind(), expr2.into_kind()) { - (HirKind::Class(Unicode(mut c1)), HirKind::Class(Unicode(c2))) => { - c1.union(&c2); - Hir::class(hir::Class::Unicode(c1)) - } - (HirKind::Class(Bytes(mut c1)), HirKind::Class(Bytes(c2))) => { - c1.union(&c2); - Hir::class(hir::Class::Bytes(c1)) - } - _ => panic!("cannot union non-class Hir exprs"), - } - } - - #[allow(dead_code)] - fn hir_difference(expr1: Hir, expr2: Hir) -> Hir { - use crate::hir::Class::{Bytes, Unicode}; - - match (expr1.into_kind(), expr2.into_kind()) { - (HirKind::Class(Unicode(mut c1)), HirKind::Class(Unicode(c2))) => { - c1.difference(&c2); - Hir::class(hir::Class::Unicode(c1)) - } - (HirKind::Class(Bytes(mut c1)), HirKind::Class(Bytes(c2))) => { - c1.difference(&c2); - Hir::class(hir::Class::Bytes(c1)) - } - _ => panic!("cannot difference non-class Hir exprs"), - } - } - - fn hir_look(look: hir::Look) -> Hir { - Hir::look(look) - } - - #[test] - fn empty() { - assert_eq!(t(""), Hir::empty()); - assert_eq!(t("(?i)"), Hir::empty()); - assert_eq!(t("()"), hir_capture(1, Hir::empty())); - assert_eq!(t("(?:)"), Hir::empty()); - assert_eq!(t("(?P)"), hir_capture_name(1, "wat", Hir::empty())); - assert_eq!(t("|"), hir_alt(vec![Hir::empty(), Hir::empty()])); - assert_eq!( - t("()|()"), - hir_alt(vec![ - hir_capture(1, Hir::empty()), - hir_capture(2, Hir::empty()), - ]) - ); - assert_eq!( - t("(|b)"), - hir_capture(1, hir_alt(vec![Hir::empty(), hir_lit("b"),])) - ); - assert_eq!( - t("(a|)"), - hir_capture(1, hir_alt(vec![hir_lit("a"), Hir::empty(),])) - ); - assert_eq!( - t("(a||c)"), - hir_capture( - 1, - hir_alt(vec![hir_lit("a"), Hir::empty(), hir_lit("c"),]) - ) - ); - assert_eq!( - t("(||)"), - hir_capture( - 1, - hir_alt(vec![Hir::empty(), Hir::empty(), Hir::empty(),]) - ) - ); - } - - #[test] - fn literal() { - assert_eq!(t("a"), hir_lit("a")); - assert_eq!(t("(?-u)a"), hir_lit("a")); - assert_eq!(t("☃"), hir_lit("☃")); - assert_eq!(t("abcd"), hir_lit("abcd")); - - assert_eq!(t_bytes("(?-u)a"), hir_lit("a")); - assert_eq!(t_bytes("(?-u)\x61"), hir_lit("a")); - assert_eq!(t_bytes(r"(?-u)\x61"), hir_lit("a")); - assert_eq!(t_bytes(r"(?-u)\xFF"), hir_blit(b"\xFF")); - - assert_eq!(t("(?-u)☃"), hir_lit("☃")); - assert_eq!( - t_err(r"(?-u)\xFF"), - TestError { - kind: hir::ErrorKind::InvalidUtf8, - span: Span::new( - Position::new(5, 1, 6), - Position::new(9, 1, 10) - ), - } - ); - } - - #[test] - fn literal_case_insensitive() { - #[cfg(feature = "unicode-case")] - assert_eq!(t("(?i)a"), hir_uclass(&[('A', 'A'), ('a', 'a'),])); - #[cfg(feature = "unicode-case")] - assert_eq!(t("(?i:a)"), hir_uclass(&[('A', 'A'), ('a', 'a')])); - #[cfg(feature = "unicode-case")] - assert_eq!( - t("a(?i)a(?-i)a"), - hir_cat(vec![ - hir_lit("a"), - hir_uclass(&[('A', 'A'), ('a', 'a')]), - hir_lit("a"), - ]) - ); - #[cfg(feature = 
"unicode-case")] - assert_eq!( - t("(?i)ab@c"), - hir_cat(vec![ - hir_uclass(&[('A', 'A'), ('a', 'a')]), - hir_uclass(&[('B', 'B'), ('b', 'b')]), - hir_lit("@"), - hir_uclass(&[('C', 'C'), ('c', 'c')]), - ]) - ); - #[cfg(feature = "unicode-case")] - assert_eq!( - t("(?i)β"), - hir_uclass(&[('Β', 'Β'), ('β', 'β'), ('ϐ', 'ϐ'),]) - ); - - assert_eq!(t("(?i-u)a"), hir_bclass(&[(b'A', b'A'), (b'a', b'a'),])); - #[cfg(feature = "unicode-case")] - assert_eq!( - t("(?-u)a(?i)a(?-i)a"), - hir_cat(vec![ - hir_lit("a"), - hir_bclass(&[(b'A', b'A'), (b'a', b'a')]), - hir_lit("a"), - ]) - ); - assert_eq!( - t("(?i-u)ab@c"), - hir_cat(vec![ - hir_bclass(&[(b'A', b'A'), (b'a', b'a')]), - hir_bclass(&[(b'B', b'B'), (b'b', b'b')]), - hir_lit("@"), - hir_bclass(&[(b'C', b'C'), (b'c', b'c')]), - ]) - ); - - assert_eq!( - t_bytes("(?i-u)a"), - hir_bclass(&[(b'A', b'A'), (b'a', b'a'),]) - ); - assert_eq!( - t_bytes("(?i-u)\x61"), - hir_bclass(&[(b'A', b'A'), (b'a', b'a'),]) - ); - assert_eq!( - t_bytes(r"(?i-u)\x61"), - hir_bclass(&[(b'A', b'A'), (b'a', b'a'),]) - ); - assert_eq!(t_bytes(r"(?i-u)\xFF"), hir_blit(b"\xFF")); - - assert_eq!(t("(?i-u)β"), hir_lit("β"),); - } - - #[test] - fn dot() { - assert_eq!( - t("."), - hir_uclass(&[('\0', '\t'), ('\x0B', '\u{10FFFF}')]) - ); - assert_eq!( - t("(?R)."), - hir_uclass(&[ - ('\0', '\t'), - ('\x0B', '\x0C'), - ('\x0E', '\u{10FFFF}'), - ]) - ); - assert_eq!(t("(?s)."), hir_uclass(&[('\0', '\u{10FFFF}')])); - assert_eq!(t("(?Rs)."), hir_uclass(&[('\0', '\u{10FFFF}')])); - assert_eq!( - t_bytes("(?-u)."), - hir_bclass(&[(b'\0', b'\t'), (b'\x0B', b'\xFF')]) - ); - assert_eq!( - t_bytes("(?R-u)."), - hir_bclass(&[ - (b'\0', b'\t'), - (b'\x0B', b'\x0C'), - (b'\x0E', b'\xFF'), - ]) - ); - assert_eq!(t_bytes("(?s-u)."), hir_bclass(&[(b'\0', b'\xFF'),])); - assert_eq!(t_bytes("(?Rs-u)."), hir_bclass(&[(b'\0', b'\xFF'),])); - - // If invalid UTF-8 isn't allowed, then non-Unicode `.` isn't allowed. 
- assert_eq!( - t_err("(?-u)."), - TestError { - kind: hir::ErrorKind::InvalidUtf8, - span: Span::new( - Position::new(5, 1, 6), - Position::new(6, 1, 7) - ), - } - ); - assert_eq!( - t_err("(?R-u)."), - TestError { - kind: hir::ErrorKind::InvalidUtf8, - span: Span::new( - Position::new(6, 1, 7), - Position::new(7, 1, 8) - ), - } - ); - assert_eq!( - t_err("(?s-u)."), - TestError { - kind: hir::ErrorKind::InvalidUtf8, - span: Span::new( - Position::new(6, 1, 7), - Position::new(7, 1, 8) - ), - } - ); - assert_eq!( - t_err("(?Rs-u)."), - TestError { - kind: hir::ErrorKind::InvalidUtf8, - span: Span::new( - Position::new(7, 1, 8), - Position::new(8, 1, 9) - ), - } - ); - } - - #[test] - fn assertions() { - assert_eq!(t("^"), hir_look(hir::Look::Start)); - assert_eq!(t("$"), hir_look(hir::Look::End)); - assert_eq!(t(r"\A"), hir_look(hir::Look::Start)); - assert_eq!(t(r"\z"), hir_look(hir::Look::End)); - assert_eq!(t("(?m)^"), hir_look(hir::Look::StartLF)); - assert_eq!(t("(?m)$"), hir_look(hir::Look::EndLF)); - assert_eq!(t(r"(?m)\A"), hir_look(hir::Look::Start)); - assert_eq!(t(r"(?m)\z"), hir_look(hir::Look::End)); - - assert_eq!(t(r"\b"), hir_look(hir::Look::WordUnicode)); - assert_eq!(t(r"\B"), hir_look(hir::Look::WordUnicodeNegate)); - assert_eq!(t(r"(?-u)\b"), hir_look(hir::Look::WordAscii)); - assert_eq!(t(r"(?-u)\B"), hir_look(hir::Look::WordAsciiNegate)); - } - - #[test] - fn group() { - assert_eq!(t("(a)"), hir_capture(1, hir_lit("a"))); - assert_eq!( - t("(a)(b)"), - hir_cat(vec![ - hir_capture(1, hir_lit("a")), - hir_capture(2, hir_lit("b")), - ]) - ); - assert_eq!( - t("(a)|(b)"), - hir_alt(vec![ - hir_capture(1, hir_lit("a")), - hir_capture(2, hir_lit("b")), - ]) - ); - assert_eq!(t("(?P)"), hir_capture_name(1, "foo", Hir::empty())); - assert_eq!(t("(?Pa)"), hir_capture_name(1, "foo", hir_lit("a"))); - assert_eq!( - t("(?Pa)(?Pb)"), - hir_cat(vec![ - hir_capture_name(1, "foo", hir_lit("a")), - hir_capture_name(2, "bar", hir_lit("b")), - ]) - ); - assert_eq!(t("(?:)"), Hir::empty()); - assert_eq!(t("(?:a)"), hir_lit("a")); - assert_eq!( - t("(?:a)(b)"), - hir_cat(vec![hir_lit("a"), hir_capture(1, hir_lit("b")),]) - ); - assert_eq!( - t("(a)(?:b)(c)"), - hir_cat(vec![ - hir_capture(1, hir_lit("a")), - hir_lit("b"), - hir_capture(2, hir_lit("c")), - ]) - ); - assert_eq!( - t("(a)(?Pb)(c)"), - hir_cat(vec![ - hir_capture(1, hir_lit("a")), - hir_capture_name(2, "foo", hir_lit("b")), - hir_capture(3, hir_lit("c")), - ]) - ); - assert_eq!(t("()"), hir_capture(1, Hir::empty())); - assert_eq!(t("((?i))"), hir_capture(1, Hir::empty())); - assert_eq!(t("((?x))"), hir_capture(1, Hir::empty())); - assert_eq!( - t("(((?x)))"), - hir_capture(1, hir_capture(2, Hir::empty())) - ); - } - - #[test] - fn line_anchors() { - assert_eq!(t("^"), hir_look(hir::Look::Start)); - assert_eq!(t("$"), hir_look(hir::Look::End)); - assert_eq!(t(r"\A"), hir_look(hir::Look::Start)); - assert_eq!(t(r"\z"), hir_look(hir::Look::End)); - - assert_eq!(t(r"(?m)\A"), hir_look(hir::Look::Start)); - assert_eq!(t(r"(?m)\z"), hir_look(hir::Look::End)); - assert_eq!(t("(?m)^"), hir_look(hir::Look::StartLF)); - assert_eq!(t("(?m)$"), hir_look(hir::Look::EndLF)); - - assert_eq!(t(r"(?R)\A"), hir_look(hir::Look::Start)); - assert_eq!(t(r"(?R)\z"), hir_look(hir::Look::End)); - assert_eq!(t("(?R)^"), hir_look(hir::Look::Start)); - assert_eq!(t("(?R)$"), hir_look(hir::Look::End)); - - assert_eq!(t(r"(?Rm)\A"), hir_look(hir::Look::Start)); - assert_eq!(t(r"(?Rm)\z"), hir_look(hir::Look::End)); - assert_eq!(t("(?Rm)^"), 
hir_look(hir::Look::StartCRLF)); - assert_eq!(t("(?Rm)$"), hir_look(hir::Look::EndCRLF)); - } - - #[test] - fn flags() { - #[cfg(feature = "unicode-case")] - assert_eq!( - t("(?i:a)a"), - hir_cat( - vec![hir_uclass(&[('A', 'A'), ('a', 'a')]), hir_lit("a"),] - ) - ); - assert_eq!( - t("(?i-u:a)β"), - hir_cat(vec![ - hir_bclass(&[(b'A', b'A'), (b'a', b'a')]), - hir_lit("β"), - ]) - ); - assert_eq!( - t("(?:(?i-u)a)b"), - hir_cat(vec![ - hir_bclass(&[(b'A', b'A'), (b'a', b'a')]), - hir_lit("b"), - ]) - ); - assert_eq!( - t("((?i-u)a)b"), - hir_cat(vec![ - hir_capture(1, hir_bclass(&[(b'A', b'A'), (b'a', b'a')])), - hir_lit("b"), - ]) - ); - #[cfg(feature = "unicode-case")] - assert_eq!( - t("(?i)(?-i:a)a"), - hir_cat( - vec![hir_lit("a"), hir_uclass(&[('A', 'A'), ('a', 'a')]),] - ) - ); - #[cfg(feature = "unicode-case")] - assert_eq!( - t("(?im)a^"), - hir_cat(vec![ - hir_uclass(&[('A', 'A'), ('a', 'a')]), - hir_look(hir::Look::StartLF), - ]) - ); - #[cfg(feature = "unicode-case")] - assert_eq!( - t("(?im)a^(?i-m)a^"), - hir_cat(vec![ - hir_uclass(&[('A', 'A'), ('a', 'a')]), - hir_look(hir::Look::StartLF), - hir_uclass(&[('A', 'A'), ('a', 'a')]), - hir_look(hir::Look::Start), - ]) - ); - assert_eq!( - t("(?U)a*a*?(?-U)a*a*?"), - hir_cat(vec![ - hir_star(false, hir_lit("a")), - hir_star(true, hir_lit("a")), - hir_star(true, hir_lit("a")), - hir_star(false, hir_lit("a")), - ]) - ); - #[cfg(feature = "unicode-case")] - assert_eq!( - t("(?:a(?i)a)a"), - hir_cat(vec![ - hir_cat(vec![ - hir_lit("a"), - hir_uclass(&[('A', 'A'), ('a', 'a')]), - ]), - hir_lit("a"), - ]) - ); - #[cfg(feature = "unicode-case")] - assert_eq!( - t("(?i)(?:a(?-i)a)a"), - hir_cat(vec![ - hir_cat(vec![ - hir_uclass(&[('A', 'A'), ('a', 'a')]), - hir_lit("a"), - ]), - hir_uclass(&[('A', 'A'), ('a', 'a')]), - ]) - ); - } - - #[test] - fn escape() { - assert_eq!( - t(r"\\\.\+\*\?\(\)\|\[\]\{\}\^\$\#"), - hir_lit(r"\.+*?()|[]{}^$#") - ); - } - - #[test] - fn repetition() { - assert_eq!(t("a?"), hir_quest(true, hir_lit("a"))); - assert_eq!(t("a*"), hir_star(true, hir_lit("a"))); - assert_eq!(t("a+"), hir_plus(true, hir_lit("a"))); - assert_eq!(t("a??"), hir_quest(false, hir_lit("a"))); - assert_eq!(t("a*?"), hir_star(false, hir_lit("a"))); - assert_eq!(t("a+?"), hir_plus(false, hir_lit("a"))); - - assert_eq!(t("a{1}"), hir_range(true, 1, Some(1), hir_lit("a"),)); - assert_eq!(t("a{1,}"), hir_range(true, 1, None, hir_lit("a"),)); - assert_eq!(t("a{1,2}"), hir_range(true, 1, Some(2), hir_lit("a"),)); - assert_eq!(t("a{1}?"), hir_range(false, 1, Some(1), hir_lit("a"),)); - assert_eq!(t("a{1,}?"), hir_range(false, 1, None, hir_lit("a"),)); - assert_eq!(t("a{1,2}?"), hir_range(false, 1, Some(2), hir_lit("a"),)); - - assert_eq!( - t("ab?"), - hir_cat(vec![hir_lit("a"), hir_quest(true, hir_lit("b")),]) - ); - assert_eq!(t("(ab)?"), hir_quest(true, hir_capture(1, hir_lit("ab")))); - assert_eq!( - t("a|b?"), - hir_alt(vec![hir_lit("a"), hir_quest(true, hir_lit("b")),]) - ); - } - - #[test] - fn cat_alt() { - let a = || hir_look(hir::Look::Start); - let b = || hir_look(hir::Look::End); - let c = || hir_look(hir::Look::WordUnicode); - let d = || hir_look(hir::Look::WordUnicodeNegate); - - assert_eq!(t("(^$)"), hir_capture(1, hir_cat(vec![a(), b()]))); - assert_eq!(t("^|$"), hir_alt(vec![a(), b()])); - assert_eq!(t(r"^|$|\b"), hir_alt(vec![a(), b(), c()])); - assert_eq!( - t(r"^$|$\b|\b\B"), - hir_alt(vec![ - hir_cat(vec![a(), b()]), - hir_cat(vec![b(), c()]), - hir_cat(vec![c(), d()]), - ]) - ); - assert_eq!(t("(^|$)"), hir_capture(1, 
hir_alt(vec![a(), b()]))); - assert_eq!( - t(r"(^|$|\b)"), - hir_capture(1, hir_alt(vec![a(), b(), c()])) - ); - assert_eq!( - t(r"(^$|$\b|\b\B)"), - hir_capture( - 1, - hir_alt(vec![ - hir_cat(vec![a(), b()]), - hir_cat(vec![b(), c()]), - hir_cat(vec![c(), d()]), - ]) - ) - ); - assert_eq!( - t(r"(^$|($\b|(\b\B)))"), - hir_capture( - 1, - hir_alt(vec![ - hir_cat(vec![a(), b()]), - hir_capture( - 2, - hir_alt(vec![ - hir_cat(vec![b(), c()]), - hir_capture(3, hir_cat(vec![c(), d()])), - ]) - ), - ]) - ) - ); - } - - // Tests the HIR transformation of things like '[a-z]|[A-Z]' into - // '[A-Za-z]'. In other words, an alternation of just classes is always - // equivalent to a single class corresponding to the union of the branches - // in that class. (Unless some branches match invalid UTF-8 and others - // match non-ASCII Unicode.) - #[test] - fn cat_class_flattened() { - assert_eq!(t(r"[a-z]|[A-Z]"), hir_uclass(&[('A', 'Z'), ('a', 'z')])); - // Combining all of the letter properties should give us the one giant - // letter property. - #[cfg(feature = "unicode-gencat")] - assert_eq!( - t(r"(?x) - \p{Lowercase_Letter} - |\p{Uppercase_Letter} - |\p{Titlecase_Letter} - |\p{Modifier_Letter} - |\p{Other_Letter} - "), - hir_uclass_query(ClassQuery::Binary("letter")) - ); - // Byte classes that can truly match invalid UTF-8 cannot be combined - // with Unicode classes. - assert_eq!( - t_bytes(r"[Δδ]|(?-u:[\x90-\xFF])|[Λλ]"), - hir_alt(vec![ - hir_uclass(&[('Δ', 'Δ'), ('δ', 'δ')]), - hir_bclass(&[(b'\x90', b'\xFF')]), - hir_uclass(&[('Λ', 'Λ'), ('λ', 'λ')]), - ]) - ); - // Byte classes on their own can be combined, even if some are ASCII - // and others are invalid UTF-8. - assert_eq!( - t_bytes(r"[a-z]|(?-u:[\x90-\xFF])|[A-Z]"), - hir_bclass(&[(b'A', b'Z'), (b'a', b'z'), (b'\x90', b'\xFF')]), - ); - } - - #[test] - fn class_ascii() { - assert_eq!( - t("[[:alnum:]]"), - hir_ascii_uclass(&ast::ClassAsciiKind::Alnum) - ); - assert_eq!( - t("[[:alpha:]]"), - hir_ascii_uclass(&ast::ClassAsciiKind::Alpha) - ); - assert_eq!( - t("[[:ascii:]]"), - hir_ascii_uclass(&ast::ClassAsciiKind::Ascii) - ); - assert_eq!( - t("[[:blank:]]"), - hir_ascii_uclass(&ast::ClassAsciiKind::Blank) - ); - assert_eq!( - t("[[:cntrl:]]"), - hir_ascii_uclass(&ast::ClassAsciiKind::Cntrl) - ); - assert_eq!( - t("[[:digit:]]"), - hir_ascii_uclass(&ast::ClassAsciiKind::Digit) - ); - assert_eq!( - t("[[:graph:]]"), - hir_ascii_uclass(&ast::ClassAsciiKind::Graph) - ); - assert_eq!( - t("[[:lower:]]"), - hir_ascii_uclass(&ast::ClassAsciiKind::Lower) - ); - assert_eq!( - t("[[:print:]]"), - hir_ascii_uclass(&ast::ClassAsciiKind::Print) - ); - assert_eq!( - t("[[:punct:]]"), - hir_ascii_uclass(&ast::ClassAsciiKind::Punct) - ); - assert_eq!( - t("[[:space:]]"), - hir_ascii_uclass(&ast::ClassAsciiKind::Space) - ); - assert_eq!( - t("[[:upper:]]"), - hir_ascii_uclass(&ast::ClassAsciiKind::Upper) - ); - assert_eq!( - t("[[:word:]]"), - hir_ascii_uclass(&ast::ClassAsciiKind::Word) - ); - assert_eq!( - t("[[:xdigit:]]"), - hir_ascii_uclass(&ast::ClassAsciiKind::Xdigit) - ); - - assert_eq!( - t("[[:^lower:]]"), - hir_negate(hir_ascii_uclass(&ast::ClassAsciiKind::Lower)) - ); - #[cfg(feature = "unicode-case")] - assert_eq!( - t("(?i)[[:lower:]]"), - hir_uclass(&[ - ('A', 'Z'), - ('a', 'z'), - ('\u{17F}', '\u{17F}'), - ('\u{212A}', '\u{212A}'), - ]) - ); - - assert_eq!( - t("(?-u)[[:lower:]]"), - hir_ascii_bclass(&ast::ClassAsciiKind::Lower) - ); - assert_eq!( - t("(?i-u)[[:lower:]]"), - hir_case_fold(hir_ascii_bclass(&ast::ClassAsciiKind::Lower)) 
- ); - - assert_eq!( - t_err("(?-u)[[:^lower:]]"), - TestError { - kind: hir::ErrorKind::InvalidUtf8, - span: Span::new( - Position::new(6, 1, 7), - Position::new(16, 1, 17) - ), - } - ); - assert_eq!( - t_err("(?i-u)[[:^lower:]]"), - TestError { - kind: hir::ErrorKind::InvalidUtf8, - span: Span::new( - Position::new(7, 1, 8), - Position::new(17, 1, 18) - ), - } - ); - } - - #[test] - fn class_ascii_multiple() { - // See: https://github.com/rust-lang/regex/issues/680 - assert_eq!( - t("[[:alnum:][:^ascii:]]"), - hir_union( - hir_ascii_uclass(&ast::ClassAsciiKind::Alnum), - hir_uclass(&[('\u{80}', '\u{10FFFF}')]), - ), - ); - assert_eq!( - t_bytes("(?-u)[[:alnum:][:^ascii:]]"), - hir_union( - hir_ascii_bclass(&ast::ClassAsciiKind::Alnum), - hir_bclass(&[(0x80, 0xFF)]), - ), - ); - } - - #[test] - #[cfg(feature = "unicode-perl")] - fn class_perl_unicode() { - // Unicode - assert_eq!(t(r"\d"), hir_uclass_query(ClassQuery::Binary("digit"))); - assert_eq!(t(r"\s"), hir_uclass_query(ClassQuery::Binary("space"))); - assert_eq!(t(r"\w"), hir_uclass_perl_word()); - #[cfg(feature = "unicode-case")] - assert_eq!( - t(r"(?i)\d"), - hir_uclass_query(ClassQuery::Binary("digit")) - ); - #[cfg(feature = "unicode-case")] - assert_eq!( - t(r"(?i)\s"), - hir_uclass_query(ClassQuery::Binary("space")) - ); - #[cfg(feature = "unicode-case")] - assert_eq!(t(r"(?i)\w"), hir_uclass_perl_word()); - - // Unicode, negated - assert_eq!( - t(r"\D"), - hir_negate(hir_uclass_query(ClassQuery::Binary("digit"))) - ); - assert_eq!( - t(r"\S"), - hir_negate(hir_uclass_query(ClassQuery::Binary("space"))) - ); - assert_eq!(t(r"\W"), hir_negate(hir_uclass_perl_word())); - #[cfg(feature = "unicode-case")] - assert_eq!( - t(r"(?i)\D"), - hir_negate(hir_uclass_query(ClassQuery::Binary("digit"))) - ); - #[cfg(feature = "unicode-case")] - assert_eq!( - t(r"(?i)\S"), - hir_negate(hir_uclass_query(ClassQuery::Binary("space"))) - ); - #[cfg(feature = "unicode-case")] - assert_eq!(t(r"(?i)\W"), hir_negate(hir_uclass_perl_word())); - } - - #[test] - fn class_perl_ascii() { - // ASCII only - assert_eq!( - t(r"(?-u)\d"), - hir_ascii_bclass(&ast::ClassAsciiKind::Digit) - ); - assert_eq!( - t(r"(?-u)\s"), - hir_ascii_bclass(&ast::ClassAsciiKind::Space) - ); - assert_eq!( - t(r"(?-u)\w"), - hir_ascii_bclass(&ast::ClassAsciiKind::Word) - ); - assert_eq!( - t(r"(?i-u)\d"), - hir_ascii_bclass(&ast::ClassAsciiKind::Digit) - ); - assert_eq!( - t(r"(?i-u)\s"), - hir_ascii_bclass(&ast::ClassAsciiKind::Space) - ); - assert_eq!( - t(r"(?i-u)\w"), - hir_ascii_bclass(&ast::ClassAsciiKind::Word) - ); - - // ASCII only, negated - assert_eq!( - t_bytes(r"(?-u)\D"), - hir_negate(hir_ascii_bclass(&ast::ClassAsciiKind::Digit)) - ); - assert_eq!( - t_bytes(r"(?-u)\S"), - hir_negate(hir_ascii_bclass(&ast::ClassAsciiKind::Space)) - ); - assert_eq!( - t_bytes(r"(?-u)\W"), - hir_negate(hir_ascii_bclass(&ast::ClassAsciiKind::Word)) - ); - assert_eq!( - t_bytes(r"(?i-u)\D"), - hir_negate(hir_ascii_bclass(&ast::ClassAsciiKind::Digit)) - ); - assert_eq!( - t_bytes(r"(?i-u)\S"), - hir_negate(hir_ascii_bclass(&ast::ClassAsciiKind::Space)) - ); - assert_eq!( - t_bytes(r"(?i-u)\W"), - hir_negate(hir_ascii_bclass(&ast::ClassAsciiKind::Word)) - ); - - // ASCII only, negated, with UTF-8 mode enabled. - // In this case, negating any Perl class results in an error because - // all such classes can match invalid UTF-8. 
- assert_eq!( - t_err(r"(?-u)\D"), - TestError { - kind: hir::ErrorKind::InvalidUtf8, - span: Span::new( - Position::new(5, 1, 6), - Position::new(7, 1, 8), - ), - }, - ); - assert_eq!( - t_err(r"(?-u)\S"), - TestError { - kind: hir::ErrorKind::InvalidUtf8, - span: Span::new( - Position::new(5, 1, 6), - Position::new(7, 1, 8), - ), - }, - ); - assert_eq!( - t_err(r"(?-u)\W"), - TestError { - kind: hir::ErrorKind::InvalidUtf8, - span: Span::new( - Position::new(5, 1, 6), - Position::new(7, 1, 8), - ), - }, - ); - assert_eq!( - t_err(r"(?i-u)\D"), - TestError { - kind: hir::ErrorKind::InvalidUtf8, - span: Span::new( - Position::new(6, 1, 7), - Position::new(8, 1, 9), - ), - }, - ); - assert_eq!( - t_err(r"(?i-u)\S"), - TestError { - kind: hir::ErrorKind::InvalidUtf8, - span: Span::new( - Position::new(6, 1, 7), - Position::new(8, 1, 9), - ), - }, - ); - assert_eq!( - t_err(r"(?i-u)\W"), - TestError { - kind: hir::ErrorKind::InvalidUtf8, - span: Span::new( - Position::new(6, 1, 7), - Position::new(8, 1, 9), - ), - }, - ); - } - - #[test] - #[cfg(not(feature = "unicode-perl"))] - fn class_perl_word_disabled() { - assert_eq!( - t_err(r"\w"), - TestError { - kind: hir::ErrorKind::UnicodePerlClassNotFound, - span: Span::new( - Position::new(0, 1, 1), - Position::new(2, 1, 3) - ), - } - ); - } - - #[test] - #[cfg(all(not(feature = "unicode-perl"), not(feature = "unicode-bool")))] - fn class_perl_space_disabled() { - assert_eq!( - t_err(r"\s"), - TestError { - kind: hir::ErrorKind::UnicodePerlClassNotFound, - span: Span::new( - Position::new(0, 1, 1), - Position::new(2, 1, 3) - ), - } - ); - } - - #[test] - #[cfg(all( - not(feature = "unicode-perl"), - not(feature = "unicode-gencat") - ))] - fn class_perl_digit_disabled() { - assert_eq!( - t_err(r"\d"), - TestError { - kind: hir::ErrorKind::UnicodePerlClassNotFound, - span: Span::new( - Position::new(0, 1, 1), - Position::new(2, 1, 3) - ), - } - ); - } - - #[test] - #[cfg(feature = "unicode-gencat")] - fn class_unicode_gencat() { - assert_eq!(t(r"\pZ"), hir_uclass_query(ClassQuery::Binary("Z"))); - assert_eq!(t(r"\pz"), hir_uclass_query(ClassQuery::Binary("Z"))); - assert_eq!( - t(r"\p{Separator}"), - hir_uclass_query(ClassQuery::Binary("Z")) - ); - assert_eq!( - t(r"\p{se PaRa ToR}"), - hir_uclass_query(ClassQuery::Binary("Z")) - ); - assert_eq!( - t(r"\p{gc:Separator}"), - hir_uclass_query(ClassQuery::Binary("Z")) - ); - assert_eq!( - t(r"\p{gc=Separator}"), - hir_uclass_query(ClassQuery::Binary("Z")) - ); - assert_eq!( - t(r"\p{Other}"), - hir_uclass_query(ClassQuery::Binary("Other")) - ); - assert_eq!(t(r"\pC"), hir_uclass_query(ClassQuery::Binary("Other"))); - - assert_eq!( - t(r"\PZ"), - hir_negate(hir_uclass_query(ClassQuery::Binary("Z"))) - ); - assert_eq!( - t(r"\P{separator}"), - hir_negate(hir_uclass_query(ClassQuery::Binary("Z"))) - ); - assert_eq!( - t(r"\P{gc!=separator}"), - hir_negate(hir_uclass_query(ClassQuery::Binary("Z"))) - ); - - assert_eq!(t(r"\p{any}"), hir_uclass_query(ClassQuery::Binary("Any"))); - assert_eq!( - t(r"\p{assigned}"), - hir_uclass_query(ClassQuery::Binary("Assigned")) - ); - assert_eq!( - t(r"\p{ascii}"), - hir_uclass_query(ClassQuery::Binary("ASCII")) - ); - assert_eq!( - t(r"\p{gc:any}"), - hir_uclass_query(ClassQuery::Binary("Any")) - ); - assert_eq!( - t(r"\p{gc:assigned}"), - hir_uclass_query(ClassQuery::Binary("Assigned")) - ); - assert_eq!( - t(r"\p{gc:ascii}"), - hir_uclass_query(ClassQuery::Binary("ASCII")) - ); - - assert_eq!( - t_err(r"(?-u)\pZ"), - TestError { - kind: 
hir::ErrorKind::UnicodeNotAllowed, - span: Span::new( - Position::new(5, 1, 6), - Position::new(8, 1, 9) - ), - } - ); - assert_eq!( - t_err(r"(?-u)\p{Separator}"), - TestError { - kind: hir::ErrorKind::UnicodeNotAllowed, - span: Span::new( - Position::new(5, 1, 6), - Position::new(18, 1, 19) - ), - } - ); - assert_eq!( - t_err(r"\pE"), - TestError { - kind: hir::ErrorKind::UnicodePropertyNotFound, - span: Span::new( - Position::new(0, 1, 1), - Position::new(3, 1, 4) - ), - } - ); - assert_eq!( - t_err(r"\p{Foo}"), - TestError { - kind: hir::ErrorKind::UnicodePropertyNotFound, - span: Span::new( - Position::new(0, 1, 1), - Position::new(7, 1, 8) - ), - } - ); - assert_eq!( - t_err(r"\p{gc:Foo}"), - TestError { - kind: hir::ErrorKind::UnicodePropertyValueNotFound, - span: Span::new( - Position::new(0, 1, 1), - Position::new(10, 1, 11) - ), - } - ); - } - - #[test] - #[cfg(not(feature = "unicode-gencat"))] - fn class_unicode_gencat_disabled() { - assert_eq!( - t_err(r"\p{Separator}"), - TestError { - kind: hir::ErrorKind::UnicodePropertyNotFound, - span: Span::new( - Position::new(0, 1, 1), - Position::new(13, 1, 14) - ), - } - ); - - assert_eq!( - t_err(r"\p{Any}"), - TestError { - kind: hir::ErrorKind::UnicodePropertyNotFound, - span: Span::new( - Position::new(0, 1, 1), - Position::new(7, 1, 8) - ), - } - ); - } - - #[test] - #[cfg(feature = "unicode-script")] - fn class_unicode_script() { - assert_eq!( - t(r"\p{Greek}"), - hir_uclass_query(ClassQuery::Binary("Greek")) - ); - #[cfg(feature = "unicode-case")] - assert_eq!( - t(r"(?i)\p{Greek}"), - hir_case_fold(hir_uclass_query(ClassQuery::Binary("Greek"))) - ); - #[cfg(feature = "unicode-case")] - assert_eq!( - t(r"(?i)\P{Greek}"), - hir_negate(hir_case_fold(hir_uclass_query(ClassQuery::Binary( - "Greek" - )))) - ); - - assert_eq!( - t_err(r"\p{sc:Foo}"), - TestError { - kind: hir::ErrorKind::UnicodePropertyValueNotFound, - span: Span::new( - Position::new(0, 1, 1), - Position::new(10, 1, 11) - ), - } - ); - assert_eq!( - t_err(r"\p{scx:Foo}"), - TestError { - kind: hir::ErrorKind::UnicodePropertyValueNotFound, - span: Span::new( - Position::new(0, 1, 1), - Position::new(11, 1, 12) - ), - } - ); - } - - #[test] - #[cfg(not(feature = "unicode-script"))] - fn class_unicode_script_disabled() { - assert_eq!( - t_err(r"\p{Greek}"), - TestError { - kind: hir::ErrorKind::UnicodePropertyNotFound, - span: Span::new( - Position::new(0, 1, 1), - Position::new(9, 1, 10) - ), - } - ); - - assert_eq!( - t_err(r"\p{scx:Greek}"), - TestError { - kind: hir::ErrorKind::UnicodePropertyNotFound, - span: Span::new( - Position::new(0, 1, 1), - Position::new(13, 1, 14) - ), - } - ); - } - - #[test] - #[cfg(feature = "unicode-age")] - fn class_unicode_age() { - assert_eq!( - t_err(r"\p{age:Foo}"), - TestError { - kind: hir::ErrorKind::UnicodePropertyValueNotFound, - span: Span::new( - Position::new(0, 1, 1), - Position::new(11, 1, 12) - ), - } - ); - } - - #[test] - #[cfg(feature = "unicode-gencat")] - fn class_unicode_any_empty() { - assert_eq!(t(r"\P{any}"), hir_uclass(&[]),); - } - - #[test] - #[cfg(not(feature = "unicode-age"))] - fn class_unicode_age_disabled() { - assert_eq!( - t_err(r"\p{age:3.0}"), - TestError { - kind: hir::ErrorKind::UnicodePropertyNotFound, - span: Span::new( - Position::new(0, 1, 1), - Position::new(11, 1, 12) - ), - } - ); - } - - #[test] - fn class_bracketed() { - assert_eq!(t("[a]"), hir_lit("a")); - assert_eq!(t("[ab]"), hir_uclass(&[('a', 'b')])); - assert_eq!(t("[^[a]]"), class_negate(uclass(&[('a', 'a')]))); - 
assert_eq!(t("[a-z]"), hir_uclass(&[('a', 'z')])); - assert_eq!(t("[a-fd-h]"), hir_uclass(&[('a', 'h')])); - assert_eq!(t("[a-fg-m]"), hir_uclass(&[('a', 'm')])); - assert_eq!(t(r"[\x00]"), hir_uclass(&[('\0', '\0')])); - assert_eq!(t(r"[\n]"), hir_uclass(&[('\n', '\n')])); - assert_eq!(t("[\n]"), hir_uclass(&[('\n', '\n')])); - #[cfg(any(feature = "unicode-perl", feature = "unicode-gencat"))] - assert_eq!(t(r"[\d]"), hir_uclass_query(ClassQuery::Binary("digit"))); - #[cfg(feature = "unicode-gencat")] - assert_eq!( - t(r"[\pZ]"), - hir_uclass_query(ClassQuery::Binary("separator")) - ); - #[cfg(feature = "unicode-gencat")] - assert_eq!( - t(r"[\p{separator}]"), - hir_uclass_query(ClassQuery::Binary("separator")) - ); - #[cfg(any(feature = "unicode-perl", feature = "unicode-gencat"))] - assert_eq!(t(r"[^\D]"), hir_uclass_query(ClassQuery::Binary("digit"))); - #[cfg(feature = "unicode-gencat")] - assert_eq!( - t(r"[^\PZ]"), - hir_uclass_query(ClassQuery::Binary("separator")) - ); - #[cfg(feature = "unicode-gencat")] - assert_eq!( - t(r"[^\P{separator}]"), - hir_uclass_query(ClassQuery::Binary("separator")) - ); - #[cfg(all( - feature = "unicode-case", - any(feature = "unicode-perl", feature = "unicode-gencat") - ))] - assert_eq!( - t(r"(?i)[^\D]"), - hir_uclass_query(ClassQuery::Binary("digit")) - ); - #[cfg(all(feature = "unicode-case", feature = "unicode-script"))] - assert_eq!( - t(r"(?i)[^\P{greek}]"), - hir_case_fold(hir_uclass_query(ClassQuery::Binary("greek"))) - ); - - assert_eq!(t("(?-u)[a]"), hir_bclass(&[(b'a', b'a')])); - assert_eq!(t(r"(?-u)[\x00]"), hir_bclass(&[(b'\0', b'\0')])); - assert_eq!(t_bytes(r"(?-u)[\xFF]"), hir_bclass(&[(b'\xFF', b'\xFF')])); - - #[cfg(feature = "unicode-case")] - assert_eq!(t("(?i)[a]"), hir_uclass(&[('A', 'A'), ('a', 'a')])); - #[cfg(feature = "unicode-case")] - assert_eq!( - t("(?i)[k]"), - hir_uclass(&[('K', 'K'), ('k', 'k'), ('\u{212A}', '\u{212A}'),]) - ); - #[cfg(feature = "unicode-case")] - assert_eq!( - t("(?i)[β]"), - hir_uclass(&[('Β', 'Β'), ('β', 'β'), ('ϐ', 'ϐ'),]) - ); - assert_eq!(t("(?i-u)[k]"), hir_bclass(&[(b'K', b'K'), (b'k', b'k'),])); - - assert_eq!(t("[^a]"), class_negate(uclass(&[('a', 'a')]))); - assert_eq!(t(r"[^\x00]"), class_negate(uclass(&[('\0', '\0')]))); - assert_eq!( - t_bytes("(?-u)[^a]"), - class_negate(bclass(&[(b'a', b'a')])) - ); - #[cfg(any(feature = "unicode-perl", feature = "unicode-gencat"))] - assert_eq!( - t(r"[^\d]"), - hir_negate(hir_uclass_query(ClassQuery::Binary("digit"))) - ); - #[cfg(feature = "unicode-gencat")] - assert_eq!( - t(r"[^\pZ]"), - hir_negate(hir_uclass_query(ClassQuery::Binary("separator"))) - ); - #[cfg(feature = "unicode-gencat")] - assert_eq!( - t(r"[^\p{separator}]"), - hir_negate(hir_uclass_query(ClassQuery::Binary("separator"))) - ); - #[cfg(all(feature = "unicode-case", feature = "unicode-script"))] - assert_eq!( - t(r"(?i)[^\p{greek}]"), - hir_negate(hir_case_fold(hir_uclass_query(ClassQuery::Binary( - "greek" - )))) - ); - #[cfg(all(feature = "unicode-case", feature = "unicode-script"))] - assert_eq!( - t(r"(?i)[\P{greek}]"), - hir_negate(hir_case_fold(hir_uclass_query(ClassQuery::Binary( - "greek" - )))) - ); - - // Test some weird cases. 
- assert_eq!(t(r"[\[]"), hir_uclass(&[('[', '[')])); - - assert_eq!(t(r"[&]"), hir_uclass(&[('&', '&')])); - assert_eq!(t(r"[\&]"), hir_uclass(&[('&', '&')])); - assert_eq!(t(r"[\&\&]"), hir_uclass(&[('&', '&')])); - assert_eq!(t(r"[\x00-&]"), hir_uclass(&[('\0', '&')])); - assert_eq!(t(r"[&-\xFF]"), hir_uclass(&[('&', '\u{FF}')])); - - assert_eq!(t(r"[~]"), hir_uclass(&[('~', '~')])); - assert_eq!(t(r"[\~]"), hir_uclass(&[('~', '~')])); - assert_eq!(t(r"[\~\~]"), hir_uclass(&[('~', '~')])); - assert_eq!(t(r"[\x00-~]"), hir_uclass(&[('\0', '~')])); - assert_eq!(t(r"[~-\xFF]"), hir_uclass(&[('~', '\u{FF}')])); - - assert_eq!(t(r"[-]"), hir_uclass(&[('-', '-')])); - assert_eq!(t(r"[\-]"), hir_uclass(&[('-', '-')])); - assert_eq!(t(r"[\-\-]"), hir_uclass(&[('-', '-')])); - assert_eq!(t(r"[\x00-\-]"), hir_uclass(&[('\0', '-')])); - assert_eq!(t(r"[\--\xFF]"), hir_uclass(&[('-', '\u{FF}')])); - - assert_eq!( - t_err("(?-u)[^a]"), - TestError { - kind: hir::ErrorKind::InvalidUtf8, - span: Span::new( - Position::new(5, 1, 6), - Position::new(9, 1, 10) - ), - } - ); - #[cfg(any(feature = "unicode-perl", feature = "unicode-bool"))] - assert_eq!(t(r"[^\s\S]"), hir_uclass(&[]),); - #[cfg(any(feature = "unicode-perl", feature = "unicode-bool"))] - assert_eq!(t_bytes(r"(?-u)[^\s\S]"), hir_bclass(&[]),); - } - - #[test] - fn class_bracketed_union() { - assert_eq!(t("[a-zA-Z]"), hir_uclass(&[('A', 'Z'), ('a', 'z')])); - #[cfg(feature = "unicode-gencat")] - assert_eq!( - t(r"[a\pZb]"), - hir_union( - hir_uclass(&[('a', 'b')]), - hir_uclass_query(ClassQuery::Binary("separator")) - ) - ); - #[cfg(all(feature = "unicode-gencat", feature = "unicode-script"))] - assert_eq!( - t(r"[\pZ\p{Greek}]"), - hir_union( - hir_uclass_query(ClassQuery::Binary("greek")), - hir_uclass_query(ClassQuery::Binary("separator")) - ) - ); - #[cfg(all( - feature = "unicode-age", - feature = "unicode-gencat", - feature = "unicode-script" - ))] - assert_eq!( - t(r"[\p{age:3.0}\pZ\p{Greek}]"), - hir_union( - hir_uclass_query(ClassQuery::ByValue { - property_name: "age", - property_value: "3.0", - }), - hir_union( - hir_uclass_query(ClassQuery::Binary("greek")), - hir_uclass_query(ClassQuery::Binary("separator")) - ) - ) - ); - #[cfg(all( - feature = "unicode-age", - feature = "unicode-gencat", - feature = "unicode-script" - ))] - assert_eq!( - t(r"[[[\p{age:3.0}\pZ]\p{Greek}][\p{Cyrillic}]]"), - hir_union( - hir_uclass_query(ClassQuery::ByValue { - property_name: "age", - property_value: "3.0", - }), - hir_union( - hir_uclass_query(ClassQuery::Binary("cyrillic")), - hir_union( - hir_uclass_query(ClassQuery::Binary("greek")), - hir_uclass_query(ClassQuery::Binary("separator")) - ) - ) - ) - ); - - #[cfg(all( - feature = "unicode-age", - feature = "unicode-case", - feature = "unicode-gencat", - feature = "unicode-script" - ))] - assert_eq!( - t(r"(?i)[\p{age:3.0}\pZ\p{Greek}]"), - hir_case_fold(hir_union( - hir_uclass_query(ClassQuery::ByValue { - property_name: "age", - property_value: "3.0", - }), - hir_union( - hir_uclass_query(ClassQuery::Binary("greek")), - hir_uclass_query(ClassQuery::Binary("separator")) - ) - )) - ); - #[cfg(all( - feature = "unicode-age", - feature = "unicode-gencat", - feature = "unicode-script" - ))] - assert_eq!( - t(r"[^\p{age:3.0}\pZ\p{Greek}]"), - hir_negate(hir_union( - hir_uclass_query(ClassQuery::ByValue { - property_name: "age", - property_value: "3.0", - }), - hir_union( - hir_uclass_query(ClassQuery::Binary("greek")), - hir_uclass_query(ClassQuery::Binary("separator")) - ) - )) - ); - #[cfg(all( - 
feature = "unicode-age", - feature = "unicode-case", - feature = "unicode-gencat", - feature = "unicode-script" - ))] - assert_eq!( - t(r"(?i)[^\p{age:3.0}\pZ\p{Greek}]"), - hir_negate(hir_case_fold(hir_union( - hir_uclass_query(ClassQuery::ByValue { - property_name: "age", - property_value: "3.0", - }), - hir_union( - hir_uclass_query(ClassQuery::Binary("greek")), - hir_uclass_query(ClassQuery::Binary("separator")) - ) - ))) - ); - } - - #[test] - fn class_bracketed_nested() { - assert_eq!(t(r"[a[^c]]"), class_negate(uclass(&[('c', 'c')]))); - assert_eq!(t(r"[a-b[^c]]"), class_negate(uclass(&[('c', 'c')]))); - assert_eq!(t(r"[a-c[^c]]"), class_negate(uclass(&[]))); - - assert_eq!(t(r"[^a[^c]]"), hir_uclass(&[('c', 'c')])); - assert_eq!(t(r"[^a-b[^c]]"), hir_uclass(&[('c', 'c')])); - - #[cfg(feature = "unicode-case")] - assert_eq!( - t(r"(?i)[a[^c]]"), - hir_negate(class_case_fold(uclass(&[('c', 'c')]))) - ); - #[cfg(feature = "unicode-case")] - assert_eq!( - t(r"(?i)[a-b[^c]]"), - hir_negate(class_case_fold(uclass(&[('c', 'c')]))) - ); - - #[cfg(feature = "unicode-case")] - assert_eq!(t(r"(?i)[^a[^c]]"), hir_uclass(&[('C', 'C'), ('c', 'c')])); - #[cfg(feature = "unicode-case")] - assert_eq!( - t(r"(?i)[^a-b[^c]]"), - hir_uclass(&[('C', 'C'), ('c', 'c')]) - ); - - assert_eq!(t(r"[^a-c[^c]]"), hir_uclass(&[]),); - #[cfg(feature = "unicode-case")] - assert_eq!(t(r"(?i)[^a-c[^c]]"), hir_uclass(&[]),); - } - - #[test] - fn class_bracketed_intersect() { - assert_eq!(t("[abc&&b-c]"), hir_uclass(&[('b', 'c')])); - assert_eq!(t("[abc&&[b-c]]"), hir_uclass(&[('b', 'c')])); - assert_eq!(t("[[abc]&&[b-c]]"), hir_uclass(&[('b', 'c')])); - assert_eq!(t("[a-z&&b-y&&c-x]"), hir_uclass(&[('c', 'x')])); - assert_eq!(t("[c-da-b&&a-d]"), hir_uclass(&[('a', 'd')])); - assert_eq!(t("[a-d&&c-da-b]"), hir_uclass(&[('a', 'd')])); - assert_eq!(t(r"[a-z&&a-c]"), hir_uclass(&[('a', 'c')])); - assert_eq!(t(r"[[a-z&&a-c]]"), hir_uclass(&[('a', 'c')])); - assert_eq!(t(r"[^[a-z&&a-c]]"), hir_negate(hir_uclass(&[('a', 'c')]))); - - assert_eq!(t("(?-u)[abc&&b-c]"), hir_bclass(&[(b'b', b'c')])); - assert_eq!(t("(?-u)[abc&&[b-c]]"), hir_bclass(&[(b'b', b'c')])); - assert_eq!(t("(?-u)[[abc]&&[b-c]]"), hir_bclass(&[(b'b', b'c')])); - assert_eq!(t("(?-u)[a-z&&b-y&&c-x]"), hir_bclass(&[(b'c', b'x')])); - assert_eq!(t("(?-u)[c-da-b&&a-d]"), hir_bclass(&[(b'a', b'd')])); - assert_eq!(t("(?-u)[a-d&&c-da-b]"), hir_bclass(&[(b'a', b'd')])); - - #[cfg(feature = "unicode-case")] - assert_eq!( - t("(?i)[abc&&b-c]"), - hir_case_fold(hir_uclass(&[('b', 'c')])) - ); - #[cfg(feature = "unicode-case")] - assert_eq!( - t("(?i)[abc&&[b-c]]"), - hir_case_fold(hir_uclass(&[('b', 'c')])) - ); - #[cfg(feature = "unicode-case")] - assert_eq!( - t("(?i)[[abc]&&[b-c]]"), - hir_case_fold(hir_uclass(&[('b', 'c')])) - ); - #[cfg(feature = "unicode-case")] - assert_eq!( - t("(?i)[a-z&&b-y&&c-x]"), - hir_case_fold(hir_uclass(&[('c', 'x')])) - ); - #[cfg(feature = "unicode-case")] - assert_eq!( - t("(?i)[c-da-b&&a-d]"), - hir_case_fold(hir_uclass(&[('a', 'd')])) - ); - #[cfg(feature = "unicode-case")] - assert_eq!( - t("(?i)[a-d&&c-da-b]"), - hir_case_fold(hir_uclass(&[('a', 'd')])) - ); - - assert_eq!( - t("(?i-u)[abc&&b-c]"), - hir_case_fold(hir_bclass(&[(b'b', b'c')])) - ); - assert_eq!( - t("(?i-u)[abc&&[b-c]]"), - hir_case_fold(hir_bclass(&[(b'b', b'c')])) - ); - assert_eq!( - t("(?i-u)[[abc]&&[b-c]]"), - hir_case_fold(hir_bclass(&[(b'b', b'c')])) - ); - assert_eq!( - t("(?i-u)[a-z&&b-y&&c-x]"), - hir_case_fold(hir_bclass(&[(b'c', b'x')])) - ); - 
assert_eq!( - t("(?i-u)[c-da-b&&a-d]"), - hir_case_fold(hir_bclass(&[(b'a', b'd')])) - ); - assert_eq!( - t("(?i-u)[a-d&&c-da-b]"), - hir_case_fold(hir_bclass(&[(b'a', b'd')])) - ); - - // In `[a^]`, `^` does not need to be escaped, so it makes sense that - // `^` is also allowed to be unescaped after `&&`. - assert_eq!(t(r"[\^&&^]"), hir_uclass(&[('^', '^')])); - // `]` needs to be escaped after `&&` since it's not at start of class. - assert_eq!(t(r"[]&&\]]"), hir_uclass(&[(']', ']')])); - assert_eq!(t(r"[-&&-]"), hir_uclass(&[('-', '-')])); - assert_eq!(t(r"[\&&&&]"), hir_uclass(&[('&', '&')])); - assert_eq!(t(r"[\&&&\&]"), hir_uclass(&[('&', '&')])); - // Test precedence. - assert_eq!( - t(r"[a-w&&[^c-g]z]"), - hir_uclass(&[('a', 'b'), ('h', 'w')]) - ); - } - - #[test] - fn class_bracketed_intersect_negate() { - #[cfg(feature = "unicode-perl")] - assert_eq!( - t(r"[^\w&&\d]"), - hir_negate(hir_uclass_query(ClassQuery::Binary("digit"))) - ); - assert_eq!(t(r"[^[a-z&&a-c]]"), hir_negate(hir_uclass(&[('a', 'c')]))); - #[cfg(feature = "unicode-perl")] - assert_eq!( - t(r"[^[\w&&\d]]"), - hir_negate(hir_uclass_query(ClassQuery::Binary("digit"))) - ); - #[cfg(feature = "unicode-perl")] - assert_eq!( - t(r"[^[^\w&&\d]]"), - hir_uclass_query(ClassQuery::Binary("digit")) - ); - #[cfg(feature = "unicode-perl")] - assert_eq!(t(r"[[[^\w]&&[^\d]]]"), hir_negate(hir_uclass_perl_word())); - - #[cfg(feature = "unicode-perl")] - assert_eq!( - t_bytes(r"(?-u)[^\w&&\d]"), - hir_negate(hir_ascii_bclass(&ast::ClassAsciiKind::Digit)) - ); - assert_eq!( - t_bytes(r"(?-u)[^[a-z&&a-c]]"), - hir_negate(hir_bclass(&[(b'a', b'c')])) - ); - assert_eq!( - t_bytes(r"(?-u)[^[\w&&\d]]"), - hir_negate(hir_ascii_bclass(&ast::ClassAsciiKind::Digit)) - ); - assert_eq!( - t_bytes(r"(?-u)[^[^\w&&\d]]"), - hir_ascii_bclass(&ast::ClassAsciiKind::Digit) - ); - assert_eq!( - t_bytes(r"(?-u)[[[^\w]&&[^\d]]]"), - hir_negate(hir_ascii_bclass(&ast::ClassAsciiKind::Word)) - ); - } - - #[test] - fn class_bracketed_difference() { - #[cfg(feature = "unicode-gencat")] - assert_eq!( - t(r"[\pL--[:ascii:]]"), - hir_difference( - hir_uclass_query(ClassQuery::Binary("letter")), - hir_uclass(&[('\0', '\x7F')]) - ) - ); - - assert_eq!( - t(r"(?-u)[[:alpha:]--[:lower:]]"), - hir_bclass(&[(b'A', b'Z')]) - ); - } - - #[test] - fn class_bracketed_symmetric_difference() { - #[cfg(feature = "unicode-script")] - assert_eq!( - t(r"[\p{sc:Greek}~~\p{scx:Greek}]"), - // Class({ - // '·'..='·', - // '\u{300}'..='\u{301}', - // '\u{304}'..='\u{304}', - // '\u{306}'..='\u{306}', - // '\u{308}'..='\u{308}', - // '\u{313}'..='\u{313}', - // '\u{342}'..='\u{342}', - // '\u{345}'..='\u{345}', - // 'ʹ'..='ʹ', - // '\u{1dc0}'..='\u{1dc1}', - // '⁝'..='⁝', - // }) - hir_uclass(&[ - ('·', '·'), - ('\u{0300}', '\u{0301}'), - ('\u{0304}', '\u{0304}'), - ('\u{0306}', '\u{0306}'), - ('\u{0308}', '\u{0308}'), - ('\u{0313}', '\u{0313}'), - ('\u{0342}', '\u{0342}'), - ('\u{0345}', '\u{0345}'), - ('ʹ', 'ʹ'), - ('\u{1DC0}', '\u{1DC1}'), - ('⁝', '⁝'), - ]) - ); - assert_eq!(t(r"[a-g~~c-j]"), hir_uclass(&[('a', 'b'), ('h', 'j')])); - - assert_eq!( - t(r"(?-u)[a-g~~c-j]"), - hir_bclass(&[(b'a', b'b'), (b'h', b'j')]) - ); - } - - #[test] - fn ignore_whitespace() { - assert_eq!(t(r"(?x)\12 3"), hir_lit("\n3")); - assert_eq!(t(r"(?x)\x { 53 }"), hir_lit("S")); - assert_eq!( - t(r"(?x)\x # comment -{ # comment - 53 # comment -} #comment"), - hir_lit("S") - ); - - assert_eq!(t(r"(?x)\x 53"), hir_lit("S")); - assert_eq!( - t(r"(?x)\x # comment - 53 # comment"), - hir_lit("S") - ); 
- assert_eq!(t(r"(?x)\x5 3"), hir_lit("S")); - - #[cfg(feature = "unicode-gencat")] - assert_eq!( - t(r"(?x)\p # comment -{ # comment - Separator # comment -} # comment"), - hir_uclass_query(ClassQuery::Binary("separator")) - ); - - assert_eq!( - t(r"(?x)a # comment -{ # comment - 5 # comment - , # comment - 10 # comment -} # comment"), - hir_range(true, 5, Some(10), hir_lit("a")) - ); - - assert_eq!(t(r"(?x)a\ # hi there"), hir_lit("a ")); - } - - #[test] - fn analysis_is_utf8() { - // Positive examples. - assert!(props_bytes(r"a").is_utf8()); - assert!(props_bytes(r"ab").is_utf8()); - assert!(props_bytes(r"(?-u)a").is_utf8()); - assert!(props_bytes(r"(?-u)ab").is_utf8()); - assert!(props_bytes(r"\xFF").is_utf8()); - assert!(props_bytes(r"\xFF\xFF").is_utf8()); - assert!(props_bytes(r"[^a]").is_utf8()); - assert!(props_bytes(r"[^a][^a]").is_utf8()); - assert!(props_bytes(r"\b").is_utf8()); - assert!(props_bytes(r"\B").is_utf8()); - assert!(props_bytes(r"(?-u)\b").is_utf8()); - assert!(props_bytes(r"(?-u)\B").is_utf8()); - - // Negative examples. - assert!(!props_bytes(r"(?-u)\xFF").is_utf8()); - assert!(!props_bytes(r"(?-u)\xFF\xFF").is_utf8()); - assert!(!props_bytes(r"(?-u)[^a]").is_utf8()); - assert!(!props_bytes(r"(?-u)[^a][^a]").is_utf8()); - } - - #[test] - fn analysis_captures_len() { - assert_eq!(0, props(r"a").explicit_captures_len()); - assert_eq!(0, props(r"(?:a)").explicit_captures_len()); - assert_eq!(0, props(r"(?i-u:a)").explicit_captures_len()); - assert_eq!(0, props(r"(?i-u)a").explicit_captures_len()); - assert_eq!(1, props(r"(a)").explicit_captures_len()); - assert_eq!(1, props(r"(?Pa)").explicit_captures_len()); - assert_eq!(1, props(r"()").explicit_captures_len()); - assert_eq!(1, props(r"()a").explicit_captures_len()); - assert_eq!(1, props(r"(a)+").explicit_captures_len()); - assert_eq!(2, props(r"(a)(b)").explicit_captures_len()); - assert_eq!(2, props(r"(a)|(b)").explicit_captures_len()); - assert_eq!(2, props(r"((a))").explicit_captures_len()); - assert_eq!(1, props(r"([a&&b])").explicit_captures_len()); - } - - #[test] - fn analysis_static_captures_len() { - let len = |pattern| props(pattern).static_explicit_captures_len(); - assert_eq!(Some(0), len(r"")); - assert_eq!(Some(0), len(r"foo|bar")); - assert_eq!(None, len(r"(foo)|bar")); - assert_eq!(None, len(r"foo|(bar)")); - assert_eq!(Some(1), len(r"(foo|bar)")); - assert_eq!(Some(1), len(r"(a|b|c|d|e|f)")); - assert_eq!(Some(1), len(r"(a)|(b)|(c)|(d)|(e)|(f)")); - assert_eq!(Some(2), len(r"(a)(b)|(c)(d)|(e)(f)")); - assert_eq!(Some(6), len(r"(a)(b)(c)(d)(e)(f)")); - assert_eq!(Some(3), len(r"(a)(b)(extra)|(a)(b)()")); - assert_eq!(Some(3), len(r"(a)(b)((?:extra)?)")); - assert_eq!(None, len(r"(a)(b)(extra)?")); - assert_eq!(Some(1), len(r"(foo)|(bar)")); - assert_eq!(Some(2), len(r"(foo)(bar)")); - assert_eq!(Some(2), len(r"(foo)+(bar)")); - assert_eq!(None, len(r"(foo)*(bar)")); - assert_eq!(Some(0), len(r"(foo)?{0}")); - assert_eq!(None, len(r"(foo)?{1}")); - assert_eq!(Some(1), len(r"(foo){1}")); - assert_eq!(Some(1), len(r"(foo){1,}")); - assert_eq!(Some(1), len(r"(foo){1,}?")); - assert_eq!(None, len(r"(foo){1,}??")); - assert_eq!(None, len(r"(foo){0,}")); - assert_eq!(Some(1), len(r"(foo)(?:bar)")); - assert_eq!(Some(2), len(r"(foo(?:bar)+)(?:baz(boo))")); - assert_eq!(Some(2), len(r"(?Pfoo)(?:bar)(bal|loon)")); - assert_eq!( - Some(2), - len(r#"<(a)[^>]+href="([^"]+)"|<(img)[^>]+src="([^"]+)""#) - ); - } - - #[test] - fn analysis_is_all_assertions() { - // Positive examples. 
- let p = props(r"\b"); - assert!(!p.look_set().is_empty()); - assert_eq!(p.minimum_len(), Some(0)); - - let p = props(r"\B"); - assert!(!p.look_set().is_empty()); - assert_eq!(p.minimum_len(), Some(0)); - - let p = props(r"^"); - assert!(!p.look_set().is_empty()); - assert_eq!(p.minimum_len(), Some(0)); - - let p = props(r"$"); - assert!(!p.look_set().is_empty()); - assert_eq!(p.minimum_len(), Some(0)); - - let p = props(r"\A"); - assert!(!p.look_set().is_empty()); - assert_eq!(p.minimum_len(), Some(0)); - - let p = props(r"\z"); - assert!(!p.look_set().is_empty()); - assert_eq!(p.minimum_len(), Some(0)); - - let p = props(r"$^\z\A\b\B"); - assert!(!p.look_set().is_empty()); - assert_eq!(p.minimum_len(), Some(0)); - - let p = props(r"$|^|\z|\A|\b|\B"); - assert!(!p.look_set().is_empty()); - assert_eq!(p.minimum_len(), Some(0)); - - let p = props(r"^$|$^"); - assert!(!p.look_set().is_empty()); - assert_eq!(p.minimum_len(), Some(0)); - - let p = props(r"((\b)+())*^"); - assert!(!p.look_set().is_empty()); - assert_eq!(p.minimum_len(), Some(0)); - - // Negative examples. - let p = props(r"^a"); - assert!(!p.look_set().is_empty()); - assert_eq!(p.minimum_len(), Some(1)); - } - - #[test] - fn analysis_look_set_prefix_any() { - let p = props(r"(?-u)(?i:(?:\b|_)win(?:32|64|dows)?(?:\b|_))"); - assert!(p.look_set_prefix_any().contains(Look::WordAscii)); - } - - #[test] - fn analysis_is_anchored() { - let is_start = |p| props(p).look_set_prefix().contains(Look::Start); - let is_end = |p| props(p).look_set_suffix().contains(Look::End); - - // Positive examples. - assert!(is_start(r"^")); - assert!(is_end(r"$")); - - assert!(is_start(r"^^")); - assert!(props(r"$$").look_set_suffix().contains(Look::End)); - - assert!(is_start(r"^$")); - assert!(is_end(r"^$")); - - assert!(is_start(r"^foo")); - assert!(is_end(r"foo$")); - - assert!(is_start(r"^foo|^bar")); - assert!(is_end(r"foo$|bar$")); - - assert!(is_start(r"^(foo|bar)")); - assert!(is_end(r"(foo|bar)$")); - - assert!(is_start(r"^+")); - assert!(is_end(r"$+")); - assert!(is_start(r"^++")); - assert!(is_end(r"$++")); - assert!(is_start(r"(^)+")); - assert!(is_end(r"($)+")); - - assert!(is_start(r"$^")); - assert!(is_start(r"$^")); - assert!(is_start(r"$^|^$")); - assert!(is_end(r"$^|^$")); - - assert!(is_start(r"\b^")); - assert!(is_end(r"$\b")); - assert!(is_start(r"^(?m:^)")); - assert!(is_end(r"(?m:$)$")); - assert!(is_start(r"(?m:^)^")); - assert!(is_end(r"$(?m:$)")); - - // Negative examples. - assert!(!is_start(r"(?m)^")); - assert!(!is_end(r"(?m)$")); - assert!(!is_start(r"(?m:^$)|$^")); - assert!(!is_end(r"(?m:^$)|$^")); - assert!(!is_start(r"$^|(?m:^$)")); - assert!(!is_end(r"$^|(?m:^$)")); - - assert!(!is_start(r"a^")); - assert!(!is_start(r"$a")); - - assert!(!is_end(r"a^")); - assert!(!is_end(r"$a")); - - assert!(!is_start(r"^foo|bar")); - assert!(!is_end(r"foo|bar$")); - - assert!(!is_start(r"^*")); - assert!(!is_end(r"$*")); - assert!(!is_start(r"^*+")); - assert!(!is_end(r"$*+")); - assert!(!is_start(r"^+*")); - assert!(!is_end(r"$+*")); - assert!(!is_start(r"(^)*")); - assert!(!is_end(r"($)*")); - } - - #[test] - fn analysis_is_any_anchored() { - let is_start = |p| props(p).look_set().contains(Look::Start); - let is_end = |p| props(p).look_set().contains(Look::End); - - // Positive examples. - assert!(is_start(r"^")); - assert!(is_end(r"$")); - assert!(is_start(r"\A")); - assert!(is_end(r"\z")); - - // Negative examples. 
- assert!(!is_start(r"(?m)^")); - assert!(!is_end(r"(?m)$")); - assert!(!is_start(r"$")); - assert!(!is_end(r"^")); - } - - #[test] - fn analysis_can_empty() { - // Positive examples. - let assert_empty = - |p| assert_eq!(Some(0), props_bytes(p).minimum_len()); - assert_empty(r""); - assert_empty(r"()"); - assert_empty(r"()*"); - assert_empty(r"()+"); - assert_empty(r"()?"); - assert_empty(r"a*"); - assert_empty(r"a?"); - assert_empty(r"a{0}"); - assert_empty(r"a{0,}"); - assert_empty(r"a{0,1}"); - assert_empty(r"a{0,10}"); - #[cfg(feature = "unicode-gencat")] - assert_empty(r"\pL*"); - assert_empty(r"a*|b"); - assert_empty(r"b|a*"); - assert_empty(r"a|"); - assert_empty(r"|a"); - assert_empty(r"a||b"); - assert_empty(r"a*a?(abcd)*"); - assert_empty(r"^"); - assert_empty(r"$"); - assert_empty(r"(?m)^"); - assert_empty(r"(?m)$"); - assert_empty(r"\A"); - assert_empty(r"\z"); - assert_empty(r"\B"); - assert_empty(r"(?-u)\B"); - assert_empty(r"\b"); - assert_empty(r"(?-u)\b"); - - // Negative examples. - let assert_non_empty = - |p| assert_ne!(Some(0), props_bytes(p).minimum_len()); - assert_non_empty(r"a+"); - assert_non_empty(r"a{1}"); - assert_non_empty(r"a{1,}"); - assert_non_empty(r"a{1,2}"); - assert_non_empty(r"a{1,10}"); - assert_non_empty(r"b|a"); - assert_non_empty(r"a*a+(abcd)*"); - #[cfg(feature = "unicode-gencat")] - assert_non_empty(r"\P{any}"); - assert_non_empty(r"[a--a]"); - assert_non_empty(r"[a&&b]"); - } - - #[test] - fn analysis_is_literal() { - // Positive examples. - assert!(props(r"a").is_literal()); - assert!(props(r"ab").is_literal()); - assert!(props(r"abc").is_literal()); - assert!(props(r"(?m)abc").is_literal()); - assert!(props(r"(?:a)").is_literal()); - assert!(props(r"foo(?:a)").is_literal()); - assert!(props(r"(?:a)foo").is_literal()); - assert!(props(r"[a]").is_literal()); - - // Negative examples. - assert!(!props(r"").is_literal()); - assert!(!props(r"^").is_literal()); - assert!(!props(r"a|b").is_literal()); - assert!(!props(r"(a)").is_literal()); - assert!(!props(r"a+").is_literal()); - assert!(!props(r"foo(a)").is_literal()); - assert!(!props(r"(a)foo").is_literal()); - assert!(!props(r"[ab]").is_literal()); - } - - #[test] - fn analysis_is_alternation_literal() { - // Positive examples. - assert!(props(r"a").is_alternation_literal()); - assert!(props(r"ab").is_alternation_literal()); - assert!(props(r"abc").is_alternation_literal()); - assert!(props(r"(?m)abc").is_alternation_literal()); - assert!(props(r"foo|bar").is_alternation_literal()); - assert!(props(r"foo|bar|baz").is_alternation_literal()); - assert!(props(r"[a]").is_alternation_literal()); - assert!(props(r"(?:ab)|cd").is_alternation_literal()); - assert!(props(r"ab|(?:cd)").is_alternation_literal()); - - // Negative examples. 
- assert!(!props(r"").is_alternation_literal()); - assert!(!props(r"^").is_alternation_literal()); - assert!(!props(r"(a)").is_alternation_literal()); - assert!(!props(r"a+").is_alternation_literal()); - assert!(!props(r"foo(a)").is_alternation_literal()); - assert!(!props(r"(a)foo").is_alternation_literal()); - assert!(!props(r"[ab]").is_alternation_literal()); - assert!(!props(r"[ab]|b").is_alternation_literal()); - assert!(!props(r"a|[ab]").is_alternation_literal()); - assert!(!props(r"(a)|b").is_alternation_literal()); - assert!(!props(r"a|(b)").is_alternation_literal()); - assert!(!props(r"a|b").is_alternation_literal()); - assert!(!props(r"a|b|c").is_alternation_literal()); - assert!(!props(r"[a]|b").is_alternation_literal()); - assert!(!props(r"a|[b]").is_alternation_literal()); - assert!(!props(r"(?:a)|b").is_alternation_literal()); - assert!(!props(r"a|(?:b)").is_alternation_literal()); - assert!(!props(r"(?:z|xx)@|xx").is_alternation_literal()); - } - - // This tests that the smart Hir::repetition constructors does some basic - // simplifications. - #[test] - fn smart_repetition() { - assert_eq!(t(r"a{0}"), Hir::empty()); - assert_eq!(t(r"a{1}"), hir_lit("a")); - assert_eq!(t(r"\B{32111}"), hir_look(hir::Look::WordUnicodeNegate)); - } - - // This tests that the smart Hir::concat constructor simplifies the given - // exprs in a way we expect. - #[test] - fn smart_concat() { - assert_eq!(t(""), Hir::empty()); - assert_eq!(t("(?:)"), Hir::empty()); - assert_eq!(t("abc"), hir_lit("abc")); - assert_eq!(t("(?:foo)(?:bar)"), hir_lit("foobar")); - assert_eq!(t("quux(?:foo)(?:bar)baz"), hir_lit("quuxfoobarbaz")); - assert_eq!( - t("foo(?:bar^baz)quux"), - hir_cat(vec![ - hir_lit("foobar"), - hir_look(hir::Look::Start), - hir_lit("bazquux"), - ]) - ); - assert_eq!( - t("foo(?:ba(?:r^b)az)quux"), - hir_cat(vec![ - hir_lit("foobar"), - hir_look(hir::Look::Start), - hir_lit("bazquux"), - ]) - ); - } - - // This tests that the smart Hir::alternation constructor simplifies the - // given exprs in a way we expect. - #[test] - fn smart_alternation() { - assert_eq!( - t("(?:foo)|(?:bar)"), - hir_alt(vec![hir_lit("foo"), hir_lit("bar")]) - ); - assert_eq!( - t("quux|(?:abc|def|xyz)|baz"), - hir_alt(vec![ - hir_lit("quux"), - hir_lit("abc"), - hir_lit("def"), - hir_lit("xyz"), - hir_lit("baz"), - ]) - ); - assert_eq!( - t("quux|(?:abc|(?:def|mno)|xyz)|baz"), - hir_alt(vec![ - hir_lit("quux"), - hir_lit("abc"), - hir_lit("def"), - hir_lit("mno"), - hir_lit("xyz"), - hir_lit("baz"), - ]) - ); - assert_eq!( - t("a|b|c|d|e|f|x|y|z"), - hir_uclass(&[('a', 'f'), ('x', 'z')]), - ); - // Tests that we lift common prefixes out of an alternation. 
- assert_eq!( - t("[A-Z]foo|[A-Z]quux"), - hir_cat(vec![ - hir_uclass(&[('A', 'Z')]), - hir_alt(vec![hir_lit("foo"), hir_lit("quux")]), - ]), - ); - assert_eq!( - t("[A-Z][A-Z]|[A-Z]quux"), - hir_cat(vec![ - hir_uclass(&[('A', 'Z')]), - hir_alt(vec![hir_uclass(&[('A', 'Z')]), hir_lit("quux")]), - ]), - ); - assert_eq!( - t("[A-Z][A-Z]|[A-Z][A-Z]quux"), - hir_cat(vec![ - hir_uclass(&[('A', 'Z')]), - hir_uclass(&[('A', 'Z')]), - hir_alt(vec![Hir::empty(), hir_lit("quux")]), - ]), - ); - assert_eq!( - t("[A-Z]foo|[A-Z]foobar"), - hir_cat(vec![ - hir_uclass(&[('A', 'Z')]), - hir_alt(vec![hir_lit("foo"), hir_lit("foobar")]), - ]), - ); - } - - #[test] - fn regression_alt_empty_concat() { - use crate::ast::{self, Ast}; - - let span = Span::splat(Position::new(0, 0, 0)); - let ast = Ast::alternation(ast::Alternation { - span, - asts: vec![Ast::concat(ast::Concat { span, asts: vec![] })], - }); - - let mut t = Translator::new(); - assert_eq!(Ok(Hir::empty()), t.translate("", &ast)); - } - - #[test] - fn regression_empty_alt() { - use crate::ast::{self, Ast}; - - let span = Span::splat(Position::new(0, 0, 0)); - let ast = Ast::concat(ast::Concat { - span, - asts: vec![Ast::alternation(ast::Alternation { - span, - asts: vec![], - })], - }); - - let mut t = Translator::new(); - assert_eq!(Ok(Hir::fail()), t.translate("", &ast)); - } - - #[test] - fn regression_singleton_alt() { - use crate::{ - ast::{self, Ast}, - hir::Dot, - }; - - let span = Span::splat(Position::new(0, 0, 0)); - let ast = Ast::concat(ast::Concat { - span, - asts: vec![Ast::alternation(ast::Alternation { - span, - asts: vec![Ast::dot(span)], - })], - }); - - let mut t = Translator::new(); - assert_eq!(Ok(Hir::dot(Dot::AnyCharExceptLF)), t.translate("", &ast)); - } - - // See: https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=63168 - #[test] - fn regression_fuzz_match() { - let pat = "[(\u{6} \0-\u{afdf5}] \0 "; - let ast = ParserBuilder::new() - .octal(false) - .ignore_whitespace(true) - .build() - .parse(pat) - .unwrap(); - let hir = TranslatorBuilder::new() - .utf8(true) - .case_insensitive(false) - .multi_line(false) - .dot_matches_new_line(false) - .swap_greed(true) - .unicode(true) - .build() - .translate(pat, &ast) - .unwrap(); - assert_eq!( - hir, - Hir::concat(vec![ - hir_uclass(&[('\0', '\u{afdf5}')]), - hir_lit("\0"), - ]) - ); - } - - // See: https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=63155 - #[cfg(feature = "unicode")] - #[test] - fn regression_fuzz_difference1() { - let pat = r"\W\W|\W[^\v--\W\W\P{Script_Extensions:Pau_Cin_Hau}\u10A1A1-\U{3E3E3}--~~~~--~~~~~~~~------~~~~~~--~~~~~~]*"; - let _ = t(pat); // shouldn't panic - } - - // See: https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=63153 - #[test] - fn regression_fuzz_char_decrement1() { - let pat = "w[w[^w?\rw\rw[^w?\rw[^w?\rw[^w?\rw[^w?\rw[^w?\rw[^w?\r\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0w?\rw[^w?\rw[^w?\rw[^w\0\0\u{1}\0]\0\0-*\0]\0\0\0\0\0\0\u{1}\0]\0\0-*\0]\0\0\0\0\0\u{1}\0]\0\0\0\0\0\0\0\0\0*\0\0\u{1}\0]\0\0-*\0][^w?\rw[^w?\rw[^w?\rw[^w?\rw[^w?\rw[^w?\rw[^w\0\0\u{1}\0]\0\0-*\0]\0\0\0\0\0\0\u{1}\0]\0\0-*\0]\0\0\0\0\0\u{1}\0]\0\0\0\0\0\0\0\0\0x\0\0\u{1}\0]\0\0-*\0]\0\0\0\0\0\0\0\0\0*??\0\u{7f}{2}\u{10}??\0\0\0\0\0\0\0\0\0\u{3}\0\0\0}\0-*\0]\0\0\0\0\0\0\u{1}\0]\0\0-*\0]\0\0\0\0\0\0\u{1}\0]\0\0-*\0]\0\0\0\0\0\u{1}\0]\0\0-*\0]\0\0\0\0\0\0\0\u{1}\0]\0\u{1}\u{1}H-i]-]\0\0\0\0\u{1}\0]\0\0\0\u{1}\0]\0\0-*\0\0\0\0\u{1}9-\u{7f}]\0'|-\u{7f}]\0'|(?i-ux)[-\u{7f}]\0'\u{3}\0\0\0}\0-*\0] Result; - - /// This method is called before 
beginning traversal of the HIR. - fn start(&mut self) {} - - /// This method is called on an `Hir` before descending into child `Hir` - /// nodes. - fn visit_pre(&mut self, _hir: &Hir) -> Result<(), Self::Err> { - Ok(()) - } - - /// This method is called on an `Hir` after descending all of its child - /// `Hir` nodes. - fn visit_post(&mut self, _hir: &Hir) -> Result<(), Self::Err> { - Ok(()) - } - - /// This method is called between child nodes of an alternation. - fn visit_alternation_in(&mut self) -> Result<(), Self::Err> { - Ok(()) - } - - /// This method is called between child nodes of a concatenation. - fn visit_concat_in(&mut self) -> Result<(), Self::Err> { - Ok(()) - } -} - -/// Executes an implementation of `Visitor` in constant stack space. -/// -/// This function will visit every node in the given `Hir` while calling -/// appropriate methods provided by the [`Visitor`] trait. -/// -/// The primary use case for this method is when one wants to perform case -/// analysis over an `Hir` without using a stack size proportional to the depth -/// of the `Hir`. Namely, this method will instead use constant stack space, -/// but will use heap space proportional to the size of the `Hir`. This may be -/// desirable in cases where the size of `Hir` is proportional to end user -/// input. -/// -/// If the visitor returns an error at any point, then visiting is stopped and -/// the error is returned. -pub fn visit(hir: &Hir, visitor: V) -> Result { - HeapVisitor::new().visit(hir, visitor) -} - -/// HeapVisitor visits every item in an `Hir` recursively using constant stack -/// size and a heap size proportional to the size of the `Hir`. -struct HeapVisitor<'a> { - /// A stack of `Hir` nodes. This is roughly analogous to the call stack - /// used in a typical recursive visitor. - stack: Vec<(&'a Hir, Frame<'a>)>, -} - -/// Represents a single stack frame while performing structural induction over -/// an `Hir`. -enum Frame<'a> { - /// A stack frame allocated just before descending into a repetition - /// operator's child node. - Repetition(&'a hir::Repetition), - /// A stack frame allocated just before descending into a capture's child - /// node. - Capture(&'a hir::Capture), - /// The stack frame used while visiting every child node of a concatenation - /// of expressions. - Concat { - /// The child node we are currently visiting. - head: &'a Hir, - /// The remaining child nodes to visit (which may be empty). - tail: &'a [Hir], - }, - /// The stack frame used while visiting every child node of an alternation - /// of expressions. - Alternation { - /// The child node we are currently visiting. - head: &'a Hir, - /// The remaining child nodes to visit (which may be empty). - tail: &'a [Hir], - }, -} - -impl<'a> HeapVisitor<'a> { - fn new() -> HeapVisitor<'a> { - HeapVisitor { stack: vec![] } - } - - fn visit( - &mut self, - mut hir: &'a Hir, - mut visitor: V, - ) -> Result { - self.stack.clear(); - - visitor.start(); - loop { - visitor.visit_pre(hir)?; - if let Some(x) = self.induct(hir) { - let child = x.child(); - self.stack.push((hir, x)); - hir = child; - continue; - } - // No induction means we have a base case, so we can post visit - // it now. - visitor.visit_post(hir)?; - - // At this point, we now try to pop our call stack until it is - // either empty or we hit another inductive case. 
- loop { - let (post_hir, frame) = match self.stack.pop() { - None => return visitor.finish(), - Some((post_hir, frame)) => (post_hir, frame), - }; - // If this is a concat/alternate, then we might have additional - // inductive steps to process. - if let Some(x) = self.pop(frame) { - match x { - Frame::Alternation { .. } => { - visitor.visit_alternation_in()?; - } - Frame::Concat { .. } => { - visitor.visit_concat_in()?; - } - _ => {} - } - hir = x.child(); - self.stack.push((post_hir, x)); - break; - } - // Otherwise, we've finished visiting all the child nodes for - // this HIR, so we can post visit it now. - visitor.visit_post(post_hir)?; - } - } - } - - /// Build a stack frame for the given HIR if one is needed (which occurs if - /// and only if there are child nodes in the HIR). Otherwise, return None. - fn induct(&mut self, hir: &'a Hir) -> Option> { - match *hir.kind() { - HirKind::Repetition(ref x) => Some(Frame::Repetition(x)), - HirKind::Capture(ref x) => Some(Frame::Capture(x)), - HirKind::Concat(ref x) if x.is_empty() => None, - HirKind::Concat(ref x) => { - Some(Frame::Concat { head: &x[0], tail: &x[1..] }) - } - HirKind::Alternation(ref x) if x.is_empty() => None, - HirKind::Alternation(ref x) => { - Some(Frame::Alternation { head: &x[0], tail: &x[1..] }) - } - _ => None, - } - } - - /// Pops the given frame. If the frame has an additional inductive step, - /// then return it, otherwise return `None`. - fn pop(&self, induct: Frame<'a>) -> Option> { - match induct { - Frame::Repetition(_) => None, - Frame::Capture(_) => None, - Frame::Concat { tail, .. } => { - if tail.is_empty() { - None - } else { - Some(Frame::Concat { head: &tail[0], tail: &tail[1..] }) - } - } - Frame::Alternation { tail, .. } => { - if tail.is_empty() { - None - } else { - Some(Frame::Alternation { - head: &tail[0], - tail: &tail[1..], - }) - } - } - } - } -} - -impl<'a> Frame<'a> { - /// Perform the next inductive step on this frame and return the next - /// child HIR node to visit. - fn child(&self) -> &'a Hir { - match *self { - Frame::Repetition(rep) => &rep.sub, - Frame::Capture(capture) => &capture.sub, - Frame::Concat { head, .. } => head, - Frame::Alternation { head, .. } => head, - } - } -} diff --git a/vendor/regex-syntax/src/lib.rs b/vendor/regex-syntax/src/lib.rs deleted file mode 100644 index a4512e23de360d..00000000000000 --- a/vendor/regex-syntax/src/lib.rs +++ /dev/null @@ -1,433 +0,0 @@ -/*! -This crate provides a robust regular expression parser. - -This crate defines two primary types: - -* [`Ast`](ast::Ast) is the abstract syntax of a regular expression. - An abstract syntax corresponds to a *structured representation* of the - concrete syntax of a regular expression, where the concrete syntax is the - pattern string itself (e.g., `foo(bar)+`). Given some abstract syntax, it - can be converted back to the original concrete syntax (modulo some details, - like whitespace). To a first approximation, the abstract syntax is complex - and difficult to analyze. -* [`Hir`](hir::Hir) is the high-level intermediate representation - ("HIR" or "high-level IR" for short) of regular expression. It corresponds to - an intermediate state of a regular expression that sits between the abstract - syntax and the low level compiled opcodes that are eventually responsible for - executing a regular expression search. 
Given some high-level IR, it is not - possible to produce the original concrete syntax (although it is possible to - produce an equivalent concrete syntax, but it will likely scarcely resemble - the original pattern). To a first approximation, the high-level IR is simple - and easy to analyze. - -These two types come with conversion routines: - -* An [`ast::parse::Parser`] converts concrete syntax (a `&str`) to an -[`Ast`](ast::Ast). -* A [`hir::translate::Translator`] converts an [`Ast`](ast::Ast) to a -[`Hir`](hir::Hir). - -As a convenience, the above two conversion routines are combined into one via -the top-level [`Parser`] type. This `Parser` will first convert your pattern to -an `Ast` and then convert the `Ast` to an `Hir`. It's also exposed as top-level -[`parse`] free function. - - -# Example - -This example shows how to parse a pattern string into its HIR: - -``` -use regex_syntax::{hir::Hir, parse}; - -let hir = parse("a|b")?; -assert_eq!(hir, Hir::alternation(vec![ - Hir::literal("a".as_bytes()), - Hir::literal("b".as_bytes()), -])); -# Ok::<(), Box>(()) -``` - - -# Concrete syntax supported - -The concrete syntax is documented as part of the public API of the -[`regex` crate](https://docs.rs/regex/%2A/regex/#syntax). - - -# Input safety - -A key feature of this library is that it is safe to use with end user facing -input. This plays a significant role in the internal implementation. In -particular: - -1. Parsers provide a `nest_limit` option that permits callers to control how - deeply nested a regular expression is allowed to be. This makes it possible - to do case analysis over an `Ast` or an `Hir` using recursion without - worrying about stack overflow. -2. Since relying on a particular stack size is brittle, this crate goes to - great lengths to ensure that all interactions with both the `Ast` and the - `Hir` do not use recursion. Namely, they use constant stack space and heap - space proportional to the size of the original pattern string (in bytes). - This includes the type's corresponding destructors. (One exception to this - is literal extraction, but this will eventually get fixed.) - - -# Error reporting - -The `Display` implementations on all `Error` types exposed in this library -provide nice human readable errors that are suitable for showing to end users -in a monospace font. - - -# Literal extraction - -This crate provides limited support for [literal extraction from `Hir` -values](hir::literal). Be warned that literal extraction uses recursion, and -therefore, stack size proportional to the size of the `Hir`. - -The purpose of literal extraction is to speed up searches. That is, if you -know a regular expression must match a prefix or suffix literal, then it is -often quicker to search for instances of that literal, and then confirm or deny -the match using the full regular expression engine. These optimizations are -done automatically in the `regex` crate. - - -# Crate features - -An important feature provided by this crate is its Unicode support. This -includes things like case folding, boolean properties, general categories, -scripts and Unicode-aware support for the Perl classes `\w`, `\s` and `\d`. -However, a downside of this support is that it requires bundling several -Unicode data tables that are substantial in size. - -A fair number of use cases do not require full Unicode support. For this -reason, this crate exposes a number of features to control which Unicode -data is available. 
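The crate docs being deleted above name the two conversion routines (an `ast::parse::Parser` for concrete syntax to `Ast`, and a `hir::translate::Translator` for `Ast` to `Hir`) but only show the combined `parse` entry point. A minimal sketch of that two-step pipeline, assuming the upstream regex-syntax public API (the same types the deleted tests exercise) and a made-up pattern string:

```rust
// Two-step pipeline sketch: concrete syntax -> Ast -> Hir, plus the
// combined top-level Parser. Pattern string is illustrative only.
use regex_syntax::{ast, hir::translate::Translator, Parser};

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let pattern = r"foo(bar)+";

    // Step 1: parse the concrete syntax into an Ast.
    let mut ast_parser = ast::parse::Parser::new();
    let ast = ast_parser.parse(pattern)?;

    // Step 2: translate the Ast into an Hir.
    let mut translator = Translator::new();
    let hir = translator.translate(pattern, &ast)?;

    // The top-level Parser composes both steps with default settings,
    // so it should produce the same Hir here.
    let hir2 = Parser::new().parse(pattern)?;
    assert_eq!(hir, hir2);
    Ok(())
}
```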
- -If a regular expression attempts to use a Unicode feature that is not available -because the corresponding crate feature was disabled, then translating that -regular expression to an `Hir` will return an error. (It is still possible -construct an `Ast` for such a regular expression, since Unicode data is not -used until translation to an `Hir`.) Stated differently, enabling or disabling -any of the features below can only add or subtract from the total set of valid -regular expressions. Enabling or disabling a feature will never modify the -match semantics of a regular expression. - -The following features are available: - -* **std** - - Enables support for the standard library. This feature is enabled by default. - When disabled, only `core` and `alloc` are used. Otherwise, enabling `std` - generally just enables `std::error::Error` trait impls for the various error - types. -* **unicode** - - Enables all Unicode features. This feature is enabled by default, and will - always cover all Unicode features, even if more are added in the future. -* **unicode-age** - - Provide the data for the - [Unicode `Age` property](https://www.unicode.org/reports/tr44/tr44-24.html#Character_Age). - This makes it possible to use classes like `\p{Age:6.0}` to refer to all - codepoints first introduced in Unicode 6.0 -* **unicode-bool** - - Provide the data for numerous Unicode boolean properties. The full list - is not included here, but contains properties like `Alphabetic`, `Emoji`, - `Lowercase`, `Math`, `Uppercase` and `White_Space`. -* **unicode-case** - - Provide the data for case insensitive matching using - [Unicode's "simple loose matches" specification](https://www.unicode.org/reports/tr18/#Simple_Loose_Matches). -* **unicode-gencat** - - Provide the data for - [Unicode general categories](https://www.unicode.org/reports/tr44/tr44-24.html#General_Category_Values). - This includes, but is not limited to, `Decimal_Number`, `Letter`, - `Math_Symbol`, `Number` and `Punctuation`. -* **unicode-perl** - - Provide the data for supporting the Unicode-aware Perl character classes, - corresponding to `\w`, `\s` and `\d`. This is also necessary for using - Unicode-aware word boundary assertions. Note that if this feature is - disabled, the `\s` and `\d` character classes are still available if the - `unicode-bool` and `unicode-gencat` features are enabled, respectively. -* **unicode-script** - - Provide the data for - [Unicode scripts and script extensions](https://www.unicode.org/reports/tr24/). - This includes, but is not limited to, `Arabic`, `Cyrillic`, `Hebrew`, - `Latin` and `Thai`. -* **unicode-segment** - - Provide the data necessary to provide the properties used to implement the - [Unicode text segmentation algorithms](https://www.unicode.org/reports/tr29/). - This enables using classes like `\p{gcb=Extend}`, `\p{wb=Katakana}` and - `\p{sb=ATerm}`. -* **arbitrary** - - Enabling this feature introduces a public dependency on the - [`arbitrary`](https://crates.io/crates/arbitrary) - crate. Namely, it implements the `Arbitrary` trait from that crate for the - [`Ast`](crate::ast::Ast) type. This feature is disabled by default. -*/ - -#![no_std] -#![forbid(unsafe_code)] -#![deny(missing_docs, rustdoc::broken_intra_doc_links)] -#![warn(missing_debug_implementations)] -// This adds Cargo feature annotations to items in the rustdoc output. Which is -// sadly hugely beneficial for this crate due to the number of features. 
-#![cfg_attr(docsrs_regex, feature(doc_cfg))] - -#[cfg(any(test, feature = "std"))] -extern crate std; - -extern crate alloc; - -pub use crate::{ - error::Error, - parser::{parse, Parser, ParserBuilder}, - unicode::UnicodeWordError, -}; - -use alloc::string::String; - -pub mod ast; -mod debug; -mod either; -mod error; -pub mod hir; -mod parser; -mod rank; -mod unicode; -mod unicode_tables; -pub mod utf8; - -/// Escapes all regular expression meta characters in `text`. -/// -/// The string returned may be safely used as a literal in a regular -/// expression. -pub fn escape(text: &str) -> String { - let mut quoted = String::new(); - escape_into(text, &mut quoted); - quoted -} - -/// Escapes all meta characters in `text` and writes the result into `buf`. -/// -/// This will append escape characters into the given buffer. The characters -/// that are appended are safe to use as a literal in a regular expression. -pub fn escape_into(text: &str, buf: &mut String) { - buf.reserve(text.len()); - for c in text.chars() { - if is_meta_character(c) { - buf.push('\\'); - } - buf.push(c); - } -} - -/// Returns true if the given character has significance in a regex. -/// -/// Generally speaking, these are the only characters which _must_ be escaped -/// in order to match their literal meaning. For example, to match a literal -/// `|`, one could write `\|`. Sometimes escaping isn't always necessary. For -/// example, `-` is treated as a meta character because of its significance -/// for writing ranges inside of character classes, but the regex `-` will -/// match a literal `-` because `-` has no special meaning outside of character -/// classes. -/// -/// In order to determine whether a character may be escaped at all, the -/// [`is_escapeable_character`] routine should be used. The difference between -/// `is_meta_character` and `is_escapeable_character` is that the latter will -/// return true for some characters that are _not_ meta characters. For -/// example, `%` and `\%` both match a literal `%` in all contexts. In other -/// words, `is_escapeable_character` includes "superfluous" escapes. -/// -/// Note that the set of characters for which this function returns `true` or -/// `false` is fixed and won't change in a semver compatible release. (In this -/// case, "semver compatible release" actually refers to the `regex` crate -/// itself, since reducing or expanding the set of meta characters would be a -/// breaking change for not just `regex-syntax` but also `regex` itself.) -/// -/// # Example -/// -/// ``` -/// use regex_syntax::is_meta_character; -/// -/// assert!(is_meta_character('?')); -/// assert!(is_meta_character('-')); -/// assert!(is_meta_character('&')); -/// assert!(is_meta_character('#')); -/// -/// assert!(!is_meta_character('%')); -/// assert!(!is_meta_character('/')); -/// assert!(!is_meta_character('!')); -/// assert!(!is_meta_character('"')); -/// assert!(!is_meta_character('e')); -/// ``` -pub fn is_meta_character(c: char) -> bool { - match c { - '\\' | '.' | '+' | '*' | '?' | '(' | ')' | '|' | '[' | ']' | '{' - | '}' | '^' | '$' | '#' | '&' | '-' | '~' => true, - _ => false, - } -} - -/// Returns true if the given character can be escaped in a regex. -/// -/// This returns true in all cases that `is_meta_character` returns true, but -/// also returns true in some cases where `is_meta_character` returns false. -/// For example, `%` is not a meta character, but it is escapable. That is, -/// `%` and `\%` both match a literal `%` in all contexts. 
-/// -/// The purpose of this routine is to provide knowledge about what characters -/// may be escaped. Namely, most regex engines permit "superfluous" escapes -/// where characters without any special significance may be escaped even -/// though there is no actual _need_ to do so. -/// -/// This will return false for some characters. For example, `e` is not -/// escapable. Therefore, `\e` will either result in a parse error (which is -/// true today), or it could backwards compatibly evolve into a new construct -/// with its own meaning. Indeed, that is the purpose of banning _some_ -/// superfluous escapes: it provides a way to evolve the syntax in a compatible -/// manner. -/// -/// # Example -/// -/// ``` -/// use regex_syntax::is_escapeable_character; -/// -/// assert!(is_escapeable_character('?')); -/// assert!(is_escapeable_character('-')); -/// assert!(is_escapeable_character('&')); -/// assert!(is_escapeable_character('#')); -/// assert!(is_escapeable_character('%')); -/// assert!(is_escapeable_character('/')); -/// assert!(is_escapeable_character('!')); -/// assert!(is_escapeable_character('"')); -/// -/// assert!(!is_escapeable_character('e')); -/// ``` -pub fn is_escapeable_character(c: char) -> bool { - // Certainly escapable if it's a meta character. - if is_meta_character(c) { - return true; - } - // Any character that isn't ASCII is definitely not escapable. There's - // no real need to allow things like \☃ right? - if !c.is_ascii() { - return false; - } - // Otherwise, we basically say that everything is escapable unless it's a - // letter or digit. Things like \3 are either octal (when enabled) or an - // error, and we should keep it that way. Otherwise, letters are reserved - // for adding new syntax in a backwards compatible way. - match c { - '0'..='9' | 'A'..='Z' | 'a'..='z' => false, - // While not currently supported, we keep these as not escapable to - // give us some flexibility with respect to supporting the \< and - // \> word boundary assertions in the future. By rejecting them as - // escapable, \< and \> will result in a parse error. Thus, we can - // turn them into something else in the future without it being a - // backwards incompatible change. - // - // OK, now we support \< and \>, and we need to retain them as *not* - // escapable here since the escape sequence is significant. - '<' | '>' => false, - _ => true, - } -} - -/// Returns true if and only if the given character is a Unicode word -/// character. -/// -/// A Unicode word character is defined by -/// [UTS#18 Annex C](https://unicode.org/reports/tr18/#Compatibility_Properties). -/// In particular, a character -/// is considered a word character if it is in either of the `Alphabetic` or -/// `Join_Control` properties, or is in one of the `Decimal_Number`, `Mark` -/// or `Connector_Punctuation` general categories. -/// -/// # Panics -/// -/// If the `unicode-perl` feature is not enabled, then this function -/// panics. For this reason, it is recommended that callers use -/// [`try_is_word_character`] instead. -pub fn is_word_character(c: char) -> bool { - try_is_word_character(c).expect("unicode-perl feature must be enabled") -} - -/// Returns true if and only if the given character is a Unicode word -/// character. -/// -/// A Unicode word character is defined by -/// [UTS#18 Annex C](https://unicode.org/reports/tr18/#Compatibility_Properties). 
-/// In particular, a character -/// is considered a word character if it is in either of the `Alphabetic` or -/// `Join_Control` properties, or is in one of the `Decimal_Number`, `Mark` -/// or `Connector_Punctuation` general categories. -/// -/// # Errors -/// -/// If the `unicode-perl` feature is not enabled, then this function always -/// returns an error. -pub fn try_is_word_character( - c: char, -) -> core::result::Result { - unicode::is_word_character(c) -} - -/// Returns true if and only if the given character is an ASCII word character. -/// -/// An ASCII word character is defined by the following character class: -/// `[_0-9a-zA-Z]`. -pub fn is_word_byte(c: u8) -> bool { - match c { - b'_' | b'0'..=b'9' | b'a'..=b'z' | b'A'..=b'Z' => true, - _ => false, - } -} - -#[cfg(test)] -mod tests { - use alloc::string::ToString; - - use super::*; - - #[test] - fn escape_meta() { - assert_eq!( - escape(r"\.+*?()|[]{}^$#&-~"), - r"\\\.\+\*\?\(\)\|\[\]\{\}\^\$\#\&\-\~".to_string() - ); - } - - #[test] - fn word_byte() { - assert!(is_word_byte(b'a')); - assert!(!is_word_byte(b'-')); - } - - #[test] - #[cfg(feature = "unicode-perl")] - fn word_char() { - assert!(is_word_character('a'), "ASCII"); - assert!(is_word_character('à'), "Latin-1"); - assert!(is_word_character('β'), "Greek"); - assert!(is_word_character('\u{11011}'), "Brahmi (Unicode 6.0)"); - assert!(is_word_character('\u{11611}'), "Modi (Unicode 7.0)"); - assert!(is_word_character('\u{11711}'), "Ahom (Unicode 8.0)"); - assert!(is_word_character('\u{17828}'), "Tangut (Unicode 9.0)"); - assert!(is_word_character('\u{1B1B1}'), "Nushu (Unicode 10.0)"); - assert!(is_word_character('\u{16E40}'), "Medefaidrin (Unicode 11.0)"); - assert!(!is_word_character('-')); - assert!(!is_word_character('☃')); - } - - #[test] - #[should_panic] - #[cfg(not(feature = "unicode-perl"))] - fn word_char_disabled_panic() { - assert!(is_word_character('a')); - } - - #[test] - #[cfg(not(feature = "unicode-perl"))] - fn word_char_disabled_error() { - assert!(try_is_word_character('a').is_err()); - } -} diff --git a/vendor/regex-syntax/src/parser.rs b/vendor/regex-syntax/src/parser.rs deleted file mode 100644 index f482b84667a7aa..00000000000000 --- a/vendor/regex-syntax/src/parser.rs +++ /dev/null @@ -1,254 +0,0 @@ -use crate::{ast, hir, Error}; - -/// A convenience routine for parsing a regex using default options. -/// -/// This is equivalent to `Parser::new().parse(pattern)`. -/// -/// If you need to set non-default options, then use a [`ParserBuilder`]. -/// -/// This routine returns an [`Hir`](hir::Hir) value. Namely, it automatically -/// parses the pattern as an [`Ast`](ast::Ast) and then invokes the translator -/// to convert the `Ast` into an `Hir`. If you need access to the `Ast`, then -/// you should use a [`ast::parse::Parser`]. -pub fn parse(pattern: &str) -> Result { - Parser::new().parse(pattern) -} - -/// A builder for a regular expression parser. -/// -/// This builder permits modifying configuration options for the parser. -/// -/// This type combines the builder options for both the [AST -/// `ParserBuilder`](ast::parse::ParserBuilder) and the [HIR -/// `TranslatorBuilder`](hir::translate::TranslatorBuilder). -#[derive(Clone, Debug, Default)] -pub struct ParserBuilder { - ast: ast::parse::ParserBuilder, - hir: hir::translate::TranslatorBuilder, -} - -impl ParserBuilder { - /// Create a new parser builder with a default configuration. 
- pub fn new() -> ParserBuilder { - ParserBuilder::default() - } - - /// Build a parser from this configuration with the given pattern. - pub fn build(&self) -> Parser { - Parser { ast: self.ast.build(), hir: self.hir.build() } - } - - /// Set the nesting limit for this parser. - /// - /// The nesting limit controls how deep the abstract syntax tree is allowed - /// to be. If the AST exceeds the given limit (e.g., with too many nested - /// groups), then an error is returned by the parser. - /// - /// The purpose of this limit is to act as a heuristic to prevent stack - /// overflow for consumers that do structural induction on an `Ast` using - /// explicit recursion. While this crate never does this (instead using - /// constant stack space and moving the call stack to the heap), other - /// crates may. - /// - /// This limit is not checked until the entire Ast is parsed. Therefore, - /// if callers want to put a limit on the amount of heap space used, then - /// they should impose a limit on the length, in bytes, of the concrete - /// pattern string. In particular, this is viable since this parser - /// implementation will limit itself to heap space proportional to the - /// length of the pattern string. - /// - /// Note that a nest limit of `0` will return a nest limit error for most - /// patterns but not all. For example, a nest limit of `0` permits `a` but - /// not `ab`, since `ab` requires a concatenation, which results in a nest - /// depth of `1`. In general, a nest limit is not something that manifests - /// in an obvious way in the concrete syntax, therefore, it should not be - /// used in a granular way. - pub fn nest_limit(&mut self, limit: u32) -> &mut ParserBuilder { - self.ast.nest_limit(limit); - self - } - - /// Whether to support octal syntax or not. - /// - /// Octal syntax is a little-known way of uttering Unicode codepoints in - /// a regular expression. For example, `a`, `\x61`, `\u0061` and - /// `\141` are all equivalent regular expressions, where the last example - /// shows octal syntax. - /// - /// While supporting octal syntax isn't in and of itself a problem, it does - /// make good error messages harder. That is, in PCRE based regex engines, - /// syntax like `\0` invokes a backreference, which is explicitly - /// unsupported in Rust's regex engine. However, many users expect it to - /// be supported. Therefore, when octal support is disabled, the error - /// message will explicitly mention that backreferences aren't supported. - /// - /// Octal syntax is disabled by default. - pub fn octal(&mut self, yes: bool) -> &mut ParserBuilder { - self.ast.octal(yes); - self - } - - /// When disabled, translation will permit the construction of a regular - /// expression that may match invalid UTF-8. - /// - /// When enabled (the default), the translator is guaranteed to produce an - /// expression that, for non-empty matches, will only ever produce spans - /// that are entirely valid UTF-8 (otherwise, the translator will return an - /// error). - /// - /// Perhaps surprisingly, when UTF-8 is enabled, an empty regex or even - /// a negated ASCII word boundary (uttered as `(?-u:\B)` in the concrete - /// syntax) will be allowed even though they can produce matches that split - /// a UTF-8 encoded codepoint. This only applies to zero-width or "empty" - /// matches, and it is expected that the regex engine itself must handle - /// these cases if necessary (perhaps by suppressing any zero-width matches - /// that split a codepoint). 
- pub fn utf8(&mut self, yes: bool) -> &mut ParserBuilder { - self.hir.utf8(yes); - self - } - - /// Enable verbose mode in the regular expression. - /// - /// When enabled, verbose mode permits insignificant whitespace in many - /// places in the regular expression, as well as comments. Comments are - /// started using `#` and continue until the end of the line. - /// - /// By default, this is disabled. It may be selectively enabled in the - /// regular expression by using the `x` flag regardless of this setting. - pub fn ignore_whitespace(&mut self, yes: bool) -> &mut ParserBuilder { - self.ast.ignore_whitespace(yes); - self - } - - /// Enable or disable the case insensitive flag by default. - /// - /// By default this is disabled. It may alternatively be selectively - /// enabled in the regular expression itself via the `i` flag. - pub fn case_insensitive(&mut self, yes: bool) -> &mut ParserBuilder { - self.hir.case_insensitive(yes); - self - } - - /// Enable or disable the multi-line matching flag by default. - /// - /// By default this is disabled. It may alternatively be selectively - /// enabled in the regular expression itself via the `m` flag. - pub fn multi_line(&mut self, yes: bool) -> &mut ParserBuilder { - self.hir.multi_line(yes); - self - } - - /// Enable or disable the "dot matches any character" flag by default. - /// - /// By default this is disabled. It may alternatively be selectively - /// enabled in the regular expression itself via the `s` flag. - pub fn dot_matches_new_line(&mut self, yes: bool) -> &mut ParserBuilder { - self.hir.dot_matches_new_line(yes); - self - } - - /// Enable or disable the CRLF mode flag by default. - /// - /// By default this is disabled. It may alternatively be selectively - /// enabled in the regular expression itself via the `R` flag. - /// - /// When CRLF mode is enabled, the following happens: - /// - /// * Unless `dot_matches_new_line` is enabled, `.` will match any character - /// except for `\r` and `\n`. - /// * When `multi_line` mode is enabled, `^` and `$` will treat `\r\n`, - /// `\r` and `\n` as line terminators. And in particular, neither will - /// match between a `\r` and a `\n`. - pub fn crlf(&mut self, yes: bool) -> &mut ParserBuilder { - self.hir.crlf(yes); - self - } - - /// Sets the line terminator for use with `(?u-s:.)` and `(?-us:.)`. - /// - /// Namely, instead of `.` (by default) matching everything except for `\n`, - /// this will cause `.` to match everything except for the byte given. - /// - /// If `.` is used in a context where Unicode mode is enabled and this byte - /// isn't ASCII, then an error will be returned. When Unicode mode is - /// disabled, then any byte is permitted, but will return an error if UTF-8 - /// mode is enabled and it is a non-ASCII byte. - /// - /// In short, any ASCII value for a line terminator is always okay. But a - /// non-ASCII byte might result in an error depending on whether Unicode - /// mode or UTF-8 mode are enabled. - /// - /// Note that if `R` mode is enabled then it always takes precedence and - /// the line terminator will be treated as `\r` and `\n` simultaneously. - /// - /// Note also that this *doesn't* impact the look-around assertions - /// `(?m:^)` and `(?m:$)`. That's usually controlled by additional - /// configuration in the regex engine itself. - pub fn line_terminator(&mut self, byte: u8) -> &mut ParserBuilder { - self.hir.line_terminator(byte); - self - } - - /// Enable or disable the "swap greed" flag by default. - /// - /// By default this is disabled. 
It may alternatively be selectively - /// enabled in the regular expression itself via the `U` flag. - pub fn swap_greed(&mut self, yes: bool) -> &mut ParserBuilder { - self.hir.swap_greed(yes); - self - } - - /// Enable or disable the Unicode flag (`u`) by default. - /// - /// By default this is **enabled**. It may alternatively be selectively - /// disabled in the regular expression itself via the `u` flag. - /// - /// Note that unless `utf8` is disabled (it's enabled by default), a - /// regular expression will fail to parse if Unicode mode is disabled and a - /// sub-expression could possibly match invalid UTF-8. - pub fn unicode(&mut self, yes: bool) -> &mut ParserBuilder { - self.hir.unicode(yes); - self - } -} - -/// A convenience parser for regular expressions. -/// -/// This parser takes as input a regular expression pattern string (the -/// "concrete syntax") and returns a high-level intermediate representation -/// (the HIR) suitable for most types of analysis. In particular, this parser -/// hides the intermediate state of producing an AST (the "abstract syntax"). -/// The AST is itself far more complex than the HIR, so this parser serves as a -/// convenience for never having to deal with it at all. -/// -/// If callers have more fine grained use cases that need an AST, then please -/// see the [`ast::parse`] module. -/// -/// A `Parser` can be configured in more detail via a [`ParserBuilder`]. -#[derive(Clone, Debug)] -pub struct Parser { - ast: ast::parse::Parser, - hir: hir::translate::Translator, -} - -impl Parser { - /// Create a new parser with a default configuration. - /// - /// The parser can be run with `parse` method. The parse method returns - /// a high level intermediate representation of the given regular - /// expression. - /// - /// To set configuration options on the parser, use [`ParserBuilder`]. - pub fn new() -> Parser { - ParserBuilder::new().build() - } - - /// Parse the regular expression into a high level intermediate - /// representation. - pub fn parse(&mut self, pattern: &str) -> Result { - let ast = self.ast.parse(pattern)?; - let hir = self.hir.translate(pattern, &ast)?; - Ok(hir) - } -} diff --git a/vendor/regex-syntax/src/rank.rs b/vendor/regex-syntax/src/rank.rs deleted file mode 100644 index ccb25a20aedcdf..00000000000000 --- a/vendor/regex-syntax/src/rank.rs +++ /dev/null @@ -1,258 +0,0 @@ -pub(crate) const BYTE_FREQUENCIES: [u8; 256] = [ - 55, // '\x00' - 52, // '\x01' - 51, // '\x02' - 50, // '\x03' - 49, // '\x04' - 48, // '\x05' - 47, // '\x06' - 46, // '\x07' - 45, // '\x08' - 103, // '\t' - 242, // '\n' - 66, // '\x0b' - 67, // '\x0c' - 229, // '\r' - 44, // '\x0e' - 43, // '\x0f' - 42, // '\x10' - 41, // '\x11' - 40, // '\x12' - 39, // '\x13' - 38, // '\x14' - 37, // '\x15' - 36, // '\x16' - 35, // '\x17' - 34, // '\x18' - 33, // '\x19' - 56, // '\x1a' - 32, // '\x1b' - 31, // '\x1c' - 30, // '\x1d' - 29, // '\x1e' - 28, // '\x1f' - 255, // ' ' - 148, // '!' - 164, // '"' - 149, // '#' - 136, // '$' - 160, // '%' - 155, // '&' - 173, // "'" - 221, // '(' - 222, // ')' - 134, // '*' - 122, // '+' - 232, // ',' - 202, // '-' - 215, // '.' - 224, // '/' - 208, // '0' - 220, // '1' - 204, // '2' - 187, // '3' - 183, // '4' - 179, // '5' - 177, // '6' - 168, // '7' - 178, // '8' - 200, // '9' - 226, // ':' - 195, // ';' - 154, // '<' - 184, // '=' - 174, // '>' - 126, // '?' 
- 120, // '@' - 191, // 'A' - 157, // 'B' - 194, // 'C' - 170, // 'D' - 189, // 'E' - 162, // 'F' - 161, // 'G' - 150, // 'H' - 193, // 'I' - 142, // 'J' - 137, // 'K' - 171, // 'L' - 176, // 'M' - 185, // 'N' - 167, // 'O' - 186, // 'P' - 112, // 'Q' - 175, // 'R' - 192, // 'S' - 188, // 'T' - 156, // 'U' - 140, // 'V' - 143, // 'W' - 123, // 'X' - 133, // 'Y' - 128, // 'Z' - 147, // '[' - 138, // '\\' - 146, // ']' - 114, // '^' - 223, // '_' - 151, // '`' - 249, // 'a' - 216, // 'b' - 238, // 'c' - 236, // 'd' - 253, // 'e' - 227, // 'f' - 218, // 'g' - 230, // 'h' - 247, // 'i' - 135, // 'j' - 180, // 'k' - 241, // 'l' - 233, // 'm' - 246, // 'n' - 244, // 'o' - 231, // 'p' - 139, // 'q' - 245, // 'r' - 243, // 's' - 251, // 't' - 235, // 'u' - 201, // 'v' - 196, // 'w' - 240, // 'x' - 214, // 'y' - 152, // 'z' - 182, // '{' - 205, // '|' - 181, // '}' - 127, // '~' - 27, // '\x7f' - 212, // '\x80' - 211, // '\x81' - 210, // '\x82' - 213, // '\x83' - 228, // '\x84' - 197, // '\x85' - 169, // '\x86' - 159, // '\x87' - 131, // '\x88' - 172, // '\x89' - 105, // '\x8a' - 80, // '\x8b' - 98, // '\x8c' - 96, // '\x8d' - 97, // '\x8e' - 81, // '\x8f' - 207, // '\x90' - 145, // '\x91' - 116, // '\x92' - 115, // '\x93' - 144, // '\x94' - 130, // '\x95' - 153, // '\x96' - 121, // '\x97' - 107, // '\x98' - 132, // '\x99' - 109, // '\x9a' - 110, // '\x9b' - 124, // '\x9c' - 111, // '\x9d' - 82, // '\x9e' - 108, // '\x9f' - 118, // '\xa0' - 141, // '¡' - 113, // '¢' - 129, // '£' - 119, // '¤' - 125, // '¥' - 165, // '¦' - 117, // '§' - 92, // '¨' - 106, // '©' - 83, // 'ª' - 72, // '«' - 99, // '¬' - 93, // '\xad' - 65, // '®' - 79, // '¯' - 166, // '°' - 237, // '±' - 163, // '²' - 199, // '³' - 190, // '´' - 225, // 'µ' - 209, // '¶' - 203, // '·' - 198, // '¸' - 217, // '¹' - 219, // 'º' - 206, // '»' - 234, // '¼' - 248, // '½' - 158, // '¾' - 239, // '¿' - 255, // 'À' - 255, // 'Á' - 255, // 'Â' - 255, // 'Ã' - 255, // 'Ä' - 255, // 'Å' - 255, // 'Æ' - 255, // 'Ç' - 255, // 'È' - 255, // 'É' - 255, // 'Ê' - 255, // 'Ë' - 255, // 'Ì' - 255, // 'Í' - 255, // 'Î' - 255, // 'Ï' - 255, // 'Ð' - 255, // 'Ñ' - 255, // 'Ò' - 255, // 'Ó' - 255, // 'Ô' - 255, // 'Õ' - 255, // 'Ö' - 255, // '×' - 255, // 'Ø' - 255, // 'Ù' - 255, // 'Ú' - 255, // 'Û' - 255, // 'Ü' - 255, // 'Ý' - 255, // 'Þ' - 255, // 'ß' - 255, // 'à' - 255, // 'á' - 255, // 'â' - 255, // 'ã' - 255, // 'ä' - 255, // 'å' - 255, // 'æ' - 255, // 'ç' - 255, // 'è' - 255, // 'é' - 255, // 'ê' - 255, // 'ë' - 255, // 'ì' - 255, // 'í' - 255, // 'î' - 255, // 'ï' - 255, // 'ð' - 255, // 'ñ' - 255, // 'ò' - 255, // 'ó' - 255, // 'ô' - 255, // 'õ' - 255, // 'ö' - 255, // '÷' - 255, // 'ø' - 255, // 'ù' - 255, // 'ú' - 255, // 'û' - 255, // 'ü' - 255, // 'ý' - 255, // 'þ' - 255, // 'ÿ' -]; diff --git a/vendor/regex-syntax/src/unicode.rs b/vendor/regex-syntax/src/unicode.rs deleted file mode 100644 index 07f78194b21eaf..00000000000000 --- a/vendor/regex-syntax/src/unicode.rs +++ /dev/null @@ -1,1041 +0,0 @@ -use alloc::{ - string::{String, ToString}, - vec::Vec, -}; - -use crate::hir; - -/// An inclusive range of codepoints from a generated file (hence the static -/// lifetime). -type Range = &'static [(char, char)]; - -/// An error that occurs when dealing with Unicode. -/// -/// We don't impl the Error trait here because these always get converted -/// into other public errors. (This error type isn't exported.) -#[derive(Debug)] -pub enum Error { - PropertyNotFound, - PropertyValueNotFound, - // Not used when unicode-perl is enabled. 
- #[allow(dead_code)] - PerlClassNotFound, -} - -/// An error that occurs when Unicode-aware simple case folding fails. -/// -/// This error can occur when the case mapping tables necessary for Unicode -/// aware case folding are unavailable. This only occurs when the -/// `unicode-case` feature is disabled. (The feature is enabled by default.) -#[derive(Debug)] -pub struct CaseFoldError(()); - -#[cfg(feature = "std")] -impl std::error::Error for CaseFoldError {} - -impl core::fmt::Display for CaseFoldError { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - write!( - f, - "Unicode-aware case folding is not available \ - (probably because the unicode-case feature is not enabled)" - ) - } -} - -/// An error that occurs when the Unicode-aware `\w` class is unavailable. -/// -/// This error can occur when the data tables necessary for the Unicode aware -/// Perl character class `\w` are unavailable. This only occurs when the -/// `unicode-perl` feature is disabled. (The feature is enabled by default.) -#[derive(Debug)] -pub struct UnicodeWordError(()); - -#[cfg(feature = "std")] -impl std::error::Error for UnicodeWordError {} - -impl core::fmt::Display for UnicodeWordError { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - write!( - f, - "Unicode-aware \\w class is not available \ - (probably because the unicode-perl feature is not enabled)" - ) - } -} - -/// A state oriented traverser of the simple case folding table. -/// -/// A case folder can be constructed via `SimpleCaseFolder::new()`, which will -/// return an error if the underlying case folding table is unavailable. -/// -/// After construction, it is expected that callers will use -/// `SimpleCaseFolder::mapping` by calling it with codepoints in strictly -/// increasing order. For example, calling it on `b` and then on `a` is illegal -/// and will result in a panic. -/// -/// The main idea of this type is that it tries hard to make mapping lookups -/// fast by exploiting the structure of the underlying table, and the ordering -/// assumption enables this. -#[derive(Debug)] -pub struct SimpleCaseFolder { - /// The simple case fold table. It's a sorted association list, where the - /// keys are Unicode scalar values and the values are the corresponding - /// equivalence class (not including the key) of the "simple" case folded - /// Unicode scalar values. - table: &'static [(char, &'static [char])], - /// The last codepoint that was used for a lookup. - last: Option, - /// The index to the entry in `table` corresponding to the smallest key `k` - /// such that `k > k0`, where `k0` is the most recent key lookup. Note that - /// in particular, `k0` may not be in the table! - next: usize, -} - -impl SimpleCaseFolder { - /// Create a new simple case folder, returning an error if the underlying - /// case folding table is unavailable. - pub fn new() -> Result { - #[cfg(not(feature = "unicode-case"))] - { - Err(CaseFoldError(())) - } - #[cfg(feature = "unicode-case")] - { - Ok(SimpleCaseFolder { - table: crate::unicode_tables::case_folding_simple::CASE_FOLDING_SIMPLE, - last: None, - next: 0, - }) - } - } - - /// Return the equivalence class of case folded codepoints for the given - /// codepoint. The equivalence class returned never includes the codepoint - /// given. If the given codepoint has no case folded codepoints (i.e., - /// no entry in the underlying case folding table), then this returns an - /// empty slice. 
- /// - /// # Panics - /// - /// This panics when called with a `c` that is less than or equal to the - /// previous call. In other words, callers need to use this method with - /// strictly increasing values of `c`. - pub fn mapping(&mut self, c: char) -> &'static [char] { - if let Some(last) = self.last { - assert!( - last < c, - "got codepoint U+{:X} which occurs before \ - last codepoint U+{:X}", - u32::from(c), - u32::from(last), - ); - } - self.last = Some(c); - if self.next >= self.table.len() { - return &[]; - } - let (k, v) = self.table[self.next]; - if k == c { - self.next += 1; - return v; - } - match self.get(c) { - Err(i) => { - self.next = i; - &[] - } - Ok(i) => { - // Since we require lookups to proceed - // in order, anything we find should be - // after whatever we thought might be - // next. Otherwise, the caller is either - // going out of order or we would have - // found our next key at 'self.next'. - assert!(i > self.next); - self.next = i + 1; - self.table[i].1 - } - } - } - - /// Returns true if and only if the given range overlaps with any region - /// of the underlying case folding table. That is, when true, there exists - /// at least one codepoint in the inclusive range `[start, end]` that has - /// a non-trivial equivalence class of case folded codepoints. Conversely, - /// when this returns false, all codepoints in the range `[start, end]` - /// correspond to the trivial equivalence class of case folded codepoints, - /// i.e., itself. - /// - /// This is useful to call before iterating over the codepoints in the - /// range and looking up the mapping for each. If you know none of the - /// mappings will return anything, then you might be able to skip doing it - /// altogether. - /// - /// # Panics - /// - /// This panics when `end < start`. - pub fn overlaps(&self, start: char, end: char) -> bool { - use core::cmp::Ordering; - - assert!(start <= end); - self.table - .binary_search_by(|&(c, _)| { - if start <= c && c <= end { - Ordering::Equal - } else if c > end { - Ordering::Greater - } else { - Ordering::Less - } - }) - .is_ok() - } - - /// Returns the index at which `c` occurs in the simple case fold table. If - /// `c` does not occur, then this returns an `i` such that `table[i-1].0 < - /// c` and `table[i].0 > c`. - fn get(&self, c: char) -> Result { - self.table.binary_search_by_key(&c, |&(c1, _)| c1) - } -} - -/// A query for finding a character class defined by Unicode. This supports -/// either use of a property name directly, or lookup by property value. The -/// former generally refers to Binary properties (see UTS#44, Table 8), but -/// as a special exception (see UTS#18, Section 1.2) both general categories -/// (an enumeration) and scripts (a catalog) are supported as if each of their -/// possible values were a binary property. -/// -/// In all circumstances, property names and values are normalized and -/// canonicalized. That is, `GC == gc == GeneralCategory == general_category`. -/// -/// The lifetime `'a` refers to the shorter of the lifetimes of property name -/// and property value. -#[derive(Debug)] -pub enum ClassQuery<'a> { - /// Return a class corresponding to a Unicode binary property, named by - /// a single letter. - OneLetter(char), - /// Return a class corresponding to a Unicode binary property. - /// - /// Note that, by special exception (see UTS#18, Section 1.2), both - /// general category values and script values are permitted here as if - /// they were a binary property. 
- Binary(&'a str), - /// Return a class corresponding to all codepoints whose property - /// (identified by `property_name`) corresponds to the given value - /// (identified by `property_value`). - ByValue { - /// A property name. - property_name: &'a str, - /// A property value. - property_value: &'a str, - }, -} - -impl<'a> ClassQuery<'a> { - fn canonicalize(&self) -> Result { - match *self { - ClassQuery::OneLetter(c) => self.canonical_binary(&c.to_string()), - ClassQuery::Binary(name) => self.canonical_binary(name), - ClassQuery::ByValue { property_name, property_value } => { - let property_name = symbolic_name_normalize(property_name); - let property_value = symbolic_name_normalize(property_value); - - let canon_name = match canonical_prop(&property_name)? { - None => return Err(Error::PropertyNotFound), - Some(canon_name) => canon_name, - }; - Ok(match canon_name { - "General_Category" => { - let canon = match canonical_gencat(&property_value)? { - None => return Err(Error::PropertyValueNotFound), - Some(canon) => canon, - }; - CanonicalClassQuery::GeneralCategory(canon) - } - "Script" => { - let canon = match canonical_script(&property_value)? { - None => return Err(Error::PropertyValueNotFound), - Some(canon) => canon, - }; - CanonicalClassQuery::Script(canon) - } - _ => { - let vals = match property_values(canon_name)? { - None => return Err(Error::PropertyValueNotFound), - Some(vals) => vals, - }; - let canon_val = - match canonical_value(vals, &property_value) { - None => { - return Err(Error::PropertyValueNotFound) - } - Some(canon_val) => canon_val, - }; - CanonicalClassQuery::ByValue { - property_name: canon_name, - property_value: canon_val, - } - } - }) - } - } - } - - fn canonical_binary( - &self, - name: &str, - ) -> Result { - let norm = symbolic_name_normalize(name); - - // This is a special case where 'cf' refers to the 'Format' general - // category, but where the 'cf' abbreviation is also an abbreviation - // for the 'Case_Folding' property. But we want to treat it as - // a general category. (Currently, we don't even support the - // 'Case_Folding' property. But if we do in the future, users will be - // required to spell it out.) - // - // Also 'sc' refers to the 'Currency_Symbol' general category, but is - // also the abbreviation for the 'Script' property. So we avoid calling - // 'canonical_prop' for it too, which would erroneously normalize it - // to 'Script'. - // - // Another case: 'lc' is an abbreviation for the 'Cased_Letter' - // general category, but is also an abbreviation for the 'Lowercase_Mapping' - // property. We don't currently support the latter, so as with 'cf' - // above, we treat 'lc' as 'Cased_Letter'. - if norm != "cf" && norm != "sc" && norm != "lc" { - if let Some(canon) = canonical_prop(&norm)? { - return Ok(CanonicalClassQuery::Binary(canon)); - } - } - if let Some(canon) = canonical_gencat(&norm)? { - return Ok(CanonicalClassQuery::GeneralCategory(canon)); - } - if let Some(canon) = canonical_script(&norm)? { - return Ok(CanonicalClassQuery::Script(canon)); - } - Err(Error::PropertyNotFound) - } -} - -/// Like ClassQuery, but its parameters have been canonicalized. This also -/// differentiates binary properties from flattened general categories and -/// scripts. -#[derive(Debug, Eq, PartialEq)] -enum CanonicalClassQuery { - /// The canonical binary property name. - Binary(&'static str), - /// The canonical general category name. - GeneralCategory(&'static str), - /// The canonical script name. 
- Script(&'static str), - /// An arbitrary association between property and value, both of which - /// have been canonicalized. - /// - /// Note that by construction, the property name of ByValue will never - /// be General_Category or Script. Those two cases are subsumed by the - /// eponymous variants. - ByValue { - /// The canonical property name. - property_name: &'static str, - /// The canonical property value. - property_value: &'static str, - }, -} - -/// Looks up a Unicode class given a query. If one doesn't exist, then -/// `None` is returned. -pub fn class(query: ClassQuery<'_>) -> Result { - use self::CanonicalClassQuery::*; - - match query.canonicalize()? { - Binary(name) => bool_property(name), - GeneralCategory(name) => gencat(name), - Script(name) => script(name), - ByValue { property_name: "Age", property_value } => { - let mut class = hir::ClassUnicode::empty(); - for set in ages(property_value)? { - class.union(&hir_class(set)); - } - Ok(class) - } - ByValue { property_name: "Script_Extensions", property_value } => { - script_extension(property_value) - } - ByValue { - property_name: "Grapheme_Cluster_Break", - property_value, - } => gcb(property_value), - ByValue { property_name: "Sentence_Break", property_value } => { - sb(property_value) - } - ByValue { property_name: "Word_Break", property_value } => { - wb(property_value) - } - _ => { - // What else should we support? - Err(Error::PropertyNotFound) - } - } -} - -/// Returns a Unicode aware class for \w. -/// -/// This returns an error if the data is not available for \w. -pub fn perl_word() -> Result { - #[cfg(not(feature = "unicode-perl"))] - fn imp() -> Result { - Err(Error::PerlClassNotFound) - } - - #[cfg(feature = "unicode-perl")] - fn imp() -> Result { - use crate::unicode_tables::perl_word::PERL_WORD; - Ok(hir_class(PERL_WORD)) - } - - imp() -} - -/// Returns a Unicode aware class for \s. -/// -/// This returns an error if the data is not available for \s. -pub fn perl_space() -> Result { - #[cfg(not(any(feature = "unicode-perl", feature = "unicode-bool")))] - fn imp() -> Result { - Err(Error::PerlClassNotFound) - } - - #[cfg(all(feature = "unicode-perl", not(feature = "unicode-bool")))] - fn imp() -> Result { - use crate::unicode_tables::perl_space::WHITE_SPACE; - Ok(hir_class(WHITE_SPACE)) - } - - #[cfg(feature = "unicode-bool")] - fn imp() -> Result { - use crate::unicode_tables::property_bool::WHITE_SPACE; - Ok(hir_class(WHITE_SPACE)) - } - - imp() -} - -/// Returns a Unicode aware class for \d. -/// -/// This returns an error if the data is not available for \d. -pub fn perl_digit() -> Result { - #[cfg(not(any(feature = "unicode-perl", feature = "unicode-gencat")))] - fn imp() -> Result { - Err(Error::PerlClassNotFound) - } - - #[cfg(all(feature = "unicode-perl", not(feature = "unicode-gencat")))] - fn imp() -> Result { - use crate::unicode_tables::perl_decimal::DECIMAL_NUMBER; - Ok(hir_class(DECIMAL_NUMBER)) - } - - #[cfg(feature = "unicode-gencat")] - fn imp() -> Result { - use crate::unicode_tables::general_category::DECIMAL_NUMBER; - Ok(hir_class(DECIMAL_NUMBER)) - } - - imp() -} - -/// Build a Unicode HIR class from a sequence of Unicode scalar value ranges. -pub fn hir_class(ranges: &[(char, char)]) -> hir::ClassUnicode { - let hir_ranges: Vec = ranges - .iter() - .map(|&(s, e)| hir::ClassUnicodeRange::new(s, e)) - .collect(); - hir::ClassUnicode::new(hir_ranges) -} - -/// Returns true only if the given codepoint is in the `\w` character class. 
-/// -/// If the `unicode-perl` feature is not enabled, then this returns an error. -pub fn is_word_character(c: char) -> Result { - #[cfg(not(feature = "unicode-perl"))] - fn imp(_: char) -> Result { - Err(UnicodeWordError(())) - } - - #[cfg(feature = "unicode-perl")] - fn imp(c: char) -> Result { - use crate::{is_word_byte, unicode_tables::perl_word::PERL_WORD}; - - if u8::try_from(c).map_or(false, is_word_byte) { - return Ok(true); - } - Ok(PERL_WORD - .binary_search_by(|&(start, end)| { - use core::cmp::Ordering; - - if start <= c && c <= end { - Ordering::Equal - } else if start > c { - Ordering::Greater - } else { - Ordering::Less - } - }) - .is_ok()) - } - - imp(c) -} - -/// A mapping of property values for a specific property. -/// -/// The first element of each tuple is a normalized property value while the -/// second element of each tuple is the corresponding canonical property -/// value. -type PropertyValues = &'static [(&'static str, &'static str)]; - -fn canonical_gencat( - normalized_value: &str, -) -> Result, Error> { - Ok(match normalized_value { - "any" => Some("Any"), - "assigned" => Some("Assigned"), - "ascii" => Some("ASCII"), - _ => { - let gencats = property_values("General_Category")?.unwrap(); - canonical_value(gencats, normalized_value) - } - }) -} - -fn canonical_script( - normalized_value: &str, -) -> Result, Error> { - let scripts = property_values("Script")?.unwrap(); - Ok(canonical_value(scripts, normalized_value)) -} - -/// Find the canonical property name for the given normalized property name. -/// -/// If no such property exists, then `None` is returned. -/// -/// The normalized property name must have been normalized according to -/// UAX44 LM3, which can be done using `symbolic_name_normalize`. -/// -/// If the property names data is not available, then an error is returned. -fn canonical_prop( - normalized_name: &str, -) -> Result, Error> { - #[cfg(not(any( - feature = "unicode-age", - feature = "unicode-bool", - feature = "unicode-gencat", - feature = "unicode-perl", - feature = "unicode-script", - feature = "unicode-segment", - )))] - fn imp(_: &str) -> Result, Error> { - Err(Error::PropertyNotFound) - } - - #[cfg(any( - feature = "unicode-age", - feature = "unicode-bool", - feature = "unicode-gencat", - feature = "unicode-perl", - feature = "unicode-script", - feature = "unicode-segment", - ))] - fn imp(name: &str) -> Result, Error> { - use crate::unicode_tables::property_names::PROPERTY_NAMES; - - Ok(PROPERTY_NAMES - .binary_search_by_key(&name, |&(n, _)| n) - .ok() - .map(|i| PROPERTY_NAMES[i].1)) - } - - imp(normalized_name) -} - -/// Find the canonical property value for the given normalized property -/// value. -/// -/// The given property values should correspond to the values for the property -/// under question, which can be found using `property_values`. -/// -/// If no such property value exists, then `None` is returned. -/// -/// The normalized property value must have been normalized according to -/// UAX44 LM3, which can be done using `symbolic_name_normalize`. -fn canonical_value( - vals: PropertyValues, - normalized_value: &str, -) -> Option<&'static str> { - vals.binary_search_by_key(&normalized_value, |&(n, _)| n) - .ok() - .map(|i| vals[i].1) -} - -/// Return the table of property values for the given property name. -/// -/// If the property values data is not available, then an error is returned. 
-fn property_values( - canonical_property_name: &'static str, -) -> Result, Error> { - #[cfg(not(any( - feature = "unicode-age", - feature = "unicode-bool", - feature = "unicode-gencat", - feature = "unicode-perl", - feature = "unicode-script", - feature = "unicode-segment", - )))] - fn imp(_: &'static str) -> Result, Error> { - Err(Error::PropertyValueNotFound) - } - - #[cfg(any( - feature = "unicode-age", - feature = "unicode-bool", - feature = "unicode-gencat", - feature = "unicode-perl", - feature = "unicode-script", - feature = "unicode-segment", - ))] - fn imp(name: &'static str) -> Result, Error> { - use crate::unicode_tables::property_values::PROPERTY_VALUES; - - Ok(PROPERTY_VALUES - .binary_search_by_key(&name, |&(n, _)| n) - .ok() - .map(|i| PROPERTY_VALUES[i].1)) - } - - imp(canonical_property_name) -} - -// This is only used in some cases, but small enough to just let it be dead -// instead of figuring out (and maintaining) the right set of features. -#[allow(dead_code)] -fn property_set( - name_map: &'static [(&'static str, Range)], - canonical: &'static str, -) -> Option { - name_map - .binary_search_by_key(&canonical, |x| x.0) - .ok() - .map(|i| name_map[i].1) -} - -/// Returns an iterator over Unicode Age sets. Each item corresponds to a set -/// of codepoints that were added in a particular revision of Unicode. The -/// iterator yields items in chronological order. -/// -/// If the given age value isn't valid or if the data isn't available, then an -/// error is returned instead. -fn ages(canonical_age: &str) -> Result, Error> { - #[cfg(not(feature = "unicode-age"))] - fn imp(_: &str) -> Result, Error> { - use core::option::IntoIter; - Err::, _>(Error::PropertyNotFound) - } - - #[cfg(feature = "unicode-age")] - fn imp(canonical_age: &str) -> Result, Error> { - use crate::unicode_tables::age; - - const AGES: &[(&str, Range)] = &[ - ("V1_1", age::V1_1), - ("V2_0", age::V2_0), - ("V2_1", age::V2_1), - ("V3_0", age::V3_0), - ("V3_1", age::V3_1), - ("V3_2", age::V3_2), - ("V4_0", age::V4_0), - ("V4_1", age::V4_1), - ("V5_0", age::V5_0), - ("V5_1", age::V5_1), - ("V5_2", age::V5_2), - ("V6_0", age::V6_0), - ("V6_1", age::V6_1), - ("V6_2", age::V6_2), - ("V6_3", age::V6_3), - ("V7_0", age::V7_0), - ("V8_0", age::V8_0), - ("V9_0", age::V9_0), - ("V10_0", age::V10_0), - ("V11_0", age::V11_0), - ("V12_0", age::V12_0), - ("V12_1", age::V12_1), - ("V13_0", age::V13_0), - ("V14_0", age::V14_0), - ("V15_0", age::V15_0), - ("V15_1", age::V15_1), - ("V16_0", age::V16_0), - ]; - assert_eq!(AGES.len(), age::BY_NAME.len(), "ages are out of sync"); - - let pos = AGES.iter().position(|&(age, _)| canonical_age == age); - match pos { - None => Err(Error::PropertyValueNotFound), - Some(i) => Ok(AGES[..=i].iter().map(|&(_, classes)| classes)), - } - } - - imp(canonical_age) -} - -/// Returns the Unicode HIR class corresponding to the given general category. -/// -/// Name canonicalization is assumed to be performed by the caller. -/// -/// If the given general category could not be found, or if the general -/// category data is not available, then an error is returned. 
-fn gencat(canonical_name: &'static str) -> Result { - #[cfg(not(feature = "unicode-gencat"))] - fn imp(_: &'static str) -> Result { - Err(Error::PropertyNotFound) - } - - #[cfg(feature = "unicode-gencat")] - fn imp(name: &'static str) -> Result { - use crate::unicode_tables::general_category::BY_NAME; - match name { - "ASCII" => Ok(hir_class(&[('\0', '\x7F')])), - "Any" => Ok(hir_class(&[('\0', '\u{10FFFF}')])), - "Assigned" => { - let mut cls = gencat("Unassigned")?; - cls.negate(); - Ok(cls) - } - name => property_set(BY_NAME, name) - .map(hir_class) - .ok_or(Error::PropertyValueNotFound), - } - } - - match canonical_name { - "Decimal_Number" => perl_digit(), - name => imp(name), - } -} - -/// Returns the Unicode HIR class corresponding to the given script. -/// -/// Name canonicalization is assumed to be performed by the caller. -/// -/// If the given script could not be found, or if the script data is not -/// available, then an error is returned. -fn script(canonical_name: &'static str) -> Result { - #[cfg(not(feature = "unicode-script"))] - fn imp(_: &'static str) -> Result { - Err(Error::PropertyNotFound) - } - - #[cfg(feature = "unicode-script")] - fn imp(name: &'static str) -> Result { - use crate::unicode_tables::script::BY_NAME; - property_set(BY_NAME, name) - .map(hir_class) - .ok_or(Error::PropertyValueNotFound) - } - - imp(canonical_name) -} - -/// Returns the Unicode HIR class corresponding to the given script extension. -/// -/// Name canonicalization is assumed to be performed by the caller. -/// -/// If the given script extension could not be found, or if the script data is -/// not available, then an error is returned. -fn script_extension( - canonical_name: &'static str, -) -> Result { - #[cfg(not(feature = "unicode-script"))] - fn imp(_: &'static str) -> Result { - Err(Error::PropertyNotFound) - } - - #[cfg(feature = "unicode-script")] - fn imp(name: &'static str) -> Result { - use crate::unicode_tables::script_extension::BY_NAME; - property_set(BY_NAME, name) - .map(hir_class) - .ok_or(Error::PropertyValueNotFound) - } - - imp(canonical_name) -} - -/// Returns the Unicode HIR class corresponding to the given Unicode boolean -/// property. -/// -/// Name canonicalization is assumed to be performed by the caller. -/// -/// If the given boolean property could not be found, or if the boolean -/// property data is not available, then an error is returned. -fn bool_property( - canonical_name: &'static str, -) -> Result { - #[cfg(not(feature = "unicode-bool"))] - fn imp(_: &'static str) -> Result { - Err(Error::PropertyNotFound) - } - - #[cfg(feature = "unicode-bool")] - fn imp(name: &'static str) -> Result { - use crate::unicode_tables::property_bool::BY_NAME; - property_set(BY_NAME, name) - .map(hir_class) - .ok_or(Error::PropertyNotFound) - } - - match canonical_name { - "Decimal_Number" => perl_digit(), - "White_Space" => perl_space(), - name => imp(name), - } -} - -/// Returns the Unicode HIR class corresponding to the given grapheme cluster -/// break property. -/// -/// Name canonicalization is assumed to be performed by the caller. -/// -/// If the given property could not be found, or if the corresponding data is -/// not available, then an error is returned. 
-fn gcb(canonical_name: &'static str) -> Result { - #[cfg(not(feature = "unicode-segment"))] - fn imp(_: &'static str) -> Result { - Err(Error::PropertyNotFound) - } - - #[cfg(feature = "unicode-segment")] - fn imp(name: &'static str) -> Result { - use crate::unicode_tables::grapheme_cluster_break::BY_NAME; - property_set(BY_NAME, name) - .map(hir_class) - .ok_or(Error::PropertyValueNotFound) - } - - imp(canonical_name) -} - -/// Returns the Unicode HIR class corresponding to the given word break -/// property. -/// -/// Name canonicalization is assumed to be performed by the caller. -/// -/// If the given property could not be found, or if the corresponding data is -/// not available, then an error is returned. -fn wb(canonical_name: &'static str) -> Result { - #[cfg(not(feature = "unicode-segment"))] - fn imp(_: &'static str) -> Result { - Err(Error::PropertyNotFound) - } - - #[cfg(feature = "unicode-segment")] - fn imp(name: &'static str) -> Result { - use crate::unicode_tables::word_break::BY_NAME; - property_set(BY_NAME, name) - .map(hir_class) - .ok_or(Error::PropertyValueNotFound) - } - - imp(canonical_name) -} - -/// Returns the Unicode HIR class corresponding to the given sentence -/// break property. -/// -/// Name canonicalization is assumed to be performed by the caller. -/// -/// If the given property could not be found, or if the corresponding data is -/// not available, then an error is returned. -fn sb(canonical_name: &'static str) -> Result { - #[cfg(not(feature = "unicode-segment"))] - fn imp(_: &'static str) -> Result { - Err(Error::PropertyNotFound) - } - - #[cfg(feature = "unicode-segment")] - fn imp(name: &'static str) -> Result { - use crate::unicode_tables::sentence_break::BY_NAME; - property_set(BY_NAME, name) - .map(hir_class) - .ok_or(Error::PropertyValueNotFound) - } - - imp(canonical_name) -} - -/// Like symbolic_name_normalize_bytes, but operates on a string. -fn symbolic_name_normalize(x: &str) -> String { - let mut tmp = x.as_bytes().to_vec(); - let len = symbolic_name_normalize_bytes(&mut tmp).len(); - tmp.truncate(len); - // This should always succeed because `symbolic_name_normalize_bytes` - // guarantees that `&tmp[..len]` is always valid UTF-8. - // - // N.B. We could avoid the additional UTF-8 check here, but it's unlikely - // to be worth skipping the additional safety check. A benchmark must - // justify it first. - String::from_utf8(tmp).unwrap() -} - -/// Normalize the given symbolic name in place according to UAX44-LM3. -/// -/// A "symbolic name" typically corresponds to property names and property -/// value aliases. Note, though, that it should not be applied to property -/// string values. -/// -/// The slice returned is guaranteed to be valid UTF-8 for all possible values -/// of `slice`. -/// -/// See: https://unicode.org/reports/tr44/#UAX44-LM3 -fn symbolic_name_normalize_bytes(slice: &mut [u8]) -> &mut [u8] { - // I couldn't find a place in the standard that specified that property - // names/aliases had a particular structure (unlike character names), but - // we assume that it's ASCII only and drop anything that isn't ASCII. - let mut start = 0; - let mut starts_with_is = false; - if slice.len() >= 2 { - // Ignore any "is" prefix. - starts_with_is = slice[0..2] == b"is"[..] - || slice[0..2] == b"IS"[..] - || slice[0..2] == b"iS"[..] 
- || slice[0..2] == b"Is"[..]; - if starts_with_is { - start = 2; - } - } - let mut next_write = 0; - for i in start..slice.len() { - // VALIDITY ARGUMENT: To guarantee that the resulting slice is valid - // UTF-8, we ensure that the slice contains only ASCII bytes. In - // particular, we drop every non-ASCII byte from the normalized string. - let b = slice[i]; - if b == b' ' || b == b'_' || b == b'-' { - continue; - } else if b'A' <= b && b <= b'Z' { - slice[next_write] = b + (b'a' - b'A'); - next_write += 1; - } else if b <= 0x7F { - slice[next_write] = b; - next_write += 1; - } - } - // Special case: ISO_Comment has a 'isc' abbreviation. Since we generally - // ignore 'is' prefixes, the 'isc' abbreviation gets caught in the cross - // fire and ends up creating an alias for 'c' to 'ISO_Comment', but it - // is actually an alias for the 'Other' general category. - if starts_with_is && next_write == 1 && slice[0] == b'c' { - slice[0] = b'i'; - slice[1] = b's'; - slice[2] = b'c'; - next_write = 3; - } - &mut slice[..next_write] -} - -#[cfg(test)] -mod tests { - use super::*; - - #[cfg(feature = "unicode-case")] - fn simple_fold_ok(c: char) -> impl Iterator { - SimpleCaseFolder::new().unwrap().mapping(c).iter().copied() - } - - #[cfg(feature = "unicode-case")] - fn contains_case_map(start: char, end: char) -> bool { - SimpleCaseFolder::new().unwrap().overlaps(start, end) - } - - #[test] - #[cfg(feature = "unicode-case")] - fn simple_fold_k() { - let xs: Vec = simple_fold_ok('k').collect(); - assert_eq!(xs, alloc::vec!['K', 'K']); - - let xs: Vec = simple_fold_ok('K').collect(); - assert_eq!(xs, alloc::vec!['k', 'K']); - - let xs: Vec = simple_fold_ok('K').collect(); - assert_eq!(xs, alloc::vec!['K', 'k']); - } - - #[test] - #[cfg(feature = "unicode-case")] - fn simple_fold_a() { - let xs: Vec = simple_fold_ok('a').collect(); - assert_eq!(xs, alloc::vec!['A']); - - let xs: Vec = simple_fold_ok('A').collect(); - assert_eq!(xs, alloc::vec!['a']); - } - - #[test] - #[cfg(not(feature = "unicode-case"))] - fn simple_fold_disabled() { - assert!(SimpleCaseFolder::new().is_err()); - } - - #[test] - #[cfg(feature = "unicode-case")] - fn range_contains() { - assert!(contains_case_map('A', 'A')); - assert!(contains_case_map('Z', 'Z')); - assert!(contains_case_map('A', 'Z')); - assert!(contains_case_map('@', 'A')); - assert!(contains_case_map('Z', '[')); - assert!(contains_case_map('☃', 'Ⰰ')); - - assert!(!contains_case_map('[', '[')); - assert!(!contains_case_map('[', '`')); - - assert!(!contains_case_map('☃', '☃')); - } - - #[test] - #[cfg(feature = "unicode-gencat")] - fn regression_466() { - use super::{CanonicalClassQuery, ClassQuery}; - - let q = ClassQuery::OneLetter('C'); - assert_eq!( - q.canonicalize().unwrap(), - CanonicalClassQuery::GeneralCategory("Other") - ); - } - - #[test] - fn sym_normalize() { - let sym_norm = symbolic_name_normalize; - - assert_eq!(sym_norm("Line_Break"), "linebreak"); - assert_eq!(sym_norm("Line-break"), "linebreak"); - assert_eq!(sym_norm("linebreak"), "linebreak"); - assert_eq!(sym_norm("BA"), "ba"); - assert_eq!(sym_norm("ba"), "ba"); - assert_eq!(sym_norm("Greek"), "greek"); - assert_eq!(sym_norm("isGreek"), "greek"); - assert_eq!(sym_norm("IS_Greek"), "greek"); - assert_eq!(sym_norm("isc"), "isc"); - assert_eq!(sym_norm("is c"), "isc"); - assert_eq!(sym_norm("is_c"), "isc"); - } - - #[test] - fn valid_utf8_symbolic() { - let mut x = b"abc\xFFxyz".to_vec(); - let y = symbolic_name_normalize_bytes(&mut x); - assert_eq!(y, b"abcxyz"); - } -} diff --git 
a/vendor/regex-syntax/src/unicode_tables/LICENSE-UNICODE b/vendor/regex-syntax/src/unicode_tables/LICENSE-UNICODE deleted file mode 100644 index b82826bdbdd2c3..00000000000000 --- a/vendor/regex-syntax/src/unicode_tables/LICENSE-UNICODE +++ /dev/null @@ -1,57 +0,0 @@ -UNICODE, INC. LICENSE AGREEMENT - DATA FILES AND SOFTWARE - -Unicode Data Files include all data files under the directories -http://www.unicode.org/Public/, http://www.unicode.org/reports/, -http://www.unicode.org/cldr/data/, http://source.icu-project.org/repos/icu/, and -http://www.unicode.org/utility/trac/browser/. - -Unicode Data Files do not include PDF online code charts under the -directory http://www.unicode.org/Public/. - -Software includes any source code published in the Unicode Standard -or under the directories -http://www.unicode.org/Public/, http://www.unicode.org/reports/, -http://www.unicode.org/cldr/data/, http://source.icu-project.org/repos/icu/, and -http://www.unicode.org/utility/trac/browser/. - -NOTICE TO USER: Carefully read the following legal agreement. -BY DOWNLOADING, INSTALLING, COPYING OR OTHERWISE USING UNICODE INC.'S -DATA FILES ("DATA FILES"), AND/OR SOFTWARE ("SOFTWARE"), -YOU UNEQUIVOCALLY ACCEPT, AND AGREE TO BE BOUND BY, ALL OF THE -TERMS AND CONDITIONS OF THIS AGREEMENT. -IF YOU DO NOT AGREE, DO NOT DOWNLOAD, INSTALL, COPY, DISTRIBUTE OR USE -THE DATA FILES OR SOFTWARE. - -COPYRIGHT AND PERMISSION NOTICE - -Copyright © 1991-2018 Unicode, Inc. All rights reserved. -Distributed under the Terms of Use in http://www.unicode.org/copyright.html. - -Permission is hereby granted, free of charge, to any person obtaining -a copy of the Unicode data files and any associated documentation -(the "Data Files") or Unicode software and any associated documentation -(the "Software") to deal in the Data Files or Software -without restriction, including without limitation the rights to use, -copy, modify, merge, publish, distribute, and/or sell copies of -the Data Files or Software, and to permit persons to whom the Data Files -or Software are furnished to do so, provided that either -(a) this copyright and permission notice appear with all copies -of the Data Files or Software, or -(b) this copyright and permission notice appear in associated -Documentation. - -THE DATA FILES AND SOFTWARE ARE PROVIDED "AS IS", WITHOUT WARRANTY OF -ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE -WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT OF THIRD PARTY RIGHTS. -IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS INCLUDED IN THIS -NOTICE BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL -DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, -DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER -TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR -PERFORMANCE OF THE DATA FILES OR SOFTWARE. - -Except as contained in this notice, the name of a copyright holder -shall not be used in advertising or otherwise to promote the sale, -use or other dealings in these Data Files or Software without prior -written authorization of the copyright holder. diff --git a/vendor/regex-syntax/src/unicode_tables/age.rs b/vendor/regex-syntax/src/unicode_tables/age.rs deleted file mode 100644 index 466510c9e6131e..00000000000000 --- a/vendor/regex-syntax/src/unicode_tables/age.rs +++ /dev/null @@ -1,1846 +0,0 @@ -// DO NOT EDIT THIS FILE. 
IT WAS AUTOMATICALLY GENERATED BY: -// -// ucd-generate age ucd-16.0.0 --chars -// -// Unicode version: 16.0.0. -// -// ucd-generate 0.3.1 is available on crates.io. - -pub const BY_NAME: &'static [(&'static str, &'static [(char, char)])] = &[ - ("V10_0", V10_0), - ("V11_0", V11_0), - ("V12_0", V12_0), - ("V12_1", V12_1), - ("V13_0", V13_0), - ("V14_0", V14_0), - ("V15_0", V15_0), - ("V15_1", V15_1), - ("V16_0", V16_0), - ("V1_1", V1_1), - ("V2_0", V2_0), - ("V2_1", V2_1), - ("V3_0", V3_0), - ("V3_1", V3_1), - ("V3_2", V3_2), - ("V4_0", V4_0), - ("V4_1", V4_1), - ("V5_0", V5_0), - ("V5_1", V5_1), - ("V5_2", V5_2), - ("V6_0", V6_0), - ("V6_1", V6_1), - ("V6_2", V6_2), - ("V6_3", V6_3), - ("V7_0", V7_0), - ("V8_0", V8_0), - ("V9_0", V9_0), -]; - -pub const V10_0: &'static [(char, char)] = &[ - ('ࡠ', 'ࡪ'), - ('ৼ', '৽'), - ('\u{afa}', '\u{aff}'), - ('\u{d00}', '\u{d00}'), - ('\u{d3b}', '\u{d3c}'), - ('᳷', '᳷'), - ('\u{1df6}', '\u{1df9}'), - ('₿', '₿'), - ('⏿', '⏿'), - ('⯒', '⯒'), - ('⹅', '⹉'), - ('ㄮ', 'ㄮ'), - ('鿖', '鿪'), - ('𐌭', '𐌯'), - ('𑨀', '\u{11a47}'), - ('𑩐', '𑪃'), - ('𑪆', '𑪜'), - ('𑪞', '𑪢'), - ('𑴀', '𑴆'), - ('𑴈', '𑴉'), - ('𑴋', '\u{11d36}'), - ('\u{11d3a}', '\u{11d3a}'), - ('\u{11d3c}', '\u{11d3d}'), - ('\u{11d3f}', '\u{11d47}'), - ('𑵐', '𑵙'), - ('𖿡', '𖿡'), - ('𛀂', '𛄞'), - ('𛅰', '𛋻'), - ('🉠', '🉥'), - ('🛓', '🛔'), - ('🛷', '🛸'), - ('🤀', '🤋'), - ('🤟', '🤟'), - ('🤨', '🤯'), - ('🤱', '🤲'), - ('🥌', '🥌'), - ('🥟', '🥫'), - ('🦒', '🦗'), - ('🧐', '🧦'), - ('𬺰', '𮯠'), -]; - -pub const V11_0: &'static [(char, char)] = &[ - ('ՠ', 'ՠ'), - ('ֈ', 'ֈ'), - ('ׯ', 'ׯ'), - ('\u{7fd}', '߿'), - ('\u{8d3}', '\u{8d3}'), - ('\u{9fe}', '\u{9fe}'), - ('੶', '੶'), - ('\u{c04}', '\u{c04}'), - ('಄', '಄'), - ('ᡸ', 'ᡸ'), - ('Ა', 'Ჺ'), - ('Ჽ', 'Ჿ'), - ('⮺', '⮼'), - ('⯓', '⯫'), - ('⯰', '⯾'), - ('⹊', '⹎'), - ('ㄯ', 'ㄯ'), - ('鿫', '鿯'), - ('ꞯ', 'ꞯ'), - ('Ꞹ', 'ꞹ'), - ('ꣾ', '\u{a8ff}'), - ('𐨴', '𐨵'), - ('𐩈', '𐩈'), - ('𐴀', '\u{10d27}'), - ('𐴰', '𐴹'), - ('𐼀', '𐼧'), - ('𐼰', '𐽙'), - ('\u{110cd}', '\u{110cd}'), - ('𑅄', '𑅆'), - ('\u{1133b}', '\u{1133b}'), - ('\u{1145e}', '\u{1145e}'), - ('𑜚', '𑜚'), - ('𑠀', '𑠻'), - ('𑪝', '𑪝'), - ('𑵠', '𑵥'), - ('𑵧', '𑵨'), - ('𑵪', '𑶎'), - ('\u{11d90}', '\u{11d91}'), - ('𑶓', '𑶘'), - ('𑶠', '𑶩'), - ('𑻠', '𑻸'), - ('𖹀', '𖺚'), - ('𘟭', '𘟱'), - ('𝋠', '𝋳'), - ('𝍲', '𝍸'), - ('𞱱', '𞲴'), - ('🄯', '🄯'), - ('🛹', '🛹'), - ('🟕', '🟘'), - ('🥍', '🥏'), - ('🥬', '🥰'), - ('🥳', '🥶'), - ('🥺', '🥺'), - ('🥼', '🥿'), - ('🦘', '🦢'), - ('🦰', '🦹'), - ('🧁', '🧂'), - ('🧧', '🧿'), - ('🩠', '🩭'), -]; - -pub const V12_0: &'static [(char, char)] = &[ - ('౷', '౷'), - ('ຆ', 'ຆ'), - ('ຉ', 'ຉ'), - ('ຌ', 'ຌ'), - ('ຎ', 'ຓ'), - ('ຘ', 'ຘ'), - ('ຠ', 'ຠ'), - ('ຨ', 'ຩ'), - ('ຬ', 'ຬ'), - ('\u{eba}', '\u{eba}'), - ('ᳺ', 'ᳺ'), - ('⯉', '⯉'), - ('⯿', '⯿'), - ('⹏', '⹏'), - ('Ꞻ', 'ꞿ'), - ('Ꟃ', 'Ᶎ'), - ('ꭦ', 'ꭧ'), - ('𐿠', '𐿶'), - ('𑑟', '𑑟'), - ('𑚸', '𑚸'), - ('𑦠', '𑦧'), - ('𑦪', '\u{119d7}'), - ('\u{119da}', '𑧤'), - ('𑪄', '𑪅'), - ('𑿀', '𑿱'), - ('𑿿', '𑿿'), - ('\u{13430}', '\u{13438}'), - ('𖽅', '𖽊'), - ('\u{16f4f}', '\u{16f4f}'), - ('𖽿', '𖾇'), - ('𖿢', '𖿣'), - ('𘟲', '𘟷'), - ('𛅐', '𛅒'), - ('𛅤', '𛅧'), - ('𞄀', '𞄬'), - ('\u{1e130}', '𞄽'), - ('𞅀', '𞅉'), - ('𞅎', '𞅏'), - ('𞋀', '𞋹'), - ('𞋿', '𞋿'), - ('𞥋', '𞥋'), - ('𞴁', '𞴽'), - ('🅬', '🅬'), - ('🛕', '🛕'), - ('🛺', '🛺'), - ('🟠', '🟫'), - ('🤍', '🤏'), - ('🤿', '🤿'), - ('🥱', '🥱'), - ('🥻', '🥻'), - ('🦥', '🦪'), - ('🦮', '🦯'), - ('🦺', '🦿'), - ('🧃', '🧊'), - ('🧍', '🧏'), - ('🨀', '🩓'), - ('🩰', '🩳'), - ('🩸', '🩺'), - ('🪀', '🪂'), - ('🪐', '🪕'), -]; - -pub const V12_1: &'static [(char, char)] = &[('㋿', '㋿')]; - -pub const V13_0: &'static [(char, char)] = &[ - 
('ࢾ', 'ࣇ'), - ('\u{b55}', '\u{b55}'), - ('ഄ', 'ഄ'), - ('\u{d81}', '\u{d81}'), - ('\u{1abf}', '\u{1ac0}'), - ('⮗', '⮗'), - ('⹐', '⹒'), - ('ㆻ', 'ㆿ'), - ('䶶', '䶿'), - ('鿰', '鿼'), - ('Ꟈ', 'ꟊ'), - ('Ꟶ', 'ꟶ'), - ('\u{a82c}', '\u{a82c}'), - ('ꭨ', '꭫'), - ('𐆜', '𐆜'), - ('𐺀', '𐺩'), - ('\u{10eab}', '𐺭'), - ('𐺰', '𐺱'), - ('𐾰', '𐿋'), - ('𑅇', '𑅇'), - ('𑇎', '\u{111cf}'), - ('𑑚', '𑑚'), - ('𑑠', '𑑡'), - ('𑤀', '𑤆'), - ('𑤉', '𑤉'), - ('𑤌', '𑤓'), - ('𑤕', '𑤖'), - ('𑤘', '𑤵'), - ('𑤷', '𑤸'), - ('\u{1193b}', '𑥆'), - ('𑥐', '𑥙'), - ('𑾰', '𑾰'), - ('\u{16fe4}', '\u{16fe4}'), - ('\u{16ff0}', '\u{16ff1}'), - ('𘫳', '𘳕'), - ('𘴀', '𘴈'), - ('🄍', '🄏'), - ('🅭', '🅯'), - ('🆭', '🆭'), - ('🛖', '🛗'), - ('🛻', '🛼'), - ('🢰', '🢱'), - ('🤌', '🤌'), - ('🥲', '🥲'), - ('🥷', '🥸'), - ('🦣', '🦤'), - ('🦫', '🦭'), - ('🧋', '🧋'), - ('🩴', '🩴'), - ('🪃', '🪆'), - ('🪖', '🪨'), - ('🪰', '🪶'), - ('🫀', '🫂'), - ('🫐', '🫖'), - ('🬀', '🮒'), - ('🮔', '🯊'), - ('🯰', '🯹'), - ('𪛗', '𪛝'), - ('𰀀', '𱍊'), -]; - -pub const V14_0: &'static [(char, char)] = &[ - ('؝', '؝'), - ('ࡰ', 'ࢎ'), - ('\u{890}', '\u{891}'), - ('\u{898}', '\u{89f}'), - ('ࢵ', 'ࢵ'), - ('ࣈ', '\u{8d2}'), - ('\u{c3c}', '\u{c3c}'), - ('ౝ', 'ౝ'), - ('ೝ', 'ೝ'), - ('ᜍ', 'ᜍ'), - ('\u{1715}', '\u{1715}'), - ('ᜟ', 'ᜟ'), - ('\u{180f}', '\u{180f}'), - ('\u{1ac1}', '\u{1ace}'), - ('ᭌ', 'ᭌ'), - ('᭽', '᭾'), - ('\u{1dfa}', '\u{1dfa}'), - ('⃀', '⃀'), - ('Ⱟ', 'Ⱟ'), - ('ⱟ', 'ⱟ'), - ('⹓', '⹝'), - ('鿽', '鿿'), - ('Ꟁ', 'ꟁ'), - ('Ꟑ', 'ꟑ'), - ('ꟓ', 'ꟓ'), - ('ꟕ', 'ꟙ'), - ('ꟲ', 'ꟴ'), - ('﯂', '﯂'), - ('﵀', '﵏'), - ('﷏', '﷏'), - ('﷾', '﷿'), - ('𐕰', '𐕺'), - ('𐕼', '𐖊'), - ('𐖌', '𐖒'), - ('𐖔', '𐖕'), - ('𐖗', '𐖡'), - ('𐖣', '𐖱'), - ('𐖳', '𐖹'), - ('𐖻', '𐖼'), - ('𐞀', '𐞅'), - ('𐞇', '𐞰'), - ('𐞲', '𐞺'), - ('𐽰', '𐾉'), - ('\u{11070}', '𑁵'), - ('\u{110c2}', '\u{110c2}'), - ('𑚹', '𑚹'), - ('𑝀', '𑝆'), - ('𑪰', '𑪿'), - ('𒾐', '𒿲'), - ('𖩰', '𖪾'), - ('𖫀', '𖫉'), - ('𚿰', '𚿳'), - ('𚿵', '𚿻'), - ('𚿽', '𚿾'), - ('𛄟', '𛄢'), - ('\u{1cf00}', '\u{1cf2d}'), - ('\u{1cf30}', '\u{1cf46}'), - ('𜽐', '𜿃'), - ('𝇩', '𝇪'), - ('𝼀', '𝼞'), - ('𞊐', '\u{1e2ae}'), - ('𞟠', '𞟦'), - ('𞟨', '𞟫'), - ('𞟭', '𞟮'), - ('𞟰', '𞟾'), - ('🛝', '🛟'), - ('🟰', '🟰'), - ('🥹', '🥹'), - ('🧌', '🧌'), - ('🩻', '🩼'), - ('🪩', '🪬'), - ('🪷', '🪺'), - ('🫃', '🫅'), - ('🫗', '🫙'), - ('🫠', '🫧'), - ('🫰', '🫶'), - ('𪛞', '𪛟'), - ('𫜵', '𫜸'), -]; - -pub const V15_0: &'static [(char, char)] = &[ - ('ೳ', 'ೳ'), - ('\u{ece}', '\u{ece}'), - ('\u{10efd}', '\u{10eff}'), - ('𑈿', '\u{11241}'), - ('𑬀', '𑬉'), - ('\u{11f00}', '𑼐'), - ('𑼒', '\u{11f3a}'), - ('𑼾', '𑽙'), - ('𓐯', '𓐯'), - ('\u{13439}', '\u{13455}'), - ('𛄲', '𛄲'), - ('𛅕', '𛅕'), - ('𝋀', '𝋓'), - ('𝼥', '𝼪'), - ('𞀰', '𞁭'), - ('\u{1e08f}', '\u{1e08f}'), - ('𞓐', '𞓹'), - ('🛜', '🛜'), - ('🝴', '🝶'), - ('🝻', '🝿'), - ('🟙', '🟙'), - ('🩵', '🩷'), - ('🪇', '🪈'), - ('🪭', '🪯'), - ('🪻', '🪽'), - ('🪿', '🪿'), - ('🫎', '🫏'), - ('🫚', '🫛'), - ('🫨', '🫨'), - ('🫷', '🫸'), - ('𫜹', '𫜹'), - ('𱍐', '𲎯'), -]; - -pub const V15_1: &'static [(char, char)] = - &[('⿼', '⿿'), ('㇯', '㇯'), ('𮯰', '𮹝')]; - -pub const V16_0: &'static [(char, char)] = &[ - ('\u{897}', '\u{897}'), - ('᭎', '᭏'), - ('᭿', '᭿'), - ('Ᲊ', 'ᲊ'), - ('␧', '␩'), - ('㇤', '㇥'), - ('Ɤ', 'ꟍ'), - ('Ꟛ', 'Ƛ'), - ('𐗀', '𐗳'), - ('𐵀', '𐵥'), - ('\u{10d69}', '𐶅'), - ('𐶎', '𐶏'), - ('𐻂', '𐻄'), - ('\u{10efc}', '\u{10efc}'), - ('𑎀', '𑎉'), - ('𑎋', '𑎋'), - ('𑎎', '𑎎'), - ('𑎐', '𑎵'), - ('𑎷', '\u{113c0}'), - ('\u{113c2}', '\u{113c2}'), - ('\u{113c5}', '\u{113c5}'), - ('\u{113c7}', '𑏊'), - ('𑏌', '𑏕'), - ('𑏗', '𑏘'), - ('\u{113e1}', '\u{113e2}'), - ('𑛐', '𑛣'), - ('𑯀', '𑯡'), - ('𑯰', '𑯹'), - ('\u{11f5a}', '\u{11f5a}'), - ('𓑠', '𔏺'), - ('𖄀', '𖄹'), - ('𖵀', '𖵹'), - ('𘳿', '𘳿'), - ('𜰀', 
'𜳹'), - ('𜴀', '𜺳'), - ('𞗐', '𞗺'), - ('𞗿', '𞗿'), - ('🢲', '🢻'), - ('🣀', '🣁'), - ('🪉', '🪉'), - ('🪏', '🪏'), - ('🪾', '🪾'), - ('🫆', '🫆'), - ('🫜', '🫜'), - ('🫟', '🫟'), - ('🫩', '🫩'), - ('🯋', '🯯'), -]; - -pub const V1_1: &'static [(char, char)] = &[ - ('\0', 'ǵ'), - ('Ǻ', 'ȗ'), - ('ɐ', 'ʨ'), - ('ʰ', '˞'), - ('ˠ', '˩'), - ('\u{300}', '\u{345}'), - ('\u{360}', '\u{361}'), - ('ʹ', '͵'), - ('ͺ', 'ͺ'), - (';', ';'), - ('΄', 'Ί'), - ('Ό', 'Ό'), - ('Ύ', 'Ρ'), - ('Σ', 'ώ'), - ('ϐ', 'ϖ'), - ('Ϛ', 'Ϛ'), - ('Ϝ', 'Ϝ'), - ('Ϟ', 'Ϟ'), - ('Ϡ', 'Ϡ'), - ('Ϣ', 'ϳ'), - ('Ё', 'Ќ'), - ('Ў', 'я'), - ('ё', 'ќ'), - ('ў', '\u{486}'), - ('Ґ', 'ӄ'), - ('Ӈ', 'ӈ'), - ('Ӌ', 'ӌ'), - ('Ӑ', 'ӫ'), - ('Ӯ', 'ӵ'), - ('Ӹ', 'ӹ'), - ('Ա', 'Ֆ'), - ('ՙ', '՟'), - ('ա', 'և'), - ('։', '։'), - ('\u{5b0}', '\u{5b9}'), - ('\u{5bb}', '׃'), - ('א', 'ת'), - ('װ', '״'), - ('،', '،'), - ('؛', '؛'), - ('؟', '؟'), - ('ء', 'غ'), - ('ـ', '\u{652}'), - ('٠', '٭'), - ('\u{670}', 'ڷ'), - ('ں', 'ھ'), - ('ۀ', 'ێ'), - ('ې', '\u{6ed}'), - ('۰', '۹'), - ('\u{901}', 'ः'), - ('अ', 'ह'), - ('\u{93c}', '\u{94d}'), - ('ॐ', '\u{954}'), - ('क़', '॰'), - ('\u{981}', 'ঃ'), - ('অ', 'ঌ'), - ('এ', 'ঐ'), - ('ও', 'ন'), - ('প', 'র'), - ('ল', 'ল'), - ('শ', 'হ'), - ('\u{9bc}', '\u{9bc}'), - ('\u{9be}', '\u{9c4}'), - ('ে', 'ৈ'), - ('ো', '\u{9cd}'), - ('\u{9d7}', '\u{9d7}'), - ('ড়', 'ঢ়'), - ('য়', '\u{9e3}'), - ('০', '৺'), - ('\u{a02}', '\u{a02}'), - ('ਅ', 'ਊ'), - ('ਏ', 'ਐ'), - ('ਓ', 'ਨ'), - ('ਪ', 'ਰ'), - ('ਲ', 'ਲ਼'), - ('ਵ', 'ਸ਼'), - ('ਸ', 'ਹ'), - ('\u{a3c}', '\u{a3c}'), - ('ਾ', '\u{a42}'), - ('\u{a47}', '\u{a48}'), - ('\u{a4b}', '\u{a4d}'), - ('ਖ਼', 'ੜ'), - ('ਫ਼', 'ਫ਼'), - ('੦', 'ੴ'), - ('\u{a81}', 'ઃ'), - ('અ', 'ઋ'), - ('ઍ', 'ઍ'), - ('એ', 'ઑ'), - ('ઓ', 'ન'), - ('પ', 'ર'), - ('લ', 'ળ'), - ('વ', 'હ'), - ('\u{abc}', '\u{ac5}'), - ('\u{ac7}', 'ૉ'), - ('ો', '\u{acd}'), - ('ૐ', 'ૐ'), - ('ૠ', 'ૠ'), - ('૦', '૯'), - ('\u{b01}', 'ଃ'), - ('ଅ', 'ଌ'), - ('ଏ', 'ଐ'), - ('ଓ', 'ନ'), - ('ପ', 'ର'), - ('ଲ', 'ଳ'), - ('ଶ', 'ହ'), - ('\u{b3c}', '\u{b43}'), - ('େ', 'ୈ'), - ('ୋ', '\u{b4d}'), - ('\u{b56}', '\u{b57}'), - ('ଡ଼', 'ଢ଼'), - ('ୟ', 'ୡ'), - ('୦', '୰'), - ('\u{b82}', 'ஃ'), - ('அ', 'ஊ'), - ('எ', 'ஐ'), - ('ஒ', 'க'), - ('ங', 'ச'), - ('ஜ', 'ஜ'), - ('ஞ', 'ட'), - ('ண', 'த'), - ('ந', 'ப'), - ('ம', 'வ'), - ('ஷ', 'ஹ'), - ('\u{bbe}', 'ூ'), - ('ெ', 'ை'), - ('ொ', '\u{bcd}'), - ('\u{bd7}', '\u{bd7}'), - ('௧', '௲'), - ('ఁ', 'ః'), - ('అ', 'ఌ'), - ('ఎ', 'ఐ'), - ('ఒ', 'న'), - ('ప', 'ళ'), - ('వ', 'హ'), - ('\u{c3e}', 'ౄ'), - ('\u{c46}', '\u{c48}'), - ('\u{c4a}', '\u{c4d}'), - ('\u{c55}', '\u{c56}'), - ('ౠ', 'ౡ'), - ('౦', '౯'), - ('ಂ', 'ಃ'), - ('ಅ', 'ಌ'), - ('ಎ', 'ಐ'), - ('ಒ', 'ನ'), - ('ಪ', 'ಳ'), - ('ವ', 'ಹ'), - ('ಾ', 'ೄ'), - ('\u{cc6}', '\u{cc8}'), - ('\u{cca}', '\u{ccd}'), - ('\u{cd5}', '\u{cd6}'), - ('ೞ', 'ೞ'), - ('ೠ', 'ೡ'), - ('೦', '೯'), - ('ം', 'ഃ'), - ('അ', 'ഌ'), - ('എ', 'ഐ'), - ('ഒ', 'ന'), - ('പ', 'ഹ'), - ('\u{d3e}', '\u{d43}'), - ('െ', 'ൈ'), - ('ൊ', '\u{d4d}'), - ('\u{d57}', '\u{d57}'), - ('ൠ', 'ൡ'), - ('൦', '൯'), - ('ก', '\u{e3a}'), - ('฿', '๛'), - ('ກ', 'ຂ'), - ('ຄ', 'ຄ'), - ('ງ', 'ຈ'), - ('ຊ', 'ຊ'), - ('ຍ', 'ຍ'), - ('ດ', 'ທ'), - ('ນ', 'ຟ'), - ('ມ', 'ຣ'), - ('ລ', 'ລ'), - ('ວ', 'ວ'), - ('ສ', 'ຫ'), - ('ອ', '\u{eb9}'), - ('\u{ebb}', 'ຽ'), - ('ເ', 'ໄ'), - ('ໆ', 'ໆ'), - ('\u{ec8}', '\u{ecd}'), - ('໐', '໙'), - ('ໜ', 'ໝ'), - ('Ⴀ', 'Ⴥ'), - ('ა', 'ჶ'), - ('჻', '჻'), - ('ᄀ', 'ᅙ'), - ('ᅟ', 'ᆢ'), - ('ᆨ', 'ᇹ'), - ('Ḁ', 'ẚ'), - ('Ạ', 'ỹ'), - ('ἀ', 'ἕ'), - ('Ἐ', 'Ἕ'), - ('ἠ', 'ὅ'), - ('Ὀ', 'Ὅ'), - ('ὐ', 'ὗ'), - ('Ὑ', 'Ὑ'), - ('Ὓ', 'Ὓ'), - ('Ὕ', 'Ὕ'), - ('Ὗ', 'ώ'), - ('ᾀ', 'ᾴ'), - ('ᾶ', 'ῄ'), - ('ῆ', 'ΐ'), - ('ῖ', 'Ί'), 
- ('῝', '`'), - ('ῲ', 'ῴ'), - ('ῶ', '῾'), - ('\u{2000}', '\u{202e}'), - ('‰', '⁆'), - ('\u{206a}', '⁰'), - ('⁴', '₎'), - ('₠', '₪'), - ('\u{20d0}', '\u{20e1}'), - ('℀', 'ℸ'), - ('⅓', 'ↂ'), - ('←', '⇪'), - ('∀', '⋱'), - ('⌀', '⌀'), - ('⌂', '⍺'), - ('␀', '␤'), - ('⑀', '⑊'), - ('①', '⓪'), - ('─', '▕'), - ('■', '◯'), - ('☀', '☓'), - ('☚', '♯'), - ('✁', '✄'), - ('✆', '✉'), - ('✌', '✧'), - ('✩', '❋'), - ('❍', '❍'), - ('❏', '❒'), - ('❖', '❖'), - ('❘', '❞'), - ('❡', '❧'), - ('❶', '➔'), - ('➘', '➯'), - ('➱', '➾'), - ('\u{3000}', '〷'), - ('〿', '〿'), - ('ぁ', 'ゔ'), - ('\u{3099}', 'ゞ'), - ('ァ', 'ヾ'), - ('ㄅ', 'ㄬ'), - ('ㄱ', 'ㆎ'), - ('㆐', '㆟'), - ('㈀', '㈜'), - ('㈠', '㉃'), - ('㉠', '㉻'), - ('㉿', '㊰'), - ('㋀', '㋋'), - ('㋐', '㋾'), - ('㌀', '㍶'), - ('㍻', '㏝'), - ('㏠', '㏾'), - ('一', '龥'), - ('\u{e000}', '鶴'), - ('ff', 'st'), - ('ﬓ', 'ﬗ'), - ('\u{fb1e}', 'זּ'), - ('טּ', 'לּ'), - ('מּ', 'מּ'), - ('נּ', 'סּ'), - ('ףּ', 'פּ'), - ('צּ', 'ﮱ'), - ('ﯓ', '﴿'), - ('ﵐ', 'ﶏ'), - ('ﶒ', 'ﷇ'), - ('ﷰ', 'ﷻ'), - ('\u{fe20}', '\u{fe23}'), - ('︰', '﹄'), - ('﹉', '﹒'), - ('﹔', '﹦'), - ('﹨', '﹫'), - ('ﹰ', 'ﹲ'), - ('ﹴ', 'ﹴ'), - ('ﹶ', 'ﻼ'), - ('\u{feff}', '\u{feff}'), - ('!', '~'), - ('。', 'ᄒ'), - ('ᅡ', 'ᅦ'), - ('ᅧ', 'ᅬ'), - ('ᅭ', 'ᅲ'), - ('ᅳ', 'ᅵ'), - ('¢', '₩'), - ('│', '○'), - ('�', '\u{ffff}'), -]; - -pub const V2_0: &'static [(char, char)] = &[ - ('\u{591}', '\u{5a1}'), - ('\u{5a3}', '\u{5af}'), - ('\u{5c4}', '\u{5c4}'), - ('ༀ', 'ཇ'), - ('ཉ', 'ཀྵ'), - ('\u{f71}', 'ྋ'), - ('\u{f90}', '\u{f95}'), - ('\u{f97}', '\u{f97}'), - ('\u{f99}', '\u{fad}'), - ('\u{fb1}', '\u{fb7}'), - ('\u{fb9}', '\u{fb9}'), - ('ẛ', 'ẛ'), - ('₫', '₫'), - ('가', '힣'), - ('\u{1fffe}', '\u{1ffff}'), - ('\u{2fffe}', '\u{2ffff}'), - ('\u{3fffe}', '\u{3ffff}'), - ('\u{4fffe}', '\u{4ffff}'), - ('\u{5fffe}', '\u{5ffff}'), - ('\u{6fffe}', '\u{6ffff}'), - ('\u{7fffe}', '\u{7ffff}'), - ('\u{8fffe}', '\u{8ffff}'), - ('\u{9fffe}', '\u{9ffff}'), - ('\u{afffe}', '\u{affff}'), - ('\u{bfffe}', '\u{bffff}'), - ('\u{cfffe}', '\u{cffff}'), - ('\u{dfffe}', '\u{dffff}'), - ('\u{efffe}', '\u{10ffff}'), -]; - -pub const V2_1: &'static [(char, char)] = &[('€', '€'), ('', '')]; - -pub const V3_0: &'static [(char, char)] = &[ - ('Ƕ', 'ǹ'), - ('Ș', 'ȟ'), - ('Ȣ', 'ȳ'), - ('ʩ', 'ʭ'), - ('˟', '˟'), - ('˪', 'ˮ'), - ('\u{346}', '\u{34e}'), - ('\u{362}', '\u{362}'), - ('ϗ', 'ϗ'), - ('ϛ', 'ϛ'), - ('ϝ', 'ϝ'), - ('ϟ', 'ϟ'), - ('ϡ', 'ϡ'), - ('Ѐ', 'Ѐ'), - ('Ѝ', 'Ѝ'), - ('ѐ', 'ѐ'), - ('ѝ', 'ѝ'), - ('\u{488}', '\u{489}'), - ('Ҍ', 'ҏ'), - ('Ӭ', 'ӭ'), - ('֊', '֊'), - ('\u{653}', '\u{655}'), - ('ڸ', 'ڹ'), - ('ڿ', 'ڿ'), - ('ۏ', 'ۏ'), - ('ۺ', '۾'), - ('܀', '܍'), - ('\u{70f}', 'ܬ'), - ('\u{730}', '\u{74a}'), - ('ހ', '\u{7b0}'), - ('ං', 'ඃ'), - ('අ', 'ඖ'), - ('ක', 'න'), - ('ඳ', 'ර'), - ('ල', 'ල'), - ('ව', 'ෆ'), - ('\u{dca}', '\u{dca}'), - ('\u{dcf}', '\u{dd4}'), - ('\u{dd6}', '\u{dd6}'), - ('ෘ', '\u{ddf}'), - ('ෲ', '෴'), - ('ཪ', 'ཪ'), - ('\u{f96}', '\u{f96}'), - ('\u{fae}', '\u{fb0}'), - ('\u{fb8}', '\u{fb8}'), - ('\u{fba}', '\u{fbc}'), - ('྾', '࿌'), - ('࿏', '࿏'), - ('က', 'အ'), - ('ဣ', 'ဧ'), - ('ဩ', 'ဪ'), - ('ာ', '\u{1032}'), - ('\u{1036}', '\u{1039}'), - ('၀', '\u{1059}'), - ('ሀ', 'ሆ'), - ('ለ', 'ቆ'), - ('ቈ', 'ቈ'), - ('ቊ', 'ቍ'), - ('ቐ', 'ቖ'), - ('ቘ', 'ቘ'), - ('ቚ', 'ቝ'), - ('በ', 'ኆ'), - ('ኈ', 'ኈ'), - ('ኊ', 'ኍ'), - ('ነ', 'ኮ'), - ('ኰ', 'ኰ'), - ('ኲ', 'ኵ'), - ('ኸ', 'ኾ'), - ('ዀ', 'ዀ'), - ('ዂ', 'ዅ'), - ('ወ', 'ዎ'), - ('ዐ', 'ዖ'), - ('ዘ', 'ዮ'), - ('ደ', 'ጎ'), - ('ጐ', 'ጐ'), - ('ጒ', 'ጕ'), - ('ጘ', 'ጞ'), - ('ጠ', 'ፆ'), - ('ፈ', 'ፚ'), - ('፡', '፼'), - ('Ꭰ', 'Ᏼ'), - ('ᐁ', 'ᙶ'), - ('\u{1680}', '᚜'), - ('ᚠ', 'ᛰ'), - ('ក', 'ៜ'), - 
('០', '៩'), - ('᠀', '\u{180e}'), - ('᠐', '᠙'), - ('ᠠ', 'ᡷ'), - ('ᢀ', '\u{18a9}'), - ('\u{202f}', '\u{202f}'), - ('⁈', '⁍'), - ('₭', '₯'), - ('\u{20e2}', '\u{20e3}'), - ('ℹ', '℺'), - ('Ↄ', 'Ↄ'), - ('⇫', '⇳'), - ('⌁', '⌁'), - ('⍻', '⍻'), - ('⍽', '⎚'), - ('␥', '␦'), - ('◰', '◷'), - ('☙', '☙'), - ('♰', '♱'), - ('⠀', '⣿'), - ('⺀', '⺙'), - ('⺛', '⻳'), - ('⼀', '⿕'), - ('⿰', '⿻'), - ('〸', '〺'), - ('〾', '〾'), - ('ㆠ', 'ㆷ'), - ('㐀', '䶵'), - ('ꀀ', 'ꒌ'), - ('꒐', '꒡'), - ('꒤', '꒳'), - ('꒵', '꓀'), - ('꓂', '꓄'), - ('꓆', '꓆'), - ('יִ', 'יִ'), - ('\u{fff9}', '\u{fffb}'), -]; - -pub const V3_1: &'static [(char, char)] = &[ - ('ϴ', 'ϵ'), - ('\u{fdd0}', '\u{fdef}'), - ('𐌀', '𐌞'), - ('𐌠', '𐌣'), - ('𐌰', '𐍊'), - ('𐐀', '𐐥'), - ('𐐨', '𐑍'), - ('𝀀', '𝃵'), - ('𝄀', '𝄦'), - ('𝄪', '𝇝'), - ('𝐀', '𝑔'), - ('𝑖', '𝒜'), - ('𝒞', '𝒟'), - ('𝒢', '𝒢'), - ('𝒥', '𝒦'), - ('𝒩', '𝒬'), - ('𝒮', '𝒹'), - ('𝒻', '𝒻'), - ('𝒽', '𝓀'), - ('𝓂', '𝓃'), - ('𝓅', '𝔅'), - ('𝔇', '𝔊'), - ('𝔍', '𝔔'), - ('𝔖', '𝔜'), - ('𝔞', '𝔹'), - ('𝔻', '𝔾'), - ('𝕀', '𝕄'), - ('𝕆', '𝕆'), - ('𝕊', '𝕐'), - ('𝕒', '𝚣'), - ('𝚨', '𝟉'), - ('𝟎', '𝟿'), - ('𠀀', '𪛖'), - ('丽', '𪘀'), - ('\u{e0001}', '\u{e0001}'), - ('\u{e0020}', '\u{e007f}'), -]; - -pub const V3_2: &'static [(char, char)] = &[ - ('Ƞ', 'Ƞ'), - ('\u{34f}', '\u{34f}'), - ('\u{363}', '\u{36f}'), - ('Ϙ', 'ϙ'), - ('϶', '϶'), - ('Ҋ', 'ҋ'), - ('Ӆ', 'ӆ'), - ('Ӊ', 'ӊ'), - ('Ӎ', 'ӎ'), - ('Ԁ', 'ԏ'), - ('ٮ', 'ٯ'), - ('ޱ', 'ޱ'), - ('ჷ', 'ჸ'), - ('ᜀ', 'ᜌ'), - ('ᜎ', '\u{1714}'), - ('ᜠ', '᜶'), - ('ᝀ', '\u{1753}'), - ('ᝠ', 'ᝬ'), - ('ᝮ', 'ᝰ'), - ('\u{1772}', '\u{1773}'), - ('⁇', '⁇'), - ('⁎', '⁒'), - ('⁗', '⁗'), - ('\u{205f}', '\u{2063}'), - ('ⁱ', 'ⁱ'), - ('₰', '₱'), - ('\u{20e4}', '\u{20ea}'), - ('ℽ', '⅋'), - ('⇴', '⇿'), - ('⋲', '⋿'), - ('⍼', '⍼'), - ('⎛', '⏎'), - ('⓫', '⓾'), - ('▖', '▟'), - ('◸', '◿'), - ('☖', '☗'), - ('♲', '♽'), - ('⚀', '⚉'), - ('❨', '❵'), - ('⟐', '⟫'), - ('⟰', '⟿'), - ('⤀', '⫿'), - ('〻', '〽'), - ('ゕ', 'ゖ'), - ('ゟ', '゠'), - ('ヿ', 'ヿ'), - ('ㇰ', 'ㇿ'), - ('㉑', '㉟'), - ('㊱', '㊿'), - ('꒢', '꒣'), - ('꒴', '꒴'), - ('꓁', '꓁'), - ('꓅', '꓅'), - ('侮', '頻'), - ('﷼', '﷼'), - ('\u{fe00}', '\u{fe0f}'), - ('﹅', '﹆'), - ('ﹳ', 'ﹳ'), - ('⦅', '⦆'), -]; - -pub const V4_0: &'static [(char, char)] = &[ - ('ȡ', 'ȡ'), - ('ȴ', 'ȶ'), - ('ʮ', 'ʯ'), - ('˯', '˿'), - ('\u{350}', '\u{357}'), - ('\u{35d}', '\u{35f}'), - ('Ϸ', 'ϻ'), - ('\u{600}', '\u{603}'), - ('؍', '\u{615}'), - ('\u{656}', '\u{658}'), - ('ۮ', 'ۯ'), - ('ۿ', 'ۿ'), - ('ܭ', 'ܯ'), - ('ݍ', 'ݏ'), - ('ऄ', 'ऄ'), - ('ঽ', 'ঽ'), - ('\u{a01}', '\u{a01}'), - ('ਃ', 'ਃ'), - ('ઌ', 'ઌ'), - ('ૡ', '\u{ae3}'), - ('૱', '૱'), - ('ଵ', 'ଵ'), - ('ୱ', 'ୱ'), - ('௳', '௺'), - ('\u{cbc}', 'ಽ'), - ('\u{17dd}', '\u{17dd}'), - ('៰', '៹'), - ('ᤀ', 'ᤜ'), - ('\u{1920}', 'ᤫ'), - ('ᤰ', '\u{193b}'), - ('᥀', '᥀'), - ('᥄', 'ᥭ'), - ('ᥰ', 'ᥴ'), - ('᧠', '᧿'), - ('ᴀ', 'ᵫ'), - ('⁓', '⁔'), - ('℻', '℻'), - ('⏏', '⏐'), - ('⓿', '⓿'), - ('☔', '☕'), - ('⚊', '⚑'), - ('⚠', '⚡'), - ('⬀', '⬍'), - ('㈝', '㈞'), - ('㉐', '㉐'), - ('㉼', '㉽'), - ('㋌', '㋏'), - ('㍷', '㍺'), - ('㏞', '㏟'), - ('㏿', '㏿'), - ('䷀', '䷿'), - ('﷽', '﷽'), - ('﹇', '﹈'), - ('𐀀', '𐀋'), - ('𐀍', '𐀦'), - ('𐀨', '𐀺'), - ('𐀼', '𐀽'), - ('𐀿', '𐁍'), - ('𐁐', '𐁝'), - ('𐂀', '𐃺'), - ('𐄀', '𐄂'), - ('𐄇', '𐄳'), - ('𐄷', '𐄿'), - ('𐎀', '𐎝'), - ('𐎟', '𐎟'), - ('𐐦', '𐐧'), - ('𐑎', '𐒝'), - ('𐒠', '𐒩'), - ('𐠀', '𐠅'), - ('𐠈', '𐠈'), - ('𐠊', '𐠵'), - ('𐠷', '𐠸'), - ('𐠼', '𐠼'), - ('𐠿', '𐠿'), - ('𝌀', '𝍖'), - ('𝓁', '𝓁'), - ('\u{e0100}', '\u{e01ef}'), -]; - -pub const V4_1: &'static [(char, char)] = &[ - ('ȷ', 'Ɂ'), - ('\u{358}', '\u{35c}'), - ('ϼ', 'Ͽ'), - ('Ӷ', 'ӷ'), - ('\u{5a2}', '\u{5a2}'), - ('\u{5c5}', 
'\u{5c7}'), - ('؋', '؋'), - ('؞', '؞'), - ('\u{659}', '\u{65e}'), - ('ݐ', 'ݭ'), - ('ॽ', 'ॽ'), - ('ৎ', 'ৎ'), - ('ஶ', 'ஶ'), - ('௦', '௦'), - ('࿐', '࿑'), - ('ჹ', 'ჺ'), - ('ჼ', 'ჼ'), - ('ሇ', 'ሇ'), - ('ቇ', 'ቇ'), - ('ኇ', 'ኇ'), - ('ኯ', 'ኯ'), - ('ዏ', 'ዏ'), - ('ዯ', 'ዯ'), - ('ጏ', 'ጏ'), - ('ጟ', 'ጟ'), - ('ፇ', 'ፇ'), - ('\u{135f}', '፠'), - ('ᎀ', '᎙'), - ('ᦀ', 'ᦩ'), - ('ᦰ', 'ᧉ'), - ('᧐', '᧙'), - ('᧞', '᧟'), - ('ᨀ', '\u{1a1b}'), - ('᨞', '᨟'), - ('ᵬ', '\u{1dc3}'), - ('⁕', '⁖'), - ('⁘', '⁞'), - ('ₐ', 'ₔ'), - ('₲', '₵'), - ('\u{20eb}', '\u{20eb}'), - ('ℼ', 'ℼ'), - ('⅌', '⅌'), - ('⏑', '⏛'), - ('☘', '☘'), - ('♾', '♿'), - ('⚒', '⚜'), - ('⚢', '⚱'), - ('⟀', '⟆'), - ('⬎', '⬓'), - ('Ⰰ', 'Ⱞ'), - ('ⰰ', 'ⱞ'), - ('Ⲁ', '⳪'), - ('⳹', 'ⴥ'), - ('ⴰ', 'ⵥ'), - ('ⵯ', 'ⵯ'), - ('ⶀ', 'ⶖ'), - ('ⶠ', 'ⶦ'), - ('ⶨ', 'ⶮ'), - ('ⶰ', 'ⶶ'), - ('ⶸ', 'ⶾ'), - ('ⷀ', 'ⷆ'), - ('ⷈ', 'ⷎ'), - ('ⷐ', 'ⷖ'), - ('ⷘ', 'ⷞ'), - ('⸀', '⸗'), - ('⸜', '⸝'), - ('㇀', '㇏'), - ('㉾', '㉾'), - ('龦', '龻'), - ('꜀', '꜖'), - ('ꠀ', '꠫'), - ('並', '龎'), - ('︐', '︙'), - ('𐅀', '𐆊'), - ('𐎠', '𐏃'), - ('𐏈', '𐏕'), - ('𐨀', '\u{10a03}'), - ('\u{10a05}', '\u{10a06}'), - ('\u{10a0c}', '𐨓'), - ('𐨕', '𐨗'), - ('𐨙', '𐨳'), - ('\u{10a38}', '\u{10a3a}'), - ('\u{10a3f}', '𐩇'), - ('𐩐', '𐩘'), - ('𝈀', '𝉅'), - ('𝚤', '𝚥'), -]; - -pub const V5_0: &'static [(char, char)] = &[ - ('ɂ', 'ɏ'), - ('ͻ', 'ͽ'), - ('ӏ', 'ӏ'), - ('Ӻ', 'ӿ'), - ('Ԑ', 'ԓ'), - ('\u{5ba}', '\u{5ba}'), - ('߀', 'ߺ'), - ('ॻ', 'ॼ'), - ('ॾ', 'ॿ'), - ('\u{ce2}', '\u{ce3}'), - ('ೱ', 'ೲ'), - ('\u{1b00}', 'ᭋ'), - ('᭐', '᭼'), - ('\u{1dc4}', '\u{1dca}'), - ('\u{1dfe}', '\u{1dff}'), - ('\u{20ec}', '\u{20ef}'), - ('⅍', 'ⅎ'), - ('ↄ', 'ↄ'), - ('⏜', '⏧'), - ('⚲', '⚲'), - ('⟇', '⟊'), - ('⬔', '⬚'), - ('⬠', '⬣'), - ('Ⱡ', 'ⱬ'), - ('ⱴ', 'ⱷ'), - ('ꜗ', 'ꜚ'), - ('꜠', '꜡'), - ('ꡀ', '꡷'), - ('𐤀', '𐤙'), - ('𐤟', '𐤟'), - ('𒀀', '𒍮'), - ('𒐀', '𒑢'), - ('𒑰', '𒑳'), - ('𝍠', '𝍱'), - ('𝟊', '𝟋'), -]; - -pub const V5_1: &'static [(char, char)] = &[ - ('Ͱ', 'ͳ'), - ('Ͷ', 'ͷ'), - ('Ϗ', 'Ϗ'), - ('\u{487}', '\u{487}'), - ('Ԕ', 'ԣ'), - ('؆', '؊'), - ('\u{616}', '\u{61a}'), - ('ػ', 'ؿ'), - ('ݮ', 'ݿ'), - ('ॱ', 'ॲ'), - ('\u{a51}', '\u{a51}'), - ('\u{a75}', '\u{a75}'), - ('\u{b44}', '\u{b44}'), - ('\u{b62}', '\u{b63}'), - ('ௐ', 'ௐ'), - ('ఽ', 'ఽ'), - ('ౘ', 'ౙ'), - ('\u{c62}', '\u{c63}'), - ('౸', '౿'), - ('ഽ', 'ഽ'), - ('\u{d44}', '\u{d44}'), - ('\u{d62}', '\u{d63}'), - ('൰', '൵'), - ('൹', 'ൿ'), - ('ཫ', 'ཬ'), - ('࿎', '࿎'), - ('࿒', '࿔'), - ('ဢ', 'ဢ'), - ('ဨ', 'ဨ'), - ('ါ', 'ါ'), - ('\u{1033}', '\u{1035}'), - ('\u{103a}', 'ဿ'), - ('ၚ', '႙'), - ('႞', '႟'), - ('ᢪ', 'ᢪ'), - ('\u{1b80}', '\u{1baa}'), - ('ᮮ', '᮹'), - ('ᰀ', '\u{1c37}'), - ('᰻', '᱉'), - ('ᱍ', '᱿'), - ('\u{1dcb}', '\u{1de6}'), - ('ẜ', 'ẟ'), - ('Ỻ', 'ỿ'), - ('\u{2064}', '\u{2064}'), - ('\u{20f0}', '\u{20f0}'), - ('⅏', '⅏'), - ('ↅ', 'ↈ'), - ('⚝', '⚝'), - ('⚳', '⚼'), - ('⛀', '⛃'), - ('⟌', '⟌'), - ('⟬', '⟯'), - ('⬛', '⬟'), - ('⬤', '⭌'), - ('⭐', '⭔'), - ('Ɑ', 'Ɐ'), - ('ⱱ', 'ⱳ'), - ('ⱸ', 'ⱽ'), - ('\u{2de0}', '\u{2dff}'), - ('⸘', '⸛'), - ('⸞', '⸰'), - ('ㄭ', 'ㄭ'), - ('㇐', '㇣'), - ('龼', '鿃'), - ('ꔀ', 'ꘫ'), - ('Ꙁ', 'ꙟ'), - ('Ꙣ', '꙳'), - ('\u{a67c}', 'ꚗ'), - ('ꜛ', 'ꜟ'), - ('Ꜣ', 'ꞌ'), - ('ꟻ', 'ꟿ'), - ('ꢀ', '\u{a8c4}'), - ('꣎', '꣙'), - ('꤀', '\u{a953}'), - ('꥟', '꥟'), - ('ꨀ', '\u{aa36}'), - ('ꩀ', 'ꩍ'), - ('꩐', '꩙'), - ('꩜', '꩟'), - ('\u{fe24}', '\u{fe26}'), - ('𐆐', '𐆛'), - ('𐇐', '\u{101fd}'), - ('𐊀', '𐊜'), - ('𐊠', '𐋐'), - ('𐤠', '𐤹'), - ('𐤿', '𐤿'), - ('𝄩', '𝄩'), - ('🀀', '🀫'), - ('🀰', '🂓'), -]; - -pub const V5_2: &'static [(char, char)] = &[ - ('Ԥ', 'ԥ'), - ('ࠀ', '\u{82d}'), - ('࠰', '࠾'), - ('\u{900}', '\u{900}'), - ('ॎ', 'ॎ'), - 
('\u{955}', '\u{955}'), - ('ॹ', 'ॺ'), - ('৻', '৻'), - ('࿕', '࿘'), - ('ႚ', '\u{109d}'), - ('ᅚ', 'ᅞ'), - ('ᆣ', 'ᆧ'), - ('ᇺ', 'ᇿ'), - ('᐀', '᐀'), - ('ᙷ', 'ᙿ'), - ('ᢰ', 'ᣵ'), - ('ᦪ', 'ᦫ'), - ('᧚', '᧚'), - ('ᨠ', '\u{1a5e}'), - ('\u{1a60}', '\u{1a7c}'), - ('\u{1a7f}', '᪉'), - ('᪐', '᪙'), - ('᪠', '᪭'), - ('\u{1cd0}', 'ᳲ'), - ('\u{1dfd}', '\u{1dfd}'), - ('₶', '₸'), - ('⅐', '⅒'), - ('↉', '↉'), - ('⏨', '⏨'), - ('⚞', '⚟'), - ('⚽', '⚿'), - ('⛄', '⛍'), - ('⛏', '⛡'), - ('⛣', '⛣'), - ('⛨', '⛿'), - ('❗', '❗'), - ('⭕', '⭙'), - ('Ɒ', 'Ɒ'), - ('Ȿ', 'Ɀ'), - ('Ⳬ', '\u{2cf1}'), - ('⸱', '⸱'), - ('㉄', '㉏'), - ('鿄', '鿋'), - ('ꓐ', '꓿'), - ('ꚠ', '꛷'), - ('꠰', '꠹'), - ('\u{a8e0}', 'ꣻ'), - ('ꥠ', 'ꥼ'), - ('\u{a980}', '꧍'), - ('ꧏ', '꧙'), - ('꧞', '꧟'), - ('ꩠ', 'ꩻ'), - ('ꪀ', 'ꫂ'), - ('ꫛ', '꫟'), - ('ꯀ', '\u{abed}'), - ('꯰', '꯹'), - ('ힰ', 'ퟆ'), - ('ퟋ', 'ퟻ'), - ('恵', '舘'), - ('𐡀', '𐡕'), - ('𐡗', '𐡟'), - ('𐤚', '𐤛'), - ('𐩠', '𐩿'), - ('𐬀', '𐬵'), - ('𐬹', '𐭕'), - ('𐭘', '𐭲'), - ('𐭸', '𐭿'), - ('𐰀', '𐱈'), - ('𐹠', '𐹾'), - ('\u{11080}', '𑃁'), - ('𓀀', '𓐮'), - ('🄀', '🄊'), - ('🄐', '🄮'), - ('🄱', '🄱'), - ('🄽', '🄽'), - ('🄿', '🄿'), - ('🅂', '🅂'), - ('🅆', '🅆'), - ('🅊', '🅎'), - ('🅗', '🅗'), - ('🅟', '🅟'), - ('🅹', '🅹'), - ('🅻', '🅼'), - ('🅿', '🅿'), - ('🆊', '🆍'), - ('🆐', '🆐'), - ('🈀', '🈀'), - ('🈐', '🈱'), - ('🉀', '🉈'), - ('𪜀', '𫜴'), -]; - -pub const V6_0: &'static [(char, char)] = &[ - ('Ԧ', 'ԧ'), - ('ؠ', 'ؠ'), - ('\u{65f}', '\u{65f}'), - ('ࡀ', '\u{85b}'), - ('࡞', '࡞'), - ('\u{93a}', 'ऻ'), - ('ॏ', 'ॏ'), - ('\u{956}', '\u{957}'), - ('ॳ', 'ॷ'), - ('୲', '୷'), - ('ഩ', 'ഩ'), - ('ഺ', 'ഺ'), - ('ൎ', 'ൎ'), - ('ྌ', '\u{f8f}'), - ('࿙', '࿚'), - ('\u{135d}', '\u{135e}'), - ('ᯀ', '\u{1bf3}'), - ('᯼', '᯿'), - ('\u{1dfc}', '\u{1dfc}'), - ('ₕ', 'ₜ'), - ('₹', '₹'), - ('⏩', '⏳'), - ('⛎', '⛎'), - ('⛢', '⛢'), - ('⛤', '⛧'), - ('✅', '✅'), - ('✊', '✋'), - ('✨', '✨'), - ('❌', '❌'), - ('❎', '❎'), - ('❓', '❕'), - ('❟', '❠'), - ('➕', '➗'), - ('➰', '➰'), - ('➿', '➿'), - ('⟎', '⟏'), - ('⵰', '⵰'), - ('\u{2d7f}', '\u{2d7f}'), - ('ㆸ', 'ㆺ'), - ('Ꙡ', 'ꙡ'), - ('Ɥ', 'ꞎ'), - ('Ꞑ', 'ꞑ'), - ('Ꞡ', 'ꞩ'), - ('ꟺ', 'ꟺ'), - ('ꬁ', 'ꬆ'), - ('ꬉ', 'ꬎ'), - ('ꬑ', 'ꬖ'), - ('ꬠ', 'ꬦ'), - ('ꬨ', 'ꬮ'), - ('﮲', '﯁'), - ('𑀀', '𑁍'), - ('𑁒', '𑁯'), - ('𖠀', '𖨸'), - ('𛀀', '𛀁'), - ('🂠', '🂮'), - ('🂱', '🂾'), - ('🃁', '🃏'), - ('🃑', '🃟'), - ('🄰', '🄰'), - ('🄲', '🄼'), - ('🄾', '🄾'), - ('🅀', '🅁'), - ('🅃', '🅅'), - ('🅇', '🅉'), - ('🅏', '🅖'), - ('🅘', '🅞'), - ('🅠', '🅩'), - ('🅰', '🅸'), - ('🅺', '🅺'), - ('🅽', '🅾'), - ('🆀', '🆉'), - ('🆎', '🆏'), - ('🆑', '🆚'), - ('🇦', '🇿'), - ('🈁', '🈂'), - ('🈲', '🈺'), - ('🉐', '🉑'), - ('🌀', '🌠'), - ('🌰', '🌵'), - ('🌷', '🍼'), - ('🎀', '🎓'), - ('🎠', '🏄'), - ('🏆', '🏊'), - ('🏠', '🏰'), - ('🐀', '🐾'), - ('👀', '👀'), - ('👂', '📷'), - ('📹', '📼'), - ('🔀', '🔽'), - ('🕐', '🕧'), - ('🗻', '🗿'), - ('😁', '😐'), - ('😒', '😔'), - ('😖', '😖'), - ('😘', '😘'), - ('😚', '😚'), - ('😜', '😞'), - ('😠', '😥'), - ('😨', '😫'), - ('😭', '😭'), - ('😰', '😳'), - ('😵', '🙀'), - ('🙅', '🙏'), - ('🚀', '🛅'), - ('🜀', '🝳'), - ('𫝀', '𫠝'), -]; - -pub const V6_1: &'static [(char, char)] = &[ - ('֏', '֏'), - ('\u{604}', '\u{604}'), - ('ࢠ', 'ࢠ'), - ('ࢢ', 'ࢬ'), - ('\u{8e4}', '\u{8fe}'), - ('૰', '૰'), - ('ໞ', 'ໟ'), - ('Ⴧ', 'Ⴧ'), - ('Ⴭ', 'Ⴭ'), - ('ჽ', 'ჿ'), - ('\u{1bab}', '\u{1bad}'), - ('ᮺ', 'ᮿ'), - ('᳀', '᳇'), - ('ᳳ', 'ᳶ'), - ('⟋', '⟋'), - ('⟍', '⟍'), - ('Ⳳ', 'ⳳ'), - ('ⴧ', 'ⴧ'), - ('ⴭ', 'ⴭ'), - ('ⵦ', 'ⵧ'), - ('⸲', '⸻'), - ('鿌', '鿌'), - ('\u{a674}', '\u{a67b}'), - ('\u{a69f}', '\u{a69f}'), - ('Ꞓ', 'ꞓ'), - ('Ɦ', 'Ɦ'), - ('ꟸ', 'ꟹ'), - ('ꫠ', '\u{aaf6}'), - ('郞', '隷'), - ('𐦀', '𐦷'), - ('𐦾', '𐦿'), - ('𑃐', '𑃨'), - ('𑃰', '𑃹'), - ('\u{11100}', '\u{11134}'), - ('𑄶', '𑅃'), - 
('\u{11180}', '𑇈'), - ('𑇐', '𑇙'), - ('𑚀', '\u{116b7}'), - ('𑛀', '𑛉'), - ('𖼀', '𖽄'), - ('𖽐', '𖽾'), - ('\u{16f8f}', '𖾟'), - ('𞸀', '𞸃'), - ('𞸅', '𞸟'), - ('𞸡', '𞸢'), - ('𞸤', '𞸤'), - ('𞸧', '𞸧'), - ('𞸩', '𞸲'), - ('𞸴', '𞸷'), - ('𞸹', '𞸹'), - ('𞸻', '𞸻'), - ('𞹂', '𞹂'), - ('𞹇', '𞹇'), - ('𞹉', '𞹉'), - ('𞹋', '𞹋'), - ('𞹍', '𞹏'), - ('𞹑', '𞹒'), - ('𞹔', '𞹔'), - ('𞹗', '𞹗'), - ('𞹙', '𞹙'), - ('𞹛', '𞹛'), - ('𞹝', '𞹝'), - ('𞹟', '𞹟'), - ('𞹡', '𞹢'), - ('𞹤', '𞹤'), - ('𞹧', '𞹪'), - ('𞹬', '𞹲'), - ('𞹴', '𞹷'), - ('𞹹', '𞹼'), - ('𞹾', '𞹾'), - ('𞺀', '𞺉'), - ('𞺋', '𞺛'), - ('𞺡', '𞺣'), - ('𞺥', '𞺩'), - ('𞺫', '𞺻'), - ('𞻰', '𞻱'), - ('🅪', '🅫'), - ('🕀', '🕃'), - ('😀', '😀'), - ('😑', '😑'), - ('😕', '😕'), - ('😗', '😗'), - ('😙', '😙'), - ('😛', '😛'), - ('😟', '😟'), - ('😦', '😧'), - ('😬', '😬'), - ('😮', '😯'), - ('😴', '😴'), -]; - -pub const V6_2: &'static [(char, char)] = &[('₺', '₺')]; - -pub const V6_3: &'static [(char, char)] = - &[('\u{61c}', '\u{61c}'), ('\u{2066}', '\u{2069}')]; - -pub const V7_0: &'static [(char, char)] = &[ - ('Ϳ', 'Ϳ'), - ('Ԩ', 'ԯ'), - ('֍', '֎'), - ('\u{605}', '\u{605}'), - ('ࢡ', 'ࢡ'), - ('ࢭ', 'ࢲ'), - ('\u{8ff}', '\u{8ff}'), - ('ॸ', 'ॸ'), - ('ঀ', 'ঀ'), - ('\u{c00}', '\u{c00}'), - ('ఴ', 'ఴ'), - ('\u{c81}', '\u{c81}'), - ('\u{d01}', '\u{d01}'), - ('෦', '෯'), - ('ᛱ', 'ᛸ'), - ('ᤝ', 'ᤞ'), - ('\u{1ab0}', '\u{1abe}'), - ('\u{1cf8}', '\u{1cf9}'), - ('\u{1de7}', '\u{1df5}'), - ('₻', '₽'), - ('⏴', '⏺'), - ('✀', '✀'), - ('⭍', '⭏'), - ('⭚', '⭳'), - ('⭶', '⮕'), - ('⮘', '⮹'), - ('⮽', '⯈'), - ('⯊', '⯑'), - ('⸼', '⹂'), - ('Ꚙ', 'ꚝ'), - ('ꞔ', 'ꞟ'), - ('Ɜ', 'Ɬ'), - ('Ʞ', 'Ʇ'), - ('ꟷ', 'ꟷ'), - ('ꧠ', 'ꧾ'), - ('\u{aa7c}', 'ꩿ'), - ('ꬰ', 'ꭟ'), - ('ꭤ', 'ꭥ'), - ('\u{fe27}', '\u{fe2d}'), - ('𐆋', '𐆌'), - ('𐆠', '𐆠'), - ('\u{102e0}', '𐋻'), - ('𐌟', '𐌟'), - ('𐍐', '\u{1037a}'), - ('𐔀', '𐔧'), - ('𐔰', '𐕣'), - ('𐕯', '𐕯'), - ('𐘀', '𐜶'), - ('𐝀', '𐝕'), - ('𐝠', '𐝧'), - ('𐡠', '𐢞'), - ('𐢧', '𐢯'), - ('𐪀', '𐪟'), - ('𐫀', '\u{10ae6}'), - ('𐫫', '𐫶'), - ('𐮀', '𐮑'), - ('𐮙', '𐮜'), - ('𐮩', '𐮯'), - ('\u{1107f}', '\u{1107f}'), - ('𑅐', '𑅶'), - ('𑇍', '𑇍'), - ('𑇚', '𑇚'), - ('𑇡', '𑇴'), - ('𑈀', '𑈑'), - ('𑈓', '𑈽'), - ('𑊰', '\u{112ea}'), - ('𑋰', '𑋹'), - ('\u{11301}', '𑌃'), - ('𑌅', '𑌌'), - ('𑌏', '𑌐'), - ('𑌓', '𑌨'), - ('𑌪', '𑌰'), - ('𑌲', '𑌳'), - ('𑌵', '𑌹'), - ('\u{1133c}', '𑍄'), - ('𑍇', '𑍈'), - ('𑍋', '\u{1134d}'), - ('\u{11357}', '\u{11357}'), - ('𑍝', '𑍣'), - ('\u{11366}', '\u{1136c}'), - ('\u{11370}', '\u{11374}'), - ('𑒀', '𑓇'), - ('𑓐', '𑓙'), - ('𑖀', '\u{115b5}'), - ('𑖸', '𑗉'), - ('𑘀', '𑙄'), - ('𑙐', '𑙙'), - ('𑢠', '𑣲'), - ('𑣿', '𑣿'), - ('𑫀', '𑫸'), - ('𒍯', '𒎘'), - ('𒑣', '𒑮'), - ('𒑴', '𒑴'), - ('𖩀', '𖩞'), - ('𖩠', '𖩩'), - ('𖩮', '𖩯'), - ('𖫐', '𖫭'), - ('\u{16af0}', '𖫵'), - ('𖬀', '𖭅'), - ('𖭐', '𖭙'), - ('𖭛', '𖭡'), - ('𖭣', '𖭷'), - ('𖭽', '𖮏'), - ('𛰀', '𛱪'), - ('𛱰', '𛱼'), - ('𛲀', '𛲈'), - ('𛲐', '𛲙'), - ('𛲜', '\u{1bca3}'), - ('𞠀', '𞣄'), - ('𞣇', '\u{1e8d6}'), - ('🂿', '🂿'), - ('🃠', '🃵'), - ('🄋', '🄌'), - ('🌡', '🌬'), - ('🌶', '🌶'), - ('🍽', '🍽'), - ('🎔', '🎟'), - ('🏅', '🏅'), - ('🏋', '🏎'), - ('🏔', '🏟'), - ('🏱', '🏷'), - ('🐿', '🐿'), - ('👁', '👁'), - ('📸', '📸'), - ('📽', '📾'), - ('🔾', '🔿'), - ('🕄', '🕊'), - ('🕨', '🕹'), - ('🕻', '🖣'), - ('🖥', '🗺'), - ('🙁', '🙂'), - ('🙐', '🙿'), - ('🛆', '🛏'), - ('🛠', '🛬'), - ('🛰', '🛳'), - ('🞀', '🟔'), - ('🠀', '🠋'), - ('🠐', '🡇'), - ('🡐', '🡙'), - ('🡠', '🢇'), - ('🢐', '🢭'), -]; - -pub const V8_0: &'static [(char, char)] = &[ - ('ࢳ', 'ࢴ'), - ('\u{8e3}', '\u{8e3}'), - ('ૹ', 'ૹ'), - ('ౚ', 'ౚ'), - ('ൟ', 'ൟ'), - ('Ᏽ', 'Ᏽ'), - ('ᏸ', 'ᏽ'), - ('₾', '₾'), - ('↊', '↋'), - ('⯬', '⯯'), - ('鿍', '鿕'), - ('\u{a69e}', '\u{a69e}'), - ('ꞏ', 'ꞏ'), - ('Ʝ', 'ꞷ'), - ('꣼', 'ꣽ'), - ('ꭠ', 'ꭣ'), - ('ꭰ', 'ꮿ'), 
- ('\u{fe2e}', '\u{fe2f}'), - ('𐣠', '𐣲'), - ('𐣴', '𐣵'), - ('𐣻', '𐣿'), - ('𐦼', '𐦽'), - ('𐧀', '𐧏'), - ('𐧒', '𐧿'), - ('𐲀', '𐲲'), - ('𐳀', '𐳲'), - ('𐳺', '𐳿'), - ('\u{111c9}', '\u{111cc}'), - ('𑇛', '𑇟'), - ('𑊀', '𑊆'), - ('𑊈', '𑊈'), - ('𑊊', '𑊍'), - ('𑊏', '𑊝'), - ('𑊟', '𑊩'), - ('\u{11300}', '\u{11300}'), - ('𑍐', '𑍐'), - ('𑗊', '\u{115dd}'), - ('𑜀', '𑜙'), - ('\u{1171d}', '\u{1172b}'), - ('𑜰', '𑜿'), - ('𒎙', '𒎙'), - ('𒒀', '𒕃'), - ('𔐀', '𔙆'), - ('𝇞', '𝇨'), - ('𝠀', '𝪋'), - ('\u{1da9b}', '\u{1da9f}'), - ('\u{1daa1}', '\u{1daaf}'), - ('🌭', '🌯'), - ('🍾', '🍿'), - ('🏏', '🏓'), - ('🏸', '🏿'), - ('📿', '📿'), - ('🕋', '🕏'), - ('🙃', '🙄'), - ('🛐', '🛐'), - ('🤐', '🤘'), - ('🦀', '🦄'), - ('🧀', '🧀'), - ('𫠠', '𬺡'), -]; - -pub const V9_0: &'static [(char, char)] = &[ - ('ࢶ', 'ࢽ'), - ('\u{8d4}', '\u{8e2}'), - ('ಀ', 'ಀ'), - ('൏', '൏'), - ('ൔ', 'ൖ'), - ('൘', '൞'), - ('൶', '൸'), - ('ᲀ', 'ᲈ'), - ('\u{1dfb}', '\u{1dfb}'), - ('⏻', '⏾'), - ('⹃', '⹄'), - ('Ɪ', 'Ɪ'), - ('\u{a8c5}', '\u{a8c5}'), - ('𐆍', '𐆎'), - ('𐒰', '𐓓'), - ('𐓘', '𐓻'), - ('\u{1123e}', '\u{1123e}'), - ('𑐀', '𑑙'), - ('𑑛', '𑑛'), - ('𑑝', '𑑝'), - ('𑙠', '𑙬'), - ('𑰀', '𑰈'), - ('𑰊', '\u{11c36}'), - ('\u{11c38}', '𑱅'), - ('𑱐', '𑱬'), - ('𑱰', '𑲏'), - ('\u{11c92}', '\u{11ca7}'), - ('𑲩', '\u{11cb6}'), - ('𖿠', '𖿠'), - ('𗀀', '𘟬'), - ('𘠀', '𘫲'), - ('\u{1e000}', '\u{1e006}'), - ('\u{1e008}', '\u{1e018}'), - ('\u{1e01b}', '\u{1e021}'), - ('\u{1e023}', '\u{1e024}'), - ('\u{1e026}', '\u{1e02a}'), - ('𞤀', '\u{1e94a}'), - ('𞥐', '𞥙'), - ('𞥞', '𞥟'), - ('🆛', '🆬'), - ('🈻', '🈻'), - ('🕺', '🕺'), - ('🖤', '🖤'), - ('🛑', '🛒'), - ('🛴', '🛶'), - ('🤙', '🤞'), - ('🤠', '🤧'), - ('🤰', '🤰'), - ('🤳', '🤾'), - ('🥀', '🥋'), - ('🥐', '🥞'), - ('🦅', '🦑'), -]; diff --git a/vendor/regex-syntax/src/unicode_tables/case_folding_simple.rs b/vendor/regex-syntax/src/unicode_tables/case_folding_simple.rs deleted file mode 100644 index 07f6ff2f5af7f8..00000000000000 --- a/vendor/regex-syntax/src/unicode_tables/case_folding_simple.rs +++ /dev/null @@ -1,2948 +0,0 @@ -// DO NOT EDIT THIS FILE. IT WAS AUTOMATICALLY GENERATED BY: -// -// ucd-generate case-folding-simple ucd-16.0.0 --chars --all-pairs -// -// Unicode version: 16.0.0. -// -// ucd-generate 0.3.1 is available on crates.io. 
- -pub const CASE_FOLDING_SIMPLE: &'static [(char, &'static [char])] = &[ - ('A', &['a']), - ('B', &['b']), - ('C', &['c']), - ('D', &['d']), - ('E', &['e']), - ('F', &['f']), - ('G', &['g']), - ('H', &['h']), - ('I', &['i']), - ('J', &['j']), - ('K', &['k', 'K']), - ('L', &['l']), - ('M', &['m']), - ('N', &['n']), - ('O', &['o']), - ('P', &['p']), - ('Q', &['q']), - ('R', &['r']), - ('S', &['s', 'ſ']), - ('T', &['t']), - ('U', &['u']), - ('V', &['v']), - ('W', &['w']), - ('X', &['x']), - ('Y', &['y']), - ('Z', &['z']), - ('a', &['A']), - ('b', &['B']), - ('c', &['C']), - ('d', &['D']), - ('e', &['E']), - ('f', &['F']), - ('g', &['G']), - ('h', &['H']), - ('i', &['I']), - ('j', &['J']), - ('k', &['K', 'K']), - ('l', &['L']), - ('m', &['M']), - ('n', &['N']), - ('o', &['O']), - ('p', &['P']), - ('q', &['Q']), - ('r', &['R']), - ('s', &['S', 'ſ']), - ('t', &['T']), - ('u', &['U']), - ('v', &['V']), - ('w', &['W']), - ('x', &['X']), - ('y', &['Y']), - ('z', &['Z']), - ('µ', &['Μ', 'μ']), - ('À', &['à']), - ('Á', &['á']), - ('Â', &['â']), - ('Ã', &['ã']), - ('Ä', &['ä']), - ('Å', &['å', 'Å']), - ('Æ', &['æ']), - ('Ç', &['ç']), - ('È', &['è']), - ('É', &['é']), - ('Ê', &['ê']), - ('Ë', &['ë']), - ('Ì', &['ì']), - ('Í', &['í']), - ('Î', &['î']), - ('Ï', &['ï']), - ('Ð', &['ð']), - ('Ñ', &['ñ']), - ('Ò', &['ò']), - ('Ó', &['ó']), - ('Ô', &['ô']), - ('Õ', &['õ']), - ('Ö', &['ö']), - ('Ø', &['ø']), - ('Ù', &['ù']), - ('Ú', &['ú']), - ('Û', &['û']), - ('Ü', &['ü']), - ('Ý', &['ý']), - ('Þ', &['þ']), - ('ß', &['ẞ']), - ('à', &['À']), - ('á', &['Á']), - ('â', &['Â']), - ('ã', &['Ã']), - ('ä', &['Ä']), - ('å', &['Å', 'Å']), - ('æ', &['Æ']), - ('ç', &['Ç']), - ('è', &['È']), - ('é', &['É']), - ('ê', &['Ê']), - ('ë', &['Ë']), - ('ì', &['Ì']), - ('í', &['Í']), - ('î', &['Î']), - ('ï', &['Ï']), - ('ð', &['Ð']), - ('ñ', &['Ñ']), - ('ò', &['Ò']), - ('ó', &['Ó']), - ('ô', &['Ô']), - ('õ', &['Õ']), - ('ö', &['Ö']), - ('ø', &['Ø']), - ('ù', &['Ù']), - ('ú', &['Ú']), - ('û', &['Û']), - ('ü', &['Ü']), - ('ý', &['Ý']), - ('þ', &['Þ']), - ('ÿ', &['Ÿ']), - ('Ā', &['ā']), - ('ā', &['Ā']), - ('Ă', &['ă']), - ('ă', &['Ă']), - ('Ą', &['ą']), - ('ą', &['Ą']), - ('Ć', &['ć']), - ('ć', &['Ć']), - ('Ĉ', &['ĉ']), - ('ĉ', &['Ĉ']), - ('Ċ', &['ċ']), - ('ċ', &['Ċ']), - ('Č', &['č']), - ('č', &['Č']), - ('Ď', &['ď']), - ('ď', &['Ď']), - ('Đ', &['đ']), - ('đ', &['Đ']), - ('Ē', &['ē']), - ('ē', &['Ē']), - ('Ĕ', &['ĕ']), - ('ĕ', &['Ĕ']), - ('Ė', &['ė']), - ('ė', &['Ė']), - ('Ę', &['ę']), - ('ę', &['Ę']), - ('Ě', &['ě']), - ('ě', &['Ě']), - ('Ĝ', &['ĝ']), - ('ĝ', &['Ĝ']), - ('Ğ', &['ğ']), - ('ğ', &['Ğ']), - ('Ġ', &['ġ']), - ('ġ', &['Ġ']), - ('Ģ', &['ģ']), - ('ģ', &['Ģ']), - ('Ĥ', &['ĥ']), - ('ĥ', &['Ĥ']), - ('Ħ', &['ħ']), - ('ħ', &['Ħ']), - ('Ĩ', &['ĩ']), - ('ĩ', &['Ĩ']), - ('Ī', &['ī']), - ('ī', &['Ī']), - ('Ĭ', &['ĭ']), - ('ĭ', &['Ĭ']), - ('Į', &['į']), - ('į', &['Į']), - ('IJ', &['ij']), - ('ij', &['IJ']), - ('Ĵ', &['ĵ']), - ('ĵ', &['Ĵ']), - ('Ķ', &['ķ']), - ('ķ', &['Ķ']), - ('Ĺ', &['ĺ']), - ('ĺ', &['Ĺ']), - ('Ļ', &['ļ']), - ('ļ', &['Ļ']), - ('Ľ', &['ľ']), - ('ľ', &['Ľ']), - ('Ŀ', &['ŀ']), - ('ŀ', &['Ŀ']), - ('Ł', &['ł']), - ('ł', &['Ł']), - ('Ń', &['ń']), - ('ń', &['Ń']), - ('Ņ', &['ņ']), - ('ņ', &['Ņ']), - ('Ň', &['ň']), - ('ň', &['Ň']), - ('Ŋ', &['ŋ']), - ('ŋ', &['Ŋ']), - ('Ō', &['ō']), - ('ō', &['Ō']), - ('Ŏ', &['ŏ']), - ('ŏ', &['Ŏ']), - ('Ő', &['ő']), - ('ő', &['Ő']), - ('Œ', &['œ']), - ('œ', &['Œ']), - ('Ŕ', &['ŕ']), - ('ŕ', &['Ŕ']), - ('Ŗ', &['ŗ']), - ('ŗ', &['Ŗ']), - ('Ř', &['ř']), - ('ř', &['Ř']), - ('Ś', &['ś']), - ('ś', 
&['Ś']), - ('Ŝ', &['ŝ']), - ('ŝ', &['Ŝ']), - ('Ş', &['ş']), - ('ş', &['Ş']), - ('Š', &['š']), - ('š', &['Š']), - ('Ţ', &['ţ']), - ('ţ', &['Ţ']), - ('Ť', &['ť']), - ('ť', &['Ť']), - ('Ŧ', &['ŧ']), - ('ŧ', &['Ŧ']), - ('Ũ', &['ũ']), - ('ũ', &['Ũ']), - ('Ū', &['ū']), - ('ū', &['Ū']), - ('Ŭ', &['ŭ']), - ('ŭ', &['Ŭ']), - ('Ů', &['ů']), - ('ů', &['Ů']), - ('Ű', &['ű']), - ('ű', &['Ű']), - ('Ų', &['ų']), - ('ų', &['Ų']), - ('Ŵ', &['ŵ']), - ('ŵ', &['Ŵ']), - ('Ŷ', &['ŷ']), - ('ŷ', &['Ŷ']), - ('Ÿ', &['ÿ']), - ('Ź', &['ź']), - ('ź', &['Ź']), - ('Ż', &['ż']), - ('ż', &['Ż']), - ('Ž', &['ž']), - ('ž', &['Ž']), - ('ſ', &['S', 's']), - ('ƀ', &['Ƀ']), - ('Ɓ', &['ɓ']), - ('Ƃ', &['ƃ']), - ('ƃ', &['Ƃ']), - ('Ƅ', &['ƅ']), - ('ƅ', &['Ƅ']), - ('Ɔ', &['ɔ']), - ('Ƈ', &['ƈ']), - ('ƈ', &['Ƈ']), - ('Ɖ', &['ɖ']), - ('Ɗ', &['ɗ']), - ('Ƌ', &['ƌ']), - ('ƌ', &['Ƌ']), - ('Ǝ', &['ǝ']), - ('Ə', &['ə']), - ('Ɛ', &['ɛ']), - ('Ƒ', &['ƒ']), - ('ƒ', &['Ƒ']), - ('Ɠ', &['ɠ']), - ('Ɣ', &['ɣ']), - ('ƕ', &['Ƕ']), - ('Ɩ', &['ɩ']), - ('Ɨ', &['ɨ']), - ('Ƙ', &['ƙ']), - ('ƙ', &['Ƙ']), - ('ƚ', &['Ƚ']), - ('ƛ', &['Ƛ']), - ('Ɯ', &['ɯ']), - ('Ɲ', &['ɲ']), - ('ƞ', &['Ƞ']), - ('Ɵ', &['ɵ']), - ('Ơ', &['ơ']), - ('ơ', &['Ơ']), - ('Ƣ', &['ƣ']), - ('ƣ', &['Ƣ']), - ('Ƥ', &['ƥ']), - ('ƥ', &['Ƥ']), - ('Ʀ', &['ʀ']), - ('Ƨ', &['ƨ']), - ('ƨ', &['Ƨ']), - ('Ʃ', &['ʃ']), - ('Ƭ', &['ƭ']), - ('ƭ', &['Ƭ']), - ('Ʈ', &['ʈ']), - ('Ư', &['ư']), - ('ư', &['Ư']), - ('Ʊ', &['ʊ']), - ('Ʋ', &['ʋ']), - ('Ƴ', &['ƴ']), - ('ƴ', &['Ƴ']), - ('Ƶ', &['ƶ']), - ('ƶ', &['Ƶ']), - ('Ʒ', &['ʒ']), - ('Ƹ', &['ƹ']), - ('ƹ', &['Ƹ']), - ('Ƽ', &['ƽ']), - ('ƽ', &['Ƽ']), - ('ƿ', &['Ƿ']), - ('DŽ', &['Dž', 'dž']), - ('Dž', &['DŽ', 'dž']), - ('dž', &['DŽ', 'Dž']), - ('LJ', &['Lj', 'lj']), - ('Lj', &['LJ', 'lj']), - ('lj', &['LJ', 'Lj']), - ('NJ', &['Nj', 'nj']), - ('Nj', &['NJ', 'nj']), - ('nj', &['NJ', 'Nj']), - ('Ǎ', &['ǎ']), - ('ǎ', &['Ǎ']), - ('Ǐ', &['ǐ']), - ('ǐ', &['Ǐ']), - ('Ǒ', &['ǒ']), - ('ǒ', &['Ǒ']), - ('Ǔ', &['ǔ']), - ('ǔ', &['Ǔ']), - ('Ǖ', &['ǖ']), - ('ǖ', &['Ǖ']), - ('Ǘ', &['ǘ']), - ('ǘ', &['Ǘ']), - ('Ǚ', &['ǚ']), - ('ǚ', &['Ǚ']), - ('Ǜ', &['ǜ']), - ('ǜ', &['Ǜ']), - ('ǝ', &['Ǝ']), - ('Ǟ', &['ǟ']), - ('ǟ', &['Ǟ']), - ('Ǡ', &['ǡ']), - ('ǡ', &['Ǡ']), - ('Ǣ', &['ǣ']), - ('ǣ', &['Ǣ']), - ('Ǥ', &['ǥ']), - ('ǥ', &['Ǥ']), - ('Ǧ', &['ǧ']), - ('ǧ', &['Ǧ']), - ('Ǩ', &['ǩ']), - ('ǩ', &['Ǩ']), - ('Ǫ', &['ǫ']), - ('ǫ', &['Ǫ']), - ('Ǭ', &['ǭ']), - ('ǭ', &['Ǭ']), - ('Ǯ', &['ǯ']), - ('ǯ', &['Ǯ']), - ('DZ', &['Dz', 'dz']), - ('Dz', &['DZ', 'dz']), - ('dz', &['DZ', 'Dz']), - ('Ǵ', &['ǵ']), - ('ǵ', &['Ǵ']), - ('Ƕ', &['ƕ']), - ('Ƿ', &['ƿ']), - ('Ǹ', &['ǹ']), - ('ǹ', &['Ǹ']), - ('Ǻ', &['ǻ']), - ('ǻ', &['Ǻ']), - ('Ǽ', &['ǽ']), - ('ǽ', &['Ǽ']), - ('Ǿ', &['ǿ']), - ('ǿ', &['Ǿ']), - ('Ȁ', &['ȁ']), - ('ȁ', &['Ȁ']), - ('Ȃ', &['ȃ']), - ('ȃ', &['Ȃ']), - ('Ȅ', &['ȅ']), - ('ȅ', &['Ȅ']), - ('Ȇ', &['ȇ']), - ('ȇ', &['Ȇ']), - ('Ȉ', &['ȉ']), - ('ȉ', &['Ȉ']), - ('Ȋ', &['ȋ']), - ('ȋ', &['Ȋ']), - ('Ȍ', &['ȍ']), - ('ȍ', &['Ȍ']), - ('Ȏ', &['ȏ']), - ('ȏ', &['Ȏ']), - ('Ȑ', &['ȑ']), - ('ȑ', &['Ȑ']), - ('Ȓ', &['ȓ']), - ('ȓ', &['Ȓ']), - ('Ȕ', &['ȕ']), - ('ȕ', &['Ȕ']), - ('Ȗ', &['ȗ']), - ('ȗ', &['Ȗ']), - ('Ș', &['ș']), - ('ș', &['Ș']), - ('Ț', &['ț']), - ('ț', &['Ț']), - ('Ȝ', &['ȝ']), - ('ȝ', &['Ȝ']), - ('Ȟ', &['ȟ']), - ('ȟ', &['Ȟ']), - ('Ƞ', &['ƞ']), - ('Ȣ', &['ȣ']), - ('ȣ', &['Ȣ']), - ('Ȥ', &['ȥ']), - ('ȥ', &['Ȥ']), - ('Ȧ', &['ȧ']), - ('ȧ', &['Ȧ']), - ('Ȩ', &['ȩ']), - ('ȩ', &['Ȩ']), - ('Ȫ', &['ȫ']), - ('ȫ', &['Ȫ']), - ('Ȭ', &['ȭ']), - ('ȭ', &['Ȭ']), - ('Ȯ', &['ȯ']), - ('ȯ', &['Ȯ']), - ('Ȱ', &['ȱ']), - ('ȱ', &['Ȱ']), - ('Ȳ', 
&['ȳ']), - ('ȳ', &['Ȳ']), - ('Ⱥ', &['ⱥ']), - ('Ȼ', &['ȼ']), - ('ȼ', &['Ȼ']), - ('Ƚ', &['ƚ']), - ('Ⱦ', &['ⱦ']), - ('ȿ', &['Ȿ']), - ('ɀ', &['Ɀ']), - ('Ɂ', &['ɂ']), - ('ɂ', &['Ɂ']), - ('Ƀ', &['ƀ']), - ('Ʉ', &['ʉ']), - ('Ʌ', &['ʌ']), - ('Ɇ', &['ɇ']), - ('ɇ', &['Ɇ']), - ('Ɉ', &['ɉ']), - ('ɉ', &['Ɉ']), - ('Ɋ', &['ɋ']), - ('ɋ', &['Ɋ']), - ('Ɍ', &['ɍ']), - ('ɍ', &['Ɍ']), - ('Ɏ', &['ɏ']), - ('ɏ', &['Ɏ']), - ('ɐ', &['Ɐ']), - ('ɑ', &['Ɑ']), - ('ɒ', &['Ɒ']), - ('ɓ', &['Ɓ']), - ('ɔ', &['Ɔ']), - ('ɖ', &['Ɖ']), - ('ɗ', &['Ɗ']), - ('ə', &['Ə']), - ('ɛ', &['Ɛ']), - ('ɜ', &['Ɜ']), - ('ɠ', &['Ɠ']), - ('ɡ', &['Ɡ']), - ('ɣ', &['Ɣ']), - ('ɤ', &['Ɤ']), - ('ɥ', &['Ɥ']), - ('ɦ', &['Ɦ']), - ('ɨ', &['Ɨ']), - ('ɩ', &['Ɩ']), - ('ɪ', &['Ɪ']), - ('ɫ', &['Ɫ']), - ('ɬ', &['Ɬ']), - ('ɯ', &['Ɯ']), - ('ɱ', &['Ɱ']), - ('ɲ', &['Ɲ']), - ('ɵ', &['Ɵ']), - ('ɽ', &['Ɽ']), - ('ʀ', &['Ʀ']), - ('ʂ', &['Ʂ']), - ('ʃ', &['Ʃ']), - ('ʇ', &['Ʇ']), - ('ʈ', &['Ʈ']), - ('ʉ', &['Ʉ']), - ('ʊ', &['Ʊ']), - ('ʋ', &['Ʋ']), - ('ʌ', &['Ʌ']), - ('ʒ', &['Ʒ']), - ('ʝ', &['Ʝ']), - ('ʞ', &['Ʞ']), - ('\u{345}', &['Ι', 'ι', 'ι']), - ('Ͱ', &['ͱ']), - ('ͱ', &['Ͱ']), - ('Ͳ', &['ͳ']), - ('ͳ', &['Ͳ']), - ('Ͷ', &['ͷ']), - ('ͷ', &['Ͷ']), - ('ͻ', &['Ͻ']), - ('ͼ', &['Ͼ']), - ('ͽ', &['Ͽ']), - ('Ϳ', &['ϳ']), - ('Ά', &['ά']), - ('Έ', &['έ']), - ('Ή', &['ή']), - ('Ί', &['ί']), - ('Ό', &['ό']), - ('Ύ', &['ύ']), - ('Ώ', &['ώ']), - ('ΐ', &['ΐ']), - ('Α', &['α']), - ('Β', &['β', 'ϐ']), - ('Γ', &['γ']), - ('Δ', &['δ']), - ('Ε', &['ε', 'ϵ']), - ('Ζ', &['ζ']), - ('Η', &['η']), - ('Θ', &['θ', 'ϑ', 'ϴ']), - ('Ι', &['\u{345}', 'ι', 'ι']), - ('Κ', &['κ', 'ϰ']), - ('Λ', &['λ']), - ('Μ', &['µ', 'μ']), - ('Ν', &['ν']), - ('Ξ', &['ξ']), - ('Ο', &['ο']), - ('Π', &['π', 'ϖ']), - ('Ρ', &['ρ', 'ϱ']), - ('Σ', &['ς', 'σ']), - ('Τ', &['τ']), - ('Υ', &['υ']), - ('Φ', &['φ', 'ϕ']), - ('Χ', &['χ']), - ('Ψ', &['ψ']), - ('Ω', &['ω', 'Ω']), - ('Ϊ', &['ϊ']), - ('Ϋ', &['ϋ']), - ('ά', &['Ά']), - ('έ', &['Έ']), - ('ή', &['Ή']), - ('ί', &['Ί']), - ('ΰ', &['ΰ']), - ('α', &['Α']), - ('β', &['Β', 'ϐ']), - ('γ', &['Γ']), - ('δ', &['Δ']), - ('ε', &['Ε', 'ϵ']), - ('ζ', &['Ζ']), - ('η', &['Η']), - ('θ', &['Θ', 'ϑ', 'ϴ']), - ('ι', &['\u{345}', 'Ι', 'ι']), - ('κ', &['Κ', 'ϰ']), - ('λ', &['Λ']), - ('μ', &['µ', 'Μ']), - ('ν', &['Ν']), - ('ξ', &['Ξ']), - ('ο', &['Ο']), - ('π', &['Π', 'ϖ']), - ('ρ', &['Ρ', 'ϱ']), - ('ς', &['Σ', 'σ']), - ('σ', &['Σ', 'ς']), - ('τ', &['Τ']), - ('υ', &['Υ']), - ('φ', &['Φ', 'ϕ']), - ('χ', &['Χ']), - ('ψ', &['Ψ']), - ('ω', &['Ω', 'Ω']), - ('ϊ', &['Ϊ']), - ('ϋ', &['Ϋ']), - ('ό', &['Ό']), - ('ύ', &['Ύ']), - ('ώ', &['Ώ']), - ('Ϗ', &['ϗ']), - ('ϐ', &['Β', 'β']), - ('ϑ', &['Θ', 'θ', 'ϴ']), - ('ϕ', &['Φ', 'φ']), - ('ϖ', &['Π', 'π']), - ('ϗ', &['Ϗ']), - ('Ϙ', &['ϙ']), - ('ϙ', &['Ϙ']), - ('Ϛ', &['ϛ']), - ('ϛ', &['Ϛ']), - ('Ϝ', &['ϝ']), - ('ϝ', &['Ϝ']), - ('Ϟ', &['ϟ']), - ('ϟ', &['Ϟ']), - ('Ϡ', &['ϡ']), - ('ϡ', &['Ϡ']), - ('Ϣ', &['ϣ']), - ('ϣ', &['Ϣ']), - ('Ϥ', &['ϥ']), - ('ϥ', &['Ϥ']), - ('Ϧ', &['ϧ']), - ('ϧ', &['Ϧ']), - ('Ϩ', &['ϩ']), - ('ϩ', &['Ϩ']), - ('Ϫ', &['ϫ']), - ('ϫ', &['Ϫ']), - ('Ϭ', &['ϭ']), - ('ϭ', &['Ϭ']), - ('Ϯ', &['ϯ']), - ('ϯ', &['Ϯ']), - ('ϰ', &['Κ', 'κ']), - ('ϱ', &['Ρ', 'ρ']), - ('ϲ', &['Ϲ']), - ('ϳ', &['Ϳ']), - ('ϴ', &['Θ', 'θ', 'ϑ']), - ('ϵ', &['Ε', 'ε']), - ('Ϸ', &['ϸ']), - ('ϸ', &['Ϸ']), - ('Ϲ', &['ϲ']), - ('Ϻ', &['ϻ']), - ('ϻ', &['Ϻ']), - ('Ͻ', &['ͻ']), - ('Ͼ', &['ͼ']), - ('Ͽ', &['ͽ']), - ('Ѐ', &['ѐ']), - ('Ё', &['ё']), - ('Ђ', &['ђ']), - ('Ѓ', &['ѓ']), - ('Є', &['є']), - ('Ѕ', &['ѕ']), - ('І', &['і']), - ('Ї', &['ї']), - ('Ј', &['ј']), - ('Љ', &['љ']), - ('Њ', &['њ']), 
- ('Ћ', &['ћ']), - ('Ќ', &['ќ']), - ('Ѝ', &['ѝ']), - ('Ў', &['ў']), - ('Џ', &['џ']), - ('А', &['а']), - ('Б', &['б']), - ('В', &['в', 'ᲀ']), - ('Г', &['г']), - ('Д', &['д', 'ᲁ']), - ('Е', &['е']), - ('Ж', &['ж']), - ('З', &['з']), - ('И', &['и']), - ('Й', &['й']), - ('К', &['к']), - ('Л', &['л']), - ('М', &['м']), - ('Н', &['н']), - ('О', &['о', 'ᲂ']), - ('П', &['п']), - ('Р', &['р']), - ('С', &['с', 'ᲃ']), - ('Т', &['т', 'ᲄ', 'ᲅ']), - ('У', &['у']), - ('Ф', &['ф']), - ('Х', &['х']), - ('Ц', &['ц']), - ('Ч', &['ч']), - ('Ш', &['ш']), - ('Щ', &['щ']), - ('Ъ', &['ъ', 'ᲆ']), - ('Ы', &['ы']), - ('Ь', &['ь']), - ('Э', &['э']), - ('Ю', &['ю']), - ('Я', &['я']), - ('а', &['А']), - ('б', &['Б']), - ('в', &['В', 'ᲀ']), - ('г', &['Г']), - ('д', &['Д', 'ᲁ']), - ('е', &['Е']), - ('ж', &['Ж']), - ('з', &['З']), - ('и', &['И']), - ('й', &['Й']), - ('к', &['К']), - ('л', &['Л']), - ('м', &['М']), - ('н', &['Н']), - ('о', &['О', 'ᲂ']), - ('п', &['П']), - ('р', &['Р']), - ('с', &['С', 'ᲃ']), - ('т', &['Т', 'ᲄ', 'ᲅ']), - ('у', &['У']), - ('ф', &['Ф']), - ('х', &['Х']), - ('ц', &['Ц']), - ('ч', &['Ч']), - ('ш', &['Ш']), - ('щ', &['Щ']), - ('ъ', &['Ъ', 'ᲆ']), - ('ы', &['Ы']), - ('ь', &['Ь']), - ('э', &['Э']), - ('ю', &['Ю']), - ('я', &['Я']), - ('ѐ', &['Ѐ']), - ('ё', &['Ё']), - ('ђ', &['Ђ']), - ('ѓ', &['Ѓ']), - ('є', &['Є']), - ('ѕ', &['Ѕ']), - ('і', &['І']), - ('ї', &['Ї']), - ('ј', &['Ј']), - ('љ', &['Љ']), - ('њ', &['Њ']), - ('ћ', &['Ћ']), - ('ќ', &['Ќ']), - ('ѝ', &['Ѝ']), - ('ў', &['Ў']), - ('џ', &['Џ']), - ('Ѡ', &['ѡ']), - ('ѡ', &['Ѡ']), - ('Ѣ', &['ѣ', 'ᲇ']), - ('ѣ', &['Ѣ', 'ᲇ']), - ('Ѥ', &['ѥ']), - ('ѥ', &['Ѥ']), - ('Ѧ', &['ѧ']), - ('ѧ', &['Ѧ']), - ('Ѩ', &['ѩ']), - ('ѩ', &['Ѩ']), - ('Ѫ', &['ѫ']), - ('ѫ', &['Ѫ']), - ('Ѭ', &['ѭ']), - ('ѭ', &['Ѭ']), - ('Ѯ', &['ѯ']), - ('ѯ', &['Ѯ']), - ('Ѱ', &['ѱ']), - ('ѱ', &['Ѱ']), - ('Ѳ', &['ѳ']), - ('ѳ', &['Ѳ']), - ('Ѵ', &['ѵ']), - ('ѵ', &['Ѵ']), - ('Ѷ', &['ѷ']), - ('ѷ', &['Ѷ']), - ('Ѹ', &['ѹ']), - ('ѹ', &['Ѹ']), - ('Ѻ', &['ѻ']), - ('ѻ', &['Ѻ']), - ('Ѽ', &['ѽ']), - ('ѽ', &['Ѽ']), - ('Ѿ', &['ѿ']), - ('ѿ', &['Ѿ']), - ('Ҁ', &['ҁ']), - ('ҁ', &['Ҁ']), - ('Ҋ', &['ҋ']), - ('ҋ', &['Ҋ']), - ('Ҍ', &['ҍ']), - ('ҍ', &['Ҍ']), - ('Ҏ', &['ҏ']), - ('ҏ', &['Ҏ']), - ('Ґ', &['ґ']), - ('ґ', &['Ґ']), - ('Ғ', &['ғ']), - ('ғ', &['Ғ']), - ('Ҕ', &['ҕ']), - ('ҕ', &['Ҕ']), - ('Җ', &['җ']), - ('җ', &['Җ']), - ('Ҙ', &['ҙ']), - ('ҙ', &['Ҙ']), - ('Қ', &['қ']), - ('қ', &['Қ']), - ('Ҝ', &['ҝ']), - ('ҝ', &['Ҝ']), - ('Ҟ', &['ҟ']), - ('ҟ', &['Ҟ']), - ('Ҡ', &['ҡ']), - ('ҡ', &['Ҡ']), - ('Ң', &['ң']), - ('ң', &['Ң']), - ('Ҥ', &['ҥ']), - ('ҥ', &['Ҥ']), - ('Ҧ', &['ҧ']), - ('ҧ', &['Ҧ']), - ('Ҩ', &['ҩ']), - ('ҩ', &['Ҩ']), - ('Ҫ', &['ҫ']), - ('ҫ', &['Ҫ']), - ('Ҭ', &['ҭ']), - ('ҭ', &['Ҭ']), - ('Ү', &['ү']), - ('ү', &['Ү']), - ('Ұ', &['ұ']), - ('ұ', &['Ұ']), - ('Ҳ', &['ҳ']), - ('ҳ', &['Ҳ']), - ('Ҵ', &['ҵ']), - ('ҵ', &['Ҵ']), - ('Ҷ', &['ҷ']), - ('ҷ', &['Ҷ']), - ('Ҹ', &['ҹ']), - ('ҹ', &['Ҹ']), - ('Һ', &['һ']), - ('һ', &['Һ']), - ('Ҽ', &['ҽ']), - ('ҽ', &['Ҽ']), - ('Ҿ', &['ҿ']), - ('ҿ', &['Ҿ']), - ('Ӏ', &['ӏ']), - ('Ӂ', &['ӂ']), - ('ӂ', &['Ӂ']), - ('Ӄ', &['ӄ']), - ('ӄ', &['Ӄ']), - ('Ӆ', &['ӆ']), - ('ӆ', &['Ӆ']), - ('Ӈ', &['ӈ']), - ('ӈ', &['Ӈ']), - ('Ӊ', &['ӊ']), - ('ӊ', &['Ӊ']), - ('Ӌ', &['ӌ']), - ('ӌ', &['Ӌ']), - ('Ӎ', &['ӎ']), - ('ӎ', &['Ӎ']), - ('ӏ', &['Ӏ']), - ('Ӑ', &['ӑ']), - ('ӑ', &['Ӑ']), - ('Ӓ', &['ӓ']), - ('ӓ', &['Ӓ']), - ('Ӕ', &['ӕ']), - ('ӕ', &['Ӕ']), - ('Ӗ', &['ӗ']), - ('ӗ', &['Ӗ']), - ('Ә', &['ә']), - ('ә', &['Ә']), - ('Ӛ', &['ӛ']), - ('ӛ', &['Ӛ']), - ('Ӝ', &['ӝ']), - ('ӝ', &['Ӝ']), - ('Ӟ', &['ӟ']), - 
('ӟ', &['Ӟ']), - ('Ӡ', &['ӡ']), - ('ӡ', &['Ӡ']), - ('Ӣ', &['ӣ']), - ('ӣ', &['Ӣ']), - ('Ӥ', &['ӥ']), - ('ӥ', &['Ӥ']), - ('Ӧ', &['ӧ']), - ('ӧ', &['Ӧ']), - ('Ө', &['ө']), - ('ө', &['Ө']), - ('Ӫ', &['ӫ']), - ('ӫ', &['Ӫ']), - ('Ӭ', &['ӭ']), - ('ӭ', &['Ӭ']), - ('Ӯ', &['ӯ']), - ('ӯ', &['Ӯ']), - ('Ӱ', &['ӱ']), - ('ӱ', &['Ӱ']), - ('Ӳ', &['ӳ']), - ('ӳ', &['Ӳ']), - ('Ӵ', &['ӵ']), - ('ӵ', &['Ӵ']), - ('Ӷ', &['ӷ']), - ('ӷ', &['Ӷ']), - ('Ӹ', &['ӹ']), - ('ӹ', &['Ӹ']), - ('Ӻ', &['ӻ']), - ('ӻ', &['Ӻ']), - ('Ӽ', &['ӽ']), - ('ӽ', &['Ӽ']), - ('Ӿ', &['ӿ']), - ('ӿ', &['Ӿ']), - ('Ԁ', &['ԁ']), - ('ԁ', &['Ԁ']), - ('Ԃ', &['ԃ']), - ('ԃ', &['Ԃ']), - ('Ԅ', &['ԅ']), - ('ԅ', &['Ԅ']), - ('Ԇ', &['ԇ']), - ('ԇ', &['Ԇ']), - ('Ԉ', &['ԉ']), - ('ԉ', &['Ԉ']), - ('Ԋ', &['ԋ']), - ('ԋ', &['Ԋ']), - ('Ԍ', &['ԍ']), - ('ԍ', &['Ԍ']), - ('Ԏ', &['ԏ']), - ('ԏ', &['Ԏ']), - ('Ԑ', &['ԑ']), - ('ԑ', &['Ԑ']), - ('Ԓ', &['ԓ']), - ('ԓ', &['Ԓ']), - ('Ԕ', &['ԕ']), - ('ԕ', &['Ԕ']), - ('Ԗ', &['ԗ']), - ('ԗ', &['Ԗ']), - ('Ԙ', &['ԙ']), - ('ԙ', &['Ԙ']), - ('Ԛ', &['ԛ']), - ('ԛ', &['Ԛ']), - ('Ԝ', &['ԝ']), - ('ԝ', &['Ԝ']), - ('Ԟ', &['ԟ']), - ('ԟ', &['Ԟ']), - ('Ԡ', &['ԡ']), - ('ԡ', &['Ԡ']), - ('Ԣ', &['ԣ']), - ('ԣ', &['Ԣ']), - ('Ԥ', &['ԥ']), - ('ԥ', &['Ԥ']), - ('Ԧ', &['ԧ']), - ('ԧ', &['Ԧ']), - ('Ԩ', &['ԩ']), - ('ԩ', &['Ԩ']), - ('Ԫ', &['ԫ']), - ('ԫ', &['Ԫ']), - ('Ԭ', &['ԭ']), - ('ԭ', &['Ԭ']), - ('Ԯ', &['ԯ']), - ('ԯ', &['Ԯ']), - ('Ա', &['ա']), - ('Բ', &['բ']), - ('Գ', &['գ']), - ('Դ', &['դ']), - ('Ե', &['ե']), - ('Զ', &['զ']), - ('Է', &['է']), - ('Ը', &['ը']), - ('Թ', &['թ']), - ('Ժ', &['ժ']), - ('Ի', &['ի']), - ('Լ', &['լ']), - ('Խ', &['խ']), - ('Ծ', &['ծ']), - ('Կ', &['կ']), - ('Հ', &['հ']), - ('Ձ', &['ձ']), - ('Ղ', &['ղ']), - ('Ճ', &['ճ']), - ('Մ', &['մ']), - ('Յ', &['յ']), - ('Ն', &['ն']), - ('Շ', &['շ']), - ('Ո', &['ո']), - ('Չ', &['չ']), - ('Պ', &['պ']), - ('Ջ', &['ջ']), - ('Ռ', &['ռ']), - ('Ս', &['ս']), - ('Վ', &['վ']), - ('Տ', &['տ']), - ('Ր', &['ր']), - ('Ց', &['ց']), - ('Ւ', &['ւ']), - ('Փ', &['փ']), - ('Ք', &['ք']), - ('Օ', &['օ']), - ('Ֆ', &['ֆ']), - ('ա', &['Ա']), - ('բ', &['Բ']), - ('գ', &['Գ']), - ('դ', &['Դ']), - ('ե', &['Ե']), - ('զ', &['Զ']), - ('է', &['Է']), - ('ը', &['Ը']), - ('թ', &['Թ']), - ('ժ', &['Ժ']), - ('ի', &['Ի']), - ('լ', &['Լ']), - ('խ', &['Խ']), - ('ծ', &['Ծ']), - ('կ', &['Կ']), - ('հ', &['Հ']), - ('ձ', &['Ձ']), - ('ղ', &['Ղ']), - ('ճ', &['Ճ']), - ('մ', &['Մ']), - ('յ', &['Յ']), - ('ն', &['Ն']), - ('շ', &['Շ']), - ('ո', &['Ո']), - ('չ', &['Չ']), - ('պ', &['Պ']), - ('ջ', &['Ջ']), - ('ռ', &['Ռ']), - ('ս', &['Ս']), - ('վ', &['Վ']), - ('տ', &['Տ']), - ('ր', &['Ր']), - ('ց', &['Ց']), - ('ւ', &['Ւ']), - ('փ', &['Փ']), - ('ք', &['Ք']), - ('օ', &['Օ']), - ('ֆ', &['Ֆ']), - ('Ⴀ', &['ⴀ']), - ('Ⴁ', &['ⴁ']), - ('Ⴂ', &['ⴂ']), - ('Ⴃ', &['ⴃ']), - ('Ⴄ', &['ⴄ']), - ('Ⴅ', &['ⴅ']), - ('Ⴆ', &['ⴆ']), - ('Ⴇ', &['ⴇ']), - ('Ⴈ', &['ⴈ']), - ('Ⴉ', &['ⴉ']), - ('Ⴊ', &['ⴊ']), - ('Ⴋ', &['ⴋ']), - ('Ⴌ', &['ⴌ']), - ('Ⴍ', &['ⴍ']), - ('Ⴎ', &['ⴎ']), - ('Ⴏ', &['ⴏ']), - ('Ⴐ', &['ⴐ']), - ('Ⴑ', &['ⴑ']), - ('Ⴒ', &['ⴒ']), - ('Ⴓ', &['ⴓ']), - ('Ⴔ', &['ⴔ']), - ('Ⴕ', &['ⴕ']), - ('Ⴖ', &['ⴖ']), - ('Ⴗ', &['ⴗ']), - ('Ⴘ', &['ⴘ']), - ('Ⴙ', &['ⴙ']), - ('Ⴚ', &['ⴚ']), - ('Ⴛ', &['ⴛ']), - ('Ⴜ', &['ⴜ']), - ('Ⴝ', &['ⴝ']), - ('Ⴞ', &['ⴞ']), - ('Ⴟ', &['ⴟ']), - ('Ⴠ', &['ⴠ']), - ('Ⴡ', &['ⴡ']), - ('Ⴢ', &['ⴢ']), - ('Ⴣ', &['ⴣ']), - ('Ⴤ', &['ⴤ']), - ('Ⴥ', &['ⴥ']), - ('Ⴧ', &['ⴧ']), - ('Ⴭ', &['ⴭ']), - ('ა', &['Ა']), - ('ბ', &['Ბ']), - ('გ', &['Გ']), - ('დ', &['Დ']), - ('ე', &['Ე']), - ('ვ', &['Ვ']), - ('ზ', &['Ზ']), - ('თ', &['Თ']), - ('ი', &['Ი']), - ('კ', &['Კ']), - ('ლ', &['Ლ']), - ('მ', &['Მ']), - 
('ნ', &['Ნ']), - ('ო', &['Ო']), - ('პ', &['Პ']), - ('ჟ', &['Ჟ']), - ('რ', &['Რ']), - ('ს', &['Ს']), - ('ტ', &['Ტ']), - ('უ', &['Უ']), - ('ფ', &['Ფ']), - ('ქ', &['Ქ']), - ('ღ', &['Ღ']), - ('ყ', &['Ყ']), - ('შ', &['Შ']), - ('ჩ', &['Ჩ']), - ('ც', &['Ც']), - ('ძ', &['Ძ']), - ('წ', &['Წ']), - ('ჭ', &['Ჭ']), - ('ხ', &['Ხ']), - ('ჯ', &['Ჯ']), - ('ჰ', &['Ჰ']), - ('ჱ', &['Ჱ']), - ('ჲ', &['Ჲ']), - ('ჳ', &['Ჳ']), - ('ჴ', &['Ჴ']), - ('ჵ', &['Ჵ']), - ('ჶ', &['Ჶ']), - ('ჷ', &['Ჷ']), - ('ჸ', &['Ჸ']), - ('ჹ', &['Ჹ']), - ('ჺ', &['Ჺ']), - ('ჽ', &['Ჽ']), - ('ჾ', &['Ჾ']), - ('ჿ', &['Ჿ']), - ('Ꭰ', &['ꭰ']), - ('Ꭱ', &['ꭱ']), - ('Ꭲ', &['ꭲ']), - ('Ꭳ', &['ꭳ']), - ('Ꭴ', &['ꭴ']), - ('Ꭵ', &['ꭵ']), - ('Ꭶ', &['ꭶ']), - ('Ꭷ', &['ꭷ']), - ('Ꭸ', &['ꭸ']), - ('Ꭹ', &['ꭹ']), - ('Ꭺ', &['ꭺ']), - ('Ꭻ', &['ꭻ']), - ('Ꭼ', &['ꭼ']), - ('Ꭽ', &['ꭽ']), - ('Ꭾ', &['ꭾ']), - ('Ꭿ', &['ꭿ']), - ('Ꮀ', &['ꮀ']), - ('Ꮁ', &['ꮁ']), - ('Ꮂ', &['ꮂ']), - ('Ꮃ', &['ꮃ']), - ('Ꮄ', &['ꮄ']), - ('Ꮅ', &['ꮅ']), - ('Ꮆ', &['ꮆ']), - ('Ꮇ', &['ꮇ']), - ('Ꮈ', &['ꮈ']), - ('Ꮉ', &['ꮉ']), - ('Ꮊ', &['ꮊ']), - ('Ꮋ', &['ꮋ']), - ('Ꮌ', &['ꮌ']), - ('Ꮍ', &['ꮍ']), - ('Ꮎ', &['ꮎ']), - ('Ꮏ', &['ꮏ']), - ('Ꮐ', &['ꮐ']), - ('Ꮑ', &['ꮑ']), - ('Ꮒ', &['ꮒ']), - ('Ꮓ', &['ꮓ']), - ('Ꮔ', &['ꮔ']), - ('Ꮕ', &['ꮕ']), - ('Ꮖ', &['ꮖ']), - ('Ꮗ', &['ꮗ']), - ('Ꮘ', &['ꮘ']), - ('Ꮙ', &['ꮙ']), - ('Ꮚ', &['ꮚ']), - ('Ꮛ', &['ꮛ']), - ('Ꮜ', &['ꮜ']), - ('Ꮝ', &['ꮝ']), - ('Ꮞ', &['ꮞ']), - ('Ꮟ', &['ꮟ']), - ('Ꮠ', &['ꮠ']), - ('Ꮡ', &['ꮡ']), - ('Ꮢ', &['ꮢ']), - ('Ꮣ', &['ꮣ']), - ('Ꮤ', &['ꮤ']), - ('Ꮥ', &['ꮥ']), - ('Ꮦ', &['ꮦ']), - ('Ꮧ', &['ꮧ']), - ('Ꮨ', &['ꮨ']), - ('Ꮩ', &['ꮩ']), - ('Ꮪ', &['ꮪ']), - ('Ꮫ', &['ꮫ']), - ('Ꮬ', &['ꮬ']), - ('Ꮭ', &['ꮭ']), - ('Ꮮ', &['ꮮ']), - ('Ꮯ', &['ꮯ']), - ('Ꮰ', &['ꮰ']), - ('Ꮱ', &['ꮱ']), - ('Ꮲ', &['ꮲ']), - ('Ꮳ', &['ꮳ']), - ('Ꮴ', &['ꮴ']), - ('Ꮵ', &['ꮵ']), - ('Ꮶ', &['ꮶ']), - ('Ꮷ', &['ꮷ']), - ('Ꮸ', &['ꮸ']), - ('Ꮹ', &['ꮹ']), - ('Ꮺ', &['ꮺ']), - ('Ꮻ', &['ꮻ']), - ('Ꮼ', &['ꮼ']), - ('Ꮽ', &['ꮽ']), - ('Ꮾ', &['ꮾ']), - ('Ꮿ', &['ꮿ']), - ('Ᏸ', &['ᏸ']), - ('Ᏹ', &['ᏹ']), - ('Ᏺ', &['ᏺ']), - ('Ᏻ', &['ᏻ']), - ('Ᏼ', &['ᏼ']), - ('Ᏽ', &['ᏽ']), - ('ᏸ', &['Ᏸ']), - ('ᏹ', &['Ᏹ']), - ('ᏺ', &['Ᏺ']), - ('ᏻ', &['Ᏻ']), - ('ᏼ', &['Ᏼ']), - ('ᏽ', &['Ᏽ']), - ('ᲀ', &['В', 'в']), - ('ᲁ', &['Д', 'д']), - ('ᲂ', &['О', 'о']), - ('ᲃ', &['С', 'с']), - ('ᲄ', &['Т', 'т', 'ᲅ']), - ('ᲅ', &['Т', 'т', 'ᲄ']), - ('ᲆ', &['Ъ', 'ъ']), - ('ᲇ', &['Ѣ', 'ѣ']), - ('ᲈ', &['Ꙋ', 'ꙋ']), - ('Ᲊ', &['ᲊ']), - ('ᲊ', &['Ᲊ']), - ('Ა', &['ა']), - ('Ბ', &['ბ']), - ('Გ', &['გ']), - ('Დ', &['დ']), - ('Ე', &['ე']), - ('Ვ', &['ვ']), - ('Ზ', &['ზ']), - ('Თ', &['თ']), - ('Ი', &['ი']), - ('Კ', &['კ']), - ('Ლ', &['ლ']), - ('Მ', &['მ']), - ('Ნ', &['ნ']), - ('Ო', &['ო']), - ('Პ', &['პ']), - ('Ჟ', &['ჟ']), - ('Რ', &['რ']), - ('Ს', &['ს']), - ('Ტ', &['ტ']), - ('Უ', &['უ']), - ('Ფ', &['ფ']), - ('Ქ', &['ქ']), - ('Ღ', &['ღ']), - ('Ყ', &['ყ']), - ('Შ', &['შ']), - ('Ჩ', &['ჩ']), - ('Ც', &['ც']), - ('Ძ', &['ძ']), - ('Წ', &['წ']), - ('Ჭ', &['ჭ']), - ('Ხ', &['ხ']), - ('Ჯ', &['ჯ']), - ('Ჰ', &['ჰ']), - ('Ჱ', &['ჱ']), - ('Ჲ', &['ჲ']), - ('Ჳ', &['ჳ']), - ('Ჴ', &['ჴ']), - ('Ჵ', &['ჵ']), - ('Ჶ', &['ჶ']), - ('Ჷ', &['ჷ']), - ('Ჸ', &['ჸ']), - ('Ჹ', &['ჹ']), - ('Ჺ', &['ჺ']), - ('Ჽ', &['ჽ']), - ('Ჾ', &['ჾ']), - ('Ჿ', &['ჿ']), - ('ᵹ', &['Ᵹ']), - ('ᵽ', &['Ᵽ']), - ('ᶎ', &['Ᶎ']), - ('Ḁ', &['ḁ']), - ('ḁ', &['Ḁ']), - ('Ḃ', &['ḃ']), - ('ḃ', &['Ḃ']), - ('Ḅ', &['ḅ']), - ('ḅ', &['Ḅ']), - ('Ḇ', &['ḇ']), - ('ḇ', &['Ḇ']), - ('Ḉ', &['ḉ']), - ('ḉ', &['Ḉ']), - ('Ḋ', &['ḋ']), - ('ḋ', &['Ḋ']), - ('Ḍ', &['ḍ']), - ('ḍ', &['Ḍ']), - ('Ḏ', &['ḏ']), - ('ḏ', &['Ḏ']), - ('Ḑ', &['ḑ']), - ('ḑ', &['Ḑ']), - ('Ḓ', &['ḓ']), - ('ḓ', &['Ḓ']), 
- ('Ḕ', &['ḕ']), - ('ḕ', &['Ḕ']), - ('Ḗ', &['ḗ']), - ('ḗ', &['Ḗ']), - ('Ḙ', &['ḙ']), - ('ḙ', &['Ḙ']), - ('Ḛ', &['ḛ']), - ('ḛ', &['Ḛ']), - ('Ḝ', &['ḝ']), - ('ḝ', &['Ḝ']), - ('Ḟ', &['ḟ']), - ('ḟ', &['Ḟ']), - ('Ḡ', &['ḡ']), - ('ḡ', &['Ḡ']), - ('Ḣ', &['ḣ']), - ('ḣ', &['Ḣ']), - ('Ḥ', &['ḥ']), - ('ḥ', &['Ḥ']), - ('Ḧ', &['ḧ']), - ('ḧ', &['Ḧ']), - ('Ḩ', &['ḩ']), - ('ḩ', &['Ḩ']), - ('Ḫ', &['ḫ']), - ('ḫ', &['Ḫ']), - ('Ḭ', &['ḭ']), - ('ḭ', &['Ḭ']), - ('Ḯ', &['ḯ']), - ('ḯ', &['Ḯ']), - ('Ḱ', &['ḱ']), - ('ḱ', &['Ḱ']), - ('Ḳ', &['ḳ']), - ('ḳ', &['Ḳ']), - ('Ḵ', &['ḵ']), - ('ḵ', &['Ḵ']), - ('Ḷ', &['ḷ']), - ('ḷ', &['Ḷ']), - ('Ḹ', &['ḹ']), - ('ḹ', &['Ḹ']), - ('Ḻ', &['ḻ']), - ('ḻ', &['Ḻ']), - ('Ḽ', &['ḽ']), - ('ḽ', &['Ḽ']), - ('Ḿ', &['ḿ']), - ('ḿ', &['Ḿ']), - ('Ṁ', &['ṁ']), - ('ṁ', &['Ṁ']), - ('Ṃ', &['ṃ']), - ('ṃ', &['Ṃ']), - ('Ṅ', &['ṅ']), - ('ṅ', &['Ṅ']), - ('Ṇ', &['ṇ']), - ('ṇ', &['Ṇ']), - ('Ṉ', &['ṉ']), - ('ṉ', &['Ṉ']), - ('Ṋ', &['ṋ']), - ('ṋ', &['Ṋ']), - ('Ṍ', &['ṍ']), - ('ṍ', &['Ṍ']), - ('Ṏ', &['ṏ']), - ('ṏ', &['Ṏ']), - ('Ṑ', &['ṑ']), - ('ṑ', &['Ṑ']), - ('Ṓ', &['ṓ']), - ('ṓ', &['Ṓ']), - ('Ṕ', &['ṕ']), - ('ṕ', &['Ṕ']), - ('Ṗ', &['ṗ']), - ('ṗ', &['Ṗ']), - ('Ṙ', &['ṙ']), - ('ṙ', &['Ṙ']), - ('Ṛ', &['ṛ']), - ('ṛ', &['Ṛ']), - ('Ṝ', &['ṝ']), - ('ṝ', &['Ṝ']), - ('Ṟ', &['ṟ']), - ('ṟ', &['Ṟ']), - ('Ṡ', &['ṡ', 'ẛ']), - ('ṡ', &['Ṡ', 'ẛ']), - ('Ṣ', &['ṣ']), - ('ṣ', &['Ṣ']), - ('Ṥ', &['ṥ']), - ('ṥ', &['Ṥ']), - ('Ṧ', &['ṧ']), - ('ṧ', &['Ṧ']), - ('Ṩ', &['ṩ']), - ('ṩ', &['Ṩ']), - ('Ṫ', &['ṫ']), - ('ṫ', &['Ṫ']), - ('Ṭ', &['ṭ']), - ('ṭ', &['Ṭ']), - ('Ṯ', &['ṯ']), - ('ṯ', &['Ṯ']), - ('Ṱ', &['ṱ']), - ('ṱ', &['Ṱ']), - ('Ṳ', &['ṳ']), - ('ṳ', &['Ṳ']), - ('Ṵ', &['ṵ']), - ('ṵ', &['Ṵ']), - ('Ṷ', &['ṷ']), - ('ṷ', &['Ṷ']), - ('Ṹ', &['ṹ']), - ('ṹ', &['Ṹ']), - ('Ṻ', &['ṻ']), - ('ṻ', &['Ṻ']), - ('Ṽ', &['ṽ']), - ('ṽ', &['Ṽ']), - ('Ṿ', &['ṿ']), - ('ṿ', &['Ṿ']), - ('Ẁ', &['ẁ']), - ('ẁ', &['Ẁ']), - ('Ẃ', &['ẃ']), - ('ẃ', &['Ẃ']), - ('Ẅ', &['ẅ']), - ('ẅ', &['Ẅ']), - ('Ẇ', &['ẇ']), - ('ẇ', &['Ẇ']), - ('Ẉ', &['ẉ']), - ('ẉ', &['Ẉ']), - ('Ẋ', &['ẋ']), - ('ẋ', &['Ẋ']), - ('Ẍ', &['ẍ']), - ('ẍ', &['Ẍ']), - ('Ẏ', &['ẏ']), - ('ẏ', &['Ẏ']), - ('Ẑ', &['ẑ']), - ('ẑ', &['Ẑ']), - ('Ẓ', &['ẓ']), - ('ẓ', &['Ẓ']), - ('Ẕ', &['ẕ']), - ('ẕ', &['Ẕ']), - ('ẛ', &['Ṡ', 'ṡ']), - ('ẞ', &['ß']), - ('Ạ', &['ạ']), - ('ạ', &['Ạ']), - ('Ả', &['ả']), - ('ả', &['Ả']), - ('Ấ', &['ấ']), - ('ấ', &['Ấ']), - ('Ầ', &['ầ']), - ('ầ', &['Ầ']), - ('Ẩ', &['ẩ']), - ('ẩ', &['Ẩ']), - ('Ẫ', &['ẫ']), - ('ẫ', &['Ẫ']), - ('Ậ', &['ậ']), - ('ậ', &['Ậ']), - ('Ắ', &['ắ']), - ('ắ', &['Ắ']), - ('Ằ', &['ằ']), - ('ằ', &['Ằ']), - ('Ẳ', &['ẳ']), - ('ẳ', &['Ẳ']), - ('Ẵ', &['ẵ']), - ('ẵ', &['Ẵ']), - ('Ặ', &['ặ']), - ('ặ', &['Ặ']), - ('Ẹ', &['ẹ']), - ('ẹ', &['Ẹ']), - ('Ẻ', &['ẻ']), - ('ẻ', &['Ẻ']), - ('Ẽ', &['ẽ']), - ('ẽ', &['Ẽ']), - ('Ế', &['ế']), - ('ế', &['Ế']), - ('Ề', &['ề']), - ('ề', &['Ề']), - ('Ể', &['ể']), - ('ể', &['Ể']), - ('Ễ', &['ễ']), - ('ễ', &['Ễ']), - ('Ệ', &['ệ']), - ('ệ', &['Ệ']), - ('Ỉ', &['ỉ']), - ('ỉ', &['Ỉ']), - ('Ị', &['ị']), - ('ị', &['Ị']), - ('Ọ', &['ọ']), - ('ọ', &['Ọ']), - ('Ỏ', &['ỏ']), - ('ỏ', &['Ỏ']), - ('Ố', &['ố']), - ('ố', &['Ố']), - ('Ồ', &['ồ']), - ('ồ', &['Ồ']), - ('Ổ', &['ổ']), - ('ổ', &['Ổ']), - ('Ỗ', &['ỗ']), - ('ỗ', &['Ỗ']), - ('Ộ', &['ộ']), - ('ộ', &['Ộ']), - ('Ớ', &['ớ']), - ('ớ', &['Ớ']), - ('Ờ', &['ờ']), - ('ờ', &['Ờ']), - ('Ở', &['ở']), - ('ở', &['Ở']), - ('Ỡ', &['ỡ']), - ('ỡ', &['Ỡ']), - ('Ợ', &['ợ']), - ('ợ', &['Ợ']), - ('Ụ', &['ụ']), - ('ụ', &['Ụ']), - ('Ủ', &['ủ']), - ('ủ', &['Ủ']), - ('Ứ', &['ứ']), - ('ứ', &['Ứ']), - ('Ừ', &['ừ']), - ('ừ', &['Ừ']), - 
('Ử', &['ử']), - ('ử', &['Ử']), - ('Ữ', &['ữ']), - ('ữ', &['Ữ']), - ('Ự', &['ự']), - ('ự', &['Ự']), - ('Ỳ', &['ỳ']), - ('ỳ', &['Ỳ']), - ('Ỵ', &['ỵ']), - ('ỵ', &['Ỵ']), - ('Ỷ', &['ỷ']), - ('ỷ', &['Ỷ']), - ('Ỹ', &['ỹ']), - ('ỹ', &['Ỹ']), - ('Ỻ', &['ỻ']), - ('ỻ', &['Ỻ']), - ('Ỽ', &['ỽ']), - ('ỽ', &['Ỽ']), - ('Ỿ', &['ỿ']), - ('ỿ', &['Ỿ']), - ('ἀ', &['Ἀ']), - ('ἁ', &['Ἁ']), - ('ἂ', &['Ἂ']), - ('ἃ', &['Ἃ']), - ('ἄ', &['Ἄ']), - ('ἅ', &['Ἅ']), - ('ἆ', &['Ἆ']), - ('ἇ', &['Ἇ']), - ('Ἀ', &['ἀ']), - ('Ἁ', &['ἁ']), - ('Ἂ', &['ἂ']), - ('Ἃ', &['ἃ']), - ('Ἄ', &['ἄ']), - ('Ἅ', &['ἅ']), - ('Ἆ', &['ἆ']), - ('Ἇ', &['ἇ']), - ('ἐ', &['Ἐ']), - ('ἑ', &['Ἑ']), - ('ἒ', &['Ἒ']), - ('ἓ', &['Ἓ']), - ('ἔ', &['Ἔ']), - ('ἕ', &['Ἕ']), - ('Ἐ', &['ἐ']), - ('Ἑ', &['ἑ']), - ('Ἒ', &['ἒ']), - ('Ἓ', &['ἓ']), - ('Ἔ', &['ἔ']), - ('Ἕ', &['ἕ']), - ('ἠ', &['Ἠ']), - ('ἡ', &['Ἡ']), - ('ἢ', &['Ἢ']), - ('ἣ', &['Ἣ']), - ('ἤ', &['Ἤ']), - ('ἥ', &['Ἥ']), - ('ἦ', &['Ἦ']), - ('ἧ', &['Ἧ']), - ('Ἠ', &['ἠ']), - ('Ἡ', &['ἡ']), - ('Ἢ', &['ἢ']), - ('Ἣ', &['ἣ']), - ('Ἤ', &['ἤ']), - ('Ἥ', &['ἥ']), - ('Ἦ', &['ἦ']), - ('Ἧ', &['ἧ']), - ('ἰ', &['Ἰ']), - ('ἱ', &['Ἱ']), - ('ἲ', &['Ἲ']), - ('ἳ', &['Ἳ']), - ('ἴ', &['Ἴ']), - ('ἵ', &['Ἵ']), - ('ἶ', &['Ἶ']), - ('ἷ', &['Ἷ']), - ('Ἰ', &['ἰ']), - ('Ἱ', &['ἱ']), - ('Ἲ', &['ἲ']), - ('Ἳ', &['ἳ']), - ('Ἴ', &['ἴ']), - ('Ἵ', &['ἵ']), - ('Ἶ', &['ἶ']), - ('Ἷ', &['ἷ']), - ('ὀ', &['Ὀ']), - ('ὁ', &['Ὁ']), - ('ὂ', &['Ὂ']), - ('ὃ', &['Ὃ']), - ('ὄ', &['Ὄ']), - ('ὅ', &['Ὅ']), - ('Ὀ', &['ὀ']), - ('Ὁ', &['ὁ']), - ('Ὂ', &['ὂ']), - ('Ὃ', &['ὃ']), - ('Ὄ', &['ὄ']), - ('Ὅ', &['ὅ']), - ('ὑ', &['Ὑ']), - ('ὓ', &['Ὓ']), - ('ὕ', &['Ὕ']), - ('ὗ', &['Ὗ']), - ('Ὑ', &['ὑ']), - ('Ὓ', &['ὓ']), - ('Ὕ', &['ὕ']), - ('Ὗ', &['ὗ']), - ('ὠ', &['Ὠ']), - ('ὡ', &['Ὡ']), - ('ὢ', &['Ὢ']), - ('ὣ', &['Ὣ']), - ('ὤ', &['Ὤ']), - ('ὥ', &['Ὥ']), - ('ὦ', &['Ὦ']), - ('ὧ', &['Ὧ']), - ('Ὠ', &['ὠ']), - ('Ὡ', &['ὡ']), - ('Ὢ', &['ὢ']), - ('Ὣ', &['ὣ']), - ('Ὤ', &['ὤ']), - ('Ὥ', &['ὥ']), - ('Ὦ', &['ὦ']), - ('Ὧ', &['ὧ']), - ('ὰ', &['Ὰ']), - ('ά', &['Ά']), - ('ὲ', &['Ὲ']), - ('έ', &['Έ']), - ('ὴ', &['Ὴ']), - ('ή', &['Ή']), - ('ὶ', &['Ὶ']), - ('ί', &['Ί']), - ('ὸ', &['Ὸ']), - ('ό', &['Ό']), - ('ὺ', &['Ὺ']), - ('ύ', &['Ύ']), - ('ὼ', &['Ὼ']), - ('ώ', &['Ώ']), - ('ᾀ', &['ᾈ']), - ('ᾁ', &['ᾉ']), - ('ᾂ', &['ᾊ']), - ('ᾃ', &['ᾋ']), - ('ᾄ', &['ᾌ']), - ('ᾅ', &['ᾍ']), - ('ᾆ', &['ᾎ']), - ('ᾇ', &['ᾏ']), - ('ᾈ', &['ᾀ']), - ('ᾉ', &['ᾁ']), - ('ᾊ', &['ᾂ']), - ('ᾋ', &['ᾃ']), - ('ᾌ', &['ᾄ']), - ('ᾍ', &['ᾅ']), - ('ᾎ', &['ᾆ']), - ('ᾏ', &['ᾇ']), - ('ᾐ', &['ᾘ']), - ('ᾑ', &['ᾙ']), - ('ᾒ', &['ᾚ']), - ('ᾓ', &['ᾛ']), - ('ᾔ', &['ᾜ']), - ('ᾕ', &['ᾝ']), - ('ᾖ', &['ᾞ']), - ('ᾗ', &['ᾟ']), - ('ᾘ', &['ᾐ']), - ('ᾙ', &['ᾑ']), - ('ᾚ', &['ᾒ']), - ('ᾛ', &['ᾓ']), - ('ᾜ', &['ᾔ']), - ('ᾝ', &['ᾕ']), - ('ᾞ', &['ᾖ']), - ('ᾟ', &['ᾗ']), - ('ᾠ', &['ᾨ']), - ('ᾡ', &['ᾩ']), - ('ᾢ', &['ᾪ']), - ('ᾣ', &['ᾫ']), - ('ᾤ', &['ᾬ']), - ('ᾥ', &['ᾭ']), - ('ᾦ', &['ᾮ']), - ('ᾧ', &['ᾯ']), - ('ᾨ', &['ᾠ']), - ('ᾩ', &['ᾡ']), - ('ᾪ', &['ᾢ']), - ('ᾫ', &['ᾣ']), - ('ᾬ', &['ᾤ']), - ('ᾭ', &['ᾥ']), - ('ᾮ', &['ᾦ']), - ('ᾯ', &['ᾧ']), - ('ᾰ', &['Ᾰ']), - ('ᾱ', &['Ᾱ']), - ('ᾳ', &['ᾼ']), - ('Ᾰ', &['ᾰ']), - ('Ᾱ', &['ᾱ']), - ('Ὰ', &['ὰ']), - ('Ά', &['ά']), - ('ᾼ', &['ᾳ']), - ('ι', &['\u{345}', 'Ι', 'ι']), - ('ῃ', &['ῌ']), - ('Ὲ', &['ὲ']), - ('Έ', &['έ']), - ('Ὴ', &['ὴ']), - ('Ή', &['ή']), - ('ῌ', &['ῃ']), - ('ῐ', &['Ῐ']), - ('ῑ', &['Ῑ']), - ('ΐ', &['ΐ']), - ('Ῐ', &['ῐ']), - ('Ῑ', &['ῑ']), - ('Ὶ', &['ὶ']), - ('Ί', &['ί']), - ('ῠ', &['Ῠ']), - ('ῡ', &['Ῡ']), - ('ΰ', &['ΰ']), - ('ῥ', &['Ῥ']), - ('Ῠ', &['ῠ']), - ('Ῡ', &['ῡ']), - ('Ὺ', &['ὺ']), - ('Ύ', &['ύ']), - 
('Ῥ', &['ῥ']), - ('ῳ', &['ῼ']), - ('Ὸ', &['ὸ']), - ('Ό', &['ό']), - ('Ὼ', &['ὼ']), - ('Ώ', &['ώ']), - ('ῼ', &['ῳ']), - ('Ω', &['Ω', 'ω']), - ('K', &['K', 'k']), - ('Å', &['Å', 'å']), - ('Ⅎ', &['ⅎ']), - ('ⅎ', &['Ⅎ']), - ('Ⅰ', &['ⅰ']), - ('Ⅱ', &['ⅱ']), - ('Ⅲ', &['ⅲ']), - ('Ⅳ', &['ⅳ']), - ('Ⅴ', &['ⅴ']), - ('Ⅵ', &['ⅵ']), - ('Ⅶ', &['ⅶ']), - ('Ⅷ', &['ⅷ']), - ('Ⅸ', &['ⅸ']), - ('Ⅹ', &['ⅹ']), - ('Ⅺ', &['ⅺ']), - ('Ⅻ', &['ⅻ']), - ('Ⅼ', &['ⅼ']), - ('Ⅽ', &['ⅽ']), - ('Ⅾ', &['ⅾ']), - ('Ⅿ', &['ⅿ']), - ('ⅰ', &['Ⅰ']), - ('ⅱ', &['Ⅱ']), - ('ⅲ', &['Ⅲ']), - ('ⅳ', &['Ⅳ']), - ('ⅴ', &['Ⅴ']), - ('ⅵ', &['Ⅵ']), - ('ⅶ', &['Ⅶ']), - ('ⅷ', &['Ⅷ']), - ('ⅸ', &['Ⅸ']), - ('ⅹ', &['Ⅹ']), - ('ⅺ', &['Ⅺ']), - ('ⅻ', &['Ⅻ']), - ('ⅼ', &['Ⅼ']), - ('ⅽ', &['Ⅽ']), - ('ⅾ', &['Ⅾ']), - ('ⅿ', &['Ⅿ']), - ('Ↄ', &['ↄ']), - ('ↄ', &['Ↄ']), - ('Ⓐ', &['ⓐ']), - ('Ⓑ', &['ⓑ']), - ('Ⓒ', &['ⓒ']), - ('Ⓓ', &['ⓓ']), - ('Ⓔ', &['ⓔ']), - ('Ⓕ', &['ⓕ']), - ('Ⓖ', &['ⓖ']), - ('Ⓗ', &['ⓗ']), - ('Ⓘ', &['ⓘ']), - ('Ⓙ', &['ⓙ']), - ('Ⓚ', &['ⓚ']), - ('Ⓛ', &['ⓛ']), - ('Ⓜ', &['ⓜ']), - ('Ⓝ', &['ⓝ']), - ('Ⓞ', &['ⓞ']), - ('Ⓟ', &['ⓟ']), - ('Ⓠ', &['ⓠ']), - ('Ⓡ', &['ⓡ']), - ('Ⓢ', &['ⓢ']), - ('Ⓣ', &['ⓣ']), - ('Ⓤ', &['ⓤ']), - ('Ⓥ', &['ⓥ']), - ('Ⓦ', &['ⓦ']), - ('Ⓧ', &['ⓧ']), - ('Ⓨ', &['ⓨ']), - ('Ⓩ', &['ⓩ']), - ('ⓐ', &['Ⓐ']), - ('ⓑ', &['Ⓑ']), - ('ⓒ', &['Ⓒ']), - ('ⓓ', &['Ⓓ']), - ('ⓔ', &['Ⓔ']), - ('ⓕ', &['Ⓕ']), - ('ⓖ', &['Ⓖ']), - ('ⓗ', &['Ⓗ']), - ('ⓘ', &['Ⓘ']), - ('ⓙ', &['Ⓙ']), - ('ⓚ', &['Ⓚ']), - ('ⓛ', &['Ⓛ']), - ('ⓜ', &['Ⓜ']), - ('ⓝ', &['Ⓝ']), - ('ⓞ', &['Ⓞ']), - ('ⓟ', &['Ⓟ']), - ('ⓠ', &['Ⓠ']), - ('ⓡ', &['Ⓡ']), - ('ⓢ', &['Ⓢ']), - ('ⓣ', &['Ⓣ']), - ('ⓤ', &['Ⓤ']), - ('ⓥ', &['Ⓥ']), - ('ⓦ', &['Ⓦ']), - ('ⓧ', &['Ⓧ']), - ('ⓨ', &['Ⓨ']), - ('ⓩ', &['Ⓩ']), - ('Ⰰ', &['ⰰ']), - ('Ⰱ', &['ⰱ']), - ('Ⰲ', &['ⰲ']), - ('Ⰳ', &['ⰳ']), - ('Ⰴ', &['ⰴ']), - ('Ⰵ', &['ⰵ']), - ('Ⰶ', &['ⰶ']), - ('Ⰷ', &['ⰷ']), - ('Ⰸ', &['ⰸ']), - ('Ⰹ', &['ⰹ']), - ('Ⰺ', &['ⰺ']), - ('Ⰻ', &['ⰻ']), - ('Ⰼ', &['ⰼ']), - ('Ⰽ', &['ⰽ']), - ('Ⰾ', &['ⰾ']), - ('Ⰿ', &['ⰿ']), - ('Ⱀ', &['ⱀ']), - ('Ⱁ', &['ⱁ']), - ('Ⱂ', &['ⱂ']), - ('Ⱃ', &['ⱃ']), - ('Ⱄ', &['ⱄ']), - ('Ⱅ', &['ⱅ']), - ('Ⱆ', &['ⱆ']), - ('Ⱇ', &['ⱇ']), - ('Ⱈ', &['ⱈ']), - ('Ⱉ', &['ⱉ']), - ('Ⱊ', &['ⱊ']), - ('Ⱋ', &['ⱋ']), - ('Ⱌ', &['ⱌ']), - ('Ⱍ', &['ⱍ']), - ('Ⱎ', &['ⱎ']), - ('Ⱏ', &['ⱏ']), - ('Ⱐ', &['ⱐ']), - ('Ⱑ', &['ⱑ']), - ('Ⱒ', &['ⱒ']), - ('Ⱓ', &['ⱓ']), - ('Ⱔ', &['ⱔ']), - ('Ⱕ', &['ⱕ']), - ('Ⱖ', &['ⱖ']), - ('Ⱗ', &['ⱗ']), - ('Ⱘ', &['ⱘ']), - ('Ⱙ', &['ⱙ']), - ('Ⱚ', &['ⱚ']), - ('Ⱛ', &['ⱛ']), - ('Ⱜ', &['ⱜ']), - ('Ⱝ', &['ⱝ']), - ('Ⱞ', &['ⱞ']), - ('Ⱟ', &['ⱟ']), - ('ⰰ', &['Ⰰ']), - ('ⰱ', &['Ⰱ']), - ('ⰲ', &['Ⰲ']), - ('ⰳ', &['Ⰳ']), - ('ⰴ', &['Ⰴ']), - ('ⰵ', &['Ⰵ']), - ('ⰶ', &['Ⰶ']), - ('ⰷ', &['Ⰷ']), - ('ⰸ', &['Ⰸ']), - ('ⰹ', &['Ⰹ']), - ('ⰺ', &['Ⰺ']), - ('ⰻ', &['Ⰻ']), - ('ⰼ', &['Ⰼ']), - ('ⰽ', &['Ⰽ']), - ('ⰾ', &['Ⰾ']), - ('ⰿ', &['Ⰿ']), - ('ⱀ', &['Ⱀ']), - ('ⱁ', &['Ⱁ']), - ('ⱂ', &['Ⱂ']), - ('ⱃ', &['Ⱃ']), - ('ⱄ', &['Ⱄ']), - ('ⱅ', &['Ⱅ']), - ('ⱆ', &['Ⱆ']), - ('ⱇ', &['Ⱇ']), - ('ⱈ', &['Ⱈ']), - ('ⱉ', &['Ⱉ']), - ('ⱊ', &['Ⱊ']), - ('ⱋ', &['Ⱋ']), - ('ⱌ', &['Ⱌ']), - ('ⱍ', &['Ⱍ']), - ('ⱎ', &['Ⱎ']), - ('ⱏ', &['Ⱏ']), - ('ⱐ', &['Ⱐ']), - ('ⱑ', &['Ⱑ']), - ('ⱒ', &['Ⱒ']), - ('ⱓ', &['Ⱓ']), - ('ⱔ', &['Ⱔ']), - ('ⱕ', &['Ⱕ']), - ('ⱖ', &['Ⱖ']), - ('ⱗ', &['Ⱗ']), - ('ⱘ', &['Ⱘ']), - ('ⱙ', &['Ⱙ']), - ('ⱚ', &['Ⱚ']), - ('ⱛ', &['Ⱛ']), - ('ⱜ', &['Ⱜ']), - ('ⱝ', &['Ⱝ']), - ('ⱞ', &['Ⱞ']), - ('ⱟ', &['Ⱟ']), - ('Ⱡ', &['ⱡ']), - ('ⱡ', &['Ⱡ']), - ('Ɫ', &['ɫ']), - ('Ᵽ', &['ᵽ']), - ('Ɽ', &['ɽ']), - ('ⱥ', &['Ⱥ']), - ('ⱦ', &['Ⱦ']), - ('Ⱨ', &['ⱨ']), - ('ⱨ', &['Ⱨ']), - ('Ⱪ', &['ⱪ']), - ('ⱪ', &['Ⱪ']), - ('Ⱬ', &['ⱬ']), - ('ⱬ', &['Ⱬ']), - ('Ɑ', &['ɑ']), - 
('Ɱ', &['ɱ']), - ('Ɐ', &['ɐ']), - ('Ɒ', &['ɒ']), - ('Ⱳ', &['ⱳ']), - ('ⱳ', &['Ⱳ']), - ('Ⱶ', &['ⱶ']), - ('ⱶ', &['Ⱶ']), - ('Ȿ', &['ȿ']), - ('Ɀ', &['ɀ']), - ('Ⲁ', &['ⲁ']), - ('ⲁ', &['Ⲁ']), - ('Ⲃ', &['ⲃ']), - ('ⲃ', &['Ⲃ']), - ('Ⲅ', &['ⲅ']), - ('ⲅ', &['Ⲅ']), - ('Ⲇ', &['ⲇ']), - ('ⲇ', &['Ⲇ']), - ('Ⲉ', &['ⲉ']), - ('ⲉ', &['Ⲉ']), - ('Ⲋ', &['ⲋ']), - ('ⲋ', &['Ⲋ']), - ('Ⲍ', &['ⲍ']), - ('ⲍ', &['Ⲍ']), - ('Ⲏ', &['ⲏ']), - ('ⲏ', &['Ⲏ']), - ('Ⲑ', &['ⲑ']), - ('ⲑ', &['Ⲑ']), - ('Ⲓ', &['ⲓ']), - ('ⲓ', &['Ⲓ']), - ('Ⲕ', &['ⲕ']), - ('ⲕ', &['Ⲕ']), - ('Ⲗ', &['ⲗ']), - ('ⲗ', &['Ⲗ']), - ('Ⲙ', &['ⲙ']), - ('ⲙ', &['Ⲙ']), - ('Ⲛ', &['ⲛ']), - ('ⲛ', &['Ⲛ']), - ('Ⲝ', &['ⲝ']), - ('ⲝ', &['Ⲝ']), - ('Ⲟ', &['ⲟ']), - ('ⲟ', &['Ⲟ']), - ('Ⲡ', &['ⲡ']), - ('ⲡ', &['Ⲡ']), - ('Ⲣ', &['ⲣ']), - ('ⲣ', &['Ⲣ']), - ('Ⲥ', &['ⲥ']), - ('ⲥ', &['Ⲥ']), - ('Ⲧ', &['ⲧ']), - ('ⲧ', &['Ⲧ']), - ('Ⲩ', &['ⲩ']), - ('ⲩ', &['Ⲩ']), - ('Ⲫ', &['ⲫ']), - ('ⲫ', &['Ⲫ']), - ('Ⲭ', &['ⲭ']), - ('ⲭ', &['Ⲭ']), - ('Ⲯ', &['ⲯ']), - ('ⲯ', &['Ⲯ']), - ('Ⲱ', &['ⲱ']), - ('ⲱ', &['Ⲱ']), - ('Ⲳ', &['ⲳ']), - ('ⲳ', &['Ⲳ']), - ('Ⲵ', &['ⲵ']), - ('ⲵ', &['Ⲵ']), - ('Ⲷ', &['ⲷ']), - ('ⲷ', &['Ⲷ']), - ('Ⲹ', &['ⲹ']), - ('ⲹ', &['Ⲹ']), - ('Ⲻ', &['ⲻ']), - ('ⲻ', &['Ⲻ']), - ('Ⲽ', &['ⲽ']), - ('ⲽ', &['Ⲽ']), - ('Ⲿ', &['ⲿ']), - ('ⲿ', &['Ⲿ']), - ('Ⳁ', &['ⳁ']), - ('ⳁ', &['Ⳁ']), - ('Ⳃ', &['ⳃ']), - ('ⳃ', &['Ⳃ']), - ('Ⳅ', &['ⳅ']), - ('ⳅ', &['Ⳅ']), - ('Ⳇ', &['ⳇ']), - ('ⳇ', &['Ⳇ']), - ('Ⳉ', &['ⳉ']), - ('ⳉ', &['Ⳉ']), - ('Ⳋ', &['ⳋ']), - ('ⳋ', &['Ⳋ']), - ('Ⳍ', &['ⳍ']), - ('ⳍ', &['Ⳍ']), - ('Ⳏ', &['ⳏ']), - ('ⳏ', &['Ⳏ']), - ('Ⳑ', &['ⳑ']), - ('ⳑ', &['Ⳑ']), - ('Ⳓ', &['ⳓ']), - ('ⳓ', &['Ⳓ']), - ('Ⳕ', &['ⳕ']), - ('ⳕ', &['Ⳕ']), - ('Ⳗ', &['ⳗ']), - ('ⳗ', &['Ⳗ']), - ('Ⳙ', &['ⳙ']), - ('ⳙ', &['Ⳙ']), - ('Ⳛ', &['ⳛ']), - ('ⳛ', &['Ⳛ']), - ('Ⳝ', &['ⳝ']), - ('ⳝ', &['Ⳝ']), - ('Ⳟ', &['ⳟ']), - ('ⳟ', &['Ⳟ']), - ('Ⳡ', &['ⳡ']), - ('ⳡ', &['Ⳡ']), - ('Ⳣ', &['ⳣ']), - ('ⳣ', &['Ⳣ']), - ('Ⳬ', &['ⳬ']), - ('ⳬ', &['Ⳬ']), - ('Ⳮ', &['ⳮ']), - ('ⳮ', &['Ⳮ']), - ('Ⳳ', &['ⳳ']), - ('ⳳ', &['Ⳳ']), - ('ⴀ', &['Ⴀ']), - ('ⴁ', &['Ⴁ']), - ('ⴂ', &['Ⴂ']), - ('ⴃ', &['Ⴃ']), - ('ⴄ', &['Ⴄ']), - ('ⴅ', &['Ⴅ']), - ('ⴆ', &['Ⴆ']), - ('ⴇ', &['Ⴇ']), - ('ⴈ', &['Ⴈ']), - ('ⴉ', &['Ⴉ']), - ('ⴊ', &['Ⴊ']), - ('ⴋ', &['Ⴋ']), - ('ⴌ', &['Ⴌ']), - ('ⴍ', &['Ⴍ']), - ('ⴎ', &['Ⴎ']), - ('ⴏ', &['Ⴏ']), - ('ⴐ', &['Ⴐ']), - ('ⴑ', &['Ⴑ']), - ('ⴒ', &['Ⴒ']), - ('ⴓ', &['Ⴓ']), - ('ⴔ', &['Ⴔ']), - ('ⴕ', &['Ⴕ']), - ('ⴖ', &['Ⴖ']), - ('ⴗ', &['Ⴗ']), - ('ⴘ', &['Ⴘ']), - ('ⴙ', &['Ⴙ']), - ('ⴚ', &['Ⴚ']), - ('ⴛ', &['Ⴛ']), - ('ⴜ', &['Ⴜ']), - ('ⴝ', &['Ⴝ']), - ('ⴞ', &['Ⴞ']), - ('ⴟ', &['Ⴟ']), - ('ⴠ', &['Ⴠ']), - ('ⴡ', &['Ⴡ']), - ('ⴢ', &['Ⴢ']), - ('ⴣ', &['Ⴣ']), - ('ⴤ', &['Ⴤ']), - ('ⴥ', &['Ⴥ']), - ('ⴧ', &['Ⴧ']), - ('ⴭ', &['Ⴭ']), - ('Ꙁ', &['ꙁ']), - ('ꙁ', &['Ꙁ']), - ('Ꙃ', &['ꙃ']), - ('ꙃ', &['Ꙃ']), - ('Ꙅ', &['ꙅ']), - ('ꙅ', &['Ꙅ']), - ('Ꙇ', &['ꙇ']), - ('ꙇ', &['Ꙇ']), - ('Ꙉ', &['ꙉ']), - ('ꙉ', &['Ꙉ']), - ('Ꙋ', &['ᲈ', 'ꙋ']), - ('ꙋ', &['ᲈ', 'Ꙋ']), - ('Ꙍ', &['ꙍ']), - ('ꙍ', &['Ꙍ']), - ('Ꙏ', &['ꙏ']), - ('ꙏ', &['Ꙏ']), - ('Ꙑ', &['ꙑ']), - ('ꙑ', &['Ꙑ']), - ('Ꙓ', &['ꙓ']), - ('ꙓ', &['Ꙓ']), - ('Ꙕ', &['ꙕ']), - ('ꙕ', &['Ꙕ']), - ('Ꙗ', &['ꙗ']), - ('ꙗ', &['Ꙗ']), - ('Ꙙ', &['ꙙ']), - ('ꙙ', &['Ꙙ']), - ('Ꙛ', &['ꙛ']), - ('ꙛ', &['Ꙛ']), - ('Ꙝ', &['ꙝ']), - ('ꙝ', &['Ꙝ']), - ('Ꙟ', &['ꙟ']), - ('ꙟ', &['Ꙟ']), - ('Ꙡ', &['ꙡ']), - ('ꙡ', &['Ꙡ']), - ('Ꙣ', &['ꙣ']), - ('ꙣ', &['Ꙣ']), - ('Ꙥ', &['ꙥ']), - ('ꙥ', &['Ꙥ']), - ('Ꙧ', &['ꙧ']), - ('ꙧ', &['Ꙧ']), - ('Ꙩ', &['ꙩ']), - ('ꙩ', &['Ꙩ']), - ('Ꙫ', &['ꙫ']), - ('ꙫ', &['Ꙫ']), - ('Ꙭ', &['ꙭ']), - ('ꙭ', &['Ꙭ']), - ('Ꚁ', &['ꚁ']), - ('ꚁ', &['Ꚁ']), - ('Ꚃ', &['ꚃ']), - ('ꚃ', &['Ꚃ']), - ('Ꚅ', &['ꚅ']), - ('ꚅ', &['Ꚅ']), - ('Ꚇ', &['ꚇ']), - ('ꚇ', 
&['Ꚇ']), - ('Ꚉ', &['ꚉ']), - ('ꚉ', &['Ꚉ']), - ('Ꚋ', &['ꚋ']), - ('ꚋ', &['Ꚋ']), - ('Ꚍ', &['ꚍ']), - ('ꚍ', &['Ꚍ']), - ('Ꚏ', &['ꚏ']), - ('ꚏ', &['Ꚏ']), - ('Ꚑ', &['ꚑ']), - ('ꚑ', &['Ꚑ']), - ('Ꚓ', &['ꚓ']), - ('ꚓ', &['Ꚓ']), - ('Ꚕ', &['ꚕ']), - ('ꚕ', &['Ꚕ']), - ('Ꚗ', &['ꚗ']), - ('ꚗ', &['Ꚗ']), - ('Ꚙ', &['ꚙ']), - ('ꚙ', &['Ꚙ']), - ('Ꚛ', &['ꚛ']), - ('ꚛ', &['Ꚛ']), - ('Ꜣ', &['ꜣ']), - ('ꜣ', &['Ꜣ']), - ('Ꜥ', &['ꜥ']), - ('ꜥ', &['Ꜥ']), - ('Ꜧ', &['ꜧ']), - ('ꜧ', &['Ꜧ']), - ('Ꜩ', &['ꜩ']), - ('ꜩ', &['Ꜩ']), - ('Ꜫ', &['ꜫ']), - ('ꜫ', &['Ꜫ']), - ('Ꜭ', &['ꜭ']), - ('ꜭ', &['Ꜭ']), - ('Ꜯ', &['ꜯ']), - ('ꜯ', &['Ꜯ']), - ('Ꜳ', &['ꜳ']), - ('ꜳ', &['Ꜳ']), - ('Ꜵ', &['ꜵ']), - ('ꜵ', &['Ꜵ']), - ('Ꜷ', &['ꜷ']), - ('ꜷ', &['Ꜷ']), - ('Ꜹ', &['ꜹ']), - ('ꜹ', &['Ꜹ']), - ('Ꜻ', &['ꜻ']), - ('ꜻ', &['Ꜻ']), - ('Ꜽ', &['ꜽ']), - ('ꜽ', &['Ꜽ']), - ('Ꜿ', &['ꜿ']), - ('ꜿ', &['Ꜿ']), - ('Ꝁ', &['ꝁ']), - ('ꝁ', &['Ꝁ']), - ('Ꝃ', &['ꝃ']), - ('ꝃ', &['Ꝃ']), - ('Ꝅ', &['ꝅ']), - ('ꝅ', &['Ꝅ']), - ('Ꝇ', &['ꝇ']), - ('ꝇ', &['Ꝇ']), - ('Ꝉ', &['ꝉ']), - ('ꝉ', &['Ꝉ']), - ('Ꝋ', &['ꝋ']), - ('ꝋ', &['Ꝋ']), - ('Ꝍ', &['ꝍ']), - ('ꝍ', &['Ꝍ']), - ('Ꝏ', &['ꝏ']), - ('ꝏ', &['Ꝏ']), - ('Ꝑ', &['ꝑ']), - ('ꝑ', &['Ꝑ']), - ('Ꝓ', &['ꝓ']), - ('ꝓ', &['Ꝓ']), - ('Ꝕ', &['ꝕ']), - ('ꝕ', &['Ꝕ']), - ('Ꝗ', &['ꝗ']), - ('ꝗ', &['Ꝗ']), - ('Ꝙ', &['ꝙ']), - ('ꝙ', &['Ꝙ']), - ('Ꝛ', &['ꝛ']), - ('ꝛ', &['Ꝛ']), - ('Ꝝ', &['ꝝ']), - ('ꝝ', &['Ꝝ']), - ('Ꝟ', &['ꝟ']), - ('ꝟ', &['Ꝟ']), - ('Ꝡ', &['ꝡ']), - ('ꝡ', &['Ꝡ']), - ('Ꝣ', &['ꝣ']), - ('ꝣ', &['Ꝣ']), - ('Ꝥ', &['ꝥ']), - ('ꝥ', &['Ꝥ']), - ('Ꝧ', &['ꝧ']), - ('ꝧ', &['Ꝧ']), - ('Ꝩ', &['ꝩ']), - ('ꝩ', &['Ꝩ']), - ('Ꝫ', &['ꝫ']), - ('ꝫ', &['Ꝫ']), - ('Ꝭ', &['ꝭ']), - ('ꝭ', &['Ꝭ']), - ('Ꝯ', &['ꝯ']), - ('ꝯ', &['Ꝯ']), - ('Ꝺ', &['ꝺ']), - ('ꝺ', &['Ꝺ']), - ('Ꝼ', &['ꝼ']), - ('ꝼ', &['Ꝼ']), - ('Ᵹ', &['ᵹ']), - ('Ꝿ', &['ꝿ']), - ('ꝿ', &['Ꝿ']), - ('Ꞁ', &['ꞁ']), - ('ꞁ', &['Ꞁ']), - ('Ꞃ', &['ꞃ']), - ('ꞃ', &['Ꞃ']), - ('Ꞅ', &['ꞅ']), - ('ꞅ', &['Ꞅ']), - ('Ꞇ', &['ꞇ']), - ('ꞇ', &['Ꞇ']), - ('Ꞌ', &['ꞌ']), - ('ꞌ', &['Ꞌ']), - ('Ɥ', &['ɥ']), - ('Ꞑ', &['ꞑ']), - ('ꞑ', &['Ꞑ']), - ('Ꞓ', &['ꞓ']), - ('ꞓ', &['Ꞓ']), - ('ꞔ', &['Ꞔ']), - ('Ꞗ', &['ꞗ']), - ('ꞗ', &['Ꞗ']), - ('Ꞙ', &['ꞙ']), - ('ꞙ', &['Ꞙ']), - ('Ꞛ', &['ꞛ']), - ('ꞛ', &['Ꞛ']), - ('Ꞝ', &['ꞝ']), - ('ꞝ', &['Ꞝ']), - ('Ꞟ', &['ꞟ']), - ('ꞟ', &['Ꞟ']), - ('Ꞡ', &['ꞡ']), - ('ꞡ', &['Ꞡ']), - ('Ꞣ', &['ꞣ']), - ('ꞣ', &['Ꞣ']), - ('Ꞥ', &['ꞥ']), - ('ꞥ', &['Ꞥ']), - ('Ꞧ', &['ꞧ']), - ('ꞧ', &['Ꞧ']), - ('Ꞩ', &['ꞩ']), - ('ꞩ', &['Ꞩ']), - ('Ɦ', &['ɦ']), - ('Ɜ', &['ɜ']), - ('Ɡ', &['ɡ']), - ('Ɬ', &['ɬ']), - ('Ɪ', &['ɪ']), - ('Ʞ', &['ʞ']), - ('Ʇ', &['ʇ']), - ('Ʝ', &['ʝ']), - ('Ꭓ', &['ꭓ']), - ('Ꞵ', &['ꞵ']), - ('ꞵ', &['Ꞵ']), - ('Ꞷ', &['ꞷ']), - ('ꞷ', &['Ꞷ']), - ('Ꞹ', &['ꞹ']), - ('ꞹ', &['Ꞹ']), - ('Ꞻ', &['ꞻ']), - ('ꞻ', &['Ꞻ']), - ('Ꞽ', &['ꞽ']), - ('ꞽ', &['Ꞽ']), - ('Ꞿ', &['ꞿ']), - ('ꞿ', &['Ꞿ']), - ('Ꟁ', &['ꟁ']), - ('ꟁ', &['Ꟁ']), - ('Ꟃ', &['ꟃ']), - ('ꟃ', &['Ꟃ']), - ('Ꞔ', &['ꞔ']), - ('Ʂ', &['ʂ']), - ('Ᶎ', &['ᶎ']), - ('Ꟈ', &['ꟈ']), - ('ꟈ', &['Ꟈ']), - ('Ꟊ', &['ꟊ']), - ('ꟊ', &['Ꟊ']), - ('Ɤ', &['ɤ']), - ('Ꟍ', &['ꟍ']), - ('ꟍ', &['Ꟍ']), - ('Ꟑ', &['ꟑ']), - ('ꟑ', &['Ꟑ']), - ('Ꟗ', &['ꟗ']), - ('ꟗ', &['Ꟗ']), - ('Ꟙ', &['ꟙ']), - ('ꟙ', &['Ꟙ']), - ('Ꟛ', &['ꟛ']), - ('ꟛ', &['Ꟛ']), - ('Ƛ', &['ƛ']), - ('Ꟶ', &['ꟶ']), - ('ꟶ', &['Ꟶ']), - ('ꭓ', &['Ꭓ']), - ('ꭰ', &['Ꭰ']), - ('ꭱ', &['Ꭱ']), - ('ꭲ', &['Ꭲ']), - ('ꭳ', &['Ꭳ']), - ('ꭴ', &['Ꭴ']), - ('ꭵ', &['Ꭵ']), - ('ꭶ', &['Ꭶ']), - ('ꭷ', &['Ꭷ']), - ('ꭸ', &['Ꭸ']), - ('ꭹ', &['Ꭹ']), - ('ꭺ', &['Ꭺ']), - ('ꭻ', &['Ꭻ']), - ('ꭼ', &['Ꭼ']), - ('ꭽ', &['Ꭽ']), - ('ꭾ', &['Ꭾ']), - ('ꭿ', &['Ꭿ']), - ('ꮀ', &['Ꮀ']), - ('ꮁ', &['Ꮁ']), - ('ꮂ', &['Ꮂ']), - ('ꮃ', &['Ꮃ']), - ('ꮄ', &['Ꮄ']), - ('ꮅ', &['Ꮅ']), - ('ꮆ', 
&['Ꮆ']), - ('ꮇ', &['Ꮇ']), - ('ꮈ', &['Ꮈ']), - ('ꮉ', &['Ꮉ']), - ('ꮊ', &['Ꮊ']), - ('ꮋ', &['Ꮋ']), - ('ꮌ', &['Ꮌ']), - ('ꮍ', &['Ꮍ']), - ('ꮎ', &['Ꮎ']), - ('ꮏ', &['Ꮏ']), - ('ꮐ', &['Ꮐ']), - ('ꮑ', &['Ꮑ']), - ('ꮒ', &['Ꮒ']), - ('ꮓ', &['Ꮓ']), - ('ꮔ', &['Ꮔ']), - ('ꮕ', &['Ꮕ']), - ('ꮖ', &['Ꮖ']), - ('ꮗ', &['Ꮗ']), - ('ꮘ', &['Ꮘ']), - ('ꮙ', &['Ꮙ']), - ('ꮚ', &['Ꮚ']), - ('ꮛ', &['Ꮛ']), - ('ꮜ', &['Ꮜ']), - ('ꮝ', &['Ꮝ']), - ('ꮞ', &['Ꮞ']), - ('ꮟ', &['Ꮟ']), - ('ꮠ', &['Ꮠ']), - ('ꮡ', &['Ꮡ']), - ('ꮢ', &['Ꮢ']), - ('ꮣ', &['Ꮣ']), - ('ꮤ', &['Ꮤ']), - ('ꮥ', &['Ꮥ']), - ('ꮦ', &['Ꮦ']), - ('ꮧ', &['Ꮧ']), - ('ꮨ', &['Ꮨ']), - ('ꮩ', &['Ꮩ']), - ('ꮪ', &['Ꮪ']), - ('ꮫ', &['Ꮫ']), - ('ꮬ', &['Ꮬ']), - ('ꮭ', &['Ꮭ']), - ('ꮮ', &['Ꮮ']), - ('ꮯ', &['Ꮯ']), - ('ꮰ', &['Ꮰ']), - ('ꮱ', &['Ꮱ']), - ('ꮲ', &['Ꮲ']), - ('ꮳ', &['Ꮳ']), - ('ꮴ', &['Ꮴ']), - ('ꮵ', &['Ꮵ']), - ('ꮶ', &['Ꮶ']), - ('ꮷ', &['Ꮷ']), - ('ꮸ', &['Ꮸ']), - ('ꮹ', &['Ꮹ']), - ('ꮺ', &['Ꮺ']), - ('ꮻ', &['Ꮻ']), - ('ꮼ', &['Ꮼ']), - ('ꮽ', &['Ꮽ']), - ('ꮾ', &['Ꮾ']), - ('ꮿ', &['Ꮿ']), - ('ſt', &['st']), - ('st', &['ſt']), - ('A', &['a']), - ('B', &['b']), - ('C', &['c']), - ('D', &['d']), - ('E', &['e']), - ('F', &['f']), - ('G', &['g']), - ('H', &['h']), - ('I', &['i']), - ('J', &['j']), - ('K', &['k']), - ('L', &['l']), - ('M', &['m']), - ('N', &['n']), - ('O', &['o']), - ('P', &['p']), - ('Q', &['q']), - ('R', &['r']), - ('S', &['s']), - ('T', &['t']), - ('U', &['u']), - ('V', &['v']), - ('W', &['w']), - ('X', &['x']), - ('Y', &['y']), - ('Z', &['z']), - ('a', &['A']), - ('b', &['B']), - ('c', &['C']), - ('d', &['D']), - ('e', &['E']), - ('f', &['F']), - ('g', &['G']), - ('h', &['H']), - ('i', &['I']), - ('j', &['J']), - ('k', &['K']), - ('l', &['L']), - ('m', &['M']), - ('n', &['N']), - ('o', &['O']), - ('p', &['P']), - ('q', &['Q']), - ('r', &['R']), - ('s', &['S']), - ('t', &['T']), - ('u', &['U']), - ('v', &['V']), - ('w', &['W']), - ('x', &['X']), - ('y', &['Y']), - ('z', &['Z']), - ('𐐀', &['𐐨']), - ('𐐁', &['𐐩']), - ('𐐂', &['𐐪']), - ('𐐃', &['𐐫']), - ('𐐄', &['𐐬']), - ('𐐅', &['𐐭']), - ('𐐆', &['𐐮']), - ('𐐇', &['𐐯']), - ('𐐈', &['𐐰']), - ('𐐉', &['𐐱']), - ('𐐊', &['𐐲']), - ('𐐋', &['𐐳']), - ('𐐌', &['𐐴']), - ('𐐍', &['𐐵']), - ('𐐎', &['𐐶']), - ('𐐏', &['𐐷']), - ('𐐐', &['𐐸']), - ('𐐑', &['𐐹']), - ('𐐒', &['𐐺']), - ('𐐓', &['𐐻']), - ('𐐔', &['𐐼']), - ('𐐕', &['𐐽']), - ('𐐖', &['𐐾']), - ('𐐗', &['𐐿']), - ('𐐘', &['𐑀']), - ('𐐙', &['𐑁']), - ('𐐚', &['𐑂']), - ('𐐛', &['𐑃']), - ('𐐜', &['𐑄']), - ('𐐝', &['𐑅']), - ('𐐞', &['𐑆']), - ('𐐟', &['𐑇']), - ('𐐠', &['𐑈']), - ('𐐡', &['𐑉']), - ('𐐢', &['𐑊']), - ('𐐣', &['𐑋']), - ('𐐤', &['𐑌']), - ('𐐥', &['𐑍']), - ('𐐦', &['𐑎']), - ('𐐧', &['𐑏']), - ('𐐨', &['𐐀']), - ('𐐩', &['𐐁']), - ('𐐪', &['𐐂']), - ('𐐫', &['𐐃']), - ('𐐬', &['𐐄']), - ('𐐭', &['𐐅']), - ('𐐮', &['𐐆']), - ('𐐯', &['𐐇']), - ('𐐰', &['𐐈']), - ('𐐱', &['𐐉']), - ('𐐲', &['𐐊']), - ('𐐳', &['𐐋']), - ('𐐴', &['𐐌']), - ('𐐵', &['𐐍']), - ('𐐶', &['𐐎']), - ('𐐷', &['𐐏']), - ('𐐸', &['𐐐']), - ('𐐹', &['𐐑']), - ('𐐺', &['𐐒']), - ('𐐻', &['𐐓']), - ('𐐼', &['𐐔']), - ('𐐽', &['𐐕']), - ('𐐾', &['𐐖']), - ('𐐿', &['𐐗']), - ('𐑀', &['𐐘']), - ('𐑁', &['𐐙']), - ('𐑂', &['𐐚']), - ('𐑃', &['𐐛']), - ('𐑄', &['𐐜']), - ('𐑅', &['𐐝']), - ('𐑆', &['𐐞']), - ('𐑇', &['𐐟']), - ('𐑈', &['𐐠']), - ('𐑉', &['𐐡']), - ('𐑊', &['𐐢']), - ('𐑋', &['𐐣']), - ('𐑌', &['𐐤']), - ('𐑍', &['𐐥']), - ('𐑎', &['𐐦']), - ('𐑏', &['𐐧']), - ('𐒰', &['𐓘']), - ('𐒱', &['𐓙']), - ('𐒲', &['𐓚']), - ('𐒳', &['𐓛']), - ('𐒴', &['𐓜']), - ('𐒵', &['𐓝']), - ('𐒶', &['𐓞']), - ('𐒷', &['𐓟']), - ('𐒸', &['𐓠']), - ('𐒹', &['𐓡']), - ('𐒺', &['𐓢']), - ('𐒻', &['𐓣']), - ('𐒼', &['𐓤']), - ('𐒽', &['𐓥']), - ('𐒾', &['𐓦']), - ('𐒿', &['𐓧']), - ('𐓀', &['𐓨']), - 
('𐓁', &['𐓩']), - ('𐓂', &['𐓪']), - ('𐓃', &['𐓫']), - ('𐓄', &['𐓬']), - ('𐓅', &['𐓭']), - ('𐓆', &['𐓮']), - ('𐓇', &['𐓯']), - ('𐓈', &['𐓰']), - ('𐓉', &['𐓱']), - ('𐓊', &['𐓲']), - ('𐓋', &['𐓳']), - ('𐓌', &['𐓴']), - ('𐓍', &['𐓵']), - ('𐓎', &['𐓶']), - ('𐓏', &['𐓷']), - ('𐓐', &['𐓸']), - ('𐓑', &['𐓹']), - ('𐓒', &['𐓺']), - ('𐓓', &['𐓻']), - ('𐓘', &['𐒰']), - ('𐓙', &['𐒱']), - ('𐓚', &['𐒲']), - ('𐓛', &['𐒳']), - ('𐓜', &['𐒴']), - ('𐓝', &['𐒵']), - ('𐓞', &['𐒶']), - ('𐓟', &['𐒷']), - ('𐓠', &['𐒸']), - ('𐓡', &['𐒹']), - ('𐓢', &['𐒺']), - ('𐓣', &['𐒻']), - ('𐓤', &['𐒼']), - ('𐓥', &['𐒽']), - ('𐓦', &['𐒾']), - ('𐓧', &['𐒿']), - ('𐓨', &['𐓀']), - ('𐓩', &['𐓁']), - ('𐓪', &['𐓂']), - ('𐓫', &['𐓃']), - ('𐓬', &['𐓄']), - ('𐓭', &['𐓅']), - ('𐓮', &['𐓆']), - ('𐓯', &['𐓇']), - ('𐓰', &['𐓈']), - ('𐓱', &['𐓉']), - ('𐓲', &['𐓊']), - ('𐓳', &['𐓋']), - ('𐓴', &['𐓌']), - ('𐓵', &['𐓍']), - ('𐓶', &['𐓎']), - ('𐓷', &['𐓏']), - ('𐓸', &['𐓐']), - ('𐓹', &['𐓑']), - ('𐓺', &['𐓒']), - ('𐓻', &['𐓓']), - ('𐕰', &['𐖗']), - ('𐕱', &['𐖘']), - ('𐕲', &['𐖙']), - ('𐕳', &['𐖚']), - ('𐕴', &['𐖛']), - ('𐕵', &['𐖜']), - ('𐕶', &['𐖝']), - ('𐕷', &['𐖞']), - ('𐕸', &['𐖟']), - ('𐕹', &['𐖠']), - ('𐕺', &['𐖡']), - ('𐕼', &['𐖣']), - ('𐕽', &['𐖤']), - ('𐕾', &['𐖥']), - ('𐕿', &['𐖦']), - ('𐖀', &['𐖧']), - ('𐖁', &['𐖨']), - ('𐖂', &['𐖩']), - ('𐖃', &['𐖪']), - ('𐖄', &['𐖫']), - ('𐖅', &['𐖬']), - ('𐖆', &['𐖭']), - ('𐖇', &['𐖮']), - ('𐖈', &['𐖯']), - ('𐖉', &['𐖰']), - ('𐖊', &['𐖱']), - ('𐖌', &['𐖳']), - ('𐖍', &['𐖴']), - ('𐖎', &['𐖵']), - ('𐖏', &['𐖶']), - ('𐖐', &['𐖷']), - ('𐖑', &['𐖸']), - ('𐖒', &['𐖹']), - ('𐖔', &['𐖻']), - ('𐖕', &['𐖼']), - ('𐖗', &['𐕰']), - ('𐖘', &['𐕱']), - ('𐖙', &['𐕲']), - ('𐖚', &['𐕳']), - ('𐖛', &['𐕴']), - ('𐖜', &['𐕵']), - ('𐖝', &['𐕶']), - ('𐖞', &['𐕷']), - ('𐖟', &['𐕸']), - ('𐖠', &['𐕹']), - ('𐖡', &['𐕺']), - ('𐖣', &['𐕼']), - ('𐖤', &['𐕽']), - ('𐖥', &['𐕾']), - ('𐖦', &['𐕿']), - ('𐖧', &['𐖀']), - ('𐖨', &['𐖁']), - ('𐖩', &['𐖂']), - ('𐖪', &['𐖃']), - ('𐖫', &['𐖄']), - ('𐖬', &['𐖅']), - ('𐖭', &['𐖆']), - ('𐖮', &['𐖇']), - ('𐖯', &['𐖈']), - ('𐖰', &['𐖉']), - ('𐖱', &['𐖊']), - ('𐖳', &['𐖌']), - ('𐖴', &['𐖍']), - ('𐖵', &['𐖎']), - ('𐖶', &['𐖏']), - ('𐖷', &['𐖐']), - ('𐖸', &['𐖑']), - ('𐖹', &['𐖒']), - ('𐖻', &['𐖔']), - ('𐖼', &['𐖕']), - ('𐲀', &['𐳀']), - ('𐲁', &['𐳁']), - ('𐲂', &['𐳂']), - ('𐲃', &['𐳃']), - ('𐲄', &['𐳄']), - ('𐲅', &['𐳅']), - ('𐲆', &['𐳆']), - ('𐲇', &['𐳇']), - ('𐲈', &['𐳈']), - ('𐲉', &['𐳉']), - ('𐲊', &['𐳊']), - ('𐲋', &['𐳋']), - ('𐲌', &['𐳌']), - ('𐲍', &['𐳍']), - ('𐲎', &['𐳎']), - ('𐲏', &['𐳏']), - ('𐲐', &['𐳐']), - ('𐲑', &['𐳑']), - ('𐲒', &['𐳒']), - ('𐲓', &['𐳓']), - ('𐲔', &['𐳔']), - ('𐲕', &['𐳕']), - ('𐲖', &['𐳖']), - ('𐲗', &['𐳗']), - ('𐲘', &['𐳘']), - ('𐲙', &['𐳙']), - ('𐲚', &['𐳚']), - ('𐲛', &['𐳛']), - ('𐲜', &['𐳜']), - ('𐲝', &['𐳝']), - ('𐲞', &['𐳞']), - ('𐲟', &['𐳟']), - ('𐲠', &['𐳠']), - ('𐲡', &['𐳡']), - ('𐲢', &['𐳢']), - ('𐲣', &['𐳣']), - ('𐲤', &['𐳤']), - ('𐲥', &['𐳥']), - ('𐲦', &['𐳦']), - ('𐲧', &['𐳧']), - ('𐲨', &['𐳨']), - ('𐲩', &['𐳩']), - ('𐲪', &['𐳪']), - ('𐲫', &['𐳫']), - ('𐲬', &['𐳬']), - ('𐲭', &['𐳭']), - ('𐲮', &['𐳮']), - ('𐲯', &['𐳯']), - ('𐲰', &['𐳰']), - ('𐲱', &['𐳱']), - ('𐲲', &['𐳲']), - ('𐳀', &['𐲀']), - ('𐳁', &['𐲁']), - ('𐳂', &['𐲂']), - ('𐳃', &['𐲃']), - ('𐳄', &['𐲄']), - ('𐳅', &['𐲅']), - ('𐳆', &['𐲆']), - ('𐳇', &['𐲇']), - ('𐳈', &['𐲈']), - ('𐳉', &['𐲉']), - ('𐳊', &['𐲊']), - ('𐳋', &['𐲋']), - ('𐳌', &['𐲌']), - ('𐳍', &['𐲍']), - ('𐳎', &['𐲎']), - ('𐳏', &['𐲏']), - ('𐳐', &['𐲐']), - ('𐳑', &['𐲑']), - ('𐳒', &['𐲒']), - ('𐳓', &['𐲓']), - ('𐳔', &['𐲔']), - ('𐳕', &['𐲕']), - ('𐳖', &['𐲖']), - ('𐳗', &['𐲗']), - ('𐳘', &['𐲘']), - ('𐳙', &['𐲙']), - ('𐳚', &['𐲚']), - ('𐳛', &['𐲛']), - ('𐳜', &['𐲜']), - ('𐳝', &['𐲝']), - ('𐳞', &['𐲞']), - ('𐳟', &['𐲟']), - ('𐳠', &['𐲠']), - 
('𐳡', &['𐲡']), - ('𐳢', &['𐲢']), - ('𐳣', &['𐲣']), - ('𐳤', &['𐲤']), - ('𐳥', &['𐲥']), - ('𐳦', &['𐲦']), - ('𐳧', &['𐲧']), - ('𐳨', &['𐲨']), - ('𐳩', &['𐲩']), - ('𐳪', &['𐲪']), - ('𐳫', &['𐲫']), - ('𐳬', &['𐲬']), - ('𐳭', &['𐲭']), - ('𐳮', &['𐲮']), - ('𐳯', &['𐲯']), - ('𐳰', &['𐲰']), - ('𐳱', &['𐲱']), - ('𐳲', &['𐲲']), - ('𐵐', &['𐵰']), - ('𐵑', &['𐵱']), - ('𐵒', &['𐵲']), - ('𐵓', &['𐵳']), - ('𐵔', &['𐵴']), - ('𐵕', &['𐵵']), - ('𐵖', &['𐵶']), - ('𐵗', &['𐵷']), - ('𐵘', &['𐵸']), - ('𐵙', &['𐵹']), - ('𐵚', &['𐵺']), - ('𐵛', &['𐵻']), - ('𐵜', &['𐵼']), - ('𐵝', &['𐵽']), - ('𐵞', &['𐵾']), - ('𐵟', &['𐵿']), - ('𐵠', &['𐶀']), - ('𐵡', &['𐶁']), - ('𐵢', &['𐶂']), - ('𐵣', &['𐶃']), - ('𐵤', &['𐶄']), - ('𐵥', &['𐶅']), - ('𐵰', &['𐵐']), - ('𐵱', &['𐵑']), - ('𐵲', &['𐵒']), - ('𐵳', &['𐵓']), - ('𐵴', &['𐵔']), - ('𐵵', &['𐵕']), - ('𐵶', &['𐵖']), - ('𐵷', &['𐵗']), - ('𐵸', &['𐵘']), - ('𐵹', &['𐵙']), - ('𐵺', &['𐵚']), - ('𐵻', &['𐵛']), - ('𐵼', &['𐵜']), - ('𐵽', &['𐵝']), - ('𐵾', &['𐵞']), - ('𐵿', &['𐵟']), - ('𐶀', &['𐵠']), - ('𐶁', &['𐵡']), - ('𐶂', &['𐵢']), - ('𐶃', &['𐵣']), - ('𐶄', &['𐵤']), - ('𐶅', &['𐵥']), - ('𑢠', &['𑣀']), - ('𑢡', &['𑣁']), - ('𑢢', &['𑣂']), - ('𑢣', &['𑣃']), - ('𑢤', &['𑣄']), - ('𑢥', &['𑣅']), - ('𑢦', &['𑣆']), - ('𑢧', &['𑣇']), - ('𑢨', &['𑣈']), - ('𑢩', &['𑣉']), - ('𑢪', &['𑣊']), - ('𑢫', &['𑣋']), - ('𑢬', &['𑣌']), - ('𑢭', &['𑣍']), - ('𑢮', &['𑣎']), - ('𑢯', &['𑣏']), - ('𑢰', &['𑣐']), - ('𑢱', &['𑣑']), - ('𑢲', &['𑣒']), - ('𑢳', &['𑣓']), - ('𑢴', &['𑣔']), - ('𑢵', &['𑣕']), - ('𑢶', &['𑣖']), - ('𑢷', &['𑣗']), - ('𑢸', &['𑣘']), - ('𑢹', &['𑣙']), - ('𑢺', &['𑣚']), - ('𑢻', &['𑣛']), - ('𑢼', &['𑣜']), - ('𑢽', &['𑣝']), - ('𑢾', &['𑣞']), - ('𑢿', &['𑣟']), - ('𑣀', &['𑢠']), - ('𑣁', &['𑢡']), - ('𑣂', &['𑢢']), - ('𑣃', &['𑢣']), - ('𑣄', &['𑢤']), - ('𑣅', &['𑢥']), - ('𑣆', &['𑢦']), - ('𑣇', &['𑢧']), - ('𑣈', &['𑢨']), - ('𑣉', &['𑢩']), - ('𑣊', &['𑢪']), - ('𑣋', &['𑢫']), - ('𑣌', &['𑢬']), - ('𑣍', &['𑢭']), - ('𑣎', &['𑢮']), - ('𑣏', &['𑢯']), - ('𑣐', &['𑢰']), - ('𑣑', &['𑢱']), - ('𑣒', &['𑢲']), - ('𑣓', &['𑢳']), - ('𑣔', &['𑢴']), - ('𑣕', &['𑢵']), - ('𑣖', &['𑢶']), - ('𑣗', &['𑢷']), - ('𑣘', &['𑢸']), - ('𑣙', &['𑢹']), - ('𑣚', &['𑢺']), - ('𑣛', &['𑢻']), - ('𑣜', &['𑢼']), - ('𑣝', &['𑢽']), - ('𑣞', &['𑢾']), - ('𑣟', &['𑢿']), - ('𖹀', &['𖹠']), - ('𖹁', &['𖹡']), - ('𖹂', &['𖹢']), - ('𖹃', &['𖹣']), - ('𖹄', &['𖹤']), - ('𖹅', &['𖹥']), - ('𖹆', &['𖹦']), - ('𖹇', &['𖹧']), - ('𖹈', &['𖹨']), - ('𖹉', &['𖹩']), - ('𖹊', &['𖹪']), - ('𖹋', &['𖹫']), - ('𖹌', &['𖹬']), - ('𖹍', &['𖹭']), - ('𖹎', &['𖹮']), - ('𖹏', &['𖹯']), - ('𖹐', &['𖹰']), - ('𖹑', &['𖹱']), - ('𖹒', &['𖹲']), - ('𖹓', &['𖹳']), - ('𖹔', &['𖹴']), - ('𖹕', &['𖹵']), - ('𖹖', &['𖹶']), - ('𖹗', &['𖹷']), - ('𖹘', &['𖹸']), - ('𖹙', &['𖹹']), - ('𖹚', &['𖹺']), - ('𖹛', &['𖹻']), - ('𖹜', &['𖹼']), - ('𖹝', &['𖹽']), - ('𖹞', &['𖹾']), - ('𖹟', &['𖹿']), - ('𖹠', &['𖹀']), - ('𖹡', &['𖹁']), - ('𖹢', &['𖹂']), - ('𖹣', &['𖹃']), - ('𖹤', &['𖹄']), - ('𖹥', &['𖹅']), - ('𖹦', &['𖹆']), - ('𖹧', &['𖹇']), - ('𖹨', &['𖹈']), - ('𖹩', &['𖹉']), - ('𖹪', &['𖹊']), - ('𖹫', &['𖹋']), - ('𖹬', &['𖹌']), - ('𖹭', &['𖹍']), - ('𖹮', &['𖹎']), - ('𖹯', &['𖹏']), - ('𖹰', &['𖹐']), - ('𖹱', &['𖹑']), - ('𖹲', &['𖹒']), - ('𖹳', &['𖹓']), - ('𖹴', &['𖹔']), - ('𖹵', &['𖹕']), - ('𖹶', &['𖹖']), - ('𖹷', &['𖹗']), - ('𖹸', &['𖹘']), - ('𖹹', &['𖹙']), - ('𖹺', &['𖹚']), - ('𖹻', &['𖹛']), - ('𖹼', &['𖹜']), - ('𖹽', &['𖹝']), - ('𖹾', &['𖹞']), - ('𖹿', &['𖹟']), - ('𞤀', &['𞤢']), - ('𞤁', &['𞤣']), - ('𞤂', &['𞤤']), - ('𞤃', &['𞤥']), - ('𞤄', &['𞤦']), - ('𞤅', &['𞤧']), - ('𞤆', &['𞤨']), - ('𞤇', &['𞤩']), - ('𞤈', &['𞤪']), - ('𞤉', &['𞤫']), - ('𞤊', &['𞤬']), - ('𞤋', &['𞤭']), - ('𞤌', &['𞤮']), - ('𞤍', &['𞤯']), - ('𞤎', &['𞤰']), - ('𞤏', &['𞤱']), - ('𞤐', &['𞤲']), - ('𞤑', &['𞤳']), - ('𞤒', &['𞤴']), - 
('𞤓', &['𞤵']), - ('𞤔', &['𞤶']), - ('𞤕', &['𞤷']), - ('𞤖', &['𞤸']), - ('𞤗', &['𞤹']), - ('𞤘', &['𞤺']), - ('𞤙', &['𞤻']), - ('𞤚', &['𞤼']), - ('𞤛', &['𞤽']), - ('𞤜', &['𞤾']), - ('𞤝', &['𞤿']), - ('𞤞', &['𞥀']), - ('𞤟', &['𞥁']), - ('𞤠', &['𞥂']), - ('𞤡', &['𞥃']), - ('𞤢', &['𞤀']), - ('𞤣', &['𞤁']), - ('𞤤', &['𞤂']), - ('𞤥', &['𞤃']), - ('𞤦', &['𞤄']), - ('𞤧', &['𞤅']), - ('𞤨', &['𞤆']), - ('𞤩', &['𞤇']), - ('𞤪', &['𞤈']), - ('𞤫', &['𞤉']), - ('𞤬', &['𞤊']), - ('𞤭', &['𞤋']), - ('𞤮', &['𞤌']), - ('𞤯', &['𞤍']), - ('𞤰', &['𞤎']), - ('𞤱', &['𞤏']), - ('𞤲', &['𞤐']), - ('𞤳', &['𞤑']), - ('𞤴', &['𞤒']), - ('𞤵', &['𞤓']), - ('𞤶', &['𞤔']), - ('𞤷', &['𞤕']), - ('𞤸', &['𞤖']), - ('𞤹', &['𞤗']), - ('𞤺', &['𞤘']), - ('𞤻', &['𞤙']), - ('𞤼', &['𞤚']), - ('𞤽', &['𞤛']), - ('𞤾', &['𞤜']), - ('𞤿', &['𞤝']), - ('𞥀', &['𞤞']), - ('𞥁', &['𞤟']), - ('𞥂', &['𞤠']), - ('𞥃', &['𞤡']), -]; diff --git a/vendor/regex-syntax/src/unicode_tables/general_category.rs b/vendor/regex-syntax/src/unicode_tables/general_category.rs deleted file mode 100644 index 6ff6b5384db836..00000000000000 --- a/vendor/regex-syntax/src/unicode_tables/general_category.rs +++ /dev/null @@ -1,6717 +0,0 @@ -// DO NOT EDIT THIS FILE. IT WAS AUTOMATICALLY GENERATED BY: -// -// ucd-generate general-category ucd-16.0.0 --chars --exclude surrogate -// -// Unicode version: 16.0.0. -// -// ucd-generate 0.3.1 is available on crates.io. - -pub const BY_NAME: &'static [(&'static str, &'static [(char, char)])] = &[ - ("Cased_Letter", CASED_LETTER), - ("Close_Punctuation", CLOSE_PUNCTUATION), - ("Connector_Punctuation", CONNECTOR_PUNCTUATION), - ("Control", CONTROL), - ("Currency_Symbol", CURRENCY_SYMBOL), - ("Dash_Punctuation", DASH_PUNCTUATION), - ("Decimal_Number", DECIMAL_NUMBER), - ("Enclosing_Mark", ENCLOSING_MARK), - ("Final_Punctuation", FINAL_PUNCTUATION), - ("Format", FORMAT), - ("Initial_Punctuation", INITIAL_PUNCTUATION), - ("Letter", LETTER), - ("Letter_Number", LETTER_NUMBER), - ("Line_Separator", LINE_SEPARATOR), - ("Lowercase_Letter", LOWERCASE_LETTER), - ("Mark", MARK), - ("Math_Symbol", MATH_SYMBOL), - ("Modifier_Letter", MODIFIER_LETTER), - ("Modifier_Symbol", MODIFIER_SYMBOL), - ("Nonspacing_Mark", NONSPACING_MARK), - ("Number", NUMBER), - ("Open_Punctuation", OPEN_PUNCTUATION), - ("Other", OTHER), - ("Other_Letter", OTHER_LETTER), - ("Other_Number", OTHER_NUMBER), - ("Other_Punctuation", OTHER_PUNCTUATION), - ("Other_Symbol", OTHER_SYMBOL), - ("Paragraph_Separator", PARAGRAPH_SEPARATOR), - ("Private_Use", PRIVATE_USE), - ("Punctuation", PUNCTUATION), - ("Separator", SEPARATOR), - ("Space_Separator", SPACE_SEPARATOR), - ("Spacing_Mark", SPACING_MARK), - ("Symbol", SYMBOL), - ("Titlecase_Letter", TITLECASE_LETTER), - ("Unassigned", UNASSIGNED), - ("Uppercase_Letter", UPPERCASE_LETTER), -]; - -pub const CASED_LETTER: &'static [(char, char)] = &[ - ('A', 'Z'), - ('a', 'z'), - ('µ', 'µ'), - ('À', 'Ö'), - ('Ø', 'ö'), - ('ø', 'ƺ'), - ('Ƽ', 'ƿ'), - ('DŽ', 'ʓ'), - ('ʕ', 'ʯ'), - ('Ͱ', 'ͳ'), - ('Ͷ', 'ͷ'), - ('ͻ', 'ͽ'), - ('Ϳ', 'Ϳ'), - ('Ά', 'Ά'), - ('Έ', 'Ί'), - ('Ό', 'Ό'), - ('Ύ', 'Ρ'), - ('Σ', 'ϵ'), - ('Ϸ', 'ҁ'), - ('Ҋ', 'ԯ'), - ('Ա', 'Ֆ'), - ('ՠ', 'ֈ'), - ('Ⴀ', 'Ⴥ'), - ('Ⴧ', 'Ⴧ'), - ('Ⴭ', 'Ⴭ'), - ('ა', 'ჺ'), - ('ჽ', 'ჿ'), - ('Ꭰ', 'Ᏽ'), - ('ᏸ', 'ᏽ'), - ('ᲀ', 'ᲊ'), - ('Ა', 'Ჺ'), - ('Ჽ', 'Ჿ'), - ('ᴀ', 'ᴫ'), - ('ᵫ', 'ᵷ'), - ('ᵹ', 'ᶚ'), - ('Ḁ', 'ἕ'), - ('Ἐ', 'Ἕ'), - ('ἠ', 'ὅ'), - ('Ὀ', 'Ὅ'), - ('ὐ', 'ὗ'), - ('Ὑ', 'Ὑ'), - ('Ὓ', 'Ὓ'), - ('Ὕ', 'Ὕ'), - ('Ὗ', 'ώ'), - ('ᾀ', 'ᾴ'), - ('ᾶ', 'ᾼ'), - ('ι', 'ι'), - ('ῂ', 'ῄ'), - ('ῆ', 'ῌ'), - ('ῐ', 'ΐ'), - ('ῖ', 'Ί'), - ('ῠ', 'Ῥ'), - ('ῲ', 'ῴ'), - 
('ῶ', 'ῼ'), - ('ℂ', 'ℂ'), - ('ℇ', 'ℇ'), - ('ℊ', 'ℓ'), - ('ℕ', 'ℕ'), - ('ℙ', 'ℝ'), - ('ℤ', 'ℤ'), - ('Ω', 'Ω'), - ('ℨ', 'ℨ'), - ('K', 'ℭ'), - ('ℯ', 'ℴ'), - ('ℹ', 'ℹ'), - ('ℼ', 'ℿ'), - ('ⅅ', 'ⅉ'), - ('ⅎ', 'ⅎ'), - ('Ↄ', 'ↄ'), - ('Ⰰ', 'ⱻ'), - ('Ȿ', 'ⳤ'), - ('Ⳬ', 'ⳮ'), - ('Ⳳ', 'ⳳ'), - ('ⴀ', 'ⴥ'), - ('ⴧ', 'ⴧ'), - ('ⴭ', 'ⴭ'), - ('Ꙁ', 'ꙭ'), - ('Ꚁ', 'ꚛ'), - ('Ꜣ', 'ꝯ'), - ('ꝱ', 'ꞇ'), - ('Ꞌ', 'ꞎ'), - ('Ꞑ', 'ꟍ'), - ('Ꟑ', 'ꟑ'), - ('ꟓ', 'ꟓ'), - ('ꟕ', 'Ƛ'), - ('Ꟶ', 'ꟶ'), - ('ꟺ', 'ꟺ'), - ('ꬰ', 'ꭚ'), - ('ꭠ', 'ꭨ'), - ('ꭰ', 'ꮿ'), - ('ff', 'st'), - ('ﬓ', 'ﬗ'), - ('A', 'Z'), - ('a', 'z'), - ('𐐀', '𐑏'), - ('𐒰', '𐓓'), - ('𐓘', '𐓻'), - ('𐕰', '𐕺'), - ('𐕼', '𐖊'), - ('𐖌', '𐖒'), - ('𐖔', '𐖕'), - ('𐖗', '𐖡'), - ('𐖣', '𐖱'), - ('𐖳', '𐖹'), - ('𐖻', '𐖼'), - ('𐲀', '𐲲'), - ('𐳀', '𐳲'), - ('𐵐', '𐵥'), - ('𐵰', '𐶅'), - ('𑢠', '𑣟'), - ('𖹀', '𖹿'), - ('𝐀', '𝑔'), - ('𝑖', '𝒜'), - ('𝒞', '𝒟'), - ('𝒢', '𝒢'), - ('𝒥', '𝒦'), - ('𝒩', '𝒬'), - ('𝒮', '𝒹'), - ('𝒻', '𝒻'), - ('𝒽', '𝓃'), - ('𝓅', '𝔅'), - ('𝔇', '𝔊'), - ('𝔍', '𝔔'), - ('𝔖', '𝔜'), - ('𝔞', '𝔹'), - ('𝔻', '𝔾'), - ('𝕀', '𝕄'), - ('𝕆', '𝕆'), - ('𝕊', '𝕐'), - ('𝕒', '𝚥'), - ('𝚨', '𝛀'), - ('𝛂', '𝛚'), - ('𝛜', '𝛺'), - ('𝛼', '𝜔'), - ('𝜖', '𝜴'), - ('𝜶', '𝝎'), - ('𝝐', '𝝮'), - ('𝝰', '𝞈'), - ('𝞊', '𝞨'), - ('𝞪', '𝟂'), - ('𝟄', '𝟋'), - ('𝼀', '𝼉'), - ('𝼋', '𝼞'), - ('𝼥', '𝼪'), - ('𞤀', '𞥃'), -]; - -pub const CLOSE_PUNCTUATION: &'static [(char, char)] = &[ - (')', ')'), - (']', ']'), - ('}', '}'), - ('༻', '༻'), - ('༽', '༽'), - ('᚜', '᚜'), - ('⁆', '⁆'), - ('⁾', '⁾'), - ('₎', '₎'), - ('⌉', '⌉'), - ('⌋', '⌋'), - ('〉', '〉'), - ('❩', '❩'), - ('❫', '❫'), - ('❭', '❭'), - ('❯', '❯'), - ('❱', '❱'), - ('❳', '❳'), - ('❵', '❵'), - ('⟆', '⟆'), - ('⟧', '⟧'), - ('⟩', '⟩'), - ('⟫', '⟫'), - ('⟭', '⟭'), - ('⟯', '⟯'), - ('⦄', '⦄'), - ('⦆', '⦆'), - ('⦈', '⦈'), - ('⦊', '⦊'), - ('⦌', '⦌'), - ('⦎', '⦎'), - ('⦐', '⦐'), - ('⦒', '⦒'), - ('⦔', '⦔'), - ('⦖', '⦖'), - ('⦘', '⦘'), - ('⧙', '⧙'), - ('⧛', '⧛'), - ('⧽', '⧽'), - ('⸣', '⸣'), - ('⸥', '⸥'), - ('⸧', '⸧'), - ('⸩', '⸩'), - ('⹖', '⹖'), - ('⹘', '⹘'), - ('⹚', '⹚'), - ('⹜', '⹜'), - ('〉', '〉'), - ('》', '》'), - ('」', '」'), - ('』', '』'), - ('】', '】'), - ('〕', '〕'), - ('〗', '〗'), - ('〙', '〙'), - ('〛', '〛'), - ('〞', '〟'), - ('﴾', '﴾'), - ('︘', '︘'), - ('︶', '︶'), - ('︸', '︸'), - ('︺', '︺'), - ('︼', '︼'), - ('︾', '︾'), - ('﹀', '﹀'), - ('﹂', '﹂'), - ('﹄', '﹄'), - ('﹈', '﹈'), - ('﹚', '﹚'), - ('﹜', '﹜'), - ('﹞', '﹞'), - (')', ')'), - (']', ']'), - ('}', '}'), - ('⦆', '⦆'), - ('」', '」'), -]; - -pub const CONNECTOR_PUNCTUATION: &'static [(char, char)] = &[ - ('_', '_'), - ('‿', '⁀'), - ('⁔', '⁔'), - ('︳', '︴'), - ('﹍', '﹏'), - ('_', '_'), -]; - -pub const CONTROL: &'static [(char, char)] = - &[('\0', '\u{1f}'), ('\u{7f}', '\u{9f}')]; - -pub const CURRENCY_SYMBOL: &'static [(char, char)] = &[ - ('$', '$'), - ('¢', '¥'), - ('֏', '֏'), - ('؋', '؋'), - ('߾', '߿'), - ('৲', '৳'), - ('৻', '৻'), - ('૱', '૱'), - ('௹', '௹'), - ('฿', '฿'), - ('៛', '៛'), - ('₠', '⃀'), - ('꠸', '꠸'), - ('﷼', '﷼'), - ('﹩', '﹩'), - ('$', '$'), - ('¢', '£'), - ('¥', '₩'), - ('𑿝', '𑿠'), - ('𞋿', '𞋿'), - ('𞲰', '𞲰'), -]; - -pub const DASH_PUNCTUATION: &'static [(char, char)] = &[ - ('-', '-'), - ('֊', '֊'), - ('־', '־'), - ('᐀', '᐀'), - ('᠆', '᠆'), - ('‐', '―'), - ('⸗', '⸗'), - ('⸚', '⸚'), - ('⸺', '⸻'), - ('⹀', '⹀'), - ('⹝', '⹝'), - ('〜', '〜'), - ('〰', '〰'), - ('゠', '゠'), - ('︱', '︲'), - ('﹘', '﹘'), - ('﹣', '﹣'), - ('-', '-'), - ('𐵮', '𐵮'), - ('𐺭', '𐺭'), -]; - -pub const DECIMAL_NUMBER: &'static [(char, char)] = &[ - ('0', '9'), - ('٠', '٩'), - ('۰', '۹'), - ('߀', '߉'), - ('०', '९'), - ('০', '৯'), - ('੦', '੯'), - ('૦', '૯'), - ('୦', '୯'), - 
('௦', '௯'), - ('౦', '౯'), - ('೦', '೯'), - ('൦', '൯'), - ('෦', '෯'), - ('๐', '๙'), - ('໐', '໙'), - ('༠', '༩'), - ('၀', '၉'), - ('႐', '႙'), - ('០', '៩'), - ('᠐', '᠙'), - ('᥆', '᥏'), - ('᧐', '᧙'), - ('᪀', '᪉'), - ('᪐', '᪙'), - ('᭐', '᭙'), - ('᮰', '᮹'), - ('᱀', '᱉'), - ('᱐', '᱙'), - ('꘠', '꘩'), - ('꣐', '꣙'), - ('꤀', '꤉'), - ('꧐', '꧙'), - ('꧰', '꧹'), - ('꩐', '꩙'), - ('꯰', '꯹'), - ('0', '9'), - ('𐒠', '𐒩'), - ('𐴰', '𐴹'), - ('𐵀', '𐵉'), - ('𑁦', '𑁯'), - ('𑃰', '𑃹'), - ('𑄶', '𑄿'), - ('𑇐', '𑇙'), - ('𑋰', '𑋹'), - ('𑑐', '𑑙'), - ('𑓐', '𑓙'), - ('𑙐', '𑙙'), - ('𑛀', '𑛉'), - ('𑛐', '𑛣'), - ('𑜰', '𑜹'), - ('𑣠', '𑣩'), - ('𑥐', '𑥙'), - ('𑯰', '𑯹'), - ('𑱐', '𑱙'), - ('𑵐', '𑵙'), - ('𑶠', '𑶩'), - ('𑽐', '𑽙'), - ('𖄰', '𖄹'), - ('𖩠', '𖩩'), - ('𖫀', '𖫉'), - ('𖭐', '𖭙'), - ('𖵰', '𖵹'), - ('𜳰', '𜳹'), - ('𝟎', '𝟿'), - ('𞅀', '𞅉'), - ('𞋰', '𞋹'), - ('𞓰', '𞓹'), - ('𞗱', '𞗺'), - ('𞥐', '𞥙'), - ('🯰', '🯹'), -]; - -pub const ENCLOSING_MARK: &'static [(char, char)] = &[ - ('\u{488}', '\u{489}'), - ('\u{1abe}', '\u{1abe}'), - ('\u{20dd}', '\u{20e0}'), - ('\u{20e2}', '\u{20e4}'), - ('\u{a670}', '\u{a672}'), -]; - -pub const FINAL_PUNCTUATION: &'static [(char, char)] = &[ - ('»', '»'), - ('’', '’'), - ('”', '”'), - ('›', '›'), - ('⸃', '⸃'), - ('⸅', '⸅'), - ('⸊', '⸊'), - ('⸍', '⸍'), - ('⸝', '⸝'), - ('⸡', '⸡'), -]; - -pub const FORMAT: &'static [(char, char)] = &[ - ('\u{ad}', '\u{ad}'), - ('\u{600}', '\u{605}'), - ('\u{61c}', '\u{61c}'), - ('\u{6dd}', '\u{6dd}'), - ('\u{70f}', '\u{70f}'), - ('\u{890}', '\u{891}'), - ('\u{8e2}', '\u{8e2}'), - ('\u{180e}', '\u{180e}'), - ('\u{200b}', '\u{200f}'), - ('\u{202a}', '\u{202e}'), - ('\u{2060}', '\u{2064}'), - ('\u{2066}', '\u{206f}'), - ('\u{feff}', '\u{feff}'), - ('\u{fff9}', '\u{fffb}'), - ('\u{110bd}', '\u{110bd}'), - ('\u{110cd}', '\u{110cd}'), - ('\u{13430}', '\u{1343f}'), - ('\u{1bca0}', '\u{1bca3}'), - ('\u{1d173}', '\u{1d17a}'), - ('\u{e0001}', '\u{e0001}'), - ('\u{e0020}', '\u{e007f}'), -]; - -pub const INITIAL_PUNCTUATION: &'static [(char, char)] = &[ - ('«', '«'), - ('‘', '‘'), - ('‛', '“'), - ('‟', '‟'), - ('‹', '‹'), - ('⸂', '⸂'), - ('⸄', '⸄'), - ('⸉', '⸉'), - ('⸌', '⸌'), - ('⸜', '⸜'), - ('⸠', '⸠'), -]; - -pub const LETTER: &'static [(char, char)] = &[ - ('A', 'Z'), - ('a', 'z'), - ('ª', 'ª'), - ('µ', 'µ'), - ('º', 'º'), - ('À', 'Ö'), - ('Ø', 'ö'), - ('ø', 'ˁ'), - ('ˆ', 'ˑ'), - ('ˠ', 'ˤ'), - ('ˬ', 'ˬ'), - ('ˮ', 'ˮ'), - ('Ͱ', 'ʹ'), - ('Ͷ', 'ͷ'), - ('ͺ', 'ͽ'), - ('Ϳ', 'Ϳ'), - ('Ά', 'Ά'), - ('Έ', 'Ί'), - ('Ό', 'Ό'), - ('Ύ', 'Ρ'), - ('Σ', 'ϵ'), - ('Ϸ', 'ҁ'), - ('Ҋ', 'ԯ'), - ('Ա', 'Ֆ'), - ('ՙ', 'ՙ'), - ('ՠ', 'ֈ'), - ('א', 'ת'), - ('ׯ', 'ײ'), - ('ؠ', 'ي'), - ('ٮ', 'ٯ'), - ('ٱ', 'ۓ'), - ('ە', 'ە'), - ('ۥ', 'ۦ'), - ('ۮ', 'ۯ'), - ('ۺ', 'ۼ'), - ('ۿ', 'ۿ'), - ('ܐ', 'ܐ'), - ('ܒ', 'ܯ'), - ('ݍ', 'ޥ'), - ('ޱ', 'ޱ'), - ('ߊ', 'ߪ'), - ('ߴ', 'ߵ'), - ('ߺ', 'ߺ'), - ('ࠀ', 'ࠕ'), - ('ࠚ', 'ࠚ'), - ('ࠤ', 'ࠤ'), - ('ࠨ', 'ࠨ'), - ('ࡀ', 'ࡘ'), - ('ࡠ', 'ࡪ'), - ('ࡰ', 'ࢇ'), - ('ࢉ', 'ࢎ'), - ('ࢠ', 'ࣉ'), - ('ऄ', 'ह'), - ('ऽ', 'ऽ'), - ('ॐ', 'ॐ'), - ('क़', 'ॡ'), - ('ॱ', 'ঀ'), - ('অ', 'ঌ'), - ('এ', 'ঐ'), - ('ও', 'ন'), - ('প', 'র'), - ('ল', 'ল'), - ('শ', 'হ'), - ('ঽ', 'ঽ'), - ('ৎ', 'ৎ'), - ('ড়', 'ঢ়'), - ('য়', 'ৡ'), - ('ৰ', 'ৱ'), - ('ৼ', 'ৼ'), - ('ਅ', 'ਊ'), - ('ਏ', 'ਐ'), - ('ਓ', 'ਨ'), - ('ਪ', 'ਰ'), - ('ਲ', 'ਲ਼'), - ('ਵ', 'ਸ਼'), - ('ਸ', 'ਹ'), - ('ਖ਼', 'ੜ'), - ('ਫ਼', 'ਫ਼'), - ('ੲ', 'ੴ'), - ('અ', 'ઍ'), - ('એ', 'ઑ'), - ('ઓ', 'ન'), - ('પ', 'ર'), - ('લ', 'ળ'), - ('વ', 'હ'), - ('ઽ', 'ઽ'), - ('ૐ', 'ૐ'), - ('ૠ', 'ૡ'), - ('ૹ', 'ૹ'), - ('ଅ', 'ଌ'), - ('ଏ', 'ଐ'), - ('ଓ', 'ନ'), - ('ପ', 'ର'), - ('ଲ', 'ଳ'), - ('ଵ', 'ହ'), - ('ଽ', 'ଽ'), - ('ଡ଼', 'ଢ଼'), 
- ('ୟ', 'ୡ'), - ('ୱ', 'ୱ'), - ('ஃ', 'ஃ'), - ('அ', 'ஊ'), - ('எ', 'ஐ'), - ('ஒ', 'க'), - ('ங', 'ச'), - ('ஜ', 'ஜ'), - ('ஞ', 'ட'), - ('ண', 'த'), - ('ந', 'ப'), - ('ம', 'ஹ'), - ('ௐ', 'ௐ'), - ('అ', 'ఌ'), - ('ఎ', 'ఐ'), - ('ఒ', 'న'), - ('ప', 'హ'), - ('ఽ', 'ఽ'), - ('ౘ', 'ౚ'), - ('ౝ', 'ౝ'), - ('ౠ', 'ౡ'), - ('ಀ', 'ಀ'), - ('ಅ', 'ಌ'), - ('ಎ', 'ಐ'), - ('ಒ', 'ನ'), - ('ಪ', 'ಳ'), - ('ವ', 'ಹ'), - ('ಽ', 'ಽ'), - ('ೝ', 'ೞ'), - ('ೠ', 'ೡ'), - ('ೱ', 'ೲ'), - ('ഄ', 'ഌ'), - ('എ', 'ഐ'), - ('ഒ', 'ഺ'), - ('ഽ', 'ഽ'), - ('ൎ', 'ൎ'), - ('ൔ', 'ൖ'), - ('ൟ', 'ൡ'), - ('ൺ', 'ൿ'), - ('අ', 'ඖ'), - ('ක', 'න'), - ('ඳ', 'ර'), - ('ල', 'ල'), - ('ව', 'ෆ'), - ('ก', 'ะ'), - ('า', 'ำ'), - ('เ', 'ๆ'), - ('ກ', 'ຂ'), - ('ຄ', 'ຄ'), - ('ຆ', 'ຊ'), - ('ຌ', 'ຣ'), - ('ລ', 'ລ'), - ('ວ', 'ະ'), - ('າ', 'ຳ'), - ('ຽ', 'ຽ'), - ('ເ', 'ໄ'), - ('ໆ', 'ໆ'), - ('ໜ', 'ໟ'), - ('ༀ', 'ༀ'), - ('ཀ', 'ཇ'), - ('ཉ', 'ཬ'), - ('ྈ', 'ྌ'), - ('က', 'ဪ'), - ('ဿ', 'ဿ'), - ('ၐ', 'ၕ'), - ('ၚ', 'ၝ'), - ('ၡ', 'ၡ'), - ('ၥ', 'ၦ'), - ('ၮ', 'ၰ'), - ('ၵ', 'ႁ'), - ('ႎ', 'ႎ'), - ('Ⴀ', 'Ⴥ'), - ('Ⴧ', 'Ⴧ'), - ('Ⴭ', 'Ⴭ'), - ('ა', 'ჺ'), - ('ჼ', 'ቈ'), - ('ቊ', 'ቍ'), - ('ቐ', 'ቖ'), - ('ቘ', 'ቘ'), - ('ቚ', 'ቝ'), - ('በ', 'ኈ'), - ('ኊ', 'ኍ'), - ('ነ', 'ኰ'), - ('ኲ', 'ኵ'), - ('ኸ', 'ኾ'), - ('ዀ', 'ዀ'), - ('ዂ', 'ዅ'), - ('ወ', 'ዖ'), - ('ዘ', 'ጐ'), - ('ጒ', 'ጕ'), - ('ጘ', 'ፚ'), - ('ᎀ', 'ᎏ'), - ('Ꭰ', 'Ᏽ'), - ('ᏸ', 'ᏽ'), - ('ᐁ', 'ᙬ'), - ('ᙯ', 'ᙿ'), - ('ᚁ', 'ᚚ'), - ('ᚠ', 'ᛪ'), - ('ᛱ', 'ᛸ'), - ('ᜀ', 'ᜑ'), - ('ᜟ', 'ᜱ'), - ('ᝀ', 'ᝑ'), - ('ᝠ', 'ᝬ'), - ('ᝮ', 'ᝰ'), - ('ក', 'ឳ'), - ('ៗ', 'ៗ'), - ('ៜ', 'ៜ'), - ('ᠠ', 'ᡸ'), - ('ᢀ', 'ᢄ'), - ('ᢇ', 'ᢨ'), - ('ᢪ', 'ᢪ'), - ('ᢰ', 'ᣵ'), - ('ᤀ', 'ᤞ'), - ('ᥐ', 'ᥭ'), - ('ᥰ', 'ᥴ'), - ('ᦀ', 'ᦫ'), - ('ᦰ', 'ᧉ'), - ('ᨀ', 'ᨖ'), - ('ᨠ', 'ᩔ'), - ('ᪧ', 'ᪧ'), - ('ᬅ', 'ᬳ'), - ('ᭅ', 'ᭌ'), - ('ᮃ', 'ᮠ'), - ('ᮮ', 'ᮯ'), - ('ᮺ', 'ᯥ'), - ('ᰀ', 'ᰣ'), - ('ᱍ', 'ᱏ'), - ('ᱚ', 'ᱽ'), - ('ᲀ', 'ᲊ'), - ('Ა', 'Ჺ'), - ('Ჽ', 'Ჿ'), - ('ᳩ', 'ᳬ'), - ('ᳮ', 'ᳳ'), - ('ᳵ', 'ᳶ'), - ('ᳺ', 'ᳺ'), - ('ᴀ', 'ᶿ'), - ('Ḁ', 'ἕ'), - ('Ἐ', 'Ἕ'), - ('ἠ', 'ὅ'), - ('Ὀ', 'Ὅ'), - ('ὐ', 'ὗ'), - ('Ὑ', 'Ὑ'), - ('Ὓ', 'Ὓ'), - ('Ὕ', 'Ὕ'), - ('Ὗ', 'ώ'), - ('ᾀ', 'ᾴ'), - ('ᾶ', 'ᾼ'), - ('ι', 'ι'), - ('ῂ', 'ῄ'), - ('ῆ', 'ῌ'), - ('ῐ', 'ΐ'), - ('ῖ', 'Ί'), - ('ῠ', 'Ῥ'), - ('ῲ', 'ῴ'), - ('ῶ', 'ῼ'), - ('ⁱ', 'ⁱ'), - ('ⁿ', 'ⁿ'), - ('ₐ', 'ₜ'), - ('ℂ', 'ℂ'), - ('ℇ', 'ℇ'), - ('ℊ', 'ℓ'), - ('ℕ', 'ℕ'), - ('ℙ', 'ℝ'), - ('ℤ', 'ℤ'), - ('Ω', 'Ω'), - ('ℨ', 'ℨ'), - ('K', 'ℭ'), - ('ℯ', 'ℹ'), - ('ℼ', 'ℿ'), - ('ⅅ', 'ⅉ'), - ('ⅎ', 'ⅎ'), - ('Ↄ', 'ↄ'), - ('Ⰰ', 'ⳤ'), - ('Ⳬ', 'ⳮ'), - ('Ⳳ', 'ⳳ'), - ('ⴀ', 'ⴥ'), - ('ⴧ', 'ⴧ'), - ('ⴭ', 'ⴭ'), - ('ⴰ', 'ⵧ'), - ('ⵯ', 'ⵯ'), - ('ⶀ', 'ⶖ'), - ('ⶠ', 'ⶦ'), - ('ⶨ', 'ⶮ'), - ('ⶰ', 'ⶶ'), - ('ⶸ', 'ⶾ'), - ('ⷀ', 'ⷆ'), - ('ⷈ', 'ⷎ'), - ('ⷐ', 'ⷖ'), - ('ⷘ', 'ⷞ'), - ('ⸯ', 'ⸯ'), - ('々', '〆'), - ('〱', '〵'), - ('〻', '〼'), - ('ぁ', 'ゖ'), - ('ゝ', 'ゟ'), - ('ァ', 'ヺ'), - ('ー', 'ヿ'), - ('ㄅ', 'ㄯ'), - ('ㄱ', 'ㆎ'), - ('ㆠ', 'ㆿ'), - ('ㇰ', 'ㇿ'), - ('㐀', '䶿'), - ('一', 'ꒌ'), - ('ꓐ', 'ꓽ'), - ('ꔀ', 'ꘌ'), - ('ꘐ', 'ꘟ'), - ('ꘪ', 'ꘫ'), - ('Ꙁ', 'ꙮ'), - ('ꙿ', 'ꚝ'), - ('ꚠ', 'ꛥ'), - ('ꜗ', 'ꜟ'), - ('Ꜣ', 'ꞈ'), - ('Ꞌ', 'ꟍ'), - ('Ꟑ', 'ꟑ'), - ('ꟓ', 'ꟓ'), - ('ꟕ', 'Ƛ'), - ('ꟲ', 'ꠁ'), - ('ꠃ', 'ꠅ'), - ('ꠇ', 'ꠊ'), - ('ꠌ', 'ꠢ'), - ('ꡀ', 'ꡳ'), - ('ꢂ', 'ꢳ'), - ('ꣲ', 'ꣷ'), - ('ꣻ', 'ꣻ'), - ('ꣽ', 'ꣾ'), - ('ꤊ', 'ꤥ'), - ('ꤰ', 'ꥆ'), - ('ꥠ', 'ꥼ'), - ('ꦄ', 'ꦲ'), - ('ꧏ', 'ꧏ'), - ('ꧠ', 'ꧤ'), - ('ꧦ', 'ꧯ'), - ('ꧺ', 'ꧾ'), - ('ꨀ', 'ꨨ'), - ('ꩀ', 'ꩂ'), - ('ꩄ', 'ꩋ'), - ('ꩠ', 'ꩶ'), - ('ꩺ', 'ꩺ'), - ('ꩾ', 'ꪯ'), - ('ꪱ', 'ꪱ'), - ('ꪵ', 'ꪶ'), - ('ꪹ', 'ꪽ'), - ('ꫀ', 'ꫀ'), - ('ꫂ', 'ꫂ'), - ('ꫛ', 'ꫝ'), - ('ꫠ', 'ꫪ'), - ('ꫲ', 'ꫴ'), - ('ꬁ', 'ꬆ'), - ('ꬉ', 'ꬎ'), - ('ꬑ', 'ꬖ'), - ('ꬠ', 'ꬦ'), - ('ꬨ', 'ꬮ'), - ('ꬰ', 'ꭚ'), - ('ꭜ', 
'ꭩ'), - ('ꭰ', 'ꯢ'), - ('가', '힣'), - ('ힰ', 'ퟆ'), - ('ퟋ', 'ퟻ'), - ('豈', '舘'), - ('並', '龎'), - ('ff', 'st'), - ('ﬓ', 'ﬗ'), - ('יִ', 'יִ'), - ('ײַ', 'ﬨ'), - ('שׁ', 'זּ'), - ('טּ', 'לּ'), - ('מּ', 'מּ'), - ('נּ', 'סּ'), - ('ףּ', 'פּ'), - ('צּ', 'ﮱ'), - ('ﯓ', 'ﴽ'), - ('ﵐ', 'ﶏ'), - ('ﶒ', 'ﷇ'), - ('ﷰ', 'ﷻ'), - ('ﹰ', 'ﹴ'), - ('ﹶ', 'ﻼ'), - ('A', 'Z'), - ('a', 'z'), - ('ヲ', 'ᄒ'), - ('ᅡ', 'ᅦ'), - ('ᅧ', 'ᅬ'), - ('ᅭ', 'ᅲ'), - ('ᅳ', 'ᅵ'), - ('𐀀', '𐀋'), - ('𐀍', '𐀦'), - ('𐀨', '𐀺'), - ('𐀼', '𐀽'), - ('𐀿', '𐁍'), - ('𐁐', '𐁝'), - ('𐂀', '𐃺'), - ('𐊀', '𐊜'), - ('𐊠', '𐋐'), - ('𐌀', '𐌟'), - ('𐌭', '𐍀'), - ('𐍂', '𐍉'), - ('𐍐', '𐍵'), - ('𐎀', '𐎝'), - ('𐎠', '𐏃'), - ('𐏈', '𐏏'), - ('𐐀', '𐒝'), - ('𐒰', '𐓓'), - ('𐓘', '𐓻'), - ('𐔀', '𐔧'), - ('𐔰', '𐕣'), - ('𐕰', '𐕺'), - ('𐕼', '𐖊'), - ('𐖌', '𐖒'), - ('𐖔', '𐖕'), - ('𐖗', '𐖡'), - ('𐖣', '𐖱'), - ('𐖳', '𐖹'), - ('𐖻', '𐖼'), - ('𐗀', '𐗳'), - ('𐘀', '𐜶'), - ('𐝀', '𐝕'), - ('𐝠', '𐝧'), - ('𐞀', '𐞅'), - ('𐞇', '𐞰'), - ('𐞲', '𐞺'), - ('𐠀', '𐠅'), - ('𐠈', '𐠈'), - ('𐠊', '𐠵'), - ('𐠷', '𐠸'), - ('𐠼', '𐠼'), - ('𐠿', '𐡕'), - ('𐡠', '𐡶'), - ('𐢀', '𐢞'), - ('𐣠', '𐣲'), - ('𐣴', '𐣵'), - ('𐤀', '𐤕'), - ('𐤠', '𐤹'), - ('𐦀', '𐦷'), - ('𐦾', '𐦿'), - ('𐨀', '𐨀'), - ('𐨐', '𐨓'), - ('𐨕', '𐨗'), - ('𐨙', '𐨵'), - ('𐩠', '𐩼'), - ('𐪀', '𐪜'), - ('𐫀', '𐫇'), - ('𐫉', '𐫤'), - ('𐬀', '𐬵'), - ('𐭀', '𐭕'), - ('𐭠', '𐭲'), - ('𐮀', '𐮑'), - ('𐰀', '𐱈'), - ('𐲀', '𐲲'), - ('𐳀', '𐳲'), - ('𐴀', '𐴣'), - ('𐵊', '𐵥'), - ('𐵯', '𐶅'), - ('𐺀', '𐺩'), - ('𐺰', '𐺱'), - ('𐻂', '𐻄'), - ('𐼀', '𐼜'), - ('𐼧', '𐼧'), - ('𐼰', '𐽅'), - ('𐽰', '𐾁'), - ('𐾰', '𐿄'), - ('𐿠', '𐿶'), - ('𑀃', '𑀷'), - ('𑁱', '𑁲'), - ('𑁵', '𑁵'), - ('𑂃', '𑂯'), - ('𑃐', '𑃨'), - ('𑄃', '𑄦'), - ('𑅄', '𑅄'), - ('𑅇', '𑅇'), - ('𑅐', '𑅲'), - ('𑅶', '𑅶'), - ('𑆃', '𑆲'), - ('𑇁', '𑇄'), - ('𑇚', '𑇚'), - ('𑇜', '𑇜'), - ('𑈀', '𑈑'), - ('𑈓', '𑈫'), - ('𑈿', '𑉀'), - ('𑊀', '𑊆'), - ('𑊈', '𑊈'), - ('𑊊', '𑊍'), - ('𑊏', '𑊝'), - ('𑊟', '𑊨'), - ('𑊰', '𑋞'), - ('𑌅', '𑌌'), - ('𑌏', '𑌐'), - ('𑌓', '𑌨'), - ('𑌪', '𑌰'), - ('𑌲', '𑌳'), - ('𑌵', '𑌹'), - ('𑌽', '𑌽'), - ('𑍐', '𑍐'), - ('𑍝', '𑍡'), - ('𑎀', '𑎉'), - ('𑎋', '𑎋'), - ('𑎎', '𑎎'), - ('𑎐', '𑎵'), - ('𑎷', '𑎷'), - ('𑏑', '𑏑'), - ('𑏓', '𑏓'), - ('𑐀', '𑐴'), - ('𑑇', '𑑊'), - ('𑑟', '𑑡'), - ('𑒀', '𑒯'), - ('𑓄', '𑓅'), - ('𑓇', '𑓇'), - ('𑖀', '𑖮'), - ('𑗘', '𑗛'), - ('𑘀', '𑘯'), - ('𑙄', '𑙄'), - ('𑚀', '𑚪'), - ('𑚸', '𑚸'), - ('𑜀', '𑜚'), - ('𑝀', '𑝆'), - ('𑠀', '𑠫'), - ('𑢠', '𑣟'), - ('𑣿', '𑤆'), - ('𑤉', '𑤉'), - ('𑤌', '𑤓'), - ('𑤕', '𑤖'), - ('𑤘', '𑤯'), - ('𑤿', '𑤿'), - ('𑥁', '𑥁'), - ('𑦠', '𑦧'), - ('𑦪', '𑧐'), - ('𑧡', '𑧡'), - ('𑧣', '𑧣'), - ('𑨀', '𑨀'), - ('𑨋', '𑨲'), - ('𑨺', '𑨺'), - ('𑩐', '𑩐'), - ('𑩜', '𑪉'), - ('𑪝', '𑪝'), - ('𑪰', '𑫸'), - ('𑯀', '𑯠'), - ('𑰀', '𑰈'), - ('𑰊', '𑰮'), - ('𑱀', '𑱀'), - ('𑱲', '𑲏'), - ('𑴀', '𑴆'), - ('𑴈', '𑴉'), - ('𑴋', '𑴰'), - ('𑵆', '𑵆'), - ('𑵠', '𑵥'), - ('𑵧', '𑵨'), - ('𑵪', '𑶉'), - ('𑶘', '𑶘'), - ('𑻠', '𑻲'), - ('𑼂', '𑼂'), - ('𑼄', '𑼐'), - ('𑼒', '𑼳'), - ('𑾰', '𑾰'), - ('𒀀', '𒎙'), - ('𒒀', '𒕃'), - ('𒾐', '𒿰'), - ('𓀀', '𓐯'), - ('𓑁', '𓑆'), - ('𓑠', '𔏺'), - ('𔐀', '𔙆'), - ('𖄀', '𖄝'), - ('𖠀', '𖨸'), - ('𖩀', '𖩞'), - ('𖩰', '𖪾'), - ('𖫐', '𖫭'), - ('𖬀', '𖬯'), - ('𖭀', '𖭃'), - ('𖭣', '𖭷'), - ('𖭽', '𖮏'), - ('𖵀', '𖵬'), - ('𖹀', '𖹿'), - ('𖼀', '𖽊'), - ('𖽐', '𖽐'), - ('𖾓', '𖾟'), - ('𖿠', '𖿡'), - ('𖿣', '𖿣'), - ('𗀀', '𘟷'), - ('𘠀', '𘳕'), - ('𘳿', '𘴈'), - ('𚿰', '𚿳'), - ('𚿵', '𚿻'), - ('𚿽', '𚿾'), - ('𛀀', '𛄢'), - ('𛄲', '𛄲'), - ('𛅐', '𛅒'), - ('𛅕', '𛅕'), - ('𛅤', '𛅧'), - ('𛅰', '𛋻'), - ('𛰀', '𛱪'), - ('𛱰', '𛱼'), - ('𛲀', '𛲈'), - ('𛲐', '𛲙'), - ('𝐀', '𝑔'), - ('𝑖', '𝒜'), - ('𝒞', '𝒟'), - ('𝒢', '𝒢'), - ('𝒥', '𝒦'), - ('𝒩', '𝒬'), - ('𝒮', '𝒹'), - ('𝒻', '𝒻'), - ('𝒽', '𝓃'), - ('𝓅', '𝔅'), - ('𝔇', '𝔊'), - ('𝔍', '𝔔'), - ('𝔖', '𝔜'), - ('𝔞', '𝔹'), - ('𝔻', '𝔾'), - ('𝕀', '𝕄'), - 
('𝕆', '𝕆'), - ('𝕊', '𝕐'), - ('𝕒', '𝚥'), - ('𝚨', '𝛀'), - ('𝛂', '𝛚'), - ('𝛜', '𝛺'), - ('𝛼', '𝜔'), - ('𝜖', '𝜴'), - ('𝜶', '𝝎'), - ('𝝐', '𝝮'), - ('𝝰', '𝞈'), - ('𝞊', '𝞨'), - ('𝞪', '𝟂'), - ('𝟄', '𝟋'), - ('𝼀', '𝼞'), - ('𝼥', '𝼪'), - ('𞀰', '𞁭'), - ('𞄀', '𞄬'), - ('𞄷', '𞄽'), - ('𞅎', '𞅎'), - ('𞊐', '𞊭'), - ('𞋀', '𞋫'), - ('𞓐', '𞓫'), - ('𞗐', '𞗭'), - ('𞗰', '𞗰'), - ('𞟠', '𞟦'), - ('𞟨', '𞟫'), - ('𞟭', '𞟮'), - ('𞟰', '𞟾'), - ('𞠀', '𞣄'), - ('𞤀', '𞥃'), - ('𞥋', '𞥋'), - ('𞸀', '𞸃'), - ('𞸅', '𞸟'), - ('𞸡', '𞸢'), - ('𞸤', '𞸤'), - ('𞸧', '𞸧'), - ('𞸩', '𞸲'), - ('𞸴', '𞸷'), - ('𞸹', '𞸹'), - ('𞸻', '𞸻'), - ('𞹂', '𞹂'), - ('𞹇', '𞹇'), - ('𞹉', '𞹉'), - ('𞹋', '𞹋'), - ('𞹍', '𞹏'), - ('𞹑', '𞹒'), - ('𞹔', '𞹔'), - ('𞹗', '𞹗'), - ('𞹙', '𞹙'), - ('𞹛', '𞹛'), - ('𞹝', '𞹝'), - ('𞹟', '𞹟'), - ('𞹡', '𞹢'), - ('𞹤', '𞹤'), - ('𞹧', '𞹪'), - ('𞹬', '𞹲'), - ('𞹴', '𞹷'), - ('𞹹', '𞹼'), - ('𞹾', '𞹾'), - ('𞺀', '𞺉'), - ('𞺋', '𞺛'), - ('𞺡', '𞺣'), - ('𞺥', '𞺩'), - ('𞺫', '𞺻'), - ('𠀀', '𪛟'), - ('𪜀', '𫜹'), - ('𫝀', '𫠝'), - ('𫠠', '𬺡'), - ('𬺰', '𮯠'), - ('𮯰', '𮹝'), - ('丽', '𪘀'), - ('𰀀', '𱍊'), - ('𱍐', '𲎯'), -]; - -pub const LETTER_NUMBER: &'static [(char, char)] = &[ - ('ᛮ', 'ᛰ'), - ('Ⅰ', 'ↂ'), - ('ↅ', 'ↈ'), - ('〇', '〇'), - ('〡', '〩'), - ('〸', '〺'), - ('ꛦ', 'ꛯ'), - ('𐅀', '𐅴'), - ('𐍁', '𐍁'), - ('𐍊', '𐍊'), - ('𐏑', '𐏕'), - ('𒐀', '𒑮'), -]; - -pub const LINE_SEPARATOR: &'static [(char, char)] = - &[('\u{2028}', '\u{2028}')]; - -pub const LOWERCASE_LETTER: &'static [(char, char)] = &[ - ('a', 'z'), - ('µ', 'µ'), - ('ß', 'ö'), - ('ø', 'ÿ'), - ('ā', 'ā'), - ('ă', 'ă'), - ('ą', 'ą'), - ('ć', 'ć'), - ('ĉ', 'ĉ'), - ('ċ', 'ċ'), - ('č', 'č'), - ('ď', 'ď'), - ('đ', 'đ'), - ('ē', 'ē'), - ('ĕ', 'ĕ'), - ('ė', 'ė'), - ('ę', 'ę'), - ('ě', 'ě'), - ('ĝ', 'ĝ'), - ('ğ', 'ğ'), - ('ġ', 'ġ'), - ('ģ', 'ģ'), - ('ĥ', 'ĥ'), - ('ħ', 'ħ'), - ('ĩ', 'ĩ'), - ('ī', 'ī'), - ('ĭ', 'ĭ'), - ('į', 'į'), - ('ı', 'ı'), - ('ij', 'ij'), - ('ĵ', 'ĵ'), - ('ķ', 'ĸ'), - ('ĺ', 'ĺ'), - ('ļ', 'ļ'), - ('ľ', 'ľ'), - ('ŀ', 'ŀ'), - ('ł', 'ł'), - ('ń', 'ń'), - ('ņ', 'ņ'), - ('ň', 'ʼn'), - ('ŋ', 'ŋ'), - ('ō', 'ō'), - ('ŏ', 'ŏ'), - ('ő', 'ő'), - ('œ', 'œ'), - ('ŕ', 'ŕ'), - ('ŗ', 'ŗ'), - ('ř', 'ř'), - ('ś', 'ś'), - ('ŝ', 'ŝ'), - ('ş', 'ş'), - ('š', 'š'), - ('ţ', 'ţ'), - ('ť', 'ť'), - ('ŧ', 'ŧ'), - ('ũ', 'ũ'), - ('ū', 'ū'), - ('ŭ', 'ŭ'), - ('ů', 'ů'), - ('ű', 'ű'), - ('ų', 'ų'), - ('ŵ', 'ŵ'), - ('ŷ', 'ŷ'), - ('ź', 'ź'), - ('ż', 'ż'), - ('ž', 'ƀ'), - ('ƃ', 'ƃ'), - ('ƅ', 'ƅ'), - ('ƈ', 'ƈ'), - ('ƌ', 'ƍ'), - ('ƒ', 'ƒ'), - ('ƕ', 'ƕ'), - ('ƙ', 'ƛ'), - ('ƞ', 'ƞ'), - ('ơ', 'ơ'), - ('ƣ', 'ƣ'), - ('ƥ', 'ƥ'), - ('ƨ', 'ƨ'), - ('ƪ', 'ƫ'), - ('ƭ', 'ƭ'), - ('ư', 'ư'), - ('ƴ', 'ƴ'), - ('ƶ', 'ƶ'), - ('ƹ', 'ƺ'), - ('ƽ', 'ƿ'), - ('dž', 'dž'), - ('lj', 'lj'), - ('nj', 'nj'), - ('ǎ', 'ǎ'), - ('ǐ', 'ǐ'), - ('ǒ', 'ǒ'), - ('ǔ', 'ǔ'), - ('ǖ', 'ǖ'), - ('ǘ', 'ǘ'), - ('ǚ', 'ǚ'), - ('ǜ', 'ǝ'), - ('ǟ', 'ǟ'), - ('ǡ', 'ǡ'), - ('ǣ', 'ǣ'), - ('ǥ', 'ǥ'), - ('ǧ', 'ǧ'), - ('ǩ', 'ǩ'), - ('ǫ', 'ǫ'), - ('ǭ', 'ǭ'), - ('ǯ', 'ǰ'), - ('dz', 'dz'), - ('ǵ', 'ǵ'), - ('ǹ', 'ǹ'), - ('ǻ', 'ǻ'), - ('ǽ', 'ǽ'), - ('ǿ', 'ǿ'), - ('ȁ', 'ȁ'), - ('ȃ', 'ȃ'), - ('ȅ', 'ȅ'), - ('ȇ', 'ȇ'), - ('ȉ', 'ȉ'), - ('ȋ', 'ȋ'), - ('ȍ', 'ȍ'), - ('ȏ', 'ȏ'), - ('ȑ', 'ȑ'), - ('ȓ', 'ȓ'), - ('ȕ', 'ȕ'), - ('ȗ', 'ȗ'), - ('ș', 'ș'), - ('ț', 'ț'), - ('ȝ', 'ȝ'), - ('ȟ', 'ȟ'), - ('ȡ', 'ȡ'), - ('ȣ', 'ȣ'), - ('ȥ', 'ȥ'), - ('ȧ', 'ȧ'), - ('ȩ', 'ȩ'), - ('ȫ', 'ȫ'), - ('ȭ', 'ȭ'), - ('ȯ', 'ȯ'), - ('ȱ', 'ȱ'), - ('ȳ', 'ȹ'), - ('ȼ', 'ȼ'), - ('ȿ', 'ɀ'), - ('ɂ', 'ɂ'), - ('ɇ', 'ɇ'), - ('ɉ', 'ɉ'), - ('ɋ', 'ɋ'), - ('ɍ', 'ɍ'), - ('ɏ', 'ʓ'), - ('ʕ', 'ʯ'), - ('ͱ', 'ͱ'), - ('ͳ', 'ͳ'), - ('ͷ', 'ͷ'), - ('ͻ', 'ͽ'), - ('ΐ', 'ΐ'), - ('ά', 'ώ'), - 
('ϐ', 'ϑ'), - ('ϕ', 'ϗ'), - ('ϙ', 'ϙ'), - ('ϛ', 'ϛ'), - ('ϝ', 'ϝ'), - ('ϟ', 'ϟ'), - ('ϡ', 'ϡ'), - ('ϣ', 'ϣ'), - ('ϥ', 'ϥ'), - ('ϧ', 'ϧ'), - ('ϩ', 'ϩ'), - ('ϫ', 'ϫ'), - ('ϭ', 'ϭ'), - ('ϯ', 'ϳ'), - ('ϵ', 'ϵ'), - ('ϸ', 'ϸ'), - ('ϻ', 'ϼ'), - ('а', 'џ'), - ('ѡ', 'ѡ'), - ('ѣ', 'ѣ'), - ('ѥ', 'ѥ'), - ('ѧ', 'ѧ'), - ('ѩ', 'ѩ'), - ('ѫ', 'ѫ'), - ('ѭ', 'ѭ'), - ('ѯ', 'ѯ'), - ('ѱ', 'ѱ'), - ('ѳ', 'ѳ'), - ('ѵ', 'ѵ'), - ('ѷ', 'ѷ'), - ('ѹ', 'ѹ'), - ('ѻ', 'ѻ'), - ('ѽ', 'ѽ'), - ('ѿ', 'ѿ'), - ('ҁ', 'ҁ'), - ('ҋ', 'ҋ'), - ('ҍ', 'ҍ'), - ('ҏ', 'ҏ'), - ('ґ', 'ґ'), - ('ғ', 'ғ'), - ('ҕ', 'ҕ'), - ('җ', 'җ'), - ('ҙ', 'ҙ'), - ('қ', 'қ'), - ('ҝ', 'ҝ'), - ('ҟ', 'ҟ'), - ('ҡ', 'ҡ'), - ('ң', 'ң'), - ('ҥ', 'ҥ'), - ('ҧ', 'ҧ'), - ('ҩ', 'ҩ'), - ('ҫ', 'ҫ'), - ('ҭ', 'ҭ'), - ('ү', 'ү'), - ('ұ', 'ұ'), - ('ҳ', 'ҳ'), - ('ҵ', 'ҵ'), - ('ҷ', 'ҷ'), - ('ҹ', 'ҹ'), - ('һ', 'һ'), - ('ҽ', 'ҽ'), - ('ҿ', 'ҿ'), - ('ӂ', 'ӂ'), - ('ӄ', 'ӄ'), - ('ӆ', 'ӆ'), - ('ӈ', 'ӈ'), - ('ӊ', 'ӊ'), - ('ӌ', 'ӌ'), - ('ӎ', 'ӏ'), - ('ӑ', 'ӑ'), - ('ӓ', 'ӓ'), - ('ӕ', 'ӕ'), - ('ӗ', 'ӗ'), - ('ә', 'ә'), - ('ӛ', 'ӛ'), - ('ӝ', 'ӝ'), - ('ӟ', 'ӟ'), - ('ӡ', 'ӡ'), - ('ӣ', 'ӣ'), - ('ӥ', 'ӥ'), - ('ӧ', 'ӧ'), - ('ө', 'ө'), - ('ӫ', 'ӫ'), - ('ӭ', 'ӭ'), - ('ӯ', 'ӯ'), - ('ӱ', 'ӱ'), - ('ӳ', 'ӳ'), - ('ӵ', 'ӵ'), - ('ӷ', 'ӷ'), - ('ӹ', 'ӹ'), - ('ӻ', 'ӻ'), - ('ӽ', 'ӽ'), - ('ӿ', 'ӿ'), - ('ԁ', 'ԁ'), - ('ԃ', 'ԃ'), - ('ԅ', 'ԅ'), - ('ԇ', 'ԇ'), - ('ԉ', 'ԉ'), - ('ԋ', 'ԋ'), - ('ԍ', 'ԍ'), - ('ԏ', 'ԏ'), - ('ԑ', 'ԑ'), - ('ԓ', 'ԓ'), - ('ԕ', 'ԕ'), - ('ԗ', 'ԗ'), - ('ԙ', 'ԙ'), - ('ԛ', 'ԛ'), - ('ԝ', 'ԝ'), - ('ԟ', 'ԟ'), - ('ԡ', 'ԡ'), - ('ԣ', 'ԣ'), - ('ԥ', 'ԥ'), - ('ԧ', 'ԧ'), - ('ԩ', 'ԩ'), - ('ԫ', 'ԫ'), - ('ԭ', 'ԭ'), - ('ԯ', 'ԯ'), - ('ՠ', 'ֈ'), - ('ა', 'ჺ'), - ('ჽ', 'ჿ'), - ('ᏸ', 'ᏽ'), - ('ᲀ', 'ᲈ'), - ('ᲊ', 'ᲊ'), - ('ᴀ', 'ᴫ'), - ('ᵫ', 'ᵷ'), - ('ᵹ', 'ᶚ'), - ('ḁ', 'ḁ'), - ('ḃ', 'ḃ'), - ('ḅ', 'ḅ'), - ('ḇ', 'ḇ'), - ('ḉ', 'ḉ'), - ('ḋ', 'ḋ'), - ('ḍ', 'ḍ'), - ('ḏ', 'ḏ'), - ('ḑ', 'ḑ'), - ('ḓ', 'ḓ'), - ('ḕ', 'ḕ'), - ('ḗ', 'ḗ'), - ('ḙ', 'ḙ'), - ('ḛ', 'ḛ'), - ('ḝ', 'ḝ'), - ('ḟ', 'ḟ'), - ('ḡ', 'ḡ'), - ('ḣ', 'ḣ'), - ('ḥ', 'ḥ'), - ('ḧ', 'ḧ'), - ('ḩ', 'ḩ'), - ('ḫ', 'ḫ'), - ('ḭ', 'ḭ'), - ('ḯ', 'ḯ'), - ('ḱ', 'ḱ'), - ('ḳ', 'ḳ'), - ('ḵ', 'ḵ'), - ('ḷ', 'ḷ'), - ('ḹ', 'ḹ'), - ('ḻ', 'ḻ'), - ('ḽ', 'ḽ'), - ('ḿ', 'ḿ'), - ('ṁ', 'ṁ'), - ('ṃ', 'ṃ'), - ('ṅ', 'ṅ'), - ('ṇ', 'ṇ'), - ('ṉ', 'ṉ'), - ('ṋ', 'ṋ'), - ('ṍ', 'ṍ'), - ('ṏ', 'ṏ'), - ('ṑ', 'ṑ'), - ('ṓ', 'ṓ'), - ('ṕ', 'ṕ'), - ('ṗ', 'ṗ'), - ('ṙ', 'ṙ'), - ('ṛ', 'ṛ'), - ('ṝ', 'ṝ'), - ('ṟ', 'ṟ'), - ('ṡ', 'ṡ'), - ('ṣ', 'ṣ'), - ('ṥ', 'ṥ'), - ('ṧ', 'ṧ'), - ('ṩ', 'ṩ'), - ('ṫ', 'ṫ'), - ('ṭ', 'ṭ'), - ('ṯ', 'ṯ'), - ('ṱ', 'ṱ'), - ('ṳ', 'ṳ'), - ('ṵ', 'ṵ'), - ('ṷ', 'ṷ'), - ('ṹ', 'ṹ'), - ('ṻ', 'ṻ'), - ('ṽ', 'ṽ'), - ('ṿ', 'ṿ'), - ('ẁ', 'ẁ'), - ('ẃ', 'ẃ'), - ('ẅ', 'ẅ'), - ('ẇ', 'ẇ'), - ('ẉ', 'ẉ'), - ('ẋ', 'ẋ'), - ('ẍ', 'ẍ'), - ('ẏ', 'ẏ'), - ('ẑ', 'ẑ'), - ('ẓ', 'ẓ'), - ('ẕ', 'ẝ'), - ('ẟ', 'ẟ'), - ('ạ', 'ạ'), - ('ả', 'ả'), - ('ấ', 'ấ'), - ('ầ', 'ầ'), - ('ẩ', 'ẩ'), - ('ẫ', 'ẫ'), - ('ậ', 'ậ'), - ('ắ', 'ắ'), - ('ằ', 'ằ'), - ('ẳ', 'ẳ'), - ('ẵ', 'ẵ'), - ('ặ', 'ặ'), - ('ẹ', 'ẹ'), - ('ẻ', 'ẻ'), - ('ẽ', 'ẽ'), - ('ế', 'ế'), - ('ề', 'ề'), - ('ể', 'ể'), - ('ễ', 'ễ'), - ('ệ', 'ệ'), - ('ỉ', 'ỉ'), - ('ị', 'ị'), - ('ọ', 'ọ'), - ('ỏ', 'ỏ'), - ('ố', 'ố'), - ('ồ', 'ồ'), - ('ổ', 'ổ'), - ('ỗ', 'ỗ'), - ('ộ', 'ộ'), - ('ớ', 'ớ'), - ('ờ', 'ờ'), - ('ở', 'ở'), - ('ỡ', 'ỡ'), - ('ợ', 'ợ'), - ('ụ', 'ụ'), - ('ủ', 'ủ'), - ('ứ', 'ứ'), - ('ừ', 'ừ'), - ('ử', 'ử'), - ('ữ', 'ữ'), - ('ự', 'ự'), - ('ỳ', 'ỳ'), - ('ỵ', 'ỵ'), - ('ỷ', 'ỷ'), - ('ỹ', 'ỹ'), - ('ỻ', 'ỻ'), - ('ỽ', 'ỽ'), - ('ỿ', 'ἇ'), - ('ἐ', 'ἕ'), - ('ἠ', 'ἧ'), - ('ἰ', 'ἷ'), - ('ὀ', 'ὅ'), 
- ('ὐ', 'ὗ'), - ('ὠ', 'ὧ'), - ('ὰ', 'ώ'), - ('ᾀ', 'ᾇ'), - ('ᾐ', 'ᾗ'), - ('ᾠ', 'ᾧ'), - ('ᾰ', 'ᾴ'), - ('ᾶ', 'ᾷ'), - ('ι', 'ι'), - ('ῂ', 'ῄ'), - ('ῆ', 'ῇ'), - ('ῐ', 'ΐ'), - ('ῖ', 'ῗ'), - ('ῠ', 'ῧ'), - ('ῲ', 'ῴ'), - ('ῶ', 'ῷ'), - ('ℊ', 'ℊ'), - ('ℎ', 'ℏ'), - ('ℓ', 'ℓ'), - ('ℯ', 'ℯ'), - ('ℴ', 'ℴ'), - ('ℹ', 'ℹ'), - ('ℼ', 'ℽ'), - ('ⅆ', 'ⅉ'), - ('ⅎ', 'ⅎ'), - ('ↄ', 'ↄ'), - ('ⰰ', 'ⱟ'), - ('ⱡ', 'ⱡ'), - ('ⱥ', 'ⱦ'), - ('ⱨ', 'ⱨ'), - ('ⱪ', 'ⱪ'), - ('ⱬ', 'ⱬ'), - ('ⱱ', 'ⱱ'), - ('ⱳ', 'ⱴ'), - ('ⱶ', 'ⱻ'), - ('ⲁ', 'ⲁ'), - ('ⲃ', 'ⲃ'), - ('ⲅ', 'ⲅ'), - ('ⲇ', 'ⲇ'), - ('ⲉ', 'ⲉ'), - ('ⲋ', 'ⲋ'), - ('ⲍ', 'ⲍ'), - ('ⲏ', 'ⲏ'), - ('ⲑ', 'ⲑ'), - ('ⲓ', 'ⲓ'), - ('ⲕ', 'ⲕ'), - ('ⲗ', 'ⲗ'), - ('ⲙ', 'ⲙ'), - ('ⲛ', 'ⲛ'), - ('ⲝ', 'ⲝ'), - ('ⲟ', 'ⲟ'), - ('ⲡ', 'ⲡ'), - ('ⲣ', 'ⲣ'), - ('ⲥ', 'ⲥ'), - ('ⲧ', 'ⲧ'), - ('ⲩ', 'ⲩ'), - ('ⲫ', 'ⲫ'), - ('ⲭ', 'ⲭ'), - ('ⲯ', 'ⲯ'), - ('ⲱ', 'ⲱ'), - ('ⲳ', 'ⲳ'), - ('ⲵ', 'ⲵ'), - ('ⲷ', 'ⲷ'), - ('ⲹ', 'ⲹ'), - ('ⲻ', 'ⲻ'), - ('ⲽ', 'ⲽ'), - ('ⲿ', 'ⲿ'), - ('ⳁ', 'ⳁ'), - ('ⳃ', 'ⳃ'), - ('ⳅ', 'ⳅ'), - ('ⳇ', 'ⳇ'), - ('ⳉ', 'ⳉ'), - ('ⳋ', 'ⳋ'), - ('ⳍ', 'ⳍ'), - ('ⳏ', 'ⳏ'), - ('ⳑ', 'ⳑ'), - ('ⳓ', 'ⳓ'), - ('ⳕ', 'ⳕ'), - ('ⳗ', 'ⳗ'), - ('ⳙ', 'ⳙ'), - ('ⳛ', 'ⳛ'), - ('ⳝ', 'ⳝ'), - ('ⳟ', 'ⳟ'), - ('ⳡ', 'ⳡ'), - ('ⳣ', 'ⳤ'), - ('ⳬ', 'ⳬ'), - ('ⳮ', 'ⳮ'), - ('ⳳ', 'ⳳ'), - ('ⴀ', 'ⴥ'), - ('ⴧ', 'ⴧ'), - ('ⴭ', 'ⴭ'), - ('ꙁ', 'ꙁ'), - ('ꙃ', 'ꙃ'), - ('ꙅ', 'ꙅ'), - ('ꙇ', 'ꙇ'), - ('ꙉ', 'ꙉ'), - ('ꙋ', 'ꙋ'), - ('ꙍ', 'ꙍ'), - ('ꙏ', 'ꙏ'), - ('ꙑ', 'ꙑ'), - ('ꙓ', 'ꙓ'), - ('ꙕ', 'ꙕ'), - ('ꙗ', 'ꙗ'), - ('ꙙ', 'ꙙ'), - ('ꙛ', 'ꙛ'), - ('ꙝ', 'ꙝ'), - ('ꙟ', 'ꙟ'), - ('ꙡ', 'ꙡ'), - ('ꙣ', 'ꙣ'), - ('ꙥ', 'ꙥ'), - ('ꙧ', 'ꙧ'), - ('ꙩ', 'ꙩ'), - ('ꙫ', 'ꙫ'), - ('ꙭ', 'ꙭ'), - ('ꚁ', 'ꚁ'), - ('ꚃ', 'ꚃ'), - ('ꚅ', 'ꚅ'), - ('ꚇ', 'ꚇ'), - ('ꚉ', 'ꚉ'), - ('ꚋ', 'ꚋ'), - ('ꚍ', 'ꚍ'), - ('ꚏ', 'ꚏ'), - ('ꚑ', 'ꚑ'), - ('ꚓ', 'ꚓ'), - ('ꚕ', 'ꚕ'), - ('ꚗ', 'ꚗ'), - ('ꚙ', 'ꚙ'), - ('ꚛ', 'ꚛ'), - ('ꜣ', 'ꜣ'), - ('ꜥ', 'ꜥ'), - ('ꜧ', 'ꜧ'), - ('ꜩ', 'ꜩ'), - ('ꜫ', 'ꜫ'), - ('ꜭ', 'ꜭ'), - ('ꜯ', 'ꜱ'), - ('ꜳ', 'ꜳ'), - ('ꜵ', 'ꜵ'), - ('ꜷ', 'ꜷ'), - ('ꜹ', 'ꜹ'), - ('ꜻ', 'ꜻ'), - ('ꜽ', 'ꜽ'), - ('ꜿ', 'ꜿ'), - ('ꝁ', 'ꝁ'), - ('ꝃ', 'ꝃ'), - ('ꝅ', 'ꝅ'), - ('ꝇ', 'ꝇ'), - ('ꝉ', 'ꝉ'), - ('ꝋ', 'ꝋ'), - ('ꝍ', 'ꝍ'), - ('ꝏ', 'ꝏ'), - ('ꝑ', 'ꝑ'), - ('ꝓ', 'ꝓ'), - ('ꝕ', 'ꝕ'), - ('ꝗ', 'ꝗ'), - ('ꝙ', 'ꝙ'), - ('ꝛ', 'ꝛ'), - ('ꝝ', 'ꝝ'), - ('ꝟ', 'ꝟ'), - ('ꝡ', 'ꝡ'), - ('ꝣ', 'ꝣ'), - ('ꝥ', 'ꝥ'), - ('ꝧ', 'ꝧ'), - ('ꝩ', 'ꝩ'), - ('ꝫ', 'ꝫ'), - ('ꝭ', 'ꝭ'), - ('ꝯ', 'ꝯ'), - ('ꝱ', 'ꝸ'), - ('ꝺ', 'ꝺ'), - ('ꝼ', 'ꝼ'), - ('ꝿ', 'ꝿ'), - ('ꞁ', 'ꞁ'), - ('ꞃ', 'ꞃ'), - ('ꞅ', 'ꞅ'), - ('ꞇ', 'ꞇ'), - ('ꞌ', 'ꞌ'), - ('ꞎ', 'ꞎ'), - ('ꞑ', 'ꞑ'), - ('ꞓ', 'ꞕ'), - ('ꞗ', 'ꞗ'), - ('ꞙ', 'ꞙ'), - ('ꞛ', 'ꞛ'), - ('ꞝ', 'ꞝ'), - ('ꞟ', 'ꞟ'), - ('ꞡ', 'ꞡ'), - ('ꞣ', 'ꞣ'), - ('ꞥ', 'ꞥ'), - ('ꞧ', 'ꞧ'), - ('ꞩ', 'ꞩ'), - ('ꞯ', 'ꞯ'), - ('ꞵ', 'ꞵ'), - ('ꞷ', 'ꞷ'), - ('ꞹ', 'ꞹ'), - ('ꞻ', 'ꞻ'), - ('ꞽ', 'ꞽ'), - ('ꞿ', 'ꞿ'), - ('ꟁ', 'ꟁ'), - ('ꟃ', 'ꟃ'), - ('ꟈ', 'ꟈ'), - ('ꟊ', 'ꟊ'), - ('ꟍ', 'ꟍ'), - ('ꟑ', 'ꟑ'), - ('ꟓ', 'ꟓ'), - ('ꟕ', 'ꟕ'), - ('ꟗ', 'ꟗ'), - ('ꟙ', 'ꟙ'), - ('ꟛ', 'ꟛ'), - ('ꟶ', 'ꟶ'), - ('ꟺ', 'ꟺ'), - ('ꬰ', 'ꭚ'), - ('ꭠ', 'ꭨ'), - ('ꭰ', 'ꮿ'), - ('ff', 'st'), - ('ﬓ', 'ﬗ'), - ('a', 'z'), - ('𐐨', '𐑏'), - ('𐓘', '𐓻'), - ('𐖗', '𐖡'), - ('𐖣', '𐖱'), - ('𐖳', '𐖹'), - ('𐖻', '𐖼'), - ('𐳀', '𐳲'), - ('𐵰', '𐶅'), - ('𑣀', '𑣟'), - ('𖹠', '𖹿'), - ('𝐚', '𝐳'), - ('𝑎', '𝑔'), - ('𝑖', '𝑧'), - ('𝒂', '𝒛'), - ('𝒶', '𝒹'), - ('𝒻', '𝒻'), - ('𝒽', '𝓃'), - ('𝓅', '𝓏'), - ('𝓪', '𝔃'), - ('𝔞', '𝔷'), - ('𝕒', '𝕫'), - ('𝖆', '𝖟'), - ('𝖺', '𝗓'), - ('𝗮', '𝘇'), - ('𝘢', '𝘻'), - ('𝙖', '𝙯'), - ('𝚊', '𝚥'), - ('𝛂', '𝛚'), - ('𝛜', '𝛡'), - ('𝛼', '𝜔'), - ('𝜖', '𝜛'), - ('𝜶', '𝝎'), - ('𝝐', '𝝕'), - ('𝝰', '𝞈'), - ('𝞊', '𝞏'), - ('𝞪', '𝟂'), - ('𝟄', '𝟉'), - ('𝟋', '𝟋'), - ('𝼀', '𝼉'), - ('𝼋', 
'𝼞'), - ('𝼥', '𝼪'), - ('𞤢', '𞥃'), -]; - -pub const MARK: &'static [(char, char)] = &[ - ('\u{300}', '\u{36f}'), - ('\u{483}', '\u{489}'), - ('\u{591}', '\u{5bd}'), - ('\u{5bf}', '\u{5bf}'), - ('\u{5c1}', '\u{5c2}'), - ('\u{5c4}', '\u{5c5}'), - ('\u{5c7}', '\u{5c7}'), - ('\u{610}', '\u{61a}'), - ('\u{64b}', '\u{65f}'), - ('\u{670}', '\u{670}'), - ('\u{6d6}', '\u{6dc}'), - ('\u{6df}', '\u{6e4}'), - ('\u{6e7}', '\u{6e8}'), - ('\u{6ea}', '\u{6ed}'), - ('\u{711}', '\u{711}'), - ('\u{730}', '\u{74a}'), - ('\u{7a6}', '\u{7b0}'), - ('\u{7eb}', '\u{7f3}'), - ('\u{7fd}', '\u{7fd}'), - ('\u{816}', '\u{819}'), - ('\u{81b}', '\u{823}'), - ('\u{825}', '\u{827}'), - ('\u{829}', '\u{82d}'), - ('\u{859}', '\u{85b}'), - ('\u{897}', '\u{89f}'), - ('\u{8ca}', '\u{8e1}'), - ('\u{8e3}', 'ः'), - ('\u{93a}', '\u{93c}'), - ('ा', 'ॏ'), - ('\u{951}', '\u{957}'), - ('\u{962}', '\u{963}'), - ('\u{981}', 'ঃ'), - ('\u{9bc}', '\u{9bc}'), - ('\u{9be}', '\u{9c4}'), - ('ে', 'ৈ'), - ('ো', '\u{9cd}'), - ('\u{9d7}', '\u{9d7}'), - ('\u{9e2}', '\u{9e3}'), - ('\u{9fe}', '\u{9fe}'), - ('\u{a01}', 'ਃ'), - ('\u{a3c}', '\u{a3c}'), - ('ਾ', '\u{a42}'), - ('\u{a47}', '\u{a48}'), - ('\u{a4b}', '\u{a4d}'), - ('\u{a51}', '\u{a51}'), - ('\u{a70}', '\u{a71}'), - ('\u{a75}', '\u{a75}'), - ('\u{a81}', 'ઃ'), - ('\u{abc}', '\u{abc}'), - ('ા', '\u{ac5}'), - ('\u{ac7}', 'ૉ'), - ('ો', '\u{acd}'), - ('\u{ae2}', '\u{ae3}'), - ('\u{afa}', '\u{aff}'), - ('\u{b01}', 'ଃ'), - ('\u{b3c}', '\u{b3c}'), - ('\u{b3e}', '\u{b44}'), - ('େ', 'ୈ'), - ('ୋ', '\u{b4d}'), - ('\u{b55}', '\u{b57}'), - ('\u{b62}', '\u{b63}'), - ('\u{b82}', '\u{b82}'), - ('\u{bbe}', 'ூ'), - ('ெ', 'ை'), - ('ொ', '\u{bcd}'), - ('\u{bd7}', '\u{bd7}'), - ('\u{c00}', '\u{c04}'), - ('\u{c3c}', '\u{c3c}'), - ('\u{c3e}', 'ౄ'), - ('\u{c46}', '\u{c48}'), - ('\u{c4a}', '\u{c4d}'), - ('\u{c55}', '\u{c56}'), - ('\u{c62}', '\u{c63}'), - ('\u{c81}', 'ಃ'), - ('\u{cbc}', '\u{cbc}'), - ('ಾ', 'ೄ'), - ('\u{cc6}', '\u{cc8}'), - ('\u{cca}', '\u{ccd}'), - ('\u{cd5}', '\u{cd6}'), - ('\u{ce2}', '\u{ce3}'), - ('ೳ', 'ೳ'), - ('\u{d00}', 'ഃ'), - ('\u{d3b}', '\u{d3c}'), - ('\u{d3e}', '\u{d44}'), - ('െ', 'ൈ'), - ('ൊ', '\u{d4d}'), - ('\u{d57}', '\u{d57}'), - ('\u{d62}', '\u{d63}'), - ('\u{d81}', 'ඃ'), - ('\u{dca}', '\u{dca}'), - ('\u{dcf}', '\u{dd4}'), - ('\u{dd6}', '\u{dd6}'), - ('ෘ', '\u{ddf}'), - ('ෲ', 'ෳ'), - ('\u{e31}', '\u{e31}'), - ('\u{e34}', '\u{e3a}'), - ('\u{e47}', '\u{e4e}'), - ('\u{eb1}', '\u{eb1}'), - ('\u{eb4}', '\u{ebc}'), - ('\u{ec8}', '\u{ece}'), - ('\u{f18}', '\u{f19}'), - ('\u{f35}', '\u{f35}'), - ('\u{f37}', '\u{f37}'), - ('\u{f39}', '\u{f39}'), - ('༾', '༿'), - ('\u{f71}', '\u{f84}'), - ('\u{f86}', '\u{f87}'), - ('\u{f8d}', '\u{f97}'), - ('\u{f99}', '\u{fbc}'), - ('\u{fc6}', '\u{fc6}'), - ('ါ', '\u{103e}'), - ('ၖ', '\u{1059}'), - ('\u{105e}', '\u{1060}'), - ('ၢ', 'ၤ'), - ('ၧ', 'ၭ'), - ('\u{1071}', '\u{1074}'), - ('\u{1082}', '\u{108d}'), - ('ႏ', 'ႏ'), - ('ႚ', '\u{109d}'), - ('\u{135d}', '\u{135f}'), - ('\u{1712}', '\u{1715}'), - ('\u{1732}', '\u{1734}'), - ('\u{1752}', '\u{1753}'), - ('\u{1772}', '\u{1773}'), - ('\u{17b4}', '\u{17d3}'), - ('\u{17dd}', '\u{17dd}'), - ('\u{180b}', '\u{180d}'), - ('\u{180f}', '\u{180f}'), - ('\u{1885}', '\u{1886}'), - ('\u{18a9}', '\u{18a9}'), - ('\u{1920}', 'ᤫ'), - ('ᤰ', '\u{193b}'), - ('\u{1a17}', '\u{1a1b}'), - ('ᩕ', '\u{1a5e}'), - ('\u{1a60}', '\u{1a7c}'), - ('\u{1a7f}', '\u{1a7f}'), - ('\u{1ab0}', '\u{1ace}'), - ('\u{1b00}', 'ᬄ'), - ('\u{1b34}', '\u{1b44}'), - ('\u{1b6b}', '\u{1b73}'), - ('\u{1b80}', 'ᮂ'), - ('ᮡ', '\u{1bad}'), - ('\u{1be6}', '\u{1bf3}'), - ('ᰤ', 
'\u{1c37}'), - ('\u{1cd0}', '\u{1cd2}'), - ('\u{1cd4}', '\u{1ce8}'), - ('\u{1ced}', '\u{1ced}'), - ('\u{1cf4}', '\u{1cf4}'), - ('᳷', '\u{1cf9}'), - ('\u{1dc0}', '\u{1dff}'), - ('\u{20d0}', '\u{20f0}'), - ('\u{2cef}', '\u{2cf1}'), - ('\u{2d7f}', '\u{2d7f}'), - ('\u{2de0}', '\u{2dff}'), - ('\u{302a}', '\u{302f}'), - ('\u{3099}', '\u{309a}'), - ('\u{a66f}', '\u{a672}'), - ('\u{a674}', '\u{a67d}'), - ('\u{a69e}', '\u{a69f}'), - ('\u{a6f0}', '\u{a6f1}'), - ('\u{a802}', '\u{a802}'), - ('\u{a806}', '\u{a806}'), - ('\u{a80b}', '\u{a80b}'), - ('ꠣ', 'ꠧ'), - ('\u{a82c}', '\u{a82c}'), - ('ꢀ', 'ꢁ'), - ('ꢴ', '\u{a8c5}'), - ('\u{a8e0}', '\u{a8f1}'), - ('\u{a8ff}', '\u{a8ff}'), - ('\u{a926}', '\u{a92d}'), - ('\u{a947}', '\u{a953}'), - ('\u{a980}', 'ꦃ'), - ('\u{a9b3}', '\u{a9c0}'), - ('\u{a9e5}', '\u{a9e5}'), - ('\u{aa29}', '\u{aa36}'), - ('\u{aa43}', '\u{aa43}'), - ('\u{aa4c}', 'ꩍ'), - ('ꩻ', 'ꩽ'), - ('\u{aab0}', '\u{aab0}'), - ('\u{aab2}', '\u{aab4}'), - ('\u{aab7}', '\u{aab8}'), - ('\u{aabe}', '\u{aabf}'), - ('\u{aac1}', '\u{aac1}'), - ('ꫫ', 'ꫯ'), - ('ꫵ', '\u{aaf6}'), - ('ꯣ', 'ꯪ'), - ('꯬', '\u{abed}'), - ('\u{fb1e}', '\u{fb1e}'), - ('\u{fe00}', '\u{fe0f}'), - ('\u{fe20}', '\u{fe2f}'), - ('\u{101fd}', '\u{101fd}'), - ('\u{102e0}', '\u{102e0}'), - ('\u{10376}', '\u{1037a}'), - ('\u{10a01}', '\u{10a03}'), - ('\u{10a05}', '\u{10a06}'), - ('\u{10a0c}', '\u{10a0f}'), - ('\u{10a38}', '\u{10a3a}'), - ('\u{10a3f}', '\u{10a3f}'), - ('\u{10ae5}', '\u{10ae6}'), - ('\u{10d24}', '\u{10d27}'), - ('\u{10d69}', '\u{10d6d}'), - ('\u{10eab}', '\u{10eac}'), - ('\u{10efc}', '\u{10eff}'), - ('\u{10f46}', '\u{10f50}'), - ('\u{10f82}', '\u{10f85}'), - ('𑀀', '𑀂'), - ('\u{11038}', '\u{11046}'), - ('\u{11070}', '\u{11070}'), - ('\u{11073}', '\u{11074}'), - ('\u{1107f}', '𑂂'), - ('𑂰', '\u{110ba}'), - ('\u{110c2}', '\u{110c2}'), - ('\u{11100}', '\u{11102}'), - ('\u{11127}', '\u{11134}'), - ('𑅅', '𑅆'), - ('\u{11173}', '\u{11173}'), - ('\u{11180}', '𑆂'), - ('𑆳', '\u{111c0}'), - ('\u{111c9}', '\u{111cc}'), - ('𑇎', '\u{111cf}'), - ('𑈬', '\u{11237}'), - ('\u{1123e}', '\u{1123e}'), - ('\u{11241}', '\u{11241}'), - ('\u{112df}', '\u{112ea}'), - ('\u{11300}', '𑌃'), - ('\u{1133b}', '\u{1133c}'), - ('\u{1133e}', '𑍄'), - ('𑍇', '𑍈'), - ('𑍋', '\u{1134d}'), - ('\u{11357}', '\u{11357}'), - ('𑍢', '𑍣'), - ('\u{11366}', '\u{1136c}'), - ('\u{11370}', '\u{11374}'), - ('\u{113b8}', '\u{113c0}'), - ('\u{113c2}', '\u{113c2}'), - ('\u{113c5}', '\u{113c5}'), - ('\u{113c7}', '𑏊'), - ('𑏌', '\u{113d0}'), - ('\u{113d2}', '\u{113d2}'), - ('\u{113e1}', '\u{113e2}'), - ('𑐵', '\u{11446}'), - ('\u{1145e}', '\u{1145e}'), - ('\u{114b0}', '\u{114c3}'), - ('\u{115af}', '\u{115b5}'), - ('𑖸', '\u{115c0}'), - ('\u{115dc}', '\u{115dd}'), - ('𑘰', '\u{11640}'), - ('\u{116ab}', '\u{116b7}'), - ('\u{1171d}', '\u{1172b}'), - ('𑠬', '\u{1183a}'), - ('\u{11930}', '𑤵'), - ('𑤷', '𑤸'), - ('\u{1193b}', '\u{1193e}'), - ('𑥀', '𑥀'), - ('𑥂', '\u{11943}'), - ('𑧑', '\u{119d7}'), - ('\u{119da}', '\u{119e0}'), - ('𑧤', '𑧤'), - ('\u{11a01}', '\u{11a0a}'), - ('\u{11a33}', '𑨹'), - ('\u{11a3b}', '\u{11a3e}'), - ('\u{11a47}', '\u{11a47}'), - ('\u{11a51}', '\u{11a5b}'), - ('\u{11a8a}', '\u{11a99}'), - ('𑰯', '\u{11c36}'), - ('\u{11c38}', '\u{11c3f}'), - ('\u{11c92}', '\u{11ca7}'), - ('𑲩', '\u{11cb6}'), - ('\u{11d31}', '\u{11d36}'), - ('\u{11d3a}', '\u{11d3a}'), - ('\u{11d3c}', '\u{11d3d}'), - ('\u{11d3f}', '\u{11d45}'), - ('\u{11d47}', '\u{11d47}'), - ('𑶊', '𑶎'), - ('\u{11d90}', '\u{11d91}'), - ('𑶓', '\u{11d97}'), - ('\u{11ef3}', '𑻶'), - ('\u{11f00}', '\u{11f01}'), - ('𑼃', '𑼃'), - ('𑼴', '\u{11f3a}'), - 
('𑼾', '\u{11f42}'), - ('\u{11f5a}', '\u{11f5a}'), - ('\u{13440}', '\u{13440}'), - ('\u{13447}', '\u{13455}'), - ('\u{1611e}', '\u{1612f}'), - ('\u{16af0}', '\u{16af4}'), - ('\u{16b30}', '\u{16b36}'), - ('\u{16f4f}', '\u{16f4f}'), - ('𖽑', '𖾇'), - ('\u{16f8f}', '\u{16f92}'), - ('\u{16fe4}', '\u{16fe4}'), - ('\u{16ff0}', '\u{16ff1}'), - ('\u{1bc9d}', '\u{1bc9e}'), - ('\u{1cf00}', '\u{1cf2d}'), - ('\u{1cf30}', '\u{1cf46}'), - ('\u{1d165}', '\u{1d169}'), - ('\u{1d16d}', '\u{1d172}'), - ('\u{1d17b}', '\u{1d182}'), - ('\u{1d185}', '\u{1d18b}'), - ('\u{1d1aa}', '\u{1d1ad}'), - ('\u{1d242}', '\u{1d244}'), - ('\u{1da00}', '\u{1da36}'), - ('\u{1da3b}', '\u{1da6c}'), - ('\u{1da75}', '\u{1da75}'), - ('\u{1da84}', '\u{1da84}'), - ('\u{1da9b}', '\u{1da9f}'), - ('\u{1daa1}', '\u{1daaf}'), - ('\u{1e000}', '\u{1e006}'), - ('\u{1e008}', '\u{1e018}'), - ('\u{1e01b}', '\u{1e021}'), - ('\u{1e023}', '\u{1e024}'), - ('\u{1e026}', '\u{1e02a}'), - ('\u{1e08f}', '\u{1e08f}'), - ('\u{1e130}', '\u{1e136}'), - ('\u{1e2ae}', '\u{1e2ae}'), - ('\u{1e2ec}', '\u{1e2ef}'), - ('\u{1e4ec}', '\u{1e4ef}'), - ('\u{1e5ee}', '\u{1e5ef}'), - ('\u{1e8d0}', '\u{1e8d6}'), - ('\u{1e944}', '\u{1e94a}'), - ('\u{e0100}', '\u{e01ef}'), -]; - -pub const MATH_SYMBOL: &'static [(char, char)] = &[ - ('+', '+'), - ('<', '>'), - ('|', '|'), - ('~', '~'), - ('¬', '¬'), - ('±', '±'), - ('×', '×'), - ('÷', '÷'), - ('϶', '϶'), - ('؆', '؈'), - ('⁄', '⁄'), - ('⁒', '⁒'), - ('⁺', '⁼'), - ('₊', '₌'), - ('℘', '℘'), - ('⅀', '⅄'), - ('⅋', '⅋'), - ('←', '↔'), - ('↚', '↛'), - ('↠', '↠'), - ('↣', '↣'), - ('↦', '↦'), - ('↮', '↮'), - ('⇎', '⇏'), - ('⇒', '⇒'), - ('⇔', '⇔'), - ('⇴', '⋿'), - ('⌠', '⌡'), - ('⍼', '⍼'), - ('⎛', '⎳'), - ('⏜', '⏡'), - ('▷', '▷'), - ('◁', '◁'), - ('◸', '◿'), - ('♯', '♯'), - ('⟀', '⟄'), - ('⟇', '⟥'), - ('⟰', '⟿'), - ('⤀', '⦂'), - ('⦙', '⧗'), - ('⧜', '⧻'), - ('⧾', '⫿'), - ('⬰', '⭄'), - ('⭇', '⭌'), - ('﬩', '﬩'), - ('﹢', '﹢'), - ('﹤', '﹦'), - ('+', '+'), - ('<', '>'), - ('|', '|'), - ('~', '~'), - ('¬', '¬'), - ('←', '↓'), - ('𐶎', '𐶏'), - ('𝛁', '𝛁'), - ('𝛛', '𝛛'), - ('𝛻', '𝛻'), - ('𝜕', '𝜕'), - ('𝜵', '𝜵'), - ('𝝏', '𝝏'), - ('𝝯', '𝝯'), - ('𝞉', '𝞉'), - ('𝞩', '𝞩'), - ('𝟃', '𝟃'), - ('𞻰', '𞻱'), -]; - -pub const MODIFIER_LETTER: &'static [(char, char)] = &[ - ('ʰ', 'ˁ'), - ('ˆ', 'ˑ'), - ('ˠ', 'ˤ'), - ('ˬ', 'ˬ'), - ('ˮ', 'ˮ'), - ('ʹ', 'ʹ'), - ('ͺ', 'ͺ'), - ('ՙ', 'ՙ'), - ('ـ', 'ـ'), - ('ۥ', 'ۦ'), - ('ߴ', 'ߵ'), - ('ߺ', 'ߺ'), - ('ࠚ', 'ࠚ'), - ('ࠤ', 'ࠤ'), - ('ࠨ', 'ࠨ'), - ('ࣉ', 'ࣉ'), - ('ॱ', 'ॱ'), - ('ๆ', 'ๆ'), - ('ໆ', 'ໆ'), - ('ჼ', 'ჼ'), - ('ៗ', 'ៗ'), - ('ᡃ', 'ᡃ'), - ('ᪧ', 'ᪧ'), - ('ᱸ', 'ᱽ'), - ('ᴬ', 'ᵪ'), - ('ᵸ', 'ᵸ'), - ('ᶛ', 'ᶿ'), - ('ⁱ', 'ⁱ'), - ('ⁿ', 'ⁿ'), - ('ₐ', 'ₜ'), - ('ⱼ', 'ⱽ'), - ('ⵯ', 'ⵯ'), - ('ⸯ', 'ⸯ'), - ('々', '々'), - ('〱', '〵'), - ('〻', '〻'), - ('ゝ', 'ゞ'), - ('ー', 'ヾ'), - ('ꀕ', 'ꀕ'), - ('ꓸ', 'ꓽ'), - ('ꘌ', 'ꘌ'), - ('ꙿ', 'ꙿ'), - ('ꚜ', 'ꚝ'), - ('ꜗ', 'ꜟ'), - ('ꝰ', 'ꝰ'), - ('ꞈ', 'ꞈ'), - ('ꟲ', 'ꟴ'), - ('ꟸ', 'ꟹ'), - ('ꧏ', 'ꧏ'), - ('ꧦ', 'ꧦ'), - ('ꩰ', 'ꩰ'), - ('ꫝ', 'ꫝ'), - ('ꫳ', 'ꫴ'), - ('ꭜ', 'ꭟ'), - ('ꭩ', 'ꭩ'), - ('ー', 'ー'), - ('\u{ff9e}', '\u{ff9f}'), - ('𐞀', '𐞅'), - ('𐞇', '𐞰'), - ('𐞲', '𐞺'), - ('𐵎', '𐵎'), - ('𐵯', '𐵯'), - ('𖭀', '𖭃'), - ('𖵀', '𖵂'), - ('𖵫', '𖵬'), - ('𖾓', '𖾟'), - ('𖿠', '𖿡'), - ('𖿣', '𖿣'), - ('𚿰', '𚿳'), - ('𚿵', '𚿻'), - ('𚿽', '𚿾'), - ('𞀰', '𞁭'), - ('𞄷', '𞄽'), - ('𞓫', '𞓫'), - ('𞥋', '𞥋'), -]; - -pub const MODIFIER_SYMBOL: &'static [(char, char)] = &[ - ('^', '^'), - ('`', '`'), - ('¨', '¨'), - ('¯', '¯'), - ('´', '´'), - ('¸', '¸'), - ('˂', '˅'), - ('˒', '˟'), - ('˥', '˫'), - ('˭', '˭'), - ('˯', '˿'), - ('͵', '͵'), - ('΄', '΅'), - ('࢈', 
'࢈'), - ('᾽', '᾽'), - ('᾿', '῁'), - ('῍', '῏'), - ('῝', '῟'), - ('῭', '`'), - ('´', '῾'), - ('゛', '゜'), - ('꜀', '꜖'), - ('꜠', '꜡'), - ('꞉', '꞊'), - ('꭛', '꭛'), - ('꭪', '꭫'), - ('﮲', '﯂'), - ('^', '^'), - ('`', '`'), - (' ̄', ' ̄'), - ('🏻', '🏿'), -]; - -pub const NONSPACING_MARK: &'static [(char, char)] = &[ - ('\u{300}', '\u{36f}'), - ('\u{483}', '\u{487}'), - ('\u{591}', '\u{5bd}'), - ('\u{5bf}', '\u{5bf}'), - ('\u{5c1}', '\u{5c2}'), - ('\u{5c4}', '\u{5c5}'), - ('\u{5c7}', '\u{5c7}'), - ('\u{610}', '\u{61a}'), - ('\u{64b}', '\u{65f}'), - ('\u{670}', '\u{670}'), - ('\u{6d6}', '\u{6dc}'), - ('\u{6df}', '\u{6e4}'), - ('\u{6e7}', '\u{6e8}'), - ('\u{6ea}', '\u{6ed}'), - ('\u{711}', '\u{711}'), - ('\u{730}', '\u{74a}'), - ('\u{7a6}', '\u{7b0}'), - ('\u{7eb}', '\u{7f3}'), - ('\u{7fd}', '\u{7fd}'), - ('\u{816}', '\u{819}'), - ('\u{81b}', '\u{823}'), - ('\u{825}', '\u{827}'), - ('\u{829}', '\u{82d}'), - ('\u{859}', '\u{85b}'), - ('\u{897}', '\u{89f}'), - ('\u{8ca}', '\u{8e1}'), - ('\u{8e3}', '\u{902}'), - ('\u{93a}', '\u{93a}'), - ('\u{93c}', '\u{93c}'), - ('\u{941}', '\u{948}'), - ('\u{94d}', '\u{94d}'), - ('\u{951}', '\u{957}'), - ('\u{962}', '\u{963}'), - ('\u{981}', '\u{981}'), - ('\u{9bc}', '\u{9bc}'), - ('\u{9c1}', '\u{9c4}'), - ('\u{9cd}', '\u{9cd}'), - ('\u{9e2}', '\u{9e3}'), - ('\u{9fe}', '\u{9fe}'), - ('\u{a01}', '\u{a02}'), - ('\u{a3c}', '\u{a3c}'), - ('\u{a41}', '\u{a42}'), - ('\u{a47}', '\u{a48}'), - ('\u{a4b}', '\u{a4d}'), - ('\u{a51}', '\u{a51}'), - ('\u{a70}', '\u{a71}'), - ('\u{a75}', '\u{a75}'), - ('\u{a81}', '\u{a82}'), - ('\u{abc}', '\u{abc}'), - ('\u{ac1}', '\u{ac5}'), - ('\u{ac7}', '\u{ac8}'), - ('\u{acd}', '\u{acd}'), - ('\u{ae2}', '\u{ae3}'), - ('\u{afa}', '\u{aff}'), - ('\u{b01}', '\u{b01}'), - ('\u{b3c}', '\u{b3c}'), - ('\u{b3f}', '\u{b3f}'), - ('\u{b41}', '\u{b44}'), - ('\u{b4d}', '\u{b4d}'), - ('\u{b55}', '\u{b56}'), - ('\u{b62}', '\u{b63}'), - ('\u{b82}', '\u{b82}'), - ('\u{bc0}', '\u{bc0}'), - ('\u{bcd}', '\u{bcd}'), - ('\u{c00}', '\u{c00}'), - ('\u{c04}', '\u{c04}'), - ('\u{c3c}', '\u{c3c}'), - ('\u{c3e}', '\u{c40}'), - ('\u{c46}', '\u{c48}'), - ('\u{c4a}', '\u{c4d}'), - ('\u{c55}', '\u{c56}'), - ('\u{c62}', '\u{c63}'), - ('\u{c81}', '\u{c81}'), - ('\u{cbc}', '\u{cbc}'), - ('\u{cbf}', '\u{cbf}'), - ('\u{cc6}', '\u{cc6}'), - ('\u{ccc}', '\u{ccd}'), - ('\u{ce2}', '\u{ce3}'), - ('\u{d00}', '\u{d01}'), - ('\u{d3b}', '\u{d3c}'), - ('\u{d41}', '\u{d44}'), - ('\u{d4d}', '\u{d4d}'), - ('\u{d62}', '\u{d63}'), - ('\u{d81}', '\u{d81}'), - ('\u{dca}', '\u{dca}'), - ('\u{dd2}', '\u{dd4}'), - ('\u{dd6}', '\u{dd6}'), - ('\u{e31}', '\u{e31}'), - ('\u{e34}', '\u{e3a}'), - ('\u{e47}', '\u{e4e}'), - ('\u{eb1}', '\u{eb1}'), - ('\u{eb4}', '\u{ebc}'), - ('\u{ec8}', '\u{ece}'), - ('\u{f18}', '\u{f19}'), - ('\u{f35}', '\u{f35}'), - ('\u{f37}', '\u{f37}'), - ('\u{f39}', '\u{f39}'), - ('\u{f71}', '\u{f7e}'), - ('\u{f80}', '\u{f84}'), - ('\u{f86}', '\u{f87}'), - ('\u{f8d}', '\u{f97}'), - ('\u{f99}', '\u{fbc}'), - ('\u{fc6}', '\u{fc6}'), - ('\u{102d}', '\u{1030}'), - ('\u{1032}', '\u{1037}'), - ('\u{1039}', '\u{103a}'), - ('\u{103d}', '\u{103e}'), - ('\u{1058}', '\u{1059}'), - ('\u{105e}', '\u{1060}'), - ('\u{1071}', '\u{1074}'), - ('\u{1082}', '\u{1082}'), - ('\u{1085}', '\u{1086}'), - ('\u{108d}', '\u{108d}'), - ('\u{109d}', '\u{109d}'), - ('\u{135d}', '\u{135f}'), - ('\u{1712}', '\u{1714}'), - ('\u{1732}', '\u{1733}'), - ('\u{1752}', '\u{1753}'), - ('\u{1772}', '\u{1773}'), - ('\u{17b4}', '\u{17b5}'), - ('\u{17b7}', '\u{17bd}'), - ('\u{17c6}', '\u{17c6}'), - ('\u{17c9}', '\u{17d3}'), - 
('\u{17dd}', '\u{17dd}'), - ('\u{180b}', '\u{180d}'), - ('\u{180f}', '\u{180f}'), - ('\u{1885}', '\u{1886}'), - ('\u{18a9}', '\u{18a9}'), - ('\u{1920}', '\u{1922}'), - ('\u{1927}', '\u{1928}'), - ('\u{1932}', '\u{1932}'), - ('\u{1939}', '\u{193b}'), - ('\u{1a17}', '\u{1a18}'), - ('\u{1a1b}', '\u{1a1b}'), - ('\u{1a56}', '\u{1a56}'), - ('\u{1a58}', '\u{1a5e}'), - ('\u{1a60}', '\u{1a60}'), - ('\u{1a62}', '\u{1a62}'), - ('\u{1a65}', '\u{1a6c}'), - ('\u{1a73}', '\u{1a7c}'), - ('\u{1a7f}', '\u{1a7f}'), - ('\u{1ab0}', '\u{1abd}'), - ('\u{1abf}', '\u{1ace}'), - ('\u{1b00}', '\u{1b03}'), - ('\u{1b34}', '\u{1b34}'), - ('\u{1b36}', '\u{1b3a}'), - ('\u{1b3c}', '\u{1b3c}'), - ('\u{1b42}', '\u{1b42}'), - ('\u{1b6b}', '\u{1b73}'), - ('\u{1b80}', '\u{1b81}'), - ('\u{1ba2}', '\u{1ba5}'), - ('\u{1ba8}', '\u{1ba9}'), - ('\u{1bab}', '\u{1bad}'), - ('\u{1be6}', '\u{1be6}'), - ('\u{1be8}', '\u{1be9}'), - ('\u{1bed}', '\u{1bed}'), - ('\u{1bef}', '\u{1bf1}'), - ('\u{1c2c}', '\u{1c33}'), - ('\u{1c36}', '\u{1c37}'), - ('\u{1cd0}', '\u{1cd2}'), - ('\u{1cd4}', '\u{1ce0}'), - ('\u{1ce2}', '\u{1ce8}'), - ('\u{1ced}', '\u{1ced}'), - ('\u{1cf4}', '\u{1cf4}'), - ('\u{1cf8}', '\u{1cf9}'), - ('\u{1dc0}', '\u{1dff}'), - ('\u{20d0}', '\u{20dc}'), - ('\u{20e1}', '\u{20e1}'), - ('\u{20e5}', '\u{20f0}'), - ('\u{2cef}', '\u{2cf1}'), - ('\u{2d7f}', '\u{2d7f}'), - ('\u{2de0}', '\u{2dff}'), - ('\u{302a}', '\u{302d}'), - ('\u{3099}', '\u{309a}'), - ('\u{a66f}', '\u{a66f}'), - ('\u{a674}', '\u{a67d}'), - ('\u{a69e}', '\u{a69f}'), - ('\u{a6f0}', '\u{a6f1}'), - ('\u{a802}', '\u{a802}'), - ('\u{a806}', '\u{a806}'), - ('\u{a80b}', '\u{a80b}'), - ('\u{a825}', '\u{a826}'), - ('\u{a82c}', '\u{a82c}'), - ('\u{a8c4}', '\u{a8c5}'), - ('\u{a8e0}', '\u{a8f1}'), - ('\u{a8ff}', '\u{a8ff}'), - ('\u{a926}', '\u{a92d}'), - ('\u{a947}', '\u{a951}'), - ('\u{a980}', '\u{a982}'), - ('\u{a9b3}', '\u{a9b3}'), - ('\u{a9b6}', '\u{a9b9}'), - ('\u{a9bc}', '\u{a9bd}'), - ('\u{a9e5}', '\u{a9e5}'), - ('\u{aa29}', '\u{aa2e}'), - ('\u{aa31}', '\u{aa32}'), - ('\u{aa35}', '\u{aa36}'), - ('\u{aa43}', '\u{aa43}'), - ('\u{aa4c}', '\u{aa4c}'), - ('\u{aa7c}', '\u{aa7c}'), - ('\u{aab0}', '\u{aab0}'), - ('\u{aab2}', '\u{aab4}'), - ('\u{aab7}', '\u{aab8}'), - ('\u{aabe}', '\u{aabf}'), - ('\u{aac1}', '\u{aac1}'), - ('\u{aaec}', '\u{aaed}'), - ('\u{aaf6}', '\u{aaf6}'), - ('\u{abe5}', '\u{abe5}'), - ('\u{abe8}', '\u{abe8}'), - ('\u{abed}', '\u{abed}'), - ('\u{fb1e}', '\u{fb1e}'), - ('\u{fe00}', '\u{fe0f}'), - ('\u{fe20}', '\u{fe2f}'), - ('\u{101fd}', '\u{101fd}'), - ('\u{102e0}', '\u{102e0}'), - ('\u{10376}', '\u{1037a}'), - ('\u{10a01}', '\u{10a03}'), - ('\u{10a05}', '\u{10a06}'), - ('\u{10a0c}', '\u{10a0f}'), - ('\u{10a38}', '\u{10a3a}'), - ('\u{10a3f}', '\u{10a3f}'), - ('\u{10ae5}', '\u{10ae6}'), - ('\u{10d24}', '\u{10d27}'), - ('\u{10d69}', '\u{10d6d}'), - ('\u{10eab}', '\u{10eac}'), - ('\u{10efc}', '\u{10eff}'), - ('\u{10f46}', '\u{10f50}'), - ('\u{10f82}', '\u{10f85}'), - ('\u{11001}', '\u{11001}'), - ('\u{11038}', '\u{11046}'), - ('\u{11070}', '\u{11070}'), - ('\u{11073}', '\u{11074}'), - ('\u{1107f}', '\u{11081}'), - ('\u{110b3}', '\u{110b6}'), - ('\u{110b9}', '\u{110ba}'), - ('\u{110c2}', '\u{110c2}'), - ('\u{11100}', '\u{11102}'), - ('\u{11127}', '\u{1112b}'), - ('\u{1112d}', '\u{11134}'), - ('\u{11173}', '\u{11173}'), - ('\u{11180}', '\u{11181}'), - ('\u{111b6}', '\u{111be}'), - ('\u{111c9}', '\u{111cc}'), - ('\u{111cf}', '\u{111cf}'), - ('\u{1122f}', '\u{11231}'), - ('\u{11234}', '\u{11234}'), - ('\u{11236}', '\u{11237}'), - ('\u{1123e}', '\u{1123e}'), - 
('\u{11241}', '\u{11241}'), - ('\u{112df}', '\u{112df}'), - ('\u{112e3}', '\u{112ea}'), - ('\u{11300}', '\u{11301}'), - ('\u{1133b}', '\u{1133c}'), - ('\u{11340}', '\u{11340}'), - ('\u{11366}', '\u{1136c}'), - ('\u{11370}', '\u{11374}'), - ('\u{113bb}', '\u{113c0}'), - ('\u{113ce}', '\u{113ce}'), - ('\u{113d0}', '\u{113d0}'), - ('\u{113d2}', '\u{113d2}'), - ('\u{113e1}', '\u{113e2}'), - ('\u{11438}', '\u{1143f}'), - ('\u{11442}', '\u{11444}'), - ('\u{11446}', '\u{11446}'), - ('\u{1145e}', '\u{1145e}'), - ('\u{114b3}', '\u{114b8}'), - ('\u{114ba}', '\u{114ba}'), - ('\u{114bf}', '\u{114c0}'), - ('\u{114c2}', '\u{114c3}'), - ('\u{115b2}', '\u{115b5}'), - ('\u{115bc}', '\u{115bd}'), - ('\u{115bf}', '\u{115c0}'), - ('\u{115dc}', '\u{115dd}'), - ('\u{11633}', '\u{1163a}'), - ('\u{1163d}', '\u{1163d}'), - ('\u{1163f}', '\u{11640}'), - ('\u{116ab}', '\u{116ab}'), - ('\u{116ad}', '\u{116ad}'), - ('\u{116b0}', '\u{116b5}'), - ('\u{116b7}', '\u{116b7}'), - ('\u{1171d}', '\u{1171d}'), - ('\u{1171f}', '\u{1171f}'), - ('\u{11722}', '\u{11725}'), - ('\u{11727}', '\u{1172b}'), - ('\u{1182f}', '\u{11837}'), - ('\u{11839}', '\u{1183a}'), - ('\u{1193b}', '\u{1193c}'), - ('\u{1193e}', '\u{1193e}'), - ('\u{11943}', '\u{11943}'), - ('\u{119d4}', '\u{119d7}'), - ('\u{119da}', '\u{119db}'), - ('\u{119e0}', '\u{119e0}'), - ('\u{11a01}', '\u{11a0a}'), - ('\u{11a33}', '\u{11a38}'), - ('\u{11a3b}', '\u{11a3e}'), - ('\u{11a47}', '\u{11a47}'), - ('\u{11a51}', '\u{11a56}'), - ('\u{11a59}', '\u{11a5b}'), - ('\u{11a8a}', '\u{11a96}'), - ('\u{11a98}', '\u{11a99}'), - ('\u{11c30}', '\u{11c36}'), - ('\u{11c38}', '\u{11c3d}'), - ('\u{11c3f}', '\u{11c3f}'), - ('\u{11c92}', '\u{11ca7}'), - ('\u{11caa}', '\u{11cb0}'), - ('\u{11cb2}', '\u{11cb3}'), - ('\u{11cb5}', '\u{11cb6}'), - ('\u{11d31}', '\u{11d36}'), - ('\u{11d3a}', '\u{11d3a}'), - ('\u{11d3c}', '\u{11d3d}'), - ('\u{11d3f}', '\u{11d45}'), - ('\u{11d47}', '\u{11d47}'), - ('\u{11d90}', '\u{11d91}'), - ('\u{11d95}', '\u{11d95}'), - ('\u{11d97}', '\u{11d97}'), - ('\u{11ef3}', '\u{11ef4}'), - ('\u{11f00}', '\u{11f01}'), - ('\u{11f36}', '\u{11f3a}'), - ('\u{11f40}', '\u{11f40}'), - ('\u{11f42}', '\u{11f42}'), - ('\u{11f5a}', '\u{11f5a}'), - ('\u{13440}', '\u{13440}'), - ('\u{13447}', '\u{13455}'), - ('\u{1611e}', '\u{16129}'), - ('\u{1612d}', '\u{1612f}'), - ('\u{16af0}', '\u{16af4}'), - ('\u{16b30}', '\u{16b36}'), - ('\u{16f4f}', '\u{16f4f}'), - ('\u{16f8f}', '\u{16f92}'), - ('\u{16fe4}', '\u{16fe4}'), - ('\u{1bc9d}', '\u{1bc9e}'), - ('\u{1cf00}', '\u{1cf2d}'), - ('\u{1cf30}', '\u{1cf46}'), - ('\u{1d167}', '\u{1d169}'), - ('\u{1d17b}', '\u{1d182}'), - ('\u{1d185}', '\u{1d18b}'), - ('\u{1d1aa}', '\u{1d1ad}'), - ('\u{1d242}', '\u{1d244}'), - ('\u{1da00}', '\u{1da36}'), - ('\u{1da3b}', '\u{1da6c}'), - ('\u{1da75}', '\u{1da75}'), - ('\u{1da84}', '\u{1da84}'), - ('\u{1da9b}', '\u{1da9f}'), - ('\u{1daa1}', '\u{1daaf}'), - ('\u{1e000}', '\u{1e006}'), - ('\u{1e008}', '\u{1e018}'), - ('\u{1e01b}', '\u{1e021}'), - ('\u{1e023}', '\u{1e024}'), - ('\u{1e026}', '\u{1e02a}'), - ('\u{1e08f}', '\u{1e08f}'), - ('\u{1e130}', '\u{1e136}'), - ('\u{1e2ae}', '\u{1e2ae}'), - ('\u{1e2ec}', '\u{1e2ef}'), - ('\u{1e4ec}', '\u{1e4ef}'), - ('\u{1e5ee}', '\u{1e5ef}'), - ('\u{1e8d0}', '\u{1e8d6}'), - ('\u{1e944}', '\u{1e94a}'), - ('\u{e0100}', '\u{e01ef}'), -]; - -pub const NUMBER: &'static [(char, char)] = &[ - ('0', '9'), - ('²', '³'), - ('¹', '¹'), - ('¼', '¾'), - ('٠', '٩'), - ('۰', '۹'), - ('߀', '߉'), - ('०', '९'), - ('০', '৯'), - ('৴', '৹'), - ('੦', '੯'), - ('૦', '૯'), - ('୦', '୯'), - ('୲', '୷'), - 
('௦', '௲'), - ('౦', '౯'), - ('౸', '౾'), - ('೦', '೯'), - ('൘', '൞'), - ('൦', '൸'), - ('෦', '෯'), - ('๐', '๙'), - ('໐', '໙'), - ('༠', '༳'), - ('၀', '၉'), - ('႐', '႙'), - ('፩', '፼'), - ('ᛮ', 'ᛰ'), - ('០', '៩'), - ('៰', '៹'), - ('᠐', '᠙'), - ('᥆', '᥏'), - ('᧐', '᧚'), - ('᪀', '᪉'), - ('᪐', '᪙'), - ('᭐', '᭙'), - ('᮰', '᮹'), - ('᱀', '᱉'), - ('᱐', '᱙'), - ('⁰', '⁰'), - ('⁴', '⁹'), - ('₀', '₉'), - ('⅐', 'ↂ'), - ('ↅ', '↉'), - ('①', '⒛'), - ('⓪', '⓿'), - ('❶', '➓'), - ('⳽', '⳽'), - ('〇', '〇'), - ('〡', '〩'), - ('〸', '〺'), - ('㆒', '㆕'), - ('㈠', '㈩'), - ('㉈', '㉏'), - ('㉑', '㉟'), - ('㊀', '㊉'), - ('㊱', '㊿'), - ('꘠', '꘩'), - ('ꛦ', 'ꛯ'), - ('꠰', '꠵'), - ('꣐', '꣙'), - ('꤀', '꤉'), - ('꧐', '꧙'), - ('꧰', '꧹'), - ('꩐', '꩙'), - ('꯰', '꯹'), - ('0', '9'), - ('𐄇', '𐄳'), - ('𐅀', '𐅸'), - ('𐆊', '𐆋'), - ('𐋡', '𐋻'), - ('𐌠', '𐌣'), - ('𐍁', '𐍁'), - ('𐍊', '𐍊'), - ('𐏑', '𐏕'), - ('𐒠', '𐒩'), - ('𐡘', '𐡟'), - ('𐡹', '𐡿'), - ('𐢧', '𐢯'), - ('𐣻', '𐣿'), - ('𐤖', '𐤛'), - ('𐦼', '𐦽'), - ('𐧀', '𐧏'), - ('𐧒', '𐧿'), - ('𐩀', '𐩈'), - ('𐩽', '𐩾'), - ('𐪝', '𐪟'), - ('𐫫', '𐫯'), - ('𐭘', '𐭟'), - ('𐭸', '𐭿'), - ('𐮩', '𐮯'), - ('𐳺', '𐳿'), - ('𐴰', '𐴹'), - ('𐵀', '𐵉'), - ('𐹠', '𐹾'), - ('𐼝', '𐼦'), - ('𐽑', '𐽔'), - ('𐿅', '𐿋'), - ('𑁒', '𑁯'), - ('𑃰', '𑃹'), - ('𑄶', '𑄿'), - ('𑇐', '𑇙'), - ('𑇡', '𑇴'), - ('𑋰', '𑋹'), - ('𑑐', '𑑙'), - ('𑓐', '𑓙'), - ('𑙐', '𑙙'), - ('𑛀', '𑛉'), - ('𑛐', '𑛣'), - ('𑜰', '𑜻'), - ('𑣠', '𑣲'), - ('𑥐', '𑥙'), - ('𑯰', '𑯹'), - ('𑱐', '𑱬'), - ('𑵐', '𑵙'), - ('𑶠', '𑶩'), - ('𑽐', '𑽙'), - ('𑿀', '𑿔'), - ('𒐀', '𒑮'), - ('𖄰', '𖄹'), - ('𖩠', '𖩩'), - ('𖫀', '𖫉'), - ('𖭐', '𖭙'), - ('𖭛', '𖭡'), - ('𖵰', '𖵹'), - ('𖺀', '𖺖'), - ('𜳰', '𜳹'), - ('𝋀', '𝋓'), - ('𝋠', '𝋳'), - ('𝍠', '𝍸'), - ('𝟎', '𝟿'), - ('𞅀', '𞅉'), - ('𞋰', '𞋹'), - ('𞓰', '𞓹'), - ('𞗱', '𞗺'), - ('𞣇', '𞣏'), - ('𞥐', '𞥙'), - ('𞱱', '𞲫'), - ('𞲭', '𞲯'), - ('𞲱', '𞲴'), - ('𞴁', '𞴭'), - ('𞴯', '𞴽'), - ('🄀', '🄌'), - ('🯰', '🯹'), -]; - -pub const OPEN_PUNCTUATION: &'static [(char, char)] = &[ - ('(', '('), - ('[', '['), - ('{', '{'), - ('༺', '༺'), - ('༼', '༼'), - ('᚛', '᚛'), - ('‚', '‚'), - ('„', '„'), - ('⁅', '⁅'), - ('⁽', '⁽'), - ('₍', '₍'), - ('⌈', '⌈'), - ('⌊', '⌊'), - ('〈', '〈'), - ('❨', '❨'), - ('❪', '❪'), - ('❬', '❬'), - ('❮', '❮'), - ('❰', '❰'), - ('❲', '❲'), - ('❴', '❴'), - ('⟅', '⟅'), - ('⟦', '⟦'), - ('⟨', '⟨'), - ('⟪', '⟪'), - ('⟬', '⟬'), - ('⟮', '⟮'), - ('⦃', '⦃'), - ('⦅', '⦅'), - ('⦇', '⦇'), - ('⦉', '⦉'), - ('⦋', '⦋'), - ('⦍', '⦍'), - ('⦏', '⦏'), - ('⦑', '⦑'), - ('⦓', '⦓'), - ('⦕', '⦕'), - ('⦗', '⦗'), - ('⧘', '⧘'), - ('⧚', '⧚'), - ('⧼', '⧼'), - ('⸢', '⸢'), - ('⸤', '⸤'), - ('⸦', '⸦'), - ('⸨', '⸨'), - ('⹂', '⹂'), - ('⹕', '⹕'), - ('⹗', '⹗'), - ('⹙', '⹙'), - ('⹛', '⹛'), - ('〈', '〈'), - ('《', '《'), - ('「', '「'), - ('『', '『'), - ('【', '【'), - ('〔', '〔'), - ('〖', '〖'), - ('〘', '〘'), - ('〚', '〚'), - ('〝', '〝'), - ('﴿', '﴿'), - ('︗', '︗'), - ('︵', '︵'), - ('︷', '︷'), - ('︹', '︹'), - ('︻', '︻'), - ('︽', '︽'), - ('︿', '︿'), - ('﹁', '﹁'), - ('﹃', '﹃'), - ('﹇', '﹇'), - ('﹙', '﹙'), - ('﹛', '﹛'), - ('﹝', '﹝'), - ('(', '('), - ('[', '['), - ('{', '{'), - ('⦅', '⦅'), - ('「', '「'), -]; - -pub const OTHER: &'static [(char, char)] = &[ - ('\0', '\u{1f}'), - ('\u{7f}', '\u{9f}'), - ('\u{ad}', '\u{ad}'), - ('\u{378}', '\u{379}'), - ('\u{380}', '\u{383}'), - ('\u{38b}', '\u{38b}'), - ('\u{38d}', '\u{38d}'), - ('\u{3a2}', '\u{3a2}'), - ('\u{530}', '\u{530}'), - ('\u{557}', '\u{558}'), - ('\u{58b}', '\u{58c}'), - ('\u{590}', '\u{590}'), - ('\u{5c8}', '\u{5cf}'), - ('\u{5eb}', '\u{5ee}'), - ('\u{5f5}', '\u{605}'), - ('\u{61c}', '\u{61c}'), - ('\u{6dd}', '\u{6dd}'), - ('\u{70e}', '\u{70f}'), - ('\u{74b}', '\u{74c}'), - ('\u{7b2}', '\u{7bf}'), - 
('\u{7fb}', '\u{7fc}'), - ('\u{82e}', '\u{82f}'), - ('\u{83f}', '\u{83f}'), - ('\u{85c}', '\u{85d}'), - ('\u{85f}', '\u{85f}'), - ('\u{86b}', '\u{86f}'), - ('\u{88f}', '\u{896}'), - ('\u{8e2}', '\u{8e2}'), - ('\u{984}', '\u{984}'), - ('\u{98d}', '\u{98e}'), - ('\u{991}', '\u{992}'), - ('\u{9a9}', '\u{9a9}'), - ('\u{9b1}', '\u{9b1}'), - ('\u{9b3}', '\u{9b5}'), - ('\u{9ba}', '\u{9bb}'), - ('\u{9c5}', '\u{9c6}'), - ('\u{9c9}', '\u{9ca}'), - ('\u{9cf}', '\u{9d6}'), - ('\u{9d8}', '\u{9db}'), - ('\u{9de}', '\u{9de}'), - ('\u{9e4}', '\u{9e5}'), - ('\u{9ff}', '\u{a00}'), - ('\u{a04}', '\u{a04}'), - ('\u{a0b}', '\u{a0e}'), - ('\u{a11}', '\u{a12}'), - ('\u{a29}', '\u{a29}'), - ('\u{a31}', '\u{a31}'), - ('\u{a34}', '\u{a34}'), - ('\u{a37}', '\u{a37}'), - ('\u{a3a}', '\u{a3b}'), - ('\u{a3d}', '\u{a3d}'), - ('\u{a43}', '\u{a46}'), - ('\u{a49}', '\u{a4a}'), - ('\u{a4e}', '\u{a50}'), - ('\u{a52}', '\u{a58}'), - ('\u{a5d}', '\u{a5d}'), - ('\u{a5f}', '\u{a65}'), - ('\u{a77}', '\u{a80}'), - ('\u{a84}', '\u{a84}'), - ('\u{a8e}', '\u{a8e}'), - ('\u{a92}', '\u{a92}'), - ('\u{aa9}', '\u{aa9}'), - ('\u{ab1}', '\u{ab1}'), - ('\u{ab4}', '\u{ab4}'), - ('\u{aba}', '\u{abb}'), - ('\u{ac6}', '\u{ac6}'), - ('\u{aca}', '\u{aca}'), - ('\u{ace}', '\u{acf}'), - ('\u{ad1}', '\u{adf}'), - ('\u{ae4}', '\u{ae5}'), - ('\u{af2}', '\u{af8}'), - ('\u{b00}', '\u{b00}'), - ('\u{b04}', '\u{b04}'), - ('\u{b0d}', '\u{b0e}'), - ('\u{b11}', '\u{b12}'), - ('\u{b29}', '\u{b29}'), - ('\u{b31}', '\u{b31}'), - ('\u{b34}', '\u{b34}'), - ('\u{b3a}', '\u{b3b}'), - ('\u{b45}', '\u{b46}'), - ('\u{b49}', '\u{b4a}'), - ('\u{b4e}', '\u{b54}'), - ('\u{b58}', '\u{b5b}'), - ('\u{b5e}', '\u{b5e}'), - ('\u{b64}', '\u{b65}'), - ('\u{b78}', '\u{b81}'), - ('\u{b84}', '\u{b84}'), - ('\u{b8b}', '\u{b8d}'), - ('\u{b91}', '\u{b91}'), - ('\u{b96}', '\u{b98}'), - ('\u{b9b}', '\u{b9b}'), - ('\u{b9d}', '\u{b9d}'), - ('\u{ba0}', '\u{ba2}'), - ('\u{ba5}', '\u{ba7}'), - ('\u{bab}', '\u{bad}'), - ('\u{bba}', '\u{bbd}'), - ('\u{bc3}', '\u{bc5}'), - ('\u{bc9}', '\u{bc9}'), - ('\u{bce}', '\u{bcf}'), - ('\u{bd1}', '\u{bd6}'), - ('\u{bd8}', '\u{be5}'), - ('\u{bfb}', '\u{bff}'), - ('\u{c0d}', '\u{c0d}'), - ('\u{c11}', '\u{c11}'), - ('\u{c29}', '\u{c29}'), - ('\u{c3a}', '\u{c3b}'), - ('\u{c45}', '\u{c45}'), - ('\u{c49}', '\u{c49}'), - ('\u{c4e}', '\u{c54}'), - ('\u{c57}', '\u{c57}'), - ('\u{c5b}', '\u{c5c}'), - ('\u{c5e}', '\u{c5f}'), - ('\u{c64}', '\u{c65}'), - ('\u{c70}', '\u{c76}'), - ('\u{c8d}', '\u{c8d}'), - ('\u{c91}', '\u{c91}'), - ('\u{ca9}', '\u{ca9}'), - ('\u{cb4}', '\u{cb4}'), - ('\u{cba}', '\u{cbb}'), - ('\u{cc5}', '\u{cc5}'), - ('\u{cc9}', '\u{cc9}'), - ('\u{cce}', '\u{cd4}'), - ('\u{cd7}', '\u{cdc}'), - ('\u{cdf}', '\u{cdf}'), - ('\u{ce4}', '\u{ce5}'), - ('\u{cf0}', '\u{cf0}'), - ('\u{cf4}', '\u{cff}'), - ('\u{d0d}', '\u{d0d}'), - ('\u{d11}', '\u{d11}'), - ('\u{d45}', '\u{d45}'), - ('\u{d49}', '\u{d49}'), - ('\u{d50}', '\u{d53}'), - ('\u{d64}', '\u{d65}'), - ('\u{d80}', '\u{d80}'), - ('\u{d84}', '\u{d84}'), - ('\u{d97}', '\u{d99}'), - ('\u{db2}', '\u{db2}'), - ('\u{dbc}', '\u{dbc}'), - ('\u{dbe}', '\u{dbf}'), - ('\u{dc7}', '\u{dc9}'), - ('\u{dcb}', '\u{dce}'), - ('\u{dd5}', '\u{dd5}'), - ('\u{dd7}', '\u{dd7}'), - ('\u{de0}', '\u{de5}'), - ('\u{df0}', '\u{df1}'), - ('\u{df5}', '\u{e00}'), - ('\u{e3b}', '\u{e3e}'), - ('\u{e5c}', '\u{e80}'), - ('\u{e83}', '\u{e83}'), - ('\u{e85}', '\u{e85}'), - ('\u{e8b}', '\u{e8b}'), - ('\u{ea4}', '\u{ea4}'), - ('\u{ea6}', '\u{ea6}'), - ('\u{ebe}', '\u{ebf}'), - ('\u{ec5}', '\u{ec5}'), - ('\u{ec7}', '\u{ec7}'), - ('\u{ecf}', 
'\u{ecf}'), - ('\u{eda}', '\u{edb}'), - ('\u{ee0}', '\u{eff}'), - ('\u{f48}', '\u{f48}'), - ('\u{f6d}', '\u{f70}'), - ('\u{f98}', '\u{f98}'), - ('\u{fbd}', '\u{fbd}'), - ('\u{fcd}', '\u{fcd}'), - ('\u{fdb}', '\u{fff}'), - ('\u{10c6}', '\u{10c6}'), - ('\u{10c8}', '\u{10cc}'), - ('\u{10ce}', '\u{10cf}'), - ('\u{1249}', '\u{1249}'), - ('\u{124e}', '\u{124f}'), - ('\u{1257}', '\u{1257}'), - ('\u{1259}', '\u{1259}'), - ('\u{125e}', '\u{125f}'), - ('\u{1289}', '\u{1289}'), - ('\u{128e}', '\u{128f}'), - ('\u{12b1}', '\u{12b1}'), - ('\u{12b6}', '\u{12b7}'), - ('\u{12bf}', '\u{12bf}'), - ('\u{12c1}', '\u{12c1}'), - ('\u{12c6}', '\u{12c7}'), - ('\u{12d7}', '\u{12d7}'), - ('\u{1311}', '\u{1311}'), - ('\u{1316}', '\u{1317}'), - ('\u{135b}', '\u{135c}'), - ('\u{137d}', '\u{137f}'), - ('\u{139a}', '\u{139f}'), - ('\u{13f6}', '\u{13f7}'), - ('\u{13fe}', '\u{13ff}'), - ('\u{169d}', '\u{169f}'), - ('\u{16f9}', '\u{16ff}'), - ('\u{1716}', '\u{171e}'), - ('\u{1737}', '\u{173f}'), - ('\u{1754}', '\u{175f}'), - ('\u{176d}', '\u{176d}'), - ('\u{1771}', '\u{1771}'), - ('\u{1774}', '\u{177f}'), - ('\u{17de}', '\u{17df}'), - ('\u{17ea}', '\u{17ef}'), - ('\u{17fa}', '\u{17ff}'), - ('\u{180e}', '\u{180e}'), - ('\u{181a}', '\u{181f}'), - ('\u{1879}', '\u{187f}'), - ('\u{18ab}', '\u{18af}'), - ('\u{18f6}', '\u{18ff}'), - ('\u{191f}', '\u{191f}'), - ('\u{192c}', '\u{192f}'), - ('\u{193c}', '\u{193f}'), - ('\u{1941}', '\u{1943}'), - ('\u{196e}', '\u{196f}'), - ('\u{1975}', '\u{197f}'), - ('\u{19ac}', '\u{19af}'), - ('\u{19ca}', '\u{19cf}'), - ('\u{19db}', '\u{19dd}'), - ('\u{1a1c}', '\u{1a1d}'), - ('\u{1a5f}', '\u{1a5f}'), - ('\u{1a7d}', '\u{1a7e}'), - ('\u{1a8a}', '\u{1a8f}'), - ('\u{1a9a}', '\u{1a9f}'), - ('\u{1aae}', '\u{1aaf}'), - ('\u{1acf}', '\u{1aff}'), - ('\u{1b4d}', '\u{1b4d}'), - ('\u{1bf4}', '\u{1bfb}'), - ('\u{1c38}', '\u{1c3a}'), - ('\u{1c4a}', '\u{1c4c}'), - ('\u{1c8b}', '\u{1c8f}'), - ('\u{1cbb}', '\u{1cbc}'), - ('\u{1cc8}', '\u{1ccf}'), - ('\u{1cfb}', '\u{1cff}'), - ('\u{1f16}', '\u{1f17}'), - ('\u{1f1e}', '\u{1f1f}'), - ('\u{1f46}', '\u{1f47}'), - ('\u{1f4e}', '\u{1f4f}'), - ('\u{1f58}', '\u{1f58}'), - ('\u{1f5a}', '\u{1f5a}'), - ('\u{1f5c}', '\u{1f5c}'), - ('\u{1f5e}', '\u{1f5e}'), - ('\u{1f7e}', '\u{1f7f}'), - ('\u{1fb5}', '\u{1fb5}'), - ('\u{1fc5}', '\u{1fc5}'), - ('\u{1fd4}', '\u{1fd5}'), - ('\u{1fdc}', '\u{1fdc}'), - ('\u{1ff0}', '\u{1ff1}'), - ('\u{1ff5}', '\u{1ff5}'), - ('\u{1fff}', '\u{1fff}'), - ('\u{200b}', '\u{200f}'), - ('\u{202a}', '\u{202e}'), - ('\u{2060}', '\u{206f}'), - ('\u{2072}', '\u{2073}'), - ('\u{208f}', '\u{208f}'), - ('\u{209d}', '\u{209f}'), - ('\u{20c1}', '\u{20cf}'), - ('\u{20f1}', '\u{20ff}'), - ('\u{218c}', '\u{218f}'), - ('\u{242a}', '\u{243f}'), - ('\u{244b}', '\u{245f}'), - ('\u{2b74}', '\u{2b75}'), - ('\u{2b96}', '\u{2b96}'), - ('\u{2cf4}', '\u{2cf8}'), - ('\u{2d26}', '\u{2d26}'), - ('\u{2d28}', '\u{2d2c}'), - ('\u{2d2e}', '\u{2d2f}'), - ('\u{2d68}', '\u{2d6e}'), - ('\u{2d71}', '\u{2d7e}'), - ('\u{2d97}', '\u{2d9f}'), - ('\u{2da7}', '\u{2da7}'), - ('\u{2daf}', '\u{2daf}'), - ('\u{2db7}', '\u{2db7}'), - ('\u{2dbf}', '\u{2dbf}'), - ('\u{2dc7}', '\u{2dc7}'), - ('\u{2dcf}', '\u{2dcf}'), - ('\u{2dd7}', '\u{2dd7}'), - ('\u{2ddf}', '\u{2ddf}'), - ('\u{2e5e}', '\u{2e7f}'), - ('\u{2e9a}', '\u{2e9a}'), - ('\u{2ef4}', '\u{2eff}'), - ('\u{2fd6}', '\u{2fef}'), - ('\u{3040}', '\u{3040}'), - ('\u{3097}', '\u{3098}'), - ('\u{3100}', '\u{3104}'), - ('\u{3130}', '\u{3130}'), - ('\u{318f}', '\u{318f}'), - ('\u{31e6}', '\u{31ee}'), - ('\u{321f}', '\u{321f}'), - ('\u{a48d}', '\u{a48f}'), - 
('\u{a4c7}', '\u{a4cf}'), - ('\u{a62c}', '\u{a63f}'), - ('\u{a6f8}', '\u{a6ff}'), - ('\u{a7ce}', '\u{a7cf}'), - ('\u{a7d2}', '\u{a7d2}'), - ('\u{a7d4}', '\u{a7d4}'), - ('\u{a7dd}', '\u{a7f1}'), - ('\u{a82d}', '\u{a82f}'), - ('\u{a83a}', '\u{a83f}'), - ('\u{a878}', '\u{a87f}'), - ('\u{a8c6}', '\u{a8cd}'), - ('\u{a8da}', '\u{a8df}'), - ('\u{a954}', '\u{a95e}'), - ('\u{a97d}', '\u{a97f}'), - ('\u{a9ce}', '\u{a9ce}'), - ('\u{a9da}', '\u{a9dd}'), - ('\u{a9ff}', '\u{a9ff}'), - ('\u{aa37}', '\u{aa3f}'), - ('\u{aa4e}', '\u{aa4f}'), - ('\u{aa5a}', '\u{aa5b}'), - ('\u{aac3}', '\u{aada}'), - ('\u{aaf7}', '\u{ab00}'), - ('\u{ab07}', '\u{ab08}'), - ('\u{ab0f}', '\u{ab10}'), - ('\u{ab17}', '\u{ab1f}'), - ('\u{ab27}', '\u{ab27}'), - ('\u{ab2f}', '\u{ab2f}'), - ('\u{ab6c}', '\u{ab6f}'), - ('\u{abee}', '\u{abef}'), - ('\u{abfa}', '\u{abff}'), - ('\u{d7a4}', '\u{d7af}'), - ('\u{d7c7}', '\u{d7ca}'), - ('\u{d7fc}', '\u{f8ff}'), - ('\u{fa6e}', '\u{fa6f}'), - ('\u{fada}', '\u{faff}'), - ('\u{fb07}', '\u{fb12}'), - ('\u{fb18}', '\u{fb1c}'), - ('\u{fb37}', '\u{fb37}'), - ('\u{fb3d}', '\u{fb3d}'), - ('\u{fb3f}', '\u{fb3f}'), - ('\u{fb42}', '\u{fb42}'), - ('\u{fb45}', '\u{fb45}'), - ('\u{fbc3}', '\u{fbd2}'), - ('\u{fd90}', '\u{fd91}'), - ('\u{fdc8}', '\u{fdce}'), - ('\u{fdd0}', '\u{fdef}'), - ('\u{fe1a}', '\u{fe1f}'), - ('\u{fe53}', '\u{fe53}'), - ('\u{fe67}', '\u{fe67}'), - ('\u{fe6c}', '\u{fe6f}'), - ('\u{fe75}', '\u{fe75}'), - ('\u{fefd}', '\u{ff00}'), - ('\u{ffbf}', '\u{ffc1}'), - ('\u{ffc8}', '\u{ffc9}'), - ('\u{ffd0}', '\u{ffd1}'), - ('\u{ffd8}', '\u{ffd9}'), - ('\u{ffdd}', '\u{ffdf}'), - ('\u{ffe7}', '\u{ffe7}'), - ('\u{ffef}', '\u{fffb}'), - ('\u{fffe}', '\u{ffff}'), - ('\u{1000c}', '\u{1000c}'), - ('\u{10027}', '\u{10027}'), - ('\u{1003b}', '\u{1003b}'), - ('\u{1003e}', '\u{1003e}'), - ('\u{1004e}', '\u{1004f}'), - ('\u{1005e}', '\u{1007f}'), - ('\u{100fb}', '\u{100ff}'), - ('\u{10103}', '\u{10106}'), - ('\u{10134}', '\u{10136}'), - ('\u{1018f}', '\u{1018f}'), - ('\u{1019d}', '\u{1019f}'), - ('\u{101a1}', '\u{101cf}'), - ('\u{101fe}', '\u{1027f}'), - ('\u{1029d}', '\u{1029f}'), - ('\u{102d1}', '\u{102df}'), - ('\u{102fc}', '\u{102ff}'), - ('\u{10324}', '\u{1032c}'), - ('\u{1034b}', '\u{1034f}'), - ('\u{1037b}', '\u{1037f}'), - ('\u{1039e}', '\u{1039e}'), - ('\u{103c4}', '\u{103c7}'), - ('\u{103d6}', '\u{103ff}'), - ('\u{1049e}', '\u{1049f}'), - ('\u{104aa}', '\u{104af}'), - ('\u{104d4}', '\u{104d7}'), - ('\u{104fc}', '\u{104ff}'), - ('\u{10528}', '\u{1052f}'), - ('\u{10564}', '\u{1056e}'), - ('\u{1057b}', '\u{1057b}'), - ('\u{1058b}', '\u{1058b}'), - ('\u{10593}', '\u{10593}'), - ('\u{10596}', '\u{10596}'), - ('\u{105a2}', '\u{105a2}'), - ('\u{105b2}', '\u{105b2}'), - ('\u{105ba}', '\u{105ba}'), - ('\u{105bd}', '\u{105bf}'), - ('\u{105f4}', '\u{105ff}'), - ('\u{10737}', '\u{1073f}'), - ('\u{10756}', '\u{1075f}'), - ('\u{10768}', '\u{1077f}'), - ('\u{10786}', '\u{10786}'), - ('\u{107b1}', '\u{107b1}'), - ('\u{107bb}', '\u{107ff}'), - ('\u{10806}', '\u{10807}'), - ('\u{10809}', '\u{10809}'), - ('\u{10836}', '\u{10836}'), - ('\u{10839}', '\u{1083b}'), - ('\u{1083d}', '\u{1083e}'), - ('\u{10856}', '\u{10856}'), - ('\u{1089f}', '\u{108a6}'), - ('\u{108b0}', '\u{108df}'), - ('\u{108f3}', '\u{108f3}'), - ('\u{108f6}', '\u{108fa}'), - ('\u{1091c}', '\u{1091e}'), - ('\u{1093a}', '\u{1093e}'), - ('\u{10940}', '\u{1097f}'), - ('\u{109b8}', '\u{109bb}'), - ('\u{109d0}', '\u{109d1}'), - ('\u{10a04}', '\u{10a04}'), - ('\u{10a07}', '\u{10a0b}'), - ('\u{10a14}', '\u{10a14}'), - ('\u{10a18}', '\u{10a18}'), - ('\u{10a36}', 
'\u{10a37}'), - ('\u{10a3b}', '\u{10a3e}'), - ('\u{10a49}', '\u{10a4f}'), - ('\u{10a59}', '\u{10a5f}'), - ('\u{10aa0}', '\u{10abf}'), - ('\u{10ae7}', '\u{10aea}'), - ('\u{10af7}', '\u{10aff}'), - ('\u{10b36}', '\u{10b38}'), - ('\u{10b56}', '\u{10b57}'), - ('\u{10b73}', '\u{10b77}'), - ('\u{10b92}', '\u{10b98}'), - ('\u{10b9d}', '\u{10ba8}'), - ('\u{10bb0}', '\u{10bff}'), - ('\u{10c49}', '\u{10c7f}'), - ('\u{10cb3}', '\u{10cbf}'), - ('\u{10cf3}', '\u{10cf9}'), - ('\u{10d28}', '\u{10d2f}'), - ('\u{10d3a}', '\u{10d3f}'), - ('\u{10d66}', '\u{10d68}'), - ('\u{10d86}', '\u{10d8d}'), - ('\u{10d90}', '\u{10e5f}'), - ('\u{10e7f}', '\u{10e7f}'), - ('\u{10eaa}', '\u{10eaa}'), - ('\u{10eae}', '\u{10eaf}'), - ('\u{10eb2}', '\u{10ec1}'), - ('\u{10ec5}', '\u{10efb}'), - ('\u{10f28}', '\u{10f2f}'), - ('\u{10f5a}', '\u{10f6f}'), - ('\u{10f8a}', '\u{10faf}'), - ('\u{10fcc}', '\u{10fdf}'), - ('\u{10ff7}', '\u{10fff}'), - ('\u{1104e}', '\u{11051}'), - ('\u{11076}', '\u{1107e}'), - ('\u{110bd}', '\u{110bd}'), - ('\u{110c3}', '\u{110cf}'), - ('\u{110e9}', '\u{110ef}'), - ('\u{110fa}', '\u{110ff}'), - ('\u{11135}', '\u{11135}'), - ('\u{11148}', '\u{1114f}'), - ('\u{11177}', '\u{1117f}'), - ('\u{111e0}', '\u{111e0}'), - ('\u{111f5}', '\u{111ff}'), - ('\u{11212}', '\u{11212}'), - ('\u{11242}', '\u{1127f}'), - ('\u{11287}', '\u{11287}'), - ('\u{11289}', '\u{11289}'), - ('\u{1128e}', '\u{1128e}'), - ('\u{1129e}', '\u{1129e}'), - ('\u{112aa}', '\u{112af}'), - ('\u{112eb}', '\u{112ef}'), - ('\u{112fa}', '\u{112ff}'), - ('\u{11304}', '\u{11304}'), - ('\u{1130d}', '\u{1130e}'), - ('\u{11311}', '\u{11312}'), - ('\u{11329}', '\u{11329}'), - ('\u{11331}', '\u{11331}'), - ('\u{11334}', '\u{11334}'), - ('\u{1133a}', '\u{1133a}'), - ('\u{11345}', '\u{11346}'), - ('\u{11349}', '\u{1134a}'), - ('\u{1134e}', '\u{1134f}'), - ('\u{11351}', '\u{11356}'), - ('\u{11358}', '\u{1135c}'), - ('\u{11364}', '\u{11365}'), - ('\u{1136d}', '\u{1136f}'), - ('\u{11375}', '\u{1137f}'), - ('\u{1138a}', '\u{1138a}'), - ('\u{1138c}', '\u{1138d}'), - ('\u{1138f}', '\u{1138f}'), - ('\u{113b6}', '\u{113b6}'), - ('\u{113c1}', '\u{113c1}'), - ('\u{113c3}', '\u{113c4}'), - ('\u{113c6}', '\u{113c6}'), - ('\u{113cb}', '\u{113cb}'), - ('\u{113d6}', '\u{113d6}'), - ('\u{113d9}', '\u{113e0}'), - ('\u{113e3}', '\u{113ff}'), - ('\u{1145c}', '\u{1145c}'), - ('\u{11462}', '\u{1147f}'), - ('\u{114c8}', '\u{114cf}'), - ('\u{114da}', '\u{1157f}'), - ('\u{115b6}', '\u{115b7}'), - ('\u{115de}', '\u{115ff}'), - ('\u{11645}', '\u{1164f}'), - ('\u{1165a}', '\u{1165f}'), - ('\u{1166d}', '\u{1167f}'), - ('\u{116ba}', '\u{116bf}'), - ('\u{116ca}', '\u{116cf}'), - ('\u{116e4}', '\u{116ff}'), - ('\u{1171b}', '\u{1171c}'), - ('\u{1172c}', '\u{1172f}'), - ('\u{11747}', '\u{117ff}'), - ('\u{1183c}', '\u{1189f}'), - ('\u{118f3}', '\u{118fe}'), - ('\u{11907}', '\u{11908}'), - ('\u{1190a}', '\u{1190b}'), - ('\u{11914}', '\u{11914}'), - ('\u{11917}', '\u{11917}'), - ('\u{11936}', '\u{11936}'), - ('\u{11939}', '\u{1193a}'), - ('\u{11947}', '\u{1194f}'), - ('\u{1195a}', '\u{1199f}'), - ('\u{119a8}', '\u{119a9}'), - ('\u{119d8}', '\u{119d9}'), - ('\u{119e5}', '\u{119ff}'), - ('\u{11a48}', '\u{11a4f}'), - ('\u{11aa3}', '\u{11aaf}'), - ('\u{11af9}', '\u{11aff}'), - ('\u{11b0a}', '\u{11bbf}'), - ('\u{11be2}', '\u{11bef}'), - ('\u{11bfa}', '\u{11bff}'), - ('\u{11c09}', '\u{11c09}'), - ('\u{11c37}', '\u{11c37}'), - ('\u{11c46}', '\u{11c4f}'), - ('\u{11c6d}', '\u{11c6f}'), - ('\u{11c90}', '\u{11c91}'), - ('\u{11ca8}', '\u{11ca8}'), - ('\u{11cb7}', '\u{11cff}'), - ('\u{11d07}', '\u{11d07}'), 
- ('\u{11d0a}', '\u{11d0a}'), - ('\u{11d37}', '\u{11d39}'), - ('\u{11d3b}', '\u{11d3b}'), - ('\u{11d3e}', '\u{11d3e}'), - ('\u{11d48}', '\u{11d4f}'), - ('\u{11d5a}', '\u{11d5f}'), - ('\u{11d66}', '\u{11d66}'), - ('\u{11d69}', '\u{11d69}'), - ('\u{11d8f}', '\u{11d8f}'), - ('\u{11d92}', '\u{11d92}'), - ('\u{11d99}', '\u{11d9f}'), - ('\u{11daa}', '\u{11edf}'), - ('\u{11ef9}', '\u{11eff}'), - ('\u{11f11}', '\u{11f11}'), - ('\u{11f3b}', '\u{11f3d}'), - ('\u{11f5b}', '\u{11faf}'), - ('\u{11fb1}', '\u{11fbf}'), - ('\u{11ff2}', '\u{11ffe}'), - ('\u{1239a}', '\u{123ff}'), - ('\u{1246f}', '\u{1246f}'), - ('\u{12475}', '\u{1247f}'), - ('\u{12544}', '\u{12f8f}'), - ('\u{12ff3}', '\u{12fff}'), - ('\u{13430}', '\u{1343f}'), - ('\u{13456}', '\u{1345f}'), - ('\u{143fb}', '\u{143ff}'), - ('\u{14647}', '\u{160ff}'), - ('\u{1613a}', '\u{167ff}'), - ('\u{16a39}', '\u{16a3f}'), - ('\u{16a5f}', '\u{16a5f}'), - ('\u{16a6a}', '\u{16a6d}'), - ('\u{16abf}', '\u{16abf}'), - ('\u{16aca}', '\u{16acf}'), - ('\u{16aee}', '\u{16aef}'), - ('\u{16af6}', '\u{16aff}'), - ('\u{16b46}', '\u{16b4f}'), - ('\u{16b5a}', '\u{16b5a}'), - ('\u{16b62}', '\u{16b62}'), - ('\u{16b78}', '\u{16b7c}'), - ('\u{16b90}', '\u{16d3f}'), - ('\u{16d7a}', '\u{16e3f}'), - ('\u{16e9b}', '\u{16eff}'), - ('\u{16f4b}', '\u{16f4e}'), - ('\u{16f88}', '\u{16f8e}'), - ('\u{16fa0}', '\u{16fdf}'), - ('\u{16fe5}', '\u{16fef}'), - ('\u{16ff2}', '\u{16fff}'), - ('\u{187f8}', '\u{187ff}'), - ('\u{18cd6}', '\u{18cfe}'), - ('\u{18d09}', '\u{1afef}'), - ('\u{1aff4}', '\u{1aff4}'), - ('\u{1affc}', '\u{1affc}'), - ('\u{1afff}', '\u{1afff}'), - ('\u{1b123}', '\u{1b131}'), - ('\u{1b133}', '\u{1b14f}'), - ('\u{1b153}', '\u{1b154}'), - ('\u{1b156}', '\u{1b163}'), - ('\u{1b168}', '\u{1b16f}'), - ('\u{1b2fc}', '\u{1bbff}'), - ('\u{1bc6b}', '\u{1bc6f}'), - ('\u{1bc7d}', '\u{1bc7f}'), - ('\u{1bc89}', '\u{1bc8f}'), - ('\u{1bc9a}', '\u{1bc9b}'), - ('\u{1bca0}', '\u{1cbff}'), - ('\u{1ccfa}', '\u{1ccff}'), - ('\u{1ceb4}', '\u{1ceff}'), - ('\u{1cf2e}', '\u{1cf2f}'), - ('\u{1cf47}', '\u{1cf4f}'), - ('\u{1cfc4}', '\u{1cfff}'), - ('\u{1d0f6}', '\u{1d0ff}'), - ('\u{1d127}', '\u{1d128}'), - ('\u{1d173}', '\u{1d17a}'), - ('\u{1d1eb}', '\u{1d1ff}'), - ('\u{1d246}', '\u{1d2bf}'), - ('\u{1d2d4}', '\u{1d2df}'), - ('\u{1d2f4}', '\u{1d2ff}'), - ('\u{1d357}', '\u{1d35f}'), - ('\u{1d379}', '\u{1d3ff}'), - ('\u{1d455}', '\u{1d455}'), - ('\u{1d49d}', '\u{1d49d}'), - ('\u{1d4a0}', '\u{1d4a1}'), - ('\u{1d4a3}', '\u{1d4a4}'), - ('\u{1d4a7}', '\u{1d4a8}'), - ('\u{1d4ad}', '\u{1d4ad}'), - ('\u{1d4ba}', '\u{1d4ba}'), - ('\u{1d4bc}', '\u{1d4bc}'), - ('\u{1d4c4}', '\u{1d4c4}'), - ('\u{1d506}', '\u{1d506}'), - ('\u{1d50b}', '\u{1d50c}'), - ('\u{1d515}', '\u{1d515}'), - ('\u{1d51d}', '\u{1d51d}'), - ('\u{1d53a}', '\u{1d53a}'), - ('\u{1d53f}', '\u{1d53f}'), - ('\u{1d545}', '\u{1d545}'), - ('\u{1d547}', '\u{1d549}'), - ('\u{1d551}', '\u{1d551}'), - ('\u{1d6a6}', '\u{1d6a7}'), - ('\u{1d7cc}', '\u{1d7cd}'), - ('\u{1da8c}', '\u{1da9a}'), - ('\u{1daa0}', '\u{1daa0}'), - ('\u{1dab0}', '\u{1deff}'), - ('\u{1df1f}', '\u{1df24}'), - ('\u{1df2b}', '\u{1dfff}'), - ('\u{1e007}', '\u{1e007}'), - ('\u{1e019}', '\u{1e01a}'), - ('\u{1e022}', '\u{1e022}'), - ('\u{1e025}', '\u{1e025}'), - ('\u{1e02b}', '\u{1e02f}'), - ('\u{1e06e}', '\u{1e08e}'), - ('\u{1e090}', '\u{1e0ff}'), - ('\u{1e12d}', '\u{1e12f}'), - ('\u{1e13e}', '\u{1e13f}'), - ('\u{1e14a}', '\u{1e14d}'), - ('\u{1e150}', '\u{1e28f}'), - ('\u{1e2af}', '\u{1e2bf}'), - ('\u{1e2fa}', '\u{1e2fe}'), - ('\u{1e300}', '\u{1e4cf}'), - ('\u{1e4fa}', '\u{1e5cf}'), - 
('\u{1e5fb}', '\u{1e5fe}'), - ('\u{1e600}', '\u{1e7df}'), - ('\u{1e7e7}', '\u{1e7e7}'), - ('\u{1e7ec}', '\u{1e7ec}'), - ('\u{1e7ef}', '\u{1e7ef}'), - ('\u{1e7ff}', '\u{1e7ff}'), - ('\u{1e8c5}', '\u{1e8c6}'), - ('\u{1e8d7}', '\u{1e8ff}'), - ('\u{1e94c}', '\u{1e94f}'), - ('\u{1e95a}', '\u{1e95d}'), - ('\u{1e960}', '\u{1ec70}'), - ('\u{1ecb5}', '\u{1ed00}'), - ('\u{1ed3e}', '\u{1edff}'), - ('\u{1ee04}', '\u{1ee04}'), - ('\u{1ee20}', '\u{1ee20}'), - ('\u{1ee23}', '\u{1ee23}'), - ('\u{1ee25}', '\u{1ee26}'), - ('\u{1ee28}', '\u{1ee28}'), - ('\u{1ee33}', '\u{1ee33}'), - ('\u{1ee38}', '\u{1ee38}'), - ('\u{1ee3a}', '\u{1ee3a}'), - ('\u{1ee3c}', '\u{1ee41}'), - ('\u{1ee43}', '\u{1ee46}'), - ('\u{1ee48}', '\u{1ee48}'), - ('\u{1ee4a}', '\u{1ee4a}'), - ('\u{1ee4c}', '\u{1ee4c}'), - ('\u{1ee50}', '\u{1ee50}'), - ('\u{1ee53}', '\u{1ee53}'), - ('\u{1ee55}', '\u{1ee56}'), - ('\u{1ee58}', '\u{1ee58}'), - ('\u{1ee5a}', '\u{1ee5a}'), - ('\u{1ee5c}', '\u{1ee5c}'), - ('\u{1ee5e}', '\u{1ee5e}'), - ('\u{1ee60}', '\u{1ee60}'), - ('\u{1ee63}', '\u{1ee63}'), - ('\u{1ee65}', '\u{1ee66}'), - ('\u{1ee6b}', '\u{1ee6b}'), - ('\u{1ee73}', '\u{1ee73}'), - ('\u{1ee78}', '\u{1ee78}'), - ('\u{1ee7d}', '\u{1ee7d}'), - ('\u{1ee7f}', '\u{1ee7f}'), - ('\u{1ee8a}', '\u{1ee8a}'), - ('\u{1ee9c}', '\u{1eea0}'), - ('\u{1eea4}', '\u{1eea4}'), - ('\u{1eeaa}', '\u{1eeaa}'), - ('\u{1eebc}', '\u{1eeef}'), - ('\u{1eef2}', '\u{1efff}'), - ('\u{1f02c}', '\u{1f02f}'), - ('\u{1f094}', '\u{1f09f}'), - ('\u{1f0af}', '\u{1f0b0}'), - ('\u{1f0c0}', '\u{1f0c0}'), - ('\u{1f0d0}', '\u{1f0d0}'), - ('\u{1f0f6}', '\u{1f0ff}'), - ('\u{1f1ae}', '\u{1f1e5}'), - ('\u{1f203}', '\u{1f20f}'), - ('\u{1f23c}', '\u{1f23f}'), - ('\u{1f249}', '\u{1f24f}'), - ('\u{1f252}', '\u{1f25f}'), - ('\u{1f266}', '\u{1f2ff}'), - ('\u{1f6d8}', '\u{1f6db}'), - ('\u{1f6ed}', '\u{1f6ef}'), - ('\u{1f6fd}', '\u{1f6ff}'), - ('\u{1f777}', '\u{1f77a}'), - ('\u{1f7da}', '\u{1f7df}'), - ('\u{1f7ec}', '\u{1f7ef}'), - ('\u{1f7f1}', '\u{1f7ff}'), - ('\u{1f80c}', '\u{1f80f}'), - ('\u{1f848}', '\u{1f84f}'), - ('\u{1f85a}', '\u{1f85f}'), - ('\u{1f888}', '\u{1f88f}'), - ('\u{1f8ae}', '\u{1f8af}'), - ('\u{1f8bc}', '\u{1f8bf}'), - ('\u{1f8c2}', '\u{1f8ff}'), - ('\u{1fa54}', '\u{1fa5f}'), - ('\u{1fa6e}', '\u{1fa6f}'), - ('\u{1fa7d}', '\u{1fa7f}'), - ('\u{1fa8a}', '\u{1fa8e}'), - ('\u{1fac7}', '\u{1facd}'), - ('\u{1fadd}', '\u{1fade}'), - ('\u{1faea}', '\u{1faef}'), - ('\u{1faf9}', '\u{1faff}'), - ('\u{1fb93}', '\u{1fb93}'), - ('\u{1fbfa}', '\u{1ffff}'), - ('\u{2a6e0}', '\u{2a6ff}'), - ('\u{2b73a}', '\u{2b73f}'), - ('\u{2b81e}', '\u{2b81f}'), - ('\u{2cea2}', '\u{2ceaf}'), - ('\u{2ebe1}', '\u{2ebef}'), - ('\u{2ee5e}', '\u{2f7ff}'), - ('\u{2fa1e}', '\u{2ffff}'), - ('\u{3134b}', '\u{3134f}'), - ('\u{323b0}', '\u{e00ff}'), - ('\u{e01f0}', '\u{10ffff}'), -]; - -pub const OTHER_LETTER: &'static [(char, char)] = &[ - ('ª', 'ª'), - ('º', 'º'), - ('ƻ', 'ƻ'), - ('ǀ', 'ǃ'), - ('ʔ', 'ʔ'), - ('א', 'ת'), - ('ׯ', 'ײ'), - ('ؠ', 'ؿ'), - ('ف', 'ي'), - ('ٮ', 'ٯ'), - ('ٱ', 'ۓ'), - ('ە', 'ە'), - ('ۮ', 'ۯ'), - ('ۺ', 'ۼ'), - ('ۿ', 'ۿ'), - ('ܐ', 'ܐ'), - ('ܒ', 'ܯ'), - ('ݍ', 'ޥ'), - ('ޱ', 'ޱ'), - ('ߊ', 'ߪ'), - ('ࠀ', 'ࠕ'), - ('ࡀ', 'ࡘ'), - ('ࡠ', 'ࡪ'), - ('ࡰ', 'ࢇ'), - ('ࢉ', 'ࢎ'), - ('ࢠ', 'ࣈ'), - ('ऄ', 'ह'), - ('ऽ', 'ऽ'), - ('ॐ', 'ॐ'), - ('क़', 'ॡ'), - ('ॲ', 'ঀ'), - ('অ', 'ঌ'), - ('এ', 'ঐ'), - ('ও', 'ন'), - ('প', 'র'), - ('ল', 'ল'), - ('শ', 'হ'), - ('ঽ', 'ঽ'), - ('ৎ', 'ৎ'), - ('ড়', 'ঢ়'), - ('য়', 'ৡ'), - ('ৰ', 'ৱ'), - ('ৼ', 'ৼ'), - ('ਅ', 'ਊ'), - ('ਏ', 'ਐ'), - ('ਓ', 'ਨ'), - ('ਪ', 'ਰ'), - ('ਲ', 'ਲ਼'), - ('ਵ', 'ਸ਼'), - ('ਸ', 'ਹ'), 
- ('ਖ਼', 'ੜ'), - ('ਫ਼', 'ਫ਼'), - ('ੲ', 'ੴ'), - ('અ', 'ઍ'), - ('એ', 'ઑ'), - ('ઓ', 'ન'), - ('પ', 'ર'), - ('લ', 'ળ'), - ('વ', 'હ'), - ('ઽ', 'ઽ'), - ('ૐ', 'ૐ'), - ('ૠ', 'ૡ'), - ('ૹ', 'ૹ'), - ('ଅ', 'ଌ'), - ('ଏ', 'ଐ'), - ('ଓ', 'ନ'), - ('ପ', 'ର'), - ('ଲ', 'ଳ'), - ('ଵ', 'ହ'), - ('ଽ', 'ଽ'), - ('ଡ଼', 'ଢ଼'), - ('ୟ', 'ୡ'), - ('ୱ', 'ୱ'), - ('ஃ', 'ஃ'), - ('அ', 'ஊ'), - ('எ', 'ஐ'), - ('ஒ', 'க'), - ('ங', 'ச'), - ('ஜ', 'ஜ'), - ('ஞ', 'ட'), - ('ண', 'த'), - ('ந', 'ப'), - ('ம', 'ஹ'), - ('ௐ', 'ௐ'), - ('అ', 'ఌ'), - ('ఎ', 'ఐ'), - ('ఒ', 'న'), - ('ప', 'హ'), - ('ఽ', 'ఽ'), - ('ౘ', 'ౚ'), - ('ౝ', 'ౝ'), - ('ౠ', 'ౡ'), - ('ಀ', 'ಀ'), - ('ಅ', 'ಌ'), - ('ಎ', 'ಐ'), - ('ಒ', 'ನ'), - ('ಪ', 'ಳ'), - ('ವ', 'ಹ'), - ('ಽ', 'ಽ'), - ('ೝ', 'ೞ'), - ('ೠ', 'ೡ'), - ('ೱ', 'ೲ'), - ('ഄ', 'ഌ'), - ('എ', 'ഐ'), - ('ഒ', 'ഺ'), - ('ഽ', 'ഽ'), - ('ൎ', 'ൎ'), - ('ൔ', 'ൖ'), - ('ൟ', 'ൡ'), - ('ൺ', 'ൿ'), - ('අ', 'ඖ'), - ('ක', 'න'), - ('ඳ', 'ර'), - ('ල', 'ල'), - ('ව', 'ෆ'), - ('ก', 'ะ'), - ('า', 'ำ'), - ('เ', 'ๅ'), - ('ກ', 'ຂ'), - ('ຄ', 'ຄ'), - ('ຆ', 'ຊ'), - ('ຌ', 'ຣ'), - ('ລ', 'ລ'), - ('ວ', 'ະ'), - ('າ', 'ຳ'), - ('ຽ', 'ຽ'), - ('ເ', 'ໄ'), - ('ໜ', 'ໟ'), - ('ༀ', 'ༀ'), - ('ཀ', 'ཇ'), - ('ཉ', 'ཬ'), - ('ྈ', 'ྌ'), - ('က', 'ဪ'), - ('ဿ', 'ဿ'), - ('ၐ', 'ၕ'), - ('ၚ', 'ၝ'), - ('ၡ', 'ၡ'), - ('ၥ', 'ၦ'), - ('ၮ', 'ၰ'), - ('ၵ', 'ႁ'), - ('ႎ', 'ႎ'), - ('ᄀ', 'ቈ'), - ('ቊ', 'ቍ'), - ('ቐ', 'ቖ'), - ('ቘ', 'ቘ'), - ('ቚ', 'ቝ'), - ('በ', 'ኈ'), - ('ኊ', 'ኍ'), - ('ነ', 'ኰ'), - ('ኲ', 'ኵ'), - ('ኸ', 'ኾ'), - ('ዀ', 'ዀ'), - ('ዂ', 'ዅ'), - ('ወ', 'ዖ'), - ('ዘ', 'ጐ'), - ('ጒ', 'ጕ'), - ('ጘ', 'ፚ'), - ('ᎀ', 'ᎏ'), - ('ᐁ', 'ᙬ'), - ('ᙯ', 'ᙿ'), - ('ᚁ', 'ᚚ'), - ('ᚠ', 'ᛪ'), - ('ᛱ', 'ᛸ'), - ('ᜀ', 'ᜑ'), - ('ᜟ', 'ᜱ'), - ('ᝀ', 'ᝑ'), - ('ᝠ', 'ᝬ'), - ('ᝮ', 'ᝰ'), - ('ក', 'ឳ'), - ('ៜ', 'ៜ'), - ('ᠠ', 'ᡂ'), - ('ᡄ', 'ᡸ'), - ('ᢀ', 'ᢄ'), - ('ᢇ', 'ᢨ'), - ('ᢪ', 'ᢪ'), - ('ᢰ', 'ᣵ'), - ('ᤀ', 'ᤞ'), - ('ᥐ', 'ᥭ'), - ('ᥰ', 'ᥴ'), - ('ᦀ', 'ᦫ'), - ('ᦰ', 'ᧉ'), - ('ᨀ', 'ᨖ'), - ('ᨠ', 'ᩔ'), - ('ᬅ', 'ᬳ'), - ('ᭅ', 'ᭌ'), - ('ᮃ', 'ᮠ'), - ('ᮮ', 'ᮯ'), - ('ᮺ', 'ᯥ'), - ('ᰀ', 'ᰣ'), - ('ᱍ', 'ᱏ'), - ('ᱚ', 'ᱷ'), - ('ᳩ', 'ᳬ'), - ('ᳮ', 'ᳳ'), - ('ᳵ', 'ᳶ'), - ('ᳺ', 'ᳺ'), - ('ℵ', 'ℸ'), - ('ⴰ', 'ⵧ'), - ('ⶀ', 'ⶖ'), - ('ⶠ', 'ⶦ'), - ('ⶨ', 'ⶮ'), - ('ⶰ', 'ⶶ'), - ('ⶸ', 'ⶾ'), - ('ⷀ', 'ⷆ'), - ('ⷈ', 'ⷎ'), - ('ⷐ', 'ⷖ'), - ('ⷘ', 'ⷞ'), - ('〆', '〆'), - ('〼', '〼'), - ('ぁ', 'ゖ'), - ('ゟ', 'ゟ'), - ('ァ', 'ヺ'), - ('ヿ', 'ヿ'), - ('ㄅ', 'ㄯ'), - ('ㄱ', 'ㆎ'), - ('ㆠ', 'ㆿ'), - ('ㇰ', 'ㇿ'), - ('㐀', '䶿'), - ('一', 'ꀔ'), - ('ꀖ', 'ꒌ'), - ('ꓐ', 'ꓷ'), - ('ꔀ', 'ꘋ'), - ('ꘐ', 'ꘟ'), - ('ꘪ', 'ꘫ'), - ('ꙮ', 'ꙮ'), - ('ꚠ', 'ꛥ'), - ('ꞏ', 'ꞏ'), - ('ꟷ', 'ꟷ'), - ('ꟻ', 'ꠁ'), - ('ꠃ', 'ꠅ'), - ('ꠇ', 'ꠊ'), - ('ꠌ', 'ꠢ'), - ('ꡀ', 'ꡳ'), - ('ꢂ', 'ꢳ'), - ('ꣲ', 'ꣷ'), - ('ꣻ', 'ꣻ'), - ('ꣽ', 'ꣾ'), - ('ꤊ', 'ꤥ'), - ('ꤰ', 'ꥆ'), - ('ꥠ', 'ꥼ'), - ('ꦄ', 'ꦲ'), - ('ꧠ', 'ꧤ'), - ('ꧧ', 'ꧯ'), - ('ꧺ', 'ꧾ'), - ('ꨀ', 'ꨨ'), - ('ꩀ', 'ꩂ'), - ('ꩄ', 'ꩋ'), - ('ꩠ', 'ꩯ'), - ('ꩱ', 'ꩶ'), - ('ꩺ', 'ꩺ'), - ('ꩾ', 'ꪯ'), - ('ꪱ', 'ꪱ'), - ('ꪵ', 'ꪶ'), - ('ꪹ', 'ꪽ'), - ('ꫀ', 'ꫀ'), - ('ꫂ', 'ꫂ'), - ('ꫛ', 'ꫜ'), - ('ꫠ', 'ꫪ'), - ('ꫲ', 'ꫲ'), - ('ꬁ', 'ꬆ'), - ('ꬉ', 'ꬎ'), - ('ꬑ', 'ꬖ'), - ('ꬠ', 'ꬦ'), - ('ꬨ', 'ꬮ'), - ('ꯀ', 'ꯢ'), - ('가', '힣'), - ('ힰ', 'ퟆ'), - ('ퟋ', 'ퟻ'), - ('豈', '舘'), - ('並', '龎'), - ('יִ', 'יִ'), - ('ײַ', 'ﬨ'), - ('שׁ', 'זּ'), - ('טּ', 'לּ'), - ('מּ', 'מּ'), - ('נּ', 'סּ'), - ('ףּ', 'פּ'), - ('צּ', 'ﮱ'), - ('ﯓ', 'ﴽ'), - ('ﵐ', 'ﶏ'), - ('ﶒ', 'ﷇ'), - ('ﷰ', 'ﷻ'), - ('ﹰ', 'ﹴ'), - ('ﹶ', 'ﻼ'), - ('ヲ', 'ッ'), - ('ア', 'ン'), - ('ᅠ', 'ᄒ'), - ('ᅡ', 'ᅦ'), - ('ᅧ', 'ᅬ'), - ('ᅭ', 'ᅲ'), - ('ᅳ', 'ᅵ'), - ('𐀀', '𐀋'), - ('𐀍', '𐀦'), - ('𐀨', '𐀺'), - ('𐀼', '𐀽'), - ('𐀿', '𐁍'), - ('𐁐', '𐁝'), - ('𐂀', '𐃺'), - ('𐊀', '𐊜'), - ('𐊠', '𐋐'), - ('𐌀', '𐌟'), - ('𐌭', '𐍀'), - ('𐍂', '𐍉'), - ('𐍐', 
'𐍵'), - ('𐎀', '𐎝'), - ('𐎠', '𐏃'), - ('𐏈', '𐏏'), - ('𐑐', '𐒝'), - ('𐔀', '𐔧'), - ('𐔰', '𐕣'), - ('𐗀', '𐗳'), - ('𐘀', '𐜶'), - ('𐝀', '𐝕'), - ('𐝠', '𐝧'), - ('𐠀', '𐠅'), - ('𐠈', '𐠈'), - ('𐠊', '𐠵'), - ('𐠷', '𐠸'), - ('𐠼', '𐠼'), - ('𐠿', '𐡕'), - ('𐡠', '𐡶'), - ('𐢀', '𐢞'), - ('𐣠', '𐣲'), - ('𐣴', '𐣵'), - ('𐤀', '𐤕'), - ('𐤠', '𐤹'), - ('𐦀', '𐦷'), - ('𐦾', '𐦿'), - ('𐨀', '𐨀'), - ('𐨐', '𐨓'), - ('𐨕', '𐨗'), - ('𐨙', '𐨵'), - ('𐩠', '𐩼'), - ('𐪀', '𐪜'), - ('𐫀', '𐫇'), - ('𐫉', '𐫤'), - ('𐬀', '𐬵'), - ('𐭀', '𐭕'), - ('𐭠', '𐭲'), - ('𐮀', '𐮑'), - ('𐰀', '𐱈'), - ('𐴀', '𐴣'), - ('𐵊', '𐵍'), - ('𐵏', '𐵏'), - ('𐺀', '𐺩'), - ('𐺰', '𐺱'), - ('𐻂', '𐻄'), - ('𐼀', '𐼜'), - ('𐼧', '𐼧'), - ('𐼰', '𐽅'), - ('𐽰', '𐾁'), - ('𐾰', '𐿄'), - ('𐿠', '𐿶'), - ('𑀃', '𑀷'), - ('𑁱', '𑁲'), - ('𑁵', '𑁵'), - ('𑂃', '𑂯'), - ('𑃐', '𑃨'), - ('𑄃', '𑄦'), - ('𑅄', '𑅄'), - ('𑅇', '𑅇'), - ('𑅐', '𑅲'), - ('𑅶', '𑅶'), - ('𑆃', '𑆲'), - ('𑇁', '𑇄'), - ('𑇚', '𑇚'), - ('𑇜', '𑇜'), - ('𑈀', '𑈑'), - ('𑈓', '𑈫'), - ('𑈿', '𑉀'), - ('𑊀', '𑊆'), - ('𑊈', '𑊈'), - ('𑊊', '𑊍'), - ('𑊏', '𑊝'), - ('𑊟', '𑊨'), - ('𑊰', '𑋞'), - ('𑌅', '𑌌'), - ('𑌏', '𑌐'), - ('𑌓', '𑌨'), - ('𑌪', '𑌰'), - ('𑌲', '𑌳'), - ('𑌵', '𑌹'), - ('𑌽', '𑌽'), - ('𑍐', '𑍐'), - ('𑍝', '𑍡'), - ('𑎀', '𑎉'), - ('𑎋', '𑎋'), - ('𑎎', '𑎎'), - ('𑎐', '𑎵'), - ('𑎷', '𑎷'), - ('𑏑', '𑏑'), - ('𑏓', '𑏓'), - ('𑐀', '𑐴'), - ('𑑇', '𑑊'), - ('𑑟', '𑑡'), - ('𑒀', '𑒯'), - ('𑓄', '𑓅'), - ('𑓇', '𑓇'), - ('𑖀', '𑖮'), - ('𑗘', '𑗛'), - ('𑘀', '𑘯'), - ('𑙄', '𑙄'), - ('𑚀', '𑚪'), - ('𑚸', '𑚸'), - ('𑜀', '𑜚'), - ('𑝀', '𑝆'), - ('𑠀', '𑠫'), - ('𑣿', '𑤆'), - ('𑤉', '𑤉'), - ('𑤌', '𑤓'), - ('𑤕', '𑤖'), - ('𑤘', '𑤯'), - ('𑤿', '𑤿'), - ('𑥁', '𑥁'), - ('𑦠', '𑦧'), - ('𑦪', '𑧐'), - ('𑧡', '𑧡'), - ('𑧣', '𑧣'), - ('𑨀', '𑨀'), - ('𑨋', '𑨲'), - ('𑨺', '𑨺'), - ('𑩐', '𑩐'), - ('𑩜', '𑪉'), - ('𑪝', '𑪝'), - ('𑪰', '𑫸'), - ('𑯀', '𑯠'), - ('𑰀', '𑰈'), - ('𑰊', '𑰮'), - ('𑱀', '𑱀'), - ('𑱲', '𑲏'), - ('𑴀', '𑴆'), - ('𑴈', '𑴉'), - ('𑴋', '𑴰'), - ('𑵆', '𑵆'), - ('𑵠', '𑵥'), - ('𑵧', '𑵨'), - ('𑵪', '𑶉'), - ('𑶘', '𑶘'), - ('𑻠', '𑻲'), - ('𑼂', '𑼂'), - ('𑼄', '𑼐'), - ('𑼒', '𑼳'), - ('𑾰', '𑾰'), - ('𒀀', '𒎙'), - ('𒒀', '𒕃'), - ('𒾐', '𒿰'), - ('𓀀', '𓐯'), - ('𓑁', '𓑆'), - ('𓑠', '𔏺'), - ('𔐀', '𔙆'), - ('𖄀', '𖄝'), - ('𖠀', '𖨸'), - ('𖩀', '𖩞'), - ('𖩰', '𖪾'), - ('𖫐', '𖫭'), - ('𖬀', '𖬯'), - ('𖭣', '𖭷'), - ('𖭽', '𖮏'), - ('𖵃', '𖵪'), - ('𖼀', '𖽊'), - ('𖽐', '𖽐'), - ('𗀀', '𘟷'), - ('𘠀', '𘳕'), - ('𘳿', '𘴈'), - ('𛀀', '𛄢'), - ('𛄲', '𛄲'), - ('𛅐', '𛅒'), - ('𛅕', '𛅕'), - ('𛅤', '𛅧'), - ('𛅰', '𛋻'), - ('𛰀', '𛱪'), - ('𛱰', '𛱼'), - ('𛲀', '𛲈'), - ('𛲐', '𛲙'), - ('𝼊', '𝼊'), - ('𞄀', '𞄬'), - ('𞅎', '𞅎'), - ('𞊐', '𞊭'), - ('𞋀', '𞋫'), - ('𞓐', '𞓪'), - ('𞗐', '𞗭'), - ('𞗰', '𞗰'), - ('𞟠', '𞟦'), - ('𞟨', '𞟫'), - ('𞟭', '𞟮'), - ('𞟰', '𞟾'), - ('𞠀', '𞣄'), - ('𞸀', '𞸃'), - ('𞸅', '𞸟'), - ('𞸡', '𞸢'), - ('𞸤', '𞸤'), - ('𞸧', '𞸧'), - ('𞸩', '𞸲'), - ('𞸴', '𞸷'), - ('𞸹', '𞸹'), - ('𞸻', '𞸻'), - ('𞹂', '𞹂'), - ('𞹇', '𞹇'), - ('𞹉', '𞹉'), - ('𞹋', '𞹋'), - ('𞹍', '𞹏'), - ('𞹑', '𞹒'), - ('𞹔', '𞹔'), - ('𞹗', '𞹗'), - ('𞹙', '𞹙'), - ('𞹛', '𞹛'), - ('𞹝', '𞹝'), - ('𞹟', '𞹟'), - ('𞹡', '𞹢'), - ('𞹤', '𞹤'), - ('𞹧', '𞹪'), - ('𞹬', '𞹲'), - ('𞹴', '𞹷'), - ('𞹹', '𞹼'), - ('𞹾', '𞹾'), - ('𞺀', '𞺉'), - ('𞺋', '𞺛'), - ('𞺡', '𞺣'), - ('𞺥', '𞺩'), - ('𞺫', '𞺻'), - ('𠀀', '𪛟'), - ('𪜀', '𫜹'), - ('𫝀', '𫠝'), - ('𫠠', '𬺡'), - ('𬺰', '𮯠'), - ('𮯰', '𮹝'), - ('丽', '𪘀'), - ('𰀀', '𱍊'), - ('𱍐', '𲎯'), -]; - -pub const OTHER_NUMBER: &'static [(char, char)] = &[ - ('²', '³'), - ('¹', '¹'), - ('¼', '¾'), - ('৴', '৹'), - ('୲', '୷'), - ('௰', '௲'), - ('౸', '౾'), - ('൘', '൞'), - ('൰', '൸'), - ('༪', '༳'), - ('፩', '፼'), - ('៰', '៹'), - ('᧚', '᧚'), - ('⁰', '⁰'), - ('⁴', '⁹'), - ('₀', '₉'), - ('⅐', '⅟'), - ('↉', '↉'), - ('①', '⒛'), - ('⓪', '⓿'), - ('❶', '➓'), - ('⳽', '⳽'), - ('㆒', '㆕'), - ('㈠', '㈩'), - 
('㉈', '㉏'), - ('㉑', '㉟'), - ('㊀', '㊉'), - ('㊱', '㊿'), - ('꠰', '꠵'), - ('𐄇', '𐄳'), - ('𐅵', '𐅸'), - ('𐆊', '𐆋'), - ('𐋡', '𐋻'), - ('𐌠', '𐌣'), - ('𐡘', '𐡟'), - ('𐡹', '𐡿'), - ('𐢧', '𐢯'), - ('𐣻', '𐣿'), - ('𐤖', '𐤛'), - ('𐦼', '𐦽'), - ('𐧀', '𐧏'), - ('𐧒', '𐧿'), - ('𐩀', '𐩈'), - ('𐩽', '𐩾'), - ('𐪝', '𐪟'), - ('𐫫', '𐫯'), - ('𐭘', '𐭟'), - ('𐭸', '𐭿'), - ('𐮩', '𐮯'), - ('𐳺', '𐳿'), - ('𐹠', '𐹾'), - ('𐼝', '𐼦'), - ('𐽑', '𐽔'), - ('𐿅', '𐿋'), - ('𑁒', '𑁥'), - ('𑇡', '𑇴'), - ('𑜺', '𑜻'), - ('𑣪', '𑣲'), - ('𑱚', '𑱬'), - ('𑿀', '𑿔'), - ('𖭛', '𖭡'), - ('𖺀', '𖺖'), - ('𝋀', '𝋓'), - ('𝋠', '𝋳'), - ('𝍠', '𝍸'), - ('𞣇', '𞣏'), - ('𞱱', '𞲫'), - ('𞲭', '𞲯'), - ('𞲱', '𞲴'), - ('𞴁', '𞴭'), - ('𞴯', '𞴽'), - ('🄀', '🄌'), -]; - -pub const OTHER_PUNCTUATION: &'static [(char, char)] = &[ - ('!', '#'), - ('%', '\''), - ('*', '*'), - (',', ','), - ('.', '/'), - (':', ';'), - ('?', '@'), - ('\\', '\\'), - ('¡', '¡'), - ('§', '§'), - ('¶', '·'), - ('¿', '¿'), - (';', ';'), - ('·', '·'), - ('՚', '՟'), - ('։', '։'), - ('׀', '׀'), - ('׃', '׃'), - ('׆', '׆'), - ('׳', '״'), - ('؉', '؊'), - ('،', '؍'), - ('؛', '؛'), - ('؝', '؟'), - ('٪', '٭'), - ('۔', '۔'), - ('܀', '܍'), - ('߷', '߹'), - ('࠰', '࠾'), - ('࡞', '࡞'), - ('।', '॥'), - ('॰', '॰'), - ('৽', '৽'), - ('੶', '੶'), - ('૰', '૰'), - ('౷', '౷'), - ('಄', '಄'), - ('෴', '෴'), - ('๏', '๏'), - ('๚', '๛'), - ('༄', '༒'), - ('༔', '༔'), - ('྅', '྅'), - ('࿐', '࿔'), - ('࿙', '࿚'), - ('၊', '၏'), - ('჻', '჻'), - ('፠', '፨'), - ('᙮', '᙮'), - ('᛫', '᛭'), - ('᜵', '᜶'), - ('។', '៖'), - ('៘', '៚'), - ('᠀', '᠅'), - ('᠇', '᠊'), - ('᥄', '᥅'), - ('᨞', '᨟'), - ('᪠', '᪦'), - ('᪨', '᪭'), - ('᭎', '᭏'), - ('᭚', '᭠'), - ('᭽', '᭿'), - ('᯼', '᯿'), - ('᰻', '᰿'), - ('᱾', '᱿'), - ('᳀', '᳇'), - ('᳓', '᳓'), - ('‖', '‗'), - ('†', '‧'), - ('‰', '‸'), - ('※', '‾'), - ('⁁', '⁃'), - ('⁇', '⁑'), - ('⁓', '⁓'), - ('⁕', '⁞'), - ('⳹', '⳼'), - ('⳾', '⳿'), - ('⵰', '⵰'), - ('⸀', '⸁'), - ('⸆', '⸈'), - ('⸋', '⸋'), - ('⸎', '⸖'), - ('⸘', '⸙'), - ('⸛', '⸛'), - ('⸞', '⸟'), - ('⸪', '⸮'), - ('⸰', '⸹'), - ('⸼', '⸿'), - ('⹁', '⹁'), - ('⹃', '⹏'), - ('⹒', '⹔'), - ('、', '〃'), - ('〽', '〽'), - ('・', '・'), - ('꓾', '꓿'), - ('꘍', '꘏'), - ('꙳', '꙳'), - ('꙾', '꙾'), - ('꛲', '꛷'), - ('꡴', '꡷'), - ('꣎', '꣏'), - ('꣸', '꣺'), - ('꣼', '꣼'), - ('꤮', '꤯'), - ('꥟', '꥟'), - ('꧁', '꧍'), - ('꧞', '꧟'), - ('꩜', '꩟'), - ('꫞', '꫟'), - ('꫰', '꫱'), - ('꯫', '꯫'), - ('︐', '︖'), - ('︙', '︙'), - ('︰', '︰'), - ('﹅', '﹆'), - ('﹉', '﹌'), - ('﹐', '﹒'), - ('﹔', '﹗'), - ('﹟', '﹡'), - ('﹨', '﹨'), - ('﹪', '﹫'), - ('!', '#'), - ('%', '''), - ('*', '*'), - (',', ','), - ('.', '/'), - (':', ';'), - ('?', '@'), - ('\', '\'), - ('。', '。'), - ('、', '・'), - ('𐄀', '𐄂'), - ('𐎟', '𐎟'), - ('𐏐', '𐏐'), - ('𐕯', '𐕯'), - ('𐡗', '𐡗'), - ('𐤟', '𐤟'), - ('𐤿', '𐤿'), - ('𐩐', '𐩘'), - ('𐩿', '𐩿'), - ('𐫰', '𐫶'), - ('𐬹', '𐬿'), - ('𐮙', '𐮜'), - ('𐽕', '𐽙'), - ('𐾆', '𐾉'), - ('𑁇', '𑁍'), - ('𑂻', '𑂼'), - ('𑂾', '𑃁'), - ('𑅀', '𑅃'), - ('𑅴', '𑅵'), - ('𑇅', '𑇈'), - ('𑇍', '𑇍'), - ('𑇛', '𑇛'), - ('𑇝', '𑇟'), - ('𑈸', '𑈽'), - ('𑊩', '𑊩'), - ('𑏔', '𑏕'), - ('𑏗', '𑏘'), - ('𑑋', '𑑏'), - ('𑑚', '𑑛'), - ('𑑝', '𑑝'), - ('𑓆', '𑓆'), - ('𑗁', '𑗗'), - ('𑙁', '𑙃'), - ('𑙠', '𑙬'), - ('𑚹', '𑚹'), - ('𑜼', '𑜾'), - ('𑠻', '𑠻'), - ('𑥄', '𑥆'), - ('𑧢', '𑧢'), - ('𑨿', '𑩆'), - ('𑪚', '𑪜'), - ('𑪞', '𑪢'), - ('𑬀', '𑬉'), - ('𑯡', '𑯡'), - ('𑱁', '𑱅'), - ('𑱰', '𑱱'), - ('𑻷', '𑻸'), - ('𑽃', '𑽏'), - ('𑿿', '𑿿'), - ('𒑰', '𒑴'), - ('𒿱', '𒿲'), - ('𖩮', '𖩯'), - ('𖫵', '𖫵'), - ('𖬷', '𖬻'), - ('𖭄', '𖭄'), - ('𖵭', '𖵯'), - ('𖺗', '𖺚'), - ('𖿢', '𖿢'), - ('𛲟', '𛲟'), - ('𝪇', '𝪋'), - ('𞗿', '𞗿'), - ('𞥞', '𞥟'), -]; - -pub const OTHER_SYMBOL: &'static [(char, char)] = &[ - ('¦', '¦'), - ('©', '©'), - ('®', '®'), - ('°', 
'°'), - ('҂', '҂'), - ('֍', '֎'), - ('؎', '؏'), - ('۞', '۞'), - ('۩', '۩'), - ('۽', '۾'), - ('߶', '߶'), - ('৺', '৺'), - ('୰', '୰'), - ('௳', '௸'), - ('௺', '௺'), - ('౿', '౿'), - ('൏', '൏'), - ('൹', '൹'), - ('༁', '༃'), - ('༓', '༓'), - ('༕', '༗'), - ('༚', '༟'), - ('༴', '༴'), - ('༶', '༶'), - ('༸', '༸'), - ('྾', '࿅'), - ('࿇', '࿌'), - ('࿎', '࿏'), - ('࿕', '࿘'), - ('႞', '႟'), - ('᎐', '᎙'), - ('᙭', '᙭'), - ('᥀', '᥀'), - ('᧞', '᧿'), - ('᭡', '᭪'), - ('᭴', '᭼'), - ('℀', '℁'), - ('℃', '℆'), - ('℈', '℉'), - ('℔', '℔'), - ('№', '℗'), - ('℞', '℣'), - ('℥', '℥'), - ('℧', '℧'), - ('℩', '℩'), - ('℮', '℮'), - ('℺', '℻'), - ('⅊', '⅊'), - ('⅌', '⅍'), - ('⅏', '⅏'), - ('↊', '↋'), - ('↕', '↙'), - ('↜', '↟'), - ('↡', '↢'), - ('↤', '↥'), - ('↧', '↭'), - ('↯', '⇍'), - ('⇐', '⇑'), - ('⇓', '⇓'), - ('⇕', '⇳'), - ('⌀', '⌇'), - ('⌌', '⌟'), - ('⌢', '⌨'), - ('⌫', '⍻'), - ('⍽', '⎚'), - ('⎴', '⏛'), - ('⏢', '␩'), - ('⑀', '⑊'), - ('⒜', 'ⓩ'), - ('─', '▶'), - ('▸', '◀'), - ('◂', '◷'), - ('☀', '♮'), - ('♰', '❧'), - ('➔', '➿'), - ('⠀', '⣿'), - ('⬀', '⬯'), - ('⭅', '⭆'), - ('⭍', '⭳'), - ('⭶', '⮕'), - ('⮗', '⯿'), - ('⳥', '⳪'), - ('⹐', '⹑'), - ('⺀', '⺙'), - ('⺛', '⻳'), - ('⼀', '⿕'), - ('⿰', '⿿'), - ('〄', '〄'), - ('〒', '〓'), - ('〠', '〠'), - ('〶', '〷'), - ('〾', '〿'), - ('㆐', '㆑'), - ('㆖', '㆟'), - ('㇀', '㇥'), - ('㇯', '㇯'), - ('㈀', '㈞'), - ('㈪', '㉇'), - ('㉐', '㉐'), - ('㉠', '㉿'), - ('㊊', '㊰'), - ('㋀', '㏿'), - ('䷀', '䷿'), - ('꒐', '꓆'), - ('꠨', '꠫'), - ('꠶', '꠷'), - ('꠹', '꠹'), - ('꩷', '꩹'), - ('﵀', '﵏'), - ('﷏', '﷏'), - ('﷽', '﷿'), - ('¦', '¦'), - ('│', '│'), - ('■', '○'), - ('', '�'), - ('𐄷', '𐄿'), - ('𐅹', '𐆉'), - ('𐆌', '𐆎'), - ('𐆐', '𐆜'), - ('𐆠', '𐆠'), - ('𐇐', '𐇼'), - ('𐡷', '𐡸'), - ('𐫈', '𐫈'), - ('𑜿', '𑜿'), - ('𑿕', '𑿜'), - ('𑿡', '𑿱'), - ('𖬼', '𖬿'), - ('𖭅', '𖭅'), - ('𛲜', '𛲜'), - ('𜰀', '𜳯'), - ('𜴀', '𜺳'), - ('𜽐', '𜿃'), - ('𝀀', '𝃵'), - ('𝄀', '𝄦'), - ('𝄩', '𝅘𝅥𝅲'), - ('𝅪', '𝅬'), - ('𝆃', '𝆄'), - ('𝆌', '𝆩'), - ('𝆮', '𝇪'), - ('𝈀', '𝉁'), - ('𝉅', '𝉅'), - ('𝌀', '𝍖'), - ('𝠀', '𝧿'), - ('𝨷', '𝨺'), - ('𝩭', '𝩴'), - ('𝩶', '𝪃'), - ('𝪅', '𝪆'), - ('𞅏', '𞅏'), - ('𞲬', '𞲬'), - ('𞴮', '𞴮'), - ('🀀', '🀫'), - ('🀰', '🂓'), - ('🂠', '🂮'), - ('🂱', '🂿'), - ('🃁', '🃏'), - ('🃑', '🃵'), - ('🄍', '🆭'), - ('🇦', '🈂'), - ('🈐', '🈻'), - ('🉀', '🉈'), - ('🉐', '🉑'), - ('🉠', '🉥'), - ('🌀', '🏺'), - ('🐀', '🛗'), - ('🛜', '🛬'), - ('🛰', '🛼'), - ('🜀', '🝶'), - ('🝻', '🟙'), - ('🟠', '🟫'), - ('🟰', '🟰'), - ('🠀', '🠋'), - ('🠐', '🡇'), - ('🡐', '🡙'), - ('🡠', '🢇'), - ('🢐', '🢭'), - ('🢰', '🢻'), - ('🣀', '🣁'), - ('🤀', '🩓'), - ('🩠', '🩭'), - ('🩰', '🩼'), - ('🪀', '🪉'), - ('🪏', '🫆'), - ('🫎', '🫜'), - ('🫟', '🫩'), - ('🫰', '🫸'), - ('🬀', '🮒'), - ('🮔', '🯯'), -]; - -pub const PARAGRAPH_SEPARATOR: &'static [(char, char)] = - &[('\u{2029}', '\u{2029}')]; - -pub const PRIVATE_USE: &'static [(char, char)] = &[ - ('\u{e000}', '\u{f8ff}'), - ('\u{f0000}', '\u{ffffd}'), - ('\u{100000}', '\u{10fffd}'), -]; - -pub const PUNCTUATION: &'static [(char, char)] = &[ - ('!', '#'), - ('%', '*'), - (',', '/'), - (':', ';'), - ('?', '@'), - ('[', ']'), - ('_', '_'), - ('{', '{'), - ('}', '}'), - ('¡', '¡'), - ('§', '§'), - ('«', '«'), - ('¶', '·'), - ('»', '»'), - ('¿', '¿'), - (';', ';'), - ('·', '·'), - ('՚', '՟'), - ('։', '֊'), - ('־', '־'), - ('׀', '׀'), - ('׃', '׃'), - ('׆', '׆'), - ('׳', '״'), - ('؉', '؊'), - ('،', '؍'), - ('؛', '؛'), - ('؝', '؟'), - ('٪', '٭'), - ('۔', '۔'), - ('܀', '܍'), - ('߷', '߹'), - ('࠰', '࠾'), - ('࡞', '࡞'), - ('।', '॥'), - ('॰', '॰'), - ('৽', '৽'), - ('੶', '੶'), - ('૰', '૰'), - ('౷', '౷'), - ('಄', '಄'), - ('෴', '෴'), - ('๏', '๏'), - ('๚', '๛'), - ('༄', '༒'), - ('༔', '༔'), - ('༺', '༽'), - ('྅', '྅'), - ('࿐', '࿔'), 
- ('࿙', '࿚'), - ('၊', '၏'), - ('჻', '჻'), - ('፠', '፨'), - ('᐀', '᐀'), - ('᙮', '᙮'), - ('᚛', '᚜'), - ('᛫', '᛭'), - ('᜵', '᜶'), - ('។', '៖'), - ('៘', '៚'), - ('᠀', '᠊'), - ('᥄', '᥅'), - ('᨞', '᨟'), - ('᪠', '᪦'), - ('᪨', '᪭'), - ('᭎', '᭏'), - ('᭚', '᭠'), - ('᭽', '᭿'), - ('᯼', '᯿'), - ('᰻', '᰿'), - ('᱾', '᱿'), - ('᳀', '᳇'), - ('᳓', '᳓'), - ('‐', '‧'), - ('‰', '⁃'), - ('⁅', '⁑'), - ('⁓', '⁞'), - ('⁽', '⁾'), - ('₍', '₎'), - ('⌈', '⌋'), - ('〈', '〉'), - ('❨', '❵'), - ('⟅', '⟆'), - ('⟦', '⟯'), - ('⦃', '⦘'), - ('⧘', '⧛'), - ('⧼', '⧽'), - ('⳹', '⳼'), - ('⳾', '⳿'), - ('⵰', '⵰'), - ('⸀', '⸮'), - ('⸰', '⹏'), - ('⹒', '⹝'), - ('、', '〃'), - ('〈', '】'), - ('〔', '〟'), - ('〰', '〰'), - ('〽', '〽'), - ('゠', '゠'), - ('・', '・'), - ('꓾', '꓿'), - ('꘍', '꘏'), - ('꙳', '꙳'), - ('꙾', '꙾'), - ('꛲', '꛷'), - ('꡴', '꡷'), - ('꣎', '꣏'), - ('꣸', '꣺'), - ('꣼', '꣼'), - ('꤮', '꤯'), - ('꥟', '꥟'), - ('꧁', '꧍'), - ('꧞', '꧟'), - ('꩜', '꩟'), - ('꫞', '꫟'), - ('꫰', '꫱'), - ('꯫', '꯫'), - ('﴾', '﴿'), - ('︐', '︙'), - ('︰', '﹒'), - ('﹔', '﹡'), - ('﹣', '﹣'), - ('﹨', '﹨'), - ('﹪', '﹫'), - ('!', '#'), - ('%', '*'), - (',', '/'), - (':', ';'), - ('?', '@'), - ('[', ']'), - ('_', '_'), - ('{', '{'), - ('}', '}'), - ('⦅', '・'), - ('𐄀', '𐄂'), - ('𐎟', '𐎟'), - ('𐏐', '𐏐'), - ('𐕯', '𐕯'), - ('𐡗', '𐡗'), - ('𐤟', '𐤟'), - ('𐤿', '𐤿'), - ('𐩐', '𐩘'), - ('𐩿', '𐩿'), - ('𐫰', '𐫶'), - ('𐬹', '𐬿'), - ('𐮙', '𐮜'), - ('𐵮', '𐵮'), - ('𐺭', '𐺭'), - ('𐽕', '𐽙'), - ('𐾆', '𐾉'), - ('𑁇', '𑁍'), - ('𑂻', '𑂼'), - ('𑂾', '𑃁'), - ('𑅀', '𑅃'), - ('𑅴', '𑅵'), - ('𑇅', '𑇈'), - ('𑇍', '𑇍'), - ('𑇛', '𑇛'), - ('𑇝', '𑇟'), - ('𑈸', '𑈽'), - ('𑊩', '𑊩'), - ('𑏔', '𑏕'), - ('𑏗', '𑏘'), - ('𑑋', '𑑏'), - ('𑑚', '𑑛'), - ('𑑝', '𑑝'), - ('𑓆', '𑓆'), - ('𑗁', '𑗗'), - ('𑙁', '𑙃'), - ('𑙠', '𑙬'), - ('𑚹', '𑚹'), - ('𑜼', '𑜾'), - ('𑠻', '𑠻'), - ('𑥄', '𑥆'), - ('𑧢', '𑧢'), - ('𑨿', '𑩆'), - ('𑪚', '𑪜'), - ('𑪞', '𑪢'), - ('𑬀', '𑬉'), - ('𑯡', '𑯡'), - ('𑱁', '𑱅'), - ('𑱰', '𑱱'), - ('𑻷', '𑻸'), - ('𑽃', '𑽏'), - ('𑿿', '𑿿'), - ('𒑰', '𒑴'), - ('𒿱', '𒿲'), - ('𖩮', '𖩯'), - ('𖫵', '𖫵'), - ('𖬷', '𖬻'), - ('𖭄', '𖭄'), - ('𖵭', '𖵯'), - ('𖺗', '𖺚'), - ('𖿢', '𖿢'), - ('𛲟', '𛲟'), - ('𝪇', '𝪋'), - ('𞗿', '𞗿'), - ('𞥞', '𞥟'), -]; - -pub const SEPARATOR: &'static [(char, char)] = &[ - (' ', ' '), - ('\u{a0}', '\u{a0}'), - ('\u{1680}', '\u{1680}'), - ('\u{2000}', '\u{200a}'), - ('\u{2028}', '\u{2029}'), - ('\u{202f}', '\u{202f}'), - ('\u{205f}', '\u{205f}'), - ('\u{3000}', '\u{3000}'), -]; - -pub const SPACE_SEPARATOR: &'static [(char, char)] = &[ - (' ', ' '), - ('\u{a0}', '\u{a0}'), - ('\u{1680}', '\u{1680}'), - ('\u{2000}', '\u{200a}'), - ('\u{202f}', '\u{202f}'), - ('\u{205f}', '\u{205f}'), - ('\u{3000}', '\u{3000}'), -]; - -pub const SPACING_MARK: &'static [(char, char)] = &[ - ('ः', 'ः'), - ('ऻ', 'ऻ'), - ('ा', 'ी'), - ('ॉ', 'ौ'), - ('ॎ', 'ॏ'), - ('ং', 'ঃ'), - ('\u{9be}', 'ী'), - ('ে', 'ৈ'), - ('ো', 'ৌ'), - ('\u{9d7}', '\u{9d7}'), - ('ਃ', 'ਃ'), - ('ਾ', 'ੀ'), - ('ઃ', 'ઃ'), - ('ા', 'ી'), - ('ૉ', 'ૉ'), - ('ો', 'ૌ'), - ('ଂ', 'ଃ'), - ('\u{b3e}', '\u{b3e}'), - ('ୀ', 'ୀ'), - ('େ', 'ୈ'), - ('ୋ', 'ୌ'), - ('\u{b57}', '\u{b57}'), - ('\u{bbe}', 'ி'), - ('ு', 'ூ'), - ('ெ', 'ை'), - ('ொ', 'ௌ'), - ('\u{bd7}', '\u{bd7}'), - ('ఁ', 'ః'), - ('ు', 'ౄ'), - ('ಂ', 'ಃ'), - ('ಾ', 'ಾ'), - ('\u{cc0}', 'ೄ'), - ('\u{cc7}', '\u{cc8}'), - ('\u{cca}', '\u{ccb}'), - ('\u{cd5}', '\u{cd6}'), - ('ೳ', 'ೳ'), - ('ം', 'ഃ'), - ('\u{d3e}', 'ീ'), - ('െ', 'ൈ'), - ('ൊ', 'ൌ'), - ('\u{d57}', '\u{d57}'), - ('ං', 'ඃ'), - ('\u{dcf}', 'ෑ'), - ('ෘ', '\u{ddf}'), - ('ෲ', 'ෳ'), - ('༾', '༿'), - ('ཿ', 'ཿ'), - ('ါ', 'ာ'), - ('ေ', 'ေ'), - ('း', 'း'), - ('ျ', 'ြ'), - ('ၖ', 'ၗ'), - ('ၢ', 'ၤ'), - ('ၧ', 'ၭ'), - ('ႃ', 'ႄ'), - 
('ႇ', 'ႌ'), - ('ႏ', 'ႏ'), - ('ႚ', 'ႜ'), - ('\u{1715}', '\u{1715}'), - ('\u{1734}', '\u{1734}'), - ('ា', 'ា'), - ('ើ', 'ៅ'), - ('ះ', 'ៈ'), - ('ᤣ', 'ᤦ'), - ('ᤩ', 'ᤫ'), - ('ᤰ', 'ᤱ'), - ('ᤳ', 'ᤸ'), - ('ᨙ', 'ᨚ'), - ('ᩕ', 'ᩕ'), - ('ᩗ', 'ᩗ'), - ('ᩡ', 'ᩡ'), - ('ᩣ', 'ᩤ'), - ('ᩭ', 'ᩲ'), - ('ᬄ', 'ᬄ'), - ('\u{1b35}', '\u{1b35}'), - ('\u{1b3b}', '\u{1b3b}'), - ('\u{1b3d}', 'ᭁ'), - ('\u{1b43}', '\u{1b44}'), - ('ᮂ', 'ᮂ'), - ('ᮡ', 'ᮡ'), - ('ᮦ', 'ᮧ'), - ('\u{1baa}', '\u{1baa}'), - ('ᯧ', 'ᯧ'), - ('ᯪ', 'ᯬ'), - ('ᯮ', 'ᯮ'), - ('\u{1bf2}', '\u{1bf3}'), - ('ᰤ', 'ᰫ'), - ('ᰴ', 'ᰵ'), - ('᳡', '᳡'), - ('᳷', '᳷'), - ('\u{302e}', '\u{302f}'), - ('ꠣ', 'ꠤ'), - ('ꠧ', 'ꠧ'), - ('ꢀ', 'ꢁ'), - ('ꢴ', 'ꣃ'), - ('ꥒ', '\u{a953}'), - ('ꦃ', 'ꦃ'), - ('ꦴ', 'ꦵ'), - ('ꦺ', 'ꦻ'), - ('ꦾ', '\u{a9c0}'), - ('ꨯ', 'ꨰ'), - ('ꨳ', 'ꨴ'), - ('ꩍ', 'ꩍ'), - ('ꩻ', 'ꩻ'), - ('ꩽ', 'ꩽ'), - ('ꫫ', 'ꫫ'), - ('ꫮ', 'ꫯ'), - ('ꫵ', 'ꫵ'), - ('ꯣ', 'ꯤ'), - ('ꯦ', 'ꯧ'), - ('ꯩ', 'ꯪ'), - ('꯬', '꯬'), - ('𑀀', '𑀀'), - ('𑀂', '𑀂'), - ('𑂂', '𑂂'), - ('𑂰', '𑂲'), - ('𑂷', '𑂸'), - ('𑄬', '𑄬'), - ('𑅅', '𑅆'), - ('𑆂', '𑆂'), - ('𑆳', '𑆵'), - ('𑆿', '\u{111c0}'), - ('𑇎', '𑇎'), - ('𑈬', '𑈮'), - ('𑈲', '𑈳'), - ('\u{11235}', '\u{11235}'), - ('𑋠', '𑋢'), - ('𑌂', '𑌃'), - ('\u{1133e}', '𑌿'), - ('𑍁', '𑍄'), - ('𑍇', '𑍈'), - ('𑍋', '\u{1134d}'), - ('\u{11357}', '\u{11357}'), - ('𑍢', '𑍣'), - ('\u{113b8}', '𑎺'), - ('\u{113c2}', '\u{113c2}'), - ('\u{113c5}', '\u{113c5}'), - ('\u{113c7}', '𑏊'), - ('𑏌', '𑏍'), - ('\u{113cf}', '\u{113cf}'), - ('𑐵', '𑐷'), - ('𑑀', '𑑁'), - ('𑑅', '𑑅'), - ('\u{114b0}', '𑒲'), - ('𑒹', '𑒹'), - ('𑒻', '𑒾'), - ('𑓁', '𑓁'), - ('\u{115af}', '𑖱'), - ('𑖸', '𑖻'), - ('𑖾', '𑖾'), - ('𑘰', '𑘲'), - ('𑘻', '𑘼'), - ('𑘾', '𑘾'), - ('𑚬', '𑚬'), - ('𑚮', '𑚯'), - ('\u{116b6}', '\u{116b6}'), - ('𑜞', '𑜞'), - ('𑜠', '𑜡'), - ('𑜦', '𑜦'), - ('𑠬', '𑠮'), - ('𑠸', '𑠸'), - ('\u{11930}', '𑤵'), - ('𑤷', '𑤸'), - ('\u{1193d}', '\u{1193d}'), - ('𑥀', '𑥀'), - ('𑥂', '𑥂'), - ('𑧑', '𑧓'), - ('𑧜', '𑧟'), - ('𑧤', '𑧤'), - ('𑨹', '𑨹'), - ('𑩗', '𑩘'), - ('𑪗', '𑪗'), - ('𑰯', '𑰯'), - ('𑰾', '𑰾'), - ('𑲩', '𑲩'), - ('𑲱', '𑲱'), - ('𑲴', '𑲴'), - ('𑶊', '𑶎'), - ('𑶓', '𑶔'), - ('𑶖', '𑶖'), - ('𑻵', '𑻶'), - ('𑼃', '𑼃'), - ('𑼴', '𑼵'), - ('𑼾', '𑼿'), - ('\u{11f41}', '\u{11f41}'), - ('𖄪', '𖄬'), - ('𖽑', '𖾇'), - ('\u{16ff0}', '\u{16ff1}'), - ('\u{1d165}', '\u{1d166}'), - ('\u{1d16d}', '\u{1d172}'), -]; - -pub const SYMBOL: &'static [(char, char)] = &[ - ('$', '$'), - ('+', '+'), - ('<', '>'), - ('^', '^'), - ('`', '`'), - ('|', '|'), - ('~', '~'), - ('¢', '¦'), - ('¨', '©'), - ('¬', '¬'), - ('®', '±'), - ('´', '´'), - ('¸', '¸'), - ('×', '×'), - ('÷', '÷'), - ('˂', '˅'), - ('˒', '˟'), - ('˥', '˫'), - ('˭', '˭'), - ('˯', '˿'), - ('͵', '͵'), - ('΄', '΅'), - ('϶', '϶'), - ('҂', '҂'), - ('֍', '֏'), - ('؆', '؈'), - ('؋', '؋'), - ('؎', '؏'), - ('۞', '۞'), - ('۩', '۩'), - ('۽', '۾'), - ('߶', '߶'), - ('߾', '߿'), - ('࢈', '࢈'), - ('৲', '৳'), - ('৺', '৻'), - ('૱', '૱'), - ('୰', '୰'), - ('௳', '௺'), - ('౿', '౿'), - ('൏', '൏'), - ('൹', '൹'), - ('฿', '฿'), - ('༁', '༃'), - ('༓', '༓'), - ('༕', '༗'), - ('༚', '༟'), - ('༴', '༴'), - ('༶', '༶'), - ('༸', '༸'), - ('྾', '࿅'), - ('࿇', '࿌'), - ('࿎', '࿏'), - ('࿕', '࿘'), - ('႞', '႟'), - ('᎐', '᎙'), - ('᙭', '᙭'), - ('៛', '៛'), - ('᥀', '᥀'), - ('᧞', '᧿'), - ('᭡', '᭪'), - ('᭴', '᭼'), - ('᾽', '᾽'), - ('᾿', '῁'), - ('῍', '῏'), - ('῝', '῟'), - ('῭', '`'), - ('´', '῾'), - ('⁄', '⁄'), - ('⁒', '⁒'), - ('⁺', '⁼'), - ('₊', '₌'), - ('₠', '⃀'), - ('℀', '℁'), - ('℃', '℆'), - ('℈', '℉'), - ('℔', '℔'), - ('№', '℘'), - ('℞', '℣'), - ('℥', '℥'), - ('℧', '℧'), - ('℩', '℩'), - ('℮', '℮'), - ('℺', '℻'), - ('⅀', '⅄'), - ('⅊', '⅍'), - ('⅏', '⅏'), - ('↊', '↋'), - ('←', 
'⌇'), - ('⌌', '⌨'), - ('⌫', '␩'), - ('⑀', '⑊'), - ('⒜', 'ⓩ'), - ('─', '❧'), - ('➔', '⟄'), - ('⟇', '⟥'), - ('⟰', '⦂'), - ('⦙', '⧗'), - ('⧜', '⧻'), - ('⧾', '⭳'), - ('⭶', '⮕'), - ('⮗', '⯿'), - ('⳥', '⳪'), - ('⹐', '⹑'), - ('⺀', '⺙'), - ('⺛', '⻳'), - ('⼀', '⿕'), - ('⿰', '⿿'), - ('〄', '〄'), - ('〒', '〓'), - ('〠', '〠'), - ('〶', '〷'), - ('〾', '〿'), - ('゛', '゜'), - ('㆐', '㆑'), - ('㆖', '㆟'), - ('㇀', '㇥'), - ('㇯', '㇯'), - ('㈀', '㈞'), - ('㈪', '㉇'), - ('㉐', '㉐'), - ('㉠', '㉿'), - ('㊊', '㊰'), - ('㋀', '㏿'), - ('䷀', '䷿'), - ('꒐', '꓆'), - ('꜀', '꜖'), - ('꜠', '꜡'), - ('꞉', '꞊'), - ('꠨', '꠫'), - ('꠶', '꠹'), - ('꩷', '꩹'), - ('꭛', '꭛'), - ('꭪', '꭫'), - ('﬩', '﬩'), - ('﮲', '﯂'), - ('﵀', '﵏'), - ('﷏', '﷏'), - ('﷼', '﷿'), - ('﹢', '﹢'), - ('﹤', '﹦'), - ('﹩', '﹩'), - ('$', '$'), - ('+', '+'), - ('<', '>'), - ('^', '^'), - ('`', '`'), - ('|', '|'), - ('~', '~'), - ('¢', '₩'), - ('│', '○'), - ('', '�'), - ('𐄷', '𐄿'), - ('𐅹', '𐆉'), - ('𐆌', '𐆎'), - ('𐆐', '𐆜'), - ('𐆠', '𐆠'), - ('𐇐', '𐇼'), - ('𐡷', '𐡸'), - ('𐫈', '𐫈'), - ('𐶎', '𐶏'), - ('𑜿', '𑜿'), - ('𑿕', '𑿱'), - ('𖬼', '𖬿'), - ('𖭅', '𖭅'), - ('𛲜', '𛲜'), - ('𜰀', '𜳯'), - ('𜴀', '𜺳'), - ('𜽐', '𜿃'), - ('𝀀', '𝃵'), - ('𝄀', '𝄦'), - ('𝄩', '𝅘𝅥𝅲'), - ('𝅪', '𝅬'), - ('𝆃', '𝆄'), - ('𝆌', '𝆩'), - ('𝆮', '𝇪'), - ('𝈀', '𝉁'), - ('𝉅', '𝉅'), - ('𝌀', '𝍖'), - ('𝛁', '𝛁'), - ('𝛛', '𝛛'), - ('𝛻', '𝛻'), - ('𝜕', '𝜕'), - ('𝜵', '𝜵'), - ('𝝏', '𝝏'), - ('𝝯', '𝝯'), - ('𝞉', '𝞉'), - ('𝞩', '𝞩'), - ('𝟃', '𝟃'), - ('𝠀', '𝧿'), - ('𝨷', '𝨺'), - ('𝩭', '𝩴'), - ('𝩶', '𝪃'), - ('𝪅', '𝪆'), - ('𞅏', '𞅏'), - ('𞋿', '𞋿'), - ('𞲬', '𞲬'), - ('𞲰', '𞲰'), - ('𞴮', '𞴮'), - ('𞻰', '𞻱'), - ('🀀', '🀫'), - ('🀰', '🂓'), - ('🂠', '🂮'), - ('🂱', '🂿'), - ('🃁', '🃏'), - ('🃑', '🃵'), - ('🄍', '🆭'), - ('🇦', '🈂'), - ('🈐', '🈻'), - ('🉀', '🉈'), - ('🉐', '🉑'), - ('🉠', '🉥'), - ('🌀', '🛗'), - ('🛜', '🛬'), - ('🛰', '🛼'), - ('🜀', '🝶'), - ('🝻', '🟙'), - ('🟠', '🟫'), - ('🟰', '🟰'), - ('🠀', '🠋'), - ('🠐', '🡇'), - ('🡐', '🡙'), - ('🡠', '🢇'), - ('🢐', '🢭'), - ('🢰', '🢻'), - ('🣀', '🣁'), - ('🤀', '🩓'), - ('🩠', '🩭'), - ('🩰', '🩼'), - ('🪀', '🪉'), - ('🪏', '🫆'), - ('🫎', '🫜'), - ('🫟', '🫩'), - ('🫰', '🫸'), - ('🬀', '🮒'), - ('🮔', '🯯'), -]; - -pub const TITLECASE_LETTER: &'static [(char, char)] = &[ - ('Dž', 'Dž'), - ('Lj', 'Lj'), - ('Nj', 'Nj'), - ('Dz', 'Dz'), - ('ᾈ', 'ᾏ'), - ('ᾘ', 'ᾟ'), - ('ᾨ', 'ᾯ'), - ('ᾼ', 'ᾼ'), - ('ῌ', 'ῌ'), - ('ῼ', 'ῼ'), -]; - -pub const UNASSIGNED: &'static [(char, char)] = &[ - ('\u{378}', '\u{379}'), - ('\u{380}', '\u{383}'), - ('\u{38b}', '\u{38b}'), - ('\u{38d}', '\u{38d}'), - ('\u{3a2}', '\u{3a2}'), - ('\u{530}', '\u{530}'), - ('\u{557}', '\u{558}'), - ('\u{58b}', '\u{58c}'), - ('\u{590}', '\u{590}'), - ('\u{5c8}', '\u{5cf}'), - ('\u{5eb}', '\u{5ee}'), - ('\u{5f5}', '\u{5ff}'), - ('\u{70e}', '\u{70e}'), - ('\u{74b}', '\u{74c}'), - ('\u{7b2}', '\u{7bf}'), - ('\u{7fb}', '\u{7fc}'), - ('\u{82e}', '\u{82f}'), - ('\u{83f}', '\u{83f}'), - ('\u{85c}', '\u{85d}'), - ('\u{85f}', '\u{85f}'), - ('\u{86b}', '\u{86f}'), - ('\u{88f}', '\u{88f}'), - ('\u{892}', '\u{896}'), - ('\u{984}', '\u{984}'), - ('\u{98d}', '\u{98e}'), - ('\u{991}', '\u{992}'), - ('\u{9a9}', '\u{9a9}'), - ('\u{9b1}', '\u{9b1}'), - ('\u{9b3}', '\u{9b5}'), - ('\u{9ba}', '\u{9bb}'), - ('\u{9c5}', '\u{9c6}'), - ('\u{9c9}', '\u{9ca}'), - ('\u{9cf}', '\u{9d6}'), - ('\u{9d8}', '\u{9db}'), - ('\u{9de}', '\u{9de}'), - ('\u{9e4}', '\u{9e5}'), - ('\u{9ff}', '\u{a00}'), - ('\u{a04}', '\u{a04}'), - ('\u{a0b}', '\u{a0e}'), - ('\u{a11}', '\u{a12}'), - ('\u{a29}', '\u{a29}'), - ('\u{a31}', '\u{a31}'), - ('\u{a34}', '\u{a34}'), - ('\u{a37}', '\u{a37}'), - ('\u{a3a}', '\u{a3b}'), - ('\u{a3d}', '\u{a3d}'), - ('\u{a43}', 
'\u{a46}'), - ('\u{a49}', '\u{a4a}'), - ('\u{a4e}', '\u{a50}'), - ('\u{a52}', '\u{a58}'), - ('\u{a5d}', '\u{a5d}'), - ('\u{a5f}', '\u{a65}'), - ('\u{a77}', '\u{a80}'), - ('\u{a84}', '\u{a84}'), - ('\u{a8e}', '\u{a8e}'), - ('\u{a92}', '\u{a92}'), - ('\u{aa9}', '\u{aa9}'), - ('\u{ab1}', '\u{ab1}'), - ('\u{ab4}', '\u{ab4}'), - ('\u{aba}', '\u{abb}'), - ('\u{ac6}', '\u{ac6}'), - ('\u{aca}', '\u{aca}'), - ('\u{ace}', '\u{acf}'), - ('\u{ad1}', '\u{adf}'), - ('\u{ae4}', '\u{ae5}'), - ('\u{af2}', '\u{af8}'), - ('\u{b00}', '\u{b00}'), - ('\u{b04}', '\u{b04}'), - ('\u{b0d}', '\u{b0e}'), - ('\u{b11}', '\u{b12}'), - ('\u{b29}', '\u{b29}'), - ('\u{b31}', '\u{b31}'), - ('\u{b34}', '\u{b34}'), - ('\u{b3a}', '\u{b3b}'), - ('\u{b45}', '\u{b46}'), - ('\u{b49}', '\u{b4a}'), - ('\u{b4e}', '\u{b54}'), - ('\u{b58}', '\u{b5b}'), - ('\u{b5e}', '\u{b5e}'), - ('\u{b64}', '\u{b65}'), - ('\u{b78}', '\u{b81}'), - ('\u{b84}', '\u{b84}'), - ('\u{b8b}', '\u{b8d}'), - ('\u{b91}', '\u{b91}'), - ('\u{b96}', '\u{b98}'), - ('\u{b9b}', '\u{b9b}'), - ('\u{b9d}', '\u{b9d}'), - ('\u{ba0}', '\u{ba2}'), - ('\u{ba5}', '\u{ba7}'), - ('\u{bab}', '\u{bad}'), - ('\u{bba}', '\u{bbd}'), - ('\u{bc3}', '\u{bc5}'), - ('\u{bc9}', '\u{bc9}'), - ('\u{bce}', '\u{bcf}'), - ('\u{bd1}', '\u{bd6}'), - ('\u{bd8}', '\u{be5}'), - ('\u{bfb}', '\u{bff}'), - ('\u{c0d}', '\u{c0d}'), - ('\u{c11}', '\u{c11}'), - ('\u{c29}', '\u{c29}'), - ('\u{c3a}', '\u{c3b}'), - ('\u{c45}', '\u{c45}'), - ('\u{c49}', '\u{c49}'), - ('\u{c4e}', '\u{c54}'), - ('\u{c57}', '\u{c57}'), - ('\u{c5b}', '\u{c5c}'), - ('\u{c5e}', '\u{c5f}'), - ('\u{c64}', '\u{c65}'), - ('\u{c70}', '\u{c76}'), - ('\u{c8d}', '\u{c8d}'), - ('\u{c91}', '\u{c91}'), - ('\u{ca9}', '\u{ca9}'), - ('\u{cb4}', '\u{cb4}'), - ('\u{cba}', '\u{cbb}'), - ('\u{cc5}', '\u{cc5}'), - ('\u{cc9}', '\u{cc9}'), - ('\u{cce}', '\u{cd4}'), - ('\u{cd7}', '\u{cdc}'), - ('\u{cdf}', '\u{cdf}'), - ('\u{ce4}', '\u{ce5}'), - ('\u{cf0}', '\u{cf0}'), - ('\u{cf4}', '\u{cff}'), - ('\u{d0d}', '\u{d0d}'), - ('\u{d11}', '\u{d11}'), - ('\u{d45}', '\u{d45}'), - ('\u{d49}', '\u{d49}'), - ('\u{d50}', '\u{d53}'), - ('\u{d64}', '\u{d65}'), - ('\u{d80}', '\u{d80}'), - ('\u{d84}', '\u{d84}'), - ('\u{d97}', '\u{d99}'), - ('\u{db2}', '\u{db2}'), - ('\u{dbc}', '\u{dbc}'), - ('\u{dbe}', '\u{dbf}'), - ('\u{dc7}', '\u{dc9}'), - ('\u{dcb}', '\u{dce}'), - ('\u{dd5}', '\u{dd5}'), - ('\u{dd7}', '\u{dd7}'), - ('\u{de0}', '\u{de5}'), - ('\u{df0}', '\u{df1}'), - ('\u{df5}', '\u{e00}'), - ('\u{e3b}', '\u{e3e}'), - ('\u{e5c}', '\u{e80}'), - ('\u{e83}', '\u{e83}'), - ('\u{e85}', '\u{e85}'), - ('\u{e8b}', '\u{e8b}'), - ('\u{ea4}', '\u{ea4}'), - ('\u{ea6}', '\u{ea6}'), - ('\u{ebe}', '\u{ebf}'), - ('\u{ec5}', '\u{ec5}'), - ('\u{ec7}', '\u{ec7}'), - ('\u{ecf}', '\u{ecf}'), - ('\u{eda}', '\u{edb}'), - ('\u{ee0}', '\u{eff}'), - ('\u{f48}', '\u{f48}'), - ('\u{f6d}', '\u{f70}'), - ('\u{f98}', '\u{f98}'), - ('\u{fbd}', '\u{fbd}'), - ('\u{fcd}', '\u{fcd}'), - ('\u{fdb}', '\u{fff}'), - ('\u{10c6}', '\u{10c6}'), - ('\u{10c8}', '\u{10cc}'), - ('\u{10ce}', '\u{10cf}'), - ('\u{1249}', '\u{1249}'), - ('\u{124e}', '\u{124f}'), - ('\u{1257}', '\u{1257}'), - ('\u{1259}', '\u{1259}'), - ('\u{125e}', '\u{125f}'), - ('\u{1289}', '\u{1289}'), - ('\u{128e}', '\u{128f}'), - ('\u{12b1}', '\u{12b1}'), - ('\u{12b6}', '\u{12b7}'), - ('\u{12bf}', '\u{12bf}'), - ('\u{12c1}', '\u{12c1}'), - ('\u{12c6}', '\u{12c7}'), - ('\u{12d7}', '\u{12d7}'), - ('\u{1311}', '\u{1311}'), - ('\u{1316}', '\u{1317}'), - ('\u{135b}', '\u{135c}'), - ('\u{137d}', '\u{137f}'), - ('\u{139a}', '\u{139f}'), - ('\u{13f6}', 
'\u{13f7}'), - ('\u{13fe}', '\u{13ff}'), - ('\u{169d}', '\u{169f}'), - ('\u{16f9}', '\u{16ff}'), - ('\u{1716}', '\u{171e}'), - ('\u{1737}', '\u{173f}'), - ('\u{1754}', '\u{175f}'), - ('\u{176d}', '\u{176d}'), - ('\u{1771}', '\u{1771}'), - ('\u{1774}', '\u{177f}'), - ('\u{17de}', '\u{17df}'), - ('\u{17ea}', '\u{17ef}'), - ('\u{17fa}', '\u{17ff}'), - ('\u{181a}', '\u{181f}'), - ('\u{1879}', '\u{187f}'), - ('\u{18ab}', '\u{18af}'), - ('\u{18f6}', '\u{18ff}'), - ('\u{191f}', '\u{191f}'), - ('\u{192c}', '\u{192f}'), - ('\u{193c}', '\u{193f}'), - ('\u{1941}', '\u{1943}'), - ('\u{196e}', '\u{196f}'), - ('\u{1975}', '\u{197f}'), - ('\u{19ac}', '\u{19af}'), - ('\u{19ca}', '\u{19cf}'), - ('\u{19db}', '\u{19dd}'), - ('\u{1a1c}', '\u{1a1d}'), - ('\u{1a5f}', '\u{1a5f}'), - ('\u{1a7d}', '\u{1a7e}'), - ('\u{1a8a}', '\u{1a8f}'), - ('\u{1a9a}', '\u{1a9f}'), - ('\u{1aae}', '\u{1aaf}'), - ('\u{1acf}', '\u{1aff}'), - ('\u{1b4d}', '\u{1b4d}'), - ('\u{1bf4}', '\u{1bfb}'), - ('\u{1c38}', '\u{1c3a}'), - ('\u{1c4a}', '\u{1c4c}'), - ('\u{1c8b}', '\u{1c8f}'), - ('\u{1cbb}', '\u{1cbc}'), - ('\u{1cc8}', '\u{1ccf}'), - ('\u{1cfb}', '\u{1cff}'), - ('\u{1f16}', '\u{1f17}'), - ('\u{1f1e}', '\u{1f1f}'), - ('\u{1f46}', '\u{1f47}'), - ('\u{1f4e}', '\u{1f4f}'), - ('\u{1f58}', '\u{1f58}'), - ('\u{1f5a}', '\u{1f5a}'), - ('\u{1f5c}', '\u{1f5c}'), - ('\u{1f5e}', '\u{1f5e}'), - ('\u{1f7e}', '\u{1f7f}'), - ('\u{1fb5}', '\u{1fb5}'), - ('\u{1fc5}', '\u{1fc5}'), - ('\u{1fd4}', '\u{1fd5}'), - ('\u{1fdc}', '\u{1fdc}'), - ('\u{1ff0}', '\u{1ff1}'), - ('\u{1ff5}', '\u{1ff5}'), - ('\u{1fff}', '\u{1fff}'), - ('\u{2065}', '\u{2065}'), - ('\u{2072}', '\u{2073}'), - ('\u{208f}', '\u{208f}'), - ('\u{209d}', '\u{209f}'), - ('\u{20c1}', '\u{20cf}'), - ('\u{20f1}', '\u{20ff}'), - ('\u{218c}', '\u{218f}'), - ('\u{242a}', '\u{243f}'), - ('\u{244b}', '\u{245f}'), - ('\u{2b74}', '\u{2b75}'), - ('\u{2b96}', '\u{2b96}'), - ('\u{2cf4}', '\u{2cf8}'), - ('\u{2d26}', '\u{2d26}'), - ('\u{2d28}', '\u{2d2c}'), - ('\u{2d2e}', '\u{2d2f}'), - ('\u{2d68}', '\u{2d6e}'), - ('\u{2d71}', '\u{2d7e}'), - ('\u{2d97}', '\u{2d9f}'), - ('\u{2da7}', '\u{2da7}'), - ('\u{2daf}', '\u{2daf}'), - ('\u{2db7}', '\u{2db7}'), - ('\u{2dbf}', '\u{2dbf}'), - ('\u{2dc7}', '\u{2dc7}'), - ('\u{2dcf}', '\u{2dcf}'), - ('\u{2dd7}', '\u{2dd7}'), - ('\u{2ddf}', '\u{2ddf}'), - ('\u{2e5e}', '\u{2e7f}'), - ('\u{2e9a}', '\u{2e9a}'), - ('\u{2ef4}', '\u{2eff}'), - ('\u{2fd6}', '\u{2fef}'), - ('\u{3040}', '\u{3040}'), - ('\u{3097}', '\u{3098}'), - ('\u{3100}', '\u{3104}'), - ('\u{3130}', '\u{3130}'), - ('\u{318f}', '\u{318f}'), - ('\u{31e6}', '\u{31ee}'), - ('\u{321f}', '\u{321f}'), - ('\u{a48d}', '\u{a48f}'), - ('\u{a4c7}', '\u{a4cf}'), - ('\u{a62c}', '\u{a63f}'), - ('\u{a6f8}', '\u{a6ff}'), - ('\u{a7ce}', '\u{a7cf}'), - ('\u{a7d2}', '\u{a7d2}'), - ('\u{a7d4}', '\u{a7d4}'), - ('\u{a7dd}', '\u{a7f1}'), - ('\u{a82d}', '\u{a82f}'), - ('\u{a83a}', '\u{a83f}'), - ('\u{a878}', '\u{a87f}'), - ('\u{a8c6}', '\u{a8cd}'), - ('\u{a8da}', '\u{a8df}'), - ('\u{a954}', '\u{a95e}'), - ('\u{a97d}', '\u{a97f}'), - ('\u{a9ce}', '\u{a9ce}'), - ('\u{a9da}', '\u{a9dd}'), - ('\u{a9ff}', '\u{a9ff}'), - ('\u{aa37}', '\u{aa3f}'), - ('\u{aa4e}', '\u{aa4f}'), - ('\u{aa5a}', '\u{aa5b}'), - ('\u{aac3}', '\u{aada}'), - ('\u{aaf7}', '\u{ab00}'), - ('\u{ab07}', '\u{ab08}'), - ('\u{ab0f}', '\u{ab10}'), - ('\u{ab17}', '\u{ab1f}'), - ('\u{ab27}', '\u{ab27}'), - ('\u{ab2f}', '\u{ab2f}'), - ('\u{ab6c}', '\u{ab6f}'), - ('\u{abee}', '\u{abef}'), - ('\u{abfa}', '\u{abff}'), - ('\u{d7a4}', '\u{d7af}'), - ('\u{d7c7}', '\u{d7ca}'), - 
('\u{d7fc}', '\u{d7ff}'), - ('\u{fa6e}', '\u{fa6f}'), - ('\u{fada}', '\u{faff}'), - ('\u{fb07}', '\u{fb12}'), - ('\u{fb18}', '\u{fb1c}'), - ('\u{fb37}', '\u{fb37}'), - ('\u{fb3d}', '\u{fb3d}'), - ('\u{fb3f}', '\u{fb3f}'), - ('\u{fb42}', '\u{fb42}'), - ('\u{fb45}', '\u{fb45}'), - ('\u{fbc3}', '\u{fbd2}'), - ('\u{fd90}', '\u{fd91}'), - ('\u{fdc8}', '\u{fdce}'), - ('\u{fdd0}', '\u{fdef}'), - ('\u{fe1a}', '\u{fe1f}'), - ('\u{fe53}', '\u{fe53}'), - ('\u{fe67}', '\u{fe67}'), - ('\u{fe6c}', '\u{fe6f}'), - ('\u{fe75}', '\u{fe75}'), - ('\u{fefd}', '\u{fefe}'), - ('\u{ff00}', '\u{ff00}'), - ('\u{ffbf}', '\u{ffc1}'), - ('\u{ffc8}', '\u{ffc9}'), - ('\u{ffd0}', '\u{ffd1}'), - ('\u{ffd8}', '\u{ffd9}'), - ('\u{ffdd}', '\u{ffdf}'), - ('\u{ffe7}', '\u{ffe7}'), - ('\u{ffef}', '\u{fff8}'), - ('\u{fffe}', '\u{ffff}'), - ('\u{1000c}', '\u{1000c}'), - ('\u{10027}', '\u{10027}'), - ('\u{1003b}', '\u{1003b}'), - ('\u{1003e}', '\u{1003e}'), - ('\u{1004e}', '\u{1004f}'), - ('\u{1005e}', '\u{1007f}'), - ('\u{100fb}', '\u{100ff}'), - ('\u{10103}', '\u{10106}'), - ('\u{10134}', '\u{10136}'), - ('\u{1018f}', '\u{1018f}'), - ('\u{1019d}', '\u{1019f}'), - ('\u{101a1}', '\u{101cf}'), - ('\u{101fe}', '\u{1027f}'), - ('\u{1029d}', '\u{1029f}'), - ('\u{102d1}', '\u{102df}'), - ('\u{102fc}', '\u{102ff}'), - ('\u{10324}', '\u{1032c}'), - ('\u{1034b}', '\u{1034f}'), - ('\u{1037b}', '\u{1037f}'), - ('\u{1039e}', '\u{1039e}'), - ('\u{103c4}', '\u{103c7}'), - ('\u{103d6}', '\u{103ff}'), - ('\u{1049e}', '\u{1049f}'), - ('\u{104aa}', '\u{104af}'), - ('\u{104d4}', '\u{104d7}'), - ('\u{104fc}', '\u{104ff}'), - ('\u{10528}', '\u{1052f}'), - ('\u{10564}', '\u{1056e}'), - ('\u{1057b}', '\u{1057b}'), - ('\u{1058b}', '\u{1058b}'), - ('\u{10593}', '\u{10593}'), - ('\u{10596}', '\u{10596}'), - ('\u{105a2}', '\u{105a2}'), - ('\u{105b2}', '\u{105b2}'), - ('\u{105ba}', '\u{105ba}'), - ('\u{105bd}', '\u{105bf}'), - ('\u{105f4}', '\u{105ff}'), - ('\u{10737}', '\u{1073f}'), - ('\u{10756}', '\u{1075f}'), - ('\u{10768}', '\u{1077f}'), - ('\u{10786}', '\u{10786}'), - ('\u{107b1}', '\u{107b1}'), - ('\u{107bb}', '\u{107ff}'), - ('\u{10806}', '\u{10807}'), - ('\u{10809}', '\u{10809}'), - ('\u{10836}', '\u{10836}'), - ('\u{10839}', '\u{1083b}'), - ('\u{1083d}', '\u{1083e}'), - ('\u{10856}', '\u{10856}'), - ('\u{1089f}', '\u{108a6}'), - ('\u{108b0}', '\u{108df}'), - ('\u{108f3}', '\u{108f3}'), - ('\u{108f6}', '\u{108fa}'), - ('\u{1091c}', '\u{1091e}'), - ('\u{1093a}', '\u{1093e}'), - ('\u{10940}', '\u{1097f}'), - ('\u{109b8}', '\u{109bb}'), - ('\u{109d0}', '\u{109d1}'), - ('\u{10a04}', '\u{10a04}'), - ('\u{10a07}', '\u{10a0b}'), - ('\u{10a14}', '\u{10a14}'), - ('\u{10a18}', '\u{10a18}'), - ('\u{10a36}', '\u{10a37}'), - ('\u{10a3b}', '\u{10a3e}'), - ('\u{10a49}', '\u{10a4f}'), - ('\u{10a59}', '\u{10a5f}'), - ('\u{10aa0}', '\u{10abf}'), - ('\u{10ae7}', '\u{10aea}'), - ('\u{10af7}', '\u{10aff}'), - ('\u{10b36}', '\u{10b38}'), - ('\u{10b56}', '\u{10b57}'), - ('\u{10b73}', '\u{10b77}'), - ('\u{10b92}', '\u{10b98}'), - ('\u{10b9d}', '\u{10ba8}'), - ('\u{10bb0}', '\u{10bff}'), - ('\u{10c49}', '\u{10c7f}'), - ('\u{10cb3}', '\u{10cbf}'), - ('\u{10cf3}', '\u{10cf9}'), - ('\u{10d28}', '\u{10d2f}'), - ('\u{10d3a}', '\u{10d3f}'), - ('\u{10d66}', '\u{10d68}'), - ('\u{10d86}', '\u{10d8d}'), - ('\u{10d90}', '\u{10e5f}'), - ('\u{10e7f}', '\u{10e7f}'), - ('\u{10eaa}', '\u{10eaa}'), - ('\u{10eae}', '\u{10eaf}'), - ('\u{10eb2}', '\u{10ec1}'), - ('\u{10ec5}', '\u{10efb}'), - ('\u{10f28}', '\u{10f2f}'), - ('\u{10f5a}', '\u{10f6f}'), - ('\u{10f8a}', '\u{10faf}'), - 
('\u{10fcc}', '\u{10fdf}'), - ('\u{10ff7}', '\u{10fff}'), - ('\u{1104e}', '\u{11051}'), - ('\u{11076}', '\u{1107e}'), - ('\u{110c3}', '\u{110cc}'), - ('\u{110ce}', '\u{110cf}'), - ('\u{110e9}', '\u{110ef}'), - ('\u{110fa}', '\u{110ff}'), - ('\u{11135}', '\u{11135}'), - ('\u{11148}', '\u{1114f}'), - ('\u{11177}', '\u{1117f}'), - ('\u{111e0}', '\u{111e0}'), - ('\u{111f5}', '\u{111ff}'), - ('\u{11212}', '\u{11212}'), - ('\u{11242}', '\u{1127f}'), - ('\u{11287}', '\u{11287}'), - ('\u{11289}', '\u{11289}'), - ('\u{1128e}', '\u{1128e}'), - ('\u{1129e}', '\u{1129e}'), - ('\u{112aa}', '\u{112af}'), - ('\u{112eb}', '\u{112ef}'), - ('\u{112fa}', '\u{112ff}'), - ('\u{11304}', '\u{11304}'), - ('\u{1130d}', '\u{1130e}'), - ('\u{11311}', '\u{11312}'), - ('\u{11329}', '\u{11329}'), - ('\u{11331}', '\u{11331}'), - ('\u{11334}', '\u{11334}'), - ('\u{1133a}', '\u{1133a}'), - ('\u{11345}', '\u{11346}'), - ('\u{11349}', '\u{1134a}'), - ('\u{1134e}', '\u{1134f}'), - ('\u{11351}', '\u{11356}'), - ('\u{11358}', '\u{1135c}'), - ('\u{11364}', '\u{11365}'), - ('\u{1136d}', '\u{1136f}'), - ('\u{11375}', '\u{1137f}'), - ('\u{1138a}', '\u{1138a}'), - ('\u{1138c}', '\u{1138d}'), - ('\u{1138f}', '\u{1138f}'), - ('\u{113b6}', '\u{113b6}'), - ('\u{113c1}', '\u{113c1}'), - ('\u{113c3}', '\u{113c4}'), - ('\u{113c6}', '\u{113c6}'), - ('\u{113cb}', '\u{113cb}'), - ('\u{113d6}', '\u{113d6}'), - ('\u{113d9}', '\u{113e0}'), - ('\u{113e3}', '\u{113ff}'), - ('\u{1145c}', '\u{1145c}'), - ('\u{11462}', '\u{1147f}'), - ('\u{114c8}', '\u{114cf}'), - ('\u{114da}', '\u{1157f}'), - ('\u{115b6}', '\u{115b7}'), - ('\u{115de}', '\u{115ff}'), - ('\u{11645}', '\u{1164f}'), - ('\u{1165a}', '\u{1165f}'), - ('\u{1166d}', '\u{1167f}'), - ('\u{116ba}', '\u{116bf}'), - ('\u{116ca}', '\u{116cf}'), - ('\u{116e4}', '\u{116ff}'), - ('\u{1171b}', '\u{1171c}'), - ('\u{1172c}', '\u{1172f}'), - ('\u{11747}', '\u{117ff}'), - ('\u{1183c}', '\u{1189f}'), - ('\u{118f3}', '\u{118fe}'), - ('\u{11907}', '\u{11908}'), - ('\u{1190a}', '\u{1190b}'), - ('\u{11914}', '\u{11914}'), - ('\u{11917}', '\u{11917}'), - ('\u{11936}', '\u{11936}'), - ('\u{11939}', '\u{1193a}'), - ('\u{11947}', '\u{1194f}'), - ('\u{1195a}', '\u{1199f}'), - ('\u{119a8}', '\u{119a9}'), - ('\u{119d8}', '\u{119d9}'), - ('\u{119e5}', '\u{119ff}'), - ('\u{11a48}', '\u{11a4f}'), - ('\u{11aa3}', '\u{11aaf}'), - ('\u{11af9}', '\u{11aff}'), - ('\u{11b0a}', '\u{11bbf}'), - ('\u{11be2}', '\u{11bef}'), - ('\u{11bfa}', '\u{11bff}'), - ('\u{11c09}', '\u{11c09}'), - ('\u{11c37}', '\u{11c37}'), - ('\u{11c46}', '\u{11c4f}'), - ('\u{11c6d}', '\u{11c6f}'), - ('\u{11c90}', '\u{11c91}'), - ('\u{11ca8}', '\u{11ca8}'), - ('\u{11cb7}', '\u{11cff}'), - ('\u{11d07}', '\u{11d07}'), - ('\u{11d0a}', '\u{11d0a}'), - ('\u{11d37}', '\u{11d39}'), - ('\u{11d3b}', '\u{11d3b}'), - ('\u{11d3e}', '\u{11d3e}'), - ('\u{11d48}', '\u{11d4f}'), - ('\u{11d5a}', '\u{11d5f}'), - ('\u{11d66}', '\u{11d66}'), - ('\u{11d69}', '\u{11d69}'), - ('\u{11d8f}', '\u{11d8f}'), - ('\u{11d92}', '\u{11d92}'), - ('\u{11d99}', '\u{11d9f}'), - ('\u{11daa}', '\u{11edf}'), - ('\u{11ef9}', '\u{11eff}'), - ('\u{11f11}', '\u{11f11}'), - ('\u{11f3b}', '\u{11f3d}'), - ('\u{11f5b}', '\u{11faf}'), - ('\u{11fb1}', '\u{11fbf}'), - ('\u{11ff2}', '\u{11ffe}'), - ('\u{1239a}', '\u{123ff}'), - ('\u{1246f}', '\u{1246f}'), - ('\u{12475}', '\u{1247f}'), - ('\u{12544}', '\u{12f8f}'), - ('\u{12ff3}', '\u{12fff}'), - ('\u{13456}', '\u{1345f}'), - ('\u{143fb}', '\u{143ff}'), - ('\u{14647}', '\u{160ff}'), - ('\u{1613a}', '\u{167ff}'), - ('\u{16a39}', '\u{16a3f}'), - ('\u{16a5f}', 
'\u{16a5f}'), - ('\u{16a6a}', '\u{16a6d}'), - ('\u{16abf}', '\u{16abf}'), - ('\u{16aca}', '\u{16acf}'), - ('\u{16aee}', '\u{16aef}'), - ('\u{16af6}', '\u{16aff}'), - ('\u{16b46}', '\u{16b4f}'), - ('\u{16b5a}', '\u{16b5a}'), - ('\u{16b62}', '\u{16b62}'), - ('\u{16b78}', '\u{16b7c}'), - ('\u{16b90}', '\u{16d3f}'), - ('\u{16d7a}', '\u{16e3f}'), - ('\u{16e9b}', '\u{16eff}'), - ('\u{16f4b}', '\u{16f4e}'), - ('\u{16f88}', '\u{16f8e}'), - ('\u{16fa0}', '\u{16fdf}'), - ('\u{16fe5}', '\u{16fef}'), - ('\u{16ff2}', '\u{16fff}'), - ('\u{187f8}', '\u{187ff}'), - ('\u{18cd6}', '\u{18cfe}'), - ('\u{18d09}', '\u{1afef}'), - ('\u{1aff4}', '\u{1aff4}'), - ('\u{1affc}', '\u{1affc}'), - ('\u{1afff}', '\u{1afff}'), - ('\u{1b123}', '\u{1b131}'), - ('\u{1b133}', '\u{1b14f}'), - ('\u{1b153}', '\u{1b154}'), - ('\u{1b156}', '\u{1b163}'), - ('\u{1b168}', '\u{1b16f}'), - ('\u{1b2fc}', '\u{1bbff}'), - ('\u{1bc6b}', '\u{1bc6f}'), - ('\u{1bc7d}', '\u{1bc7f}'), - ('\u{1bc89}', '\u{1bc8f}'), - ('\u{1bc9a}', '\u{1bc9b}'), - ('\u{1bca4}', '\u{1cbff}'), - ('\u{1ccfa}', '\u{1ccff}'), - ('\u{1ceb4}', '\u{1ceff}'), - ('\u{1cf2e}', '\u{1cf2f}'), - ('\u{1cf47}', '\u{1cf4f}'), - ('\u{1cfc4}', '\u{1cfff}'), - ('\u{1d0f6}', '\u{1d0ff}'), - ('\u{1d127}', '\u{1d128}'), - ('\u{1d1eb}', '\u{1d1ff}'), - ('\u{1d246}', '\u{1d2bf}'), - ('\u{1d2d4}', '\u{1d2df}'), - ('\u{1d2f4}', '\u{1d2ff}'), - ('\u{1d357}', '\u{1d35f}'), - ('\u{1d379}', '\u{1d3ff}'), - ('\u{1d455}', '\u{1d455}'), - ('\u{1d49d}', '\u{1d49d}'), - ('\u{1d4a0}', '\u{1d4a1}'), - ('\u{1d4a3}', '\u{1d4a4}'), - ('\u{1d4a7}', '\u{1d4a8}'), - ('\u{1d4ad}', '\u{1d4ad}'), - ('\u{1d4ba}', '\u{1d4ba}'), - ('\u{1d4bc}', '\u{1d4bc}'), - ('\u{1d4c4}', '\u{1d4c4}'), - ('\u{1d506}', '\u{1d506}'), - ('\u{1d50b}', '\u{1d50c}'), - ('\u{1d515}', '\u{1d515}'), - ('\u{1d51d}', '\u{1d51d}'), - ('\u{1d53a}', '\u{1d53a}'), - ('\u{1d53f}', '\u{1d53f}'), - ('\u{1d545}', '\u{1d545}'), - ('\u{1d547}', '\u{1d549}'), - ('\u{1d551}', '\u{1d551}'), - ('\u{1d6a6}', '\u{1d6a7}'), - ('\u{1d7cc}', '\u{1d7cd}'), - ('\u{1da8c}', '\u{1da9a}'), - ('\u{1daa0}', '\u{1daa0}'), - ('\u{1dab0}', '\u{1deff}'), - ('\u{1df1f}', '\u{1df24}'), - ('\u{1df2b}', '\u{1dfff}'), - ('\u{1e007}', '\u{1e007}'), - ('\u{1e019}', '\u{1e01a}'), - ('\u{1e022}', '\u{1e022}'), - ('\u{1e025}', '\u{1e025}'), - ('\u{1e02b}', '\u{1e02f}'), - ('\u{1e06e}', '\u{1e08e}'), - ('\u{1e090}', '\u{1e0ff}'), - ('\u{1e12d}', '\u{1e12f}'), - ('\u{1e13e}', '\u{1e13f}'), - ('\u{1e14a}', '\u{1e14d}'), - ('\u{1e150}', '\u{1e28f}'), - ('\u{1e2af}', '\u{1e2bf}'), - ('\u{1e2fa}', '\u{1e2fe}'), - ('\u{1e300}', '\u{1e4cf}'), - ('\u{1e4fa}', '\u{1e5cf}'), - ('\u{1e5fb}', '\u{1e5fe}'), - ('\u{1e600}', '\u{1e7df}'), - ('\u{1e7e7}', '\u{1e7e7}'), - ('\u{1e7ec}', '\u{1e7ec}'), - ('\u{1e7ef}', '\u{1e7ef}'), - ('\u{1e7ff}', '\u{1e7ff}'), - ('\u{1e8c5}', '\u{1e8c6}'), - ('\u{1e8d7}', '\u{1e8ff}'), - ('\u{1e94c}', '\u{1e94f}'), - ('\u{1e95a}', '\u{1e95d}'), - ('\u{1e960}', '\u{1ec70}'), - ('\u{1ecb5}', '\u{1ed00}'), - ('\u{1ed3e}', '\u{1edff}'), - ('\u{1ee04}', '\u{1ee04}'), - ('\u{1ee20}', '\u{1ee20}'), - ('\u{1ee23}', '\u{1ee23}'), - ('\u{1ee25}', '\u{1ee26}'), - ('\u{1ee28}', '\u{1ee28}'), - ('\u{1ee33}', '\u{1ee33}'), - ('\u{1ee38}', '\u{1ee38}'), - ('\u{1ee3a}', '\u{1ee3a}'), - ('\u{1ee3c}', '\u{1ee41}'), - ('\u{1ee43}', '\u{1ee46}'), - ('\u{1ee48}', '\u{1ee48}'), - ('\u{1ee4a}', '\u{1ee4a}'), - ('\u{1ee4c}', '\u{1ee4c}'), - ('\u{1ee50}', '\u{1ee50}'), - ('\u{1ee53}', '\u{1ee53}'), - ('\u{1ee55}', '\u{1ee56}'), - ('\u{1ee58}', '\u{1ee58}'), - ('\u{1ee5a}', '\u{1ee5a}'), 
- ('\u{1ee5c}', '\u{1ee5c}'), - ('\u{1ee5e}', '\u{1ee5e}'), - ('\u{1ee60}', '\u{1ee60}'), - ('\u{1ee63}', '\u{1ee63}'), - ('\u{1ee65}', '\u{1ee66}'), - ('\u{1ee6b}', '\u{1ee6b}'), - ('\u{1ee73}', '\u{1ee73}'), - ('\u{1ee78}', '\u{1ee78}'), - ('\u{1ee7d}', '\u{1ee7d}'), - ('\u{1ee7f}', '\u{1ee7f}'), - ('\u{1ee8a}', '\u{1ee8a}'), - ('\u{1ee9c}', '\u{1eea0}'), - ('\u{1eea4}', '\u{1eea4}'), - ('\u{1eeaa}', '\u{1eeaa}'), - ('\u{1eebc}', '\u{1eeef}'), - ('\u{1eef2}', '\u{1efff}'), - ('\u{1f02c}', '\u{1f02f}'), - ('\u{1f094}', '\u{1f09f}'), - ('\u{1f0af}', '\u{1f0b0}'), - ('\u{1f0c0}', '\u{1f0c0}'), - ('\u{1f0d0}', '\u{1f0d0}'), - ('\u{1f0f6}', '\u{1f0ff}'), - ('\u{1f1ae}', '\u{1f1e5}'), - ('\u{1f203}', '\u{1f20f}'), - ('\u{1f23c}', '\u{1f23f}'), - ('\u{1f249}', '\u{1f24f}'), - ('\u{1f252}', '\u{1f25f}'), - ('\u{1f266}', '\u{1f2ff}'), - ('\u{1f6d8}', '\u{1f6db}'), - ('\u{1f6ed}', '\u{1f6ef}'), - ('\u{1f6fd}', '\u{1f6ff}'), - ('\u{1f777}', '\u{1f77a}'), - ('\u{1f7da}', '\u{1f7df}'), - ('\u{1f7ec}', '\u{1f7ef}'), - ('\u{1f7f1}', '\u{1f7ff}'), - ('\u{1f80c}', '\u{1f80f}'), - ('\u{1f848}', '\u{1f84f}'), - ('\u{1f85a}', '\u{1f85f}'), - ('\u{1f888}', '\u{1f88f}'), - ('\u{1f8ae}', '\u{1f8af}'), - ('\u{1f8bc}', '\u{1f8bf}'), - ('\u{1f8c2}', '\u{1f8ff}'), - ('\u{1fa54}', '\u{1fa5f}'), - ('\u{1fa6e}', '\u{1fa6f}'), - ('\u{1fa7d}', '\u{1fa7f}'), - ('\u{1fa8a}', '\u{1fa8e}'), - ('\u{1fac7}', '\u{1facd}'), - ('\u{1fadd}', '\u{1fade}'), - ('\u{1faea}', '\u{1faef}'), - ('\u{1faf9}', '\u{1faff}'), - ('\u{1fb93}', '\u{1fb93}'), - ('\u{1fbfa}', '\u{1ffff}'), - ('\u{2a6e0}', '\u{2a6ff}'), - ('\u{2b73a}', '\u{2b73f}'), - ('\u{2b81e}', '\u{2b81f}'), - ('\u{2cea2}', '\u{2ceaf}'), - ('\u{2ebe1}', '\u{2ebef}'), - ('\u{2ee5e}', '\u{2f7ff}'), - ('\u{2fa1e}', '\u{2ffff}'), - ('\u{3134b}', '\u{3134f}'), - ('\u{323b0}', '\u{e0000}'), - ('\u{e0002}', '\u{e001f}'), - ('\u{e0080}', '\u{e00ff}'), - ('\u{e01f0}', '\u{effff}'), - ('\u{ffffe}', '\u{fffff}'), - ('\u{10fffe}', '\u{10ffff}'), -]; - -pub const UPPERCASE_LETTER: &'static [(char, char)] = &[ - ('A', 'Z'), - ('À', 'Ö'), - ('Ø', 'Þ'), - ('Ā', 'Ā'), - ('Ă', 'Ă'), - ('Ą', 'Ą'), - ('Ć', 'Ć'), - ('Ĉ', 'Ĉ'), - ('Ċ', 'Ċ'), - ('Č', 'Č'), - ('Ď', 'Ď'), - ('Đ', 'Đ'), - ('Ē', 'Ē'), - ('Ĕ', 'Ĕ'), - ('Ė', 'Ė'), - ('Ę', 'Ę'), - ('Ě', 'Ě'), - ('Ĝ', 'Ĝ'), - ('Ğ', 'Ğ'), - ('Ġ', 'Ġ'), - ('Ģ', 'Ģ'), - ('Ĥ', 'Ĥ'), - ('Ħ', 'Ħ'), - ('Ĩ', 'Ĩ'), - ('Ī', 'Ī'), - ('Ĭ', 'Ĭ'), - ('Į', 'Į'), - ('İ', 'İ'), - ('IJ', 'IJ'), - ('Ĵ', 'Ĵ'), - ('Ķ', 'Ķ'), - ('Ĺ', 'Ĺ'), - ('Ļ', 'Ļ'), - ('Ľ', 'Ľ'), - ('Ŀ', 'Ŀ'), - ('Ł', 'Ł'), - ('Ń', 'Ń'), - ('Ņ', 'Ņ'), - ('Ň', 'Ň'), - ('Ŋ', 'Ŋ'), - ('Ō', 'Ō'), - ('Ŏ', 'Ŏ'), - ('Ő', 'Ő'), - ('Œ', 'Œ'), - ('Ŕ', 'Ŕ'), - ('Ŗ', 'Ŗ'), - ('Ř', 'Ř'), - ('Ś', 'Ś'), - ('Ŝ', 'Ŝ'), - ('Ş', 'Ş'), - ('Š', 'Š'), - ('Ţ', 'Ţ'), - ('Ť', 'Ť'), - ('Ŧ', 'Ŧ'), - ('Ũ', 'Ũ'), - ('Ū', 'Ū'), - ('Ŭ', 'Ŭ'), - ('Ů', 'Ů'), - ('Ű', 'Ű'), - ('Ų', 'Ų'), - ('Ŵ', 'Ŵ'), - ('Ŷ', 'Ŷ'), - ('Ÿ', 'Ź'), - ('Ż', 'Ż'), - ('Ž', 'Ž'), - ('Ɓ', 'Ƃ'), - ('Ƅ', 'Ƅ'), - ('Ɔ', 'Ƈ'), - ('Ɖ', 'Ƌ'), - ('Ǝ', 'Ƒ'), - ('Ɠ', 'Ɣ'), - ('Ɩ', 'Ƙ'), - ('Ɯ', 'Ɲ'), - ('Ɵ', 'Ơ'), - ('Ƣ', 'Ƣ'), - ('Ƥ', 'Ƥ'), - ('Ʀ', 'Ƨ'), - ('Ʃ', 'Ʃ'), - ('Ƭ', 'Ƭ'), - ('Ʈ', 'Ư'), - ('Ʊ', 'Ƴ'), - ('Ƶ', 'Ƶ'), - ('Ʒ', 'Ƹ'), - ('Ƽ', 'Ƽ'), - ('DŽ', 'DŽ'), - ('LJ', 'LJ'), - ('NJ', 'NJ'), - ('Ǎ', 'Ǎ'), - ('Ǐ', 'Ǐ'), - ('Ǒ', 'Ǒ'), - ('Ǔ', 'Ǔ'), - ('Ǖ', 'Ǖ'), - ('Ǘ', 'Ǘ'), - ('Ǚ', 'Ǚ'), - ('Ǜ', 'Ǜ'), - ('Ǟ', 'Ǟ'), - ('Ǡ', 'Ǡ'), - ('Ǣ', 'Ǣ'), - ('Ǥ', 'Ǥ'), - ('Ǧ', 'Ǧ'), - ('Ǩ', 'Ǩ'), - ('Ǫ', 'Ǫ'), - ('Ǭ', 'Ǭ'), - ('Ǯ', 'Ǯ'), - ('DZ', 'DZ'), - ('Ǵ', 'Ǵ'), - ('Ƕ', 'Ǹ'), 
- ('Ǻ', 'Ǻ'), - ('Ǽ', 'Ǽ'), - ('Ǿ', 'Ǿ'), - ('Ȁ', 'Ȁ'), - ('Ȃ', 'Ȃ'), - ('Ȅ', 'Ȅ'), - ('Ȇ', 'Ȇ'), - ('Ȉ', 'Ȉ'), - ('Ȋ', 'Ȋ'), - ('Ȍ', 'Ȍ'), - ('Ȏ', 'Ȏ'), - ('Ȑ', 'Ȑ'), - ('Ȓ', 'Ȓ'), - ('Ȕ', 'Ȕ'), - ('Ȗ', 'Ȗ'), - ('Ș', 'Ș'), - ('Ț', 'Ț'), - ('Ȝ', 'Ȝ'), - ('Ȟ', 'Ȟ'), - ('Ƞ', 'Ƞ'), - ('Ȣ', 'Ȣ'), - ('Ȥ', 'Ȥ'), - ('Ȧ', 'Ȧ'), - ('Ȩ', 'Ȩ'), - ('Ȫ', 'Ȫ'), - ('Ȭ', 'Ȭ'), - ('Ȯ', 'Ȯ'), - ('Ȱ', 'Ȱ'), - ('Ȳ', 'Ȳ'), - ('Ⱥ', 'Ȼ'), - ('Ƚ', 'Ⱦ'), - ('Ɂ', 'Ɂ'), - ('Ƀ', 'Ɇ'), - ('Ɉ', 'Ɉ'), - ('Ɋ', 'Ɋ'), - ('Ɍ', 'Ɍ'), - ('Ɏ', 'Ɏ'), - ('Ͱ', 'Ͱ'), - ('Ͳ', 'Ͳ'), - ('Ͷ', 'Ͷ'), - ('Ϳ', 'Ϳ'), - ('Ά', 'Ά'), - ('Έ', 'Ί'), - ('Ό', 'Ό'), - ('Ύ', 'Ώ'), - ('Α', 'Ρ'), - ('Σ', 'Ϋ'), - ('Ϗ', 'Ϗ'), - ('ϒ', 'ϔ'), - ('Ϙ', 'Ϙ'), - ('Ϛ', 'Ϛ'), - ('Ϝ', 'Ϝ'), - ('Ϟ', 'Ϟ'), - ('Ϡ', 'Ϡ'), - ('Ϣ', 'Ϣ'), - ('Ϥ', 'Ϥ'), - ('Ϧ', 'Ϧ'), - ('Ϩ', 'Ϩ'), - ('Ϫ', 'Ϫ'), - ('Ϭ', 'Ϭ'), - ('Ϯ', 'Ϯ'), - ('ϴ', 'ϴ'), - ('Ϸ', 'Ϸ'), - ('Ϲ', 'Ϻ'), - ('Ͻ', 'Я'), - ('Ѡ', 'Ѡ'), - ('Ѣ', 'Ѣ'), - ('Ѥ', 'Ѥ'), - ('Ѧ', 'Ѧ'), - ('Ѩ', 'Ѩ'), - ('Ѫ', 'Ѫ'), - ('Ѭ', 'Ѭ'), - ('Ѯ', 'Ѯ'), - ('Ѱ', 'Ѱ'), - ('Ѳ', 'Ѳ'), - ('Ѵ', 'Ѵ'), - ('Ѷ', 'Ѷ'), - ('Ѹ', 'Ѹ'), - ('Ѻ', 'Ѻ'), - ('Ѽ', 'Ѽ'), - ('Ѿ', 'Ѿ'), - ('Ҁ', 'Ҁ'), - ('Ҋ', 'Ҋ'), - ('Ҍ', 'Ҍ'), - ('Ҏ', 'Ҏ'), - ('Ґ', 'Ґ'), - ('Ғ', 'Ғ'), - ('Ҕ', 'Ҕ'), - ('Җ', 'Җ'), - ('Ҙ', 'Ҙ'), - ('Қ', 'Қ'), - ('Ҝ', 'Ҝ'), - ('Ҟ', 'Ҟ'), - ('Ҡ', 'Ҡ'), - ('Ң', 'Ң'), - ('Ҥ', 'Ҥ'), - ('Ҧ', 'Ҧ'), - ('Ҩ', 'Ҩ'), - ('Ҫ', 'Ҫ'), - ('Ҭ', 'Ҭ'), - ('Ү', 'Ү'), - ('Ұ', 'Ұ'), - ('Ҳ', 'Ҳ'), - ('Ҵ', 'Ҵ'), - ('Ҷ', 'Ҷ'), - ('Ҹ', 'Ҹ'), - ('Һ', 'Һ'), - ('Ҽ', 'Ҽ'), - ('Ҿ', 'Ҿ'), - ('Ӏ', 'Ӂ'), - ('Ӄ', 'Ӄ'), - ('Ӆ', 'Ӆ'), - ('Ӈ', 'Ӈ'), - ('Ӊ', 'Ӊ'), - ('Ӌ', 'Ӌ'), - ('Ӎ', 'Ӎ'), - ('Ӑ', 'Ӑ'), - ('Ӓ', 'Ӓ'), - ('Ӕ', 'Ӕ'), - ('Ӗ', 'Ӗ'), - ('Ә', 'Ә'), - ('Ӛ', 'Ӛ'), - ('Ӝ', 'Ӝ'), - ('Ӟ', 'Ӟ'), - ('Ӡ', 'Ӡ'), - ('Ӣ', 'Ӣ'), - ('Ӥ', 'Ӥ'), - ('Ӧ', 'Ӧ'), - ('Ө', 'Ө'), - ('Ӫ', 'Ӫ'), - ('Ӭ', 'Ӭ'), - ('Ӯ', 'Ӯ'), - ('Ӱ', 'Ӱ'), - ('Ӳ', 'Ӳ'), - ('Ӵ', 'Ӵ'), - ('Ӷ', 'Ӷ'), - ('Ӹ', 'Ӹ'), - ('Ӻ', 'Ӻ'), - ('Ӽ', 'Ӽ'), - ('Ӿ', 'Ӿ'), - ('Ԁ', 'Ԁ'), - ('Ԃ', 'Ԃ'), - ('Ԅ', 'Ԅ'), - ('Ԇ', 'Ԇ'), - ('Ԉ', 'Ԉ'), - ('Ԋ', 'Ԋ'), - ('Ԍ', 'Ԍ'), - ('Ԏ', 'Ԏ'), - ('Ԑ', 'Ԑ'), - ('Ԓ', 'Ԓ'), - ('Ԕ', 'Ԕ'), - ('Ԗ', 'Ԗ'), - ('Ԙ', 'Ԙ'), - ('Ԛ', 'Ԛ'), - ('Ԝ', 'Ԝ'), - ('Ԟ', 'Ԟ'), - ('Ԡ', 'Ԡ'), - ('Ԣ', 'Ԣ'), - ('Ԥ', 'Ԥ'), - ('Ԧ', 'Ԧ'), - ('Ԩ', 'Ԩ'), - ('Ԫ', 'Ԫ'), - ('Ԭ', 'Ԭ'), - ('Ԯ', 'Ԯ'), - ('Ա', 'Ֆ'), - ('Ⴀ', 'Ⴥ'), - ('Ⴧ', 'Ⴧ'), - ('Ⴭ', 'Ⴭ'), - ('Ꭰ', 'Ᏽ'), - ('Ᲊ', 'Ᲊ'), - ('Ა', 'Ჺ'), - ('Ჽ', 'Ჿ'), - ('Ḁ', 'Ḁ'), - ('Ḃ', 'Ḃ'), - ('Ḅ', 'Ḅ'), - ('Ḇ', 'Ḇ'), - ('Ḉ', 'Ḉ'), - ('Ḋ', 'Ḋ'), - ('Ḍ', 'Ḍ'), - ('Ḏ', 'Ḏ'), - ('Ḑ', 'Ḑ'), - ('Ḓ', 'Ḓ'), - ('Ḕ', 'Ḕ'), - ('Ḗ', 'Ḗ'), - ('Ḙ', 'Ḙ'), - ('Ḛ', 'Ḛ'), - ('Ḝ', 'Ḝ'), - ('Ḟ', 'Ḟ'), - ('Ḡ', 'Ḡ'), - ('Ḣ', 'Ḣ'), - ('Ḥ', 'Ḥ'), - ('Ḧ', 'Ḧ'), - ('Ḩ', 'Ḩ'), - ('Ḫ', 'Ḫ'), - ('Ḭ', 'Ḭ'), - ('Ḯ', 'Ḯ'), - ('Ḱ', 'Ḱ'), - ('Ḳ', 'Ḳ'), - ('Ḵ', 'Ḵ'), - ('Ḷ', 'Ḷ'), - ('Ḹ', 'Ḹ'), - ('Ḻ', 'Ḻ'), - ('Ḽ', 'Ḽ'), - ('Ḿ', 'Ḿ'), - ('Ṁ', 'Ṁ'), - ('Ṃ', 'Ṃ'), - ('Ṅ', 'Ṅ'), - ('Ṇ', 'Ṇ'), - ('Ṉ', 'Ṉ'), - ('Ṋ', 'Ṋ'), - ('Ṍ', 'Ṍ'), - ('Ṏ', 'Ṏ'), - ('Ṑ', 'Ṑ'), - ('Ṓ', 'Ṓ'), - ('Ṕ', 'Ṕ'), - ('Ṗ', 'Ṗ'), - ('Ṙ', 'Ṙ'), - ('Ṛ', 'Ṛ'), - ('Ṝ', 'Ṝ'), - ('Ṟ', 'Ṟ'), - ('Ṡ', 'Ṡ'), - ('Ṣ', 'Ṣ'), - ('Ṥ', 'Ṥ'), - ('Ṧ', 'Ṧ'), - ('Ṩ', 'Ṩ'), - ('Ṫ', 'Ṫ'), - ('Ṭ', 'Ṭ'), - ('Ṯ', 'Ṯ'), - ('Ṱ', 'Ṱ'), - ('Ṳ', 'Ṳ'), - ('Ṵ', 'Ṵ'), - ('Ṷ', 'Ṷ'), - ('Ṹ', 'Ṹ'), - ('Ṻ', 'Ṻ'), - ('Ṽ', 'Ṽ'), - ('Ṿ', 'Ṿ'), - ('Ẁ', 'Ẁ'), - ('Ẃ', 'Ẃ'), - ('Ẅ', 'Ẅ'), - ('Ẇ', 'Ẇ'), - ('Ẉ', 'Ẉ'), - ('Ẋ', 'Ẋ'), - ('Ẍ', 'Ẍ'), - ('Ẏ', 'Ẏ'), - ('Ẑ', 'Ẑ'), - ('Ẓ', 'Ẓ'), - ('Ẕ', 'Ẕ'), - ('ẞ', 'ẞ'), - ('Ạ', 'Ạ'), - ('Ả', 'Ả'), - ('Ấ', 'Ấ'), - ('Ầ', 'Ầ'), - ('Ẩ', 'Ẩ'), - ('Ẫ', 
'Ẫ'), - ('Ậ', 'Ậ'), - ('Ắ', 'Ắ'), - ('Ằ', 'Ằ'), - ('Ẳ', 'Ẳ'), - ('Ẵ', 'Ẵ'), - ('Ặ', 'Ặ'), - ('Ẹ', 'Ẹ'), - ('Ẻ', 'Ẻ'), - ('Ẽ', 'Ẽ'), - ('Ế', 'Ế'), - ('Ề', 'Ề'), - ('Ể', 'Ể'), - ('Ễ', 'Ễ'), - ('Ệ', 'Ệ'), - ('Ỉ', 'Ỉ'), - ('Ị', 'Ị'), - ('Ọ', 'Ọ'), - ('Ỏ', 'Ỏ'), - ('Ố', 'Ố'), - ('Ồ', 'Ồ'), - ('Ổ', 'Ổ'), - ('Ỗ', 'Ỗ'), - ('Ộ', 'Ộ'), - ('Ớ', 'Ớ'), - ('Ờ', 'Ờ'), - ('Ở', 'Ở'), - ('Ỡ', 'Ỡ'), - ('Ợ', 'Ợ'), - ('Ụ', 'Ụ'), - ('Ủ', 'Ủ'), - ('Ứ', 'Ứ'), - ('Ừ', 'Ừ'), - ('Ử', 'Ử'), - ('Ữ', 'Ữ'), - ('Ự', 'Ự'), - ('Ỳ', 'Ỳ'), - ('Ỵ', 'Ỵ'), - ('Ỷ', 'Ỷ'), - ('Ỹ', 'Ỹ'), - ('Ỻ', 'Ỻ'), - ('Ỽ', 'Ỽ'), - ('Ỿ', 'Ỿ'), - ('Ἀ', 'Ἇ'), - ('Ἐ', 'Ἕ'), - ('Ἠ', 'Ἧ'), - ('Ἰ', 'Ἷ'), - ('Ὀ', 'Ὅ'), - ('Ὑ', 'Ὑ'), - ('Ὓ', 'Ὓ'), - ('Ὕ', 'Ὕ'), - ('Ὗ', 'Ὗ'), - ('Ὠ', 'Ὧ'), - ('Ᾰ', 'Ά'), - ('Ὲ', 'Ή'), - ('Ῐ', 'Ί'), - ('Ῠ', 'Ῥ'), - ('Ὸ', 'Ώ'), - ('ℂ', 'ℂ'), - ('ℇ', 'ℇ'), - ('ℋ', 'ℍ'), - ('ℐ', 'ℒ'), - ('ℕ', 'ℕ'), - ('ℙ', 'ℝ'), - ('ℤ', 'ℤ'), - ('Ω', 'Ω'), - ('ℨ', 'ℨ'), - ('K', 'ℭ'), - ('ℰ', 'ℳ'), - ('ℾ', 'ℿ'), - ('ⅅ', 'ⅅ'), - ('Ↄ', 'Ↄ'), - ('Ⰰ', 'Ⱟ'), - ('Ⱡ', 'Ⱡ'), - ('Ɫ', 'Ɽ'), - ('Ⱨ', 'Ⱨ'), - ('Ⱪ', 'Ⱪ'), - ('Ⱬ', 'Ⱬ'), - ('Ɑ', 'Ɒ'), - ('Ⱳ', 'Ⱳ'), - ('Ⱶ', 'Ⱶ'), - ('Ȿ', 'Ⲁ'), - ('Ⲃ', 'Ⲃ'), - ('Ⲅ', 'Ⲅ'), - ('Ⲇ', 'Ⲇ'), - ('Ⲉ', 'Ⲉ'), - ('Ⲋ', 'Ⲋ'), - ('Ⲍ', 'Ⲍ'), - ('Ⲏ', 'Ⲏ'), - ('Ⲑ', 'Ⲑ'), - ('Ⲓ', 'Ⲓ'), - ('Ⲕ', 'Ⲕ'), - ('Ⲗ', 'Ⲗ'), - ('Ⲙ', 'Ⲙ'), - ('Ⲛ', 'Ⲛ'), - ('Ⲝ', 'Ⲝ'), - ('Ⲟ', 'Ⲟ'), - ('Ⲡ', 'Ⲡ'), - ('Ⲣ', 'Ⲣ'), - ('Ⲥ', 'Ⲥ'), - ('Ⲧ', 'Ⲧ'), - ('Ⲩ', 'Ⲩ'), - ('Ⲫ', 'Ⲫ'), - ('Ⲭ', 'Ⲭ'), - ('Ⲯ', 'Ⲯ'), - ('Ⲱ', 'Ⲱ'), - ('Ⲳ', 'Ⲳ'), - ('Ⲵ', 'Ⲵ'), - ('Ⲷ', 'Ⲷ'), - ('Ⲹ', 'Ⲹ'), - ('Ⲻ', 'Ⲻ'), - ('Ⲽ', 'Ⲽ'), - ('Ⲿ', 'Ⲿ'), - ('Ⳁ', 'Ⳁ'), - ('Ⳃ', 'Ⳃ'), - ('Ⳅ', 'Ⳅ'), - ('Ⳇ', 'Ⳇ'), - ('Ⳉ', 'Ⳉ'), - ('Ⳋ', 'Ⳋ'), - ('Ⳍ', 'Ⳍ'), - ('Ⳏ', 'Ⳏ'), - ('Ⳑ', 'Ⳑ'), - ('Ⳓ', 'Ⳓ'), - ('Ⳕ', 'Ⳕ'), - ('Ⳗ', 'Ⳗ'), - ('Ⳙ', 'Ⳙ'), - ('Ⳛ', 'Ⳛ'), - ('Ⳝ', 'Ⳝ'), - ('Ⳟ', 'Ⳟ'), - ('Ⳡ', 'Ⳡ'), - ('Ⳣ', 'Ⳣ'), - ('Ⳬ', 'Ⳬ'), - ('Ⳮ', 'Ⳮ'), - ('Ⳳ', 'Ⳳ'), - ('Ꙁ', 'Ꙁ'), - ('Ꙃ', 'Ꙃ'), - ('Ꙅ', 'Ꙅ'), - ('Ꙇ', 'Ꙇ'), - ('Ꙉ', 'Ꙉ'), - ('Ꙋ', 'Ꙋ'), - ('Ꙍ', 'Ꙍ'), - ('Ꙏ', 'Ꙏ'), - ('Ꙑ', 'Ꙑ'), - ('Ꙓ', 'Ꙓ'), - ('Ꙕ', 'Ꙕ'), - ('Ꙗ', 'Ꙗ'), - ('Ꙙ', 'Ꙙ'), - ('Ꙛ', 'Ꙛ'), - ('Ꙝ', 'Ꙝ'), - ('Ꙟ', 'Ꙟ'), - ('Ꙡ', 'Ꙡ'), - ('Ꙣ', 'Ꙣ'), - ('Ꙥ', 'Ꙥ'), - ('Ꙧ', 'Ꙧ'), - ('Ꙩ', 'Ꙩ'), - ('Ꙫ', 'Ꙫ'), - ('Ꙭ', 'Ꙭ'), - ('Ꚁ', 'Ꚁ'), - ('Ꚃ', 'Ꚃ'), - ('Ꚅ', 'Ꚅ'), - ('Ꚇ', 'Ꚇ'), - ('Ꚉ', 'Ꚉ'), - ('Ꚋ', 'Ꚋ'), - ('Ꚍ', 'Ꚍ'), - ('Ꚏ', 'Ꚏ'), - ('Ꚑ', 'Ꚑ'), - ('Ꚓ', 'Ꚓ'), - ('Ꚕ', 'Ꚕ'), - ('Ꚗ', 'Ꚗ'), - ('Ꚙ', 'Ꚙ'), - ('Ꚛ', 'Ꚛ'), - ('Ꜣ', 'Ꜣ'), - ('Ꜥ', 'Ꜥ'), - ('Ꜧ', 'Ꜧ'), - ('Ꜩ', 'Ꜩ'), - ('Ꜫ', 'Ꜫ'), - ('Ꜭ', 'Ꜭ'), - ('Ꜯ', 'Ꜯ'), - ('Ꜳ', 'Ꜳ'), - ('Ꜵ', 'Ꜵ'), - ('Ꜷ', 'Ꜷ'), - ('Ꜹ', 'Ꜹ'), - ('Ꜻ', 'Ꜻ'), - ('Ꜽ', 'Ꜽ'), - ('Ꜿ', 'Ꜿ'), - ('Ꝁ', 'Ꝁ'), - ('Ꝃ', 'Ꝃ'), - ('Ꝅ', 'Ꝅ'), - ('Ꝇ', 'Ꝇ'), - ('Ꝉ', 'Ꝉ'), - ('Ꝋ', 'Ꝋ'), - ('Ꝍ', 'Ꝍ'), - ('Ꝏ', 'Ꝏ'), - ('Ꝑ', 'Ꝑ'), - ('Ꝓ', 'Ꝓ'), - ('Ꝕ', 'Ꝕ'), - ('Ꝗ', 'Ꝗ'), - ('Ꝙ', 'Ꝙ'), - ('Ꝛ', 'Ꝛ'), - ('Ꝝ', 'Ꝝ'), - ('Ꝟ', 'Ꝟ'), - ('Ꝡ', 'Ꝡ'), - ('Ꝣ', 'Ꝣ'), - ('Ꝥ', 'Ꝥ'), - ('Ꝧ', 'Ꝧ'), - ('Ꝩ', 'Ꝩ'), - ('Ꝫ', 'Ꝫ'), - ('Ꝭ', 'Ꝭ'), - ('Ꝯ', 'Ꝯ'), - ('Ꝺ', 'Ꝺ'), - ('Ꝼ', 'Ꝼ'), - ('Ᵹ', 'Ꝿ'), - ('Ꞁ', 'Ꞁ'), - ('Ꞃ', 'Ꞃ'), - ('Ꞅ', 'Ꞅ'), - ('Ꞇ', 'Ꞇ'), - ('Ꞌ', 'Ꞌ'), - ('Ɥ', 'Ɥ'), - ('Ꞑ', 'Ꞑ'), - ('Ꞓ', 'Ꞓ'), - ('Ꞗ', 'Ꞗ'), - ('Ꞙ', 'Ꞙ'), - ('Ꞛ', 'Ꞛ'), - ('Ꞝ', 'Ꞝ'), - ('Ꞟ', 'Ꞟ'), - ('Ꞡ', 'Ꞡ'), - ('Ꞣ', 'Ꞣ'), - ('Ꞥ', 'Ꞥ'), - ('Ꞧ', 'Ꞧ'), - ('Ꞩ', 'Ꞩ'), - ('Ɦ', 'Ɪ'), - ('Ʞ', 'Ꞵ'), - ('Ꞷ', 'Ꞷ'), - ('Ꞹ', 'Ꞹ'), - ('Ꞻ', 'Ꞻ'), - ('Ꞽ', 'Ꞽ'), - ('Ꞿ', 'Ꞿ'), - ('Ꟁ', 'Ꟁ'), - ('Ꟃ', 'Ꟃ'), - ('Ꞔ', 'Ꟈ'), - ('Ꟊ', 'Ꟊ'), - ('Ɤ', 'Ꟍ'), - ('Ꟑ', 'Ꟑ'), - ('Ꟗ', 'Ꟗ'), - ('Ꟙ', 'Ꟙ'), - ('Ꟛ', 'Ꟛ'), - ('Ƛ', 'Ƛ'), - ('Ꟶ', 'Ꟶ'), - ('A', 'Z'), - ('𐐀', '𐐧'), - ('𐒰', '𐓓'), - ('𐕰', '𐕺'), - ('𐕼', '𐖊'), - ('𐖌', '𐖒'), - 
('𐖔', '𐖕'), - ('𐲀', '𐲲'), - ('𐵐', '𐵥'), - ('𑢠', '𑢿'), - ('𖹀', '𖹟'), - ('𝐀', '𝐙'), - ('𝐴', '𝑍'), - ('𝑨', '𝒁'), - ('𝒜', '𝒜'), - ('𝒞', '𝒟'), - ('𝒢', '𝒢'), - ('𝒥', '𝒦'), - ('𝒩', '𝒬'), - ('𝒮', '𝒵'), - ('𝓐', '𝓩'), - ('𝔄', '𝔅'), - ('𝔇', '𝔊'), - ('𝔍', '𝔔'), - ('𝔖', '𝔜'), - ('𝔸', '𝔹'), - ('𝔻', '𝔾'), - ('𝕀', '𝕄'), - ('𝕆', '𝕆'), - ('𝕊', '𝕐'), - ('𝕬', '𝖅'), - ('𝖠', '𝖹'), - ('𝗔', '𝗭'), - ('𝘈', '𝘡'), - ('𝘼', '𝙕'), - ('𝙰', '𝚉'), - ('𝚨', '𝛀'), - ('𝛢', '𝛺'), - ('𝜜', '𝜴'), - ('𝝖', '𝝮'), - ('𝞐', '𝞨'), - ('𝟊', '𝟊'), - ('𞤀', '𞤡'), -]; diff --git a/vendor/regex-syntax/src/unicode_tables/grapheme_cluster_break.rs b/vendor/regex-syntax/src/unicode_tables/grapheme_cluster_break.rs deleted file mode 100644 index 6a6ec2af5f25fa..00000000000000 --- a/vendor/regex-syntax/src/unicode_tables/grapheme_cluster_break.rs +++ /dev/null @@ -1,1420 +0,0 @@ -// DO NOT EDIT THIS FILE. IT WAS AUTOMATICALLY GENERATED BY: -// -// ucd-generate grapheme-cluster-break ucd-16.0.0 --chars -// -// Unicode version: 16.0.0. -// -// ucd-generate 0.3.1 is available on crates.io. - -pub const BY_NAME: &'static [(&'static str, &'static [(char, char)])] = &[ - ("CR", CR), - ("Control", CONTROL), - ("Extend", EXTEND), - ("L", L), - ("LF", LF), - ("LV", LV), - ("LVT", LVT), - ("Prepend", PREPEND), - ("Regional_Indicator", REGIONAL_INDICATOR), - ("SpacingMark", SPACINGMARK), - ("T", T), - ("V", V), - ("ZWJ", ZWJ), -]; - -pub const CR: &'static [(char, char)] = &[('\r', '\r')]; - -pub const CONTROL: &'static [(char, char)] = &[ - ('\0', '\t'), - ('\u{b}', '\u{c}'), - ('\u{e}', '\u{1f}'), - ('\u{7f}', '\u{9f}'), - ('\u{ad}', '\u{ad}'), - ('\u{61c}', '\u{61c}'), - ('\u{180e}', '\u{180e}'), - ('\u{200b}', '\u{200b}'), - ('\u{200e}', '\u{200f}'), - ('\u{2028}', '\u{202e}'), - ('\u{2060}', '\u{206f}'), - ('\u{feff}', '\u{feff}'), - ('\u{fff0}', '\u{fffb}'), - ('\u{13430}', '\u{1343f}'), - ('\u{1bca0}', '\u{1bca3}'), - ('\u{1d173}', '\u{1d17a}'), - ('\u{e0000}', '\u{e001f}'), - ('\u{e0080}', '\u{e00ff}'), - ('\u{e01f0}', '\u{e0fff}'), -]; - -pub const EXTEND: &'static [(char, char)] = &[ - ('\u{300}', '\u{36f}'), - ('\u{483}', '\u{489}'), - ('\u{591}', '\u{5bd}'), - ('\u{5bf}', '\u{5bf}'), - ('\u{5c1}', '\u{5c2}'), - ('\u{5c4}', '\u{5c5}'), - ('\u{5c7}', '\u{5c7}'), - ('\u{610}', '\u{61a}'), - ('\u{64b}', '\u{65f}'), - ('\u{670}', '\u{670}'), - ('\u{6d6}', '\u{6dc}'), - ('\u{6df}', '\u{6e4}'), - ('\u{6e7}', '\u{6e8}'), - ('\u{6ea}', '\u{6ed}'), - ('\u{711}', '\u{711}'), - ('\u{730}', '\u{74a}'), - ('\u{7a6}', '\u{7b0}'), - ('\u{7eb}', '\u{7f3}'), - ('\u{7fd}', '\u{7fd}'), - ('\u{816}', '\u{819}'), - ('\u{81b}', '\u{823}'), - ('\u{825}', '\u{827}'), - ('\u{829}', '\u{82d}'), - ('\u{859}', '\u{85b}'), - ('\u{897}', '\u{89f}'), - ('\u{8ca}', '\u{8e1}'), - ('\u{8e3}', '\u{902}'), - ('\u{93a}', '\u{93a}'), - ('\u{93c}', '\u{93c}'), - ('\u{941}', '\u{948}'), - ('\u{94d}', '\u{94d}'), - ('\u{951}', '\u{957}'), - ('\u{962}', '\u{963}'), - ('\u{981}', '\u{981}'), - ('\u{9bc}', '\u{9bc}'), - ('\u{9be}', '\u{9be}'), - ('\u{9c1}', '\u{9c4}'), - ('\u{9cd}', '\u{9cd}'), - ('\u{9d7}', '\u{9d7}'), - ('\u{9e2}', '\u{9e3}'), - ('\u{9fe}', '\u{9fe}'), - ('\u{a01}', '\u{a02}'), - ('\u{a3c}', '\u{a3c}'), - ('\u{a41}', '\u{a42}'), - ('\u{a47}', '\u{a48}'), - ('\u{a4b}', '\u{a4d}'), - ('\u{a51}', '\u{a51}'), - ('\u{a70}', '\u{a71}'), - ('\u{a75}', '\u{a75}'), - ('\u{a81}', '\u{a82}'), - ('\u{abc}', '\u{abc}'), - ('\u{ac1}', '\u{ac5}'), - ('\u{ac7}', '\u{ac8}'), - ('\u{acd}', '\u{acd}'), - ('\u{ae2}', '\u{ae3}'), - ('\u{afa}', '\u{aff}'), - ('\u{b01}', '\u{b01}'), - ('\u{b3c}', 
'\u{b3c}'), - ('\u{b3e}', '\u{b3f}'), - ('\u{b41}', '\u{b44}'), - ('\u{b4d}', '\u{b4d}'), - ('\u{b55}', '\u{b57}'), - ('\u{b62}', '\u{b63}'), - ('\u{b82}', '\u{b82}'), - ('\u{bbe}', '\u{bbe}'), - ('\u{bc0}', '\u{bc0}'), - ('\u{bcd}', '\u{bcd}'), - ('\u{bd7}', '\u{bd7}'), - ('\u{c00}', '\u{c00}'), - ('\u{c04}', '\u{c04}'), - ('\u{c3c}', '\u{c3c}'), - ('\u{c3e}', '\u{c40}'), - ('\u{c46}', '\u{c48}'), - ('\u{c4a}', '\u{c4d}'), - ('\u{c55}', '\u{c56}'), - ('\u{c62}', '\u{c63}'), - ('\u{c81}', '\u{c81}'), - ('\u{cbc}', '\u{cbc}'), - ('\u{cbf}', '\u{cc0}'), - ('\u{cc2}', '\u{cc2}'), - ('\u{cc6}', '\u{cc8}'), - ('\u{cca}', '\u{ccd}'), - ('\u{cd5}', '\u{cd6}'), - ('\u{ce2}', '\u{ce3}'), - ('\u{d00}', '\u{d01}'), - ('\u{d3b}', '\u{d3c}'), - ('\u{d3e}', '\u{d3e}'), - ('\u{d41}', '\u{d44}'), - ('\u{d4d}', '\u{d4d}'), - ('\u{d57}', '\u{d57}'), - ('\u{d62}', '\u{d63}'), - ('\u{d81}', '\u{d81}'), - ('\u{dca}', '\u{dca}'), - ('\u{dcf}', '\u{dcf}'), - ('\u{dd2}', '\u{dd4}'), - ('\u{dd6}', '\u{dd6}'), - ('\u{ddf}', '\u{ddf}'), - ('\u{e31}', '\u{e31}'), - ('\u{e34}', '\u{e3a}'), - ('\u{e47}', '\u{e4e}'), - ('\u{eb1}', '\u{eb1}'), - ('\u{eb4}', '\u{ebc}'), - ('\u{ec8}', '\u{ece}'), - ('\u{f18}', '\u{f19}'), - ('\u{f35}', '\u{f35}'), - ('\u{f37}', '\u{f37}'), - ('\u{f39}', '\u{f39}'), - ('\u{f71}', '\u{f7e}'), - ('\u{f80}', '\u{f84}'), - ('\u{f86}', '\u{f87}'), - ('\u{f8d}', '\u{f97}'), - ('\u{f99}', '\u{fbc}'), - ('\u{fc6}', '\u{fc6}'), - ('\u{102d}', '\u{1030}'), - ('\u{1032}', '\u{1037}'), - ('\u{1039}', '\u{103a}'), - ('\u{103d}', '\u{103e}'), - ('\u{1058}', '\u{1059}'), - ('\u{105e}', '\u{1060}'), - ('\u{1071}', '\u{1074}'), - ('\u{1082}', '\u{1082}'), - ('\u{1085}', '\u{1086}'), - ('\u{108d}', '\u{108d}'), - ('\u{109d}', '\u{109d}'), - ('\u{135d}', '\u{135f}'), - ('\u{1712}', '\u{1715}'), - ('\u{1732}', '\u{1734}'), - ('\u{1752}', '\u{1753}'), - ('\u{1772}', '\u{1773}'), - ('\u{17b4}', '\u{17b5}'), - ('\u{17b7}', '\u{17bd}'), - ('\u{17c6}', '\u{17c6}'), - ('\u{17c9}', '\u{17d3}'), - ('\u{17dd}', '\u{17dd}'), - ('\u{180b}', '\u{180d}'), - ('\u{180f}', '\u{180f}'), - ('\u{1885}', '\u{1886}'), - ('\u{18a9}', '\u{18a9}'), - ('\u{1920}', '\u{1922}'), - ('\u{1927}', '\u{1928}'), - ('\u{1932}', '\u{1932}'), - ('\u{1939}', '\u{193b}'), - ('\u{1a17}', '\u{1a18}'), - ('\u{1a1b}', '\u{1a1b}'), - ('\u{1a56}', '\u{1a56}'), - ('\u{1a58}', '\u{1a5e}'), - ('\u{1a60}', '\u{1a60}'), - ('\u{1a62}', '\u{1a62}'), - ('\u{1a65}', '\u{1a6c}'), - ('\u{1a73}', '\u{1a7c}'), - ('\u{1a7f}', '\u{1a7f}'), - ('\u{1ab0}', '\u{1ace}'), - ('\u{1b00}', '\u{1b03}'), - ('\u{1b34}', '\u{1b3d}'), - ('\u{1b42}', '\u{1b44}'), - ('\u{1b6b}', '\u{1b73}'), - ('\u{1b80}', '\u{1b81}'), - ('\u{1ba2}', '\u{1ba5}'), - ('\u{1ba8}', '\u{1bad}'), - ('\u{1be6}', '\u{1be6}'), - ('\u{1be8}', '\u{1be9}'), - ('\u{1bed}', '\u{1bed}'), - ('\u{1bef}', '\u{1bf3}'), - ('\u{1c2c}', '\u{1c33}'), - ('\u{1c36}', '\u{1c37}'), - ('\u{1cd0}', '\u{1cd2}'), - ('\u{1cd4}', '\u{1ce0}'), - ('\u{1ce2}', '\u{1ce8}'), - ('\u{1ced}', '\u{1ced}'), - ('\u{1cf4}', '\u{1cf4}'), - ('\u{1cf8}', '\u{1cf9}'), - ('\u{1dc0}', '\u{1dff}'), - ('\u{200c}', '\u{200c}'), - ('\u{20d0}', '\u{20f0}'), - ('\u{2cef}', '\u{2cf1}'), - ('\u{2d7f}', '\u{2d7f}'), - ('\u{2de0}', '\u{2dff}'), - ('\u{302a}', '\u{302f}'), - ('\u{3099}', '\u{309a}'), - ('\u{a66f}', '\u{a672}'), - ('\u{a674}', '\u{a67d}'), - ('\u{a69e}', '\u{a69f}'), - ('\u{a6f0}', '\u{a6f1}'), - ('\u{a802}', '\u{a802}'), - ('\u{a806}', '\u{a806}'), - ('\u{a80b}', '\u{a80b}'), - ('\u{a825}', '\u{a826}'), - ('\u{a82c}', '\u{a82c}'), - 
('\u{a8c4}', '\u{a8c5}'), - ('\u{a8e0}', '\u{a8f1}'), - ('\u{a8ff}', '\u{a8ff}'), - ('\u{a926}', '\u{a92d}'), - ('\u{a947}', '\u{a951}'), - ('\u{a953}', '\u{a953}'), - ('\u{a980}', '\u{a982}'), - ('\u{a9b3}', '\u{a9b3}'), - ('\u{a9b6}', '\u{a9b9}'), - ('\u{a9bc}', '\u{a9bd}'), - ('\u{a9c0}', '\u{a9c0}'), - ('\u{a9e5}', '\u{a9e5}'), - ('\u{aa29}', '\u{aa2e}'), - ('\u{aa31}', '\u{aa32}'), - ('\u{aa35}', '\u{aa36}'), - ('\u{aa43}', '\u{aa43}'), - ('\u{aa4c}', '\u{aa4c}'), - ('\u{aa7c}', '\u{aa7c}'), - ('\u{aab0}', '\u{aab0}'), - ('\u{aab2}', '\u{aab4}'), - ('\u{aab7}', '\u{aab8}'), - ('\u{aabe}', '\u{aabf}'), - ('\u{aac1}', '\u{aac1}'), - ('\u{aaec}', '\u{aaed}'), - ('\u{aaf6}', '\u{aaf6}'), - ('\u{abe5}', '\u{abe5}'), - ('\u{abe8}', '\u{abe8}'), - ('\u{abed}', '\u{abed}'), - ('\u{fb1e}', '\u{fb1e}'), - ('\u{fe00}', '\u{fe0f}'), - ('\u{fe20}', '\u{fe2f}'), - ('\u{ff9e}', '\u{ff9f}'), - ('\u{101fd}', '\u{101fd}'), - ('\u{102e0}', '\u{102e0}'), - ('\u{10376}', '\u{1037a}'), - ('\u{10a01}', '\u{10a03}'), - ('\u{10a05}', '\u{10a06}'), - ('\u{10a0c}', '\u{10a0f}'), - ('\u{10a38}', '\u{10a3a}'), - ('\u{10a3f}', '\u{10a3f}'), - ('\u{10ae5}', '\u{10ae6}'), - ('\u{10d24}', '\u{10d27}'), - ('\u{10d69}', '\u{10d6d}'), - ('\u{10eab}', '\u{10eac}'), - ('\u{10efc}', '\u{10eff}'), - ('\u{10f46}', '\u{10f50}'), - ('\u{10f82}', '\u{10f85}'), - ('\u{11001}', '\u{11001}'), - ('\u{11038}', '\u{11046}'), - ('\u{11070}', '\u{11070}'), - ('\u{11073}', '\u{11074}'), - ('\u{1107f}', '\u{11081}'), - ('\u{110b3}', '\u{110b6}'), - ('\u{110b9}', '\u{110ba}'), - ('\u{110c2}', '\u{110c2}'), - ('\u{11100}', '\u{11102}'), - ('\u{11127}', '\u{1112b}'), - ('\u{1112d}', '\u{11134}'), - ('\u{11173}', '\u{11173}'), - ('\u{11180}', '\u{11181}'), - ('\u{111b6}', '\u{111be}'), - ('\u{111c0}', '\u{111c0}'), - ('\u{111c9}', '\u{111cc}'), - ('\u{111cf}', '\u{111cf}'), - ('\u{1122f}', '\u{11231}'), - ('\u{11234}', '\u{11237}'), - ('\u{1123e}', '\u{1123e}'), - ('\u{11241}', '\u{11241}'), - ('\u{112df}', '\u{112df}'), - ('\u{112e3}', '\u{112ea}'), - ('\u{11300}', '\u{11301}'), - ('\u{1133b}', '\u{1133c}'), - ('\u{1133e}', '\u{1133e}'), - ('\u{11340}', '\u{11340}'), - ('\u{1134d}', '\u{1134d}'), - ('\u{11357}', '\u{11357}'), - ('\u{11366}', '\u{1136c}'), - ('\u{11370}', '\u{11374}'), - ('\u{113b8}', '\u{113b8}'), - ('\u{113bb}', '\u{113c0}'), - ('\u{113c2}', '\u{113c2}'), - ('\u{113c5}', '\u{113c5}'), - ('\u{113c7}', '\u{113c9}'), - ('\u{113ce}', '\u{113d0}'), - ('\u{113d2}', '\u{113d2}'), - ('\u{113e1}', '\u{113e2}'), - ('\u{11438}', '\u{1143f}'), - ('\u{11442}', '\u{11444}'), - ('\u{11446}', '\u{11446}'), - ('\u{1145e}', '\u{1145e}'), - ('\u{114b0}', '\u{114b0}'), - ('\u{114b3}', '\u{114b8}'), - ('\u{114ba}', '\u{114ba}'), - ('\u{114bd}', '\u{114bd}'), - ('\u{114bf}', '\u{114c0}'), - ('\u{114c2}', '\u{114c3}'), - ('\u{115af}', '\u{115af}'), - ('\u{115b2}', '\u{115b5}'), - ('\u{115bc}', '\u{115bd}'), - ('\u{115bf}', '\u{115c0}'), - ('\u{115dc}', '\u{115dd}'), - ('\u{11633}', '\u{1163a}'), - ('\u{1163d}', '\u{1163d}'), - ('\u{1163f}', '\u{11640}'), - ('\u{116ab}', '\u{116ab}'), - ('\u{116ad}', '\u{116ad}'), - ('\u{116b0}', '\u{116b7}'), - ('\u{1171d}', '\u{1171d}'), - ('\u{1171f}', '\u{1171f}'), - ('\u{11722}', '\u{11725}'), - ('\u{11727}', '\u{1172b}'), - ('\u{1182f}', '\u{11837}'), - ('\u{11839}', '\u{1183a}'), - ('\u{11930}', '\u{11930}'), - ('\u{1193b}', '\u{1193e}'), - ('\u{11943}', '\u{11943}'), - ('\u{119d4}', '\u{119d7}'), - ('\u{119da}', '\u{119db}'), - ('\u{119e0}', '\u{119e0}'), - ('\u{11a01}', '\u{11a0a}'), - ('\u{11a33}', 
'\u{11a38}'), - ('\u{11a3b}', '\u{11a3e}'), - ('\u{11a47}', '\u{11a47}'), - ('\u{11a51}', '\u{11a56}'), - ('\u{11a59}', '\u{11a5b}'), - ('\u{11a8a}', '\u{11a96}'), - ('\u{11a98}', '\u{11a99}'), - ('\u{11c30}', '\u{11c36}'), - ('\u{11c38}', '\u{11c3d}'), - ('\u{11c3f}', '\u{11c3f}'), - ('\u{11c92}', '\u{11ca7}'), - ('\u{11caa}', '\u{11cb0}'), - ('\u{11cb2}', '\u{11cb3}'), - ('\u{11cb5}', '\u{11cb6}'), - ('\u{11d31}', '\u{11d36}'), - ('\u{11d3a}', '\u{11d3a}'), - ('\u{11d3c}', '\u{11d3d}'), - ('\u{11d3f}', '\u{11d45}'), - ('\u{11d47}', '\u{11d47}'), - ('\u{11d90}', '\u{11d91}'), - ('\u{11d95}', '\u{11d95}'), - ('\u{11d97}', '\u{11d97}'), - ('\u{11ef3}', '\u{11ef4}'), - ('\u{11f00}', '\u{11f01}'), - ('\u{11f36}', '\u{11f3a}'), - ('\u{11f40}', '\u{11f42}'), - ('\u{11f5a}', '\u{11f5a}'), - ('\u{13440}', '\u{13440}'), - ('\u{13447}', '\u{13455}'), - ('\u{1611e}', '\u{16129}'), - ('\u{1612d}', '\u{1612f}'), - ('\u{16af0}', '\u{16af4}'), - ('\u{16b30}', '\u{16b36}'), - ('\u{16f4f}', '\u{16f4f}'), - ('\u{16f8f}', '\u{16f92}'), - ('\u{16fe4}', '\u{16fe4}'), - ('\u{16ff0}', '\u{16ff1}'), - ('\u{1bc9d}', '\u{1bc9e}'), - ('\u{1cf00}', '\u{1cf2d}'), - ('\u{1cf30}', '\u{1cf46}'), - ('\u{1d165}', '\u{1d169}'), - ('\u{1d16d}', '\u{1d172}'), - ('\u{1d17b}', '\u{1d182}'), - ('\u{1d185}', '\u{1d18b}'), - ('\u{1d1aa}', '\u{1d1ad}'), - ('\u{1d242}', '\u{1d244}'), - ('\u{1da00}', '\u{1da36}'), - ('\u{1da3b}', '\u{1da6c}'), - ('\u{1da75}', '\u{1da75}'), - ('\u{1da84}', '\u{1da84}'), - ('\u{1da9b}', '\u{1da9f}'), - ('\u{1daa1}', '\u{1daaf}'), - ('\u{1e000}', '\u{1e006}'), - ('\u{1e008}', '\u{1e018}'), - ('\u{1e01b}', '\u{1e021}'), - ('\u{1e023}', '\u{1e024}'), - ('\u{1e026}', '\u{1e02a}'), - ('\u{1e08f}', '\u{1e08f}'), - ('\u{1e130}', '\u{1e136}'), - ('\u{1e2ae}', '\u{1e2ae}'), - ('\u{1e2ec}', '\u{1e2ef}'), - ('\u{1e4ec}', '\u{1e4ef}'), - ('\u{1e5ee}', '\u{1e5ef}'), - ('\u{1e8d0}', '\u{1e8d6}'), - ('\u{1e944}', '\u{1e94a}'), - ('🏻', '🏿'), - ('\u{e0020}', '\u{e007f}'), - ('\u{e0100}', '\u{e01ef}'), -]; - -pub const L: &'static [(char, char)] = &[('ᄀ', 'ᅟ'), ('ꥠ', 'ꥼ')]; - -pub const LF: &'static [(char, char)] = &[('\n', '\n')]; - -pub const LV: &'static [(char, char)] = &[ - ('가', '가'), - ('개', '개'), - ('갸', '갸'), - ('걔', '걔'), - ('거', '거'), - ('게', '게'), - ('겨', '겨'), - ('계', '계'), - ('고', '고'), - ('과', '과'), - ('괘', '괘'), - ('괴', '괴'), - ('교', '교'), - ('구', '구'), - ('궈', '궈'), - ('궤', '궤'), - ('귀', '귀'), - ('규', '규'), - ('그', '그'), - ('긔', '긔'), - ('기', '기'), - ('까', '까'), - ('깨', '깨'), - ('꺄', '꺄'), - ('꺠', '꺠'), - ('꺼', '꺼'), - ('께', '께'), - ('껴', '껴'), - ('꼐', '꼐'), - ('꼬', '꼬'), - ('꽈', '꽈'), - ('꽤', '꽤'), - ('꾀', '꾀'), - ('꾜', '꾜'), - ('꾸', '꾸'), - ('꿔', '꿔'), - ('꿰', '꿰'), - ('뀌', '뀌'), - ('뀨', '뀨'), - ('끄', '끄'), - ('끠', '끠'), - ('끼', '끼'), - ('나', '나'), - ('내', '내'), - ('냐', '냐'), - ('냬', '냬'), - ('너', '너'), - ('네', '네'), - ('녀', '녀'), - ('녜', '녜'), - ('노', '노'), - ('놔', '놔'), - ('놰', '놰'), - ('뇌', '뇌'), - ('뇨', '뇨'), - ('누', '누'), - ('눠', '눠'), - ('눼', '눼'), - ('뉘', '뉘'), - ('뉴', '뉴'), - ('느', '느'), - ('늬', '늬'), - ('니', '니'), - ('다', '다'), - ('대', '대'), - ('댜', '댜'), - ('댸', '댸'), - ('더', '더'), - ('데', '데'), - ('뎌', '뎌'), - ('뎨', '뎨'), - ('도', '도'), - ('돠', '돠'), - ('돼', '돼'), - ('되', '되'), - ('됴', '됴'), - ('두', '두'), - ('둬', '둬'), - ('뒈', '뒈'), - ('뒤', '뒤'), - ('듀', '듀'), - ('드', '드'), - ('듸', '듸'), - ('디', '디'), - ('따', '따'), - ('때', '때'), - ('땨', '땨'), - ('떄', '떄'), - ('떠', '떠'), - ('떼', '떼'), - ('뗘', '뗘'), - ('뗴', '뗴'), - ('또', '또'), - ('똬', '똬'), - ('뙈', '뙈'), - ('뙤', '뙤'), - ('뚀', '뚀'), - ('뚜', 
'뚜'), - ('뚸', '뚸'), - ('뛔', '뛔'), - ('뛰', '뛰'), - ('뜌', '뜌'), - ('뜨', '뜨'), - ('띄', '띄'), - ('띠', '띠'), - ('라', '라'), - ('래', '래'), - ('랴', '랴'), - ('럐', '럐'), - ('러', '러'), - ('레', '레'), - ('려', '려'), - ('례', '례'), - ('로', '로'), - ('롸', '롸'), - ('뢔', '뢔'), - ('뢰', '뢰'), - ('료', '료'), - ('루', '루'), - ('뤄', '뤄'), - ('뤠', '뤠'), - ('뤼', '뤼'), - ('류', '류'), - ('르', '르'), - ('릐', '릐'), - ('리', '리'), - ('마', '마'), - ('매', '매'), - ('먀', '먀'), - ('먜', '먜'), - ('머', '머'), - ('메', '메'), - ('며', '며'), - ('몌', '몌'), - ('모', '모'), - ('뫄', '뫄'), - ('뫠', '뫠'), - ('뫼', '뫼'), - ('묘', '묘'), - ('무', '무'), - ('뭐', '뭐'), - ('뭬', '뭬'), - ('뮈', '뮈'), - ('뮤', '뮤'), - ('므', '므'), - ('믜', '믜'), - ('미', '미'), - ('바', '바'), - ('배', '배'), - ('뱌', '뱌'), - ('뱨', '뱨'), - ('버', '버'), - ('베', '베'), - ('벼', '벼'), - ('볘', '볘'), - ('보', '보'), - ('봐', '봐'), - ('봬', '봬'), - ('뵈', '뵈'), - ('뵤', '뵤'), - ('부', '부'), - ('붜', '붜'), - ('붸', '붸'), - ('뷔', '뷔'), - ('뷰', '뷰'), - ('브', '브'), - ('븨', '븨'), - ('비', '비'), - ('빠', '빠'), - ('빼', '빼'), - ('뺘', '뺘'), - ('뺴', '뺴'), - ('뻐', '뻐'), - ('뻬', '뻬'), - ('뼈', '뼈'), - ('뼤', '뼤'), - ('뽀', '뽀'), - ('뽜', '뽜'), - ('뽸', '뽸'), - ('뾔', '뾔'), - ('뾰', '뾰'), - ('뿌', '뿌'), - ('뿨', '뿨'), - ('쀄', '쀄'), - ('쀠', '쀠'), - ('쀼', '쀼'), - ('쁘', '쁘'), - ('쁴', '쁴'), - ('삐', '삐'), - ('사', '사'), - ('새', '새'), - ('샤', '샤'), - ('섀', '섀'), - ('서', '서'), - ('세', '세'), - ('셔', '셔'), - ('셰', '셰'), - ('소', '소'), - ('솨', '솨'), - ('쇄', '쇄'), - ('쇠', '쇠'), - ('쇼', '쇼'), - ('수', '수'), - ('숴', '숴'), - ('쉐', '쉐'), - ('쉬', '쉬'), - ('슈', '슈'), - ('스', '스'), - ('싀', '싀'), - ('시', '시'), - ('싸', '싸'), - ('쌔', '쌔'), - ('쌰', '쌰'), - ('썌', '썌'), - ('써', '써'), - ('쎄', '쎄'), - ('쎠', '쎠'), - ('쎼', '쎼'), - ('쏘', '쏘'), - ('쏴', '쏴'), - ('쐐', '쐐'), - ('쐬', '쐬'), - ('쑈', '쑈'), - ('쑤', '쑤'), - ('쒀', '쒀'), - ('쒜', '쒜'), - ('쒸', '쒸'), - ('쓔', '쓔'), - ('쓰', '쓰'), - ('씌', '씌'), - ('씨', '씨'), - ('아', '아'), - ('애', '애'), - ('야', '야'), - ('얘', '얘'), - ('어', '어'), - ('에', '에'), - ('여', '여'), - ('예', '예'), - ('오', '오'), - ('와', '와'), - ('왜', '왜'), - ('외', '외'), - ('요', '요'), - ('우', '우'), - ('워', '워'), - ('웨', '웨'), - ('위', '위'), - ('유', '유'), - ('으', '으'), - ('의', '의'), - ('이', '이'), - ('자', '자'), - ('재', '재'), - ('쟈', '쟈'), - ('쟤', '쟤'), - ('저', '저'), - ('제', '제'), - ('져', '져'), - ('졔', '졔'), - ('조', '조'), - ('좌', '좌'), - ('좨', '좨'), - ('죄', '죄'), - ('죠', '죠'), - ('주', '주'), - ('줘', '줘'), - ('줴', '줴'), - ('쥐', '쥐'), - ('쥬', '쥬'), - ('즈', '즈'), - ('즤', '즤'), - ('지', '지'), - ('짜', '짜'), - ('째', '째'), - ('쨔', '쨔'), - ('쨰', '쨰'), - ('쩌', '쩌'), - ('쩨', '쩨'), - ('쪄', '쪄'), - ('쪠', '쪠'), - ('쪼', '쪼'), - ('쫘', '쫘'), - ('쫴', '쫴'), - ('쬐', '쬐'), - ('쬬', '쬬'), - ('쭈', '쭈'), - ('쭤', '쭤'), - ('쮀', '쮀'), - ('쮜', '쮜'), - ('쮸', '쮸'), - ('쯔', '쯔'), - ('쯰', '쯰'), - ('찌', '찌'), - ('차', '차'), - ('채', '채'), - ('챠', '챠'), - ('챼', '챼'), - ('처', '처'), - ('체', '체'), - ('쳐', '쳐'), - ('쳬', '쳬'), - ('초', '초'), - ('촤', '촤'), - ('쵀', '쵀'), - ('최', '최'), - ('쵸', '쵸'), - ('추', '추'), - ('춰', '춰'), - ('췌', '췌'), - ('취', '취'), - ('츄', '츄'), - ('츠', '츠'), - ('츼', '츼'), - ('치', '치'), - ('카', '카'), - ('캐', '캐'), - ('캬', '캬'), - ('컈', '컈'), - ('커', '커'), - ('케', '케'), - ('켜', '켜'), - ('켸', '켸'), - ('코', '코'), - ('콰', '콰'), - ('쾌', '쾌'), - ('쾨', '쾨'), - ('쿄', '쿄'), - ('쿠', '쿠'), - ('쿼', '쿼'), - ('퀘', '퀘'), - ('퀴', '퀴'), - ('큐', '큐'), - ('크', '크'), - ('킈', '킈'), - ('키', '키'), - ('타', '타'), - ('태', '태'), - ('탸', '탸'), - ('턔', '턔'), - ('터', '터'), - ('테', '테'), - ('텨', '텨'), - ('톄', '톄'), - ('토', '토'), - ('톼', '톼'), - ('퇘', '퇘'), - ('퇴', '퇴'), - ('툐', '툐'), - ('투', '투'), - ('퉈', '퉈'), - 
('퉤', '퉤'), - ('튀', '튀'), - ('튜', '튜'), - ('트', '트'), - ('틔', '틔'), - ('티', '티'), - ('파', '파'), - ('패', '패'), - ('퍄', '퍄'), - ('퍠', '퍠'), - ('퍼', '퍼'), - ('페', '페'), - ('펴', '펴'), - ('폐', '폐'), - ('포', '포'), - ('퐈', '퐈'), - ('퐤', '퐤'), - ('푀', '푀'), - ('표', '표'), - ('푸', '푸'), - ('풔', '풔'), - ('풰', '풰'), - ('퓌', '퓌'), - ('퓨', '퓨'), - ('프', '프'), - ('픠', '픠'), - ('피', '피'), - ('하', '하'), - ('해', '해'), - ('햐', '햐'), - ('햬', '햬'), - ('허', '허'), - ('헤', '헤'), - ('혀', '혀'), - ('혜', '혜'), - ('호', '호'), - ('화', '화'), - ('홰', '홰'), - ('회', '회'), - ('효', '효'), - ('후', '후'), - ('훠', '훠'), - ('훼', '훼'), - ('휘', '휘'), - ('휴', '휴'), - ('흐', '흐'), - ('희', '희'), - ('히', '히'), -]; - -pub const LVT: &'static [(char, char)] = &[ - ('각', '갛'), - ('객', '갷'), - ('갹', '걓'), - ('걕', '걯'), - ('걱', '겋'), - ('겍', '겧'), - ('격', '곃'), - ('곅', '곟'), - ('곡', '곻'), - ('곽', '괗'), - ('괙', '괳'), - ('괵', '굏'), - ('굑', '굫'), - ('국', '궇'), - ('궉', '궣'), - ('궥', '궿'), - ('귁', '귛'), - ('귝', '귷'), - ('극', '긓'), - ('긕', '긯'), - ('긱', '깋'), - ('깍', '깧'), - ('깩', '꺃'), - ('꺅', '꺟'), - ('꺡', '꺻'), - ('꺽', '껗'), - ('껙', '껳'), - ('껵', '꼏'), - ('꼑', '꼫'), - ('꼭', '꽇'), - ('꽉', '꽣'), - ('꽥', '꽿'), - ('꾁', '꾛'), - ('꾝', '꾷'), - ('꾹', '꿓'), - ('꿕', '꿯'), - ('꿱', '뀋'), - ('뀍', '뀧'), - ('뀩', '끃'), - ('끅', '끟'), - ('끡', '끻'), - ('끽', '낗'), - ('낙', '낳'), - ('낵', '냏'), - ('냑', '냫'), - ('냭', '넇'), - ('넉', '넣'), - ('넥', '넿'), - ('녁', '녛'), - ('녝', '녷'), - ('녹', '놓'), - ('놕', '놯'), - ('놱', '뇋'), - ('뇍', '뇧'), - ('뇩', '눃'), - ('눅', '눟'), - ('눡', '눻'), - ('눽', '뉗'), - ('뉙', '뉳'), - ('뉵', '늏'), - ('늑', '늫'), - ('늭', '닇'), - ('닉', '닣'), - ('닥', '닿'), - ('댁', '댛'), - ('댝', '댷'), - ('댹', '덓'), - ('덕', '덯'), - ('덱', '뎋'), - ('뎍', '뎧'), - ('뎩', '돃'), - ('독', '돟'), - ('돡', '돻'), - ('돽', '됗'), - ('됙', '됳'), - ('됵', '둏'), - ('둑', '둫'), - ('둭', '뒇'), - ('뒉', '뒣'), - ('뒥', '뒿'), - ('듁', '듛'), - ('득', '듷'), - ('듹', '딓'), - ('딕', '딯'), - ('딱', '땋'), - ('땍', '땧'), - ('땩', '떃'), - ('떅', '떟'), - ('떡', '떻'), - ('떽', '뗗'), - ('뗙', '뗳'), - ('뗵', '똏'), - ('똑', '똫'), - ('똭', '뙇'), - ('뙉', '뙣'), - ('뙥', '뙿'), - ('뚁', '뚛'), - ('뚝', '뚷'), - ('뚹', '뛓'), - ('뛕', '뛯'), - ('뛱', '뜋'), - ('뜍', '뜧'), - ('뜩', '띃'), - ('띅', '띟'), - ('띡', '띻'), - ('락', '랗'), - ('랙', '랳'), - ('략', '럏'), - ('럑', '럫'), - ('럭', '렇'), - ('렉', '렣'), - ('력', '렿'), - ('롁', '롛'), - ('록', '롷'), - ('롹', '뢓'), - ('뢕', '뢯'), - ('뢱', '룋'), - ('룍', '룧'), - ('룩', '뤃'), - ('뤅', '뤟'), - ('뤡', '뤻'), - ('뤽', '륗'), - ('륙', '륳'), - ('륵', '릏'), - ('릑', '릫'), - ('릭', '맇'), - ('막', '맣'), - ('맥', '맿'), - ('먁', '먛'), - ('먝', '먷'), - ('먹', '멓'), - ('멕', '멯'), - ('멱', '몋'), - ('몍', '몧'), - ('목', '뫃'), - ('뫅', '뫟'), - ('뫡', '뫻'), - ('뫽', '묗'), - ('묙', '묳'), - ('묵', '뭏'), - ('뭑', '뭫'), - ('뭭', '뮇'), - ('뮉', '뮣'), - ('뮥', '뮿'), - ('믁', '믛'), - ('믝', '믷'), - ('믹', '밓'), - ('박', '밯'), - ('백', '뱋'), - ('뱍', '뱧'), - ('뱩', '벃'), - ('벅', '벟'), - ('벡', '벻'), - ('벽', '볗'), - ('볙', '볳'), - ('복', '봏'), - ('봑', '봫'), - ('봭', '뵇'), - ('뵉', '뵣'), - ('뵥', '뵿'), - ('북', '붛'), - ('붝', '붷'), - ('붹', '뷓'), - ('뷕', '뷯'), - ('뷱', '븋'), - ('븍', '븧'), - ('븩', '빃'), - ('빅', '빟'), - ('빡', '빻'), - ('빽', '뺗'), - ('뺙', '뺳'), - ('뺵', '뻏'), - ('뻑', '뻫'), - ('뻭', '뼇'), - ('뼉', '뼣'), - ('뼥', '뼿'), - ('뽁', '뽛'), - ('뽝', '뽷'), - ('뽹', '뾓'), - ('뾕', '뾯'), - ('뾱', '뿋'), - ('뿍', '뿧'), - ('뿩', '쀃'), - ('쀅', '쀟'), - ('쀡', '쀻'), - ('쀽', '쁗'), - ('쁙', '쁳'), - ('쁵', '삏'), - ('삑', '삫'), - ('삭', '샇'), - ('색', '샣'), - ('샥', '샿'), - ('섁', '섛'), - ('석', '섷'), - ('섹', '셓'), - ('셕', '셯'), - ('셱', '솋'), - ('속', '솧'), - ('솩', '쇃'), - ('쇅', '쇟'), - ('쇡', '쇻'), - ('쇽', '숗'), - 
('숙', '숳'), - ('숵', '쉏'), - ('쉑', '쉫'), - ('쉭', '슇'), - ('슉', '슣'), - ('슥', '슿'), - ('싁', '싛'), - ('식', '싷'), - ('싹', '쌓'), - ('쌕', '쌯'), - ('쌱', '썋'), - ('썍', '썧'), - ('썩', '쎃'), - ('쎅', '쎟'), - ('쎡', '쎻'), - ('쎽', '쏗'), - ('쏙', '쏳'), - ('쏵', '쐏'), - ('쐑', '쐫'), - ('쐭', '쑇'), - ('쑉', '쑣'), - ('쑥', '쑿'), - ('쒁', '쒛'), - ('쒝', '쒷'), - ('쒹', '쓓'), - ('쓕', '쓯'), - ('쓱', '씋'), - ('씍', '씧'), - ('씩', '앃'), - ('악', '앟'), - ('액', '앻'), - ('약', '얗'), - ('얙', '얳'), - ('억', '엏'), - ('엑', '엫'), - ('역', '옇'), - ('옉', '옣'), - ('옥', '옿'), - ('왁', '왛'), - ('왝', '왷'), - ('왹', '욓'), - ('욕', '욯'), - ('욱', '웋'), - ('웍', '웧'), - ('웩', '윃'), - ('윅', '윟'), - ('육', '윻'), - ('윽', '읗'), - ('읙', '읳'), - ('익', '잏'), - ('작', '잫'), - ('잭', '쟇'), - ('쟉', '쟣'), - ('쟥', '쟿'), - ('적', '젛'), - ('젝', '젷'), - ('젹', '졓'), - ('졕', '졯'), - ('족', '좋'), - ('좍', '좧'), - ('좩', '죃'), - ('죅', '죟'), - ('죡', '죻'), - ('죽', '줗'), - ('줙', '줳'), - ('줵', '쥏'), - ('쥑', '쥫'), - ('쥭', '즇'), - ('즉', '즣'), - ('즥', '즿'), - ('직', '짛'), - ('짝', '짷'), - ('짹', '쨓'), - ('쨕', '쨯'), - ('쨱', '쩋'), - ('쩍', '쩧'), - ('쩩', '쪃'), - ('쪅', '쪟'), - ('쪡', '쪻'), - ('쪽', '쫗'), - ('쫙', '쫳'), - ('쫵', '쬏'), - ('쬑', '쬫'), - ('쬭', '쭇'), - ('쭉', '쭣'), - ('쭥', '쭿'), - ('쮁', '쮛'), - ('쮝', '쮷'), - ('쮹', '쯓'), - ('쯕', '쯯'), - ('쯱', '찋'), - ('찍', '찧'), - ('착', '챃'), - ('책', '챟'), - ('챡', '챻'), - ('챽', '첗'), - ('척', '첳'), - ('첵', '쳏'), - ('쳑', '쳫'), - ('쳭', '촇'), - ('촉', '촣'), - ('촥', '촿'), - ('쵁', '쵛'), - ('쵝', '쵷'), - ('쵹', '춓'), - ('축', '춯'), - ('춱', '췋'), - ('췍', '췧'), - ('췩', '츃'), - ('츅', '츟'), - ('측', '츻'), - ('츽', '칗'), - ('칙', '칳'), - ('칵', '캏'), - ('캑', '캫'), - ('캭', '컇'), - ('컉', '컣'), - ('컥', '컿'), - ('켁', '켛'), - ('켝', '켷'), - ('켹', '콓'), - ('콕', '콯'), - ('콱', '쾋'), - ('쾍', '쾧'), - ('쾩', '쿃'), - ('쿅', '쿟'), - ('쿡', '쿻'), - ('쿽', '퀗'), - ('퀙', '퀳'), - ('퀵', '큏'), - ('큑', '큫'), - ('큭', '킇'), - ('킉', '킣'), - ('킥', '킿'), - ('탁', '탛'), - ('택', '탷'), - ('탹', '턓'), - ('턕', '턯'), - ('턱', '텋'), - ('텍', '텧'), - ('텩', '톃'), - ('톅', '톟'), - ('톡', '톻'), - ('톽', '퇗'), - ('퇙', '퇳'), - ('퇵', '툏'), - ('툑', '툫'), - ('툭', '퉇'), - ('퉉', '퉣'), - ('퉥', '퉿'), - ('튁', '튛'), - ('튝', '튷'), - ('특', '틓'), - ('틕', '틯'), - ('틱', '팋'), - ('팍', '팧'), - ('팩', '퍃'), - ('퍅', '퍟'), - ('퍡', '퍻'), - ('퍽', '펗'), - ('펙', '펳'), - ('펵', '폏'), - ('폑', '폫'), - ('폭', '퐇'), - ('퐉', '퐣'), - ('퐥', '퐿'), - ('푁', '푛'), - ('푝', '푷'), - ('푹', '풓'), - ('풕', '풯'), - ('풱', '퓋'), - ('퓍', '퓧'), - ('퓩', '픃'), - ('픅', '픟'), - ('픡', '픻'), - ('픽', '핗'), - ('학', '핳'), - ('핵', '햏'), - ('햑', '햫'), - ('햭', '헇'), - ('헉', '헣'), - ('헥', '헿'), - ('혁', '혛'), - ('혝', '혷'), - ('혹', '홓'), - ('확', '홯'), - ('홱', '횋'), - ('획', '횧'), - ('횩', '훃'), - ('훅', '훟'), - ('훡', '훻'), - ('훽', '휗'), - ('휙', '휳'), - ('휵', '흏'), - ('흑', '흫'), - ('흭', '힇'), - ('힉', '힣'), -]; - -pub const PREPEND: &'static [(char, char)] = &[ - ('\u{600}', '\u{605}'), - ('\u{6dd}', '\u{6dd}'), - ('\u{70f}', '\u{70f}'), - ('\u{890}', '\u{891}'), - ('\u{8e2}', '\u{8e2}'), - ('ൎ', 'ൎ'), - ('\u{110bd}', '\u{110bd}'), - ('\u{110cd}', '\u{110cd}'), - ('𑇂', '𑇃'), - ('𑏑', '𑏑'), - ('𑤿', '𑤿'), - ('𑥁', '𑥁'), - ('𑨺', '𑨺'), - ('𑪄', '𑪉'), - ('𑵆', '𑵆'), - ('𑼂', '𑼂'), -]; - -pub const REGIONAL_INDICATOR: &'static [(char, char)] = &[('🇦', '🇿')]; - -pub const SPACINGMARK: &'static [(char, char)] = &[ - ('ः', 'ः'), - ('ऻ', 'ऻ'), - ('ा', 'ी'), - ('ॉ', 'ौ'), - ('ॎ', 'ॏ'), - ('ং', 'ঃ'), - ('ি', 'ী'), - ('ে', 'ৈ'), - ('ো', 'ৌ'), - ('ਃ', 'ਃ'), - ('ਾ', 'ੀ'), - ('ઃ', 'ઃ'), - ('ા', 'ી'), - ('ૉ', 'ૉ'), - ('ો', 'ૌ'), - ('ଂ', 'ଃ'), - ('ୀ', 'ୀ'), - ('େ', 'ୈ'), - ('ୋ', 'ୌ'), - ('ி', 'ி'), - ('ு', 'ூ'), 
- ('ெ', 'ை'), - ('ொ', 'ௌ'), - ('ఁ', 'ః'), - ('ు', 'ౄ'), - ('ಂ', 'ಃ'), - ('ಾ', 'ಾ'), - ('ು', 'ು'), - ('ೃ', 'ೄ'), - ('ೳ', 'ೳ'), - ('ം', 'ഃ'), - ('ി', 'ീ'), - ('െ', 'ൈ'), - ('ൊ', 'ൌ'), - ('ං', 'ඃ'), - ('ැ', 'ෑ'), - ('ෘ', 'ෞ'), - ('ෲ', 'ෳ'), - ('ำ', 'ำ'), - ('ຳ', 'ຳ'), - ('༾', '༿'), - ('ཿ', 'ཿ'), - ('ေ', 'ေ'), - ('ျ', 'ြ'), - ('ၖ', 'ၗ'), - ('ႄ', 'ႄ'), - ('ា', 'ា'), - ('ើ', 'ៅ'), - ('ះ', 'ៈ'), - ('ᤣ', 'ᤦ'), - ('ᤩ', 'ᤫ'), - ('ᤰ', 'ᤱ'), - ('ᤳ', 'ᤸ'), - ('ᨙ', 'ᨚ'), - ('ᩕ', 'ᩕ'), - ('ᩗ', 'ᩗ'), - ('ᩭ', 'ᩲ'), - ('ᬄ', 'ᬄ'), - ('ᬾ', 'ᭁ'), - ('ᮂ', 'ᮂ'), - ('ᮡ', 'ᮡ'), - ('ᮦ', 'ᮧ'), - ('ᯧ', 'ᯧ'), - ('ᯪ', 'ᯬ'), - ('ᯮ', 'ᯮ'), - ('ᰤ', 'ᰫ'), - ('ᰴ', 'ᰵ'), - ('᳡', '᳡'), - ('᳷', '᳷'), - ('ꠣ', 'ꠤ'), - ('ꠧ', 'ꠧ'), - ('ꢀ', 'ꢁ'), - ('ꢴ', 'ꣃ'), - ('ꥒ', 'ꥒ'), - ('ꦃ', 'ꦃ'), - ('ꦴ', 'ꦵ'), - ('ꦺ', 'ꦻ'), - ('ꦾ', 'ꦿ'), - ('ꨯ', 'ꨰ'), - ('ꨳ', 'ꨴ'), - ('ꩍ', 'ꩍ'), - ('ꫫ', 'ꫫ'), - ('ꫮ', 'ꫯ'), - ('ꫵ', 'ꫵ'), - ('ꯣ', 'ꯤ'), - ('ꯦ', 'ꯧ'), - ('ꯩ', 'ꯪ'), - ('꯬', '꯬'), - ('𑀀', '𑀀'), - ('𑀂', '𑀂'), - ('𑂂', '𑂂'), - ('𑂰', '𑂲'), - ('𑂷', '𑂸'), - ('𑄬', '𑄬'), - ('𑅅', '𑅆'), - ('𑆂', '𑆂'), - ('𑆳', '𑆵'), - ('𑆿', '𑆿'), - ('𑇎', '𑇎'), - ('𑈬', '𑈮'), - ('𑈲', '𑈳'), - ('𑋠', '𑋢'), - ('𑌂', '𑌃'), - ('𑌿', '𑌿'), - ('𑍁', '𑍄'), - ('𑍇', '𑍈'), - ('𑍋', '𑍌'), - ('𑍢', '𑍣'), - ('𑎹', '𑎺'), - ('𑏊', '𑏊'), - ('𑏌', '𑏍'), - ('𑐵', '𑐷'), - ('𑑀', '𑑁'), - ('𑑅', '𑑅'), - ('𑒱', '𑒲'), - ('𑒹', '𑒹'), - ('𑒻', '𑒼'), - ('𑒾', '𑒾'), - ('𑓁', '𑓁'), - ('𑖰', '𑖱'), - ('𑖸', '𑖻'), - ('𑖾', '𑖾'), - ('𑘰', '𑘲'), - ('𑘻', '𑘼'), - ('𑘾', '𑘾'), - ('𑚬', '𑚬'), - ('𑚮', '𑚯'), - ('𑜞', '𑜞'), - ('𑜦', '𑜦'), - ('𑠬', '𑠮'), - ('𑠸', '𑠸'), - ('𑤱', '𑤵'), - ('𑤷', '𑤸'), - ('𑥀', '𑥀'), - ('𑥂', '𑥂'), - ('𑧑', '𑧓'), - ('𑧜', '𑧟'), - ('𑧤', '𑧤'), - ('𑨹', '𑨹'), - ('𑩗', '𑩘'), - ('𑪗', '𑪗'), - ('𑰯', '𑰯'), - ('𑰾', '𑰾'), - ('𑲩', '𑲩'), - ('𑲱', '𑲱'), - ('𑲴', '𑲴'), - ('𑶊', '𑶎'), - ('𑶓', '𑶔'), - ('𑶖', '𑶖'), - ('𑻵', '𑻶'), - ('𑼃', '𑼃'), - ('𑼴', '𑼵'), - ('𑼾', '𑼿'), - ('𖄪', '𖄬'), - ('𖽑', '𖾇'), -]; - -pub const T: &'static [(char, char)] = &[('ᆨ', 'ᇿ'), ('ퟋ', 'ퟻ')]; - -pub const V: &'static [(char, char)] = - &[('ᅠ', 'ᆧ'), ('ힰ', 'ퟆ'), ('𖵣', '𖵣'), ('𖵧', '𖵪')]; - -pub const ZWJ: &'static [(char, char)] = &[('\u{200d}', '\u{200d}')]; diff --git a/vendor/regex-syntax/src/unicode_tables/mod.rs b/vendor/regex-syntax/src/unicode_tables/mod.rs deleted file mode 100644 index 20736c7ac813e4..00000000000000 --- a/vendor/regex-syntax/src/unicode_tables/mod.rs +++ /dev/null @@ -1,57 +0,0 @@ -#[cfg(feature = "unicode-age")] -pub mod age; - -#[cfg(feature = "unicode-case")] -pub mod case_folding_simple; - -#[cfg(feature = "unicode-gencat")] -pub mod general_category; - -#[cfg(feature = "unicode-segment")] -pub mod grapheme_cluster_break; - -#[cfg(all(feature = "unicode-perl", not(feature = "unicode-gencat")))] -#[allow(dead_code)] -pub mod perl_decimal; - -#[cfg(all(feature = "unicode-perl", not(feature = "unicode-bool")))] -#[allow(dead_code)] -pub mod perl_space; - -#[cfg(feature = "unicode-perl")] -pub mod perl_word; - -#[cfg(feature = "unicode-bool")] -pub mod property_bool; - -#[cfg(any( - feature = "unicode-age", - feature = "unicode-bool", - feature = "unicode-gencat", - feature = "unicode-perl", - feature = "unicode-script", - feature = "unicode-segment", -))] -pub mod property_names; - -#[cfg(any( - feature = "unicode-age", - feature = "unicode-bool", - feature = "unicode-gencat", - feature = "unicode-perl", - feature = "unicode-script", - feature = "unicode-segment", -))] -pub mod property_values; - -#[cfg(feature = "unicode-script")] -pub mod script; - -#[cfg(feature = "unicode-script")] -pub mod script_extension; - -#[cfg(feature = 
"unicode-segment")] -pub mod sentence_break; - -#[cfg(feature = "unicode-segment")] -pub mod word_break; diff --git a/vendor/regex-syntax/src/unicode_tables/perl_decimal.rs b/vendor/regex-syntax/src/unicode_tables/perl_decimal.rs deleted file mode 100644 index 18996c2bfcb0f4..00000000000000 --- a/vendor/regex-syntax/src/unicode_tables/perl_decimal.rs +++ /dev/null @@ -1,84 +0,0 @@ -// DO NOT EDIT THIS FILE. IT WAS AUTOMATICALLY GENERATED BY: -// -// ucd-generate general-category ucd-16.0.0 --chars --include decimalnumber -// -// Unicode version: 16.0.0. -// -// ucd-generate 0.3.1 is available on crates.io. - -pub const BY_NAME: &'static [(&'static str, &'static [(char, char)])] = - &[("Decimal_Number", DECIMAL_NUMBER)]; - -pub const DECIMAL_NUMBER: &'static [(char, char)] = &[ - ('0', '9'), - ('٠', '٩'), - ('۰', '۹'), - ('߀', '߉'), - ('०', '९'), - ('০', '৯'), - ('੦', '੯'), - ('૦', '૯'), - ('୦', '୯'), - ('௦', '௯'), - ('౦', '౯'), - ('೦', '೯'), - ('൦', '൯'), - ('෦', '෯'), - ('๐', '๙'), - ('໐', '໙'), - ('༠', '༩'), - ('၀', '၉'), - ('႐', '႙'), - ('០', '៩'), - ('᠐', '᠙'), - ('᥆', '᥏'), - ('᧐', '᧙'), - ('᪀', '᪉'), - ('᪐', '᪙'), - ('᭐', '᭙'), - ('᮰', '᮹'), - ('᱀', '᱉'), - ('᱐', '᱙'), - ('꘠', '꘩'), - ('꣐', '꣙'), - ('꤀', '꤉'), - ('꧐', '꧙'), - ('꧰', '꧹'), - ('꩐', '꩙'), - ('꯰', '꯹'), - ('0', '9'), - ('𐒠', '𐒩'), - ('𐴰', '𐴹'), - ('𐵀', '𐵉'), - ('𑁦', '𑁯'), - ('𑃰', '𑃹'), - ('𑄶', '𑄿'), - ('𑇐', '𑇙'), - ('𑋰', '𑋹'), - ('𑑐', '𑑙'), - ('𑓐', '𑓙'), - ('𑙐', '𑙙'), - ('𑛀', '𑛉'), - ('𑛐', '𑛣'), - ('𑜰', '𑜹'), - ('𑣠', '𑣩'), - ('𑥐', '𑥙'), - ('𑯰', '𑯹'), - ('𑱐', '𑱙'), - ('𑵐', '𑵙'), - ('𑶠', '𑶩'), - ('𑽐', '𑽙'), - ('𖄰', '𖄹'), - ('𖩠', '𖩩'), - ('𖫀', '𖫉'), - ('𖭐', '𖭙'), - ('𖵰', '𖵹'), - ('𜳰', '𜳹'), - ('𝟎', '𝟿'), - ('𞅀', '𞅉'), - ('𞋰', '𞋹'), - ('𞓰', '𞓹'), - ('𞗱', '𞗺'), - ('𞥐', '𞥙'), - ('🯰', '🯹'), -]; diff --git a/vendor/regex-syntax/src/unicode_tables/perl_space.rs b/vendor/regex-syntax/src/unicode_tables/perl_space.rs deleted file mode 100644 index c969e3733add9a..00000000000000 --- a/vendor/regex-syntax/src/unicode_tables/perl_space.rs +++ /dev/null @@ -1,23 +0,0 @@ -// DO NOT EDIT THIS FILE. IT WAS AUTOMATICALLY GENERATED BY: -// -// ucd-generate property-bool ucd-16.0.0 --chars --include whitespace -// -// Unicode version: 16.0.0. -// -// ucd-generate 0.3.1 is available on crates.io. - -pub const BY_NAME: &'static [(&'static str, &'static [(char, char)])] = - &[("White_Space", WHITE_SPACE)]; - -pub const WHITE_SPACE: &'static [(char, char)] = &[ - ('\t', '\r'), - (' ', ' '), - ('\u{85}', '\u{85}'), - ('\u{a0}', '\u{a0}'), - ('\u{1680}', '\u{1680}'), - ('\u{2000}', '\u{200a}'), - ('\u{2028}', '\u{2029}'), - ('\u{202f}', '\u{202f}'), - ('\u{205f}', '\u{205f}'), - ('\u{3000}', '\u{3000}'), -]; diff --git a/vendor/regex-syntax/src/unicode_tables/perl_word.rs b/vendor/regex-syntax/src/unicode_tables/perl_word.rs deleted file mode 100644 index 21c8c0f9c839c8..00000000000000 --- a/vendor/regex-syntax/src/unicode_tables/perl_word.rs +++ /dev/null @@ -1,806 +0,0 @@ -// DO NOT EDIT THIS FILE. IT WAS AUTOMATICALLY GENERATED BY: -// -// ucd-generate perl-word ucd-16.0.0 --chars -// -// Unicode version: 16.0.0. -// -// ucd-generate 0.3.1 is available on crates.io. 
- -pub const PERL_WORD: &'static [(char, char)] = &[ - ('0', '9'), - ('A', 'Z'), - ('_', '_'), - ('a', 'z'), - ('ª', 'ª'), - ('µ', 'µ'), - ('º', 'º'), - ('À', 'Ö'), - ('Ø', 'ö'), - ('ø', 'ˁ'), - ('ˆ', 'ˑ'), - ('ˠ', 'ˤ'), - ('ˬ', 'ˬ'), - ('ˮ', 'ˮ'), - ('\u{300}', 'ʹ'), - ('Ͷ', 'ͷ'), - ('ͺ', 'ͽ'), - ('Ϳ', 'Ϳ'), - ('Ά', 'Ά'), - ('Έ', 'Ί'), - ('Ό', 'Ό'), - ('Ύ', 'Ρ'), - ('Σ', 'ϵ'), - ('Ϸ', 'ҁ'), - ('\u{483}', 'ԯ'), - ('Ա', 'Ֆ'), - ('ՙ', 'ՙ'), - ('ՠ', 'ֈ'), - ('\u{591}', '\u{5bd}'), - ('\u{5bf}', '\u{5bf}'), - ('\u{5c1}', '\u{5c2}'), - ('\u{5c4}', '\u{5c5}'), - ('\u{5c7}', '\u{5c7}'), - ('א', 'ת'), - ('ׯ', 'ײ'), - ('\u{610}', '\u{61a}'), - ('ؠ', '٩'), - ('ٮ', 'ۓ'), - ('ە', '\u{6dc}'), - ('\u{6df}', '\u{6e8}'), - ('\u{6ea}', 'ۼ'), - ('ۿ', 'ۿ'), - ('ܐ', '\u{74a}'), - ('ݍ', 'ޱ'), - ('߀', 'ߵ'), - ('ߺ', 'ߺ'), - ('\u{7fd}', '\u{7fd}'), - ('ࠀ', '\u{82d}'), - ('ࡀ', '\u{85b}'), - ('ࡠ', 'ࡪ'), - ('ࡰ', 'ࢇ'), - ('ࢉ', 'ࢎ'), - ('\u{897}', '\u{8e1}'), - ('\u{8e3}', '\u{963}'), - ('०', '९'), - ('ॱ', 'ঃ'), - ('অ', 'ঌ'), - ('এ', 'ঐ'), - ('ও', 'ন'), - ('প', 'র'), - ('ল', 'ল'), - ('শ', 'হ'), - ('\u{9bc}', '\u{9c4}'), - ('ে', 'ৈ'), - ('ো', 'ৎ'), - ('\u{9d7}', '\u{9d7}'), - ('ড়', 'ঢ়'), - ('য়', '\u{9e3}'), - ('০', 'ৱ'), - ('ৼ', 'ৼ'), - ('\u{9fe}', '\u{9fe}'), - ('\u{a01}', 'ਃ'), - ('ਅ', 'ਊ'), - ('ਏ', 'ਐ'), - ('ਓ', 'ਨ'), - ('ਪ', 'ਰ'), - ('ਲ', 'ਲ਼'), - ('ਵ', 'ਸ਼'), - ('ਸ', 'ਹ'), - ('\u{a3c}', '\u{a3c}'), - ('ਾ', '\u{a42}'), - ('\u{a47}', '\u{a48}'), - ('\u{a4b}', '\u{a4d}'), - ('\u{a51}', '\u{a51}'), - ('ਖ਼', 'ੜ'), - ('ਫ਼', 'ਫ਼'), - ('੦', '\u{a75}'), - ('\u{a81}', 'ઃ'), - ('અ', 'ઍ'), - ('એ', 'ઑ'), - ('ઓ', 'ન'), - ('પ', 'ર'), - ('લ', 'ળ'), - ('વ', 'હ'), - ('\u{abc}', '\u{ac5}'), - ('\u{ac7}', 'ૉ'), - ('ો', '\u{acd}'), - ('ૐ', 'ૐ'), - ('ૠ', '\u{ae3}'), - ('૦', '૯'), - ('ૹ', '\u{aff}'), - ('\u{b01}', 'ଃ'), - ('ଅ', 'ଌ'), - ('ଏ', 'ଐ'), - ('ଓ', 'ନ'), - ('ପ', 'ର'), - ('ଲ', 'ଳ'), - ('ଵ', 'ହ'), - ('\u{b3c}', '\u{b44}'), - ('େ', 'ୈ'), - ('ୋ', '\u{b4d}'), - ('\u{b55}', '\u{b57}'), - ('ଡ଼', 'ଢ଼'), - ('ୟ', '\u{b63}'), - ('୦', '୯'), - ('ୱ', 'ୱ'), - ('\u{b82}', 'ஃ'), - ('அ', 'ஊ'), - ('எ', 'ஐ'), - ('ஒ', 'க'), - ('ங', 'ச'), - ('ஜ', 'ஜ'), - ('ஞ', 'ட'), - ('ண', 'த'), - ('ந', 'ப'), - ('ம', 'ஹ'), - ('\u{bbe}', 'ூ'), - ('ெ', 'ை'), - ('ொ', '\u{bcd}'), - ('ௐ', 'ௐ'), - ('\u{bd7}', '\u{bd7}'), - ('௦', '௯'), - ('\u{c00}', 'ఌ'), - ('ఎ', 'ఐ'), - ('ఒ', 'న'), - ('ప', 'హ'), - ('\u{c3c}', 'ౄ'), - ('\u{c46}', '\u{c48}'), - ('\u{c4a}', '\u{c4d}'), - ('\u{c55}', '\u{c56}'), - ('ౘ', 'ౚ'), - ('ౝ', 'ౝ'), - ('ౠ', '\u{c63}'), - ('౦', '౯'), - ('ಀ', 'ಃ'), - ('ಅ', 'ಌ'), - ('ಎ', 'ಐ'), - ('ಒ', 'ನ'), - ('ಪ', 'ಳ'), - ('ವ', 'ಹ'), - ('\u{cbc}', 'ೄ'), - ('\u{cc6}', '\u{cc8}'), - ('\u{cca}', '\u{ccd}'), - ('\u{cd5}', '\u{cd6}'), - ('ೝ', 'ೞ'), - ('ೠ', '\u{ce3}'), - ('೦', '೯'), - ('ೱ', 'ೳ'), - ('\u{d00}', 'ഌ'), - ('എ', 'ഐ'), - ('ഒ', '\u{d44}'), - ('െ', 'ൈ'), - ('ൊ', 'ൎ'), - ('ൔ', '\u{d57}'), - ('ൟ', '\u{d63}'), - ('൦', '൯'), - ('ൺ', 'ൿ'), - ('\u{d81}', 'ඃ'), - ('අ', 'ඖ'), - ('ක', 'න'), - ('ඳ', 'ර'), - ('ල', 'ල'), - ('ව', 'ෆ'), - ('\u{dca}', '\u{dca}'), - ('\u{dcf}', '\u{dd4}'), - ('\u{dd6}', '\u{dd6}'), - ('ෘ', '\u{ddf}'), - ('෦', '෯'), - ('ෲ', 'ෳ'), - ('ก', '\u{e3a}'), - ('เ', '\u{e4e}'), - ('๐', '๙'), - ('ກ', 'ຂ'), - ('ຄ', 'ຄ'), - ('ຆ', 'ຊ'), - ('ຌ', 'ຣ'), - ('ລ', 'ລ'), - ('ວ', 'ຽ'), - ('ເ', 'ໄ'), - ('ໆ', 'ໆ'), - ('\u{ec8}', '\u{ece}'), - ('໐', '໙'), - ('ໜ', 'ໟ'), - ('ༀ', 'ༀ'), - ('\u{f18}', '\u{f19}'), - ('༠', '༩'), - ('\u{f35}', '\u{f35}'), - ('\u{f37}', '\u{f37}'), - ('\u{f39}', '\u{f39}'), - ('༾', 'ཇ'), - ('ཉ', 'ཬ'), - ('\u{f71}', '\u{f84}'), - ('\u{f86}', 
'\u{f97}'), - ('\u{f99}', '\u{fbc}'), - ('\u{fc6}', '\u{fc6}'), - ('က', '၉'), - ('ၐ', '\u{109d}'), - ('Ⴀ', 'Ⴥ'), - ('Ⴧ', 'Ⴧ'), - ('Ⴭ', 'Ⴭ'), - ('ა', 'ჺ'), - ('ჼ', 'ቈ'), - ('ቊ', 'ቍ'), - ('ቐ', 'ቖ'), - ('ቘ', 'ቘ'), - ('ቚ', 'ቝ'), - ('በ', 'ኈ'), - ('ኊ', 'ኍ'), - ('ነ', 'ኰ'), - ('ኲ', 'ኵ'), - ('ኸ', 'ኾ'), - ('ዀ', 'ዀ'), - ('ዂ', 'ዅ'), - ('ወ', 'ዖ'), - ('ዘ', 'ጐ'), - ('ጒ', 'ጕ'), - ('ጘ', 'ፚ'), - ('\u{135d}', '\u{135f}'), - ('ᎀ', 'ᎏ'), - ('Ꭰ', 'Ᏽ'), - ('ᏸ', 'ᏽ'), - ('ᐁ', 'ᙬ'), - ('ᙯ', 'ᙿ'), - ('ᚁ', 'ᚚ'), - ('ᚠ', 'ᛪ'), - ('ᛮ', 'ᛸ'), - ('ᜀ', '\u{1715}'), - ('ᜟ', '\u{1734}'), - ('ᝀ', '\u{1753}'), - ('ᝠ', 'ᝬ'), - ('ᝮ', 'ᝰ'), - ('\u{1772}', '\u{1773}'), - ('ក', '\u{17d3}'), - ('ៗ', 'ៗ'), - ('ៜ', '\u{17dd}'), - ('០', '៩'), - ('\u{180b}', '\u{180d}'), - ('\u{180f}', '᠙'), - ('ᠠ', 'ᡸ'), - ('ᢀ', 'ᢪ'), - ('ᢰ', 'ᣵ'), - ('ᤀ', 'ᤞ'), - ('\u{1920}', 'ᤫ'), - ('ᤰ', '\u{193b}'), - ('᥆', 'ᥭ'), - ('ᥰ', 'ᥴ'), - ('ᦀ', 'ᦫ'), - ('ᦰ', 'ᧉ'), - ('᧐', '᧙'), - ('ᨀ', '\u{1a1b}'), - ('ᨠ', '\u{1a5e}'), - ('\u{1a60}', '\u{1a7c}'), - ('\u{1a7f}', '᪉'), - ('᪐', '᪙'), - ('ᪧ', 'ᪧ'), - ('\u{1ab0}', '\u{1ace}'), - ('\u{1b00}', 'ᭌ'), - ('᭐', '᭙'), - ('\u{1b6b}', '\u{1b73}'), - ('\u{1b80}', '\u{1bf3}'), - ('ᰀ', '\u{1c37}'), - ('᱀', '᱉'), - ('ᱍ', 'ᱽ'), - ('ᲀ', 'ᲊ'), - ('Ა', 'Ჺ'), - ('Ჽ', 'Ჿ'), - ('\u{1cd0}', '\u{1cd2}'), - ('\u{1cd4}', 'ᳺ'), - ('ᴀ', 'ἕ'), - ('Ἐ', 'Ἕ'), - ('ἠ', 'ὅ'), - ('Ὀ', 'Ὅ'), - ('ὐ', 'ὗ'), - ('Ὑ', 'Ὑ'), - ('Ὓ', 'Ὓ'), - ('Ὕ', 'Ὕ'), - ('Ὗ', 'ώ'), - ('ᾀ', 'ᾴ'), - ('ᾶ', 'ᾼ'), - ('ι', 'ι'), - ('ῂ', 'ῄ'), - ('ῆ', 'ῌ'), - ('ῐ', 'ΐ'), - ('ῖ', 'Ί'), - ('ῠ', 'Ῥ'), - ('ῲ', 'ῴ'), - ('ῶ', 'ῼ'), - ('\u{200c}', '\u{200d}'), - ('‿', '⁀'), - ('⁔', '⁔'), - ('ⁱ', 'ⁱ'), - ('ⁿ', 'ⁿ'), - ('ₐ', 'ₜ'), - ('\u{20d0}', '\u{20f0}'), - ('ℂ', 'ℂ'), - ('ℇ', 'ℇ'), - ('ℊ', 'ℓ'), - ('ℕ', 'ℕ'), - ('ℙ', 'ℝ'), - ('ℤ', 'ℤ'), - ('Ω', 'Ω'), - ('ℨ', 'ℨ'), - ('K', 'ℭ'), - ('ℯ', 'ℹ'), - ('ℼ', 'ℿ'), - ('ⅅ', 'ⅉ'), - ('ⅎ', 'ⅎ'), - ('Ⅰ', 'ↈ'), - ('Ⓐ', 'ⓩ'), - ('Ⰰ', 'ⳤ'), - ('Ⳬ', 'ⳳ'), - ('ⴀ', 'ⴥ'), - ('ⴧ', 'ⴧ'), - ('ⴭ', 'ⴭ'), - ('ⴰ', 'ⵧ'), - ('ⵯ', 'ⵯ'), - ('\u{2d7f}', 'ⶖ'), - ('ⶠ', 'ⶦ'), - ('ⶨ', 'ⶮ'), - ('ⶰ', 'ⶶ'), - ('ⶸ', 'ⶾ'), - ('ⷀ', 'ⷆ'), - ('ⷈ', 'ⷎ'), - ('ⷐ', 'ⷖ'), - ('ⷘ', 'ⷞ'), - ('\u{2de0}', '\u{2dff}'), - ('ⸯ', 'ⸯ'), - ('々', '〇'), - ('〡', '\u{302f}'), - ('〱', '〵'), - ('〸', '〼'), - ('ぁ', 'ゖ'), - ('\u{3099}', '\u{309a}'), - ('ゝ', 'ゟ'), - ('ァ', 'ヺ'), - ('ー', 'ヿ'), - ('ㄅ', 'ㄯ'), - ('ㄱ', 'ㆎ'), - ('ㆠ', 'ㆿ'), - ('ㇰ', 'ㇿ'), - ('㐀', '䶿'), - ('一', 'ꒌ'), - ('ꓐ', 'ꓽ'), - ('ꔀ', 'ꘌ'), - ('ꘐ', 'ꘫ'), - ('Ꙁ', '\u{a672}'), - ('\u{a674}', '\u{a67d}'), - ('ꙿ', '\u{a6f1}'), - ('ꜗ', 'ꜟ'), - ('Ꜣ', 'ꞈ'), - ('Ꞌ', 'ꟍ'), - ('Ꟑ', 'ꟑ'), - ('ꟓ', 'ꟓ'), - ('ꟕ', 'Ƛ'), - ('ꟲ', 'ꠧ'), - ('\u{a82c}', '\u{a82c}'), - ('ꡀ', 'ꡳ'), - ('ꢀ', '\u{a8c5}'), - ('꣐', '꣙'), - ('\u{a8e0}', 'ꣷ'), - ('ꣻ', 'ꣻ'), - ('ꣽ', '\u{a92d}'), - ('ꤰ', '\u{a953}'), - ('ꥠ', 'ꥼ'), - ('\u{a980}', '\u{a9c0}'), - ('ꧏ', '꧙'), - ('ꧠ', 'ꧾ'), - ('ꨀ', '\u{aa36}'), - ('ꩀ', 'ꩍ'), - ('꩐', '꩙'), - ('ꩠ', 'ꩶ'), - ('ꩺ', 'ꫂ'), - ('ꫛ', 'ꫝ'), - ('ꫠ', 'ꫯ'), - ('ꫲ', '\u{aaf6}'), - ('ꬁ', 'ꬆ'), - ('ꬉ', 'ꬎ'), - ('ꬑ', 'ꬖ'), - ('ꬠ', 'ꬦ'), - ('ꬨ', 'ꬮ'), - ('ꬰ', 'ꭚ'), - ('ꭜ', 'ꭩ'), - ('ꭰ', 'ꯪ'), - ('꯬', '\u{abed}'), - ('꯰', '꯹'), - ('가', '힣'), - ('ힰ', 'ퟆ'), - ('ퟋ', 'ퟻ'), - ('豈', '舘'), - ('並', '龎'), - ('ff', 'st'), - ('ﬓ', 'ﬗ'), - ('יִ', 'ﬨ'), - ('שׁ', 'זּ'), - ('טּ', 'לּ'), - ('מּ', 'מּ'), - ('נּ', 'סּ'), - ('ףּ', 'פּ'), - ('צּ', 'ﮱ'), - ('ﯓ', 'ﴽ'), - ('ﵐ', 'ﶏ'), - ('ﶒ', 'ﷇ'), - ('ﷰ', 'ﷻ'), - ('\u{fe00}', '\u{fe0f}'), - ('\u{fe20}', '\u{fe2f}'), - ('︳', '︴'), - ('﹍', '﹏'), - ('ﹰ', 'ﹴ'), - ('ﹶ', 'ﻼ'), - ('0', '9'), - ('A', 'Z'), - ('_', '_'), - ('a', 'z'), - 
('ヲ', 'ᄒ'), - ('ᅡ', 'ᅦ'), - ('ᅧ', 'ᅬ'), - ('ᅭ', 'ᅲ'), - ('ᅳ', 'ᅵ'), - ('𐀀', '𐀋'), - ('𐀍', '𐀦'), - ('𐀨', '𐀺'), - ('𐀼', '𐀽'), - ('𐀿', '𐁍'), - ('𐁐', '𐁝'), - ('𐂀', '𐃺'), - ('𐅀', '𐅴'), - ('\u{101fd}', '\u{101fd}'), - ('𐊀', '𐊜'), - ('𐊠', '𐋐'), - ('\u{102e0}', '\u{102e0}'), - ('𐌀', '𐌟'), - ('𐌭', '𐍊'), - ('𐍐', '\u{1037a}'), - ('𐎀', '𐎝'), - ('𐎠', '𐏃'), - ('𐏈', '𐏏'), - ('𐏑', '𐏕'), - ('𐐀', '𐒝'), - ('𐒠', '𐒩'), - ('𐒰', '𐓓'), - ('𐓘', '𐓻'), - ('𐔀', '𐔧'), - ('𐔰', '𐕣'), - ('𐕰', '𐕺'), - ('𐕼', '𐖊'), - ('𐖌', '𐖒'), - ('𐖔', '𐖕'), - ('𐖗', '𐖡'), - ('𐖣', '𐖱'), - ('𐖳', '𐖹'), - ('𐖻', '𐖼'), - ('𐗀', '𐗳'), - ('𐘀', '𐜶'), - ('𐝀', '𐝕'), - ('𐝠', '𐝧'), - ('𐞀', '𐞅'), - ('𐞇', '𐞰'), - ('𐞲', '𐞺'), - ('𐠀', '𐠅'), - ('𐠈', '𐠈'), - ('𐠊', '𐠵'), - ('𐠷', '𐠸'), - ('𐠼', '𐠼'), - ('𐠿', '𐡕'), - ('𐡠', '𐡶'), - ('𐢀', '𐢞'), - ('𐣠', '𐣲'), - ('𐣴', '𐣵'), - ('𐤀', '𐤕'), - ('𐤠', '𐤹'), - ('𐦀', '𐦷'), - ('𐦾', '𐦿'), - ('𐨀', '\u{10a03}'), - ('\u{10a05}', '\u{10a06}'), - ('\u{10a0c}', '𐨓'), - ('𐨕', '𐨗'), - ('𐨙', '𐨵'), - ('\u{10a38}', '\u{10a3a}'), - ('\u{10a3f}', '\u{10a3f}'), - ('𐩠', '𐩼'), - ('𐪀', '𐪜'), - ('𐫀', '𐫇'), - ('𐫉', '\u{10ae6}'), - ('𐬀', '𐬵'), - ('𐭀', '𐭕'), - ('𐭠', '𐭲'), - ('𐮀', '𐮑'), - ('𐰀', '𐱈'), - ('𐲀', '𐲲'), - ('𐳀', '𐳲'), - ('𐴀', '\u{10d27}'), - ('𐴰', '𐴹'), - ('𐵀', '𐵥'), - ('\u{10d69}', '\u{10d6d}'), - ('𐵯', '𐶅'), - ('𐺀', '𐺩'), - ('\u{10eab}', '\u{10eac}'), - ('𐺰', '𐺱'), - ('𐻂', '𐻄'), - ('\u{10efc}', '𐼜'), - ('𐼧', '𐼧'), - ('𐼰', '\u{10f50}'), - ('𐽰', '\u{10f85}'), - ('𐾰', '𐿄'), - ('𐿠', '𐿶'), - ('𑀀', '\u{11046}'), - ('𑁦', '𑁵'), - ('\u{1107f}', '\u{110ba}'), - ('\u{110c2}', '\u{110c2}'), - ('𑃐', '𑃨'), - ('𑃰', '𑃹'), - ('\u{11100}', '\u{11134}'), - ('𑄶', '𑄿'), - ('𑅄', '𑅇'), - ('𑅐', '\u{11173}'), - ('𑅶', '𑅶'), - ('\u{11180}', '𑇄'), - ('\u{111c9}', '\u{111cc}'), - ('𑇎', '𑇚'), - ('𑇜', '𑇜'), - ('𑈀', '𑈑'), - ('𑈓', '\u{11237}'), - ('\u{1123e}', '\u{11241}'), - ('𑊀', '𑊆'), - ('𑊈', '𑊈'), - ('𑊊', '𑊍'), - ('𑊏', '𑊝'), - ('𑊟', '𑊨'), - ('𑊰', '\u{112ea}'), - ('𑋰', '𑋹'), - ('\u{11300}', '𑌃'), - ('𑌅', '𑌌'), - ('𑌏', '𑌐'), - ('𑌓', '𑌨'), - ('𑌪', '𑌰'), - ('𑌲', '𑌳'), - ('𑌵', '𑌹'), - ('\u{1133b}', '𑍄'), - ('𑍇', '𑍈'), - ('𑍋', '\u{1134d}'), - ('𑍐', '𑍐'), - ('\u{11357}', '\u{11357}'), - ('𑍝', '𑍣'), - ('\u{11366}', '\u{1136c}'), - ('\u{11370}', '\u{11374}'), - ('𑎀', '𑎉'), - ('𑎋', '𑎋'), - ('𑎎', '𑎎'), - ('𑎐', '𑎵'), - ('𑎷', '\u{113c0}'), - ('\u{113c2}', '\u{113c2}'), - ('\u{113c5}', '\u{113c5}'), - ('\u{113c7}', '𑏊'), - ('𑏌', '𑏓'), - ('\u{113e1}', '\u{113e2}'), - ('𑐀', '𑑊'), - ('𑑐', '𑑙'), - ('\u{1145e}', '𑑡'), - ('𑒀', '𑓅'), - ('𑓇', '𑓇'), - ('𑓐', '𑓙'), - ('𑖀', '\u{115b5}'), - ('𑖸', '\u{115c0}'), - ('𑗘', '\u{115dd}'), - ('𑘀', '\u{11640}'), - ('𑙄', '𑙄'), - ('𑙐', '𑙙'), - ('𑚀', '𑚸'), - ('𑛀', '𑛉'), - ('𑛐', '𑛣'), - ('𑜀', '𑜚'), - ('\u{1171d}', '\u{1172b}'), - ('𑜰', '𑜹'), - ('𑝀', '𑝆'), - ('𑠀', '\u{1183a}'), - ('𑢠', '𑣩'), - ('𑣿', '𑤆'), - ('𑤉', '𑤉'), - ('𑤌', '𑤓'), - ('𑤕', '𑤖'), - ('𑤘', '𑤵'), - ('𑤷', '𑤸'), - ('\u{1193b}', '\u{11943}'), - ('𑥐', '𑥙'), - ('𑦠', '𑦧'), - ('𑦪', '\u{119d7}'), - ('\u{119da}', '𑧡'), - ('𑧣', '𑧤'), - ('𑨀', '\u{11a3e}'), - ('\u{11a47}', '\u{11a47}'), - ('𑩐', '\u{11a99}'), - ('𑪝', '𑪝'), - ('𑪰', '𑫸'), - ('𑯀', '𑯠'), - ('𑯰', '𑯹'), - ('𑰀', '𑰈'), - ('𑰊', '\u{11c36}'), - ('\u{11c38}', '𑱀'), - ('𑱐', '𑱙'), - ('𑱲', '𑲏'), - ('\u{11c92}', '\u{11ca7}'), - ('𑲩', '\u{11cb6}'), - ('𑴀', '𑴆'), - ('𑴈', '𑴉'), - ('𑴋', '\u{11d36}'), - ('\u{11d3a}', '\u{11d3a}'), - ('\u{11d3c}', '\u{11d3d}'), - ('\u{11d3f}', '\u{11d47}'), - ('𑵐', '𑵙'), - ('𑵠', '𑵥'), - ('𑵧', '𑵨'), - ('𑵪', '𑶎'), - ('\u{11d90}', '\u{11d91}'), - ('𑶓', '𑶘'), - ('𑶠', '𑶩'), - ('𑻠', '𑻶'), - ('\u{11f00}', '𑼐'), - ('𑼒', 
'\u{11f3a}'), - ('𑼾', '\u{11f42}'), - ('𑽐', '\u{11f5a}'), - ('𑾰', '𑾰'), - ('𒀀', '𒎙'), - ('𒐀', '𒑮'), - ('𒒀', '𒕃'), - ('𒾐', '𒿰'), - ('𓀀', '𓐯'), - ('\u{13440}', '\u{13455}'), - ('𓑠', '𔏺'), - ('𔐀', '𔙆'), - ('𖄀', '𖄹'), - ('𖠀', '𖨸'), - ('𖩀', '𖩞'), - ('𖩠', '𖩩'), - ('𖩰', '𖪾'), - ('𖫀', '𖫉'), - ('𖫐', '𖫭'), - ('\u{16af0}', '\u{16af4}'), - ('𖬀', '\u{16b36}'), - ('𖭀', '𖭃'), - ('𖭐', '𖭙'), - ('𖭣', '𖭷'), - ('𖭽', '𖮏'), - ('𖵀', '𖵬'), - ('𖵰', '𖵹'), - ('𖹀', '𖹿'), - ('𖼀', '𖽊'), - ('\u{16f4f}', '𖾇'), - ('\u{16f8f}', '𖾟'), - ('𖿠', '𖿡'), - ('𖿣', '\u{16fe4}'), - ('\u{16ff0}', '\u{16ff1}'), - ('𗀀', '𘟷'), - ('𘠀', '𘳕'), - ('𘳿', '𘴈'), - ('𚿰', '𚿳'), - ('𚿵', '𚿻'), - ('𚿽', '𚿾'), - ('𛀀', '𛄢'), - ('𛄲', '𛄲'), - ('𛅐', '𛅒'), - ('𛅕', '𛅕'), - ('𛅤', '𛅧'), - ('𛅰', '𛋻'), - ('𛰀', '𛱪'), - ('𛱰', '𛱼'), - ('𛲀', '𛲈'), - ('𛲐', '𛲙'), - ('\u{1bc9d}', '\u{1bc9e}'), - ('𜳰', '𜳹'), - ('\u{1cf00}', '\u{1cf2d}'), - ('\u{1cf30}', '\u{1cf46}'), - ('\u{1d165}', '\u{1d169}'), - ('\u{1d16d}', '\u{1d172}'), - ('\u{1d17b}', '\u{1d182}'), - ('\u{1d185}', '\u{1d18b}'), - ('\u{1d1aa}', '\u{1d1ad}'), - ('\u{1d242}', '\u{1d244}'), - ('𝐀', '𝑔'), - ('𝑖', '𝒜'), - ('𝒞', '𝒟'), - ('𝒢', '𝒢'), - ('𝒥', '𝒦'), - ('𝒩', '𝒬'), - ('𝒮', '𝒹'), - ('𝒻', '𝒻'), - ('𝒽', '𝓃'), - ('𝓅', '𝔅'), - ('𝔇', '𝔊'), - ('𝔍', '𝔔'), - ('𝔖', '𝔜'), - ('𝔞', '𝔹'), - ('𝔻', '𝔾'), - ('𝕀', '𝕄'), - ('𝕆', '𝕆'), - ('𝕊', '𝕐'), - ('𝕒', '𝚥'), - ('𝚨', '𝛀'), - ('𝛂', '𝛚'), - ('𝛜', '𝛺'), - ('𝛼', '𝜔'), - ('𝜖', '𝜴'), - ('𝜶', '𝝎'), - ('𝝐', '𝝮'), - ('𝝰', '𝞈'), - ('𝞊', '𝞨'), - ('𝞪', '𝟂'), - ('𝟄', '𝟋'), - ('𝟎', '𝟿'), - ('\u{1da00}', '\u{1da36}'), - ('\u{1da3b}', '\u{1da6c}'), - ('\u{1da75}', '\u{1da75}'), - ('\u{1da84}', '\u{1da84}'), - ('\u{1da9b}', '\u{1da9f}'), - ('\u{1daa1}', '\u{1daaf}'), - ('𝼀', '𝼞'), - ('𝼥', '𝼪'), - ('\u{1e000}', '\u{1e006}'), - ('\u{1e008}', '\u{1e018}'), - ('\u{1e01b}', '\u{1e021}'), - ('\u{1e023}', '\u{1e024}'), - ('\u{1e026}', '\u{1e02a}'), - ('𞀰', '𞁭'), - ('\u{1e08f}', '\u{1e08f}'), - ('𞄀', '𞄬'), - ('\u{1e130}', '𞄽'), - ('𞅀', '𞅉'), - ('𞅎', '𞅎'), - ('𞊐', '\u{1e2ae}'), - ('𞋀', '𞋹'), - ('𞓐', '𞓹'), - ('𞗐', '𞗺'), - ('𞟠', '𞟦'), - ('𞟨', '𞟫'), - ('𞟭', '𞟮'), - ('𞟰', '𞟾'), - ('𞠀', '𞣄'), - ('\u{1e8d0}', '\u{1e8d6}'), - ('𞤀', '𞥋'), - ('𞥐', '𞥙'), - ('𞸀', '𞸃'), - ('𞸅', '𞸟'), - ('𞸡', '𞸢'), - ('𞸤', '𞸤'), - ('𞸧', '𞸧'), - ('𞸩', '𞸲'), - ('𞸴', '𞸷'), - ('𞸹', '𞸹'), - ('𞸻', '𞸻'), - ('𞹂', '𞹂'), - ('𞹇', '𞹇'), - ('𞹉', '𞹉'), - ('𞹋', '𞹋'), - ('𞹍', '𞹏'), - ('𞹑', '𞹒'), - ('𞹔', '𞹔'), - ('𞹗', '𞹗'), - ('𞹙', '𞹙'), - ('𞹛', '𞹛'), - ('𞹝', '𞹝'), - ('𞹟', '𞹟'), - ('𞹡', '𞹢'), - ('𞹤', '𞹤'), - ('𞹧', '𞹪'), - ('𞹬', '𞹲'), - ('𞹴', '𞹷'), - ('𞹹', '𞹼'), - ('𞹾', '𞹾'), - ('𞺀', '𞺉'), - ('𞺋', '𞺛'), - ('𞺡', '𞺣'), - ('𞺥', '𞺩'), - ('𞺫', '𞺻'), - ('🄰', '🅉'), - ('🅐', '🅩'), - ('🅰', '🆉'), - ('🯰', '🯹'), - ('𠀀', '𪛟'), - ('𪜀', '𫜹'), - ('𫝀', '𫠝'), - ('𫠠', '𬺡'), - ('𬺰', '𮯠'), - ('𮯰', '𮹝'), - ('丽', '𪘀'), - ('𰀀', '𱍊'), - ('𱍐', '𲎯'), - ('\u{e0100}', '\u{e01ef}'), -]; diff --git a/vendor/regex-syntax/src/unicode_tables/property_bool.rs b/vendor/regex-syntax/src/unicode_tables/property_bool.rs deleted file mode 100644 index 3d62edc42317ba..00000000000000 --- a/vendor/regex-syntax/src/unicode_tables/property_bool.rs +++ /dev/null @@ -1,12095 +0,0 @@ -// DO NOT EDIT THIS FILE. IT WAS AUTOMATICALLY GENERATED BY: -// -// ucd-generate property-bool ucd-16.0.0 --chars -// -// Unicode version: 16.0.0. -// -// ucd-generate 0.3.1 is available on crates.io. 
- -pub const BY_NAME: &'static [(&'static str, &'static [(char, char)])] = &[ - ("ASCII_Hex_Digit", ASCII_HEX_DIGIT), - ("Alphabetic", ALPHABETIC), - ("Bidi_Control", BIDI_CONTROL), - ("Bidi_Mirrored", BIDI_MIRRORED), - ("Case_Ignorable", CASE_IGNORABLE), - ("Cased", CASED), - ("Changes_When_Casefolded", CHANGES_WHEN_CASEFOLDED), - ("Changes_When_Casemapped", CHANGES_WHEN_CASEMAPPED), - ("Changes_When_Lowercased", CHANGES_WHEN_LOWERCASED), - ("Changes_When_Titlecased", CHANGES_WHEN_TITLECASED), - ("Changes_When_Uppercased", CHANGES_WHEN_UPPERCASED), - ("Dash", DASH), - ("Default_Ignorable_Code_Point", DEFAULT_IGNORABLE_CODE_POINT), - ("Deprecated", DEPRECATED), - ("Diacritic", DIACRITIC), - ("Emoji", EMOJI), - ("Emoji_Component", EMOJI_COMPONENT), - ("Emoji_Modifier", EMOJI_MODIFIER), - ("Emoji_Modifier_Base", EMOJI_MODIFIER_BASE), - ("Emoji_Presentation", EMOJI_PRESENTATION), - ("Extended_Pictographic", EXTENDED_PICTOGRAPHIC), - ("Extender", EXTENDER), - ("Grapheme_Base", GRAPHEME_BASE), - ("Grapheme_Extend", GRAPHEME_EXTEND), - ("Grapheme_Link", GRAPHEME_LINK), - ("Hex_Digit", HEX_DIGIT), - ("Hyphen", HYPHEN), - ("IDS_Binary_Operator", IDS_BINARY_OPERATOR), - ("IDS_Trinary_Operator", IDS_TRINARY_OPERATOR), - ("IDS_Unary_Operator", IDS_UNARY_OPERATOR), - ("ID_Compat_Math_Continue", ID_COMPAT_MATH_CONTINUE), - ("ID_Compat_Math_Start", ID_COMPAT_MATH_START), - ("ID_Continue", ID_CONTINUE), - ("ID_Start", ID_START), - ("Ideographic", IDEOGRAPHIC), - ("InCB", INCB), - ("Join_Control", JOIN_CONTROL), - ("Logical_Order_Exception", LOGICAL_ORDER_EXCEPTION), - ("Lowercase", LOWERCASE), - ("Math", MATH), - ("Modifier_Combining_Mark", MODIFIER_COMBINING_MARK), - ("Noncharacter_Code_Point", NONCHARACTER_CODE_POINT), - ("Other_Alphabetic", OTHER_ALPHABETIC), - ("Other_Default_Ignorable_Code_Point", OTHER_DEFAULT_IGNORABLE_CODE_POINT), - ("Other_Grapheme_Extend", OTHER_GRAPHEME_EXTEND), - ("Other_ID_Continue", OTHER_ID_CONTINUE), - ("Other_ID_Start", OTHER_ID_START), - ("Other_Lowercase", OTHER_LOWERCASE), - ("Other_Math", OTHER_MATH), - ("Other_Uppercase", OTHER_UPPERCASE), - ("Pattern_Syntax", PATTERN_SYNTAX), - ("Pattern_White_Space", PATTERN_WHITE_SPACE), - ("Prepended_Concatenation_Mark", PREPENDED_CONCATENATION_MARK), - ("Quotation_Mark", QUOTATION_MARK), - ("Radical", RADICAL), - ("Regional_Indicator", REGIONAL_INDICATOR), - ("Sentence_Terminal", SENTENCE_TERMINAL), - ("Soft_Dotted", SOFT_DOTTED), - ("Terminal_Punctuation", TERMINAL_PUNCTUATION), - ("Unified_Ideograph", UNIFIED_IDEOGRAPH), - ("Uppercase", UPPERCASE), - ("Variation_Selector", VARIATION_SELECTOR), - ("White_Space", WHITE_SPACE), - ("XID_Continue", XID_CONTINUE), - ("XID_Start", XID_START), -]; - -pub const ASCII_HEX_DIGIT: &'static [(char, char)] = - &[('0', '9'), ('A', 'F'), ('a', 'f')]; - -pub const ALPHABETIC: &'static [(char, char)] = &[ - ('A', 'Z'), - ('a', 'z'), - ('ª', 'ª'), - ('µ', 'µ'), - ('º', 'º'), - ('À', 'Ö'), - ('Ø', 'ö'), - ('ø', 'ˁ'), - ('ˆ', 'ˑ'), - ('ˠ', 'ˤ'), - ('ˬ', 'ˬ'), - ('ˮ', 'ˮ'), - ('\u{345}', '\u{345}'), - ('\u{363}', 'ʹ'), - ('Ͷ', 'ͷ'), - ('ͺ', 'ͽ'), - ('Ϳ', 'Ϳ'), - ('Ά', 'Ά'), - ('Έ', 'Ί'), - ('Ό', 'Ό'), - ('Ύ', 'Ρ'), - ('Σ', 'ϵ'), - ('Ϸ', 'ҁ'), - ('Ҋ', 'ԯ'), - ('Ա', 'Ֆ'), - ('ՙ', 'ՙ'), - ('ՠ', 'ֈ'), - ('\u{5b0}', '\u{5bd}'), - ('\u{5bf}', '\u{5bf}'), - ('\u{5c1}', '\u{5c2}'), - ('\u{5c4}', '\u{5c5}'), - ('\u{5c7}', '\u{5c7}'), - ('א', 'ת'), - ('ׯ', 'ײ'), - ('\u{610}', '\u{61a}'), - ('ؠ', '\u{657}'), - ('\u{659}', '\u{65f}'), - ('ٮ', 'ۓ'), - ('ە', '\u{6dc}'), - ('\u{6e1}', '\u{6e8}'), - ('\u{6ed}', 
'ۯ'), - ('ۺ', 'ۼ'), - ('ۿ', 'ۿ'), - ('ܐ', '\u{73f}'), - ('ݍ', 'ޱ'), - ('ߊ', 'ߪ'), - ('ߴ', 'ߵ'), - ('ߺ', 'ߺ'), - ('ࠀ', '\u{817}'), - ('ࠚ', '\u{82c}'), - ('ࡀ', 'ࡘ'), - ('ࡠ', 'ࡪ'), - ('ࡰ', 'ࢇ'), - ('ࢉ', 'ࢎ'), - ('\u{897}', '\u{897}'), - ('ࢠ', 'ࣉ'), - ('\u{8d4}', '\u{8df}'), - ('\u{8e3}', '\u{8e9}'), - ('\u{8f0}', 'ऻ'), - ('ऽ', 'ौ'), - ('ॎ', 'ॐ'), - ('\u{955}', '\u{963}'), - ('ॱ', 'ঃ'), - ('অ', 'ঌ'), - ('এ', 'ঐ'), - ('ও', 'ন'), - ('প', 'র'), - ('ল', 'ল'), - ('শ', 'হ'), - ('ঽ', '\u{9c4}'), - ('ে', 'ৈ'), - ('ো', 'ৌ'), - ('ৎ', 'ৎ'), - ('\u{9d7}', '\u{9d7}'), - ('ড়', 'ঢ়'), - ('য়', '\u{9e3}'), - ('ৰ', 'ৱ'), - ('ৼ', 'ৼ'), - ('\u{a01}', 'ਃ'), - ('ਅ', 'ਊ'), - ('ਏ', 'ਐ'), - ('ਓ', 'ਨ'), - ('ਪ', 'ਰ'), - ('ਲ', 'ਲ਼'), - ('ਵ', 'ਸ਼'), - ('ਸ', 'ਹ'), - ('ਾ', '\u{a42}'), - ('\u{a47}', '\u{a48}'), - ('\u{a4b}', '\u{a4c}'), - ('\u{a51}', '\u{a51}'), - ('ਖ਼', 'ੜ'), - ('ਫ਼', 'ਫ਼'), - ('\u{a70}', '\u{a75}'), - ('\u{a81}', 'ઃ'), - ('અ', 'ઍ'), - ('એ', 'ઑ'), - ('ઓ', 'ન'), - ('પ', 'ર'), - ('લ', 'ળ'), - ('વ', 'હ'), - ('ઽ', '\u{ac5}'), - ('\u{ac7}', 'ૉ'), - ('ો', 'ૌ'), - ('ૐ', 'ૐ'), - ('ૠ', '\u{ae3}'), - ('ૹ', '\u{afc}'), - ('\u{b01}', 'ଃ'), - ('ଅ', 'ଌ'), - ('ଏ', 'ଐ'), - ('ଓ', 'ନ'), - ('ପ', 'ର'), - ('ଲ', 'ଳ'), - ('ଵ', 'ହ'), - ('ଽ', '\u{b44}'), - ('େ', 'ୈ'), - ('ୋ', 'ୌ'), - ('\u{b56}', '\u{b57}'), - ('ଡ଼', 'ଢ଼'), - ('ୟ', '\u{b63}'), - ('ୱ', 'ୱ'), - ('\u{b82}', 'ஃ'), - ('அ', 'ஊ'), - ('எ', 'ஐ'), - ('ஒ', 'க'), - ('ங', 'ச'), - ('ஜ', 'ஜ'), - ('ஞ', 'ட'), - ('ண', 'த'), - ('ந', 'ப'), - ('ம', 'ஹ'), - ('\u{bbe}', 'ூ'), - ('ெ', 'ை'), - ('ொ', 'ௌ'), - ('ௐ', 'ௐ'), - ('\u{bd7}', '\u{bd7}'), - ('\u{c00}', 'ఌ'), - ('ఎ', 'ఐ'), - ('ఒ', 'న'), - ('ప', 'హ'), - ('ఽ', 'ౄ'), - ('\u{c46}', '\u{c48}'), - ('\u{c4a}', '\u{c4c}'), - ('\u{c55}', '\u{c56}'), - ('ౘ', 'ౚ'), - ('ౝ', 'ౝ'), - ('ౠ', '\u{c63}'), - ('ಀ', 'ಃ'), - ('ಅ', 'ಌ'), - ('ಎ', 'ಐ'), - ('ಒ', 'ನ'), - ('ಪ', 'ಳ'), - ('ವ', 'ಹ'), - ('ಽ', 'ೄ'), - ('\u{cc6}', '\u{cc8}'), - ('\u{cca}', '\u{ccc}'), - ('\u{cd5}', '\u{cd6}'), - ('ೝ', 'ೞ'), - ('ೠ', '\u{ce3}'), - ('ೱ', 'ೳ'), - ('\u{d00}', 'ഌ'), - ('എ', 'ഐ'), - ('ഒ', 'ഺ'), - ('ഽ', '\u{d44}'), - ('െ', 'ൈ'), - ('ൊ', 'ൌ'), - ('ൎ', 'ൎ'), - ('ൔ', '\u{d57}'), - ('ൟ', '\u{d63}'), - ('ൺ', 'ൿ'), - ('\u{d81}', 'ඃ'), - ('අ', 'ඖ'), - ('ක', 'න'), - ('ඳ', 'ර'), - ('ල', 'ල'), - ('ව', 'ෆ'), - ('\u{dcf}', '\u{dd4}'), - ('\u{dd6}', '\u{dd6}'), - ('ෘ', '\u{ddf}'), - ('ෲ', 'ෳ'), - ('ก', '\u{e3a}'), - ('เ', 'ๆ'), - ('\u{e4d}', '\u{e4d}'), - ('ກ', 'ຂ'), - ('ຄ', 'ຄ'), - ('ຆ', 'ຊ'), - ('ຌ', 'ຣ'), - ('ລ', 'ລ'), - ('ວ', '\u{eb9}'), - ('\u{ebb}', 'ຽ'), - ('ເ', 'ໄ'), - ('ໆ', 'ໆ'), - ('\u{ecd}', '\u{ecd}'), - ('ໜ', 'ໟ'), - ('ༀ', 'ༀ'), - ('ཀ', 'ཇ'), - ('ཉ', 'ཬ'), - ('\u{f71}', '\u{f83}'), - ('ྈ', '\u{f97}'), - ('\u{f99}', '\u{fbc}'), - ('က', '\u{1036}'), - ('း', 'း'), - ('ျ', 'ဿ'), - ('ၐ', 'ႏ'), - ('ႚ', '\u{109d}'), - ('Ⴀ', 'Ⴥ'), - ('Ⴧ', 'Ⴧ'), - ('Ⴭ', 'Ⴭ'), - ('ა', 'ჺ'), - ('ჼ', 'ቈ'), - ('ቊ', 'ቍ'), - ('ቐ', 'ቖ'), - ('ቘ', 'ቘ'), - ('ቚ', 'ቝ'), - ('በ', 'ኈ'), - ('ኊ', 'ኍ'), - ('ነ', 'ኰ'), - ('ኲ', 'ኵ'), - ('ኸ', 'ኾ'), - ('ዀ', 'ዀ'), - ('ዂ', 'ዅ'), - ('ወ', 'ዖ'), - ('ዘ', 'ጐ'), - ('ጒ', 'ጕ'), - ('ጘ', 'ፚ'), - ('ᎀ', 'ᎏ'), - ('Ꭰ', 'Ᏽ'), - ('ᏸ', 'ᏽ'), - ('ᐁ', 'ᙬ'), - ('ᙯ', 'ᙿ'), - ('ᚁ', 'ᚚ'), - ('ᚠ', 'ᛪ'), - ('ᛮ', 'ᛸ'), - ('ᜀ', '\u{1713}'), - ('ᜟ', '\u{1733}'), - ('ᝀ', '\u{1753}'), - ('ᝠ', 'ᝬ'), - ('ᝮ', 'ᝰ'), - ('\u{1772}', '\u{1773}'), - ('ក', 'ឳ'), - ('ា', 'ៈ'), - ('ៗ', 'ៗ'), - ('ៜ', 'ៜ'), - ('ᠠ', 'ᡸ'), - ('ᢀ', 'ᢪ'), - ('ᢰ', 'ᣵ'), - ('ᤀ', 'ᤞ'), - ('\u{1920}', 'ᤫ'), - ('ᤰ', 'ᤸ'), - ('ᥐ', 'ᥭ'), - ('ᥰ', 'ᥴ'), - ('ᦀ', 'ᦫ'), - ('ᦰ', 'ᧉ'), - ('ᨀ', '\u{1a1b}'), - ('ᨠ', '\u{1a5e}'), - ('ᩡ', '\u{1a74}'), - 
('ᪧ', 'ᪧ'), - ('\u{1abf}', '\u{1ac0}'), - ('\u{1acc}', '\u{1ace}'), - ('\u{1b00}', 'ᬳ'), - ('\u{1b35}', '\u{1b43}'), - ('ᭅ', 'ᭌ'), - ('\u{1b80}', '\u{1ba9}'), - ('\u{1bac}', 'ᮯ'), - ('ᮺ', 'ᯥ'), - ('ᯧ', '\u{1bf1}'), - ('ᰀ', '\u{1c36}'), - ('ᱍ', 'ᱏ'), - ('ᱚ', 'ᱽ'), - ('ᲀ', 'ᲊ'), - ('Ა', 'Ჺ'), - ('Ჽ', 'Ჿ'), - ('ᳩ', 'ᳬ'), - ('ᳮ', 'ᳳ'), - ('ᳵ', 'ᳶ'), - ('ᳺ', 'ᳺ'), - ('ᴀ', 'ᶿ'), - ('\u{1dd3}', '\u{1df4}'), - ('Ḁ', 'ἕ'), - ('Ἐ', 'Ἕ'), - ('ἠ', 'ὅ'), - ('Ὀ', 'Ὅ'), - ('ὐ', 'ὗ'), - ('Ὑ', 'Ὑ'), - ('Ὓ', 'Ὓ'), - ('Ὕ', 'Ὕ'), - ('Ὗ', 'ώ'), - ('ᾀ', 'ᾴ'), - ('ᾶ', 'ᾼ'), - ('ι', 'ι'), - ('ῂ', 'ῄ'), - ('ῆ', 'ῌ'), - ('ῐ', 'ΐ'), - ('ῖ', 'Ί'), - ('ῠ', 'Ῥ'), - ('ῲ', 'ῴ'), - ('ῶ', 'ῼ'), - ('ⁱ', 'ⁱ'), - ('ⁿ', 'ⁿ'), - ('ₐ', 'ₜ'), - ('ℂ', 'ℂ'), - ('ℇ', 'ℇ'), - ('ℊ', 'ℓ'), - ('ℕ', 'ℕ'), - ('ℙ', 'ℝ'), - ('ℤ', 'ℤ'), - ('Ω', 'Ω'), - ('ℨ', 'ℨ'), - ('K', 'ℭ'), - ('ℯ', 'ℹ'), - ('ℼ', 'ℿ'), - ('ⅅ', 'ⅉ'), - ('ⅎ', 'ⅎ'), - ('Ⅰ', 'ↈ'), - ('Ⓐ', 'ⓩ'), - ('Ⰰ', 'ⳤ'), - ('Ⳬ', 'ⳮ'), - ('Ⳳ', 'ⳳ'), - ('ⴀ', 'ⴥ'), - ('ⴧ', 'ⴧ'), - ('ⴭ', 'ⴭ'), - ('ⴰ', 'ⵧ'), - ('ⵯ', 'ⵯ'), - ('ⶀ', 'ⶖ'), - ('ⶠ', 'ⶦ'), - ('ⶨ', 'ⶮ'), - ('ⶰ', 'ⶶ'), - ('ⶸ', 'ⶾ'), - ('ⷀ', 'ⷆ'), - ('ⷈ', 'ⷎ'), - ('ⷐ', 'ⷖ'), - ('ⷘ', 'ⷞ'), - ('\u{2de0}', '\u{2dff}'), - ('ⸯ', 'ⸯ'), - ('々', '〇'), - ('〡', '〩'), - ('〱', '〵'), - ('〸', '〼'), - ('ぁ', 'ゖ'), - ('ゝ', 'ゟ'), - ('ァ', 'ヺ'), - ('ー', 'ヿ'), - ('ㄅ', 'ㄯ'), - ('ㄱ', 'ㆎ'), - ('ㆠ', 'ㆿ'), - ('ㇰ', 'ㇿ'), - ('㐀', '䶿'), - ('一', 'ꒌ'), - ('ꓐ', 'ꓽ'), - ('ꔀ', 'ꘌ'), - ('ꘐ', 'ꘟ'), - ('ꘪ', 'ꘫ'), - ('Ꙁ', 'ꙮ'), - ('\u{a674}', '\u{a67b}'), - ('ꙿ', 'ꛯ'), - ('ꜗ', 'ꜟ'), - ('Ꜣ', 'ꞈ'), - ('Ꞌ', 'ꟍ'), - ('Ꟑ', 'ꟑ'), - ('ꟓ', 'ꟓ'), - ('ꟕ', 'Ƛ'), - ('ꟲ', 'ꠅ'), - ('ꠇ', 'ꠧ'), - ('ꡀ', 'ꡳ'), - ('ꢀ', 'ꣃ'), - ('\u{a8c5}', '\u{a8c5}'), - ('ꣲ', 'ꣷ'), - ('ꣻ', 'ꣻ'), - ('ꣽ', '\u{a8ff}'), - ('ꤊ', '\u{a92a}'), - ('ꤰ', 'ꥒ'), - ('ꥠ', 'ꥼ'), - ('\u{a980}', 'ꦲ'), - ('ꦴ', 'ꦿ'), - ('ꧏ', 'ꧏ'), - ('ꧠ', 'ꧯ'), - ('ꧺ', 'ꧾ'), - ('ꨀ', '\u{aa36}'), - ('ꩀ', 'ꩍ'), - ('ꩠ', 'ꩶ'), - ('ꩺ', '\u{aabe}'), - ('ꫀ', 'ꫀ'), - ('ꫂ', 'ꫂ'), - ('ꫛ', 'ꫝ'), - ('ꫠ', 'ꫯ'), - ('ꫲ', 'ꫵ'), - ('ꬁ', 'ꬆ'), - ('ꬉ', 'ꬎ'), - ('ꬑ', 'ꬖ'), - ('ꬠ', 'ꬦ'), - ('ꬨ', 'ꬮ'), - ('ꬰ', 'ꭚ'), - ('ꭜ', 'ꭩ'), - ('ꭰ', 'ꯪ'), - ('가', '힣'), - ('ힰ', 'ퟆ'), - ('ퟋ', 'ퟻ'), - ('豈', '舘'), - ('並', '龎'), - ('ff', 'st'), - ('ﬓ', 'ﬗ'), - ('יִ', 'ﬨ'), - ('שׁ', 'זּ'), - ('טּ', 'לּ'), - ('מּ', 'מּ'), - ('נּ', 'סּ'), - ('ףּ', 'פּ'), - ('צּ', 'ﮱ'), - ('ﯓ', 'ﴽ'), - ('ﵐ', 'ﶏ'), - ('ﶒ', 'ﷇ'), - ('ﷰ', 'ﷻ'), - ('ﹰ', 'ﹴ'), - ('ﹶ', 'ﻼ'), - ('A', 'Z'), - ('a', 'z'), - ('ヲ', 'ᄒ'), - ('ᅡ', 'ᅦ'), - ('ᅧ', 'ᅬ'), - ('ᅭ', 'ᅲ'), - ('ᅳ', 'ᅵ'), - ('𐀀', '𐀋'), - ('𐀍', '𐀦'), - ('𐀨', '𐀺'), - ('𐀼', '𐀽'), - ('𐀿', '𐁍'), - ('𐁐', '𐁝'), - ('𐂀', '𐃺'), - ('𐅀', '𐅴'), - ('𐊀', '𐊜'), - ('𐊠', '𐋐'), - ('𐌀', '𐌟'), - ('𐌭', '𐍊'), - ('𐍐', '\u{1037a}'), - ('𐎀', '𐎝'), - ('𐎠', '𐏃'), - ('𐏈', '𐏏'), - ('𐏑', '𐏕'), - ('𐐀', '𐒝'), - ('𐒰', '𐓓'), - ('𐓘', '𐓻'), - ('𐔀', '𐔧'), - ('𐔰', '𐕣'), - ('𐕰', '𐕺'), - ('𐕼', '𐖊'), - ('𐖌', '𐖒'), - ('𐖔', '𐖕'), - ('𐖗', '𐖡'), - ('𐖣', '𐖱'), - ('𐖳', '𐖹'), - ('𐖻', '𐖼'), - ('𐗀', '𐗳'), - ('𐘀', '𐜶'), - ('𐝀', '𐝕'), - ('𐝠', '𐝧'), - ('𐞀', '𐞅'), - ('𐞇', '𐞰'), - ('𐞲', '𐞺'), - ('𐠀', '𐠅'), - ('𐠈', '𐠈'), - ('𐠊', '𐠵'), - ('𐠷', '𐠸'), - ('𐠼', '𐠼'), - ('𐠿', '𐡕'), - ('𐡠', '𐡶'), - ('𐢀', '𐢞'), - ('𐣠', '𐣲'), - ('𐣴', '𐣵'), - ('𐤀', '𐤕'), - ('𐤠', '𐤹'), - ('𐦀', '𐦷'), - ('𐦾', '𐦿'), - ('𐨀', '\u{10a03}'), - ('\u{10a05}', '\u{10a06}'), - ('\u{10a0c}', '𐨓'), - ('𐨕', '𐨗'), - ('𐨙', '𐨵'), - ('𐩠', '𐩼'), - ('𐪀', '𐪜'), - ('𐫀', '𐫇'), - ('𐫉', '𐫤'), - ('𐬀', '𐬵'), - ('𐭀', '𐭕'), - ('𐭠', '𐭲'), - ('𐮀', '𐮑'), - ('𐰀', '𐱈'), - ('𐲀', '𐲲'), - ('𐳀', '𐳲'), - ('𐴀', '\u{10d27}'), - ('𐵊', '𐵥'), - ('\u{10d69}', '\u{10d69}'), - ('𐵯', '𐶅'), 
- ('𐺀', '𐺩'), - ('\u{10eab}', '\u{10eac}'), - ('𐺰', '𐺱'), - ('𐻂', '𐻄'), - ('\u{10efc}', '\u{10efc}'), - ('𐼀', '𐼜'), - ('𐼧', '𐼧'), - ('𐼰', '𐽅'), - ('𐽰', '𐾁'), - ('𐾰', '𐿄'), - ('𐿠', '𐿶'), - ('𑀀', '\u{11045}'), - ('𑁱', '𑁵'), - ('\u{11080}', '𑂸'), - ('\u{110c2}', '\u{110c2}'), - ('𑃐', '𑃨'), - ('\u{11100}', '\u{11132}'), - ('𑅄', '𑅇'), - ('𑅐', '𑅲'), - ('𑅶', '𑅶'), - ('\u{11180}', '𑆿'), - ('𑇁', '𑇄'), - ('𑇎', '\u{111cf}'), - ('𑇚', '𑇚'), - ('𑇜', '𑇜'), - ('𑈀', '𑈑'), - ('𑈓', '\u{11234}'), - ('\u{11237}', '\u{11237}'), - ('\u{1123e}', '\u{11241}'), - ('𑊀', '𑊆'), - ('𑊈', '𑊈'), - ('𑊊', '𑊍'), - ('𑊏', '𑊝'), - ('𑊟', '𑊨'), - ('𑊰', '\u{112e8}'), - ('\u{11300}', '𑌃'), - ('𑌅', '𑌌'), - ('𑌏', '𑌐'), - ('𑌓', '𑌨'), - ('𑌪', '𑌰'), - ('𑌲', '𑌳'), - ('𑌵', '𑌹'), - ('𑌽', '𑍄'), - ('𑍇', '𑍈'), - ('𑍋', '𑍌'), - ('𑍐', '𑍐'), - ('\u{11357}', '\u{11357}'), - ('𑍝', '𑍣'), - ('𑎀', '𑎉'), - ('𑎋', '𑎋'), - ('𑎎', '𑎎'), - ('𑎐', '𑎵'), - ('𑎷', '\u{113c0}'), - ('\u{113c2}', '\u{113c2}'), - ('\u{113c5}', '\u{113c5}'), - ('\u{113c7}', '𑏊'), - ('𑏌', '𑏍'), - ('𑏑', '𑏑'), - ('𑏓', '𑏓'), - ('𑐀', '𑑁'), - ('\u{11443}', '𑑅'), - ('𑑇', '𑑊'), - ('𑑟', '𑑡'), - ('𑒀', '𑓁'), - ('𑓄', '𑓅'), - ('𑓇', '𑓇'), - ('𑖀', '\u{115b5}'), - ('𑖸', '𑖾'), - ('𑗘', '\u{115dd}'), - ('𑘀', '𑘾'), - ('\u{11640}', '\u{11640}'), - ('𑙄', '𑙄'), - ('𑚀', '\u{116b5}'), - ('𑚸', '𑚸'), - ('𑜀', '𑜚'), - ('\u{1171d}', '\u{1172a}'), - ('𑝀', '𑝆'), - ('𑠀', '𑠸'), - ('𑢠', '𑣟'), - ('𑣿', '𑤆'), - ('𑤉', '𑤉'), - ('𑤌', '𑤓'), - ('𑤕', '𑤖'), - ('𑤘', '𑤵'), - ('𑤷', '𑤸'), - ('\u{1193b}', '\u{1193c}'), - ('𑤿', '𑥂'), - ('𑦠', '𑦧'), - ('𑦪', '\u{119d7}'), - ('\u{119da}', '𑧟'), - ('𑧡', '𑧡'), - ('𑧣', '𑧤'), - ('𑨀', '𑨲'), - ('\u{11a35}', '\u{11a3e}'), - ('𑩐', '𑪗'), - ('𑪝', '𑪝'), - ('𑪰', '𑫸'), - ('𑯀', '𑯠'), - ('𑰀', '𑰈'), - ('𑰊', '\u{11c36}'), - ('\u{11c38}', '𑰾'), - ('𑱀', '𑱀'), - ('𑱲', '𑲏'), - ('\u{11c92}', '\u{11ca7}'), - ('𑲩', '\u{11cb6}'), - ('𑴀', '𑴆'), - ('𑴈', '𑴉'), - ('𑴋', '\u{11d36}'), - ('\u{11d3a}', '\u{11d3a}'), - ('\u{11d3c}', '\u{11d3d}'), - ('\u{11d3f}', '\u{11d41}'), - ('\u{11d43}', '\u{11d43}'), - ('𑵆', '\u{11d47}'), - ('𑵠', '𑵥'), - ('𑵧', '𑵨'), - ('𑵪', '𑶎'), - ('\u{11d90}', '\u{11d91}'), - ('𑶓', '𑶖'), - ('𑶘', '𑶘'), - ('𑻠', '𑻶'), - ('\u{11f00}', '𑼐'), - ('𑼒', '\u{11f3a}'), - ('𑼾', '\u{11f40}'), - ('𑾰', '𑾰'), - ('𒀀', '𒎙'), - ('𒐀', '𒑮'), - ('𒒀', '𒕃'), - ('𒾐', '𒿰'), - ('𓀀', '𓐯'), - ('𓑁', '𓑆'), - ('𓑠', '𔏺'), - ('𔐀', '𔙆'), - ('𖄀', '\u{1612e}'), - ('𖠀', '𖨸'), - ('𖩀', '𖩞'), - ('𖩰', '𖪾'), - ('𖫐', '𖫭'), - ('𖬀', '𖬯'), - ('𖭀', '𖭃'), - ('𖭣', '𖭷'), - ('𖭽', '𖮏'), - ('𖵀', '𖵬'), - ('𖹀', '𖹿'), - ('𖼀', '𖽊'), - ('\u{16f4f}', '𖾇'), - ('\u{16f8f}', '𖾟'), - ('𖿠', '𖿡'), - ('𖿣', '𖿣'), - ('\u{16ff0}', '\u{16ff1}'), - ('𗀀', '𘟷'), - ('𘠀', '𘳕'), - ('𘳿', '𘴈'), - ('𚿰', '𚿳'), - ('𚿵', '𚿻'), - ('𚿽', '𚿾'), - ('𛀀', '𛄢'), - ('𛄲', '𛄲'), - ('𛅐', '𛅒'), - ('𛅕', '𛅕'), - ('𛅤', '𛅧'), - ('𛅰', '𛋻'), - ('𛰀', '𛱪'), - ('𛱰', '𛱼'), - ('𛲀', '𛲈'), - ('𛲐', '𛲙'), - ('\u{1bc9e}', '\u{1bc9e}'), - ('𝐀', '𝑔'), - ('𝑖', '𝒜'), - ('𝒞', '𝒟'), - ('𝒢', '𝒢'), - ('𝒥', '𝒦'), - ('𝒩', '𝒬'), - ('𝒮', '𝒹'), - ('𝒻', '𝒻'), - ('𝒽', '𝓃'), - ('𝓅', '𝔅'), - ('𝔇', '𝔊'), - ('𝔍', '𝔔'), - ('𝔖', '𝔜'), - ('𝔞', '𝔹'), - ('𝔻', '𝔾'), - ('𝕀', '𝕄'), - ('𝕆', '𝕆'), - ('𝕊', '𝕐'), - ('𝕒', '𝚥'), - ('𝚨', '𝛀'), - ('𝛂', '𝛚'), - ('𝛜', '𝛺'), - ('𝛼', '𝜔'), - ('𝜖', '𝜴'), - ('𝜶', '𝝎'), - ('𝝐', '𝝮'), - ('𝝰', '𝞈'), - ('𝞊', '𝞨'), - ('𝞪', '𝟂'), - ('𝟄', '𝟋'), - ('𝼀', '𝼞'), - ('𝼥', '𝼪'), - ('\u{1e000}', '\u{1e006}'), - ('\u{1e008}', '\u{1e018}'), - ('\u{1e01b}', '\u{1e021}'), - ('\u{1e023}', '\u{1e024}'), - ('\u{1e026}', '\u{1e02a}'), - ('𞀰', '𞁭'), - ('\u{1e08f}', '\u{1e08f}'), - ('𞄀', '𞄬'), - ('𞄷', '𞄽'), - ('𞅎', '𞅎'), - 
('𞊐', '𞊭'), - ('𞋀', '𞋫'), - ('𞓐', '𞓫'), - ('𞗐', '𞗭'), - ('𞗰', '𞗰'), - ('𞟠', '𞟦'), - ('𞟨', '𞟫'), - ('𞟭', '𞟮'), - ('𞟰', '𞟾'), - ('𞠀', '𞣄'), - ('𞤀', '𞥃'), - ('\u{1e947}', '\u{1e947}'), - ('𞥋', '𞥋'), - ('𞸀', '𞸃'), - ('𞸅', '𞸟'), - ('𞸡', '𞸢'), - ('𞸤', '𞸤'), - ('𞸧', '𞸧'), - ('𞸩', '𞸲'), - ('𞸴', '𞸷'), - ('𞸹', '𞸹'), - ('𞸻', '𞸻'), - ('𞹂', '𞹂'), - ('𞹇', '𞹇'), - ('𞹉', '𞹉'), - ('𞹋', '𞹋'), - ('𞹍', '𞹏'), - ('𞹑', '𞹒'), - ('𞹔', '𞹔'), - ('𞹗', '𞹗'), - ('𞹙', '𞹙'), - ('𞹛', '𞹛'), - ('𞹝', '𞹝'), - ('𞹟', '𞹟'), - ('𞹡', '𞹢'), - ('𞹤', '𞹤'), - ('𞹧', '𞹪'), - ('𞹬', '𞹲'), - ('𞹴', '𞹷'), - ('𞹹', '𞹼'), - ('𞹾', '𞹾'), - ('𞺀', '𞺉'), - ('𞺋', '𞺛'), - ('𞺡', '𞺣'), - ('𞺥', '𞺩'), - ('𞺫', '𞺻'), - ('🄰', '🅉'), - ('🅐', '🅩'), - ('🅰', '🆉'), - ('𠀀', '𪛟'), - ('𪜀', '𫜹'), - ('𫝀', '𫠝'), - ('𫠠', '𬺡'), - ('𬺰', '𮯠'), - ('𮯰', '𮹝'), - ('丽', '𪘀'), - ('𰀀', '𱍊'), - ('𱍐', '𲎯'), -]; - -pub const BIDI_CONTROL: &'static [(char, char)] = &[ - ('\u{61c}', '\u{61c}'), - ('\u{200e}', '\u{200f}'), - ('\u{202a}', '\u{202e}'), - ('\u{2066}', '\u{2069}'), -]; - -pub const BIDI_MIRRORED: &'static [(char, char)] = &[ - ('(', ')'), - ('<', '<'), - ('>', '>'), - ('[', '['), - (']', ']'), - ('{', '{'), - ('}', '}'), - ('«', '«'), - ('»', '»'), - ('༺', '༽'), - ('᚛', '᚜'), - ('‹', '›'), - ('⁅', '⁆'), - ('⁽', '⁾'), - ('₍', '₎'), - ('⅀', '⅀'), - ('∁', '∄'), - ('∈', '∍'), - ('∑', '∑'), - ('∕', '∖'), - ('√', '∝'), - ('∟', '∢'), - ('∤', '∤'), - ('∦', '∦'), - ('∫', '∳'), - ('∹', '∹'), - ('∻', '≌'), - ('≒', '≕'), - ('≟', '≠'), - ('≢', '≢'), - ('≤', '≫'), - ('≭', '⊌'), - ('⊏', '⊒'), - ('⊘', '⊘'), - ('⊢', '⊣'), - ('⊦', '⊸'), - ('⊾', '⊿'), - ('⋉', '⋍'), - ('⋐', '⋑'), - ('⋖', '⋭'), - ('⋰', '⋿'), - ('⌈', '⌋'), - ('⌠', '⌡'), - ('〈', '〉'), - ('❨', '❵'), - ('⟀', '⟀'), - ('⟃', '⟆'), - ('⟈', '⟉'), - ('⟋', '⟍'), - ('⟓', '⟖'), - ('⟜', '⟞'), - ('⟢', '⟯'), - ('⦃', '⦘'), - ('⦛', '⦠'), - ('⦢', '⦯'), - ('⦸', '⦸'), - ('⧀', '⧅'), - ('⧉', '⧉'), - ('⧎', '⧒'), - ('⧔', '⧕'), - ('⧘', '⧜'), - ('⧡', '⧡'), - ('⧣', '⧥'), - ('⧨', '⧩'), - ('⧴', '⧹'), - ('⧼', '⧽'), - ('⨊', '⨜'), - ('⨞', '⨡'), - ('⨤', '⨤'), - ('⨦', '⨦'), - ('⨩', '⨩'), - ('⨫', '⨮'), - ('⨴', '⨵'), - ('⨼', '⨾'), - ('⩗', '⩘'), - ('⩤', '⩥'), - ('⩪', '⩭'), - ('⩯', '⩰'), - ('⩳', '⩴'), - ('⩹', '⪣'), - ('⪦', '⪭'), - ('⪯', '⫖'), - ('⫝̸', '⫝̸'), - ('⫞', '⫞'), - ('⫢', '⫦'), - ('⫬', '⫮'), - ('⫳', '⫳'), - ('⫷', '⫻'), - ('⫽', '⫽'), - ('⯾', '⯾'), - ('⸂', '⸅'), - ('⸉', '⸊'), - ('⸌', '⸍'), - ('⸜', '⸝'), - ('⸠', '⸩'), - ('⹕', '⹜'), - ('〈', '】'), - ('〔', '〛'), - ('﹙', '﹞'), - ('﹤', '﹥'), - ('(', ')'), - ('<', '<'), - ('>', '>'), - ('[', '['), - (']', ']'), - ('{', '{'), - ('}', '}'), - ('⦅', '⦆'), - ('「', '」'), - ('𝛛', '𝛛'), - ('𝜕', '𝜕'), - ('𝝏', '𝝏'), - ('𝞉', '𝞉'), - ('𝟃', '𝟃'), -]; - -pub const CASE_IGNORABLE: &'static [(char, char)] = &[ - ('\'', '\''), - ('.', '.'), - (':', ':'), - ('^', '^'), - ('`', '`'), - ('¨', '¨'), - ('\u{ad}', '\u{ad}'), - ('¯', '¯'), - ('´', '´'), - ('·', '¸'), - ('ʰ', '\u{36f}'), - ('ʹ', '͵'), - ('ͺ', 'ͺ'), - ('΄', '΅'), - ('·', '·'), - ('\u{483}', '\u{489}'), - ('ՙ', 'ՙ'), - ('՟', '՟'), - ('\u{591}', '\u{5bd}'), - ('\u{5bf}', '\u{5bf}'), - ('\u{5c1}', '\u{5c2}'), - ('\u{5c4}', '\u{5c5}'), - ('\u{5c7}', '\u{5c7}'), - ('״', '״'), - ('\u{600}', '\u{605}'), - ('\u{610}', '\u{61a}'), - ('\u{61c}', '\u{61c}'), - ('ـ', 'ـ'), - ('\u{64b}', '\u{65f}'), - ('\u{670}', '\u{670}'), - ('\u{6d6}', '\u{6dd}'), - ('\u{6df}', '\u{6e8}'), - ('\u{6ea}', '\u{6ed}'), - ('\u{70f}', '\u{70f}'), - ('\u{711}', '\u{711}'), - ('\u{730}', '\u{74a}'), - ('\u{7a6}', '\u{7b0}'), - ('\u{7eb}', 'ߵ'), - ('ߺ', 'ߺ'), - ('\u{7fd}', '\u{7fd}'), - ('\u{816}', '\u{82d}'), 
- ('\u{859}', '\u{85b}'), - ('࢈', '࢈'), - ('\u{890}', '\u{891}'), - ('\u{897}', '\u{89f}'), - ('ࣉ', '\u{902}'), - ('\u{93a}', '\u{93a}'), - ('\u{93c}', '\u{93c}'), - ('\u{941}', '\u{948}'), - ('\u{94d}', '\u{94d}'), - ('\u{951}', '\u{957}'), - ('\u{962}', '\u{963}'), - ('ॱ', 'ॱ'), - ('\u{981}', '\u{981}'), - ('\u{9bc}', '\u{9bc}'), - ('\u{9c1}', '\u{9c4}'), - ('\u{9cd}', '\u{9cd}'), - ('\u{9e2}', '\u{9e3}'), - ('\u{9fe}', '\u{9fe}'), - ('\u{a01}', '\u{a02}'), - ('\u{a3c}', '\u{a3c}'), - ('\u{a41}', '\u{a42}'), - ('\u{a47}', '\u{a48}'), - ('\u{a4b}', '\u{a4d}'), - ('\u{a51}', '\u{a51}'), - ('\u{a70}', '\u{a71}'), - ('\u{a75}', '\u{a75}'), - ('\u{a81}', '\u{a82}'), - ('\u{abc}', '\u{abc}'), - ('\u{ac1}', '\u{ac5}'), - ('\u{ac7}', '\u{ac8}'), - ('\u{acd}', '\u{acd}'), - ('\u{ae2}', '\u{ae3}'), - ('\u{afa}', '\u{aff}'), - ('\u{b01}', '\u{b01}'), - ('\u{b3c}', '\u{b3c}'), - ('\u{b3f}', '\u{b3f}'), - ('\u{b41}', '\u{b44}'), - ('\u{b4d}', '\u{b4d}'), - ('\u{b55}', '\u{b56}'), - ('\u{b62}', '\u{b63}'), - ('\u{b82}', '\u{b82}'), - ('\u{bc0}', '\u{bc0}'), - ('\u{bcd}', '\u{bcd}'), - ('\u{c00}', '\u{c00}'), - ('\u{c04}', '\u{c04}'), - ('\u{c3c}', '\u{c3c}'), - ('\u{c3e}', '\u{c40}'), - ('\u{c46}', '\u{c48}'), - ('\u{c4a}', '\u{c4d}'), - ('\u{c55}', '\u{c56}'), - ('\u{c62}', '\u{c63}'), - ('\u{c81}', '\u{c81}'), - ('\u{cbc}', '\u{cbc}'), - ('\u{cbf}', '\u{cbf}'), - ('\u{cc6}', '\u{cc6}'), - ('\u{ccc}', '\u{ccd}'), - ('\u{ce2}', '\u{ce3}'), - ('\u{d00}', '\u{d01}'), - ('\u{d3b}', '\u{d3c}'), - ('\u{d41}', '\u{d44}'), - ('\u{d4d}', '\u{d4d}'), - ('\u{d62}', '\u{d63}'), - ('\u{d81}', '\u{d81}'), - ('\u{dca}', '\u{dca}'), - ('\u{dd2}', '\u{dd4}'), - ('\u{dd6}', '\u{dd6}'), - ('\u{e31}', '\u{e31}'), - ('\u{e34}', '\u{e3a}'), - ('ๆ', '\u{e4e}'), - ('\u{eb1}', '\u{eb1}'), - ('\u{eb4}', '\u{ebc}'), - ('ໆ', 'ໆ'), - ('\u{ec8}', '\u{ece}'), - ('\u{f18}', '\u{f19}'), - ('\u{f35}', '\u{f35}'), - ('\u{f37}', '\u{f37}'), - ('\u{f39}', '\u{f39}'), - ('\u{f71}', '\u{f7e}'), - ('\u{f80}', '\u{f84}'), - ('\u{f86}', '\u{f87}'), - ('\u{f8d}', '\u{f97}'), - ('\u{f99}', '\u{fbc}'), - ('\u{fc6}', '\u{fc6}'), - ('\u{102d}', '\u{1030}'), - ('\u{1032}', '\u{1037}'), - ('\u{1039}', '\u{103a}'), - ('\u{103d}', '\u{103e}'), - ('\u{1058}', '\u{1059}'), - ('\u{105e}', '\u{1060}'), - ('\u{1071}', '\u{1074}'), - ('\u{1082}', '\u{1082}'), - ('\u{1085}', '\u{1086}'), - ('\u{108d}', '\u{108d}'), - ('\u{109d}', '\u{109d}'), - ('ჼ', 'ჼ'), - ('\u{135d}', '\u{135f}'), - ('\u{1712}', '\u{1714}'), - ('\u{1732}', '\u{1733}'), - ('\u{1752}', '\u{1753}'), - ('\u{1772}', '\u{1773}'), - ('\u{17b4}', '\u{17b5}'), - ('\u{17b7}', '\u{17bd}'), - ('\u{17c6}', '\u{17c6}'), - ('\u{17c9}', '\u{17d3}'), - ('ៗ', 'ៗ'), - ('\u{17dd}', '\u{17dd}'), - ('\u{180b}', '\u{180f}'), - ('ᡃ', 'ᡃ'), - ('\u{1885}', '\u{1886}'), - ('\u{18a9}', '\u{18a9}'), - ('\u{1920}', '\u{1922}'), - ('\u{1927}', '\u{1928}'), - ('\u{1932}', '\u{1932}'), - ('\u{1939}', '\u{193b}'), - ('\u{1a17}', '\u{1a18}'), - ('\u{1a1b}', '\u{1a1b}'), - ('\u{1a56}', '\u{1a56}'), - ('\u{1a58}', '\u{1a5e}'), - ('\u{1a60}', '\u{1a60}'), - ('\u{1a62}', '\u{1a62}'), - ('\u{1a65}', '\u{1a6c}'), - ('\u{1a73}', '\u{1a7c}'), - ('\u{1a7f}', '\u{1a7f}'), - ('ᪧ', 'ᪧ'), - ('\u{1ab0}', '\u{1ace}'), - ('\u{1b00}', '\u{1b03}'), - ('\u{1b34}', '\u{1b34}'), - ('\u{1b36}', '\u{1b3a}'), - ('\u{1b3c}', '\u{1b3c}'), - ('\u{1b42}', '\u{1b42}'), - ('\u{1b6b}', '\u{1b73}'), - ('\u{1b80}', '\u{1b81}'), - ('\u{1ba2}', '\u{1ba5}'), - ('\u{1ba8}', '\u{1ba9}'), - ('\u{1bab}', '\u{1bad}'), - ('\u{1be6}', '\u{1be6}'), - ('\u{1be8}', 
'\u{1be9}'), - ('\u{1bed}', '\u{1bed}'), - ('\u{1bef}', '\u{1bf1}'), - ('\u{1c2c}', '\u{1c33}'), - ('\u{1c36}', '\u{1c37}'), - ('ᱸ', 'ᱽ'), - ('\u{1cd0}', '\u{1cd2}'), - ('\u{1cd4}', '\u{1ce0}'), - ('\u{1ce2}', '\u{1ce8}'), - ('\u{1ced}', '\u{1ced}'), - ('\u{1cf4}', '\u{1cf4}'), - ('\u{1cf8}', '\u{1cf9}'), - ('ᴬ', 'ᵪ'), - ('ᵸ', 'ᵸ'), - ('ᶛ', '\u{1dff}'), - ('᾽', '᾽'), - ('᾿', '῁'), - ('῍', '῏'), - ('῝', '῟'), - ('῭', '`'), - ('´', '῾'), - ('\u{200b}', '\u{200f}'), - ('‘', '’'), - ('․', '․'), - ('‧', '‧'), - ('\u{202a}', '\u{202e}'), - ('\u{2060}', '\u{2064}'), - ('\u{2066}', '\u{206f}'), - ('ⁱ', 'ⁱ'), - ('ⁿ', 'ⁿ'), - ('ₐ', 'ₜ'), - ('\u{20d0}', '\u{20f0}'), - ('ⱼ', 'ⱽ'), - ('\u{2cef}', '\u{2cf1}'), - ('ⵯ', 'ⵯ'), - ('\u{2d7f}', '\u{2d7f}'), - ('\u{2de0}', '\u{2dff}'), - ('ⸯ', 'ⸯ'), - ('々', '々'), - ('\u{302a}', '\u{302d}'), - ('〱', '〵'), - ('〻', '〻'), - ('\u{3099}', 'ゞ'), - ('ー', 'ヾ'), - ('ꀕ', 'ꀕ'), - ('ꓸ', 'ꓽ'), - ('ꘌ', 'ꘌ'), - ('\u{a66f}', '\u{a672}'), - ('\u{a674}', '\u{a67d}'), - ('ꙿ', 'ꙿ'), - ('ꚜ', '\u{a69f}'), - ('\u{a6f0}', '\u{a6f1}'), - ('꜀', '꜡'), - ('ꝰ', 'ꝰ'), - ('ꞈ', '꞊'), - ('ꟲ', 'ꟴ'), - ('ꟸ', 'ꟹ'), - ('\u{a802}', '\u{a802}'), - ('\u{a806}', '\u{a806}'), - ('\u{a80b}', '\u{a80b}'), - ('\u{a825}', '\u{a826}'), - ('\u{a82c}', '\u{a82c}'), - ('\u{a8c4}', '\u{a8c5}'), - ('\u{a8e0}', '\u{a8f1}'), - ('\u{a8ff}', '\u{a8ff}'), - ('\u{a926}', '\u{a92d}'), - ('\u{a947}', '\u{a951}'), - ('\u{a980}', '\u{a982}'), - ('\u{a9b3}', '\u{a9b3}'), - ('\u{a9b6}', '\u{a9b9}'), - ('\u{a9bc}', '\u{a9bd}'), - ('ꧏ', 'ꧏ'), - ('\u{a9e5}', 'ꧦ'), - ('\u{aa29}', '\u{aa2e}'), - ('\u{aa31}', '\u{aa32}'), - ('\u{aa35}', '\u{aa36}'), - ('\u{aa43}', '\u{aa43}'), - ('\u{aa4c}', '\u{aa4c}'), - ('ꩰ', 'ꩰ'), - ('\u{aa7c}', '\u{aa7c}'), - ('\u{aab0}', '\u{aab0}'), - ('\u{aab2}', '\u{aab4}'), - ('\u{aab7}', '\u{aab8}'), - ('\u{aabe}', '\u{aabf}'), - ('\u{aac1}', '\u{aac1}'), - ('ꫝ', 'ꫝ'), - ('\u{aaec}', '\u{aaed}'), - ('ꫳ', 'ꫴ'), - ('\u{aaf6}', '\u{aaf6}'), - ('꭛', 'ꭟ'), - ('ꭩ', '꭫'), - ('\u{abe5}', '\u{abe5}'), - ('\u{abe8}', '\u{abe8}'), - ('\u{abed}', '\u{abed}'), - ('\u{fb1e}', '\u{fb1e}'), - ('﮲', '﯂'), - ('\u{fe00}', '\u{fe0f}'), - ('︓', '︓'), - ('\u{fe20}', '\u{fe2f}'), - ('﹒', '﹒'), - ('﹕', '﹕'), - ('\u{feff}', '\u{feff}'), - (''', '''), - ('.', '.'), - (':', ':'), - ('^', '^'), - ('`', '`'), - ('ー', 'ー'), - ('\u{ff9e}', '\u{ff9f}'), - (' ̄', ' ̄'), - ('\u{fff9}', '\u{fffb}'), - ('\u{101fd}', '\u{101fd}'), - ('\u{102e0}', '\u{102e0}'), - ('\u{10376}', '\u{1037a}'), - ('𐞀', '𐞅'), - ('𐞇', '𐞰'), - ('𐞲', '𐞺'), - ('\u{10a01}', '\u{10a03}'), - ('\u{10a05}', '\u{10a06}'), - ('\u{10a0c}', '\u{10a0f}'), - ('\u{10a38}', '\u{10a3a}'), - ('\u{10a3f}', '\u{10a3f}'), - ('\u{10ae5}', '\u{10ae6}'), - ('\u{10d24}', '\u{10d27}'), - ('𐵎', '𐵎'), - ('\u{10d69}', '\u{10d6d}'), - ('𐵯', '𐵯'), - ('\u{10eab}', '\u{10eac}'), - ('\u{10efc}', '\u{10eff}'), - ('\u{10f46}', '\u{10f50}'), - ('\u{10f82}', '\u{10f85}'), - ('\u{11001}', '\u{11001}'), - ('\u{11038}', '\u{11046}'), - ('\u{11070}', '\u{11070}'), - ('\u{11073}', '\u{11074}'), - ('\u{1107f}', '\u{11081}'), - ('\u{110b3}', '\u{110b6}'), - ('\u{110b9}', '\u{110ba}'), - ('\u{110bd}', '\u{110bd}'), - ('\u{110c2}', '\u{110c2}'), - ('\u{110cd}', '\u{110cd}'), - ('\u{11100}', '\u{11102}'), - ('\u{11127}', '\u{1112b}'), - ('\u{1112d}', '\u{11134}'), - ('\u{11173}', '\u{11173}'), - ('\u{11180}', '\u{11181}'), - ('\u{111b6}', '\u{111be}'), - ('\u{111c9}', '\u{111cc}'), - ('\u{111cf}', '\u{111cf}'), - ('\u{1122f}', '\u{11231}'), - ('\u{11234}', '\u{11234}'), - ('\u{11236}', '\u{11237}'), - 
('\u{1123e}', '\u{1123e}'), - ('\u{11241}', '\u{11241}'), - ('\u{112df}', '\u{112df}'), - ('\u{112e3}', '\u{112ea}'), - ('\u{11300}', '\u{11301}'), - ('\u{1133b}', '\u{1133c}'), - ('\u{11340}', '\u{11340}'), - ('\u{11366}', '\u{1136c}'), - ('\u{11370}', '\u{11374}'), - ('\u{113bb}', '\u{113c0}'), - ('\u{113ce}', '\u{113ce}'), - ('\u{113d0}', '\u{113d0}'), - ('\u{113d2}', '\u{113d2}'), - ('\u{113e1}', '\u{113e2}'), - ('\u{11438}', '\u{1143f}'), - ('\u{11442}', '\u{11444}'), - ('\u{11446}', '\u{11446}'), - ('\u{1145e}', '\u{1145e}'), - ('\u{114b3}', '\u{114b8}'), - ('\u{114ba}', '\u{114ba}'), - ('\u{114bf}', '\u{114c0}'), - ('\u{114c2}', '\u{114c3}'), - ('\u{115b2}', '\u{115b5}'), - ('\u{115bc}', '\u{115bd}'), - ('\u{115bf}', '\u{115c0}'), - ('\u{115dc}', '\u{115dd}'), - ('\u{11633}', '\u{1163a}'), - ('\u{1163d}', '\u{1163d}'), - ('\u{1163f}', '\u{11640}'), - ('\u{116ab}', '\u{116ab}'), - ('\u{116ad}', '\u{116ad}'), - ('\u{116b0}', '\u{116b5}'), - ('\u{116b7}', '\u{116b7}'), - ('\u{1171d}', '\u{1171d}'), - ('\u{1171f}', '\u{1171f}'), - ('\u{11722}', '\u{11725}'), - ('\u{11727}', '\u{1172b}'), - ('\u{1182f}', '\u{11837}'), - ('\u{11839}', '\u{1183a}'), - ('\u{1193b}', '\u{1193c}'), - ('\u{1193e}', '\u{1193e}'), - ('\u{11943}', '\u{11943}'), - ('\u{119d4}', '\u{119d7}'), - ('\u{119da}', '\u{119db}'), - ('\u{119e0}', '\u{119e0}'), - ('\u{11a01}', '\u{11a0a}'), - ('\u{11a33}', '\u{11a38}'), - ('\u{11a3b}', '\u{11a3e}'), - ('\u{11a47}', '\u{11a47}'), - ('\u{11a51}', '\u{11a56}'), - ('\u{11a59}', '\u{11a5b}'), - ('\u{11a8a}', '\u{11a96}'), - ('\u{11a98}', '\u{11a99}'), - ('\u{11c30}', '\u{11c36}'), - ('\u{11c38}', '\u{11c3d}'), - ('\u{11c3f}', '\u{11c3f}'), - ('\u{11c92}', '\u{11ca7}'), - ('\u{11caa}', '\u{11cb0}'), - ('\u{11cb2}', '\u{11cb3}'), - ('\u{11cb5}', '\u{11cb6}'), - ('\u{11d31}', '\u{11d36}'), - ('\u{11d3a}', '\u{11d3a}'), - ('\u{11d3c}', '\u{11d3d}'), - ('\u{11d3f}', '\u{11d45}'), - ('\u{11d47}', '\u{11d47}'), - ('\u{11d90}', '\u{11d91}'), - ('\u{11d95}', '\u{11d95}'), - ('\u{11d97}', '\u{11d97}'), - ('\u{11ef3}', '\u{11ef4}'), - ('\u{11f00}', '\u{11f01}'), - ('\u{11f36}', '\u{11f3a}'), - ('\u{11f40}', '\u{11f40}'), - ('\u{11f42}', '\u{11f42}'), - ('\u{11f5a}', '\u{11f5a}'), - ('\u{13430}', '\u{13440}'), - ('\u{13447}', '\u{13455}'), - ('\u{1611e}', '\u{16129}'), - ('\u{1612d}', '\u{1612f}'), - ('\u{16af0}', '\u{16af4}'), - ('\u{16b30}', '\u{16b36}'), - ('𖭀', '𖭃'), - ('𖵀', '𖵂'), - ('𖵫', '𖵬'), - ('\u{16f4f}', '\u{16f4f}'), - ('\u{16f8f}', '𖾟'), - ('𖿠', '𖿡'), - ('𖿣', '\u{16fe4}'), - ('𚿰', '𚿳'), - ('𚿵', '𚿻'), - ('𚿽', '𚿾'), - ('\u{1bc9d}', '\u{1bc9e}'), - ('\u{1bca0}', '\u{1bca3}'), - ('\u{1cf00}', '\u{1cf2d}'), - ('\u{1cf30}', '\u{1cf46}'), - ('\u{1d167}', '\u{1d169}'), - ('\u{1d173}', '\u{1d182}'), - ('\u{1d185}', '\u{1d18b}'), - ('\u{1d1aa}', '\u{1d1ad}'), - ('\u{1d242}', '\u{1d244}'), - ('\u{1da00}', '\u{1da36}'), - ('\u{1da3b}', '\u{1da6c}'), - ('\u{1da75}', '\u{1da75}'), - ('\u{1da84}', '\u{1da84}'), - ('\u{1da9b}', '\u{1da9f}'), - ('\u{1daa1}', '\u{1daaf}'), - ('\u{1e000}', '\u{1e006}'), - ('\u{1e008}', '\u{1e018}'), - ('\u{1e01b}', '\u{1e021}'), - ('\u{1e023}', '\u{1e024}'), - ('\u{1e026}', '\u{1e02a}'), - ('𞀰', '𞁭'), - ('\u{1e08f}', '\u{1e08f}'), - ('\u{1e130}', '𞄽'), - ('\u{1e2ae}', '\u{1e2ae}'), - ('\u{1e2ec}', '\u{1e2ef}'), - ('𞓫', '\u{1e4ef}'), - ('\u{1e5ee}', '\u{1e5ef}'), - ('\u{1e8d0}', '\u{1e8d6}'), - ('\u{1e944}', '𞥋'), - ('🏻', '🏿'), - ('\u{e0001}', '\u{e0001}'), - ('\u{e0020}', '\u{e007f}'), - ('\u{e0100}', '\u{e01ef}'), -]; - -pub const CASED: &'static [(char, char)] = 
&[ - ('A', 'Z'), - ('a', 'z'), - ('ª', 'ª'), - ('µ', 'µ'), - ('º', 'º'), - ('À', 'Ö'), - ('Ø', 'ö'), - ('ø', 'ƺ'), - ('Ƽ', 'ƿ'), - ('DŽ', 'ʓ'), - ('ʕ', 'ʸ'), - ('ˀ', 'ˁ'), - ('ˠ', 'ˤ'), - ('\u{345}', '\u{345}'), - ('Ͱ', 'ͳ'), - ('Ͷ', 'ͷ'), - ('ͺ', 'ͽ'), - ('Ϳ', 'Ϳ'), - ('Ά', 'Ά'), - ('Έ', 'Ί'), - ('Ό', 'Ό'), - ('Ύ', 'Ρ'), - ('Σ', 'ϵ'), - ('Ϸ', 'ҁ'), - ('Ҋ', 'ԯ'), - ('Ա', 'Ֆ'), - ('ՠ', 'ֈ'), - ('Ⴀ', 'Ⴥ'), - ('Ⴧ', 'Ⴧ'), - ('Ⴭ', 'Ⴭ'), - ('ა', 'ჺ'), - ('ჼ', 'ჿ'), - ('Ꭰ', 'Ᏽ'), - ('ᏸ', 'ᏽ'), - ('ᲀ', 'ᲊ'), - ('Ა', 'Ჺ'), - ('Ჽ', 'Ჿ'), - ('ᴀ', 'ᶿ'), - ('Ḁ', 'ἕ'), - ('Ἐ', 'Ἕ'), - ('ἠ', 'ὅ'), - ('Ὀ', 'Ὅ'), - ('ὐ', 'ὗ'), - ('Ὑ', 'Ὑ'), - ('Ὓ', 'Ὓ'), - ('Ὕ', 'Ὕ'), - ('Ὗ', 'ώ'), - ('ᾀ', 'ᾴ'), - ('ᾶ', 'ᾼ'), - ('ι', 'ι'), - ('ῂ', 'ῄ'), - ('ῆ', 'ῌ'), - ('ῐ', 'ΐ'), - ('ῖ', 'Ί'), - ('ῠ', 'Ῥ'), - ('ῲ', 'ῴ'), - ('ῶ', 'ῼ'), - ('ⁱ', 'ⁱ'), - ('ⁿ', 'ⁿ'), - ('ₐ', 'ₜ'), - ('ℂ', 'ℂ'), - ('ℇ', 'ℇ'), - ('ℊ', 'ℓ'), - ('ℕ', 'ℕ'), - ('ℙ', 'ℝ'), - ('ℤ', 'ℤ'), - ('Ω', 'Ω'), - ('ℨ', 'ℨ'), - ('K', 'ℭ'), - ('ℯ', 'ℴ'), - ('ℹ', 'ℹ'), - ('ℼ', 'ℿ'), - ('ⅅ', 'ⅉ'), - ('ⅎ', 'ⅎ'), - ('Ⅰ', 'ⅿ'), - ('Ↄ', 'ↄ'), - ('Ⓐ', 'ⓩ'), - ('Ⰰ', 'ⳤ'), - ('Ⳬ', 'ⳮ'), - ('Ⳳ', 'ⳳ'), - ('ⴀ', 'ⴥ'), - ('ⴧ', 'ⴧ'), - ('ⴭ', 'ⴭ'), - ('Ꙁ', 'ꙭ'), - ('Ꚁ', 'ꚝ'), - ('Ꜣ', 'ꞇ'), - ('Ꞌ', 'ꞎ'), - ('Ꞑ', 'ꟍ'), - ('Ꟑ', 'ꟑ'), - ('ꟓ', 'ꟓ'), - ('ꟕ', 'Ƛ'), - ('ꟲ', 'ꟶ'), - ('ꟸ', 'ꟺ'), - ('ꬰ', 'ꭚ'), - ('ꭜ', 'ꭩ'), - ('ꭰ', 'ꮿ'), - ('ff', 'st'), - ('ﬓ', 'ﬗ'), - ('A', 'Z'), - ('a', 'z'), - ('𐐀', '𐑏'), - ('𐒰', '𐓓'), - ('𐓘', '𐓻'), - ('𐕰', '𐕺'), - ('𐕼', '𐖊'), - ('𐖌', '𐖒'), - ('𐖔', '𐖕'), - ('𐖗', '𐖡'), - ('𐖣', '𐖱'), - ('𐖳', '𐖹'), - ('𐖻', '𐖼'), - ('𐞀', '𐞀'), - ('𐞃', '𐞅'), - ('𐞇', '𐞰'), - ('𐞲', '𐞺'), - ('𐲀', '𐲲'), - ('𐳀', '𐳲'), - ('𐵐', '𐵥'), - ('𐵰', '𐶅'), - ('𑢠', '𑣟'), - ('𖹀', '𖹿'), - ('𝐀', '𝑔'), - ('𝑖', '𝒜'), - ('𝒞', '𝒟'), - ('𝒢', '𝒢'), - ('𝒥', '𝒦'), - ('𝒩', '𝒬'), - ('𝒮', '𝒹'), - ('𝒻', '𝒻'), - ('𝒽', '𝓃'), - ('𝓅', '𝔅'), - ('𝔇', '𝔊'), - ('𝔍', '𝔔'), - ('𝔖', '𝔜'), - ('𝔞', '𝔹'), - ('𝔻', '𝔾'), - ('𝕀', '𝕄'), - ('𝕆', '𝕆'), - ('𝕊', '𝕐'), - ('𝕒', '𝚥'), - ('𝚨', '𝛀'), - ('𝛂', '𝛚'), - ('𝛜', '𝛺'), - ('𝛼', '𝜔'), - ('𝜖', '𝜴'), - ('𝜶', '𝝎'), - ('𝝐', '𝝮'), - ('𝝰', '𝞈'), - ('𝞊', '𝞨'), - ('𝞪', '𝟂'), - ('𝟄', '𝟋'), - ('𝼀', '𝼉'), - ('𝼋', '𝼞'), - ('𝼥', '𝼪'), - ('𞀰', '𞁭'), - ('𞤀', '𞥃'), - ('🄰', '🅉'), - ('🅐', '🅩'), - ('🅰', '🆉'), -]; - -pub const CHANGES_WHEN_CASEFOLDED: &'static [(char, char)] = &[ - ('A', 'Z'), - ('µ', 'µ'), - ('À', 'Ö'), - ('Ø', 'ß'), - ('Ā', 'Ā'), - ('Ă', 'Ă'), - ('Ą', 'Ą'), - ('Ć', 'Ć'), - ('Ĉ', 'Ĉ'), - ('Ċ', 'Ċ'), - ('Č', 'Č'), - ('Ď', 'Ď'), - ('Đ', 'Đ'), - ('Ē', 'Ē'), - ('Ĕ', 'Ĕ'), - ('Ė', 'Ė'), - ('Ę', 'Ę'), - ('Ě', 'Ě'), - ('Ĝ', 'Ĝ'), - ('Ğ', 'Ğ'), - ('Ġ', 'Ġ'), - ('Ģ', 'Ģ'), - ('Ĥ', 'Ĥ'), - ('Ħ', 'Ħ'), - ('Ĩ', 'Ĩ'), - ('Ī', 'Ī'), - ('Ĭ', 'Ĭ'), - ('Į', 'Į'), - ('İ', 'İ'), - ('IJ', 'IJ'), - ('Ĵ', 'Ĵ'), - ('Ķ', 'Ķ'), - ('Ĺ', 'Ĺ'), - ('Ļ', 'Ļ'), - ('Ľ', 'Ľ'), - ('Ŀ', 'Ŀ'), - ('Ł', 'Ł'), - ('Ń', 'Ń'), - ('Ņ', 'Ņ'), - ('Ň', 'Ň'), - ('ʼn', 'Ŋ'), - ('Ō', 'Ō'), - ('Ŏ', 'Ŏ'), - ('Ő', 'Ő'), - ('Œ', 'Œ'), - ('Ŕ', 'Ŕ'), - ('Ŗ', 'Ŗ'), - ('Ř', 'Ř'), - ('Ś', 'Ś'), - ('Ŝ', 'Ŝ'), - ('Ş', 'Ş'), - ('Š', 'Š'), - ('Ţ', 'Ţ'), - ('Ť', 'Ť'), - ('Ŧ', 'Ŧ'), - ('Ũ', 'Ũ'), - ('Ū', 'Ū'), - ('Ŭ', 'Ŭ'), - ('Ů', 'Ů'), - ('Ű', 'Ű'), - ('Ų', 'Ų'), - ('Ŵ', 'Ŵ'), - ('Ŷ', 'Ŷ'), - ('Ÿ', 'Ź'), - ('Ż', 'Ż'), - ('Ž', 'Ž'), - ('ſ', 'ſ'), - ('Ɓ', 'Ƃ'), - ('Ƅ', 'Ƅ'), - ('Ɔ', 'Ƈ'), - ('Ɖ', 'Ƌ'), - ('Ǝ', 'Ƒ'), - ('Ɠ', 'Ɣ'), - ('Ɩ', 'Ƙ'), - ('Ɯ', 'Ɲ'), - ('Ɵ', 'Ơ'), - ('Ƣ', 'Ƣ'), - ('Ƥ', 'Ƥ'), - ('Ʀ', 'Ƨ'), - ('Ʃ', 'Ʃ'), - ('Ƭ', 'Ƭ'), - ('Ʈ', 'Ư'), - ('Ʊ', 'Ƴ'), - ('Ƶ', 'Ƶ'), - ('Ʒ', 'Ƹ'), - ('Ƽ', 'Ƽ'), - ('DŽ', 'Dž'), - ('LJ', 'Lj'), 
- ('NJ', 'Nj'), - ('Ǎ', 'Ǎ'), - ('Ǐ', 'Ǐ'), - ('Ǒ', 'Ǒ'), - ('Ǔ', 'Ǔ'), - ('Ǖ', 'Ǖ'), - ('Ǘ', 'Ǘ'), - ('Ǚ', 'Ǚ'), - ('Ǜ', 'Ǜ'), - ('Ǟ', 'Ǟ'), - ('Ǡ', 'Ǡ'), - ('Ǣ', 'Ǣ'), - ('Ǥ', 'Ǥ'), - ('Ǧ', 'Ǧ'), - ('Ǩ', 'Ǩ'), - ('Ǫ', 'Ǫ'), - ('Ǭ', 'Ǭ'), - ('Ǯ', 'Ǯ'), - ('DZ', 'Dz'), - ('Ǵ', 'Ǵ'), - ('Ƕ', 'Ǹ'), - ('Ǻ', 'Ǻ'), - ('Ǽ', 'Ǽ'), - ('Ǿ', 'Ǿ'), - ('Ȁ', 'Ȁ'), - ('Ȃ', 'Ȃ'), - ('Ȅ', 'Ȅ'), - ('Ȇ', 'Ȇ'), - ('Ȉ', 'Ȉ'), - ('Ȋ', 'Ȋ'), - ('Ȍ', 'Ȍ'), - ('Ȏ', 'Ȏ'), - ('Ȑ', 'Ȑ'), - ('Ȓ', 'Ȓ'), - ('Ȕ', 'Ȕ'), - ('Ȗ', 'Ȗ'), - ('Ș', 'Ș'), - ('Ț', 'Ț'), - ('Ȝ', 'Ȝ'), - ('Ȟ', 'Ȟ'), - ('Ƞ', 'Ƞ'), - ('Ȣ', 'Ȣ'), - ('Ȥ', 'Ȥ'), - ('Ȧ', 'Ȧ'), - ('Ȩ', 'Ȩ'), - ('Ȫ', 'Ȫ'), - ('Ȭ', 'Ȭ'), - ('Ȯ', 'Ȯ'), - ('Ȱ', 'Ȱ'), - ('Ȳ', 'Ȳ'), - ('Ⱥ', 'Ȼ'), - ('Ƚ', 'Ⱦ'), - ('Ɂ', 'Ɂ'), - ('Ƀ', 'Ɇ'), - ('Ɉ', 'Ɉ'), - ('Ɋ', 'Ɋ'), - ('Ɍ', 'Ɍ'), - ('Ɏ', 'Ɏ'), - ('\u{345}', '\u{345}'), - ('Ͱ', 'Ͱ'), - ('Ͳ', 'Ͳ'), - ('Ͷ', 'Ͷ'), - ('Ϳ', 'Ϳ'), - ('Ά', 'Ά'), - ('Έ', 'Ί'), - ('Ό', 'Ό'), - ('Ύ', 'Ώ'), - ('Α', 'Ρ'), - ('Σ', 'Ϋ'), - ('ς', 'ς'), - ('Ϗ', 'ϑ'), - ('ϕ', 'ϖ'), - ('Ϙ', 'Ϙ'), - ('Ϛ', 'Ϛ'), - ('Ϝ', 'Ϝ'), - ('Ϟ', 'Ϟ'), - ('Ϡ', 'Ϡ'), - ('Ϣ', 'Ϣ'), - ('Ϥ', 'Ϥ'), - ('Ϧ', 'Ϧ'), - ('Ϩ', 'Ϩ'), - ('Ϫ', 'Ϫ'), - ('Ϭ', 'Ϭ'), - ('Ϯ', 'Ϯ'), - ('ϰ', 'ϱ'), - ('ϴ', 'ϵ'), - ('Ϸ', 'Ϸ'), - ('Ϲ', 'Ϻ'), - ('Ͻ', 'Я'), - ('Ѡ', 'Ѡ'), - ('Ѣ', 'Ѣ'), - ('Ѥ', 'Ѥ'), - ('Ѧ', 'Ѧ'), - ('Ѩ', 'Ѩ'), - ('Ѫ', 'Ѫ'), - ('Ѭ', 'Ѭ'), - ('Ѯ', 'Ѯ'), - ('Ѱ', 'Ѱ'), - ('Ѳ', 'Ѳ'), - ('Ѵ', 'Ѵ'), - ('Ѷ', 'Ѷ'), - ('Ѹ', 'Ѹ'), - ('Ѻ', 'Ѻ'), - ('Ѽ', 'Ѽ'), - ('Ѿ', 'Ѿ'), - ('Ҁ', 'Ҁ'), - ('Ҋ', 'Ҋ'), - ('Ҍ', 'Ҍ'), - ('Ҏ', 'Ҏ'), - ('Ґ', 'Ґ'), - ('Ғ', 'Ғ'), - ('Ҕ', 'Ҕ'), - ('Җ', 'Җ'), - ('Ҙ', 'Ҙ'), - ('Қ', 'Қ'), - ('Ҝ', 'Ҝ'), - ('Ҟ', 'Ҟ'), - ('Ҡ', 'Ҡ'), - ('Ң', 'Ң'), - ('Ҥ', 'Ҥ'), - ('Ҧ', 'Ҧ'), - ('Ҩ', 'Ҩ'), - ('Ҫ', 'Ҫ'), - ('Ҭ', 'Ҭ'), - ('Ү', 'Ү'), - ('Ұ', 'Ұ'), - ('Ҳ', 'Ҳ'), - ('Ҵ', 'Ҵ'), - ('Ҷ', 'Ҷ'), - ('Ҹ', 'Ҹ'), - ('Һ', 'Һ'), - ('Ҽ', 'Ҽ'), - ('Ҿ', 'Ҿ'), - ('Ӏ', 'Ӂ'), - ('Ӄ', 'Ӄ'), - ('Ӆ', 'Ӆ'), - ('Ӈ', 'Ӈ'), - ('Ӊ', 'Ӊ'), - ('Ӌ', 'Ӌ'), - ('Ӎ', 'Ӎ'), - ('Ӑ', 'Ӑ'), - ('Ӓ', 'Ӓ'), - ('Ӕ', 'Ӕ'), - ('Ӗ', 'Ӗ'), - ('Ә', 'Ә'), - ('Ӛ', 'Ӛ'), - ('Ӝ', 'Ӝ'), - ('Ӟ', 'Ӟ'), - ('Ӡ', 'Ӡ'), - ('Ӣ', 'Ӣ'), - ('Ӥ', 'Ӥ'), - ('Ӧ', 'Ӧ'), - ('Ө', 'Ө'), - ('Ӫ', 'Ӫ'), - ('Ӭ', 'Ӭ'), - ('Ӯ', 'Ӯ'), - ('Ӱ', 'Ӱ'), - ('Ӳ', 'Ӳ'), - ('Ӵ', 'Ӵ'), - ('Ӷ', 'Ӷ'), - ('Ӹ', 'Ӹ'), - ('Ӻ', 'Ӻ'), - ('Ӽ', 'Ӽ'), - ('Ӿ', 'Ӿ'), - ('Ԁ', 'Ԁ'), - ('Ԃ', 'Ԃ'), - ('Ԅ', 'Ԅ'), - ('Ԇ', 'Ԇ'), - ('Ԉ', 'Ԉ'), - ('Ԋ', 'Ԋ'), - ('Ԍ', 'Ԍ'), - ('Ԏ', 'Ԏ'), - ('Ԑ', 'Ԑ'), - ('Ԓ', 'Ԓ'), - ('Ԕ', 'Ԕ'), - ('Ԗ', 'Ԗ'), - ('Ԙ', 'Ԙ'), - ('Ԛ', 'Ԛ'), - ('Ԝ', 'Ԝ'), - ('Ԟ', 'Ԟ'), - ('Ԡ', 'Ԡ'), - ('Ԣ', 'Ԣ'), - ('Ԥ', 'Ԥ'), - ('Ԧ', 'Ԧ'), - ('Ԩ', 'Ԩ'), - ('Ԫ', 'Ԫ'), - ('Ԭ', 'Ԭ'), - ('Ԯ', 'Ԯ'), - ('Ա', 'Ֆ'), - ('և', 'և'), - ('Ⴀ', 'Ⴥ'), - ('Ⴧ', 'Ⴧ'), - ('Ⴭ', 'Ⴭ'), - ('ᏸ', 'ᏽ'), - ('ᲀ', 'Ᲊ'), - ('Ა', 'Ჺ'), - ('Ჽ', 'Ჿ'), - ('Ḁ', 'Ḁ'), - ('Ḃ', 'Ḃ'), - ('Ḅ', 'Ḅ'), - ('Ḇ', 'Ḇ'), - ('Ḉ', 'Ḉ'), - ('Ḋ', 'Ḋ'), - ('Ḍ', 'Ḍ'), - ('Ḏ', 'Ḏ'), - ('Ḑ', 'Ḑ'), - ('Ḓ', 'Ḓ'), - ('Ḕ', 'Ḕ'), - ('Ḗ', 'Ḗ'), - ('Ḙ', 'Ḙ'), - ('Ḛ', 'Ḛ'), - ('Ḝ', 'Ḝ'), - ('Ḟ', 'Ḟ'), - ('Ḡ', 'Ḡ'), - ('Ḣ', 'Ḣ'), - ('Ḥ', 'Ḥ'), - ('Ḧ', 'Ḧ'), - ('Ḩ', 'Ḩ'), - ('Ḫ', 'Ḫ'), - ('Ḭ', 'Ḭ'), - ('Ḯ', 'Ḯ'), - ('Ḱ', 'Ḱ'), - ('Ḳ', 'Ḳ'), - ('Ḵ', 'Ḵ'), - ('Ḷ', 'Ḷ'), - ('Ḹ', 'Ḹ'), - ('Ḻ', 'Ḻ'), - ('Ḽ', 'Ḽ'), - ('Ḿ', 'Ḿ'), - ('Ṁ', 'Ṁ'), - ('Ṃ', 'Ṃ'), - ('Ṅ', 'Ṅ'), - ('Ṇ', 'Ṇ'), - ('Ṉ', 'Ṉ'), - ('Ṋ', 'Ṋ'), - ('Ṍ', 'Ṍ'), - ('Ṏ', 'Ṏ'), - ('Ṑ', 'Ṑ'), - ('Ṓ', 'Ṓ'), - ('Ṕ', 'Ṕ'), - ('Ṗ', 'Ṗ'), - ('Ṙ', 'Ṙ'), - ('Ṛ', 'Ṛ'), - ('Ṝ', 'Ṝ'), - ('Ṟ', 'Ṟ'), - ('Ṡ', 'Ṡ'), - ('Ṣ', 'Ṣ'), - ('Ṥ', 'Ṥ'), - ('Ṧ', 'Ṧ'), - ('Ṩ', 'Ṩ'), - ('Ṫ', 'Ṫ'), - ('Ṭ', 'Ṭ'), - ('Ṯ', 
'Ṯ'), - ('Ṱ', 'Ṱ'), - ('Ṳ', 'Ṳ'), - ('Ṵ', 'Ṵ'), - ('Ṷ', 'Ṷ'), - ('Ṹ', 'Ṹ'), - ('Ṻ', 'Ṻ'), - ('Ṽ', 'Ṽ'), - ('Ṿ', 'Ṿ'), - ('Ẁ', 'Ẁ'), - ('Ẃ', 'Ẃ'), - ('Ẅ', 'Ẅ'), - ('Ẇ', 'Ẇ'), - ('Ẉ', 'Ẉ'), - ('Ẋ', 'Ẋ'), - ('Ẍ', 'Ẍ'), - ('Ẏ', 'Ẏ'), - ('Ẑ', 'Ẑ'), - ('Ẓ', 'Ẓ'), - ('Ẕ', 'Ẕ'), - ('ẚ', 'ẛ'), - ('ẞ', 'ẞ'), - ('Ạ', 'Ạ'), - ('Ả', 'Ả'), - ('Ấ', 'Ấ'), - ('Ầ', 'Ầ'), - ('Ẩ', 'Ẩ'), - ('Ẫ', 'Ẫ'), - ('Ậ', 'Ậ'), - ('Ắ', 'Ắ'), - ('Ằ', 'Ằ'), - ('Ẳ', 'Ẳ'), - ('Ẵ', 'Ẵ'), - ('Ặ', 'Ặ'), - ('Ẹ', 'Ẹ'), - ('Ẻ', 'Ẻ'), - ('Ẽ', 'Ẽ'), - ('Ế', 'Ế'), - ('Ề', 'Ề'), - ('Ể', 'Ể'), - ('Ễ', 'Ễ'), - ('Ệ', 'Ệ'), - ('Ỉ', 'Ỉ'), - ('Ị', 'Ị'), - ('Ọ', 'Ọ'), - ('Ỏ', 'Ỏ'), - ('Ố', 'Ố'), - ('Ồ', 'Ồ'), - ('Ổ', 'Ổ'), - ('Ỗ', 'Ỗ'), - ('Ộ', 'Ộ'), - ('Ớ', 'Ớ'), - ('Ờ', 'Ờ'), - ('Ở', 'Ở'), - ('Ỡ', 'Ỡ'), - ('Ợ', 'Ợ'), - ('Ụ', 'Ụ'), - ('Ủ', 'Ủ'), - ('Ứ', 'Ứ'), - ('Ừ', 'Ừ'), - ('Ử', 'Ử'), - ('Ữ', 'Ữ'), - ('Ự', 'Ự'), - ('Ỳ', 'Ỳ'), - ('Ỵ', 'Ỵ'), - ('Ỷ', 'Ỷ'), - ('Ỹ', 'Ỹ'), - ('Ỻ', 'Ỻ'), - ('Ỽ', 'Ỽ'), - ('Ỿ', 'Ỿ'), - ('Ἀ', 'Ἇ'), - ('Ἐ', 'Ἕ'), - ('Ἠ', 'Ἧ'), - ('Ἰ', 'Ἷ'), - ('Ὀ', 'Ὅ'), - ('Ὑ', 'Ὑ'), - ('Ὓ', 'Ὓ'), - ('Ὕ', 'Ὕ'), - ('Ὗ', 'Ὗ'), - ('Ὠ', 'Ὧ'), - ('ᾀ', 'ᾯ'), - ('ᾲ', 'ᾴ'), - ('ᾷ', 'ᾼ'), - ('ῂ', 'ῄ'), - ('ῇ', 'ῌ'), - ('Ῐ', 'Ί'), - ('Ῠ', 'Ῥ'), - ('ῲ', 'ῴ'), - ('ῷ', 'ῼ'), - ('Ω', 'Ω'), - ('K', 'Å'), - ('Ⅎ', 'Ⅎ'), - ('Ⅰ', 'Ⅿ'), - ('Ↄ', 'Ↄ'), - ('Ⓐ', 'Ⓩ'), - ('Ⰰ', 'Ⱟ'), - ('Ⱡ', 'Ⱡ'), - ('Ɫ', 'Ɽ'), - ('Ⱨ', 'Ⱨ'), - ('Ⱪ', 'Ⱪ'), - ('Ⱬ', 'Ⱬ'), - ('Ɑ', 'Ɒ'), - ('Ⱳ', 'Ⱳ'), - ('Ⱶ', 'Ⱶ'), - ('Ȿ', 'Ⲁ'), - ('Ⲃ', 'Ⲃ'), - ('Ⲅ', 'Ⲅ'), - ('Ⲇ', 'Ⲇ'), - ('Ⲉ', 'Ⲉ'), - ('Ⲋ', 'Ⲋ'), - ('Ⲍ', 'Ⲍ'), - ('Ⲏ', 'Ⲏ'), - ('Ⲑ', 'Ⲑ'), - ('Ⲓ', 'Ⲓ'), - ('Ⲕ', 'Ⲕ'), - ('Ⲗ', 'Ⲗ'), - ('Ⲙ', 'Ⲙ'), - ('Ⲛ', 'Ⲛ'), - ('Ⲝ', 'Ⲝ'), - ('Ⲟ', 'Ⲟ'), - ('Ⲡ', 'Ⲡ'), - ('Ⲣ', 'Ⲣ'), - ('Ⲥ', 'Ⲥ'), - ('Ⲧ', 'Ⲧ'), - ('Ⲩ', 'Ⲩ'), - ('Ⲫ', 'Ⲫ'), - ('Ⲭ', 'Ⲭ'), - ('Ⲯ', 'Ⲯ'), - ('Ⲱ', 'Ⲱ'), - ('Ⲳ', 'Ⲳ'), - ('Ⲵ', 'Ⲵ'), - ('Ⲷ', 'Ⲷ'), - ('Ⲹ', 'Ⲹ'), - ('Ⲻ', 'Ⲻ'), - ('Ⲽ', 'Ⲽ'), - ('Ⲿ', 'Ⲿ'), - ('Ⳁ', 'Ⳁ'), - ('Ⳃ', 'Ⳃ'), - ('Ⳅ', 'Ⳅ'), - ('Ⳇ', 'Ⳇ'), - ('Ⳉ', 'Ⳉ'), - ('Ⳋ', 'Ⳋ'), - ('Ⳍ', 'Ⳍ'), - ('Ⳏ', 'Ⳏ'), - ('Ⳑ', 'Ⳑ'), - ('Ⳓ', 'Ⳓ'), - ('Ⳕ', 'Ⳕ'), - ('Ⳗ', 'Ⳗ'), - ('Ⳙ', 'Ⳙ'), - ('Ⳛ', 'Ⳛ'), - ('Ⳝ', 'Ⳝ'), - ('Ⳟ', 'Ⳟ'), - ('Ⳡ', 'Ⳡ'), - ('Ⳣ', 'Ⳣ'), - ('Ⳬ', 'Ⳬ'), - ('Ⳮ', 'Ⳮ'), - ('Ⳳ', 'Ⳳ'), - ('Ꙁ', 'Ꙁ'), - ('Ꙃ', 'Ꙃ'), - ('Ꙅ', 'Ꙅ'), - ('Ꙇ', 'Ꙇ'), - ('Ꙉ', 'Ꙉ'), - ('Ꙋ', 'Ꙋ'), - ('Ꙍ', 'Ꙍ'), - ('Ꙏ', 'Ꙏ'), - ('Ꙑ', 'Ꙑ'), - ('Ꙓ', 'Ꙓ'), - ('Ꙕ', 'Ꙕ'), - ('Ꙗ', 'Ꙗ'), - ('Ꙙ', 'Ꙙ'), - ('Ꙛ', 'Ꙛ'), - ('Ꙝ', 'Ꙝ'), - ('Ꙟ', 'Ꙟ'), - ('Ꙡ', 'Ꙡ'), - ('Ꙣ', 'Ꙣ'), - ('Ꙥ', 'Ꙥ'), - ('Ꙧ', 'Ꙧ'), - ('Ꙩ', 'Ꙩ'), - ('Ꙫ', 'Ꙫ'), - ('Ꙭ', 'Ꙭ'), - ('Ꚁ', 'Ꚁ'), - ('Ꚃ', 'Ꚃ'), - ('Ꚅ', 'Ꚅ'), - ('Ꚇ', 'Ꚇ'), - ('Ꚉ', 'Ꚉ'), - ('Ꚋ', 'Ꚋ'), - ('Ꚍ', 'Ꚍ'), - ('Ꚏ', 'Ꚏ'), - ('Ꚑ', 'Ꚑ'), - ('Ꚓ', 'Ꚓ'), - ('Ꚕ', 'Ꚕ'), - ('Ꚗ', 'Ꚗ'), - ('Ꚙ', 'Ꚙ'), - ('Ꚛ', 'Ꚛ'), - ('Ꜣ', 'Ꜣ'), - ('Ꜥ', 'Ꜥ'), - ('Ꜧ', 'Ꜧ'), - ('Ꜩ', 'Ꜩ'), - ('Ꜫ', 'Ꜫ'), - ('Ꜭ', 'Ꜭ'), - ('Ꜯ', 'Ꜯ'), - ('Ꜳ', 'Ꜳ'), - ('Ꜵ', 'Ꜵ'), - ('Ꜷ', 'Ꜷ'), - ('Ꜹ', 'Ꜹ'), - ('Ꜻ', 'Ꜻ'), - ('Ꜽ', 'Ꜽ'), - ('Ꜿ', 'Ꜿ'), - ('Ꝁ', 'Ꝁ'), - ('Ꝃ', 'Ꝃ'), - ('Ꝅ', 'Ꝅ'), - ('Ꝇ', 'Ꝇ'), - ('Ꝉ', 'Ꝉ'), - ('Ꝋ', 'Ꝋ'), - ('Ꝍ', 'Ꝍ'), - ('Ꝏ', 'Ꝏ'), - ('Ꝑ', 'Ꝑ'), - ('Ꝓ', 'Ꝓ'), - ('Ꝕ', 'Ꝕ'), - ('Ꝗ', 'Ꝗ'), - ('Ꝙ', 'Ꝙ'), - ('Ꝛ', 'Ꝛ'), - ('Ꝝ', 'Ꝝ'), - ('Ꝟ', 'Ꝟ'), - ('Ꝡ', 'Ꝡ'), - ('Ꝣ', 'Ꝣ'), - ('Ꝥ', 'Ꝥ'), - ('Ꝧ', 'Ꝧ'), - ('Ꝩ', 'Ꝩ'), - ('Ꝫ', 'Ꝫ'), - ('Ꝭ', 'Ꝭ'), - ('Ꝯ', 'Ꝯ'), - ('Ꝺ', 'Ꝺ'), - ('Ꝼ', 'Ꝼ'), - ('Ᵹ', 'Ꝿ'), - ('Ꞁ', 'Ꞁ'), - ('Ꞃ', 'Ꞃ'), - ('Ꞅ', 'Ꞅ'), - ('Ꞇ', 'Ꞇ'), - ('Ꞌ', 'Ꞌ'), - ('Ɥ', 'Ɥ'), - ('Ꞑ', 'Ꞑ'), - ('Ꞓ', 'Ꞓ'), - ('Ꞗ', 'Ꞗ'), - ('Ꞙ', 'Ꞙ'), - ('Ꞛ', 'Ꞛ'), - ('Ꞝ', 'Ꞝ'), - ('Ꞟ', 'Ꞟ'), - ('Ꞡ', 'Ꞡ'), - ('Ꞣ', 'Ꞣ'), - ('Ꞥ', 'Ꞥ'), - ('Ꞧ', 'Ꞧ'), - ('Ꞩ', 'Ꞩ'), - ('Ɦ', 'Ɪ'), - 
('Ʞ', 'Ꞵ'), - ('Ꞷ', 'Ꞷ'), - ('Ꞹ', 'Ꞹ'), - ('Ꞻ', 'Ꞻ'), - ('Ꞽ', 'Ꞽ'), - ('Ꞿ', 'Ꞿ'), - ('Ꟁ', 'Ꟁ'), - ('Ꟃ', 'Ꟃ'), - ('Ꞔ', 'Ꟈ'), - ('Ꟊ', 'Ꟊ'), - ('Ɤ', 'Ꟍ'), - ('Ꟑ', 'Ꟑ'), - ('Ꟗ', 'Ꟗ'), - ('Ꟙ', 'Ꟙ'), - ('Ꟛ', 'Ꟛ'), - ('Ƛ', 'Ƛ'), - ('Ꟶ', 'Ꟶ'), - ('ꭰ', 'ꮿ'), - ('ff', 'st'), - ('ﬓ', 'ﬗ'), - ('A', 'Z'), - ('𐐀', '𐐧'), - ('𐒰', '𐓓'), - ('𐕰', '𐕺'), - ('𐕼', '𐖊'), - ('𐖌', '𐖒'), - ('𐖔', '𐖕'), - ('𐲀', '𐲲'), - ('𐵐', '𐵥'), - ('𑢠', '𑢿'), - ('𖹀', '𖹟'), - ('𞤀', '𞤡'), -]; - -pub const CHANGES_WHEN_CASEMAPPED: &'static [(char, char)] = &[ - ('A', 'Z'), - ('a', 'z'), - ('µ', 'µ'), - ('À', 'Ö'), - ('Ø', 'ö'), - ('ø', 'ķ'), - ('Ĺ', 'ƌ'), - ('Ǝ', 'Ʃ'), - ('Ƭ', 'ƹ'), - ('Ƽ', 'ƽ'), - ('ƿ', 'ƿ'), - ('DŽ', 'Ƞ'), - ('Ȣ', 'ȳ'), - ('Ⱥ', 'ɔ'), - ('ɖ', 'ɗ'), - ('ə', 'ə'), - ('ɛ', 'ɜ'), - ('ɠ', 'ɡ'), - ('ɣ', 'ɦ'), - ('ɨ', 'ɬ'), - ('ɯ', 'ɯ'), - ('ɱ', 'ɲ'), - ('ɵ', 'ɵ'), - ('ɽ', 'ɽ'), - ('ʀ', 'ʀ'), - ('ʂ', 'ʃ'), - ('ʇ', 'ʌ'), - ('ʒ', 'ʒ'), - ('ʝ', 'ʞ'), - ('\u{345}', '\u{345}'), - ('Ͱ', 'ͳ'), - ('Ͷ', 'ͷ'), - ('ͻ', 'ͽ'), - ('Ϳ', 'Ϳ'), - ('Ά', 'Ά'), - ('Έ', 'Ί'), - ('Ό', 'Ό'), - ('Ύ', 'Ρ'), - ('Σ', 'ϑ'), - ('ϕ', 'ϵ'), - ('Ϸ', 'ϻ'), - ('Ͻ', 'ҁ'), - ('Ҋ', 'ԯ'), - ('Ա', 'Ֆ'), - ('ա', 'և'), - ('Ⴀ', 'Ⴥ'), - ('Ⴧ', 'Ⴧ'), - ('Ⴭ', 'Ⴭ'), - ('ა', 'ჺ'), - ('ჽ', 'ჿ'), - ('Ꭰ', 'Ᏽ'), - ('ᏸ', 'ᏽ'), - ('ᲀ', 'ᲊ'), - ('Ა', 'Ჺ'), - ('Ჽ', 'Ჿ'), - ('ᵹ', 'ᵹ'), - ('ᵽ', 'ᵽ'), - ('ᶎ', 'ᶎ'), - ('Ḁ', 'ẛ'), - ('ẞ', 'ẞ'), - ('Ạ', 'ἕ'), - ('Ἐ', 'Ἕ'), - ('ἠ', 'ὅ'), - ('Ὀ', 'Ὅ'), - ('ὐ', 'ὗ'), - ('Ὑ', 'Ὑ'), - ('Ὓ', 'Ὓ'), - ('Ὕ', 'Ὕ'), - ('Ὗ', 'ώ'), - ('ᾀ', 'ᾴ'), - ('ᾶ', 'ᾼ'), - ('ι', 'ι'), - ('ῂ', 'ῄ'), - ('ῆ', 'ῌ'), - ('ῐ', 'ΐ'), - ('ῖ', 'Ί'), - ('ῠ', 'Ῥ'), - ('ῲ', 'ῴ'), - ('ῶ', 'ῼ'), - ('Ω', 'Ω'), - ('K', 'Å'), - ('Ⅎ', 'Ⅎ'), - ('ⅎ', 'ⅎ'), - ('Ⅰ', 'ⅿ'), - ('Ↄ', 'ↄ'), - ('Ⓐ', 'ⓩ'), - ('Ⰰ', 'Ɒ'), - ('Ⱳ', 'ⱳ'), - ('Ⱶ', 'ⱶ'), - ('Ȿ', 'ⳣ'), - ('Ⳬ', 'ⳮ'), - ('Ⳳ', 'ⳳ'), - ('ⴀ', 'ⴥ'), - ('ⴧ', 'ⴧ'), - ('ⴭ', 'ⴭ'), - ('Ꙁ', 'ꙭ'), - ('Ꚁ', 'ꚛ'), - ('Ꜣ', 'ꜯ'), - ('Ꜳ', 'ꝯ'), - ('Ꝺ', 'ꞇ'), - ('Ꞌ', 'Ɥ'), - ('Ꞑ', 'ꞔ'), - ('Ꞗ', 'Ɪ'), - ('Ʞ', 'ꟍ'), - ('Ꟑ', 'ꟑ'), - ('Ꟗ', 'Ƛ'), - ('Ꟶ', 'ꟶ'), - ('ꭓ', 'ꭓ'), - ('ꭰ', 'ꮿ'), - ('ff', 'st'), - ('ﬓ', 'ﬗ'), - ('A', 'Z'), - ('a', 'z'), - ('𐐀', '𐑏'), - ('𐒰', '𐓓'), - ('𐓘', '𐓻'), - ('𐕰', '𐕺'), - ('𐕼', '𐖊'), - ('𐖌', '𐖒'), - ('𐖔', '𐖕'), - ('𐖗', '𐖡'), - ('𐖣', '𐖱'), - ('𐖳', '𐖹'), - ('𐖻', '𐖼'), - ('𐲀', '𐲲'), - ('𐳀', '𐳲'), - ('𐵐', '𐵥'), - ('𐵰', '𐶅'), - ('𑢠', '𑣟'), - ('𖹀', '𖹿'), - ('𞤀', '𞥃'), -]; - -pub const CHANGES_WHEN_LOWERCASED: &'static [(char, char)] = &[ - ('A', 'Z'), - ('À', 'Ö'), - ('Ø', 'Þ'), - ('Ā', 'Ā'), - ('Ă', 'Ă'), - ('Ą', 'Ą'), - ('Ć', 'Ć'), - ('Ĉ', 'Ĉ'), - ('Ċ', 'Ċ'), - ('Č', 'Č'), - ('Ď', 'Ď'), - ('Đ', 'Đ'), - ('Ē', 'Ē'), - ('Ĕ', 'Ĕ'), - ('Ė', 'Ė'), - ('Ę', 'Ę'), - ('Ě', 'Ě'), - ('Ĝ', 'Ĝ'), - ('Ğ', 'Ğ'), - ('Ġ', 'Ġ'), - ('Ģ', 'Ģ'), - ('Ĥ', 'Ĥ'), - ('Ħ', 'Ħ'), - ('Ĩ', 'Ĩ'), - ('Ī', 'Ī'), - ('Ĭ', 'Ĭ'), - ('Į', 'Į'), - ('İ', 'İ'), - ('IJ', 'IJ'), - ('Ĵ', 'Ĵ'), - ('Ķ', 'Ķ'), - ('Ĺ', 'Ĺ'), - ('Ļ', 'Ļ'), - ('Ľ', 'Ľ'), - ('Ŀ', 'Ŀ'), - ('Ł', 'Ł'), - ('Ń', 'Ń'), - ('Ņ', 'Ņ'), - ('Ň', 'Ň'), - ('Ŋ', 'Ŋ'), - ('Ō', 'Ō'), - ('Ŏ', 'Ŏ'), - ('Ő', 'Ő'), - ('Œ', 'Œ'), - ('Ŕ', 'Ŕ'), - ('Ŗ', 'Ŗ'), - ('Ř', 'Ř'), - ('Ś', 'Ś'), - ('Ŝ', 'Ŝ'), - ('Ş', 'Ş'), - ('Š', 'Š'), - ('Ţ', 'Ţ'), - ('Ť', 'Ť'), - ('Ŧ', 'Ŧ'), - ('Ũ', 'Ũ'), - ('Ū', 'Ū'), - ('Ŭ', 'Ŭ'), - ('Ů', 'Ů'), - ('Ű', 'Ű'), - ('Ų', 'Ų'), - ('Ŵ', 'Ŵ'), - ('Ŷ', 'Ŷ'), - ('Ÿ', 'Ź'), - ('Ż', 'Ż'), - ('Ž', 'Ž'), - ('Ɓ', 'Ƃ'), - ('Ƅ', 'Ƅ'), - ('Ɔ', 'Ƈ'), - ('Ɖ', 'Ƌ'), - ('Ǝ', 'Ƒ'), - ('Ɠ', 'Ɣ'), - ('Ɩ', 'Ƙ'), - ('Ɯ', 'Ɲ'), - ('Ɵ', 'Ơ'), - ('Ƣ', 'Ƣ'), - ('Ƥ', 'Ƥ'), - ('Ʀ', 'Ƨ'), - ('Ʃ', 'Ʃ'), - ('Ƭ', 'Ƭ'), - ('Ʈ', 
'Ư'), - ('Ʊ', 'Ƴ'), - ('Ƶ', 'Ƶ'), - ('Ʒ', 'Ƹ'), - ('Ƽ', 'Ƽ'), - ('DŽ', 'Dž'), - ('LJ', 'Lj'), - ('NJ', 'Nj'), - ('Ǎ', 'Ǎ'), - ('Ǐ', 'Ǐ'), - ('Ǒ', 'Ǒ'), - ('Ǔ', 'Ǔ'), - ('Ǖ', 'Ǖ'), - ('Ǘ', 'Ǘ'), - ('Ǚ', 'Ǚ'), - ('Ǜ', 'Ǜ'), - ('Ǟ', 'Ǟ'), - ('Ǡ', 'Ǡ'), - ('Ǣ', 'Ǣ'), - ('Ǥ', 'Ǥ'), - ('Ǧ', 'Ǧ'), - ('Ǩ', 'Ǩ'), - ('Ǫ', 'Ǫ'), - ('Ǭ', 'Ǭ'), - ('Ǯ', 'Ǯ'), - ('DZ', 'Dz'), - ('Ǵ', 'Ǵ'), - ('Ƕ', 'Ǹ'), - ('Ǻ', 'Ǻ'), - ('Ǽ', 'Ǽ'), - ('Ǿ', 'Ǿ'), - ('Ȁ', 'Ȁ'), - ('Ȃ', 'Ȃ'), - ('Ȅ', 'Ȅ'), - ('Ȇ', 'Ȇ'), - ('Ȉ', 'Ȉ'), - ('Ȋ', 'Ȋ'), - ('Ȍ', 'Ȍ'), - ('Ȏ', 'Ȏ'), - ('Ȑ', 'Ȑ'), - ('Ȓ', 'Ȓ'), - ('Ȕ', 'Ȕ'), - ('Ȗ', 'Ȗ'), - ('Ș', 'Ș'), - ('Ț', 'Ț'), - ('Ȝ', 'Ȝ'), - ('Ȟ', 'Ȟ'), - ('Ƞ', 'Ƞ'), - ('Ȣ', 'Ȣ'), - ('Ȥ', 'Ȥ'), - ('Ȧ', 'Ȧ'), - ('Ȩ', 'Ȩ'), - ('Ȫ', 'Ȫ'), - ('Ȭ', 'Ȭ'), - ('Ȯ', 'Ȯ'), - ('Ȱ', 'Ȱ'), - ('Ȳ', 'Ȳ'), - ('Ⱥ', 'Ȼ'), - ('Ƚ', 'Ⱦ'), - ('Ɂ', 'Ɂ'), - ('Ƀ', 'Ɇ'), - ('Ɉ', 'Ɉ'), - ('Ɋ', 'Ɋ'), - ('Ɍ', 'Ɍ'), - ('Ɏ', 'Ɏ'), - ('Ͱ', 'Ͱ'), - ('Ͳ', 'Ͳ'), - ('Ͷ', 'Ͷ'), - ('Ϳ', 'Ϳ'), - ('Ά', 'Ά'), - ('Έ', 'Ί'), - ('Ό', 'Ό'), - ('Ύ', 'Ώ'), - ('Α', 'Ρ'), - ('Σ', 'Ϋ'), - ('Ϗ', 'Ϗ'), - ('Ϙ', 'Ϙ'), - ('Ϛ', 'Ϛ'), - ('Ϝ', 'Ϝ'), - ('Ϟ', 'Ϟ'), - ('Ϡ', 'Ϡ'), - ('Ϣ', 'Ϣ'), - ('Ϥ', 'Ϥ'), - ('Ϧ', 'Ϧ'), - ('Ϩ', 'Ϩ'), - ('Ϫ', 'Ϫ'), - ('Ϭ', 'Ϭ'), - ('Ϯ', 'Ϯ'), - ('ϴ', 'ϴ'), - ('Ϸ', 'Ϸ'), - ('Ϲ', 'Ϻ'), - ('Ͻ', 'Я'), - ('Ѡ', 'Ѡ'), - ('Ѣ', 'Ѣ'), - ('Ѥ', 'Ѥ'), - ('Ѧ', 'Ѧ'), - ('Ѩ', 'Ѩ'), - ('Ѫ', 'Ѫ'), - ('Ѭ', 'Ѭ'), - ('Ѯ', 'Ѯ'), - ('Ѱ', 'Ѱ'), - ('Ѳ', 'Ѳ'), - ('Ѵ', 'Ѵ'), - ('Ѷ', 'Ѷ'), - ('Ѹ', 'Ѹ'), - ('Ѻ', 'Ѻ'), - ('Ѽ', 'Ѽ'), - ('Ѿ', 'Ѿ'), - ('Ҁ', 'Ҁ'), - ('Ҋ', 'Ҋ'), - ('Ҍ', 'Ҍ'), - ('Ҏ', 'Ҏ'), - ('Ґ', 'Ґ'), - ('Ғ', 'Ғ'), - ('Ҕ', 'Ҕ'), - ('Җ', 'Җ'), - ('Ҙ', 'Ҙ'), - ('Қ', 'Қ'), - ('Ҝ', 'Ҝ'), - ('Ҟ', 'Ҟ'), - ('Ҡ', 'Ҡ'), - ('Ң', 'Ң'), - ('Ҥ', 'Ҥ'), - ('Ҧ', 'Ҧ'), - ('Ҩ', 'Ҩ'), - ('Ҫ', 'Ҫ'), - ('Ҭ', 'Ҭ'), - ('Ү', 'Ү'), - ('Ұ', 'Ұ'), - ('Ҳ', 'Ҳ'), - ('Ҵ', 'Ҵ'), - ('Ҷ', 'Ҷ'), - ('Ҹ', 'Ҹ'), - ('Һ', 'Һ'), - ('Ҽ', 'Ҽ'), - ('Ҿ', 'Ҿ'), - ('Ӏ', 'Ӂ'), - ('Ӄ', 'Ӄ'), - ('Ӆ', 'Ӆ'), - ('Ӈ', 'Ӈ'), - ('Ӊ', 'Ӊ'), - ('Ӌ', 'Ӌ'), - ('Ӎ', 'Ӎ'), - ('Ӑ', 'Ӑ'), - ('Ӓ', 'Ӓ'), - ('Ӕ', 'Ӕ'), - ('Ӗ', 'Ӗ'), - ('Ә', 'Ә'), - ('Ӛ', 'Ӛ'), - ('Ӝ', 'Ӝ'), - ('Ӟ', 'Ӟ'), - ('Ӡ', 'Ӡ'), - ('Ӣ', 'Ӣ'), - ('Ӥ', 'Ӥ'), - ('Ӧ', 'Ӧ'), - ('Ө', 'Ө'), - ('Ӫ', 'Ӫ'), - ('Ӭ', 'Ӭ'), - ('Ӯ', 'Ӯ'), - ('Ӱ', 'Ӱ'), - ('Ӳ', 'Ӳ'), - ('Ӵ', 'Ӵ'), - ('Ӷ', 'Ӷ'), - ('Ӹ', 'Ӹ'), - ('Ӻ', 'Ӻ'), - ('Ӽ', 'Ӽ'), - ('Ӿ', 'Ӿ'), - ('Ԁ', 'Ԁ'), - ('Ԃ', 'Ԃ'), - ('Ԅ', 'Ԅ'), - ('Ԇ', 'Ԇ'), - ('Ԉ', 'Ԉ'), - ('Ԋ', 'Ԋ'), - ('Ԍ', 'Ԍ'), - ('Ԏ', 'Ԏ'), - ('Ԑ', 'Ԑ'), - ('Ԓ', 'Ԓ'), - ('Ԕ', 'Ԕ'), - ('Ԗ', 'Ԗ'), - ('Ԙ', 'Ԙ'), - ('Ԛ', 'Ԛ'), - ('Ԝ', 'Ԝ'), - ('Ԟ', 'Ԟ'), - ('Ԡ', 'Ԡ'), - ('Ԣ', 'Ԣ'), - ('Ԥ', 'Ԥ'), - ('Ԧ', 'Ԧ'), - ('Ԩ', 'Ԩ'), - ('Ԫ', 'Ԫ'), - ('Ԭ', 'Ԭ'), - ('Ԯ', 'Ԯ'), - ('Ա', 'Ֆ'), - ('Ⴀ', 'Ⴥ'), - ('Ⴧ', 'Ⴧ'), - ('Ⴭ', 'Ⴭ'), - ('Ꭰ', 'Ᏽ'), - ('Ᲊ', 'Ᲊ'), - ('Ა', 'Ჺ'), - ('Ჽ', 'Ჿ'), - ('Ḁ', 'Ḁ'), - ('Ḃ', 'Ḃ'), - ('Ḅ', 'Ḅ'), - ('Ḇ', 'Ḇ'), - ('Ḉ', 'Ḉ'), - ('Ḋ', 'Ḋ'), - ('Ḍ', 'Ḍ'), - ('Ḏ', 'Ḏ'), - ('Ḑ', 'Ḑ'), - ('Ḓ', 'Ḓ'), - ('Ḕ', 'Ḕ'), - ('Ḗ', 'Ḗ'), - ('Ḙ', 'Ḙ'), - ('Ḛ', 'Ḛ'), - ('Ḝ', 'Ḝ'), - ('Ḟ', 'Ḟ'), - ('Ḡ', 'Ḡ'), - ('Ḣ', 'Ḣ'), - ('Ḥ', 'Ḥ'), - ('Ḧ', 'Ḧ'), - ('Ḩ', 'Ḩ'), - ('Ḫ', 'Ḫ'), - ('Ḭ', 'Ḭ'), - ('Ḯ', 'Ḯ'), - ('Ḱ', 'Ḱ'), - ('Ḳ', 'Ḳ'), - ('Ḵ', 'Ḵ'), - ('Ḷ', 'Ḷ'), - ('Ḹ', 'Ḹ'), - ('Ḻ', 'Ḻ'), - ('Ḽ', 'Ḽ'), - ('Ḿ', 'Ḿ'), - ('Ṁ', 'Ṁ'), - ('Ṃ', 'Ṃ'), - ('Ṅ', 'Ṅ'), - ('Ṇ', 'Ṇ'), - ('Ṉ', 'Ṉ'), - ('Ṋ', 'Ṋ'), - ('Ṍ', 'Ṍ'), - ('Ṏ', 'Ṏ'), - ('Ṑ', 'Ṑ'), - ('Ṓ', 'Ṓ'), - ('Ṕ', 'Ṕ'), - ('Ṗ', 'Ṗ'), - ('Ṙ', 'Ṙ'), - ('Ṛ', 'Ṛ'), - ('Ṝ', 'Ṝ'), - ('Ṟ', 'Ṟ'), - ('Ṡ', 'Ṡ'), - ('Ṣ', 'Ṣ'), - ('Ṥ', 'Ṥ'), - ('Ṧ', 'Ṧ'), - ('Ṩ', 'Ṩ'), - ('Ṫ', 'Ṫ'), - ('Ṭ', 
'Ṭ'), - ('Ṯ', 'Ṯ'), - ('Ṱ', 'Ṱ'), - ('Ṳ', 'Ṳ'), - ('Ṵ', 'Ṵ'), - ('Ṷ', 'Ṷ'), - ('Ṹ', 'Ṹ'), - ('Ṻ', 'Ṻ'), - ('Ṽ', 'Ṽ'), - ('Ṿ', 'Ṿ'), - ('Ẁ', 'Ẁ'), - ('Ẃ', 'Ẃ'), - ('Ẅ', 'Ẅ'), - ('Ẇ', 'Ẇ'), - ('Ẉ', 'Ẉ'), - ('Ẋ', 'Ẋ'), - ('Ẍ', 'Ẍ'), - ('Ẏ', 'Ẏ'), - ('Ẑ', 'Ẑ'), - ('Ẓ', 'Ẓ'), - ('Ẕ', 'Ẕ'), - ('ẞ', 'ẞ'), - ('Ạ', 'Ạ'), - ('Ả', 'Ả'), - ('Ấ', 'Ấ'), - ('Ầ', 'Ầ'), - ('Ẩ', 'Ẩ'), - ('Ẫ', 'Ẫ'), - ('Ậ', 'Ậ'), - ('Ắ', 'Ắ'), - ('Ằ', 'Ằ'), - ('Ẳ', 'Ẳ'), - ('Ẵ', 'Ẵ'), - ('Ặ', 'Ặ'), - ('Ẹ', 'Ẹ'), - ('Ẻ', 'Ẻ'), - ('Ẽ', 'Ẽ'), - ('Ế', 'Ế'), - ('Ề', 'Ề'), - ('Ể', 'Ể'), - ('Ễ', 'Ễ'), - ('Ệ', 'Ệ'), - ('Ỉ', 'Ỉ'), - ('Ị', 'Ị'), - ('Ọ', 'Ọ'), - ('Ỏ', 'Ỏ'), - ('Ố', 'Ố'), - ('Ồ', 'Ồ'), - ('Ổ', 'Ổ'), - ('Ỗ', 'Ỗ'), - ('Ộ', 'Ộ'), - ('Ớ', 'Ớ'), - ('Ờ', 'Ờ'), - ('Ở', 'Ở'), - ('Ỡ', 'Ỡ'), - ('Ợ', 'Ợ'), - ('Ụ', 'Ụ'), - ('Ủ', 'Ủ'), - ('Ứ', 'Ứ'), - ('Ừ', 'Ừ'), - ('Ử', 'Ử'), - ('Ữ', 'Ữ'), - ('Ự', 'Ự'), - ('Ỳ', 'Ỳ'), - ('Ỵ', 'Ỵ'), - ('Ỷ', 'Ỷ'), - ('Ỹ', 'Ỹ'), - ('Ỻ', 'Ỻ'), - ('Ỽ', 'Ỽ'), - ('Ỿ', 'Ỿ'), - ('Ἀ', 'Ἇ'), - ('Ἐ', 'Ἕ'), - ('Ἠ', 'Ἧ'), - ('Ἰ', 'Ἷ'), - ('Ὀ', 'Ὅ'), - ('Ὑ', 'Ὑ'), - ('Ὓ', 'Ὓ'), - ('Ὕ', 'Ὕ'), - ('Ὗ', 'Ὗ'), - ('Ὠ', 'Ὧ'), - ('ᾈ', 'ᾏ'), - ('ᾘ', 'ᾟ'), - ('ᾨ', 'ᾯ'), - ('Ᾰ', 'ᾼ'), - ('Ὲ', 'ῌ'), - ('Ῐ', 'Ί'), - ('Ῠ', 'Ῥ'), - ('Ὸ', 'ῼ'), - ('Ω', 'Ω'), - ('K', 'Å'), - ('Ⅎ', 'Ⅎ'), - ('Ⅰ', 'Ⅿ'), - ('Ↄ', 'Ↄ'), - ('Ⓐ', 'Ⓩ'), - ('Ⰰ', 'Ⱟ'), - ('Ⱡ', 'Ⱡ'), - ('Ɫ', 'Ɽ'), - ('Ⱨ', 'Ⱨ'), - ('Ⱪ', 'Ⱪ'), - ('Ⱬ', 'Ⱬ'), - ('Ɑ', 'Ɒ'), - ('Ⱳ', 'Ⱳ'), - ('Ⱶ', 'Ⱶ'), - ('Ȿ', 'Ⲁ'), - ('Ⲃ', 'Ⲃ'), - ('Ⲅ', 'Ⲅ'), - ('Ⲇ', 'Ⲇ'), - ('Ⲉ', 'Ⲉ'), - ('Ⲋ', 'Ⲋ'), - ('Ⲍ', 'Ⲍ'), - ('Ⲏ', 'Ⲏ'), - ('Ⲑ', 'Ⲑ'), - ('Ⲓ', 'Ⲓ'), - ('Ⲕ', 'Ⲕ'), - ('Ⲗ', 'Ⲗ'), - ('Ⲙ', 'Ⲙ'), - ('Ⲛ', 'Ⲛ'), - ('Ⲝ', 'Ⲝ'), - ('Ⲟ', 'Ⲟ'), - ('Ⲡ', 'Ⲡ'), - ('Ⲣ', 'Ⲣ'), - ('Ⲥ', 'Ⲥ'), - ('Ⲧ', 'Ⲧ'), - ('Ⲩ', 'Ⲩ'), - ('Ⲫ', 'Ⲫ'), - ('Ⲭ', 'Ⲭ'), - ('Ⲯ', 'Ⲯ'), - ('Ⲱ', 'Ⲱ'), - ('Ⲳ', 'Ⲳ'), - ('Ⲵ', 'Ⲵ'), - ('Ⲷ', 'Ⲷ'), - ('Ⲹ', 'Ⲹ'), - ('Ⲻ', 'Ⲻ'), - ('Ⲽ', 'Ⲽ'), - ('Ⲿ', 'Ⲿ'), - ('Ⳁ', 'Ⳁ'), - ('Ⳃ', 'Ⳃ'), - ('Ⳅ', 'Ⳅ'), - ('Ⳇ', 'Ⳇ'), - ('Ⳉ', 'Ⳉ'), - ('Ⳋ', 'Ⳋ'), - ('Ⳍ', 'Ⳍ'), - ('Ⳏ', 'Ⳏ'), - ('Ⳑ', 'Ⳑ'), - ('Ⳓ', 'Ⳓ'), - ('Ⳕ', 'Ⳕ'), - ('Ⳗ', 'Ⳗ'), - ('Ⳙ', 'Ⳙ'), - ('Ⳛ', 'Ⳛ'), - ('Ⳝ', 'Ⳝ'), - ('Ⳟ', 'Ⳟ'), - ('Ⳡ', 'Ⳡ'), - ('Ⳣ', 'Ⳣ'), - ('Ⳬ', 'Ⳬ'), - ('Ⳮ', 'Ⳮ'), - ('Ⳳ', 'Ⳳ'), - ('Ꙁ', 'Ꙁ'), - ('Ꙃ', 'Ꙃ'), - ('Ꙅ', 'Ꙅ'), - ('Ꙇ', 'Ꙇ'), - ('Ꙉ', 'Ꙉ'), - ('Ꙋ', 'Ꙋ'), - ('Ꙍ', 'Ꙍ'), - ('Ꙏ', 'Ꙏ'), - ('Ꙑ', 'Ꙑ'), - ('Ꙓ', 'Ꙓ'), - ('Ꙕ', 'Ꙕ'), - ('Ꙗ', 'Ꙗ'), - ('Ꙙ', 'Ꙙ'), - ('Ꙛ', 'Ꙛ'), - ('Ꙝ', 'Ꙝ'), - ('Ꙟ', 'Ꙟ'), - ('Ꙡ', 'Ꙡ'), - ('Ꙣ', 'Ꙣ'), - ('Ꙥ', 'Ꙥ'), - ('Ꙧ', 'Ꙧ'), - ('Ꙩ', 'Ꙩ'), - ('Ꙫ', 'Ꙫ'), - ('Ꙭ', 'Ꙭ'), - ('Ꚁ', 'Ꚁ'), - ('Ꚃ', 'Ꚃ'), - ('Ꚅ', 'Ꚅ'), - ('Ꚇ', 'Ꚇ'), - ('Ꚉ', 'Ꚉ'), - ('Ꚋ', 'Ꚋ'), - ('Ꚍ', 'Ꚍ'), - ('Ꚏ', 'Ꚏ'), - ('Ꚑ', 'Ꚑ'), - ('Ꚓ', 'Ꚓ'), - ('Ꚕ', 'Ꚕ'), - ('Ꚗ', 'Ꚗ'), - ('Ꚙ', 'Ꚙ'), - ('Ꚛ', 'Ꚛ'), - ('Ꜣ', 'Ꜣ'), - ('Ꜥ', 'Ꜥ'), - ('Ꜧ', 'Ꜧ'), - ('Ꜩ', 'Ꜩ'), - ('Ꜫ', 'Ꜫ'), - ('Ꜭ', 'Ꜭ'), - ('Ꜯ', 'Ꜯ'), - ('Ꜳ', 'Ꜳ'), - ('Ꜵ', 'Ꜵ'), - ('Ꜷ', 'Ꜷ'), - ('Ꜹ', 'Ꜹ'), - ('Ꜻ', 'Ꜻ'), - ('Ꜽ', 'Ꜽ'), - ('Ꜿ', 'Ꜿ'), - ('Ꝁ', 'Ꝁ'), - ('Ꝃ', 'Ꝃ'), - ('Ꝅ', 'Ꝅ'), - ('Ꝇ', 'Ꝇ'), - ('Ꝉ', 'Ꝉ'), - ('Ꝋ', 'Ꝋ'), - ('Ꝍ', 'Ꝍ'), - ('Ꝏ', 'Ꝏ'), - ('Ꝑ', 'Ꝑ'), - ('Ꝓ', 'Ꝓ'), - ('Ꝕ', 'Ꝕ'), - ('Ꝗ', 'Ꝗ'), - ('Ꝙ', 'Ꝙ'), - ('Ꝛ', 'Ꝛ'), - ('Ꝝ', 'Ꝝ'), - ('Ꝟ', 'Ꝟ'), - ('Ꝡ', 'Ꝡ'), - ('Ꝣ', 'Ꝣ'), - ('Ꝥ', 'Ꝥ'), - ('Ꝧ', 'Ꝧ'), - ('Ꝩ', 'Ꝩ'), - ('Ꝫ', 'Ꝫ'), - ('Ꝭ', 'Ꝭ'), - ('Ꝯ', 'Ꝯ'), - ('Ꝺ', 'Ꝺ'), - ('Ꝼ', 'Ꝼ'), - ('Ᵹ', 'Ꝿ'), - ('Ꞁ', 'Ꞁ'), - ('Ꞃ', 'Ꞃ'), - ('Ꞅ', 'Ꞅ'), - ('Ꞇ', 'Ꞇ'), - ('Ꞌ', 'Ꞌ'), - ('Ɥ', 'Ɥ'), - ('Ꞑ', 'Ꞑ'), - ('Ꞓ', 'Ꞓ'), - ('Ꞗ', 'Ꞗ'), - ('Ꞙ', 'Ꞙ'), - ('Ꞛ', 'Ꞛ'), - ('Ꞝ', 'Ꞝ'), - ('Ꞟ', 'Ꞟ'), - ('Ꞡ', 'Ꞡ'), - ('Ꞣ', 'Ꞣ'), - ('Ꞥ', 'Ꞥ'), - ('Ꞧ', 'Ꞧ'), - ('Ꞩ', 'Ꞩ'), - ('Ɦ', 'Ɪ'), - ('Ʞ', 'Ꞵ'), - 
('Ꞷ', 'Ꞷ'), - ('Ꞹ', 'Ꞹ'), - ('Ꞻ', 'Ꞻ'), - ('Ꞽ', 'Ꞽ'), - ('Ꞿ', 'Ꞿ'), - ('Ꟁ', 'Ꟁ'), - ('Ꟃ', 'Ꟃ'), - ('Ꞔ', 'Ꟈ'), - ('Ꟊ', 'Ꟊ'), - ('Ɤ', 'Ꟍ'), - ('Ꟑ', 'Ꟑ'), - ('Ꟗ', 'Ꟗ'), - ('Ꟙ', 'Ꟙ'), - ('Ꟛ', 'Ꟛ'), - ('Ƛ', 'Ƛ'), - ('Ꟶ', 'Ꟶ'), - ('A', 'Z'), - ('𐐀', '𐐧'), - ('𐒰', '𐓓'), - ('𐕰', '𐕺'), - ('𐕼', '𐖊'), - ('𐖌', '𐖒'), - ('𐖔', '𐖕'), - ('𐲀', '𐲲'), - ('𐵐', '𐵥'), - ('𑢠', '𑢿'), - ('𖹀', '𖹟'), - ('𞤀', '𞤡'), -]; - -pub const CHANGES_WHEN_TITLECASED: &'static [(char, char)] = &[ - ('a', 'z'), - ('µ', 'µ'), - ('ß', 'ö'), - ('ø', 'ÿ'), - ('ā', 'ā'), - ('ă', 'ă'), - ('ą', 'ą'), - ('ć', 'ć'), - ('ĉ', 'ĉ'), - ('ċ', 'ċ'), - ('č', 'č'), - ('ď', 'ď'), - ('đ', 'đ'), - ('ē', 'ē'), - ('ĕ', 'ĕ'), - ('ė', 'ė'), - ('ę', 'ę'), - ('ě', 'ě'), - ('ĝ', 'ĝ'), - ('ğ', 'ğ'), - ('ġ', 'ġ'), - ('ģ', 'ģ'), - ('ĥ', 'ĥ'), - ('ħ', 'ħ'), - ('ĩ', 'ĩ'), - ('ī', 'ī'), - ('ĭ', 'ĭ'), - ('į', 'į'), - ('ı', 'ı'), - ('ij', 'ij'), - ('ĵ', 'ĵ'), - ('ķ', 'ķ'), - ('ĺ', 'ĺ'), - ('ļ', 'ļ'), - ('ľ', 'ľ'), - ('ŀ', 'ŀ'), - ('ł', 'ł'), - ('ń', 'ń'), - ('ņ', 'ņ'), - ('ň', 'ʼn'), - ('ŋ', 'ŋ'), - ('ō', 'ō'), - ('ŏ', 'ŏ'), - ('ő', 'ő'), - ('œ', 'œ'), - ('ŕ', 'ŕ'), - ('ŗ', 'ŗ'), - ('ř', 'ř'), - ('ś', 'ś'), - ('ŝ', 'ŝ'), - ('ş', 'ş'), - ('š', 'š'), - ('ţ', 'ţ'), - ('ť', 'ť'), - ('ŧ', 'ŧ'), - ('ũ', 'ũ'), - ('ū', 'ū'), - ('ŭ', 'ŭ'), - ('ů', 'ů'), - ('ű', 'ű'), - ('ų', 'ų'), - ('ŵ', 'ŵ'), - ('ŷ', 'ŷ'), - ('ź', 'ź'), - ('ż', 'ż'), - ('ž', 'ƀ'), - ('ƃ', 'ƃ'), - ('ƅ', 'ƅ'), - ('ƈ', 'ƈ'), - ('ƌ', 'ƌ'), - ('ƒ', 'ƒ'), - ('ƕ', 'ƕ'), - ('ƙ', 'ƛ'), - ('ƞ', 'ƞ'), - ('ơ', 'ơ'), - ('ƣ', 'ƣ'), - ('ƥ', 'ƥ'), - ('ƨ', 'ƨ'), - ('ƭ', 'ƭ'), - ('ư', 'ư'), - ('ƴ', 'ƴ'), - ('ƶ', 'ƶ'), - ('ƹ', 'ƹ'), - ('ƽ', 'ƽ'), - ('ƿ', 'ƿ'), - ('DŽ', 'DŽ'), - ('dž', 'LJ'), - ('lj', 'NJ'), - ('nj', 'nj'), - ('ǎ', 'ǎ'), - ('ǐ', 'ǐ'), - ('ǒ', 'ǒ'), - ('ǔ', 'ǔ'), - ('ǖ', 'ǖ'), - ('ǘ', 'ǘ'), - ('ǚ', 'ǚ'), - ('ǜ', 'ǝ'), - ('ǟ', 'ǟ'), - ('ǡ', 'ǡ'), - ('ǣ', 'ǣ'), - ('ǥ', 'ǥ'), - ('ǧ', 'ǧ'), - ('ǩ', 'ǩ'), - ('ǫ', 'ǫ'), - ('ǭ', 'ǭ'), - ('ǯ', 'DZ'), - ('dz', 'dz'), - ('ǵ', 'ǵ'), - ('ǹ', 'ǹ'), - ('ǻ', 'ǻ'), - ('ǽ', 'ǽ'), - ('ǿ', 'ǿ'), - ('ȁ', 'ȁ'), - ('ȃ', 'ȃ'), - ('ȅ', 'ȅ'), - ('ȇ', 'ȇ'), - ('ȉ', 'ȉ'), - ('ȋ', 'ȋ'), - ('ȍ', 'ȍ'), - ('ȏ', 'ȏ'), - ('ȑ', 'ȑ'), - ('ȓ', 'ȓ'), - ('ȕ', 'ȕ'), - ('ȗ', 'ȗ'), - ('ș', 'ș'), - ('ț', 'ț'), - ('ȝ', 'ȝ'), - ('ȟ', 'ȟ'), - ('ȣ', 'ȣ'), - ('ȥ', 'ȥ'), - ('ȧ', 'ȧ'), - ('ȩ', 'ȩ'), - ('ȫ', 'ȫ'), - ('ȭ', 'ȭ'), - ('ȯ', 'ȯ'), - ('ȱ', 'ȱ'), - ('ȳ', 'ȳ'), - ('ȼ', 'ȼ'), - ('ȿ', 'ɀ'), - ('ɂ', 'ɂ'), - ('ɇ', 'ɇ'), - ('ɉ', 'ɉ'), - ('ɋ', 'ɋ'), - ('ɍ', 'ɍ'), - ('ɏ', 'ɔ'), - ('ɖ', 'ɗ'), - ('ə', 'ə'), - ('ɛ', 'ɜ'), - ('ɠ', 'ɡ'), - ('ɣ', 'ɦ'), - ('ɨ', 'ɬ'), - ('ɯ', 'ɯ'), - ('ɱ', 'ɲ'), - ('ɵ', 'ɵ'), - ('ɽ', 'ɽ'), - ('ʀ', 'ʀ'), - ('ʂ', 'ʃ'), - ('ʇ', 'ʌ'), - ('ʒ', 'ʒ'), - ('ʝ', 'ʞ'), - ('\u{345}', '\u{345}'), - ('ͱ', 'ͱ'), - ('ͳ', 'ͳ'), - ('ͷ', 'ͷ'), - ('ͻ', 'ͽ'), - ('ΐ', 'ΐ'), - ('ά', 'ώ'), - ('ϐ', 'ϑ'), - ('ϕ', 'ϗ'), - ('ϙ', 'ϙ'), - ('ϛ', 'ϛ'), - ('ϝ', 'ϝ'), - ('ϟ', 'ϟ'), - ('ϡ', 'ϡ'), - ('ϣ', 'ϣ'), - ('ϥ', 'ϥ'), - ('ϧ', 'ϧ'), - ('ϩ', 'ϩ'), - ('ϫ', 'ϫ'), - ('ϭ', 'ϭ'), - ('ϯ', 'ϳ'), - ('ϵ', 'ϵ'), - ('ϸ', 'ϸ'), - ('ϻ', 'ϻ'), - ('а', 'џ'), - ('ѡ', 'ѡ'), - ('ѣ', 'ѣ'), - ('ѥ', 'ѥ'), - ('ѧ', 'ѧ'), - ('ѩ', 'ѩ'), - ('ѫ', 'ѫ'), - ('ѭ', 'ѭ'), - ('ѯ', 'ѯ'), - ('ѱ', 'ѱ'), - ('ѳ', 'ѳ'), - ('ѵ', 'ѵ'), - ('ѷ', 'ѷ'), - ('ѹ', 'ѹ'), - ('ѻ', 'ѻ'), - ('ѽ', 'ѽ'), - ('ѿ', 'ѿ'), - ('ҁ', 'ҁ'), - ('ҋ', 'ҋ'), - ('ҍ', 'ҍ'), - ('ҏ', 'ҏ'), - ('ґ', 'ґ'), - ('ғ', 'ғ'), - ('ҕ', 'ҕ'), - ('җ', 'җ'), - ('ҙ', 'ҙ'), - ('қ', 'қ'), - ('ҝ', 'ҝ'), - ('ҟ', 'ҟ'), - ('ҡ', 'ҡ'), - ('ң', 'ң'), - ('ҥ', 'ҥ'), - ('ҧ', 'ҧ'), - ('ҩ', 'ҩ'), - ('ҫ', 'ҫ'), - 
('ҭ', 'ҭ'), - ('ү', 'ү'), - ('ұ', 'ұ'), - ('ҳ', 'ҳ'), - ('ҵ', 'ҵ'), - ('ҷ', 'ҷ'), - ('ҹ', 'ҹ'), - ('һ', 'һ'), - ('ҽ', 'ҽ'), - ('ҿ', 'ҿ'), - ('ӂ', 'ӂ'), - ('ӄ', 'ӄ'), - ('ӆ', 'ӆ'), - ('ӈ', 'ӈ'), - ('ӊ', 'ӊ'), - ('ӌ', 'ӌ'), - ('ӎ', 'ӏ'), - ('ӑ', 'ӑ'), - ('ӓ', 'ӓ'), - ('ӕ', 'ӕ'), - ('ӗ', 'ӗ'), - ('ә', 'ә'), - ('ӛ', 'ӛ'), - ('ӝ', 'ӝ'), - ('ӟ', 'ӟ'), - ('ӡ', 'ӡ'), - ('ӣ', 'ӣ'), - ('ӥ', 'ӥ'), - ('ӧ', 'ӧ'), - ('ө', 'ө'), - ('ӫ', 'ӫ'), - ('ӭ', 'ӭ'), - ('ӯ', 'ӯ'), - ('ӱ', 'ӱ'), - ('ӳ', 'ӳ'), - ('ӵ', 'ӵ'), - ('ӷ', 'ӷ'), - ('ӹ', 'ӹ'), - ('ӻ', 'ӻ'), - ('ӽ', 'ӽ'), - ('ӿ', 'ӿ'), - ('ԁ', 'ԁ'), - ('ԃ', 'ԃ'), - ('ԅ', 'ԅ'), - ('ԇ', 'ԇ'), - ('ԉ', 'ԉ'), - ('ԋ', 'ԋ'), - ('ԍ', 'ԍ'), - ('ԏ', 'ԏ'), - ('ԑ', 'ԑ'), - ('ԓ', 'ԓ'), - ('ԕ', 'ԕ'), - ('ԗ', 'ԗ'), - ('ԙ', 'ԙ'), - ('ԛ', 'ԛ'), - ('ԝ', 'ԝ'), - ('ԟ', 'ԟ'), - ('ԡ', 'ԡ'), - ('ԣ', 'ԣ'), - ('ԥ', 'ԥ'), - ('ԧ', 'ԧ'), - ('ԩ', 'ԩ'), - ('ԫ', 'ԫ'), - ('ԭ', 'ԭ'), - ('ԯ', 'ԯ'), - ('ա', 'և'), - ('ᏸ', 'ᏽ'), - ('ᲀ', 'ᲈ'), - ('ᲊ', 'ᲊ'), - ('ᵹ', 'ᵹ'), - ('ᵽ', 'ᵽ'), - ('ᶎ', 'ᶎ'), - ('ḁ', 'ḁ'), - ('ḃ', 'ḃ'), - ('ḅ', 'ḅ'), - ('ḇ', 'ḇ'), - ('ḉ', 'ḉ'), - ('ḋ', 'ḋ'), - ('ḍ', 'ḍ'), - ('ḏ', 'ḏ'), - ('ḑ', 'ḑ'), - ('ḓ', 'ḓ'), - ('ḕ', 'ḕ'), - ('ḗ', 'ḗ'), - ('ḙ', 'ḙ'), - ('ḛ', 'ḛ'), - ('ḝ', 'ḝ'), - ('ḟ', 'ḟ'), - ('ḡ', 'ḡ'), - ('ḣ', 'ḣ'), - ('ḥ', 'ḥ'), - ('ḧ', 'ḧ'), - ('ḩ', 'ḩ'), - ('ḫ', 'ḫ'), - ('ḭ', 'ḭ'), - ('ḯ', 'ḯ'), - ('ḱ', 'ḱ'), - ('ḳ', 'ḳ'), - ('ḵ', 'ḵ'), - ('ḷ', 'ḷ'), - ('ḹ', 'ḹ'), - ('ḻ', 'ḻ'), - ('ḽ', 'ḽ'), - ('ḿ', 'ḿ'), - ('ṁ', 'ṁ'), - ('ṃ', 'ṃ'), - ('ṅ', 'ṅ'), - ('ṇ', 'ṇ'), - ('ṉ', 'ṉ'), - ('ṋ', 'ṋ'), - ('ṍ', 'ṍ'), - ('ṏ', 'ṏ'), - ('ṑ', 'ṑ'), - ('ṓ', 'ṓ'), - ('ṕ', 'ṕ'), - ('ṗ', 'ṗ'), - ('ṙ', 'ṙ'), - ('ṛ', 'ṛ'), - ('ṝ', 'ṝ'), - ('ṟ', 'ṟ'), - ('ṡ', 'ṡ'), - ('ṣ', 'ṣ'), - ('ṥ', 'ṥ'), - ('ṧ', 'ṧ'), - ('ṩ', 'ṩ'), - ('ṫ', 'ṫ'), - ('ṭ', 'ṭ'), - ('ṯ', 'ṯ'), - ('ṱ', 'ṱ'), - ('ṳ', 'ṳ'), - ('ṵ', 'ṵ'), - ('ṷ', 'ṷ'), - ('ṹ', 'ṹ'), - ('ṻ', 'ṻ'), - ('ṽ', 'ṽ'), - ('ṿ', 'ṿ'), - ('ẁ', 'ẁ'), - ('ẃ', 'ẃ'), - ('ẅ', 'ẅ'), - ('ẇ', 'ẇ'), - ('ẉ', 'ẉ'), - ('ẋ', 'ẋ'), - ('ẍ', 'ẍ'), - ('ẏ', 'ẏ'), - ('ẑ', 'ẑ'), - ('ẓ', 'ẓ'), - ('ẕ', 'ẛ'), - ('ạ', 'ạ'), - ('ả', 'ả'), - ('ấ', 'ấ'), - ('ầ', 'ầ'), - ('ẩ', 'ẩ'), - ('ẫ', 'ẫ'), - ('ậ', 'ậ'), - ('ắ', 'ắ'), - ('ằ', 'ằ'), - ('ẳ', 'ẳ'), - ('ẵ', 'ẵ'), - ('ặ', 'ặ'), - ('ẹ', 'ẹ'), - ('ẻ', 'ẻ'), - ('ẽ', 'ẽ'), - ('ế', 'ế'), - ('ề', 'ề'), - ('ể', 'ể'), - ('ễ', 'ễ'), - ('ệ', 'ệ'), - ('ỉ', 'ỉ'), - ('ị', 'ị'), - ('ọ', 'ọ'), - ('ỏ', 'ỏ'), - ('ố', 'ố'), - ('ồ', 'ồ'), - ('ổ', 'ổ'), - ('ỗ', 'ỗ'), - ('ộ', 'ộ'), - ('ớ', 'ớ'), - ('ờ', 'ờ'), - ('ở', 'ở'), - ('ỡ', 'ỡ'), - ('ợ', 'ợ'), - ('ụ', 'ụ'), - ('ủ', 'ủ'), - ('ứ', 'ứ'), - ('ừ', 'ừ'), - ('ử', 'ử'), - ('ữ', 'ữ'), - ('ự', 'ự'), - ('ỳ', 'ỳ'), - ('ỵ', 'ỵ'), - ('ỷ', 'ỷ'), - ('ỹ', 'ỹ'), - ('ỻ', 'ỻ'), - ('ỽ', 'ỽ'), - ('ỿ', 'ἇ'), - ('ἐ', 'ἕ'), - ('ἠ', 'ἧ'), - ('ἰ', 'ἷ'), - ('ὀ', 'ὅ'), - ('ὐ', 'ὗ'), - ('ὠ', 'ὧ'), - ('ὰ', 'ώ'), - ('ᾀ', 'ᾇ'), - ('ᾐ', 'ᾗ'), - ('ᾠ', 'ᾧ'), - ('ᾰ', 'ᾴ'), - ('ᾶ', 'ᾷ'), - ('ι', 'ι'), - ('ῂ', 'ῄ'), - ('ῆ', 'ῇ'), - ('ῐ', 'ΐ'), - ('ῖ', 'ῗ'), - ('ῠ', 'ῧ'), - ('ῲ', 'ῴ'), - ('ῶ', 'ῷ'), - ('ⅎ', 'ⅎ'), - ('ⅰ', 'ⅿ'), - ('ↄ', 'ↄ'), - ('ⓐ', 'ⓩ'), - ('ⰰ', 'ⱟ'), - ('ⱡ', 'ⱡ'), - ('ⱥ', 'ⱦ'), - ('ⱨ', 'ⱨ'), - ('ⱪ', 'ⱪ'), - ('ⱬ', 'ⱬ'), - ('ⱳ', 'ⱳ'), - ('ⱶ', 'ⱶ'), - ('ⲁ', 'ⲁ'), - ('ⲃ', 'ⲃ'), - ('ⲅ', 'ⲅ'), - ('ⲇ', 'ⲇ'), - ('ⲉ', 'ⲉ'), - ('ⲋ', 'ⲋ'), - ('ⲍ', 'ⲍ'), - ('ⲏ', 'ⲏ'), - ('ⲑ', 'ⲑ'), - ('ⲓ', 'ⲓ'), - ('ⲕ', 'ⲕ'), - ('ⲗ', 'ⲗ'), - ('ⲙ', 'ⲙ'), - ('ⲛ', 'ⲛ'), - ('ⲝ', 'ⲝ'), - ('ⲟ', 'ⲟ'), - ('ⲡ', 'ⲡ'), - ('ⲣ', 'ⲣ'), - ('ⲥ', 'ⲥ'), - ('ⲧ', 'ⲧ'), - ('ⲩ', 'ⲩ'), - ('ⲫ', 'ⲫ'), - ('ⲭ', 'ⲭ'), - ('ⲯ', 'ⲯ'), - ('ⲱ', 'ⲱ'), - ('ⲳ', 'ⲳ'), - ('ⲵ', 'ⲵ'), 
- ('ⲷ', 'ⲷ'), - ('ⲹ', 'ⲹ'), - ('ⲻ', 'ⲻ'), - ('ⲽ', 'ⲽ'), - ('ⲿ', 'ⲿ'), - ('ⳁ', 'ⳁ'), - ('ⳃ', 'ⳃ'), - ('ⳅ', 'ⳅ'), - ('ⳇ', 'ⳇ'), - ('ⳉ', 'ⳉ'), - ('ⳋ', 'ⳋ'), - ('ⳍ', 'ⳍ'), - ('ⳏ', 'ⳏ'), - ('ⳑ', 'ⳑ'), - ('ⳓ', 'ⳓ'), - ('ⳕ', 'ⳕ'), - ('ⳗ', 'ⳗ'), - ('ⳙ', 'ⳙ'), - ('ⳛ', 'ⳛ'), - ('ⳝ', 'ⳝ'), - ('ⳟ', 'ⳟ'), - ('ⳡ', 'ⳡ'), - ('ⳣ', 'ⳣ'), - ('ⳬ', 'ⳬ'), - ('ⳮ', 'ⳮ'), - ('ⳳ', 'ⳳ'), - ('ⴀ', 'ⴥ'), - ('ⴧ', 'ⴧ'), - ('ⴭ', 'ⴭ'), - ('ꙁ', 'ꙁ'), - ('ꙃ', 'ꙃ'), - ('ꙅ', 'ꙅ'), - ('ꙇ', 'ꙇ'), - ('ꙉ', 'ꙉ'), - ('ꙋ', 'ꙋ'), - ('ꙍ', 'ꙍ'), - ('ꙏ', 'ꙏ'), - ('ꙑ', 'ꙑ'), - ('ꙓ', 'ꙓ'), - ('ꙕ', 'ꙕ'), - ('ꙗ', 'ꙗ'), - ('ꙙ', 'ꙙ'), - ('ꙛ', 'ꙛ'), - ('ꙝ', 'ꙝ'), - ('ꙟ', 'ꙟ'), - ('ꙡ', 'ꙡ'), - ('ꙣ', 'ꙣ'), - ('ꙥ', 'ꙥ'), - ('ꙧ', 'ꙧ'), - ('ꙩ', 'ꙩ'), - ('ꙫ', 'ꙫ'), - ('ꙭ', 'ꙭ'), - ('ꚁ', 'ꚁ'), - ('ꚃ', 'ꚃ'), - ('ꚅ', 'ꚅ'), - ('ꚇ', 'ꚇ'), - ('ꚉ', 'ꚉ'), - ('ꚋ', 'ꚋ'), - ('ꚍ', 'ꚍ'), - ('ꚏ', 'ꚏ'), - ('ꚑ', 'ꚑ'), - ('ꚓ', 'ꚓ'), - ('ꚕ', 'ꚕ'), - ('ꚗ', 'ꚗ'), - ('ꚙ', 'ꚙ'), - ('ꚛ', 'ꚛ'), - ('ꜣ', 'ꜣ'), - ('ꜥ', 'ꜥ'), - ('ꜧ', 'ꜧ'), - ('ꜩ', 'ꜩ'), - ('ꜫ', 'ꜫ'), - ('ꜭ', 'ꜭ'), - ('ꜯ', 'ꜯ'), - ('ꜳ', 'ꜳ'), - ('ꜵ', 'ꜵ'), - ('ꜷ', 'ꜷ'), - ('ꜹ', 'ꜹ'), - ('ꜻ', 'ꜻ'), - ('ꜽ', 'ꜽ'), - ('ꜿ', 'ꜿ'), - ('ꝁ', 'ꝁ'), - ('ꝃ', 'ꝃ'), - ('ꝅ', 'ꝅ'), - ('ꝇ', 'ꝇ'), - ('ꝉ', 'ꝉ'), - ('ꝋ', 'ꝋ'), - ('ꝍ', 'ꝍ'), - ('ꝏ', 'ꝏ'), - ('ꝑ', 'ꝑ'), - ('ꝓ', 'ꝓ'), - ('ꝕ', 'ꝕ'), - ('ꝗ', 'ꝗ'), - ('ꝙ', 'ꝙ'), - ('ꝛ', 'ꝛ'), - ('ꝝ', 'ꝝ'), - ('ꝟ', 'ꝟ'), - ('ꝡ', 'ꝡ'), - ('ꝣ', 'ꝣ'), - ('ꝥ', 'ꝥ'), - ('ꝧ', 'ꝧ'), - ('ꝩ', 'ꝩ'), - ('ꝫ', 'ꝫ'), - ('ꝭ', 'ꝭ'), - ('ꝯ', 'ꝯ'), - ('ꝺ', 'ꝺ'), - ('ꝼ', 'ꝼ'), - ('ꝿ', 'ꝿ'), - ('ꞁ', 'ꞁ'), - ('ꞃ', 'ꞃ'), - ('ꞅ', 'ꞅ'), - ('ꞇ', 'ꞇ'), - ('ꞌ', 'ꞌ'), - ('ꞑ', 'ꞑ'), - ('ꞓ', 'ꞔ'), - ('ꞗ', 'ꞗ'), - ('ꞙ', 'ꞙ'), - ('ꞛ', 'ꞛ'), - ('ꞝ', 'ꞝ'), - ('ꞟ', 'ꞟ'), - ('ꞡ', 'ꞡ'), - ('ꞣ', 'ꞣ'), - ('ꞥ', 'ꞥ'), - ('ꞧ', 'ꞧ'), - ('ꞩ', 'ꞩ'), - ('ꞵ', 'ꞵ'), - ('ꞷ', 'ꞷ'), - ('ꞹ', 'ꞹ'), - ('ꞻ', 'ꞻ'), - ('ꞽ', 'ꞽ'), - ('ꞿ', 'ꞿ'), - ('ꟁ', 'ꟁ'), - ('ꟃ', 'ꟃ'), - ('ꟈ', 'ꟈ'), - ('ꟊ', 'ꟊ'), - ('ꟍ', 'ꟍ'), - ('ꟑ', 'ꟑ'), - ('ꟗ', 'ꟗ'), - ('ꟙ', 'ꟙ'), - ('ꟛ', 'ꟛ'), - ('ꟶ', 'ꟶ'), - ('ꭓ', 'ꭓ'), - ('ꭰ', 'ꮿ'), - ('ff', 'st'), - ('ﬓ', 'ﬗ'), - ('a', 'z'), - ('𐐨', '𐑏'), - ('𐓘', '𐓻'), - ('𐖗', '𐖡'), - ('𐖣', '𐖱'), - ('𐖳', '𐖹'), - ('𐖻', '𐖼'), - ('𐳀', '𐳲'), - ('𐵰', '𐶅'), - ('𑣀', '𑣟'), - ('𖹠', '𖹿'), - ('𞤢', '𞥃'), -]; - -pub const CHANGES_WHEN_UPPERCASED: &'static [(char, char)] = &[ - ('a', 'z'), - ('µ', 'µ'), - ('ß', 'ö'), - ('ø', 'ÿ'), - ('ā', 'ā'), - ('ă', 'ă'), - ('ą', 'ą'), - ('ć', 'ć'), - ('ĉ', 'ĉ'), - ('ċ', 'ċ'), - ('č', 'č'), - ('ď', 'ď'), - ('đ', 'đ'), - ('ē', 'ē'), - ('ĕ', 'ĕ'), - ('ė', 'ė'), - ('ę', 'ę'), - ('ě', 'ě'), - ('ĝ', 'ĝ'), - ('ğ', 'ğ'), - ('ġ', 'ġ'), - ('ģ', 'ģ'), - ('ĥ', 'ĥ'), - ('ħ', 'ħ'), - ('ĩ', 'ĩ'), - ('ī', 'ī'), - ('ĭ', 'ĭ'), - ('į', 'į'), - ('ı', 'ı'), - ('ij', 'ij'), - ('ĵ', 'ĵ'), - ('ķ', 'ķ'), - ('ĺ', 'ĺ'), - ('ļ', 'ļ'), - ('ľ', 'ľ'), - ('ŀ', 'ŀ'), - ('ł', 'ł'), - ('ń', 'ń'), - ('ņ', 'ņ'), - ('ň', 'ʼn'), - ('ŋ', 'ŋ'), - ('ō', 'ō'), - ('ŏ', 'ŏ'), - ('ő', 'ő'), - ('œ', 'œ'), - ('ŕ', 'ŕ'), - ('ŗ', 'ŗ'), - ('ř', 'ř'), - ('ś', 'ś'), - ('ŝ', 'ŝ'), - ('ş', 'ş'), - ('š', 'š'), - ('ţ', 'ţ'), - ('ť', 'ť'), - ('ŧ', 'ŧ'), - ('ũ', 'ũ'), - ('ū', 'ū'), - ('ŭ', 'ŭ'), - ('ů', 'ů'), - ('ű', 'ű'), - ('ų', 'ų'), - ('ŵ', 'ŵ'), - ('ŷ', 'ŷ'), - ('ź', 'ź'), - ('ż', 'ż'), - ('ž', 'ƀ'), - ('ƃ', 'ƃ'), - ('ƅ', 'ƅ'), - ('ƈ', 'ƈ'), - ('ƌ', 'ƌ'), - ('ƒ', 'ƒ'), - ('ƕ', 'ƕ'), - ('ƙ', 'ƛ'), - ('ƞ', 'ƞ'), - ('ơ', 'ơ'), - ('ƣ', 'ƣ'), - ('ƥ', 'ƥ'), - ('ƨ', 'ƨ'), - ('ƭ', 'ƭ'), - ('ư', 'ư'), - ('ƴ', 'ƴ'), - ('ƶ', 'ƶ'), - ('ƹ', 'ƹ'), - ('ƽ', 'ƽ'), - ('ƿ', 'ƿ'), - ('Dž', 'dž'), - ('Lj', 'lj'), - ('Nj', 'nj'), - ('ǎ', 'ǎ'), - ('ǐ', 'ǐ'), - ('ǒ', 'ǒ'), - ('ǔ', 'ǔ'), 
- ('ǖ', 'ǖ'), - ('ǘ', 'ǘ'), - ('ǚ', 'ǚ'), - ('ǜ', 'ǝ'), - ('ǟ', 'ǟ'), - ('ǡ', 'ǡ'), - ('ǣ', 'ǣ'), - ('ǥ', 'ǥ'), - ('ǧ', 'ǧ'), - ('ǩ', 'ǩ'), - ('ǫ', 'ǫ'), - ('ǭ', 'ǭ'), - ('ǯ', 'ǰ'), - ('Dz', 'dz'), - ('ǵ', 'ǵ'), - ('ǹ', 'ǹ'), - ('ǻ', 'ǻ'), - ('ǽ', 'ǽ'), - ('ǿ', 'ǿ'), - ('ȁ', 'ȁ'), - ('ȃ', 'ȃ'), - ('ȅ', 'ȅ'), - ('ȇ', 'ȇ'), - ('ȉ', 'ȉ'), - ('ȋ', 'ȋ'), - ('ȍ', 'ȍ'), - ('ȏ', 'ȏ'), - ('ȑ', 'ȑ'), - ('ȓ', 'ȓ'), - ('ȕ', 'ȕ'), - ('ȗ', 'ȗ'), - ('ș', 'ș'), - ('ț', 'ț'), - ('ȝ', 'ȝ'), - ('ȟ', 'ȟ'), - ('ȣ', 'ȣ'), - ('ȥ', 'ȥ'), - ('ȧ', 'ȧ'), - ('ȩ', 'ȩ'), - ('ȫ', 'ȫ'), - ('ȭ', 'ȭ'), - ('ȯ', 'ȯ'), - ('ȱ', 'ȱ'), - ('ȳ', 'ȳ'), - ('ȼ', 'ȼ'), - ('ȿ', 'ɀ'), - ('ɂ', 'ɂ'), - ('ɇ', 'ɇ'), - ('ɉ', 'ɉ'), - ('ɋ', 'ɋ'), - ('ɍ', 'ɍ'), - ('ɏ', 'ɔ'), - ('ɖ', 'ɗ'), - ('ə', 'ə'), - ('ɛ', 'ɜ'), - ('ɠ', 'ɡ'), - ('ɣ', 'ɦ'), - ('ɨ', 'ɬ'), - ('ɯ', 'ɯ'), - ('ɱ', 'ɲ'), - ('ɵ', 'ɵ'), - ('ɽ', 'ɽ'), - ('ʀ', 'ʀ'), - ('ʂ', 'ʃ'), - ('ʇ', 'ʌ'), - ('ʒ', 'ʒ'), - ('ʝ', 'ʞ'), - ('\u{345}', '\u{345}'), - ('ͱ', 'ͱ'), - ('ͳ', 'ͳ'), - ('ͷ', 'ͷ'), - ('ͻ', 'ͽ'), - ('ΐ', 'ΐ'), - ('ά', 'ώ'), - ('ϐ', 'ϑ'), - ('ϕ', 'ϗ'), - ('ϙ', 'ϙ'), - ('ϛ', 'ϛ'), - ('ϝ', 'ϝ'), - ('ϟ', 'ϟ'), - ('ϡ', 'ϡ'), - ('ϣ', 'ϣ'), - ('ϥ', 'ϥ'), - ('ϧ', 'ϧ'), - ('ϩ', 'ϩ'), - ('ϫ', 'ϫ'), - ('ϭ', 'ϭ'), - ('ϯ', 'ϳ'), - ('ϵ', 'ϵ'), - ('ϸ', 'ϸ'), - ('ϻ', 'ϻ'), - ('а', 'џ'), - ('ѡ', 'ѡ'), - ('ѣ', 'ѣ'), - ('ѥ', 'ѥ'), - ('ѧ', 'ѧ'), - ('ѩ', 'ѩ'), - ('ѫ', 'ѫ'), - ('ѭ', 'ѭ'), - ('ѯ', 'ѯ'), - ('ѱ', 'ѱ'), - ('ѳ', 'ѳ'), - ('ѵ', 'ѵ'), - ('ѷ', 'ѷ'), - ('ѹ', 'ѹ'), - ('ѻ', 'ѻ'), - ('ѽ', 'ѽ'), - ('ѿ', 'ѿ'), - ('ҁ', 'ҁ'), - ('ҋ', 'ҋ'), - ('ҍ', 'ҍ'), - ('ҏ', 'ҏ'), - ('ґ', 'ґ'), - ('ғ', 'ғ'), - ('ҕ', 'ҕ'), - ('җ', 'җ'), - ('ҙ', 'ҙ'), - ('қ', 'қ'), - ('ҝ', 'ҝ'), - ('ҟ', 'ҟ'), - ('ҡ', 'ҡ'), - ('ң', 'ң'), - ('ҥ', 'ҥ'), - ('ҧ', 'ҧ'), - ('ҩ', 'ҩ'), - ('ҫ', 'ҫ'), - ('ҭ', 'ҭ'), - ('ү', 'ү'), - ('ұ', 'ұ'), - ('ҳ', 'ҳ'), - ('ҵ', 'ҵ'), - ('ҷ', 'ҷ'), - ('ҹ', 'ҹ'), - ('һ', 'һ'), - ('ҽ', 'ҽ'), - ('ҿ', 'ҿ'), - ('ӂ', 'ӂ'), - ('ӄ', 'ӄ'), - ('ӆ', 'ӆ'), - ('ӈ', 'ӈ'), - ('ӊ', 'ӊ'), - ('ӌ', 'ӌ'), - ('ӎ', 'ӏ'), - ('ӑ', 'ӑ'), - ('ӓ', 'ӓ'), - ('ӕ', 'ӕ'), - ('ӗ', 'ӗ'), - ('ә', 'ә'), - ('ӛ', 'ӛ'), - ('ӝ', 'ӝ'), - ('ӟ', 'ӟ'), - ('ӡ', 'ӡ'), - ('ӣ', 'ӣ'), - ('ӥ', 'ӥ'), - ('ӧ', 'ӧ'), - ('ө', 'ө'), - ('ӫ', 'ӫ'), - ('ӭ', 'ӭ'), - ('ӯ', 'ӯ'), - ('ӱ', 'ӱ'), - ('ӳ', 'ӳ'), - ('ӵ', 'ӵ'), - ('ӷ', 'ӷ'), - ('ӹ', 'ӹ'), - ('ӻ', 'ӻ'), - ('ӽ', 'ӽ'), - ('ӿ', 'ӿ'), - ('ԁ', 'ԁ'), - ('ԃ', 'ԃ'), - ('ԅ', 'ԅ'), - ('ԇ', 'ԇ'), - ('ԉ', 'ԉ'), - ('ԋ', 'ԋ'), - ('ԍ', 'ԍ'), - ('ԏ', 'ԏ'), - ('ԑ', 'ԑ'), - ('ԓ', 'ԓ'), - ('ԕ', 'ԕ'), - ('ԗ', 'ԗ'), - ('ԙ', 'ԙ'), - ('ԛ', 'ԛ'), - ('ԝ', 'ԝ'), - ('ԟ', 'ԟ'), - ('ԡ', 'ԡ'), - ('ԣ', 'ԣ'), - ('ԥ', 'ԥ'), - ('ԧ', 'ԧ'), - ('ԩ', 'ԩ'), - ('ԫ', 'ԫ'), - ('ԭ', 'ԭ'), - ('ԯ', 'ԯ'), - ('ա', 'և'), - ('ა', 'ჺ'), - ('ჽ', 'ჿ'), - ('ᏸ', 'ᏽ'), - ('ᲀ', 'ᲈ'), - ('ᲊ', 'ᲊ'), - ('ᵹ', 'ᵹ'), - ('ᵽ', 'ᵽ'), - ('ᶎ', 'ᶎ'), - ('ḁ', 'ḁ'), - ('ḃ', 'ḃ'), - ('ḅ', 'ḅ'), - ('ḇ', 'ḇ'), - ('ḉ', 'ḉ'), - ('ḋ', 'ḋ'), - ('ḍ', 'ḍ'), - ('ḏ', 'ḏ'), - ('ḑ', 'ḑ'), - ('ḓ', 'ḓ'), - ('ḕ', 'ḕ'), - ('ḗ', 'ḗ'), - ('ḙ', 'ḙ'), - ('ḛ', 'ḛ'), - ('ḝ', 'ḝ'), - ('ḟ', 'ḟ'), - ('ḡ', 'ḡ'), - ('ḣ', 'ḣ'), - ('ḥ', 'ḥ'), - ('ḧ', 'ḧ'), - ('ḩ', 'ḩ'), - ('ḫ', 'ḫ'), - ('ḭ', 'ḭ'), - ('ḯ', 'ḯ'), - ('ḱ', 'ḱ'), - ('ḳ', 'ḳ'), - ('ḵ', 'ḵ'), - ('ḷ', 'ḷ'), - ('ḹ', 'ḹ'), - ('ḻ', 'ḻ'), - ('ḽ', 'ḽ'), - ('ḿ', 'ḿ'), - ('ṁ', 'ṁ'), - ('ṃ', 'ṃ'), - ('ṅ', 'ṅ'), - ('ṇ', 'ṇ'), - ('ṉ', 'ṉ'), - ('ṋ', 'ṋ'), - ('ṍ', 'ṍ'), - ('ṏ', 'ṏ'), - ('ṑ', 'ṑ'), - ('ṓ', 'ṓ'), - ('ṕ', 'ṕ'), - ('ṗ', 'ṗ'), - ('ṙ', 'ṙ'), - ('ṛ', 'ṛ'), - ('ṝ', 'ṝ'), - ('ṟ', 'ṟ'), - ('ṡ', 'ṡ'), - ('ṣ', 'ṣ'), - ('ṥ', 'ṥ'), - ('ṧ', 'ṧ'), - ('ṩ', 
'ṩ'), - ('ṫ', 'ṫ'), - ('ṭ', 'ṭ'), - ('ṯ', 'ṯ'), - ('ṱ', 'ṱ'), - ('ṳ', 'ṳ'), - ('ṵ', 'ṵ'), - ('ṷ', 'ṷ'), - ('ṹ', 'ṹ'), - ('ṻ', 'ṻ'), - ('ṽ', 'ṽ'), - ('ṿ', 'ṿ'), - ('ẁ', 'ẁ'), - ('ẃ', 'ẃ'), - ('ẅ', 'ẅ'), - ('ẇ', 'ẇ'), - ('ẉ', 'ẉ'), - ('ẋ', 'ẋ'), - ('ẍ', 'ẍ'), - ('ẏ', 'ẏ'), - ('ẑ', 'ẑ'), - ('ẓ', 'ẓ'), - ('ẕ', 'ẛ'), - ('ạ', 'ạ'), - ('ả', 'ả'), - ('ấ', 'ấ'), - ('ầ', 'ầ'), - ('ẩ', 'ẩ'), - ('ẫ', 'ẫ'), - ('ậ', 'ậ'), - ('ắ', 'ắ'), - ('ằ', 'ằ'), - ('ẳ', 'ẳ'), - ('ẵ', 'ẵ'), - ('ặ', 'ặ'), - ('ẹ', 'ẹ'), - ('ẻ', 'ẻ'), - ('ẽ', 'ẽ'), - ('ế', 'ế'), - ('ề', 'ề'), - ('ể', 'ể'), - ('ễ', 'ễ'), - ('ệ', 'ệ'), - ('ỉ', 'ỉ'), - ('ị', 'ị'), - ('ọ', 'ọ'), - ('ỏ', 'ỏ'), - ('ố', 'ố'), - ('ồ', 'ồ'), - ('ổ', 'ổ'), - ('ỗ', 'ỗ'), - ('ộ', 'ộ'), - ('ớ', 'ớ'), - ('ờ', 'ờ'), - ('ở', 'ở'), - ('ỡ', 'ỡ'), - ('ợ', 'ợ'), - ('ụ', 'ụ'), - ('ủ', 'ủ'), - ('ứ', 'ứ'), - ('ừ', 'ừ'), - ('ử', 'ử'), - ('ữ', 'ữ'), - ('ự', 'ự'), - ('ỳ', 'ỳ'), - ('ỵ', 'ỵ'), - ('ỷ', 'ỷ'), - ('ỹ', 'ỹ'), - ('ỻ', 'ỻ'), - ('ỽ', 'ỽ'), - ('ỿ', 'ἇ'), - ('ἐ', 'ἕ'), - ('ἠ', 'ἧ'), - ('ἰ', 'ἷ'), - ('ὀ', 'ὅ'), - ('ὐ', 'ὗ'), - ('ὠ', 'ὧ'), - ('ὰ', 'ώ'), - ('ᾀ', 'ᾴ'), - ('ᾶ', 'ᾷ'), - ('ᾼ', 'ᾼ'), - ('ι', 'ι'), - ('ῂ', 'ῄ'), - ('ῆ', 'ῇ'), - ('ῌ', 'ῌ'), - ('ῐ', 'ΐ'), - ('ῖ', 'ῗ'), - ('ῠ', 'ῧ'), - ('ῲ', 'ῴ'), - ('ῶ', 'ῷ'), - ('ῼ', 'ῼ'), - ('ⅎ', 'ⅎ'), - ('ⅰ', 'ⅿ'), - ('ↄ', 'ↄ'), - ('ⓐ', 'ⓩ'), - ('ⰰ', 'ⱟ'), - ('ⱡ', 'ⱡ'), - ('ⱥ', 'ⱦ'), - ('ⱨ', 'ⱨ'), - ('ⱪ', 'ⱪ'), - ('ⱬ', 'ⱬ'), - ('ⱳ', 'ⱳ'), - ('ⱶ', 'ⱶ'), - ('ⲁ', 'ⲁ'), - ('ⲃ', 'ⲃ'), - ('ⲅ', 'ⲅ'), - ('ⲇ', 'ⲇ'), - ('ⲉ', 'ⲉ'), - ('ⲋ', 'ⲋ'), - ('ⲍ', 'ⲍ'), - ('ⲏ', 'ⲏ'), - ('ⲑ', 'ⲑ'), - ('ⲓ', 'ⲓ'), - ('ⲕ', 'ⲕ'), - ('ⲗ', 'ⲗ'), - ('ⲙ', 'ⲙ'), - ('ⲛ', 'ⲛ'), - ('ⲝ', 'ⲝ'), - ('ⲟ', 'ⲟ'), - ('ⲡ', 'ⲡ'), - ('ⲣ', 'ⲣ'), - ('ⲥ', 'ⲥ'), - ('ⲧ', 'ⲧ'), - ('ⲩ', 'ⲩ'), - ('ⲫ', 'ⲫ'), - ('ⲭ', 'ⲭ'), - ('ⲯ', 'ⲯ'), - ('ⲱ', 'ⲱ'), - ('ⲳ', 'ⲳ'), - ('ⲵ', 'ⲵ'), - ('ⲷ', 'ⲷ'), - ('ⲹ', 'ⲹ'), - ('ⲻ', 'ⲻ'), - ('ⲽ', 'ⲽ'), - ('ⲿ', 'ⲿ'), - ('ⳁ', 'ⳁ'), - ('ⳃ', 'ⳃ'), - ('ⳅ', 'ⳅ'), - ('ⳇ', 'ⳇ'), - ('ⳉ', 'ⳉ'), - ('ⳋ', 'ⳋ'), - ('ⳍ', 'ⳍ'), - ('ⳏ', 'ⳏ'), - ('ⳑ', 'ⳑ'), - ('ⳓ', 'ⳓ'), - ('ⳕ', 'ⳕ'), - ('ⳗ', 'ⳗ'), - ('ⳙ', 'ⳙ'), - ('ⳛ', 'ⳛ'), - ('ⳝ', 'ⳝ'), - ('ⳟ', 'ⳟ'), - ('ⳡ', 'ⳡ'), - ('ⳣ', 'ⳣ'), - ('ⳬ', 'ⳬ'), - ('ⳮ', 'ⳮ'), - ('ⳳ', 'ⳳ'), - ('ⴀ', 'ⴥ'), - ('ⴧ', 'ⴧ'), - ('ⴭ', 'ⴭ'), - ('ꙁ', 'ꙁ'), - ('ꙃ', 'ꙃ'), - ('ꙅ', 'ꙅ'), - ('ꙇ', 'ꙇ'), - ('ꙉ', 'ꙉ'), - ('ꙋ', 'ꙋ'), - ('ꙍ', 'ꙍ'), - ('ꙏ', 'ꙏ'), - ('ꙑ', 'ꙑ'), - ('ꙓ', 'ꙓ'), - ('ꙕ', 'ꙕ'), - ('ꙗ', 'ꙗ'), - ('ꙙ', 'ꙙ'), - ('ꙛ', 'ꙛ'), - ('ꙝ', 'ꙝ'), - ('ꙟ', 'ꙟ'), - ('ꙡ', 'ꙡ'), - ('ꙣ', 'ꙣ'), - ('ꙥ', 'ꙥ'), - ('ꙧ', 'ꙧ'), - ('ꙩ', 'ꙩ'), - ('ꙫ', 'ꙫ'), - ('ꙭ', 'ꙭ'), - ('ꚁ', 'ꚁ'), - ('ꚃ', 'ꚃ'), - ('ꚅ', 'ꚅ'), - ('ꚇ', 'ꚇ'), - ('ꚉ', 'ꚉ'), - ('ꚋ', 'ꚋ'), - ('ꚍ', 'ꚍ'), - ('ꚏ', 'ꚏ'), - ('ꚑ', 'ꚑ'), - ('ꚓ', 'ꚓ'), - ('ꚕ', 'ꚕ'), - ('ꚗ', 'ꚗ'), - ('ꚙ', 'ꚙ'), - ('ꚛ', 'ꚛ'), - ('ꜣ', 'ꜣ'), - ('ꜥ', 'ꜥ'), - ('ꜧ', 'ꜧ'), - ('ꜩ', 'ꜩ'), - ('ꜫ', 'ꜫ'), - ('ꜭ', 'ꜭ'), - ('ꜯ', 'ꜯ'), - ('ꜳ', 'ꜳ'), - ('ꜵ', 'ꜵ'), - ('ꜷ', 'ꜷ'), - ('ꜹ', 'ꜹ'), - ('ꜻ', 'ꜻ'), - ('ꜽ', 'ꜽ'), - ('ꜿ', 'ꜿ'), - ('ꝁ', 'ꝁ'), - ('ꝃ', 'ꝃ'), - ('ꝅ', 'ꝅ'), - ('ꝇ', 'ꝇ'), - ('ꝉ', 'ꝉ'), - ('ꝋ', 'ꝋ'), - ('ꝍ', 'ꝍ'), - ('ꝏ', 'ꝏ'), - ('ꝑ', 'ꝑ'), - ('ꝓ', 'ꝓ'), - ('ꝕ', 'ꝕ'), - ('ꝗ', 'ꝗ'), - ('ꝙ', 'ꝙ'), - ('ꝛ', 'ꝛ'), - ('ꝝ', 'ꝝ'), - ('ꝟ', 'ꝟ'), - ('ꝡ', 'ꝡ'), - ('ꝣ', 'ꝣ'), - ('ꝥ', 'ꝥ'), - ('ꝧ', 'ꝧ'), - ('ꝩ', 'ꝩ'), - ('ꝫ', 'ꝫ'), - ('ꝭ', 'ꝭ'), - ('ꝯ', 'ꝯ'), - ('ꝺ', 'ꝺ'), - ('ꝼ', 'ꝼ'), - ('ꝿ', 'ꝿ'), - ('ꞁ', 'ꞁ'), - ('ꞃ', 'ꞃ'), - ('ꞅ', 'ꞅ'), - ('ꞇ', 'ꞇ'), - ('ꞌ', 'ꞌ'), - ('ꞑ', 'ꞑ'), - ('ꞓ', 'ꞔ'), - ('ꞗ', 'ꞗ'), - ('ꞙ', 'ꞙ'), - ('ꞛ', 'ꞛ'), - ('ꞝ', 'ꞝ'), - ('ꞟ', 'ꞟ'), - ('ꞡ', 'ꞡ'), - ('ꞣ', 'ꞣ'), - ('ꞥ', 'ꞥ'), - ('ꞧ', 'ꞧ'), - ('ꞩ', 'ꞩ'), - 
('ꞵ', 'ꞵ'), - ('ꞷ', 'ꞷ'), - ('ꞹ', 'ꞹ'), - ('ꞻ', 'ꞻ'), - ('ꞽ', 'ꞽ'), - ('ꞿ', 'ꞿ'), - ('ꟁ', 'ꟁ'), - ('ꟃ', 'ꟃ'), - ('ꟈ', 'ꟈ'), - ('ꟊ', 'ꟊ'), - ('ꟍ', 'ꟍ'), - ('ꟑ', 'ꟑ'), - ('ꟗ', 'ꟗ'), - ('ꟙ', 'ꟙ'), - ('ꟛ', 'ꟛ'), - ('ꟶ', 'ꟶ'), - ('ꭓ', 'ꭓ'), - ('ꭰ', 'ꮿ'), - ('ff', 'st'), - ('ﬓ', 'ﬗ'), - ('a', 'z'), - ('𐐨', '𐑏'), - ('𐓘', '𐓻'), - ('𐖗', '𐖡'), - ('𐖣', '𐖱'), - ('𐖳', '𐖹'), - ('𐖻', '𐖼'), - ('𐳀', '𐳲'), - ('𐵰', '𐶅'), - ('𑣀', '𑣟'), - ('𖹠', '𖹿'), - ('𞤢', '𞥃'), -]; - -pub const DASH: &'static [(char, char)] = &[ - ('-', '-'), - ('֊', '֊'), - ('־', '־'), - ('᐀', '᐀'), - ('᠆', '᠆'), - ('‐', '―'), - ('⁓', '⁓'), - ('⁻', '⁻'), - ('₋', '₋'), - ('−', '−'), - ('⸗', '⸗'), - ('⸚', '⸚'), - ('⸺', '⸻'), - ('⹀', '⹀'), - ('⹝', '⹝'), - ('〜', '〜'), - ('〰', '〰'), - ('゠', '゠'), - ('︱', '︲'), - ('﹘', '﹘'), - ('﹣', '﹣'), - ('-', '-'), - ('𐵮', '𐵮'), - ('𐺭', '𐺭'), -]; - -pub const DEFAULT_IGNORABLE_CODE_POINT: &'static [(char, char)] = &[ - ('\u{ad}', '\u{ad}'), - ('\u{34f}', '\u{34f}'), - ('\u{61c}', '\u{61c}'), - ('ᅟ', 'ᅠ'), - ('\u{17b4}', '\u{17b5}'), - ('\u{180b}', '\u{180f}'), - ('\u{200b}', '\u{200f}'), - ('\u{202a}', '\u{202e}'), - ('\u{2060}', '\u{206f}'), - ('ㅤ', 'ㅤ'), - ('\u{fe00}', '\u{fe0f}'), - ('\u{feff}', '\u{feff}'), - ('ᅠ', 'ᅠ'), - ('\u{fff0}', '\u{fff8}'), - ('\u{1bca0}', '\u{1bca3}'), - ('\u{1d173}', '\u{1d17a}'), - ('\u{e0000}', '\u{e0fff}'), -]; - -pub const DEPRECATED: &'static [(char, char)] = &[ - ('ʼn', 'ʼn'), - ('ٳ', 'ٳ'), - ('\u{f77}', '\u{f77}'), - ('\u{f79}', '\u{f79}'), - ('ឣ', 'ឤ'), - ('\u{206a}', '\u{206f}'), - ('〈', '〉'), - ('\u{e0001}', '\u{e0001}'), -]; - -pub const DIACRITIC: &'static [(char, char)] = &[ - ('^', '^'), - ('`', '`'), - ('¨', '¨'), - ('¯', '¯'), - ('´', '´'), - ('·', '¸'), - ('ʰ', '\u{34e}'), - ('\u{350}', '\u{357}'), - ('\u{35d}', '\u{362}'), - ('ʹ', '͵'), - ('ͺ', 'ͺ'), - ('΄', '΅'), - ('\u{483}', '\u{487}'), - ('ՙ', 'ՙ'), - ('\u{591}', '\u{5a1}'), - ('\u{5a3}', '\u{5bd}'), - ('\u{5bf}', '\u{5bf}'), - ('\u{5c1}', '\u{5c2}'), - ('\u{5c4}', '\u{5c4}'), - ('\u{64b}', '\u{652}'), - ('\u{657}', '\u{658}'), - ('\u{6df}', '\u{6e0}'), - ('ۥ', 'ۦ'), - ('\u{6ea}', '\u{6ec}'), - ('\u{730}', '\u{74a}'), - ('\u{7a6}', '\u{7b0}'), - ('\u{7eb}', 'ߵ'), - ('\u{818}', '\u{819}'), - ('\u{898}', '\u{89f}'), - ('ࣉ', '\u{8d2}'), - ('\u{8e3}', '\u{8fe}'), - ('\u{93c}', '\u{93c}'), - ('\u{94d}', '\u{94d}'), - ('\u{951}', '\u{954}'), - ('ॱ', 'ॱ'), - ('\u{9bc}', '\u{9bc}'), - ('\u{9cd}', '\u{9cd}'), - ('\u{a3c}', '\u{a3c}'), - ('\u{a4d}', '\u{a4d}'), - ('\u{abc}', '\u{abc}'), - ('\u{acd}', '\u{acd}'), - ('\u{afd}', '\u{aff}'), - ('\u{b3c}', '\u{b3c}'), - ('\u{b4d}', '\u{b4d}'), - ('\u{b55}', '\u{b55}'), - ('\u{bcd}', '\u{bcd}'), - ('\u{c3c}', '\u{c3c}'), - ('\u{c4d}', '\u{c4d}'), - ('\u{cbc}', '\u{cbc}'), - ('\u{ccd}', '\u{ccd}'), - ('\u{d3b}', '\u{d3c}'), - ('\u{d4d}', '\u{d4d}'), - ('\u{dca}', '\u{dca}'), - ('\u{e3a}', '\u{e3a}'), - ('\u{e47}', '\u{e4c}'), - ('\u{e4e}', '\u{e4e}'), - ('\u{eba}', '\u{eba}'), - ('\u{ec8}', '\u{ecc}'), - ('\u{f18}', '\u{f19}'), - ('\u{f35}', '\u{f35}'), - ('\u{f37}', '\u{f37}'), - ('\u{f39}', '\u{f39}'), - ('༾', '༿'), - ('\u{f82}', '\u{f84}'), - ('\u{f86}', '\u{f87}'), - ('\u{fc6}', '\u{fc6}'), - ('\u{1037}', '\u{1037}'), - ('\u{1039}', '\u{103a}'), - ('ၣ', 'ၤ'), - ('ၩ', 'ၭ'), - ('ႇ', '\u{108d}'), - ('ႏ', 'ႏ'), - ('ႚ', 'ႛ'), - ('\u{135d}', '\u{135f}'), - ('\u{1714}', '\u{1715}'), - ('\u{1734}', '\u{1734}'), - ('\u{17c9}', '\u{17d3}'), - ('\u{17dd}', '\u{17dd}'), - ('\u{1939}', '\u{193b}'), - ('\u{1a60}', '\u{1a60}'), - ('\u{1a75}', '\u{1a7c}'), - ('\u{1a7f}', '\u{1a7f}'), 
- ('\u{1ab0}', '\u{1abe}'), - ('\u{1ac1}', '\u{1acb}'), - ('\u{1b34}', '\u{1b34}'), - ('\u{1b44}', '\u{1b44}'), - ('\u{1b6b}', '\u{1b73}'), - ('\u{1baa}', '\u{1bab}'), - ('\u{1be6}', '\u{1be6}'), - ('\u{1bf2}', '\u{1bf3}'), - ('\u{1c36}', '\u{1c37}'), - ('ᱸ', 'ᱽ'), - ('\u{1cd0}', '\u{1ce8}'), - ('\u{1ced}', '\u{1ced}'), - ('\u{1cf4}', '\u{1cf4}'), - ('᳷', '\u{1cf9}'), - ('ᴬ', 'ᵪ'), - ('\u{1dc4}', '\u{1dcf}'), - ('\u{1df5}', '\u{1dff}'), - ('᾽', '᾽'), - ('᾿', '῁'), - ('῍', '῏'), - ('῝', '῟'), - ('῭', '`'), - ('´', '῾'), - ('\u{2cef}', '\u{2cf1}'), - ('ⸯ', 'ⸯ'), - ('\u{302a}', '\u{302f}'), - ('\u{3099}', '゜'), - ('ー', 'ー'), - ('\u{a66f}', '\u{a66f}'), - ('\u{a67c}', '\u{a67d}'), - ('ꙿ', 'ꙿ'), - ('ꚜ', 'ꚝ'), - ('\u{a6f0}', '\u{a6f1}'), - ('꜀', '꜡'), - ('ꞈ', '꞊'), - ('ꟸ', 'ꟹ'), - ('\u{a806}', '\u{a806}'), - ('\u{a82c}', '\u{a82c}'), - ('\u{a8c4}', '\u{a8c4}'), - ('\u{a8e0}', '\u{a8f1}'), - ('\u{a92b}', '꤮'), - ('\u{a953}', '\u{a953}'), - ('\u{a9b3}', '\u{a9b3}'), - ('\u{a9c0}', '\u{a9c0}'), - ('\u{a9e5}', '\u{a9e5}'), - ('ꩻ', 'ꩽ'), - ('\u{aabf}', 'ꫂ'), - ('\u{aaf6}', '\u{aaf6}'), - ('꭛', 'ꭟ'), - ('ꭩ', '꭫'), - ('꯬', '\u{abed}'), - ('\u{fb1e}', '\u{fb1e}'), - ('\u{fe20}', '\u{fe2f}'), - ('^', '^'), - ('`', '`'), - ('ー', 'ー'), - ('\u{ff9e}', '\u{ff9f}'), - (' ̄', ' ̄'), - ('\u{102e0}', '\u{102e0}'), - ('𐞀', '𐞅'), - ('𐞇', '𐞰'), - ('𐞲', '𐞺'), - ('\u{10a38}', '\u{10a3a}'), - ('\u{10a3f}', '\u{10a3f}'), - ('\u{10ae5}', '\u{10ae6}'), - ('𐴢', '\u{10d27}'), - ('𐵎', '𐵎'), - ('\u{10d69}', '\u{10d6d}'), - ('\u{10efd}', '\u{10eff}'), - ('\u{10f46}', '\u{10f50}'), - ('\u{10f82}', '\u{10f85}'), - ('\u{11046}', '\u{11046}'), - ('\u{11070}', '\u{11070}'), - ('\u{110b9}', '\u{110ba}'), - ('\u{11133}', '\u{11134}'), - ('\u{11173}', '\u{11173}'), - ('\u{111c0}', '\u{111c0}'), - ('\u{111ca}', '\u{111cc}'), - ('\u{11235}', '\u{11236}'), - ('\u{112e9}', '\u{112ea}'), - ('\u{1133b}', '\u{1133c}'), - ('\u{1134d}', '\u{1134d}'), - ('\u{11366}', '\u{1136c}'), - ('\u{11370}', '\u{11374}'), - ('\u{113ce}', '\u{113d0}'), - ('\u{113d2}', '𑏓'), - ('\u{113e1}', '\u{113e2}'), - ('\u{11442}', '\u{11442}'), - ('\u{11446}', '\u{11446}'), - ('\u{114c2}', '\u{114c3}'), - ('\u{115bf}', '\u{115c0}'), - ('\u{1163f}', '\u{1163f}'), - ('\u{116b6}', '\u{116b7}'), - ('\u{1172b}', '\u{1172b}'), - ('\u{11839}', '\u{1183a}'), - ('\u{1193d}', '\u{1193e}'), - ('\u{11943}', '\u{11943}'), - ('\u{119e0}', '\u{119e0}'), - ('\u{11a34}', '\u{11a34}'), - ('\u{11a47}', '\u{11a47}'), - ('\u{11a99}', '\u{11a99}'), - ('\u{11c3f}', '\u{11c3f}'), - ('\u{11d42}', '\u{11d42}'), - ('\u{11d44}', '\u{11d45}'), - ('\u{11d97}', '\u{11d97}'), - ('\u{11f41}', '\u{11f42}'), - ('\u{11f5a}', '\u{11f5a}'), - ('\u{13447}', '\u{13455}'), - ('\u{1612f}', '\u{1612f}'), - ('\u{16af0}', '\u{16af4}'), - ('\u{16b30}', '\u{16b36}'), - ('𖵫', '𖵬'), - ('\u{16f8f}', '𖾟'), - ('\u{16ff0}', '\u{16ff1}'), - ('𚿰', '𚿳'), - ('𚿵', '𚿻'), - ('𚿽', '𚿾'), - ('\u{1cf00}', '\u{1cf2d}'), - ('\u{1cf30}', '\u{1cf46}'), - ('\u{1d167}', '\u{1d169}'), - ('\u{1d16d}', '\u{1d172}'), - ('\u{1d17b}', '\u{1d182}'), - ('\u{1d185}', '\u{1d18b}'), - ('\u{1d1aa}', '\u{1d1ad}'), - ('𞀰', '𞁭'), - ('\u{1e130}', '\u{1e136}'), - ('\u{1e2ae}', '\u{1e2ae}'), - ('\u{1e2ec}', '\u{1e2ef}'), - ('\u{1e5ee}', '\u{1e5ef}'), - ('\u{1e8d0}', '\u{1e8d6}'), - ('\u{1e944}', '\u{1e946}'), - ('\u{1e948}', '\u{1e94a}'), -]; - -pub const EMOJI: &'static [(char, char)] = &[ - ('#', '#'), - ('*', '*'), - ('0', '9'), - ('©', '©'), - ('®', '®'), - ('‼', '‼'), - ('⁉', '⁉'), - ('™', '™'), - ('ℹ', 'ℹ'), - ('↔', '↙'), - ('↩', '↪'), - ('⌚', 
'⌛'), - ('⌨', '⌨'), - ('⏏', '⏏'), - ('⏩', '⏳'), - ('⏸', '⏺'), - ('Ⓜ', 'Ⓜ'), - ('▪', '▫'), - ('▶', '▶'), - ('◀', '◀'), - ('◻', '◾'), - ('☀', '☄'), - ('☎', '☎'), - ('☑', '☑'), - ('☔', '☕'), - ('☘', '☘'), - ('☝', '☝'), - ('☠', '☠'), - ('☢', '☣'), - ('☦', '☦'), - ('☪', '☪'), - ('☮', '☯'), - ('☸', '☺'), - ('♀', '♀'), - ('♂', '♂'), - ('♈', '♓'), - ('♟', '♠'), - ('♣', '♣'), - ('♥', '♦'), - ('♨', '♨'), - ('♻', '♻'), - ('♾', '♿'), - ('⚒', '⚗'), - ('⚙', '⚙'), - ('⚛', '⚜'), - ('⚠', '⚡'), - ('⚧', '⚧'), - ('⚪', '⚫'), - ('⚰', '⚱'), - ('⚽', '⚾'), - ('⛄', '⛅'), - ('⛈', '⛈'), - ('⛎', '⛏'), - ('⛑', '⛑'), - ('⛓', '⛔'), - ('⛩', '⛪'), - ('⛰', '⛵'), - ('⛷', '⛺'), - ('⛽', '⛽'), - ('✂', '✂'), - ('✅', '✅'), - ('✈', '✍'), - ('✏', '✏'), - ('✒', '✒'), - ('✔', '✔'), - ('✖', '✖'), - ('✝', '✝'), - ('✡', '✡'), - ('✨', '✨'), - ('✳', '✴'), - ('❄', '❄'), - ('❇', '❇'), - ('❌', '❌'), - ('❎', '❎'), - ('❓', '❕'), - ('❗', '❗'), - ('❣', '❤'), - ('➕', '➗'), - ('➡', '➡'), - ('➰', '➰'), - ('➿', '➿'), - ('⤴', '⤵'), - ('⬅', '⬇'), - ('⬛', '⬜'), - ('⭐', '⭐'), - ('⭕', '⭕'), - ('〰', '〰'), - ('〽', '〽'), - ('㊗', '㊗'), - ('㊙', '㊙'), - ('🀄', '🀄'), - ('🃏', '🃏'), - ('🅰', '🅱'), - ('🅾', '🅿'), - ('🆎', '🆎'), - ('🆑', '🆚'), - ('🇦', '🇿'), - ('🈁', '🈂'), - ('🈚', '🈚'), - ('🈯', '🈯'), - ('🈲', '🈺'), - ('🉐', '🉑'), - ('🌀', '🌡'), - ('🌤', '🎓'), - ('🎖', '🎗'), - ('🎙', '🎛'), - ('🎞', '🏰'), - ('🏳', '🏵'), - ('🏷', '📽'), - ('📿', '🔽'), - ('🕉', '🕎'), - ('🕐', '🕧'), - ('🕯', '🕰'), - ('🕳', '🕺'), - ('🖇', '🖇'), - ('🖊', '🖍'), - ('🖐', '🖐'), - ('🖕', '🖖'), - ('🖤', '🖥'), - ('🖨', '🖨'), - ('🖱', '🖲'), - ('🖼', '🖼'), - ('🗂', '🗄'), - ('🗑', '🗓'), - ('🗜', '🗞'), - ('🗡', '🗡'), - ('🗣', '🗣'), - ('🗨', '🗨'), - ('🗯', '🗯'), - ('🗳', '🗳'), - ('🗺', '🙏'), - ('🚀', '🛅'), - ('🛋', '🛒'), - ('🛕', '🛗'), - ('🛜', '🛥'), - ('🛩', '🛩'), - ('🛫', '🛬'), - ('🛰', '🛰'), - ('🛳', '🛼'), - ('🟠', '🟫'), - ('🟰', '🟰'), - ('🤌', '🤺'), - ('🤼', '🥅'), - ('🥇', '🧿'), - ('🩰', '🩼'), - ('🪀', '🪉'), - ('🪏', '🫆'), - ('🫎', '🫜'), - ('🫟', '🫩'), - ('🫰', '🫸'), -]; - -pub const EMOJI_COMPONENT: &'static [(char, char)] = &[ - ('#', '#'), - ('*', '*'), - ('0', '9'), - ('\u{200d}', '\u{200d}'), - ('\u{20e3}', '\u{20e3}'), - ('\u{fe0f}', '\u{fe0f}'), - ('🇦', '🇿'), - ('🏻', '🏿'), - ('🦰', '🦳'), - ('\u{e0020}', '\u{e007f}'), -]; - -pub const EMOJI_MODIFIER: &'static [(char, char)] = &[('🏻', '🏿')]; - -pub const EMOJI_MODIFIER_BASE: &'static [(char, char)] = &[ - ('☝', '☝'), - ('⛹', '⛹'), - ('✊', '✍'), - ('🎅', '🎅'), - ('🏂', '🏄'), - ('🏇', '🏇'), - ('🏊', '🏌'), - ('👂', '👃'), - ('👆', '👐'), - ('👦', '👸'), - ('👼', '👼'), - ('💁', '💃'), - ('💅', '💇'), - ('💏', '💏'), - ('💑', '💑'), - ('💪', '💪'), - ('🕴', '🕵'), - ('🕺', '🕺'), - ('🖐', '🖐'), - ('🖕', '🖖'), - ('🙅', '🙇'), - ('🙋', '🙏'), - ('🚣', '🚣'), - ('🚴', '🚶'), - ('🛀', '🛀'), - ('🛌', '🛌'), - ('🤌', '🤌'), - ('🤏', '🤏'), - ('🤘', '🤟'), - ('🤦', '🤦'), - ('🤰', '🤹'), - ('🤼', '🤾'), - ('🥷', '🥷'), - ('🦵', '🦶'), - ('🦸', '🦹'), - ('🦻', '🦻'), - ('🧍', '🧏'), - ('🧑', '🧝'), - ('🫃', '🫅'), - ('🫰', '🫸'), -]; - -pub const EMOJI_PRESENTATION: &'static [(char, char)] = &[ - ('⌚', '⌛'), - ('⏩', '⏬'), - ('⏰', '⏰'), - ('⏳', '⏳'), - ('◽', '◾'), - ('☔', '☕'), - ('♈', '♓'), - ('♿', '♿'), - ('⚓', '⚓'), - ('⚡', '⚡'), - ('⚪', '⚫'), - ('⚽', '⚾'), - ('⛄', '⛅'), - ('⛎', '⛎'), - ('⛔', '⛔'), - ('⛪', '⛪'), - ('⛲', '⛳'), - ('⛵', '⛵'), - ('⛺', '⛺'), - ('⛽', '⛽'), - ('✅', '✅'), - ('✊', '✋'), - ('✨', '✨'), - ('❌', '❌'), - ('❎', '❎'), - ('❓', '❕'), - ('❗', '❗'), - ('➕', '➗'), - ('➰', '➰'), - ('➿', '➿'), - ('⬛', '⬜'), - ('⭐', '⭐'), - ('⭕', '⭕'), - ('🀄', '🀄'), - ('🃏', '🃏'), - ('🆎', '🆎'), - ('🆑', '🆚'), - ('🇦', '🇿'), - ('🈁', '🈁'), - ('🈚', '🈚'), - ('🈯', '🈯'), - ('🈲', '🈶'), - 
('🈸', '🈺'), - ('🉐', '🉑'), - ('🌀', '🌠'), - ('🌭', '🌵'), - ('🌷', '🍼'), - ('🍾', '🎓'), - ('🎠', '🏊'), - ('🏏', '🏓'), - ('🏠', '🏰'), - ('🏴', '🏴'), - ('🏸', '🐾'), - ('👀', '👀'), - ('👂', '📼'), - ('📿', '🔽'), - ('🕋', '🕎'), - ('🕐', '🕧'), - ('🕺', '🕺'), - ('🖕', '🖖'), - ('🖤', '🖤'), - ('🗻', '🙏'), - ('🚀', '🛅'), - ('🛌', '🛌'), - ('🛐', '🛒'), - ('🛕', '🛗'), - ('🛜', '🛟'), - ('🛫', '🛬'), - ('🛴', '🛼'), - ('🟠', '🟫'), - ('🟰', '🟰'), - ('🤌', '🤺'), - ('🤼', '🥅'), - ('🥇', '🧿'), - ('🩰', '🩼'), - ('🪀', '🪉'), - ('🪏', '🫆'), - ('🫎', '🫜'), - ('🫟', '🫩'), - ('🫰', '🫸'), -]; - -pub const EXTENDED_PICTOGRAPHIC: &'static [(char, char)] = &[ - ('©', '©'), - ('®', '®'), - ('‼', '‼'), - ('⁉', '⁉'), - ('™', '™'), - ('ℹ', 'ℹ'), - ('↔', '↙'), - ('↩', '↪'), - ('⌚', '⌛'), - ('⌨', '⌨'), - ('⎈', '⎈'), - ('⏏', '⏏'), - ('⏩', '⏳'), - ('⏸', '⏺'), - ('Ⓜ', 'Ⓜ'), - ('▪', '▫'), - ('▶', '▶'), - ('◀', '◀'), - ('◻', '◾'), - ('☀', '★'), - ('☇', '☒'), - ('☔', '⚅'), - ('⚐', '✅'), - ('✈', '✒'), - ('✔', '✔'), - ('✖', '✖'), - ('✝', '✝'), - ('✡', '✡'), - ('✨', '✨'), - ('✳', '✴'), - ('❄', '❄'), - ('❇', '❇'), - ('❌', '❌'), - ('❎', '❎'), - ('❓', '❕'), - ('❗', '❗'), - ('❣', '❧'), - ('➕', '➗'), - ('➡', '➡'), - ('➰', '➰'), - ('➿', '➿'), - ('⤴', '⤵'), - ('⬅', '⬇'), - ('⬛', '⬜'), - ('⭐', '⭐'), - ('⭕', '⭕'), - ('〰', '〰'), - ('〽', '〽'), - ('㊗', '㊗'), - ('㊙', '㊙'), - ('🀀', '\u{1f0ff}'), - ('🄍', '🄏'), - ('🄯', '🄯'), - ('🅬', '🅱'), - ('🅾', '🅿'), - ('🆎', '🆎'), - ('🆑', '🆚'), - ('🆭', '\u{1f1e5}'), - ('🈁', '\u{1f20f}'), - ('🈚', '🈚'), - ('🈯', '🈯'), - ('🈲', '🈺'), - ('\u{1f23c}', '\u{1f23f}'), - ('\u{1f249}', '🏺'), - ('🐀', '🔽'), - ('🕆', '🙏'), - ('🚀', '\u{1f6ff}'), - ('🝴', '🝿'), - ('🟕', '\u{1f7ff}'), - ('\u{1f80c}', '\u{1f80f}'), - ('\u{1f848}', '\u{1f84f}'), - ('\u{1f85a}', '\u{1f85f}'), - ('\u{1f888}', '\u{1f88f}'), - ('\u{1f8ae}', '\u{1f8ff}'), - ('🤌', '🤺'), - ('🤼', '🥅'), - ('🥇', '\u{1faff}'), - ('\u{1fc00}', '\u{1fffd}'), -]; - -pub const EXTENDER: &'static [(char, char)] = &[ - ('·', '·'), - ('ː', 'ˑ'), - ('ـ', 'ـ'), - ('ߺ', 'ߺ'), - ('\u{a71}', '\u{a71}'), - ('\u{afb}', '\u{afb}'), - ('\u{b55}', '\u{b55}'), - ('ๆ', 'ๆ'), - ('ໆ', 'ໆ'), - ('᠊', '᠊'), - ('ᡃ', 'ᡃ'), - ('ᪧ', 'ᪧ'), - ('\u{1c36}', '\u{1c36}'), - ('ᱻ', 'ᱻ'), - ('々', '々'), - ('〱', '〵'), - ('ゝ', 'ゞ'), - ('ー', 'ヾ'), - ('ꀕ', 'ꀕ'), - ('ꘌ', 'ꘌ'), - ('ꧏ', 'ꧏ'), - ('ꧦ', 'ꧦ'), - ('ꩰ', 'ꩰ'), - ('ꫝ', 'ꫝ'), - ('ꫳ', 'ꫴ'), - ('ー', 'ー'), - ('𐞁', '𐞂'), - ('𐵎', '𐵎'), - ('\u{10d6a}', '\u{10d6a}'), - ('𐵯', '𐵯'), - ('\u{11237}', '\u{11237}'), - ('𑍝', '𑍝'), - ('\u{113d2}', '𑏓'), - ('𑗆', '𑗈'), - ('\u{11a98}', '\u{11a98}'), - ('𖭂', '𖭃'), - ('𖿠', '𖿡'), - ('𖿣', '𖿣'), - ('𞄼', '𞄽'), - ('\u{1e5ef}', '\u{1e5ef}'), - ('\u{1e944}', '\u{1e946}'), -]; - -pub const GRAPHEME_BASE: &'static [(char, char)] = &[ - (' ', '~'), - ('\u{a0}', '¬'), - ('®', '˿'), - ('Ͱ', 'ͷ'), - ('ͺ', 'Ϳ'), - ('΄', 'Ί'), - ('Ό', 'Ό'), - ('Ύ', 'Ρ'), - ('Σ', '҂'), - ('Ҋ', 'ԯ'), - ('Ա', 'Ֆ'), - ('ՙ', '֊'), - ('֍', '֏'), - ('־', '־'), - ('׀', '׀'), - ('׃', '׃'), - ('׆', '׆'), - ('א', 'ת'), - ('ׯ', '״'), - ('؆', '؏'), - ('؛', '؛'), - ('؝', 'ي'), - ('٠', 'ٯ'), - ('ٱ', 'ە'), - ('۞', '۞'), - ('ۥ', 'ۦ'), - ('۩', '۩'), - ('ۮ', '܍'), - ('ܐ', 'ܐ'), - ('ܒ', 'ܯ'), - ('ݍ', 'ޥ'), - ('ޱ', 'ޱ'), - ('߀', 'ߪ'), - ('ߴ', 'ߺ'), - ('߾', 'ࠕ'), - ('ࠚ', 'ࠚ'), - ('ࠤ', 'ࠤ'), - ('ࠨ', 'ࠨ'), - ('࠰', '࠾'), - ('ࡀ', 'ࡘ'), - ('࡞', '࡞'), - ('ࡠ', 'ࡪ'), - ('ࡰ', 'ࢎ'), - ('ࢠ', 'ࣉ'), - ('ः', 'ह'), - ('ऻ', 'ऻ'), - ('ऽ', 'ी'), - ('ॉ', 'ौ'), - ('ॎ', 'ॐ'), - ('क़', 'ॡ'), - ('।', 'ঀ'), - ('ং', 'ঃ'), - ('অ', 'ঌ'), - ('এ', 'ঐ'), - ('ও', 'ন'), - ('প', 'র'), - ('ল', 'ল'), - ('শ', 'হ'), - ('ঽ', 'ঽ'), - ('ি', 'ী'), - ('ে', 'ৈ'), - 
('ো', 'ৌ'), - ('ৎ', 'ৎ'), - ('ড়', 'ঢ়'), - ('য়', 'ৡ'), - ('০', '৽'), - ('ਃ', 'ਃ'), - ('ਅ', 'ਊ'), - ('ਏ', 'ਐ'), - ('ਓ', 'ਨ'), - ('ਪ', 'ਰ'), - ('ਲ', 'ਲ਼'), - ('ਵ', 'ਸ਼'), - ('ਸ', 'ਹ'), - ('ਾ', 'ੀ'), - ('ਖ਼', 'ੜ'), - ('ਫ਼', 'ਫ਼'), - ('੦', '੯'), - ('ੲ', 'ੴ'), - ('੶', '੶'), - ('ઃ', 'ઃ'), - ('અ', 'ઍ'), - ('એ', 'ઑ'), - ('ઓ', 'ન'), - ('પ', 'ર'), - ('લ', 'ળ'), - ('વ', 'હ'), - ('ઽ', 'ી'), - ('ૉ', 'ૉ'), - ('ો', 'ૌ'), - ('ૐ', 'ૐ'), - ('ૠ', 'ૡ'), - ('૦', '૱'), - ('ૹ', 'ૹ'), - ('ଂ', 'ଃ'), - ('ଅ', 'ଌ'), - ('ଏ', 'ଐ'), - ('ଓ', 'ନ'), - ('ପ', 'ର'), - ('ଲ', 'ଳ'), - ('ଵ', 'ହ'), - ('ଽ', 'ଽ'), - ('ୀ', 'ୀ'), - ('େ', 'ୈ'), - ('ୋ', 'ୌ'), - ('ଡ଼', 'ଢ଼'), - ('ୟ', 'ୡ'), - ('୦', '୷'), - ('ஃ', 'ஃ'), - ('அ', 'ஊ'), - ('எ', 'ஐ'), - ('ஒ', 'க'), - ('ங', 'ச'), - ('ஜ', 'ஜ'), - ('ஞ', 'ட'), - ('ண', 'த'), - ('ந', 'ப'), - ('ம', 'ஹ'), - ('ி', 'ி'), - ('ு', 'ூ'), - ('ெ', 'ை'), - ('ொ', 'ௌ'), - ('ௐ', 'ௐ'), - ('௦', '௺'), - ('ఁ', 'ః'), - ('అ', 'ఌ'), - ('ఎ', 'ఐ'), - ('ఒ', 'న'), - ('ప', 'హ'), - ('ఽ', 'ఽ'), - ('ు', 'ౄ'), - ('ౘ', 'ౚ'), - ('ౝ', 'ౝ'), - ('ౠ', 'ౡ'), - ('౦', '౯'), - ('౷', 'ಀ'), - ('ಂ', 'ಌ'), - ('ಎ', 'ಐ'), - ('ಒ', 'ನ'), - ('ಪ', 'ಳ'), - ('ವ', 'ಹ'), - ('ಽ', 'ಾ'), - ('ು', 'ು'), - ('ೃ', 'ೄ'), - ('ೝ', 'ೞ'), - ('ೠ', 'ೡ'), - ('೦', '೯'), - ('ೱ', 'ೳ'), - ('ം', 'ഌ'), - ('എ', 'ഐ'), - ('ഒ', 'ഺ'), - ('ഽ', 'ഽ'), - ('ി', 'ീ'), - ('െ', 'ൈ'), - ('ൊ', 'ൌ'), - ('ൎ', '൏'), - ('ൔ', 'ൖ'), - ('൘', 'ൡ'), - ('൦', 'ൿ'), - ('ං', 'ඃ'), - ('අ', 'ඖ'), - ('ක', 'න'), - ('ඳ', 'ර'), - ('ල', 'ල'), - ('ව', 'ෆ'), - ('ැ', 'ෑ'), - ('ෘ', 'ෞ'), - ('෦', '෯'), - ('ෲ', '෴'), - ('ก', 'ะ'), - ('า', 'ำ'), - ('฿', 'ๆ'), - ('๏', '๛'), - ('ກ', 'ຂ'), - ('ຄ', 'ຄ'), - ('ຆ', 'ຊ'), - ('ຌ', 'ຣ'), - ('ລ', 'ລ'), - ('ວ', 'ະ'), - ('າ', 'ຳ'), - ('ຽ', 'ຽ'), - ('ເ', 'ໄ'), - ('ໆ', 'ໆ'), - ('໐', '໙'), - ('ໜ', 'ໟ'), - ('ༀ', '༗'), - ('༚', '༴'), - ('༶', '༶'), - ('༸', '༸'), - ('༺', 'ཇ'), - ('ཉ', 'ཬ'), - ('ཿ', 'ཿ'), - ('྅', '྅'), - ('ྈ', 'ྌ'), - ('྾', '࿅'), - ('࿇', '࿌'), - ('࿎', '࿚'), - ('က', 'ာ'), - ('ေ', 'ေ'), - ('း', 'း'), - ('ျ', 'ြ'), - ('ဿ', 'ၗ'), - ('ၚ', 'ၝ'), - ('ၡ', 'ၰ'), - ('ၵ', 'ႁ'), - ('ႃ', 'ႄ'), - ('ႇ', 'ႌ'), - ('ႎ', 'ႜ'), - ('႞', 'Ⴥ'), - ('Ⴧ', 'Ⴧ'), - ('Ⴭ', 'Ⴭ'), - ('ა', 'ቈ'), - ('ቊ', 'ቍ'), - ('ቐ', 'ቖ'), - ('ቘ', 'ቘ'), - ('ቚ', 'ቝ'), - ('በ', 'ኈ'), - ('ኊ', 'ኍ'), - ('ነ', 'ኰ'), - ('ኲ', 'ኵ'), - ('ኸ', 'ኾ'), - ('ዀ', 'ዀ'), - ('ዂ', 'ዅ'), - ('ወ', 'ዖ'), - ('ዘ', 'ጐ'), - ('ጒ', 'ጕ'), - ('ጘ', 'ፚ'), - ('፠', '፼'), - ('ᎀ', '᎙'), - ('Ꭰ', 'Ᏽ'), - ('ᏸ', 'ᏽ'), - ('᐀', '᚜'), - ('ᚠ', 'ᛸ'), - ('ᜀ', 'ᜑ'), - ('ᜟ', 'ᜱ'), - ('᜵', '᜶'), - ('ᝀ', 'ᝑ'), - ('ᝠ', 'ᝬ'), - ('ᝮ', 'ᝰ'), - ('ក', 'ឳ'), - ('ា', 'ា'), - ('ើ', 'ៅ'), - ('ះ', 'ៈ'), - ('។', 'ៜ'), - ('០', '៩'), - ('៰', '៹'), - ('᠀', '᠊'), - ('᠐', '᠙'), - ('ᠠ', 'ᡸ'), - ('ᢀ', 'ᢄ'), - ('ᢇ', 'ᢨ'), - ('ᢪ', 'ᢪ'), - ('ᢰ', 'ᣵ'), - ('ᤀ', 'ᤞ'), - ('ᤣ', 'ᤦ'), - ('ᤩ', 'ᤫ'), - ('ᤰ', 'ᤱ'), - ('ᤳ', 'ᤸ'), - ('᥀', '᥀'), - ('᥄', 'ᥭ'), - ('ᥰ', 'ᥴ'), - ('ᦀ', 'ᦫ'), - ('ᦰ', 'ᧉ'), - ('᧐', '᧚'), - ('᧞', 'ᨖ'), - ('ᨙ', 'ᨚ'), - ('᨞', 'ᩕ'), - ('ᩗ', 'ᩗ'), - ('ᩡ', 'ᩡ'), - ('ᩣ', 'ᩤ'), - ('ᩭ', 'ᩲ'), - ('᪀', '᪉'), - ('᪐', '᪙'), - ('᪠', '᪭'), - ('ᬄ', 'ᬳ'), - ('ᬾ', 'ᭁ'), - ('ᭅ', 'ᭌ'), - ('᭎', '᭪'), - ('᭴', '᭿'), - ('ᮂ', 'ᮡ'), - ('ᮦ', 'ᮧ'), - ('ᮮ', 'ᯥ'), - ('ᯧ', 'ᯧ'), - ('ᯪ', 'ᯬ'), - ('ᯮ', 'ᯮ'), - ('᯼', 'ᰫ'), - ('ᰴ', 'ᰵ'), - ('᰻', '᱉'), - ('ᱍ', 'ᲊ'), - ('Ა', 'Ჺ'), - ('Ჽ', '᳇'), - ('᳓', '᳓'), - ('᳡', '᳡'), - ('ᳩ', 'ᳬ'), - ('ᳮ', 'ᳳ'), - ('ᳵ', '᳷'), - ('ᳺ', 'ᳺ'), - ('ᴀ', 'ᶿ'), - ('Ḁ', 'ἕ'), - ('Ἐ', 'Ἕ'), - ('ἠ', 'ὅ'), - ('Ὀ', 'Ὅ'), - ('ὐ', 'ὗ'), - ('Ὑ', 'Ὑ'), - ('Ὓ', 'Ὓ'), - ('Ὕ', 'Ὕ'), - ('Ὗ', 'ώ'), - ('ᾀ', 'ᾴ'), - ('ᾶ', 'ῄ'), - ('ῆ', 'ΐ'), - ('ῖ', 'Ί'), - ('῝', '`'), - ('ῲ', 'ῴ'), - ('ῶ', '῾'), - 
('\u{2000}', '\u{200a}'), - ('‐', '‧'), - ('\u{202f}', '\u{205f}'), - ('⁰', 'ⁱ'), - ('⁴', '₎'), - ('ₐ', 'ₜ'), - ('₠', '⃀'), - ('℀', '↋'), - ('←', '␩'), - ('⑀', '⑊'), - ('①', '⭳'), - ('⭶', '⮕'), - ('⮗', 'ⳮ'), - ('Ⳳ', 'ⳳ'), - ('⳹', 'ⴥ'), - ('ⴧ', 'ⴧ'), - ('ⴭ', 'ⴭ'), - ('ⴰ', 'ⵧ'), - ('ⵯ', '⵰'), - ('ⶀ', 'ⶖ'), - ('ⶠ', 'ⶦ'), - ('ⶨ', 'ⶮ'), - ('ⶰ', 'ⶶ'), - ('ⶸ', 'ⶾ'), - ('ⷀ', 'ⷆ'), - ('ⷈ', 'ⷎ'), - ('ⷐ', 'ⷖ'), - ('ⷘ', 'ⷞ'), - ('⸀', '⹝'), - ('⺀', '⺙'), - ('⺛', '⻳'), - ('⼀', '⿕'), - ('⿰', '〩'), - ('〰', '〿'), - ('ぁ', 'ゖ'), - ('゛', 'ヿ'), - ('ㄅ', 'ㄯ'), - ('ㄱ', 'ㆎ'), - ('㆐', '㇥'), - ('㇯', '㈞'), - ('㈠', 'ꒌ'), - ('꒐', '꓆'), - ('ꓐ', 'ꘫ'), - ('Ꙁ', 'ꙮ'), - ('꙳', '꙳'), - ('꙾', 'ꚝ'), - ('ꚠ', 'ꛯ'), - ('꛲', '꛷'), - ('꜀', 'ꟍ'), - ('Ꟑ', 'ꟑ'), - ('ꟓ', 'ꟓ'), - ('ꟕ', 'Ƛ'), - ('ꟲ', 'ꠁ'), - ('ꠃ', 'ꠅ'), - ('ꠇ', 'ꠊ'), - ('ꠌ', 'ꠤ'), - ('ꠧ', '꠫'), - ('꠰', '꠹'), - ('ꡀ', '꡷'), - ('ꢀ', 'ꣃ'), - ('꣎', '꣙'), - ('ꣲ', 'ꣾ'), - ('꤀', 'ꤥ'), - ('꤮', 'ꥆ'), - ('ꥒ', 'ꥒ'), - ('꥟', 'ꥼ'), - ('ꦃ', 'ꦲ'), - ('ꦴ', 'ꦵ'), - ('ꦺ', 'ꦻ'), - ('ꦾ', 'ꦿ'), - ('꧁', '꧍'), - ('ꧏ', '꧙'), - ('꧞', 'ꧤ'), - ('ꧦ', 'ꧾ'), - ('ꨀ', 'ꨨ'), - ('ꨯ', 'ꨰ'), - ('ꨳ', 'ꨴ'), - ('ꩀ', 'ꩂ'), - ('ꩄ', 'ꩋ'), - ('ꩍ', 'ꩍ'), - ('꩐', '꩙'), - ('꩜', 'ꩻ'), - ('ꩽ', 'ꪯ'), - ('ꪱ', 'ꪱ'), - ('ꪵ', 'ꪶ'), - ('ꪹ', 'ꪽ'), - ('ꫀ', 'ꫀ'), - ('ꫂ', 'ꫂ'), - ('ꫛ', 'ꫫ'), - ('ꫮ', 'ꫵ'), - ('ꬁ', 'ꬆ'), - ('ꬉ', 'ꬎ'), - ('ꬑ', 'ꬖ'), - ('ꬠ', 'ꬦ'), - ('ꬨ', 'ꬮ'), - ('ꬰ', '꭫'), - ('ꭰ', 'ꯤ'), - ('ꯦ', 'ꯧ'), - ('ꯩ', '꯬'), - ('꯰', '꯹'), - ('가', '힣'), - ('ힰ', 'ퟆ'), - ('ퟋ', 'ퟻ'), - ('豈', '舘'), - ('並', '龎'), - ('ff', 'st'), - ('ﬓ', 'ﬗ'), - ('יִ', 'יִ'), - ('ײַ', 'זּ'), - ('טּ', 'לּ'), - ('מּ', 'מּ'), - ('נּ', 'סּ'), - ('ףּ', 'פּ'), - ('צּ', '﯂'), - ('ﯓ', 'ﶏ'), - ('ﶒ', 'ﷇ'), - ('﷏', '﷏'), - ('ﷰ', '﷿'), - ('︐', '︙'), - ('︰', '﹒'), - ('﹔', '﹦'), - ('﹨', '﹫'), - ('ﹰ', 'ﹴ'), - ('ﹶ', 'ﻼ'), - ('!', 'ン'), - ('ᅠ', 'ᄒ'), - ('ᅡ', 'ᅦ'), - ('ᅧ', 'ᅬ'), - ('ᅭ', 'ᅲ'), - ('ᅳ', 'ᅵ'), - ('¢', '₩'), - ('│', '○'), - ('', '�'), - ('𐀀', '𐀋'), - ('𐀍', '𐀦'), - ('𐀨', '𐀺'), - ('𐀼', '𐀽'), - ('𐀿', '𐁍'), - ('𐁐', '𐁝'), - ('𐂀', '𐃺'), - ('𐄀', '𐄂'), - ('𐄇', '𐄳'), - ('𐄷', '𐆎'), - ('𐆐', '𐆜'), - ('𐆠', '𐆠'), - ('𐇐', '𐇼'), - ('𐊀', '𐊜'), - ('𐊠', '𐋐'), - ('𐋡', '𐋻'), - ('𐌀', '𐌣'), - ('𐌭', '𐍊'), - ('𐍐', '𐍵'), - ('𐎀', '𐎝'), - ('𐎟', '𐏃'), - ('𐏈', '𐏕'), - ('𐐀', '𐒝'), - ('𐒠', '𐒩'), - ('𐒰', '𐓓'), - ('𐓘', '𐓻'), - ('𐔀', '𐔧'), - ('𐔰', '𐕣'), - ('𐕯', '𐕺'), - ('𐕼', '𐖊'), - ('𐖌', '𐖒'), - ('𐖔', '𐖕'), - ('𐖗', '𐖡'), - ('𐖣', '𐖱'), - ('𐖳', '𐖹'), - ('𐖻', '𐖼'), - ('𐗀', '𐗳'), - ('𐘀', '𐜶'), - ('𐝀', '𐝕'), - ('𐝠', '𐝧'), - ('𐞀', '𐞅'), - ('𐞇', '𐞰'), - ('𐞲', '𐞺'), - ('𐠀', '𐠅'), - ('𐠈', '𐠈'), - ('𐠊', '𐠵'), - ('𐠷', '𐠸'), - ('𐠼', '𐠼'), - ('𐠿', '𐡕'), - ('𐡗', '𐢞'), - ('𐢧', '𐢯'), - ('𐣠', '𐣲'), - ('𐣴', '𐣵'), - ('𐣻', '𐤛'), - ('𐤟', '𐤹'), - ('𐤿', '𐤿'), - ('𐦀', '𐦷'), - ('𐦼', '𐧏'), - ('𐧒', '𐨀'), - ('𐨐', '𐨓'), - ('𐨕', '𐨗'), - ('𐨙', '𐨵'), - ('𐩀', '𐩈'), - ('𐩐', '𐩘'), - ('𐩠', '𐪟'), - ('𐫀', '𐫤'), - ('𐫫', '𐫶'), - ('𐬀', '𐬵'), - ('𐬹', '𐭕'), - ('𐭘', '𐭲'), - ('𐭸', '𐮑'), - ('𐮙', '𐮜'), - ('𐮩', '𐮯'), - ('𐰀', '𐱈'), - ('𐲀', '𐲲'), - ('𐳀', '𐳲'), - ('𐳺', '𐴣'), - ('𐴰', '𐴹'), - ('𐵀', '𐵥'), - ('𐵮', '𐶅'), - ('𐶎', '𐶏'), - ('𐹠', '𐹾'), - ('𐺀', '𐺩'), - ('𐺭', '𐺭'), - ('𐺰', '𐺱'), - ('𐻂', '𐻄'), - ('𐼀', '𐼧'), - ('𐼰', '𐽅'), - ('𐽑', '𐽙'), - ('𐽰', '𐾁'), - ('𐾆', '𐾉'), - ('𐾰', '𐿋'), - ('𐿠', '𐿶'), - ('𑀀', '𑀀'), - ('𑀂', '𑀷'), - ('𑁇', '𑁍'), - ('𑁒', '𑁯'), - ('𑁱', '𑁲'), - ('𑁵', '𑁵'), - ('𑂂', '𑂲'), - ('𑂷', '𑂸'), - ('𑂻', '𑂼'), - ('𑂾', '𑃁'), - ('𑃐', '𑃨'), - ('𑃰', '𑃹'), - ('𑄃', '𑄦'), - ('𑄬', '𑄬'), - ('𑄶', '𑅇'), - ('𑅐', '𑅲'), - ('𑅴', '𑅶'), - ('𑆂', '𑆵'), - ('𑆿', '𑆿'), - ('𑇁', '𑇈'), - ('𑇍', '𑇎'), - ('𑇐', '𑇟'), - ('𑇡', '𑇴'), - ('𑈀', '𑈑'), - ('𑈓', '𑈮'), 
- ('𑈲', '𑈳'), - ('𑈸', '𑈽'), - ('𑈿', '𑉀'), - ('𑊀', '𑊆'), - ('𑊈', '𑊈'), - ('𑊊', '𑊍'), - ('𑊏', '𑊝'), - ('𑊟', '𑊩'), - ('𑊰', '𑋞'), - ('𑋠', '𑋢'), - ('𑋰', '𑋹'), - ('𑌂', '𑌃'), - ('𑌅', '𑌌'), - ('𑌏', '𑌐'), - ('𑌓', '𑌨'), - ('𑌪', '𑌰'), - ('𑌲', '𑌳'), - ('𑌵', '𑌹'), - ('𑌽', '𑌽'), - ('𑌿', '𑌿'), - ('𑍁', '𑍄'), - ('𑍇', '𑍈'), - ('𑍋', '𑍌'), - ('𑍐', '𑍐'), - ('𑍝', '𑍣'), - ('𑎀', '𑎉'), - ('𑎋', '𑎋'), - ('𑎎', '𑎎'), - ('𑎐', '𑎵'), - ('𑎷', '𑎷'), - ('𑎹', '𑎺'), - ('𑏊', '𑏊'), - ('𑏌', '𑏍'), - ('𑏑', '𑏑'), - ('𑏓', '𑏕'), - ('𑏗', '𑏘'), - ('𑐀', '𑐷'), - ('𑑀', '𑑁'), - ('𑑅', '𑑅'), - ('𑑇', '𑑛'), - ('𑑝', '𑑝'), - ('𑑟', '𑑡'), - ('𑒀', '𑒯'), - ('𑒱', '𑒲'), - ('𑒹', '𑒹'), - ('𑒻', '𑒼'), - ('𑒾', '𑒾'), - ('𑓁', '𑓁'), - ('𑓄', '𑓇'), - ('𑓐', '𑓙'), - ('𑖀', '𑖮'), - ('𑖰', '𑖱'), - ('𑖸', '𑖻'), - ('𑖾', '𑖾'), - ('𑗁', '𑗛'), - ('𑘀', '𑘲'), - ('𑘻', '𑘼'), - ('𑘾', '𑘾'), - ('𑙁', '𑙄'), - ('𑙐', '𑙙'), - ('𑙠', '𑙬'), - ('𑚀', '𑚪'), - ('𑚬', '𑚬'), - ('𑚮', '𑚯'), - ('𑚸', '𑚹'), - ('𑛀', '𑛉'), - ('𑛐', '𑛣'), - ('𑜀', '𑜚'), - ('𑜞', '𑜞'), - ('𑜠', '𑜡'), - ('𑜦', '𑜦'), - ('𑜰', '𑝆'), - ('𑠀', '𑠮'), - ('𑠸', '𑠸'), - ('𑠻', '𑠻'), - ('𑢠', '𑣲'), - ('𑣿', '𑤆'), - ('𑤉', '𑤉'), - ('𑤌', '𑤓'), - ('𑤕', '𑤖'), - ('𑤘', '𑤯'), - ('𑤱', '𑤵'), - ('𑤷', '𑤸'), - ('𑤿', '𑥂'), - ('𑥄', '𑥆'), - ('𑥐', '𑥙'), - ('𑦠', '𑦧'), - ('𑦪', '𑧓'), - ('𑧜', '𑧟'), - ('𑧡', '𑧤'), - ('𑨀', '𑨀'), - ('𑨋', '𑨲'), - ('𑨹', '𑨺'), - ('𑨿', '𑩆'), - ('𑩐', '𑩐'), - ('𑩗', '𑩘'), - ('𑩜', '𑪉'), - ('𑪗', '𑪗'), - ('𑪚', '𑪢'), - ('𑪰', '𑫸'), - ('𑬀', '𑬉'), - ('𑯀', '𑯡'), - ('𑯰', '𑯹'), - ('𑰀', '𑰈'), - ('𑰊', '𑰯'), - ('𑰾', '𑰾'), - ('𑱀', '𑱅'), - ('𑱐', '𑱬'), - ('𑱰', '𑲏'), - ('𑲩', '𑲩'), - ('𑲱', '𑲱'), - ('𑲴', '𑲴'), - ('𑴀', '𑴆'), - ('𑴈', '𑴉'), - ('𑴋', '𑴰'), - ('𑵆', '𑵆'), - ('𑵐', '𑵙'), - ('𑵠', '𑵥'), - ('𑵧', '𑵨'), - ('𑵪', '𑶎'), - ('𑶓', '𑶔'), - ('𑶖', '𑶖'), - ('𑶘', '𑶘'), - ('𑶠', '𑶩'), - ('𑻠', '𑻲'), - ('𑻵', '𑻸'), - ('𑼂', '𑼐'), - ('𑼒', '𑼵'), - ('𑼾', '𑼿'), - ('𑽃', '𑽙'), - ('𑾰', '𑾰'), - ('𑿀', '𑿱'), - ('𑿿', '𒎙'), - ('𒐀', '𒑮'), - ('𒑰', '𒑴'), - ('𒒀', '𒕃'), - ('𒾐', '𒿲'), - ('𓀀', '𓐯'), - ('𓑁', '𓑆'), - ('𓑠', '𔏺'), - ('𔐀', '𔙆'), - ('𖄀', '𖄝'), - ('𖄪', '𖄬'), - ('𖄰', '𖄹'), - ('𖠀', '𖨸'), - ('𖩀', '𖩞'), - ('𖩠', '𖩩'), - ('𖩮', '𖪾'), - ('𖫀', '𖫉'), - ('𖫐', '𖫭'), - ('𖫵', '𖫵'), - ('𖬀', '𖬯'), - ('𖬷', '𖭅'), - ('𖭐', '𖭙'), - ('𖭛', '𖭡'), - ('𖭣', '𖭷'), - ('𖭽', '𖮏'), - ('𖵀', '𖵹'), - ('𖹀', '𖺚'), - ('𖼀', '𖽊'), - ('𖽐', '𖾇'), - ('𖾓', '𖾟'), - ('𖿠', '𖿣'), - ('𗀀', '𘟷'), - ('𘠀', '𘳕'), - ('𘳿', '𘴈'), - ('𚿰', '𚿳'), - ('𚿵', '𚿻'), - ('𚿽', '𚿾'), - ('𛀀', '𛄢'), - ('𛄲', '𛄲'), - ('𛅐', '𛅒'), - ('𛅕', '𛅕'), - ('𛅤', '𛅧'), - ('𛅰', '𛋻'), - ('𛰀', '𛱪'), - ('𛱰', '𛱼'), - ('𛲀', '𛲈'), - ('𛲐', '𛲙'), - ('𛲜', '𛲜'), - ('𛲟', '𛲟'), - ('𜰀', '𜳹'), - ('𜴀', '𜺳'), - ('𜽐', '𜿃'), - ('𝀀', '𝃵'), - ('𝄀', '𝄦'), - ('𝄩', '𝅘𝅥𝅲'), - ('𝅪', '𝅬'), - ('𝆃', '𝆄'), - ('𝆌', '𝆩'), - ('𝆮', '𝇪'), - ('𝈀', '𝉁'), - ('𝉅', '𝉅'), - ('𝋀', '𝋓'), - ('𝋠', '𝋳'), - ('𝌀', '𝍖'), - ('𝍠', '𝍸'), - ('𝐀', '𝑔'), - ('𝑖', '𝒜'), - ('𝒞', '𝒟'), - ('𝒢', '𝒢'), - ('𝒥', '𝒦'), - ('𝒩', '𝒬'), - ('𝒮', '𝒹'), - ('𝒻', '𝒻'), - ('𝒽', '𝓃'), - ('𝓅', '𝔅'), - ('𝔇', '𝔊'), - ('𝔍', '𝔔'), - ('𝔖', '𝔜'), - ('𝔞', '𝔹'), - ('𝔻', '𝔾'), - ('𝕀', '𝕄'), - ('𝕆', '𝕆'), - ('𝕊', '𝕐'), - ('𝕒', '𝚥'), - ('𝚨', '𝟋'), - ('𝟎', '𝧿'), - ('𝨷', '𝨺'), - ('𝩭', '𝩴'), - ('𝩶', '𝪃'), - ('𝪅', '𝪋'), - ('𝼀', '𝼞'), - ('𝼥', '𝼪'), - ('𞀰', '𞁭'), - ('𞄀', '𞄬'), - ('𞄷', '𞄽'), - ('𞅀', '𞅉'), - ('𞅎', '𞅏'), - ('𞊐', '𞊭'), - ('𞋀', '𞋫'), - ('𞋰', '𞋹'), - ('𞋿', '𞋿'), - ('𞓐', '𞓫'), - ('𞓰', '𞓹'), - ('𞗐', '𞗭'), - ('𞗰', '𞗺'), - ('𞗿', '𞗿'), - ('𞟠', '𞟦'), - ('𞟨', '𞟫'), - ('𞟭', '𞟮'), - ('𞟰', '𞟾'), - ('𞠀', '𞣄'), - ('𞣇', '𞣏'), - ('𞤀', '𞥃'), - ('𞥋', '𞥋'), - ('𞥐', '𞥙'), - ('𞥞', '𞥟'), - ('𞱱', '𞲴'), - ('𞴁', '𞴽'), - ('𞸀', '𞸃'), - ('𞸅', '𞸟'), - ('𞸡', '𞸢'), - ('𞸤', 
'𞸤'), - ('𞸧', '𞸧'), - ('𞸩', '𞸲'), - ('𞸴', '𞸷'), - ('𞸹', '𞸹'), - ('𞸻', '𞸻'), - ('𞹂', '𞹂'), - ('𞹇', '𞹇'), - ('𞹉', '𞹉'), - ('𞹋', '𞹋'), - ('𞹍', '𞹏'), - ('𞹑', '𞹒'), - ('𞹔', '𞹔'), - ('𞹗', '𞹗'), - ('𞹙', '𞹙'), - ('𞹛', '𞹛'), - ('𞹝', '𞹝'), - ('𞹟', '𞹟'), - ('𞹡', '𞹢'), - ('𞹤', '𞹤'), - ('𞹧', '𞹪'), - ('𞹬', '𞹲'), - ('𞹴', '𞹷'), - ('𞹹', '𞹼'), - ('𞹾', '𞹾'), - ('𞺀', '𞺉'), - ('𞺋', '𞺛'), - ('𞺡', '𞺣'), - ('𞺥', '𞺩'), - ('𞺫', '𞺻'), - ('𞻰', '𞻱'), - ('🀀', '🀫'), - ('🀰', '🂓'), - ('🂠', '🂮'), - ('🂱', '🂿'), - ('🃁', '🃏'), - ('🃑', '🃵'), - ('🄀', '🆭'), - ('🇦', '🈂'), - ('🈐', '🈻'), - ('🉀', '🉈'), - ('🉐', '🉑'), - ('🉠', '🉥'), - ('🌀', '🛗'), - ('🛜', '🛬'), - ('🛰', '🛼'), - ('🜀', '🝶'), - ('🝻', '🟙'), - ('🟠', '🟫'), - ('🟰', '🟰'), - ('🠀', '🠋'), - ('🠐', '🡇'), - ('🡐', '🡙'), - ('🡠', '🢇'), - ('🢐', '🢭'), - ('🢰', '🢻'), - ('🣀', '🣁'), - ('🤀', '🩓'), - ('🩠', '🩭'), - ('🩰', '🩼'), - ('🪀', '🪉'), - ('🪏', '🫆'), - ('🫎', '🫜'), - ('🫟', '🫩'), - ('🫰', '🫸'), - ('🬀', '🮒'), - ('🮔', '🯹'), - ('𠀀', '𪛟'), - ('𪜀', '𫜹'), - ('𫝀', '𫠝'), - ('𫠠', '𬺡'), - ('𬺰', '𮯠'), - ('𮯰', '𮹝'), - ('丽', '𪘀'), - ('𰀀', '𱍊'), - ('𱍐', '𲎯'), -]; - -pub const GRAPHEME_EXTEND: &'static [(char, char)] = &[ - ('\u{300}', '\u{36f}'), - ('\u{483}', '\u{489}'), - ('\u{591}', '\u{5bd}'), - ('\u{5bf}', '\u{5bf}'), - ('\u{5c1}', '\u{5c2}'), - ('\u{5c4}', '\u{5c5}'), - ('\u{5c7}', '\u{5c7}'), - ('\u{610}', '\u{61a}'), - ('\u{64b}', '\u{65f}'), - ('\u{670}', '\u{670}'), - ('\u{6d6}', '\u{6dc}'), - ('\u{6df}', '\u{6e4}'), - ('\u{6e7}', '\u{6e8}'), - ('\u{6ea}', '\u{6ed}'), - ('\u{711}', '\u{711}'), - ('\u{730}', '\u{74a}'), - ('\u{7a6}', '\u{7b0}'), - ('\u{7eb}', '\u{7f3}'), - ('\u{7fd}', '\u{7fd}'), - ('\u{816}', '\u{819}'), - ('\u{81b}', '\u{823}'), - ('\u{825}', '\u{827}'), - ('\u{829}', '\u{82d}'), - ('\u{859}', '\u{85b}'), - ('\u{897}', '\u{89f}'), - ('\u{8ca}', '\u{8e1}'), - ('\u{8e3}', '\u{902}'), - ('\u{93a}', '\u{93a}'), - ('\u{93c}', '\u{93c}'), - ('\u{941}', '\u{948}'), - ('\u{94d}', '\u{94d}'), - ('\u{951}', '\u{957}'), - ('\u{962}', '\u{963}'), - ('\u{981}', '\u{981}'), - ('\u{9bc}', '\u{9bc}'), - ('\u{9be}', '\u{9be}'), - ('\u{9c1}', '\u{9c4}'), - ('\u{9cd}', '\u{9cd}'), - ('\u{9d7}', '\u{9d7}'), - ('\u{9e2}', '\u{9e3}'), - ('\u{9fe}', '\u{9fe}'), - ('\u{a01}', '\u{a02}'), - ('\u{a3c}', '\u{a3c}'), - ('\u{a41}', '\u{a42}'), - ('\u{a47}', '\u{a48}'), - ('\u{a4b}', '\u{a4d}'), - ('\u{a51}', '\u{a51}'), - ('\u{a70}', '\u{a71}'), - ('\u{a75}', '\u{a75}'), - ('\u{a81}', '\u{a82}'), - ('\u{abc}', '\u{abc}'), - ('\u{ac1}', '\u{ac5}'), - ('\u{ac7}', '\u{ac8}'), - ('\u{acd}', '\u{acd}'), - ('\u{ae2}', '\u{ae3}'), - ('\u{afa}', '\u{aff}'), - ('\u{b01}', '\u{b01}'), - ('\u{b3c}', '\u{b3c}'), - ('\u{b3e}', '\u{b3f}'), - ('\u{b41}', '\u{b44}'), - ('\u{b4d}', '\u{b4d}'), - ('\u{b55}', '\u{b57}'), - ('\u{b62}', '\u{b63}'), - ('\u{b82}', '\u{b82}'), - ('\u{bbe}', '\u{bbe}'), - ('\u{bc0}', '\u{bc0}'), - ('\u{bcd}', '\u{bcd}'), - ('\u{bd7}', '\u{bd7}'), - ('\u{c00}', '\u{c00}'), - ('\u{c04}', '\u{c04}'), - ('\u{c3c}', '\u{c3c}'), - ('\u{c3e}', '\u{c40}'), - ('\u{c46}', '\u{c48}'), - ('\u{c4a}', '\u{c4d}'), - ('\u{c55}', '\u{c56}'), - ('\u{c62}', '\u{c63}'), - ('\u{c81}', '\u{c81}'), - ('\u{cbc}', '\u{cbc}'), - ('\u{cbf}', '\u{cc0}'), - ('\u{cc2}', '\u{cc2}'), - ('\u{cc6}', '\u{cc8}'), - ('\u{cca}', '\u{ccd}'), - ('\u{cd5}', '\u{cd6}'), - ('\u{ce2}', '\u{ce3}'), - ('\u{d00}', '\u{d01}'), - ('\u{d3b}', '\u{d3c}'), - ('\u{d3e}', '\u{d3e}'), - ('\u{d41}', '\u{d44}'), - ('\u{d4d}', '\u{d4d}'), - ('\u{d57}', '\u{d57}'), - ('\u{d62}', '\u{d63}'), - ('\u{d81}', '\u{d81}'), - ('\u{dca}', '\u{dca}'), - ('\u{dcf}', 
'\u{dcf}'), - ('\u{dd2}', '\u{dd4}'), - ('\u{dd6}', '\u{dd6}'), - ('\u{ddf}', '\u{ddf}'), - ('\u{e31}', '\u{e31}'), - ('\u{e34}', '\u{e3a}'), - ('\u{e47}', '\u{e4e}'), - ('\u{eb1}', '\u{eb1}'), - ('\u{eb4}', '\u{ebc}'), - ('\u{ec8}', '\u{ece}'), - ('\u{f18}', '\u{f19}'), - ('\u{f35}', '\u{f35}'), - ('\u{f37}', '\u{f37}'), - ('\u{f39}', '\u{f39}'), - ('\u{f71}', '\u{f7e}'), - ('\u{f80}', '\u{f84}'), - ('\u{f86}', '\u{f87}'), - ('\u{f8d}', '\u{f97}'), - ('\u{f99}', '\u{fbc}'), - ('\u{fc6}', '\u{fc6}'), - ('\u{102d}', '\u{1030}'), - ('\u{1032}', '\u{1037}'), - ('\u{1039}', '\u{103a}'), - ('\u{103d}', '\u{103e}'), - ('\u{1058}', '\u{1059}'), - ('\u{105e}', '\u{1060}'), - ('\u{1071}', '\u{1074}'), - ('\u{1082}', '\u{1082}'), - ('\u{1085}', '\u{1086}'), - ('\u{108d}', '\u{108d}'), - ('\u{109d}', '\u{109d}'), - ('\u{135d}', '\u{135f}'), - ('\u{1712}', '\u{1715}'), - ('\u{1732}', '\u{1734}'), - ('\u{1752}', '\u{1753}'), - ('\u{1772}', '\u{1773}'), - ('\u{17b4}', '\u{17b5}'), - ('\u{17b7}', '\u{17bd}'), - ('\u{17c6}', '\u{17c6}'), - ('\u{17c9}', '\u{17d3}'), - ('\u{17dd}', '\u{17dd}'), - ('\u{180b}', '\u{180d}'), - ('\u{180f}', '\u{180f}'), - ('\u{1885}', '\u{1886}'), - ('\u{18a9}', '\u{18a9}'), - ('\u{1920}', '\u{1922}'), - ('\u{1927}', '\u{1928}'), - ('\u{1932}', '\u{1932}'), - ('\u{1939}', '\u{193b}'), - ('\u{1a17}', '\u{1a18}'), - ('\u{1a1b}', '\u{1a1b}'), - ('\u{1a56}', '\u{1a56}'), - ('\u{1a58}', '\u{1a5e}'), - ('\u{1a60}', '\u{1a60}'), - ('\u{1a62}', '\u{1a62}'), - ('\u{1a65}', '\u{1a6c}'), - ('\u{1a73}', '\u{1a7c}'), - ('\u{1a7f}', '\u{1a7f}'), - ('\u{1ab0}', '\u{1ace}'), - ('\u{1b00}', '\u{1b03}'), - ('\u{1b34}', '\u{1b3d}'), - ('\u{1b42}', '\u{1b44}'), - ('\u{1b6b}', '\u{1b73}'), - ('\u{1b80}', '\u{1b81}'), - ('\u{1ba2}', '\u{1ba5}'), - ('\u{1ba8}', '\u{1bad}'), - ('\u{1be6}', '\u{1be6}'), - ('\u{1be8}', '\u{1be9}'), - ('\u{1bed}', '\u{1bed}'), - ('\u{1bef}', '\u{1bf3}'), - ('\u{1c2c}', '\u{1c33}'), - ('\u{1c36}', '\u{1c37}'), - ('\u{1cd0}', '\u{1cd2}'), - ('\u{1cd4}', '\u{1ce0}'), - ('\u{1ce2}', '\u{1ce8}'), - ('\u{1ced}', '\u{1ced}'), - ('\u{1cf4}', '\u{1cf4}'), - ('\u{1cf8}', '\u{1cf9}'), - ('\u{1dc0}', '\u{1dff}'), - ('\u{200c}', '\u{200c}'), - ('\u{20d0}', '\u{20f0}'), - ('\u{2cef}', '\u{2cf1}'), - ('\u{2d7f}', '\u{2d7f}'), - ('\u{2de0}', '\u{2dff}'), - ('\u{302a}', '\u{302f}'), - ('\u{3099}', '\u{309a}'), - ('\u{a66f}', '\u{a672}'), - ('\u{a674}', '\u{a67d}'), - ('\u{a69e}', '\u{a69f}'), - ('\u{a6f0}', '\u{a6f1}'), - ('\u{a802}', '\u{a802}'), - ('\u{a806}', '\u{a806}'), - ('\u{a80b}', '\u{a80b}'), - ('\u{a825}', '\u{a826}'), - ('\u{a82c}', '\u{a82c}'), - ('\u{a8c4}', '\u{a8c5}'), - ('\u{a8e0}', '\u{a8f1}'), - ('\u{a8ff}', '\u{a8ff}'), - ('\u{a926}', '\u{a92d}'), - ('\u{a947}', '\u{a951}'), - ('\u{a953}', '\u{a953}'), - ('\u{a980}', '\u{a982}'), - ('\u{a9b3}', '\u{a9b3}'), - ('\u{a9b6}', '\u{a9b9}'), - ('\u{a9bc}', '\u{a9bd}'), - ('\u{a9c0}', '\u{a9c0}'), - ('\u{a9e5}', '\u{a9e5}'), - ('\u{aa29}', '\u{aa2e}'), - ('\u{aa31}', '\u{aa32}'), - ('\u{aa35}', '\u{aa36}'), - ('\u{aa43}', '\u{aa43}'), - ('\u{aa4c}', '\u{aa4c}'), - ('\u{aa7c}', '\u{aa7c}'), - ('\u{aab0}', '\u{aab0}'), - ('\u{aab2}', '\u{aab4}'), - ('\u{aab7}', '\u{aab8}'), - ('\u{aabe}', '\u{aabf}'), - ('\u{aac1}', '\u{aac1}'), - ('\u{aaec}', '\u{aaed}'), - ('\u{aaf6}', '\u{aaf6}'), - ('\u{abe5}', '\u{abe5}'), - ('\u{abe8}', '\u{abe8}'), - ('\u{abed}', '\u{abed}'), - ('\u{fb1e}', '\u{fb1e}'), - ('\u{fe00}', '\u{fe0f}'), - ('\u{fe20}', '\u{fe2f}'), - ('\u{ff9e}', '\u{ff9f}'), - ('\u{101fd}', '\u{101fd}'), - ('\u{102e0}', 
'\u{102e0}'), - ('\u{10376}', '\u{1037a}'), - ('\u{10a01}', '\u{10a03}'), - ('\u{10a05}', '\u{10a06}'), - ('\u{10a0c}', '\u{10a0f}'), - ('\u{10a38}', '\u{10a3a}'), - ('\u{10a3f}', '\u{10a3f}'), - ('\u{10ae5}', '\u{10ae6}'), - ('\u{10d24}', '\u{10d27}'), - ('\u{10d69}', '\u{10d6d}'), - ('\u{10eab}', '\u{10eac}'), - ('\u{10efc}', '\u{10eff}'), - ('\u{10f46}', '\u{10f50}'), - ('\u{10f82}', '\u{10f85}'), - ('\u{11001}', '\u{11001}'), - ('\u{11038}', '\u{11046}'), - ('\u{11070}', '\u{11070}'), - ('\u{11073}', '\u{11074}'), - ('\u{1107f}', '\u{11081}'), - ('\u{110b3}', '\u{110b6}'), - ('\u{110b9}', '\u{110ba}'), - ('\u{110c2}', '\u{110c2}'), - ('\u{11100}', '\u{11102}'), - ('\u{11127}', '\u{1112b}'), - ('\u{1112d}', '\u{11134}'), - ('\u{11173}', '\u{11173}'), - ('\u{11180}', '\u{11181}'), - ('\u{111b6}', '\u{111be}'), - ('\u{111c0}', '\u{111c0}'), - ('\u{111c9}', '\u{111cc}'), - ('\u{111cf}', '\u{111cf}'), - ('\u{1122f}', '\u{11231}'), - ('\u{11234}', '\u{11237}'), - ('\u{1123e}', '\u{1123e}'), - ('\u{11241}', '\u{11241}'), - ('\u{112df}', '\u{112df}'), - ('\u{112e3}', '\u{112ea}'), - ('\u{11300}', '\u{11301}'), - ('\u{1133b}', '\u{1133c}'), - ('\u{1133e}', '\u{1133e}'), - ('\u{11340}', '\u{11340}'), - ('\u{1134d}', '\u{1134d}'), - ('\u{11357}', '\u{11357}'), - ('\u{11366}', '\u{1136c}'), - ('\u{11370}', '\u{11374}'), - ('\u{113b8}', '\u{113b8}'), - ('\u{113bb}', '\u{113c0}'), - ('\u{113c2}', '\u{113c2}'), - ('\u{113c5}', '\u{113c5}'), - ('\u{113c7}', '\u{113c9}'), - ('\u{113ce}', '\u{113d0}'), - ('\u{113d2}', '\u{113d2}'), - ('\u{113e1}', '\u{113e2}'), - ('\u{11438}', '\u{1143f}'), - ('\u{11442}', '\u{11444}'), - ('\u{11446}', '\u{11446}'), - ('\u{1145e}', '\u{1145e}'), - ('\u{114b0}', '\u{114b0}'), - ('\u{114b3}', '\u{114b8}'), - ('\u{114ba}', '\u{114ba}'), - ('\u{114bd}', '\u{114bd}'), - ('\u{114bf}', '\u{114c0}'), - ('\u{114c2}', '\u{114c3}'), - ('\u{115af}', '\u{115af}'), - ('\u{115b2}', '\u{115b5}'), - ('\u{115bc}', '\u{115bd}'), - ('\u{115bf}', '\u{115c0}'), - ('\u{115dc}', '\u{115dd}'), - ('\u{11633}', '\u{1163a}'), - ('\u{1163d}', '\u{1163d}'), - ('\u{1163f}', '\u{11640}'), - ('\u{116ab}', '\u{116ab}'), - ('\u{116ad}', '\u{116ad}'), - ('\u{116b0}', '\u{116b7}'), - ('\u{1171d}', '\u{1171d}'), - ('\u{1171f}', '\u{1171f}'), - ('\u{11722}', '\u{11725}'), - ('\u{11727}', '\u{1172b}'), - ('\u{1182f}', '\u{11837}'), - ('\u{11839}', '\u{1183a}'), - ('\u{11930}', '\u{11930}'), - ('\u{1193b}', '\u{1193e}'), - ('\u{11943}', '\u{11943}'), - ('\u{119d4}', '\u{119d7}'), - ('\u{119da}', '\u{119db}'), - ('\u{119e0}', '\u{119e0}'), - ('\u{11a01}', '\u{11a0a}'), - ('\u{11a33}', '\u{11a38}'), - ('\u{11a3b}', '\u{11a3e}'), - ('\u{11a47}', '\u{11a47}'), - ('\u{11a51}', '\u{11a56}'), - ('\u{11a59}', '\u{11a5b}'), - ('\u{11a8a}', '\u{11a96}'), - ('\u{11a98}', '\u{11a99}'), - ('\u{11c30}', '\u{11c36}'), - ('\u{11c38}', '\u{11c3d}'), - ('\u{11c3f}', '\u{11c3f}'), - ('\u{11c92}', '\u{11ca7}'), - ('\u{11caa}', '\u{11cb0}'), - ('\u{11cb2}', '\u{11cb3}'), - ('\u{11cb5}', '\u{11cb6}'), - ('\u{11d31}', '\u{11d36}'), - ('\u{11d3a}', '\u{11d3a}'), - ('\u{11d3c}', '\u{11d3d}'), - ('\u{11d3f}', '\u{11d45}'), - ('\u{11d47}', '\u{11d47}'), - ('\u{11d90}', '\u{11d91}'), - ('\u{11d95}', '\u{11d95}'), - ('\u{11d97}', '\u{11d97}'), - ('\u{11ef3}', '\u{11ef4}'), - ('\u{11f00}', '\u{11f01}'), - ('\u{11f36}', '\u{11f3a}'), - ('\u{11f40}', '\u{11f42}'), - ('\u{11f5a}', '\u{11f5a}'), - ('\u{13440}', '\u{13440}'), - ('\u{13447}', '\u{13455}'), - ('\u{1611e}', '\u{16129}'), - ('\u{1612d}', '\u{1612f}'), - ('\u{16af0}', '\u{16af4}'), 
- ('\u{16b30}', '\u{16b36}'), - ('\u{16f4f}', '\u{16f4f}'), - ('\u{16f8f}', '\u{16f92}'), - ('\u{16fe4}', '\u{16fe4}'), - ('\u{16ff0}', '\u{16ff1}'), - ('\u{1bc9d}', '\u{1bc9e}'), - ('\u{1cf00}', '\u{1cf2d}'), - ('\u{1cf30}', '\u{1cf46}'), - ('\u{1d165}', '\u{1d169}'), - ('\u{1d16d}', '\u{1d172}'), - ('\u{1d17b}', '\u{1d182}'), - ('\u{1d185}', '\u{1d18b}'), - ('\u{1d1aa}', '\u{1d1ad}'), - ('\u{1d242}', '\u{1d244}'), - ('\u{1da00}', '\u{1da36}'), - ('\u{1da3b}', '\u{1da6c}'), - ('\u{1da75}', '\u{1da75}'), - ('\u{1da84}', '\u{1da84}'), - ('\u{1da9b}', '\u{1da9f}'), - ('\u{1daa1}', '\u{1daaf}'), - ('\u{1e000}', '\u{1e006}'), - ('\u{1e008}', '\u{1e018}'), - ('\u{1e01b}', '\u{1e021}'), - ('\u{1e023}', '\u{1e024}'), - ('\u{1e026}', '\u{1e02a}'), - ('\u{1e08f}', '\u{1e08f}'), - ('\u{1e130}', '\u{1e136}'), - ('\u{1e2ae}', '\u{1e2ae}'), - ('\u{1e2ec}', '\u{1e2ef}'), - ('\u{1e4ec}', '\u{1e4ef}'), - ('\u{1e5ee}', '\u{1e5ef}'), - ('\u{1e8d0}', '\u{1e8d6}'), - ('\u{1e944}', '\u{1e94a}'), - ('\u{e0020}', '\u{e007f}'), - ('\u{e0100}', '\u{e01ef}'), -]; - -pub const GRAPHEME_LINK: &'static [(char, char)] = &[ - ('\u{94d}', '\u{94d}'), - ('\u{9cd}', '\u{9cd}'), - ('\u{a4d}', '\u{a4d}'), - ('\u{acd}', '\u{acd}'), - ('\u{b4d}', '\u{b4d}'), - ('\u{bcd}', '\u{bcd}'), - ('\u{c4d}', '\u{c4d}'), - ('\u{ccd}', '\u{ccd}'), - ('\u{d3b}', '\u{d3c}'), - ('\u{d4d}', '\u{d4d}'), - ('\u{dca}', '\u{dca}'), - ('\u{e3a}', '\u{e3a}'), - ('\u{eba}', '\u{eba}'), - ('\u{f84}', '\u{f84}'), - ('\u{1039}', '\u{103a}'), - ('\u{1714}', '\u{1715}'), - ('\u{1734}', '\u{1734}'), - ('\u{17d2}', '\u{17d2}'), - ('\u{1a60}', '\u{1a60}'), - ('\u{1b44}', '\u{1b44}'), - ('\u{1baa}', '\u{1bab}'), - ('\u{1bf2}', '\u{1bf3}'), - ('\u{2d7f}', '\u{2d7f}'), - ('\u{a806}', '\u{a806}'), - ('\u{a82c}', '\u{a82c}'), - ('\u{a8c4}', '\u{a8c4}'), - ('\u{a953}', '\u{a953}'), - ('\u{a9c0}', '\u{a9c0}'), - ('\u{aaf6}', '\u{aaf6}'), - ('\u{abed}', '\u{abed}'), - ('\u{10a3f}', '\u{10a3f}'), - ('\u{11046}', '\u{11046}'), - ('\u{11070}', '\u{11070}'), - ('\u{1107f}', '\u{1107f}'), - ('\u{110b9}', '\u{110b9}'), - ('\u{11133}', '\u{11134}'), - ('\u{111c0}', '\u{111c0}'), - ('\u{11235}', '\u{11235}'), - ('\u{112ea}', '\u{112ea}'), - ('\u{1134d}', '\u{1134d}'), - ('\u{113ce}', '\u{113d0}'), - ('\u{11442}', '\u{11442}'), - ('\u{114c2}', '\u{114c2}'), - ('\u{115bf}', '\u{115bf}'), - ('\u{1163f}', '\u{1163f}'), - ('\u{116b6}', '\u{116b6}'), - ('\u{1172b}', '\u{1172b}'), - ('\u{11839}', '\u{11839}'), - ('\u{1193d}', '\u{1193e}'), - ('\u{119e0}', '\u{119e0}'), - ('\u{11a34}', '\u{11a34}'), - ('\u{11a47}', '\u{11a47}'), - ('\u{11a99}', '\u{11a99}'), - ('\u{11c3f}', '\u{11c3f}'), - ('\u{11d44}', '\u{11d45}'), - ('\u{11d97}', '\u{11d97}'), - ('\u{11f41}', '\u{11f42}'), - ('\u{1612f}', '\u{1612f}'), -]; - -pub const HEX_DIGIT: &'static [(char, char)] = &[ - ('0', '9'), - ('A', 'F'), - ('a', 'f'), - ('0', '9'), - ('A', 'F'), - ('a', 'f'), -]; - -pub const HYPHEN: &'static [(char, char)] = &[ - ('-', '-'), - ('\u{ad}', '\u{ad}'), - ('֊', '֊'), - ('᠆', '᠆'), - ('‐', '‑'), - ('⸗', '⸗'), - ('・', '・'), - ('﹣', '﹣'), - ('-', '-'), - ('・', '・'), -]; - -pub const IDS_BINARY_OPERATOR: &'static [(char, char)] = - &[('⿰', '⿱'), ('⿴', '⿽'), ('㇯', '㇯')]; - -pub const IDS_TRINARY_OPERATOR: &'static [(char, char)] = &[('⿲', '⿳')]; - -pub const IDS_UNARY_OPERATOR: &'static [(char, char)] = &[('⿾', '⿿')]; - -pub const ID_COMPAT_MATH_CONTINUE: &'static [(char, char)] = &[ - ('²', '³'), - ('¹', '¹'), - ('⁰', '⁰'), - ('⁴', '⁾'), - ('₀', '₎'), - ('∂', '∂'), - ('∇', '∇'), - ('∞', '∞'), - ('𝛁', 
'𝛁'), - ('𝛛', '𝛛'), - ('𝛻', '𝛻'), - ('𝜕', '𝜕'), - ('𝜵', '𝜵'), - ('𝝏', '𝝏'), - ('𝝯', '𝝯'), - ('𝞉', '𝞉'), - ('𝞩', '𝞩'), - ('𝟃', '𝟃'), -]; - -pub const ID_COMPAT_MATH_START: &'static [(char, char)] = &[ - ('∂', '∂'), - ('∇', '∇'), - ('∞', '∞'), - ('𝛁', '𝛁'), - ('𝛛', '𝛛'), - ('𝛻', '𝛻'), - ('𝜕', '𝜕'), - ('𝜵', '𝜵'), - ('𝝏', '𝝏'), - ('𝝯', '𝝯'), - ('𝞉', '𝞉'), - ('𝞩', '𝞩'), - ('𝟃', '𝟃'), -]; - -pub const ID_CONTINUE: &'static [(char, char)] = &[ - ('0', '9'), - ('A', 'Z'), - ('_', '_'), - ('a', 'z'), - ('ª', 'ª'), - ('µ', 'µ'), - ('·', '·'), - ('º', 'º'), - ('À', 'Ö'), - ('Ø', 'ö'), - ('ø', 'ˁ'), - ('ˆ', 'ˑ'), - ('ˠ', 'ˤ'), - ('ˬ', 'ˬ'), - ('ˮ', 'ˮ'), - ('\u{300}', 'ʹ'), - ('Ͷ', 'ͷ'), - ('ͺ', 'ͽ'), - ('Ϳ', 'Ϳ'), - ('Ά', 'Ί'), - ('Ό', 'Ό'), - ('Ύ', 'Ρ'), - ('Σ', 'ϵ'), - ('Ϸ', 'ҁ'), - ('\u{483}', '\u{487}'), - ('Ҋ', 'ԯ'), - ('Ա', 'Ֆ'), - ('ՙ', 'ՙ'), - ('ՠ', 'ֈ'), - ('\u{591}', '\u{5bd}'), - ('\u{5bf}', '\u{5bf}'), - ('\u{5c1}', '\u{5c2}'), - ('\u{5c4}', '\u{5c5}'), - ('\u{5c7}', '\u{5c7}'), - ('א', 'ת'), - ('ׯ', 'ײ'), - ('\u{610}', '\u{61a}'), - ('ؠ', '٩'), - ('ٮ', 'ۓ'), - ('ە', '\u{6dc}'), - ('\u{6df}', '\u{6e8}'), - ('\u{6ea}', 'ۼ'), - ('ۿ', 'ۿ'), - ('ܐ', '\u{74a}'), - ('ݍ', 'ޱ'), - ('߀', 'ߵ'), - ('ߺ', 'ߺ'), - ('\u{7fd}', '\u{7fd}'), - ('ࠀ', '\u{82d}'), - ('ࡀ', '\u{85b}'), - ('ࡠ', 'ࡪ'), - ('ࡰ', 'ࢇ'), - ('ࢉ', 'ࢎ'), - ('\u{897}', '\u{8e1}'), - ('\u{8e3}', '\u{963}'), - ('०', '९'), - ('ॱ', 'ঃ'), - ('অ', 'ঌ'), - ('এ', 'ঐ'), - ('ও', 'ন'), - ('প', 'র'), - ('ল', 'ল'), - ('শ', 'হ'), - ('\u{9bc}', '\u{9c4}'), - ('ে', 'ৈ'), - ('ো', 'ৎ'), - ('\u{9d7}', '\u{9d7}'), - ('ড়', 'ঢ়'), - ('য়', '\u{9e3}'), - ('০', 'ৱ'), - ('ৼ', 'ৼ'), - ('\u{9fe}', '\u{9fe}'), - ('\u{a01}', 'ਃ'), - ('ਅ', 'ਊ'), - ('ਏ', 'ਐ'), - ('ਓ', 'ਨ'), - ('ਪ', 'ਰ'), - ('ਲ', 'ਲ਼'), - ('ਵ', 'ਸ਼'), - ('ਸ', 'ਹ'), - ('\u{a3c}', '\u{a3c}'), - ('ਾ', '\u{a42}'), - ('\u{a47}', '\u{a48}'), - ('\u{a4b}', '\u{a4d}'), - ('\u{a51}', '\u{a51}'), - ('ਖ਼', 'ੜ'), - ('ਫ਼', 'ਫ਼'), - ('੦', '\u{a75}'), - ('\u{a81}', 'ઃ'), - ('અ', 'ઍ'), - ('એ', 'ઑ'), - ('ઓ', 'ન'), - ('પ', 'ર'), - ('લ', 'ળ'), - ('વ', 'હ'), - ('\u{abc}', '\u{ac5}'), - ('\u{ac7}', 'ૉ'), - ('ો', '\u{acd}'), - ('ૐ', 'ૐ'), - ('ૠ', '\u{ae3}'), - ('૦', '૯'), - ('ૹ', '\u{aff}'), - ('\u{b01}', 'ଃ'), - ('ଅ', 'ଌ'), - ('ଏ', 'ଐ'), - ('ଓ', 'ନ'), - ('ପ', 'ର'), - ('ଲ', 'ଳ'), - ('ଵ', 'ହ'), - ('\u{b3c}', '\u{b44}'), - ('େ', 'ୈ'), - ('ୋ', '\u{b4d}'), - ('\u{b55}', '\u{b57}'), - ('ଡ଼', 'ଢ଼'), - ('ୟ', '\u{b63}'), - ('୦', '୯'), - ('ୱ', 'ୱ'), - ('\u{b82}', 'ஃ'), - ('அ', 'ஊ'), - ('எ', 'ஐ'), - ('ஒ', 'க'), - ('ங', 'ச'), - ('ஜ', 'ஜ'), - ('ஞ', 'ட'), - ('ண', 'த'), - ('ந', 'ப'), - ('ம', 'ஹ'), - ('\u{bbe}', 'ூ'), - ('ெ', 'ை'), - ('ொ', '\u{bcd}'), - ('ௐ', 'ௐ'), - ('\u{bd7}', '\u{bd7}'), - ('௦', '௯'), - ('\u{c00}', 'ఌ'), - ('ఎ', 'ఐ'), - ('ఒ', 'న'), - ('ప', 'హ'), - ('\u{c3c}', 'ౄ'), - ('\u{c46}', '\u{c48}'), - ('\u{c4a}', '\u{c4d}'), - ('\u{c55}', '\u{c56}'), - ('ౘ', 'ౚ'), - ('ౝ', 'ౝ'), - ('ౠ', '\u{c63}'), - ('౦', '౯'), - ('ಀ', 'ಃ'), - ('ಅ', 'ಌ'), - ('ಎ', 'ಐ'), - ('ಒ', 'ನ'), - ('ಪ', 'ಳ'), - ('ವ', 'ಹ'), - ('\u{cbc}', 'ೄ'), - ('\u{cc6}', '\u{cc8}'), - ('\u{cca}', '\u{ccd}'), - ('\u{cd5}', '\u{cd6}'), - ('ೝ', 'ೞ'), - ('ೠ', '\u{ce3}'), - ('೦', '೯'), - ('ೱ', 'ೳ'), - ('\u{d00}', 'ഌ'), - ('എ', 'ഐ'), - ('ഒ', '\u{d44}'), - ('െ', 'ൈ'), - ('ൊ', 'ൎ'), - ('ൔ', '\u{d57}'), - ('ൟ', '\u{d63}'), - ('൦', '൯'), - ('ൺ', 'ൿ'), - ('\u{d81}', 'ඃ'), - ('අ', 'ඖ'), - ('ක', 'න'), - ('ඳ', 'ර'), - ('ල', 'ල'), - ('ව', 'ෆ'), - ('\u{dca}', '\u{dca}'), - ('\u{dcf}', '\u{dd4}'), - ('\u{dd6}', '\u{dd6}'), - ('ෘ', '\u{ddf}'), - ('෦', '෯'), - ('ෲ', 'ෳ'), - ('ก', '\u{e3a}'), 
- ('เ', '\u{e4e}'), - ('๐', '๙'), - ('ກ', 'ຂ'), - ('ຄ', 'ຄ'), - ('ຆ', 'ຊ'), - ('ຌ', 'ຣ'), - ('ລ', 'ລ'), - ('ວ', 'ຽ'), - ('ເ', 'ໄ'), - ('ໆ', 'ໆ'), - ('\u{ec8}', '\u{ece}'), - ('໐', '໙'), - ('ໜ', 'ໟ'), - ('ༀ', 'ༀ'), - ('\u{f18}', '\u{f19}'), - ('༠', '༩'), - ('\u{f35}', '\u{f35}'), - ('\u{f37}', '\u{f37}'), - ('\u{f39}', '\u{f39}'), - ('༾', 'ཇ'), - ('ཉ', 'ཬ'), - ('\u{f71}', '\u{f84}'), - ('\u{f86}', '\u{f97}'), - ('\u{f99}', '\u{fbc}'), - ('\u{fc6}', '\u{fc6}'), - ('က', '၉'), - ('ၐ', '\u{109d}'), - ('Ⴀ', 'Ⴥ'), - ('Ⴧ', 'Ⴧ'), - ('Ⴭ', 'Ⴭ'), - ('ა', 'ჺ'), - ('ჼ', 'ቈ'), - ('ቊ', 'ቍ'), - ('ቐ', 'ቖ'), - ('ቘ', 'ቘ'), - ('ቚ', 'ቝ'), - ('በ', 'ኈ'), - ('ኊ', 'ኍ'), - ('ነ', 'ኰ'), - ('ኲ', 'ኵ'), - ('ኸ', 'ኾ'), - ('ዀ', 'ዀ'), - ('ዂ', 'ዅ'), - ('ወ', 'ዖ'), - ('ዘ', 'ጐ'), - ('ጒ', 'ጕ'), - ('ጘ', 'ፚ'), - ('\u{135d}', '\u{135f}'), - ('፩', '፱'), - ('ᎀ', 'ᎏ'), - ('Ꭰ', 'Ᏽ'), - ('ᏸ', 'ᏽ'), - ('ᐁ', 'ᙬ'), - ('ᙯ', 'ᙿ'), - ('ᚁ', 'ᚚ'), - ('ᚠ', 'ᛪ'), - ('ᛮ', 'ᛸ'), - ('ᜀ', '\u{1715}'), - ('ᜟ', '\u{1734}'), - ('ᝀ', '\u{1753}'), - ('ᝠ', 'ᝬ'), - ('ᝮ', 'ᝰ'), - ('\u{1772}', '\u{1773}'), - ('ក', '\u{17d3}'), - ('ៗ', 'ៗ'), - ('ៜ', '\u{17dd}'), - ('០', '៩'), - ('\u{180b}', '\u{180d}'), - ('\u{180f}', '᠙'), - ('ᠠ', 'ᡸ'), - ('ᢀ', 'ᢪ'), - ('ᢰ', 'ᣵ'), - ('ᤀ', 'ᤞ'), - ('\u{1920}', 'ᤫ'), - ('ᤰ', '\u{193b}'), - ('᥆', 'ᥭ'), - ('ᥰ', 'ᥴ'), - ('ᦀ', 'ᦫ'), - ('ᦰ', 'ᧉ'), - ('᧐', '᧚'), - ('ᨀ', '\u{1a1b}'), - ('ᨠ', '\u{1a5e}'), - ('\u{1a60}', '\u{1a7c}'), - ('\u{1a7f}', '᪉'), - ('᪐', '᪙'), - ('ᪧ', 'ᪧ'), - ('\u{1ab0}', '\u{1abd}'), - ('\u{1abf}', '\u{1ace}'), - ('\u{1b00}', 'ᭌ'), - ('᭐', '᭙'), - ('\u{1b6b}', '\u{1b73}'), - ('\u{1b80}', '\u{1bf3}'), - ('ᰀ', '\u{1c37}'), - ('᱀', '᱉'), - ('ᱍ', 'ᱽ'), - ('ᲀ', 'ᲊ'), - ('Ა', 'Ჺ'), - ('Ჽ', 'Ჿ'), - ('\u{1cd0}', '\u{1cd2}'), - ('\u{1cd4}', 'ᳺ'), - ('ᴀ', 'ἕ'), - ('Ἐ', 'Ἕ'), - ('ἠ', 'ὅ'), - ('Ὀ', 'Ὅ'), - ('ὐ', 'ὗ'), - ('Ὑ', 'Ὑ'), - ('Ὓ', 'Ὓ'), - ('Ὕ', 'Ὕ'), - ('Ὗ', 'ώ'), - ('ᾀ', 'ᾴ'), - ('ᾶ', 'ᾼ'), - ('ι', 'ι'), - ('ῂ', 'ῄ'), - ('ῆ', 'ῌ'), - ('ῐ', 'ΐ'), - ('ῖ', 'Ί'), - ('ῠ', 'Ῥ'), - ('ῲ', 'ῴ'), - ('ῶ', 'ῼ'), - ('\u{200c}', '\u{200d}'), - ('‿', '⁀'), - ('⁔', '⁔'), - ('ⁱ', 'ⁱ'), - ('ⁿ', 'ⁿ'), - ('ₐ', 'ₜ'), - ('\u{20d0}', '\u{20dc}'), - ('\u{20e1}', '\u{20e1}'), - ('\u{20e5}', '\u{20f0}'), - ('ℂ', 'ℂ'), - ('ℇ', 'ℇ'), - ('ℊ', 'ℓ'), - ('ℕ', 'ℕ'), - ('℘', 'ℝ'), - ('ℤ', 'ℤ'), - ('Ω', 'Ω'), - ('ℨ', 'ℨ'), - ('K', 'ℹ'), - ('ℼ', 'ℿ'), - ('ⅅ', 'ⅉ'), - ('ⅎ', 'ⅎ'), - ('Ⅰ', 'ↈ'), - ('Ⰰ', 'ⳤ'), - ('Ⳬ', 'ⳳ'), - ('ⴀ', 'ⴥ'), - ('ⴧ', 'ⴧ'), - ('ⴭ', 'ⴭ'), - ('ⴰ', 'ⵧ'), - ('ⵯ', 'ⵯ'), - ('\u{2d7f}', 'ⶖ'), - ('ⶠ', 'ⶦ'), - ('ⶨ', 'ⶮ'), - ('ⶰ', 'ⶶ'), - ('ⶸ', 'ⶾ'), - ('ⷀ', 'ⷆ'), - ('ⷈ', 'ⷎ'), - ('ⷐ', 'ⷖ'), - ('ⷘ', 'ⷞ'), - ('\u{2de0}', '\u{2dff}'), - ('々', '〇'), - ('〡', '\u{302f}'), - ('〱', '〵'), - ('〸', '〼'), - ('ぁ', 'ゖ'), - ('\u{3099}', 'ゟ'), - ('ァ', 'ヿ'), - ('ㄅ', 'ㄯ'), - ('ㄱ', 'ㆎ'), - ('ㆠ', 'ㆿ'), - ('ㇰ', 'ㇿ'), - ('㐀', '䶿'), - ('一', 'ꒌ'), - ('ꓐ', 'ꓽ'), - ('ꔀ', 'ꘌ'), - ('ꘐ', 'ꘫ'), - ('Ꙁ', '\u{a66f}'), - ('\u{a674}', '\u{a67d}'), - ('ꙿ', '\u{a6f1}'), - ('ꜗ', 'ꜟ'), - ('Ꜣ', 'ꞈ'), - ('Ꞌ', 'ꟍ'), - ('Ꟑ', 'ꟑ'), - ('ꟓ', 'ꟓ'), - ('ꟕ', 'Ƛ'), - ('ꟲ', 'ꠧ'), - ('\u{a82c}', '\u{a82c}'), - ('ꡀ', 'ꡳ'), - ('ꢀ', '\u{a8c5}'), - ('꣐', '꣙'), - ('\u{a8e0}', 'ꣷ'), - ('ꣻ', 'ꣻ'), - ('ꣽ', '\u{a92d}'), - ('ꤰ', '\u{a953}'), - ('ꥠ', 'ꥼ'), - ('\u{a980}', '\u{a9c0}'), - ('ꧏ', '꧙'), - ('ꧠ', 'ꧾ'), - ('ꨀ', '\u{aa36}'), - ('ꩀ', 'ꩍ'), - ('꩐', '꩙'), - ('ꩠ', 'ꩶ'), - ('ꩺ', 'ꫂ'), - ('ꫛ', 'ꫝ'), - ('ꫠ', 'ꫯ'), - ('ꫲ', '\u{aaf6}'), - ('ꬁ', 'ꬆ'), - ('ꬉ', 'ꬎ'), - ('ꬑ', 'ꬖ'), - ('ꬠ', 'ꬦ'), - ('ꬨ', 'ꬮ'), - ('ꬰ', 'ꭚ'), - ('ꭜ', 'ꭩ'), - ('ꭰ', 'ꯪ'), - ('꯬', '\u{abed}'), - ('꯰', '꯹'), - ('가', '힣'), - 
('ힰ', 'ퟆ'), - ('ퟋ', 'ퟻ'), - ('豈', '舘'), - ('並', '龎'), - ('ff', 'st'), - ('ﬓ', 'ﬗ'), - ('יִ', 'ﬨ'), - ('שׁ', 'זּ'), - ('טּ', 'לּ'), - ('מּ', 'מּ'), - ('נּ', 'סּ'), - ('ףּ', 'פּ'), - ('צּ', 'ﮱ'), - ('ﯓ', 'ﴽ'), - ('ﵐ', 'ﶏ'), - ('ﶒ', 'ﷇ'), - ('ﷰ', 'ﷻ'), - ('\u{fe00}', '\u{fe0f}'), - ('\u{fe20}', '\u{fe2f}'), - ('︳', '︴'), - ('﹍', '﹏'), - ('ﹰ', 'ﹴ'), - ('ﹶ', 'ﻼ'), - ('0', '9'), - ('A', 'Z'), - ('_', '_'), - ('a', 'z'), - ('・', 'ᄒ'), - ('ᅡ', 'ᅦ'), - ('ᅧ', 'ᅬ'), - ('ᅭ', 'ᅲ'), - ('ᅳ', 'ᅵ'), - ('𐀀', '𐀋'), - ('𐀍', '𐀦'), - ('𐀨', '𐀺'), - ('𐀼', '𐀽'), - ('𐀿', '𐁍'), - ('𐁐', '𐁝'), - ('𐂀', '𐃺'), - ('𐅀', '𐅴'), - ('\u{101fd}', '\u{101fd}'), - ('𐊀', '𐊜'), - ('𐊠', '𐋐'), - ('\u{102e0}', '\u{102e0}'), - ('𐌀', '𐌟'), - ('𐌭', '𐍊'), - ('𐍐', '\u{1037a}'), - ('𐎀', '𐎝'), - ('𐎠', '𐏃'), - ('𐏈', '𐏏'), - ('𐏑', '𐏕'), - ('𐐀', '𐒝'), - ('𐒠', '𐒩'), - ('𐒰', '𐓓'), - ('𐓘', '𐓻'), - ('𐔀', '𐔧'), - ('𐔰', '𐕣'), - ('𐕰', '𐕺'), - ('𐕼', '𐖊'), - ('𐖌', '𐖒'), - ('𐖔', '𐖕'), - ('𐖗', '𐖡'), - ('𐖣', '𐖱'), - ('𐖳', '𐖹'), - ('𐖻', '𐖼'), - ('𐗀', '𐗳'), - ('𐘀', '𐜶'), - ('𐝀', '𐝕'), - ('𐝠', '𐝧'), - ('𐞀', '𐞅'), - ('𐞇', '𐞰'), - ('𐞲', '𐞺'), - ('𐠀', '𐠅'), - ('𐠈', '𐠈'), - ('𐠊', '𐠵'), - ('𐠷', '𐠸'), - ('𐠼', '𐠼'), - ('𐠿', '𐡕'), - ('𐡠', '𐡶'), - ('𐢀', '𐢞'), - ('𐣠', '𐣲'), - ('𐣴', '𐣵'), - ('𐤀', '𐤕'), - ('𐤠', '𐤹'), - ('𐦀', '𐦷'), - ('𐦾', '𐦿'), - ('𐨀', '\u{10a03}'), - ('\u{10a05}', '\u{10a06}'), - ('\u{10a0c}', '𐨓'), - ('𐨕', '𐨗'), - ('𐨙', '𐨵'), - ('\u{10a38}', '\u{10a3a}'), - ('\u{10a3f}', '\u{10a3f}'), - ('𐩠', '𐩼'), - ('𐪀', '𐪜'), - ('𐫀', '𐫇'), - ('𐫉', '\u{10ae6}'), - ('𐬀', '𐬵'), - ('𐭀', '𐭕'), - ('𐭠', '𐭲'), - ('𐮀', '𐮑'), - ('𐰀', '𐱈'), - ('𐲀', '𐲲'), - ('𐳀', '𐳲'), - ('𐴀', '\u{10d27}'), - ('𐴰', '𐴹'), - ('𐵀', '𐵥'), - ('\u{10d69}', '\u{10d6d}'), - ('𐵯', '𐶅'), - ('𐺀', '𐺩'), - ('\u{10eab}', '\u{10eac}'), - ('𐺰', '𐺱'), - ('𐻂', '𐻄'), - ('\u{10efc}', '𐼜'), - ('𐼧', '𐼧'), - ('𐼰', '\u{10f50}'), - ('𐽰', '\u{10f85}'), - ('𐾰', '𐿄'), - ('𐿠', '𐿶'), - ('𑀀', '\u{11046}'), - ('𑁦', '𑁵'), - ('\u{1107f}', '\u{110ba}'), - ('\u{110c2}', '\u{110c2}'), - ('𑃐', '𑃨'), - ('𑃰', '𑃹'), - ('\u{11100}', '\u{11134}'), - ('𑄶', '𑄿'), - ('𑅄', '𑅇'), - ('𑅐', '\u{11173}'), - ('𑅶', '𑅶'), - ('\u{11180}', '𑇄'), - ('\u{111c9}', '\u{111cc}'), - ('𑇎', '𑇚'), - ('𑇜', '𑇜'), - ('𑈀', '𑈑'), - ('𑈓', '\u{11237}'), - ('\u{1123e}', '\u{11241}'), - ('𑊀', '𑊆'), - ('𑊈', '𑊈'), - ('𑊊', '𑊍'), - ('𑊏', '𑊝'), - ('𑊟', '𑊨'), - ('𑊰', '\u{112ea}'), - ('𑋰', '𑋹'), - ('\u{11300}', '𑌃'), - ('𑌅', '𑌌'), - ('𑌏', '𑌐'), - ('𑌓', '𑌨'), - ('𑌪', '𑌰'), - ('𑌲', '𑌳'), - ('𑌵', '𑌹'), - ('\u{1133b}', '𑍄'), - ('𑍇', '𑍈'), - ('𑍋', '\u{1134d}'), - ('𑍐', '𑍐'), - ('\u{11357}', '\u{11357}'), - ('𑍝', '𑍣'), - ('\u{11366}', '\u{1136c}'), - ('\u{11370}', '\u{11374}'), - ('𑎀', '𑎉'), - ('𑎋', '𑎋'), - ('𑎎', '𑎎'), - ('𑎐', '𑎵'), - ('𑎷', '\u{113c0}'), - ('\u{113c2}', '\u{113c2}'), - ('\u{113c5}', '\u{113c5}'), - ('\u{113c7}', '𑏊'), - ('𑏌', '𑏓'), - ('\u{113e1}', '\u{113e2}'), - ('𑐀', '𑑊'), - ('𑑐', '𑑙'), - ('\u{1145e}', '𑑡'), - ('𑒀', '𑓅'), - ('𑓇', '𑓇'), - ('𑓐', '𑓙'), - ('𑖀', '\u{115b5}'), - ('𑖸', '\u{115c0}'), - ('𑗘', '\u{115dd}'), - ('𑘀', '\u{11640}'), - ('𑙄', '𑙄'), - ('𑙐', '𑙙'), - ('𑚀', '𑚸'), - ('𑛀', '𑛉'), - ('𑛐', '𑛣'), - ('𑜀', '𑜚'), - ('\u{1171d}', '\u{1172b}'), - ('𑜰', '𑜹'), - ('𑝀', '𑝆'), - ('𑠀', '\u{1183a}'), - ('𑢠', '𑣩'), - ('𑣿', '𑤆'), - ('𑤉', '𑤉'), - ('𑤌', '𑤓'), - ('𑤕', '𑤖'), - ('𑤘', '𑤵'), - ('𑤷', '𑤸'), - ('\u{1193b}', '\u{11943}'), - ('𑥐', '𑥙'), - ('𑦠', '𑦧'), - ('𑦪', '\u{119d7}'), - ('\u{119da}', '𑧡'), - ('𑧣', '𑧤'), - ('𑨀', '\u{11a3e}'), - ('\u{11a47}', '\u{11a47}'), - ('𑩐', '\u{11a99}'), - ('𑪝', '𑪝'), - ('𑪰', '𑫸'), - ('𑯀', '𑯠'), - ('𑯰', '𑯹'), - ('𑰀', '𑰈'), - ('𑰊', 
'\u{11c36}'), - ('\u{11c38}', '𑱀'), - ('𑱐', '𑱙'), - ('𑱲', '𑲏'), - ('\u{11c92}', '\u{11ca7}'), - ('𑲩', '\u{11cb6}'), - ('𑴀', '𑴆'), - ('𑴈', '𑴉'), - ('𑴋', '\u{11d36}'), - ('\u{11d3a}', '\u{11d3a}'), - ('\u{11d3c}', '\u{11d3d}'), - ('\u{11d3f}', '\u{11d47}'), - ('𑵐', '𑵙'), - ('𑵠', '𑵥'), - ('𑵧', '𑵨'), - ('𑵪', '𑶎'), - ('\u{11d90}', '\u{11d91}'), - ('𑶓', '𑶘'), - ('𑶠', '𑶩'), - ('𑻠', '𑻶'), - ('\u{11f00}', '𑼐'), - ('𑼒', '\u{11f3a}'), - ('𑼾', '\u{11f42}'), - ('𑽐', '\u{11f5a}'), - ('𑾰', '𑾰'), - ('𒀀', '𒎙'), - ('𒐀', '𒑮'), - ('𒒀', '𒕃'), - ('𒾐', '𒿰'), - ('𓀀', '𓐯'), - ('\u{13440}', '\u{13455}'), - ('𓑠', '𔏺'), - ('𔐀', '𔙆'), - ('𖄀', '𖄹'), - ('𖠀', '𖨸'), - ('𖩀', '𖩞'), - ('𖩠', '𖩩'), - ('𖩰', '𖪾'), - ('𖫀', '𖫉'), - ('𖫐', '𖫭'), - ('\u{16af0}', '\u{16af4}'), - ('𖬀', '\u{16b36}'), - ('𖭀', '𖭃'), - ('𖭐', '𖭙'), - ('𖭣', '𖭷'), - ('𖭽', '𖮏'), - ('𖵀', '𖵬'), - ('𖵰', '𖵹'), - ('𖹀', '𖹿'), - ('𖼀', '𖽊'), - ('\u{16f4f}', '𖾇'), - ('\u{16f8f}', '𖾟'), - ('𖿠', '𖿡'), - ('𖿣', '\u{16fe4}'), - ('\u{16ff0}', '\u{16ff1}'), - ('𗀀', '𘟷'), - ('𘠀', '𘳕'), - ('𘳿', '𘴈'), - ('𚿰', '𚿳'), - ('𚿵', '𚿻'), - ('𚿽', '𚿾'), - ('𛀀', '𛄢'), - ('𛄲', '𛄲'), - ('𛅐', '𛅒'), - ('𛅕', '𛅕'), - ('𛅤', '𛅧'), - ('𛅰', '𛋻'), - ('𛰀', '𛱪'), - ('𛱰', '𛱼'), - ('𛲀', '𛲈'), - ('𛲐', '𛲙'), - ('\u{1bc9d}', '\u{1bc9e}'), - ('𜳰', '𜳹'), - ('\u{1cf00}', '\u{1cf2d}'), - ('\u{1cf30}', '\u{1cf46}'), - ('\u{1d165}', '\u{1d169}'), - ('\u{1d16d}', '\u{1d172}'), - ('\u{1d17b}', '\u{1d182}'), - ('\u{1d185}', '\u{1d18b}'), - ('\u{1d1aa}', '\u{1d1ad}'), - ('\u{1d242}', '\u{1d244}'), - ('𝐀', '𝑔'), - ('𝑖', '𝒜'), - ('𝒞', '𝒟'), - ('𝒢', '𝒢'), - ('𝒥', '𝒦'), - ('𝒩', '𝒬'), - ('𝒮', '𝒹'), - ('𝒻', '𝒻'), - ('𝒽', '𝓃'), - ('𝓅', '𝔅'), - ('𝔇', '𝔊'), - ('𝔍', '𝔔'), - ('𝔖', '𝔜'), - ('𝔞', '𝔹'), - ('𝔻', '𝔾'), - ('𝕀', '𝕄'), - ('𝕆', '𝕆'), - ('𝕊', '𝕐'), - ('𝕒', '𝚥'), - ('𝚨', '𝛀'), - ('𝛂', '𝛚'), - ('𝛜', '𝛺'), - ('𝛼', '𝜔'), - ('𝜖', '𝜴'), - ('𝜶', '𝝎'), - ('𝝐', '𝝮'), - ('𝝰', '𝞈'), - ('𝞊', '𝞨'), - ('𝞪', '𝟂'), - ('𝟄', '𝟋'), - ('𝟎', '𝟿'), - ('\u{1da00}', '\u{1da36}'), - ('\u{1da3b}', '\u{1da6c}'), - ('\u{1da75}', '\u{1da75}'), - ('\u{1da84}', '\u{1da84}'), - ('\u{1da9b}', '\u{1da9f}'), - ('\u{1daa1}', '\u{1daaf}'), - ('𝼀', '𝼞'), - ('𝼥', '𝼪'), - ('\u{1e000}', '\u{1e006}'), - ('\u{1e008}', '\u{1e018}'), - ('\u{1e01b}', '\u{1e021}'), - ('\u{1e023}', '\u{1e024}'), - ('\u{1e026}', '\u{1e02a}'), - ('𞀰', '𞁭'), - ('\u{1e08f}', '\u{1e08f}'), - ('𞄀', '𞄬'), - ('\u{1e130}', '𞄽'), - ('𞅀', '𞅉'), - ('𞅎', '𞅎'), - ('𞊐', '\u{1e2ae}'), - ('𞋀', '𞋹'), - ('𞓐', '𞓹'), - ('𞗐', '𞗺'), - ('𞟠', '𞟦'), - ('𞟨', '𞟫'), - ('𞟭', '𞟮'), - ('𞟰', '𞟾'), - ('𞠀', '𞣄'), - ('\u{1e8d0}', '\u{1e8d6}'), - ('𞤀', '𞥋'), - ('𞥐', '𞥙'), - ('𞸀', '𞸃'), - ('𞸅', '𞸟'), - ('𞸡', '𞸢'), - ('𞸤', '𞸤'), - ('𞸧', '𞸧'), - ('𞸩', '𞸲'), - ('𞸴', '𞸷'), - ('𞸹', '𞸹'), - ('𞸻', '𞸻'), - ('𞹂', '𞹂'), - ('𞹇', '𞹇'), - ('𞹉', '𞹉'), - ('𞹋', '𞹋'), - ('𞹍', '𞹏'), - ('𞹑', '𞹒'), - ('𞹔', '𞹔'), - ('𞹗', '𞹗'), - ('𞹙', '𞹙'), - ('𞹛', '𞹛'), - ('𞹝', '𞹝'), - ('𞹟', '𞹟'), - ('𞹡', '𞹢'), - ('𞹤', '𞹤'), - ('𞹧', '𞹪'), - ('𞹬', '𞹲'), - ('𞹴', '𞹷'), - ('𞹹', '𞹼'), - ('𞹾', '𞹾'), - ('𞺀', '𞺉'), - ('𞺋', '𞺛'), - ('𞺡', '𞺣'), - ('𞺥', '𞺩'), - ('𞺫', '𞺻'), - ('🯰', '🯹'), - ('𠀀', '𪛟'), - ('𪜀', '𫜹'), - ('𫝀', '𫠝'), - ('𫠠', '𬺡'), - ('𬺰', '𮯠'), - ('𮯰', '𮹝'), - ('丽', '𪘀'), - ('𰀀', '𱍊'), - ('𱍐', '𲎯'), - ('\u{e0100}', '\u{e01ef}'), -]; - -pub const ID_START: &'static [(char, char)] = &[ - ('A', 'Z'), - ('a', 'z'), - ('ª', 'ª'), - ('µ', 'µ'), - ('º', 'º'), - ('À', 'Ö'), - ('Ø', 'ö'), - ('ø', 'ˁ'), - ('ˆ', 'ˑ'), - ('ˠ', 'ˤ'), - ('ˬ', 'ˬ'), - ('ˮ', 'ˮ'), - ('Ͱ', 'ʹ'), - ('Ͷ', 'ͷ'), - ('ͺ', 'ͽ'), - ('Ϳ', 'Ϳ'), - ('Ά', 'Ά'), - ('Έ', 'Ί'), - ('Ό', 'Ό'), - ('Ύ', 'Ρ'), 
- ('Σ', 'ϵ'), - ('Ϸ', 'ҁ'), - ('Ҋ', 'ԯ'), - ('Ա', 'Ֆ'), - ('ՙ', 'ՙ'), - ('ՠ', 'ֈ'), - ('א', 'ת'), - ('ׯ', 'ײ'), - ('ؠ', 'ي'), - ('ٮ', 'ٯ'), - ('ٱ', 'ۓ'), - ('ە', 'ە'), - ('ۥ', 'ۦ'), - ('ۮ', 'ۯ'), - ('ۺ', 'ۼ'), - ('ۿ', 'ۿ'), - ('ܐ', 'ܐ'), - ('ܒ', 'ܯ'), - ('ݍ', 'ޥ'), - ('ޱ', 'ޱ'), - ('ߊ', 'ߪ'), - ('ߴ', 'ߵ'), - ('ߺ', 'ߺ'), - ('ࠀ', 'ࠕ'), - ('ࠚ', 'ࠚ'), - ('ࠤ', 'ࠤ'), - ('ࠨ', 'ࠨ'), - ('ࡀ', 'ࡘ'), - ('ࡠ', 'ࡪ'), - ('ࡰ', 'ࢇ'), - ('ࢉ', 'ࢎ'), - ('ࢠ', 'ࣉ'), - ('ऄ', 'ह'), - ('ऽ', 'ऽ'), - ('ॐ', 'ॐ'), - ('क़', 'ॡ'), - ('ॱ', 'ঀ'), - ('অ', 'ঌ'), - ('এ', 'ঐ'), - ('ও', 'ন'), - ('প', 'র'), - ('ল', 'ল'), - ('শ', 'হ'), - ('ঽ', 'ঽ'), - ('ৎ', 'ৎ'), - ('ড়', 'ঢ়'), - ('য়', 'ৡ'), - ('ৰ', 'ৱ'), - ('ৼ', 'ৼ'), - ('ਅ', 'ਊ'), - ('ਏ', 'ਐ'), - ('ਓ', 'ਨ'), - ('ਪ', 'ਰ'), - ('ਲ', 'ਲ਼'), - ('ਵ', 'ਸ਼'), - ('ਸ', 'ਹ'), - ('ਖ਼', 'ੜ'), - ('ਫ਼', 'ਫ਼'), - ('ੲ', 'ੴ'), - ('અ', 'ઍ'), - ('એ', 'ઑ'), - ('ઓ', 'ન'), - ('પ', 'ર'), - ('લ', 'ળ'), - ('વ', 'હ'), - ('ઽ', 'ઽ'), - ('ૐ', 'ૐ'), - ('ૠ', 'ૡ'), - ('ૹ', 'ૹ'), - ('ଅ', 'ଌ'), - ('ଏ', 'ଐ'), - ('ଓ', 'ନ'), - ('ପ', 'ର'), - ('ଲ', 'ଳ'), - ('ଵ', 'ହ'), - ('ଽ', 'ଽ'), - ('ଡ଼', 'ଢ଼'), - ('ୟ', 'ୡ'), - ('ୱ', 'ୱ'), - ('ஃ', 'ஃ'), - ('அ', 'ஊ'), - ('எ', 'ஐ'), - ('ஒ', 'க'), - ('ங', 'ச'), - ('ஜ', 'ஜ'), - ('ஞ', 'ட'), - ('ண', 'த'), - ('ந', 'ப'), - ('ம', 'ஹ'), - ('ௐ', 'ௐ'), - ('అ', 'ఌ'), - ('ఎ', 'ఐ'), - ('ఒ', 'న'), - ('ప', 'హ'), - ('ఽ', 'ఽ'), - ('ౘ', 'ౚ'), - ('ౝ', 'ౝ'), - ('ౠ', 'ౡ'), - ('ಀ', 'ಀ'), - ('ಅ', 'ಌ'), - ('ಎ', 'ಐ'), - ('ಒ', 'ನ'), - ('ಪ', 'ಳ'), - ('ವ', 'ಹ'), - ('ಽ', 'ಽ'), - ('ೝ', 'ೞ'), - ('ೠ', 'ೡ'), - ('ೱ', 'ೲ'), - ('ഄ', 'ഌ'), - ('എ', 'ഐ'), - ('ഒ', 'ഺ'), - ('ഽ', 'ഽ'), - ('ൎ', 'ൎ'), - ('ൔ', 'ൖ'), - ('ൟ', 'ൡ'), - ('ൺ', 'ൿ'), - ('අ', 'ඖ'), - ('ක', 'න'), - ('ඳ', 'ර'), - ('ල', 'ල'), - ('ව', 'ෆ'), - ('ก', 'ะ'), - ('า', 'ำ'), - ('เ', 'ๆ'), - ('ກ', 'ຂ'), - ('ຄ', 'ຄ'), - ('ຆ', 'ຊ'), - ('ຌ', 'ຣ'), - ('ລ', 'ລ'), - ('ວ', 'ະ'), - ('າ', 'ຳ'), - ('ຽ', 'ຽ'), - ('ເ', 'ໄ'), - ('ໆ', 'ໆ'), - ('ໜ', 'ໟ'), - ('ༀ', 'ༀ'), - ('ཀ', 'ཇ'), - ('ཉ', 'ཬ'), - ('ྈ', 'ྌ'), - ('က', 'ဪ'), - ('ဿ', 'ဿ'), - ('ၐ', 'ၕ'), - ('ၚ', 'ၝ'), - ('ၡ', 'ၡ'), - ('ၥ', 'ၦ'), - ('ၮ', 'ၰ'), - ('ၵ', 'ႁ'), - ('ႎ', 'ႎ'), - ('Ⴀ', 'Ⴥ'), - ('Ⴧ', 'Ⴧ'), - ('Ⴭ', 'Ⴭ'), - ('ა', 'ჺ'), - ('ჼ', 'ቈ'), - ('ቊ', 'ቍ'), - ('ቐ', 'ቖ'), - ('ቘ', 'ቘ'), - ('ቚ', 'ቝ'), - ('በ', 'ኈ'), - ('ኊ', 'ኍ'), - ('ነ', 'ኰ'), - ('ኲ', 'ኵ'), - ('ኸ', 'ኾ'), - ('ዀ', 'ዀ'), - ('ዂ', 'ዅ'), - ('ወ', 'ዖ'), - ('ዘ', 'ጐ'), - ('ጒ', 'ጕ'), - ('ጘ', 'ፚ'), - ('ᎀ', 'ᎏ'), - ('Ꭰ', 'Ᏽ'), - ('ᏸ', 'ᏽ'), - ('ᐁ', 'ᙬ'), - ('ᙯ', 'ᙿ'), - ('ᚁ', 'ᚚ'), - ('ᚠ', 'ᛪ'), - ('ᛮ', 'ᛸ'), - ('ᜀ', 'ᜑ'), - ('ᜟ', 'ᜱ'), - ('ᝀ', 'ᝑ'), - ('ᝠ', 'ᝬ'), - ('ᝮ', 'ᝰ'), - ('ក', 'ឳ'), - ('ៗ', 'ៗ'), - ('ៜ', 'ៜ'), - ('ᠠ', 'ᡸ'), - ('ᢀ', 'ᢨ'), - ('ᢪ', 'ᢪ'), - ('ᢰ', 'ᣵ'), - ('ᤀ', 'ᤞ'), - ('ᥐ', 'ᥭ'), - ('ᥰ', 'ᥴ'), - ('ᦀ', 'ᦫ'), - ('ᦰ', 'ᧉ'), - ('ᨀ', 'ᨖ'), - ('ᨠ', 'ᩔ'), - ('ᪧ', 'ᪧ'), - ('ᬅ', 'ᬳ'), - ('ᭅ', 'ᭌ'), - ('ᮃ', 'ᮠ'), - ('ᮮ', 'ᮯ'), - ('ᮺ', 'ᯥ'), - ('ᰀ', 'ᰣ'), - ('ᱍ', 'ᱏ'), - ('ᱚ', 'ᱽ'), - ('ᲀ', 'ᲊ'), - ('Ა', 'Ჺ'), - ('Ჽ', 'Ჿ'), - ('ᳩ', 'ᳬ'), - ('ᳮ', 'ᳳ'), - ('ᳵ', 'ᳶ'), - ('ᳺ', 'ᳺ'), - ('ᴀ', 'ᶿ'), - ('Ḁ', 'ἕ'), - ('Ἐ', 'Ἕ'), - ('ἠ', 'ὅ'), - ('Ὀ', 'Ὅ'), - ('ὐ', 'ὗ'), - ('Ὑ', 'Ὑ'), - ('Ὓ', 'Ὓ'), - ('Ὕ', 'Ὕ'), - ('Ὗ', 'ώ'), - ('ᾀ', 'ᾴ'), - ('ᾶ', 'ᾼ'), - ('ι', 'ι'), - ('ῂ', 'ῄ'), - ('ῆ', 'ῌ'), - ('ῐ', 'ΐ'), - ('ῖ', 'Ί'), - ('ῠ', 'Ῥ'), - ('ῲ', 'ῴ'), - ('ῶ', 'ῼ'), - ('ⁱ', 'ⁱ'), - ('ⁿ', 'ⁿ'), - ('ₐ', 'ₜ'), - ('ℂ', 'ℂ'), - ('ℇ', 'ℇ'), - ('ℊ', 'ℓ'), - ('ℕ', 'ℕ'), - ('℘', 'ℝ'), - ('ℤ', 'ℤ'), - ('Ω', 'Ω'), - ('ℨ', 'ℨ'), - ('K', 'ℹ'), - ('ℼ', 'ℿ'), - ('ⅅ', 'ⅉ'), - ('ⅎ', 'ⅎ'), - ('Ⅰ', 'ↈ'), - ('Ⰰ', 'ⳤ'), - ('Ⳬ', 'ⳮ'), - ('Ⳳ', 'ⳳ'), - ('ⴀ', 'ⴥ'), - ('ⴧ', 'ⴧ'), - ('ⴭ', 'ⴭ'), - 
('ⴰ', 'ⵧ'), - ('ⵯ', 'ⵯ'), - ('ⶀ', 'ⶖ'), - ('ⶠ', 'ⶦ'), - ('ⶨ', 'ⶮ'), - ('ⶰ', 'ⶶ'), - ('ⶸ', 'ⶾ'), - ('ⷀ', 'ⷆ'), - ('ⷈ', 'ⷎ'), - ('ⷐ', 'ⷖ'), - ('ⷘ', 'ⷞ'), - ('々', '〇'), - ('〡', '〩'), - ('〱', '〵'), - ('〸', '〼'), - ('ぁ', 'ゖ'), - ('゛', 'ゟ'), - ('ァ', 'ヺ'), - ('ー', 'ヿ'), - ('ㄅ', 'ㄯ'), - ('ㄱ', 'ㆎ'), - ('ㆠ', 'ㆿ'), - ('ㇰ', 'ㇿ'), - ('㐀', '䶿'), - ('一', 'ꒌ'), - ('ꓐ', 'ꓽ'), - ('ꔀ', 'ꘌ'), - ('ꘐ', 'ꘟ'), - ('ꘪ', 'ꘫ'), - ('Ꙁ', 'ꙮ'), - ('ꙿ', 'ꚝ'), - ('ꚠ', 'ꛯ'), - ('ꜗ', 'ꜟ'), - ('Ꜣ', 'ꞈ'), - ('Ꞌ', 'ꟍ'), - ('Ꟑ', 'ꟑ'), - ('ꟓ', 'ꟓ'), - ('ꟕ', 'Ƛ'), - ('ꟲ', 'ꠁ'), - ('ꠃ', 'ꠅ'), - ('ꠇ', 'ꠊ'), - ('ꠌ', 'ꠢ'), - ('ꡀ', 'ꡳ'), - ('ꢂ', 'ꢳ'), - ('ꣲ', 'ꣷ'), - ('ꣻ', 'ꣻ'), - ('ꣽ', 'ꣾ'), - ('ꤊ', 'ꤥ'), - ('ꤰ', 'ꥆ'), - ('ꥠ', 'ꥼ'), - ('ꦄ', 'ꦲ'), - ('ꧏ', 'ꧏ'), - ('ꧠ', 'ꧤ'), - ('ꧦ', 'ꧯ'), - ('ꧺ', 'ꧾ'), - ('ꨀ', 'ꨨ'), - ('ꩀ', 'ꩂ'), - ('ꩄ', 'ꩋ'), - ('ꩠ', 'ꩶ'), - ('ꩺ', 'ꩺ'), - ('ꩾ', 'ꪯ'), - ('ꪱ', 'ꪱ'), - ('ꪵ', 'ꪶ'), - ('ꪹ', 'ꪽ'), - ('ꫀ', 'ꫀ'), - ('ꫂ', 'ꫂ'), - ('ꫛ', 'ꫝ'), - ('ꫠ', 'ꫪ'), - ('ꫲ', 'ꫴ'), - ('ꬁ', 'ꬆ'), - ('ꬉ', 'ꬎ'), - ('ꬑ', 'ꬖ'), - ('ꬠ', 'ꬦ'), - ('ꬨ', 'ꬮ'), - ('ꬰ', 'ꭚ'), - ('ꭜ', 'ꭩ'), - ('ꭰ', 'ꯢ'), - ('가', '힣'), - ('ힰ', 'ퟆ'), - ('ퟋ', 'ퟻ'), - ('豈', '舘'), - ('並', '龎'), - ('ff', 'st'), - ('ﬓ', 'ﬗ'), - ('יִ', 'יִ'), - ('ײַ', 'ﬨ'), - ('שׁ', 'זּ'), - ('טּ', 'לּ'), - ('מּ', 'מּ'), - ('נּ', 'סּ'), - ('ףּ', 'פּ'), - ('צּ', 'ﮱ'), - ('ﯓ', 'ﴽ'), - ('ﵐ', 'ﶏ'), - ('ﶒ', 'ﷇ'), - ('ﷰ', 'ﷻ'), - ('ﹰ', 'ﹴ'), - ('ﹶ', 'ﻼ'), - ('A', 'Z'), - ('a', 'z'), - ('ヲ', 'ᄒ'), - ('ᅡ', 'ᅦ'), - ('ᅧ', 'ᅬ'), - ('ᅭ', 'ᅲ'), - ('ᅳ', 'ᅵ'), - ('𐀀', '𐀋'), - ('𐀍', '𐀦'), - ('𐀨', '𐀺'), - ('𐀼', '𐀽'), - ('𐀿', '𐁍'), - ('𐁐', '𐁝'), - ('𐂀', '𐃺'), - ('𐅀', '𐅴'), - ('𐊀', '𐊜'), - ('𐊠', '𐋐'), - ('𐌀', '𐌟'), - ('𐌭', '𐍊'), - ('𐍐', '𐍵'), - ('𐎀', '𐎝'), - ('𐎠', '𐏃'), - ('𐏈', '𐏏'), - ('𐏑', '𐏕'), - ('𐐀', '𐒝'), - ('𐒰', '𐓓'), - ('𐓘', '𐓻'), - ('𐔀', '𐔧'), - ('𐔰', '𐕣'), - ('𐕰', '𐕺'), - ('𐕼', '𐖊'), - ('𐖌', '𐖒'), - ('𐖔', '𐖕'), - ('𐖗', '𐖡'), - ('𐖣', '𐖱'), - ('𐖳', '𐖹'), - ('𐖻', '𐖼'), - ('𐗀', '𐗳'), - ('𐘀', '𐜶'), - ('𐝀', '𐝕'), - ('𐝠', '𐝧'), - ('𐞀', '𐞅'), - ('𐞇', '𐞰'), - ('𐞲', '𐞺'), - ('𐠀', '𐠅'), - ('𐠈', '𐠈'), - ('𐠊', '𐠵'), - ('𐠷', '𐠸'), - ('𐠼', '𐠼'), - ('𐠿', '𐡕'), - ('𐡠', '𐡶'), - ('𐢀', '𐢞'), - ('𐣠', '𐣲'), - ('𐣴', '𐣵'), - ('𐤀', '𐤕'), - ('𐤠', '𐤹'), - ('𐦀', '𐦷'), - ('𐦾', '𐦿'), - ('𐨀', '𐨀'), - ('𐨐', '𐨓'), - ('𐨕', '𐨗'), - ('𐨙', '𐨵'), - ('𐩠', '𐩼'), - ('𐪀', '𐪜'), - ('𐫀', '𐫇'), - ('𐫉', '𐫤'), - ('𐬀', '𐬵'), - ('𐭀', '𐭕'), - ('𐭠', '𐭲'), - ('𐮀', '𐮑'), - ('𐰀', '𐱈'), - ('𐲀', '𐲲'), - ('𐳀', '𐳲'), - ('𐴀', '𐴣'), - ('𐵊', '𐵥'), - ('𐵯', '𐶅'), - ('𐺀', '𐺩'), - ('𐺰', '𐺱'), - ('𐻂', '𐻄'), - ('𐼀', '𐼜'), - ('𐼧', '𐼧'), - ('𐼰', '𐽅'), - ('𐽰', '𐾁'), - ('𐾰', '𐿄'), - ('𐿠', '𐿶'), - ('𑀃', '𑀷'), - ('𑁱', '𑁲'), - ('𑁵', '𑁵'), - ('𑂃', '𑂯'), - ('𑃐', '𑃨'), - ('𑄃', '𑄦'), - ('𑅄', '𑅄'), - ('𑅇', '𑅇'), - ('𑅐', '𑅲'), - ('𑅶', '𑅶'), - ('𑆃', '𑆲'), - ('𑇁', '𑇄'), - ('𑇚', '𑇚'), - ('𑇜', '𑇜'), - ('𑈀', '𑈑'), - ('𑈓', '𑈫'), - ('𑈿', '𑉀'), - ('𑊀', '𑊆'), - ('𑊈', '𑊈'), - ('𑊊', '𑊍'), - ('𑊏', '𑊝'), - ('𑊟', '𑊨'), - ('𑊰', '𑋞'), - ('𑌅', '𑌌'), - ('𑌏', '𑌐'), - ('𑌓', '𑌨'), - ('𑌪', '𑌰'), - ('𑌲', '𑌳'), - ('𑌵', '𑌹'), - ('𑌽', '𑌽'), - ('𑍐', '𑍐'), - ('𑍝', '𑍡'), - ('𑎀', '𑎉'), - ('𑎋', '𑎋'), - ('𑎎', '𑎎'), - ('𑎐', '𑎵'), - ('𑎷', '𑎷'), - ('𑏑', '𑏑'), - ('𑏓', '𑏓'), - ('𑐀', '𑐴'), - ('𑑇', '𑑊'), - ('𑑟', '𑑡'), - ('𑒀', '𑒯'), - ('𑓄', '𑓅'), - ('𑓇', '𑓇'), - ('𑖀', '𑖮'), - ('𑗘', '𑗛'), - ('𑘀', '𑘯'), - ('𑙄', '𑙄'), - ('𑚀', '𑚪'), - ('𑚸', '𑚸'), - ('𑜀', '𑜚'), - ('𑝀', '𑝆'), - ('𑠀', '𑠫'), - ('𑢠', '𑣟'), - ('𑣿', '𑤆'), - ('𑤉', '𑤉'), - ('𑤌', '𑤓'), - ('𑤕', '𑤖'), - ('𑤘', '𑤯'), - ('𑤿', '𑤿'), - ('𑥁', '𑥁'), - ('𑦠', '𑦧'), - ('𑦪', '𑧐'), - ('𑧡', '𑧡'), - ('𑧣', '𑧣'), - ('𑨀', '𑨀'), - ('𑨋', '𑨲'), - ('𑨺', '𑨺'), - ('𑩐', 
'𑩐'), - ('𑩜', '𑪉'), - ('𑪝', '𑪝'), - ('𑪰', '𑫸'), - ('𑯀', '𑯠'), - ('𑰀', '𑰈'), - ('𑰊', '𑰮'), - ('𑱀', '𑱀'), - ('𑱲', '𑲏'), - ('𑴀', '𑴆'), - ('𑴈', '𑴉'), - ('𑴋', '𑴰'), - ('𑵆', '𑵆'), - ('𑵠', '𑵥'), - ('𑵧', '𑵨'), - ('𑵪', '𑶉'), - ('𑶘', '𑶘'), - ('𑻠', '𑻲'), - ('𑼂', '𑼂'), - ('𑼄', '𑼐'), - ('𑼒', '𑼳'), - ('𑾰', '𑾰'), - ('𒀀', '𒎙'), - ('𒐀', '𒑮'), - ('𒒀', '𒕃'), - ('𒾐', '𒿰'), - ('𓀀', '𓐯'), - ('𓑁', '𓑆'), - ('𓑠', '𔏺'), - ('𔐀', '𔙆'), - ('𖄀', '𖄝'), - ('𖠀', '𖨸'), - ('𖩀', '𖩞'), - ('𖩰', '𖪾'), - ('𖫐', '𖫭'), - ('𖬀', '𖬯'), - ('𖭀', '𖭃'), - ('𖭣', '𖭷'), - ('𖭽', '𖮏'), - ('𖵀', '𖵬'), - ('𖹀', '𖹿'), - ('𖼀', '𖽊'), - ('𖽐', '𖽐'), - ('𖾓', '𖾟'), - ('𖿠', '𖿡'), - ('𖿣', '𖿣'), - ('𗀀', '𘟷'), - ('𘠀', '𘳕'), - ('𘳿', '𘴈'), - ('𚿰', '𚿳'), - ('𚿵', '𚿻'), - ('𚿽', '𚿾'), - ('𛀀', '𛄢'), - ('𛄲', '𛄲'), - ('𛅐', '𛅒'), - ('𛅕', '𛅕'), - ('𛅤', '𛅧'), - ('𛅰', '𛋻'), - ('𛰀', '𛱪'), - ('𛱰', '𛱼'), - ('𛲀', '𛲈'), - ('𛲐', '𛲙'), - ('𝐀', '𝑔'), - ('𝑖', '𝒜'), - ('𝒞', '𝒟'), - ('𝒢', '𝒢'), - ('𝒥', '𝒦'), - ('𝒩', '𝒬'), - ('𝒮', '𝒹'), - ('𝒻', '𝒻'), - ('𝒽', '𝓃'), - ('𝓅', '𝔅'), - ('𝔇', '𝔊'), - ('𝔍', '𝔔'), - ('𝔖', '𝔜'), - ('𝔞', '𝔹'), - ('𝔻', '𝔾'), - ('𝕀', '𝕄'), - ('𝕆', '𝕆'), - ('𝕊', '𝕐'), - ('𝕒', '𝚥'), - ('𝚨', '𝛀'), - ('𝛂', '𝛚'), - ('𝛜', '𝛺'), - ('𝛼', '𝜔'), - ('𝜖', '𝜴'), - ('𝜶', '𝝎'), - ('𝝐', '𝝮'), - ('𝝰', '𝞈'), - ('𝞊', '𝞨'), - ('𝞪', '𝟂'), - ('𝟄', '𝟋'), - ('𝼀', '𝼞'), - ('𝼥', '𝼪'), - ('𞀰', '𞁭'), - ('𞄀', '𞄬'), - ('𞄷', '𞄽'), - ('𞅎', '𞅎'), - ('𞊐', '𞊭'), - ('𞋀', '𞋫'), - ('𞓐', '𞓫'), - ('𞗐', '𞗭'), - ('𞗰', '𞗰'), - ('𞟠', '𞟦'), - ('𞟨', '𞟫'), - ('𞟭', '𞟮'), - ('𞟰', '𞟾'), - ('𞠀', '𞣄'), - ('𞤀', '𞥃'), - ('𞥋', '𞥋'), - ('𞸀', '𞸃'), - ('𞸅', '𞸟'), - ('𞸡', '𞸢'), - ('𞸤', '𞸤'), - ('𞸧', '𞸧'), - ('𞸩', '𞸲'), - ('𞸴', '𞸷'), - ('𞸹', '𞸹'), - ('𞸻', '𞸻'), - ('𞹂', '𞹂'), - ('𞹇', '𞹇'), - ('𞹉', '𞹉'), - ('𞹋', '𞹋'), - ('𞹍', '𞹏'), - ('𞹑', '𞹒'), - ('𞹔', '𞹔'), - ('𞹗', '𞹗'), - ('𞹙', '𞹙'), - ('𞹛', '𞹛'), - ('𞹝', '𞹝'), - ('𞹟', '𞹟'), - ('𞹡', '𞹢'), - ('𞹤', '𞹤'), - ('𞹧', '𞹪'), - ('𞹬', '𞹲'), - ('𞹴', '𞹷'), - ('𞹹', '𞹼'), - ('𞹾', '𞹾'), - ('𞺀', '𞺉'), - ('𞺋', '𞺛'), - ('𞺡', '𞺣'), - ('𞺥', '𞺩'), - ('𞺫', '𞺻'), - ('𠀀', '𪛟'), - ('𪜀', '𫜹'), - ('𫝀', '𫠝'), - ('𫠠', '𬺡'), - ('𬺰', '𮯠'), - ('𮯰', '𮹝'), - ('丽', '𪘀'), - ('𰀀', '𱍊'), - ('𱍐', '𲎯'), -]; - -pub const IDEOGRAPHIC: &'static [(char, char)] = &[ - ('〆', '〇'), - ('〡', '〩'), - ('〸', '〺'), - ('㐀', '䶿'), - ('一', '鿿'), - ('豈', '舘'), - ('並', '龎'), - ('\u{16fe4}', '\u{16fe4}'), - ('𗀀', '𘟷'), - ('𘠀', '𘳕'), - ('𘳿', '𘴈'), - ('𛅰', '𛋻'), - ('𠀀', '𪛟'), - ('𪜀', '𫜹'), - ('𫝀', '𫠝'), - ('𫠠', '𬺡'), - ('𬺰', '𮯠'), - ('𮯰', '𮹝'), - ('丽', '𪘀'), - ('𰀀', '𱍊'), - ('𱍐', '𲎯'), -]; - -pub const INCB: &'static [(char, char)] = &[ - ('\u{300}', '\u{36f}'), - ('\u{483}', '\u{489}'), - ('\u{591}', '\u{5bd}'), - ('\u{5bf}', '\u{5bf}'), - ('\u{5c1}', '\u{5c2}'), - ('\u{5c4}', '\u{5c5}'), - ('\u{5c7}', '\u{5c7}'), - ('\u{610}', '\u{61a}'), - ('\u{64b}', '\u{65f}'), - ('\u{670}', '\u{670}'), - ('\u{6d6}', '\u{6dc}'), - ('\u{6df}', '\u{6e4}'), - ('\u{6e7}', '\u{6e8}'), - ('\u{6ea}', '\u{6ed}'), - ('\u{711}', '\u{711}'), - ('\u{730}', '\u{74a}'), - ('\u{7a6}', '\u{7b0}'), - ('\u{7eb}', '\u{7f3}'), - ('\u{7fd}', '\u{7fd}'), - ('\u{816}', '\u{819}'), - ('\u{81b}', '\u{823}'), - ('\u{825}', '\u{827}'), - ('\u{829}', '\u{82d}'), - ('\u{859}', '\u{85b}'), - ('\u{897}', '\u{89f}'), - ('\u{8ca}', '\u{8e1}'), - ('\u{8e3}', '\u{902}'), - ('क', '\u{93a}'), - ('\u{93c}', '\u{93c}'), - ('\u{941}', '\u{948}'), - ('\u{94d}', '\u{94d}'), - ('\u{951}', 'य़'), - ('\u{962}', '\u{963}'), - ('ॸ', 'ॿ'), - ('\u{981}', '\u{981}'), - ('ক', 'ন'), - ('প', 'র'), - ('ল', 'ল'), - ('শ', 'হ'), - ('\u{9bc}', '\u{9bc}'), - ('\u{9be}', '\u{9be}'), - ('\u{9c1}', 
'\u{9c4}'), - ('\u{9cd}', '\u{9cd}'), - ('\u{9d7}', '\u{9d7}'), - ('ড়', 'ঢ়'), - ('য়', 'য়'), - ('\u{9e2}', '\u{9e3}'), - ('ৰ', 'ৱ'), - ('\u{9fe}', '\u{9fe}'), - ('\u{a01}', '\u{a02}'), - ('\u{a3c}', '\u{a3c}'), - ('\u{a41}', '\u{a42}'), - ('\u{a47}', '\u{a48}'), - ('\u{a4b}', '\u{a4d}'), - ('\u{a51}', '\u{a51}'), - ('\u{a70}', '\u{a71}'), - ('\u{a75}', '\u{a75}'), - ('\u{a81}', '\u{a82}'), - ('ક', 'ન'), - ('પ', 'ર'), - ('લ', 'ળ'), - ('વ', 'હ'), - ('\u{abc}', '\u{abc}'), - ('\u{ac1}', '\u{ac5}'), - ('\u{ac7}', '\u{ac8}'), - ('\u{acd}', '\u{acd}'), - ('\u{ae2}', '\u{ae3}'), - ('ૹ', '\u{aff}'), - ('\u{b01}', '\u{b01}'), - ('କ', 'ନ'), - ('ପ', 'ର'), - ('ଲ', 'ଳ'), - ('ଵ', 'ହ'), - ('\u{b3c}', '\u{b3c}'), - ('\u{b3e}', '\u{b3f}'), - ('\u{b41}', '\u{b44}'), - ('\u{b4d}', '\u{b4d}'), - ('\u{b55}', '\u{b57}'), - ('ଡ଼', 'ଢ଼'), - ('ୟ', 'ୟ'), - ('\u{b62}', '\u{b63}'), - ('ୱ', 'ୱ'), - ('\u{b82}', '\u{b82}'), - ('\u{bbe}', '\u{bbe}'), - ('\u{bc0}', '\u{bc0}'), - ('\u{bcd}', '\u{bcd}'), - ('\u{bd7}', '\u{bd7}'), - ('\u{c00}', '\u{c00}'), - ('\u{c04}', '\u{c04}'), - ('క', 'న'), - ('ప', 'హ'), - ('\u{c3c}', '\u{c3c}'), - ('\u{c3e}', '\u{c40}'), - ('\u{c46}', '\u{c48}'), - ('\u{c4a}', '\u{c4d}'), - ('\u{c55}', '\u{c56}'), - ('ౘ', 'ౚ'), - ('\u{c62}', '\u{c63}'), - ('\u{c81}', '\u{c81}'), - ('\u{cbc}', '\u{cbc}'), - ('\u{cbf}', '\u{cc0}'), - ('\u{cc2}', '\u{cc2}'), - ('\u{cc6}', '\u{cc8}'), - ('\u{cca}', '\u{ccd}'), - ('\u{cd5}', '\u{cd6}'), - ('\u{ce2}', '\u{ce3}'), - ('\u{d00}', '\u{d01}'), - ('ക', '\u{d3c}'), - ('\u{d3e}', '\u{d3e}'), - ('\u{d41}', '\u{d44}'), - ('\u{d4d}', '\u{d4d}'), - ('\u{d57}', '\u{d57}'), - ('\u{d62}', '\u{d63}'), - ('\u{d81}', '\u{d81}'), - ('\u{dca}', '\u{dca}'), - ('\u{dcf}', '\u{dcf}'), - ('\u{dd2}', '\u{dd4}'), - ('\u{dd6}', '\u{dd6}'), - ('\u{ddf}', '\u{ddf}'), - ('\u{e31}', '\u{e31}'), - ('\u{e34}', '\u{e3a}'), - ('\u{e47}', '\u{e4e}'), - ('\u{eb1}', '\u{eb1}'), - ('\u{eb4}', '\u{ebc}'), - ('\u{ec8}', '\u{ece}'), - ('\u{f18}', '\u{f19}'), - ('\u{f35}', '\u{f35}'), - ('\u{f37}', '\u{f37}'), - ('\u{f39}', '\u{f39}'), - ('\u{f71}', '\u{f7e}'), - ('\u{f80}', '\u{f84}'), - ('\u{f86}', '\u{f87}'), - ('\u{f8d}', '\u{f97}'), - ('\u{f99}', '\u{fbc}'), - ('\u{fc6}', '\u{fc6}'), - ('\u{102d}', '\u{1030}'), - ('\u{1032}', '\u{1037}'), - ('\u{1039}', '\u{103a}'), - ('\u{103d}', '\u{103e}'), - ('\u{1058}', '\u{1059}'), - ('\u{105e}', '\u{1060}'), - ('\u{1071}', '\u{1074}'), - ('\u{1082}', '\u{1082}'), - ('\u{1085}', '\u{1086}'), - ('\u{108d}', '\u{108d}'), - ('\u{109d}', '\u{109d}'), - ('\u{135d}', '\u{135f}'), - ('\u{1712}', '\u{1715}'), - ('\u{1732}', '\u{1734}'), - ('\u{1752}', '\u{1753}'), - ('\u{1772}', '\u{1773}'), - ('\u{17b4}', '\u{17b5}'), - ('\u{17b7}', '\u{17bd}'), - ('\u{17c6}', '\u{17c6}'), - ('\u{17c9}', '\u{17d3}'), - ('\u{17dd}', '\u{17dd}'), - ('\u{180b}', '\u{180d}'), - ('\u{180f}', '\u{180f}'), - ('\u{1885}', '\u{1886}'), - ('\u{18a9}', '\u{18a9}'), - ('\u{1920}', '\u{1922}'), - ('\u{1927}', '\u{1928}'), - ('\u{1932}', '\u{1932}'), - ('\u{1939}', '\u{193b}'), - ('\u{1a17}', '\u{1a18}'), - ('\u{1a1b}', '\u{1a1b}'), - ('\u{1a56}', '\u{1a56}'), - ('\u{1a58}', '\u{1a5e}'), - ('\u{1a60}', '\u{1a60}'), - ('\u{1a62}', '\u{1a62}'), - ('\u{1a65}', '\u{1a6c}'), - ('\u{1a73}', '\u{1a7c}'), - ('\u{1a7f}', '\u{1a7f}'), - ('\u{1ab0}', '\u{1ace}'), - ('\u{1b00}', '\u{1b03}'), - ('\u{1b34}', '\u{1b3d}'), - ('\u{1b42}', '\u{1b44}'), - ('\u{1b6b}', '\u{1b73}'), - ('\u{1b80}', '\u{1b81}'), - ('\u{1ba2}', '\u{1ba5}'), - ('\u{1ba8}', '\u{1bad}'), - ('\u{1be6}', '\u{1be6}'), - ('\u{1be8}', 
'\u{1be9}'), - ('\u{1bed}', '\u{1bed}'), - ('\u{1bef}', '\u{1bf3}'), - ('\u{1c2c}', '\u{1c33}'), - ('\u{1c36}', '\u{1c37}'), - ('\u{1cd0}', '\u{1cd2}'), - ('\u{1cd4}', '\u{1ce0}'), - ('\u{1ce2}', '\u{1ce8}'), - ('\u{1ced}', '\u{1ced}'), - ('\u{1cf4}', '\u{1cf4}'), - ('\u{1cf8}', '\u{1cf9}'), - ('\u{1dc0}', '\u{1dff}'), - ('\u{200d}', '\u{200d}'), - ('\u{20d0}', '\u{20f0}'), - ('\u{2cef}', '\u{2cf1}'), - ('\u{2d7f}', '\u{2d7f}'), - ('\u{2de0}', '\u{2dff}'), - ('\u{302a}', '\u{302f}'), - ('\u{3099}', '\u{309a}'), - ('\u{a66f}', '\u{a672}'), - ('\u{a674}', '\u{a67d}'), - ('\u{a69e}', '\u{a69f}'), - ('\u{a6f0}', '\u{a6f1}'), - ('\u{a802}', '\u{a802}'), - ('\u{a806}', '\u{a806}'), - ('\u{a80b}', '\u{a80b}'), - ('\u{a825}', '\u{a826}'), - ('\u{a82c}', '\u{a82c}'), - ('\u{a8c4}', '\u{a8c5}'), - ('\u{a8e0}', '\u{a8f1}'), - ('\u{a8ff}', '\u{a8ff}'), - ('\u{a926}', '\u{a92d}'), - ('\u{a947}', '\u{a951}'), - ('\u{a953}', '\u{a953}'), - ('\u{a980}', '\u{a982}'), - ('\u{a9b3}', '\u{a9b3}'), - ('\u{a9b6}', '\u{a9b9}'), - ('\u{a9bc}', '\u{a9bd}'), - ('\u{a9c0}', '\u{a9c0}'), - ('\u{a9e5}', '\u{a9e5}'), - ('\u{aa29}', '\u{aa2e}'), - ('\u{aa31}', '\u{aa32}'), - ('\u{aa35}', '\u{aa36}'), - ('\u{aa43}', '\u{aa43}'), - ('\u{aa4c}', '\u{aa4c}'), - ('\u{aa7c}', '\u{aa7c}'), - ('\u{aab0}', '\u{aab0}'), - ('\u{aab2}', '\u{aab4}'), - ('\u{aab7}', '\u{aab8}'), - ('\u{aabe}', '\u{aabf}'), - ('\u{aac1}', '\u{aac1}'), - ('\u{aaec}', '\u{aaed}'), - ('\u{aaf6}', '\u{aaf6}'), - ('\u{abe5}', '\u{abe5}'), - ('\u{abe8}', '\u{abe8}'), - ('\u{abed}', '\u{abed}'), - ('\u{fb1e}', '\u{fb1e}'), - ('\u{fe00}', '\u{fe0f}'), - ('\u{fe20}', '\u{fe2f}'), - ('\u{ff9e}', '\u{ff9f}'), - ('\u{101fd}', '\u{101fd}'), - ('\u{102e0}', '\u{102e0}'), - ('\u{10376}', '\u{1037a}'), - ('\u{10a01}', '\u{10a03}'), - ('\u{10a05}', '\u{10a06}'), - ('\u{10a0c}', '\u{10a0f}'), - ('\u{10a38}', '\u{10a3a}'), - ('\u{10a3f}', '\u{10a3f}'), - ('\u{10ae5}', '\u{10ae6}'), - ('\u{10d24}', '\u{10d27}'), - ('\u{10d69}', '\u{10d6d}'), - ('\u{10eab}', '\u{10eac}'), - ('\u{10efc}', '\u{10eff}'), - ('\u{10f46}', '\u{10f50}'), - ('\u{10f82}', '\u{10f85}'), - ('\u{11001}', '\u{11001}'), - ('\u{11038}', '\u{11046}'), - ('\u{11070}', '\u{11070}'), - ('\u{11073}', '\u{11074}'), - ('\u{1107f}', '\u{11081}'), - ('\u{110b3}', '\u{110b6}'), - ('\u{110b9}', '\u{110ba}'), - ('\u{110c2}', '\u{110c2}'), - ('\u{11100}', '\u{11102}'), - ('\u{11127}', '\u{1112b}'), - ('\u{1112d}', '\u{11134}'), - ('\u{11173}', '\u{11173}'), - ('\u{11180}', '\u{11181}'), - ('\u{111b6}', '\u{111be}'), - ('\u{111c0}', '\u{111c0}'), - ('\u{111c9}', '\u{111cc}'), - ('\u{111cf}', '\u{111cf}'), - ('\u{1122f}', '\u{11231}'), - ('\u{11234}', '\u{11237}'), - ('\u{1123e}', '\u{1123e}'), - ('\u{11241}', '\u{11241}'), - ('\u{112df}', '\u{112df}'), - ('\u{112e3}', '\u{112ea}'), - ('\u{11300}', '\u{11301}'), - ('\u{1133b}', '\u{1133c}'), - ('\u{1133e}', '\u{1133e}'), - ('\u{11340}', '\u{11340}'), - ('\u{1134d}', '\u{1134d}'), - ('\u{11357}', '\u{11357}'), - ('\u{11366}', '\u{1136c}'), - ('\u{11370}', '\u{11374}'), - ('\u{113b8}', '\u{113b8}'), - ('\u{113bb}', '\u{113c0}'), - ('\u{113c2}', '\u{113c2}'), - ('\u{113c5}', '\u{113c5}'), - ('\u{113c7}', '\u{113c9}'), - ('\u{113ce}', '\u{113d0}'), - ('\u{113d2}', '\u{113d2}'), - ('\u{113e1}', '\u{113e2}'), - ('\u{11438}', '\u{1143f}'), - ('\u{11442}', '\u{11444}'), - ('\u{11446}', '\u{11446}'), - ('\u{1145e}', '\u{1145e}'), - ('\u{114b0}', '\u{114b0}'), - ('\u{114b3}', '\u{114b8}'), - ('\u{114ba}', '\u{114ba}'), - ('\u{114bd}', '\u{114bd}'), - ('\u{114bf}', '\u{114c0}'), 
- ('\u{114c2}', '\u{114c3}'), - ('\u{115af}', '\u{115af}'), - ('\u{115b2}', '\u{115b5}'), - ('\u{115bc}', '\u{115bd}'), - ('\u{115bf}', '\u{115c0}'), - ('\u{115dc}', '\u{115dd}'), - ('\u{11633}', '\u{1163a}'), - ('\u{1163d}', '\u{1163d}'), - ('\u{1163f}', '\u{11640}'), - ('\u{116ab}', '\u{116ab}'), - ('\u{116ad}', '\u{116ad}'), - ('\u{116b0}', '\u{116b7}'), - ('\u{1171d}', '\u{1171d}'), - ('\u{1171f}', '\u{1171f}'), - ('\u{11722}', '\u{11725}'), - ('\u{11727}', '\u{1172b}'), - ('\u{1182f}', '\u{11837}'), - ('\u{11839}', '\u{1183a}'), - ('\u{11930}', '\u{11930}'), - ('\u{1193b}', '\u{1193e}'), - ('\u{11943}', '\u{11943}'), - ('\u{119d4}', '\u{119d7}'), - ('\u{119da}', '\u{119db}'), - ('\u{119e0}', '\u{119e0}'), - ('\u{11a01}', '\u{11a0a}'), - ('\u{11a33}', '\u{11a38}'), - ('\u{11a3b}', '\u{11a3e}'), - ('\u{11a47}', '\u{11a47}'), - ('\u{11a51}', '\u{11a56}'), - ('\u{11a59}', '\u{11a5b}'), - ('\u{11a8a}', '\u{11a96}'), - ('\u{11a98}', '\u{11a99}'), - ('\u{11c30}', '\u{11c36}'), - ('\u{11c38}', '\u{11c3d}'), - ('\u{11c3f}', '\u{11c3f}'), - ('\u{11c92}', '\u{11ca7}'), - ('\u{11caa}', '\u{11cb0}'), - ('\u{11cb2}', '\u{11cb3}'), - ('\u{11cb5}', '\u{11cb6}'), - ('\u{11d31}', '\u{11d36}'), - ('\u{11d3a}', '\u{11d3a}'), - ('\u{11d3c}', '\u{11d3d}'), - ('\u{11d3f}', '\u{11d45}'), - ('\u{11d47}', '\u{11d47}'), - ('\u{11d90}', '\u{11d91}'), - ('\u{11d95}', '\u{11d95}'), - ('\u{11d97}', '\u{11d97}'), - ('\u{11ef3}', '\u{11ef4}'), - ('\u{11f00}', '\u{11f01}'), - ('\u{11f36}', '\u{11f3a}'), - ('\u{11f40}', '\u{11f42}'), - ('\u{11f5a}', '\u{11f5a}'), - ('\u{13440}', '\u{13440}'), - ('\u{13447}', '\u{13455}'), - ('\u{1611e}', '\u{16129}'), - ('\u{1612d}', '\u{1612f}'), - ('\u{16af0}', '\u{16af4}'), - ('\u{16b30}', '\u{16b36}'), - ('\u{16f4f}', '\u{16f4f}'), - ('\u{16f8f}', '\u{16f92}'), - ('\u{16fe4}', '\u{16fe4}'), - ('\u{16ff0}', '\u{16ff1}'), - ('\u{1bc9d}', '\u{1bc9e}'), - ('\u{1cf00}', '\u{1cf2d}'), - ('\u{1cf30}', '\u{1cf46}'), - ('\u{1d165}', '\u{1d169}'), - ('\u{1d16d}', '\u{1d172}'), - ('\u{1d17b}', '\u{1d182}'), - ('\u{1d185}', '\u{1d18b}'), - ('\u{1d1aa}', '\u{1d1ad}'), - ('\u{1d242}', '\u{1d244}'), - ('\u{1da00}', '\u{1da36}'), - ('\u{1da3b}', '\u{1da6c}'), - ('\u{1da75}', '\u{1da75}'), - ('\u{1da84}', '\u{1da84}'), - ('\u{1da9b}', '\u{1da9f}'), - ('\u{1daa1}', '\u{1daaf}'), - ('\u{1e000}', '\u{1e006}'), - ('\u{1e008}', '\u{1e018}'), - ('\u{1e01b}', '\u{1e021}'), - ('\u{1e023}', '\u{1e024}'), - ('\u{1e026}', '\u{1e02a}'), - ('\u{1e08f}', '\u{1e08f}'), - ('\u{1e130}', '\u{1e136}'), - ('\u{1e2ae}', '\u{1e2ae}'), - ('\u{1e2ec}', '\u{1e2ef}'), - ('\u{1e4ec}', '\u{1e4ef}'), - ('\u{1e5ee}', '\u{1e5ef}'), - ('\u{1e8d0}', '\u{1e8d6}'), - ('\u{1e944}', '\u{1e94a}'), - ('🏻', '🏿'), - ('\u{e0020}', '\u{e007f}'), - ('\u{e0100}', '\u{e01ef}'), -]; - -pub const JOIN_CONTROL: &'static [(char, char)] = &[('\u{200c}', '\u{200d}')]; - -pub const LOGICAL_ORDER_EXCEPTION: &'static [(char, char)] = &[ - ('เ', 'ไ'), - ('ເ', 'ໄ'), - ('ᦵ', 'ᦷ'), - ('ᦺ', 'ᦺ'), - ('ꪵ', 'ꪶ'), - ('ꪹ', 'ꪹ'), - ('ꪻ', 'ꪼ'), -]; - -pub const LOWERCASE: &'static [(char, char)] = &[ - ('a', 'z'), - ('ª', 'ª'), - ('µ', 'µ'), - ('º', 'º'), - ('ß', 'ö'), - ('ø', 'ÿ'), - ('ā', 'ā'), - ('ă', 'ă'), - ('ą', 'ą'), - ('ć', 'ć'), - ('ĉ', 'ĉ'), - ('ċ', 'ċ'), - ('č', 'č'), - ('ď', 'ď'), - ('đ', 'đ'), - ('ē', 'ē'), - ('ĕ', 'ĕ'), - ('ė', 'ė'), - ('ę', 'ę'), - ('ě', 'ě'), - ('ĝ', 'ĝ'), - ('ğ', 'ğ'), - ('ġ', 'ġ'), - ('ģ', 'ģ'), - ('ĥ', 'ĥ'), - ('ħ', 'ħ'), - ('ĩ', 'ĩ'), - ('ī', 'ī'), - ('ĭ', 'ĭ'), - ('į', 'į'), - ('ı', 'ı'), - ('ij', 'ij'), - ('ĵ', 'ĵ'), - ('ķ', 
'ĸ'), - ('ĺ', 'ĺ'), - ('ļ', 'ļ'), - ('ľ', 'ľ'), - ('ŀ', 'ŀ'), - ('ł', 'ł'), - ('ń', 'ń'), - ('ņ', 'ņ'), - ('ň', 'ʼn'), - ('ŋ', 'ŋ'), - ('ō', 'ō'), - ('ŏ', 'ŏ'), - ('ő', 'ő'), - ('œ', 'œ'), - ('ŕ', 'ŕ'), - ('ŗ', 'ŗ'), - ('ř', 'ř'), - ('ś', 'ś'), - ('ŝ', 'ŝ'), - ('ş', 'ş'), - ('š', 'š'), - ('ţ', 'ţ'), - ('ť', 'ť'), - ('ŧ', 'ŧ'), - ('ũ', 'ũ'), - ('ū', 'ū'), - ('ŭ', 'ŭ'), - ('ů', 'ů'), - ('ű', 'ű'), - ('ų', 'ų'), - ('ŵ', 'ŵ'), - ('ŷ', 'ŷ'), - ('ź', 'ź'), - ('ż', 'ż'), - ('ž', 'ƀ'), - ('ƃ', 'ƃ'), - ('ƅ', 'ƅ'), - ('ƈ', 'ƈ'), - ('ƌ', 'ƍ'), - ('ƒ', 'ƒ'), - ('ƕ', 'ƕ'), - ('ƙ', 'ƛ'), - ('ƞ', 'ƞ'), - ('ơ', 'ơ'), - ('ƣ', 'ƣ'), - ('ƥ', 'ƥ'), - ('ƨ', 'ƨ'), - ('ƪ', 'ƫ'), - ('ƭ', 'ƭ'), - ('ư', 'ư'), - ('ƴ', 'ƴ'), - ('ƶ', 'ƶ'), - ('ƹ', 'ƺ'), - ('ƽ', 'ƿ'), - ('dž', 'dž'), - ('lj', 'lj'), - ('nj', 'nj'), - ('ǎ', 'ǎ'), - ('ǐ', 'ǐ'), - ('ǒ', 'ǒ'), - ('ǔ', 'ǔ'), - ('ǖ', 'ǖ'), - ('ǘ', 'ǘ'), - ('ǚ', 'ǚ'), - ('ǜ', 'ǝ'), - ('ǟ', 'ǟ'), - ('ǡ', 'ǡ'), - ('ǣ', 'ǣ'), - ('ǥ', 'ǥ'), - ('ǧ', 'ǧ'), - ('ǩ', 'ǩ'), - ('ǫ', 'ǫ'), - ('ǭ', 'ǭ'), - ('ǯ', 'ǰ'), - ('dz', 'dz'), - ('ǵ', 'ǵ'), - ('ǹ', 'ǹ'), - ('ǻ', 'ǻ'), - ('ǽ', 'ǽ'), - ('ǿ', 'ǿ'), - ('ȁ', 'ȁ'), - ('ȃ', 'ȃ'), - ('ȅ', 'ȅ'), - ('ȇ', 'ȇ'), - ('ȉ', 'ȉ'), - ('ȋ', 'ȋ'), - ('ȍ', 'ȍ'), - ('ȏ', 'ȏ'), - ('ȑ', 'ȑ'), - ('ȓ', 'ȓ'), - ('ȕ', 'ȕ'), - ('ȗ', 'ȗ'), - ('ș', 'ș'), - ('ț', 'ț'), - ('ȝ', 'ȝ'), - ('ȟ', 'ȟ'), - ('ȡ', 'ȡ'), - ('ȣ', 'ȣ'), - ('ȥ', 'ȥ'), - ('ȧ', 'ȧ'), - ('ȩ', 'ȩ'), - ('ȫ', 'ȫ'), - ('ȭ', 'ȭ'), - ('ȯ', 'ȯ'), - ('ȱ', 'ȱ'), - ('ȳ', 'ȹ'), - ('ȼ', 'ȼ'), - ('ȿ', 'ɀ'), - ('ɂ', 'ɂ'), - ('ɇ', 'ɇ'), - ('ɉ', 'ɉ'), - ('ɋ', 'ɋ'), - ('ɍ', 'ɍ'), - ('ɏ', 'ʓ'), - ('ʕ', 'ʸ'), - ('ˀ', 'ˁ'), - ('ˠ', 'ˤ'), - ('\u{345}', '\u{345}'), - ('ͱ', 'ͱ'), - ('ͳ', 'ͳ'), - ('ͷ', 'ͷ'), - ('ͺ', 'ͽ'), - ('ΐ', 'ΐ'), - ('ά', 'ώ'), - ('ϐ', 'ϑ'), - ('ϕ', 'ϗ'), - ('ϙ', 'ϙ'), - ('ϛ', 'ϛ'), - ('ϝ', 'ϝ'), - ('ϟ', 'ϟ'), - ('ϡ', 'ϡ'), - ('ϣ', 'ϣ'), - ('ϥ', 'ϥ'), - ('ϧ', 'ϧ'), - ('ϩ', 'ϩ'), - ('ϫ', 'ϫ'), - ('ϭ', 'ϭ'), - ('ϯ', 'ϳ'), - ('ϵ', 'ϵ'), - ('ϸ', 'ϸ'), - ('ϻ', 'ϼ'), - ('а', 'џ'), - ('ѡ', 'ѡ'), - ('ѣ', 'ѣ'), - ('ѥ', 'ѥ'), - ('ѧ', 'ѧ'), - ('ѩ', 'ѩ'), - ('ѫ', 'ѫ'), - ('ѭ', 'ѭ'), - ('ѯ', 'ѯ'), - ('ѱ', 'ѱ'), - ('ѳ', 'ѳ'), - ('ѵ', 'ѵ'), - ('ѷ', 'ѷ'), - ('ѹ', 'ѹ'), - ('ѻ', 'ѻ'), - ('ѽ', 'ѽ'), - ('ѿ', 'ѿ'), - ('ҁ', 'ҁ'), - ('ҋ', 'ҋ'), - ('ҍ', 'ҍ'), - ('ҏ', 'ҏ'), - ('ґ', 'ґ'), - ('ғ', 'ғ'), - ('ҕ', 'ҕ'), - ('җ', 'җ'), - ('ҙ', 'ҙ'), - ('қ', 'қ'), - ('ҝ', 'ҝ'), - ('ҟ', 'ҟ'), - ('ҡ', 'ҡ'), - ('ң', 'ң'), - ('ҥ', 'ҥ'), - ('ҧ', 'ҧ'), - ('ҩ', 'ҩ'), - ('ҫ', 'ҫ'), - ('ҭ', 'ҭ'), - ('ү', 'ү'), - ('ұ', 'ұ'), - ('ҳ', 'ҳ'), - ('ҵ', 'ҵ'), - ('ҷ', 'ҷ'), - ('ҹ', 'ҹ'), - ('һ', 'һ'), - ('ҽ', 'ҽ'), - ('ҿ', 'ҿ'), - ('ӂ', 'ӂ'), - ('ӄ', 'ӄ'), - ('ӆ', 'ӆ'), - ('ӈ', 'ӈ'), - ('ӊ', 'ӊ'), - ('ӌ', 'ӌ'), - ('ӎ', 'ӏ'), - ('ӑ', 'ӑ'), - ('ӓ', 'ӓ'), - ('ӕ', 'ӕ'), - ('ӗ', 'ӗ'), - ('ә', 'ә'), - ('ӛ', 'ӛ'), - ('ӝ', 'ӝ'), - ('ӟ', 'ӟ'), - ('ӡ', 'ӡ'), - ('ӣ', 'ӣ'), - ('ӥ', 'ӥ'), - ('ӧ', 'ӧ'), - ('ө', 'ө'), - ('ӫ', 'ӫ'), - ('ӭ', 'ӭ'), - ('ӯ', 'ӯ'), - ('ӱ', 'ӱ'), - ('ӳ', 'ӳ'), - ('ӵ', 'ӵ'), - ('ӷ', 'ӷ'), - ('ӹ', 'ӹ'), - ('ӻ', 'ӻ'), - ('ӽ', 'ӽ'), - ('ӿ', 'ӿ'), - ('ԁ', 'ԁ'), - ('ԃ', 'ԃ'), - ('ԅ', 'ԅ'), - ('ԇ', 'ԇ'), - ('ԉ', 'ԉ'), - ('ԋ', 'ԋ'), - ('ԍ', 'ԍ'), - ('ԏ', 'ԏ'), - ('ԑ', 'ԑ'), - ('ԓ', 'ԓ'), - ('ԕ', 'ԕ'), - ('ԗ', 'ԗ'), - ('ԙ', 'ԙ'), - ('ԛ', 'ԛ'), - ('ԝ', 'ԝ'), - ('ԟ', 'ԟ'), - ('ԡ', 'ԡ'), - ('ԣ', 'ԣ'), - ('ԥ', 'ԥ'), - ('ԧ', 'ԧ'), - ('ԩ', 'ԩ'), - ('ԫ', 'ԫ'), - ('ԭ', 'ԭ'), - ('ԯ', 'ԯ'), - ('ՠ', 'ֈ'), - ('ა', 'ჺ'), - ('ჼ', 'ჿ'), - ('ᏸ', 'ᏽ'), - ('ᲀ', 'ᲈ'), - ('ᲊ', 'ᲊ'), - ('ᴀ', 'ᶿ'), - ('ḁ', 'ḁ'), - ('ḃ', 'ḃ'), - ('ḅ', 'ḅ'), - ('ḇ', 'ḇ'), - ('ḉ', 'ḉ'), 
- ('ḋ', 'ḋ'), - ('ḍ', 'ḍ'), - ('ḏ', 'ḏ'), - ('ḑ', 'ḑ'), - ('ḓ', 'ḓ'), - ('ḕ', 'ḕ'), - ('ḗ', 'ḗ'), - ('ḙ', 'ḙ'), - ('ḛ', 'ḛ'), - ('ḝ', 'ḝ'), - ('ḟ', 'ḟ'), - ('ḡ', 'ḡ'), - ('ḣ', 'ḣ'), - ('ḥ', 'ḥ'), - ('ḧ', 'ḧ'), - ('ḩ', 'ḩ'), - ('ḫ', 'ḫ'), - ('ḭ', 'ḭ'), - ('ḯ', 'ḯ'), - ('ḱ', 'ḱ'), - ('ḳ', 'ḳ'), - ('ḵ', 'ḵ'), - ('ḷ', 'ḷ'), - ('ḹ', 'ḹ'), - ('ḻ', 'ḻ'), - ('ḽ', 'ḽ'), - ('ḿ', 'ḿ'), - ('ṁ', 'ṁ'), - ('ṃ', 'ṃ'), - ('ṅ', 'ṅ'), - ('ṇ', 'ṇ'), - ('ṉ', 'ṉ'), - ('ṋ', 'ṋ'), - ('ṍ', 'ṍ'), - ('ṏ', 'ṏ'), - ('ṑ', 'ṑ'), - ('ṓ', 'ṓ'), - ('ṕ', 'ṕ'), - ('ṗ', 'ṗ'), - ('ṙ', 'ṙ'), - ('ṛ', 'ṛ'), - ('ṝ', 'ṝ'), - ('ṟ', 'ṟ'), - ('ṡ', 'ṡ'), - ('ṣ', 'ṣ'), - ('ṥ', 'ṥ'), - ('ṧ', 'ṧ'), - ('ṩ', 'ṩ'), - ('ṫ', 'ṫ'), - ('ṭ', 'ṭ'), - ('ṯ', 'ṯ'), - ('ṱ', 'ṱ'), - ('ṳ', 'ṳ'), - ('ṵ', 'ṵ'), - ('ṷ', 'ṷ'), - ('ṹ', 'ṹ'), - ('ṻ', 'ṻ'), - ('ṽ', 'ṽ'), - ('ṿ', 'ṿ'), - ('ẁ', 'ẁ'), - ('ẃ', 'ẃ'), - ('ẅ', 'ẅ'), - ('ẇ', 'ẇ'), - ('ẉ', 'ẉ'), - ('ẋ', 'ẋ'), - ('ẍ', 'ẍ'), - ('ẏ', 'ẏ'), - ('ẑ', 'ẑ'), - ('ẓ', 'ẓ'), - ('ẕ', 'ẝ'), - ('ẟ', 'ẟ'), - ('ạ', 'ạ'), - ('ả', 'ả'), - ('ấ', 'ấ'), - ('ầ', 'ầ'), - ('ẩ', 'ẩ'), - ('ẫ', 'ẫ'), - ('ậ', 'ậ'), - ('ắ', 'ắ'), - ('ằ', 'ằ'), - ('ẳ', 'ẳ'), - ('ẵ', 'ẵ'), - ('ặ', 'ặ'), - ('ẹ', 'ẹ'), - ('ẻ', 'ẻ'), - ('ẽ', 'ẽ'), - ('ế', 'ế'), - ('ề', 'ề'), - ('ể', 'ể'), - ('ễ', 'ễ'), - ('ệ', 'ệ'), - ('ỉ', 'ỉ'), - ('ị', 'ị'), - ('ọ', 'ọ'), - ('ỏ', 'ỏ'), - ('ố', 'ố'), - ('ồ', 'ồ'), - ('ổ', 'ổ'), - ('ỗ', 'ỗ'), - ('ộ', 'ộ'), - ('ớ', 'ớ'), - ('ờ', 'ờ'), - ('ở', 'ở'), - ('ỡ', 'ỡ'), - ('ợ', 'ợ'), - ('ụ', 'ụ'), - ('ủ', 'ủ'), - ('ứ', 'ứ'), - ('ừ', 'ừ'), - ('ử', 'ử'), - ('ữ', 'ữ'), - ('ự', 'ự'), - ('ỳ', 'ỳ'), - ('ỵ', 'ỵ'), - ('ỷ', 'ỷ'), - ('ỹ', 'ỹ'), - ('ỻ', 'ỻ'), - ('ỽ', 'ỽ'), - ('ỿ', 'ἇ'), - ('ἐ', 'ἕ'), - ('ἠ', 'ἧ'), - ('ἰ', 'ἷ'), - ('ὀ', 'ὅ'), - ('ὐ', 'ὗ'), - ('ὠ', 'ὧ'), - ('ὰ', 'ώ'), - ('ᾀ', 'ᾇ'), - ('ᾐ', 'ᾗ'), - ('ᾠ', 'ᾧ'), - ('ᾰ', 'ᾴ'), - ('ᾶ', 'ᾷ'), - ('ι', 'ι'), - ('ῂ', 'ῄ'), - ('ῆ', 'ῇ'), - ('ῐ', 'ΐ'), - ('ῖ', 'ῗ'), - ('ῠ', 'ῧ'), - ('ῲ', 'ῴ'), - ('ῶ', 'ῷ'), - ('ⁱ', 'ⁱ'), - ('ⁿ', 'ⁿ'), - ('ₐ', 'ₜ'), - ('ℊ', 'ℊ'), - ('ℎ', 'ℏ'), - ('ℓ', 'ℓ'), - ('ℯ', 'ℯ'), - ('ℴ', 'ℴ'), - ('ℹ', 'ℹ'), - ('ℼ', 'ℽ'), - ('ⅆ', 'ⅉ'), - ('ⅎ', 'ⅎ'), - ('ⅰ', 'ⅿ'), - ('ↄ', 'ↄ'), - ('ⓐ', 'ⓩ'), - ('ⰰ', 'ⱟ'), - ('ⱡ', 'ⱡ'), - ('ⱥ', 'ⱦ'), - ('ⱨ', 'ⱨ'), - ('ⱪ', 'ⱪ'), - ('ⱬ', 'ⱬ'), - ('ⱱ', 'ⱱ'), - ('ⱳ', 'ⱴ'), - ('ⱶ', 'ⱽ'), - ('ⲁ', 'ⲁ'), - ('ⲃ', 'ⲃ'), - ('ⲅ', 'ⲅ'), - ('ⲇ', 'ⲇ'), - ('ⲉ', 'ⲉ'), - ('ⲋ', 'ⲋ'), - ('ⲍ', 'ⲍ'), - ('ⲏ', 'ⲏ'), - ('ⲑ', 'ⲑ'), - ('ⲓ', 'ⲓ'), - ('ⲕ', 'ⲕ'), - ('ⲗ', 'ⲗ'), - ('ⲙ', 'ⲙ'), - ('ⲛ', 'ⲛ'), - ('ⲝ', 'ⲝ'), - ('ⲟ', 'ⲟ'), - ('ⲡ', 'ⲡ'), - ('ⲣ', 'ⲣ'), - ('ⲥ', 'ⲥ'), - ('ⲧ', 'ⲧ'), - ('ⲩ', 'ⲩ'), - ('ⲫ', 'ⲫ'), - ('ⲭ', 'ⲭ'), - ('ⲯ', 'ⲯ'), - ('ⲱ', 'ⲱ'), - ('ⲳ', 'ⲳ'), - ('ⲵ', 'ⲵ'), - ('ⲷ', 'ⲷ'), - ('ⲹ', 'ⲹ'), - ('ⲻ', 'ⲻ'), - ('ⲽ', 'ⲽ'), - ('ⲿ', 'ⲿ'), - ('ⳁ', 'ⳁ'), - ('ⳃ', 'ⳃ'), - ('ⳅ', 'ⳅ'), - ('ⳇ', 'ⳇ'), - ('ⳉ', 'ⳉ'), - ('ⳋ', 'ⳋ'), - ('ⳍ', 'ⳍ'), - ('ⳏ', 'ⳏ'), - ('ⳑ', 'ⳑ'), - ('ⳓ', 'ⳓ'), - ('ⳕ', 'ⳕ'), - ('ⳗ', 'ⳗ'), - ('ⳙ', 'ⳙ'), - ('ⳛ', 'ⳛ'), - ('ⳝ', 'ⳝ'), - ('ⳟ', 'ⳟ'), - ('ⳡ', 'ⳡ'), - ('ⳣ', 'ⳤ'), - ('ⳬ', 'ⳬ'), - ('ⳮ', 'ⳮ'), - ('ⳳ', 'ⳳ'), - ('ⴀ', 'ⴥ'), - ('ⴧ', 'ⴧ'), - ('ⴭ', 'ⴭ'), - ('ꙁ', 'ꙁ'), - ('ꙃ', 'ꙃ'), - ('ꙅ', 'ꙅ'), - ('ꙇ', 'ꙇ'), - ('ꙉ', 'ꙉ'), - ('ꙋ', 'ꙋ'), - ('ꙍ', 'ꙍ'), - ('ꙏ', 'ꙏ'), - ('ꙑ', 'ꙑ'), - ('ꙓ', 'ꙓ'), - ('ꙕ', 'ꙕ'), - ('ꙗ', 'ꙗ'), - ('ꙙ', 'ꙙ'), - ('ꙛ', 'ꙛ'), - ('ꙝ', 'ꙝ'), - ('ꙟ', 'ꙟ'), - ('ꙡ', 'ꙡ'), - ('ꙣ', 'ꙣ'), - ('ꙥ', 'ꙥ'), - ('ꙧ', 'ꙧ'), - ('ꙩ', 'ꙩ'), - ('ꙫ', 'ꙫ'), - ('ꙭ', 'ꙭ'), - ('ꚁ', 'ꚁ'), - ('ꚃ', 'ꚃ'), - ('ꚅ', 'ꚅ'), - ('ꚇ', 'ꚇ'), - ('ꚉ', 'ꚉ'), - ('ꚋ', 'ꚋ'), - ('ꚍ', 'ꚍ'), - ('ꚏ', 'ꚏ'), - ('ꚑ', 'ꚑ'), - ('ꚓ', 'ꚓ'), - ('ꚕ', 'ꚕ'), - ('ꚗ', 
'ꚗ'), - ('ꚙ', 'ꚙ'), - ('ꚛ', 'ꚝ'), - ('ꜣ', 'ꜣ'), - ('ꜥ', 'ꜥ'), - ('ꜧ', 'ꜧ'), - ('ꜩ', 'ꜩ'), - ('ꜫ', 'ꜫ'), - ('ꜭ', 'ꜭ'), - ('ꜯ', 'ꜱ'), - ('ꜳ', 'ꜳ'), - ('ꜵ', 'ꜵ'), - ('ꜷ', 'ꜷ'), - ('ꜹ', 'ꜹ'), - ('ꜻ', 'ꜻ'), - ('ꜽ', 'ꜽ'), - ('ꜿ', 'ꜿ'), - ('ꝁ', 'ꝁ'), - ('ꝃ', 'ꝃ'), - ('ꝅ', 'ꝅ'), - ('ꝇ', 'ꝇ'), - ('ꝉ', 'ꝉ'), - ('ꝋ', 'ꝋ'), - ('ꝍ', 'ꝍ'), - ('ꝏ', 'ꝏ'), - ('ꝑ', 'ꝑ'), - ('ꝓ', 'ꝓ'), - ('ꝕ', 'ꝕ'), - ('ꝗ', 'ꝗ'), - ('ꝙ', 'ꝙ'), - ('ꝛ', 'ꝛ'), - ('ꝝ', 'ꝝ'), - ('ꝟ', 'ꝟ'), - ('ꝡ', 'ꝡ'), - ('ꝣ', 'ꝣ'), - ('ꝥ', 'ꝥ'), - ('ꝧ', 'ꝧ'), - ('ꝩ', 'ꝩ'), - ('ꝫ', 'ꝫ'), - ('ꝭ', 'ꝭ'), - ('ꝯ', 'ꝸ'), - ('ꝺ', 'ꝺ'), - ('ꝼ', 'ꝼ'), - ('ꝿ', 'ꝿ'), - ('ꞁ', 'ꞁ'), - ('ꞃ', 'ꞃ'), - ('ꞅ', 'ꞅ'), - ('ꞇ', 'ꞇ'), - ('ꞌ', 'ꞌ'), - ('ꞎ', 'ꞎ'), - ('ꞑ', 'ꞑ'), - ('ꞓ', 'ꞕ'), - ('ꞗ', 'ꞗ'), - ('ꞙ', 'ꞙ'), - ('ꞛ', 'ꞛ'), - ('ꞝ', 'ꞝ'), - ('ꞟ', 'ꞟ'), - ('ꞡ', 'ꞡ'), - ('ꞣ', 'ꞣ'), - ('ꞥ', 'ꞥ'), - ('ꞧ', 'ꞧ'), - ('ꞩ', 'ꞩ'), - ('ꞯ', 'ꞯ'), - ('ꞵ', 'ꞵ'), - ('ꞷ', 'ꞷ'), - ('ꞹ', 'ꞹ'), - ('ꞻ', 'ꞻ'), - ('ꞽ', 'ꞽ'), - ('ꞿ', 'ꞿ'), - ('ꟁ', 'ꟁ'), - ('ꟃ', 'ꟃ'), - ('ꟈ', 'ꟈ'), - ('ꟊ', 'ꟊ'), - ('ꟍ', 'ꟍ'), - ('ꟑ', 'ꟑ'), - ('ꟓ', 'ꟓ'), - ('ꟕ', 'ꟕ'), - ('ꟗ', 'ꟗ'), - ('ꟙ', 'ꟙ'), - ('ꟛ', 'ꟛ'), - ('ꟲ', 'ꟴ'), - ('ꟶ', 'ꟶ'), - ('ꟸ', 'ꟺ'), - ('ꬰ', 'ꭚ'), - ('ꭜ', 'ꭩ'), - ('ꭰ', 'ꮿ'), - ('ff', 'st'), - ('ﬓ', 'ﬗ'), - ('a', 'z'), - ('𐐨', '𐑏'), - ('𐓘', '𐓻'), - ('𐖗', '𐖡'), - ('𐖣', '𐖱'), - ('𐖳', '𐖹'), - ('𐖻', '𐖼'), - ('𐞀', '𐞀'), - ('𐞃', '𐞅'), - ('𐞇', '𐞰'), - ('𐞲', '𐞺'), - ('𐳀', '𐳲'), - ('𐵰', '𐶅'), - ('𑣀', '𑣟'), - ('𖹠', '𖹿'), - ('𝐚', '𝐳'), - ('𝑎', '𝑔'), - ('𝑖', '𝑧'), - ('𝒂', '𝒛'), - ('𝒶', '𝒹'), - ('𝒻', '𝒻'), - ('𝒽', '𝓃'), - ('𝓅', '𝓏'), - ('𝓪', '𝔃'), - ('𝔞', '𝔷'), - ('𝕒', '𝕫'), - ('𝖆', '𝖟'), - ('𝖺', '𝗓'), - ('𝗮', '𝘇'), - ('𝘢', '𝘻'), - ('𝙖', '𝙯'), - ('𝚊', '𝚥'), - ('𝛂', '𝛚'), - ('𝛜', '𝛡'), - ('𝛼', '𝜔'), - ('𝜖', '𝜛'), - ('𝜶', '𝝎'), - ('𝝐', '𝝕'), - ('𝝰', '𝞈'), - ('𝞊', '𝞏'), - ('𝞪', '𝟂'), - ('𝟄', '𝟉'), - ('𝟋', '𝟋'), - ('𝼀', '𝼉'), - ('𝼋', '𝼞'), - ('𝼥', '𝼪'), - ('𞀰', '𞁭'), - ('𞤢', '𞥃'), -]; - -pub const MATH: &'static [(char, char)] = &[ - ('+', '+'), - ('<', '>'), - ('^', '^'), - ('|', '|'), - ('~', '~'), - ('¬', '¬'), - ('±', '±'), - ('×', '×'), - ('÷', '÷'), - ('ϐ', 'ϒ'), - ('ϕ', 'ϕ'), - ('ϰ', 'ϱ'), - ('ϴ', '϶'), - ('؆', '؈'), - ('‖', '‖'), - ('′', '‴'), - ('⁀', '⁀'), - ('⁄', '⁄'), - ('⁒', '⁒'), - ('\u{2061}', '\u{2064}'), - ('⁺', '⁾'), - ('₊', '₎'), - ('\u{20d0}', '\u{20dc}'), - ('\u{20e1}', '\u{20e1}'), - ('\u{20e5}', '\u{20e6}'), - ('\u{20eb}', '\u{20ef}'), - ('ℂ', 'ℂ'), - ('ℇ', 'ℇ'), - ('ℊ', 'ℓ'), - ('ℕ', 'ℕ'), - ('℘', 'ℝ'), - ('ℤ', 'ℤ'), - ('ℨ', '℩'), - ('ℬ', 'ℭ'), - ('ℯ', 'ℱ'), - ('ℳ', 'ℸ'), - ('ℼ', 'ⅉ'), - ('⅋', '⅋'), - ('←', '↧'), - ('↩', '↮'), - ('↰', '↱'), - ('↶', '↷'), - ('↼', '⇛'), - ('⇝', '⇝'), - ('⇤', '⇥'), - ('⇴', '⋿'), - ('⌈', '⌋'), - ('⌠', '⌡'), - ('⍼', '⍼'), - ('⎛', '⎵'), - ('⎷', '⎷'), - ('⏐', '⏐'), - ('⏜', '⏢'), - ('■', '□'), - ('▮', '▷'), - ('▼', '◁'), - ('◆', '◇'), - ('◊', '○'), - ('●', '◓'), - ('◢', '◢'), - ('◤', '◤'), - ('◧', '◬'), - ('◸', '◿'), - ('★', '☆'), - ('♀', '♀'), - ('♂', '♂'), - ('♠', '♣'), - ('♭', '♯'), - ('⟀', '⟿'), - ('⤀', '⫿'), - ('⬰', '⭄'), - ('⭇', '⭌'), - ('﬩', '﬩'), - ('﹡', '﹦'), - ('﹨', '﹨'), - ('+', '+'), - ('<', '>'), - ('\', '\'), - ('^', '^'), - ('|', '|'), - ('~', '~'), - ('¬', '¬'), - ('←', '↓'), - ('𐶎', '𐶏'), - ('𝐀', '𝑔'), - ('𝑖', '𝒜'), - ('𝒞', '𝒟'), - ('𝒢', '𝒢'), - ('𝒥', '𝒦'), - ('𝒩', '𝒬'), - ('𝒮', '𝒹'), - ('𝒻', '𝒻'), - ('𝒽', '𝓃'), - ('𝓅', '𝔅'), - ('𝔇', '𝔊'), - ('𝔍', '𝔔'), - ('𝔖', '𝔜'), - ('𝔞', '𝔹'), - ('𝔻', '𝔾'), - ('𝕀', '𝕄'), - ('𝕆', '𝕆'), - ('𝕊', '𝕐'), - ('𝕒', '𝚥'), - ('𝚨', '𝟋'), - ('𝟎', '𝟿'), - ('𞸀', '𞸃'), - ('𞸅', '𞸟'), - ('𞸡', '𞸢'), - ('𞸤', '𞸤'), - ('𞸧', 
'𞸧'), - ('𞸩', '𞸲'), - ('𞸴', '𞸷'), - ('𞸹', '𞸹'), - ('𞸻', '𞸻'), - ('𞹂', '𞹂'), - ('𞹇', '𞹇'), - ('𞹉', '𞹉'), - ('𞹋', '𞹋'), - ('𞹍', '𞹏'), - ('𞹑', '𞹒'), - ('𞹔', '𞹔'), - ('𞹗', '𞹗'), - ('𞹙', '𞹙'), - ('𞹛', '𞹛'), - ('𞹝', '𞹝'), - ('𞹟', '𞹟'), - ('𞹡', '𞹢'), - ('𞹤', '𞹤'), - ('𞹧', '𞹪'), - ('𞹬', '𞹲'), - ('𞹴', '𞹷'), - ('𞹹', '𞹼'), - ('𞹾', '𞹾'), - ('𞺀', '𞺉'), - ('𞺋', '𞺛'), - ('𞺡', '𞺣'), - ('𞺥', '𞺩'), - ('𞺫', '𞺻'), - ('𞻰', '𞻱'), -]; - -pub const MODIFIER_COMBINING_MARK: &'static [(char, char)] = &[ - ('\u{654}', '\u{655}'), - ('\u{658}', '\u{658}'), - ('\u{6dc}', '\u{6dc}'), - ('\u{6e3}', '\u{6e3}'), - ('\u{6e7}', '\u{6e8}'), - ('\u{8ca}', '\u{8cb}'), - ('\u{8cd}', '\u{8cf}'), - ('\u{8d3}', '\u{8d3}'), - ('\u{8f3}', '\u{8f3}'), -]; - -pub const NONCHARACTER_CODE_POINT: &'static [(char, char)] = &[ - ('\u{fdd0}', '\u{fdef}'), - ('\u{fffe}', '\u{ffff}'), - ('\u{1fffe}', '\u{1ffff}'), - ('\u{2fffe}', '\u{2ffff}'), - ('\u{3fffe}', '\u{3ffff}'), - ('\u{4fffe}', '\u{4ffff}'), - ('\u{5fffe}', '\u{5ffff}'), - ('\u{6fffe}', '\u{6ffff}'), - ('\u{7fffe}', '\u{7ffff}'), - ('\u{8fffe}', '\u{8ffff}'), - ('\u{9fffe}', '\u{9ffff}'), - ('\u{afffe}', '\u{affff}'), - ('\u{bfffe}', '\u{bffff}'), - ('\u{cfffe}', '\u{cffff}'), - ('\u{dfffe}', '\u{dffff}'), - ('\u{efffe}', '\u{effff}'), - ('\u{ffffe}', '\u{fffff}'), - ('\u{10fffe}', '\u{10ffff}'), -]; - -pub const OTHER_ALPHABETIC: &'static [(char, char)] = &[ - ('\u{345}', '\u{345}'), - ('\u{363}', '\u{36f}'), - ('\u{5b0}', '\u{5bd}'), - ('\u{5bf}', '\u{5bf}'), - ('\u{5c1}', '\u{5c2}'), - ('\u{5c4}', '\u{5c5}'), - ('\u{5c7}', '\u{5c7}'), - ('\u{610}', '\u{61a}'), - ('\u{64b}', '\u{657}'), - ('\u{659}', '\u{65f}'), - ('\u{670}', '\u{670}'), - ('\u{6d6}', '\u{6dc}'), - ('\u{6e1}', '\u{6e4}'), - ('\u{6e7}', '\u{6e8}'), - ('\u{6ed}', '\u{6ed}'), - ('\u{711}', '\u{711}'), - ('\u{730}', '\u{73f}'), - ('\u{7a6}', '\u{7b0}'), - ('\u{816}', '\u{817}'), - ('\u{81b}', '\u{823}'), - ('\u{825}', '\u{827}'), - ('\u{829}', '\u{82c}'), - ('\u{897}', '\u{897}'), - ('\u{8d4}', '\u{8df}'), - ('\u{8e3}', '\u{8e9}'), - ('\u{8f0}', 'ः'), - ('\u{93a}', 'ऻ'), - ('ा', 'ौ'), - ('ॎ', 'ॏ'), - ('\u{955}', '\u{957}'), - ('\u{962}', '\u{963}'), - ('\u{981}', 'ঃ'), - ('\u{9be}', '\u{9c4}'), - ('ে', 'ৈ'), - ('ো', 'ৌ'), - ('\u{9d7}', '\u{9d7}'), - ('\u{9e2}', '\u{9e3}'), - ('\u{a01}', 'ਃ'), - ('ਾ', '\u{a42}'), - ('\u{a47}', '\u{a48}'), - ('\u{a4b}', '\u{a4c}'), - ('\u{a51}', '\u{a51}'), - ('\u{a70}', '\u{a71}'), - ('\u{a75}', '\u{a75}'), - ('\u{a81}', 'ઃ'), - ('ા', '\u{ac5}'), - ('\u{ac7}', 'ૉ'), - ('ો', 'ૌ'), - ('\u{ae2}', '\u{ae3}'), - ('\u{afa}', '\u{afc}'), - ('\u{b01}', 'ଃ'), - ('\u{b3e}', '\u{b44}'), - ('େ', 'ୈ'), - ('ୋ', 'ୌ'), - ('\u{b56}', '\u{b57}'), - ('\u{b62}', '\u{b63}'), - ('\u{b82}', '\u{b82}'), - ('\u{bbe}', 'ூ'), - ('ெ', 'ை'), - ('ொ', 'ௌ'), - ('\u{bd7}', '\u{bd7}'), - ('\u{c00}', '\u{c04}'), - ('\u{c3e}', 'ౄ'), - ('\u{c46}', '\u{c48}'), - ('\u{c4a}', '\u{c4c}'), - ('\u{c55}', '\u{c56}'), - ('\u{c62}', '\u{c63}'), - ('\u{c81}', 'ಃ'), - ('ಾ', 'ೄ'), - ('\u{cc6}', '\u{cc8}'), - ('\u{cca}', '\u{ccc}'), - ('\u{cd5}', '\u{cd6}'), - ('\u{ce2}', '\u{ce3}'), - ('ೳ', 'ೳ'), - ('\u{d00}', 'ഃ'), - ('\u{d3e}', '\u{d44}'), - ('െ', 'ൈ'), - ('ൊ', 'ൌ'), - ('\u{d57}', '\u{d57}'), - ('\u{d62}', '\u{d63}'), - ('\u{d81}', 'ඃ'), - ('\u{dcf}', '\u{dd4}'), - ('\u{dd6}', '\u{dd6}'), - ('ෘ', '\u{ddf}'), - ('ෲ', 'ෳ'), - ('\u{e31}', '\u{e31}'), - ('\u{e34}', '\u{e3a}'), - ('\u{e4d}', '\u{e4d}'), - ('\u{eb1}', '\u{eb1}'), - ('\u{eb4}', '\u{eb9}'), - ('\u{ebb}', '\u{ebc}'), - ('\u{ecd}', '\u{ecd}'), - ('\u{f71}', '\u{f83}'), - 
('\u{f8d}', '\u{f97}'), - ('\u{f99}', '\u{fbc}'), - ('ါ', '\u{1036}'), - ('း', 'း'), - ('ျ', '\u{103e}'), - ('ၖ', '\u{1059}'), - ('\u{105e}', '\u{1060}'), - ('ၢ', 'ၤ'), - ('ၧ', 'ၭ'), - ('\u{1071}', '\u{1074}'), - ('\u{1082}', '\u{108d}'), - ('ႏ', 'ႏ'), - ('ႚ', '\u{109d}'), - ('\u{1712}', '\u{1713}'), - ('\u{1732}', '\u{1733}'), - ('\u{1752}', '\u{1753}'), - ('\u{1772}', '\u{1773}'), - ('ា', 'ៈ'), - ('\u{1885}', '\u{1886}'), - ('\u{18a9}', '\u{18a9}'), - ('\u{1920}', 'ᤫ'), - ('ᤰ', 'ᤸ'), - ('\u{1a17}', '\u{1a1b}'), - ('ᩕ', '\u{1a5e}'), - ('ᩡ', '\u{1a74}'), - ('\u{1abf}', '\u{1ac0}'), - ('\u{1acc}', '\u{1ace}'), - ('\u{1b00}', 'ᬄ'), - ('\u{1b35}', '\u{1b43}'), - ('\u{1b80}', 'ᮂ'), - ('ᮡ', '\u{1ba9}'), - ('\u{1bac}', '\u{1bad}'), - ('ᯧ', '\u{1bf1}'), - ('ᰤ', '\u{1c36}'), - ('\u{1dd3}', '\u{1df4}'), - ('Ⓐ', 'ⓩ'), - ('\u{2de0}', '\u{2dff}'), - ('\u{a674}', '\u{a67b}'), - ('\u{a69e}', '\u{a69f}'), - ('\u{a802}', '\u{a802}'), - ('\u{a80b}', '\u{a80b}'), - ('ꠣ', 'ꠧ'), - ('ꢀ', 'ꢁ'), - ('ꢴ', 'ꣃ'), - ('\u{a8c5}', '\u{a8c5}'), - ('\u{a8ff}', '\u{a8ff}'), - ('\u{a926}', '\u{a92a}'), - ('\u{a947}', 'ꥒ'), - ('\u{a980}', 'ꦃ'), - ('ꦴ', 'ꦿ'), - ('\u{a9e5}', '\u{a9e5}'), - ('\u{aa29}', '\u{aa36}'), - ('\u{aa43}', '\u{aa43}'), - ('\u{aa4c}', 'ꩍ'), - ('ꩻ', 'ꩽ'), - ('\u{aab0}', '\u{aab0}'), - ('\u{aab2}', '\u{aab4}'), - ('\u{aab7}', '\u{aab8}'), - ('\u{aabe}', '\u{aabe}'), - ('ꫫ', 'ꫯ'), - ('ꫵ', 'ꫵ'), - ('ꯣ', 'ꯪ'), - ('\u{fb1e}', '\u{fb1e}'), - ('\u{10376}', '\u{1037a}'), - ('\u{10a01}', '\u{10a03}'), - ('\u{10a05}', '\u{10a06}'), - ('\u{10a0c}', '\u{10a0f}'), - ('\u{10d24}', '\u{10d27}'), - ('\u{10d69}', '\u{10d69}'), - ('\u{10eab}', '\u{10eac}'), - ('\u{10efc}', '\u{10efc}'), - ('𑀀', '𑀂'), - ('\u{11038}', '\u{11045}'), - ('\u{11073}', '\u{11074}'), - ('\u{11080}', '𑂂'), - ('𑂰', '𑂸'), - ('\u{110c2}', '\u{110c2}'), - ('\u{11100}', '\u{11102}'), - ('\u{11127}', '\u{11132}'), - ('𑅅', '𑅆'), - ('\u{11180}', '𑆂'), - ('𑆳', '𑆿'), - ('𑇎', '\u{111cf}'), - ('𑈬', '\u{11234}'), - ('\u{11237}', '\u{11237}'), - ('\u{1123e}', '\u{1123e}'), - ('\u{11241}', '\u{11241}'), - ('\u{112df}', '\u{112e8}'), - ('\u{11300}', '𑌃'), - ('\u{1133e}', '𑍄'), - ('𑍇', '𑍈'), - ('𑍋', '𑍌'), - ('\u{11357}', '\u{11357}'), - ('𑍢', '𑍣'), - ('\u{113b8}', '\u{113c0}'), - ('\u{113c2}', '\u{113c2}'), - ('\u{113c5}', '\u{113c5}'), - ('\u{113c7}', '𑏊'), - ('𑏌', '𑏍'), - ('𑐵', '𑑁'), - ('\u{11443}', '𑑅'), - ('\u{114b0}', '𑓁'), - ('\u{115af}', '\u{115b5}'), - ('𑖸', '𑖾'), - ('\u{115dc}', '\u{115dd}'), - ('𑘰', '𑘾'), - ('\u{11640}', '\u{11640}'), - ('\u{116ab}', '\u{116b5}'), - ('\u{1171d}', '\u{1172a}'), - ('𑠬', '𑠸'), - ('\u{11930}', '𑤵'), - ('𑤷', '𑤸'), - ('\u{1193b}', '\u{1193c}'), - ('𑥀', '𑥀'), - ('𑥂', '𑥂'), - ('𑧑', '\u{119d7}'), - ('\u{119da}', '𑧟'), - ('𑧤', '𑧤'), - ('\u{11a01}', '\u{11a0a}'), - ('\u{11a35}', '𑨹'), - ('\u{11a3b}', '\u{11a3e}'), - ('\u{11a51}', '\u{11a5b}'), - ('\u{11a8a}', '𑪗'), - ('𑰯', '\u{11c36}'), - ('\u{11c38}', '𑰾'), - ('\u{11c92}', '\u{11ca7}'), - ('𑲩', '\u{11cb6}'), - ('\u{11d31}', '\u{11d36}'), - ('\u{11d3a}', '\u{11d3a}'), - ('\u{11d3c}', '\u{11d3d}'), - ('\u{11d3f}', '\u{11d41}'), - ('\u{11d43}', '\u{11d43}'), - ('\u{11d47}', '\u{11d47}'), - ('𑶊', '𑶎'), - ('\u{11d90}', '\u{11d91}'), - ('𑶓', '𑶖'), - ('\u{11ef3}', '𑻶'), - ('\u{11f00}', '\u{11f01}'), - ('𑼃', '𑼃'), - ('𑼴', '\u{11f3a}'), - ('𑼾', '\u{11f40}'), - ('\u{1611e}', '\u{1612e}'), - ('\u{16f4f}', '\u{16f4f}'), - ('𖽑', '𖾇'), - ('\u{16f8f}', '\u{16f92}'), - ('\u{16ff0}', '\u{16ff1}'), - ('\u{1bc9e}', '\u{1bc9e}'), - ('\u{1e000}', '\u{1e006}'), - ('\u{1e008}', '\u{1e018}'), - 
('\u{1e01b}', '\u{1e021}'), - ('\u{1e023}', '\u{1e024}'), - ('\u{1e026}', '\u{1e02a}'), - ('\u{1e08f}', '\u{1e08f}'), - ('\u{1e947}', '\u{1e947}'), - ('🄰', '🅉'), - ('🅐', '🅩'), - ('🅰', '🆉'), -]; - -pub const OTHER_DEFAULT_IGNORABLE_CODE_POINT: &'static [(char, char)] = &[ - ('\u{34f}', '\u{34f}'), - ('ᅟ', 'ᅠ'), - ('\u{17b4}', '\u{17b5}'), - ('\u{2065}', '\u{2065}'), - ('ㅤ', 'ㅤ'), - ('ᅠ', 'ᅠ'), - ('\u{fff0}', '\u{fff8}'), - ('\u{e0000}', '\u{e0000}'), - ('\u{e0002}', '\u{e001f}'), - ('\u{e0080}', '\u{e00ff}'), - ('\u{e01f0}', '\u{e0fff}'), -]; - -pub const OTHER_GRAPHEME_EXTEND: &'static [(char, char)] = &[ - ('\u{9be}', '\u{9be}'), - ('\u{9d7}', '\u{9d7}'), - ('\u{b3e}', '\u{b3e}'), - ('\u{b57}', '\u{b57}'), - ('\u{bbe}', '\u{bbe}'), - ('\u{bd7}', '\u{bd7}'), - ('\u{cc0}', '\u{cc0}'), - ('\u{cc2}', '\u{cc2}'), - ('\u{cc7}', '\u{cc8}'), - ('\u{cca}', '\u{ccb}'), - ('\u{cd5}', '\u{cd6}'), - ('\u{d3e}', '\u{d3e}'), - ('\u{d57}', '\u{d57}'), - ('\u{dcf}', '\u{dcf}'), - ('\u{ddf}', '\u{ddf}'), - ('\u{1715}', '\u{1715}'), - ('\u{1734}', '\u{1734}'), - ('\u{1b35}', '\u{1b35}'), - ('\u{1b3b}', '\u{1b3b}'), - ('\u{1b3d}', '\u{1b3d}'), - ('\u{1b43}', '\u{1b44}'), - ('\u{1baa}', '\u{1baa}'), - ('\u{1bf2}', '\u{1bf3}'), - ('\u{200c}', '\u{200c}'), - ('\u{302e}', '\u{302f}'), - ('\u{a953}', '\u{a953}'), - ('\u{a9c0}', '\u{a9c0}'), - ('\u{ff9e}', '\u{ff9f}'), - ('\u{111c0}', '\u{111c0}'), - ('\u{11235}', '\u{11235}'), - ('\u{1133e}', '\u{1133e}'), - ('\u{1134d}', '\u{1134d}'), - ('\u{11357}', '\u{11357}'), - ('\u{113b8}', '\u{113b8}'), - ('\u{113c2}', '\u{113c2}'), - ('\u{113c5}', '\u{113c5}'), - ('\u{113c7}', '\u{113c9}'), - ('\u{113cf}', '\u{113cf}'), - ('\u{114b0}', '\u{114b0}'), - ('\u{114bd}', '\u{114bd}'), - ('\u{115af}', '\u{115af}'), - ('\u{116b6}', '\u{116b6}'), - ('\u{11930}', '\u{11930}'), - ('\u{1193d}', '\u{1193d}'), - ('\u{11f41}', '\u{11f41}'), - ('\u{16ff0}', '\u{16ff1}'), - ('\u{1d165}', '\u{1d166}'), - ('\u{1d16d}', '\u{1d172}'), - ('\u{e0020}', '\u{e007f}'), -]; - -pub const OTHER_ID_CONTINUE: &'static [(char, char)] = &[ - ('·', '·'), - ('·', '·'), - ('፩', '፱'), - ('᧚', '᧚'), - ('\u{200c}', '\u{200d}'), - ('・', '・'), - ('・', '・'), -]; - -pub const OTHER_ID_START: &'static [(char, char)] = - &[('\u{1885}', '\u{1886}'), ('℘', '℘'), ('℮', '℮'), ('゛', '゜')]; - -pub const OTHER_LOWERCASE: &'static [(char, char)] = &[ - ('ª', 'ª'), - ('º', 'º'), - ('ʰ', 'ʸ'), - ('ˀ', 'ˁ'), - ('ˠ', 'ˤ'), - ('\u{345}', '\u{345}'), - ('ͺ', 'ͺ'), - ('ჼ', 'ჼ'), - ('ᴬ', 'ᵪ'), - ('ᵸ', 'ᵸ'), - ('ᶛ', 'ᶿ'), - ('ⁱ', 'ⁱ'), - ('ⁿ', 'ⁿ'), - ('ₐ', 'ₜ'), - ('ⅰ', 'ⅿ'), - ('ⓐ', 'ⓩ'), - ('ⱼ', 'ⱽ'), - ('ꚜ', 'ꚝ'), - ('ꝰ', 'ꝰ'), - ('ꟲ', 'ꟴ'), - ('ꟸ', 'ꟹ'), - ('ꭜ', 'ꭟ'), - ('ꭩ', 'ꭩ'), - ('𐞀', '𐞀'), - ('𐞃', '𐞅'), - ('𐞇', '𐞰'), - ('𐞲', '𐞺'), - ('𞀰', '𞁭'), -]; - -pub const OTHER_MATH: &'static [(char, char)] = &[ - ('^', '^'), - ('ϐ', 'ϒ'), - ('ϕ', 'ϕ'), - ('ϰ', 'ϱ'), - ('ϴ', 'ϵ'), - ('‖', '‖'), - ('′', '‴'), - ('⁀', '⁀'), - ('\u{2061}', '\u{2064}'), - ('⁽', '⁾'), - ('₍', '₎'), - ('\u{20d0}', '\u{20dc}'), - ('\u{20e1}', '\u{20e1}'), - ('\u{20e5}', '\u{20e6}'), - ('\u{20eb}', '\u{20ef}'), - ('ℂ', 'ℂ'), - ('ℇ', 'ℇ'), - ('ℊ', 'ℓ'), - ('ℕ', 'ℕ'), - ('ℙ', 'ℝ'), - ('ℤ', 'ℤ'), - ('ℨ', '℩'), - ('ℬ', 'ℭ'), - ('ℯ', 'ℱ'), - ('ℳ', 'ℸ'), - ('ℼ', 'ℿ'), - ('ⅅ', 'ⅉ'), - ('↕', '↙'), - ('↜', '↟'), - ('↡', '↢'), - ('↤', '↥'), - ('↧', '↧'), - ('↩', '↭'), - ('↰', '↱'), - ('↶', '↷'), - ('↼', '⇍'), - ('⇐', '⇑'), - ('⇓', '⇓'), - ('⇕', '⇛'), - ('⇝', '⇝'), - ('⇤', '⇥'), - ('⌈', '⌋'), - ('⎴', '⎵'), - ('⎷', '⎷'), - ('⏐', '⏐'), - ('⏢', '⏢'), - ('■', '□'), - 
('▮', '▶'), - ('▼', '◀'), - ('◆', '◇'), - ('◊', '○'), - ('●', '◓'), - ('◢', '◢'), - ('◤', '◤'), - ('◧', '◬'), - ('★', '☆'), - ('♀', '♀'), - ('♂', '♂'), - ('♠', '♣'), - ('♭', '♮'), - ('⟅', '⟆'), - ('⟦', '⟯'), - ('⦃', '⦘'), - ('⧘', '⧛'), - ('⧼', '⧽'), - ('﹡', '﹡'), - ('﹣', '﹣'), - ('﹨', '﹨'), - ('\', '\'), - ('^', '^'), - ('𝐀', '𝑔'), - ('𝑖', '𝒜'), - ('𝒞', '𝒟'), - ('𝒢', '𝒢'), - ('𝒥', '𝒦'), - ('𝒩', '𝒬'), - ('𝒮', '𝒹'), - ('𝒻', '𝒻'), - ('𝒽', '𝓃'), - ('𝓅', '𝔅'), - ('𝔇', '𝔊'), - ('𝔍', '𝔔'), - ('𝔖', '𝔜'), - ('𝔞', '𝔹'), - ('𝔻', '𝔾'), - ('𝕀', '𝕄'), - ('𝕆', '𝕆'), - ('𝕊', '𝕐'), - ('𝕒', '𝚥'), - ('𝚨', '𝛀'), - ('𝛂', '𝛚'), - ('𝛜', '𝛺'), - ('𝛼', '𝜔'), - ('𝜖', '𝜴'), - ('𝜶', '𝝎'), - ('𝝐', '𝝮'), - ('𝝰', '𝞈'), - ('𝞊', '𝞨'), - ('𝞪', '𝟂'), - ('𝟄', '𝟋'), - ('𝟎', '𝟿'), - ('𞸀', '𞸃'), - ('𞸅', '𞸟'), - ('𞸡', '𞸢'), - ('𞸤', '𞸤'), - ('𞸧', '𞸧'), - ('𞸩', '𞸲'), - ('𞸴', '𞸷'), - ('𞸹', '𞸹'), - ('𞸻', '𞸻'), - ('𞹂', '𞹂'), - ('𞹇', '𞹇'), - ('𞹉', '𞹉'), - ('𞹋', '𞹋'), - ('𞹍', '𞹏'), - ('𞹑', '𞹒'), - ('𞹔', '𞹔'), - ('𞹗', '𞹗'), - ('𞹙', '𞹙'), - ('𞹛', '𞹛'), - ('𞹝', '𞹝'), - ('𞹟', '𞹟'), - ('𞹡', '𞹢'), - ('𞹤', '𞹤'), - ('𞹧', '𞹪'), - ('𞹬', '𞹲'), - ('𞹴', '𞹷'), - ('𞹹', '𞹼'), - ('𞹾', '𞹾'), - ('𞺀', '𞺉'), - ('𞺋', '𞺛'), - ('𞺡', '𞺣'), - ('𞺥', '𞺩'), - ('𞺫', '𞺻'), -]; - -pub const OTHER_UPPERCASE: &'static [(char, char)] = - &[('Ⅰ', 'Ⅿ'), ('Ⓐ', 'Ⓩ'), ('🄰', '🅉'), ('🅐', '🅩'), ('🅰', '🆉')]; - -pub const PATTERN_SYNTAX: &'static [(char, char)] = &[ - ('!', '/'), - (':', '@'), - ('[', '^'), - ('`', '`'), - ('{', '~'), - ('¡', '§'), - ('©', '©'), - ('«', '¬'), - ('®', '®'), - ('°', '±'), - ('¶', '¶'), - ('»', '»'), - ('¿', '¿'), - ('×', '×'), - ('÷', '÷'), - ('‐', '‧'), - ('‰', '‾'), - ('⁁', '⁓'), - ('⁕', '⁞'), - ('←', '\u{245f}'), - ('─', '❵'), - ('➔', '⯿'), - ('⸀', '\u{2e7f}'), - ('、', '〃'), - ('〈', '〠'), - ('〰', '〰'), - ('﴾', '﴿'), - ('﹅', '﹆'), -]; - -pub const PATTERN_WHITE_SPACE: &'static [(char, char)] = &[ - ('\t', '\r'), - (' ', ' '), - ('\u{85}', '\u{85}'), - ('\u{200e}', '\u{200f}'), - ('\u{2028}', '\u{2029}'), -]; - -pub const PREPENDED_CONCATENATION_MARK: &'static [(char, char)] = &[ - ('\u{600}', '\u{605}'), - ('\u{6dd}', '\u{6dd}'), - ('\u{70f}', '\u{70f}'), - ('\u{890}', '\u{891}'), - ('\u{8e2}', '\u{8e2}'), - ('\u{110bd}', '\u{110bd}'), - ('\u{110cd}', '\u{110cd}'), -]; - -pub const QUOTATION_MARK: &'static [(char, char)] = &[ - ('"', '"'), - ('\'', '\''), - ('«', '«'), - ('»', '»'), - ('‘', '‟'), - ('‹', '›'), - ('⹂', '⹂'), - ('「', '』'), - ('〝', '〟'), - ('﹁', '﹄'), - ('"', '"'), - (''', '''), - ('「', '」'), -]; - -pub const RADICAL: &'static [(char, char)] = - &[('⺀', '⺙'), ('⺛', '⻳'), ('⼀', '⿕')]; - -pub const REGIONAL_INDICATOR: &'static [(char, char)] = &[('🇦', '🇿')]; - -pub const SENTENCE_TERMINAL: &'static [(char, char)] = &[ - ('!', '!'), - ('.', '.'), - ('?', '?'), - ('։', '։'), - ('؝', '؟'), - ('۔', '۔'), - ('܀', '܂'), - ('߹', '߹'), - ('࠷', '࠷'), - ('࠹', '࠹'), - ('࠽', '࠾'), - ('।', '॥'), - ('၊', '။'), - ('።', '።'), - ('፧', '፨'), - ('᙮', '᙮'), - ('᜵', '᜶'), - ('។', '៕'), - ('᠃', '᠃'), - ('᠉', '᠉'), - ('᥄', '᥅'), - ('᪨', '᪫'), - ('᭎', '᭏'), - ('᭚', '᭛'), - ('᭞', '᭟'), - ('᭽', '᭿'), - ('᰻', '᰼'), - ('᱾', '᱿'), - ('․', '․'), - ('‼', '‽'), - ('⁇', '⁉'), - ('⳹', '⳻'), - ('⸮', '⸮'), - ('⸼', '⸼'), - ('⹓', '⹔'), - ('。', '。'), - ('꓿', '꓿'), - ('꘎', '꘏'), - ('꛳', '꛳'), - ('꛷', '꛷'), - ('꡶', '꡷'), - ('꣎', '꣏'), - ('꤯', '꤯'), - ('꧈', '꧉'), - ('꩝', '꩟'), - ('꫰', '꫱'), - ('꯫', '꯫'), - ('︒', '︒'), - ('︕', '︖'), - ('﹒', '﹒'), - ('﹖', '﹗'), - ('!', '!'), - ('.', '.'), - ('?', '?'), - ('。', '。'), - ('𐩖', '𐩗'), - ('𐽕', '𐽙'), - ('𐾆', '𐾉'), - ('𑁇', '𑁈'), - 
('𑂾', '𑃁'), - ('𑅁', '𑅃'), - ('𑇅', '𑇆'), - ('𑇍', '𑇍'), - ('𑇞', '𑇟'), - ('𑈸', '𑈹'), - ('𑈻', '𑈼'), - ('𑊩', '𑊩'), - ('𑏔', '𑏕'), - ('𑑋', '𑑌'), - ('𑗂', '𑗃'), - ('𑗉', '𑗗'), - ('𑙁', '𑙂'), - ('𑜼', '𑜾'), - ('𑥄', '𑥄'), - ('𑥆', '𑥆'), - ('𑩂', '𑩃'), - ('𑪛', '𑪜'), - ('𑱁', '𑱂'), - ('𑻷', '𑻸'), - ('𑽃', '𑽄'), - ('𖩮', '𖩯'), - ('𖫵', '𖫵'), - ('𖬷', '𖬸'), - ('𖭄', '𖭄'), - ('𖵮', '𖵯'), - ('𖺘', '𖺘'), - ('𛲟', '𛲟'), - ('𝪈', '𝪈'), -]; - -pub const SOFT_DOTTED: &'static [(char, char)] = &[ - ('i', 'j'), - ('į', 'į'), - ('ɉ', 'ɉ'), - ('ɨ', 'ɨ'), - ('ʝ', 'ʝ'), - ('ʲ', 'ʲ'), - ('ϳ', 'ϳ'), - ('і', 'і'), - ('ј', 'ј'), - ('ᵢ', 'ᵢ'), - ('ᶖ', 'ᶖ'), - ('ᶤ', 'ᶤ'), - ('ᶨ', 'ᶨ'), - ('ḭ', 'ḭ'), - ('ị', 'ị'), - ('ⁱ', 'ⁱ'), - ('ⅈ', 'ⅉ'), - ('ⱼ', 'ⱼ'), - ('𝐢', '𝐣'), - ('𝑖', '𝑗'), - ('𝒊', '𝒋'), - ('𝒾', '𝒿'), - ('𝓲', '𝓳'), - ('𝔦', '𝔧'), - ('𝕚', '𝕛'), - ('𝖎', '𝖏'), - ('𝗂', '𝗃'), - ('𝗶', '𝗷'), - ('𝘪', '𝘫'), - ('𝙞', '𝙟'), - ('𝚒', '𝚓'), - ('𝼚', '𝼚'), - ('𞁌', '𞁍'), - ('𞁨', '𞁨'), -]; - -pub const TERMINAL_PUNCTUATION: &'static [(char, char)] = &[ - ('!', '!'), - (',', ','), - ('.', '.'), - (':', ';'), - ('?', '?'), - (';', ';'), - ('·', '·'), - ('։', '։'), - ('׃', '׃'), - ('،', '،'), - ('؛', '؛'), - ('؝', '؟'), - ('۔', '۔'), - ('܀', '܊'), - ('܌', '܌'), - ('߸', '߹'), - ('࠰', '࠵'), - ('࠷', '࠾'), - ('࡞', '࡞'), - ('।', '॥'), - ('๚', '๛'), - ('༈', '༈'), - ('།', '༒'), - ('၊', '။'), - ('፡', '፨'), - ('᙮', '᙮'), - ('᛫', '᛭'), - ('᜵', '᜶'), - ('។', '៖'), - ('៚', '៚'), - ('᠂', '᠅'), - ('᠈', '᠉'), - ('᥄', '᥅'), - ('᪨', '᪫'), - ('᭎', '᭏'), - ('᭚', '᭛'), - ('᭝', '᭟'), - ('᭽', '᭿'), - ('᰻', '᰿'), - ('᱾', '᱿'), - ('․', '․'), - ('‼', '‽'), - ('⁇', '⁉'), - ('⳹', '⳻'), - ('⸮', '⸮'), - ('⸼', '⸼'), - ('⹁', '⹁'), - ('⹌', '⹌'), - ('⹎', '⹏'), - ('⹓', '⹔'), - ('、', '。'), - ('꓾', '꓿'), - ('꘍', '꘏'), - ('꛳', '꛷'), - ('꡶', '꡷'), - ('꣎', '꣏'), - ('꤯', '꤯'), - ('꧇', '꧉'), - ('꩝', '꩟'), - ('꫟', '꫟'), - ('꫰', '꫱'), - ('꯫', '꯫'), - ('︒', '︒'), - ('︕', '︖'), - ('﹐', '﹒'), - ('﹔', '﹗'), - ('!', '!'), - (',', ','), - ('.', '.'), - (':', ';'), - ('?', '?'), - ('。', '。'), - ('、', '、'), - ('𐎟', '𐎟'), - ('𐏐', '𐏐'), - ('𐡗', '𐡗'), - ('𐤟', '𐤟'), - ('𐩖', '𐩗'), - ('𐫰', '𐫵'), - ('𐬺', '𐬿'), - ('𐮙', '𐮜'), - ('𐽕', '𐽙'), - ('𐾆', '𐾉'), - ('𑁇', '𑁍'), - ('𑂾', '𑃁'), - ('𑅁', '𑅃'), - ('𑇅', '𑇆'), - ('𑇍', '𑇍'), - ('𑇞', '𑇟'), - ('𑈸', '𑈼'), - ('𑊩', '𑊩'), - ('𑏔', '𑏕'), - ('𑑋', '𑑍'), - ('𑑚', '𑑛'), - ('𑗂', '𑗅'), - ('𑗉', '𑗗'), - ('𑙁', '𑙂'), - ('𑜼', '𑜾'), - ('𑥄', '𑥄'), - ('𑥆', '𑥆'), - ('𑩂', '𑩃'), - ('𑪛', '𑪜'), - ('𑪡', '𑪢'), - ('𑱁', '𑱃'), - ('𑱱', '𑱱'), - ('𑻷', '𑻸'), - ('𑽃', '𑽄'), - ('𒑰', '𒑴'), - ('𖩮', '𖩯'), - ('𖫵', '𖫵'), - ('𖬷', '𖬹'), - ('𖭄', '𖭄'), - ('𖵮', '𖵯'), - ('𖺗', '𖺘'), - ('𛲟', '𛲟'), - ('𝪇', '𝪊'), -]; - -pub const UNIFIED_IDEOGRAPH: &'static [(char, char)] = &[ - ('㐀', '䶿'), - ('一', '鿿'), - ('﨎', '﨏'), - ('﨑', '﨑'), - ('﨓', '﨔'), - ('﨟', '﨟'), - ('﨡', '﨡'), - ('﨣', '﨤'), - ('﨧', '﨩'), - ('𠀀', '𪛟'), - ('𪜀', '𫜹'), - ('𫝀', '𫠝'), - ('𫠠', '𬺡'), - ('𬺰', '𮯠'), - ('𮯰', '𮹝'), - ('𰀀', '𱍊'), - ('𱍐', '𲎯'), -]; - -pub const UPPERCASE: &'static [(char, char)] = &[ - ('A', 'Z'), - ('À', 'Ö'), - ('Ø', 'Þ'), - ('Ā', 'Ā'), - ('Ă', 'Ă'), - ('Ą', 'Ą'), - ('Ć', 'Ć'), - ('Ĉ', 'Ĉ'), - ('Ċ', 'Ċ'), - ('Č', 'Č'), - ('Ď', 'Ď'), - ('Đ', 'Đ'), - ('Ē', 'Ē'), - ('Ĕ', 'Ĕ'), - ('Ė', 'Ė'), - ('Ę', 'Ę'), - ('Ě', 'Ě'), - ('Ĝ', 'Ĝ'), - ('Ğ', 'Ğ'), - ('Ġ', 'Ġ'), - ('Ģ', 'Ģ'), - ('Ĥ', 'Ĥ'), - ('Ħ', 'Ħ'), - ('Ĩ', 'Ĩ'), - ('Ī', 'Ī'), - ('Ĭ', 'Ĭ'), - ('Į', 'Į'), - ('İ', 'İ'), - ('IJ', 'IJ'), - ('Ĵ', 'Ĵ'), - ('Ķ', 'Ķ'), - ('Ĺ', 'Ĺ'), - ('Ļ', 'Ļ'), - ('Ľ', 'Ľ'), - ('Ŀ', 'Ŀ'), - ('Ł', 'Ł'), - ('Ń', 'Ń'), - ('Ņ', 'Ņ'), - ('Ň', 'Ň'), - ('Ŋ', 'Ŋ'), - 
('Ō', 'Ō'), - ('Ŏ', 'Ŏ'), - ('Ő', 'Ő'), - ('Œ', 'Œ'), - ('Ŕ', 'Ŕ'), - ('Ŗ', 'Ŗ'), - ('Ř', 'Ř'), - ('Ś', 'Ś'), - ('Ŝ', 'Ŝ'), - ('Ş', 'Ş'), - ('Š', 'Š'), - ('Ţ', 'Ţ'), - ('Ť', 'Ť'), - ('Ŧ', 'Ŧ'), - ('Ũ', 'Ũ'), - ('Ū', 'Ū'), - ('Ŭ', 'Ŭ'), - ('Ů', 'Ů'), - ('Ű', 'Ű'), - ('Ų', 'Ų'), - ('Ŵ', 'Ŵ'), - ('Ŷ', 'Ŷ'), - ('Ÿ', 'Ź'), - ('Ż', 'Ż'), - ('Ž', 'Ž'), - ('Ɓ', 'Ƃ'), - ('Ƅ', 'Ƅ'), - ('Ɔ', 'Ƈ'), - ('Ɖ', 'Ƌ'), - ('Ǝ', 'Ƒ'), - ('Ɠ', 'Ɣ'), - ('Ɩ', 'Ƙ'), - ('Ɯ', 'Ɲ'), - ('Ɵ', 'Ơ'), - ('Ƣ', 'Ƣ'), - ('Ƥ', 'Ƥ'), - ('Ʀ', 'Ƨ'), - ('Ʃ', 'Ʃ'), - ('Ƭ', 'Ƭ'), - ('Ʈ', 'Ư'), - ('Ʊ', 'Ƴ'), - ('Ƶ', 'Ƶ'), - ('Ʒ', 'Ƹ'), - ('Ƽ', 'Ƽ'), - ('DŽ', 'DŽ'), - ('LJ', 'LJ'), - ('NJ', 'NJ'), - ('Ǎ', 'Ǎ'), - ('Ǐ', 'Ǐ'), - ('Ǒ', 'Ǒ'), - ('Ǔ', 'Ǔ'), - ('Ǖ', 'Ǖ'), - ('Ǘ', 'Ǘ'), - ('Ǚ', 'Ǚ'), - ('Ǜ', 'Ǜ'), - ('Ǟ', 'Ǟ'), - ('Ǡ', 'Ǡ'), - ('Ǣ', 'Ǣ'), - ('Ǥ', 'Ǥ'), - ('Ǧ', 'Ǧ'), - ('Ǩ', 'Ǩ'), - ('Ǫ', 'Ǫ'), - ('Ǭ', 'Ǭ'), - ('Ǯ', 'Ǯ'), - ('DZ', 'DZ'), - ('Ǵ', 'Ǵ'), - ('Ƕ', 'Ǹ'), - ('Ǻ', 'Ǻ'), - ('Ǽ', 'Ǽ'), - ('Ǿ', 'Ǿ'), - ('Ȁ', 'Ȁ'), - ('Ȃ', 'Ȃ'), - ('Ȅ', 'Ȅ'), - ('Ȇ', 'Ȇ'), - ('Ȉ', 'Ȉ'), - ('Ȋ', 'Ȋ'), - ('Ȍ', 'Ȍ'), - ('Ȏ', 'Ȏ'), - ('Ȑ', 'Ȑ'), - ('Ȓ', 'Ȓ'), - ('Ȕ', 'Ȕ'), - ('Ȗ', 'Ȗ'), - ('Ș', 'Ș'), - ('Ț', 'Ț'), - ('Ȝ', 'Ȝ'), - ('Ȟ', 'Ȟ'), - ('Ƞ', 'Ƞ'), - ('Ȣ', 'Ȣ'), - ('Ȥ', 'Ȥ'), - ('Ȧ', 'Ȧ'), - ('Ȩ', 'Ȩ'), - ('Ȫ', 'Ȫ'), - ('Ȭ', 'Ȭ'), - ('Ȯ', 'Ȯ'), - ('Ȱ', 'Ȱ'), - ('Ȳ', 'Ȳ'), - ('Ⱥ', 'Ȼ'), - ('Ƚ', 'Ⱦ'), - ('Ɂ', 'Ɂ'), - ('Ƀ', 'Ɇ'), - ('Ɉ', 'Ɉ'), - ('Ɋ', 'Ɋ'), - ('Ɍ', 'Ɍ'), - ('Ɏ', 'Ɏ'), - ('Ͱ', 'Ͱ'), - ('Ͳ', 'Ͳ'), - ('Ͷ', 'Ͷ'), - ('Ϳ', 'Ϳ'), - ('Ά', 'Ά'), - ('Έ', 'Ί'), - ('Ό', 'Ό'), - ('Ύ', 'Ώ'), - ('Α', 'Ρ'), - ('Σ', 'Ϋ'), - ('Ϗ', 'Ϗ'), - ('ϒ', 'ϔ'), - ('Ϙ', 'Ϙ'), - ('Ϛ', 'Ϛ'), - ('Ϝ', 'Ϝ'), - ('Ϟ', 'Ϟ'), - ('Ϡ', 'Ϡ'), - ('Ϣ', 'Ϣ'), - ('Ϥ', 'Ϥ'), - ('Ϧ', 'Ϧ'), - ('Ϩ', 'Ϩ'), - ('Ϫ', 'Ϫ'), - ('Ϭ', 'Ϭ'), - ('Ϯ', 'Ϯ'), - ('ϴ', 'ϴ'), - ('Ϸ', 'Ϸ'), - ('Ϲ', 'Ϻ'), - ('Ͻ', 'Я'), - ('Ѡ', 'Ѡ'), - ('Ѣ', 'Ѣ'), - ('Ѥ', 'Ѥ'), - ('Ѧ', 'Ѧ'), - ('Ѩ', 'Ѩ'), - ('Ѫ', 'Ѫ'), - ('Ѭ', 'Ѭ'), - ('Ѯ', 'Ѯ'), - ('Ѱ', 'Ѱ'), - ('Ѳ', 'Ѳ'), - ('Ѵ', 'Ѵ'), - ('Ѷ', 'Ѷ'), - ('Ѹ', 'Ѹ'), - ('Ѻ', 'Ѻ'), - ('Ѽ', 'Ѽ'), - ('Ѿ', 'Ѿ'), - ('Ҁ', 'Ҁ'), - ('Ҋ', 'Ҋ'), - ('Ҍ', 'Ҍ'), - ('Ҏ', 'Ҏ'), - ('Ґ', 'Ґ'), - ('Ғ', 'Ғ'), - ('Ҕ', 'Ҕ'), - ('Җ', 'Җ'), - ('Ҙ', 'Ҙ'), - ('Қ', 'Қ'), - ('Ҝ', 'Ҝ'), - ('Ҟ', 'Ҟ'), - ('Ҡ', 'Ҡ'), - ('Ң', 'Ң'), - ('Ҥ', 'Ҥ'), - ('Ҧ', 'Ҧ'), - ('Ҩ', 'Ҩ'), - ('Ҫ', 'Ҫ'), - ('Ҭ', 'Ҭ'), - ('Ү', 'Ү'), - ('Ұ', 'Ұ'), - ('Ҳ', 'Ҳ'), - ('Ҵ', 'Ҵ'), - ('Ҷ', 'Ҷ'), - ('Ҹ', 'Ҹ'), - ('Һ', 'Һ'), - ('Ҽ', 'Ҽ'), - ('Ҿ', 'Ҿ'), - ('Ӏ', 'Ӂ'), - ('Ӄ', 'Ӄ'), - ('Ӆ', 'Ӆ'), - ('Ӈ', 'Ӈ'), - ('Ӊ', 'Ӊ'), - ('Ӌ', 'Ӌ'), - ('Ӎ', 'Ӎ'), - ('Ӑ', 'Ӑ'), - ('Ӓ', 'Ӓ'), - ('Ӕ', 'Ӕ'), - ('Ӗ', 'Ӗ'), - ('Ә', 'Ә'), - ('Ӛ', 'Ӛ'), - ('Ӝ', 'Ӝ'), - ('Ӟ', 'Ӟ'), - ('Ӡ', 'Ӡ'), - ('Ӣ', 'Ӣ'), - ('Ӥ', 'Ӥ'), - ('Ӧ', 'Ӧ'), - ('Ө', 'Ө'), - ('Ӫ', 'Ӫ'), - ('Ӭ', 'Ӭ'), - ('Ӯ', 'Ӯ'), - ('Ӱ', 'Ӱ'), - ('Ӳ', 'Ӳ'), - ('Ӵ', 'Ӵ'), - ('Ӷ', 'Ӷ'), - ('Ӹ', 'Ӹ'), - ('Ӻ', 'Ӻ'), - ('Ӽ', 'Ӽ'), - ('Ӿ', 'Ӿ'), - ('Ԁ', 'Ԁ'), - ('Ԃ', 'Ԃ'), - ('Ԅ', 'Ԅ'), - ('Ԇ', 'Ԇ'), - ('Ԉ', 'Ԉ'), - ('Ԋ', 'Ԋ'), - ('Ԍ', 'Ԍ'), - ('Ԏ', 'Ԏ'), - ('Ԑ', 'Ԑ'), - ('Ԓ', 'Ԓ'), - ('Ԕ', 'Ԕ'), - ('Ԗ', 'Ԗ'), - ('Ԙ', 'Ԙ'), - ('Ԛ', 'Ԛ'), - ('Ԝ', 'Ԝ'), - ('Ԟ', 'Ԟ'), - ('Ԡ', 'Ԡ'), - ('Ԣ', 'Ԣ'), - ('Ԥ', 'Ԥ'), - ('Ԧ', 'Ԧ'), - ('Ԩ', 'Ԩ'), - ('Ԫ', 'Ԫ'), - ('Ԭ', 'Ԭ'), - ('Ԯ', 'Ԯ'), - ('Ա', 'Ֆ'), - ('Ⴀ', 'Ⴥ'), - ('Ⴧ', 'Ⴧ'), - ('Ⴭ', 'Ⴭ'), - ('Ꭰ', 'Ᏽ'), - ('Ᲊ', 'Ᲊ'), - ('Ა', 'Ჺ'), - ('Ჽ', 'Ჿ'), - ('Ḁ', 'Ḁ'), - ('Ḃ', 'Ḃ'), - ('Ḅ', 'Ḅ'), - ('Ḇ', 'Ḇ'), - ('Ḉ', 'Ḉ'), - ('Ḋ', 'Ḋ'), - ('Ḍ', 'Ḍ'), - ('Ḏ', 'Ḏ'), - ('Ḑ', 'Ḑ'), - ('Ḓ', 'Ḓ'), - ('Ḕ', 'Ḕ'), - ('Ḗ', 'Ḗ'), - ('Ḙ', 'Ḙ'), - ('Ḛ', 'Ḛ'), - 
('Ḝ', 'Ḝ'), - ('Ḟ', 'Ḟ'), - ('Ḡ', 'Ḡ'), - ('Ḣ', 'Ḣ'), - ('Ḥ', 'Ḥ'), - ('Ḧ', 'Ḧ'), - ('Ḩ', 'Ḩ'), - ('Ḫ', 'Ḫ'), - ('Ḭ', 'Ḭ'), - ('Ḯ', 'Ḯ'), - ('Ḱ', 'Ḱ'), - ('Ḳ', 'Ḳ'), - ('Ḵ', 'Ḵ'), - ('Ḷ', 'Ḷ'), - ('Ḹ', 'Ḹ'), - ('Ḻ', 'Ḻ'), - ('Ḽ', 'Ḽ'), - ('Ḿ', 'Ḿ'), - ('Ṁ', 'Ṁ'), - ('Ṃ', 'Ṃ'), - ('Ṅ', 'Ṅ'), - ('Ṇ', 'Ṇ'), - ('Ṉ', 'Ṉ'), - ('Ṋ', 'Ṋ'), - ('Ṍ', 'Ṍ'), - ('Ṏ', 'Ṏ'), - ('Ṑ', 'Ṑ'), - ('Ṓ', 'Ṓ'), - ('Ṕ', 'Ṕ'), - ('Ṗ', 'Ṗ'), - ('Ṙ', 'Ṙ'), - ('Ṛ', 'Ṛ'), - ('Ṝ', 'Ṝ'), - ('Ṟ', 'Ṟ'), - ('Ṡ', 'Ṡ'), - ('Ṣ', 'Ṣ'), - ('Ṥ', 'Ṥ'), - ('Ṧ', 'Ṧ'), - ('Ṩ', 'Ṩ'), - ('Ṫ', 'Ṫ'), - ('Ṭ', 'Ṭ'), - ('Ṯ', 'Ṯ'), - ('Ṱ', 'Ṱ'), - ('Ṳ', 'Ṳ'), - ('Ṵ', 'Ṵ'), - ('Ṷ', 'Ṷ'), - ('Ṹ', 'Ṹ'), - ('Ṻ', 'Ṻ'), - ('Ṽ', 'Ṽ'), - ('Ṿ', 'Ṿ'), - ('Ẁ', 'Ẁ'), - ('Ẃ', 'Ẃ'), - ('Ẅ', 'Ẅ'), - ('Ẇ', 'Ẇ'), - ('Ẉ', 'Ẉ'), - ('Ẋ', 'Ẋ'), - ('Ẍ', 'Ẍ'), - ('Ẏ', 'Ẏ'), - ('Ẑ', 'Ẑ'), - ('Ẓ', 'Ẓ'), - ('Ẕ', 'Ẕ'), - ('ẞ', 'ẞ'), - ('Ạ', 'Ạ'), - ('Ả', 'Ả'), - ('Ấ', 'Ấ'), - ('Ầ', 'Ầ'), - ('Ẩ', 'Ẩ'), - ('Ẫ', 'Ẫ'), - ('Ậ', 'Ậ'), - ('Ắ', 'Ắ'), - ('Ằ', 'Ằ'), - ('Ẳ', 'Ẳ'), - ('Ẵ', 'Ẵ'), - ('Ặ', 'Ặ'), - ('Ẹ', 'Ẹ'), - ('Ẻ', 'Ẻ'), - ('Ẽ', 'Ẽ'), - ('Ế', 'Ế'), - ('Ề', 'Ề'), - ('Ể', 'Ể'), - ('Ễ', 'Ễ'), - ('Ệ', 'Ệ'), - ('Ỉ', 'Ỉ'), - ('Ị', 'Ị'), - ('Ọ', 'Ọ'), - ('Ỏ', 'Ỏ'), - ('Ố', 'Ố'), - ('Ồ', 'Ồ'), - ('Ổ', 'Ổ'), - ('Ỗ', 'Ỗ'), - ('Ộ', 'Ộ'), - ('Ớ', 'Ớ'), - ('Ờ', 'Ờ'), - ('Ở', 'Ở'), - ('Ỡ', 'Ỡ'), - ('Ợ', 'Ợ'), - ('Ụ', 'Ụ'), - ('Ủ', 'Ủ'), - ('Ứ', 'Ứ'), - ('Ừ', 'Ừ'), - ('Ử', 'Ử'), - ('Ữ', 'Ữ'), - ('Ự', 'Ự'), - ('Ỳ', 'Ỳ'), - ('Ỵ', 'Ỵ'), - ('Ỷ', 'Ỷ'), - ('Ỹ', 'Ỹ'), - ('Ỻ', 'Ỻ'), - ('Ỽ', 'Ỽ'), - ('Ỿ', 'Ỿ'), - ('Ἀ', 'Ἇ'), - ('Ἐ', 'Ἕ'), - ('Ἠ', 'Ἧ'), - ('Ἰ', 'Ἷ'), - ('Ὀ', 'Ὅ'), - ('Ὑ', 'Ὑ'), - ('Ὓ', 'Ὓ'), - ('Ὕ', 'Ὕ'), - ('Ὗ', 'Ὗ'), - ('Ὠ', 'Ὧ'), - ('Ᾰ', 'Ά'), - ('Ὲ', 'Ή'), - ('Ῐ', 'Ί'), - ('Ῠ', 'Ῥ'), - ('Ὸ', 'Ώ'), - ('ℂ', 'ℂ'), - ('ℇ', 'ℇ'), - ('ℋ', 'ℍ'), - ('ℐ', 'ℒ'), - ('ℕ', 'ℕ'), - ('ℙ', 'ℝ'), - ('ℤ', 'ℤ'), - ('Ω', 'Ω'), - ('ℨ', 'ℨ'), - ('K', 'ℭ'), - ('ℰ', 'ℳ'), - ('ℾ', 'ℿ'), - ('ⅅ', 'ⅅ'), - ('Ⅰ', 'Ⅿ'), - ('Ↄ', 'Ↄ'), - ('Ⓐ', 'Ⓩ'), - ('Ⰰ', 'Ⱟ'), - ('Ⱡ', 'Ⱡ'), - ('Ɫ', 'Ɽ'), - ('Ⱨ', 'Ⱨ'), - ('Ⱪ', 'Ⱪ'), - ('Ⱬ', 'Ⱬ'), - ('Ɑ', 'Ɒ'), - ('Ⱳ', 'Ⱳ'), - ('Ⱶ', 'Ⱶ'), - ('Ȿ', 'Ⲁ'), - ('Ⲃ', 'Ⲃ'), - ('Ⲅ', 'Ⲅ'), - ('Ⲇ', 'Ⲇ'), - ('Ⲉ', 'Ⲉ'), - ('Ⲋ', 'Ⲋ'), - ('Ⲍ', 'Ⲍ'), - ('Ⲏ', 'Ⲏ'), - ('Ⲑ', 'Ⲑ'), - ('Ⲓ', 'Ⲓ'), - ('Ⲕ', 'Ⲕ'), - ('Ⲗ', 'Ⲗ'), - ('Ⲙ', 'Ⲙ'), - ('Ⲛ', 'Ⲛ'), - ('Ⲝ', 'Ⲝ'), - ('Ⲟ', 'Ⲟ'), - ('Ⲡ', 'Ⲡ'), - ('Ⲣ', 'Ⲣ'), - ('Ⲥ', 'Ⲥ'), - ('Ⲧ', 'Ⲧ'), - ('Ⲩ', 'Ⲩ'), - ('Ⲫ', 'Ⲫ'), - ('Ⲭ', 'Ⲭ'), - ('Ⲯ', 'Ⲯ'), - ('Ⲱ', 'Ⲱ'), - ('Ⲳ', 'Ⲳ'), - ('Ⲵ', 'Ⲵ'), - ('Ⲷ', 'Ⲷ'), - ('Ⲹ', 'Ⲹ'), - ('Ⲻ', 'Ⲻ'), - ('Ⲽ', 'Ⲽ'), - ('Ⲿ', 'Ⲿ'), - ('Ⳁ', 'Ⳁ'), - ('Ⳃ', 'Ⳃ'), - ('Ⳅ', 'Ⳅ'), - ('Ⳇ', 'Ⳇ'), - ('Ⳉ', 'Ⳉ'), - ('Ⳋ', 'Ⳋ'), - ('Ⳍ', 'Ⳍ'), - ('Ⳏ', 'Ⳏ'), - ('Ⳑ', 'Ⳑ'), - ('Ⳓ', 'Ⳓ'), - ('Ⳕ', 'Ⳕ'), - ('Ⳗ', 'Ⳗ'), - ('Ⳙ', 'Ⳙ'), - ('Ⳛ', 'Ⳛ'), - ('Ⳝ', 'Ⳝ'), - ('Ⳟ', 'Ⳟ'), - ('Ⳡ', 'Ⳡ'), - ('Ⳣ', 'Ⳣ'), - ('Ⳬ', 'Ⳬ'), - ('Ⳮ', 'Ⳮ'), - ('Ⳳ', 'Ⳳ'), - ('Ꙁ', 'Ꙁ'), - ('Ꙃ', 'Ꙃ'), - ('Ꙅ', 'Ꙅ'), - ('Ꙇ', 'Ꙇ'), - ('Ꙉ', 'Ꙉ'), - ('Ꙋ', 'Ꙋ'), - ('Ꙍ', 'Ꙍ'), - ('Ꙏ', 'Ꙏ'), - ('Ꙑ', 'Ꙑ'), - ('Ꙓ', 'Ꙓ'), - ('Ꙕ', 'Ꙕ'), - ('Ꙗ', 'Ꙗ'), - ('Ꙙ', 'Ꙙ'), - ('Ꙛ', 'Ꙛ'), - ('Ꙝ', 'Ꙝ'), - ('Ꙟ', 'Ꙟ'), - ('Ꙡ', 'Ꙡ'), - ('Ꙣ', 'Ꙣ'), - ('Ꙥ', 'Ꙥ'), - ('Ꙧ', 'Ꙧ'), - ('Ꙩ', 'Ꙩ'), - ('Ꙫ', 'Ꙫ'), - ('Ꙭ', 'Ꙭ'), - ('Ꚁ', 'Ꚁ'), - ('Ꚃ', 'Ꚃ'), - ('Ꚅ', 'Ꚅ'), - ('Ꚇ', 'Ꚇ'), - ('Ꚉ', 'Ꚉ'), - ('Ꚋ', 'Ꚋ'), - ('Ꚍ', 'Ꚍ'), - ('Ꚏ', 'Ꚏ'), - ('Ꚑ', 'Ꚑ'), - ('Ꚓ', 'Ꚓ'), - ('Ꚕ', 'Ꚕ'), - ('Ꚗ', 'Ꚗ'), - ('Ꚙ', 'Ꚙ'), - ('Ꚛ', 'Ꚛ'), - ('Ꜣ', 'Ꜣ'), - ('Ꜥ', 'Ꜥ'), - ('Ꜧ', 'Ꜧ'), - ('Ꜩ', 'Ꜩ'), - ('Ꜫ', 'Ꜫ'), - ('Ꜭ', 'Ꜭ'), - ('Ꜯ', 'Ꜯ'), - ('Ꜳ', 'Ꜳ'), - ('Ꜵ', 'Ꜵ'), - ('Ꜷ', 'Ꜷ'), - ('Ꜹ', 'Ꜹ'), - ('Ꜻ', 'Ꜻ'), - ('Ꜽ', 'Ꜽ'), - ('Ꜿ', 'Ꜿ'), 
- ('Ꝁ', 'Ꝁ'), - ('Ꝃ', 'Ꝃ'), - ('Ꝅ', 'Ꝅ'), - ('Ꝇ', 'Ꝇ'), - ('Ꝉ', 'Ꝉ'), - ('Ꝋ', 'Ꝋ'), - ('Ꝍ', 'Ꝍ'), - ('Ꝏ', 'Ꝏ'), - ('Ꝑ', 'Ꝑ'), - ('Ꝓ', 'Ꝓ'), - ('Ꝕ', 'Ꝕ'), - ('Ꝗ', 'Ꝗ'), - ('Ꝙ', 'Ꝙ'), - ('Ꝛ', 'Ꝛ'), - ('Ꝝ', 'Ꝝ'), - ('Ꝟ', 'Ꝟ'), - ('Ꝡ', 'Ꝡ'), - ('Ꝣ', 'Ꝣ'), - ('Ꝥ', 'Ꝥ'), - ('Ꝧ', 'Ꝧ'), - ('Ꝩ', 'Ꝩ'), - ('Ꝫ', 'Ꝫ'), - ('Ꝭ', 'Ꝭ'), - ('Ꝯ', 'Ꝯ'), - ('Ꝺ', 'Ꝺ'), - ('Ꝼ', 'Ꝼ'), - ('Ᵹ', 'Ꝿ'), - ('Ꞁ', 'Ꞁ'), - ('Ꞃ', 'Ꞃ'), - ('Ꞅ', 'Ꞅ'), - ('Ꞇ', 'Ꞇ'), - ('Ꞌ', 'Ꞌ'), - ('Ɥ', 'Ɥ'), - ('Ꞑ', 'Ꞑ'), - ('Ꞓ', 'Ꞓ'), - ('Ꞗ', 'Ꞗ'), - ('Ꞙ', 'Ꞙ'), - ('Ꞛ', 'Ꞛ'), - ('Ꞝ', 'Ꞝ'), - ('Ꞟ', 'Ꞟ'), - ('Ꞡ', 'Ꞡ'), - ('Ꞣ', 'Ꞣ'), - ('Ꞥ', 'Ꞥ'), - ('Ꞧ', 'Ꞧ'), - ('Ꞩ', 'Ꞩ'), - ('Ɦ', 'Ɪ'), - ('Ʞ', 'Ꞵ'), - ('Ꞷ', 'Ꞷ'), - ('Ꞹ', 'Ꞹ'), - ('Ꞻ', 'Ꞻ'), - ('Ꞽ', 'Ꞽ'), - ('Ꞿ', 'Ꞿ'), - ('Ꟁ', 'Ꟁ'), - ('Ꟃ', 'Ꟃ'), - ('Ꞔ', 'Ꟈ'), - ('Ꟊ', 'Ꟊ'), - ('Ɤ', 'Ꟍ'), - ('Ꟑ', 'Ꟑ'), - ('Ꟗ', 'Ꟗ'), - ('Ꟙ', 'Ꟙ'), - ('Ꟛ', 'Ꟛ'), - ('Ƛ', 'Ƛ'), - ('Ꟶ', 'Ꟶ'), - ('A', 'Z'), - ('𐐀', '𐐧'), - ('𐒰', '𐓓'), - ('𐕰', '𐕺'), - ('𐕼', '𐖊'), - ('𐖌', '𐖒'), - ('𐖔', '𐖕'), - ('𐲀', '𐲲'), - ('𐵐', '𐵥'), - ('𑢠', '𑢿'), - ('𖹀', '𖹟'), - ('𝐀', '𝐙'), - ('𝐴', '𝑍'), - ('𝑨', '𝒁'), - ('𝒜', '𝒜'), - ('𝒞', '𝒟'), - ('𝒢', '𝒢'), - ('𝒥', '𝒦'), - ('𝒩', '𝒬'), - ('𝒮', '𝒵'), - ('𝓐', '𝓩'), - ('𝔄', '𝔅'), - ('𝔇', '𝔊'), - ('𝔍', '𝔔'), - ('𝔖', '𝔜'), - ('𝔸', '𝔹'), - ('𝔻', '𝔾'), - ('𝕀', '𝕄'), - ('𝕆', '𝕆'), - ('𝕊', '𝕐'), - ('𝕬', '𝖅'), - ('𝖠', '𝖹'), - ('𝗔', '𝗭'), - ('𝘈', '𝘡'), - ('𝘼', '𝙕'), - ('𝙰', '𝚉'), - ('𝚨', '𝛀'), - ('𝛢', '𝛺'), - ('𝜜', '𝜴'), - ('𝝖', '𝝮'), - ('𝞐', '𝞨'), - ('𝟊', '𝟊'), - ('𞤀', '𞤡'), - ('🄰', '🅉'), - ('🅐', '🅩'), - ('🅰', '🆉'), -]; - -pub const VARIATION_SELECTOR: &'static [(char, char)] = &[ - ('\u{180b}', '\u{180d}'), - ('\u{180f}', '\u{180f}'), - ('\u{fe00}', '\u{fe0f}'), - ('\u{e0100}', '\u{e01ef}'), -]; - -pub const WHITE_SPACE: &'static [(char, char)] = &[ - ('\t', '\r'), - (' ', ' '), - ('\u{85}', '\u{85}'), - ('\u{a0}', '\u{a0}'), - ('\u{1680}', '\u{1680}'), - ('\u{2000}', '\u{200a}'), - ('\u{2028}', '\u{2029}'), - ('\u{202f}', '\u{202f}'), - ('\u{205f}', '\u{205f}'), - ('\u{3000}', '\u{3000}'), -]; - -pub const XID_CONTINUE: &'static [(char, char)] = &[ - ('0', '9'), - ('A', 'Z'), - ('_', '_'), - ('a', 'z'), - ('ª', 'ª'), - ('µ', 'µ'), - ('·', '·'), - ('º', 'º'), - ('À', 'Ö'), - ('Ø', 'ö'), - ('ø', 'ˁ'), - ('ˆ', 'ˑ'), - ('ˠ', 'ˤ'), - ('ˬ', 'ˬ'), - ('ˮ', 'ˮ'), - ('\u{300}', 'ʹ'), - ('Ͷ', 'ͷ'), - ('ͻ', 'ͽ'), - ('Ϳ', 'Ϳ'), - ('Ά', 'Ί'), - ('Ό', 'Ό'), - ('Ύ', 'Ρ'), - ('Σ', 'ϵ'), - ('Ϸ', 'ҁ'), - ('\u{483}', '\u{487}'), - ('Ҋ', 'ԯ'), - ('Ա', 'Ֆ'), - ('ՙ', 'ՙ'), - ('ՠ', 'ֈ'), - ('\u{591}', '\u{5bd}'), - ('\u{5bf}', '\u{5bf}'), - ('\u{5c1}', '\u{5c2}'), - ('\u{5c4}', '\u{5c5}'), - ('\u{5c7}', '\u{5c7}'), - ('א', 'ת'), - ('ׯ', 'ײ'), - ('\u{610}', '\u{61a}'), - ('ؠ', '٩'), - ('ٮ', 'ۓ'), - ('ە', '\u{6dc}'), - ('\u{6df}', '\u{6e8}'), - ('\u{6ea}', 'ۼ'), - ('ۿ', 'ۿ'), - ('ܐ', '\u{74a}'), - ('ݍ', 'ޱ'), - ('߀', 'ߵ'), - ('ߺ', 'ߺ'), - ('\u{7fd}', '\u{7fd}'), - ('ࠀ', '\u{82d}'), - ('ࡀ', '\u{85b}'), - ('ࡠ', 'ࡪ'), - ('ࡰ', 'ࢇ'), - ('ࢉ', 'ࢎ'), - ('\u{897}', '\u{8e1}'), - ('\u{8e3}', '\u{963}'), - ('०', '९'), - ('ॱ', 'ঃ'), - ('অ', 'ঌ'), - ('এ', 'ঐ'), - ('ও', 'ন'), - ('প', 'র'), - ('ল', 'ল'), - ('শ', 'হ'), - ('\u{9bc}', '\u{9c4}'), - ('ে', 'ৈ'), - ('ো', 'ৎ'), - ('\u{9d7}', '\u{9d7}'), - ('ড়', 'ঢ়'), - ('য়', '\u{9e3}'), - ('০', 'ৱ'), - ('ৼ', 'ৼ'), - ('\u{9fe}', '\u{9fe}'), - ('\u{a01}', 'ਃ'), - ('ਅ', 'ਊ'), - ('ਏ', 'ਐ'), - ('ਓ', 'ਨ'), - ('ਪ', 'ਰ'), - ('ਲ', 'ਲ਼'), - ('ਵ', 'ਸ਼'), - ('ਸ', 'ਹ'), - ('\u{a3c}', '\u{a3c}'), - ('ਾ', '\u{a42}'), - ('\u{a47}', '\u{a48}'), - ('\u{a4b}', '\u{a4d}'), - ('\u{a51}', '\u{a51}'), - ('ਖ਼', 'ੜ'), - 
('ਫ਼', 'ਫ਼'), - ('੦', '\u{a75}'), - ('\u{a81}', 'ઃ'), - ('અ', 'ઍ'), - ('એ', 'ઑ'), - ('ઓ', 'ન'), - ('પ', 'ર'), - ('લ', 'ળ'), - ('વ', 'હ'), - ('\u{abc}', '\u{ac5}'), - ('\u{ac7}', 'ૉ'), - ('ો', '\u{acd}'), - ('ૐ', 'ૐ'), - ('ૠ', '\u{ae3}'), - ('૦', '૯'), - ('ૹ', '\u{aff}'), - ('\u{b01}', 'ଃ'), - ('ଅ', 'ଌ'), - ('ଏ', 'ଐ'), - ('ଓ', 'ନ'), - ('ପ', 'ର'), - ('ଲ', 'ଳ'), - ('ଵ', 'ହ'), - ('\u{b3c}', '\u{b44}'), - ('େ', 'ୈ'), - ('ୋ', '\u{b4d}'), - ('\u{b55}', '\u{b57}'), - ('ଡ଼', 'ଢ଼'), - ('ୟ', '\u{b63}'), - ('୦', '୯'), - ('ୱ', 'ୱ'), - ('\u{b82}', 'ஃ'), - ('அ', 'ஊ'), - ('எ', 'ஐ'), - ('ஒ', 'க'), - ('ங', 'ச'), - ('ஜ', 'ஜ'), - ('ஞ', 'ட'), - ('ண', 'த'), - ('ந', 'ப'), - ('ம', 'ஹ'), - ('\u{bbe}', 'ூ'), - ('ெ', 'ை'), - ('ொ', '\u{bcd}'), - ('ௐ', 'ௐ'), - ('\u{bd7}', '\u{bd7}'), - ('௦', '௯'), - ('\u{c00}', 'ఌ'), - ('ఎ', 'ఐ'), - ('ఒ', 'న'), - ('ప', 'హ'), - ('\u{c3c}', 'ౄ'), - ('\u{c46}', '\u{c48}'), - ('\u{c4a}', '\u{c4d}'), - ('\u{c55}', '\u{c56}'), - ('ౘ', 'ౚ'), - ('ౝ', 'ౝ'), - ('ౠ', '\u{c63}'), - ('౦', '౯'), - ('ಀ', 'ಃ'), - ('ಅ', 'ಌ'), - ('ಎ', 'ಐ'), - ('ಒ', 'ನ'), - ('ಪ', 'ಳ'), - ('ವ', 'ಹ'), - ('\u{cbc}', 'ೄ'), - ('\u{cc6}', '\u{cc8}'), - ('\u{cca}', '\u{ccd}'), - ('\u{cd5}', '\u{cd6}'), - ('ೝ', 'ೞ'), - ('ೠ', '\u{ce3}'), - ('೦', '೯'), - ('ೱ', 'ೳ'), - ('\u{d00}', 'ഌ'), - ('എ', 'ഐ'), - ('ഒ', '\u{d44}'), - ('െ', 'ൈ'), - ('ൊ', 'ൎ'), - ('ൔ', '\u{d57}'), - ('ൟ', '\u{d63}'), - ('൦', '൯'), - ('ൺ', 'ൿ'), - ('\u{d81}', 'ඃ'), - ('අ', 'ඖ'), - ('ක', 'න'), - ('ඳ', 'ර'), - ('ල', 'ල'), - ('ව', 'ෆ'), - ('\u{dca}', '\u{dca}'), - ('\u{dcf}', '\u{dd4}'), - ('\u{dd6}', '\u{dd6}'), - ('ෘ', '\u{ddf}'), - ('෦', '෯'), - ('ෲ', 'ෳ'), - ('ก', '\u{e3a}'), - ('เ', '\u{e4e}'), - ('๐', '๙'), - ('ກ', 'ຂ'), - ('ຄ', 'ຄ'), - ('ຆ', 'ຊ'), - ('ຌ', 'ຣ'), - ('ລ', 'ລ'), - ('ວ', 'ຽ'), - ('ເ', 'ໄ'), - ('ໆ', 'ໆ'), - ('\u{ec8}', '\u{ece}'), - ('໐', '໙'), - ('ໜ', 'ໟ'), - ('ༀ', 'ༀ'), - ('\u{f18}', '\u{f19}'), - ('༠', '༩'), - ('\u{f35}', '\u{f35}'), - ('\u{f37}', '\u{f37}'), - ('\u{f39}', '\u{f39}'), - ('༾', 'ཇ'), - ('ཉ', 'ཬ'), - ('\u{f71}', '\u{f84}'), - ('\u{f86}', '\u{f97}'), - ('\u{f99}', '\u{fbc}'), - ('\u{fc6}', '\u{fc6}'), - ('က', '၉'), - ('ၐ', '\u{109d}'), - ('Ⴀ', 'Ⴥ'), - ('Ⴧ', 'Ⴧ'), - ('Ⴭ', 'Ⴭ'), - ('ა', 'ჺ'), - ('ჼ', 'ቈ'), - ('ቊ', 'ቍ'), - ('ቐ', 'ቖ'), - ('ቘ', 'ቘ'), - ('ቚ', 'ቝ'), - ('በ', 'ኈ'), - ('ኊ', 'ኍ'), - ('ነ', 'ኰ'), - ('ኲ', 'ኵ'), - ('ኸ', 'ኾ'), - ('ዀ', 'ዀ'), - ('ዂ', 'ዅ'), - ('ወ', 'ዖ'), - ('ዘ', 'ጐ'), - ('ጒ', 'ጕ'), - ('ጘ', 'ፚ'), - ('\u{135d}', '\u{135f}'), - ('፩', '፱'), - ('ᎀ', 'ᎏ'), - ('Ꭰ', 'Ᏽ'), - ('ᏸ', 'ᏽ'), - ('ᐁ', 'ᙬ'), - ('ᙯ', 'ᙿ'), - ('ᚁ', 'ᚚ'), - ('ᚠ', 'ᛪ'), - ('ᛮ', 'ᛸ'), - ('ᜀ', '\u{1715}'), - ('ᜟ', '\u{1734}'), - ('ᝀ', '\u{1753}'), - ('ᝠ', 'ᝬ'), - ('ᝮ', 'ᝰ'), - ('\u{1772}', '\u{1773}'), - ('ក', '\u{17d3}'), - ('ៗ', 'ៗ'), - ('ៜ', '\u{17dd}'), - ('០', '៩'), - ('\u{180b}', '\u{180d}'), - ('\u{180f}', '᠙'), - ('ᠠ', 'ᡸ'), - ('ᢀ', 'ᢪ'), - ('ᢰ', 'ᣵ'), - ('ᤀ', 'ᤞ'), - ('\u{1920}', 'ᤫ'), - ('ᤰ', '\u{193b}'), - ('᥆', 'ᥭ'), - ('ᥰ', 'ᥴ'), - ('ᦀ', 'ᦫ'), - ('ᦰ', 'ᧉ'), - ('᧐', '᧚'), - ('ᨀ', '\u{1a1b}'), - ('ᨠ', '\u{1a5e}'), - ('\u{1a60}', '\u{1a7c}'), - ('\u{1a7f}', '᪉'), - ('᪐', '᪙'), - ('ᪧ', 'ᪧ'), - ('\u{1ab0}', '\u{1abd}'), - ('\u{1abf}', '\u{1ace}'), - ('\u{1b00}', 'ᭌ'), - ('᭐', '᭙'), - ('\u{1b6b}', '\u{1b73}'), - ('\u{1b80}', '\u{1bf3}'), - ('ᰀ', '\u{1c37}'), - ('᱀', '᱉'), - ('ᱍ', 'ᱽ'), - ('ᲀ', 'ᲊ'), - ('Ა', 'Ჺ'), - ('Ჽ', 'Ჿ'), - ('\u{1cd0}', '\u{1cd2}'), - ('\u{1cd4}', 'ᳺ'), - ('ᴀ', 'ἕ'), - ('Ἐ', 'Ἕ'), - ('ἠ', 'ὅ'), - ('Ὀ', 'Ὅ'), - ('ὐ', 'ὗ'), - ('Ὑ', 'Ὑ'), - ('Ὓ', 'Ὓ'), - ('Ὕ', 'Ὕ'), - ('Ὗ', 'ώ'), - ('ᾀ', 'ᾴ'), - ('ᾶ', 'ᾼ'), - ('ι', 'ι'), - ('ῂ', 
'ῄ'), - ('ῆ', 'ῌ'), - ('ῐ', 'ΐ'), - ('ῖ', 'Ί'), - ('ῠ', 'Ῥ'), - ('ῲ', 'ῴ'), - ('ῶ', 'ῼ'), - ('\u{200c}', '\u{200d}'), - ('‿', '⁀'), - ('⁔', '⁔'), - ('ⁱ', 'ⁱ'), - ('ⁿ', 'ⁿ'), - ('ₐ', 'ₜ'), - ('\u{20d0}', '\u{20dc}'), - ('\u{20e1}', '\u{20e1}'), - ('\u{20e5}', '\u{20f0}'), - ('ℂ', 'ℂ'), - ('ℇ', 'ℇ'), - ('ℊ', 'ℓ'), - ('ℕ', 'ℕ'), - ('℘', 'ℝ'), - ('ℤ', 'ℤ'), - ('Ω', 'Ω'), - ('ℨ', 'ℨ'), - ('K', 'ℹ'), - ('ℼ', 'ℿ'), - ('ⅅ', 'ⅉ'), - ('ⅎ', 'ⅎ'), - ('Ⅰ', 'ↈ'), - ('Ⰰ', 'ⳤ'), - ('Ⳬ', 'ⳳ'), - ('ⴀ', 'ⴥ'), - ('ⴧ', 'ⴧ'), - ('ⴭ', 'ⴭ'), - ('ⴰ', 'ⵧ'), - ('ⵯ', 'ⵯ'), - ('\u{2d7f}', 'ⶖ'), - ('ⶠ', 'ⶦ'), - ('ⶨ', 'ⶮ'), - ('ⶰ', 'ⶶ'), - ('ⶸ', 'ⶾ'), - ('ⷀ', 'ⷆ'), - ('ⷈ', 'ⷎ'), - ('ⷐ', 'ⷖ'), - ('ⷘ', 'ⷞ'), - ('\u{2de0}', '\u{2dff}'), - ('々', '〇'), - ('〡', '\u{302f}'), - ('〱', '〵'), - ('〸', '〼'), - ('ぁ', 'ゖ'), - ('\u{3099}', '\u{309a}'), - ('ゝ', 'ゟ'), - ('ァ', 'ヿ'), - ('ㄅ', 'ㄯ'), - ('ㄱ', 'ㆎ'), - ('ㆠ', 'ㆿ'), - ('ㇰ', 'ㇿ'), - ('㐀', '䶿'), - ('一', 'ꒌ'), - ('ꓐ', 'ꓽ'), - ('ꔀ', 'ꘌ'), - ('ꘐ', 'ꘫ'), - ('Ꙁ', '\u{a66f}'), - ('\u{a674}', '\u{a67d}'), - ('ꙿ', '\u{a6f1}'), - ('ꜗ', 'ꜟ'), - ('Ꜣ', 'ꞈ'), - ('Ꞌ', 'ꟍ'), - ('Ꟑ', 'ꟑ'), - ('ꟓ', 'ꟓ'), - ('ꟕ', 'Ƛ'), - ('ꟲ', 'ꠧ'), - ('\u{a82c}', '\u{a82c}'), - ('ꡀ', 'ꡳ'), - ('ꢀ', '\u{a8c5}'), - ('꣐', '꣙'), - ('\u{a8e0}', 'ꣷ'), - ('ꣻ', 'ꣻ'), - ('ꣽ', '\u{a92d}'), - ('ꤰ', '\u{a953}'), - ('ꥠ', 'ꥼ'), - ('\u{a980}', '\u{a9c0}'), - ('ꧏ', '꧙'), - ('ꧠ', 'ꧾ'), - ('ꨀ', '\u{aa36}'), - ('ꩀ', 'ꩍ'), - ('꩐', '꩙'), - ('ꩠ', 'ꩶ'), - ('ꩺ', 'ꫂ'), - ('ꫛ', 'ꫝ'), - ('ꫠ', 'ꫯ'), - ('ꫲ', '\u{aaf6}'), - ('ꬁ', 'ꬆ'), - ('ꬉ', 'ꬎ'), - ('ꬑ', 'ꬖ'), - ('ꬠ', 'ꬦ'), - ('ꬨ', 'ꬮ'), - ('ꬰ', 'ꭚ'), - ('ꭜ', 'ꭩ'), - ('ꭰ', 'ꯪ'), - ('꯬', '\u{abed}'), - ('꯰', '꯹'), - ('가', '힣'), - ('ힰ', 'ퟆ'), - ('ퟋ', 'ퟻ'), - ('豈', '舘'), - ('並', '龎'), - ('ff', 'st'), - ('ﬓ', 'ﬗ'), - ('יִ', 'ﬨ'), - ('שׁ', 'זּ'), - ('טּ', 'לּ'), - ('מּ', 'מּ'), - ('נּ', 'סּ'), - ('ףּ', 'פּ'), - ('צּ', 'ﮱ'), - ('ﯓ', 'ﱝ'), - ('ﱤ', 'ﴽ'), - ('ﵐ', 'ﶏ'), - ('ﶒ', 'ﷇ'), - ('ﷰ', 'ﷹ'), - ('\u{fe00}', '\u{fe0f}'), - ('\u{fe20}', '\u{fe2f}'), - ('︳', '︴'), - ('﹍', '﹏'), - ('ﹱ', 'ﹱ'), - ('ﹳ', 'ﹳ'), - ('ﹷ', 'ﹷ'), - ('ﹹ', 'ﹹ'), - ('ﹻ', 'ﹻ'), - ('ﹽ', 'ﹽ'), - ('ﹿ', 'ﻼ'), - ('0', '9'), - ('A', 'Z'), - ('_', '_'), - ('a', 'z'), - ('・', 'ᄒ'), - ('ᅡ', 'ᅦ'), - ('ᅧ', 'ᅬ'), - ('ᅭ', 'ᅲ'), - ('ᅳ', 'ᅵ'), - ('𐀀', '𐀋'), - ('𐀍', '𐀦'), - ('𐀨', '𐀺'), - ('𐀼', '𐀽'), - ('𐀿', '𐁍'), - ('𐁐', '𐁝'), - ('𐂀', '𐃺'), - ('𐅀', '𐅴'), - ('\u{101fd}', '\u{101fd}'), - ('𐊀', '𐊜'), - ('𐊠', '𐋐'), - ('\u{102e0}', '\u{102e0}'), - ('𐌀', '𐌟'), - ('𐌭', '𐍊'), - ('𐍐', '\u{1037a}'), - ('𐎀', '𐎝'), - ('𐎠', '𐏃'), - ('𐏈', '𐏏'), - ('𐏑', '𐏕'), - ('𐐀', '𐒝'), - ('𐒠', '𐒩'), - ('𐒰', '𐓓'), - ('𐓘', '𐓻'), - ('𐔀', '𐔧'), - ('𐔰', '𐕣'), - ('𐕰', '𐕺'), - ('𐕼', '𐖊'), - ('𐖌', '𐖒'), - ('𐖔', '𐖕'), - ('𐖗', '𐖡'), - ('𐖣', '𐖱'), - ('𐖳', '𐖹'), - ('𐖻', '𐖼'), - ('𐗀', '𐗳'), - ('𐘀', '𐜶'), - ('𐝀', '𐝕'), - ('𐝠', '𐝧'), - ('𐞀', '𐞅'), - ('𐞇', '𐞰'), - ('𐞲', '𐞺'), - ('𐠀', '𐠅'), - ('𐠈', '𐠈'), - ('𐠊', '𐠵'), - ('𐠷', '𐠸'), - ('𐠼', '𐠼'), - ('𐠿', '𐡕'), - ('𐡠', '𐡶'), - ('𐢀', '𐢞'), - ('𐣠', '𐣲'), - ('𐣴', '𐣵'), - ('𐤀', '𐤕'), - ('𐤠', '𐤹'), - ('𐦀', '𐦷'), - ('𐦾', '𐦿'), - ('𐨀', '\u{10a03}'), - ('\u{10a05}', '\u{10a06}'), - ('\u{10a0c}', '𐨓'), - ('𐨕', '𐨗'), - ('𐨙', '𐨵'), - ('\u{10a38}', '\u{10a3a}'), - ('\u{10a3f}', '\u{10a3f}'), - ('𐩠', '𐩼'), - ('𐪀', '𐪜'), - ('𐫀', '𐫇'), - ('𐫉', '\u{10ae6}'), - ('𐬀', '𐬵'), - ('𐭀', '𐭕'), - ('𐭠', '𐭲'), - ('𐮀', '𐮑'), - ('𐰀', '𐱈'), - ('𐲀', '𐲲'), - ('𐳀', '𐳲'), - ('𐴀', '\u{10d27}'), - ('𐴰', '𐴹'), - ('𐵀', '𐵥'), - ('\u{10d69}', '\u{10d6d}'), - ('𐵯', '𐶅'), - ('𐺀', '𐺩'), - ('\u{10eab}', '\u{10eac}'), - ('𐺰', '𐺱'), - ('𐻂', '𐻄'), - ('\u{10efc}', '𐼜'), - ('𐼧', '𐼧'), - ('𐼰', 
'\u{10f50}'), - ('𐽰', '\u{10f85}'), - ('𐾰', '𐿄'), - ('𐿠', '𐿶'), - ('𑀀', '\u{11046}'), - ('𑁦', '𑁵'), - ('\u{1107f}', '\u{110ba}'), - ('\u{110c2}', '\u{110c2}'), - ('𑃐', '𑃨'), - ('𑃰', '𑃹'), - ('\u{11100}', '\u{11134}'), - ('𑄶', '𑄿'), - ('𑅄', '𑅇'), - ('𑅐', '\u{11173}'), - ('𑅶', '𑅶'), - ('\u{11180}', '𑇄'), - ('\u{111c9}', '\u{111cc}'), - ('𑇎', '𑇚'), - ('𑇜', '𑇜'), - ('𑈀', '𑈑'), - ('𑈓', '\u{11237}'), - ('\u{1123e}', '\u{11241}'), - ('𑊀', '𑊆'), - ('𑊈', '𑊈'), - ('𑊊', '𑊍'), - ('𑊏', '𑊝'), - ('𑊟', '𑊨'), - ('𑊰', '\u{112ea}'), - ('𑋰', '𑋹'), - ('\u{11300}', '𑌃'), - ('𑌅', '𑌌'), - ('𑌏', '𑌐'), - ('𑌓', '𑌨'), - ('𑌪', '𑌰'), - ('𑌲', '𑌳'), - ('𑌵', '𑌹'), - ('\u{1133b}', '𑍄'), - ('𑍇', '𑍈'), - ('𑍋', '\u{1134d}'), - ('𑍐', '𑍐'), - ('\u{11357}', '\u{11357}'), - ('𑍝', '𑍣'), - ('\u{11366}', '\u{1136c}'), - ('\u{11370}', '\u{11374}'), - ('𑎀', '𑎉'), - ('𑎋', '𑎋'), - ('𑎎', '𑎎'), - ('𑎐', '𑎵'), - ('𑎷', '\u{113c0}'), - ('\u{113c2}', '\u{113c2}'), - ('\u{113c5}', '\u{113c5}'), - ('\u{113c7}', '𑏊'), - ('𑏌', '𑏓'), - ('\u{113e1}', '\u{113e2}'), - ('𑐀', '𑑊'), - ('𑑐', '𑑙'), - ('\u{1145e}', '𑑡'), - ('𑒀', '𑓅'), - ('𑓇', '𑓇'), - ('𑓐', '𑓙'), - ('𑖀', '\u{115b5}'), - ('𑖸', '\u{115c0}'), - ('𑗘', '\u{115dd}'), - ('𑘀', '\u{11640}'), - ('𑙄', '𑙄'), - ('𑙐', '𑙙'), - ('𑚀', '𑚸'), - ('𑛀', '𑛉'), - ('𑛐', '𑛣'), - ('𑜀', '𑜚'), - ('\u{1171d}', '\u{1172b}'), - ('𑜰', '𑜹'), - ('𑝀', '𑝆'), - ('𑠀', '\u{1183a}'), - ('𑢠', '𑣩'), - ('𑣿', '𑤆'), - ('𑤉', '𑤉'), - ('𑤌', '𑤓'), - ('𑤕', '𑤖'), - ('𑤘', '𑤵'), - ('𑤷', '𑤸'), - ('\u{1193b}', '\u{11943}'), - ('𑥐', '𑥙'), - ('𑦠', '𑦧'), - ('𑦪', '\u{119d7}'), - ('\u{119da}', '𑧡'), - ('𑧣', '𑧤'), - ('𑨀', '\u{11a3e}'), - ('\u{11a47}', '\u{11a47}'), - ('𑩐', '\u{11a99}'), - ('𑪝', '𑪝'), - ('𑪰', '𑫸'), - ('𑯀', '𑯠'), - ('𑯰', '𑯹'), - ('𑰀', '𑰈'), - ('𑰊', '\u{11c36}'), - ('\u{11c38}', '𑱀'), - ('𑱐', '𑱙'), - ('𑱲', '𑲏'), - ('\u{11c92}', '\u{11ca7}'), - ('𑲩', '\u{11cb6}'), - ('𑴀', '𑴆'), - ('𑴈', '𑴉'), - ('𑴋', '\u{11d36}'), - ('\u{11d3a}', '\u{11d3a}'), - ('\u{11d3c}', '\u{11d3d}'), - ('\u{11d3f}', '\u{11d47}'), - ('𑵐', '𑵙'), - ('𑵠', '𑵥'), - ('𑵧', '𑵨'), - ('𑵪', '𑶎'), - ('\u{11d90}', '\u{11d91}'), - ('𑶓', '𑶘'), - ('𑶠', '𑶩'), - ('𑻠', '𑻶'), - ('\u{11f00}', '𑼐'), - ('𑼒', '\u{11f3a}'), - ('𑼾', '\u{11f42}'), - ('𑽐', '\u{11f5a}'), - ('𑾰', '𑾰'), - ('𒀀', '𒎙'), - ('𒐀', '𒑮'), - ('𒒀', '𒕃'), - ('𒾐', '𒿰'), - ('𓀀', '𓐯'), - ('\u{13440}', '\u{13455}'), - ('𓑠', '𔏺'), - ('𔐀', '𔙆'), - ('𖄀', '𖄹'), - ('𖠀', '𖨸'), - ('𖩀', '𖩞'), - ('𖩠', '𖩩'), - ('𖩰', '𖪾'), - ('𖫀', '𖫉'), - ('𖫐', '𖫭'), - ('\u{16af0}', '\u{16af4}'), - ('𖬀', '\u{16b36}'), - ('𖭀', '𖭃'), - ('𖭐', '𖭙'), - ('𖭣', '𖭷'), - ('𖭽', '𖮏'), - ('𖵀', '𖵬'), - ('𖵰', '𖵹'), - ('𖹀', '𖹿'), - ('𖼀', '𖽊'), - ('\u{16f4f}', '𖾇'), - ('\u{16f8f}', '𖾟'), - ('𖿠', '𖿡'), - ('𖿣', '\u{16fe4}'), - ('\u{16ff0}', '\u{16ff1}'), - ('𗀀', '𘟷'), - ('𘠀', '𘳕'), - ('𘳿', '𘴈'), - ('𚿰', '𚿳'), - ('𚿵', '𚿻'), - ('𚿽', '𚿾'), - ('𛀀', '𛄢'), - ('𛄲', '𛄲'), - ('𛅐', '𛅒'), - ('𛅕', '𛅕'), - ('𛅤', '𛅧'), - ('𛅰', '𛋻'), - ('𛰀', '𛱪'), - ('𛱰', '𛱼'), - ('𛲀', '𛲈'), - ('𛲐', '𛲙'), - ('\u{1bc9d}', '\u{1bc9e}'), - ('𜳰', '𜳹'), - ('\u{1cf00}', '\u{1cf2d}'), - ('\u{1cf30}', '\u{1cf46}'), - ('\u{1d165}', '\u{1d169}'), - ('\u{1d16d}', '\u{1d172}'), - ('\u{1d17b}', '\u{1d182}'), - ('\u{1d185}', '\u{1d18b}'), - ('\u{1d1aa}', '\u{1d1ad}'), - ('\u{1d242}', '\u{1d244}'), - ('𝐀', '𝑔'), - ('𝑖', '𝒜'), - ('𝒞', '𝒟'), - ('𝒢', '𝒢'), - ('𝒥', '𝒦'), - ('𝒩', '𝒬'), - ('𝒮', '𝒹'), - ('𝒻', '𝒻'), - ('𝒽', '𝓃'), - ('𝓅', '𝔅'), - ('𝔇', '𝔊'), - ('𝔍', '𝔔'), - ('𝔖', '𝔜'), - ('𝔞', '𝔹'), - ('𝔻', '𝔾'), - ('𝕀', '𝕄'), - ('𝕆', '𝕆'), - ('𝕊', '𝕐'), - ('𝕒', '𝚥'), - ('𝚨', '𝛀'), - ('𝛂', '𝛚'), - ('𝛜', '𝛺'), - ('𝛼', '𝜔'), - ('𝜖', 
'𝜴'), - ('𝜶', '𝝎'), - ('𝝐', '𝝮'), - ('𝝰', '𝞈'), - ('𝞊', '𝞨'), - ('𝞪', '𝟂'), - ('𝟄', '𝟋'), - ('𝟎', '𝟿'), - ('\u{1da00}', '\u{1da36}'), - ('\u{1da3b}', '\u{1da6c}'), - ('\u{1da75}', '\u{1da75}'), - ('\u{1da84}', '\u{1da84}'), - ('\u{1da9b}', '\u{1da9f}'), - ('\u{1daa1}', '\u{1daaf}'), - ('𝼀', '𝼞'), - ('𝼥', '𝼪'), - ('\u{1e000}', '\u{1e006}'), - ('\u{1e008}', '\u{1e018}'), - ('\u{1e01b}', '\u{1e021}'), - ('\u{1e023}', '\u{1e024}'), - ('\u{1e026}', '\u{1e02a}'), - ('𞀰', '𞁭'), - ('\u{1e08f}', '\u{1e08f}'), - ('𞄀', '𞄬'), - ('\u{1e130}', '𞄽'), - ('𞅀', '𞅉'), - ('𞅎', '𞅎'), - ('𞊐', '\u{1e2ae}'), - ('𞋀', '𞋹'), - ('𞓐', '𞓹'), - ('𞗐', '𞗺'), - ('𞟠', '𞟦'), - ('𞟨', '𞟫'), - ('𞟭', '𞟮'), - ('𞟰', '𞟾'), - ('𞠀', '𞣄'), - ('\u{1e8d0}', '\u{1e8d6}'), - ('𞤀', '𞥋'), - ('𞥐', '𞥙'), - ('𞸀', '𞸃'), - ('𞸅', '𞸟'), - ('𞸡', '𞸢'), - ('𞸤', '𞸤'), - ('𞸧', '𞸧'), - ('𞸩', '𞸲'), - ('𞸴', '𞸷'), - ('𞸹', '𞸹'), - ('𞸻', '𞸻'), - ('𞹂', '𞹂'), - ('𞹇', '𞹇'), - ('𞹉', '𞹉'), - ('𞹋', '𞹋'), - ('𞹍', '𞹏'), - ('𞹑', '𞹒'), - ('𞹔', '𞹔'), - ('𞹗', '𞹗'), - ('𞹙', '𞹙'), - ('𞹛', '𞹛'), - ('𞹝', '𞹝'), - ('𞹟', '𞹟'), - ('𞹡', '𞹢'), - ('𞹤', '𞹤'), - ('𞹧', '𞹪'), - ('𞹬', '𞹲'), - ('𞹴', '𞹷'), - ('𞹹', '𞹼'), - ('𞹾', '𞹾'), - ('𞺀', '𞺉'), - ('𞺋', '𞺛'), - ('𞺡', '𞺣'), - ('𞺥', '𞺩'), - ('𞺫', '𞺻'), - ('🯰', '🯹'), - ('𠀀', '𪛟'), - ('𪜀', '𫜹'), - ('𫝀', '𫠝'), - ('𫠠', '𬺡'), - ('𬺰', '𮯠'), - ('𮯰', '𮹝'), - ('丽', '𪘀'), - ('𰀀', '𱍊'), - ('𱍐', '𲎯'), - ('\u{e0100}', '\u{e01ef}'), -]; - -pub const XID_START: &'static [(char, char)] = &[ - ('A', 'Z'), - ('a', 'z'), - ('ª', 'ª'), - ('µ', 'µ'), - ('º', 'º'), - ('À', 'Ö'), - ('Ø', 'ö'), - ('ø', 'ˁ'), - ('ˆ', 'ˑ'), - ('ˠ', 'ˤ'), - ('ˬ', 'ˬ'), - ('ˮ', 'ˮ'), - ('Ͱ', 'ʹ'), - ('Ͷ', 'ͷ'), - ('ͻ', 'ͽ'), - ('Ϳ', 'Ϳ'), - ('Ά', 'Ά'), - ('Έ', 'Ί'), - ('Ό', 'Ό'), - ('Ύ', 'Ρ'), - ('Σ', 'ϵ'), - ('Ϸ', 'ҁ'), - ('Ҋ', 'ԯ'), - ('Ա', 'Ֆ'), - ('ՙ', 'ՙ'), - ('ՠ', 'ֈ'), - ('א', 'ת'), - ('ׯ', 'ײ'), - ('ؠ', 'ي'), - ('ٮ', 'ٯ'), - ('ٱ', 'ۓ'), - ('ە', 'ە'), - ('ۥ', 'ۦ'), - ('ۮ', 'ۯ'), - ('ۺ', 'ۼ'), - ('ۿ', 'ۿ'), - ('ܐ', 'ܐ'), - ('ܒ', 'ܯ'), - ('ݍ', 'ޥ'), - ('ޱ', 'ޱ'), - ('ߊ', 'ߪ'), - ('ߴ', 'ߵ'), - ('ߺ', 'ߺ'), - ('ࠀ', 'ࠕ'), - ('ࠚ', 'ࠚ'), - ('ࠤ', 'ࠤ'), - ('ࠨ', 'ࠨ'), - ('ࡀ', 'ࡘ'), - ('ࡠ', 'ࡪ'), - ('ࡰ', 'ࢇ'), - ('ࢉ', 'ࢎ'), - ('ࢠ', 'ࣉ'), - ('ऄ', 'ह'), - ('ऽ', 'ऽ'), - ('ॐ', 'ॐ'), - ('क़', 'ॡ'), - ('ॱ', 'ঀ'), - ('অ', 'ঌ'), - ('এ', 'ঐ'), - ('ও', 'ন'), - ('প', 'র'), - ('ল', 'ল'), - ('শ', 'হ'), - ('ঽ', 'ঽ'), - ('ৎ', 'ৎ'), - ('ড়', 'ঢ়'), - ('য়', 'ৡ'), - ('ৰ', 'ৱ'), - ('ৼ', 'ৼ'), - ('ਅ', 'ਊ'), - ('ਏ', 'ਐ'), - ('ਓ', 'ਨ'), - ('ਪ', 'ਰ'), - ('ਲ', 'ਲ਼'), - ('ਵ', 'ਸ਼'), - ('ਸ', 'ਹ'), - ('ਖ਼', 'ੜ'), - ('ਫ਼', 'ਫ਼'), - ('ੲ', 'ੴ'), - ('અ', 'ઍ'), - ('એ', 'ઑ'), - ('ઓ', 'ન'), - ('પ', 'ર'), - ('લ', 'ળ'), - ('વ', 'હ'), - ('ઽ', 'ઽ'), - ('ૐ', 'ૐ'), - ('ૠ', 'ૡ'), - ('ૹ', 'ૹ'), - ('ଅ', 'ଌ'), - ('ଏ', 'ଐ'), - ('ଓ', 'ନ'), - ('ପ', 'ର'), - ('ଲ', 'ଳ'), - ('ଵ', 'ହ'), - ('ଽ', 'ଽ'), - ('ଡ଼', 'ଢ଼'), - ('ୟ', 'ୡ'), - ('ୱ', 'ୱ'), - ('ஃ', 'ஃ'), - ('அ', 'ஊ'), - ('எ', 'ஐ'), - ('ஒ', 'க'), - ('ங', 'ச'), - ('ஜ', 'ஜ'), - ('ஞ', 'ட'), - ('ண', 'த'), - ('ந', 'ப'), - ('ம', 'ஹ'), - ('ௐ', 'ௐ'), - ('అ', 'ఌ'), - ('ఎ', 'ఐ'), - ('ఒ', 'న'), - ('ప', 'హ'), - ('ఽ', 'ఽ'), - ('ౘ', 'ౚ'), - ('ౝ', 'ౝ'), - ('ౠ', 'ౡ'), - ('ಀ', 'ಀ'), - ('ಅ', 'ಌ'), - ('ಎ', 'ಐ'), - ('ಒ', 'ನ'), - ('ಪ', 'ಳ'), - ('ವ', 'ಹ'), - ('ಽ', 'ಽ'), - ('ೝ', 'ೞ'), - ('ೠ', 'ೡ'), - ('ೱ', 'ೲ'), - ('ഄ', 'ഌ'), - ('എ', 'ഐ'), - ('ഒ', 'ഺ'), - ('ഽ', 'ഽ'), - ('ൎ', 'ൎ'), - ('ൔ', 'ൖ'), - ('ൟ', 'ൡ'), - ('ൺ', 'ൿ'), - ('අ', 'ඖ'), - ('ක', 'න'), - ('ඳ', 'ර'), - ('ල', 'ල'), - ('ව', 'ෆ'), - ('ก', 'ะ'), - ('า', 'า'), - ('เ', 'ๆ'), - ('ກ', 'ຂ'), - ('ຄ', 'ຄ'), - ('ຆ', 'ຊ'), - ('ຌ', 'ຣ'), - ('ລ', 'ລ'), - 
('ວ', 'ະ'), - ('າ', 'າ'), - ('ຽ', 'ຽ'), - ('ເ', 'ໄ'), - ('ໆ', 'ໆ'), - ('ໜ', 'ໟ'), - ('ༀ', 'ༀ'), - ('ཀ', 'ཇ'), - ('ཉ', 'ཬ'), - ('ྈ', 'ྌ'), - ('က', 'ဪ'), - ('ဿ', 'ဿ'), - ('ၐ', 'ၕ'), - ('ၚ', 'ၝ'), - ('ၡ', 'ၡ'), - ('ၥ', 'ၦ'), - ('ၮ', 'ၰ'), - ('ၵ', 'ႁ'), - ('ႎ', 'ႎ'), - ('Ⴀ', 'Ⴥ'), - ('Ⴧ', 'Ⴧ'), - ('Ⴭ', 'Ⴭ'), - ('ა', 'ჺ'), - ('ჼ', 'ቈ'), - ('ቊ', 'ቍ'), - ('ቐ', 'ቖ'), - ('ቘ', 'ቘ'), - ('ቚ', 'ቝ'), - ('በ', 'ኈ'), - ('ኊ', 'ኍ'), - ('ነ', 'ኰ'), - ('ኲ', 'ኵ'), - ('ኸ', 'ኾ'), - ('ዀ', 'ዀ'), - ('ዂ', 'ዅ'), - ('ወ', 'ዖ'), - ('ዘ', 'ጐ'), - ('ጒ', 'ጕ'), - ('ጘ', 'ፚ'), - ('ᎀ', 'ᎏ'), - ('Ꭰ', 'Ᏽ'), - ('ᏸ', 'ᏽ'), - ('ᐁ', 'ᙬ'), - ('ᙯ', 'ᙿ'), - ('ᚁ', 'ᚚ'), - ('ᚠ', 'ᛪ'), - ('ᛮ', 'ᛸ'), - ('ᜀ', 'ᜑ'), - ('ᜟ', 'ᜱ'), - ('ᝀ', 'ᝑ'), - ('ᝠ', 'ᝬ'), - ('ᝮ', 'ᝰ'), - ('ក', 'ឳ'), - ('ៗ', 'ៗ'), - ('ៜ', 'ៜ'), - ('ᠠ', 'ᡸ'), - ('ᢀ', 'ᢨ'), - ('ᢪ', 'ᢪ'), - ('ᢰ', 'ᣵ'), - ('ᤀ', 'ᤞ'), - ('ᥐ', 'ᥭ'), - ('ᥰ', 'ᥴ'), - ('ᦀ', 'ᦫ'), - ('ᦰ', 'ᧉ'), - ('ᨀ', 'ᨖ'), - ('ᨠ', 'ᩔ'), - ('ᪧ', 'ᪧ'), - ('ᬅ', 'ᬳ'), - ('ᭅ', 'ᭌ'), - ('ᮃ', 'ᮠ'), - ('ᮮ', 'ᮯ'), - ('ᮺ', 'ᯥ'), - ('ᰀ', 'ᰣ'), - ('ᱍ', 'ᱏ'), - ('ᱚ', 'ᱽ'), - ('ᲀ', 'ᲊ'), - ('Ა', 'Ჺ'), - ('Ჽ', 'Ჿ'), - ('ᳩ', 'ᳬ'), - ('ᳮ', 'ᳳ'), - ('ᳵ', 'ᳶ'), - ('ᳺ', 'ᳺ'), - ('ᴀ', 'ᶿ'), - ('Ḁ', 'ἕ'), - ('Ἐ', 'Ἕ'), - ('ἠ', 'ὅ'), - ('Ὀ', 'Ὅ'), - ('ὐ', 'ὗ'), - ('Ὑ', 'Ὑ'), - ('Ὓ', 'Ὓ'), - ('Ὕ', 'Ὕ'), - ('Ὗ', 'ώ'), - ('ᾀ', 'ᾴ'), - ('ᾶ', 'ᾼ'), - ('ι', 'ι'), - ('ῂ', 'ῄ'), - ('ῆ', 'ῌ'), - ('ῐ', 'ΐ'), - ('ῖ', 'Ί'), - ('ῠ', 'Ῥ'), - ('ῲ', 'ῴ'), - ('ῶ', 'ῼ'), - ('ⁱ', 'ⁱ'), - ('ⁿ', 'ⁿ'), - ('ₐ', 'ₜ'), - ('ℂ', 'ℂ'), - ('ℇ', 'ℇ'), - ('ℊ', 'ℓ'), - ('ℕ', 'ℕ'), - ('℘', 'ℝ'), - ('ℤ', 'ℤ'), - ('Ω', 'Ω'), - ('ℨ', 'ℨ'), - ('K', 'ℹ'), - ('ℼ', 'ℿ'), - ('ⅅ', 'ⅉ'), - ('ⅎ', 'ⅎ'), - ('Ⅰ', 'ↈ'), - ('Ⰰ', 'ⳤ'), - ('Ⳬ', 'ⳮ'), - ('Ⳳ', 'ⳳ'), - ('ⴀ', 'ⴥ'), - ('ⴧ', 'ⴧ'), - ('ⴭ', 'ⴭ'), - ('ⴰ', 'ⵧ'), - ('ⵯ', 'ⵯ'), - ('ⶀ', 'ⶖ'), - ('ⶠ', 'ⶦ'), - ('ⶨ', 'ⶮ'), - ('ⶰ', 'ⶶ'), - ('ⶸ', 'ⶾ'), - ('ⷀ', 'ⷆ'), - ('ⷈ', 'ⷎ'), - ('ⷐ', 'ⷖ'), - ('ⷘ', 'ⷞ'), - ('々', '〇'), - ('〡', '〩'), - ('〱', '〵'), - ('〸', '〼'), - ('ぁ', 'ゖ'), - ('ゝ', 'ゟ'), - ('ァ', 'ヺ'), - ('ー', 'ヿ'), - ('ㄅ', 'ㄯ'), - ('ㄱ', 'ㆎ'), - ('ㆠ', 'ㆿ'), - ('ㇰ', 'ㇿ'), - ('㐀', '䶿'), - ('一', 'ꒌ'), - ('ꓐ', 'ꓽ'), - ('ꔀ', 'ꘌ'), - ('ꘐ', 'ꘟ'), - ('ꘪ', 'ꘫ'), - ('Ꙁ', 'ꙮ'), - ('ꙿ', 'ꚝ'), - ('ꚠ', 'ꛯ'), - ('ꜗ', 'ꜟ'), - ('Ꜣ', 'ꞈ'), - ('Ꞌ', 'ꟍ'), - ('Ꟑ', 'ꟑ'), - ('ꟓ', 'ꟓ'), - ('ꟕ', 'Ƛ'), - ('ꟲ', 'ꠁ'), - ('ꠃ', 'ꠅ'), - ('ꠇ', 'ꠊ'), - ('ꠌ', 'ꠢ'), - ('ꡀ', 'ꡳ'), - ('ꢂ', 'ꢳ'), - ('ꣲ', 'ꣷ'), - ('ꣻ', 'ꣻ'), - ('ꣽ', 'ꣾ'), - ('ꤊ', 'ꤥ'), - ('ꤰ', 'ꥆ'), - ('ꥠ', 'ꥼ'), - ('ꦄ', 'ꦲ'), - ('ꧏ', 'ꧏ'), - ('ꧠ', 'ꧤ'), - ('ꧦ', 'ꧯ'), - ('ꧺ', 'ꧾ'), - ('ꨀ', 'ꨨ'), - ('ꩀ', 'ꩂ'), - ('ꩄ', 'ꩋ'), - ('ꩠ', 'ꩶ'), - ('ꩺ', 'ꩺ'), - ('ꩾ', 'ꪯ'), - ('ꪱ', 'ꪱ'), - ('ꪵ', 'ꪶ'), - ('ꪹ', 'ꪽ'), - ('ꫀ', 'ꫀ'), - ('ꫂ', 'ꫂ'), - ('ꫛ', 'ꫝ'), - ('ꫠ', 'ꫪ'), - ('ꫲ', 'ꫴ'), - ('ꬁ', 'ꬆ'), - ('ꬉ', 'ꬎ'), - ('ꬑ', 'ꬖ'), - ('ꬠ', 'ꬦ'), - ('ꬨ', 'ꬮ'), - ('ꬰ', 'ꭚ'), - ('ꭜ', 'ꭩ'), - ('ꭰ', 'ꯢ'), - ('가', '힣'), - ('ힰ', 'ퟆ'), - ('ퟋ', 'ퟻ'), - ('豈', '舘'), - ('並', '龎'), - ('ff', 'st'), - ('ﬓ', 'ﬗ'), - ('יִ', 'יִ'), - ('ײַ', 'ﬨ'), - ('שׁ', 'זּ'), - ('טּ', 'לּ'), - ('מּ', 'מּ'), - ('נּ', 'סּ'), - ('ףּ', 'פּ'), - ('צּ', 'ﮱ'), - ('ﯓ', 'ﱝ'), - ('ﱤ', 'ﴽ'), - ('ﵐ', 'ﶏ'), - ('ﶒ', 'ﷇ'), - ('ﷰ', 'ﷹ'), - ('ﹱ', 'ﹱ'), - ('ﹳ', 'ﹳ'), - ('ﹷ', 'ﹷ'), - ('ﹹ', 'ﹹ'), - ('ﹻ', 'ﹻ'), - ('ﹽ', 'ﹽ'), - ('ﹿ', 'ﻼ'), - ('A', 'Z'), - ('a', 'z'), - ('ヲ', 'ン'), - ('ᅠ', 'ᄒ'), - ('ᅡ', 'ᅦ'), - ('ᅧ', 'ᅬ'), - ('ᅭ', 'ᅲ'), - ('ᅳ', 'ᅵ'), - ('𐀀', '𐀋'), - ('𐀍', '𐀦'), - ('𐀨', '𐀺'), - ('𐀼', '𐀽'), - ('𐀿', '𐁍'), - ('𐁐', '𐁝'), - ('𐂀', '𐃺'), - ('𐅀', '𐅴'), - ('𐊀', '𐊜'), - ('𐊠', '𐋐'), - ('𐌀', '𐌟'), - ('𐌭', '𐍊'), - ('𐍐', '𐍵'), - ('𐎀', '𐎝'), - ('𐎠', '𐏃'), - ('𐏈', '𐏏'), - ('𐏑', 
'𐏕'), - ('𐐀', '𐒝'), - ('𐒰', '𐓓'), - ('𐓘', '𐓻'), - ('𐔀', '𐔧'), - ('𐔰', '𐕣'), - ('𐕰', '𐕺'), - ('𐕼', '𐖊'), - ('𐖌', '𐖒'), - ('𐖔', '𐖕'), - ('𐖗', '𐖡'), - ('𐖣', '𐖱'), - ('𐖳', '𐖹'), - ('𐖻', '𐖼'), - ('𐗀', '𐗳'), - ('𐘀', '𐜶'), - ('𐝀', '𐝕'), - ('𐝠', '𐝧'), - ('𐞀', '𐞅'), - ('𐞇', '𐞰'), - ('𐞲', '𐞺'), - ('𐠀', '𐠅'), - ('𐠈', '𐠈'), - ('𐠊', '𐠵'), - ('𐠷', '𐠸'), - ('𐠼', '𐠼'), - ('𐠿', '𐡕'), - ('𐡠', '𐡶'), - ('𐢀', '𐢞'), - ('𐣠', '𐣲'), - ('𐣴', '𐣵'), - ('𐤀', '𐤕'), - ('𐤠', '𐤹'), - ('𐦀', '𐦷'), - ('𐦾', '𐦿'), - ('𐨀', '𐨀'), - ('𐨐', '𐨓'), - ('𐨕', '𐨗'), - ('𐨙', '𐨵'), - ('𐩠', '𐩼'), - ('𐪀', '𐪜'), - ('𐫀', '𐫇'), - ('𐫉', '𐫤'), - ('𐬀', '𐬵'), - ('𐭀', '𐭕'), - ('𐭠', '𐭲'), - ('𐮀', '𐮑'), - ('𐰀', '𐱈'), - ('𐲀', '𐲲'), - ('𐳀', '𐳲'), - ('𐴀', '𐴣'), - ('𐵊', '𐵥'), - ('𐵯', '𐶅'), - ('𐺀', '𐺩'), - ('𐺰', '𐺱'), - ('𐻂', '𐻄'), - ('𐼀', '𐼜'), - ('𐼧', '𐼧'), - ('𐼰', '𐽅'), - ('𐽰', '𐾁'), - ('𐾰', '𐿄'), - ('𐿠', '𐿶'), - ('𑀃', '𑀷'), - ('𑁱', '𑁲'), - ('𑁵', '𑁵'), - ('𑂃', '𑂯'), - ('𑃐', '𑃨'), - ('𑄃', '𑄦'), - ('𑅄', '𑅄'), - ('𑅇', '𑅇'), - ('𑅐', '𑅲'), - ('𑅶', '𑅶'), - ('𑆃', '𑆲'), - ('𑇁', '𑇄'), - ('𑇚', '𑇚'), - ('𑇜', '𑇜'), - ('𑈀', '𑈑'), - ('𑈓', '𑈫'), - ('𑈿', '𑉀'), - ('𑊀', '𑊆'), - ('𑊈', '𑊈'), - ('𑊊', '𑊍'), - ('𑊏', '𑊝'), - ('𑊟', '𑊨'), - ('𑊰', '𑋞'), - ('𑌅', '𑌌'), - ('𑌏', '𑌐'), - ('𑌓', '𑌨'), - ('𑌪', '𑌰'), - ('𑌲', '𑌳'), - ('𑌵', '𑌹'), - ('𑌽', '𑌽'), - ('𑍐', '𑍐'), - ('𑍝', '𑍡'), - ('𑎀', '𑎉'), - ('𑎋', '𑎋'), - ('𑎎', '𑎎'), - ('𑎐', '𑎵'), - ('𑎷', '𑎷'), - ('𑏑', '𑏑'), - ('𑏓', '𑏓'), - ('𑐀', '𑐴'), - ('𑑇', '𑑊'), - ('𑑟', '𑑡'), - ('𑒀', '𑒯'), - ('𑓄', '𑓅'), - ('𑓇', '𑓇'), - ('𑖀', '𑖮'), - ('𑗘', '𑗛'), - ('𑘀', '𑘯'), - ('𑙄', '𑙄'), - ('𑚀', '𑚪'), - ('𑚸', '𑚸'), - ('𑜀', '𑜚'), - ('𑝀', '𑝆'), - ('𑠀', '𑠫'), - ('𑢠', '𑣟'), - ('𑣿', '𑤆'), - ('𑤉', '𑤉'), - ('𑤌', '𑤓'), - ('𑤕', '𑤖'), - ('𑤘', '𑤯'), - ('𑤿', '𑤿'), - ('𑥁', '𑥁'), - ('𑦠', '𑦧'), - ('𑦪', '𑧐'), - ('𑧡', '𑧡'), - ('𑧣', '𑧣'), - ('𑨀', '𑨀'), - ('𑨋', '𑨲'), - ('𑨺', '𑨺'), - ('𑩐', '𑩐'), - ('𑩜', '𑪉'), - ('𑪝', '𑪝'), - ('𑪰', '𑫸'), - ('𑯀', '𑯠'), - ('𑰀', '𑰈'), - ('𑰊', '𑰮'), - ('𑱀', '𑱀'), - ('𑱲', '𑲏'), - ('𑴀', '𑴆'), - ('𑴈', '𑴉'), - ('𑴋', '𑴰'), - ('𑵆', '𑵆'), - ('𑵠', '𑵥'), - ('𑵧', '𑵨'), - ('𑵪', '𑶉'), - ('𑶘', '𑶘'), - ('𑻠', '𑻲'), - ('𑼂', '𑼂'), - ('𑼄', '𑼐'), - ('𑼒', '𑼳'), - ('𑾰', '𑾰'), - ('𒀀', '𒎙'), - ('𒐀', '𒑮'), - ('𒒀', '𒕃'), - ('𒾐', '𒿰'), - ('𓀀', '𓐯'), - ('𓑁', '𓑆'), - ('𓑠', '𔏺'), - ('𔐀', '𔙆'), - ('𖄀', '𖄝'), - ('𖠀', '𖨸'), - ('𖩀', '𖩞'), - ('𖩰', '𖪾'), - ('𖫐', '𖫭'), - ('𖬀', '𖬯'), - ('𖭀', '𖭃'), - ('𖭣', '𖭷'), - ('𖭽', '𖮏'), - ('𖵀', '𖵬'), - ('𖹀', '𖹿'), - ('𖼀', '𖽊'), - ('𖽐', '𖽐'), - ('𖾓', '𖾟'), - ('𖿠', '𖿡'), - ('𖿣', '𖿣'), - ('𗀀', '𘟷'), - ('𘠀', '𘳕'), - ('𘳿', '𘴈'), - ('𚿰', '𚿳'), - ('𚿵', '𚿻'), - ('𚿽', '𚿾'), - ('𛀀', '𛄢'), - ('𛄲', '𛄲'), - ('𛅐', '𛅒'), - ('𛅕', '𛅕'), - ('𛅤', '𛅧'), - ('𛅰', '𛋻'), - ('𛰀', '𛱪'), - ('𛱰', '𛱼'), - ('𛲀', '𛲈'), - ('𛲐', '𛲙'), - ('𝐀', '𝑔'), - ('𝑖', '𝒜'), - ('𝒞', '𝒟'), - ('𝒢', '𝒢'), - ('𝒥', '𝒦'), - ('𝒩', '𝒬'), - ('𝒮', '𝒹'), - ('𝒻', '𝒻'), - ('𝒽', '𝓃'), - ('𝓅', '𝔅'), - ('𝔇', '𝔊'), - ('𝔍', '𝔔'), - ('𝔖', '𝔜'), - ('𝔞', '𝔹'), - ('𝔻', '𝔾'), - ('𝕀', '𝕄'), - ('𝕆', '𝕆'), - ('𝕊', '𝕐'), - ('𝕒', '𝚥'), - ('𝚨', '𝛀'), - ('𝛂', '𝛚'), - ('𝛜', '𝛺'), - ('𝛼', '𝜔'), - ('𝜖', '𝜴'), - ('𝜶', '𝝎'), - ('𝝐', '𝝮'), - ('𝝰', '𝞈'), - ('𝞊', '𝞨'), - ('𝞪', '𝟂'), - ('𝟄', '𝟋'), - ('𝼀', '𝼞'), - ('𝼥', '𝼪'), - ('𞀰', '𞁭'), - ('𞄀', '𞄬'), - ('𞄷', '𞄽'), - ('𞅎', '𞅎'), - ('𞊐', '𞊭'), - ('𞋀', '𞋫'), - ('𞓐', '𞓫'), - ('𞗐', '𞗭'), - ('𞗰', '𞗰'), - ('𞟠', '𞟦'), - ('𞟨', '𞟫'), - ('𞟭', '𞟮'), - ('𞟰', '𞟾'), - ('𞠀', '𞣄'), - ('𞤀', '𞥃'), - ('𞥋', '𞥋'), - ('𞸀', '𞸃'), - ('𞸅', '𞸟'), - ('𞸡', '𞸢'), - ('𞸤', '𞸤'), - ('𞸧', '𞸧'), - ('𞸩', '𞸲'), - ('𞸴', '𞸷'), - ('𞸹', '𞸹'), - ('𞸻', '𞸻'), - ('𞹂', '𞹂'), - ('𞹇', '𞹇'), - ('𞹉', '𞹉'), - ('𞹋', '𞹋'), - 
('𞹍', '𞹏'), - ('𞹑', '𞹒'), - ('𞹔', '𞹔'), - ('𞹗', '𞹗'), - ('𞹙', '𞹙'), - ('𞹛', '𞹛'), - ('𞹝', '𞹝'), - ('𞹟', '𞹟'), - ('𞹡', '𞹢'), - ('𞹤', '𞹤'), - ('𞹧', '𞹪'), - ('𞹬', '𞹲'), - ('𞹴', '𞹷'), - ('𞹹', '𞹼'), - ('𞹾', '𞹾'), - ('𞺀', '𞺉'), - ('𞺋', '𞺛'), - ('𞺡', '𞺣'), - ('𞺥', '𞺩'), - ('𞺫', '𞺻'), - ('𠀀', '𪛟'), - ('𪜀', '𫜹'), - ('𫝀', '𫠝'), - ('𫠠', '𬺡'), - ('𬺰', '𮯠'), - ('𮯰', '𮹝'), - ('丽', '𪘀'), - ('𰀀', '𱍊'), - ('𱍐', '𲎯'), -]; diff --git a/vendor/regex-syntax/src/unicode_tables/property_names.rs b/vendor/regex-syntax/src/unicode_tables/property_names.rs deleted file mode 100644 index a27b49133d33ac..00000000000000 --- a/vendor/regex-syntax/src/unicode_tables/property_names.rs +++ /dev/null @@ -1,281 +0,0 @@ -// DO NOT EDIT THIS FILE. IT WAS AUTOMATICALLY GENERATED BY: -// -// ucd-generate property-names ucd-16.0.0 -// -// Unicode version: 16.0.0. -// -// ucd-generate 0.3.1 is available on crates.io. - -pub const PROPERTY_NAMES: &'static [(&'static str, &'static str)] = &[ - ("age", "Age"), - ("ahex", "ASCII_Hex_Digit"), - ("alpha", "Alphabetic"), - ("alphabetic", "Alphabetic"), - ("asciihexdigit", "ASCII_Hex_Digit"), - ("bc", "Bidi_Class"), - ("bidic", "Bidi_Control"), - ("bidiclass", "Bidi_Class"), - ("bidicontrol", "Bidi_Control"), - ("bidim", "Bidi_Mirrored"), - ("bidimirrored", "Bidi_Mirrored"), - ("bidimirroringglyph", "Bidi_Mirroring_Glyph"), - ("bidipairedbracket", "Bidi_Paired_Bracket"), - ("bidipairedbrackettype", "Bidi_Paired_Bracket_Type"), - ("blk", "Block"), - ("block", "Block"), - ("bmg", "Bidi_Mirroring_Glyph"), - ("bpb", "Bidi_Paired_Bracket"), - ("bpt", "Bidi_Paired_Bracket_Type"), - ("canonicalcombiningclass", "Canonical_Combining_Class"), - ("cased", "Cased"), - ("casefolding", "Case_Folding"), - ("caseignorable", "Case_Ignorable"), - ("ccc", "Canonical_Combining_Class"), - ("ce", "Composition_Exclusion"), - ("cf", "Case_Folding"), - ("changeswhencasefolded", "Changes_When_Casefolded"), - ("changeswhencasemapped", "Changes_When_Casemapped"), - ("changeswhenlowercased", "Changes_When_Lowercased"), - ("changeswhennfkccasefolded", "Changes_When_NFKC_Casefolded"), - ("changeswhentitlecased", "Changes_When_Titlecased"), - ("changeswhenuppercased", "Changes_When_Uppercased"), - ("ci", "Case_Ignorable"), - ("cjkaccountingnumeric", "kAccountingNumeric"), - ("cjkcompatibilityvariant", "kCompatibilityVariant"), - ("cjkiicore", "kIICore"), - ("cjkirggsource", "kIRG_GSource"), - ("cjkirghsource", "kIRG_HSource"), - ("cjkirgjsource", "kIRG_JSource"), - ("cjkirgkpsource", "kIRG_KPSource"), - ("cjkirgksource", "kIRG_KSource"), - ("cjkirgmsource", "kIRG_MSource"), - ("cjkirgssource", "kIRG_SSource"), - ("cjkirgtsource", "kIRG_TSource"), - ("cjkirguksource", "kIRG_UKSource"), - ("cjkirgusource", "kIRG_USource"), - ("cjkirgvsource", "kIRG_VSource"), - ("cjkothernumeric", "kOtherNumeric"), - ("cjkprimarynumeric", "kPrimaryNumeric"), - ("cjkrsunicode", "kRSUnicode"), - ("compex", "Full_Composition_Exclusion"), - ("compositionexclusion", "Composition_Exclusion"), - ("cwcf", "Changes_When_Casefolded"), - ("cwcm", "Changes_When_Casemapped"), - ("cwkcf", "Changes_When_NFKC_Casefolded"), - ("cwl", "Changes_When_Lowercased"), - ("cwt", "Changes_When_Titlecased"), - ("cwu", "Changes_When_Uppercased"), - ("dash", "Dash"), - ("decompositionmapping", "Decomposition_Mapping"), - ("decompositiontype", "Decomposition_Type"), - ("defaultignorablecodepoint", "Default_Ignorable_Code_Point"), - ("dep", "Deprecated"), - ("deprecated", "Deprecated"), - ("di", "Default_Ignorable_Code_Point"), - ("dia", "Diacritic"), - ("diacritic", 
"Diacritic"), - ("dm", "Decomposition_Mapping"), - ("dt", "Decomposition_Type"), - ("ea", "East_Asian_Width"), - ("eastasianwidth", "East_Asian_Width"), - ("ebase", "Emoji_Modifier_Base"), - ("ecomp", "Emoji_Component"), - ("emod", "Emoji_Modifier"), - ("emoji", "Emoji"), - ("emojicomponent", "Emoji_Component"), - ("emojimodifier", "Emoji_Modifier"), - ("emojimodifierbase", "Emoji_Modifier_Base"), - ("emojipresentation", "Emoji_Presentation"), - ("epres", "Emoji_Presentation"), - ("equideo", "Equivalent_Unified_Ideograph"), - ("equivalentunifiedideograph", "Equivalent_Unified_Ideograph"), - ("expandsonnfc", "Expands_On_NFC"), - ("expandsonnfd", "Expands_On_NFD"), - ("expandsonnfkc", "Expands_On_NFKC"), - ("expandsonnfkd", "Expands_On_NFKD"), - ("ext", "Extender"), - ("extendedpictographic", "Extended_Pictographic"), - ("extender", "Extender"), - ("extpict", "Extended_Pictographic"), - ("fcnfkc", "FC_NFKC_Closure"), - ("fcnfkcclosure", "FC_NFKC_Closure"), - ("fullcompositionexclusion", "Full_Composition_Exclusion"), - ("gc", "General_Category"), - ("gcb", "Grapheme_Cluster_Break"), - ("generalcategory", "General_Category"), - ("graphemebase", "Grapheme_Base"), - ("graphemeclusterbreak", "Grapheme_Cluster_Break"), - ("graphemeextend", "Grapheme_Extend"), - ("graphemelink", "Grapheme_Link"), - ("grbase", "Grapheme_Base"), - ("grext", "Grapheme_Extend"), - ("grlink", "Grapheme_Link"), - ("hangulsyllabletype", "Hangul_Syllable_Type"), - ("hex", "Hex_Digit"), - ("hexdigit", "Hex_Digit"), - ("hst", "Hangul_Syllable_Type"), - ("hyphen", "Hyphen"), - ("idc", "ID_Continue"), - ("idcompatmathcontinue", "ID_Compat_Math_Continue"), - ("idcompatmathstart", "ID_Compat_Math_Start"), - ("idcontinue", "ID_Continue"), - ("ideo", "Ideographic"), - ("ideographic", "Ideographic"), - ("ids", "ID_Start"), - ("idsb", "IDS_Binary_Operator"), - ("idsbinaryoperator", "IDS_Binary_Operator"), - ("idst", "IDS_Trinary_Operator"), - ("idstart", "ID_Start"), - ("idstrinaryoperator", "IDS_Trinary_Operator"), - ("idsu", "IDS_Unary_Operator"), - ("idsunaryoperator", "IDS_Unary_Operator"), - ("incb", "Indic_Conjunct_Break"), - ("indicconjunctbreak", "Indic_Conjunct_Break"), - ("indicpositionalcategory", "Indic_Positional_Category"), - ("indicsyllabiccategory", "Indic_Syllabic_Category"), - ("inpc", "Indic_Positional_Category"), - ("insc", "Indic_Syllabic_Category"), - ("isc", "ISO_Comment"), - ("jamoshortname", "Jamo_Short_Name"), - ("jg", "Joining_Group"), - ("joinc", "Join_Control"), - ("joincontrol", "Join_Control"), - ("joininggroup", "Joining_Group"), - ("joiningtype", "Joining_Type"), - ("jsn", "Jamo_Short_Name"), - ("jt", "Joining_Type"), - ("kaccountingnumeric", "kAccountingNumeric"), - ("kcompatibilityvariant", "kCompatibilityVariant"), - ("kehcat", "kEH_Cat"), - ("kehdesc", "kEH_Desc"), - ("kehhg", "kEH_HG"), - ("kehifao", "kEH_IFAO"), - ("kehjsesh", "kEH_JSesh"), - ("kehnomirror", "kEH_NoMirror"), - ("kehnorotate", "kEH_NoRotate"), - ("kiicore", "kIICore"), - ("kirggsource", "kIRG_GSource"), - ("kirghsource", "kIRG_HSource"), - ("kirgjsource", "kIRG_JSource"), - ("kirgkpsource", "kIRG_KPSource"), - ("kirgksource", "kIRG_KSource"), - ("kirgmsource", "kIRG_MSource"), - ("kirgssource", "kIRG_SSource"), - ("kirgtsource", "kIRG_TSource"), - ("kirguksource", "kIRG_UKSource"), - ("kirgusource", "kIRG_USource"), - ("kirgvsource", "kIRG_VSource"), - ("kothernumeric", "kOtherNumeric"), - ("kprimarynumeric", "kPrimaryNumeric"), - ("krsunicode", "kRSUnicode"), - ("lb", "Line_Break"), - ("lc", "Lowercase_Mapping"), - 
("linebreak", "Line_Break"), - ("loe", "Logical_Order_Exception"), - ("logicalorderexception", "Logical_Order_Exception"), - ("lower", "Lowercase"), - ("lowercase", "Lowercase"), - ("lowercasemapping", "Lowercase_Mapping"), - ("math", "Math"), - ("mcm", "Modifier_Combining_Mark"), - ("modifiercombiningmark", "Modifier_Combining_Mark"), - ("na", "Name"), - ("na1", "Unicode_1_Name"), - ("name", "Name"), - ("namealias", "Name_Alias"), - ("nchar", "Noncharacter_Code_Point"), - ("nfcqc", "NFC_Quick_Check"), - ("nfcquickcheck", "NFC_Quick_Check"), - ("nfdqc", "NFD_Quick_Check"), - ("nfdquickcheck", "NFD_Quick_Check"), - ("nfkccasefold", "NFKC_Casefold"), - ("nfkccf", "NFKC_Casefold"), - ("nfkcqc", "NFKC_Quick_Check"), - ("nfkcquickcheck", "NFKC_Quick_Check"), - ("nfkcscf", "NFKC_Simple_Casefold"), - ("nfkcsimplecasefold", "NFKC_Simple_Casefold"), - ("nfkdqc", "NFKD_Quick_Check"), - ("nfkdquickcheck", "NFKD_Quick_Check"), - ("noncharactercodepoint", "Noncharacter_Code_Point"), - ("nt", "Numeric_Type"), - ("numerictype", "Numeric_Type"), - ("numericvalue", "Numeric_Value"), - ("nv", "Numeric_Value"), - ("oalpha", "Other_Alphabetic"), - ("ocomment", "ISO_Comment"), - ("odi", "Other_Default_Ignorable_Code_Point"), - ("ogrext", "Other_Grapheme_Extend"), - ("oidc", "Other_ID_Continue"), - ("oids", "Other_ID_Start"), - ("olower", "Other_Lowercase"), - ("omath", "Other_Math"), - ("otheralphabetic", "Other_Alphabetic"), - ("otherdefaultignorablecodepoint", "Other_Default_Ignorable_Code_Point"), - ("othergraphemeextend", "Other_Grapheme_Extend"), - ("otheridcontinue", "Other_ID_Continue"), - ("otheridstart", "Other_ID_Start"), - ("otherlowercase", "Other_Lowercase"), - ("othermath", "Other_Math"), - ("otheruppercase", "Other_Uppercase"), - ("oupper", "Other_Uppercase"), - ("patsyn", "Pattern_Syntax"), - ("patternsyntax", "Pattern_Syntax"), - ("patternwhitespace", "Pattern_White_Space"), - ("patws", "Pattern_White_Space"), - ("pcm", "Prepended_Concatenation_Mark"), - ("prependedconcatenationmark", "Prepended_Concatenation_Mark"), - ("qmark", "Quotation_Mark"), - ("quotationmark", "Quotation_Mark"), - ("radical", "Radical"), - ("regionalindicator", "Regional_Indicator"), - ("ri", "Regional_Indicator"), - ("sb", "Sentence_Break"), - ("sc", "Script"), - ("scf", "Simple_Case_Folding"), - ("script", "Script"), - ("scriptextensions", "Script_Extensions"), - ("scx", "Script_Extensions"), - ("sd", "Soft_Dotted"), - ("sentencebreak", "Sentence_Break"), - ("sentenceterminal", "Sentence_Terminal"), - ("sfc", "Simple_Case_Folding"), - ("simplecasefolding", "Simple_Case_Folding"), - ("simplelowercasemapping", "Simple_Lowercase_Mapping"), - ("simpletitlecasemapping", "Simple_Titlecase_Mapping"), - ("simpleuppercasemapping", "Simple_Uppercase_Mapping"), - ("slc", "Simple_Lowercase_Mapping"), - ("softdotted", "Soft_Dotted"), - ("space", "White_Space"), - ("stc", "Simple_Titlecase_Mapping"), - ("sterm", "Sentence_Terminal"), - ("suc", "Simple_Uppercase_Mapping"), - ("tc", "Titlecase_Mapping"), - ("term", "Terminal_Punctuation"), - ("terminalpunctuation", "Terminal_Punctuation"), - ("titlecasemapping", "Titlecase_Mapping"), - ("uc", "Uppercase_Mapping"), - ("uideo", "Unified_Ideograph"), - ("unicode1name", "Unicode_1_Name"), - ("unicoderadicalstroke", "kRSUnicode"), - ("unifiedideograph", "Unified_Ideograph"), - ("upper", "Uppercase"), - ("uppercase", "Uppercase"), - ("uppercasemapping", "Uppercase_Mapping"), - ("urs", "kRSUnicode"), - ("variationselector", "Variation_Selector"), - ("verticalorientation", 
"Vertical_Orientation"), - ("vo", "Vertical_Orientation"), - ("vs", "Variation_Selector"), - ("wb", "Word_Break"), - ("whitespace", "White_Space"), - ("wordbreak", "Word_Break"), - ("wspace", "White_Space"), - ("xidc", "XID_Continue"), - ("xidcontinue", "XID_Continue"), - ("xids", "XID_Start"), - ("xidstart", "XID_Start"), - ("xonfc", "Expands_On_NFC"), - ("xonfd", "Expands_On_NFD"), - ("xonfkc", "Expands_On_NFKC"), - ("xonfkd", "Expands_On_NFKD"), -]; diff --git a/vendor/regex-syntax/src/unicode_tables/property_values.rs b/vendor/regex-syntax/src/unicode_tables/property_values.rs deleted file mode 100644 index 2270d66383735d..00000000000000 --- a/vendor/regex-syntax/src/unicode_tables/property_values.rs +++ /dev/null @@ -1,956 +0,0 @@ -// DO NOT EDIT THIS FILE. IT WAS AUTOMATICALLY GENERATED BY: -// -// ucd-generate property-values ucd-16.0.0 --include gc,script,scx,age,gcb,wb,sb -// -// Unicode version: 16.0.0. -// -// ucd-generate 0.3.1 is available on crates.io. - -pub const PROPERTY_VALUES: &'static [( - &'static str, - &'static [(&'static str, &'static str)], -)] = &[ - ( - "Age", - &[ - ("1.1", "V1_1"), - ("10.0", "V10_0"), - ("11.0", "V11_0"), - ("12.0", "V12_0"), - ("12.1", "V12_1"), - ("13.0", "V13_0"), - ("14.0", "V14_0"), - ("15.0", "V15_0"), - ("15.1", "V15_1"), - ("16.0", "V16_0"), - ("2.0", "V2_0"), - ("2.1", "V2_1"), - ("3.0", "V3_0"), - ("3.1", "V3_1"), - ("3.2", "V3_2"), - ("4.0", "V4_0"), - ("4.1", "V4_1"), - ("5.0", "V5_0"), - ("5.1", "V5_1"), - ("5.2", "V5_2"), - ("6.0", "V6_0"), - ("6.1", "V6_1"), - ("6.2", "V6_2"), - ("6.3", "V6_3"), - ("7.0", "V7_0"), - ("8.0", "V8_0"), - ("9.0", "V9_0"), - ("na", "Unassigned"), - ("unassigned", "Unassigned"), - ("v100", "V10_0"), - ("v11", "V1_1"), - ("v110", "V11_0"), - ("v120", "V12_0"), - ("v121", "V12_1"), - ("v130", "V13_0"), - ("v140", "V14_0"), - ("v150", "V15_0"), - ("v151", "V15_1"), - ("v160", "V16_0"), - ("v20", "V2_0"), - ("v21", "V2_1"), - ("v30", "V3_0"), - ("v31", "V3_1"), - ("v32", "V3_2"), - ("v40", "V4_0"), - ("v41", "V4_1"), - ("v50", "V5_0"), - ("v51", "V5_1"), - ("v52", "V5_2"), - ("v60", "V6_0"), - ("v61", "V6_1"), - ("v62", "V6_2"), - ("v63", "V6_3"), - ("v70", "V7_0"), - ("v80", "V8_0"), - ("v90", "V9_0"), - ], - ), - ( - "General_Category", - &[ - ("c", "Other"), - ("casedletter", "Cased_Letter"), - ("cc", "Control"), - ("cf", "Format"), - ("closepunctuation", "Close_Punctuation"), - ("cn", "Unassigned"), - ("cntrl", "Control"), - ("co", "Private_Use"), - ("combiningmark", "Mark"), - ("connectorpunctuation", "Connector_Punctuation"), - ("control", "Control"), - ("cs", "Surrogate"), - ("currencysymbol", "Currency_Symbol"), - ("dashpunctuation", "Dash_Punctuation"), - ("decimalnumber", "Decimal_Number"), - ("digit", "Decimal_Number"), - ("enclosingmark", "Enclosing_Mark"), - ("finalpunctuation", "Final_Punctuation"), - ("format", "Format"), - ("initialpunctuation", "Initial_Punctuation"), - ("l", "Letter"), - ("lc", "Cased_Letter"), - ("letter", "Letter"), - ("letternumber", "Letter_Number"), - ("lineseparator", "Line_Separator"), - ("ll", "Lowercase_Letter"), - ("lm", "Modifier_Letter"), - ("lo", "Other_Letter"), - ("lowercaseletter", "Lowercase_Letter"), - ("lt", "Titlecase_Letter"), - ("lu", "Uppercase_Letter"), - ("m", "Mark"), - ("mark", "Mark"), - ("mathsymbol", "Math_Symbol"), - ("mc", "Spacing_Mark"), - ("me", "Enclosing_Mark"), - ("mn", "Nonspacing_Mark"), - ("modifierletter", "Modifier_Letter"), - ("modifiersymbol", "Modifier_Symbol"), - ("n", "Number"), - ("nd", "Decimal_Number"), - ("nl", 
"Letter_Number"), - ("no", "Other_Number"), - ("nonspacingmark", "Nonspacing_Mark"), - ("number", "Number"), - ("openpunctuation", "Open_Punctuation"), - ("other", "Other"), - ("otherletter", "Other_Letter"), - ("othernumber", "Other_Number"), - ("otherpunctuation", "Other_Punctuation"), - ("othersymbol", "Other_Symbol"), - ("p", "Punctuation"), - ("paragraphseparator", "Paragraph_Separator"), - ("pc", "Connector_Punctuation"), - ("pd", "Dash_Punctuation"), - ("pe", "Close_Punctuation"), - ("pf", "Final_Punctuation"), - ("pi", "Initial_Punctuation"), - ("po", "Other_Punctuation"), - ("privateuse", "Private_Use"), - ("ps", "Open_Punctuation"), - ("punct", "Punctuation"), - ("punctuation", "Punctuation"), - ("s", "Symbol"), - ("sc", "Currency_Symbol"), - ("separator", "Separator"), - ("sk", "Modifier_Symbol"), - ("sm", "Math_Symbol"), - ("so", "Other_Symbol"), - ("spaceseparator", "Space_Separator"), - ("spacingmark", "Spacing_Mark"), - ("surrogate", "Surrogate"), - ("symbol", "Symbol"), - ("titlecaseletter", "Titlecase_Letter"), - ("unassigned", "Unassigned"), - ("uppercaseletter", "Uppercase_Letter"), - ("z", "Separator"), - ("zl", "Line_Separator"), - ("zp", "Paragraph_Separator"), - ("zs", "Space_Separator"), - ], - ), - ( - "Grapheme_Cluster_Break", - &[ - ("cn", "Control"), - ("control", "Control"), - ("cr", "CR"), - ("eb", "E_Base"), - ("ebase", "E_Base"), - ("ebasegaz", "E_Base_GAZ"), - ("ebg", "E_Base_GAZ"), - ("em", "E_Modifier"), - ("emodifier", "E_Modifier"), - ("ex", "Extend"), - ("extend", "Extend"), - ("gaz", "Glue_After_Zwj"), - ("glueafterzwj", "Glue_After_Zwj"), - ("l", "L"), - ("lf", "LF"), - ("lv", "LV"), - ("lvt", "LVT"), - ("other", "Other"), - ("pp", "Prepend"), - ("prepend", "Prepend"), - ("regionalindicator", "Regional_Indicator"), - ("ri", "Regional_Indicator"), - ("sm", "SpacingMark"), - ("spacingmark", "SpacingMark"), - ("t", "T"), - ("v", "V"), - ("xx", "Other"), - ("zwj", "ZWJ"), - ], - ), - ( - "Script", - &[ - ("adlam", "Adlam"), - ("adlm", "Adlam"), - ("aghb", "Caucasian_Albanian"), - ("ahom", "Ahom"), - ("anatolianhieroglyphs", "Anatolian_Hieroglyphs"), - ("arab", "Arabic"), - ("arabic", "Arabic"), - ("armenian", "Armenian"), - ("armi", "Imperial_Aramaic"), - ("armn", "Armenian"), - ("avestan", "Avestan"), - ("avst", "Avestan"), - ("bali", "Balinese"), - ("balinese", "Balinese"), - ("bamu", "Bamum"), - ("bamum", "Bamum"), - ("bass", "Bassa_Vah"), - ("bassavah", "Bassa_Vah"), - ("batak", "Batak"), - ("batk", "Batak"), - ("beng", "Bengali"), - ("bengali", "Bengali"), - ("bhaiksuki", "Bhaiksuki"), - ("bhks", "Bhaiksuki"), - ("bopo", "Bopomofo"), - ("bopomofo", "Bopomofo"), - ("brah", "Brahmi"), - ("brahmi", "Brahmi"), - ("brai", "Braille"), - ("braille", "Braille"), - ("bugi", "Buginese"), - ("buginese", "Buginese"), - ("buhd", "Buhid"), - ("buhid", "Buhid"), - ("cakm", "Chakma"), - ("canadianaboriginal", "Canadian_Aboriginal"), - ("cans", "Canadian_Aboriginal"), - ("cari", "Carian"), - ("carian", "Carian"), - ("caucasianalbanian", "Caucasian_Albanian"), - ("chakma", "Chakma"), - ("cham", "Cham"), - ("cher", "Cherokee"), - ("cherokee", "Cherokee"), - ("chorasmian", "Chorasmian"), - ("chrs", "Chorasmian"), - ("common", "Common"), - ("copt", "Coptic"), - ("coptic", "Coptic"), - ("cpmn", "Cypro_Minoan"), - ("cprt", "Cypriot"), - ("cuneiform", "Cuneiform"), - ("cypriot", "Cypriot"), - ("cyprominoan", "Cypro_Minoan"), - ("cyrillic", "Cyrillic"), - ("cyrl", "Cyrillic"), - ("deseret", "Deseret"), - ("deva", "Devanagari"), - ("devanagari", "Devanagari"), - ("diak", 
"Dives_Akuru"), - ("divesakuru", "Dives_Akuru"), - ("dogr", "Dogra"), - ("dogra", "Dogra"), - ("dsrt", "Deseret"), - ("dupl", "Duployan"), - ("duployan", "Duployan"), - ("egyp", "Egyptian_Hieroglyphs"), - ("egyptianhieroglyphs", "Egyptian_Hieroglyphs"), - ("elba", "Elbasan"), - ("elbasan", "Elbasan"), - ("elym", "Elymaic"), - ("elymaic", "Elymaic"), - ("ethi", "Ethiopic"), - ("ethiopic", "Ethiopic"), - ("gara", "Garay"), - ("garay", "Garay"), - ("geor", "Georgian"), - ("georgian", "Georgian"), - ("glag", "Glagolitic"), - ("glagolitic", "Glagolitic"), - ("gong", "Gunjala_Gondi"), - ("gonm", "Masaram_Gondi"), - ("goth", "Gothic"), - ("gothic", "Gothic"), - ("gran", "Grantha"), - ("grantha", "Grantha"), - ("greek", "Greek"), - ("grek", "Greek"), - ("gujarati", "Gujarati"), - ("gujr", "Gujarati"), - ("gukh", "Gurung_Khema"), - ("gunjalagondi", "Gunjala_Gondi"), - ("gurmukhi", "Gurmukhi"), - ("guru", "Gurmukhi"), - ("gurungkhema", "Gurung_Khema"), - ("han", "Han"), - ("hang", "Hangul"), - ("hangul", "Hangul"), - ("hani", "Han"), - ("hanifirohingya", "Hanifi_Rohingya"), - ("hano", "Hanunoo"), - ("hanunoo", "Hanunoo"), - ("hatr", "Hatran"), - ("hatran", "Hatran"), - ("hebr", "Hebrew"), - ("hebrew", "Hebrew"), - ("hira", "Hiragana"), - ("hiragana", "Hiragana"), - ("hluw", "Anatolian_Hieroglyphs"), - ("hmng", "Pahawh_Hmong"), - ("hmnp", "Nyiakeng_Puachue_Hmong"), - ("hrkt", "Katakana_Or_Hiragana"), - ("hung", "Old_Hungarian"), - ("imperialaramaic", "Imperial_Aramaic"), - ("inherited", "Inherited"), - ("inscriptionalpahlavi", "Inscriptional_Pahlavi"), - ("inscriptionalparthian", "Inscriptional_Parthian"), - ("ital", "Old_Italic"), - ("java", "Javanese"), - ("javanese", "Javanese"), - ("kaithi", "Kaithi"), - ("kali", "Kayah_Li"), - ("kana", "Katakana"), - ("kannada", "Kannada"), - ("katakana", "Katakana"), - ("katakanaorhiragana", "Katakana_Or_Hiragana"), - ("kawi", "Kawi"), - ("kayahli", "Kayah_Li"), - ("khar", "Kharoshthi"), - ("kharoshthi", "Kharoshthi"), - ("khitansmallscript", "Khitan_Small_Script"), - ("khmer", "Khmer"), - ("khmr", "Khmer"), - ("khoj", "Khojki"), - ("khojki", "Khojki"), - ("khudawadi", "Khudawadi"), - ("kiratrai", "Kirat_Rai"), - ("kits", "Khitan_Small_Script"), - ("knda", "Kannada"), - ("krai", "Kirat_Rai"), - ("kthi", "Kaithi"), - ("lana", "Tai_Tham"), - ("lao", "Lao"), - ("laoo", "Lao"), - ("latin", "Latin"), - ("latn", "Latin"), - ("lepc", "Lepcha"), - ("lepcha", "Lepcha"), - ("limb", "Limbu"), - ("limbu", "Limbu"), - ("lina", "Linear_A"), - ("linb", "Linear_B"), - ("lineara", "Linear_A"), - ("linearb", "Linear_B"), - ("lisu", "Lisu"), - ("lyci", "Lycian"), - ("lycian", "Lycian"), - ("lydi", "Lydian"), - ("lydian", "Lydian"), - ("mahajani", "Mahajani"), - ("mahj", "Mahajani"), - ("maka", "Makasar"), - ("makasar", "Makasar"), - ("malayalam", "Malayalam"), - ("mand", "Mandaic"), - ("mandaic", "Mandaic"), - ("mani", "Manichaean"), - ("manichaean", "Manichaean"), - ("marc", "Marchen"), - ("marchen", "Marchen"), - ("masaramgondi", "Masaram_Gondi"), - ("medefaidrin", "Medefaidrin"), - ("medf", "Medefaidrin"), - ("meeteimayek", "Meetei_Mayek"), - ("mend", "Mende_Kikakui"), - ("mendekikakui", "Mende_Kikakui"), - ("merc", "Meroitic_Cursive"), - ("mero", "Meroitic_Hieroglyphs"), - ("meroiticcursive", "Meroitic_Cursive"), - ("meroitichieroglyphs", "Meroitic_Hieroglyphs"), - ("miao", "Miao"), - ("mlym", "Malayalam"), - ("modi", "Modi"), - ("mong", "Mongolian"), - ("mongolian", "Mongolian"), - ("mro", "Mro"), - ("mroo", "Mro"), - ("mtei", "Meetei_Mayek"), - ("mult", "Multani"), - 
("multani", "Multani"), - ("myanmar", "Myanmar"), - ("mymr", "Myanmar"), - ("nabataean", "Nabataean"), - ("nagm", "Nag_Mundari"), - ("nagmundari", "Nag_Mundari"), - ("nand", "Nandinagari"), - ("nandinagari", "Nandinagari"), - ("narb", "Old_North_Arabian"), - ("nbat", "Nabataean"), - ("newa", "Newa"), - ("newtailue", "New_Tai_Lue"), - ("nko", "Nko"), - ("nkoo", "Nko"), - ("nshu", "Nushu"), - ("nushu", "Nushu"), - ("nyiakengpuachuehmong", "Nyiakeng_Puachue_Hmong"), - ("ogam", "Ogham"), - ("ogham", "Ogham"), - ("olchiki", "Ol_Chiki"), - ("olck", "Ol_Chiki"), - ("oldhungarian", "Old_Hungarian"), - ("olditalic", "Old_Italic"), - ("oldnortharabian", "Old_North_Arabian"), - ("oldpermic", "Old_Permic"), - ("oldpersian", "Old_Persian"), - ("oldsogdian", "Old_Sogdian"), - ("oldsoutharabian", "Old_South_Arabian"), - ("oldturkic", "Old_Turkic"), - ("olduyghur", "Old_Uyghur"), - ("olonal", "Ol_Onal"), - ("onao", "Ol_Onal"), - ("oriya", "Oriya"), - ("orkh", "Old_Turkic"), - ("orya", "Oriya"), - ("osage", "Osage"), - ("osge", "Osage"), - ("osma", "Osmanya"), - ("osmanya", "Osmanya"), - ("ougr", "Old_Uyghur"), - ("pahawhhmong", "Pahawh_Hmong"), - ("palm", "Palmyrene"), - ("palmyrene", "Palmyrene"), - ("pauc", "Pau_Cin_Hau"), - ("paucinhau", "Pau_Cin_Hau"), - ("perm", "Old_Permic"), - ("phag", "Phags_Pa"), - ("phagspa", "Phags_Pa"), - ("phli", "Inscriptional_Pahlavi"), - ("phlp", "Psalter_Pahlavi"), - ("phnx", "Phoenician"), - ("phoenician", "Phoenician"), - ("plrd", "Miao"), - ("prti", "Inscriptional_Parthian"), - ("psalterpahlavi", "Psalter_Pahlavi"), - ("qaac", "Coptic"), - ("qaai", "Inherited"), - ("rejang", "Rejang"), - ("rjng", "Rejang"), - ("rohg", "Hanifi_Rohingya"), - ("runic", "Runic"), - ("runr", "Runic"), - ("samaritan", "Samaritan"), - ("samr", "Samaritan"), - ("sarb", "Old_South_Arabian"), - ("saur", "Saurashtra"), - ("saurashtra", "Saurashtra"), - ("sgnw", "SignWriting"), - ("sharada", "Sharada"), - ("shavian", "Shavian"), - ("shaw", "Shavian"), - ("shrd", "Sharada"), - ("sidd", "Siddham"), - ("siddham", "Siddham"), - ("signwriting", "SignWriting"), - ("sind", "Khudawadi"), - ("sinh", "Sinhala"), - ("sinhala", "Sinhala"), - ("sogd", "Sogdian"), - ("sogdian", "Sogdian"), - ("sogo", "Old_Sogdian"), - ("sora", "Sora_Sompeng"), - ("sorasompeng", "Sora_Sompeng"), - ("soyo", "Soyombo"), - ("soyombo", "Soyombo"), - ("sund", "Sundanese"), - ("sundanese", "Sundanese"), - ("sunu", "Sunuwar"), - ("sunuwar", "Sunuwar"), - ("sylo", "Syloti_Nagri"), - ("sylotinagri", "Syloti_Nagri"), - ("syrc", "Syriac"), - ("syriac", "Syriac"), - ("tagalog", "Tagalog"), - ("tagb", "Tagbanwa"), - ("tagbanwa", "Tagbanwa"), - ("taile", "Tai_Le"), - ("taitham", "Tai_Tham"), - ("taiviet", "Tai_Viet"), - ("takr", "Takri"), - ("takri", "Takri"), - ("tale", "Tai_Le"), - ("talu", "New_Tai_Lue"), - ("tamil", "Tamil"), - ("taml", "Tamil"), - ("tang", "Tangut"), - ("tangsa", "Tangsa"), - ("tangut", "Tangut"), - ("tavt", "Tai_Viet"), - ("telu", "Telugu"), - ("telugu", "Telugu"), - ("tfng", "Tifinagh"), - ("tglg", "Tagalog"), - ("thaa", "Thaana"), - ("thaana", "Thaana"), - ("thai", "Thai"), - ("tibetan", "Tibetan"), - ("tibt", "Tibetan"), - ("tifinagh", "Tifinagh"), - ("tirh", "Tirhuta"), - ("tirhuta", "Tirhuta"), - ("tnsa", "Tangsa"), - ("todhri", "Todhri"), - ("todr", "Todhri"), - ("toto", "Toto"), - ("tulutigalari", "Tulu_Tigalari"), - ("tutg", "Tulu_Tigalari"), - ("ugar", "Ugaritic"), - ("ugaritic", "Ugaritic"), - ("unknown", "Unknown"), - ("vai", "Vai"), - ("vaii", "Vai"), - ("vith", "Vithkuqi"), - ("vithkuqi", "Vithkuqi"), - 
("wancho", "Wancho"), - ("wara", "Warang_Citi"), - ("warangciti", "Warang_Citi"), - ("wcho", "Wancho"), - ("xpeo", "Old_Persian"), - ("xsux", "Cuneiform"), - ("yezi", "Yezidi"), - ("yezidi", "Yezidi"), - ("yi", "Yi"), - ("yiii", "Yi"), - ("zanabazarsquare", "Zanabazar_Square"), - ("zanb", "Zanabazar_Square"), - ("zinh", "Inherited"), - ("zyyy", "Common"), - ("zzzz", "Unknown"), - ], - ), - ( - "Script_Extensions", - &[ - ("adlam", "Adlam"), - ("adlm", "Adlam"), - ("aghb", "Caucasian_Albanian"), - ("ahom", "Ahom"), - ("anatolianhieroglyphs", "Anatolian_Hieroglyphs"), - ("arab", "Arabic"), - ("arabic", "Arabic"), - ("armenian", "Armenian"), - ("armi", "Imperial_Aramaic"), - ("armn", "Armenian"), - ("avestan", "Avestan"), - ("avst", "Avestan"), - ("bali", "Balinese"), - ("balinese", "Balinese"), - ("bamu", "Bamum"), - ("bamum", "Bamum"), - ("bass", "Bassa_Vah"), - ("bassavah", "Bassa_Vah"), - ("batak", "Batak"), - ("batk", "Batak"), - ("beng", "Bengali"), - ("bengali", "Bengali"), - ("bhaiksuki", "Bhaiksuki"), - ("bhks", "Bhaiksuki"), - ("bopo", "Bopomofo"), - ("bopomofo", "Bopomofo"), - ("brah", "Brahmi"), - ("brahmi", "Brahmi"), - ("brai", "Braille"), - ("braille", "Braille"), - ("bugi", "Buginese"), - ("buginese", "Buginese"), - ("buhd", "Buhid"), - ("buhid", "Buhid"), - ("cakm", "Chakma"), - ("canadianaboriginal", "Canadian_Aboriginal"), - ("cans", "Canadian_Aboriginal"), - ("cari", "Carian"), - ("carian", "Carian"), - ("caucasianalbanian", "Caucasian_Albanian"), - ("chakma", "Chakma"), - ("cham", "Cham"), - ("cher", "Cherokee"), - ("cherokee", "Cherokee"), - ("chorasmian", "Chorasmian"), - ("chrs", "Chorasmian"), - ("common", "Common"), - ("copt", "Coptic"), - ("coptic", "Coptic"), - ("cpmn", "Cypro_Minoan"), - ("cprt", "Cypriot"), - ("cuneiform", "Cuneiform"), - ("cypriot", "Cypriot"), - ("cyprominoan", "Cypro_Minoan"), - ("cyrillic", "Cyrillic"), - ("cyrl", "Cyrillic"), - ("deseret", "Deseret"), - ("deva", "Devanagari"), - ("devanagari", "Devanagari"), - ("diak", "Dives_Akuru"), - ("divesakuru", "Dives_Akuru"), - ("dogr", "Dogra"), - ("dogra", "Dogra"), - ("dsrt", "Deseret"), - ("dupl", "Duployan"), - ("duployan", "Duployan"), - ("egyp", "Egyptian_Hieroglyphs"), - ("egyptianhieroglyphs", "Egyptian_Hieroglyphs"), - ("elba", "Elbasan"), - ("elbasan", "Elbasan"), - ("elym", "Elymaic"), - ("elymaic", "Elymaic"), - ("ethi", "Ethiopic"), - ("ethiopic", "Ethiopic"), - ("gara", "Garay"), - ("garay", "Garay"), - ("geor", "Georgian"), - ("georgian", "Georgian"), - ("glag", "Glagolitic"), - ("glagolitic", "Glagolitic"), - ("gong", "Gunjala_Gondi"), - ("gonm", "Masaram_Gondi"), - ("goth", "Gothic"), - ("gothic", "Gothic"), - ("gran", "Grantha"), - ("grantha", "Grantha"), - ("greek", "Greek"), - ("grek", "Greek"), - ("gujarati", "Gujarati"), - ("gujr", "Gujarati"), - ("gukh", "Gurung_Khema"), - ("gunjalagondi", "Gunjala_Gondi"), - ("gurmukhi", "Gurmukhi"), - ("guru", "Gurmukhi"), - ("gurungkhema", "Gurung_Khema"), - ("han", "Han"), - ("hang", "Hangul"), - ("hangul", "Hangul"), - ("hani", "Han"), - ("hanifirohingya", "Hanifi_Rohingya"), - ("hano", "Hanunoo"), - ("hanunoo", "Hanunoo"), - ("hatr", "Hatran"), - ("hatran", "Hatran"), - ("hebr", "Hebrew"), - ("hebrew", "Hebrew"), - ("hira", "Hiragana"), - ("hiragana", "Hiragana"), - ("hluw", "Anatolian_Hieroglyphs"), - ("hmng", "Pahawh_Hmong"), - ("hmnp", "Nyiakeng_Puachue_Hmong"), - ("hrkt", "Katakana_Or_Hiragana"), - ("hung", "Old_Hungarian"), - ("imperialaramaic", "Imperial_Aramaic"), - ("inherited", "Inherited"), - ("inscriptionalpahlavi", 
"Inscriptional_Pahlavi"), - ("inscriptionalparthian", "Inscriptional_Parthian"), - ("ital", "Old_Italic"), - ("java", "Javanese"), - ("javanese", "Javanese"), - ("kaithi", "Kaithi"), - ("kali", "Kayah_Li"), - ("kana", "Katakana"), - ("kannada", "Kannada"), - ("katakana", "Katakana"), - ("katakanaorhiragana", "Katakana_Or_Hiragana"), - ("kawi", "Kawi"), - ("kayahli", "Kayah_Li"), - ("khar", "Kharoshthi"), - ("kharoshthi", "Kharoshthi"), - ("khitansmallscript", "Khitan_Small_Script"), - ("khmer", "Khmer"), - ("khmr", "Khmer"), - ("khoj", "Khojki"), - ("khojki", "Khojki"), - ("khudawadi", "Khudawadi"), - ("kiratrai", "Kirat_Rai"), - ("kits", "Khitan_Small_Script"), - ("knda", "Kannada"), - ("krai", "Kirat_Rai"), - ("kthi", "Kaithi"), - ("lana", "Tai_Tham"), - ("lao", "Lao"), - ("laoo", "Lao"), - ("latin", "Latin"), - ("latn", "Latin"), - ("lepc", "Lepcha"), - ("lepcha", "Lepcha"), - ("limb", "Limbu"), - ("limbu", "Limbu"), - ("lina", "Linear_A"), - ("linb", "Linear_B"), - ("lineara", "Linear_A"), - ("linearb", "Linear_B"), - ("lisu", "Lisu"), - ("lyci", "Lycian"), - ("lycian", "Lycian"), - ("lydi", "Lydian"), - ("lydian", "Lydian"), - ("mahajani", "Mahajani"), - ("mahj", "Mahajani"), - ("maka", "Makasar"), - ("makasar", "Makasar"), - ("malayalam", "Malayalam"), - ("mand", "Mandaic"), - ("mandaic", "Mandaic"), - ("mani", "Manichaean"), - ("manichaean", "Manichaean"), - ("marc", "Marchen"), - ("marchen", "Marchen"), - ("masaramgondi", "Masaram_Gondi"), - ("medefaidrin", "Medefaidrin"), - ("medf", "Medefaidrin"), - ("meeteimayek", "Meetei_Mayek"), - ("mend", "Mende_Kikakui"), - ("mendekikakui", "Mende_Kikakui"), - ("merc", "Meroitic_Cursive"), - ("mero", "Meroitic_Hieroglyphs"), - ("meroiticcursive", "Meroitic_Cursive"), - ("meroitichieroglyphs", "Meroitic_Hieroglyphs"), - ("miao", "Miao"), - ("mlym", "Malayalam"), - ("modi", "Modi"), - ("mong", "Mongolian"), - ("mongolian", "Mongolian"), - ("mro", "Mro"), - ("mroo", "Mro"), - ("mtei", "Meetei_Mayek"), - ("mult", "Multani"), - ("multani", "Multani"), - ("myanmar", "Myanmar"), - ("mymr", "Myanmar"), - ("nabataean", "Nabataean"), - ("nagm", "Nag_Mundari"), - ("nagmundari", "Nag_Mundari"), - ("nand", "Nandinagari"), - ("nandinagari", "Nandinagari"), - ("narb", "Old_North_Arabian"), - ("nbat", "Nabataean"), - ("newa", "Newa"), - ("newtailue", "New_Tai_Lue"), - ("nko", "Nko"), - ("nkoo", "Nko"), - ("nshu", "Nushu"), - ("nushu", "Nushu"), - ("nyiakengpuachuehmong", "Nyiakeng_Puachue_Hmong"), - ("ogam", "Ogham"), - ("ogham", "Ogham"), - ("olchiki", "Ol_Chiki"), - ("olck", "Ol_Chiki"), - ("oldhungarian", "Old_Hungarian"), - ("olditalic", "Old_Italic"), - ("oldnortharabian", "Old_North_Arabian"), - ("oldpermic", "Old_Permic"), - ("oldpersian", "Old_Persian"), - ("oldsogdian", "Old_Sogdian"), - ("oldsoutharabian", "Old_South_Arabian"), - ("oldturkic", "Old_Turkic"), - ("olduyghur", "Old_Uyghur"), - ("olonal", "Ol_Onal"), - ("onao", "Ol_Onal"), - ("oriya", "Oriya"), - ("orkh", "Old_Turkic"), - ("orya", "Oriya"), - ("osage", "Osage"), - ("osge", "Osage"), - ("osma", "Osmanya"), - ("osmanya", "Osmanya"), - ("ougr", "Old_Uyghur"), - ("pahawhhmong", "Pahawh_Hmong"), - ("palm", "Palmyrene"), - ("palmyrene", "Palmyrene"), - ("pauc", "Pau_Cin_Hau"), - ("paucinhau", "Pau_Cin_Hau"), - ("perm", "Old_Permic"), - ("phag", "Phags_Pa"), - ("phagspa", "Phags_Pa"), - ("phli", "Inscriptional_Pahlavi"), - ("phlp", "Psalter_Pahlavi"), - ("phnx", "Phoenician"), - ("phoenician", "Phoenician"), - ("plrd", "Miao"), - ("prti", "Inscriptional_Parthian"), - ("psalterpahlavi", 
"Psalter_Pahlavi"), - ("qaac", "Coptic"), - ("qaai", "Inherited"), - ("rejang", "Rejang"), - ("rjng", "Rejang"), - ("rohg", "Hanifi_Rohingya"), - ("runic", "Runic"), - ("runr", "Runic"), - ("samaritan", "Samaritan"), - ("samr", "Samaritan"), - ("sarb", "Old_South_Arabian"), - ("saur", "Saurashtra"), - ("saurashtra", "Saurashtra"), - ("sgnw", "SignWriting"), - ("sharada", "Sharada"), - ("shavian", "Shavian"), - ("shaw", "Shavian"), - ("shrd", "Sharada"), - ("sidd", "Siddham"), - ("siddham", "Siddham"), - ("signwriting", "SignWriting"), - ("sind", "Khudawadi"), - ("sinh", "Sinhala"), - ("sinhala", "Sinhala"), - ("sogd", "Sogdian"), - ("sogdian", "Sogdian"), - ("sogo", "Old_Sogdian"), - ("sora", "Sora_Sompeng"), - ("sorasompeng", "Sora_Sompeng"), - ("soyo", "Soyombo"), - ("soyombo", "Soyombo"), - ("sund", "Sundanese"), - ("sundanese", "Sundanese"), - ("sunu", "Sunuwar"), - ("sunuwar", "Sunuwar"), - ("sylo", "Syloti_Nagri"), - ("sylotinagri", "Syloti_Nagri"), - ("syrc", "Syriac"), - ("syriac", "Syriac"), - ("tagalog", "Tagalog"), - ("tagb", "Tagbanwa"), - ("tagbanwa", "Tagbanwa"), - ("taile", "Tai_Le"), - ("taitham", "Tai_Tham"), - ("taiviet", "Tai_Viet"), - ("takr", "Takri"), - ("takri", "Takri"), - ("tale", "Tai_Le"), - ("talu", "New_Tai_Lue"), - ("tamil", "Tamil"), - ("taml", "Tamil"), - ("tang", "Tangut"), - ("tangsa", "Tangsa"), - ("tangut", "Tangut"), - ("tavt", "Tai_Viet"), - ("telu", "Telugu"), - ("telugu", "Telugu"), - ("tfng", "Tifinagh"), - ("tglg", "Tagalog"), - ("thaa", "Thaana"), - ("thaana", "Thaana"), - ("thai", "Thai"), - ("tibetan", "Tibetan"), - ("tibt", "Tibetan"), - ("tifinagh", "Tifinagh"), - ("tirh", "Tirhuta"), - ("tirhuta", "Tirhuta"), - ("tnsa", "Tangsa"), - ("todhri", "Todhri"), - ("todr", "Todhri"), - ("toto", "Toto"), - ("tulutigalari", "Tulu_Tigalari"), - ("tutg", "Tulu_Tigalari"), - ("ugar", "Ugaritic"), - ("ugaritic", "Ugaritic"), - ("unknown", "Unknown"), - ("vai", "Vai"), - ("vaii", "Vai"), - ("vith", "Vithkuqi"), - ("vithkuqi", "Vithkuqi"), - ("wancho", "Wancho"), - ("wara", "Warang_Citi"), - ("warangciti", "Warang_Citi"), - ("wcho", "Wancho"), - ("xpeo", "Old_Persian"), - ("xsux", "Cuneiform"), - ("yezi", "Yezidi"), - ("yezidi", "Yezidi"), - ("yi", "Yi"), - ("yiii", "Yi"), - ("zanabazarsquare", "Zanabazar_Square"), - ("zanb", "Zanabazar_Square"), - ("zinh", "Inherited"), - ("zyyy", "Common"), - ("zzzz", "Unknown"), - ], - ), - ( - "Sentence_Break", - &[ - ("at", "ATerm"), - ("aterm", "ATerm"), - ("cl", "Close"), - ("close", "Close"), - ("cr", "CR"), - ("ex", "Extend"), - ("extend", "Extend"), - ("fo", "Format"), - ("format", "Format"), - ("le", "OLetter"), - ("lf", "LF"), - ("lo", "Lower"), - ("lower", "Lower"), - ("nu", "Numeric"), - ("numeric", "Numeric"), - ("oletter", "OLetter"), - ("other", "Other"), - ("sc", "SContinue"), - ("scontinue", "SContinue"), - ("se", "Sep"), - ("sep", "Sep"), - ("sp", "Sp"), - ("st", "STerm"), - ("sterm", "STerm"), - ("up", "Upper"), - ("upper", "Upper"), - ("xx", "Other"), - ], - ), - ( - "Word_Break", - &[ - ("aletter", "ALetter"), - ("cr", "CR"), - ("doublequote", "Double_Quote"), - ("dq", "Double_Quote"), - ("eb", "E_Base"), - ("ebase", "E_Base"), - ("ebasegaz", "E_Base_GAZ"), - ("ebg", "E_Base_GAZ"), - ("em", "E_Modifier"), - ("emodifier", "E_Modifier"), - ("ex", "ExtendNumLet"), - ("extend", "Extend"), - ("extendnumlet", "ExtendNumLet"), - ("fo", "Format"), - ("format", "Format"), - ("gaz", "Glue_After_Zwj"), - ("glueafterzwj", "Glue_After_Zwj"), - ("hebrewletter", "Hebrew_Letter"), - ("hl", "Hebrew_Letter"), - ("ka", 
"Katakana"), - ("katakana", "Katakana"), - ("le", "ALetter"), - ("lf", "LF"), - ("mb", "MidNumLet"), - ("midletter", "MidLetter"), - ("midnum", "MidNum"), - ("midnumlet", "MidNumLet"), - ("ml", "MidLetter"), - ("mn", "MidNum"), - ("newline", "Newline"), - ("nl", "Newline"), - ("nu", "Numeric"), - ("numeric", "Numeric"), - ("other", "Other"), - ("regionalindicator", "Regional_Indicator"), - ("ri", "Regional_Indicator"), - ("singlequote", "Single_Quote"), - ("sq", "Single_Quote"), - ("wsegspace", "WSegSpace"), - ("xx", "Other"), - ("zwj", "ZWJ"), - ], - ), -]; diff --git a/vendor/regex-syntax/src/unicode_tables/script.rs b/vendor/regex-syntax/src/unicode_tables/script.rs deleted file mode 100644 index 3e437ca9ca73e5..00000000000000 --- a/vendor/regex-syntax/src/unicode_tables/script.rs +++ /dev/null @@ -1,1300 +0,0 @@ -// DO NOT EDIT THIS FILE. IT WAS AUTOMATICALLY GENERATED BY: -// -// ucd-generate script ucd-16.0.0 --chars -// -// Unicode version: 16.0.0. -// -// ucd-generate 0.3.1 is available on crates.io. - -pub const BY_NAME: &'static [(&'static str, &'static [(char, char)])] = &[ - ("Adlam", ADLAM), - ("Ahom", AHOM), - ("Anatolian_Hieroglyphs", ANATOLIAN_HIEROGLYPHS), - ("Arabic", ARABIC), - ("Armenian", ARMENIAN), - ("Avestan", AVESTAN), - ("Balinese", BALINESE), - ("Bamum", BAMUM), - ("Bassa_Vah", BASSA_VAH), - ("Batak", BATAK), - ("Bengali", BENGALI), - ("Bhaiksuki", BHAIKSUKI), - ("Bopomofo", BOPOMOFO), - ("Brahmi", BRAHMI), - ("Braille", BRAILLE), - ("Buginese", BUGINESE), - ("Buhid", BUHID), - ("Canadian_Aboriginal", CANADIAN_ABORIGINAL), - ("Carian", CARIAN), - ("Caucasian_Albanian", CAUCASIAN_ALBANIAN), - ("Chakma", CHAKMA), - ("Cham", CHAM), - ("Cherokee", CHEROKEE), - ("Chorasmian", CHORASMIAN), - ("Common", COMMON), - ("Coptic", COPTIC), - ("Cuneiform", CUNEIFORM), - ("Cypriot", CYPRIOT), - ("Cypro_Minoan", CYPRO_MINOAN), - ("Cyrillic", CYRILLIC), - ("Deseret", DESERET), - ("Devanagari", DEVANAGARI), - ("Dives_Akuru", DIVES_AKURU), - ("Dogra", DOGRA), - ("Duployan", DUPLOYAN), - ("Egyptian_Hieroglyphs", EGYPTIAN_HIEROGLYPHS), - ("Elbasan", ELBASAN), - ("Elymaic", ELYMAIC), - ("Ethiopic", ETHIOPIC), - ("Garay", GARAY), - ("Georgian", GEORGIAN), - ("Glagolitic", GLAGOLITIC), - ("Gothic", GOTHIC), - ("Grantha", GRANTHA), - ("Greek", GREEK), - ("Gujarati", GUJARATI), - ("Gunjala_Gondi", GUNJALA_GONDI), - ("Gurmukhi", GURMUKHI), - ("Gurung_Khema", GURUNG_KHEMA), - ("Han", HAN), - ("Hangul", HANGUL), - ("Hanifi_Rohingya", HANIFI_ROHINGYA), - ("Hanunoo", HANUNOO), - ("Hatran", HATRAN), - ("Hebrew", HEBREW), - ("Hiragana", HIRAGANA), - ("Imperial_Aramaic", IMPERIAL_ARAMAIC), - ("Inherited", INHERITED), - ("Inscriptional_Pahlavi", INSCRIPTIONAL_PAHLAVI), - ("Inscriptional_Parthian", INSCRIPTIONAL_PARTHIAN), - ("Javanese", JAVANESE), - ("Kaithi", KAITHI), - ("Kannada", KANNADA), - ("Katakana", KATAKANA), - ("Kawi", KAWI), - ("Kayah_Li", KAYAH_LI), - ("Kharoshthi", KHAROSHTHI), - ("Khitan_Small_Script", KHITAN_SMALL_SCRIPT), - ("Khmer", KHMER), - ("Khojki", KHOJKI), - ("Khudawadi", KHUDAWADI), - ("Kirat_Rai", KIRAT_RAI), - ("Lao", LAO), - ("Latin", LATIN), - ("Lepcha", LEPCHA), - ("Limbu", LIMBU), - ("Linear_A", LINEAR_A), - ("Linear_B", LINEAR_B), - ("Lisu", LISU), - ("Lycian", LYCIAN), - ("Lydian", LYDIAN), - ("Mahajani", MAHAJANI), - ("Makasar", MAKASAR), - ("Malayalam", MALAYALAM), - ("Mandaic", MANDAIC), - ("Manichaean", MANICHAEAN), - ("Marchen", MARCHEN), - ("Masaram_Gondi", MASARAM_GONDI), - ("Medefaidrin", MEDEFAIDRIN), - ("Meetei_Mayek", MEETEI_MAYEK), - ("Mende_Kikakui", 
MENDE_KIKAKUI), - ("Meroitic_Cursive", MEROITIC_CURSIVE), - ("Meroitic_Hieroglyphs", MEROITIC_HIEROGLYPHS), - ("Miao", MIAO), - ("Modi", MODI), - ("Mongolian", MONGOLIAN), - ("Mro", MRO), - ("Multani", MULTANI), - ("Myanmar", MYANMAR), - ("Nabataean", NABATAEAN), - ("Nag_Mundari", NAG_MUNDARI), - ("Nandinagari", NANDINAGARI), - ("New_Tai_Lue", NEW_TAI_LUE), - ("Newa", NEWA), - ("Nko", NKO), - ("Nushu", NUSHU), - ("Nyiakeng_Puachue_Hmong", NYIAKENG_PUACHUE_HMONG), - ("Ogham", OGHAM), - ("Ol_Chiki", OL_CHIKI), - ("Ol_Onal", OL_ONAL), - ("Old_Hungarian", OLD_HUNGARIAN), - ("Old_Italic", OLD_ITALIC), - ("Old_North_Arabian", OLD_NORTH_ARABIAN), - ("Old_Permic", OLD_PERMIC), - ("Old_Persian", OLD_PERSIAN), - ("Old_Sogdian", OLD_SOGDIAN), - ("Old_South_Arabian", OLD_SOUTH_ARABIAN), - ("Old_Turkic", OLD_TURKIC), - ("Old_Uyghur", OLD_UYGHUR), - ("Oriya", ORIYA), - ("Osage", OSAGE), - ("Osmanya", OSMANYA), - ("Pahawh_Hmong", PAHAWH_HMONG), - ("Palmyrene", PALMYRENE), - ("Pau_Cin_Hau", PAU_CIN_HAU), - ("Phags_Pa", PHAGS_PA), - ("Phoenician", PHOENICIAN), - ("Psalter_Pahlavi", PSALTER_PAHLAVI), - ("Rejang", REJANG), - ("Runic", RUNIC), - ("Samaritan", SAMARITAN), - ("Saurashtra", SAURASHTRA), - ("Sharada", SHARADA), - ("Shavian", SHAVIAN), - ("Siddham", SIDDHAM), - ("SignWriting", SIGNWRITING), - ("Sinhala", SINHALA), - ("Sogdian", SOGDIAN), - ("Sora_Sompeng", SORA_SOMPENG), - ("Soyombo", SOYOMBO), - ("Sundanese", SUNDANESE), - ("Sunuwar", SUNUWAR), - ("Syloti_Nagri", SYLOTI_NAGRI), - ("Syriac", SYRIAC), - ("Tagalog", TAGALOG), - ("Tagbanwa", TAGBANWA), - ("Tai_Le", TAI_LE), - ("Tai_Tham", TAI_THAM), - ("Tai_Viet", TAI_VIET), - ("Takri", TAKRI), - ("Tamil", TAMIL), - ("Tangsa", TANGSA), - ("Tangut", TANGUT), - ("Telugu", TELUGU), - ("Thaana", THAANA), - ("Thai", THAI), - ("Tibetan", TIBETAN), - ("Tifinagh", TIFINAGH), - ("Tirhuta", TIRHUTA), - ("Todhri", TODHRI), - ("Toto", TOTO), - ("Tulu_Tigalari", TULU_TIGALARI), - ("Ugaritic", UGARITIC), - ("Vai", VAI), - ("Vithkuqi", VITHKUQI), - ("Wancho", WANCHO), - ("Warang_Citi", WARANG_CITI), - ("Yezidi", YEZIDI), - ("Yi", YI), - ("Zanabazar_Square", ZANABAZAR_SQUARE), -]; - -pub const ADLAM: &'static [(char, char)] = - &[('𞤀', '𞥋'), ('𞥐', '𞥙'), ('𞥞', '𞥟')]; - -pub const AHOM: &'static [(char, char)] = - &[('𑜀', '𑜚'), ('\u{1171d}', '\u{1172b}'), ('𑜰', '𑝆')]; - -pub const ANATOLIAN_HIEROGLYPHS: &'static [(char, char)] = &[('𔐀', '𔙆')]; - -pub const ARABIC: &'static [(char, char)] = &[ - ('\u{600}', '\u{604}'), - ('؆', '؋'), - ('؍', '\u{61a}'), - ('\u{61c}', '؞'), - ('ؠ', 'ؿ'), - ('ف', 'ي'), - ('\u{656}', 'ٯ'), - ('ٱ', '\u{6dc}'), - ('۞', 'ۿ'), - ('ݐ', 'ݿ'), - ('ࡰ', 'ࢎ'), - ('\u{890}', '\u{891}'), - ('\u{897}', '\u{8e1}'), - ('\u{8e3}', '\u{8ff}'), - ('ﭐ', '﯂'), - ('ﯓ', 'ﴽ'), - ('﵀', 'ﶏ'), - ('ﶒ', 'ﷇ'), - ('﷏', '﷏'), - ('ﷰ', '﷿'), - ('ﹰ', 'ﹴ'), - ('ﹶ', 'ﻼ'), - ('𐹠', '𐹾'), - ('𐻂', '𐻄'), - ('\u{10efc}', '\u{10eff}'), - ('𞸀', '𞸃'), - ('𞸅', '𞸟'), - ('𞸡', '𞸢'), - ('𞸤', '𞸤'), - ('𞸧', '𞸧'), - ('𞸩', '𞸲'), - ('𞸴', '𞸷'), - ('𞸹', '𞸹'), - ('𞸻', '𞸻'), - ('𞹂', '𞹂'), - ('𞹇', '𞹇'), - ('𞹉', '𞹉'), - ('𞹋', '𞹋'), - ('𞹍', '𞹏'), - ('𞹑', '𞹒'), - ('𞹔', '𞹔'), - ('𞹗', '𞹗'), - ('𞹙', '𞹙'), - ('𞹛', '𞹛'), - ('𞹝', '𞹝'), - ('𞹟', '𞹟'), - ('𞹡', '𞹢'), - ('𞹤', '𞹤'), - ('𞹧', '𞹪'), - ('𞹬', '𞹲'), - ('𞹴', '𞹷'), - ('𞹹', '𞹼'), - ('𞹾', '𞹾'), - ('𞺀', '𞺉'), - ('𞺋', '𞺛'), - ('𞺡', '𞺣'), - ('𞺥', '𞺩'), - ('𞺫', '𞺻'), - ('𞻰', '𞻱'), -]; - -pub const ARMENIAN: &'static [(char, char)] = - &[('Ա', 'Ֆ'), ('ՙ', '֊'), ('֍', '֏'), ('ﬓ', 'ﬗ')]; - -pub const AVESTAN: &'static [(char, char)] = &[('𐬀', '𐬵'), ('𐬹', '𐬿')]; - 
-pub const BALINESE: &'static [(char, char)] = &[('\u{1b00}', 'ᭌ'), ('᭎', '᭿')]; - -pub const BAMUM: &'static [(char, char)] = &[('ꚠ', '꛷'), ('𖠀', '𖨸')]; - -pub const BASSA_VAH: &'static [(char, char)] = - &[('𖫐', '𖫭'), ('\u{16af0}', '𖫵')]; - -pub const BATAK: &'static [(char, char)] = &[('ᯀ', '\u{1bf3}'), ('᯼', '᯿')]; - -pub const BENGALI: &'static [(char, char)] = &[ - ('ঀ', 'ঃ'), - ('অ', 'ঌ'), - ('এ', 'ঐ'), - ('ও', 'ন'), - ('প', 'র'), - ('ল', 'ল'), - ('শ', 'হ'), - ('\u{9bc}', '\u{9c4}'), - ('ে', 'ৈ'), - ('ো', 'ৎ'), - ('\u{9d7}', '\u{9d7}'), - ('ড়', 'ঢ়'), - ('য়', '\u{9e3}'), - ('০', '\u{9fe}'), -]; - -pub const BHAIKSUKI: &'static [(char, char)] = - &[('𑰀', '𑰈'), ('𑰊', '\u{11c36}'), ('\u{11c38}', '𑱅'), ('𑱐', '𑱬')]; - -pub const BOPOMOFO: &'static [(char, char)] = - &[('˪', '˫'), ('ㄅ', 'ㄯ'), ('ㆠ', 'ㆿ')]; - -pub const BRAHMI: &'static [(char, char)] = - &[('𑀀', '𑁍'), ('𑁒', '𑁵'), ('\u{1107f}', '\u{1107f}')]; - -pub const BRAILLE: &'static [(char, char)] = &[('⠀', '⣿')]; - -pub const BUGINESE: &'static [(char, char)] = &[('ᨀ', '\u{1a1b}'), ('᨞', '᨟')]; - -pub const BUHID: &'static [(char, char)] = &[('ᝀ', '\u{1753}')]; - -pub const CANADIAN_ABORIGINAL: &'static [(char, char)] = - &[('᐀', 'ᙿ'), ('ᢰ', 'ᣵ'), ('𑪰', '𑪿')]; - -pub const CARIAN: &'static [(char, char)] = &[('𐊠', '𐋐')]; - -pub const CAUCASIAN_ALBANIAN: &'static [(char, char)] = - &[('𐔰', '𐕣'), ('𐕯', '𐕯')]; - -pub const CHAKMA: &'static [(char, char)] = - &[('\u{11100}', '\u{11134}'), ('𑄶', '𑅇')]; - -pub const CHAM: &'static [(char, char)] = - &[('ꨀ', '\u{aa36}'), ('ꩀ', 'ꩍ'), ('꩐', '꩙'), ('꩜', '꩟')]; - -pub const CHEROKEE: &'static [(char, char)] = - &[('Ꭰ', 'Ᏽ'), ('ᏸ', 'ᏽ'), ('ꭰ', 'ꮿ')]; - -pub const CHORASMIAN: &'static [(char, char)] = &[('𐾰', '𐿋')]; - -pub const COMMON: &'static [(char, char)] = &[ - ('\0', '@'), - ('[', '`'), - ('{', '©'), - ('«', '¹'), - ('»', '¿'), - ('×', '×'), - ('÷', '÷'), - ('ʹ', '˟'), - ('˥', '˩'), - ('ˬ', '˿'), - ('ʹ', 'ʹ'), - (';', ';'), - ('΅', '΅'), - ('·', '·'), - ('\u{605}', '\u{605}'), - ('،', '،'), - ('؛', '؛'), - ('؟', '؟'), - ('ـ', 'ـ'), - ('\u{6dd}', '\u{6dd}'), - ('\u{8e2}', '\u{8e2}'), - ('।', '॥'), - ('฿', '฿'), - ('࿕', '࿘'), - ('჻', '჻'), - ('᛫', '᛭'), - ('᜵', '᜶'), - ('᠂', '᠃'), - ('᠅', '᠅'), - ('᳓', '᳓'), - ('᳡', '᳡'), - ('ᳩ', 'ᳬ'), - ('ᳮ', 'ᳳ'), - ('ᳵ', '᳷'), - ('ᳺ', 'ᳺ'), - ('\u{2000}', '\u{200b}'), - ('\u{200e}', '\u{2064}'), - ('\u{2066}', '⁰'), - ('⁴', '⁾'), - ('₀', '₎'), - ('₠', '⃀'), - ('℀', '℥'), - ('℧', '℩'), - ('ℬ', 'ℱ'), - ('ℳ', '⅍'), - ('⅏', '⅟'), - ('↉', '↋'), - ('←', '␩'), - ('⑀', '⑊'), - ('①', '⟿'), - ('⤀', '⭳'), - ('⭶', '⮕'), - ('⮗', '⯿'), - ('⸀', '⹝'), - ('⿰', '〄'), - ('〆', '〆'), - ('〈', '〠'), - ('〰', '〷'), - ('〼', '〿'), - ('゛', '゜'), - ('゠', '゠'), - ('・', 'ー'), - ('㆐', '㆟'), - ('㇀', '㇥'), - ('㇯', '㇯'), - ('㈠', '㉟'), - ('㉿', '㋏'), - ('㋿', '㋿'), - ('㍘', '㏿'), - ('䷀', '䷿'), - ('꜀', '꜡'), - ('ꞈ', '꞊'), - ('꠰', '꠹'), - ('꤮', '꤮'), - ('ꧏ', 'ꧏ'), - ('꭛', '꭛'), - ('꭪', '꭫'), - ('﴾', '﴿'), - ('︐', '︙'), - ('︰', '﹒'), - ('﹔', '﹦'), - ('﹨', '﹫'), - ('\u{feff}', '\u{feff}'), - ('!', '@'), - ('[', '`'), - ('{', '・'), - ('ー', 'ー'), - ('\u{ff9e}', '\u{ff9f}'), - ('¢', '₩'), - ('│', '○'), - ('\u{fff9}', '�'), - ('𐄀', '𐄂'), - ('𐄇', '𐄳'), - ('𐄷', '𐄿'), - ('𐆐', '𐆜'), - ('𐇐', '𐇼'), - ('𐋡', '𐋻'), - ('\u{1bca0}', '\u{1bca3}'), - ('𜰀', '𜳹'), - ('𜴀', '𜺳'), - ('𜽐', '𜿃'), - ('𝀀', '𝃵'), - ('𝄀', '𝄦'), - ('𝄩', '\u{1d166}'), - ('𝅪', '\u{1d17a}'), - ('𝆃', '𝆄'), - ('𝆌', '𝆩'), - ('𝆮', '𝇪'), - ('𝋀', '𝋓'), - ('𝋠', '𝋳'), - ('𝌀', '𝍖'), - ('𝍠', '𝍸'), - ('𝐀', '𝑔'), - ('𝑖', '𝒜'), - ('𝒞', '𝒟'), - ('𝒢', '𝒢'), 
- ('𝒥', '𝒦'), - ('𝒩', '𝒬'), - ('𝒮', '𝒹'), - ('𝒻', '𝒻'), - ('𝒽', '𝓃'), - ('𝓅', '𝔅'), - ('𝔇', '𝔊'), - ('𝔍', '𝔔'), - ('𝔖', '𝔜'), - ('𝔞', '𝔹'), - ('𝔻', '𝔾'), - ('𝕀', '𝕄'), - ('𝕆', '𝕆'), - ('𝕊', '𝕐'), - ('𝕒', '𝚥'), - ('𝚨', '𝟋'), - ('𝟎', '𝟿'), - ('𞱱', '𞲴'), - ('𞴁', '𞴽'), - ('🀀', '🀫'), - ('🀰', '🂓'), - ('🂠', '🂮'), - ('🂱', '🂿'), - ('🃁', '🃏'), - ('🃑', '🃵'), - ('🄀', '🆭'), - ('🇦', '🇿'), - ('🈁', '🈂'), - ('🈐', '🈻'), - ('🉀', '🉈'), - ('🉐', '🉑'), - ('🉠', '🉥'), - ('🌀', '🛗'), - ('🛜', '🛬'), - ('🛰', '🛼'), - ('🜀', '🝶'), - ('🝻', '🟙'), - ('🟠', '🟫'), - ('🟰', '🟰'), - ('🠀', '🠋'), - ('🠐', '🡇'), - ('🡐', '🡙'), - ('🡠', '🢇'), - ('🢐', '🢭'), - ('🢰', '🢻'), - ('🣀', '🣁'), - ('🤀', '🩓'), - ('🩠', '🩭'), - ('🩰', '🩼'), - ('🪀', '🪉'), - ('🪏', '🫆'), - ('🫎', '🫜'), - ('🫟', '🫩'), - ('🫰', '🫸'), - ('🬀', '🮒'), - ('🮔', '🯹'), - ('\u{e0001}', '\u{e0001}'), - ('\u{e0020}', '\u{e007f}'), -]; - -pub const COPTIC: &'static [(char, char)] = - &[('Ϣ', 'ϯ'), ('Ⲁ', 'ⳳ'), ('⳹', '⳿')]; - -pub const CUNEIFORM: &'static [(char, char)] = - &[('𒀀', '𒎙'), ('𒐀', '𒑮'), ('𒑰', '𒑴'), ('𒒀', '𒕃')]; - -pub const CYPRIOT: &'static [(char, char)] = - &[('𐠀', '𐠅'), ('𐠈', '𐠈'), ('𐠊', '𐠵'), ('𐠷', '𐠸'), ('𐠼', '𐠼'), ('𐠿', '𐠿')]; - -pub const CYPRO_MINOAN: &'static [(char, char)] = &[('𒾐', '𒿲')]; - -pub const CYRILLIC: &'static [(char, char)] = &[ - ('Ѐ', '\u{484}'), - ('\u{487}', 'ԯ'), - ('ᲀ', 'ᲊ'), - ('ᴫ', 'ᴫ'), - ('ᵸ', 'ᵸ'), - ('\u{2de0}', '\u{2dff}'), - ('Ꙁ', '\u{a69f}'), - ('\u{fe2e}', '\u{fe2f}'), - ('𞀰', '𞁭'), - ('\u{1e08f}', '\u{1e08f}'), -]; - -pub const DESERET: &'static [(char, char)] = &[('𐐀', '𐑏')]; - -pub const DEVANAGARI: &'static [(char, char)] = &[ - ('\u{900}', 'ॐ'), - ('\u{955}', '\u{963}'), - ('०', 'ॿ'), - ('\u{a8e0}', '\u{a8ff}'), - ('𑬀', '𑬉'), -]; - -pub const DIVES_AKURU: &'static [(char, char)] = &[ - ('𑤀', '𑤆'), - ('𑤉', '𑤉'), - ('𑤌', '𑤓'), - ('𑤕', '𑤖'), - ('𑤘', '𑤵'), - ('𑤷', '𑤸'), - ('\u{1193b}', '𑥆'), - ('𑥐', '𑥙'), -]; - -pub const DOGRA: &'static [(char, char)] = &[('𑠀', '𑠻')]; - -pub const DUPLOYAN: &'static [(char, char)] = - &[('𛰀', '𛱪'), ('𛱰', '𛱼'), ('𛲀', '𛲈'), ('𛲐', '𛲙'), ('𛲜', '𛲟')]; - -pub const EGYPTIAN_HIEROGLYPHS: &'static [(char, char)] = - &[('𓀀', '\u{13455}'), ('𓑠', '𔏺')]; - -pub const ELBASAN: &'static [(char, char)] = &[('𐔀', '𐔧')]; - -pub const ELYMAIC: &'static [(char, char)] = &[('𐿠', '𐿶')]; - -pub const ETHIOPIC: &'static [(char, char)] = &[ - ('ሀ', 'ቈ'), - ('ቊ', 'ቍ'), - ('ቐ', 'ቖ'), - ('ቘ', 'ቘ'), - ('ቚ', 'ቝ'), - ('በ', 'ኈ'), - ('ኊ', 'ኍ'), - ('ነ', 'ኰ'), - ('ኲ', 'ኵ'), - ('ኸ', 'ኾ'), - ('ዀ', 'ዀ'), - ('ዂ', 'ዅ'), - ('ወ', 'ዖ'), - ('ዘ', 'ጐ'), - ('ጒ', 'ጕ'), - ('ጘ', 'ፚ'), - ('\u{135d}', '፼'), - ('ᎀ', '᎙'), - ('ⶀ', 'ⶖ'), - ('ⶠ', 'ⶦ'), - ('ⶨ', 'ⶮ'), - ('ⶰ', 'ⶶ'), - ('ⶸ', 'ⶾ'), - ('ⷀ', 'ⷆ'), - ('ⷈ', 'ⷎ'), - ('ⷐ', 'ⷖ'), - ('ⷘ', 'ⷞ'), - ('ꬁ', 'ꬆ'), - ('ꬉ', 'ꬎ'), - ('ꬑ', 'ꬖ'), - ('ꬠ', 'ꬦ'), - ('ꬨ', 'ꬮ'), - ('𞟠', '𞟦'), - ('𞟨', '𞟫'), - ('𞟭', '𞟮'), - ('𞟰', '𞟾'), -]; - -pub const GARAY: &'static [(char, char)] = - &[('𐵀', '𐵥'), ('\u{10d69}', '𐶅'), ('𐶎', '𐶏')]; - -pub const GEORGIAN: &'static [(char, char)] = &[ - ('Ⴀ', 'Ⴥ'), - ('Ⴧ', 'Ⴧ'), - ('Ⴭ', 'Ⴭ'), - ('ა', 'ჺ'), - ('ჼ', 'ჿ'), - ('Ა', 'Ჺ'), - ('Ჽ', 'Ჿ'), - ('ⴀ', 'ⴥ'), - ('ⴧ', 'ⴧ'), - ('ⴭ', 'ⴭ'), -]; - -pub const GLAGOLITIC: &'static [(char, char)] = &[ - ('Ⰰ', 'ⱟ'), - ('\u{1e000}', '\u{1e006}'), - ('\u{1e008}', '\u{1e018}'), - ('\u{1e01b}', '\u{1e021}'), - ('\u{1e023}', '\u{1e024}'), - ('\u{1e026}', '\u{1e02a}'), -]; - -pub const GOTHIC: &'static [(char, char)] = &[('𐌰', '𐍊')]; - -pub const GRANTHA: &'static [(char, char)] = &[ - ('\u{11300}', '𑌃'), - ('𑌅', '𑌌'), - ('𑌏', '𑌐'), - ('𑌓', '𑌨'), - 
('𑌪', '𑌰'), - ('𑌲', '𑌳'), - ('𑌵', '𑌹'), - ('\u{1133c}', '𑍄'), - ('𑍇', '𑍈'), - ('𑍋', '\u{1134d}'), - ('𑍐', '𑍐'), - ('\u{11357}', '\u{11357}'), - ('𑍝', '𑍣'), - ('\u{11366}', '\u{1136c}'), - ('\u{11370}', '\u{11374}'), -]; - -pub const GREEK: &'static [(char, char)] = &[ - ('Ͱ', 'ͳ'), - ('͵', 'ͷ'), - ('ͺ', 'ͽ'), - ('Ϳ', 'Ϳ'), - ('΄', '΄'), - ('Ά', 'Ά'), - ('Έ', 'Ί'), - ('Ό', 'Ό'), - ('Ύ', 'Ρ'), - ('Σ', 'ϡ'), - ('ϰ', 'Ͽ'), - ('ᴦ', 'ᴪ'), - ('ᵝ', 'ᵡ'), - ('ᵦ', 'ᵪ'), - ('ᶿ', 'ᶿ'), - ('ἀ', 'ἕ'), - ('Ἐ', 'Ἕ'), - ('ἠ', 'ὅ'), - ('Ὀ', 'Ὅ'), - ('ὐ', 'ὗ'), - ('Ὑ', 'Ὑ'), - ('Ὓ', 'Ὓ'), - ('Ὕ', 'Ὕ'), - ('Ὗ', 'ώ'), - ('ᾀ', 'ᾴ'), - ('ᾶ', 'ῄ'), - ('ῆ', 'ΐ'), - ('ῖ', 'Ί'), - ('῝', '`'), - ('ῲ', 'ῴ'), - ('ῶ', '῾'), - ('Ω', 'Ω'), - ('ꭥ', 'ꭥ'), - ('𐅀', '𐆎'), - ('𐆠', '𐆠'), - ('𝈀', '𝉅'), -]; - -pub const GUJARATI: &'static [(char, char)] = &[ - ('\u{a81}', 'ઃ'), - ('અ', 'ઍ'), - ('એ', 'ઑ'), - ('ઓ', 'ન'), - ('પ', 'ર'), - ('લ', 'ળ'), - ('વ', 'હ'), - ('\u{abc}', '\u{ac5}'), - ('\u{ac7}', 'ૉ'), - ('ો', '\u{acd}'), - ('ૐ', 'ૐ'), - ('ૠ', '\u{ae3}'), - ('૦', '૱'), - ('ૹ', '\u{aff}'), -]; - -pub const GUNJALA_GONDI: &'static [(char, char)] = &[ - ('𑵠', '𑵥'), - ('𑵧', '𑵨'), - ('𑵪', '𑶎'), - ('\u{11d90}', '\u{11d91}'), - ('𑶓', '𑶘'), - ('𑶠', '𑶩'), -]; - -pub const GURMUKHI: &'static [(char, char)] = &[ - ('\u{a01}', 'ਃ'), - ('ਅ', 'ਊ'), - ('ਏ', 'ਐ'), - ('ਓ', 'ਨ'), - ('ਪ', 'ਰ'), - ('ਲ', 'ਲ਼'), - ('ਵ', 'ਸ਼'), - ('ਸ', 'ਹ'), - ('\u{a3c}', '\u{a3c}'), - ('ਾ', '\u{a42}'), - ('\u{a47}', '\u{a48}'), - ('\u{a4b}', '\u{a4d}'), - ('\u{a51}', '\u{a51}'), - ('ਖ਼', 'ੜ'), - ('ਫ਼', 'ਫ਼'), - ('੦', '੶'), -]; - -pub const GURUNG_KHEMA: &'static [(char, char)] = &[('𖄀', '𖄹')]; - -pub const HAN: &'static [(char, char)] = &[ - ('⺀', '⺙'), - ('⺛', '⻳'), - ('⼀', '⿕'), - ('々', '々'), - ('〇', '〇'), - ('〡', '〩'), - ('〸', '〻'), - ('㐀', '䶿'), - ('一', '鿿'), - ('豈', '舘'), - ('並', '龎'), - ('𖿢', '𖿣'), - ('\u{16ff0}', '\u{16ff1}'), - ('𠀀', '𪛟'), - ('𪜀', '𫜹'), - ('𫝀', '𫠝'), - ('𫠠', '𬺡'), - ('𬺰', '𮯠'), - ('𮯰', '𮹝'), - ('丽', '𪘀'), - ('𰀀', '𱍊'), - ('𱍐', '𲎯'), -]; - -pub const HANGUL: &'static [(char, char)] = &[ - ('ᄀ', 'ᇿ'), - ('\u{302e}', '\u{302f}'), - ('ㄱ', 'ㆎ'), - ('㈀', '㈞'), - ('㉠', '㉾'), - ('ꥠ', 'ꥼ'), - ('가', '힣'), - ('ힰ', 'ퟆ'), - ('ퟋ', 'ퟻ'), - ('ᅠ', 'ᄒ'), - ('ᅡ', 'ᅦ'), - ('ᅧ', 'ᅬ'), - ('ᅭ', 'ᅲ'), - ('ᅳ', 'ᅵ'), -]; - -pub const HANIFI_ROHINGYA: &'static [(char, char)] = - &[('𐴀', '\u{10d27}'), ('𐴰', '𐴹')]; - -pub const HANUNOO: &'static [(char, char)] = &[('ᜠ', '\u{1734}')]; - -pub const HATRAN: &'static [(char, char)] = - &[('𐣠', '𐣲'), ('𐣴', '𐣵'), ('𐣻', '𐣿')]; - -pub const HEBREW: &'static [(char, char)] = &[ - ('\u{591}', '\u{5c7}'), - ('א', 'ת'), - ('ׯ', '״'), - ('יִ', 'זּ'), - ('טּ', 'לּ'), - ('מּ', 'מּ'), - ('נּ', 'סּ'), - ('ףּ', 'פּ'), - ('צּ', 'ﭏ'), -]; - -pub const HIRAGANA: &'static [(char, char)] = &[ - ('ぁ', 'ゖ'), - ('ゝ', 'ゟ'), - ('𛀁', '𛄟'), - ('𛄲', '𛄲'), - ('𛅐', '𛅒'), - ('🈀', '🈀'), -]; - -pub const IMPERIAL_ARAMAIC: &'static [(char, char)] = - &[('𐡀', '𐡕'), ('𐡗', '𐡟')]; - -pub const INHERITED: &'static [(char, char)] = &[ - ('\u{300}', '\u{36f}'), - ('\u{485}', '\u{486}'), - ('\u{64b}', '\u{655}'), - ('\u{670}', '\u{670}'), - ('\u{951}', '\u{954}'), - ('\u{1ab0}', '\u{1ace}'), - ('\u{1cd0}', '\u{1cd2}'), - ('\u{1cd4}', '\u{1ce0}'), - ('\u{1ce2}', '\u{1ce8}'), - ('\u{1ced}', '\u{1ced}'), - ('\u{1cf4}', '\u{1cf4}'), - ('\u{1cf8}', '\u{1cf9}'), - ('\u{1dc0}', '\u{1dff}'), - ('\u{200c}', '\u{200d}'), - ('\u{20d0}', '\u{20f0}'), - ('\u{302a}', '\u{302d}'), - ('\u{3099}', '\u{309a}'), - ('\u{fe00}', '\u{fe0f}'), - ('\u{fe20}', '\u{fe2d}'), - ('\u{101fd}', 
'\u{101fd}'), - ('\u{102e0}', '\u{102e0}'), - ('\u{1133b}', '\u{1133b}'), - ('\u{1cf00}', '\u{1cf2d}'), - ('\u{1cf30}', '\u{1cf46}'), - ('\u{1d167}', '\u{1d169}'), - ('\u{1d17b}', '\u{1d182}'), - ('\u{1d185}', '\u{1d18b}'), - ('\u{1d1aa}', '\u{1d1ad}'), - ('\u{e0100}', '\u{e01ef}'), -]; - -pub const INSCRIPTIONAL_PAHLAVI: &'static [(char, char)] = - &[('𐭠', '𐭲'), ('𐭸', '𐭿')]; - -pub const INSCRIPTIONAL_PARTHIAN: &'static [(char, char)] = - &[('𐭀', '𐭕'), ('𐭘', '𐭟')]; - -pub const JAVANESE: &'static [(char, char)] = - &[('\u{a980}', '꧍'), ('꧐', '꧙'), ('꧞', '꧟')]; - -pub const KAITHI: &'static [(char, char)] = - &[('\u{11080}', '\u{110c2}'), ('\u{110cd}', '\u{110cd}')]; - -pub const KANNADA: &'static [(char, char)] = &[ - ('ಀ', 'ಌ'), - ('ಎ', 'ಐ'), - ('ಒ', 'ನ'), - ('ಪ', 'ಳ'), - ('ವ', 'ಹ'), - ('\u{cbc}', 'ೄ'), - ('\u{cc6}', '\u{cc8}'), - ('\u{cca}', '\u{ccd}'), - ('\u{cd5}', '\u{cd6}'), - ('ೝ', 'ೞ'), - ('ೠ', '\u{ce3}'), - ('೦', '೯'), - ('ೱ', 'ೳ'), -]; - -pub const KATAKANA: &'static [(char, char)] = &[ - ('ァ', 'ヺ'), - ('ヽ', 'ヿ'), - ('ㇰ', 'ㇿ'), - ('㋐', '㋾'), - ('㌀', '㍗'), - ('ヲ', 'ッ'), - ('ア', 'ン'), - ('𚿰', '𚿳'), - ('𚿵', '𚿻'), - ('𚿽', '𚿾'), - ('𛀀', '𛀀'), - ('𛄠', '𛄢'), - ('𛅕', '𛅕'), - ('𛅤', '𛅧'), -]; - -pub const KAWI: &'static [(char, char)] = - &[('\u{11f00}', '𑼐'), ('𑼒', '\u{11f3a}'), ('𑼾', '\u{11f5a}')]; - -pub const KAYAH_LI: &'static [(char, char)] = &[('꤀', '\u{a92d}'), ('꤯', '꤯')]; - -pub const KHAROSHTHI: &'static [(char, char)] = &[ - ('𐨀', '\u{10a03}'), - ('\u{10a05}', '\u{10a06}'), - ('\u{10a0c}', '𐨓'), - ('𐨕', '𐨗'), - ('𐨙', '𐨵'), - ('\u{10a38}', '\u{10a3a}'), - ('\u{10a3f}', '𐩈'), - ('𐩐', '𐩘'), -]; - -pub const KHITAN_SMALL_SCRIPT: &'static [(char, char)] = - &[('\u{16fe4}', '\u{16fe4}'), ('𘬀', '𘳕'), ('𘳿', '𘳿')]; - -pub const KHMER: &'static [(char, char)] = - &[('ក', '\u{17dd}'), ('០', '៩'), ('៰', '៹'), ('᧠', '᧿')]; - -pub const KHOJKI: &'static [(char, char)] = &[('𑈀', '𑈑'), ('𑈓', '\u{11241}')]; - -pub const KHUDAWADI: &'static [(char, char)] = - &[('𑊰', '\u{112ea}'), ('𑋰', '𑋹')]; - -pub const KIRAT_RAI: &'static [(char, char)] = &[('𖵀', '𖵹')]; - -pub const LAO: &'static [(char, char)] = &[ - ('ກ', 'ຂ'), - ('ຄ', 'ຄ'), - ('ຆ', 'ຊ'), - ('ຌ', 'ຣ'), - ('ລ', 'ລ'), - ('ວ', 'ຽ'), - ('ເ', 'ໄ'), - ('ໆ', 'ໆ'), - ('\u{ec8}', '\u{ece}'), - ('໐', '໙'), - ('ໜ', 'ໟ'), -]; - -pub const LATIN: &'static [(char, char)] = &[ - ('A', 'Z'), - ('a', 'z'), - ('ª', 'ª'), - ('º', 'º'), - ('À', 'Ö'), - ('Ø', 'ö'), - ('ø', 'ʸ'), - ('ˠ', 'ˤ'), - ('ᴀ', 'ᴥ'), - ('ᴬ', 'ᵜ'), - ('ᵢ', 'ᵥ'), - ('ᵫ', 'ᵷ'), - ('ᵹ', 'ᶾ'), - ('Ḁ', 'ỿ'), - ('ⁱ', 'ⁱ'), - ('ⁿ', 'ⁿ'), - ('ₐ', 'ₜ'), - ('K', 'Å'), - ('Ⅎ', 'Ⅎ'), - ('ⅎ', 'ⅎ'), - ('Ⅰ', 'ↈ'), - ('Ⱡ', 'Ɀ'), - ('Ꜣ', 'ꞇ'), - ('Ꞌ', 'ꟍ'), - ('Ꟑ', 'ꟑ'), - ('ꟓ', 'ꟓ'), - ('ꟕ', 'Ƛ'), - ('ꟲ', 'ꟿ'), - ('ꬰ', 'ꭚ'), - ('ꭜ', 'ꭤ'), - ('ꭦ', 'ꭩ'), - ('ff', 'st'), - ('A', 'Z'), - ('a', 'z'), - ('𐞀', '𐞅'), - ('𐞇', '𐞰'), - ('𐞲', '𐞺'), - ('𝼀', '𝼞'), - ('𝼥', '𝼪'), -]; - -pub const LEPCHA: &'static [(char, char)] = - &[('ᰀ', '\u{1c37}'), ('᰻', '᱉'), ('ᱍ', 'ᱏ')]; - -pub const LIMBU: &'static [(char, char)] = &[ - ('ᤀ', 'ᤞ'), - ('\u{1920}', 'ᤫ'), - ('ᤰ', '\u{193b}'), - ('᥀', '᥀'), - ('᥄', '᥏'), -]; - -pub const LINEAR_A: &'static [(char, char)] = - &[('𐘀', '𐜶'), ('𐝀', '𐝕'), ('𐝠', '𐝧')]; - -pub const LINEAR_B: &'static [(char, char)] = &[ - ('𐀀', '𐀋'), - ('𐀍', '𐀦'), - ('𐀨', '𐀺'), - ('𐀼', '𐀽'), - ('𐀿', '𐁍'), - ('𐁐', '𐁝'), - ('𐂀', '𐃺'), -]; - -pub const LISU: &'static [(char, char)] = &[('ꓐ', '꓿'), ('𑾰', '𑾰')]; - -pub const LYCIAN: &'static [(char, char)] = &[('𐊀', '𐊜')]; - -pub const LYDIAN: &'static [(char, 
char)] = &[('𐤠', '𐤹'), ('𐤿', '𐤿')]; - -pub const MAHAJANI: &'static [(char, char)] = &[('𑅐', '𑅶')]; - -pub const MAKASAR: &'static [(char, char)] = &[('𑻠', '𑻸')]; - -pub const MALAYALAM: &'static [(char, char)] = &[ - ('\u{d00}', 'ഌ'), - ('എ', 'ഐ'), - ('ഒ', '\u{d44}'), - ('െ', 'ൈ'), - ('ൊ', '൏'), - ('ൔ', '\u{d63}'), - ('൦', 'ൿ'), -]; - -pub const MANDAIC: &'static [(char, char)] = &[('ࡀ', '\u{85b}'), ('࡞', '࡞')]; - -pub const MANICHAEAN: &'static [(char, char)] = - &[('𐫀', '\u{10ae6}'), ('𐫫', '𐫶')]; - -pub const MARCHEN: &'static [(char, char)] = - &[('𑱰', '𑲏'), ('\u{11c92}', '\u{11ca7}'), ('𑲩', '\u{11cb6}')]; - -pub const MASARAM_GONDI: &'static [(char, char)] = &[ - ('𑴀', '𑴆'), - ('𑴈', '𑴉'), - ('𑴋', '\u{11d36}'), - ('\u{11d3a}', '\u{11d3a}'), - ('\u{11d3c}', '\u{11d3d}'), - ('\u{11d3f}', '\u{11d47}'), - ('𑵐', '𑵙'), -]; - -pub const MEDEFAIDRIN: &'static [(char, char)] = &[('𖹀', '𖺚')]; - -pub const MEETEI_MAYEK: &'static [(char, char)] = - &[('ꫠ', '\u{aaf6}'), ('ꯀ', '\u{abed}'), ('꯰', '꯹')]; - -pub const MENDE_KIKAKUI: &'static [(char, char)] = - &[('𞠀', '𞣄'), ('𞣇', '\u{1e8d6}')]; - -pub const MEROITIC_CURSIVE: &'static [(char, char)] = - &[('𐦠', '𐦷'), ('𐦼', '𐧏'), ('𐧒', '𐧿')]; - -pub const MEROITIC_HIEROGLYPHS: &'static [(char, char)] = &[('𐦀', '𐦟')]; - -pub const MIAO: &'static [(char, char)] = - &[('𖼀', '𖽊'), ('\u{16f4f}', '𖾇'), ('\u{16f8f}', '𖾟')]; - -pub const MODI: &'static [(char, char)] = &[('𑘀', '𑙄'), ('𑙐', '𑙙')]; - -pub const MONGOLIAN: &'static [(char, char)] = - &[('᠀', '᠁'), ('᠄', '᠄'), ('᠆', '᠙'), ('ᠠ', 'ᡸ'), ('ᢀ', 'ᢪ'), ('𑙠', '𑙬')]; - -pub const MRO: &'static [(char, char)] = &[('𖩀', '𖩞'), ('𖩠', '𖩩'), ('𖩮', '𖩯')]; - -pub const MULTANI: &'static [(char, char)] = - &[('𑊀', '𑊆'), ('𑊈', '𑊈'), ('𑊊', '𑊍'), ('𑊏', '𑊝'), ('𑊟', '𑊩')]; - -pub const MYANMAR: &'static [(char, char)] = - &[('က', '႟'), ('ꧠ', 'ꧾ'), ('ꩠ', 'ꩿ'), ('𑛐', '𑛣')]; - -pub const NABATAEAN: &'static [(char, char)] = &[('𐢀', '𐢞'), ('𐢧', '𐢯')]; - -pub const NAG_MUNDARI: &'static [(char, char)] = &[('𞓐', '𞓹')]; - -pub const NANDINAGARI: &'static [(char, char)] = - &[('𑦠', '𑦧'), ('𑦪', '\u{119d7}'), ('\u{119da}', '𑧤')]; - -pub const NEW_TAI_LUE: &'static [(char, char)] = - &[('ᦀ', 'ᦫ'), ('ᦰ', 'ᧉ'), ('᧐', '᧚'), ('᧞', '᧟')]; - -pub const NEWA: &'static [(char, char)] = &[('𑐀', '𑑛'), ('𑑝', '𑑡')]; - -pub const NKO: &'static [(char, char)] = &[('߀', 'ߺ'), ('\u{7fd}', '߿')]; - -pub const NUSHU: &'static [(char, char)] = &[('𖿡', '𖿡'), ('𛅰', '𛋻')]; - -pub const NYIAKENG_PUACHUE_HMONG: &'static [(char, char)] = - &[('𞄀', '𞄬'), ('\u{1e130}', '𞄽'), ('𞅀', '𞅉'), ('𞅎', '𞅏')]; - -pub const OGHAM: &'static [(char, char)] = &[('\u{1680}', '᚜')]; - -pub const OL_CHIKI: &'static [(char, char)] = &[('᱐', '᱿')]; - -pub const OL_ONAL: &'static [(char, char)] = &[('𞗐', '𞗺'), ('𞗿', '𞗿')]; - -pub const OLD_HUNGARIAN: &'static [(char, char)] = - &[('𐲀', '𐲲'), ('𐳀', '𐳲'), ('𐳺', '𐳿')]; - -pub const OLD_ITALIC: &'static [(char, char)] = &[('𐌀', '𐌣'), ('𐌭', '𐌯')]; - -pub const OLD_NORTH_ARABIAN: &'static [(char, char)] = &[('𐪀', '𐪟')]; - -pub const OLD_PERMIC: &'static [(char, char)] = &[('𐍐', '\u{1037a}')]; - -pub const OLD_PERSIAN: &'static [(char, char)] = &[('𐎠', '𐏃'), ('𐏈', '𐏕')]; - -pub const OLD_SOGDIAN: &'static [(char, char)] = &[('𐼀', '𐼧')]; - -pub const OLD_SOUTH_ARABIAN: &'static [(char, char)] = &[('𐩠', '𐩿')]; - -pub const OLD_TURKIC: &'static [(char, char)] = &[('𐰀', '𐱈')]; - -pub const OLD_UYGHUR: &'static [(char, char)] = &[('𐽰', '𐾉')]; - -pub const ORIYA: &'static [(char, char)] = &[ - ('\u{b01}', 'ଃ'), - ('ଅ', 'ଌ'), - ('ଏ', 'ଐ'), - 
('ଓ', 'ନ'), - ('ପ', 'ର'), - ('ଲ', 'ଳ'), - ('ଵ', 'ହ'), - ('\u{b3c}', '\u{b44}'), - ('େ', 'ୈ'), - ('ୋ', '\u{b4d}'), - ('\u{b55}', '\u{b57}'), - ('ଡ଼', 'ଢ଼'), - ('ୟ', '\u{b63}'), - ('୦', '୷'), -]; - -pub const OSAGE: &'static [(char, char)] = &[('𐒰', '𐓓'), ('𐓘', '𐓻')]; - -pub const OSMANYA: &'static [(char, char)] = &[('𐒀', '𐒝'), ('𐒠', '𐒩')]; - -pub const PAHAWH_HMONG: &'static [(char, char)] = - &[('𖬀', '𖭅'), ('𖭐', '𖭙'), ('𖭛', '𖭡'), ('𖭣', '𖭷'), ('𖭽', '𖮏')]; - -pub const PALMYRENE: &'static [(char, char)] = &[('𐡠', '𐡿')]; - -pub const PAU_CIN_HAU: &'static [(char, char)] = &[('𑫀', '𑫸')]; - -pub const PHAGS_PA: &'static [(char, char)] = &[('ꡀ', '꡷')]; - -pub const PHOENICIAN: &'static [(char, char)] = &[('𐤀', '𐤛'), ('𐤟', '𐤟')]; - -pub const PSALTER_PAHLAVI: &'static [(char, char)] = - &[('𐮀', '𐮑'), ('𐮙', '𐮜'), ('𐮩', '𐮯')]; - -pub const REJANG: &'static [(char, char)] = &[('ꤰ', '\u{a953}'), ('꥟', '꥟')]; - -pub const RUNIC: &'static [(char, char)] = &[('ᚠ', 'ᛪ'), ('ᛮ', 'ᛸ')]; - -pub const SAMARITAN: &'static [(char, char)] = &[('ࠀ', '\u{82d}'), ('࠰', '࠾')]; - -pub const SAURASHTRA: &'static [(char, char)] = - &[('ꢀ', '\u{a8c5}'), ('꣎', '꣙')]; - -pub const SHARADA: &'static [(char, char)] = &[('\u{11180}', '𑇟')]; - -pub const SHAVIAN: &'static [(char, char)] = &[('𐑐', '𐑿')]; - -pub const SIDDHAM: &'static [(char, char)] = - &[('𑖀', '\u{115b5}'), ('𑖸', '\u{115dd}')]; - -pub const SIGNWRITING: &'static [(char, char)] = - &[('𝠀', '𝪋'), ('\u{1da9b}', '\u{1da9f}'), ('\u{1daa1}', '\u{1daaf}')]; - -pub const SINHALA: &'static [(char, char)] = &[ - ('\u{d81}', 'ඃ'), - ('අ', 'ඖ'), - ('ක', 'න'), - ('ඳ', 'ර'), - ('ල', 'ල'), - ('ව', 'ෆ'), - ('\u{dca}', '\u{dca}'), - ('\u{dcf}', '\u{dd4}'), - ('\u{dd6}', '\u{dd6}'), - ('ෘ', '\u{ddf}'), - ('෦', '෯'), - ('ෲ', '෴'), - ('𑇡', '𑇴'), -]; - -pub const SOGDIAN: &'static [(char, char)] = &[('𐼰', '𐽙')]; - -pub const SORA_SOMPENG: &'static [(char, char)] = &[('𑃐', '𑃨'), ('𑃰', '𑃹')]; - -pub const SOYOMBO: &'static [(char, char)] = &[('𑩐', '𑪢')]; - -pub const SUNDANESE: &'static [(char, char)] = - &[('\u{1b80}', 'ᮿ'), ('᳀', '᳇')]; - -pub const SUNUWAR: &'static [(char, char)] = &[('𑯀', '𑯡'), ('𑯰', '𑯹')]; - -pub const SYLOTI_NAGRI: &'static [(char, char)] = &[('ꠀ', '\u{a82c}')]; - -pub const SYRIAC: &'static [(char, char)] = - &[('܀', '܍'), ('\u{70f}', '\u{74a}'), ('ݍ', 'ݏ'), ('ࡠ', 'ࡪ')]; - -pub const TAGALOG: &'static [(char, char)] = &[('ᜀ', '\u{1715}'), ('ᜟ', 'ᜟ')]; - -pub const TAGBANWA: &'static [(char, char)] = - &[('ᝠ', 'ᝬ'), ('ᝮ', 'ᝰ'), ('\u{1772}', '\u{1773}')]; - -pub const TAI_LE: &'static [(char, char)] = &[('ᥐ', 'ᥭ'), ('ᥰ', 'ᥴ')]; - -pub const TAI_THAM: &'static [(char, char)] = &[ - ('ᨠ', '\u{1a5e}'), - ('\u{1a60}', '\u{1a7c}'), - ('\u{1a7f}', '᪉'), - ('᪐', '᪙'), - ('᪠', '᪭'), -]; - -pub const TAI_VIET: &'static [(char, char)] = &[('ꪀ', 'ꫂ'), ('ꫛ', '꫟')]; - -pub const TAKRI: &'static [(char, char)] = &[('𑚀', '𑚹'), ('𑛀', '𑛉')]; - -pub const TAMIL: &'static [(char, char)] = &[ - ('\u{b82}', 'ஃ'), - ('அ', 'ஊ'), - ('எ', 'ஐ'), - ('ஒ', 'க'), - ('ங', 'ச'), - ('ஜ', 'ஜ'), - ('ஞ', 'ட'), - ('ண', 'த'), - ('ந', 'ப'), - ('ம', 'ஹ'), - ('\u{bbe}', 'ூ'), - ('ெ', 'ை'), - ('ொ', '\u{bcd}'), - ('ௐ', 'ௐ'), - ('\u{bd7}', '\u{bd7}'), - ('௦', '௺'), - ('𑿀', '𑿱'), - ('𑿿', '𑿿'), -]; - -pub const TANGSA: &'static [(char, char)] = &[('𖩰', '𖪾'), ('𖫀', '𖫉')]; - -pub const TANGUT: &'static [(char, char)] = - &[('𖿠', '𖿠'), ('𗀀', '𘟷'), ('𘠀', '𘫿'), ('𘴀', '𘴈')]; - -pub const TELUGU: &'static [(char, char)] = &[ - ('\u{c00}', 'ఌ'), - ('ఎ', 'ఐ'), - ('ఒ', 'న'), - ('ప', 'హ'), - ('\u{c3c}', 'ౄ'), - 
('\u{c46}', '\u{c48}'), - ('\u{c4a}', '\u{c4d}'), - ('\u{c55}', '\u{c56}'), - ('ౘ', 'ౚ'), - ('ౝ', 'ౝ'), - ('ౠ', '\u{c63}'), - ('౦', '౯'), - ('౷', '౿'), -]; - -pub const THAANA: &'static [(char, char)] = &[('ހ', 'ޱ')]; - -pub const THAI: &'static [(char, char)] = &[('ก', '\u{e3a}'), ('เ', '๛')]; - -pub const TIBETAN: &'static [(char, char)] = &[ - ('ༀ', 'ཇ'), - ('ཉ', 'ཬ'), - ('\u{f71}', '\u{f97}'), - ('\u{f99}', '\u{fbc}'), - ('྾', '࿌'), - ('࿎', '࿔'), - ('࿙', '࿚'), -]; - -pub const TIFINAGH: &'static [(char, char)] = - &[('ⴰ', 'ⵧ'), ('ⵯ', '⵰'), ('\u{2d7f}', '\u{2d7f}')]; - -pub const TIRHUTA: &'static [(char, char)] = &[('𑒀', '𑓇'), ('𑓐', '𑓙')]; - -pub const TODHRI: &'static [(char, char)] = &[('𐗀', '𐗳')]; - -pub const TOTO: &'static [(char, char)] = &[('𞊐', '\u{1e2ae}')]; - -pub const TULU_TIGALARI: &'static [(char, char)] = &[ - ('𑎀', '𑎉'), - ('𑎋', '𑎋'), - ('𑎎', '𑎎'), - ('𑎐', '𑎵'), - ('𑎷', '\u{113c0}'), - ('\u{113c2}', '\u{113c2}'), - ('\u{113c5}', '\u{113c5}'), - ('\u{113c7}', '𑏊'), - ('𑏌', '𑏕'), - ('𑏗', '𑏘'), - ('\u{113e1}', '\u{113e2}'), -]; - -pub const UGARITIC: &'static [(char, char)] = &[('𐎀', '𐎝'), ('𐎟', '𐎟')]; - -pub const VAI: &'static [(char, char)] = &[('ꔀ', 'ꘫ')]; - -pub const VITHKUQI: &'static [(char, char)] = &[ - ('𐕰', '𐕺'), - ('𐕼', '𐖊'), - ('𐖌', '𐖒'), - ('𐖔', '𐖕'), - ('𐖗', '𐖡'), - ('𐖣', '𐖱'), - ('𐖳', '𐖹'), - ('𐖻', '𐖼'), -]; - -pub const WANCHO: &'static [(char, char)] = &[('𞋀', '𞋹'), ('𞋿', '𞋿')]; - -pub const WARANG_CITI: &'static [(char, char)] = &[('𑢠', '𑣲'), ('𑣿', '𑣿')]; - -pub const YEZIDI: &'static [(char, char)] = - &[('𐺀', '𐺩'), ('\u{10eab}', '𐺭'), ('𐺰', '𐺱')]; - -pub const YI: &'static [(char, char)] = &[('ꀀ', 'ꒌ'), ('꒐', '꓆')]; - -pub const ZANABAZAR_SQUARE: &'static [(char, char)] = &[('𑨀', '\u{11a47}')]; diff --git a/vendor/regex-syntax/src/unicode_tables/script_extension.rs b/vendor/regex-syntax/src/unicode_tables/script_extension.rs deleted file mode 100644 index e3f492e2d6bee1..00000000000000 --- a/vendor/regex-syntax/src/unicode_tables/script_extension.rs +++ /dev/null @@ -1,1718 +0,0 @@ -// DO NOT EDIT THIS FILE. IT WAS AUTOMATICALLY GENERATED BY: -// -// ucd-generate script-extension ucd-16.0.0 --chars -// -// Unicode version: 16.0.0. -// -// ucd-generate 0.3.1 is available on crates.io. 
- -pub const BY_NAME: &'static [(&'static str, &'static [(char, char)])] = &[ - ("Adlam", ADLAM), - ("Ahom", AHOM), - ("Anatolian_Hieroglyphs", ANATOLIAN_HIEROGLYPHS), - ("Arabic", ARABIC), - ("Armenian", ARMENIAN), - ("Avestan", AVESTAN), - ("Balinese", BALINESE), - ("Bamum", BAMUM), - ("Bassa_Vah", BASSA_VAH), - ("Batak", BATAK), - ("Bengali", BENGALI), - ("Bhaiksuki", BHAIKSUKI), - ("Bopomofo", BOPOMOFO), - ("Brahmi", BRAHMI), - ("Braille", BRAILLE), - ("Buginese", BUGINESE), - ("Buhid", BUHID), - ("Canadian_Aboriginal", CANADIAN_ABORIGINAL), - ("Carian", CARIAN), - ("Caucasian_Albanian", CAUCASIAN_ALBANIAN), - ("Chakma", CHAKMA), - ("Cham", CHAM), - ("Cherokee", CHEROKEE), - ("Chorasmian", CHORASMIAN), - ("Common", COMMON), - ("Coptic", COPTIC), - ("Cuneiform", CUNEIFORM), - ("Cypriot", CYPRIOT), - ("Cypro_Minoan", CYPRO_MINOAN), - ("Cyrillic", CYRILLIC), - ("Deseret", DESERET), - ("Devanagari", DEVANAGARI), - ("Dives_Akuru", DIVES_AKURU), - ("Dogra", DOGRA), - ("Duployan", DUPLOYAN), - ("Egyptian_Hieroglyphs", EGYPTIAN_HIEROGLYPHS), - ("Elbasan", ELBASAN), - ("Elymaic", ELYMAIC), - ("Ethiopic", ETHIOPIC), - ("Garay", GARAY), - ("Georgian", GEORGIAN), - ("Glagolitic", GLAGOLITIC), - ("Gothic", GOTHIC), - ("Grantha", GRANTHA), - ("Greek", GREEK), - ("Gujarati", GUJARATI), - ("Gunjala_Gondi", GUNJALA_GONDI), - ("Gurmukhi", GURMUKHI), - ("Gurung_Khema", GURUNG_KHEMA), - ("Han", HAN), - ("Hangul", HANGUL), - ("Hanifi_Rohingya", HANIFI_ROHINGYA), - ("Hanunoo", HANUNOO), - ("Hatran", HATRAN), - ("Hebrew", HEBREW), - ("Hiragana", HIRAGANA), - ("Imperial_Aramaic", IMPERIAL_ARAMAIC), - ("Inherited", INHERITED), - ("Inscriptional_Pahlavi", INSCRIPTIONAL_PAHLAVI), - ("Inscriptional_Parthian", INSCRIPTIONAL_PARTHIAN), - ("Javanese", JAVANESE), - ("Kaithi", KAITHI), - ("Kannada", KANNADA), - ("Katakana", KATAKANA), - ("Kawi", KAWI), - ("Kayah_Li", KAYAH_LI), - ("Kharoshthi", KHAROSHTHI), - ("Khitan_Small_Script", KHITAN_SMALL_SCRIPT), - ("Khmer", KHMER), - ("Khojki", KHOJKI), - ("Khudawadi", KHUDAWADI), - ("Kirat_Rai", KIRAT_RAI), - ("Lao", LAO), - ("Latin", LATIN), - ("Lepcha", LEPCHA), - ("Limbu", LIMBU), - ("Linear_A", LINEAR_A), - ("Linear_B", LINEAR_B), - ("Lisu", LISU), - ("Lycian", LYCIAN), - ("Lydian", LYDIAN), - ("Mahajani", MAHAJANI), - ("Makasar", MAKASAR), - ("Malayalam", MALAYALAM), - ("Mandaic", MANDAIC), - ("Manichaean", MANICHAEAN), - ("Marchen", MARCHEN), - ("Masaram_Gondi", MASARAM_GONDI), - ("Medefaidrin", MEDEFAIDRIN), - ("Meetei_Mayek", MEETEI_MAYEK), - ("Mende_Kikakui", MENDE_KIKAKUI), - ("Meroitic_Cursive", MEROITIC_CURSIVE), - ("Meroitic_Hieroglyphs", MEROITIC_HIEROGLYPHS), - ("Miao", MIAO), - ("Modi", MODI), - ("Mongolian", MONGOLIAN), - ("Mro", MRO), - ("Multani", MULTANI), - ("Myanmar", MYANMAR), - ("Nabataean", NABATAEAN), - ("Nag_Mundari", NAG_MUNDARI), - ("Nandinagari", NANDINAGARI), - ("New_Tai_Lue", NEW_TAI_LUE), - ("Newa", NEWA), - ("Nko", NKO), - ("Nushu", NUSHU), - ("Nyiakeng_Puachue_Hmong", NYIAKENG_PUACHUE_HMONG), - ("Ogham", OGHAM), - ("Ol_Chiki", OL_CHIKI), - ("Ol_Onal", OL_ONAL), - ("Old_Hungarian", OLD_HUNGARIAN), - ("Old_Italic", OLD_ITALIC), - ("Old_North_Arabian", OLD_NORTH_ARABIAN), - ("Old_Permic", OLD_PERMIC), - ("Old_Persian", OLD_PERSIAN), - ("Old_Sogdian", OLD_SOGDIAN), - ("Old_South_Arabian", OLD_SOUTH_ARABIAN), - ("Old_Turkic", OLD_TURKIC), - ("Old_Uyghur", OLD_UYGHUR), - ("Oriya", ORIYA), - ("Osage", OSAGE), - ("Osmanya", OSMANYA), - ("Pahawh_Hmong", PAHAWH_HMONG), - ("Palmyrene", PALMYRENE), - ("Pau_Cin_Hau", PAU_CIN_HAU), - ("Phags_Pa", 
PHAGS_PA), - ("Phoenician", PHOENICIAN), - ("Psalter_Pahlavi", PSALTER_PAHLAVI), - ("Rejang", REJANG), - ("Runic", RUNIC), - ("Samaritan", SAMARITAN), - ("Saurashtra", SAURASHTRA), - ("Sharada", SHARADA), - ("Shavian", SHAVIAN), - ("Siddham", SIDDHAM), - ("SignWriting", SIGNWRITING), - ("Sinhala", SINHALA), - ("Sogdian", SOGDIAN), - ("Sora_Sompeng", SORA_SOMPENG), - ("Soyombo", SOYOMBO), - ("Sundanese", SUNDANESE), - ("Sunuwar", SUNUWAR), - ("Syloti_Nagri", SYLOTI_NAGRI), - ("Syriac", SYRIAC), - ("Tagalog", TAGALOG), - ("Tagbanwa", TAGBANWA), - ("Tai_Le", TAI_LE), - ("Tai_Tham", TAI_THAM), - ("Tai_Viet", TAI_VIET), - ("Takri", TAKRI), - ("Tamil", TAMIL), - ("Tangsa", TANGSA), - ("Tangut", TANGUT), - ("Telugu", TELUGU), - ("Thaana", THAANA), - ("Thai", THAI), - ("Tibetan", TIBETAN), - ("Tifinagh", TIFINAGH), - ("Tirhuta", TIRHUTA), - ("Todhri", TODHRI), - ("Toto", TOTO), - ("Tulu_Tigalari", TULU_TIGALARI), - ("Ugaritic", UGARITIC), - ("Vai", VAI), - ("Vithkuqi", VITHKUQI), - ("Wancho", WANCHO), - ("Warang_Citi", WARANG_CITI), - ("Yezidi", YEZIDI), - ("Yi", YI), - ("Zanabazar_Square", ZANABAZAR_SQUARE), -]; - -pub const ADLAM: &'static [(char, char)] = &[ - ('؟', '؟'), - ('ـ', 'ـ'), - ('⁏', '⁏'), - ('⹁', '⹁'), - ('𞤀', '𞥋'), - ('𞥐', '𞥙'), - ('𞥞', '𞥟'), -]; - -pub const AHOM: &'static [(char, char)] = - &[('𑜀', '𑜚'), ('\u{1171d}', '\u{1172b}'), ('𑜰', '𑝆')]; - -pub const ANATOLIAN_HIEROGLYPHS: &'static [(char, char)] = &[('𔐀', '𔙆')]; - -pub const ARABIC: &'static [(char, char)] = &[ - ('\u{600}', '\u{604}'), - ('؆', '\u{6dc}'), - ('۞', 'ۿ'), - ('ݐ', 'ݿ'), - ('ࡰ', 'ࢎ'), - ('\u{890}', '\u{891}'), - ('\u{897}', '\u{8e1}'), - ('\u{8e3}', '\u{8ff}'), - ('⁏', '⁏'), - ('⹁', '⹁'), - ('ﭐ', '﯂'), - ('ﯓ', 'ﶏ'), - ('ﶒ', 'ﷇ'), - ('﷏', '﷏'), - ('ﷰ', '﷿'), - ('ﹰ', 'ﹴ'), - ('ﹶ', 'ﻼ'), - ('\u{102e0}', '𐋻'), - ('𐹠', '𐹾'), - ('𐻂', '𐻄'), - ('\u{10efc}', '\u{10eff}'), - ('𞸀', '𞸃'), - ('𞸅', '𞸟'), - ('𞸡', '𞸢'), - ('𞸤', '𞸤'), - ('𞸧', '𞸧'), - ('𞸩', '𞸲'), - ('𞸴', '𞸷'), - ('𞸹', '𞸹'), - ('𞸻', '𞸻'), - ('𞹂', '𞹂'), - ('𞹇', '𞹇'), - ('𞹉', '𞹉'), - ('𞹋', '𞹋'), - ('𞹍', '𞹏'), - ('𞹑', '𞹒'), - ('𞹔', '𞹔'), - ('𞹗', '𞹗'), - ('𞹙', '𞹙'), - ('𞹛', '𞹛'), - ('𞹝', '𞹝'), - ('𞹟', '𞹟'), - ('𞹡', '𞹢'), - ('𞹤', '𞹤'), - ('𞹧', '𞹪'), - ('𞹬', '𞹲'), - ('𞹴', '𞹷'), - ('𞹹', '𞹼'), - ('𞹾', '𞹾'), - ('𞺀', '𞺉'), - ('𞺋', '𞺛'), - ('𞺡', '𞺣'), - ('𞺥', '𞺩'), - ('𞺫', '𞺻'), - ('𞻰', '𞻱'), -]; - -pub const ARMENIAN: &'static [(char, char)] = - &[('\u{308}', '\u{308}'), ('Ա', 'Ֆ'), ('ՙ', '֊'), ('֍', '֏'), ('ﬓ', 'ﬗ')]; - -pub const AVESTAN: &'static [(char, char)] = - &[('·', '·'), ('⸰', '⸱'), ('𐬀', '𐬵'), ('𐬹', '𐬿')]; - -pub const BALINESE: &'static [(char, char)] = &[('\u{1b00}', 'ᭌ'), ('᭎', '᭿')]; - -pub const BAMUM: &'static [(char, char)] = &[('ꚠ', '꛷'), ('𖠀', '𖨸')]; - -pub const BASSA_VAH: &'static [(char, char)] = - &[('𖫐', '𖫭'), ('\u{16af0}', '𖫵')]; - -pub const BATAK: &'static [(char, char)] = &[('ᯀ', '\u{1bf3}'), ('᯼', '᯿')]; - -pub const BENGALI: &'static [(char, char)] = &[ - ('ʼ', 'ʼ'), - ('\u{951}', '\u{952}'), - ('।', '॥'), - ('ঀ', 'ঃ'), - ('অ', 'ঌ'), - ('এ', 'ঐ'), - ('ও', 'ন'), - ('প', 'র'), - ('ল', 'ল'), - ('শ', 'হ'), - ('\u{9bc}', '\u{9c4}'), - ('ে', 'ৈ'), - ('ো', 'ৎ'), - ('\u{9d7}', '\u{9d7}'), - ('ড়', 'ঢ়'), - ('য়', '\u{9e3}'), - ('০', '\u{9fe}'), - ('\u{1cd0}', '\u{1cd0}'), - ('\u{1cd2}', '\u{1cd2}'), - ('\u{1cd5}', '\u{1cd6}'), - ('\u{1cd8}', '\u{1cd8}'), - ('᳡', '᳡'), - ('ᳪ', 'ᳪ'), - ('\u{1ced}', '\u{1ced}'), - ('ᳲ', 'ᳲ'), - ('ᳵ', '᳷'), - ('\u{a8f1}', '\u{a8f1}'), -]; - -pub const BHAIKSUKI: &'static [(char, char)] = - &[('𑰀', '𑰈'), ('𑰊', 
[... remaining deleted lines of the auto-generated Unicode script tables (BOPOMOFO through ZANABAZAR_SQUARE) trimmed ...]
diff --git a/vendor/regex-syntax/src/unicode_tables/sentence_break.rs b/vendor/regex-syntax/src/unicode_tables/sentence_break.rs
deleted file mode 100644
index af1c5bea91b6d8..00000000000000
--- a/vendor/regex-syntax/src/unicode_tables/sentence_break.rs
+++ /dev/null
@@ -1,2530 +0,0 @@
-// DO NOT EDIT THIS FILE. IT WAS AUTOMATICALLY GENERATED BY:
-//
-// ucd-generate sentence-break ucd-16.0.0 --chars
-//
-// Unicode version: 16.0.0.
-//
-// ucd-generate 0.3.1 is available on crates.io.
[... remaining deleted lines of the auto-generated sentence-break tables trimmed ...]
diff --git a/vendor/regex-syntax/src/unicode_tables/word_break.rs b/vendor/regex-syntax/src/unicode_tables/word_break.rs
deleted file mode 100644
index b764d34ac72451..00000000000000
--- a/vendor/regex-syntax/src/unicode_tables/word_break.rs
+++ /dev/null
@@ -1,1152 +0,0 @@
-// DO NOT EDIT THIS FILE. IT WAS AUTOMATICALLY GENERATED BY:
-//
-// ucd-generate word-break ucd-16.0.0 --chars
-//
-// Unicode version: 16.0.0.
-//
-// ucd-generate 0.3.1 is available on crates.io.
- -pub const BY_NAME: &'static [(&'static str, &'static [(char, char)])] = &[ - ("ALetter", ALETTER), - ("CR", CR), - ("Double_Quote", DOUBLE_QUOTE), - ("Extend", EXTEND), - ("ExtendNumLet", EXTENDNUMLET), - ("Format", FORMAT), - ("Hebrew_Letter", HEBREW_LETTER), - ("Katakana", KATAKANA), - ("LF", LF), - ("MidLetter", MIDLETTER), - ("MidNum", MIDNUM), - ("MidNumLet", MIDNUMLET), - ("Newline", NEWLINE), - ("Numeric", NUMERIC), - ("Regional_Indicator", REGIONAL_INDICATOR), - ("Single_Quote", SINGLE_QUOTE), - ("WSegSpace", WSEGSPACE), - ("ZWJ", ZWJ), -]; - -pub const ALETTER: &'static [(char, char)] = &[ - ('A', 'Z'), - ('a', 'z'), - ('ª', 'ª'), - ('µ', 'µ'), - ('º', 'º'), - ('À', 'Ö'), - ('Ø', 'ö'), - ('ø', '˗'), - ('˞', '˿'), - ('Ͱ', 'ʹ'), - ('Ͷ', 'ͷ'), - ('ͺ', 'ͽ'), - ('Ϳ', 'Ϳ'), - ('Ά', 'Ά'), - ('Έ', 'Ί'), - ('Ό', 'Ό'), - ('Ύ', 'Ρ'), - ('Σ', 'ϵ'), - ('Ϸ', 'ҁ'), - ('Ҋ', 'ԯ'), - ('Ա', 'Ֆ'), - ('ՙ', '՜'), - ('՞', '՞'), - ('ՠ', 'ֈ'), - ('֊', '֊'), - ('׳', '׳'), - ('ؠ', 'ي'), - ('ٮ', 'ٯ'), - ('ٱ', 'ۓ'), - ('ە', 'ە'), - ('ۥ', 'ۦ'), - ('ۮ', 'ۯ'), - ('ۺ', 'ۼ'), - ('ۿ', 'ۿ'), - ('\u{70f}', 'ܐ'), - ('ܒ', 'ܯ'), - ('ݍ', 'ޥ'), - ('ޱ', 'ޱ'), - ('ߊ', 'ߪ'), - ('ߴ', 'ߵ'), - ('ߺ', 'ߺ'), - ('ࠀ', 'ࠕ'), - ('ࠚ', 'ࠚ'), - ('ࠤ', 'ࠤ'), - ('ࠨ', 'ࠨ'), - ('ࡀ', 'ࡘ'), - ('ࡠ', 'ࡪ'), - ('ࡰ', 'ࢇ'), - ('ࢉ', 'ࢎ'), - ('ࢠ', 'ࣉ'), - ('ऄ', 'ह'), - ('ऽ', 'ऽ'), - ('ॐ', 'ॐ'), - ('क़', 'ॡ'), - ('ॱ', 'ঀ'), - ('অ', 'ঌ'), - ('এ', 'ঐ'), - ('ও', 'ন'), - ('প', 'র'), - ('ল', 'ল'), - ('শ', 'হ'), - ('ঽ', 'ঽ'), - ('ৎ', 'ৎ'), - ('ড়', 'ঢ়'), - ('য়', 'ৡ'), - ('ৰ', 'ৱ'), - ('ৼ', 'ৼ'), - ('ਅ', 'ਊ'), - ('ਏ', 'ਐ'), - ('ਓ', 'ਨ'), - ('ਪ', 'ਰ'), - ('ਲ', 'ਲ਼'), - ('ਵ', 'ਸ਼'), - ('ਸ', 'ਹ'), - ('ਖ਼', 'ੜ'), - ('ਫ਼', 'ਫ਼'), - ('ੲ', 'ੴ'), - ('અ', 'ઍ'), - ('એ', 'ઑ'), - ('ઓ', 'ન'), - ('પ', 'ર'), - ('લ', 'ળ'), - ('વ', 'હ'), - ('ઽ', 'ઽ'), - ('ૐ', 'ૐ'), - ('ૠ', 'ૡ'), - ('ૹ', 'ૹ'), - ('ଅ', 'ଌ'), - ('ଏ', 'ଐ'), - ('ଓ', 'ନ'), - ('ପ', 'ର'), - ('ଲ', 'ଳ'), - ('ଵ', 'ହ'), - ('ଽ', 'ଽ'), - ('ଡ଼', 'ଢ଼'), - ('ୟ', 'ୡ'), - ('ୱ', 'ୱ'), - ('ஃ', 'ஃ'), - ('அ', 'ஊ'), - ('எ', 'ஐ'), - ('ஒ', 'க'), - ('ங', 'ச'), - ('ஜ', 'ஜ'), - ('ஞ', 'ட'), - ('ண', 'த'), - ('ந', 'ப'), - ('ம', 'ஹ'), - ('ௐ', 'ௐ'), - ('అ', 'ఌ'), - ('ఎ', 'ఐ'), - ('ఒ', 'న'), - ('ప', 'హ'), - ('ఽ', 'ఽ'), - ('ౘ', 'ౚ'), - ('ౝ', 'ౝ'), - ('ౠ', 'ౡ'), - ('ಀ', 'ಀ'), - ('ಅ', 'ಌ'), - ('ಎ', 'ಐ'), - ('ಒ', 'ನ'), - ('ಪ', 'ಳ'), - ('ವ', 'ಹ'), - ('ಽ', 'ಽ'), - ('ೝ', 'ೞ'), - ('ೠ', 'ೡ'), - ('ೱ', 'ೲ'), - ('ഄ', 'ഌ'), - ('എ', 'ഐ'), - ('ഒ', 'ഺ'), - ('ഽ', 'ഽ'), - ('ൎ', 'ൎ'), - ('ൔ', 'ൖ'), - ('ൟ', 'ൡ'), - ('ൺ', 'ൿ'), - ('අ', 'ඖ'), - ('ක', 'න'), - ('ඳ', 'ර'), - ('ල', 'ල'), - ('ව', 'ෆ'), - ('ༀ', 'ༀ'), - ('ཀ', 'ཇ'), - ('ཉ', 'ཬ'), - ('ྈ', 'ྌ'), - ('Ⴀ', 'Ⴥ'), - ('Ⴧ', 'Ⴧ'), - ('Ⴭ', 'Ⴭ'), - ('ა', 'ჺ'), - ('ჼ', 'ቈ'), - ('ቊ', 'ቍ'), - ('ቐ', 'ቖ'), - ('ቘ', 'ቘ'), - ('ቚ', 'ቝ'), - ('በ', 'ኈ'), - ('ኊ', 'ኍ'), - ('ነ', 'ኰ'), - ('ኲ', 'ኵ'), - ('ኸ', 'ኾ'), - ('ዀ', 'ዀ'), - ('ዂ', 'ዅ'), - ('ወ', 'ዖ'), - ('ዘ', 'ጐ'), - ('ጒ', 'ጕ'), - ('ጘ', 'ፚ'), - ('ᎀ', 'ᎏ'), - ('Ꭰ', 'Ᏽ'), - ('ᏸ', 'ᏽ'), - ('ᐁ', 'ᙬ'), - ('ᙯ', 'ᙿ'), - ('ᚁ', 'ᚚ'), - ('ᚠ', 'ᛪ'), - ('ᛮ', 'ᛸ'), - ('ᜀ', 'ᜑ'), - ('ᜟ', 'ᜱ'), - ('ᝀ', 'ᝑ'), - ('ᝠ', 'ᝬ'), - ('ᝮ', 'ᝰ'), - ('ᠠ', 'ᡸ'), - ('ᢀ', 'ᢄ'), - ('ᢇ', 'ᢨ'), - ('ᢪ', 'ᢪ'), - ('ᢰ', 'ᣵ'), - ('ᤀ', 'ᤞ'), - ('ᨀ', 'ᨖ'), - ('ᬅ', 'ᬳ'), - ('ᭅ', 'ᭌ'), - ('ᮃ', 'ᮠ'), - ('ᮮ', 'ᮯ'), - ('ᮺ', 'ᯥ'), - ('ᰀ', 'ᰣ'), - ('ᱍ', 'ᱏ'), - ('ᱚ', 'ᱽ'), - ('ᲀ', 'ᲊ'), - ('Ა', 'Ჺ'), - ('Ჽ', 'Ჿ'), - ('ᳩ', 'ᳬ'), - ('ᳮ', 'ᳳ'), - ('ᳵ', 'ᳶ'), - ('ᳺ', 'ᳺ'), - ('ᴀ', 'ᶿ'), - ('Ḁ', 'ἕ'), - ('Ἐ', 'Ἕ'), - ('ἠ', 'ὅ'), - ('Ὀ', 'Ὅ'), - ('ὐ', 'ὗ'), - ('Ὑ', 'Ὑ'), - ('Ὓ', 'Ὓ'), - ('Ὕ', 'Ὕ'), - ('Ὗ', 'ώ'), - ('ᾀ', 'ᾴ'), - 
('ᾶ', 'ᾼ'), - ('ι', 'ι'), - ('ῂ', 'ῄ'), - ('ῆ', 'ῌ'), - ('ῐ', 'ΐ'), - ('ῖ', 'Ί'), - ('ῠ', 'Ῥ'), - ('ῲ', 'ῴ'), - ('ῶ', 'ῼ'), - ('ⁱ', 'ⁱ'), - ('ⁿ', 'ⁿ'), - ('ₐ', 'ₜ'), - ('ℂ', 'ℂ'), - ('ℇ', 'ℇ'), - ('ℊ', 'ℓ'), - ('ℕ', 'ℕ'), - ('ℙ', 'ℝ'), - ('ℤ', 'ℤ'), - ('Ω', 'Ω'), - ('ℨ', 'ℨ'), - ('K', 'ℭ'), - ('ℯ', 'ℹ'), - ('ℼ', 'ℿ'), - ('ⅅ', 'ⅉ'), - ('ⅎ', 'ⅎ'), - ('Ⅰ', 'ↈ'), - ('Ⓐ', 'ⓩ'), - ('Ⰰ', 'ⳤ'), - ('Ⳬ', 'ⳮ'), - ('Ⳳ', 'ⳳ'), - ('ⴀ', 'ⴥ'), - ('ⴧ', 'ⴧ'), - ('ⴭ', 'ⴭ'), - ('ⴰ', 'ⵧ'), - ('ⵯ', 'ⵯ'), - ('ⶀ', 'ⶖ'), - ('ⶠ', 'ⶦ'), - ('ⶨ', 'ⶮ'), - ('ⶰ', 'ⶶ'), - ('ⶸ', 'ⶾ'), - ('ⷀ', 'ⷆ'), - ('ⷈ', 'ⷎ'), - ('ⷐ', 'ⷖ'), - ('ⷘ', 'ⷞ'), - ('ⸯ', 'ⸯ'), - ('々', '々'), - ('〻', '〼'), - ('ㄅ', 'ㄯ'), - ('ㄱ', 'ㆎ'), - ('ㆠ', 'ㆿ'), - ('ꀀ', 'ꒌ'), - ('ꓐ', 'ꓽ'), - ('ꔀ', 'ꘌ'), - ('ꘐ', 'ꘟ'), - ('ꘪ', 'ꘫ'), - ('Ꙁ', 'ꙮ'), - ('ꙿ', 'ꚝ'), - ('ꚠ', 'ꛯ'), - ('꜈', 'ꟍ'), - ('Ꟑ', 'ꟑ'), - ('ꟓ', 'ꟓ'), - ('ꟕ', 'Ƛ'), - ('ꟲ', 'ꠁ'), - ('ꠃ', 'ꠅ'), - ('ꠇ', 'ꠊ'), - ('ꠌ', 'ꠢ'), - ('ꡀ', 'ꡳ'), - ('ꢂ', 'ꢳ'), - ('ꣲ', 'ꣷ'), - ('ꣻ', 'ꣻ'), - ('ꣽ', 'ꣾ'), - ('ꤊ', 'ꤥ'), - ('ꤰ', 'ꥆ'), - ('ꥠ', 'ꥼ'), - ('ꦄ', 'ꦲ'), - ('ꧏ', 'ꧏ'), - ('ꨀ', 'ꨨ'), - ('ꩀ', 'ꩂ'), - ('ꩄ', 'ꩋ'), - ('ꫠ', 'ꫪ'), - ('ꫲ', 'ꫴ'), - ('ꬁ', 'ꬆ'), - ('ꬉ', 'ꬎ'), - ('ꬑ', 'ꬖ'), - ('ꬠ', 'ꬦ'), - ('ꬨ', 'ꬮ'), - ('ꬰ', 'ꭩ'), - ('ꭰ', 'ꯢ'), - ('가', '힣'), - ('ힰ', 'ퟆ'), - ('ퟋ', 'ퟻ'), - ('ff', 'st'), - ('ﬓ', 'ﬗ'), - ('ﭐ', 'ﮱ'), - ('ﯓ', 'ﴽ'), - ('ﵐ', 'ﶏ'), - ('ﶒ', 'ﷇ'), - ('ﷰ', 'ﷻ'), - ('ﹰ', 'ﹴ'), - ('ﹶ', 'ﻼ'), - ('A', 'Z'), - ('a', 'z'), - ('ᅠ', 'ᄒ'), - ('ᅡ', 'ᅦ'), - ('ᅧ', 'ᅬ'), - ('ᅭ', 'ᅲ'), - ('ᅳ', 'ᅵ'), - ('𐀀', '𐀋'), - ('𐀍', '𐀦'), - ('𐀨', '𐀺'), - ('𐀼', '𐀽'), - ('𐀿', '𐁍'), - ('𐁐', '𐁝'), - ('𐂀', '𐃺'), - ('𐅀', '𐅴'), - ('𐊀', '𐊜'), - ('𐊠', '𐋐'), - ('𐌀', '𐌟'), - ('𐌭', '𐍊'), - ('𐍐', '𐍵'), - ('𐎀', '𐎝'), - ('𐎠', '𐏃'), - ('𐏈', '𐏏'), - ('𐏑', '𐏕'), - ('𐐀', '𐒝'), - ('𐒰', '𐓓'), - ('𐓘', '𐓻'), - ('𐔀', '𐔧'), - ('𐔰', '𐕣'), - ('𐕰', '𐕺'), - ('𐕼', '𐖊'), - ('𐖌', '𐖒'), - ('𐖔', '𐖕'), - ('𐖗', '𐖡'), - ('𐖣', '𐖱'), - ('𐖳', '𐖹'), - ('𐖻', '𐖼'), - ('𐗀', '𐗳'), - ('𐘀', '𐜶'), - ('𐝀', '𐝕'), - ('𐝠', '𐝧'), - ('𐞀', '𐞅'), - ('𐞇', '𐞰'), - ('𐞲', '𐞺'), - ('𐠀', '𐠅'), - ('𐠈', '𐠈'), - ('𐠊', '𐠵'), - ('𐠷', '𐠸'), - ('𐠼', '𐠼'), - ('𐠿', '𐡕'), - ('𐡠', '𐡶'), - ('𐢀', '𐢞'), - ('𐣠', '𐣲'), - ('𐣴', '𐣵'), - ('𐤀', '𐤕'), - ('𐤠', '𐤹'), - ('𐦀', '𐦷'), - ('𐦾', '𐦿'), - ('𐨀', '𐨀'), - ('𐨐', '𐨓'), - ('𐨕', '𐨗'), - ('𐨙', '𐨵'), - ('𐩠', '𐩼'), - ('𐪀', '𐪜'), - ('𐫀', '𐫇'), - ('𐫉', '𐫤'), - ('𐬀', '𐬵'), - ('𐭀', '𐭕'), - ('𐭠', '𐭲'), - ('𐮀', '𐮑'), - ('𐰀', '𐱈'), - ('𐲀', '𐲲'), - ('𐳀', '𐳲'), - ('𐴀', '𐴣'), - ('𐵊', '𐵥'), - ('𐵯', '𐶅'), - ('𐺀', '𐺩'), - ('𐺰', '𐺱'), - ('𐻂', '𐻄'), - ('𐼀', '𐼜'), - ('𐼧', '𐼧'), - ('𐼰', '𐽅'), - ('𐽰', '𐾁'), - ('𐾰', '𐿄'), - ('𐿠', '𐿶'), - ('𑀃', '𑀷'), - ('𑁱', '𑁲'), - ('𑁵', '𑁵'), - ('𑂃', '𑂯'), - ('𑃐', '𑃨'), - ('𑄃', '𑄦'), - ('𑅄', '𑅄'), - ('𑅇', '𑅇'), - ('𑅐', '𑅲'), - ('𑅶', '𑅶'), - ('𑆃', '𑆲'), - ('𑇁', '𑇄'), - ('𑇚', '𑇚'), - ('𑇜', '𑇜'), - ('𑈀', '𑈑'), - ('𑈓', '𑈫'), - ('𑈿', '𑉀'), - ('𑊀', '𑊆'), - ('𑊈', '𑊈'), - ('𑊊', '𑊍'), - ('𑊏', '𑊝'), - ('𑊟', '𑊨'), - ('𑊰', '𑋞'), - ('𑌅', '𑌌'), - ('𑌏', '𑌐'), - ('𑌓', '𑌨'), - ('𑌪', '𑌰'), - ('𑌲', '𑌳'), - ('𑌵', '𑌹'), - ('𑌽', '𑌽'), - ('𑍐', '𑍐'), - ('𑍝', '𑍡'), - ('𑎀', '𑎉'), - ('𑎋', '𑎋'), - ('𑎎', '𑎎'), - ('𑎐', '𑎵'), - ('𑎷', '𑎷'), - ('𑏑', '𑏑'), - ('𑏓', '𑏓'), - ('𑐀', '𑐴'), - ('𑑇', '𑑊'), - ('𑑟', '𑑡'), - ('𑒀', '𑒯'), - ('𑓄', '𑓅'), - ('𑓇', '𑓇'), - ('𑖀', '𑖮'), - ('𑗘', '𑗛'), - ('𑘀', '𑘯'), - ('𑙄', '𑙄'), - ('𑚀', '𑚪'), - ('𑚸', '𑚸'), - ('𑠀', '𑠫'), - ('𑢠', '𑣟'), - ('𑣿', '𑤆'), - ('𑤉', '𑤉'), - ('𑤌', '𑤓'), - ('𑤕', '𑤖'), - ('𑤘', '𑤯'), - ('𑤿', '𑤿'), - ('𑥁', '𑥁'), - ('𑦠', '𑦧'), - ('𑦪', '𑧐'), - ('𑧡', '𑧡'), - ('𑧣', '𑧣'), - ('𑨀', '𑨀'), - ('𑨋', '𑨲'), - ('𑨺', '𑨺'), - ('𑩐', '𑩐'), - ('𑩜', 
'𑪉'), - ('𑪝', '𑪝'), - ('𑪰', '𑫸'), - ('𑯀', '𑯠'), - ('𑰀', '𑰈'), - ('𑰊', '𑰮'), - ('𑱀', '𑱀'), - ('𑱲', '𑲏'), - ('𑴀', '𑴆'), - ('𑴈', '𑴉'), - ('𑴋', '𑴰'), - ('𑵆', '𑵆'), - ('𑵠', '𑵥'), - ('𑵧', '𑵨'), - ('𑵪', '𑶉'), - ('𑶘', '𑶘'), - ('𑻠', '𑻲'), - ('𑼂', '𑼂'), - ('𑼄', '𑼐'), - ('𑼒', '𑼳'), - ('𑾰', '𑾰'), - ('𒀀', '𒎙'), - ('𒐀', '𒑮'), - ('𒒀', '𒕃'), - ('𒾐', '𒿰'), - ('𓀀', '𓐯'), - ('𓑁', '𓑆'), - ('𓑠', '𔏺'), - ('𔐀', '𔙆'), - ('𖄀', '𖄝'), - ('𖠀', '𖨸'), - ('𖩀', '𖩞'), - ('𖩰', '𖪾'), - ('𖫐', '𖫭'), - ('𖬀', '𖬯'), - ('𖭀', '𖭃'), - ('𖭣', '𖭷'), - ('𖭽', '𖮏'), - ('𖵀', '𖵬'), - ('𖹀', '𖹿'), - ('𖼀', '𖽊'), - ('𖽐', '𖽐'), - ('𖾓', '𖾟'), - ('𖿠', '𖿡'), - ('𖿣', '𖿣'), - ('𛰀', '𛱪'), - ('𛱰', '𛱼'), - ('𛲀', '𛲈'), - ('𛲐', '𛲙'), - ('𝐀', '𝑔'), - ('𝑖', '𝒜'), - ('𝒞', '𝒟'), - ('𝒢', '𝒢'), - ('𝒥', '𝒦'), - ('𝒩', '𝒬'), - ('𝒮', '𝒹'), - ('𝒻', '𝒻'), - ('𝒽', '𝓃'), - ('𝓅', '𝔅'), - ('𝔇', '𝔊'), - ('𝔍', '𝔔'), - ('𝔖', '𝔜'), - ('𝔞', '𝔹'), - ('𝔻', '𝔾'), - ('𝕀', '𝕄'), - ('𝕆', '𝕆'), - ('𝕊', '𝕐'), - ('𝕒', '𝚥'), - ('𝚨', '𝛀'), - ('𝛂', '𝛚'), - ('𝛜', '𝛺'), - ('𝛼', '𝜔'), - ('𝜖', '𝜴'), - ('𝜶', '𝝎'), - ('𝝐', '𝝮'), - ('𝝰', '𝞈'), - ('𝞊', '𝞨'), - ('𝞪', '𝟂'), - ('𝟄', '𝟋'), - ('𝼀', '𝼞'), - ('𝼥', '𝼪'), - ('𞀰', '𞁭'), - ('𞄀', '𞄬'), - ('𞄷', '𞄽'), - ('𞅎', '𞅎'), - ('𞊐', '𞊭'), - ('𞋀', '𞋫'), - ('𞓐', '𞓫'), - ('𞗐', '𞗭'), - ('𞗰', '𞗰'), - ('𞟠', '𞟦'), - ('𞟨', '𞟫'), - ('𞟭', '𞟮'), - ('𞟰', '𞟾'), - ('𞠀', '𞣄'), - ('𞤀', '𞥃'), - ('𞥋', '𞥋'), - ('𞸀', '𞸃'), - ('𞸅', '𞸟'), - ('𞸡', '𞸢'), - ('𞸤', '𞸤'), - ('𞸧', '𞸧'), - ('𞸩', '𞸲'), - ('𞸴', '𞸷'), - ('𞸹', '𞸹'), - ('𞸻', '𞸻'), - ('𞹂', '𞹂'), - ('𞹇', '𞹇'), - ('𞹉', '𞹉'), - ('𞹋', '𞹋'), - ('𞹍', '𞹏'), - ('𞹑', '𞹒'), - ('𞹔', '𞹔'), - ('𞹗', '𞹗'), - ('𞹙', '𞹙'), - ('𞹛', '𞹛'), - ('𞹝', '𞹝'), - ('𞹟', '𞹟'), - ('𞹡', '𞹢'), - ('𞹤', '𞹤'), - ('𞹧', '𞹪'), - ('𞹬', '𞹲'), - ('𞹴', '𞹷'), - ('𞹹', '𞹼'), - ('𞹾', '𞹾'), - ('𞺀', '𞺉'), - ('𞺋', '𞺛'), - ('𞺡', '𞺣'), - ('𞺥', '𞺩'), - ('𞺫', '𞺻'), - ('🄰', '🅉'), - ('🅐', '🅩'), - ('🅰', '🆉'), -]; - -pub const CR: &'static [(char, char)] = &[('\r', '\r')]; - -pub const DOUBLE_QUOTE: &'static [(char, char)] = &[('"', '"')]; - -pub const EXTEND: &'static [(char, char)] = &[ - ('\u{300}', '\u{36f}'), - ('\u{483}', '\u{489}'), - ('\u{591}', '\u{5bd}'), - ('\u{5bf}', '\u{5bf}'), - ('\u{5c1}', '\u{5c2}'), - ('\u{5c4}', '\u{5c5}'), - ('\u{5c7}', '\u{5c7}'), - ('\u{610}', '\u{61a}'), - ('\u{64b}', '\u{65f}'), - ('\u{670}', '\u{670}'), - ('\u{6d6}', '\u{6dc}'), - ('\u{6df}', '\u{6e4}'), - ('\u{6e7}', '\u{6e8}'), - ('\u{6ea}', '\u{6ed}'), - ('\u{711}', '\u{711}'), - ('\u{730}', '\u{74a}'), - ('\u{7a6}', '\u{7b0}'), - ('\u{7eb}', '\u{7f3}'), - ('\u{7fd}', '\u{7fd}'), - ('\u{816}', '\u{819}'), - ('\u{81b}', '\u{823}'), - ('\u{825}', '\u{827}'), - ('\u{829}', '\u{82d}'), - ('\u{859}', '\u{85b}'), - ('\u{897}', '\u{89f}'), - ('\u{8ca}', '\u{8e1}'), - ('\u{8e3}', 'ः'), - ('\u{93a}', '\u{93c}'), - ('ा', 'ॏ'), - ('\u{951}', '\u{957}'), - ('\u{962}', '\u{963}'), - ('\u{981}', 'ঃ'), - ('\u{9bc}', '\u{9bc}'), - ('\u{9be}', '\u{9c4}'), - ('ে', 'ৈ'), - ('ো', '\u{9cd}'), - ('\u{9d7}', '\u{9d7}'), - ('\u{9e2}', '\u{9e3}'), - ('\u{9fe}', '\u{9fe}'), - ('\u{a01}', 'ਃ'), - ('\u{a3c}', '\u{a3c}'), - ('ਾ', '\u{a42}'), - ('\u{a47}', '\u{a48}'), - ('\u{a4b}', '\u{a4d}'), - ('\u{a51}', '\u{a51}'), - ('\u{a70}', '\u{a71}'), - ('\u{a75}', '\u{a75}'), - ('\u{a81}', 'ઃ'), - ('\u{abc}', '\u{abc}'), - ('ા', '\u{ac5}'), - ('\u{ac7}', 'ૉ'), - ('ો', '\u{acd}'), - ('\u{ae2}', '\u{ae3}'), - ('\u{afa}', '\u{aff}'), - ('\u{b01}', 'ଃ'), - ('\u{b3c}', '\u{b3c}'), - ('\u{b3e}', '\u{b44}'), - ('େ', 'ୈ'), - ('ୋ', '\u{b4d}'), - ('\u{b55}', '\u{b57}'), - ('\u{b62}', '\u{b63}'), - ('\u{b82}', '\u{b82}'), - 
('\u{bbe}', 'ூ'), - ('ெ', 'ை'), - ('ொ', '\u{bcd}'), - ('\u{bd7}', '\u{bd7}'), - ('\u{c00}', '\u{c04}'), - ('\u{c3c}', '\u{c3c}'), - ('\u{c3e}', 'ౄ'), - ('\u{c46}', '\u{c48}'), - ('\u{c4a}', '\u{c4d}'), - ('\u{c55}', '\u{c56}'), - ('\u{c62}', '\u{c63}'), - ('\u{c81}', 'ಃ'), - ('\u{cbc}', '\u{cbc}'), - ('ಾ', 'ೄ'), - ('\u{cc6}', '\u{cc8}'), - ('\u{cca}', '\u{ccd}'), - ('\u{cd5}', '\u{cd6}'), - ('\u{ce2}', '\u{ce3}'), - ('ೳ', 'ೳ'), - ('\u{d00}', 'ഃ'), - ('\u{d3b}', '\u{d3c}'), - ('\u{d3e}', '\u{d44}'), - ('െ', 'ൈ'), - ('ൊ', '\u{d4d}'), - ('\u{d57}', '\u{d57}'), - ('\u{d62}', '\u{d63}'), - ('\u{d81}', 'ඃ'), - ('\u{dca}', '\u{dca}'), - ('\u{dcf}', '\u{dd4}'), - ('\u{dd6}', '\u{dd6}'), - ('ෘ', '\u{ddf}'), - ('ෲ', 'ෳ'), - ('\u{e31}', '\u{e31}'), - ('\u{e34}', '\u{e3a}'), - ('\u{e47}', '\u{e4e}'), - ('\u{eb1}', '\u{eb1}'), - ('\u{eb4}', '\u{ebc}'), - ('\u{ec8}', '\u{ece}'), - ('\u{f18}', '\u{f19}'), - ('\u{f35}', '\u{f35}'), - ('\u{f37}', '\u{f37}'), - ('\u{f39}', '\u{f39}'), - ('༾', '༿'), - ('\u{f71}', '\u{f84}'), - ('\u{f86}', '\u{f87}'), - ('\u{f8d}', '\u{f97}'), - ('\u{f99}', '\u{fbc}'), - ('\u{fc6}', '\u{fc6}'), - ('ါ', '\u{103e}'), - ('ၖ', '\u{1059}'), - ('\u{105e}', '\u{1060}'), - ('ၢ', 'ၤ'), - ('ၧ', 'ၭ'), - ('\u{1071}', '\u{1074}'), - ('\u{1082}', '\u{108d}'), - ('ႏ', 'ႏ'), - ('ႚ', '\u{109d}'), - ('\u{135d}', '\u{135f}'), - ('\u{1712}', '\u{1715}'), - ('\u{1732}', '\u{1734}'), - ('\u{1752}', '\u{1753}'), - ('\u{1772}', '\u{1773}'), - ('\u{17b4}', '\u{17d3}'), - ('\u{17dd}', '\u{17dd}'), - ('\u{180b}', '\u{180d}'), - ('\u{180f}', '\u{180f}'), - ('\u{1885}', '\u{1886}'), - ('\u{18a9}', '\u{18a9}'), - ('\u{1920}', 'ᤫ'), - ('ᤰ', '\u{193b}'), - ('\u{1a17}', '\u{1a1b}'), - ('ᩕ', '\u{1a5e}'), - ('\u{1a60}', '\u{1a7c}'), - ('\u{1a7f}', '\u{1a7f}'), - ('\u{1ab0}', '\u{1ace}'), - ('\u{1b00}', 'ᬄ'), - ('\u{1b34}', '\u{1b44}'), - ('\u{1b6b}', '\u{1b73}'), - ('\u{1b80}', 'ᮂ'), - ('ᮡ', '\u{1bad}'), - ('\u{1be6}', '\u{1bf3}'), - ('ᰤ', '\u{1c37}'), - ('\u{1cd0}', '\u{1cd2}'), - ('\u{1cd4}', '\u{1ce8}'), - ('\u{1ced}', '\u{1ced}'), - ('\u{1cf4}', '\u{1cf4}'), - ('᳷', '\u{1cf9}'), - ('\u{1dc0}', '\u{1dff}'), - ('\u{200c}', '\u{200c}'), - ('\u{20d0}', '\u{20f0}'), - ('\u{2cef}', '\u{2cf1}'), - ('\u{2d7f}', '\u{2d7f}'), - ('\u{2de0}', '\u{2dff}'), - ('\u{302a}', '\u{302f}'), - ('\u{3099}', '\u{309a}'), - ('\u{a66f}', '\u{a672}'), - ('\u{a674}', '\u{a67d}'), - ('\u{a69e}', '\u{a69f}'), - ('\u{a6f0}', '\u{a6f1}'), - ('\u{a802}', '\u{a802}'), - ('\u{a806}', '\u{a806}'), - ('\u{a80b}', '\u{a80b}'), - ('ꠣ', 'ꠧ'), - ('\u{a82c}', '\u{a82c}'), - ('ꢀ', 'ꢁ'), - ('ꢴ', '\u{a8c5}'), - ('\u{a8e0}', '\u{a8f1}'), - ('\u{a8ff}', '\u{a8ff}'), - ('\u{a926}', '\u{a92d}'), - ('\u{a947}', '\u{a953}'), - ('\u{a980}', 'ꦃ'), - ('\u{a9b3}', '\u{a9c0}'), - ('\u{a9e5}', '\u{a9e5}'), - ('\u{aa29}', '\u{aa36}'), - ('\u{aa43}', '\u{aa43}'), - ('\u{aa4c}', 'ꩍ'), - ('ꩻ', 'ꩽ'), - ('\u{aab0}', '\u{aab0}'), - ('\u{aab2}', '\u{aab4}'), - ('\u{aab7}', '\u{aab8}'), - ('\u{aabe}', '\u{aabf}'), - ('\u{aac1}', '\u{aac1}'), - ('ꫫ', 'ꫯ'), - ('ꫵ', '\u{aaf6}'), - ('ꯣ', 'ꯪ'), - ('꯬', '\u{abed}'), - ('\u{fb1e}', '\u{fb1e}'), - ('\u{fe00}', '\u{fe0f}'), - ('\u{fe20}', '\u{fe2f}'), - ('\u{ff9e}', '\u{ff9f}'), - ('\u{101fd}', '\u{101fd}'), - ('\u{102e0}', '\u{102e0}'), - ('\u{10376}', '\u{1037a}'), - ('\u{10a01}', '\u{10a03}'), - ('\u{10a05}', '\u{10a06}'), - ('\u{10a0c}', '\u{10a0f}'), - ('\u{10a38}', '\u{10a3a}'), - ('\u{10a3f}', '\u{10a3f}'), - ('\u{10ae5}', '\u{10ae6}'), - ('\u{10d24}', '\u{10d27}'), - ('\u{10d69}', '\u{10d6d}'), - ('\u{10eab}', 
'\u{10eac}'), - ('\u{10efc}', '\u{10eff}'), - ('\u{10f46}', '\u{10f50}'), - ('\u{10f82}', '\u{10f85}'), - ('𑀀', '𑀂'), - ('\u{11038}', '\u{11046}'), - ('\u{11070}', '\u{11070}'), - ('\u{11073}', '\u{11074}'), - ('\u{1107f}', '𑂂'), - ('𑂰', '\u{110ba}'), - ('\u{110c2}', '\u{110c2}'), - ('\u{11100}', '\u{11102}'), - ('\u{11127}', '\u{11134}'), - ('𑅅', '𑅆'), - ('\u{11173}', '\u{11173}'), - ('\u{11180}', '𑆂'), - ('𑆳', '\u{111c0}'), - ('\u{111c9}', '\u{111cc}'), - ('𑇎', '\u{111cf}'), - ('𑈬', '\u{11237}'), - ('\u{1123e}', '\u{1123e}'), - ('\u{11241}', '\u{11241}'), - ('\u{112df}', '\u{112ea}'), - ('\u{11300}', '𑌃'), - ('\u{1133b}', '\u{1133c}'), - ('\u{1133e}', '𑍄'), - ('𑍇', '𑍈'), - ('𑍋', '\u{1134d}'), - ('\u{11357}', '\u{11357}'), - ('𑍢', '𑍣'), - ('\u{11366}', '\u{1136c}'), - ('\u{11370}', '\u{11374}'), - ('\u{113b8}', '\u{113c0}'), - ('\u{113c2}', '\u{113c2}'), - ('\u{113c5}', '\u{113c5}'), - ('\u{113c7}', '𑏊'), - ('𑏌', '\u{113d0}'), - ('\u{113d2}', '\u{113d2}'), - ('\u{113e1}', '\u{113e2}'), - ('𑐵', '\u{11446}'), - ('\u{1145e}', '\u{1145e}'), - ('\u{114b0}', '\u{114c3}'), - ('\u{115af}', '\u{115b5}'), - ('𑖸', '\u{115c0}'), - ('\u{115dc}', '\u{115dd}'), - ('𑘰', '\u{11640}'), - ('\u{116ab}', '\u{116b7}'), - ('\u{1171d}', '\u{1172b}'), - ('𑠬', '\u{1183a}'), - ('\u{11930}', '𑤵'), - ('𑤷', '𑤸'), - ('\u{1193b}', '\u{1193e}'), - ('𑥀', '𑥀'), - ('𑥂', '\u{11943}'), - ('𑧑', '\u{119d7}'), - ('\u{119da}', '\u{119e0}'), - ('𑧤', '𑧤'), - ('\u{11a01}', '\u{11a0a}'), - ('\u{11a33}', '𑨹'), - ('\u{11a3b}', '\u{11a3e}'), - ('\u{11a47}', '\u{11a47}'), - ('\u{11a51}', '\u{11a5b}'), - ('\u{11a8a}', '\u{11a99}'), - ('𑰯', '\u{11c36}'), - ('\u{11c38}', '\u{11c3f}'), - ('\u{11c92}', '\u{11ca7}'), - ('𑲩', '\u{11cb6}'), - ('\u{11d31}', '\u{11d36}'), - ('\u{11d3a}', '\u{11d3a}'), - ('\u{11d3c}', '\u{11d3d}'), - ('\u{11d3f}', '\u{11d45}'), - ('\u{11d47}', '\u{11d47}'), - ('𑶊', '𑶎'), - ('\u{11d90}', '\u{11d91}'), - ('𑶓', '\u{11d97}'), - ('\u{11ef3}', '𑻶'), - ('\u{11f00}', '\u{11f01}'), - ('𑼃', '𑼃'), - ('𑼴', '\u{11f3a}'), - ('𑼾', '\u{11f42}'), - ('\u{11f5a}', '\u{11f5a}'), - ('\u{13440}', '\u{13440}'), - ('\u{13447}', '\u{13455}'), - ('\u{1611e}', '\u{1612f}'), - ('\u{16af0}', '\u{16af4}'), - ('\u{16b30}', '\u{16b36}'), - ('\u{16f4f}', '\u{16f4f}'), - ('𖽑', '𖾇'), - ('\u{16f8f}', '\u{16f92}'), - ('\u{16fe4}', '\u{16fe4}'), - ('\u{16ff0}', '\u{16ff1}'), - ('\u{1bc9d}', '\u{1bc9e}'), - ('\u{1cf00}', '\u{1cf2d}'), - ('\u{1cf30}', '\u{1cf46}'), - ('\u{1d165}', '\u{1d169}'), - ('\u{1d16d}', '\u{1d172}'), - ('\u{1d17b}', '\u{1d182}'), - ('\u{1d185}', '\u{1d18b}'), - ('\u{1d1aa}', '\u{1d1ad}'), - ('\u{1d242}', '\u{1d244}'), - ('\u{1da00}', '\u{1da36}'), - ('\u{1da3b}', '\u{1da6c}'), - ('\u{1da75}', '\u{1da75}'), - ('\u{1da84}', '\u{1da84}'), - ('\u{1da9b}', '\u{1da9f}'), - ('\u{1daa1}', '\u{1daaf}'), - ('\u{1e000}', '\u{1e006}'), - ('\u{1e008}', '\u{1e018}'), - ('\u{1e01b}', '\u{1e021}'), - ('\u{1e023}', '\u{1e024}'), - ('\u{1e026}', '\u{1e02a}'), - ('\u{1e08f}', '\u{1e08f}'), - ('\u{1e130}', '\u{1e136}'), - ('\u{1e2ae}', '\u{1e2ae}'), - ('\u{1e2ec}', '\u{1e2ef}'), - ('\u{1e4ec}', '\u{1e4ef}'), - ('\u{1e5ee}', '\u{1e5ef}'), - ('\u{1e8d0}', '\u{1e8d6}'), - ('\u{1e944}', '\u{1e94a}'), - ('🏻', '🏿'), - ('\u{e0020}', '\u{e007f}'), - ('\u{e0100}', '\u{e01ef}'), -]; - -pub const EXTENDNUMLET: &'static [(char, char)] = &[ - ('_', '_'), - ('\u{202f}', '\u{202f}'), - ('‿', '⁀'), - ('⁔', '⁔'), - ('︳', '︴'), - ('﹍', '﹏'), - ('_', '_'), -]; - -pub const FORMAT: &'static [(char, char)] = &[ - ('\u{ad}', '\u{ad}'), - ('\u{61c}', '\u{61c}'), - 
('\u{180e}', '\u{180e}'), - ('\u{200e}', '\u{200f}'), - ('\u{202a}', '\u{202e}'), - ('\u{2060}', '\u{2064}'), - ('\u{2066}', '\u{206f}'), - ('\u{feff}', '\u{feff}'), - ('\u{fff9}', '\u{fffb}'), - ('\u{13430}', '\u{1343f}'), - ('\u{1bca0}', '\u{1bca3}'), - ('\u{1d173}', '\u{1d17a}'), - ('\u{e0001}', '\u{e0001}'), -]; - -pub const HEBREW_LETTER: &'static [(char, char)] = &[ - ('א', 'ת'), - ('ׯ', 'ײ'), - ('יִ', 'יִ'), - ('ײַ', 'ﬨ'), - ('שׁ', 'זּ'), - ('טּ', 'לּ'), - ('מּ', 'מּ'), - ('נּ', 'סּ'), - ('ףּ', 'פּ'), - ('צּ', 'ﭏ'), -]; - -pub const KATAKANA: &'static [(char, char)] = &[ - ('〱', '〵'), - ('゛', '゜'), - ('゠', 'ヺ'), - ('ー', 'ヿ'), - ('ㇰ', 'ㇿ'), - ('㋐', '㋾'), - ('㌀', '㍗'), - ('ヲ', 'ン'), - ('𚿰', '𚿳'), - ('𚿵', '𚿻'), - ('𚿽', '𚿾'), - ('𛀀', '𛀀'), - ('𛄠', '𛄢'), - ('𛅕', '𛅕'), - ('𛅤', '𛅧'), -]; - -pub const LF: &'static [(char, char)] = &[('\n', '\n')]; - -pub const MIDLETTER: &'static [(char, char)] = &[ - (':', ':'), - ('·', '·'), - ('·', '·'), - ('՟', '՟'), - ('״', '״'), - ('‧', '‧'), - ('︓', '︓'), - ('﹕', '﹕'), - (':', ':'), -]; - -pub const MIDNUM: &'static [(char, char)] = &[ - (',', ','), - (';', ';'), - (';', ';'), - ('։', '։'), - ('،', '؍'), - ('٬', '٬'), - ('߸', '߸'), - ('⁄', '⁄'), - ('﹐', '﹐'), - ('﹔', '﹔'), - (',', ','), - (';', ';'), -]; - -pub const MIDNUMLET: &'static [(char, char)] = &[ - ('.', '.'), - ('‘', '’'), - ('․', '․'), - ('﹒', '﹒'), - (''', '''), - ('.', '.'), -]; - -pub const NEWLINE: &'static [(char, char)] = - &[('\u{b}', '\u{c}'), ('\u{85}', '\u{85}'), ('\u{2028}', '\u{2029}')]; - -pub const NUMERIC: &'static [(char, char)] = &[ - ('0', '9'), - ('\u{600}', '\u{605}'), - ('٠', '٩'), - ('٫', '٫'), - ('\u{6dd}', '\u{6dd}'), - ('۰', '۹'), - ('߀', '߉'), - ('\u{890}', '\u{891}'), - ('\u{8e2}', '\u{8e2}'), - ('०', '९'), - ('০', '৯'), - ('੦', '੯'), - ('૦', '૯'), - ('୦', '୯'), - ('௦', '௯'), - ('౦', '౯'), - ('೦', '೯'), - ('൦', '൯'), - ('෦', '෯'), - ('๐', '๙'), - ('໐', '໙'), - ('༠', '༩'), - ('၀', '၉'), - ('႐', '႙'), - ('០', '៩'), - ('᠐', '᠙'), - ('᥆', '᥏'), - ('᧐', '᧚'), - ('᪀', '᪉'), - ('᪐', '᪙'), - ('᭐', '᭙'), - ('᮰', '᮹'), - ('᱀', '᱉'), - ('᱐', '᱙'), - ('꘠', '꘩'), - ('꣐', '꣙'), - ('꤀', '꤉'), - ('꧐', '꧙'), - ('꧰', '꧹'), - ('꩐', '꩙'), - ('꯰', '꯹'), - ('0', '9'), - ('𐒠', '𐒩'), - ('𐴰', '𐴹'), - ('𐵀', '𐵉'), - ('𑁦', '𑁯'), - ('\u{110bd}', '\u{110bd}'), - ('\u{110cd}', '\u{110cd}'), - ('𑃰', '𑃹'), - ('𑄶', '𑄿'), - ('𑇐', '𑇙'), - ('𑋰', '𑋹'), - ('𑑐', '𑑙'), - ('𑓐', '𑓙'), - ('𑙐', '𑙙'), - ('𑛀', '𑛉'), - ('𑛐', '𑛣'), - ('𑜰', '𑜹'), - ('𑣠', '𑣩'), - ('𑥐', '𑥙'), - ('𑯰', '𑯹'), - ('𑱐', '𑱙'), - ('𑵐', '𑵙'), - ('𑶠', '𑶩'), - ('𑽐', '𑽙'), - ('𖄰', '𖄹'), - ('𖩠', '𖩩'), - ('𖫀', '𖫉'), - ('𖭐', '𖭙'), - ('𖵰', '𖵹'), - ('𜳰', '𜳹'), - ('𝟎', '𝟿'), - ('𞅀', '𞅉'), - ('𞋰', '𞋹'), - ('𞓰', '𞓹'), - ('𞗱', '𞗺'), - ('𞥐', '𞥙'), - ('🯰', '🯹'), -]; - -pub const REGIONAL_INDICATOR: &'static [(char, char)] = &[('🇦', '🇿')]; - -pub const SINGLE_QUOTE: &'static [(char, char)] = &[('\'', '\'')]; - -pub const WSEGSPACE: &'static [(char, char)] = &[ - (' ', ' '), - ('\u{1680}', '\u{1680}'), - ('\u{2000}', '\u{2006}'), - ('\u{2008}', '\u{200a}'), - ('\u{205f}', '\u{205f}'), - ('\u{3000}', '\u{3000}'), -]; - -pub const ZWJ: &'static [(char, char)] = &[('\u{200d}', '\u{200d}')]; diff --git a/vendor/regex-syntax/src/utf8.rs b/vendor/regex-syntax/src/utf8.rs deleted file mode 100644 index 537035ed1d99b3..00000000000000 --- a/vendor/regex-syntax/src/utf8.rs +++ /dev/null @@ -1,592 +0,0 @@ -/*! -Converts ranges of Unicode scalar values to equivalent ranges of UTF-8 bytes. 
- -This is sub-module is useful for constructing byte based automatons that need -to embed UTF-8 decoding. The most common use of this module is in conjunction -with the [`hir::ClassUnicodeRange`](crate::hir::ClassUnicodeRange) type. - -See the documentation on the `Utf8Sequences` iterator for more details and -an example. - -# Wait, what is this? - -This is simplest to explain with an example. Let's say you wanted to test -whether a particular byte sequence was a Cyrillic character. One possible -scalar value range is `[0400-04FF]`. The set of allowed bytes for this -range can be expressed as a sequence of byte ranges: - -```text -[D0-D3][80-BF] -``` - -This is simple enough: simply encode the boundaries, `0400` encodes to -`D0 80` and `04FF` encodes to `D3 BF`, and create ranges from each -corresponding pair of bytes: `D0` to `D3` and `80` to `BF`. - -However, what if you wanted to add the Cyrillic Supplementary characters to -your range? Your range might then become `[0400-052F]`. The same procedure -as above doesn't quite work because `052F` encodes to `D4 AF`. The byte ranges -you'd get from the previous transformation would be `[D0-D4][80-AF]`. However, -this isn't quite correct because this range doesn't capture many characters, -for example, `04FF` (because its last byte, `BF` isn't in the range `80-AF`). - -Instead, you need multiple sequences of byte ranges: - -```text -[D0-D3][80-BF] # matches codepoints 0400-04FF -[D4][80-AF] # matches codepoints 0500-052F -``` - -This gets even more complicated if you want bigger ranges, particularly if -they naively contain surrogate codepoints. For example, the sequence of byte -ranges for the basic multilingual plane (`[0000-FFFF]`) look like this: - -```text -[0-7F] -[C2-DF][80-BF] -[E0][A0-BF][80-BF] -[E1-EC][80-BF][80-BF] -[ED][80-9F][80-BF] -[EE-EF][80-BF][80-BF] -``` - -Note that the byte ranges above will *not* match any erroneous encoding of -UTF-8, including encodings of surrogate codepoints. - -And, of course, for all of Unicode (`[000000-10FFFF]`): - -```text -[0-7F] -[C2-DF][80-BF] -[E0][A0-BF][80-BF] -[E1-EC][80-BF][80-BF] -[ED][80-9F][80-BF] -[EE-EF][80-BF][80-BF] -[F0][90-BF][80-BF][80-BF] -[F1-F3][80-BF][80-BF][80-BF] -[F4][80-8F][80-BF][80-BF] -``` - -This module automates the process of creating these byte ranges from ranges of -Unicode scalar values. - -# Lineage - -I got the idea and general implementation strategy from Russ Cox in his -[article on regexps](https://web.archive.org/web/20160404141123/https://swtch.com/~rsc/regexp/regexp3.html) and RE2. -Russ Cox got it from Ken Thompson's `grep` (no source, folk lore?). -I also got the idea from -[Lucene](https://github.com/apache/lucene-solr/blob/ae93f4e7ac6a3908046391de35d4f50a0d3c59ca/lucene/core/src/java/org/apache/lucene/util/automaton/UTF32ToUTF8.java), -which uses it for executing automata on their term index. -*/ - -use core::{char, fmt, iter::FusedIterator, slice}; - -use alloc::{vec, vec::Vec}; - -const MAX_UTF8_BYTES: usize = 4; - -/// Utf8Sequence represents a sequence of byte ranges. -/// -/// To match a Utf8Sequence, a candidate byte sequence must match each -/// successive range. -/// -/// For example, if there are two ranges, `[C2-DF][80-BF]`, then the byte -/// sequence `\xDD\x61` would not match because `0x61 < 0x80`. -#[derive(Copy, Clone, Eq, PartialEq, PartialOrd, Ord)] -pub enum Utf8Sequence { - /// One byte range. - One(Utf8Range), - /// Two successive byte ranges. - Two([Utf8Range; 2]), - /// Three successive byte ranges. 
- Three([Utf8Range; 3]), - /// Four successive byte ranges. - Four([Utf8Range; 4]), -} - -impl Utf8Sequence { - /// Creates a new UTF-8 sequence from the encoded bytes of a scalar value - /// range. - /// - /// This assumes that `start` and `end` have the same length. - fn from_encoded_range(start: &[u8], end: &[u8]) -> Self { - assert_eq!(start.len(), end.len()); - match start.len() { - 2 => Utf8Sequence::Two([ - Utf8Range::new(start[0], end[0]), - Utf8Range::new(start[1], end[1]), - ]), - 3 => Utf8Sequence::Three([ - Utf8Range::new(start[0], end[0]), - Utf8Range::new(start[1], end[1]), - Utf8Range::new(start[2], end[2]), - ]), - 4 => Utf8Sequence::Four([ - Utf8Range::new(start[0], end[0]), - Utf8Range::new(start[1], end[1]), - Utf8Range::new(start[2], end[2]), - Utf8Range::new(start[3], end[3]), - ]), - n => unreachable!("invalid encoded length: {n}"), - } - } - - /// Returns the underlying sequence of byte ranges as a slice. - pub fn as_slice(&self) -> &[Utf8Range] { - use self::Utf8Sequence::*; - match *self { - One(ref r) => slice::from_ref(r), - Two(ref r) => &r[..], - Three(ref r) => &r[..], - Four(ref r) => &r[..], - } - } - - /// Returns the number of byte ranges in this sequence. - /// - /// The length is guaranteed to be in the closed interval `[1, 4]`. - pub fn len(&self) -> usize { - self.as_slice().len() - } - - /// Reverses the ranges in this sequence. - /// - /// For example, if this corresponds to the following sequence: - /// - /// ```text - /// [D0-D3][80-BF] - /// ``` - /// - /// Then after reversal, it will be - /// - /// ```text - /// [80-BF][D0-D3] - /// ``` - /// - /// This is useful when one is constructing a UTF-8 automaton to match - /// character classes in reverse. - pub fn reverse(&mut self) { - match *self { - Utf8Sequence::One(_) => {} - Utf8Sequence::Two(ref mut x) => x.reverse(), - Utf8Sequence::Three(ref mut x) => x.reverse(), - Utf8Sequence::Four(ref mut x) => x.reverse(), - } - } - - /// Returns true if and only if a prefix of `bytes` matches this sequence - /// of byte ranges. - pub fn matches(&self, bytes: &[u8]) -> bool { - if bytes.len() < self.len() { - return false; - } - for (&b, r) in bytes.iter().zip(self) { - if !r.matches(b) { - return false; - } - } - true - } -} - -impl<'a> IntoIterator for &'a Utf8Sequence { - type IntoIter = slice::Iter<'a, Utf8Range>; - type Item = &'a Utf8Range; - - fn into_iter(self) -> Self::IntoIter { - self.as_slice().iter() - } -} - -impl fmt::Debug for Utf8Sequence { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - use self::Utf8Sequence::*; - match *self { - One(ref r) => write!(f, "{r:?}"), - Two(ref r) => write!(f, "{:?}{:?}", r[0], r[1]), - Three(ref r) => write!(f, "{:?}{:?}{:?}", r[0], r[1], r[2]), - Four(ref r) => { - write!(f, "{:?}{:?}{:?}{:?}", r[0], r[1], r[2], r[3]) - } - } - } -} - -/// A single inclusive range of UTF-8 bytes. -#[derive(Clone, Copy, Eq, PartialEq, PartialOrd, Ord)] -pub struct Utf8Range { - /// Start of byte range (inclusive). - pub start: u8, - /// End of byte range (inclusive). - pub end: u8, -} - -impl Utf8Range { - fn new(start: u8, end: u8) -> Self { - Utf8Range { start, end } - } - - /// Returns true if and only if the given byte is in this range. 
- pub fn matches(&self, b: u8) -> bool { - self.start <= b && b <= self.end - } -} - -impl fmt::Debug for Utf8Range { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - if self.start == self.end { - write!(f, "[{:X}]", self.start) - } else { - write!(f, "[{:X}-{:X}]", self.start, self.end) - } - } -} - -/// An iterator over ranges of matching UTF-8 byte sequences. -/// -/// The iteration represents an alternation of comprehensive byte sequences -/// that match precisely the set of UTF-8 encoded scalar values. -/// -/// A byte sequence corresponds to one of the scalar values in the range given -/// if and only if it completely matches exactly one of the sequences of byte -/// ranges produced by this iterator. -/// -/// Each sequence of byte ranges matches a unique set of bytes. That is, no two -/// sequences will match the same bytes. -/// -/// # Example -/// -/// This shows how to match an arbitrary byte sequence against a range of -/// scalar values. -/// -/// ```rust -/// use regex_syntax::utf8::{Utf8Sequences, Utf8Sequence}; -/// -/// fn matches(seqs: &[Utf8Sequence], bytes: &[u8]) -> bool { -/// for range in seqs { -/// if range.matches(bytes) { -/// return true; -/// } -/// } -/// false -/// } -/// -/// // Test the basic multilingual plane. -/// let seqs: Vec<_> = Utf8Sequences::new('\u{0}', '\u{FFFF}').collect(); -/// -/// // UTF-8 encoding of 'a'. -/// assert!(matches(&seqs, &[0x61])); -/// // UTF-8 encoding of '☃' (`\u{2603}`). -/// assert!(matches(&seqs, &[0xE2, 0x98, 0x83])); -/// // UTF-8 encoding of `\u{10348}` (outside the BMP). -/// assert!(!matches(&seqs, &[0xF0, 0x90, 0x8D, 0x88])); -/// // Tries to match against a UTF-8 encoding of a surrogate codepoint, -/// // which is invalid UTF-8, and therefore fails, despite the fact that -/// // the corresponding codepoint (0xD800) falls in the range given. -/// assert!(!matches(&seqs, &[0xED, 0xA0, 0x80])); -/// // And fails against plain old invalid UTF-8. -/// assert!(!matches(&seqs, &[0xFF, 0xFF])); -/// ``` -/// -/// If this example seems circuitous, that's because it is! It's meant to be -/// illustrative. In practice, you could just try to decode your byte sequence -/// and compare it with the scalar value range directly. However, this is not -/// always possible (for example, in a byte based automaton). -#[derive(Debug)] -pub struct Utf8Sequences { - range_stack: Vec, -} - -impl Utf8Sequences { - /// Create a new iterator over UTF-8 byte ranges for the scalar value range - /// given. - pub fn new(start: char, end: char) -> Self { - let range = - ScalarRange { start: u32::from(start), end: u32::from(end) }; - Utf8Sequences { range_stack: vec![range] } - } - - /// reset resets the scalar value range. - /// Any existing state is cleared, but resources may be reused. - /// - /// N.B. Benchmarks say that this method is dubious. 
- #[doc(hidden)] - pub fn reset(&mut self, start: char, end: char) { - self.range_stack.clear(); - self.push(u32::from(start), u32::from(end)); - } - - fn push(&mut self, start: u32, end: u32) { - self.range_stack.push(ScalarRange { start, end }); - } -} - -struct ScalarRange { - start: u32, - end: u32, -} - -impl fmt::Debug for ScalarRange { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "ScalarRange({:X}, {:X})", self.start, self.end) - } -} - -impl Iterator for Utf8Sequences { - type Item = Utf8Sequence; - - fn next(&mut self) -> Option { - 'TOP: while let Some(mut r) = self.range_stack.pop() { - 'INNER: loop { - if let Some((r1, r2)) = r.split() { - self.push(r2.start, r2.end); - r.start = r1.start; - r.end = r1.end; - continue 'INNER; - } - if !r.is_valid() { - continue 'TOP; - } - for i in 1..MAX_UTF8_BYTES { - let max = max_scalar_value(i); - if r.start <= max && max < r.end { - self.push(max + 1, r.end); - r.end = max; - continue 'INNER; - } - } - if let Some(ascii_range) = r.as_ascii() { - return Some(Utf8Sequence::One(ascii_range)); - } - for i in 1..MAX_UTF8_BYTES { - let m = (1 << (6 * i)) - 1; - if (r.start & !m) != (r.end & !m) { - if (r.start & m) != 0 { - self.push((r.start | m) + 1, r.end); - r.end = r.start | m; - continue 'INNER; - } - if (r.end & m) != m { - self.push(r.end & !m, r.end); - r.end = (r.end & !m) - 1; - continue 'INNER; - } - } - } - let mut start = [0; MAX_UTF8_BYTES]; - let mut end = [0; MAX_UTF8_BYTES]; - let n = r.encode(&mut start, &mut end); - return Some(Utf8Sequence::from_encoded_range( - &start[0..n], - &end[0..n], - )); - } - } - None - } -} - -impl FusedIterator for Utf8Sequences {} - -impl ScalarRange { - /// split splits this range if it overlaps with a surrogate codepoint. - /// - /// Either or both ranges may be invalid. - fn split(&self) -> Option<(ScalarRange, ScalarRange)> { - if self.start < 0xE000 && self.end > 0xD7FF { - Some(( - ScalarRange { start: self.start, end: 0xD7FF }, - ScalarRange { start: 0xE000, end: self.end }, - )) - } else { - None - } - } - - /// is_valid returns true if and only if start <= end. - fn is_valid(&self) -> bool { - self.start <= self.end - } - - /// as_ascii returns this range as a Utf8Range if and only if all scalar - /// values in this range can be encoded as a single byte. - fn as_ascii(&self) -> Option { - if self.is_ascii() { - let start = u8::try_from(self.start).unwrap(); - let end = u8::try_from(self.end).unwrap(); - Some(Utf8Range::new(start, end)) - } else { - None - } - } - - /// is_ascii returns true if the range is ASCII only (i.e., takes a single - /// byte to encode any scalar value). - fn is_ascii(&self) -> bool { - self.is_valid() && self.end <= 0x7f - } - - /// encode writes the UTF-8 encoding of the start and end of this range - /// to the corresponding destination slices, and returns the number of - /// bytes written. - /// - /// The slices should have room for at least `MAX_UTF8_BYTES`. 
- fn encode(&self, start: &mut [u8], end: &mut [u8]) -> usize { - let cs = char::from_u32(self.start).unwrap(); - let ce = char::from_u32(self.end).unwrap(); - let ss = cs.encode_utf8(start); - let se = ce.encode_utf8(end); - assert_eq!(ss.len(), se.len()); - ss.len() - } -} - -fn max_scalar_value(nbytes: usize) -> u32 { - match nbytes { - 1 => 0x007F, - 2 => 0x07FF, - 3 => 0xFFFF, - 4 => 0x0010_FFFF, - _ => unreachable!("invalid UTF-8 byte sequence size"), - } -} - -#[cfg(test)] -mod tests { - use core::char; - - use alloc::{vec, vec::Vec}; - - use crate::utf8::{Utf8Range, Utf8Sequences}; - - fn rutf8(s: u8, e: u8) -> Utf8Range { - Utf8Range::new(s, e) - } - - fn never_accepts_surrogate_codepoints(start: char, end: char) { - for cp in 0xD800..0xE000 { - let buf = encode_surrogate(cp); - for r in Utf8Sequences::new(start, end) { - if r.matches(&buf) { - panic!( - "Sequence ({:X}, {:X}) contains range {:?}, \ - which matches surrogate code point {:X} \ - with encoded bytes {:?}", - u32::from(start), - u32::from(end), - r, - cp, - buf, - ); - } - } - } - } - - #[test] - fn codepoints_no_surrogates() { - never_accepts_surrogate_codepoints('\u{0}', '\u{FFFF}'); - never_accepts_surrogate_codepoints('\u{0}', '\u{10FFFF}'); - never_accepts_surrogate_codepoints('\u{0}', '\u{10FFFE}'); - never_accepts_surrogate_codepoints('\u{80}', '\u{10FFFF}'); - never_accepts_surrogate_codepoints('\u{D7FF}', '\u{E000}'); - } - - #[test] - fn single_codepoint_one_sequence() { - // Tests that every range of scalar values that contains a single - // scalar value is recognized by one sequence of byte ranges. - for i in 0x0..=0x0010_FFFF { - let c = match char::from_u32(i) { - None => continue, - Some(c) => c, - }; - let seqs: Vec<_> = Utf8Sequences::new(c, c).collect(); - assert_eq!(seqs.len(), 1); - } - } - - #[test] - fn bmp() { - use crate::utf8::Utf8Sequence::*; - - let seqs = Utf8Sequences::new('\u{0}', '\u{FFFF}').collect::>(); - assert_eq!( - seqs, - vec![ - One(rutf8(0x0, 0x7F)), - Two([rutf8(0xC2, 0xDF), rutf8(0x80, 0xBF)]), - Three([ - rutf8(0xE0, 0xE0), - rutf8(0xA0, 0xBF), - rutf8(0x80, 0xBF) - ]), - Three([ - rutf8(0xE1, 0xEC), - rutf8(0x80, 0xBF), - rutf8(0x80, 0xBF) - ]), - Three([ - rutf8(0xED, 0xED), - rutf8(0x80, 0x9F), - rutf8(0x80, 0xBF) - ]), - Three([ - rutf8(0xEE, 0xEF), - rutf8(0x80, 0xBF), - rutf8(0x80, 0xBF) - ]), - ] - ); - } - - #[test] - fn reverse() { - use crate::utf8::Utf8Sequence::*; - - let mut s = One(rutf8(0xA, 0xB)); - s.reverse(); - assert_eq!(s.as_slice(), &[rutf8(0xA, 0xB)]); - - let mut s = Two([rutf8(0xA, 0xB), rutf8(0xB, 0xC)]); - s.reverse(); - assert_eq!(s.as_slice(), &[rutf8(0xB, 0xC), rutf8(0xA, 0xB)]); - - let mut s = Three([rutf8(0xA, 0xB), rutf8(0xB, 0xC), rutf8(0xC, 0xD)]); - s.reverse(); - assert_eq!( - s.as_slice(), - &[rutf8(0xC, 0xD), rutf8(0xB, 0xC), rutf8(0xA, 0xB)] - ); - - let mut s = Four([ - rutf8(0xA, 0xB), - rutf8(0xB, 0xC), - rutf8(0xC, 0xD), - rutf8(0xD, 0xE), - ]); - s.reverse(); - assert_eq!( - s.as_slice(), - &[ - rutf8(0xD, 0xE), - rutf8(0xC, 0xD), - rutf8(0xB, 0xC), - rutf8(0xA, 0xB) - ] - ); - } - - fn encode_surrogate(cp: u32) -> [u8; 3] { - const TAG_CONT: u8 = 0b1000_0000; - const TAG_THREE_B: u8 = 0b1110_0000; - - assert!(0xD800 <= cp && cp < 0xE000); - let mut dst = [0; 3]; - dst[0] = u8::try_from(cp >> 12 & 0x0F).unwrap() | TAG_THREE_B; - dst[1] = u8::try_from(cp >> 6 & 0x3F).unwrap() | TAG_CONT; - dst[2] = u8::try_from(cp & 0x3F).unwrap() | TAG_CONT; - dst - } -} diff --git a/vendor/regex-syntax/test b/vendor/regex-syntax/test deleted file 
mode 100755 index 8626c3bfccbabd..00000000000000 --- a/vendor/regex-syntax/test +++ /dev/null @@ -1,30 +0,0 @@ -#!/bin/bash - -set -e - -# cd to the directory containing this crate's Cargo.toml so that we don't need -# to pass --manifest-path to every `cargo` command. -cd "$(dirname "$0")" - -# This is a convenience script for running a broad swath of the syntax tests. -echo "===== DEFAULT FEATURES ===" -cargo test - -features=( - std - unicode - unicode-age - unicode-bool - unicode-case - unicode-gencat - unicode-perl - unicode-script - unicode-segment -) -for f in "${features[@]}"; do - echo "=== FEATURE: $f ===" - # We only run library tests because I couldn't figure out how to easily - # make doc tests run in 'no_std' mode. In particular, without the Error - # trait, using '?' in doc tests seems tricky. - cargo test --no-default-features --lib --features "$f" -done diff --git a/vendor/regex/.cargo-checksum.json b/vendor/regex/.cargo-checksum.json deleted file mode 100644 index 334adc622d2168..00000000000000 --- a/vendor/regex/.cargo-checksum.json +++ /dev/null @@ -1 +0,0 @@ -{"files":{".cargo_vcs_info.json":"5b9b771da9f8ff576a830ff12bc819aa5fad56cb5c079f3d87cb5ddf7e79b9b2",".vim/coc-settings.json":"87b0e2edd6fc8170b3f918bfbf92a78cd77a15033f718a8733c6d6277bf3e1fe","CHANGELOG.md":"154fdf1ae0e8cbc50e8cb8457f61c403c5d9a1a53cef78f19e48660af6e5d22a","Cargo.lock":"b089faa224c30c8416766f4289c9c4319a2cf88c1884d20754e9845b9e1e0c71","Cargo.toml":"709b6ec1da93140957cb14d7b57367e0aa180c8efc26368f761c34682f67f0bc","Cargo.toml.orig":"8501f3490d81b4d822457510173df5d2edfbdd70851ff0fb798681adeaf6b9ae","Cross.toml":"4a11d6c63ecc919016b59fa0fe23674eb05682fb91ffbe677a4a7077e9e684ff","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"6485b8ed310d3f0340bf1ad1f47645069ce4069dcc6bb46c7d5c6faf41de1fdb","README.md":"2e5ffce9b5781a2c286517f0fb81e7e00d9736ffa938c9a34b5e92f30352a115","UNICODE.md":"91ee848bf40a67626940d242d3ef05e90c7d5ef72d23bcf626033b5394aee0ea","bench/README.md":"0aee42206b0e5edcb400a11faa2c536f512bcc6086e5ffdda001b9bfe4d19808","rustfmt.toml":"1ca600239a27401c4a43f363cf3f38183a212affc1f31bff3ae93234bbaec228","src/builders.rs":"d08f5867d8b994395546e318860d05e00cd70347223505b43d578b8d1477fe8f","src/bytes.rs":"cce2b7012f5896cf82fc3086bf8128dc9efe2b69bf6917d041c1a171eabacdc0","src/error.rs":"362c126a701852b355906acdb2c19ee31230570a408bbe52deb2803a1dc77039","src/find_byte.rs":"e17cd3b765467685946707840b92ea4e37d3c11081fbf316174a15858cd4bd99","src/lib.rs":"033460754d7a51fb9fa90ad096f76dbaaf10dc4c49f1195bb088fe23d35ded75","src/pattern.rs":"53971d02dde4f8e69055c36e7c56c6c872f0302161bf0977a02b97dc8a152d46","src/regex/bytes.rs":"fae9e125ff320e85fe5e59e2a32ae24d85f6ca9f38c737c4e929a8376b9b53b0","src/regex/mod.rs":"c220b6dd7a5e1945f8e743d1dcd796c5f782c91b0c34eb9915c588174a517fe8","src/regex/string.rs":"9f7686e10535fe385a767063132d39ee1a1af1a20a119d78df479f110822e274","src/regexset/bytes.rs":"25c8d896e4b9caf627cce46e3c305d2e640aeeacea96c40526699f86960d1868","src/regexset/mod.rs":"c220b6dd7a5e1945f8e743d1dcd796c5f782c91b0c34eb9915c588174a517fe8","src/regexset/string.rs":"ac3fc9c8d2d58379e63bcd92ab2f8ee1c32a1210dceec63925d0c23f1d9dfedd","test":"c0122c20a2c9b7ba6e9a8aaeb2b7d9910315ef31063539949f28d9501ef3193c","testdata/README.md":"8c06d771da52048ac5b67de8b61f386a4aa70c904a7da4efec1aa86c710b0be5","testdata/anchored.toml":"7a1b5cd81deed2099796a451bf764a3f9bd21f0d60c0fa46accd3a35666866f2","testdata/bytes.toml":"1d84179165fd25f3b94bd2bfbeb43fc8a162041f7bf98b717e0f85cef7
fb652b","testdata/crazy.toml":"a146e2d2e23f1a57168979d9b1fc193c2ba38dca66294b61140d6d2a2958ec86","testdata/crlf.toml":"d19cf22756434d145dd20946c00af01c102a556a252070405c3c8294129d9ece","testdata/earliest.toml":"d561e643623ee1889b5b049fdcf3c7cb71b0c746d7eb822ddbd09d0acda2620b","testdata/empty.toml":"738dbe92fbd8971385a1cf3affb0e956e5b692c858b9b48439d718f10801c08e","testdata/expensive.toml":"5ce2f60209c99cdd2cdcb9d3069d1d5ca13d5e08a85e913efe57267b2f5f0e9d","testdata/flags.toml":"9a7e001808195c84f2a7d3e18bc0a82c7386e60f03a616e99af00c3f7f2c3fd4","testdata/fowler/basic.toml":"a82c7e233451cd7cfe0c3d817f3a1ab44478bb81ae62432efdd515fa8370275e","testdata/fowler/dat/README":"e53d6c37b5931cb26dc9ae4c40358eea63f7a469c4db6ca816c072a8ced6a61a","testdata/fowler/dat/basic.dat":"b1126dda59075c08f574987090273c9977790115f1e1941d0708c0b82b256905","testdata/fowler/dat/nullsubexpr.dat":"f880940907754dbfddee886605b65f9e743a820411c3955b31ddeb494d07e839","testdata/fowler/dat/repetition.dat":"2b8b2b191229a804fba49e6b888d8194bf488f7744057b550da9d95a2aa6617a","testdata/fowler/nullsubexpr.toml":"cd812e7e8fa0469253b34f0db93b5883c9d8b9740fc4f7825a38e7df880a4eed","testdata/fowler/repetition.toml":"8c09164f064b3db81309c53483863bdcec493781644de162416e9f485e772615","testdata/iter.toml":"6875460302974a5b3073a7304a865c45aba9653c54afea2c4d26e1ea248a81f7","testdata/leftmost-all.toml":"903bfbeff888b7664296f4d5aa367ce53d1dafe249ab0a3359223ae94d596396","testdata/line-terminator.toml":"02148068137b69d95587966917bdf0697bf7eb41ad6d47387f2eb30f67d04fd9","testdata/misc.toml":"32c9591655c6fb118dfefcb4de49a04820a63cb960533dfc2538cdaabf4f4047","testdata/multiline.toml":"eb07cf5427e6ddbcf61f4cc64c2d74ff41b5ef75ef857959651b20196f3cd157","testdata/no-unicode.toml":"d209da04506900fd5f69e48170cddaad0702355ac6176c3a75ab3ff96974457c","testdata/overlapping.toml":"5d96497a7233566d40b05ba22047e483fa8662e45515a9be86da45cf6c28703a","testdata/regex-lite.toml":"fecca7cc8c9cea2e1f84f846a89fd9b3ca7011c83698211a2eeda8924deb900c","testdata/regression.toml":"6006ef4fcfbfd7155ce5ce8b8427904f7261c5549396f20cb065c0294733686d","testdata/set.toml":"dfd265dc1aee80026e881616840df0236ae9abf12467d7ec0e141a52c236128c","testdata/substring.toml":"48122d9f3477ed81f95e3ad42c06e9bb25f849b66994601a75ceae0693b81866","testdata/unicode.toml":"7e4b013039b0cdd85fa73f32d15d096182fe901643d4e40c0910087a736cd46d","testdata/utf8.toml":"2eabce0582bcacb2073e08bbe7ca413f096d14d06e917b107949691e24f84b20","testdata/word-boundary-special.toml":"7d0ea2f796478d1ca2a6954430cb1cfbd04031a182f8611cb50a7c73e443ce33","testdata/word-boundary.toml":"51bc1c498ab825420340a2dd3e6623de4054937ba6d5020ff8cd14b1c1e45271","tests/lib.rs":"9bffc95568c09ac95b6a3e7ca64b6e858a0552d0c0b0fca2c447da3b9c0a45a2","tests/misc.rs":"1aeadbeb8860bd5f5b99a0adb459baf77dd3af4f23ac6c56ecf537f793407cca","tests/regression.rs":"3490aac99fdbf3f0949ba1f338d5184a84b505ebd96d0b6d6145c610587aa60b","tests/regression_fuzz.rs":"57e0bcba0fdfa7797865e35ae547cd7fe1c6132b80a7bfdfb06eb053a568b00d","tests/replace.rs":"78ff9bf7f78783ad83a78041bb7ee0705c7efc85b4d12301581d0ce5b2a59325","tests/searcher.rs":"04152e5c86431deec0c196d2564a11bc4ec36f14c77e8c16a2f9d1cbc9fc574e","tests/suite_bytes.rs":"75fb0a332527c36d31e126f6032c8ccf1f81ba47ee785affa834404bc1a79f4c","tests/suite_bytes_set.rs":"db85513e87429fc68904270a0f414e75ae0b7c6b7deb1c66f05eb4f98b09c67a","tests/suite_string.rs":"249d707dba99d23ada40558a7526f028c1d3fdf715d3866a106f3435da01bf66","tests/suite_string_set.rs":"c839fb3c08a23348230591a49118406373353bc6c9a87528f36e4e9635e7b9ac"},"package":
"843bc0191f75f3e22651ae5f1e72939ab2f72a4bc30fa80a066bd66edefc24d4"} \ No newline at end of file diff --git a/vendor/regex/.cargo_vcs_info.json b/vendor/regex/.cargo_vcs_info.json deleted file mode 100644 index de15d531d9c53f..00000000000000 --- a/vendor/regex/.cargo_vcs_info.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "git": { - "sha1": "5ea3eb1e95f0338e283f5f0b4681f0891a1cd836" - }, - "path_in_vcs": "" -} \ No newline at end of file diff --git a/vendor/regex/.vim/coc-settings.json b/vendor/regex/.vim/coc-settings.json deleted file mode 100644 index d75676750938f6..00000000000000 --- a/vendor/regex/.vim/coc-settings.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "rust-analyzer.linkedProjects": [ - "fuzz/Cargo.toml", - "Cargo.toml" - ] -} diff --git a/vendor/regex/CHANGELOG.md b/vendor/regex/CHANGELOG.md deleted file mode 100644 index 1bd16a1e53deea..00000000000000 --- a/vendor/regex/CHANGELOG.md +++ /dev/null @@ -1,1742 +0,0 @@ -1.12.2 (2025-10-13) -=================== -This release fixes a `cargo doc` breakage on nightly when `--cfg docsrs` is -enabled. This caused documentation to fail to build on docs.rs. - -Bug fixes: - -* [BUG #1305](https://github.com/rust-lang/regex/issues/1305): -Switches the `doc_auto_cfg` feature to `doc_cfg` on nightly for docs.rs builds. - - -1.12.1 (2025-10-10) -=================== -This release makes a bug fix in the new `regex::Captures::get_match` API -introduced in `1.12.0`. There was an oversight with the lifetime parameter -for the `Match` returned. This is technically a breaking change, but given -that it was caught almost immediately and I've yanked the `1.12.0` release, -I think this is fine. - - -1.12.0 (2025-10-10) -=================== -This release contains a smattering of bug fixes, a fix for excessive memory -consumption in some cases and a new `regex::Captures::get_match` API. - -Improvements: - -* [FEATURE #1146](https://github.com/rust-lang/regex/issues/1146): -Add `Capture::get_match` for returning the overall match without `unwrap()`. - -Bug fixes: - -* [BUG #1083](https://github.com/rust-lang/regex/issues/1083): -Fixes a panic in the lazy DFA (can only occur for especially large regexes). -* [BUG #1116](https://github.com/rust-lang/regex/issues/1116): -Fixes a memory usage regression for large regexes (introduced in `regex 1.9`). -* [BUG #1195](https://github.com/rust-lang/regex/issues/1195): -Fix universal start states in sparse DFA. -* [BUG #1295](https://github.com/rust-lang/regex/pull/1295): -Fixes a panic when deserializing a corrupted dense DFA. -* [BUG 8f5d9479](https://github.com/rust-lang/regex/commit/8f5d9479d0f1da5726488a530d7fd66a73d05b80): -Make `regex_automata::meta::Regex::find` consistently return `None` when -`WhichCaptures::None` is used. - - -1.11.3 (2025-09-25) -=================== -This is a small patch release with an improvement in memory usage in some -cases. - -Improvements: - -* [BUG #1297](https://github.com/rust-lang/regex/issues/1297): -Improve memory usage by trimming excess memory capacity in some spots. - - -1.11.2 (2025-08-24) -=================== -This is a new patch release of `regex` with some minor fixes. A larger number -of typo or lint fix patches were merged. Also, we now finally recommend using -`std::sync::LazyLock`. - -Improvements: - -* [BUG #1217](https://github.com/rust-lang/regex/issues/1217): -Switch recommendation from `once_cell` to `std::sync::LazyLock`. -* [BUG #1225](https://github.com/rust-lang/regex/issues/1225): -Add `DFA::set_prefilter` to `regex-automata`. 
- -Bug fixes: - -* [BUG #1165](https://github.com/rust-lang/regex/pull/1150): -Remove `std` dependency from `perf-literal-multisubstring` crate feature. -* [BUG #1165](https://github.com/rust-lang/regex/pull/1165): -Clarify the meaning of `(?R)$` in the documentation. -* [BUG #1281](https://github.com/rust-lang/regex/pull/1281): -Remove `fuzz/` and `record/` directories from published crate on crates.io. - - -1.11.1 (2024-10-24) -=================== -This is a new patch release of `regex` that fixes compilation on nightly -Rust when the unstable `pattern` crate feature is enabled. Users on nightly -Rust without this feature enabled are unaffected. - -Bug fixes: - -* [BUG #1231](https://github.com/rust-lang/regex/issues/1231): -Fix the `Pattern` trait implementation as a result of nightly API breakage. - - -1.11.0 (2024-09-29) -=================== -This is a new minor release of `regex` that brings in an update to the -Unicode Character Database. Specifically, this updates the Unicode data -used by `regex` internally to the version 16 release. - -New features: - -* [FEATURE #1228](https://github.com/rust-lang/regex/pull/1228): -Add new `regex::SetMatches::matched_all` method. -* [FEATURE #1229](https://github.com/rust-lang/regex/pull/1229): -Update to Unicode Character Database (UCD) version 16. - - -1.10.6 (2024-08-02) -=================== -This is a new patch release with a fix for the `unstable` crate feature that -enables `std::str::Pattern` trait integration. - -Bug fixes: - -* [BUG #1219](https://github.com/rust-lang/regex/pull/1219): -Fix the `Pattern` trait implementation as a result of nightly API breakage. - - -1.10.5 (2024-06-09) -=================== -This is a new patch release with some minor fixes. - -Bug fixes: - -* [BUG #1203](https://github.com/rust-lang/regex/pull/1203): -Escape invalid UTF-8 when in the `Debug` impl of `regex::bytes::Match`. - - -1.10.4 (2024-03-22) -=================== -This is a new patch release with some minor fixes. - -* [BUG #1169](https://github.com/rust-lang/regex/issues/1169): -Fixes a bug with compiling a reverse NFA automaton in `regex-automata`. -* [BUG #1178](https://github.com/rust-lang/regex/pull/1178): -Clarifies that when `Cow::Borrowed` is returned from replace APIs, it is -equivalent to the input. - - -1.10.3 (2024-01-21) -=================== -This is a new patch release that fixes the feature configuration of optional -dependencies, and fixes an unsound use of bounds check elision. - -Bug fixes: - -* [BUG #1147](https://github.com/rust-lang/regex/issues/1147): -Set `default-features=false` for the `memchr` and `aho-corasick` dependencies. -* [BUG #1154](https://github.com/rust-lang/regex/pull/1154): -Fix unsound bounds check elision. - - -1.10.2 (2023-10-16) -=================== -This is a new patch release that fixes a search regression where incorrect -matches could be reported. - -Bug fixes: - -* [BUG #1110](https://github.com/rust-lang/regex/issues/1110): -Revert broadening of reverse suffix literal optimization introduced in 1.10.1. - - -1.10.1 (2023-10-14) -=================== -This is a new patch release with a minor increase in the number of valid -patterns and a broadening of some literal optimizations. - -New features: - -* [FEATURE 04f5d7be](https://github.com/rust-lang/regex/commit/04f5d7be4efc542864cc400f5d43fbea4eb9bab6): -Loosen ASCII-compatible rules such that regexes like `(?-u:☃)` are now allowed. 
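
(Editor's illustrative note, not part of the deleted changelog or of this patch: a minimal sketch of what the 1.10.1 loosening described in the feature entry above permits, assuming a `regex` crate at version 1.10.1 or newer; the pattern and haystack below are hypothetical.)

```rust
use regex::Regex;

fn main() {
    // Before 1.10.1 a non-ASCII literal inside `(?-u:...)` was rejected by
    // the string-oriented `Regex` API; with the loosened ASCII-compatibility
    // rules it compiles and matches the literal UTF-8 bytes of '☃'.
    let re = Regex::new(r"(?-u:☃)").unwrap();
    assert!(re.is_match("a ☃ in the snow"));
}
```
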
- -Performance improvements: - -* [PERF 8a8d599f](https://github.com/rust-lang/regex/commit/8a8d599f9d2f2d78e9ad84e4084788c2d563afa5): -Broader the reverse suffix optimization to apply in more cases. - - -1.10.0 (2023-10-09) -=================== -This is a new minor release of `regex` that adds support for start and end -word boundary assertions. That is, `\<` and `\>`. The minimum supported Rust -version has also been raised to 1.65, which was released about one year ago. - -The new word boundary assertions are: - -* `\<` or `\b{start}`: a Unicode start-of-word boundary (`\W|\A` on the left, -`\w` on the right). -* `\>` or `\b{end}`: a Unicode end-of-word boundary (`\w` on the left, `\W|\z` -on the right). -* `\b{start-half}`: half of a Unicode start-of-word boundary (`\W|\A` on the -left). -* `\b{end-half}`: half of a Unicode end-of-word boundary (`\W|\z` on the -right). - -The `\<` and `\>` are GNU extensions to POSIX regexes. They have been added -to the `regex` crate because they enjoy somewhat broad support in other regex -engines as well (for example, vim). The `\b{start}` and `\b{end}` assertions -are aliases for `\<` and `\>`, respectively. - -The `\b{start-half}` and `\b{end-half}` assertions are not found in any -other regex engine (although regex engines with general look-around support -can certainly express them). They were added principally to support the -implementation of word matching in grep programs, where one generally wants to -be a bit more flexible in what is considered a word boundary. - -New features: - -* [FEATURE #469](https://github.com/rust-lang/regex/issues/469): -Add support for `\<` and `\>` word boundary assertions. -* [FEATURE(regex-automata) #1031](https://github.com/rust-lang/regex/pull/1031): -DFAs now have a `start_state` method that doesn't use an `Input`. - -Performance improvements: - -* [PERF #1051](https://github.com/rust-lang/regex/pull/1051): -Unicode character class operations have been optimized in `regex-syntax`. -* [PERF #1090](https://github.com/rust-lang/regex/issues/1090): -Make patterns containing lots of literal characters use less memory. - -Bug fixes: - -* [BUG #1046](https://github.com/rust-lang/regex/issues/1046): -Fix a bug that could result in incorrect match spans when using a Unicode word -boundary and searching non-ASCII strings. -* [BUG(regex-syntax) #1047](https://github.com/rust-lang/regex/issues/1047): -Fix panics that can occur in `Ast->Hir` translation (not reachable from `regex` -crate). -* [BUG(regex-syntax) #1088](https://github.com/rust-lang/regex/issues/1088): -Remove guarantees in the API that connect the `u` flag with a specific HIR -representation. - -`regex-automata` breaking change release: - -This release includes a `regex-automata 0.4.0` breaking change release, which -was necessary in order to support the new word boundary assertions. For -example, the `Look` enum has new variants and the `LookSet` type now uses `u32` -instead of `u16` to represent a bitset of look-around assertions. These are -overall very minor changes, and most users of `regex-automata` should be able -to move to `0.4` from `0.3` without any changes at all. - -`regex-syntax` breaking change release: - -This release also includes a `regex-syntax 0.8.0` breaking change release, -which, like `regex-automata`, was necessary in order to support the new word -boundary assertions. This release also includes some changes to the `Ast` -type to reduce heap usage in some cases. 
If you are using the `Ast` type -directly, your code may require some minor modifications. Otherwise, users of -`regex-syntax 0.7` should be able to migrate to `0.8` without any code changes. - -`regex-lite` release: - -The `regex-lite 0.1.1` release contains support for the new word boundary -assertions. There are no breaking changes. - - -1.9.6 (2023-09-30) -================== -This is a patch release that fixes a panic that can occur when the default -regex size limit is increased to a large number. - -* [BUG aa4e4c71](https://github.com/rust-lang/regex/commit/aa4e4c7120b0090ce0624e3c42a2ed06dd8b918a): -Fix a bug where computing the maximum haystack length for the bounded -backtracker could result underflow and thus provoke a panic later in a search -due to a broken invariant. - - -1.9.5 (2023-09-02) -================== -This is a patch release that hopefully mostly fixes a performance bug that -occurs when sharing a regex across multiple threads. - -Issue [#934](https://github.com/rust-lang/regex/issues/934) -explains this in more detail. It is [also noted in the crate -documentation](https://docs.rs/regex/latest/regex/#sharing-a-regex-across-threads-can-result-in-contention). -The bug can appear when sharing a regex across multiple threads simultaneously, -as might be the case when using a regex from a `OnceLock`, `lazy_static` or -similar primitive. Usually high contention only results when using many threads -to execute searches on small haystacks. - -One can avoid the contention problem entirely through one of two methods. -The first is to use lower level APIs from `regex-automata` that require passing -state explicitly, such as [`meta::Regex::search_with`](https://docs.rs/regex-automata/latest/regex_automata/meta/struct.Regex.html#method.search_with). -The second is to clone a regex and send it to other threads explicitly. This -will not use any additional memory usage compared to sharing the regex. The -only downside of this approach is that it may be less convenient, for example, -it won't work with things like `OnceLock` or `lazy_static` or `once_cell`. - -With that said, as of this release, the contention performance problems have -been greatly reduced. This was achieved by changing the free-list so that it -was sharded across threads, and that ensuring each sharded mutex occupies a -single cache line to mitigate false sharing. So while contention may still -impact performance in some cases, it should be a lot better now. - -Because of the changes to how the free-list works, please report any issues you -find with this release. That not only includes search time regressions but also -significant regressions in memory usage. Reporting improvements is also welcome -as well! If possible, provide a reproduction. - -Bug fixes: - -* [BUG #934](https://github.com/rust-lang/regex/issues/934): -Fix a performance bug where high contention on a single regex led to massive -slow-downs. - - -1.9.4 (2023-08-26) -================== -This is a patch release that fixes a bug where `RegexSet::is_match(..)` could -incorrectly return false (even when `RegexSet::matches(..).matched_any()` -returns true). - -Bug fixes: - -* [BUG #1070](https://github.com/rust-lang/regex/issues/1070): -Fix a bug where a prefilter was incorrectly configured for a `RegexSet`. - - -1.9.3 (2023-08-05) -================== -This is a patch release that fixes a bug where some searches could result in -incorrect match offsets being reported. It is difficult to characterize the -types of regexes susceptible to this bug. 
They generally involve patterns -that contain no prefix or suffix literals, but have an inner literal along with -a regex prefix that can conditionally match. - -Bug fixes: - -* [BUG #1060](https://github.com/rust-lang/regex/issues/1060): -Fix a bug with the reverse inner literal optimization reporting incorrect match -offsets. - - -1.9.2 (2023-08-05) -================== -This is a patch release that fixes another memory usage regression. This -particular regression occurred only when using a `RegexSet`. In some cases, -much more heap memory (by one or two orders of magnitude) was allocated than in -versions prior to 1.9.0. - -Bug fixes: - -* [BUG #1059](https://github.com/rust-lang/regex/issues/1059): -Fix a memory usage regression when using a `RegexSet`. - - -1.9.1 (2023-07-07) -================== -This is a patch release which fixes a memory usage regression. In the regex -1.9 release, one of the internal engines used a more aggressive allocation -strategy than what was done previously. This patch release reverts to the -prior on-demand strategy. - -Bug fixes: - -* [BUG #1027](https://github.com/rust-lang/regex/issues/1027): -Change the allocation strategy for the backtracker to be less aggressive. - - -1.9.0 (2023-07-05) -================== -This release marks the end of a [years long rewrite of the regex crate -internals](https://github.com/rust-lang/regex/issues/656). Since this is -such a big release, please report any issues or regressions you find. We would -also love to hear about improvements as well. - -In addition to many internal improvements that should hopefully result in -"my regex searches are faster," there have also been a few API additions: - -* A new `Captures::extract` method for quickly accessing the substrings -that match each capture group in a regex. -* A new inline flag, `R`, which enables CRLF mode. This makes `.` match any -Unicode scalar value except for `\r` and `\n`, and also makes `(?m:^)` and -`(?m:$)` match after and before both `\r` and `\n`, respectively, but never -between a `\r` and `\n`. -* `RegexBuilder::line_terminator` was added to further customize the line -terminator used by `(?m:^)` and `(?m:$)` to be any arbitrary byte. -* The `std` Cargo feature is now actually optional. That is, the `regex` crate -can be used without the standard library. -* Because `regex 1.9` may make binary size and compile times even worse, a -new experimental crate called `regex-lite` has been published. It prioritizes -binary size and compile times over functionality (like Unicode) and -performance. It shares no code with the `regex` crate. - -New features: - -* [FEATURE #244](https://github.com/rust-lang/regex/issues/244): -One can opt into CRLF mode via the `R` flag. -e.g., `(?mR:$)` matches just before `\r\n`. -* [FEATURE #259](https://github.com/rust-lang/regex/issues/259): -Multi-pattern searches with offsets can be done with `regex-automata 0.3`. -* [FEATURE #476](https://github.com/rust-lang/regex/issues/476): -`std` is now an optional feature. `regex` may be used with only `alloc`. -* [FEATURE #644](https://github.com/rust-lang/regex/issues/644): -`RegexBuilder::line_terminator` configures how `(?m:^)` and `(?m:$)` behave. -* [FEATURE #675](https://github.com/rust-lang/regex/issues/675): -Anchored search APIs are now available in `regex-automata 0.3`. -* [FEATURE #824](https://github.com/rust-lang/regex/issues/824): -Add new `Captures::extract` method for easier capture group access. 
-* [FEATURE #961](https://github.com/rust-lang/regex/issues/961): -Add `regex-lite` crate with smaller binary sizes and faster compile times. -* [FEATURE #1022](https://github.com/rust-lang/regex/pull/1022): -Add `TryFrom` implementations for the `Regex` type. - -Performance improvements: - -* [PERF #68](https://github.com/rust-lang/regex/issues/68): -Added a one-pass DFA engine for faster capture group matching. -* [PERF #510](https://github.com/rust-lang/regex/issues/510): -Inner literals are now used to accelerate searches, e.g., `\w+@\w+` will scan -for `@`. -* [PERF #787](https://github.com/rust-lang/regex/issues/787), -[PERF #891](https://github.com/rust-lang/regex/issues/891): -Makes literal optimizations apply to regexes of the form `\b(foo|bar|quux)\b`. - -(There are many more performance improvements as well, but not all of them have -specific issues devoted to them.) - -Bug fixes: - -* [BUG #429](https://github.com/rust-lang/regex/issues/429): -Fix matching bugs related to `\B` and inconsistencies across internal engines. -* [BUG #517](https://github.com/rust-lang/regex/issues/517): -Fix matching bug with capture groups. -* [BUG #579](https://github.com/rust-lang/regex/issues/579): -Fix matching bug with word boundaries. -* [BUG #779](https://github.com/rust-lang/regex/issues/779): -Fix bug where some regexes like `(re)+` were not equivalent to `(re)(re)*`. -* [BUG #850](https://github.com/rust-lang/regex/issues/850): -Fix matching bug inconsistency between NFA and DFA engines. -* [BUG #921](https://github.com/rust-lang/regex/issues/921): -Fix matching bug where literal extraction got confused by `$`. -* [BUG #976](https://github.com/rust-lang/regex/issues/976): -Add documentation to replacement routines about dealing with fallibility. -* [BUG #1002](https://github.com/rust-lang/regex/issues/1002): -Use corpus rejection in fuzz testing. - - -1.8.4 (2023-06-05) -================== -This is a patch release that fixes a bug where `(?-u:\B)` was allowed in -Unicode regexes, despite the fact that the current matching engines can report -match offsets between the code units of a single UTF-8 encoded codepoint. That -in turn means that match offsets that split a codepoint could be reported, -which in turn results in panicking when one uses them to slice a `&str`. - -This bug occurred in the transition to `regex 1.8` because the underlying -syntactical error that prevented this regex from compiling was intentionally -removed. That's because `(?-u:\B)` will be permitted in Unicode regexes in -`regex 1.9`, but the matching engines will guarantee to never report match -offsets that split a codepoint. When the underlying syntactical error was -removed, no code was added to ensure that `(?-u:\B)` didn't compile in the -`regex 1.8` transition release. This release, `regex 1.8.4`, adds that code -such that `Regex::new(r"(?-u:\B)")` returns to the `regex <1.8` behavior of -not compiling. (A `bytes::Regex` can still of course compile it.) - -Bug fixes: - -* [BUG #1006](https://github.com/rust-lang/regex/issues/1006): -Fix a bug where `(?-u:\B)` was allowed in Unicode regexes, and in turn could -lead to match offsets that split a codepoint in `&str`. - - -1.8.3 (2023-05-25) -================== -This is a patch release that fixes a bug where the regex would report a -match at every position even when it shouldn't. This could occur in a very -small subset of regexes, usually an alternation of simple literals that -have particular properties. 
(See the issue linked below for a more precise -description.) - -Bug fixes: - -* [BUG #999](https://github.com/rust-lang/regex/issues/999): -Fix a bug where a match at every position is erroneously reported. - - -1.8.2 (2023-05-22) -================== -This is a patch release that fixes a bug where regex compilation could panic -in debug mode for regexes with large counted repetitions. For example, -`a{2147483516}{2147483416}{5}` resulted in an integer overflow that wrapped -in release mode but panicking in debug mode. Despite the unintended wrapping -arithmetic in release mode, it didn't cause any other logical bugs since the -errant code was for new analysis that wasn't used yet. - -Bug fixes: - -* [BUG #995](https://github.com/rust-lang/regex/issues/995): -Fix a bug where regex compilation with large counted repetitions could panic. - - -1.8.1 (2023-04-21) -================== -This is a patch release that fixes a bug where a regex match could be reported -where none was found. Specifically, the bug occurs when a pattern contains some -literal prefixes that could be extracted _and_ an optional word boundary in the -prefix. - -Bug fixes: - -* [BUG #981](https://github.com/rust-lang/regex/issues/981): -Fix a bug where a word boundary could interact with prefix literal -optimizations and lead to a false positive match. - - -1.8.0 (2023-04-20) -================== -This is a sizeable release that will be soon followed by another sizeable -release. Both of them will combined close over 40 existing issues and PRs. - -This first release, despite its size, essentially represents preparatory work -for the second release, which will be even bigger. Namely, this release: - -* Increases the MSRV to Rust 1.60.0, which was released about 1 year ago. -* Upgrades its dependency on `aho-corasick` to the recently released 1.0 -version. -* Upgrades its dependency on `regex-syntax` to the simultaneously released -`0.7` version. The changes to `regex-syntax` principally revolve around a -rewrite of its literal extraction code and a number of simplifications and -optimizations to its high-level intermediate representation (HIR). - -The second release, which will follow ~shortly after the release above, will -contain a soup-to-nuts rewrite of every regex engine. This will be done by -bringing [`regex-automata`](https://github.com/BurntSushi/regex-automata) into -this repository, and then changing the `regex` crate to be nothing but an API -shim layer on top of `regex-automata`'s API. - -These tandem releases are the culmination of about 3 -years of on-and-off work that [began in earnest in March -2020](https://github.com/rust-lang/regex/issues/656). - -Because of the scale of changes involved in these releases, I would love to -hear about your experience. Especially if you notice undocumented changes in -behavior or performance changes (positive *or* negative). - -Most changes in the first release are listed below. For more details, please -see the commit log, which reflects a linear and decently documented history -of all changes. - -New features: - -* [FEATURE #501](https://github.com/rust-lang/regex/issues/501): -Permit many more characters to be escaped, even if they have no significance. -More specifically, any ASCII character except for `[0-9A-Za-z<>]` can now be -escaped. Also, a new routine, `is_escapeable_character`, has been added to -`regex-syntax` to query whether a character is escapable or not. -* [FEATURE #547](https://github.com/rust-lang/regex/issues/547): -Add `Regex::captures_at`. 
This fills a hole in the API, but doesn't otherwise -introduce any new expressive power. -* [FEATURE #595](https://github.com/rust-lang/regex/issues/595): -Capture group names are now Unicode-aware. They can now begin with either a `_` -or any "alphabetic" codepoint. After the first codepoint, subsequent codepoints -can be any sequence of alphanumeric codepoints, along with `_`, `.`, `[` and -`]`. Note that replacement syntax has not changed. -* [FEATURE #810](https://github.com/rust-lang/regex/issues/810): -Add `Match::is_empty` and `Match::len` APIs. -* [FEATURE #905](https://github.com/rust-lang/regex/issues/905): -Add an `impl Default for RegexSet`, with the default being the empty set. -* [FEATURE #908](https://github.com/rust-lang/regex/issues/908): -A new method, `Regex::static_captures_len`, has been added which returns the -number of capture groups in the pattern if and only if every possible match -always contains the same number of matching groups. -* [FEATURE #955](https://github.com/rust-lang/regex/issues/955): -Named captures can now be written as `(?re)` in addition to -`(?Pre)`. -* FEATURE: `regex-syntax` now supports empty character classes. -* FEATURE: `regex-syntax` now has an optional `std` feature. (This will come -to `regex` in the second release.) -* FEATURE: The `Hir` type in `regex-syntax` has had a number of simplifications -made to it. -* FEATURE: `regex-syntax` has support for a new `R` flag for enabling CRLF -mode. This will be supported in `regex` proper in the second release. -* FEATURE: `regex-syntax` now has proper support for "regex that never -matches" via `Hir::fail()`. -* FEATURE: The `hir::literal` module of `regex-syntax` has been completely -re-worked. It now has more documentation, examples and advice. -* FEATURE: The `allow_invalid_utf8` option in `regex-syntax` has been renamed -to `utf8`, and the meaning of the boolean has been flipped. - -Performance improvements: - -* PERF: The upgrade to `aho-corasick 1.0` may improve performance in some -cases. It's difficult to characterize exactly which patterns this might impact, -but if there are a small number of longish (>= 4 bytes) prefix literals, then -it might be faster than before. - -Bug fixes: - -* [BUG #514](https://github.com/rust-lang/regex/issues/514): -Improve `Debug` impl for `Match` so that it doesn't show the entire haystack. -* BUGS [#516](https://github.com/rust-lang/regex/issues/516), -[#731](https://github.com/rust-lang/regex/issues/731): -Fix a number of issues with printing `Hir` values as regex patterns. -* [BUG #610](https://github.com/rust-lang/regex/issues/610): -Add explicit example of `foo|bar` in the regex syntax docs. -* [BUG #625](https://github.com/rust-lang/regex/issues/625): -Clarify that `SetMatches::len` does not (regrettably) refer to the number of -matches in the set. -* [BUG #660](https://github.com/rust-lang/regex/issues/660): -Clarify "verbose mode" in regex syntax documentation. -* BUG [#738](https://github.com/rust-lang/regex/issues/738), -[#950](https://github.com/rust-lang/regex/issues/950): -Fix `CaptureLocations::get` so that it never panics. -* [BUG #747](https://github.com/rust-lang/regex/issues/747): -Clarify documentation for `Regex::shortest_match`. -* [BUG #835](https://github.com/rust-lang/regex/issues/835): -Fix `\p{Sc}` so that it is equivalent to `\p{Currency_Symbol}`. -* [BUG #846](https://github.com/rust-lang/regex/issues/846): -Add more clarifying documentation to the `CompiledTooBig` error variant. 
-* [BUG #854](https://github.com/rust-lang/regex/issues/854): -Clarify that `regex::Regex` searches as if the haystack is a sequence of -Unicode scalar values. -* [BUG #884](https://github.com/rust-lang/regex/issues/884): -Replace `__Nonexhaustive` variants with `#[non_exhaustive]` attribute. -* [BUG #893](https://github.com/rust-lang/regex/pull/893): -Optimize case folding since it can get quite slow in some pathological cases. -* [BUG #895](https://github.com/rust-lang/regex/issues/895): -Reject `(?-u:\W)` in `regex::Regex` APIs. -* [BUG #942](https://github.com/rust-lang/regex/issues/942): -Add a missing `void` keyword to indicate "no parameters" in C API. -* [BUG #965](https://github.com/rust-lang/regex/issues/965): -Fix `\p{Lc}` so that it is equivalent to `\p{Cased_Letter}`. -* [BUG #975](https://github.com/rust-lang/regex/issues/975): -Clarify documentation for `\pX` syntax. - - -1.7.3 (2023-03-24) -================== -This is a small release that fixes a bug in `Regex::shortest_match_at` that -could cause it to panic, even when the offset given is valid. - -Bug fixes: - -* [BUG #969](https://github.com/rust-lang/regex/issues/969): - Fix a bug in how the reverse DFA was called for `Regex::shortest_match_at`. - - -1.7.2 (2023-03-21) -================== -This is a small release that fixes a failing test on FreeBSD. - -Bug fixes: - -* [BUG #967](https://github.com/rust-lang/regex/issues/967): - Fix "no stack overflow" test which can fail due to the small stack size. - - -1.7.1 (2023-01-09) -================== -This release was done principally to try and fix the doc.rs rendering for the -regex crate. - -Performance improvements: - -* [PERF #930](https://github.com/rust-lang/regex/pull/930): - Optimize `replacen`. This also applies to `replace`, but not `replace_all`. - -Bug fixes: - -* [BUG #945](https://github.com/rust-lang/regex/issues/945): - Maybe fix rustdoc rendering by just bumping a new release? - - -1.7.0 (2022-11-05) -================== -This release principally includes an upgrade to Unicode 15. - -New features: - -* [FEATURE #832](https://github.com/rust-lang/regex/issues/916): - Upgrade to Unicode 15. - - -1.6.0 (2022-07-05) -================== -This release principally includes an upgrade to Unicode 14. - -New features: - -* [FEATURE #832](https://github.com/rust-lang/regex/pull/832): - Clarify that `Captures::len` includes all groups, not just matching groups. -* [FEATURE #857](https://github.com/rust-lang/regex/pull/857): - Add an `ExactSizeIterator` impl for `SubCaptureMatches`. -* [FEATURE #861](https://github.com/rust-lang/regex/pull/861): - Improve `RegexSet` documentation examples. -* [FEATURE #877](https://github.com/rust-lang/regex/issues/877): - Upgrade to Unicode 14. - -Bug fixes: - -* [BUG #792](https://github.com/rust-lang/regex/issues/792): - Fix error message rendering bug. - - -1.5.6 (2022-05-20) -================== -This release includes a few bug fixes, including a bug that produced incorrect -matches when a non-greedy `?` operator was used. - -* [BUG #680](https://github.com/rust-lang/regex/issues/680): - Fixes a bug where `[[:alnum:][:^ascii:]]` dropped `[:alnum:]` from the class. -* [BUG #859](https://github.com/rust-lang/regex/issues/859): - Fixes a bug where `Hir::is_match_empty` returned `false` for `\b`. -* [BUG #862](https://github.com/rust-lang/regex/issues/862): - Fixes a bug where 'ab??' matches 'ab' instead of 'a' in 'ab'. - - -1.5.5 (2022-03-08) -================== -This releases fixes a security bug in the regex compiler. 
This bug permits a -vector for a denial-of-service attack in cases where the regex being compiled -is untrusted. There are no known problems where the regex is itself trusted, -including in cases of untrusted haystacks. - -* [SECURITY #GHSA-m5pq-gvj9-9vr8](https://github.com/rust-lang/regex/security/advisories/GHSA-m5pq-gvj9-9vr8): - Fixes a bug in the regex compiler where empty sub-expressions subverted the - existing mitigations in place to enforce a size limit on compiled regexes. - The Rust Security Response WG published an advisory about this: - https://groups.google.com/g/rustlang-security-announcements/c/NcNNL1Jq7Yw - - -1.5.4 (2021-05-06) -================== -This release fixes another compilation failure when building regex. This time, -the fix is for when the `pattern` feature is enabled, which only works on -nightly Rust. CI has been updated to test this case. - -* [BUG #772](https://github.com/rust-lang/regex/pull/772): - Fix build when `pattern` feature is enabled. - - -1.5.3 (2021-05-01) -================== -This releases fixes a bug when building regex with only the `unicode-perl` -feature. It turns out that while CI was building this configuration, it wasn't -actually failing the overall build on a failed compilation. - -* [BUG #769](https://github.com/rust-lang/regex/issues/769): - Fix build in `regex-syntax` when only the `unicode-perl` feature is enabled. - - -1.5.2 (2021-05-01) -================== -This release fixes a performance bug when Unicode word boundaries are used. -Namely, for certain regexes on certain inputs, it's possible for the lazy DFA -to stop searching (causing a fallback to a slower engine) when it doesn't -actually need to. - -[PR #768](https://github.com/rust-lang/regex/pull/768) fixes the bug, which was -originally reported in -[ripgrep#1860](https://github.com/BurntSushi/ripgrep/issues/1860). - - -1.5.1 (2021-04-30) -================== -This is a patch release that fixes a compilation error when the `perf-literal` -feature is not enabled. - - -1.5.0 (2021-04-30) -================== -This release primarily updates to Rust 2018 (finally) and bumps the MSRV to -Rust 1.41 (from Rust 1.28). Rust 1.41 was chosen because it's still reasonably -old, and is what's in Debian stable at the time of writing. - -This release also drops this crate's own bespoke substring search algorithms -in favor of a new -[`memmem` implementation provided by the `memchr` crate](https://docs.rs/memchr/2.4.0/memchr/memmem/index.html). -This will change the performance profile of some regexes, sometimes getting a -little worse, and hopefully more frequently, getting a lot better. Please -report any serious performance regressions if you find them. - - -1.4.6 (2021-04-22) -================== -This is a small patch release that fixes the compiler's size check on how much -heap memory a regex uses. Previously, the compiler did not account for the -heap usage of Unicode character classes. Now it does. It's possible that this -may make some regexes fail to compile that previously did compile. If that -happens, please file an issue. - -* [BUG OSS-fuzz#33579](https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=33579): - Some regexes can use more heap memory than one would expect. - - -1.4.5 (2021-03-14) -================== -This is a small patch release that fixes a regression in the size of a `Regex` -in the 1.4.4 release. Prior to 1.4.4, a `Regex` was 552 bytes. In the 1.4.4 -release, it was 856 bytes due to internal changes. In this release, a `Regex` -is now 16 bytes. 
In general, the size of a `Regex` was never something that was -on my radar, but this increased size in the 1.4.4 release seems to have crossed -a threshold and resulted in stack overflows in some programs. - -* [BUG #750](https://github.com/rust-lang/regex/pull/750): - Fixes stack overflows seemingly caused by a large `Regex` size by decreasing - its size. - - -1.4.4 (2021-03-11) -================== -This is a small patch release that contains some bug fixes. Notably, it also -drops the `thread_local` (and `lazy_static`, via transitivity) dependencies. - -Bug fixes: - -* [BUG #362](https://github.com/rust-lang/regex/pull/362): - Memory leaks caused by an internal caching strategy should now be fixed. -* [BUG #576](https://github.com/rust-lang/regex/pull/576): - All regex types now implement `UnwindSafe` and `RefUnwindSafe`. -* [BUG #728](https://github.com/rust-lang/regex/pull/749): - Add missing `Replacer` impls for `Vec`, `String`, `Cow`, etc. - - -1.4.3 (2021-01-08) -================== -This is a small patch release that adds some missing standard trait -implementations for some types in the public API. - -Bug fixes: - -* [BUG #734](https://github.com/rust-lang/regex/pull/734): - Add `FusedIterator` and `ExactSizeIterator` impls to iterator types. -* [BUG #735](https://github.com/rust-lang/regex/pull/735): - Add missing `Debug` impls to public API types. - - -1.4.2 (2020-11-01) -================== -This is a small bug fix release that bans `\P{any}`. We previously banned empty -classes like `[^\w\W]`, but missed the `\P{any}` case. In the future, we hope -to permit empty classes. - -* [BUG #722](https://github.com/rust-lang/regex/issues/722): - Ban `\P{any}` to avoid a panic in the regex compiler. Found by OSS-Fuzz. - - -1.4.1 (2020-10-13) -================== -This is a small bug fix release that makes `\p{cf}` work. Previously, it would -report "property not found" even though `cf` is a valid abbreviation for the -`Format` general category. - -* [BUG #719](https://github.com/rust-lang/regex/issues/719): - Fixes bug that prevented `\p{cf}` from working. - - -1.4.0 (2020-10-11) -================== -This releases has a few minor documentation fixes as well as some very minor -API additions. The MSRV remains at Rust 1.28 for now, but this is intended to -increase to at least Rust 1.41.1 soon. - -This release also adds support for OSS-Fuzz. Kudos to -[@DavidKorczynski](https://github.com/DavidKorczynski) -for doing the heavy lifting for that! - -New features: - -* [FEATURE #649](https://github.com/rust-lang/regex/issues/649): - Support `[`, `]` and `.` in capture group names. -* [FEATURE #687](https://github.com/rust-lang/regex/issues/687): - Add `is_empty` predicate to `RegexSet`. -* [FEATURE #689](https://github.com/rust-lang/regex/issues/689): - Implement `Clone` for `SubCaptureMatches`. -* [FEATURE #715](https://github.com/rust-lang/regex/issues/715): - Add `empty` constructor to `RegexSet` for convenience. - -Bug fixes: - -* [BUG #694](https://github.com/rust-lang/regex/issues/694): - Fix doc example for `Replacer::replace_append`. -* [BUG #698](https://github.com/rust-lang/regex/issues/698): - Clarify docs for `s` flag when using a `bytes::Regex`. -* [BUG #711](https://github.com/rust-lang/regex/issues/711): - Clarify `is_match` docs to indicate that it can match anywhere in string. - - -1.3.9 (2020-05-28) -================== -This release fixes a MSRV (Minimum Support Rust Version) regression in the -1.3.8 release. 
Namely, while 1.3.8 compiles on Rust 1.28, it actually does not -compile on other Rust versions, such as Rust 1.39. - -Bug fixes: - -* [BUG #685](https://github.com/rust-lang/regex/issues/685): - Remove use of `doc_comment` crate, which cannot be used before Rust 1.43. - - -1.3.8 (2020-05-28) -================== -This release contains a couple of important bug fixes driven -by better support for empty-subexpressions in regexes. For -example, regexes like `b|` are now allowed. Major thanks to -[@sliquister](https://github.com/sliquister) for implementing support for this -in [#677](https://github.com/rust-lang/regex/pull/677). - -Bug fixes: - -* [BUG #523](https://github.com/rust-lang/regex/pull/523): - Add note to documentation that spaces can be escaped in `x` mode. -* [BUG #524](https://github.com/rust-lang/regex/issues/524): - Add support for empty sub-expressions, including empty alternations. -* [BUG #659](https://github.com/rust-lang/regex/issues/659): - Fix match bug caused by an empty sub-expression miscompilation. - - -1.3.7 (2020-04-17) -================== -This release contains a small bug fix that fixes how `regex` forwards crate -features to `regex-syntax`. In particular, this will reduce recompilations in -some cases. - -Bug fixes: - -* [BUG #665](https://github.com/rust-lang/regex/pull/665): - Fix feature forwarding to `regex-syntax`. - - -1.3.6 (2020-03-24) -================== -This release contains a sizable (~30%) performance improvement when compiling -some kinds of large regular expressions. - -Performance improvements: - -* [PERF #657](https://github.com/rust-lang/regex/pull/657): - Improvement performance of compiling large regular expressions. - - -1.3.5 (2020-03-12) -================== -This release updates this crate to Unicode 13. - -New features: - -* [FEATURE #653](https://github.com/rust-lang/regex/pull/653): - Update `regex-syntax` to Unicode 13. - - -1.3.4 (2020-01-30) -================== -This is a small bug fix release that fixes a bug related to the scoping of -flags in a regex. Namely, before this fix, a regex like `((?i)a)b)` would -match `aB` despite the fact that `b` should not be matched case insensitively. - -Bug fixes: - -* [BUG #640](https://github.com/rust-lang/regex/issues/640): - Fix bug related to the scoping of flags in a regex. - - -1.3.3 (2020-01-09) -================== -This is a small maintenance release that upgrades the dependency on -`thread_local` from `0.3` to `1.0`. The minimum supported Rust version remains -at Rust 1.28. - - -1.3.2 (2020-01-09) -================== -This is a small maintenance release with some house cleaning and bug fixes. - -New features: - -* [FEATURE #631](https://github.com/rust-lang/regex/issues/631): - Add a `Match::range` method an a `From for Range` impl. - -Bug fixes: - -* [BUG #521](https://github.com/rust-lang/regex/issues/521): - Corrects `/-/.splitn("a", 2)` to return `["a"]` instead of `["a", ""]`. -* [BUG #594](https://github.com/rust-lang/regex/pull/594): - Improve error reporting when writing `\p\`. -* [BUG #627](https://github.com/rust-lang/regex/issues/627): - Corrects `/-/.split("a-")` to return `["a", ""]` instead of `["a"]`. -* [BUG #633](https://github.com/rust-lang/regex/pull/633): - Squash deprecation warnings for the `std::error::Error::description` method. - - -1.3.1 (2019-09-04) -================== -This is a maintenance release with no changes in order to try to work around -a [docs.rs/Cargo issue](https://github.com/rust-lang/docs.rs/issues/400). 
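To make the 1.3.2 additions and split fixes above concrete, here is a minimal sketch using only the APIs those entries name (`Match::range`, `Regex::split`, `Regex::splitn`):

```rust
use regex::Regex;

fn main() {
    let re = Regex::new("-").unwrap();

    // `Match::range` (added in 1.3.2) exposes the match span as a `Range<usize>`.
    let m = re.find("a-b").unwrap();
    assert_eq!(m.range(), 1..2);

    // Split behavior after the 1.3.2 fixes:
    let parts: Vec<&str> = re.split("a-").collect();
    assert_eq!(parts, vec!["a", ""]); // trailing empty field is kept (#627)

    let parts: Vec<&str> = re.splitn("a", 2).collect();
    assert_eq!(parts, vec!["a"]); // no spurious trailing empty field (#521)
}
```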
- - -1.3.0 (2019-09-03) -================== -This release adds a plethora of new crate features that permit users of regex -to shrink its size considerably, in exchange for giving up either functionality -(such as Unicode support) or runtime performance. When all such features are -disabled, the dependency tree for `regex` shrinks to exactly 1 crate -(`regex-syntax`). More information about the new crate features can be -[found in the docs](https://docs.rs/regex/*/#crate-features). - -Note that while this is a new minor version release, the minimum supported -Rust version for this crate remains at `1.28.0`. - -New features: - -* [FEATURE #474](https://github.com/rust-lang/regex/issues/474): - The `use_std` feature has been deprecated in favor of the `std` feature. - The `use_std` feature will be removed in regex 2. Until then, `use_std` will - remain as an alias for the `std` feature. -* [FEATURE #583](https://github.com/rust-lang/regex/issues/583): - Add a substantial number of crate features shrinking `regex`. - - -1.2.1 (2019-08-03) -================== -This release does a bit of house cleaning. Namely: - -* This repository is now using rustfmt. -* License headers have been removed from all files, in following suit with the - Rust project. -* Teddy has been removed from the `regex` crate, and is now part of the - `aho-corasick` crate. - [See `aho-corasick`'s new `packed` submodule for details](https://docs.rs/aho-corasick/0.7.6/aho_corasick/packed/index.html). -* The `utf8-ranges` crate has been deprecated, with its functionality moving - into the - [`utf8` sub-module of `regex-syntax`](https://docs.rs/regex-syntax/0.6.11/regex_syntax/utf8/index.html). -* The `ucd-util` dependency has been dropped, in favor of implementing what - little we need inside of `regex-syntax` itself. - -In general, this is part of an ongoing (long term) effort to make optimizations -in the regex engine easier to reason about. The current code is too convoluted, -and thus it is very easy to introduce new bugs. This simplification effort is -the primary motivation behind re-working the `aho-corasick` crate to not only -bundle algorithms like Teddy, but to also provide regex-like match semantics -automatically. - -Moving forward, the plan is to join up with the `bstr` and `regex-automata` -crates, with the former providing more sophisticated substring search -algorithms (thereby deleting existing code in `regex`) and the latter providing -ahead-of-time compiled DFAs for cases where they are inexpensive to compute. - - -1.2.0 (2019-07-20) -================== -This release updates regex's minimum supported Rust version to 1.28, which was -release almost 1 year ago. This release also updates regex's Unicode data -tables to 12.1.0. - - -1.1.9 (2019-07-06) -================== -This release contains a bug fix that caused regex's tests to fail, due to a -dependency on an unreleased behavior in regex-syntax. - -* [BUG #593](https://github.com/rust-lang/regex/issues/593): - Move an integration-style test on error messages into regex-syntax. - - -1.1.8 (2019-07-04) -================== -This release contains a few small internal refactorings. One of which fixes -an instance of undefined behavior in a part of the SIMD code. - -Bug fixes: - -* [BUG #545](https://github.com/rust-lang/regex/issues/545): - Improves error messages when a repetition operator is used without a number. -* [BUG #588](https://github.com/rust-lang/regex/issues/588): - Removes use of a repr(Rust) union used for type punning in the Teddy matcher. 
-* [BUG #591](https://github.com/rust-lang/regex/issues/591): - Update docs for running benchmarks and improve failure modes. - - -1.1.7 (2019-06-09) -================== -This release fixes up a few warnings as a result of recent deprecations. - - -1.1.6 (2019-04-16) -================== -This release fixes a regression introduced by a bug fix (for -[BUG #557](https://github.com/rust-lang/regex/issues/557)) which could cause -the regex engine to enter an infinite loop. This bug was originally -[reported against ripgrep](https://github.com/BurntSushi/ripgrep/issues/1247). - - -1.1.5 (2019-04-01) -================== -This release fixes a bug in regex's dependency specification where it requires -a newer version of regex-syntax, but this wasn't communicated correctly in the -Cargo.toml. This would have been caught by a minimal version check, but this -check was disabled because the `rand` crate itself advertises incorrect -dependency specifications. - -Bug fixes: - -* [BUG #570](https://github.com/rust-lang/regex/pull/570): - Fix regex-syntax minimal version. - - -1.1.4 (2019-03-31) -================== -This release fixes a backwards compatibility regression where Regex was no -longer UnwindSafe. This was caused by the upgrade to aho-corasick 0.7, whose -AhoCorasick type was itself not UnwindSafe. This has been fixed in aho-corasick -0.7.4, which we now require. - -Bug fixes: - -* [BUG #568](https://github.com/rust-lang/regex/pull/568): - Fix an API regression where Regex was no longer UnwindSafe. - - -1.1.3 (2019-03-30) -================== -This releases fixes a few bugs and adds a performance improvement when a regex -is a simple alternation of literals. - -Performance improvements: - -* [OPT #566](https://github.com/rust-lang/regex/pull/566): - Upgrades `aho-corasick` to 0.7 and uses it for `foo|bar|...|quux` regexes. - -Bug fixes: - -* [BUG #527](https://github.com/rust-lang/regex/issues/527): - Fix a bug where the parser would panic on patterns like `((?x))`. -* [BUG #555](https://github.com/rust-lang/regex/issues/555): - Fix a bug where the parser would panic on patterns like `(?m){1,1}`. -* [BUG #557](https://github.com/rust-lang/regex/issues/557): - Fix a bug where captures could lead to an incorrect match. - - -1.1.2 (2019-02-27) -================== -This release fixes a bug found in the fix introduced in 1.1.1. - -Bug fixes: - -* [BUG edf45e6f](https://github.com/rust-lang/regex/commit/edf45e6f): - Fix bug introduced in reverse suffix literal matcher in the 1.1.1 release. - - -1.1.1 (2019-02-27) -================== -This is a small release with one fix for a bug caused by literal optimizations. - -Bug fixes: - -* [BUG 661bf53d](https://github.com/rust-lang/regex/commit/661bf53d): - Fixes a bug in the reverse suffix literal optimization. This was originally - reported - [against ripgrep](https://github.com/BurntSushi/ripgrep/issues/1203). - - -1.1.0 (2018-11-30) -================== -This is a small release with a couple small enhancements. This release also -increases the minimal supported Rust version (MSRV) to 1.24.1 (from 1.20.0). In -accordance with this crate's MSRV policy, this release bumps the minor version -number. - -Performance improvements: - -* [OPT #511](https://github.com/rust-lang/regex/pull/511), - [OPT #540](https://github.com/rust-lang/regex/pull/540): - Improve lazy DFA construction for large regex sets. - -New features: - -* [FEATURE #538](https://github.com/rust-lang/regex/pull/538): - Add Emoji and "break" Unicode properties. See [UNICODE.md](UNICODE.md). 
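A brief sketch of the Emoji and "break" Unicode properties from the feature list above; the exact property names accepted inside `\p{...}` are an assumption here, with UNICODE.md being the definitive reference:

```rust
use regex::Regex;

fn main() {
    // The Emoji binary property (assumed spelling).
    let emoji = Regex::new(r"\p{Emoji}").unwrap();
    assert!(emoji.is_match("🙂"));

    // One of the "break" enumerated properties, using loose name/value matching
    // (assumed spelling for the Word_Break property and its Katakana value).
    let katakana = Regex::new(r"\p{wb=Katakana}+").unwrap();
    assert!(katakana.is_match("カタカナ"));
}
```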
- -Bug fixes: - -* [BUG #530](https://github.com/rust-lang/regex/pull/530): - Add Unicode license (for data tables). -* Various typo/doc fixups. - - -1.0.6 (2018-11-06) -================== -This is a small release. - -Performance improvements: - -* [OPT #513](https://github.com/rust-lang/regex/pull/513): - Improve performance of compiling large Unicode classes by 8-10%. - -Bug fixes: - -* [BUG #533](https://github.com/rust-lang/regex/issues/533): - Fix definition of `[[:blank:]]` class that regressed in `regex-syntax 0.5`. - - -1.0.5 (2018-09-06) -================== -This is a small release with an API enhancement. - -New features: - -* [FEATURE #509](https://github.com/rust-lang/regex/pull/509): - Generalize impls of the `Replacer` trait. - - -1.0.4 (2018-08-25) -================== -This is a small release that bumps the quickcheck dependency. - - -1.0.3 (2018-08-24) -================== -This is a small bug fix release. - -Bug fixes: - -* [BUG #504](https://github.com/rust-lang/regex/pull/504): - Fix for Cargo's "minimal version" support. -* [BUG 1e39165f](https://github.com/rust-lang/regex/commit/1e39165f): - Fix doc examples for byte regexes. - - -1.0.2 (2018-07-18) -================== -This release exposes some new lower level APIs on `Regex` that permit -amortizing allocation and controlling the location at which a search is -performed in a more granular way. Most users of the regex crate will not -need or want to use these APIs. - -New features: - -* [FEATURE #493](https://github.com/rust-lang/regex/pull/493): - Add a few lower level APIs for amortizing allocation and more fine-grained - searching. - -Bug fixes: - -* [BUG 3981d2ad](https://github.com/rust-lang/regex/commit/3981d2ad): - Correct outdated documentation on `RegexBuilder::dot_matches_new_line`. -* [BUG 7ebe4ae0](https://github.com/rust-lang/regex/commit/7ebe4ae0): - Correct outdated documentation on `Parser::allow_invalid_utf8` in the - `regex-syntax` crate. -* [BUG 24c7770b](https://github.com/rust-lang/regex/commit/24c7770b): - Fix a bug in the HIR printer where it wouldn't correctly escape meta - characters in character classes. - - -1.0.1 (2018-06-19) -================== -This release upgrades regex's Unicode tables to Unicode 11, and enables SIMD -optimizations automatically on Rust stable (1.27 or newer). - -New features: - -* [FEATURE #486](https://github.com/rust-lang/regex/pull/486): - Implement `size_hint` on `RegexSet` match iterators. -* [FEATURE #488](https://github.com/rust-lang/regex/pull/488): - Update Unicode tables for Unicode 11. -* [FEATURE #490](https://github.com/rust-lang/regex/pull/490): - SIMD optimizations are now enabled automatically in Rust stable, for versions - 1.27 and up. No compilation flags or features need to be set. CPU support - SIMD is detected automatically at runtime. - -Bug fixes: - -* [BUG #482](https://github.com/rust-lang/regex/pull/482): - Present a better compilation error when the `use_std` feature isn't used. - - -1.0.0 (2018-05-01) -================== -This release marks the 1.0 release of regex. - -While this release includes some breaking changes, most users of older versions -of the regex library should be able to migrate to 1.0 by simply bumping the -version number. The important changes are as follows: - -* We adopt Rust 1.20 as the new minimum supported version of Rust for regex. - We also tentatively adopt a policy that permits bumping the minimum supported - version of Rust in minor version releases of regex, but no patch releases. 
- That is, with respect to semver, we do not strictly consider bumping the - minimum version of Rust to be a breaking change, but adopt a conservative - stance as a compromise. -* Octal syntax in regular expressions has been disabled by default. This - permits better error messages that inform users that backreferences aren't - available. Octal syntax can be re-enabled via the corresponding option on - `RegexBuilder`. -* `(?-u:\B)` is no longer allowed in Unicode regexes since it can match at - invalid UTF-8 code unit boundaries. `(?-u:\b)` is still allowed in Unicode - regexes. -* The `From` impl has been removed. This formally removes - the public dependency on `regex-syntax`. -* A new feature, `use_std`, has been added and enabled by default. Disabling - the feature will result in a compilation error. In the future, this may - permit us to support `no_std` environments (w/ `alloc`) in a backwards - compatible way. - -For more information and discussion, please see -[1.0 release tracking issue](https://github.com/rust-lang/regex/issues/457). - - -0.2.11 (2018-05-01) -=================== -This release primarily contains bug fixes. Some of them resolve bugs where -the parser could panic. - -New features: - -* [FEATURE #459](https://github.com/rust-lang/regex/pull/459): - Include C++'s standard regex library and Boost's regex library in the - benchmark harness. We now include D/libphobos, C++/std, C++/boost, Oniguruma, - PCRE1, PCRE2, RE2 and Tcl in the harness. - -Bug fixes: - -* [BUG #445](https://github.com/rust-lang/regex/issues/445): - Clarify order of indices returned by RegexSet match iterator. -* [BUG #461](https://github.com/rust-lang/regex/issues/461): - Improve error messages for invalid regexes like `[\d-a]`. -* [BUG #464](https://github.com/rust-lang/regex/issues/464): - Fix a bug in the error message pretty printer that could cause a panic when - a regex contained a literal `\n` character. -* [BUG #465](https://github.com/rust-lang/regex/issues/465): - Fix a panic in the parser that was caused by applying a repetition operator - to `(?flags)`. -* [BUG #466](https://github.com/rust-lang/regex/issues/466): - Fix a bug where `\pC` was not recognized as an alias for `\p{Other}`. -* [BUG #470](https://github.com/rust-lang/regex/pull/470): - Fix a bug where literal searches did more work than necessary for anchored - regexes. - - -0.2.10 (2018-03-16) -=================== -This release primarily updates the regex crate to changes made in `std::arch` -on nightly Rust. - -New features: - -* [FEATURE #458](https://github.com/rust-lang/regex/pull/458): - The `Hir` type in `regex-syntax` now has a printer. - - -0.2.9 (2018-03-12) -================== -This release introduces a new nightly only feature, `unstable`, which enables -SIMD optimizations for certain types of regexes. No additional compile time -options are necessary, and the regex crate will automatically choose the -best CPU features at run time. As a result, the `simd` (nightly only) crate -dependency has been dropped. - -New features: - -* [FEATURE #456](https://github.com/rust-lang/regex/pull/456): - The regex crate now includes AVX2 optimizations in addition to the extant - SSSE3 optimization. - -Bug fixes: - -* [BUG #455](https://github.com/rust-lang/regex/pull/455): - Fix a bug where `(?x)[ / - ]` failed to parse. - - -0.2.8 (2018-03-12) -================== -Bug fixes: - -* [BUG #454](https://github.com/rust-lang/regex/pull/454): - Fix a bug in the nest limit checker being too aggressive. 
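Since the 1.0.0 notes above mention that octal escapes are rejected by default but can be re-enabled through `RegexBuilder`, here is a hedged sketch of that opt-in (it assumes the builder's `octal` toggle is the option being referred to):

```rust
use regex::{Regex, RegexBuilder};

fn main() {
    // Rejected by default since 1.0, so `\1`-style escapes can produce a
    // clearer "backreferences are not supported" error.
    assert!(Regex::new(r"\141").is_err());

    // Re-enable octal syntax via the builder.
    let re = RegexBuilder::new(r"\141").octal(true).build().unwrap();
    assert!(re.is_match("a")); // 0o141 is 'a'
}
```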
- - -0.2.7 (2018-03-07) -================== -This release includes a ground-up rewrite of the regex-syntax crate, which has -been in development for over a year. -731 -New features: - -* Error messages for invalid regexes have been greatly improved. You get these - automatically; you don't need to do anything. In addition to better - formatting, error messages will now explicitly call out the use of look - around. When regex 1.0 is released, this will happen for backreferences as - well. -* Full support for intersection, difference and symmetric difference of - character classes. These can be used via the `&&`, `--` and `~~` binary - operators within classes. -* A Unicode Level 1 conformant implementation of `\p{..}` character classes. - Things like `\p{scx:Hira}`, `\p{age:3.2}` or `\p{Changes_When_Casefolded}` - now work. All property name and value aliases are supported, and properties - are selected via loose matching. e.g., `\p{Greek}` is the same as - `\p{G r E e K}`. -* A new `UNICODE.md` document has been added to this repository that - exhaustively documents support for UTS#18. -* Empty sub-expressions are now permitted in most places. That is, `()+` is - now a valid regex. -* Almost everything in regex-syntax now uses constant stack space, even when - performing analysis that requires structural induction. This reduces the risk - of a user provided regular expression causing a stack overflow. -* [FEATURE #174](https://github.com/rust-lang/regex/issues/174): - The `Ast` type in `regex-syntax` now contains span information. -* [FEATURE #424](https://github.com/rust-lang/regex/issues/424): - Support `\u`, `\u{...}`, `\U` and `\U{...}` syntax for specifying code points - in a regular expression. -* [FEATURE #449](https://github.com/rust-lang/regex/pull/449): - Add a `Replace::by_ref` adapter for use of a replacer without consuming it. - -Bug fixes: - -* [BUG #446](https://github.com/rust-lang/regex/issues/446): - We re-enable the Boyer-Moore literal matcher. - - -0.2.6 (2018-02-08) -================== -Bug fixes: - -* [BUG #446](https://github.com/rust-lang/regex/issues/446): - Fixes a bug in the new Boyer-Moore searcher that results in a match failure. - We fix this bug by temporarily disabling Boyer-Moore. - - -0.2.5 (2017-12-30) -================== -Bug fixes: - -* [BUG #437](https://github.com/rust-lang/regex/issues/437): - Fixes a bug in the new Boyer-Moore searcher that results in a panic. - - -0.2.4 (2017-12-30) -================== -New features: - -* [FEATURE #348](https://github.com/rust-lang/regex/pull/348): - Improve performance for capture searches on anchored regex. - (Contributed by @ethanpailes. Nice work!) -* [FEATURE #419](https://github.com/rust-lang/regex/pull/419): - Expand literal searching to include Tuned Boyer-Moore in some cases. - (Contributed by @ethanpailes. Nice work!) - -Bug fixes: - -* [BUG](https://github.com/rust-lang/regex/pull/436): - The regex compiler plugin has been removed. -* [BUG](https://github.com/rust-lang/regex/pull/436): - `simd` has been bumped to `0.2.1`, which fixes a Rust nightly build error. -* [BUG](https://github.com/rust-lang/regex/pull/436): - Bring the benchmark harness up to date. - - -0.2.3 (2017-11-30) -================== -New features: - -* [FEATURE #374](https://github.com/rust-lang/regex/pull/374): - Add `impl From for &str`. -* [FEATURE #380](https://github.com/rust-lang/regex/pull/380): - Derive `Clone` and `PartialEq` on `Error`. -* [FEATURE #400](https://github.com/rust-lang/regex/pull/400): - Update to Unicode 10. 
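As an aside, the character class and escape syntax described in the 0.2.7 entry above can be illustrated with a short, hedged sketch (nothing beyond the set operators and `\u{...}` escapes named in that entry is assumed):

```rust
use regex::Regex;

fn main() {
    // Intersection of the Greek script class and Unicode letters (0.2.7 / 0.2.2).
    let greek_letters = Regex::new(r"[\p{Greek}&&\pL]+").unwrap();
    assert!(greek_letters.is_match("λόγος"));

    // Difference: decimal digits except `4`.
    let digits_except_4 = Regex::new(r"[0-9--4]+").unwrap();
    assert!(digits_except_4.is_match("123"));
    assert!(!digits_except_4.is_match("4"));

    // `\u{...}` code point escapes, also added in 0.2.7.
    let snowman = Regex::new(r"\u{2603}").unwrap();
    assert!(snowman.is_match("☃"));
}
```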
- -Bug fixes: - -* [BUG #375](https://github.com/rust-lang/regex/issues/375): - Fix a bug that prevented the bounded backtracker from terminating. -* [BUG #393](https://github.com/rust-lang/regex/issues/393), - [BUG #394](https://github.com/rust-lang/regex/issues/394): - Fix bug with `replace` methods for empty matches. - - -0.2.2 (2017-05-21) -================== -New features: - -* [FEATURE #341](https://github.com/rust-lang/regex/issues/341): - Support nested character classes and intersection operation. - For example, `[\p{Greek}&&\pL]` matches greek letters and - `[[0-9]&&[^4]]` matches every decimal digit except `4`. - (Much thanks to @robinst, who contributed this awesome feature.) - -Bug fixes: - -* [BUG #321](https://github.com/rust-lang/regex/issues/321): - Fix bug in literal extraction and UTF-8 decoding. -* [BUG #326](https://github.com/rust-lang/regex/issues/326): - Add documentation tip about the `(?x)` flag. -* [BUG #333](https://github.com/rust-lang/regex/issues/333): - Show additional replacement example using curly braces. -* [BUG #334](https://github.com/rust-lang/regex/issues/334): - Fix bug when resolving captures after a match. -* [BUG #338](https://github.com/rust-lang/regex/issues/338): - Add example that uses `Captures::get` to API documentation. -* [BUG #353](https://github.com/rust-lang/regex/issues/353): - Fix RegexSet bug that caused match failure in some cases. -* [BUG #354](https://github.com/rust-lang/regex/pull/354): - Fix panic in parser when `(?x)` is used. -* [BUG #358](https://github.com/rust-lang/regex/issues/358): - Fix literal optimization bug with RegexSet. -* [BUG #359](https://github.com/rust-lang/regex/issues/359): - Fix example code in README. -* [BUG #365](https://github.com/rust-lang/regex/pull/365): - Fix bug in `rure_captures_len` in the C binding. -* [BUG #367](https://github.com/rust-lang/regex/issues/367): - Fix byte class bug that caused a panic. - - -0.2.1 -===== -One major bug with `replace_all` has been fixed along with a couple of other -touch-ups. - -* [BUG #312](https://github.com/rust-lang/regex/issues/312): - Fix documentation for `NoExpand` to reference correct lifetime parameter. -* [BUG #314](https://github.com/rust-lang/regex/issues/314): - Fix a bug with `replace_all` when replacing a match with the empty string. -* [BUG #316](https://github.com/rust-lang/regex/issues/316): - Note a missing breaking change from the `0.2.0` CHANGELOG entry. - (`RegexBuilder::compile` was renamed to `RegexBuilder::build`.) -* [BUG #324](https://github.com/rust-lang/regex/issues/324): - Compiling `regex` should only require one version of `memchr` crate. - - -0.2.0 -===== -This is a new major release of the regex crate, and is an implementation of the -[regex 1.0 RFC](https://github.com/rust-lang/rfcs/blob/master/text/1620-regex-1.0.md). -We are releasing a `0.2` first, and if there are no major problems, we will -release a `1.0` shortly. For `0.2`, the minimum *supported* Rust version is -1.12. - -There are a number of **breaking changes** in `0.2`. They are split into two -types. The first type correspond to breaking changes in regular expression -syntax. The second type correspond to breaking changes in the API. - -Breaking changes for regex syntax: - -* POSIX character classes now require double bracketing. Previously, the regex - `[:upper:]` would parse as the `upper` POSIX character class. Now it parses - as the character class containing the characters `:upper:`. The fix to this - change is to use `[[:upper:]]` instead. 
Note that variants like - `[[:upper:][:blank:]]` continue to work. -* The character `[` must always be escaped inside a character class. -* The characters `&`, `-` and `~` must be escaped if any one of them are - repeated consecutively. For example, `[&]`, `[\&]`, `[\&\&]`, `[&-&]` are all - equivalent while `[&&]` is illegal. (The motivation for this and the prior - change is to provide a backwards compatible path for adding character class - set notation.) -* A `bytes::Regex` now has Unicode mode enabled by default (like the main - `Regex` type). This means regexes compiled with `bytes::Regex::new` that - don't have the Unicode flag set should add `(?-u)` to recover the original - behavior. - -Breaking changes for the regex API: - -* `find` and `find_iter` now **return `Match` values instead of - `(usize, usize)`.** `Match` values have `start` and `end` methods, which - return the match offsets. `Match` values also have an `as_str` method, - which returns the text of the match itself. -* The `Captures` type now only provides a single iterator over all capturing - matches, which should replace uses of `iter` and `iter_pos`. Uses of - `iter_named` should use the `capture_names` method on `Regex`. -* The `at` method on the `Captures` type has been renamed to `get`, and it - now returns a `Match`. Similarly, the `name` method on `Captures` now returns - a `Match`. -* The `replace` methods now return `Cow` values. The `Cow::Borrowed` variant - is returned when no replacements are made. -* The `Replacer` trait has been completely overhauled. This should only - impact clients that implement this trait explicitly. Standard uses of - the `replace` methods should continue to work unchanged. If you implement - the `Replacer` trait, please consult the new documentation. -* The `quote` free function has been renamed to `escape`. -* The `Regex::with_size_limit` method has been removed. It is replaced by - `RegexBuilder::size_limit`. -* The `RegexBuilder` type has switched from owned `self` method receivers to - `&mut self` method receivers. Most uses will continue to work unchanged, but - some code may require naming an intermediate variable to hold the builder. -* The `compile` method on `RegexBuilder` has been renamed to `build`. -* The free `is_match` function has been removed. It is replaced by compiling - a `Regex` and calling its `is_match` method. -* The `PartialEq` and `Eq` impls on `Regex` have been dropped. If you relied - on these impls, the fix is to define a wrapper type around `Regex`, impl - `Deref` on it and provide the necessary impls. -* The `is_empty` method on `Captures` has been removed. This always returns - `false`, so its use is superfluous. -* The `Syntax` variant of the `Error` type now contains a string instead of - a `regex_syntax::Error`. If you were examining syntax errors more closely, - you'll need to explicitly use the `regex_syntax` crate to re-parse the regex. -* The `InvalidSet` variant of the `Error` type has been removed since it is - no longer used. -* Most of the iterator types have been renamed to match conventions. If you - were using these iterator types explicitly, please consult the documentation - for its new name. For example, `RegexSplits` has been renamed to `Split`. - -A number of bugs have been fixed: - -* [BUG #151](https://github.com/rust-lang/regex/issues/151): - The `Replacer` trait has been changed to permit the caller to control - allocation. -* [BUG #165](https://github.com/rust-lang/regex/issues/165): - Remove the free `is_match` function. 
-* [BUG #166](https://github.com/rust-lang/regex/issues/166): - Expose more knobs (available in `0.1`) and remove `with_size_limit`. -* [BUG #168](https://github.com/rust-lang/regex/issues/168): - Iterators produced by `Captures` now have the correct lifetime parameters. -* [BUG #175](https://github.com/rust-lang/regex/issues/175): - Fix a corner case in the parsing of POSIX character classes. -* [BUG #178](https://github.com/rust-lang/regex/issues/178): - Drop the `PartialEq` and `Eq` impls on `Regex`. -* [BUG #179](https://github.com/rust-lang/regex/issues/179): - Remove `is_empty` from `Captures` since it always returns false. -* [BUG #276](https://github.com/rust-lang/regex/issues/276): - Position of named capture can now be retrieved from a `Captures`. -* [BUG #296](https://github.com/rust-lang/regex/issues/296): - Remove winapi/kernel32-sys dependency on UNIX. -* [BUG #307](https://github.com/rust-lang/regex/issues/307): - Fix error on emscripten. - - -0.1.80 -====== -* [PR #292](https://github.com/rust-lang/regex/pull/292): - Fixes bug #291, which was introduced by PR #290. - -0.1.79 -====== -* Require regex-syntax 0.3.8. - -0.1.78 -====== -* [PR #290](https://github.com/rust-lang/regex/pull/290): - Fixes bug #289, which caused some regexes with a certain combination - of literals to match incorrectly. - -0.1.77 -====== -* [PR #281](https://github.com/rust-lang/regex/pull/281): - Fixes bug #280 by disabling all literal optimizations when a pattern - is partially anchored. - -0.1.76 -====== -* Tweak criteria for using the Teddy literal matcher. - -0.1.75 -====== -* [PR #275](https://github.com/rust-lang/regex/pull/275): - Improves match verification performance in the Teddy SIMD searcher. -* [PR #278](https://github.com/rust-lang/regex/pull/278): - Replaces slow substring loop in the Teddy SIMD searcher with Aho-Corasick. -* Implemented DoubleEndedIterator on regex set match iterators. - -0.1.74 -====== -* Release regex-syntax 0.3.5 with a minor bug fix. -* Fix bug #272. -* Fix bug #277. -* [PR #270](https://github.com/rust-lang/regex/pull/270): - Fixes bugs #264, #268 and an unreported where the DFA cache size could be - drastically underestimated in some cases (leading to high unexpected memory - usage). - -0.1.73 -====== -* Release `regex-syntax 0.3.4`. -* Bump `regex-syntax` dependency version for `regex` to `0.3.4`. - -0.1.72 -====== -* [PR #262](https://github.com/rust-lang/regex/pull/262): - Fixes a number of small bugs caught by fuzz testing (AFL). - -0.1.71 -====== -* [PR #236](https://github.com/rust-lang/regex/pull/236): - Fix a bug in how suffix literals were extracted, which could lead - to invalid match behavior in some cases. - -0.1.70 -====== -* [PR #231](https://github.com/rust-lang/regex/pull/231): - Add SIMD accelerated multiple pattern search. -* [PR #228](https://github.com/rust-lang/regex/pull/228): - Reintroduce the reverse suffix literal optimization. -* [PR #226](https://github.com/rust-lang/regex/pull/226): - Implements NFA state compression in the lazy DFA. -* [PR #223](https://github.com/rust-lang/regex/pull/223): - A fully anchored RegexSet can now short-circuit. - -0.1.69 -====== -* [PR #216](https://github.com/rust-lang/regex/pull/216): - Tweak the threshold for running backtracking. -* [PR #217](https://github.com/rust-lang/regex/pull/217): - Add upper limit (from the DFA) to capture search (for the NFA). -* [PR #218](https://github.com/rust-lang/regex/pull/218): - Add rure, a C API. 
- -0.1.68 -====== -* [PR #210](https://github.com/rust-lang/regex/pull/210): - Fixed a performance bug in `bytes::Regex::replace` where `extend` was used - instead of `extend_from_slice`. -* [PR #211](https://github.com/rust-lang/regex/pull/211): - Fixed a bug in the handling of word boundaries in the DFA. -* [PR #213](https://github.com/rust-lang/pull/213): - Added RE2 and Tcl to the benchmark harness. Also added a CLI utility from - running regexes using any of the following regex engines: PCRE1, PCRE2, - Oniguruma, RE2, Tcl and of course Rust's own regexes. - -0.1.67 -====== -* [PR #201](https://github.com/rust-lang/regex/pull/201): - Fix undefined behavior in the `regex!` compiler plugin macro. -* [PR #205](https://github.com/rust-lang/regex/pull/205): - More improvements to DFA performance. Competitive with RE2. See PR for - benchmarks. -* [PR #209](https://github.com/rust-lang/regex/pull/209): - Release 0.1.66 was semver incompatible since it required a newer version - of Rust than previous releases. This PR fixes that. (And `0.1.66` was - yanked.) - -0.1.66 -====== -* Speculative support for Unicode word boundaries was added to the DFA. This - should remove the last common case that disqualified use of the DFA. -* An optimization that scanned for suffix literals and then matched the regular - expression in reverse was removed because it had worst case quadratic time - complexity. It was replaced with a more limited optimization where, given any - regex of the form `re$`, it will be matched in reverse from the end of the - haystack. -* [PR #202](https://github.com/rust-lang/regex/pull/202): - The inner loop of the DFA was heavily optimized to improve cache locality - and reduce the overall number of instructions run on each iteration. This - represents the first use of `unsafe` in `regex` (to elide bounds checks). -* [PR #200](https://github.com/rust-lang/regex/pull/200): - Use of the `mempool` crate (which used thread local storage) was replaced - with a faster version of a similar API in @Amanieu's `thread_local` crate. - It should reduce contention when using a regex from multiple threads - simultaneously. -* PCRE2 JIT benchmarks were added. A benchmark comparison can be found - [here](https://gist.github.com/anonymous/14683c01993e91689f7206a18675901b). - (Includes a comparison with PCRE1's JIT and Oniguruma.) -* A bug where word boundaries weren't being matched correctly in the DFA was - fixed. This only affected use of `bytes::Regex`. -* [#160](https://github.com/rust-lang/regex/issues/160): - `Captures` now has a `Debug` impl. diff --git a/vendor/regex/Cargo.lock b/vendor/regex/Cargo.lock deleted file mode 100644 index 5e119bb19d0d98..00000000000000 --- a/vendor/regex/Cargo.lock +++ /dev/null @@ -1,383 +0,0 @@ -# This file is automatically @generated by Cargo. -# It is not intended for manual editing. 
-version = 3 - -[[package]] -name = "aho-corasick" -version = "1.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916" -dependencies = [ - "log", - "memchr", -] - -[[package]] -name = "anyhow" -version = "1.0.100" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a23eb6b1614318a8071c9b2521f36b424b2c83db5eb3a0fead4a6c0809af6e61" - -[[package]] -name = "atty" -version = "0.2.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8" -dependencies = [ - "hermit-abi", - "libc", - "winapi", -] - -[[package]] -name = "bstr" -version = "1.12.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "234113d19d0d7d613b40e86fb654acf958910802bcceab913a4f9e7cda03b1a4" -dependencies = [ - "memchr", - "serde", -] - -[[package]] -name = "cfg-if" -version = "1.0.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2fd1289c04a9ea8cb22300a459a72a385d7c73d3259e2ed7dcb2af674838cfa9" - -[[package]] -name = "doc-comment" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fea41bba32d969b513997752735605054bc0dfa92b4c56bf1189f2e174be7a10" - -[[package]] -name = "env_logger" -version = "0.9.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a12e6657c4c97ebab115a42dcee77225f7f482cdd841cf7088c657a42e9e00e7" -dependencies = [ - "atty", - "humantime", - "log", - "termcolor", -] - -[[package]] -name = "equivalent" -version = "1.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "877a4ace8713b0bcf2a4e7eec82529c029f1d0619886d18145fea96c3ffe5c0f" - -[[package]] -name = "getrandom" -version = "0.2.16" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "335ff9f135e4384c8150d6f27c6daed433577f86b4750418338c01a1a2528592" -dependencies = [ - "cfg-if", - "libc", - "wasi", -] - -[[package]] -name = "hashbrown" -version = "0.16.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5419bdc4f6a9207fbeba6d11b604d481addf78ecd10c11ad51e76c2f6482748d" - -[[package]] -name = "hermit-abi" -version = "0.1.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33" -dependencies = [ - "libc", -] - -[[package]] -name = "humantime" -version = "2.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "135b12329e5e3ce057a9f972339ea52bc954fe1e9358ef27f95e89716fbc5424" - -[[package]] -name = "indexmap" -version = "2.11.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4b0f83760fb341a774ed326568e19f5a863af4a952def8c39f9ab92fd95b88e5" -dependencies = [ - "equivalent", - "hashbrown", -] - -[[package]] -name = "libc" -version = "0.2.177" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2874a2af47a2325c2001a6e6fad9b16a53b802102b528163885171cf92b15976" - -[[package]] -name = "log" -version = "0.4.28" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34080505efa8e45a4b816c349525ebe327ceaa8559756f0356cba97ef3bf7432" - -[[package]] -name = "memchr" -version = "2.7.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f52b00d39961fc5b2736ea853c9cc86238e165017a493d1d5c8eac6bdc4cc273" -dependencies = [ 
- "log", -] - -[[package]] -name = "proc-macro2" -version = "1.0.101" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89ae43fd86e4158d6db51ad8e2b80f313af9cc74f5c0e03ccb87de09998732de" -dependencies = [ - "unicode-ident", -] - -[[package]] -name = "quickcheck" -version = "1.0.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "588f6378e4dd99458b60ec275b4477add41ce4fa9f64dcba6f15adccb19b50d6" -dependencies = [ - "rand", -] - -[[package]] -name = "quote" -version = "1.0.41" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce25767e7b499d1b604768e7cde645d14cc8584231ea6b295e9c9eb22c02e1d1" -dependencies = [ - "proc-macro2", -] - -[[package]] -name = "rand" -version = "0.8.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" -dependencies = [ - "rand_core", -] - -[[package]] -name = "rand_core" -version = "0.6.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" -dependencies = [ - "getrandom", -] - -[[package]] -name = "regex" -version = "1.12.2" -dependencies = [ - "aho-corasick", - "anyhow", - "doc-comment", - "env_logger", - "memchr", - "quickcheck", - "regex-automata", - "regex-syntax", - "regex-test", -] - -[[package]] -name = "regex-automata" -version = "0.4.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5276caf25ac86c8d810222b3dbb938e512c55c6831a10f3e6ed1c93b84041f1c" -dependencies = [ - "aho-corasick", - "log", - "memchr", - "regex-syntax", -] - -[[package]] -name = "regex-syntax" -version = "0.8.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a2d987857b319362043e95f5353c0535c1f58eec5336fdfcf626430af7def58" - -[[package]] -name = "regex-test" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da40f0939bc4c598b4326abdbb363a8987aa43d0526e5624aefcf3ed90344e62" -dependencies = [ - "anyhow", - "bstr", - "serde", - "toml", -] - -[[package]] -name = "serde" -version = "1.0.228" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a8e94ea7f378bd32cbbd37198a4a91436180c5bb472411e48b5ec2e2124ae9e" -dependencies = [ - "serde_core", - "serde_derive", -] - -[[package]] -name = "serde_core" -version = "1.0.228" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41d385c7d4ca58e59fc732af25c3983b67ac852c1a25000afe1175de458b67ad" -dependencies = [ - "serde_derive", -] - -[[package]] -name = "serde_derive" -version = "1.0.228" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d540f220d3187173da220f885ab66608367b6574e925011a9353e4badda91d79" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "serde_spanned" -version = "0.6.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf41e0cfaf7226dca15e8197172c295a782857fcb97fad1808a166870dee75a3" -dependencies = [ - "serde", -] - -[[package]] -name = "syn" -version = "2.0.106" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ede7c438028d4436d71104916910f5bb611972c5cfd7f89b8300a8186e6fada6" -dependencies = [ - "proc-macro2", - "quote", - "unicode-ident", -] - -[[package]] -name = "termcolor" -version = "1.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"06794f8f6c5c898b3275aebefa6b8a1cb24cd2c6c79397ab15774837a0bc5755" -dependencies = [ - "winapi-util", -] - -[[package]] -name = "toml" -version = "0.8.23" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc1beb996b9d83529a9e75c17a1686767d148d70663143c7854d8b4a09ced362" -dependencies = [ - "serde", - "serde_spanned", - "toml_datetime", - "toml_edit", -] - -[[package]] -name = "toml_datetime" -version = "0.6.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22cddaf88f4fbc13c51aebbf5f8eceb5c7c5a9da2ac40a13519eb5b0a0e8f11c" -dependencies = [ - "serde", -] - -[[package]] -name = "toml_edit" -version = "0.22.27" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41fe8c660ae4257887cf66394862d21dbca4a6ddd26f04a3560410406a2f819a" -dependencies = [ - "indexmap", - "serde", - "serde_spanned", - "toml_datetime", - "winnow", -] - -[[package]] -name = "unicode-ident" -version = "1.0.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f63a545481291138910575129486daeaf8ac54aee4387fe7906919f7830c7d9d" - -[[package]] -name = "wasi" -version = "0.11.1+wasi-snapshot-preview1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ccf3ec651a847eb01de73ccad15eb7d99f80485de043efb2f370cd654f4ea44b" - -[[package]] -name = "winapi" -version = "0.3.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" -dependencies = [ - "winapi-i686-pc-windows-gnu", - "winapi-x86_64-pc-windows-gnu", -] - -[[package]] -name = "winapi-i686-pc-windows-gnu" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" - -[[package]] -name = "winapi-util" -version = "0.1.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c2a7b1c03c876122aa43f3020e6c3c3ee5c05081c9a00739faf7503aeba10d22" -dependencies = [ - "windows-sys", -] - -[[package]] -name = "winapi-x86_64-pc-windows-gnu" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" - -[[package]] -name = "windows-link" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0805222e57f7521d6a62e36fa9163bc891acd422f971defe97d64e70d0a4fe5" - -[[package]] -name = "windows-sys" -version = "0.61.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae137229bcbd6cdf0f7b80a31df61766145077ddf49416a728b02cb3921ff3fc" -dependencies = [ - "windows-link", -] - -[[package]] -name = "winnow" -version = "0.7.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21a0236b59786fed61e2a80582dd500fe61f18b5dca67a4a067d0bc9039339cf" -dependencies = [ - "memchr", -] diff --git a/vendor/regex/Cargo.toml b/vendor/regex/Cargo.toml deleted file mode 100644 index 31fd135a8fd905..00000000000000 --- a/vendor/regex/Cargo.toml +++ /dev/null @@ -1,207 +0,0 @@ -# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO -# -# When uploading crates to the registry Cargo will automatically -# "normalize" Cargo.toml files for maximal compatibility -# with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g., crates.io) dependencies. 
-# -# If you are reading this file be aware that the original Cargo.toml -# will likely look very different (and much more reasonable). -# See Cargo.toml.orig for the original contents. - -[package] -edition = "2021" -rust-version = "1.65" -name = "regex" -version = "1.12.2" -authors = [ - "The Rust Project Developers", - "Andrew Gallant ", -] -build = false -exclude = [ - "/fuzz/*", - "/record/*", - "/scripts/*", - "tests/fuzz/*", - "/.github/*", -] -autolib = false -autobins = false -autoexamples = false -autotests = false -autobenches = false -description = """ -An implementation of regular expressions for Rust. This implementation uses -finite automata and guarantees linear time matching on all inputs. -""" -homepage = "https://github.com/rust-lang/regex" -documentation = "https://docs.rs/regex" -readme = "README.md" -categories = ["text-processing"] -license = "MIT OR Apache-2.0" -repository = "https://github.com/rust-lang/regex" - -[package.metadata.docs.rs] -all-features = true -rustdoc-args = [ - "--cfg", - "docsrs_regex", -] - -[features] -default = [ - "std", - "perf", - "unicode", - "regex-syntax/default", -] -logging = [ - "aho-corasick?/logging", - "memchr?/logging", - "regex-automata/logging", -] -pattern = [] -perf = [ - "perf-cache", - "perf-dfa", - "perf-onepass", - "perf-backtrack", - "perf-inline", - "perf-literal", -] -perf-backtrack = ["regex-automata/nfa-backtrack"] -perf-cache = [] -perf-dfa = ["regex-automata/hybrid"] -perf-dfa-full = [ - "regex-automata/dfa-build", - "regex-automata/dfa-search", -] -perf-inline = ["regex-automata/perf-inline"] -perf-literal = [ - "dep:aho-corasick", - "dep:memchr", - "regex-automata/perf-literal", -] -perf-onepass = ["regex-automata/dfa-onepass"] -std = [ - "aho-corasick?/std", - "memchr?/std", - "regex-automata/std", - "regex-syntax/std", -] -unicode = [ - "unicode-age", - "unicode-bool", - "unicode-case", - "unicode-gencat", - "unicode-perl", - "unicode-script", - "unicode-segment", - "regex-automata/unicode", - "regex-syntax/unicode", -] -unicode-age = [ - "regex-automata/unicode-age", - "regex-syntax/unicode-age", -] -unicode-bool = [ - "regex-automata/unicode-bool", - "regex-syntax/unicode-bool", -] -unicode-case = [ - "regex-automata/unicode-case", - "regex-syntax/unicode-case", -] -unicode-gencat = [ - "regex-automata/unicode-gencat", - "regex-syntax/unicode-gencat", -] -unicode-perl = [ - "regex-automata/unicode-perl", - "regex-automata/unicode-word-boundary", - "regex-syntax/unicode-perl", -] -unicode-script = [ - "regex-automata/unicode-script", - "regex-syntax/unicode-script", -] -unicode-segment = [ - "regex-automata/unicode-segment", - "regex-syntax/unicode-segment", -] -unstable = ["pattern"] -use_std = ["std"] - -[lib] -name = "regex" -path = "src/lib.rs" - -[[test]] -name = "integration" -path = "tests/lib.rs" - -[dependencies.aho-corasick] -version = "1.0.0" -optional = true -default-features = false - -[dependencies.memchr] -version = "2.6.0" -optional = true -default-features = false - -[dependencies.regex-automata] -version = "0.4.12" -features = [ - "alloc", - "syntax", - "meta", - "nfa-pikevm", -] -default-features = false - -[dependencies.regex-syntax] -version = "0.8.5" -default-features = false - -[dev-dependencies.anyhow] -version = "1.0.69" - -[dev-dependencies.doc-comment] -version = "0.3" - -[dev-dependencies.env_logger] -version = "0.9.3" -features = [ - "atty", - "humantime", - "termcolor", -] -default-features = false - -[dev-dependencies.quickcheck] -version = "1.0.3" -default-features = false - 
-[dev-dependencies.regex-test] -version = "0.1.0" - -[lints.rust.unexpected_cfgs] -level = "allow" -priority = 0 -check-cfg = ["cfg(docsrs_regex)"] - -[profile.bench] -debug = 2 - -[profile.dev] -opt-level = 3 -debug = 2 - -[profile.release] -debug = 2 - -[profile.test] -opt-level = 3 -debug = 2 diff --git a/vendor/regex/Cross.toml b/vendor/regex/Cross.toml deleted file mode 100644 index 5415e7a45195f6..00000000000000 --- a/vendor/regex/Cross.toml +++ /dev/null @@ -1,7 +0,0 @@ -[build.env] -passthrough = [ - "RUST_BACKTRACE", - "RUST_LOG", - "REGEX_TEST", - "REGEX_TEST_VERBOSE", -] diff --git a/vendor/regex/LICENSE-APACHE b/vendor/regex/LICENSE-APACHE deleted file mode 100644 index 16fe87b06e802f..00000000000000 --- a/vendor/regex/LICENSE-APACHE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - -2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - -3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - -4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - -5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - -6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - -8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - -Copyright [yyyy] [name of copyright owner] - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. diff --git a/vendor/regex/LICENSE-MIT b/vendor/regex/LICENSE-MIT deleted file mode 100644 index 39d4bdb5acd313..00000000000000 --- a/vendor/regex/LICENSE-MIT +++ /dev/null @@ -1,25 +0,0 @@ -Copyright (c) 2014 The Rust Project Developers - -Permission is hereby granted, free of charge, to any -person obtaining a copy of this software and associated -documentation files (the "Software"), to deal in the -Software without restriction, including without -limitation the rights to use, copy, modify, merge, -publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software -is furnished to do so, subject to the following -conditions: - -The above copyright notice and this permission notice -shall be included in all copies or substantial portions -of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -DEALINGS IN THE SOFTWARE. diff --git a/vendor/regex/README.md b/vendor/regex/README.md deleted file mode 100644 index 0af313dea4676f..00000000000000 --- a/vendor/regex/README.md +++ /dev/null @@ -1,336 +0,0 @@ -regex -===== -This crate provides routines for searching strings for matches of a [regular -expression] (aka "regex"). The regex syntax supported by this crate is similar -to other regex engines, but it lacks several features that are not known how to -implement efficiently. This includes, but is not limited to, look-around and -backreferences. In exchange, all regex searches in this crate have worst case -`O(m * n)` time complexity, where `m` is proportional to the size of the regex -and `n` is proportional to the size of the string being searched. 
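As a minimal sketch of the trade-off stated in the deleted README's introduction (assuming the crate's public `Regex` API as shown elsewhere in that README), unsupported constructs such as backreferences are rejected when the pattern is compiled, rather than supported with slower matching semantics:

```rust
use regex::Regex;

fn main() {
    // Supported syntax always gets worst-case O(m * n) searching.
    let re = Regex::new(r"\d{4}-\d{2}-\d{2}").unwrap();
    assert!(re.is_match("2010-03-14"));

    // Backreferences (and look-around) are outside the supported syntax,
    // so compilation fails instead of falling back to a slower engine.
    assert!(Regex::new(r"(\w+) \1").is_err());
}
```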
- -[regular expression]: https://en.wikipedia.org/wiki/Regular_expression - -[![Build status](https://github.com/rust-lang/regex/workflows/ci/badge.svg)](https://github.com/rust-lang/regex/actions) -[![Crates.io](https://img.shields.io/crates/v/regex.svg)](https://crates.io/crates/regex) - -### Documentation - -[Module documentation with examples](https://docs.rs/regex). -The module documentation also includes a comprehensive description of the -syntax supported. - -Documentation with examples for the various matching functions and iterators -can be found on the -[`Regex` type](https://docs.rs/regex/*/regex/struct.Regex.html). - -### Usage - -To bring this crate into your repository, either add `regex` to your -`Cargo.toml`, or run `cargo add regex`. - -Here's a simple example that matches a date in YYYY-MM-DD format and prints the -year, month and day: - -```rust -use regex::Regex; - -fn main() { - let re = Regex::new(r"(?x) -(?P\d{4}) # the year -- -(?P\d{2}) # the month -- -(?P\d{2}) # the day -").unwrap(); - - let caps = re.captures("2010-03-14").unwrap(); - assert_eq!("2010", &caps["year"]); - assert_eq!("03", &caps["month"]); - assert_eq!("14", &caps["day"]); -} -``` - -If you have lots of dates in text that you'd like to iterate over, then it's -easy to adapt the above example with an iterator: - -```rust -use regex::Regex; - -fn main() { - let re = Regex::new(r"(\d{4})-(\d{2})-(\d{2})").unwrap(); - let hay = "On 2010-03-14, foo happened. On 2014-10-14, bar happened."; - - let mut dates = vec![]; - for (_, [year, month, day]) in re.captures_iter(hay).map(|c| c.extract()) { - dates.push((year, month, day)); - } - assert_eq!(dates, vec![ - ("2010", "03", "14"), - ("2014", "10", "14"), - ]); -} -``` - -### Usage: Avoid compiling the same regex in a loop - -It is an anti-pattern to compile the same regular expression in a loop since -compilation is typically expensive. (It takes anywhere from a few microseconds -to a few **milliseconds** depending on the size of the regex.) Not only is -compilation itself expensive, but this also prevents optimizations that reuse -allocations internally to the matching engines. - -In Rust, it can sometimes be a pain to pass regular expressions around if -they're used from inside a helper function. Instead, we recommend using -[`std::sync::LazyLock`], or the [`once_cell`] crate, -if you can't use the standard library. - -This example shows how to use `std::sync::LazyLock`: - -```rust -use std::sync::LazyLock; - -use regex::Regex; - -fn some_helper_function(haystack: &str) -> bool { - static RE: LazyLock = LazyLock::new(|| Regex::new(r"...").unwrap()); - RE.is_match(haystack) -} - -fn main() { - assert!(some_helper_function("abc")); - assert!(!some_helper_function("ac")); -} -``` - -Specifically, in this example, the regex will be compiled when it is used for -the first time. On subsequent uses, it will reuse the previous compilation. - -[`std::sync::LazyLock`]: https://doc.rust-lang.org/std/sync/struct.LazyLock.html -[`once_cell`]: https://crates.io/crates/once_cell - -### Usage: match regular expressions on `&[u8]` - -The main API of this crate (`regex::Regex`) requires the caller to pass a -`&str` for searching. In Rust, an `&str` is required to be valid UTF-8, which -means the main API can't be used for searching arbitrary bytes. - -To match on arbitrary bytes, use the `regex::bytes::Regex` API. The API is -identical to the main API, except that it takes an `&[u8]` to search on instead -of an `&str`. 
The `&[u8]` APIs also permit disabling Unicode mode in the regex -even when the pattern would match invalid UTF-8. For example, `(?-u:.)` is -not allowed in `regex::Regex` but is allowed in `regex::bytes::Regex` since -`(?-u:.)` matches any byte except for `\n`. Conversely, `.` will match the -UTF-8 encoding of any Unicode scalar value except for `\n`. - -This example shows how to find all null-terminated strings in a slice of bytes: - -```rust -use regex::bytes::Regex; - -let re = Regex::new(r"(?-u)(?[^\x00]+)\x00").unwrap(); -let text = b"foo\xFFbar\x00baz\x00"; - -// Extract all of the strings without the null terminator from each match. -// The unwrap is OK here since a match requires the `cstr` capture to match. -let cstrs: Vec<&[u8]> = - re.captures_iter(text) - .map(|c| c.name("cstr").unwrap().as_bytes()) - .collect(); -assert_eq!(vec![&b"foo\xFFbar"[..], &b"baz"[..]], cstrs); -``` - -Notice here that the `[^\x00]+` will match any *byte* except for `NUL`, -including bytes like `\xFF` which are not valid UTF-8. When using the main API, -`[^\x00]+` would instead match any valid UTF-8 sequence except for `NUL`. - -### Usage: match multiple regular expressions simultaneously - -This demonstrates how to use a `RegexSet` to match multiple (possibly -overlapping) regular expressions in a single scan of the search text: - -```rust -use regex::RegexSet; - -let set = RegexSet::new(&[ - r"\w+", - r"\d+", - r"\pL+", - r"foo", - r"bar", - r"barfoo", - r"foobar", -]).unwrap(); - -// Iterate over and collect all of the matches. -let matches: Vec<_> = set.matches("foobar").into_iter().collect(); -assert_eq!(matches, vec![0, 2, 3, 4, 6]); - -// You can also test whether a particular regex matched: -let matches = set.matches("foobar"); -assert!(!matches.matched(5)); -assert!(matches.matched(6)); -``` - - -### Usage: regex internals as a library - -The [`regex-automata` directory](./regex-automata/) contains a crate that -exposes all the internal matching engines used by the `regex` crate. The -idea is that the `regex` crate exposes a simple API for 99% of use cases, but -`regex-automata` exposes oodles of customizable behaviors. - -[Documentation for `regex-automata`.](https://docs.rs/regex-automata) - - -### Usage: a regular expression parser - -This repository contains a crate that provides a well tested regular expression -parser, abstract syntax and a high-level intermediate representation for -convenient analysis. It provides no facilities for compilation or execution. -This may be useful if you're implementing your own regex engine or otherwise -need to do analysis on the syntax of a regular expression. It is otherwise not -recommended for general use. - -[Documentation for `regex-syntax`.](https://docs.rs/regex-syntax) - - -### Crate features - -This crate comes with several features that permit tweaking the trade-off -between binary size, compilation time and runtime performance. Users of this -crate can selectively disable Unicode tables, or choose from a variety of -optimizations performed by this crate to disable. - -When all of these features are disabled, runtime match performance may be much -worse, but if you're matching on short strings, or if high performance isn't -necessary, then such a configuration is perfectly serviceable. 
To disable -all such features, use the following `Cargo.toml` dependency configuration: - -```toml -[dependencies.regex] -version = "1.3" -default-features = false -# Unless you have a specific reason not to, it's good sense to enable standard -# library support. It enables several optimizations and avoids spin locks. It -# also shouldn't meaningfully impact compile times or binary size. -features = ["std"] -``` - -This will reduce the dependency tree of `regex` down to two crates: -`regex-syntax` and `regex-automata`. - -The full set of features one can disable are -[in the "Crate features" section of the documentation](https://docs.rs/regex/1.*/#crate-features). - - -### Performance - -One of the goals of this crate is for the regex engine to be "fast." What that -is a somewhat nebulous goal, it is usually interpreted in one of two ways. -First, it means that all searches take worst case `O(m * n)` time, where -`m` is proportional to `len(regex)` and `n` is proportional to `len(haystack)`. -Second, it means that even aside from the time complexity constraint, regex -searches are "fast" in practice. - -While the first interpretation is pretty unambiguous, the second one remains -nebulous. While nebulous, it guides this crate's architecture and the sorts of -the trade-offs it makes. For example, here are some general architectural -statements that follow as a result of the goal to be "fast": - -* When given the choice between faster regex searches and faster _Rust compile -times_, this crate will generally choose faster regex searches. -* When given the choice between faster regex searches and faster _regex compile -times_, this crate will generally choose faster regex searches. That is, it is -generally acceptable for `Regex::new` to get a little slower if it means that -searches get faster. (This is a somewhat delicate balance to strike, because -the speed of `Regex::new` needs to remain somewhat reasonable. But this is why -one should avoid re-compiling the same regex over and over again.) -* When given the choice between faster regex searches and simpler API -design, this crate will generally choose faster regex searches. For example, -if one didn't care about performance, we could like get rid of both of -the `Regex::is_match` and `Regex::find` APIs and instead just rely on -`Regex::captures`. - -There are perhaps more ways that being "fast" influences things. - -While this repository used to provide its own benchmark suite, it has since -been moved to [rebar](https://github.com/BurntSushi/rebar). The benchmarks are -quite extensive, and there are many more than what is shown in rebar's README -(which is just limited to a "curated" set meant to compare performance between -regex engines). To run all of this crate's benchmarks, first start by cloning -and installing `rebar`: - -```text -$ git clone https://github.com/BurntSushi/rebar -$ cd rebar -$ cargo install --path ./ -``` - -Then build the benchmark harness for just this crate: - -```text -$ rebar build -e '^rust/regex$' -``` - -Run all benchmarks for this crate as tests (each benchmark is executed once to -ensure it works): - -```text -$ rebar measure -e '^rust/regex$' -t -``` - -Record measurements for all benchmarks and save them to a CSV file: - -```text -$ rebar measure -e '^rust/regex$' | tee results.csv -``` - -Explore benchmark timings: - -```text -$ rebar cmp results.csv -``` - -See the `rebar` documentation for more details on how it works and how to -compare results with other regex engines. 
- - -### Hacking - -The `regex` crate is, for the most part, a pretty thin wrapper around the -[`meta::Regex`](https://docs.rs/regex-automata/latest/regex_automata/meta/struct.Regex.html) -from the -[`regex-automata` crate](https://docs.rs/regex-automata/latest/regex_automata/). -Therefore, if you're looking to work on the internals of this crate, you'll -likely either want to look in `regex-syntax` (for parsing) or `regex-automata` -(for construction of finite automata and the search routines). - -My [blog on regex internals](https://burntsushi.net/regex-internals/) -goes into more depth. - - -### Minimum Rust version policy - -This crate's minimum supported `rustc` version is `1.65.0`. - -The policy is that the minimum Rust version required to use this crate can be -increased in minor version updates. For example, if regex 1.0 requires Rust -1.20.0, then regex 1.0.z for all values of `z` will also require Rust 1.20.0 or -newer. However, regex 1.y for `y > 0` may require a newer minimum version of -Rust. - - -### License - -This project is licensed under either of - - * Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or - https://www.apache.org/licenses/LICENSE-2.0) - * MIT license ([LICENSE-MIT](LICENSE-MIT) or - https://opensource.org/licenses/MIT) - -at your option. - -The data in `regex-syntax/src/unicode_tables/` is licensed under the Unicode -License Agreement -([LICENSE-UNICODE](https://www.unicode.org/copyright.html#License)). diff --git a/vendor/regex/UNICODE.md b/vendor/regex/UNICODE.md deleted file mode 100644 index 2b62567f12c6e6..00000000000000 --- a/vendor/regex/UNICODE.md +++ /dev/null @@ -1,258 +0,0 @@ -# Unicode conformance - -This document describes the regex crate's conformance to Unicode's -[UTS#18](https://unicode.org/reports/tr18/) -report, which lays out 3 levels of support: Basic, Extended and Tailored. - -Full support for Level 1 ("Basic Unicode Support") is provided with two -exceptions: - -1. Line boundaries are not Unicode aware. Namely, only the `\n` - (`END OF LINE`) character is recognized as a line boundary by default. - One can opt into `\r\n|\r|\n` being a line boundary via CRLF mode. -2. The compatibility properties specified by - [RL1.2a](https://unicode.org/reports/tr18/#RL1.2a) - are ASCII-only definitions. - -Little to no support is provided for either Level 2 or Level 3. For the most -part, this is because the features are either complex/hard to implement, or at -the very least, very difficult to implement without sacrificing performance. -For example, tackling canonical equivalence such that matching worked as one -would expect regardless of normalization form would be a significant -undertaking. This is at least partially a result of the fact that this regex -engine is based on finite automata, which admits less flexibility normally -associated with backtracking implementations. - - -## RL1.1 Hex Notation - -[UTS#18 RL1.1](https://unicode.org/reports/tr18/#Hex_notation) - -Hex Notation refers to the ability to specify a Unicode code point in a regular -expression via its hexadecimal code point representation. This is useful in -environments that have poor Unicode font rendering or if you need to express a -code point that is not normally displayable. 
All forms of hexadecimal notation -are supported - - \x7F hex character code (exactly two digits) - \x{10FFFF} any hex character code corresponding to a Unicode code point - \u007F hex character code (exactly four digits) - \u{7F} any hex character code corresponding to a Unicode code point - \U0000007F hex character code (exactly eight digits) - \U{7F} any hex character code corresponding to a Unicode code point - -Briefly, the `\x{...}`, `\u{...}` and `\U{...}` are all exactly equivalent ways -of expressing hexadecimal code points. Any number of digits can be written -within the brackets. In contrast, `\xNN`, `\uNNNN`, `\UNNNNNNNN` are all -fixed-width variants of the same idea. - -Note that when Unicode mode is disabled, any non-ASCII Unicode codepoint is -banned. Additionally, the `\xNN` syntax represents arbitrary bytes when Unicode -mode is disabled. That is, the regex `\xFF` matches the Unicode codepoint -U+00FF (encoded as `\xC3\xBF` in UTF-8) while the regex `(?-u)\xFF` matches -the literal byte `\xFF`. - - -## RL1.2 Properties - -[UTS#18 RL1.2](https://unicode.org/reports/tr18/#Categories) - -Full support for Unicode property syntax is provided. Unicode properties -provide a convenient way to construct character classes of groups of code -points specified by Unicode. The regex crate does not provide exhaustive -support, but covers a useful subset. In particular: - -* [General categories](https://unicode.org/reports/tr18/#General_Category_Property) -* [Scripts and Script Extensions](https://unicode.org/reports/tr18/#Script_Property) -* [Age](https://unicode.org/reports/tr18/#Age) -* A smattering of boolean properties, including all of those specified by - [RL1.2](https://unicode.org/reports/tr18/#RL1.2) explicitly. - -In all cases, property name and value abbreviations are supported, and all -names/values are matched loosely without regard for case, whitespace or -underscores. Property name aliases can be found in Unicode's -[`PropertyAliases.txt`](https://www.unicode.org/Public/UCD/latest/ucd/PropertyAliases.txt) -file, while property value aliases can be found in Unicode's -[`PropertyValueAliases.txt`](https://www.unicode.org/Public/UCD/latest/ucd/PropertyValueAliases.txt) -file. - -The syntax supported is also consistent with the UTS#18 recommendation: - -* `\p{Greek}` selects the `Greek` script. Equivalent expressions follow: - `\p{sc:Greek}`, `\p{Script:Greek}`, `\p{Sc=Greek}`, `\p{script=Greek}`, - `\P{sc!=Greek}`. Similarly for `General_Category` (or `gc` for short) and - `Script_Extensions` (or `scx` for short). -* `\p{age:3.2}` selects all code points in Unicode 3.2. -* `\p{Alphabetic}` selects the "alphabetic" property and can be abbreviated - via `\p{alpha}` (for example). -* Single letter variants for properties with single letter abbreviations. - For example, `\p{Letter}` can be equivalently written as `\pL`. 
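An illustrative sketch (using the same public `Regex` API) of the hex notation and property syntax described above:

```rust
use regex::Regex;

fn main() {
    // \x{...} names a code point by its hexadecimal value.
    let heart = Regex::new(r"\x{2764}").unwrap();
    assert!(heart.is_match("I \u{2764} regex"));

    // Script properties, with loose matching of names and values.
    let greek = Regex::new(r"\p{sc=Greek}+").unwrap();
    assert_eq!("αβγ", greek.find("abc αβγ xyz").unwrap().as_str());

    // Single-letter abbreviation: \pL is the same as \p{Letter}.
    assert!(Regex::new(r"\pL").unwrap().is_match("δ"));
}
```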
- -The following is a list of all properties supported by the regex crate (starred -properties correspond to properties required by RL1.2): - -* `General_Category` \* (including `Any`, `ASCII` and `Assigned`) -* `Script` \* -* `Script_Extensions` \* -* `Age` -* `ASCII_Hex_Digit` -* `Alphabetic` \* -* `Bidi_Control` -* `Case_Ignorable` -* `Cased` -* `Changes_When_Casefolded` -* `Changes_When_Casemapped` -* `Changes_When_Lowercased` -* `Changes_When_Titlecased` -* `Changes_When_Uppercased` -* `Dash` -* `Default_Ignorable_Code_Point` \* -* `Deprecated` -* `Diacritic` -* `Emoji` -* `Emoji_Presentation` -* `Emoji_Modifier` -* `Emoji_Modifier_Base` -* `Emoji_Component` -* `Extended_Pictographic` -* `Extender` -* `Grapheme_Base` -* `Grapheme_Cluster_Break` -* `Grapheme_Extend` -* `Hex_Digit` -* `IDS_Binary_Operator` -* `IDS_Trinary_Operator` -* `ID_Continue` -* `ID_Start` -* `Join_Control` -* `Logical_Order_Exception` -* `Lowercase` \* -* `Math` -* `Noncharacter_Code_Point` \* -* `Pattern_Syntax` -* `Pattern_White_Space` -* `Prepended_Concatenation_Mark` -* `Quotation_Mark` -* `Radical` -* `Regional_Indicator` -* `Sentence_Break` -* `Sentence_Terminal` -* `Soft_Dotted` -* `Terminal_Punctuation` -* `Unified_Ideograph` -* `Uppercase` \* -* `Variation_Selector` -* `White_Space` \* -* `Word_Break` -* `XID_Continue` -* `XID_Start` - - -## RL1.2a Compatibility Properties - -[UTS#18 RL1.2a](https://unicode.org/reports/tr18/#RL1.2a) - -The regex crate only provides ASCII definitions of the -[compatibility properties documented in UTS#18 Annex C](https://unicode.org/reports/tr18/#Compatibility_Properties) -(sans the `\X` class, for matching grapheme clusters, which isn't provided -at all). This is because it seems to be consistent with most other regular -expression engines, and in particular, because these are often referred to as -"ASCII" or "POSIX" character classes. - -Note that the `\w`, `\s` and `\d` character classes **are** Unicode aware. -Their traditional ASCII definition can be used by disabling Unicode. That is, -`[[:word:]]` and `(?-u)\w` are equivalent. - - -## RL1.3 Subtraction and Intersection - -[UTS#18 RL1.3](https://unicode.org/reports/tr18/#Subtraction_and_Intersection) - -The regex crate provides full support for nested character classes, along with -union, intersection (`&&`), difference (`--`) and symmetric difference (`~~`) -operations on arbitrary character classes. - -For example, to match all non-ASCII letters, you could use either -`[\p{Letter}--\p{Ascii}]` (difference) or `[\p{Letter}&&[^\p{Ascii}]]` -(intersecting the negation). - - -## RL1.4 Simple Word Boundaries - -[UTS#18 RL1.4](https://unicode.org/reports/tr18/#Simple_Word_Boundaries) - -The regex crate provides basic Unicode aware word boundary assertions. A word -boundary assertion can be written as `\b`, or `\B` as its negation. A word -boundary negation corresponds to a zero-width match, where its adjacent -characters correspond to word and non-word, or non-word and word characters. 
- -Conformance in this case chooses to define word character in the same way that -the `\w` character class is defined: a code point that is a member of one of -the following classes: - -* `\p{Alphabetic}` -* `\p{Join_Control}` -* `\p{gc:Mark}` -* `\p{gc:Decimal_Number}` -* `\p{gc:Connector_Punctuation}` - -In particular, this differs slightly from the -[prescription given in RL1.4](https://unicode.org/reports/tr18/#Simple_Word_Boundaries) -but is permissible according to -[UTS#18 Annex C](https://unicode.org/reports/tr18/#Compatibility_Properties). -Namely, it is convenient and simpler to have `\w` and `\b` be in sync with -one another. - -Finally, Unicode word boundaries can be disabled, which will cause ASCII word -boundaries to be used instead. That is, `\b` is a Unicode word boundary while -`(?-u)\b` is an ASCII-only word boundary. This can occasionally be beneficial -if performance is important, since the implementation of Unicode word -boundaries is currently suboptimal on non-ASCII text. - - -## RL1.5 Simple Loose Matches - -[UTS#18 RL1.5](https://unicode.org/reports/tr18/#Simple_Loose_Matches) - -The regex crate provides full support for case-insensitive matching in -accordance with RL1.5. That is, it uses the "simple" case folding mapping. The -"simple" mapping was chosen because of a key convenient property: every -"simple" mapping is a mapping from exactly one code point to exactly one other -code point. This makes case-insensitive matching of character classes, for -example, straight-forward to implement. - -When case-insensitive mode is enabled (e.g., `(?i)[a]` is equivalent to `a|A`), -then all characters classes are case folded as well. - - -## RL1.6 Line Boundaries - -[UTS#18 RL1.6](https://unicode.org/reports/tr18/#Line_Boundaries) - -The regex crate only provides support for recognizing the `\n` (`END OF LINE`) -character as a line boundary by default. One can also opt into treating -`\r\n|\r|\n` as a line boundary via CRLF mode. This choice was made mostly for -implementation convenience, and to avoid performance cliffs that Unicode word -boundaries are subject to. - - -## RL1.7 Code Points - -[UTS#18 RL1.7](https://unicode.org/reports/tr18/#Supplementary_Characters) - -The regex crate provides full support for Unicode code point matching. Namely, -the fundamental atom of any match is always a single code point. - -Given Rust's strong ties to UTF-8, the following guarantees are also provided: - -* All matches are reported on valid UTF-8 code unit boundaries. That is, any - match range returned by the public regex API is guaranteed to successfully - slice the string that was searched. -* By consequence of the above, it is impossible to match surrogate code points. - No support for UTF-16 is provided, so this is never necessary. - -Note that when Unicode mode is disabled, the fundamental atom of matching is -no longer a code point but a single byte. When Unicode mode is disabled, many -Unicode features are disabled as well. For example, `(?-u)\pL` is not a valid -regex but `\pL(?-u)\xFF` (matches any Unicode `Letter` followed by the literal -byte `\xFF`) is, for example. 
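To make the class-set, word-boundary and case-folding notes above concrete, here is a short sketch assuming the same `regex` API:

```rust
use regex::Regex;

fn main() {
    // RL1.3: class difference -- letters that are not ASCII.
    let non_ascii_letter = Regex::new(r"[\p{Letter}--\p{Ascii}]").unwrap();
    assert!(non_ascii_letter.is_match("δ"));
    assert!(!non_ascii_letter.is_match("d"));

    // RL1.2a/RL1.4: \b is Unicode-aware; (?-u)\b only sees ASCII word bytes.
    assert!(Regex::new(r"\bδ\b").unwrap().is_match("a δ b"));
    assert!(!Regex::new(r"(?-u)\b").unwrap().is_match("δ"));

    // RL1.5: simple case folding maps 'ſ' (U+017F) to 's'.
    assert!(Regex::new(r"(?i)s").unwrap().is_match("ſ"));
}
```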
diff --git a/vendor/regex/bench/README.md b/vendor/regex/bench/README.md deleted file mode 100644 index 3cc6a1a7afa60e..00000000000000 --- a/vendor/regex/bench/README.md +++ /dev/null @@ -1,2 +0,0 @@ -Benchmarks for this crate have been moved into the rebar project: -https://github.com/BurntSushi/rebar diff --git a/vendor/regex/rustfmt.toml b/vendor/regex/rustfmt.toml deleted file mode 100644 index aa37a218b97e5f..00000000000000 --- a/vendor/regex/rustfmt.toml +++ /dev/null @@ -1,2 +0,0 @@ -max_width = 79 -use_small_heuristics = "max" diff --git a/vendor/regex/src/builders.rs b/vendor/regex/src/builders.rs deleted file mode 100644 index 3bb08de8bfe5e5..00000000000000 --- a/vendor/regex/src/builders.rs +++ /dev/null @@ -1,2539 +0,0 @@ -#![allow(warnings)] - -// This module defines an internal builder that encapsulates all interaction -// with meta::Regex construction, and then 4 public API builders that wrap -// around it. The docs are essentially repeated on each of the 4 public -// builders, with tweaks to the examples as needed. -// -// The reason why there are so many builders is partially because of a misstep -// in the initial API design: the builder constructor takes in the pattern -// strings instead of using the `build` method to accept the pattern strings. -// This means `new` has a different signature for each builder. It probably -// would have been nicer to to use one builder with `fn new()`, and then add -// `build(pat)` and `build_many(pats)` constructors. -// -// The other reason is because I think the `bytes` module should probably -// have its own builder type. That way, it is completely isolated from the -// top-level API. -// -// If I could do it again, I'd probably have a `regex::Builder` and a -// `regex::bytes::Builder`. Each would have `build` and `build_set` (or -// `build_many`) methods for constructing a single pattern `Regex` and a -// multi-pattern `RegexSet`, respectively. - -use alloc::{ - string::{String, ToString}, - sync::Arc, - vec, - vec::Vec, -}; - -use regex_automata::{ - meta, nfa::thompson::WhichCaptures, util::syntax, MatchKind, -}; - -use crate::error::Error; - -/// A builder for constructing a `Regex`, `bytes::Regex`, `RegexSet` or a -/// `bytes::RegexSet`. -/// -/// This is essentially the implementation of the four different builder types -/// in the public API: `RegexBuilder`, `bytes::RegexBuilder`, `RegexSetBuilder` -/// and `bytes::RegexSetBuilder`. 
-#[derive(Clone, Debug)] -struct Builder { - pats: Vec, - metac: meta::Config, - syntaxc: syntax::Config, -} - -impl Default for Builder { - fn default() -> Builder { - let metac = meta::Config::new() - .nfa_size_limit(Some(10 * (1 << 20))) - .hybrid_cache_capacity(2 * (1 << 20)); - Builder { pats: vec![], metac, syntaxc: syntax::Config::default() } - } -} - -impl Builder { - fn new(patterns: I) -> Builder - where - S: AsRef, - I: IntoIterator, - { - let mut b = Builder::default(); - b.pats.extend(patterns.into_iter().map(|p| p.as_ref().to_string())); - b - } - - fn build_one_string(&self) -> Result { - assert_eq!(1, self.pats.len()); - let metac = self - .metac - .clone() - .match_kind(MatchKind::LeftmostFirst) - .utf8_empty(true); - let syntaxc = self.syntaxc.clone().utf8(true); - let pattern = Arc::from(self.pats[0].as_str()); - meta::Builder::new() - .configure(metac) - .syntax(syntaxc) - .build(&pattern) - .map(|meta| crate::Regex { meta, pattern }) - .map_err(Error::from_meta_build_error) - } - - fn build_one_bytes(&self) -> Result { - assert_eq!(1, self.pats.len()); - let metac = self - .metac - .clone() - .match_kind(MatchKind::LeftmostFirst) - .utf8_empty(false); - let syntaxc = self.syntaxc.clone().utf8(false); - let pattern = Arc::from(self.pats[0].as_str()); - meta::Builder::new() - .configure(metac) - .syntax(syntaxc) - .build(&pattern) - .map(|meta| crate::bytes::Regex { meta, pattern }) - .map_err(Error::from_meta_build_error) - } - - fn build_many_string(&self) -> Result { - let metac = self - .metac - .clone() - .match_kind(MatchKind::All) - .utf8_empty(true) - .which_captures(WhichCaptures::None); - let syntaxc = self.syntaxc.clone().utf8(true); - let patterns = Arc::from(self.pats.as_slice()); - meta::Builder::new() - .configure(metac) - .syntax(syntaxc) - .build_many(&patterns) - .map(|meta| crate::RegexSet { meta, patterns }) - .map_err(Error::from_meta_build_error) - } - - fn build_many_bytes(&self) -> Result { - let metac = self - .metac - .clone() - .match_kind(MatchKind::All) - .utf8_empty(false) - .which_captures(WhichCaptures::None); - let syntaxc = self.syntaxc.clone().utf8(false); - let patterns = Arc::from(self.pats.as_slice()); - meta::Builder::new() - .configure(metac) - .syntax(syntaxc) - .build_many(&patterns) - .map(|meta| crate::bytes::RegexSet { meta, patterns }) - .map_err(Error::from_meta_build_error) - } - - fn case_insensitive(&mut self, yes: bool) -> &mut Builder { - self.syntaxc = self.syntaxc.case_insensitive(yes); - self - } - - fn multi_line(&mut self, yes: bool) -> &mut Builder { - self.syntaxc = self.syntaxc.multi_line(yes); - self - } - - fn dot_matches_new_line(&mut self, yes: bool) -> &mut Builder { - self.syntaxc = self.syntaxc.dot_matches_new_line(yes); - self - } - - fn crlf(&mut self, yes: bool) -> &mut Builder { - self.syntaxc = self.syntaxc.crlf(yes); - self - } - - fn line_terminator(&mut self, byte: u8) -> &mut Builder { - self.metac = self.metac.clone().line_terminator(byte); - self.syntaxc = self.syntaxc.line_terminator(byte); - self - } - - fn swap_greed(&mut self, yes: bool) -> &mut Builder { - self.syntaxc = self.syntaxc.swap_greed(yes); - self - } - - fn ignore_whitespace(&mut self, yes: bool) -> &mut Builder { - self.syntaxc = self.syntaxc.ignore_whitespace(yes); - self - } - - fn unicode(&mut self, yes: bool) -> &mut Builder { - self.syntaxc = self.syntaxc.unicode(yes); - self - } - - fn octal(&mut self, yes: bool) -> &mut Builder { - self.syntaxc = self.syntaxc.octal(yes); - self - } - - fn size_limit(&mut self, limit: 
usize) -> &mut Builder { - self.metac = self.metac.clone().nfa_size_limit(Some(limit)); - self - } - - fn dfa_size_limit(&mut self, limit: usize) -> &mut Builder { - self.metac = self.metac.clone().hybrid_cache_capacity(limit); - self - } - - fn nest_limit(&mut self, limit: u32) -> &mut Builder { - self.syntaxc = self.syntaxc.nest_limit(limit); - self - } -} - -pub(crate) mod string { - use crate::{error::Error, Regex, RegexSet}; - - use super::Builder; - - /// A configurable builder for a [`Regex`]. - /// - /// This builder can be used to programmatically set flags such as `i` - /// (case insensitive) and `x` (for verbose mode). This builder can also be - /// used to configure things like the line terminator and a size limit on - /// the compiled regular expression. - #[derive(Clone, Debug)] - pub struct RegexBuilder { - builder: Builder, - } - - impl RegexBuilder { - /// Create a new builder with a default configuration for the given - /// pattern. - /// - /// If the pattern is invalid or exceeds the configured size limits, - /// then an error will be returned when [`RegexBuilder::build`] is - /// called. - pub fn new(pattern: &str) -> RegexBuilder { - RegexBuilder { builder: Builder::new([pattern]) } - } - - /// Compiles the pattern given to `RegexBuilder::new` with the - /// configuration set on this builder. - /// - /// If the pattern isn't a valid regex or if a configured size limit - /// was exceeded, then an error is returned. - pub fn build(&self) -> Result { - self.builder.build_one_string() - } - - /// This configures Unicode mode for the entire pattern. - /// - /// Enabling Unicode mode does a number of things: - /// - /// * Most fundamentally, it causes the fundamental atom of matching - /// to be a single codepoint. When Unicode mode is disabled, it's a - /// single byte. For example, when Unicode mode is enabled, `.` will - /// match `💩` once, where as it will match 4 times when Unicode mode - /// is disabled. (Since the UTF-8 encoding of `💩` is 4 bytes long.) - /// * Case insensitive matching uses Unicode simple case folding rules. - /// * Unicode character classes like `\p{Letter}` and `\p{Greek}` are - /// available. - /// * Perl character classes are Unicode aware. That is, `\w`, `\s` and - /// `\d`. - /// * The word boundary assertions, `\b` and `\B`, use the Unicode - /// definition of a word character. - /// - /// Note that if Unicode mode is disabled, then the regex will fail to - /// compile if it could match invalid UTF-8. For example, when Unicode - /// mode is disabled, then since `.` matches any byte (except for - /// `\n`), then it can match invalid UTF-8 and thus building a regex - /// from it will fail. Another example is `\w` and `\W`. Since `\w` can - /// only match ASCII bytes when Unicode mode is disabled, it's allowed. - /// But `\W` can match more than ASCII bytes, including invalid UTF-8, - /// and so it is not allowed. This restriction can be lifted only by - /// using a [`bytes::Regex`](crate::bytes::Regex). - /// - /// For more details on the Unicode support in this crate, see the - /// [Unicode section](crate#unicode) in this crate's top-level - /// documentation. - /// - /// The default for this is `true`. - /// - /// # Example - /// - /// ``` - /// use regex::RegexBuilder; - /// - /// let re = RegexBuilder::new(r"\w") - /// .unicode(false) - /// .build() - /// .unwrap(); - /// // Normally greek letters would be included in \w, but since - /// // Unicode mode is disabled, it only matches ASCII letters. 
- /// assert!(!re.is_match("δ")); - /// - /// let re = RegexBuilder::new(r"s") - /// .case_insensitive(true) - /// .unicode(false) - /// .build() - /// .unwrap(); - /// // Normally 'ſ' is included when searching for 's' case - /// // insensitively due to Unicode's simple case folding rules. But - /// // when Unicode mode is disabled, only ASCII case insensitive rules - /// // are used. - /// assert!(!re.is_match("ſ")); - /// ``` - pub fn unicode(&mut self, yes: bool) -> &mut RegexBuilder { - self.builder.unicode(yes); - self - } - - /// This configures whether to enable case insensitive matching for the - /// entire pattern. - /// - /// This setting can also be configured using the inline flag `i` - /// in the pattern. For example, `(?i:foo)` matches `foo` case - /// insensitively while `(?-i:foo)` matches `foo` case sensitively. - /// - /// The default for this is `false`. - /// - /// # Example - /// - /// ``` - /// use regex::RegexBuilder; - /// - /// let re = RegexBuilder::new(r"foo(?-i:bar)quux") - /// .case_insensitive(true) - /// .build() - /// .unwrap(); - /// assert!(re.is_match("FoObarQuUx")); - /// // Even though case insensitive matching is enabled in the builder, - /// // it can be locally disabled within the pattern. In this case, - /// // `bar` is matched case sensitively. - /// assert!(!re.is_match("fooBARquux")); - /// ``` - pub fn case_insensitive(&mut self, yes: bool) -> &mut RegexBuilder { - self.builder.case_insensitive(yes); - self - } - - /// This configures multi-line mode for the entire pattern. - /// - /// Enabling multi-line mode changes the behavior of the `^` and `$` - /// anchor assertions. Instead of only matching at the beginning and - /// end of a haystack, respectively, multi-line mode causes them to - /// match at the beginning and end of a line *in addition* to the - /// beginning and end of a haystack. More precisely, `^` will match at - /// the position immediately following a `\n` and `$` will match at the - /// position immediately preceding a `\n`. - /// - /// The behavior of this option can be impacted by other settings too: - /// - /// * The [`RegexBuilder::line_terminator`] option changes `\n` above - /// to any ASCII byte. - /// * The [`RegexBuilder::crlf`] option changes the line terminator to - /// be either `\r` or `\n`, but never at the position between a `\r` - /// and `\n`. - /// - /// This setting can also be configured using the inline flag `m` in - /// the pattern. - /// - /// The default for this is `false`. - /// - /// # Example - /// - /// ``` - /// use regex::RegexBuilder; - /// - /// let re = RegexBuilder::new(r"^foo$") - /// .multi_line(true) - /// .build() - /// .unwrap(); - /// assert_eq!(Some(1..4), re.find("\nfoo\n").map(|m| m.range())); - /// ``` - pub fn multi_line(&mut self, yes: bool) -> &mut RegexBuilder { - self.builder.multi_line(yes); - self - } - - /// This configures dot-matches-new-line mode for the entire pattern. - /// - /// Perhaps surprisingly, the default behavior for `.` is not to match - /// any character, but rather, to match any character except for the - /// line terminator (which is `\n` by default). When this mode is - /// enabled, the behavior changes such that `.` truly matches any - /// character. - /// - /// This setting can also be configured using the inline flag `s` in - /// the pattern. For example, `(?s:.)` and `\p{any}` are equivalent - /// regexes. - /// - /// The default for this is `false`. 
- /// - /// # Example - /// - /// ``` - /// use regex::RegexBuilder; - /// - /// let re = RegexBuilder::new(r"foo.bar") - /// .dot_matches_new_line(true) - /// .build() - /// .unwrap(); - /// let hay = "foo\nbar"; - /// assert_eq!(Some("foo\nbar"), re.find(hay).map(|m| m.as_str())); - /// ``` - pub fn dot_matches_new_line( - &mut self, - yes: bool, - ) -> &mut RegexBuilder { - self.builder.dot_matches_new_line(yes); - self - } - - /// This configures CRLF mode for the entire pattern. - /// - /// When CRLF mode is enabled, both `\r` ("carriage return" or CR for - /// short) and `\n` ("line feed" or LF for short) are treated as line - /// terminators. This results in the following: - /// - /// * Unless dot-matches-new-line mode is enabled, `.` will now match - /// any character except for `\n` and `\r`. - /// * When multi-line mode is enabled, `^` will match immediately - /// following a `\n` or a `\r`. Similarly, `$` will match immediately - /// preceding a `\n` or a `\r`. Neither `^` nor `$` will ever match - /// between `\r` and `\n`. - /// - /// This setting can also be configured using the inline flag `R` in - /// the pattern. - /// - /// The default for this is `false`. - /// - /// # Example - /// - /// ``` - /// use regex::RegexBuilder; - /// - /// let re = RegexBuilder::new(r"^foo$") - /// .multi_line(true) - /// .crlf(true) - /// .build() - /// .unwrap(); - /// let hay = "\r\nfoo\r\n"; - /// // If CRLF mode weren't enabled here, then '$' wouldn't match - /// // immediately after 'foo', and thus no match would be found. - /// assert_eq!(Some("foo"), re.find(hay).map(|m| m.as_str())); - /// ``` - /// - /// This example demonstrates that `^` will never match at a position - /// between `\r` and `\n`. (`$` will similarly not match between a `\r` - /// and a `\n`.) - /// - /// ``` - /// use regex::RegexBuilder; - /// - /// let re = RegexBuilder::new(r"^") - /// .multi_line(true) - /// .crlf(true) - /// .build() - /// .unwrap(); - /// let hay = "\r\n\r\n"; - /// let ranges: Vec<_> = re.find_iter(hay).map(|m| m.range()).collect(); - /// assert_eq!(ranges, vec![0..0, 2..2, 4..4]); - /// ``` - pub fn crlf(&mut self, yes: bool) -> &mut RegexBuilder { - self.builder.crlf(yes); - self - } - - /// Configures the line terminator to be used by the regex. - /// - /// The line terminator is relevant in two ways for a particular regex: - /// - /// * When dot-matches-new-line mode is *not* enabled (the default), - /// then `.` will match any character except for the configured line - /// terminator. - /// * When multi-line mode is enabled (not the default), then `^` and - /// `$` will match immediately after and before, respectively, a line - /// terminator. - /// - /// In both cases, if CRLF mode is enabled in a particular context, - /// then it takes precedence over any configured line terminator. - /// - /// This option cannot be configured from within the pattern. - /// - /// The default line terminator is `\n`. - /// - /// # Example - /// - /// This shows how to treat the NUL byte as a line terminator. This can - /// be a useful heuristic when searching binary data. 
- /// - /// ``` - /// use regex::RegexBuilder; - /// - /// let re = RegexBuilder::new(r"^foo$") - /// .multi_line(true) - /// .line_terminator(b'\x00') - /// .build() - /// .unwrap(); - /// let hay = "\x00foo\x00"; - /// assert_eq!(Some(1..4), re.find(hay).map(|m| m.range())); - /// ``` - /// - /// This example shows that the behavior of `.` is impacted by this - /// setting as well: - /// - /// ``` - /// use regex::RegexBuilder; - /// - /// let re = RegexBuilder::new(r".") - /// .line_terminator(b'\x00') - /// .build() - /// .unwrap(); - /// assert!(re.is_match("\n")); - /// assert!(!re.is_match("\x00")); - /// ``` - /// - /// This shows that building a regex will fail if the byte given - /// is not ASCII and the pattern could result in matching invalid - /// UTF-8. This is because any singular non-ASCII byte is not valid - /// UTF-8, and it is not permitted for a [`Regex`] to match invalid - /// UTF-8. (It is permissible to use a non-ASCII byte when building a - /// [`bytes::Regex`](crate::bytes::Regex).) - /// - /// ``` - /// use regex::RegexBuilder; - /// - /// assert!(RegexBuilder::new(r".").line_terminator(0x80).build().is_err()); - /// // Note that using a non-ASCII byte isn't enough on its own to - /// // cause regex compilation to fail. You actually have to make use - /// // of it in the regex in a way that leads to matching invalid - /// // UTF-8. If you don't, then regex compilation will succeed! - /// assert!(RegexBuilder::new(r"a").line_terminator(0x80).build().is_ok()); - /// ``` - pub fn line_terminator(&mut self, byte: u8) -> &mut RegexBuilder { - self.builder.line_terminator(byte); - self - } - - /// This configures swap-greed mode for the entire pattern. - /// - /// When swap-greed mode is enabled, patterns like `a+` will become - /// non-greedy and patterns like `a+?` will become greedy. In other - /// words, the meanings of `a+` and `a+?` are switched. - /// - /// This setting can also be configured using the inline flag `U` in - /// the pattern. - /// - /// The default for this is `false`. - /// - /// # Example - /// - /// ``` - /// use regex::RegexBuilder; - /// - /// let re = RegexBuilder::new(r"a+") - /// .swap_greed(true) - /// .build() - /// .unwrap(); - /// assert_eq!(Some("a"), re.find("aaa").map(|m| m.as_str())); - /// ``` - pub fn swap_greed(&mut self, yes: bool) -> &mut RegexBuilder { - self.builder.swap_greed(yes); - self - } - - /// This configures verbose mode for the entire pattern. - /// - /// When enabled, whitespace will treated as insignificant in the - /// pattern and `#` can be used to start a comment until the next new - /// line. - /// - /// Normally, in most places in a pattern, whitespace is treated - /// literally. For example ` +` will match one or more ASCII whitespace - /// characters. - /// - /// When verbose mode is enabled, `\#` can be used to match a literal - /// `#` and `\ ` can be used to match a literal ASCII whitespace - /// character. - /// - /// Verbose mode is useful for permitting regexes to be formatted and - /// broken up more nicely. This may make them more easily readable. - /// - /// This setting can also be configured using the inline flag `x` in - /// the pattern. - /// - /// The default for this is `false`. - /// - /// # Example - /// - /// ``` - /// use regex::RegexBuilder; - /// - /// let pat = r" - /// \b - /// (?\p{Uppercase}\w*) # always start with uppercase letter - /// [\s--\n]+ # whitespace should separate names - /// (?: # middle name can be an initial! 
- /// (?:(?\p{Uppercase})\.|(?\p{Uppercase}\w*)) - /// [\s--\n]+ - /// )? - /// (?\p{Uppercase}\w*) - /// \b - /// "; - /// let re = RegexBuilder::new(pat) - /// .ignore_whitespace(true) - /// .build() - /// .unwrap(); - /// - /// let caps = re.captures("Harry Potter").unwrap(); - /// assert_eq!("Harry", &caps["first"]); - /// assert_eq!("Potter", &caps["last"]); - /// - /// let caps = re.captures("Harry J. Potter").unwrap(); - /// assert_eq!("Harry", &caps["first"]); - /// // Since a middle name/initial isn't required for an overall match, - /// // we can't assume that 'initial' or 'middle' will be populated! - /// assert_eq!(Some("J"), caps.name("initial").map(|m| m.as_str())); - /// assert_eq!(None, caps.name("middle").map(|m| m.as_str())); - /// assert_eq!("Potter", &caps["last"]); - /// - /// let caps = re.captures("Harry James Potter").unwrap(); - /// assert_eq!("Harry", &caps["first"]); - /// // Since a middle name/initial isn't required for an overall match, - /// // we can't assume that 'initial' or 'middle' will be populated! - /// assert_eq!(None, caps.name("initial").map(|m| m.as_str())); - /// assert_eq!(Some("James"), caps.name("middle").map(|m| m.as_str())); - /// assert_eq!("Potter", &caps["last"]); - /// ``` - pub fn ignore_whitespace(&mut self, yes: bool) -> &mut RegexBuilder { - self.builder.ignore_whitespace(yes); - self - } - - /// This configures octal mode for the entire pattern. - /// - /// Octal syntax is a little-known way of uttering Unicode codepoints - /// in a pattern. For example, `a`, `\x61`, `\u0061` and `\141` are all - /// equivalent patterns, where the last example shows octal syntax. - /// - /// While supporting octal syntax isn't in and of itself a problem, - /// it does make good error messages harder. That is, in PCRE based - /// regex engines, syntax like `\1` invokes a backreference, which is - /// explicitly unsupported this library. However, many users expect - /// backreferences to be supported. Therefore, when octal support - /// is disabled, the error message will explicitly mention that - /// backreferences aren't supported. - /// - /// The default for this is `false`. - /// - /// # Example - /// - /// ``` - /// use regex::RegexBuilder; - /// - /// // Normally this pattern would not compile, with an error message - /// // about backreferences not being supported. But with octal mode - /// // enabled, octal escape sequences work. - /// let re = RegexBuilder::new(r"\141") - /// .octal(true) - /// .build() - /// .unwrap(); - /// assert!(re.is_match("a")); - /// ``` - pub fn octal(&mut self, yes: bool) -> &mut RegexBuilder { - self.builder.octal(yes); - self - } - - /// Sets the approximate size limit, in bytes, of the compiled regex. - /// - /// This roughly corresponds to the number of heap memory, in - /// bytes, occupied by a single regex. If the regex would otherwise - /// approximately exceed this limit, then compiling that regex will - /// fail. - /// - /// The main utility of a method like this is to avoid compiling - /// regexes that use an unexpected amount of resources, such as - /// time and memory. Even if the memory usage of a large regex is - /// acceptable, its search time may not be. Namely, worst case time - /// complexity for search is `O(m * n)`, where `m ~ len(pattern)` and - /// `n ~ len(haystack)`. That is, search time depends, in part, on the - /// size of the compiled regex. This means that putting a limit on the - /// size of the regex limits how much a regex can impact search time. 
- /// - /// For more information about regex size limits, see the section on - /// [untrusted inputs](crate#untrusted-input) in the top-level crate - /// documentation. - /// - /// The default for this is some reasonable number that permits most - /// patterns to compile successfully. - /// - /// # Example - /// - /// ``` - /// # if !cfg!(target_pointer_width = "64") { return; } // see #1041 - /// use regex::RegexBuilder; - /// - /// // It may surprise you how big some seemingly small patterns can - /// // be! Since \w is Unicode aware, this generates a regex that can - /// // match approximately 140,000 distinct codepoints. - /// assert!(RegexBuilder::new(r"\w").size_limit(45_000).build().is_err()); - /// ``` - pub fn size_limit(&mut self, bytes: usize) -> &mut RegexBuilder { - self.builder.size_limit(bytes); - self - } - - /// Set the approximate capacity, in bytes, of the cache of transitions - /// used by the lazy DFA. - /// - /// While the lazy DFA isn't always used, in tends to be the most - /// commonly use regex engine in default configurations. It tends to - /// adopt the performance profile of a fully build DFA, but without the - /// downside of taking worst case exponential time to build. - /// - /// The downside is that it needs to keep a cache of transitions and - /// states that are built while running a search, and this cache - /// can fill up. When it fills up, the cache will reset itself. Any - /// previously generated states and transitions will then need to be - /// re-generated. If this happens too many times, then this library - /// will bail out of using the lazy DFA and switch to a different regex - /// engine. - /// - /// If your regex provokes this particular downside of the lazy DFA, - /// then it may be beneficial to increase its cache capacity. This will - /// potentially reduce the frequency of cache resetting (ideally to - /// `0`). While it won't fix all potential performance problems with - /// the lazy DFA, increasing the cache capacity does fix some. - /// - /// There is no easy way to determine, a priori, whether increasing - /// this cache capacity will help. In general, the larger your regex, - /// the more cache it's likely to use. But that isn't an ironclad rule. - /// For example, a regex like `[01]*1[01]{N}` would normally produce a - /// fully build DFA that is exponential in size with respect to `N`. - /// The lazy DFA will prevent exponential space blow-up, but it cache - /// is likely to fill up, even when it's large and even for smallish - /// values of `N`. - /// - /// If you aren't sure whether this helps or not, it is sensible to - /// set this to some arbitrarily large number in testing, such as - /// `usize::MAX`. Namely, this represents the amount of capacity that - /// *may* be used. It's probably not a good idea to use `usize::MAX` in - /// production though, since it implies there are no controls on heap - /// memory used by this library during a search. In effect, set it to - /// whatever you're willing to allocate for a single regex search. - pub fn dfa_size_limit(&mut self, bytes: usize) -> &mut RegexBuilder { - self.builder.dfa_size_limit(bytes); - self - } - - /// Set the nesting limit for this parser. - /// - /// The nesting limit controls how deep the abstract syntax tree is - /// allowed to be. If the AST exceeds the given limit (e.g., with too - /// many nested groups), then an error is returned by the parser. 
- /// - /// The purpose of this limit is to act as a heuristic to prevent stack - /// overflow for consumers that do structural induction on an AST using - /// explicit recursion. While this crate never does this (instead using - /// constant stack space and moving the call stack to the heap), other - /// crates may. - /// - /// This limit is not checked until the entire AST is parsed. - /// Therefore, if callers want to put a limit on the amount of heap - /// space used, then they should impose a limit on the length, in - /// bytes, of the concrete pattern string. In particular, this is - /// viable since this parser implementation will limit itself to heap - /// space proportional to the length of the pattern string. See also - /// the [untrusted inputs](crate#untrusted-input) section in the - /// top-level crate documentation for more information about this. - /// - /// Note that a nest limit of `0` will return a nest limit error for - /// most patterns but not all. For example, a nest limit of `0` permits - /// `a` but not `ab`, since `ab` requires an explicit concatenation, - /// which results in a nest depth of `1`. In general, a nest limit is - /// not something that manifests in an obvious way in the concrete - /// syntax, therefore, it should not be used in a granular way. - /// - /// # Example - /// - /// ``` - /// use regex::RegexBuilder; - /// - /// assert!(RegexBuilder::new(r"a").nest_limit(0).build().is_ok()); - /// assert!(RegexBuilder::new(r"ab").nest_limit(0).build().is_err()); - /// ``` - pub fn nest_limit(&mut self, limit: u32) -> &mut RegexBuilder { - self.builder.nest_limit(limit); - self - } - } - - /// A configurable builder for a [`RegexSet`]. - /// - /// This builder can be used to programmatically set flags such as - /// `i` (case insensitive) and `x` (for verbose mode). This builder - /// can also be used to configure things like the line terminator - /// and a size limit on the compiled regular expression. - #[derive(Clone, Debug)] - pub struct RegexSetBuilder { - builder: Builder, - } - - impl RegexSetBuilder { - /// Create a new builder with a default configuration for the given - /// patterns. - /// - /// If the patterns are invalid or exceed the configured size limits, - /// then an error will be returned when [`RegexSetBuilder::build`] is - /// called. - pub fn new(patterns: I) -> RegexSetBuilder - where - I: IntoIterator, - S: AsRef, - { - RegexSetBuilder { builder: Builder::new(patterns) } - } - - /// Compiles the patterns given to `RegexSetBuilder::new` with the - /// configuration set on this builder. - /// - /// If the patterns aren't valid regexes or if a configured size limit - /// was exceeded, then an error is returned. - pub fn build(&self) -> Result { - self.builder.build_many_string() - } - - /// This configures Unicode mode for the all of the patterns. - /// - /// Enabling Unicode mode does a number of things: - /// - /// * Most fundamentally, it causes the fundamental atom of matching - /// to be a single codepoint. When Unicode mode is disabled, it's a - /// single byte. For example, when Unicode mode is enabled, `.` will - /// match `💩` once, where as it will match 4 times when Unicode mode - /// is disabled. (Since the UTF-8 encoding of `💩` is 4 bytes long.) - /// * Case insensitive matching uses Unicode simple case folding rules. - /// * Unicode character classes like `\p{Letter}` and `\p{Greek}` are - /// available. - /// * Perl character classes are Unicode aware. That is, `\w`, `\s` and - /// `\d`. 
- /// * The word boundary assertions, `\b` and `\B`, use the Unicode - /// definition of a word character. - /// - /// Note that if Unicode mode is disabled, then the regex will fail to - /// compile if it could match invalid UTF-8. For example, when Unicode - /// mode is disabled, then since `.` matches any byte (except for - /// `\n`), then it can match invalid UTF-8 and thus building a regex - /// from it will fail. Another example is `\w` and `\W`. Since `\w` can - /// only match ASCII bytes when Unicode mode is disabled, it's allowed. - /// But `\W` can match more than ASCII bytes, including invalid UTF-8, - /// and so it is not allowed. This restriction can be lifted only by - /// using a [`bytes::RegexSet`](crate::bytes::RegexSet). - /// - /// For more details on the Unicode support in this crate, see the - /// [Unicode section](crate#unicode) in this crate's top-level - /// documentation. - /// - /// The default for this is `true`. - /// - /// # Example - /// - /// ``` - /// use regex::RegexSetBuilder; - /// - /// let re = RegexSetBuilder::new([r"\w"]) - /// .unicode(false) - /// .build() - /// .unwrap(); - /// // Normally greek letters would be included in \w, but since - /// // Unicode mode is disabled, it only matches ASCII letters. - /// assert!(!re.is_match("δ")); - /// - /// let re = RegexSetBuilder::new([r"s"]) - /// .case_insensitive(true) - /// .unicode(false) - /// .build() - /// .unwrap(); - /// // Normally 'ſ' is included when searching for 's' case - /// // insensitively due to Unicode's simple case folding rules. But - /// // when Unicode mode is disabled, only ASCII case insensitive rules - /// // are used. - /// assert!(!re.is_match("ſ")); - /// ``` - pub fn unicode(&mut self, yes: bool) -> &mut RegexSetBuilder { - self.builder.unicode(yes); - self - } - - /// This configures whether to enable case insensitive matching for all - /// of the patterns. - /// - /// This setting can also be configured using the inline flag `i` - /// in the pattern. For example, `(?i:foo)` matches `foo` case - /// insensitively while `(?-i:foo)` matches `foo` case sensitively. - /// - /// The default for this is `false`. - /// - /// # Example - /// - /// ``` - /// use regex::RegexSetBuilder; - /// - /// let re = RegexSetBuilder::new([r"foo(?-i:bar)quux"]) - /// .case_insensitive(true) - /// .build() - /// .unwrap(); - /// assert!(re.is_match("FoObarQuUx")); - /// // Even though case insensitive matching is enabled in the builder, - /// // it can be locally disabled within the pattern. In this case, - /// // `bar` is matched case sensitively. - /// assert!(!re.is_match("fooBARquux")); - /// ``` - pub fn case_insensitive(&mut self, yes: bool) -> &mut RegexSetBuilder { - self.builder.case_insensitive(yes); - self - } - - /// This configures multi-line mode for all of the patterns. - /// - /// Enabling multi-line mode changes the behavior of the `^` and `$` - /// anchor assertions. Instead of only matching at the beginning and - /// end of a haystack, respectively, multi-line mode causes them to - /// match at the beginning and end of a line *in addition* to the - /// beginning and end of a haystack. More precisely, `^` will match at - /// the position immediately following a `\n` and `$` will match at the - /// position immediately preceding a `\n`. - /// - /// The behavior of this option can be impacted by other settings too: - /// - /// * The [`RegexSetBuilder::line_terminator`] option changes `\n` - /// above to any ASCII byte. 
- /// * The [`RegexSetBuilder::crlf`] option changes the line terminator - /// to be either `\r` or `\n`, but never at the position between a `\r` - /// and `\n`. - /// - /// This setting can also be configured using the inline flag `m` in - /// the pattern. - /// - /// The default for this is `false`. - /// - /// # Example - /// - /// ``` - /// use regex::RegexSetBuilder; - /// - /// let re = RegexSetBuilder::new([r"^foo$"]) - /// .multi_line(true) - /// .build() - /// .unwrap(); - /// assert!(re.is_match("\nfoo\n")); - /// ``` - pub fn multi_line(&mut self, yes: bool) -> &mut RegexSetBuilder { - self.builder.multi_line(yes); - self - } - - /// This configures dot-matches-new-line mode for the entire pattern. - /// - /// Perhaps surprisingly, the default behavior for `.` is not to match - /// any character, but rather, to match any character except for the - /// line terminator (which is `\n` by default). When this mode is - /// enabled, the behavior changes such that `.` truly matches any - /// character. - /// - /// This setting can also be configured using the inline flag `s` in - /// the pattern. For example, `(?s:.)` and `\p{any}` are equivalent - /// regexes. - /// - /// The default for this is `false`. - /// - /// # Example - /// - /// ``` - /// use regex::RegexSetBuilder; - /// - /// let re = RegexSetBuilder::new([r"foo.bar"]) - /// .dot_matches_new_line(true) - /// .build() - /// .unwrap(); - /// let hay = "foo\nbar"; - /// assert!(re.is_match(hay)); - /// ``` - pub fn dot_matches_new_line( - &mut self, - yes: bool, - ) -> &mut RegexSetBuilder { - self.builder.dot_matches_new_line(yes); - self - } - - /// This configures CRLF mode for all of the patterns. - /// - /// When CRLF mode is enabled, both `\r` ("carriage return" or CR for - /// short) and `\n` ("line feed" or LF for short) are treated as line - /// terminators. This results in the following: - /// - /// * Unless dot-matches-new-line mode is enabled, `.` will now match - /// any character except for `\n` and `\r`. - /// * When multi-line mode is enabled, `^` will match immediately - /// following a `\n` or a `\r`. Similarly, `$` will match immediately - /// preceding a `\n` or a `\r`. Neither `^` nor `$` will ever match - /// between `\r` and `\n`. - /// - /// This setting can also be configured using the inline flag `R` in - /// the pattern. - /// - /// The default for this is `false`. - /// - /// # Example - /// - /// ``` - /// use regex::RegexSetBuilder; - /// - /// let re = RegexSetBuilder::new([r"^foo$"]) - /// .multi_line(true) - /// .crlf(true) - /// .build() - /// .unwrap(); - /// let hay = "\r\nfoo\r\n"; - /// // If CRLF mode weren't enabled here, then '$' wouldn't match - /// // immediately after 'foo', and thus no match would be found. - /// assert!(re.is_match(hay)); - /// ``` - /// - /// This example demonstrates that `^` will never match at a position - /// between `\r` and `\n`. (`$` will similarly not match between a `\r` - /// and a `\n`.) - /// - /// ``` - /// use regex::RegexSetBuilder; - /// - /// let re = RegexSetBuilder::new([r"^\n"]) - /// .multi_line(true) - /// .crlf(true) - /// .build() - /// .unwrap(); - /// assert!(!re.is_match("\r\n")); - /// ``` - pub fn crlf(&mut self, yes: bool) -> &mut RegexSetBuilder { - self.builder.crlf(yes); - self - } - - /// Configures the line terminator to be used by the regex. 
- /// - /// The line terminator is relevant in two ways for a particular regex: - /// - /// * When dot-matches-new-line mode is *not* enabled (the default), - /// then `.` will match any character except for the configured line - /// terminator. - /// * When multi-line mode is enabled (not the default), then `^` and - /// `$` will match immediately after and before, respectively, a line - /// terminator. - /// - /// In both cases, if CRLF mode is enabled in a particular context, - /// then it takes precedence over any configured line terminator. - /// - /// This option cannot be configured from within the pattern. - /// - /// The default line terminator is `\n`. - /// - /// # Example - /// - /// This shows how to treat the NUL byte as a line terminator. This can - /// be a useful heuristic when searching binary data. - /// - /// ``` - /// use regex::RegexSetBuilder; - /// - /// let re = RegexSetBuilder::new([r"^foo$"]) - /// .multi_line(true) - /// .line_terminator(b'\x00') - /// .build() - /// .unwrap(); - /// let hay = "\x00foo\x00"; - /// assert!(re.is_match(hay)); - /// ``` - /// - /// This example shows that the behavior of `.` is impacted by this - /// setting as well: - /// - /// ``` - /// use regex::RegexSetBuilder; - /// - /// let re = RegexSetBuilder::new([r"."]) - /// .line_terminator(b'\x00') - /// .build() - /// .unwrap(); - /// assert!(re.is_match("\n")); - /// assert!(!re.is_match("\x00")); - /// ``` - /// - /// This shows that building a regex will fail if the byte given - /// is not ASCII and the pattern could result in matching invalid - /// UTF-8. This is because any singular non-ASCII byte is not valid - /// UTF-8, and it is not permitted for a [`RegexSet`] to match invalid - /// UTF-8. (It is permissible to use a non-ASCII byte when building a - /// [`bytes::RegexSet`](crate::bytes::RegexSet).) - /// - /// ``` - /// use regex::RegexSetBuilder; - /// - /// assert!( - /// RegexSetBuilder::new([r"."]) - /// .line_terminator(0x80) - /// .build() - /// .is_err() - /// ); - /// // Note that using a non-ASCII byte isn't enough on its own to - /// // cause regex compilation to fail. You actually have to make use - /// // of it in the regex in a way that leads to matching invalid - /// // UTF-8. If you don't, then regex compilation will succeed! - /// assert!( - /// RegexSetBuilder::new([r"a"]) - /// .line_terminator(0x80) - /// .build() - /// .is_ok() - /// ); - /// ``` - pub fn line_terminator(&mut self, byte: u8) -> &mut RegexSetBuilder { - self.builder.line_terminator(byte); - self - } - - /// This configures swap-greed mode for all of the patterns. - /// - /// When swap-greed mode is enabled, patterns like `a+` will become - /// non-greedy and patterns like `a+?` will become greedy. In other - /// words, the meanings of `a+` and `a+?` are switched. - /// - /// This setting can also be configured using the inline flag `U` in - /// the pattern. - /// - /// Note that this is generally not useful for a `RegexSet` since a - /// `RegexSet` can only report whether a pattern matches or not. Since - /// greediness never impacts whether a match is found or not (only the - /// offsets of the match), it follows that whether parts of a pattern - /// are greedy or not doesn't matter for a `RegexSet`. - /// - /// The default for this is `false`. - pub fn swap_greed(&mut self, yes: bool) -> &mut RegexSetBuilder { - self.builder.swap_greed(yes); - self - } - - /// This configures verbose mode for all of the patterns. 
- /// - /// When enabled, whitespace will treated as insignificant in the - /// pattern and `#` can be used to start a comment until the next new - /// line. - /// - /// Normally, in most places in a pattern, whitespace is treated - /// literally. For example ` +` will match one or more ASCII whitespace - /// characters. - /// - /// When verbose mode is enabled, `\#` can be used to match a literal - /// `#` and `\ ` can be used to match a literal ASCII whitespace - /// character. - /// - /// Verbose mode is useful for permitting regexes to be formatted and - /// broken up more nicely. This may make them more easily readable. - /// - /// This setting can also be configured using the inline flag `x` in - /// the pattern. - /// - /// The default for this is `false`. - /// - /// # Example - /// - /// ``` - /// use regex::RegexSetBuilder; - /// - /// let pat = r" - /// \b - /// (?\p{Uppercase}\w*) # always start with uppercase letter - /// [\s--\n]+ # whitespace should separate names - /// (?: # middle name can be an initial! - /// (?:(?\p{Uppercase})\.|(?\p{Uppercase}\w*)) - /// [\s--\n]+ - /// )? - /// (?\p{Uppercase}\w*) - /// \b - /// "; - /// let re = RegexSetBuilder::new([pat]) - /// .ignore_whitespace(true) - /// .build() - /// .unwrap(); - /// assert!(re.is_match("Harry Potter")); - /// assert!(re.is_match("Harry J. Potter")); - /// assert!(re.is_match("Harry James Potter")); - /// assert!(!re.is_match("harry J. Potter")); - /// ``` - pub fn ignore_whitespace( - &mut self, - yes: bool, - ) -> &mut RegexSetBuilder { - self.builder.ignore_whitespace(yes); - self - } - - /// This configures octal mode for all of the patterns. - /// - /// Octal syntax is a little-known way of uttering Unicode codepoints - /// in a pattern. For example, `a`, `\x61`, `\u0061` and `\141` are all - /// equivalent patterns, where the last example shows octal syntax. - /// - /// While supporting octal syntax isn't in and of itself a problem, - /// it does make good error messages harder. That is, in PCRE based - /// regex engines, syntax like `\1` invokes a backreference, which is - /// explicitly unsupported this library. However, many users expect - /// backreferences to be supported. Therefore, when octal support - /// is disabled, the error message will explicitly mention that - /// backreferences aren't supported. - /// - /// The default for this is `false`. - /// - /// # Example - /// - /// ``` - /// use regex::RegexSetBuilder; - /// - /// // Normally this pattern would not compile, with an error message - /// // about backreferences not being supported. But with octal mode - /// // enabled, octal escape sequences work. - /// let re = RegexSetBuilder::new([r"\141"]) - /// .octal(true) - /// .build() - /// .unwrap(); - /// assert!(re.is_match("a")); - /// ``` - pub fn octal(&mut self, yes: bool) -> &mut RegexSetBuilder { - self.builder.octal(yes); - self - } - - /// Sets the approximate size limit, in bytes, of the compiled regex. - /// - /// This roughly corresponds to the number of heap memory, in - /// bytes, occupied by a single regex. If the regex would otherwise - /// approximately exceed this limit, then compiling that regex will - /// fail. - /// - /// The main utility of a method like this is to avoid compiling - /// regexes that use an unexpected amount of resources, such as - /// time and memory. Even if the memory usage of a large regex is - /// acceptable, its search time may not be. 
Namely, worst case time - /// complexity for search is `O(m * n)`, where `m ~ len(pattern)` and - /// `n ~ len(haystack)`. That is, search time depends, in part, on the - /// size of the compiled regex. This means that putting a limit on the - /// size of the regex limits how much a regex can impact search time. - /// - /// For more information about regex size limits, see the section on - /// [untrusted inputs](crate#untrusted-input) in the top-level crate - /// documentation. - /// - /// The default for this is some reasonable number that permits most - /// patterns to compile successfully. - /// - /// # Example - /// - /// ``` - /// # if !cfg!(target_pointer_width = "64") { return; } // see #1041 - /// use regex::RegexSetBuilder; - /// - /// // It may surprise you how big some seemingly small patterns can - /// // be! Since \w is Unicode aware, this generates a regex that can - /// // match approximately 140,000 distinct codepoints. - /// assert!( - /// RegexSetBuilder::new([r"\w"]) - /// .size_limit(45_000) - /// .build() - /// .is_err() - /// ); - /// ``` - pub fn size_limit(&mut self, bytes: usize) -> &mut RegexSetBuilder { - self.builder.size_limit(bytes); - self - } - - /// Set the approximate capacity, in bytes, of the cache of transitions - /// used by the lazy DFA. - /// - /// While the lazy DFA isn't always used, in tends to be the most - /// commonly use regex engine in default configurations. It tends to - /// adopt the performance profile of a fully build DFA, but without the - /// downside of taking worst case exponential time to build. - /// - /// The downside is that it needs to keep a cache of transitions and - /// states that are built while running a search, and this cache - /// can fill up. When it fills up, the cache will reset itself. Any - /// previously generated states and transitions will then need to be - /// re-generated. If this happens too many times, then this library - /// will bail out of using the lazy DFA and switch to a different regex - /// engine. - /// - /// If your regex provokes this particular downside of the lazy DFA, - /// then it may be beneficial to increase its cache capacity. This will - /// potentially reduce the frequency of cache resetting (ideally to - /// `0`). While it won't fix all potential performance problems with - /// the lazy DFA, increasing the cache capacity does fix some. - /// - /// There is no easy way to determine, a priori, whether increasing - /// this cache capacity will help. In general, the larger your regex, - /// the more cache it's likely to use. But that isn't an ironclad rule. - /// For example, a regex like `[01]*1[01]{N}` would normally produce a - /// fully build DFA that is exponential in size with respect to `N`. - /// The lazy DFA will prevent exponential space blow-up, but it cache - /// is likely to fill up, even when it's large and even for smallish - /// values of `N`. - /// - /// If you aren't sure whether this helps or not, it is sensible to - /// set this to some arbitrarily large number in testing, such as - /// `usize::MAX`. Namely, this represents the amount of capacity that - /// *may* be used. It's probably not a good idea to use `usize::MAX` in - /// production though, since it implies there are no controls on heap - /// memory used by this library during a search. In effect, set it to - /// whatever you're willing to allocate for a single regex search. 
- pub fn dfa_size_limit( - &mut self, - bytes: usize, - ) -> &mut RegexSetBuilder { - self.builder.dfa_size_limit(bytes); - self - } - - /// Set the nesting limit for this parser. - /// - /// The nesting limit controls how deep the abstract syntax tree is - /// allowed to be. If the AST exceeds the given limit (e.g., with too - /// many nested groups), then an error is returned by the parser. - /// - /// The purpose of this limit is to act as a heuristic to prevent stack - /// overflow for consumers that do structural induction on an AST using - /// explicit recursion. While this crate never does this (instead using - /// constant stack space and moving the call stack to the heap), other - /// crates may. - /// - /// This limit is not checked until the entire AST is parsed. - /// Therefore, if callers want to put a limit on the amount of heap - /// space used, then they should impose a limit on the length, in - /// bytes, of the concrete pattern string. In particular, this is - /// viable since this parser implementation will limit itself to heap - /// space proportional to the length of the pattern string. See also - /// the [untrusted inputs](crate#untrusted-input) section in the - /// top-level crate documentation for more information about this. - /// - /// Note that a nest limit of `0` will return a nest limit error for - /// most patterns but not all. For example, a nest limit of `0` permits - /// `a` but not `ab`, since `ab` requires an explicit concatenation, - /// which results in a nest depth of `1`. In general, a nest limit is - /// not something that manifests in an obvious way in the concrete - /// syntax, therefore, it should not be used in a granular way. - /// - /// # Example - /// - /// ``` - /// use regex::RegexSetBuilder; - /// - /// assert!(RegexSetBuilder::new([r"a"]).nest_limit(0).build().is_ok()); - /// assert!(RegexSetBuilder::new([r"ab"]).nest_limit(0).build().is_err()); - /// ``` - pub fn nest_limit(&mut self, limit: u32) -> &mut RegexSetBuilder { - self.builder.nest_limit(limit); - self - } - } -} - -pub(crate) mod bytes { - use crate::{ - bytes::{Regex, RegexSet}, - error::Error, - }; - - use super::Builder; - - /// A configurable builder for a [`Regex`]. - /// - /// This builder can be used to programmatically set flags such as `i` - /// (case insensitive) and `x` (for verbose mode). This builder can also be - /// used to configure things like the line terminator and a size limit on - /// the compiled regular expression. - #[derive(Clone, Debug)] - pub struct RegexBuilder { - builder: Builder, - } - - impl RegexBuilder { - /// Create a new builder with a default configuration for the given - /// pattern. - /// - /// If the pattern is invalid or exceeds the configured size limits, - /// then an error will be returned when [`RegexBuilder::build`] is - /// called. - pub fn new(pattern: &str) -> RegexBuilder { - RegexBuilder { builder: Builder::new([pattern]) } - } - - /// Compiles the pattern given to `RegexBuilder::new` with the - /// configuration set on this builder. - /// - /// If the pattern isn't a valid regex or if a configured size limit - /// was exceeded, then an error is returned. - pub fn build(&self) -> Result { - self.builder.build_one_bytes() - } - - /// This configures Unicode mode for the entire pattern. - /// - /// Enabling Unicode mode does a number of things: - /// - /// * Most fundamentally, it causes the fundamental atom of matching - /// to be a single codepoint. When Unicode mode is disabled, it's a - /// single byte. 
For example, when Unicode mode is enabled, `.` will - /// match `💩` once, where as it will match 4 times when Unicode mode - /// is disabled. (Since the UTF-8 encoding of `💩` is 4 bytes long.) - /// * Case insensitive matching uses Unicode simple case folding rules. - /// * Unicode character classes like `\p{Letter}` and `\p{Greek}` are - /// available. - /// * Perl character classes are Unicode aware. That is, `\w`, `\s` and - /// `\d`. - /// * The word boundary assertions, `\b` and `\B`, use the Unicode - /// definition of a word character. - /// - /// Note that unlike the top-level `Regex` for searching `&str`, it - /// is permitted to disable Unicode mode even if the resulting pattern - /// could match invalid UTF-8. For example, `(?-u:.)` is not a valid - /// pattern for a top-level `Regex`, but is valid for a `bytes::Regex`. - /// - /// For more details on the Unicode support in this crate, see the - /// [Unicode section](crate#unicode) in this crate's top-level - /// documentation. - /// - /// The default for this is `true`. - /// - /// # Example - /// - /// ``` - /// use regex::bytes::RegexBuilder; - /// - /// let re = RegexBuilder::new(r"\w") - /// .unicode(false) - /// .build() - /// .unwrap(); - /// // Normally greek letters would be included in \w, but since - /// // Unicode mode is disabled, it only matches ASCII letters. - /// assert!(!re.is_match("δ".as_bytes())); - /// - /// let re = RegexBuilder::new(r"s") - /// .case_insensitive(true) - /// .unicode(false) - /// .build() - /// .unwrap(); - /// // Normally 'ſ' is included when searching for 's' case - /// // insensitively due to Unicode's simple case folding rules. But - /// // when Unicode mode is disabled, only ASCII case insensitive rules - /// // are used. - /// assert!(!re.is_match("ſ".as_bytes())); - /// ``` - /// - /// Since this builder is for constructing a [`bytes::Regex`](Regex), - /// one can disable Unicode mode even if it would match invalid UTF-8: - /// - /// ``` - /// use regex::bytes::RegexBuilder; - /// - /// let re = RegexBuilder::new(r".") - /// .unicode(false) - /// .build() - /// .unwrap(); - /// // Normally greek letters would be included in \w, but since - /// // Unicode mode is disabled, it only matches ASCII letters. - /// assert!(re.is_match(b"\xFF")); - /// ``` - pub fn unicode(&mut self, yes: bool) -> &mut RegexBuilder { - self.builder.unicode(yes); - self - } - - /// This configures whether to enable case insensitive matching for the - /// entire pattern. - /// - /// This setting can also be configured using the inline flag `i` - /// in the pattern. For example, `(?i:foo)` matches `foo` case - /// insensitively while `(?-i:foo)` matches `foo` case sensitively. - /// - /// The default for this is `false`. - /// - /// # Example - /// - /// ``` - /// use regex::bytes::RegexBuilder; - /// - /// let re = RegexBuilder::new(r"foo(?-i:bar)quux") - /// .case_insensitive(true) - /// .build() - /// .unwrap(); - /// assert!(re.is_match(b"FoObarQuUx")); - /// // Even though case insensitive matching is enabled in the builder, - /// // it can be locally disabled within the pattern. In this case, - /// // `bar` is matched case sensitively. - /// assert!(!re.is_match(b"fooBARquux")); - /// ``` - pub fn case_insensitive(&mut self, yes: bool) -> &mut RegexBuilder { - self.builder.case_insensitive(yes); - self - } - - /// This configures multi-line mode for the entire pattern. - /// - /// Enabling multi-line mode changes the behavior of the `^` and `$` - /// anchor assertions. 
Instead of only matching at the beginning and - /// end of a haystack, respectively, multi-line mode causes them to - /// match at the beginning and end of a line *in addition* to the - /// beginning and end of a haystack. More precisely, `^` will match at - /// the position immediately following a `\n` and `$` will match at the - /// position immediately preceding a `\n`. - /// - /// The behavior of this option can be impacted by other settings too: - /// - /// * The [`RegexBuilder::line_terminator`] option changes `\n` above - /// to any ASCII byte. - /// * The [`RegexBuilder::crlf`] option changes the line terminator to - /// be either `\r` or `\n`, but never at the position between a `\r` - /// and `\n`. - /// - /// This setting can also be configured using the inline flag `m` in - /// the pattern. - /// - /// The default for this is `false`. - /// - /// # Example - /// - /// ``` - /// use regex::bytes::RegexBuilder; - /// - /// let re = RegexBuilder::new(r"^foo$") - /// .multi_line(true) - /// .build() - /// .unwrap(); - /// assert_eq!(Some(1..4), re.find(b"\nfoo\n").map(|m| m.range())); - /// ``` - pub fn multi_line(&mut self, yes: bool) -> &mut RegexBuilder { - self.builder.multi_line(yes); - self - } - - /// This configures dot-matches-new-line mode for the entire pattern. - /// - /// Perhaps surprisingly, the default behavior for `.` is not to match - /// any character, but rather, to match any character except for the - /// line terminator (which is `\n` by default). When this mode is - /// enabled, the behavior changes such that `.` truly matches any - /// character. - /// - /// This setting can also be configured using the inline flag `s` in - /// the pattern. For example, `(?s:.)` and `\p{any}` are equivalent - /// regexes. - /// - /// The default for this is `false`. - /// - /// # Example - /// - /// ``` - /// use regex::bytes::RegexBuilder; - /// - /// let re = RegexBuilder::new(r"foo.bar") - /// .dot_matches_new_line(true) - /// .build() - /// .unwrap(); - /// let hay = b"foo\nbar"; - /// assert_eq!(Some(&b"foo\nbar"[..]), re.find(hay).map(|m| m.as_bytes())); - /// ``` - pub fn dot_matches_new_line( - &mut self, - yes: bool, - ) -> &mut RegexBuilder { - self.builder.dot_matches_new_line(yes); - self - } - - /// This configures CRLF mode for the entire pattern. - /// - /// When CRLF mode is enabled, both `\r` ("carriage return" or CR for - /// short) and `\n` ("line feed" or LF for short) are treated as line - /// terminators. This results in the following: - /// - /// * Unless dot-matches-new-line mode is enabled, `.` will now match - /// any character except for `\n` and `\r`. - /// * When multi-line mode is enabled, `^` will match immediately - /// following a `\n` or a `\r`. Similarly, `$` will match immediately - /// preceding a `\n` or a `\r`. Neither `^` nor `$` will ever match - /// between `\r` and `\n`. - /// - /// This setting can also be configured using the inline flag `R` in - /// the pattern. - /// - /// The default for this is `false`. - /// - /// # Example - /// - /// ``` - /// use regex::bytes::RegexBuilder; - /// - /// let re = RegexBuilder::new(r"^foo$") - /// .multi_line(true) - /// .crlf(true) - /// .build() - /// .unwrap(); - /// let hay = b"\r\nfoo\r\n"; - /// // If CRLF mode weren't enabled here, then '$' wouldn't match - /// // immediately after 'foo', and thus no match would be found. 
- /// assert_eq!(Some(&b"foo"[..]), re.find(hay).map(|m| m.as_bytes())); - /// ``` - /// - /// This example demonstrates that `^` will never match at a position - /// between `\r` and `\n`. (`$` will similarly not match between a `\r` - /// and a `\n`.) - /// - /// ``` - /// use regex::bytes::RegexBuilder; - /// - /// let re = RegexBuilder::new(r"^") - /// .multi_line(true) - /// .crlf(true) - /// .build() - /// .unwrap(); - /// let hay = b"\r\n\r\n"; - /// let ranges: Vec<_> = re.find_iter(hay).map(|m| m.range()).collect(); - /// assert_eq!(ranges, vec![0..0, 2..2, 4..4]); - /// ``` - pub fn crlf(&mut self, yes: bool) -> &mut RegexBuilder { - self.builder.crlf(yes); - self - } - - /// Configures the line terminator to be used by the regex. - /// - /// The line terminator is relevant in two ways for a particular regex: - /// - /// * When dot-matches-new-line mode is *not* enabled (the default), - /// then `.` will match any character except for the configured line - /// terminator. - /// * When multi-line mode is enabled (not the default), then `^` and - /// `$` will match immediately after and before, respectively, a line - /// terminator. - /// - /// In both cases, if CRLF mode is enabled in a particular context, - /// then it takes precedence over any configured line terminator. - /// - /// This option cannot be configured from within the pattern. - /// - /// The default line terminator is `\n`. - /// - /// # Example - /// - /// This shows how to treat the NUL byte as a line terminator. This can - /// be a useful heuristic when searching binary data. - /// - /// ``` - /// use regex::bytes::RegexBuilder; - /// - /// let re = RegexBuilder::new(r"^foo$") - /// .multi_line(true) - /// .line_terminator(b'\x00') - /// .build() - /// .unwrap(); - /// let hay = b"\x00foo\x00"; - /// assert_eq!(Some(1..4), re.find(hay).map(|m| m.range())); - /// ``` - /// - /// This example shows that the behavior of `.` is impacted by this - /// setting as well: - /// - /// ``` - /// use regex::bytes::RegexBuilder; - /// - /// let re = RegexBuilder::new(r".") - /// .line_terminator(b'\x00') - /// .build() - /// .unwrap(); - /// assert!(re.is_match(b"\n")); - /// assert!(!re.is_match(b"\x00")); - /// ``` - /// - /// This shows that building a regex will work even when the byte - /// given is not ASCII. This is unlike the top-level `Regex` API where - /// matching invalid UTF-8 is not allowed. - /// - /// Note though that you must disable Unicode mode. This is required - /// because Unicode mode requires matching one codepoint at a time, - /// and there is no way to match a non-ASCII byte as if it were a - /// codepoint. - /// - /// ``` - /// use regex::bytes::RegexBuilder; - /// - /// assert!( - /// RegexBuilder::new(r".") - /// .unicode(false) - /// .line_terminator(0x80) - /// .build() - /// .is_ok(), - /// ); - /// ``` - pub fn line_terminator(&mut self, byte: u8) -> &mut RegexBuilder { - self.builder.line_terminator(byte); - self - } - - /// This configures swap-greed mode for the entire pattern. - /// - /// When swap-greed mode is enabled, patterns like `a+` will become - /// non-greedy and patterns like `a+?` will become greedy. In other - /// words, the meanings of `a+` and `a+?` are switched. - /// - /// This setting can also be configured using the inline flag `U` in - /// the pattern. - /// - /// The default for this is `false`. 
- /// - /// # Example - /// - /// ``` - /// use regex::bytes::RegexBuilder; - /// - /// let re = RegexBuilder::new(r"a+") - /// .swap_greed(true) - /// .build() - /// .unwrap(); - /// assert_eq!(Some(&b"a"[..]), re.find(b"aaa").map(|m| m.as_bytes())); - /// ``` - pub fn swap_greed(&mut self, yes: bool) -> &mut RegexBuilder { - self.builder.swap_greed(yes); - self - } - - /// This configures verbose mode for the entire pattern. - /// - /// When enabled, whitespace will treated as insignificant in the - /// pattern and `#` can be used to start a comment until the next new - /// line. - /// - /// Normally, in most places in a pattern, whitespace is treated - /// literally. For example ` +` will match one or more ASCII whitespace - /// characters. - /// - /// When verbose mode is enabled, `\#` can be used to match a literal - /// `#` and `\ ` can be used to match a literal ASCII whitespace - /// character. - /// - /// Verbose mode is useful for permitting regexes to be formatted and - /// broken up more nicely. This may make them more easily readable. - /// - /// This setting can also be configured using the inline flag `x` in - /// the pattern. - /// - /// The default for this is `false`. - /// - /// # Example - /// - /// ``` - /// use regex::bytes::RegexBuilder; - /// - /// let pat = r" - /// \b - /// (?\p{Uppercase}\w*) # always start with uppercase letter - /// [\s--\n]+ # whitespace should separate names - /// (?: # middle name can be an initial! - /// (?:(?\p{Uppercase})\.|(?\p{Uppercase}\w*)) - /// [\s--\n]+ - /// )? - /// (?\p{Uppercase}\w*) - /// \b - /// "; - /// let re = RegexBuilder::new(pat) - /// .ignore_whitespace(true) - /// .build() - /// .unwrap(); - /// - /// let caps = re.captures(b"Harry Potter").unwrap(); - /// assert_eq!(&b"Harry"[..], &caps["first"]); - /// assert_eq!(&b"Potter"[..], &caps["last"]); - /// - /// let caps = re.captures(b"Harry J. Potter").unwrap(); - /// assert_eq!(&b"Harry"[..], &caps["first"]); - /// // Since a middle name/initial isn't required for an overall match, - /// // we can't assume that 'initial' or 'middle' will be populated! - /// assert_eq!( - /// Some(&b"J"[..]), - /// caps.name("initial").map(|m| m.as_bytes()), - /// ); - /// assert_eq!(None, caps.name("middle").map(|m| m.as_bytes())); - /// assert_eq!(&b"Potter"[..], &caps["last"]); - /// - /// let caps = re.captures(b"Harry James Potter").unwrap(); - /// assert_eq!(&b"Harry"[..], &caps["first"]); - /// // Since a middle name/initial isn't required for an overall match, - /// // we can't assume that 'initial' or 'middle' will be populated! - /// assert_eq!(None, caps.name("initial").map(|m| m.as_bytes())); - /// assert_eq!( - /// Some(&b"James"[..]), - /// caps.name("middle").map(|m| m.as_bytes()), - /// ); - /// assert_eq!(&b"Potter"[..], &caps["last"]); - /// ``` - pub fn ignore_whitespace(&mut self, yes: bool) -> &mut RegexBuilder { - self.builder.ignore_whitespace(yes); - self - } - - /// This configures octal mode for the entire pattern. - /// - /// Octal syntax is a little-known way of uttering Unicode codepoints - /// in a pattern. For example, `a`, `\x61`, `\u0061` and `\141` are all - /// equivalent patterns, where the last example shows octal syntax. - /// - /// While supporting octal syntax isn't in and of itself a problem, - /// it does make good error messages harder. That is, in PCRE based - /// regex engines, syntax like `\1` invokes a backreference, which is - /// explicitly unsupported this library. However, many users expect - /// backreferences to be supported. 
Therefore, when octal support - /// is disabled, the error message will explicitly mention that - /// backreferences aren't supported. - /// - /// The default for this is `false`. - /// - /// # Example - /// - /// ``` - /// use regex::bytes::RegexBuilder; - /// - /// // Normally this pattern would not compile, with an error message - /// // about backreferences not being supported. But with octal mode - /// // enabled, octal escape sequences work. - /// let re = RegexBuilder::new(r"\141") - /// .octal(true) - /// .build() - /// .unwrap(); - /// assert!(re.is_match(b"a")); - /// ``` - pub fn octal(&mut self, yes: bool) -> &mut RegexBuilder { - self.builder.octal(yes); - self - } - - /// Sets the approximate size limit, in bytes, of the compiled regex. - /// - /// This roughly corresponds to the number of heap memory, in - /// bytes, occupied by a single regex. If the regex would otherwise - /// approximately exceed this limit, then compiling that regex will - /// fail. - /// - /// The main utility of a method like this is to avoid compiling - /// regexes that use an unexpected amount of resources, such as - /// time and memory. Even if the memory usage of a large regex is - /// acceptable, its search time may not be. Namely, worst case time - /// complexity for search is `O(m * n)`, where `m ~ len(pattern)` and - /// `n ~ len(haystack)`. That is, search time depends, in part, on the - /// size of the compiled regex. This means that putting a limit on the - /// size of the regex limits how much a regex can impact search time. - /// - /// For more information about regex size limits, see the section on - /// [untrusted inputs](crate#untrusted-input) in the top-level crate - /// documentation. - /// - /// The default for this is some reasonable number that permits most - /// patterns to compile successfully. - /// - /// # Example - /// - /// ``` - /// # if !cfg!(target_pointer_width = "64") { return; } // see #1041 - /// use regex::bytes::RegexBuilder; - /// - /// // It may surprise you how big some seemingly small patterns can - /// // be! Since \w is Unicode aware, this generates a regex that can - /// // match approximately 140,000 distinct codepoints. - /// assert!(RegexBuilder::new(r"\w").size_limit(45_000).build().is_err()); - /// ``` - pub fn size_limit(&mut self, bytes: usize) -> &mut RegexBuilder { - self.builder.size_limit(bytes); - self - } - - /// Set the approximate capacity, in bytes, of the cache of transitions - /// used by the lazy DFA. - /// - /// While the lazy DFA isn't always used, in tends to be the most - /// commonly use regex engine in default configurations. It tends to - /// adopt the performance profile of a fully build DFA, but without the - /// downside of taking worst case exponential time to build. - /// - /// The downside is that it needs to keep a cache of transitions and - /// states that are built while running a search, and this cache - /// can fill up. When it fills up, the cache will reset itself. Any - /// previously generated states and transitions will then need to be - /// re-generated. If this happens too many times, then this library - /// will bail out of using the lazy DFA and switch to a different regex - /// engine. - /// - /// If your regex provokes this particular downside of the lazy DFA, - /// then it may be beneficial to increase its cache capacity. This will - /// potentially reduce the frequency of cache resetting (ideally to - /// `0`). 
While it won't fix all potential performance problems with - /// the lazy DFA, increasing the cache capacity does fix some. - /// - /// There is no easy way to determine, a priori, whether increasing - /// this cache capacity will help. In general, the larger your regex, - /// the more cache it's likely to use. But that isn't an ironclad rule. - /// For example, a regex like `[01]*1[01]{N}` would normally produce a - /// fully build DFA that is exponential in size with respect to `N`. - /// The lazy DFA will prevent exponential space blow-up, but it cache - /// is likely to fill up, even when it's large and even for smallish - /// values of `N`. - /// - /// If you aren't sure whether this helps or not, it is sensible to - /// set this to some arbitrarily large number in testing, such as - /// `usize::MAX`. Namely, this represents the amount of capacity that - /// *may* be used. It's probably not a good idea to use `usize::MAX` in - /// production though, since it implies there are no controls on heap - /// memory used by this library during a search. In effect, set it to - /// whatever you're willing to allocate for a single regex search. - pub fn dfa_size_limit(&mut self, bytes: usize) -> &mut RegexBuilder { - self.builder.dfa_size_limit(bytes); - self - } - - /// Set the nesting limit for this parser. - /// - /// The nesting limit controls how deep the abstract syntax tree is - /// allowed to be. If the AST exceeds the given limit (e.g., with too - /// many nested groups), then an error is returned by the parser. - /// - /// The purpose of this limit is to act as a heuristic to prevent stack - /// overflow for consumers that do structural induction on an AST using - /// explicit recursion. While this crate never does this (instead using - /// constant stack space and moving the call stack to the heap), other - /// crates may. - /// - /// This limit is not checked until the entire AST is parsed. - /// Therefore, if callers want to put a limit on the amount of heap - /// space used, then they should impose a limit on the length, in - /// bytes, of the concrete pattern string. In particular, this is - /// viable since this parser implementation will limit itself to heap - /// space proportional to the length of the pattern string. See also - /// the [untrusted inputs](crate#untrusted-input) section in the - /// top-level crate documentation for more information about this. - /// - /// Note that a nest limit of `0` will return a nest limit error for - /// most patterns but not all. For example, a nest limit of `0` permits - /// `a` but not `ab`, since `ab` requires an explicit concatenation, - /// which results in a nest depth of `1`. In general, a nest limit is - /// not something that manifests in an obvious way in the concrete - /// syntax, therefore, it should not be used in a granular way. - /// - /// # Example - /// - /// ``` - /// use regex::bytes::RegexBuilder; - /// - /// assert!(RegexBuilder::new(r"a").nest_limit(0).build().is_ok()); - /// assert!(RegexBuilder::new(r"ab").nest_limit(0).build().is_err()); - /// ``` - pub fn nest_limit(&mut self, limit: u32) -> &mut RegexBuilder { - self.builder.nest_limit(limit); - self - } - } - - /// A configurable builder for a [`RegexSet`]. - /// - /// This builder can be used to programmatically set flags such as `i` - /// (case insensitive) and `x` (for verbose mode). This builder can also be - /// used to configure things like the line terminator and a size limit on - /// the compiled regular expression. 
- #[derive(Clone, Debug)] - pub struct RegexSetBuilder { - builder: Builder, - } - - impl RegexSetBuilder { - /// Create a new builder with a default configuration for the given - /// patterns. - /// - /// If the patterns are invalid or exceed the configured size limits, - /// then an error will be returned when [`RegexSetBuilder::build`] is - /// called. - pub fn new(patterns: I) -> RegexSetBuilder - where - I: IntoIterator, - S: AsRef, - { - RegexSetBuilder { builder: Builder::new(patterns) } - } - - /// Compiles the patterns given to `RegexSetBuilder::new` with the - /// configuration set on this builder. - /// - /// If the patterns aren't valid regexes or if a configured size limit - /// was exceeded, then an error is returned. - pub fn build(&self) -> Result { - self.builder.build_many_bytes() - } - - /// This configures Unicode mode for the all of the patterns. - /// - /// Enabling Unicode mode does a number of things: - /// - /// * Most fundamentally, it causes the fundamental atom of matching - /// to be a single codepoint. When Unicode mode is disabled, it's a - /// single byte. For example, when Unicode mode is enabled, `.` will - /// match `💩` once, where as it will match 4 times when Unicode mode - /// is disabled. (Since the UTF-8 encoding of `💩` is 4 bytes long.) - /// * Case insensitive matching uses Unicode simple case folding rules. - /// * Unicode character classes like `\p{Letter}` and `\p{Greek}` are - /// available. - /// * Perl character classes are Unicode aware. That is, `\w`, `\s` and - /// `\d`. - /// * The word boundary assertions, `\b` and `\B`, use the Unicode - /// definition of a word character. - /// - /// Note that unlike the top-level `RegexSet` for searching `&str`, - /// it is permitted to disable Unicode mode even if the resulting - /// pattern could match invalid UTF-8. For example, `(?-u:.)` is not - /// a valid pattern for a top-level `RegexSet`, but is valid for a - /// `bytes::RegexSet`. - /// - /// For more details on the Unicode support in this crate, see the - /// [Unicode section](crate#unicode) in this crate's top-level - /// documentation. - /// - /// The default for this is `true`. - /// - /// # Example - /// - /// ``` - /// use regex::bytes::RegexSetBuilder; - /// - /// let re = RegexSetBuilder::new([r"\w"]) - /// .unicode(false) - /// .build() - /// .unwrap(); - /// // Normally greek letters would be included in \w, but since - /// // Unicode mode is disabled, it only matches ASCII letters. - /// assert!(!re.is_match("δ".as_bytes())); - /// - /// let re = RegexSetBuilder::new([r"s"]) - /// .case_insensitive(true) - /// .unicode(false) - /// .build() - /// .unwrap(); - /// // Normally 'ſ' is included when searching for 's' case - /// // insensitively due to Unicode's simple case folding rules. But - /// // when Unicode mode is disabled, only ASCII case insensitive rules - /// // are used. - /// assert!(!re.is_match("ſ".as_bytes())); - /// ``` - /// - /// Since this builder is for constructing a - /// [`bytes::RegexSet`](RegexSet), one can disable Unicode mode even if - /// it would match invalid UTF-8: - /// - /// ``` - /// use regex::bytes::RegexSetBuilder; - /// - /// let re = RegexSetBuilder::new([r"."]) - /// .unicode(false) - /// .build() - /// .unwrap(); - /// // Normally greek letters would be included in \w, but since - /// // Unicode mode is disabled, it only matches ASCII letters. 
- /// assert!(re.is_match(b"\xFF")); - /// ``` - pub fn unicode(&mut self, yes: bool) -> &mut RegexSetBuilder { - self.builder.unicode(yes); - self - } - - /// This configures whether to enable case insensitive matching for all - /// of the patterns. - /// - /// This setting can also be configured using the inline flag `i` - /// in the pattern. For example, `(?i:foo)` matches `foo` case - /// insensitively while `(?-i:foo)` matches `foo` case sensitively. - /// - /// The default for this is `false`. - /// - /// # Example - /// - /// ``` - /// use regex::bytes::RegexSetBuilder; - /// - /// let re = RegexSetBuilder::new([r"foo(?-i:bar)quux"]) - /// .case_insensitive(true) - /// .build() - /// .unwrap(); - /// assert!(re.is_match(b"FoObarQuUx")); - /// // Even though case insensitive matching is enabled in the builder, - /// // it can be locally disabled within the pattern. In this case, - /// // `bar` is matched case sensitively. - /// assert!(!re.is_match(b"fooBARquux")); - /// ``` - pub fn case_insensitive(&mut self, yes: bool) -> &mut RegexSetBuilder { - self.builder.case_insensitive(yes); - self - } - - /// This configures multi-line mode for all of the patterns. - /// - /// Enabling multi-line mode changes the behavior of the `^` and `$` - /// anchor assertions. Instead of only matching at the beginning and - /// end of a haystack, respectively, multi-line mode causes them to - /// match at the beginning and end of a line *in addition* to the - /// beginning and end of a haystack. More precisely, `^` will match at - /// the position immediately following a `\n` and `$` will match at the - /// position immediately preceding a `\n`. - /// - /// The behavior of this option can be impacted by other settings too: - /// - /// * The [`RegexSetBuilder::line_terminator`] option changes `\n` - /// above to any ASCII byte. - /// * The [`RegexSetBuilder::crlf`] option changes the line terminator - /// to be either `\r` or `\n`, but never at the position between a `\r` - /// and `\n`. - /// - /// This setting can also be configured using the inline flag `m` in - /// the pattern. - /// - /// The default for this is `false`. - /// - /// # Example - /// - /// ``` - /// use regex::bytes::RegexSetBuilder; - /// - /// let re = RegexSetBuilder::new([r"^foo$"]) - /// .multi_line(true) - /// .build() - /// .unwrap(); - /// assert!(re.is_match(b"\nfoo\n")); - /// ``` - pub fn multi_line(&mut self, yes: bool) -> &mut RegexSetBuilder { - self.builder.multi_line(yes); - self - } - - /// This configures dot-matches-new-line mode for the entire pattern. - /// - /// Perhaps surprisingly, the default behavior for `.` is not to match - /// any character, but rather, to match any character except for the - /// line terminator (which is `\n` by default). When this mode is - /// enabled, the behavior changes such that `.` truly matches any - /// character. - /// - /// This setting can also be configured using the inline flag `s` in - /// the pattern. For example, `(?s:.)` and `\p{any}` are equivalent - /// regexes. - /// - /// The default for this is `false`. 
- /// - /// # Example - /// - /// ``` - /// use regex::bytes::RegexSetBuilder; - /// - /// let re = RegexSetBuilder::new([r"foo.bar"]) - /// .dot_matches_new_line(true) - /// .build() - /// .unwrap(); - /// let hay = b"foo\nbar"; - /// assert!(re.is_match(hay)); - /// ``` - pub fn dot_matches_new_line( - &mut self, - yes: bool, - ) -> &mut RegexSetBuilder { - self.builder.dot_matches_new_line(yes); - self - } - - /// This configures CRLF mode for all of the patterns. - /// - /// When CRLF mode is enabled, both `\r` ("carriage return" or CR for - /// short) and `\n` ("line feed" or LF for short) are treated as line - /// terminators. This results in the following: - /// - /// * Unless dot-matches-new-line mode is enabled, `.` will now match - /// any character except for `\n` and `\r`. - /// * When multi-line mode is enabled, `^` will match immediately - /// following a `\n` or a `\r`. Similarly, `$` will match immediately - /// preceding a `\n` or a `\r`. Neither `^` nor `$` will ever match - /// between `\r` and `\n`. - /// - /// This setting can also be configured using the inline flag `R` in - /// the pattern. - /// - /// The default for this is `false`. - /// - /// # Example - /// - /// ``` - /// use regex::bytes::RegexSetBuilder; - /// - /// let re = RegexSetBuilder::new([r"^foo$"]) - /// .multi_line(true) - /// .crlf(true) - /// .build() - /// .unwrap(); - /// let hay = b"\r\nfoo\r\n"; - /// // If CRLF mode weren't enabled here, then '$' wouldn't match - /// // immediately after 'foo', and thus no match would be found. - /// assert!(re.is_match(hay)); - /// ``` - /// - /// This example demonstrates that `^` will never match at a position - /// between `\r` and `\n`. (`$` will similarly not match between a `\r` - /// and a `\n`.) - /// - /// ``` - /// use regex::bytes::RegexSetBuilder; - /// - /// let re = RegexSetBuilder::new([r"^\n"]) - /// .multi_line(true) - /// .crlf(true) - /// .build() - /// .unwrap(); - /// assert!(!re.is_match(b"\r\n")); - /// ``` - pub fn crlf(&mut self, yes: bool) -> &mut RegexSetBuilder { - self.builder.crlf(yes); - self - } - - /// Configures the line terminator to be used by the regex. - /// - /// The line terminator is relevant in two ways for a particular regex: - /// - /// * When dot-matches-new-line mode is *not* enabled (the default), - /// then `.` will match any character except for the configured line - /// terminator. - /// * When multi-line mode is enabled (not the default), then `^` and - /// `$` will match immediately after and before, respectively, a line - /// terminator. - /// - /// In both cases, if CRLF mode is enabled in a particular context, - /// then it takes precedence over any configured line terminator. - /// - /// This option cannot be configured from within the pattern. - /// - /// The default line terminator is `\n`. - /// - /// # Example - /// - /// This shows how to treat the NUL byte as a line terminator. This can - /// be a useful heuristic when searching binary data. 
- /// - /// ``` - /// use regex::bytes::RegexSetBuilder; - /// - /// let re = RegexSetBuilder::new([r"^foo$"]) - /// .multi_line(true) - /// .line_terminator(b'\x00') - /// .build() - /// .unwrap(); - /// let hay = b"\x00foo\x00"; - /// assert!(re.is_match(hay)); - /// ``` - /// - /// This example shows that the behavior of `.` is impacted by this - /// setting as well: - /// - /// ``` - /// use regex::bytes::RegexSetBuilder; - /// - /// let re = RegexSetBuilder::new([r"."]) - /// .line_terminator(b'\x00') - /// .build() - /// .unwrap(); - /// assert!(re.is_match(b"\n")); - /// assert!(!re.is_match(b"\x00")); - /// ``` - /// - /// This shows that building a regex will work even when the byte given - /// is not ASCII. This is unlike the top-level `RegexSet` API where - /// matching invalid UTF-8 is not allowed. - /// - /// Note though that you must disable Unicode mode. This is required - /// because Unicode mode requires matching one codepoint at a time, - /// and there is no way to match a non-ASCII byte as if it were a - /// codepoint. - /// - /// ``` - /// use regex::bytes::RegexSetBuilder; - /// - /// assert!( - /// RegexSetBuilder::new([r"."]) - /// .unicode(false) - /// .line_terminator(0x80) - /// .build() - /// .is_ok(), - /// ); - /// ``` - pub fn line_terminator(&mut self, byte: u8) -> &mut RegexSetBuilder { - self.builder.line_terminator(byte); - self - } - - /// This configures swap-greed mode for all of the patterns. - /// - /// When swap-greed mode is enabled, patterns like `a+` will become - /// non-greedy and patterns like `a+?` will become greedy. In other - /// words, the meanings of `a+` and `a+?` are switched. - /// - /// This setting can also be configured using the inline flag `U` in - /// the pattern. - /// - /// Note that this is generally not useful for a `RegexSet` since a - /// `RegexSet` can only report whether a pattern matches or not. Since - /// greediness never impacts whether a match is found or not (only the - /// offsets of the match), it follows that whether parts of a pattern - /// are greedy or not doesn't matter for a `RegexSet`. - /// - /// The default for this is `false`. - pub fn swap_greed(&mut self, yes: bool) -> &mut RegexSetBuilder { - self.builder.swap_greed(yes); - self - } - - /// This configures verbose mode for all of the patterns. - /// - /// When enabled, whitespace will treated as insignificant in the - /// pattern and `#` can be used to start a comment until the next new - /// line. - /// - /// Normally, in most places in a pattern, whitespace is treated - /// literally. For example ` +` will match one or more ASCII whitespace - /// characters. - /// - /// When verbose mode is enabled, `\#` can be used to match a literal - /// `#` and `\ ` can be used to match a literal ASCII whitespace - /// character. - /// - /// Verbose mode is useful for permitting regexes to be formatted and - /// broken up more nicely. This may make them more easily readable. - /// - /// This setting can also be configured using the inline flag `x` in - /// the pattern. - /// - /// The default for this is `false`. - /// - /// # Example - /// - /// ``` - /// use regex::bytes::RegexSetBuilder; - /// - /// let pat = r" - /// \b - /// (?\p{Uppercase}\w*) # always start with uppercase letter - /// [\s--\n]+ # whitespace should separate names - /// (?: # middle name can be an initial! - /// (?:(?\p{Uppercase})\.|(?\p{Uppercase}\w*)) - /// [\s--\n]+ - /// )? 
- /// (?\p{Uppercase}\w*) - /// \b - /// "; - /// let re = RegexSetBuilder::new([pat]) - /// .ignore_whitespace(true) - /// .build() - /// .unwrap(); - /// assert!(re.is_match(b"Harry Potter")); - /// assert!(re.is_match(b"Harry J. Potter")); - /// assert!(re.is_match(b"Harry James Potter")); - /// assert!(!re.is_match(b"harry J. Potter")); - /// ``` - pub fn ignore_whitespace( - &mut self, - yes: bool, - ) -> &mut RegexSetBuilder { - self.builder.ignore_whitespace(yes); - self - } - - /// This configures octal mode for all of the patterns. - /// - /// Octal syntax is a little-known way of uttering Unicode codepoints - /// in a pattern. For example, `a`, `\x61`, `\u0061` and `\141` are all - /// equivalent patterns, where the last example shows octal syntax. - /// - /// While supporting octal syntax isn't in and of itself a problem, - /// it does make good error messages harder. That is, in PCRE based - /// regex engines, syntax like `\1` invokes a backreference, which is - /// explicitly unsupported this library. However, many users expect - /// backreferences to be supported. Therefore, when octal support - /// is disabled, the error message will explicitly mention that - /// backreferences aren't supported. - /// - /// The default for this is `false`. - /// - /// # Example - /// - /// ``` - /// use regex::bytes::RegexSetBuilder; - /// - /// // Normally this pattern would not compile, with an error message - /// // about backreferences not being supported. But with octal mode - /// // enabled, octal escape sequences work. - /// let re = RegexSetBuilder::new([r"\141"]) - /// .octal(true) - /// .build() - /// .unwrap(); - /// assert!(re.is_match(b"a")); - /// ``` - pub fn octal(&mut self, yes: bool) -> &mut RegexSetBuilder { - self.builder.octal(yes); - self - } - - /// Sets the approximate size limit, in bytes, of the compiled regex. - /// - /// This roughly corresponds to the number of heap memory, in - /// bytes, occupied by a single regex. If the regex would otherwise - /// approximately exceed this limit, then compiling that regex will - /// fail. - /// - /// The main utility of a method like this is to avoid compiling - /// regexes that use an unexpected amount of resources, such as - /// time and memory. Even if the memory usage of a large regex is - /// acceptable, its search time may not be. Namely, worst case time - /// complexity for search is `O(m * n)`, where `m ~ len(pattern)` and - /// `n ~ len(haystack)`. That is, search time depends, in part, on the - /// size of the compiled regex. This means that putting a limit on the - /// size of the regex limits how much a regex can impact search time. - /// - /// For more information about regex size limits, see the section on - /// [untrusted inputs](crate#untrusted-input) in the top-level crate - /// documentation. - /// - /// The default for this is some reasonable number that permits most - /// patterns to compile successfully. - /// - /// # Example - /// - /// ``` - /// # if !cfg!(target_pointer_width = "64") { return; } // see #1041 - /// use regex::bytes::RegexSetBuilder; - /// - /// // It may surprise you how big some seemingly small patterns can - /// // be! Since \w is Unicode aware, this generates a regex that can - /// // match approximately 140,000 distinct codepoints. 
- /// assert!( - /// RegexSetBuilder::new([r"\w"]) - /// .size_limit(45_000) - /// .build() - /// .is_err() - /// ); - /// ``` - pub fn size_limit(&mut self, bytes: usize) -> &mut RegexSetBuilder { - self.builder.size_limit(bytes); - self - } - - /// Set the approximate capacity, in bytes, of the cache of transitions - /// used by the lazy DFA. - /// - /// While the lazy DFA isn't always used, in tends to be the most - /// commonly use regex engine in default configurations. It tends to - /// adopt the performance profile of a fully build DFA, but without the - /// downside of taking worst case exponential time to build. - /// - /// The downside is that it needs to keep a cache of transitions and - /// states that are built while running a search, and this cache - /// can fill up. When it fills up, the cache will reset itself. Any - /// previously generated states and transitions will then need to be - /// re-generated. If this happens too many times, then this library - /// will bail out of using the lazy DFA and switch to a different regex - /// engine. - /// - /// If your regex provokes this particular downside of the lazy DFA, - /// then it may be beneficial to increase its cache capacity. This will - /// potentially reduce the frequency of cache resetting (ideally to - /// `0`). While it won't fix all potential performance problems with - /// the lazy DFA, increasing the cache capacity does fix some. - /// - /// There is no easy way to determine, a priori, whether increasing - /// this cache capacity will help. In general, the larger your regex, - /// the more cache it's likely to use. But that isn't an ironclad rule. - /// For example, a regex like `[01]*1[01]{N}` would normally produce a - /// fully build DFA that is exponential in size with respect to `N`. - /// The lazy DFA will prevent exponential space blow-up, but it cache - /// is likely to fill up, even when it's large and even for smallish - /// values of `N`. - /// - /// If you aren't sure whether this helps or not, it is sensible to - /// set this to some arbitrarily large number in testing, such as - /// `usize::MAX`. Namely, this represents the amount of capacity that - /// *may* be used. It's probably not a good idea to use `usize::MAX` in - /// production though, since it implies there are no controls on heap - /// memory used by this library during a search. In effect, set it to - /// whatever you're willing to allocate for a single regex search. - pub fn dfa_size_limit( - &mut self, - bytes: usize, - ) -> &mut RegexSetBuilder { - self.builder.dfa_size_limit(bytes); - self - } - - /// Set the nesting limit for this parser. - /// - /// The nesting limit controls how deep the abstract syntax tree is - /// allowed to be. If the AST exceeds the given limit (e.g., with too - /// many nested groups), then an error is returned by the parser. - /// - /// The purpose of this limit is to act as a heuristic to prevent stack - /// overflow for consumers that do structural induction on an AST using - /// explicit recursion. While this crate never does this (instead using - /// constant stack space and moving the call stack to the heap), other - /// crates may. - /// - /// This limit is not checked until the entire AST is parsed. - /// Therefore, if callers want to put a limit on the amount of heap - /// space used, then they should impose a limit on the length, in - /// bytes, of the concrete pattern string. 
In particular, this is - /// viable since this parser implementation will limit itself to heap - /// space proportional to the length of the pattern string. See also - /// the [untrusted inputs](crate#untrusted-input) section in the - /// top-level crate documentation for more information about this. - /// - /// Note that a nest limit of `0` will return a nest limit error for - /// most patterns but not all. For example, a nest limit of `0` permits - /// `a` but not `ab`, since `ab` requires an explicit concatenation, - /// which results in a nest depth of `1`. In general, a nest limit is - /// not something that manifests in an obvious way in the concrete - /// syntax, therefore, it should not be used in a granular way. - /// - /// # Example - /// - /// ``` - /// use regex::bytes::RegexSetBuilder; - /// - /// assert!(RegexSetBuilder::new([r"a"]).nest_limit(0).build().is_ok()); - /// assert!(RegexSetBuilder::new([r"ab"]).nest_limit(0).build().is_err()); - /// ``` - pub fn nest_limit(&mut self, limit: u32) -> &mut RegexSetBuilder { - self.builder.nest_limit(limit); - self - } - } -} diff --git a/vendor/regex/src/bytes.rs b/vendor/regex/src/bytes.rs deleted file mode 100644 index 383ac4a5b59b7a..00000000000000 --- a/vendor/regex/src/bytes.rs +++ /dev/null @@ -1,91 +0,0 @@ -/*! -Search for regex matches in `&[u8]` haystacks. - -This module provides a nearly identical API via [`Regex`] to the one found in -the top-level of this crate. There are two important differences: - -1. Matching is done on `&[u8]` instead of `&str`. Additionally, `Vec` -is used where `String` would have been used in the top-level API. -2. Unicode support can be disabled even when disabling it would result in -matching invalid UTF-8 bytes. - -# Example: match null terminated string - -This shows how to find all null-terminated strings in a slice of bytes. This -works even if a C string contains invalid UTF-8. - -```rust -use regex::bytes::Regex; - -let re = Regex::new(r"(?-u)(?[^\x00]+)\x00").unwrap(); -let hay = b"foo\x00qu\xFFux\x00baz\x00"; - -// Extract all of the strings without the NUL terminator from each match. -// The unwrap is OK here since a match requires the `cstr` capture to match. -let cstrs: Vec<&[u8]> = - re.captures_iter(hay) - .map(|c| c.name("cstr").unwrap().as_bytes()) - .collect(); -assert_eq!(cstrs, vec![&b"foo"[..], &b"qu\xFFux"[..], &b"baz"[..]]); -``` - -# Example: selectively enable Unicode support - -This shows how to match an arbitrary byte pattern followed by a UTF-8 encoded -string (e.g., to extract a title from a Matroska file): - -```rust -use regex::bytes::Regex; - -let re = Regex::new( - r"(?-u)\x7b\xa9(?:[\x80-\xfe]|[\x40-\xff].)(?u:(.*))" -).unwrap(); -let hay = b"\x12\xd0\x3b\x5f\x7b\xa9\x85\xe2\x98\x83\x80\x98\x54\x76\x68\x65"; - -// Notice that despite the `.*` at the end, it will only match valid UTF-8 -// because Unicode mode was enabled with the `u` flag. Without the `u` flag, -// the `.*` would match the rest of the bytes regardless of whether they were -// valid UTF-8. -let (_, [title]) = re.captures(hay).unwrap().extract(); -assert_eq!(title, b"\xE2\x98\x83"); -// We can UTF-8 decode the title now. And the unwrap here -// is correct because the existence of a match guarantees -// that `title` is valid UTF-8. -let title = std::str::from_utf8(title).unwrap(); -assert_eq!(title, "☃"); -``` - -In general, if the Unicode flag is enabled in a capture group and that capture -is part of the overall match, then the capture is *guaranteed* to be valid -UTF-8. 
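A condensed sketch of that guarantee, assuming the `regex::bytes` API documented above: only the capture group runs in Unicode mode, so decoding the captured bytes as UTF-8 cannot fail even though the surrounding pattern matches arbitrary bytes.

```rust
use regex::bytes::Regex;

// The outer pattern matches raw bytes, but the group itself is matched
// with Unicode mode re-enabled, so its bytes are guaranteed valid UTF-8.
let re = Regex::new(r"(?-u)\xFF(?u:(\w+))\xFF").unwrap();
let caps = re.captures(b"\xFF\xCE\xB4\xFF").unwrap();
let word = std::str::from_utf8(caps.get(1).unwrap().as_bytes()).unwrap();
assert_eq!(word, "δ");
```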
- -# Syntax - -The supported syntax is pretty much the same as the syntax for Unicode -regular expressions with a few changes that make sense for matching arbitrary -bytes: - -1. The `u` flag can be disabled even when disabling it might cause the regex to -match invalid UTF-8. When the `u` flag is disabled, the regex is said to be in -"ASCII compatible" mode. -2. In ASCII compatible mode, Unicode character classes are not allowed. Literal -Unicode scalar values outside of character classes are allowed. -3. In ASCII compatible mode, Perl character classes (`\w`, `\d` and `\s`) -revert to their typical ASCII definition. `\w` maps to `[[:word:]]`, `\d` maps -to `[[:digit:]]` and `\s` maps to `[[:space:]]`. -4. In ASCII compatible mode, word boundaries use the ASCII compatible `\w` to -determine whether a byte is a word byte or not. -5. Hexadecimal notation can be used to specify arbitrary bytes instead of -Unicode codepoints. For example, in ASCII compatible mode, `\xFF` matches the -literal byte `\xFF`, while in Unicode mode, `\xFF` is the Unicode codepoint -`U+00FF` that matches its UTF-8 encoding of `\xC3\xBF`. Similarly for octal -notation when enabled. -6. In ASCII compatible mode, `.` matches any *byte* except for `\n`. When the -`s` flag is additionally enabled, `.` matches any byte. - -# Performance - -In general, one should expect performance on `&[u8]` to be roughly similar to -performance on `&str`. -*/ -pub use crate::{builders::bytes::*, regex::bytes::*, regexset::bytes::*}; diff --git a/vendor/regex/src/error.rs b/vendor/regex/src/error.rs deleted file mode 100644 index 9e90d5674283fc..00000000000000 --- a/vendor/regex/src/error.rs +++ /dev/null @@ -1,101 +0,0 @@ -use alloc::string::{String, ToString}; - -use regex_automata::meta; - -/// An error that occurred during parsing or compiling a regular expression. -#[non_exhaustive] -#[derive(Clone, PartialEq)] -pub enum Error { - /// A syntax error. - Syntax(String), - /// The compiled program exceeded the set size - /// limit. The argument is the size limit imposed by - /// [`RegexBuilder::size_limit`](crate::RegexBuilder::size_limit). Even - /// when not configured explicitly, it defaults to a reasonable limit. - /// - /// If you're getting this error, it occurred because your regex has been - /// compiled to an intermediate state that is too big. It is important to - /// note that exceeding this limit does _not_ mean the regex is too big to - /// _work_, but rather, the regex is big enough that it may wind up being - /// surprisingly slow when used in a search. In other words, this error is - /// meant to be a practical heuristic for avoiding a performance footgun, - /// and especially so for the case where the regex pattern is coming from - /// an untrusted source. - /// - /// There are generally two ways to move forward if you hit this error. - /// The first is to find some way to use a smaller regex. The second is to - /// increase the size limit via `RegexBuilder::size_limit`. However, if - /// your regex pattern is not from a trusted source, then neither of these - /// approaches may be appropriate. Instead, you'll have to determine just - /// how big of a regex you want to allow. - CompiledTooBig(usize), -} - -impl Error { - pub(crate) fn from_meta_build_error(err: meta::BuildError) -> Error { - if let Some(size_limit) = err.size_limit() { - Error::CompiledTooBig(size_limit) - } else if let Some(ref err) = err.syntax_error() { - Error::Syntax(err.to_string()) - } else { - // This is a little suspect. 
Technically there are more ways for - // a meta regex to fail to build other than "exceeded size limit" - // and "syntax error." For example, if there are too many states - // or even too many patterns. But in practice this is probably - // good enough. The worst thing that happens is that Error::Syntax - // represents an error that isn't technically a syntax error, but - // the actual message will still be shown. So... it's not too bad. - // - // We really should have made the Error type in the regex crate - // completely opaque. Rookie mistake. - Error::Syntax(err.to_string()) - } - } -} - -#[cfg(feature = "std")] -impl std::error::Error for Error { - // TODO: Remove this method entirely on the next breaking semver release. - #[allow(deprecated)] - fn description(&self) -> &str { - match *self { - Error::Syntax(ref err) => err, - Error::CompiledTooBig(_) => "compiled program too big", - } - } -} - -impl core::fmt::Display for Error { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - match *self { - Error::Syntax(ref err) => err.fmt(f), - Error::CompiledTooBig(limit) => write!( - f, - "Compiled regex exceeds size limit of {limit} bytes.", - ), - } - } -} - -// We implement our own Debug implementation so that we show nicer syntax -// errors when people use `Regex::new(...).unwrap()`. It's a little weird, -// but the `Syntax` variant is already storing a `String` anyway, so we might -// as well format it nicely. -impl core::fmt::Debug for Error { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - match *self { - Error::Syntax(ref err) => { - let hr: String = core::iter::repeat('~').take(79).collect(); - writeln!(f, "Syntax(")?; - writeln!(f, "{hr}")?; - writeln!(f, "{err}")?; - writeln!(f, "{hr}")?; - write!(f, ")")?; - Ok(()) - } - Error::CompiledTooBig(limit) => { - f.debug_tuple("CompiledTooBig").field(&limit).finish() - } - } - } -} diff --git a/vendor/regex/src/find_byte.rs b/vendor/regex/src/find_byte.rs deleted file mode 100644 index 9c6915db40cf5e..00000000000000 --- a/vendor/regex/src/find_byte.rs +++ /dev/null @@ -1,17 +0,0 @@ -/// Searches for the given needle in the given haystack. -/// -/// If the perf-literal feature is enabled, then this uses the super optimized -/// memchr crate. Otherwise, it uses the naive byte-at-a-time implementation. -pub(crate) fn find_byte(needle: u8, haystack: &[u8]) -> Option { - #[cfg(not(feature = "perf-literal"))] - fn imp(needle: u8, haystack: &[u8]) -> Option { - haystack.iter().position(|&b| b == needle) - } - - #[cfg(feature = "perf-literal")] - fn imp(needle: u8, haystack: &[u8]) -> Option { - memchr::memchr(needle, haystack) - } - - imp(needle, haystack) -} diff --git a/vendor/regex/src/lib.rs b/vendor/regex/src/lib.rs deleted file mode 100644 index 87e48b7e90b0c7..00000000000000 --- a/vendor/regex/src/lib.rs +++ /dev/null @@ -1,1353 +0,0 @@ -/*! -This crate provides routines for searching strings for matches of a [regular -expression] (aka "regex"). The regex syntax supported by this crate is similar -to other regex engines, but it lacks several features that are not known how to -implement efficiently. This includes, but is not limited to, look-around and -backreferences. In exchange, all regex searches in this crate have worst case -`O(m * n)` time complexity, where `m` is proportional to the size of the regex -and `n` is proportional to the size of the string being searched. 
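A short sketch of that trade-off, assuming the crate's public API: constructs that would require backtracking are rejected when the pattern is compiled, rather than risking super-linear search times later.

```rust
use regex::Regex;

// Plain patterns compile fine and keep the O(m * n) search bound.
assert!(Regex::new(r"(a+)+$").is_ok());
// Backreferences and look-around are unsupported, so they fail at
// compile time instead of degrading search performance.
assert!(Regex::new(r"(\w+)\1").is_err());
assert!(Regex::new(r"a(?=b)").is_err());
```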
- -[regular expression]: https://en.wikipedia.org/wiki/Regular_expression - -If you just want API documentation, then skip to the [`Regex`] type. Otherwise, -here's a quick example showing one way of parsing the output of a grep-like -program: - -```rust -use regex::Regex; - -let re = Regex::new(r"(?m)^([^:]+):([0-9]+):(.+)$").unwrap(); -let hay = "\ -path/to/foo:54:Blue Harvest -path/to/bar:90:Something, Something, Something, Dark Side -path/to/baz:3:It's a Trap! -"; - -let mut results = vec![]; -for (_, [path, lineno, line]) in re.captures_iter(hay).map(|c| c.extract()) { - results.push((path, lineno.parse::()?, line)); -} -assert_eq!(results, vec![ - ("path/to/foo", 54, "Blue Harvest"), - ("path/to/bar", 90, "Something, Something, Something, Dark Side"), - ("path/to/baz", 3, "It's a Trap!"), -]); -# Ok::<(), Box>(()) -``` - -# Overview - -The primary type in this crate is a [`Regex`]. Its most important methods are -as follows: - -* [`Regex::new`] compiles a regex using the default configuration. A -[`RegexBuilder`] permits setting a non-default configuration. (For example, -case insensitive matching, verbose mode and others.) -* [`Regex::is_match`] reports whether a match exists in a particular haystack. -* [`Regex::find`] reports the byte offsets of a match in a haystack, if one -exists. [`Regex::find_iter`] returns an iterator over all such matches. -* [`Regex::captures`] returns a [`Captures`], which reports both the byte -offsets of a match in a haystack and the byte offsets of each matching capture -group from the regex in the haystack. -[`Regex::captures_iter`] returns an iterator over all such matches. - -There is also a [`RegexSet`], which permits searching for multiple regex -patterns simultaneously in a single search. However, it currently only reports -which patterns match and *not* the byte offsets of a match. - -Otherwise, this top-level crate documentation is organized as follows: - -* [Usage](#usage) shows how to add the `regex` crate to your Rust project. -* [Examples](#examples) provides a limited selection of regex search examples. -* [Performance](#performance) provides a brief summary of how to optimize regex -searching speed. -* [Unicode](#unicode) discusses support for non-ASCII patterns. -* [Syntax](#syntax) enumerates the specific regex syntax supported by this -crate. -* [Untrusted input](#untrusted-input) discusses how this crate deals with regex -patterns or haystacks that are untrusted. -* [Crate features](#crate-features) documents the Cargo features that can be -enabled or disabled for this crate. -* [Other crates](#other-crates) links to other crates in the `regex` family. - -# Usage - -The `regex` crate is [on crates.io](https://crates.io/crates/regex) and can be -used by adding `regex` to your dependencies in your project's `Cargo.toml`. -Or more simply, just run `cargo add regex`. - -Here is a complete example that creates a new Rust project, adds a dependency -on `regex`, creates the source code for a regex search and then runs the -program. - -First, create the project in a new directory: - -```text -$ mkdir regex-example -$ cd regex-example -$ cargo init -``` - -Second, add a dependency on `regex`: - -```text -$ cargo add regex -``` - -Third, edit `src/main.rs`. 
Delete what's there and replace it with this: - -``` -use regex::Regex; - -fn main() { - let re = Regex::new(r"Hello (?\w+)!").unwrap(); - let Some(caps) = re.captures("Hello Murphy!") else { - println!("no match!"); - return; - }; - println!("The name is: {}", &caps["name"]); -} -``` - -Fourth, run it with `cargo run`: - -```text -$ cargo run - Compiling memchr v2.5.0 - Compiling regex-syntax v0.7.1 - Compiling aho-corasick v1.0.1 - Compiling regex v1.8.1 - Compiling regex-example v0.1.0 (/tmp/regex-example) - Finished dev [unoptimized + debuginfo] target(s) in 4.22s - Running `target/debug/regex-example` -The name is: Murphy -``` - -The first time you run the program will show more output like above. But -subsequent runs shouldn't have to re-compile the dependencies. - -# Examples - -This section provides a few examples, in tutorial style, showing how to -search a haystack with a regex. There are more examples throughout the API -documentation. - -Before starting though, it's worth defining a few terms: - -* A **regex** is a Rust value whose type is `Regex`. We use `re` as a -variable name for a regex. -* A **pattern** is the string that is used to build a regex. We use `pat` as -a variable name for a pattern. -* A **haystack** is the string that is searched by a regex. We use `hay` as a -variable name for a haystack. - -Sometimes the words "regex" and "pattern" are used interchangeably. - -General use of regular expressions in this crate proceeds by compiling a -**pattern** into a **regex**, and then using that regex to search, split or -replace parts of a **haystack**. - -### Example: find a middle initial - -We'll start off with a very simple example: a regex that looks for a specific -name but uses a wildcard to match a middle initial. Our pattern serves as -something like a template that will match a particular name with *any* middle -initial. - -```rust -use regex::Regex; - -// We use 'unwrap()' here because it would be a bug in our program if the -// pattern failed to compile to a regex. Panicking in the presence of a bug -// is okay. -let re = Regex::new(r"Homer (.)\. Simpson").unwrap(); -let hay = "Homer J. Simpson"; -let Some(caps) = re.captures(hay) else { return }; -assert_eq!("J", &caps[1]); -``` - -There are a few things worth noticing here in our first example: - -* The `.` is a special pattern meta character that means "match any single -character except for new lines." (More precisely, in this crate, it means -"match any UTF-8 encoding of any Unicode scalar value other than `\n`.") -* We can match an actual `.` literally by escaping it, i.e., `\.`. -* We use Rust's [raw strings] to avoid needing to deal with escape sequences in -both the regex pattern syntax and in Rust's string literal syntax. If we didn't -use raw strings here, we would have had to use `\\.` to match a literal `.` -character. That is, `r"\."` and `"\\."` are equivalent patterns. -* We put our wildcard `.` instruction in parentheses. These parentheses have a -special meaning that says, "make whatever part of the haystack matches within -these parentheses available as a capturing group." After finding a match, we -access this capture group with `&caps[1]`. - -[raw strings]: https://doc.rust-lang.org/stable/reference/tokens.html#raw-string-literals - -Otherwise, we execute a search using `re.captures(hay)` and return from our -function if no match occurred. We then reference the middle initial by asking -for the part of the haystack that matched the capture group indexed at `1`. 
-(The capture group at index 0 is implicit and always corresponds to the entire -match. In this case, that's `Homer J. Simpson`.) - -### Example: named capture groups - -Continuing from our middle initial example above, we can tweak the pattern -slightly to give a name to the group that matches the middle initial: - -```rust -use regex::Regex; - -// Note that (?P.) is a different way to spell the same thing. -let re = Regex::new(r"Homer (?.)\. Simpson").unwrap(); -let hay = "Homer J. Simpson"; -let Some(caps) = re.captures(hay) else { return }; -assert_eq!("J", &caps["middle"]); -``` - -Giving a name to a group can be useful when there are multiple groups in -a pattern. It makes the code referring to those groups a bit easier to -understand. - -### Example: validating a particular date format - -This examples shows how to confirm whether a haystack, in its entirety, matches -a particular date format: - -```rust -use regex::Regex; - -let re = Regex::new(r"^\d{4}-\d{2}-\d{2}$").unwrap(); -assert!(re.is_match("2010-03-14")); -``` - -Notice the use of the `^` and `$` anchors. In this crate, every regex search is -run with an implicit `(?s:.)*?` at the beginning of its pattern, which allows -the regex to match anywhere in a haystack. Anchors, as above, can be used to -ensure that the full haystack matches a pattern. - -This crate is also Unicode aware by default, which means that `\d` might match -more than you might expect it to. For example: - -```rust -use regex::Regex; - -let re = Regex::new(r"^\d{4}-\d{2}-\d{2}$").unwrap(); -assert!(re.is_match("𝟚𝟘𝟙𝟘-𝟘𝟛-𝟙𝟜")); -``` - -To only match an ASCII decimal digit, all of the following are equivalent: - -* `[0-9]` -* `(?-u:\d)` -* `[[:digit:]]` -* `[\d&&\p{ascii}]` - -### Example: finding dates in a haystack - -In the previous example, we showed how one might validate that a haystack, -in its entirety, corresponded to a particular date format. But what if we wanted -to extract all things that look like dates in a specific format from a haystack? -To do this, we can use an iterator API to find all matches (notice that we've -removed the anchors and switched to looking for ASCII-only digits): - -```rust -use regex::Regex; - -let re = Regex::new(r"[0-9]{4}-[0-9]{2}-[0-9]{2}").unwrap(); -let hay = "What do 1865-04-14, 1881-07-02, 1901-09-06 and 1963-11-22 have in common?"; -// 'm' is a 'Match', and 'as_str()' returns the matching part of the haystack. -let dates: Vec<&str> = re.find_iter(hay).map(|m| m.as_str()).collect(); -assert_eq!(dates, vec![ - "1865-04-14", - "1881-07-02", - "1901-09-06", - "1963-11-22", -]); -``` - -We can also iterate over [`Captures`] values instead of [`Match`] values, and -that in turn permits accessing each component of the date via capturing groups: - -```rust -use regex::Regex; - -let re = Regex::new(r"(?[0-9]{4})-(?[0-9]{2})-(?[0-9]{2})").unwrap(); -let hay = "What do 1865-04-14, 1881-07-02, 1901-09-06 and 1963-11-22 have in common?"; -// 'm' is a 'Match', and 'as_str()' returns the matching part of the haystack. -let dates: Vec<(&str, &str, &str)> = re.captures_iter(hay).map(|caps| { - // The unwraps are okay because every capture group must match if the whole - // regex matches, and in this context, we know we have a match. - // - // Note that we use `caps.name("y").unwrap().as_str()` instead of - // `&caps["y"]` because the lifetime of the former is the same as the - // lifetime of `hay` above, but the lifetime of the latter is tied to the - // lifetime of `caps` due to how the `Index` trait is defined. 
- let year = caps.name("y").unwrap().as_str(); - let month = caps.name("m").unwrap().as_str(); - let day = caps.name("d").unwrap().as_str(); - (year, month, day) -}).collect(); -assert_eq!(dates, vec![ - ("1865", "04", "14"), - ("1881", "07", "02"), - ("1901", "09", "06"), - ("1963", "11", "22"), -]); -``` - -### Example: simpler capture group extraction - -One can use [`Captures::extract`] to make the code from the previous example a -bit simpler in this case: - -```rust -use regex::Regex; - -let re = Regex::new(r"([0-9]{4})-([0-9]{2})-([0-9]{2})").unwrap(); -let hay = "What do 1865-04-14, 1881-07-02, 1901-09-06 and 1963-11-22 have in common?"; -let dates: Vec<(&str, &str, &str)> = re.captures_iter(hay).map(|caps| { - let (_, [year, month, day]) = caps.extract(); - (year, month, day) -}).collect(); -assert_eq!(dates, vec![ - ("1865", "04", "14"), - ("1881", "07", "02"), - ("1901", "09", "06"), - ("1963", "11", "22"), -]); -``` - -`Captures::extract` works by ensuring that the number of matching groups match -the number of groups requested via the `[year, month, day]` syntax. If they do, -then the substrings for each corresponding capture group are automatically -returned in an appropriately sized array. Rust's syntax for pattern matching -arrays does the rest. - -### Example: replacement with named capture groups - -Building on the previous example, perhaps we'd like to rearrange the date -formats. This can be done by finding each match and replacing it with -something different. The [`Regex::replace_all`] routine provides a convenient -way to do this, including by supporting references to named groups in the -replacement string: - -```rust -use regex::Regex; - -let re = Regex::new(r"(?\d{4})-(?\d{2})-(?\d{2})").unwrap(); -let before = "1973-01-05, 1975-08-25 and 1980-10-18"; -let after = re.replace_all(before, "$m/$d/$y"); -assert_eq!(after, "01/05/1973, 08/25/1975 and 10/18/1980"); -``` - -The replace methods are actually polymorphic in the replacement, which -provides more flexibility than is seen here. (See the documentation for -[`Regex::replace`] for more details.) - -### Example: verbose mode - -When your regex gets complicated, you might consider using something other -than regex. But if you stick with regex, you can use the `x` flag to enable -insignificant whitespace mode or "verbose mode." In this mode, whitespace -is treated as insignificant and one may write comments. This may make your -patterns easier to comprehend. - -```rust -use regex::Regex; - -let re = Regex::new(r"(?x) - (?P\d{4}) # the year, including all Unicode digits - - - (?P\d{2}) # the month, including all Unicode digits - - - (?P\d{2}) # the day, including all Unicode digits -").unwrap(); - -let before = "1973-01-05, 1975-08-25 and 1980-10-18"; -let after = re.replace_all(before, "$m/$d/$y"); -assert_eq!(after, "01/05/1973, 08/25/1975 and 10/18/1980"); -``` - -If you wish to match against whitespace in this mode, you can still use `\s`, -`\n`, `\t`, etc. For escaping a single space character, you can escape it -directly with `\ `, use its hex character code `\x20` or temporarily disable -the `x` flag, e.g., `(?-x: )`. - -### Example: match multiple regular expressions simultaneously - -This demonstrates how to use a [`RegexSet`] to match multiple (possibly -overlapping) regexes in a single scan of a haystack: - -```rust -use regex::RegexSet; - -let set = RegexSet::new(&[ - r"\w+", - r"\d+", - r"\pL+", - r"foo", - r"bar", - r"barfoo", - r"foobar", -]).unwrap(); - -// Iterate over and collect all of the matches. 
Each match corresponds to the -// ID of the matching pattern. -let matches: Vec<_> = set.matches("foobar").into_iter().collect(); -assert_eq!(matches, vec![0, 2, 3, 4, 6]); - -// You can also test whether a particular regex matched: -let matches = set.matches("foobar"); -assert!(!matches.matched(5)); -assert!(matches.matched(6)); -``` - -# Performance - -This section briefly discusses a few concerns regarding the speed and resource -usage of regexes. - -### Only ask for what you need - -When running a search with a regex, there are generally three different types -of information one can ask for: - -1. Does a regex match in a haystack? -2. Where does a regex match in a haystack? -3. Where do each of the capturing groups match in a haystack? - -Generally speaking, this crate could provide a function to answer only #3, -which would subsume #1 and #2 automatically. However, it can be significantly -more expensive to compute the location of capturing group matches, so it's best -not to do it if you don't need to. - -Therefore, only ask for what you need. For example, don't use [`Regex::find`] -if you only need to test if a regex matches a haystack. Use [`Regex::is_match`] -instead. - -### Unicode can impact memory usage and search speed - -This crate has first class support for Unicode and it is **enabled by default**. -In many cases, the extra memory required to support it will be negligible and -it typically won't impact search speed. But it can in some cases. - -With respect to memory usage, the impact of Unicode principally manifests -through the use of Unicode character classes. Unicode character classes -tend to be quite large. For example, `\w` by default matches around 140,000 -distinct codepoints. This requires additional memory, and tends to slow down -regex compilation. While a `\w` here and there is unlikely to be noticed, -writing `\w{100}` will for example result in quite a large regex by default. -Indeed, `\w` is considerably larger than its ASCII-only version, so if your -requirements are satisfied by ASCII, it's probably a good idea to stick to -ASCII classes. The ASCII-only version of `\w` can be spelled in a number of -ways. All of the following are equivalent: - -* `[0-9A-Za-z_]` -* `(?-u:\w)` -* `[[:word:]]` -* `[\w&&\p{ascii}]` - -With respect to search speed, Unicode tends to be handled pretty well, even when -using large Unicode character classes. However, some of the faster internal -regex engines cannot handle a Unicode aware word boundary assertion. So if you -don't need Unicode-aware word boundary assertions, you might consider using -`(?-u:\b)` instead of `\b`, where the former uses an ASCII-only definition of -a word character. - -### Literals might accelerate searches - -This crate tends to be quite good at recognizing literals in a regex pattern -and using them to accelerate a search. If it is at all possible to include -some kind of literal in your pattern, then it might make search substantially -faster. For example, in the regex `\w+@\w+`, the engine will look for -occurrences of `@` and then try a reverse match for `\w+` to find the start -position. - -### Avoid re-compiling regexes, especially in a loop - -It is an anti-pattern to compile the same pattern in a loop since regex -compilation is typically expensive. (It takes anywhere from a few microseconds -to a few **milliseconds** depending on the size of the pattern.) Not only is -compilation itself expensive, but this also prevents optimizations that reuse -allocations internally to the regex engine. 
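A minimal sketch of that advice, assuming the top-level `Regex` API: hoist the compilation out of the loop and reuse the same value for every haystack.

```rust
use regex::Regex;

// Compile once, up front; re-creating this inside the loop would repeat
// the expensive compilation work on every iteration.
let re = Regex::new(r"[0-9]+").unwrap();
for hay in ["a1", "b22", "c333"] {
    assert!(re.is_match(hay));
}
```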
- -In Rust, it can sometimes be a pain to pass regular expressions around if -they're used from inside a helper function. Instead, we recommend using -[`std::sync::LazyLock`], or the [`once_cell`] crate, -if you can't use the standard library. - -This example shows how to use `std::sync::LazyLock`: - -```rust -use std::sync::LazyLock; - -use regex::Regex; - -fn some_helper_function(haystack: &str) -> bool { - static RE: LazyLock = LazyLock::new(|| Regex::new(r"...").unwrap()); - RE.is_match(haystack) -} - -fn main() { - assert!(some_helper_function("abc")); - assert!(!some_helper_function("ac")); -} -``` - -Specifically, in this example, the regex will be compiled when it is used for -the first time. On subsequent uses, it will reuse the previously built `Regex`. -Notice how one can define the `Regex` locally to a specific function. - -[`std::sync::LazyLock`]: https://doc.rust-lang.org/std/sync/struct.LazyLock.html -[`once_cell`]: https://crates.io/crates/once_cell - -### Sharing a regex across threads can result in contention - -While a single `Regex` can be freely used from multiple threads simultaneously, -there is a small synchronization cost that must be paid. Generally speaking, -one shouldn't expect to observe this unless the principal task in each thread -is searching with the regex *and* most searches are on short haystacks. In this -case, internal contention on shared resources can spike and increase latency, -which in turn may slow down each individual search. - -One can work around this by cloning each `Regex` before sending it to another -thread. The cloned regexes will still share the same internal read-only portion -of its compiled state (it's reference counted), but each thread will get -optimized access to the mutable space that is used to run a search. In general, -there is no additional cost in memory to doing this. The only cost is the added -code complexity required to explicitly clone the regex. (If you share the same -`Regex` across multiple threads, each thread still gets its own mutable space, -but accessing that space is slower.) - -# Unicode - -This section discusses what kind of Unicode support this regex library has. -Before showing some examples, we'll summarize the relevant points: - -* This crate almost fully implements "Basic Unicode Support" (Level 1) as -specified by the [Unicode Technical Standard #18][UTS18]. The full details -of what is supported are documented in [UNICODE.md] in the root of the regex -crate repository. There is virtually no support for "Extended Unicode Support" -(Level 2) from UTS#18. -* The top-level [`Regex`] runs searches *as if* iterating over each of the -codepoints in the haystack. That is, the fundamental atom of matching is a -single codepoint. -* [`bytes::Regex`], in contrast, permits disabling Unicode mode for part of all -of your pattern in all cases. When Unicode mode is disabled, then a search is -run *as if* iterating over each byte in the haystack. That is, the fundamental -atom of matching is a single byte. (A top-level `Regex` also permits disabling -Unicode and thus matching *as if* it were one byte at a time, but only when -doing so wouldn't permit matching invalid UTF-8.) -* When Unicode mode is enabled (the default), `.` will match an entire Unicode -scalar value, even when it is encoded using multiple bytes. When Unicode mode -is disabled (e.g., `(?-u:.)`), then `.` will match a single byte in all cases. -* The character classes `\w`, `\d` and `\s` are all Unicode-aware by default. 
-Use `(?-u:\w)`, `(?-u:\d)` and `(?-u:\s)` to get their ASCII-only definitions. -* Similarly, `\b` and `\B` use a Unicode definition of a "word" character. -To get ASCII-only word boundaries, use `(?-u:\b)` and `(?-u:\B)`. This also -applies to the special word boundary assertions. (That is, `\b{start}`, -`\b{end}`, `\b{start-half}`, `\b{end-half}`.) -* `^` and `$` are **not** Unicode-aware in multi-line mode. Namely, they only -recognize `\n` (assuming CRLF mode is not enabled) and not any of the other -forms of line terminators defined by Unicode. -* Case insensitive searching is Unicode-aware and uses simple case folding. -* Unicode general categories, scripts and many boolean properties are available -by default via the `\p{property name}` syntax. -* In all cases, matches are reported using byte offsets. Or more precisely, -UTF-8 code unit offsets. This permits constant time indexing and slicing of the -haystack. - -[UTS18]: https://unicode.org/reports/tr18/ -[UNICODE.md]: https://github.com/rust-lang/regex/blob/master/UNICODE.md - -Patterns themselves are **only** interpreted as a sequence of Unicode scalar -values. This means you can use Unicode characters directly in your pattern: - -```rust -use regex::Regex; - -let re = Regex::new(r"(?i)Δ+").unwrap(); -let m = re.find("ΔδΔ").unwrap(); -assert_eq!((0, 6), (m.start(), m.end())); -// alternatively: -assert_eq!(0..6, m.range()); -``` - -As noted above, Unicode general categories, scripts, script extensions, ages -and a smattering of boolean properties are available as character classes. For -example, you can match a sequence of numerals, Greek or Cherokee letters: - -```rust -use regex::Regex; - -let re = Regex::new(r"[\pN\p{Greek}\p{Cherokee}]+").unwrap(); -let m = re.find("abcΔᎠβⅠᏴγδⅡxyz").unwrap(); -assert_eq!(3..23, m.range()); -``` - -While not specific to Unicode, this library also supports character class set -operations. Namely, one can nest character classes arbitrarily and perform set -operations on them. Those set operations are union (the default), intersection, -difference and symmetric difference. These set operations tend to be most -useful with Unicode character classes. For example, to match any codepoint -that is both in the `Greek` script and in the `Letter` general category: - -```rust -use regex::Regex; - -let re = Regex::new(r"[\p{Greek}&&\pL]+").unwrap(); -let subs: Vec<&str> = re.find_iter("ΔδΔ𐅌ΔδΔ").map(|m| m.as_str()).collect(); -assert_eq!(subs, vec!["ΔδΔ", "ΔδΔ"]); - -// If we just matches on Greek, then all codepoints would match! -let re = Regex::new(r"\p{Greek}+").unwrap(); -let subs: Vec<&str> = re.find_iter("ΔδΔ𐅌ΔδΔ").map(|m| m.as_str()).collect(); -assert_eq!(subs, vec!["ΔδΔ𐅌ΔδΔ"]); -``` - -### Opt out of Unicode support - -The [`bytes::Regex`] type that can be used to search `&[u8]` haystacks. By -default, haystacks are conventionally treated as UTF-8 just like it is with the -main `Regex` type. However, this behavior can be disabled by turning off the -`u` flag, even if doing so could result in matching invalid UTF-8. For example, -when the `u` flag is disabled, `.` will match any byte instead of any Unicode -scalar value. - -Disabling the `u` flag is also possible with the standard `&str`-based `Regex` -type, but it is only allowed where the UTF-8 invariant is maintained. 
For -example, `(?-u:\w)` is an ASCII-only `\w` character class and is legal in an -`&str`-based `Regex`, but `(?-u:\W)` will attempt to match *any byte* that -isn't in `(?-u:\w)`, which in turn includes bytes that are invalid UTF-8. -Similarly, `(?-u:\xFF)` will attempt to match the raw byte `\xFF` (instead of -`U+00FF`), which is invalid UTF-8 and therefore is illegal in `&str`-based -regexes. - -Finally, since Unicode support requires bundling large Unicode data -tables, this crate exposes knobs to disable the compilation of those -data tables, which can be useful for shrinking binary size and reducing -compilation times. For details on how to do that, see the section on [crate -features](#crate-features). - -# Syntax - -The syntax supported in this crate is documented below. - -Note that the regular expression parser and abstract syntax are exposed in -a separate crate, [`regex-syntax`](https://docs.rs/regex-syntax). - -### Matching one character - -

, flags: raw::c_int) -> Result - where - P: AsRef, - { - let filename = match filename { - None => None, - Some(ref f) => Some(cstr_cow_from_bytes(f.as_ref().as_bytes())?), - }; - with_dlerror( - move || { - let result = dlopen( - match filename { - None => ptr::null(), - Some(ref f) => f.as_ptr(), - }, - flags, - ); - // ensure filename lives until dlopen completes - drop(filename); - if result.is_null() { - None - } else { - Some(Library { handle: result }) - } - }, - |desc| crate::Error::DlOpen { desc: desc.into() }, - ) - .map_err(|e| e.unwrap_or(crate::Error::DlOpenUnknown)) - } - - unsafe fn get_impl(&self, symbol: &[u8], on_null: F) -> Result, crate::Error> - where - F: FnOnce() -> Result, crate::Error>, - { - ensure_compatible_types::()?; - let symbol = cstr_cow_from_bytes(symbol)?; - // `dlsym` may return nullptr in two cases: when a symbol genuinely points to a null - // pointer or the symbol cannot be found. In order to detect this case a double dlerror - // pattern must be used, which is, sadly, a little bit racy. - // - // We try to leave as little space as possible for this to occur, but we can’t exactly - // fully prevent it. - let result = with_dlerror( - || { - dlerror(); - let symbol = dlsym(self.handle, symbol.as_ptr()); - if symbol.is_null() { - None - } else { - Some(Symbol { - pointer: symbol, - pd: marker::PhantomData, - }) - } - }, - |desc| crate::Error::DlSym { desc: desc.into() }, - ); - match result { - Err(None) => on_null(), - Err(Some(e)) => Err(e), - Ok(x) => Ok(x), - } - } - - /// Get a pointer to a function or static variable by symbol name. - /// - /// The `symbol` may not contain any null bytes, with the exception of the last byte. Providing a - /// null terminated `symbol` may help to avoid an allocation. - /// - /// Symbol is interpreted as-is; no mangling is done. This means that symbols like `x::y` are - /// most likely invalid. - /// - /// # Safety - /// - /// Users of this API must specify the correct type of the function or variable loaded. Using a - /// `Symbol` with a wrong type is undefined. - /// - /// # Platform-specific behaviour - /// - /// Implementation of thread local variables is extremely platform specific and uses of such - /// variables that work on e.g. Linux may have unintended behaviour on other targets. - /// - /// On POSIX implementations where the `dlerror` function is not confirmed to be MT-safe (such - /// as FreeBSD), this function will unconditionally return an error when the underlying `dlsym` - /// call returns a null pointer. There are rare situations where `dlsym` returns a genuine null - /// pointer without it being an error. If loading a null pointer is something you care about, - /// consider using the [`Library::get_singlethreaded`] call. - #[inline(always)] - pub unsafe fn get(&self, symbol: &[u8]) -> Result, crate::Error> { - extern crate cfg_if; - cfg_if::cfg_if! { - // These targets are known to have MT-safe `dlerror`. - if #[cfg(any( - target_os = "linux", - target_os = "android", - target_os = "openbsd", - target_os = "macos", - target_os = "ios", - target_os = "solaris", - target_os = "illumos", - target_os = "redox", - target_os = "fuchsia", - target_os = "cygwin", - ))] { - self.get_singlethreaded(symbol) - } else { - self.get_impl(symbol, || Err(crate::Error::DlSymUnknown)) - } - } - } - - /// Get a pointer to function or static variable by symbol name. - /// - /// The `symbol` may not contain any null bytes, with the exception of the last byte. 
Providing a - /// null terminated `symbol` may help to avoid an allocation. - /// - /// Symbol is interpreted as-is; no mangling is done. This means that symbols like `x::y` are - /// most likely invalid. - /// - /// # Safety - /// - /// Users of this API must specify the correct type of the function or variable loaded. - /// - /// It is up to the user of this library to ensure that no other calls to an MT-unsafe - /// implementation of `dlerror` occur during the execution of this function. Failing that, the - /// behaviour of this function is not defined. - /// - /// # Platform-specific behaviour - /// - /// The implementation of thread-local variables is extremely platform specific and uses of such - /// variables that work on e.g. Linux may have unintended behaviour on other targets. - #[inline(always)] - pub unsafe fn get_singlethreaded(&self, symbol: &[u8]) -> Result, crate::Error> { - self.get_impl(symbol, || { - Ok(Symbol { - pointer: ptr::null_mut(), - pd: marker::PhantomData, - }) - }) - } - - /// Convert the `Library` to a raw handle. - /// - /// The handle returned by this function shall be usable with APIs which accept handles - /// as returned by `dlopen`. - pub fn into_raw(self) -> *mut raw::c_void { - let handle = self.handle; - mem::forget(self); - handle - } - - /// Convert a raw handle returned by `dlopen`-family of calls to a `Library`. - /// - /// # Safety - /// - /// The pointer shall be a result of a successful call of the `dlopen`-family of functions or a - /// pointer previously returned by `Library::into_raw` call. It must be valid to call `dlclose` - /// with this pointer as an argument. - pub unsafe fn from_raw(handle: *mut raw::c_void) -> Library { - Library { handle } - } - - /// Unload the library. - /// - /// This method might be a no-op, depending on the flags with which the `Library` was opened, - /// what library was opened or other platform specifics. - /// - /// You only need to call this if you are interested in handling any errors that may arise when - /// library is unloaded. Otherwise the implementation of `Drop` for `Library` will close the - /// library and ignore the errors were they arise. - /// - /// The underlying data structures may still get leaked if an error does occur. - pub fn close(self) -> Result<(), crate::Error> { - let result = with_dlerror( - || { - if unsafe { dlclose(self.handle) } == 0 { - Some(()) - } else { - None - } - }, - |desc| crate::Error::DlClose { desc: desc.into() }, - ) - .map_err(|e| e.unwrap_or(crate::Error::DlCloseUnknown)); - // While the library is not free'd yet in case of an error, there is no reason to try - // dropping it again, because all that will do is try calling `dlclose` again. only - // this time it would ignore the return result, which we already seen failing… - std::mem::forget(self); - result - } -} - -impl Drop for Library { - fn drop(&mut self) { - unsafe { - dlclose(self.handle); - } - } -} - -impl fmt::Debug for Library { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.write_str(&format!("Library@{:p}", self.handle)) - } -} - -/// Symbol from a library. -/// -/// A major difference compared to the cross-platform `Symbol` is that this does not ensure that the -/// `Symbol` does not outlive the `Library` it comes from. -pub struct Symbol { - pointer: *mut raw::c_void, - pd: marker::PhantomData, -} - -impl Symbol { - /// Convert the loaded `Symbol` into a raw pointer. 
- pub fn into_raw(self) -> *mut raw::c_void { - self.pointer - } - - /// Convert the loaded `Symbol` into a raw pointer. - /// For unix this does the same as into_raw. - pub fn as_raw_ptr(self) -> *mut raw::c_void { - self.pointer - } -} - -impl Symbol> { - /// Lift Option out of the symbol. - pub fn lift_option(self) -> Option> { - if self.pointer.is_null() { - None - } else { - Some(Symbol { - pointer: self.pointer, - pd: marker::PhantomData, - }) - } - } -} - -unsafe impl Send for Symbol {} -unsafe impl Sync for Symbol {} - -impl Clone for Symbol { - fn clone(&self) -> Symbol { - Symbol { ..*self } - } -} - -impl ::std::ops::Deref for Symbol { - type Target = T; - fn deref(&self) -> &T { - unsafe { - // Additional reference level for a dereference on `deref` return value. - &*(&self.pointer as *const *mut _ as *const T) - } - } -} - -impl fmt::Debug for Symbol { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - unsafe { - let mut info = mem::MaybeUninit::::uninit(); - if dladdr(self.pointer, info.as_mut_ptr()) != 0 { - let info = info.assume_init(); - if info.dli_sname.is_null() { - f.write_str(&format!( - "Symbol@{:p} from {:?}", - self.pointer, - CStr::from_ptr(info.dli_fname) - )) - } else { - f.write_str(&format!( - "Symbol {:?}@{:p} from {:?}", - CStr::from_ptr(info.dli_sname), - self.pointer, - CStr::from_ptr(info.dli_fname) - )) - } - } else { - f.write_str(&format!("Symbol@{:p}", self.pointer)) - } - } - } -} - -// Platform specific things -#[cfg_attr(any(target_os = "linux", target_os = "android"), link(name = "dl"))] -#[cfg_attr(any(target_os = "freebsd", target_os = "dragonfly"), link(name = "c"))] -extern "C" { - fn dlopen(filename: *const raw::c_char, flags: raw::c_int) -> *mut raw::c_void; - fn dlclose(handle: *mut raw::c_void) -> raw::c_int; - fn dlsym(handle: *mut raw::c_void, symbol: *const raw::c_char) -> *mut raw::c_void; - fn dlerror() -> *mut raw::c_char; - fn dladdr(addr: *mut raw::c_void, info: *mut DlInfo) -> raw::c_int; -} - -#[repr(C)] -struct DlInfo { - dli_fname: *const raw::c_char, - dli_fbase: *mut raw::c_void, - dli_sname: *const raw::c_char, - dli_saddr: *mut raw::c_void, -} diff --git a/vendor/libloading/src/os/windows/mod.rs b/vendor/libloading/src/os/windows/mod.rs deleted file mode 100644 index fa6713138690a6..00000000000000 --- a/vendor/libloading/src/os/windows/mod.rs +++ /dev/null @@ -1,590 +0,0 @@ -// A hack for docs.rs to build documentation that has both windows and linux documentation in the -// same rustdoc build visible. 
-#[cfg(all(libloading_docs, not(windows)))] -mod windows_imports {} -#[cfg(any(not(libloading_docs), windows))] -mod windows_imports { - use super::{BOOL, DWORD, FARPROC, HANDLE, HMODULE}; - pub(super) use std::os::windows::ffi::{OsStrExt, OsStringExt}; - windows_link::link!("kernel32.dll" "system" fn GetLastError() -> DWORD); - windows_link::link!("kernel32.dll" "system" fn SetThreadErrorMode(new_mode: DWORD, old_mode: *mut DWORD) -> BOOL); - windows_link::link!("kernel32.dll" "system" fn GetModuleHandleExW(flags: u32, module_name: *const u16, module: *mut HMODULE) -> BOOL); - windows_link::link!("kernel32.dll" "system" fn FreeLibrary(module: HMODULE) -> BOOL); - windows_link::link!("kernel32.dll" "system" fn LoadLibraryExW(filename: *const u16, file: HANDLE, flags: DWORD) -> HMODULE); - windows_link::link!("kernel32.dll" "system" fn GetModuleFileNameW(module: HMODULE, filename: *mut u16, size: DWORD) -> DWORD); - windows_link::link!("kernel32.dll" "system" fn GetProcAddress(module: HMODULE, procname: *const u8) -> FARPROC); -} - -use self::windows_imports::*; -use std::ffi::{OsStr, OsString}; -use std::os::raw; -use std::{fmt, io, marker, mem, ptr}; -use util::{cstr_cow_from_bytes, ensure_compatible_types}; - -/// The platform-specific counterpart of the cross-platform [`Library`](crate::Library). -pub struct Library(HMODULE); - -unsafe impl Send for Library {} -// Now, this is sort-of-tricky. MSDN documentation does not really make any claims as to safety of -// the Win32 APIs. Sadly, whomever I asked, even current and former Microsoft employees, couldn’t -// say for sure whether the Win32 APIs used to implement `Library` are thread-safe or not. -// -// My investigation ended up with a question about thread-safety properties of the API involved -// being sent to an internal (to MS) general question mailing-list. The conclusion of the mail is -// as such: -// -// * Nobody inside MS (at least out of all of the people who have seen the question) knows for -// sure either; -// * However, the general consensus between MS developers is that one can rely on the API being -// thread-safe. In case it is not thread-safe it should be considered a bug on the Windows -// part. (NB: bugs filed at https://connect.microsoft.com/ against Windows Server) -unsafe impl Sync for Library {} - -impl Library { - /// Find and load a module. - /// - /// If the `filename` specifies a full path, the function only searches that path for the - /// module. Otherwise, if the `filename` specifies a relative path or a module name without a - /// path, the function uses a Windows-specific search strategy to find the module. For more - /// information, see the [Remarks on MSDN][msdn]. - /// - /// If the `filename` specifies a library filename without a path and with the extension omitted, - /// the `.dll` extension is implicitly added. This behaviour may be suppressed by appending a - /// trailing `.` to the `filename`. - /// - /// This is equivalent to [Library::load_with_flags](filename, 0). - /// - /// [msdn]: https://docs.microsoft.com/en-us/windows/win32/api/libloaderapi/nf-libloaderapi-loadlibraryw#remarks - /// - /// # Safety - /// - /// When a library is loaded, initialisation routines contained within the library are executed. - /// For the purposes of safety, the execution of these routines is conceptually the same calling an - /// unknown foreign function and may impose arbitrary requirements on the caller for the call - /// to be sound. 
- /// - /// Additionally, the callers of this function must also ensure that execution of the - /// termination routines contained within the library is safe as well. These routines may be - /// executed when the library is unloaded. - #[inline] - pub unsafe fn new>(filename: P) -> Result { - Library::load_with_flags(filename, 0) - } - - /// Get the `Library` representing the original program executable. - /// - /// Note that the behaviour of the `Library` loaded with this method is different from - /// Libraries loaded with [`os::unix::Library::this`]. For more information refer to [MSDN]. - /// - /// Corresponds to `GetModuleHandleExW(0, NULL, _)`. - /// - /// [`os::unix::Library::this`]: crate::os::unix::Library::this - /// [MSDN]: https://docs.microsoft.com/en-us/windows/win32/api/libloaderapi/nf-libloaderapi-getmodulehandleexw - pub fn this() -> Result { - unsafe { - let mut handle: HMODULE = 0; - with_get_last_error( - |source| crate::Error::GetModuleHandleExW { source }, - || { - let result = GetModuleHandleExW(0, std::ptr::null_mut(), &mut handle); - if result == 0 { - None - } else { - Some(Library(handle)) - } - }, - ) - .map_err(|e| e.unwrap_or(crate::Error::GetModuleHandleExWUnknown)) - } - } - - /// Get a module that is already loaded by the program. - /// - /// This function returns a `Library` corresponding to a module with the given name that is - /// already mapped into the address space of the process. If the module isn't found, an error is - /// returned. - /// - /// If the `filename` does not include a full path and there are multiple different loaded - /// modules corresponding to the `filename`, it is impossible to predict which module handle - /// will be returned. For more information refer to [MSDN]. - /// - /// If the `filename` specifies a library filename without a path and with the extension omitted, - /// the `.dll` extension is implicitly added. This behaviour may be suppressed by appending a - /// trailing `.` to the `filename`. - /// - /// This is equivalent to `GetModuleHandleExW(0, filename, _)`. - /// - /// [MSDN]: https://docs.microsoft.com/en-us/windows/win32/api/libloaderapi/nf-libloaderapi-getmodulehandleexw - pub fn open_already_loaded>(filename: P) -> Result { - let wide_filename: Vec = filename.as_ref().encode_wide().chain(Some(0)).collect(); - - let ret = unsafe { - let mut handle: HMODULE = 0; - with_get_last_error( - |source| crate::Error::GetModuleHandleExW { source }, - || { - // Make sure no winapi calls as a result of drop happen inside this closure, because - // otherwise that might change the return value of the GetLastError. - let result = GetModuleHandleExW(0, wide_filename.as_ptr(), &mut handle); - if result == 0 { - None - } else { - Some(Library(handle)) - } - }, - ) - .map_err(|e| e.unwrap_or(crate::Error::GetModuleHandleExWUnknown)) - }; - - drop(wide_filename); // Drop wide_filename here to ensure it doesn’t get moved and dropped - // inside the closure by mistake. See comment inside the closure. - ret - } - - /// Find and load a module, additionally adjusting behaviour with flags. - /// - /// See [`Library::new`] for documentation on the handling of the `filename` argument. See the - /// [flag table on MSDN][flags] for information on applicable values for the `flags` argument. - /// - /// Corresponds to `LoadLibraryExW(filename, reserved: NULL, flags)`. 
- /// - /// [flags]: https://docs.microsoft.com/en-us/windows/win32/api/libloaderapi/nf-libloaderapi-loadlibraryexw#parameters - /// - /// # Safety - /// - /// When a library is loaded, initialisation routines contained within the library are executed. - /// For the purposes of safety, the execution of these routines is conceptually the same calling an - /// unknown foreign function and may impose arbitrary requirements on the caller for the call - /// to be sound. - /// - /// Additionally, the callers of this function must also ensure that execution of the - /// termination routines contained within the library is safe as well. These routines may be - /// executed when the library is unloaded. - pub unsafe fn load_with_flags>( - filename: P, - flags: LOAD_LIBRARY_FLAGS, - ) -> Result { - let wide_filename: Vec = filename.as_ref().encode_wide().chain(Some(0)).collect(); - let _guard = ErrorModeGuard::new(); - - let ret = with_get_last_error( - |source| crate::Error::LoadLibraryExW { source }, - || { - // Make sure no winapi calls as a result of drop happen inside this closure, because - // otherwise that might change the return value of the GetLastError. - let handle = LoadLibraryExW(wide_filename.as_ptr(), 0, flags); - if handle == 0 { - None - } else { - Some(Library(handle)) - } - }, - ) - .map_err(|e| e.unwrap_or(crate::Error::LoadLibraryExWUnknown)); - drop(wide_filename); // Drop wide_filename here to ensure it doesn’t get moved and dropped - // inside the closure by mistake. See comment inside the closure. - ret - } - - /// Attempts to pin the module represented by the current `Library` into memory. - /// - /// Calls `GetModuleHandleExW` with the flag `GET_MODULE_HANDLE_EX_FLAG_PIN` to pin the module. - /// See the [MSDN documentation][msdn] for more information. - /// - /// [msdn]: https://learn.microsoft.com/en-us/windows/win32/api/libloaderapi/nf-libloaderapi-getmodulehandleexw - /// - /// If successful, the module will remain in memory regardless of the refcount for this `Library` - pub fn pin(&self) -> Result<(), crate::Error> { - const GET_MODULE_HANDLE_EX_FLAG_PIN: u32 = 0x1; - const GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS: u32 = 0x4; - unsafe { - let mut handle: HMODULE = 0; - with_get_last_error( - |source| crate::Error::GetModuleHandleExW { source }, - || { - // Make sure no winapi calls as a result of drop happen inside this closure, because - // otherwise that might change the return value of the GetLastError. - - // We use our cached module handle of this `Library` instead of the module name. This works - // if we also pass the flag `GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS` because on Windows, module handles - // are the loaded base address of the module. - let result = GetModuleHandleExW( - GET_MODULE_HANDLE_EX_FLAG_PIN | GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS, - self.0 as *const u16, - &mut handle, - ); - if result == 0 { - None - } else { - Some(()) - } - }, - ) - .map_err(|e| e.unwrap_or(crate::Error::GetModuleHandleExWUnknown)) - } - } - - /// Get a pointer to a function or static variable by symbol name. - /// - /// The `symbol` may not contain any null bytes, with the exception of the last byte. A null - /// terminated `symbol` may avoid a string allocation in some cases. - /// - /// Symbol is interpreted as-is; no mangling is done. This means that symbols like `x::y` are - /// most likely invalid. - /// - /// # Safety - /// - /// Users of this API must specify the correct type of the function or variable loaded. 
- pub unsafe fn get(&self, symbol: &[u8]) -> Result, crate::Error> { - ensure_compatible_types::()?; - let symbol = cstr_cow_from_bytes(symbol)?; - with_get_last_error( - |source| crate::Error::GetProcAddress { source }, - || { - let symbol = GetProcAddress(self.0, symbol.as_ptr().cast()); - if symbol.is_none() { - None - } else { - Some(Symbol { - pointer: symbol, - pd: marker::PhantomData, - }) - } - }, - ) - .map_err(|e| e.unwrap_or(crate::Error::GetProcAddressUnknown)) - } - - /// Get a pointer to a function or static variable by ordinal number. - /// - /// # Safety - /// - /// Users of this API must specify the correct type of the function or variable loaded. - pub unsafe fn get_ordinal(&self, ordinal: u16) -> Result, crate::Error> { - ensure_compatible_types::()?; - with_get_last_error( - |source| crate::Error::GetProcAddress { source }, - || { - let ordinal = ordinal as usize as *const _; - let symbol = GetProcAddress(self.0, ordinal); - if symbol.is_none() { - None - } else { - Some(Symbol { - pointer: symbol, - pd: marker::PhantomData, - }) - } - }, - ) - .map_err(|e| e.unwrap_or(crate::Error::GetProcAddressUnknown)) - } - - /// Convert the `Library` to a raw handle. - pub fn into_raw(self) -> HMODULE { - let handle = self.0; - mem::forget(self); - handle - } - - /// Convert a raw handle to a `Library`. - /// - /// # Safety - /// - /// The handle must be the result of a successful call of `LoadLibraryA`, `LoadLibraryW`, - /// `LoadLibraryExW`, or `LoadLibraryExA`, or a handle previously returned by the - /// `Library::into_raw` call. - pub unsafe fn from_raw(handle: HMODULE) -> Library { - Library(handle) - } - - /// Unload the library. - /// - /// You only need to call this if you are interested in handling any errors that may arise when - /// library is unloaded. Otherwise this will be done when `Library` is dropped. - /// - /// The underlying data structures may still get leaked if an error does occur. - pub fn close(self) -> Result<(), crate::Error> { - let result = with_get_last_error( - |source| crate::Error::FreeLibrary { source }, - || { - if unsafe { FreeLibrary(self.0) == 0 } { - None - } else { - Some(()) - } - }, - ) - .map_err(|e| e.unwrap_or(crate::Error::FreeLibraryUnknown)); - // While the library is not free'd yet in case of an error, there is no reason to try - // dropping it again, because all that will do is try calling `FreeLibrary` again. only - // this time it would ignore the return result, which we already seen failing... - std::mem::forget(self); - result - } -} - -impl Drop for Library { - fn drop(&mut self) { - unsafe { - FreeLibrary(self.0); - } - } -} - -impl fmt::Debug for Library { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - unsafe { - // FIXME: use Maybeuninit::uninit_array when stable - let mut buf = mem::MaybeUninit::<[mem::MaybeUninit; 1024]>::uninit().assume_init(); - let len = GetModuleFileNameW(self.0, buf[..].as_mut_ptr().cast(), 1024) as usize; - if len == 0 { - f.write_str(&format!("Library@{:#x}", self.0)) - } else { - let string: OsString = OsString::from_wide( - // FIXME: use Maybeuninit::slice_get_ref when stable - &*(&buf[..len] as *const [_] as *const [u16]), - ); - f.write_str(&format!("Library@{:#x} from {:?}", self.0, string)) - } - } - } -} - -/// A symbol from a library. -/// -/// A major difference compared to the cross-platform `Symbol` is that this does not ensure that the -/// `Symbol` does not outlive the `Library` that it comes from. 
-pub struct Symbol { - pointer: FARPROC, - pd: marker::PhantomData, -} - -impl Symbol { - /// Convert the loaded `Symbol` into a handle. - pub fn into_raw(self) -> FARPROC { - self.pointer - } - - /// Convert the loaded `Symbol` into a raw pointer. - pub fn as_raw_ptr(self) -> *mut raw::c_void { - self.pointer - .map(|raw| raw as *mut raw::c_void) - .unwrap_or(std::ptr::null_mut()) - } -} - -impl Symbol> { - /// Lift Option out of the symbol. - pub fn lift_option(self) -> Option> { - if self.pointer.is_none() { - None - } else { - Some(Symbol { - pointer: self.pointer, - pd: marker::PhantomData, - }) - } - } -} - -unsafe impl Send for Symbol {} -unsafe impl Sync for Symbol {} - -impl Clone for Symbol { - fn clone(&self) -> Symbol { - Symbol { ..*self } - } -} - -impl ::std::ops::Deref for Symbol { - type Target = T; - fn deref(&self) -> &T { - unsafe { &*((&self.pointer) as *const FARPROC as *const T) } - } -} - -impl fmt::Debug for Symbol { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match self.pointer { - None => f.write_str("Symbol@0x0"), - Some(ptr) => f.write_str(&format!("Symbol@{:p}", ptr as *const ())), - } - } -} - -struct ErrorModeGuard(DWORD); - -impl ErrorModeGuard { - #[allow(clippy::if_same_then_else)] - fn new() -> Option { - unsafe { - let mut previous_mode = 0; - if SetThreadErrorMode(SEM_FAILCRITICALERRORS, &mut previous_mode) == 0 { - // How in the world is it possible for what is essentially a simple variable swap - // to fail? For now we just ignore the error -- the worst that can happen here is - // the previous mode staying on and user seeing a dialog error on older Windows - // machines. - None - } else if previous_mode == SEM_FAILCRITICALERRORS { - None - } else { - Some(ErrorModeGuard(previous_mode)) - } - } - } -} - -impl Drop for ErrorModeGuard { - fn drop(&mut self) { - unsafe { - SetThreadErrorMode(self.0, ptr::null_mut()); - } - } -} - -fn with_get_last_error( - wrap: fn(crate::error::WindowsError) -> crate::Error, - closure: F, -) -> Result> -where - F: FnOnce() -> Option, -{ - closure().ok_or_else(|| { - let error = unsafe { GetLastError() }; - if error == 0 { - None - } else { - Some(wrap(crate::error::WindowsError( - io::Error::from_raw_os_error(error as i32), - ))) - } - }) -} - -#[allow(clippy::upper_case_acronyms)] -type BOOL = i32; -#[allow(clippy::upper_case_acronyms)] -type DWORD = u32; -#[allow(clippy::upper_case_acronyms)] -type HANDLE = isize; -#[allow(clippy::upper_case_acronyms)] -type HMODULE = isize; -#[allow(clippy::upper_case_acronyms)] -type FARPROC = Option isize>; -#[allow(non_camel_case_types)] -type LOAD_LIBRARY_FLAGS = DWORD; - -const SEM_FAILCRITICALERRORS: DWORD = 1; - -/// Do not check AppLocker rules or apply Software Restriction Policies for the DLL. -/// -/// This action applies only to the DLL being loaded and not to its dependencies. This value is -/// recommended for use in setup programs that must run extracted DLLs during installation. -/// -/// See [flag documentation on MSDN](https://docs.microsoft.com/en-us/windows/win32/api/libloaderapi/nf-libloaderapi-loadlibraryexw#parameters). -pub const LOAD_IGNORE_CODE_AUTHZ_LEVEL: LOAD_LIBRARY_FLAGS = 0x00000010; - -/// Map the file into the calling process’ virtual address space as if it were a data file. -/// -/// Nothing is done to execute or prepare to execute the mapped file. Therefore, you cannot call -/// functions like [`Library::get`] with this DLL. Using this value causes writes to read-only -/// memory to raise an access violation. 
Use this flag when you want to load a DLL only to extract -/// messages or resources from it. -/// -/// See [flag documentation on MSDN](https://docs.microsoft.com/en-us/windows/win32/api/libloaderapi/nf-libloaderapi-loadlibraryexw#parameters). -pub const LOAD_LIBRARY_AS_DATAFILE: LOAD_LIBRARY_FLAGS = 0x00000002; - -/// Map the file into the calling process’ virtual address space as if it were a data file. -/// -/// Similar to [`LOAD_LIBRARY_AS_DATAFILE`], except that the DLL file is opened with exclusive -/// write access for the calling process. Other processes cannot open the DLL file for write access -/// while it is in use. However, the DLL can still be opened by other processes. -/// -/// See [flag documentation on MSDN](https://docs.microsoft.com/en-us/windows/win32/api/libloaderapi/nf-libloaderapi-loadlibraryexw#parameters). -pub const LOAD_LIBRARY_AS_DATAFILE_EXCLUSIVE: LOAD_LIBRARY_FLAGS = 0x00000040; - -/// Map the file into the process’ virtual address space as an image file. -/// -/// The loader does not load the static imports or perform the other usual initialisation steps. -/// Use this flag when you want to load a DLL only to extract messages or resources from it. -/// -/// Unless the application depends on the file having the in-memory layout of an image, this value -/// should be used with either [`LOAD_LIBRARY_AS_DATAFILE_EXCLUSIVE`] or -/// [`LOAD_LIBRARY_AS_DATAFILE`]. -/// -/// See [flag documentation on MSDN](https://docs.microsoft.com/en-us/windows/win32/api/libloaderapi/nf-libloaderapi-loadlibraryexw#parameters). -pub const LOAD_LIBRARY_AS_IMAGE_RESOURCE: LOAD_LIBRARY_FLAGS = 0x00000020; - -/// Search the application's installation directory for the DLL and its dependencies. -/// -/// Directories in the standard search path are not searched. This value cannot be combined with -/// [`LOAD_WITH_ALTERED_SEARCH_PATH`]. -/// -/// See [flag documentation on MSDN](https://docs.microsoft.com/en-us/windows/win32/api/libloaderapi/nf-libloaderapi-loadlibraryexw#parameters). -pub const LOAD_LIBRARY_SEARCH_APPLICATION_DIR: LOAD_LIBRARY_FLAGS = 0x00000200; - -/// Search default directories when looking for the DLL and its dependencies. -/// -/// This value is a combination of [`LOAD_LIBRARY_SEARCH_APPLICATION_DIR`], -/// [`LOAD_LIBRARY_SEARCH_SYSTEM32`], and [`LOAD_LIBRARY_SEARCH_USER_DIRS`]. Directories in the -/// standard search path are not searched. This value cannot be combined with -/// [`LOAD_WITH_ALTERED_SEARCH_PATH`]. -/// -/// See [flag documentation on MSDN](https://docs.microsoft.com/en-us/windows/win32/api/libloaderapi/nf-libloaderapi-loadlibraryexw#parameters). -pub const LOAD_LIBRARY_SEARCH_DEFAULT_DIRS: LOAD_LIBRARY_FLAGS = 0x00001000; - -/// Directory that contains the DLL is temporarily added to the beginning of the list of -/// directories that are searched for the DLL’s dependencies. -/// -/// Directories in the standard search path are not searched. -/// -/// The `filename` parameter must specify a fully qualified path. This value cannot be combined -/// with [`LOAD_WITH_ALTERED_SEARCH_PATH`]. -/// -/// See [flag documentation on MSDN](https://docs.microsoft.com/en-us/windows/win32/api/libloaderapi/nf-libloaderapi-loadlibraryexw#parameters). -pub const LOAD_LIBRARY_SEARCH_DLL_LOAD_DIR: LOAD_LIBRARY_FLAGS = 0x00000100; - -/// Search `%windows%\system32` for the DLL and its dependencies. -/// -/// Directories in the standard search path are not searched. This value cannot be combined with -/// [`LOAD_WITH_ALTERED_SEARCH_PATH`]. 
-/// -/// See [flag documentation on MSDN](https://docs.microsoft.com/en-us/windows/win32/api/libloaderapi/nf-libloaderapi-loadlibraryexw#parameters). -pub const LOAD_LIBRARY_SEARCH_SYSTEM32: LOAD_LIBRARY_FLAGS = 0x00000800; - -/// Directories added using the `AddDllDirectory` or the `SetDllDirectory` function are searched -/// for the DLL and its dependencies. -/// -/// If more than one directory has been added, the order in which the directories are searched is -/// unspecified. Directories in the standard search path are not searched. This value cannot be -/// combined with [`LOAD_WITH_ALTERED_SEARCH_PATH`]. -/// -/// See [flag documentation on MSDN](https://docs.microsoft.com/en-us/windows/win32/api/libloaderapi/nf-libloaderapi-loadlibraryexw#parameters). -pub const LOAD_LIBRARY_SEARCH_USER_DIRS: LOAD_LIBRARY_FLAGS = 0x00000400; - -/// If `filename` specifies an absolute path, the system uses the alternate file search strategy -/// discussed in the [Remarks section] to find associated executable modules that the specified -/// module causes to be loaded. -/// -/// If this value is used and `filename` specifies a relative path, the behaviour is undefined. -/// -/// If this value is not used, or if `filename` does not specify a path, the system uses the -/// standard search strategy discussed in the [Remarks section] to find associated executable -/// modules that the specified module causes to be loaded. -/// -/// See [flag documentation on MSDN](https://docs.microsoft.com/en-us/windows/win32/api/libloaderapi/nf-libloaderapi-loadlibraryexw#parameters). -/// -/// [Remarks]: https://docs.microsoft.com/en-us/windows/win32/api/libloaderapi/nf-libloaderapi-loadlibraryexw#remarks -pub const LOAD_WITH_ALTERED_SEARCH_PATH: LOAD_LIBRARY_FLAGS = 0x00000008; - -/// Specifies that the digital signature of the binary image must be checked at load time. -/// -/// See [flag documentation on MSDN](https://docs.microsoft.com/en-us/windows/win32/api/libloaderapi/nf-libloaderapi-loadlibraryexw#parameters). -pub const LOAD_LIBRARY_REQUIRE_SIGNED_TARGET: LOAD_LIBRARY_FLAGS = 0x00000080; - -/// Allow loading a DLL for execution from the current directory only if it is under a directory in -/// the Safe load list. -/// -/// See [flag documentation on MSDN](https://docs.microsoft.com/en-us/windows/win32/api/libloaderapi/nf-libloaderapi-loadlibraryexw#parameters). -pub const LOAD_LIBRARY_SAFE_CURRENT_DIRS: LOAD_LIBRARY_FLAGS = 0x00002000; diff --git a/vendor/libloading/src/safe.rs b/vendor/libloading/src/safe.rs deleted file mode 100644 index e217ee394646c1..00000000000000 --- a/vendor/libloading/src/safe.rs +++ /dev/null @@ -1,318 +0,0 @@ -#[cfg(libloading_docs)] -use super::os::unix as imp; // the implementation used here doesn't matter particularly much... -#[cfg(all(not(libloading_docs), unix))] -use super::os::unix as imp; -#[cfg(all(not(libloading_docs), windows))] -use super::os::windows as imp; -use super::Error; -use std::ffi::OsStr; -use std::fmt; -use std::marker; -use std::ops; -use std::os::raw; - -/// A loaded dynamic library. -#[cfg_attr(libloading_docs, doc(cfg(any(unix, windows))))] -pub struct Library(imp::Library); - -impl Library { - /// Find and load a dynamic library. - /// - /// The `filename` argument may be either: - /// - /// * A library filename; - /// * The absolute path to the library; - /// * A relative (to the current working directory) path to the library. - /// - /// # Safety - /// - /// When a library is loaded, initialisation routines contained within it are executed. 
- /// For the purposes of safety, the execution of these routines is conceptually the same calling an - /// unknown foreign function and may impose arbitrary requirements on the caller for the call - /// to be sound. - /// - /// Additionally, the callers of this function must also ensure that execution of the - /// termination routines contained within the library is safe as well. These routines may be - /// executed when the library is unloaded. - /// - /// # Thread-safety - /// - /// The implementation strives to be as MT-safe as sanely possible, however on certain - /// platforms the underlying error-handling related APIs not always MT-safe. This library - /// shares these limitations on those platforms. In particular, on certain UNIX targets - /// `dlerror` is not MT-safe, resulting in garbage error messages in certain MT-scenarios. - /// - /// Calling this function from multiple threads is not MT-safe if used in conjunction with - /// library filenames and the library search path is modified (`SetDllDirectory` function on - /// Windows, `{DY,}LD_LIBRARY_PATH` environment variable on UNIX). - /// - /// # Platform-specific behaviour - /// - /// When a plain library filename is supplied, the locations in which the library is searched are - /// platform specific and cannot be adjusted in a portable manner. See the documentation for - /// the platform specific [`os::unix::Library::new`] and [`os::windows::Library::new`] methods - /// for further information on library lookup behaviour. - /// - /// If the `filename` specifies a library filename without a path and with the extension omitted, - /// the `.dll` extension is implicitly added on Windows. - /// - /// [`os::unix::Library::new`]: crate::os::unix::Library::new - /// [`os::windows::Library::new`]: crate::os::windows::Library::new - /// - /// # Tips - /// - /// Distributing your dynamic libraries under a filename common to all platforms (e.g. - /// `awesome.module`) allows you to avoid code which has to account for platform’s conventional - /// library filenames. - /// - /// Strive to specify an absolute or at least a relative path to your library, unless - /// system-wide libraries are being loaded. Platform-dependent library search locations - /// combined with various quirks related to path-less filenames may cause flakiness in - /// programs. - /// - /// # Examples - /// - /// ```no_run - /// # use ::libloading::Library; - /// // Any of the following are valid. - /// unsafe { - /// let _ = Library::new("/path/to/awesome.module").unwrap(); - /// let _ = Library::new("../awesome.module").unwrap(); - /// let _ = Library::new("libsomelib.so.1").unwrap(); - /// } - /// ``` - pub unsafe fn new>(filename: P) -> Result { - imp::Library::new(filename).map(From::from) - } - - /// Get a pointer to a function or static variable by symbol name. - /// - /// The `symbol` may not contain any null bytes, with the exception of the last byte. Providing a - /// null-terminated `symbol` may help to avoid an allocation. - /// - /// The symbol is interpreted as-is; no mangling is done. This means that symbols like `x::y` are - /// most likely invalid. - /// - /// # Safety - /// - /// Users of this API must specify the correct type of the function or variable loaded. - /// - /// # Platform-specific behaviour - /// - /// The implementation of thread-local variables is extremely platform specific and uses of such - /// variables that work on e.g. Linux may have unintended behaviour on other targets. 
- /// - /// On POSIX implementations where the `dlerror` function is not confirmed to be MT-safe (such - /// as FreeBSD), this function will unconditionally return an error when the underlying `dlsym` - /// call returns a null pointer. There are rare situations where `dlsym` returns a genuine null - /// pointer without it being an error. If loading a null pointer is something you care about, - /// consider using the [`os::unix::Library::get_singlethreaded`] call. - /// - /// [`os::unix::Library::get_singlethreaded`]: crate::os::unix::Library::get_singlethreaded - /// - /// # Examples - /// - /// Given a loaded library: - /// - /// ```no_run - /// # use ::libloading::Library; - /// let lib = unsafe { - /// Library::new("/path/to/awesome.module").unwrap() - /// }; - /// ``` - /// - /// Loading and using a function looks like this: - /// - /// ```no_run - /// # use ::libloading::{Library, Symbol}; - /// # let lib = unsafe { - /// # Library::new("/path/to/awesome.module").unwrap() - /// # }; - /// unsafe { - /// let awesome_function: Symbol f64> = - /// lib.get(b"awesome_function\0").unwrap(); - /// awesome_function(0.42); - /// } - /// ``` - /// - /// A static variable may also be loaded and inspected: - /// - /// ```no_run - /// # use ::libloading::{Library, Symbol}; - /// # let lib = unsafe { Library::new("/path/to/awesome.module").unwrap() }; - /// unsafe { - /// let awesome_variable: Symbol<*mut f64> = lib.get(b"awesome_variable\0").unwrap(); - /// **awesome_variable = 42.0; - /// }; - /// ``` - pub unsafe fn get(&self, symbol: &[u8]) -> Result, Error> { - self.0.get(symbol).map(|from| Symbol::from_raw(from, self)) - } - - /// Unload the library. - /// - /// This method might be a no-op, depending on the flags with which the `Library` was opened, - /// what library was opened or other platform specifics. - /// - /// You only need to call this if you are interested in handling any errors that may arise when - /// library is unloaded. Otherwise the implementation of `Drop` for `Library` will close the - /// library and ignore the errors were they arise. - /// - /// The underlying data structures may still get leaked if an error does occur. - pub fn close(self) -> Result<(), Error> { - self.0.close() - } -} - -impl fmt::Debug for Library { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - self.0.fmt(f) - } -} - -impl From for Library { - fn from(lib: imp::Library) -> Library { - Library(lib) - } -} - -impl From for imp::Library { - fn from(lib: Library) -> imp::Library { - lib.0 - } -} - -unsafe impl Send for Library {} -unsafe impl Sync for Library {} - -/// Symbol from a library. -/// -/// This type is a safeguard against using dynamically loaded symbols after a `Library` is -/// unloaded. The primary method to create an instance of a `Symbol` is via [`Library::get`]. -/// -/// The `Deref` trait implementation allows the use of `Symbol` as if it was a function or variable -/// itself, without taking care to “extract” the function or variable manually most of the time. -/// -/// [`Library::get`]: Library::get -#[cfg_attr(libloading_docs, doc(cfg(any(unix, windows))))] -pub struct Symbol<'lib, T: 'lib> { - inner: imp::Symbol, - pd: marker::PhantomData<&'lib T>, -} - -impl<'lib, T> Symbol<'lib, T> { - /// Extract the wrapped `os::platform::Symbol`. - /// - /// # Safety - /// - /// Using this function relinquishes all the lifetime guarantees. 
It is up to the developer to - /// ensure the resulting `Symbol` is not used past the lifetime of the `Library` this symbol - /// was loaded from. - /// - /// # Examples - /// - /// ```no_run - /// # use ::libloading::{Library, Symbol}; - /// unsafe { - /// let lib = Library::new("/path/to/awesome.module").unwrap(); - /// let symbol: Symbol<*mut u32> = lib.get(b"symbol\0").unwrap(); - /// let symbol = symbol.into_raw(); - /// } - /// ``` - pub unsafe fn into_raw(self) -> imp::Symbol { - self.inner - } - - /// Wrap the `os::platform::Symbol` into this safe wrapper. - /// - /// Note that, in order to create association between the symbol and the library this symbol - /// came from, this function requires a reference to the library. - /// - /// # Safety - /// - /// The `library` reference must be exactly the library `sym` was loaded from. - /// - /// # Examples - /// - /// ```no_run - /// # use ::libloading::{Library, Symbol}; - /// unsafe { - /// let lib = Library::new("/path/to/awesome.module").unwrap(); - /// let symbol: Symbol<*mut u32> = lib.get(b"symbol\0").unwrap(); - /// let symbol = symbol.into_raw(); - /// let symbol = Symbol::from_raw(symbol, &lib); - /// } - /// ``` - pub unsafe fn from_raw(sym: imp::Symbol, library: &'lib L) -> Symbol<'lib, T> { - let _ = library; // ignore here for documentation purposes. - Symbol { - inner: sym, - pd: marker::PhantomData, - } - } - - /// Try to convert the symbol into a raw pointer. - /// Success depends on the platform. Currently, this fn always succeeds and returns some. - /// - /// # Safety - /// - /// Using this function relinquishes all the lifetime guarantees. It is up to the developer to - /// ensure the resulting `Symbol` is not used past the lifetime of the `Library` this symbol - /// was loaded from. - pub unsafe fn try_as_raw_ptr(self) -> Option<*mut raw::c_void> { - Some( - unsafe { - // SAFE: the calling function has the same soundness invariants as this callee. - self.into_raw() - } - .as_raw_ptr(), - ) - } -} - -impl<'lib, T> Symbol<'lib, Option> { - /// Lift Option out of the symbol. - /// - /// # Examples - /// - /// ```no_run - /// # use ::libloading::{Library, Symbol}; - /// unsafe { - /// let lib = Library::new("/path/to/awesome.module").unwrap(); - /// let symbol: Symbol> = lib.get(b"symbol\0").unwrap(); - /// let symbol: Symbol<*mut u32> = symbol.lift_option().expect("static is not null"); - /// } - /// ``` - pub fn lift_option(self) -> Option> { - self.inner.lift_option().map(|is| Symbol { - inner: is, - pd: marker::PhantomData, - }) - } -} - -impl<'lib, T> Clone for Symbol<'lib, T> { - fn clone(&self) -> Symbol<'lib, T> { - Symbol { - inner: self.inner.clone(), - pd: marker::PhantomData, - } - } -} - -// FIXME: implement FnOnce for callable stuff instead. -impl ops::Deref for Symbol<'_, T> { - type Target = T; - fn deref(&self) -> &T { - ops::Deref::deref(&self.inner) - } -} - -impl fmt::Debug for Symbol<'_, T> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - self.inner.fmt(f) - } -} - -unsafe impl Send for Symbol<'_, T> {} -unsafe impl Sync for Symbol<'_, T> {} diff --git a/vendor/libloading/src/test_helpers.rs b/vendor/libloading/src/test_helpers.rs deleted file mode 100644 index 9e3e9924ff86b0..00000000000000 --- a/vendor/libloading/src/test_helpers.rs +++ /dev/null @@ -1,37 +0,0 @@ -//! This is a separate file containing helpers for tests of this library. It is built into a -//! dynamic library by the build.rs script. 
-#![crate_type="cdylib"] - -#[no_mangle] -pub static mut TEST_STATIC_U32: u32 = 0; - -#[no_mangle] -pub static mut TEST_STATIC_PTR: *mut () = 0 as *mut _; - -#[no_mangle] -pub extern "C" fn test_identity_u32(x: u32) -> u32 { - x -} - -#[repr(C)] -pub struct S { - a: u64, - b: u32, - c: u16, - d: u8 -} - -#[no_mangle] -pub extern "C" fn test_identity_struct(x: S) -> S { - x -} - -#[no_mangle] -pub unsafe extern "C" fn test_get_static_u32() -> u32 { - TEST_STATIC_U32 -} - -#[no_mangle] -pub unsafe extern "C" fn test_check_static_ptr() -> bool { - TEST_STATIC_PTR == (&mut TEST_STATIC_PTR as *mut *mut _ as *mut _) -} diff --git a/vendor/libloading/src/util.rs b/vendor/libloading/src/util.rs deleted file mode 100644 index 599e6c254eaa8c..00000000000000 --- a/vendor/libloading/src/util.rs +++ /dev/null @@ -1,34 +0,0 @@ -use std::borrow::Cow; -use std::ffi::{CStr, CString}; -use std::os::raw; - -use crate::Error; - -/// Checks for the last byte and avoids allocating if it is zero. -/// -/// Non-last null bytes still result in an error. -pub(crate) fn cstr_cow_from_bytes(slice: &[u8]) -> Result, Error> { - static ZERO: raw::c_char = 0; - Ok(match slice.last() { - // Slice out of 0 elements - None => unsafe { Cow::Borrowed(CStr::from_ptr(&ZERO)) }, - // Slice with trailing 0 - Some(&0) => Cow::Borrowed( - CStr::from_bytes_with_nul(slice) - .map_err(|source| Error::CreateCStringWithTrailing { source })?, - ), - // Slice with no trailing 0 - Some(_) => { - Cow::Owned(CString::new(slice).map_err(|source| Error::CreateCString { source })?) - } - }) -} - -#[inline] -pub(crate) fn ensure_compatible_types() -> Result<(), Error> { - if ::std::mem::size_of::() != ::std::mem::size_of::() { - Err(Error::IncompatibleSize) - } else { - Ok(()) - } -} diff --git a/vendor/libloading/tests/constants.rs b/vendor/libloading/tests/constants.rs deleted file mode 100644 index 6ae5a8460aef5b..00000000000000 --- a/vendor/libloading/tests/constants.rs +++ /dev/null @@ -1,13 +0,0 @@ -extern crate libc; -extern crate libloading; -extern crate static_assertions; - -#[cfg(all(test, unix))] -mod unix { - use super::static_assertions::const_assert_eq; - - const_assert_eq!(libloading::os::unix::RTLD_LOCAL, libc::RTLD_LOCAL); - const_assert_eq!(libloading::os::unix::RTLD_GLOBAL, libc::RTLD_GLOBAL); - const_assert_eq!(libloading::os::unix::RTLD_NOW, libc::RTLD_NOW); - const_assert_eq!(libloading::os::unix::RTLD_LAZY, libc::RTLD_LAZY); -} diff --git a/vendor/libloading/tests/functions.rs b/vendor/libloading/tests/functions.rs deleted file mode 100644 index dc6b316e7d79c9..00000000000000 --- a/vendor/libloading/tests/functions.rs +++ /dev/null @@ -1,312 +0,0 @@ -#[cfg(windows)] -extern crate windows_sys; - -extern crate libloading; -use libloading::{Library, Symbol}; -use std::os::raw::c_void; - -const TARGET_DIR: Option<&'static str> = option_env!("CARGO_TARGET_DIR"); -const TARGET_TMPDIR: Option<&'static str> = option_env!("CARGO_TARGET_TMPDIR"); - -fn lib_path() -> std::path::PathBuf { - [ - TARGET_TMPDIR.unwrap_or(TARGET_DIR.unwrap_or("target")), - "libtest_helpers.module", - ] - .iter() - .collect() -} - -fn make_helpers() { - static ONCE: ::std::sync::Once = ::std::sync::Once::new(); - ONCE.call_once(|| { - let rustc = std::env::var_os("RUSTC").unwrap_or_else(|| "rustc".into()); - let mut cmd = ::std::process::Command::new(rustc); - cmd.arg("src/test_helpers.rs").arg("-o").arg(lib_path()); - if let Some(target) = std::env::var_os("TARGET") { - cmd.arg("--target").arg(target); - } else { - eprintln!("WARNING: $TARGET NOT 
SPECIFIED! BUILDING HELPER MODULE FOR NATIVE TARGET."); - } - assert!(cmd - .status() - .expect("could not compile the test helpers!") - .success()); - }); -} - -#[test] -fn test_id_u32() { - make_helpers(); - unsafe { - let lib = Library::new(lib_path()).unwrap(); - let f: Symbol u32> = lib.get(b"test_identity_u32\0").unwrap(); - assert_eq!(42, f(42)); - } -} - -#[test] -fn test_try_into_ptr() { - make_helpers(); - unsafe { - let lib = Library::new(lib_path()).unwrap(); - let f: Symbol u32> = lib.get(b"test_identity_u32\0").unwrap(); - let ptr: *mut c_void = f.try_as_raw_ptr().unwrap(); - assert!(!ptr.is_null()); - let ptr_casted: extern "C" fn(u32) -> u32 = std::mem::transmute(ptr); - assert_eq!(42, ptr_casted(42)); - } -} - -#[repr(C)] -#[derive(Clone, Copy, PartialEq, Debug)] -struct S { - a: u64, - b: u32, - c: u16, - d: u8, -} - -#[test] -fn test_id_struct() { - make_helpers(); - unsafe { - let lib = Library::new(lib_path()).unwrap(); - let f: Symbol S> = lib.get(b"test_identity_struct\0").unwrap(); - assert_eq!( - S { - a: 1, - b: 2, - c: 3, - d: 4 - }, - f(S { - a: 1, - b: 2, - c: 3, - d: 4 - }) - ); - } -} - -#[test] -fn test_0_no_0() { - make_helpers(); - unsafe { - let lib = Library::new(lib_path()).unwrap(); - let f: Symbol S> = lib.get(b"test_identity_struct\0").unwrap(); - let f2: Symbol S> = lib.get(b"test_identity_struct").unwrap(); - assert_eq!(*f, *f2); - } -} - -#[test] -fn wrong_name_fails() { - unsafe { - Library::new("target/this_location_is_definitely_non existent:^~") - .err() - .unwrap(); - } -} - -#[test] -fn missing_symbol_fails() { - make_helpers(); - unsafe { - let lib = Library::new(lib_path()).unwrap(); - lib.get::<*mut ()>(b"test_does_not_exist").err().unwrap(); - lib.get::<*mut ()>(b"test_does_not_exist\0").err().unwrap(); - } -} - -#[test] -fn interior_null_fails() { - make_helpers(); - unsafe { - let lib = Library::new(lib_path()).unwrap(); - lib.get::<*mut ()>(b"test_does\0_not_exist").err().unwrap(); - lib.get::<*mut ()>(b"test\0_does_not_exist\0") - .err() - .unwrap(); - } -} - -#[test] -fn test_incompatible_type() { - make_helpers(); - unsafe { - let lib = Library::new(lib_path()).unwrap(); - assert!(match lib.get::<()>(b"test_identity_u32\0") { - Err(libloading::Error::IncompatibleSize) => true, - _ => false, - }) - } -} - -#[test] -fn test_incompatible_type_named_fn() { - make_helpers(); - unsafe fn get<'a, T>(l: &'a Library, _: T) -> Result, libloading::Error> { - l.get::(b"test_identity_u32\0") - } - unsafe { - let lib = Library::new(lib_path()).unwrap(); - assert!(match get(&lib, test_incompatible_type_named_fn) { - Err(libloading::Error::IncompatibleSize) => true, - _ => false, - }) - } -} - -#[test] -fn test_static_u32() { - make_helpers(); - unsafe { - let lib = Library::new(lib_path()).unwrap(); - let var: Symbol<*mut u32> = lib.get(b"TEST_STATIC_U32\0").unwrap(); - **var = 42; - let help: Symbol u32> = - lib.get(b"test_get_static_u32\0").unwrap(); - assert_eq!(42, help()); - } -} - -#[test] -fn test_static_ptr() { - make_helpers(); - unsafe { - let lib = Library::new(lib_path()).unwrap(); - let var: Symbol<*mut *mut ()> = lib.get(b"TEST_STATIC_PTR\0").unwrap(); - **var = *var as *mut _; - let works: Symbol bool> = - lib.get(b"test_check_static_ptr\0").unwrap(); - assert!(works()); - } -} - -#[test] -// Something about i686-pc-windows-gnu, makes dll initialisation code call abort when it is loaded -// and unloaded many times. So far it seems like an issue with mingw, not libloading, so ignoring -// the target. 
Especially since it is very unlikely to be fixed given the state of support its -// support. -#[cfg(not(all(target_arch = "x86", target_os = "windows", target_env = "gnu")))] -// Cygwin returns errors on `close`. -#[cfg(not(target_os = "cygwin"))] -fn manual_close_many_times() { - make_helpers(); - let join_handles: Vec<_> = (0..16) - .map(|_| { - std::thread::spawn(|| unsafe { - for _ in 0..10000 { - let lib = Library::new(lib_path()).expect("open library"); - let _: Symbol u32> = - lib.get(b"test_identity_u32").expect("get fn"); - lib.close().expect("close is successful"); - } - }) - }) - .collect(); - for handle in join_handles { - handle.join().expect("thread should succeed"); - } -} - -#[cfg(unix)] -#[test] -fn library_this_get() { - use libloading::os::unix::Library; - make_helpers(); - // SAFE: functions are never called - unsafe { - let _lib = Library::new(lib_path()).unwrap(); - let this = Library::this(); - // Library we loaded in `_lib` (should be RTLD_LOCAL). - assert!(this - .get::(b"test_identity_u32") - .is_err()); - // Something obscure from libc... - // Cygwin behaves like Windows so ignore it. - #[cfg(not(target_os = "cygwin"))] - assert!(this.get::(b"freopen").is_ok()); - } -} - -#[cfg(windows)] -#[test] -fn library_this() { - use libloading::os::windows::Library; - make_helpers(); - unsafe { - // SAFE: well-known library without initialisers is loaded. - let _lib = Library::new(lib_path()).unwrap(); - let this = Library::this().expect("this library"); - // SAFE: functions are never called. - // Library we loaded in `_lib`. - assert!(this - .get::(b"test_identity_u32") - .is_err()); - // Something "obscure" from kernel32... - assert!(this.get::(b"GetLastError").is_err()); - } -} - -#[cfg(windows)] -#[test] -fn works_getlasterror() { - use libloading::os::windows::{Library, Symbol}; - use windows_sys::Win32::Foundation::{GetLastError, SetLastError}; - - unsafe { - let lib = Library::new("kernel32.dll").unwrap(); - let gle: Symbol u32> = lib.get(b"GetLastError").unwrap(); - SetLastError(42); - assert_eq!(GetLastError(), gle()) - } -} - -#[cfg(windows)] -#[test] -fn works_getlasterror0() { - use libloading::os::windows::{Library, Symbol}; - use windows_sys::Win32::Foundation::{GetLastError, SetLastError}; - - unsafe { - let lib = Library::new("kernel32.dll").unwrap(); - let gle: Symbol u32> = lib.get(b"GetLastError\0").unwrap(); - SetLastError(42); - assert_eq!(GetLastError(), gle()) - } -} - -#[cfg(windows)] -#[test] -fn works_pin_module() { - use libloading::os::windows::Library; - - unsafe { - let lib = Library::new("kernel32.dll").unwrap(); - lib.pin().unwrap(); - } -} - -#[cfg(windows)] -#[test] -fn library_open_already_loaded() { - use libloading::os::windows::Library; - - // Present on Windows systems and NOT used by any other tests to prevent races. - const LIBPATH: &str = "Msftedit.dll"; - - // Not loaded yet. - assert!(match Library::open_already_loaded(LIBPATH) { - Err(libloading::Error::GetModuleHandleExW { .. }) => true, - _ => false, - }); - - unsafe { - let _lib = Library::new(LIBPATH).unwrap(); - // Loaded now. 
-        assert!(Library::open_already_loaded(LIBPATH).is_ok());
-    }
-}
diff --git a/vendor/libloading/tests/library_filename.rs b/vendor/libloading/tests/library_filename.rs
deleted file mode 100644
index 4642ece0874853..00000000000000
--- a/vendor/libloading/tests/library_filename.rs
+++ /dev/null
@@ -1,17 +0,0 @@
-extern crate libloading;
-use libloading::library_filename;
-use std::path::Path;
-
-#[cfg(any(target_os = "windows", target_os = "cygwin"))]
-const EXPECTED: &str = "audioengine.dll";
-#[cfg(target_os = "linux")]
-const EXPECTED: &str = "libaudioengine.so";
-#[cfg(target_os = "macos")]
-const EXPECTED: &str = "libaudioengine.dylib";
-
-#[test]
-fn test_library_filename() {
-    let name = "audioengine";
-    let resolved = library_filename(name);
-    assert!(Path::new(&resolved).ends_with(EXPECTED));
-}
diff --git a/vendor/libloading/tests/markers.rs b/vendor/libloading/tests/markers.rs
deleted file mode 100644
index 330c034ad5f45c..00000000000000
--- a/vendor/libloading/tests/markers.rs
+++ /dev/null
@@ -1,96 +0,0 @@
-extern crate libloading;
-
-#[cfg(test)]
-fn assert_send<T: Send>() {}
-#[cfg(test)]
-fn assert_sync<T: Sync>() {}
-#[cfg(test)]
-fn assert_display<T: std::fmt::Display>() {}
-
-#[test]
-fn check_error_send() {
-    assert_send::<libloading::Error>();
-}
-
-#[test]
-fn check_error_sync() {
-    assert_sync::<libloading::Error>();
-}
-
-#[test]
-fn check_error_display() {
-    assert_display::<libloading::Error>();
-}
-
-#[test]
-fn check_library_send() {
-    assert_send::<libloading::Library>();
-}
-
-#[cfg(unix)]
-#[test]
-fn check_unix_library_send() {
-    assert_send::<libloading::os::unix::Library>();
-}
-
-#[cfg(windows)]
-#[test]
-fn check_windows_library_send() {
-    assert_send::<libloading::os::windows::Library>();
-}
-
-#[test]
-fn check_library_sync() {
-    assert_sync::<libloading::Library>();
-}
-
-#[cfg(unix)]
-#[test]
-fn check_unix_library_sync() {
-    assert_sync::<libloading::os::unix::Library>();
-}
-
-#[cfg(windows)]
-#[test]
-fn check_windows_library_sync() {
-    assert_sync::<libloading::os::windows::Library>();
-}
-
-#[test]
-fn check_symbol_send() {
-    assert_send::<libloading::Symbol<extern "C" fn() -> ()>>();
-    // assert_not_send::<libloading::Symbol<*const ()>>();
-}
-
-#[cfg(unix)]
-#[test]
-fn check_unix_symbol_send() {
-    assert_send::<libloading::os::unix::Symbol<extern "C" fn() -> ()>>();
-    // assert_not_send::<libloading::os::unix::Symbol<*const ()>>();
-}
-
-#[cfg(windows)]
-#[test]
-fn check_windows_symbol_send() {
-    assert_send::<libloading::os::windows::Symbol<extern "C" fn() -> ()>>();
-}
-
-#[test]
-fn check_symbol_sync() {
-    assert_sync::<libloading::Symbol<extern "C" fn() -> ()>>();
-    // assert_not_sync::<libloading::Symbol<*const ()>>();
-}
-
-#[cfg(unix)]
-#[test]
-fn check_unix_symbol_sync() {
-    assert_sync::<libloading::os::unix::Symbol<extern "C" fn() -> ()>>();
-    // assert_not_sync::<libloading::os::unix::Symbol<*const ()>>();
-}
-
-#[cfg(windows)]
-#[test]
-fn check_windows_symbol_sync() {
-    assert_sync::<libloading::os::windows::Symbol<extern "C" fn() -> ()>>();
-    // assert_not_sync::<libloading::os::windows::Symbol<*const ()>>();
-}
diff --git a/vendor/libloading/tests/windows.rs b/vendor/libloading/tests/windows.rs
deleted file mode 100644
index 13a41450288494..00000000000000
--- a/vendor/libloading/tests/windows.rs
+++ /dev/null
@@ -1,71 +0,0 @@
-#![cfg(windows)]
-extern crate libloading;
-use libloading::os::windows::*;
-use std::ffi::CStr;
-use std::os::raw::c_void;
-// The ordinal DLL contains exactly one function (other than DllMain, that is) with ordinal number
-// 1. This function has the signature `fn() -> *const c_char` and returns a string "bunny\0" (in
-// reference to WindowsBunny).
-//
-// Both x86_64 and x86 versions of the .dll are functionally the same. Ideally we would compile the
-// dlls with well known ordinals from our own testing helpers library, but rustc does not allow
-// specifying a custom .def file (https://github.com/rust-lang/rust/issues/35089)
-//
-// The DLLs were kindly compiled by WindowsBunny (aka. @retep998).
- -#[cfg(target_arch = "x86")] -fn load_ordinal_lib() -> Library { - unsafe { Library::new("tests/nagisa32.dll").expect("nagisa32.dll") } -} - -#[cfg(target_arch = "x86_64")] -fn load_ordinal_lib() -> Library { - unsafe { Library::new("tests/nagisa64.dll").expect("nagisa64.dll") } -} - -#[cfg(any(target_arch = "x86", target_arch = "x86_64"))] -#[test] -fn test_ordinal() { - let lib = load_ordinal_lib(); - unsafe { - let windows: Symbol *const i8> = lib.get_ordinal(1).expect("function"); - assert_eq!(CStr::from_ptr(windows()).to_bytes(), b"bunny"); - } -} - -#[cfg(any(target_arch = "x86", target_arch = "x86_64"))] -#[test] -fn test_try_into_ptr() { - let lib = load_ordinal_lib(); - unsafe { - let windows: Symbol *const i8> = lib.get_ordinal(1).expect("function"); - let ptr: *mut c_void = windows.as_raw_ptr(); - assert!(!ptr.is_null()); - } -} - -#[cfg(any(target_arch = "x86", target_arch = "x86_64"))] -#[test] -fn test_ordinal_missing_fails() { - let lib = load_ordinal_lib(); - unsafe { - let r: Result *const i8>, _> = lib.get_ordinal(2); - r.err().unwrap(); - let r: Result *const i8>, _> = lib.get_ordinal(!0); - r.err().unwrap(); - } -} - -#[test] -fn test_new_kernel23() { - unsafe { - Library::new("kernel23").err().unwrap(); - } -} - -#[test] -fn test_new_kernel32_no_ext() { - unsafe { - Library::new("kernel32").unwrap(); - } -} diff --git a/vendor/log/.cargo-checksum.json b/vendor/log/.cargo-checksum.json deleted file mode 100644 index 763945ad571b1b..00000000000000 --- a/vendor/log/.cargo-checksum.json +++ /dev/null @@ -1 +0,0 @@ -{"files":{".cargo_vcs_info.json":"d0cb2c582cde22a9d66fb0765cd8370b1ad5f39c4cce9685ccec9e057b1c9e23",".github/workflows/main.yml":"df525d79c4f63dd708126c1379134490d7a02c1729f06486141b2b90316fd39a","CHANGELOG.md":"a52fd4f4ddd7ed2c62e584f62057a5265be24abb53283ea49b1eb46ceb18a701","Cargo.lock":"80665e8b018d0dfe482b1581a138ac4e3e562bde5fd53f889cd6e48e6a96c374","Cargo.toml":"53a23ba91b2b31fb42b2986e95e8a8107fe9cdab34cc85568aeea9ce872b51ae","Cargo.toml.orig":"3b352dad4bca34832854a14a1534988a7d78380556ea8a6106015fe009ce4584","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"6485b8ed310d3f0340bf1ad1f47645069ce4069dcc6bb46c7d5c6faf41de1fdb","README.md":"486c6cf85f99a2a3ea14dbd5fc6d6075fdc047a0dbf2b93682595db0de6a115f","benches/value.rs":"b613ff353d3cf0ef8cb98e4ca461ea929b8ba553fe299f2eb2942d77a5b1b6a0","src/__private_api.rs":"9f6f76ae924f884115ad52f552c13282e11b43e98ae7b5ffb631913f3cefa11f","src/kv/error.rs":"6dae12424164c33b93915f5e70bd6d99d616c969c8bfb543806721dd9b423981","src/kv/key.rs":"e63fd5b22b62f2bfacbd77fe0913c3667ed39de5eeb6d93292b77b1b1de4208a","src/kv/mod.rs":"e194d44e1e626f33c9a9bf90b4053eb98d7652c795ba811e5ccc24b340be3a6e","src/kv/source.rs":"73fbc180c824072d86f1f41f8c59c014db1d8988a86be38a9128d67d6aab06a5","src/kv/value.rs":"c7cd0faf06adb04aa53d7ba1c305874d5e69364d037b17b9ab4ecf4a3dde1d4e","src/lib.rs":"e0b09715dce40d961b138c66bfd0963f65e1b6aa002461fabbcc8da49036cddc","src/macros.rs":"34c367a645483e21eee4c7846d0efbf97c29a52156d56536c82cdfe1d226a54d","src/serde.rs":"1b261f9df7a97ace311e9ab9b6c951a17ff7e39227a352c7e09cb2731efd9a2f","tests/integration.rs":"0980b3bd85d36863bc9f355e80bc7cf7987d2599adbc87e8e0082861a08a1097","tests/macros.rs":"a94f3cc181c9ecb30af6b5ca8bd2b4e5accc93689c0eb19051b8479a298dc21b","triagebot.toml":"a135e10c777cd13459559bdf74fb704c1379af7c9b0f70bc49fa6f5a837daa81"},"package":"34080505efa8e45a4b816c349525ebe327ceaa8559756f0356cba97ef3bf7432"} \ No newline at end of file diff --git 
a/vendor/log/.cargo_vcs_info.json b/vendor/log/.cargo_vcs_info.json deleted file mode 100644 index c581ca83614a3c..00000000000000 --- a/vendor/log/.cargo_vcs_info.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "git": { - "sha1": "6e1735597bb21c5d979a077395df85e1d633e077" - }, - "path_in_vcs": "" -} \ No newline at end of file diff --git a/vendor/log/.github/workflows/main.yml b/vendor/log/.github/workflows/main.yml deleted file mode 100644 index 3f5988fbd961ad..00000000000000 --- a/vendor/log/.github/workflows/main.yml +++ /dev/null @@ -1,134 +0,0 @@ -name: CI -on: [push, pull_request] - -# Ensure only read permission is granted -permissions: - contents: read - -jobs: - test: - name: Test - runs-on: ${{ matrix.os }} - strategy: - matrix: - include: - - build: stable - os: ubuntu-latest - rust: stable - - build: beta - os: ubuntu-latest - rust: beta - - build: nightly - os: ubuntu-latest - rust: nightly - - build: macos - os: macos-latest - rust: stable - - build: win32 - os: windows-latest - rust: stable-i686-pc-windows-msvc - - build: win64 - os: windows-latest - rust: stable-x86_64-pc-windows-msvc - - build: mingw - os: windows-latest - rust: stable-x86_64-pc-windows-gnu - steps: - - uses: actions/checkout@v4 - - name: Install toolchain - run: | - rustup update ${{ matrix.rust }} --no-self-update - rustup default ${{ matrix.rust }} - cargo +stable install cargo-hack --locked - - run: cargo hack test --feature-powerset --exclude-features max_level_off,max_level_error,max_level_warn,max_level_info,max_level_debug,max_level_trace,release_max_level_off,release_max_level_error,release_max_level_warn,release_max_level_info,release_max_level_debug,release_max_level_trace - - run: cargo run --verbose --manifest-path test_max_level_features/Cargo.toml - - run: cargo run --verbose --manifest-path test_max_level_features/Cargo.toml --release - - check: - name: Check Format and Clippy - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4 - - name: Install toolchain - run: | - rustup update stable --no-self-update - rustup default stable - rustup component add clippy rustfmt - - run: cargo fmt -- --check - - run: cargo fmt --manifest-path test_max_level_features/Cargo.toml -- --check - - run: cargo clippy --verbose - - run: cargo clippy --verbose --manifest-path test_max_level_features/Cargo.toml - - doc: - name: Check Documentation - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4 - - name: Install toolchain - run: | - rustup update stable --no-self-update - rustup default stable - rustup component add rust-docs - - name: Run rustdoc - env: - RUSTDOCFLAGS: "-D warnings" - run: cargo doc --verbose --features std,serde,sval,sval_ref,value-bag,kv,kv_std,kv_sval,kv_serde - - features: - name: Feature check - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4 - - name: Install toolchain - run: | - rustup update nightly --no-self-update - rustup default nightly - - run: cargo build --verbose -Z avoid-dev-deps --features kv - - run: cargo build --verbose -Z avoid-dev-deps --features "kv std" - - run: cargo build --verbose -Z avoid-dev-deps --features "kv kv_sval" - - run: cargo build --verbose -Z avoid-dev-deps --features "kv kv_serde" - - run: cargo build --verbose -Z avoid-dev-deps --features "kv kv_std" - - run: cargo build --verbose -Z avoid-dev-deps --features "kv kv_sval kv_serde" - - minimalv: - name: Minimal versions - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4 - - name: Install toolchain - run: | - rustup update nightly --no-self-update - rustup 
default nightly - - run: cargo build --verbose -Z minimal-versions --features kv - - run: cargo build --verbose -Z minimal-versions --features "kv std" - - run: cargo build --verbose -Z minimal-versions --features "kv kv_sval" - - run: cargo build --verbose -Z minimal-versions --features "kv kv_serde" - - run: cargo build --verbose -Z minimal-versions --features "kv kv_std" - - run: cargo build --verbose -Z minimal-versions --features "kv kv_sval kv_serde" - - msrv: - name: MSRV - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4 - - name: Install toolchain - run: | - rustup update 1.61.0 --no-self-update - rustup default 1.61.0 - cargo +stable install cargo-hack --locked - - run: cargo hack test --feature-powerset --exclude-features max_level_off,max_level_error,max_level_warn,max_level_info,max_level_debug,max_level_trace,release_max_level_off,release_max_level_error,release_max_level_warn,release_max_level_info,release_max_level_debug,release_max_level_trace - - run: cargo run --verbose --manifest-path test_max_level_features/Cargo.toml - - run: cargo run --verbose --manifest-path test_max_level_features/Cargo.toml --release - - embedded: - name: Embedded - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4 - - name: Install toolchain - run: | - rustup update stable --no-self-update - rustup default stable - - run: rustup target add thumbv6m-none-eabi riscv32imc-unknown-none-elf - - run: cargo build --verbose --target=thumbv6m-none-eabi - - run: cargo build --verbose --target=riscv32imc-unknown-none-elf diff --git a/vendor/log/CHANGELOG.md b/vendor/log/CHANGELOG.md deleted file mode 100644 index 48f6693342a698..00000000000000 --- a/vendor/log/CHANGELOG.md +++ /dev/null @@ -1,410 +0,0 @@ -# Change Log - -## [Unreleased] - -## [0.4.28] - 2025-09-02 - -## What's Changed -* ci: drop really old trick and ensure MSRV for all feature combo by @tisonkun in https://github.com/rust-lang/log/pull/676 -* Chore: delete compare_exchange method for AtomicUsize on platforms without atomics by @HaoliangXu in https://github.com/rust-lang/log/pull/690 -* Add `increment_severity()` and `decrement_severity()` methods for `Level` and `LevelFilter` by @nebkor in https://github.com/rust-lang/log/pull/692 - -## New Contributors -* @xixishidibei made their first contribution in https://github.com/rust-lang/log/pull/677 -* @ZylosLumen made their first contribution in https://github.com/rust-lang/log/pull/688 -* @HaoliangXu made their first contribution in https://github.com/rust-lang/log/pull/690 -* @nebkor made their first contribution in https://github.com/rust-lang/log/pull/692 - -**Full Changelog**: https://github.com/rust-lang/log/compare/0.4.27...0.4.28 - -### Notable Changes -* MSRV is bumped to 1.61.0 in https://github.com/rust-lang/log/pull/676 - -## [0.4.27] - 2025-03-24 - -### What's Changed -* A few minor lint fixes by @nyurik in https://github.com/rust-lang/log/pull/671 -* Enable clippy support for format-like macros by @nyurik in https://github.com/rust-lang/log/pull/665 -* Add an optional logger param by @tisonkun in https://github.com/rust-lang/log/pull/664 -* Pass global logger by value, supplied logger by ref by @KodrAus in https://github.com/rust-lang/log/pull/673 - - -**Full Changelog**: https://github.com/rust-lang/log/compare/0.4.26...0.4.27 - - -## [0.4.26] - 2025-02-18 - -### What's Changed -* Derive `Clone` for `kv::Value` by @SpriteOvO in https://github.com/rust-lang/log/pull/668 -* Add `spdlog-rs` link to crate doc by @SpriteOvO in 
https://github.com/rust-lang/log/pull/669 - - -**Full Changelog**: https://github.com/rust-lang/log/compare/0.4.25...0.4.26 - -## [0.4.25] - 2025-01-14 - -### What's Changed -* Revert loosening of kv cargo features by @KodrAus in https://github.com/rust-lang/log/pull/662 - - -**Full Changelog**: https://github.com/rust-lang/log/compare/0.4.24...0.4.25 - -## [0.4.24] - 2025-01-11 - -### What's Changed -* Fix up kv feature activation by @KodrAus in https://github.com/rust-lang/log/pull/659 - - -**Full Changelog**: https://github.com/rust-lang/log/compare/0.4.23...0.4.24 - -## [0.4.23] - 2025-01-10 (yanked) - -### What's Changed -* Fix some typos by @Kleinmarb in https://github.com/rust-lang/log/pull/637 -* Add logforth to implementation by @tisonkun in https://github.com/rust-lang/log/pull/638 -* Add `spdlog-rs` link to README by @SpriteOvO in https://github.com/rust-lang/log/pull/639 -* Add correct lifetime to kv::Value::to_borrowed_str by @stevenroose in https://github.com/rust-lang/log/pull/643 -* docs: Add logforth as an impl by @tisonkun in https://github.com/rust-lang/log/pull/642 -* Add clang_log implementation by @DDAN-17 in https://github.com/rust-lang/log/pull/646 -* Bind lifetimes of &str returned from Key by the lifetime of 'k rather than the lifetime of the Key struct by @gbbosak in https://github.com/rust-lang/log/pull/648 -* Fix up key lifetimes and add method to try get a borrowed key by @KodrAus in https://github.com/rust-lang/log/pull/653 -* Add Ftail implementation by @tjardoo in https://github.com/rust-lang/log/pull/652 - -### New Contributors -* @Kleinmarb made their first contribution in https://github.com/rust-lang/log/pull/637 -* @tisonkun made their first contribution in https://github.com/rust-lang/log/pull/638 -* @SpriteOvO made their first contribution in https://github.com/rust-lang/log/pull/639 -* @stevenroose made their first contribution in https://github.com/rust-lang/log/pull/643 -* @DDAN-17 made their first contribution in https://github.com/rust-lang/log/pull/646 -* @gbbosak made their first contribution in https://github.com/rust-lang/log/pull/648 -* @tjardoo made their first contribution in https://github.com/rust-lang/log/pull/652 - -**Full Changelog**: https://github.com/rust-lang/log/compare/0.4.22...0.4.23 - -## [0.4.22] - 2024-06-27 - -### What's Changed -* Add some clarifications to the library docs by @KodrAus in https://github.com/rust-lang/log/pull/620 -* Add links to `colog` crate by @chrivers in https://github.com/rust-lang/log/pull/621 -* adding line_number test + updating some testing infrastructure by @DIvkov575 in https://github.com/rust-lang/log/pull/619 -* Clarify the actual set of functions that can race in _racy variants by @KodrAus in https://github.com/rust-lang/log/pull/623 -* Replace deprecated std::sync::atomic::spin_loop_hint() by @Catamantaloedis in https://github.com/rust-lang/log/pull/625 -* Check usage of max_level features by @Thomasdezeeuw in https://github.com/rust-lang/log/pull/627 -* Remove unneeded import by @Thomasdezeeuw in https://github.com/rust-lang/log/pull/628 -* Loosen orderings for logger initialization in https://github.com/rust-lang/log/pull/632. Originally by @pwoolcoc in https://github.com/rust-lang/log/pull/599 -* Use Location::caller() for file and line info in https://github.com/rust-lang/log/pull/633. 
Originally by @Cassy343 in https://github.com/rust-lang/log/pull/520 - -### New Contributors -* @chrivers made their first contribution in https://github.com/rust-lang/log/pull/621 -* @DIvkov575 made their first contribution in https://github.com/rust-lang/log/pull/619 -* @Catamantaloedis made their first contribution in https://github.com/rust-lang/log/pull/625 - -**Full Changelog**: https://github.com/rust-lang/log/compare/0.4.21...0.4.22 - -## [0.4.21] - 2024-02-27 - -### What's Changed -* Minor clippy nits by @nyurik in https://github.com/rust-lang/log/pull/578 -* Simplify Display impl by @nyurik in https://github.com/rust-lang/log/pull/579 -* Set all crates to 2021 edition by @nyurik in https://github.com/rust-lang/log/pull/580 -* Various changes based on review by @Thomasdezeeuw in https://github.com/rust-lang/log/pull/583 -* Fix typo in file_static() method doc by @dimo414 in https://github.com/rust-lang/log/pull/590 -* Specialize empty key value pairs by @EFanZh in https://github.com/rust-lang/log/pull/576 -* Fix incorrect lifetime in Value::to_str() by @peterjoel in https://github.com/rust-lang/log/pull/587 -* Remove some API of the key-value feature by @Thomasdezeeuw in https://github.com/rust-lang/log/pull/585 -* Add logcontrol-log and log-reload by @swsnr in https://github.com/rust-lang/log/pull/595 -* Add Serialization section to kv::Value docs by @Thomasdezeeuw in https://github.com/rust-lang/log/pull/593 -* Rename Value::to_str to to_cow_str by @Thomasdezeeuw in https://github.com/rust-lang/log/pull/592 -* Clarify documentation and simplify initialization of `STATIC_MAX_LEVEL` by @ptosi in https://github.com/rust-lang/log/pull/594 -* Update docs to 2021 edition, test by @nyurik in https://github.com/rust-lang/log/pull/577 -* Add "alterable_logger" link to README.md by @brummer-simon in https://github.com/rust-lang/log/pull/589 -* Normalize line ending by @EFanZh in https://github.com/rust-lang/log/pull/602 -* Remove `ok_or` in favor of `Option::ok_or` by @AngelicosPhosphoros in https://github.com/rust-lang/log/pull/607 -* Use `Acquire` ordering for initialization check by @AngelicosPhosphoros in https://github.com/rust-lang/log/pull/610 -* Get structured logging API ready for stabilization by @KodrAus in https://github.com/rust-lang/log/pull/613 - -### New Contributors -* @nyurik made their first contribution in https://github.com/rust-lang/log/pull/578 -* @dimo414 made their first contribution in https://github.com/rust-lang/log/pull/590 -* @peterjoel made their first contribution in https://github.com/rust-lang/log/pull/587 -* @ptosi made their first contribution in https://github.com/rust-lang/log/pull/594 -* @brummer-simon made their first contribution in https://github.com/rust-lang/log/pull/589 -* @AngelicosPhosphoros made their first contribution in https://github.com/rust-lang/log/pull/607 - -## [0.4.20] - 2023-07-11 - -* Remove rustversion dev-dependency by @Thomasdezeeuw in https://github.com/rust-lang/log/pull/568 -* Remove `local_inner_macros` usage by @EFanZh in https://github.com/rust-lang/log/pull/570 - -## [0.4.19] - 2023-06-10 - -* Use target_has_atomic instead of the old atomic_cas cfg by @GuillaumeGomez in https://github.com/rust-lang/log/pull/555 -* Put MSRV into Cargo.toml by @est31 in https://github.com/rust-lang/log/pull/557 - -## [0.4.18] - 2023-05-28 - -* fix Markdown links (again) by @hellow554 in https://github.com/rust-lang/log/pull/513 -* add cargo doc to workflow by @hellow554 in https://github.com/rust-lang/log/pull/515 -* Apply Clippy lints by 
@hellow554 in https://github.com/rust-lang/log/pull/516 -* Replace ad-hoc eq_ignore_ascii_case with slice::eq_ignore_ascii_case by @glandium in https://github.com/rust-lang/log/pull/519 -* fix up windows targets by @KodrAus in https://github.com/rust-lang/log/pull/528 -* typo fix by @jiangying000 in https://github.com/rust-lang/log/pull/529 -* Remove dependency on cfg_if by @EriKWDev in https://github.com/rust-lang/log/pull/536 -* GitHub Workflows security hardening by @sashashura in https://github.com/rust-lang/log/pull/538 -* Fix build status badge by @atouchet in https://github.com/rust-lang/log/pull/539 -* Add call_logger to the documentation by @a1ecbr0wn in https://github.com/rust-lang/log/pull/547 -* Use stable internals for key-value API by @KodrAus in https://github.com/rust-lang/log/pull/550 -* Change wording of list of implementations by @Thomasdezeeuw in https://github.com/rust-lang/log/pull/553 -* Add std-logger to list of implementations by @Thomasdezeeuw in https://github.com/rust-lang/log/pull/554 -* Add `set_max_level_racy` and gate `set_max_level` by @djkoloski in https://github.com/rust-lang/log/pull/544 -* [doc] src/lib.rs : prefix an unused variable with an underscore by @OccupyMars2025 in https://github.com/rust-lang/log/pull/561 -* [doc] src/macros.rs : correct grammar errors of an example in lib documentation by @OccupyMars2025 in https://github.com/rust-lang/log/pull/562 - -## [0.4.17] - 2022-04-29 - -* Update `kv_unstable` internal dependencies. - -## [0.4.16] - 2022-03-22 - -* Fix a conflict with unqualified `Option` use in macros. - -## [0.4.15] - 2022-02-23 - -* Silence a warning about the deprecated `spin_loop_hint`. -* Relax ordering in the atomic `set_max_level` call. -* Add thumbv4t-none-eabi to targets that don't support atomics -* Allow levels to be iterated over. -* Implement `Log` on some common wrapper types. -* Improvements to test coverage. -* Improvements to documentation. -* Add key-value support to the `log!` macros. -* Tighten `kv_unstable` internal dependencies, so they don't bump past their current alpha. -* Add a simple visit API to `kv_unstable`. -* Support `NonZero*` integers as values in structured logging -* Support static strings as keys in structured logging - -## [0.4.14] - 2021-01-27 - -* Remove the `__private_api_log_lit` special case. -* Fixed incorrect combination of `kv_unstable` and `std` features causing compile failures. -* Remove unstable `Value::to_*` conversions that were incorrectly using `as`. -* Rename unstable `Value::to_error` to `Value::to_borrowed_error`. - -## [0.4.13] - 2021-01-11 - -* This is the same as `0.4.11`, except with a `kv_unstable_std` feature added to aid migrating current dependents to `0.4.14` (which was originally going to be `0.4.13` until it was decided to create a patch from `0.4.11` to minimize disruption). - -## [0.4.12] - 2020-12-24 - -### New - -* Support platforms without atomics by racing instead of failing to compile -* Implement `Log` for `Box` -* Update `cfg-if` to `1.0` -* Internal reworks of the structured logging API. Removed the `Fill` API -and added `source::as_map` and `source::as_list` to easily serialize a `Source` -as either a map of `{key: value, ..}` or as a list of `[(key, value), ..]`. - -### Fixed - -* Fixed deserialization of `LevelFilter` to use their `u64` index variants - -## [0.4.11] - 2020-07-09 - -### New - -* Support coercing structured values into concrete types. -* Reference the `win_dbg_logger` in the readme. 
- -### Fixed - -* Updates a few deprecated items used internally. -* Fixed issues in docs and expands sections. -* Show the correct build badge in the readme. -* Fix up a possible inference breakage with structured value errors. -* Respect formatting flags in structured value formatting. - -## [0.4.10] - 2019-12-16 (yanked) - -### Fixed - -* Fixed the `log!` macros, so they work in expression context (this regressed in `0.4.9`, which has been yanked). - -## [0.4.9] - 2019-12-12 (yanked) - -### Minimum Supported Rust Version - -This release bumps the minimum compiler version to `1.31.0`. This was mainly needed for `cfg-if`, -but between `1.16.0` and `1.31.0` there are a lot of language and library improvements we now -take advantage of. - -### New - -* Unstable support for capturing key-value pairs in a record using the `log!` macros - -### Improved - -* Better documentation for max level filters. -* Internal updates to line up with bumped MSRV - -## [0.4.8] - 2019-07-28 - -### New - -* Support attempting to get `Record` fields as static strings. - -## [0.4.7] - 2019-07-06 - -### New - -* Support for embedded environments with thread-unsafe initialization. -* Initial unstable support for capturing structured data under the `kv_unstable` -feature gate. This new API doesn't affect existing users and may change in future -patches (so those changes may not appear in the changelog until it stabilizes). - -### Improved - -* Docs for using `log` with the 2018 edition. -* Error messages for macros missing arguments. - -## [0.4.6] - 2018-10-27 - -### Improved - -* Support 2018-style macro import for the `log_enabled!` macro. - -## [0.4.5] - 2018-09-03 - -### Improved - -* Make `log`'s internal helper macros less likely to conflict with user-defined - macros. - -## [0.4.4] - 2018-08-17 - -### Improved - -* Support 2018-style imports of the log macros. - -## [0.4.3] - 2018-06-29 - -### Improved - -* More code generation improvements. - -## [0.4.2] - 2018-06-05 - -### Improved - -* Log invocations now generate less code. - -### Fixed - -* Example Logger implementations now properly set the max log level. - -## [0.4.1] - 2017-12-30 - -### Fixed - -* Some doc links were fixed. - -## [0.4.0] - 2017-12-24 - -The changes in this release include cleanup of some obscure functionality and a more robust public -API designed to support bridges to other logging systems, and provide more flexibility to new -features in the future. - -### Compatibility - -Vast portions of the Rust ecosystem use the 0.3.x release series of log, and we don't want to force -the community to go through the pain of upgrading every crate to 0.4.x at the exact same time. Along -with 0.4.0, we've published a new 0.3.9 release which acts as a "shim" over 0.4.0. This will allow -crates using either version to coexist without losing messages from one side or the other. - -There is one caveat - a log message generated by a crate using 0.4.x but consumed by a logging -implementation using 0.3.x will not have a file name or module path. Applications affected by this -can upgrade their logging implementations to one using 0.4.x to avoid losing this information. The -other direction does not lose any information, fortunately! - -**TL;DR** Libraries should feel comfortable upgrading to 0.4.0 without treating that as a breaking -change. Applications may need to update their logging implementation (e.g. env-logger) to a newer -version using log 0.4.x to avoid losing module and file information. - -### New - -* The crate is now `no_std` by default. 
-* `Level` and `LevelFilter` now implement `Serialize` and `Deserialize` when the `serde` feature is - enabled. -* The `Record` and `Metadata` types can now be constructed by third-party code via a builder API. -* The `logger` free function returns a reference to the logger implementation. This, along with the - ability to construct `Record`s, makes it possible to bridge from another logging framework to - this one without digging into the private internals of the crate. The standard `error!` `warn!`, - etc., macros now exclusively use the public API of the crate rather than "secret" internal APIs. -* `Log::flush` has been added to allow crates to tell the logging implementation to ensure that all - "in flight" log events have been persisted. This can be used, for example, just before an - application exits to ensure that asynchronous log sinks finish their work. - -### Removed - -* The `shutdown` and `shutdown_raw` functions have been removed. Supporting shutdown significantly - complicated the implementation and imposed a performance cost on each logging operation. -* The `log_panics` function and its associated `nightly` Cargo feature have been removed. Use the - [log-panics](https://crates.io/crates/log-panics) instead. - -### Changed - -* The `Log` prefix has been removed from type names. For example, `LogLevelFilter` is now - `LevelFilter`, and `LogRecord` is now `Record`. -* The `MaxLogLevelFilter` object has been removed in favor of a `set_max_level` free function. -* The `set_logger` free functions have been restructured. The logger is now directly passed to the - functions rather than a closure which returns the logger. `set_logger` now takes a `&'static - Log` and is usable in `no_std` contexts in place of the old `set_logger_raw`. `set_boxed_logger` - is a convenience function which takes a `Box` but otherwise acts like `set_logger`. It - requires the `std` feature. -* The `file` and `module_path` values in `Record` no longer have the `'static` lifetime to support - integration with other logging frameworks that don't provide a `'static` lifetime for the - equivalent values. -* The `file`, `line`, and `module_path` values in `Record` are now `Option`s to support integration - with other logging frameworks that don't provide those values. - -### In the Future - -* We're looking to add support for *structured* logging - the inclusion of extra key-value pairs of - information in a log event in addition to the normal string message. This should be able to be - added in a backwards compatible manner to the 0.4.x series when the design is worked out. - -## Older - -Look at the [release tags] for information about older releases. 
- -[Unreleased]: https://github.com/rust-lang-nursery/log/compare/0.4.28...HEAD -[0.4.28]: https://github.com/rust-lang/log/compare/0.4.27...0.4.28 -[0.4.27]: https://github.com/rust-lang/log/compare/0.4.26...0.4.27 -[0.4.26]: https://github.com/rust-lang/log/compare/0.4.25...0.4.26 -[0.4.25]: https://github.com/rust-lang/log/compare/0.4.24...0.4.25 -[0.4.24]: https://github.com/rust-lang/log/compare/0.4.23...0.4.24 -[0.4.23]: https://github.com/rust-lang/log/compare/0.4.22...0.4.23 -[0.4.22]: https://github.com/rust-lang/log/compare/0.4.21...0.4.22 -[0.4.21]: https://github.com/rust-lang/log/compare/0.4.20...0.4.21 -[0.4.20]: https://github.com/rust-lang-nursery/log/compare/0.4.19...0.4.20 -[0.4.19]: https://github.com/rust-lang-nursery/log/compare/0.4.18...0.4.19 -[0.4.18]: https://github.com/rust-lang-nursery/log/compare/0.4.17...0.4.18 -[0.4.17]: https://github.com/rust-lang-nursery/log/compare/0.4.16...0.4.17 -[0.4.16]: https://github.com/rust-lang-nursery/log/compare/0.4.15...0.4.16 -[0.4.15]: https://github.com/rust-lang-nursery/log/compare/0.4.13...0.4.15 -[0.4.14]: https://github.com/rust-lang-nursery/log/compare/0.4.13...0.4.14 -[0.4.13]: https://github.com/rust-lang-nursery/log/compare/0.4.11...0.4.13 -[0.4.12]: https://github.com/rust-lang-nursery/log/compare/0.4.11...0.4.12 -[0.4.11]: https://github.com/rust-lang-nursery/log/compare/0.4.10...0.4.11 -[0.4.10]: https://github.com/rust-lang-nursery/log/compare/0.4.9...0.4.10 -[0.4.9]: https://github.com/rust-lang-nursery/log/compare/0.4.8...0.4.9 -[0.4.8]: https://github.com/rust-lang-nursery/log/compare/0.4.7...0.4.8 -[0.4.7]: https://github.com/rust-lang-nursery/log/compare/0.4.6...0.4.7 -[0.4.6]: https://github.com/rust-lang-nursery/log/compare/0.4.5...0.4.6 -[0.4.5]: https://github.com/rust-lang-nursery/log/compare/0.4.4...0.4.5 -[0.4.4]: https://github.com/rust-lang-nursery/log/compare/0.4.3...0.4.4 -[0.4.3]: https://github.com/rust-lang-nursery/log/compare/0.4.2...0.4.3 -[0.4.2]: https://github.com/rust-lang-nursery/log/compare/0.4.1...0.4.2 -[0.4.1]: https://github.com/rust-lang-nursery/log/compare/0.4.0...0.4.1 -[0.4.0]: https://github.com/rust-lang-nursery/log/compare/0.3.8...0.4.0 -[release tags]: https://github.com/rust-lang-nursery/log/releases diff --git a/vendor/log/Cargo.lock b/vendor/log/Cargo.lock deleted file mode 100644 index 349e97e7e8e342..00000000000000 --- a/vendor/log/Cargo.lock +++ /dev/null @@ -1,270 +0,0 @@ -# This file is automatically @generated by Cargo. -# It is not intended for manual editing. 
-version = 3 - -[[package]] -name = "erased-serde" -version = "0.4.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e004d887f51fcb9fef17317a2f3525c887d8aa3f4f50fed920816a688284a5b7" -dependencies = [ - "serde", - "typeid", -] - -[[package]] -name = "itoa" -version = "1.0.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c" - -[[package]] -name = "log" -version = "0.4.28" -dependencies = [ - "proc-macro2", - "serde", - "serde_json", - "serde_test", - "sval", - "sval_derive", - "sval_ref", - "value-bag", -] - -[[package]] -name = "memchr" -version = "2.7.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32a282da65faaf38286cf3be983213fcf1d2e2a58700e808f83f4ea9a4804bc0" - -[[package]] -name = "proc-macro2" -version = "1.0.101" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89ae43fd86e4158d6db51ad8e2b80f313af9cc74f5c0e03ccb87de09998732de" -dependencies = [ - "unicode-ident", -] - -[[package]] -name = "quote" -version = "1.0.40" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1885c039570dc00dcb4ff087a89e185fd56bae234ddc7f056a945bf36467248d" -dependencies = [ - "proc-macro2", -] - -[[package]] -name = "ryu" -version = "1.0.20" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28d3b2b1366ec20994f1fd18c3c594f05c5dd4bc44d8bb0c1c632c8d6829481f" - -[[package]] -name = "serde" -version = "1.0.219" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f0e2c6ed6606019b4e29e69dbaba95b11854410e5347d525002456dbbb786b6" -dependencies = [ - "serde_derive", -] - -[[package]] -name = "serde_derive" -version = "1.0.219" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b0276cf7f2c73365f7157c8123c21cd9a50fbbd844757af28ca1f5925fc2a00" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "serde_fmt" -version = "1.0.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e1d4ddca14104cd60529e8c7f7ba71a2c8acd8f7f5cfcdc2faf97eeb7c3010a4" -dependencies = [ - "serde", -] - -[[package]] -name = "serde_json" -version = "1.0.143" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d401abef1d108fbd9cbaebc3e46611f4b1021f714a0597a71f41ee463f5f4a5a" -dependencies = [ - "itoa", - "memchr", - "ryu", - "serde", -] - -[[package]] -name = "serde_test" -version = "1.0.177" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f901ee573cab6b3060453d2d5f0bae4e6d628c23c0a962ff9b5f1d7c8d4f1ed" -dependencies = [ - "serde", -] - -[[package]] -name = "sval" -version = "2.14.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7cc9739f56c5d0c44a5ed45473ec868af02eb896af8c05f616673a31e1d1bb09" - -[[package]] -name = "sval_buffer" -version = "2.14.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f39b07436a8c271b34dad5070c634d1d3d76d6776e938ee97b4a66a5e8003d0b" -dependencies = [ - "sval", - "sval_ref", -] - -[[package]] -name = "sval_derive" -version = "2.14.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fcb59acf1048b0d0472a2393fc4bb3082217103245f51470313298ec7b7fbe6" -dependencies = [ - "sval_derive_macros", -] - -[[package]] -name = "sval_derive_macros" -version = "2.14.1" -source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "0b0dcdc2dad24659b85a75c0fe56a62e6d7d7ff8168195dc8117e6d98e528fc9" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "sval_dynamic" -version = "2.14.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffcb072d857431bf885580dacecf05ed987bac931230736739a79051dbf3499b" -dependencies = [ - "sval", -] - -[[package]] -name = "sval_fmt" -version = "2.14.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f214f427ad94a553e5ca5514c95c6be84667cbc5568cce957f03f3477d03d5c" -dependencies = [ - "itoa", - "ryu", - "sval", -] - -[[package]] -name = "sval_json" -version = "2.14.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "389ed34b32e638dec9a99c8ac92d0aa1220d40041026b625474c2b6a4d6f4feb" -dependencies = [ - "itoa", - "ryu", - "sval", -] - -[[package]] -name = "sval_nested" -version = "2.14.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14bae8fcb2f24fee2c42c1f19037707f7c9a29a0cda936d2188d48a961c4bb2a" -dependencies = [ - "sval", - "sval_buffer", - "sval_ref", -] - -[[package]] -name = "sval_ref" -version = "2.14.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a4eaea3821d3046dcba81d4b8489421da42961889902342691fb7eab491d79e" -dependencies = [ - "sval", -] - -[[package]] -name = "sval_serde" -version = "2.14.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "172dd4aa8cb3b45c8ac8f3b4111d644cd26938b0643ede8f93070812b87fb339" -dependencies = [ - "serde", - "sval", - "sval_nested", -] - -[[package]] -name = "syn" -version = "2.0.106" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ede7c438028d4436d71104916910f5bb611972c5cfd7f89b8300a8186e6fada6" -dependencies = [ - "proc-macro2", - "quote", - "unicode-ident", -] - -[[package]] -name = "typeid" -version = "1.0.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc7d623258602320d5c55d1bc22793b57daff0ec7efc270ea7d55ce1d5f5471c" - -[[package]] -name = "unicode-ident" -version = "1.0.18" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a5f39404a5da50712a4c1eecf25e90dd62b613502b7e925fd4e4d19b5c96512" - -[[package]] -name = "value-bag" -version = "1.11.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "943ce29a8a743eb10d6082545d861b24f9d1b160b7d741e0f2cdf726bec909c5" -dependencies = [ - "value-bag-serde1", - "value-bag-sval2", -] - -[[package]] -name = "value-bag-serde1" -version = "1.11.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35540706617d373b118d550d41f5dfe0b78a0c195dc13c6815e92e2638432306" -dependencies = [ - "erased-serde", - "serde", - "serde_fmt", -] - -[[package]] -name = "value-bag-sval2" -version = "1.11.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6fe7e140a2658cc16f7ee7a86e413e803fc8f9b5127adc8755c19f9fefa63a52" -dependencies = [ - "sval", - "sval_buffer", - "sval_dynamic", - "sval_fmt", - "sval_json", - "sval_ref", - "sval_serde", -] diff --git a/vendor/log/Cargo.toml b/vendor/log/Cargo.toml deleted file mode 100644 index cd0abc6ab8cb2c..00000000000000 --- a/vendor/log/Cargo.toml +++ /dev/null @@ -1,151 +0,0 @@ -# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO -# -# When uploading crates to the registry Cargo will automatically -# "normalize" Cargo.toml files for maximal compatibility -# with all versions of Cargo and also 
rewrite `path` dependencies -# to registry (e.g., crates.io) dependencies. -# -# If you are reading this file be aware that the original Cargo.toml -# will likely look very different (and much more reasonable). -# See Cargo.toml.orig for the original contents. - -[package] -edition = "2021" -rust-version = "1.61.0" -name = "log" -version = "0.4.28" -authors = ["The Rust Project Developers"] -build = false -exclude = ["rfcs/**/*"] -autolib = false -autobins = false -autoexamples = false -autotests = false -autobenches = false -description = """ -A lightweight logging facade for Rust -""" -documentation = "https://docs.rs/log" -readme = "README.md" -keywords = ["logging"] -categories = ["development-tools::debugging"] -license = "MIT OR Apache-2.0" -repository = "https://github.com/rust-lang/log" - -[package.metadata.docs.rs] -features = [ - "std", - "serde", - "kv_std", - "kv_sval", - "kv_serde", -] - -[features] -kv = [] -kv_serde = [ - "kv_std", - "value-bag/serde", - "serde", -] -kv_std = [ - "std", - "kv", - "value-bag/error", -] -kv_sval = [ - "kv", - "value-bag/sval", - "sval", - "sval_ref", -] -kv_unstable = [ - "kv", - "value-bag", -] -kv_unstable_serde = [ - "kv_serde", - "kv_unstable_std", -] -kv_unstable_std = [ - "kv_std", - "kv_unstable", -] -kv_unstable_sval = [ - "kv_sval", - "kv_unstable", -] -max_level_debug = [] -max_level_error = [] -max_level_info = [] -max_level_off = [] -max_level_trace = [] -max_level_warn = [] -release_max_level_debug = [] -release_max_level_error = [] -release_max_level_info = [] -release_max_level_off = [] -release_max_level_trace = [] -release_max_level_warn = [] -std = [] - -[lib] -name = "log" -path = "src/lib.rs" - -[[test]] -name = "integration" -path = "tests/integration.rs" - -[[test]] -name = "macros" -path = "tests/macros.rs" - -[[bench]] -name = "value" -path = "benches/value.rs" - -[dependencies.serde] -version = "1.0" -optional = true -default-features = false - -[dependencies.sval] -version = "2.14.1" -optional = true -default-features = false - -[dependencies.sval_ref] -version = "2.1" -optional = true -default-features = false - -[dependencies.value-bag] -version = "1.7" -features = ["inline-i128"] -optional = true -default-features = false - -[dev-dependencies.proc-macro2] -version = "1.0.63" -default-features = false - -[dev-dependencies.serde] -version = "1.0" -features = ["derive"] - -[dev-dependencies.serde_json] -version = "1.0" - -[dev-dependencies.serde_test] -version = "1.0" - -[dev-dependencies.sval] -version = "2.1" - -[dev-dependencies.sval_derive] -version = "2.1" - -[dev-dependencies.value-bag] -version = "1.7" -features = ["test"] diff --git a/vendor/log/LICENSE-APACHE b/vendor/log/LICENSE-APACHE deleted file mode 100644 index 16fe87b06e802f..00000000000000 --- a/vendor/log/LICENSE-APACHE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. 
For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - -2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - -3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - -4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - -5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - -6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - -8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - -Copyright [yyyy] [name of copyright owner] - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
diff --git a/vendor/log/LICENSE-MIT b/vendor/log/LICENSE-MIT deleted file mode 100644 index 39d4bdb5acd313..00000000000000 --- a/vendor/log/LICENSE-MIT +++ /dev/null @@ -1,25 +0,0 @@ -Copyright (c) 2014 The Rust Project Developers - -Permission is hereby granted, free of charge, to any -person obtaining a copy of this software and associated -documentation files (the "Software"), to deal in the -Software without restriction, including without -limitation the rights to use, copy, modify, merge, -publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software -is furnished to do so, subject to the following -conditions: - -The above copyright notice and this permission notice -shall be included in all copies or substantial portions -of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -DEALINGS IN THE SOFTWARE. diff --git a/vendor/log/README.md b/vendor/log/README.md deleted file mode 100644 index 9d5113d2402386..00000000000000 --- a/vendor/log/README.md +++ /dev/null @@ -1,134 +0,0 @@ -log -=== - -A Rust library providing a lightweight logging *facade*. - -[![Build status](https://img.shields.io/github/actions/workflow/status/rust-lang/log/main.yml?branch=master)](https://github.com/rust-lang/log/actions) -[![Latest version](https://img.shields.io/crates/v/log.svg)](https://crates.io/crates/log) -[![Documentation](https://docs.rs/log/badge.svg)](https://docs.rs/log) -![License](https://img.shields.io/crates/l/log.svg) - -* [`log` documentation](https://docs.rs/log) - -A logging facade provides a single logging API that abstracts over the actual -logging implementation. Libraries can use the logging API provided by this -crate, and the consumer of those libraries can choose the logging -implementation that is most suitable for its use case. - - -## Minimum supported `rustc` - -`1.61.0+` - -This version is explicitly tested in CI and may be bumped in any release as needed. Maintaining compatibility with older compilers is a priority though, so the bar for bumping the minimum supported version is set very high. Any changes to the supported minimum version will be called out in the release notes. - -## Usage - -### In libraries - -Libraries should link only to the `log` crate, and use the provided macros to -log whatever information will be useful to downstream consumers: - -```toml -[dependencies] -log = "0.4" -``` - -```rust -use log::{info, trace, warn}; - -pub fn shave_the_yak(yak: &mut Yak) { - trace!("Commencing yak shaving"); - - loop { - match find_a_razor() { - Ok(razor) => { - info!("Razor located: {razor}"); - yak.shave(razor); - break; - } - Err(err) => { - warn!("Unable to locate a razor: {err}, retrying"); - } - } - } -} -``` - -### In executables - -In order to produce log output, executables have to use a logger implementation compatible with the facade. 
-There are many available implementations to choose from, here are some options: - -* Simple minimal loggers: - * [`env_logger`](https://docs.rs/env_logger/*/env_logger/) - * [`colog`](https://docs.rs/colog/*/colog/) - * [`simple_logger`](https://docs.rs/simple_logger/*/simple_logger/) - * [`simplelog`](https://docs.rs/simplelog/*/simplelog/) - * [`pretty_env_logger`](https://docs.rs/pretty_env_logger/*/pretty_env_logger/) - * [`stderrlog`](https://docs.rs/stderrlog/*/stderrlog/) - * [`flexi_logger`](https://docs.rs/flexi_logger/*/flexi_logger/) - * [`call_logger`](https://docs.rs/call_logger/*/call_logger/) - * [`std-logger`](https://docs.rs/std-logger/*/std_logger/) - * [`structured-logger`](https://docs.rs/structured-logger/latest/structured_logger/) - * [`clang_log`](https://docs.rs/clang_log/latest/clang_log) - * [`ftail`](https://docs.rs/ftail/latest/ftail/) -* Complex configurable frameworks: - * [`log4rs`](https://docs.rs/log4rs/*/log4rs/) - * [`logforth`](https://docs.rs/logforth/*/logforth/) - * [`fern`](https://docs.rs/fern/*/fern/) - * [`spdlog-rs`](https://docs.rs/spdlog-rs/*/spdlog/) -* Adaptors for other facilities: - * [`syslog`](https://docs.rs/syslog/*/syslog/) - * [`systemd-journal-logger`](https://docs.rs/systemd-journal-logger/*/systemd_journal_logger/) - * [`slog-stdlog`](https://docs.rs/slog-stdlog/*/slog_stdlog/) - * [`android_log`](https://docs.rs/android_log/*/android_log/) - * [`win_dbg_logger`](https://docs.rs/win_dbg_logger/*/win_dbg_logger/) - * [`db_logger`](https://docs.rs/db_logger/*/db_logger/) - * [`log-to-defmt`](https://docs.rs/log-to-defmt/*/log_to_defmt/) - * [`logcontrol-log`](https://docs.rs/logcontrol-log/*/logcontrol_log/) -* For WebAssembly binaries: - * [`console_log`](https://docs.rs/console_log/*/console_log/) -* For dynamic libraries: - * You may need to construct [an FFI-safe wrapper over `log`](https://github.com/rust-lang/log/issues/421) to initialize in your libraries. -* Utilities: - * [`log_err`](https://docs.rs/log_err/*/log_err/) - * [`log-reload`](https://docs.rs/log-reload/*/log_reload/) - * [`alterable_logger`](https://docs.rs/alterable_logger/*/alterable_logger) - -Executables should choose a logger implementation and initialize it early in the -runtime of the program. Logger implementations will typically include a -function to do this. Any log messages generated before the logger is -initialized will be ignored. - -The executable itself may use the `log` crate to log as well. - -## Structured logging - -If you enable the `kv` feature, you can associate structured data with your log records: - -```rust -use log::{info, trace, warn}; - -pub fn shave_the_yak(yak: &mut Yak) { - // `yak:serde` will capture `yak` using its `serde::Serialize` impl - // - // You could also use `:?` for `Debug`, or `:%` for `Display`. 
For a - // full list, see the `log` crate documentation - trace!(target = "yak_events", yak:serde; "Commencing yak shaving"); - - loop { - match find_a_razor() { - Ok(razor) => { - info!(razor; "Razor located"); - yak.shave(razor); - break; - } - Err(e) => { - // `e:err` will capture `e` using its `std::error::Error` impl - warn!(e:err; "Unable to locate a razor, retrying"); - } - } - } -} -``` diff --git a/vendor/log/benches/value.rs b/vendor/log/benches/value.rs deleted file mode 100644 index 3d0f18bfe43e06..00000000000000 --- a/vendor/log/benches/value.rs +++ /dev/null @@ -1,27 +0,0 @@ -#![cfg(feature = "kv")] -#![feature(test)] - -use log::kv::Value; - -#[bench] -fn u8_to_value(b: &mut test::Bencher) { - b.iter(|| Value::from(1u8)); -} - -#[bench] -fn u8_to_value_debug(b: &mut test::Bencher) { - b.iter(|| Value::from_debug(&1u8)); -} - -#[bench] -fn str_to_value_debug(b: &mut test::Bencher) { - b.iter(|| Value::from_debug(&"a string")); -} - -#[bench] -fn custom_to_value_debug(b: &mut test::Bencher) { - #[derive(Debug)] - struct A; - - b.iter(|| Value::from_debug(&A)); -} diff --git a/vendor/log/src/__private_api.rs b/vendor/log/src/__private_api.rs deleted file mode 100644 index 58d4c0fab621dd..00000000000000 --- a/vendor/log/src/__private_api.rs +++ /dev/null @@ -1,151 +0,0 @@ -//! WARNING: this is not part of the crate's public API and is subject to change at any time - -use self::sealed::KVs; -use crate::{logger, Level, Log, Metadata, Record}; -use std::fmt::Arguments; -use std::panic::Location; -pub use std::{format_args, module_path, stringify}; - -#[cfg(not(feature = "kv"))] -pub type Value<'a> = &'a str; - -mod sealed { - /// Types for the `kv` argument. - pub trait KVs<'a> { - fn into_kvs(self) -> Option<&'a [(&'a str, super::Value<'a>)]>; - } -} - -// Types for the `kv` argument. - -impl<'a> KVs<'a> for &'a [(&'a str, Value<'a>)] { - #[inline] - fn into_kvs(self) -> Option<&'a [(&'a str, Value<'a>)]> { - Some(self) - } -} - -impl<'a> KVs<'a> for () { - #[inline] - fn into_kvs(self) -> Option<&'a [(&'a str, Value<'a>)]> { - None - } -} - -// Log implementation. - -/// The global logger proxy. 
-#[derive(Debug)] -pub struct GlobalLogger; - -impl Log for GlobalLogger { - fn enabled(&self, metadata: &Metadata) -> bool { - logger().enabled(metadata) - } - - fn log(&self, record: &Record) { - logger().log(record) - } - - fn flush(&self) { - logger().flush() - } -} - -// Split from `log` to reduce generics and code size -fn log_impl( - logger: L, - args: Arguments, - level: Level, - &(target, module_path, loc): &(&str, &'static str, &'static Location), - kvs: Option<&[(&str, Value)]>, -) { - #[cfg(not(feature = "kv"))] - if kvs.is_some() { - panic!("key-value support is experimental and must be enabled using the `kv` feature") - } - - let mut builder = Record::builder(); - - builder - .args(args) - .level(level) - .target(target) - .module_path_static(Some(module_path)) - .file_static(Some(loc.file())) - .line(Some(loc.line())); - - #[cfg(feature = "kv")] - builder.key_values(&kvs); - - logger.log(&builder.build()); -} - -pub fn log<'a, K, L>( - logger: L, - args: Arguments, - level: Level, - target_module_path_and_loc: &(&str, &'static str, &'static Location), - kvs: K, -) where - K: KVs<'a>, - L: Log, -{ - log_impl( - logger, - args, - level, - target_module_path_and_loc, - kvs.into_kvs(), - ) -} - -pub fn enabled(logger: L, level: Level, target: &str) -> bool { - logger.enabled(&Metadata::builder().level(level).target(target).build()) -} - -#[track_caller] -pub fn loc() -> &'static Location<'static> { - Location::caller() -} - -#[cfg(feature = "kv")] -mod kv_support { - use crate::kv; - - pub type Value<'a> = kv::Value<'a>; - - // NOTE: Many functions here accept a double reference &&V - // This is so V itself can be ?Sized, while still letting us - // erase it to some dyn Trait (because &T is sized) - - pub fn capture_to_value<'a, V: kv::ToValue + ?Sized>(v: &'a &'a V) -> Value<'a> { - v.to_value() - } - - pub fn capture_debug<'a, V: core::fmt::Debug + ?Sized>(v: &'a &'a V) -> Value<'a> { - Value::from_debug(v) - } - - pub fn capture_display<'a, V: core::fmt::Display + ?Sized>(v: &'a &'a V) -> Value<'a> { - Value::from_display(v) - } - - #[cfg(feature = "kv_std")] - pub fn capture_error<'a>(v: &'a (dyn std::error::Error + 'static)) -> Value<'a> { - Value::from_dyn_error(v) - } - - #[cfg(feature = "kv_sval")] - pub fn capture_sval<'a, V: sval::Value + ?Sized>(v: &'a &'a V) -> Value<'a> { - Value::from_sval(v) - } - - #[cfg(feature = "kv_serde")] - pub fn capture_serde<'a, V: serde::Serialize + ?Sized>(v: &'a &'a V) -> Value<'a> { - Value::from_serde(v) - } -} - -#[cfg(feature = "kv")] -pub use self::kv_support::*; diff --git a/vendor/log/src/kv/error.rs b/vendor/log/src/kv/error.rs deleted file mode 100644 index 7efa5af3612605..00000000000000 --- a/vendor/log/src/kv/error.rs +++ /dev/null @@ -1,94 +0,0 @@ -use std::fmt; - -/// An error encountered while working with structured data. -#[derive(Debug)] -pub struct Error { - inner: Inner, -} - -#[derive(Debug)] -enum Inner { - #[cfg(feature = "std")] - Boxed(std_support::BoxedError), - Msg(&'static str), - #[cfg(feature = "value-bag")] - Value(crate::kv::value::inner::Error), - Fmt, -} - -impl Error { - /// Create an error from a message. 
- pub fn msg(msg: &'static str) -> Self { - Error { - inner: Inner::Msg(msg), - } - } - - // Not public so we don't leak the `crate::kv::value::inner` API - #[cfg(feature = "value-bag")] - pub(super) fn from_value(err: crate::kv::value::inner::Error) -> Self { - Error { - inner: Inner::Value(err), - } - } - - // Not public so we don't leak the `crate::kv::value::inner` API - #[cfg(feature = "value-bag")] - pub(super) fn into_value(self) -> crate::kv::value::inner::Error { - match self.inner { - Inner::Value(err) => err, - #[cfg(feature = "kv_std")] - _ => crate::kv::value::inner::Error::boxed(self), - #[cfg(not(feature = "kv_std"))] - _ => crate::kv::value::inner::Error::msg("error inspecting a value"), - } - } -} - -impl fmt::Display for Error { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - use self::Inner::*; - match &self.inner { - #[cfg(feature = "std")] - Boxed(err) => err.fmt(f), - #[cfg(feature = "value-bag")] - Value(err) => err.fmt(f), - Msg(msg) => msg.fmt(f), - Fmt => fmt::Error.fmt(f), - } - } -} - -impl From<fmt::Error> for Error { - fn from(_: fmt::Error) -> Self { - Error { inner: Inner::Fmt } - } -} - -#[cfg(feature = "std")] -mod std_support { - use super::*; - use std::{error, io}; - - pub(super) type BoxedError = Box<dyn error::Error + Send + Sync>; - - impl Error { - /// Create an error from a standard error type. - pub fn boxed<E>(err: E) -> Self - where - E: Into<BoxedError>, - { - Error { - inner: Inner::Boxed(err.into()), - } - } - } - - impl error::Error for Error {} - - impl From<io::Error> for Error { - fn from(err: io::Error) -> Self { - Error::boxed(err) - } - } -} diff --git a/vendor/log/src/kv/key.rs b/vendor/log/src/kv/key.rs deleted file mode 100644 index 6e00a2ca86a57f..00000000000000 --- a/vendor/log/src/kv/key.rs +++ /dev/null @@ -1,163 +0,0 @@ -//! Structured keys. - -use std::borrow::Borrow; -use std::fmt; - -/// A type that can be converted into a [`Key`](struct.Key.html). -pub trait ToKey { - /// Perform the conversion. - fn to_key(&self) -> Key; -} - -impl<'a, T> ToKey for &'a T -where - T: ToKey + ?Sized, -{ - fn to_key(&self) -> Key { - (**self).to_key() - } -} - -impl<'k> ToKey for Key<'k> { - fn to_key(&self) -> Key { - Key { key: self.key } - } -} - -impl ToKey for str { - fn to_key(&self) -> Key { - Key::from_str(self) - } -} - -/// A key in a key-value. -// These impls must only be based on the as_str() representation of the key -// If a new field (such as an optional index) is added to the key they must not affect comparison -#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] -pub struct Key<'k> { - // NOTE: This may become `Cow<'k, str>` - key: &'k str, -} - -impl<'k> Key<'k> { - /// Get a key from a borrowed string. - pub fn from_str(key: &'k str) -> Self { - Key { key } - } - - /// Get a borrowed string from this key. - /// - /// The lifetime of the returned string is bound to the borrow of `self` rather - /// than to `'k`. - pub fn as_str(&self) -> &str { - self.key - } - - /// Try get a borrowed string for the lifetime `'k` from this key. - /// - /// If the key is a borrow of a longer lived string, this method will return `Some`. - /// If the key is internally buffered, this method will return `None`. - pub fn to_borrowed_str(&self) -> Option<&'k str> { - // NOTE: If the internals of `Key` support buffering this
We want to keep - // this option open - Some(self.key) - } -} - -impl<'k> fmt::Display for Key<'k> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - self.key.fmt(f) - } -} - -impl<'k> AsRef for Key<'k> { - fn as_ref(&self) -> &str { - self.as_str() - } -} - -impl<'k> Borrow for Key<'k> { - fn borrow(&self) -> &str { - self.as_str() - } -} - -impl<'k> From<&'k str> for Key<'k> { - fn from(s: &'k str) -> Self { - Key::from_str(s) - } -} - -#[cfg(feature = "std")] -mod std_support { - use super::*; - - use std::borrow::Cow; - - impl ToKey for String { - fn to_key(&self) -> Key { - Key::from_str(self) - } - } - - impl<'a> ToKey for Cow<'a, str> { - fn to_key(&self) -> Key { - Key::from_str(self) - } - } -} - -#[cfg(feature = "kv_sval")] -mod sval_support { - use super::*; - - use sval::Value; - use sval_ref::ValueRef; - - impl<'a> Value for Key<'a> { - fn stream<'sval, S: sval::Stream<'sval> + ?Sized>( - &'sval self, - stream: &mut S, - ) -> sval::Result { - self.key.stream(stream) - } - } - - impl<'a> ValueRef<'a> for Key<'a> { - fn stream_ref + ?Sized>(&self, stream: &mut S) -> sval::Result { - self.key.stream(stream) - } - } -} - -#[cfg(feature = "kv_serde")] -mod serde_support { - use super::*; - - use serde::{Serialize, Serializer}; - - impl<'a> Serialize for Key<'a> { - fn serialize(&self, serializer: S) -> Result - where - S: Serializer, - { - self.key.serialize(serializer) - } - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn key_from_string() { - assert_eq!("a key", Key::from_str("a key").as_str()); - } - - #[test] - fn key_to_borrowed() { - assert_eq!("a key", Key::from_str("a key").to_borrowed_str().unwrap()); - } -} diff --git a/vendor/log/src/kv/mod.rs b/vendor/log/src/kv/mod.rs deleted file mode 100644 index 34e61c3ae5d59e..00000000000000 --- a/vendor/log/src/kv/mod.rs +++ /dev/null @@ -1,265 +0,0 @@ -//! Structured logging. -//! -//! Add the `kv` feature to your `Cargo.toml` to enable -//! this module: -//! -//! ```toml -//! [dependencies.log] -//! features = ["kv"] -//! ``` -//! -//! # Structured logging in `log` -//! -//! Structured logging enhances traditional text-based log records with user-defined -//! attributes. Structured logs can be analyzed using a variety of data processing -//! techniques, without needing to find and parse attributes from unstructured text first. -//! -//! In `log`, user-defined attributes are part of a [`Source`] on the log record. -//! Each attribute is a key-value; a pair of [`Key`] and [`Value`]. Keys are strings -//! and values are a datum of any type that can be formatted or serialized. Simple types -//! like strings, booleans, and numbers are supported, as well as arbitrarily complex -//! structures involving nested objects and sequences. -//! -//! ## Adding key-values to log records -//! -//! Key-values appear before the message format in the `log!` macros: -//! -//! ``` -//! # use log::info; -//! info!(a = 1; "Something of interest"); -//! ``` -//! -//! Key-values support the same shorthand identifier syntax as `format_args`: -//! -//! ``` -//! # use log::info; -//! let a = 1; -//! -//! info!(a; "Something of interest"); -//! ``` -//! -//! Values are capturing using the [`ToValue`] trait by default. To capture a value -//! using a different trait implementation, use a modifier after its key. Here's how -//! the same example can capture `a` using its `Debug` implementation instead: -//! -//! ``` -//! # use log::info; -//! info!(a:? = 1; "Something of interest"); -//! ``` -//! -//! 
The following capturing modifiers are supported: -//! -//! - `:?` will capture the value using `Debug`. -//! - `:debug` will capture the value using `Debug`. -//! - `:%` will capture the value using `Display`. -//! - `:display` will capture the value using `Display`. -//! - `:err` will capture the value using `std::error::Error` (requires the `kv_std` feature). -//! - `:sval` will capture the value using `sval::Value` (requires the `kv_sval` feature). -//! - `:serde` will capture the value using `serde::Serialize` (requires the `kv_serde` feature). -//! -//! ## Working with key-values on log records -//! -//! Use the [`Record::key_values`](../struct.Record.html#method.key_values) method to access key-values. -//! -//! Individual values can be pulled from the source by their key: -//! -//! ``` -//! # fn main() -> Result<(), log::kv::Error> { -//! use log::kv::{Source, Key, Value}; -//! # let record = log::Record::builder().key_values(&[("a", 1)]).build(); -//! -//! // info!(a = 1; "Something of interest"); -//! -//! let a: Value = record.key_values().get(Key::from("a")).unwrap(); -//! assert_eq!(1, a.to_i64().unwrap()); -//! # Ok(()) -//! # } -//! ``` -//! -//! All key-values can also be enumerated using a [`VisitSource`]: -//! -//! ``` -//! # fn main() -> Result<(), log::kv::Error> { -//! use std::collections::BTreeMap; -//! -//! use log::kv::{self, Source, Key, Value, VisitSource}; -//! -//! struct Collect<'kvs>(BTreeMap, Value<'kvs>>); -//! -//! impl<'kvs> VisitSource<'kvs> for Collect<'kvs> { -//! fn visit_pair(&mut self, key: Key<'kvs>, value: Value<'kvs>) -> Result<(), kv::Error> { -//! self.0.insert(key, value); -//! -//! Ok(()) -//! } -//! } -//! -//! let mut visitor = Collect(BTreeMap::new()); -//! -//! # let record = log::Record::builder().key_values(&[("a", 1), ("b", 2), ("c", 3)]).build(); -//! // info!(a = 1, b = 2, c = 3; "Something of interest"); -//! -//! record.key_values().visit(&mut visitor)?; -//! -//! let collected = visitor.0; -//! -//! assert_eq!( -//! vec!["a", "b", "c"], -//! collected -//! .keys() -//! .map(|k| k.as_str()) -//! .collect::>(), -//! ); -//! # Ok(()) -//! # } -//! ``` -//! -//! [`Value`]s have methods for conversions to common types: -//! -//! ``` -//! # fn main() -> Result<(), log::kv::Error> { -//! use log::kv::{Source, Key}; -//! # let record = log::Record::builder().key_values(&[("a", 1)]).build(); -//! -//! // info!(a = 1; "Something of interest"); -//! -//! let a = record.key_values().get(Key::from("a")).unwrap(); -//! -//! assert_eq!(1, a.to_i64().unwrap()); -//! # Ok(()) -//! # } -//! ``` -//! -//! Values also have their own [`VisitValue`] type. Value visitors are a lightweight -//! API for working with primitives types: -//! -//! ``` -//! # fn main() -> Result<(), log::kv::Error> { -//! use log::kv::{self, Source, Key, VisitValue}; -//! # let record = log::Record::builder().key_values(&[("a", 1)]).build(); -//! -//! struct IsNumeric(bool); -//! -//! impl<'kvs> VisitValue<'kvs> for IsNumeric { -//! fn visit_any(&mut self, _value: kv::Value) -> Result<(), kv::Error> { -//! self.0 = false; -//! Ok(()) -//! } -//! -//! fn visit_u64(&mut self, _value: u64) -> Result<(), kv::Error> { -//! self.0 = true; -//! Ok(()) -//! } -//! -//! fn visit_i64(&mut self, _value: i64) -> Result<(), kv::Error> { -//! self.0 = true; -//! Ok(()) -//! } -//! -//! fn visit_u128(&mut self, _value: u128) -> Result<(), kv::Error> { -//! self.0 = true; -//! Ok(()) -//! } -//! -//! fn visit_i128(&mut self, _value: i128) -> Result<(), kv::Error> { -//! self.0 = true; -//! 
Ok(()) -//! } -//! -//! fn visit_f64(&mut self, _value: f64) -> Result<(), kv::Error> { -//! self.0 = true; -//! Ok(()) -//! } -//! } -//! -//! // info!(a = 1; "Something of interest"); -//! -//! let a = record.key_values().get(Key::from("a")).unwrap(); -//! -//! let mut visitor = IsNumeric(false); -//! -//! a.visit(&mut visitor)?; -//! -//! let is_numeric = visitor.0; -//! -//! assert!(is_numeric); -//! # Ok(()) -//! # } -//! ``` -//! -//! To serialize a value to a format like JSON, you can also use either `serde` or `sval`: -//! -//! ``` -//! # fn main() -> Result<(), Box> { -//! # #[cfg(feature = "serde")] -//! # { -//! # use log::kv::Key; -//! #[derive(serde::Serialize)] -//! struct Data { -//! a: i32, b: bool, -//! c: &'static str, -//! } -//! -//! let data = Data { a: 1, b: true, c: "Some data" }; -//! -//! # let source = [("a", log::kv::Value::from_serde(&data))]; -//! # let record = log::Record::builder().key_values(&source).build(); -//! // info!(a = data; "Something of interest"); -//! -//! let a = record.key_values().get(Key::from("a")).unwrap(); -//! -//! assert_eq!("{\"a\":1,\"b\":true,\"c\":\"Some data\"}", serde_json::to_string(&a)?); -//! # } -//! # Ok(()) -//! # } -//! ``` -//! -//! The choice of serialization framework depends on the needs of the consumer. -//! If you're in a no-std environment, you can use `sval`. In other cases, you can use `serde`. -//! Log producers and log consumers don't need to agree on the serialization framework. -//! A value can be captured using its `serde::Serialize` implementation and still be serialized -//! through `sval` without losing any structure or data. -//! -//! Values can also always be formatted using the standard `Debug` and `Display` -//! traits: -//! -//! ``` -//! # use log::kv::Key; -//! #[derive(Debug)] -//! struct Data { -//! a: i32, -//! b: bool, -//! c: &'static str, -//! } -//! -//! let data = Data { a: 1, b: true, c: "Some data" }; -//! -//! # let source = [("a", log::kv::Value::from_debug(&data))]; -//! # let record = log::Record::builder().key_values(&source).build(); -//! // info!(a = data; "Something of interest"); -//! -//! let a = record.key_values().get(Key::from("a")).unwrap(); -//! -//! assert_eq!("Data { a: 1, b: true, c: \"Some data\" }", format!("{a:?}")); -//! ``` - -mod error; -mod key; - -#[cfg(not(feature = "kv_unstable"))] -mod source; -#[cfg(not(feature = "kv_unstable"))] -mod value; - -pub use self::error::Error; -pub use self::key::{Key, ToKey}; -pub use self::source::{Source, VisitSource}; -pub use self::value::{ToValue, Value, VisitValue}; - -#[cfg(feature = "kv_unstable")] -pub mod source; -#[cfg(feature = "kv_unstable")] -pub mod value; - -#[cfg(feature = "kv_unstable")] -pub use self::source::Visitor; diff --git a/vendor/log/src/kv/source.rs b/vendor/log/src/kv/source.rs deleted file mode 100644 index f463e6d2b68a82..00000000000000 --- a/vendor/log/src/kv/source.rs +++ /dev/null @@ -1,514 +0,0 @@ -//! Sources for key-values. -//! -//! This module defines the [`Source`] type and supporting APIs for -//! working with collections of key-values. - -use crate::kv::{Error, Key, ToKey, ToValue, Value}; -use std::fmt; - -/// A source of key-values. -/// -/// The source may be a single pair, a set of pairs, or a filter over a set of pairs. -/// Use the [`VisitSource`](trait.VisitSource.html) trait to inspect the structured data -/// in a source. -/// -/// A source is like an iterator over its key-values, except with a push-based API -/// instead of a pull-based one. 
-/// -/// # Examples -/// -/// Enumerating the key-values in a source: -/// -/// ``` -/// # fn main() -> Result<(), log::kv::Error> { -/// use log::kv::{self, Source, Key, Value, VisitSource}; -/// -/// // A `VisitSource` that prints all key-values -/// // VisitSources are fed the key-value pairs of each key-values -/// struct Printer; -/// -/// impl<'kvs> VisitSource<'kvs> for Printer { -/// fn visit_pair(&mut self, key: Key<'kvs>, value: Value<'kvs>) -> Result<(), kv::Error> { -/// println!("{key}: {value}"); -/// -/// Ok(()) -/// } -/// } -/// -/// // A source with 3 key-values -/// // Common collection types implement the `Source` trait -/// let source = &[ -/// ("a", 1), -/// ("b", 2), -/// ("c", 3), -/// ]; -/// -/// // Pass an instance of the `VisitSource` to a `Source` to visit it -/// source.visit(&mut Printer)?; -/// # Ok(()) -/// # } -/// ``` -pub trait Source { - /// Visit key-values. - /// - /// A source doesn't have to guarantee any ordering or uniqueness of key-values. - /// If the given visitor returns an error then the source may early-return with it, - /// even if there are more key-values. - /// - /// # Implementation notes - /// - /// A source should yield the same key-values to a subsequent visitor unless - /// that visitor itself fails. - fn visit<'kvs>(&'kvs self, visitor: &mut dyn VisitSource<'kvs>) -> Result<(), Error>; - - /// Get the value for a given key. - /// - /// If the key appears multiple times in the source then which key is returned - /// is implementation specific. - /// - /// # Implementation notes - /// - /// A source that can provide a more efficient implementation of this method - /// should override it. - fn get(&self, key: Key) -> Option> { - get_default(self, key) - } - - /// Count the number of key-values that can be visited. - /// - /// # Implementation notes - /// - /// A source that knows the number of key-values upfront may provide a more - /// efficient implementation. - /// - /// A subsequent call to `visit` should yield the same number of key-values. - fn count(&self) -> usize { - count_default(self) - } -} - -/// The default implementation of `Source::get` -fn get_default<'v>(source: &'v (impl Source + ?Sized), key: Key) -> Option> { - struct Get<'k, 'v> { - key: Key<'k>, - found: Option>, - } - - impl<'k, 'kvs> VisitSource<'kvs> for Get<'k, 'kvs> { - fn visit_pair(&mut self, key: Key<'kvs>, value: Value<'kvs>) -> Result<(), Error> { - if self.key == key { - self.found = Some(value); - } - - Ok(()) - } - } - - let mut get = Get { key, found: None }; - - let _ = source.visit(&mut get); - get.found -} - -/// The default implementation of `Source::count`. 
-fn count_default(source: impl Source) -> usize { - struct Count(usize); - - impl<'kvs> VisitSource<'kvs> for Count { - fn visit_pair(&mut self, _: Key<'kvs>, _: Value<'kvs>) -> Result<(), Error> { - self.0 += 1; - - Ok(()) - } - } - - let mut count = Count(0); - let _ = source.visit(&mut count); - count.0 -} - -impl<'a, T> Source for &'a T -where - T: Source + ?Sized, -{ - fn visit<'kvs>(&'kvs self, visitor: &mut dyn VisitSource<'kvs>) -> Result<(), Error> { - Source::visit(&**self, visitor) - } - - fn get(&self, key: Key) -> Option> { - Source::get(&**self, key) - } - - fn count(&self) -> usize { - Source::count(&**self) - } -} - -impl Source for (K, V) -where - K: ToKey, - V: ToValue, -{ - fn visit<'kvs>(&'kvs self, visitor: &mut dyn VisitSource<'kvs>) -> Result<(), Error> { - visitor.visit_pair(self.0.to_key(), self.1.to_value()) - } - - fn get(&self, key: Key) -> Option> { - if self.0.to_key() == key { - Some(self.1.to_value()) - } else { - None - } - } - - fn count(&self) -> usize { - 1 - } -} - -impl Source for [S] -where - S: Source, -{ - fn visit<'kvs>(&'kvs self, visitor: &mut dyn VisitSource<'kvs>) -> Result<(), Error> { - for source in self { - source.visit(visitor)?; - } - - Ok(()) - } - - fn get(&self, key: Key) -> Option> { - for source in self { - if let Some(found) = source.get(key.clone()) { - return Some(found); - } - } - - None - } - - fn count(&self) -> usize { - self.iter().map(Source::count).sum() - } -} - -impl Source for [S; N] -where - S: Source, -{ - fn visit<'kvs>(&'kvs self, visitor: &mut dyn VisitSource<'kvs>) -> Result<(), Error> { - Source::visit(self as &[_], visitor) - } - - fn get(&self, key: Key) -> Option> { - Source::get(self as &[_], key) - } - - fn count(&self) -> usize { - Source::count(self as &[_]) - } -} - -impl Source for Option -where - S: Source, -{ - fn visit<'kvs>(&'kvs self, visitor: &mut dyn VisitSource<'kvs>) -> Result<(), Error> { - if let Some(source) = self { - source.visit(visitor)?; - } - - Ok(()) - } - - fn get(&self, key: Key) -> Option> { - self.as_ref().and_then(|s| s.get(key)) - } - - fn count(&self) -> usize { - self.as_ref().map_or(0, Source::count) - } -} - -/// A visitor for the key-value pairs in a [`Source`](trait.Source.html). -pub trait VisitSource<'kvs> { - /// Visit a key-value pair. 
- fn visit_pair(&mut self, key: Key<'kvs>, value: Value<'kvs>) -> Result<(), Error>; -} - -impl<'a, 'kvs, T> VisitSource<'kvs> for &'a mut T -where - T: VisitSource<'kvs> + ?Sized, -{ - fn visit_pair(&mut self, key: Key<'kvs>, value: Value<'kvs>) -> Result<(), Error> { - (**self).visit_pair(key, value) - } -} - -impl<'a, 'b: 'a, 'kvs> VisitSource<'kvs> for fmt::DebugMap<'a, 'b> { - fn visit_pair(&mut self, key: Key<'kvs>, value: Value<'kvs>) -> Result<(), Error> { - self.entry(&key, &value); - Ok(()) - } -} - -impl<'a, 'b: 'a, 'kvs> VisitSource<'kvs> for fmt::DebugList<'a, 'b> { - fn visit_pair(&mut self, key: Key<'kvs>, value: Value<'kvs>) -> Result<(), Error> { - self.entry(&(key, value)); - Ok(()) - } -} - -impl<'a, 'b: 'a, 'kvs> VisitSource<'kvs> for fmt::DebugSet<'a, 'b> { - fn visit_pair(&mut self, key: Key<'kvs>, value: Value<'kvs>) -> Result<(), Error> { - self.entry(&(key, value)); - Ok(()) - } -} - -impl<'a, 'b: 'a, 'kvs> VisitSource<'kvs> for fmt::DebugTuple<'a, 'b> { - fn visit_pair(&mut self, key: Key<'kvs>, value: Value<'kvs>) -> Result<(), Error> { - self.field(&key); - self.field(&value); - Ok(()) - } -} - -#[cfg(feature = "std")] -mod std_support { - use super::*; - use std::borrow::Borrow; - use std::collections::{BTreeMap, HashMap}; - use std::hash::{BuildHasher, Hash}; - use std::rc::Rc; - use std::sync::Arc; - - impl Source for Box - where - S: Source + ?Sized, - { - fn visit<'kvs>(&'kvs self, visitor: &mut dyn VisitSource<'kvs>) -> Result<(), Error> { - Source::visit(&**self, visitor) - } - - fn get(&self, key: Key) -> Option> { - Source::get(&**self, key) - } - - fn count(&self) -> usize { - Source::count(&**self) - } - } - - impl Source for Arc - where - S: Source + ?Sized, - { - fn visit<'kvs>(&'kvs self, visitor: &mut dyn VisitSource<'kvs>) -> Result<(), Error> { - Source::visit(&**self, visitor) - } - - fn get(&self, key: Key) -> Option> { - Source::get(&**self, key) - } - - fn count(&self) -> usize { - Source::count(&**self) - } - } - - impl Source for Rc - where - S: Source + ?Sized, - { - fn visit<'kvs>(&'kvs self, visitor: &mut dyn VisitSource<'kvs>) -> Result<(), Error> { - Source::visit(&**self, visitor) - } - - fn get(&self, key: Key) -> Option> { - Source::get(&**self, key) - } - - fn count(&self) -> usize { - Source::count(&**self) - } - } - - impl Source for Vec - where - S: Source, - { - fn visit<'kvs>(&'kvs self, visitor: &mut dyn VisitSource<'kvs>) -> Result<(), Error> { - Source::visit(&**self, visitor) - } - - fn get(&self, key: Key) -> Option> { - Source::get(&**self, key) - } - - fn count(&self) -> usize { - Source::count(&**self) - } - } - - impl<'kvs, V> VisitSource<'kvs> for Box - where - V: VisitSource<'kvs> + ?Sized, - { - fn visit_pair(&mut self, key: Key<'kvs>, value: Value<'kvs>) -> Result<(), Error> { - (**self).visit_pair(key, value) - } - } - - impl Source for HashMap - where - K: ToKey + Borrow + Eq + Hash, - V: ToValue, - S: BuildHasher, - { - fn visit<'kvs>(&'kvs self, visitor: &mut dyn VisitSource<'kvs>) -> Result<(), Error> { - for (key, value) in self { - visitor.visit_pair(key.to_key(), value.to_value())?; - } - Ok(()) - } - - fn get(&self, key: Key) -> Option> { - HashMap::get(self, key.as_str()).map(|v| v.to_value()) - } - - fn count(&self) -> usize { - self.len() - } - } - - impl Source for BTreeMap - where - K: ToKey + Borrow + Ord, - V: ToValue, - { - fn visit<'kvs>(&'kvs self, visitor: &mut dyn VisitSource<'kvs>) -> Result<(), Error> { - for (key, value) in self { - visitor.visit_pair(key.to_key(), value.to_value())?; - } 
- Ok(()) - } - - fn get(&self, key: Key) -> Option> { - BTreeMap::get(self, key.as_str()).map(|v| v.to_value()) - } - - fn count(&self) -> usize { - self.len() - } - } - - #[cfg(test)] - mod tests { - use crate::kv::value; - - use super::*; - - #[test] - fn count() { - assert_eq!(1, Source::count(&Box::new(("a", 1)))); - assert_eq!(2, Source::count(&vec![("a", 1), ("b", 2)])); - } - - #[test] - fn get() { - let source = vec![("a", 1), ("b", 2), ("a", 1)]; - assert_eq!( - value::inner::Token::I64(1), - Source::get(&source, Key::from_str("a")).unwrap().to_token() - ); - - let source = Box::new(None::<(&str, i32)>); - assert!(Source::get(&source, Key::from_str("a")).is_none()); - } - - #[test] - fn hash_map() { - let mut map = HashMap::new(); - map.insert("a", 1); - map.insert("b", 2); - - assert_eq!(2, Source::count(&map)); - assert_eq!( - value::inner::Token::I64(1), - Source::get(&map, Key::from_str("a")).unwrap().to_token() - ); - } - - #[test] - fn btree_map() { - let mut map = BTreeMap::new(); - map.insert("a", 1); - map.insert("b", 2); - - assert_eq!(2, Source::count(&map)); - assert_eq!( - value::inner::Token::I64(1), - Source::get(&map, Key::from_str("a")).unwrap().to_token() - ); - } - } -} - -// NOTE: Deprecated; but aliases can't carry this attribute -#[cfg(feature = "kv_unstable")] -pub use VisitSource as Visitor; - -#[cfg(test)] -mod tests { - use crate::kv::value; - - use super::*; - - #[test] - fn source_is_object_safe() { - fn _check(_: &dyn Source) {} - } - - #[test] - fn visitor_is_object_safe() { - fn _check(_: &dyn VisitSource) {} - } - - #[test] - fn count() { - struct OnePair { - key: &'static str, - value: i32, - } - - impl Source for OnePair { - fn visit<'kvs>(&'kvs self, visitor: &mut dyn VisitSource<'kvs>) -> Result<(), Error> { - visitor.visit_pair(self.key.to_key(), self.value.to_value()) - } - } - - assert_eq!(1, Source::count(&("a", 1))); - assert_eq!(2, Source::count(&[("a", 1), ("b", 2)] as &[_])); - assert_eq!(0, Source::count(&None::<(&str, i32)>)); - assert_eq!(1, Source::count(&OnePair { key: "a", value: 1 })); - } - - #[test] - fn get() { - let source = &[("a", 1), ("b", 2), ("a", 1)] as &[_]; - assert_eq!( - value::inner::Token::I64(1), - Source::get(source, Key::from_str("a")).unwrap().to_token() - ); - assert_eq!( - value::inner::Token::I64(2), - Source::get(source, Key::from_str("b")).unwrap().to_token() - ); - assert!(Source::get(&source, Key::from_str("c")).is_none()); - - let source = None::<(&str, i32)>; - assert!(Source::get(&source, Key::from_str("a")).is_none()); - } -} diff --git a/vendor/log/src/kv/value.rs b/vendor/log/src/kv/value.rs deleted file mode 100644 index e604c806c61041..00000000000000 --- a/vendor/log/src/kv/value.rs +++ /dev/null @@ -1,1395 +0,0 @@ -//! Structured values. -//! -//! This module defines the [`Value`] type and supporting APIs for -//! capturing and serializing them. - -use std::fmt; - -pub use crate::kv::Error; - -/// A type that can be converted into a [`Value`](struct.Value.html). -pub trait ToValue { - /// Perform the conversion. - fn to_value(&self) -> Value; -} - -impl<'a, T> ToValue for &'a T -where - T: ToValue + ?Sized, -{ - fn to_value(&self) -> Value { - (**self).to_value() - } -} - -impl<'v> ToValue for Value<'v> { - fn to_value(&self) -> Value { - Value { - inner: self.inner.clone(), - } - } -} - -/// A value in a key-value. -/// -/// Values are an anonymous bag containing some structured datum. 
-/// -/// # Capturing values -/// -/// There are a few ways to capture a value: -/// -/// - Using the `Value::from_*` methods. -/// - Using the `ToValue` trait. -/// - Using the standard `From` trait. -/// -/// ## Using the `Value::from_*` methods -/// -/// `Value` offers a few constructor methods that capture values of different kinds. -/// -/// ``` -/// use log::kv::Value; -/// -/// let value = Value::from_debug(&42i32); -/// -/// assert_eq!(None, value.to_i64()); -/// ``` -/// -/// ## Using the `ToValue` trait -/// -/// The `ToValue` trait can be used to capture values generically. -/// It's the bound used by `Source`. -/// -/// ``` -/// # use log::kv::ToValue; -/// let value = 42i32.to_value(); -/// -/// assert_eq!(Some(42), value.to_i64()); -/// ``` -/// -/// ## Using the standard `From` trait -/// -/// Standard types that implement `ToValue` also implement `From`. -/// -/// ``` -/// use log::kv::Value; -/// -/// let value = Value::from(42i32); -/// -/// assert_eq!(Some(42), value.to_i64()); -/// ``` -/// -/// # Data model -/// -/// Values can hold one of a number of types: -/// -/// - **Null:** The absence of any other meaningful value. Note that -/// `Some(Value::null())` is not the same as `None`. The former is -/// `null` while the latter is `undefined`. This is important to be -/// able to tell the difference between a key-value that was logged, -/// but its value was empty (`Some(Value::null())`) and a key-value -/// that was never logged at all (`None`). -/// - **Strings:** `str`, `char`. -/// - **Booleans:** `bool`. -/// - **Integers:** `u8`-`u128`, `i8`-`i128`, `NonZero*`. -/// - **Floating point numbers:** `f32`-`f64`. -/// - **Errors:** `dyn (Error + 'static)`. -/// - **`serde`:** Any type in `serde`'s data model. -/// - **`sval`:** Any type in `sval`'s data model. -/// -/// # Serialization -/// -/// Values provide a number of ways to be serialized. -/// -/// For basic types the [`Value::visit`] method can be used to extract the -/// underlying typed value. However, this is limited in the amount of types -/// supported (see the [`VisitValue`] trait methods). -/// -/// For more complex types one of the following traits can be used: -/// * `sval::Value`, requires the `kv_sval` feature. -/// * `serde::Serialize`, requires the `kv_serde` feature. -/// -/// You don't need a visitor to serialize values through `serde` or `sval`. -/// -/// A value can always be serialized using any supported framework, regardless -/// of how it was captured. If, for example, a value was captured using its -/// `Display` implementation, it will serialize through `serde` as a string. If it was -/// captured as a struct using `serde`, it will also serialize as a struct -/// through `sval`, or can be formatted using a `Debug`-compatible representation. -#[derive(Clone)] -pub struct Value<'v> { - inner: inner::Inner<'v>, -} - -impl<'v> Value<'v> { - /// Get a value from a type implementing `ToValue`. - pub fn from_any(value: &'v T) -> Self - where - T: ToValue, - { - value.to_value() - } - - /// Get a value from a type implementing `std::fmt::Debug`. - pub fn from_debug(value: &'v T) -> Self - where - T: fmt::Debug, - { - Value { - inner: inner::Inner::from_debug(value), - } - } - - /// Get a value from a type implementing `std::fmt::Display`. - pub fn from_display(value: &'v T) -> Self - where - T: fmt::Display, - { - Value { - inner: inner::Inner::from_display(value), - } - } - - /// Get a value from a type implementing `serde::Serialize`. 
- #[cfg(feature = "kv_serde")] - pub fn from_serde(value: &'v T) -> Self - where - T: serde::Serialize, - { - Value { - inner: inner::Inner::from_serde1(value), - } - } - - /// Get a value from a type implementing `sval::Value`. - #[cfg(feature = "kv_sval")] - pub fn from_sval(value: &'v T) -> Self - where - T: sval::Value, - { - Value { - inner: inner::Inner::from_sval2(value), - } - } - - /// Get a value from a dynamic `std::fmt::Debug`. - pub fn from_dyn_debug(value: &'v dyn fmt::Debug) -> Self { - Value { - inner: inner::Inner::from_dyn_debug(value), - } - } - - /// Get a value from a dynamic `std::fmt::Display`. - pub fn from_dyn_display(value: &'v dyn fmt::Display) -> Self { - Value { - inner: inner::Inner::from_dyn_display(value), - } - } - - /// Get a value from a dynamic error. - #[cfg(feature = "kv_std")] - pub fn from_dyn_error(err: &'v (dyn std::error::Error + 'static)) -> Self { - Value { - inner: inner::Inner::from_dyn_error(err), - } - } - - /// Get a `null` value. - pub fn null() -> Self { - Value { - inner: inner::Inner::empty(), - } - } - - /// Get a value from an internal primitive. - fn from_inner(value: T) -> Self - where - T: Into>, - { - Value { - inner: value.into(), - } - } - - /// Inspect this value using a simple visitor. - /// - /// When the `kv_serde` or `kv_sval` features are enabled, you can also - /// serialize a value using its `Serialize` or `Value` implementation. - pub fn visit(&self, visitor: impl VisitValue<'v>) -> Result<(), Error> { - inner::visit(&self.inner, visitor) - } -} - -impl<'v> fmt::Debug for Value<'v> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - fmt::Debug::fmt(&self.inner, f) - } -} - -impl<'v> fmt::Display for Value<'v> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - fmt::Display::fmt(&self.inner, f) - } -} - -#[cfg(feature = "kv_serde")] -impl<'v> serde::Serialize for Value<'v> { - fn serialize(&self, s: S) -> Result - where - S: serde::Serializer, - { - self.inner.serialize(s) - } -} - -#[cfg(feature = "kv_sval")] -impl<'v> sval::Value for Value<'v> { - fn stream<'sval, S: sval::Stream<'sval> + ?Sized>(&'sval self, stream: &mut S) -> sval::Result { - sval::Value::stream(&self.inner, stream) - } -} - -#[cfg(feature = "kv_sval")] -impl<'v> sval_ref::ValueRef<'v> for Value<'v> { - fn stream_ref + ?Sized>(&self, stream: &mut S) -> sval::Result { - sval_ref::ValueRef::stream_ref(&self.inner, stream) - } -} - -impl ToValue for str { - fn to_value(&self) -> Value { - Value::from(self) - } -} - -impl<'v> From<&'v str> for Value<'v> { - fn from(value: &'v str) -> Self { - Value::from_inner(value) - } -} - -impl ToValue for () { - fn to_value(&self) -> Value { - Value::from_inner(()) - } -} - -impl ToValue for Option -where - T: ToValue, -{ - fn to_value(&self) -> Value { - match *self { - Some(ref value) => value.to_value(), - None => Value::from_inner(()), - } - } -} - -macro_rules! impl_to_value_primitive { - ($($into_ty:ty,)*) => { - $( - impl ToValue for $into_ty { - fn to_value(&self) -> Value { - Value::from(*self) - } - } - - impl<'v> From<$into_ty> for Value<'v> { - fn from(value: $into_ty) -> Self { - Value::from_inner(value) - } - } - - impl<'v> From<&'v $into_ty> for Value<'v> { - fn from(value: &'v $into_ty) -> Self { - Value::from_inner(*value) - } - } - )* - }; -} - -macro_rules! 
impl_to_value_nonzero_primitive { - ($($into_ty:ident,)*) => { - $( - impl ToValue for std::num::$into_ty { - fn to_value(&self) -> Value { - Value::from(self.get()) - } - } - - impl<'v> From for Value<'v> { - fn from(value: std::num::$into_ty) -> Self { - Value::from(value.get()) - } - } - - impl<'v> From<&'v std::num::$into_ty> for Value<'v> { - fn from(value: &'v std::num::$into_ty) -> Self { - Value::from(value.get()) - } - } - )* - }; -} - -macro_rules! impl_value_to_primitive { - ($(#[doc = $doc:tt] $into_name:ident -> $into_ty:ty,)*) => { - impl<'v> Value<'v> { - $( - #[doc = $doc] - pub fn $into_name(&self) -> Option<$into_ty> { - self.inner.$into_name() - } - )* - } - } -} - -impl_to_value_primitive![ - usize, u8, u16, u32, u64, u128, isize, i8, i16, i32, i64, i128, f32, f64, char, bool, -]; - -#[rustfmt::skip] -impl_to_value_nonzero_primitive![ - NonZeroUsize, NonZeroU8, NonZeroU16, NonZeroU32, NonZeroU64, NonZeroU128, - NonZeroIsize, NonZeroI8, NonZeroI16, NonZeroI32, NonZeroI64, NonZeroI128, -]; - -impl_value_to_primitive![ - #[doc = "Try convert this value into a `u64`."] - to_u64 -> u64, - #[doc = "Try convert this value into a `i64`."] - to_i64 -> i64, - #[doc = "Try convert this value into a `u128`."] - to_u128 -> u128, - #[doc = "Try convert this value into a `i128`."] - to_i128 -> i128, - #[doc = "Try convert this value into a `f64`."] - to_f64 -> f64, - #[doc = "Try convert this value into a `char`."] - to_char -> char, - #[doc = "Try convert this value into a `bool`."] - to_bool -> bool, -]; - -impl<'v> Value<'v> { - /// Try to convert this value into an error. - #[cfg(feature = "kv_std")] - pub fn to_borrowed_error(&self) -> Option<&(dyn std::error::Error + 'static)> { - self.inner.to_borrowed_error() - } - - /// Try to convert this value into a borrowed string. - pub fn to_borrowed_str(&self) -> Option<&'v str> { - self.inner.to_borrowed_str() - } -} - -#[cfg(feature = "kv_std")] -mod std_support { - use std::borrow::Cow; - use std::rc::Rc; - use std::sync::Arc; - - use super::*; - - impl ToValue for Box - where - T: ToValue + ?Sized, - { - fn to_value(&self) -> Value { - (**self).to_value() - } - } - - impl ToValue for Arc - where - T: ToValue + ?Sized, - { - fn to_value(&self) -> Value { - (**self).to_value() - } - } - - impl ToValue for Rc - where - T: ToValue + ?Sized, - { - fn to_value(&self) -> Value { - (**self).to_value() - } - } - - impl ToValue for String { - fn to_value(&self) -> Value { - Value::from(&**self) - } - } - - impl<'v> ToValue for Cow<'v, str> { - fn to_value(&self) -> Value { - Value::from(&**self) - } - } - - impl<'v> Value<'v> { - /// Try convert this value into a string. - pub fn to_cow_str(&self) -> Option> { - self.inner.to_str() - } - } - - impl<'v> From<&'v String> for Value<'v> { - fn from(v: &'v String) -> Self { - Value::from(&**v) - } - } -} - -/// A visitor for a [`Value`]. -/// -/// Also see [`Value`'s documentation on serialization]. Value visitors are a simple alternative -/// to a more fully-featured serialization framework like `serde` or `sval`. A value visitor -/// can differentiate primitive types through methods like [`VisitValue::visit_bool`] and -/// [`VisitValue::visit_str`], but more complex types like maps and sequences -/// will fallthrough to [`VisitValue::visit_any`]. -/// -/// If you're trying to serialize a value to a format like JSON, you can use either `serde` -/// or `sval` directly with the value. You don't need a visitor. 
-/// -/// [`Value`'s documentation on serialization]: Value#serialization -pub trait VisitValue<'v> { - /// Visit a `Value`. - /// - /// This is the only required method on `VisitValue` and acts as a fallback for any - /// more specific methods that aren't overridden. - /// The `Value` may be formatted using its `fmt::Debug` or `fmt::Display` implementation, - /// or serialized using its `sval::Value` or `serde::Serialize` implementation. - fn visit_any(&mut self, value: Value) -> Result<(), Error>; - - /// Visit an empty value. - fn visit_null(&mut self) -> Result<(), Error> { - self.visit_any(Value::null()) - } - - /// Visit an unsigned integer. - fn visit_u64(&mut self, value: u64) -> Result<(), Error> { - self.visit_any(value.into()) - } - - /// Visit a signed integer. - fn visit_i64(&mut self, value: i64) -> Result<(), Error> { - self.visit_any(value.into()) - } - - /// Visit a big unsigned integer. - fn visit_u128(&mut self, value: u128) -> Result<(), Error> { - self.visit_any((value).into()) - } - - /// Visit a big signed integer. - fn visit_i128(&mut self, value: i128) -> Result<(), Error> { - self.visit_any((value).into()) - } - - /// Visit a floating point. - fn visit_f64(&mut self, value: f64) -> Result<(), Error> { - self.visit_any(value.into()) - } - - /// Visit a boolean. - fn visit_bool(&mut self, value: bool) -> Result<(), Error> { - self.visit_any(value.into()) - } - - /// Visit a string. - fn visit_str(&mut self, value: &str) -> Result<(), Error> { - self.visit_any(value.into()) - } - - /// Visit a string. - fn visit_borrowed_str(&mut self, value: &'v str) -> Result<(), Error> { - self.visit_str(value) - } - - /// Visit a Unicode character. - fn visit_char(&mut self, value: char) -> Result<(), Error> { - let mut b = [0; 4]; - self.visit_str(&*value.encode_utf8(&mut b)) - } - - /// Visit an error. - #[cfg(feature = "kv_std")] - fn visit_error(&mut self, err: &(dyn std::error::Error + 'static)) -> Result<(), Error> { - self.visit_any(Value::from_dyn_error(err)) - } - - /// Visit an error. 
- #[cfg(feature = "kv_std")] - fn visit_borrowed_error( - &mut self, - err: &'v (dyn std::error::Error + 'static), - ) -> Result<(), Error> { - self.visit_any(Value::from_dyn_error(err)) - } -} - -impl<'a, 'v, T: ?Sized> VisitValue<'v> for &'a mut T -where - T: VisitValue<'v>, -{ - fn visit_any(&mut self, value: Value) -> Result<(), Error> { - (**self).visit_any(value) - } - - fn visit_null(&mut self) -> Result<(), Error> { - (**self).visit_null() - } - - fn visit_u64(&mut self, value: u64) -> Result<(), Error> { - (**self).visit_u64(value) - } - - fn visit_i64(&mut self, value: i64) -> Result<(), Error> { - (**self).visit_i64(value) - } - - fn visit_u128(&mut self, value: u128) -> Result<(), Error> { - (**self).visit_u128(value) - } - - fn visit_i128(&mut self, value: i128) -> Result<(), Error> { - (**self).visit_i128(value) - } - - fn visit_f64(&mut self, value: f64) -> Result<(), Error> { - (**self).visit_f64(value) - } - - fn visit_bool(&mut self, value: bool) -> Result<(), Error> { - (**self).visit_bool(value) - } - - fn visit_str(&mut self, value: &str) -> Result<(), Error> { - (**self).visit_str(value) - } - - fn visit_borrowed_str(&mut self, value: &'v str) -> Result<(), Error> { - (**self).visit_borrowed_str(value) - } - - fn visit_char(&mut self, value: char) -> Result<(), Error> { - (**self).visit_char(value) - } - - #[cfg(feature = "kv_std")] - fn visit_error(&mut self, err: &(dyn std::error::Error + 'static)) -> Result<(), Error> { - (**self).visit_error(err) - } - - #[cfg(feature = "kv_std")] - fn visit_borrowed_error( - &mut self, - err: &'v (dyn std::error::Error + 'static), - ) -> Result<(), Error> { - (**self).visit_borrowed_error(err) - } -} - -#[cfg(feature = "value-bag")] -pub(in crate::kv) mod inner { - /** - An implementation of `Value` based on a library called `value_bag`. - - `value_bag` was written specifically for use in `log`'s value, but was split out when it outgrew - the codebase here. It's a general-purpose type-erasure library that handles mapping between - more fully-featured serialization frameworks. 
- */ - use super::*; - - pub use value_bag::ValueBag as Inner; - - pub use value_bag::Error; - - #[cfg(test)] - pub use value_bag::test::TestToken as Token; - - pub fn visit<'v>( - inner: &Inner<'v>, - visitor: impl VisitValue<'v>, - ) -> Result<(), crate::kv::Error> { - struct InnerVisitValue(V); - - impl<'v, V> value_bag::visit::Visit<'v> for InnerVisitValue - where - V: VisitValue<'v>, - { - fn visit_any(&mut self, value: value_bag::ValueBag) -> Result<(), Error> { - self.0 - .visit_any(Value { inner: value }) - .map_err(crate::kv::Error::into_value) - } - - fn visit_empty(&mut self) -> Result<(), Error> { - self.0.visit_null().map_err(crate::kv::Error::into_value) - } - - fn visit_u64(&mut self, value: u64) -> Result<(), Error> { - self.0 - .visit_u64(value) - .map_err(crate::kv::Error::into_value) - } - - fn visit_i64(&mut self, value: i64) -> Result<(), Error> { - self.0 - .visit_i64(value) - .map_err(crate::kv::Error::into_value) - } - - fn visit_u128(&mut self, value: u128) -> Result<(), Error> { - self.0 - .visit_u128(value) - .map_err(crate::kv::Error::into_value) - } - - fn visit_i128(&mut self, value: i128) -> Result<(), Error> { - self.0 - .visit_i128(value) - .map_err(crate::kv::Error::into_value) - } - - fn visit_f64(&mut self, value: f64) -> Result<(), Error> { - self.0 - .visit_f64(value) - .map_err(crate::kv::Error::into_value) - } - - fn visit_bool(&mut self, value: bool) -> Result<(), Error> { - self.0 - .visit_bool(value) - .map_err(crate::kv::Error::into_value) - } - - fn visit_str(&mut self, value: &str) -> Result<(), Error> { - self.0 - .visit_str(value) - .map_err(crate::kv::Error::into_value) - } - - fn visit_borrowed_str(&mut self, value: &'v str) -> Result<(), Error> { - self.0 - .visit_borrowed_str(value) - .map_err(crate::kv::Error::into_value) - } - - fn visit_char(&mut self, value: char) -> Result<(), Error> { - self.0 - .visit_char(value) - .map_err(crate::kv::Error::into_value) - } - - #[cfg(feature = "kv_std")] - fn visit_error( - &mut self, - err: &(dyn std::error::Error + 'static), - ) -> Result<(), Error> { - self.0 - .visit_error(err) - .map_err(crate::kv::Error::into_value) - } - - #[cfg(feature = "kv_std")] - fn visit_borrowed_error( - &mut self, - err: &'v (dyn std::error::Error + 'static), - ) -> Result<(), Error> { - self.0 - .visit_borrowed_error(err) - .map_err(crate::kv::Error::into_value) - } - } - - inner - .visit(&mut InnerVisitValue(visitor)) - .map_err(crate::kv::Error::from_value) - } -} - -#[cfg(not(feature = "value-bag"))] -pub(in crate::kv) mod inner { - /** - This is a dependency-free implementation of `Value` when there's no serialization frameworks involved. - In these simple cases a more fully featured solution like `value_bag` isn't needed, so we avoid pulling it in. - - There are a few things here that need to remain consistent with the `value_bag`-based implementation: - - 1. Conversions should always produce the same results. If a conversion here returns `Some`, then - the same `value_bag`-based conversion must also. Of particular note here are floats to ints; they're - based on the standard library's `TryInto` conversions, which need to be converted to `i32` or `u32`, - and then to `f64`. - 2. VisitValues should always be called in the same way. If a particular type of value calls `visit_i64`, - then the same `value_bag`-based visitor must also. 
- */ - use super::*; - - #[derive(Clone)] - pub enum Inner<'v> { - None, - Bool(bool), - Str(&'v str), - Char(char), - I64(i64), - U64(u64), - F64(f64), - I128(i128), - U128(u128), - Debug(&'v dyn fmt::Debug), - Display(&'v dyn fmt::Display), - } - - impl<'v> From<()> for Inner<'v> { - fn from(_: ()) -> Self { - Inner::None - } - } - - impl<'v> From for Inner<'v> { - fn from(v: bool) -> Self { - Inner::Bool(v) - } - } - - impl<'v> From for Inner<'v> { - fn from(v: char) -> Self { - Inner::Char(v) - } - } - - impl<'v> From for Inner<'v> { - fn from(v: f32) -> Self { - Inner::F64(v as f64) - } - } - - impl<'v> From for Inner<'v> { - fn from(v: f64) -> Self { - Inner::F64(v) - } - } - - impl<'v> From for Inner<'v> { - fn from(v: i8) -> Self { - Inner::I64(v as i64) - } - } - - impl<'v> From for Inner<'v> { - fn from(v: i16) -> Self { - Inner::I64(v as i64) - } - } - - impl<'v> From for Inner<'v> { - fn from(v: i32) -> Self { - Inner::I64(v as i64) - } - } - - impl<'v> From for Inner<'v> { - fn from(v: i64) -> Self { - Inner::I64(v as i64) - } - } - - impl<'v> From for Inner<'v> { - fn from(v: isize) -> Self { - Inner::I64(v as i64) - } - } - - impl<'v> From for Inner<'v> { - fn from(v: u8) -> Self { - Inner::U64(v as u64) - } - } - - impl<'v> From for Inner<'v> { - fn from(v: u16) -> Self { - Inner::U64(v as u64) - } - } - - impl<'v> From for Inner<'v> { - fn from(v: u32) -> Self { - Inner::U64(v as u64) - } - } - - impl<'v> From for Inner<'v> { - fn from(v: u64) -> Self { - Inner::U64(v as u64) - } - } - - impl<'v> From for Inner<'v> { - fn from(v: usize) -> Self { - Inner::U64(v as u64) - } - } - - impl<'v> From for Inner<'v> { - fn from(v: i128) -> Self { - Inner::I128(v) - } - } - - impl<'v> From for Inner<'v> { - fn from(v: u128) -> Self { - Inner::U128(v) - } - } - - impl<'v> From<&'v str> for Inner<'v> { - fn from(v: &'v str) -> Self { - Inner::Str(v) - } - } - - impl<'v> fmt::Debug for Inner<'v> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match self { - Inner::None => fmt::Debug::fmt(&None::<()>, f), - Inner::Bool(v) => fmt::Debug::fmt(v, f), - Inner::Str(v) => fmt::Debug::fmt(v, f), - Inner::Char(v) => fmt::Debug::fmt(v, f), - Inner::I64(v) => fmt::Debug::fmt(v, f), - Inner::U64(v) => fmt::Debug::fmt(v, f), - Inner::F64(v) => fmt::Debug::fmt(v, f), - Inner::I128(v) => fmt::Debug::fmt(v, f), - Inner::U128(v) => fmt::Debug::fmt(v, f), - Inner::Debug(v) => fmt::Debug::fmt(v, f), - Inner::Display(v) => fmt::Display::fmt(v, f), - } - } - } - - impl<'v> fmt::Display for Inner<'v> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match self { - Inner::None => fmt::Debug::fmt(&None::<()>, f), - Inner::Bool(v) => fmt::Display::fmt(v, f), - Inner::Str(v) => fmt::Display::fmt(v, f), - Inner::Char(v) => fmt::Display::fmt(v, f), - Inner::I64(v) => fmt::Display::fmt(v, f), - Inner::U64(v) => fmt::Display::fmt(v, f), - Inner::F64(v) => fmt::Display::fmt(v, f), - Inner::I128(v) => fmt::Display::fmt(v, f), - Inner::U128(v) => fmt::Display::fmt(v, f), - Inner::Debug(v) => fmt::Debug::fmt(v, f), - Inner::Display(v) => fmt::Display::fmt(v, f), - } - } - } - - impl<'v> Inner<'v> { - pub fn from_debug(value: &'v T) -> Self { - Inner::Debug(value) - } - - pub fn from_display(value: &'v T) -> Self { - Inner::Display(value) - } - - pub fn from_dyn_debug(value: &'v dyn fmt::Debug) -> Self { - Inner::Debug(value) - } - - pub fn from_dyn_display(value: &'v dyn fmt::Display) -> Self { - Inner::Display(value) - } - - pub fn empty() -> Self { - Inner::None - } - - pub fn to_bool(&self) 
-> Option { - match self { - Inner::Bool(v) => Some(*v), - _ => None, - } - } - - pub fn to_char(&self) -> Option { - match self { - Inner::Char(v) => Some(*v), - _ => None, - } - } - - pub fn to_f64(&self) -> Option { - match self { - Inner::F64(v) => Some(*v), - Inner::I64(v) => { - let v: i32 = (*v).try_into().ok()?; - v.try_into().ok() - } - Inner::U64(v) => { - let v: u32 = (*v).try_into().ok()?; - v.try_into().ok() - } - Inner::I128(v) => { - let v: i32 = (*v).try_into().ok()?; - v.try_into().ok() - } - Inner::U128(v) => { - let v: u32 = (*v).try_into().ok()?; - v.try_into().ok() - } - _ => None, - } - } - - pub fn to_i64(&self) -> Option { - match self { - Inner::I64(v) => Some(*v), - Inner::U64(v) => (*v).try_into().ok(), - Inner::I128(v) => (*v).try_into().ok(), - Inner::U128(v) => (*v).try_into().ok(), - _ => None, - } - } - - pub fn to_u64(&self) -> Option { - match self { - Inner::U64(v) => Some(*v), - Inner::I64(v) => (*v).try_into().ok(), - Inner::I128(v) => (*v).try_into().ok(), - Inner::U128(v) => (*v).try_into().ok(), - _ => None, - } - } - - pub fn to_u128(&self) -> Option { - match self { - Inner::U128(v) => Some(*v), - Inner::I64(v) => (*v).try_into().ok(), - Inner::U64(v) => (*v).try_into().ok(), - Inner::I128(v) => (*v).try_into().ok(), - _ => None, - } - } - - pub fn to_i128(&self) -> Option { - match self { - Inner::I128(v) => Some(*v), - Inner::I64(v) => (*v).try_into().ok(), - Inner::U64(v) => (*v).try_into().ok(), - Inner::U128(v) => (*v).try_into().ok(), - _ => None, - } - } - - pub fn to_borrowed_str(&self) -> Option<&'v str> { - match self { - Inner::Str(v) => Some(v), - _ => None, - } - } - - #[cfg(test)] - pub fn to_test_token(&self) -> Token { - match self { - Inner::None => Token::None, - Inner::Bool(v) => Token::Bool(*v), - Inner::Str(v) => Token::Str(*v), - Inner::Char(v) => Token::Char(*v), - Inner::I64(v) => Token::I64(*v), - Inner::U64(v) => Token::U64(*v), - Inner::F64(v) => Token::F64(*v), - Inner::I128(_) => unimplemented!(), - Inner::U128(_) => unimplemented!(), - Inner::Debug(_) => unimplemented!(), - Inner::Display(_) => unimplemented!(), - } - } - } - - #[cfg(test)] - #[derive(Debug, PartialEq)] - pub enum Token<'v> { - None, - Bool(bool), - Char(char), - Str(&'v str), - F64(f64), - I64(i64), - U64(u64), - } - - pub fn visit<'v>( - inner: &Inner<'v>, - mut visitor: impl VisitValue<'v>, - ) -> Result<(), crate::kv::Error> { - match inner { - Inner::None => visitor.visit_null(), - Inner::Bool(v) => visitor.visit_bool(*v), - Inner::Str(v) => visitor.visit_borrowed_str(*v), - Inner::Char(v) => visitor.visit_char(*v), - Inner::I64(v) => visitor.visit_i64(*v), - Inner::U64(v) => visitor.visit_u64(*v), - Inner::F64(v) => visitor.visit_f64(*v), - Inner::I128(v) => visitor.visit_i128(*v), - Inner::U128(v) => visitor.visit_u128(*v), - Inner::Debug(v) => visitor.visit_any(Value::from_dyn_debug(*v)), - Inner::Display(v) => visitor.visit_any(Value::from_dyn_display(*v)), - } - } -} - -impl<'v> Value<'v> { - /// Get a value from a type implementing `std::fmt::Debug`. - #[cfg(feature = "kv_unstable")] - #[deprecated(note = "use `from_debug` instead")] - pub fn capture_debug(value: &'v T) -> Self - where - T: fmt::Debug + 'static, - { - Value::from_debug(value) - } - - /// Get a value from a type implementing `std::fmt::Display`. 
- #[cfg(feature = "kv_unstable")] - #[deprecated(note = "use `from_display` instead")] - pub fn capture_display(value: &'v T) -> Self - where - T: fmt::Display + 'static, - { - Value::from_display(value) - } - - /// Get a value from an error. - #[cfg(feature = "kv_unstable_std")] - #[deprecated(note = "use `from_dyn_error` instead")] - pub fn capture_error(err: &'v T) -> Self - where - T: std::error::Error + 'static, - { - Value::from_dyn_error(err) - } - - /// Get a value from a type implementing `serde::Serialize`. - #[cfg(feature = "kv_unstable_serde")] - #[deprecated(note = "use `from_serde` instead")] - pub fn capture_serde(value: &'v T) -> Self - where - T: serde::Serialize + 'static, - { - Value::from_serde(value) - } - - /// Get a value from a type implementing `sval::Value`. - #[cfg(feature = "kv_unstable_sval")] - #[deprecated(note = "use `from_sval` instead")] - pub fn capture_sval(value: &'v T) -> Self - where - T: sval::Value + 'static, - { - Value::from_sval(value) - } - - /// Check whether this value can be downcast to `T`. - #[cfg(feature = "kv_unstable")] - #[deprecated( - note = "downcasting has been removed; log an issue at https://github.com/rust-lang/log/issues if this is something you rely on" - )] - pub fn is(&self) -> bool { - false - } - - /// Try downcast this value to `T`. - #[cfg(feature = "kv_unstable")] - #[deprecated( - note = "downcasting has been removed; log an issue at https://github.com/rust-lang/log/issues if this is something you rely on" - )] - pub fn downcast_ref(&self) -> Option<&T> { - None - } -} - -// NOTE: Deprecated; but aliases can't carry this attribute -#[cfg(feature = "kv_unstable")] -pub use VisitValue as Visit; - -/// Get a value from a type implementing `std::fmt::Debug`. -#[cfg(feature = "kv_unstable")] -#[deprecated(note = "use the `key:? = value` macro syntax instead")] -#[macro_export] -macro_rules! as_debug { - ($capture:expr) => { - $crate::kv::Value::from_debug(&$capture) - }; -} - -/// Get a value from a type implementing `std::fmt::Display`. -#[cfg(feature = "kv_unstable")] -#[deprecated(note = "use the `key:% = value` macro syntax instead")] -#[macro_export] -macro_rules! as_display { - ($capture:expr) => { - $crate::kv::Value::from_display(&$capture) - }; -} - -/// Get a value from an error. -#[cfg(feature = "kv_unstable_std")] -#[deprecated(note = "use the `key:err = value` macro syntax instead")] -#[macro_export] -macro_rules! as_error { - ($capture:expr) => { - $crate::kv::Value::from_dyn_error(&$capture) - }; -} - -#[cfg(feature = "kv_unstable_serde")] -#[deprecated(note = "use the `key:serde = value` macro syntax instead")] -/// Get a value from a type implementing `serde::Serialize`. -#[macro_export] -macro_rules! as_serde { - ($capture:expr) => { - $crate::kv::Value::from_serde(&$capture) - }; -} - -/// Get a value from a type implementing `sval::Value`. -#[cfg(feature = "kv_unstable_sval")] -#[deprecated(note = "use the `key:sval = value` macro syntax instead")] -#[macro_export] -macro_rules! 
as_sval { - ($capture:expr) => { - $crate::kv::Value::from_sval(&$capture) - }; -} - -#[cfg(test)] -pub(crate) mod tests { - use super::*; - - impl<'v> Value<'v> { - pub(crate) fn to_token(&self) -> inner::Token { - self.inner.to_test_token() - } - } - - fn unsigned() -> impl Iterator> { - vec![ - Value::from(8u8), - Value::from(16u16), - Value::from(32u32), - Value::from(64u64), - Value::from(1usize), - Value::from(std::num::NonZeroU8::new(8).unwrap()), - Value::from(std::num::NonZeroU16::new(16).unwrap()), - Value::from(std::num::NonZeroU32::new(32).unwrap()), - Value::from(std::num::NonZeroU64::new(64).unwrap()), - Value::from(std::num::NonZeroUsize::new(1).unwrap()), - ] - .into_iter() - } - - fn signed() -> impl Iterator> { - vec![ - Value::from(-8i8), - Value::from(-16i16), - Value::from(-32i32), - Value::from(-64i64), - Value::from(-1isize), - Value::from(std::num::NonZeroI8::new(-8).unwrap()), - Value::from(std::num::NonZeroI16::new(-16).unwrap()), - Value::from(std::num::NonZeroI32::new(-32).unwrap()), - Value::from(std::num::NonZeroI64::new(-64).unwrap()), - Value::from(std::num::NonZeroIsize::new(-1).unwrap()), - ] - .into_iter() - } - - fn float() -> impl Iterator> { - vec![Value::from(32.32f32), Value::from(64.64f64)].into_iter() - } - - fn bool() -> impl Iterator> { - vec![Value::from(true), Value::from(false)].into_iter() - } - - fn str() -> impl Iterator> { - vec![Value::from("a string"), Value::from("a loong string")].into_iter() - } - - fn char() -> impl Iterator> { - vec![Value::from('a'), Value::from('⛰')].into_iter() - } - - #[test] - fn test_to_value_display() { - assert_eq!(42u64.to_value().to_string(), "42"); - assert_eq!(42i64.to_value().to_string(), "42"); - assert_eq!(42.01f64.to_value().to_string(), "42.01"); - assert_eq!(true.to_value().to_string(), "true"); - assert_eq!('a'.to_value().to_string(), "a"); - assert_eq!("a loong string".to_value().to_string(), "a loong string"); - assert_eq!(Some(true).to_value().to_string(), "true"); - assert_eq!(().to_value().to_string(), "None"); - assert_eq!(None::.to_value().to_string(), "None"); - } - - #[test] - fn test_to_value_structured() { - assert_eq!(42u64.to_value().to_token(), inner::Token::U64(42)); - assert_eq!(42i64.to_value().to_token(), inner::Token::I64(42)); - assert_eq!(42.01f64.to_value().to_token(), inner::Token::F64(42.01)); - assert_eq!(true.to_value().to_token(), inner::Token::Bool(true)); - assert_eq!('a'.to_value().to_token(), inner::Token::Char('a')); - assert_eq!( - "a loong string".to_value().to_token(), - inner::Token::Str("a loong string".into()) - ); - assert_eq!(Some(true).to_value().to_token(), inner::Token::Bool(true)); - assert_eq!(().to_value().to_token(), inner::Token::None); - assert_eq!(None::.to_value().to_token(), inner::Token::None); - } - - #[test] - fn test_to_number() { - for v in unsigned() { - assert!(v.to_u64().is_some()); - assert!(v.to_i64().is_some()); - } - - for v in signed() { - assert!(v.to_i64().is_some()); - } - - for v in unsigned().chain(signed()).chain(float()) { - assert!(v.to_f64().is_some()); - } - - for v in bool().chain(str()).chain(char()) { - assert!(v.to_u64().is_none()); - assert!(v.to_i64().is_none()); - assert!(v.to_f64().is_none()); - } - } - - #[test] - fn test_to_float() { - // Only integers from i32::MIN..=u32::MAX can be converted into floats - assert!(Value::from(i32::MIN).to_f64().is_some()); - assert!(Value::from(u32::MAX).to_f64().is_some()); - - assert!(Value::from((i32::MIN as i64) - 1).to_f64().is_none()); - assert!(Value::from((u32::MAX as 
u64) + 1).to_f64().is_none()); - } - - #[test] - fn test_to_cow_str() { - for v in str() { - assert!(v.to_borrowed_str().is_some()); - - #[cfg(feature = "kv_std")] - assert!(v.to_cow_str().is_some()); - } - - let short_lived = String::from("short lived"); - let v = Value::from(&*short_lived); - - assert!(v.to_borrowed_str().is_some()); - - #[cfg(feature = "kv_std")] - assert!(v.to_cow_str().is_some()); - - for v in unsigned().chain(signed()).chain(float()).chain(bool()) { - assert!(v.to_borrowed_str().is_none()); - - #[cfg(feature = "kv_std")] - assert!(v.to_cow_str().is_none()); - } - } - - #[test] - fn test_to_bool() { - for v in bool() { - assert!(v.to_bool().is_some()); - } - - for v in unsigned() - .chain(signed()) - .chain(float()) - .chain(str()) - .chain(char()) - { - assert!(v.to_bool().is_none()); - } - } - - #[test] - fn test_to_char() { - for v in char() { - assert!(v.to_char().is_some()); - } - - for v in unsigned() - .chain(signed()) - .chain(float()) - .chain(str()) - .chain(bool()) - { - assert!(v.to_char().is_none()); - } - } - - #[test] - fn test_visit_integer() { - struct Extract(Option); - - impl<'v> VisitValue<'v> for Extract { - fn visit_any(&mut self, value: Value) -> Result<(), Error> { - unimplemented!("unexpected value: {value:?}") - } - - fn visit_u64(&mut self, value: u64) -> Result<(), Error> { - self.0 = Some(value); - - Ok(()) - } - } - - let mut extract = Extract(None); - Value::from(42u64).visit(&mut extract).unwrap(); - - assert_eq!(Some(42), extract.0); - } - - #[test] - fn test_visit_borrowed_str() { - struct Extract<'v>(Option<&'v str>); - - impl<'v> VisitValue<'v> for Extract<'v> { - fn visit_any(&mut self, value: Value) -> Result<(), Error> { - unimplemented!("unexpected value: {value:?}") - } - - fn visit_borrowed_str(&mut self, value: &'v str) -> Result<(), Error> { - self.0 = Some(value); - - Ok(()) - } - } - - let mut extract = Extract(None); - - let short_lived = String::from("A short-lived string"); - Value::from(&*short_lived).visit(&mut extract).unwrap(); - - assert_eq!(Some("A short-lived string"), extract.0); - } -} diff --git a/vendor/log/src/lib.rs b/vendor/log/src/lib.rs deleted file mode 100644 index 47f2cf13276ab2..00000000000000 --- a/vendor/log/src/lib.rs +++ /dev/null @@ -1,2005 +0,0 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! A lightweight logging facade. -//! -//! The `log` crate provides a single logging API that abstracts over the -//! actual logging implementation. Libraries can use the logging API provided -//! by this crate, and the consumer of those libraries can choose the logging -//! implementation that is most suitable for its use case. -//! -//! If no logging implementation is selected, the facade falls back to a "noop" -//! implementation that ignores all log messages. The overhead in this case -//! is very small - just an integer load, comparison and jump. -//! -//! A log request consists of a _target_, a _level_, and a _body_. A target is a -//! string which defaults to the module path of the location of the log request, -//! though that default may be overridden. Logger implementations typically use -//! the target to filter requests based on some user configuration. -//! -//! # Usage -//! -//! 
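The `Extract` visitors in the tests above show the intended `VisitValue` pattern: implement `visit_any` as the required catch-all and override only the typed hooks you care about. A small self-contained sketch of that pattern (illustrative only, again assuming the `kv` feature):

```rust
// Illustrative sketch only (not part of the vendored log source); assumes the
// `kv` cargo feature so that log::kv::{Error, Value, VisitValue} are available.
use log::kv::{Error, Value, VisitValue};

#[derive(Default)]
struct CollectU64(Option<u64>);

impl<'v> VisitValue<'v> for CollectU64 {
    // `visit_any` is the only required method; the typed hooks fall back to it
    // by default, so overriding `visit_u64` captures integers exactly.
    fn visit_any(&mut self, _value: Value) -> Result<(), Error> {
        Ok(()) // ignore values that are not u64
    }

    fn visit_u64(&mut self, value: u64) -> Result<(), Error> {
        self.0 = Some(value);
        Ok(())
    }
}

fn main() {
    let mut visitor = CollectU64::default();
    Value::from(42u64).visit(&mut visitor).unwrap();
    assert_eq!(visitor.0, Some(42));
}
```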
The basic use of the log crate is through the five logging macros: [`error!`], -//! [`warn!`], [`info!`], [`debug!`] and [`trace!`] -//! where `error!` represents the highest-priority log messages -//! and `trace!` the lowest. The log messages are filtered by configuring -//! the log level to exclude messages with a lower priority. -//! Each of these macros accept format strings similarly to [`println!`]. -//! -//! -//! [`error!`]: ./macro.error.html -//! [`warn!`]: ./macro.warn.html -//! [`info!`]: ./macro.info.html -//! [`debug!`]: ./macro.debug.html -//! [`trace!`]: ./macro.trace.html -//! [`println!`]: https://doc.rust-lang.org/stable/std/macro.println.html -//! -//! Avoid writing expressions with side-effects in log statements. They may not be evaluated. -//! -//! ## In libraries -//! -//! Libraries should link only to the `log` crate, and use the provided -//! macros to log whatever information will be useful to downstream consumers. -//! -//! ### Examples -//! -//! ``` -//! # #[derive(Debug)] pub struct Yak(String); -//! # impl Yak { fn shave(&mut self, _: u32) {} } -//! # fn find_a_razor() -> Result { Ok(1) } -//! use log::{info, warn}; -//! -//! pub fn shave_the_yak(yak: &mut Yak) { -//! info!(target: "yak_events", "Commencing yak shaving for {yak:?}"); -//! -//! loop { -//! match find_a_razor() { -//! Ok(razor) => { -//! info!("Razor located: {razor}"); -//! yak.shave(razor); -//! break; -//! } -//! Err(err) => { -//! warn!("Unable to locate a razor: {err}, retrying"); -//! } -//! } -//! } -//! } -//! # fn main() {} -//! ``` -//! -//! ## In executables -//! -//! Executables should choose a logging implementation and initialize it early in the -//! runtime of the program. Logging implementations will typically include a -//! function to do this. Any log messages generated before -//! the implementation is initialized will be ignored. -//! -//! The executable itself may use the `log` crate to log as well. -//! -//! ### Warning -//! -//! The logging system may only be initialized once. -//! -//! ## Structured logging -//! -//! If you enable the `kv` feature you can associate structured values -//! with your log records. If we take the example from before, we can include -//! some additional context besides what's in the formatted message: -//! -//! ``` -//! # use serde::Serialize; -//! # #[derive(Debug, Serialize)] pub struct Yak(String); -//! # impl Yak { fn shave(&mut self, _: u32) {} } -//! # fn find_a_razor() -> Result { Ok(1) } -//! # #[cfg(feature = "kv_serde")] -//! # fn main() { -//! use log::{info, warn}; -//! -//! pub fn shave_the_yak(yak: &mut Yak) { -//! info!(target: "yak_events", yak:serde; "Commencing yak shaving"); -//! -//! loop { -//! match find_a_razor() { -//! Ok(razor) => { -//! info!(razor; "Razor located"); -//! yak.shave(razor); -//! break; -//! } -//! Err(e) => { -//! warn!(e:err; "Unable to locate a razor, retrying"); -//! } -//! } -//! } -//! } -//! # } -//! # #[cfg(not(feature = "kv_serde"))] -//! # fn main() {} -//! ``` -//! -//! See the [`kv`] module documentation for more details. -//! -//! # Available logging implementations -//! -//! In order to produce log output executables have to use -//! a logger implementation compatible with the facade. -//! There are many available implementations to choose from, -//! here are some of the most popular ones: -//! -//! * Simple minimal loggers: -//! * [env_logger] -//! * [colog] -//! * [simple_logger] -//! * [simplelog] -//! * [pretty_env_logger] -//! * [stderrlog] -//! * [flexi_logger] -//! 
* [call_logger] -//! * [structured-logger] -//! * [clang_log] -//! * [ftail] -//! * Complex configurable frameworks: -//! * [log4rs] -//! * [logforth] -//! * [fern] -//! * [spdlog-rs] -//! * Adaptors for other facilities: -//! * [syslog] -//! * [slog-stdlog] -//! * [systemd-journal-logger] -//! * [android_log] -//! * [win_dbg_logger] -//! * [db_logger] -//! * [log-to-defmt] -//! * [logcontrol-log] -//! * For WebAssembly binaries: -//! * [console_log] -//! * For dynamic libraries: -//! * You may need to construct an FFI-safe wrapper over `log` to initialize in your libraries -//! * Utilities: -//! * [log_err] -//! * [log-reload] -//! -//! # Implementing a Logger -//! -//! Loggers implement the [`Log`] trait. Here's a very basic example that simply -//! logs all messages at the [`Error`][level_link], [`Warn`][level_link] or -//! [`Info`][level_link] levels to stdout: -//! -//! ``` -//! use log::{Record, Level, Metadata}; -//! -//! struct SimpleLogger; -//! -//! impl log::Log for SimpleLogger { -//! fn enabled(&self, metadata: &Metadata) -> bool { -//! metadata.level() <= Level::Info -//! } -//! -//! fn log(&self, record: &Record) { -//! if self.enabled(record.metadata()) { -//! println!("{} - {}", record.level(), record.args()); -//! } -//! } -//! -//! fn flush(&self) {} -//! } -//! -//! # fn main() {} -//! ``` -//! -//! Loggers are installed by calling the [`set_logger`] function. The maximum -//! log level also needs to be adjusted via the [`set_max_level`] function. The -//! logging facade uses this as an optimization to improve performance of log -//! messages at levels that are disabled. It's important to set it, as it -//! defaults to [`Off`][filter_link], so no log messages will ever be captured! -//! In the case of our example logger, we'll want to set the maximum log level -//! to [`Info`][filter_link], since we ignore any [`Debug`][level_link] or -//! [`Trace`][level_link] level log messages. A logging implementation should -//! provide a function that wraps a call to [`set_logger`] and -//! [`set_max_level`], handling initialization of the logger: -//! -//! ``` -//! # use log::{Level, Metadata}; -//! # struct SimpleLogger; -//! # impl log::Log for SimpleLogger { -//! # fn enabled(&self, _: &Metadata) -> bool { false } -//! # fn log(&self, _: &log::Record) {} -//! # fn flush(&self) {} -//! # } -//! # fn main() {} -//! use log::{SetLoggerError, LevelFilter}; -//! -//! static LOGGER: SimpleLogger = SimpleLogger; -//! -//! pub fn init() -> Result<(), SetLoggerError> { -//! log::set_logger(&LOGGER) -//! .map(|()| log::set_max_level(LevelFilter::Info)) -//! } -//! ``` -//! -//! Implementations that adjust their configurations at runtime should take care -//! to adjust the maximum log level as well. -//! -//! # Use with `std` -//! -//! `set_logger` requires you to provide a `&'static Log`, which can be hard to -//! obtain if your logger depends on some runtime configuration. The -//! `set_boxed_logger` function is available with the `std` Cargo feature. It is -//! identical to `set_logger` except that it takes a `Box` rather than a -//! `&'static Log`: -//! -//! ``` -//! # use log::{Level, LevelFilter, Log, SetLoggerError, Metadata}; -//! # struct SimpleLogger; -//! # impl log::Log for SimpleLogger { -//! # fn enabled(&self, _: &Metadata) -> bool { false } -//! # fn log(&self, _: &log::Record) {} -//! # fn flush(&self) {} -//! # } -//! # fn main() {} -//! # #[cfg(feature = "std")] -//! pub fn init() -> Result<(), SetLoggerError> { -//! log::set_boxed_logger(Box::new(SimpleLogger)) -//! 
.map(|()| log::set_max_level(LevelFilter::Info)) -//! } -//! ``` -//! -//! # Compile time filters -//! -//! Log levels can be statically disabled at compile time by enabling one of these Cargo features: -//! -//! * `max_level_off` -//! * `max_level_error` -//! * `max_level_warn` -//! * `max_level_info` -//! * `max_level_debug` -//! * `max_level_trace` -//! -//! Log invocations at disabled levels will be skipped and will not even be present in the -//! resulting binary. These features control the value of the `STATIC_MAX_LEVEL` constant. The -//! logging macros check this value before logging a message. By default, no levels are disabled. -//! -//! It is possible to override this level for release builds only with the following features: -//! -//! * `release_max_level_off` -//! * `release_max_level_error` -//! * `release_max_level_warn` -//! * `release_max_level_info` -//! * `release_max_level_debug` -//! * `release_max_level_trace` -//! -//! Libraries should avoid using the max level features because they're global and can't be changed -//! once they're set. -//! -//! For example, a crate can disable trace level logs in debug builds and trace, debug, and info -//! level logs in release builds with the following configuration: -//! -//! ```toml -//! [dependencies] -//! log = { version = "0.4", features = ["max_level_debug", "release_max_level_warn"] } -//! ``` -//! # Crate Feature Flags -//! -//! The following crate feature flags are available in addition to the filters. They are -//! configured in your `Cargo.toml`. -//! -//! * `std` allows use of `std` crate instead of the default `core`. Enables using `std::error` and -//! `set_boxed_logger` functionality. -//! * `serde` enables support for serialization and deserialization of `Level` and `LevelFilter`. -//! -//! ```toml -//! [dependencies] -//! log = { version = "0.4", features = ["std", "serde"] } -//! ``` -//! -//! # Version compatibility -//! -//! The 0.3 and 0.4 versions of the `log` crate are almost entirely compatible. Log messages -//! made using `log` 0.3 will forward transparently to a logger implementation using `log` 0.4. Log -//! messages made using `log` 0.4 will forward to a logger implementation using `log` 0.3, but the -//! module path and file name information associated with the message will unfortunately be lost. -//! -//! [`Log`]: trait.Log.html -//! [level_link]: enum.Level.html -//! [filter_link]: enum.LevelFilter.html -//! [`set_logger`]: fn.set_logger.html -//! [`set_max_level`]: fn.set_max_level.html -//! [`try_set_logger_raw`]: fn.try_set_logger_raw.html -//! [`shutdown_logger_raw`]: fn.shutdown_logger_raw.html -//! [env_logger]: https://docs.rs/env_logger/*/env_logger/ -//! [colog]: https://docs.rs/colog/*/colog/ -//! [simple_logger]: https://github.com/borntyping/rust-simple_logger -//! [simplelog]: https://github.com/drakulix/simplelog.rs -//! [pretty_env_logger]: https://docs.rs/pretty_env_logger/*/pretty_env_logger/ -//! [stderrlog]: https://docs.rs/stderrlog/*/stderrlog/ -//! [flexi_logger]: https://docs.rs/flexi_logger/*/flexi_logger/ -//! [call_logger]: https://docs.rs/call_logger/*/call_logger/ -//! [syslog]: https://docs.rs/syslog/*/syslog/ -//! [slog-stdlog]: https://docs.rs/slog-stdlog/*/slog_stdlog/ -//! [log4rs]: https://docs.rs/log4rs/*/log4rs/ -//! [logforth]: https://docs.rs/logforth/*/logforth/ -//! [fern]: https://docs.rs/fern/*/fern/ -//! [spdlog-rs]: https://docs.rs/spdlog-rs/*/spdlog/ -//! [systemd-journal-logger]: https://docs.rs/systemd-journal-logger/*/systemd_journal_logger/ -//! 
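Pulling the preceding sections together, the usual pattern is: implement `Log`, install it once with `set_logger`, raise the runtime filter with `set_max_level`, then log through the macros. A minimal runnable sketch (illustrative only, not part of the vendored source):

```rust
// Illustrative sketch only (not part of the vendored log source): the
// implement-and-install pattern described in the crate docs above.
use log::{info, warn, Level, LevelFilter, Log, Metadata, Record, SetLoggerError};

struct StdoutLogger;

impl Log for StdoutLogger {
    fn enabled(&self, metadata: &Metadata) -> bool {
        metadata.level() <= Level::Info
    }

    fn log(&self, record: &Record) {
        if self.enabled(record.metadata()) {
            println!("{} [{}] {}", record.level(), record.target(), record.args());
        }
    }

    fn flush(&self) {}
}

static LOGGER: StdoutLogger = StdoutLogger;

fn init() -> Result<(), SetLoggerError> {
    // The max level defaults to Off, so it must be raised or nothing is logged.
    log::set_logger(&LOGGER).map(|()| log::set_max_level(LevelFilter::Info))
}

fn main() {
    init().expect("another logger was already installed");
    info!("hello from the log facade");
    warn!(target: "app_events", "something looks off");
}
```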
[android_log]: https://docs.rs/android_log/*/android_log/ -//! [win_dbg_logger]: https://docs.rs/win_dbg_logger/*/win_dbg_logger/ -//! [db_logger]: https://docs.rs/db_logger/*/db_logger/ -//! [log-to-defmt]: https://docs.rs/log-to-defmt/*/log_to_defmt/ -//! [console_log]: https://docs.rs/console_log/*/console_log/ -//! [structured-logger]: https://docs.rs/structured-logger/latest/structured_logger/ -//! [logcontrol-log]: https://docs.rs/logcontrol-log/*/logcontrol_log/ -//! [log_err]: https://docs.rs/log_err/*/log_err/ -//! [log-reload]: https://docs.rs/log-reload/*/log_reload/ -//! [clang_log]: https://docs.rs/clang_log/latest/clang_log -//! [ftail]: https://docs.rs/ftail/latest/ftail - -#![doc( - html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png", - html_favicon_url = "https://www.rust-lang.org/favicon.ico", - html_root_url = "https://docs.rs/log/0.4.28" -)] -#![warn(missing_docs)] -#![deny(missing_debug_implementations, unconditional_recursion)] -#![cfg_attr(all(not(feature = "std"), not(test)), no_std)] - -#[cfg(any( - all(feature = "max_level_off", feature = "max_level_error"), - all(feature = "max_level_off", feature = "max_level_warn"), - all(feature = "max_level_off", feature = "max_level_info"), - all(feature = "max_level_off", feature = "max_level_debug"), - all(feature = "max_level_off", feature = "max_level_trace"), - all(feature = "max_level_error", feature = "max_level_warn"), - all(feature = "max_level_error", feature = "max_level_info"), - all(feature = "max_level_error", feature = "max_level_debug"), - all(feature = "max_level_error", feature = "max_level_trace"), - all(feature = "max_level_warn", feature = "max_level_info"), - all(feature = "max_level_warn", feature = "max_level_debug"), - all(feature = "max_level_warn", feature = "max_level_trace"), - all(feature = "max_level_info", feature = "max_level_debug"), - all(feature = "max_level_info", feature = "max_level_trace"), - all(feature = "max_level_debug", feature = "max_level_trace"), -))] -compile_error!("multiple max_level_* features set"); - -#[rustfmt::skip] -#[cfg(any( - all(feature = "release_max_level_off", feature = "release_max_level_error"), - all(feature = "release_max_level_off", feature = "release_max_level_warn"), - all(feature = "release_max_level_off", feature = "release_max_level_info"), - all(feature = "release_max_level_off", feature = "release_max_level_debug"), - all(feature = "release_max_level_off", feature = "release_max_level_trace"), - all(feature = "release_max_level_error", feature = "release_max_level_warn"), - all(feature = "release_max_level_error", feature = "release_max_level_info"), - all(feature = "release_max_level_error", feature = "release_max_level_debug"), - all(feature = "release_max_level_error", feature = "release_max_level_trace"), - all(feature = "release_max_level_warn", feature = "release_max_level_info"), - all(feature = "release_max_level_warn", feature = "release_max_level_debug"), - all(feature = "release_max_level_warn", feature = "release_max_level_trace"), - all(feature = "release_max_level_info", feature = "release_max_level_debug"), - all(feature = "release_max_level_info", feature = "release_max_level_trace"), - all(feature = "release_max_level_debug", feature = "release_max_level_trace"), -))] -compile_error!("multiple release_max_level_* features set"); - -#[cfg(all(not(feature = "std"), not(test)))] -extern crate core as std; - -use std::cfg; -#[cfg(feature = "std")] -use std::error; -use std::str::FromStr; -use std::{cmp, fmt, 
mem}; - -#[macro_use] -mod macros; -mod serde; - -#[cfg(feature = "kv")] -pub mod kv; - -#[cfg(target_has_atomic = "ptr")] -use std::sync::atomic::{AtomicUsize, Ordering}; - -#[cfg(not(target_has_atomic = "ptr"))] -use std::cell::Cell; -#[cfg(not(target_has_atomic = "ptr"))] -use std::sync::atomic::Ordering; - -#[cfg(not(target_has_atomic = "ptr"))] -struct AtomicUsize { - v: Cell, -} - -#[cfg(not(target_has_atomic = "ptr"))] -impl AtomicUsize { - const fn new(v: usize) -> AtomicUsize { - AtomicUsize { v: Cell::new(v) } - } - - fn load(&self, _order: Ordering) -> usize { - self.v.get() - } - - fn store(&self, val: usize, _order: Ordering) { - self.v.set(val) - } -} - -// Any platform without atomics is unlikely to have multiple cores, so -// writing via Cell will not be a race condition. -#[cfg(not(target_has_atomic = "ptr"))] -unsafe impl Sync for AtomicUsize {} - -// The LOGGER static holds a pointer to the global logger. It is protected by -// the STATE static which determines whether LOGGER has been initialized yet. -static mut LOGGER: &dyn Log = &NopLogger; - -static STATE: AtomicUsize = AtomicUsize::new(0); - -// There are three different states that we care about: the logger's -// uninitialized, the logger's initializing (set_logger's been called but -// LOGGER hasn't actually been set yet), or the logger's active. -const UNINITIALIZED: usize = 0; -const INITIALIZING: usize = 1; -const INITIALIZED: usize = 2; - -static MAX_LOG_LEVEL_FILTER: AtomicUsize = AtomicUsize::new(0); - -static LOG_LEVEL_NAMES: [&str; 6] = ["OFF", "ERROR", "WARN", "INFO", "DEBUG", "TRACE"]; - -static SET_LOGGER_ERROR: &str = "attempted to set a logger after the logging system \ - was already initialized"; -static LEVEL_PARSE_ERROR: &str = - "attempted to convert a string that doesn't match an existing log level"; - -/// An enum representing the available verbosity levels of the logger. -/// -/// Typical usage includes: checking if a certain `Level` is enabled with -/// [`log_enabled!`](macro.log_enabled.html), specifying the `Level` of -/// [`log!`](macro.log.html), and comparing a `Level` directly to a -/// [`LevelFilter`](enum.LevelFilter.html). -#[repr(usize)] -#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Debug, Hash)] -pub enum Level { - /// The "error" level. - /// - /// Designates very serious errors. - // This way these line up with the discriminants for LevelFilter below - // This works because Rust treats field-less enums the same way as C does: - // https://doc.rust-lang.org/reference/items/enumerations.html#custom-discriminant-values-for-field-less-enumerations - Error = 1, - /// The "warn" level. - /// - /// Designates hazardous situations. - Warn, - /// The "info" level. - /// - /// Designates useful information. - Info, - /// The "debug" level. - /// - /// Designates lower priority information. - Debug, - /// The "trace" level. - /// - /// Designates very low priority, often extremely verbose, information. 
- Trace, -} - -impl PartialEq for Level { - #[inline] - fn eq(&self, other: &LevelFilter) -> bool { - *self as usize == *other as usize - } -} - -impl PartialOrd for Level { - #[inline] - fn partial_cmp(&self, other: &LevelFilter) -> Option { - Some((*self as usize).cmp(&(*other as usize))) - } -} - -impl FromStr for Level { - type Err = ParseLevelError; - fn from_str(level: &str) -> Result { - LOG_LEVEL_NAMES - .iter() - .position(|&name| name.eq_ignore_ascii_case(level)) - .into_iter() - .filter(|&idx| idx != 0) - .map(|idx| Level::from_usize(idx).unwrap()) - .next() - .ok_or(ParseLevelError(())) - } -} - -impl fmt::Display for Level { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - fmt.pad(self.as_str()) - } -} - -impl Level { - fn from_usize(u: usize) -> Option { - match u { - 1 => Some(Level::Error), - 2 => Some(Level::Warn), - 3 => Some(Level::Info), - 4 => Some(Level::Debug), - 5 => Some(Level::Trace), - _ => None, - } - } - - /// Returns the most verbose logging level. - #[inline] - pub fn max() -> Level { - Level::Trace - } - - /// Converts the `Level` to the equivalent `LevelFilter`. - #[inline] - pub fn to_level_filter(&self) -> LevelFilter { - LevelFilter::from_usize(*self as usize).unwrap() - } - - /// Returns the string representation of the `Level`. - /// - /// This returns the same string as the `fmt::Display` implementation. - pub fn as_str(&self) -> &'static str { - LOG_LEVEL_NAMES[*self as usize] - } - - /// Iterate through all supported logging levels. - /// - /// The order of iteration is from more severe to less severe log messages. - /// - /// # Examples - /// - /// ``` - /// use log::Level; - /// - /// let mut levels = Level::iter(); - /// - /// assert_eq!(Some(Level::Error), levels.next()); - /// assert_eq!(Some(Level::Trace), levels.last()); - /// ``` - pub fn iter() -> impl Iterator { - (1..6).map(|i| Self::from_usize(i).unwrap()) - } - - /// Get the next-highest `Level` from this one. - /// - /// If the current `Level` is at the highest level, the returned `Level` will be the same as the - /// current one. - /// - /// # Examples - /// - /// ``` - /// use log::Level; - /// - /// let level = Level::Info; - /// - /// assert_eq!(Level::Debug, level.increment_severity()); - /// assert_eq!(Level::Trace, level.increment_severity().increment_severity()); - /// assert_eq!(Level::Trace, level.increment_severity().increment_severity().increment_severity()); // max level - /// ``` - pub fn increment_severity(&self) -> Self { - let current = *self as usize; - Self::from_usize(current + 1).unwrap_or(*self) - } - - /// Get the next-lowest `Level` from this one. - /// - /// If the current `Level` is at the lowest level, the returned `Level` will be the same as the - /// current one. - /// - /// # Examples - /// - /// ``` - /// use log::Level; - /// - /// let level = Level::Info; - /// - /// assert_eq!(Level::Warn, level.decrement_severity()); - /// assert_eq!(Level::Error, level.decrement_severity().decrement_severity()); - /// assert_eq!(Level::Error, level.decrement_severity().decrement_severity().decrement_severity()); // min level - /// ``` - pub fn decrement_severity(&self) -> Self { - let current = *self as usize; - Self::from_usize(current.saturating_sub(1)).unwrap_or(*self) - } -} - -/// An enum representing the available verbosity level filters of the logger. -/// -/// A `LevelFilter` may be compared directly to a [`Level`]. Use this type -/// to get and set the maximum log level with [`max_level()`] and [`set_max_level`]. 
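For orientation, the conversions and comparisons defined on `Level` and `LevelFilter` above behave as follows (illustrative sketch, not part of the vendored source):

```rust
// Illustrative sketch only (not part of the vendored log source).
use log::{Level, LevelFilter};

fn main() {
    // FromStr is case-insensitive; "OFF" parses as a filter but not as a Level.
    assert_eq!("warn".parse::<Level>(), Ok(Level::Warn));
    assert_eq!("OFF".parse::<LevelFilter>(), Ok(LevelFilter::Off));
    assert!("off".parse::<Level>().is_err());

    // Levels and filters compare directly; a more verbose level is "greater".
    assert!(Level::Debug > LevelFilter::Error);
    assert_eq!(Level::Error.to_level_filter(), LevelFilter::Error);
    assert_eq!(LevelFilter::Off.to_level(), None);

    // as_str matches the Display output.
    assert_eq!(Level::Info.as_str(), "INFO");
}
```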
-/// -/// [`Level`]: enum.Level.html -/// [`max_level()`]: fn.max_level.html -/// [`set_max_level`]: fn.set_max_level.html -#[repr(usize)] -#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Debug, Hash)] -pub enum LevelFilter { - /// A level lower than all log levels. - Off, - /// Corresponds to the `Error` log level. - Error, - /// Corresponds to the `Warn` log level. - Warn, - /// Corresponds to the `Info` log level. - Info, - /// Corresponds to the `Debug` log level. - Debug, - /// Corresponds to the `Trace` log level. - Trace, -} - -impl PartialEq for LevelFilter { - #[inline] - fn eq(&self, other: &Level) -> bool { - other.eq(self) - } -} - -impl PartialOrd for LevelFilter { - #[inline] - fn partial_cmp(&self, other: &Level) -> Option { - Some((*self as usize).cmp(&(*other as usize))) - } -} - -impl FromStr for LevelFilter { - type Err = ParseLevelError; - fn from_str(level: &str) -> Result { - LOG_LEVEL_NAMES - .iter() - .position(|&name| name.eq_ignore_ascii_case(level)) - .map(|p| LevelFilter::from_usize(p).unwrap()) - .ok_or(ParseLevelError(())) - } -} - -impl fmt::Display for LevelFilter { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - fmt.pad(self.as_str()) - } -} - -impl LevelFilter { - fn from_usize(u: usize) -> Option { - match u { - 0 => Some(LevelFilter::Off), - 1 => Some(LevelFilter::Error), - 2 => Some(LevelFilter::Warn), - 3 => Some(LevelFilter::Info), - 4 => Some(LevelFilter::Debug), - 5 => Some(LevelFilter::Trace), - _ => None, - } - } - - /// Returns the most verbose logging level filter. - #[inline] - pub fn max() -> LevelFilter { - LevelFilter::Trace - } - - /// Converts `self` to the equivalent `Level`. - /// - /// Returns `None` if `self` is `LevelFilter::Off`. - #[inline] - pub fn to_level(&self) -> Option { - Level::from_usize(*self as usize) - } - - /// Returns the string representation of the `LevelFilter`. - /// - /// This returns the same string as the `fmt::Display` implementation. - pub fn as_str(&self) -> &'static str { - LOG_LEVEL_NAMES[*self as usize] - } - - /// Iterate through all supported filtering levels. - /// - /// The order of iteration is from less to more verbose filtering. - /// - /// # Examples - /// - /// ``` - /// use log::LevelFilter; - /// - /// let mut levels = LevelFilter::iter(); - /// - /// assert_eq!(Some(LevelFilter::Off), levels.next()); - /// assert_eq!(Some(LevelFilter::Trace), levels.last()); - /// ``` - pub fn iter() -> impl Iterator { - (0..6).map(|i| Self::from_usize(i).unwrap()) - } - - /// Get the next-highest `LevelFilter` from this one. - /// - /// If the current `LevelFilter` is at the highest level, the returned `LevelFilter` will be the - /// same as the current one. - /// - /// # Examples - /// - /// ``` - /// use log::LevelFilter; - /// - /// let level_filter = LevelFilter::Info; - /// - /// assert_eq!(LevelFilter::Debug, level_filter.increment_severity()); - /// assert_eq!(LevelFilter::Trace, level_filter.increment_severity().increment_severity()); - /// assert_eq!(LevelFilter::Trace, level_filter.increment_severity().increment_severity().increment_severity()); // max level - /// ``` - pub fn increment_severity(&self) -> Self { - let current = *self as usize; - Self::from_usize(current + 1).unwrap_or(*self) - } - - /// Get the next-lowest `LevelFilter` from this one. - /// - /// If the current `LevelFilter` is at the lowest level, the returned `LevelFilter` will be the - /// same as the current one. 
- /// - /// # Examples - /// - /// ``` - /// use log::LevelFilter; - /// - /// let level_filter = LevelFilter::Info; - /// - /// assert_eq!(LevelFilter::Warn, level_filter.decrement_severity()); - /// assert_eq!(LevelFilter::Error, level_filter.decrement_severity().decrement_severity()); - /// assert_eq!(LevelFilter::Off, level_filter.decrement_severity().decrement_severity().decrement_severity()); - /// assert_eq!(LevelFilter::Off, level_filter.decrement_severity().decrement_severity().decrement_severity().decrement_severity()); // min level - /// ``` - pub fn decrement_severity(&self) -> Self { - let current = *self as usize; - Self::from_usize(current.saturating_sub(1)).unwrap_or(*self) - } -} - -#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash, Debug)] -enum MaybeStaticStr<'a> { - Static(&'static str), - Borrowed(&'a str), -} - -impl<'a> MaybeStaticStr<'a> { - #[inline] - fn get(&self) -> &'a str { - match *self { - MaybeStaticStr::Static(s) => s, - MaybeStaticStr::Borrowed(s) => s, - } - } -} - -/// The "payload" of a log message. -/// -/// # Use -/// -/// `Record` structures are passed as parameters to the [`log`][method.log] -/// method of the [`Log`] trait. Logger implementors manipulate these -/// structures in order to display log messages. `Record`s are automatically -/// created by the [`log!`] macro and so are not seen by log users. -/// -/// Note that the [`level()`] and [`target()`] accessors are equivalent to -/// `self.metadata().level()` and `self.metadata().target()` respectively. -/// These methods are provided as a convenience for users of this structure. -/// -/// # Example -/// -/// The following example shows a simple logger that displays the level, -/// module path, and message of any `Record` that is passed to it. -/// -/// ``` -/// struct SimpleLogger; -/// -/// impl log::Log for SimpleLogger { -/// fn enabled(&self, _metadata: &log::Metadata) -> bool { -/// true -/// } -/// -/// fn log(&self, record: &log::Record) { -/// if !self.enabled(record.metadata()) { -/// return; -/// } -/// -/// println!("{}:{} -- {}", -/// record.level(), -/// record.target(), -/// record.args()); -/// } -/// fn flush(&self) {} -/// } -/// ``` -/// -/// [method.log]: trait.Log.html#tymethod.log -/// [`Log`]: trait.Log.html -/// [`log!`]: macro.log.html -/// [`level()`]: struct.Record.html#method.level -/// [`target()`]: struct.Record.html#method.target -#[derive(Clone, Debug)] -pub struct Record<'a> { - metadata: Metadata<'a>, - args: fmt::Arguments<'a>, - module_path: Option>, - file: Option>, - line: Option, - #[cfg(feature = "kv")] - key_values: KeyValues<'a>, -} - -// This wrapper type is only needed so we can -// `#[derive(Debug)]` on `Record`. It also -// provides a useful `Debug` implementation for -// the underlying `Source`. -#[cfg(feature = "kv")] -#[derive(Clone)] -struct KeyValues<'a>(&'a dyn kv::Source); - -#[cfg(feature = "kv")] -impl<'a> fmt::Debug for KeyValues<'a> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - let mut visitor = f.debug_map(); - self.0.visit(&mut visitor).map_err(|_| fmt::Error)?; - visitor.finish() - } -} - -impl<'a> Record<'a> { - /// Returns a new builder. - #[inline] - pub fn builder() -> RecordBuilder<'a> { - RecordBuilder::new() - } - - /// The message body. - #[inline] - pub fn args(&self) -> &fmt::Arguments<'a> { - &self.args - } - - /// Metadata about the log directive. - #[inline] - pub fn metadata(&self) -> &Metadata<'a> { - &self.metadata - } - - /// The verbosity level of the message. 
- #[inline] - pub fn level(&self) -> Level { - self.metadata.level() - } - - /// The name of the target of the directive. - #[inline] - pub fn target(&self) -> &'a str { - self.metadata.target() - } - - /// The module path of the message. - #[inline] - pub fn module_path(&self) -> Option<&'a str> { - self.module_path.map(|s| s.get()) - } - - /// The module path of the message, if it is a `'static` string. - #[inline] - pub fn module_path_static(&self) -> Option<&'static str> { - match self.module_path { - Some(MaybeStaticStr::Static(s)) => Some(s), - _ => None, - } - } - - /// The source file containing the message. - #[inline] - pub fn file(&self) -> Option<&'a str> { - self.file.map(|s| s.get()) - } - - /// The source file containing the message, if it is a `'static` string. - #[inline] - pub fn file_static(&self) -> Option<&'static str> { - match self.file { - Some(MaybeStaticStr::Static(s)) => Some(s), - _ => None, - } - } - - /// The line containing the message. - #[inline] - pub fn line(&self) -> Option { - self.line - } - - /// The structured key-value pairs associated with the message. - #[cfg(feature = "kv")] - #[inline] - pub fn key_values(&self) -> &dyn kv::Source { - self.key_values.0 - } - - /// Create a new [`RecordBuilder`](struct.RecordBuilder.html) based on this record. - #[cfg(feature = "kv")] - #[inline] - pub fn to_builder(&self) -> RecordBuilder { - RecordBuilder { - record: Record { - metadata: Metadata { - level: self.metadata.level, - target: self.metadata.target, - }, - args: self.args, - module_path: self.module_path, - file: self.file, - line: self.line, - key_values: self.key_values.clone(), - }, - } - } -} - -/// Builder for [`Record`](struct.Record.html). -/// -/// Typically should only be used by log library creators or for testing and "shim loggers". -/// The `RecordBuilder` can set the different parameters of `Record` object, and returns -/// the created object when `build` is called. -/// -/// # Examples -/// -/// ``` -/// use log::{Level, Record}; -/// -/// let record = Record::builder() -/// .args(format_args!("Error!")) -/// .level(Level::Error) -/// .target("myApp") -/// .file(Some("server.rs")) -/// .line(Some(144)) -/// .module_path(Some("server")) -/// .build(); -/// ``` -/// -/// Alternatively, use [`MetadataBuilder`](struct.MetadataBuilder.html): -/// -/// ``` -/// use log::{Record, Level, MetadataBuilder}; -/// -/// let error_metadata = MetadataBuilder::new() -/// .target("myApp") -/// .level(Level::Error) -/// .build(); -/// -/// let record = Record::builder() -/// .metadata(error_metadata) -/// .args(format_args!("Error!")) -/// .line(Some(433)) -/// .file(Some("app.rs")) -/// .module_path(Some("server")) -/// .build(); -/// ``` -#[derive(Debug)] -pub struct RecordBuilder<'a> { - record: Record<'a>, -} - -impl<'a> RecordBuilder<'a> { - /// Construct new `RecordBuilder`. 
- /// - /// The default options are: - /// - /// - `args`: [`format_args!("")`] - /// - `metadata`: [`Metadata::builder().build()`] - /// - `module_path`: `None` - /// - `file`: `None` - /// - `line`: `None` - /// - /// [`format_args!("")`]: https://doc.rust-lang.org/std/macro.format_args.html - /// [`Metadata::builder().build()`]: struct.MetadataBuilder.html#method.build - #[inline] - pub fn new() -> RecordBuilder<'a> { - RecordBuilder { - record: Record { - args: format_args!(""), - metadata: Metadata::builder().build(), - module_path: None, - file: None, - line: None, - #[cfg(feature = "kv")] - key_values: KeyValues(&None::<(kv::Key, kv::Value)>), - }, - } - } - - /// Set [`args`](struct.Record.html#method.args). - #[inline] - pub fn args(&mut self, args: fmt::Arguments<'a>) -> &mut RecordBuilder<'a> { - self.record.args = args; - self - } - - /// Set [`metadata`](struct.Record.html#method.metadata). Construct a `Metadata` object with [`MetadataBuilder`](struct.MetadataBuilder.html). - #[inline] - pub fn metadata(&mut self, metadata: Metadata<'a>) -> &mut RecordBuilder<'a> { - self.record.metadata = metadata; - self - } - - /// Set [`Metadata::level`](struct.Metadata.html#method.level). - #[inline] - pub fn level(&mut self, level: Level) -> &mut RecordBuilder<'a> { - self.record.metadata.level = level; - self - } - - /// Set [`Metadata::target`](struct.Metadata.html#method.target) - #[inline] - pub fn target(&mut self, target: &'a str) -> &mut RecordBuilder<'a> { - self.record.metadata.target = target; - self - } - - /// Set [`module_path`](struct.Record.html#method.module_path) - #[inline] - pub fn module_path(&mut self, path: Option<&'a str>) -> &mut RecordBuilder<'a> { - self.record.module_path = path.map(MaybeStaticStr::Borrowed); - self - } - - /// Set [`module_path`](struct.Record.html#method.module_path) to a `'static` string - #[inline] - pub fn module_path_static(&mut self, path: Option<&'static str>) -> &mut RecordBuilder<'a> { - self.record.module_path = path.map(MaybeStaticStr::Static); - self - } - - /// Set [`file`](struct.Record.html#method.file) - #[inline] - pub fn file(&mut self, file: Option<&'a str>) -> &mut RecordBuilder<'a> { - self.record.file = file.map(MaybeStaticStr::Borrowed); - self - } - - /// Set [`file`](struct.Record.html#method.file) to a `'static` string. - #[inline] - pub fn file_static(&mut self, file: Option<&'static str>) -> &mut RecordBuilder<'a> { - self.record.file = file.map(MaybeStaticStr::Static); - self - } - - /// Set [`line`](struct.Record.html#method.line) - #[inline] - pub fn line(&mut self, line: Option) -> &mut RecordBuilder<'a> { - self.record.line = line; - self - } - - /// Set [`key_values`](struct.Record.html#method.key_values) - #[cfg(feature = "kv")] - #[inline] - pub fn key_values(&mut self, kvs: &'a dyn kv::Source) -> &mut RecordBuilder<'a> { - self.record.key_values = KeyValues(kvs); - self - } - - /// Invoke the builder and return a `Record` - #[inline] - pub fn build(&self) -> Record<'a> { - self.record.clone() - } -} - -impl Default for RecordBuilder<'_> { - fn default() -> Self { - Self::new() - } -} - -/// Metadata about a log message. -/// -/// # Use -/// -/// `Metadata` structs are created when users of the library use -/// logging macros. -/// -/// They are consumed by implementations of the `Log` trait in the -/// `enabled` method. -/// -/// `Record`s use `Metadata` to determine the log message's severity -/// and target. 
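Outside of the logging macros, `Record` and `Metadata` are usually only built by hand in shim or bridge loggers. An illustrative sketch (not part of the vendored source) of forwarding a hand-built record to whatever logger is installed:

```rust
// Illustrative sketch only (not part of the vendored log source): a "shim"
// that rebuilds a Record with the builders above and forwards it.
use log::{Level, Log, Record};

fn forward(message: &str) {
    // Built and consumed in one statement so the format_args! temporaries
    // stay alive for the duration of the log call.
    log::logger().log(
        &Record::builder()
            .args(format_args!("{message}"))
            .level(Level::Info)
            .target("shim")
            .module_path_static(Some(module_path!()))
            .file_static(Some(file!()))
            .line(Some(line!()))
            .build(),
    );
}

fn main() {
    forward("forwarded through a hand-built Record");
}
```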
-/// -/// Users should use the `log_enabled!` macro in their code to avoid -/// constructing expensive log messages. -/// -/// # Examples -/// -/// ``` -/// use log::{Record, Level, Metadata}; -/// -/// struct MyLogger; -/// -/// impl log::Log for MyLogger { -/// fn enabled(&self, metadata: &Metadata) -> bool { -/// metadata.level() <= Level::Info -/// } -/// -/// fn log(&self, record: &Record) { -/// if self.enabled(record.metadata()) { -/// println!("{} - {}", record.level(), record.args()); -/// } -/// } -/// fn flush(&self) {} -/// } -/// -/// # fn main(){} -/// ``` -#[derive(Clone, Eq, PartialEq, Ord, PartialOrd, Hash, Debug)] -pub struct Metadata<'a> { - level: Level, - target: &'a str, -} - -impl<'a> Metadata<'a> { - /// Returns a new builder. - #[inline] - pub fn builder() -> MetadataBuilder<'a> { - MetadataBuilder::new() - } - - /// The verbosity level of the message. - #[inline] - pub fn level(&self) -> Level { - self.level - } - - /// The name of the target of the directive. - #[inline] - pub fn target(&self) -> &'a str { - self.target - } -} - -/// Builder for [`Metadata`](struct.Metadata.html). -/// -/// Typically should only be used by log library creators or for testing and "shim loggers". -/// The `MetadataBuilder` can set the different parameters of a `Metadata` object, and returns -/// the created object when `build` is called. -/// -/// # Example -/// -/// ``` -/// let target = "myApp"; -/// use log::{Level, MetadataBuilder}; -/// let metadata = MetadataBuilder::new() -/// .level(Level::Debug) -/// .target(target) -/// .build(); -/// ``` -#[derive(Eq, PartialEq, Ord, PartialOrd, Hash, Debug)] -pub struct MetadataBuilder<'a> { - metadata: Metadata<'a>, -} - -impl<'a> MetadataBuilder<'a> { - /// Construct a new `MetadataBuilder`. - /// - /// The default options are: - /// - /// - `level`: `Level::Info` - /// - `target`: `""` - #[inline] - pub fn new() -> MetadataBuilder<'a> { - MetadataBuilder { - metadata: Metadata { - level: Level::Info, - target: "", - }, - } - } - - /// Setter for [`level`](struct.Metadata.html#method.level). - #[inline] - pub fn level(&mut self, arg: Level) -> &mut MetadataBuilder<'a> { - self.metadata.level = arg; - self - } - - /// Setter for [`target`](struct.Metadata.html#method.target). - #[inline] - pub fn target(&mut self, target: &'a str) -> &mut MetadataBuilder<'a> { - self.metadata.target = target; - self - } - - /// Returns a `Metadata` object. - #[inline] - pub fn build(&self) -> Metadata<'a> { - self.metadata.clone() - } -} - -impl Default for MetadataBuilder<'_> { - fn default() -> Self { - Self::new() - } -} - -/// A trait encapsulating the operations required of a logger. -pub trait Log: Sync + Send { - /// Determines if a log message with the specified metadata would be - /// logged. - /// - /// This is used by the `log_enabled!` macro to allow callers to avoid - /// expensive computation of log message arguments if the message would be - /// discarded anyway. - /// - /// # For implementors - /// - /// This method isn't called automatically by the `log!` macros. - /// It's up to an implementation of the `Log` trait to call `enabled` in its own - /// `log` method implementation to guarantee that filtering is applied. - fn enabled(&self, metadata: &Metadata) -> bool; - - /// Logs the `Record`. - /// - /// # For implementors - /// - /// Note that `enabled` is *not* necessarily called before this method. - /// Implementations of `log` should perform all necessary filtering - /// internally. 
- fn log(&self, record: &Record); - - /// Flushes any buffered records. - /// - /// # For implementors - /// - /// This method isn't called automatically by the `log!` macros. - /// It can be called manually on shut-down to ensure any in-flight records are flushed. - fn flush(&self); -} - -/// A dummy initial value for LOGGER. -struct NopLogger; - -impl Log for NopLogger { - fn enabled(&self, _: &Metadata) -> bool { - false - } - - fn log(&self, _: &Record) {} - fn flush(&self) {} -} - -impl Log for &'_ T -where - T: ?Sized + Log, -{ - fn enabled(&self, metadata: &Metadata) -> bool { - (**self).enabled(metadata) - } - - fn log(&self, record: &Record) { - (**self).log(record); - } - fn flush(&self) { - (**self).flush(); - } -} - -#[cfg(feature = "std")] -impl Log for std::boxed::Box -where - T: ?Sized + Log, -{ - fn enabled(&self, metadata: &Metadata) -> bool { - self.as_ref().enabled(metadata) - } - - fn log(&self, record: &Record) { - self.as_ref().log(record); - } - fn flush(&self) { - self.as_ref().flush(); - } -} - -#[cfg(feature = "std")] -impl Log for std::sync::Arc -where - T: ?Sized + Log, -{ - fn enabled(&self, metadata: &Metadata) -> bool { - self.as_ref().enabled(metadata) - } - - fn log(&self, record: &Record) { - self.as_ref().log(record); - } - fn flush(&self) { - self.as_ref().flush(); - } -} - -/// Sets the global maximum log level. -/// -/// Generally, this should only be called by the active logging implementation. -/// -/// Note that `Trace` is the maximum level, because it provides the maximum amount of detail in the emitted logs. -#[inline] -#[cfg(target_has_atomic = "ptr")] -pub fn set_max_level(level: LevelFilter) { - MAX_LOG_LEVEL_FILTER.store(level as usize, Ordering::Relaxed); -} - -/// A thread-unsafe version of [`set_max_level`]. -/// -/// This function is available on all platforms, even those that do not have -/// support for atomics that is needed by [`set_max_level`]. -/// -/// In almost all cases, [`set_max_level`] should be preferred. -/// -/// # Safety -/// -/// This function is only safe to call when it cannot race with any other -/// calls to `set_max_level` or `set_max_level_racy`. -/// -/// This can be upheld by (for example) making sure that **there are no other -/// threads**, and (on embedded) that **interrupts are disabled**. -/// -/// It is safe to use all other logging functions while this function runs -/// (including all logging macros). -/// -/// [`set_max_level`]: fn.set_max_level.html -#[inline] -pub unsafe fn set_max_level_racy(level: LevelFilter) { - // `MAX_LOG_LEVEL_FILTER` uses a `Cell` as the underlying primitive when a - // platform doesn't support `target_has_atomic = "ptr"`, so even though this looks the same - // as `set_max_level` it may have different safety properties. - MAX_LOG_LEVEL_FILTER.store(level as usize, Ordering::Relaxed); -} - -/// Returns the current maximum log level. -/// -/// The [`log!`], [`error!`], [`warn!`], [`info!`], [`debug!`], and [`trace!`] macros check -/// this value and discard any message logged at a higher level. The maximum -/// log level is set by the [`set_max_level`] function. 
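The macros combine two checks before ever calling the logger: the compile-time `STATIC_MAX_LEVEL` bound and the runtime `max_level()` value documented here. Spelled out by hand (illustrative sketch, not part of the vendored source):

```rust
// Illustrative sketch only (not part of the vendored log source).
use log::{Level, LevelFilter};

fn debug_enabled() -> bool {
    // STATIC_MAX_LEVEL is fixed at compile time by the max_level_* features;
    // max_level() is whatever the logging implementation set at runtime.
    Level::Debug <= log::STATIC_MAX_LEVEL && Level::Debug <= log::max_level()
}

fn main() {
    log::set_max_level(LevelFilter::Warn);
    assert!(!debug_enabled());

    log::set_max_level(LevelFilter::Trace);
    assert!(debug_enabled());
}
```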
-/// -/// [`log!`]: macro.log.html -/// [`error!`]: macro.error.html -/// [`warn!`]: macro.warn.html -/// [`info!`]: macro.info.html -/// [`debug!`]: macro.debug.html -/// [`trace!`]: macro.trace.html -/// [`set_max_level`]: fn.set_max_level.html -#[inline(always)] -pub fn max_level() -> LevelFilter { - // Since `LevelFilter` is `repr(usize)`, - // this transmute is sound if and only if `MAX_LOG_LEVEL_FILTER` - // is set to a usize that is a valid discriminant for `LevelFilter`. - // Since `MAX_LOG_LEVEL_FILTER` is private, the only time it's set - // is by `set_max_level` above, i.e. by casting a `LevelFilter` to `usize`. - // So any usize stored in `MAX_LOG_LEVEL_FILTER` is a valid discriminant. - unsafe { mem::transmute(MAX_LOG_LEVEL_FILTER.load(Ordering::Relaxed)) } -} - -/// Sets the global logger to a `Box`. -/// -/// This is a simple convenience wrapper over `set_logger`, which takes a -/// `Box` rather than a `&'static Log`. See the documentation for -/// [`set_logger`] for more details. -/// -/// Requires the `std` feature. -/// -/// # Errors -/// -/// An error is returned if a logger has already been set. -/// -/// [`set_logger`]: fn.set_logger.html -#[cfg(all(feature = "std", target_has_atomic = "ptr"))] -pub fn set_boxed_logger(logger: Box) -> Result<(), SetLoggerError> { - set_logger_inner(|| Box::leak(logger)) -} - -/// Sets the global logger to a `&'static Log`. -/// -/// This function may only be called once in the lifetime of a program. Any log -/// events that occur before the call to `set_logger` completes will be ignored. -/// -/// This function does not typically need to be called manually. Logger -/// implementations should provide an initialization method that installs the -/// logger internally. -/// -/// # Availability -/// -/// This method is available even when the `std` feature is disabled. However, -/// it is currently unavailable on `thumbv6` targets, which lack support for -/// some atomic operations which are used by this function. Even on those -/// targets, [`set_logger_racy`] will be available. -/// -/// # Errors -/// -/// An error is returned if a logger has already been set. 
-/// -/// # Examples -/// -/// ``` -/// use log::{error, info, warn, Record, Level, Metadata, LevelFilter}; -/// -/// static MY_LOGGER: MyLogger = MyLogger; -/// -/// struct MyLogger; -/// -/// impl log::Log for MyLogger { -/// fn enabled(&self, metadata: &Metadata) -> bool { -/// metadata.level() <= Level::Info -/// } -/// -/// fn log(&self, record: &Record) { -/// if self.enabled(record.metadata()) { -/// println!("{} - {}", record.level(), record.args()); -/// } -/// } -/// fn flush(&self) {} -/// } -/// -/// # fn main(){ -/// log::set_logger(&MY_LOGGER).unwrap(); -/// log::set_max_level(LevelFilter::Info); -/// -/// info!("hello log"); -/// warn!("warning"); -/// error!("oops"); -/// # } -/// ``` -/// -/// [`set_logger_racy`]: fn.set_logger_racy.html -#[cfg(target_has_atomic = "ptr")] -pub fn set_logger(logger: &'static dyn Log) -> Result<(), SetLoggerError> { - set_logger_inner(|| logger) -} - -#[cfg(target_has_atomic = "ptr")] -fn set_logger_inner(make_logger: F) -> Result<(), SetLoggerError> -where - F: FnOnce() -> &'static dyn Log, -{ - match STATE.compare_exchange( - UNINITIALIZED, - INITIALIZING, - Ordering::Acquire, - Ordering::Relaxed, - ) { - Ok(UNINITIALIZED) => { - unsafe { - LOGGER = make_logger(); - } - STATE.store(INITIALIZED, Ordering::Release); - Ok(()) - } - Err(INITIALIZING) => { - while STATE.load(Ordering::Relaxed) == INITIALIZING { - std::hint::spin_loop(); - } - Err(SetLoggerError(())) - } - _ => Err(SetLoggerError(())), - } -} - -/// A thread-unsafe version of [`set_logger`]. -/// -/// This function is available on all platforms, even those that do not have -/// support for atomics that is needed by [`set_logger`]. -/// -/// In almost all cases, [`set_logger`] should be preferred. -/// -/// # Safety -/// -/// This function is only safe to call when it cannot race with any other -/// calls to `set_logger` or `set_logger_racy`. -/// -/// This can be upheld by (for example) making sure that **there are no other -/// threads**, and (on embedded) that **interrupts are disabled**. -/// -/// It is safe to use other logging functions while this function runs -/// (including all logging macros). -/// -/// [`set_logger`]: fn.set_logger.html -pub unsafe fn set_logger_racy(logger: &'static dyn Log) -> Result<(), SetLoggerError> { - match STATE.load(Ordering::Acquire) { - UNINITIALIZED => { - LOGGER = logger; - STATE.store(INITIALIZED, Ordering::Release); - Ok(()) - } - INITIALIZING => { - // This is just plain UB, since we were racing another initialization function - unreachable!("set_logger_racy must not be used with other initialization functions") - } - _ => Err(SetLoggerError(())), - } -} - -/// The type returned by [`set_logger`] if [`set_logger`] has already been called. -/// -/// [`set_logger`]: fn.set_logger.html -#[allow(missing_copy_implementations)] -#[derive(Debug)] -pub struct SetLoggerError(()); - -impl fmt::Display for SetLoggerError { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - fmt.write_str(SET_LOGGER_ERROR) - } -} - -// The Error trait is not available in libcore -#[cfg(feature = "std")] -impl error::Error for SetLoggerError {} - -/// The type returned by [`from_str`] when the string doesn't match any of the log levels. 
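The `STATE`/`compare_exchange` machinery above is what makes installation a one-shot operation: the first `set_logger` wins and every later attempt gets `SetLoggerError`. A small illustrative sketch (not part of the vendored source):

```rust
// Illustrative sketch only (not part of the vendored log source).
use log::{LevelFilter, Log, Metadata, Record};

struct Quiet;

impl Log for Quiet {
    fn enabled(&self, _: &Metadata) -> bool { false }
    fn log(&self, _: &Record) {}
    fn flush(&self) {}
}

static FIRST: Quiet = Quiet;
static SECOND: Quiet = Quiet;

fn main() {
    assert!(log::set_logger(&FIRST).is_ok());
    log::set_max_level(LevelFilter::Info);

    // STATE is already INITIALIZED, so this returns SetLoggerError.
    assert!(log::set_logger(&SECOND).is_err());
}
```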
-/// -/// [`from_str`]: https://doc.rust-lang.org/std/str/trait.FromStr.html#tymethod.from_str -#[allow(missing_copy_implementations)] -#[derive(Debug, PartialEq, Eq)] -pub struct ParseLevelError(()); - -impl fmt::Display for ParseLevelError { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - fmt.write_str(LEVEL_PARSE_ERROR) - } -} - -// The Error trait is not available in libcore -#[cfg(feature = "std")] -impl error::Error for ParseLevelError {} - -/// Returns a reference to the logger. -/// -/// If a logger has not been set, a no-op implementation is returned. -pub fn logger() -> &'static dyn Log { - // Acquire memory ordering guarantees that current thread would see any - // memory writes that happened before store of the value - // into `STATE` with memory ordering `Release` or stronger. - // - // Since the value `INITIALIZED` is written only after `LOGGER` was - // initialized, observing it after `Acquire` load here makes both - // write to the `LOGGER` static and initialization of the logger - // internal state synchronized with current thread. - if STATE.load(Ordering::Acquire) != INITIALIZED { - static NOP: NopLogger = NopLogger; - &NOP - } else { - unsafe { LOGGER } - } -} - -// WARNING: this is not part of the crate's public API and is subject to change at any time -#[doc(hidden)] -pub mod __private_api; - -/// The statically resolved maximum log level. -/// -/// See the crate level documentation for information on how to configure this. -/// -/// This value is checked by the log macros, but not by the `Log`ger returned by -/// the [`logger`] function. Code that manually calls functions on that value -/// should compare the level against this value. -/// -/// [`logger`]: fn.logger.html -pub const STATIC_MAX_LEVEL: LevelFilter = match cfg!(debug_assertions) { - false if cfg!(feature = "release_max_level_off") => LevelFilter::Off, - false if cfg!(feature = "release_max_level_error") => LevelFilter::Error, - false if cfg!(feature = "release_max_level_warn") => LevelFilter::Warn, - false if cfg!(feature = "release_max_level_info") => LevelFilter::Info, - false if cfg!(feature = "release_max_level_debug") => LevelFilter::Debug, - false if cfg!(feature = "release_max_level_trace") => LevelFilter::Trace, - _ if cfg!(feature = "max_level_off") => LevelFilter::Off, - _ if cfg!(feature = "max_level_error") => LevelFilter::Error, - _ if cfg!(feature = "max_level_warn") => LevelFilter::Warn, - _ if cfg!(feature = "max_level_info") => LevelFilter::Info, - _ if cfg!(feature = "max_level_debug") => LevelFilter::Debug, - _ => LevelFilter::Trace, -}; - -#[cfg(test)] -mod tests { - use super::{Level, LevelFilter, ParseLevelError, STATIC_MAX_LEVEL}; - - #[test] - fn test_levelfilter_from_str() { - let tests = [ - ("off", Ok(LevelFilter::Off)), - ("error", Ok(LevelFilter::Error)), - ("warn", Ok(LevelFilter::Warn)), - ("info", Ok(LevelFilter::Info)), - ("debug", Ok(LevelFilter::Debug)), - ("trace", Ok(LevelFilter::Trace)), - ("OFF", Ok(LevelFilter::Off)), - ("ERROR", Ok(LevelFilter::Error)), - ("WARN", Ok(LevelFilter::Warn)), - ("INFO", Ok(LevelFilter::Info)), - ("DEBUG", Ok(LevelFilter::Debug)), - ("TRACE", Ok(LevelFilter::Trace)), - ("asdf", Err(ParseLevelError(()))), - ]; - for &(s, ref expected) in &tests { - assert_eq!(expected, &s.parse()); - } - } - - #[test] - fn test_level_from_str() { - let tests = [ - ("OFF", Err(ParseLevelError(()))), - ("error", Ok(Level::Error)), - ("warn", Ok(Level::Warn)), - ("info", Ok(Level::Info)), - ("debug", Ok(Level::Debug)), - ("trace", 
Ok(Level::Trace)), - ("ERROR", Ok(Level::Error)), - ("WARN", Ok(Level::Warn)), - ("INFO", Ok(Level::Info)), - ("DEBUG", Ok(Level::Debug)), - ("TRACE", Ok(Level::Trace)), - ("asdf", Err(ParseLevelError(()))), - ]; - for &(s, ref expected) in &tests { - assert_eq!(expected, &s.parse()); - } - } - - #[test] - fn test_level_as_str() { - let tests = &[ - (Level::Error, "ERROR"), - (Level::Warn, "WARN"), - (Level::Info, "INFO"), - (Level::Debug, "DEBUG"), - (Level::Trace, "TRACE"), - ]; - for (input, expected) in tests { - assert_eq!(*expected, input.as_str()); - } - } - - #[test] - fn test_level_show() { - assert_eq!("INFO", Level::Info.to_string()); - assert_eq!("ERROR", Level::Error.to_string()); - } - - #[test] - fn test_levelfilter_show() { - assert_eq!("OFF", LevelFilter::Off.to_string()); - assert_eq!("ERROR", LevelFilter::Error.to_string()); - } - - #[test] - fn test_cross_cmp() { - assert!(Level::Debug > LevelFilter::Error); - assert!(LevelFilter::Warn < Level::Trace); - assert!(LevelFilter::Off < Level::Error); - } - - #[test] - fn test_cross_eq() { - assert!(Level::Error == LevelFilter::Error); - assert!(LevelFilter::Off != Level::Error); - assert!(Level::Trace == LevelFilter::Trace); - } - - #[test] - fn test_to_level() { - assert_eq!(Some(Level::Error), LevelFilter::Error.to_level()); - assert_eq!(None, LevelFilter::Off.to_level()); - assert_eq!(Some(Level::Debug), LevelFilter::Debug.to_level()); - } - - #[test] - fn test_to_level_filter() { - assert_eq!(LevelFilter::Error, Level::Error.to_level_filter()); - assert_eq!(LevelFilter::Trace, Level::Trace.to_level_filter()); - } - - #[test] - fn test_level_filter_as_str() { - let tests = &[ - (LevelFilter::Off, "OFF"), - (LevelFilter::Error, "ERROR"), - (LevelFilter::Warn, "WARN"), - (LevelFilter::Info, "INFO"), - (LevelFilter::Debug, "DEBUG"), - (LevelFilter::Trace, "TRACE"), - ]; - for (input, expected) in tests { - assert_eq!(*expected, input.as_str()); - } - } - - #[test] - fn test_level_up() { - let info = Level::Info; - let up = info.increment_severity(); - assert_eq!(up, Level::Debug); - - let trace = Level::Trace; - let up = trace.increment_severity(); - // trace is already highest level - assert_eq!(up, trace); - } - - #[test] - fn test_level_filter_up() { - let info = LevelFilter::Info; - let up = info.increment_severity(); - assert_eq!(up, LevelFilter::Debug); - - let trace = LevelFilter::Trace; - let up = trace.increment_severity(); - // trace is already highest level - assert_eq!(up, trace); - } - - #[test] - fn test_level_down() { - let info = Level::Info; - let down = info.decrement_severity(); - assert_eq!(down, Level::Warn); - - let error = Level::Error; - let down = error.decrement_severity(); - // error is already lowest level - assert_eq!(down, error); - } - - #[test] - fn test_level_filter_down() { - let info = LevelFilter::Info; - let down = info.decrement_severity(); - assert_eq!(down, LevelFilter::Warn); - - let error = LevelFilter::Error; - let down = error.decrement_severity(); - assert_eq!(down, LevelFilter::Off); - // Off is already the lowest - assert_eq!(down.decrement_severity(), down); - } - - #[test] - #[cfg_attr(not(debug_assertions), ignore)] - fn test_static_max_level_debug() { - if cfg!(feature = "max_level_off") { - assert_eq!(STATIC_MAX_LEVEL, LevelFilter::Off); - } else if cfg!(feature = "max_level_error") { - assert_eq!(STATIC_MAX_LEVEL, LevelFilter::Error); - } else if cfg!(feature = "max_level_warn") { - assert_eq!(STATIC_MAX_LEVEL, LevelFilter::Warn); - } else if cfg!(feature = 
"max_level_info") { - assert_eq!(STATIC_MAX_LEVEL, LevelFilter::Info); - } else if cfg!(feature = "max_level_debug") { - assert_eq!(STATIC_MAX_LEVEL, LevelFilter::Debug); - } else { - assert_eq!(STATIC_MAX_LEVEL, LevelFilter::Trace); - } - } - - #[test] - #[cfg_attr(debug_assertions, ignore)] - fn test_static_max_level_release() { - if cfg!(feature = "release_max_level_off") { - assert_eq!(STATIC_MAX_LEVEL, LevelFilter::Off); - } else if cfg!(feature = "release_max_level_error") { - assert_eq!(STATIC_MAX_LEVEL, LevelFilter::Error); - } else if cfg!(feature = "release_max_level_warn") { - assert_eq!(STATIC_MAX_LEVEL, LevelFilter::Warn); - } else if cfg!(feature = "release_max_level_info") { - assert_eq!(STATIC_MAX_LEVEL, LevelFilter::Info); - } else if cfg!(feature = "release_max_level_debug") { - assert_eq!(STATIC_MAX_LEVEL, LevelFilter::Debug); - } else if cfg!(feature = "release_max_level_trace") { - assert_eq!(STATIC_MAX_LEVEL, LevelFilter::Trace); - } else if cfg!(feature = "max_level_off") { - assert_eq!(STATIC_MAX_LEVEL, LevelFilter::Off); - } else if cfg!(feature = "max_level_error") { - assert_eq!(STATIC_MAX_LEVEL, LevelFilter::Error); - } else if cfg!(feature = "max_level_warn") { - assert_eq!(STATIC_MAX_LEVEL, LevelFilter::Warn); - } else if cfg!(feature = "max_level_info") { - assert_eq!(STATIC_MAX_LEVEL, LevelFilter::Info); - } else if cfg!(feature = "max_level_debug") { - assert_eq!(STATIC_MAX_LEVEL, LevelFilter::Debug); - } else { - assert_eq!(STATIC_MAX_LEVEL, LevelFilter::Trace); - } - } - - #[test] - #[cfg(feature = "std")] - fn test_error_trait() { - use super::SetLoggerError; - let e = SetLoggerError(()); - assert_eq!( - &e.to_string(), - "attempted to set a logger after the logging system \ - was already initialized" - ); - } - - #[test] - fn test_metadata_builder() { - use super::MetadataBuilder; - let target = "myApp"; - let metadata_test = MetadataBuilder::new() - .level(Level::Debug) - .target(target) - .build(); - assert_eq!(metadata_test.level(), Level::Debug); - assert_eq!(metadata_test.target(), "myApp"); - } - - #[test] - fn test_metadata_convenience_builder() { - use super::Metadata; - let target = "myApp"; - let metadata_test = Metadata::builder() - .level(Level::Debug) - .target(target) - .build(); - assert_eq!(metadata_test.level(), Level::Debug); - assert_eq!(metadata_test.target(), "myApp"); - } - - #[test] - fn test_record_builder() { - use super::{MetadataBuilder, RecordBuilder}; - let target = "myApp"; - let metadata = MetadataBuilder::new().target(target).build(); - let fmt_args = format_args!("hello"); - let record_test = RecordBuilder::new() - .args(fmt_args) - .metadata(metadata) - .module_path(Some("foo")) - .file(Some("bar")) - .line(Some(30)) - .build(); - assert_eq!(record_test.metadata().target(), "myApp"); - assert_eq!(record_test.module_path(), Some("foo")); - assert_eq!(record_test.file(), Some("bar")); - assert_eq!(record_test.line(), Some(30)); - } - - #[test] - fn test_record_convenience_builder() { - use super::{Metadata, Record}; - let target = "myApp"; - let metadata = Metadata::builder().target(target).build(); - let fmt_args = format_args!("hello"); - let record_test = Record::builder() - .args(fmt_args) - .metadata(metadata) - .module_path(Some("foo")) - .file(Some("bar")) - .line(Some(30)) - .build(); - assert_eq!(record_test.target(), "myApp"); - assert_eq!(record_test.module_path(), Some("foo")); - assert_eq!(record_test.file(), Some("bar")); - assert_eq!(record_test.line(), Some(30)); - } - - #[test] - fn 
test_record_complete_builder() { - use super::{Level, Record}; - let target = "myApp"; - let record_test = Record::builder() - .module_path(Some("foo")) - .file(Some("bar")) - .line(Some(30)) - .target(target) - .level(Level::Error) - .build(); - assert_eq!(record_test.target(), "myApp"); - assert_eq!(record_test.level(), Level::Error); - assert_eq!(record_test.module_path(), Some("foo")); - assert_eq!(record_test.file(), Some("bar")); - assert_eq!(record_test.line(), Some(30)); - } - - #[test] - #[cfg(feature = "kv")] - fn test_record_key_values_builder() { - use super::Record; - use crate::kv::{self, VisitSource}; - - struct TestVisitSource { - seen_pairs: usize, - } - - impl<'kvs> VisitSource<'kvs> for TestVisitSource { - fn visit_pair( - &mut self, - _: kv::Key<'kvs>, - _: kv::Value<'kvs>, - ) -> Result<(), kv::Error> { - self.seen_pairs += 1; - Ok(()) - } - } - - let kvs: &[(&str, i32)] = &[("a", 1), ("b", 2)]; - let record_test = Record::builder().key_values(&kvs).build(); - - let mut visitor = TestVisitSource { seen_pairs: 0 }; - - record_test.key_values().visit(&mut visitor).unwrap(); - - assert_eq!(2, visitor.seen_pairs); - } - - #[test] - #[cfg(feature = "kv")] - fn test_record_key_values_get_coerce() { - use super::Record; - - let kvs: &[(&str, &str)] = &[("a", "1"), ("b", "2")]; - let record = Record::builder().key_values(&kvs).build(); - - assert_eq!( - "2", - record - .key_values() - .get("b".into()) - .expect("missing key") - .to_borrowed_str() - .expect("invalid value") - ); - } - - // Test that the `impl Log for Foo` blocks work - // This test mostly operates on a type level, so failures will be compile errors - #[test] - fn test_foreign_impl() { - use super::Log; - #[cfg(feature = "std")] - use std::sync::Arc; - - fn assert_is_log() {} - - assert_is_log::<&dyn Log>(); - - #[cfg(feature = "std")] - assert_is_log::>(); - - #[cfg(feature = "std")] - assert_is_log::>(); - - // Assert these statements for all T: Log + ?Sized - #[allow(unused)] - fn forall() { - #[cfg(feature = "std")] - assert_is_log::>(); - - assert_is_log::<&T>(); - - #[cfg(feature = "std")] - assert_is_log::>(); - } - } -} diff --git a/vendor/log/src/macros.rs b/vendor/log/src/macros.rs deleted file mode 100644 index 14e4ac64ba72a8..00000000000000 --- a/vendor/log/src/macros.rs +++ /dev/null @@ -1,579 +0,0 @@ -// Copyright 2014-2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -/// The standard logging macro. -/// -/// This macro will generically log with the specified `Level` and `format!` -/// based argument list. -/// -/// ``` -/// use log::{log, Level}; -/// -/// let data = (42, "Forty-two"); -/// let private_data = "private"; -/// -/// log!(Level::Error, "Received errors: {}, {}", data.0, data.1); -/// ``` -/// -/// Optionally, you can specify a `target` argument to attach a specific target -/// to the log record. By default, the target is the module path of the caller. 
-/// -/// ``` -/// use log::{log, Level}; -/// -/// let data = (42, "Forty-two"); -/// let private_data = "private"; -/// -/// log!( -/// target: "app_events", -/// Level::Error, -/// "Received errors: {}, {}", -/// data.0, data.1 -/// ); -/// ``` -/// -/// And optionally, you can specify a `logger` argument to use a specific logger -/// instead of the default global logger. -/// -/// ``` -/// # struct MyLogger {} -/// # impl Log for MyLogger { -/// # fn enabled(&self, _metadata: &log::Metadata) -> bool { -/// # false -/// # } -/// # fn log(&self, _record: &log::Record) {} -/// # fn flush(&self) {} -/// # } -/// use log::{log, Level, Log}; -/// -/// let data = (42, "Forty-two"); -/// let private_data = "private"; -/// -/// let my_logger = MyLogger {}; -/// log!( -/// logger: my_logger, -/// Level::Error, -/// "Received errors: {}, {}", -/// data.0, data.1 -/// ); -/// ``` -/// -/// The `logger` argument accepts a value that implements the `Log` trait. The value -/// will be borrowed within the macro. -/// -/// Note that the global level set via Cargo features, or through `set_max_level` will -/// still apply, even when a custom logger is supplied with the `logger` argument. -#[macro_export] -#[clippy::format_args] -macro_rules! log { - // log!(logger: my_logger, target: "my_target", Level::Info, "a {} event", "log"); - (logger: $logger:expr, target: $target:expr, $lvl:expr, $($arg:tt)+) => ({ - $crate::__log!( - logger: $crate::__log_logger!($logger), - target: $target, - $lvl, - $($arg)+ - ) - }); - - // log!(logger: my_logger, Level::Info, "a log event") - (logger: $logger:expr, $lvl:expr, $($arg:tt)+) => ({ - $crate::__log!( - logger: $crate::__log_logger!($logger), - target: $crate::__private_api::module_path!(), - $lvl, - $($arg)+ - ) - }); - - // log!(target: "my_target", Level::Info, "a log event") - (target: $target:expr, $lvl:expr, $($arg:tt)+) => ({ - $crate::__log!( - logger: $crate::__log_logger!(__log_global_logger), - target: $target, - $lvl, - $($arg)+ - ) - }); - - // log!(Level::Info, "a log event") - ($lvl:expr, $($arg:tt)+) => ({ - $crate::__log!( - logger: $crate::__log_logger!(__log_global_logger), - target: $crate::__private_api::module_path!(), - $lvl, - $($arg)+ - ) - }); -} - -#[doc(hidden)] -#[macro_export] -macro_rules! __log { - // log!(logger: my_logger, target: "my_target", Level::Info, key1:? = 42, key2 = true; "a {} event", "log"); - (logger: $logger:expr, target: $target:expr, $lvl:expr, $($key:tt $(:$capture:tt)? $(= $value:expr)?),+; $($arg:tt)+) => ({ - let lvl = $lvl; - if lvl <= $crate::STATIC_MAX_LEVEL && lvl <= $crate::max_level() { - $crate::__private_api::log( - $logger, - $crate::__private_api::format_args!($($arg)+), - lvl, - &($target, $crate::__private_api::module_path!(), $crate::__private_api::loc()), - &[$(($crate::__log_key!($key), $crate::__log_value!($key $(:$capture)* = $($value)*))),+] as &[_], - ); - } - }); - - // log!(logger: my_logger, target: "my_target", Level::Info, "a {} event", "log"); - (logger: $logger:expr, target: $target:expr, $lvl:expr, $($arg:tt)+) => ({ - let lvl = $lvl; - if lvl <= $crate::STATIC_MAX_LEVEL && lvl <= $crate::max_level() { - $crate::__private_api::log( - $logger, - $crate::__private_api::format_args!($($arg)+), - lvl, - &($target, $crate::__private_api::module_path!(), $crate::__private_api::loc()), - (), - ); - } - }); -} - -/// Logs a message at the error level. 
-/// -/// # Examples -/// -/// ``` -/// use log::error; -/// -/// # let my_logger = log::__private_api::GlobalLogger; -/// let (err_info, port) = ("No connection", 22); -/// -/// error!("Error: {err_info} on port {port}"); -/// error!(target: "app_events", "App Error: {err_info}, Port: {port}"); -/// error!(logger: my_logger, "App Error: {err_info}, Port: {port}"); -/// ``` -#[macro_export] -#[clippy::format_args] -macro_rules! error { - // error!(logger: my_logger, target: "my_target", key1 = 42, key2 = true; "a {} event", "log") - // error!(logger: my_logger, target: "my_target", "a {} event", "log") - (logger: $logger:expr, target: $target:expr, $($arg:tt)+) => ({ - $crate::log!(logger: $crate::__log_logger!($logger), target: $target, $crate::Level::Error, $($arg)+) - }); - - // error!(logger: my_logger, key1 = 42, key2 = true; "a {} event", "log") - // error!(logger: my_logger, "a {} event", "log") - (logger: $logger:expr, $($arg:tt)+) => ({ - $crate::log!(logger: $crate::__log_logger!($logger), $crate::Level::Error, $($arg)+) - }); - - // error!(target: "my_target", key1 = 42, key2 = true; "a {} event", "log") - // error!(target: "my_target", "a {} event", "log") - (target: $target:expr, $($arg:tt)+) => ({ - $crate::log!(target: $target, $crate::Level::Error, $($arg)+) - }); - - // error!("a {} event", "log") - ($($arg:tt)+) => ($crate::log!($crate::Level::Error, $($arg)+)) -} - -/// Logs a message at the warn level. -/// -/// # Examples -/// -/// ``` -/// use log::warn; -/// -/// # let my_logger = log::__private_api::GlobalLogger; -/// let warn_description = "Invalid Input"; -/// -/// warn!("Warning! {warn_description}!"); -/// warn!(target: "input_events", "App received warning: {warn_description}"); -/// warn!(logger: my_logger, "App received warning: {warn_description}"); -/// ``` -#[macro_export] -#[clippy::format_args] -macro_rules! warn { - // warn!(logger: my_logger, target: "my_target", key1 = 42, key2 = true; "a {} event", "log") - // warn!(logger: my_logger, target: "my_target", "a {} event", "log") - (logger: $logger:expr, target: $target:expr, $($arg:tt)+) => ({ - $crate::log!(logger: $crate::__log_logger!($logger), target: $target, $crate::Level::Warn, $($arg)+) - }); - - // warn!(logger: my_logger, key1 = 42, key2 = true; "a {} event", "log") - // warn!(logger: my_logger, "a {} event", "log") - (logger: $logger:expr, $($arg:tt)+) => ({ - $crate::log!(logger: $crate::__log_logger!($logger), $crate::Level::Warn, $($arg)+) - }); - - // warn!(target: "my_target", key1 = 42, key2 = true; "a {} event", "log") - // warn!(target: "my_target", "a {} event", "log") - (target: $target:expr, $($arg:tt)+) => ({ - $crate::log!(target: $target, $crate::Level::Warn, $($arg)+) - }); - - // warn!("a {} event", "log") - ($($arg:tt)+) => ($crate::log!($crate::Level::Warn, $($arg)+)) -} - -/// Logs a message at the info level. -/// -/// # Examples -/// -/// ``` -/// use log::info; -/// -/// # let my_logger = log::__private_api::GlobalLogger; -/// # struct Connection { port: u32, speed: f32 } -/// let conn_info = Connection { port: 40, speed: 3.20 }; -/// -/// info!("Connected to port {} at {} Mb/s", conn_info.port, conn_info.speed); -/// info!( -/// target: "connection_events", -/// "Successful connection, port: {}, speed: {}", -/// conn_info.port, conn_info.speed -/// ); -/// info!( -/// logger: my_logger, -/// "Successful connection, port: {}, speed: {}", -/// conn_info.port, conn_info.speed -/// ); -/// ``` -#[macro_export] -#[clippy::format_args] -macro_rules! 
info { - // info!(logger: my_logger, target: "my_target", key1 = 42, key2 = true; "a {} event", "log") - // info!(logger: my_logger, target: "my_target", "a {} event", "log") - (logger: $logger:expr, target: $target:expr, $($arg:tt)+) => ({ - $crate::log!(logger: $crate::__log_logger!($logger), target: $target, $crate::Level::Info, $($arg)+) - }); - - // info!(logger: my_logger, key1 = 42, key2 = true; "a {} event", "log") - // info!(logger: my_logger, "a {} event", "log") - (logger: $logger:expr, $($arg:tt)+) => ({ - $crate::log!(logger: $crate::__log_logger!($logger), $crate::Level::Info, $($arg)+) - }); - - // info!(target: "my_target", key1 = 42, key2 = true; "a {} event", "log") - // info!(target: "my_target", "a {} event", "log") - (target: $target:expr, $($arg:tt)+) => ({ - $crate::log!(target: $target, $crate::Level::Info, $($arg)+) - }); - - // info!("a {} event", "log") - ($($arg:tt)+) => ($crate::log!($crate::Level::Info, $($arg)+)) -} - -/// Logs a message at the debug level. -/// -/// # Examples -/// -/// ``` -/// use log::debug; -/// -/// # let my_logger = log::__private_api::GlobalLogger; -/// # struct Position { x: f32, y: f32 } -/// let pos = Position { x: 3.234, y: -1.223 }; -/// -/// debug!("New position: x: {}, y: {}", pos.x, pos.y); -/// debug!(target: "app_events", "New position: x: {}, y: {}", pos.x, pos.y); -/// debug!(logger: my_logger, "New position: x: {}, y: {}", pos.x, pos.y); -/// ``` -#[macro_export] -#[clippy::format_args] -macro_rules! debug { - // debug!(logger: my_logger, target: "my_target", key1 = 42, key2 = true; "a {} event", "log") - // debug!(logger: my_logger, target: "my_target", "a {} event", "log") - (logger: $logger:expr, target: $target:expr, $($arg:tt)+) => ({ - $crate::log!(logger: $crate::__log_logger!($logger), target: $target, $crate::Level::Debug, $($arg)+) - }); - - // debug!(logger: my_logger, key1 = 42, key2 = true; "a {} event", "log") - // debug!(logger: my_logger, "a {} event", "log") - (logger: $logger:expr, $($arg:tt)+) => ({ - $crate::log!(logger: $crate::__log_logger!($logger), $crate::Level::Debug, $($arg)+) - }); - - // debug!(target: "my_target", key1 = 42, key2 = true; "a {} event", "log") - // debug!(target: "my_target", "a {} event", "log") - (target: $target:expr, $($arg:tt)+) => ({ - $crate::log!(target: $target, $crate::Level::Debug, $($arg)+) - }); - - // debug!("a {} event", "log") - ($($arg:tt)+) => ($crate::log!($crate::Level::Debug, $($arg)+)) -} - -/// Logs a message at the trace level. -/// -/// # Examples -/// -/// ``` -/// use log::trace; -/// -/// # let my_logger = log::__private_api::GlobalLogger; -/// # struct Position { x: f32, y: f32 } -/// let pos = Position { x: 3.234, y: -1.223 }; -/// -/// trace!("Position is: x: {}, y: {}", pos.x, pos.y); -/// trace!(target: "app_events", "x is {} and y is {}", -/// if pos.x >= 0.0 { "positive" } else { "negative" }, -/// if pos.y >= 0.0 { "positive" } else { "negative" }); -/// trace!(logger: my_logger, "x is {} and y is {}", -/// if pos.x >= 0.0 { "positive" } else { "negative" }, -/// if pos.y >= 0.0 { "positive" } else { "negative" }); -/// ``` -#[macro_export] -#[clippy::format_args] -macro_rules! 
trace { - // trace!(logger: my_logger, target: "my_target", key1 = 42, key2 = true; "a {} event", "log") - // trace!(logger: my_logger, target: "my_target", "a {} event", "log") - (logger: $logger:expr, target: $target:expr, $($arg:tt)+) => ({ - $crate::log!(logger: $crate::__log_logger!($logger), target: $target, $crate::Level::Trace, $($arg)+) - }); - - // trace!(logger: my_logger, key1 = 42, key2 = true; "a {} event", "log") - // trace!(logger: my_logger, "a {} event", "log") - (logger: $logger:expr, $($arg:tt)+) => ({ - $crate::log!(logger: $crate::__log_logger!($logger), $crate::Level::Trace, $($arg)+) - }); - - // trace!(target: "my_target", key1 = 42, key2 = true; "a {} event", "log") - // trace!(target: "my_target", "a {} event", "log") - (target: $target:expr, $($arg:tt)+) => ({ - $crate::log!(target: $target, $crate::Level::Trace, $($arg)+) - }); - - // trace!("a {} event", "log") - ($($arg:tt)+) => ($crate::log!($crate::Level::Trace, $($arg)+)) -} - -/// Determines if a message logged at the specified level in that module will -/// be logged. -/// -/// This can be used to avoid expensive computation of log message arguments if -/// the message would be ignored anyway. -/// -/// # Examples -/// -/// ``` -/// use log::{debug, log_enabled, Level}; -/// -/// # struct Data { x: u32, y: u32 } -/// # fn expensive_call() -> Data { Data { x: 0, y: 0 } } -/// # let my_logger = log::__private_api::GlobalLogger; -/// if log_enabled!(Level::Debug) { -/// let data = expensive_call(); -/// debug!("expensive debug data: {} {}", data.x, data.y); -/// } -/// -/// if log_enabled!(target: "Global", Level::Debug) { -/// let data = expensive_call(); -/// debug!(target: "Global", "expensive debug data: {} {}", data.x, data.y); -/// } -/// -/// if log_enabled!(logger: my_logger, Level::Debug) { -/// let data = expensive_call(); -/// debug!(target: "Global", "expensive debug data: {} {}", data.x, data.y); -/// } -/// ``` -/// -/// This macro accepts the same `target` and `logger` arguments as [`macro@log`]. -#[macro_export] -macro_rules! log_enabled { - // log_enabled!(logger: my_logger, target: "my_target", Level::Info) - (logger: $logger:expr, target: $target:expr, $lvl:expr) => ({ - $crate::__log_enabled!(logger: $crate::__log_logger!($logger), target: $target, $lvl) - }); - - // log_enabled!(logger: my_logger, Level::Info) - (logger: $logger:expr, $lvl:expr) => ({ - $crate::__log_enabled!(logger: $crate::__log_logger!($logger), target: $crate::__private_api::module_path!(), $lvl) - }); - - // log_enabled!(target: "my_target", Level::Info) - (target: $target:expr, $lvl:expr) => ({ - $crate::__log_enabled!(logger: $crate::__log_logger!(__log_global_logger), target: $target, $lvl) - }); - - // log_enabled!(Level::Info) - ($lvl:expr) => ({ - $crate::__log_enabled!(logger: $crate::__log_logger!(__log_global_logger), target: $crate::__private_api::module_path!(), $lvl) - }); -} - -#[doc(hidden)] -#[macro_export] -macro_rules! __log_enabled { - // log_enabled!(logger: my_logger, target: "my_target", Level::Info) - (logger: $logger:expr, target: $target:expr, $lvl:expr) => {{ - let lvl = $lvl; - lvl <= $crate::STATIC_MAX_LEVEL - && lvl <= $crate::max_level() - && $crate::__private_api::enabled($logger, lvl, $target) - }}; -} - -// Determine the logger to use, and whether to take it by-value or by reference - -#[doc(hidden)] -#[macro_export] -macro_rules! 
__log_logger { - (__log_global_logger) => {{ - $crate::__private_api::GlobalLogger - }}; - - ($logger:expr) => {{ - &($logger) - }}; -} - -// These macros use a pattern of #[cfg]s to produce nicer error -// messages when log features aren't available - -#[doc(hidden)] -#[macro_export] -#[cfg(feature = "kv")] -macro_rules! __log_key { - // key1 = 42 - ($($args:ident)*) => { - $crate::__private_api::stringify!($($args)*) - }; - // "key1" = 42 - ($($args:expr)*) => { - $($args)* - }; -} - -#[doc(hidden)] -#[macro_export] -#[cfg(not(feature = "kv"))] -macro_rules! __log_key { - ($($args:tt)*) => { - compile_error!("key value support requires the `kv` feature of `log`") - }; -} - -#[doc(hidden)] -#[macro_export] -#[cfg(feature = "kv")] -macro_rules! __log_value { - // Entrypoint - ($key:tt = $args:expr) => { - $crate::__log_value!(($args):value) - }; - ($key:tt :$capture:tt = $args:expr) => { - $crate::__log_value!(($args):$capture) - }; - ($key:ident =) => { - $crate::__log_value!(($key):value) - }; - ($key:ident :$capture:tt =) => { - $crate::__log_value!(($key):$capture) - }; - // ToValue - (($args:expr):value) => { - $crate::__private_api::capture_to_value(&&$args) - }; - // Debug - (($args:expr):?) => { - $crate::__private_api::capture_debug(&&$args) - }; - (($args:expr):debug) => { - $crate::__private_api::capture_debug(&&$args) - }; - // Display - (($args:expr):%) => { - $crate::__private_api::capture_display(&&$args) - }; - (($args:expr):display) => { - $crate::__private_api::capture_display(&&$args) - }; - //Error - (($args:expr):err) => { - $crate::__log_value_error!($args) - }; - // sval::Value - (($args:expr):sval) => { - $crate::__log_value_sval!($args) - }; - // serde::Serialize - (($args:expr):serde) => { - $crate::__log_value_serde!($args) - }; -} - -#[doc(hidden)] -#[macro_export] -#[cfg(not(feature = "kv"))] -macro_rules! __log_value { - ($($args:tt)*) => { - compile_error!("key value support requires the `kv` feature of `log`") - }; -} - -#[doc(hidden)] -#[macro_export] -#[cfg(feature = "kv_sval")] -macro_rules! __log_value_sval { - ($args:expr) => { - $crate::__private_api::capture_sval(&&$args) - }; -} - -#[doc(hidden)] -#[macro_export] -#[cfg(not(feature = "kv_sval"))] -macro_rules! __log_value_sval { - ($args:expr) => { - compile_error!("capturing values as `sval::Value` requites the `kv_sval` feature of `log`") - }; -} - -#[doc(hidden)] -#[macro_export] -#[cfg(feature = "kv_serde")] -macro_rules! __log_value_serde { - ($args:expr) => { - $crate::__private_api::capture_serde(&&$args) - }; -} - -#[doc(hidden)] -#[macro_export] -#[cfg(not(feature = "kv_serde"))] -macro_rules! __log_value_serde { - ($args:expr) => { - compile_error!( - "capturing values as `serde::Serialize` requites the `kv_serde` feature of `log`" - ) - }; -} - -#[doc(hidden)] -#[macro_export] -#[cfg(feature = "kv_std")] -macro_rules! __log_value_error { - ($args:expr) => { - $crate::__private_api::capture_error(&$args) - }; -} - -#[doc(hidden)] -#[macro_export] -#[cfg(not(feature = "kv_std"))] -macro_rules! 
__log_value_error { - ($args:expr) => { - compile_error!( - "capturing values as `std::error::Error` requites the `kv_std` feature of `log`" - ) - }; -} diff --git a/vendor/log/src/serde.rs b/vendor/log/src/serde.rs deleted file mode 100644 index db732395bd1497..00000000000000 --- a/vendor/log/src/serde.rs +++ /dev/null @@ -1,397 +0,0 @@ -#![cfg(feature = "serde")] - -use serde::de::{ - Deserialize, DeserializeSeed, Deserializer, EnumAccess, Error, Unexpected, VariantAccess, - Visitor, -}; -use serde::ser::{Serialize, Serializer}; - -use crate::{Level, LevelFilter, LOG_LEVEL_NAMES}; - -use std::fmt; -use std::str::{self, FromStr}; - -// The Deserialize impls are handwritten to be case-insensitive using FromStr. - -impl Serialize for Level { - fn serialize(&self, serializer: S) -> Result - where - S: Serializer, - { - match *self { - Level::Error => serializer.serialize_unit_variant("Level", 0, "ERROR"), - Level::Warn => serializer.serialize_unit_variant("Level", 1, "WARN"), - Level::Info => serializer.serialize_unit_variant("Level", 2, "INFO"), - Level::Debug => serializer.serialize_unit_variant("Level", 3, "DEBUG"), - Level::Trace => serializer.serialize_unit_variant("Level", 4, "TRACE"), - } - } -} - -impl<'de> Deserialize<'de> for Level { - fn deserialize(deserializer: D) -> Result - where - D: Deserializer<'de>, - { - struct LevelIdentifier; - - impl<'de> Visitor<'de> for LevelIdentifier { - type Value = Level; - - fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("log level") - } - - fn visit_u64(self, v: u64) -> Result - where - E: Error, - { - let variant = LOG_LEVEL_NAMES[1..] - .get(v as usize) - .ok_or_else(|| Error::invalid_value(Unexpected::Unsigned(v), &self))?; - - self.visit_str(variant) - } - - fn visit_str(self, s: &str) -> Result - where - E: Error, - { - // Case-insensitive. - FromStr::from_str(s).map_err(|_| Error::unknown_variant(s, &LOG_LEVEL_NAMES[1..])) - } - - fn visit_bytes(self, value: &[u8]) -> Result - where - E: Error, - { - let variant = str::from_utf8(value) - .map_err(|_| Error::invalid_value(Unexpected::Bytes(value), &self))?; - - self.visit_str(variant) - } - } - - impl<'de> DeserializeSeed<'de> for LevelIdentifier { - type Value = Level; - - fn deserialize(self, deserializer: D) -> Result - where - D: Deserializer<'de>, - { - deserializer.deserialize_identifier(LevelIdentifier) - } - } - - struct LevelEnum; - - impl<'de> Visitor<'de> for LevelEnum { - type Value = Level; - - fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("log level") - } - - fn visit_enum(self, value: A) -> Result - where - A: EnumAccess<'de>, - { - let (level, variant) = value.variant_seed(LevelIdentifier)?; - // Every variant is a unit variant. 
- variant.unit_variant()?; - Ok(level) - } - } - - deserializer.deserialize_enum("Level", &LOG_LEVEL_NAMES[1..], LevelEnum) - } -} - -impl Serialize for LevelFilter { - fn serialize(&self, serializer: S) -> Result - where - S: Serializer, - { - match *self { - LevelFilter::Off => serializer.serialize_unit_variant("LevelFilter", 0, "OFF"), - LevelFilter::Error => serializer.serialize_unit_variant("LevelFilter", 1, "ERROR"), - LevelFilter::Warn => serializer.serialize_unit_variant("LevelFilter", 2, "WARN"), - LevelFilter::Info => serializer.serialize_unit_variant("LevelFilter", 3, "INFO"), - LevelFilter::Debug => serializer.serialize_unit_variant("LevelFilter", 4, "DEBUG"), - LevelFilter::Trace => serializer.serialize_unit_variant("LevelFilter", 5, "TRACE"), - } - } -} - -impl<'de> Deserialize<'de> for LevelFilter { - fn deserialize(deserializer: D) -> Result - where - D: Deserializer<'de>, - { - struct LevelFilterIdentifier; - - impl<'de> Visitor<'de> for LevelFilterIdentifier { - type Value = LevelFilter; - - fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("log level filter") - } - - fn visit_u64(self, v: u64) -> Result - where - E: Error, - { - let variant = LOG_LEVEL_NAMES - .get(v as usize) - .ok_or_else(|| Error::invalid_value(Unexpected::Unsigned(v), &self))?; - - self.visit_str(variant) - } - - fn visit_str(self, s: &str) -> Result - where - E: Error, - { - // Case-insensitive. - FromStr::from_str(s).map_err(|_| Error::unknown_variant(s, &LOG_LEVEL_NAMES)) - } - - fn visit_bytes(self, value: &[u8]) -> Result - where - E: Error, - { - let variant = str::from_utf8(value) - .map_err(|_| Error::invalid_value(Unexpected::Bytes(value), &self))?; - - self.visit_str(variant) - } - } - - impl<'de> DeserializeSeed<'de> for LevelFilterIdentifier { - type Value = LevelFilter; - - fn deserialize(self, deserializer: D) -> Result - where - D: Deserializer<'de>, - { - deserializer.deserialize_identifier(LevelFilterIdentifier) - } - } - - struct LevelFilterEnum; - - impl<'de> Visitor<'de> for LevelFilterEnum { - type Value = LevelFilter; - - fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("log level filter") - } - - fn visit_enum(self, value: A) -> Result - where - A: EnumAccess<'de>, - { - let (level_filter, variant) = value.variant_seed(LevelFilterIdentifier)?; - // Every variant is a unit variant. 
- variant.unit_variant()?; - Ok(level_filter) - } - } - - deserializer.deserialize_enum("LevelFilter", &LOG_LEVEL_NAMES, LevelFilterEnum) - } -} - -#[cfg(test)] -mod tests { - use crate::{Level, LevelFilter}; - use serde_test::{assert_de_tokens, assert_de_tokens_error, assert_tokens, Token}; - - fn level_token(variant: &'static str) -> Token { - Token::UnitVariant { - name: "Level", - variant, - } - } - - fn level_bytes_tokens(variant: &'static [u8]) -> [Token; 3] { - [ - Token::Enum { name: "Level" }, - Token::Bytes(variant), - Token::Unit, - ] - } - - fn level_variant_tokens(variant: u32) -> [Token; 3] { - [ - Token::Enum { name: "Level" }, - Token::U32(variant), - Token::Unit, - ] - } - - fn level_filter_token(variant: &'static str) -> Token { - Token::UnitVariant { - name: "LevelFilter", - variant, - } - } - - fn level_filter_bytes_tokens(variant: &'static [u8]) -> [Token; 3] { - [ - Token::Enum { - name: "LevelFilter", - }, - Token::Bytes(variant), - Token::Unit, - ] - } - - fn level_filter_variant_tokens(variant: u32) -> [Token; 3] { - [ - Token::Enum { - name: "LevelFilter", - }, - Token::U32(variant), - Token::Unit, - ] - } - - #[test] - fn test_level_ser_de() { - let cases = &[ - (Level::Error, [level_token("ERROR")]), - (Level::Warn, [level_token("WARN")]), - (Level::Info, [level_token("INFO")]), - (Level::Debug, [level_token("DEBUG")]), - (Level::Trace, [level_token("TRACE")]), - ]; - - for (s, expected) in cases { - assert_tokens(s, expected); - } - } - - #[test] - fn test_level_case_insensitive() { - let cases = &[ - (Level::Error, [level_token("error")]), - (Level::Warn, [level_token("warn")]), - (Level::Info, [level_token("info")]), - (Level::Debug, [level_token("debug")]), - (Level::Trace, [level_token("trace")]), - ]; - - for (s, expected) in cases { - assert_de_tokens(s, expected); - } - } - - #[test] - fn test_level_de_bytes() { - let cases = &[ - (Level::Error, level_bytes_tokens(b"ERROR")), - (Level::Warn, level_bytes_tokens(b"WARN")), - (Level::Info, level_bytes_tokens(b"INFO")), - (Level::Debug, level_bytes_tokens(b"DEBUG")), - (Level::Trace, level_bytes_tokens(b"TRACE")), - ]; - - for (value, tokens) in cases { - assert_de_tokens(value, tokens); - } - } - - #[test] - fn test_level_de_variant_index() { - let cases = &[ - (Level::Error, level_variant_tokens(0)), - (Level::Warn, level_variant_tokens(1)), - (Level::Info, level_variant_tokens(2)), - (Level::Debug, level_variant_tokens(3)), - (Level::Trace, level_variant_tokens(4)), - ]; - - for (value, tokens) in cases { - assert_de_tokens(value, tokens); - } - } - - #[test] - fn test_level_de_error() { - let msg = "unknown variant `errorx`, expected one of \ - `ERROR`, `WARN`, `INFO`, `DEBUG`, `TRACE`"; - assert_de_tokens_error::(&[level_token("errorx")], msg); - } - - #[test] - fn test_level_filter_ser_de() { - let cases = &[ - (LevelFilter::Off, [level_filter_token("OFF")]), - (LevelFilter::Error, [level_filter_token("ERROR")]), - (LevelFilter::Warn, [level_filter_token("WARN")]), - (LevelFilter::Info, [level_filter_token("INFO")]), - (LevelFilter::Debug, [level_filter_token("DEBUG")]), - (LevelFilter::Trace, [level_filter_token("TRACE")]), - ]; - - for (s, expected) in cases { - assert_tokens(s, expected); - } - } - - #[test] - fn test_level_filter_case_insensitive() { - let cases = &[ - (LevelFilter::Off, [level_filter_token("off")]), - (LevelFilter::Error, [level_filter_token("error")]), - (LevelFilter::Warn, [level_filter_token("warn")]), - (LevelFilter::Info, [level_filter_token("info")]), - (LevelFilter::Debug, 
[level_filter_token("debug")]), - (LevelFilter::Trace, [level_filter_token("trace")]), - ]; - - for (s, expected) in cases { - assert_de_tokens(s, expected); - } - } - - #[test] - fn test_level_filter_de_bytes() { - let cases = &[ - (LevelFilter::Off, level_filter_bytes_tokens(b"OFF")), - (LevelFilter::Error, level_filter_bytes_tokens(b"ERROR")), - (LevelFilter::Warn, level_filter_bytes_tokens(b"WARN")), - (LevelFilter::Info, level_filter_bytes_tokens(b"INFO")), - (LevelFilter::Debug, level_filter_bytes_tokens(b"DEBUG")), - (LevelFilter::Trace, level_filter_bytes_tokens(b"TRACE")), - ]; - - for (value, tokens) in cases { - assert_de_tokens(value, tokens); - } - } - - #[test] - fn test_level_filter_de_variant_index() { - let cases = &[ - (LevelFilter::Off, level_filter_variant_tokens(0)), - (LevelFilter::Error, level_filter_variant_tokens(1)), - (LevelFilter::Warn, level_filter_variant_tokens(2)), - (LevelFilter::Info, level_filter_variant_tokens(3)), - (LevelFilter::Debug, level_filter_variant_tokens(4)), - (LevelFilter::Trace, level_filter_variant_tokens(5)), - ]; - - for (value, tokens) in cases { - assert_de_tokens(value, tokens); - } - } - - #[test] - fn test_level_filter_de_error() { - let msg = "unknown variant `errorx`, expected one of \ - `OFF`, `ERROR`, `WARN`, `INFO`, `DEBUG`, `TRACE`"; - assert_de_tokens_error::(&[level_filter_token("errorx")], msg); - } -} diff --git a/vendor/log/tests/integration.rs b/vendor/log/tests/integration.rs deleted file mode 100644 index 9bcb0469787984..00000000000000 --- a/vendor/log/tests/integration.rs +++ /dev/null @@ -1,101 +0,0 @@ -#![allow(dead_code, unused_imports)] - -use log::{debug, error, info, trace, warn, Level, LevelFilter, Log, Metadata, Record}; -use std::sync::{Arc, Mutex}; - -struct State { - last_log_level: Mutex>, - last_log_location: Mutex>, -} - -struct Logger(Arc); - -impl Log for Logger { - fn enabled(&self, _: &Metadata) -> bool { - true - } - - fn log(&self, record: &Record) { - *self.0.last_log_level.lock().unwrap() = Some(record.level()); - *self.0.last_log_location.lock().unwrap() = record.line(); - } - fn flush(&self) {} -} - -#[test] -fn test_integration() { - // These tests don't really make sense when static - // max level filtering is applied - #[cfg(not(any( - feature = "max_level_off", - feature = "max_level_error", - feature = "max_level_warn", - feature = "max_level_info", - feature = "max_level_debug", - feature = "max_level_trace", - feature = "release_max_level_off", - feature = "release_max_level_error", - feature = "release_max_level_warn", - feature = "release_max_level_info", - feature = "release_max_level_debug", - feature = "release_max_level_trace", - )))] - { - let me = Arc::new(State { - last_log_level: Mutex::new(None), - last_log_location: Mutex::new(None), - }); - let a = me.clone(); - let logger = Logger(me); - - test_filter(&logger, &a, LevelFilter::Off); - test_filter(&logger, &a, LevelFilter::Error); - test_filter(&logger, &a, LevelFilter::Warn); - test_filter(&logger, &a, LevelFilter::Info); - test_filter(&logger, &a, LevelFilter::Debug); - test_filter(&logger, &a, LevelFilter::Trace); - - test_line_numbers(&logger, &a); - } -} - -fn test_filter(logger: &dyn Log, a: &State, filter: LevelFilter) { - // tests to ensure logs with a level beneath 'max_level' are filtered out - log::set_max_level(filter); - error!(logger: logger, ""); - last(a, t(Level::Error, filter)); - warn!(logger: logger, ""); - last(a, t(Level::Warn, filter)); - info!(logger: logger, ""); - last(a, t(Level::Info, filter)); - 
debug!(logger: logger, ""); - last(a, t(Level::Debug, filter)); - trace!(logger: logger, ""); - last(a, t(Level::Trace, filter)); - - fn t(lvl: Level, filter: LevelFilter) -> Option { - if lvl <= filter { - Some(lvl) - } else { - None - } - } - fn last(state: &State, expected: Option) { - let lvl = state.last_log_level.lock().unwrap().take(); - assert_eq!(lvl, expected); - } -} - -fn test_line_numbers(logger: &dyn Log, state: &State) { - log::set_max_level(LevelFilter::Trace); - - info!(logger: logger, ""); // ensure check_line function follows log macro - check_log_location(state); - - #[track_caller] - fn check_log_location(state: &State) { - let location = std::panic::Location::caller().line(); // get function calling location - let line_number = state.last_log_location.lock().unwrap().take().unwrap(); // get location of most recent log - assert_eq!(line_number, location - 1); - } -} diff --git a/vendor/log/tests/macros.rs b/vendor/log/tests/macros.rs deleted file mode 100644 index dded475c1c082d..00000000000000 --- a/vendor/log/tests/macros.rs +++ /dev/null @@ -1,429 +0,0 @@ -use log::{log, log_enabled, Log, Metadata, Record}; - -macro_rules! all_log_macros { - ($($arg:tt)*) => ({ - ::log::trace!($($arg)*); - ::log::debug!($($arg)*); - ::log::info!($($arg)*); - ::log::warn!($($arg)*); - ::log::error!($($arg)*); - }); -} - -// Not `Copy` -struct Logger; - -impl Log for Logger { - fn enabled(&self, _: &Metadata) -> bool { - false - } - fn log(&self, _: &Record) {} - fn flush(&self) {} -} - -#[test] -fn no_args() { - let logger = Logger; - - for lvl in log::Level::iter() { - log!(lvl, "hello"); - log!(lvl, "hello",); - - log!(target: "my_target", lvl, "hello"); - log!(target: "my_target", lvl, "hello",); - - log!(logger: logger, lvl, "hello"); - log!(logger: logger, lvl, "hello",); - - log!(logger: logger, target: "my_target", lvl, "hello"); - log!(logger: logger, target: "my_target", lvl, "hello",); - } - - all_log_macros!("hello"); - all_log_macros!("hello",); - - all_log_macros!(target: "my_target", "hello"); - all_log_macros!(target: "my_target", "hello",); - - all_log_macros!(logger: logger, "hello"); - all_log_macros!(logger: logger, "hello",); - - all_log_macros!(logger: logger, target: "my_target", "hello"); - all_log_macros!(logger: logger, target: "my_target", "hello",); -} - -#[test] -fn anonymous_args() { - for lvl in log::Level::iter() { - log!(lvl, "hello {}", "world"); - log!(lvl, "hello {}", "world",); - - log!(target: "my_target", lvl, "hello {}", "world"); - log!(target: "my_target", lvl, "hello {}", "world",); - - log!(lvl, "hello {}", "world"); - log!(lvl, "hello {}", "world",); - } - - all_log_macros!("hello {}", "world"); - all_log_macros!("hello {}", "world",); - - all_log_macros!(target: "my_target", "hello {}", "world"); - all_log_macros!(target: "my_target", "hello {}", "world",); - - let logger = Logger; - - all_log_macros!(logger: logger, "hello {}", "world"); - all_log_macros!(logger: logger, "hello {}", "world",); - - all_log_macros!(logger: logger, target: "my_target", "hello {}", "world"); - all_log_macros!(logger: logger, target: "my_target", "hello {}", "world",); -} - -#[test] -fn named_args() { - for lvl in log::Level::iter() { - log!(lvl, "hello {world}", world = "world"); - log!(lvl, "hello {world}", world = "world",); - - log!(target: "my_target", lvl, "hello {world}", world = "world"); - log!(target: "my_target", lvl, "hello {world}", world = "world",); - - log!(lvl, "hello {world}", world = "world"); - log!(lvl, "hello {world}", world = "world",); 
- } - - all_log_macros!("hello {world}", world = "world"); - all_log_macros!("hello {world}", world = "world",); - - all_log_macros!(target: "my_target", "hello {world}", world = "world"); - all_log_macros!(target: "my_target", "hello {world}", world = "world",); - - let logger = Logger; - - all_log_macros!(logger: logger, "hello {world}", world = "world"); - all_log_macros!(logger: logger, "hello {world}", world = "world",); - - all_log_macros!(logger: logger, target: "my_target", "hello {world}", world = "world"); - all_log_macros!(logger: logger, target: "my_target", "hello {world}", world = "world",); -} - -#[test] -fn inlined_args() { - let world = "world"; - - for lvl in log::Level::iter() { - log!(lvl, "hello {world}"); - log!(lvl, "hello {world}",); - - log!(target: "my_target", lvl, "hello {world}"); - log!(target: "my_target", lvl, "hello {world}",); - - log!(lvl, "hello {world}"); - log!(lvl, "hello {world}",); - } - - all_log_macros!("hello {world}"); - all_log_macros!("hello {world}",); - - all_log_macros!(target: "my_target", "hello {world}"); - all_log_macros!(target: "my_target", "hello {world}",); - - let logger = Logger; - - all_log_macros!(logger: logger, "hello {world}"); - all_log_macros!(logger: logger, "hello {world}",); - - all_log_macros!(logger: logger, target: "my_target", "hello {world}"); - all_log_macros!(logger: logger, target: "my_target", "hello {world}",); -} - -#[test] -fn enabled() { - let logger = Logger; - - for lvl in log::Level::iter() { - let _enabled = log_enabled!(lvl); - let _enabled = log_enabled!(target: "my_target", lvl); - let _enabled = log_enabled!(logger: logger, target: "my_target", lvl); - let _enabled = log_enabled!(logger: logger, lvl); - } -} - -#[test] -fn expr() { - let logger = Logger; - - for lvl in log::Level::iter() { - log!(lvl, "hello"); - - log!(logger: logger, lvl, "hello"); - } -} - -#[test] -#[cfg(feature = "kv")] -fn kv_no_args() { - let logger = Logger; - - for lvl in log::Level::iter() { - log!(target: "my_target", lvl, cat_1 = "chashu", cat_2 = "nori", cat_count = 2; "hello"); - log!(lvl, cat_1 = "chashu", cat_2 = "nori", cat_count = 2; "hello"); - - log!(logger: logger, target: "my_target", lvl, cat_1 = "chashu", cat_2 = "nori", cat_count = 2; "hello"); - log!(logger: logger, lvl, cat_1 = "chashu", cat_2 = "nori", cat_count = 2; "hello"); - } - - all_log_macros!(target: "my_target", cat_1 = "chashu", cat_2 = "nori", cat_count = 2; "hello"); - all_log_macros!(target = "my_target", cat_1 = "chashu", cat_2 = "nori", cat_count = 2; "hello"); - all_log_macros!(cat_1 = "chashu", cat_2 = "nori", cat_count = 2; "hello"); - - all_log_macros!(logger: logger, cat_1 = "chashu", cat_2 = "nori", cat_count = 2; "hello"); - all_log_macros!(logger: logger, target: "my_target", cat_1 = "chashu", cat_2 = "nori", cat_count = 2; "hello"); -} - -#[test] -#[cfg(feature = "kv")] -fn kv_expr_args() { - let logger = Logger; - - for lvl in log::Level::iter() { - log!(target: "my_target", lvl, cat_math = { let mut x = 0; x += 1; x + 1 }; "hello"); - - log!(lvl, target = "my_target", cat_math = { let mut x = 0; x += 1; x + 1 }; "hello"); - log!(lvl, cat_math = { let mut x = 0; x += 1; x + 1 }; "hello"); - - log!(logger: logger, target: "my_target", lvl, cat_math = { let mut x = 0; x += 1; x + 1 }; "hello"); - - log!(logger: logger, lvl, target = "my_target", cat_math = { let mut x = 0; x += 1; x + 1 }; "hello"); - log!(logger: logger, lvl, cat_math = { let mut x = 0; x += 1; x + 1 }; "hello"); - } - - all_log_macros!(target: "my_target", cat_math 
= { let mut x = 0; x += 1; x + 1 }; "hello"); - all_log_macros!(target = "my_target", cat_math = { let mut x = 0; x += 1; x + 1 }; "hello"); - all_log_macros!(cat_math = { let mut x = 0; x += 1; x + 1 }; "hello"); - - all_log_macros!(logger: logger, target: "my_target", cat_math = { let mut x = 0; x += 1; x + 1 }; "hello"); - all_log_macros!(logger: logger, target = "my_target", cat_math = { let mut x = 0; x += 1; x + 1 }; "hello"); - all_log_macros!(logger: logger, cat_math = { let mut x = 0; x += 1; x + 1 }; "hello"); -} - -#[test] -#[cfg(feature = "kv")] -fn kv_anonymous_args() { - let logger = Logger; - - for lvl in log::Level::iter() { - log!(target: "my_target", lvl, cat_1 = "chashu", cat_2 = "nori", cat_count = 2; "hello {}", "world"); - log!(lvl, target = "my_target", cat_1 = "chashu", cat_2 = "nori", cat_count = 2; "hello {}", "world"); - - log!(lvl, cat_1 = "chashu", cat_2 = "nori", cat_count = 2; "hello {}", "world"); - - log!(logger: logger, target: "my_target", lvl, cat_1 = "chashu", cat_2 = "nori", cat_count = 2; "hello {}", "world"); - log!(logger: logger, lvl, target = "my_target", cat_1 = "chashu", cat_2 = "nori", cat_count = 2; "hello {}", "world"); - - log!(logger: logger, lvl, cat_1 = "chashu", cat_2 = "nori", cat_count = 2; "hello {}", "world"); - } - - all_log_macros!(target: "my_target", cat_1 = "chashu", cat_2 = "nori", cat_count = 2; "hello {}", "world"); - all_log_macros!(target = "my_target", cat_1 = "chashu", cat_2 = "nori", cat_count = 2; "hello {}", "world"); - all_log_macros!(cat_1 = "chashu", cat_2 = "nori", cat_count = 2; "hello {}", "world"); - - all_log_macros!(logger: logger, target: "my_target", cat_1 = "chashu", cat_2 = "nori", cat_count = 2; "hello {}", "world"); - all_log_macros!(logger: logger, target = "my_target", cat_1 = "chashu", cat_2 = "nori", cat_count = 2; "hello {}", "world"); - all_log_macros!(logger: logger, cat_1 = "chashu", cat_2 = "nori", cat_count = 2; "hello {}", "world"); -} - -#[test] -#[cfg(feature = "kv")] -fn kv_named_args() { - let logger = Logger; - - for lvl in log::Level::iter() { - log!(target: "my_target", lvl, cat_1 = "chashu", cat_2 = "nori", cat_count = 2; "hello {world}", world = "world"); - log!(lvl, target = "my_target", cat_1 = "chashu", cat_2 = "nori", cat_count = 2; "hello {world}", world = "world"); - - log!(lvl, cat_1 = "chashu", cat_2 = "nori", cat_count = 2; "hello {world}", world = "world"); - - log!(logger: logger, target: "my_target", lvl, cat_1 = "chashu", cat_2 = "nori", cat_count = 2; "hello {world}", world = "world"); - log!(logger: logger, lvl, target = "my_target", cat_1 = "chashu", cat_2 = "nori", cat_count = 2; "hello {world}", world = "world"); - - log!(logger: logger, lvl, cat_1 = "chashu", cat_2 = "nori", cat_count = 2; "hello {world}", world = "world"); - } - - all_log_macros!(target: "my_target", cat_1 = "chashu", cat_2 = "nori", cat_count = 2; "hello {world}", world = "world"); - all_log_macros!(target = "my_target", cat_1 = "chashu", cat_2 = "nori", cat_count = 2; "hello {world}", world = "world"); - all_log_macros!(cat_1 = "chashu", cat_2 = "nori", cat_count = 2; "hello {world}", world = "world"); - - all_log_macros!(logger: logger, target: "my_target", cat_1 = "chashu", cat_2 = "nori", cat_count = 2; "hello {world}", world = "world"); - all_log_macros!(logger: logger, target = "my_target", cat_1 = "chashu", cat_2 = "nori", cat_count = 2; "hello {world}", world = "world"); - all_log_macros!(logger: logger, cat_1 = "chashu", cat_2 = "nori", cat_count = 2; "hello {world}", world = "world"); -} 
- -#[test] -#[cfg(feature = "kv")] -fn kv_ident() { - let cat_1 = "chashu"; - let cat_2 = "nori"; - - all_log_macros!(cat_1, cat_2:%, cat_count = 2; "hello {world}", world = "world"); -} - -#[test] -#[cfg(feature = "kv")] -fn kv_expr_context() { - match "chashu" { - cat_1 => { - log::info!(target: "target", cat_1 = cat_1, cat_2 = "nori"; "hello {}", "cats"); - } - }; -} - -#[test] -fn implicit_named_args() { - let world = "world"; - - for lvl in log::Level::iter() { - log!(lvl, "hello {world}"); - log!(lvl, "hello {world}",); - - log!(target: "my_target", lvl, "hello {world}"); - log!(target: "my_target", lvl, "hello {world}",); - - log!(lvl, "hello {world}"); - log!(lvl, "hello {world}",); - } - - all_log_macros!("hello {world}"); - all_log_macros!("hello {world}",); - - all_log_macros!(target: "my_target", "hello {world}"); - all_log_macros!(target: "my_target", "hello {world}",); - - #[cfg(feature = "kv")] - all_log_macros!(target = "my_target"; "hello {world}"); - #[cfg(feature = "kv")] - all_log_macros!(target = "my_target"; "hello {world}",); -} - -#[test] -#[cfg(feature = "kv")] -fn kv_implicit_named_args() { - let world = "world"; - - for lvl in log::Level::iter() { - log!(target: "my_target", lvl, cat_1 = "chashu", cat_2 = "nori", cat_count = 2; "hello {world}"); - - log!(lvl, cat_1 = "chashu", cat_2 = "nori", cat_count = 2; "hello {world}"); - } - - all_log_macros!(target: "my_target", cat_1 = "chashu", cat_2 = "nori", cat_count = 2; "hello {world}"); - all_log_macros!(target = "my_target", cat_1 = "chashu", cat_2 = "nori", cat_count = 2; "hello {world}"); - all_log_macros!(cat_1 = "chashu", cat_2 = "nori", cat_count = 2; "hello {world}"); -} - -#[test] -#[cfg(feature = "kv")] -fn kv_string_keys() { - for lvl in log::Level::iter() { - log!(target: "my_target", lvl, "also dogs" = "Fílos", "key/that-can't/be/an/ident" = "hi"; "hello {world}", world = "world"); - } - - all_log_macros!(target: "my_target", "also dogs" = "Fílos", "key/that-can't/be/an/ident" = "hi"; "hello {world}", world = "world"); -} - -#[test] -#[cfg(feature = "kv")] -fn kv_common_value_types() { - all_log_macros!( - u8 = 42u8, - u16 = 42u16, - u32 = 42u32, - u64 = 42u64, - u128 = 42u128, - i8 = -42i8, - i16 = -42i16, - i32 = -42i32, - i64 = -42i64, - i128 = -42i128, - f32 = 4.2f32, - f64 = -4.2f64, - bool = true, - str = "string"; - "hello world" - ); -} - -#[test] -#[cfg(feature = "kv")] -fn kv_debug() { - all_log_macros!( - a:? = 42, - b:debug = 42; - "hello world" - ); -} - -#[test] -#[cfg(feature = "kv")] -fn kv_display() { - all_log_macros!( - a:% = 42, - b:display = 42; - "hello world" - ); -} - -#[test] -#[cfg(feature = "kv_std")] -fn kv_error() { - all_log_macros!( - a:err = std::io::Error::new(std::io::ErrorKind::Other, "an error"); - "hello world" - ); -} - -#[test] -#[cfg(feature = "kv_sval")] -fn kv_sval() { - all_log_macros!( - a:sval = 42; - "hello world" - ); -} - -#[test] -#[cfg(feature = "kv_serde")] -fn kv_serde() { - all_log_macros!( - a:serde = 42; - "hello world" - ); -} - -#[test] -fn logger_short_lived() { - all_log_macros!(logger: Logger, "hello"); - all_log_macros!(logger: &Logger, "hello"); -} - -#[test] -fn logger_expr() { - all_log_macros!(logger: { - let logger = Logger; - logger - }, "hello"); -} - -/// Some and None (from Option) are used in the macros. 
-#[derive(Debug)] -enum Type { - Some, - None, -} - -#[test] -fn regression_issue_494() { - use self::Type::*; - all_log_macros!("some message: {:?}, {:?}", None, Some); -} diff --git a/vendor/log/triagebot.toml b/vendor/log/triagebot.toml deleted file mode 100644 index fa0824ac53c0a9..00000000000000 --- a/vendor/log/triagebot.toml +++ /dev/null @@ -1 +0,0 @@ -[assign] diff --git a/vendor/memchr/.cargo-checksum.json b/vendor/memchr/.cargo-checksum.json deleted file mode 100644 index 2473bfd0ebc44e..00000000000000 --- a/vendor/memchr/.cargo-checksum.json +++ /dev/null @@ -1 +0,0 @@ -{"files":{".cargo_vcs_info.json":"32184fbeeef54a11ecc1ee09a2bedd94706767377f431a528f48134f56cd3b6f",".ignore":"ae8b19032d4fc418b99ccae9e7cc3996b1386665d0bd5edc5634a158e7d2f6a2",".vim/coc-settings.json":"cdc5e2b88bddbdbd1b85f21389c4d882720e4c4488ad566c43fccd9124f2e3bf","COPYING":"01c266bced4a434da0051174d6bee16a4c82cf634e2679b6155d40d75012390f","Cargo.lock":"839877bbdcf9c1ee00d0b290c76d6adc590a2cc9e374eb2bb9dc494f803c0145","Cargo.toml":"5750ca97e8b2643f2ba1d7e98f54dcf54518c0176899e547876a59eb736198a5","Cargo.toml.orig":"b919c7322ecc6da819546cb677da938730e5df32fdfe9ef5d7c2dc54cc768526","LICENSE-MIT":"0f96a83840e146e43c0ec96a22ec1f392e0680e6c1226e6f3ba87e0740af850f","README.md":"92a74aaffe011bdaa06fbc34a01686a6eba58ca1322e976759417a547fddf734","UNLICENSE":"7e12e5df4bae12cb21581ba157ced20e1986a0508dd10d0e8a4ab9a4cf94e85c","rustfmt.toml":"1ca600239a27401c4a43f363cf3f38183a212affc1f31bff3ae93234bbaec228","src/arch/aarch64/memchr.rs":"5bb70f915084e629d940dbc322f5b9096b2e658cf63fea8a2f6e7550412e73a0","src/arch/aarch64/mod.rs":"44cd1a614bd66f1e66fc86c541d3c3b8d3a14a644c13e8bf816df3f555eac2d4","src/arch/aarch64/neon/memchr.rs":"e8c00b8fb2c7e2711832ae3cedefe59f32ebedd7dfa4d0ec6de2a566c979daea","src/arch/aarch64/neon/mod.rs":"eab6d56c2b2354db4ee395f40282cd49f97e2ab853547be5de6e65fbe1b2f634","src/arch/aarch64/neon/packedpair.rs":"32d3e4cd0dd9b6c8382e5308cbd896d20242c90b12862c44a5de6a8b4d6126df","src/arch/all/memchr.rs":"b0b1214aa573ed5d02ae62a77c42c773065566b50274d4096e37817d65ab1594","src/arch/all/mod.rs":"05f3fc2b069682eb1545fc6366d167bb620a454365dac8b8dd6cde6cd64de18a","src/arch/all/packedpair/default_rank.rs":"abffd1b5b8b7a3be95c03dd1105b905c246a379854dc56f1e846ea7c4408f2c7","src/arch/all/packedpair/mod.rs":"292b66042c5b5c78bba33db6526aeae6904db803d601fcdd29032b87b3eb3754","src/arch/all/rabinkarp.rs":"236f69c04b90c14c253ae6c8d9b78150b4a56df75bb50af6d63b15145668b7cc","src/arch/all/shiftor.rs":"0d79117f52a1e4795843603a3bb0b45397df4ad5e4184bbc923658dab9dc3b5f","src/arch/all/twoway.rs":"47c97a265bfbafde90a618946643d3e97dfd9a85f01aa4ac758cd4c1573a450d","src/arch/generic/memchr.rs":"cab4636bf8042c81ca1bcc49fe4214b362100992c0a850859ff445fa6a48f327","src/arch/generic/mod.rs":"1dd75f61e0ea2563b8205a08aaa7b55500130aa331d18b9e9f995724b66c7a39","src/arch/generic/packedpair.rs":"a4a6efb29877ced9cf4c4e5ae9f36a79f019a16b831f2b9424899a1513d458ad","src/arch/mod.rs":"ca3960b7e2ed28d1b3c121710a870430531aad792f64d4dcb4ca4709d6cbda30","src/arch/wasm32/memchr.rs":"d88ac79f891d8530f505f5035062d3da274a05d66c611480c75430d52709d052","src/arch/wasm32/mod.rs":"a20377aa8fe07d68594879101dc73061e4f51d9c8d812b593b1f376e3c8add79","src/arch/wasm32/simd128/memchr.rs":"bac2c4c43fe710c83a6f2b1118fede043be89dd821d4b532907f129f09fdb5cf","src/arch/wasm32/simd128/mod.rs":"c157b373faedbfd65323be432e25bc411d97aa1b7bc58e76048614c7b2bf3bf6","src/arch/wasm32/simd128/packedpair.rs":"288ba6e5eee6a7a8e5e45c64cff1aa5d72d996c2a6bc228be372c75789f08e45","src/arch/x86_64/avx
2/memchr.rs":"576ec0c30f49874f7fd9f6caeb490d56132c0fbbaa4d877b1aa532cafce19323","src/arch/x86_64/avx2/mod.rs":"0033d1b712d0b10f0f273ef9aa8caa53e05e49f4c56a64f39af0b9df97eec584","src/arch/x86_64/avx2/packedpair.rs":"87b69cb4301815906127db4f6370f572c7c5d5dad35c0946c00ad888dbcaec8c","src/arch/x86_64/memchr.rs":"7426e27c39a334d500a6803acdfd97ffc05fbf2d70ba8e74492a8ad3f22d20da","src/arch/x86_64/mod.rs":"61b2aa876942fd3e78714c2ae21e356c8634545c06995020f443fa50218df027","src/arch/x86_64/sse2/memchr.rs":"0de0444e26d885eaf866220578752aac871e03bebee7b4f5de7fe8a35f5fa97f","src/arch/x86_64/sse2/mod.rs":"38b70ae52a64ec974dbb91d04d6ca8013d9e06d1fe4af852206bbc2faf1c59aa","src/arch/x86_64/sse2/packedpair.rs":"241ea981d8eea6024769f1c9375f726a9bb9700160c5857781d4befd9f5ef55d","src/cow.rs":"34eddd02cb82cc2d5a2c640891d64efe332dabcc1eea5115764200d8f46b66f7","src/ext.rs":"210f89d1e32211bc64414cbd56e97b4f56ce8a8832d321d77a9fe519634e27ea","src/lib.rs":"614f778a41e88a29ea0ceb8e92c839dbb6b5a61c967f8bfd962975e18f932c71","src/macros.rs":"3e4b39252bfa471fad384160a43f113ebfec7bec46a85d16f006622881dd2081","src/memchr.rs":"6ae779ec5d00f443075316e0105edf30b489a38e2e96325bec14ccecd014145b","src/memmem/mod.rs":"1b0a9d6a681fd0887c677c4fc8d4c8f9719ddde250bdd5ea545365c1a7fb9094","src/memmem/searcher.rs":"7763472d43c66df596ca0697c07db0b4666d38a6a14f64f9f298aaf756c4a715","src/tests/memchr/mod.rs":"269f8e4b4f7f5ea458f27a3c174eb1020ffb2484eeba9464170beb51747df69b","src/tests/memchr/naive.rs":"6a0bee033e5edfb5b1d5769a5fa1c78388f7e9ff7bb91cb67f0ad029289e00e7","src/tests/memchr/prop.rs":"1854eea2338c405fe4635aac430f51e10d2069cd37a7489ddaff47da95f8720b","src/tests/mod.rs":"7cec8f809e279310a465c6a7725087970f219a676cc76c83de30c695bb490740","src/tests/packedpair.rs":"b02ec4fbb61a8653cb5f2268c31bc9168b8043347f2abdcc74081acf83b98e15","src/tests/substring/mod.rs":"c7660d10749363ac4687e7da2b5fda60768230425df8ba416c0c28b8d56a5c74","src/tests/substring/naive.rs":"df6f55d165382b8a53762ba4c324926cac13ebc62cde1805f4ce08740b326483","src/tests/substring/prop.rs":"38c15992609b5681a95d838ae6f2933e00a1219f2c971bfba245f96e0729fcdc","src/vector.rs":"e787c4ed2f499802e90910f7aedc7ca41acea39c8ef416b19d6d572c1a540422"},"package":"f52b00d39961fc5b2736ea853c9cc86238e165017a493d1d5c8eac6bdc4cc273"} \ No newline at end of file diff --git a/vendor/memchr/.cargo_vcs_info.json b/vendor/memchr/.cargo_vcs_info.json deleted file mode 100644 index 97ecb131fc6c45..00000000000000 --- a/vendor/memchr/.cargo_vcs_info.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "git": { - "sha1": "9ba486e4ba7e865c0510305c5dacba73988d9f31" - }, - "path_in_vcs": "" -} \ No newline at end of file diff --git a/vendor/memchr/.ignore b/vendor/memchr/.ignore deleted file mode 100644 index 47ec4742e04a74..00000000000000 --- a/vendor/memchr/.ignore +++ /dev/null @@ -1 +0,0 @@ -!.github diff --git a/vendor/memchr/.vim/coc-settings.json b/vendor/memchr/.vim/coc-settings.json deleted file mode 100644 index 38f35ced55fc24..00000000000000 --- a/vendor/memchr/.vim/coc-settings.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "rust-analyzer.cargo.allFeatures": false, - "rust-analyzer.linkedProjects": [ - "benchmarks/engines/libc/Cargo.toml", - "benchmarks/engines/rust-bytecount/Cargo.toml", - "benchmarks/engines/rust-jetscii/Cargo.toml", - "benchmarks/engines/rust-memchr/Cargo.toml", - "benchmarks/engines/rust-memchrold/Cargo.toml", - "benchmarks/engines/rust-sliceslice/Cargo.toml", - "benchmarks/engines/rust-std/Cargo.toml", - "benchmarks/engines/stringzilla/Cargo.toml", - "benchmarks/shared/Cargo.toml", - 
"fuzz/Cargo.toml", - "Cargo.toml" - ] -} diff --git a/vendor/memchr/COPYING b/vendor/memchr/COPYING deleted file mode 100644 index bb9c20a094e41b..00000000000000 --- a/vendor/memchr/COPYING +++ /dev/null @@ -1,3 +0,0 @@ -This project is dual-licensed under the Unlicense and MIT licenses. - -You may use this code under the terms of either license. diff --git a/vendor/memchr/Cargo.lock b/vendor/memchr/Cargo.lock deleted file mode 100644 index 55c1cd9105dfee..00000000000000 --- a/vendor/memchr/Cargo.lock +++ /dev/null @@ -1,80 +0,0 @@ -# This file is automatically @generated by Cargo. -# It is not intended for manual editing. -version = 3 - -[[package]] -name = "cfg-if" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" - -[[package]] -name = "getrandom" -version = "0.2.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "190092ea657667030ac6a35e305e62fc4dd69fd98ac98631e5d3a2b1575a12b5" -dependencies = [ - "cfg-if", - "libc", - "wasi", -] - -[[package]] -name = "libc" -version = "0.2.153" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c198f91728a82281a64e1f4f9eeb25d82cb32a5de251c6bd1b5154d63a8e7bd" - -[[package]] -name = "log" -version = "0.4.20" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b5e6163cb8c49088c2c36f57875e58ccd8c87c7427f7fbd50ea6710b2f3f2e8f" - -[[package]] -name = "memchr" -version = "2.7.6" -dependencies = [ - "log", - "quickcheck", - "rustc-std-workspace-core", -] - -[[package]] -name = "quickcheck" -version = "1.0.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "588f6378e4dd99458b60ec275b4477add41ce4fa9f64dcba6f15adccb19b50d6" -dependencies = [ - "rand", -] - -[[package]] -name = "rand" -version = "0.8.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" -dependencies = [ - "rand_core", -] - -[[package]] -name = "rand_core" -version = "0.6.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" -dependencies = [ - "getrandom", -] - -[[package]] -name = "rustc-std-workspace-core" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1956f5517128a2b6f23ab2dadf1a976f4f5b27962e7724c2bf3d45e539ec098c" - -[[package]] -name = "wasi" -version = "0.11.0+wasi-snapshot-preview1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" diff --git a/vendor/memchr/Cargo.toml b/vendor/memchr/Cargo.toml deleted file mode 100644 index bd76618cb11fa3..00000000000000 --- a/vendor/memchr/Cargo.toml +++ /dev/null @@ -1,89 +0,0 @@ -# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO -# -# When uploading crates to the registry Cargo will automatically -# "normalize" Cargo.toml files for maximal compatibility -# with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g., crates.io) dependencies. -# -# If you are reading this file be aware that the original Cargo.toml -# will likely look very different (and much more reasonable). -# See Cargo.toml.orig for the original contents. 
- -[package] -edition = "2021" -rust-version = "1.61" -name = "memchr" -version = "2.7.6" -authors = [ - "Andrew Gallant ", - "bluss", -] -build = false -exclude = [ - "/.github", - "/benchmarks", - "/fuzz", - "/scripts", - "/tmp", -] -autolib = false -autobins = false -autoexamples = false -autotests = false -autobenches = false -description = """ -Provides extremely fast (uses SIMD on x86_64, aarch64 and wasm32) routines for -1, 2 or 3 byte search and single substring search. -""" -homepage = "https://github.com/BurntSushi/memchr" -documentation = "https://docs.rs/memchr/" -readme = "README.md" -keywords = [ - "memchr", - "memmem", - "substring", - "find", - "search", -] -license = "Unlicense OR MIT" -repository = "https://github.com/BurntSushi/memchr" - -[package.metadata.docs.rs] -rustdoc-args = ["--generate-link-to-definition"] - -[features] -alloc = [] -default = ["std"] -libc = [] -logging = ["dep:log"] -rustc-dep-of-std = ["core"] -std = ["alloc"] -use_std = ["std"] - -[lib] -name = "memchr" -path = "src/lib.rs" -bench = false - -[dependencies.core] -version = "1.0.0" -optional = true -package = "rustc-std-workspace-core" - -[dependencies.log] -version = "0.4.20" -optional = true - -[dev-dependencies.quickcheck] -version = "1.0.3" -default-features = false - -[profile.bench] -debug = 2 - -[profile.release] -debug = 2 - -[profile.test] -opt-level = 3 -debug = 2 diff --git a/vendor/memchr/LICENSE-MIT b/vendor/memchr/LICENSE-MIT deleted file mode 100644 index 3b0a5dc09c1e16..00000000000000 --- a/vendor/memchr/LICENSE-MIT +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2015 Andrew Gallant - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/vendor/memchr/README.md b/vendor/memchr/README.md deleted file mode 100644 index db00ebbc935648..00000000000000 --- a/vendor/memchr/README.md +++ /dev/null @@ -1,196 +0,0 @@ -memchr -====== -This library provides heavily optimized routines for string search primitives. - -[![Build status](https://github.com/BurntSushi/memchr/workflows/ci/badge.svg)](https://github.com/BurntSushi/memchr/actions) -[![Crates.io](https://img.shields.io/crates/v/memchr.svg)](https://crates.io/crates/memchr) - -Dual-licensed under MIT or the [UNLICENSE](https://unlicense.org/). - - -### Documentation - -[https://docs.rs/memchr](https://docs.rs/memchr) - - -### Overview - -* The top-level module provides routines for searching for 1, 2 or 3 bytes - in the forward or reverse direction. 
When searching for more than one byte, - positions are considered a match if the byte at that position matches any - of the bytes. -* The `memmem` sub-module provides forward and reverse substring search - routines. - -In all such cases, routines operate on `&[u8]` without regard to encoding. This -is exactly what you want when searching either UTF-8 or arbitrary bytes. - -### Compiling without the standard library - -memchr links to the standard library by default, but you can disable the -`std` feature if you want to use it in a `#![no_std]` crate: - -```toml -[dependencies] -memchr = { version = "2", default-features = false } -``` - -On `x86_64` platforms, when the `std` feature is disabled, the SSE2 accelerated -implementations will be used. When `std` is enabled, AVX2 accelerated -implementations will be used if the CPU is determined to support it at runtime. - -SIMD accelerated routines are also available on the `wasm32` and `aarch64` -targets. The `std` feature is not required to use them. - -When a SIMD version is not available, then this crate falls back to -[SWAR](https://en.wikipedia.org/wiki/SWAR) techniques. - -### Minimum Rust version policy - -This crate's minimum supported `rustc` version is `1.61.0`. - -The current policy is that the minimum Rust version required to use this crate -can be increased in minor version updates. For example, if `crate 1.0` requires -Rust 1.20.0, then `crate 1.0.z` for all values of `z` will also require Rust -1.20.0 or newer. However, `crate 1.y` for `y > 0` may require a newer minimum -version of Rust. - -In general, this crate will be conservative with respect to the minimum -supported version of Rust. - - -### Testing strategy - -Given the complexity of the code in this crate, along with the pervasive use -of `unsafe`, this crate has an extensive testing strategy. It combines multiple -approaches: - -* Hand-written tests. -* Exhaustive-style testing meant to exercise all possible branching and offset - calculations. -* Property based testing through [`quickcheck`](https://github.com/BurntSushi/quickcheck). -* Fuzz testing through [`cargo fuzz`](https://github.com/rust-fuzz/cargo-fuzz). -* A huge suite of benchmarks that are also run as tests. Benchmarks always - confirm that the expected result occurs. - -Improvements to the testing infrastructure are very welcome. - - -### Algorithms used - -At time of writing, this crate's implementation of substring search actually -has a few different algorithms to choose from depending on the situation. - -* For very small haystacks, - [Rabin-Karp](https://en.wikipedia.org/wiki/Rabin%E2%80%93Karp_algorithm) - is used to reduce latency. Rabin-Karp has very small overhead and can often - complete before other searchers have even been constructed. -* For small needles, a variant of the - ["Generic SIMD"](http://0x80.pl/articles/simd-strfind.html#algorithm-1-generic-simd) - algorithm is used. Instead of using the first and last bytes, a heuristic is - used to select bytes based on a background distribution of byte frequencies. -* In all other cases, - [Two-Way](https://en.wikipedia.org/wiki/Two-way_string-matching_algorithm) - is used. If possible, a prefilter based on the "Generic SIMD" algorithm - linked above is used to find candidates quickly. A dynamic heuristic is used - to detect if the prefilter is ineffective, and if so, disables it. - - -### Why is the standard library's substring search so much slower? - -We'll start by establishing what the difference in performance actually -is. 
There are two relevant benchmark classes to consider: `prebuilt` and -`oneshot`. The `prebuilt` benchmarks are designed to measure---to the extent -possible---search time only. That is, the benchmark first starts by building a -searcher and then only tracking the time for _using_ the searcher: - -``` -$ rebar rank benchmarks/record/x86_64/2023-08-26.csv --intersection -e memchr/memmem/prebuilt -e std/memmem/prebuilt -Engine Version Geometric mean of speed ratios Benchmark count ------- ------- ------------------------------ --------------- -rust/memchr/memmem/prebuilt 2.5.0 1.03 53 -rust/std/memmem/prebuilt 1.73.0-nightly 180dffba1 6.50 53 -``` - -Conversely, the `oneshot` benchmark class measures the time it takes to both -build the searcher _and_ use it: - -``` -$ rebar rank benchmarks/record/x86_64/2023-08-26.csv --intersection -e memchr/memmem/oneshot -e std/memmem/oneshot -Engine Version Geometric mean of speed ratios Benchmark count ------- ------- ------------------------------ --------------- -rust/memchr/memmem/oneshot 2.5.0 1.04 53 -rust/std/memmem/oneshot 1.73.0-nightly 180dffba1 5.26 53 -``` - -**NOTE:** Replace `rebar rank` with `rebar cmp` in the above commands to -explore the specific benchmarks and their differences. - -So in both cases, this crate is quite a bit faster over a broad sampling of -benchmarks regardless of whether you measure only search time or search time -plus construction time. The difference is a little smaller when you include -construction time in your measurements. - -These two different types of benchmark classes make for a nice segue into -one reason why the standard library's substring search can be slower: API -design. In the standard library, the only APIs available to you require -one to re-construct the searcher for every search. While you can benefit -from building a searcher once and iterating over all matches in a single -string, you cannot reuse that searcher to search other strings. This might -come up when, for example, searching a file one line at a time. You'll need -to re-build the searcher for every line searched, and this can [really -matter][burntsushi-bstr-blog]. - -**NOTE:** The `prebuilt` benchmark for the standard library can't actually -avoid measuring searcher construction at some level, because there is no API -for it. Instead, the benchmark consists of building the searcher once and then -finding all matches in a single string via an iterator. This tends to -approximate a benchmark where searcher construction isn't measured, but it -isn't perfect. While this means the comparison is not strictly -apples-to-apples, it does reflect what is maximally possible with the standard -library, and thus reflects the best that one could do in a real world scenario. - -While there is more to the story than just API design here, it's important to -point out that even if the standard library's substring search were a precise -clone of this crate internally, it would still be at a disadvantage in some -workloads because of its API. (The same also applies to C's standard library -`memmem` function. There is no way to amortize construction of the searcher. -You need to pay for it on every call.) - -The other reason for the difference in performance is that -the standard library has trouble using SIMD. In particular, substring search -is implemented in the `core` library, where platform specific code generally -can't exist. 
That's an issue because in order to utilize SIMD beyond SSE2 -while maintaining portable binaries, one needs to use [dynamic CPU feature -detection][dynamic-cpu], and that in turn requires platform specific code. -While there is [an RFC for enabling target feature detection in -`core`][core-feature], it doesn't yet exist. - -The bottom line here is that `core`'s substring search implementation is -limited to making use of SSE2, but not AVX. - -Still though, this crate does accelerate substring search even when only SSE2 -is available. The standard library could therefore adopt the techniques in this -crate just for SSE2. The reason why that hasn't happened yet isn't totally -clear to me. It likely needs a champion to push it through. The standard -library tends to be more conservative in these things. With that said, the -standard library does use some [SSE2 acceleration on `x86-64`][std-sse2] added -in [this PR][std-sse2-pr]. However, at the time of writing, it is only used -for short needles and doesn't use the frequency based heuristics found in this -crate. - -**NOTE:** Another thing worth mentioning is that the standard library's -substring search routine requires that both the needle and haystack have type -`&str`. Unless you can assume that your data is valid UTF-8, building a `&str` -will come with the overhead of UTF-8 validation. This may in turn result in -overall slower searching depending on your workload. In contrast, the `memchr` -crate permits both the needle and the haystack to have type `&[u8]`, where -`&[u8]` can be created from a `&str` with zero cost. Therefore, the substring -search in this crate is strictly more flexible than what the standard library -provides. - -[burntsushi-bstr-blog]: https://blog.burntsushi.net/bstr/#motivation-based-on-performance -[dynamic-cpu]: https://doc.rust-lang.org/std/arch/index.html#dynamic-cpu-feature-detection -[core-feature]: https://github.com/rust-lang/rfcs/pull/3469 -[std-sse2]: https://github.com/rust-lang/rust/blob/bf9229a2e366b4c311f059014a4aa08af16de5d8/library/core/src/str/pattern.rs#L1719-L1857 -[std-sse2-pr]: https://github.com/rust-lang/rust/pull/103779 diff --git a/vendor/memchr/UNLICENSE b/vendor/memchr/UNLICENSE deleted file mode 100644 index 68a49daad8ff7e..00000000000000 --- a/vendor/memchr/UNLICENSE +++ /dev/null @@ -1,24 +0,0 @@ -This is free and unencumbered software released into the public domain. - -Anyone is free to copy, modify, publish, use, compile, sell, or -distribute this software, either in source code form or as a compiled -binary, for any purpose, commercial or non-commercial, and by any -means. - -In jurisdictions that recognize copyright laws, the author or authors -of this software dedicate any and all copyright interest in the -software to the public domain. We make this dedication for the benefit -of the public at large and to the detriment of our heirs and -successors. We intend this dedication to be an overt act of -relinquishment in perpetuity of all present and future rights to this -software under copyright law. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR -OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, -ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR -OTHER DEALINGS IN THE SOFTWARE. 
- -For more information, please refer to diff --git a/vendor/memchr/rustfmt.toml b/vendor/memchr/rustfmt.toml deleted file mode 100644 index aa37a218b97e5f..00000000000000 --- a/vendor/memchr/rustfmt.toml +++ /dev/null @@ -1,2 +0,0 @@ -max_width = 79 -use_small_heuristics = "max" diff --git a/vendor/memchr/src/arch/aarch64/memchr.rs b/vendor/memchr/src/arch/aarch64/memchr.rs deleted file mode 100644 index e0053b2a2205b7..00000000000000 --- a/vendor/memchr/src/arch/aarch64/memchr.rs +++ /dev/null @@ -1,137 +0,0 @@ -/*! -Wrapper routines for `memchr` and friends. - -These routines choose the best implementation at compile time. (This is -different from `x86_64` because it is expected that `neon` is almost always -available for `aarch64` targets.) -*/ - -macro_rules! defraw { - ($ty:ident, $find:ident, $start:ident, $end:ident, $($needles:ident),+) => {{ - #[cfg(target_feature = "neon")] - { - use crate::arch::aarch64::neon::memchr::$ty; - - debug!("chose neon for {}", stringify!($ty)); - debug_assert!($ty::is_available()); - // SAFETY: We know that wasm memchr is always available whenever - // code is compiled for `aarch64` with the `neon` target feature - // enabled. - $ty::new_unchecked($($needles),+).$find($start, $end) - } - #[cfg(not(target_feature = "neon"))] - { - use crate::arch::all::memchr::$ty; - - debug!( - "no neon feature available, using fallback for {}", - stringify!($ty), - ); - $ty::new($($needles),+).$find($start, $end) - } - }} -} - -/// memchr, but using raw pointers to represent the haystack. -/// -/// # Safety -/// -/// Pointers must be valid. See `One::find_raw`. -#[inline(always)] -pub(crate) unsafe fn memchr_raw( - n1: u8, - start: *const u8, - end: *const u8, -) -> Option<*const u8> { - defraw!(One, find_raw, start, end, n1) -} - -/// memrchr, but using raw pointers to represent the haystack. -/// -/// # Safety -/// -/// Pointers must be valid. See `One::rfind_raw`. -#[inline(always)] -pub(crate) unsafe fn memrchr_raw( - n1: u8, - start: *const u8, - end: *const u8, -) -> Option<*const u8> { - defraw!(One, rfind_raw, start, end, n1) -} - -/// memchr2, but using raw pointers to represent the haystack. -/// -/// # Safety -/// -/// Pointers must be valid. See `Two::find_raw`. -#[inline(always)] -pub(crate) unsafe fn memchr2_raw( - n1: u8, - n2: u8, - start: *const u8, - end: *const u8, -) -> Option<*const u8> { - defraw!(Two, find_raw, start, end, n1, n2) -} - -/// memrchr2, but using raw pointers to represent the haystack. -/// -/// # Safety -/// -/// Pointers must be valid. See `Two::rfind_raw`. -#[inline(always)] -pub(crate) unsafe fn memrchr2_raw( - n1: u8, - n2: u8, - start: *const u8, - end: *const u8, -) -> Option<*const u8> { - defraw!(Two, rfind_raw, start, end, n1, n2) -} - -/// memchr3, but using raw pointers to represent the haystack. -/// -/// # Safety -/// -/// Pointers must be valid. See `Three::find_raw`. -#[inline(always)] -pub(crate) unsafe fn memchr3_raw( - n1: u8, - n2: u8, - n3: u8, - start: *const u8, - end: *const u8, -) -> Option<*const u8> { - defraw!(Three, find_raw, start, end, n1, n2, n3) -} - -/// memrchr3, but using raw pointers to represent the haystack. -/// -/// # Safety -/// -/// Pointers must be valid. See `Three::rfind_raw`. -#[inline(always)] -pub(crate) unsafe fn memrchr3_raw( - n1: u8, - n2: u8, - n3: u8, - start: *const u8, - end: *const u8, -) -> Option<*const u8> { - defraw!(Three, rfind_raw, start, end, n1, n2, n3) -} - -/// Count all matching bytes, but using raw pointers to represent the haystack. 
-/// -/// # Safety -/// -/// Pointers must be valid. See `One::count_raw`. -#[inline(always)] -pub(crate) unsafe fn count_raw( - n1: u8, - start: *const u8, - end: *const u8, -) -> usize { - defraw!(One, count_raw, start, end, n1) -} diff --git a/vendor/memchr/src/arch/aarch64/mod.rs b/vendor/memchr/src/arch/aarch64/mod.rs deleted file mode 100644 index 7b3291257b9360..00000000000000 --- a/vendor/memchr/src/arch/aarch64/mod.rs +++ /dev/null @@ -1,7 +0,0 @@ -/*! -Vector algorithms for the `aarch64` target. -*/ - -pub mod neon; - -pub(crate) mod memchr; diff --git a/vendor/memchr/src/arch/aarch64/neon/memchr.rs b/vendor/memchr/src/arch/aarch64/neon/memchr.rs deleted file mode 100644 index 5fcc76237bad13..00000000000000 --- a/vendor/memchr/src/arch/aarch64/neon/memchr.rs +++ /dev/null @@ -1,1031 +0,0 @@ -/*! -This module defines 128-bit vector implementations of `memchr` and friends. - -The main types in this module are [`One`], [`Two`] and [`Three`]. They are for -searching for one, two or three distinct bytes, respectively, in a haystack. -Each type also has corresponding double ended iterators. These searchers are -typically much faster than scalar routines accomplishing the same task. - -The `One` searcher also provides a [`One::count`] routine for efficiently -counting the number of times a single byte occurs in a haystack. This is -useful, for example, for counting the number of lines in a haystack. This -routine exists because it is usually faster, especially with a high match -count, then using [`One::find`] repeatedly. ([`OneIter`] specializes its -`Iterator::count` implementation to use this routine.) - -Only one, two and three bytes are supported because three bytes is about -the point where one sees diminishing returns. Beyond this point and it's -probably (but not necessarily) better to just use a simple `[bool; 256]` array -or similar. However, it depends mightily on the specific work-load and the -expected match frequency. -*/ - -use core::arch::aarch64::uint8x16_t; - -use crate::{arch::generic::memchr as generic, ext::Pointer, vector::Vector}; - -/// Finds all occurrences of a single byte in a haystack. -#[derive(Clone, Copy, Debug)] -pub struct One(generic::One); - -impl One { - /// Create a new searcher that finds occurrences of the needle byte given. - /// - /// This particular searcher is specialized to use neon vector instructions - /// that typically make it quite fast. - /// - /// If neon is unavailable in the current environment, then `None` is - /// returned. - #[inline] - pub fn new(needle: u8) -> Option { - if One::is_available() { - // SAFETY: we check that neon is available above. - unsafe { Some(One::new_unchecked(needle)) } - } else { - None - } - } - - /// Create a new finder specific to neon vectors and routines without - /// checking that neon is available. - /// - /// # Safety - /// - /// Callers must guarantee that it is safe to execute `neon` instructions - /// in the current environment. - /// - /// Note that it is a common misconception that if one compiles for an - /// `x86_64` target, then they therefore automatically have access to neon - /// instructions. While this is almost always the case, it isn't true in - /// 100% of cases. - #[target_feature(enable = "neon")] - #[inline] - pub unsafe fn new_unchecked(needle: u8) -> One { - One(generic::One::new(needle)) - } - - /// Returns true when this implementation is available in the current - /// environment. 
- /// - /// When this is true, it is guaranteed that [`One::new`] will return - /// a `Some` value. Similarly, when it is false, it is guaranteed that - /// `One::new` will return a `None` value. - /// - /// Note also that for the lifetime of a single program, if this returns - /// true then it will always return true. - #[inline] - pub fn is_available() -> bool { - #[cfg(target_feature = "neon")] - { - true - } - #[cfg(not(target_feature = "neon"))] - { - false - } - } - - /// Return the first occurrence of one of the needle bytes in the given - /// haystack. If no such occurrence exists, then `None` is returned. - /// - /// The occurrence is reported as an offset into `haystack`. Its maximum - /// value is `haystack.len() - 1`. - #[inline] - pub fn find(&self, haystack: &[u8]) -> Option { - // SAFETY: `find_raw` guarantees that if a pointer is returned, it - // falls within the bounds of the start and end pointers. - unsafe { - generic::search_slice_with_raw(haystack, |s, e| { - self.find_raw(s, e) - }) - } - } - - /// Return the last occurrence of one of the needle bytes in the given - /// haystack. If no such occurrence exists, then `None` is returned. - /// - /// The occurrence is reported as an offset into `haystack`. Its maximum - /// value is `haystack.len() - 1`. - #[inline] - pub fn rfind(&self, haystack: &[u8]) -> Option { - // SAFETY: `rfind_raw` guarantees that if a pointer is returned, it - // falls within the bounds of the start and end pointers. - unsafe { - generic::search_slice_with_raw(haystack, |s, e| { - self.rfind_raw(s, e) - }) - } - } - - /// Counts all occurrences of this byte in the given haystack. - #[inline] - pub fn count(&self, haystack: &[u8]) -> usize { - // SAFETY: All of our pointers are derived directly from a borrowed - // slice, which is guaranteed to be valid. - unsafe { - let start = haystack.as_ptr(); - let end = start.add(haystack.len()); - self.count_raw(start, end) - } - } - - /// Like `find`, but accepts and returns raw pointers. - /// - /// When a match is found, the pointer returned is guaranteed to be - /// `>= start` and `< end`. - /// - /// This routine is useful if you're already using raw pointers and would - /// like to avoid converting back to a slice before executing a search. - /// - /// # Safety - /// - /// * Both `start` and `end` must be valid for reads. - /// * Both `start` and `end` must point to an initialized value. - /// * Both `start` and `end` must point to the same allocated object and - /// must either be in bounds or at most one byte past the end of the - /// allocated object. - /// * Both `start` and `end` must be _derived from_ a pointer to the same - /// object. - /// * The distance between `start` and `end` must not overflow `isize`. - /// * The distance being in bounds must not rely on "wrapping around" the - /// address space. - /// - /// Note that callers may pass a pair of pointers such that `start >= end`. - /// In that case, `None` will always be returned. - #[inline] - pub unsafe fn find_raw( - &self, - start: *const u8, - end: *const u8, - ) -> Option<*const u8> { - if start >= end { - return None; - } - if end.distance(start) < uint8x16_t::BYTES { - // SAFETY: We require the caller to pass valid start/end pointers. - return generic::fwd_byte_by_byte(start, end, |b| { - b == self.0.needle1() - }); - } - // SAFETY: Building a `One` means it's safe to call 'neon' routines. - // Also, we've checked that our haystack is big enough to run on the - // vector routine. Pointer validity is caller's responsibility. 
- self.find_raw_impl(start, end) - } - - /// Like `rfind`, but accepts and returns raw pointers. - /// - /// When a match is found, the pointer returned is guaranteed to be - /// `>= start` and `< end`. - /// - /// This routine is useful if you're already using raw pointers and would - /// like to avoid converting back to a slice before executing a search. - /// - /// # Safety - /// - /// * Both `start` and `end` must be valid for reads. - /// * Both `start` and `end` must point to an initialized value. - /// * Both `start` and `end` must point to the same allocated object and - /// must either be in bounds or at most one byte past the end of the - /// allocated object. - /// * Both `start` and `end` must be _derived from_ a pointer to the same - /// object. - /// * The distance between `start` and `end` must not overflow `isize`. - /// * The distance being in bounds must not rely on "wrapping around" the - /// address space. - /// - /// Note that callers may pass a pair of pointers such that `start >= end`. - /// In that case, `None` will always be returned. - #[inline] - pub unsafe fn rfind_raw( - &self, - start: *const u8, - end: *const u8, - ) -> Option<*const u8> { - if start >= end { - return None; - } - if end.distance(start) < uint8x16_t::BYTES { - // SAFETY: We require the caller to pass valid start/end pointers. - return generic::rev_byte_by_byte(start, end, |b| { - b == self.0.needle1() - }); - } - // SAFETY: Building a `One` means it's safe to call 'neon' routines. - // Also, we've checked that our haystack is big enough to run on the - // vector routine. Pointer validity is caller's responsibility. - self.rfind_raw_impl(start, end) - } - - /// Like `count`, but accepts and returns raw pointers. - /// - /// This routine is useful if you're already using raw pointers and would - /// like to avoid converting back to a slice before executing a search. - /// - /// # Safety - /// - /// * Both `start` and `end` must be valid for reads. - /// * Both `start` and `end` must point to an initialized value. - /// * Both `start` and `end` must point to the same allocated object and - /// must either be in bounds or at most one byte past the end of the - /// allocated object. - /// * Both `start` and `end` must be _derived from_ a pointer to the same - /// object. - /// * The distance between `start` and `end` must not overflow `isize`. - /// * The distance being in bounds must not rely on "wrapping around" the - /// address space. - /// - /// Note that callers may pass a pair of pointers such that `start >= end`. - /// In that case, `None` will always be returned. - #[inline] - pub unsafe fn count_raw(&self, start: *const u8, end: *const u8) -> usize { - if start >= end { - return 0; - } - if end.distance(start) < uint8x16_t::BYTES { - // SAFETY: We require the caller to pass valid start/end pointers. - return generic::count_byte_by_byte(start, end, |b| { - b == self.0.needle1() - }); - } - // SAFETY: Building a `One` means it's safe to call 'neon' routines. - // Also, we've checked that our haystack is big enough to run on the - // vector routine. Pointer validity is caller's responsibility. - self.count_raw_impl(start, end) - } - - /// Execute a search using neon vectors and routines. - /// - /// # Safety - /// - /// Same as [`One::find_raw`], except the distance between `start` and - /// `end` must be at least the size of a neon vector (in bytes). 
- /// - /// (The target feature safety obligation is automatically fulfilled by - /// virtue of being a method on `One`, which can only be constructed - /// when it is safe to call `neon` routines.) - #[target_feature(enable = "neon")] - #[inline] - unsafe fn find_raw_impl( - &self, - start: *const u8, - end: *const u8, - ) -> Option<*const u8> { - self.0.find_raw(start, end) - } - - /// Execute a search using neon vectors and routines. - /// - /// # Safety - /// - /// Same as [`One::rfind_raw`], except the distance between `start` and - /// `end` must be at least the size of a neon vector (in bytes). - /// - /// (The target feature safety obligation is automatically fulfilled by - /// virtue of being a method on `One`, which can only be constructed - /// when it is safe to call `neon` routines.) - #[target_feature(enable = "neon")] - #[inline] - unsafe fn rfind_raw_impl( - &self, - start: *const u8, - end: *const u8, - ) -> Option<*const u8> { - self.0.rfind_raw(start, end) - } - - /// Execute a count using neon vectors and routines. - /// - /// # Safety - /// - /// Same as [`One::count_raw`], except the distance between `start` and - /// `end` must be at least the size of a neon vector (in bytes). - /// - /// (The target feature safety obligation is automatically fulfilled by - /// virtue of being a method on `One`, which can only be constructed - /// when it is safe to call `neon` routines.) - #[target_feature(enable = "neon")] - #[inline] - unsafe fn count_raw_impl( - &self, - start: *const u8, - end: *const u8, - ) -> usize { - self.0.count_raw(start, end) - } - - /// Returns an iterator over all occurrences of the needle byte in the - /// given haystack. - /// - /// The iterator returned implements `DoubleEndedIterator`. This means it - /// can also be used to find occurrences in reverse order. - #[inline] - pub fn iter<'a, 'h>(&'a self, haystack: &'h [u8]) -> OneIter<'a, 'h> { - OneIter { searcher: self, it: generic::Iter::new(haystack) } - } -} - -/// An iterator over all occurrences of a single byte in a haystack. -/// -/// This iterator implements `DoubleEndedIterator`, which means it can also be -/// used to find occurrences in reverse order. -/// -/// This iterator is created by the [`One::iter`] method. -/// -/// The lifetime parameters are as follows: -/// -/// * `'a` refers to the lifetime of the underlying [`One`] searcher. -/// * `'h` refers to the lifetime of the haystack being searched. -#[derive(Clone, Debug)] -pub struct OneIter<'a, 'h> { - searcher: &'a One, - it: generic::Iter<'h>, -} - -impl<'a, 'h> Iterator for OneIter<'a, 'h> { - type Item = usize; - - #[inline] - fn next(&mut self) -> Option { - // SAFETY: We rely on the generic iterator to provide valid start - // and end pointers, but we guarantee that any pointer returned by - // 'find_raw' falls within the bounds of the start and end pointer. - unsafe { self.it.next(|s, e| self.searcher.find_raw(s, e)) } - } - - #[inline] - fn count(self) -> usize { - self.it.count(|s, e| { - // SAFETY: We rely on our generic iterator to return valid start - // and end pointers. - unsafe { self.searcher.count_raw(s, e) } - }) - } - - #[inline] - fn size_hint(&self) -> (usize, Option) { - self.it.size_hint() - } -} - -impl<'a, 'h> DoubleEndedIterator for OneIter<'a, 'h> { - #[inline] - fn next_back(&mut self) -> Option { - // SAFETY: We rely on the generic iterator to provide valid start - // and end pointers, but we guarantee that any pointer returned by - // 'rfind_raw' falls within the bounds of the start and end pointer. 
- unsafe { self.it.next_back(|s, e| self.searcher.rfind_raw(s, e)) } - } -} - -impl<'a, 'h> core::iter::FusedIterator for OneIter<'a, 'h> {} - -/// Finds all occurrences of two bytes in a haystack. -/// -/// That is, this reports matches of one of two possible bytes. For example, -/// searching for `a` or `b` in `afoobar` would report matches at offsets `0`, -/// `4` and `5`. -#[derive(Clone, Copy, Debug)] -pub struct Two(generic::Two); - -impl Two { - /// Create a new searcher that finds occurrences of the needle bytes given. - /// - /// This particular searcher is specialized to use neon vector instructions - /// that typically make it quite fast. - /// - /// If neon is unavailable in the current environment, then `None` is - /// returned. - #[inline] - pub fn new(needle1: u8, needle2: u8) -> Option { - if Two::is_available() { - // SAFETY: we check that neon is available above. - unsafe { Some(Two::new_unchecked(needle1, needle2)) } - } else { - None - } - } - - /// Create a new finder specific to neon vectors and routines without - /// checking that neon is available. - /// - /// # Safety - /// - /// Callers must guarantee that it is safe to execute `neon` instructions - /// in the current environment. - /// - /// Note that it is a common misconception that if one compiles for an - /// `x86_64` target, then they therefore automatically have access to neon - /// instructions. While this is almost always the case, it isn't true in - /// 100% of cases. - #[target_feature(enable = "neon")] - #[inline] - pub unsafe fn new_unchecked(needle1: u8, needle2: u8) -> Two { - Two(generic::Two::new(needle1, needle2)) - } - - /// Returns true when this implementation is available in the current - /// environment. - /// - /// When this is true, it is guaranteed that [`Two::new`] will return - /// a `Some` value. Similarly, when it is false, it is guaranteed that - /// `Two::new` will return a `None` value. - /// - /// Note also that for the lifetime of a single program, if this returns - /// true then it will always return true. - #[inline] - pub fn is_available() -> bool { - #[cfg(target_feature = "neon")] - { - true - } - #[cfg(not(target_feature = "neon"))] - { - false - } - } - - /// Return the first occurrence of one of the needle bytes in the given - /// haystack. If no such occurrence exists, then `None` is returned. - /// - /// The occurrence is reported as an offset into `haystack`. Its maximum - /// value is `haystack.len() - 1`. - #[inline] - pub fn find(&self, haystack: &[u8]) -> Option { - // SAFETY: `find_raw` guarantees that if a pointer is returned, it - // falls within the bounds of the start and end pointers. - unsafe { - generic::search_slice_with_raw(haystack, |s, e| { - self.find_raw(s, e) - }) - } - } - - /// Return the last occurrence of one of the needle bytes in the given - /// haystack. If no such occurrence exists, then `None` is returned. - /// - /// The occurrence is reported as an offset into `haystack`. Its maximum - /// value is `haystack.len() - 1`. - #[inline] - pub fn rfind(&self, haystack: &[u8]) -> Option { - // SAFETY: `rfind_raw` guarantees that if a pointer is returned, it - // falls within the bounds of the start and end pointers. - unsafe { - generic::search_slice_with_raw(haystack, |s, e| { - self.rfind_raw(s, e) - }) - } - } - - /// Like `find`, but accepts and returns raw pointers. - /// - /// When a match is found, the pointer returned is guaranteed to be - /// `>= start` and `< end`. 
- /// - /// This routine is useful if you're already using raw pointers and would - /// like to avoid converting back to a slice before executing a search. - /// - /// # Safety - /// - /// * Both `start` and `end` must be valid for reads. - /// * Both `start` and `end` must point to an initialized value. - /// * Both `start` and `end` must point to the same allocated object and - /// must either be in bounds or at most one byte past the end of the - /// allocated object. - /// * Both `start` and `end` must be _derived from_ a pointer to the same - /// object. - /// * The distance between `start` and `end` must not overflow `isize`. - /// * The distance being in bounds must not rely on "wrapping around" the - /// address space. - /// - /// Note that callers may pass a pair of pointers such that `start >= end`. - /// In that case, `None` will always be returned. - #[inline] - pub unsafe fn find_raw( - &self, - start: *const u8, - end: *const u8, - ) -> Option<*const u8> { - if start >= end { - return None; - } - if end.distance(start) < uint8x16_t::BYTES { - // SAFETY: We require the caller to pass valid start/end pointers. - return generic::fwd_byte_by_byte(start, end, |b| { - b == self.0.needle1() || b == self.0.needle2() - }); - } - // SAFETY: Building a `Two` means it's safe to call 'neon' routines. - // Also, we've checked that our haystack is big enough to run on the - // vector routine. Pointer validity is caller's responsibility. - self.find_raw_impl(start, end) - } - - /// Like `rfind`, but accepts and returns raw pointers. - /// - /// When a match is found, the pointer returned is guaranteed to be - /// `>= start` and `< end`. - /// - /// This routine is useful if you're already using raw pointers and would - /// like to avoid converting back to a slice before executing a search. - /// - /// # Safety - /// - /// * Both `start` and `end` must be valid for reads. - /// * Both `start` and `end` must point to an initialized value. - /// * Both `start` and `end` must point to the same allocated object and - /// must either be in bounds or at most one byte past the end of the - /// allocated object. - /// * Both `start` and `end` must be _derived from_ a pointer to the same - /// object. - /// * The distance between `start` and `end` must not overflow `isize`. - /// * The distance being in bounds must not rely on "wrapping around" the - /// address space. - /// - /// Note that callers may pass a pair of pointers such that `start >= end`. - /// In that case, `None` will always be returned. - #[inline] - pub unsafe fn rfind_raw( - &self, - start: *const u8, - end: *const u8, - ) -> Option<*const u8> { - if start >= end { - return None; - } - if end.distance(start) < uint8x16_t::BYTES { - // SAFETY: We require the caller to pass valid start/end pointers. - return generic::rev_byte_by_byte(start, end, |b| { - b == self.0.needle1() || b == self.0.needle2() - }); - } - // SAFETY: Building a `Two` means it's safe to call 'neon' routines. - // Also, we've checked that our haystack is big enough to run on the - // vector routine. Pointer validity is caller's responsibility. - self.rfind_raw_impl(start, end) - } - - /// Execute a search using neon vectors and routines. - /// - /// # Safety - /// - /// Same as [`Two::find_raw`], except the distance between `start` and - /// `end` must be at least the size of a neon vector (in bytes). 
- /// - /// (The target feature safety obligation is automatically fulfilled by - /// virtue of being a method on `Two`, which can only be constructed - /// when it is safe to call `neon` routines.) - #[target_feature(enable = "neon")] - #[inline] - unsafe fn find_raw_impl( - &self, - start: *const u8, - end: *const u8, - ) -> Option<*const u8> { - self.0.find_raw(start, end) - } - - /// Execute a search using neon vectors and routines. - /// - /// # Safety - /// - /// Same as [`Two::rfind_raw`], except the distance between `start` and - /// `end` must be at least the size of a neon vector (in bytes). - /// - /// (The target feature safety obligation is automatically fulfilled by - /// virtue of being a method on `Two`, which can only be constructed - /// when it is safe to call `neon` routines.) - #[target_feature(enable = "neon")] - #[inline] - unsafe fn rfind_raw_impl( - &self, - start: *const u8, - end: *const u8, - ) -> Option<*const u8> { - self.0.rfind_raw(start, end) - } - - /// Returns an iterator over all occurrences of the needle bytes in the - /// given haystack. - /// - /// The iterator returned implements `DoubleEndedIterator`. This means it - /// can also be used to find occurrences in reverse order. - #[inline] - pub fn iter<'a, 'h>(&'a self, haystack: &'h [u8]) -> TwoIter<'a, 'h> { - TwoIter { searcher: self, it: generic::Iter::new(haystack) } - } -} - -/// An iterator over all occurrences of two possible bytes in a haystack. -/// -/// This iterator implements `DoubleEndedIterator`, which means it can also be -/// used to find occurrences in reverse order. -/// -/// This iterator is created by the [`Two::iter`] method. -/// -/// The lifetime parameters are as follows: -/// -/// * `'a` refers to the lifetime of the underlying [`Two`] searcher. -/// * `'h` refers to the lifetime of the haystack being searched. -#[derive(Clone, Debug)] -pub struct TwoIter<'a, 'h> { - searcher: &'a Two, - it: generic::Iter<'h>, -} - -impl<'a, 'h> Iterator for TwoIter<'a, 'h> { - type Item = usize; - - #[inline] - fn next(&mut self) -> Option { - // SAFETY: We rely on the generic iterator to provide valid start - // and end pointers, but we guarantee that any pointer returned by - // 'find_raw' falls within the bounds of the start and end pointer. - unsafe { self.it.next(|s, e| self.searcher.find_raw(s, e)) } - } - - #[inline] - fn size_hint(&self) -> (usize, Option) { - self.it.size_hint() - } -} - -impl<'a, 'h> DoubleEndedIterator for TwoIter<'a, 'h> { - #[inline] - fn next_back(&mut self) -> Option { - // SAFETY: We rely on the generic iterator to provide valid start - // and end pointers, but we guarantee that any pointer returned by - // 'rfind_raw' falls within the bounds of the start and end pointer. - unsafe { self.it.next_back(|s, e| self.searcher.rfind_raw(s, e)) } - } -} - -impl<'a, 'h> core::iter::FusedIterator for TwoIter<'a, 'h> {} - -/// Finds all occurrences of three bytes in a haystack. -/// -/// That is, this reports matches of one of three possible bytes. For example, -/// searching for `a`, `b` or `o` in `afoobar` would report matches at offsets -/// `0`, `2`, `3`, `4` and `5`. -#[derive(Clone, Copy, Debug)] -pub struct Three(generic::Three); - -impl Three { - /// Create a new searcher that finds occurrences of the needle bytes given. - /// - /// This particular searcher is specialized to use neon vector instructions - /// that typically make it quite fast. - /// - /// If neon is unavailable in the current environment, then `None` is - /// returned. 
- #[inline] - pub fn new(needle1: u8, needle2: u8, needle3: u8) -> Option { - if Three::is_available() { - // SAFETY: we check that neon is available above. - unsafe { Some(Three::new_unchecked(needle1, needle2, needle3)) } - } else { - None - } - } - - /// Create a new finder specific to neon vectors and routines without - /// checking that neon is available. - /// - /// # Safety - /// - /// Callers must guarantee that it is safe to execute `neon` instructions - /// in the current environment. - /// - /// Note that it is a common misconception that if one compiles for an - /// `x86_64` target, then they therefore automatically have access to neon - /// instructions. While this is almost always the case, it isn't true in - /// 100% of cases. - #[target_feature(enable = "neon")] - #[inline] - pub unsafe fn new_unchecked( - needle1: u8, - needle2: u8, - needle3: u8, - ) -> Three { - Three(generic::Three::new(needle1, needle2, needle3)) - } - - /// Returns true when this implementation is available in the current - /// environment. - /// - /// When this is true, it is guaranteed that [`Three::new`] will return - /// a `Some` value. Similarly, when it is false, it is guaranteed that - /// `Three::new` will return a `None` value. - /// - /// Note also that for the lifetime of a single program, if this returns - /// true then it will always return true. - #[inline] - pub fn is_available() -> bool { - #[cfg(target_feature = "neon")] - { - true - } - #[cfg(not(target_feature = "neon"))] - { - false - } - } - - /// Return the first occurrence of one of the needle bytes in the given - /// haystack. If no such occurrence exists, then `None` is returned. - /// - /// The occurrence is reported as an offset into `haystack`. Its maximum - /// value is `haystack.len() - 1`. - #[inline] - pub fn find(&self, haystack: &[u8]) -> Option { - // SAFETY: `find_raw` guarantees that if a pointer is returned, it - // falls within the bounds of the start and end pointers. - unsafe { - generic::search_slice_with_raw(haystack, |s, e| { - self.find_raw(s, e) - }) - } - } - - /// Return the last occurrence of one of the needle bytes in the given - /// haystack. If no such occurrence exists, then `None` is returned. - /// - /// The occurrence is reported as an offset into `haystack`. Its maximum - /// value is `haystack.len() - 1`. - #[inline] - pub fn rfind(&self, haystack: &[u8]) -> Option { - // SAFETY: `rfind_raw` guarantees that if a pointer is returned, it - // falls within the bounds of the start and end pointers. - unsafe { - generic::search_slice_with_raw(haystack, |s, e| { - self.rfind_raw(s, e) - }) - } - } - - /// Like `find`, but accepts and returns raw pointers. - /// - /// When a match is found, the pointer returned is guaranteed to be - /// `>= start` and `< end`. - /// - /// This routine is useful if you're already using raw pointers and would - /// like to avoid converting back to a slice before executing a search. - /// - /// # Safety - /// - /// * Both `start` and `end` must be valid for reads. - /// * Both `start` and `end` must point to an initialized value. - /// * Both `start` and `end` must point to the same allocated object and - /// must either be in bounds or at most one byte past the end of the - /// allocated object. - /// * Both `start` and `end` must be _derived from_ a pointer to the same - /// object. - /// * The distance between `start` and `end` must not overflow `isize`. - /// * The distance being in bounds must not rely on "wrapping around" the - /// address space. 
- /// - /// Note that callers may pass a pair of pointers such that `start >= end`. - /// In that case, `None` will always be returned. - #[inline] - pub unsafe fn find_raw( - &self, - start: *const u8, - end: *const u8, - ) -> Option<*const u8> { - if start >= end { - return None; - } - if end.distance(start) < uint8x16_t::BYTES { - // SAFETY: We require the caller to pass valid start/end pointers. - return generic::fwd_byte_by_byte(start, end, |b| { - b == self.0.needle1() - || b == self.0.needle2() - || b == self.0.needle3() - }); - } - // SAFETY: Building a `Three` means it's safe to call 'neon' routines. - // Also, we've checked that our haystack is big enough to run on the - // vector routine. Pointer validity is caller's responsibility. - self.find_raw_impl(start, end) - } - - /// Like `rfind`, but accepts and returns raw pointers. - /// - /// When a match is found, the pointer returned is guaranteed to be - /// `>= start` and `< end`. - /// - /// This routine is useful if you're already using raw pointers and would - /// like to avoid converting back to a slice before executing a search. - /// - /// # Safety - /// - /// * Both `start` and `end` must be valid for reads. - /// * Both `start` and `end` must point to an initialized value. - /// * Both `start` and `end` must point to the same allocated object and - /// must either be in bounds or at most one byte past the end of the - /// allocated object. - /// * Both `start` and `end` must be _derived from_ a pointer to the same - /// object. - /// * The distance between `start` and `end` must not overflow `isize`. - /// * The distance being in bounds must not rely on "wrapping around" the - /// address space. - /// - /// Note that callers may pass a pair of pointers such that `start >= end`. - /// In that case, `None` will always be returned. - #[inline] - pub unsafe fn rfind_raw( - &self, - start: *const u8, - end: *const u8, - ) -> Option<*const u8> { - if start >= end { - return None; - } - if end.distance(start) < uint8x16_t::BYTES { - // SAFETY: We require the caller to pass valid start/end pointers. - return generic::rev_byte_by_byte(start, end, |b| { - b == self.0.needle1() - || b == self.0.needle2() - || b == self.0.needle3() - }); - } - // SAFETY: Building a `Three` means it's safe to call 'neon' routines. - // Also, we've checked that our haystack is big enough to run on the - // vector routine. Pointer validity is caller's responsibility. - self.rfind_raw_impl(start, end) - } - - /// Execute a search using neon vectors and routines. - /// - /// # Safety - /// - /// Same as [`Three::find_raw`], except the distance between `start` and - /// `end` must be at least the size of a neon vector (in bytes). - /// - /// (The target feature safety obligation is automatically fulfilled by - /// virtue of being a method on `Three`, which can only be constructed - /// when it is safe to call `neon` routines.) - #[target_feature(enable = "neon")] - #[inline] - unsafe fn find_raw_impl( - &self, - start: *const u8, - end: *const u8, - ) -> Option<*const u8> { - self.0.find_raw(start, end) - } - - /// Execute a search using neon vectors and routines. - /// - /// # Safety - /// - /// Same as [`Three::rfind_raw`], except the distance between `start` and - /// `end` must be at least the size of a neon vector (in bytes). - /// - /// (The target feature safety obligation is automatically fulfilled by - /// virtue of being a method on `Three`, which can only be constructed - /// when it is safe to call `neon` routines.) 
- #[target_feature(enable = "neon")] - #[inline] - unsafe fn rfind_raw_impl( - &self, - start: *const u8, - end: *const u8, - ) -> Option<*const u8> { - self.0.rfind_raw(start, end) - } - - /// Returns an iterator over all occurrences of the needle byte in the - /// given haystack. - /// - /// The iterator returned implements `DoubleEndedIterator`. This means it - /// can also be used to find occurrences in reverse order. - #[inline] - pub fn iter<'a, 'h>(&'a self, haystack: &'h [u8]) -> ThreeIter<'a, 'h> { - ThreeIter { searcher: self, it: generic::Iter::new(haystack) } - } -} - -/// An iterator over all occurrences of three possible bytes in a haystack. -/// -/// This iterator implements `DoubleEndedIterator`, which means it can also be -/// used to find occurrences in reverse order. -/// -/// This iterator is created by the [`Three::iter`] method. -/// -/// The lifetime parameters are as follows: -/// -/// * `'a` refers to the lifetime of the underlying [`Three`] searcher. -/// * `'h` refers to the lifetime of the haystack being searched. -#[derive(Clone, Debug)] -pub struct ThreeIter<'a, 'h> { - searcher: &'a Three, - it: generic::Iter<'h>, -} - -impl<'a, 'h> Iterator for ThreeIter<'a, 'h> { - type Item = usize; - - #[inline] - fn next(&mut self) -> Option { - // SAFETY: We rely on the generic iterator to provide valid start - // and end pointers, but we guarantee that any pointer returned by - // 'find_raw' falls within the bounds of the start and end pointer. - unsafe { self.it.next(|s, e| self.searcher.find_raw(s, e)) } - } - - #[inline] - fn size_hint(&self) -> (usize, Option) { - self.it.size_hint() - } -} - -impl<'a, 'h> DoubleEndedIterator for ThreeIter<'a, 'h> { - #[inline] - fn next_back(&mut self) -> Option { - // SAFETY: We rely on the generic iterator to provide valid start - // and end pointers, but we guarantee that any pointer returned by - // 'rfind_raw' falls within the bounds of the start and end pointer. 
- unsafe { self.it.next_back(|s, e| self.searcher.rfind_raw(s, e)) } - } -} - -impl<'a, 'h> core::iter::FusedIterator for ThreeIter<'a, 'h> {} - -#[cfg(test)] -mod tests { - use super::*; - - define_memchr_quickcheck!(super); - - #[test] - fn forward_one() { - crate::tests::memchr::Runner::new(1).forward_iter( - |haystack, needles| { - Some(One::new(needles[0])?.iter(haystack).collect()) - }, - ) - } - - #[test] - fn reverse_one() { - crate::tests::memchr::Runner::new(1).reverse_iter( - |haystack, needles| { - Some(One::new(needles[0])?.iter(haystack).rev().collect()) - }, - ) - } - - #[test] - fn count_one() { - crate::tests::memchr::Runner::new(1).count_iter(|haystack, needles| { - Some(One::new(needles[0])?.iter(haystack).count()) - }) - } - - #[test] - fn forward_two() { - crate::tests::memchr::Runner::new(2).forward_iter( - |haystack, needles| { - let n1 = needles.get(0).copied()?; - let n2 = needles.get(1).copied()?; - Some(Two::new(n1, n2)?.iter(haystack).collect()) - }, - ) - } - - #[test] - fn reverse_two() { - crate::tests::memchr::Runner::new(2).reverse_iter( - |haystack, needles| { - let n1 = needles.get(0).copied()?; - let n2 = needles.get(1).copied()?; - Some(Two::new(n1, n2)?.iter(haystack).rev().collect()) - }, - ) - } - - #[test] - fn forward_three() { - crate::tests::memchr::Runner::new(3).forward_iter( - |haystack, needles| { - let n1 = needles.get(0).copied()?; - let n2 = needles.get(1).copied()?; - let n3 = needles.get(2).copied()?; - Some(Three::new(n1, n2, n3)?.iter(haystack).collect()) - }, - ) - } - - #[test] - fn reverse_three() { - crate::tests::memchr::Runner::new(3).reverse_iter( - |haystack, needles| { - let n1 = needles.get(0).copied()?; - let n2 = needles.get(1).copied()?; - let n3 = needles.get(2).copied()?; - Some(Three::new(n1, n2, n3)?.iter(haystack).rev().collect()) - }, - ) - } -} diff --git a/vendor/memchr/src/arch/aarch64/neon/mod.rs b/vendor/memchr/src/arch/aarch64/neon/mod.rs deleted file mode 100644 index ccf9cf81f4bf47..00000000000000 --- a/vendor/memchr/src/arch/aarch64/neon/mod.rs +++ /dev/null @@ -1,6 +0,0 @@ -/*! -Algorithms for the `aarch64` target using 128-bit vectors via NEON. -*/ - -pub mod memchr; -pub mod packedpair; diff --git a/vendor/memchr/src/arch/aarch64/neon/packedpair.rs b/vendor/memchr/src/arch/aarch64/neon/packedpair.rs deleted file mode 100644 index 5cc2a029697b88..00000000000000 --- a/vendor/memchr/src/arch/aarch64/neon/packedpair.rs +++ /dev/null @@ -1,236 +0,0 @@ -/*! -A 128-bit vector implementation of the "packed pair" SIMD algorithm. - -The "packed pair" algorithm is based on the [generic SIMD] algorithm. The main -difference is that it (by default) uses a background distribution of byte -frequencies to heuristically select the pair of bytes to search for. - -[generic SIMD]: http://0x80.pl/articles/simd-strfind.html#first-and-last -*/ - -use core::arch::aarch64::uint8x16_t; - -use crate::arch::{all::packedpair::Pair, generic::packedpair}; - -/// A "packed pair" finder that uses 128-bit vector operations. -/// -/// This finder picks two bytes that it believes have high predictive power -/// for indicating an overall match of a needle. Depending on whether -/// `Finder::find` or `Finder::find_prefilter` is used, it reports offsets -/// where the needle matches or could match. In the prefilter case, candidates -/// are reported whenever the [`Pair`] of bytes given matches. -#[derive(Clone, Copy, Debug)] -pub struct Finder(packedpair::Finder); - -/// A "packed pair" finder that uses 128-bit vector operations. 
-/// -/// This finder picks two bytes that it believes have high predictive power -/// for indicating an overall match of a needle. Depending on whether -/// `Finder::find` or `Finder::find_prefilter` is used, it reports offsets -/// where the needle matches or could match. In the prefilter case, candidates -/// are reported whenever the [`Pair`] of bytes given matches. -impl Finder { - /// Create a new pair searcher. The searcher returned can either report - /// exact matches of `needle` or act as a prefilter and report candidate - /// positions of `needle`. - /// - /// If neon is unavailable in the current environment or if a [`Pair`] - /// could not be constructed from the needle given, then `None` is - /// returned. - #[inline] - pub fn new(needle: &[u8]) -> Option { - Finder::with_pair(needle, Pair::new(needle)?) - } - - /// Create a new "packed pair" finder using the pair of bytes given. - /// - /// This constructor permits callers to control precisely which pair of - /// bytes is used as a predicate. - /// - /// If neon is unavailable in the current environment, then `None` is - /// returned. - #[inline] - pub fn with_pair(needle: &[u8], pair: Pair) -> Option { - if Finder::is_available() { - // SAFETY: we check that NEON is available above. We are also - // guaranteed to have needle.len() > 1 because we have a valid - // Pair. - unsafe { Some(Finder::with_pair_impl(needle, pair)) } - } else { - None - } - } - - /// Create a new `Finder` specific to neon vectors and routines. - /// - /// # Safety - /// - /// Same as the safety for `packedpair::Finder::new`, and callers must also - /// ensure that neon is available. - #[target_feature(enable = "neon")] - #[inline] - unsafe fn with_pair_impl(needle: &[u8], pair: Pair) -> Finder { - let finder = packedpair::Finder::::new(needle, pair); - Finder(finder) - } - - /// Returns true when this implementation is available in the current - /// environment. - /// - /// When this is true, it is guaranteed that [`Finder::with_pair`] will - /// return a `Some` value. Similarly, when it is false, it is guaranteed - /// that `Finder::with_pair` will return a `None` value. Notice that this - /// does not guarantee that [`Finder::new`] will return a `Finder`. Namely, - /// even when `Finder::is_available` is true, it is not guaranteed that a - /// valid [`Pair`] can be found from the needle given. - /// - /// Note also that for the lifetime of a single program, if this returns - /// true then it will always return true. - #[inline] - pub fn is_available() -> bool { - #[cfg(target_feature = "neon")] - { - true - } - #[cfg(not(target_feature = "neon"))] - { - false - } - } - - /// Execute a search using neon vectors and routines. - /// - /// # Panics - /// - /// When `haystack.len()` is less than [`Finder::min_haystack_len`]. - #[inline] - pub fn find(&self, haystack: &[u8], needle: &[u8]) -> Option { - // SAFETY: Building a `Finder` means it's safe to call 'neon' routines. - unsafe { self.find_impl(haystack, needle) } - } - - /// Execute a search using neon vectors and routines. - /// - /// # Panics - /// - /// When `haystack.len()` is less than [`Finder::min_haystack_len`]. - #[inline] - pub fn find_prefilter(&self, haystack: &[u8]) -> Option { - // SAFETY: Building a `Finder` means it's safe to call 'neon' routines. - unsafe { self.find_prefilter_impl(haystack) } - } - - /// Execute a search using neon vectors and routines. - /// - /// # Panics - /// - /// When `haystack.len()` is less than [`Finder::min_haystack_len`]. 
- /// - /// # Safety - /// - /// (The target feature safety obligation is automatically fulfilled by - /// virtue of being a method on `Finder`, which can only be constructed - /// when it is safe to call `neon` routines.) - #[target_feature(enable = "neon")] - #[inline] - unsafe fn find_impl( - &self, - haystack: &[u8], - needle: &[u8], - ) -> Option { - self.0.find(haystack, needle) - } - - /// Execute a prefilter search using neon vectors and routines. - /// - /// # Panics - /// - /// When `haystack.len()` is less than [`Finder::min_haystack_len`]. - /// - /// # Safety - /// - /// (The target feature safety obligation is automatically fulfilled by - /// virtue of being a method on `Finder`, which can only be constructed - /// when it is safe to call `neon` routines.) - #[target_feature(enable = "neon")] - #[inline] - unsafe fn find_prefilter_impl(&self, haystack: &[u8]) -> Option { - self.0.find_prefilter(haystack) - } - - /// Returns the pair of offsets (into the needle) used to check as a - /// predicate before confirming whether a needle exists at a particular - /// position. - #[inline] - pub fn pair(&self) -> &Pair { - self.0.pair() - } - - /// Returns the minimum haystack length that this `Finder` can search. - /// - /// Using a haystack with length smaller than this in a search will result - /// in a panic. The reason for this restriction is that this finder is - /// meant to be a low-level component that is part of a larger substring - /// strategy. In that sense, it avoids trying to handle all cases and - /// instead only handles the cases that it can handle very well. - #[inline] - pub fn min_haystack_len(&self) -> usize { - self.0.min_haystack_len() - } -} - -#[cfg(test)] -mod tests { - use super::*; - - fn find(haystack: &[u8], needle: &[u8]) -> Option> { - let f = Finder::new(needle)?; - if haystack.len() < f.min_haystack_len() { - return None; - } - Some(f.find(haystack, needle)) - } - - define_substring_forward_quickcheck!(find); - - #[test] - fn forward_substring() { - crate::tests::substring::Runner::new().fwd(find).run() - } - - #[test] - fn forward_packedpair() { - fn find( - haystack: &[u8], - needle: &[u8], - index1: u8, - index2: u8, - ) -> Option> { - let pair = Pair::with_indices(needle, index1, index2)?; - let f = Finder::with_pair(needle, pair)?; - if haystack.len() < f.min_haystack_len() { - return None; - } - Some(f.find(haystack, needle)) - } - crate::tests::packedpair::Runner::new().fwd(find).run() - } - - #[test] - fn forward_packedpair_prefilter() { - fn find( - haystack: &[u8], - needle: &[u8], - index1: u8, - index2: u8, - ) -> Option> { - let pair = Pair::with_indices(needle, index1, index2)?; - let f = Finder::with_pair(needle, pair)?; - if haystack.len() < f.min_haystack_len() { - return None; - } - Some(f.find_prefilter(haystack)) - } - crate::tests::packedpair::Runner::new().fwd(find).run() - } -} diff --git a/vendor/memchr/src/arch/all/memchr.rs b/vendor/memchr/src/arch/all/memchr.rs deleted file mode 100644 index 7f327f86f45732..00000000000000 --- a/vendor/memchr/src/arch/all/memchr.rs +++ /dev/null @@ -1,1022 +0,0 @@ -/*! -Provides architecture independent implementations of `memchr` and friends. - -The main types in this module are [`One`], [`Two`] and [`Three`]. They are for -searching for one, two or three distinct bytes, respectively, in a haystack. -Each type also has corresponding double ended iterators. 
These searchers -are typically slower than hand-coded vector routines accomplishing the same -task, but are also typically faster than naive scalar code. These routines -effectively work by treating a `usize` as a vector of 8-bit lanes, and thus -achieves some level of data parallelism even without explicit vector support. - -The `One` searcher also provides a [`One::count`] routine for efficiently -counting the number of times a single byte occurs in a haystack. This is -useful, for example, for counting the number of lines in a haystack. This -routine exists because it is usually faster, especially with a high match -count, than using [`One::find`] repeatedly. ([`OneIter`] specializes its -`Iterator::count` implementation to use this routine.) - -Only one, two and three bytes are supported because three bytes is about -the point where one sees diminishing returns. Beyond this point and it's -probably (but not necessarily) better to just use a simple `[bool; 256]` array -or similar. However, it depends mightily on the specific work-load and the -expected match frequency. -*/ - -use crate::{arch::generic::memchr as generic, ext::Pointer}; - -/// The number of bytes in a single `usize` value. -const USIZE_BYTES: usize = (usize::BITS / 8) as usize; -/// The bits that must be zero for a `*const usize` to be properly aligned. -const USIZE_ALIGN: usize = USIZE_BYTES - 1; - -/// Finds all occurrences of a single byte in a haystack. -#[derive(Clone, Copy, Debug)] -pub struct One { - s1: u8, - v1: usize, -} - -impl One { - /// The number of bytes we examine per each iteration of our search loop. - const LOOP_BYTES: usize = 2 * USIZE_BYTES; - - /// Create a new searcher that finds occurrences of the byte given. - #[inline] - pub fn new(needle: u8) -> One { - One { s1: needle, v1: splat(needle) } - } - - /// A test-only routine so that we can bundle a bunch of quickcheck - /// properties into a single macro. Basically, this provides a constructor - /// that makes it identical to most other memchr implementations, which - /// have fallible constructors. - #[cfg(test)] - pub(crate) fn try_new(needle: u8) -> Option { - Some(One::new(needle)) - } - - /// Return the first occurrence of the needle in the given haystack. If no - /// such occurrence exists, then `None` is returned. - /// - /// The occurrence is reported as an offset into `haystack`. Its maximum - /// value for a non-empty haystack is `haystack.len() - 1`. - #[inline] - pub fn find(&self, haystack: &[u8]) -> Option { - // SAFETY: `find_raw` guarantees that if a pointer is returned, it - // falls within the bounds of the start and end pointers. - unsafe { - generic::search_slice_with_raw(haystack, |s, e| { - self.find_raw(s, e) - }) - } - } - - /// Return the last occurrence of the needle in the given haystack. If no - /// such occurrence exists, then `None` is returned. - /// - /// The occurrence is reported as an offset into `haystack`. Its maximum - /// value for a non-empty haystack is `haystack.len() - 1`. - #[inline] - pub fn rfind(&self, haystack: &[u8]) -> Option { - // SAFETY: `find_raw` guarantees that if a pointer is returned, it - // falls within the bounds of the start and end pointers. - unsafe { - generic::search_slice_with_raw(haystack, |s, e| { - self.rfind_raw(s, e) - }) - } - } - - /// Counts all occurrences of this byte in the given haystack. - #[inline] - pub fn count(&self, haystack: &[u8]) -> usize { - // SAFETY: All of our pointers are derived directly from a borrowed - // slice, which is guaranteed to be valid. 
- unsafe { - let start = haystack.as_ptr(); - let end = start.add(haystack.len()); - self.count_raw(start, end) - } - } - - /// Like `find`, but accepts and returns raw pointers. - /// - /// When a match is found, the pointer returned is guaranteed to be - /// `>= start` and `< end`. - /// - /// This routine is useful if you're already using raw pointers and would - /// like to avoid converting back to a slice before executing a search. - /// - /// # Safety - /// - /// * Both `start` and `end` must be valid for reads. - /// * Both `start` and `end` must point to an initialized value. - /// * Both `start` and `end` must point to the same allocated object and - /// must either be in bounds or at most one byte past the end of the - /// allocated object. - /// * Both `start` and `end` must be _derived from_ a pointer to the same - /// object. - /// * The distance between `start` and `end` must not overflow `isize`. - /// * The distance being in bounds must not rely on "wrapping around" the - /// address space. - /// - /// Note that callers may pass a pair of pointers such that `start >= end`. - /// In that case, `None` will always be returned. - #[inline] - pub unsafe fn find_raw( - &self, - start: *const u8, - end: *const u8, - ) -> Option<*const u8> { - if start >= end { - return None; - } - let confirm = |b| self.confirm(b); - let len = end.distance(start); - if len < USIZE_BYTES { - return generic::fwd_byte_by_byte(start, end, confirm); - } - - // The start of the search may not be aligned to `*const usize`, - // so we do an unaligned load here. - let chunk = start.cast::().read_unaligned(); - if self.has_needle(chunk) { - return generic::fwd_byte_by_byte(start, end, confirm); - } - - // And now we start our search at a guaranteed aligned position. - // The first iteration of the loop below will overlap with the the - // unaligned chunk above in cases where the search starts at an - // unaligned offset, but that's okay as we're only here if that - // above didn't find a match. - let mut cur = - start.add(USIZE_BYTES - (start.as_usize() & USIZE_ALIGN)); - debug_assert!(cur > start); - if len <= One::LOOP_BYTES { - return generic::fwd_byte_by_byte(cur, end, confirm); - } - debug_assert!(end.sub(One::LOOP_BYTES) >= start); - while cur <= end.sub(One::LOOP_BYTES) { - debug_assert_eq!(0, cur.as_usize() % USIZE_BYTES); - - let a = cur.cast::().read(); - let b = cur.add(USIZE_BYTES).cast::().read(); - if self.has_needle(a) || self.has_needle(b) { - break; - } - cur = cur.add(One::LOOP_BYTES); - } - generic::fwd_byte_by_byte(cur, end, confirm) - } - - /// Like `rfind`, but accepts and returns raw pointers. - /// - /// When a match is found, the pointer returned is guaranteed to be - /// `>= start` and `< end`. - /// - /// This routine is useful if you're already using raw pointers and would - /// like to avoid converting back to a slice before executing a search. - /// - /// # Safety - /// - /// * Both `start` and `end` must be valid for reads. - /// * Both `start` and `end` must point to an initialized value. - /// * Both `start` and `end` must point to the same allocated object and - /// must either be in bounds or at most one byte past the end of the - /// allocated object. - /// * Both `start` and `end` must be _derived from_ a pointer to the same - /// object. - /// * The distance between `start` and `end` must not overflow `isize`. - /// * The distance being in bounds must not rely on "wrapping around" the - /// address space. 
- /// - /// Note that callers may pass a pair of pointers such that `start >= end`. - /// In that case, `None` will always be returned. - #[inline] - pub unsafe fn rfind_raw( - &self, - start: *const u8, - end: *const u8, - ) -> Option<*const u8> { - if start >= end { - return None; - } - let confirm = |b| self.confirm(b); - let len = end.distance(start); - if len < USIZE_BYTES { - return generic::rev_byte_by_byte(start, end, confirm); - } - - let chunk = end.sub(USIZE_BYTES).cast::().read_unaligned(); - if self.has_needle(chunk) { - return generic::rev_byte_by_byte(start, end, confirm); - } - - let mut cur = end.sub(end.as_usize() & USIZE_ALIGN); - debug_assert!(start <= cur && cur <= end); - if len <= One::LOOP_BYTES { - return generic::rev_byte_by_byte(start, cur, confirm); - } - while cur >= start.add(One::LOOP_BYTES) { - debug_assert_eq!(0, cur.as_usize() % USIZE_BYTES); - - let a = cur.sub(2 * USIZE_BYTES).cast::().read(); - let b = cur.sub(1 * USIZE_BYTES).cast::().read(); - if self.has_needle(a) || self.has_needle(b) { - break; - } - cur = cur.sub(One::LOOP_BYTES); - } - generic::rev_byte_by_byte(start, cur, confirm) - } - - /// Counts all occurrences of this byte in the given haystack represented - /// by raw pointers. - /// - /// This routine is useful if you're already using raw pointers and would - /// like to avoid converting back to a slice before executing a search. - /// - /// # Safety - /// - /// * Both `start` and `end` must be valid for reads. - /// * Both `start` and `end` must point to an initialized value. - /// * Both `start` and `end` must point to the same allocated object and - /// must either be in bounds or at most one byte past the end of the - /// allocated object. - /// * Both `start` and `end` must be _derived from_ a pointer to the same - /// object. - /// * The distance between `start` and `end` must not overflow `isize`. - /// * The distance being in bounds must not rely on "wrapping around" the - /// address space. - /// - /// Note that callers may pass a pair of pointers such that `start >= end`. - /// In that case, `0` will always be returned. - #[inline] - pub unsafe fn count_raw(&self, start: *const u8, end: *const u8) -> usize { - if start >= end { - return 0; - } - // Sadly I couldn't get the SWAR approach to work here, so we just do - // one byte at a time for now. PRs to improve this are welcome. - let mut ptr = start; - let mut count = 0; - while ptr < end { - count += (ptr.read() == self.s1) as usize; - ptr = ptr.offset(1); - } - count - } - - /// Returns an iterator over all occurrences of the needle byte in the - /// given haystack. - /// - /// The iterator returned implements `DoubleEndedIterator`. This means it - /// can also be used to find occurrences in reverse order. - pub fn iter<'a, 'h>(&'a self, haystack: &'h [u8]) -> OneIter<'a, 'h> { - OneIter { searcher: self, it: generic::Iter::new(haystack) } - } - - #[inline(always)] - fn has_needle(&self, chunk: usize) -> bool { - has_zero_byte(self.v1 ^ chunk) - } - - #[inline(always)] - fn confirm(&self, haystack_byte: u8) -> bool { - self.s1 == haystack_byte - } -} - -/// An iterator over all occurrences of a single byte in a haystack. -/// -/// This iterator implements `DoubleEndedIterator`, which means it can also be -/// used to find occurrences in reverse order. -/// -/// This iterator is created by the [`One::iter`] method. -/// -/// The lifetime parameters are as follows: -/// -/// * `'a` refers to the lifetime of the underlying [`One`] searcher. 
-/// * `'h` refers to the lifetime of the haystack being searched. -#[derive(Clone, Debug)] -pub struct OneIter<'a, 'h> { - /// The underlying memchr searcher. - searcher: &'a One, - /// Generic iterator implementation. - it: generic::Iter<'h>, -} - -impl<'a, 'h> Iterator for OneIter<'a, 'h> { - type Item = usize; - - #[inline] - fn next(&mut self) -> Option { - // SAFETY: We rely on the generic iterator to provide valid start - // and end pointers, but we guarantee that any pointer returned by - // 'find_raw' falls within the bounds of the start and end pointer. - unsafe { self.it.next(|s, e| self.searcher.find_raw(s, e)) } - } - - #[inline] - fn count(self) -> usize { - self.it.count(|s, e| { - // SAFETY: We rely on our generic iterator to return valid start - // and end pointers. - unsafe { self.searcher.count_raw(s, e) } - }) - } - - #[inline] - fn size_hint(&self) -> (usize, Option) { - self.it.size_hint() - } -} - -impl<'a, 'h> DoubleEndedIterator for OneIter<'a, 'h> { - #[inline] - fn next_back(&mut self) -> Option { - // SAFETY: We rely on the generic iterator to provide valid start - // and end pointers, but we guarantee that any pointer returned by - // 'rfind_raw' falls within the bounds of the start and end pointer. - unsafe { self.it.next_back(|s, e| self.searcher.rfind_raw(s, e)) } - } -} - -/// Finds all occurrences of two bytes in a haystack. -/// -/// That is, this reports matches of one of two possible bytes. For example, -/// searching for `a` or `b` in `afoobar` would report matches at offsets `0`, -/// `4` and `5`. -#[derive(Clone, Copy, Debug)] -pub struct Two { - s1: u8, - s2: u8, - v1: usize, - v2: usize, -} - -impl Two { - /// Create a new searcher that finds occurrences of the two needle bytes - /// given. - #[inline] - pub fn new(needle1: u8, needle2: u8) -> Two { - Two { - s1: needle1, - s2: needle2, - v1: splat(needle1), - v2: splat(needle2), - } - } - - /// A test-only routine so that we can bundle a bunch of quickcheck - /// properties into a single macro. Basically, this provides a constructor - /// that makes it identical to most other memchr implementations, which - /// have fallible constructors. - #[cfg(test)] - pub(crate) fn try_new(needle1: u8, needle2: u8) -> Option { - Some(Two::new(needle1, needle2)) - } - - /// Return the first occurrence of one of the needle bytes in the given - /// haystack. If no such occurrence exists, then `None` is returned. - /// - /// The occurrence is reported as an offset into `haystack`. Its maximum - /// value for a non-empty haystack is `haystack.len() - 1`. - #[inline] - pub fn find(&self, haystack: &[u8]) -> Option { - // SAFETY: `find_raw` guarantees that if a pointer is returned, it - // falls within the bounds of the start and end pointers. - unsafe { - generic::search_slice_with_raw(haystack, |s, e| { - self.find_raw(s, e) - }) - } - } - - /// Return the last occurrence of one of the needle bytes in the given - /// haystack. If no such occurrence exists, then `None` is returned. - /// - /// The occurrence is reported as an offset into `haystack`. Its maximum - /// value for a non-empty haystack is `haystack.len() - 1`. - #[inline] - pub fn rfind(&self, haystack: &[u8]) -> Option { - // SAFETY: `find_raw` guarantees that if a pointer is returned, it - // falls within the bounds of the start and end pointers. - unsafe { - generic::search_slice_with_raw(haystack, |s, e| { - self.rfind_raw(s, e) - }) - } - } - - /// Like `find`, but accepts and returns raw pointers. 
- /// - /// When a match is found, the pointer returned is guaranteed to be - /// `>= start` and `< end`. - /// - /// This routine is useful if you're already using raw pointers and would - /// like to avoid converting back to a slice before executing a search. - /// - /// # Safety - /// - /// * Both `start` and `end` must be valid for reads. - /// * Both `start` and `end` must point to an initialized value. - /// * Both `start` and `end` must point to the same allocated object and - /// must either be in bounds or at most one byte past the end of the - /// allocated object. - /// * Both `start` and `end` must be _derived from_ a pointer to the same - /// object. - /// * The distance between `start` and `end` must not overflow `isize`. - /// * The distance being in bounds must not rely on "wrapping around" the - /// address space. - /// - /// Note that callers may pass a pair of pointers such that `start >= end`. - /// In that case, `None` will always be returned. - #[inline] - pub unsafe fn find_raw( - &self, - start: *const u8, - end: *const u8, - ) -> Option<*const u8> { - if start >= end { - return None; - } - let confirm = |b| self.confirm(b); - let len = end.distance(start); - if len < USIZE_BYTES { - return generic::fwd_byte_by_byte(start, end, confirm); - } - - // The start of the search may not be aligned to `*const usize`, - // so we do an unaligned load here. - let chunk = start.cast::().read_unaligned(); - if self.has_needle(chunk) { - return generic::fwd_byte_by_byte(start, end, confirm); - } - - // And now we start our search at a guaranteed aligned position. - // The first iteration of the loop below will overlap with the - // unaligned chunk above in cases where the search starts at an - // unaligned offset, but that's okay as we're only here if that - // above didn't find a match. - let mut cur = - start.add(USIZE_BYTES - (start.as_usize() & USIZE_ALIGN)); - debug_assert!(cur > start); - debug_assert!(end.sub(USIZE_BYTES) >= start); - while cur <= end.sub(USIZE_BYTES) { - debug_assert_eq!(0, cur.as_usize() % USIZE_BYTES); - - let chunk = cur.cast::().read(); - if self.has_needle(chunk) { - break; - } - cur = cur.add(USIZE_BYTES); - } - generic::fwd_byte_by_byte(cur, end, confirm) - } - - /// Like `rfind`, but accepts and returns raw pointers. - /// - /// When a match is found, the pointer returned is guaranteed to be - /// `>= start` and `< end`. - /// - /// This routine is useful if you're already using raw pointers and would - /// like to avoid converting back to a slice before executing a search. - /// - /// # Safety - /// - /// * Both `start` and `end` must be valid for reads. - /// * Both `start` and `end` must point to an initialized value. - /// * Both `start` and `end` must point to the same allocated object and - /// must either be in bounds or at most one byte past the end of the - /// allocated object. - /// * Both `start` and `end` must be _derived from_ a pointer to the same - /// object. - /// * The distance between `start` and `end` must not overflow `isize`. - /// * The distance being in bounds must not rely on "wrapping around" the - /// address space. - /// - /// Note that callers may pass a pair of pointers such that `start >= end`. - /// In that case, `None` will always be returned. 
- #[inline] - pub unsafe fn rfind_raw( - &self, - start: *const u8, - end: *const u8, - ) -> Option<*const u8> { - if start >= end { - return None; - } - let confirm = |b| self.confirm(b); - let len = end.distance(start); - if len < USIZE_BYTES { - return generic::rev_byte_by_byte(start, end, confirm); - } - - let chunk = end.sub(USIZE_BYTES).cast::().read_unaligned(); - if self.has_needle(chunk) { - return generic::rev_byte_by_byte(start, end, confirm); - } - - let mut cur = end.sub(end.as_usize() & USIZE_ALIGN); - debug_assert!(start <= cur && cur <= end); - while cur >= start.add(USIZE_BYTES) { - debug_assert_eq!(0, cur.as_usize() % USIZE_BYTES); - - let chunk = cur.sub(USIZE_BYTES).cast::().read(); - if self.has_needle(chunk) { - break; - } - cur = cur.sub(USIZE_BYTES); - } - generic::rev_byte_by_byte(start, cur, confirm) - } - - /// Returns an iterator over all occurrences of one of the needle bytes in - /// the given haystack. - /// - /// The iterator returned implements `DoubleEndedIterator`. This means it - /// can also be used to find occurrences in reverse order. - pub fn iter<'a, 'h>(&'a self, haystack: &'h [u8]) -> TwoIter<'a, 'h> { - TwoIter { searcher: self, it: generic::Iter::new(haystack) } - } - - #[inline(always)] - fn has_needle(&self, chunk: usize) -> bool { - has_zero_byte(self.v1 ^ chunk) || has_zero_byte(self.v2 ^ chunk) - } - - #[inline(always)] - fn confirm(&self, haystack_byte: u8) -> bool { - self.s1 == haystack_byte || self.s2 == haystack_byte - } -} - -/// An iterator over all occurrences of two possible bytes in a haystack. -/// -/// This iterator implements `DoubleEndedIterator`, which means it can also be -/// used to find occurrences in reverse order. -/// -/// This iterator is created by the [`Two::iter`] method. -/// -/// The lifetime parameters are as follows: -/// -/// * `'a` refers to the lifetime of the underlying [`Two`] searcher. -/// * `'h` refers to the lifetime of the haystack being searched. -#[derive(Clone, Debug)] -pub struct TwoIter<'a, 'h> { - /// The underlying memchr searcher. - searcher: &'a Two, - /// Generic iterator implementation. - it: generic::Iter<'h>, -} - -impl<'a, 'h> Iterator for TwoIter<'a, 'h> { - type Item = usize; - - #[inline] - fn next(&mut self) -> Option { - // SAFETY: We rely on the generic iterator to provide valid start - // and end pointers, but we guarantee that any pointer returned by - // 'find_raw' falls within the bounds of the start and end pointer. - unsafe { self.it.next(|s, e| self.searcher.find_raw(s, e)) } - } - - #[inline] - fn size_hint(&self) -> (usize, Option) { - self.it.size_hint() - } -} - -impl<'a, 'h> DoubleEndedIterator for TwoIter<'a, 'h> { - #[inline] - fn next_back(&mut self) -> Option { - // SAFETY: We rely on the generic iterator to provide valid start - // and end pointers, but we guarantee that any pointer returned by - // 'rfind_raw' falls within the bounds of the start and end pointer. - unsafe { self.it.next_back(|s, e| self.searcher.rfind_raw(s, e)) } - } -} - -/// Finds all occurrences of three bytes in a haystack. -/// -/// That is, this reports matches of one of three possible bytes. For example, -/// searching for `a`, `b` or `o` in `afoobar` would report matches at offsets -/// `0`, `2`, `3`, `4` and `5`. -#[derive(Clone, Copy, Debug)] -pub struct Three { - s1: u8, - s2: u8, - s3: u8, - v1: usize, - v2: usize, - v3: usize, -} - -impl Three { - /// Create a new searcher that finds occurrences of the three needle bytes - /// given. 
- #[inline] - pub fn new(needle1: u8, needle2: u8, needle3: u8) -> Three { - Three { - s1: needle1, - s2: needle2, - s3: needle3, - v1: splat(needle1), - v2: splat(needle2), - v3: splat(needle3), - } - } - - /// A test-only routine so that we can bundle a bunch of quickcheck - /// properties into a single macro. Basically, this provides a constructor - /// that makes it identical to most other memchr implementations, which - /// have fallible constructors. - #[cfg(test)] - pub(crate) fn try_new( - needle1: u8, - needle2: u8, - needle3: u8, - ) -> Option { - Some(Three::new(needle1, needle2, needle3)) - } - - /// Return the first occurrence of one of the needle bytes in the given - /// haystack. If no such occurrence exists, then `None` is returned. - /// - /// The occurrence is reported as an offset into `haystack`. Its maximum - /// value for a non-empty haystack is `haystack.len() - 1`. - #[inline] - pub fn find(&self, haystack: &[u8]) -> Option { - // SAFETY: `find_raw` guarantees that if a pointer is returned, it - // falls within the bounds of the start and end pointers. - unsafe { - generic::search_slice_with_raw(haystack, |s, e| { - self.find_raw(s, e) - }) - } - } - - /// Return the last occurrence of one of the needle bytes in the given - /// haystack. If no such occurrence exists, then `None` is returned. - /// - /// The occurrence is reported as an offset into `haystack`. Its maximum - /// value for a non-empty haystack is `haystack.len() - 1`. - #[inline] - pub fn rfind(&self, haystack: &[u8]) -> Option { - // SAFETY: `find_raw` guarantees that if a pointer is returned, it - // falls within the bounds of the start and end pointers. - unsafe { - generic::search_slice_with_raw(haystack, |s, e| { - self.rfind_raw(s, e) - }) - } - } - - /// Like `find`, but accepts and returns raw pointers. - /// - /// When a match is found, the pointer returned is guaranteed to be - /// `>= start` and `< end`. - /// - /// This routine is useful if you're already using raw pointers and would - /// like to avoid converting back to a slice before executing a search. - /// - /// # Safety - /// - /// * Both `start` and `end` must be valid for reads. - /// * Both `start` and `end` must point to an initialized value. - /// * Both `start` and `end` must point to the same allocated object and - /// must either be in bounds or at most one byte past the end of the - /// allocated object. - /// * Both `start` and `end` must be _derived from_ a pointer to the same - /// object. - /// * The distance between `start` and `end` must not overflow `isize`. - /// * The distance being in bounds must not rely on "wrapping around" the - /// address space. - /// - /// Note that callers may pass a pair of pointers such that `start >= end`. - /// In that case, `None` will always be returned. - #[inline] - pub unsafe fn find_raw( - &self, - start: *const u8, - end: *const u8, - ) -> Option<*const u8> { - if start >= end { - return None; - } - let confirm = |b| self.confirm(b); - let len = end.distance(start); - if len < USIZE_BYTES { - return generic::fwd_byte_by_byte(start, end, confirm); - } - - // The start of the search may not be aligned to `*const usize`, - // so we do an unaligned load here. - let chunk = start.cast::().read_unaligned(); - if self.has_needle(chunk) { - return generic::fwd_byte_by_byte(start, end, confirm); - } - - // And now we start our search at a guaranteed aligned position. 
- // The first iteration of the loop below will overlap with the - // unaligned chunk above in cases where the search starts at an - // unaligned offset, but that's okay as we're only here if that - // above didn't find a match. - let mut cur = - start.add(USIZE_BYTES - (start.as_usize() & USIZE_ALIGN)); - debug_assert!(cur > start); - debug_assert!(end.sub(USIZE_BYTES) >= start); - while cur <= end.sub(USIZE_BYTES) { - debug_assert_eq!(0, cur.as_usize() % USIZE_BYTES); - - let chunk = cur.cast::().read(); - if self.has_needle(chunk) { - break; - } - cur = cur.add(USIZE_BYTES); - } - generic::fwd_byte_by_byte(cur, end, confirm) - } - - /// Like `rfind`, but accepts and returns raw pointers. - /// - /// When a match is found, the pointer returned is guaranteed to be - /// `>= start` and `< end`. - /// - /// This routine is useful if you're already using raw pointers and would - /// like to avoid converting back to a slice before executing a search. - /// - /// # Safety - /// - /// * Both `start` and `end` must be valid for reads. - /// * Both `start` and `end` must point to an initialized value. - /// * Both `start` and `end` must point to the same allocated object and - /// must either be in bounds or at most one byte past the end of the - /// allocated object. - /// * Both `start` and `end` must be _derived from_ a pointer to the same - /// object. - /// * The distance between `start` and `end` must not overflow `isize`. - /// * The distance being in bounds must not rely on "wrapping around" the - /// address space. - /// - /// Note that callers may pass a pair of pointers such that `start >= end`. - /// In that case, `None` will always be returned. - #[inline] - pub unsafe fn rfind_raw( - &self, - start: *const u8, - end: *const u8, - ) -> Option<*const u8> { - if start >= end { - return None; - } - let confirm = |b| self.confirm(b); - let len = end.distance(start); - if len < USIZE_BYTES { - return generic::rev_byte_by_byte(start, end, confirm); - } - - let chunk = end.sub(USIZE_BYTES).cast::().read_unaligned(); - if self.has_needle(chunk) { - return generic::rev_byte_by_byte(start, end, confirm); - } - - let mut cur = end.sub(end.as_usize() & USIZE_ALIGN); - debug_assert!(start <= cur && cur <= end); - while cur >= start.add(USIZE_BYTES) { - debug_assert_eq!(0, cur.as_usize() % USIZE_BYTES); - - let chunk = cur.sub(USIZE_BYTES).cast::().read(); - if self.has_needle(chunk) { - break; - } - cur = cur.sub(USIZE_BYTES); - } - generic::rev_byte_by_byte(start, cur, confirm) - } - - /// Returns an iterator over all occurrences of one of the needle bytes in - /// the given haystack. - /// - /// The iterator returned implements `DoubleEndedIterator`. This means it - /// can also be used to find occurrences in reverse order. - pub fn iter<'a, 'h>(&'a self, haystack: &'h [u8]) -> ThreeIter<'a, 'h> { - ThreeIter { searcher: self, it: generic::Iter::new(haystack) } - } - - #[inline(always)] - fn has_needle(&self, chunk: usize) -> bool { - has_zero_byte(self.v1 ^ chunk) - || has_zero_byte(self.v2 ^ chunk) - || has_zero_byte(self.v3 ^ chunk) - } - - #[inline(always)] - fn confirm(&self, haystack_byte: u8) -> bool { - self.s1 == haystack_byte - || self.s2 == haystack_byte - || self.s3 == haystack_byte - } -} - -/// An iterator over all occurrences of three possible bytes in a haystack. -/// -/// This iterator implements `DoubleEndedIterator`, which means it can also be -/// used to find occurrences in reverse order. -/// -/// This iterator is created by the [`Three::iter`] method. 
-///
-/// The lifetime parameters are as follows:
-///
-/// * `'a` refers to the lifetime of the underlying [`Three`] searcher.
-/// * `'h` refers to the lifetime of the haystack being searched.
-#[derive(Clone, Debug)]
-pub struct ThreeIter<'a, 'h> {
-    /// The underlying memchr searcher.
-    searcher: &'a Three,
-    /// Generic iterator implementation.
-    it: generic::Iter<'h>,
-}
-
-impl<'a, 'h> Iterator for ThreeIter<'a, 'h> {
-    type Item = usize;
-
-    #[inline]
-    fn next(&mut self) -> Option<usize> {
-        // SAFETY: We rely on the generic iterator to provide valid start
-        // and end pointers, but we guarantee that any pointer returned by
-        // 'find_raw' falls within the bounds of the start and end pointer.
-        unsafe { self.it.next(|s, e| self.searcher.find_raw(s, e)) }
-    }
-
-    #[inline]
-    fn size_hint(&self) -> (usize, Option<usize>) {
-        self.it.size_hint()
-    }
-}
-
-impl<'a, 'h> DoubleEndedIterator for ThreeIter<'a, 'h> {
-    #[inline]
-    fn next_back(&mut self) -> Option<usize> {
-        // SAFETY: We rely on the generic iterator to provide valid start
-        // and end pointers, but we guarantee that any pointer returned by
-        // 'rfind_raw' falls within the bounds of the start and end pointer.
-        unsafe { self.it.next_back(|s, e| self.searcher.rfind_raw(s, e)) }
-    }
-}
-
-/// Return `true` if `x` contains any zero byte.
-///
-/// That is, this routine treats `x` as a register of 8-bit lanes and returns
-/// true when any of those lanes is `0`.
-///
-/// From "Matters Computational" by J. Arndt.
-#[inline(always)]
-fn has_zero_byte(x: usize) -> bool {
-    // "The idea is to subtract one from each of the bytes and then look for
-    // bytes where the borrow propagated all the way to the most significant
-    // bit."
-    const LO: usize = splat(0x01);
-    const HI: usize = splat(0x80);
-
-    (x.wrapping_sub(LO) & !x & HI) != 0
-}
-
-/// Repeat the given byte into a word size number. That is, every 8 bits
-/// is equivalent to the given byte. For example, if `b` is `\x4E` or
-/// `01001110` in binary, then the returned value on a 32-bit system would be:
-/// `01001110_01001110_01001110_01001110`.
-#[inline(always)]
-const fn splat(b: u8) -> usize {
-    // TODO: use `usize::from` once it can be used in const context.
- (b as usize) * (usize::MAX / 255) -} - -#[cfg(test)] -mod tests { - use super::*; - - define_memchr_quickcheck!(super, try_new); - - #[test] - fn forward_one() { - crate::tests::memchr::Runner::new(1).forward_iter( - |haystack, needles| { - Some(One::new(needles[0]).iter(haystack).collect()) - }, - ) - } - - #[test] - fn reverse_one() { - crate::tests::memchr::Runner::new(1).reverse_iter( - |haystack, needles| { - Some(One::new(needles[0]).iter(haystack).rev().collect()) - }, - ) - } - - #[test] - fn count_one() { - crate::tests::memchr::Runner::new(1).count_iter(|haystack, needles| { - Some(One::new(needles[0]).iter(haystack).count()) - }) - } - - #[test] - fn forward_two() { - crate::tests::memchr::Runner::new(2).forward_iter( - |haystack, needles| { - let n1 = needles.get(0).copied()?; - let n2 = needles.get(1).copied()?; - Some(Two::new(n1, n2).iter(haystack).collect()) - }, - ) - } - - #[test] - fn reverse_two() { - crate::tests::memchr::Runner::new(2).reverse_iter( - |haystack, needles| { - let n1 = needles.get(0).copied()?; - let n2 = needles.get(1).copied()?; - Some(Two::new(n1, n2).iter(haystack).rev().collect()) - }, - ) - } - - #[test] - fn forward_three() { - crate::tests::memchr::Runner::new(3).forward_iter( - |haystack, needles| { - let n1 = needles.get(0).copied()?; - let n2 = needles.get(1).copied()?; - let n3 = needles.get(2).copied()?; - Some(Three::new(n1, n2, n3).iter(haystack).collect()) - }, - ) - } - - #[test] - fn reverse_three() { - crate::tests::memchr::Runner::new(3).reverse_iter( - |haystack, needles| { - let n1 = needles.get(0).copied()?; - let n2 = needles.get(1).copied()?; - let n3 = needles.get(2).copied()?; - Some(Three::new(n1, n2, n3).iter(haystack).rev().collect()) - }, - ) - } - - // This was found by quickcheck in the course of refactoring this crate - // after memchr 2.5.0. - #[test] - fn regression_double_ended_iterator() { - let finder = One::new(b'a'); - let haystack = "a"; - let mut it = finder.iter(haystack.as_bytes()); - assert_eq!(Some(0), it.next()); - assert_eq!(None, it.next_back()); - } - - // This regression test was caught by ripgrep's test suite on i686 when - // upgrading to memchr 2.6. Namely, something about the \x0B bytes here - // screws with the SWAR counting approach I was using. This regression test - // prompted me to remove the SWAR counting approach and just replace it - // with a byte-at-a-time loop. - #[test] - fn regression_count_new_lines() { - let haystack = "01234567\x0b\n\x0b\n\x0b\n\x0b\nx"; - let count = One::new(b'\n').count(haystack.as_bytes()); - assert_eq!(4, count); - } - - // A test[1] that failed on some big endian targets after a perf - // improvement was merged[2]. - // - // At first it seemed like the test suite somehow missed the regression, - // but in actuality, CI was not running tests with `cross` but instead with - // `cargo` specifically. This is because those steps were using `cargo` - // instead of `${{ env.CARGO }}`. So adding this regression test doesn't - // really help catch that class of failure, but we add it anyway for good - // measure. - // - // [1]: https://github.com/BurntSushi/memchr/issues/152 - // [2]: https://github.com/BurntSushi/memchr/pull/151 - #[test] - fn regression_big_endian1() { - assert_eq!(One::new(b':').find(b"1:23"), Some(1)); - } - - // Interestingly, I couldn't get `regression_big_endian1` to fail for me - // on the `powerpc64-unknown-linux-gnu` target. But I found another case - // through quickcheck that does. 
- #[test] - fn regression_big_endian2() { - let data = [0, 0, 0, 0, 0, 0, 0, 0]; - assert_eq!(One::new(b'\x00').find(&data), Some(0)); - } -} diff --git a/vendor/memchr/src/arch/all/mod.rs b/vendor/memchr/src/arch/all/mod.rs deleted file mode 100644 index 559cb75104d03a..00000000000000 --- a/vendor/memchr/src/arch/all/mod.rs +++ /dev/null @@ -1,234 +0,0 @@ -/*! -Contains architecture independent routines. - -These routines are often used as a "fallback" implementation when the more -specialized architecture dependent routines are unavailable. -*/ - -pub mod memchr; -pub mod packedpair; -pub mod rabinkarp; -#[cfg(feature = "alloc")] -pub mod shiftor; -pub mod twoway; - -/// Returns true if and only if `needle` is a prefix of `haystack`. -/// -/// This uses a latency optimized variant of `memcmp` internally which *might* -/// make this faster for very short strings. -/// -/// # Inlining -/// -/// This routine is marked `inline(always)`. If you want to call this function -/// in a way that is not always inlined, you'll need to wrap a call to it in -/// another function that is marked as `inline(never)` or just `inline`. -#[inline(always)] -pub fn is_prefix(haystack: &[u8], needle: &[u8]) -> bool { - needle.len() <= haystack.len() - && is_equal(&haystack[..needle.len()], needle) -} - -/// Returns true if and only if `needle` is a suffix of `haystack`. -/// -/// This uses a latency optimized variant of `memcmp` internally which *might* -/// make this faster for very short strings. -/// -/// # Inlining -/// -/// This routine is marked `inline(always)`. If you want to call this function -/// in a way that is not always inlined, you'll need to wrap a call to it in -/// another function that is marked as `inline(never)` or just `inline`. -#[inline(always)] -pub fn is_suffix(haystack: &[u8], needle: &[u8]) -> bool { - needle.len() <= haystack.len() - && is_equal(&haystack[haystack.len() - needle.len()..], needle) -} - -/// Compare corresponding bytes in `x` and `y` for equality. -/// -/// That is, this returns true if and only if `x.len() == y.len()` and -/// `x[i] == y[i]` for all `0 <= i < x.len()`. -/// -/// # Inlining -/// -/// This routine is marked `inline(always)`. If you want to call this function -/// in a way that is not always inlined, you'll need to wrap a call to it in -/// another function that is marked as `inline(never)` or just `inline`. -/// -/// # Motivation -/// -/// Why not use slice equality instead? Well, slice equality usually results in -/// a call out to the current platform's `libc` which might not be inlineable -/// or have other overhead. This routine isn't guaranteed to be a win, but it -/// might be in some cases. -#[inline(always)] -pub fn is_equal(x: &[u8], y: &[u8]) -> bool { - if x.len() != y.len() { - return false; - } - // SAFETY: Our pointers are derived directly from borrowed slices which - // uphold all of our safety guarantees except for length. We account for - // length with the check above. - unsafe { is_equal_raw(x.as_ptr(), y.as_ptr(), x.len()) } -} - -/// Compare `n` bytes at the given pointers for equality. -/// -/// This returns true if and only if `*x.add(i) == *y.add(i)` for all -/// `0 <= i < n`. -/// -/// # Inlining -/// -/// This routine is marked `inline(always)`. If you want to call this function -/// in a way that is not always inlined, you'll need to wrap a call to it in -/// another function that is marked as `inline(never)` or just `inline`. -/// -/// # Motivation -/// -/// Why not use slice equality instead? 
Well, slice equality usually results in -/// a call out to the current platform's `libc` which might not be inlineable -/// or have other overhead. This routine isn't guaranteed to be a win, but it -/// might be in some cases. -/// -/// # Safety -/// -/// * Both `x` and `y` must be valid for reads of up to `n` bytes. -/// * Both `x` and `y` must point to an initialized value. -/// * Both `x` and `y` must each point to an allocated object and -/// must either be in bounds or at most one byte past the end of the -/// allocated object. `x` and `y` do not need to point to the same allocated -/// object, but they may. -/// * Both `x` and `y` must be _derived from_ a pointer to their respective -/// allocated objects. -/// * The distance between `x` and `x+n` must not overflow `isize`. Similarly -/// for `y` and `y+n`. -/// * The distance being in bounds must not rely on "wrapping around" the -/// address space. -#[inline(always)] -pub unsafe fn is_equal_raw( - mut x: *const u8, - mut y: *const u8, - mut n: usize, -) -> bool { - // When we have 4 or more bytes to compare, then proceed in chunks of 4 at - // a time using unaligned loads. - // - // Also, why do 4 byte loads instead of, say, 8 byte loads? The reason is - // that this particular version of memcmp is likely to be called with tiny - // needles. That means that if we do 8 byte loads, then a higher proportion - // of memcmp calls will use the slower variant above. With that said, this - // is a hypothesis and is only loosely supported by benchmarks. There's - // likely some improvement that could be made here. The main thing here - // though is to optimize for latency, not throughput. - - // SAFETY: The caller is responsible for ensuring the pointers we get are - // valid and readable for at least `n` bytes. We also do unaligned loads, - // so there's no need to ensure we're aligned. (This is justified by this - // routine being specifically for short strings.) - while n >= 4 { - let vx = x.cast::().read_unaligned(); - let vy = y.cast::().read_unaligned(); - if vx != vy { - return false; - } - x = x.add(4); - y = y.add(4); - n -= 4; - } - // If we don't have enough bytes to do 4-byte at a time loads, then - // do partial loads. Note that I used to have a byte-at-a-time - // loop here and that turned out to be quite a bit slower for the - // memmem/pathological/defeat-simple-vector-alphabet benchmark. 
- if n >= 2 { - let vx = x.cast::().read_unaligned(); - let vy = y.cast::().read_unaligned(); - if vx != vy { - return false; - } - x = x.add(2); - y = y.add(2); - n -= 2; - } - if n > 0 { - if x.read() != y.read() { - return false; - } - } - true -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn equals_different_lengths() { - assert!(!is_equal(b"", b"a")); - assert!(!is_equal(b"a", b"")); - assert!(!is_equal(b"ab", b"a")); - assert!(!is_equal(b"a", b"ab")); - } - - #[test] - fn equals_mismatch() { - let one_mismatch = [ - (&b"a"[..], &b"x"[..]), - (&b"ab"[..], &b"ax"[..]), - (&b"abc"[..], &b"abx"[..]), - (&b"abcd"[..], &b"abcx"[..]), - (&b"abcde"[..], &b"abcdx"[..]), - (&b"abcdef"[..], &b"abcdex"[..]), - (&b"abcdefg"[..], &b"abcdefx"[..]), - (&b"abcdefgh"[..], &b"abcdefgx"[..]), - (&b"abcdefghi"[..], &b"abcdefghx"[..]), - (&b"abcdefghij"[..], &b"abcdefghix"[..]), - (&b"abcdefghijk"[..], &b"abcdefghijx"[..]), - (&b"abcdefghijkl"[..], &b"abcdefghijkx"[..]), - (&b"abcdefghijklm"[..], &b"abcdefghijklx"[..]), - (&b"abcdefghijklmn"[..], &b"abcdefghijklmx"[..]), - ]; - for (x, y) in one_mismatch { - assert_eq!(x.len(), y.len(), "lengths should match"); - assert!(!is_equal(x, y)); - assert!(!is_equal(y, x)); - } - } - - #[test] - fn equals_yes() { - assert!(is_equal(b"", b"")); - assert!(is_equal(b"a", b"a")); - assert!(is_equal(b"ab", b"ab")); - assert!(is_equal(b"abc", b"abc")); - assert!(is_equal(b"abcd", b"abcd")); - assert!(is_equal(b"abcde", b"abcde")); - assert!(is_equal(b"abcdef", b"abcdef")); - assert!(is_equal(b"abcdefg", b"abcdefg")); - assert!(is_equal(b"abcdefgh", b"abcdefgh")); - assert!(is_equal(b"abcdefghi", b"abcdefghi")); - } - - #[test] - fn prefix() { - assert!(is_prefix(b"", b"")); - assert!(is_prefix(b"a", b"")); - assert!(is_prefix(b"ab", b"")); - assert!(is_prefix(b"foo", b"foo")); - assert!(is_prefix(b"foobar", b"foo")); - - assert!(!is_prefix(b"foo", b"fob")); - assert!(!is_prefix(b"foobar", b"fob")); - } - - #[test] - fn suffix() { - assert!(is_suffix(b"", b"")); - assert!(is_suffix(b"a", b"")); - assert!(is_suffix(b"ab", b"")); - assert!(is_suffix(b"foo", b"foo")); - assert!(is_suffix(b"foobar", b"bar")); - - assert!(!is_suffix(b"foo", b"goo")); - assert!(!is_suffix(b"foobar", b"gar")); - } -} diff --git a/vendor/memchr/src/arch/all/packedpair/default_rank.rs b/vendor/memchr/src/arch/all/packedpair/default_rank.rs deleted file mode 100644 index 6aa3895e61ef77..00000000000000 --- a/vendor/memchr/src/arch/all/packedpair/default_rank.rs +++ /dev/null @@ -1,258 +0,0 @@ -pub(crate) const RANK: [u8; 256] = [ - 55, // '\x00' - 52, // '\x01' - 51, // '\x02' - 50, // '\x03' - 49, // '\x04' - 48, // '\x05' - 47, // '\x06' - 46, // '\x07' - 45, // '\x08' - 103, // '\t' - 242, // '\n' - 66, // '\x0b' - 67, // '\x0c' - 229, // '\r' - 44, // '\x0e' - 43, // '\x0f' - 42, // '\x10' - 41, // '\x11' - 40, // '\x12' - 39, // '\x13' - 38, // '\x14' - 37, // '\x15' - 36, // '\x16' - 35, // '\x17' - 34, // '\x18' - 33, // '\x19' - 56, // '\x1a' - 32, // '\x1b' - 31, // '\x1c' - 30, // '\x1d' - 29, // '\x1e' - 28, // '\x1f' - 255, // ' ' - 148, // '!' - 164, // '"' - 149, // '#' - 136, // '$' - 160, // '%' - 155, // '&' - 173, // "'" - 221, // '(' - 222, // ')' - 134, // '*' - 122, // '+' - 232, // ',' - 202, // '-' - 215, // '.' - 224, // '/' - 208, // '0' - 220, // '1' - 204, // '2' - 187, // '3' - 183, // '4' - 179, // '5' - 177, // '6' - 168, // '7' - 178, // '8' - 200, // '9' - 226, // ':' - 195, // ';' - 154, // '<' - 184, // '=' - 174, // '>' - 126, // '?' 
- 120, // '@' - 191, // 'A' - 157, // 'B' - 194, // 'C' - 170, // 'D' - 189, // 'E' - 162, // 'F' - 161, // 'G' - 150, // 'H' - 193, // 'I' - 142, // 'J' - 137, // 'K' - 171, // 'L' - 176, // 'M' - 185, // 'N' - 167, // 'O' - 186, // 'P' - 112, // 'Q' - 175, // 'R' - 192, // 'S' - 188, // 'T' - 156, // 'U' - 140, // 'V' - 143, // 'W' - 123, // 'X' - 133, // 'Y' - 128, // 'Z' - 147, // '[' - 138, // '\\' - 146, // ']' - 114, // '^' - 223, // '_' - 151, // '`' - 249, // 'a' - 216, // 'b' - 238, // 'c' - 236, // 'd' - 253, // 'e' - 227, // 'f' - 218, // 'g' - 230, // 'h' - 247, // 'i' - 135, // 'j' - 180, // 'k' - 241, // 'l' - 233, // 'm' - 246, // 'n' - 244, // 'o' - 231, // 'p' - 139, // 'q' - 245, // 'r' - 243, // 's' - 251, // 't' - 235, // 'u' - 201, // 'v' - 196, // 'w' - 240, // 'x' - 214, // 'y' - 152, // 'z' - 182, // '{' - 205, // '|' - 181, // '}' - 127, // '~' - 27, // '\x7f' - 212, // '\x80' - 211, // '\x81' - 210, // '\x82' - 213, // '\x83' - 228, // '\x84' - 197, // '\x85' - 169, // '\x86' - 159, // '\x87' - 131, // '\x88' - 172, // '\x89' - 105, // '\x8a' - 80, // '\x8b' - 98, // '\x8c' - 96, // '\x8d' - 97, // '\x8e' - 81, // '\x8f' - 207, // '\x90' - 145, // '\x91' - 116, // '\x92' - 115, // '\x93' - 144, // '\x94' - 130, // '\x95' - 153, // '\x96' - 121, // '\x97' - 107, // '\x98' - 132, // '\x99' - 109, // '\x9a' - 110, // '\x9b' - 124, // '\x9c' - 111, // '\x9d' - 82, // '\x9e' - 108, // '\x9f' - 118, // '\xa0' - 141, // '¡' - 113, // '¢' - 129, // '£' - 119, // '¤' - 125, // '¥' - 165, // '¦' - 117, // '§' - 92, // '¨' - 106, // '©' - 83, // 'ª' - 72, // '«' - 99, // '¬' - 93, // '\xad' - 65, // '®' - 79, // '¯' - 166, // '°' - 237, // '±' - 163, // '²' - 199, // '³' - 190, // '´' - 225, // 'µ' - 209, // '¶' - 203, // '·' - 198, // '¸' - 217, // '¹' - 219, // 'º' - 206, // '»' - 234, // '¼' - 248, // '½' - 158, // '¾' - 239, // '¿' - 255, // 'À' - 255, // 'Á' - 255, // 'Â' - 255, // 'Ã' - 255, // 'Ä' - 255, // 'Å' - 255, // 'Æ' - 255, // 'Ç' - 255, // 'È' - 255, // 'É' - 255, // 'Ê' - 255, // 'Ë' - 255, // 'Ì' - 255, // 'Í' - 255, // 'Î' - 255, // 'Ï' - 255, // 'Ð' - 255, // 'Ñ' - 255, // 'Ò' - 255, // 'Ó' - 255, // 'Ô' - 255, // 'Õ' - 255, // 'Ö' - 255, // '×' - 255, // 'Ø' - 255, // 'Ù' - 255, // 'Ú' - 255, // 'Û' - 255, // 'Ü' - 255, // 'Ý' - 255, // 'Þ' - 255, // 'ß' - 255, // 'à' - 255, // 'á' - 255, // 'â' - 255, // 'ã' - 255, // 'ä' - 255, // 'å' - 255, // 'æ' - 255, // 'ç' - 255, // 'è' - 255, // 'é' - 255, // 'ê' - 255, // 'ë' - 255, // 'ì' - 255, // 'í' - 255, // 'î' - 255, // 'ï' - 255, // 'ð' - 255, // 'ñ' - 255, // 'ò' - 255, // 'ó' - 255, // 'ô' - 255, // 'õ' - 255, // 'ö' - 255, // '÷' - 255, // 'ø' - 255, // 'ù' - 255, // 'ú' - 255, // 'û' - 255, // 'ü' - 255, // 'ý' - 255, // 'þ' - 255, // 'ÿ' -]; diff --git a/vendor/memchr/src/arch/all/packedpair/mod.rs b/vendor/memchr/src/arch/all/packedpair/mod.rs deleted file mode 100644 index 148a985521d817..00000000000000 --- a/vendor/memchr/src/arch/all/packedpair/mod.rs +++ /dev/null @@ -1,359 +0,0 @@ -/*! -Provides an architecture independent implementation of the "packed pair" -algorithm. - -The "packed pair" algorithm is based on the [generic SIMD] algorithm. The main -difference is that it (by default) uses a background distribution of byte -frequencies to heuristically select the pair of bytes to search for. Note that -this module provides an architecture independent version that doesn't do as -good of a job keeping the search for candidates inside a SIMD hot path. 
It -however can be good enough in many circumstances. - -[generic SIMD]: http://0x80.pl/articles/simd-strfind.html#first-and-last -*/ - -use crate::memchr; - -mod default_rank; - -/// An architecture independent "packed pair" finder. -/// -/// This finder picks two bytes that it believes have high predictive power for -/// indicating an overall match of a needle. At search time, it reports offsets -/// where the needle could match based on whether the pair of bytes it chose -/// match. -/// -/// This is architecture independent because it utilizes `memchr` to find the -/// occurrence of one of the bytes in the pair, and then checks whether the -/// second byte matches. If it does, in the case of [`Finder::find_prefilter`], -/// the location at which the needle could match is returned. -/// -/// It is generally preferred to use architecture specific routines for a -/// "packed pair" prefilter, but this can be a useful fallback when the -/// architecture independent routines are unavailable. -#[derive(Clone, Copy, Debug)] -pub struct Finder { - pair: Pair, - byte1: u8, - byte2: u8, -} - -impl Finder { - /// Create a new prefilter that reports possible locations where the given - /// needle matches. - #[inline] - pub fn new(needle: &[u8]) -> Option { - Finder::with_pair(needle, Pair::new(needle)?) - } - - /// Create a new prefilter using the pair given. - /// - /// If the prefilter could not be constructed, then `None` is returned. - /// - /// This constructor permits callers to control precisely which pair of - /// bytes is used as a predicate. - #[inline] - pub fn with_pair(needle: &[u8], pair: Pair) -> Option { - let byte1 = needle[usize::from(pair.index1())]; - let byte2 = needle[usize::from(pair.index2())]; - // Currently this can never fail so we could just return a Finder, - // but it's conceivable this could change. - Some(Finder { pair, byte1, byte2 }) - } - - /// Run this finder on the given haystack as a prefilter. - /// - /// If a candidate match is found, then an offset where the needle *could* - /// begin in the haystack is returned. - #[inline] - pub fn find_prefilter(&self, haystack: &[u8]) -> Option { - let mut i = 0; - let index1 = usize::from(self.pair.index1()); - let index2 = usize::from(self.pair.index2()); - loop { - // Use a fast vectorized implementation to skip to the next - // occurrence of the rarest byte (heuristically chosen) in the - // needle. - i += memchr(self.byte1, &haystack[i..])?; - let found = i; - i += 1; - - // If we can't align our first byte match with the haystack, then a - // match is impossible. - let aligned1 = match found.checked_sub(index1) { - None => continue, - Some(aligned1) => aligned1, - }; - - // Now align the second byte match with the haystack. A mismatch - // means that a match is impossible. - let aligned2 = match aligned1.checked_add(index2) { - None => continue, - Some(aligned_index2) => aligned_index2, - }; - if haystack.get(aligned2).map_or(true, |&b| b != self.byte2) { - continue; - } - - // We've done what we can. There might be a match here. - return Some(aligned1); - } - } - - /// Returns the pair of offsets (into the needle) used to check as a - /// predicate before confirming whether a needle exists at a particular - /// position. - #[inline] - pub fn pair(&self) -> &Pair { - &self.pair - } -} - -/// A pair of byte offsets into a needle to use as a predicate. -/// -/// This pair is used as a predicate to quickly filter out positions in a -/// haystack in which a needle cannot match. 
In some cases, this pair can even -/// be used in vector algorithms such that the vector algorithm only switches -/// over to scalar code once this pair has been found. -/// -/// A pair of offsets can be used in both substring search implementations and -/// in prefilters. The former will report matches of a needle in a haystack -/// where as the latter will only report possible matches of a needle. -/// -/// The offsets are limited each to a maximum of 255 to keep memory usage low. -/// Moreover, it's rarely advantageous to create a predicate using offsets -/// greater than 255 anyway. -/// -/// The only guarantee enforced on the pair of offsets is that they are not -/// equivalent. It is not necessarily the case that `index1 < index2` for -/// example. By convention, `index1` corresponds to the byte in the needle -/// that is believed to be most the predictive. Note also that because of the -/// requirement that the indices be both valid for the needle used to build -/// the pair and not equal, it follows that a pair can only be constructed for -/// needles with length at least 2. -#[derive(Clone, Copy, Debug)] -pub struct Pair { - index1: u8, - index2: u8, -} - -impl Pair { - /// Create a new pair of offsets from the given needle. - /// - /// If a pair could not be created (for example, if the needle is too - /// short), then `None` is returned. - /// - /// This chooses the pair in the needle that is believed to be as - /// predictive of an overall match of the needle as possible. - #[inline] - pub fn new(needle: &[u8]) -> Option { - Pair::with_ranker(needle, DefaultFrequencyRank) - } - - /// Create a new pair of offsets from the given needle and ranker. - /// - /// This permits the caller to choose a background frequency distribution - /// with which bytes are selected. The idea is to select a pair of bytes - /// that is believed to strongly predict a match in the haystack. This - /// usually means selecting bytes that occur rarely in a haystack. - /// - /// If a pair could not be created (for example, if the needle is too - /// short), then `None` is returned. - #[inline] - pub fn with_ranker( - needle: &[u8], - ranker: R, - ) -> Option { - if needle.len() <= 1 { - return None; - } - // Find the rarest two bytes. We make them distinct indices by - // construction. (The actual byte value may be the same in degenerate - // cases, but that's OK.) - let (mut rare1, mut index1) = (needle[0], 0); - let (mut rare2, mut index2) = (needle[1], 1); - if ranker.rank(rare2) < ranker.rank(rare1) { - core::mem::swap(&mut rare1, &mut rare2); - core::mem::swap(&mut index1, &mut index2); - } - let max = usize::from(core::u8::MAX); - for (i, &b) in needle.iter().enumerate().take(max).skip(2) { - if ranker.rank(b) < ranker.rank(rare1) { - rare2 = rare1; - index2 = index1; - rare1 = b; - index1 = u8::try_from(i).unwrap(); - } else if b != rare1 && ranker.rank(b) < ranker.rank(rare2) { - rare2 = b; - index2 = u8::try_from(i).unwrap(); - } - } - // While not strictly required for how a Pair is normally used, we - // really don't want these to be equivalent. If they were, it would - // reduce the effectiveness of candidate searching using these rare - // bytes by increasing the rate of false positives. - assert_ne!(index1, index2); - Some(Pair { index1, index2 }) - } - - /// Create a new pair using the offsets given for the needle given. - /// - /// This bypasses any sort of heuristic process for choosing the offsets - /// and permits the caller to choose the offsets themselves. 
- /// - /// Indices are limited to valid `u8` values so that a `Pair` uses less - /// memory. It is not possible to create a `Pair` with offsets bigger than - /// `u8::MAX`. It's likely that such a thing is not needed, but if it is, - /// it's suggested to build your own bespoke algorithm because you're - /// likely working on a very niche case. (File an issue if this suggestion - /// does not make sense to you.) - /// - /// If a pair could not be created (for example, if the needle is too - /// short), then `None` is returned. - #[inline] - pub fn with_indices( - needle: &[u8], - index1: u8, - index2: u8, - ) -> Option { - // While not strictly required for how a Pair is normally used, we - // really don't want these to be equivalent. If they were, it would - // reduce the effectiveness of candidate searching using these rare - // bytes by increasing the rate of false positives. - if index1 == index2 { - return None; - } - // Similarly, invalid indices means the Pair is invalid too. - if usize::from(index1) >= needle.len() { - return None; - } - if usize::from(index2) >= needle.len() { - return None; - } - Some(Pair { index1, index2 }) - } - - /// Returns the first offset of the pair. - #[inline] - pub fn index1(&self) -> u8 { - self.index1 - } - - /// Returns the second offset of the pair. - #[inline] - pub fn index2(&self) -> u8 { - self.index2 - } -} - -/// This trait allows the user to customize the heuristic used to determine the -/// relative frequency of a given byte in the dataset being searched. -/// -/// The use of this trait can have a dramatic impact on performance depending -/// on the type of data being searched. The details of why are explained in the -/// docs of [`crate::memmem::Prefilter`]. To summarize, the core algorithm uses -/// a prefilter to quickly identify candidate matches that are later verified -/// more slowly. This prefilter is implemented in terms of trying to find -/// `rare` bytes at specific offsets that will occur less frequently in the -/// dataset. While the concept of a `rare` byte is similar for most datasets, -/// there are some specific datasets (like binary executables) that have -/// dramatically different byte distributions. For these datasets customizing -/// the byte frequency heuristic can have a massive impact on performance, and -/// might even need to be done at runtime. -/// -/// The default implementation of `HeuristicFrequencyRank` reads from the -/// static frequency table defined in `src/memmem/byte_frequencies.rs`. This -/// is optimal for most inputs, so if you are unsure of the impact of using a -/// custom `HeuristicFrequencyRank` you should probably just use the default. -/// -/// # Example -/// -/// ``` -/// use memchr::{ -/// arch::all::packedpair::HeuristicFrequencyRank, -/// memmem::FinderBuilder, -/// }; -/// -/// /// A byte-frequency table that is good for scanning binary executables. 
-/// struct Binary; -/// -/// impl HeuristicFrequencyRank for Binary { -/// fn rank(&self, byte: u8) -> u8 { -/// const TABLE: [u8; 256] = [ -/// 255, 128, 61, 43, 50, 41, 27, 28, 57, 15, 21, 13, 24, 17, 17, -/// 89, 58, 16, 11, 7, 14, 23, 7, 6, 24, 9, 6, 5, 9, 4, 7, 16, -/// 68, 11, 9, 6, 88, 7, 4, 4, 23, 9, 4, 8, 8, 5, 10, 4, 30, 11, -/// 9, 24, 11, 5, 5, 5, 19, 11, 6, 17, 9, 9, 6, 8, -/// 48, 58, 11, 14, 53, 40, 9, 9, 254, 35, 3, 6, 52, 23, 6, 6, 27, -/// 4, 7, 11, 14, 13, 10, 11, 11, 5, 2, 10, 16, 12, 6, 19, -/// 19, 20, 5, 14, 16, 31, 19, 7, 14, 20, 4, 4, 19, 8, 18, 20, 24, -/// 1, 25, 19, 58, 29, 10, 5, 15, 20, 2, 2, 9, 4, 3, 5, -/// 51, 11, 4, 53, 23, 39, 6, 4, 13, 81, 4, 186, 5, 67, 3, 2, 15, -/// 0, 0, 1, 3, 2, 0, 0, 5, 0, 0, 0, 2, 0, 0, 0, -/// 12, 2, 1, 1, 3, 1, 1, 1, 6, 1, 2, 1, 3, 1, 1, 2, 9, 1, 1, 0, -/// 2, 2, 4, 4, 11, 6, 7, 3, 6, 9, 4, 5, -/// 46, 18, 8, 18, 17, 3, 8, 20, 16, 10, 3, 7, 175, 4, 6, 7, 13, -/// 3, 7, 3, 3, 1, 3, 3, 10, 3, 1, 5, 2, 0, 1, 2, -/// 16, 3, 5, 1, 6, 1, 1, 2, 58, 20, 3, 14, 12, 2, 1, 3, 16, 3, 5, -/// 8, 3, 1, 8, 6, 17, 6, 5, 3, 8, 6, 13, 175, -/// ]; -/// TABLE[byte as usize] -/// } -/// } -/// // Create a new finder with the custom heuristic. -/// let finder = FinderBuilder::new() -/// .build_forward_with_ranker(Binary, b"\x00\x00\xdd\xdd"); -/// // Find needle with custom heuristic. -/// assert!(finder.find(b"\x00\x00\x00\xdd\xdd").is_some()); -/// ``` -pub trait HeuristicFrequencyRank { - /// Return the heuristic frequency rank of the given byte. A lower rank - /// means the byte is believed to occur less frequently in the haystack. - /// - /// Some uses of this heuristic may treat arbitrary absolute rank values as - /// significant. For example, an implementation detail in this crate may - /// determine that heuristic prefilters are inappropriate if every byte in - /// the needle has a "high" rank. - fn rank(&self, byte: u8) -> u8; -} - -/// The default byte frequency heuristic that is good for most haystacks. -pub(crate) struct DefaultFrequencyRank; - -impl HeuristicFrequencyRank for DefaultFrequencyRank { - fn rank(&self, byte: u8) -> u8 { - self::default_rank::RANK[usize::from(byte)] - } -} - -/// This permits passing any implementation of `HeuristicFrequencyRank` as a -/// borrowed version of itself. -impl<'a, R> HeuristicFrequencyRank for &'a R -where - R: HeuristicFrequencyRank, -{ - fn rank(&self, byte: u8) -> u8 { - (**self).rank(byte) - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn forward_packedpair() { - fn find( - haystack: &[u8], - needle: &[u8], - _index1: u8, - _index2: u8, - ) -> Option> { - // We ignore the index positions requested since it winds up making - // this test too slow overall. - let f = Finder::new(needle)?; - Some(f.find_prefilter(haystack)) - } - crate::tests::packedpair::Runner::new().fwd(find).run() - } -} diff --git a/vendor/memchr/src/arch/all/rabinkarp.rs b/vendor/memchr/src/arch/all/rabinkarp.rs deleted file mode 100644 index e0bafbac982950..00000000000000 --- a/vendor/memchr/src/arch/all/rabinkarp.rs +++ /dev/null @@ -1,390 +0,0 @@ -/*! -An implementation of the [Rabin-Karp substring search algorithm][rabinkarp]. - -Rabin-Karp works by creating a hash of the needle provided and then computing -a rolling hash for each needle sized window in the haystack. When the rolling -hash matches the hash of the needle, a byte-wise comparison is done to check -if a match exists. The worst case time complexity of Rabin-Karp is `O(m * -n)` where `m ~ len(needle)` and `n ~ len(haystack)`. 
Its worst case space -complexity is constant. - -The main utility of Rabin-Karp is that the searcher can be constructed very -quickly with very little memory. This makes it especially useful when searching -for small needles in small haystacks, as it might finish its search before a -beefier algorithm (like Two-Way) even starts. - -[rabinkarp]: https://en.wikipedia.org/wiki/Rabin%E2%80%93Karp_algorithm -*/ - -/* -(This was the comment I wrote for this module originally when it was not -exposed. The comment still looks useful, but it's a bit in the weeds, so it's -not public itself.) - -This module implements the classical Rabin-Karp substring search algorithm, -with no extra frills. While its use would seem to break our time complexity -guarantee of O(m+n) (RK's time complexity is O(mn)), we are careful to only -ever use RK on a constant subset of haystacks. The main point here is that -RK has good latency properties for small needles/haystacks. It's very quick -to compute a needle hash and zip through the haystack when compared to -initializing Two-Way, for example. And this is especially useful for cases -where the haystack is just too short for vector instructions to do much good. - -The hashing function used here is the same one recommended by ESMAJ. - -Another choice instead of Rabin-Karp would be Shift-Or. But its latency -isn't quite as good since its preprocessing time is a bit more expensive -(both in practice and in theory). However, perhaps Shift-Or has a place -somewhere else for short patterns. I think the main problem is that it -requires space proportional to the alphabet and the needle. If we, for -example, supported needles up to length 16, then the total table size would be -len(alphabet)*size_of::()==512 bytes. Which isn't exactly small, and it's -probably bad to put that on the stack. So ideally, we'd throw it on the heap, -but we'd really like to write as much code without using alloc/std as possible. -But maybe it's worth the special casing. It's a TODO to benchmark. - -Wikipedia has a decent explanation, if a bit heavy on the theory: -https://en.wikipedia.org/wiki/Rabin%E2%80%93Karp_algorithm - -But ESMAJ provides something a bit more concrete: -http://www-igm.univ-mlv.fr/~lecroq/string/node5.html - -Finally, aho-corasick uses Rabin-Karp for multiple pattern match in some cases: -https://github.com/BurntSushi/aho-corasick/blob/3852632f10587db0ff72ef29e88d58bf305a0946/src/packed/rabinkarp.rs -*/ - -use crate::ext::Pointer; - -/// A forward substring searcher using the Rabin-Karp algorithm. -/// -/// Note that, as a lower level API, a `Finder` does not have access to the -/// needle it was constructed with. For this reason, executing a search -/// with a `Finder` requires passing both the needle and the haystack, -/// where the needle is exactly equivalent to the one given to the `Finder` -/// at construction time. This design was chosen so that callers can have -/// more precise control over where and how many times a needle is stored. -/// For example, in cases where Rabin-Karp is just one of several possible -/// substring search algorithms. -#[derive(Clone, Debug)] -pub struct Finder { - /// The actual hash. - hash: Hash, - /// The factor needed to multiply a byte by in order to subtract it from - /// the hash. It is defined to be 2^(n-1) (using wrapping exponentiation), - /// where n is the length of the needle. This is how we "remove" a byte - /// from the hash once the hash window rolls past it. 
- hash_2pow: u32, -} - -impl Finder { - /// Create a new Rabin-Karp forward searcher for the given `needle`. - /// - /// The needle may be empty. The empty needle matches at every byte offset. - /// - /// Note that callers must pass the same needle to all search calls using - /// this `Finder`. - #[inline] - pub fn new(needle: &[u8]) -> Finder { - let mut s = Finder { hash: Hash::new(), hash_2pow: 1 }; - let first_byte = match needle.get(0) { - None => return s, - Some(&first_byte) => first_byte, - }; - s.hash.add(first_byte); - for b in needle.iter().copied().skip(1) { - s.hash.add(b); - s.hash_2pow = s.hash_2pow.wrapping_shl(1); - } - s - } - - /// Return the first occurrence of the `needle` in the `haystack` - /// given. If no such occurrence exists, then `None` is returned. - /// - /// The `needle` provided must match the needle given to this finder at - /// construction time. - /// - /// The maximum value this can return is `haystack.len()`, which can only - /// occur when the needle and haystack both have length zero. Otherwise, - /// for non-empty haystacks, the maximum value is `haystack.len() - 1`. - #[inline] - pub fn find(&self, haystack: &[u8], needle: &[u8]) -> Option { - unsafe { - let hstart = haystack.as_ptr(); - let hend = hstart.add(haystack.len()); - let nstart = needle.as_ptr(); - let nend = nstart.add(needle.len()); - let found = self.find_raw(hstart, hend, nstart, nend)?; - Some(found.distance(hstart)) - } - } - - /// Like `find`, but accepts and returns raw pointers. - /// - /// When a match is found, the pointer returned is guaranteed to be - /// `>= start` and `<= end`. The pointer returned is only ever equivalent - /// to `end` when both the needle and haystack are empty. (That is, the - /// empty string matches the empty string.) - /// - /// This routine is useful if you're already using raw pointers and would - /// like to avoid converting back to a slice before executing a search. - /// - /// # Safety - /// - /// Note that `start` and `end` below refer to both pairs of pointers given - /// to this routine. That is, the conditions apply to both `hstart`/`hend` - /// and `nstart`/`nend`. - /// - /// * Both `start` and `end` must be valid for reads. - /// * Both `start` and `end` must point to an initialized value. - /// * Both `start` and `end` must point to the same allocated object and - /// must either be in bounds or at most one byte past the end of the - /// allocated object. - /// * Both `start` and `end` must be _derived from_ a pointer to the same - /// object. - /// * The distance between `start` and `end` must not overflow `isize`. - /// * The distance being in bounds must not rely on "wrapping around" the - /// address space. - /// * It must be the case that `start <= end`. - #[inline] - pub unsafe fn find_raw( - &self, - hstart: *const u8, - hend: *const u8, - nstart: *const u8, - nend: *const u8, - ) -> Option<*const u8> { - let hlen = hend.distance(hstart); - let nlen = nend.distance(nstart); - if nlen > hlen { - return None; - } - let mut cur = hstart; - let end = hend.sub(nlen); - let mut hash = Hash::forward(cur, cur.add(nlen)); - loop { - if self.hash == hash && is_equal_raw(cur, nstart, nlen) { - return Some(cur); - } - if cur >= end { - return None; - } - hash.roll(self, cur.read(), cur.add(nlen).read()); - cur = cur.add(1); - } - } -} - -/// A reverse substring searcher using the Rabin-Karp algorithm. 
-#[derive(Clone, Debug)] -pub struct FinderRev(Finder); - -impl FinderRev { - /// Create a new Rabin-Karp reverse searcher for the given `needle`. - #[inline] - pub fn new(needle: &[u8]) -> FinderRev { - let mut s = FinderRev(Finder { hash: Hash::new(), hash_2pow: 1 }); - let last_byte = match needle.last() { - None => return s, - Some(&last_byte) => last_byte, - }; - s.0.hash.add(last_byte); - for b in needle.iter().rev().copied().skip(1) { - s.0.hash.add(b); - s.0.hash_2pow = s.0.hash_2pow.wrapping_shl(1); - } - s - } - - /// Return the last occurrence of the `needle` in the `haystack` - /// given. If no such occurrence exists, then `None` is returned. - /// - /// The `needle` provided must match the needle given to this finder at - /// construction time. - /// - /// The maximum value this can return is `haystack.len()`, which can only - /// occur when the needle and haystack both have length zero. Otherwise, - /// for non-empty haystacks, the maximum value is `haystack.len() - 1`. - #[inline] - pub fn rfind(&self, haystack: &[u8], needle: &[u8]) -> Option { - unsafe { - let hstart = haystack.as_ptr(); - let hend = hstart.add(haystack.len()); - let nstart = needle.as_ptr(); - let nend = nstart.add(needle.len()); - let found = self.rfind_raw(hstart, hend, nstart, nend)?; - Some(found.distance(hstart)) - } - } - - /// Like `rfind`, but accepts and returns raw pointers. - /// - /// When a match is found, the pointer returned is guaranteed to be - /// `>= start` and `<= end`. The pointer returned is only ever equivalent - /// to `end` when both the needle and haystack are empty. (That is, the - /// empty string matches the empty string.) - /// - /// This routine is useful if you're already using raw pointers and would - /// like to avoid converting back to a slice before executing a search. - /// - /// # Safety - /// - /// Note that `start` and `end` below refer to both pairs of pointers given - /// to this routine. That is, the conditions apply to both `hstart`/`hend` - /// and `nstart`/`nend`. - /// - /// * Both `start` and `end` must be valid for reads. - /// * Both `start` and `end` must point to an initialized value. - /// * Both `start` and `end` must point to the same allocated object and - /// must either be in bounds or at most one byte past the end of the - /// allocated object. - /// * Both `start` and `end` must be _derived from_ a pointer to the same - /// object. - /// * The distance between `start` and `end` must not overflow `isize`. - /// * The distance being in bounds must not rely on "wrapping around" the - /// address space. - /// * It must be the case that `start <= end`. - #[inline] - pub unsafe fn rfind_raw( - &self, - hstart: *const u8, - hend: *const u8, - nstart: *const u8, - nend: *const u8, - ) -> Option<*const u8> { - let hlen = hend.distance(hstart); - let nlen = nend.distance(nstart); - if nlen > hlen { - return None; - } - let mut cur = hend.sub(nlen); - let start = hstart; - let mut hash = Hash::reverse(cur, cur.add(nlen)); - loop { - if self.0.hash == hash && is_equal_raw(cur, nstart, nlen) { - return Some(cur); - } - if cur <= start { - return None; - } - cur = cur.sub(1); - hash.roll(&self.0, cur.add(nlen).read(), cur.read()); - } - } -} - -/// Whether RK is believed to be very fast for the given needle/haystack. -#[inline] -pub(crate) fn is_fast(haystack: &[u8], _needle: &[u8]) -> bool { - haystack.len() < 16 -} - -/// A Rabin-Karp hash. This might represent the hash of a needle, or the hash -/// of a rolling window in the haystack. 
-#[derive(Clone, Copy, Debug, Default, Eq, PartialEq)] -struct Hash(u32); - -impl Hash { - /// Create a new hash that represents the empty string. - #[inline(always)] - fn new() -> Hash { - Hash(0) - } - - /// Create a new hash from the bytes given for use in forward searches. - /// - /// # Safety - /// - /// The given pointers must be valid to read from within their range. - #[inline(always)] - unsafe fn forward(mut start: *const u8, end: *const u8) -> Hash { - let mut hash = Hash::new(); - while start < end { - hash.add(start.read()); - start = start.add(1); - } - hash - } - - /// Create a new hash from the bytes given for use in reverse searches. - /// - /// # Safety - /// - /// The given pointers must be valid to read from within their range. - #[inline(always)] - unsafe fn reverse(start: *const u8, mut end: *const u8) -> Hash { - let mut hash = Hash::new(); - while start < end { - end = end.sub(1); - hash.add(end.read()); - } - hash - } - - /// Add 'new' and remove 'old' from this hash. The given needle hash should - /// correspond to the hash computed for the needle being searched for. - /// - /// This is meant to be used when the rolling window of the haystack is - /// advanced. - #[inline(always)] - fn roll(&mut self, finder: &Finder, old: u8, new: u8) { - self.del(finder, old); - self.add(new); - } - - /// Add a byte to this hash. - #[inline(always)] - fn add(&mut self, byte: u8) { - self.0 = self.0.wrapping_shl(1).wrapping_add(u32::from(byte)); - } - - /// Remove a byte from this hash. The given needle hash should correspond - /// to the hash computed for the needle being searched for. - #[inline(always)] - fn del(&mut self, finder: &Finder, byte: u8) { - let factor = finder.hash_2pow; - self.0 = self.0.wrapping_sub(u32::from(byte).wrapping_mul(factor)); - } -} - -/// Returns true when `x[i] == y[i]` for all `0 <= i < n`. -/// -/// We forcefully don't inline this to hint at the compiler that it is unlikely -/// to be called. This causes the inner rabinkarp loop above to be a bit -/// tighter and leads to some performance improvement. See the -/// memmem/krate/prebuilt/sliceslice-words/words benchmark. -/// -/// # Safety -/// -/// Same as `crate::arch::all::is_equal_raw`. -#[cold] -#[inline(never)] -unsafe fn is_equal_raw(x: *const u8, y: *const u8, n: usize) -> bool { - crate::arch::all::is_equal_raw(x, y, n) -} - -#[cfg(test)] -mod tests { - use super::*; - - define_substring_forward_quickcheck!(|h, n| Some( - Finder::new(n).find(h, n) - )); - define_substring_reverse_quickcheck!(|h, n| Some( - FinderRev::new(n).rfind(h, n) - )); - - #[test] - fn forward() { - crate::tests::substring::Runner::new() - .fwd(|h, n| Some(Finder::new(n).find(h, n))) - .run(); - } - - #[test] - fn reverse() { - crate::tests::substring::Runner::new() - .rev(|h, n| Some(FinderRev::new(n).rfind(h, n))) - .run(); - } -} diff --git a/vendor/memchr/src/arch/all/shiftor.rs b/vendor/memchr/src/arch/all/shiftor.rs deleted file mode 100644 index b690564a642e9d..00000000000000 --- a/vendor/memchr/src/arch/all/shiftor.rs +++ /dev/null @@ -1,89 +0,0 @@ -/*! -An implementation of the [Shift-Or substring search algorithm][shiftor]. - -[shiftor]: https://en.wikipedia.org/wiki/Bitap_algorithm -*/ - -use alloc::boxed::Box; - -/// The type of our mask. -/// -/// While we don't expose anyway to configure this in the public API, if one -/// really needs less memory usage or support for longer needles, then it is -/// suggested to copy the code from this module and modify it to fit your -/// needs. 
The code below is written to be correct regardless of whether Mask -/// is a u8, u16, u32, u64 or u128. -type Mask = u16; - -/// A forward substring searcher using the Shift-Or algorithm. -#[derive(Debug)] -pub struct Finder { - masks: Box<[Mask; 256]>, - needle_len: usize, -} - -impl Finder { - const MAX_NEEDLE_LEN: usize = (Mask::BITS - 1) as usize; - - /// Create a new Shift-Or forward searcher for the given `needle`. - /// - /// The needle may be empty. The empty needle matches at every byte offset. - #[inline] - pub fn new(needle: &[u8]) -> Option { - let needle_len = needle.len(); - if needle_len > Finder::MAX_NEEDLE_LEN { - // A match is found when bit 7 is set in 'result' in the search - // routine below. So our needle can't be bigger than 7. We could - // permit bigger needles by using u16, u32 or u64 for our mask - // entries. But this is all we need for this example. - return None; - } - let mut searcher = Finder { masks: Box::from([!0; 256]), needle_len }; - for (i, &byte) in needle.iter().enumerate() { - searcher.masks[usize::from(byte)] &= !(1 << i); - } - Some(searcher) - } - - /// Return the first occurrence of the needle given to `Finder::new` in - /// the `haystack` given. If no such occurrence exists, then `None` is - /// returned. - /// - /// Unlike most other substring search implementations in this crate, this - /// finder does not require passing the needle at search time. A match can - /// be determined without the needle at all since the required information - /// is already encoded into this finder at construction time. - /// - /// The maximum value this can return is `haystack.len()`, which can only - /// occur when the needle and haystack both have length zero. Otherwise, - /// for non-empty haystacks, the maximum value is `haystack.len() - 1`. - #[inline] - pub fn find(&self, haystack: &[u8]) -> Option { - if self.needle_len == 0 { - return Some(0); - } - let mut result = !1; - for (i, &byte) in haystack.iter().enumerate() { - result |= self.masks[usize::from(byte)]; - result <<= 1; - if result & (1 << self.needle_len) == 0 { - return Some(i + 1 - self.needle_len); - } - } - None - } -} - -#[cfg(test)] -mod tests { - use super::*; - - define_substring_forward_quickcheck!(|h, n| Some(Finder::new(n)?.find(h))); - - #[test] - fn forward() { - crate::tests::substring::Runner::new() - .fwd(|h, n| Some(Finder::new(n)?.find(h))) - .run(); - } -} diff --git a/vendor/memchr/src/arch/all/twoway.rs b/vendor/memchr/src/arch/all/twoway.rs deleted file mode 100644 index 0df3b4a86e950c..00000000000000 --- a/vendor/memchr/src/arch/all/twoway.rs +++ /dev/null @@ -1,877 +0,0 @@ -/*! -An implementation of the [Two-Way substring search algorithm][two-way]. - -[`Finder`] can be built for forward searches, while [`FinderRev`] can be built -for reverse searches. - -Two-Way makes for a nice general purpose substring search algorithm because of -its time and space complexity properties. It also performs well in practice. -Namely, with `m = len(needle)` and `n = len(haystack)`, Two-Way takes `O(m)` -time to create a finder, `O(1)` space and `O(n)` search time. In other words, -the preprocessing step is quick, doesn't require any heap memory and the worst -case search time is guaranteed to be linear in the haystack regardless of the -size of the needle. - -While vector algorithms will usually beat Two-Way handedly, vector algorithms -also usually have pathological or edge cases that are better handled by Two-Way. 
-Moreover, not all targets support vector algorithms or implementations for them -simply may not exist yet. - -Two-Way can be found in the `memmem` implementations in at least [GNU libc] and -[musl]. - -[two-way]: https://en.wikipedia.org/wiki/Two-way_string-matching_algorithm -[GNU libc]: https://www.gnu.org/software/libc/ -[musl]: https://www.musl-libc.org/ -*/ - -use core::cmp; - -use crate::{ - arch::all::{is_prefix, is_suffix}, - memmem::Pre, -}; - -/// A forward substring searcher that uses the Two-Way algorithm. -#[derive(Clone, Copy, Debug)] -pub struct Finder(TwoWay); - -/// A reverse substring searcher that uses the Two-Way algorithm. -#[derive(Clone, Copy, Debug)] -pub struct FinderRev(TwoWay); - -/// An implementation of the TwoWay substring search algorithm. -/// -/// This searcher supports forward and reverse search, although not -/// simultaneously. It runs in `O(n + m)` time and `O(1)` space, where -/// `n ~ len(needle)` and `m ~ len(haystack)`. -/// -/// The implementation here roughly matches that which was developed by -/// Crochemore and Perrin in their 1991 paper "Two-way string-matching." The -/// changes in this implementation are 1) the use of zero-based indices, 2) a -/// heuristic skip table based on the last byte (borrowed from Rust's standard -/// library) and 3) the addition of heuristics for a fast skip loop. For (3), -/// callers can pass any kind of prefilter they want, but usually it's one -/// based on a heuristic that uses an approximate background frequency of bytes -/// to choose rare bytes to quickly look for candidate match positions. Note -/// though that currently, this prefilter functionality is not exposed directly -/// in the public API. (File an issue if you want it and provide a use case -/// please.) -/// -/// The heuristic for fast skipping is automatically shut off if it's -/// detected to be ineffective at search time. Generally, this only occurs in -/// pathological cases. But this is generally necessary in order to preserve -/// a `O(n + m)` time bound. -/// -/// The code below is fairly complex and not obviously correct at all. It's -/// likely necessary to read the Two-Way paper cited above in order to fully -/// grok this code. The essence of it is: -/// -/// 1. Do something to detect a "critical" position in the needle. -/// 2. For the current position in the haystack, look if `needle[critical..]` -/// matches at that position. -/// 3. If so, look if `needle[..critical]` matches. -/// 4. If a mismatch occurs, shift the search by some amount based on the -/// critical position and a pre-computed shift. -/// -/// This type is wrapped in the forward and reverse finders that expose -/// consistent forward or reverse APIs. -#[derive(Clone, Copy, Debug)] -struct TwoWay { - /// A small bitset used as a quick prefilter (in addition to any prefilter - /// given by the caller). Namely, a bit `i` is set if and only if `b%64==i` - /// for any `b == needle[i]`. - /// - /// When used as a prefilter, if the last byte at the current candidate - /// position is NOT in this set, then we can skip that entire candidate - /// position (the length of the needle). This is essentially the shift - /// trick found in Boyer-Moore, but only applied to bytes that don't appear - /// in the needle. - /// - /// N.B. This trick was inspired by something similar in std's - /// implementation of Two-Way. - byteset: ApproximateByteSet, - /// A critical position in needle. 
Specifically, this position corresponds - /// to beginning of either the minimal or maximal suffix in needle. (N.B. - /// See SuffixType below for why "minimal" isn't quite the correct word - /// here.) - /// - /// This is the position at which every search begins. Namely, search - /// starts by scanning text to the right of this position, and only if - /// there's a match does the text to the left of this position get scanned. - critical_pos: usize, - /// The amount we shift by in the Two-Way search algorithm. This - /// corresponds to the "small period" and "large period" cases. - shift: Shift, -} - -impl Finder { - /// Create a searcher that finds occurrences of the given `needle`. - /// - /// An empty `needle` results in a match at every position in a haystack, - /// including at `haystack.len()`. - #[inline] - pub fn new(needle: &[u8]) -> Finder { - let byteset = ApproximateByteSet::new(needle); - let min_suffix = Suffix::forward(needle, SuffixKind::Minimal); - let max_suffix = Suffix::forward(needle, SuffixKind::Maximal); - let (period_lower_bound, critical_pos) = - if min_suffix.pos > max_suffix.pos { - (min_suffix.period, min_suffix.pos) - } else { - (max_suffix.period, max_suffix.pos) - }; - let shift = Shift::forward(needle, period_lower_bound, critical_pos); - Finder(TwoWay { byteset, critical_pos, shift }) - } - - /// Returns the first occurrence of `needle` in the given `haystack`, or - /// `None` if no such occurrence could be found. - /// - /// The `needle` given must be the same as the `needle` provided to - /// [`Finder::new`]. - /// - /// An empty `needle` results in a match at every position in a haystack, - /// including at `haystack.len()`. - #[inline] - pub fn find(&self, haystack: &[u8], needle: &[u8]) -> Option { - self.find_with_prefilter(None, haystack, needle) - } - - /// This is like [`Finder::find`], but it accepts a prefilter for - /// accelerating searches. - /// - /// Currently this is not exposed in the public API because, at the time - /// of writing, I didn't want to spend time thinking about how to expose - /// the prefilter infrastructure (if at all). If you have a compelling use - /// case for exposing this routine, please create an issue. Do *not* open - /// a PR that just exposes `Pre` and friends. Exporting this routine will - /// require API design. - #[inline(always)] - pub(crate) fn find_with_prefilter( - &self, - pre: Option>, - haystack: &[u8], - needle: &[u8], - ) -> Option { - match self.0.shift { - Shift::Small { period } => { - self.find_small_imp(pre, haystack, needle, period) - } - Shift::Large { shift } => { - self.find_large_imp(pre, haystack, needle, shift) - } - } - } - - // Each of the two search implementations below can be accelerated by a - // prefilter, but it is not always enabled. To avoid its overhead when - // its disabled, we explicitly inline each search implementation based on - // whether a prefilter will be used or not. The decision on which to use - // is made in the parent meta searcher. 
- - #[inline(always)] - fn find_small_imp( - &self, - mut pre: Option>, - haystack: &[u8], - needle: &[u8], - period: usize, - ) -> Option { - let mut pos = 0; - let mut shift = 0; - let last_byte_pos = match needle.len().checked_sub(1) { - None => return Some(pos), - Some(last_byte) => last_byte, - }; - while pos + needle.len() <= haystack.len() { - let mut i = cmp::max(self.0.critical_pos, shift); - if let Some(pre) = pre.as_mut() { - if pre.is_effective() { - pos += pre.find(&haystack[pos..])?; - shift = 0; - i = self.0.critical_pos; - if pos + needle.len() > haystack.len() { - return None; - } - } - } - if !self.0.byteset.contains(haystack[pos + last_byte_pos]) { - pos += needle.len(); - shift = 0; - continue; - } - while i < needle.len() && needle[i] == haystack[pos + i] { - i += 1; - } - if i < needle.len() { - pos += i - self.0.critical_pos + 1; - shift = 0; - } else { - let mut j = self.0.critical_pos; - while j > shift && needle[j] == haystack[pos + j] { - j -= 1; - } - if j <= shift && needle[shift] == haystack[pos + shift] { - return Some(pos); - } - pos += period; - shift = needle.len() - period; - } - } - None - } - - #[inline(always)] - fn find_large_imp( - &self, - mut pre: Option>, - haystack: &[u8], - needle: &[u8], - shift: usize, - ) -> Option { - let mut pos = 0; - let last_byte_pos = match needle.len().checked_sub(1) { - None => return Some(pos), - Some(last_byte) => last_byte, - }; - 'outer: while pos + needle.len() <= haystack.len() { - if let Some(pre) = pre.as_mut() { - if pre.is_effective() { - pos += pre.find(&haystack[pos..])?; - if pos + needle.len() > haystack.len() { - return None; - } - } - } - - if !self.0.byteset.contains(haystack[pos + last_byte_pos]) { - pos += needle.len(); - continue; - } - let mut i = self.0.critical_pos; - while i < needle.len() && needle[i] == haystack[pos + i] { - i += 1; - } - if i < needle.len() { - pos += i - self.0.critical_pos + 1; - } else { - for j in (0..self.0.critical_pos).rev() { - if needle[j] != haystack[pos + j] { - pos += shift; - continue 'outer; - } - } - return Some(pos); - } - } - None - } -} - -impl FinderRev { - /// Create a searcher that finds occurrences of the given `needle`. - /// - /// An empty `needle` results in a match at every position in a haystack, - /// including at `haystack.len()`. - #[inline] - pub fn new(needle: &[u8]) -> FinderRev { - let byteset = ApproximateByteSet::new(needle); - let min_suffix = Suffix::reverse(needle, SuffixKind::Minimal); - let max_suffix = Suffix::reverse(needle, SuffixKind::Maximal); - let (period_lower_bound, critical_pos) = - if min_suffix.pos < max_suffix.pos { - (min_suffix.period, min_suffix.pos) - } else { - (max_suffix.period, max_suffix.pos) - }; - let shift = Shift::reverse(needle, period_lower_bound, critical_pos); - FinderRev(TwoWay { byteset, critical_pos, shift }) - } - - /// Returns the last occurrence of `needle` in the given `haystack`, or - /// `None` if no such occurrence could be found. - /// - /// The `needle` given must be the same as the `needle` provided to - /// [`FinderRev::new`]. - /// - /// An empty `needle` results in a match at every position in a haystack, - /// including at `haystack.len()`. - #[inline] - pub fn rfind(&self, haystack: &[u8], needle: &[u8]) -> Option { - // For the reverse case, we don't use a prefilter. It's plausible that - // perhaps we should, but it's a lot of additional code to do it, and - // it's not clear that it's actually worth it. If you have a really - // compelling use case for this, please file an issue. 
- match self.0.shift { - Shift::Small { period } => { - self.rfind_small_imp(haystack, needle, period) - } - Shift::Large { shift } => { - self.rfind_large_imp(haystack, needle, shift) - } - } - } - - #[inline(always)] - fn rfind_small_imp( - &self, - haystack: &[u8], - needle: &[u8], - period: usize, - ) -> Option { - let nlen = needle.len(); - let mut pos = haystack.len(); - let mut shift = nlen; - let first_byte = match needle.get(0) { - None => return Some(pos), - Some(&first_byte) => first_byte, - }; - while pos >= nlen { - if !self.0.byteset.contains(haystack[pos - nlen]) { - pos -= nlen; - shift = nlen; - continue; - } - let mut i = cmp::min(self.0.critical_pos, shift); - while i > 0 && needle[i - 1] == haystack[pos - nlen + i - 1] { - i -= 1; - } - if i > 0 || first_byte != haystack[pos - nlen] { - pos -= self.0.critical_pos - i + 1; - shift = nlen; - } else { - let mut j = self.0.critical_pos; - while j < shift && needle[j] == haystack[pos - nlen + j] { - j += 1; - } - if j >= shift { - return Some(pos - nlen); - } - pos -= period; - shift = period; - } - } - None - } - - #[inline(always)] - fn rfind_large_imp( - &self, - haystack: &[u8], - needle: &[u8], - shift: usize, - ) -> Option { - let nlen = needle.len(); - let mut pos = haystack.len(); - let first_byte = match needle.get(0) { - None => return Some(pos), - Some(&first_byte) => first_byte, - }; - while pos >= nlen { - if !self.0.byteset.contains(haystack[pos - nlen]) { - pos -= nlen; - continue; - } - let mut i = self.0.critical_pos; - while i > 0 && needle[i - 1] == haystack[pos - nlen + i - 1] { - i -= 1; - } - if i > 0 || first_byte != haystack[pos - nlen] { - pos -= self.0.critical_pos - i + 1; - } else { - let mut j = self.0.critical_pos; - while j < nlen && needle[j] == haystack[pos - nlen + j] { - j += 1; - } - if j == nlen { - return Some(pos - nlen); - } - pos -= shift; - } - } - None - } -} - -/// A representation of the amount we're allowed to shift by during Two-Way -/// search. -/// -/// When computing a critical factorization of the needle, we find the position -/// of the critical factorization by finding the needle's maximal (or minimal) -/// suffix, along with the period of that suffix. It turns out that the period -/// of that suffix is a lower bound on the period of the needle itself. -/// -/// This lower bound is equivalent to the actual period of the needle in -/// some cases. To describe that case, we denote the needle as `x` where -/// `x = uv` and `v` is the lexicographic maximal suffix of `v`. The lower -/// bound given here is always the period of `v`, which is `<= period(x)`. The -/// case where `period(v) == period(x)` occurs when `len(u) < (len(x) / 2)` and -/// where `u` is a suffix of `v[0..period(v)]`. -/// -/// This case is important because the search algorithm for when the -/// periods are equivalent is slightly different than the search algorithm -/// for when the periods are not equivalent. In particular, when they aren't -/// equivalent, we know that the period of the needle is no less than half its -/// length. In this case, we shift by an amount less than or equal to the -/// period of the needle (determined by the maximum length of the components -/// of the critical factorization of `x`, i.e., `max(len(u), len(v))`).. -/// -/// The above two cases are represented by the variants below. Each entails -/// a different instantiation of the Two-Way search algorithm. -/// -/// N.B. 
If we could find a way to compute the exact period in all cases, -/// then we could collapse this case analysis and simplify the algorithm. The -/// Two-Way paper suggests this is possible, but more reading is required to -/// grok why the authors didn't pursue that path. -#[derive(Clone, Copy, Debug)] -enum Shift { - Small { period: usize }, - Large { shift: usize }, -} - -impl Shift { - /// Compute the shift for a given needle in the forward direction. - /// - /// This requires a lower bound on the period and a critical position. - /// These can be computed by extracting both the minimal and maximal - /// lexicographic suffixes, and choosing the right-most starting position. - /// The lower bound on the period is then the period of the chosen suffix. - fn forward( - needle: &[u8], - period_lower_bound: usize, - critical_pos: usize, - ) -> Shift { - let large = cmp::max(critical_pos, needle.len() - critical_pos); - if critical_pos * 2 >= needle.len() { - return Shift::Large { shift: large }; - } - - let (u, v) = needle.split_at(critical_pos); - if !is_suffix(&v[..period_lower_bound], u) { - return Shift::Large { shift: large }; - } - Shift::Small { period: period_lower_bound } - } - - /// Compute the shift for a given needle in the reverse direction. - /// - /// This requires a lower bound on the period and a critical position. - /// These can be computed by extracting both the minimal and maximal - /// lexicographic suffixes, and choosing the left-most starting position. - /// The lower bound on the period is then the period of the chosen suffix. - fn reverse( - needle: &[u8], - period_lower_bound: usize, - critical_pos: usize, - ) -> Shift { - let large = cmp::max(critical_pos, needle.len() - critical_pos); - if (needle.len() - critical_pos) * 2 >= needle.len() { - return Shift::Large { shift: large }; - } - - let (v, u) = needle.split_at(critical_pos); - if !is_prefix(&v[v.len() - period_lower_bound..], u) { - return Shift::Large { shift: large }; - } - Shift::Small { period: period_lower_bound } - } -} - -/// A suffix extracted from a needle along with its period. -#[derive(Debug)] -struct Suffix { - /// The starting position of this suffix. - /// - /// If this is a forward suffix, then `&bytes[pos..]` can be used. If this - /// is a reverse suffix, then `&bytes[..pos]` can be used. That is, for - /// forward suffixes, this is an inclusive starting position, where as for - /// reverse suffixes, this is an exclusive ending position. - pos: usize, - /// The period of this suffix. - /// - /// Note that this is NOT necessarily the period of the string from which - /// this suffix comes from. (It is always less than or equal to the period - /// of the original string.) - period: usize, -} - -impl Suffix { - fn forward(needle: &[u8], kind: SuffixKind) -> Suffix { - // suffix represents our maximal (or minimal) suffix, along with - // its period. - let mut suffix = Suffix { pos: 0, period: 1 }; - // The start of a suffix in `needle` that we are considering as a - // more maximal (or minimal) suffix than what's in `suffix`. - let mut candidate_start = 1; - // The current offset of our suffixes that we're comparing. - // - // When the characters at this offset are the same, then we mush on - // to the next position since no decision is possible. When the - // candidate's character is greater (or lesser) than the corresponding - // character than our current maximal (or minimal) suffix, then the - // current suffix is changed over to the candidate and we restart our - // search. 
Otherwise, the candidate suffix is no good and we restart - // our search on the next candidate. - // - // The three cases above correspond to the three cases in the loop - // below. - let mut offset = 0; - - while candidate_start + offset < needle.len() { - let current = needle[suffix.pos + offset]; - let candidate = needle[candidate_start + offset]; - match kind.cmp(current, candidate) { - SuffixOrdering::Accept => { - suffix = Suffix { pos: candidate_start, period: 1 }; - candidate_start += 1; - offset = 0; - } - SuffixOrdering::Skip => { - candidate_start += offset + 1; - offset = 0; - suffix.period = candidate_start - suffix.pos; - } - SuffixOrdering::Push => { - if offset + 1 == suffix.period { - candidate_start += suffix.period; - offset = 0; - } else { - offset += 1; - } - } - } - } - suffix - } - - fn reverse(needle: &[u8], kind: SuffixKind) -> Suffix { - // See the comments in `forward` for how this works. - let mut suffix = Suffix { pos: needle.len(), period: 1 }; - if needle.len() == 1 { - return suffix; - } - let mut candidate_start = match needle.len().checked_sub(1) { - None => return suffix, - Some(candidate_start) => candidate_start, - }; - let mut offset = 0; - - while offset < candidate_start { - let current = needle[suffix.pos - offset - 1]; - let candidate = needle[candidate_start - offset - 1]; - match kind.cmp(current, candidate) { - SuffixOrdering::Accept => { - suffix = Suffix { pos: candidate_start, period: 1 }; - candidate_start -= 1; - offset = 0; - } - SuffixOrdering::Skip => { - candidate_start -= offset + 1; - offset = 0; - suffix.period = suffix.pos - candidate_start; - } - SuffixOrdering::Push => { - if offset + 1 == suffix.period { - candidate_start -= suffix.period; - offset = 0; - } else { - offset += 1; - } - } - } - } - suffix - } -} - -/// The kind of suffix to extract. -#[derive(Clone, Copy, Debug)] -enum SuffixKind { - /// Extract the smallest lexicographic suffix from a string. - /// - /// Technically, this doesn't actually pick the smallest lexicographic - /// suffix. e.g., Given the choice between `a` and `aa`, this will choose - /// the latter over the former, even though `a < aa`. The reasoning for - /// this isn't clear from the paper, but it still smells like a minimal - /// suffix. - Minimal, - /// Extract the largest lexicographic suffix from a string. - /// - /// Unlike `Minimal`, this really does pick the maximum suffix. e.g., Given - /// the choice between `z` and `zz`, this will choose the latter over the - /// former. - Maximal, -} - -/// The result of comparing corresponding bytes between two suffixes. -#[derive(Clone, Copy, Debug)] -enum SuffixOrdering { - /// This occurs when the given candidate byte indicates that the candidate - /// suffix is better than the current maximal (or minimal) suffix. That is, - /// the current candidate suffix should supplant the current maximal (or - /// minimal) suffix. - Accept, - /// This occurs when the given candidate byte excludes the candidate suffix - /// from being better than the current maximal (or minimal) suffix. That - /// is, the current candidate suffix should be dropped and the next one - /// should be considered. - Skip, - /// This occurs when no decision to accept or skip the candidate suffix - /// can be made, e.g., when corresponding bytes are equivalent. In this - /// case, the next corresponding bytes should be compared. 
- Push, -} - -impl SuffixKind { - /// Returns true if and only if the given candidate byte indicates that - /// it should replace the current suffix as the maximal (or minimal) - /// suffix. - fn cmp(self, current: u8, candidate: u8) -> SuffixOrdering { - use self::SuffixOrdering::*; - - match self { - SuffixKind::Minimal if candidate < current => Accept, - SuffixKind::Minimal if candidate > current => Skip, - SuffixKind::Minimal => Push, - SuffixKind::Maximal if candidate > current => Accept, - SuffixKind::Maximal if candidate < current => Skip, - SuffixKind::Maximal => Push, - } - } -} - -/// A bitset used to track whether a particular byte exists in a needle or not. -/// -/// Namely, bit 'i' is set if and only if byte%64==i for any byte in the -/// needle. If a particular byte in the haystack is NOT in this set, then one -/// can conclude that it is also not in the needle, and thus, one can advance -/// in the haystack by needle.len() bytes. -#[derive(Clone, Copy, Debug)] -struct ApproximateByteSet(u64); - -impl ApproximateByteSet { - /// Create a new set from the given needle. - fn new(needle: &[u8]) -> ApproximateByteSet { - let mut bits = 0; - for &b in needle { - bits |= 1 << (b % 64); - } - ApproximateByteSet(bits) - } - - /// Return true if and only if the given byte might be in this set. This - /// may return a false positive, but will never return a false negative. - #[inline(always)] - fn contains(&self, byte: u8) -> bool { - self.0 & (1 << (byte % 64)) != 0 - } -} - -#[cfg(test)] -mod tests { - use alloc::vec::Vec; - - use super::*; - - /// Convenience wrapper for computing the suffix as a byte string. - fn get_suffix_forward(needle: &[u8], kind: SuffixKind) -> (&[u8], usize) { - let s = Suffix::forward(needle, kind); - (&needle[s.pos..], s.period) - } - - /// Convenience wrapper for computing the reverse suffix as a byte string. - fn get_suffix_reverse(needle: &[u8], kind: SuffixKind) -> (&[u8], usize) { - let s = Suffix::reverse(needle, kind); - (&needle[..s.pos], s.period) - } - - /// Return all of the non-empty suffixes in the given byte string. - fn suffixes(bytes: &[u8]) -> Vec<&[u8]> { - (0..bytes.len()).map(|i| &bytes[i..]).collect() - } - - /// Return the lexicographically maximal suffix of the given byte string. - fn naive_maximal_suffix_forward(needle: &[u8]) -> &[u8] { - let mut sufs = suffixes(needle); - sufs.sort(); - sufs.pop().unwrap() - } - - /// Return the lexicographically maximal suffix of the reverse of the given - /// byte string. - fn naive_maximal_suffix_reverse(needle: &[u8]) -> Vec { - let mut reversed = needle.to_vec(); - reversed.reverse(); - let mut got = naive_maximal_suffix_forward(&reversed).to_vec(); - got.reverse(); - got - } - - define_substring_forward_quickcheck!(|h, n| Some( - Finder::new(n).find(h, n) - )); - define_substring_reverse_quickcheck!(|h, n| Some( - FinderRev::new(n).rfind(h, n) - )); - - #[test] - fn forward() { - crate::tests::substring::Runner::new() - .fwd(|h, n| Some(Finder::new(n).find(h, n))) - .run(); - } - - #[test] - fn reverse() { - crate::tests::substring::Runner::new() - .rev(|h, n| Some(FinderRev::new(n).rfind(h, n))) - .run(); - } - - #[test] - fn suffix_forward() { - macro_rules! assert_suffix_min { - ($given:expr, $expected:expr, $period:expr) => { - let (got_suffix, got_period) = - get_suffix_forward($given.as_bytes(), SuffixKind::Minimal); - let got_suffix = core::str::from_utf8(got_suffix).unwrap(); - assert_eq!(($expected, $period), (got_suffix, got_period)); - }; - } - - macro_rules! 
assert_suffix_max { - ($given:expr, $expected:expr, $period:expr) => { - let (got_suffix, got_period) = - get_suffix_forward($given.as_bytes(), SuffixKind::Maximal); - let got_suffix = core::str::from_utf8(got_suffix).unwrap(); - assert_eq!(($expected, $period), (got_suffix, got_period)); - }; - } - - assert_suffix_min!("a", "a", 1); - assert_suffix_max!("a", "a", 1); - - assert_suffix_min!("ab", "ab", 2); - assert_suffix_max!("ab", "b", 1); - - assert_suffix_min!("ba", "a", 1); - assert_suffix_max!("ba", "ba", 2); - - assert_suffix_min!("abc", "abc", 3); - assert_suffix_max!("abc", "c", 1); - - assert_suffix_min!("acb", "acb", 3); - assert_suffix_max!("acb", "cb", 2); - - assert_suffix_min!("cba", "a", 1); - assert_suffix_max!("cba", "cba", 3); - - assert_suffix_min!("abcabc", "abcabc", 3); - assert_suffix_max!("abcabc", "cabc", 3); - - assert_suffix_min!("abcabcabc", "abcabcabc", 3); - assert_suffix_max!("abcabcabc", "cabcabc", 3); - - assert_suffix_min!("abczz", "abczz", 5); - assert_suffix_max!("abczz", "zz", 1); - - assert_suffix_min!("zzabc", "abc", 3); - assert_suffix_max!("zzabc", "zzabc", 5); - - assert_suffix_min!("aaa", "aaa", 1); - assert_suffix_max!("aaa", "aaa", 1); - - assert_suffix_min!("foobar", "ar", 2); - assert_suffix_max!("foobar", "r", 1); - } - - #[test] - fn suffix_reverse() { - macro_rules! assert_suffix_min { - ($given:expr, $expected:expr, $period:expr) => { - let (got_suffix, got_period) = - get_suffix_reverse($given.as_bytes(), SuffixKind::Minimal); - let got_suffix = core::str::from_utf8(got_suffix).unwrap(); - assert_eq!(($expected, $period), (got_suffix, got_period)); - }; - } - - macro_rules! assert_suffix_max { - ($given:expr, $expected:expr, $period:expr) => { - let (got_suffix, got_period) = - get_suffix_reverse($given.as_bytes(), SuffixKind::Maximal); - let got_suffix = core::str::from_utf8(got_suffix).unwrap(); - assert_eq!(($expected, $period), (got_suffix, got_period)); - }; - } - - assert_suffix_min!("a", "a", 1); - assert_suffix_max!("a", "a", 1); - - assert_suffix_min!("ab", "a", 1); - assert_suffix_max!("ab", "ab", 2); - - assert_suffix_min!("ba", "ba", 2); - assert_suffix_max!("ba", "b", 1); - - assert_suffix_min!("abc", "a", 1); - assert_suffix_max!("abc", "abc", 3); - - assert_suffix_min!("acb", "a", 1); - assert_suffix_max!("acb", "ac", 2); - - assert_suffix_min!("cba", "cba", 3); - assert_suffix_max!("cba", "c", 1); - - assert_suffix_min!("abcabc", "abca", 3); - assert_suffix_max!("abcabc", "abcabc", 3); - - assert_suffix_min!("abcabcabc", "abcabca", 3); - assert_suffix_max!("abcabcabc", "abcabcabc", 3); - - assert_suffix_min!("abczz", "a", 1); - assert_suffix_max!("abczz", "abczz", 5); - - assert_suffix_min!("zzabc", "zza", 3); - assert_suffix_max!("zzabc", "zz", 1); - - assert_suffix_min!("aaa", "aaa", 1); - assert_suffix_max!("aaa", "aaa", 1); - } - - #[cfg(not(miri))] - quickcheck::quickcheck! { - fn qc_suffix_forward_maximal(bytes: Vec) -> bool { - if bytes.is_empty() { - return true; - } - - let (got, _) = get_suffix_forward(&bytes, SuffixKind::Maximal); - let expected = naive_maximal_suffix_forward(&bytes); - got == expected - } - - fn qc_suffix_reverse_maximal(bytes: Vec) -> bool { - if bytes.is_empty() { - return true; - } - - let (got, _) = get_suffix_reverse(&bytes, SuffixKind::Maximal); - let expected = naive_maximal_suffix_reverse(&bytes); - expected == got - } - } - - // This is a regression test caught by quickcheck that exercised a bug in - // the reverse small period handling. 
The bug was that we were using 'if j - // == shift' to determine if a match occurred, but the correct guard is 'if - // j >= shift', which matches the corresponding guard in the forward impl. - #[test] - fn regression_rev_small_period() { - let rfind = |h, n| FinderRev::new(n).rfind(h, n); - let haystack = "ababaz"; - let needle = "abab"; - assert_eq!(Some(0), rfind(haystack.as_bytes(), needle.as_bytes())); - } -} diff --git a/vendor/memchr/src/arch/generic/memchr.rs b/vendor/memchr/src/arch/generic/memchr.rs deleted file mode 100644 index de61fd81d8b24e..00000000000000 --- a/vendor/memchr/src/arch/generic/memchr.rs +++ /dev/null @@ -1,1214 +0,0 @@ -/*! -Generic crate-internal routines for the `memchr` family of functions. -*/ - -// What follows is a vector algorithm generic over the specific vector -// type to detect the position of one, two or three needles in a haystack. -// From what I know, this is a "classic" algorithm, although I don't -// believe it has been published in any peer reviewed journal. I believe -// it can be found in places like glibc and Go's standard library. It -// appears to be well known and is elaborated on in more detail here: -// https://gms.tf/stdfind-and-memchr-optimizations.html -// -// While the routine below is fairly long and perhaps intimidating, the basic -// idea is actually very simple and can be expressed straight-forwardly in -// pseudo code. The pseudo code below is written for 128 bit vectors, but the -// actual code below works for anything that implements the Vector trait. -// -// needle = (n1 << 15) | (n1 << 14) | ... | (n1 << 1) | n1 -// // Note: shift amount is in bytes -// -// while i <= haystack.len() - 16: -// // A 16 byte vector. Each byte in chunk corresponds to a byte in -// // the haystack. -// chunk = haystack[i:i+16] -// // Compare bytes in needle with bytes in chunk. The result is a 16 -// // byte chunk where each byte is 0xFF if the corresponding bytes -// // in needle and chunk were equal, or 0x00 otherwise. -// eqs = cmpeq(needle, chunk) -// // Return a 32 bit integer where the most significant 16 bits -// // are always 0 and the lower 16 bits correspond to whether the -// // most significant bit in the correspond byte in `eqs` is set. -// // In other words, `mask as u16` has bit i set if and only if -// // needle[i] == chunk[i]. -// mask = movemask(eqs) -// -// // Mask is 0 if there is no match, and non-zero otherwise. -// if mask != 0: -// // trailing_zeros tells us the position of the least significant -// // bit that is set. -// return i + trailing_zeros(mask) -// -// // haystack length may not be a multiple of 16, so search the rest. -// while i < haystack.len(): -// if haystack[i] == n1: -// return i -// -// // No match found. -// return NULL -// -// In fact, we could loosely translate the above code to Rust line-for-line -// and it would be a pretty fast algorithm. But, we pull out all the stops -// to go as fast as possible: -// -// 1. We use aligned loads. That is, we do some finagling to make sure our -// primary loop not only proceeds in increments of 16 bytes, but that -// the address of haystack's pointer that we dereference is aligned to -// 16 bytes. 16 is a magic number here because it is the size of SSE2 -// 128-bit vector. (For the AVX2 algorithm, 32 is the magic number.) -// Therefore, to get aligned loads, our pointer's address must be evenly -// divisible by 16. -// 2. Our primary loop proceeds 64 bytes at a time instead of 16. 
It's -// kind of like loop unrolling, but we combine the equality comparisons -// using a vector OR such that we only need to extract a single mask to -// determine whether a match exists or not. If so, then we do some -// book-keeping to determine the precise location but otherwise mush on. -// 3. We use our "chunk" comparison routine in as many places as possible, -// even if it means using unaligned loads. In particular, if haystack -// starts with an unaligned address, then we do an unaligned load to -// search the first 16 bytes. We then start our primary loop at the -// smallest subsequent aligned address, which will actually overlap with -// previously searched bytes. But we're OK with that. We do a similar -// dance at the end of our primary loop. Finally, to avoid a -// byte-at-a-time loop at the end, we do a final 16 byte unaligned load -// that may overlap with a previous load. This is OK because it converts -// a loop into a small number of very fast vector instructions. The overlap -// is OK because we know the place where the overlap occurs does not -// contain a match. -// -// And that's pretty all there is to it. Note that since the below is -// generic and since it's meant to be inlined into routines with a -// `#[target_feature(enable = "...")]` annotation, we must mark all routines as -// both unsafe and `#[inline(always)]`. -// -// The fact that the code below is generic does somewhat inhibit us. For -// example, I've noticed that introducing an unlineable `#[cold]` function to -// handle the match case in the loop generates tighter assembly, but there is -// no way to do this in the generic code below because the generic code doesn't -// know what `target_feature` annotation to apply to the unlineable function. -// We could make such functions part of the `Vector` trait, but we instead live -// with the slightly sub-optimal codegen for now since it doesn't seem to have -// a noticeable perf difference. - -use crate::{ - ext::Pointer, - vector::{MoveMask, Vector}, -}; - -/// Finds all occurrences of a single byte in a haystack. -#[derive(Clone, Copy, Debug)] -pub(crate) struct One { - s1: u8, - v1: V, -} - -impl One { - /// The number of bytes we examine per each iteration of our search loop. - const LOOP_SIZE: usize = 4 * V::BYTES; - - /// Create a new searcher that finds occurrences of the byte given. - #[inline(always)] - pub(crate) unsafe fn new(needle: u8) -> One { - One { s1: needle, v1: V::splat(needle) } - } - - /// Returns the needle given to `One::new`. - #[inline(always)] - pub(crate) fn needle1(&self) -> u8 { - self.s1 - } - - /// Return a pointer to the first occurrence of the needle in the given - /// haystack. If no such occurrence exists, then `None` is returned. - /// - /// When a match is found, the pointer returned is guaranteed to be - /// `>= start` and `< end`. - /// - /// # Safety - /// - /// * It must be the case that `start < end` and that the distance between - /// them is at least equal to `V::BYTES`. That is, it must always be valid - /// to do at least an unaligned load of `V` at `start`. - /// * Both `start` and `end` must be valid for reads. - /// * Both `start` and `end` must point to an initialized value. - /// * Both `start` and `end` must point to the same allocated object and - /// must either be in bounds or at most one byte past the end of the - /// allocated object. - /// * Both `start` and `end` must be _derived from_ a pointer to the same - /// object. - /// * The distance between `start` and `end` must not overflow `isize`. 
- /// * The distance being in bounds must not rely on "wrapping around" the - /// address space. - #[inline(always)] - pub(crate) unsafe fn find_raw( - &self, - start: *const u8, - end: *const u8, - ) -> Option<*const u8> { - // If we want to support vectors bigger than 256 bits, we probably - // need to move up to using a u64 for the masks used below. Currently - // they are 32 bits, which means we're SOL for vectors that need masks - // bigger than 32 bits. Overall unclear until there's a use case. - debug_assert!(V::BYTES <= 32, "vector cannot be bigger than 32 bytes"); - - let topos = V::Mask::first_offset; - let len = end.distance(start); - debug_assert!( - len >= V::BYTES, - "haystack has length {}, but must be at least {}", - len, - V::BYTES - ); - - // Search a possibly unaligned chunk at `start`. This covers any part - // of the haystack prior to where aligned loads can start. - if let Some(cur) = self.search_chunk(start, topos) { - return Some(cur); - } - // Set `cur` to the first V-aligned pointer greater than `start`. - let mut cur = start.add(V::BYTES - (start.as_usize() & V::ALIGN)); - debug_assert!(cur > start && end.sub(V::BYTES) >= start); - if len >= Self::LOOP_SIZE { - while cur <= end.sub(Self::LOOP_SIZE) { - debug_assert_eq!(0, cur.as_usize() % V::BYTES); - - let a = V::load_aligned(cur); - let b = V::load_aligned(cur.add(1 * V::BYTES)); - let c = V::load_aligned(cur.add(2 * V::BYTES)); - let d = V::load_aligned(cur.add(3 * V::BYTES)); - let eqa = self.v1.cmpeq(a); - let eqb = self.v1.cmpeq(b); - let eqc = self.v1.cmpeq(c); - let eqd = self.v1.cmpeq(d); - let or1 = eqa.or(eqb); - let or2 = eqc.or(eqd); - let or3 = or1.or(or2); - if or3.movemask_will_have_non_zero() { - let mask = eqa.movemask(); - if mask.has_non_zero() { - return Some(cur.add(topos(mask))); - } - - let mask = eqb.movemask(); - if mask.has_non_zero() { - return Some(cur.add(1 * V::BYTES).add(topos(mask))); - } - - let mask = eqc.movemask(); - if mask.has_non_zero() { - return Some(cur.add(2 * V::BYTES).add(topos(mask))); - } - - let mask = eqd.movemask(); - debug_assert!(mask.has_non_zero()); - return Some(cur.add(3 * V::BYTES).add(topos(mask))); - } - cur = cur.add(Self::LOOP_SIZE); - } - } - // Handle any leftovers after the aligned loop above. We use unaligned - // loads here, but I believe we are guaranteed that they are aligned - // since `cur` is aligned. - while cur <= end.sub(V::BYTES) { - debug_assert!(end.distance(cur) >= V::BYTES); - if let Some(cur) = self.search_chunk(cur, topos) { - return Some(cur); - } - cur = cur.add(V::BYTES); - } - // Finally handle any remaining bytes less than the size of V. In this - // case, our pointer may indeed be unaligned and the load may overlap - // with the previous one. But that's okay since we know the previous - // load didn't lead to a match (otherwise we wouldn't be here). - if cur < end { - debug_assert!(end.distance(cur) < V::BYTES); - cur = cur.sub(V::BYTES - end.distance(cur)); - debug_assert_eq!(end.distance(cur), V::BYTES); - return self.search_chunk(cur, topos); - } - None - } - - /// Return a pointer to the last occurrence of the needle in the given - /// haystack. If no such occurrence exists, then `None` is returned. - /// - /// When a match is found, the pointer returned is guaranteed to be - /// `>= start` and `< end`. - /// - /// # Safety - /// - /// * It must be the case that `start < end` and that the distance between - /// them is at least equal to `V::BYTES`. 
That is, it must always be valid - /// to do at least an unaligned load of `V` at `start`. - /// * Both `start` and `end` must be valid for reads. - /// * Both `start` and `end` must point to an initialized value. - /// * Both `start` and `end` must point to the same allocated object and - /// must either be in bounds or at most one byte past the end of the - /// allocated object. - /// * Both `start` and `end` must be _derived from_ a pointer to the same - /// object. - /// * The distance between `start` and `end` must not overflow `isize`. - /// * The distance being in bounds must not rely on "wrapping around" the - /// address space. - #[inline(always)] - pub(crate) unsafe fn rfind_raw( - &self, - start: *const u8, - end: *const u8, - ) -> Option<*const u8> { - // If we want to support vectors bigger than 256 bits, we probably - // need to move up to using a u64 for the masks used below. Currently - // they are 32 bits, which means we're SOL for vectors that need masks - // bigger than 32 bits. Overall unclear until there's a use case. - debug_assert!(V::BYTES <= 32, "vector cannot be bigger than 32 bytes"); - - let topos = V::Mask::last_offset; - let len = end.distance(start); - debug_assert!( - len >= V::BYTES, - "haystack has length {}, but must be at least {}", - len, - V::BYTES - ); - - if let Some(cur) = self.search_chunk(end.sub(V::BYTES), topos) { - return Some(cur); - } - let mut cur = end.sub(end.as_usize() & V::ALIGN); - debug_assert!(start <= cur && cur <= end); - if len >= Self::LOOP_SIZE { - while cur >= start.add(Self::LOOP_SIZE) { - debug_assert_eq!(0, cur.as_usize() % V::BYTES); - - cur = cur.sub(Self::LOOP_SIZE); - let a = V::load_aligned(cur); - let b = V::load_aligned(cur.add(1 * V::BYTES)); - let c = V::load_aligned(cur.add(2 * V::BYTES)); - let d = V::load_aligned(cur.add(3 * V::BYTES)); - let eqa = self.v1.cmpeq(a); - let eqb = self.v1.cmpeq(b); - let eqc = self.v1.cmpeq(c); - let eqd = self.v1.cmpeq(d); - let or1 = eqa.or(eqb); - let or2 = eqc.or(eqd); - let or3 = or1.or(or2); - if or3.movemask_will_have_non_zero() { - let mask = eqd.movemask(); - if mask.has_non_zero() { - return Some(cur.add(3 * V::BYTES).add(topos(mask))); - } - - let mask = eqc.movemask(); - if mask.has_non_zero() { - return Some(cur.add(2 * V::BYTES).add(topos(mask))); - } - - let mask = eqb.movemask(); - if mask.has_non_zero() { - return Some(cur.add(1 * V::BYTES).add(topos(mask))); - } - - let mask = eqa.movemask(); - debug_assert!(mask.has_non_zero()); - return Some(cur.add(topos(mask))); - } - } - } - while cur >= start.add(V::BYTES) { - debug_assert!(cur.distance(start) >= V::BYTES); - cur = cur.sub(V::BYTES); - if let Some(cur) = self.search_chunk(cur, topos) { - return Some(cur); - } - } - if cur > start { - debug_assert!(cur.distance(start) < V::BYTES); - return self.search_chunk(start, topos); - } - None - } - - /// Return a count of all matching bytes in the given haystack. - /// - /// # Safety - /// - /// * It must be the case that `start < end` and that the distance between - /// them is at least equal to `V::BYTES`. That is, it must always be valid - /// to do at least an unaligned load of `V` at `start`. - /// * Both `start` and `end` must be valid for reads. - /// * Both `start` and `end` must point to an initialized value. - /// * Both `start` and `end` must point to the same allocated object and - /// must either be in bounds or at most one byte past the end of the - /// allocated object. 
- /// * Both `start` and `end` must be _derived from_ a pointer to the same - /// object. - /// * The distance between `start` and `end` must not overflow `isize`. - /// * The distance being in bounds must not rely on "wrapping around" the - /// address space. - #[inline(always)] - pub(crate) unsafe fn count_raw( - &self, - start: *const u8, - end: *const u8, - ) -> usize { - debug_assert!(V::BYTES <= 32, "vector cannot be bigger than 32 bytes"); - - let confirm = |b| b == self.needle1(); - let len = end.distance(start); - debug_assert!( - len >= V::BYTES, - "haystack has length {}, but must be at least {}", - len, - V::BYTES - ); - - // Set `cur` to the first V-aligned pointer greater than `start`. - let mut cur = start.add(V::BYTES - (start.as_usize() & V::ALIGN)); - // Count any matching bytes before we start our aligned loop. - let mut count = count_byte_by_byte(start, cur, confirm); - debug_assert!(cur > start && end.sub(V::BYTES) >= start); - if len >= Self::LOOP_SIZE { - while cur <= end.sub(Self::LOOP_SIZE) { - debug_assert_eq!(0, cur.as_usize() % V::BYTES); - - let a = V::load_aligned(cur); - let b = V::load_aligned(cur.add(1 * V::BYTES)); - let c = V::load_aligned(cur.add(2 * V::BYTES)); - let d = V::load_aligned(cur.add(3 * V::BYTES)); - let eqa = self.v1.cmpeq(a); - let eqb = self.v1.cmpeq(b); - let eqc = self.v1.cmpeq(c); - let eqd = self.v1.cmpeq(d); - count += eqa.movemask().count_ones(); - count += eqb.movemask().count_ones(); - count += eqc.movemask().count_ones(); - count += eqd.movemask().count_ones(); - cur = cur.add(Self::LOOP_SIZE); - } - } - // Handle any leftovers after the aligned loop above. We use unaligned - // loads here, but I believe we are guaranteed that they are aligned - // since `cur` is aligned. - while cur <= end.sub(V::BYTES) { - debug_assert!(end.distance(cur) >= V::BYTES); - let chunk = V::load_unaligned(cur); - count += self.v1.cmpeq(chunk).movemask().count_ones(); - cur = cur.add(V::BYTES); - } - // And finally count any leftovers that weren't caught above. - count += count_byte_by_byte(cur, end, confirm); - count - } - - /// Search `V::BYTES` starting at `cur` via an unaligned load. - /// - /// `mask_to_offset` should be a function that converts a `movemask` to - /// an offset such that `cur.add(offset)` corresponds to a pointer to the - /// match location if one is found. Generally it is expected to use either - /// `mask_to_first_offset` or `mask_to_last_offset`, depending on whether - /// one is implementing a forward or reverse search, respectively. - /// - /// # Safety - /// - /// `cur` must be a valid pointer and it must be valid to do an unaligned - /// load of size `V::BYTES` at `cur`. - #[inline(always)] - unsafe fn search_chunk( - &self, - cur: *const u8, - mask_to_offset: impl Fn(V::Mask) -> usize, - ) -> Option<*const u8> { - let chunk = V::load_unaligned(cur); - let mask = self.v1.cmpeq(chunk).movemask(); - if mask.has_non_zero() { - Some(cur.add(mask_to_offset(mask))) - } else { - None - } - } -} - -/// Finds all occurrences of two bytes in a haystack. -/// -/// That is, this reports matches of one of two possible bytes. For example, -/// searching for `a` or `b` in `afoobar` would report matches at offsets `0`, -/// `4` and `5`. -#[derive(Clone, Copy, Debug)] -pub(crate) struct Two { - s1: u8, - s2: u8, - v1: V, - v2: V, -} - -impl Two { - /// The number of bytes we examine per each iteration of our search loop. - const LOOP_SIZE: usize = 2 * V::BYTES; - - /// Create a new searcher that finds occurrences of the byte given. 
- #[inline(always)] - pub(crate) unsafe fn new(needle1: u8, needle2: u8) -> Two { - Two { - s1: needle1, - s2: needle2, - v1: V::splat(needle1), - v2: V::splat(needle2), - } - } - - /// Returns the first needle given to `Two::new`. - #[inline(always)] - pub(crate) fn needle1(&self) -> u8 { - self.s1 - } - - /// Returns the second needle given to `Two::new`. - #[inline(always)] - pub(crate) fn needle2(&self) -> u8 { - self.s2 - } - - /// Return a pointer to the first occurrence of one of the needles in the - /// given haystack. If no such occurrence exists, then `None` is returned. - /// - /// When a match is found, the pointer returned is guaranteed to be - /// `>= start` and `< end`. - /// - /// # Safety - /// - /// * It must be the case that `start < end` and that the distance between - /// them is at least equal to `V::BYTES`. That is, it must always be valid - /// to do at least an unaligned load of `V` at `start`. - /// * Both `start` and `end` must be valid for reads. - /// * Both `start` and `end` must point to an initialized value. - /// * Both `start` and `end` must point to the same allocated object and - /// must either be in bounds or at most one byte past the end of the - /// allocated object. - /// * Both `start` and `end` must be _derived from_ a pointer to the same - /// object. - /// * The distance between `start` and `end` must not overflow `isize`. - /// * The distance being in bounds must not rely on "wrapping around" the - /// address space. - #[inline(always)] - pub(crate) unsafe fn find_raw( - &self, - start: *const u8, - end: *const u8, - ) -> Option<*const u8> { - // If we want to support vectors bigger than 256 bits, we probably - // need to move up to using a u64 for the masks used below. Currently - // they are 32 bits, which means we're SOL for vectors that need masks - // bigger than 32 bits. Overall unclear until there's a use case. - debug_assert!(V::BYTES <= 32, "vector cannot be bigger than 32 bytes"); - - let topos = V::Mask::first_offset; - let len = end.distance(start); - debug_assert!( - len >= V::BYTES, - "haystack has length {}, but must be at least {}", - len, - V::BYTES - ); - - // Search a possibly unaligned chunk at `start`. This covers any part - // of the haystack prior to where aligned loads can start. - if let Some(cur) = self.search_chunk(start, topos) { - return Some(cur); - } - // Set `cur` to the first V-aligned pointer greater than `start`. - let mut cur = start.add(V::BYTES - (start.as_usize() & V::ALIGN)); - debug_assert!(cur > start && end.sub(V::BYTES) >= start); - if len >= Self::LOOP_SIZE { - while cur <= end.sub(Self::LOOP_SIZE) { - debug_assert_eq!(0, cur.as_usize() % V::BYTES); - - let a = V::load_aligned(cur); - let b = V::load_aligned(cur.add(V::BYTES)); - let eqa1 = self.v1.cmpeq(a); - let eqb1 = self.v1.cmpeq(b); - let eqa2 = self.v2.cmpeq(a); - let eqb2 = self.v2.cmpeq(b); - let or1 = eqa1.or(eqb1); - let or2 = eqa2.or(eqb2); - let or3 = or1.or(or2); - if or3.movemask_will_have_non_zero() { - let mask = eqa1.movemask().or(eqa2.movemask()); - if mask.has_non_zero() { - return Some(cur.add(topos(mask))); - } - - let mask = eqb1.movemask().or(eqb2.movemask()); - debug_assert!(mask.has_non_zero()); - return Some(cur.add(V::BYTES).add(topos(mask))); - } - cur = cur.add(Self::LOOP_SIZE); - } - } - // Handle any leftovers after the aligned loop above. We use unaligned - // loads here, but I believe we are guaranteed that they are aligned - // since `cur` is aligned. 
- while cur <= end.sub(V::BYTES) { - debug_assert!(end.distance(cur) >= V::BYTES); - if let Some(cur) = self.search_chunk(cur, topos) { - return Some(cur); - } - cur = cur.add(V::BYTES); - } - // Finally handle any remaining bytes less than the size of V. In this - // case, our pointer may indeed be unaligned and the load may overlap - // with the previous one. But that's okay since we know the previous - // load didn't lead to a match (otherwise we wouldn't be here). - if cur < end { - debug_assert!(end.distance(cur) < V::BYTES); - cur = cur.sub(V::BYTES - end.distance(cur)); - debug_assert_eq!(end.distance(cur), V::BYTES); - return self.search_chunk(cur, topos); - } - None - } - - /// Return a pointer to the last occurrence of the needle in the given - /// haystack. If no such occurrence exists, then `None` is returned. - /// - /// When a match is found, the pointer returned is guaranteed to be - /// `>= start` and `< end`. - /// - /// # Safety - /// - /// * It must be the case that `start < end` and that the distance between - /// them is at least equal to `V::BYTES`. That is, it must always be valid - /// to do at least an unaligned load of `V` at `start`. - /// * Both `start` and `end` must be valid for reads. - /// * Both `start` and `end` must point to an initialized value. - /// * Both `start` and `end` must point to the same allocated object and - /// must either be in bounds or at most one byte past the end of the - /// allocated object. - /// * Both `start` and `end` must be _derived from_ a pointer to the same - /// object. - /// * The distance between `start` and `end` must not overflow `isize`. - /// * The distance being in bounds must not rely on "wrapping around" the - /// address space. - #[inline(always)] - pub(crate) unsafe fn rfind_raw( - &self, - start: *const u8, - end: *const u8, - ) -> Option<*const u8> { - // If we want to support vectors bigger than 256 bits, we probably - // need to move up to using a u64 for the masks used below. Currently - // they are 32 bits, which means we're SOL for vectors that need masks - // bigger than 32 bits. Overall unclear until there's a use case. 
- debug_assert!(V::BYTES <= 32, "vector cannot be bigger than 32 bytes"); - - let topos = V::Mask::last_offset; - let len = end.distance(start); - debug_assert!( - len >= V::BYTES, - "haystack has length {}, but must be at least {}", - len, - V::BYTES - ); - - if let Some(cur) = self.search_chunk(end.sub(V::BYTES), topos) { - return Some(cur); - } - let mut cur = end.sub(end.as_usize() & V::ALIGN); - debug_assert!(start <= cur && cur <= end); - if len >= Self::LOOP_SIZE { - while cur >= start.add(Self::LOOP_SIZE) { - debug_assert_eq!(0, cur.as_usize() % V::BYTES); - - cur = cur.sub(Self::LOOP_SIZE); - let a = V::load_aligned(cur); - let b = V::load_aligned(cur.add(V::BYTES)); - let eqa1 = self.v1.cmpeq(a); - let eqb1 = self.v1.cmpeq(b); - let eqa2 = self.v2.cmpeq(a); - let eqb2 = self.v2.cmpeq(b); - let or1 = eqa1.or(eqb1); - let or2 = eqa2.or(eqb2); - let or3 = or1.or(or2); - if or3.movemask_will_have_non_zero() { - let mask = eqb1.movemask().or(eqb2.movemask()); - if mask.has_non_zero() { - return Some(cur.add(V::BYTES).add(topos(mask))); - } - - let mask = eqa1.movemask().or(eqa2.movemask()); - debug_assert!(mask.has_non_zero()); - return Some(cur.add(topos(mask))); - } - } - } - while cur >= start.add(V::BYTES) { - debug_assert!(cur.distance(start) >= V::BYTES); - cur = cur.sub(V::BYTES); - if let Some(cur) = self.search_chunk(cur, topos) { - return Some(cur); - } - } - if cur > start { - debug_assert!(cur.distance(start) < V::BYTES); - return self.search_chunk(start, topos); - } - None - } - - /// Search `V::BYTES` starting at `cur` via an unaligned load. - /// - /// `mask_to_offset` should be a function that converts a `movemask` to - /// an offset such that `cur.add(offset)` corresponds to a pointer to the - /// match location if one is found. Generally it is expected to use either - /// `mask_to_first_offset` or `mask_to_last_offset`, depending on whether - /// one is implementing a forward or reverse search, respectively. - /// - /// # Safety - /// - /// `cur` must be a valid pointer and it must be valid to do an unaligned - /// load of size `V::BYTES` at `cur`. - #[inline(always)] - unsafe fn search_chunk( - &self, - cur: *const u8, - mask_to_offset: impl Fn(V::Mask) -> usize, - ) -> Option<*const u8> { - let chunk = V::load_unaligned(cur); - let eq1 = self.v1.cmpeq(chunk); - let eq2 = self.v2.cmpeq(chunk); - let mask = eq1.or(eq2).movemask(); - if mask.has_non_zero() { - let mask1 = eq1.movemask(); - let mask2 = eq2.movemask(); - Some(cur.add(mask_to_offset(mask1.or(mask2)))) - } else { - None - } - } -} - -/// Finds all occurrences of two bytes in a haystack. -/// -/// That is, this reports matches of one of two possible bytes. For example, -/// searching for `a` or `b` in `afoobar` would report matches at offsets `0`, -/// `4` and `5`. -#[derive(Clone, Copy, Debug)] -pub(crate) struct Three { - s1: u8, - s2: u8, - s3: u8, - v1: V, - v2: V, - v3: V, -} - -impl Three { - /// The number of bytes we examine per each iteration of our search loop. - const LOOP_SIZE: usize = 2 * V::BYTES; - - /// Create a new searcher that finds occurrences of the byte given. - #[inline(always)] - pub(crate) unsafe fn new( - needle1: u8, - needle2: u8, - needle3: u8, - ) -> Three { - Three { - s1: needle1, - s2: needle2, - s3: needle3, - v1: V::splat(needle1), - v2: V::splat(needle2), - v3: V::splat(needle3), - } - } - - /// Returns the first needle given to `Three::new`. - #[inline(always)] - pub(crate) fn needle1(&self) -> u8 { - self.s1 - } - - /// Returns the second needle given to `Three::new`. 
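// [Editor's sketch, not part of the vendored memchr source] The scalar
// behaviour that the `Three` searcher above vectorises: report the first
// offset holding any one of three needle bytes. Illustrative only.
fn find_any_of_three(haystack: &[u8], n1: u8, n2: u8, n3: u8) -> Option<usize> {
    haystack.iter().position(|&b| b == n1 || b == n2 || b == n3)
}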
- #[inline(always)] - pub(crate) fn needle2(&self) -> u8 { - self.s2 - } - - /// Returns the third needle given to `Three::new`. - #[inline(always)] - pub(crate) fn needle3(&self) -> u8 { - self.s3 - } - - /// Return a pointer to the first occurrence of one of the needles in the - /// given haystack. If no such occurrence exists, then `None` is returned. - /// - /// When a match is found, the pointer returned is guaranteed to be - /// `>= start` and `< end`. - /// - /// # Safety - /// - /// * It must be the case that `start < end` and that the distance between - /// them is at least equal to `V::BYTES`. That is, it must always be valid - /// to do at least an unaligned load of `V` at `start`. - /// * Both `start` and `end` must be valid for reads. - /// * Both `start` and `end` must point to an initialized value. - /// * Both `start` and `end` must point to the same allocated object and - /// must either be in bounds or at most one byte past the end of the - /// allocated object. - /// * Both `start` and `end` must be _derived from_ a pointer to the same - /// object. - /// * The distance between `start` and `end` must not overflow `isize`. - /// * The distance being in bounds must not rely on "wrapping around" the - /// address space. - #[inline(always)] - pub(crate) unsafe fn find_raw( - &self, - start: *const u8, - end: *const u8, - ) -> Option<*const u8> { - // If we want to support vectors bigger than 256 bits, we probably - // need to move up to using a u64 for the masks used below. Currently - // they are 32 bits, which means we're SOL for vectors that need masks - // bigger than 32 bits. Overall unclear until there's a use case. - debug_assert!(V::BYTES <= 32, "vector cannot be bigger than 32 bytes"); - - let topos = V::Mask::first_offset; - let len = end.distance(start); - debug_assert!( - len >= V::BYTES, - "haystack has length {}, but must be at least {}", - len, - V::BYTES - ); - - // Search a possibly unaligned chunk at `start`. This covers any part - // of the haystack prior to where aligned loads can start. - if let Some(cur) = self.search_chunk(start, topos) { - return Some(cur); - } - // Set `cur` to the first V-aligned pointer greater than `start`. - let mut cur = start.add(V::BYTES - (start.as_usize() & V::ALIGN)); - debug_assert!(cur > start && end.sub(V::BYTES) >= start); - if len >= Self::LOOP_SIZE { - while cur <= end.sub(Self::LOOP_SIZE) { - debug_assert_eq!(0, cur.as_usize() % V::BYTES); - - let a = V::load_aligned(cur); - let b = V::load_aligned(cur.add(V::BYTES)); - let eqa1 = self.v1.cmpeq(a); - let eqb1 = self.v1.cmpeq(b); - let eqa2 = self.v2.cmpeq(a); - let eqb2 = self.v2.cmpeq(b); - let eqa3 = self.v3.cmpeq(a); - let eqb3 = self.v3.cmpeq(b); - let or1 = eqa1.or(eqb1); - let or2 = eqa2.or(eqb2); - let or3 = eqa3.or(eqb3); - let or4 = or1.or(or2); - let or5 = or3.or(or4); - if or5.movemask_will_have_non_zero() { - let mask = eqa1 - .movemask() - .or(eqa2.movemask()) - .or(eqa3.movemask()); - if mask.has_non_zero() { - return Some(cur.add(topos(mask))); - } - - let mask = eqb1 - .movemask() - .or(eqb2.movemask()) - .or(eqb3.movemask()); - debug_assert!(mask.has_non_zero()); - return Some(cur.add(V::BYTES).add(topos(mask))); - } - cur = cur.add(Self::LOOP_SIZE); - } - } - // Handle any leftovers after the aligned loop above. We use unaligned - // loads here, but I believe we are guaranteed that they are aligned - // since `cur` is aligned. 
- while cur <= end.sub(V::BYTES) { - debug_assert!(end.distance(cur) >= V::BYTES); - if let Some(cur) = self.search_chunk(cur, topos) { - return Some(cur); - } - cur = cur.add(V::BYTES); - } - // Finally handle any remaining bytes less than the size of V. In this - // case, our pointer may indeed be unaligned and the load may overlap - // with the previous one. But that's okay since we know the previous - // load didn't lead to a match (otherwise we wouldn't be here). - if cur < end { - debug_assert!(end.distance(cur) < V::BYTES); - cur = cur.sub(V::BYTES - end.distance(cur)); - debug_assert_eq!(end.distance(cur), V::BYTES); - return self.search_chunk(cur, topos); - } - None - } - - /// Return a pointer to the last occurrence of the needle in the given - /// haystack. If no such occurrence exists, then `None` is returned. - /// - /// When a match is found, the pointer returned is guaranteed to be - /// `>= start` and `< end`. - /// - /// # Safety - /// - /// * It must be the case that `start < end` and that the distance between - /// them is at least equal to `V::BYTES`. That is, it must always be valid - /// to do at least an unaligned load of `V` at `start`. - /// * Both `start` and `end` must be valid for reads. - /// * Both `start` and `end` must point to an initialized value. - /// * Both `start` and `end` must point to the same allocated object and - /// must either be in bounds or at most one byte past the end of the - /// allocated object. - /// * Both `start` and `end` must be _derived from_ a pointer to the same - /// object. - /// * The distance between `start` and `end` must not overflow `isize`. - /// * The distance being in bounds must not rely on "wrapping around" the - /// address space. - #[inline(always)] - pub(crate) unsafe fn rfind_raw( - &self, - start: *const u8, - end: *const u8, - ) -> Option<*const u8> { - // If we want to support vectors bigger than 256 bits, we probably - // need to move up to using a u64 for the masks used below. Currently - // they are 32 bits, which means we're SOL for vectors that need masks - // bigger than 32 bits. Overall unclear until there's a use case. 
- debug_assert!(V::BYTES <= 32, "vector cannot be bigger than 32 bytes"); - - let topos = V::Mask::last_offset; - let len = end.distance(start); - debug_assert!( - len >= V::BYTES, - "haystack has length {}, but must be at least {}", - len, - V::BYTES - ); - - if let Some(cur) = self.search_chunk(end.sub(V::BYTES), topos) { - return Some(cur); - } - let mut cur = end.sub(end.as_usize() & V::ALIGN); - debug_assert!(start <= cur && cur <= end); - if len >= Self::LOOP_SIZE { - while cur >= start.add(Self::LOOP_SIZE) { - debug_assert_eq!(0, cur.as_usize() % V::BYTES); - - cur = cur.sub(Self::LOOP_SIZE); - let a = V::load_aligned(cur); - let b = V::load_aligned(cur.add(V::BYTES)); - let eqa1 = self.v1.cmpeq(a); - let eqb1 = self.v1.cmpeq(b); - let eqa2 = self.v2.cmpeq(a); - let eqb2 = self.v2.cmpeq(b); - let eqa3 = self.v3.cmpeq(a); - let eqb3 = self.v3.cmpeq(b); - let or1 = eqa1.or(eqb1); - let or2 = eqa2.or(eqb2); - let or3 = eqa3.or(eqb3); - let or4 = or1.or(or2); - let or5 = or3.or(or4); - if or5.movemask_will_have_non_zero() { - let mask = eqb1 - .movemask() - .or(eqb2.movemask()) - .or(eqb3.movemask()); - if mask.has_non_zero() { - return Some(cur.add(V::BYTES).add(topos(mask))); - } - - let mask = eqa1 - .movemask() - .or(eqa2.movemask()) - .or(eqa3.movemask()); - debug_assert!(mask.has_non_zero()); - return Some(cur.add(topos(mask))); - } - } - } - while cur >= start.add(V::BYTES) { - debug_assert!(cur.distance(start) >= V::BYTES); - cur = cur.sub(V::BYTES); - if let Some(cur) = self.search_chunk(cur, topos) { - return Some(cur); - } - } - if cur > start { - debug_assert!(cur.distance(start) < V::BYTES); - return self.search_chunk(start, topos); - } - None - } - - /// Search `V::BYTES` starting at `cur` via an unaligned load. - /// - /// `mask_to_offset` should be a function that converts a `movemask` to - /// an offset such that `cur.add(offset)` corresponds to a pointer to the - /// match location if one is found. Generally it is expected to use either - /// `mask_to_first_offset` or `mask_to_last_offset`, depending on whether - /// one is implementing a forward or reverse search, respectively. - /// - /// # Safety - /// - /// `cur` must be a valid pointer and it must be valid to do an unaligned - /// load of size `V::BYTES` at `cur`. - #[inline(always)] - unsafe fn search_chunk( - &self, - cur: *const u8, - mask_to_offset: impl Fn(V::Mask) -> usize, - ) -> Option<*const u8> { - let chunk = V::load_unaligned(cur); - let eq1 = self.v1.cmpeq(chunk); - let eq2 = self.v2.cmpeq(chunk); - let eq3 = self.v3.cmpeq(chunk); - let mask = eq1.or(eq2).or(eq3).movemask(); - if mask.has_non_zero() { - let mask1 = eq1.movemask(); - let mask2 = eq2.movemask(); - let mask3 = eq3.movemask(); - Some(cur.add(mask_to_offset(mask1.or(mask2).or(mask3)))) - } else { - None - } - } -} - -/// An iterator over all occurrences of a set of bytes in a haystack. -/// -/// This iterator implements the routines necessary to provide a -/// `DoubleEndedIterator` impl, which means it can also be used to find -/// occurrences in reverse order. -/// -/// The lifetime parameters are as follows: -/// -/// * `'h` refers to the lifetime of the haystack being searched. -/// -/// This type is intended to be used to implement all iterators for the -/// `memchr` family of functions. It handles a tiny bit of marginally tricky -/// raw pointer math, but otherwise expects the caller to provide `find_raw` -/// and `rfind_raw` routines for each call of `next` and `next_back`, -/// respectively. 
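// [Editor's sketch, not part of the vendored memchr source] The raw-pointer
// `Iter` described above narrows a start/end window from either side and
// hands each sub-search to `find_raw`/`rfind_raw`. A safe, slice-based
// equivalent of that narrowing logic; `OffsetsSketch` is an illustrative
// name only.
struct OffsetsSketch<'h> {
    haystack: &'h [u8],
    needle: u8,
    start: usize,
    end: usize,
}

impl<'h> Iterator for OffsetsSketch<'h> {
    type Item = usize;

    fn next(&mut self) -> Option<usize> {
        // Forward search of the unvisited window; advance `start` past the hit.
        let rel = self.haystack[self.start..self.end]
            .iter()
            .position(|&b| b == self.needle)?;
        let abs = self.start + rel;
        self.start = abs + 1;
        Some(abs)
    }
}

impl<'h> DoubleEndedIterator for OffsetsSketch<'h> {
    fn next_back(&mut self) -> Option<usize> {
        // Reverse search of the same window; shrink `end` down to the hit.
        let rel = self.haystack[self.start..self.end]
            .iter()
            .rposition(|&b| b == self.needle)?;
        let abs = self.start + rel;
        self.end = abs;
        Some(abs)
    }
}
// e.g. for haystack "abcabc" and needle b'a' (start = 0, end = 6), the
// offsets come out 0 then 3 from the front, or 3 then 0 from the back.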
-#[derive(Clone, Debug)] -pub(crate) struct Iter<'h> { - /// The original starting point into the haystack. We use this to convert - /// pointers to offsets. - original_start: *const u8, - /// The current starting point into the haystack. That is, where the next - /// search will begin. - start: *const u8, - /// The current ending point into the haystack. That is, where the next - /// reverse search will begin. - end: *const u8, - /// A marker for tracking the lifetime of the start/cur_start/cur_end - /// pointers above, which all point into the haystack. - haystack: core::marker::PhantomData<&'h [u8]>, -} - -// SAFETY: Iter contains no shared references to anything that performs any -// interior mutations. Also, the lifetime guarantees that Iter will not outlive -// the haystack. -unsafe impl<'h> Send for Iter<'h> {} - -// SAFETY: Iter perform no interior mutations, therefore no explicit -// synchronization is necessary. Also, the lifetime guarantees that Iter will -// not outlive the haystack. -unsafe impl<'h> Sync for Iter<'h> {} - -impl<'h> Iter<'h> { - /// Create a new generic memchr iterator. - #[inline(always)] - pub(crate) fn new(haystack: &'h [u8]) -> Iter<'h> { - Iter { - original_start: haystack.as_ptr(), - start: haystack.as_ptr(), - end: haystack.as_ptr().wrapping_add(haystack.len()), - haystack: core::marker::PhantomData, - } - } - - /// Returns the next occurrence in the forward direction. - /// - /// # Safety - /// - /// Callers must ensure that if a pointer is returned from the closure - /// provided, then it must be greater than or equal to the start pointer - /// and less than the end pointer. - #[inline(always)] - pub(crate) unsafe fn next( - &mut self, - mut find_raw: impl FnMut(*const u8, *const u8) -> Option<*const u8>, - ) -> Option { - // SAFETY: Pointers are derived directly from the same &[u8] haystack. - // We only ever modify start/end corresponding to a matching offset - // found between start and end. Thus all changes to start/end maintain - // our safety requirements. - // - // The only other assumption we rely on is that the pointer returned - // by `find_raw` satisfies `self.start <= found < self.end`, and that - // safety contract is forwarded to the caller. - let found = find_raw(self.start, self.end)?; - let result = found.distance(self.original_start); - self.start = found.add(1); - Some(result) - } - - /// Returns the number of remaining elements in this iterator. - #[inline(always)] - pub(crate) fn count( - self, - mut count_raw: impl FnMut(*const u8, *const u8) -> usize, - ) -> usize { - // SAFETY: Pointers are derived directly from the same &[u8] haystack. - // We only ever modify start/end corresponding to a matching offset - // found between start and end. Thus all changes to start/end maintain - // our safety requirements. - count_raw(self.start, self.end) - } - - /// Returns the next occurrence in reverse. - /// - /// # Safety - /// - /// Callers must ensure that if a pointer is returned from the closure - /// provided, then it must be greater than or equal to the start pointer - /// and less than the end pointer. - #[inline(always)] - pub(crate) unsafe fn next_back( - &mut self, - mut rfind_raw: impl FnMut(*const u8, *const u8) -> Option<*const u8>, - ) -> Option { - // SAFETY: Pointers are derived directly from the same &[u8] haystack. - // We only ever modify start/end corresponding to a matching offset - // found between start and end. Thus all changes to start/end maintain - // our safety requirements. 
- // - // The only other assumption we rely on is that the pointer returned - // by `rfind_raw` satisfies `self.start <= found < self.end`, and that - // safety contract is forwarded to the caller. - let found = rfind_raw(self.start, self.end)?; - let result = found.distance(self.original_start); - self.end = found; - Some(result) - } - - /// Provides an implementation of `Iterator::size_hint`. - #[inline(always)] - pub(crate) fn size_hint(&self) -> (usize, Option) { - (0, Some(self.end.as_usize().saturating_sub(self.start.as_usize()))) - } -} - -/// Search a slice using a function that operates on raw pointers. -/// -/// Given a function to search a contiguous sequence of memory for the location -/// of a non-empty set of bytes, this will execute that search on a slice of -/// bytes. The pointer returned by the given function will be converted to an -/// offset relative to the starting point of the given slice. That is, if a -/// match is found, the offset returned by this routine is guaranteed to be a -/// valid index into `haystack`. -/// -/// Callers may use this for a forward or reverse search. -/// -/// # Safety -/// -/// Callers must ensure that if a pointer is returned by `find_raw`, then the -/// pointer must be greater than or equal to the starting pointer and less than -/// the end pointer. -#[inline(always)] -pub(crate) unsafe fn search_slice_with_raw( - haystack: &[u8], - mut find_raw: impl FnMut(*const u8, *const u8) -> Option<*const u8>, -) -> Option { - // SAFETY: We rely on `find_raw` to return a correct and valid pointer, but - // otherwise, `start` and `end` are valid due to the guarantees provided by - // a &[u8]. - let start = haystack.as_ptr(); - let end = start.add(haystack.len()); - let found = find_raw(start, end)?; - Some(found.distance(start)) -} - -/// Performs a forward byte-at-a-time loop until either `ptr >= end_ptr` or -/// until `confirm(*ptr)` returns `true`. If the former occurs, then `None` is -/// returned. If the latter occurs, then the pointer at which `confirm` returns -/// `true` is returned. -/// -/// # Safety -/// -/// Callers must provide valid pointers and they must satisfy `start_ptr <= -/// ptr` and `ptr <= end_ptr`. -#[inline(always)] -pub(crate) unsafe fn fwd_byte_by_byte bool>( - start: *const u8, - end: *const u8, - confirm: F, -) -> Option<*const u8> { - debug_assert!(start <= end); - let mut ptr = start; - while ptr < end { - if confirm(*ptr) { - return Some(ptr); - } - ptr = ptr.offset(1); - } - None -} - -/// Performs a reverse byte-at-a-time loop until either `ptr < start_ptr` or -/// until `confirm(*ptr)` returns `true`. If the former occurs, then `None` is -/// returned. If the latter occurs, then the pointer at which `confirm` returns -/// `true` is returned. -/// -/// # Safety -/// -/// Callers must provide valid pointers and they must satisfy `start_ptr <= -/// ptr` and `ptr <= end_ptr`. -#[inline(always)] -pub(crate) unsafe fn rev_byte_by_byte bool>( - start: *const u8, - end: *const u8, - confirm: F, -) -> Option<*const u8> { - debug_assert!(start <= end); - - let mut ptr = end; - while ptr > start { - ptr = ptr.offset(-1); - if confirm(*ptr) { - return Some(ptr); - } - } - None -} - -/// Performs a forward byte-at-a-time loop until `ptr >= end_ptr` and returns -/// the number of times `confirm(*ptr)` returns `true`. -/// -/// # Safety -/// -/// Callers must provide valid pointers and they must satisfy `start_ptr <= -/// ptr` and `ptr <= end_ptr`. 
-#[inline(always)] -pub(crate) unsafe fn count_byte_by_byte bool>( - start: *const u8, - end: *const u8, - confirm: F, -) -> usize { - debug_assert!(start <= end); - let mut ptr = start; - let mut count = 0; - while ptr < end { - if confirm(*ptr) { - count += 1; - } - ptr = ptr.offset(1); - } - count -} diff --git a/vendor/memchr/src/arch/generic/mod.rs b/vendor/memchr/src/arch/generic/mod.rs deleted file mode 100644 index 63ee3f0b34ed96..00000000000000 --- a/vendor/memchr/src/arch/generic/mod.rs +++ /dev/null @@ -1,14 +0,0 @@ -/*! -This module defines "generic" routines that can be specialized to specific -architectures. - -We don't expose this module primarily because it would require exposing all -of the internal infrastructure required to write these generic routines. -That infrastructure should be treated as an implementation detail so that -it is allowed to evolve. Instead, what we expose are architecture specific -instantiations of these generic implementations. The generic code just lets us -write the code once (usually). -*/ - -pub(crate) mod memchr; -pub(crate) mod packedpair; diff --git a/vendor/memchr/src/arch/generic/packedpair.rs b/vendor/memchr/src/arch/generic/packedpair.rs deleted file mode 100644 index 8d97cf28fad117..00000000000000 --- a/vendor/memchr/src/arch/generic/packedpair.rs +++ /dev/null @@ -1,317 +0,0 @@ -/*! -Generic crate-internal routines for the "packed pair" SIMD algorithm. - -The "packed pair" algorithm is based on the [generic SIMD] algorithm. The main -difference is that it (by default) uses a background distribution of byte -frequencies to heuristically select the pair of bytes to search for. - -[generic SIMD]: http://0x80.pl/articles/simd-strfind.html#first-and-last -*/ - -use crate::{ - arch::all::{is_equal_raw, packedpair::Pair}, - ext::Pointer, - vector::{MoveMask, Vector}, -}; - -/// A generic architecture dependent "packed pair" finder. -/// -/// This finder picks two bytes that it believes have high predictive power -/// for indicating an overall match of a needle. Depending on whether -/// `Finder::find` or `Finder::find_prefilter` is used, it reports offsets -/// where the needle matches or could match. In the prefilter case, candidates -/// are reported whenever the [`Pair`] of bytes given matches. -/// -/// This is architecture dependent because it uses specific vector operations -/// to look for occurrences of the pair of bytes. -/// -/// This type is not meant to be exported and is instead meant to be used as -/// the implementation for architecture specific facades. Why? Because it's a -/// bit of a quirky API that requires `inline(always)` annotations. And pretty -/// much everything has safety obligations due (at least) to the caller needing -/// to inline calls into routines marked with -/// `#[target_feature(enable = "...")]`. -#[derive(Clone, Copy, Debug)] -pub(crate) struct Finder { - pair: Pair, - v1: V, - v2: V, - min_haystack_len: usize, -} - -impl Finder { - /// Create a new pair searcher. The searcher returned can either report - /// exact matches of `needle` or act as a prefilter and report candidate - /// positions of `needle`. - /// - /// # Safety - /// - /// Callers must ensure that whatever vector type this routine is called - /// with is supported by the current environment. - /// - /// Callers must also ensure that `needle.len() >= 2`. 
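// [Editor's sketch, not part of the vendored memchr source] The "packed
// pair" strategy documented above: test two chosen needle bytes at their
// offsets as a cheap prefilter, and only run a full comparison on the
// candidates that pass. `index1`/`index2` stand in for the offsets a `Pair`
// would select; this scalar version is illustrative only.
fn packed_pair_sketch(
    haystack: &[u8],
    needle: &[u8],
    index1: usize,
    index2: usize,
) -> Option<usize> {
    assert!(needle.len() >= 2);
    assert!(index1 < needle.len() && index2 < needle.len());
    let (b1, b2) = (needle[index1], needle[index2]);
    for start in 0..=haystack.len().checked_sub(needle.len())? {
        // Prefilter: both pair bytes must match at their offsets.
        if haystack[start + index1] == b1 && haystack[start + index2] == b2 {
            // Candidate confirmed with a full comparison.
            if &haystack[start..start + needle.len()] == needle {
                return Some(start);
            }
        }
    }
    None
}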
- #[inline(always)] - pub(crate) unsafe fn new(needle: &[u8], pair: Pair) -> Finder { - let max_index = pair.index1().max(pair.index2()); - let min_haystack_len = - core::cmp::max(needle.len(), usize::from(max_index) + V::BYTES); - let v1 = V::splat(needle[usize::from(pair.index1())]); - let v2 = V::splat(needle[usize::from(pair.index2())]); - Finder { pair, v1, v2, min_haystack_len } - } - - /// Searches the given haystack for the given needle. The needle given - /// should be the same as the needle that this finder was initialized - /// with. - /// - /// # Panics - /// - /// When `haystack.len()` is less than [`Finder::min_haystack_len`]. - /// - /// # Safety - /// - /// Since this is meant to be used with vector functions, callers need to - /// specialize this inside of a function with a `target_feature` attribute. - /// Therefore, callers must ensure that whatever target feature is being - /// used supports the vector functions that this function is specialized - /// for. (For the specific vector functions used, see the Vector trait - /// implementations.) - #[inline(always)] - pub(crate) unsafe fn find( - &self, - haystack: &[u8], - needle: &[u8], - ) -> Option { - assert!( - haystack.len() >= self.min_haystack_len, - "haystack too small, should be at least {} but got {}", - self.min_haystack_len, - haystack.len(), - ); - - let all = V::Mask::all_zeros_except_least_significant(0); - let start = haystack.as_ptr(); - let end = start.add(haystack.len()); - let max = end.sub(self.min_haystack_len); - let mut cur = start; - - // N.B. I did experiment with unrolling the loop to deal with size(V) - // bytes at a time and 2*size(V) bytes at a time. The double unroll - // was marginally faster while the quadruple unroll was unambiguously - // slower. In the end, I decided the complexity from unrolling wasn't - // worth it. I used the memmem/krate/prebuilt/huge-en/ benchmarks to - // compare. - while cur <= max { - if let Some(chunki) = self.find_in_chunk(needle, cur, end, all) { - return Some(matched(start, cur, chunki)); - } - cur = cur.add(V::BYTES); - } - if cur < end { - let remaining = end.distance(cur); - debug_assert!( - remaining < self.min_haystack_len, - "remaining bytes should be smaller than the minimum haystack \ - length of {}, but there are {} bytes remaining", - self.min_haystack_len, - remaining, - ); - if remaining < needle.len() { - return None; - } - debug_assert!( - max < cur, - "after main loop, cur should have exceeded max", - ); - let overlap = cur.distance(max); - debug_assert!( - overlap > 0, - "overlap ({}) must always be non-zero", - overlap, - ); - debug_assert!( - overlap < V::BYTES, - "overlap ({}) cannot possibly be >= than a vector ({})", - overlap, - V::BYTES, - ); - // The mask has all of its bits set except for the first N least - // significant bits, where N=overlap. This way, any matches that - // occur in find_in_chunk within the overlap are automatically - // ignored. - let mask = V::Mask::all_zeros_except_least_significant(overlap); - cur = max; - let m = self.find_in_chunk(needle, cur, end, mask); - if let Some(chunki) = m { - return Some(matched(start, cur, chunki)); - } - } - None - } - - /// Searches the given haystack for offsets that represent candidate - /// matches of the `needle` given to this finder's constructor. The offsets - /// returned, if they are a match, correspond to the starting offset of - /// `needle` in the given `haystack`. - /// - /// # Panics - /// - /// When `haystack.len()` is less than [`Finder::min_haystack_len`]. 
- /// - /// # Safety - /// - /// Since this is meant to be used with vector functions, callers need to - /// specialize this inside of a function with a `target_feature` attribute. - /// Therefore, callers must ensure that whatever target feature is being - /// used supports the vector functions that this function is specialized - /// for. (For the specific vector functions used, see the Vector trait - /// implementations.) - #[inline(always)] - pub(crate) unsafe fn find_prefilter( - &self, - haystack: &[u8], - ) -> Option { - assert!( - haystack.len() >= self.min_haystack_len, - "haystack too small, should be at least {} but got {}", - self.min_haystack_len, - haystack.len(), - ); - - let start = haystack.as_ptr(); - let end = start.add(haystack.len()); - let max = end.sub(self.min_haystack_len); - let mut cur = start; - - // N.B. I did experiment with unrolling the loop to deal with size(V) - // bytes at a time and 2*size(V) bytes at a time. The double unroll - // was marginally faster while the quadruple unroll was unambiguously - // slower. In the end, I decided the complexity from unrolling wasn't - // worth it. I used the memmem/krate/prebuilt/huge-en/ benchmarks to - // compare. - while cur <= max { - if let Some(chunki) = self.find_prefilter_in_chunk(cur) { - return Some(matched(start, cur, chunki)); - } - cur = cur.add(V::BYTES); - } - if cur < end { - // This routine immediately quits if a candidate match is found. - // That means that if we're here, no candidate matches have been - // found at or before 'ptr'. Thus, we don't need to mask anything - // out even though we might technically search part of the haystack - // that we've already searched (because we know it can't match). - cur = max; - if let Some(chunki) = self.find_prefilter_in_chunk(cur) { - return Some(matched(start, cur, chunki)); - } - } - None - } - - /// Search for an occurrence of our byte pair from the needle in the chunk - /// pointed to by cur, with the end of the haystack pointed to by end. - /// When an occurrence is found, memcmp is run to check if a match occurs - /// at the corresponding position. - /// - /// `mask` should have bits set corresponding the positions in the chunk - /// in which matches are considered. This is only used for the last vector - /// load where the beginning of the vector might have overlapped with the - /// last load in the main loop. The mask lets us avoid visiting positions - /// that have already been discarded as matches. - /// - /// # Safety - /// - /// It must be safe to do an unaligned read of size(V) bytes starting at - /// both (cur + self.index1) and (cur + self.index2). It must also be safe - /// to do unaligned loads on cur up to (end - needle.len()). 
- #[inline(always)] - unsafe fn find_in_chunk( - &self, - needle: &[u8], - cur: *const u8, - end: *const u8, - mask: V::Mask, - ) -> Option { - let index1 = usize::from(self.pair.index1()); - let index2 = usize::from(self.pair.index2()); - let chunk1 = V::load_unaligned(cur.add(index1)); - let chunk2 = V::load_unaligned(cur.add(index2)); - let eq1 = chunk1.cmpeq(self.v1); - let eq2 = chunk2.cmpeq(self.v2); - - let mut offsets = eq1.and(eq2).movemask().and(mask); - while offsets.has_non_zero() { - let offset = offsets.first_offset(); - let cur = cur.add(offset); - if end.sub(needle.len()) < cur { - return None; - } - if is_equal_raw(needle.as_ptr(), cur, needle.len()) { - return Some(offset); - } - offsets = offsets.clear_least_significant_bit(); - } - None - } - - /// Search for an occurrence of our byte pair from the needle in the chunk - /// pointed to by cur, with the end of the haystack pointed to by end. - /// When an occurrence is found, memcmp is run to check if a match occurs - /// at the corresponding position. - /// - /// # Safety - /// - /// It must be safe to do an unaligned read of size(V) bytes starting at - /// both (cur + self.index1) and (cur + self.index2). It must also be safe - /// to do unaligned reads on cur up to (end - needle.len()). - #[inline(always)] - unsafe fn find_prefilter_in_chunk(&self, cur: *const u8) -> Option { - let index1 = usize::from(self.pair.index1()); - let index2 = usize::from(self.pair.index2()); - let chunk1 = V::load_unaligned(cur.add(index1)); - let chunk2 = V::load_unaligned(cur.add(index2)); - let eq1 = chunk1.cmpeq(self.v1); - let eq2 = chunk2.cmpeq(self.v2); - - let offsets = eq1.and(eq2).movemask(); - if !offsets.has_non_zero() { - return None; - } - Some(offsets.first_offset()) - } - - /// Returns the pair of offsets (into the needle) used to check as a - /// predicate before confirming whether a needle exists at a particular - /// position. - #[inline] - pub(crate) fn pair(&self) -> &Pair { - &self.pair - } - - /// Returns the minimum haystack length that this `Finder` can search. - /// - /// Providing a haystack to this `Finder` shorter than this length is - /// guaranteed to result in a panic. - #[inline(always)] - pub(crate) fn min_haystack_len(&self) -> usize { - self.min_haystack_len - } -} - -/// Accepts a chunk-relative offset and returns a haystack relative offset. -/// -/// This used to be marked `#[cold]` and `#[inline(never)]`, but I couldn't -/// observe a consistent measureable difference between that and just inlining -/// it. So we go with inlining it. -/// -/// # Safety -/// -/// Same at `ptr::offset_from` in addition to `cur >= start`. -#[inline(always)] -unsafe fn matched(start: *const u8, cur: *const u8, chunki: usize) -> usize { - cur.distance(start) + chunki -} - -// If you're looking for tests, those are run for each instantiation of the -// above code. So for example, see arch::x86_64::sse2::packedpair. diff --git a/vendor/memchr/src/arch/mod.rs b/vendor/memchr/src/arch/mod.rs deleted file mode 100644 index 10332b64cd5290..00000000000000 --- a/vendor/memchr/src/arch/mod.rs +++ /dev/null @@ -1,16 +0,0 @@ -/*! -A module with low-level architecture dependent routines. - -These routines are useful as primitives for tasks not covered by the higher -level crate API. 
-*/ - -pub mod all; -pub(crate) mod generic; - -#[cfg(target_arch = "aarch64")] -pub mod aarch64; -#[cfg(all(target_arch = "wasm32", target_feature = "simd128"))] -pub mod wasm32; -#[cfg(target_arch = "x86_64")] -pub mod x86_64; diff --git a/vendor/memchr/src/arch/wasm32/memchr.rs b/vendor/memchr/src/arch/wasm32/memchr.rs deleted file mode 100644 index 55c1c1bb472f10..00000000000000 --- a/vendor/memchr/src/arch/wasm32/memchr.rs +++ /dev/null @@ -1,124 +0,0 @@ -/*! -Wrapper routines for `memchr` and friends. - -These routines choose the best implementation at compile time. (This is -different from `x86_64` because it is expected that `simd128` is almost always -available for `wasm32` targets.) -*/ - -macro_rules! defraw { - ($ty:ident, $find:ident, $start:ident, $end:ident, $($needles:ident),+) => {{ - use crate::arch::wasm32::simd128::memchr::$ty; - - debug!("chose simd128 for {}", stringify!($ty)); - debug_assert!($ty::is_available()); - // SAFETY: We know that wasm memchr is always available whenever - // code is compiled for `wasm32` with the `simd128` target feature - // enabled. - $ty::new_unchecked($($needles),+).$find($start, $end) - }} -} - -/// memchr, but using raw pointers to represent the haystack. -/// -/// # Safety -/// -/// Pointers must be valid. See `One::find_raw`. -#[inline(always)] -pub(crate) unsafe fn memchr_raw( - n1: u8, - start: *const u8, - end: *const u8, -) -> Option<*const u8> { - defraw!(One, find_raw, start, end, n1) -} - -/// memrchr, but using raw pointers to represent the haystack. -/// -/// # Safety -/// -/// Pointers must be valid. See `One::rfind_raw`. -#[inline(always)] -pub(crate) unsafe fn memrchr_raw( - n1: u8, - start: *const u8, - end: *const u8, -) -> Option<*const u8> { - defraw!(One, rfind_raw, start, end, n1) -} - -/// memchr2, but using raw pointers to represent the haystack. -/// -/// # Safety -/// -/// Pointers must be valid. See `Two::find_raw`. -#[inline(always)] -pub(crate) unsafe fn memchr2_raw( - n1: u8, - n2: u8, - start: *const u8, - end: *const u8, -) -> Option<*const u8> { - defraw!(Two, find_raw, start, end, n1, n2) -} - -/// memrchr2, but using raw pointers to represent the haystack. -/// -/// # Safety -/// -/// Pointers must be valid. See `Two::rfind_raw`. -#[inline(always)] -pub(crate) unsafe fn memrchr2_raw( - n1: u8, - n2: u8, - start: *const u8, - end: *const u8, -) -> Option<*const u8> { - defraw!(Two, rfind_raw, start, end, n1, n2) -} - -/// memchr3, but using raw pointers to represent the haystack. -/// -/// # Safety -/// -/// Pointers must be valid. See `Three::find_raw`. -#[inline(always)] -pub(crate) unsafe fn memchr3_raw( - n1: u8, - n2: u8, - n3: u8, - start: *const u8, - end: *const u8, -) -> Option<*const u8> { - defraw!(Three, find_raw, start, end, n1, n2, n3) -} - -/// memrchr3, but using raw pointers to represent the haystack. -/// -/// # Safety -/// -/// Pointers must be valid. See `Three::rfind_raw`. -#[inline(always)] -pub(crate) unsafe fn memrchr3_raw( - n1: u8, - n2: u8, - n3: u8, - start: *const u8, - end: *const u8, -) -> Option<*const u8> { - defraw!(Three, rfind_raw, start, end, n1, n2, n3) -} - -/// Count all matching bytes, but using raw pointers to represent the haystack. -/// -/// # Safety -/// -/// Pointers must be valid. See `One::count_raw`. 
-#[inline(always)] -pub(crate) unsafe fn count_raw( - n1: u8, - start: *const u8, - end: *const u8, -) -> usize { - defraw!(One, count_raw, start, end, n1) -} diff --git a/vendor/memchr/src/arch/wasm32/mod.rs b/vendor/memchr/src/arch/wasm32/mod.rs deleted file mode 100644 index 209f876cb58376..00000000000000 --- a/vendor/memchr/src/arch/wasm32/mod.rs +++ /dev/null @@ -1,7 +0,0 @@ -/*! -Vector algorithms for the `wasm32` target. -*/ - -pub mod simd128; - -pub(crate) mod memchr; diff --git a/vendor/memchr/src/arch/wasm32/simd128/memchr.rs b/vendor/memchr/src/arch/wasm32/simd128/memchr.rs deleted file mode 100644 index fa314c9d18aa6f..00000000000000 --- a/vendor/memchr/src/arch/wasm32/simd128/memchr.rs +++ /dev/null @@ -1,1020 +0,0 @@ -/*! -This module defines 128-bit vector implementations of `memchr` and friends. - -The main types in this module are [`One`], [`Two`] and [`Three`]. They are for -searching for one, two or three distinct bytes, respectively, in a haystack. -Each type also has corresponding double ended iterators. These searchers are -typically much faster than scalar routines accomplishing the same task. - -The `One` searcher also provides a [`One::count`] routine for efficiently -counting the number of times a single byte occurs in a haystack. This is -useful, for example, for counting the number of lines in a haystack. This -routine exists because it is usually faster, especially with a high match -count, then using [`One::find`] repeatedly. ([`OneIter`] specializes its -`Iterator::count` implementation to use this routine.) - -Only one, two and three bytes are supported because three bytes is about -the point where one sees diminishing returns. Beyond this point and it's -probably (but not necessarily) better to just use a simple `[bool; 256]` array -or similar. However, it depends mightily on the specific work-load and the -expected match frequency. -*/ - -use core::arch::wasm32::v128; - -use crate::{arch::generic::memchr as generic, ext::Pointer, vector::Vector}; - -/// Finds all occurrences of a single byte in a haystack. -#[derive(Clone, Copy, Debug)] -pub struct One(generic::One); - -impl One { - /// Create a new searcher that finds occurrences of the needle byte given. - /// - /// This particular searcher is specialized to use simd128 vector - /// instructions that typically make it quite fast. - /// - /// If simd128 is unavailable in the current environment, then `None` is - /// returned. - #[inline] - pub fn new(needle: u8) -> Option { - if One::is_available() { - // SAFETY: we check that simd128 is available above. - unsafe { Some(One::new_unchecked(needle)) } - } else { - None - } - } - - /// Create a new finder specific to simd128 vectors and routines without - /// checking that simd128 is available. - /// - /// # Safety - /// - /// Callers must guarantee that it is safe to execute `simd128` - /// instructions in the current environment. - #[target_feature(enable = "simd128")] - #[inline] - pub unsafe fn new_unchecked(needle: u8) -> One { - One(generic::One::new(needle)) - } - - /// Returns true when this implementation is available in the current - /// environment. - /// - /// When this is true, it is guaranteed that [`One::new`] will return - /// a `Some` value. Similarly, when it is false, it is guaranteed that - /// `One::new` will return a `None` value. - /// - /// Note also that for the lifetime of a single program, if this returns - /// true then it will always return true. 
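// [Editor's sketch, not part of the vendored memchr source] As noted above,
// simd128 availability is resolved at compile time, unlike x86_64 where
// runtime CPU detection is typical. A minimal illustration of the two
// styles, assuming a std environment:
fn simd128_available() -> bool {
    // Decided by the compiler from the target features the crate was built with.
    cfg!(target_feature = "simd128")
}

#[cfg(target_arch = "x86_64")]
fn avx2_available() -> bool {
    // Decided at runtime via CPUID.
    std::arch::is_x86_feature_detected!("avx2")
}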
- #[inline] - pub fn is_available() -> bool { - #[cfg(target_feature = "simd128")] - { - true - } - #[cfg(not(target_feature = "simd128"))] - { - false - } - } - - /// Return the first occurrence of one of the needle bytes in the given - /// haystack. If no such occurrence exists, then `None` is returned. - /// - /// The occurrence is reported as an offset into `haystack`. Its maximum - /// value is `haystack.len() - 1`. - #[inline] - pub fn find(&self, haystack: &[u8]) -> Option { - // SAFETY: `find_raw` guarantees that if a pointer is returned, it - // falls within the bounds of the start and end pointers. - unsafe { - generic::search_slice_with_raw(haystack, |s, e| { - self.find_raw(s, e) - }) - } - } - - /// Return the last occurrence of one of the needle bytes in the given - /// haystack. If no such occurrence exists, then `None` is returned. - /// - /// The occurrence is reported as an offset into `haystack`. Its maximum - /// value is `haystack.len() - 1`. - #[inline] - pub fn rfind(&self, haystack: &[u8]) -> Option { - // SAFETY: `rfind_raw` guarantees that if a pointer is returned, it - // falls within the bounds of the start and end pointers. - unsafe { - generic::search_slice_with_raw(haystack, |s, e| { - self.rfind_raw(s, e) - }) - } - } - - /// Counts all occurrences of this byte in the given haystack. - #[inline] - pub fn count(&self, haystack: &[u8]) -> usize { - // SAFETY: All of our pointers are derived directly from a borrowed - // slice, which is guaranteed to be valid. - unsafe { - let start = haystack.as_ptr(); - let end = start.add(haystack.len()); - self.count_raw(start, end) - } - } - - /// Like `find`, but accepts and returns raw pointers. - /// - /// When a match is found, the pointer returned is guaranteed to be - /// `>= start` and `< end`. - /// - /// This routine is useful if you're already using raw pointers and would - /// like to avoid converting back to a slice before executing a search. - /// - /// # Safety - /// - /// * Both `start` and `end` must be valid for reads. - /// * Both `start` and `end` must point to an initialized value. - /// * Both `start` and `end` must point to the same allocated object and - /// must either be in bounds or at most one byte past the end of the - /// allocated object. - /// * Both `start` and `end` must be _derived from_ a pointer to the same - /// object. - /// * The distance between `start` and `end` must not overflow `isize`. - /// * The distance being in bounds must not rely on "wrapping around" the - /// address space. - /// - /// Note that callers may pass a pair of pointers such that `start >= end`. - /// In that case, `None` will always be returned. - #[inline] - pub unsafe fn find_raw( - &self, - start: *const u8, - end: *const u8, - ) -> Option<*const u8> { - if start >= end { - return None; - } - if end.distance(start) < v128::BYTES { - // SAFETY: We require the caller to pass valid start/end pointers. - return generic::fwd_byte_by_byte(start, end, |b| { - b == self.0.needle1() - }); - } - // SAFETY: Building a `One` means it's safe to call 'simd128' routines. - // Also, we've checked that our haystack is big enough to run on the - // vector routine. Pointer validity is caller's responsibility. - self.find_raw_impl(start, end) - } - - /// Like `rfind`, but accepts and returns raw pointers. - /// - /// When a match is found, the pointer returned is guaranteed to be - /// `>= start` and `< end`. 
- /// - /// This routine is useful if you're already using raw pointers and would - /// like to avoid converting back to a slice before executing a search. - /// - /// # Safety - /// - /// * Both `start` and `end` must be valid for reads. - /// * Both `start` and `end` must point to an initialized value. - /// * Both `start` and `end` must point to the same allocated object and - /// must either be in bounds or at most one byte past the end of the - /// allocated object. - /// * Both `start` and `end` must be _derived from_ a pointer to the same - /// object. - /// * The distance between `start` and `end` must not overflow `isize`. - /// * The distance being in bounds must not rely on "wrapping around" the - /// address space. - /// - /// Note that callers may pass a pair of pointers such that `start >= end`. - /// In that case, `None` will always be returned. - #[inline] - pub unsafe fn rfind_raw( - &self, - start: *const u8, - end: *const u8, - ) -> Option<*const u8> { - if start >= end { - return None; - } - if end.distance(start) < v128::BYTES { - // SAFETY: We require the caller to pass valid start/end pointers. - return generic::rev_byte_by_byte(start, end, |b| { - b == self.0.needle1() - }); - } - // SAFETY: Building a `One` means it's safe to call 'simd128' routines. - // Also, we've checked that our haystack is big enough to run on the - // vector routine. Pointer validity is caller's responsibility. - self.rfind_raw_impl(start, end) - } - - /// Counts all occurrences of this byte in the given haystack represented - /// by raw pointers. - /// - /// When a match is found, the pointer returned is guaranteed to be - /// `>= start` and `< end`. - /// - /// This routine is useful if you're already using raw pointers and would - /// like to avoid converting back to a slice before executing a search. - /// - /// # Safety - /// - /// * Both `start` and `end` must be valid for reads. - /// * Both `start` and `end` must point to an initialized value. - /// * Both `start` and `end` must point to the same allocated object and - /// must either be in bounds or at most one byte past the end of the - /// allocated object. - /// * Both `start` and `end` must be _derived from_ a pointer to the same - /// object. - /// * The distance between `start` and `end` must not overflow `isize`. - /// * The distance being in bounds must not rely on "wrapping around" the - /// address space. - /// - /// Note that callers may pass a pair of pointers such that `start >= end`. - /// In that case, `None` will always be returned. - #[inline] - pub unsafe fn count_raw(&self, start: *const u8, end: *const u8) -> usize { - if start >= end { - return 0; - } - if end.distance(start) < v128::BYTES { - // SAFETY: We require the caller to pass valid start/end pointers. - return generic::count_byte_by_byte(start, end, |b| { - b == self.0.needle1() - }); - } - // SAFETY: Building a `One` means it's safe to call 'simd128' routines. - // Also, we've checked that our haystack is big enough to run on the - // vector routine. Pointer validity is caller's responsibility. - self.count_raw_impl(start, end) - } - - /// Execute a search using simd128 vectors and routines. - /// - /// # Safety - /// - /// Same as [`One::find_raw`], except the distance between `start` and - /// `end` must be at least the size of a simd128 vector (in bytes). 
- /// - /// (The target feature safety obligation is automatically fulfilled by - /// virtue of being a method on `One`, which can only be constructed - /// when it is safe to call `simd128` routines.) - #[target_feature(enable = "simd128")] - #[inline] - unsafe fn find_raw_impl( - &self, - start: *const u8, - end: *const u8, - ) -> Option<*const u8> { - self.0.find_raw(start, end) - } - - /// Execute a search using simd128 vectors and routines. - /// - /// # Safety - /// - /// Same as [`One::rfind_raw`], except the distance between `start` and - /// `end` must be at least the size of a simd128 vector (in bytes). - /// - /// (The target feature safety obligation is automatically fulfilled by - /// virtue of being a method on `One`, which can only be constructed - /// when it is safe to call `simd128` routines.) - #[target_feature(enable = "simd128")] - #[inline] - unsafe fn rfind_raw_impl( - &self, - start: *const u8, - end: *const u8, - ) -> Option<*const u8> { - self.0.rfind_raw(start, end) - } - - /// Execute a count using simd128 vectors and routines. - /// - /// # Safety - /// - /// Same as [`One::count_raw`], except the distance between `start` and - /// `end` must be at least the size of a simd128 vector (in bytes). - /// - /// (The target feature safety obligation is automatically fulfilled by - /// virtue of being a method on `One`, which can only be constructed - /// when it is safe to call `simd128` routines.) - #[target_feature(enable = "simd128")] - #[inline] - unsafe fn count_raw_impl( - &self, - start: *const u8, - end: *const u8, - ) -> usize { - self.0.count_raw(start, end) - } - - /// Returns an iterator over all occurrences of the needle byte in the - /// given haystack. - /// - /// The iterator returned implements `DoubleEndedIterator`. This means it - /// can also be used to find occurrences in reverse order. - #[inline] - pub fn iter<'a, 'h>(&'a self, haystack: &'h [u8]) -> OneIter<'a, 'h> { - OneIter { searcher: self, it: generic::Iter::new(haystack) } - } -} - -/// An iterator over all occurrences of a single byte in a haystack. -/// -/// This iterator implements `DoubleEndedIterator`, which means it can also be -/// used to find occurrences in reverse order. -/// -/// This iterator is created by the [`One::iter`] method. -/// -/// The lifetime parameters are as follows: -/// -/// * `'a` refers to the lifetime of the underlying [`One`] searcher. -/// * `'h` refers to the lifetime of the haystack being searched. -#[derive(Clone, Debug)] -pub struct OneIter<'a, 'h> { - searcher: &'a One, - it: generic::Iter<'h>, -} - -impl<'a, 'h> Iterator for OneIter<'a, 'h> { - type Item = usize; - - #[inline] - fn next(&mut self) -> Option { - // SAFETY: We rely on the generic iterator to provide valid start - // and end pointers, but we guarantee that any pointer returned by - // 'find_raw' falls within the bounds of the start and end pointer. - unsafe { self.it.next(|s, e| self.searcher.find_raw(s, e)) } - } - - #[inline] - fn count(self) -> usize { - self.it.count(|s, e| { - // SAFETY: We rely on our generic iterator to return valid start - // and end pointers. 
- unsafe { self.searcher.count_raw(s, e) } - }) - } - - #[inline] - fn size_hint(&self) -> (usize, Option) { - self.it.size_hint() - } -} - -impl<'a, 'h> DoubleEndedIterator for OneIter<'a, 'h> { - #[inline] - fn next_back(&mut self) -> Option { - // SAFETY: We rely on the generic iterator to provide valid start - // and end pointers, but we guarantee that any pointer returned by - // 'rfind_raw' falls within the bounds of the start and end pointer. - unsafe { self.it.next_back(|s, e| self.searcher.rfind_raw(s, e)) } - } -} - -impl<'a, 'h> core::iter::FusedIterator for OneIter<'a, 'h> {} - -/// Finds all occurrences of two bytes in a haystack. -/// -/// That is, this reports matches of one of two possible bytes. For example, -/// searching for `a` or `b` in `afoobar` would report matches at offsets `0`, -/// `4` and `5`. -#[derive(Clone, Copy, Debug)] -pub struct Two(generic::Two); - -impl Two { - /// Create a new searcher that finds occurrences of the needle bytes given. - /// - /// This particular searcher is specialized to use simd128 vector - /// instructions that typically make it quite fast. - /// - /// If simd128 is unavailable in the current environment, then `None` is - /// returned. - #[inline] - pub fn new(needle1: u8, needle2: u8) -> Option { - if Two::is_available() { - // SAFETY: we check that simd128 is available above. - unsafe { Some(Two::new_unchecked(needle1, needle2)) } - } else { - None - } - } - - /// Create a new finder specific to simd128 vectors and routines without - /// checking that simd128 is available. - /// - /// # Safety - /// - /// Callers must guarantee that it is safe to execute `simd128` - /// instructions in the current environment. - #[target_feature(enable = "simd128")] - #[inline] - pub unsafe fn new_unchecked(needle1: u8, needle2: u8) -> Two { - Two(generic::Two::new(needle1, needle2)) - } - - /// Returns true when this implementation is available in the current - /// environment. - /// - /// When this is true, it is guaranteed that [`Two::new`] will return - /// a `Some` value. Similarly, when it is false, it is guaranteed that - /// `Two::new` will return a `None` value. - /// - /// Note also that for the lifetime of a single program, if this returns - /// true then it will always return true. - #[inline] - pub fn is_available() -> bool { - #[cfg(target_feature = "simd128")] - { - true - } - #[cfg(not(target_feature = "simd128"))] - { - false - } - } - - /// Return the first occurrence of one of the needle bytes in the given - /// haystack. If no such occurrence exists, then `None` is returned. - /// - /// The occurrence is reported as an offset into `haystack`. Its maximum - /// value is `haystack.len() - 1`. - #[inline] - pub fn find(&self, haystack: &[u8]) -> Option { - // SAFETY: `find_raw` guarantees that if a pointer is returned, it - // falls within the bounds of the start and end pointers. - unsafe { - generic::search_slice_with_raw(haystack, |s, e| { - self.find_raw(s, e) - }) - } - } - - /// Return the last occurrence of one of the needle bytes in the given - /// haystack. If no such occurrence exists, then `None` is returned. - /// - /// The occurrence is reported as an offset into `haystack`. Its maximum - /// value is `haystack.len() - 1`. - #[inline] - pub fn rfind(&self, haystack: &[u8]) -> Option { - // SAFETY: `rfind_raw` guarantees that if a pointer is returned, it - // falls within the bounds of the start and end pointers. 
- unsafe { - generic::search_slice_with_raw(haystack, |s, e| { - self.rfind_raw(s, e) - }) - } - } - - /// Like `find`, but accepts and returns raw pointers. - /// - /// When a match is found, the pointer returned is guaranteed to be - /// `>= start` and `< end`. - /// - /// This routine is useful if you're already using raw pointers and would - /// like to avoid converting back to a slice before executing a search. - /// - /// # Safety - /// - /// * Both `start` and `end` must be valid for reads. - /// * Both `start` and `end` must point to an initialized value. - /// * Both `start` and `end` must point to the same allocated object and - /// must either be in bounds or at most one byte past the end of the - /// allocated object. - /// * Both `start` and `end` must be _derived from_ a pointer to the same - /// object. - /// * The distance between `start` and `end` must not overflow `isize`. - /// * The distance being in bounds must not rely on "wrapping around" the - /// address space. - /// - /// Note that callers may pass a pair of pointers such that `start >= end`. - /// In that case, `None` will always be returned. - #[inline] - pub unsafe fn find_raw( - &self, - start: *const u8, - end: *const u8, - ) -> Option<*const u8> { - if start >= end { - return None; - } - if end.distance(start) < v128::BYTES { - // SAFETY: We require the caller to pass valid start/end pointers. - return generic::fwd_byte_by_byte(start, end, |b| { - b == self.0.needle1() || b == self.0.needle2() - }); - } - // SAFETY: Building a `Two` means it's safe to call 'simd128' routines. - // Also, we've checked that our haystack is big enough to run on the - // vector routine. Pointer validity is caller's responsibility. - self.find_raw_impl(start, end) - } - - /// Like `rfind`, but accepts and returns raw pointers. - /// - /// When a match is found, the pointer returned is guaranteed to be - /// `>= start` and `< end`. - /// - /// This routine is useful if you're already using raw pointers and would - /// like to avoid converting back to a slice before executing a search. - /// - /// # Safety - /// - /// * Both `start` and `end` must be valid for reads. - /// * Both `start` and `end` must point to an initialized value. - /// * Both `start` and `end` must point to the same allocated object and - /// must either be in bounds or at most one byte past the end of the - /// allocated object. - /// * Both `start` and `end` must be _derived from_ a pointer to the same - /// object. - /// * The distance between `start` and `end` must not overflow `isize`. - /// * The distance being in bounds must not rely on "wrapping around" the - /// address space. - /// - /// Note that callers may pass a pair of pointers such that `start >= end`. - /// In that case, `None` will always be returned. - #[inline] - pub unsafe fn rfind_raw( - &self, - start: *const u8, - end: *const u8, - ) -> Option<*const u8> { - if start >= end { - return None; - } - if end.distance(start) < v128::BYTES { - // SAFETY: We require the caller to pass valid start/end pointers. - return generic::rev_byte_by_byte(start, end, |b| { - b == self.0.needle1() || b == self.0.needle2() - }); - } - // SAFETY: Building a `Two` means it's safe to call 'simd128' routines. - // Also, we've checked that our haystack is big enough to run on the - // vector routine. Pointer validity is caller's responsibility. - self.rfind_raw_impl(start, end) - } - - /// Execute a search using simd128 vectors and routines. 
- /// - /// # Safety - /// - /// Same as [`Two::find_raw`], except the distance between `start` and - /// `end` must be at least the size of a simd128 vector (in bytes). - /// - /// (The target feature safety obligation is automatically fulfilled by - /// virtue of being a method on `Two`, which can only be constructed - /// when it is safe to call `simd128` routines.) - #[target_feature(enable = "simd128")] - #[inline] - unsafe fn find_raw_impl( - &self, - start: *const u8, - end: *const u8, - ) -> Option<*const u8> { - self.0.find_raw(start, end) - } - - /// Execute a search using simd128 vectors and routines. - /// - /// # Safety - /// - /// Same as [`Two::rfind_raw`], except the distance between `start` and - /// `end` must be at least the size of a simd128 vector (in bytes). - /// - /// (The target feature safety obligation is automatically fulfilled by - /// virtue of being a method on `Two`, which can only be constructed - /// when it is safe to call `simd128` routines.) - #[target_feature(enable = "simd128")] - #[inline] - unsafe fn rfind_raw_impl( - &self, - start: *const u8, - end: *const u8, - ) -> Option<*const u8> { - self.0.rfind_raw(start, end) - } - - /// Returns an iterator over all occurrences of the needle bytes in the - /// given haystack. - /// - /// The iterator returned implements `DoubleEndedIterator`. This means it - /// can also be used to find occurrences in reverse order. - #[inline] - pub fn iter<'a, 'h>(&'a self, haystack: &'h [u8]) -> TwoIter<'a, 'h> { - TwoIter { searcher: self, it: generic::Iter::new(haystack) } - } -} - -/// An iterator over all occurrences of two possible bytes in a haystack. -/// -/// This iterator implements `DoubleEndedIterator`, which means it can also be -/// used to find occurrences in reverse order. -/// -/// This iterator is created by the [`Two::iter`] method. -/// -/// The lifetime parameters are as follows: -/// -/// * `'a` refers to the lifetime of the underlying [`Two`] searcher. -/// * `'h` refers to the lifetime of the haystack being searched. -#[derive(Clone, Debug)] -pub struct TwoIter<'a, 'h> { - searcher: &'a Two, - it: generic::Iter<'h>, -} - -impl<'a, 'h> Iterator for TwoIter<'a, 'h> { - type Item = usize; - - #[inline] - fn next(&mut self) -> Option { - // SAFETY: We rely on the generic iterator to provide valid start - // and end pointers, but we guarantee that any pointer returned by - // 'find_raw' falls within the bounds of the start and end pointer. - unsafe { self.it.next(|s, e| self.searcher.find_raw(s, e)) } - } - - #[inline] - fn size_hint(&self) -> (usize, Option) { - self.it.size_hint() - } -} - -impl<'a, 'h> DoubleEndedIterator for TwoIter<'a, 'h> { - #[inline] - fn next_back(&mut self) -> Option { - // SAFETY: We rely on the generic iterator to provide valid start - // and end pointers, but we guarantee that any pointer returned by - // 'rfind_raw' falls within the bounds of the start and end pointer. - unsafe { self.it.next_back(|s, e| self.searcher.rfind_raw(s, e)) } - } -} - -impl<'a, 'h> core::iter::FusedIterator for TwoIter<'a, 'h> {} - -/// Finds all occurrences of three bytes in a haystack. -/// -/// That is, this reports matches of one of three possible bytes. For example, -/// searching for `a`, `b` or `o` in `afoobar` would report matches at offsets -/// `0`, `2`, `3`, `4` and `5`. -#[derive(Clone, Copy, Debug)] -pub struct Three(generic::Three); - -impl Three { - /// Create a new searcher that finds occurrences of the needle bytes given. 
- /// - /// This particular searcher is specialized to use simd128 vector - /// instructions that typically make it quite fast. - /// - /// If simd128 is unavailable in the current environment, then `None` is - /// returned. - #[inline] - pub fn new(needle1: u8, needle2: u8, needle3: u8) -> Option { - if Three::is_available() { - // SAFETY: we check that simd128 is available above. - unsafe { Some(Three::new_unchecked(needle1, needle2, needle3)) } - } else { - None - } - } - - /// Create a new finder specific to simd128 vectors and routines without - /// checking that simd128 is available. - /// - /// # Safety - /// - /// Callers must guarantee that it is safe to execute `simd128` - /// instructions in the current environment. - #[target_feature(enable = "simd128")] - #[inline] - pub unsafe fn new_unchecked( - needle1: u8, - needle2: u8, - needle3: u8, - ) -> Three { - Three(generic::Three::new(needle1, needle2, needle3)) - } - - /// Returns true when this implementation is available in the current - /// environment. - /// - /// When this is true, it is guaranteed that [`Three::new`] will return - /// a `Some` value. Similarly, when it is false, it is guaranteed that - /// `Three::new` will return a `None` value. - /// - /// Note also that for the lifetime of a single program, if this returns - /// true then it will always return true. - #[inline] - pub fn is_available() -> bool { - #[cfg(target_feature = "simd128")] - { - true - } - #[cfg(not(target_feature = "simd128"))] - { - false - } - } - - /// Return the first occurrence of one of the needle bytes in the given - /// haystack. If no such occurrence exists, then `None` is returned. - /// - /// The occurrence is reported as an offset into `haystack`. Its maximum - /// value is `haystack.len() - 1`. - #[inline] - pub fn find(&self, haystack: &[u8]) -> Option { - // SAFETY: `find_raw` guarantees that if a pointer is returned, it - // falls within the bounds of the start and end pointers. - unsafe { - generic::search_slice_with_raw(haystack, |s, e| { - self.find_raw(s, e) - }) - } - } - - /// Return the last occurrence of one of the needle bytes in the given - /// haystack. If no such occurrence exists, then `None` is returned. - /// - /// The occurrence is reported as an offset into `haystack`. Its maximum - /// value is `haystack.len() - 1`. - #[inline] - pub fn rfind(&self, haystack: &[u8]) -> Option { - // SAFETY: `rfind_raw` guarantees that if a pointer is returned, it - // falls within the bounds of the start and end pointers. - unsafe { - generic::search_slice_with_raw(haystack, |s, e| { - self.rfind_raw(s, e) - }) - } - } - - /// Like `find`, but accepts and returns raw pointers. - /// - /// When a match is found, the pointer returned is guaranteed to be - /// `>= start` and `< end`. - /// - /// This routine is useful if you're already using raw pointers and would - /// like to avoid converting back to a slice before executing a search. - /// - /// # Safety - /// - /// * Both `start` and `end` must be valid for reads. - /// * Both `start` and `end` must point to an initialized value. - /// * Both `start` and `end` must point to the same allocated object and - /// must either be in bounds or at most one byte past the end of the - /// allocated object. - /// * Both `start` and `end` must be _derived from_ a pointer to the same - /// object. - /// * The distance between `start` and `end` must not overflow `isize`. - /// * The distance being in bounds must not rely on "wrapping around" the - /// address space. 
- /// - /// Note that callers may pass a pair of pointers such that `start >= end`. - /// In that case, `None` will always be returned. - #[inline] - pub unsafe fn find_raw( - &self, - start: *const u8, - end: *const u8, - ) -> Option<*const u8> { - if start >= end { - return None; - } - if end.distance(start) < v128::BYTES { - // SAFETY: We require the caller to pass valid start/end pointers. - return generic::fwd_byte_by_byte(start, end, |b| { - b == self.0.needle1() - || b == self.0.needle2() - || b == self.0.needle3() - }); - } - // SAFETY: Building a `Three` means it's safe to call 'simd128' - // routines. Also, we've checked that our haystack is big enough to run - // on the vector routine. Pointer validity is caller's responsibility. - self.find_raw_impl(start, end) - } - - /// Like `rfind`, but accepts and returns raw pointers. - /// - /// When a match is found, the pointer returned is guaranteed to be - /// `>= start` and `< end`. - /// - /// This routine is useful if you're already using raw pointers and would - /// like to avoid converting back to a slice before executing a search. - /// - /// # Safety - /// - /// * Both `start` and `end` must be valid for reads. - /// * Both `start` and `end` must point to an initialized value. - /// * Both `start` and `end` must point to the same allocated object and - /// must either be in bounds or at most one byte past the end of the - /// allocated object. - /// * Both `start` and `end` must be _derived from_ a pointer to the same - /// object. - /// * The distance between `start` and `end` must not overflow `isize`. - /// * The distance being in bounds must not rely on "wrapping around" the - /// address space. - /// - /// Note that callers may pass a pair of pointers such that `start >= end`. - /// In that case, `None` will always be returned. - #[inline] - pub unsafe fn rfind_raw( - &self, - start: *const u8, - end: *const u8, - ) -> Option<*const u8> { - if start >= end { - return None; - } - if end.distance(start) < v128::BYTES { - // SAFETY: We require the caller to pass valid start/end pointers. - return generic::rev_byte_by_byte(start, end, |b| { - b == self.0.needle1() - || b == self.0.needle2() - || b == self.0.needle3() - }); - } - // SAFETY: Building a `Three` means it's safe to call 'simd128' - // routines. Also, we've checked that our haystack is big enough to run - // on the vector routine. Pointer validity is caller's responsibility. - self.rfind_raw_impl(start, end) - } - - /// Execute a search using simd128 vectors and routines. - /// - /// # Safety - /// - /// Same as [`Three::find_raw`], except the distance between `start` and - /// `end` must be at least the size of a simd128 vector (in bytes). - /// - /// (The target feature safety obligation is automatically fulfilled by - /// virtue of being a method on `Three`, which can only be constructed - /// when it is safe to call `simd128` routines.) - #[target_feature(enable = "simd128")] - #[inline] - unsafe fn find_raw_impl( - &self, - start: *const u8, - end: *const u8, - ) -> Option<*const u8> { - self.0.find_raw(start, end) - } - - /// Execute a search using simd128 vectors and routines. - /// - /// # Safety - /// - /// Same as [`Three::rfind_raw`], except the distance between `start` and - /// `end` must be at least the size of a simd128 vector (in bytes). - /// - /// (The target feature safety obligation is automatically fulfilled by - /// virtue of being a method on `Three`, which can only be constructed - /// when it is safe to call `simd128` routines.) 
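A short sketch of the `Three` searcher described above (not part of the patch): it assumes a wasm32 target with `simd128` enabled, and `first_delimiter` is an illustrative helper name.

// Illustrative sketch: finding the first of three possible bytes with the
// vendored wasm32/simd128 `Three` searcher.
use memchr::arch::wasm32::simd128::memchr::Three;

fn first_delimiter(haystack: &[u8]) -> Option<usize> {
    // `new` returns `None` when simd128 is unavailable; callers are
    // expected to fall back to a scalar search in that case.
    let searcher = Three::new(b',', b';', b'\n')?;
    // The reported offset is always within `0..haystack.len()`.
    searcher.find(haystack)
}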
- #[target_feature(enable = "simd128")] - #[inline] - unsafe fn rfind_raw_impl( - &self, - start: *const u8, - end: *const u8, - ) -> Option<*const u8> { - self.0.rfind_raw(start, end) - } - - /// Returns an iterator over all occurrences of the needle byte in the - /// given haystack. - /// - /// The iterator returned implements `DoubleEndedIterator`. This means it - /// can also be used to find occurrences in reverse order. - #[inline] - pub fn iter<'a, 'h>(&'a self, haystack: &'h [u8]) -> ThreeIter<'a, 'h> { - ThreeIter { searcher: self, it: generic::Iter::new(haystack) } - } -} - -/// An iterator over all occurrences of three possible bytes in a haystack. -/// -/// This iterator implements `DoubleEndedIterator`, which means it can also be -/// used to find occurrences in reverse order. -/// -/// This iterator is created by the [`Three::iter`] method. -/// -/// The lifetime parameters are as follows: -/// -/// * `'a` refers to the lifetime of the underlying [`Three`] searcher. -/// * `'h` refers to the lifetime of the haystack being searched. -#[derive(Clone, Debug)] -pub struct ThreeIter<'a, 'h> { - searcher: &'a Three, - it: generic::Iter<'h>, -} - -impl<'a, 'h> Iterator for ThreeIter<'a, 'h> { - type Item = usize; - - #[inline] - fn next(&mut self) -> Option { - // SAFETY: We rely on the generic iterator to provide valid start - // and end pointers, but we guarantee that any pointer returned by - // 'find_raw' falls within the bounds of the start and end pointer. - unsafe { self.it.next(|s, e| self.searcher.find_raw(s, e)) } - } - - #[inline] - fn size_hint(&self) -> (usize, Option) { - self.it.size_hint() - } -} - -impl<'a, 'h> DoubleEndedIterator for ThreeIter<'a, 'h> { - #[inline] - fn next_back(&mut self) -> Option { - // SAFETY: We rely on the generic iterator to provide valid start - // and end pointers, but we guarantee that any pointer returned by - // 'rfind_raw' falls within the bounds of the start and end pointer. 
- unsafe { self.it.next_back(|s, e| self.searcher.rfind_raw(s, e)) } - } -} - -impl<'a, 'h> core::iter::FusedIterator for ThreeIter<'a, 'h> {} - -#[cfg(test)] -mod tests { - use super::*; - - define_memchr_quickcheck!(super); - - #[test] - fn forward_one() { - crate::tests::memchr::Runner::new(1).forward_iter( - |haystack, needles| { - Some(One::new(needles[0])?.iter(haystack).collect()) - }, - ) - } - - #[test] - fn reverse_one() { - crate::tests::memchr::Runner::new(1).reverse_iter( - |haystack, needles| { - Some(One::new(needles[0])?.iter(haystack).rev().collect()) - }, - ) - } - - #[test] - fn count_one() { - crate::tests::memchr::Runner::new(1).count_iter(|haystack, needles| { - Some(One::new(needles[0])?.iter(haystack).count()) - }) - } - - #[test] - fn forward_two() { - crate::tests::memchr::Runner::new(2).forward_iter( - |haystack, needles| { - let n1 = needles.get(0).copied()?; - let n2 = needles.get(1).copied()?; - Some(Two::new(n1, n2)?.iter(haystack).collect()) - }, - ) - } - - #[test] - fn reverse_two() { - crate::tests::memchr::Runner::new(2).reverse_iter( - |haystack, needles| { - let n1 = needles.get(0).copied()?; - let n2 = needles.get(1).copied()?; - Some(Two::new(n1, n2)?.iter(haystack).rev().collect()) - }, - ) - } - - #[test] - fn forward_three() { - crate::tests::memchr::Runner::new(3).forward_iter( - |haystack, needles| { - let n1 = needles.get(0).copied()?; - let n2 = needles.get(1).copied()?; - let n3 = needles.get(2).copied()?; - Some(Three::new(n1, n2, n3)?.iter(haystack).collect()) - }, - ) - } - - #[test] - fn reverse_three() { - crate::tests::memchr::Runner::new(3).reverse_iter( - |haystack, needles| { - let n1 = needles.get(0).copied()?; - let n2 = needles.get(1).copied()?; - let n3 = needles.get(2).copied()?; - Some(Three::new(n1, n2, n3)?.iter(haystack).rev().collect()) - }, - ) - } -} diff --git a/vendor/memchr/src/arch/wasm32/simd128/mod.rs b/vendor/memchr/src/arch/wasm32/simd128/mod.rs deleted file mode 100644 index b55d1f07b07406..00000000000000 --- a/vendor/memchr/src/arch/wasm32/simd128/mod.rs +++ /dev/null @@ -1,6 +0,0 @@ -/*! -Algorithms for the `wasm32` target using 128-bit vectors via simd128. -*/ - -pub mod memchr; -pub mod packedpair; diff --git a/vendor/memchr/src/arch/wasm32/simd128/packedpair.rs b/vendor/memchr/src/arch/wasm32/simd128/packedpair.rs deleted file mode 100644 index e8cf745a8feb77..00000000000000 --- a/vendor/memchr/src/arch/wasm32/simd128/packedpair.rs +++ /dev/null @@ -1,228 +0,0 @@ -/*! -A 128-bit vector implementation of the "packed pair" SIMD algorithm. - -The "packed pair" algorithm is based on the [generic SIMD] algorithm. The main -difference is that it (by default) uses a background distribution of byte -frequencies to heuristically select the pair of bytes to search for. - -[generic SIMD]: http://0x80.pl/articles/simd-strfind.html#first-and-last -*/ - -use core::arch::wasm32::v128; - -use crate::arch::{all::packedpair::Pair, generic::packedpair}; - -/// A "packed pair" finder that uses 128-bit vector operations. -/// -/// This finder picks two bytes that it believes have high predictive power -/// for indicating an overall match of a needle. Depending on whether -/// `Finder::find` or `Finder::find_prefilter` is used, it reports offsets -/// where the needle matches or could match. In the prefilter case, candidates -/// are reported whenever the [`Pair`] of bytes given matches. -#[derive(Clone, Copy, Debug)] -pub struct Finder(packedpair::Finder); - -impl Finder { - /// Create a new pair searcher. 
The searcher returned can either report - /// exact matches of `needle` or act as a prefilter and report candidate - /// positions of `needle`. - /// - /// If simd128 is unavailable in the current environment or if a [`Pair`] - /// could not be constructed from the needle given, then `None` is - /// returned. - #[inline] - pub fn new(needle: &[u8]) -> Option { - Finder::with_pair(needle, Pair::new(needle)?) - } - - /// Create a new "packed pair" finder using the pair of bytes given. - /// - /// This constructor permits callers to control precisely which pair of - /// bytes is used as a predicate. - /// - /// If simd128 is unavailable in the current environment, then `None` is - /// returned. - #[inline] - pub fn with_pair(needle: &[u8], pair: Pair) -> Option { - if Finder::is_available() { - // SAFETY: we check that simd128 is available above. We are also - // guaranteed to have needle.len() > 1 because we have a valid - // Pair. - unsafe { Some(Finder::with_pair_impl(needle, pair)) } - } else { - None - } - } - - /// Create a new `Finder` specific to simd128 vectors and routines. - /// - /// # Safety - /// - /// Same as the safety for `packedpair::Finder::new`, and callers must also - /// ensure that simd128 is available. - #[target_feature(enable = "simd128")] - #[inline] - unsafe fn with_pair_impl(needle: &[u8], pair: Pair) -> Finder { - let finder = packedpair::Finder::::new(needle, pair); - Finder(finder) - } - - /// Returns true when this implementation is available in the current - /// environment. - /// - /// When this is true, it is guaranteed that [`Finder::with_pair`] will - /// return a `Some` value. Similarly, when it is false, it is guaranteed - /// that `Finder::with_pair` will return a `None` value. Notice that this - /// does not guarantee that [`Finder::new`] will return a `Finder`. Namely, - /// even when `Finder::is_available` is true, it is not guaranteed that a - /// valid [`Pair`] can be found from the needle given. - /// - /// Note also that for the lifetime of a single program, if this returns - /// true then it will always return true. - #[inline] - pub fn is_available() -> bool { - // We used to gate on `cfg(target_feature = "simd128")` here, but - // we've since required the feature to be enabled at compile time to - // even include this module at all. Therefore, it is always enabled - // in this context. See the linked issue for why this was changed. - // - // Ref: https://github.com/BurntSushi/memchr/issues/144 - true - } - - /// Execute a search using wasm32 v128 vectors and routines. - /// - /// # Panics - /// - /// When `haystack.len()` is less than [`Finder::min_haystack_len`]. - #[inline] - pub fn find(&self, haystack: &[u8], needle: &[u8]) -> Option { - self.find_impl(haystack, needle) - } - - /// Execute a search using wasm32 v128 vectors and routines. - /// - /// # Panics - /// - /// When `haystack.len()` is less than [`Finder::min_haystack_len`]. - #[inline] - pub fn find_prefilter(&self, haystack: &[u8]) -> Option { - self.find_prefilter_impl(haystack) - } - - /// Execute a search using wasm32 v128 vectors and routines. - /// - /// # Panics - /// - /// When `haystack.len()` is less than [`Finder::min_haystack_len`]. - /// - /// # Safety - /// - /// (The target feature safety obligation is automatically fulfilled by - /// virtue of being a method on `Finder`, which can only be constructed - /// when it is safe to call `simd128` routines.) 
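A hedged sketch of how the "packed pair" `Finder` described above is driven (not part of the patch); it mirrors the guard used by the finder's own tests, assumes a wasm32 target with `simd128` enabled, and uses the illustrative helper name `find_substring`.

// Illustrative sketch: substring search with the vendored packed-pair
// finder, routing haystacks shorter than `min_haystack_len` away from the
// vector routine.
use memchr::arch::wasm32::simd128::packedpair::Finder;

fn find_substring(haystack: &[u8], needle: &[u8]) -> Option<Option<usize>> {
    // `new` returns `None` if no suitable byte pair can be chosen from the
    // needle (for example, a needle shorter than two bytes).
    let finder = Finder::new(needle)?;
    // `find` panics on haystacks shorter than `min_haystack_len`, so short
    // haystacks must be handled by a scalar search instead.
    if haystack.len() < finder.min_haystack_len() {
        return None;
    }
    Some(finder.find(haystack, needle))
}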
- #[target_feature(enable = "simd128")] - #[inline] - fn find_impl(&self, haystack: &[u8], needle: &[u8]) -> Option { - // SAFETY: The target feature safety obligation is automatically - // fulfilled by virtue of being a method on `Finder`, which can only be - // constructed when it is safe to call `simd128` routines. - unsafe { self.0.find(haystack, needle) } - } - - /// Execute a prefilter search using wasm32 v128 vectors and routines. - /// - /// # Panics - /// - /// When `haystack.len()` is less than [`Finder::min_haystack_len`]. - /// - /// # Safety - /// - /// (The target feature safety obligation is automatically fulfilled by - /// virtue of being a method on `Finder`, which can only be constructed - /// when it is safe to call `simd128` routines.) - #[target_feature(enable = "simd128")] - #[inline] - fn find_prefilter_impl(&self, haystack: &[u8]) -> Option { - // SAFETY: The target feature safety obligation is automatically - // fulfilled by virtue of being a method on `Finder`, which can only be - // constructed when it is safe to call `simd128` routines. - unsafe { self.0.find_prefilter(haystack) } - } - - /// Returns the pair of offsets (into the needle) used to check as a - /// predicate before confirming whether a needle exists at a particular - /// position. - #[inline] - pub fn pair(&self) -> &Pair { - self.0.pair() - } - - /// Returns the minimum haystack length that this `Finder` can search. - /// - /// Using a haystack with length smaller than this in a search will result - /// in a panic. The reason for this restriction is that this finder is - /// meant to be a low-level component that is part of a larger substring - /// strategy. In that sense, it avoids trying to handle all cases and - /// instead only handles the cases that it can handle very well. - #[inline] - pub fn min_haystack_len(&self) -> usize { - self.0.min_haystack_len() - } -} - -#[cfg(test)] -mod tests { - use super::*; - - fn find(haystack: &[u8], needle: &[u8]) -> Option> { - let f = Finder::new(needle)?; - if haystack.len() < f.min_haystack_len() { - return None; - } - Some(f.find(haystack, needle)) - } - - define_substring_forward_quickcheck!(find); - - #[test] - fn forward_substring() { - crate::tests::substring::Runner::new().fwd(find).run() - } - - #[test] - fn forward_packedpair() { - fn find( - haystack: &[u8], - needle: &[u8], - index1: u8, - index2: u8, - ) -> Option> { - let pair = Pair::with_indices(needle, index1, index2)?; - let f = Finder::with_pair(needle, pair)?; - if haystack.len() < f.min_haystack_len() { - return None; - } - Some(f.find(haystack, needle)) - } - crate::tests::packedpair::Runner::new().fwd(find).run() - } - - #[test] - fn forward_packedpair_prefilter() { - fn find( - haystack: &[u8], - needle: &[u8], - index1: u8, - index2: u8, - ) -> Option> { - let pair = Pair::with_indices(needle, index1, index2)?; - let f = Finder::with_pair(needle, pair)?; - if haystack.len() < f.min_haystack_len() { - return None; - } - Some(f.find_prefilter(haystack)) - } - crate::tests::packedpair::Runner::new().fwd(find).run() - } -} diff --git a/vendor/memchr/src/arch/x86_64/avx2/memchr.rs b/vendor/memchr/src/arch/x86_64/avx2/memchr.rs deleted file mode 100644 index 59f8c7f7382028..00000000000000 --- a/vendor/memchr/src/arch/x86_64/avx2/memchr.rs +++ /dev/null @@ -1,1352 +0,0 @@ -/*! -This module defines 256-bit vector implementations of `memchr` and friends. - -The main types in this module are [`One`], [`Two`] and [`Three`]. 
They are for -searching for one, two or three distinct bytes, respectively, in a haystack. -Each type also has corresponding double ended iterators. These searchers are -typically much faster than scalar routines accomplishing the same task. - -The `One` searcher also provides a [`One::count`] routine for efficiently -counting the number of times a single byte occurs in a haystack. This is -useful, for example, for counting the number of lines in a haystack. This -routine exists because it is usually faster, especially with a high match -count, then using [`One::find`] repeatedly. ([`OneIter`] specializes its -`Iterator::count` implementation to use this routine.) - -Only one, two and three bytes are supported because three bytes is about -the point where one sees diminishing returns. Beyond this point and it's -probably (but not necessarily) better to just use a simple `[bool; 256]` array -or similar. However, it depends mightily on the specific work-load and the -expected match frequency. -*/ - -use core::arch::x86_64::{__m128i, __m256i}; - -use crate::{arch::generic::memchr as generic, ext::Pointer, vector::Vector}; - -/// Finds all occurrences of a single byte in a haystack. -#[derive(Clone, Copy, Debug)] -pub struct One { - /// Used for haystacks less than 32 bytes. - sse2: generic::One<__m128i>, - /// Used for haystacks bigger than 32 bytes. - avx2: generic::One<__m256i>, -} - -impl One { - /// Create a new searcher that finds occurrences of the needle byte given. - /// - /// This particular searcher is specialized to use AVX2 vector instructions - /// that typically make it quite fast. (SSE2 is used for haystacks that - /// are too short to accommodate an AVX2 vector.) - /// - /// If either SSE2 or AVX2 is unavailable in the current environment, then - /// `None` is returned. - #[inline] - pub fn new(needle: u8) -> Option { - if One::is_available() { - // SAFETY: we check that sse2 and avx2 are available above. - unsafe { Some(One::new_unchecked(needle)) } - } else { - None - } - } - - /// Create a new finder specific to AVX2 vectors and routines without - /// checking that either SSE2 or AVX2 is available. - /// - /// # Safety - /// - /// Callers must guarantee that it is safe to execute both `sse2` and - /// `avx2` instructions in the current environment. - /// - /// Note that it is a common misconception that if one compiles for an - /// `x86_64` target, then they therefore automatically have access to SSE2 - /// instructions. While this is almost always the case, it isn't true in - /// 100% of cases. - #[target_feature(enable = "sse2", enable = "avx2")] - #[inline] - pub unsafe fn new_unchecked(needle: u8) -> One { - One { - sse2: generic::One::new(needle), - avx2: generic::One::new(needle), - } - } - - /// Returns true when this implementation is available in the current - /// environment. - /// - /// When this is true, it is guaranteed that [`One::new`] will return - /// a `Some` value. Similarly, when it is false, it is guaranteed that - /// `One::new` will return a `None` value. - /// - /// Note also that for the lifetime of a single program, if this returns - /// true then it will always return true. 
- #[inline] - pub fn is_available() -> bool { - #[cfg(not(target_feature = "sse2"))] - { - false - } - #[cfg(target_feature = "sse2")] - { - #[cfg(target_feature = "avx2")] - { - true - } - #[cfg(not(target_feature = "avx2"))] - { - #[cfg(feature = "std")] - { - std::is_x86_feature_detected!("avx2") - } - #[cfg(not(feature = "std"))] - { - false - } - } - } - } - - /// Return the first occurrence of one of the needle bytes in the given - /// haystack. If no such occurrence exists, then `None` is returned. - /// - /// The occurrence is reported as an offset into `haystack`. Its maximum - /// value is `haystack.len() - 1`. - #[inline] - pub fn find(&self, haystack: &[u8]) -> Option { - // SAFETY: `find_raw` guarantees that if a pointer is returned, it - // falls within the bounds of the start and end pointers. - unsafe { - generic::search_slice_with_raw(haystack, |s, e| { - self.find_raw(s, e) - }) - } - } - - /// Return the last occurrence of one of the needle bytes in the given - /// haystack. If no such occurrence exists, then `None` is returned. - /// - /// The occurrence is reported as an offset into `haystack`. Its maximum - /// value is `haystack.len() - 1`. - #[inline] - pub fn rfind(&self, haystack: &[u8]) -> Option { - // SAFETY: `find_raw` guarantees that if a pointer is returned, it - // falls within the bounds of the start and end pointers. - unsafe { - generic::search_slice_with_raw(haystack, |s, e| { - self.rfind_raw(s, e) - }) - } - } - - /// Counts all occurrences of this byte in the given haystack. - #[inline] - pub fn count(&self, haystack: &[u8]) -> usize { - // SAFETY: All of our pointers are derived directly from a borrowed - // slice, which is guaranteed to be valid. - unsafe { - let start = haystack.as_ptr(); - let end = start.add(haystack.len()); - self.count_raw(start, end) - } - } - - /// Like `find`, but accepts and returns raw pointers. - /// - /// When a match is found, the pointer returned is guaranteed to be - /// `>= start` and `< end`. - /// - /// This routine is useful if you're already using raw pointers and would - /// like to avoid converting back to a slice before executing a search. - /// - /// # Safety - /// - /// * Both `start` and `end` must be valid for reads. - /// * Both `start` and `end` must point to an initialized value. - /// * Both `start` and `end` must point to the same allocated object and - /// must either be in bounds or at most one byte past the end of the - /// allocated object. - /// * Both `start` and `end` must be _derived from_ a pointer to the same - /// object. - /// * The distance between `start` and `end` must not overflow `isize`. - /// * The distance being in bounds must not rely on "wrapping around" the - /// address space. - /// - /// Note that callers may pass a pair of pointers such that `start >= end`. - /// In that case, `None` will always be returned. - #[inline] - pub unsafe fn find_raw( - &self, - start: *const u8, - end: *const u8, - ) -> Option<*const u8> { - if start >= end { - return None; - } - let len = end.distance(start); - if len < __m256i::BYTES { - return if len < __m128i::BYTES { - // SAFETY: We require the caller to pass valid start/end - // pointers. - generic::fwd_byte_by_byte(start, end, |b| { - b == self.sse2.needle1() - }) - } else { - // SAFETY: We require the caller to pass valid start/end - // pointers. - self.find_raw_sse2(start, end) - }; - } - // SAFETY: Building a `One` means it's safe to call both 'sse2' and - // 'avx2' routines. 
Also, we've checked that our haystack is big - // enough to run on the vector routine. Pointer validity is caller's - // responsibility. - // - // Note that we could call `self.avx2.find_raw` directly here. But that - // means we'd have to annotate this routine with `target_feature`. - // Which is fine, because this routine is `unsafe` anyway and the - // `target_feature` obligation is met by virtue of building a `One`. - // The real problem is that a routine with a `target_feature` - // annotation generally can't be inlined into caller code unless - // the caller code has the same target feature annotations. Namely, - // the common case (at time of writing) is for calling code to not - // have the `avx2` target feature enabled *at compile time*. Without - // `target_feature` on this routine, it can be inlined which will - // handle some of the short-haystack cases above without touching the - // architecture specific code. - self.find_raw_avx2(start, end) - } - - /// Like `rfind`, but accepts and returns raw pointers. - /// - /// When a match is found, the pointer returned is guaranteed to be - /// `>= start` and `< end`. - /// - /// This routine is useful if you're already using raw pointers and would - /// like to avoid converting back to a slice before executing a search. - /// - /// # Safety - /// - /// * Both `start` and `end` must be valid for reads. - /// * Both `start` and `end` must point to an initialized value. - /// * Both `start` and `end` must point to the same allocated object and - /// must either be in bounds or at most one byte past the end of the - /// allocated object. - /// * Both `start` and `end` must be _derived from_ a pointer to the same - /// object. - /// * The distance between `start` and `end` must not overflow `isize`. - /// * The distance being in bounds must not rely on "wrapping around" the - /// address space. - /// - /// Note that callers may pass a pair of pointers such that `start >= end`. - /// In that case, `None` will always be returned. - #[inline] - pub unsafe fn rfind_raw( - &self, - start: *const u8, - end: *const u8, - ) -> Option<*const u8> { - if start >= end { - return None; - } - let len = end.distance(start); - if len < __m256i::BYTES { - return if len < __m128i::BYTES { - // SAFETY: We require the caller to pass valid start/end - // pointers. - generic::rev_byte_by_byte(start, end, |b| { - b == self.sse2.needle1() - }) - } else { - // SAFETY: We require the caller to pass valid start/end - // pointers. - self.rfind_raw_sse2(start, end) - }; - } - // SAFETY: Building a `One` means it's safe to call both 'sse2' and - // 'avx2' routines. Also, we've checked that our haystack is big - // enough to run on the vector routine. Pointer validity is caller's - // responsibility. - // - // See note in forward routine above for why we don't just call - // `self.avx2.rfind_raw` directly here. - self.rfind_raw_avx2(start, end) - } - - /// Counts all occurrences of this byte in the given haystack represented - /// by raw pointers. - /// - /// This routine is useful if you're already using raw pointers and would - /// like to avoid converting back to a slice before executing a search. - /// - /// # Safety - /// - /// * Both `start` and `end` must be valid for reads. - /// * Both `start` and `end` must point to an initialized value. - /// * Both `start` and `end` must point to the same allocated object and - /// must either be in bounds or at most one byte past the end of the - /// allocated object. 
- /// * Both `start` and `end` must be _derived from_ a pointer to the same - /// object. - /// * The distance between `start` and `end` must not overflow `isize`. - /// * The distance being in bounds must not rely on "wrapping around" the - /// address space. - /// - /// Note that callers may pass a pair of pointers such that `start >= end`. - /// In that case, `0` will always be returned. - #[inline] - pub unsafe fn count_raw(&self, start: *const u8, end: *const u8) -> usize { - if start >= end { - return 0; - } - let len = end.distance(start); - if len < __m256i::BYTES { - return if len < __m128i::BYTES { - // SAFETY: We require the caller to pass valid start/end - // pointers. - generic::count_byte_by_byte(start, end, |b| { - b == self.sse2.needle1() - }) - } else { - // SAFETY: We require the caller to pass valid start/end - // pointers. - self.count_raw_sse2(start, end) - }; - } - // SAFETY: Building a `One` means it's safe to call both 'sse2' and - // 'avx2' routines. Also, we've checked that our haystack is big - // enough to run on the vector routine. Pointer validity is caller's - // responsibility. - self.count_raw_avx2(start, end) - } - - /// Execute a search using SSE2 vectors and routines. - /// - /// # Safety - /// - /// Same as [`One::find_raw`], except the distance between `start` and - /// `end` must be at least the size of an SSE2 vector (in bytes). - /// - /// (The target feature safety obligation is automatically fulfilled by - /// virtue of being a method on `One`, which can only be constructed - /// when it is safe to call `sse2`/`avx2` routines.) - #[target_feature(enable = "sse2")] - #[inline] - unsafe fn find_raw_sse2( - &self, - start: *const u8, - end: *const u8, - ) -> Option<*const u8> { - self.sse2.find_raw(start, end) - } - - /// Execute a search using SSE2 vectors and routines. - /// - /// # Safety - /// - /// Same as [`One::rfind_raw`], except the distance between `start` and - /// `end` must be at least the size of an SSE2 vector (in bytes). - /// - /// (The target feature safety obligation is automatically fulfilled by - /// virtue of being a method on `One`, which can only be constructed - /// when it is safe to call `sse2`/`avx2` routines.) - #[target_feature(enable = "sse2")] - #[inline] - unsafe fn rfind_raw_sse2( - &self, - start: *const u8, - end: *const u8, - ) -> Option<*const u8> { - self.sse2.rfind_raw(start, end) - } - - /// Execute a count using SSE2 vectors and routines. - /// - /// # Safety - /// - /// Same as [`One::count_raw`], except the distance between `start` and - /// `end` must be at least the size of an SSE2 vector (in bytes). - /// - /// (The target feature safety obligation is automatically fulfilled by - /// virtue of being a method on `One`, which can only be constructed - /// when it is safe to call `sse2`/`avx2` routines.) - #[target_feature(enable = "sse2")] - #[inline] - unsafe fn count_raw_sse2( - &self, - start: *const u8, - end: *const u8, - ) -> usize { - self.sse2.count_raw(start, end) - } - - /// Execute a search using AVX2 vectors and routines. - /// - /// # Safety - /// - /// Same as [`One::find_raw`], except the distance between `start` and - /// `end` must be at least the size of an AVX2 vector (in bytes). - /// - /// (The target feature safety obligation is automatically fulfilled by - /// virtue of being a method on `One`, which can only be constructed - /// when it is safe to call `sse2`/`avx2` routines.) 
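A sketch of the x86_64 `One` searcher described above (not part of the patch): construction can be attempted unconditionally because `is_available` performs runtime AVX2 detection (via `is_x86_feature_detected!` when `std` is enabled); this assumes an x86_64 target, and `find_byte` is an illustrative helper name.

// Illustrative sketch: AVX2/SSE2 single-byte search with a scalar fallback.
use memchr::arch::x86_64::avx2::memchr::One;

fn find_byte(haystack: &[u8], needle: u8) -> Option<usize> {
    if let Some(searcher) = One::new(needle) {
        // AVX2 path for long haystacks, SSE2 for short ones, per the
        // dispatch in `find_raw` above.
        searcher.find(haystack)
    } else {
        // Scalar fallback when neither SSE2 nor AVX2 is usable.
        haystack.iter().position(|&b| b == needle)
    }
}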
- #[target_feature(enable = "avx2")] - #[inline] - unsafe fn find_raw_avx2( - &self, - start: *const u8, - end: *const u8, - ) -> Option<*const u8> { - self.avx2.find_raw(start, end) - } - - /// Execute a search using AVX2 vectors and routines. - /// - /// # Safety - /// - /// Same as [`One::rfind_raw`], except the distance between `start` and - /// `end` must be at least the size of an AVX2 vector (in bytes). - /// - /// (The target feature safety obligation is automatically fulfilled by - /// virtue of being a method on `One`, which can only be constructed - /// when it is safe to call `sse2`/`avx2` routines.) - #[target_feature(enable = "avx2")] - #[inline] - unsafe fn rfind_raw_avx2( - &self, - start: *const u8, - end: *const u8, - ) -> Option<*const u8> { - self.avx2.rfind_raw(start, end) - } - - /// Execute a count using AVX2 vectors and routines. - /// - /// # Safety - /// - /// Same as [`One::count_raw`], except the distance between `start` and - /// `end` must be at least the size of an AVX2 vector (in bytes). - /// - /// (The target feature safety obligation is automatically fulfilled by - /// virtue of being a method on `One`, which can only be constructed - /// when it is safe to call `sse2`/`avx2` routines.) - #[target_feature(enable = "avx2")] - #[inline] - unsafe fn count_raw_avx2( - &self, - start: *const u8, - end: *const u8, - ) -> usize { - self.avx2.count_raw(start, end) - } - - /// Returns an iterator over all occurrences of the needle byte in the - /// given haystack. - /// - /// The iterator returned implements `DoubleEndedIterator`. This means it - /// can also be used to find occurrences in reverse order. - #[inline] - pub fn iter<'a, 'h>(&'a self, haystack: &'h [u8]) -> OneIter<'a, 'h> { - OneIter { searcher: self, it: generic::Iter::new(haystack) } - } -} - -/// An iterator over all occurrences of a single byte in a haystack. -/// -/// This iterator implements `DoubleEndedIterator`, which means it can also be -/// used to find occurrences in reverse order. -/// -/// This iterator is created by the [`One::iter`] method. -/// -/// The lifetime parameters are as follows: -/// -/// * `'a` refers to the lifetime of the underlying [`One`] searcher. -/// * `'h` refers to the lifetime of the haystack being searched. -#[derive(Clone, Debug)] -pub struct OneIter<'a, 'h> { - searcher: &'a One, - it: generic::Iter<'h>, -} - -impl<'a, 'h> Iterator for OneIter<'a, 'h> { - type Item = usize; - - #[inline] - fn next(&mut self) -> Option { - // SAFETY: We rely on the generic iterator to provide valid start - // and end pointers, but we guarantee that any pointer returned by - // 'find_raw' falls within the bounds of the start and end pointer. - unsafe { self.it.next(|s, e| self.searcher.find_raw(s, e)) } - } - - #[inline] - fn count(self) -> usize { - self.it.count(|s, e| { - // SAFETY: We rely on our generic iterator to return valid start - // and end pointers. - unsafe { self.searcher.count_raw(s, e) } - }) - } - - #[inline] - fn size_hint(&self) -> (usize, Option) { - self.it.size_hint() - } -} - -impl<'a, 'h> DoubleEndedIterator for OneIter<'a, 'h> { - #[inline] - fn next_back(&mut self) -> Option { - // SAFETY: We rely on the generic iterator to provide valid start - // and end pointers, but we guarantee that any pointer returned by - // 'rfind_raw' falls within the bounds of the start and end pointer. 
- unsafe { self.it.next_back(|s, e| self.searcher.rfind_raw(s, e)) } - } -} - -impl<'a, 'h> core::iter::FusedIterator for OneIter<'a, 'h> {} - -/// Finds all occurrences of two bytes in a haystack. -/// -/// That is, this reports matches of one of two possible bytes. For example, -/// searching for `a` or `b` in `afoobar` would report matches at offsets `0`, -/// `4` and `5`. -#[derive(Clone, Copy, Debug)] -pub struct Two { - /// Used for haystacks less than 32 bytes. - sse2: generic::Two<__m128i>, - /// Used for haystacks bigger than 32 bytes. - avx2: generic::Two<__m256i>, -} - -impl Two { - /// Create a new searcher that finds occurrences of the needle bytes given. - /// - /// This particular searcher is specialized to use AVX2 vector instructions - /// that typically make it quite fast. (SSE2 is used for haystacks that - /// are too short to accommodate an AVX2 vector.) - /// - /// If either SSE2 or AVX2 is unavailable in the current environment, then - /// `None` is returned. - #[inline] - pub fn new(needle1: u8, needle2: u8) -> Option { - if Two::is_available() { - // SAFETY: we check that sse2 and avx2 are available above. - unsafe { Some(Two::new_unchecked(needle1, needle2)) } - } else { - None - } - } - - /// Create a new finder specific to AVX2 vectors and routines without - /// checking that either SSE2 or AVX2 is available. - /// - /// # Safety - /// - /// Callers must guarantee that it is safe to execute both `sse2` and - /// `avx2` instructions in the current environment. - /// - /// Note that it is a common misconception that if one compiles for an - /// `x86_64` target, then they therefore automatically have access to SSE2 - /// instructions. While this is almost always the case, it isn't true in - /// 100% of cases. - #[target_feature(enable = "sse2", enable = "avx2")] - #[inline] - pub unsafe fn new_unchecked(needle1: u8, needle2: u8) -> Two { - Two { - sse2: generic::Two::new(needle1, needle2), - avx2: generic::Two::new(needle1, needle2), - } - } - - /// Returns true when this implementation is available in the current - /// environment. - /// - /// When this is true, it is guaranteed that [`Two::new`] will return - /// a `Some` value. Similarly, when it is false, it is guaranteed that - /// `Two::new` will return a `None` value. - /// - /// Note also that for the lifetime of a single program, if this returns - /// true then it will always return true. - #[inline] - pub fn is_available() -> bool { - #[cfg(not(target_feature = "sse2"))] - { - false - } - #[cfg(target_feature = "sse2")] - { - #[cfg(target_feature = "avx2")] - { - true - } - #[cfg(not(target_feature = "avx2"))] - { - #[cfg(feature = "std")] - { - std::is_x86_feature_detected!("avx2") - } - #[cfg(not(feature = "std"))] - { - false - } - } - } - } - - /// Return the first occurrence of one of the needle bytes in the given - /// haystack. If no such occurrence exists, then `None` is returned. - /// - /// The occurrence is reported as an offset into `haystack`. Its maximum - /// value is `haystack.len() - 1`. - #[inline] - pub fn find(&self, haystack: &[u8]) -> Option { - // SAFETY: `find_raw` guarantees that if a pointer is returned, it - // falls within the bounds of the start and end pointers. - unsafe { - generic::search_slice_with_raw(haystack, |s, e| { - self.find_raw(s, e) - }) - } - } - - /// Return the last occurrence of one of the needle bytes in the given - /// haystack. If no such occurrence exists, then `None` is returned. - /// - /// The occurrence is reported as an offset into `haystack`. 
Its maximum - /// value is `haystack.len() - 1`. - #[inline] - pub fn rfind(&self, haystack: &[u8]) -> Option { - // SAFETY: `find_raw` guarantees that if a pointer is returned, it - // falls within the bounds of the start and end pointers. - unsafe { - generic::search_slice_with_raw(haystack, |s, e| { - self.rfind_raw(s, e) - }) - } - } - - /// Like `find`, but accepts and returns raw pointers. - /// - /// When a match is found, the pointer returned is guaranteed to be - /// `>= start` and `< end`. - /// - /// This routine is useful if you're already using raw pointers and would - /// like to avoid converting back to a slice before executing a search. - /// - /// # Safety - /// - /// * Both `start` and `end` must be valid for reads. - /// * Both `start` and `end` must point to an initialized value. - /// * Both `start` and `end` must point to the same allocated object and - /// must either be in bounds or at most one byte past the end of the - /// allocated object. - /// * Both `start` and `end` must be _derived from_ a pointer to the same - /// object. - /// * The distance between `start` and `end` must not overflow `isize`. - /// * The distance being in bounds must not rely on "wrapping around" the - /// address space. - /// - /// Note that callers may pass a pair of pointers such that `start >= end`. - /// In that case, `None` will always be returned. - #[inline] - pub unsafe fn find_raw( - &self, - start: *const u8, - end: *const u8, - ) -> Option<*const u8> { - if start >= end { - return None; - } - let len = end.distance(start); - if len < __m256i::BYTES { - return if len < __m128i::BYTES { - // SAFETY: We require the caller to pass valid start/end - // pointers. - generic::fwd_byte_by_byte(start, end, |b| { - b == self.sse2.needle1() || b == self.sse2.needle2() - }) - } else { - // SAFETY: We require the caller to pass valid start/end - // pointers. - self.find_raw_sse2(start, end) - }; - } - // SAFETY: Building a `Two` means it's safe to call both 'sse2' and - // 'avx2' routines. Also, we've checked that our haystack is big - // enough to run on the vector routine. Pointer validity is caller's - // responsibility. - // - // Note that we could call `self.avx2.find_raw` directly here. But that - // means we'd have to annotate this routine with `target_feature`. - // Which is fine, because this routine is `unsafe` anyway and the - // `target_feature` obligation is met by virtue of building a `Two`. - // The real problem is that a routine with a `target_feature` - // annotation generally can't be inlined into caller code unless - // the caller code has the same target feature annotations. Namely, - // the common case (at time of writing) is for calling code to not - // have the `avx2` target feature enabled *at compile time*. Without - // `target_feature` on this routine, it can be inlined which will - // handle some of the short-haystack cases above without touching the - // architecture specific code. - self.find_raw_avx2(start, end) - } - - /// Like `rfind`, but accepts and returns raw pointers. - /// - /// When a match is found, the pointer returned is guaranteed to be - /// `>= start` and `< end`. - /// - /// This routine is useful if you're already using raw pointers and would - /// like to avoid converting back to a slice before executing a search. - /// - /// # Safety - /// - /// * Both `start` and `end` must be valid for reads. - /// * Both `start` and `end` must point to an initialized value. 
- /// * Both `start` and `end` must point to the same allocated object and - /// must either be in bounds or at most one byte past the end of the - /// allocated object. - /// * Both `start` and `end` must be _derived from_ a pointer to the same - /// object. - /// * The distance between `start` and `end` must not overflow `isize`. - /// * The distance being in bounds must not rely on "wrapping around" the - /// address space. - /// - /// Note that callers may pass a pair of pointers such that `start >= end`. - /// In that case, `None` will always be returned. - #[inline] - pub unsafe fn rfind_raw( - &self, - start: *const u8, - end: *const u8, - ) -> Option<*const u8> { - if start >= end { - return None; - } - let len = end.distance(start); - if len < __m256i::BYTES { - return if len < __m128i::BYTES { - // SAFETY: We require the caller to pass valid start/end - // pointers. - generic::rev_byte_by_byte(start, end, |b| { - b == self.sse2.needle1() || b == self.sse2.needle2() - }) - } else { - // SAFETY: We require the caller to pass valid start/end - // pointers. - self.rfind_raw_sse2(start, end) - }; - } - // SAFETY: Building a `Two` means it's safe to call both 'sse2' and - // 'avx2' routines. Also, we've checked that our haystack is big - // enough to run on the vector routine. Pointer validity is caller's - // responsibility. - // - // See note in forward routine above for why we don't just call - // `self.avx2.rfind_raw` directly here. - self.rfind_raw_avx2(start, end) - } - - /// Execute a search using SSE2 vectors and routines. - /// - /// # Safety - /// - /// Same as [`Two::find_raw`], except the distance between `start` and - /// `end` must be at least the size of an SSE2 vector (in bytes). - /// - /// (The target feature safety obligation is automatically fulfilled by - /// virtue of being a method on `Two`, which can only be constructed - /// when it is safe to call `sse2`/`avx2` routines.) - #[target_feature(enable = "sse2")] - #[inline] - unsafe fn find_raw_sse2( - &self, - start: *const u8, - end: *const u8, - ) -> Option<*const u8> { - self.sse2.find_raw(start, end) - } - - /// Execute a search using SSE2 vectors and routines. - /// - /// # Safety - /// - /// Same as [`Two::rfind_raw`], except the distance between `start` and - /// `end` must be at least the size of an SSE2 vector (in bytes). - /// - /// (The target feature safety obligation is automatically fulfilled by - /// virtue of being a method on `Two`, which can only be constructed - /// when it is safe to call `sse2`/`avx2` routines.) - #[target_feature(enable = "sse2")] - #[inline] - unsafe fn rfind_raw_sse2( - &self, - start: *const u8, - end: *const u8, - ) -> Option<*const u8> { - self.sse2.rfind_raw(start, end) - } - - /// Execute a search using AVX2 vectors and routines. - /// - /// # Safety - /// - /// Same as [`Two::find_raw`], except the distance between `start` and - /// `end` must be at least the size of an AVX2 vector (in bytes). - /// - /// (The target feature safety obligation is automatically fulfilled by - /// virtue of being a method on `Two`, which can only be constructed - /// when it is safe to call `sse2`/`avx2` routines.) - #[target_feature(enable = "avx2")] - #[inline] - unsafe fn find_raw_avx2( - &self, - start: *const u8, - end: *const u8, - ) -> Option<*const u8> { - self.avx2.find_raw(start, end) - } - - /// Execute a search using AVX2 vectors and routines. 
- ///
- /// # Safety
- ///
- /// Same as [`Two::rfind_raw`], except the distance between `start` and
- /// `end` must be at least the size of an AVX2 vector (in bytes).
- ///
- /// (The target feature safety obligation is automatically fulfilled by
- /// virtue of being a method on `Two`, which can only be constructed
- /// when it is safe to call `sse2`/`avx2` routines.)
- #[target_feature(enable = "avx2")]
- #[inline]
- unsafe fn rfind_raw_avx2(
- &self,
- start: *const u8,
- end: *const u8,
- ) -> Option<*const u8> {
- self.avx2.rfind_raw(start, end)
- }
-
- /// Returns an iterator over all occurrences of the needle bytes in the
- /// given haystack.
- ///
- /// The iterator returned implements `DoubleEndedIterator`. This means it
- /// can also be used to find occurrences in reverse order.
- #[inline]
- pub fn iter<'a, 'h>(&'a self, haystack: &'h [u8]) -> TwoIter<'a, 'h> {
- TwoIter { searcher: self, it: generic::Iter::new(haystack) }
- }
-}
-
-/// An iterator over all occurrences of two possible bytes in a haystack.
-///
-/// This iterator implements `DoubleEndedIterator`, which means it can also be
-/// used to find occurrences in reverse order.
-///
-/// This iterator is created by the [`Two::iter`] method.
-///
-/// The lifetime parameters are as follows:
-///
-/// * `'a` refers to the lifetime of the underlying [`Two`] searcher.
-/// * `'h` refers to the lifetime of the haystack being searched.
-#[derive(Clone, Debug)]
-pub struct TwoIter<'a, 'h> {
- searcher: &'a Two,
- it: generic::Iter<'h>,
-}
-
-impl<'a, 'h> Iterator for TwoIter<'a, 'h> {
- type Item = usize;
-
- #[inline]
- fn next(&mut self) -> Option<usize> {
- // SAFETY: We rely on the generic iterator to provide valid start
- // and end pointers, but we guarantee that any pointer returned by
- // 'find_raw' falls within the bounds of the start and end pointer.
- unsafe { self.it.next(|s, e| self.searcher.find_raw(s, e)) }
- }
-
- #[inline]
- fn size_hint(&self) -> (usize, Option<usize>) {
- self.it.size_hint()
- }
-}
-
-impl<'a, 'h> DoubleEndedIterator for TwoIter<'a, 'h> {
- #[inline]
- fn next_back(&mut self) -> Option<usize> {
- // SAFETY: We rely on the generic iterator to provide valid start
- // and end pointers, but we guarantee that any pointer returned by
- // 'rfind_raw' falls within the bounds of the start and end pointer.
- unsafe { self.it.next_back(|s, e| self.searcher.rfind_raw(s, e)) }
- }
-}
-
-impl<'a, 'h> core::iter::FusedIterator for TwoIter<'a, 'h> {}
-
-/// Finds all occurrences of three bytes in a haystack.
-///
-/// That is, this reports matches of one of three possible bytes. For example,
-/// searching for `a`, `b` or `o` in `afoobar` would report matches at offsets
-/// `0`, `2`, `3`, `4` and `5`.
-#[derive(Clone, Copy, Debug)]
-pub struct Three {
- /// Used for haystacks less than 32 bytes.
- sse2: generic::Three<__m128i>,
- /// Used for haystacks bigger than 32 bytes.
- avx2: generic::Three<__m256i>,
-}
-
-impl Three {
- /// Create a new searcher that finds occurrences of the needle bytes given.
- ///
- /// This particular searcher is specialized to use AVX2 vector instructions
- /// that typically make it quite fast. (SSE2 is used for haystacks that
- /// are too short to accommodate an AVX2 vector.)
- ///
- /// If either SSE2 or AVX2 is unavailable in the current environment, then
- /// `None` is returned.
- #[inline]
- pub fn new(needle1: u8, needle2: u8, needle3: u8) -> Option<Three> {
- if Three::is_available() {
- // SAFETY: we check that sse2 and avx2 are available above.
- unsafe { Some(Three::new_unchecked(needle1, needle2, needle3)) } - } else { - None - } - } - - /// Create a new finder specific to AVX2 vectors and routines without - /// checking that either SSE2 or AVX2 is available. - /// - /// # Safety - /// - /// Callers must guarantee that it is safe to execute both `sse2` and - /// `avx2` instructions in the current environment. - /// - /// Note that it is a common misconception that if one compiles for an - /// `x86_64` target, then they therefore automatically have access to SSE2 - /// instructions. While this is almost always the case, it isn't true in - /// 100% of cases. - #[target_feature(enable = "sse2", enable = "avx2")] - #[inline] - pub unsafe fn new_unchecked( - needle1: u8, - needle2: u8, - needle3: u8, - ) -> Three { - Three { - sse2: generic::Three::new(needle1, needle2, needle3), - avx2: generic::Three::new(needle1, needle2, needle3), - } - } - - /// Returns true when this implementation is available in the current - /// environment. - /// - /// When this is true, it is guaranteed that [`Three::new`] will return - /// a `Some` value. Similarly, when it is false, it is guaranteed that - /// `Three::new` will return a `None` value. - /// - /// Note also that for the lifetime of a single program, if this returns - /// true then it will always return true. - #[inline] - pub fn is_available() -> bool { - #[cfg(not(target_feature = "sse2"))] - { - false - } - #[cfg(target_feature = "sse2")] - { - #[cfg(target_feature = "avx2")] - { - true - } - #[cfg(not(target_feature = "avx2"))] - { - #[cfg(feature = "std")] - { - std::is_x86_feature_detected!("avx2") - } - #[cfg(not(feature = "std"))] - { - false - } - } - } - } - - /// Return the first occurrence of one of the needle bytes in the given - /// haystack. If no such occurrence exists, then `None` is returned. - /// - /// The occurrence is reported as an offset into `haystack`. Its maximum - /// value is `haystack.len() - 1`. - #[inline] - pub fn find(&self, haystack: &[u8]) -> Option { - // SAFETY: `find_raw` guarantees that if a pointer is returned, it - // falls within the bounds of the start and end pointers. - unsafe { - generic::search_slice_with_raw(haystack, |s, e| { - self.find_raw(s, e) - }) - } - } - - /// Return the last occurrence of one of the needle bytes in the given - /// haystack. If no such occurrence exists, then `None` is returned. - /// - /// The occurrence is reported as an offset into `haystack`. Its maximum - /// value is `haystack.len() - 1`. - #[inline] - pub fn rfind(&self, haystack: &[u8]) -> Option { - // SAFETY: `find_raw` guarantees that if a pointer is returned, it - // falls within the bounds of the start and end pointers. - unsafe { - generic::search_slice_with_raw(haystack, |s, e| { - self.rfind_raw(s, e) - }) - } - } - - /// Like `find`, but accepts and returns raw pointers. - /// - /// When a match is found, the pointer returned is guaranteed to be - /// `>= start` and `< end`. - /// - /// This routine is useful if you're already using raw pointers and would - /// like to avoid converting back to a slice before executing a search. - /// - /// # Safety - /// - /// * Both `start` and `end` must be valid for reads. - /// * Both `start` and `end` must point to an initialized value. - /// * Both `start` and `end` must point to the same allocated object and - /// must either be in bounds or at most one byte past the end of the - /// allocated object. - /// * Both `start` and `end` must be _derived from_ a pointer to the same - /// object. 
- /// * The distance between `start` and `end` must not overflow `isize`. - /// * The distance being in bounds must not rely on "wrapping around" the - /// address space. - /// - /// Note that callers may pass a pair of pointers such that `start >= end`. - /// In that case, `None` will always be returned. - #[inline] - pub unsafe fn find_raw( - &self, - start: *const u8, - end: *const u8, - ) -> Option<*const u8> { - if start >= end { - return None; - } - let len = end.distance(start); - if len < __m256i::BYTES { - return if len < __m128i::BYTES { - // SAFETY: We require the caller to pass valid start/end - // pointers. - generic::fwd_byte_by_byte(start, end, |b| { - b == self.sse2.needle1() - || b == self.sse2.needle2() - || b == self.sse2.needle3() - }) - } else { - // SAFETY: We require the caller to pass valid start/end - // pointers. - self.find_raw_sse2(start, end) - }; - } - // SAFETY: Building a `Three` means it's safe to call both 'sse2' and - // 'avx2' routines. Also, we've checked that our haystack is big - // enough to run on the vector routine. Pointer validity is caller's - // responsibility. - // - // Note that we could call `self.avx2.find_raw` directly here. But that - // means we'd have to annotate this routine with `target_feature`. - // Which is fine, because this routine is `unsafe` anyway and the - // `target_feature` obligation is met by virtue of building a `Three`. - // The real problem is that a routine with a `target_feature` - // annotation generally can't be inlined into caller code unless - // the caller code has the same target feature annotations. Namely, - // the common case (at time of writing) is for calling code to not - // have the `avx2` target feature enabled *at compile time*. Without - // `target_feature` on this routine, it can be inlined which will - // handle some of the short-haystack cases above without touching the - // architecture specific code. - self.find_raw_avx2(start, end) - } - - /// Like `rfind`, but accepts and returns raw pointers. - /// - /// When a match is found, the pointer returned is guaranteed to be - /// `>= start` and `< end`. - /// - /// This routine is useful if you're already using raw pointers and would - /// like to avoid converting back to a slice before executing a search. - /// - /// # Safety - /// - /// * Both `start` and `end` must be valid for reads. - /// * Both `start` and `end` must point to an initialized value. - /// * Both `start` and `end` must point to the same allocated object and - /// must either be in bounds or at most one byte past the end of the - /// allocated object. - /// * Both `start` and `end` must be _derived from_ a pointer to the same - /// object. - /// * The distance between `start` and `end` must not overflow `isize`. - /// * The distance being in bounds must not rely on "wrapping around" the - /// address space. - /// - /// Note that callers may pass a pair of pointers such that `start >= end`. - /// In that case, `None` will always be returned. - #[inline] - pub unsafe fn rfind_raw( - &self, - start: *const u8, - end: *const u8, - ) -> Option<*const u8> { - if start >= end { - return None; - } - let len = end.distance(start); - if len < __m256i::BYTES { - return if len < __m128i::BYTES { - // SAFETY: We require the caller to pass valid start/end - // pointers. - generic::rev_byte_by_byte(start, end, |b| { - b == self.sse2.needle1() - || b == self.sse2.needle2() - || b == self.sse2.needle3() - }) - } else { - // SAFETY: We require the caller to pass valid start/end - // pointers. 
- self.rfind_raw_sse2(start, end) - }; - } - // SAFETY: Building a `Three` means it's safe to call both 'sse2' and - // 'avx2' routines. Also, we've checked that our haystack is big - // enough to run on the vector routine. Pointer validity is caller's - // responsibility. - // - // See note in forward routine above for why we don't just call - // `self.avx2.rfind_raw` directly here. - self.rfind_raw_avx2(start, end) - } - - /// Execute a search using SSE2 vectors and routines. - /// - /// # Safety - /// - /// Same as [`Three::find_raw`], except the distance between `start` and - /// `end` must be at least the size of an SSE2 vector (in bytes). - /// - /// (The target feature safety obligation is automatically fulfilled by - /// virtue of being a method on `Three`, which can only be constructed - /// when it is safe to call `sse2`/`avx2` routines.) - #[target_feature(enable = "sse2")] - #[inline] - unsafe fn find_raw_sse2( - &self, - start: *const u8, - end: *const u8, - ) -> Option<*const u8> { - self.sse2.find_raw(start, end) - } - - /// Execute a search using SSE2 vectors and routines. - /// - /// # Safety - /// - /// Same as [`Three::rfind_raw`], except the distance between `start` and - /// `end` must be at least the size of an SSE2 vector (in bytes). - /// - /// (The target feature safety obligation is automatically fulfilled by - /// virtue of being a method on `Three`, which can only be constructed - /// when it is safe to call `sse2`/`avx2` routines.) - #[target_feature(enable = "sse2")] - #[inline] - unsafe fn rfind_raw_sse2( - &self, - start: *const u8, - end: *const u8, - ) -> Option<*const u8> { - self.sse2.rfind_raw(start, end) - } - - /// Execute a search using AVX2 vectors and routines. - /// - /// # Safety - /// - /// Same as [`Three::find_raw`], except the distance between `start` and - /// `end` must be at least the size of an AVX2 vector (in bytes). - /// - /// (The target feature safety obligation is automatically fulfilled by - /// virtue of being a method on `Three`, which can only be constructed - /// when it is safe to call `sse2`/`avx2` routines.) - #[target_feature(enable = "avx2")] - #[inline] - unsafe fn find_raw_avx2( - &self, - start: *const u8, - end: *const u8, - ) -> Option<*const u8> { - self.avx2.find_raw(start, end) - } - - /// Execute a search using AVX2 vectors and routines. - /// - /// # Safety - /// - /// Same as [`Three::rfind_raw`], except the distance between `start` and - /// `end` must be at least the size of an AVX2 vector (in bytes). - /// - /// (The target feature safety obligation is automatically fulfilled by - /// virtue of being a method on `Three`, which can only be constructed - /// when it is safe to call `sse2`/`avx2` routines.) - #[target_feature(enable = "avx2")] - #[inline] - unsafe fn rfind_raw_avx2( - &self, - start: *const u8, - end: *const u8, - ) -> Option<*const u8> { - self.avx2.rfind_raw(start, end) - } - - /// Returns an iterator over all occurrences of the needle bytes in the - /// given haystack. - /// - /// The iterator returned implements `DoubleEndedIterator`. This means it - /// can also be used to find occurrences in reverse order. - #[inline] - pub fn iter<'a, 'h>(&'a self, haystack: &'h [u8]) -> ThreeIter<'a, 'h> { - ThreeIter { searcher: self, it: generic::Iter::new(haystack) } - } -} - -/// An iterator over all occurrences of three possible bytes in a haystack. -/// -/// This iterator implements `DoubleEndedIterator`, which means it can also be -/// used to find occurrences in reverse order. 
-/// -/// This iterator is created by the [`Three::iter`] method. -/// -/// The lifetime parameters are as follows: -/// -/// * `'a` refers to the lifetime of the underlying [`Three`] searcher. -/// * `'h` refers to the lifetime of the haystack being searched. -#[derive(Clone, Debug)] -pub struct ThreeIter<'a, 'h> { - searcher: &'a Three, - it: generic::Iter<'h>, -} - -impl<'a, 'h> Iterator for ThreeIter<'a, 'h> { - type Item = usize; - - #[inline] - fn next(&mut self) -> Option { - // SAFETY: We rely on the generic iterator to provide valid start - // and end pointers, but we guarantee that any pointer returned by - // 'find_raw' falls within the bounds of the start and end pointer. - unsafe { self.it.next(|s, e| self.searcher.find_raw(s, e)) } - } - - #[inline] - fn size_hint(&self) -> (usize, Option) { - self.it.size_hint() - } -} - -impl<'a, 'h> DoubleEndedIterator for ThreeIter<'a, 'h> { - #[inline] - fn next_back(&mut self) -> Option { - // SAFETY: We rely on the generic iterator to provide valid start - // and end pointers, but we guarantee that any pointer returned by - // 'rfind_raw' falls within the bounds of the start and end pointer. - unsafe { self.it.next_back(|s, e| self.searcher.rfind_raw(s, e)) } - } -} - -impl<'a, 'h> core::iter::FusedIterator for ThreeIter<'a, 'h> {} - -#[cfg(test)] -mod tests { - use super::*; - - define_memchr_quickcheck!(super); - - #[test] - fn forward_one() { - crate::tests::memchr::Runner::new(1).forward_iter( - |haystack, needles| { - Some(One::new(needles[0])?.iter(haystack).collect()) - }, - ) - } - - #[test] - fn reverse_one() { - crate::tests::memchr::Runner::new(1).reverse_iter( - |haystack, needles| { - Some(One::new(needles[0])?.iter(haystack).rev().collect()) - }, - ) - } - - #[test] - fn count_one() { - crate::tests::memchr::Runner::new(1).count_iter(|haystack, needles| { - Some(One::new(needles[0])?.iter(haystack).count()) - }) - } - - #[test] - fn forward_two() { - crate::tests::memchr::Runner::new(2).forward_iter( - |haystack, needles| { - let n1 = needles.get(0).copied()?; - let n2 = needles.get(1).copied()?; - Some(Two::new(n1, n2)?.iter(haystack).collect()) - }, - ) - } - - #[test] - fn reverse_two() { - crate::tests::memchr::Runner::new(2).reverse_iter( - |haystack, needles| { - let n1 = needles.get(0).copied()?; - let n2 = needles.get(1).copied()?; - Some(Two::new(n1, n2)?.iter(haystack).rev().collect()) - }, - ) - } - - #[test] - fn forward_three() { - crate::tests::memchr::Runner::new(3).forward_iter( - |haystack, needles| { - let n1 = needles.get(0).copied()?; - let n2 = needles.get(1).copied()?; - let n3 = needles.get(2).copied()?; - Some(Three::new(n1, n2, n3)?.iter(haystack).collect()) - }, - ) - } - - #[test] - fn reverse_three() { - crate::tests::memchr::Runner::new(3).reverse_iter( - |haystack, needles| { - let n1 = needles.get(0).copied()?; - let n2 = needles.get(1).copied()?; - let n3 = needles.get(2).copied()?; - Some(Three::new(n1, n2, n3)?.iter(haystack).rev().collect()) - }, - ) - } -} diff --git a/vendor/memchr/src/arch/x86_64/avx2/mod.rs b/vendor/memchr/src/arch/x86_64/avx2/mod.rs deleted file mode 100644 index ee4097d6f4c34d..00000000000000 --- a/vendor/memchr/src/arch/x86_64/avx2/mod.rs +++ /dev/null @@ -1,6 +0,0 @@ -/*! -Algorithms for the `x86_64` target using 256-bit vectors via AVX2. 
-*/ - -pub mod memchr; -pub mod packedpair; diff --git a/vendor/memchr/src/arch/x86_64/avx2/packedpair.rs b/vendor/memchr/src/arch/x86_64/avx2/packedpair.rs deleted file mode 100644 index efae7b66c72c5b..00000000000000 --- a/vendor/memchr/src/arch/x86_64/avx2/packedpair.rs +++ /dev/null @@ -1,272 +0,0 @@ -/*! -A 256-bit vector implementation of the "packed pair" SIMD algorithm. - -The "packed pair" algorithm is based on the [generic SIMD] algorithm. The main -difference is that it (by default) uses a background distribution of byte -frequencies to heuristically select the pair of bytes to search for. - -[generic SIMD]: http://0x80.pl/articles/simd-strfind.html#first-and-last -*/ - -use core::arch::x86_64::{__m128i, __m256i}; - -use crate::arch::{all::packedpair::Pair, generic::packedpair}; - -/// A "packed pair" finder that uses 256-bit vector operations. -/// -/// This finder picks two bytes that it believes have high predictive power -/// for indicating an overall match of a needle. Depending on whether -/// `Finder::find` or `Finder::find_prefilter` is used, it reports offsets -/// where the needle matches or could match. In the prefilter case, candidates -/// are reported whenever the [`Pair`] of bytes given matches. -#[derive(Clone, Copy, Debug)] -pub struct Finder { - sse2: packedpair::Finder<__m128i>, - avx2: packedpair::Finder<__m256i>, -} - -impl Finder { - /// Create a new pair searcher. The searcher returned can either report - /// exact matches of `needle` or act as a prefilter and report candidate - /// positions of `needle`. - /// - /// If AVX2 is unavailable in the current environment or if a [`Pair`] - /// could not be constructed from the needle given, then `None` is - /// returned. - #[inline] - pub fn new(needle: &[u8]) -> Option { - Finder::with_pair(needle, Pair::new(needle)?) - } - - /// Create a new "packed pair" finder using the pair of bytes given. - /// - /// This constructor permits callers to control precisely which pair of - /// bytes is used as a predicate. - /// - /// If AVX2 is unavailable in the current environment, then `None` is - /// returned. - #[inline] - pub fn with_pair(needle: &[u8], pair: Pair) -> Option { - if Finder::is_available() { - // SAFETY: we check that sse2/avx2 is available above. We are also - // guaranteed to have needle.len() > 1 because we have a valid - // Pair. - unsafe { Some(Finder::with_pair_impl(needle, pair)) } - } else { - None - } - } - - /// Create a new `Finder` specific to SSE2 vectors and routines. - /// - /// # Safety - /// - /// Same as the safety for `packedpair::Finder::new`, and callers must also - /// ensure that both SSE2 and AVX2 are available. - #[target_feature(enable = "sse2", enable = "avx2")] - #[inline] - unsafe fn with_pair_impl(needle: &[u8], pair: Pair) -> Finder { - let sse2 = packedpair::Finder::<__m128i>::new(needle, pair); - let avx2 = packedpair::Finder::<__m256i>::new(needle, pair); - Finder { sse2, avx2 } - } - - /// Returns true when this implementation is available in the current - /// environment. - /// - /// When this is true, it is guaranteed that [`Finder::with_pair`] will - /// return a `Some` value. Similarly, when it is false, it is guaranteed - /// that `Finder::with_pair` will return a `None` value. Notice that this - /// does not guarantee that [`Finder::new`] will return a `Finder`. Namely, - /// even when `Finder::is_available` is true, it is not guaranteed that a - /// valid [`Pair`] can be found from the needle given. 
- ///
- /// Note also that for the lifetime of a single program, if this returns
- /// true then it will always return true.
- #[inline]
- pub fn is_available() -> bool {
- #[cfg(not(target_feature = "sse2"))]
- {
- false
- }
- #[cfg(target_feature = "sse2")]
- {
- #[cfg(target_feature = "avx2")]
- {
- true
- }
- #[cfg(not(target_feature = "avx2"))]
- {
- #[cfg(feature = "std")]
- {
- std::is_x86_feature_detected!("avx2")
- }
- #[cfg(not(feature = "std"))]
- {
- false
- }
- }
- }
- }
-
- /// Execute a search using AVX2 vectors and routines.
- ///
- /// # Panics
- ///
- /// When `haystack.len()` is less than [`Finder::min_haystack_len`].
- #[inline]
- pub fn find(&self, haystack: &[u8], needle: &[u8]) -> Option<usize> {
- // SAFETY: Building a `Finder` means it's safe to call 'sse2' routines.
- unsafe { self.find_impl(haystack, needle) }
- }
-
- /// Run this finder on the given haystack as a prefilter.
- ///
- /// If a candidate match is found, then an offset where the needle *could*
- /// begin in the haystack is returned.
- ///
- /// # Panics
- ///
- /// When `haystack.len()` is less than [`Finder::min_haystack_len`].
- #[inline]
- pub fn find_prefilter(&self, haystack: &[u8]) -> Option<usize> {
- // SAFETY: Building a `Finder` means it's safe to call 'sse2' routines.
- unsafe { self.find_prefilter_impl(haystack) }
- }
-
- /// Execute a search using AVX2 vectors and routines.
- ///
- /// # Panics
- ///
- /// When `haystack.len()` is less than [`Finder::min_haystack_len`].
- ///
- /// # Safety
- ///
- /// (The target feature safety obligation is automatically fulfilled by
- /// virtue of being a method on `Finder`, which can only be constructed
- /// when it is safe to call `sse2` and `avx2` routines.)
- #[target_feature(enable = "sse2", enable = "avx2")]
- #[inline]
- unsafe fn find_impl(
- &self,
- haystack: &[u8],
- needle: &[u8],
- ) -> Option<usize> {
- if haystack.len() < self.avx2.min_haystack_len() {
- self.sse2.find(haystack, needle)
- } else {
- self.avx2.find(haystack, needle)
- }
- }
-
- /// Execute a prefilter search using AVX2 vectors and routines.
- ///
- /// # Panics
- ///
- /// When `haystack.len()` is less than [`Finder::min_haystack_len`].
- ///
- /// # Safety
- ///
- /// (The target feature safety obligation is automatically fulfilled by
- /// virtue of being a method on `Finder`, which can only be constructed
- /// when it is safe to call `sse2` and `avx2` routines.)
- #[target_feature(enable = "sse2", enable = "avx2")]
- #[inline]
- unsafe fn find_prefilter_impl(&self, haystack: &[u8]) -> Option<usize> {
- if haystack.len() < self.avx2.min_haystack_len() {
- self.sse2.find_prefilter(haystack)
- } else {
- self.avx2.find_prefilter(haystack)
- }
- }
-
- /// Returns the pair of offsets (into the needle) used to check as a
- /// predicate before confirming whether a needle exists at a particular
- /// position.
- #[inline]
- pub fn pair(&self) -> &Pair {
- self.avx2.pair()
- }
-
- /// Returns the minimum haystack length that this `Finder` can search.
- ///
- /// Using a haystack with length smaller than this in a search will result
- /// in a panic. The reason for this restriction is that this finder is
- /// meant to be a low-level component that is part of a larger substring
- /// strategy. In that sense, it avoids trying to handle all cases and
- /// instead only handles the cases that it can handle very well.
- #[inline] - pub fn min_haystack_len(&self) -> usize { - // The caller doesn't need to care about AVX2's min_haystack_len - // since this implementation will automatically switch to the SSE2 - // implementation if the haystack is too short for AVX2. Therefore, the - // caller only needs to care about SSE2's min_haystack_len. - // - // This does assume that SSE2's min_haystack_len is less than or - // equal to AVX2's min_haystack_len. In practice, this is true and - // there is no way it could be false based on how this Finder is - // implemented. Namely, both SSE2 and AVX2 use the same `Pair`. If - // they used different pairs, then it's possible (although perhaps - // pathological) for SSE2's min_haystack_len to be bigger than AVX2's. - self.sse2.min_haystack_len() - } -} - -#[cfg(test)] -mod tests { - use super::*; - - fn find(haystack: &[u8], needle: &[u8]) -> Option> { - let f = Finder::new(needle)?; - if haystack.len() < f.min_haystack_len() { - return None; - } - Some(f.find(haystack, needle)) - } - - define_substring_forward_quickcheck!(find); - - #[test] - fn forward_substring() { - crate::tests::substring::Runner::new().fwd(find).run() - } - - #[test] - fn forward_packedpair() { - fn find( - haystack: &[u8], - needle: &[u8], - index1: u8, - index2: u8, - ) -> Option> { - let pair = Pair::with_indices(needle, index1, index2)?; - let f = Finder::with_pair(needle, pair)?; - if haystack.len() < f.min_haystack_len() { - return None; - } - Some(f.find(haystack, needle)) - } - crate::tests::packedpair::Runner::new().fwd(find).run() - } - - #[test] - fn forward_packedpair_prefilter() { - fn find( - haystack: &[u8], - needle: &[u8], - index1: u8, - index2: u8, - ) -> Option> { - if !cfg!(target_feature = "sse2") { - return None; - } - let pair = Pair::with_indices(needle, index1, index2)?; - let f = Finder::with_pair(needle, pair)?; - if haystack.len() < f.min_haystack_len() { - return None; - } - Some(f.find_prefilter(haystack)) - } - crate::tests::packedpair::Runner::new().fwd(find).run() - } -} diff --git a/vendor/memchr/src/arch/x86_64/memchr.rs b/vendor/memchr/src/arch/x86_64/memchr.rs deleted file mode 100644 index edb6d431d97304..00000000000000 --- a/vendor/memchr/src/arch/x86_64/memchr.rs +++ /dev/null @@ -1,335 +0,0 @@ -/*! -Wrapper routines for `memchr` and friends. - -These routines efficiently dispatch to the best implementation based on what -the CPU supports. -*/ - -/// Provides a way to run a memchr-like function while amortizing the cost of -/// runtime CPU feature detection. -/// -/// This works by loading a function pointer from an atomic global. Initially, -/// this global is set to a function that does CPU feature detection. For -/// example, if AVX2 is enabled, then the AVX2 implementation is used. -/// Otherwise, at least on x86_64, the SSE2 implementation is used. (And -/// in some niche cases, if SSE2 isn't available, then the architecture -/// independent fallback implementation is used.) -/// -/// After the first call to this function, the atomic global is replaced with -/// the specific AVX2, SSE2 or fallback routine chosen. Subsequent calls then -/// will directly call the chosen routine instead of needing to go through the -/// CPU feature detection branching again. 
-/// -/// This particular macro is specifically written to provide the implementation -/// of functions with the following signature: -/// -/// ```ignore -/// fn memchr(needle1: u8, start: *const u8, end: *const u8) -> Option; -/// ``` -/// -/// Where you can also have `memchr2` and `memchr3`, but with `needle2` and -/// `needle3`, respectively. The `start` and `end` parameters correspond to the -/// start and end of the haystack, respectively. -/// -/// We use raw pointers here instead of the more obvious `haystack: &[u8]` so -/// that the function is compatible with our lower level iterator logic that -/// operates on raw pointers. We use this macro to implement "raw" memchr -/// routines with the signature above, and then define memchr routines using -/// regular slices on top of them. -/// -/// Note that we use `#[cfg(target_feature = "sse2")]` below even though -/// it shouldn't be strictly necessary because without it, it seems to -/// cause the compiler to blow up. I guess it can't handle a function -/// pointer being created with a sse target feature? Dunno. See the -/// `build-for-x86-64-but-non-sse-target` CI job if you want to experiment with -/// this. -/// -/// # Safety -/// -/// Primarily callers must ensure that `$fnty` is a correct function pointer -/// type and not something else. -/// -/// Callers must also ensure that `$memchrty::$memchrfind` corresponds to a -/// routine that returns a valid function pointer when a match is found. That -/// is, a pointer that is `>= start` and `< end`. -/// -/// Callers must also ensure that the `$hay_start` and `$hay_end` identifiers -/// correspond to valid pointers. -macro_rules! unsafe_ifunc { - ( - $memchrty:ident, - $memchrfind:ident, - $fnty:ty, - $retty:ty, - $hay_start:ident, - $hay_end:ident, - $($needle:ident),+ - ) => {{ - #![allow(unused_unsafe)] - - use core::sync::atomic::{AtomicPtr, Ordering}; - - type Fn = *mut (); - type RealFn = $fnty; - static FN: AtomicPtr<()> = AtomicPtr::new(detect as Fn); - - #[cfg(target_feature = "sse2")] - #[target_feature(enable = "sse2", enable = "avx2")] - unsafe fn find_avx2( - $($needle: u8),+, - $hay_start: *const u8, - $hay_end: *const u8, - ) -> $retty { - use crate::arch::x86_64::avx2::memchr::$memchrty; - $memchrty::new_unchecked($($needle),+) - .$memchrfind($hay_start, $hay_end) - } - - #[cfg(target_feature = "sse2")] - #[target_feature(enable = "sse2")] - unsafe fn find_sse2( - $($needle: u8),+, - $hay_start: *const u8, - $hay_end: *const u8, - ) -> $retty { - use crate::arch::x86_64::sse2::memchr::$memchrty; - $memchrty::new_unchecked($($needle),+) - .$memchrfind($hay_start, $hay_end) - } - - unsafe fn find_fallback( - $($needle: u8),+, - $hay_start: *const u8, - $hay_end: *const u8, - ) -> $retty { - use crate::arch::all::memchr::$memchrty; - $memchrty::new($($needle),+).$memchrfind($hay_start, $hay_end) - } - - unsafe fn detect( - $($needle: u8),+, - $hay_start: *const u8, - $hay_end: *const u8, - ) -> $retty { - let fun = { - #[cfg(not(target_feature = "sse2"))] - { - debug!( - "no sse2 feature available, using fallback for {}", - stringify!($memchrty), - ); - find_fallback as RealFn - } - #[cfg(target_feature = "sse2")] - { - use crate::arch::x86_64::{sse2, avx2}; - if avx2::memchr::$memchrty::is_available() { - debug!("chose AVX2 for {}", stringify!($memchrty)); - find_avx2 as RealFn - } else if sse2::memchr::$memchrty::is_available() { - debug!("chose SSE2 for {}", stringify!($memchrty)); - find_sse2 as RealFn - } else { - debug!("chose fallback for {}", 
stringify!($memchrty)); - find_fallback as RealFn - } - } - }; - FN.store(fun as Fn, Ordering::Relaxed); - // SAFETY: The only thing we need to uphold here is the - // `#[target_feature]` requirements. Since we check is_available - // above before using the corresponding implementation, we are - // guaranteed to only call code that is supported on the current - // CPU. - fun($($needle),+, $hay_start, $hay_end) - } - - // SAFETY: By virtue of the caller contract, RealFn is a function - // pointer, which is always safe to transmute with a *mut (). Also, - // since we use $memchrty::is_available, it is guaranteed to be safe - // to call $memchrty::$memchrfind. - unsafe { - let fun = FN.load(Ordering::Relaxed); - core::mem::transmute::(fun)( - $($needle),+, - $hay_start, - $hay_end, - ) - } - }}; -} - -// The routines below dispatch to AVX2, SSE2 or a fallback routine based on -// what's available in the current environment. The secret sauce here is that -// we only check for which one to use approximately once, and then "cache" that -// choice into a global function pointer. Subsequent invocations then just call -// the appropriate function directly. - -/// memchr, but using raw pointers to represent the haystack. -/// -/// # Safety -/// -/// Pointers must be valid. See `One::find_raw`. -#[inline(always)] -pub(crate) fn memchr_raw( - n1: u8, - start: *const u8, - end: *const u8, -) -> Option<*const u8> { - // SAFETY: We provide a valid function pointer type. - unsafe_ifunc!( - One, - find_raw, - unsafe fn(u8, *const u8, *const u8) -> Option<*const u8>, - Option<*const u8>, - start, - end, - n1 - ) -} - -/// memrchr, but using raw pointers to represent the haystack. -/// -/// # Safety -/// -/// Pointers must be valid. See `One::rfind_raw`. -#[inline(always)] -pub(crate) fn memrchr_raw( - n1: u8, - start: *const u8, - end: *const u8, -) -> Option<*const u8> { - // SAFETY: We provide a valid function pointer type. - unsafe_ifunc!( - One, - rfind_raw, - unsafe fn(u8, *const u8, *const u8) -> Option<*const u8>, - Option<*const u8>, - start, - end, - n1 - ) -} - -/// memchr2, but using raw pointers to represent the haystack. -/// -/// # Safety -/// -/// Pointers must be valid. See `Two::find_raw`. -#[inline(always)] -pub(crate) fn memchr2_raw( - n1: u8, - n2: u8, - start: *const u8, - end: *const u8, -) -> Option<*const u8> { - // SAFETY: We provide a valid function pointer type. - unsafe_ifunc!( - Two, - find_raw, - unsafe fn(u8, u8, *const u8, *const u8) -> Option<*const u8>, - Option<*const u8>, - start, - end, - n1, - n2 - ) -} - -/// memrchr2, but using raw pointers to represent the haystack. -/// -/// # Safety -/// -/// Pointers must be valid. See `Two::rfind_raw`. -#[inline(always)] -pub(crate) fn memrchr2_raw( - n1: u8, - n2: u8, - start: *const u8, - end: *const u8, -) -> Option<*const u8> { - // SAFETY: We provide a valid function pointer type. - unsafe_ifunc!( - Two, - rfind_raw, - unsafe fn(u8, u8, *const u8, *const u8) -> Option<*const u8>, - Option<*const u8>, - start, - end, - n1, - n2 - ) -} - -/// memchr3, but using raw pointers to represent the haystack. -/// -/// # Safety -/// -/// Pointers must be valid. See `Three::find_raw`. -#[inline(always)] -pub(crate) fn memchr3_raw( - n1: u8, - n2: u8, - n3: u8, - start: *const u8, - end: *const u8, -) -> Option<*const u8> { - // SAFETY: We provide a valid function pointer type. 
- unsafe_ifunc!( - Three, - find_raw, - unsafe fn(u8, u8, u8, *const u8, *const u8) -> Option<*const u8>, - Option<*const u8>, - start, - end, - n1, - n2, - n3 - ) -} - -/// memrchr3, but using raw pointers to represent the haystack. -/// -/// # Safety -/// -/// Pointers must be valid. See `Three::rfind_raw`. -#[inline(always)] -pub(crate) fn memrchr3_raw( - n1: u8, - n2: u8, - n3: u8, - start: *const u8, - end: *const u8, -) -> Option<*const u8> { - // SAFETY: We provide a valid function pointer type. - unsafe_ifunc!( - Three, - rfind_raw, - unsafe fn(u8, u8, u8, *const u8, *const u8) -> Option<*const u8>, - Option<*const u8>, - start, - end, - n1, - n2, - n3 - ) -} - -/// Count all matching bytes, but using raw pointers to represent the haystack. -/// -/// # Safety -/// -/// Pointers must be valid. See `One::count_raw`. -#[inline(always)] -pub(crate) fn count_raw(n1: u8, start: *const u8, end: *const u8) -> usize { - // SAFETY: We provide a valid function pointer type. - unsafe_ifunc!( - One, - count_raw, - unsafe fn(u8, *const u8, *const u8) -> usize, - usize, - start, - end, - n1 - ) -} diff --git a/vendor/memchr/src/arch/x86_64/mod.rs b/vendor/memchr/src/arch/x86_64/mod.rs deleted file mode 100644 index 5dad7218216b79..00000000000000 --- a/vendor/memchr/src/arch/x86_64/mod.rs +++ /dev/null @@ -1,8 +0,0 @@ -/*! -Vector algorithms for the `x86_64` target. -*/ - -pub mod avx2; -pub mod sse2; - -pub(crate) mod memchr; diff --git a/vendor/memchr/src/arch/x86_64/sse2/memchr.rs b/vendor/memchr/src/arch/x86_64/sse2/memchr.rs deleted file mode 100644 index 79572b82b1c618..00000000000000 --- a/vendor/memchr/src/arch/x86_64/sse2/memchr.rs +++ /dev/null @@ -1,1077 +0,0 @@ -/*! -This module defines 128-bit vector implementations of `memchr` and friends. - -The main types in this module are [`One`], [`Two`] and [`Three`]. They are for -searching for one, two or three distinct bytes, respectively, in a haystack. -Each type also has corresponding double ended iterators. These searchers are -typically much faster than scalar routines accomplishing the same task. - -The `One` searcher also provides a [`One::count`] routine for efficiently -counting the number of times a single byte occurs in a haystack. This is -useful, for example, for counting the number of lines in a haystack. This -routine exists because it is usually faster, especially with a high match -count, than using [`One::find`] repeatedly. ([`OneIter`] specializes its -`Iterator::count` implementation to use this routine.) - -Only one, two and three bytes are supported because three bytes is about -the point where one sees diminishing returns. Beyond this point and it's -probably (but not necessarily) better to just use a simple `[bool; 256]` array -or similar. However, it depends mightily on the specific work-load and the -expected match frequency. -*/ - -use core::arch::x86_64::__m128i; - -use crate::{arch::generic::memchr as generic, ext::Pointer, vector::Vector}; - -/// Finds all occurrences of a single byte in a haystack. -#[derive(Clone, Copy, Debug)] -pub struct One(generic::One<__m128i>); - -impl One { - /// Create a new searcher that finds occurrences of the needle byte given. - /// - /// This particular searcher is specialized to use SSE2 vector instructions - /// that typically make it quite fast. - /// - /// If SSE2 is unavailable in the current environment, then `None` is - /// returned. - #[inline] - pub fn new(needle: u8) -> Option { - if One::is_available() { - // SAFETY: we check that sse2 is available above. 
- unsafe { Some(One::new_unchecked(needle)) } - } else { - None - } - } - - /// Create a new finder specific to SSE2 vectors and routines without - /// checking that SSE2 is available. - /// - /// # Safety - /// - /// Callers must guarantee that it is safe to execute `sse2` instructions - /// in the current environment. - /// - /// Note that it is a common misconception that if one compiles for an - /// `x86_64` target, then they therefore automatically have access to SSE2 - /// instructions. While this is almost always the case, it isn't true in - /// 100% of cases. - #[target_feature(enable = "sse2")] - #[inline] - pub unsafe fn new_unchecked(needle: u8) -> One { - One(generic::One::new(needle)) - } - - /// Returns true when this implementation is available in the current - /// environment. - /// - /// When this is true, it is guaranteed that [`One::new`] will return - /// a `Some` value. Similarly, when it is false, it is guaranteed that - /// `One::new` will return a `None` value. - /// - /// Note also that for the lifetime of a single program, if this returns - /// true then it will always return true. - #[inline] - pub fn is_available() -> bool { - #[cfg(target_feature = "sse2")] - { - true - } - #[cfg(not(target_feature = "sse2"))] - { - false - } - } - - /// Return the first occurrence of one of the needle bytes in the given - /// haystack. If no such occurrence exists, then `None` is returned. - /// - /// The occurrence is reported as an offset into `haystack`. Its maximum - /// value is `haystack.len() - 1`. - #[inline] - pub fn find(&self, haystack: &[u8]) -> Option { - // SAFETY: `find_raw` guarantees that if a pointer is returned, it - // falls within the bounds of the start and end pointers. - unsafe { - generic::search_slice_with_raw(haystack, |s, e| { - self.find_raw(s, e) - }) - } - } - - /// Return the last occurrence of one of the needle bytes in the given - /// haystack. If no such occurrence exists, then `None` is returned. - /// - /// The occurrence is reported as an offset into `haystack`. Its maximum - /// value is `haystack.len() - 1`. - #[inline] - pub fn rfind(&self, haystack: &[u8]) -> Option { - // SAFETY: `rfind_raw` guarantees that if a pointer is returned, it - // falls within the bounds of the start and end pointers. - unsafe { - generic::search_slice_with_raw(haystack, |s, e| { - self.rfind_raw(s, e) - }) - } - } - - /// Counts all occurrences of this byte in the given haystack. - #[inline] - pub fn count(&self, haystack: &[u8]) -> usize { - // SAFETY: All of our pointers are derived directly from a borrowed - // slice, which is guaranteed to be valid. - unsafe { - let start = haystack.as_ptr(); - let end = start.add(haystack.len()); - self.count_raw(start, end) - } - } - - /// Like `find`, but accepts and returns raw pointers. - /// - /// When a match is found, the pointer returned is guaranteed to be - /// `>= start` and `< end`. - /// - /// This routine is useful if you're already using raw pointers and would - /// like to avoid converting back to a slice before executing a search. - /// - /// # Safety - /// - /// * Both `start` and `end` must be valid for reads. - /// * Both `start` and `end` must point to an initialized value. - /// * Both `start` and `end` must point to the same allocated object and - /// must either be in bounds or at most one byte past the end of the - /// allocated object. - /// * Both `start` and `end` must be _derived from_ a pointer to the same - /// object. 
- /// * The distance between `start` and `end` must not overflow `isize`. - /// * The distance being in bounds must not rely on "wrapping around" the - /// address space. - /// - /// Note that callers may pass a pair of pointers such that `start >= end`. - /// In that case, `None` will always be returned. - #[inline] - pub unsafe fn find_raw( - &self, - start: *const u8, - end: *const u8, - ) -> Option<*const u8> { - if start >= end { - return None; - } - if end.distance(start) < __m128i::BYTES { - // SAFETY: We require the caller to pass valid start/end pointers. - return generic::fwd_byte_by_byte(start, end, |b| { - b == self.0.needle1() - }); - } - // SAFETY: Building a `One` means it's safe to call 'sse2' routines. - // Also, we've checked that our haystack is big enough to run on the - // vector routine. Pointer validity is caller's responsibility. - // - // Note that we could call `self.0.find_raw` directly here. But that - // means we'd have to annotate this routine with `target_feature`. - // Which is fine, because this routine is `unsafe` anyway and the - // `target_feature` obligation is met by virtue of building a `One`. - // The real problem is that a routine with a `target_feature` - // annotation generally can't be inlined into caller code unless the - // caller code has the same target feature annotations. Which is maybe - // okay for SSE2, but we do the same thing for AVX2 where caller code - // probably usually doesn't have AVX2 enabled. That means that this - // routine can be inlined which will handle some of the short-haystack - // cases above without touching the architecture specific code. - self.find_raw_impl(start, end) - } - - /// Like `rfind`, but accepts and returns raw pointers. - /// - /// When a match is found, the pointer returned is guaranteed to be - /// `>= start` and `< end`. - /// - /// This routine is useful if you're already using raw pointers and would - /// like to avoid converting back to a slice before executing a search. - /// - /// # Safety - /// - /// * Both `start` and `end` must be valid for reads. - /// * Both `start` and `end` must point to an initialized value. - /// * Both `start` and `end` must point to the same allocated object and - /// must either be in bounds or at most one byte past the end of the - /// allocated object. - /// * Both `start` and `end` must be _derived from_ a pointer to the same - /// object. - /// * The distance between `start` and `end` must not overflow `isize`. - /// * The distance being in bounds must not rely on "wrapping around" the - /// address space. - /// - /// Note that callers may pass a pair of pointers such that `start >= end`. - /// In that case, `None` will always be returned. - #[inline] - pub unsafe fn rfind_raw( - &self, - start: *const u8, - end: *const u8, - ) -> Option<*const u8> { - if start >= end { - return None; - } - if end.distance(start) < __m128i::BYTES { - // SAFETY: We require the caller to pass valid start/end pointers. - return generic::rev_byte_by_byte(start, end, |b| { - b == self.0.needle1() - }); - } - // SAFETY: Building a `One` means it's safe to call 'sse2' routines. - // Also, we've checked that our haystack is big enough to run on the - // vector routine. Pointer validity is caller's responsibility. - // - // See note in forward routine above for why we don't just call - // `self.0.rfind_raw` directly here. - self.rfind_raw_impl(start, end) - } - - /// Counts all occurrences of this byte in the given haystack represented - /// by raw pointers. 
- /// - /// This routine is useful if you're already using raw pointers and would - /// like to avoid converting back to a slice before executing a search. - /// - /// # Safety - /// - /// * Both `start` and `end` must be valid for reads. - /// * Both `start` and `end` must point to an initialized value. - /// * Both `start` and `end` must point to the same allocated object and - /// must either be in bounds or at most one byte past the end of the - /// allocated object. - /// * Both `start` and `end` must be _derived from_ a pointer to the same - /// object. - /// * The distance between `start` and `end` must not overflow `isize`. - /// * The distance being in bounds must not rely on "wrapping around" the - /// address space. - /// - /// Note that callers may pass a pair of pointers such that `start >= end`. - /// In that case, `0` will always be returned. - #[inline] - pub unsafe fn count_raw(&self, start: *const u8, end: *const u8) -> usize { - if start >= end { - return 0; - } - if end.distance(start) < __m128i::BYTES { - // SAFETY: We require the caller to pass valid start/end pointers. - return generic::count_byte_by_byte(start, end, |b| { - b == self.0.needle1() - }); - } - // SAFETY: Building a `One` means it's safe to call 'sse2' routines. - // Also, we've checked that our haystack is big enough to run on the - // vector routine. Pointer validity is caller's responsibility. - self.count_raw_impl(start, end) - } - - /// Execute a search using SSE2 vectors and routines. - /// - /// # Safety - /// - /// Same as [`One::find_raw`], except the distance between `start` and - /// `end` must be at least the size of an SSE2 vector (in bytes). - /// - /// (The target feature safety obligation is automatically fulfilled by - /// virtue of being a method on `One`, which can only be constructed - /// when it is safe to call `sse2` routines.) - #[target_feature(enable = "sse2")] - #[inline] - unsafe fn find_raw_impl( - &self, - start: *const u8, - end: *const u8, - ) -> Option<*const u8> { - self.0.find_raw(start, end) - } - - /// Execute a search using SSE2 vectors and routines. - /// - /// # Safety - /// - /// Same as [`One::rfind_raw`], except the distance between `start` and - /// `end` must be at least the size of an SSE2 vector (in bytes). - /// - /// (The target feature safety obligation is automatically fulfilled by - /// virtue of being a method on `One`, which can only be constructed - /// when it is safe to call `sse2` routines.) - #[target_feature(enable = "sse2")] - #[inline] - unsafe fn rfind_raw_impl( - &self, - start: *const u8, - end: *const u8, - ) -> Option<*const u8> { - self.0.rfind_raw(start, end) - } - - /// Execute a count using SSE2 vectors and routines. - /// - /// # Safety - /// - /// Same as [`One::count_raw`], except the distance between `start` and - /// `end` must be at least the size of an SSE2 vector (in bytes). - /// - /// (The target feature safety obligation is automatically fulfilled by - /// virtue of being a method on `One`, which can only be constructed - /// when it is safe to call `sse2` routines.) - #[target_feature(enable = "sse2")] - #[inline] - unsafe fn count_raw_impl( - &self, - start: *const u8, - end: *const u8, - ) -> usize { - self.0.count_raw(start, end) - } - - /// Returns an iterator over all occurrences of the needle byte in the - /// given haystack. - /// - /// The iterator returned implements `DoubleEndedIterator`. This means it - /// can also be used to find occurrences in reverse order. 
- #[inline]
- pub fn iter<'a, 'h>(&'a self, haystack: &'h [u8]) -> OneIter<'a, 'h> {
- OneIter { searcher: self, it: generic::Iter::new(haystack) }
- }
-}
-
-/// An iterator over all occurrences of a single byte in a haystack.
-///
-/// This iterator implements `DoubleEndedIterator`, which means it can also be
-/// used to find occurrences in reverse order.
-///
-/// This iterator is created by the [`One::iter`] method.
-///
-/// The lifetime parameters are as follows:
-///
-/// * `'a` refers to the lifetime of the underlying [`One`] searcher.
-/// * `'h` refers to the lifetime of the haystack being searched.
-#[derive(Clone, Debug)]
-pub struct OneIter<'a, 'h> {
- searcher: &'a One,
- it: generic::Iter<'h>,
-}
-
-impl<'a, 'h> Iterator for OneIter<'a, 'h> {
- type Item = usize;
-
- #[inline]
- fn next(&mut self) -> Option<usize> {
- // SAFETY: We rely on the generic iterator to provide valid start
- // and end pointers, but we guarantee that any pointer returned by
- // 'find_raw' falls within the bounds of the start and end pointer.
- unsafe { self.it.next(|s, e| self.searcher.find_raw(s, e)) }
- }
-
- #[inline]
- fn count(self) -> usize {
- self.it.count(|s, e| {
- // SAFETY: We rely on our generic iterator to return valid start
- // and end pointers.
- unsafe { self.searcher.count_raw(s, e) }
- })
- }
-
- #[inline]
- fn size_hint(&self) -> (usize, Option<usize>) {
- self.it.size_hint()
- }
-}
-
-impl<'a, 'h> DoubleEndedIterator for OneIter<'a, 'h> {
- #[inline]
- fn next_back(&mut self) -> Option<usize> {
- // SAFETY: We rely on the generic iterator to provide valid start
- // and end pointers, but we guarantee that any pointer returned by
- // 'rfind_raw' falls within the bounds of the start and end pointer.
- unsafe { self.it.next_back(|s, e| self.searcher.rfind_raw(s, e)) }
- }
-}
-
-impl<'a, 'h> core::iter::FusedIterator for OneIter<'a, 'h> {}
-
-/// Finds all occurrences of two bytes in a haystack.
-///
-/// That is, this reports matches of one of two possible bytes. For example,
-/// searching for `a` or `b` in `afoobar` would report matches at offsets `0`,
-/// `4` and `5`.
-#[derive(Clone, Copy, Debug)]
-pub struct Two(generic::Two<__m128i>);
-
-impl Two {
- /// Create a new searcher that finds occurrences of the needle bytes given.
- ///
- /// This particular searcher is specialized to use SSE2 vector instructions
- /// that typically make it quite fast.
- ///
- /// If SSE2 is unavailable in the current environment, then `None` is
- /// returned.
- #[inline]
- pub fn new(needle1: u8, needle2: u8) -> Option<Two> {
- if Two::is_available() {
- // SAFETY: we check that sse2 is available above.
- unsafe { Some(Two::new_unchecked(needle1, needle2)) }
- } else {
- None
- }
- }
-
- /// Create a new finder specific to SSE2 vectors and routines without
- /// checking that SSE2 is available.
- ///
- /// # Safety
- ///
- /// Callers must guarantee that it is safe to execute `sse2` instructions
- /// in the current environment.
- ///
- /// Note that it is a common misconception that if one compiles for an
- /// `x86_64` target, then they therefore automatically have access to SSE2
- /// instructions. While this is almost always the case, it isn't true in
- /// 100% of cases.
- #[target_feature(enable = "sse2")]
- #[inline]
- pub unsafe fn new_unchecked(needle1: u8, needle2: u8) -> Two {
- Two(generic::Two::new(needle1, needle2))
- }
-
- /// Returns true when this implementation is available in the current
- /// environment.
- /// - /// When this is true, it is guaranteed that [`Two::new`] will return - /// a `Some` value. Similarly, when it is false, it is guaranteed that - /// `Two::new` will return a `None` value. - /// - /// Note also that for the lifetime of a single program, if this returns - /// true then it will always return true. - #[inline] - pub fn is_available() -> bool { - #[cfg(target_feature = "sse2")] - { - true - } - #[cfg(not(target_feature = "sse2"))] - { - false - } - } - - /// Return the first occurrence of one of the needle bytes in the given - /// haystack. If no such occurrence exists, then `None` is returned. - /// - /// The occurrence is reported as an offset into `haystack`. Its maximum - /// value is `haystack.len() - 1`. - #[inline] - pub fn find(&self, haystack: &[u8]) -> Option { - // SAFETY: `find_raw` guarantees that if a pointer is returned, it - // falls within the bounds of the start and end pointers. - unsafe { - generic::search_slice_with_raw(haystack, |s, e| { - self.find_raw(s, e) - }) - } - } - - /// Return the last occurrence of one of the needle bytes in the given - /// haystack. If no such occurrence exists, then `None` is returned. - /// - /// The occurrence is reported as an offset into `haystack`. Its maximum - /// value is `haystack.len() - 1`. - #[inline] - pub fn rfind(&self, haystack: &[u8]) -> Option { - // SAFETY: `rfind_raw` guarantees that if a pointer is returned, it - // falls within the bounds of the start and end pointers. - unsafe { - generic::search_slice_with_raw(haystack, |s, e| { - self.rfind_raw(s, e) - }) - } - } - - /// Like `find`, but accepts and returns raw pointers. - /// - /// When a match is found, the pointer returned is guaranteed to be - /// `>= start` and `< end`. - /// - /// This routine is useful if you're already using raw pointers and would - /// like to avoid converting back to a slice before executing a search. - /// - /// # Safety - /// - /// * Both `start` and `end` must be valid for reads. - /// * Both `start` and `end` must point to an initialized value. - /// * Both `start` and `end` must point to the same allocated object and - /// must either be in bounds or at most one byte past the end of the - /// allocated object. - /// * Both `start` and `end` must be _derived from_ a pointer to the same - /// object. - /// * The distance between `start` and `end` must not overflow `isize`. - /// * The distance being in bounds must not rely on "wrapping around" the - /// address space. - /// - /// Note that callers may pass a pair of pointers such that `start >= end`. - /// In that case, `None` will always be returned. - #[inline] - pub unsafe fn find_raw( - &self, - start: *const u8, - end: *const u8, - ) -> Option<*const u8> { - if start >= end { - return None; - } - if end.distance(start) < __m128i::BYTES { - // SAFETY: We require the caller to pass valid start/end pointers. - return generic::fwd_byte_by_byte(start, end, |b| { - b == self.0.needle1() || b == self.0.needle2() - }); - } - // SAFETY: Building a `Two` means it's safe to call 'sse2' routines. - // Also, we've checked that our haystack is big enough to run on the - // vector routine. Pointer validity is caller's responsibility. - // - // Note that we could call `self.0.find_raw` directly here. But that - // means we'd have to annotate this routine with `target_feature`. - // Which is fine, because this routine is `unsafe` anyway and the - // `target_feature` obligation is met by virtue of building a `Two`. 
- // The real problem is that a routine with a `target_feature` - // annotation generally can't be inlined into caller code unless the - // caller code has the same target feature annotations. Which is maybe - // okay for SSE2, but we do the same thing for AVX2 where caller code - // probably usually doesn't have AVX2 enabled. That means that this - // routine can be inlined which will handle some of the short-haystack - // cases above without touching the architecture specific code. - self.find_raw_impl(start, end) - } - - /// Like `rfind`, but accepts and returns raw pointers. - /// - /// When a match is found, the pointer returned is guaranteed to be - /// `>= start` and `< end`. - /// - /// This routine is useful if you're already using raw pointers and would - /// like to avoid converting back to a slice before executing a search. - /// - /// # Safety - /// - /// * Both `start` and `end` must be valid for reads. - /// * Both `start` and `end` must point to an initialized value. - /// * Both `start` and `end` must point to the same allocated object and - /// must either be in bounds or at most one byte past the end of the - /// allocated object. - /// * Both `start` and `end` must be _derived from_ a pointer to the same - /// object. - /// * The distance between `start` and `end` must not overflow `isize`. - /// * The distance being in bounds must not rely on "wrapping around" the - /// address space. - /// - /// Note that callers may pass a pair of pointers such that `start >= end`. - /// In that case, `None` will always be returned. - #[inline] - pub unsafe fn rfind_raw( - &self, - start: *const u8, - end: *const u8, - ) -> Option<*const u8> { - if start >= end { - return None; - } - if end.distance(start) < __m128i::BYTES { - // SAFETY: We require the caller to pass valid start/end pointers. - return generic::rev_byte_by_byte(start, end, |b| { - b == self.0.needle1() || b == self.0.needle2() - }); - } - // SAFETY: Building a `Two` means it's safe to call 'sse2' routines. - // Also, we've checked that our haystack is big enough to run on the - // vector routine. Pointer validity is caller's responsibility. - // - // See note in forward routine above for why we don't just call - // `self.0.rfind_raw` directly here. - self.rfind_raw_impl(start, end) - } - - /// Execute a search using SSE2 vectors and routines. - /// - /// # Safety - /// - /// Same as [`Two::find_raw`], except the distance between `start` and - /// `end` must be at least the size of an SSE2 vector (in bytes). - /// - /// (The target feature safety obligation is automatically fulfilled by - /// virtue of being a method on `Two`, which can only be constructed - /// when it is safe to call `sse2` routines.) - #[target_feature(enable = "sse2")] - #[inline] - unsafe fn find_raw_impl( - &self, - start: *const u8, - end: *const u8, - ) -> Option<*const u8> { - self.0.find_raw(start, end) - } - - /// Execute a search using SSE2 vectors and routines. - /// - /// # Safety - /// - /// Same as [`Two::rfind_raw`], except the distance between `start` and - /// `end` must be at least the size of an SSE2 vector (in bytes). - /// - /// (The target feature safety obligation is automatically fulfilled by - /// virtue of being a method on `Two`, which can only be constructed - /// when it is safe to call `sse2` routines.) 
-    #[target_feature(enable = "sse2")]
-    #[inline]
-    unsafe fn rfind_raw_impl(
-        &self,
-        start: *const u8,
-        end: *const u8,
-    ) -> Option<*const u8> {
-        self.0.rfind_raw(start, end)
-    }
-
-    /// Returns an iterator over all occurrences of the needle bytes in the
-    /// given haystack.
-    ///
-    /// The iterator returned implements `DoubleEndedIterator`. This means it
-    /// can also be used to find occurrences in reverse order.
-    #[inline]
-    pub fn iter<'a, 'h>(&'a self, haystack: &'h [u8]) -> TwoIter<'a, 'h> {
-        TwoIter { searcher: self, it: generic::Iter::new(haystack) }
-    }
-}
-
-/// An iterator over all occurrences of two possible bytes in a haystack.
-///
-/// This iterator implements `DoubleEndedIterator`, which means it can also be
-/// used to find occurrences in reverse order.
-///
-/// This iterator is created by the [`Two::iter`] method.
-///
-/// The lifetime parameters are as follows:
-///
-/// * `'a` refers to the lifetime of the underlying [`Two`] searcher.
-/// * `'h` refers to the lifetime of the haystack being searched.
-#[derive(Clone, Debug)]
-pub struct TwoIter<'a, 'h> {
-    searcher: &'a Two,
-    it: generic::Iter<'h>,
-}
-
-impl<'a, 'h> Iterator for TwoIter<'a, 'h> {
-    type Item = usize;
-
-    #[inline]
-    fn next(&mut self) -> Option<usize> {
-        // SAFETY: We rely on the generic iterator to provide valid start
-        // and end pointers, but we guarantee that any pointer returned by
-        // 'find_raw' falls within the bounds of the start and end pointer.
-        unsafe { self.it.next(|s, e| self.searcher.find_raw(s, e)) }
-    }
-
-    #[inline]
-    fn size_hint(&self) -> (usize, Option<usize>) {
-        self.it.size_hint()
-    }
-}
-
-impl<'a, 'h> DoubleEndedIterator for TwoIter<'a, 'h> {
-    #[inline]
-    fn next_back(&mut self) -> Option<usize> {
-        // SAFETY: We rely on the generic iterator to provide valid start
-        // and end pointers, but we guarantee that any pointer returned by
-        // 'rfind_raw' falls within the bounds of the start and end pointer.
-        unsafe { self.it.next_back(|s, e| self.searcher.rfind_raw(s, e)) }
-    }
-}
-
-impl<'a, 'h> core::iter::FusedIterator for TwoIter<'a, 'h> {}
-
-/// Finds all occurrences of three bytes in a haystack.
-///
-/// That is, this reports matches of one of three possible bytes. For example,
-/// searching for `a`, `b` or `o` in `afoobar` would report matches at offsets
-/// `0`, `2`, `3`, `4` and `5`.
-#[derive(Clone, Copy, Debug)]
-pub struct Three(generic::Three<__m128i>);
-
-impl Three {
-    /// Create a new searcher that finds occurrences of the needle bytes given.
-    ///
-    /// This particular searcher is specialized to use SSE2 vector instructions
-    /// that typically make it quite fast.
-    ///
-    /// If SSE2 is unavailable in the current environment, then `None` is
-    /// returned.
-    #[inline]
-    pub fn new(needle1: u8, needle2: u8, needle3: u8) -> Option<Three> {
-        if Three::is_available() {
-            // SAFETY: we check that sse2 is available above.
-            unsafe { Some(Three::new_unchecked(needle1, needle2, needle3)) }
-        } else {
-            None
-        }
-    }
-
-    /// Create a new finder specific to SSE2 vectors and routines without
-    /// checking that SSE2 is available.
-    ///
-    /// # Safety
-    ///
-    /// Callers must guarantee that it is safe to execute `sse2` instructions
-    /// in the current environment.
-    ///
-    /// Note that it is a common misconception that if one compiles for an
-    /// `x86_64` target, then they therefore automatically have access to SSE2
-    /// instructions. While this is almost always the case, it isn't true in
-    /// 100% of cases.
- #[target_feature(enable = "sse2")] - #[inline] - pub unsafe fn new_unchecked( - needle1: u8, - needle2: u8, - needle3: u8, - ) -> Three { - Three(generic::Three::new(needle1, needle2, needle3)) - } - - /// Returns true when this implementation is available in the current - /// environment. - /// - /// When this is true, it is guaranteed that [`Three::new`] will return - /// a `Some` value. Similarly, when it is false, it is guaranteed that - /// `Three::new` will return a `None` value. - /// - /// Note also that for the lifetime of a single program, if this returns - /// true then it will always return true. - #[inline] - pub fn is_available() -> bool { - #[cfg(target_feature = "sse2")] - { - true - } - #[cfg(not(target_feature = "sse2"))] - { - false - } - } - - /// Return the first occurrence of one of the needle bytes in the given - /// haystack. If no such occurrence exists, then `None` is returned. - /// - /// The occurrence is reported as an offset into `haystack`. Its maximum - /// value is `haystack.len() - 1`. - #[inline] - pub fn find(&self, haystack: &[u8]) -> Option { - // SAFETY: `find_raw` guarantees that if a pointer is returned, it - // falls within the bounds of the start and end pointers. - unsafe { - generic::search_slice_with_raw(haystack, |s, e| { - self.find_raw(s, e) - }) - } - } - - /// Return the last occurrence of one of the needle bytes in the given - /// haystack. If no such occurrence exists, then `None` is returned. - /// - /// The occurrence is reported as an offset into `haystack`. Its maximum - /// value is `haystack.len() - 1`. - #[inline] - pub fn rfind(&self, haystack: &[u8]) -> Option { - // SAFETY: `rfind_raw` guarantees that if a pointer is returned, it - // falls within the bounds of the start and end pointers. - unsafe { - generic::search_slice_with_raw(haystack, |s, e| { - self.rfind_raw(s, e) - }) - } - } - - /// Like `find`, but accepts and returns raw pointers. - /// - /// When a match is found, the pointer returned is guaranteed to be - /// `>= start` and `< end`. - /// - /// This routine is useful if you're already using raw pointers and would - /// like to avoid converting back to a slice before executing a search. - /// - /// # Safety - /// - /// * Both `start` and `end` must be valid for reads. - /// * Both `start` and `end` must point to an initialized value. - /// * Both `start` and `end` must point to the same allocated object and - /// must either be in bounds or at most one byte past the end of the - /// allocated object. - /// * Both `start` and `end` must be _derived from_ a pointer to the same - /// object. - /// * The distance between `start` and `end` must not overflow `isize`. - /// * The distance being in bounds must not rely on "wrapping around" the - /// address space. - /// - /// Note that callers may pass a pair of pointers such that `start >= end`. - /// In that case, `None` will always be returned. - #[inline] - pub unsafe fn find_raw( - &self, - start: *const u8, - end: *const u8, - ) -> Option<*const u8> { - if start >= end { - return None; - } - if end.distance(start) < __m128i::BYTES { - // SAFETY: We require the caller to pass valid start/end pointers. - return generic::fwd_byte_by_byte(start, end, |b| { - b == self.0.needle1() - || b == self.0.needle2() - || b == self.0.needle3() - }); - } - // SAFETY: Building a `Three` means it's safe to call 'sse2' routines. - // Also, we've checked that our haystack is big enough to run on the - // vector routine. Pointer validity is caller's responsibility. 
- // - // Note that we could call `self.0.find_raw` directly here. But that - // means we'd have to annotate this routine with `target_feature`. - // Which is fine, because this routine is `unsafe` anyway and the - // `target_feature` obligation is met by virtue of building a `Three`. - // The real problem is that a routine with a `target_feature` - // annotation generally can't be inlined into caller code unless the - // caller code has the same target feature annotations. Which is maybe - // okay for SSE2, but we do the same thing for AVX2 where caller code - // probably usually doesn't have AVX2 enabled. That means that this - // routine can be inlined which will handle some of the short-haystack - // cases above without touching the architecture specific code. - self.find_raw_impl(start, end) - } - - /// Like `rfind`, but accepts and returns raw pointers. - /// - /// When a match is found, the pointer returned is guaranteed to be - /// `>= start` and `< end`. - /// - /// This routine is useful if you're already using raw pointers and would - /// like to avoid converting back to a slice before executing a search. - /// - /// # Safety - /// - /// * Both `start` and `end` must be valid for reads. - /// * Both `start` and `end` must point to an initialized value. - /// * Both `start` and `end` must point to the same allocated object and - /// must either be in bounds or at most one byte past the end of the - /// allocated object. - /// * Both `start` and `end` must be _derived from_ a pointer to the same - /// object. - /// * The distance between `start` and `end` must not overflow `isize`. - /// * The distance being in bounds must not rely on "wrapping around" the - /// address space. - /// - /// Note that callers may pass a pair of pointers such that `start >= end`. - /// In that case, `None` will always be returned. - #[inline] - pub unsafe fn rfind_raw( - &self, - start: *const u8, - end: *const u8, - ) -> Option<*const u8> { - if start >= end { - return None; - } - if end.distance(start) < __m128i::BYTES { - // SAFETY: We require the caller to pass valid start/end pointers. - return generic::rev_byte_by_byte(start, end, |b| { - b == self.0.needle1() - || b == self.0.needle2() - || b == self.0.needle3() - }); - } - // SAFETY: Building a `Three` means it's safe to call 'sse2' routines. - // Also, we've checked that our haystack is big enough to run on the - // vector routine. Pointer validity is caller's responsibility. - // - // See note in forward routine above for why we don't just call - // `self.0.rfind_raw` directly here. - self.rfind_raw_impl(start, end) - } - - /// Execute a search using SSE2 vectors and routines. - /// - /// # Safety - /// - /// Same as [`Three::find_raw`], except the distance between `start` and - /// `end` must be at least the size of an SSE2 vector (in bytes). - /// - /// (The target feature safety obligation is automatically fulfilled by - /// virtue of being a method on `Three`, which can only be constructed - /// when it is safe to call `sse2` routines.) - #[target_feature(enable = "sse2")] - #[inline] - unsafe fn find_raw_impl( - &self, - start: *const u8, - end: *const u8, - ) -> Option<*const u8> { - self.0.find_raw(start, end) - } - - /// Execute a search using SSE2 vectors and routines. - /// - /// # Safety - /// - /// Same as [`Three::rfind_raw`], except the distance between `start` and - /// `end` must be at least the size of an SSE2 vector (in bytes). 
-    ///
-    /// (The target feature safety obligation is automatically fulfilled by
-    /// virtue of being a method on `Three`, which can only be constructed
-    /// when it is safe to call `sse2` routines.)
-    #[target_feature(enable = "sse2")]
-    #[inline]
-    unsafe fn rfind_raw_impl(
-        &self,
-        start: *const u8,
-        end: *const u8,
-    ) -> Option<*const u8> {
-        self.0.rfind_raw(start, end)
-    }
-
-    /// Returns an iterator over all occurrences of the needle byte in the
-    /// given haystack.
-    ///
-    /// The iterator returned implements `DoubleEndedIterator`. This means it
-    /// can also be used to find occurrences in reverse order.
-    #[inline]
-    pub fn iter<'a, 'h>(&'a self, haystack: &'h [u8]) -> ThreeIter<'a, 'h> {
-        ThreeIter { searcher: self, it: generic::Iter::new(haystack) }
-    }
-}
-
-/// An iterator over all occurrences of three possible bytes in a haystack.
-///
-/// This iterator implements `DoubleEndedIterator`, which means it can also be
-/// used to find occurrences in reverse order.
-///
-/// This iterator is created by the [`Three::iter`] method.
-///
-/// The lifetime parameters are as follows:
-///
-/// * `'a` refers to the lifetime of the underlying [`Three`] searcher.
-/// * `'h` refers to the lifetime of the haystack being searched.
-#[derive(Clone, Debug)]
-pub struct ThreeIter<'a, 'h> {
-    searcher: &'a Three,
-    it: generic::Iter<'h>,
-}
-
-impl<'a, 'h> Iterator for ThreeIter<'a, 'h> {
-    type Item = usize;
-
-    #[inline]
-    fn next(&mut self) -> Option<usize> {
-        // SAFETY: We rely on the generic iterator to provide valid start
-        // and end pointers, but we guarantee that any pointer returned by
-        // 'find_raw' falls within the bounds of the start and end pointer.
-        unsafe { self.it.next(|s, e| self.searcher.find_raw(s, e)) }
-    }
-
-    #[inline]
-    fn size_hint(&self) -> (usize, Option<usize>) {
-        self.it.size_hint()
-    }
-}
-
-impl<'a, 'h> DoubleEndedIterator for ThreeIter<'a, 'h> {
-    #[inline]
-    fn next_back(&mut self) -> Option<usize> {
-        // SAFETY: We rely on the generic iterator to provide valid start
-        // and end pointers, but we guarantee that any pointer returned by
-        // 'rfind_raw' falls within the bounds of the start and end pointer.
- unsafe { self.it.next_back(|s, e| self.searcher.rfind_raw(s, e)) } - } -} - -impl<'a, 'h> core::iter::FusedIterator for ThreeIter<'a, 'h> {} - -#[cfg(test)] -mod tests { - use super::*; - - define_memchr_quickcheck!(super); - - #[test] - fn forward_one() { - crate::tests::memchr::Runner::new(1).forward_iter( - |haystack, needles| { - Some(One::new(needles[0])?.iter(haystack).collect()) - }, - ) - } - - #[test] - fn reverse_one() { - crate::tests::memchr::Runner::new(1).reverse_iter( - |haystack, needles| { - Some(One::new(needles[0])?.iter(haystack).rev().collect()) - }, - ) - } - - #[test] - fn count_one() { - crate::tests::memchr::Runner::new(1).count_iter(|haystack, needles| { - Some(One::new(needles[0])?.iter(haystack).count()) - }) - } - - #[test] - fn forward_two() { - crate::tests::memchr::Runner::new(2).forward_iter( - |haystack, needles| { - let n1 = needles.get(0).copied()?; - let n2 = needles.get(1).copied()?; - Some(Two::new(n1, n2)?.iter(haystack).collect()) - }, - ) - } - - #[test] - fn reverse_two() { - crate::tests::memchr::Runner::new(2).reverse_iter( - |haystack, needles| { - let n1 = needles.get(0).copied()?; - let n2 = needles.get(1).copied()?; - Some(Two::new(n1, n2)?.iter(haystack).rev().collect()) - }, - ) - } - - #[test] - fn forward_three() { - crate::tests::memchr::Runner::new(3).forward_iter( - |haystack, needles| { - let n1 = needles.get(0).copied()?; - let n2 = needles.get(1).copied()?; - let n3 = needles.get(2).copied()?; - Some(Three::new(n1, n2, n3)?.iter(haystack).collect()) - }, - ) - } - - #[test] - fn reverse_three() { - crate::tests::memchr::Runner::new(3).reverse_iter( - |haystack, needles| { - let n1 = needles.get(0).copied()?; - let n2 = needles.get(1).copied()?; - let n3 = needles.get(2).copied()?; - Some(Three::new(n1, n2, n3)?.iter(haystack).rev().collect()) - }, - ) - } -} diff --git a/vendor/memchr/src/arch/x86_64/sse2/mod.rs b/vendor/memchr/src/arch/x86_64/sse2/mod.rs deleted file mode 100644 index bcb830790fbbaa..00000000000000 --- a/vendor/memchr/src/arch/x86_64/sse2/mod.rs +++ /dev/null @@ -1,6 +0,0 @@ -/*! -Algorithms for the `x86_64` target using 128-bit vectors via SSE2. -*/ - -pub mod memchr; -pub mod packedpair; diff --git a/vendor/memchr/src/arch/x86_64/sse2/packedpair.rs b/vendor/memchr/src/arch/x86_64/sse2/packedpair.rs deleted file mode 100644 index c8b5b9999b63cd..00000000000000 --- a/vendor/memchr/src/arch/x86_64/sse2/packedpair.rs +++ /dev/null @@ -1,232 +0,0 @@ -/*! -A 128-bit vector implementation of the "packed pair" SIMD algorithm. - -The "packed pair" algorithm is based on the [generic SIMD] algorithm. The main -difference is that it (by default) uses a background distribution of byte -frequencies to heuristically select the pair of bytes to search for. - -[generic SIMD]: http://0x80.pl/articles/simd-strfind.html#first-and-last -*/ - -use core::arch::x86_64::__m128i; - -use crate::arch::{all::packedpair::Pair, generic::packedpair}; - -/// A "packed pair" finder that uses 128-bit vector operations. -/// -/// This finder picks two bytes that it believes have high predictive power -/// for indicating an overall match of a needle. Depending on whether -/// `Finder::find` or `Finder::find_prefilter` is used, it reports offsets -/// where the needle matches or could match. In the prefilter case, candidates -/// are reported whenever the [`Pair`] of bytes given matches. -#[derive(Clone, Copy, Debug)] -pub struct Finder(packedpair::Finder<__m128i>); - -impl Finder { - /// Create a new pair searcher. 
The searcher returned can either report
-    /// exact matches of `needle` or act as a prefilter and report candidate
-    /// positions of `needle`.
-    ///
-    /// If SSE2 is unavailable in the current environment or if a [`Pair`]
-    /// could not be constructed from the needle given, then `None` is
-    /// returned.
-    #[inline]
-    pub fn new(needle: &[u8]) -> Option<Finder> {
-        Finder::with_pair(needle, Pair::new(needle)?)
-    }
-
-    /// Create a new "packed pair" finder using the pair of bytes given.
-    ///
-    /// This constructor permits callers to control precisely which pair of
-    /// bytes is used as a predicate.
-    ///
-    /// If SSE2 is unavailable in the current environment, then `None` is
-    /// returned.
-    #[inline]
-    pub fn with_pair(needle: &[u8], pair: Pair) -> Option<Finder> {
-        if Finder::is_available() {
-            // SAFETY: we check that sse2 is available above. We are also
-            // guaranteed to have needle.len() > 1 because we have a valid
-            // Pair.
-            unsafe { Some(Finder::with_pair_impl(needle, pair)) }
-        } else {
-            None
-        }
-    }
-
-    /// Create a new `Finder` specific to SSE2 vectors and routines.
-    ///
-    /// # Safety
-    ///
-    /// Same as the safety for `packedpair::Finder::new`, and callers must also
-    /// ensure that SSE2 is available.
-    #[target_feature(enable = "sse2")]
-    #[inline]
-    unsafe fn with_pair_impl(needle: &[u8], pair: Pair) -> Finder {
-        let finder = packedpair::Finder::<__m128i>::new(needle, pair);
-        Finder(finder)
-    }
-
-    /// Returns true when this implementation is available in the current
-    /// environment.
-    ///
-    /// When this is true, it is guaranteed that [`Finder::with_pair`] will
-    /// return a `Some` value. Similarly, when it is false, it is guaranteed
-    /// that `Finder::with_pair` will return a `None` value. Notice that this
-    /// does not guarantee that [`Finder::new`] will return a `Finder`. Namely,
-    /// even when `Finder::is_available` is true, it is not guaranteed that a
-    /// valid [`Pair`] can be found from the needle given.
-    ///
-    /// Note also that for the lifetime of a single program, if this returns
-    /// true then it will always return true.
-    #[inline]
-    pub fn is_available() -> bool {
-        #[cfg(not(target_feature = "sse2"))]
-        {
-            false
-        }
-        #[cfg(target_feature = "sse2")]
-        {
-            true
-        }
-    }
-
-    /// Execute a search using SSE2 vectors and routines.
-    ///
-    /// # Panics
-    ///
-    /// When `haystack.len()` is less than [`Finder::min_haystack_len`].
-    #[inline]
-    pub fn find(&self, haystack: &[u8], needle: &[u8]) -> Option<usize> {
-        // SAFETY: Building a `Finder` means it's safe to call 'sse2' routines.
-        unsafe { self.find_impl(haystack, needle) }
-    }
-
-    /// Run this finder on the given haystack as a prefilter.
-    ///
-    /// If a candidate match is found, then an offset where the needle *could*
-    /// begin in the haystack is returned.
-    ///
-    /// # Panics
-    ///
-    /// When `haystack.len()` is less than [`Finder::min_haystack_len`].
-    #[inline]
-    pub fn find_prefilter(&self, haystack: &[u8]) -> Option<usize> {
-        // SAFETY: Building a `Finder` means it's safe to call 'sse2' routines.
-        unsafe { self.find_prefilter_impl(haystack) }
-    }
-
-    /// Execute a search using SSE2 vectors and routines.
-    ///
-    /// # Panics
-    ///
-    /// When `haystack.len()` is less than [`Finder::min_haystack_len`].
-    ///
-    /// # Safety
-    ///
-    /// (The target feature safety obligation is automatically fulfilled by
-    /// virtue of being a method on `Finder`, which can only be constructed
-    /// when it is safe to call `sse2` routines.)
-    #[target_feature(enable = "sse2")]
-    #[inline]
-    unsafe fn find_impl(
-        &self,
-        haystack: &[u8],
-        needle: &[u8],
-    ) -> Option<usize> {
-        self.0.find(haystack, needle)
-    }
-
-    /// Execute a prefilter search using SSE2 vectors and routines.
-    ///
-    /// # Panics
-    ///
-    /// When `haystack.len()` is less than [`Finder::min_haystack_len`].
-    ///
-    /// # Safety
-    ///
-    /// (The target feature safety obligation is automatically fulfilled by
-    /// virtue of being a method on `Finder`, which can only be constructed
-    /// when it is safe to call `sse2` routines.)
-    #[target_feature(enable = "sse2")]
-    #[inline]
-    unsafe fn find_prefilter_impl(&self, haystack: &[u8]) -> Option<usize> {
-        self.0.find_prefilter(haystack)
-    }
-
-    /// Returns the pair of offsets (into the needle) used to check as a
-    /// predicate before confirming whether a needle exists at a particular
-    /// position.
-    #[inline]
-    pub fn pair(&self) -> &Pair {
-        self.0.pair()
-    }
-
-    /// Returns the minimum haystack length that this `Finder` can search.
-    ///
-    /// Using a haystack with length smaller than this in a search will result
-    /// in a panic. The reason for this restriction is that this finder is
-    /// meant to be a low-level component that is part of a larger substring
-    /// strategy. In that sense, it avoids trying to handle all cases and
-    /// instead only handles the cases that it can handle very well.
-    #[inline]
-    pub fn min_haystack_len(&self) -> usize {
-        self.0.min_haystack_len()
-    }
-}
-
-#[cfg(test)]
-mod tests {
-    use super::*;
-
-    fn find(haystack: &[u8], needle: &[u8]) -> Option<Option<usize>> {
-        let f = Finder::new(needle)?;
-        if haystack.len() < f.min_haystack_len() {
-            return None;
-        }
-        Some(f.find(haystack, needle))
-    }
-
-    define_substring_forward_quickcheck!(find);
-
-    #[test]
-    fn forward_substring() {
-        crate::tests::substring::Runner::new().fwd(find).run()
-    }
-
-    #[test]
-    fn forward_packedpair() {
-        fn find(
-            haystack: &[u8],
-            needle: &[u8],
-            index1: u8,
-            index2: u8,
-        ) -> Option<Option<usize>> {
-            let pair = Pair::with_indices(needle, index1, index2)?;
-            let f = Finder::with_pair(needle, pair)?;
-            if haystack.len() < f.min_haystack_len() {
-                return None;
-            }
-            Some(f.find(haystack, needle))
-        }
-        crate::tests::packedpair::Runner::new().fwd(find).run()
-    }
-
-    #[test]
-    fn forward_packedpair_prefilter() {
-        fn find(
-            haystack: &[u8],
-            needle: &[u8],
-            index1: u8,
-            index2: u8,
-        ) -> Option<Option<usize>> {
-            let pair = Pair::with_indices(needle, index1, index2)?;
-            let f = Finder::with_pair(needle, pair)?;
-            if haystack.len() < f.min_haystack_len() {
-                return None;
-            }
-            Some(f.find_prefilter(haystack))
-        }
-        crate::tests::packedpair::Runner::new().fwd(find).run()
-    }
-}
diff --git a/vendor/memchr/src/cow.rs b/vendor/memchr/src/cow.rs
deleted file mode 100644
index f291645728932c..00000000000000
--- a/vendor/memchr/src/cow.rs
+++ /dev/null
@@ -1,107 +0,0 @@
-use core::ops;
-
-/// A specialized copy-on-write byte string.
-///
-/// The purpose of this type is to permit usage of a "borrowed or owned
-/// byte string" in a way that keeps std/no-std compatibility. That is, in
-/// no-std/alloc mode, this type devolves into a simple &[u8] with no owned
-/// variant available. We can't just use a plain Cow because Cow is not in
-/// core.
-#[derive(Clone, Debug)]
-pub struct CowBytes<'a>(Imp<'a>);
-
-// N.B. We don't use alloc::borrow::Cow here since we can get away with a
-// Box<[u8]> for our use case, which is 1/3 smaller than the Vec<u8> that
-// a Cow<[u8]> would use.
-#[cfg(feature = "alloc")]
-#[derive(Clone, Debug)]
-enum Imp<'a> {
-    Borrowed(&'a [u8]),
-    Owned(alloc::boxed::Box<[u8]>),
-}
-
-#[cfg(not(feature = "alloc"))]
-#[derive(Clone, Debug)]
-struct Imp<'a>(&'a [u8]);
-
-impl<'a> ops::Deref for CowBytes<'a> {
-    type Target = [u8];
-
-    #[inline(always)]
-    fn deref(&self) -> &[u8] {
-        self.as_slice()
-    }
-}
-
-impl<'a> CowBytes<'a> {
-    /// Create a new borrowed CowBytes.
-    #[inline(always)]
-    pub(crate) fn new<B: ?Sized + AsRef<[u8]>>(bytes: &'a B) -> CowBytes<'a> {
-        CowBytes(Imp::new(bytes.as_ref()))
-    }
-
-    /// Create a new owned CowBytes.
-    #[cfg(feature = "alloc")]
-    #[inline(always)]
-    fn new_owned(bytes: alloc::boxed::Box<[u8]>) -> CowBytes<'static> {
-        CowBytes(Imp::Owned(bytes))
-    }
-
-    /// Return a borrowed byte string, regardless of whether this is an owned
-    /// or borrowed byte string internally.
-    #[inline(always)]
-    pub(crate) fn as_slice(&self) -> &[u8] {
-        self.0.as_slice()
-    }
-
-    /// Return an owned version of this copy-on-write byte string.
-    ///
-    /// If this is already an owned byte string internally, then this is a
-    /// no-op. Otherwise, the internal byte string is copied.
-    #[cfg(feature = "alloc")]
-    #[inline(always)]
-    pub(crate) fn into_owned(self) -> CowBytes<'static> {
-        match self.0 {
-            Imp::Borrowed(b) => {
-                CowBytes::new_owned(alloc::boxed::Box::from(b))
-            }
-            Imp::Owned(b) => CowBytes::new_owned(b),
-        }
-    }
-}
-
-impl<'a> Imp<'a> {
-    #[inline(always)]
-    pub fn new(bytes: &'a [u8]) -> Imp<'a> {
-        #[cfg(feature = "alloc")]
-        {
-            Imp::Borrowed(bytes)
-        }
-        #[cfg(not(feature = "alloc"))]
-        {
-            Imp(bytes)
-        }
-    }
-
-    #[cfg(feature = "alloc")]
-    #[inline(always)]
-    pub fn as_slice(&self) -> &[u8] {
-        #[cfg(feature = "alloc")]
-        {
-            match self {
-                Imp::Owned(ref x) => x,
-                Imp::Borrowed(x) => x,
-            }
-        }
-        #[cfg(not(feature = "alloc"))]
-        {
-            self.0
-        }
-    }
-
-    #[cfg(not(feature = "alloc"))]
-    #[inline(always)]
-    pub fn as_slice(&self) -> &[u8] {
-        self.0
-    }
-}
diff --git a/vendor/memchr/src/ext.rs b/vendor/memchr/src/ext.rs
deleted file mode 100644
index 802697ab34cc0c..00000000000000
--- a/vendor/memchr/src/ext.rs
+++ /dev/null
@@ -1,54 +0,0 @@
-/// A trait for adding some helper routines to pointers.
-pub(crate) trait Pointer {
-    /// Returns the distance, in units of `T`, between `self` and `origin`.
-    ///
-    /// # Safety
-    ///
-    /// Same as `ptr::offset_from` in addition to `self >= origin`.
-    unsafe fn distance(self, origin: Self) -> usize;
-
-    /// Casts this pointer to `usize`.
-    ///
-    /// Callers should not convert the `usize` back to a pointer if at all
-    /// possible. (And if you believe it's necessary, open an issue to discuss
-    /// why. Otherwise, it has the potential to violate pointer provenance.)
-    /// The purpose of this function is just to be able to do arithmetic, i.e.,
-    /// computing offsets or alignments.
-    fn as_usize(self) -> usize;
-}
-
-impl<T> Pointer for *const T {
-    unsafe fn distance(self, origin: *const T) -> usize {
-        // TODO: Replace with `ptr::sub_ptr` once stabilized.
-        usize::try_from(self.offset_from(origin)).unwrap_unchecked()
-    }
-
-    fn as_usize(self) -> usize {
-        self as usize
-    }
-}
-
-impl<T> Pointer for *mut T {
-    unsafe fn distance(self, origin: *mut T) -> usize {
-        (self as *const T).distance(origin as *const T)
-    }
-
-    fn as_usize(self) -> usize {
-        (self as *const T).as_usize()
-    }
-}
-
-/// A trait for adding some helper routines to raw bytes.
-#[cfg(test)]
-pub(crate) trait Byte {
-    /// Converts this byte to a `char` if it's ASCII. Otherwise panics.
- fn to_char(self) -> char; -} - -#[cfg(test)] -impl Byte for u8 { - fn to_char(self) -> char { - assert!(self.is_ascii()); - char::from(self) - } -} diff --git a/vendor/memchr/src/lib.rs b/vendor/memchr/src/lib.rs deleted file mode 100644 index b3105169cc1dd8..00000000000000 --- a/vendor/memchr/src/lib.rs +++ /dev/null @@ -1,221 +0,0 @@ -/*! -This library provides heavily optimized routines for string search primitives. - -# Overview - -This section gives a brief high level overview of what this crate offers. - -* The top-level module provides routines for searching for 1, 2 or 3 bytes - in the forward or reverse direction. When searching for more than one byte, - positions are considered a match if the byte at that position matches any - of the bytes. -* The [`memmem`] sub-module provides forward and reverse substring search - routines. - -In all such cases, routines operate on `&[u8]` without regard to encoding. This -is exactly what you want when searching either UTF-8 or arbitrary bytes. - -# Example: using `memchr` - -This example shows how to use `memchr` to find the first occurrence of `z` in -a haystack: - -``` -use memchr::memchr; - -let haystack = b"foo bar baz quuz"; -assert_eq!(Some(10), memchr(b'z', haystack)); -``` - -# Example: matching one of three possible bytes - -This examples shows how to use `memrchr3` to find occurrences of `a`, `b` or -`c`, starting at the end of the haystack. - -``` -use memchr::memchr3_iter; - -let haystack = b"xyzaxyzbxyzc"; - -let mut it = memchr3_iter(b'a', b'b', b'c', haystack).rev(); -assert_eq!(Some(11), it.next()); -assert_eq!(Some(7), it.next()); -assert_eq!(Some(3), it.next()); -assert_eq!(None, it.next()); -``` - -# Example: iterating over substring matches - -This example shows how to use the [`memmem`] sub-module to find occurrences of -a substring in a haystack. - -``` -use memchr::memmem; - -let haystack = b"foo bar foo baz foo"; - -let mut it = memmem::find_iter(haystack, "foo"); -assert_eq!(Some(0), it.next()); -assert_eq!(Some(8), it.next()); -assert_eq!(Some(16), it.next()); -assert_eq!(None, it.next()); -``` - -# Example: repeating a search for the same needle - -It may be possible for the overhead of constructing a substring searcher to be -measurable in some workloads. In cases where the same needle is used to search -many haystacks, it is possible to do construction once and thus to avoid it for -subsequent searches. This can be done with a [`memmem::Finder`]: - -``` -use memchr::memmem; - -let finder = memmem::Finder::new("foo"); - -assert_eq!(Some(4), finder.find(b"baz foo quux")); -assert_eq!(None, finder.find(b"quux baz bar")); -``` - -# Why use this crate? - -At first glance, the APIs provided by this crate might seem weird. Why provide -a dedicated routine like `memchr` for something that could be implemented -clearly and trivially in one line: - -``` -fn memchr(needle: u8, haystack: &[u8]) -> Option { - haystack.iter().position(|&b| b == needle) -} -``` - -Or similarly, why does this crate provide substring search routines when Rust's -core library already provides them? - -``` -fn search(haystack: &str, needle: &str) -> Option { - haystack.find(needle) -} -``` - -The primary reason for both of them to exist is performance. 
When it comes to -performance, at a high level at least, there are two primary ways to look at -it: - -* **Throughput**: For this, think about it as, "given some very large haystack - and a byte that never occurs in that haystack, how long does it take to - search through it and determine that it, in fact, does not occur?" -* **Latency**: For this, think about it as, "given a tiny haystack---just a - few bytes---how long does it take to determine if a byte is in it?" - -The `memchr` routine in this crate has _slightly_ worse latency than the -solution presented above, however, its throughput can easily be over an -order of magnitude faster. This is a good general purpose trade off to make. -You rarely lose, but often gain big. - -**NOTE:** The name `memchr` comes from the corresponding routine in `libc`. A -key advantage of using this library is that its performance is not tied to its -quality of implementation in the `libc` you happen to be using, which can vary -greatly from platform to platform. - -But what about substring search? This one is a bit more complicated. The -primary reason for its existence is still indeed performance, but it's also -useful because Rust's core library doesn't actually expose any substring -search routine on arbitrary bytes. The only substring search routine that -exists works exclusively on valid UTF-8. - -So if you have valid UTF-8, is there a reason to use this over the standard -library substring search routine? Yes. This routine is faster on almost every -metric, including latency. The natural question then, is why isn't this -implementation in the standard library, even if only for searching on UTF-8? -The reason is that the implementation details for using SIMD in the standard -library haven't quite been worked out yet. - -**NOTE:** Currently, only `x86_64`, `wasm32` and `aarch64` targets have vector -accelerated implementations of `memchr` (and friends) and `memmem`. - -# Crate features - -* **std** - When enabled (the default), this will permit features specific to -the standard library. Currently, the only thing used from the standard library -is runtime SIMD CPU feature detection. This means that this feature must be -enabled to get AVX2 accelerated routines on `x86_64` targets without enabling -the `avx2` feature at compile time, for example. When `std` is not enabled, -this crate will still attempt to use SSE2 accelerated routines on `x86_64`. It -will also use AVX2 accelerated routines when the `avx2` feature is enabled at -compile time. In general, enable this feature if you can. -* **alloc** - When enabled (the default), APIs in this crate requiring some -kind of allocation will become available. For example, the -[`memmem::Finder::into_owned`](crate::memmem::Finder::into_owned) API and the -[`arch::all::shiftor`](crate::arch::all::shiftor) substring search -implementation. Otherwise, this crate is designed from the ground up to be -usable in core-only contexts, so the `alloc` feature doesn't add much -currently. Notably, disabling `std` but enabling `alloc` will **not** result -in the use of AVX2 on `x86_64` targets unless the `avx2` feature is enabled -at compile time. (With `std` enabled, AVX2 can be used even without the `avx2` -feature enabled at compile time by way of runtime CPU feature detection.) -* **logging** - When enabled (disabled by default), the `log` crate is used -to emit log messages about what kinds of `memchr` and `memmem` algorithms -are used. 
Namely, both `memchr` and `memmem` have a number of different -implementation choices depending on the target and CPU, and the log messages -can help show what specific implementations are being used. Generally, this is -useful for debugging performance issues. -* **libc** - **DEPRECATED**. Previously, this enabled the use of the target's -`memchr` function from whatever `libc` was linked into the program. This -feature is now a no-op because this crate's implementation of `memchr` should -now be sufficiently fast on a number of platforms that `libc` should no longer -be needed. (This feature is somewhat of a holdover from this crate's origins. -Originally, this crate was literally just a safe wrapper function around the -`memchr` function from `libc`.) -*/ - -#![deny(missing_docs)] -#![no_std] -// It's just not worth trying to squash all dead code warnings. Pretty -// unfortunate IMO. Not really sure how to fix this other than to either -// live with it or sprinkle a whole mess of `cfg` annotations everywhere. -#![cfg_attr( - not(any( - all(target_arch = "x86_64", target_feature = "sse2"), - all(target_arch = "wasm32", target_feature = "simd128"), - target_arch = "aarch64", - )), - allow(dead_code) -)] -// Same deal for miri. -#![cfg_attr(miri, allow(dead_code, unused_macros))] - -// Supporting 8-bit (or others) would be fine. If you need it, please submit a -// bug report at https://github.com/BurntSushi/memchr -#[cfg(not(any( - target_pointer_width = "16", - target_pointer_width = "32", - target_pointer_width = "64" -)))] -compile_error!("memchr currently not supported on non-{16,32,64}"); - -#[cfg(any(test, feature = "std"))] -extern crate std; - -#[cfg(any(test, feature = "alloc"))] -extern crate alloc; - -pub use crate::memchr::{ - memchr, memchr2, memchr2_iter, memchr3, memchr3_iter, memchr_iter, - memrchr, memrchr2, memrchr2_iter, memrchr3, memrchr3_iter, memrchr_iter, - Memchr, Memchr2, Memchr3, -}; - -#[macro_use] -mod macros; - -#[cfg(test)] -#[macro_use] -mod tests; - -pub mod arch; -mod cow; -mod ext; -mod memchr; -pub mod memmem; -mod vector; diff --git a/vendor/memchr/src/macros.rs b/vendor/memchr/src/macros.rs deleted file mode 100644 index 31b4ca3816ace2..00000000000000 --- a/vendor/memchr/src/macros.rs +++ /dev/null @@ -1,20 +0,0 @@ -// Some feature combinations result in some of these macros never being used. -// Which is fine. Just squash the warnings. -#![allow(unused_macros)] - -macro_rules! log { - ($($tt:tt)*) => { - #[cfg(feature = "logging")] - { - $($tt)* - } - } -} - -macro_rules! debug { - ($($tt:tt)*) => { log!(log::debug!($($tt)*)) } -} - -macro_rules! trace { - ($($tt:tt)*) => { log!(log::trace!($($tt)*)) } -} diff --git a/vendor/memchr/src/memchr.rs b/vendor/memchr/src/memchr.rs deleted file mode 100644 index 92a18bd5fa9c35..00000000000000 --- a/vendor/memchr/src/memchr.rs +++ /dev/null @@ -1,903 +0,0 @@ -use core::iter::Rev; - -use crate::arch::generic::memchr as generic; - -/// Search for the first occurrence of a byte in a slice. -/// -/// This returns the index corresponding to the first occurrence of `needle` in -/// `haystack`, or `None` if one is not found. If an index is returned, it is -/// guaranteed to be less than `haystack.len()`. -/// -/// While this is semantically the same as something like -/// `haystack.iter().position(|&b| b == needle)`, this routine will attempt to -/// use highly optimized vector operations that can be an order of magnitude -/// faster (or more). 
-/// -/// # Example -/// -/// This shows how to find the first position of a byte in a byte string. -/// -/// ``` -/// use memchr::memchr; -/// -/// let haystack = b"the quick brown fox"; -/// assert_eq!(memchr(b'k', haystack), Some(8)); -/// ``` -#[inline] -pub fn memchr(needle: u8, haystack: &[u8]) -> Option { - // SAFETY: memchr_raw, when a match is found, always returns a valid - // pointer between start and end. - unsafe { - generic::search_slice_with_raw(haystack, |start, end| { - memchr_raw(needle, start, end) - }) - } -} - -/// Search for the last occurrence of a byte in a slice. -/// -/// This returns the index corresponding to the last occurrence of `needle` in -/// `haystack`, or `None` if one is not found. If an index is returned, it is -/// guaranteed to be less than `haystack.len()`. -/// -/// While this is semantically the same as something like -/// `haystack.iter().rposition(|&b| b == needle)`, this routine will attempt to -/// use highly optimized vector operations that can be an order of magnitude -/// faster (or more). -/// -/// # Example -/// -/// This shows how to find the last position of a byte in a byte string. -/// -/// ``` -/// use memchr::memrchr; -/// -/// let haystack = b"the quick brown fox"; -/// assert_eq!(memrchr(b'o', haystack), Some(17)); -/// ``` -#[inline] -pub fn memrchr(needle: u8, haystack: &[u8]) -> Option { - // SAFETY: memrchr_raw, when a match is found, always returns a valid - // pointer between start and end. - unsafe { - generic::search_slice_with_raw(haystack, |start, end| { - memrchr_raw(needle, start, end) - }) - } -} - -/// Search for the first occurrence of two possible bytes in a haystack. -/// -/// This returns the index corresponding to the first occurrence of one of the -/// needle bytes in `haystack`, or `None` if one is not found. If an index is -/// returned, it is guaranteed to be less than `haystack.len()`. -/// -/// While this is semantically the same as something like -/// `haystack.iter().position(|&b| b == needle1 || b == needle2)`, this routine -/// will attempt to use highly optimized vector operations that can be an order -/// of magnitude faster (or more). -/// -/// # Example -/// -/// This shows how to find the first position of one of two possible bytes in a -/// haystack. -/// -/// ``` -/// use memchr::memchr2; -/// -/// let haystack = b"the quick brown fox"; -/// assert_eq!(memchr2(b'k', b'q', haystack), Some(4)); -/// ``` -#[inline] -pub fn memchr2(needle1: u8, needle2: u8, haystack: &[u8]) -> Option { - // SAFETY: memchr2_raw, when a match is found, always returns a valid - // pointer between start and end. - unsafe { - generic::search_slice_with_raw(haystack, |start, end| { - memchr2_raw(needle1, needle2, start, end) - }) - } -} - -/// Search for the last occurrence of two possible bytes in a haystack. -/// -/// This returns the index corresponding to the last occurrence of one of the -/// needle bytes in `haystack`, or `None` if one is not found. If an index is -/// returned, it is guaranteed to be less than `haystack.len()`. -/// -/// While this is semantically the same as something like -/// `haystack.iter().rposition(|&b| b == needle1 || b == needle2)`, this -/// routine will attempt to use highly optimized vector operations that can be -/// an order of magnitude faster (or more). -/// -/// # Example -/// -/// This shows how to find the last position of one of two possible bytes in a -/// haystack. 
-/// -/// ``` -/// use memchr::memrchr2; -/// -/// let haystack = b"the quick brown fox"; -/// assert_eq!(memrchr2(b'k', b'o', haystack), Some(17)); -/// ``` -#[inline] -pub fn memrchr2(needle1: u8, needle2: u8, haystack: &[u8]) -> Option { - // SAFETY: memrchr2_raw, when a match is found, always returns a valid - // pointer between start and end. - unsafe { - generic::search_slice_with_raw(haystack, |start, end| { - memrchr2_raw(needle1, needle2, start, end) - }) - } -} - -/// Search for the first occurrence of three possible bytes in a haystack. -/// -/// This returns the index corresponding to the first occurrence of one of the -/// needle bytes in `haystack`, or `None` if one is not found. If an index is -/// returned, it is guaranteed to be less than `haystack.len()`. -/// -/// While this is semantically the same as something like -/// `haystack.iter().position(|&b| b == needle1 || b == needle2 || b == needle3)`, -/// this routine will attempt to use highly optimized vector operations that -/// can be an order of magnitude faster (or more). -/// -/// # Example -/// -/// This shows how to find the first position of one of three possible bytes in -/// a haystack. -/// -/// ``` -/// use memchr::memchr3; -/// -/// let haystack = b"the quick brown fox"; -/// assert_eq!(memchr3(b'k', b'q', b'u', haystack), Some(4)); -/// ``` -#[inline] -pub fn memchr3( - needle1: u8, - needle2: u8, - needle3: u8, - haystack: &[u8], -) -> Option { - // SAFETY: memchr3_raw, when a match is found, always returns a valid - // pointer between start and end. - unsafe { - generic::search_slice_with_raw(haystack, |start, end| { - memchr3_raw(needle1, needle2, needle3, start, end) - }) - } -} - -/// Search for the last occurrence of three possible bytes in a haystack. -/// -/// This returns the index corresponding to the last occurrence of one of the -/// needle bytes in `haystack`, or `None` if one is not found. If an index is -/// returned, it is guaranteed to be less than `haystack.len()`. -/// -/// While this is semantically the same as something like -/// `haystack.iter().rposition(|&b| b == needle1 || b == needle2 || b == needle3)`, -/// this routine will attempt to use highly optimized vector operations that -/// can be an order of magnitude faster (or more). -/// -/// # Example -/// -/// This shows how to find the last position of one of three possible bytes in -/// a haystack. -/// -/// ``` -/// use memchr::memrchr3; -/// -/// let haystack = b"the quick brown fox"; -/// assert_eq!(memrchr3(b'k', b'o', b'n', haystack), Some(17)); -/// ``` -#[inline] -pub fn memrchr3( - needle1: u8, - needle2: u8, - needle3: u8, - haystack: &[u8], -) -> Option { - // SAFETY: memrchr3_raw, when a match is found, always returns a valid - // pointer between start and end. - unsafe { - generic::search_slice_with_raw(haystack, |start, end| { - memrchr3_raw(needle1, needle2, needle3, start, end) - }) - } -} - -/// Returns an iterator over all occurrences of the needle in a haystack. -/// -/// The iterator returned implements `DoubleEndedIterator`. This means it -/// can also be used to find occurrences in reverse order. -#[inline] -pub fn memchr_iter<'h>(needle: u8, haystack: &'h [u8]) -> Memchr<'h> { - Memchr::new(needle, haystack) -} - -/// Returns an iterator over all occurrences of the needle in a haystack, in -/// reverse. -#[inline] -pub fn memrchr_iter(needle: u8, haystack: &[u8]) -> Rev> { - Memchr::new(needle, haystack).rev() -} - -/// Returns an iterator over all occurrences of the needles in a haystack. 
-/// -/// The iterator returned implements `DoubleEndedIterator`. This means it -/// can also be used to find occurrences in reverse order. -#[inline] -pub fn memchr2_iter<'h>( - needle1: u8, - needle2: u8, - haystack: &'h [u8], -) -> Memchr2<'h> { - Memchr2::new(needle1, needle2, haystack) -} - -/// Returns an iterator over all occurrences of the needles in a haystack, in -/// reverse. -#[inline] -pub fn memrchr2_iter( - needle1: u8, - needle2: u8, - haystack: &[u8], -) -> Rev> { - Memchr2::new(needle1, needle2, haystack).rev() -} - -/// Returns an iterator over all occurrences of the needles in a haystack. -/// -/// The iterator returned implements `DoubleEndedIterator`. This means it -/// can also be used to find occurrences in reverse order. -#[inline] -pub fn memchr3_iter<'h>( - needle1: u8, - needle2: u8, - needle3: u8, - haystack: &'h [u8], -) -> Memchr3<'h> { - Memchr3::new(needle1, needle2, needle3, haystack) -} - -/// Returns an iterator over all occurrences of the needles in a haystack, in -/// reverse. -#[inline] -pub fn memrchr3_iter( - needle1: u8, - needle2: u8, - needle3: u8, - haystack: &[u8], -) -> Rev> { - Memchr3::new(needle1, needle2, needle3, haystack).rev() -} - -/// An iterator over all occurrences of a single byte in a haystack. -/// -/// This iterator implements `DoubleEndedIterator`, which means it can also be -/// used to find occurrences in reverse order. -/// -/// This iterator is created by the [`memchr_iter`] or `[memrchr_iter`] -/// functions. It can also be created with the [`Memchr::new`] method. -/// -/// The lifetime parameter `'h` refers to the lifetime of the haystack being -/// searched. -#[derive(Clone, Debug)] -pub struct Memchr<'h> { - needle1: u8, - it: crate::arch::generic::memchr::Iter<'h>, -} - -impl<'h> Memchr<'h> { - /// Returns an iterator over all occurrences of the needle byte in the - /// given haystack. - /// - /// The iterator returned implements `DoubleEndedIterator`. This means it - /// can also be used to find occurrences in reverse order. - #[inline] - pub fn new(needle1: u8, haystack: &'h [u8]) -> Memchr<'h> { - Memchr { - needle1, - it: crate::arch::generic::memchr::Iter::new(haystack), - } - } -} - -impl<'h> Iterator for Memchr<'h> { - type Item = usize; - - #[inline] - fn next(&mut self) -> Option { - // SAFETY: All of our implementations of memchr ensure that any - // pointers returns will fall within the start and end bounds, and this - // upholds the safety contract of `self.it.next`. - unsafe { - // NOTE: I attempted to define an enum of previously created - // searchers and then switch on those here instead of just - // calling `memchr_raw` (or `One::new(..).find_raw(..)`). But - // that turned out to have a fair bit of extra overhead when - // searching very small haystacks. - self.it.next(|s, e| memchr_raw(self.needle1, s, e)) - } - } - - #[inline] - fn count(self) -> usize { - self.it.count(|s, e| { - // SAFETY: We rely on our generic iterator to return valid start - // and end pointers. - unsafe { count_raw(self.needle1, s, e) } - }) - } - - #[inline] - fn size_hint(&self) -> (usize, Option) { - self.it.size_hint() - } -} - -impl<'h> DoubleEndedIterator for Memchr<'h> { - #[inline] - fn next_back(&mut self) -> Option { - // SAFETY: All of our implementations of memchr ensure that any - // pointers returns will fall within the start and end bounds, and this - // upholds the safety contract of `self.it.next_back`. 
- unsafe { self.it.next_back(|s, e| memrchr_raw(self.needle1, s, e)) } - } -} - -impl<'h> core::iter::FusedIterator for Memchr<'h> {} - -/// An iterator over all occurrences of two possible bytes in a haystack. -/// -/// This iterator implements `DoubleEndedIterator`, which means it can also be -/// used to find occurrences in reverse order. -/// -/// This iterator is created by the [`memchr2_iter`] or `[memrchr2_iter`] -/// functions. It can also be created with the [`Memchr2::new`] method. -/// -/// The lifetime parameter `'h` refers to the lifetime of the haystack being -/// searched. -#[derive(Clone, Debug)] -pub struct Memchr2<'h> { - needle1: u8, - needle2: u8, - it: crate::arch::generic::memchr::Iter<'h>, -} - -impl<'h> Memchr2<'h> { - /// Returns an iterator over all occurrences of the needle bytes in the - /// given haystack. - /// - /// The iterator returned implements `DoubleEndedIterator`. This means it - /// can also be used to find occurrences in reverse order. - #[inline] - pub fn new(needle1: u8, needle2: u8, haystack: &'h [u8]) -> Memchr2<'h> { - Memchr2 { - needle1, - needle2, - it: crate::arch::generic::memchr::Iter::new(haystack), - } - } -} - -impl<'h> Iterator for Memchr2<'h> { - type Item = usize; - - #[inline] - fn next(&mut self) -> Option { - // SAFETY: All of our implementations of memchr ensure that any - // pointers returns will fall within the start and end bounds, and this - // upholds the safety contract of `self.it.next`. - unsafe { - self.it.next(|s, e| memchr2_raw(self.needle1, self.needle2, s, e)) - } - } - - #[inline] - fn size_hint(&self) -> (usize, Option) { - self.it.size_hint() - } -} - -impl<'h> DoubleEndedIterator for Memchr2<'h> { - #[inline] - fn next_back(&mut self) -> Option { - // SAFETY: All of our implementations of memchr ensure that any - // pointers returns will fall within the start and end bounds, and this - // upholds the safety contract of `self.it.next_back`. - unsafe { - self.it.next_back(|s, e| { - memrchr2_raw(self.needle1, self.needle2, s, e) - }) - } - } -} - -impl<'h> core::iter::FusedIterator for Memchr2<'h> {} - -/// An iterator over all occurrences of three possible bytes in a haystack. -/// -/// This iterator implements `DoubleEndedIterator`, which means it can also be -/// used to find occurrences in reverse order. -/// -/// This iterator is created by the [`memchr2_iter`] or `[memrchr2_iter`] -/// functions. It can also be created with the [`Memchr3::new`] method. -/// -/// The lifetime parameter `'h` refers to the lifetime of the haystack being -/// searched. -#[derive(Clone, Debug)] -pub struct Memchr3<'h> { - needle1: u8, - needle2: u8, - needle3: u8, - it: crate::arch::generic::memchr::Iter<'h>, -} - -impl<'h> Memchr3<'h> { - /// Returns an iterator over all occurrences of the needle bytes in the - /// given haystack. - /// - /// The iterator returned implements `DoubleEndedIterator`. This means it - /// can also be used to find occurrences in reverse order. - #[inline] - pub fn new( - needle1: u8, - needle2: u8, - needle3: u8, - haystack: &'h [u8], - ) -> Memchr3<'h> { - Memchr3 { - needle1, - needle2, - needle3, - it: crate::arch::generic::memchr::Iter::new(haystack), - } - } -} - -impl<'h> Iterator for Memchr3<'h> { - type Item = usize; - - #[inline] - fn next(&mut self) -> Option { - // SAFETY: All of our implementations of memchr ensure that any - // pointers returns will fall within the start and end bounds, and this - // upholds the safety contract of `self.it.next`. 
- unsafe { - self.it.next(|s, e| { - memchr3_raw(self.needle1, self.needle2, self.needle3, s, e) - }) - } - } - - #[inline] - fn size_hint(&self) -> (usize, Option) { - self.it.size_hint() - } -} - -impl<'h> DoubleEndedIterator for Memchr3<'h> { - #[inline] - fn next_back(&mut self) -> Option { - // SAFETY: All of our implementations of memchr ensure that any - // pointers returns will fall within the start and end bounds, and this - // upholds the safety contract of `self.it.next_back`. - unsafe { - self.it.next_back(|s, e| { - memrchr3_raw(self.needle1, self.needle2, self.needle3, s, e) - }) - } - } -} - -impl<'h> core::iter::FusedIterator for Memchr3<'h> {} - -/// memchr, but using raw pointers to represent the haystack. -/// -/// # Safety -/// -/// Pointers must be valid. See `One::find_raw`. -#[inline] -unsafe fn memchr_raw( - needle: u8, - start: *const u8, - end: *const u8, -) -> Option<*const u8> { - #[cfg(target_arch = "x86_64")] - { - // x86_64 does CPU feature detection at runtime in order to use AVX2 - // instructions even when the `avx2` feature isn't enabled at compile - // time. This function also handles using a fallback if neither AVX2 - // nor SSE2 (unusual) are available. - crate::arch::x86_64::memchr::memchr_raw(needle, start, end) - } - #[cfg(all(target_arch = "wasm32", target_feature = "simd128"))] - { - crate::arch::wasm32::memchr::memchr_raw(needle, start, end) - } - #[cfg(target_arch = "aarch64")] - { - crate::arch::aarch64::memchr::memchr_raw(needle, start, end) - } - #[cfg(not(any( - target_arch = "x86_64", - all(target_arch = "wasm32", target_feature = "simd128"), - target_arch = "aarch64" - )))] - { - crate::arch::all::memchr::One::new(needle).find_raw(start, end) - } -} - -/// memrchr, but using raw pointers to represent the haystack. -/// -/// # Safety -/// -/// Pointers must be valid. See `One::rfind_raw`. -#[inline] -unsafe fn memrchr_raw( - needle: u8, - start: *const u8, - end: *const u8, -) -> Option<*const u8> { - #[cfg(target_arch = "x86_64")] - { - crate::arch::x86_64::memchr::memrchr_raw(needle, start, end) - } - #[cfg(all(target_arch = "wasm32", target_feature = "simd128"))] - { - crate::arch::wasm32::memchr::memrchr_raw(needle, start, end) - } - #[cfg(target_arch = "aarch64")] - { - crate::arch::aarch64::memchr::memrchr_raw(needle, start, end) - } - #[cfg(not(any( - target_arch = "x86_64", - all(target_arch = "wasm32", target_feature = "simd128"), - target_arch = "aarch64" - )))] - { - crate::arch::all::memchr::One::new(needle).rfind_raw(start, end) - } -} - -/// memchr2, but using raw pointers to represent the haystack. -/// -/// # Safety -/// -/// Pointers must be valid. See `Two::find_raw`. -#[inline] -unsafe fn memchr2_raw( - needle1: u8, - needle2: u8, - start: *const u8, - end: *const u8, -) -> Option<*const u8> { - #[cfg(target_arch = "x86_64")] - { - crate::arch::x86_64::memchr::memchr2_raw(needle1, needle2, start, end) - } - #[cfg(all(target_arch = "wasm32", target_feature = "simd128"))] - { - crate::arch::wasm32::memchr::memchr2_raw(needle1, needle2, start, end) - } - #[cfg(target_arch = "aarch64")] - { - crate::arch::aarch64::memchr::memchr2_raw(needle1, needle2, start, end) - } - #[cfg(not(any( - target_arch = "x86_64", - all(target_arch = "wasm32", target_feature = "simd128"), - target_arch = "aarch64" - )))] - { - crate::arch::all::memchr::Two::new(needle1, needle2) - .find_raw(start, end) - } -} - -/// memrchr2, but using raw pointers to represent the haystack. -/// -/// # Safety -/// -/// Pointers must be valid. 
See `Two::rfind_raw`. -#[inline] -unsafe fn memrchr2_raw( - needle1: u8, - needle2: u8, - start: *const u8, - end: *const u8, -) -> Option<*const u8> { - #[cfg(target_arch = "x86_64")] - { - crate::arch::x86_64::memchr::memrchr2_raw(needle1, needle2, start, end) - } - #[cfg(all(target_arch = "wasm32", target_feature = "simd128"))] - { - crate::arch::wasm32::memchr::memrchr2_raw(needle1, needle2, start, end) - } - #[cfg(target_arch = "aarch64")] - { - crate::arch::aarch64::memchr::memrchr2_raw( - needle1, needle2, start, end, - ) - } - #[cfg(not(any( - target_arch = "x86_64", - all(target_arch = "wasm32", target_feature = "simd128"), - target_arch = "aarch64" - )))] - { - crate::arch::all::memchr::Two::new(needle1, needle2) - .rfind_raw(start, end) - } -} - -/// memchr3, but using raw pointers to represent the haystack. -/// -/// # Safety -/// -/// Pointers must be valid. See `Three::find_raw`. -#[inline] -unsafe fn memchr3_raw( - needle1: u8, - needle2: u8, - needle3: u8, - start: *const u8, - end: *const u8, -) -> Option<*const u8> { - #[cfg(target_arch = "x86_64")] - { - crate::arch::x86_64::memchr::memchr3_raw( - needle1, needle2, needle3, start, end, - ) - } - #[cfg(all(target_arch = "wasm32", target_feature = "simd128"))] - { - crate::arch::wasm32::memchr::memchr3_raw( - needle1, needle2, needle3, start, end, - ) - } - #[cfg(target_arch = "aarch64")] - { - crate::arch::aarch64::memchr::memchr3_raw( - needle1, needle2, needle3, start, end, - ) - } - #[cfg(not(any( - target_arch = "x86_64", - all(target_arch = "wasm32", target_feature = "simd128"), - target_arch = "aarch64" - )))] - { - crate::arch::all::memchr::Three::new(needle1, needle2, needle3) - .find_raw(start, end) - } -} - -/// memrchr3, but using raw pointers to represent the haystack. -/// -/// # Safety -/// -/// Pointers must be valid. See `Three::rfind_raw`. -#[inline] -unsafe fn memrchr3_raw( - needle1: u8, - needle2: u8, - needle3: u8, - start: *const u8, - end: *const u8, -) -> Option<*const u8> { - #[cfg(target_arch = "x86_64")] - { - crate::arch::x86_64::memchr::memrchr3_raw( - needle1, needle2, needle3, start, end, - ) - } - #[cfg(all(target_arch = "wasm32", target_feature = "simd128"))] - { - crate::arch::wasm32::memchr::memrchr3_raw( - needle1, needle2, needle3, start, end, - ) - } - #[cfg(target_arch = "aarch64")] - { - crate::arch::aarch64::memchr::memrchr3_raw( - needle1, needle2, needle3, start, end, - ) - } - #[cfg(not(any( - target_arch = "x86_64", - all(target_arch = "wasm32", target_feature = "simd128"), - target_arch = "aarch64" - )))] - { - crate::arch::all::memchr::Three::new(needle1, needle2, needle3) - .rfind_raw(start, end) - } -} - -/// Count all matching bytes, but using raw pointers to represent the haystack. -/// -/// # Safety -/// -/// Pointers must be valid. See `One::count_raw`. 
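The per-architecture dispatch above relies on runtime CPU feature detection on x86_64, as the comment in `memchr_raw` notes. The crate's own dispatch goes through its `arch` modules and also works without `std`; the snippet below is only a minimal sketch of that general technique, with hypothetical `find_byte`/`find_byte_avx2` names and a scalar stand-in for the vectorized path:

```
fn find_byte(needle: u8, haystack: &[u8]) -> Option<usize> {
    #[cfg(target_arch = "x86_64")]
    {
        // Runtime check: the CPU may support AVX2 even though the crate was
        // not compiled with the `avx2` target feature enabled.
        if is_x86_feature_detected!("avx2") {
            return find_byte_avx2(needle, haystack);
        }
    }
    // Portable scalar fallback for other targets or older x86_64 CPUs.
    haystack.iter().position(|&b| b == needle)
}

#[cfg(target_arch = "x86_64")]
fn find_byte_avx2(needle: u8, haystack: &[u8]) -> Option<usize> {
    // Stand-in for a vectorized implementation, kept scalar here so the
    // sketch stays self-contained.
    haystack.iter().position(|&b| b == needle)
}

fn main() {
    assert_eq!(find_byte(b'r', b"bar"), Some(2));
}
```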
-#[inline] -unsafe fn count_raw(needle: u8, start: *const u8, end: *const u8) -> usize { - #[cfg(target_arch = "x86_64")] - { - crate::arch::x86_64::memchr::count_raw(needle, start, end) - } - #[cfg(all(target_arch = "wasm32", target_feature = "simd128"))] - { - crate::arch::wasm32::memchr::count_raw(needle, start, end) - } - #[cfg(target_arch = "aarch64")] - { - crate::arch::aarch64::memchr::count_raw(needle, start, end) - } - #[cfg(not(any( - target_arch = "x86_64", - all(target_arch = "wasm32", target_feature = "simd128"), - target_arch = "aarch64" - )))] - { - crate::arch::all::memchr::One::new(needle).count_raw(start, end) - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn forward1_iter() { - crate::tests::memchr::Runner::new(1).forward_iter( - |haystack, needles| { - Some(memchr_iter(needles[0], haystack).collect()) - }, - ) - } - - #[test] - fn forward1_oneshot() { - crate::tests::memchr::Runner::new(1).forward_oneshot( - |haystack, needles| Some(memchr(needles[0], haystack)), - ) - } - - #[test] - fn reverse1_iter() { - crate::tests::memchr::Runner::new(1).reverse_iter( - |haystack, needles| { - Some(memrchr_iter(needles[0], haystack).collect()) - }, - ) - } - - #[test] - fn reverse1_oneshot() { - crate::tests::memchr::Runner::new(1).reverse_oneshot( - |haystack, needles| Some(memrchr(needles[0], haystack)), - ) - } - - #[test] - fn count1_iter() { - crate::tests::memchr::Runner::new(1).count_iter(|haystack, needles| { - Some(memchr_iter(needles[0], haystack).count()) - }) - } - - #[test] - fn forward2_iter() { - crate::tests::memchr::Runner::new(2).forward_iter( - |haystack, needles| { - let n1 = needles.get(0).copied()?; - let n2 = needles.get(1).copied()?; - Some(memchr2_iter(n1, n2, haystack).collect()) - }, - ) - } - - #[test] - fn forward2_oneshot() { - crate::tests::memchr::Runner::new(2).forward_oneshot( - |haystack, needles| { - let n1 = needles.get(0).copied()?; - let n2 = needles.get(1).copied()?; - Some(memchr2(n1, n2, haystack)) - }, - ) - } - - #[test] - fn reverse2_iter() { - crate::tests::memchr::Runner::new(2).reverse_iter( - |haystack, needles| { - let n1 = needles.get(0).copied()?; - let n2 = needles.get(1).copied()?; - Some(memrchr2_iter(n1, n2, haystack).collect()) - }, - ) - } - - #[test] - fn reverse2_oneshot() { - crate::tests::memchr::Runner::new(2).reverse_oneshot( - |haystack, needles| { - let n1 = needles.get(0).copied()?; - let n2 = needles.get(1).copied()?; - Some(memrchr2(n1, n2, haystack)) - }, - ) - } - - #[test] - fn forward3_iter() { - crate::tests::memchr::Runner::new(3).forward_iter( - |haystack, needles| { - let n1 = needles.get(0).copied()?; - let n2 = needles.get(1).copied()?; - let n3 = needles.get(2).copied()?; - Some(memchr3_iter(n1, n2, n3, haystack).collect()) - }, - ) - } - - #[test] - fn forward3_oneshot() { - crate::tests::memchr::Runner::new(3).forward_oneshot( - |haystack, needles| { - let n1 = needles.get(0).copied()?; - let n2 = needles.get(1).copied()?; - let n3 = needles.get(2).copied()?; - Some(memchr3(n1, n2, n3, haystack)) - }, - ) - } - - #[test] - fn reverse3_iter() { - crate::tests::memchr::Runner::new(3).reverse_iter( - |haystack, needles| { - let n1 = needles.get(0).copied()?; - let n2 = needles.get(1).copied()?; - let n3 = needles.get(2).copied()?; - Some(memrchr3_iter(n1, n2, n3, haystack).collect()) - }, - ) - } - - #[test] - fn reverse3_oneshot() { - crate::tests::memchr::Runner::new(3).reverse_oneshot( - |haystack, needles| { - let n1 = needles.get(0).copied()?; - let n2 = needles.get(1).copied()?; 
- let n3 = needles.get(2).copied()?; - Some(memrchr3(n1, n2, n3, haystack)) - }, - ) - } - - // Prior to memchr 2.6, the memchr iterators both implemented Send and - // Sync. But in memchr 2.6, the iterator changed to use raw pointers - // internally and I didn't add explicit Send/Sync impls. This ended up - // regressing the API. This test ensures we don't do that again. - // - // See: https://github.com/BurntSushi/memchr/issues/133 - #[test] - fn sync_regression() { - use core::panic::{RefUnwindSafe, UnwindSafe}; - - fn assert_send_sync() {} - assert_send_sync::(); - assert_send_sync::(); - assert_send_sync::() - } -} diff --git a/vendor/memchr/src/memmem/mod.rs b/vendor/memchr/src/memmem/mod.rs deleted file mode 100644 index 4f04943e6497c5..00000000000000 --- a/vendor/memchr/src/memmem/mod.rs +++ /dev/null @@ -1,737 +0,0 @@ -/*! -This module provides forward and reverse substring search routines. - -Unlike the standard library's substring search routines, these work on -arbitrary bytes. For all non-empty needles, these routines will report exactly -the same values as the corresponding routines in the standard library. For -the empty needle, the standard library reports matches only at valid UTF-8 -boundaries, where as these routines will report matches at every position. - -Other than being able to work on arbitrary bytes, the primary reason to prefer -these routines over the standard library routines is that these will generally -be faster. In some cases, significantly so. - -# Example: iterating over substring matches - -This example shows how to use [`find_iter`] to find occurrences of a substring -in a haystack. - -``` -use memchr::memmem; - -let haystack = b"foo bar foo baz foo"; - -let mut it = memmem::find_iter(haystack, "foo"); -assert_eq!(Some(0), it.next()); -assert_eq!(Some(8), it.next()); -assert_eq!(Some(16), it.next()); -assert_eq!(None, it.next()); -``` - -# Example: iterating over substring matches in reverse - -This example shows how to use [`rfind_iter`] to find occurrences of a substring -in a haystack starting from the end of the haystack. - -**NOTE:** This module does not implement double ended iterators, so reverse -searches aren't done by calling `rev` on a forward iterator. - -``` -use memchr::memmem; - -let haystack = b"foo bar foo baz foo"; - -let mut it = memmem::rfind_iter(haystack, "foo"); -assert_eq!(Some(16), it.next()); -assert_eq!(Some(8), it.next()); -assert_eq!(Some(0), it.next()); -assert_eq!(None, it.next()); -``` - -# Example: repeating a search for the same needle - -It may be possible for the overhead of constructing a substring searcher to be -measurable in some workloads. In cases where the same needle is used to search -many haystacks, it is possible to do construction once and thus to avoid it for -subsequent searches. This can be done with a [`Finder`] (or a [`FinderRev`] for -reverse searches). - -``` -use memchr::memmem; - -let finder = memmem::Finder::new("foo"); - -assert_eq!(Some(4), finder.find(b"baz foo quux")); -assert_eq!(None, finder.find(b"quux baz bar")); -``` -*/ - -pub use crate::memmem::searcher::PrefilterConfig as Prefilter; - -// This is exported here for use in the crate::arch::all::twoway -// implementation. This is essentially an abstraction breaker. Namely, the -// public API of twoway doesn't support providing a prefilter, but its crate -// internal API does. The main reason for this is that I didn't want to do the -// API design required to support it without a concrete use case. 
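The module documentation above notes that, unlike the standard library, these routines report empty-needle matches at every byte position rather than only at UTF-8 boundaries. A quick check of that documented behavior (one match per byte offset, including the end of the haystack):

```
use memchr::memmem;

fn main() {
    // 6 bytes, 5 chars: byte offset 2 falls in the middle of the 'é'.
    let haystack = "héllo";

    let positions: Vec<usize> =
        memmem::find_iter(haystack.as_bytes(), b"").collect();

    assert_eq!(positions.len(), haystack.len() + 1);
    assert_eq!(positions, (0..=haystack.len()).collect::<Vec<usize>>());
}
```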
-pub(crate) use crate::memmem::searcher::Pre; - -use crate::{ - arch::all::{ - packedpair::{DefaultFrequencyRank, HeuristicFrequencyRank}, - rabinkarp, - }, - cow::CowBytes, - memmem::searcher::{PrefilterState, Searcher, SearcherRev}, -}; - -mod searcher; - -/// Returns an iterator over all non-overlapping occurrences of a substring in -/// a haystack. -/// -/// # Complexity -/// -/// This routine is guaranteed to have worst case linear time complexity -/// with respect to both the needle and the haystack. That is, this runs -/// in `O(needle.len() + haystack.len())` time. -/// -/// This routine is also guaranteed to have worst case constant space -/// complexity. -/// -/// # Examples -/// -/// Basic usage: -/// -/// ``` -/// use memchr::memmem; -/// -/// let haystack = b"foo bar foo baz foo"; -/// let mut it = memmem::find_iter(haystack, b"foo"); -/// assert_eq!(Some(0), it.next()); -/// assert_eq!(Some(8), it.next()); -/// assert_eq!(Some(16), it.next()); -/// assert_eq!(None, it.next()); -/// ``` -#[inline] -pub fn find_iter<'h, 'n, N: 'n + ?Sized + AsRef<[u8]>>( - haystack: &'h [u8], - needle: &'n N, -) -> FindIter<'h, 'n> { - FindIter::new(haystack, Finder::new(needle)) -} - -/// Returns a reverse iterator over all non-overlapping occurrences of a -/// substring in a haystack. -/// -/// # Complexity -/// -/// This routine is guaranteed to have worst case linear time complexity -/// with respect to both the needle and the haystack. That is, this runs -/// in `O(needle.len() + haystack.len())` time. -/// -/// This routine is also guaranteed to have worst case constant space -/// complexity. -/// -/// # Examples -/// -/// Basic usage: -/// -/// ``` -/// use memchr::memmem; -/// -/// let haystack = b"foo bar foo baz foo"; -/// let mut it = memmem::rfind_iter(haystack, b"foo"); -/// assert_eq!(Some(16), it.next()); -/// assert_eq!(Some(8), it.next()); -/// assert_eq!(Some(0), it.next()); -/// assert_eq!(None, it.next()); -/// ``` -#[inline] -pub fn rfind_iter<'h, 'n, N: 'n + ?Sized + AsRef<[u8]>>( - haystack: &'h [u8], - needle: &'n N, -) -> FindRevIter<'h, 'n> { - FindRevIter::new(haystack, FinderRev::new(needle)) -} - -/// Returns the index of the first occurrence of the given needle. -/// -/// Note that if you're are searching for the same needle in many different -/// small haystacks, it may be faster to initialize a [`Finder`] once, -/// and reuse it for each search. -/// -/// # Complexity -/// -/// This routine is guaranteed to have worst case linear time complexity -/// with respect to both the needle and the haystack. That is, this runs -/// in `O(needle.len() + haystack.len())` time. -/// -/// This routine is also guaranteed to have worst case constant space -/// complexity. -/// -/// # Examples -/// -/// Basic usage: -/// -/// ``` -/// use memchr::memmem; -/// -/// let haystack = b"foo bar baz"; -/// assert_eq!(Some(0), memmem::find(haystack, b"foo")); -/// assert_eq!(Some(4), memmem::find(haystack, b"bar")); -/// assert_eq!(None, memmem::find(haystack, b"quux")); -/// ``` -#[inline] -pub fn find(haystack: &[u8], needle: &[u8]) -> Option { - if haystack.len() < 64 { - rabinkarp::Finder::new(needle).find(haystack, needle) - } else { - Finder::new(needle).find(haystack) - } -} - -/// Returns the index of the last occurrence of the given needle. -/// -/// Note that if you're are searching for the same needle in many different -/// small haystacks, it may be faster to initialize a [`FinderRev`] once, -/// and reuse it for each search. 
-/// -/// # Complexity -/// -/// This routine is guaranteed to have worst case linear time complexity -/// with respect to both the needle and the haystack. That is, this runs -/// in `O(needle.len() + haystack.len())` time. -/// -/// This routine is also guaranteed to have worst case constant space -/// complexity. -/// -/// # Examples -/// -/// Basic usage: -/// -/// ``` -/// use memchr::memmem; -/// -/// let haystack = b"foo bar baz"; -/// assert_eq!(Some(0), memmem::rfind(haystack, b"foo")); -/// assert_eq!(Some(4), memmem::rfind(haystack, b"bar")); -/// assert_eq!(Some(8), memmem::rfind(haystack, b"ba")); -/// assert_eq!(None, memmem::rfind(haystack, b"quux")); -/// ``` -#[inline] -pub fn rfind(haystack: &[u8], needle: &[u8]) -> Option { - if haystack.len() < 64 { - rabinkarp::FinderRev::new(needle).rfind(haystack, needle) - } else { - FinderRev::new(needle).rfind(haystack) - } -} - -/// An iterator over non-overlapping substring matches. -/// -/// Matches are reported by the byte offset at which they begin. -/// -/// `'h` is the lifetime of the haystack while `'n` is the lifetime of the -/// needle. -#[derive(Debug, Clone)] -pub struct FindIter<'h, 'n> { - haystack: &'h [u8], - prestate: PrefilterState, - finder: Finder<'n>, - pos: usize, -} - -impl<'h, 'n> FindIter<'h, 'n> { - #[inline(always)] - pub(crate) fn new( - haystack: &'h [u8], - finder: Finder<'n>, - ) -> FindIter<'h, 'n> { - let prestate = PrefilterState::new(); - FindIter { haystack, prestate, finder, pos: 0 } - } - - /// Convert this iterator into its owned variant, such that it no longer - /// borrows the finder and needle. - /// - /// If this is already an owned iterator, then this is a no-op. Otherwise, - /// this copies the needle. - /// - /// This is only available when the `alloc` feature is enabled. - #[cfg(feature = "alloc")] - #[inline] - pub fn into_owned(self) -> FindIter<'h, 'static> { - FindIter { - haystack: self.haystack, - prestate: self.prestate, - finder: self.finder.into_owned(), - pos: self.pos, - } - } -} - -impl<'h, 'n> Iterator for FindIter<'h, 'n> { - type Item = usize; - - fn next(&mut self) -> Option { - let needle = self.finder.needle(); - let haystack = self.haystack.get(self.pos..)?; - let idx = - self.finder.searcher.find(&mut self.prestate, haystack, needle)?; - - let pos = self.pos + idx; - self.pos = pos + needle.len().max(1); - - Some(pos) - } - - fn size_hint(&self) -> (usize, Option) { - // The largest possible number of non-overlapping matches is the - // quotient of the haystack and the needle (or the length of the - // haystack, if the needle is empty) - match self.haystack.len().checked_sub(self.pos) { - None => (0, Some(0)), - Some(haystack_len) => match self.finder.needle().len() { - // Empty needles always succeed and match at every point - // (including the very end) - 0 => ( - haystack_len.saturating_add(1), - haystack_len.checked_add(1), - ), - needle_len => (0, Some(haystack_len / needle_len)), - }, - } - } -} - -/// An iterator over non-overlapping substring matches in reverse. -/// -/// Matches are reported by the byte offset at which they begin. -/// -/// `'h` is the lifetime of the haystack while `'n` is the lifetime of the -/// needle. -#[derive(Clone, Debug)] -pub struct FindRevIter<'h, 'n> { - haystack: &'h [u8], - finder: FinderRev<'n>, - /// When searching with an empty needle, this gets set to `None` after - /// we've yielded the last element at `0`. 
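The `size_hint` implementation above bounds the number of non-overlapping matches: at most `haystack.len() / needle.len()` for a non-empty needle, and exactly `haystack.len() + 1` for an empty one. A small sketch of those bounds, assuming the hint is read before any matches are consumed:

```
use memchr::memmem;

fn main() {
    // Non-empty needle: at most haystack.len() / needle.len() matches.
    let it = memmem::find_iter(b"foo bar foo", b"foo");
    assert_eq!(it.size_hint(), (0, Some(11 / 3)));

    // Empty needle: exactly haystack.len() + 1 matches.
    let it = memmem::find_iter(b"foo bar foo", b"");
    assert_eq!(it.size_hint(), (12, Some(12)));
}
```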
- pos: Option, -} - -impl<'h, 'n> FindRevIter<'h, 'n> { - #[inline(always)] - pub(crate) fn new( - haystack: &'h [u8], - finder: FinderRev<'n>, - ) -> FindRevIter<'h, 'n> { - let pos = Some(haystack.len()); - FindRevIter { haystack, finder, pos } - } - - /// Convert this iterator into its owned variant, such that it no longer - /// borrows the finder and needle. - /// - /// If this is already an owned iterator, then this is a no-op. Otherwise, - /// this copies the needle. - /// - /// This is only available when the `std` feature is enabled. - #[cfg(feature = "alloc")] - #[inline] - pub fn into_owned(self) -> FindRevIter<'h, 'static> { - FindRevIter { - haystack: self.haystack, - finder: self.finder.into_owned(), - pos: self.pos, - } - } -} - -impl<'h, 'n> Iterator for FindRevIter<'h, 'n> { - type Item = usize; - - fn next(&mut self) -> Option { - let pos = match self.pos { - None => return None, - Some(pos) => pos, - }; - let result = self.finder.rfind(&self.haystack[..pos]); - match result { - None => None, - Some(i) => { - if pos == i { - self.pos = pos.checked_sub(1); - } else { - self.pos = Some(i); - } - Some(i) - } - } - } -} - -/// A single substring searcher fixed to a particular needle. -/// -/// The purpose of this type is to permit callers to construct a substring -/// searcher that can be used to search haystacks without the overhead of -/// constructing the searcher in the first place. This is a somewhat niche -/// concern when it's necessary to re-use the same needle to search multiple -/// different haystacks with as little overhead as possible. In general, using -/// [`find`] is good enough, but `Finder` is useful when you can meaningfully -/// observe searcher construction time in a profile. -/// -/// When the `std` feature is enabled, then this type has an `into_owned` -/// version which permits building a `Finder` that is not connected to -/// the lifetime of its needle. -#[derive(Clone, Debug)] -pub struct Finder<'n> { - needle: CowBytes<'n>, - searcher: Searcher, -} - -impl<'n> Finder<'n> { - /// Create a new finder for the given needle. - #[inline] - pub fn new>(needle: &'n B) -> Finder<'n> { - FinderBuilder::new().build_forward(needle) - } - - /// Returns the index of the first occurrence of this needle in the given - /// haystack. - /// - /// # Complexity - /// - /// This routine is guaranteed to have worst case linear time complexity - /// with respect to both the needle and the haystack. That is, this runs - /// in `O(needle.len() + haystack.len())` time. - /// - /// This routine is also guaranteed to have worst case constant space - /// complexity. - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// use memchr::memmem::Finder; - /// - /// let haystack = b"foo bar baz"; - /// assert_eq!(Some(0), Finder::new("foo").find(haystack)); - /// assert_eq!(Some(4), Finder::new("bar").find(haystack)); - /// assert_eq!(None, Finder::new("quux").find(haystack)); - /// ``` - #[inline] - pub fn find(&self, haystack: &[u8]) -> Option { - let mut prestate = PrefilterState::new(); - let needle = self.needle.as_slice(); - self.searcher.find(&mut prestate, haystack, needle) - } - - /// Returns an iterator over all occurrences of a substring in a haystack. - /// - /// # Complexity - /// - /// This routine is guaranteed to have worst case linear time complexity - /// with respect to both the needle and the haystack. That is, this runs - /// in `O(needle.len() + haystack.len())` time. 
- /// - /// This routine is also guaranteed to have worst case constant space - /// complexity. - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// use memchr::memmem::Finder; - /// - /// let haystack = b"foo bar foo baz foo"; - /// let finder = Finder::new(b"foo"); - /// let mut it = finder.find_iter(haystack); - /// assert_eq!(Some(0), it.next()); - /// assert_eq!(Some(8), it.next()); - /// assert_eq!(Some(16), it.next()); - /// assert_eq!(None, it.next()); - /// ``` - #[inline] - pub fn find_iter<'a, 'h>( - &'a self, - haystack: &'h [u8], - ) -> FindIter<'h, 'a> { - FindIter::new(haystack, self.as_ref()) - } - - /// Convert this finder into its owned variant, such that it no longer - /// borrows the needle. - /// - /// If this is already an owned finder, then this is a no-op. Otherwise, - /// this copies the needle. - /// - /// This is only available when the `alloc` feature is enabled. - #[cfg(feature = "alloc")] - #[inline] - pub fn into_owned(self) -> Finder<'static> { - Finder { - needle: self.needle.into_owned(), - searcher: self.searcher.clone(), - } - } - - /// Convert this finder into its borrowed variant. - /// - /// This is primarily useful if your finder is owned and you'd like to - /// store its borrowed variant in some intermediate data structure. - /// - /// Note that the lifetime parameter of the returned finder is tied to the - /// lifetime of `self`, and may be shorter than the `'n` lifetime of the - /// needle itself. Namely, a finder's needle can be either borrowed or - /// owned, so the lifetime of the needle returned must necessarily be the - /// shorter of the two. - #[inline] - pub fn as_ref(&self) -> Finder<'_> { - Finder { - needle: CowBytes::new(self.needle()), - searcher: self.searcher.clone(), - } - } - - /// Returns the needle that this finder searches for. - /// - /// Note that the lifetime of the needle returned is tied to the lifetime - /// of the finder, and may be shorter than the `'n` lifetime. Namely, a - /// finder's needle can be either borrowed or owned, so the lifetime of the - /// needle returned must necessarily be the shorter of the two. - #[inline] - pub fn needle(&self) -> &[u8] { - self.needle.as_slice() - } -} - -/// A single substring reverse searcher fixed to a particular needle. -/// -/// The purpose of this type is to permit callers to construct a substring -/// searcher that can be used to search haystacks without the overhead of -/// constructing the searcher in the first place. This is a somewhat niche -/// concern when it's necessary to re-use the same needle to search multiple -/// different haystacks with as little overhead as possible. In general, -/// using [`rfind`] is good enough, but `FinderRev` is useful when you can -/// meaningfully observe searcher construction time in a profile. -/// -/// When the `std` feature is enabled, then this type has an `into_owned` -/// version which permits building a `FinderRev` that is not connected to -/// the lifetime of its needle. -#[derive(Clone, Debug)] -pub struct FinderRev<'n> { - needle: CowBytes<'n>, - searcher: SearcherRev, -} - -impl<'n> FinderRev<'n> { - /// Create a new reverse finder for the given needle. - #[inline] - pub fn new>(needle: &'n B) -> FinderRev<'n> { - FinderBuilder::new().build_reverse(needle) - } - - /// Returns the index of the last occurrence of this needle in the given - /// haystack. - /// - /// The haystack may be any type that can be cheaply converted into a - /// `&[u8]`. This includes, but is not limited to, `&str` and `&[u8]`. 
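`into_owned` exists so a `Finder` can outlive the needle it was built from, as the documentation above describes. A sketch of the intended pattern, with a hypothetical `NeedleCache` type that stores an owned finder for repeated searches (requires the crate's `alloc` feature, which is enabled by default):

```
use memchr::memmem::Finder;

struct NeedleCache {
    finder: Finder<'static>,
}

impl NeedleCache {
    fn new(needle: &[u8]) -> NeedleCache {
        // `into_owned` copies the needle so the finder is 'static.
        NeedleCache { finder: Finder::new(needle).into_owned() }
    }

    fn first_match(&self, haystack: &[u8]) -> Option<usize> {
        self.finder.find(haystack)
    }
}

fn main() {
    let cache = NeedleCache::new(b"foo");
    assert_eq!(Some(4), cache.first_match(b"baz foo quux"));
}
```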
- /// - /// # Complexity - /// - /// This routine is guaranteed to have worst case linear time complexity - /// with respect to both the needle and the haystack. That is, this runs - /// in `O(needle.len() + haystack.len())` time. - /// - /// This routine is also guaranteed to have worst case constant space - /// complexity. - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// use memchr::memmem::FinderRev; - /// - /// let haystack = b"foo bar baz"; - /// assert_eq!(Some(0), FinderRev::new("foo").rfind(haystack)); - /// assert_eq!(Some(4), FinderRev::new("bar").rfind(haystack)); - /// assert_eq!(None, FinderRev::new("quux").rfind(haystack)); - /// ``` - pub fn rfind>(&self, haystack: B) -> Option { - self.searcher.rfind(haystack.as_ref(), self.needle.as_slice()) - } - - /// Returns a reverse iterator over all occurrences of a substring in a - /// haystack. - /// - /// # Complexity - /// - /// This routine is guaranteed to have worst case linear time complexity - /// with respect to both the needle and the haystack. That is, this runs - /// in `O(needle.len() + haystack.len())` time. - /// - /// This routine is also guaranteed to have worst case constant space - /// complexity. - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// use memchr::memmem::FinderRev; - /// - /// let haystack = b"foo bar foo baz foo"; - /// let finder = FinderRev::new(b"foo"); - /// let mut it = finder.rfind_iter(haystack); - /// assert_eq!(Some(16), it.next()); - /// assert_eq!(Some(8), it.next()); - /// assert_eq!(Some(0), it.next()); - /// assert_eq!(None, it.next()); - /// ``` - #[inline] - pub fn rfind_iter<'a, 'h>( - &'a self, - haystack: &'h [u8], - ) -> FindRevIter<'h, 'a> { - FindRevIter::new(haystack, self.as_ref()) - } - - /// Convert this finder into its owned variant, such that it no longer - /// borrows the needle. - /// - /// If this is already an owned finder, then this is a no-op. Otherwise, - /// this copies the needle. - /// - /// This is only available when the `std` feature is enabled. - #[cfg(feature = "alloc")] - #[inline] - pub fn into_owned(self) -> FinderRev<'static> { - FinderRev { - needle: self.needle.into_owned(), - searcher: self.searcher.clone(), - } - } - - /// Convert this finder into its borrowed variant. - /// - /// This is primarily useful if your finder is owned and you'd like to - /// store its borrowed variant in some intermediate data structure. - /// - /// Note that the lifetime parameter of the returned finder is tied to the - /// lifetime of `self`, and may be shorter than the `'n` lifetime of the - /// needle itself. Namely, a finder's needle can be either borrowed or - /// owned, so the lifetime of the needle returned must necessarily be the - /// shorter of the two. - #[inline] - pub fn as_ref(&self) -> FinderRev<'_> { - FinderRev { - needle: CowBytes::new(self.needle()), - searcher: self.searcher.clone(), - } - } - - /// Returns the needle that this finder searches for. - /// - /// Note that the lifetime of the needle returned is tied to the lifetime - /// of the finder, and may be shorter than the `'n` lifetime. Namely, a - /// finder's needle can be either borrowed or owned, so the lifetime of the - /// needle returned must necessarily be the shorter of the two. - #[inline] - pub fn needle(&self) -> &[u8] { - self.needle.as_slice() - } -} - -/// A builder for constructing non-default forward or reverse memmem finders. -/// -/// A builder is primarily useful for configuring a substring searcher. 
-/// Currently, the only configuration exposed is the ability to disable -/// heuristic prefilters used to speed up certain searches. -#[derive(Clone, Debug, Default)] -pub struct FinderBuilder { - prefilter: Prefilter, -} - -impl FinderBuilder { - /// Create a new finder builder with default settings. - pub fn new() -> FinderBuilder { - FinderBuilder::default() - } - - /// Build a forward finder using the given needle from the current - /// settings. - pub fn build_forward<'n, B: ?Sized + AsRef<[u8]>>( - &self, - needle: &'n B, - ) -> Finder<'n> { - self.build_forward_with_ranker(DefaultFrequencyRank, needle) - } - - /// Build a forward finder using the given needle and a custom heuristic for - /// determining the frequency of a given byte in the dataset. - /// See [`HeuristicFrequencyRank`] for more details. - pub fn build_forward_with_ranker< - 'n, - R: HeuristicFrequencyRank, - B: ?Sized + AsRef<[u8]>, - >( - &self, - ranker: R, - needle: &'n B, - ) -> Finder<'n> { - let needle = needle.as_ref(); - Finder { - needle: CowBytes::new(needle), - searcher: Searcher::new(self.prefilter, ranker, needle), - } - } - - /// Build a reverse finder using the given needle from the current - /// settings. - pub fn build_reverse<'n, B: ?Sized + AsRef<[u8]>>( - &self, - needle: &'n B, - ) -> FinderRev<'n> { - let needle = needle.as_ref(); - FinderRev { - needle: CowBytes::new(needle), - searcher: SearcherRev::new(needle), - } - } - - /// Configure the prefilter setting for the finder. - /// - /// See the documentation for [`Prefilter`] for more discussion on why - /// you might want to configure this. - pub fn prefilter(&mut self, prefilter: Prefilter) -> &mut FinderBuilder { - self.prefilter = prefilter; - self - } -} - -#[cfg(test)] -mod tests { - use super::*; - - define_substring_forward_quickcheck!(|h, n| Some(Finder::new(n).find(h))); - define_substring_reverse_quickcheck!(|h, n| Some( - FinderRev::new(n).rfind(h) - )); - - #[test] - fn forward() { - crate::tests::substring::Runner::new() - .fwd(|h, n| Some(Finder::new(n).find(h))) - .run(); - } - - #[test] - fn reverse() { - crate::tests::substring::Runner::new() - .rev(|h, n| Some(FinderRev::new(n).rfind(h))) - .run(); - } -} diff --git a/vendor/memchr/src/memmem/searcher.rs b/vendor/memchr/src/memmem/searcher.rs deleted file mode 100644 index 2a533e02fcf6d8..00000000000000 --- a/vendor/memchr/src/memmem/searcher.rs +++ /dev/null @@ -1,1030 +0,0 @@ -use crate::arch::all::{ - packedpair::{HeuristicFrequencyRank, Pair}, - rabinkarp, twoway, -}; - -#[cfg(target_arch = "aarch64")] -use crate::arch::aarch64::neon::packedpair as neon; -#[cfg(all(target_arch = "wasm32", target_feature = "simd128"))] -use crate::arch::wasm32::simd128::packedpair as simd128; -#[cfg(all(target_arch = "x86_64", target_feature = "sse2"))] -use crate::arch::x86_64::{ - avx2::packedpair as avx2, sse2::packedpair as sse2, -}; - -/// A "meta" substring searcher. -/// -/// To a first approximation, this chooses what it believes to be the "best" -/// substring search implemnetation based on the needle at construction time. -/// Then, every call to `find` will execute that particular implementation. To -/// a second approximation, multiple substring search algorithms may be used, -/// depending on the haystack. For example, for supremely short haystacks, -/// Rabin-Karp is typically used. -/// -/// See the documentation on `Prefilter` for an explanation of the dispatching -/// mechanism. 
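The only knob `FinderBuilder` exposes above is the prefilter configuration. A short sketch of disabling the heuristic prefilter, following the builder API shown here:

```
use memchr::memmem::{FinderBuilder, Prefilter};

fn main() {
    // Build a forward finder with the heuristic prefilter disabled. Useful
    // when you already know the prefilter will not carry its weight.
    let finder = FinderBuilder::new()
        .prefilter(Prefilter::None)
        .build_forward("foo");

    assert_eq!(Some(8), finder.find(b"bar baz foo quux"));
}
```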
The quick summary is that an enum has too much overhead and -/// we can't use dynamic dispatch via traits because we need to work in a -/// core-only environment. (Dynamic dispatch works in core-only, but you -/// need `&dyn Trait` and we really need a `Box` here. The latter -/// requires `alloc`.) So instead, we use a union and an appropriately paired -/// free function to read from the correct field on the union and execute the -/// chosen substring search implementation. -#[derive(Clone)] -pub(crate) struct Searcher { - call: SearcherKindFn, - kind: SearcherKind, - rabinkarp: rabinkarp::Finder, -} - -impl Searcher { - /// Creates a new "meta" substring searcher that attempts to choose the - /// best algorithm based on the needle, heuristics and what the current - /// target supports. - #[inline] - pub(crate) fn new( - prefilter: PrefilterConfig, - ranker: R, - needle: &[u8], - ) -> Searcher { - let rabinkarp = rabinkarp::Finder::new(needle); - if needle.len() <= 1 { - return if needle.is_empty() { - trace!("building empty substring searcher"); - Searcher { - call: searcher_kind_empty, - kind: SearcherKind { empty: () }, - rabinkarp, - } - } else { - trace!("building one-byte substring searcher"); - debug_assert_eq!(1, needle.len()); - Searcher { - call: searcher_kind_one_byte, - kind: SearcherKind { one_byte: needle[0] }, - rabinkarp, - } - }; - } - let pair = match Pair::with_ranker(needle, &ranker) { - Some(pair) => pair, - None => return Searcher::twoway(needle, rabinkarp, None), - }; - debug_assert_ne!( - pair.index1(), - pair.index2(), - "pair offsets should not be equivalent" - ); - #[cfg(all(target_arch = "x86_64", target_feature = "sse2"))] - { - if let Some(pp) = avx2::Finder::with_pair(needle, pair) { - if do_packed_search(needle) { - trace!("building x86_64 AVX2 substring searcher"); - let kind = SearcherKind { avx2: pp }; - Searcher { call: searcher_kind_avx2, kind, rabinkarp } - } else if prefilter.is_none() { - Searcher::twoway(needle, rabinkarp, None) - } else { - let prestrat = Prefilter::avx2(pp, needle); - Searcher::twoway(needle, rabinkarp, Some(prestrat)) - } - } else if let Some(pp) = sse2::Finder::with_pair(needle, pair) { - if do_packed_search(needle) { - trace!("building x86_64 SSE2 substring searcher"); - let kind = SearcherKind { sse2: pp }; - Searcher { call: searcher_kind_sse2, kind, rabinkarp } - } else if prefilter.is_none() { - Searcher::twoway(needle, rabinkarp, None) - } else { - let prestrat = Prefilter::sse2(pp, needle); - Searcher::twoway(needle, rabinkarp, Some(prestrat)) - } - } else if prefilter.is_none() { - Searcher::twoway(needle, rabinkarp, None) - } else { - // We're pretty unlikely to get to this point, but it is - // possible to be running on x86_64 without SSE2. Namely, it's - // really up to the OS whether it wants to support vector - // registers or not. 
- let prestrat = Prefilter::fallback(ranker, pair, needle); - Searcher::twoway(needle, rabinkarp, prestrat) - } - } - #[cfg(all(target_arch = "wasm32", target_feature = "simd128"))] - { - if let Some(pp) = simd128::Finder::with_pair(needle, pair) { - if do_packed_search(needle) { - trace!("building wasm32 simd128 substring searcher"); - let kind = SearcherKind { simd128: pp }; - Searcher { call: searcher_kind_simd128, kind, rabinkarp } - } else if prefilter.is_none() { - Searcher::twoway(needle, rabinkarp, None) - } else { - let prestrat = Prefilter::simd128(pp, needle); - Searcher::twoway(needle, rabinkarp, Some(prestrat)) - } - } else if prefilter.is_none() { - Searcher::twoway(needle, rabinkarp, None) - } else { - let prestrat = Prefilter::fallback(ranker, pair, needle); - Searcher::twoway(needle, rabinkarp, prestrat) - } - } - #[cfg(target_arch = "aarch64")] - { - if let Some(pp) = neon::Finder::with_pair(needle, pair) { - if do_packed_search(needle) { - trace!("building aarch64 neon substring searcher"); - let kind = SearcherKind { neon: pp }; - Searcher { call: searcher_kind_neon, kind, rabinkarp } - } else if prefilter.is_none() { - Searcher::twoway(needle, rabinkarp, None) - } else { - let prestrat = Prefilter::neon(pp, needle); - Searcher::twoway(needle, rabinkarp, Some(prestrat)) - } - } else if prefilter.is_none() { - Searcher::twoway(needle, rabinkarp, None) - } else { - let prestrat = Prefilter::fallback(ranker, pair, needle); - Searcher::twoway(needle, rabinkarp, prestrat) - } - } - #[cfg(not(any( - all(target_arch = "x86_64", target_feature = "sse2"), - all(target_arch = "wasm32", target_feature = "simd128"), - target_arch = "aarch64" - )))] - { - if prefilter.is_none() { - Searcher::twoway(needle, rabinkarp, None) - } else { - let prestrat = Prefilter::fallback(ranker, pair, needle); - Searcher::twoway(needle, rabinkarp, prestrat) - } - } - } - - /// Creates a new searcher that always uses the Two-Way algorithm. This is - /// typically used when vector algorithms are unavailable or inappropriate. - /// (For example, when the needle is "too long.") - /// - /// If a prefilter is given, then the searcher returned will be accelerated - /// by the prefilter. - #[inline] - fn twoway( - needle: &[u8], - rabinkarp: rabinkarp::Finder, - prestrat: Option, - ) -> Searcher { - let finder = twoway::Finder::new(needle); - match prestrat { - None => { - trace!("building scalar two-way substring searcher"); - let kind = SearcherKind { two_way: finder }; - Searcher { call: searcher_kind_two_way, kind, rabinkarp } - } - Some(prestrat) => { - trace!( - "building scalar two-way \ - substring searcher with a prefilter" - ); - let two_way_with_prefilter = - TwoWayWithPrefilter { finder, prestrat }; - let kind = SearcherKind { two_way_with_prefilter }; - Searcher { - call: searcher_kind_two_way_with_prefilter, - kind, - rabinkarp, - } - } - } - } - - /// Searches the given haystack for the given needle. The needle given - /// should be the same as the needle that this finder was initialized - /// with. - /// - /// Inlining this can lead to big wins for latency, and #[inline] doesn't - /// seem to be enough in some cases. - #[inline(always)] - pub(crate) fn find( - &self, - prestate: &mut PrefilterState, - haystack: &[u8], - needle: &[u8], - ) -> Option { - if haystack.len() < needle.len() { - None - } else { - // SAFETY: By construction, we've ensured that the function - // in `self.call` is properly paired with the union used in - // `self.kind`. 
- unsafe { (self.call)(self, prestate, haystack, needle) } - } - } -} - -impl core::fmt::Debug for Searcher { - fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { - f.debug_struct("Searcher") - .field("call", &"") - .field("kind", &"") - .field("rabinkarp", &self.rabinkarp) - .finish() - } -} - -/// A union indicating one of several possible substring search implementations -/// that are in active use. -/// -/// This union should only be read by one of the functions prefixed with -/// `searcher_kind_`. Namely, the correct function is meant to be paired with -/// the union by the caller, such that the function always reads from the -/// designated union field. -#[derive(Clone, Copy)] -union SearcherKind { - empty: (), - one_byte: u8, - two_way: twoway::Finder, - two_way_with_prefilter: TwoWayWithPrefilter, - #[cfg(all(target_arch = "x86_64", target_feature = "sse2"))] - sse2: crate::arch::x86_64::sse2::packedpair::Finder, - #[cfg(all(target_arch = "x86_64", target_feature = "sse2"))] - avx2: crate::arch::x86_64::avx2::packedpair::Finder, - #[cfg(all(target_arch = "wasm32", target_feature = "simd128"))] - simd128: crate::arch::wasm32::simd128::packedpair::Finder, - #[cfg(target_arch = "aarch64")] - neon: crate::arch::aarch64::neon::packedpair::Finder, -} - -/// A two-way substring searcher with a prefilter. -#[derive(Copy, Clone, Debug)] -struct TwoWayWithPrefilter { - finder: twoway::Finder, - prestrat: Prefilter, -} - -/// The type of a substring search function. -/// -/// # Safety -/// -/// When using a function of this type, callers must ensure that the correct -/// function is paired with the value populated in `SearcherKind` union. -type SearcherKindFn = unsafe fn( - searcher: &Searcher, - prestate: &mut PrefilterState, - haystack: &[u8], - needle: &[u8], -) -> Option; - -/// Reads from the `empty` field of `SearcherKind` to handle the case of -/// searching for the empty needle. Works on all platforms. -/// -/// # Safety -/// -/// Callers must ensure that the `searcher.kind.empty` union field is set. -unsafe fn searcher_kind_empty( - _searcher: &Searcher, - _prestate: &mut PrefilterState, - _haystack: &[u8], - _needle: &[u8], -) -> Option { - Some(0) -} - -/// Reads from the `one_byte` field of `SearcherKind` to handle the case of -/// searching for a single byte needle. Works on all platforms. -/// -/// # Safety -/// -/// Callers must ensure that the `searcher.kind.one_byte` union field is set. -unsafe fn searcher_kind_one_byte( - searcher: &Searcher, - _prestate: &mut PrefilterState, - haystack: &[u8], - _needle: &[u8], -) -> Option { - let needle = searcher.kind.one_byte; - crate::memchr(needle, haystack) -} - -/// Reads from the `two_way` field of `SearcherKind` to handle the case of -/// searching for an arbitrary needle without prefilter acceleration. Works on -/// all platforms. -/// -/// # Safety -/// -/// Callers must ensure that the `searcher.kind.two_way` union field is set. -unsafe fn searcher_kind_two_way( - searcher: &Searcher, - _prestate: &mut PrefilterState, - haystack: &[u8], - needle: &[u8], -) -> Option { - if rabinkarp::is_fast(haystack, needle) { - searcher.rabinkarp.find(haystack, needle) - } else { - searcher.kind.two_way.find(haystack, needle) - } -} - -/// Reads from the `two_way_with_prefilter` field of `SearcherKind` to handle -/// the case of searching for an arbitrary needle with prefilter acceleration. -/// Works on all platforms. 
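The `SearcherKind` union and `SearcherKindFn` pointer above implement the dispatch scheme the `Searcher` docs describe: no enum tag, no `dyn` trait object, just a function pointer paired at construction with the union field it reads. A self-contained sketch of that shape (illustrative only; the names and variants below are hypothetical, not the crate's):

```
union Kind {
    one_byte: u8,
    two_bytes: [u8; 2],
}

type FindFn = unsafe fn(kind: &Kind, haystack: &[u8]) -> Option<usize>;

struct Dispatcher {
    call: FindFn,
    kind: Kind,
}

/// # Safety: `kind.one_byte` must be the field that was written.
unsafe fn find_one(kind: &Kind, haystack: &[u8]) -> Option<usize> {
    let b = kind.one_byte;
    haystack.iter().position(|&x| x == b)
}

/// # Safety: `kind.two_bytes` must be the field that was written.
unsafe fn find_two(kind: &Kind, haystack: &[u8]) -> Option<usize> {
    let [b1, b2] = kind.two_bytes;
    haystack.iter().position(|&x| x == b1 || x == b2)
}

impl Dispatcher {
    fn one(needle: u8) -> Dispatcher {
        Dispatcher { call: find_one, kind: Kind { one_byte: needle } }
    }

    fn two(n1: u8, n2: u8) -> Dispatcher {
        Dispatcher { call: find_two, kind: Kind { two_bytes: [n1, n2] } }
    }

    fn find(&self, haystack: &[u8]) -> Option<usize> {
        // SAFETY: the constructors above always pair `call` with the union
        // field it reads, mirroring the invariant documented in the crate.
        unsafe { (self.call)(&self.kind, haystack) }
    }
}

fn main() {
    assert_eq!(Dispatcher::one(b'z').find(b"baz"), Some(2));
    assert_eq!(Dispatcher::two(b'q', b'r').find(b"bar"), Some(2));
}
```

The safety argument is the one repeated in the `searcher_kind_*` functions: construction is the single place that decides which union field is active.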
-/// -/// # Safety -/// -/// Callers must ensure that the `searcher.kind.two_way_with_prefilter` union -/// field is set. -unsafe fn searcher_kind_two_way_with_prefilter( - searcher: &Searcher, - prestate: &mut PrefilterState, - haystack: &[u8], - needle: &[u8], -) -> Option { - if rabinkarp::is_fast(haystack, needle) { - searcher.rabinkarp.find(haystack, needle) - } else { - let TwoWayWithPrefilter { ref finder, ref prestrat } = - searcher.kind.two_way_with_prefilter; - let pre = Pre { prestate, prestrat }; - finder.find_with_prefilter(Some(pre), haystack, needle) - } -} - -/// Reads from the `sse2` field of `SearcherKind` to execute the x86_64 SSE2 -/// vectorized substring search implementation. -/// -/// # Safety -/// -/// Callers must ensure that the `searcher.kind.sse2` union field is set. -#[cfg(all(target_arch = "x86_64", target_feature = "sse2"))] -unsafe fn searcher_kind_sse2( - searcher: &Searcher, - _prestate: &mut PrefilterState, - haystack: &[u8], - needle: &[u8], -) -> Option { - let finder = &searcher.kind.sse2; - if haystack.len() < finder.min_haystack_len() { - searcher.rabinkarp.find(haystack, needle) - } else { - finder.find(haystack, needle) - } -} - -/// Reads from the `avx2` field of `SearcherKind` to execute the x86_64 AVX2 -/// vectorized substring search implementation. -/// -/// # Safety -/// -/// Callers must ensure that the `searcher.kind.avx2` union field is set. -#[cfg(all(target_arch = "x86_64", target_feature = "sse2"))] -unsafe fn searcher_kind_avx2( - searcher: &Searcher, - _prestate: &mut PrefilterState, - haystack: &[u8], - needle: &[u8], -) -> Option { - let finder = &searcher.kind.avx2; - if haystack.len() < finder.min_haystack_len() { - searcher.rabinkarp.find(haystack, needle) - } else { - finder.find(haystack, needle) - } -} - -/// Reads from the `simd128` field of `SearcherKind` to execute the wasm32 -/// simd128 vectorized substring search implementation. -/// -/// # Safety -/// -/// Callers must ensure that the `searcher.kind.simd128` union field is set. -#[cfg(all(target_arch = "wasm32", target_feature = "simd128"))] -unsafe fn searcher_kind_simd128( - searcher: &Searcher, - _prestate: &mut PrefilterState, - haystack: &[u8], - needle: &[u8], -) -> Option { - let finder = &searcher.kind.simd128; - if haystack.len() < finder.min_haystack_len() { - searcher.rabinkarp.find(haystack, needle) - } else { - finder.find(haystack, needle) - } -} - -/// Reads from the `neon` field of `SearcherKind` to execute the aarch64 neon -/// vectorized substring search implementation. -/// -/// # Safety -/// -/// Callers must ensure that the `searcher.kind.neon` union field is set. -#[cfg(target_arch = "aarch64")] -unsafe fn searcher_kind_neon( - searcher: &Searcher, - _prestate: &mut PrefilterState, - haystack: &[u8], - needle: &[u8], -) -> Option { - let finder = &searcher.kind.neon; - if haystack.len() < finder.min_haystack_len() { - searcher.rabinkarp.find(haystack, needle) - } else { - finder.find(haystack, needle) - } -} - -/// A reverse substring searcher. -#[derive(Clone, Debug)] -pub(crate) struct SearcherRev { - kind: SearcherRevKind, - rabinkarp: rabinkarp::FinderRev, -} - -/// The kind of the reverse searcher. -/// -/// For the reverse case, we don't do any SIMD acceleration or prefilters. -/// There is no specific technical reason why we don't, but rather don't do it -/// because it's not clear it's worth the extra code to do so. If you have a -/// use case for it, please file an issue. 
-/// -/// We also don't do the union trick as we do with the forward case and -/// prefilters. Basically for the same reason we don't have prefilters or -/// vector algorithms for reverse searching: it's not clear it's worth doing. -/// Please file an issue if you have a compelling use case for fast reverse -/// substring search. -#[derive(Clone, Debug)] -enum SearcherRevKind { - Empty, - OneByte { needle: u8 }, - TwoWay { finder: twoway::FinderRev }, -} - -impl SearcherRev { - /// Creates a new searcher for finding occurrences of the given needle in - /// reverse. That is, it reports the last (instead of the first) occurrence - /// of a needle in a haystack. - #[inline] - pub(crate) fn new(needle: &[u8]) -> SearcherRev { - let kind = if needle.len() <= 1 { - if needle.is_empty() { - trace!("building empty reverse substring searcher"); - SearcherRevKind::Empty - } else { - trace!("building one-byte reverse substring searcher"); - debug_assert_eq!(1, needle.len()); - SearcherRevKind::OneByte { needle: needle[0] } - } - } else { - trace!("building scalar two-way reverse substring searcher"); - let finder = twoway::FinderRev::new(needle); - SearcherRevKind::TwoWay { finder } - }; - let rabinkarp = rabinkarp::FinderRev::new(needle); - SearcherRev { kind, rabinkarp } - } - - /// Searches the given haystack for the last occurrence of the given - /// needle. The needle given should be the same as the needle that this - /// finder was initialized with. - #[inline] - pub(crate) fn rfind( - &self, - haystack: &[u8], - needle: &[u8], - ) -> Option { - if haystack.len() < needle.len() { - return None; - } - match self.kind { - SearcherRevKind::Empty => Some(haystack.len()), - SearcherRevKind::OneByte { needle } => { - crate::memrchr(needle, haystack) - } - SearcherRevKind::TwoWay { ref finder } => { - if rabinkarp::is_fast(haystack, needle) { - self.rabinkarp.rfind(haystack, needle) - } else { - finder.rfind(haystack, needle) - } - } - } - } -} - -/// Prefilter controls whether heuristics are used to accelerate searching. -/// -/// A prefilter refers to the idea of detecting candidate matches very quickly, -/// and then confirming whether those candidates are full matches. This -/// idea can be quite effective since it's often the case that looking for -/// candidates can be a lot faster than running a complete substring search -/// over the entire input. Namely, looking for candidates can be done with -/// extremely fast vectorized code. -/// -/// The downside of a prefilter is that it assumes false positives (which are -/// candidates generated by a prefilter that aren't matches) are somewhat rare -/// relative to the frequency of full matches. That is, if a lot of false -/// positives are generated, then it's possible for search time to be worse -/// than if the prefilter wasn't enabled in the first place. -/// -/// Another downside of a prefilter is that it can result in highly variable -/// performance, where some cases are extraordinarily fast and others aren't. -/// Typically, variable performance isn't a problem, but it may be for your use -/// case. -/// -/// The use of prefilters in this implementation does use a heuristic to detect -/// when a prefilter might not be carrying its weight, and will dynamically -/// disable its use. Nevertheless, this configuration option gives callers -/// the ability to disable prefilters if you have knowledge that they won't be -/// useful. 
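The prefilter idea described above is: scan very quickly for a rare byte from the needle, then confirm each candidate with a full comparison. The sketch below shows that two-step shape with a hypothetical `prefiltered_find`; in the crate itself the rare byte and its offset come from the frequency-ranking machinery rather than being passed in:

```
fn prefiltered_find(haystack: &[u8], needle: &[u8], rare_offset: usize) -> Option<usize> {
    let rare = *needle.get(rare_offset)?;
    let mut pos = 0;
    while pos + needle.len() <= haystack.len() {
        // Fast candidate scan (stands in for the vectorized prefilter step).
        let hit = memchr::memchr(rare, &haystack[pos + rare_offset..])?;
        // A real match starting here would put the rare byte at `rare_offset`.
        let candidate = pos + hit;
        // Slow confirmation step; false positives just cost another loop turn.
        if haystack[candidate..].starts_with(needle) {
            return Some(candidate);
        }
        pos = candidate + 1;
    }
    None
}

fn main() {
    assert_eq!(prefiltered_find(b"xxqzfooxx", b"foo", 0), Some(4));
}
```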
-#[derive(Clone, Copy, Debug)] -#[non_exhaustive] -pub enum PrefilterConfig { - /// Never used a prefilter in substring search. - None, - /// Automatically detect whether a heuristic prefilter should be used. If - /// it is used, then heuristics will be used to dynamically disable the - /// prefilter if it is believed to not be carrying its weight. - Auto, -} - -impl Default for PrefilterConfig { - fn default() -> PrefilterConfig { - PrefilterConfig::Auto - } -} - -impl PrefilterConfig { - /// Returns true when this prefilter is set to the `None` variant. - fn is_none(&self) -> bool { - matches!(*self, PrefilterConfig::None) - } -} - -/// The implementation of a prefilter. -/// -/// This type encapsulates dispatch to one of several possible choices for a -/// prefilter. Generally speaking, all prefilters have the same approximate -/// algorithm: they choose a couple of bytes from the needle that are believed -/// to be rare, use a fast vector algorithm to look for those bytes and return -/// positions as candidates for some substring search algorithm (currently only -/// Two-Way) to confirm as a match or not. -/// -/// The differences between the algorithms are actually at the vector -/// implementation level. Namely, we need different routines based on both -/// which target architecture we're on and what CPU features are supported. -/// -/// The straight-forwardly obvious approach here is to use an enum, and make -/// `Prefilter::find` do case analysis to determine which algorithm was -/// selected and invoke it. However, I've observed that this leads to poor -/// codegen in some cases, especially in latency sensitive benchmarks. That is, -/// this approach comes with overhead that I wasn't able to eliminate. -/// -/// The second obvious approach is to use dynamic dispatch with traits. Doing -/// that in this context where `Prefilter` owns the selection generally -/// requires heap allocation, and this code is designed to run in core-only -/// environments. -/// -/// So we settle on using a union (that's `PrefilterKind`) and a function -/// pointer (that's `PrefilterKindFn`). We select the right function pointer -/// based on which field in the union we set, and that function in turn -/// knows which field of the union to access. The downside of this approach -/// is that it forces us to think about safety, but the upside is that -/// there are some nice latency improvements to benchmarks. (Especially the -/// `memmem/sliceslice/short` benchmark.) -/// -/// In cases where we've selected a vector algorithm and the haystack given -/// is too short, we fallback to the scalar version of `memchr` on the -/// `rarest_byte`. (The scalar version of `memchr` is still better than a naive -/// byte-at-a-time loop because it will read in `usize`-sized chunks at a -/// time.) -#[derive(Clone, Copy)] -struct Prefilter { - call: PrefilterKindFn, - kind: PrefilterKind, - rarest_byte: u8, - rarest_offset: u8, -} - -impl Prefilter { - /// Return a "fallback" prefilter, but only if it is believed to be - /// effective. - #[inline] - fn fallback( - ranker: R, - pair: Pair, - needle: &[u8], - ) -> Option { - /// The maximum frequency rank permitted for the fallback prefilter. - /// If the rarest byte in the needle has a frequency rank above this - /// value, then no prefilter is used if the fallback prefilter would - /// otherwise be selected. 
- const MAX_FALLBACK_RANK: u8 = 250; - - trace!("building fallback prefilter"); - let rarest_offset = pair.index1(); - let rarest_byte = needle[usize::from(rarest_offset)]; - let rarest_rank = ranker.rank(rarest_byte); - if rarest_rank > MAX_FALLBACK_RANK { - None - } else { - let finder = crate::arch::all::packedpair::Finder::with_pair( - needle, - pair.clone(), - )?; - let call = prefilter_kind_fallback; - let kind = PrefilterKind { fallback: finder }; - Some(Prefilter { call, kind, rarest_byte, rarest_offset }) - } - } - - /// Return a prefilter using a x86_64 SSE2 vector algorithm. - #[cfg(all(target_arch = "x86_64", target_feature = "sse2"))] - #[inline] - fn sse2(finder: sse2::Finder, needle: &[u8]) -> Prefilter { - trace!("building x86_64 SSE2 prefilter"); - let rarest_offset = finder.pair().index1(); - let rarest_byte = needle[usize::from(rarest_offset)]; - Prefilter { - call: prefilter_kind_sse2, - kind: PrefilterKind { sse2: finder }, - rarest_byte, - rarest_offset, - } - } - - /// Return a prefilter using a x86_64 AVX2 vector algorithm. - #[cfg(all(target_arch = "x86_64", target_feature = "sse2"))] - #[inline] - fn avx2(finder: avx2::Finder, needle: &[u8]) -> Prefilter { - trace!("building x86_64 AVX2 prefilter"); - let rarest_offset = finder.pair().index1(); - let rarest_byte = needle[usize::from(rarest_offset)]; - Prefilter { - call: prefilter_kind_avx2, - kind: PrefilterKind { avx2: finder }, - rarest_byte, - rarest_offset, - } - } - - /// Return a prefilter using a wasm32 simd128 vector algorithm. - #[cfg(all(target_arch = "wasm32", target_feature = "simd128"))] - #[inline] - fn simd128(finder: simd128::Finder, needle: &[u8]) -> Prefilter { - trace!("building wasm32 simd128 prefilter"); - let rarest_offset = finder.pair().index1(); - let rarest_byte = needle[usize::from(rarest_offset)]; - Prefilter { - call: prefilter_kind_simd128, - kind: PrefilterKind { simd128: finder }, - rarest_byte, - rarest_offset, - } - } - - /// Return a prefilter using a aarch64 neon vector algorithm. - #[cfg(target_arch = "aarch64")] - #[inline] - fn neon(finder: neon::Finder, needle: &[u8]) -> Prefilter { - trace!("building aarch64 neon prefilter"); - let rarest_offset = finder.pair().index1(); - let rarest_byte = needle[usize::from(rarest_offset)]; - Prefilter { - call: prefilter_kind_neon, - kind: PrefilterKind { neon: finder }, - rarest_byte, - rarest_offset, - } - } - - /// Return a *candidate* position for a match. - /// - /// When this returns an offset, it implies that a match could begin at - /// that offset, but it may not. That is, it is possible for a false - /// positive to be returned. - /// - /// When `None` is returned, then it is guaranteed that there are no - /// matches for the needle in the given haystack. That is, it is impossible - /// for a false negative to be returned. - /// - /// The purpose of this routine is to look for candidate matching positions - /// as quickly as possible before running a (likely) slower confirmation - /// step. - #[inline] - fn find(&self, haystack: &[u8]) -> Option { - // SAFETY: By construction, we've ensured that the function in - // `self.call` is properly paired with the union used in `self.kind`. - unsafe { (self.call)(self, haystack) } - } - - /// A "simple" prefilter that just looks for the occurrence of the rarest - /// byte from the needle. This is generally only used for very small - /// haystacks. 
-    #[inline]
-    fn find_simple(&self, haystack: &[u8]) -> Option<usize> {
-        // We don't use crate::memchr here because the haystack should be small
-        // enough that memchr won't be able to use vector routines anyway. So
-        // we just skip straight to the fallback implementation which is likely
-        // faster. (A byte-at-a-time loop is only used when the haystack is
-        // smaller than `size_of::<usize>()`.)
-        crate::arch::all::memchr::One::new(self.rarest_byte)
-            .find(haystack)
-            .map(|i| i.saturating_sub(usize::from(self.rarest_offset)))
-    }
-}
-
-impl core::fmt::Debug for Prefilter {
-    fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
-        f.debug_struct("Prefilter")
-            .field("call", &"")
-            .field("kind", &"")
-            .field("rarest_byte", &self.rarest_byte)
-            .field("rarest_offset", &self.rarest_offset)
-            .finish()
-    }
-}
-
-/// A union indicating one of several possible prefilters that are in active
-/// use.
-///
-/// This union should only be read by one of the functions prefixed with
-/// `prefilter_kind_`. Namely, the correct function is meant to be paired with
-/// the union by the caller, such that the function always reads from the
-/// designated union field.
-#[derive(Clone, Copy)]
-union PrefilterKind {
-    fallback: crate::arch::all::packedpair::Finder,
-    #[cfg(all(target_arch = "x86_64", target_feature = "sse2"))]
-    sse2: crate::arch::x86_64::sse2::packedpair::Finder,
-    #[cfg(all(target_arch = "x86_64", target_feature = "sse2"))]
-    avx2: crate::arch::x86_64::avx2::packedpair::Finder,
-    #[cfg(all(target_arch = "wasm32", target_feature = "simd128"))]
-    simd128: crate::arch::wasm32::simd128::packedpair::Finder,
-    #[cfg(target_arch = "aarch64")]
-    neon: crate::arch::aarch64::neon::packedpair::Finder,
-}
-
-/// The type of a prefilter function.
-///
-/// # Safety
-///
-/// When using a function of this type, callers must ensure that the correct
-/// function is paired with the value populated in `PrefilterKind` union.
-type PrefilterKindFn =
-    unsafe fn(strat: &Prefilter, haystack: &[u8]) -> Option<usize>;
-
-/// Reads from the `fallback` field of `PrefilterKind` to execute the fallback
-/// prefilter. Works on all platforms.
-///
-/// # Safety
-///
-/// Callers must ensure that the `strat.kind.fallback` union field is set.
-unsafe fn prefilter_kind_fallback(
-    strat: &Prefilter,
-    haystack: &[u8],
-) -> Option<usize> {
-    strat.kind.fallback.find_prefilter(haystack)
-}
-
-/// Reads from the `sse2` field of `PrefilterKind` to execute the x86_64 SSE2
-/// prefilter.
-///
-/// # Safety
-///
-/// Callers must ensure that the `strat.kind.sse2` union field is set.
-#[cfg(all(target_arch = "x86_64", target_feature = "sse2"))]
-unsafe fn prefilter_kind_sse2(
-    strat: &Prefilter,
-    haystack: &[u8],
-) -> Option<usize> {
-    let finder = &strat.kind.sse2;
-    if haystack.len() < finder.min_haystack_len() {
-        strat.find_simple(haystack)
-    } else {
-        finder.find_prefilter(haystack)
-    }
-}
-
-/// Reads from the `avx2` field of `PrefilterKind` to execute the x86_64 AVX2
-/// prefilter.
-///
-/// # Safety
-///
-/// Callers must ensure that the `strat.kind.avx2` union field is set.
-#[cfg(all(target_arch = "x86_64", target_feature = "sse2"))]
-unsafe fn prefilter_kind_avx2(
-    strat: &Prefilter,
-    haystack: &[u8],
-) -> Option<usize> {
-    let finder = &strat.kind.avx2;
-    if haystack.len() < finder.min_haystack_len() {
-        strat.find_simple(haystack)
-    } else {
-        finder.find_prefilter(haystack)
-    }
-}
-
-/// Reads from the `simd128` field of `PrefilterKind` to execute the wasm32
-/// simd128 prefilter.
-/// -/// # Safety -/// -/// Callers must ensure that the `strat.kind.simd128` union field is set. -#[cfg(all(target_arch = "wasm32", target_feature = "simd128"))] -unsafe fn prefilter_kind_simd128( - strat: &Prefilter, - haystack: &[u8], -) -> Option { - let finder = &strat.kind.simd128; - if haystack.len() < finder.min_haystack_len() { - strat.find_simple(haystack) - } else { - finder.find_prefilter(haystack) - } -} - -/// Reads from the `neon` field of `PrefilterKind` to execute the aarch64 neon -/// prefilter. -/// -/// # Safety -/// -/// Callers must ensure that the `strat.kind.neon` union field is set. -#[cfg(target_arch = "aarch64")] -unsafe fn prefilter_kind_neon( - strat: &Prefilter, - haystack: &[u8], -) -> Option { - let finder = &strat.kind.neon; - if haystack.len() < finder.min_haystack_len() { - strat.find_simple(haystack) - } else { - finder.find_prefilter(haystack) - } -} - -/// PrefilterState tracks state associated with the effectiveness of a -/// prefilter. It is used to track how many bytes, on average, are skipped by -/// the prefilter. If this average dips below a certain threshold over time, -/// then the state renders the prefilter inert and stops using it. -/// -/// A prefilter state should be created for each search. (Where creating an -/// iterator is treated as a single search.) A prefilter state should only be -/// created from a `Freqy`. e.g., An inert `Freqy` will produce an inert -/// `PrefilterState`. -#[derive(Clone, Copy, Debug)] -pub(crate) struct PrefilterState { - /// The number of skips that has been executed. This is always 1 greater - /// than the actual number of skips. The special sentinel value of 0 - /// indicates that the prefilter is inert. This is useful to avoid - /// additional checks to determine whether the prefilter is still - /// "effective." Once a prefilter becomes inert, it should no longer be - /// used (according to our heuristics). - skips: u32, - /// The total number of bytes that have been skipped. - skipped: u32, -} - -impl PrefilterState { - /// The minimum number of skip attempts to try before considering whether - /// a prefilter is effective or not. - const MIN_SKIPS: u32 = 50; - - /// The minimum amount of bytes that skipping must average. - /// - /// This value was chosen based on varying it and checking - /// the microbenchmarks. In particular, this can impact the - /// pathological/repeated-{huge,small} benchmarks quite a bit if it's set - /// too low. - const MIN_SKIP_BYTES: u32 = 8; - - /// Create a fresh prefilter state. - #[inline] - pub(crate) fn new() -> PrefilterState { - PrefilterState { skips: 1, skipped: 0 } - } - - /// Update this state with the number of bytes skipped on the last - /// invocation of the prefilter. - #[inline] - fn update(&mut self, skipped: usize) { - self.skips = self.skips.saturating_add(1); - // We need to do this dance since it's technically possible for - // `skipped` to overflow a `u32`. (And we use a `u32` to reduce the - // size of a prefilter state.) - self.skipped = match u32::try_from(skipped) { - Err(_) => core::u32::MAX, - Ok(skipped) => self.skipped.saturating_add(skipped), - }; - } - - /// Return true if and only if this state indicates that a prefilter is - /// still effective. - #[inline] - fn is_effective(&mut self) -> bool { - if self.is_inert() { - return false; - } - if self.skips() < PrefilterState::MIN_SKIPS { - return true; - } - if self.skipped >= PrefilterState::MIN_SKIP_BYTES * self.skips() { - return true; - } - - // We're inert. 
- self.skips = 0; - false - } - - /// Returns true if the prefilter this state represents should no longer - /// be used. - #[inline] - fn is_inert(&self) -> bool { - self.skips == 0 - } - - /// Returns the total number of times the prefilter has been used. - #[inline] - fn skips(&self) -> u32 { - // Remember, `0` is a sentinel value indicating inertness, so we - // always need to subtract `1` to get our actual number of skips. - self.skips.saturating_sub(1) - } -} - -/// A combination of prefilter effectiveness state and the prefilter itself. -#[derive(Debug)] -pub(crate) struct Pre<'a> { - /// State that tracks the effectiveness of a prefilter. - prestate: &'a mut PrefilterState, - /// The actual prefilter. - prestrat: &'a Prefilter, -} - -impl<'a> Pre<'a> { - /// Call this prefilter on the given haystack with the given needle. - #[inline] - pub(crate) fn find(&mut self, haystack: &[u8]) -> Option { - let result = self.prestrat.find(haystack); - self.prestate.update(result.unwrap_or(haystack.len())); - result - } - - /// Return true if and only if this prefilter should be used. - #[inline] - pub(crate) fn is_effective(&mut self) -> bool { - self.prestate.is_effective() - } -} - -/// Returns true if the needle has the right characteristics for a vector -/// algorithm to handle the entirety of substring search. -/// -/// Vector algorithms can be used for prefilters for other substring search -/// algorithms (like Two-Way), but they can also be used for substring search -/// on their own. When used for substring search, vector algorithms will -/// quickly identify candidate match positions (just like in the prefilter -/// case), but instead of returning the candidate position they will try to -/// confirm the match themselves. Confirmation happens via `memcmp`. This -/// works well for short needles, but can break down when many false candidate -/// positions are generated for large needles. Thus, we only permit vector -/// algorithms to own substring search when the needle is of a certain length. -#[inline] -fn do_packed_search(needle: &[u8]) -> bool { - /// The minimum length of a needle required for this algorithm. The minimum - /// is 2 since a length of 1 should just use memchr and a length of 0 isn't - /// a case handled by this searcher. - const MIN_LEN: usize = 2; - - /// The maximum length of a needle required for this algorithm. - /// - /// In reality, there is no hard max here. The code below can handle any - /// length needle. (Perhaps that suggests there are missing optimizations.) - /// Instead, this is a heuristic and a bound guaranteeing our linear time - /// complexity. - /// - /// It is a heuristic because when a candidate match is found, memcmp is - /// run. For very large needles with lots of false positives, memcmp can - /// make the code run quite slow. - /// - /// It is a bound because the worst case behavior with memcmp is - /// multiplicative in the size of the needle and haystack, and we want - /// to keep that additive. This bound ensures we still meet that bound - /// theoretically, since it's just a constant. We aren't acting in bad - /// faith here, memcmp on tiny needles is so fast that even in pathological - /// cases (see pathological vector benchmarks), this is still just as fast - /// or faster in practice. - /// - /// This specific number was chosen by tweaking a bit and running - /// benchmarks. The rare-medium-needle, for example, gets about 5% faster - /// by using this algorithm instead of a prefilter-accelerated Two-Way. 
- /// There's also a theoretical desire to keep this number reasonably - /// low, to mitigate the impact of pathological cases. I did try 64, and - /// some benchmarks got a little better, and others (particularly the - /// pathological ones), got a lot worse. So... 32 it is? - const MAX_LEN: usize = 32; - MIN_LEN <= needle.len() && needle.len() <= MAX_LEN -} diff --git a/vendor/memchr/src/tests/memchr/mod.rs b/vendor/memchr/src/tests/memchr/mod.rs deleted file mode 100644 index 0564ad4fbb8a19..00000000000000 --- a/vendor/memchr/src/tests/memchr/mod.rs +++ /dev/null @@ -1,307 +0,0 @@ -use alloc::{ - string::{String, ToString}, - vec, - vec::Vec, -}; - -use crate::ext::Byte; - -pub(crate) mod naive; -#[macro_use] -pub(crate) mod prop; - -const SEEDS: &'static [Seed] = &[ - Seed { haystack: "a", needles: &[b'a'], positions: &[0] }, - Seed { haystack: "aa", needles: &[b'a'], positions: &[0, 1] }, - Seed { haystack: "aaa", needles: &[b'a'], positions: &[0, 1, 2] }, - Seed { haystack: "", needles: &[b'a'], positions: &[] }, - Seed { haystack: "z", needles: &[b'a'], positions: &[] }, - Seed { haystack: "zz", needles: &[b'a'], positions: &[] }, - Seed { haystack: "zza", needles: &[b'a'], positions: &[2] }, - Seed { haystack: "zaza", needles: &[b'a'], positions: &[1, 3] }, - Seed { haystack: "zzza", needles: &[b'a'], positions: &[3] }, - Seed { haystack: "\x00a", needles: &[b'a'], positions: &[1] }, - Seed { haystack: "\x00", needles: &[b'\x00'], positions: &[0] }, - Seed { haystack: "\x00\x00", needles: &[b'\x00'], positions: &[0, 1] }, - Seed { haystack: "\x00a\x00", needles: &[b'\x00'], positions: &[0, 2] }, - Seed { haystack: "zzzzzzzzzzzzzzzza", needles: &[b'a'], positions: &[16] }, - Seed { - haystack: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzza", - needles: &[b'a'], - positions: &[32], - }, - // two needles (applied to memchr2 + memchr3) - Seed { haystack: "az", needles: &[b'a', b'z'], positions: &[0, 1] }, - Seed { haystack: "az", needles: &[b'a', b'z'], positions: &[0, 1] }, - Seed { haystack: "az", needles: &[b'x', b'y'], positions: &[] }, - Seed { haystack: "az", needles: &[b'a', b'y'], positions: &[0] }, - Seed { haystack: "az", needles: &[b'x', b'z'], positions: &[1] }, - Seed { haystack: "yyyyaz", needles: &[b'a', b'z'], positions: &[4, 5] }, - Seed { haystack: "yyyyaz", needles: &[b'z', b'a'], positions: &[4, 5] }, - // three needles (applied to memchr3) - Seed { - haystack: "xyz", - needles: &[b'x', b'y', b'z'], - positions: &[0, 1, 2], - }, - Seed { - haystack: "zxy", - needles: &[b'x', b'y', b'z'], - positions: &[0, 1, 2], - }, - Seed { haystack: "zxy", needles: &[b'x', b'a', b'z'], positions: &[0, 1] }, - Seed { haystack: "zxy", needles: &[b't', b'a', b'z'], positions: &[0] }, - Seed { haystack: "yxz", needles: &[b't', b'a', b'z'], positions: &[2] }, -]; - -/// Runs a host of substring search tests. -/// -/// This has support for "partial" substring search implementations only work -/// for a subset of needles/haystacks. For example, the "packed pair" substring -/// search implementation only works for haystacks of some minimum length based -/// of the pair of bytes selected and the size of the vector used. -pub(crate) struct Runner { - needle_len: usize, -} - -impl Runner { - /// Create a new test runner for forward and reverse byte search - /// implementations. - /// - /// The `needle_len` given must be at most `3` and at least `1`. It - /// corresponds to the number of needle bytes to search for. 
- pub(crate) fn new(needle_len: usize) -> Runner { - assert!(needle_len >= 1, "needle_len must be at least 1"); - assert!(needle_len <= 3, "needle_len must be at most 3"); - Runner { needle_len } - } - - /// Run all tests. This panics on the first failure. - /// - /// If the implementation being tested returns `None` for a particular - /// haystack/needle combination, then that test is skipped. - pub(crate) fn forward_iter(self, mut test: F) - where - F: FnMut(&[u8], &[u8]) -> Option> + 'static, - { - for seed in SEEDS.iter() { - if seed.needles.len() > self.needle_len { - continue; - } - for t in seed.generate() { - let results = match test(t.haystack.as_bytes(), &t.needles) { - None => continue, - Some(results) => results, - }; - assert_eq!( - t.expected, - results, - "needles: {:?}, haystack: {:?}", - t.needles - .iter() - .map(|&b| b.to_char()) - .collect::>(), - t.haystack, - ); - } - } - } - - /// Run all tests in the reverse direction. This panics on the first - /// failure. - /// - /// If the implementation being tested returns `None` for a particular - /// haystack/needle combination, then that test is skipped. - pub(crate) fn reverse_iter(self, mut test: F) - where - F: FnMut(&[u8], &[u8]) -> Option> + 'static, - { - for seed in SEEDS.iter() { - if seed.needles.len() > self.needle_len { - continue; - } - for t in seed.generate() { - let mut results = match test(t.haystack.as_bytes(), &t.needles) - { - None => continue, - Some(results) => results, - }; - results.reverse(); - assert_eq!( - t.expected, - results, - "needles: {:?}, haystack: {:?}", - t.needles - .iter() - .map(|&b| b.to_char()) - .collect::>(), - t.haystack, - ); - } - } - } - - /// Run all tests as counting tests. This panics on the first failure. - /// - /// That is, this only checks that the number of matches is correct and - /// not whether the offsets of each match are. - pub(crate) fn count_iter(self, mut test: F) - where - F: FnMut(&[u8], &[u8]) -> Option + 'static, - { - for seed in SEEDS.iter() { - if seed.needles.len() > self.needle_len { - continue; - } - for t in seed.generate() { - let got = match test(t.haystack.as_bytes(), &t.needles) { - None => continue, - Some(got) => got, - }; - assert_eq!( - t.expected.len(), - got, - "needles: {:?}, haystack: {:?}", - t.needles - .iter() - .map(|&b| b.to_char()) - .collect::>(), - t.haystack, - ); - } - } - } - - /// Like `Runner::forward`, but for a function that returns only the next - /// match and not all matches. - /// - /// If the function returns `None`, then it is skipped. - pub(crate) fn forward_oneshot(self, mut test: F) - where - F: FnMut(&[u8], &[u8]) -> Option> + 'static, - { - self.forward_iter(move |haystack, needles| { - let mut start = 0; - let mut results = vec![]; - while let Some(i) = test(&haystack[start..], needles)? { - results.push(start + i); - start += i + 1; - } - Some(results) - }) - } - - /// Like `Runner::reverse`, but for a function that returns only the last - /// match and not all matches. - /// - /// If the function returns `None`, then it is skipped. - pub(crate) fn reverse_oneshot(self, mut test: F) - where - F: FnMut(&[u8], &[u8]) -> Option> + 'static, - { - self.reverse_iter(move |haystack, needles| { - let mut end = haystack.len(); - let mut results = vec![]; - while let Some(i) = test(&haystack[..end], needles)? { - results.push(i); - end = i; - } - Some(results) - }) - } -} - -/// A single test for memr?chr{,2,3}. -#[derive(Clone, Debug)] -struct Test { - /// The string to search in. 
- haystack: String, - /// The needles to look for. - needles: Vec, - /// The offsets that are expected to be found for all needles in the - /// forward direction. - expected: Vec, -} - -impl Test { - fn new(seed: &Seed) -> Test { - Test { - haystack: seed.haystack.to_string(), - needles: seed.needles.to_vec(), - expected: seed.positions.to_vec(), - } - } -} - -/// Data that can be expanded into many memchr tests by padding out the corpus. -#[derive(Clone, Debug)] -struct Seed { - /// The thing to search. We use `&str` instead of `&[u8]` because they - /// are nicer to write in tests, and we don't miss much since memchr - /// doesn't care about UTF-8. - /// - /// Corpora cannot contain either '%' or '#'. We use these bytes when - /// expanding test cases into many test cases, and we assume they are not - /// used. If they are used, `memchr_tests` will panic. - haystack: &'static str, - /// The needles to search for. This is intended to be an alternation of - /// needles. The number of needles may cause this test to be skipped for - /// some memchr variants. For example, a test with 2 needles cannot be used - /// to test `memchr`, but can be used to test `memchr2` and `memchr3`. - /// However, a test with only 1 needle can be used to test all of `memchr`, - /// `memchr2` and `memchr3`. We achieve this by filling in the needles with - /// bytes that we never used in the corpus (such as '#'). - needles: &'static [u8], - /// The positions expected to match for all of the needles. - positions: &'static [usize], -} - -impl Seed { - /// Controls how much we expand the haystack on either side for each test. - /// We lower this on Miri because otherwise running the tests would take - /// forever. - const EXPAND_LEN: usize = { - #[cfg(not(miri))] - { - 515 - } - #[cfg(miri)] - { - 6 - } - }; - - /// Expand this test into many variations of the same test. - /// - /// In particular, this will generate more tests with larger corpus sizes. - /// The expected positions are updated to maintain the integrity of the - /// test. - /// - /// This is important in testing a memchr implementation, because there are - /// often different cases depending on the length of the corpus. - /// - /// Note that we extend the corpus by adding `%` bytes, which we - /// don't otherwise use as a needle. - fn generate(&self) -> impl Iterator { - let mut more = vec![]; - - // Add bytes to the start of the corpus. - for i in 0..Seed::EXPAND_LEN { - let mut t = Test::new(self); - let mut new: String = core::iter::repeat('%').take(i).collect(); - new.push_str(&t.haystack); - t.haystack = new; - t.expected = t.expected.into_iter().map(|p| p + i).collect(); - more.push(t); - } - // Add bytes to the end of the corpus. 
-        for i in 1..Seed::EXPAND_LEN {
-            let mut t = Test::new(self);
-            let padding: String = core::iter::repeat('%').take(i).collect();
-            t.haystack.push_str(&padding);
-            more.push(t);
-        }
-
-        more.into_iter()
-    }
-}
diff --git a/vendor/memchr/src/tests/memchr/naive.rs b/vendor/memchr/src/tests/memchr/naive.rs
deleted file mode 100644
index 6ebcdaea72a7fd..00000000000000
--- a/vendor/memchr/src/tests/memchr/naive.rs
+++ /dev/null
@@ -1,33 +0,0 @@
-pub(crate) fn memchr(n1: u8, haystack: &[u8]) -> Option<usize> {
-    haystack.iter().position(|&b| b == n1)
-}
-
-pub(crate) fn memchr2(n1: u8, n2: u8, haystack: &[u8]) -> Option<usize> {
-    haystack.iter().position(|&b| b == n1 || b == n2)
-}
-
-pub(crate) fn memchr3(
-    n1: u8,
-    n2: u8,
-    n3: u8,
-    haystack: &[u8],
-) -> Option<usize> {
-    haystack.iter().position(|&b| b == n1 || b == n2 || b == n3)
-}
-
-pub(crate) fn memrchr(n1: u8, haystack: &[u8]) -> Option<usize> {
-    haystack.iter().rposition(|&b| b == n1)
-}
-
-pub(crate) fn memrchr2(n1: u8, n2: u8, haystack: &[u8]) -> Option<usize> {
-    haystack.iter().rposition(|&b| b == n1 || b == n2)
-}
-
-pub(crate) fn memrchr3(
-    n1: u8,
-    n2: u8,
-    n3: u8,
-    haystack: &[u8],
-) -> Option<usize> {
-    haystack.iter().rposition(|&b| b == n1 || b == n2 || b == n3)
-}
diff --git a/vendor/memchr/src/tests/memchr/prop.rs b/vendor/memchr/src/tests/memchr/prop.rs
deleted file mode 100644
index 949ef1f15abc56..00000000000000
--- a/vendor/memchr/src/tests/memchr/prop.rs
+++ /dev/null
@@ -1,323 +0,0 @@
-/// Defines a host of quickcheck tests for the given memchr searcher.
-#[cfg(miri)]
-#[macro_export]
-macro_rules! define_memchr_quickcheck {
-    ($($tt:tt)*) => {};
-}
-
-/// Defines a host of quickcheck tests for the given memchr searcher.
-#[cfg(not(miri))]
-#[macro_export]
-macro_rules! define_memchr_quickcheck {
-    ($mod:ident) => {
-        define_memchr_quickcheck!($mod, new);
-    };
-    ($mod:ident, $cons:ident) => {
-        use alloc::vec::Vec;
-
-        use quickcheck::TestResult;
-
-        use crate::tests::memchr::{
-            naive,
-            prop::{double_ended_take, naive1_iter, naive2_iter, naive3_iter},
-        };
-
-        quickcheck::quickcheck!
{ - fn qc_memchr_matches_naive(n1: u8, corpus: Vec) -> TestResult { - let expected = naive::memchr(n1, &corpus); - let got = match $mod::One::$cons(n1) { - None => return TestResult::discard(), - Some(f) => f.find(&corpus), - }; - TestResult::from_bool(expected == got) - } - - fn qc_memrchr_matches_naive(n1: u8, corpus: Vec) -> TestResult { - let expected = naive::memrchr(n1, &corpus); - let got = match $mod::One::$cons(n1) { - None => return TestResult::discard(), - Some(f) => f.rfind(&corpus), - }; - TestResult::from_bool(expected == got) - } - - fn qc_memchr2_matches_naive(n1: u8, n2: u8, corpus: Vec) -> TestResult { - let expected = naive::memchr2(n1, n2, &corpus); - let got = match $mod::Two::$cons(n1, n2) { - None => return TestResult::discard(), - Some(f) => f.find(&corpus), - }; - TestResult::from_bool(expected == got) - } - - fn qc_memrchr2_matches_naive(n1: u8, n2: u8, corpus: Vec) -> TestResult { - let expected = naive::memrchr2(n1, n2, &corpus); - let got = match $mod::Two::$cons(n1, n2) { - None => return TestResult::discard(), - Some(f) => f.rfind(&corpus), - }; - TestResult::from_bool(expected == got) - } - - fn qc_memchr3_matches_naive( - n1: u8, n2: u8, n3: u8, - corpus: Vec - ) -> TestResult { - let expected = naive::memchr3(n1, n2, n3, &corpus); - let got = match $mod::Three::$cons(n1, n2, n3) { - None => return TestResult::discard(), - Some(f) => f.find(&corpus), - }; - TestResult::from_bool(expected == got) - } - - fn qc_memrchr3_matches_naive( - n1: u8, n2: u8, n3: u8, - corpus: Vec - ) -> TestResult { - let expected = naive::memrchr3(n1, n2, n3, &corpus); - let got = match $mod::Three::$cons(n1, n2, n3) { - None => return TestResult::discard(), - Some(f) => f.rfind(&corpus), - }; - TestResult::from_bool(expected == got) - } - - fn qc_memchr_double_ended_iter( - needle: u8, data: Vec, take_side: Vec - ) -> TestResult { - // make nonempty - let mut take_side = take_side; - if take_side.is_empty() { take_side.push(true) }; - - let finder = match $mod::One::$cons(needle) { - None => return TestResult::discard(), - Some(finder) => finder, - }; - let iter = finder.iter(&data); - let got = double_ended_take( - iter, - take_side.iter().cycle().cloned(), - ); - let expected = naive1_iter(needle, &data); - - TestResult::from_bool(got.iter().cloned().eq(expected)) - } - - fn qc_memchr2_double_ended_iter( - needle1: u8, needle2: u8, data: Vec, take_side: Vec - ) -> TestResult { - // make nonempty - let mut take_side = take_side; - if take_side.is_empty() { take_side.push(true) }; - - let finder = match $mod::Two::$cons(needle1, needle2) { - None => return TestResult::discard(), - Some(finder) => finder, - }; - let iter = finder.iter(&data); - let got = double_ended_take( - iter, - take_side.iter().cycle().cloned(), - ); - let expected = naive2_iter(needle1, needle2, &data); - - TestResult::from_bool(got.iter().cloned().eq(expected)) - } - - fn qc_memchr3_double_ended_iter( - needle1: u8, needle2: u8, needle3: u8, - data: Vec, take_side: Vec - ) -> TestResult { - // make nonempty - let mut take_side = take_side; - if take_side.is_empty() { take_side.push(true) }; - - let finder = match $mod::Three::$cons(needle1, needle2, needle3) { - None => return TestResult::discard(), - Some(finder) => finder, - }; - let iter = finder.iter(&data); - let got = double_ended_take( - iter, - take_side.iter().cycle().cloned(), - ); - let expected = naive3_iter(needle1, needle2, needle3, &data); - - TestResult::from_bool(got.iter().cloned().eq(expected)) - } - - fn qc_memchr1_iter(data: Vec) -> 
TestResult { - let needle = 0; - let finder = match $mod::One::$cons(needle) { - None => return TestResult::discard(), - Some(finder) => finder, - }; - let got = finder.iter(&data); - let expected = naive1_iter(needle, &data); - TestResult::from_bool(got.eq(expected)) - } - - fn qc_memchr1_rev_iter(data: Vec) -> TestResult { - let needle = 0; - - let finder = match $mod::One::$cons(needle) { - None => return TestResult::discard(), - Some(finder) => finder, - }; - let got = finder.iter(&data).rev(); - let expected = naive1_iter(needle, &data).rev(); - TestResult::from_bool(got.eq(expected)) - } - - fn qc_memchr2_iter(data: Vec) -> TestResult { - let needle1 = 0; - let needle2 = 1; - - let finder = match $mod::Two::$cons(needle1, needle2) { - None => return TestResult::discard(), - Some(finder) => finder, - }; - let got = finder.iter(&data); - let expected = naive2_iter(needle1, needle2, &data); - TestResult::from_bool(got.eq(expected)) - } - - fn qc_memchr2_rev_iter(data: Vec) -> TestResult { - let needle1 = 0; - let needle2 = 1; - - let finder = match $mod::Two::$cons(needle1, needle2) { - None => return TestResult::discard(), - Some(finder) => finder, - }; - let got = finder.iter(&data).rev(); - let expected = naive2_iter(needle1, needle2, &data).rev(); - TestResult::from_bool(got.eq(expected)) - } - - fn qc_memchr3_iter(data: Vec) -> TestResult { - let needle1 = 0; - let needle2 = 1; - let needle3 = 2; - - let finder = match $mod::Three::$cons(needle1, needle2, needle3) { - None => return TestResult::discard(), - Some(finder) => finder, - }; - let got = finder.iter(&data); - let expected = naive3_iter(needle1, needle2, needle3, &data); - TestResult::from_bool(got.eq(expected)) - } - - fn qc_memchr3_rev_iter(data: Vec) -> TestResult { - let needle1 = 0; - let needle2 = 1; - let needle3 = 2; - - let finder = match $mod::Three::$cons(needle1, needle2, needle3) { - None => return TestResult::discard(), - Some(finder) => finder, - }; - let got = finder.iter(&data).rev(); - let expected = naive3_iter(needle1, needle2, needle3, &data).rev(); - TestResult::from_bool(got.eq(expected)) - } - - fn qc_memchr1_iter_size_hint(data: Vec) -> TestResult { - // test that the size hint is within reasonable bounds - let needle = 0; - let finder = match $mod::One::$cons(needle) { - None => return TestResult::discard(), - Some(finder) => finder, - }; - let mut iter = finder.iter(&data); - let mut real_count = data - .iter() - .filter(|&&elt| elt == needle) - .count(); - - while let Some(index) = iter.next() { - real_count -= 1; - let (lower, upper) = iter.size_hint(); - assert!(lower <= real_count); - assert!(upper.unwrap() >= real_count); - assert!(upper.unwrap() <= data.len() - index); - } - TestResult::passed() - } - } - }; -} - -// take items from a DEI, taking front for each true and back for each false. -// Return a vector with the concatenation of the fronts and the reverse of the -// backs. 
-#[cfg(not(miri))] -pub(crate) fn double_ended_take( - mut iter: I, - take_side: J, -) -> alloc::vec::Vec -where - I: DoubleEndedIterator, - J: Iterator, -{ - let mut found_front = alloc::vec![]; - let mut found_back = alloc::vec![]; - - for take_front in take_side { - if take_front { - if let Some(pos) = iter.next() { - found_front.push(pos); - } else { - break; - } - } else { - if let Some(pos) = iter.next_back() { - found_back.push(pos); - } else { - break; - } - }; - } - - let mut all_found = found_front; - all_found.extend(found_back.into_iter().rev()); - all_found -} - -// return an iterator of the 0-based indices of haystack that match the needle -#[cfg(not(miri))] -pub(crate) fn naive1_iter<'a>( - n1: u8, - haystack: &'a [u8], -) -> impl DoubleEndedIterator + 'a { - haystack.iter().enumerate().filter(move |&(_, &b)| b == n1).map(|t| t.0) -} - -#[cfg(not(miri))] -pub(crate) fn naive2_iter<'a>( - n1: u8, - n2: u8, - haystack: &'a [u8], -) -> impl DoubleEndedIterator + 'a { - haystack - .iter() - .enumerate() - .filter(move |&(_, &b)| b == n1 || b == n2) - .map(|t| t.0) -} - -#[cfg(not(miri))] -pub(crate) fn naive3_iter<'a>( - n1: u8, - n2: u8, - n3: u8, - haystack: &'a [u8], -) -> impl DoubleEndedIterator + 'a { - haystack - .iter() - .enumerate() - .filter(move |&(_, &b)| b == n1 || b == n2 || b == n3) - .map(|t| t.0) -} diff --git a/vendor/memchr/src/tests/mod.rs b/vendor/memchr/src/tests/mod.rs deleted file mode 100644 index 259b67827a1422..00000000000000 --- a/vendor/memchr/src/tests/mod.rs +++ /dev/null @@ -1,15 +0,0 @@ -#[macro_use] -pub(crate) mod memchr; -pub(crate) mod packedpair; -#[macro_use] -pub(crate) mod substring; - -// For debugging, particularly in CI, print out the byte order of the current -// target. -#[test] -fn byte_order() { - #[cfg(target_endian = "little")] - std::eprintln!("LITTLE ENDIAN"); - #[cfg(target_endian = "big")] - std::eprintln!("BIG ENDIAN"); -} diff --git a/vendor/memchr/src/tests/packedpair.rs b/vendor/memchr/src/tests/packedpair.rs deleted file mode 100644 index 204635b83ea9c1..00000000000000 --- a/vendor/memchr/src/tests/packedpair.rs +++ /dev/null @@ -1,216 +0,0 @@ -use alloc::{boxed::Box, vec, vec::Vec}; - -/// A set of "packed pair" test seeds. Each seed serves as the base for the -/// generation of many other tests. In essence, the seed captures the pair of -/// bytes we used for a predicate and first byte among our needle. The tests -/// generated from each seed essentially vary the length of the needle and -/// haystack, while using the rare/first byte configuration from the seed. -/// -/// The purpose of this is to test many different needle/haystack lengths. -/// In particular, some of the vector optimizations might only have bugs -/// in haystacks of a certain size. -const SEEDS: &[Seed] = &[ - // Why not use different 'first' bytes? It seemed like a good idea to be - // able to configure it, but when I wrote the test generator below, it - // didn't seem necessary to use for reasons that I forget. - Seed { first: b'x', index1: b'y', index2: b'z' }, - Seed { first: b'x', index1: b'x', index2: b'z' }, - Seed { first: b'x', index1: b'y', index2: b'x' }, - Seed { first: b'x', index1: b'x', index2: b'x' }, - Seed { first: b'x', index1: b'y', index2: b'y' }, -]; - -/// Runs a host of "packed pair" search tests. -/// -/// These tests specifically look for the occurrence of a possible substring -/// match based on a pair of bytes matching at the right offsets. 
-pub(crate) struct Runner { - fwd: Option< - Box< - dyn FnMut(&[u8], &[u8], u8, u8) -> Option> + 'static, - >, - >, -} - -impl Runner { - /// Create a new test runner for "packed pair" substring search. - pub(crate) fn new() -> Runner { - Runner { fwd: None } - } - - /// Run all tests. This panics on the first failure. - /// - /// If the implementation being tested returns `None` for a particular - /// haystack/needle combination, then that test is skipped. - /// - /// This runs tests on both the forward and reverse implementations given. - /// If either (or both) are missing, then tests for that implementation are - /// skipped. - pub(crate) fn run(self) { - if let Some(mut fwd) = self.fwd { - for seed in SEEDS.iter() { - for t in seed.generate() { - match fwd(&t.haystack, &t.needle, t.index1, t.index2) { - None => continue, - Some(result) => { - assert_eq!( - t.fwd, result, - "FORWARD, needle: {:?}, haystack: {:?}, \ - index1: {:?}, index2: {:?}", - t.needle, t.haystack, t.index1, t.index2, - ) - } - } - } - } - } - } - - /// Set the implementation for forward "packed pair" substring search. - /// - /// If the closure returns `None`, then it is assumed that the given - /// test cannot be applied to the particular implementation and it is - /// skipped. For example, if a particular implementation only supports - /// needles or haystacks for some minimum length. - /// - /// If this is not set, then forward "packed pair" search is not tested. - pub(crate) fn fwd( - mut self, - search: impl FnMut(&[u8], &[u8], u8, u8) -> Option> + 'static, - ) -> Runner { - self.fwd = Some(Box::new(search)); - self - } -} - -/// A test that represents the input and expected output to a "packed pair" -/// search function. The test should be able to run with any "packed pair" -/// implementation and get the expected output. -struct Test { - haystack: Vec, - needle: Vec, - index1: u8, - index2: u8, - fwd: Option, -} - -impl Test { - /// Create a new "packed pair" test from a seed and some given offsets to - /// the pair of bytes to use as a predicate in the seed's needle. - /// - /// If a valid test could not be constructed, then None is returned. - /// (Currently, we take the approach of massaging tests to be valid - /// instead of rejecting them outright.) - fn new( - seed: Seed, - index1: usize, - index2: usize, - haystack_len: usize, - needle_len: usize, - fwd: Option, - ) -> Option { - let mut index1: u8 = index1.try_into().unwrap(); - let mut index2: u8 = index2.try_into().unwrap(); - // The '#' byte is never used in a haystack (unless we're expecting - // a match), while the '@' byte is never used in a needle. - let mut haystack = vec![b'@'; haystack_len]; - let mut needle = vec![b'#'; needle_len]; - needle[0] = seed.first; - needle[index1 as usize] = seed.index1; - needle[index2 as usize] = seed.index2; - // If we're expecting a match, then make sure the needle occurs - // in the haystack at the expected position. - if let Some(i) = fwd { - haystack[i..i + needle.len()].copy_from_slice(&needle); - } - // If the operations above lead to rare offsets pointing to the - // non-first occurrence of a byte, then adjust it. This might lead - // to redundant tests, but it's simpler than trying to change the - // generation process I think. 
- if let Some(i) = crate::memchr(seed.index1, &needle) { - index1 = u8::try_from(i).unwrap(); - } - if let Some(i) = crate::memchr(seed.index2, &needle) { - index2 = u8::try_from(i).unwrap(); - } - Some(Test { haystack, needle, index1, index2, fwd }) - } -} - -/// Data that describes a single prefilter test seed. -#[derive(Clone, Copy)] -struct Seed { - first: u8, - index1: u8, - index2: u8, -} - -impl Seed { - const NEEDLE_LENGTH_LIMIT: usize = { - #[cfg(not(miri))] - { - 33 - } - #[cfg(miri)] - { - 5 - } - }; - - const HAYSTACK_LENGTH_LIMIT: usize = { - #[cfg(not(miri))] - { - 65 - } - #[cfg(miri)] - { - 8 - } - }; - - /// Generate a series of prefilter tests from this seed. - fn generate(self) -> impl Iterator { - let len_start = 2; - // The iterator below generates *a lot* of tests. The number of - // tests was chosen somewhat empirically to be "bearable" when - // running the test suite. - // - // We use an iterator here because the collective haystacks of all - // these test cases add up to enough memory to OOM a conservative - // sandbox or a small laptop. - (len_start..=Seed::NEEDLE_LENGTH_LIMIT).flat_map(move |needle_len| { - let index_start = len_start - 1; - (index_start..needle_len).flat_map(move |index1| { - (index1..needle_len).flat_map(move |index2| { - (needle_len..=Seed::HAYSTACK_LENGTH_LIMIT).flat_map( - move |haystack_len| { - Test::new( - self, - index1, - index2, - haystack_len, - needle_len, - None, - ) - .into_iter() - .chain( - (0..=(haystack_len - needle_len)).flat_map( - move |output| { - Test::new( - self, - index1, - index2, - haystack_len, - needle_len, - Some(output), - ) - }, - ), - ) - }, - ) - }) - }) - }) - } -} diff --git a/vendor/memchr/src/tests/substring/mod.rs b/vendor/memchr/src/tests/substring/mod.rs deleted file mode 100644 index dd10cbdd4b32c5..00000000000000 --- a/vendor/memchr/src/tests/substring/mod.rs +++ /dev/null @@ -1,232 +0,0 @@ -/*! -This module defines tests and test helpers for substring implementations. 
-*/ - -use alloc::{ - boxed::Box, - format, - string::{String, ToString}, -}; - -pub(crate) mod naive; -#[macro_use] -pub(crate) mod prop; - -const SEEDS: &'static [Seed] = &[ - Seed::new("", "", Some(0), Some(0)), - Seed::new("", "a", Some(0), Some(1)), - Seed::new("", "ab", Some(0), Some(2)), - Seed::new("", "abc", Some(0), Some(3)), - Seed::new("a", "", None, None), - Seed::new("a", "a", Some(0), Some(0)), - Seed::new("a", "aa", Some(0), Some(1)), - Seed::new("a", "ba", Some(1), Some(1)), - Seed::new("a", "bba", Some(2), Some(2)), - Seed::new("a", "bbba", Some(3), Some(3)), - Seed::new("a", "bbbab", Some(3), Some(3)), - Seed::new("a", "bbbabb", Some(3), Some(3)), - Seed::new("a", "bbbabbb", Some(3), Some(3)), - Seed::new("a", "bbbbbb", None, None), - Seed::new("ab", "", None, None), - Seed::new("ab", "a", None, None), - Seed::new("ab", "b", None, None), - Seed::new("ab", "ab", Some(0), Some(0)), - Seed::new("ab", "aab", Some(1), Some(1)), - Seed::new("ab", "aaab", Some(2), Some(2)), - Seed::new("ab", "abaab", Some(0), Some(3)), - Seed::new("ab", "baaab", Some(3), Some(3)), - Seed::new("ab", "acb", None, None), - Seed::new("ab", "abba", Some(0), Some(0)), - Seed::new("abc", "ab", None, None), - Seed::new("abc", "abc", Some(0), Some(0)), - Seed::new("abc", "abcz", Some(0), Some(0)), - Seed::new("abc", "abczz", Some(0), Some(0)), - Seed::new("abc", "zabc", Some(1), Some(1)), - Seed::new("abc", "zzabc", Some(2), Some(2)), - Seed::new("abc", "azbc", None, None), - Seed::new("abc", "abzc", None, None), - Seed::new("abczdef", "abczdefzzzzzzzzzzzzzzzzzzzz", Some(0), Some(0)), - Seed::new("abczdef", "zzzzzzzzzzzzzzzzzzzzabczdef", Some(20), Some(20)), - Seed::new( - "xyz", - "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaxyz", - Some(32), - Some(32), - ), - Seed::new("\u{0}\u{15}", "\u{0}\u{15}\u{15}\u{0}", Some(0), Some(0)), - Seed::new("\u{0}\u{1e}", "\u{1e}\u{0}", None, None), -]; - -/// Runs a host of substring search tests. -/// -/// This has support for "partial" substring search implementations only work -/// for a subset of needles/haystacks. For example, the "packed pair" substring -/// search implementation only works for haystacks of some minimum length based -/// of the pair of bytes selected and the size of the vector used. -pub(crate) struct Runner { - fwd: Option< - Box Option> + 'static>, - >, - rev: Option< - Box Option> + 'static>, - >, -} - -impl Runner { - /// Create a new test runner for forward and reverse substring search - /// implementations. - pub(crate) fn new() -> Runner { - Runner { fwd: None, rev: None } - } - - /// Run all tests. This panics on the first failure. - /// - /// If the implementation being tested returns `None` for a particular - /// haystack/needle combination, then that test is skipped. - /// - /// This runs tests on both the forward and reverse implementations given. - /// If either (or both) are missing, then tests for that implementation are - /// skipped. 
- pub(crate) fn run(self) { - if let Some(mut fwd) = self.fwd { - for seed in SEEDS.iter() { - for t in seed.generate() { - match fwd(t.haystack.as_bytes(), t.needle.as_bytes()) { - None => continue, - Some(result) => { - assert_eq!( - t.fwd, result, - "FORWARD, needle: {:?}, haystack: {:?}", - t.needle, t.haystack, - ); - } - } - } - } - } - if let Some(mut rev) = self.rev { - for seed in SEEDS.iter() { - for t in seed.generate() { - match rev(t.haystack.as_bytes(), t.needle.as_bytes()) { - None => continue, - Some(result) => { - assert_eq!( - t.rev, result, - "REVERSE, needle: {:?}, haystack: {:?}", - t.needle, t.haystack, - ); - } - } - } - } - } - } - - /// Set the implementation for forward substring search. - /// - /// If the closure returns `None`, then it is assumed that the given - /// test cannot be applied to the particular implementation and it is - /// skipped. For example, if a particular implementation only supports - /// needles or haystacks for some minimum length. - /// - /// If this is not set, then forward substring search is not tested. - pub(crate) fn fwd( - mut self, - search: impl FnMut(&[u8], &[u8]) -> Option> + 'static, - ) -> Runner { - self.fwd = Some(Box::new(search)); - self - } - - /// Set the implementation for reverse substring search. - /// - /// If the closure returns `None`, then it is assumed that the given - /// test cannot be applied to the particular implementation and it is - /// skipped. For example, if a particular implementation only supports - /// needles or haystacks for some minimum length. - /// - /// If this is not set, then reverse substring search is not tested. - pub(crate) fn rev( - mut self, - search: impl FnMut(&[u8], &[u8]) -> Option> + 'static, - ) -> Runner { - self.rev = Some(Box::new(search)); - self - } -} - -/// A single substring test for forward and reverse searches. -#[derive(Clone, Debug)] -struct Test { - needle: String, - haystack: String, - fwd: Option, - rev: Option, -} - -/// A single substring test for forward and reverse searches. -/// -/// Each seed is valid on its own, but it also serves as a starting point -/// to generate more tests. Namely, we pad out the haystacks with other -/// characters so that we get more complete coverage. This is especially useful -/// for testing vector algorithms that tend to have weird special cases for -/// alignment and loop unrolling. -/// -/// Padding works by assuming certain characters never otherwise appear in a -/// needle or a haystack. Neither should contain a `#` character. -#[derive(Clone, Copy, Debug)] -struct Seed { - needle: &'static str, - haystack: &'static str, - fwd: Option, - rev: Option, -} - -impl Seed { - const MAX_PAD: usize = 34; - - const fn new( - needle: &'static str, - haystack: &'static str, - fwd: Option, - rev: Option, - ) -> Seed { - Seed { needle, haystack, fwd, rev } - } - - fn generate(self) -> impl Iterator { - assert!(!self.needle.contains('#'), "needle must not contain '#'"); - assert!(!self.haystack.contains('#'), "haystack must not contain '#'"); - (0..=Seed::MAX_PAD) - // Generate tests for padding at the beginning of haystack. 
- .map(move |pad| { - let needle = self.needle.to_string(); - let prefix = "#".repeat(pad); - let haystack = format!("{}{}", prefix, self.haystack); - let fwd = if needle.is_empty() { - Some(0) - } else { - self.fwd.map(|i| pad + i) - }; - let rev = if needle.is_empty() { - Some(haystack.len()) - } else { - self.rev.map(|i| pad + i) - }; - Test { needle, haystack, fwd, rev } - }) - // Generate tests for padding at the end of haystack. - .chain((1..=Seed::MAX_PAD).map(move |pad| { - let needle = self.needle.to_string(); - let suffix = "#".repeat(pad); - let haystack = format!("{}{}", self.haystack, suffix); - let fwd = if needle.is_empty() { Some(0) } else { self.fwd }; - let rev = if needle.is_empty() { - Some(haystack.len()) - } else { - self.rev - }; - Test { needle, haystack, fwd, rev } - })) - } -} diff --git a/vendor/memchr/src/tests/substring/naive.rs b/vendor/memchr/src/tests/substring/naive.rs deleted file mode 100644 index 1bc6009849f12d..00000000000000 --- a/vendor/memchr/src/tests/substring/naive.rs +++ /dev/null @@ -1,45 +0,0 @@ -/*! -This module defines "naive" implementations of substring search. - -These are sometimes useful to compare with "real" substring implementations. -The idea is that they are so simple that they are unlikely to be incorrect. -*/ - -/// Naively search forwards for the given needle in the given haystack. -pub(crate) fn find(haystack: &[u8], needle: &[u8]) -> Option { - let end = haystack.len().checked_sub(needle.len()).map_or(0, |i| i + 1); - for i in 0..end { - if needle == &haystack[i..i + needle.len()] { - return Some(i); - } - } - None -} - -/// Naively search in reverse for the given needle in the given haystack. -pub(crate) fn rfind(haystack: &[u8], needle: &[u8]) -> Option { - let end = haystack.len().checked_sub(needle.len()).map_or(0, |i| i + 1); - for i in (0..end).rev() { - if needle == &haystack[i..i + needle.len()] { - return Some(i); - } - } - None -} - -#[cfg(test)] -mod tests { - use crate::tests::substring; - - use super::*; - - #[test] - fn forward() { - substring::Runner::new().fwd(|h, n| Some(find(h, n))).run() - } - - #[test] - fn reverse() { - substring::Runner::new().rev(|h, n| Some(rfind(h, n))).run() - } -} diff --git a/vendor/memchr/src/tests/substring/prop.rs b/vendor/memchr/src/tests/substring/prop.rs deleted file mode 100644 index a8352ec74c5acc..00000000000000 --- a/vendor/memchr/src/tests/substring/prop.rs +++ /dev/null @@ -1,126 +0,0 @@ -/*! -This module defines a few quickcheck properties for substring search. - -It also provides a forward and reverse macro for conveniently defining -quickcheck tests that run these properties over any substring search -implementation. -*/ - -use crate::tests::substring::naive; - -/// $fwd is a `impl FnMut(haystack, needle) -> Option>`. When the -/// routine returns `None`, then it's skipped, which is useful for substring -/// implementations that don't work for all inputs. -#[macro_export] -macro_rules! define_substring_forward_quickcheck { - ($fwd:expr) => { - #[cfg(not(miri))] - quickcheck::quickcheck! 
{ - fn qc_fwd_prefix_is_substring(bs: alloc::vec::Vec) -> bool { - crate::tests::substring::prop::prefix_is_substring(&bs, $fwd) - } - - fn qc_fwd_suffix_is_substring(bs: alloc::vec::Vec) -> bool { - crate::tests::substring::prop::suffix_is_substring(&bs, $fwd) - } - - fn qc_fwd_matches_naive( - haystack: alloc::vec::Vec, - needle: alloc::vec::Vec - ) -> bool { - crate::tests::substring::prop::same_as_naive( - false, - &haystack, - &needle, - $fwd, - ) - } - } - }; -} - -/// $rev is a `impl FnMut(haystack, needle) -> Option>`. When the -/// routine returns `None`, then it's skipped, which is useful for substring -/// implementations that don't work for all inputs. -#[macro_export] -macro_rules! define_substring_reverse_quickcheck { - ($rev:expr) => { - #[cfg(not(miri))] - quickcheck::quickcheck! { - fn qc_rev_prefix_is_substring(bs: alloc::vec::Vec) -> bool { - crate::tests::substring::prop::prefix_is_substring(&bs, $rev) - } - - fn qc_rev_suffix_is_substring(bs: alloc::vec::Vec) -> bool { - crate::tests::substring::prop::suffix_is_substring(&bs, $rev) - } - - fn qc_rev_matches_naive( - haystack: alloc::vec::Vec, - needle: alloc::vec::Vec - ) -> bool { - crate::tests::substring::prop::same_as_naive( - true, - &haystack, - &needle, - $rev, - ) - } - } - }; -} - -/// Check that every prefix of the given byte string is a substring. -pub(crate) fn prefix_is_substring( - bs: &[u8], - mut search: impl FnMut(&[u8], &[u8]) -> Option>, -) -> bool { - for i in 0..bs.len().saturating_sub(1) { - let prefix = &bs[..i]; - let result = match search(bs, prefix) { - None => continue, - Some(result) => result, - }; - if !result.is_some() { - return false; - } - } - true -} - -/// Check that every suffix of the given byte string is a substring. -pub(crate) fn suffix_is_substring( - bs: &[u8], - mut search: impl FnMut(&[u8], &[u8]) -> Option>, -) -> bool { - for i in 0..bs.len().saturating_sub(1) { - let suffix = &bs[i..]; - let result = match search(bs, suffix) { - None => continue, - Some(result) => result, - }; - if !result.is_some() { - return false; - } - } - true -} - -/// Check that naive substring search matches the result of the given search -/// algorithm. -pub(crate) fn same_as_naive( - reverse: bool, - haystack: &[u8], - needle: &[u8], - mut search: impl FnMut(&[u8], &[u8]) -> Option>, -) -> bool { - let result = match search(haystack, needle) { - None => return true, - Some(result) => result, - }; - if reverse { - result == naive::rfind(haystack, needle) - } else { - result == naive::find(haystack, needle) - } -} diff --git a/vendor/memchr/src/vector.rs b/vendor/memchr/src/vector.rs deleted file mode 100644 index 69f2af01b46a44..00000000000000 --- a/vendor/memchr/src/vector.rs +++ /dev/null @@ -1,501 +0,0 @@ -/// A trait for describing vector operations used by vectorized searchers. -/// -/// The trait is highly constrained to low level vector operations needed. -/// In general, it was invented mostly to be generic over x86's __m128i and -/// __m256i types. At time of writing, it also supports wasm and aarch64 -/// 128-bit vector types as well. -/// -/// # Safety -/// -/// All methods are not safe since they are intended to be implemented using -/// vendor intrinsics, which are also not safe. Callers must ensure that the -/// appropriate target features are enabled in the calling function, and that -/// the current CPU supports them. 
All implementations should avoid marking the -/// routines with #[target_feature] and instead mark them as #[inline(always)] -/// to ensure they get appropriately inlined. (inline(always) cannot be used -/// with target_feature.) -pub(crate) trait Vector: Copy + core::fmt::Debug { - /// The number of bytes in the vector. That is, this is the size of the - /// vector in memory. - const BYTES: usize; - /// The bits that must be zero in order for a `*const u8` pointer to be - /// correctly aligned to read vector values. - const ALIGN: usize; - - /// The type of the value returned by `Vector::movemask`. - /// - /// This supports abstracting over the specific representation used in - /// order to accommodate different representations in different ISAs. - type Mask: MoveMask; - - /// Create a vector with 8-bit lanes with the given byte repeated into each - /// lane. - unsafe fn splat(byte: u8) -> Self; - - /// Read a vector-size number of bytes from the given pointer. The pointer - /// must be aligned to the size of the vector. - /// - /// # Safety - /// - /// Callers must guarantee that at least `BYTES` bytes are readable from - /// `data` and that `data` is aligned to a `BYTES` boundary. - unsafe fn load_aligned(data: *const u8) -> Self; - - /// Read a vector-size number of bytes from the given pointer. The pointer - /// does not need to be aligned. - /// - /// # Safety - /// - /// Callers must guarantee that at least `BYTES` bytes are readable from - /// `data`. - unsafe fn load_unaligned(data: *const u8) -> Self; - - /// _mm_movemask_epi8 or _mm256_movemask_epi8 - unsafe fn movemask(self) -> Self::Mask; - /// _mm_cmpeq_epi8 or _mm256_cmpeq_epi8 - unsafe fn cmpeq(self, vector2: Self) -> Self; - /// _mm_and_si128 or _mm256_and_si256 - unsafe fn and(self, vector2: Self) -> Self; - /// _mm_or or _mm256_or_si256 - unsafe fn or(self, vector2: Self) -> Self; - /// Returns true if and only if `Self::movemask` would return a mask that - /// contains at least one non-zero bit. - unsafe fn movemask_will_have_non_zero(self) -> bool { - self.movemask().has_non_zero() - } -} - -/// A trait that abstracts over a vector-to-scalar operation called -/// "move mask." -/// -/// On x86-64, this is `_mm_movemask_epi8` for SSE2 and `_mm256_movemask_epi8` -/// for AVX2. It takes a vector of `u8` lanes and returns a scalar where the -/// `i`th bit is set if and only if the most significant bit in the `i`th lane -/// of the vector is set. The simd128 ISA for wasm32 also supports this -/// exact same operation natively. -/// -/// ... But aarch64 doesn't. So we have to fake it with more instructions and -/// a slightly different representation. We could do extra work to unify the -/// representations, but then would require additional costs in the hot path -/// for `memchr` and `packedpair`. So instead, we abstraction over the specific -/// representation with this trait and define the operations we actually need. -pub(crate) trait MoveMask: Copy + core::fmt::Debug { - /// Return a mask that is all zeros except for the least significant `n` - /// lanes in a corresponding vector. - fn all_zeros_except_least_significant(n: usize) -> Self; - - /// Returns true if and only if this mask has a a non-zero bit anywhere. - fn has_non_zero(self) -> bool; - - /// Returns the number of bits set to 1 in this mask. - fn count_ones(self) -> usize; - - /// Does a bitwise `and` operation between `self` and `other`. - fn and(self, other: Self) -> Self; - - /// Does a bitwise `or` operation between `self` and `other`. 
- fn or(self, other: Self) -> Self; - - /// Returns a mask that is equivalent to `self` but with the least - /// significant 1-bit set to 0. - fn clear_least_significant_bit(self) -> Self; - - /// Returns the offset of the first non-zero lane this mask represents. - fn first_offset(self) -> usize; - - /// Returns the offset of the last non-zero lane this mask represents. - fn last_offset(self) -> usize; -} - -/// This is a "sensible" movemask implementation where each bit represents -/// whether the most significant bit is set in each corresponding lane of a -/// vector. This is used on x86-64 and wasm, but such a mask is more expensive -/// to get on aarch64 so we use something a little different. -/// -/// We call this "sensible" because this is what we get using native sse/avx -/// movemask instructions. But neon has no such native equivalent. -#[derive(Clone, Copy, Debug)] -pub(crate) struct SensibleMoveMask(u32); - -impl SensibleMoveMask { - /// Get the mask in a form suitable for computing offsets. - /// - /// Basically, this normalizes to little endian. On big endian, this swaps - /// the bytes. - #[inline(always)] - fn get_for_offset(self) -> u32 { - #[cfg(target_endian = "big")] - { - self.0.swap_bytes() - } - #[cfg(target_endian = "little")] - { - self.0 - } - } -} - -impl MoveMask for SensibleMoveMask { - #[inline(always)] - fn all_zeros_except_least_significant(n: usize) -> SensibleMoveMask { - debug_assert!(n < 32); - SensibleMoveMask(!((1 << n) - 1)) - } - - #[inline(always)] - fn has_non_zero(self) -> bool { - self.0 != 0 - } - - #[inline(always)] - fn count_ones(self) -> usize { - self.0.count_ones() as usize - } - - #[inline(always)] - fn and(self, other: SensibleMoveMask) -> SensibleMoveMask { - SensibleMoveMask(self.0 & other.0) - } - - #[inline(always)] - fn or(self, other: SensibleMoveMask) -> SensibleMoveMask { - SensibleMoveMask(self.0 | other.0) - } - - #[inline(always)] - fn clear_least_significant_bit(self) -> SensibleMoveMask { - SensibleMoveMask(self.0 & (self.0 - 1)) - } - - #[inline(always)] - fn first_offset(self) -> usize { - // We are dealing with little endian here (and if we aren't, we swap - // the bytes so we are in practice), where the most significant byte - // is at a higher address. That means the least significant bit that - // is set corresponds to the position of our first matching byte. - // That position corresponds to the number of zeros after the least - // significant bit. - self.get_for_offset().trailing_zeros() as usize - } - - #[inline(always)] - fn last_offset(self) -> usize { - // We are dealing with little endian here (and if we aren't, we swap - // the bytes so we are in practice), where the most significant byte is - // at a higher address. That means the most significant bit that is set - // corresponds to the position of our last matching byte. The position - // from the end of the mask is therefore the number of leading zeros - // in a 32 bit integer, and the position from the start of the mask is - // therefore 32 - (leading zeros) - 1. 
- 32 - self.get_for_offset().leading_zeros() as usize - 1 - } -} - -#[cfg(target_arch = "x86_64")] -mod x86sse2 { - use core::arch::x86_64::*; - - use super::{SensibleMoveMask, Vector}; - - impl Vector for __m128i { - const BYTES: usize = 16; - const ALIGN: usize = Self::BYTES - 1; - - type Mask = SensibleMoveMask; - - #[inline(always)] - unsafe fn splat(byte: u8) -> __m128i { - _mm_set1_epi8(byte as i8) - } - - #[inline(always)] - unsafe fn load_aligned(data: *const u8) -> __m128i { - _mm_load_si128(data as *const __m128i) - } - - #[inline(always)] - unsafe fn load_unaligned(data: *const u8) -> __m128i { - _mm_loadu_si128(data as *const __m128i) - } - - #[inline(always)] - unsafe fn movemask(self) -> SensibleMoveMask { - SensibleMoveMask(_mm_movemask_epi8(self) as u32) - } - - #[inline(always)] - unsafe fn cmpeq(self, vector2: Self) -> __m128i { - _mm_cmpeq_epi8(self, vector2) - } - - #[inline(always)] - unsafe fn and(self, vector2: Self) -> __m128i { - _mm_and_si128(self, vector2) - } - - #[inline(always)] - unsafe fn or(self, vector2: Self) -> __m128i { - _mm_or_si128(self, vector2) - } - } -} - -#[cfg(target_arch = "x86_64")] -mod x86avx2 { - use core::arch::x86_64::*; - - use super::{SensibleMoveMask, Vector}; - - impl Vector for __m256i { - const BYTES: usize = 32; - const ALIGN: usize = Self::BYTES - 1; - - type Mask = SensibleMoveMask; - - #[inline(always)] - unsafe fn splat(byte: u8) -> __m256i { - _mm256_set1_epi8(byte as i8) - } - - #[inline(always)] - unsafe fn load_aligned(data: *const u8) -> __m256i { - _mm256_load_si256(data as *const __m256i) - } - - #[inline(always)] - unsafe fn load_unaligned(data: *const u8) -> __m256i { - _mm256_loadu_si256(data as *const __m256i) - } - - #[inline(always)] - unsafe fn movemask(self) -> SensibleMoveMask { - SensibleMoveMask(_mm256_movemask_epi8(self) as u32) - } - - #[inline(always)] - unsafe fn cmpeq(self, vector2: Self) -> __m256i { - _mm256_cmpeq_epi8(self, vector2) - } - - #[inline(always)] - unsafe fn and(self, vector2: Self) -> __m256i { - _mm256_and_si256(self, vector2) - } - - #[inline(always)] - unsafe fn or(self, vector2: Self) -> __m256i { - _mm256_or_si256(self, vector2) - } - } -} - -#[cfg(target_arch = "aarch64")] -mod aarch64neon { - use core::arch::aarch64::*; - - use super::{MoveMask, Vector}; - - impl Vector for uint8x16_t { - const BYTES: usize = 16; - const ALIGN: usize = Self::BYTES - 1; - - type Mask = NeonMoveMask; - - #[inline(always)] - unsafe fn splat(byte: u8) -> uint8x16_t { - vdupq_n_u8(byte) - } - - #[inline(always)] - unsafe fn load_aligned(data: *const u8) -> uint8x16_t { - // I've tried `data.cast::().read()` instead, but - // couldn't observe any benchmark differences. - Self::load_unaligned(data) - } - - #[inline(always)] - unsafe fn load_unaligned(data: *const u8) -> uint8x16_t { - vld1q_u8(data) - } - - #[inline(always)] - unsafe fn movemask(self) -> NeonMoveMask { - let asu16s = vreinterpretq_u16_u8(self); - let mask = vshrn_n_u16(asu16s, 4); - let asu64 = vreinterpret_u64_u8(mask); - let scalar64 = vget_lane_u64(asu64, 0); - NeonMoveMask(scalar64 & 0x8888888888888888) - } - - #[inline(always)] - unsafe fn cmpeq(self, vector2: Self) -> uint8x16_t { - vceqq_u8(self, vector2) - } - - #[inline(always)] - unsafe fn and(self, vector2: Self) -> uint8x16_t { - vandq_u8(self, vector2) - } - - #[inline(always)] - unsafe fn or(self, vector2: Self) -> uint8x16_t { - vorrq_u8(self, vector2) - } - - /// This is the only interesting implementation of this routine. 
- /// Basically, instead of doing the "shift right narrow" dance, we use - /// adjacent folding max to determine whether there are any non-zero - /// bytes in our mask. If there are, *then* we'll do the "shift right - /// narrow" dance. In benchmarks, this does lead to slightly better - /// throughput, but the win doesn't appear huge. - #[inline(always)] - unsafe fn movemask_will_have_non_zero(self) -> bool { - let low = vreinterpretq_u64_u8(vpmaxq_u8(self, self)); - vgetq_lane_u64(low, 0) != 0 - } - } - - /// Neon doesn't have a `movemask` that works like the one in x86-64, so we - /// wind up using a different method[1]. The different method also produces - /// a mask, but 4 bits are set in the neon case instead of a single bit set - /// in the x86-64 case. We do an extra step to zero out 3 of the 4 bits, - /// but we still wind up with at least 3 zeroes between each set bit. This - /// generally means that we need to do some division by 4 before extracting - /// offsets. - /// - /// In fact, the existence of this type is the entire reason that we have - /// the `MoveMask` trait in the first place. This basically lets us keep - /// the different representations of masks without being forced to unify - /// them into a single representation, which could result in extra and - /// unnecessary work. - /// - /// [1]: https://community.arm.com/arm-community-blogs/b/infrastructure-solutions-blog/posts/porting-x86-vector-bitmask-optimizations-to-arm-neon - #[derive(Clone, Copy, Debug)] - pub(crate) struct NeonMoveMask(u64); - - impl NeonMoveMask { - /// Get the mask in a form suitable for computing offsets. - /// - /// The mask is always already in host-endianness, so this is a no-op. - #[inline(always)] - fn get_for_offset(self) -> u64 { - self.0 - } - } - - impl MoveMask for NeonMoveMask { - #[inline(always)] - fn all_zeros_except_least_significant(n: usize) -> NeonMoveMask { - debug_assert!(n < 16); - NeonMoveMask(!(((1 << n) << 2) - 1)) - } - - #[inline(always)] - fn has_non_zero(self) -> bool { - self.0 != 0 - } - - #[inline(always)] - fn count_ones(self) -> usize { - self.0.count_ones() as usize - } - - #[inline(always)] - fn and(self, other: NeonMoveMask) -> NeonMoveMask { - NeonMoveMask(self.0 & other.0) - } - - #[inline(always)] - fn or(self, other: NeonMoveMask) -> NeonMoveMask { - NeonMoveMask(self.0 | other.0) - } - - #[inline(always)] - fn clear_least_significant_bit(self) -> NeonMoveMask { - NeonMoveMask(self.0 & (self.0 - 1)) - } - - #[inline(always)] - fn first_offset(self) -> usize { - // We are dealing with little endian here (and if we aren't, - // we swap the bytes so we are in practice), where the most - // significant byte is at a higher address. That means the least - // significant bit that is set corresponds to the position of our - // first matching byte. That position corresponds to the number of - // zeros after the least significant bit. - // - // Note that unlike `SensibleMoveMask`, this mask has its bits - // spread out over 64 bits instead of 16 bits (for a 128 bit - // vector). Namely, where as x86-64 will turn - // - // 0x00 0xFF 0x00 0x00 0xFF - // - // into 10010, our neon approach will turn it into - // - // 10000000000010000000 - // - // And this happens because neon doesn't have a native `movemask` - // instruction, so we kind of fake it[1]. Thus, we divide the - // number of trailing zeros by 4 to get the "real" offset. 
- // - // [1]: https://community.arm.com/arm-community-blogs/b/infrastructure-solutions-blog/posts/porting-x86-vector-bitmask-optimizations-to-arm-neon - (self.get_for_offset().trailing_zeros() >> 2) as usize - } - - #[inline(always)] - fn last_offset(self) -> usize { - // See comment in `first_offset` above. This is basically the same, - // but coming from the other direction. - 16 - (self.get_for_offset().leading_zeros() >> 2) as usize - 1 - } - } -} - -#[cfg(all(target_arch = "wasm32", target_feature = "simd128"))] -mod wasm_simd128 { - use core::arch::wasm32::*; - - use super::{SensibleMoveMask, Vector}; - - impl Vector for v128 { - const BYTES: usize = 16; - const ALIGN: usize = Self::BYTES - 1; - - type Mask = SensibleMoveMask; - - #[inline(always)] - unsafe fn splat(byte: u8) -> v128 { - u8x16_splat(byte) - } - - #[inline(always)] - unsafe fn load_aligned(data: *const u8) -> v128 { - *data.cast() - } - - #[inline(always)] - unsafe fn load_unaligned(data: *const u8) -> v128 { - v128_load(data.cast()) - } - - #[inline(always)] - unsafe fn movemask(self) -> SensibleMoveMask { - SensibleMoveMask(u8x16_bitmask(self).into()) - } - - #[inline(always)] - unsafe fn cmpeq(self, vector2: Self) -> v128 { - u8x16_eq(self, vector2) - } - - #[inline(always)] - unsafe fn and(self, vector2: Self) -> v128 { - v128_and(self, vector2) - } - - #[inline(always)] - unsafe fn or(self, vector2: Self) -> v128 { - v128_or(self, vector2) - } - } -} diff --git a/vendor/minimal-lexical/.cargo-checksum.json b/vendor/minimal-lexical/.cargo-checksum.json deleted file mode 100644 index 20c50c75217ea4..00000000000000 --- a/vendor/minimal-lexical/.cargo-checksum.json +++ /dev/null @@ -1 +0,0 @@ -{"files":{".cargo_vcs_info.json":"e819b814dde0c854395882e9f65856c3196961ceced92eeab6ade4d350e7cccc",".github/ISSUE_TEMPLATE/bug_report.md":"cce60fa26d7e6afb7aa84755d6bc6431afe1f390823033545ac3ac9d94740b19",".github/ISSUE_TEMPLATE/custom.md":"b52f73fd67ebd71d43f36d5d1a2f3a53d1f32e126f70ccf0126900ff9f2aec3c",".github/ISSUE_TEMPLATE/documentation.md":"986b9a1421dc15af628bdff8691eeb39d92e36bedb7742d2a4d8327f6cb921a3",".github/ISSUE_TEMPLATE/feature_request.md":"e7861c6047eb39fb4dead4198c141817215839fddb43d16cb6e679417428a73e",".github/ISSUE_TEMPLATE/question.md":"75d3de186382ff882e26e1aba65b4b207cbd3822b9491cd92886fa7987a6ba23",".github/PULL_REQUEST_TEMPLATE/bug_fix.md":"8d7bfb13212e583b9cb717ec39ac2d2070d85470bdf81a32f89e91796a14efcc",".github/PULL_REQUEST_TEMPLATE/custom.md":"88e332c54fe5a52842abdc33e129fa12b0b39c1aaa78da16bc3e1ccce0f3e643",".github/PULL_REQUEST_TEMPLATE/documentation.md":"ac8bae6001c6822dc6d2334c085018c38a8f121f0c580b33b770357170a59c76",".github/workflows/Cross.yml":"51cd10949a21f4aa734a45c06021b53f81cebddcde6723e69caf39d6b7a53cc7",".github/workflows/Features.yml":"4b7182995976d3872853555e989d49be03cfacf92a6807317c01623a1de59742",".github/workflows/OSX.yml":"1ffe8ad7703afb4bc67caf52550b53095861f7290e9b1cbd9f7f7e62de82b3b4",".github/workflows/Simple.yml":"6c681d49afdf74a85757fca4d6bfce076daebbb8816409f42345c2782ba5408d",".github/workflows/Valgrind.yml":"5beae6618e643ef75a6cdc6622bb64a586f3bc956401551920716564d4f3c1fe",".gitmodules":"6976207a02c7160a3a1d076c5fae10fe4b78f58cdc0aa66ae47f3855b3c392fb","CHANGELOG":"55ebcf7ee0fd10987829a98fb9757cbc6f68c62198bc70122384bedc08de9915","CODE_OF_CONDUCT.md":"0bd67c62d204ec67cb29969aaf5aac337a77c84b318937bc2d9dc7e3fcbcdcaa","Cargo.toml":"6c485fa605a0d3de6ec0af125b67c55224515354034f990334b1a1a86988c632","Cargo.toml.orig":"8f0cdacc663b3e6a07f803c2c436d18254a9ad55057782ff350aaf4
bfb2121e2","LICENSE-APACHE":"8173d5c29b4f956d532781d2b86e4e30f83e6b7878dce18c919451d6ba707c90","LICENSE-MIT":"23f18e03dc49df91622fe2a76176497404e46ced8a715d9d2b67a7446571cca3","LICENSE.md":"dbe1fff0fb1314b6af94f161511406275cf01c5a32441fbf24528a57a051d599","README.md":"00d521d93124e88edf58d42b3114786c0daaf437e73118eed27b3c716b7514ad","clippy.toml":"8d3aafbcf358ccf45cc148cd705f5fe71e777dabc391ed9a2bae32af73291fe8","rustfmt.toml":"ae46c79a84842907e151ca5d07e36b8d1932b61c6989465500c0f706507f42cc","src/bellerophon.rs":"bbabeccfa7a70ec314a894d968b424d1162df1b9e5e659baa1fd3a37d985fe75","src/bigint.rs":"93d3332d01bb7745bc569bc6b242c6c71c75eb78835b6a05bad91952b989daf0","src/extended_float.rs":"6040bdd49c03f11f8607b2da1703e1b7b5f57ddac9b02322c6958f9c1684496f","src/fpu.rs":"72d63bac2bbfc545128aa59dc35cfad3c553bac64bf575880775b3c4ace415bc","src/heapvec.rs":"836a49d40e5da90d195508e963d869afd78aaf9adb9cd60a0cb8f92b4e105c4f","src/lemire.rs":"100f3cb293deed0b0d1e7ca6b23152ba160f92f887f8924620b28c9ab77326db","src/lib.rs":"626dfc61992c42d4996dddcbfff5775ff2ffae44d116d2d70f6564a3209c0a9a","src/libm.rs":"ed5a3856eaa44a8a05aa123e27c2048b92ff42e4af1ef3f9fa1aff2a50190f4a","src/mask.rs":"63bcda92d14169a55ac54798f45365cef64a1aecd9625c3c3bc3deae202b2a07","src/num.rs":"dbcab14a5fe8e40e381829426dd75a7db672882592b5c4a08897a2fb6d2ae7ea","src/number.rs":"49d0880a99816ecf904fb88f607a821d6770ec270825b96e800a297ab1a01d78","src/parse.rs":"19559db67eddd17d331274cf87d6c4beeb0724dcdf859de9b8ab5995c4b8e682","src/rounding.rs":"8ba42d31618db1e6a381f8b60ffe1f9d216aaccb931b8fac5f279d8465e35cb7","src/slow.rs":"f096e7f83e8372e71568ec1724bc1c9d2c67ca39b80290ba062e60ae94b1f8d1","src/stackvec.rs":"0c921eb3adbd42cbe7be0f363e08ac85b6d5f1dabd4a7b077becddeff731da16","src/table.rs":"e4288891e9b1d8ba60dcc73edc639754cc2351d3219df8c625e694f3f0e58c5a","src/table_bellerophon.rs":"a2102292b27223a81e60a8a6607c42587efde3424526156921167742a0d5937c","src/table_lemire.rs":"c101c353c38b594f5b8987263b759927095b5dcd72e65607cc1c6a7de0bfd0c8","src/table_small.rs":"61b00e13eb3945622bf8bed374c88e29c63dfedb7384b31b35dd4e543cbe62c1","tests/bellerophon.rs":"b17b87b8963ebcd71f684e4d48c1ce619964e4fb719a5875b0ce4514ed528674","tests/bellerophon_tests.rs":"76b71efa2f4cec56a79535e2d292788a5e1b443c901ec7a234800782f36ddb68","tests/integration_tests.rs":"ed1a1fc46fc239eb4ea718057ad6e9869f633797ef72fc6f05b1757ab80e1641","tests/lemire_tests.rs":"6213bcd9b44def655b44a6b760cee0c0ad82d3bb494f48c2ff100698da34625b","tests/libm_tests.rs":"6941e74d7d0adf021edc93b9919275e82810687ff33147a10361613073b22669","tests/mask_tests.rs":"8c2a3daf434815389b6bf88837e3f382d74d97250104b925d70779366bd3d537","tests/number_tests.rs":"df4b4f0c65478f2f6193bd918fa4aad7163e40598c58df44547c4559e4a8b0c7","tests/parse_tests.rs":"bc0066b9257368f0365276fcffa2662c4699a033eaf9a4a7d6faa0e9b915094a","tests/rounding_tests.rs":"99f38b768ad15e726559c446825f9f1bad67935cdd28ffcc1cbcd3e031a901ea","tests/slow_tests.rs":"36c4c2538d2f5a1c1af5deb26ec4eba47f19f9a3c280a13d10704267a16d3b3f","tests/stackvec.rs":"f040611995bcd1bd2cb47694e74aa02ff4fabdffe007f712c9bb788d82dfb8a7","tests/vec_tests.rs":"09b561160df3b1385876db452bb5a67ef2c9fd2cc36b5687e1dfaf8c58947782"},"package":"68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a"} \ No newline at end of file diff --git a/vendor/minimal-lexical/.cargo_vcs_info.json b/vendor/minimal-lexical/.cargo_vcs_info.json deleted file mode 100644 index 33bf0d300b17dc..00000000000000 --- a/vendor/minimal-lexical/.cargo_vcs_info.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "git": { - 
"sha1": "e997c46656ebe83e696b866bd954da1fa3f64eef" - } -} diff --git a/vendor/minimal-lexical/.github/ISSUE_TEMPLATE/bug_report.md b/vendor/minimal-lexical/.github/ISSUE_TEMPLATE/bug_report.md deleted file mode 100644 index 1839a7fa6bdfef..00000000000000 --- a/vendor/minimal-lexical/.github/ISSUE_TEMPLATE/bug_report.md +++ /dev/null @@ -1,41 +0,0 @@ ---- -name: Bug report -about: Create a report to help us improve. -title: "[BUG]" -labels: bug -assignees: Alexhuszagh - ---- - -## Description - -Please include a clear and concise description of the bug. If the bug includes a security vulnerability, you may also privately report the issue to the [maintainer](mailto:ahuszagh@gmail.com). - -## Prerequisites - -Here are a few things you should provide to help me understand the issue: - -- Rust version: `rustc -V` -- minimal-lexical version: - -## Test case - -Please provide a short, complete (with crate import, etc) test case for -the issue, showing clearly the expected and obtained results. - -Example test case: - -``` -#[macro_use] -extern crate minimal_Lexical; - -fn main() { - let integer = b"1"; - let fraction = b"2345"; - let float: f64 = minimal_lexical::parse_float(integer.iter(), fraction.iter(), 0); - assert_eq!(value, 1.2345); -} -``` - -## Additional Context -Add any other context about the problem here. diff --git a/vendor/minimal-lexical/.github/ISSUE_TEMPLATE/custom.md b/vendor/minimal-lexical/.github/ISSUE_TEMPLATE/custom.md deleted file mode 100644 index e12e02934a854b..00000000000000 --- a/vendor/minimal-lexical/.github/ISSUE_TEMPLATE/custom.md +++ /dev/null @@ -1,21 +0,0 @@ ---- -name: Custom issue template -about: Issue template for miscellaneous issues. -title: "[OTHER]" -labels: '' -assignees: Alexhuszagh - ---- - -## Prerequisites - -If applicable to the issue, here are a few things you should provide to help me understand the issue: - -- Rust version: `rustc -V` -- minimal-lexical version: - -## Description -Please include a clear and concise description of the issue. - -## Additional Context -Add any other context or screenshots about the issue here. diff --git a/vendor/minimal-lexical/.github/ISSUE_TEMPLATE/documentation.md b/vendor/minimal-lexical/.github/ISSUE_TEMPLATE/documentation.md deleted file mode 100644 index 2d6b3e4ba1e5ba..00000000000000 --- a/vendor/minimal-lexical/.github/ISSUE_TEMPLATE/documentation.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -name: Documentation -about: Update the project's documentation. -title: "[DOC]" -labels: documentation -assignees: Alexhuszagh - ---- - -## Description -Please include a clear and concise description of the issue. - -Ex: Documentation for `parse_float` contains a typo. - -## Additional Context -Add any other context or screenshots about the issue here. diff --git a/vendor/minimal-lexical/.github/ISSUE_TEMPLATE/feature_request.md b/vendor/minimal-lexical/.github/ISSUE_TEMPLATE/feature_request.md deleted file mode 100644 index ef7e7fe14fdef7..00000000000000 --- a/vendor/minimal-lexical/.github/ISSUE_TEMPLATE/feature_request.md +++ /dev/null @@ -1,27 +0,0 @@ ---- -name: Feature request -about: Suggest an idea for this project. -title: "[FEATURE]" -labels: enhancement -assignees: Alexhuszagh - ---- - -## Problem -A clear and concise description of what the problem is. Ex. minimal-lexical does not parse standard-conforming JSON numbers. - -## Solution -A clear and concise description of what you want to happen. 
- -## Prerequisites - -If applicable to the feature request, here are a few things you should provide to help me understand the issue: - -- Rust version: `rustc -V` -- minimal-lexical version: - -## Alternatives -A clear and concise description of any alternative solutions or features you've considered. - -## Additional Context -Add any other context or screenshots about the feature request here. diff --git a/vendor/minimal-lexical/.github/ISSUE_TEMPLATE/question.md b/vendor/minimal-lexical/.github/ISSUE_TEMPLATE/question.md deleted file mode 100644 index 56ffb70dc2faac..00000000000000 --- a/vendor/minimal-lexical/.github/ISSUE_TEMPLATE/question.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -name: Question -about: Have a question how to use minimal-lexical? -title: "[QUESTION]" -labels: question -assignees: Alexhuszagh - ---- - -## Question -A clear and concise description of what the question is. Ex. how do I use minimal-lexical without a system allocator? diff --git a/vendor/minimal-lexical/.github/PULL_REQUEST_TEMPLATE/bug_fix.md b/vendor/minimal-lexical/.github/PULL_REQUEST_TEMPLATE/bug_fix.md deleted file mode 100644 index c4b2874c3a2ab3..00000000000000 --- a/vendor/minimal-lexical/.github/PULL_REQUEST_TEMPLATE/bug_fix.md +++ /dev/null @@ -1,27 +0,0 @@ ---- -name: Bug fix -about: Fix a bug in minimal-lexical. -title: "[BUG]" -labels: bug -assignees: Alexhuszagh - ---- - -**NOTE:** -- If you have made non-trivial changes to the code, please make sure to run unittests prior to committing. -- If you have made any changes to parsing algorithms, please run at least `test-parse-golang` or `test-parse-unittests` with `feature = -comprehensive_float_test"` enabled prior to committing, to ensure there are no regressions. -- Please run `cargo fmt` on nightly prior to committing. - -## Optional Debugging Information - -If applicable to the issue, here are a few things you should provide to help me understand the issue: - -- Rust version: `rustc -V` -- minimal-lexical version: - -## Description -Please include a clear and concise description of the changes made. - -## Additional Context -Add any other context or screenshots about the bug fix here. diff --git a/vendor/minimal-lexical/.github/PULL_REQUEST_TEMPLATE/custom.md b/vendor/minimal-lexical/.github/PULL_REQUEST_TEMPLATE/custom.md deleted file mode 100644 index b6cd5d97dac363..00000000000000 --- a/vendor/minimal-lexical/.github/PULL_REQUEST_TEMPLATE/custom.md +++ /dev/null @@ -1,22 +0,0 @@ ---- -name: Custom pull request template -about: Pull request template for miscellaneous changes. -title: "[OTHER]" -labels: '' -assignees: Alexhuszagh - ---- - -**NOTE:** -- If you have made non-trivial changes to the code, please make sure to run unittests prior to committing. -- Please run `cargo fmt` on nightly prior to committing. - -## Optional Debugging Information - -If applicable to the issue, here are a few things you should provide to help me understand the issue: - -- Rust version: `rustc -V` -- minimal-lexical version: - -## Description -Please include a clear and concise description of the changes. diff --git a/vendor/minimal-lexical/.github/PULL_REQUEST_TEMPLATE/documentation.md b/vendor/minimal-lexical/.github/PULL_REQUEST_TEMPLATE/documentation.md deleted file mode 100644 index 233f87d1b46d34..00000000000000 --- a/vendor/minimal-lexical/.github/PULL_REQUEST_TEMPLATE/documentation.md +++ /dev/null @@ -1,21 +0,0 @@ ---- -name: Documentation -about: Update the project's documentation. 
-title: "[DOC]" -labels: documentation -assignees: Alexhuszagh - ---- - -**NOTE:** -- If you have made any changes to doc comments, please run `cargo fmt` on nightly prior to committing. - -## Description -Please include a clear and concise description of fixes made to the documentation. - -Ex: Fixed a backtick leading to improper formatting in README. -Ex: Fixed code sample for `parse_partial` in README. -Ex: Updated outdated doc comments in `parse_float`. - -## Additional Context -Add any other context or screenshots about the issue here. diff --git a/vendor/minimal-lexical/.github/workflows/Cross.yml b/vendor/minimal-lexical/.github/workflows/Cross.yml deleted file mode 100644 index d19d10427146a6..00000000000000 --- a/vendor/minimal-lexical/.github/workflows/Cross.yml +++ /dev/null @@ -1,90 +0,0 @@ -name: Cross - -on: - pull_request: - branches: [main] - workflow_dispatch: - -jobs: - cross: - name: Rust ${{matrix.target}} - runs-on: ubuntu-latest - strategy: - fail-fast: false - matrix: - target: - # Android - - aarch64-linux-android - - arm-linux-androideabi - - armv7-linux-androideabi - - i686-linux-android - - x86_64-linux-android - - # Linux - - aarch64-unknown-linux-gnu - - arm-unknown-linux-gnueabi - - armv7-unknown-linux-gnueabihf - - i686-unknown-linux-gnu - - i686-unknown-linux-musl - - mips-unknown-linux-gnu - - mips64-unknown-linux-gnuabi64 - - mips64el-unknown-linux-gnuabi64 - - mipsel-unknown-linux-gnu - - powerpc64le-unknown-linux-gnu - - x86_64-unknown-linux-gnu - - x86_64-unknown-linux-musl - - # Windows - - x86_64-pc-windows-gnu - - steps: - - uses: actions/checkout@v2 - - uses: actions-rs/toolchain@v1 - with: - toolchain: stable - target: ${{matrix.target}} - override: true - - uses: actions-rs/cargo@v1 - with: - use-cross: true - command: check - args: --target ${{matrix.target}} - - uses: actions-rs/cargo@v1 - with: - use-cross: true - command: test - args: --target ${{matrix.target}} - - uses: actions-rs/cargo@v1 - with: - use-cross: true - command: test - args: --target ${{matrix.target}} --features=compact - - notest: - name: Rust ${{matrix.target}} - runs-on: ubuntu-latest - strategy: - fail-fast: false - matrix: - target: - # Linux - - powerpc64-unknown-linux-gnu - - s390x-unknown-linux-gnu - - # FreeBSD - - i686-unknown-freebsd - - x86_64-unknown-freebsd - - x86_64-unknown-netbsd - - steps: - - uses: actions/checkout@v2 - - uses: actions-rs/toolchain@v1 - with: - toolchain: stable - target: ${{matrix.target}} - override: true - - uses: actions-rs/cargo@v1 - with: - use-cross: true - command: check - args: --target ${{matrix.target}} diff --git a/vendor/minimal-lexical/.github/workflows/Features.yml b/vendor/minimal-lexical/.github/workflows/Features.yml deleted file mode 100644 index 2a940e2a69381c..00000000000000 --- a/vendor/minimal-lexical/.github/workflows/Features.yml +++ /dev/null @@ -1,23 +0,0 @@ -name: Features - -on: - pull_request: - branches: [main] - workflow_dispatch: - -jobs: - features: - name: Test Feature Combinations - runs-on: ubuntu-latest - strategy: - fail-fast: true - steps: - - uses: actions/checkout@v2 - - name: Install latest nightly - uses: actions-rs/toolchain@v1 - with: - toolchain: nightly - override: true - components: rustfmt, clippy - - run: ci/test.sh - - run: NIGHTLY=1 NO_STD=1 ci/test.sh diff --git a/vendor/minimal-lexical/.github/workflows/OSX.yml b/vendor/minimal-lexical/.github/workflows/OSX.yml deleted file mode 100644 index e835250eca151e..00000000000000 --- a/vendor/minimal-lexical/.github/workflows/OSX.yml +++ 
/dev/null @@ -1,40 +0,0 @@ -name: OSX - -on: - pull_request: - branches: [main] - workflow_dispatch: - -jobs: - cross: - name: Rust ${{matrix.target}} - runs-on: macos-latest - strategy: - fail-fast: false - matrix: - target: - # iOS targets don't work, since rust-embedded doesn't provide images. - - x86_64-apple-darwin - - steps: - - uses: actions/checkout@v2 - - uses: actions-rs/toolchain@v1 - with: - toolchain: stable - target: ${{matrix.target}} - override: true - - uses: actions-rs/cargo@v1 - with: - use-cross: true - command: check - args: --target ${{matrix.target}} - - uses: actions-rs/cargo@v1 - with: - use-cross: true - command: test - args: --target ${{matrix.target}} - - uses: actions-rs/cargo@v1 - with: - use-cross: true - command: test - args: --target ${{matrix.target}} --features=compact diff --git a/vendor/minimal-lexical/.github/workflows/Simple.yml b/vendor/minimal-lexical/.github/workflows/Simple.yml deleted file mode 100644 index 02f63af306eec1..00000000000000 --- a/vendor/minimal-lexical/.github/workflows/Simple.yml +++ /dev/null @@ -1,41 +0,0 @@ -name: Simple - -on: - push: - branches: [main] - pull_request: - branches: [main] - -jobs: - test: - name: Rust ${{matrix.rust}} - runs-on: ubuntu-latest - strategy: - fail-fast: false - matrix: - rust: [1.36.0, 1.41.0, 1.46.0, 1.51.0, stable, beta, nightly] - steps: - - uses: actions/checkout@v2 - with: - submodules: recursive - - uses: dtolnay/rust-toolchain@master - with: - toolchain: ${{matrix.rust}} - - run: cargo check - - run: cargo test - - run: cargo test --features=compact - - check: - name: Lint code - runs-on: ubuntu-latest - strategy: - fail-fast: true - steps: - - uses: actions/checkout@v2 - - name: Install latest nightly - uses: actions-rs/toolchain@v1 - with: - toolchain: nightly - override: true - components: rustfmt, clippy - - run: ci/check.sh diff --git a/vendor/minimal-lexical/.github/workflows/Valgrind.yml b/vendor/minimal-lexical/.github/workflows/Valgrind.yml deleted file mode 100644 index 298a5ce109e495..00000000000000 --- a/vendor/minimal-lexical/.github/workflows/Valgrind.yml +++ /dev/null @@ -1,24 +0,0 @@ -name: Valgrind - -on: - pull_request: - branches: [main] - workflow_dispatch: - -jobs: - valgrind: - name: Valgrind Tests - runs-on: ubuntu-latest - strategy: - fail-fast: true - steps: - - uses: actions/checkout@v2 - - name: Install latest nightly - uses: actions-rs/toolchain@v1 - with: - toolchain: nightly - override: true - - run: sudo apt-get install valgrind - - run: cargo +nightly install cargo-valgrind - - run: cargo +nightly valgrind test --release - - run: cargo +nightly valgrind test --all-features --release diff --git a/vendor/minimal-lexical/.gitmodules b/vendor/minimal-lexical/.gitmodules deleted file mode 100644 index f06dee03cf651a..00000000000000 --- a/vendor/minimal-lexical/.gitmodules +++ /dev/null @@ -1,4 +0,0 @@ -[submodule "data/test-parse-golang/parse-number-fxx-test-data"] - path = data/test-parse-golang/parse-number-fxx-test-data - url = https://github.com/nigeltao/parse-number-fxx-test-data - shallow = true diff --git a/vendor/minimal-lexical/CHANGELOG b/vendor/minimal-lexical/CHANGELOG deleted file mode 100644 index b1fd38ac143a7f..00000000000000 --- a/vendor/minimal-lexical/CHANGELOG +++ /dev/null @@ -1,38 +0,0 @@ -# Changelog - -Notes significant changes to minimal-lexical. - -The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), -and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). 
- -## [0.1.4] 2021-10-02 -### Added -- Missing license details for `src/bellerophon.rs`. - -## [0.2.0] 2021-09-10 -### Changed -- `no_alloc` feature flag was replaced with an `alloc` feature flag. - -## [0.1.3] 2021-09-04 -### Added -- Added the `compact` feature, which sacrifices performance for smaller binary sizes. -- Added the `nightly` feature, which adds inline ASM to use FPU instructions for to ensure proper rounding on x86 targets using the x87 FPU without SSE2. - -### Changed -- Removed stackvec dependent, even on `no_alloc`. -- Improved the algorithms for parsing. -- Simplified big-integer arithmetic, and the slow path algorithms. -- Reduced the binary sizes. -- Added optimizations for small floats. - -## [0.1.2] 2021-05-09 -### Added -- Remove cached_float and infer exponents rather than store them. - -## [0.1.1] 2021-05-08 -### Added -- Added the Eisel-Lemire algorithm. - -## [0.1.0] 2021-04-27 -### Added -- Initial version. diff --git a/vendor/minimal-lexical/CODE_OF_CONDUCT.md b/vendor/minimal-lexical/CODE_OF_CONDUCT.md deleted file mode 100644 index 74fd657be0273d..00000000000000 --- a/vendor/minimal-lexical/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,141 +0,0 @@ -# Code of Conduct - -## When Something Happens - -If you see a Code of Conduct violation, follow these steps: - -1. Let the person know that what they did is not appropriate and ask them to stop and/or edit their message(s) or commits. -2. That person should immediately stop the behavior and correct the issue. -3. If this doesn’t happen, or if you're uncomfortable speaking up, [contact the maintainers](#contacting-maintainers). -4. As soon as available, a maintainer will look into the issue, and take [further action (see below)](#further-enforcement), starting with a warning, then temporary block, then long-term repo or organization ban. - -When reporting, please include any relevant details, links, screenshots, context, or other information that may be used to better understand and resolve the situation. - -**The maintainer team will prioritize the well-being and comfort of the recipients of the violation over the comfort of the violator.** See [some examples below](#enforcement-examples). - -## Our Pledge - -In the interest of fostering an open and welcoming environment, we as contributors and maintainers of this project pledge to making participation in our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, technical preferences, nationality, personal appearance, race, religion, or sexual identity and orientation. - -## Our Standards - -Examples of behavior that contributes to creating a positive environment include: - - * Using welcoming and inclusive language. - * Being respectful of differing viewpoints and experiences. - * Gracefully accepting constructive feedback. - * Focusing on what is best for the community. - * Showing empathy and kindness towards other community members. - * Encouraging and raising up your peers in the project so you can all bask in hacks and glory. - -Examples of unacceptable behavior by participants include: - - * The use of sexualized language or imagery and unwelcome sexual attention or advances, including when simulated online. The only exception to sexual topics is channels/spaces specifically for topics of sexual identity. - * Casual mention of slavery or indentured servitude and/or false comparisons of one's occupation or situation to slavery. 
Please consider using or asking about alternate terminology when referring to such metaphors in technology. - * Making light of/making mocking comments about trigger warnings and content warnings. - * Trolling, insulting/derogatory comments, and personal or political attacks. - * Public or private harassment, deliberate intimidation, or threats. - * Publishing others' private information, such as a physical or electronic address, without explicit permission. This includes any sort of "outing" of any aspect of someone's identity without their consent. - * Publishing private screenshots or quotes of interactions in the context of this project without all quoted users' *explicit* consent. - * Publishing of private communication that doesn't have to do with reporting harrassment. - * Any of the above even when [presented as "ironic" or "joking"](https://en.wikipedia.org/wiki/Hipster_racism). - * Any attempt to present "reverse-ism" versions of the above as violations. Examples of reverse-isms are "reverse racism", "reverse sexism", "heterophobia", and "cisphobia". - * Unsolicited explanations under the assumption that someone doesn't already know it. Ask before you teach! Don't assume what people's knowledge gaps are. - * [Feigning or exaggerating surprise](https://www.recurse.com/manual#no-feigned-surprise) when someone admits to not knowing something. - * "[Well-actuallies](https://www.recurse.com/manual#no-well-actuallys)" - * Other conduct which could reasonably be considered inappropriate in a professional or community setting. - -## Scope - -This Code of Conduct applies both within spaces involving this project and in other spaces involving community members. This includes the repository, its Pull Requests and Issue tracker, private email communications in the context of the project, and any events where members of the project are participating, as well as adjacent communities and venues affecting the project's members. - -Depending on the violation, the maintainers may decide that violations of this code of conduct that have happened outside of the scope of the community may deem an individual unwelcome, and take appropriate action to maintain the comfort and safety of its members. - -### Other Community Standards - -As a project on GitHub, this project is additionally covered by the [GitHub Community Guidelines](https://help.github.com/articles/github-community-guidelines/). - -Enforcement of those guidelines after violations overlapping with the above are the responsibility of the entities, and enforcement may happen in any or all of the services/communities. - -## Maintainer Enforcement Process - -Once the maintainers get involved, they will follow a documented series of steps and do their best to preserve the well-being of project members. This section covers actual concrete steps. - -### Contacting Maintainers - -You may get in touch with the maintainer team through any of the following methods: - - Through email: - ahuszagh@gmail.com (Alex Huszagh) - -### Further Enforcement - -If you've already followed the [initial enforcement steps](#enforcement), these are the steps maintainers will take for further enforcement, as needed: - - 1. Repeat the request to stop. - 2. If the person doubles down, they will have offending messages removed or edited by a maintainers given an official warning. The PR or Issue may be locked. - 3. If the behavior continues or is repeated later, the person will be blocked from participating for 24 hours. - 4. 
If the behavior continues or is repeated after the temporary block, a long-term (6-12mo) ban will be used. - -On top of this, maintainers may remove any offending messages, images, contributions, etc, as they deem necessary. - -Maintainers reserve full rights to skip any of these steps, at their discretion, if the violation is considered to be a serious and/or immediate threat to the health and well-being of members of the community. These include any threats, serious physical or verbal attacks, and other such behavior that would be completely unacceptable in any social setting that puts our members at risk. - -Members expelled from events or venues with any sort of paid attendance will not be refunded. - -### Who Watches the Watchers? - -Maintainers and other leaders who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership. These may include anything from removal from the maintainer team to a permanent ban from the community. - -Additionally, as a project hosted on both GitHub and npm, [their own Codes of Conducts may be applied against maintainers of this project](#other-community-standards), externally of this project's procedures. - -### Enforcement Examples - -#### The Best Case - -The vast majority of situations work out like this. This interaction is common, and generally positive. - -> Alex: "Yeah I used X and it was really crazy!" - -> Patt (not a maintainer): "Hey, could you not use that word? What about 'ridiculous' instead?" - -> Alex: "oh sorry, sure." -> edits old comment to say "it was really confusing!" - -#### The Maintainer Case - -Sometimes, though, you need to get maintainers involved. Maintainers will do their best to resolve conflicts, but people who were harmed by something **will take priority**. - -> Patt: "Honestly, sometimes I just really hate using $library and anyone who uses it probably sucks at their job." - -> Alex: "Whoa there, could you dial it back a bit? There's a CoC thing about attacking folks' tech use like that." - -> Patt: "I'm not attacking anyone, what's your problem?" - -> Alex: "@maintainers hey uh. Can someone look at this issue? Patt is getting a bit aggro. I tried to nudge them about it, but nope." - -> KeeperOfCommitBits: (on issue) "Hey Patt, maintainer here. Could you tone it down? This sort of attack is really not okay in this space." - -> Patt: "Leave me alone I haven't said anything bad wtf is wrong with you." - -> KeeperOfCommitBits: (deletes user's comment), "@patt I mean it. Please refer to the CoC over at (URL to this CoC) if you have questions, but you can consider this an actual warning. I'd appreciate it if you reworded your messages in this thread, since they made folks there uncomfortable. Let's try and be kind, yeah?" - -> Patt: "@keeperofbits Okay sorry. I'm just frustrated and I'm kinda burnt out and I guess I got carried away. I'll DM Alex a note apologizing and edit my messages. Sorry for the trouble." - -> KeeperOfCommitBits: "@patt Thanks for that. I hear you on the stress. Burnout sucks :/. Have a good one!" - -#### The Nope Case - -> PepeTheFrog🐸: "Hi, I am a literal actual nazi and I think white supremacists are quite fashionable." - -> Patt: "NOOOOPE. OH NOPE NOPE." - -> Alex: "JFC NO. NOPE. @keeperofbits NOPE NOPE LOOK HERE" - -> KeeperOfCommitBits: "👀 Nope. NOPE NOPE NOPE. 🔥" - -> PepeTheFrog🐸 has been banned from all organization or user repositories belonging to KeeperOfCommitBits. 
- -## Attribution - -This Code of Conduct was generated using [WeAllJS Code of Conduct Generator](https://npm.im/weallbehave), which is based on the [WeAllJS Code of Conduct](https://wealljs.org/code-of-conduct), which is itself based on -[Contributor Covenant](http://contributor-covenant.org), version 1.4, available at [http://contributor-covenant.org/version/1/4](http://contributor-covenant.org/version/1/4), and the LGBTQ in Technology Slack [Code of Conduct](http://lgbtq.technology/coc.html). diff --git a/vendor/minimal-lexical/Cargo.toml b/vendor/minimal-lexical/Cargo.toml deleted file mode 100644 index b9b52cbe3ff5a7..00000000000000 --- a/vendor/minimal-lexical/Cargo.toml +++ /dev/null @@ -1,33 +0,0 @@ -# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO -# -# When uploading crates to the registry Cargo will automatically -# "normalize" Cargo.toml files for maximal compatibility -# with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g., crates.io) dependencies. -# -# If you are reading this file be aware that the original Cargo.toml -# will likely look very different (and much more reasonable). -# See Cargo.toml.orig for the original contents. - -[package] -edition = "2018" -name = "minimal-lexical" -version = "0.2.1" -authors = ["Alex Huszagh "] -exclude = ["assets/*", "ci/*", "docs/*", "etc/*", "fuzz/*", "examples/*", "scripts/*"] -autoexamples = false -description = "Fast float parsing conversion routines." -documentation = "https://docs.rs/minimal-lexical" -readme = "README.md" -keywords = ["parsing", "no_std"] -categories = ["parsing", "no-std"] -license = "MIT/Apache-2.0" -repository = "https://github.com/Alexhuszagh/minimal-lexical" - -[features] -alloc = [] -compact = [] -default = ["std"] -lint = [] -nightly = [] -std = [] diff --git a/vendor/minimal-lexical/LICENSE-APACHE b/vendor/minimal-lexical/LICENSE-APACHE deleted file mode 100644 index 11069edd79019f..00000000000000 --- a/vendor/minimal-lexical/LICENSE-APACHE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. 
- - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - -2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - -3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - -4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - -5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - -6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - -8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - -Copyright [yyyy] [name of copyright owner] - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. diff --git a/vendor/minimal-lexical/LICENSE-MIT b/vendor/minimal-lexical/LICENSE-MIT deleted file mode 100644 index 31aa79387f27e7..00000000000000 --- a/vendor/minimal-lexical/LICENSE-MIT +++ /dev/null @@ -1,23 +0,0 @@ -Permission is hereby granted, free of charge, to any -person obtaining a copy of this software and associated -documentation files (the "Software"), to deal in the -Software without restriction, including without -limitation the rights to use, copy, modify, merge, -publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software -is furnished to do so, subject to the following -conditions: - -The above copyright notice and this permission notice -shall be included in all copies or substantial portions -of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT -SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -DEALINGS IN THE SOFTWARE. diff --git a/vendor/minimal-lexical/LICENSE.md b/vendor/minimal-lexical/LICENSE.md deleted file mode 100644 index 2442bbfbf3cc80..00000000000000 --- a/vendor/minimal-lexical/LICENSE.md +++ /dev/null @@ -1,37 +0,0 @@ -Minimal-lexical is dual licensed under the Apache 2.0 license as well as the MIT -license. See the LICENCE-MIT and the LICENCE-APACHE files for the licenses. - ---- - -`src/bellerophon.rs` is loosely based off the Golang implementation, -found [here](https://github.com/golang/go/blob/b10849fbb97a2244c086991b4623ae9f32c212d0/src/strconv/extfloat.go). -That code (used if the `compact` feature is enabled) is subject to a -[3-clause BSD license](https://github.com/golang/go/blob/b10849fbb97a2244c086991b4623ae9f32c212d0/LICENSE): - -Copyright (c) 2009 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/minimal-lexical/README.md b/vendor/minimal-lexical/README.md deleted file mode 100644 index 7805296510719b..00000000000000 --- a/vendor/minimal-lexical/README.md +++ /dev/null @@ -1,102 +0,0 @@ -minimal-lexical -=============== - -This is a minimal version of [rust-lexical](https://github.com/Alexhuszagh/rust-lexical), meant to allow efficient round-trip float parsing. minimal-lexical implements a correct, fast float parser. - -Due to the small, stable nature of minimal-lexical, it is also well-adapted to private forks. If you do privately fork minimal-lexical, I recommend you contact me via [email](mailto:ahuszagh@gmail.com) or [Twitter](https://twitter.com/KardOnIce), so I can notify you of feature updates, bug fixes, or security vulnerabilities, as well as help you implement custom feature requests. I will not use your information for any other purpose, including, but not limited to disclosing your project or organization's use of minimal-lexical. 
- -minimal-lexical is designed for fast compile times and small binaries sizes, at the expense of a minor amount of performance. For improved performance, feel free to fork minimal-lexical with more aggressive inlining. - -**Similar Projects** - -For a high-level, all-in-one number conversion routines, see [rust-lexical](https://github.com/Alexhuszagh/rust-lexical). - -**Table Of Contents** - -- [Getting Started](#getting-started) -- [Recipes](#recipes) -- [Algorithms](#algorithms) -- [Platform Support](platform-support) -- [Minimum Version Support](minimum-version-support) -- [Changelog](#changelog) -- [License](#license) -- [Contributing](#contributing) - -# Getting Started - -First, add the following to your `Cargo.toml`. - -```toml -[dependencies] -minimal-lexical = "0.2" -``` - -Next, to parse a simple float, use the following: - -```rust -extern crate minimal_lexical; - -// Let's say we want to parse "1.2345". -// First, we need an external parser to extract the integer digits ("1"), -// the fraction digits ("2345"), and then parse the exponent to a 32-bit -// integer (0). -// Warning: -// -------- -// Please note that leading zeros must be trimmed from the integer, -// and trailing zeros must be trimmed from the fraction. This cannot -// be handled by minimal-lexical, since we accept iterators -let integer = b"1"; -let fraction = b"2345"; -let float: f64 = minimal_lexical::parse_float(integer.iter(), fraction.iter(), 0); -println!("float={:?}", float); // 1.235 -``` - -# Recipes - -You may be asking: where is the actual parser? Due to variation in float formats, and the goal of integrating utility for various data-interchange language parsers, such functionality would be beyond the scope of this library. - -For example, the following float is valid in Rust strings, but is invalid in JSON or TOML: -```json -1.e7 -``` - -Therefore, to use the library, you need functionality that extracts the significant digits to pass to `create_float`. Please see [simple-example](https://github.com/Alexhuszagh/minimal-lexical/blob/master/examples/simple.rs) for a simple, annotated example on how to use minimal-lexical as a parser. - -# Algorithms - -For an in-depth explanation on the algorithms minimal-lexical uses, please see [lexical-core#string-to-float](https://github.com/Alexhuszagh/rust-lexical/tree/master/lexical-core#string-to-float). - -# Platform Support - -minimal-lexical is tested on a wide variety of platforms, including big and small-endian systems, to ensure portable code. Supported architectures include: -- x86_64 Linux, Windows, macOS, Android, iOS, FreeBSD, and NetBSD. -- x86 Linux, macOS, Android, iOS, and FreeBSD. -- aarch64 (ARM8v8-A) Linux, Android, and iOS. -- armv7 (ARMv7-A) Linux, Android, and iOS. -- arm (ARMv6) Linux, and Android. -- mips (MIPS) Linux. -- mipsel (MIPS LE) Linux. -- mips64 (MIPS64 BE) Linux. -- mips64el (MIPS64 LE) Linux. -- powerpc (PowerPC) Linux. -- powerpc64 (PPC64) Linux. -- powerpc64le (PPC64LE) Linux. -- s390x (IBM Z) Linux. - -minimal-lexical should also work on a wide variety of other architectures and ISAs. If you have any issue compiling minimal-lexical on any architecture, please file a bug report. - -# Minimum Version Support - -Minimal-lexical is tested to support Rustc 1.36+, including stable, beta, and nightly. Please report any errors compiling a supported lexical version on a compatible Rustc version. 
Please note we may increment the MSRV for compiler versions older than 18 months, to support at least the current Debian stable version, without breaking changes. - -# Changelog - -All changes are documented in [CHANGELOG](CHANGELOG). - -# License - -Minimal-lexical is dual licensed under the Apache 2.0 license as well as the MIT license. See the [LICENSE.md](LICENSE.md) file for full license details. - -# Contributing - -Unless you explicitly state otherwise, any contribution intentionally submitted for inclusion in minimal-lexical by you, as defined in the Apache-2.0 license, shall be dual licensed as above, without any additional terms or conditions. diff --git a/vendor/minimal-lexical/clippy.toml b/vendor/minimal-lexical/clippy.toml deleted file mode 100644 index cda8d17eed44c7..00000000000000 --- a/vendor/minimal-lexical/clippy.toml +++ /dev/null @@ -1 +0,0 @@ -avoid-breaking-exported-api = false diff --git a/vendor/minimal-lexical/rustfmt.toml b/vendor/minimal-lexical/rustfmt.toml deleted file mode 100644 index 2361c9d479b295..00000000000000 --- a/vendor/minimal-lexical/rustfmt.toml +++ /dev/null @@ -1,16 +0,0 @@ -# Requires nightly to do proper formatting. -use_small_heuristics = "Off" -use_field_init_shorthand = true -trailing_semicolon = true -newline_style = "Unix" -match_block_trailing_comma = true -empty_item_single_line = false -enum_discrim_align_threshold = 40 -fn_args_layout = "Tall" -fn_single_line = false -format_macro_matchers = true -format_macro_bodies = true -imports_indent = "Block" -imports_layout = "HorizontalVertical" -indent_style = "Block" -match_arm_blocks = true diff --git a/vendor/minimal-lexical/src/bellerophon.rs b/vendor/minimal-lexical/src/bellerophon.rs deleted file mode 100644 index 86a2023d09e295..00000000000000 --- a/vendor/minimal-lexical/src/bellerophon.rs +++ /dev/null @@ -1,391 +0,0 @@ -//! An implementation of Clinger's Bellerophon algorithm. -//! -//! This is a moderate path algorithm that uses an extended-precision -//! float, represented in 80 bits, by calculating the bits of slop -//! and determining if those bits could prevent unambiguous rounding. -//! -//! This algorithm requires less static storage than the Lemire algorithm, -//! and has decent performance, and is therefore used when non-decimal, -//! non-power-of-two strings need to be parsed. Clinger's algorithm -//! is described in depth in "How to Read Floating Point Numbers Accurately.", -//! available online [here](http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.45.4152&rep=rep1&type=pdf). -//! -//! This implementation is loosely based off the Golang implementation, -//! found [here](https://github.com/golang/go/blob/b10849fbb97a2244c086991b4623ae9f32c212d0/src/strconv/extfloat.go). -//! This code is therefore subject to a 3-clause BSD license. - -#![cfg(feature = "compact")] -#![doc(hidden)] - -use crate::extended_float::ExtendedFloat; -use crate::mask::{lower_n_halfway, lower_n_mask}; -use crate::num::Float; -use crate::number::Number; -use crate::rounding::{round, round_nearest_tie_even}; -use crate::table::BASE10_POWERS; - -// ALGORITHM -// --------- - -/// Core implementation of the Bellerophon algorithm. -/// -/// Create an extended-precision float, scale it to the proper radix power, -/// calculate the bits of slop, and return the representation. The value -/// will always be guaranteed to be within 1 bit, rounded-down, of the real -/// value. If a negative exponent is returned, this represents we were -/// unable to unambiguously round the significant digits. 
-/// -/// This has been modified to return a biased, rather than unbiased exponent. -pub fn bellerophon(num: &Number) -> ExtendedFloat { - let fp_zero = ExtendedFloat { - mant: 0, - exp: 0, - }; - let fp_inf = ExtendedFloat { - mant: 0, - exp: F::INFINITE_POWER, - }; - - // Early short-circuit, in case of literal 0 or infinity. - // This allows us to avoid narrow casts causing numeric overflow, - // and is a quick check for any radix. - if num.mantissa == 0 || num.exponent <= -0x1000 { - return fp_zero; - } else if num.exponent >= 0x1000 { - return fp_inf; - } - - // Calculate our indexes for our extended-precision multiplication. - // This narrowing cast is safe, since exponent must be in a valid range. - let exponent = num.exponent as i32 + BASE10_POWERS.bias; - let small_index = exponent % BASE10_POWERS.step; - let large_index = exponent / BASE10_POWERS.step; - - if exponent < 0 { - // Guaranteed underflow (assign 0). - return fp_zero; - } - if large_index as usize >= BASE10_POWERS.large.len() { - // Overflow (assign infinity) - return fp_inf; - } - - // Within the valid exponent range, multiply by the large and small - // exponents and return the resulting value. - - // Track errors to as a factor of unit in last-precision. - let mut errors: u32 = 0; - if num.many_digits { - errors += error_halfscale(); - } - - // Multiply by the small power. - // Check if we can directly multiply by an integer, if not, - // use extended-precision multiplication. - let mut fp = ExtendedFloat { - mant: num.mantissa, - exp: 0, - }; - match fp.mant.overflowing_mul(BASE10_POWERS.get_small_int(small_index as usize)) { - // Overflow, multiplication unsuccessful, go slow path. - (_, true) => { - normalize(&mut fp); - fp = mul(&fp, &BASE10_POWERS.get_small(small_index as usize)); - errors += error_halfscale(); - }, - // No overflow, multiplication successful. - (mant, false) => { - fp.mant = mant; - normalize(&mut fp); - }, - } - - // Multiply by the large power. - fp = mul(&fp, &BASE10_POWERS.get_large(large_index as usize)); - if errors > 0 { - errors += 1; - } - errors += error_halfscale(); - - // Normalize the floating point (and the errors). - let shift = normalize(&mut fp); - errors <<= shift; - fp.exp += F::EXPONENT_BIAS; - - // Check for literal overflow, even with halfway cases. - if -fp.exp + 1 > 65 { - return fp_zero; - } - - // Too many errors accumulated, return an error. - if !error_is_accurate::(errors, &fp) { - // Bias the exponent so we know it's invalid. - fp.exp += F::INVALID_FP; - return fp; - } - - // Check if we have a literal 0 or overflow here. - // If we have an exponent of -63, we can still have a valid shift, - // giving a case where we have too many errors and need to round-up. - if -fp.exp + 1 == 65 { - // Have more than 64 bits below the minimum exponent, must be 0. - return fp_zero; - } - - round::(&mut fp, |f, s| { - round_nearest_tie_even(f, s, |is_odd, is_halfway, is_above| { - is_above || (is_odd && is_halfway) - }); - }); - fp -} - -// ERRORS -// ------ - -// Calculate if the errors in calculating the extended-precision float. -// -// Specifically, we want to know if we are close to a halfway representation, -// or halfway between `b` and `b+1`, or `b+h`. The halfway representation -// has the form: -// SEEEEEEEHMMMMMMMMMMMMMMMMMMMMMMM100... 
-// where: -// S = Sign Bit -// E = Exponent Bits -// H = Hidden Bit -// M = Mantissa Bits -// -// The halfway representation has a bit set 1-after the mantissa digits, -// and no bits set immediately afterward, making it impossible to -// round between `b` and `b+1` with this representation. - -/// Get the full error scale. -#[inline(always)] -const fn error_scale() -> u32 { - 8 -} - -/// Get the half error scale. -#[inline(always)] -const fn error_halfscale() -> u32 { - error_scale() / 2 -} - -/// Determine if the number of errors is tolerable for float precision. -fn error_is_accurate(errors: u32, fp: &ExtendedFloat) -> bool { - // Check we can't have a literal 0 denormal float. - debug_assert!(fp.exp >= -64); - - // Determine if extended-precision float is a good approximation. - // If the error has affected too many units, the float will be - // inaccurate, or if the representation is too close to halfway - // that any operations could affect this halfway representation. - // See the documentation for dtoa for more information. - - // This is always a valid u32, since `fp.exp >= -64` - // will always be positive and the significand size is {23, 52}. - let mantissa_shift = 64 - F::MANTISSA_SIZE - 1; - - // The unbiased exponent checks is `unbiased_exp <= F::MANTISSA_SIZE - // - F::EXPONENT_BIAS -64 + 1`, or `biased_exp <= F::MANTISSA_SIZE - 63`, - // or `biased_exp <= mantissa_shift`. - let extrabits = match fp.exp <= -mantissa_shift { - // Denormal, since shifting to the hidden bit still has a negative exponent. - // The unbiased check calculation for bits is `1 - F::EXPONENT_BIAS - unbiased_exp`, - // or `1 - biased_exp`. - true => 1 - fp.exp, - false => 64 - F::MANTISSA_SIZE - 1, - }; - - // Our logic is as follows: we want to determine if the actual - // mantissa and the errors during calculation differ significantly - // from the rounding point. The rounding point for round-nearest - // is the halfway point, IE, this when the truncated bits start - // with b1000..., while the rounding point for the round-toward - // is when the truncated bits are equal to 0. - // To do so, we can check whether the rounding point +/- the error - // are >/< the actual lower n bits. - // - // For whether we need to use signed or unsigned types for this - // analysis, see this example, using u8 rather than u64 to simplify - // things. - // - // # Comparisons - // cmp1 = (halfway - errors) < extra - // cmp1 = extra < (halfway + errors) - // - // # Large Extrabits, Low Errors - // - // extrabits = 8 - // halfway = 0b10000000 - // extra = 0b10000010 - // errors = 0b00000100 - // halfway - errors = 0b01111100 - // halfway + errors = 0b10000100 - // - // Unsigned: - // halfway - errors = 124 - // halfway + errors = 132 - // extra = 130 - // cmp1 = true - // cmp2 = true - // Signed: - // halfway - errors = 124 - // halfway + errors = -124 - // extra = -126 - // cmp1 = false - // cmp2 = true - // - // # Conclusion - // - // Since errors will always be small, and since we want to detect - // if the representation is accurate, we need to use an **unsigned** - // type for comparisons. - let maskbits = extrabits as u64; - let errors = errors as u64; - - // Round-to-nearest, need to use the halfway point. - if extrabits > 64 { - // Underflow, we have a shift larger than the mantissa. - // Representation is valid **only** if the value is close enough - // overflow to the next bit within errors. If it overflows, - // the representation is **not** valid. 
- !fp.mant.overflowing_add(errors).1 - } else { - let mask = lower_n_mask(maskbits); - let extra = fp.mant & mask; - - // Round-to-nearest, need to check if we're close to halfway. - // IE, b10100 | 100000, where `|` signifies the truncation point. - let halfway = lower_n_halfway(maskbits); - let cmp1 = halfway.wrapping_sub(errors) < extra; - let cmp2 = extra < halfway.wrapping_add(errors); - - // If both comparisons are true, we have significant rounding error, - // and the value cannot be exactly represented. Otherwise, the - // representation is valid. - !(cmp1 && cmp2) - } -} - -// MATH -// ---- - -/// Normalize float-point number. -/// -/// Shift the mantissa so the number of leading zeros is 0, or the value -/// itself is 0. -/// -/// Get the number of bytes shifted. -pub fn normalize(fp: &mut ExtendedFloat) -> i32 { - // Note: - // Using the ctlz intrinsic via leading_zeros is way faster (~10x) - // than shifting 1-bit at a time, via while loop, and also way - // faster (~2x) than an unrolled loop that checks at 32, 16, 4, - // 2, and 1 bit. - // - // Using a modulus of pow2 (which will get optimized to a bitwise - // and with 0x3F or faster) is slightly slower than an if/then, - // however, removing the if/then will likely optimize more branched - // code as it removes conditional logic. - - // Calculate the number of leading zeros, and then zero-out - // any overflowing bits, to avoid shl overflow when self.mant == 0. - if fp.mant != 0 { - let shift = fp.mant.leading_zeros() as i32; - fp.mant <<= shift; - fp.exp -= shift; - shift - } else { - 0 - } -} - -/// Multiply two normalized extended-precision floats, as if by `a*b`. -/// -/// The precision is maximal when the numbers are normalized, however, -/// decent precision will occur as long as both values have high bits -/// set. The result is not normalized. -/// -/// Algorithm: -/// 1. Non-signed multiplication of mantissas (requires 2x as many bits as input). -/// 2. Normalization of the result (not done here). -/// 3. Addition of exponents. -pub fn mul(x: &ExtendedFloat, y: &ExtendedFloat) -> ExtendedFloat { - // Logic check, values must be decently normalized prior to multiplication. - debug_assert!(x.mant >> 32 != 0); - debug_assert!(y.mant >> 32 != 0); - - // Extract high-and-low masks. - // Mask is u32::MAX for older Rustc versions. - const LOMASK: u64 = 0xffff_ffff; - let x1 = x.mant >> 32; - let x0 = x.mant & LOMASK; - let y1 = y.mant >> 32; - let y0 = y.mant & LOMASK; - - // Get our products - let x1_y0 = x1 * y0; - let x0_y1 = x0 * y1; - let x0_y0 = x0 * y0; - let x1_y1 = x1 * y1; - - let mut tmp = (x1_y0 & LOMASK) + (x0_y1 & LOMASK) + (x0_y0 >> 32); - // round up - tmp += 1 << (32 - 1); - - ExtendedFloat { - mant: x1_y1 + (x1_y0 >> 32) + (x0_y1 >> 32) + (tmp >> 32), - exp: x.exp + y.exp + 64, - } -} - -// POWERS -// ------ - -/// Precalculated powers of base N for the Bellerophon algorithm. -pub struct BellerophonPowers { - // Pre-calculated small powers. - pub small: &'static [u64], - // Pre-calculated large powers. - pub large: &'static [u64], - /// Pre-calculated small powers as 64-bit integers - pub small_int: &'static [u64], - // Step between large powers and number of small powers. - pub step: i32, - // Exponent bias for the large powers. - pub bias: i32, - /// ceil(log2(radix)) scaled as a multiplier. - pub log2: i64, - /// Bitshift for the log2 multiplier. 
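/// Together with `log2`, this forms a fixed-point approximation of
/// log2(radix): `get_small` and `get_large` below derive the binary exponent
/// of a decimal power as `(1 - 64) + ((log2 * e) >> log2_shift)`, where the
/// `1 - 64` offset accounts for the normalized 64-bit mantissa.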
- pub log2_shift: i32, -} - -/// Allow indexing of values without bounds checking -impl BellerophonPowers { - #[inline] - pub fn get_small(&self, index: usize) -> ExtendedFloat { - let mant = self.small[index]; - let exp = (1 - 64) + ((self.log2 * index as i64) >> self.log2_shift); - ExtendedFloat { - mant, - exp: exp as i32, - } - } - - #[inline] - pub fn get_large(&self, index: usize) -> ExtendedFloat { - let mant = self.large[index]; - let biased_e = index as i64 * self.step as i64 - self.bias as i64; - let exp = (1 - 64) + ((self.log2 * biased_e) >> self.log2_shift); - ExtendedFloat { - mant, - exp: exp as i32, - } - } - - #[inline] - pub fn get_small_int(&self, index: usize) -> u64 { - self.small_int[index] - } -} diff --git a/vendor/minimal-lexical/src/bigint.rs b/vendor/minimal-lexical/src/bigint.rs deleted file mode 100644 index d1d5027a191ee3..00000000000000 --- a/vendor/minimal-lexical/src/bigint.rs +++ /dev/null @@ -1,788 +0,0 @@ -//! A simple big-integer type for slow path algorithms. -//! -//! This includes minimal stackvector for use in big-integer arithmetic. - -#![doc(hidden)] - -#[cfg(feature = "alloc")] -use crate::heapvec::HeapVec; -use crate::num::Float; -#[cfg(not(feature = "alloc"))] -use crate::stackvec::StackVec; -#[cfg(not(feature = "compact"))] -use crate::table::{LARGE_POW5, LARGE_POW5_STEP}; -use core::{cmp, ops, ptr}; - -/// Number of bits in a Bigint. -/// -/// This needs to be at least the number of bits required to store -/// a Bigint, which is `log2(radix**digits)`. -/// ≅ 3600 for base-10, rounded-up. -pub const BIGINT_BITS: usize = 4000; - -/// The number of limbs for the bigint. -pub const BIGINT_LIMBS: usize = BIGINT_BITS / LIMB_BITS; - -#[cfg(feature = "alloc")] -pub type VecType = HeapVec; - -#[cfg(not(feature = "alloc"))] -pub type VecType = StackVec; - -/// Storage for a big integer type. -/// -/// This is used for algorithms when we have a finite number of digits. -/// Specifically, it stores all the significant digits scaled to the -/// proper exponent, as an integral type, and then directly compares -/// these digits. -/// -/// This requires us to store the number of significant bits, plus the -/// number of exponent bits (required) since we scale everything -/// to the same exponent. -#[derive(Clone, PartialEq, Eq)] -pub struct Bigint { - /// Significant digits for the float, stored in a big integer in LE order. - /// - /// This is pretty much the same number of digits for any radix, since the - /// significant digits balances out the zeros from the exponent: - /// 1. Decimal is 1091 digits, 767 mantissa digits + 324 exponent zeros. - /// 2. Base 6 is 1097 digits, or 680 mantissa digits + 417 exponent zeros. - /// 3. Base 36 is 1086 digits, or 877 mantissa digits + 209 exponent zeros. - /// - /// However, the number of bytes required is larger for large radixes: - /// for decimal, we need `log2(10**1091) ≅ 3600`, while for base 36 - /// we need `log2(36**1086) ≅ 5600`. Since we use uninitialized data, - /// we avoid a major performance hit from the large buffer size. - pub data: VecType, -} - -#[allow(clippy::new_without_default)] -impl Bigint { - /// Construct a bigint representing 0. - #[inline(always)] - pub fn new() -> Self { - Self { - data: VecType::new(), - } - } - - /// Construct a bigint from an integer. 
- #[inline(always)] - pub fn from_u64(value: u64) -> Self { - Self { - data: VecType::from_u64(value), - } - } - - #[inline(always)] - pub fn hi64(&self) -> (u64, bool) { - self.data.hi64() - } - - /// Multiply and assign as if by exponentiation by a power. - #[inline] - pub fn pow(&mut self, base: u32, exp: u32) -> Option<()> { - debug_assert!(base == 2 || base == 5 || base == 10); - if base % 5 == 0 { - pow(&mut self.data, exp)?; - } - if base % 2 == 0 { - shl(&mut self.data, exp as usize)?; - } - Some(()) - } - - /// Calculate the bit-length of the big-integer. - #[inline] - pub fn bit_length(&self) -> u32 { - bit_length(&self.data) - } -} - -impl ops::MulAssign<&Bigint> for Bigint { - fn mul_assign(&mut self, rhs: &Bigint) { - self.data *= &rhs.data; - } -} - -/// REVERSE VIEW - -/// Reverse, immutable view of a sequence. -pub struct ReverseView<'a, T: 'a> { - inner: &'a [T], -} - -impl<'a, T> ops::Index for ReverseView<'a, T> { - type Output = T; - - #[inline] - fn index(&self, index: usize) -> &T { - let len = self.inner.len(); - &(*self.inner)[len - index - 1] - } -} - -/// Create a reverse view of the vector for indexing. -#[inline] -pub fn rview(x: &[Limb]) -> ReverseView { - ReverseView { - inner: x, - } -} - -// COMPARE -// ------- - -/// Compare `x` to `y`, in little-endian order. -#[inline] -pub fn compare(x: &[Limb], y: &[Limb]) -> cmp::Ordering { - match x.len().cmp(&y.len()) { - cmp::Ordering::Equal => { - let iter = x.iter().rev().zip(y.iter().rev()); - for (&xi, yi) in iter { - match xi.cmp(yi) { - cmp::Ordering::Equal => (), - ord => return ord, - } - } - // Equal case. - cmp::Ordering::Equal - }, - ord => ord, - } -} - -// NORMALIZE -// --------- - -/// Normalize the integer, so any leading zero values are removed. -#[inline] -pub fn normalize(x: &mut VecType) { - // We don't care if this wraps: the index is bounds-checked. - while let Some(&value) = x.get(x.len().wrapping_sub(1)) { - if value == 0 { - unsafe { x.set_len(x.len() - 1) }; - } else { - break; - } - } -} - -/// Get if the big integer is normalized. -#[inline] -#[allow(clippy::match_like_matches_macro)] -pub fn is_normalized(x: &[Limb]) -> bool { - // We don't care if this wraps: the index is bounds-checked. - match x.get(x.len().wrapping_sub(1)) { - Some(&0) => false, - _ => true, - } -} - -// FROM -// ---- - -/// Create StackVec from u64 value. -#[inline(always)] -#[allow(clippy::branches_sharing_code)] -pub fn from_u64(x: u64) -> VecType { - let mut vec = VecType::new(); - debug_assert!(vec.capacity() >= 2); - if LIMB_BITS == 32 { - vec.try_push(x as Limb).unwrap(); - vec.try_push((x >> 32) as Limb).unwrap(); - } else { - vec.try_push(x as Limb).unwrap(); - } - vec.normalize(); - vec -} - -// HI -// -- - -/// Check if any of the remaining bits are non-zero. -/// -/// # Safety -/// -/// Safe as long as `rindex <= x.len()`. -#[inline] -pub fn nonzero(x: &[Limb], rindex: usize) -> bool { - debug_assert!(rindex <= x.len()); - - let len = x.len(); - let slc = &x[..len - rindex]; - slc.iter().rev().any(|&x| x != 0) -} - -// These return the high X bits and if the bits were truncated. - -/// Shift 32-bit integer to high 64-bits. -#[inline] -pub fn u32_to_hi64_1(r0: u32) -> (u64, bool) { - u64_to_hi64_1(r0 as u64) -} - -/// Shift 2 32-bit integers to high 64-bits. -#[inline] -pub fn u32_to_hi64_2(r0: u32, r1: u32) -> (u64, bool) { - let r0 = (r0 as u64) << 32; - let r1 = r1 as u64; - u64_to_hi64_1(r0 | r1) -} - -/// Shift 3 32-bit integers to high 64-bits. 
-#[inline] -pub fn u32_to_hi64_3(r0: u32, r1: u32, r2: u32) -> (u64, bool) { - let r0 = r0 as u64; - let r1 = (r1 as u64) << 32; - let r2 = r2 as u64; - u64_to_hi64_2(r0, r1 | r2) -} - -/// Shift 64-bit integer to high 64-bits. -#[inline] -pub fn u64_to_hi64_1(r0: u64) -> (u64, bool) { - let ls = r0.leading_zeros(); - (r0 << ls, false) -} - -/// Shift 2 64-bit integers to high 64-bits. -#[inline] -pub fn u64_to_hi64_2(r0: u64, r1: u64) -> (u64, bool) { - let ls = r0.leading_zeros(); - let rs = 64 - ls; - let v = match ls { - 0 => r0, - _ => (r0 << ls) | (r1 >> rs), - }; - let n = r1 << ls != 0; - (v, n) -} - -/// Extract the hi bits from the buffer. -macro_rules! hi { - // # Safety - // - // Safe as long as the `stackvec.len() >= 1`. - (@1 $self:ident, $rview:ident, $t:ident, $fn:ident) => {{ - $fn($rview[0] as $t) - }}; - - // # Safety - // - // Safe as long as the `stackvec.len() >= 2`. - (@2 $self:ident, $rview:ident, $t:ident, $fn:ident) => {{ - let r0 = $rview[0] as $t; - let r1 = $rview[1] as $t; - $fn(r0, r1) - }}; - - // # Safety - // - // Safe as long as the `stackvec.len() >= 2`. - (@nonzero2 $self:ident, $rview:ident, $t:ident, $fn:ident) => {{ - let (v, n) = hi!(@2 $self, $rview, $t, $fn); - (v, n || nonzero($self, 2 )) - }}; - - // # Safety - // - // Safe as long as the `stackvec.len() >= 3`. - (@3 $self:ident, $rview:ident, $t:ident, $fn:ident) => {{ - let r0 = $rview[0] as $t; - let r1 = $rview[1] as $t; - let r2 = $rview[2] as $t; - $fn(r0, r1, r2) - }}; - - // # Safety - // - // Safe as long as the `stackvec.len() >= 3`. - (@nonzero3 $self:ident, $rview:ident, $t:ident, $fn:ident) => {{ - let (v, n) = hi!(@3 $self, $rview, $t, $fn); - (v, n || nonzero($self, 3)) - }}; -} - -/// Get the high 64 bits from the vector. -#[inline(always)] -pub fn hi64(x: &[Limb]) -> (u64, bool) { - let rslc = rview(x); - // SAFETY: the buffer must be at least length bytes long. - match x.len() { - 0 => (0, false), - 1 if LIMB_BITS == 32 => hi!(@1 x, rslc, u32, u32_to_hi64_1), - 1 => hi!(@1 x, rslc, u64, u64_to_hi64_1), - 2 if LIMB_BITS == 32 => hi!(@2 x, rslc, u32, u32_to_hi64_2), - 2 => hi!(@2 x, rslc, u64, u64_to_hi64_2), - _ if LIMB_BITS == 32 => hi!(@nonzero3 x, rslc, u32, u32_to_hi64_3), - _ => hi!(@nonzero2 x, rslc, u64, u64_to_hi64_2), - } -} - -// POWERS -// ------ - -/// MulAssign by a power of 5. -/// -/// Theoretically... -/// -/// Use an exponentiation by squaring method, since it reduces the time -/// complexity of the multiplication to ~`O(log(n))` for the squaring, -/// and `O(n*m)` for the result. Since `m` is typically a lower-order -/// factor, this significantly reduces the number of multiplications -/// we need to do. Iteratively multiplying by small powers follows -/// the nth triangular number series, which scales as `O(p^2)`, but -/// where `p` is `n+m`. In short, it scales very poorly. -/// -/// Practically.... -/// -/// Exponentiation by Squaring: -/// running 2 tests -/// test bigcomp_f32_lexical ... bench: 1,018 ns/iter (+/- 78) -/// test bigcomp_f64_lexical ... bench: 3,639 ns/iter (+/- 1,007) -/// -/// Exponentiation by Iterative Small Powers: -/// running 2 tests -/// test bigcomp_f32_lexical ... bench: 518 ns/iter (+/- 31) -/// test bigcomp_f64_lexical ... bench: 583 ns/iter (+/- 47) -/// -/// Exponentiation by Iterative Large Powers (of 2): -/// running 2 tests -/// test bigcomp_f32_lexical ... bench: 671 ns/iter (+/- 31) -/// test bigcomp_f64_lexical ... 
bench: 1,394 ns/iter (+/- 47) -/// -/// The following benchmarks were run on `1 * 5^300`, using native `pow`, -/// a version with only small powers, and one with pre-computed powers -/// of `5^(3 * max_exp)`, rather than `5^(5 * max_exp)`. -/// -/// However, using large powers is crucial for good performance for higher -/// powers. -/// pow/default time: [426.20 ns 427.96 ns 429.89 ns] -/// pow/small time: [2.9270 us 2.9411 us 2.9565 us] -/// pow/large:3 time: [838.51 ns 842.21 ns 846.27 ns] -/// -/// Even using worst-case scenarios, exponentiation by squaring is -/// significantly slower for our workloads. Just multiply by small powers, -/// in simple cases, and use precalculated large powers in other cases. -/// -/// Furthermore, using sufficiently big large powers is also crucial for -/// performance. This is a tradeoff of binary size and performance, and -/// using a single value at ~`5^(5 * max_exp)` seems optimal. -pub fn pow(x: &mut VecType, mut exp: u32) -> Option<()> { - // Minimize the number of iterations for large exponents: just - // do a few steps with a large powers. - #[cfg(not(feature = "compact"))] - { - while exp >= LARGE_POW5_STEP { - large_mul(x, &LARGE_POW5)?; - exp -= LARGE_POW5_STEP; - } - } - - // Now use our pre-computed small powers iteratively. - // This is calculated as `⌊log(2^BITS - 1, 5)⌋`. - let small_step = if LIMB_BITS == 32 { - 13 - } else { - 27 - }; - let max_native = (5 as Limb).pow(small_step); - while exp >= small_step { - small_mul(x, max_native)?; - exp -= small_step; - } - if exp != 0 { - // SAFETY: safe, since `exp < small_step`. - let small_power = unsafe { f64::int_pow_fast_path(exp as usize, 5) }; - small_mul(x, small_power as Limb)?; - } - Some(()) -} - -// SCALAR -// ------ - -/// Add two small integers and return the resulting value and if overflow happens. -#[inline(always)] -pub fn scalar_add(x: Limb, y: Limb) -> (Limb, bool) { - x.overflowing_add(y) -} - -/// Multiply two small integers (with carry) (and return the overflow contribution). -/// -/// Returns the (low, high) components. -#[inline(always)] -pub fn scalar_mul(x: Limb, y: Limb, carry: Limb) -> (Limb, Limb) { - // Cannot overflow, as long as wide is 2x as wide. This is because - // the following is always true: - // `Wide::MAX - (Narrow::MAX * Narrow::MAX) >= Narrow::MAX` - let z: Wide = (x as Wide) * (y as Wide) + (carry as Wide); - (z as Limb, (z >> LIMB_BITS) as Limb) -} - -// SMALL -// ----- - -/// Add small integer to bigint starting from offset. -#[inline] -pub fn small_add_from(x: &mut VecType, y: Limb, start: usize) -> Option<()> { - let mut index = start; - let mut carry = y; - while carry != 0 && index < x.len() { - let result = scalar_add(x[index], carry); - x[index] = result.0; - carry = result.1 as Limb; - index += 1; - } - // If we carried past all the elements, add to the end of the buffer. - if carry != 0 { - x.try_push(carry)?; - } - Some(()) -} - -/// Add small integer to bigint. -#[inline(always)] -pub fn small_add(x: &mut VecType, y: Limb) -> Option<()> { - small_add_from(x, y, 0) -} - -/// Multiply bigint by small integer. -#[inline] -pub fn small_mul(x: &mut VecType, y: Limb) -> Option<()> { - let mut carry = 0; - for xi in x.iter_mut() { - let result = scalar_mul(*xi, y, carry); - *xi = result.0; - carry = result.1; - } - // If we carried past all the elements, add to the end of the buffer. - if carry != 0 { - x.try_push(carry)?; - } - Some(()) -} - -// LARGE -// ----- - -/// Add bigint to bigint starting from offset. 
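/// Concretely, this behaves as if computing `x += y << (start * LIMB_BITS)`:
/// `y` is added as though it had been shifted left by `start` whole limbs.
/// `long_mul` below relies on this to accumulate each partial product of the
/// schoolbook multiplication at its correct limb offset.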
-pub fn large_add_from(x: &mut VecType, y: &[Limb], start: usize) -> Option<()> { - // The effective x buffer is from `xstart..x.len()`, so we need to treat - // that as the current range. If the effective y buffer is longer, need - // to resize to that, + the start index. - if y.len() > x.len().saturating_sub(start) { - // Ensure we panic if we can't extend the buffer. - // This avoids any unsafe behavior afterwards. - x.try_resize(y.len() + start, 0)?; - } - - // Iteratively add elements from y to x. - let mut carry = false; - for (index, &yi) in y.iter().enumerate() { - // We panicked in `try_resize` if this wasn't true. - let xi = x.get_mut(start + index).unwrap(); - - // Only one op of the two ops can overflow, since we added at max - // Limb::max_value() + Limb::max_value(). Add the previous carry, - // and store the current carry for the next. - let result = scalar_add(*xi, yi); - *xi = result.0; - let mut tmp = result.1; - if carry { - let result = scalar_add(*xi, 1); - *xi = result.0; - tmp |= result.1; - } - carry = tmp; - } - - // Handle overflow. - if carry { - small_add_from(x, 1, y.len() + start)?; - } - Some(()) -} - -/// Add bigint to bigint. -#[inline(always)] -pub fn large_add(x: &mut VecType, y: &[Limb]) -> Option<()> { - large_add_from(x, y, 0) -} - -/// Grade-school multiplication algorithm. -/// -/// Slow, naive algorithm, using limb-bit bases and just shifting left for -/// each iteration. This could be optimized with numerous other algorithms, -/// but it's extremely simple, and works in O(n*m) time, which is fine -/// by me. Each iteration, of which there are `m` iterations, requires -/// `n` multiplications, and `n` additions, or grade-school multiplication. -/// -/// Don't use Karatsuba multiplication, since out implementation seems to -/// be slower asymptotically, which is likely just due to the small sizes -/// we deal with here. 
For example, running on the following data: -/// -/// ```text -/// const SMALL_X: &[u32] = &[ -/// 766857581, 3588187092, 1583923090, 2204542082, 1564708913, 2695310100, 3676050286, -/// 1022770393, 468044626, 446028186 -/// ]; -/// const SMALL_Y: &[u32] = &[ -/// 3945492125, 3250752032, 1282554898, 1708742809, 1131807209, 3171663979, 1353276095, -/// 1678845844, 2373924447, 3640713171 -/// ]; -/// const LARGE_X: &[u32] = &[ -/// 3647536243, 2836434412, 2154401029, 1297917894, 137240595, 790694805, 2260404854, -/// 3872698172, 690585094, 99641546, 3510774932, 1672049983, 2313458559, 2017623719, -/// 638180197, 1140936565, 1787190494, 1797420655, 14113450, 2350476485, 3052941684, -/// 1993594787, 2901001571, 4156930025, 1248016552, 848099908, 2660577483, 4030871206, -/// 692169593, 2835966319, 1781364505, 4266390061, 1813581655, 4210899844, 2137005290, -/// 2346701569, 3715571980, 3386325356, 1251725092, 2267270902, 474686922, 2712200426, -/// 197581715, 3087636290, 1379224439, 1258285015, 3230794403, 2759309199, 1494932094, -/// 326310242 -/// ]; -/// const LARGE_Y: &[u32] = &[ -/// 1574249566, 868970575, 76716509, 3198027972, 1541766986, 1095120699, 3891610505, -/// 2322545818, 1677345138, 865101357, 2650232883, 2831881215, 3985005565, 2294283760, -/// 3468161605, 393539559, 3665153349, 1494067812, 106699483, 2596454134, 797235106, -/// 705031740, 1209732933, 2732145769, 4122429072, 141002534, 790195010, 4014829800, -/// 1303930792, 3649568494, 308065964, 1233648836, 2807326116, 79326486, 1262500691, -/// 621809229, 2258109428, 3819258501, 171115668, 1139491184, 2979680603, 1333372297, -/// 1657496603, 2790845317, 4090236532, 4220374789, 601876604, 1828177209, 2372228171, -/// 2247372529 -/// ]; -/// ``` -/// -/// We get the following results: - -/// ```text -/// mul/small:long time: [220.23 ns 221.47 ns 222.81 ns] -/// Found 4 outliers among 100 measurements (4.00%) -/// 2 (2.00%) high mild -/// 2 (2.00%) high severe -/// mul/small:karatsuba time: [233.88 ns 234.63 ns 235.44 ns] -/// Found 11 outliers among 100 measurements (11.00%) -/// 8 (8.00%) high mild -/// 3 (3.00%) high severe -/// mul/large:long time: [1.9365 us 1.9455 us 1.9558 us] -/// Found 12 outliers among 100 measurements (12.00%) -/// 7 (7.00%) high mild -/// 5 (5.00%) high severe -/// mul/large:karatsuba time: [4.4250 us 4.4515 us 4.4812 us] -/// ``` -/// -/// In short, Karatsuba multiplication is never worthwhile for out use-case. -pub fn long_mul(x: &[Limb], y: &[Limb]) -> Option { - // Using the immutable value, multiply by all the scalars in y, using - // the algorithm defined above. Use a single buffer to avoid - // frequent reallocations. Handle the first case to avoid a redundant - // addition, since we know y.len() >= 1. - let mut z = VecType::try_from(x)?; - if !y.is_empty() { - let y0 = y[0]; - small_mul(&mut z, y0)?; - - for (index, &yi) in y.iter().enumerate().skip(1) { - if yi != 0 { - let mut zi = VecType::try_from(x)?; - small_mul(&mut zi, yi)?; - large_add_from(&mut z, &zi, index)?; - } - } - } - - z.normalize(); - Some(z) -} - -/// Multiply bigint by bigint using grade-school multiplication algorithm. -#[inline(always)] -pub fn large_mul(x: &mut VecType, y: &[Limb]) -> Option<()> { - // Karatsuba multiplication never makes sense, so just use grade school - // multiplication. - if y.len() == 1 { - // SAFETY: safe since `y.len() == 1`. - small_mul(x, y[0])?; - } else { - *x = long_mul(y, x)?; - } - Some(()) -} - -// SHIFT -// ----- - -/// Shift-left `n` bits inside a buffer. 
-#[inline] -pub fn shl_bits(x: &mut VecType, n: usize) -> Option<()> { - debug_assert!(n != 0); - - // Internally, for each item, we shift left by n, and add the previous - // right shifted limb-bits. - // For example, we transform (for u8) shifted left 2, to: - // b10100100 b01000010 - // b10 b10010001 b00001000 - debug_assert!(n < LIMB_BITS); - let rshift = LIMB_BITS - n; - let lshift = n; - let mut prev: Limb = 0; - for xi in x.iter_mut() { - let tmp = *xi; - *xi <<= lshift; - *xi |= prev >> rshift; - prev = tmp; - } - - // Always push the carry, even if it creates a non-normal result. - let carry = prev >> rshift; - if carry != 0 { - x.try_push(carry)?; - } - - Some(()) -} - -/// Shift-left `n` limbs inside a buffer. -#[inline] -pub fn shl_limbs(x: &mut VecType, n: usize) -> Option<()> { - debug_assert!(n != 0); - if n + x.len() > x.capacity() { - None - } else if !x.is_empty() { - let len = n + x.len(); - // SAFE: since x is not empty, and `x.len() + n <= x.capacity()`. - unsafe { - // Move the elements. - let src = x.as_ptr(); - let dst = x.as_mut_ptr().add(n); - ptr::copy(src, dst, x.len()); - // Write our 0s. - ptr::write_bytes(x.as_mut_ptr(), 0, n); - x.set_len(len); - } - Some(()) - } else { - Some(()) - } -} - -/// Shift-left buffer by n bits. -#[inline] -pub fn shl(x: &mut VecType, n: usize) -> Option<()> { - let rem = n % LIMB_BITS; - let div = n / LIMB_BITS; - if rem != 0 { - shl_bits(x, rem)?; - } - if div != 0 { - shl_limbs(x, div)?; - } - Some(()) -} - -/// Get number of leading zero bits in the storage. -#[inline] -pub fn leading_zeros(x: &[Limb]) -> u32 { - let length = x.len(); - // wrapping_sub is fine, since it'll just return None. - if let Some(&value) = x.get(length.wrapping_sub(1)) { - value.leading_zeros() - } else { - 0 - } -} - -/// Calculate the bit-length of the big-integer. -#[inline] -pub fn bit_length(x: &[Limb]) -> u32 { - let nlz = leading_zeros(x); - LIMB_BITS as u32 * x.len() as u32 - nlz -} - -// LIMB -// ---- - -// Type for a single limb of the big integer. -// -// A limb is analogous to a digit in base10, except, it stores 32-bit -// or 64-bit numbers instead. We want types where 64-bit multiplication -// is well-supported by the architecture, rather than emulated in 3 -// instructions. The quickest way to check this support is using a -// cross-compiler for numerous architectures, along with the following -// source file and command: -// -// Compile with `gcc main.c -c -S -O3 -masm=intel` -// -// And the source code is: -// ```text -// #include -// -// struct i128 { -// uint64_t hi; -// uint64_t lo; -// }; -// -// // Type your code here, or load an example. -// struct i128 square(uint64_t x, uint64_t y) { -// __int128 prod = (__int128)x * (__int128)y; -// struct i128 z; -// z.hi = (uint64_t)(prod >> 64); -// z.lo = (uint64_t)prod; -// return z; -// } -// ``` -// -// If the result contains `call __multi3`, then the multiplication -// is emulated by the compiler. Otherwise, it's natively supported. -// -// This should be all-known 64-bit platforms supported by Rust. -// https://forge.rust-lang.org/platform-support.html -// -// # Supported -// -// Platforms where native 128-bit multiplication is explicitly supported: -// - x86_64 (Supported via `MUL`). -// - mips64 (Supported via `DMULTU`, which `HI` and `LO` can be read-from). -// - s390x (Supported via `MLGR`). -// -// # Efficient -// -// Platforms where native 64-bit multiplication is supported and -// you can extract hi-lo for 64-bit multiplications. 
-// - aarch64 (Requires `UMULH` and `MUL` to capture high and low bits). -// - powerpc64 (Requires `MULHDU` and `MULLD` to capture high and low bits). -// - riscv64 (Requires `MUL` and `MULH` to capture high and low bits). -// -// # Unsupported -// -// Platforms where native 128-bit multiplication is not supported, -// requiring software emulation. -// sparc64 (`UMUL` only supports double-word arguments). -// sparcv9 (Same as sparc64). -// -// These tests are run via `xcross`, my own library for C cross-compiling, -// which supports numerous targets (far in excess of Rust's tier 1 support, -// or rust-embedded/cross's list). xcross may be found here: -// https://github.com/Alexhuszagh/xcross -// -// To compile for the given target, run: -// `xcross gcc main.c -c -S -O3 --target $target` -// -// All 32-bit architectures inherently do not have support. That means -// we can essentially look for 64-bit architectures that are not SPARC. - -#[cfg(all(target_pointer_width = "64", not(target_arch = "sparc")))] -pub type Limb = u64; -#[cfg(all(target_pointer_width = "64", not(target_arch = "sparc")))] -pub type Wide = u128; -#[cfg(all(target_pointer_width = "64", not(target_arch = "sparc")))] -pub const LIMB_BITS: usize = 64; - -#[cfg(not(all(target_pointer_width = "64", not(target_arch = "sparc"))))] -pub type Limb = u32; -#[cfg(not(all(target_pointer_width = "64", not(target_arch = "sparc"))))] -pub type Wide = u64; -#[cfg(not(all(target_pointer_width = "64", not(target_arch = "sparc"))))] -pub const LIMB_BITS: usize = 32; diff --git a/vendor/minimal-lexical/src/extended_float.rs b/vendor/minimal-lexical/src/extended_float.rs deleted file mode 100644 index 7397e199c84220..00000000000000 --- a/vendor/minimal-lexical/src/extended_float.rs +++ /dev/null @@ -1,24 +0,0 @@ -// FLOAT TYPE - -#![doc(hidden)] - -use crate::num::Float; - -/// Extended precision floating-point type. -/// -/// Private implementation, exposed only for testing purposes. -#[derive(Clone, Copy, Debug, PartialEq, Eq)] -pub struct ExtendedFloat { - /// Mantissa for the extended-precision float. - pub mant: u64, - /// Binary exponent for the extended-precision float. - pub exp: i32, -} - -/// Converts an `ExtendedFloat` to the closest machine float type. -#[inline(always)] -pub fn extended_to_float(x: ExtendedFloat) -> F { - let mut word = x.mant; - word |= (x.exp as u64) << F::MANTISSA_SIZE; - F::from_bits(word) -} diff --git a/vendor/minimal-lexical/src/fpu.rs b/vendor/minimal-lexical/src/fpu.rs deleted file mode 100644 index 42059a080a5113..00000000000000 --- a/vendor/minimal-lexical/src/fpu.rs +++ /dev/null @@ -1,98 +0,0 @@ -//! Platform-specific, assembly instructions to avoid -//! intermediate rounding on architectures with FPUs. -//! -//! This is adapted from the implementation in the Rust core library, -//! the original implementation can be [here](https://github.com/rust-lang/rust/blob/master/library/core/src/num/dec2flt/fpu.rs). -//! -//! It is therefore also subject to a Apache2.0/MIT license. - -#![cfg(feature = "nightly")] -#![doc(hidden)] - -pub use fpu_precision::set_precision; - -// On x86, the x87 FPU is used for float operations if the SSE/SSE2 extensions are not available. -// The x87 FPU operates with 80 bits of precision by default, which means that operations will -// round to 80 bits causing double rounding to happen when values are eventually represented as -// 32/64 bit float values. To overcome this, the FPU control word can be set so that the -// computations are performed in the desired precision. 
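// A caller is expected to use the guard RAII-style: acquire it immediately
// before the computation whose intermediate results must not be double
// rounded, and let `Drop` restore the saved control word afterwards. A
// minimal sketch (hypothetical caller; the turbofish assumes the generic
// float parameter implied by the `size_of` dispatch below):
//
//     let _cw = fpu_precision::set_precision::<f64>();
//     let z = x * y; // rounded once, at f64 precision, on x87-only targets
//     // `_cw` goes out of scope here, restoring the original control word.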
-#[cfg(all(target_arch = "x86", not(target_feature = "sse2")))] -mod fpu_precision { - use core::mem::size_of; - - /// A structure used to preserve the original value of the FPU control word, so that it can be - /// restored when the structure is dropped. - /// - /// The x87 FPU is a 16-bits register whose fields are as follows: - /// - /// | 12-15 | 10-11 | 8-9 | 6-7 | 5 | 4 | 3 | 2 | 1 | 0 | - /// |------:|------:|----:|----:|---:|---:|---:|---:|---:|---:| - /// | | RC | PC | | PM | UM | OM | ZM | DM | IM | - /// - /// The documentation for all of the fields is available in the IA-32 Architectures Software - /// Developer's Manual (Volume 1). - /// - /// The only field which is relevant for the following code is PC, Precision Control. This - /// field determines the precision of the operations performed by the FPU. It can be set to: - /// - 0b00, single precision i.e., 32-bits - /// - 0b10, double precision i.e., 64-bits - /// - 0b11, double extended precision i.e., 80-bits (default state) - /// The 0b01 value is reserved and should not be used. - pub struct FPUControlWord(u16); - - fn set_cw(cw: u16) { - // SAFETY: the `fldcw` instruction has been audited to be able to work correctly with - // any `u16` - unsafe { - asm!( - "fldcw word ptr [{}]", - in(reg) &cw, - options(nostack), - ) - } - } - - /// Sets the precision field of the FPU to `T` and returns a `FPUControlWord`. - pub fn set_precision() -> FPUControlWord { - let mut cw = 0_u16; - - // Compute the value for the Precision Control field that is appropriate for `T`. - let cw_precision = match size_of::() { - 4 => 0x0000, // 32 bits - 8 => 0x0200, // 64 bits - _ => 0x0300, // default, 80 bits - }; - - // Get the original value of the control word to restore it later, when the - // `FPUControlWord` structure is dropped - // SAFETY: the `fnstcw` instruction has been audited to be able to work correctly with - // any `u16` - unsafe { - asm!( - "fnstcw word ptr [{}]", - in(reg) &mut cw, - options(nostack), - ) - } - - // Set the control word to the desired precision. This is achieved by masking away the old - // precision (bits 8 and 9, 0x300) and replacing it with the precision flag computed above. - set_cw((cw & 0xFCFF) | cw_precision); - - FPUControlWord(cw) - } - - impl Drop for FPUControlWord { - fn drop(&mut self) { - set_cw(self.0) - } - } -} - -// In most architectures, floating point operations have an explicit bit size, therefore the -// precision of the computation is determined on a per-operation basis. -#[cfg(any(not(target_arch = "x86"), target_feature = "sse2"))] -mod fpu_precision { - pub fn set_precision() { - } -} diff --git a/vendor/minimal-lexical/src/heapvec.rs b/vendor/minimal-lexical/src/heapvec.rs deleted file mode 100644 index 035926018a41f8..00000000000000 --- a/vendor/minimal-lexical/src/heapvec.rs +++ /dev/null @@ -1,190 +0,0 @@ -//! Simple heap-allocated vector. - -#![cfg(feature = "alloc")] -#![doc(hidden)] - -use crate::bigint; -#[cfg(not(feature = "std"))] -use alloc::vec::Vec; -use core::{cmp, ops}; -#[cfg(feature = "std")] -use std::vec::Vec; - -/// Simple heap vector implementation. -#[derive(Clone)] -pub struct HeapVec { - /// The heap-allocated buffer for the elements. - data: Vec, -} - -#[allow(clippy::new_without_default)] -impl HeapVec { - /// Construct an empty vector. - #[inline] - pub fn new() -> Self { - Self { - data: Vec::with_capacity(bigint::BIGINT_LIMBS), - } - } - - /// Construct a vector from an existing slice. 
- #[inline] - pub fn try_from(x: &[bigint::Limb]) -> Option { - let mut vec = Self::new(); - vec.try_extend(x)?; - Some(vec) - } - - /// Sets the length of a vector. - /// - /// This will explicitly set the size of the vector, without actually - /// modifying its buffers, so it is up to the caller to ensure that the - /// vector is actually the specified size. - /// - /// # Safety - /// - /// Safe as long as `len` is less than `self.capacity()` and has been initialized. - #[inline] - pub unsafe fn set_len(&mut self, len: usize) { - debug_assert!(len <= bigint::BIGINT_LIMBS); - unsafe { self.data.set_len(len) }; - } - - /// The number of elements stored in the vector. - #[inline] - pub fn len(&self) -> usize { - self.data.len() - } - - /// If the vector is empty. - #[inline] - pub fn is_empty(&self) -> bool { - self.len() == 0 - } - - /// The number of items the vector can hold. - #[inline] - pub fn capacity(&self) -> usize { - self.data.capacity() - } - - /// Append an item to the vector. - #[inline] - pub fn try_push(&mut self, value: bigint::Limb) -> Option<()> { - self.data.push(value); - Some(()) - } - - /// Remove an item from the end of the vector and return it, or None if empty. - #[inline] - pub fn pop(&mut self) -> Option { - self.data.pop() - } - - /// Copy elements from a slice and append them to the vector. - #[inline] - pub fn try_extend(&mut self, slc: &[bigint::Limb]) -> Option<()> { - self.data.extend_from_slice(slc); - Some(()) - } - - /// Try to resize the buffer. - /// - /// If the new length is smaller than the current length, truncate - /// the input. If it's larger, then append elements to the buffer. - #[inline] - pub fn try_resize(&mut self, len: usize, value: bigint::Limb) -> Option<()> { - self.data.resize(len, value); - Some(()) - } - - // HI - - /// Get the high 64 bits from the vector. - #[inline(always)] - pub fn hi64(&self) -> (u64, bool) { - bigint::hi64(&self.data) - } - - // FROM - - /// Create StackVec from u64 value. - #[inline(always)] - pub fn from_u64(x: u64) -> Self { - bigint::from_u64(x) - } - - // MATH - - /// Normalize the integer, so any leading zero values are removed. - #[inline] - pub fn normalize(&mut self) { - bigint::normalize(self) - } - - /// Get if the big integer is normalized. - #[inline] - pub fn is_normalized(&self) -> bool { - bigint::is_normalized(self) - } - - /// AddAssign small integer. - #[inline] - pub fn add_small(&mut self, y: bigint::Limb) -> Option<()> { - bigint::small_add(self, y) - } - - /// MulAssign small integer. 
- #[inline] - pub fn mul_small(&mut self, y: bigint::Limb) -> Option<()> { - bigint::small_mul(self, y) - } -} - -impl PartialEq for HeapVec { - #[inline] - #[allow(clippy::op_ref)] - fn eq(&self, other: &Self) -> bool { - use core::ops::Deref; - self.len() == other.len() && self.deref() == other.deref() - } -} - -impl Eq for HeapVec { -} - -impl cmp::PartialOrd for HeapVec { - #[inline] - fn partial_cmp(&self, other: &Self) -> Option { - Some(bigint::compare(self, other)) - } -} - -impl cmp::Ord for HeapVec { - #[inline] - fn cmp(&self, other: &Self) -> cmp::Ordering { - bigint::compare(self, other) - } -} - -impl ops::Deref for HeapVec { - type Target = [bigint::Limb]; - #[inline] - fn deref(&self) -> &[bigint::Limb] { - &self.data - } -} - -impl ops::DerefMut for HeapVec { - #[inline] - fn deref_mut(&mut self) -> &mut [bigint::Limb] { - &mut self.data - } -} - -impl ops::MulAssign<&[bigint::Limb]> for HeapVec { - #[inline] - fn mul_assign(&mut self, rhs: &[bigint::Limb]) { - bigint::large_mul(self, rhs).unwrap(); - } -} diff --git a/vendor/minimal-lexical/src/lemire.rs b/vendor/minimal-lexical/src/lemire.rs deleted file mode 100644 index 99b1ae705911da..00000000000000 --- a/vendor/minimal-lexical/src/lemire.rs +++ /dev/null @@ -1,225 +0,0 @@ -//! Implementation of the Eisel-Lemire algorithm. -//! -//! This is adapted from [fast-float-rust](https://github.com/aldanor/fast-float-rust), -//! a port of [fast_float](https://github.com/fastfloat/fast_float) to Rust. - -#![cfg(not(feature = "compact"))] -#![doc(hidden)] - -use crate::extended_float::ExtendedFloat; -use crate::num::Float; -use crate::number::Number; -use crate::table::{LARGEST_POWER_OF_FIVE, POWER_OF_FIVE_128, SMALLEST_POWER_OF_FIVE}; - -/// Ensure truncation of digits doesn't affect our computation, by doing 2 passes. -#[inline] -pub fn lemire(num: &Number) -> ExtendedFloat { - // If significant digits were truncated, then we can have rounding error - // only if `mantissa + 1` produces a different result. We also avoid - // redundantly using the Eisel-Lemire algorithm if it was unable to - // correctly round on the first pass. - let mut fp = compute_float::(num.exponent, num.mantissa); - if num.many_digits && fp.exp >= 0 && fp != compute_float::(num.exponent, num.mantissa + 1) { - // Need to re-calculate, since the previous values are rounded - // when the slow path algorithm expects a normalized extended float. - fp = compute_error::(num.exponent, num.mantissa); - } - fp -} - -/// Compute a float using an extended-precision representation. -/// -/// Fast conversion of a the significant digits and decimal exponent -/// a float to a extended representation with a binary float. This -/// algorithm will accurately parse the vast majority of cases, -/// and uses a 128-bit representation (with a fallback 192-bit -/// representation). -/// -/// This algorithm scales the exponent by the decimal exponent -/// using pre-computed powers-of-5, and calculates if the -/// representation can be unambiguously rounded to the nearest -/// machine float. Near-halfway cases are not handled here, -/// and are represented by a negative, biased binary exponent. -/// -/// The algorithm is described in detail in "Daniel Lemire, Number Parsing -/// at a Gigabyte per Second" in section 5, "Fast Algorithm", and -/// section 6, "Exact Numbers And Ties", available online: -/// . 
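/// Concretely, using the example from the crate README: "1.2345" reaches this
/// function as `w = 12345` significant digits with decimal exponent `q = -4`,
/// i.e. the value `12345 * 10^-4`. On success, the returned `ExtendedFloat`
/// holds the mantissa bits and biased binary exponent that
/// `extended_to_float` assembles into the final IEEE-754 bit pattern.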
-pub fn compute_float(q: i32, mut w: u64) -> ExtendedFloat { - let fp_zero = ExtendedFloat { - mant: 0, - exp: 0, - }; - let fp_inf = ExtendedFloat { - mant: 0, - exp: F::INFINITE_POWER, - }; - - // Short-circuit if the value can only be a literal 0 or infinity. - if w == 0 || q < F::SMALLEST_POWER_OF_TEN { - return fp_zero; - } else if q > F::LARGEST_POWER_OF_TEN { - return fp_inf; - } - // Normalize our significant digits, so the most-significant bit is set. - let lz = w.leading_zeros() as i32; - w <<= lz; - let (lo, hi) = compute_product_approx(q, w, F::MANTISSA_SIZE as usize + 3); - if lo == 0xFFFF_FFFF_FFFF_FFFF { - // If we have failed to approximate w x 5^-q with our 128-bit value. - // Since the addition of 1 could lead to an overflow which could then - // round up over the half-way point, this can lead to improper rounding - // of a float. - // - // However, this can only occur if q ∈ [-27, 55]. The upper bound of q - // is 55 because 5^55 < 2^128, however, this can only happen if 5^q > 2^64, - // since otherwise the product can be represented in 64-bits, producing - // an exact result. For negative exponents, rounding-to-even can - // only occur if 5^-q < 2^64. - // - // For detailed explanations of rounding for negative exponents, see - // . For detailed - // explanations of rounding for positive exponents, see - // . - let inside_safe_exponent = (q >= -27) && (q <= 55); - if !inside_safe_exponent { - return compute_error_scaled::(q, hi, lz); - } - } - let upperbit = (hi >> 63) as i32; - let mut mantissa = hi >> (upperbit + 64 - F::MANTISSA_SIZE - 3); - let mut power2 = power(q) + upperbit - lz - F::MINIMUM_EXPONENT; - if power2 <= 0 { - if -power2 + 1 >= 64 { - // Have more than 64 bits below the minimum exponent, must be 0. - return fp_zero; - } - // Have a subnormal value. - mantissa >>= -power2 + 1; - mantissa += mantissa & 1; - mantissa >>= 1; - power2 = (mantissa >= (1_u64 << F::MANTISSA_SIZE)) as i32; - return ExtendedFloat { - mant: mantissa, - exp: power2, - }; - } - // Need to handle rounding ties. Normally, we need to round up, - // but if we fall right in between and and we have an even basis, we - // need to round down. - // - // This will only occur if: - // 1. The lower 64 bits of the 128-bit representation is 0. - // IE, 5^q fits in single 64-bit word. - // 2. The least-significant bit prior to truncated mantissa is odd. - // 3. All the bits truncated when shifting to mantissa bits + 1 are 0. - // - // Or, we may fall between two floats: we are exactly halfway. - if lo <= 1 - && q >= F::MIN_EXPONENT_ROUND_TO_EVEN - && q <= F::MAX_EXPONENT_ROUND_TO_EVEN - && mantissa & 3 == 1 - && (mantissa << (upperbit + 64 - F::MANTISSA_SIZE - 3)) == hi - { - // Zero the lowest bit, so we don't round up. - mantissa &= !1_u64; - } - // Round-to-even, then shift the significant digits into place. - mantissa += mantissa & 1; - mantissa >>= 1; - if mantissa >= (2_u64 << F::MANTISSA_SIZE) { - // Rounding up overflowed, so the carry bit is set. Set the - // mantissa to 1 (only the implicit, hidden bit is set) and - // increase the exponent. - mantissa = 1_u64 << F::MANTISSA_SIZE; - power2 += 1; - } - // Zero out the hidden bit. - mantissa &= !(1_u64 << F::MANTISSA_SIZE); - if power2 >= F::INFINITE_POWER { - // Exponent is above largest normal value, must be infinite. - return fp_inf; - } - ExtendedFloat { - mant: mantissa, - exp: power2, - } -} - -/// Fallback algorithm to calculate the non-rounded representation. 
-/// This calculates the extended representation, and then normalizes -/// the resulting representation, so the high bit is set. -#[inline] -pub fn compute_error(q: i32, mut w: u64) -> ExtendedFloat { - let lz = w.leading_zeros() as i32; - w <<= lz; - let hi = compute_product_approx(q, w, F::MANTISSA_SIZE as usize + 3).1; - compute_error_scaled::(q, hi, lz) -} - -/// Compute the error from a mantissa scaled to the exponent. -#[inline] -pub fn compute_error_scaled(q: i32, mut w: u64, lz: i32) -> ExtendedFloat { - // Want to normalize the float, but this is faster than ctlz on most architectures. - let hilz = (w >> 63) as i32 ^ 1; - w <<= hilz; - let power2 = power(q as i32) + F::EXPONENT_BIAS - hilz - lz - 62; - - ExtendedFloat { - mant: w, - exp: power2 + F::INVALID_FP, - } -} - -/// Calculate a base 2 exponent from a decimal exponent. -/// This uses a pre-computed integer approximation for -/// log2(10), where 217706 / 2^16 is accurate for the -/// entire range of non-finite decimal exponents. -#[inline] -fn power(q: i32) -> i32 { - (q.wrapping_mul(152_170 + 65536) >> 16) + 63 -} - -#[inline] -fn full_multiplication(a: u64, b: u64) -> (u64, u64) { - let r = (a as u128) * (b as u128); - (r as u64, (r >> 64) as u64) -} - -// This will compute or rather approximate w * 5**q and return a pair of 64-bit words -// approximating the result, with the "high" part corresponding to the most significant -// bits and the low part corresponding to the least significant bits. -fn compute_product_approx(q: i32, w: u64, precision: usize) -> (u64, u64) { - debug_assert!(q >= SMALLEST_POWER_OF_FIVE); - debug_assert!(q <= LARGEST_POWER_OF_FIVE); - debug_assert!(precision <= 64); - - let mask = if precision < 64 { - 0xFFFF_FFFF_FFFF_FFFF_u64 >> precision - } else { - 0xFFFF_FFFF_FFFF_FFFF_u64 - }; - - // 5^q < 2^64, then the multiplication always provides an exact value. - // That means whenever we need to round ties to even, we always have - // an exact value. - let index = (q - SMALLEST_POWER_OF_FIVE) as usize; - let (lo5, hi5) = POWER_OF_FIVE_128[index]; - // Only need one multiplication as long as there is 1 zero but - // in the explicit mantissa bits, +1 for the hidden bit, +1 to - // determine the rounding direction, +1 for if the computed - // product has a leading zero. - let (mut first_lo, mut first_hi) = full_multiplication(w, lo5); - if first_hi & mask == mask { - // Need to do a second multiplication to get better precision - // for the lower product. This will always be exact - // where q is < 55, since 5^55 < 2^128. If this wraps, - // then we need to need to round up the hi product. - let (_, second_hi) = full_multiplication(w, hi5); - first_lo = first_lo.wrapping_add(second_hi); - if second_hi > first_lo { - first_hi += 1; - } - } - (first_lo, first_hi) -} diff --git a/vendor/minimal-lexical/src/lib.rs b/vendor/minimal-lexical/src/lib.rs deleted file mode 100644 index 75f923475f21e2..00000000000000 --- a/vendor/minimal-lexical/src/lib.rs +++ /dev/null @@ -1,68 +0,0 @@ -//! Fast, minimal float-parsing algorithm. -//! -//! minimal-lexical has a simple, high-level API with a single -//! exported function: [`parse_float`]. -//! -//! [`parse_float`] expects a forward iterator for the integer -//! and fraction digits, as well as a parsed exponent as an [`i32`]. -//! -//! For more examples, please see [simple-example](https://github.com/Alexhuszagh/minimal-lexical/blob/master/examples/simple.rs). -//! -//! EXAMPLES -//! -------- -//! -//! ``` -//! extern crate minimal_lexical; -//! -//! 
// Let's say we want to parse "1.2345". -//! // First, we need an external parser to extract the integer digits ("1"), -//! // the fraction digits ("2345"), and then parse the exponent to a 32-bit -//! // integer (0). -//! // Warning: -//! // -------- -//! // Please note that leading zeros must be trimmed from the integer, -//! // and trailing zeros must be trimmed from the fraction. This cannot -//! // be handled by minimal-lexical, since we accept iterators. -//! let integer = b"1"; -//! let fraction = b"2345"; -//! let float: f64 = minimal_lexical::parse_float(integer.iter(), fraction.iter(), 0); -//! println!("float={:?}", float); // 1.235 -//! ``` -//! -//! [`parse_float`]: fn.parse_float.html -//! [`i32`]: https://doc.rust-lang.org/stable/std/primitive.i32.html - -// FEATURES - -// We want to have the same safety guarantees as Rust core, -// so we allow unused unsafe to clearly document safety guarantees. -#![allow(unused_unsafe)] -#![cfg_attr(feature = "lint", warn(unsafe_op_in_unsafe_fn))] -#![cfg_attr(not(feature = "std"), no_std)] - -#[cfg(all(feature = "alloc", not(feature = "std")))] -extern crate alloc; - -pub mod bellerophon; -pub mod bigint; -pub mod extended_float; -pub mod fpu; -pub mod heapvec; -pub mod lemire; -pub mod libm; -pub mod mask; -pub mod num; -pub mod number; -pub mod parse; -pub mod rounding; -pub mod slow; -pub mod stackvec; -pub mod table; - -mod table_bellerophon; -mod table_lemire; -mod table_small; - -// API -pub use self::num::Float; -pub use self::parse::parse_float; diff --git a/vendor/minimal-lexical/src/libm.rs b/vendor/minimal-lexical/src/libm.rs deleted file mode 100644 index c9f93d36ac7bc1..00000000000000 --- a/vendor/minimal-lexical/src/libm.rs +++ /dev/null @@ -1,1238 +0,0 @@ -//! A small number of math routines for floats and doubles. -//! -//! These are adapted from libm, a port of musl libc's libm to Rust. -//! libm can be found online [here](https://github.com/rust-lang/libm), -//! and is similarly licensed under an Apache2.0/MIT license - -#![cfg(all(not(feature = "std"), feature = "compact"))] -#![doc(hidden)] - -/* origin: FreeBSD /usr/src/lib/msun/src/e_powf.c */ -/* - * Conversion to float by Ian Lance Taylor, Cygnus Support, ian@cygnus.com. - */ -/* - * ==================================================== - * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved. - * - * Developed at SunPro, a Sun Microsystems, Inc. business. - * Permission to use, copy, modify, and distribute this - * software is freely granted, provided that this notice - * is preserved. - * ==================================================== - */ - -/// # Safety -/// -/// Safe if `index < array.len()`. -macro_rules! i { - ($array:ident, $index:expr) => { - // SAFETY: safe if `index < array.len()`. 
- unsafe { *$array.get_unchecked($index) } - }; -} - -pub fn powf(x: f32, y: f32) -> f32 { - const BP: [f32; 2] = [1.0, 1.5]; - const DP_H: [f32; 2] = [0.0, 5.84960938e-01]; /* 0x3f15c000 */ - const DP_L: [f32; 2] = [0.0, 1.56322085e-06]; /* 0x35d1cfdc */ - const TWO24: f32 = 16777216.0; /* 0x4b800000 */ - const HUGE: f32 = 1.0e30; - const TINY: f32 = 1.0e-30; - const L1: f32 = 6.0000002384e-01; /* 0x3f19999a */ - const L2: f32 = 4.2857143283e-01; /* 0x3edb6db7 */ - const L3: f32 = 3.3333334327e-01; /* 0x3eaaaaab */ - const L4: f32 = 2.7272811532e-01; /* 0x3e8ba305 */ - const L5: f32 = 2.3066075146e-01; /* 0x3e6c3255 */ - const L6: f32 = 2.0697501302e-01; /* 0x3e53f142 */ - const P1: f32 = 1.6666667163e-01; /* 0x3e2aaaab */ - const P2: f32 = -2.7777778450e-03; /* 0xbb360b61 */ - const P3: f32 = 6.6137559770e-05; /* 0x388ab355 */ - const P4: f32 = -1.6533901999e-06; /* 0xb5ddea0e */ - const P5: f32 = 4.1381369442e-08; /* 0x3331bb4c */ - const LG2: f32 = 6.9314718246e-01; /* 0x3f317218 */ - const LG2_H: f32 = 6.93145752e-01; /* 0x3f317200 */ - const LG2_L: f32 = 1.42860654e-06; /* 0x35bfbe8c */ - const OVT: f32 = 4.2995665694e-08; /* -(128-log2(ovfl+.5ulp)) */ - const CP: f32 = 9.6179670095e-01; /* 0x3f76384f =2/(3ln2) */ - const CP_H: f32 = 9.6191406250e-01; /* 0x3f764000 =12b cp */ - const CP_L: f32 = -1.1736857402e-04; /* 0xb8f623c6 =tail of cp_h */ - const IVLN2: f32 = 1.4426950216e+00; - const IVLN2_H: f32 = 1.4426879883e+00; - const IVLN2_L: f32 = 7.0526075433e-06; - - let mut z: f32; - let mut ax: f32; - let z_h: f32; - let z_l: f32; - let mut p_h: f32; - let mut p_l: f32; - let y1: f32; - let mut t1: f32; - let t2: f32; - let mut r: f32; - let s: f32; - let mut sn: f32; - let mut t: f32; - let mut u: f32; - let mut v: f32; - let mut w: f32; - let i: i32; - let mut j: i32; - let mut k: i32; - let mut yisint: i32; - let mut n: i32; - let hx: i32; - let hy: i32; - let mut ix: i32; - let iy: i32; - let mut is: i32; - - hx = x.to_bits() as i32; - hy = y.to_bits() as i32; - - ix = hx & 0x7fffffff; - iy = hy & 0x7fffffff; - - /* x**0 = 1, even if x is NaN */ - if iy == 0 { - return 1.0; - } - - /* 1**y = 1, even if y is NaN */ - if hx == 0x3f800000 { - return 1.0; - } - - /* NaN if either arg is NaN */ - if ix > 0x7f800000 || iy > 0x7f800000 { - return x + y; - } - - /* determine if y is an odd int when x < 0 - * yisint = 0 ... y is not an integer - * yisint = 1 ... y is an odd int - * yisint = 2 ... 
y is an even int - */ - yisint = 0; - if hx < 0 { - if iy >= 0x4b800000 { - yisint = 2; /* even integer y */ - } else if iy >= 0x3f800000 { - k = (iy >> 23) - 0x7f; /* exponent */ - j = iy >> (23 - k); - if (j << (23 - k)) == iy { - yisint = 2 - (j & 1); - } - } - } - - /* special value of y */ - if iy == 0x7f800000 { - /* y is +-inf */ - if ix == 0x3f800000 { - /* (-1)**+-inf is 1 */ - return 1.0; - } else if ix > 0x3f800000 { - /* (|x|>1)**+-inf = inf,0 */ - return if hy >= 0 { - y - } else { - 0.0 - }; - } else { - /* (|x|<1)**+-inf = 0,inf */ - return if hy >= 0 { - 0.0 - } else { - -y - }; - } - } - if iy == 0x3f800000 { - /* y is +-1 */ - return if hy >= 0 { - x - } else { - 1.0 / x - }; - } - - if hy == 0x40000000 { - /* y is 2 */ - return x * x; - } - - if hy == 0x3f000000 - /* y is 0.5 */ - && hx >= 0 - { - /* x >= +0 */ - return sqrtf(x); - } - - ax = fabsf(x); - /* special value of x */ - if ix == 0x7f800000 || ix == 0 || ix == 0x3f800000 { - /* x is +-0,+-inf,+-1 */ - z = ax; - if hy < 0 { - /* z = (1/|x|) */ - z = 1.0 / z; - } - - if hx < 0 { - if ((ix - 0x3f800000) | yisint) == 0 { - z = (z - z) / (z - z); /* (-1)**non-int is NaN */ - } else if yisint == 1 { - z = -z; /* (x<0)**odd = -(|x|**odd) */ - } - } - return z; - } - - sn = 1.0; /* sign of result */ - if hx < 0 { - if yisint == 0 { - /* (x<0)**(non-int) is NaN */ - return (x - x) / (x - x); - } - - if yisint == 1 { - /* (x<0)**(odd int) */ - sn = -1.0; - } - } - - /* |y| is HUGE */ - if iy > 0x4d000000 { - /* if |y| > 2**27 */ - /* over/underflow if x is not close to one */ - if ix < 0x3f7ffff8 { - return if hy < 0 { - sn * HUGE * HUGE - } else { - sn * TINY * TINY - }; - } - - if ix > 0x3f800007 { - return if hy > 0 { - sn * HUGE * HUGE - } else { - sn * TINY * TINY - }; - } - - /* now |1-x| is TINY <= 2**-20, suffice to compute - log(x) by x-x^2/2+x^3/3-x^4/4 */ - t = ax - 1.; /* t has 20 trailing zeros */ - w = (t * t) * (0.5 - t * (0.333333333333 - t * 0.25)); - u = IVLN2_H * t; /* IVLN2_H has 16 sig. bits */ - v = t * IVLN2_L - w * IVLN2; - t1 = u + v; - is = t1.to_bits() as i32; - t1 = f32::from_bits(is as u32 & 0xfffff000); - t2 = v - (t1 - u); - } else { - let mut s2: f32; - let mut s_h: f32; - let s_l: f32; - let mut t_h: f32; - let mut t_l: f32; - - n = 0; - /* take care subnormal number */ - if ix < 0x00800000 { - ax *= TWO24; - n -= 24; - ix = ax.to_bits() as i32; - } - n += ((ix) >> 23) - 0x7f; - j = ix & 0x007fffff; - /* determine interval */ - ix = j | 0x3f800000; /* normalize ix */ - if j <= 0x1cc471 { - /* |x|> 1) & 0xfffff000) | 0x20000000) as i32; - t_h = f32::from_bits(is as u32 + 0x00400000 + ((k as u32) << 21)); - t_l = ax - (t_h - i!(BP, k as usize)); - s_l = v * ((u - s_h * t_h) - s_h * t_l); - /* compute log(ax) */ - s2 = s * s; - r = s2 * s2 * (L1 + s2 * (L2 + s2 * (L3 + s2 * (L4 + s2 * (L5 + s2 * L6))))); - r += s_l * (s_h + s); - s2 = s_h * s_h; - t_h = 3.0 + s2 + r; - is = t_h.to_bits() as i32; - t_h = f32::from_bits(is as u32 & 0xfffff000); - t_l = r - ((t_h - 3.0) - s2); - /* u+v = s*(1+...) */ - u = s_h * t_h; - v = s_l * t_h + t_l * s; - /* 2/(3log2)*(s+...) 
*/ - p_h = u + v; - is = p_h.to_bits() as i32; - p_h = f32::from_bits(is as u32 & 0xfffff000); - p_l = v - (p_h - u); - z_h = CP_H * p_h; /* cp_h+cp_l = 2/(3*log2) */ - z_l = CP_L * p_h + p_l * CP + i!(DP_L, k as usize); - /* log2(ax) = (s+..)*2/(3*log2) = n + dp_h + z_h + z_l */ - t = n as f32; - t1 = ((z_h + z_l) + i!(DP_H, k as usize)) + t; - is = t1.to_bits() as i32; - t1 = f32::from_bits(is as u32 & 0xfffff000); - t2 = z_l - (((t1 - t) - i!(DP_H, k as usize)) - z_h); - }; - - /* split up y into y1+y2 and compute (y1+y2)*(t1+t2) */ - is = y.to_bits() as i32; - y1 = f32::from_bits(is as u32 & 0xfffff000); - p_l = (y - y1) * t1 + y * t2; - p_h = y1 * t1; - z = p_l + p_h; - j = z.to_bits() as i32; - if j > 0x43000000 { - /* if z > 128 */ - return sn * HUGE * HUGE; /* overflow */ - } else if j == 0x43000000 { - /* if z == 128 */ - if p_l + OVT > z - p_h { - return sn * HUGE * HUGE; /* overflow */ - } - } else if (j & 0x7fffffff) > 0x43160000 { - /* z < -150 */ - // FIXME: check should be (uint32_t)j > 0xc3160000 - return sn * TINY * TINY; /* underflow */ - } else if j as u32 == 0xc3160000 - /* z == -150 */ - && p_l <= z - p_h - { - return sn * TINY * TINY; /* underflow */ - } - - /* - * compute 2**(p_h+p_l) - */ - i = j & 0x7fffffff; - k = (i >> 23) - 0x7f; - n = 0; - if i > 0x3f000000 { - /* if |z| > 0.5, set n = [z+0.5] */ - n = j + (0x00800000 >> (k + 1)); - k = ((n & 0x7fffffff) >> 23) - 0x7f; /* new k for n */ - t = f32::from_bits(n as u32 & !(0x007fffff >> k)); - n = ((n & 0x007fffff) | 0x00800000) >> (23 - k); - if j < 0 { - n = -n; - } - p_h -= t; - } - t = p_l + p_h; - is = t.to_bits() as i32; - t = f32::from_bits(is as u32 & 0xffff8000); - u = t * LG2_H; - v = (p_l - (t - p_h)) * LG2 + t * LG2_L; - z = u + v; - w = v - (z - u); - t = z * z; - t1 = z - t * (P1 + t * (P2 + t * (P3 + t * (P4 + t * P5)))); - r = (z * t1) / (t1 - 2.0) - (w + z * w); - z = 1.0 - (r - z); - j = z.to_bits() as i32; - j += n << 23; - if (j >> 23) <= 0 { - /* subnormal output */ - z = scalbnf(z, n); - } else { - z = f32::from_bits(j as u32); - } - sn * z -} - -/* origin: FreeBSD /usr/src/lib/msun/src/e_sqrtf.c */ -/* - * Conversion to float by Ian Lance Taylor, Cygnus Support, ian@cygnus.com. - */ -/* - * ==================================================== - * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved. - * - * Developed at SunPro, a Sun Microsystems, Inc. business. - * Permission to use, copy, modify, and distribute this - * software is freely granted, provided that this notice - * is preserved. - * ==================================================== - */ - -pub fn sqrtf(x: f32) -> f32 { - #[cfg(target_feature = "sse")] - { - // Note: This path is unlikely since LLVM will usually have already - // optimized sqrt calls into hardware instructions if sse is available, - // but if someone does end up here they'll apprected the speed increase. - #[cfg(target_arch = "x86")] - use core::arch::x86::*; - #[cfg(target_arch = "x86_64")] - use core::arch::x86_64::*; - // SAFETY: safe, since `_mm_set_ss` takes a 32-bit float, and returns - // a 128-bit type with the lowest 32-bits as `x`, `_mm_sqrt_ss` calculates - // the sqrt of this 128-bit vector, and `_mm_cvtss_f32` extracts the lower - // 32-bits as a 32-bit float. 
- unsafe { - let m = _mm_set_ss(x); - let m_sqrt = _mm_sqrt_ss(m); - _mm_cvtss_f32(m_sqrt) - } - } - #[cfg(not(target_feature = "sse"))] - { - const TINY: f32 = 1.0e-30; - - let mut z: f32; - let sign: i32 = 0x80000000u32 as i32; - let mut ix: i32; - let mut s: i32; - let mut q: i32; - let mut m: i32; - let mut t: i32; - let mut i: i32; - let mut r: u32; - - ix = x.to_bits() as i32; - - /* take care of Inf and NaN */ - if (ix as u32 & 0x7f800000) == 0x7f800000 { - return x * x + x; /* sqrt(NaN)=NaN, sqrt(+inf)=+inf, sqrt(-inf)=sNaN */ - } - - /* take care of zero */ - if ix <= 0 { - if (ix & !sign) == 0 { - return x; /* sqrt(+-0) = +-0 */ - } - if ix < 0 { - return (x - x) / (x - x); /* sqrt(-ve) = sNaN */ - } - } - - /* normalize x */ - m = ix >> 23; - if m == 0 { - /* subnormal x */ - i = 0; - while ix & 0x00800000 == 0 { - ix <<= 1; - i = i + 1; - } - m -= i - 1; - } - m -= 127; /* unbias exponent */ - ix = (ix & 0x007fffff) | 0x00800000; - if m & 1 == 1 { - /* odd m, double x to make it even */ - ix += ix; - } - m >>= 1; /* m = [m/2] */ - - /* generate sqrt(x) bit by bit */ - ix += ix; - q = 0; - s = 0; - r = 0x01000000; /* r = moving bit from right to left */ - - while r != 0 { - t = s + r as i32; - if t <= ix { - s = t + r as i32; - ix -= t; - q += r as i32; - } - ix += ix; - r >>= 1; - } - - /* use floating add to find out rounding direction */ - if ix != 0 { - z = 1.0 - TINY; /* raise inexact flag */ - if z >= 1.0 { - z = 1.0 + TINY; - if z > 1.0 { - q += 2; - } else { - q += q & 1; - } - } - } - - ix = (q >> 1) + 0x3f000000; - ix += m << 23; - f32::from_bits(ix as u32) - } -} - -/// Absolute value (magnitude) (f32) -/// Calculates the absolute value (magnitude) of the argument `x`, -/// by direct manipulation of the bit representation of `x`. -pub fn fabsf(x: f32) -> f32 { - f32::from_bits(x.to_bits() & 0x7fffffff) -} - -pub fn scalbnf(mut x: f32, mut n: i32) -> f32 { - let x1p127 = f32::from_bits(0x7f000000); // 0x1p127f === 2 ^ 127 - let x1p_126 = f32::from_bits(0x800000); // 0x1p-126f === 2 ^ -126 - let x1p24 = f32::from_bits(0x4b800000); // 0x1p24f === 2 ^ 24 - - if n > 127 { - x *= x1p127; - n -= 127; - if n > 127 { - x *= x1p127; - n -= 127; - if n > 127 { - n = 127; - } - } - } else if n < -126 { - x *= x1p_126 * x1p24; - n += 126 - 24; - if n < -126 { - x *= x1p_126 * x1p24; - n += 126 - 24; - if n < -126 { - n = -126; - } - } - } - x * f32::from_bits(((0x7f + n) as u32) << 23) -} - -/* origin: FreeBSD /usr/src/lib/msun/src/e_pow.c */ -/* - * ==================================================== - * Copyright (C) 2004 by Sun Microsystems, Inc. All rights reserved. - * - * Permission to use, copy, modify, and distribute this - * software is freely granted, provided that this notice - * is preserved. - * ==================================================== - */ - -// pow(x,y) return x**y -// -// n -// Method: Let x = 2 * (1+f) -// 1. Compute and return log2(x) in two pieces: -// log2(x) = w1 + w2, -// where w1 has 53-24 = 29 bit trailing zeros. -// 2. Perform y*log2(x) = n+y' by simulating muti-precision -// arithmetic, where |y'|<=0.5. -// 3. Return x**y = 2**n*exp(y'*log2) -// -// Special cases: -// 1. (anything) ** 0 is 1 -// 2. 1 ** (anything) is 1 -// 3. (anything except 1) ** NAN is NAN -// 4. NAN ** (anything except 0) is NAN -// 5. +-(|x| > 1) ** +INF is +INF -// 6. +-(|x| > 1) ** -INF is +0 -// 7. +-(|x| < 1) ** +INF is +0 -// 8. +-(|x| < 1) ** -INF is +INF -// 9. -1 ** +-INF is 1 -// 10. +0 ** (+anything except 0, NAN) is +0 -// 11. 
-0 ** (+anything except 0, NAN, odd integer) is +0 -// 12. +0 ** (-anything except 0, NAN) is +INF, raise divbyzero -// 13. -0 ** (-anything except 0, NAN, odd integer) is +INF, raise divbyzero -// 14. -0 ** (+odd integer) is -0 -// 15. -0 ** (-odd integer) is -INF, raise divbyzero -// 16. +INF ** (+anything except 0,NAN) is +INF -// 17. +INF ** (-anything except 0,NAN) is +0 -// 18. -INF ** (+odd integer) is -INF -// 19. -INF ** (anything) = -0 ** (-anything), (anything except odd integer) -// 20. (anything) ** 1 is (anything) -// 21. (anything) ** -1 is 1/(anything) -// 22. (-anything) ** (integer) is (-1)**(integer)*(+anything**integer) -// 23. (-anything except 0 and inf) ** (non-integer) is NAN -// -// Accuracy: -// pow(x,y) returns x**y nearly rounded. In particular -// pow(integer,integer) -// always returns the correct integer provided it is -// representable. -// -// Constants : -// The hexadecimal values are the intended ones for the following -// constants. The decimal values may be used, provided that the -// compiler will convert from decimal to binary accurately enough -// to produce the hexadecimal values shown. - -pub fn powd(x: f64, y: f64) -> f64 { - const BP: [f64; 2] = [1.0, 1.5]; - const DP_H: [f64; 2] = [0.0, 5.84962487220764160156e-01]; /* 0x3fe2b803_40000000 */ - const DP_L: [f64; 2] = [0.0, 1.35003920212974897128e-08]; /* 0x3E4CFDEB, 0x43CFD006 */ - const TWO53: f64 = 9007199254740992.0; /* 0x43400000_00000000 */ - const HUGE: f64 = 1.0e300; - const TINY: f64 = 1.0e-300; - - // poly coefs for (3/2)*(log(x)-2s-2/3*s**3: - const L1: f64 = 5.99999999999994648725e-01; /* 0x3fe33333_33333303 */ - const L2: f64 = 4.28571428578550184252e-01; /* 0x3fdb6db6_db6fabff */ - const L3: f64 = 3.33333329818377432918e-01; /* 0x3fd55555_518f264d */ - const L4: f64 = 2.72728123808534006489e-01; /* 0x3fd17460_a91d4101 */ - const L5: f64 = 2.30660745775561754067e-01; /* 0x3fcd864a_93c9db65 */ - const L6: f64 = 2.06975017800338417784e-01; /* 0x3fca7e28_4a454eef */ - const P1: f64 = 1.66666666666666019037e-01; /* 0x3fc55555_5555553e */ - const P2: f64 = -2.77777777770155933842e-03; /* 0xbf66c16c_16bebd93 */ - const P3: f64 = 6.61375632143793436117e-05; /* 0x3f11566a_af25de2c */ - const P4: f64 = -1.65339022054652515390e-06; /* 0xbebbbd41_c5d26bf1 */ - const P5: f64 = 4.13813679705723846039e-08; /* 0x3e663769_72bea4d0 */ - const LG2: f64 = 6.93147180559945286227e-01; /* 0x3fe62e42_fefa39ef */ - const LG2_H: f64 = 6.93147182464599609375e-01; /* 0x3fe62e43_00000000 */ - const LG2_L: f64 = -1.90465429995776804525e-09; /* 0xbe205c61_0ca86c39 */ - const OVT: f64 = 8.0085662595372944372e-017; /* -(1024-log2(ovfl+.5ulp)) */ - const CP: f64 = 9.61796693925975554329e-01; /* 0x3feec709_dc3a03fd =2/(3ln2) */ - const CP_H: f64 = 9.61796700954437255859e-01; /* 0x3feec709_e0000000 =(float)cp */ - const CP_L: f64 = -7.02846165095275826516e-09; /* 0xbe3e2fe0_145b01f5 =tail of cp_h*/ - const IVLN2: f64 = 1.44269504088896338700e+00; /* 0x3ff71547_652b82fe =1/ln2 */ - const IVLN2_H: f64 = 1.44269502162933349609e+00; /* 0x3ff71547_60000000 =24b 1/ln2*/ - const IVLN2_L: f64 = 1.92596299112661746887e-08; /* 0x3e54ae0b_f85ddf44 =1/ln2 tail*/ - - let t1: f64; - let t2: f64; - - let (hx, lx): (i32, u32) = ((x.to_bits() >> 32) as i32, x.to_bits() as u32); - let (hy, ly): (i32, u32) = ((y.to_bits() >> 32) as i32, y.to_bits() as u32); - - let mut ix: i32 = (hx & 0x7fffffff) as i32; - let iy: i32 = (hy & 0x7fffffff) as i32; - - /* x**0 = 1, even if x is NaN */ - if ((iy as u32) | ly) == 0 { - return 1.0; - } - - /* 
1**y = 1, even if y is NaN */ - if hx == 0x3ff00000 && lx == 0 { - return 1.0; - } - - /* NaN if either arg is NaN */ - if ix > 0x7ff00000 - || (ix == 0x7ff00000 && lx != 0) - || iy > 0x7ff00000 - || (iy == 0x7ff00000 && ly != 0) - { - return x + y; - } - - /* determine if y is an odd int when x < 0 - * yisint = 0 ... y is not an integer - * yisint = 1 ... y is an odd int - * yisint = 2 ... y is an even int - */ - let mut yisint: i32 = 0; - let mut k: i32; - let mut j: i32; - if hx < 0 { - if iy >= 0x43400000 { - yisint = 2; /* even integer y */ - } else if iy >= 0x3ff00000 { - k = (iy >> 20) - 0x3ff; /* exponent */ - - if k > 20 { - j = (ly >> (52 - k)) as i32; - - if (j << (52 - k)) == (ly as i32) { - yisint = 2 - (j & 1); - } - } else if ly == 0 { - j = iy >> (20 - k); - - if (j << (20 - k)) == iy { - yisint = 2 - (j & 1); - } - } - } - } - - if ly == 0 { - /* special value of y */ - if iy == 0x7ff00000 { - /* y is +-inf */ - - return if ((ix - 0x3ff00000) | (lx as i32)) == 0 { - /* (-1)**+-inf is 1 */ - 1.0 - } else if ix >= 0x3ff00000 { - /* (|x|>1)**+-inf = inf,0 */ - if hy >= 0 { - y - } else { - 0.0 - } - } else { - /* (|x|<1)**+-inf = 0,inf */ - if hy >= 0 { - 0.0 - } else { - -y - } - }; - } - - if iy == 0x3ff00000 { - /* y is +-1 */ - return if hy >= 0 { - x - } else { - 1.0 / x - }; - } - - if hy == 0x40000000 { - /* y is 2 */ - return x * x; - } - - if hy == 0x3fe00000 { - /* y is 0.5 */ - if hx >= 0 { - /* x >= +0 */ - return sqrtd(x); - } - } - } - - let mut ax: f64 = fabsd(x); - if lx == 0 { - /* special value of x */ - if ix == 0x7ff00000 || ix == 0 || ix == 0x3ff00000 { - /* x is +-0,+-inf,+-1 */ - let mut z: f64 = ax; - - if hy < 0 { - /* z = (1/|x|) */ - z = 1.0 / z; - } - - if hx < 0 { - if ((ix - 0x3ff00000) | yisint) == 0 { - z = (z - z) / (z - z); /* (-1)**non-int is NaN */ - } else if yisint == 1 { - z = -z; /* (x<0)**odd = -(|x|**odd) */ - } - } - - return z; - } - } - - let mut s: f64 = 1.0; /* sign of result */ - if hx < 0 { - if yisint == 0 { - /* (x<0)**(non-int) is NaN */ - return (x - x) / (x - x); - } - - if yisint == 1 { - /* (x<0)**(odd int) */ - s = -1.0; - } - } - - /* |y| is HUGE */ - if iy > 0x41e00000 { - /* if |y| > 2**31 */ - if iy > 0x43f00000 { - /* if |y| > 2**64, must o/uflow */ - if ix <= 0x3fefffff { - return if hy < 0 { - HUGE * HUGE - } else { - TINY * TINY - }; - } - - if ix >= 0x3ff00000 { - return if hy > 0 { - HUGE * HUGE - } else { - TINY * TINY - }; - } - } - - /* over/underflow if x is not close to one */ - if ix < 0x3fefffff { - return if hy < 0 { - s * HUGE * HUGE - } else { - s * TINY * TINY - }; - } - if ix > 0x3ff00000 { - return if hy > 0 { - s * HUGE * HUGE - } else { - s * TINY * TINY - }; - } - - /* now |1-x| is TINY <= 2**-20, suffice to compute - log(x) by x-x^2/2+x^3/3-x^4/4 */ - let t: f64 = ax - 1.0; /* t has 20 trailing zeros */ - let w: f64 = (t * t) * (0.5 - t * (0.3333333333333333333333 - t * 0.25)); - let u: f64 = IVLN2_H * t; /* ivln2_h has 21 sig. 
bits */ - let v: f64 = t * IVLN2_L - w * IVLN2; - t1 = with_set_low_word(u + v, 0); - t2 = v - (t1 - u); - } else { - // double ss,s2,s_h,s_l,t_h,t_l; - let mut n: i32 = 0; - - if ix < 0x00100000 { - /* take care subnormal number */ - ax *= TWO53; - n -= 53; - ix = get_high_word(ax) as i32; - } - - n += (ix >> 20) - 0x3ff; - j = ix & 0x000fffff; - - /* determine interval */ - let k: i32; - ix = j | 0x3ff00000; /* normalize ix */ - if j <= 0x3988E { - /* |x|> 1) | 0x20000000) + 0x00080000 + ((k as u32) << 18), - ); - let t_l: f64 = ax - (t_h - i!(BP, k as usize)); - let s_l: f64 = v * ((u - s_h * t_h) - s_h * t_l); - - /* compute log(ax) */ - let s2: f64 = ss * ss; - let mut r: f64 = s2 * s2 * (L1 + s2 * (L2 + s2 * (L3 + s2 * (L4 + s2 * (L5 + s2 * L6))))); - r += s_l * (s_h + ss); - let s2: f64 = s_h * s_h; - let t_h: f64 = with_set_low_word(3.0 + s2 + r, 0); - let t_l: f64 = r - ((t_h - 3.0) - s2); - - /* u+v = ss*(1+...) */ - let u: f64 = s_h * t_h; - let v: f64 = s_l * t_h + t_l * ss; - - /* 2/(3log2)*(ss+...) */ - let p_h: f64 = with_set_low_word(u + v, 0); - let p_l = v - (p_h - u); - let z_h: f64 = CP_H * p_h; /* cp_h+cp_l = 2/(3*log2) */ - let z_l: f64 = CP_L * p_h + p_l * CP + i!(DP_L, k as usize); - - /* log2(ax) = (ss+..)*2/(3*log2) = n + dp_h + z_h + z_l */ - let t: f64 = n as f64; - t1 = with_set_low_word(((z_h + z_l) + i!(DP_H, k as usize)) + t, 0); - t2 = z_l - (((t1 - t) - i!(DP_H, k as usize)) - z_h); - } - - /* split up y into y1+y2 and compute (y1+y2)*(t1+t2) */ - let y1: f64 = with_set_low_word(y, 0); - let p_l: f64 = (y - y1) * t1 + y * t2; - let mut p_h: f64 = y1 * t1; - let z: f64 = p_l + p_h; - let mut j: i32 = (z.to_bits() >> 32) as i32; - let i: i32 = z.to_bits() as i32; - // let (j, i): (i32, i32) = ((z.to_bits() >> 32) as i32, z.to_bits() as i32); - - if j >= 0x40900000 { - /* z >= 1024 */ - if (j - 0x40900000) | i != 0 { - /* if z > 1024 */ - return s * HUGE * HUGE; /* overflow */ - } - - if p_l + OVT > z - p_h { - return s * HUGE * HUGE; /* overflow */ - } - } else if (j & 0x7fffffff) >= 0x4090cc00 { - /* z <= -1075 */ - // FIXME: instead of abs(j) use unsigned j - - if (((j as u32) - 0xc090cc00) | (i as u32)) != 0 { - /* z < -1075 */ - return s * TINY * TINY; /* underflow */ - } - - if p_l <= z - p_h { - return s * TINY * TINY; /* underflow */ - } - } - - /* compute 2**(p_h+p_l) */ - let i: i32 = j & (0x7fffffff as i32); - k = (i >> 20) - 0x3ff; - let mut n: i32 = 0; - - if i > 0x3fe00000 { - /* if |z| > 0.5, set n = [z+0.5] */ - n = j + (0x00100000 >> (k + 1)); - k = ((n & 0x7fffffff) >> 20) - 0x3ff; /* new k for n */ - let t: f64 = with_set_high_word(0.0, (n & !(0x000fffff >> k)) as u32); - n = ((n & 0x000fffff) | 0x00100000) >> (20 - k); - if j < 0 { - n = -n; - } - p_h -= t; - } - - let t: f64 = with_set_low_word(p_l + p_h, 0); - let u: f64 = t * LG2_H; - let v: f64 = (p_l - (t - p_h)) * LG2 + t * LG2_L; - let mut z: f64 = u + v; - let w: f64 = v - (z - u); - let t: f64 = z * z; - let t1: f64 = z - t * (P1 + t * (P2 + t * (P3 + t * (P4 + t * P5)))); - let r: f64 = (z * t1) / (t1 - 2.0) - (w + z * w); - z = 1.0 - (r - z); - j = get_high_word(z) as i32; - j += n << 20; - - if (j >> 20) <= 0 { - /* subnormal output */ - z = scalbnd(z, n); - } else { - z = with_set_high_word(z, j as u32); - } - - s * z -} - -/// Absolute value (magnitude) (f64) -/// Calculates the absolute value (magnitude) of the argument `x`, -/// by direct manipulation of the bit representation of `x`. 
-pub fn fabsd(x: f64) -> f64 { - f64::from_bits(x.to_bits() & (u64::MAX / 2)) -} - -pub fn scalbnd(x: f64, mut n: i32) -> f64 { - let x1p1023 = f64::from_bits(0x7fe0000000000000); // 0x1p1023 === 2 ^ 1023 - let x1p53 = f64::from_bits(0x4340000000000000); // 0x1p53 === 2 ^ 53 - let x1p_1022 = f64::from_bits(0x0010000000000000); // 0x1p-1022 === 2 ^ (-1022) - - let mut y = x; - - if n > 1023 { - y *= x1p1023; - n -= 1023; - if n > 1023 { - y *= x1p1023; - n -= 1023; - if n > 1023 { - n = 1023; - } - } - } else if n < -1022 { - /* make sure final n < -53 to avoid double - rounding in the subnormal range */ - y *= x1p_1022 * x1p53; - n += 1022 - 53; - if n < -1022 { - y *= x1p_1022 * x1p53; - n += 1022 - 53; - if n < -1022 { - n = -1022; - } - } - } - y * f64::from_bits(((0x3ff + n) as u64) << 52) -} - -/* origin: FreeBSD /usr/src/lib/msun/src/e_sqrt.c */ -/* - * ==================================================== - * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved. - * - * Developed at SunSoft, a Sun Microsystems, Inc. business. - * Permission to use, copy, modify, and distribute this - * software is freely granted, provided that this notice - * is preserved. - * ==================================================== - */ -/* sqrt(x) - * Return correctly rounded sqrt. - * ------------------------------------------ - * | Use the hardware sqrt if you have one | - * ------------------------------------------ - * Method: - * Bit by bit method using integer arithmetic. (Slow, but portable) - * 1. Normalization - * Scale x to y in [1,4) with even powers of 2: - * find an integer k such that 1 <= (y=x*2^(2k)) < 4, then - * sqrt(x) = 2^k * sqrt(y) - * 2. Bit by bit computation - * Let q = sqrt(y) truncated to i bit after binary point (q = 1), - * i 0 - * i+1 2 - * s = 2*q , and y = 2 * ( y - q ). (1) - * i i i i - * - * To compute q from q , one checks whether - * i+1 i - * - * -(i+1) 2 - * (q + 2 ) <= y. (2) - * i - * -(i+1) - * If (2) is false, then q = q ; otherwise q = q + 2 . - * i+1 i i+1 i - * - * With some algebraic manipulation, it is not difficult to see - * that (2) is equivalent to - * -(i+1) - * s + 2 <= y (3) - * i i - * - * The advantage of (3) is that s and y can be computed by - * i i - * the following recurrence formula: - * if (3) is false - * - * s = s , y = y ; (4) - * i+1 i i+1 i - * - * otherwise, - * -i -(i+1) - * s = s + 2 , y = y - s - 2 (5) - * i+1 i i+1 i i - * - * One may easily use induction to prove (4) and (5). - * Note. Since the left hand side of (3) contain only i+2 bits, - * it does not necessary to do a full (53-bit) comparison - * in (3). - * 3. Final rounding - * After generating the 53 bits result, we compute one more bit. - * Together with the remainder, we can decide whether the - * result is exact, bigger than 1/2ulp, or less than 1/2ulp - * (it will never equal to 1/2ulp). - * The rounding mode can be detected by checking whether - * huge + tiny is equal to huge, and whether huge - tiny is - * equal to huge for some floating point number "huge" and "tiny". - * - * Special cases: - * sqrt(+-0) = +-0 ... exact - * sqrt(inf) = inf - * sqrt(-ve) = NaN ... with invalid signal - * sqrt(NaN) = NaN ... with invalid signal for signaling NaN - */ - -pub fn sqrtd(x: f64) -> f64 { - #[cfg(target_feature = "sse2")] - { - // Note: This path is unlikely since LLVM will usually have already - // optimized sqrt calls into hardware instructions if sse2 is available, - // but if someone does end up here they'll apprected the speed increase. 
- #[cfg(target_arch = "x86")] - use core::arch::x86::*; - #[cfg(target_arch = "x86_64")] - use core::arch::x86_64::*; - // SAFETY: safe, since `_mm_set_sd` takes a 64-bit float, and returns - // a 128-bit type with the lowest 64-bits as `x`, `_mm_sqrt_ss` calculates - // the sqrt of this 128-bit vector, and `_mm_cvtss_f64` extracts the lower - // 64-bits as a 64-bit float. - unsafe { - let m = _mm_set_sd(x); - let m_sqrt = _mm_sqrt_pd(m); - _mm_cvtsd_f64(m_sqrt) - } - } - #[cfg(not(target_feature = "sse2"))] - { - use core::num::Wrapping; - - const TINY: f64 = 1.0e-300; - - let mut z: f64; - let sign: Wrapping = Wrapping(0x80000000); - let mut ix0: i32; - let mut s0: i32; - let mut q: i32; - let mut m: i32; - let mut t: i32; - let mut i: i32; - let mut r: Wrapping; - let mut t1: Wrapping; - let mut s1: Wrapping; - let mut ix1: Wrapping; - let mut q1: Wrapping; - - ix0 = (x.to_bits() >> 32) as i32; - ix1 = Wrapping(x.to_bits() as u32); - - /* take care of Inf and NaN */ - if (ix0 & 0x7ff00000) == 0x7ff00000 { - return x * x + x; /* sqrt(NaN)=NaN, sqrt(+inf)=+inf, sqrt(-inf)=sNaN */ - } - /* take care of zero */ - if ix0 <= 0 { - if ((ix0 & !(sign.0 as i32)) | ix1.0 as i32) == 0 { - return x; /* sqrt(+-0) = +-0 */ - } - if ix0 < 0 { - return (x - x) / (x - x); /* sqrt(-ve) = sNaN */ - } - } - /* normalize x */ - m = ix0 >> 20; - if m == 0 { - /* subnormal x */ - while ix0 == 0 { - m -= 21; - ix0 |= (ix1 >> 11).0 as i32; - ix1 <<= 21; - } - i = 0; - while (ix0 & 0x00100000) == 0 { - i += 1; - ix0 <<= 1; - } - m -= i - 1; - ix0 |= (ix1 >> (32 - i) as usize).0 as i32; - ix1 = ix1 << i as usize; - } - m -= 1023; /* unbias exponent */ - ix0 = (ix0 & 0x000fffff) | 0x00100000; - if (m & 1) == 1 { - /* odd m, double x to make it even */ - ix0 += ix0 + ((ix1 & sign) >> 31).0 as i32; - ix1 += ix1; - } - m >>= 1; /* m = [m/2] */ - - /* generate sqrt(x) bit by bit */ - ix0 += ix0 + ((ix1 & sign) >> 31).0 as i32; - ix1 += ix1; - q = 0; /* [q,q1] = sqrt(x) */ - q1 = Wrapping(0); - s0 = 0; - s1 = Wrapping(0); - r = Wrapping(0x00200000); /* r = moving bit from right to left */ - - while r != Wrapping(0) { - t = s0 + r.0 as i32; - if t <= ix0 { - s0 = t + r.0 as i32; - ix0 -= t; - q += r.0 as i32; - } - ix0 += ix0 + ((ix1 & sign) >> 31).0 as i32; - ix1 += ix1; - r >>= 1; - } - - r = sign; - while r != Wrapping(0) { - t1 = s1 + r; - t = s0; - if t < ix0 || (t == ix0 && t1 <= ix1) { - s1 = t1 + r; - if (t1 & sign) == sign && (s1 & sign) == Wrapping(0) { - s0 += 1; - } - ix0 -= t; - if ix1 < t1 { - ix0 -= 1; - } - ix1 -= t1; - q1 += r; - } - ix0 += ix0 + ((ix1 & sign) >> 31).0 as i32; - ix1 += ix1; - r >>= 1; - } - - /* use floating add to find out rounding direction */ - if (ix0 as u32 | ix1.0) != 0 { - z = 1.0 - TINY; /* raise inexact flag */ - if z >= 1.0 { - z = 1.0 + TINY; - if q1.0 == 0xffffffff { - q1 = Wrapping(0); - q += 1; - } else if z > 1.0 { - if q1.0 == 0xfffffffe { - q += 1; - } - q1 += Wrapping(2); - } else { - q1 += q1 & Wrapping(1); - } - } - } - ix0 = (q >> 1) + 0x3fe00000; - ix1 = q1 >> 1; - if (q & 1) == 1 { - ix1 |= sign; - } - ix0 += m << 20; - f64::from_bits((ix0 as u64) << 32 | ix1.0 as u64) - } -} - -#[inline] -fn get_high_word(x: f64) -> u32 { - (x.to_bits() >> 32) as u32 -} - -#[inline] -fn with_set_high_word(f: f64, hi: u32) -> f64 { - let mut tmp = f.to_bits(); - tmp &= 0x00000000_ffffffff; - tmp |= (hi as u64) << 32; - f64::from_bits(tmp) -} - -#[inline] -fn with_set_low_word(f: f64, lo: u32) -> f64 { - let mut tmp = f.to_bits(); - tmp &= 0xffffffff_00000000; - tmp |= lo as u64; - 
f64::from_bits(tmp) -} diff --git a/vendor/minimal-lexical/src/mask.rs b/vendor/minimal-lexical/src/mask.rs deleted file mode 100644 index 1957c8be03e125..00000000000000 --- a/vendor/minimal-lexical/src/mask.rs +++ /dev/null @@ -1,60 +0,0 @@ -//! Utilities to generate bitmasks. - -#![doc(hidden)] - -/// Generate a bitwise mask for the lower `n` bits. -/// -/// # Examples -/// -/// ```rust -/// # use minimal_lexical::mask::lower_n_mask; -/// # pub fn main() { -/// assert_eq!(lower_n_mask(2), 0b11); -/// # } -/// ``` -#[inline] -pub fn lower_n_mask(n: u64) -> u64 { - debug_assert!(n <= 64, "lower_n_mask() overflow in shl."); - - match n == 64 { - // u64::MAX for older Rustc versions. - true => 0xffff_ffff_ffff_ffff, - false => (1 << n) - 1, - } -} - -/// Calculate the halfway point for the lower `n` bits. -/// -/// # Examples -/// -/// ```rust -/// # use minimal_lexical::mask::lower_n_halfway; -/// # pub fn main() { -/// assert_eq!(lower_n_halfway(2), 0b10); -/// # } -/// ``` -#[inline] -pub fn lower_n_halfway(n: u64) -> u64 { - debug_assert!(n <= 64, "lower_n_halfway() overflow in shl."); - - match n == 0 { - true => 0, - false => nth_bit(n - 1), - } -} - -/// Calculate a scalar factor of 2 above the halfway point. -/// -/// # Examples -/// -/// ```rust -/// # use minimal_lexical::mask::nth_bit; -/// # pub fn main() { -/// assert_eq!(nth_bit(2), 0b100); -/// # } -/// ``` -#[inline] -pub fn nth_bit(n: u64) -> u64 { - debug_assert!(n < 64, "nth_bit() overflow in shl."); - 1 << n -} diff --git a/vendor/minimal-lexical/src/num.rs b/vendor/minimal-lexical/src/num.rs deleted file mode 100644 index 9f682b9cbb29c3..00000000000000 --- a/vendor/minimal-lexical/src/num.rs +++ /dev/null @@ -1,308 +0,0 @@ -//! Utilities for Rust numbers. - -#![doc(hidden)] - -#[cfg(all(not(feature = "std"), feature = "compact"))] -use crate::libm::{powd, powf}; -#[cfg(not(feature = "compact"))] -use crate::table::{SMALL_F32_POW10, SMALL_F64_POW10, SMALL_INT_POW10, SMALL_INT_POW5}; -#[cfg(not(feature = "compact"))] -use core::hint; -use core::ops; - -/// Generic floating-point type, to be used in generic code for parsing. -/// -/// Although the trait is part of the public API, the trait provides methods -/// and constants that are effectively non-public: they may be removed -/// at any time without any breaking changes. -pub trait Float: - Sized - + Copy - + PartialEq - + PartialOrd - + Send - + Sync - + ops::Add - + ops::AddAssign - + ops::Div - + ops::DivAssign - + ops::Mul - + ops::MulAssign - + ops::Rem - + ops::RemAssign - + ops::Sub - + ops::SubAssign - + ops::Neg -{ - /// Maximum number of digits that can contribute in the mantissa. - /// - /// We can exactly represent a float in radix `b` from radix 2 if - /// `b` is divisible by 2. This function calculates the exact number of - /// digits required to exactly represent that float. - /// - /// According to the "Handbook of Floating Point Arithmetic", - /// for IEEE754, with emin being the min exponent, p2 being the - /// precision, and b being the radix, the number of digits follows as: - /// - /// `−emin + p2 + ⌊(emin + 1) log(2, b) − log(1 − 2^(−p2), b)⌋` - /// - /// For f32, this follows as: - /// emin = -126 - /// p2 = 24 - /// - /// For f64, this follows as: - /// emin = -1022 - /// p2 = 53 - /// - /// In Python: - /// `-emin + p2 + math.floor((emin+1)*math.log(2, b) - math.log(1-2**(-p2), b))` - /// - /// This was used to calculate the maximum number of digits for [2, 36]. - const MAX_DIGITS: usize; - - // MASKS - - /// Bitmask for the sign bit. 
- const SIGN_MASK: u64; - /// Bitmask for the exponent, including the hidden bit. - const EXPONENT_MASK: u64; - /// Bitmask for the hidden bit in exponent, which is an implicit 1 in the fraction. - const HIDDEN_BIT_MASK: u64; - /// Bitmask for the mantissa (fraction), excluding the hidden bit. - const MANTISSA_MASK: u64; - - // PROPERTIES - - /// Size of the significand (mantissa) without hidden bit. - const MANTISSA_SIZE: i32; - /// Bias of the exponet - const EXPONENT_BIAS: i32; - /// Exponent portion of a denormal float. - const DENORMAL_EXPONENT: i32; - /// Maximum exponent value in float. - const MAX_EXPONENT: i32; - - // ROUNDING - - /// Mask to determine if a full-carry occurred (1 in bit above hidden bit). - const CARRY_MASK: u64; - - /// Bias for marking an invalid extended float. - // Value is `i16::MIN`, using hard-coded constants for older Rustc versions. - const INVALID_FP: i32 = -0x8000; - - // Maximum mantissa for the fast-path (`1 << 53` for f64). - const MAX_MANTISSA_FAST_PATH: u64 = 2_u64 << Self::MANTISSA_SIZE; - - // Largest exponent value `(1 << EXP_BITS) - 1`. - const INFINITE_POWER: i32 = Self::MAX_EXPONENT + Self::EXPONENT_BIAS; - - // Round-to-even only happens for negative values of q - // when q ≥ −4 in the 64-bit case and when q ≥ −17 in - // the 32-bitcase. - // - // When q ≥ 0,we have that 5^q ≤ 2m+1. In the 64-bit case,we - // have 5^q ≤ 2m+1 ≤ 2^54 or q ≤ 23. In the 32-bit case,we have - // 5^q ≤ 2m+1 ≤ 2^25 or q ≤ 10. - // - // When q < 0, we have w ≥ (2m+1)×5^−q. We must have that w < 2^64 - // so (2m+1)×5^−q < 2^64. We have that 2m+1 > 2^53 (64-bit case) - // or 2m+1 > 2^24 (32-bit case). Hence,we must have 2^53×5^−q < 2^64 - // (64-bit) and 2^24×5^−q < 2^64 (32-bit). Hence we have 5^−q < 2^11 - // or q ≥ −4 (64-bit case) and 5^−q < 2^40 or q ≥ −17 (32-bitcase). - // - // Thus we have that we only need to round ties to even when - // we have that q ∈ [−4,23](in the 64-bit case) or q∈[−17,10] - // (in the 32-bit case). In both cases,the power of five(5^|q|) - // fits in a 64-bit word. - const MIN_EXPONENT_ROUND_TO_EVEN: i32; - const MAX_EXPONENT_ROUND_TO_EVEN: i32; - - /// Minimum normal exponent value `-(1 << (EXPONENT_SIZE - 1)) + 1`. - const MINIMUM_EXPONENT: i32; - - /// Smallest decimal exponent for a non-zero value. - const SMALLEST_POWER_OF_TEN: i32; - - /// Largest decimal exponent for a non-infinite value. - const LARGEST_POWER_OF_TEN: i32; - - /// Minimum exponent that for a fast path case, or `-⌊(MANTISSA_SIZE+1)/log2(10)⌋` - const MIN_EXPONENT_FAST_PATH: i32; - - /// Maximum exponent that for a fast path case, or `⌊(MANTISSA_SIZE+1)/log2(5)⌋` - const MAX_EXPONENT_FAST_PATH: i32; - - /// Maximum exponent that can be represented for a disguised-fast path case. - /// This is `MAX_EXPONENT_FAST_PATH + ⌊(MANTISSA_SIZE+1)/log2(10)⌋` - const MAX_EXPONENT_DISGUISED_FAST_PATH: i32; - - /// Convert 64-bit integer to float. - fn from_u64(u: u64) -> Self; - - // Re-exported methods from std. - fn from_bits(u: u64) -> Self; - fn to_bits(self) -> u64; - - /// Get a small power-of-radix for fast-path multiplication. - /// - /// # Safety - /// - /// Safe as long as the exponent is smaller than the table size. - unsafe fn pow_fast_path(exponent: usize) -> Self; - - /// Get a small, integral power-of-radix for fast-path multiplication. - /// - /// # Safety - /// - /// Safe as long as the exponent is smaller than the table size. 
- #[inline(always)] - unsafe fn int_pow_fast_path(exponent: usize, radix: u32) -> u64 { - // SAFETY: safe as long as the exponent is smaller than the radix table. - #[cfg(not(feature = "compact"))] - return match radix { - 5 => unsafe { *SMALL_INT_POW5.get_unchecked(exponent) }, - 10 => unsafe { *SMALL_INT_POW10.get_unchecked(exponent) }, - _ => unsafe { hint::unreachable_unchecked() }, - }; - - #[cfg(feature = "compact")] - return (radix as u64).pow(exponent as u32); - } - - /// Returns true if the float is a denormal. - #[inline] - fn is_denormal(self) -> bool { - self.to_bits() & Self::EXPONENT_MASK == 0 - } - - /// Get exponent component from the float. - #[inline] - fn exponent(self) -> i32 { - if self.is_denormal() { - return Self::DENORMAL_EXPONENT; - } - - let bits = self.to_bits(); - let biased_e: i32 = ((bits & Self::EXPONENT_MASK) >> Self::MANTISSA_SIZE) as i32; - biased_e - Self::EXPONENT_BIAS - } - - /// Get mantissa (significand) component from float. - #[inline] - fn mantissa(self) -> u64 { - let bits = self.to_bits(); - let s = bits & Self::MANTISSA_MASK; - if !self.is_denormal() { - s + Self::HIDDEN_BIT_MASK - } else { - s - } - } -} - -impl Float for f32 { - const MAX_DIGITS: usize = 114; - const SIGN_MASK: u64 = 0x80000000; - const EXPONENT_MASK: u64 = 0x7F800000; - const HIDDEN_BIT_MASK: u64 = 0x00800000; - const MANTISSA_MASK: u64 = 0x007FFFFF; - const MANTISSA_SIZE: i32 = 23; - const EXPONENT_BIAS: i32 = 127 + Self::MANTISSA_SIZE; - const DENORMAL_EXPONENT: i32 = 1 - Self::EXPONENT_BIAS; - const MAX_EXPONENT: i32 = 0xFF - Self::EXPONENT_BIAS; - const CARRY_MASK: u64 = 0x1000000; - const MIN_EXPONENT_ROUND_TO_EVEN: i32 = -17; - const MAX_EXPONENT_ROUND_TO_EVEN: i32 = 10; - const MINIMUM_EXPONENT: i32 = -127; - const SMALLEST_POWER_OF_TEN: i32 = -65; - const LARGEST_POWER_OF_TEN: i32 = 38; - const MIN_EXPONENT_FAST_PATH: i32 = -10; - const MAX_EXPONENT_FAST_PATH: i32 = 10; - const MAX_EXPONENT_DISGUISED_FAST_PATH: i32 = 17; - - #[inline(always)] - unsafe fn pow_fast_path(exponent: usize) -> Self { - // SAFETY: safe as long as the exponent is smaller than the radix table. - #[cfg(not(feature = "compact"))] - return unsafe { *SMALL_F32_POW10.get_unchecked(exponent) }; - - #[cfg(feature = "compact")] - return powf(10.0f32, exponent as f32); - } - - #[inline] - fn from_u64(u: u64) -> f32 { - u as _ - } - - #[inline] - fn from_bits(u: u64) -> f32 { - // Constant is `u32::MAX` for older Rustc versions. 
- debug_assert!(u <= 0xffff_ffff); - f32::from_bits(u as u32) - } - - #[inline] - fn to_bits(self) -> u64 { - f32::to_bits(self) as u64 - } -} - -impl Float for f64 { - const MAX_DIGITS: usize = 769; - const SIGN_MASK: u64 = 0x8000000000000000; - const EXPONENT_MASK: u64 = 0x7FF0000000000000; - const HIDDEN_BIT_MASK: u64 = 0x0010000000000000; - const MANTISSA_MASK: u64 = 0x000FFFFFFFFFFFFF; - const MANTISSA_SIZE: i32 = 52; - const EXPONENT_BIAS: i32 = 1023 + Self::MANTISSA_SIZE; - const DENORMAL_EXPONENT: i32 = 1 - Self::EXPONENT_BIAS; - const MAX_EXPONENT: i32 = 0x7FF - Self::EXPONENT_BIAS; - const CARRY_MASK: u64 = 0x20000000000000; - const MIN_EXPONENT_ROUND_TO_EVEN: i32 = -4; - const MAX_EXPONENT_ROUND_TO_EVEN: i32 = 23; - const MINIMUM_EXPONENT: i32 = -1023; - const SMALLEST_POWER_OF_TEN: i32 = -342; - const LARGEST_POWER_OF_TEN: i32 = 308; - const MIN_EXPONENT_FAST_PATH: i32 = -22; - const MAX_EXPONENT_FAST_PATH: i32 = 22; - const MAX_EXPONENT_DISGUISED_FAST_PATH: i32 = 37; - - #[inline(always)] - unsafe fn pow_fast_path(exponent: usize) -> Self { - // SAFETY: safe as long as the exponent is smaller than the radix table. - #[cfg(not(feature = "compact"))] - return unsafe { *SMALL_F64_POW10.get_unchecked(exponent) }; - - #[cfg(feature = "compact")] - return powd(10.0f64, exponent as f64); - } - - #[inline] - fn from_u64(u: u64) -> f64 { - u as _ - } - - #[inline] - fn from_bits(u: u64) -> f64 { - f64::from_bits(u) - } - - #[inline] - fn to_bits(self) -> u64 { - f64::to_bits(self) - } -} - -#[inline(always)] -#[cfg(all(feature = "std", feature = "compact"))] -pub fn powf(x: f32, y: f32) -> f32 { - x.powf(y) -} - -#[inline(always)] -#[cfg(all(feature = "std", feature = "compact"))] -pub fn powd(x: f64, y: f64) -> f64 { - x.powf(y) -} diff --git a/vendor/minimal-lexical/src/number.rs b/vendor/minimal-lexical/src/number.rs deleted file mode 100644 index 5981f9dd79232f..00000000000000 --- a/vendor/minimal-lexical/src/number.rs +++ /dev/null @@ -1,83 +0,0 @@ -//! Representation of a float as the significant digits and exponent. -//! -//! This is adapted from [fast-float-rust](https://github.com/aldanor/fast-float-rust), -//! a port of [fast_float](https://github.com/fastfloat/fast_float) to Rust. - -#![doc(hidden)] - -#[cfg(feature = "nightly")] -use crate::fpu::set_precision; -use crate::num::Float; - -/// Representation of a number as the significant digits and exponent. -/// -/// This is only used if the exponent base and the significant digit -/// radix are the same, since we need to be able to move powers in and -/// out of the exponent. -#[derive(Clone, Copy, Debug, Default, PartialEq, Eq)] -pub struct Number { - /// The exponent of the float, scaled to the mantissa. - pub exponent: i32, - /// The significant digits of the float. - pub mantissa: u64, - /// If the significant digits were truncated. - pub many_digits: bool, -} - -impl Number { - /// Detect if the float can be accurately reconstructed from native floats. - #[inline] - pub fn is_fast_path(&self) -> bool { - F::MIN_EXPONENT_FAST_PATH <= self.exponent - && self.exponent <= F::MAX_EXPONENT_DISGUISED_FAST_PATH - && self.mantissa <= F::MAX_MANTISSA_FAST_PATH - && !self.many_digits - } - - /// The fast path algorithmn using machine-sized integers and floats. - /// - /// This is extracted into a separate function so that it can be attempted before constructing - /// a Decimal. 
This only works if both the mantissa and the exponent - /// can be exactly represented as a machine float, since IEE-754 guarantees - /// no rounding will occur. - /// - /// There is an exception: disguised fast-path cases, where we can shift - /// powers-of-10 from the exponent to the significant digits. - pub fn try_fast_path(&self) -> Option { - // The fast path crucially depends on arithmetic being rounded to the correct number of bits - // without any intermediate rounding. On x86 (without SSE or SSE2) this requires the precision - // of the x87 FPU stack to be changed so that it directly rounds to 64/32 bit. - // The `set_precision` function takes care of setting the precision on architectures which - // require setting it by changing the global state (like the control word of the x87 FPU). - #[cfg(feature = "nightly")] - let _cw = set_precision::(); - - if self.is_fast_path::() { - let max_exponent = F::MAX_EXPONENT_FAST_PATH; - Some(if self.exponent <= max_exponent { - // normal fast path - let value = F::from_u64(self.mantissa); - if self.exponent < 0 { - // SAFETY: safe, since the `exponent <= max_exponent`. - value / unsafe { F::pow_fast_path((-self.exponent) as _) } - } else { - // SAFETY: safe, since the `exponent <= max_exponent`. - value * unsafe { F::pow_fast_path(self.exponent as _) } - } - } else { - // disguised fast path - let shift = self.exponent - max_exponent; - // SAFETY: safe, since `shift <= (max_disguised - max_exponent)`. - let int_power = unsafe { F::int_pow_fast_path(shift as usize, 10) }; - let mantissa = self.mantissa.checked_mul(int_power)?; - if mantissa > F::MAX_MANTISSA_FAST_PATH { - return None; - } - // SAFETY: safe, since the `table.len() - 1 == max_exponent`. - F::from_u64(mantissa) * unsafe { F::pow_fast_path(max_exponent as _) } - }) - } else { - None - } - } -} diff --git a/vendor/minimal-lexical/src/parse.rs b/vendor/minimal-lexical/src/parse.rs deleted file mode 100644 index 9349699eb35e53..00000000000000 --- a/vendor/minimal-lexical/src/parse.rs +++ /dev/null @@ -1,201 +0,0 @@ -//! Parse byte iterators to float. - -#![doc(hidden)] - -#[cfg(feature = "compact")] -use crate::bellerophon::bellerophon; -use crate::extended_float::{extended_to_float, ExtendedFloat}; -#[cfg(not(feature = "compact"))] -use crate::lemire::lemire; -use crate::num::Float; -use crate::number::Number; -use crate::slow::slow; - -/// Try to parse the significant digits quickly. -/// -/// This attempts a very quick parse, to deal with common cases. -/// -/// * `integer` - Slice containing the integer digits. -/// * `fraction` - Slice containing the fraction digits. -#[inline] -fn parse_number_fast<'a, Iter1, Iter2>( - integer: Iter1, - fraction: Iter2, - exponent: i32, -) -> Option -where - Iter1: Iterator, - Iter2: Iterator, -{ - let mut num = Number::default(); - let mut integer_count: usize = 0; - let mut fraction_count: usize = 0; - for &c in integer { - integer_count += 1; - let digit = c - b'0'; - num.mantissa = num.mantissa.wrapping_mul(10).wrapping_add(digit as u64); - } - for &c in fraction { - fraction_count += 1; - let digit = c - b'0'; - num.mantissa = num.mantissa.wrapping_mul(10).wrapping_add(digit as u64); - } - - if integer_count + fraction_count <= 19 { - // Can't overflow, since must be <= 19. - num.exponent = exponent.saturating_sub(fraction_count as i32); - Some(num) - } else { - None - } -} - -/// Parse the significant digits of the float and adjust the exponent. -/// -/// * `integer` - Slice containing the integer digits. 
-/// * `fraction` - Slice containing the fraction digits. -#[inline] -fn parse_number<'a, Iter1, Iter2>(mut integer: Iter1, mut fraction: Iter2, exponent: i32) -> Number -where - Iter1: Iterator + Clone, - Iter2: Iterator + Clone, -{ - // NOTE: for performance, we do this in 2 passes: - if let Some(num) = parse_number_fast(integer.clone(), fraction.clone(), exponent) { - return num; - } - - // Can only add 19 digits. - let mut num = Number::default(); - let mut count = 0; - while let Some(&c) = integer.next() { - count += 1; - if count == 20 { - // Only the integer digits affect the exponent. - num.many_digits = true; - num.exponent = exponent.saturating_add(into_i32(1 + integer.count())); - return num; - } else { - let digit = c - b'0'; - num.mantissa = num.mantissa * 10 + digit as u64; - } - } - - // Skip leading fraction zeros. - // This is required otherwise we might have a 0 mantissa and many digits. - let mut fraction_count: usize = 0; - if count == 0 { - for &c in &mut fraction { - fraction_count += 1; - if c != b'0' { - count += 1; - let digit = c - b'0'; - num.mantissa = num.mantissa * 10 + digit as u64; - break; - } - } - } - for c in fraction { - fraction_count += 1; - count += 1; - if count == 20 { - num.many_digits = true; - // This can't wrap, since we have at most 20 digits. - // We've adjusted the exponent too high by `fraction_count - 1`. - // Note: -1 is due to incrementing this loop iteration, which we - // didn't use. - num.exponent = exponent.saturating_sub(fraction_count as i32 - 1); - return num; - } else { - let digit = c - b'0'; - num.mantissa = num.mantissa * 10 + digit as u64; - } - } - - // No truncated digits: easy. - // Cannot overflow: <= 20 digits. - num.exponent = exponent.saturating_sub(fraction_count as i32); - num -} - -/// Parse float from extracted float components. -/// -/// * `integer` - Cloneable, forward iterator over integer digits. -/// * `fraction` - Cloneable, forward iterator over integer digits. -/// * `exponent` - Parsed, 32-bit exponent. -/// -/// # Preconditions -/// 1. The integer should not have leading zeros. -/// 2. The fraction should not have trailing zeros. -/// 3. All bytes in `integer` and `fraction` should be valid digits, -/// in the range [`b'0', b'9']. -/// -/// # Panics -/// -/// Although passing garbage input will not cause memory safety issues, -/// it is very likely to cause a panic with a large number of digits, or -/// in debug mode. The big-integer arithmetic without the `alloc` feature -/// assumes a maximum, fixed-width input, which assumes at maximum a -/// value of `10^(769 + 342)`, or ~4000 bits of storage. Passing in -/// nonsensical digits may require up to ~6000 bits of storage, which will -/// panic when attempting to add it to the big integer. It is therefore -/// up to the caller to validate this input. -/// -/// We cannot efficiently remove trailing zeros while only accepting a -/// forward iterator. -pub fn parse_float<'a, F, Iter1, Iter2>(integer: Iter1, fraction: Iter2, exponent: i32) -> F -where - F: Float, - Iter1: Iterator + Clone, - Iter2: Iterator + Clone, -{ - // Parse the mantissa and attempt the fast and moderate-path algorithms. - let num = parse_number(integer.clone(), fraction.clone(), exponent); - // Try the fast-path algorithm. - if let Some(value) = num.try_fast_path() { - return value; - } - - // Now try the moderate path algorithm. - let mut fp = moderate_path::(&num); - if fp.exp < 0 { - // Undo the invalid extended float biasing. 
- fp.exp -= F::INVALID_FP; - fp = slow::(num, fp, integer, fraction); - } - - // Unable to correctly round the float using the fast or moderate algorithms. - // Fallback to a slower, but always correct algorithm. If we have - // lossy, we can't be here. - extended_to_float::(fp) -} - -/// Wrapper for different moderate-path algorithms. -/// A return exponent of `-1` indicates an invalid value. -#[inline] -pub fn moderate_path(num: &Number) -> ExtendedFloat { - #[cfg(not(feature = "compact"))] - return lemire::(num); - - #[cfg(feature = "compact")] - return bellerophon::(num); -} - -/// Convert usize into i32 without overflow. -/// -/// This is needed to ensure when adjusting the exponent relative to -/// the mantissa we do not overflow for comically-long exponents. -#[inline] -fn into_i32(value: usize) -> i32 { - if value > i32::max_value() as usize { - i32::max_value() - } else { - value as i32 - } -} - -// Add digit to mantissa. -#[inline] -pub fn add_digit(value: u64, digit: u8) -> Option { - value.checked_mul(10)?.checked_add(digit as u64) -} diff --git a/vendor/minimal-lexical/src/rounding.rs b/vendor/minimal-lexical/src/rounding.rs deleted file mode 100644 index 7c466dec4d18e5..00000000000000 --- a/vendor/minimal-lexical/src/rounding.rs +++ /dev/null @@ -1,131 +0,0 @@ -//! Defines rounding schemes for floating-point numbers. - -#![doc(hidden)] - -use crate::extended_float::ExtendedFloat; -use crate::mask::{lower_n_halfway, lower_n_mask}; -use crate::num::Float; - -// ROUNDING -// -------- - -/// Round an extended-precision float to the nearest machine float. -/// -/// Shifts the significant digits into place, adjusts the exponent, -/// so it can be easily converted to a native float. -#[cfg_attr(not(feature = "compact"), inline)] -pub fn round(fp: &mut ExtendedFloat, cb: Cb) -where - F: Float, - Cb: Fn(&mut ExtendedFloat, i32), -{ - let fp_inf = ExtendedFloat { - mant: 0, - exp: F::INFINITE_POWER, - }; - - // Calculate our shift in significant digits. - let mantissa_shift = 64 - F::MANTISSA_SIZE - 1; - - // Check for a denormal float, if after the shift the exponent is negative. - if -fp.exp >= mantissa_shift { - // Have a denormal float that isn't a literal 0. - // The extra 1 is to adjust for the denormal float, which is - // `1 - F::EXPONENT_BIAS`. This works as before, because our - // old logic rounded to `F::DENORMAL_EXPONENT` (now 1), and then - // checked if `exp == F::DENORMAL_EXPONENT` and no hidden mask - // bit was set. Here, we handle that here, rather than later. - // - // This might round-down to 0, but shift will be at **max** 65, - // for halfway cases rounding towards 0. - let shift = -fp.exp + 1; - debug_assert!(shift <= 65); - cb(fp, shift.min(64)); - // Check for round-up: if rounding-nearest carried us to the hidden bit. - fp.exp = (fp.mant >= F::HIDDEN_BIT_MASK) as i32; - return; - } - - // The float is normal, round to the hidden bit. - cb(fp, mantissa_shift); - - // Check if we carried, and if so, shift the bit to the hidden bit. - let carry_mask = F::CARRY_MASK; - if fp.mant & carry_mask == carry_mask { - fp.mant >>= 1; - fp.exp += 1; - } - - // Handle if we carried and check for overflow again. - if fp.exp >= F::INFINITE_POWER { - // Exponent is above largest normal value, must be infinite. - *fp = fp_inf; - return; - } - - // Remove the hidden bit. - fp.mant &= F::MANTISSA_MASK; -} - -/// Shift right N-bytes and round towards a direction. -/// -/// Callback should take the following parameters: -/// 1. is_odd -/// 1. is_halfway -/// 1. 
is_above -#[cfg_attr(not(feature = "compact"), inline)] -pub fn round_nearest_tie_even(fp: &mut ExtendedFloat, shift: i32, cb: Cb) -where - // is_odd, is_halfway, is_above - Cb: Fn(bool, bool, bool) -> bool, -{ - // Ensure we've already handled denormal values that underflow. - debug_assert!(shift <= 64); - - // Extract the truncated bits using mask. - // Calculate if the value of the truncated bits are either above - // the mid-way point, or equal to it. - // - // For example, for 4 truncated bytes, the mask would be 0b1111 - // and the midway point would be 0b1000. - let mask = lower_n_mask(shift as u64); - let halfway = lower_n_halfway(shift as u64); - let truncated_bits = fp.mant & mask; - let is_above = truncated_bits > halfway; - let is_halfway = truncated_bits == halfway; - - // Bit shift so the leading bit is in the hidden bit. - // This optimixes pretty well: - // ```text - // mov ecx, esi - // shr rdi, cl - // xor eax, eax - // cmp esi, 64 - // cmovne rax, rdi - // ret - // ``` - fp.mant = match shift == 64 { - true => 0, - false => fp.mant >> shift, - }; - fp.exp += shift; - - // Extract the last bit after shifting (and determine if it is odd). - let is_odd = fp.mant & 1 == 1; - - // Calculate if we need to roundup. - // We need to roundup if we are above halfway, or if we are odd - // and at half-way (need to tie-to-even). Avoid the branch here. - fp.mant += cb(is_odd, is_halfway, is_above) as u64; -} - -/// Round our significant digits into place, truncating them. -#[cfg_attr(not(feature = "compact"), inline)] -pub fn round_down(fp: &mut ExtendedFloat, shift: i32) { - // Might have a shift greater than 64 if we have an error. - fp.mant = match shift == 64 { - true => 0, - false => fp.mant >> shift, - }; - fp.exp += shift; -} diff --git a/vendor/minimal-lexical/src/slow.rs b/vendor/minimal-lexical/src/slow.rs deleted file mode 100644 index 59d526ba42343f..00000000000000 --- a/vendor/minimal-lexical/src/slow.rs +++ /dev/null @@ -1,403 +0,0 @@ -//! Slow, fallback cases where we cannot unambiguously round a float. -//! -//! This occurs when we cannot determine the exact representation using -//! both the fast path (native) cases nor the Lemire/Bellerophon algorithms, -//! and therefore must fallback to a slow, arbitrary-precision representation. - -#![doc(hidden)] - -use crate::bigint::{Bigint, Limb, LIMB_BITS}; -use crate::extended_float::{extended_to_float, ExtendedFloat}; -use crate::num::Float; -use crate::number::Number; -use crate::rounding::{round, round_down, round_nearest_tie_even}; -use core::cmp; - -// ALGORITHM -// --------- - -/// Parse the significant digits and biased, binary exponent of a float. -/// -/// This is a fallback algorithm that uses a big-integer representation -/// of the float, and therefore is considerably slower than faster -/// approximations. However, it will always determine how to round -/// the significant digits to the nearest machine float, allowing -/// use to handle near half-way cases. -/// -/// Near half-way cases are halfway between two consecutive machine floats. -/// For example, the float `16777217.0` has a bitwise representation of -/// `100000000000000000000000 1`. Rounding to a single-precision float, -/// the trailing `1` is truncated. Using round-nearest, tie-even, any -/// value above `16777217.0` must be rounded up to `16777218.0`, while -/// any value before or equal to `16777217.0` must be rounded down -/// to `16777216.0`. 
These near-halfway conversions therefore may require -/// a large number of digits to unambiguously determine how to round. -#[inline] -pub fn slow<'a, F, Iter1, Iter2>( - num: Number, - fp: ExtendedFloat, - integer: Iter1, - fraction: Iter2, -) -> ExtendedFloat -where - F: Float, - Iter1: Iterator + Clone, - Iter2: Iterator + Clone, -{ - // Ensure our preconditions are valid: - // 1. The significant digits are not shifted into place. - debug_assert!(fp.mant & (1 << 63) != 0); - - // This assumes the sign bit has already been parsed, and we're - // starting with the integer digits, and the float format has been - // correctly validated. - let sci_exp = scientific_exponent(&num); - - // We have 2 major algorithms we use for this: - // 1. An algorithm with a finite number of digits and a positive exponent. - // 2. An algorithm with a finite number of digits and a negative exponent. - let (bigmant, digits) = parse_mantissa(integer, fraction, F::MAX_DIGITS); - let exponent = sci_exp + 1 - digits as i32; - if exponent >= 0 { - positive_digit_comp::(bigmant, exponent) - } else { - negative_digit_comp::(bigmant, fp, exponent) - } -} - -/// Generate the significant digits with a positive exponent relative to mantissa. -pub fn positive_digit_comp(mut bigmant: Bigint, exponent: i32) -> ExtendedFloat { - // Simple, we just need to multiply by the power of the radix. - // Now, we can calculate the mantissa and the exponent from this. - // The binary exponent is the binary exponent for the mantissa - // shifted to the hidden bit. - bigmant.pow(10, exponent as u32).unwrap(); - - // Get the exact representation of the float from the big integer. - // hi64 checks **all** the remaining bits after the mantissa, - // so it will check if **any** truncated digits exist. - let (mant, is_truncated) = bigmant.hi64(); - let exp = bigmant.bit_length() as i32 - 64 + F::EXPONENT_BIAS; - let mut fp = ExtendedFloat { - mant, - exp, - }; - - // Shift the digits into position and determine if we need to round-up. - round::(&mut fp, |f, s| { - round_nearest_tie_even(f, s, |is_odd, is_halfway, is_above| { - is_above || (is_halfway && is_truncated) || (is_odd && is_halfway) - }); - }); - fp -} - -/// Generate the significant digits with a negative exponent relative to mantissa. -/// -/// This algorithm is quite simple: we have the significant digits `m1 * b^N1`, -/// where `m1` is the bigint mantissa, `b` is the radix, and `N1` is the radix -/// exponent. We then calculate the theoretical representation of `b+h`, which -/// is `m2 * 2^N2`, where `m2` is the bigint mantissa and `N2` is the binary -/// exponent. If we had infinite, efficient floating precision, this would be -/// equal to `m1 / b^-N1` and then compare it to `m2 * 2^N2`. -/// -/// Since we cannot divide and keep precision, we must multiply the other: -/// if we want to do `m1 / b^-N1 >= m2 * 2^N2`, we can do -/// `m1 >= m2 * b^-N1 * 2^N2` Going to the decimal case, we can show and example -/// and simplify this further: `m1 >= m2 * 2^N2 * 10^-N1`. Since we can remove -/// a power-of-two, this is `m1 >= m2 * 2^(N2 - N1) * 5^-N1`. Therefore, if -/// `N2 - N1 > 0`, we need have `m1 >= m2 * 2^(N2 - N1) * 5^-N1`, otherwise, -/// we have `m1 * 2^(N1 - N2) >= m2 * 5^-N1`, where the resulting exponents -/// are all positive. -/// -/// This allows us to compare both floats using integers efficiently -/// without any loss of precision. 
-#[allow(clippy::comparison_chain)]
-pub fn negative_digit_comp<F: Float>(
-    bigmant: Bigint,
-    mut fp: ExtendedFloat,
-    exponent: i32,
-) -> ExtendedFloat {
-    // Ensure our preconditions are valid:
-    // 1. The significant digits are not shifted into place.
-    debug_assert!(fp.mant & (1 << 63) != 0);
-
-    // Get the significant digits and radix exponent for the real digits.
-    let mut real_digits = bigmant;
-    let real_exp = exponent;
-    debug_assert!(real_exp < 0);
-
-    // Round down our extended-precision float and calculate `b`.
-    let mut b = fp;
-    round::<F, _>(&mut b, round_down);
-    let b = extended_to_float::<F>(b);
-
-    // Get the significant digits and the binary exponent for `b+h`.
-    let theor = bh(b);
-    let mut theor_digits = Bigint::from_u64(theor.mant);
-    let theor_exp = theor.exp;
-
-    // We need to scale the real digits and `b+h` digits to be the same
-    // order. We currently have `real_exp`, in `radix`, that needs to be
-    // shifted to `theor_digits` (since it is negative), and `theor_exp`
-    // to either `theor_digits` or `real_digits` as a power of 2 (since it
-    // may be positive or negative). Try to remove as many powers of 2
-    // as possible. All values are relative to `theor_digits`, that is,
-    // reflect the power you need to multiply `theor_digits` by.
-    //
-    // Both sides are on opposite sides of the equation, so we can factor
-    // out a power of two.
-    //
-    // Example: 10^-10, 2^-10   -> ( 0, 10, 0)
-    // Example: 10^-10, 2^-15   -> (-5, 10, 0)
-    // Example: 10^-10, 2^-5    -> ( 5, 10, 0)
-    // Example: 10^-10, 2^5     -> (15, 10, 0)
-    let binary_exp = theor_exp - real_exp;
-    let halfradix_exp = -real_exp;
-    if halfradix_exp != 0 {
-        theor_digits.pow(5, halfradix_exp as u32).unwrap();
-    }
-    if binary_exp > 0 {
-        theor_digits.pow(2, binary_exp as u32).unwrap();
-    } else if binary_exp < 0 {
-        real_digits.pow(2, (-binary_exp) as u32).unwrap();
-    }
-
-    // Compare our theoretical and real digits and round nearest, tie even.
-    let ord = real_digits.data.cmp(&theor_digits.data);
-    round::<F, _>(&mut fp, |f, s| {
-        round_nearest_tie_even(f, s, |is_odd, _, _| {
-            // Can ignore `is_halfway` and `is_above`, since those were
-            // calculated using less significant digits.
-            match ord {
-                cmp::Ordering::Greater => true,
-                cmp::Ordering::Less => false,
-                cmp::Ordering::Equal if is_odd => true,
-                cmp::Ordering::Equal => false,
-            }
-        });
-    });
-    fp
-}
-
-/// Add a digit to the temporary value.
-macro_rules! add_digit {
-    ($c:ident, $value:ident, $counter:ident, $count:ident) => {{
-        let digit = $c - b'0';
-        $value *= 10 as Limb;
-        $value += digit as Limb;
-
-        // Increment our counters.
-        $counter += 1;
-        $count += 1;
-    }};
-}
-
-/// Add a temporary value to our mantissa.
-macro_rules! add_temporary {
-    // Multiply by the small power and add the native value.
-    (@mul $result:ident, $power:expr, $value:expr) => {
-        $result.data.mul_small($power).unwrap();
-        $result.data.add_small($value).unwrap();
-    };
-
-    // # Safety
-    //
-    // Safe if `counter <= step`, or smaller than the table size.
-    ($format:ident, $result:ident, $counter:ident, $value:ident) => {
-        if $counter != 0 {
-            // SAFETY: safe, since `counter <= step`, or smaller than the table size.
-            let small_power = unsafe { f64::int_pow_fast_path($counter, 10) };
-            add_temporary!(@mul $result, small_power as Limb, $value);
-            $counter = 0;
-            $value = 0;
-        }
-    };
-
-    // Add a temporary where we won't read the counter results internally.
-    //
-    // # Safety
-    //
-    // Safe if `counter <= step`, or smaller than the table size.
- (@end $format:ident, $result:ident, $counter:ident, $value:ident) => { - if $counter != 0 { - // SAFETY: safe, since `counter <= step`, or smaller than the table size. - let small_power = unsafe { f64::int_pow_fast_path($counter, 10) }; - add_temporary!(@mul $result, small_power as Limb, $value); - } - }; - - // Add the maximum native value. - (@max $format:ident, $result:ident, $counter:ident, $value:ident, $max:ident) => { - add_temporary!(@mul $result, $max, $value); - $counter = 0; - $value = 0; - }; -} - -/// Round-up a truncated value. -macro_rules! round_up_truncated { - ($format:ident, $result:ident, $count:ident) => {{ - // Need to round-up. - // Can't just add 1, since this can accidentally round-up - // values to a halfway point, which can cause invalid results. - add_temporary!(@mul $result, 10, 1); - $count += 1; - }}; -} - -/// Check and round-up the fraction if any non-zero digits exist. -macro_rules! round_up_nonzero { - ($format:ident, $iter:expr, $result:ident, $count:ident) => {{ - for &digit in $iter { - if digit != b'0' { - round_up_truncated!($format, $result, $count); - return ($result, $count); - } - } - }}; -} - -/// Parse the full mantissa into a big integer. -/// -/// Returns the parsed mantissa and the number of digits in the mantissa. -/// The max digits is the maximum number of digits plus one. -pub fn parse_mantissa<'a, Iter1, Iter2>( - mut integer: Iter1, - mut fraction: Iter2, - max_digits: usize, -) -> (Bigint, usize) -where - Iter1: Iterator + Clone, - Iter2: Iterator + Clone, -{ - // Iteratively process all the data in the mantissa. - // We do this via small, intermediate values which once we reach - // the maximum number of digits we can process without overflow, - // we add the temporary to the big integer. - let mut counter: usize = 0; - let mut count: usize = 0; - let mut value: Limb = 0; - let mut result = Bigint::new(); - - // Now use our pre-computed small powers iteratively. - // This is calculated as `⌊log(2^BITS - 1, 10)⌋`. - let step: usize = if LIMB_BITS == 32 { - 9 - } else { - 19 - }; - let max_native = (10 as Limb).pow(step as u32); - - // Process the integer digits. - 'integer: loop { - // Parse a digit at a time, until we reach step. - while counter < step && count < max_digits { - if let Some(&c) = integer.next() { - add_digit!(c, value, counter, count); - } else { - break 'integer; - } - } - - // Check if we've exhausted our max digits. - if count == max_digits { - // Need to check if we're truncated, and round-up accordingly. - // SAFETY: safe since `counter <= step`. - add_temporary!(@end format, result, counter, value); - round_up_nonzero!(format, integer, result, count); - round_up_nonzero!(format, fraction, result, count); - return (result, count); - } else { - // Add our temporary from the loop. - // SAFETY: safe since `counter <= step`. - add_temporary!(@max format, result, counter, value, max_native); - } - } - - // Skip leading fraction zeros. - // Required to get an accurate count. - if count == 0 { - for &c in &mut fraction { - if c != b'0' { - add_digit!(c, value, counter, count); - break; - } - } - } - - // Process the fraction digits. - 'fraction: loop { - // Parse a digit at a time, until we reach step. - while counter < step && count < max_digits { - if let Some(&c) = fraction.next() { - add_digit!(c, value, counter, count); - } else { - break 'fraction; - } - } - - // Check if we've exhausted our max digits. - if count == max_digits { - // SAFETY: safe since `counter <= step`. 
- add_temporary!(@end format, result, counter, value); - round_up_nonzero!(format, fraction, result, count); - return (result, count); - } else { - // Add our temporary from the loop. - // SAFETY: safe since `counter <= step`. - add_temporary!(@max format, result, counter, value, max_native); - } - } - - // We will always have a remainder, as long as we entered the loop - // once, or counter % step is 0. - // SAFETY: safe since `counter <= step`. - add_temporary!(@end format, result, counter, value); - - (result, count) -} - -// SCALING -// ------- - -/// Calculate the scientific exponent from a `Number` value. -/// Any other attempts would require slowdowns for faster algorithms. -#[inline] -pub fn scientific_exponent(num: &Number) -> i32 { - // Use power reduction to make this faster. - let mut mantissa = num.mantissa; - let mut exponent = num.exponent; - while mantissa >= 10000 { - mantissa /= 10000; - exponent += 4; - } - while mantissa >= 100 { - mantissa /= 100; - exponent += 2; - } - while mantissa >= 10 { - mantissa /= 10; - exponent += 1; - } - exponent as i32 -} - -/// Calculate `b` from a a representation of `b` as a float. -#[inline] -pub fn b(float: F) -> ExtendedFloat { - ExtendedFloat { - mant: float.mantissa(), - exp: float.exponent(), - } -} - -/// Calculate `b+h` from a a representation of `b` as a float. -#[inline] -pub fn bh(float: F) -> ExtendedFloat { - let fp = b(float); - ExtendedFloat { - mant: (fp.mant << 1) + 1, - exp: fp.exp - 1, - } -} diff --git a/vendor/minimal-lexical/src/stackvec.rs b/vendor/minimal-lexical/src/stackvec.rs deleted file mode 100644 index d9bc259555be20..00000000000000 --- a/vendor/minimal-lexical/src/stackvec.rs +++ /dev/null @@ -1,308 +0,0 @@ -//! Simple stack-allocated vector. - -#![cfg(not(feature = "alloc"))] -#![doc(hidden)] - -use crate::bigint; -use core::{cmp, mem, ops, ptr, slice}; - -/// Simple stack vector implementation. -#[derive(Clone)] -pub struct StackVec { - /// The raw buffer for the elements. - data: [mem::MaybeUninit; bigint::BIGINT_LIMBS], - /// The number of elements in the array (we never need more than u16::MAX). - length: u16, -} - -#[allow(clippy::new_without_default)] -impl StackVec { - /// Construct an empty vector. - #[inline] - pub const fn new() -> Self { - Self { - length: 0, - data: [mem::MaybeUninit::uninit(); bigint::BIGINT_LIMBS], - } - } - - /// Construct a vector from an existing slice. - #[inline] - pub fn try_from(x: &[bigint::Limb]) -> Option { - let mut vec = Self::new(); - vec.try_extend(x)?; - Some(vec) - } - - /// Sets the length of a vector. - /// - /// This will explicitly set the size of the vector, without actually - /// modifying its buffers, so it is up to the caller to ensure that the - /// vector is actually the specified size. - /// - /// # Safety - /// - /// Safe as long as `len` is less than `BIGINT_LIMBS`. - #[inline] - pub unsafe fn set_len(&mut self, len: usize) { - // Constant is `u16::MAX` for older Rustc versions. - debug_assert!(len <= 0xffff); - debug_assert!(len <= bigint::BIGINT_LIMBS); - self.length = len as u16; - } - - /// The number of elements stored in the vector. - #[inline] - pub const fn len(&self) -> usize { - self.length as usize - } - - /// If the vector is empty. - #[inline] - pub const fn is_empty(&self) -> bool { - self.len() == 0 - } - - /// The number of items the vector can hold. - #[inline] - pub const fn capacity(&self) -> usize { - bigint::BIGINT_LIMBS as usize - } - - /// Append an item to the vector, without bounds checking. 
- /// - /// # Safety - /// - /// Safe if `self.len() < self.capacity()`. - #[inline] - pub unsafe fn push_unchecked(&mut self, value: bigint::Limb) { - debug_assert!(self.len() < self.capacity()); - // SAFETY: safe, capacity is less than the current size. - unsafe { - ptr::write(self.as_mut_ptr().add(self.len()), value); - self.length += 1; - } - } - - /// Append an item to the vector. - #[inline] - pub fn try_push(&mut self, value: bigint::Limb) -> Option<()> { - if self.len() < self.capacity() { - // SAFETY: safe, capacity is less than the current size. - unsafe { self.push_unchecked(value) }; - Some(()) - } else { - None - } - } - - /// Remove an item from the end of a vector, without bounds checking. - /// - /// # Safety - /// - /// Safe if `self.len() > 0`. - #[inline] - pub unsafe fn pop_unchecked(&mut self) -> bigint::Limb { - debug_assert!(!self.is_empty()); - // SAFETY: safe if `self.length > 0`. - // We have a trivial drop and copy, so this is safe. - self.length -= 1; - unsafe { ptr::read(self.as_mut_ptr().add(self.len())) } - } - - /// Remove an item from the end of the vector and return it, or None if empty. - #[inline] - pub fn pop(&mut self) -> Option { - if self.is_empty() { - None - } else { - // SAFETY: safe, since `self.len() > 0`. - unsafe { Some(self.pop_unchecked()) } - } - } - - /// Add items from a slice to the vector, without bounds checking. - /// - /// # Safety - /// - /// Safe if `self.len() + slc.len() <= self.capacity()`. - #[inline] - pub unsafe fn extend_unchecked(&mut self, slc: &[bigint::Limb]) { - let index = self.len(); - let new_len = index + slc.len(); - debug_assert!(self.len() + slc.len() <= self.capacity()); - let src = slc.as_ptr(); - // SAFETY: safe if `self.len() + slc.len() <= self.capacity()`. - unsafe { - let dst = self.as_mut_ptr().add(index); - ptr::copy_nonoverlapping(src, dst, slc.len()); - self.set_len(new_len); - } - } - - /// Copy elements from a slice and append them to the vector. - #[inline] - pub fn try_extend(&mut self, slc: &[bigint::Limb]) -> Option<()> { - if self.len() + slc.len() <= self.capacity() { - // SAFETY: safe, since `self.len() + slc.len() <= self.capacity()`. - unsafe { self.extend_unchecked(slc) }; - Some(()) - } else { - None - } - } - - /// Truncate vector to new length, dropping any items after `len`. - /// - /// # Safety - /// - /// Safe as long as `len <= self.capacity()`. - unsafe fn truncate_unchecked(&mut self, len: usize) { - debug_assert!(len <= self.capacity()); - self.length = len as u16; - } - - /// Resize the buffer, without bounds checking. - /// - /// # Safety - /// - /// Safe as long as `len <= self.capacity()`. - #[inline] - pub unsafe fn resize_unchecked(&mut self, len: usize, value: bigint::Limb) { - debug_assert!(len <= self.capacity()); - let old_len = self.len(); - if len > old_len { - // We have a trivial drop, so there's no worry here. - // Just, don't set the length until all values have been written, - // so we don't accidentally read uninitialized memory. - - // SAFETY: safe if `len < self.capacity()`. - let count = len - old_len; - for index in 0..count { - unsafe { - let dst = self.as_mut_ptr().add(old_len + index); - ptr::write(dst, value); - } - } - self.length = len as u16; - } else { - // SAFETY: safe since `len < self.len()`. - unsafe { self.truncate_unchecked(len) }; - } - } - - /// Try to resize the buffer. - /// - /// If the new length is smaller than the current length, truncate - /// the input. If it's larger, then append elements to the buffer. 
- #[inline] - pub fn try_resize(&mut self, len: usize, value: bigint::Limb) -> Option<()> { - if len > self.capacity() { - None - } else { - // SAFETY: safe, since `len <= self.capacity()`. - unsafe { self.resize_unchecked(len, value) }; - Some(()) - } - } - - // HI - - /// Get the high 64 bits from the vector. - #[inline(always)] - pub fn hi64(&self) -> (u64, bool) { - bigint::hi64(self) - } - - // FROM - - /// Create StackVec from u64 value. - #[inline(always)] - pub fn from_u64(x: u64) -> Self { - bigint::from_u64(x) - } - - // MATH - - /// Normalize the integer, so any leading zero values are removed. - #[inline] - pub fn normalize(&mut self) { - bigint::normalize(self) - } - - /// Get if the big integer is normalized. - #[inline] - pub fn is_normalized(&self) -> bool { - bigint::is_normalized(self) - } - - /// AddAssign small integer. - #[inline] - pub fn add_small(&mut self, y: bigint::Limb) -> Option<()> { - bigint::small_add(self, y) - } - - /// MulAssign small integer. - #[inline] - pub fn mul_small(&mut self, y: bigint::Limb) -> Option<()> { - bigint::small_mul(self, y) - } -} - -impl PartialEq for StackVec { - #[inline] - #[allow(clippy::op_ref)] - fn eq(&self, other: &Self) -> bool { - use core::ops::Deref; - self.len() == other.len() && self.deref() == other.deref() - } -} - -impl Eq for StackVec { -} - -impl cmp::PartialOrd for StackVec { - #[inline] - fn partial_cmp(&self, other: &Self) -> Option { - Some(bigint::compare(self, other)) - } -} - -impl cmp::Ord for StackVec { - #[inline] - fn cmp(&self, other: &Self) -> cmp::Ordering { - bigint::compare(self, other) - } -} - -impl ops::Deref for StackVec { - type Target = [bigint::Limb]; - #[inline] - fn deref(&self) -> &[bigint::Limb] { - // SAFETY: safe since `self.data[..self.len()]` must be initialized - // and `self.len() <= self.capacity()`. - unsafe { - let ptr = self.data.as_ptr() as *const bigint::Limb; - slice::from_raw_parts(ptr, self.len()) - } - } -} - -impl ops::DerefMut for StackVec { - #[inline] - fn deref_mut(&mut self) -> &mut [bigint::Limb] { - // SAFETY: safe since `self.data[..self.len()]` must be initialized - // and `self.len() <= self.capacity()`. - unsafe { - let ptr = self.data.as_mut_ptr() as *mut bigint::Limb; - slice::from_raw_parts_mut(ptr, self.len()) - } - } -} - -impl ops::MulAssign<&[bigint::Limb]> for StackVec { - #[inline] - fn mul_assign(&mut self, rhs: &[bigint::Limb]) { - bigint::large_mul(self, rhs).unwrap(); - } -} diff --git a/vendor/minimal-lexical/src/table.rs b/vendor/minimal-lexical/src/table.rs deleted file mode 100644 index 7b1367e326a1bd..00000000000000 --- a/vendor/minimal-lexical/src/table.rs +++ /dev/null @@ -1,11 +0,0 @@ -//! Pre-computed tables for parsing float strings. - -#![doc(hidden)] - -// Re-export all the feature-specific files. -#[cfg(feature = "compact")] -pub use crate::table_bellerophon::*; -#[cfg(not(feature = "compact"))] -pub use crate::table_lemire::*; -#[cfg(not(feature = "compact"))] -pub use crate::table_small::*; diff --git a/vendor/minimal-lexical/src/table_bellerophon.rs b/vendor/minimal-lexical/src/table_bellerophon.rs deleted file mode 100644 index f85f8e6fb32277..00000000000000 --- a/vendor/minimal-lexical/src/table_bellerophon.rs +++ /dev/null @@ -1,119 +0,0 @@ -//! Cached exponents for basen values with 80-bit extended floats. -//! -//! Exact versions of base**n as an extended-precision float, with both -//! large and small powers. Use the large powers to minimize the amount -//! of compounded error. This is used in the Bellerophon algorithm. -//! 
-//! These values were calculated using Python, using the arbitrary-precision -//! integer to calculate exact extended-representation of each value. -//! These values are all normalized. -//! -//! DO NOT MODIFY: Generated by `etc/bellerophon_table.py` - -#![cfg(feature = "compact")] -#![doc(hidden)] - -use crate::bellerophon::BellerophonPowers; - -// HIGH LEVEL -// ---------- - -pub const BASE10_POWERS: BellerophonPowers = BellerophonPowers { - small: &BASE10_SMALL_MANTISSA, - large: &BASE10_LARGE_MANTISSA, - small_int: &BASE10_SMALL_INT_POWERS, - step: BASE10_STEP, - bias: BASE10_BIAS, - log2: BASE10_LOG2_MULT, - log2_shift: BASE10_LOG2_SHIFT, -}; - -// LOW-LEVEL -// --------- - -const BASE10_SMALL_MANTISSA: [u64; 10] = [ - 9223372036854775808, // 10^0 - 11529215046068469760, // 10^1 - 14411518807585587200, // 10^2 - 18014398509481984000, // 10^3 - 11258999068426240000, // 10^4 - 14073748835532800000, // 10^5 - 17592186044416000000, // 10^6 - 10995116277760000000, // 10^7 - 13743895347200000000, // 10^8 - 17179869184000000000, // 10^9 -]; -const BASE10_LARGE_MANTISSA: [u64; 66] = [ - 11555125961253852697, // 10^-350 - 13451937075301367670, // 10^-340 - 15660115838168849784, // 10^-330 - 18230774251475056848, // 10^-320 - 10611707258198326947, // 10^-310 - 12353653155963782858, // 10^-300 - 14381545078898527261, // 10^-290 - 16742321987285426889, // 10^-280 - 9745314011399999080, // 10^-270 - 11345038669416679861, // 10^-260 - 13207363278391631158, // 10^-250 - 15375394465392026070, // 10^-240 - 17899314949046850752, // 10^-230 - 10418772551374772303, // 10^-220 - 12129047596099288555, // 10^-210 - 14120069793541087484, // 10^-200 - 16437924692338667210, // 10^-190 - 9568131466127621947, // 10^-180 - 11138771039116687545, // 10^-170 - 12967236152753102995, // 10^-160 - 15095849699286165408, // 10^-150 - 17573882009934360870, // 10^-140 - 10229345649675443343, // 10^-130 - 11908525658859223294, // 10^-120 - 13863348470604074297, // 10^-110 - 16139061738043178685, // 10^-100 - 9394170331095332911, // 10^-90 - 10936253623915059621, // 10^-80 - 12731474852090538039, // 10^-70 - 14821387422376473014, // 10^-60 - 17254365866976409468, // 10^-50 - 10043362776618689222, // 10^-40 - 11692013098647223345, // 10^-30 - 13611294676837538538, // 10^-20 - 15845632502852867518, // 10^-10 - 9223372036854775808, // 10^0 - 10737418240000000000, // 10^10 - 12500000000000000000, // 10^20 - 14551915228366851806, // 10^30 - 16940658945086006781, // 10^40 - 9860761315262647567, // 10^50 - 11479437019748901445, // 10^60 - 13363823550460978230, // 10^70 - 15557538194652854267, // 10^80 - 18111358157653424735, // 10^90 - 10542197943230523224, // 10^100 - 12272733663244316382, // 10^110 - 14287342391028437277, // 10^120 - 16632655625031838749, // 10^130 - 9681479787123295682, // 10^140 - 11270725851789228247, // 10^150 - 13120851772591970218, // 10^160 - 15274681817498023410, // 10^170 - 17782069995880619867, // 10^180 - 10350527006597618960, // 10^190 - 12049599325514420588, // 10^200 - 14027579833653779454, // 10^210 - 16330252207878254650, // 10^220 - 9505457831475799117, // 10^230 - 11065809325636130661, // 10^240 - 12882297539194266616, // 10^250 - 14996968138956309548, // 10^260 - 17458768723248864463, // 10^270 - 10162340898095201970, // 10^280 - 11830521861667747109, // 10^290 - 13772540099066387756, // 10^300 -]; -const BASE10_SMALL_INT_POWERS: [u64; 10] = - [1, 10, 100, 1000, 10000, 100000, 1000000, 10000000, 100000000, 1000000000]; -const BASE10_STEP: i32 = 10; -const BASE10_BIAS: i32 = 350; -const 
BASE10_LOG2_MULT: i64 = 217706; -const BASE10_LOG2_SHIFT: i32 = 16; diff --git a/vendor/minimal-lexical/src/table_lemire.rs b/vendor/minimal-lexical/src/table_lemire.rs deleted file mode 100644 index 110e1dab2bbf96..00000000000000 --- a/vendor/minimal-lexical/src/table_lemire.rs +++ /dev/null @@ -1,676 +0,0 @@ -//! Pre-computed tables powers-of-5 for extended-precision representations. -//! -//! These tables enable fast scaling of the significant digits -//! of a float to the decimal exponent, with minimal rounding -//! errors, in a 128 or 192-bit representation. -//! -//! DO NOT MODIFY: Generated by `etc/lemire_table.py` -//! -//! This adapted from the Rust implementation, based on the fast-float-rust -//! implementation, and is similarly subject to an Apache2.0/MIT license. - -#![doc(hidden)] -#![cfg(not(feature = "compact"))] - -pub const SMALLEST_POWER_OF_FIVE: i32 = -342; -pub const LARGEST_POWER_OF_FIVE: i32 = 308; -pub const N_POWERS_OF_FIVE: usize = (LARGEST_POWER_OF_FIVE - SMALLEST_POWER_OF_FIVE + 1) as usize; - -// Use static to avoid long compile times: Rust compiler errors -// can have the entire table compiled multiple times, and then -// emit code multiple times, even if it's stripped out in -// the final binary. -#[rustfmt::skip] -pub static POWER_OF_FIVE_128: [(u64, u64); N_POWERS_OF_FIVE] = [ - (0xeef453d6923bd65a, 0x113faa2906a13b3f), // 5^-342 - (0x9558b4661b6565f8, 0x4ac7ca59a424c507), // 5^-341 - (0xbaaee17fa23ebf76, 0x5d79bcf00d2df649), // 5^-340 - (0xe95a99df8ace6f53, 0xf4d82c2c107973dc), // 5^-339 - (0x91d8a02bb6c10594, 0x79071b9b8a4be869), // 5^-338 - (0xb64ec836a47146f9, 0x9748e2826cdee284), // 5^-337 - (0xe3e27a444d8d98b7, 0xfd1b1b2308169b25), // 5^-336 - (0x8e6d8c6ab0787f72, 0xfe30f0f5e50e20f7), // 5^-335 - (0xb208ef855c969f4f, 0xbdbd2d335e51a935), // 5^-334 - (0xde8b2b66b3bc4723, 0xad2c788035e61382), // 5^-333 - (0x8b16fb203055ac76, 0x4c3bcb5021afcc31), // 5^-332 - (0xaddcb9e83c6b1793, 0xdf4abe242a1bbf3d), // 5^-331 - (0xd953e8624b85dd78, 0xd71d6dad34a2af0d), // 5^-330 - (0x87d4713d6f33aa6b, 0x8672648c40e5ad68), // 5^-329 - (0xa9c98d8ccb009506, 0x680efdaf511f18c2), // 5^-328 - (0xd43bf0effdc0ba48, 0x212bd1b2566def2), // 5^-327 - (0x84a57695fe98746d, 0x14bb630f7604b57), // 5^-326 - (0xa5ced43b7e3e9188, 0x419ea3bd35385e2d), // 5^-325 - (0xcf42894a5dce35ea, 0x52064cac828675b9), // 5^-324 - (0x818995ce7aa0e1b2, 0x7343efebd1940993), // 5^-323 - (0xa1ebfb4219491a1f, 0x1014ebe6c5f90bf8), // 5^-322 - (0xca66fa129f9b60a6, 0xd41a26e077774ef6), // 5^-321 - (0xfd00b897478238d0, 0x8920b098955522b4), // 5^-320 - (0x9e20735e8cb16382, 0x55b46e5f5d5535b0), // 5^-319 - (0xc5a890362fddbc62, 0xeb2189f734aa831d), // 5^-318 - (0xf712b443bbd52b7b, 0xa5e9ec7501d523e4), // 5^-317 - (0x9a6bb0aa55653b2d, 0x47b233c92125366e), // 5^-316 - (0xc1069cd4eabe89f8, 0x999ec0bb696e840a), // 5^-315 - (0xf148440a256e2c76, 0xc00670ea43ca250d), // 5^-314 - (0x96cd2a865764dbca, 0x380406926a5e5728), // 5^-313 - (0xbc807527ed3e12bc, 0xc605083704f5ecf2), // 5^-312 - (0xeba09271e88d976b, 0xf7864a44c633682e), // 5^-311 - (0x93445b8731587ea3, 0x7ab3ee6afbe0211d), // 5^-310 - (0xb8157268fdae9e4c, 0x5960ea05bad82964), // 5^-309 - (0xe61acf033d1a45df, 0x6fb92487298e33bd), // 5^-308 - (0x8fd0c16206306bab, 0xa5d3b6d479f8e056), // 5^-307 - (0xb3c4f1ba87bc8696, 0x8f48a4899877186c), // 5^-306 - (0xe0b62e2929aba83c, 0x331acdabfe94de87), // 5^-305 - (0x8c71dcd9ba0b4925, 0x9ff0c08b7f1d0b14), // 5^-304 - (0xaf8e5410288e1b6f, 0x7ecf0ae5ee44dd9), // 5^-303 - (0xdb71e91432b1a24a, 0xc9e82cd9f69d6150), // 5^-302 - 
(0x892731ac9faf056e, 0xbe311c083a225cd2), // 5^-301 - (0xab70fe17c79ac6ca, 0x6dbd630a48aaf406), // 5^-300 - (0xd64d3d9db981787d, 0x92cbbccdad5b108), // 5^-299 - (0x85f0468293f0eb4e, 0x25bbf56008c58ea5), // 5^-298 - (0xa76c582338ed2621, 0xaf2af2b80af6f24e), // 5^-297 - (0xd1476e2c07286faa, 0x1af5af660db4aee1), // 5^-296 - (0x82cca4db847945ca, 0x50d98d9fc890ed4d), // 5^-295 - (0xa37fce126597973c, 0xe50ff107bab528a0), // 5^-294 - (0xcc5fc196fefd7d0c, 0x1e53ed49a96272c8), // 5^-293 - (0xff77b1fcbebcdc4f, 0x25e8e89c13bb0f7a), // 5^-292 - (0x9faacf3df73609b1, 0x77b191618c54e9ac), // 5^-291 - (0xc795830d75038c1d, 0xd59df5b9ef6a2417), // 5^-290 - (0xf97ae3d0d2446f25, 0x4b0573286b44ad1d), // 5^-289 - (0x9becce62836ac577, 0x4ee367f9430aec32), // 5^-288 - (0xc2e801fb244576d5, 0x229c41f793cda73f), // 5^-287 - (0xf3a20279ed56d48a, 0x6b43527578c1110f), // 5^-286 - (0x9845418c345644d6, 0x830a13896b78aaa9), // 5^-285 - (0xbe5691ef416bd60c, 0x23cc986bc656d553), // 5^-284 - (0xedec366b11c6cb8f, 0x2cbfbe86b7ec8aa8), // 5^-283 - (0x94b3a202eb1c3f39, 0x7bf7d71432f3d6a9), // 5^-282 - (0xb9e08a83a5e34f07, 0xdaf5ccd93fb0cc53), // 5^-281 - (0xe858ad248f5c22c9, 0xd1b3400f8f9cff68), // 5^-280 - (0x91376c36d99995be, 0x23100809b9c21fa1), // 5^-279 - (0xb58547448ffffb2d, 0xabd40a0c2832a78a), // 5^-278 - (0xe2e69915b3fff9f9, 0x16c90c8f323f516c), // 5^-277 - (0x8dd01fad907ffc3b, 0xae3da7d97f6792e3), // 5^-276 - (0xb1442798f49ffb4a, 0x99cd11cfdf41779c), // 5^-275 - (0xdd95317f31c7fa1d, 0x40405643d711d583), // 5^-274 - (0x8a7d3eef7f1cfc52, 0x482835ea666b2572), // 5^-273 - (0xad1c8eab5ee43b66, 0xda3243650005eecf), // 5^-272 - (0xd863b256369d4a40, 0x90bed43e40076a82), // 5^-271 - (0x873e4f75e2224e68, 0x5a7744a6e804a291), // 5^-270 - (0xa90de3535aaae202, 0x711515d0a205cb36), // 5^-269 - (0xd3515c2831559a83, 0xd5a5b44ca873e03), // 5^-268 - (0x8412d9991ed58091, 0xe858790afe9486c2), // 5^-267 - (0xa5178fff668ae0b6, 0x626e974dbe39a872), // 5^-266 - (0xce5d73ff402d98e3, 0xfb0a3d212dc8128f), // 5^-265 - (0x80fa687f881c7f8e, 0x7ce66634bc9d0b99), // 5^-264 - (0xa139029f6a239f72, 0x1c1fffc1ebc44e80), // 5^-263 - (0xc987434744ac874e, 0xa327ffb266b56220), // 5^-262 - (0xfbe9141915d7a922, 0x4bf1ff9f0062baa8), // 5^-261 - (0x9d71ac8fada6c9b5, 0x6f773fc3603db4a9), // 5^-260 - (0xc4ce17b399107c22, 0xcb550fb4384d21d3), // 5^-259 - (0xf6019da07f549b2b, 0x7e2a53a146606a48), // 5^-258 - (0x99c102844f94e0fb, 0x2eda7444cbfc426d), // 5^-257 - (0xc0314325637a1939, 0xfa911155fefb5308), // 5^-256 - (0xf03d93eebc589f88, 0x793555ab7eba27ca), // 5^-255 - (0x96267c7535b763b5, 0x4bc1558b2f3458de), // 5^-254 - (0xbbb01b9283253ca2, 0x9eb1aaedfb016f16), // 5^-253 - (0xea9c227723ee8bcb, 0x465e15a979c1cadc), // 5^-252 - (0x92a1958a7675175f, 0xbfacd89ec191ec9), // 5^-251 - (0xb749faed14125d36, 0xcef980ec671f667b), // 5^-250 - (0xe51c79a85916f484, 0x82b7e12780e7401a), // 5^-249 - (0x8f31cc0937ae58d2, 0xd1b2ecb8b0908810), // 5^-248 - (0xb2fe3f0b8599ef07, 0x861fa7e6dcb4aa15), // 5^-247 - (0xdfbdcece67006ac9, 0x67a791e093e1d49a), // 5^-246 - (0x8bd6a141006042bd, 0xe0c8bb2c5c6d24e0), // 5^-245 - (0xaecc49914078536d, 0x58fae9f773886e18), // 5^-244 - (0xda7f5bf590966848, 0xaf39a475506a899e), // 5^-243 - (0x888f99797a5e012d, 0x6d8406c952429603), // 5^-242 - (0xaab37fd7d8f58178, 0xc8e5087ba6d33b83), // 5^-241 - (0xd5605fcdcf32e1d6, 0xfb1e4a9a90880a64), // 5^-240 - (0x855c3be0a17fcd26, 0x5cf2eea09a55067f), // 5^-239 - (0xa6b34ad8c9dfc06f, 0xf42faa48c0ea481e), // 5^-238 - (0xd0601d8efc57b08b, 0xf13b94daf124da26), // 5^-237 - (0x823c12795db6ce57, 0x76c53d08d6b70858), // 
5^-236 - (0xa2cb1717b52481ed, 0x54768c4b0c64ca6e), // 5^-235 - (0xcb7ddcdda26da268, 0xa9942f5dcf7dfd09), // 5^-234 - (0xfe5d54150b090b02, 0xd3f93b35435d7c4c), // 5^-233 - (0x9efa548d26e5a6e1, 0xc47bc5014a1a6daf), // 5^-232 - (0xc6b8e9b0709f109a, 0x359ab6419ca1091b), // 5^-231 - (0xf867241c8cc6d4c0, 0xc30163d203c94b62), // 5^-230 - (0x9b407691d7fc44f8, 0x79e0de63425dcf1d), // 5^-229 - (0xc21094364dfb5636, 0x985915fc12f542e4), // 5^-228 - (0xf294b943e17a2bc4, 0x3e6f5b7b17b2939d), // 5^-227 - (0x979cf3ca6cec5b5a, 0xa705992ceecf9c42), // 5^-226 - (0xbd8430bd08277231, 0x50c6ff782a838353), // 5^-225 - (0xece53cec4a314ebd, 0xa4f8bf5635246428), // 5^-224 - (0x940f4613ae5ed136, 0x871b7795e136be99), // 5^-223 - (0xb913179899f68584, 0x28e2557b59846e3f), // 5^-222 - (0xe757dd7ec07426e5, 0x331aeada2fe589cf), // 5^-221 - (0x9096ea6f3848984f, 0x3ff0d2c85def7621), // 5^-220 - (0xb4bca50b065abe63, 0xfed077a756b53a9), // 5^-219 - (0xe1ebce4dc7f16dfb, 0xd3e8495912c62894), // 5^-218 - (0x8d3360f09cf6e4bd, 0x64712dd7abbbd95c), // 5^-217 - (0xb080392cc4349dec, 0xbd8d794d96aacfb3), // 5^-216 - (0xdca04777f541c567, 0xecf0d7a0fc5583a0), // 5^-215 - (0x89e42caaf9491b60, 0xf41686c49db57244), // 5^-214 - (0xac5d37d5b79b6239, 0x311c2875c522ced5), // 5^-213 - (0xd77485cb25823ac7, 0x7d633293366b828b), // 5^-212 - (0x86a8d39ef77164bc, 0xae5dff9c02033197), // 5^-211 - (0xa8530886b54dbdeb, 0xd9f57f830283fdfc), // 5^-210 - (0xd267caa862a12d66, 0xd072df63c324fd7b), // 5^-209 - (0x8380dea93da4bc60, 0x4247cb9e59f71e6d), // 5^-208 - (0xa46116538d0deb78, 0x52d9be85f074e608), // 5^-207 - (0xcd795be870516656, 0x67902e276c921f8b), // 5^-206 - (0x806bd9714632dff6, 0xba1cd8a3db53b6), // 5^-205 - (0xa086cfcd97bf97f3, 0x80e8a40eccd228a4), // 5^-204 - (0xc8a883c0fdaf7df0, 0x6122cd128006b2cd), // 5^-203 - (0xfad2a4b13d1b5d6c, 0x796b805720085f81), // 5^-202 - (0x9cc3a6eec6311a63, 0xcbe3303674053bb0), // 5^-201 - (0xc3f490aa77bd60fc, 0xbedbfc4411068a9c), // 5^-200 - (0xf4f1b4d515acb93b, 0xee92fb5515482d44), // 5^-199 - (0x991711052d8bf3c5, 0x751bdd152d4d1c4a), // 5^-198 - (0xbf5cd54678eef0b6, 0xd262d45a78a0635d), // 5^-197 - (0xef340a98172aace4, 0x86fb897116c87c34), // 5^-196 - (0x9580869f0e7aac0e, 0xd45d35e6ae3d4da0), // 5^-195 - (0xbae0a846d2195712, 0x8974836059cca109), // 5^-194 - (0xe998d258869facd7, 0x2bd1a438703fc94b), // 5^-193 - (0x91ff83775423cc06, 0x7b6306a34627ddcf), // 5^-192 - (0xb67f6455292cbf08, 0x1a3bc84c17b1d542), // 5^-191 - (0xe41f3d6a7377eeca, 0x20caba5f1d9e4a93), // 5^-190 - (0x8e938662882af53e, 0x547eb47b7282ee9c), // 5^-189 - (0xb23867fb2a35b28d, 0xe99e619a4f23aa43), // 5^-188 - (0xdec681f9f4c31f31, 0x6405fa00e2ec94d4), // 5^-187 - (0x8b3c113c38f9f37e, 0xde83bc408dd3dd04), // 5^-186 - (0xae0b158b4738705e, 0x9624ab50b148d445), // 5^-185 - (0xd98ddaee19068c76, 0x3badd624dd9b0957), // 5^-184 - (0x87f8a8d4cfa417c9, 0xe54ca5d70a80e5d6), // 5^-183 - (0xa9f6d30a038d1dbc, 0x5e9fcf4ccd211f4c), // 5^-182 - (0xd47487cc8470652b, 0x7647c3200069671f), // 5^-181 - (0x84c8d4dfd2c63f3b, 0x29ecd9f40041e073), // 5^-180 - (0xa5fb0a17c777cf09, 0xf468107100525890), // 5^-179 - (0xcf79cc9db955c2cc, 0x7182148d4066eeb4), // 5^-178 - (0x81ac1fe293d599bf, 0xc6f14cd848405530), // 5^-177 - (0xa21727db38cb002f, 0xb8ada00e5a506a7c), // 5^-176 - (0xca9cf1d206fdc03b, 0xa6d90811f0e4851c), // 5^-175 - (0xfd442e4688bd304a, 0x908f4a166d1da663), // 5^-174 - (0x9e4a9cec15763e2e, 0x9a598e4e043287fe), // 5^-173 - (0xc5dd44271ad3cdba, 0x40eff1e1853f29fd), // 5^-172 - (0xf7549530e188c128, 0xd12bee59e68ef47c), // 5^-171 - (0x9a94dd3e8cf578b9, 
0x82bb74f8301958ce), // 5^-170 - (0xc13a148e3032d6e7, 0xe36a52363c1faf01), // 5^-169 - (0xf18899b1bc3f8ca1, 0xdc44e6c3cb279ac1), // 5^-168 - (0x96f5600f15a7b7e5, 0x29ab103a5ef8c0b9), // 5^-167 - (0xbcb2b812db11a5de, 0x7415d448f6b6f0e7), // 5^-166 - (0xebdf661791d60f56, 0x111b495b3464ad21), // 5^-165 - (0x936b9fcebb25c995, 0xcab10dd900beec34), // 5^-164 - (0xb84687c269ef3bfb, 0x3d5d514f40eea742), // 5^-163 - (0xe65829b3046b0afa, 0xcb4a5a3112a5112), // 5^-162 - (0x8ff71a0fe2c2e6dc, 0x47f0e785eaba72ab), // 5^-161 - (0xb3f4e093db73a093, 0x59ed216765690f56), // 5^-160 - (0xe0f218b8d25088b8, 0x306869c13ec3532c), // 5^-159 - (0x8c974f7383725573, 0x1e414218c73a13fb), // 5^-158 - (0xafbd2350644eeacf, 0xe5d1929ef90898fa), // 5^-157 - (0xdbac6c247d62a583, 0xdf45f746b74abf39), // 5^-156 - (0x894bc396ce5da772, 0x6b8bba8c328eb783), // 5^-155 - (0xab9eb47c81f5114f, 0x66ea92f3f326564), // 5^-154 - (0xd686619ba27255a2, 0xc80a537b0efefebd), // 5^-153 - (0x8613fd0145877585, 0xbd06742ce95f5f36), // 5^-152 - (0xa798fc4196e952e7, 0x2c48113823b73704), // 5^-151 - (0xd17f3b51fca3a7a0, 0xf75a15862ca504c5), // 5^-150 - (0x82ef85133de648c4, 0x9a984d73dbe722fb), // 5^-149 - (0xa3ab66580d5fdaf5, 0xc13e60d0d2e0ebba), // 5^-148 - (0xcc963fee10b7d1b3, 0x318df905079926a8), // 5^-147 - (0xffbbcfe994e5c61f, 0xfdf17746497f7052), // 5^-146 - (0x9fd561f1fd0f9bd3, 0xfeb6ea8bedefa633), // 5^-145 - (0xc7caba6e7c5382c8, 0xfe64a52ee96b8fc0), // 5^-144 - (0xf9bd690a1b68637b, 0x3dfdce7aa3c673b0), // 5^-143 - (0x9c1661a651213e2d, 0x6bea10ca65c084e), // 5^-142 - (0xc31bfa0fe5698db8, 0x486e494fcff30a62), // 5^-141 - (0xf3e2f893dec3f126, 0x5a89dba3c3efccfa), // 5^-140 - (0x986ddb5c6b3a76b7, 0xf89629465a75e01c), // 5^-139 - (0xbe89523386091465, 0xf6bbb397f1135823), // 5^-138 - (0xee2ba6c0678b597f, 0x746aa07ded582e2c), // 5^-137 - (0x94db483840b717ef, 0xa8c2a44eb4571cdc), // 5^-136 - (0xba121a4650e4ddeb, 0x92f34d62616ce413), // 5^-135 - (0xe896a0d7e51e1566, 0x77b020baf9c81d17), // 5^-134 - (0x915e2486ef32cd60, 0xace1474dc1d122e), // 5^-133 - (0xb5b5ada8aaff80b8, 0xd819992132456ba), // 5^-132 - (0xe3231912d5bf60e6, 0x10e1fff697ed6c69), // 5^-131 - (0x8df5efabc5979c8f, 0xca8d3ffa1ef463c1), // 5^-130 - (0xb1736b96b6fd83b3, 0xbd308ff8a6b17cb2), // 5^-129 - (0xddd0467c64bce4a0, 0xac7cb3f6d05ddbde), // 5^-128 - (0x8aa22c0dbef60ee4, 0x6bcdf07a423aa96b), // 5^-127 - (0xad4ab7112eb3929d, 0x86c16c98d2c953c6), // 5^-126 - (0xd89d64d57a607744, 0xe871c7bf077ba8b7), // 5^-125 - (0x87625f056c7c4a8b, 0x11471cd764ad4972), // 5^-124 - (0xa93af6c6c79b5d2d, 0xd598e40d3dd89bcf), // 5^-123 - (0xd389b47879823479, 0x4aff1d108d4ec2c3), // 5^-122 - (0x843610cb4bf160cb, 0xcedf722a585139ba), // 5^-121 - (0xa54394fe1eedb8fe, 0xc2974eb4ee658828), // 5^-120 - (0xce947a3da6a9273e, 0x733d226229feea32), // 5^-119 - (0x811ccc668829b887, 0x806357d5a3f525f), // 5^-118 - (0xa163ff802a3426a8, 0xca07c2dcb0cf26f7), // 5^-117 - (0xc9bcff6034c13052, 0xfc89b393dd02f0b5), // 5^-116 - (0xfc2c3f3841f17c67, 0xbbac2078d443ace2), // 5^-115 - (0x9d9ba7832936edc0, 0xd54b944b84aa4c0d), // 5^-114 - (0xc5029163f384a931, 0xa9e795e65d4df11), // 5^-113 - (0xf64335bcf065d37d, 0x4d4617b5ff4a16d5), // 5^-112 - (0x99ea0196163fa42e, 0x504bced1bf8e4e45), // 5^-111 - (0xc06481fb9bcf8d39, 0xe45ec2862f71e1d6), // 5^-110 - (0xf07da27a82c37088, 0x5d767327bb4e5a4c), // 5^-109 - (0x964e858c91ba2655, 0x3a6a07f8d510f86f), // 5^-108 - (0xbbe226efb628afea, 0x890489f70a55368b), // 5^-107 - (0xeadab0aba3b2dbe5, 0x2b45ac74ccea842e), // 5^-106 - (0x92c8ae6b464fc96f, 0x3b0b8bc90012929d), // 5^-105 - 
(0xb77ada0617e3bbcb, 0x9ce6ebb40173744), // 5^-104 - (0xe55990879ddcaabd, 0xcc420a6a101d0515), // 5^-103 - (0x8f57fa54c2a9eab6, 0x9fa946824a12232d), // 5^-102 - (0xb32df8e9f3546564, 0x47939822dc96abf9), // 5^-101 - (0xdff9772470297ebd, 0x59787e2b93bc56f7), // 5^-100 - (0x8bfbea76c619ef36, 0x57eb4edb3c55b65a), // 5^-99 - (0xaefae51477a06b03, 0xede622920b6b23f1), // 5^-98 - (0xdab99e59958885c4, 0xe95fab368e45eced), // 5^-97 - (0x88b402f7fd75539b, 0x11dbcb0218ebb414), // 5^-96 - (0xaae103b5fcd2a881, 0xd652bdc29f26a119), // 5^-95 - (0xd59944a37c0752a2, 0x4be76d3346f0495f), // 5^-94 - (0x857fcae62d8493a5, 0x6f70a4400c562ddb), // 5^-93 - (0xa6dfbd9fb8e5b88e, 0xcb4ccd500f6bb952), // 5^-92 - (0xd097ad07a71f26b2, 0x7e2000a41346a7a7), // 5^-91 - (0x825ecc24c873782f, 0x8ed400668c0c28c8), // 5^-90 - (0xa2f67f2dfa90563b, 0x728900802f0f32fa), // 5^-89 - (0xcbb41ef979346bca, 0x4f2b40a03ad2ffb9), // 5^-88 - (0xfea126b7d78186bc, 0xe2f610c84987bfa8), // 5^-87 - (0x9f24b832e6b0f436, 0xdd9ca7d2df4d7c9), // 5^-86 - (0xc6ede63fa05d3143, 0x91503d1c79720dbb), // 5^-85 - (0xf8a95fcf88747d94, 0x75a44c6397ce912a), // 5^-84 - (0x9b69dbe1b548ce7c, 0xc986afbe3ee11aba), // 5^-83 - (0xc24452da229b021b, 0xfbe85badce996168), // 5^-82 - (0xf2d56790ab41c2a2, 0xfae27299423fb9c3), // 5^-81 - (0x97c560ba6b0919a5, 0xdccd879fc967d41a), // 5^-80 - (0xbdb6b8e905cb600f, 0x5400e987bbc1c920), // 5^-79 - (0xed246723473e3813, 0x290123e9aab23b68), // 5^-78 - (0x9436c0760c86e30b, 0xf9a0b6720aaf6521), // 5^-77 - (0xb94470938fa89bce, 0xf808e40e8d5b3e69), // 5^-76 - (0xe7958cb87392c2c2, 0xb60b1d1230b20e04), // 5^-75 - (0x90bd77f3483bb9b9, 0xb1c6f22b5e6f48c2), // 5^-74 - (0xb4ecd5f01a4aa828, 0x1e38aeb6360b1af3), // 5^-73 - (0xe2280b6c20dd5232, 0x25c6da63c38de1b0), // 5^-72 - (0x8d590723948a535f, 0x579c487e5a38ad0e), // 5^-71 - (0xb0af48ec79ace837, 0x2d835a9df0c6d851), // 5^-70 - (0xdcdb1b2798182244, 0xf8e431456cf88e65), // 5^-69 - (0x8a08f0f8bf0f156b, 0x1b8e9ecb641b58ff), // 5^-68 - (0xac8b2d36eed2dac5, 0xe272467e3d222f3f), // 5^-67 - (0xd7adf884aa879177, 0x5b0ed81dcc6abb0f), // 5^-66 - (0x86ccbb52ea94baea, 0x98e947129fc2b4e9), // 5^-65 - (0xa87fea27a539e9a5, 0x3f2398d747b36224), // 5^-64 - (0xd29fe4b18e88640e, 0x8eec7f0d19a03aad), // 5^-63 - (0x83a3eeeef9153e89, 0x1953cf68300424ac), // 5^-62 - (0xa48ceaaab75a8e2b, 0x5fa8c3423c052dd7), // 5^-61 - (0xcdb02555653131b6, 0x3792f412cb06794d), // 5^-60 - (0x808e17555f3ebf11, 0xe2bbd88bbee40bd0), // 5^-59 - (0xa0b19d2ab70e6ed6, 0x5b6aceaeae9d0ec4), // 5^-58 - (0xc8de047564d20a8b, 0xf245825a5a445275), // 5^-57 - (0xfb158592be068d2e, 0xeed6e2f0f0d56712), // 5^-56 - (0x9ced737bb6c4183d, 0x55464dd69685606b), // 5^-55 - (0xc428d05aa4751e4c, 0xaa97e14c3c26b886), // 5^-54 - (0xf53304714d9265df, 0xd53dd99f4b3066a8), // 5^-53 - (0x993fe2c6d07b7fab, 0xe546a8038efe4029), // 5^-52 - (0xbf8fdb78849a5f96, 0xde98520472bdd033), // 5^-51 - (0xef73d256a5c0f77c, 0x963e66858f6d4440), // 5^-50 - (0x95a8637627989aad, 0xdde7001379a44aa8), // 5^-49 - (0xbb127c53b17ec159, 0x5560c018580d5d52), // 5^-48 - (0xe9d71b689dde71af, 0xaab8f01e6e10b4a6), // 5^-47 - (0x9226712162ab070d, 0xcab3961304ca70e8), // 5^-46 - (0xb6b00d69bb55c8d1, 0x3d607b97c5fd0d22), // 5^-45 - (0xe45c10c42a2b3b05, 0x8cb89a7db77c506a), // 5^-44 - (0x8eb98a7a9a5b04e3, 0x77f3608e92adb242), // 5^-43 - (0xb267ed1940f1c61c, 0x55f038b237591ed3), // 5^-42 - (0xdf01e85f912e37a3, 0x6b6c46dec52f6688), // 5^-41 - (0x8b61313bbabce2c6, 0x2323ac4b3b3da015), // 5^-40 - (0xae397d8aa96c1b77, 0xabec975e0a0d081a), // 5^-39 - (0xd9c7dced53c72255, 0x96e7bd358c904a21), // 5^-38 - 
(0x881cea14545c7575, 0x7e50d64177da2e54), // 5^-37 - (0xaa242499697392d2, 0xdde50bd1d5d0b9e9), // 5^-36 - (0xd4ad2dbfc3d07787, 0x955e4ec64b44e864), // 5^-35 - (0x84ec3c97da624ab4, 0xbd5af13bef0b113e), // 5^-34 - (0xa6274bbdd0fadd61, 0xecb1ad8aeacdd58e), // 5^-33 - (0xcfb11ead453994ba, 0x67de18eda5814af2), // 5^-32 - (0x81ceb32c4b43fcf4, 0x80eacf948770ced7), // 5^-31 - (0xa2425ff75e14fc31, 0xa1258379a94d028d), // 5^-30 - (0xcad2f7f5359a3b3e, 0x96ee45813a04330), // 5^-29 - (0xfd87b5f28300ca0d, 0x8bca9d6e188853fc), // 5^-28 - (0x9e74d1b791e07e48, 0x775ea264cf55347e), // 5^-27 - (0xc612062576589dda, 0x95364afe032a819e), // 5^-26 - (0xf79687aed3eec551, 0x3a83ddbd83f52205), // 5^-25 - (0x9abe14cd44753b52, 0xc4926a9672793543), // 5^-24 - (0xc16d9a0095928a27, 0x75b7053c0f178294), // 5^-23 - (0xf1c90080baf72cb1, 0x5324c68b12dd6339), // 5^-22 - (0x971da05074da7bee, 0xd3f6fc16ebca5e04), // 5^-21 - (0xbce5086492111aea, 0x88f4bb1ca6bcf585), // 5^-20 - (0xec1e4a7db69561a5, 0x2b31e9e3d06c32e6), // 5^-19 - (0x9392ee8e921d5d07, 0x3aff322e62439fd0), // 5^-18 - (0xb877aa3236a4b449, 0x9befeb9fad487c3), // 5^-17 - (0xe69594bec44de15b, 0x4c2ebe687989a9b4), // 5^-16 - (0x901d7cf73ab0acd9, 0xf9d37014bf60a11), // 5^-15 - (0xb424dc35095cd80f, 0x538484c19ef38c95), // 5^-14 - (0xe12e13424bb40e13, 0x2865a5f206b06fba), // 5^-13 - (0x8cbccc096f5088cb, 0xf93f87b7442e45d4), // 5^-12 - (0xafebff0bcb24aafe, 0xf78f69a51539d749), // 5^-11 - (0xdbe6fecebdedd5be, 0xb573440e5a884d1c), // 5^-10 - (0x89705f4136b4a597, 0x31680a88f8953031), // 5^-9 - (0xabcc77118461cefc, 0xfdc20d2b36ba7c3e), // 5^-8 - (0xd6bf94d5e57a42bc, 0x3d32907604691b4d), // 5^-7 - (0x8637bd05af6c69b5, 0xa63f9a49c2c1b110), // 5^-6 - (0xa7c5ac471b478423, 0xfcf80dc33721d54), // 5^-5 - (0xd1b71758e219652b, 0xd3c36113404ea4a9), // 5^-4 - (0x83126e978d4fdf3b, 0x645a1cac083126ea), // 5^-3 - (0xa3d70a3d70a3d70a, 0x3d70a3d70a3d70a4), // 5^-2 - (0xcccccccccccccccc, 0xcccccccccccccccd), // 5^-1 - (0x8000000000000000, 0x0), // 5^0 - (0xa000000000000000, 0x0), // 5^1 - (0xc800000000000000, 0x0), // 5^2 - (0xfa00000000000000, 0x0), // 5^3 - (0x9c40000000000000, 0x0), // 5^4 - (0xc350000000000000, 0x0), // 5^5 - (0xf424000000000000, 0x0), // 5^6 - (0x9896800000000000, 0x0), // 5^7 - (0xbebc200000000000, 0x0), // 5^8 - (0xee6b280000000000, 0x0), // 5^9 - (0x9502f90000000000, 0x0), // 5^10 - (0xba43b74000000000, 0x0), // 5^11 - (0xe8d4a51000000000, 0x0), // 5^12 - (0x9184e72a00000000, 0x0), // 5^13 - (0xb5e620f480000000, 0x0), // 5^14 - (0xe35fa931a0000000, 0x0), // 5^15 - (0x8e1bc9bf04000000, 0x0), // 5^16 - (0xb1a2bc2ec5000000, 0x0), // 5^17 - (0xde0b6b3a76400000, 0x0), // 5^18 - (0x8ac7230489e80000, 0x0), // 5^19 - (0xad78ebc5ac620000, 0x0), // 5^20 - (0xd8d726b7177a8000, 0x0), // 5^21 - (0x878678326eac9000, 0x0), // 5^22 - (0xa968163f0a57b400, 0x0), // 5^23 - (0xd3c21bcecceda100, 0x0), // 5^24 - (0x84595161401484a0, 0x0), // 5^25 - (0xa56fa5b99019a5c8, 0x0), // 5^26 - (0xcecb8f27f4200f3a, 0x0), // 5^27 - (0x813f3978f8940984, 0x4000000000000000), // 5^28 - (0xa18f07d736b90be5, 0x5000000000000000), // 5^29 - (0xc9f2c9cd04674ede, 0xa400000000000000), // 5^30 - (0xfc6f7c4045812296, 0x4d00000000000000), // 5^31 - (0x9dc5ada82b70b59d, 0xf020000000000000), // 5^32 - (0xc5371912364ce305, 0x6c28000000000000), // 5^33 - (0xf684df56c3e01bc6, 0xc732000000000000), // 5^34 - (0x9a130b963a6c115c, 0x3c7f400000000000), // 5^35 - (0xc097ce7bc90715b3, 0x4b9f100000000000), // 5^36 - (0xf0bdc21abb48db20, 0x1e86d40000000000), // 5^37 - (0x96769950b50d88f4, 0x1314448000000000), // 5^38 - 
(0xbc143fa4e250eb31, 0x17d955a000000000), // 5^39 - (0xeb194f8e1ae525fd, 0x5dcfab0800000000), // 5^40 - (0x92efd1b8d0cf37be, 0x5aa1cae500000000), // 5^41 - (0xb7abc627050305ad, 0xf14a3d9e40000000), // 5^42 - (0xe596b7b0c643c719, 0x6d9ccd05d0000000), // 5^43 - (0x8f7e32ce7bea5c6f, 0xe4820023a2000000), // 5^44 - (0xb35dbf821ae4f38b, 0xdda2802c8a800000), // 5^45 - (0xe0352f62a19e306e, 0xd50b2037ad200000), // 5^46 - (0x8c213d9da502de45, 0x4526f422cc340000), // 5^47 - (0xaf298d050e4395d6, 0x9670b12b7f410000), // 5^48 - (0xdaf3f04651d47b4c, 0x3c0cdd765f114000), // 5^49 - (0x88d8762bf324cd0f, 0xa5880a69fb6ac800), // 5^50 - (0xab0e93b6efee0053, 0x8eea0d047a457a00), // 5^51 - (0xd5d238a4abe98068, 0x72a4904598d6d880), // 5^52 - (0x85a36366eb71f041, 0x47a6da2b7f864750), // 5^53 - (0xa70c3c40a64e6c51, 0x999090b65f67d924), // 5^54 - (0xd0cf4b50cfe20765, 0xfff4b4e3f741cf6d), // 5^55 - (0x82818f1281ed449f, 0xbff8f10e7a8921a4), // 5^56 - (0xa321f2d7226895c7, 0xaff72d52192b6a0d), // 5^57 - (0xcbea6f8ceb02bb39, 0x9bf4f8a69f764490), // 5^58 - (0xfee50b7025c36a08, 0x2f236d04753d5b4), // 5^59 - (0x9f4f2726179a2245, 0x1d762422c946590), // 5^60 - (0xc722f0ef9d80aad6, 0x424d3ad2b7b97ef5), // 5^61 - (0xf8ebad2b84e0d58b, 0xd2e0898765a7deb2), // 5^62 - (0x9b934c3b330c8577, 0x63cc55f49f88eb2f), // 5^63 - (0xc2781f49ffcfa6d5, 0x3cbf6b71c76b25fb), // 5^64 - (0xf316271c7fc3908a, 0x8bef464e3945ef7a), // 5^65 - (0x97edd871cfda3a56, 0x97758bf0e3cbb5ac), // 5^66 - (0xbde94e8e43d0c8ec, 0x3d52eeed1cbea317), // 5^67 - (0xed63a231d4c4fb27, 0x4ca7aaa863ee4bdd), // 5^68 - (0x945e455f24fb1cf8, 0x8fe8caa93e74ef6a), // 5^69 - (0xb975d6b6ee39e436, 0xb3e2fd538e122b44), // 5^70 - (0xe7d34c64a9c85d44, 0x60dbbca87196b616), // 5^71 - (0x90e40fbeea1d3a4a, 0xbc8955e946fe31cd), // 5^72 - (0xb51d13aea4a488dd, 0x6babab6398bdbe41), // 5^73 - (0xe264589a4dcdab14, 0xc696963c7eed2dd1), // 5^74 - (0x8d7eb76070a08aec, 0xfc1e1de5cf543ca2), // 5^75 - (0xb0de65388cc8ada8, 0x3b25a55f43294bcb), // 5^76 - (0xdd15fe86affad912, 0x49ef0eb713f39ebe), // 5^77 - (0x8a2dbf142dfcc7ab, 0x6e3569326c784337), // 5^78 - (0xacb92ed9397bf996, 0x49c2c37f07965404), // 5^79 - (0xd7e77a8f87daf7fb, 0xdc33745ec97be906), // 5^80 - (0x86f0ac99b4e8dafd, 0x69a028bb3ded71a3), // 5^81 - (0xa8acd7c0222311bc, 0xc40832ea0d68ce0c), // 5^82 - (0xd2d80db02aabd62b, 0xf50a3fa490c30190), // 5^83 - (0x83c7088e1aab65db, 0x792667c6da79e0fa), // 5^84 - (0xa4b8cab1a1563f52, 0x577001b891185938), // 5^85 - (0xcde6fd5e09abcf26, 0xed4c0226b55e6f86), // 5^86 - (0x80b05e5ac60b6178, 0x544f8158315b05b4), // 5^87 - (0xa0dc75f1778e39d6, 0x696361ae3db1c721), // 5^88 - (0xc913936dd571c84c, 0x3bc3a19cd1e38e9), // 5^89 - (0xfb5878494ace3a5f, 0x4ab48a04065c723), // 5^90 - (0x9d174b2dcec0e47b, 0x62eb0d64283f9c76), // 5^91 - (0xc45d1df942711d9a, 0x3ba5d0bd324f8394), // 5^92 - (0xf5746577930d6500, 0xca8f44ec7ee36479), // 5^93 - (0x9968bf6abbe85f20, 0x7e998b13cf4e1ecb), // 5^94 - (0xbfc2ef456ae276e8, 0x9e3fedd8c321a67e), // 5^95 - (0xefb3ab16c59b14a2, 0xc5cfe94ef3ea101e), // 5^96 - (0x95d04aee3b80ece5, 0xbba1f1d158724a12), // 5^97 - (0xbb445da9ca61281f, 0x2a8a6e45ae8edc97), // 5^98 - (0xea1575143cf97226, 0xf52d09d71a3293bd), // 5^99 - (0x924d692ca61be758, 0x593c2626705f9c56), // 5^100 - (0xb6e0c377cfa2e12e, 0x6f8b2fb00c77836c), // 5^101 - (0xe498f455c38b997a, 0xb6dfb9c0f956447), // 5^102 - (0x8edf98b59a373fec, 0x4724bd4189bd5eac), // 5^103 - (0xb2977ee300c50fe7, 0x58edec91ec2cb657), // 5^104 - (0xdf3d5e9bc0f653e1, 0x2f2967b66737e3ed), // 5^105 - (0x8b865b215899f46c, 0xbd79e0d20082ee74), // 5^106 - 
(0xae67f1e9aec07187, 0xecd8590680a3aa11), // 5^107 - (0xda01ee641a708de9, 0xe80e6f4820cc9495), // 5^108 - (0x884134fe908658b2, 0x3109058d147fdcdd), // 5^109 - (0xaa51823e34a7eede, 0xbd4b46f0599fd415), // 5^110 - (0xd4e5e2cdc1d1ea96, 0x6c9e18ac7007c91a), // 5^111 - (0x850fadc09923329e, 0x3e2cf6bc604ddb0), // 5^112 - (0xa6539930bf6bff45, 0x84db8346b786151c), // 5^113 - (0xcfe87f7cef46ff16, 0xe612641865679a63), // 5^114 - (0x81f14fae158c5f6e, 0x4fcb7e8f3f60c07e), // 5^115 - (0xa26da3999aef7749, 0xe3be5e330f38f09d), // 5^116 - (0xcb090c8001ab551c, 0x5cadf5bfd3072cc5), // 5^117 - (0xfdcb4fa002162a63, 0x73d9732fc7c8f7f6), // 5^118 - (0x9e9f11c4014dda7e, 0x2867e7fddcdd9afa), // 5^119 - (0xc646d63501a1511d, 0xb281e1fd541501b8), // 5^120 - (0xf7d88bc24209a565, 0x1f225a7ca91a4226), // 5^121 - (0x9ae757596946075f, 0x3375788de9b06958), // 5^122 - (0xc1a12d2fc3978937, 0x52d6b1641c83ae), // 5^123 - (0xf209787bb47d6b84, 0xc0678c5dbd23a49a), // 5^124 - (0x9745eb4d50ce6332, 0xf840b7ba963646e0), // 5^125 - (0xbd176620a501fbff, 0xb650e5a93bc3d898), // 5^126 - (0xec5d3fa8ce427aff, 0xa3e51f138ab4cebe), // 5^127 - (0x93ba47c980e98cdf, 0xc66f336c36b10137), // 5^128 - (0xb8a8d9bbe123f017, 0xb80b0047445d4184), // 5^129 - (0xe6d3102ad96cec1d, 0xa60dc059157491e5), // 5^130 - (0x9043ea1ac7e41392, 0x87c89837ad68db2f), // 5^131 - (0xb454e4a179dd1877, 0x29babe4598c311fb), // 5^132 - (0xe16a1dc9d8545e94, 0xf4296dd6fef3d67a), // 5^133 - (0x8ce2529e2734bb1d, 0x1899e4a65f58660c), // 5^134 - (0xb01ae745b101e9e4, 0x5ec05dcff72e7f8f), // 5^135 - (0xdc21a1171d42645d, 0x76707543f4fa1f73), // 5^136 - (0x899504ae72497eba, 0x6a06494a791c53a8), // 5^137 - (0xabfa45da0edbde69, 0x487db9d17636892), // 5^138 - (0xd6f8d7509292d603, 0x45a9d2845d3c42b6), // 5^139 - (0x865b86925b9bc5c2, 0xb8a2392ba45a9b2), // 5^140 - (0xa7f26836f282b732, 0x8e6cac7768d7141e), // 5^141 - (0xd1ef0244af2364ff, 0x3207d795430cd926), // 5^142 - (0x8335616aed761f1f, 0x7f44e6bd49e807b8), // 5^143 - (0xa402b9c5a8d3a6e7, 0x5f16206c9c6209a6), // 5^144 - (0xcd036837130890a1, 0x36dba887c37a8c0f), // 5^145 - (0x802221226be55a64, 0xc2494954da2c9789), // 5^146 - (0xa02aa96b06deb0fd, 0xf2db9baa10b7bd6c), // 5^147 - (0xc83553c5c8965d3d, 0x6f92829494e5acc7), // 5^148 - (0xfa42a8b73abbf48c, 0xcb772339ba1f17f9), // 5^149 - (0x9c69a97284b578d7, 0xff2a760414536efb), // 5^150 - (0xc38413cf25e2d70d, 0xfef5138519684aba), // 5^151 - (0xf46518c2ef5b8cd1, 0x7eb258665fc25d69), // 5^152 - (0x98bf2f79d5993802, 0xef2f773ffbd97a61), // 5^153 - (0xbeeefb584aff8603, 0xaafb550ffacfd8fa), // 5^154 - (0xeeaaba2e5dbf6784, 0x95ba2a53f983cf38), // 5^155 - (0x952ab45cfa97a0b2, 0xdd945a747bf26183), // 5^156 - (0xba756174393d88df, 0x94f971119aeef9e4), // 5^157 - (0xe912b9d1478ceb17, 0x7a37cd5601aab85d), // 5^158 - (0x91abb422ccb812ee, 0xac62e055c10ab33a), // 5^159 - (0xb616a12b7fe617aa, 0x577b986b314d6009), // 5^160 - (0xe39c49765fdf9d94, 0xed5a7e85fda0b80b), // 5^161 - (0x8e41ade9fbebc27d, 0x14588f13be847307), // 5^162 - (0xb1d219647ae6b31c, 0x596eb2d8ae258fc8), // 5^163 - (0xde469fbd99a05fe3, 0x6fca5f8ed9aef3bb), // 5^164 - (0x8aec23d680043bee, 0x25de7bb9480d5854), // 5^165 - (0xada72ccc20054ae9, 0xaf561aa79a10ae6a), // 5^166 - (0xd910f7ff28069da4, 0x1b2ba1518094da04), // 5^167 - (0x87aa9aff79042286, 0x90fb44d2f05d0842), // 5^168 - (0xa99541bf57452b28, 0x353a1607ac744a53), // 5^169 - (0xd3fa922f2d1675f2, 0x42889b8997915ce8), // 5^170 - (0x847c9b5d7c2e09b7, 0x69956135febada11), // 5^171 - (0xa59bc234db398c25, 0x43fab9837e699095), // 5^172 - (0xcf02b2c21207ef2e, 0x94f967e45e03f4bb), // 5^173 - 
(0x8161afb94b44f57d, 0x1d1be0eebac278f5), // 5^174 - (0xa1ba1ba79e1632dc, 0x6462d92a69731732), // 5^175 - (0xca28a291859bbf93, 0x7d7b8f7503cfdcfe), // 5^176 - (0xfcb2cb35e702af78, 0x5cda735244c3d43e), // 5^177 - (0x9defbf01b061adab, 0x3a0888136afa64a7), // 5^178 - (0xc56baec21c7a1916, 0x88aaa1845b8fdd0), // 5^179 - (0xf6c69a72a3989f5b, 0x8aad549e57273d45), // 5^180 - (0x9a3c2087a63f6399, 0x36ac54e2f678864b), // 5^181 - (0xc0cb28a98fcf3c7f, 0x84576a1bb416a7dd), // 5^182 - (0xf0fdf2d3f3c30b9f, 0x656d44a2a11c51d5), // 5^183 - (0x969eb7c47859e743, 0x9f644ae5a4b1b325), // 5^184 - (0xbc4665b596706114, 0x873d5d9f0dde1fee), // 5^185 - (0xeb57ff22fc0c7959, 0xa90cb506d155a7ea), // 5^186 - (0x9316ff75dd87cbd8, 0x9a7f12442d588f2), // 5^187 - (0xb7dcbf5354e9bece, 0xc11ed6d538aeb2f), // 5^188 - (0xe5d3ef282a242e81, 0x8f1668c8a86da5fa), // 5^189 - (0x8fa475791a569d10, 0xf96e017d694487bc), // 5^190 - (0xb38d92d760ec4455, 0x37c981dcc395a9ac), // 5^191 - (0xe070f78d3927556a, 0x85bbe253f47b1417), // 5^192 - (0x8c469ab843b89562, 0x93956d7478ccec8e), // 5^193 - (0xaf58416654a6babb, 0x387ac8d1970027b2), // 5^194 - (0xdb2e51bfe9d0696a, 0x6997b05fcc0319e), // 5^195 - (0x88fcf317f22241e2, 0x441fece3bdf81f03), // 5^196 - (0xab3c2fddeeaad25a, 0xd527e81cad7626c3), // 5^197 - (0xd60b3bd56a5586f1, 0x8a71e223d8d3b074), // 5^198 - (0x85c7056562757456, 0xf6872d5667844e49), // 5^199 - (0xa738c6bebb12d16c, 0xb428f8ac016561db), // 5^200 - (0xd106f86e69d785c7, 0xe13336d701beba52), // 5^201 - (0x82a45b450226b39c, 0xecc0024661173473), // 5^202 - (0xa34d721642b06084, 0x27f002d7f95d0190), // 5^203 - (0xcc20ce9bd35c78a5, 0x31ec038df7b441f4), // 5^204 - (0xff290242c83396ce, 0x7e67047175a15271), // 5^205 - (0x9f79a169bd203e41, 0xf0062c6e984d386), // 5^206 - (0xc75809c42c684dd1, 0x52c07b78a3e60868), // 5^207 - (0xf92e0c3537826145, 0xa7709a56ccdf8a82), // 5^208 - (0x9bbcc7a142b17ccb, 0x88a66076400bb691), // 5^209 - (0xc2abf989935ddbfe, 0x6acff893d00ea435), // 5^210 - (0xf356f7ebf83552fe, 0x583f6b8c4124d43), // 5^211 - (0x98165af37b2153de, 0xc3727a337a8b704a), // 5^212 - (0xbe1bf1b059e9a8d6, 0x744f18c0592e4c5c), // 5^213 - (0xeda2ee1c7064130c, 0x1162def06f79df73), // 5^214 - (0x9485d4d1c63e8be7, 0x8addcb5645ac2ba8), // 5^215 - (0xb9a74a0637ce2ee1, 0x6d953e2bd7173692), // 5^216 - (0xe8111c87c5c1ba99, 0xc8fa8db6ccdd0437), // 5^217 - (0x910ab1d4db9914a0, 0x1d9c9892400a22a2), // 5^218 - (0xb54d5e4a127f59c8, 0x2503beb6d00cab4b), // 5^219 - (0xe2a0b5dc971f303a, 0x2e44ae64840fd61d), // 5^220 - (0x8da471a9de737e24, 0x5ceaecfed289e5d2), // 5^221 - (0xb10d8e1456105dad, 0x7425a83e872c5f47), // 5^222 - (0xdd50f1996b947518, 0xd12f124e28f77719), // 5^223 - (0x8a5296ffe33cc92f, 0x82bd6b70d99aaa6f), // 5^224 - (0xace73cbfdc0bfb7b, 0x636cc64d1001550b), // 5^225 - (0xd8210befd30efa5a, 0x3c47f7e05401aa4e), // 5^226 - (0x8714a775e3e95c78, 0x65acfaec34810a71), // 5^227 - (0xa8d9d1535ce3b396, 0x7f1839a741a14d0d), // 5^228 - (0xd31045a8341ca07c, 0x1ede48111209a050), // 5^229 - (0x83ea2b892091e44d, 0x934aed0aab460432), // 5^230 - (0xa4e4b66b68b65d60, 0xf81da84d5617853f), // 5^231 - (0xce1de40642e3f4b9, 0x36251260ab9d668e), // 5^232 - (0x80d2ae83e9ce78f3, 0xc1d72b7c6b426019), // 5^233 - (0xa1075a24e4421730, 0xb24cf65b8612f81f), // 5^234 - (0xc94930ae1d529cfc, 0xdee033f26797b627), // 5^235 - (0xfb9b7cd9a4a7443c, 0x169840ef017da3b1), // 5^236 - (0x9d412e0806e88aa5, 0x8e1f289560ee864e), // 5^237 - (0xc491798a08a2ad4e, 0xf1a6f2bab92a27e2), // 5^238 - (0xf5b5d7ec8acb58a2, 0xae10af696774b1db), // 5^239 - (0x9991a6f3d6bf1765, 0xacca6da1e0a8ef29), // 5^240 - 
(0xbff610b0cc6edd3f, 0x17fd090a58d32af3), // 5^241 - (0xeff394dcff8a948e, 0xddfc4b4cef07f5b0), // 5^242 - (0x95f83d0a1fb69cd9, 0x4abdaf101564f98e), // 5^243 - (0xbb764c4ca7a4440f, 0x9d6d1ad41abe37f1), // 5^244 - (0xea53df5fd18d5513, 0x84c86189216dc5ed), // 5^245 - (0x92746b9be2f8552c, 0x32fd3cf5b4e49bb4), // 5^246 - (0xb7118682dbb66a77, 0x3fbc8c33221dc2a1), // 5^247 - (0xe4d5e82392a40515, 0xfabaf3feaa5334a), // 5^248 - (0x8f05b1163ba6832d, 0x29cb4d87f2a7400e), // 5^249 - (0xb2c71d5bca9023f8, 0x743e20e9ef511012), // 5^250 - (0xdf78e4b2bd342cf6, 0x914da9246b255416), // 5^251 - (0x8bab8eefb6409c1a, 0x1ad089b6c2f7548e), // 5^252 - (0xae9672aba3d0c320, 0xa184ac2473b529b1), // 5^253 - (0xda3c0f568cc4f3e8, 0xc9e5d72d90a2741e), // 5^254 - (0x8865899617fb1871, 0x7e2fa67c7a658892), // 5^255 - (0xaa7eebfb9df9de8d, 0xddbb901b98feeab7), // 5^256 - (0xd51ea6fa85785631, 0x552a74227f3ea565), // 5^257 - (0x8533285c936b35de, 0xd53a88958f87275f), // 5^258 - (0xa67ff273b8460356, 0x8a892abaf368f137), // 5^259 - (0xd01fef10a657842c, 0x2d2b7569b0432d85), // 5^260 - (0x8213f56a67f6b29b, 0x9c3b29620e29fc73), // 5^261 - (0xa298f2c501f45f42, 0x8349f3ba91b47b8f), // 5^262 - (0xcb3f2f7642717713, 0x241c70a936219a73), // 5^263 - (0xfe0efb53d30dd4d7, 0xed238cd383aa0110), // 5^264 - (0x9ec95d1463e8a506, 0xf4363804324a40aa), // 5^265 - (0xc67bb4597ce2ce48, 0xb143c6053edcd0d5), // 5^266 - (0xf81aa16fdc1b81da, 0xdd94b7868e94050a), // 5^267 - (0x9b10a4e5e9913128, 0xca7cf2b4191c8326), // 5^268 - (0xc1d4ce1f63f57d72, 0xfd1c2f611f63a3f0), // 5^269 - (0xf24a01a73cf2dccf, 0xbc633b39673c8cec), // 5^270 - (0x976e41088617ca01, 0xd5be0503e085d813), // 5^271 - (0xbd49d14aa79dbc82, 0x4b2d8644d8a74e18), // 5^272 - (0xec9c459d51852ba2, 0xddf8e7d60ed1219e), // 5^273 - (0x93e1ab8252f33b45, 0xcabb90e5c942b503), // 5^274 - (0xb8da1662e7b00a17, 0x3d6a751f3b936243), // 5^275 - (0xe7109bfba19c0c9d, 0xcc512670a783ad4), // 5^276 - (0x906a617d450187e2, 0x27fb2b80668b24c5), // 5^277 - (0xb484f9dc9641e9da, 0xb1f9f660802dedf6), // 5^278 - (0xe1a63853bbd26451, 0x5e7873f8a0396973), // 5^279 - (0x8d07e33455637eb2, 0xdb0b487b6423e1e8), // 5^280 - (0xb049dc016abc5e5f, 0x91ce1a9a3d2cda62), // 5^281 - (0xdc5c5301c56b75f7, 0x7641a140cc7810fb), // 5^282 - (0x89b9b3e11b6329ba, 0xa9e904c87fcb0a9d), // 5^283 - (0xac2820d9623bf429, 0x546345fa9fbdcd44), // 5^284 - (0xd732290fbacaf133, 0xa97c177947ad4095), // 5^285 - (0x867f59a9d4bed6c0, 0x49ed8eabcccc485d), // 5^286 - (0xa81f301449ee8c70, 0x5c68f256bfff5a74), // 5^287 - (0xd226fc195c6a2f8c, 0x73832eec6fff3111), // 5^288 - (0x83585d8fd9c25db7, 0xc831fd53c5ff7eab), // 5^289 - (0xa42e74f3d032f525, 0xba3e7ca8b77f5e55), // 5^290 - (0xcd3a1230c43fb26f, 0x28ce1bd2e55f35eb), // 5^291 - (0x80444b5e7aa7cf85, 0x7980d163cf5b81b3), // 5^292 - (0xa0555e361951c366, 0xd7e105bcc332621f), // 5^293 - (0xc86ab5c39fa63440, 0x8dd9472bf3fefaa7), // 5^294 - (0xfa856334878fc150, 0xb14f98f6f0feb951), // 5^295 - (0x9c935e00d4b9d8d2, 0x6ed1bf9a569f33d3), // 5^296 - (0xc3b8358109e84f07, 0xa862f80ec4700c8), // 5^297 - (0xf4a642e14c6262c8, 0xcd27bb612758c0fa), // 5^298 - (0x98e7e9cccfbd7dbd, 0x8038d51cb897789c), // 5^299 - (0xbf21e44003acdd2c, 0xe0470a63e6bd56c3), // 5^300 - (0xeeea5d5004981478, 0x1858ccfce06cac74), // 5^301 - (0x95527a5202df0ccb, 0xf37801e0c43ebc8), // 5^302 - (0xbaa718e68396cffd, 0xd30560258f54e6ba), // 5^303 - (0xe950df20247c83fd, 0x47c6b82ef32a2069), // 5^304 - (0x91d28b7416cdd27e, 0x4cdc331d57fa5441), // 5^305 - (0xb6472e511c81471d, 0xe0133fe4adf8e952), // 5^306 - (0xe3d8f9e563a198e5, 0x58180fddd97723a6), // 5^307 - 
(0x8e679c2f5e44ff8f, 0x570f09eaa7ea7648), // 5^308 -]; diff --git a/vendor/minimal-lexical/src/table_small.rs b/vendor/minimal-lexical/src/table_small.rs deleted file mode 100644 index 9da69916fba041..00000000000000 --- a/vendor/minimal-lexical/src/table_small.rs +++ /dev/null @@ -1,90 +0,0 @@ -//! Pre-computed small tables for parsing decimal strings. - -#![doc(hidden)] -#![cfg(not(feature = "compact"))] - -/// Pre-computed, small powers-of-5. -pub const SMALL_INT_POW5: [u64; 28] = [ - 1, - 5, - 25, - 125, - 625, - 3125, - 15625, - 78125, - 390625, - 1953125, - 9765625, - 48828125, - 244140625, - 1220703125, - 6103515625, - 30517578125, - 152587890625, - 762939453125, - 3814697265625, - 19073486328125, - 95367431640625, - 476837158203125, - 2384185791015625, - 11920928955078125, - 59604644775390625, - 298023223876953125, - 1490116119384765625, - 7450580596923828125, -]; - -/// Pre-computed, small powers-of-10. -pub const SMALL_INT_POW10: [u64; 20] = [ - 1, - 10, - 100, - 1000, - 10000, - 100000, - 1000000, - 10000000, - 100000000, - 1000000000, - 10000000000, - 100000000000, - 1000000000000, - 10000000000000, - 100000000000000, - 1000000000000000, - 10000000000000000, - 100000000000000000, - 1000000000000000000, - 10000000000000000000, -]; - -/// Pre-computed, small powers-of-10. -pub const SMALL_F32_POW10: [f32; 16] = - [1e0, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e9, 1e10, 0., 0., 0., 0., 0.]; - -/// Pre-computed, small powers-of-10. -pub const SMALL_F64_POW10: [f64; 32] = [ - 1e0, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e9, 1e10, 1e11, 1e12, 1e13, 1e14, 1e15, 1e16, - 1e17, 1e18, 1e19, 1e20, 1e21, 1e22, 0., 0., 0., 0., 0., 0., 0., 0., 0., -]; - -/// Pre-computed large power-of-5 for 32-bit limbs. -#[cfg(not(all(target_pointer_width = "64", not(target_arch = "sparc"))))] -pub const LARGE_POW5: [u32; 10] = [ - 4279965485, 329373468, 4020270615, 2137533757, 4287402176, 1057042919, 1071430142, 2440757623, - 381945767, 46164893, -]; - -/// Pre-computed large power-of-5 for 64-bit limbs. -#[cfg(all(target_pointer_width = "64", not(target_arch = "sparc")))] -pub const LARGE_POW5: [u64; 5] = [ - 1414648277510068013, - 9180637584431281687, - 4539964771860779200, - 10482974169319127550, - 198276706040285095, -]; - -/// Step for large power-of-5 for 32-bit limbs. -pub const LARGE_POW5_STEP: u32 = 135; diff --git a/vendor/minimal-lexical/tests/bellerophon.rs b/vendor/minimal-lexical/tests/bellerophon.rs deleted file mode 100644 index 99cd89acfc8241..00000000000000 --- a/vendor/minimal-lexical/tests/bellerophon.rs +++ /dev/null @@ -1,59 +0,0 @@ -#![cfg(feature = "compact")] -#![allow(dead_code)] - -use minimal_lexical::bellerophon::bellerophon; -use minimal_lexical::extended_float::{extended_to_float, ExtendedFloat}; -use minimal_lexical::num::Float; -use minimal_lexical::number::Number; - -pub fn bellerophon_test( - xmant: u64, - xexp: i32, - many_digits: bool, - ymant: u64, - yexp: i32, -) { - let num = Number { - exponent: xexp, - mantissa: xmant, - many_digits, - }; - let xfp = bellerophon::(&num); - let yfp = ExtendedFloat { - mant: ymant, - exp: yexp, - }; - // Given us useful error messages if the floats are valid. 
- if xfp.exp >= 0 && yfp.exp >= 0 { - assert!( - xfp == yfp, - "x != y, xfp={:?}, yfp={:?}, x={:?}, y={:?}", - xfp, - yfp, - extended_to_float::(xfp), - extended_to_float::(yfp) - ); - } else { - assert_eq!(xfp, yfp); - } -} - -pub fn compute_float32(q: i32, w: u64) -> (i32, u64) { - let num = Number { - exponent: q, - mantissa: w, - many_digits: false, - }; - let fp = bellerophon::(&num); - (fp.exp, fp.mant) -} - -pub fn compute_float64(q: i32, w: u64) -> (i32, u64) { - let num = Number { - exponent: q, - mantissa: w, - many_digits: false, - }; - let fp = bellerophon::(&num); - (fp.exp, fp.mant) -} diff --git a/vendor/minimal-lexical/tests/bellerophon_tests.rs b/vendor/minimal-lexical/tests/bellerophon_tests.rs deleted file mode 100644 index f5826c615d6d0b..00000000000000 --- a/vendor/minimal-lexical/tests/bellerophon_tests.rs +++ /dev/null @@ -1,231 +0,0 @@ -#![cfg(feature = "compact")] - -mod bellerophon; - -use bellerophon::{bellerophon_test, compute_float32, compute_float64}; -use minimal_lexical::num::Float; - -#[test] -fn halfway_round_down_test() { - // Halfway, round-down tests - bellerophon_test::(9007199254740992, 0, false, 0, 1076); - bellerophon_test::( - 9007199254740993, - 0, - false, - 9223372036854776832, - 1065 + f64::INVALID_FP, - ); - bellerophon_test::(9007199254740994, 0, false, 1, 1076); - - bellerophon_test::(18014398509481984, 0, false, 0, 1077); - bellerophon_test::( - 18014398509481986, - 0, - false, - 9223372036854776832, - 1066 + f64::INVALID_FP, - ); - bellerophon_test::(18014398509481988, 0, false, 1, 1077); - - bellerophon_test::(9223372036854775808, 0, false, 0, 1086); - bellerophon_test::( - 9223372036854776832, - 0, - false, - 9223372036854776832, - 1075 + f64::INVALID_FP, - ); - bellerophon_test::(9223372036854777856, 0, false, 1, 1086); - - // Add a 0 but say we're truncated. - bellerophon_test::(9007199254740992000, -3, true, 0, 1076); - bellerophon_test::( - 9007199254740993000, - -3, - true, - 9223372036854776832, - 1065 + f64::INVALID_FP, - ); - bellerophon_test::(9007199254740994000, -3, true, 1, 1076); -} - -#[test] -fn halfway_round_up_test() { - // Halfway, round-up tests - bellerophon_test::(9007199254740994, 0, false, 1, 1076); - bellerophon_test::( - 9007199254740995, - 0, - false, - 9223372036854778880, - 1065 + f64::INVALID_FP, - ); - bellerophon_test::(9007199254740996, 0, false, 2, 1076); - - bellerophon_test::(18014398509481988, 0, false, 1, 1077); - bellerophon_test::( - 18014398509481990, - 0, - false, - 9223372036854778880, - 1066 + f64::INVALID_FP, - ); - bellerophon_test::(18014398509481992, 0, false, 2, 1077); - - bellerophon_test::(9223372036854777856, 0, false, 1, 1086); - bellerophon_test::( - 9223372036854778880, - 0, - false, - 9223372036854778880, - 1075 + f64::INVALID_FP, - ); - bellerophon_test::(9223372036854779904, 0, false, 2, 1086); - - // Add a 0 but say we're truncated. 
- bellerophon_test::(9007199254740994000, -3, true, 1, 1076); - bellerophon_test::( - 9007199254740994990, - -3, - true, - 9223372036854778869, - 1065 + f64::INVALID_FP, - ); - bellerophon_test::( - 9007199254740995000, - -3, - true, - 9223372036854778879, - 1065 + f64::INVALID_FP, - ); - bellerophon_test::( - 9007199254740995010, - -3, - true, - 9223372036854778890, - 1065 + f64::INVALID_FP, - ); - bellerophon_test::(9007199254740995050, -3, true, 2, 1076); - bellerophon_test::(9007199254740996000, -3, true, 2, 1076); -} - -#[test] -fn extremes_test() { - // Need to check we get proper results with rounding for near-infinity - // and near-zero and/or denormal floats. - bellerophon_test::(5, -324, false, 1, 0); - bellerophon_test::(10, -324, false, 2, 0); - // This is very close to 2.4703282292062327206e-342. - bellerophon_test::( - 2470328229206232720, - -342, - false, - 18446744073709551608, - -64 + f64::INVALID_FP, - ); - bellerophon_test::(2470328229206232721, -342, false, 9223372036854775808, -32831); - bellerophon_test::(2470328229206232725, -342, false, 9223372036854775824, -32831); - bellerophon_test::(2470328229206232726, -342, false, 1, 0); - bellerophon_test::(2470328229206232730, -342, false, 1, 0); - // Check very close to literal infinity. - // 17.976931348623155 - // 1.797693134862315508561243283845062402343434371574593359244049e+308 - // 1.797693134862315708145274237317043567980705675258449965989175e+308 - bellerophon_test::(17976931348623155, 292, false, 4503599627370494, 2046); - bellerophon_test::(17976931348623156, 292, false, 4503599627370494, 2046); - bellerophon_test::(1797693134862315605, 290, false, 4503599627370494, 2046); - bellerophon_test::(1797693134862315607, 290, false, 4503599627370494, 2046); - bellerophon_test::(1797693134862315608, 290, false, 18446744073709548540, -30733); - bellerophon_test::(1797693134862315609, 290, false, 18446744073709548550, -30733); - bellerophon_test::(179769313486231561, 291, false, 4503599627370495, 2046); - bellerophon_test::(17976931348623157, 292, false, 4503599627370495, 2046); - - // Check existing issues and underflow. - bellerophon_test::(2470328229206232726, -343, false, 0, 0); - bellerophon_test::(2470328229206232726, -342, false, 1, 0); - bellerophon_test::(1, -250, false, 1945308223406668, 192); - bellerophon_test::(1, -150, false, 2867420733609077, 524); - bellerophon_test::(1, -45, false, 1924152549665465, 873); - bellerophon_test::(1, -40, false, 400386103400348, 890); - bellerophon_test::(1, -20, false, 2142540351554083, 956); - bellerophon_test::(1, 0, false, 0, 1023); - bellerophon_test::(1, 20, false, 1599915997629504, 1089); - bellerophon_test::(1, 40, false, 3768206498159781, 1155); - bellerophon_test::(1, 150, false, 999684479948463, 1521); - bellerophon_test::(1, 250, false, 1786584717939204, 1853); - // Minimum positive normal float. - bellerophon_test::(22250738585072014, -324, false, 0, 1); - // Maximum positive subnormal float. - bellerophon_test::(2225073858507201, -323, false, 4503599627370495, 0); - // Next highest subnormal float. 
- bellerophon_test::(22250738585072004, -324, false, 4503599627370494, 0); - bellerophon_test::(22250738585072006, -324, false, 4503599627370494, 0); - bellerophon_test::(22250738585072007, -324, false, 4503599627370495, 0); - bellerophon_test::(222507385850720062, -325, false, 4503599627370494, 0); - bellerophon_test::(222507385850720063, -325, false, 4503599627370494, 0); - bellerophon_test::(222507385850720064, -325, false, 4503599627370494, 0); - bellerophon_test::(2225073858507200641, -326, false, 18446744073709545462, -32779); - bellerophon_test::(2225073858507200642, -326, false, 18446744073709545472, -32779); - bellerophon_test::(222507385850720065, -325, false, 4503599627370495, 0); -} - -#[test] -fn compute_float_f32_test() { - // These test near-halfway cases for single-precision floats. - assert_eq!(compute_float32(0, 16777216), (151, 0)); - assert_eq!(compute_float32(0, 16777217), (111 + f32::INVALID_FP, 9223372586610589696)); - assert_eq!(compute_float32(0, 16777218), (151, 1)); - assert_eq!(compute_float32(0, 16777219), (111 + f32::INVALID_FP, 9223373686122217472)); - assert_eq!(compute_float32(0, 16777220), (151, 2)); - - // These are examples of the above tests, with - // digits from the exponent shifted to the mantissa. - assert_eq!(compute_float32(-10, 167772160000000000), (151, 0)); - assert_eq!( - compute_float32(-10, 167772170000000000), - (111 + f32::INVALID_FP, 9223372586610589696) - ); - assert_eq!(compute_float32(-10, 167772180000000000), (151, 1)); - // Let's check the lines to see if anything is different in table... - assert_eq!( - compute_float32(-10, 167772190000000000), - (111 + f32::INVALID_FP, 9223373686122217472) - ); - assert_eq!(compute_float32(-10, 167772200000000000), (151, 2)); -} - -#[test] -fn compute_float_f64_test() { - // These test near-halfway cases for double-precision floats. - assert_eq!(compute_float64(0, 9007199254740992), (1076, 0)); - assert_eq!(compute_float64(0, 9007199254740993), (1065 + f64::INVALID_FP, 9223372036854776832)); - assert_eq!(compute_float64(0, 9007199254740994), (1076, 1)); - assert_eq!(compute_float64(0, 9007199254740995), (1065 + f64::INVALID_FP, 9223372036854778880)); - assert_eq!(compute_float64(0, 9007199254740996), (1076, 2)); - assert_eq!(compute_float64(0, 18014398509481984), (1077, 0)); - assert_eq!( - compute_float64(0, 18014398509481986), - (1066 + f64::INVALID_FP, 9223372036854776832) - ); - assert_eq!(compute_float64(0, 18014398509481988), (1077, 1)); - assert_eq!( - compute_float64(0, 18014398509481990), - (1066 + f64::INVALID_FP, 9223372036854778880) - ); - assert_eq!(compute_float64(0, 18014398509481992), (1077, 2)); - - // These are examples of the above tests, with - // digits from the exponent shifted to the mantissa. - assert_eq!(compute_float64(-3, 9007199254740992000), (1076, 0)); - assert_eq!( - compute_float64(-3, 9007199254740993000), - (1065 + f64::INVALID_FP, 9223372036854776832) - ); - assert_eq!(compute_float64(-3, 9007199254740994000), (1076, 1)); - assert_eq!( - compute_float64(-3, 9007199254740995000), - (1065 + f64::INVALID_FP, 9223372036854778879) - ); - assert_eq!(compute_float64(-3, 9007199254740996000), (1076, 2)); -} diff --git a/vendor/minimal-lexical/tests/integration_tests.rs b/vendor/minimal-lexical/tests/integration_tests.rs deleted file mode 100644 index a8f2ff8a0ec63b..00000000000000 --- a/vendor/minimal-lexical/tests/integration_tests.rs +++ /dev/null @@ -1,228 +0,0 @@ -/// Find and parse sign and get remaining bytes. 
-#[inline] -fn parse_sign<'a>(bytes: &'a [u8]) -> (bool, &'a [u8]) { - match bytes.get(0) { - Some(&b'+') => (true, &bytes[1..]), - Some(&b'-') => (false, &bytes[1..]), - _ => (true, bytes), - } -} - -// Convert u8 to digit. -#[inline] -fn to_digit(c: u8) -> Option { - (c as char).to_digit(10) -} - -// Add digit from exponent. -#[inline] -fn add_digit_i32(value: i32, digit: u32) -> Option { - return value.checked_mul(10)?.checked_add(digit as i32); -} - -// Subtract digit from exponent. -#[inline] -fn sub_digit_i32(value: i32, digit: u32) -> Option { - return value.checked_mul(10)?.checked_sub(digit as i32); -} - -// Convert character to digit. -#[inline] -fn is_digit(c: u8) -> bool { - to_digit(c).is_some() -} - -// Split buffer at index. -#[inline] -fn split_at_index<'a>(digits: &'a [u8], index: usize) -> (&'a [u8], &'a [u8]) { - (&digits[..index], &digits[index..]) -} - -/// Consume until a an invalid digit is found. -/// -/// - `digits` - Slice containing 0 or more digits. -#[inline] -fn consume_digits<'a>(digits: &'a [u8]) -> (&'a [u8], &'a [u8]) { - // Consume all digits. - let mut index = 0; - while index < digits.len() && is_digit(digits[index]) { - index += 1; - } - split_at_index(digits, index) -} - -// Trim leading 0s. -#[inline] -fn ltrim_zero<'a>(bytes: &'a [u8]) -> &'a [u8] { - let count = bytes.iter().take_while(|&&si| si == b'0').count(); - &bytes[count..] -} - -// Trim trailing 0s. -#[inline] -fn rtrim_zero<'a>(bytes: &'a [u8]) -> &'a [u8] { - let count = bytes.iter().rev().take_while(|&&si| si == b'0').count(); - let index = bytes.len() - count; - &bytes[..index] -} - -// PARSERS -// ------- - -/// Parse the exponent of the float. -/// -/// * `exponent` - Slice containing the exponent digits. -/// * `is_positive` - If the exponent sign is positive. -fn parse_exponent(exponent: &[u8], is_positive: bool) -> i32 { - // Parse the sign bit or current data. - let mut value: i32 = 0; - match is_positive { - true => { - for c in exponent { - value = match add_digit_i32(value, to_digit(*c).unwrap()) { - Some(v) => v, - None => return i32::max_value(), - }; - } - }, - false => { - for c in exponent { - value = match sub_digit_i32(value, to_digit(*c).unwrap()) { - Some(v) => v, - None => return i32::min_value(), - }; - } - }, - } - - value -} - -pub fn case_insensitive_starts_with<'a, 'b, Iter1, Iter2>(mut x: Iter1, mut y: Iter2) -> bool -where - Iter1: Iterator, - Iter2: Iterator, -{ - // We use a faster optimization here for ASCII letters, which NaN - // and infinite strings **must** be. [A-Z] is 0x41-0x5A, while - // [a-z] is 0x61-0x7A. Therefore, the xor must be 0 or 32 if they - // are case-insensitive equal, but only if at least 1 of the inputs - // is an ASCII letter. - loop { - let yi = y.next(); - if yi.is_none() { - return true; - } - let yi = *yi.unwrap(); - let is_not_equal = x.next().map_or(true, |&xi| { - let xor = xi ^ yi; - xor != 0 && xor != 0x20 - }); - if is_not_equal { - return false; - } - } -} - -/// Parse float from input bytes, returning the float and the remaining bytes. -/// -/// * `bytes` - Array of bytes leading with float-data. -pub fn parse_float<'a, F>(bytes: &'a [u8]) -> (F, &'a [u8]) -where - F: minimal_lexical::Float, -{ - let start = bytes; - - // Parse the sign. 
- let (is_positive, bytes) = parse_sign(bytes); - - // Check NaN, Inf, Infinity - if case_insensitive_starts_with(bytes.iter(), b"NaN".iter()) { - let mut float = F::from_bits(F::EXPONENT_MASK | (F::HIDDEN_BIT_MASK >> 1)); - if !is_positive { - float = -float; - } - return (float, &bytes[3..]); - } else if case_insensitive_starts_with(bytes.iter(), b"Infinity".iter()) { - let mut float = F::from_bits(F::EXPONENT_MASK); - if !is_positive { - float = -float; - } - return (float, &bytes[8..]); - } else if case_insensitive_starts_with(bytes.iter(), b"inf".iter()) { - let mut float = F::from_bits(F::EXPONENT_MASK); - if !is_positive { - float = -float; - } - return (float, &bytes[3..]); - } - - // Extract and parse the float components: - // 1. Integer - // 2. Fraction - // 3. Exponent - let (integer_slc, bytes) = consume_digits(bytes); - let (fraction_slc, bytes) = match bytes.first() { - Some(&b'.') => consume_digits(&bytes[1..]), - _ => (&bytes[..0], bytes), - }; - let (exponent, bytes) = match bytes.first() { - Some(&b'e') | Some(&b'E') => { - // Extract and parse the exponent. - let (is_positive, bytes) = parse_sign(&bytes[1..]); - let (exponent, bytes) = consume_digits(bytes); - (parse_exponent(exponent, is_positive), bytes) - }, - _ => (0, bytes), - }; - - if bytes.len() == start.len() { - return (F::from_u64(0), bytes); - } - - // Note: You may want to check and validate the float data here: - // 1). Many floats require integer or fraction digits, if a fraction - // is present. - // 2). All floats require either integer or fraction digits. - // 3). Some floats do not allow a '+' sign before the significant digits. - // 4). Many floats require exponent digits after the exponent symbol. - // 5). Some floats do not allow a '+' sign before the exponent. - - // We now need to trim leading and trailing 0s from the integer - // and fraction, respectively. This is required to make the - // fast and moderate paths more efficient, and for the slow - // path. - let integer_slc = ltrim_zero(integer_slc); - let fraction_slc = rtrim_zero(fraction_slc); - - // Create the float and return our data. - let mut float: F = - minimal_lexical::parse_float(integer_slc.iter(), fraction_slc.iter(), exponent); - if !is_positive { - float = -float; - } - - (float, bytes) -} - -macro_rules! b { - ($x:literal) => { - $x.as_bytes() - }; -} - -#[test] -fn f32_test() { - assert_eq!( - (184467440000000000000.0, b!("\x00\x00006")), - parse_float::(b"000184467440737095516150\x00\x00006") - ); -} - -#[test] -fn f64_test() { - assert_eq!( - (184467440737095500000.0, b!("\x00\x00006")), - parse_float::(b"000184467440737095516150\x00\x00006") - ); -} diff --git a/vendor/minimal-lexical/tests/lemire_tests.rs b/vendor/minimal-lexical/tests/lemire_tests.rs deleted file mode 100644 index 0523ca5b2abfc6..00000000000000 --- a/vendor/minimal-lexical/tests/lemire_tests.rs +++ /dev/null @@ -1,378 +0,0 @@ -//! These tests are adapted from the Rust core library's unittests. 
- -#![cfg(not(feature = "compact"))] - -use minimal_lexical::lemire; -use minimal_lexical::num::Float; - -fn compute_error32(q: i32, w: u64) -> (i32, u64) { - let fp = lemire::compute_error::(q, w); - (fp.exp, fp.mant) -} - -fn compute_error64(q: i32, w: u64) -> (i32, u64) { - let fp = lemire::compute_error::(q, w); - (fp.exp, fp.mant) -} - -fn compute_error_scaled32(q: i32, w: u64, lz: i32) -> (i32, u64) { - let fp = lemire::compute_error_scaled::(q, w, lz); - (fp.exp, fp.mant) -} - -fn compute_error_scaled64(q: i32, w: u64, lz: i32) -> (i32, u64) { - let fp = lemire::compute_error_scaled::(q, w, lz); - (fp.exp, fp.mant) -} - -fn compute_float32(q: i32, w: u64) -> (i32, u64) { - let fp = lemire::compute_float::(q, w); - (fp.exp, fp.mant) -} - -fn compute_float64(q: i32, w: u64) -> (i32, u64) { - let fp = lemire::compute_float::(q, w); - (fp.exp, fp.mant) -} - -#[test] -fn compute_error32_test() { - // These test near-halfway cases for single-precision floats. - assert_eq!(compute_error32(0, 16777216), (111 + f32::INVALID_FP, 9223372036854775808)); - assert_eq!(compute_error32(0, 16777217), (111 + f32::INVALID_FP, 9223372586610589696)); - assert_eq!(compute_error32(0, 16777218), (111 + f32::INVALID_FP, 9223373136366403584)); - assert_eq!(compute_error32(0, 16777219), (111 + f32::INVALID_FP, 9223373686122217472)); - assert_eq!(compute_error32(0, 16777220), (111 + f32::INVALID_FP, 9223374235878031360)); - - // These are examples of the above tests, with - // digits from the exponent shifted to the mantissa. - assert_eq!( - compute_error32(-10, 167772160000000000), - (111 + f32::INVALID_FP, 9223372036854775808) - ); - assert_eq!( - compute_error32(-10, 167772170000000000), - (111 + f32::INVALID_FP, 9223372586610589696) - ); - assert_eq!( - compute_error32(-10, 167772180000000000), - (111 + f32::INVALID_FP, 9223373136366403584) - ); - // Let's check the lines to see if anything is different in table... - assert_eq!( - compute_error32(-10, 167772190000000000), - (111 + f32::INVALID_FP, 9223373686122217472) - ); - assert_eq!( - compute_error32(-10, 167772200000000000), - (111 + f32::INVALID_FP, 9223374235878031360) - ); -} - -#[test] -fn compute_error64_test() { - // These test near-halfway cases for double-precision floats. - assert_eq!(compute_error64(0, 9007199254740992), (1065 + f64::INVALID_FP, 9223372036854775808)); - assert_eq!(compute_error64(0, 9007199254740993), (1065 + f64::INVALID_FP, 9223372036854776832)); - assert_eq!(compute_error64(0, 9007199254740994), (1065 + f64::INVALID_FP, 9223372036854777856)); - assert_eq!(compute_error64(0, 9007199254740995), (1065 + f64::INVALID_FP, 9223372036854778880)); - assert_eq!(compute_error64(0, 9007199254740996), (1065 + f64::INVALID_FP, 9223372036854779904)); - assert_eq!( - compute_error64(0, 18014398509481984), - (1066 + f64::INVALID_FP, 9223372036854775808) - ); - assert_eq!( - compute_error64(0, 18014398509481986), - (1066 + f64::INVALID_FP, 9223372036854776832) - ); - assert_eq!( - compute_error64(0, 18014398509481988), - (1066 + f64::INVALID_FP, 9223372036854777856) - ); - assert_eq!( - compute_error64(0, 18014398509481990), - (1066 + f64::INVALID_FP, 9223372036854778880) - ); - assert_eq!( - compute_error64(0, 18014398509481992), - (1066 + f64::INVALID_FP, 9223372036854779904) - ); - - // Test a much closer set of examples. 
- assert_eq!( - compute_error64(0, 9007199254740991), - (1064 + f64::INVALID_FP, 18446744073709549568) - ); - assert_eq!( - compute_error64(0, 9223372036854776831), - (1075 + f64::INVALID_FP, 9223372036854776830) - ); - assert_eq!( - compute_error64(0, 9223372036854776832), - (1075 + f64::INVALID_FP, 9223372036854776832) - ); - assert_eq!( - compute_error64(0, 9223372036854776833), - (1075 + f64::INVALID_FP, 9223372036854776832) - ); - assert_eq!( - compute_error64(-42, 9123456727292927), - (925 + f64::INVALID_FP, 13021432563531497894) - ); - assert_eq!( - compute_error64(-43, 91234567272929275), - (925 + f64::INVALID_FP, 13021432563531498606) - ); - assert_eq!( - compute_error64(-42, 9123456727292928), - (925 + f64::INVALID_FP, 13021432563531499320) - ); - - // These are examples of the above tests, with - // digits from the exponent shifted to the mantissa. - assert_eq!( - compute_error64(-3, 9007199254740992000), - (1065 + f64::INVALID_FP, 9223372036854775808) - ); - assert_eq!( - compute_error64(-3, 9007199254740993000), - (1065 + f64::INVALID_FP, 9223372036854776832) - ); - assert_eq!( - compute_error64(-3, 9007199254740994000), - (1065 + f64::INVALID_FP, 9223372036854777856) - ); - assert_eq!( - compute_error64(-3, 9007199254740995000), - (1065 + f64::INVALID_FP, 9223372036854778880) - ); - assert_eq!( - compute_error64(-3, 9007199254740996000), - (1065 + f64::INVALID_FP, 9223372036854779904) - ); - - // Test from errors in atof. - assert_eq!( - compute_error64(-18, 1000000178813934326), - (1012 + f64::INVALID_FP, 9223373686122217470) - ); - - // Check edge-cases from previous errors. - assert_eq!( - compute_error64(-342, 2470328229206232720), - (-64 + f64::INVALID_FP, 18446744073709551608) - ); -} - -#[test] -fn compute_error_scaled32_test() { - // These are the same examples above, just using pre-computed scaled values. - - // These test near-halfway cases for single-precision floats. - assert_eq!( - compute_error_scaled32(0, 4611686018427387904, 39), - (111 + f32::INVALID_FP, 9223372036854775808) - ); - assert_eq!( - compute_error_scaled32(0, 4611686293305294848, 39), - (111 + f32::INVALID_FP, 9223372586610589696) - ); - assert_eq!( - compute_error_scaled32(0, 4611686568183201792, 39), - (111 + f32::INVALID_FP, 9223373136366403584) - ); - assert_eq!( - compute_error_scaled32(0, 4611686843061108736, 39), - (111 + f32::INVALID_FP, 9223373686122217472) - ); - assert_eq!( - compute_error_scaled32(0, 4611687117939015680, 39), - (111 + f32::INVALID_FP, 9223374235878031360) - ); - - assert_eq!( - compute_error_scaled32(-10, 9223372036854775808, 6), - (111 + f32::INVALID_FP, 9223372036854775808) - ); - assert_eq!( - compute_error_scaled32(-10, 9223372586610589696, 6), - (111 + f32::INVALID_FP, 9223372586610589696) - ); - assert_eq!( - compute_error_scaled32(-10, 9223373136366403584, 6), - (111 + f32::INVALID_FP, 9223373136366403584) - ); - assert_eq!( - compute_error_scaled32(-10, 9223373686122217472, 6), - (111 + f32::INVALID_FP, 9223373686122217472) - ); - assert_eq!( - compute_error_scaled32(-10, 9223374235878031360, 6), - (111 + f32::INVALID_FP, 9223374235878031360) - ); -} - -#[test] -fn compute_error_scaled64_test() { - // These are the same examples above, just using pre-computed scaled values. - - // These test near-halfway cases for double-precision floats. 
- assert_eq!( - compute_error_scaled64(0, 4611686018427387904, 10), - (1065 + f64::INVALID_FP, 9223372036854775808) - ); - assert_eq!( - compute_error_scaled64(0, 4611686018427388416, 10), - (1065 + f64::INVALID_FP, 9223372036854776832) - ); - assert_eq!( - compute_error_scaled64(0, 4611686018427388928, 10), - (1065 + f64::INVALID_FP, 9223372036854777856) - ); - assert_eq!( - compute_error_scaled64(0, 4611686018427389440, 10), - (1065 + f64::INVALID_FP, 9223372036854778880) - ); - assert_eq!( - compute_error_scaled64(0, 4611686018427389952, 10), - (1065 + f64::INVALID_FP, 9223372036854779904) - ); - assert_eq!( - compute_error_scaled64(0, 4611686018427387904, 9), - (1066 + f64::INVALID_FP, 9223372036854775808) - ); - assert_eq!( - compute_error_scaled64(0, 4611686018427388416, 9), - (1066 + f64::INVALID_FP, 9223372036854776832) - ); - assert_eq!( - compute_error_scaled64(0, 4611686018427388928, 9), - (1066 + f64::INVALID_FP, 9223372036854777856) - ); - assert_eq!( - compute_error_scaled64(0, 4611686018427389440, 9), - (1066 + f64::INVALID_FP, 9223372036854778880) - ); - assert_eq!( - compute_error_scaled64(0, 4611686018427389952, 9), - (1066 + f64::INVALID_FP, 9223372036854779904) - ); - - // Test a much closer set of examples. - assert_eq!( - compute_error_scaled64(0, 9223372036854774784, 11), - (1064 + f64::INVALID_FP, 18446744073709549568) - ); - assert_eq!( - compute_error_scaled64(0, 4611686018427388415, 0), - (1075 + f64::INVALID_FP, 9223372036854776830) - ); - assert_eq!( - compute_error_scaled64(0, 4611686018427388416, 0), - (1075 + f64::INVALID_FP, 9223372036854776832) - ); - assert_eq!( - compute_error_scaled64(0, 4611686018427388416, 0), - (1075 + f64::INVALID_FP, 9223372036854776832) - ); - assert_eq!( - compute_error_scaled64(-42, 6510716281765748947, 10), - (925 + f64::INVALID_FP, 13021432563531497894) - ); - assert_eq!( - compute_error_scaled64(-43, 6510716281765749303, 7), - (925 + f64::INVALID_FP, 13021432563531498606) - ); - assert_eq!( - compute_error_scaled64(-42, 6510716281765749660, 10), - (925 + f64::INVALID_FP, 13021432563531499320) - ); - - // These are examples of the above tests, with - // digits from the exponent shifted to the mantissa. - assert_eq!( - compute_error_scaled64(-3, 9223372036854775808, 1), - (1065 + f64::INVALID_FP, 9223372036854775808) - ); - assert_eq!( - compute_error_scaled64(-3, 9223372036854776832, 1), - (1065 + f64::INVALID_FP, 9223372036854776832) - ); - assert_eq!( - compute_error_scaled64(-3, 9223372036854777856, 1), - (1065 + f64::INVALID_FP, 9223372036854777856) - ); - assert_eq!( - compute_error_scaled64(-3, 9223372036854778880, 1), - (1065 + f64::INVALID_FP, 9223372036854778880) - ); - assert_eq!( - compute_error_scaled64(-3, 9223372036854779904, 1), - (1065 + f64::INVALID_FP, 9223372036854779904) - ); - - // Test from errors in atof. - assert_eq!( - compute_error_scaled64(-18, 9223373686122217470, 4), - (1012 + f64::INVALID_FP, 9223373686122217470) - ); - - // Check edge-cases from previous errors. - assert_eq!( - compute_error_scaled64(-342, 9223372036854775804, 2), - (-64 + f64::INVALID_FP, 18446744073709551608) - ); -} - -#[test] -fn compute_float_f32_rounding() { - // These test near-halfway cases for single-precision floats. 
- assert_eq!(compute_float32(0, 16777216), (151, 0)); - assert_eq!(compute_float32(0, 16777217), (151, 0)); - assert_eq!(compute_float32(0, 16777218), (151, 1)); - assert_eq!(compute_float32(0, 16777219), (151, 2)); - assert_eq!(compute_float32(0, 16777220), (151, 2)); - - // These are examples of the above tests, with - // digits from the exponent shifted to the mantissa. - assert_eq!(compute_float32(-10, 167772160000000000), (151, 0)); - assert_eq!(compute_float32(-10, 167772170000000000), (151, 0)); - assert_eq!(compute_float32(-10, 167772180000000000), (151, 1)); - // Let's check the lines to see if anything is different in table... - assert_eq!(compute_float32(-10, 167772190000000000), (151, 2)); - assert_eq!(compute_float32(-10, 167772200000000000), (151, 2)); -} - -#[test] -fn compute_float_f64_rounding() { - // Also need to check halfway cases **inside** that exponent range. - - // These test near-halfway cases for double-precision floats. - assert_eq!(compute_float64(0, 9007199254740992), (1076, 0)); - assert_eq!(compute_float64(0, 9007199254740993), (1076, 0)); - assert_eq!(compute_float64(0, 9007199254740994), (1076, 1)); - assert_eq!(compute_float64(0, 9007199254740995), (1076, 2)); - assert_eq!(compute_float64(0, 9007199254740996), (1076, 2)); - assert_eq!(compute_float64(0, 18014398509481984), (1077, 0)); - assert_eq!(compute_float64(0, 18014398509481986), (1077, 0)); - assert_eq!(compute_float64(0, 18014398509481988), (1077, 1)); - assert_eq!(compute_float64(0, 18014398509481990), (1077, 2)); - assert_eq!(compute_float64(0, 18014398509481992), (1077, 2)); - - // Test a much closer set of examples. - assert_eq!(compute_float64(0, 9007199254740991), (1075, 4503599627370495)); - assert_eq!(compute_float64(0, 9223372036854776831), (1086, 0)); - assert_eq!(compute_float64(0, 9223372036854776832), (1086, 0)); - assert_eq!(compute_float64(0, 9223372036854776833), (1086, 1)); - assert_eq!(compute_float64(-42, 9123456727292927), (936, 1854521741541368)); - assert_eq!(compute_float64(-43, 91234567272929275), (936, 1854521741541369)); - assert_eq!(compute_float64(-42, 9123456727292928), (936, 1854521741541369)); - - // These are examples of the above tests, with - // digits from the exponent shifted to the mantissa. - assert_eq!(compute_float64(-3, 9007199254740992000), (1076, 0)); - assert_eq!(compute_float64(-3, 9007199254740993000), (1076, 0)); - assert_eq!(compute_float64(-3, 9007199254740994000), (1076, 1)); - assert_eq!(compute_float64(-3, 9007199254740995000), (1076, 2)); - assert_eq!(compute_float64(-3, 9007199254740996000), (1076, 2)); -} diff --git a/vendor/minimal-lexical/tests/libm_tests.rs b/vendor/minimal-lexical/tests/libm_tests.rs deleted file mode 100644 index 7f5352e1938234..00000000000000 --- a/vendor/minimal-lexical/tests/libm_tests.rs +++ /dev/null @@ -1,289 +0,0 @@ -#![cfg(all(not(feature = "std"), feature = "compact"))] - -// These are adapted from libm, a port of musl libc's libm to Rust. 
-// libm can be found online [here](https://github.com/rust-lang/libm), -// and is similarly licensed under an Apache2.0/MIT license - -use core::f64; -use minimal_lexical::libm; - -#[test] -fn fabsf_sanity_test() { - assert_eq!(libm::fabsf(-1.0), 1.0); - assert_eq!(libm::fabsf(2.8), 2.8); -} - -/// The spec: https://en.cppreference.com/w/cpp/numeric/math/fabs -#[test] -fn fabsf_spec_test() { - assert!(libm::fabsf(f32::NAN).is_nan()); - for f in [0.0, -0.0].iter().copied() { - assert_eq!(libm::fabsf(f), 0.0); - } - for f in [f32::INFINITY, f32::NEG_INFINITY].iter().copied() { - assert_eq!(libm::fabsf(f), f32::INFINITY); - } -} - -#[test] -fn sqrtf_sanity_test() { - assert_eq!(libm::sqrtf(100.0), 10.0); - assert_eq!(libm::sqrtf(4.0), 2.0); -} - -/// The spec: https://en.cppreference.com/w/cpp/numeric/math/sqrt -#[test] -fn sqrtf_spec_test() { - // Not Asserted: FE_INVALID exception is raised if argument is negative. - assert!(libm::sqrtf(-1.0).is_nan()); - assert!(libm::sqrtf(f32::NAN).is_nan()); - for f in [0.0, -0.0, f32::INFINITY].iter().copied() { - assert_eq!(libm::sqrtf(f), f); - } -} - -const POS_ZERO: &[f64] = &[0.0]; -const NEG_ZERO: &[f64] = &[-0.0]; -const POS_ONE: &[f64] = &[1.0]; -const NEG_ONE: &[f64] = &[-1.0]; -const POS_FLOATS: &[f64] = &[99.0 / 70.0, f64::consts::E, f64::consts::PI]; -const NEG_FLOATS: &[f64] = &[-99.0 / 70.0, -f64::consts::E, -f64::consts::PI]; -const POS_SMALL_FLOATS: &[f64] = &[(1.0 / 2.0), f64::MIN_POSITIVE, f64::EPSILON]; -const NEG_SMALL_FLOATS: &[f64] = &[-(1.0 / 2.0), -f64::MIN_POSITIVE, -f64::EPSILON]; -const POS_EVENS: &[f64] = &[2.0, 6.0, 8.0, 10.0, 22.0, 100.0, f64::MAX]; -const NEG_EVENS: &[f64] = &[f64::MIN, -100.0, -22.0, -10.0, -8.0, -6.0, -2.0]; -const POS_ODDS: &[f64] = &[3.0, 7.0]; -const NEG_ODDS: &[f64] = &[-7.0, -3.0]; -const NANS: &[f64] = &[f64::NAN]; -const POS_INF: &[f64] = &[f64::INFINITY]; -const NEG_INF: &[f64] = &[f64::NEG_INFINITY]; - -const ALL: &[&[f64]] = &[ - POS_ZERO, - NEG_ZERO, - NANS, - NEG_SMALL_FLOATS, - POS_SMALL_FLOATS, - NEG_FLOATS, - POS_FLOATS, - NEG_EVENS, - POS_EVENS, - NEG_ODDS, - POS_ODDS, - NEG_INF, - POS_INF, - NEG_ONE, - POS_ONE, -]; -const POS: &[&[f64]] = &[POS_ZERO, POS_ODDS, POS_ONE, POS_FLOATS, POS_EVENS, POS_INF]; -const NEG: &[&[f64]] = &[NEG_ZERO, NEG_ODDS, NEG_ONE, NEG_FLOATS, NEG_EVENS, NEG_INF]; - -fn powd(base: f64, exponent: f64, expected: f64) { - let res = libm::powd(base, exponent); - assert!( - if expected.is_nan() { - res.is_nan() - } else { - libm::powd(base, exponent) == expected - }, - "{} ** {} was {} instead of {}", - base, - exponent, - res, - expected - ); -} - -fn powd_test_sets_as_base(sets: &[&[f64]], exponent: f64, expected: f64) { - sets.iter().for_each(|s| s.iter().for_each(|val| powd(*val, exponent, expected))); -} - -fn powd_test_sets_as_exponent(base: f64, sets: &[&[f64]], expected: f64) { - sets.iter().for_each(|s| s.iter().for_each(|val| powd(base, *val, expected))); -} - -fn powd_test_sets(sets: &[&[f64]], computed: &dyn Fn(f64) -> f64, expected: &dyn Fn(f64) -> f64) { - sets.iter().for_each(|s| { - s.iter().for_each(|val| { - let exp = expected(*val); - let res = computed(*val); - - assert!( - if exp.is_nan() { - res.is_nan() - } else { - exp == res - }, - "test for {} was {} instead of {}", - val, - res, - exp - ); - }) - }); -} - -#[test] -fn powd_zero_as_exponent() { - powd_test_sets_as_base(ALL, 0.0, 1.0); - powd_test_sets_as_base(ALL, -0.0, 1.0); -} - -#[test] -fn powd_one_as_base() { - powd_test_sets_as_exponent(1.0, ALL, 1.0); -} - -#[test] -fn 
powd_nan_inputs() { - // NAN as the base: - // (NAN ^ anything *but 0* should be NAN) - powd_test_sets_as_exponent(f64::NAN, &ALL[2..], f64::NAN); - - // NAN as the exponent: - // (anything *but 1* ^ NAN should be NAN) - powd_test_sets_as_base(&ALL[..(ALL.len() - 2)], f64::NAN, f64::NAN); -} - -#[test] -fn powd_infinity_as_base() { - // Positive Infinity as the base: - // (+Infinity ^ positive anything but 0 and NAN should be +Infinity) - powd_test_sets_as_exponent(f64::INFINITY, &POS[1..], f64::INFINITY); - - // (+Infinity ^ negative anything except 0 and NAN should be 0.0) - powd_test_sets_as_exponent(f64::INFINITY, &NEG[1..], 0.0); - - // Negative Infinity as the base: - // (-Infinity ^ positive odd ints should be -Infinity) - powd_test_sets_as_exponent(f64::NEG_INFINITY, &[POS_ODDS], f64::NEG_INFINITY); - - // (-Infinity ^ anything but odd ints should be == -0 ^ (-anything)) - // We can lump in pos/neg odd ints here because they don't seem to - // cause panics (div by zero) in release mode (I think). - powd_test_sets(ALL, &|v: f64| libm::powd(f64::NEG_INFINITY, v), &|v: f64| libm::powd(-0.0, -v)); -} - -#[test] -fn infinity_as_exponent() { - // Positive/Negative base greater than 1: - // (pos/neg > 1 ^ Infinity should be Infinity - note this excludes NAN as the base) - powd_test_sets_as_base(&ALL[5..(ALL.len() - 2)], f64::INFINITY, f64::INFINITY); - - // (pos/neg > 1 ^ -Infinity should be 0.0) - powd_test_sets_as_base(&ALL[5..ALL.len() - 2], f64::NEG_INFINITY, 0.0); - - // Positive/Negative base less than 1: - let base_below_one = &[POS_ZERO, NEG_ZERO, NEG_SMALL_FLOATS, POS_SMALL_FLOATS]; - - // (pos/neg < 1 ^ Infinity should be 0.0 - this also excludes NAN as the base) - powd_test_sets_as_base(base_below_one, f64::INFINITY, 0.0); - - // (pos/neg < 1 ^ -Infinity should be Infinity) - powd_test_sets_as_base(base_below_one, f64::NEG_INFINITY, f64::INFINITY); - - // Positive/Negative 1 as the base: - // (pos/neg 1 ^ Infinity should be 1) - powd_test_sets_as_base(&[NEG_ONE, POS_ONE], f64::INFINITY, 1.0); - - // (pos/neg 1 ^ -Infinity should be 1) - powd_test_sets_as_base(&[NEG_ONE, POS_ONE], f64::NEG_INFINITY, 1.0); -} - -#[test] -fn powd_zero_as_base() { - // Positive Zero as the base: - // (+0 ^ anything positive but 0 and NAN should be +0) - powd_test_sets_as_exponent(0.0, &POS[1..], 0.0); - - // (+0 ^ anything negative but 0 and NAN should be Infinity) - // (this should panic because we're dividing by zero) - powd_test_sets_as_exponent(0.0, &NEG[1..], f64::INFINITY); - - // Negative Zero as the base: - // (-0 ^ anything positive but 0, NAN, and odd ints should be +0) - powd_test_sets_as_exponent(-0.0, &POS[3..], 0.0); - - // (-0 ^ anything negative but 0, NAN, and odd ints should be Infinity) - // (should panic because of divide by zero) - powd_test_sets_as_exponent(-0.0, &NEG[3..], f64::INFINITY); - - // (-0 ^ positive odd ints should be -0) - powd_test_sets_as_exponent(-0.0, &[POS_ODDS], -0.0); - - // (-0 ^ negative odd ints should be -Infinity) - // (should panic because of divide by zero) - powd_test_sets_as_exponent(-0.0, &[NEG_ODDS], f64::NEG_INFINITY); -} - -#[test] -fn special_cases() { - // One as the exponent: - // (anything ^ 1 should be anything - i.e. 
the base) - powd_test_sets(ALL, &|v: f64| libm::powd(v, 1.0), &|v: f64| v); - - // Negative One as the exponent: - // (anything ^ -1 should be 1/anything) - powd_test_sets(ALL, &|v: f64| libm::powd(v, -1.0), &|v: f64| 1.0 / v); - - // Factoring -1 out: - // (negative anything ^ integer should be (-1 ^ integer) * (positive anything ^ integer)) - [POS_ZERO, NEG_ZERO, POS_ONE, NEG_ONE, POS_EVENS, NEG_EVENS].iter().for_each(|int_set| { - int_set.iter().for_each(|int| { - powd_test_sets(ALL, &|v: f64| libm::powd(-v, *int), &|v: f64| { - libm::powd(-1.0, *int) * libm::powd(v, *int) - }); - }) - }); - - // Negative base (imaginary results): - // (-anything except 0 and Infinity ^ non-integer should be NAN) - NEG[1..(NEG.len() - 1)].iter().for_each(|set| { - set.iter().for_each(|val| { - powd_test_sets(&ALL[3..7], &|v: f64| libm::powd(*val, v), &|_| f64::NAN); - }) - }); -} - -#[test] -fn normal_cases() { - assert_eq!(libm::powd(2.0, 20.0), (1 << 20) as f64); - assert_eq!(libm::powd(-1.0, 9.0), -1.0); - assert!(libm::powd(-1.0, 2.2).is_nan()); - assert!(libm::powd(-1.0, -1.14).is_nan()); -} - -#[test] -fn fabsd_sanity_test() { - assert_eq!(libm::fabsd(-1.0), 1.0); - assert_eq!(libm::fabsd(2.8), 2.8); -} - -/// The spec: https://en.cppreference.com/w/cpp/numeric/math/fabs -#[test] -fn fabsd_spec_test() { - assert!(libm::fabsd(f64::NAN).is_nan()); - for f in [0.0, -0.0].iter().copied() { - assert_eq!(libm::fabsd(f), 0.0); - } - for f in [f64::INFINITY, f64::NEG_INFINITY].iter().copied() { - assert_eq!(libm::fabsd(f), f64::INFINITY); - } -} - -#[test] -fn sqrtd_sanity_test() { - assert_eq!(libm::sqrtd(100.0), 10.0); - assert_eq!(libm::sqrtd(4.0), 2.0); -} - -/// The spec: https://en.cppreference.com/w/cpp/numeric/math/sqrt -#[test] -fn sqrtd_spec_test() { - // Not Asserted: FE_INVALID exception is raised if argument is negative. 
- assert!(libm::sqrtd(-1.0).is_nan()); - assert!(libm::sqrtd(f64::NAN).is_nan()); - for f in [0.0, -0.0, f64::INFINITY].iter().copied() { - assert_eq!(libm::sqrtd(f), f); - } -} diff --git a/vendor/minimal-lexical/tests/mask_tests.rs b/vendor/minimal-lexical/tests/mask_tests.rs deleted file mode 100644 index 97e70a72b8f232..00000000000000 --- a/vendor/minimal-lexical/tests/mask_tests.rs +++ /dev/null @@ -1,16 +0,0 @@ -use minimal_lexical::mask; - -#[test] -fn lower_n_mask_test() { - assert_eq!(mask::lower_n_mask(2), 0b11); -} - -#[test] -fn lower_n_halfway_test() { - assert_eq!(mask::lower_n_halfway(2), 0b10); -} - -#[test] -fn nth_bit_test() { - assert_eq!(mask::nth_bit(2), 0b100); -} diff --git a/vendor/minimal-lexical/tests/number_tests.rs b/vendor/minimal-lexical/tests/number_tests.rs deleted file mode 100644 index 947be394c94f19..00000000000000 --- a/vendor/minimal-lexical/tests/number_tests.rs +++ /dev/null @@ -1,88 +0,0 @@ -use minimal_lexical::number::Number; - -#[test] -fn is_fast_path_test() { - let mut number = Number { - exponent: -4, - mantissa: 12345, - many_digits: false, - }; - assert_eq!(number.is_fast_path::(), true); - assert_eq!(number.is_fast_path::(), true); - - number.exponent = -15; - assert_eq!(number.is_fast_path::(), false); - assert_eq!(number.is_fast_path::(), true); - - number.exponent = -25; - assert_eq!(number.is_fast_path::(), false); - assert_eq!(number.is_fast_path::(), false); - - number.exponent = 25; - assert_eq!(number.is_fast_path::(), false); - assert_eq!(number.is_fast_path::(), true); - - number.exponent = 36; - assert_eq!(number.is_fast_path::(), false); - assert_eq!(number.is_fast_path::(), true); - - number.exponent = 38; - assert_eq!(number.is_fast_path::(), false); - assert_eq!(number.is_fast_path::(), false); - - number.mantissa = 1 << 25; - number.exponent = 0; - assert_eq!(number.is_fast_path::(), false); - assert_eq!(number.is_fast_path::(), true); - - number.mantissa = 1 << 54; - assert_eq!(number.is_fast_path::(), false); - assert_eq!(number.is_fast_path::(), false); - - number.mantissa = 1 << 52; - assert_eq!(number.is_fast_path::(), false); - assert_eq!(number.is_fast_path::(), true); - - number.many_digits = true; - assert_eq!(number.is_fast_path::(), false); - assert_eq!(number.is_fast_path::(), false); -} - -#[test] -fn try_fast_path_test() { - let mut number = Number { - exponent: -4, - mantissa: 12345, - many_digits: false, - }; - assert_eq!(number.try_fast_path::(), Some(1.2345)); - assert_eq!(number.try_fast_path::(), Some(1.2345)); - - number.exponent = -10; - assert_eq!(number.try_fast_path::(), Some(1.2345e-6)); - assert_eq!(number.try_fast_path::(), Some(1.2345e-6)); - - number.exponent = -20; - assert_eq!(number.try_fast_path::(), None); - assert_eq!(number.try_fast_path::(), Some(1.2345e-16)); - - number.exponent = -25; - assert_eq!(number.try_fast_path::(), None); - assert_eq!(number.try_fast_path::(), None); - - number.exponent = 12; - assert_eq!(number.try_fast_path::(), Some(1.2345e16)); - assert_eq!(number.try_fast_path::(), Some(1.2345e16)); - - number.exponent = 25; - assert_eq!(number.try_fast_path::(), None); - assert_eq!(number.try_fast_path::(), Some(1.2345e29)); - - number.exponent = 32; - assert_eq!(number.try_fast_path::(), None); - assert_eq!(number.try_fast_path::(), Some(1.2345e36)); - - number.exponent = 36; - assert_eq!(number.try_fast_path::(), None); - assert_eq!(number.try_fast_path::(), None); -} diff --git a/vendor/minimal-lexical/tests/parse_tests.rs b/vendor/minimal-lexical/tests/parse_tests.rs 
deleted file mode 100644 index 48856fd1cca365..00000000000000 --- a/vendor/minimal-lexical/tests/parse_tests.rs +++ /dev/null @@ -1,189 +0,0 @@ -use core::f64; -use minimal_lexical::{num, parse}; - -fn check_parse_float(integer: &str, fraction: &str, exponent: i32, expected: F) { - let integer = integer.as_bytes().iter(); - let fraction = fraction.as_bytes().iter(); - assert!(expected == parse::parse_float::(integer, fraction, exponent)); -} - -#[test] -fn parse_f32_test() { - check_parse_float("", "", 0, 0.0_f32); - check_parse_float("1", "2345", 0, 1.2345_f32); - check_parse_float("12", "345", 0, 12.345_f32); - check_parse_float("12345", "6789", 0, 12345.6789_f32); - check_parse_float("1", "2345", 10, 1.2345e10_f32); - check_parse_float("1", "2345", -38, 1.2345e-38_f32); - - // Check expected rounding, using borderline cases. - // Round-down, halfway - check_parse_float("16777216", "", 0, 16777216.0_f32); - check_parse_float("16777217", "", 0, 16777216.0_f32); - check_parse_float("16777218", "", 0, 16777218.0_f32); - check_parse_float("33554432", "", 0, 33554432.0_f32); - check_parse_float("33554434", "", 0, 33554432.0_f32); - check_parse_float("33554436", "", 0, 33554436.0_f32); - check_parse_float("17179869184", "", 0, 17179869184.0_f32); - check_parse_float("17179870208", "", 0, 17179869184.0_f32); - check_parse_float("17179871232", "", 0, 17179871232.0_f32); - - // Round-up, halfway - check_parse_float("16777218", "", 0, 16777218.0_f32); - check_parse_float("16777219", "", 0, 16777220.0_f32); - check_parse_float("16777220", "", 0, 16777220.0_f32); - - check_parse_float("33554436", "", 0, 33554436.0_f32); - check_parse_float("33554438", "", 0, 33554440.0_f32); - check_parse_float("33554440", "", 0, 33554440.0_f32); - - check_parse_float("17179871232", "", 0, 17179871232.0_f32); - check_parse_float("17179872256", "", 0, 17179873280.0_f32); - check_parse_float("17179873280", "", 0, 17179873280.0_f32); - - // Round-up, above halfway - check_parse_float("33554435", "", 0, 33554436.0_f32); - check_parse_float("17179870209", "", 0, 17179871232.0_f32); - - // Check exactly halfway, round-up at halfway - check_parse_float("1", "00000017881393432617187499", 0, 1.0000001_f32); - check_parse_float("1", "000000178813934326171875", 0, 1.0000002_f32); - check_parse_float("1", "00000017881393432617187501", 0, 1.0000002_f32); - - check_parse_float("", "000000000000000000000000000000000000011754943508222875079687365372222456778186655567720875215087517062784172594547271728515625", 0, 1.1754943508222875e-38f32); -} - -#[test] -fn parse_f64_test() { - check_parse_float("", "", 0, 0.0_f64); - check_parse_float("1", "2345", 0, 1.2345_f64); - check_parse_float("12", "345", 0, 12.345_f64); - check_parse_float("12345", "6789", 0, 12345.6789_f64); - check_parse_float("1", "2345", 10, 1.2345e10_f64); - check_parse_float("1", "2345", -308, 1.2345e-308_f64); - - // Check expected rounding, using borderline cases. 
- // Round-down, halfway - check_parse_float("9007199254740992", "", 0, 9007199254740992.0_f64); - check_parse_float("9007199254740993", "", 0, 9007199254740992.0_f64); - check_parse_float("9007199254740994", "", 0, 9007199254740994.0_f64); - - check_parse_float("18014398509481984", "", 0, 18014398509481984.0_f64); - check_parse_float("18014398509481986", "", 0, 18014398509481984.0_f64); - check_parse_float("18014398509481988", "", 0, 18014398509481988.0_f64); - - check_parse_float("9223372036854775808", "", 0, 9223372036854775808.0_f64); - check_parse_float("9223372036854776832", "", 0, 9223372036854775808.0_f64); - check_parse_float("9223372036854777856", "", 0, 9223372036854777856.0_f64); - - check_parse_float( - "11417981541647679048466287755595961091061972992", - "", - 0, - 11417981541647679048466287755595961091061972992.0_f64, - ); - check_parse_float( - "11417981541647680316116887983825362587765178368", - "", - 0, - 11417981541647679048466287755595961091061972992.0_f64, - ); - check_parse_float( - "11417981541647681583767488212054764084468383744", - "", - 0, - 11417981541647681583767488212054764084468383744.0_f64, - ); - - // Round-up, halfway - check_parse_float("9007199254740994", "", 0, 9007199254740994.0_f64); - check_parse_float("9007199254740995", "", 0, 9007199254740996.0_f64); - check_parse_float("9007199254740996", "", 0, 9007199254740996.0_f64); - - check_parse_float("18014398509481988", "", 0, 18014398509481988.0_f64); - check_parse_float("18014398509481990", "", 0, 18014398509481992.0_f64); - check_parse_float("18014398509481992", "", 0, 18014398509481992.0_f64); - - check_parse_float("9223372036854777856", "", 0, 9223372036854777856.0_f64); - check_parse_float("9223372036854778880", "", 0, 9223372036854779904.0_f64); - check_parse_float("9223372036854779904", "", 0, 9223372036854779904.0_f64); - - check_parse_float( - "11417981541647681583767488212054764084468383744", - "", - 0, - 11417981541647681583767488212054764084468383744.0_f64, - ); - check_parse_float( - "11417981541647682851418088440284165581171589120", - "", - 0, - 11417981541647684119068688668513567077874794496.0_f64, - ); - check_parse_float( - "11417981541647684119068688668513567077874794496", - "", - 0, - 11417981541647684119068688668513567077874794496.0_f64, - ); - - // Round-up, above halfway - check_parse_float("9223372036854776833", "", 0, 9223372036854777856.0_f64); - check_parse_float( - "11417981541647680316116887983825362587765178369", - "", - 0, - 11417981541647681583767488212054764084468383744.0_f64, - ); - - // Rounding error - // Adapted from failures in strtod. 
- check_parse_float("2", "2250738585072014", -308, 2.2250738585072014e-308_f64); - check_parse_float("2", "2250738585072011360574097967091319759348195463516456480234261097248222220210769455165295239081350879141491589130396211068700864386945946455276572074078206217433799881410632673292535522868813721490129811224514518898490572223072852551331557550159143974763979834118019993239625482890171070818506906306666559949382757725720157630626906633326475653000092458883164330377797918696120494973903778297049050510806099407302629371289589500035837999672072543043602840788957717961509455167482434710307026091446215722898802581825451803257070188608721131280795122334262883686223215037756666225039825343359745688844239002654981983854879482922068947216898310996983658468140228542433306603398508864458040010349339704275671864433837704860378616227717385456230658746790140867233276367187499", -308, 2.225073858507201e-308_f64); - check_parse_float("2", "22507385850720113605740979670913197593481954635164564802342610972482222202107694551652952390813508791414915891303962110687008643869459464552765720740782062174337998814106326732925355228688137214901298112245145188984905722230728525513315575501591439747639798341180199932396254828901710708185069063066665599493827577257201576306269066333264756530000924588831643303777979186961204949739037782970490505108060994073026293712895895000358379996720725430436028407889577179615094551674824347103070260914462157228988025818254518032570701886087211312807951223342628836862232150377566662250398253433597456888442390026549819838548794829220689472168983109969836584681402285424333066033985088644580400103493397042756718644338377048603786162277173854562306587467901408672332763671875", -308, 2.2250738585072014e-308_f64); - check_parse_float("2", "2250738585072011360574097967091319759348195463516456480234261097248222220210769455165295239081350879141491589130396211068700864386945946455276572074078206217433799881410632673292535522868813721490129811224514518898490572223072852551331557550159143974763979834118019993239625482890171070818506906306666559949382757725720157630626906633326475653000092458883164330377797918696120494973903778297049050510806099407302629371289589500035837999672072543043602840788957717961509455167482434710307026091446215722898802581825451803257070188608721131280795122334262883686223215037756666225039825343359745688844239002654981983854879482922068947216898310996983658468140228542433306603398508864458040010349339704275671864433837704860378616227717385456230658746790140867233276367187501", -308, 2.2250738585072014e-308_f64); - check_parse_float("179769313486231580793728971405303415079934132710037826936173778980444968292764750946649017977587207096330286416692887910946555547851940402630657488671505820681908902000708383676273854845817711531764475730270069855571366959622842914819860834936475292719074168444365510704342711559699508093042880177904174497791", "9999999999999999999999999999999999999999999999999999999999999999999999", 0, 1.7976931348623157e+308_f64); - check_parse_float("7", 
"4109846876186981626485318930233205854758970392148714663837852375101326090531312779794975454245398856969484704316857659638998506553390969459816219401617281718945106978546710679176872575177347315553307795408549809608457500958111373034747658096871009590975442271004757307809711118935784838675653998783503015228055934046593739791790738723868299395818481660169122019456499931289798411362062484498678713572180352209017023903285791732520220528974020802906854021606612375549983402671300035812486479041385743401875520901590172592547146296175134159774938718574737870961645638908718119841271673056017045493004705269590165763776884908267986972573366521765567941072508764337560846003984904972149117463085539556354188641513168478436313080237596295773983001708984374999", -324, 5.0e-324_f64); - check_parse_float("7", "4109846876186981626485318930233205854758970392148714663837852375101326090531312779794975454245398856969484704316857659638998506553390969459816219401617281718945106978546710679176872575177347315553307795408549809608457500958111373034747658096871009590975442271004757307809711118935784838675653998783503015228055934046593739791790738723868299395818481660169122019456499931289798411362062484498678713572180352209017023903285791732520220528974020802906854021606612375549983402671300035812486479041385743401875520901590172592547146296175134159774938718574737870961645638908718119841271673056017045493004705269590165763776884908267986972573366521765567941072508764337560846003984904972149117463085539556354188641513168478436313080237596295773983001708984375", -324, 1.0e-323_f64); - check_parse_float("7", "4109846876186981626485318930233205854758970392148714663837852375101326090531312779794975454245398856969484704316857659638998506553390969459816219401617281718945106978546710679176872575177347315553307795408549809608457500958111373034747658096871009590975442271004757307809711118935784838675653998783503015228055934046593739791790738723868299395818481660169122019456499931289798411362062484498678713572180352209017023903285791732520220528974020802906854021606612375549983402671300035812486479041385743401875520901590172592547146296175134159774938718574737870961645638908718119841271673056017045493004705269590165763776884908267986972573366521765567941072508764337560846003984904972149117463085539556354188641513168478436313080237596295773983001708984375001", -324, 1.0e-323_f64); - check_parse_float("", "0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000024703282292062327208828439643411068618252990130716238221279284125033775363510437593264991818081799618989828234772285886546332835517796989819938739800539093906315035659515570226392290858392449105184435931802849936536152500319370457678249219365623669863658480757001585769269903706311928279558551332927834338409351978015531246597263579574622766465272827220056374006485499977096599470454020828166226237857393450736339007967761930577506740176324673600968951340535537458516661134223766678604162159680461914467291840300530057530849048765391711386591646239524912623653881879636239373280423891018672348497668235089863388587925628302755995657524455507255189313690836254779186948667994968324049705821028513185451396213837722826145437693412532098591327667236328125", 0, 0.0_f64); - - // Rounding error - // Adapted from: - // 
https://www.exploringbinary.com/how-glibc-strtod-works/ - check_parse_float("", "000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000022250738585072008890245868760858598876504231122409594654935248025624400092282356951787758888037591552642309780950434312085877387158357291821993020294379224223559819827501242041788969571311791082261043971979604000454897391938079198936081525613113376149842043271751033627391549782731594143828136275113838604094249464942286316695429105080201815926642134996606517803095075913058719846423906068637102005108723282784678843631944515866135041223479014792369585208321597621066375401613736583044193603714778355306682834535634005074073040135602968046375918583163124224521599262546494300836851861719422417646455137135420132217031370496583210154654068035397417906022589503023501937519773030945763173210852507299305089761582519159720757232455434770912461317493580281734466552734375", 0, 2.2250738585072011e-308_f64); - - // Rounding error - // Adapted from test-parse-random failures. - check_parse_float("1009", "", -31, 1.009e-28_f64); - check_parse_float("18294", "", 304, f64::INFINITY); - - // Rounding error - // Adapted from a @dangrabcad's issue #20. - check_parse_float("7", "689539722041643", 164, 7.689539722041643e164_f64); - check_parse_float("768953972204164300000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", "", 0, 7.689539722041643e164_f64); - check_parse_float("768953972204164300000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", "0", 0, 7.689539722041643e164_f64); - - // Check other cases similar to @dangrabcad's issue #20. - check_parse_float("9223372036854776833", "0", 0, 9223372036854777856.0_f64); - check_parse_float( - "11417981541647680316116887983825362587765178369", - "0", - 0, - 11417981541647681583767488212054764084468383744.0_f64, - ); - check_parse_float("9007199254740995", "0", 0, 9007199254740996.0_f64); - check_parse_float("18014398509481990", "0", 0, 18014398509481992.0_f64); - check_parse_float("9223372036854778880", "0", 0, 9223372036854779904.0_f64); - check_parse_float( - "11417981541647682851418088440284165581171589120", - "0", - 0, - 11417981541647684119068688668513567077874794496.0_f64, - ); - - // Check other cases ostensibly identified via proptest. 
- check_parse_float("71610528364411830000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", "0", 0, 71610528364411830000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000.0_f64); - check_parse_float("126769393745745060000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", "0", 0, 126769393745745060000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000.0_f64); - check_parse_float("38652960461239320000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", "0", 0, 38652960461239320000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000.0_f64); -} diff --git a/vendor/minimal-lexical/tests/rounding_tests.rs b/vendor/minimal-lexical/tests/rounding_tests.rs deleted file mode 100644 index 794d696fb827d7..00000000000000 --- a/vendor/minimal-lexical/tests/rounding_tests.rs +++ /dev/null @@ -1,64 +0,0 @@ -use minimal_lexical::extended_float::ExtendedFloat; -use minimal_lexical::rounding; - -#[test] -fn round_test() { - let mut fp = ExtendedFloat { - mant: 9223372036854776832, - exp: -10, - }; - rounding::round::(&mut fp, |f, s| { - f.mant >>= s; - f.exp += s; - }); - assert_eq!(fp.mant, 0); - assert_eq!(fp.exp, 1); - - let mut fp = ExtendedFloat { - mant: 9223372036854776832, - exp: -10, - }; - rounding::round::(&mut fp, |f, s| { - f.mant >>= s; - f.exp += s; - // Round-up. 
- f.mant += 1; - }); - assert_eq!(fp.mant, 1); - assert_eq!(fp.exp, 1); - - // Round-down - let mut fp = ExtendedFloat { - mant: 9223372036854776832, - exp: -10, - }; - rounding::round::(&mut fp, |f, s| { - rounding::round_nearest_tie_even(f, s, |is_odd, is_halfway, is_above| { - is_above || (is_odd && is_halfway) - }); - }); - assert_eq!(fp.mant, 0); - assert_eq!(fp.exp, 1); - - // Round up - let mut fp = ExtendedFloat { - mant: 9223372036854778880, - exp: -10, - }; - rounding::round::(&mut fp, |f, s| { - rounding::round_nearest_tie_even(f, s, |is_odd, is_halfway, is_above| { - is_above || (is_odd && is_halfway) - }); - }); - assert_eq!(fp.mant, 2); - assert_eq!(fp.exp, 1); - - // Round down - let mut fp = ExtendedFloat { - mant: 9223372036854778880, - exp: -10, - }; - rounding::round::(&mut fp, rounding::round_down); - assert_eq!(fp.mant, 1); - assert_eq!(fp.exp, 1); -} diff --git a/vendor/minimal-lexical/tests/slow_tests.rs b/vendor/minimal-lexical/tests/slow_tests.rs deleted file mode 100644 index 2afea69e908a73..00000000000000 --- a/vendor/minimal-lexical/tests/slow_tests.rs +++ /dev/null @@ -1,337 +0,0 @@ -mod stackvec; - -use minimal_lexical::bigint::Bigint; -use minimal_lexical::extended_float::ExtendedFloat; -use minimal_lexical::num::Float; -use minimal_lexical::number::Number; -use minimal_lexical::slow; -use stackvec::vec_from_u32; - -fn b(float: F) -> (u64, i32) { - let fp = slow::b(float); - (fp.mant, fp.exp) -} - -fn bh(float: F) -> (u64, i32) { - let fp = slow::bh(float); - (fp.mant, fp.exp) -} - -#[test] -fn b_test() { - assert_eq!(b(1e-45_f32), (1, -149)); - assert_eq!(b(5e-324_f64), (1, -1074)); - assert_eq!(b(1e-323_f64), (2, -1074)); - assert_eq!(b(2e-323_f64), (4, -1074)); - assert_eq!(b(3e-323_f64), (6, -1074)); - assert_eq!(b(4e-323_f64), (8, -1074)); - assert_eq!(b(5e-323_f64), (10, -1074)); - assert_eq!(b(6e-323_f64), (12, -1074)); - assert_eq!(b(7e-323_f64), (14, -1074)); - assert_eq!(b(8e-323_f64), (16, -1074)); - assert_eq!(b(9e-323_f64), (18, -1074)); - assert_eq!(b(1_f32), (8388608, -23)); - assert_eq!(b(1_f64), (4503599627370496, -52)); - assert_eq!(b(1e38_f32), (9860761, 103)); - assert_eq!(b(1e308_f64), (5010420900022432, 971)); -} - -#[test] -fn bh_test() { - assert_eq!(bh(1e-45_f32), (3, -150)); - assert_eq!(bh(5e-324_f64), (3, -1075)); - assert_eq!(bh(1_f32), (16777217, -24)); - assert_eq!(bh(1_f64), (9007199254740993, -53)); - assert_eq!(bh(1e38_f32), (19721523, 102)); - assert_eq!(bh(1e308_f64), (10020841800044865, 970)); -} - -#[test] -fn slow_test() { - // 5e-324, round-down. 
- let integer = b"2"; - let fraction = b"4703282292062327208828439643411068618252990130716238221279284125033775363510437593264991818081799618989828234772285886546332835517796989819938739800539093906315035659515570226392290858392449105184435931802849936536152500319370457678249219365623669863658480757001585769269903706311928279558551332927834338409351978015531246597263579574622766465272827220056374006485499977096599470454020828166226237857393450736339007967761930577506740176324673600968951340535537458516661134223766678604162159680461914467291840300530057530849048765391711386591646239524912623653881879636239373280423891018672348497668235089863388587925628302755995657524455507255189313690836254779186948667994968324049705821028513185451396213837722826145437693412532098591327667236328124999"; - let num = Number { - mantissa: 2470328229206232720, - exponent: -342, - many_digits: true, - }; - let fp = ExtendedFloat { - mant: 1 << 63, - exp: -63, - }; - let result = slow::slow::(num.clone(), fp, integer.iter(), fraction.iter()); - assert_eq!(result.mant, 0); - assert_eq!(result.exp, 0); - - // 5e-324, round-up. - let fraction = b"47032822920623272088284396434110686182529901307162382212792841250337753635104375932649918180817996189898282347722858865463328355177969898199387398005390939063150356595155702263922908583924491051844359318028499365361525003193704576782492193656236698636584807570015857692699037063119282795585513329278343384093519780155312465972635795746227664652728272200563740064854999770965994704540208281662262378573934507363390079677619305775067401763246736009689513405355374585166611342237666786041621596804619144672918403005300575308490487653917113865916462395249126236538818796362393732804238910186723484976682350898633885879256283027559956575244555072551893136908362547791869486679949683240497058210285131854513962138377228261454376934125320985913276672363281251"; - let result = slow::slow::(num.clone(), fp, integer.iter(), fraction.iter()); - assert_eq!(result.mant, 1); - assert_eq!(result.exp, 0); - - // 8.98846567431158e+307 - let integer = b"8"; - let fraction = b"9884656743115805365666807213050294962762414131308158973971342756154045415486693752413698006024096935349884403114202125541629105369684531108613657287705365884742938136589844238179474556051429647415148697857438797685859063890851407391008830874765563025951597582513936655578157348020066364210154316532161708032"; - let num = Number { - mantissa: 8988465674311580536, - exponent: 289, - many_digits: true, - }; - let fp = ExtendedFloat { - mant: 9223372036854776832, - exp: 2035, - }; - let result = slow::slow::(num.clone(), fp, integer.iter(), fraction.iter()); - assert_eq!(result.mant, 0); - assert_eq!(result.exp, 2046); - - // 8.988465674311582e+307 - let fraction = b"98846567431158053656668072130502949627624141313081589739713427561540454154866937524136980060240969353498844031142021255416291053696845311086136572877053658847429381365898442381794745560514296474151486978574387976858590638908514073910088308747655630259515975825139366555781573480200663642101543165321617080321"; - let result = slow::slow::(num.clone(), fp, integer.iter(), fraction.iter()); - assert_eq!(result.mant, 1); - assert_eq!(result.exp, 2046); -} - -#[test] -fn positive_digit_comp_test() { - // 8.98846567431158e+307 - let bigmant = Bigint { - data: vec_from_u32(&[ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 1024, 2147483648, - ]), - }; - let exponent = 307 + 1 - 308; - let result = slow::positive_digit_comp::(bigmant, 
exponent); - assert_eq!(result.mant, 0); - assert_eq!(result.exp, 2046); - - // 8.988465674311582e+307 - let bigmant = Bigint { - data: vec_from_u32(&[ - 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 1024, 2147483648, - ]), - }; - let exponent = 307 + 1 - 308; - let result = slow::positive_digit_comp::(bigmant, exponent); - assert_eq!(result.mant, 1); - assert_eq!(result.exp, 2046); -} - -#[test] -fn negative_digit_comp_test() { - // 5e-324, below halfway, round-down to 0.0. - let bigmant = Bigint { - data: vec_from_u32(&[ - 1727738439, 330069557, 3509095598, 686205316, 156923684, 750687444, 2688855918, - 28211928, 1887482096, 3222998811, 913348873, 1652282845, 1600735541, 1664240266, - 84454144, 1487769792, 1855966778, 2832488299, 507030148, 1410055467, 2513359584, - 3453963205, 779237894, 3456088326, 3671009895, 3094451696, 1250165638, 2682979794, - 357925323, 1713890438, 3271046672, 3485897285, 3934710962, 1813530592, 199705026, - 976390839, 2805488572, 2194288220, 2094065006, 2592523639, 3798974617, 586957244, - 1409218821, 3442050171, 3789534764, 1380190380, 2055222457, 3535299831, 429482276, - 389342206, 133558576, 721875297, 3013586570, 540178306, 2389746866, 2313334501, - 422440635, 1288499129, 864978311, 842263325, 3016323856, 2282442263, 1440906063, - 3931458696, 3511314276, 1884879882, 946366824, 4260548261, 1073379659, 1732329252, - 3828972211, 1915607049, 3665440937, 1844358779, 3735281178, 2646335050, 1457460927, - 2940016422, 1051, - ]), - }; - let fp = ExtendedFloat { - mant: 1 << 63, - exp: -63, - }; - let exponent = -324 + 1 - 755; - let result = slow::negative_digit_comp::(bigmant, fp, exponent); - assert_eq!(result.mant, 0); - assert_eq!(result.exp, 0); - - // 5e-324, halfway, round-down to 0.0. - let bigmant = Bigint { - data: vec_from_u32(&[ - 2084786877, 507136210, 2666388819, 3110242527, 3178432722, 541916566, 208847286, - 3092404665, 83491860, 2893735989, 3973758097, 2600107496, 147629623, 1754010897, - 4226332273, 2587058081, 942453804, 88731834, 1319061990, 173208747, 1982493283, - 3808794987, 3874839738, 1854586992, 3508364323, 2021729080, 1899625710, 2420749567, - 816401711, 3059730605, 1570934109, 3138812023, 1756281367, 3205859133, 2985201975, - 1014588672, 3799556578, 577719905, 4052248225, 3649019757, 398935965, 56421532, - 976366795, 1876047791, 3147705595, 4025764546, 1097271882, 1910500779, 2397021233, - 1340419138, 2753207595, 3067328524, 2210626776, 1280440432, 3940874757, 4172726578, - 1035509558, 1062145421, 1465448826, 2990139501, 1785427751, 2093931515, 4055890033, - 3388365687, 2245484242, 3609657408, 3527114516, 1013577862, 2389075196, 426934091, - 3237939346, 1071362463, 4070999470, 250952461, 2280067948, 1097862995, 2226250520, - 221983348, 1, - ]), - }; - let exponent = -324 + 1 - 752; - let result = slow::negative_digit_comp::(bigmant, fp, exponent); - assert_eq!(result.mant, 0); - assert_eq!(result.exp, 0); - - // 5e-324, above halfway, round-up to 5e-324. 
- let bigmant = Bigint { - data: vec_from_u32(&[ - 3667999587, 776394808, 894084415, 1037654204, 1719556155, 1124198371, 2088472861, - 859275578, 834918607, 3167556114, 1082875312, 231271193, 1476296236, 360239786, - 3608617070, 100777043, 834603454, 887318342, 305718012, 1732087473, 2645063646, - 3728211506, 93691724, 1366000745, 723904866, 3037421624, 1816387920, 2732659194, - 3869049819, 532534979, 2824439209, 1323349161, 382944493, 1993820262, 4082215981, - 1555952134, 3635827414, 1482231762, 1867776587, 2130459211, 3989359658, 564215320, - 1173733358, 1580608728, 1412284882, 1602939803, 2382784237, 1925138608, 2495375854, - 519289497, 1762272177, 608514174, 631431287, 4214469733, 754041908, 3072560125, - 1765160997, 2031519620, 1769586374, 4131591237, 674408332, 3759445970, 1904194670, - 3818885807, 980005947, 1736835717, 911406800, 1545844036, 2415915482, 4269340915, - 2314622388, 2123690045, 2055289038, 2509524619, 1325843000, 2388695363, 787668722, - 2219833485, 10, - ]), - }; - let exponent = -324 + 1 - 753; - let result = slow::negative_digit_comp::(bigmant, fp, exponent); - assert_eq!(result.mant, 1); - assert_eq!(result.exp, 0); - - // 1e-323, below halfway, round-down to 5e-324. - let bigmant = Bigint { - data: vec_from_u32(&[ - 888248023, 990208672, 1937352202, 2058615950, 470771052, 2252062332, 3771600458, - 84635785, 1367478992, 1079061842, 2740046621, 661881239, 507239328, 697753503, - 253362433, 168342080, 1272933039, 4202497602, 1521090445, 4230166401, 3245111456, - 1771955024, 2337713684, 1778330386, 2423095095, 693420498, 3750496916, 3753972086, - 1073775970, 846704018, 1223205425, 1867757265, 3214198296, 1145624482, 599115079, - 2929172517, 4121498420, 2287897365, 1987227723, 3482603622, 2806989260, 1760871734, - 4227656463, 1736215921, 2778669702, 4140571142, 1870700075, 2015964902, 1288446830, - 1168026618, 400675728, 2165625891, 450825118, 1620534920, 2874273302, 2645036208, - 1267321906, 3865497387, 2594934933, 2526789975, 459036976, 2552359495, 27750894, - 3204441497, 1944008238, 1359672352, 2839100473, 4191710191, 3220138979, 902020460, - 2896982042, 1451853853, 2406388220, 1238109043, 2615908943, 3644037856, 77415486, - 230114675, 3155, - ]), - }; - let fp = ExtendedFloat { - mant: 1 << 63, - exp: -62, - }; - let exponent = -324 + 1 - 755; - let result = slow::negative_digit_comp::(bigmant, fp, exponent); - assert_eq!(result.mant, 1); - assert_eq!(result.exp, 0); - - // 1e-323, halfway, round-up to 1e-323. 
- let bigmant = Bigint { - data: vec_from_u32(&[ - 1959393335, 1521408631, 3704199161, 740792990, 945363576, 1625749700, 626541858, - 687279403, 250475582, 91273375, 3331339701, 3505355194, 442888870, 967065395, - 4089062228, 3466206949, 2827361413, 266195502, 3957185970, 519626241, 1652512553, - 2836450370, 3034584624, 1268793682, 1935158378, 1770219946, 1403909835, 2967281406, - 2449205134, 589257223, 417835033, 826501478, 973876807, 1027642808, 365671335, - 3043766018, 2808735142, 1733159717, 3566810083, 2357124681, 1196807897, 169264596, - 2929100385, 1333176077, 853182194, 3487359048, 3291815648, 1436535041, 2896096404, - 4021257415, 3964655489, 612050981, 2336913034, 3841321297, 3232689679, 3928245144, - 3106528676, 3186436263, 101379182, 380483912, 1061315959, 1986827250, 3577735508, - 1575162471, 2441485432, 2239037633, 1991408958, 3040733588, 2872258292, 1280802274, - 1123883446, 3214087391, 3623063818, 752857385, 2545236548, 3293588986, 2383784264, - 665950045, 3, - ]), - }; - let exponent = -324 + 1 - 752; - let result = slow::negative_digit_comp::(bigmant, fp, exponent); - assert_eq!(result.mant, 2); - assert_eq!(result.exp, 0); - - // 1e-323, above halfway, round-up to 1e-323. - let bigmant = Bigint { - data: vec_from_u32(&[ - 2414064167, 2329184426, 2682253245, 3112962612, 863701169, 3372595114, 1970451287, - 2577826735, 2504755821, 912733750, 3248625938, 693813579, 133921412, 1080719359, - 2235916618, 302331131, 2503810362, 2661955026, 917154036, 901295123, 3640223643, - 2594699927, 281075174, 4098002235, 2171714598, 522330280, 1154196466, 3903010287, - 3017214866, 1597604939, 4178350331, 3970047484, 1148833479, 1686493490, 3656713352, - 372889108, 2317547651, 151727992, 1308362466, 2096410338, 3378144383, 1692645962, - 3521200074, 446858888, 4236854647, 513852113, 2853385416, 1480448529, 3191160267, - 1557868492, 991849235, 1825542523, 1894293861, 4053474607, 2262125726, 627745783, - 1000515697, 1799591565, 1013791827, 3804839120, 2023224998, 2688403318, 1417616716, - 2866722830, 2940017843, 915539855, 2734220401, 342564812, 2952779151, 4218088154, - 2648899870, 2076102840, 1870899819, 3233606562, 3977529001, 2871118793, 2363006167, - 2364533159, 31, - ]), - }; - let exponent = -324 + 1 - 753; - let result = slow::negative_digit_comp::(bigmant, fp, exponent); - assert_eq!(result.mant, 2); - assert_eq!(result.exp, 0); -} - -#[test] -fn parse_mantissa_test() { - let max_digits = f64::MAX_DIGITS; - - // Large number of digits. 
- let integer = b"2"; - let fraction = b"4703282292062327208828439643411068618252990130716238221279284125033775363510437593264991818081799618989828234772285886546332835517796989819938739800539093906315035659515570226392290858392449105184435931802849936536152500319370457678249219365623669863658480757001585769269903706311928279558551332927834338409351978015531246597263579574622766465272827220056374006485499977096599470454020828166226237857393450736339007967761930577506740176324673600968951340535537458516661134223766678604162159680461914467291840300530057530849048765391711386591646239524912623653881879636239373280423891018672348497668235089863388587925628302755995657524455507255189313690836254779186948667994968324049705821028513185451396213837722826145437693412532098591327667236328124999"; - let (bigmant, count) = slow::parse_mantissa(integer.iter(), fraction.iter(), max_digits); - let expected = vec_from_u32(&[ - 1727738439, 330069557, 3509095598, 686205316, 156923684, 750687444, 2688855918, 28211928, - 1887482096, 3222998811, 913348873, 1652282845, 1600735541, 1664240266, 84454144, - 1487769792, 1855966778, 2832488299, 507030148, 1410055467, 2513359584, 3453963205, - 779237894, 3456088326, 3671009895, 3094451696, 1250165638, 2682979794, 357925323, - 1713890438, 3271046672, 3485897285, 3934710962, 1813530592, 199705026, 976390839, - 2805488572, 2194288220, 2094065006, 2592523639, 3798974617, 586957244, 1409218821, - 3442050171, 3789534764, 1380190380, 2055222457, 3535299831, 429482276, 389342206, - 133558576, 721875297, 3013586570, 540178306, 2389746866, 2313334501, 422440635, 1288499129, - 864978311, 842263325, 3016323856, 2282442263, 1440906063, 3931458696, 3511314276, - 1884879882, 946366824, 4260548261, 1073379659, 1732329252, 3828972211, 1915607049, - 3665440937, 1844358779, 3735281178, 2646335050, 1457460927, 2940016422, 1051, - ]); - assert_eq!(&*bigmant.data, &*expected); - assert_eq!(count, 755); - - // Truncation. 
- let integer = b"7"; - let fraction = b"4109846876186981626485318930233205854758970392148714663837852375101326090531312779794975454245398856969484704316857659638998506553390969459816219401617281718945106978546710679176872575177347315553307795408549809608457500958111373034747658096871009590975442271004757307809711118935784838675653998783503015228055934046593739791790738723868299395818481660169122019456499931289798411362062484498678713572180352209017023903285791732520220528974020802906854021606612375549983402671300035812486479041385743401875520901590172592547146296175134159774938718574737870961645638908718119841271673056017045493004705269590165763776884908267986972573366521765567941072508764337560846003984904972149117463085539556354188641513168478436313080237596295773983001708984375332669816033062329967789262837"; - let (bigmant, count) = slow::parse_mantissa(integer.iter(), fraction.iter(), max_digits); - let expected = vec_from_u32(&[ - 983641521, 2202462645, 4170685875, 1591772364, 529830014, 803977727, 126733331, 1695971390, - 4089590927, 1532849076, 2705586665, 4046282448, 4076195232, 3230469892, 3059053929, - 79035789, 744229654, 2026438108, 3570486781, 2818088662, 3485839733, 3653138023, - 2857937689, 602717004, 3689362390, 283607819, 1783392475, 2053068939, 1888214698, - 550023429, 296880187, 1046779059, 1285361259, 84614934, 1627922685, 2023868765, 1987523901, - 743493573, 3897769089, 2210613570, 2261081349, 3015057659, 3949711644, 3346092916, - 2433639051, 36411806, 1050442, 269209477, 2649742673, 1494221829, 2763524503, 2514491481, - 2325312415, 1741242814, 2479923579, 1098250122, 2416211509, 3612906464, 403420662, - 3663250314, 1993722098, 365907183, 4270226312, 3962131185, 432952495, 2963635838, - 2996289227, 3200289391, 2753231690, 2780286109, 884373163, 1418533204, 3382415762, - 499541562, 3369625401, 3421327641, 3526770155, 3109983188, 1157439767, 734593155, - ]); - assert_eq!(&*bigmant.data, &*expected); - assert_eq!(count, max_digits + 1); - - // No fraction digits. - let integer = b"74109846876186981626485318930233205854758970392148714663837852375101326090531312779794975454245398856969484704316857659638998506553390969459816219401617281718945106978546710679176872575177347315553307795408549809608457500958111373034747658096871009590975442271004757307809711118935784838675653998783503015228055934046593739791790738723868299395818481660169122019456499931289798411362062484498678713572180352209017023903285791732520220528974020802906854021606612375549983402671300035812486479041385743401875520901590172592547146296175134159774938718574737870961645638908718119841271673056017045493004705269590165763776884908267986972573366521765567941072508764337560846003984904972149117463085539556354188641513168478436313080237596295773983001708984375332669816033062329967789262837"; - let fraction = b""; - let (bigmant, count) = slow::parse_mantissa(integer.iter(), fraction.iter(), max_digits); - assert_eq!(&*bigmant.data, &*expected); - assert_eq!(count, max_digits + 1); - - // Multiple of step (check we add our temporary correctly). 
- let integer = b"7410984687618698162648531893023320585475897039214871466383785237510132609053131277979497545424539885696948470431685765963899850655339096945981621940161728171894510697854671067917687257517734731555330779540854980960845750095811137303474765809687100959097544227100475730780971111893578483867565399878350301522805593404659373979179073872386829939581848166016912201945649993128979841136206248449867871357218035220901702390328579173252022052897402080290685402160661237554998340267130003581248647904138574340187552090159017259254714629617513415977493871857473787096164563890871811984127167305601704549300470526959016576377688490826798697257336652176556794107250876433756084600398490497214911746308553955635418864151316847843631308023759629577398300170898437533266981"; - let fraction = b""; - let (bigmant, count) = slow::parse_mantissa(integer.iter(), fraction.iter(), max_digits); - let expected = vec_from_u32(&[ - 617018405, 396211401, 2130402383, 3812547827, 4263683770, 3918012496, 1787721490, - 2493014694, 435464626, 3720854431, 2928509507, 2677932436, 369049650, 3606588290, - 231237141, 2231172875, 3358152367, 95217925, 2777810007, 1016185079, 596681915, 2331711780, - 593487272, 4212730845, 339602972, 4097829793, 262427536, 4182115035, 3414687403, - 3711518952, 4168896929, 483727327, 1657080031, 2785588628, 1009114769, 482126749, - 485376744, 1123705337, 3225501941, 2939050108, 1338451005, 2104263947, 3425461126, - 1834224928, 4061025704, 792093815, 2707019125, 3610271203, 4254101529, 1026215278, - 4117890107, 1748110416, 2535111606, 80965120, 3823822115, 2354910057, 590658512, - 2682089507, 159300272, 1776569442, 3382166479, 3222978591, 540586210, 934713382, - 2014123057, 1455555790, 4119131465, 3685912982, 3019947291, 3437891678, 2660105801, - 2605860762, 394373515, 4177081532, 1616198650, 1580399082, 2017617452, 3327697130, - 315505357, - ]); - assert_eq!(&*bigmant.data, &*expected); - assert_eq!(count, 760); -} diff --git a/vendor/minimal-lexical/tests/stackvec.rs b/vendor/minimal-lexical/tests/stackvec.rs deleted file mode 100644 index d5587f23f8a6aa..00000000000000 --- a/vendor/minimal-lexical/tests/stackvec.rs +++ /dev/null @@ -1,32 +0,0 @@ -use minimal_lexical::bigint; -#[cfg(feature = "alloc")] -pub use minimal_lexical::heapvec::HeapVec as VecType; -#[cfg(not(feature = "alloc"))] -pub use minimal_lexical::stackvec::StackVec as VecType; - -pub fn vec_from_u32(x: &[u32]) -> VecType { - let mut vec = VecType::new(); - #[cfg(not(all(target_pointer_width = "64", not(target_arch = "sparc"))))] - { - for &xi in x { - vec.try_push(xi as bigint::Limb).unwrap(); - } - } - - #[cfg(all(target_pointer_width = "64", not(target_arch = "sparc")))] - { - for xi in x.chunks(2) { - match xi.len() { - 1 => vec.try_push(xi[0] as bigint::Limb).unwrap(), - 2 => { - let xi0 = xi[0] as bigint::Limb; - let xi1 = xi[1] as bigint::Limb; - vec.try_push((xi1 << 32) | xi0).unwrap() - }, - _ => unreachable!(), - } - } - } - - vec -} diff --git a/vendor/minimal-lexical/tests/vec_tests.rs b/vendor/minimal-lexical/tests/vec_tests.rs deleted file mode 100644 index 3a5f5886ad6b6e..00000000000000 --- a/vendor/minimal-lexical/tests/vec_tests.rs +++ /dev/null @@ -1,395 +0,0 @@ -mod stackvec; - -use core::cmp; -use minimal_lexical::bigint; -use stackvec::{vec_from_u32, VecType}; - -// u64::MAX and Limb::MAX for older Rustc versions. 
-const U64_MAX: u64 = 0xffff_ffff_ffff_ffff; -// LIMB_MAX -#[cfg(all(target_pointer_width = "64", not(target_arch = "sparc")))] -const LIMB_MAX: u64 = U64_MAX; -#[cfg(not(all(target_pointer_width = "64", not(target_arch = "sparc"))))] -const LIMB_MAX: u32 = 0xffff_ffff; - -#[test] -fn simple_test() { - // Test the simple properties of the stack vector. - let mut x = VecType::from_u64(1); - assert_eq!(x.len(), 1); - assert_eq!(x.is_empty(), false); - assert_eq!(x.capacity(), bigint::BIGINT_LIMBS); - x.try_push(5).unwrap(); - assert_eq!(x.len(), 2); - assert_eq!(x.pop(), Some(5)); - assert_eq!(x.len(), 1); - assert_eq!(&*x, &[1]); - x.try_extend(&[2, 3, 4]).unwrap(); - assert_eq!(x.len(), 4); - assert_eq!(&*x, &[1, 2, 3, 4]); - x.try_resize(6, 0).unwrap(); - assert_eq!(x.len(), 6); - assert_eq!(&*x, &[1, 2, 3, 4, 0, 0]); - x.try_resize(0, 0).unwrap(); - assert_eq!(x.len(), 0); - assert_eq!(x.is_empty(), true); - - let x = VecType::try_from(&[5, 1]).unwrap(); - assert_eq!(x.len(), 2); - assert_eq!(x.is_empty(), false); - if bigint::LIMB_BITS == 32 { - assert_eq!(x.hi64(), (0x8000000280000000, false)); - } else { - assert_eq!(x.hi64(), (0x8000000000000002, true)); - } - let rview = bigint::rview(&x); - assert_eq!(x[0], 5); - assert_eq!(x[1], 1); - assert_eq!(rview[0], 1); - assert_eq!(rview[1], 5); - assert_eq!(x.len(), 2); - - assert_eq!(VecType::from_u64(U64_MAX).hi64(), (U64_MAX, false)); -} - -#[test] -fn hi64_test() { - assert_eq!(VecType::from_u64(0xA).hi64(), (0xA000000000000000, false)); - assert_eq!(VecType::from_u64(0xAB).hi64(), (0xAB00000000000000, false)); - assert_eq!(VecType::from_u64(0xAB00000000).hi64(), (0xAB00000000000000, false)); - assert_eq!(VecType::from_u64(0xA23456789A).hi64(), (0xA23456789A000000, false)); -} - -#[test] -fn cmp_test() { - // Simple - let x = VecType::from_u64(1); - let y = VecType::from_u64(2); - assert_eq!(x.partial_cmp(&x), Some(cmp::Ordering::Equal)); - assert_eq!(x.cmp(&x), cmp::Ordering::Equal); - assert_eq!(x.cmp(&y), cmp::Ordering::Less); - - // Check asymmetric - let x = VecType::try_from(&[5, 1]).unwrap(); - let y = VecType::from_u64(2); - assert_eq!(x.cmp(&x), cmp::Ordering::Equal); - assert_eq!(x.cmp(&y), cmp::Ordering::Greater); - - // Check when we use reverse ordering properly. - let x = VecType::try_from(&[5, 1, 9]).unwrap(); - let y = VecType::try_from(&[6, 2, 8]).unwrap(); - assert_eq!(x.cmp(&x), cmp::Ordering::Equal); - assert_eq!(x.cmp(&y), cmp::Ordering::Greater); - - // Complex scenario, check it properly uses reverse ordering. 
- let x = VecType::try_from(&[0, 1, 9]).unwrap(); - let y = VecType::try_from(&[4294967295, 0, 9]).unwrap(); - assert_eq!(x.cmp(&x), cmp::Ordering::Equal); - assert_eq!(x.cmp(&y), cmp::Ordering::Greater); -} - -#[test] -fn math_test() { - let mut x = VecType::try_from(&[0, 1, 9]).unwrap(); - assert_eq!(x.is_normalized(), true); - x.try_push(0).unwrap(); - assert_eq!(&*x, &[0, 1, 9, 0]); - assert_eq!(x.is_normalized(), false); - x.normalize(); - assert_eq!(&*x, &[0, 1, 9]); - assert_eq!(x.is_normalized(), true); - - x.add_small(1); - assert_eq!(&*x, &[1, 1, 9]); - x.add_small(LIMB_MAX); - assert_eq!(&*x, &[0, 2, 9]); - - x.mul_small(3); - assert_eq!(&*x, &[0, 6, 27]); - x.mul_small(LIMB_MAX); - let expected: VecType = if bigint::LIMB_BITS == 32 { - vec_from_u32(&[0, 4294967290, 4294967274, 26]) - } else { - vec_from_u32(&[0, 0, 4294967290, 4294967295, 4294967274, 4294967295, 26]) - }; - assert_eq!(&*x, &*expected); - - let mut x = VecType::from_u64(0xFFFFFFFF); - let y = VecType::from_u64(5); - x *= &y; - let expected: VecType = vec_from_u32(&[0xFFFFFFFB, 0x4]); - assert_eq!(&*x, &*expected); - - // Test with carry - let mut x = VecType::from_u64(1); - assert_eq!(&*x, &[1]); - x.add_small(LIMB_MAX); - assert_eq!(&*x, &[0, 1]); -} - -#[test] -fn scalar_add_test() { - assert_eq!(bigint::scalar_add(5, 5), (10, false)); - assert_eq!(bigint::scalar_add(LIMB_MAX, 1), (0, true)); -} - -#[test] -fn scalar_mul_test() { - assert_eq!(bigint::scalar_mul(5, 5, 0), (25, 0)); - assert_eq!(bigint::scalar_mul(5, 5, 1), (26, 0)); - assert_eq!(bigint::scalar_mul(LIMB_MAX, 2, 0), (LIMB_MAX - 1, 1)); -} - -#[test] -fn small_add_test() { - let mut x = VecType::from_u64(4294967295); - bigint::small_add(&mut x, 5); - let expected: VecType = vec_from_u32(&[4, 1]); - assert_eq!(&*x, &*expected); - - let mut x = VecType::from_u64(5); - bigint::small_add(&mut x, 7); - let expected = VecType::from_u64(12); - assert_eq!(&*x, &*expected); - - // Single carry, internal overflow - let mut x = VecType::from_u64(0x80000000FFFFFFFF); - bigint::small_add(&mut x, 7); - let expected: VecType = vec_from_u32(&[6, 0x80000001]); - assert_eq!(&*x, &*expected); - - // Double carry, overflow - let mut x = VecType::from_u64(0xFFFFFFFFFFFFFFFF); - bigint::small_add(&mut x, 7); - let expected: VecType = vec_from_u32(&[6, 0, 1]); - assert_eq!(&*x, &*expected); -} - -#[test] -fn small_mul_test() { - // No overflow check, 1-int. - let mut x = VecType::from_u64(5); - bigint::small_mul(&mut x, 7); - let expected = VecType::from_u64(35); - assert_eq!(&*x, &*expected); - - // No overflow check, 2-ints. - let mut x = VecType::from_u64(0x4000000040000); - bigint::small_mul(&mut x, 5); - let expected: VecType = vec_from_u32(&[0x00140000, 0x140000]); - assert_eq!(&*x, &*expected); - - // Overflow, 1 carry. - let mut x = VecType::from_u64(0x33333334); - bigint::small_mul(&mut x, 5); - let expected: VecType = vec_from_u32(&[4, 1]); - assert_eq!(&*x, &*expected); - - // Overflow, 1 carry, internal. - let mut x = VecType::from_u64(0x133333334); - bigint::small_mul(&mut x, 5); - let expected: VecType = vec_from_u32(&[4, 6]); - assert_eq!(&*x, &*expected); - - // Overflow, 2 carries. 
- let mut x = VecType::from_u64(0x3333333333333334); - bigint::small_mul(&mut x, 5); - let expected: VecType = vec_from_u32(&[4, 0, 1]); - assert_eq!(&*x, &*expected); -} - -#[test] -fn pow_test() { - let mut x = VecType::from_u64(1); - bigint::pow(&mut x, 2); - let expected = VecType::from_u64(25); - assert_eq!(&*x, &*expected); - - let mut x = VecType::from_u64(1); - bigint::pow(&mut x, 15); - let expected: VecType = vec_from_u32(&[452807053, 7]); - assert_eq!(&*x, &*expected); - - let mut x = VecType::from_u64(1); - bigint::pow(&mut x, 16); - let expected: VecType = vec_from_u32(&[2264035265, 35]); - assert_eq!(&*x, &*expected); - - let mut x = VecType::from_u64(1); - bigint::pow(&mut x, 17); - let expected: VecType = vec_from_u32(&[2730241733, 177]); - assert_eq!(&*x, &*expected); - - let mut x = VecType::from_u64(1); - bigint::pow(&mut x, 302); - let expected: VecType = vec_from_u32(&[ - 2443090281, 2149694430, 2297493928, 1584384001, 1279504719, 1930002239, 3312868939, - 3735173465, 3523274756, 2025818732, 1641675015, 2431239749, 4292780461, 3719612855, - 4174476133, 3296847770, 2677357556, 638848153, 2198928114, 3285049351, 2159526706, - 626302612, - ]); - assert_eq!(&*x, &*expected); -} - -#[test] -fn large_add_test() { - // Overflow, both single values - let mut x = VecType::from_u64(4294967295); - let y = VecType::from_u64(5); - bigint::large_add(&mut x, &y); - let expected: VecType = vec_from_u32(&[4, 1]); - assert_eq!(&*x, &*expected); - - // No overflow, single value - let mut x = VecType::from_u64(5); - let y = VecType::from_u64(7); - bigint::large_add(&mut x, &y); - let expected = VecType::from_u64(12); - assert_eq!(&*x, &*expected); - - // Single carry, internal overflow - let mut x = VecType::from_u64(0x80000000FFFFFFFF); - let y = VecType::from_u64(7); - bigint::large_add(&mut x, &y); - let expected: VecType = vec_from_u32(&[6, 0x80000001]); - assert_eq!(&*x, &*expected); - - // 1st overflows, 2nd doesn't. - let mut x = VecType::from_u64(0x7FFFFFFFFFFFFFFF); - let y = VecType::from_u64(0x7FFFFFFFFFFFFFFF); - bigint::large_add(&mut x, &y); - let expected: VecType = vec_from_u32(&[0xFFFFFFFE, 0xFFFFFFFF]); - assert_eq!(&*x, &*expected); - - // Both overflow. - let mut x = VecType::from_u64(0x8FFFFFFFFFFFFFFF); - let y = VecType::from_u64(0x7FFFFFFFFFFFFFFF); - bigint::large_add(&mut x, &y); - let expected: VecType = vec_from_u32(&[0xFFFFFFFE, 0x0FFFFFFF, 1]); - assert_eq!(&*x, &*expected); -} - -#[test] -fn large_mul_test() { - // Test by empty - let mut x = VecType::from_u64(0xFFFFFFFF); - let y = VecType::new(); - bigint::large_mul(&mut x, &y); - let expected = VecType::new(); - assert_eq!(&*x, &*expected); - - // Simple case - let mut x = VecType::from_u64(0xFFFFFFFF); - let y = VecType::from_u64(5); - bigint::large_mul(&mut x, &y); - let expected: VecType = vec_from_u32(&[0xFFFFFFFB, 0x4]); - assert_eq!(&*x, &*expected); - - // Large u32, but still just as easy. - let mut x = VecType::from_u64(0xFFFFFFFF); - let y = VecType::from_u64(0xFFFFFFFE); - bigint::large_mul(&mut x, &y); - let expected: VecType = vec_from_u32(&[0x2, 0xFFFFFFFD]); - assert_eq!(&*x, &*expected); - - // Let's multiply two large values together. 
- let mut x: VecType = vec_from_u32(&[0xFFFFFFFE, 0x0FFFFFFF, 1]); - let y: VecType = vec_from_u32(&[0x99999999, 0x99999999, 0xCCCD9999, 0xCCCC]); - bigint::large_mul(&mut x, &y); - let expected: VecType = - vec_from_u32(&[0xCCCCCCCE, 0x5CCCCCCC, 0x9997FFFF, 0x33319999, 0x999A7333, 0xD999]); - assert_eq!(&*x, &*expected); -} - -#[test] -fn very_large_mul_test() { - // Test cases triggered to that would normally use `karatsuba_mul`. - // Karatsuba multiplication was ripped out, however, these are useful - // test cases. - let mut x: VecType = vec_from_u32(&[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]); - let y: VecType = vec_from_u32(&[4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19]); - bigint::large_mul(&mut x, &y); - let expected: VecType = vec_from_u32(&[ - 4, 13, 28, 50, 80, 119, 168, 228, 300, 385, 484, 598, 728, 875, 1040, 1224, 1340, 1435, - 1508, 1558, 1584, 1585, 1560, 1508, 1428, 1319, 1180, 1010, 808, 573, 304, - ]); - assert_eq!(&*x, &*expected); - - // Test cases triggered to that would normally use `karatsuba_uneven_mul`. - let mut x: VecType = vec_from_u32(&[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]); - let y: VecType = vec_from_u32(&[ - 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, - 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, - ]); - bigint::large_mul(&mut x, &y); - let expected: VecType = vec_from_u32(&[ - 4, 13, 28, 50, 80, 119, 168, 228, 300, 385, 484, 598, 728, 875, 1040, 1224, 1360, 1496, - 1632, 1768, 1904, 2040, 2176, 2312, 2448, 2584, 2720, 2856, 2992, 3128, 3264, 3400, 3536, - 3672, 3770, 3829, 3848, 3826, 3762, 3655, 3504, 3308, 3066, 2777, 2440, 2054, 1618, 1131, - 592, - ]); - assert_eq!(&*x, &*expected); -} - -#[test] -fn bit_length_test() { - let x: VecType = vec_from_u32(&[0, 0, 0, 1]); - assert_eq!(bigint::bit_length(&x), 97); - - let x: VecType = vec_from_u32(&[0, 0, 0, 3]); - assert_eq!(bigint::bit_length(&x), 98); - - let x = VecType::from_u64(1 << 31); - assert_eq!(bigint::bit_length(&x), 32); -} - -#[test] -fn shl_bits_test() { - let mut x = VecType::from_u64(0xD2210408); - bigint::shl_bits(&mut x, 5); - let expected: VecType = vec_from_u32(&[0x44208100, 0x1A]); - assert_eq!(&*x, &*expected); -} - -#[test] -fn shl_limbs_test() { - let mut x = VecType::from_u64(0xD2210408); - bigint::shl_limbs(&mut x, 2); - let expected: VecType = if bigint::LIMB_BITS == 32 { - vec_from_u32(&[0, 0, 0xD2210408]) - } else { - vec_from_u32(&[0, 0, 0, 0, 0xD2210408]) - }; - assert_eq!(&*x, &*expected); -} - -#[test] -fn shl_test() { - // Pattern generated via `''.join(["1" +"0"*i for i in range(20)])` - let mut x = VecType::from_u64(0xD2210408); - bigint::shl(&mut x, 5); - let expected: VecType = vec_from_u32(&[0x44208100, 0x1A]); - assert_eq!(&*x, &*expected); - - bigint::shl(&mut x, 32); - let expected: VecType = vec_from_u32(&[0, 0x44208100, 0x1A]); - assert_eq!(&*x, &*expected); - - bigint::shl(&mut x, 27); - let expected: VecType = vec_from_u32(&[0, 0, 0xD2210408]); - assert_eq!(&*x, &*expected); - - // 96-bits of previous pattern - let mut x: VecType = vec_from_u32(&[0x20020010, 0x8040100, 0xD2210408]); - bigint::shl(&mut x, 5); - let expected: VecType = vec_from_u32(&[0x400200, 0x802004, 0x44208101, 0x1A]); - assert_eq!(&*x, &*expected); - - bigint::shl(&mut x, 32); - let expected: VecType = vec_from_u32(&[0, 0x400200, 0x802004, 0x44208101, 0x1A]); - assert_eq!(&*x, &*expected); - - bigint::shl(&mut x, 27); - let expected: VecType = vec_from_u32(&[0, 0, 0x20020010, 0x8040100, 0xD2210408]); - 
assert_eq!(&*x, &*expected); -} diff --git a/vendor/nom/.cargo-checksum.json b/vendor/nom/.cargo-checksum.json deleted file mode 100644 index 89ee5e722e5fd9..00000000000000 --- a/vendor/nom/.cargo-checksum.json +++ /dev/null @@ -1 +0,0 @@ -{"files":{".cargo_vcs_info.json":"375660cfef21494aaa1a90768e0054f2c9e6293e2085a28ea3ad0b70efd5bad6","CHANGELOG.md":"fd79f8ab367d50fd57136744fa2e0a198c279c9707c2a45f00ad1b62d0a637ab","Cargo.lock":"3b95f09a31e1fc281ef890f9623ac84f0a58bc3454a96b652d2f0682963f29b5","Cargo.toml":"9d07fd5cc339d8f7c0c164b2568164b808dee1d441b17631cb7b7d44d39ed778","Cargo.toml.orig":"c9cb8a6ee98d4c60dbdd5f9789aed2b8296a74be3f3d5eac265e032008ad34db","LICENSE":"4dbda04344456f09a7a588140455413a9ac59b6b26a1ef7cdf9c800c012d87f0","README.md":"3cbdcb07f9ef189ad617f40423a17f1e48ee5aba3f118f261886c139d64d26ae","doc/nom_recipes.md":"a903a6d8f9e5c935f2a4cbd632f67bc46e41934d1cc8517da9b9e7f3840c9955","src/bits/complete.rs":"640bdcad311a05d94e4b3a1a8b2105c540f80864edb75de0a570f03d4055e5ed","src/bits/mod.rs":"1c6aa26887d379384c1c823684e6f8e91b59df72664eefd8ddf0b6ca5517e669","src/bits/streaming.rs":"304cc5d565cfa30075827c1d6cb173b7cb92b101d8ebe9bc50476a00c92fd5dc","src/branch/mod.rs":"dbe1ed1bb0230310adf8e8d15e6afcf8826c7a111f8429e13fe3e9ebd3fbeae0","src/branch/tests.rs":"9a4a7b0c38fc28881d904f8ad6757a23e543f47a4c2d6fd6a9589eeb97209088","src/bytes/complete.rs":"666fa037c63292b1616cbc04c5b414a53c705d0d2ccd8d84399bbe78f573b7e9","src/bytes/mod.rs":"055e264f71a9fa2245be652cc747cfb2c3e34c3c2ba3b75e9611be51fcebea0b","src/bytes/streaming.rs":"e716e6555fbde14bfc2d7358a3edc2191df0522bc55b1f7735f9809ceb235829","src/bytes/tests.rs":"f0d9eb90d72873346e47e5592d30093eb38cbbb5fbf2e769cda776ccfff4f887","src/character/complete.rs":"7eeb5f00baab7aeaf7f392e5872d141d352951a146c0396721dab71e29b4c87b","src/character/mod.rs":"2fc6a3b19b766a3c37328d62eedbc0c9cb9612aa1d38ececd5cc579b61725fa2","src/character/streaming.rs":"de67ec5663475bc5ffa16f12d121ce9181353b16656b90216704197fca3010fc","src/character/tests.rs":"38958a709f96f077f93a72b32d8ded0a2ad6e488d9aadbe3cf1cfd8adaec06c8","src/combinator/mod.rs":"f7b9c35734f10a4b46d2e2ae874628d48fa1fe0bfc9f44325a89a14b3cfaea02","src/combinator/tests.rs":"1e56e2c1263d93bfbd244d24160a0bea41731e5158d57382e69c215427770b94","src/error.rs":"9d9bf87e76b47cfd9170f8ae50b6deeb02ff1c296aac3eb4f71ee1474dc0fba5","src/internal.rs":"5e670e0f5955af13da4b9a98d444fc721d38835b4637fe8170871fefef4e04cb","src/lib.rs":"9e05f2447ef1e5e9418953300c97d297f26f8f33c0528733a7681d8cb458346e","src/macros.rs":"11ac3dee346e5bf61015f328975cf6ea6f438683eb3b8b6c7c8e6383b0c5b7af","src/multi/mod.rs":"6093bd5909ddae76309442eba3a33716d279d219e5933a0dedef977312d6c5f8","src/multi/tests.rs":"806f89f5f347978c22e9b8cc7f8a49ad1d1fe23bff5974725b643a2ceffe8cb0","src/number/complete.rs":"a4f312c200710a446986142d19ebc78727265cf2c3b07b70dd84339051040bdd","src/number/mod.rs":"ba6eb439ee0befcc0872be7ce43b4836622af45c3dc2fc003b0d909ee42d7b20","src/number/streaming.rs":"1c2137235f093857e545069d687f50712ea457fac03961f3f6ac15c0f7e40c2a","src/sequence/mod.rs":"2dff114a950965e321cafdb215a441990f35d28756b12b2891179f348268fca2","src/sequence/tests.rs":"8dc4ca519b274d4e0694b373b2710c2e947e6074d43ec6a738a74113a09379f5","src/str.rs":"f26aa11f43e8a4a300ea0f310d599fab3f809102cfb29033ddf84f299ee8010c","src/traits.rs":"01934f8a61fc3cc5a03438a10751d3b074c89e5f3bcc96df8e43cf6b09be2308","tests/arithmetic.rs":"725efba4fc6cc811f542f3bcc8c7afd52702a66f64319d4f2796225e2e75d0ca","tests/arithmetic_ast.rs":"c7c28c988640405dd250c86045bbda75fc6ead2a769fb05eafbfb
e74d97e0485","tests/css.rs":"36a2198e42e601efc611ebd6b3c6861f3ccb6a63525829ae6a2603bcdc4c2b11","tests/custom_errors.rs":"354d5a82a4f5a24b97901a3b411b4eab038c4d034047971956b9cdc12538e50d","tests/escaped.rs":"c25987ea6d9a7dde74d58a49c332d223da3a495569cb79e3fe921bce51729ead","tests/float.rs":"cdac92fb14afb75cba9d6b8f568e272a630b2cfb9f096b76c91909a3cd016869","tests/fnmut.rs":"dc9b6140eb3405d1497b05675fc4d3050785771a2afa81990d684b2edd0c9746","tests/ini.rs":"f0ce38b90057e9e0fd2329819395c420cbf1400457f9c4279414301faa38b19c","tests/ini_str.rs":"4c8f6ce3a2a245e8365837b873c25d2d8f24887313b791e2edd09a76a2d98947","tests/issues.rs":"1322ffc270ba1bedf39b295eb622ead2715ab7d60db0181af5305a0429c7819e","tests/json.rs":"8672fca70b889d6243a2f0f4c99389e22200e4363f253e83a3f26620b92f765e","tests/mp4.rs":"db6568ee9ccad70a7b567295831b961b369011f66dc2dd406851208007588600","tests/multiline.rs":"aef9768beaf5042b8629599b2094712646abb23eb11fa662b5a9bf3dfa432547","tests/overflow.rs":"a249ebeebfc5228faf9bfd5241a54a8181df476c4699ef87bb7d8a2161b9fc72","tests/reborrow_fold.rs":"66230bacd8d36e1559f1dc919ae8eab3515963c4aef85a079ec56218c9a6e676"},"package":"d273983c5a657a70a3e8f2a01329822f3b8c8172b73826411a55751e404a0a4a"} \ No newline at end of file diff --git a/vendor/nom/.cargo_vcs_info.json b/vendor/nom/.cargo_vcs_info.json deleted file mode 100644 index c356c323b4ecf5..00000000000000 --- a/vendor/nom/.cargo_vcs_info.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "git": { - "sha1": "869f8972a4383b13cf89574fda28cb7dbfd56517" - }, - "path_in_vcs": "" -} \ No newline at end of file diff --git a/vendor/nom/CHANGELOG.md b/vendor/nom/CHANGELOG.md deleted file mode 100644 index a8f4c02c27dac7..00000000000000 --- a/vendor/nom/CHANGELOG.md +++ /dev/null @@ -1,1551 +0,0 @@ -# Change Log - -## [Unreleased][unreleased] - -### Thanks - -### Changed - -## 7.1.3 - 2023-01-15 - -### Thanks - -- @Shadow53 - -### Fixed - -- panic in `many` and `count` combinators when the output type is zero sized - -## 7.1.2 - 2023-01-01 - -### Thanks - -- @joubs -- @Fyko -- @LoganDark -- @darnuria -- @jkugelman -- @barower -- @puzzlewolf -- @epage -- @cky -- @wolthom -- @w1ll-i-code - -### Changed - -- documentation fixes -- tests fixes -- limit the initial capacity of the result vector of `many_m_n` to 64kiB -- bits parser now accept `Parser` implementors instead of only functions - -### Added - -- implement `Tuple` parsing for the unit type as a special case -- implement `ErrorConvert` on the unit type to make it usable as error type for bits parsers -- bool parser for bits input - -## 7.1.1 - 2022-03-14 - -### Thanks - -- @ThomasdenH -- @@SphinxKnight -- @irevoire -- @doehyunbaek -- @pxeger -- @punkeel -- @max-sixty -- @Xiretza -- @5c077m4n -- @erihsu -- @TheNeikos -- @LoganDark -- @nickelc -- @chotchki -- @ctrlcctrlv - - -### Changed - -- documentation fixes -- more examples - -## 7.1.0 - 2021-11-04 - -### Thanks - -- @nickelc -- @Stargateur -- @NilsIrl -- @clonejo -- @Strytyp -- @schubart -- @jihchi -- @nipunn1313 -- @Gungy2 -- @Drumato -- @Alexhuszagh -- @Aehmlo -- @homersimpsons -- @dne -- @epage -- @saiintbrisson -- @pymongo - -### Changed - -- documentation fixes -- Ci fixes -- the move to minimal-lexical for float parsing introduced bugs that cannot be resolved right now, so this version moves back to using the standard lib' parser. *This is a performance regression**. 
If you have specific requirements around float parsing, you are strongly encouraged to use [recognize_float](https://docs.rs/nom/latest/nom/number/complete/fn.recognize_float.html) and another library to convert to a f32 or f64 - -### Added - -- alt now works with 1 elment tuples - -## 7.0.0 - 2021-08-21 - -This release fixes dependency compilation issues and strengthen the minimum supported Rust version (MSRV) policy. This is also the first release without the macros that were used since nom's beginning. - -### Thanks - -- @djc -- @homersimpsons -- @lo48576 -- @myrrlyn -- @RalXYZ -- @nickelc -- @cenodis - -### Added - -- `take_until1` combinator -- more `to_owned` implementations -- `fail`: a parser that always fail, useful as default condition in other combinators -- text to number parsers: in the `character::streaming` and `character::complete` modules, there are parsers named `i8, u16, u32, u64, u128` and `u8 ,u16, u32, u64, u128` that recognize decimal digits and directly convert to a number in the target size (checking for max int size) - -### Removed - -- now that function combinators are the main way to write parsers, the old macro combinators are confusing newcomers. THey have been removed -- the `BitSlice` input type from bitvec has been moved into the [nom-bitvec](https://crates.io/crates/nom-bitvec) crate. nom does not depend on bitvec now -- regex parsers have been moved into the [nom-regex](https://crates.io/crates/nom-regex) crate. nom does not depend on regex now -- `ErrorKind::PArseTo` was not needed anymore - -### Changed - -- relax trait bounds -- some performance fixes -- `split_at_position*` functions should now be guaranteed panic free -- the `lexical-core` crate used for float parsing has now been replaced with `minimal-lexical`: the new crate is faster to compile, faster to parse, and has no dependencies - -### Fixed - -- infinite loop in `escaped` combinator -- `many_m_n` now fails if min > max - - -## 6.2.1 - 2021-06-23 - -### Thanks - -This release was done thanks to the hard work of (by order of appearance in the commit list): - -- @homersimpsons - -### Fixed - -- fix documentation building - -## 6.2.0 - 2021-02-15 - -### Thanks - -This release was done thanks to the hard work of (by order of appearance in the commit list): - -- @DavidKorczynski -- @homersimpsons -- @kornelski -- @lf- -- @lewisbelcher -- @ronan-d -- @weirane -- @heymind -- @marcianx -- @Nukesor - -### Added - -- nom is now regularly fuzzed through the OSSFuzz project - -### Changed - -- lots of documentation fixes -- relax trait bounds -- workarounds for dependency issues with bitvec and memchr - -## 6.1.2 - 2021-02-15 - -### Changed - -- Fix cargo feature usage in previous release - -## 6.1.1 - 2021-02-15 - -### Thanks - -This release was done thanks to the hard work of (by order of appearance in the commit list): - -- @nickelc - -### Changed - -- Fix dependenciy incompatibilities: Restrict the bitvec->funty dependency to <=1.1 - -## 6.1.0 - 2021-01-23 - -### Thanks - -This release was done thanks to the hard work of (by order of appearance in the commit list): - -- @sachaarbonel -- @vallentin -- @Lucretiel -- @meiomorphism -- @jufajardini -- @neithernut -- @drwilco - -### Changed - -- readme and documentation fixes -- rewrite of fold_many_m_n -- relax trait bounds on some parsers -- implement `std::error::Error` on `VerboseError` - - -## 6.0.1 - 2020-11-24 - -### Thanks - -This release was done thanks to the hard work of (by order of appearance in the commit list): - -- @Leonqn -- @nickelc -- 
@toshokan -- @juchiast -- @shssoichiro -- @jlkiri -- @chifflier -- @fkloiber -- @Kaoet -- @Matthew Plant - -### Added - -- `ErrorConvert` implementation for `VerboseError` - -### Changed - -- CI fixes -- `fold_many*` now accept `FnMut` for the accumulation function -- relaxed input bounds on `length_count` - -# Fixed - -- documentation fixes -- the `#[deprecated]` attribute was removed from traits because it does not compile anymore on nightly -- bits and bytes combinators from the bits modules are now converted to use `FnMut` - -## 6.0.0 - 2020-10-31 - -### Thanks - -This release was done thanks to the hard work of (by order of appearance in the commit list): -- @chifflier -- @shepmaster -- @amerelo -- @razican -- @Palladinium -- @0ndorio -- Sebastian Zivota -- @keruspe -- @devonhollowood -- @parasyte -- @nnt0 -- @AntoineCezar -- @GuillaumeGomez -- @eijebong -- @stadelmanma -- @sphynx -- @snawaz -- @fosskers -- @JamesHarrison -- @calebsander -- @jthornber -- @ahmedcharles -- @rljacobson -- @benkay86 -- @georgeclaghorn -- @TianyiShi2001 -- @shnewto -- @alfriadox -- @resistor -- @myrrlyn -- @chipsenkbeil -- @ruza-net -- @fanf2 -- @jameysharp -- @FallenWarrior2k -- @jmg-duarte -- @ericseppanen -- @hbina -- Andreas Molzer -- @nickelc -- @bgourlie - -## Notable changes - -This release is a more polished version of nom 5, that came with a focus on -function parsers, by relaxing the requirements: combinators will return a -`impl FnMut` instead of `impl Fn`, allowing closures that change their context, -and parsers can be any type now, as long as they implement the new `Parser` trait. -That parser trait also comes with a few helper methods. - -Error management was often a pain point, so a lot of work went into making it easier. -Now it integrates with `std:error::Error`, the `IResult::finish()` method allows you -to convert to a more usable type, the `into` combinator can convert the error type -if there's a `From` implementation, and there are more specific error traits like -`ContextError` for the `context` combinator, and `FromExternalError` for `map_res`. -While the `VerboseError` type and its `convert_error` function saw some changes, -not many features ill be added to it, instead you are encouraged to build the error -type that corresponds to your needs if you are building a language parser. - -This version also integrates with the excellent [bitvec](https://crates.io/crates/bitvec) -crate for better bit level parsing. This part of nom was not great and a bit of a hack, -so this will give better options for those parsers. - -At last, documentation! There are now more code examples, functions and macros that require -specific cargo features are now clearly indicated, and there's a new `recipes` module -containing example patterns. - -### Breaking changes - -- the minimal Rust version is now 1.44 (1.37 if building without the `alloc` or `std` features) -- streaming parsers return the number of additional bytes they need, not the total. This was supposed to be the case everywhere, but some parsers were forgotten -- removed the `regexp_macros` cargo feature -- the `context` combinator is not linked to `ParseError` anymore, instead it come with its own `ContextError` trait -- `Needed::Size` now contains a `NonZeroUsize`, so we can reduce the structure's size by 8 bytes. When upgrading, `Needed::Size(number)` can be replaced with `Needed::new(number)` -- there is now a more general `Parser` trait, so parsers can be something else than a function. 
This trait also comes with combinator methods like `map`, `flat_map`, `or`. Since it is implemented on `Fn*` traits, it should not affect existing code too much -- combinators that returned a `impl Fn` now return a `impl FnMut` to allow parser closures that capture some mutable value from the context -- `separated_list` is now `separated_list0` -- removed the deprecated `methods` module -- removed the deprecated `whitespace` module -- the default error type is now a struct (`nom::error::Error`) instead of a tuple -- the `FromExternalError` allows wrapping the error returned by the function in the `map_res` combinator -- renamed the `dbg!` macro to avoid conflicts with `std::dbg!` -- `separated_list` now allows empty elements - - -### Added - -- function version of regex parsers -- `fill`: attempts to fill the output slice passed as argument -- `success`: returns a value without consuming the input -- `satisfy`: checks a predicate over the next character -- `eof` function combinator -- `consumed`: returns the produced value and the consumed input -- `length_count` function combinator -- `into`: converts a parser's output and error values if `From` implementations are available -- `IResult::finish()`: converts a parser's result to `Result<(I, O), E>` by removing the distinction between `Error` and `Failure` and panicking on `Incomplete` -- non macro versions of `u16`, `i32`, etc, with configurable endianness -- `is_newline` function -- `std::error::Error` implementation for nom's error types -- recipes section of the documentation, outlining common patterns in nom -- custom errors example -- bitstream parsing with the `BitSlice` type from the bitvec crate -- native endianness parsers -- github actions for CI - -### Changed - -- allows lexical-core 0.7 -- number parsers are now generic over the input type -- stabilized the `alloc` feature -- `convert_error` accepts a type that derefs to `&str` -- the JSON example now follows the spec better - -### Fixed -- use `fold_many0c` in the `fold_many0` macro - -## 5.1.1 - 2020-02-24 - -### Thanks - -- @Alexhuszagh for float fixes -- @AlexanderEkdahl, @JoshOrndorff, @akitsu-sanae for docs fixes -- @ignatenkobrain: dependency update -- @derekdreery: `map` implementation for errors -- @Lucretiel for docs fixes and compilation fixes -- adytzu2007: warning fixes -- @lo48576: error management fixes - -### Fixed - -- C symbols compilation errors due to old lexical-core version - -### Added - -- `Err` now has a `map` function - -### Changed - -- Make `error::context()` available without `alloc` feature - -## 5.1.0 - 2020-01-07 - -### Thanks - -- @Hywan, @nickmooney, @jplatte, @ngortheone, @ejmg, @SirWindfield, @demurgos, @spazm, @nyarly, @guedou, @adamnemecek, for docs fixes -- @Alxandr for error management bugfixes -- @Lucretiel for example fixes and optimizations -- @adytzu2007 for optimizations -- @audunhalland for utf8 fixes - -### Fixed - -- panic in `convert_error` -- `compile_error` macro usage - -### Added - -- `std::error::Error`, `std::fmt::Display`, `Eq`, `ToOwned` implementations for errors -- inline attribute for `ToUsize` - -### Changed - -- `convert_error` optimization -- `alt` optimization - -## 5.0.1 - 2019-08-22 - -### Thanks - -- @waywardmonkeys, @phaazon, @dalance for docs fixes -- @kali for `many0_m_n` fixes -- @ia0 for macros fixes - -### Fixed - -- `many0_m_n` now supports the n=1 case -- relaxed trait requirements in `cut` -- `peek!` macro reimplementation -- type inference in `value!` - -## 5.0.0 - 2019-06-24 - -This version comes 
with a complete rewrite of nom internals to use functions as a base -for parsers, instead of macros. Macros have been updated to use functions under -the hood, so that most existing parsers will work directly or require minimal changes. - -The `CompleteByteSlice` and `CompleteStr` input types were removed. To get different -behaviour related to streaming or complete input, there are different versions of some -parsers in different submodules, like `nom::character::streaming::alpha0` and -`nom::character::complete::alpha0`. - -The `verbose-errors` feature is gone, now the error type is decided through a generic -bound. To get equivalent behaviour to `verbose-errors`, check out `nom::error::VerboseError` - -### Thanks - -- @lowenheim helped in refactoring and error management -- @Keruspe helped in refactoring and fixing tests -- @pingiun, @Songbird0, @jeremystucki, @BeatButton, @NamsooCho, @Waelwindows, @rbtcollins, @MarkMcCaskey for a lot of help in rewriting the documentation and adding code examples -- @GuillaumeGomez for documentation rewriting and checking -- @iosmanthus for bug fixes -- @lo48576 for error management fixes -- @vaffeine for macros visibility fixes -- @webholik and @Havvy for `escaped` and `escaped_transform` fixes -- @proman21 for help on porting bits parsers - -### Added - -- the `VerboseError` type accumulates position info and error codes, and can generate a trace with span information -- the `lexical-core` crate is now used by default (through the `lexical` compilation feature) to parse floats from text -- documentation and code examples for all functions and macros - -### Changed - -- nom now uses functions instead of macros to generate parsers -- macros now use the functions under the hood -- the minimal Rust version is now 1.31 -- the verify combinator's condition function now takes its argument by reference -- `cond` will now return the error of the parser instead of None -- `alpha*`, `digit*`, `hex_digit*`, `alphanumeric*` now recognize only ASCII characters - -### Removed - -- deprecated string parsers (with the `_s` suffix), the normal version can be used instead -- `verbose-errors` is not needed anymore, now the error type can be decided when writing the parsers, and parsers provided by nom are generic over the error type -- `AtEof`, `CompleteByteSlice` and `CompleteStr` are gone, instead some parsers are specialized to work on streaming or complete input, and provided in different modules -- character parsers that were aliases to their `*1` version: eol, alpha, digit, hex_digit, oct_digit, alphanumeric, space, multispace -- `count_fixed` macro -- `whitespace::sp` can be replaced by `character::complete::multispace0` -- method combinators are now in the nom-methods crate -- `take_until_either`, `take_until_either1`, `take_until_either_and_consume` and `take_until_either_and_consume1`: they can be replaced with `is_not` (possibly combined with something else) -- `take_until_and_consume`, `take_until_and_consume1`: they can be replaced with `take_until` combined with `take` -- `sized_buffer` and `length_bytes!`: they can be replaced with the `length_data` function -- `non_empty`, `begin` and `rest_s` function -- `cond_reduce!`, `cond_with_error!`, `closure!`, `apply`, `map_res_err!`, `expr_opt!`, `expr_res!` -- `alt_complete`, `separated_list_complete`, `separated_nonempty_list_complete` - -## 4.2.3 - 2019-03-23 - -### Fixed - -- add missing `build.rs` file to the package -- fix code comparison links in changelog - -## 4.2.2 - 2019-03-04 - -### Fixed - -- 
regression in do_parse macro import for edition 2018 - -## 4.2.1 - 2019-02-27 - -### Fixed - -- macro expansion error in `do_parse` due to `compile_error` macro usage - -## 4.2.0 - 2019-01-29 - -### Thanks - -- @JoshMcguigan for unit test fixes -- @oza for documentation fixes -- @wackywendell for better error conversion -- @Zebradil for documentation fixes -- @tsraom for new combinators -- @hcpl for minimum Rust version tests -- @KellerFuchs for removing some unsafe uses in float parsing - -### Changed - -- macro import in edition 2018 code should work without importing internal macros now -- the regex parsers do not require the calling code to have imported the regex crate anymore -- error conversions are more ergonomic -- method combinators are now deprecated. They might be moved to a separate crate -- nom now specifies Rust 1.24.1 as minimum version. This was already the case before, now it is made explicit - -### Added - -- `many0_count` and `many1_count` to count applications of a parser instead of -accumulating its results in a `Vec` - -### Fixed - -- overflow in the byte wrapper for bit level parsers -- `f64` parsing does not use `transmute` anymore - -## 4.1.1 - 2018-10-14 - -### Fixed - -- compilation issue in verbose-errors mode for `add_return_error` - -## 4.1.0 - 2018-10-06 - -### Thanks - -- @xfix for fixing warnings, simplifying examples and performance fixes -- @dvberkel for documentation fixes -- @chifflier for fixing warnings -- @myrrlyn for dead code elimination -- @petrochenkov for removing redundant test macros -- @tbelaire for documentation fixes -- @khernyo for fixing warnings -- @linkmauve for documentation fixes -- @ProgVal for documentation fixes, warning fixes and error management -- @Nemo157 for compilation fixes -- @RReverser for documentation fixes -- @xpayn for fixing warnings -- Blas Rodriguez Irizar for documentation fixes -- @badboy for documentation fixes -- @kyrias for compilation fixes -- @kurnevsky for the `rest_len` parser -- @hjr3 for new documentation examples -- @fengalin for error management -- @ithinuel for the pcap example project -- @phaazon for documentation fixes -- @juchiast for documentation fixes -- @jrakow for the `u128` and `i128` parsers -- @smarnach for documentation fixes -- @derekdreery for `pub(crate)` support -- @YaLTeR for `map_res_err!` - -### Added - -- `rest_len` parser, returns the length of the remaining input -- `parse_to` has its own error code now -- `u128` and `i128` parsers in big and little endian modes -- support for `pub(crate)` syntax -- `map_res_err!` combinator that appends the error of its argument function in verbose errors mode - -### Fixed - -- lots of unused imports warnings were removed -- the `bytes` combinator was not compiling in some cases -- the big and little endian combinators now work without external imports -- CI is now faster and uses less cache -- in `add_return_error`, the provided error code is now evaluated only once - -### Changed - -- `fold_many1` will now transmit a `Failure` instead of transforming it to an `Error` -- `float` and `double` now work on all of nom's input types (`&[u8]`, `&str`, `CompleteByteSlice`, `CompleteStr` and any type that implements the required traits). 
`float_s` and `double_s` got the same modification, but are now deprecated -- `CompleteByteSlice` and `CompleteStr` get a small optimization by inlining some functions - - -## 4.0.0 - 2018-05-14 - -### Thanks - -- @jsgf for the new `AtEof` trait -- @tmccombs for fixes on `escaped*` combinators -- @s3bk for fixes around non Copy input types and documentation help -- @kamarkiewicz for fixes to no_std and CI -- @bheisler for documentation and examples -- @target-san for simplifying the `InputIter` trait for `&[u8]` -- @willmurphyscode for documentation and examples -- @Chaitanya1416 for typo fixes -- @fflorent for `input_len()` usage fixes -- @dbrgn for typo fixes -- @iBelieve for no_std fixes -- @kpp for warning fixes and clippy fixes -- @keruspe for fixes on FindToken -- @dtrebbien for fixes on take_until_and_consume1 -- @Henning-K for typo fixes -- @vthriller for documentation fixes -- @federicomenaquintero and @veprbl for their help fixing the float parsers -- @vmchale for new named_args versions -- @hywan for documentation fixes -- @fbenkstein for typo fixes -- @CAD97 for catching missing trait implementations -- @goldenlentils for &str optimizations -- @passy for typo fixes -- @ayrat555 for typo fixes -- @GuillaumeGomez for documentation fixes -- @jrakow for documentation fixes and fixes for `switch!` -- @phlosioneer for documentation fixes -- @creativcoder for typo fixes -- @derekdreery for typo fixes -- @lucasem for implementing `Deref` on `CompleteStr` and `CompleteByteSlice` -- @lowenheim for `parse_to!` fixes -- @myrrlyn for trait fixes around `CompleteStr` and `CompleteByteSlice` -- @NotBad4U for fixing code coverage analysis -- @murarth for code formatting -- @glandium for fixing build in no_std -- @csharad for regex compatibility with `CompleteStr` -- @FauxFaux for implementing `AsRef` on `CompleteStr` -- @jaje for implementing `std::Error` on `nom::Err` -- @fengalin for warning fixes -- @khernyo for doc formatting - -Special thanks to @corkami for the logo :) - -### Breaking changes - -- the `IResult` type now becomes a `Result` from the standard library -- `Incomplete` now returns the additional data size needed, not the total data size needed -- verbose-errors is now a superset of basic errors -- all the errors now include the related input slice -- the arguments from `error_position` and other such macros were swapped to be more consistent with the rest of nom -- automatic error conversion: to fix error type inference issues, a custom error type must now implement `std::convert::From` -- the `not!` combinator returns unit `()` -- FindToken's calling convention was swapped -- the `take_*` combinators are now more coherent and stricter, see commit 484f6724ea3ccb for more information -- `many0` and other related parsers will now return `Incomplete` if they reach the end of input without an error from the child parser. They will also return `Incomplete` on an empty input -- the `sep!` combinator for whitespace only consumes whitespace in the prefix, while the `ws!` combinator takes care of consuming the remaining whitespace - -### Added - -- the `AtEof` trait for input types: indicates if we can get more input data later (related to streaming parsers and `Incomplete` handling) -- the `escaped*` parsers now support the `&str` input type -- the `Failure` error variant represents an unrecoverable error, for which `alt` and other combinators will not try other branches.
This error means we got in the right part of the code (like, a prefix was checked correctly), but there was an error in the following parts -- the `CompleteByteSlice` and `CompleteStr` input types consider there will be no more refill of the input. They fixed the `Incomplete` related issues when we have all of the data -- the `exact!()` combinator will fail if we did not consume the whole input -- the `take_while_m_n!` combinator will match a specified number of characters -- `ErrorKind::TakeUntilAndConsume1` -- the `recognize_float` parser will match a float number's characters, but will not transform to a `f32` or `f64` -- `alpha` and other basic parsers are now much stricter about partial inputs. We also introduce the `*0` and `*1` versions of those parsers -- `named_args` can now specify the input type as well -- `HexDisplay` is now implemented for `&str` -- `alloc` feature -- the `InputTakeAtposition` trait allows specialized implementations of parsers like `take_while!` - -### Removed - -- the producers and consumers were removed -- the `error_code` and `error_node` macros are not used anymore - -### Fixed - -- `anychar!` now works correctly with multibyte characters -- `take_until_and_consume1!` no longer results in "no method named \`find_substring\`" and "no method named \`slice\`" compilation errors -- `take_until_and_consume1!` returns the correct Incomplete(Needed) amount -- `no_std` compiles properly, and nom can work with `alloc` too -- `parse_to!` now consumes its input - -### Changed - -- `alt` and other combinators will now clone the input if necessary. If the input is already `Copy` there is no performance impact -- the `rest` parser now works on various input types -- `InputIter::Item` for `&[u8]` is now a `u8` directly, not a reference -- we now use the `compile_error` macro to return a compile time error if there was a syntax issue -- the permutation combinator now supports optional child parsers -- the float numbers parsers have been refactored to use one common implementation that is nearly 2 times faster than the previous one -- the float number parsers now accept more variants - - -## 3.2.1 - 2017-10-27 - -### Thanks - -- @ordian for `alt_complete` fixes -- @friedm for documentation fixes -- @kali for improving error management - -### Fixed - -- there were cases where `alt_complete` could return `Incomplete` - -### Added - -- an `into_error_kind` method can be used to transform any error to a common value. 
This helps when the library is included multiple times as a dependency with different feature sets - - -## 3.2.0 - 2017-07-24 - -### Thanks - -- @jedireza for documentation fixes -- @gmorenz for the `bytes` combinator -- @meh for character combinator fixes for UTF-8 -- @jethrogb for avoiding move issues in `separated_list` - -### Changed - -- new layout for the main page of documentation -- `anychar` can now work on any input type -- `length_bytes` is now an alias for `length_data` - -### Fixed - -- `one_of`, `none_of` and `char` will now index UTF-8 characters correctly -- the `compiler_error` macro is now correctly exported - - -### Added - -- the `bytes` combinator transforms a bit stream back to a byte slice for child parsers - -## 3.1.0 - 2017-06-16 - -### Thanks - -- @sdroege: implementing be_i24 and le_i24 -- @Hywan: integrating faster substring search using memchr -- @nizox: fixing type issues in bit stream parsing -- @grissiom: documentation fixes -- @doomrobo: implementing separated_list_complete and separated_nonempty_list_complete -- @CWood1: fixing memchr integration in no_std -- @lu_zero: integrating the compiler_error crate -- @dtolnay: helping debug a type inference issue in map - -### Changed - -- memchr is used for substring search if possible -- if building on nightly, some common syntax errors will display a specific error message. If building on stable, documentation on how to activate those messages is displayed -- `count` no longer preallocates its vector - -### Fixed - -- better type inference in alt_complete -- `alt` should now work with whitespace parsing -- `map` should not make type inference errors anymore - -### Added - -- be_i24 and le_i24, parsing big endian and little endian signed 24 bit integers -- `separated_list_complete` and `separated_nonempty_list_complete` will treat `Incomplete` from sub parsers as an error - -## 3.0.0 - 2017-05-12 - -### Thanks - -- Chris Pick for some `Incomplete` related refactors -- @dbrgn for documentation fixes -- @valarauca for adding `be_u24` -- @ithinuel for usability fixes -- @evuez for README readability fixes and improvements to `IResult` -- @s3bk for allowing non-`Copy` types as input -- @keruspe for documentation fixes -- @0xd34d10cc for trait fixes on `InputIter` -- @sdleffler for lifetime shenanigans on `named_args` -- @chengsun for type inference fixes in `alt` -- @iBelieve for adding str to no_std -- @Hywan for simplifying code in input traits -- @azerupi for extensive documentation of `alt` and `alt_complete` - -### Breaking Changes - -- `escaped`, `separated_list` and `separated_nonempty_list` can now return `Incomplete` when necessary -- `InputIter` does not require `AsChar` on its `Item` type anymore -- the `core` feature that was putting nom in `no_std` mode has been removed. There is now a `std` feature, activated by default.
If it is not activated, nom is in `no_std` -- in `verbose-errors` mode, the error list is now stored in a `Vec` instead of a box based linked list -- `chain!` has finally been removed - -### Changed - -- `Endianness` now implements `Debug`, `PartialEq`, `Eq`, `Clone` and `Copy` -- custom input types can now be cloned if they're not `Copy` -- the infamous 'Cannot infer type for E' error should happen less often now -- `str` is now available in `no_std` mode - -### Fixed - -- `FileProducer` will be marked as `Eof` on full buffer -- `named_args!` now has lifetimes that cannot conflict with the lifetimes from other arguments - -### Added - -- `be_u24`: big endian 24 bit unsigned integer parsing -- `IResult` now has a `unwrap_or` method - - -## 2.2.1 - 2017-04-03 - -### Thanks - -- @Victor-Savu for formatting fixes in the README -- @chifflier for detecting and fixing integer overflows -- @utkarshkukreti for some performance improvements in benchmarks - -### Changed - -- when calculating how much data is needed in `IResult::Incomplete`, the addition could overflow (it is stored as a usize). This would apparently not result in any security vulnerability on release code - -## 2.2.0 - 2017-03-20 - -### Thanks - -- @seppo0010 for fixing `named_args` -- @keruspe for implementing or() on `IResult`, adding the option of default cases in `switch!`, adding support for `cargo-travis` -- @timlyo for documentation fixes -- @JayKickliter for extending `hex_u32` -- @1011X for fixing regex integration -- @Kerollmops for actually marking `chain!` as deprecated -- @joliss for documentation fixes -- @utkarshkukreti for tests refactoring and performance improvement -- @tmccombs for documentation fixes - -### Added - -- `IResult` gets an `or()` method -- `take_until1`, `take_until_and_consume1`, `take_till1!` and `take_till1_s!` require at least 1 character - -### Changed - -- `hex_u32` accepts uppercase digits as well -- the character based combinators leverage the input traits -- the whitespace parsers now work on &str and other types -- `take_while1` returns `Incomplete` on empty input -- `switch!` can now take a default case - -### Fixed - -- `named_args!` now imports `IResult` directly -- the upgrade to regex 0.2 broke the regex combinators, they work now - -## 2.1.0 - 2017-01-27 - -### Thanks - -- @nickbabcock for documentation fixes -- @derekdreery for documentation fixes -- @DirkyJerky for documentation fixes -- @saschagrunert for documentation fixes -- @lucab for documentation fixes -- @hyone for documentation fixes -- @tstorch for factoring `Slice` -- @shepmaster for adding crate categories -- @antoyo for adding `named_args!` - -### Added - -- `verify!` uses a first parser, then applies a function to check that its result satisfies some conditions -- `named_args!` creates a parser function that can accept other arguments along with the input -- `parse_to!` will use the `parse` method from `FromStr` to parse a value. It will automatically translate the input to a string if necessary -- `float`, `float_s`, `double`, `double_s` can recognize floating point numbers in text - -### Changed - -- `escaped!` will now return `Incomplete` if needed -- `permutation!` supports up to 20 child parsers - -## 2.0.1 - 2016-12-10 - -Bugfix release - -*Warning*: there is a small breaking change, `add_error!` is renamed to `add_return_error!`. This was planned for the 2.0 release but was forgotten. 
This is a small change in a feature that not many people use, for a release that is not yet widely in use, so there will be no 3.0 release for that change. - -### Thanks - -- @nickbabcock for catching and fixing the `add_error!` mixup -- @lucab for documentation fixes -- @jtdowney for noticing that `tag_no_case!` was not working at all for byte slices - -### Fixed - -- `add_error!` has been renamed to `add_return_error!` -- the `not!` combinator now accepts functions -- `tag_no_case!` is now working as expected (before, it accepted everything) - - -## 2.0 - 2016-11-25 - -The 2.0 release is one of the biggest yet. It was a good opportunity to clean up some badly named combinators and fix invalid behaviours. - -Since this version introduces a few breaking changes, an [upgrade documentation](https://github.com/Geal/nom/blob/main/doc/upgrading_to_nom_2.md) is available, detailing the steps to fix the most common migration issues. After testing on a set of 30 crates, most of them will build directly, a large part will just need to activate the "verbose-errors" compilation feature. The remaining fixes are documented. - -This version also adds a lot of interesting features, like the permutation combinator or whitespace separated formats support. - -### Thanks - -- @lu-zero for license help -- @adamgreig for type inference fixes -- @keruspe for documentation and example fixes, for the `IResult => Result` conversion work, making `AsChar`'s method more consistent, and adding `many_till!` -- @jdeeny for implementing `Offset` on `&str` -- @vickenty for documentation fixes and his refactoring of `length_value!` and `length_bytes!` -- @overdrivenpotato for refactoring some combinators -- @taralx for documentation fixes -- @keeperofdakeys for fixing eol behaviour, writing documentation and adding `named_attr!` -- @jturner314 for writing documentation -- @bozaro for fixing compilation errors -- @uniphil for adding a `crates.io` badge -- @badboy for documentation fixes -- @jugglerchris for fixing `take_s!` -- @AndyShiue for implementing `Error` and `Display` on `ErrorKind` and detecting incorrect UTF-8 string indexing - -### Added - -- the "simple" error management system does not accumulate errors when backtracking. This is a big perf gain, and is activated by default in nom 2.0 -- nom can now work on any type that implements the traits defined in `src/traits.rs`: `InputLength`, `InputIter`, `InputTake`, `Compare`, `FindToken`, `FindSubstring`, `Slice` -- the documentation from Github's wiki has been moved to the `doc/` directory. They are markdown files that you can build with [cargo-external-doc](https://crates.io/crates/cargo-external-doc) -- whitespace separated format support: with the `ws!` combinator, you can automatically introduce whitespace parsers between all parsers and combinators -- the `permutation!` combinator applies its child parsers in any order, as long as they all succeed once, and returns a tuple of the results -- `do_parse!` is a simpler alternative to `chain!`, which is now deprecated -- you can now transform an `IResult` into a `std::result::Result` -- `length_data!` parses a length, and returns a subslice of that length -- `tag_no_case!` provides case independent comparison.
It works nicely, without any allocation, for ASCII strings, but for UTF-8 strings, it defaults to an unsatisfying (and incorrect) comparison by lowercasing both strings -- `named_attr!` creates functions like `named!` but can add attributes like documentation -- `many_till!` repeatedly applies its first child parser until the second succeeds - -### Changed - -- the "verbose" error management that was available in previous versions is now activated by the "verbose-errors" compilation feature -- code reorganization: most of the parsers were moved into separate files to make the source easier to navigate -- most of the combinators are now independent from the input type -- the `eof` function was replaced with the `eof!` macro -- `error!` and `add_error!` were replaced with `return_error!` and `add_return_error!` to fix the name conflict with the log crate -- the `offset()` method is now in the `Offset` trait -- `length_value!` has been renamed to `length_count!`. The new `length_value!` selects a slice and applies the second parser once on that slice -- `AsChar::is_0_to_9` is now `AsChar::is_dec_digit` -- the combinators with configurable endianness now take an enum instead of a boolean as parameter - -### Fixed -- the `count!`, `count_fixed!` and `length_*!` combinators calculate incomplete data needs correctly -- `eol`, `line_ending` and `not_line_ending` now have a consistent behaviour that works correctly with incomplete data -- `take_s!` didn't correctly handle the case when the slice is exactly the right length - -## 1.2.4 - 2016-07-20 - -### Thanks -- @Phlosioneer for documentation fixes -- @sourrust for fixing offsets in `take_bits!` -- @ChrisMacNaughton for the XFS crate -- @pwoolcoc for `rest_s` -- @fitzgen for more `IResult` methods -- @gtors for the negative lookahead feature -- @frk1 and @jeandudey for little endian float parsing -- @jethrogb for fixing input usage in `many1` -- @acatton for beating me at nom golf :D - -### Added -- the `rest_s` method on `IResult` returns the remaining `&str` input -- `unwrap_err` and `unwrap_inc` methods on `IResult` -- `not!` will peek at the input and return `Done` if the underlying parser returned `Error` or `Incomplete`, without consuming the input -- `le_f32` and `le_f64` parse little endian floating point numbers (IEEE 754) - -### Fixed -- documentation fixes -- `take_bits!` is now more precise -- `many1` incorrectly used the `len` function instead of `input_len` -- the INI parser is simpler -- `recognize!` had an early `return` that is removed now - -## 1.2.3 - 2016-05-10 - -### Thanks -- @lu-zero for the contribution guidelines -- @GuillaumeGomez for fixes on `length_bytes` and some documentation -- @Hywan for documentation and test fixes -- @Xirdus for correcting trait import issues -- @mspiegel for the new AST example -- @cholcombe973 for adding the `cond_with_error!` combinator -- @tstorch for refactoring `many0!` -- @panicbit for the folding combinators -- @evestera for `separated_list!` fixes -- @DanielKeep for correcting some enum imports - -### Added -- Regular expression combinators starting with `re_bytes_` work on byte slices -- example parsing arithmetic expressions to an AST -- `cond_with_error!` works like `cond!` but will return `None` if the condition is false, and `Some(value)` if the underlying parser succeeded -- `fold_many0!`, `fold_many1!` and `fold_many_m_n!` will take a parser, an initial value and a combining function, and fold over the successful applications of the parser - -### Fixed -- `length_bytes!` converts
the result of its child parser to usize -- `take_till!` now imports `InputLength` instead of assuming it's in scope -- `separated_list!` and `separated_nonempty_list!` will not consume the separator if there's no following successfully parsed value -- no more warnings on build - -### Changed -- simpler implementation of `many0!` - -## 1.2.2 - 2016-03-09 - -### Thanks -- @conradev for fixing `take_until_s!` -- @GuillaumeGomez for some documentation fixes -- @frewsxcv for some documentation fixes -- @tstorch for some test refactorings - -### Added -- `nom::Err` now implements `std::error::Error` - -### Fixed -- `hex_u32` does not parse more than 8 chars now -- `take_while!` and `take_while1!` will not perturb the behaviour of `recognize!` anymore - -## 1.2.1 - 2016-02-23 - -### Thanks -- @sourrust for adding methods to `IResult` -- @tstorch for the test refactoring, and for adding methods to `IResult` and `Needed` -- @joelself for fixing the method system - -### Added - -- mapping methods over `IResult` and `Needed` - -### Changed - -- `apply_rf` is renamed to `apply_m`. This will not warrant a major version, since it is a missing part of the methods feature added in the 1.2.0 release -- the `regexp_macros` feature that used `regex!` to precompile regular expressions has been replaced by the normal regex engine combined with `lazy_static` - -### Fixed - -- when a parser or combinator was returning an empty buffer as remaining part, it was generating one from a static empty string. This was messing with buffer offset calculation. Now, that empty slice is taken like this: `&input[input.len()..]`. -- The `regexp_macros` and `no_std` features build again and are now tested with Travis CI - -## 1.2.0 - 2016-02-08 - -### Thanks -- @zentner-kyle for type inference fixes -- @joelself for his work on `&str` parsing and method parsers -- @GuillaumeGomez for implementing methods on `IResult` -- @dirk for the `alt_complete!` combinator -- @tstorch for a lot of refactoring work and unit tests additions -- @jansegre for the hex digit parsers -- @belgum for some documentation fixes -- @lwandrebeck for some documentation fixes and code fixes in `hex_digit` - -### Added -- `take_until_and_consume_s!` for consumption of string data until a tag -- more function patterns in `named!`. The error type can now be specified -- `alt_complete!` works like the `alt!` combinator, but tries the next branch if the current one returned `Incomplete`, instead of returning directly -- more unit tests for a lot of combinators -- hexadecimal digit parsers -- the `tuple!` combinator takes a list of parsers as argument, and applies them serially on the input. If all of them are successful, it will return a tuple accumulating all the values. This combinator will (hopefully) replace most uses of `chain!` -- parsers can now be implemented as a method for a struct thanks to the `method!`, `call_m!` and `apply_rf!` combinators - -### Fixed -- there were type inference issues in a few combinators. They will now be easier to compile -- `peek!` compilation with bare functions -- `&str` parsers were splitting data at the byte level, not at the char level, which can result in inconsistencies in parsing UTF-8 characters. They now use character indexes -- some method implementations were missing on `IResult` (with specified error type instead of implicit) - -## 1.1.0 - 2016-01-01 - -This release adds a lot of features related to `&str` parsing.
The previous versions -were focused on `&[u8]` and bit stream parsing, but there's a need for more text -parsing with nom. The parsing functions like `alpha`, `digit` and others will now -accept either a `&[u8]` or a `&str`, so there is no breaking change on that part. - -There are also a few performance improvements and documentation fixes. - -### Thanks -- @Binero for pushing the work on `&str` parsing -- @meh for fixing `Option` and `Vec` imports -- @hoodie for a documentation fix -- @joelself for some documentation fixes -- @vberger for his traits magic making nom functions more generic - -### Added - -- string related parsers: `tag_s!`, `take_s!`, `is_a_s!`, `is_not_s!`, `take_while_s!`, `take_while1_s!`, `take_till_s!` -- `value!` is a combinator that always returns the same value. If a child parser is passed as second argument, that value is returned when the child parser succeeds - -### Changed - -- `tag!` will now compare even on partial input. If it expects "abcd" but receives "ef", it will now return an `Error` instead of `Incomplete` -- `many0!` and others will preallocate a larger vector to avoid some copies and reallocations -- `alpha`, `digit`, `alphanumeric`, `space` and `multispace` now accept as input a `&[u8]` or a `&str`. Additionally, they return an error if they receive an empty input -- `take_while!`, `take_while1!`, `take_while_s!`, `take_while1_s!` will return an error on empty input - -### Fixed - -- if the child parser of `many0!` or `many1!` returns `Incomplete`, it will return `Incomplete` too, possibly updating the needed size -- `Option`, `Some`, `None` and `Vec` are now used with full path imports - -## 1.0.1 - 2015-11-22 - -This release makes the 1.0 version compatible with Rust 1.2 and 1.3 - -### Thanks -- @steveklabnik for fixing lifetime issues in Producers and Consumers - -## 1.0.0 - 2015-11-16 - -Stable release for nom. A lot of new features, a few breaking changes - -### Thanks -- @ahenry for macro fixes -- @bluss for fixing documentation -- @sourrust for cleaning code and debugging the new streaming utilities -- @meh for inline optimizations -- @ccmtaylor for fixing function imports -- @soro for improvements to the streaming utilities -- @breard-r for catching my typos -- @nelsonjchen for catching my typos too -- @divarvel for hex string parsers -- @mrordinaire for the `length_bytes!` combinator - -### Breaking changes -- `IResult::Error` can now use custom error types, and is generic over the input type -- Producers and consumers have been replaced. The new implementation uses less memory and integrates more with parsers -- `nom::ErrorCode` is now `nom::ErrorKind` -- `filter!` has been renamed to `take_while!` -- `chain!` will count how much data is consumed and use that number to calculate how much data is needed if a parser returned `Incomplete` -- `alt!` returns `Incomplete` if a child parser returned `Incomplete`, instead of skipping to the next parser -- `IResult` does not require a lifetime tag anymore, yay! - -### Added - -- `complete!` will return an error if the child parser returned `Incomplete` -- `add_error!` will wrap an error, but allow backtracking -- `hex_u32` parser - -### Fixed -- the behaviour around `Incomplete` is better for most parsers now - -## 0.5.0 - 2015-10-16 - -This release fixes a few issues and stabilizes the code.
- -### Thanks -- @nox for documentation fixes -- @daboross for linting fixes -- @ahenry for fixing `tap!` and extending `dbg!` and `dbg_dmp!` -- @bluss for tracking down and fixing issues with unsafe code -- @meh for inlining parser functions -- @ccmtaylor for fixing import of `str::from_utf8` - -### Fixed -- `tap!`, `dbg!` and `dbg_dmp!` now accept function parameters - -### Changed -- the type used in `count_fixed!` must be `Copy` -- `chain!` calculates how much data is needed if one of the parsers returns `Incomplete` -- optional parsers in `chain!` can return `Incomplete` - -## 0.4.0 - 2015-09-08 - -Considering the number of changes since the last release, this version can contain breaking changes, so the version number becomes 0.4.0. A lot of new features and performance improvements! - -### Thanks -- @frewsxcv for documentation fixes -- @ngrewe for his work on producers and consumers -- @meh for fixes on `chain!` and for the `rest` parser -- @daboross for refactoring `many0!` and `many1!` -- @aleksander for the `switch!` combinator idea -- @TechnoMancer for his help with bit level parsing -- @sxeraverx for pointing out a bug in `is_a!` - -### Fixed -- `count_fixed!` must take an explicit type as argument to generate the fixed-size array -- optional parsing behaviour in `chain!` -- `count!` can take 0 elements -- `is_a!` and `is_not!` can now consume the whole input - -### Added -- it is now possible to seek to the end of a `MemProducer` -- `opt!` returns `Done(input, None)` if the child parser returned `Incomplete` -- `rest` will return the remaining input -- consumers can now seek to and from the end of input -- `switch!` applies a first parser then matches on its result to choose the next parser -- bit-level parsers -- character-level parsers -- regular expression parsers -- implementation of `take_till!`, `take_while!` and `take_while1!` - -### Changed -- `alt!` can return `Incomplete` -- the error analysis functions will now take references to functions instead of moving them -- performance improvements on producers -- performance improvement for `filter!` -- performance improvement for `count!`: a `Vec` of the right size is directly allocated - -## 0.3.11 - 2015-08-04 - -### Thanks -- @bluss for remarking that the crate included random junk lying uncommitted in my local repository - -### Fixed -- cleanup of my local repository will ship fewer files in the crates, resulting in a smaller download - -## 0.3.10 - 2015-08-03 - -### Added - -- `bits!` for bit level parsing. It indicates that all child parsers will take a `(&[u8], usize)` as input, with the second parameter indicating the bit offset in the first byte. This allows viewing a byte slice as a bit stream. Most combinators can be used directly under `bits!` -- `take_bits!` takes an integer type and a number of bits, consumes that number of bits and updates the offset, possibly by crossing byte boundaries -- bit level parsers are all written in `src/bits.rs` - -### Changed - -- Parsers that specifically handle bytes have been moved to `src/bytes.rs`. This applies to `tag!`, `is_not!`, `is_a!`, `filter!`, `take!`, `take_str!`, `take_until_and_consume!`, `take_until!`, `take_until_either_and_consume!`, `take_until_either!` - -## 0.3.9 - 2015-07-20 - -### Thanks -- @badboy for fixing `filter!` -- @idmit for some documentation fixes - -### Added -- `opt_res!` applies a parser and transforms its result into a Result.
This parser never fails -- `cond_reduce!` takes an expression as parameter, applies the parser if the expression is true, and returns an error if the expression is false -- `tap!` passes the result of a parser to a block to manipulate it, but does not affect the parser's result -- `AccReader` is a Read+BufRead that supports data accumulation and partial consumption. The `consume` method must be called afterwards to indicate how much was consumed -- Arithmetic expression evaluation and parsing example -- `u16!`, `u32!`, `u64!`, `i16!`, `i32!`, `i64!` take an expression as parameter, if the expression is true, apply the big endian integer parser, if false, the little endian version -- type information for combinators. This will make the documentation a bit easier to navigate - -### Fixed -- `map_opt!` and `map_res!` had issues with argument order due to bad macros -- `delimited!` did not compile for certain combinations of arguments -- `filter!` did not return a byte slice but a fixed array - -## 0.3.8 - 2015-07-03 - -### Added -- code coverage is now calculated automatically on Travis CI -- `Stepper`: wrap a `Producer`, and call the method `step` with a parser. This method will buffer data if there is not enough, apply the parser if there is, and keep the rest of the input in memory for the next call -- `ReadProducer`: takes something implementing `Read`, and makes a `Producer` out of it - -### Fixed -- the combinators `separated_pair!` and `delimited!` did not work because an implementation macro was not exported -- if a `MemProducer` reached its end, it should always return `Eof` -- `map!` had issues with argument matching - -## 0.3.7 - 2015-06-24 - -### Added -- `expr_res!` and `expr_opt!` evaluate an expression returning a Result or an Option and convert it to IResult -- `AsBytes` is implemented for fixed size arrays. This allows `tag!([41u8, 42u8])` - -### Fixed -- `count_fixed!` argument parsing works again - -## 0.3.6 - 2015-06-15 - -### Added -- documentation for a few functions -- the consumer trait now requires the `failed(&self, error_code)` method in case of parsing error -- `named!` now handles the alternative `named!(pub fun_name, ...)` - -### Fixed -- `filter!` now returns the whole input if the filter function never returned false -- `take!` casts its argument as usize, so it can accept any integer type now - -## 0.3.5 - 2015-06-10 - -### Thanks -- @cmr for some documentation fixes - -### Added -- `count_fixed!` returns a fixed array - -### Fixed -- `count!` is back to the previous behaviour, returning a `Vec` for sizes known at runtime - -### Changed -- functions and traits exported from `nom::util` are now directly in `nom::` - -## 0.3.4 - 2015-06-09 - -### Thanks -- @andrew-d for fixes on `cond!` -- @keruspe for features in `chain!` - -### Added -- `chain!` can now have mutable fields - -### Fixed -- `cond!` had an infinite macro recursion - -### Changed -- `chain!` generates less code now.
No apparent compilation time improvement - -## 0.3.3 - 2015-06-09 - -### Thanks -- @andrew-d for the little endian signed integer parsers -- @keruspe for fixes on `count!` - -### Added -- `le_i8`, `le_i16`, `le_i32`, `le_i64`: little endian signed integer parsers - -### Changed -- the `alt!` parser compiles much faster, even with more than 8 branches -- `count!` can now return a fixed size array instead of a growable vector - -## 0.3.2 - 2015-05-31 - -### Thanks -- @keruspe for the `take_str` parser and the function application combinator - -### Added -- `take_str!`: takes the specified number of bytes and returns a UTF-8 string -- `apply!`: do partial application on the parameters of a function - -### Changed -- `Needed::Size` now contains a `usize` instead of a `u32` - -## 0.3.1 - 2015-05-21 - -### Thanks -- @divarvel for the big endian signed integer parsers - -### Added -- `be_i8`, `be_i16`, `be_i32`, `be_i64`: big endian signed integer parsers -- the `core` feature can be passed to cargo to build with `no_std` -- colored hexdump can be generated from error chains - -## 0.3.0 - 2015-05-07 - -### Thanks -- @filipegoncalves for some documentation and the new eof parser -- @CrimsonVoid for putting fully qualified types in the macros -- @lu_zero for some documentation fixes - -### Added -- new error types that can contain an error code, an input slice, and a list of following errors -- `error!` will cut backtracking and return directly from the parser, with a specified error code -- `eof` parser, successful if there is no more input -- specific error codes for the parsers provided by nom - -### Changed -- fully qualified types in macros. A lot of imports are not needed anymore - -### Removed -- `FlatMap`, `FlatMapOpt` and `Functor` traits (replaced by `map!`, `map_opt!` and `map_res!`) - -## 0.2.2 - 2015-04-12 - -### Thanks -- @filipegoncalves and @thehydroimpulse for debugging an infinite loop in many0 and many1 -- @thehydroimpulse for suggesting public named parsers -- @skade for removing the dependency on the collections gate - -### Added -- `named!` can now declare public functions like this: `named!(pub tst, tag!("abcd"));` -- `pair!(X,Y)` returns a tuple `(x, y)` -- `separated_pair!(X, sep, Y)` returns a tuple `(x, y)` -- `preceded!(opening, X)` returns `x` -- `terminated!(X, closing)` returns `x` -- `delimited(opening, X, closing)` returns `x` -- `separated_list(sep, X)` returns a `Vec` -- `separated_nonempty_list(sep, X)` returns a `Vec` of at least one element - -### Changed -- `many0!` and `many1!` forbid parsers that do not consume input -- `is_a!`, `is_not!`, `alpha`, `digit`, `space`, `multispace` will now return an error if they do not consume at least one byte - -## 0.2.1 - 2015-04-04 - -### Thanks -- @mtsr for catching the remaining debug println! -- @jag426 who killed a lot of warnings -- @skade for removing the dependency on the core feature gate - - -### Added -- little endian unsigned int parsers le_u8, le_u16, le_u32, le_u64 -- `count!` to apply a parser a specified number of times -- `cond!` applies a parser if the condition is met -- more parser development tools in `util::*` - -### Fixed -- in one case, `opt!` would not compile - -### Removed -- most of the feature gates are now removed.
The only one still needed is `collections` - -## 0.2.0 - 2015-03-24 -*works with `rustc 1.0.0-dev (81e2396c7 2015-03-19) (built 2015-03-19)`* - -### Thanks -- Ryman for the AsBytes implementation -- jag426 and jaredly for documentation fixes -- eternaleye on #rust IRC for his help on the new macro syntax - -### Changed -- the AsBytes trait improves readability, no more b"...", but "..." instead -- Incomplete will now hold either Needed::Unknown, or Needed::Size(u32). Matching on Incomplete without caring for the value is done with `Incomplete(_)`, but if more granularity is mandatory, `Needed` can be matched too -- `alt!` can pass the result of the parser to a closure -- the `take_*` macros changed behaviour, the default case is now not to consume the separator. The macros have been renamed as follows: `take_until!` -> `take_until_and_consume!`, `take_until_and_leave!` -> `take_until!`, `take_until_either_and_leave!` -> `take_until_either!`, `take_until_either!` -> `take_until_either_and_consume!` - -### Added -- `peek!` macro: matches the future input but does not consume it -- `length_value!` macro: the first argument is a parser returning an `n` that can be cast to usize, then applies the second parser `n` times. The macro has a variant with a third argument indicating the expected input size for the second parser -- benchmarks are available at https://github.com/Geal/nom_benchmarks -- more documentation -- **Unnamed parser syntax**: warning, this is a breaking change. With this new syntax, the macro combinators do not generate functions anymore, they create blocks. That way, they can be nested, for better readability. The `named!` macro is provided to create functions from parsers. Please be aware that nesting parsers comes with a small cost of compilation time, negligible in most cases, but can quickly get to the minutes scale if not careful. If this happens, separate your parsers into multiple subfunctions. -- `named!`, `closure!` and `call!` macros used to support the unnamed syntax -- `map!`, `map_opt!` and `map_res!` to combine a parser with a normal function, transforming the input directly, or returning an `Option` or `Result` - -### Fixed -- `is_a!` is now working properly - -### Removed -- the `o!` macro does less than `chain!`, so it has been removed -- the `fold0!` and `fold1!` macros were too complex and awkward to use, the `many*` combinators will be useful for most uses for now - -## 0.1.6 - 2015-02-24 -### Changed -- consumers must have an end method that will be called after parsing - -### Added -- big endian unsigned int and float parsers: be_u8, be_u16, be_u32, be_u64, be_f32, be_f64 -- producers can seek -- function and macros documentation -- README documentation -### Fixed -- lifetime declarations -- tag! can return Incomplete - -## 0.1.5 - 2015-02-17 -### Changed -- traits were renamed: FlatMapper -> FlatMap, Mapper -> FlatMapOpt, Mapper2 -> Functor - -### Fixed -- works with rustc f1bb6c2f4 - -## 0.1.4 - 2015-02-17 -### Changed -- the chaining macro can take optional arguments with '?'
- -## 0.1.3 - 2015-02-16 -### Changed -- the chaining macro now takes the closure at the end of the argument list - -## 0.1.2 - 2015-02-16 -### Added -- flat_map implementation for <&[u8], &[u8]> -- chaining macro -- partial MP4 parser example - - -## 0.1.1 - 2015-02-06 -### Fixed -- closure syntax change - -## Compare code - -* [unreleased](https://github.com/Geal/nom/compare/7.1.3...HEAD) -* [7.1.2](https://github.com/Geal/nom/compare/7.1.2...7.1.3) -* [7.1.2](https://github.com/Geal/nom/compare/7.1.1...7.1.2) -* [7.1.1](https://github.com/Geal/nom/compare/7.1.0...7.1.1) -* [7.1.0](https://github.com/Geal/nom/compare/7.0.0...7.1.0) -* [7.0.0](https://github.com/Geal/nom/compare/6.2.1...7.0.0) -* [6.2.1](https://github.com/Geal/nom/compare/6.2.0...6.2.1) -* [6.2.0](https://github.com/Geal/nom/compare/6.1.2...6.2.0) -* [6.1.2](https://github.com/Geal/nom/compare/6.1.1...6.1.2) -* [6.1.1](https://github.com/Geal/nom/compare/6.1.0...6.1.1) -* [6.1.0](https://github.com/Geal/nom/compare/6.0.1...6.1.0) -* [6.0.1](https://github.com/Geal/nom/compare/6.0.0...6.0.1) -* [6.0.0](https://github.com/Geal/nom/compare/5.1.1...6.0.0) -* [5.1.1](https://github.com/Geal/nom/compare/5.1.0...5.1.1) -* [5.1.0](https://github.com/Geal/nom/compare/5.0.1...5.1.0) -* [5.0.1](https://github.com/Geal/nom/compare/5.0.0...5.0.1) -* [5.0.0](https://github.com/Geal/nom/compare/4.2.3...5.0.0) -* [4.2.3](https://github.com/Geal/nom/compare/4.2.2...4.2.3) -* [4.2.2](https://github.com/Geal/nom/compare/4.2.1...4.2.2) -* [4.2.1](https://github.com/Geal/nom/compare/4.2.0...4.2.1) -* [4.2.0](https://github.com/Geal/nom/compare/4.1.1...4.2.0) -* [4.1.1](https://github.com/Geal/nom/compare/4.1.0...4.1.1) -* [4.1.0](https://github.com/Geal/nom/compare/4.0.0...4.1.0) -* [4.0.0](https://github.com/Geal/nom/compare/3.2.1...4.0.0) -* [3.2.1](https://github.com/Geal/nom/compare/3.2.0...3.2.1) -* [3.2.0](https://github.com/Geal/nom/compare/3.1.0...3.2.0) -* [3.1.0](https://github.com/Geal/nom/compare/3.0.0...3.1.0) -* [3.0.0](https://github.com/Geal/nom/compare/2.2.1...3.0.0) -* [2.2.1](https://github.com/Geal/nom/compare/2.2.0...2.2.1) -* [2.2.0](https://github.com/Geal/nom/compare/2.1.0...2.2.0) -* [2.1.0](https://github.com/Geal/nom/compare/2.0.1...2.1.0) -* [2.0.1](https://github.com/Geal/nom/compare/2.0.0...2.0.1) -* [2.0.0](https://github.com/Geal/nom/compare/1.2.4...2.0.0) -* [1.2.4](https://github.com/Geal/nom/compare/1.2.3...1.2.4) -* [1.2.3](https://github.com/Geal/nom/compare/1.2.2...1.2.3) -* [1.2.2](https://github.com/Geal/nom/compare/1.2.1...1.2.2) -* [1.2.1](https://github.com/Geal/nom/compare/1.2.0...1.2.1) -* [1.2.0](https://github.com/Geal/nom/compare/1.1.0...1.2.0) -* [1.1.0](https://github.com/Geal/nom/compare/1.0.1...1.1.0) -* [1.0.1](https://github.com/Geal/nom/compare/1.0.0...1.0.1) -* [1.0.0](https://github.com/Geal/nom/compare/0.5.0...1.0.0) -* [0.5.0](https://github.com/geal/nom/compare/0.4.0...0.5.0) -* [0.4.0](https://github.com/geal/nom/compare/0.3.11...0.4.0) -* [0.3.11](https://github.com/geal/nom/compare/0.3.10...0.3.11) -* [0.3.10](https://github.com/geal/nom/compare/0.3.9...0.3.10) -* [0.3.9](https://github.com/geal/nom/compare/0.3.8...0.3.9) -* [0.3.8](https://github.com/Geal/nom/compare/0.3.7...0.3.8) -* [0.3.7](https://github.com/Geal/nom/compare/0.3.6...0.3.7) -* [0.3.6](https://github.com/Geal/nom/compare/0.3.5...0.3.6) -* [0.3.5](https://github.com/Geal/nom/compare/0.3.4...0.3.5) -* [0.3.4](https://github.com/Geal/nom/compare/0.3.3...0.3.4) -* 
[0.3.3](https://github.com/Geal/nom/compare/0.3.2...0.3.3) -* [0.3.2](https://github.com/Geal/nom/compare/0.3.1...0.3.2) -* [0.3.1](https://github.com/Geal/nom/compare/0.3.0...0.3.1) -* [0.3.0](https://github.com/Geal/nom/compare/0.2.2...0.3.0) -* [0.2.2](https://github.com/Geal/nom/compare/0.2.1...0.2.2) -* [0.2.1](https://github.com/Geal/nom/compare/0.2.0...0.2.1) -* [0.2.0](https://github.com/Geal/nom/compare/0.1.6...0.2.0) -* [0.1.6](https://github.com/Geal/nom/compare/0.1.5...0.1.6) -* [0.1.5](https://github.com/Geal/nom/compare/0.1.4...0.1.5) -* [0.1.4](https://github.com/Geal/nom/compare/0.1.3...0.1.4) -* [0.1.3](https://github.com/Geal/nom/compare/0.1.2...0.1.3) -* [0.1.2](https://github.com/Geal/nom/compare/0.1.1...0.1.2) -* [0.1.1](https://github.com/Geal/nom/compare/0.1.0...0.1.1) diff --git a/vendor/nom/Cargo.lock b/vendor/nom/Cargo.lock deleted file mode 100644 index c7168342e7d5cd..00000000000000 --- a/vendor/nom/Cargo.lock +++ /dev/null @@ -1,282 +0,0 @@ -# This file is automatically @generated by Cargo. -# It is not intended for manual editing. -version = 3 - -[[package]] -name = "autocfg" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a" - -[[package]] -name = "bit-set" -version = "0.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e11e16035ea35e4e5997b393eacbf6f63983188f7a2ad25bfb13465f5ad59de" -dependencies = [ - "bit-vec", -] - -[[package]] -name = "bit-vec" -version = "0.6.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "349f9b6a179ed607305526ca489b34ad0a41aed5f7980fa90eb03160b69598fb" - -[[package]] -name = "bitflags" -version = "1.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" - -[[package]] -name = "byteorder" -version = "1.4.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610" - -[[package]] -name = "cfg-if" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" - -[[package]] -name = "doc-comment" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fea41bba32d969b513997752735605054bc0dfa92b4c56bf1189f2e174be7a10" - -[[package]] -name = "fnv" -version = "1.0.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" - -[[package]] -name = "getrandom" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fcd999463524c52659517fe2cea98493cfe485d10565e7b0fb07dbba7ad2753" -dependencies = [ - "cfg-if", - "libc", - "wasi", -] - -[[package]] -name = "lazy_static" -version = "1.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" - -[[package]] -name = "libc" -version = "0.2.106" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a60553f9a9e039a333b4e9b20573b9e9b9c0bb3a11e201ccc48ef4283456d673" - -[[package]] -name = "memchr" -version = "2.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "308cc39be01b73d0d18f82a0e7b2a3df85245f84af96fdddc5d202d27e47b86a" - 
-[[package]] -name = "minimal-lexical" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" - -[[package]] -name = "nom" -version = "7.1.3" -dependencies = [ - "doc-comment", - "memchr", - "minimal-lexical", - "proptest", -] - -[[package]] -name = "num-traits" -version = "0.2.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a64b1ec5cda2586e284722486d802acf1f7dbdc623e2bfc57e65ca1cd099290" -dependencies = [ - "autocfg", -] - -[[package]] -name = "ppv-lite86" -version = "0.2.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed0cfbc8191465bed66e1718596ee0b0b35d5ee1f41c5df2189d0fe8bde535ba" - -[[package]] -name = "proptest" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e0d9cc07f18492d879586c92b485def06bc850da3118075cd45d50e9c95b0e5" -dependencies = [ - "bit-set", - "bitflags", - "byteorder", - "lazy_static", - "num-traits", - "quick-error 2.0.1", - "rand", - "rand_chacha", - "rand_xorshift", - "regex-syntax", - "rusty-fork", - "tempfile", -] - -[[package]] -name = "quick-error" -version = "1.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" - -[[package]] -name = "quick-error" -version = "2.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a993555f31e5a609f617c12db6250dedcac1b0a85076912c436e6fc9b2c8e6a3" - -[[package]] -name = "rand" -version = "0.8.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e7573632e6454cf6b99d7aac4ccca54be06da05aca2ef7423d22d27d4d4bcd8" -dependencies = [ - "libc", - "rand_chacha", - "rand_core", - "rand_hc", -] - -[[package]] -name = "rand_chacha" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" -dependencies = [ - "ppv-lite86", - "rand_core", -] - -[[package]] -name = "rand_core" -version = "0.6.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d34f1408f55294453790c48b2f1ebbb1c5b4b7563eb1f418bcfcfdbb06ebb4e7" -dependencies = [ - "getrandom", -] - -[[package]] -name = "rand_hc" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d51e9f596de227fda2ea6c84607f5558e196eeaf43c986b724ba4fb8fdf497e7" -dependencies = [ - "rand_core", -] - -[[package]] -name = "rand_xorshift" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d25bf25ec5ae4a3f1b92f929810509a2f53d7dca2f50b794ff57e3face536c8f" -dependencies = [ - "rand_core", -] - -[[package]] -name = "redox_syscall" -version = "0.2.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8383f39639269cde97d255a32bdb68c047337295414940c68bdd30c2e13203ff" -dependencies = [ - "bitflags", -] - -[[package]] -name = "regex-syntax" -version = "0.6.25" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f497285884f3fcff424ffc933e56d7cbca511def0c9831a7f9b5f6153e3cc89b" - -[[package]] -name = "remove_dir_all" -version = "0.5.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3acd125665422973a33ac9d3dd2df85edad0f4ae9b00dafb1a05e43a9f5ef8e7" -dependencies = [ - "winapi", -] - -[[package]] -name = "rusty-fork" -version = "0.3.0" 
-source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb3dcc6e454c328bb824492db107ab7c0ae8fcffe4ad210136ef014458c1bc4f" -dependencies = [ - "fnv", - "quick-error 1.2.3", - "tempfile", - "wait-timeout", -] - -[[package]] -name = "tempfile" -version = "3.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dac1c663cfc93810f88aed9b8941d48cabf856a1b111c29a40439018d870eb22" -dependencies = [ - "cfg-if", - "libc", - "rand", - "redox_syscall", - "remove_dir_all", - "winapi", -] - -[[package]] -name = "wait-timeout" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f200f5b12eb75f8c1ed65abd4b2db8a6e1b138a20de009dacee265a2498f3f6" -dependencies = [ - "libc", -] - -[[package]] -name = "wasi" -version = "0.10.2+wasi-snapshot-preview1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd6fbd9a79829dd1ad0cc20627bf1ed606756a7f77edff7b66b7064f9cb327c6" - -[[package]] -name = "winapi" -version = "0.3.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" -dependencies = [ - "winapi-i686-pc-windows-gnu", - "winapi-x86_64-pc-windows-gnu", -] - -[[package]] -name = "winapi-i686-pc-windows-gnu" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" - -[[package]] -name = "winapi-x86_64-pc-windows-gnu" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" diff --git a/vendor/nom/Cargo.toml b/vendor/nom/Cargo.toml deleted file mode 100644 index 2388b4ceea350c..00000000000000 --- a/vendor/nom/Cargo.toml +++ /dev/null @@ -1,168 +0,0 @@ -# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO -# -# When uploading crates to the registry Cargo will automatically -# "normalize" Cargo.toml files for maximal compatibility -# with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g., crates.io) dependencies. -# -# If you are reading this file be aware that the original Cargo.toml -# will likely look very different (and much more reasonable). -# See Cargo.toml.orig for the original contents. 
- -[package] -edition = "2018" -rust-version = "1.48" -name = "nom" -version = "7.1.3" -authors = ["contact@geoffroycouprie.com"] -include = [ - "CHANGELOG.md", - "LICENSE", - "README.md", - ".gitignore", - "Cargo.toml", - "src/*.rs", - "src/*/*.rs", - "tests/*.rs", - "doc/nom_recipes.md", -] -autoexamples = false -description = "A byte-oriented, zero-copy, parser combinators library" -documentation = "https://docs.rs/nom" -readme = "README.md" -keywords = [ - "parser", - "parser-combinators", - "parsing", - "streaming", - "bit", -] -categories = ["parsing"] -license = "MIT" -repository = "https://github.com/Geal/nom" - -[package.metadata.docs.rs] -features = [ - "alloc", - "std", - "docsrs", -] -all-features = true - -[profile.bench] -lto = true -codegen-units = 1 -debug = true - -[[example]] -name = "custom_error" -path = "examples/custom_error.rs" -required-features = ["alloc"] - -[[example]] -name = "json" -path = "examples/json.rs" -required-features = ["alloc"] - -[[example]] -name = "json_iterator" -path = "examples/json_iterator.rs" -required-features = ["alloc"] - -[[example]] -name = "iterator" -path = "examples/iterator.rs" - -[[example]] -name = "s_expression" -path = "examples/s_expression.rs" -required-features = ["alloc"] - -[[example]] -name = "string" -path = "examples/string.rs" -required-features = ["alloc"] - -[[test]] -name = "arithmetic" - -[[test]] -name = "arithmetic_ast" -required-features = ["alloc"] - -[[test]] -name = "css" - -[[test]] -name = "custom_errors" - -[[test]] -name = "float" - -[[test]] -name = "ini" -required-features = ["alloc"] - -[[test]] -name = "ini_str" -required-features = ["alloc"] - -[[test]] -name = "issues" -required-features = ["alloc"] - -[[test]] -name = "json" - -[[test]] -name = "mp4" -required-features = ["alloc"] - -[[test]] -name = "multiline" -required-features = ["alloc"] - -[[test]] -name = "overflow" - -[[test]] -name = "reborrow_fold" - -[[test]] -name = "fnmut" -required-features = ["alloc"] - -[dependencies.memchr] -version = "2.3" -default-features = false - -[dependencies.minimal-lexical] -version = "0.2.0" -default-features = false - -[dev-dependencies.doc-comment] -version = "0.3" - -[dev-dependencies.proptest] -version = "1.0.0" - -[features] -alloc = [] -default = ["std"] -docsrs = [] -std = [ - "alloc", - "memchr/std", - "minimal-lexical/std", -] - -[badges.coveralls] -branch = "main" -repository = "Geal/nom" -service = "github" - -[badges.maintenance] -status = "actively-developed" - -[badges.travis-ci] -repository = "Geal/nom" diff --git a/vendor/nom/LICENSE b/vendor/nom/LICENSE deleted file mode 100644 index 88557e44e34e24..00000000000000 --- a/vendor/nom/LICENSE +++ /dev/null @@ -1,20 +0,0 @@ -Copyright (c) 2014-2019 Geoffroy Couprie - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/nom/README.md b/vendor/nom/README.md deleted file mode 100644 index f2c1b052863714..00000000000000 --- a/vendor/nom/README.md +++ /dev/null @@ -1,331 +0,0 @@ -# nom, eating data byte by byte - -[![LICENSE](https://img.shields.io/badge/license-MIT-blue.svg)](LICENSE) -[![Join the chat at https://gitter.im/Geal/nom](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/Geal/nom?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) -[![Build Status](https://github.com/Geal/nom/actions/workflows/ci.yml/badge.svg)](https://github.com/Geal/nom/actions/workflows/ci.yml) -[![Coverage Status](https://coveralls.io/repos/github/Geal/nom/badge.svg?branch=main)](https://coveralls.io/github/Geal/nom?branch=main) -[![Crates.io Version](https://img.shields.io/crates/v/nom.svg)](https://crates.io/crates/nom) -[![Minimum rustc version](https://img.shields.io/badge/rustc-1.48.0+-lightgray.svg)](#rust-version-requirements-msrv) - -nom is a parser combinators library written in Rust. Its goal is to provide tools -to build safe parsers without compromising the speed or memory consumption. To -that end, it uses extensively Rust's *strong typing* and *memory safety* to produce -fast and correct parsers, and provides functions, macros and traits to abstract most of the -error prone plumbing. - -![nom logo in CC0 license, by Ange Albertini](https://raw.githubusercontent.com/Geal/nom/main/assets/nom.png) - -*nom will happily take a byte out of your files :)* - - - -- [Example](#example) -- [Documentation](#documentation) -- [Why use nom?](#why-use-nom) - - [Binary format parsers](#binary-format-parsers) - - [Text format parsers](#text-format-parsers) - - [Programming language parsers](#programming-language-parsers) - - [Streaming formats](#streaming-formats) -- [Parser combinators](#parser-combinators) -- [Technical features](#technical-features) -- [Rust version requirements](#rust-version-requirements-msrv) -- [Installation](#installation) -- [Related projects](#related-projects) -- [Parsers written with nom](#parsers-written-with-nom) -- [Contributors](#contributors) - - - -## Example - -[Hexadecimal color](https://developer.mozilla.org/en-US/docs/Web/CSS/color) parser: - -```rust -extern crate nom; -use nom::{ - IResult, - bytes::complete::{tag, take_while_m_n}, - combinator::map_res, - sequence::tuple -}; - -#[derive(Debug,PartialEq)] -pub struct Color { - pub red: u8, - pub green: u8, - pub blue: u8, -} - -fn from_hex(input: &str) -> Result<u8, std::num::ParseIntError> { - u8::from_str_radix(input, 16) -} - -fn is_hex_digit(c: char) -> bool { - c.is_digit(16) -} - -fn hex_primary(input: &str) -> IResult<&str, u8> { - map_res( - take_while_m_n(2, 2, is_hex_digit), - from_hex - )(input) -} - -fn hex_color(input: &str) -> IResult<&str, Color> { - let (input, _) = tag("#")(input)?; - let (input, (red, green, blue)) = tuple((hex_primary, hex_primary, hex_primary))(input)?; - - Ok((input, Color { red, green, blue })) -} - -fn main() {} - -#[test] -fn parse_color() { - assert_eq!(hex_color("#2F14DF"), Ok(("", Color { - red: 47, - green: 20, - blue: 223, - }))); -} -``` - -## Documentation - -- [Reference documentation](https://docs.rs/nom) -- [Various design documents and tutorials](https://github.com/Geal/nom/tree/main/doc) -- [List of 
combinators and their behaviour](https://github.com/Geal/nom/blob/main/doc/choosing_a_combinator.md) - -If you need any help developing your parsers, please ping `geal` on IRC (libera, geeknode, oftc), go to `#nom-parsers` on Libera IRC, or on the [Gitter chat room](https://gitter.im/Geal/nom). - -## Why use nom - -If you want to write: - -### Binary format parsers - -nom was designed to properly parse binary formats from the beginning. Compared -to the usual handwritten C parsers, nom parsers are just as fast, free from -buffer overflow vulnerabilities, and handle common patterns for you: - -- [TLV](https://en.wikipedia.org/wiki/Type-length-value) -- Bit level parsing -- Hexadecimal viewer in the debugging macros for easy data analysis -- Streaming parsers for network formats and huge files - -Example projects: - -- [FLV parser](https://github.com/rust-av/flavors) -- [Matroska parser](https://github.com/rust-av/matroska) -- [tar parser](https://github.com/Keruspe/tar-parser.rs) - -### Text format parsers - -While nom was made for binary format at first, it soon grew to work just as -well with text formats. From line based formats like CSV, to more complex, nested -formats such as JSON, nom can manage it, and provides you with useful tools: - -- Fast case insensitive comparison -- Recognizers for escaped strings -- Regular expressions can be embedded in nom parsers to represent complex character patterns succinctly -- Special care has been given to managing non ASCII characters properly - -Example projects: - -- [HTTP proxy](https://github.com/sozu-proxy/sozu/tree/main/lib/src/protocol/http/parser) -- [TOML parser](https://github.com/joelself/tomllib) - -### Programming language parsers - -While programming language parsers are usually written manually for more -flexibility and performance, nom can be (and has been successfully) used -as a prototyping parser for a language. - -nom will get you started quickly with powerful custom error types, that you -can leverage with [nom_locate](https://github.com/fflorent/nom_locate) to -pinpoint the exact line and column of the error. No need for separate -tokenizing, lexing and parsing phases: nom can automatically handle whitespace -parsing, and construct an AST in place. - -Example projects: - -- [PHP VM](https://github.com/tagua-vm/parser) -- eve language prototype -- [xshade shading language](https://github.com/xshade-lang/xshade/) - -### Streaming formats - -While a lot of formats (and the code handling them) assume that they can fit -the complete data in memory, there are formats for which we only get a part -of the data at once, like network formats, or huge files. -nom has been designed for a correct behaviour with partial data: If there is -not enough data to decide, nom will tell you it needs more instead of silently -returning a wrong result. Whether your data comes entirely or in chunks, the -result should be the same. - -It allows you to build powerful, deterministic state machines for your protocols. - -Example projects: - -- [HTTP proxy](https://github.com/sozu-proxy/sozu/tree/main/lib/src/protocol/http/parser) -- [Using nom with generators](https://github.com/Geal/generator_nom) - -## Parser combinators - -Parser combinators are an approach to parsers that is very different from -software like [lex](https://en.wikipedia.org/wiki/Lex_(software)) and -[yacc](https://en.wikipedia.org/wiki/Yacc). 
Instead of writing the grammar -in a separate file and generating the corresponding code, you use very -small functions with very specific purpose, like "take 5 bytes", or -"recognize the word 'HTTP'", and assemble them in meaningful patterns -like "recognize 'HTTP', then a space, then a version". -The resulting code is small, and looks like the grammar you would have -written with other parser approaches. - -This has a few advantages: - -- The parsers are small and easy to write -- The parsers components are easy to reuse (if they're general enough, please add them to nom!) -- The parsers components are easy to test separately (unit tests and property-based tests) -- The parser combination code looks close to the grammar you would have written -- You can build partial parsers, specific to the data you need at the moment, and ignore the rest - -## Technical features - -nom parsers are for: -- [x] **byte-oriented**: The basic type is `&[u8]` and parsers will work as much as possible on byte array slices (but are not limited to them) -- [x] **bit-oriented**: nom can address a byte slice as a bit stream -- [x] **string-oriented**: The same kind of combinators can apply on UTF-8 strings as well -- [x] **zero-copy**: If a parser returns a subset of its input data, it will return a slice of that input, without copying -- [x] **streaming**: nom can work on partial data and detect when it needs more data to produce a correct result -- [x] **descriptive errors**: The parsers can aggregate a list of error codes with pointers to the incriminated input slice. Those error lists can be pattern matched to provide useful messages. -- [x] **custom error types**: You can provide a specific type to improve errors returned by parsers -- [x] **safe parsing**: nom leverages Rust's safe memory handling and powerful types, and parsers are routinely fuzzed and tested with real world data. So far, the only flaws found by fuzzing were in code written outside of nom -- [x] **speed**: Benchmarks have shown that nom parsers often outperform many parser combinators library like Parsec and attoparsec, some regular expression engines and even handwritten C parsers - -Some benchmarks are available on [Github](https://github.com/Geal/nom_benchmarks). - -## Rust version requirements (MSRV) - -The 7.0 series of nom supports **Rustc version 1.48 or greater**. It is known to work properly on Rust 1.41.1 but there is no guarantee it will stay the case through this major release. - -The current policy is that this will only be updated in the next major nom release. - -## Installation - -nom is available on [crates.io](https://crates.io/crates/nom) and can be included in your Cargo enabled project like this: - -```toml -[dependencies] -nom = "7" -``` - -There are a few compilation features: - -* `alloc`: (activated by default) if disabled, nom can work in `no_std` builds without memory allocators. 
If enabled, combinators that allocate (like `many0`) will be available -* `std`: (activated by default, activates `alloc` too) if disabled, nom can work in `no_std` builds - -You can configure those features like this: - -```toml -[dependencies.nom] -version = "7" -default-features = false -features = ["alloc"] -``` - -# Related projects - -- [Get line and column info in nom's input type](https://github.com/fflorent/nom_locate) -- [Using nom as lexer and parser](https://github.com/Rydgel/monkey-rust) - -# Parsers written with nom - -Here is a (non exhaustive) list of known projects using nom: - -- Text file formats: [Ceph Crush](https://github.com/cholcombe973/crushtool), -[Cronenberg](https://github.com/ayrat555/cronenberg), -[XFS Runtime Stats](https://github.com/ChrisMacNaughton/xfs-rs), -[CSV](https://github.com/GuillaumeGomez/csv-parser), -[FASTA](https://github.com/TianyiShi2001/nom-fasta), -[FASTQ](https://github.com/elij/fastq.rs), -[INI](https://github.com/Geal/nom/blob/main/tests/ini.rs), -[ISO 8601 dates](https://github.com/badboy/iso8601), -[libconfig-like configuration file format](https://github.com/filipegoncalves/rust-config), -[Web archive](https://github.com/sbeckeriv/warc_nom_parser), -[PDB](https://github.com/TianyiShi2001/nom-pdb), -[proto files](https://github.com/tafia/protobuf-parser), -[Fountain screenplay markup](https://github.com/adamchalmers/fountain-rs), -[vimwiki](https://github.com/chipsenkbeil/vimwiki-server/tree/master/vimwiki) & [vimwiki_macros](https://github.com/chipsenkbeil/vimwiki-server/tree/master/vimwiki_macros) -- Programming languages: -[PHP](https://github.com/tagua-vm/parser), -[Basic Calculator](https://github.com/balajisivaraman/basic_calculator_rs), -[GLSL](https://github.com/phaazon/glsl), -[Lua](https://github.com/doomrobo/nom-lua53), -[Python](https://github.com/ProgVal/rust-python-parser), -[SQL](https://github.com/ms705/nom-sql), -[Elm](https://github.com/cout970/Elm-interpreter), -[SystemVerilog](https://github.com/dalance/sv-parser), -[Turtle](https://github.com/vandenoever/rome/tree/master/src/io/turtle), -[CSML](https://github.com/CSML-by-Clevy/csml-interpreter), -[Wasm](https://github.com/Strytyp/wasm-nom), -[Pseudocode](https://github.com/Gungy2/pseudocode) -[Filter for MeiliSearch](https://github.com/meilisearch/meilisearch) -- Interface definition formats: [Thrift](https://github.com/thehydroimpulse/thrust) -- Audio, video and image formats: -[GIF](https://github.com/Geal/gif.rs), -[MagicaVoxel .vox](https://github.com/davidedmonds/dot_vox), -[midi](https://github.com/derekdreery/nom-midi-rs), -[SWF](https://github.com/open-flash/swf-parser), -[WAVE](http://github.com/noise-Labs/wave), -[Matroska (MKV)](https://github.com/rust-av/matroska) -- Document formats: -[TAR](https://github.com/Keruspe/tar-parser.rs), -[GZ](https://github.com/nharward/nom-gzip), -[GDSII](https://github.com/erihsu/gds2-io) -- Cryptographic formats: -[X.509](https://github.com/rusticata/x509-parser) -- Network protocol formats: -[Bencode](https://github.com/jbaum98/bencode.rs), -[D-Bus](https://github.com/toshokan/misato), -[DHCP](https://github.com/rusticata/dhcp-parser), -[HTTP](https://github.com/sozu-proxy/sozu/tree/main/lib/src/protocol/http), -[URI](https://github.com/santifa/rrp/blob/master/src/uri.rs), -[IMAP](https://github.com/djc/tokio-imap), -[IRC](https://github.com/Detegr/RBot-parser), -[Pcap-NG](https://github.com/richo/pcapng-rs), -[Pcap](https://github.com/ithinuel/pcap-rs), -[Pcap + PcapNG](https://github.com/rusticata/pcap-parser), 
-[IKEv2](https://github.com/rusticata/ipsec-parser), -[NTP](https://github.com/rusticata/ntp-parser), -[SNMP](https://github.com/rusticata/snmp-parser), -[Kerberos v5](https://github.com/rusticata/kerberos-parser), -[DER](https://github.com/rusticata/der-parser), -[TLS](https://github.com/rusticata/tls-parser), -[IPFIX / Netflow v10](https://github.com/dominotree/rs-ipfix), -[GTP](https://github.com/fuerstenau/gorrosion-gtp), -[SIP](https://github.com/armatusmiles/sipcore/tree/master/crates/sipmsg), -[Prometheus](https://github.com/timberio/vector/blob/master/lib/prometheus-parser/src/line.rs) -- Language specifications: -[BNF](https://github.com/snewt/bnf) -- Misc formats: -[Gameboy ROM](https://github.com/MarkMcCaskey/gameboy-rom-parser), -[ANT FIT](https://github.com/stadelmanma/fitparse-rs), -[Version Numbers](https://github.com/fosskers/rs-versions), -[Telcordia/Bellcore SR-4731 SOR OTDR files](https://github.com/JamesHarrison/otdrs), -[MySQL binary log](https://github.com/PrivateRookie/boxercrab), -[URI](https://github.com/Skasselbard/nom-uri), -[Furigana](https://github.com/sachaarbonel/furigana.rs), -[Wordle Result](https://github.com/Fyko/wordle-stats/tree/main/parser) - -Want to create a new parser using `nom`? A list of not yet implemented formats is available [here](https://github.com/Geal/nom/issues/14). - -Want to add your parser here? Create a pull request for it! - -# Contributors - -nom is the fruit of the work of many contributors over the years, many thanks for your help! - - - - diff --git a/vendor/nom/doc/nom_recipes.md b/vendor/nom/doc/nom_recipes.md deleted file mode 100644 index e8626344a7fcf3..00000000000000 --- a/vendor/nom/doc/nom_recipes.md +++ /dev/null @@ -1,395 +0,0 @@ -# Nom Recipes - -These are short recipes for accomplishing common tasks with nom. - -* [Whitespace](#whitespace) - + [Wrapper combinators that eat whitespace before and after a parser](#wrapper-combinators-that-eat-whitespace-before-and-after-a-parser) -* [Comments](#comments) - + [`// C++/EOL-style comments`](#-ceol-style-comments) - + [`/* C-style comments */`](#-c-style-comments-) -* [Identifiers](#identifiers) - + [`Rust-Style Identifiers`](#rust-style-identifiers) -* [Literal Values](#literal-values) - + [Escaped Strings](#escaped-strings) - + [Integers](#integers) - - [Hexadecimal](#hexadecimal) - - [Octal](#octal) - - [Binary](#binary) - - [Decimal](#decimal) - + [Floating Point Numbers](#floating-point-numbers) - -## Whitespace - - - -### Wrapper combinators that eat whitespace before and after a parser - -```rust -use nom::{ - IResult, - error::ParseError, - combinator::value, - sequence::delimited, - character::complete::multispace0, -}; - -/// A combinator that takes a parser `inner` and produces a parser that also consumes both leading and -/// trailing whitespace, returning the output of `inner`. -fn ws<'a, F: 'a, O, E: ParseError<&'a str>>(inner: F) -> impl FnMut(&'a str) -> IResult<&'a str, O, E> - where - F: Fn(&'a str) -> IResult<&'a str, O, E>, -{ - delimited( - multispace0, - inner, - multispace0 - ) -} -``` - -To eat only trailing whitespace, replace `delimited(...)` with `terminated(&inner, multispace0)`. -Likewise, the eat only leading whitespace, replace `delimited(...)` with `preceded(multispace0, -&inner)`. You can use your own parser instead of `multispace0` if you want to skip a different set -of lexemes. 
- -## Comments - -### `// C++/EOL-style comments` - -This version uses `%` to start a comment, does not consume the newline character, and returns an -output of `()`. - -```rust -use nom::{ - IResult, - error::ParseError, - combinator::value, - sequence::pair, - bytes::complete::is_not, - character::complete::char, -}; - -pub fn peol_comment<'a, E: ParseError<&'a str>>(i: &'a str) -> IResult<&'a str, (), E> -{ - value( - (), // Output is thrown away. - pair(char('%'), is_not("\n\r")) - )(i) -} -``` - -### `/* C-style comments */` - -Inline comments surrounded with sentinel tags `(*` and `*)`. This version returns an output of `()` -and does not handle nested comments. - -```rust -use nom::{ - IResult, - error::ParseError, - combinator::value, - sequence::tuple, - bytes::complete::{tag, take_until}, -}; - -pub fn pinline_comment<'a, E: ParseError<&'a str>>(i: &'a str) -> IResult<&'a str, (), E> { - value( - (), // Output is thrown away. - tuple(( - tag("(*"), - take_until("*)"), - tag("*)") - )) - )(i) -} -``` - -## Identifiers - -### `Rust-Style Identifiers` - -Parsing identifiers that may start with a letter (or underscore) and may contain underscores, -letters and numbers may be parsed like this: - -```rust -use nom::{ - IResult, - branch::alt, - multi::many0_count, - combinator::recognize, - sequence::pair, - character::complete::{alpha1, alphanumeric1}, - bytes::complete::tag, -}; - -pub fn identifier(input: &str) -> IResult<&str, &str> { - recognize( - pair( - alt((alpha1, tag("_"))), - many0_count(alt((alphanumeric1, tag("_")))) - ) - )(input) -} -``` - -Let's say we apply this to the identifier `hello_world123abc`. The first `alt` parser would -recognize `h`. The `pair` combinator ensures that `ello_world123abc` will be piped to the next -`alphanumeric0` parser, which recognizes every remaining character. However, the `pair` combinator -returns a tuple of the results of its sub-parsers. The `recognize` parser produces a `&str` of the -input text that was parsed, which in this case is the entire `&str` `hello_world123abc`. - -## Literal Values - -### Escaped Strings - -This is [one of the examples](https://github.com/Geal/nom/blob/main/examples/string.rs) in the -examples directory. - -### Integers - -The following recipes all return string slices rather than integer values. How to obtain an -integer value instead is demonstrated for hexadecimal integers. The others are similar. - -The parsers allow the grouping character `_`, which allows one to group the digits by byte, for -example: `0xA4_3F_11_28`. If you prefer to exclude the `_` character, the lambda to convert from a -string slice to an integer value is slightly simpler. You can also strip the `_` from the string -slice that is returned, which is demonstrated in the second hexdecimal number parser. - -If you wish to limit the number of digits in a valid integer literal, replace `many1` with -`many_m_n` in the recipes. - -#### Hexadecimal - -The parser outputs the string slice of the digits without the leading `0x`/`0X`. 
- -```rust -use nom::{ - IResult, - branch::alt, - multi::{many0, many1}, - combinator::recognize, - sequence::{preceded, terminated}, - character::complete::{char, one_of}, - bytes::complete::tag, -}; - -fn hexadecimal(input: &str) -> IResult<&str, &str> { // <'a, E: ParseError<&'a str>> - preceded( - alt((tag("0x"), tag("0X"))), - recognize( - many1( - terminated(one_of("0123456789abcdefABCDEF"), many0(char('_'))) - ) - ) - )(input) -} -``` - -If you want it to return the integer value instead, use map: - -```rust -use nom::{ - IResult, - branch::alt, - multi::{many0, many1}, - combinator::{map_res, recognize}, - sequence::{preceded, terminated}, - character::complete::{char, one_of}, - bytes::complete::tag, -}; - -fn hexadecimal_value(input: &str) -> IResult<&str, i64> { - map_res( - preceded( - alt((tag("0x"), tag("0X"))), - recognize( - many1( - terminated(one_of("0123456789abcdefABCDEF"), many0(char('_'))) - ) - ) - ), - |out: &str| i64::from_str_radix(&str::replace(&out, "_", ""), 16) - )(input) -} -``` - -#### Octal - -```rust -use nom::{ - IResult, - branch::alt, - multi::{many0, many1}, - combinator::recognize, - sequence::{preceded, terminated}, - character::complete::{char, one_of}, - bytes::complete::tag, -}; - -fn octal(input: &str) -> IResult<&str, &str> { - preceded( - alt((tag("0o"), tag("0O"))), - recognize( - many1( - terminated(one_of("01234567"), many0(char('_'))) - ) - ) - )(input) -} -``` - -#### Binary - -```rust -use nom::{ - IResult, - branch::alt, - multi::{many0, many1}, - combinator::recognize, - sequence::{preceded, terminated}, - character::complete::{char, one_of}, - bytes::complete::tag, -}; - -fn binary(input: &str) -> IResult<&str, &str> { - preceded( - alt((tag("0b"), tag("0B"))), - recognize( - many1( - terminated(one_of("01"), many0(char('_'))) - ) - ) - )(input) -} -``` - -#### Decimal - -```rust -use nom::{ - IResult, - multi::{many0, many1}, - combinator::recognize, - sequence::terminated, - character::complete::{char, one_of}, -}; - -fn decimal(input: &str) -> IResult<&str, &str> { - recognize( - many1( - terminated(one_of("0123456789"), many0(char('_'))) - ) - )(input) -} -``` - -### Floating Point Numbers - -The following is adapted from [the Python parser by Valentin Lorentz (ProgVal)](https://github.com/ProgVal/rust-python-parser/blob/master/src/numbers.rs). - -```rust -use nom::{ - IResult, - branch::alt, - multi::{many0, many1}, - combinator::{opt, recognize}, - sequence::{preceded, terminated, tuple}, - character::complete::{char, one_of}, -}; - -fn float(input: &str) -> IResult<&str, &str> { - alt(( - // Case one: .42 - recognize( - tuple(( - char('.'), - decimal, - opt(tuple(( - one_of("eE"), - opt(one_of("+-")), - decimal - ))) - )) - ) - , // Case two: 42e42 and 42.42e42 - recognize( - tuple(( - decimal, - opt(preceded( - char('.'), - decimal, - )), - one_of("eE"), - opt(one_of("+-")), - decimal - )) - ) - , // Case three: 42. and 42.42 - recognize( - tuple(( - decimal, - char('.'), - opt(decimal) - )) - ) - ))(input) -} - -fn decimal(input: &str) -> IResult<&str, &str> { - recognize( - many1( - terminated(one_of("0123456789"), many0(char('_'))) - ) - )(input) -} -``` - -# implementing FromStr - -The [FromStr trait](https://doc.rust-lang.org/std/str/trait.FromStr.html) provides -a common interface to parse from a string. - -```rust -use nom::{ - IResult, Finish, error::Error, - bytes::complete::{tag, take_while}, -}; -use std::str::FromStr; - -// will recognize the name in "Hello, name!" 
-fn parse_name(input: &str) -> IResult<&str, &str> { - let (i, _) = tag("Hello, ")(input)?; - let (i, name) = take_while(|c:char| c.is_alphabetic())(i)?; - let (i, _) = tag("!")(i)?; - - Ok((i, name)) -} - -// with FromStr, the result cannot be a reference to the input, it must be owned -#[derive(Debug)] -pub struct Name(pub String); - -impl FromStr for Name { - // the error must be owned as well - type Err = Error; - - fn from_str(s: &str) -> Result { - match parse_name(s).finish() { - Ok((_remaining, name)) => Ok(Name(name.to_string())), - Err(Error { input, code }) => Err(Error { - input: input.to_string(), - code, - }) - } - } -} - -fn main() { - // parsed: Ok(Name("nom")) - println!("parsed: {:?}", "Hello, nom!".parse::()); - - // parsed: Err(Error { input: "123!", code: Tag }) - println!("parsed: {:?}", "Hello, 123!".parse::()); -} -``` - diff --git a/vendor/nom/src/bits/complete.rs b/vendor/nom/src/bits/complete.rs deleted file mode 100644 index bf36dcc2aae007..00000000000000 --- a/vendor/nom/src/bits/complete.rs +++ /dev/null @@ -1,197 +0,0 @@ -//! Bit level parsers -//! - -use crate::error::{ErrorKind, ParseError}; -use crate::internal::{Err, IResult}; -use crate::lib::std::ops::{AddAssign, Div, RangeFrom, Shl, Shr}; -use crate::traits::{InputIter, InputLength, Slice, ToUsize}; - -/// Generates a parser taking `count` bits -/// -/// # Example -/// ```rust -/// # use nom::bits::complete::take; -/// # use nom::IResult; -/// # use nom::error::{Error, ErrorKind}; -/// // Input is a tuple of (input: I, bit_offset: usize) -/// fn parser(input: (&[u8], usize), count: usize)-> IResult<(&[u8], usize), u8> { -/// take(count)(input) -/// } -/// -/// // Consumes 0 bits, returns 0 -/// assert_eq!(parser(([0b00010010].as_ref(), 0), 0), Ok((([0b00010010].as_ref(), 0), 0))); -/// -/// // Consumes 4 bits, returns their values and increase offset to 4 -/// assert_eq!(parser(([0b00010010].as_ref(), 0), 4), Ok((([0b00010010].as_ref(), 4), 0b00000001))); -/// -/// // Consumes 4 bits, offset is 4, returns their values and increase offset to 0 of next byte -/// assert_eq!(parser(([0b00010010].as_ref(), 4), 4), Ok((([].as_ref(), 0), 0b00000010))); -/// -/// // Tries to consume 12 bits but only 8 are available -/// assert_eq!(parser(([0b00010010].as_ref(), 0), 12), Err(nom::Err::Error(Error{input: ([0b00010010].as_ref(), 0), code: ErrorKind::Eof }))); -/// ``` -pub fn take>( - count: C, -) -> impl Fn((I, usize)) -> IResult<(I, usize), O, E> -where - I: Slice> + InputIter + InputLength, - C: ToUsize, - O: From + AddAssign + Shl + Shr, -{ - let count = count.to_usize(); - move |(input, bit_offset): (I, usize)| { - if count == 0 { - Ok(((input, bit_offset), 0u8.into())) - } else { - let cnt = (count + bit_offset).div(8); - if input.input_len() * 8 < count + bit_offset { - Err(Err::Error(E::from_error_kind( - (input, bit_offset), - ErrorKind::Eof, - ))) - } else { - let mut acc: O = 0_u8.into(); - let mut offset: usize = bit_offset; - let mut remaining: usize = count; - let mut end_offset: usize = 0; - - for byte in input.iter_elements().take(cnt + 1) { - if remaining == 0 { - break; - } - let val: O = if offset == 0 { - byte.into() - } else { - ((byte << offset) as u8 >> offset).into() - }; - - if remaining < 8 - offset { - acc += val >> (8 - offset - remaining); - end_offset = remaining + offset; - break; - } else { - acc += val << (remaining - (8 - offset)); - remaining -= 8 - offset; - offset = 0; - } - } - Ok(((input.slice(cnt..), end_offset), acc)) - } - } - } -} - -/// Generates a parser taking `count` 
bits and comparing them to `pattern` -pub fn tag>( - pattern: O, - count: C, -) -> impl Fn((I, usize)) -> IResult<(I, usize), O, E> -where - I: Slice> + InputIter + InputLength + Clone, - C: ToUsize, - O: From + AddAssign + Shl + Shr + PartialEq, -{ - let count = count.to_usize(); - move |input: (I, usize)| { - let inp = input.clone(); - - take(count)(input).and_then(|(i, o)| { - if pattern == o { - Ok((i, o)) - } else { - Err(Err::Error(error_position!(inp, ErrorKind::TagBits))) - } - }) - } -} - -/// Parses one specific bit as a bool. -/// -/// # Example -/// ```rust -/// # use nom::bits::complete::bool; -/// # use nom::IResult; -/// # use nom::error::{Error, ErrorKind}; -/// -/// fn parse(input: (&[u8], usize)) -> IResult<(&[u8], usize), bool> { -/// bool(input) -/// } -/// -/// assert_eq!(parse(([0b10000000].as_ref(), 0)), Ok((([0b10000000].as_ref(), 1), true))); -/// assert_eq!(parse(([0b10000000].as_ref(), 1)), Ok((([0b10000000].as_ref(), 2), false))); -/// ``` -pub fn bool>(input: (I, usize)) -> IResult<(I, usize), bool, E> -where - I: Slice> + InputIter + InputLength, -{ - let (res, bit): (_, u32) = take(1usize)(input)?; - Ok((res, bit != 0)) -} - -#[cfg(test)] -mod test { - use super::*; - - #[test] - fn test_take_0() { - let input = [0b00010010].as_ref(); - let count = 0usize; - assert_eq!(count, 0usize); - let offset = 0usize; - - let result: crate::IResult<(&[u8], usize), usize> = take(count)((input, offset)); - - assert_eq!(result, Ok(((input, offset), 0))); - } - - #[test] - fn test_take_eof() { - let input = [0b00010010].as_ref(); - - let result: crate::IResult<(&[u8], usize), usize> = take(1usize)((input, 8)); - - assert_eq!( - result, - Err(crate::Err::Error(crate::error::Error { - input: (input, 8), - code: ErrorKind::Eof - })) - ) - } - - #[test] - fn test_take_span_over_multiple_bytes() { - let input = [0b00010010, 0b00110100, 0b11111111, 0b11111111].as_ref(); - - let result: crate::IResult<(&[u8], usize), usize> = take(24usize)((input, 4)); - - assert_eq!( - result, - Ok((([0b11111111].as_ref(), 4), 0b1000110100111111111111)) - ); - } - - #[test] - fn test_bool_0() { - let input = [0b10000000].as_ref(); - - let result: crate::IResult<(&[u8], usize), bool> = bool((input, 0)); - - assert_eq!(result, Ok(((input, 1), true))); - } - - #[test] - fn test_bool_eof() { - let input = [0b10000000].as_ref(); - - let result: crate::IResult<(&[u8], usize), bool> = bool((input, 8)); - - assert_eq!( - result, - Err(crate::Err::Error(crate::error::Error { - input: (input, 8), - code: ErrorKind::Eof - })) - ); - } -} diff --git a/vendor/nom/src/bits/mod.rs b/vendor/nom/src/bits/mod.rs deleted file mode 100644 index 0d3f73db25d1a8..00000000000000 --- a/vendor/nom/src/bits/mod.rs +++ /dev/null @@ -1,179 +0,0 @@ -//! Bit level parsers -//! - -pub mod complete; -pub mod streaming; - -use crate::error::{ErrorKind, ParseError}; -use crate::internal::{Err, IResult, Needed, Parser}; -use crate::lib::std::ops::RangeFrom; -use crate::traits::{ErrorConvert, Slice}; - -/// Converts a byte-level input to a bit-level input, for consumption by a parser that uses bits. -/// -/// Afterwards, the input is converted back to a byte-level parser, with any remaining bits thrown -/// away. 
-/// -/// # Example -/// ``` -/// use nom::bits::{bits, streaming::take}; -/// use nom::error::Error; -/// use nom::sequence::tuple; -/// use nom::IResult; -/// -/// fn parse(input: &[u8]) -> IResult<&[u8], (u8, u8)> { -/// bits::<_, _, Error<(&[u8], usize)>, _, _>(tuple((take(4usize), take(8usize))))(input) -/// } -/// -/// let input = &[0x12, 0x34, 0xff, 0xff]; -/// -/// let output = parse(input).expect("We take 1.5 bytes and the input is longer than 2 bytes"); -/// -/// // The first byte is consumed, the second byte is partially consumed and dropped. -/// let remaining = output.0; -/// assert_eq!(remaining, [0xff, 0xff]); -/// -/// let parsed = output.1; -/// assert_eq!(parsed.0, 0x01); -/// assert_eq!(parsed.1, 0x23); -/// ``` -pub fn bits(mut parser: P) -> impl FnMut(I) -> IResult -where - E1: ParseError<(I, usize)> + ErrorConvert, - E2: ParseError, - I: Slice>, - P: Parser<(I, usize), O, E1>, -{ - move |input: I| match parser.parse((input, 0)) { - Ok(((rest, offset), result)) => { - // If the next byte has been partially read, it will be sliced away as well. - // The parser functions might already slice away all fully read bytes. - // That's why `offset / 8` isn't necessarily needed at all times. - let remaining_bytes_index = offset / 8 + if offset % 8 == 0 { 0 } else { 1 }; - Ok((rest.slice(remaining_bytes_index..), result)) - } - Err(Err::Incomplete(n)) => Err(Err::Incomplete(n.map(|u| u.get() / 8 + 1))), - Err(Err::Error(e)) => Err(Err::Error(e.convert())), - Err(Err::Failure(e)) => Err(Err::Failure(e.convert())), - } -} - -/// Counterpart to `bits`, `bytes` transforms its bit stream input into a byte slice for the underlying -/// parser, allowing byte-slice parsers to work on bit streams. -/// -/// A partial byte remaining in the input will be ignored and the given parser will start parsing -/// at the next full byte. -/// -/// ``` -/// use nom::bits::{bits, bytes, streaming::take}; -/// use nom::combinator::rest; -/// use nom::error::Error; -/// use nom::sequence::tuple; -/// use nom::IResult; -/// -/// fn parse(input: &[u8]) -> IResult<&[u8], (u8, u8, &[u8])> { -/// bits::<_, _, Error<(&[u8], usize)>, _, _>(tuple(( -/// take(4usize), -/// take(8usize), -/// bytes::<_, _, Error<&[u8]>, _, _>(rest) -/// )))(input) -/// } -/// -/// let input = &[0x12, 0x34, 0xff, 0xff]; -/// -/// assert_eq!(parse( input ), Ok(( &[][..], (0x01, 0x23, &[0xff, 0xff][..]) ))); -/// ``` -pub fn bytes(mut parser: P) -> impl FnMut((I, usize)) -> IResult<(I, usize), O, E2> -where - E1: ParseError + ErrorConvert, - E2: ParseError<(I, usize)>, - I: Slice> + Clone, - P: Parser, -{ - move |(input, offset): (I, usize)| { - let inner = if offset % 8 != 0 { - input.slice((1 + offset / 8)..) - } else { - input.slice((offset / 8)..) 
- }; - let i = (input, offset); - match parser.parse(inner) { - Ok((rest, res)) => Ok(((rest, 0), res)), - Err(Err::Incomplete(Needed::Unknown)) => Err(Err::Incomplete(Needed::Unknown)), - Err(Err::Incomplete(Needed::Size(sz))) => Err(match sz.get().checked_mul(8) { - Some(v) => Err::Incomplete(Needed::new(v)), - None => Err::Failure(E2::from_error_kind(i, ErrorKind::TooLarge)), - }), - Err(Err::Error(e)) => Err(Err::Error(e.convert())), - Err(Err::Failure(e)) => Err(Err::Failure(e.convert())), - } - } -} - -#[cfg(test)] -mod test { - use super::*; - use crate::bits::streaming::take; - use crate::error::Error; - use crate::sequence::tuple; - - #[test] - /// Take the `bits` function and assert that remaining bytes are correctly returned, if the - /// previous bytes are fully consumed - fn test_complete_byte_consumption_bits() { - let input = &[0x12, 0x34, 0x56, 0x78]; - - // Take 3 bit slices with sizes [4, 8, 4]. - let result: IResult<&[u8], (u8, u8, u8)> = - bits::<_, _, Error<(&[u8], usize)>, _, _>(tuple((take(4usize), take(8usize), take(4usize))))( - input, - ); - - let output = result.expect("We take 2 bytes and the input is longer than 2 bytes"); - - let remaining = output.0; - assert_eq!(remaining, [0x56, 0x78]); - - let parsed = output.1; - assert_eq!(parsed.0, 0x01); - assert_eq!(parsed.1, 0x23); - assert_eq!(parsed.2, 0x04); - } - - #[test] - /// Take the `bits` function and assert that remaining bytes are correctly returned, if the - /// previous bytes are NOT fully consumed. Partially consumed bytes are supposed to be dropped. - /// I.e. if we consume 1.5 bytes of 4 bytes, 2 bytes will be returned, bits 13-16 will be - /// dropped. - fn test_partial_byte_consumption_bits() { - let input = &[0x12, 0x34, 0x56, 0x78]; - - // Take bit slices with sizes [4, 8]. - let result: IResult<&[u8], (u8, u8)> = - bits::<_, _, Error<(&[u8], usize)>, _, _>(tuple((take(4usize), take(8usize))))(input); - - let output = result.expect("We take 1.5 bytes and the input is longer than 2 bytes"); - - let remaining = output.0; - assert_eq!(remaining, [0x56, 0x78]); - - let parsed = output.1; - assert_eq!(parsed.0, 0x01); - assert_eq!(parsed.1, 0x23); - } - - #[test] - #[cfg(feature = "std")] - /// Ensure that in Incomplete error is thrown, if too few bytes are passed for a given parser. - fn test_incomplete_bits() { - let input = &[0x12]; - - // Take bit slices with sizes [4, 8]. - let result: IResult<&[u8], (u8, u8)> = - bits::<_, _, Error<(&[u8], usize)>, _, _>(tuple((take(4usize), take(8usize))))(input); - - assert!(result.is_err()); - let error = result.err().unwrap(); - assert_eq!("Parsing requires 2 bytes/chars", error.to_string()); - } -} diff --git a/vendor/nom/src/bits/streaming.rs b/vendor/nom/src/bits/streaming.rs deleted file mode 100644 index a7c8d0a67b9f26..00000000000000 --- a/vendor/nom/src/bits/streaming.rs +++ /dev/null @@ -1,170 +0,0 @@ -//! Bit level parsers -//! 
- -use crate::error::{ErrorKind, ParseError}; -use crate::internal::{Err, IResult, Needed}; -use crate::lib::std::ops::{AddAssign, Div, RangeFrom, Shl, Shr}; -use crate::traits::{InputIter, InputLength, Slice, ToUsize}; - -/// Generates a parser taking `count` bits -pub fn take>( - count: C, -) -> impl Fn((I, usize)) -> IResult<(I, usize), O, E> -where - I: Slice> + InputIter + InputLength, - C: ToUsize, - O: From + AddAssign + Shl + Shr, -{ - let count = count.to_usize(); - move |(input, bit_offset): (I, usize)| { - if count == 0 { - Ok(((input, bit_offset), 0u8.into())) - } else { - let cnt = (count + bit_offset).div(8); - if input.input_len() * 8 < count + bit_offset { - Err(Err::Incomplete(Needed::new(count as usize))) - } else { - let mut acc: O = 0_u8.into(); - let mut offset: usize = bit_offset; - let mut remaining: usize = count; - let mut end_offset: usize = 0; - - for byte in input.iter_elements().take(cnt + 1) { - if remaining == 0 { - break; - } - let val: O = if offset == 0 { - byte.into() - } else { - ((byte << offset) as u8 >> offset).into() - }; - - if remaining < 8 - offset { - acc += val >> (8 - offset - remaining); - end_offset = remaining + offset; - break; - } else { - acc += val << (remaining - (8 - offset)); - remaining -= 8 - offset; - offset = 0; - } - } - Ok(((input.slice(cnt..), end_offset), acc)) - } - } - } -} - -/// Generates a parser taking `count` bits and comparing them to `pattern` -pub fn tag>( - pattern: O, - count: C, -) -> impl Fn((I, usize)) -> IResult<(I, usize), O, E> -where - I: Slice> + InputIter + InputLength + Clone, - C: ToUsize, - O: From + AddAssign + Shl + Shr + PartialEq, -{ - let count = count.to_usize(); - move |input: (I, usize)| { - let inp = input.clone(); - - take(count)(input).and_then(|(i, o)| { - if pattern == o { - Ok((i, o)) - } else { - Err(Err::Error(error_position!(inp, ErrorKind::TagBits))) - } - }) - } -} - -/// Parses one specific bit as a bool. 
-/// -/// # Example -/// ```rust -/// # use nom::bits::complete::bool; -/// # use nom::IResult; -/// # use nom::error::{Error, ErrorKind}; -/// -/// fn parse(input: (&[u8], usize)) -> IResult<(&[u8], usize), bool> { -/// bool(input) -/// } -/// -/// assert_eq!(parse(([0b10000000].as_ref(), 0)), Ok((([0b10000000].as_ref(), 1), true))); -/// assert_eq!(parse(([0b10000000].as_ref(), 1)), Ok((([0b10000000].as_ref(), 2), false))); -/// ``` -pub fn bool>(input: (I, usize)) -> IResult<(I, usize), bool, E> -where - I: Slice> + InputIter + InputLength, -{ - let (res, bit): (_, u32) = take(1usize)(input)?; - Ok((res, bit != 0)) -} - -#[cfg(test)] -mod test { - use super::*; - - #[test] - fn test_take_0() { - let input = [].as_ref(); - let count = 0usize; - assert_eq!(count, 0usize); - let offset = 0usize; - - let result: crate::IResult<(&[u8], usize), usize> = take(count)((input, offset)); - - assert_eq!(result, Ok(((input, offset), 0))); - } - - #[test] - fn test_tag_ok() { - let input = [0b00011111].as_ref(); - let offset = 0usize; - let bits_to_take = 4usize; - let value_to_tag = 0b0001; - - let result: crate::IResult<(&[u8], usize), usize> = - tag(value_to_tag, bits_to_take)((input, offset)); - - assert_eq!(result, Ok(((input, bits_to_take), value_to_tag))); - } - - #[test] - fn test_tag_err() { - let input = [0b00011111].as_ref(); - let offset = 0usize; - let bits_to_take = 4usize; - let value_to_tag = 0b1111; - - let result: crate::IResult<(&[u8], usize), usize> = - tag(value_to_tag, bits_to_take)((input, offset)); - - assert_eq!( - result, - Err(crate::Err::Error(crate::error::Error { - input: (input, offset), - code: ErrorKind::TagBits - })) - ); - } - - #[test] - fn test_bool_0() { - let input = [0b10000000].as_ref(); - - let result: crate::IResult<(&[u8], usize), bool> = bool((input, 0)); - - assert_eq!(result, Ok(((input, 1), true))); - } - - #[test] - fn test_bool_eof() { - let input = [0b10000000].as_ref(); - - let result: crate::IResult<(&[u8], usize), bool> = bool((input, 8)); - - assert_eq!(result, Err(crate::Err::Incomplete(Needed::new(1)))); - } -} diff --git a/vendor/nom/src/branch/mod.rs b/vendor/nom/src/branch/mod.rs deleted file mode 100644 index e03622cb0c833d..00000000000000 --- a/vendor/nom/src/branch/mod.rs +++ /dev/null @@ -1,267 +0,0 @@ -//! Choice combinators - -#[cfg(test)] -mod tests; - -use crate::error::ErrorKind; -use crate::error::ParseError; -use crate::internal::{Err, IResult, Parser}; - -/// Helper trait for the [alt()] combinator. -/// -/// This trait is implemented for tuples of up to 21 elements -pub trait Alt { - /// Tests each parser in the tuple and returns the result of the first one that succeeds - fn choice(&mut self, input: I) -> IResult; -} - -/// Tests a list of parsers one by one until one succeeds. -/// -/// It takes as argument a tuple of parsers. There is a maximum of 21 -/// parsers. 
If you need more, it is possible to nest them in other `alt` calls, -/// like this: `alt(parser_a, alt(parser_b, parser_c))` -/// -/// ```rust -/// # use nom::error_position; -/// # use nom::{Err,error::ErrorKind, Needed, IResult}; -/// use nom::character::complete::{alpha1, digit1}; -/// use nom::branch::alt; -/// # fn main() { -/// fn parser(input: &str) -> IResult<&str, &str> { -/// alt((alpha1, digit1))(input) -/// }; -/// -/// // the first parser, alpha1, recognizes the input -/// assert_eq!(parser("abc"), Ok(("", "abc"))); -/// -/// // the first parser returns an error, so alt tries the second one -/// assert_eq!(parser("123456"), Ok(("", "123456"))); -/// -/// // both parsers failed, and with the default error type, alt will return the last error -/// assert_eq!(parser(" "), Err(Err::Error(error_position!(" ", ErrorKind::Digit)))); -/// # } -/// ``` -/// -/// With a custom error type, it is possible to have alt return the error of the parser -/// that went the farthest in the input data -pub fn alt, List: Alt>( - mut l: List, -) -> impl FnMut(I) -> IResult { - move |i: I| l.choice(i) -} - -/// Helper trait for the [permutation()] combinator. -/// -/// This trait is implemented for tuples of up to 21 elements -pub trait Permutation { - /// Tries to apply all parsers in the tuple in various orders until all of them succeed - fn permutation(&mut self, input: I) -> IResult; -} - -/// Applies a list of parsers in any order. -/// -/// Permutation will succeed if all of the child parsers succeeded. -/// It takes as argument a tuple of parsers, and returns a -/// tuple of the parser results. -/// -/// ```rust -/// # use nom::{Err,error::{Error, ErrorKind}, Needed, IResult}; -/// use nom::character::complete::{alpha1, digit1}; -/// use nom::branch::permutation; -/// # fn main() { -/// fn parser(input: &str) -> IResult<&str, (&str, &str)> { -/// permutation((alpha1, digit1))(input) -/// } -/// -/// // permutation recognizes alphabetic characters then digit -/// assert_eq!(parser("abc123"), Ok(("", ("abc", "123")))); -/// -/// // but also in inverse order -/// assert_eq!(parser("123abc"), Ok(("", ("abc", "123")))); -/// -/// // it will fail if one of the parsers failed -/// assert_eq!(parser("abc;"), Err(Err::Error(Error::new(";", ErrorKind::Digit)))); -/// # } -/// ``` -/// -/// The parsers are applied greedily: if there are multiple unapplied parsers -/// that could parse the next slice of input, the first one is used. -/// ```rust -/// # use nom::{Err, error::{Error, ErrorKind}, IResult}; -/// use nom::branch::permutation; -/// use nom::character::complete::{anychar, char}; -/// -/// fn parser(input: &str) -> IResult<&str, (char, char)> { -/// permutation((anychar, char('a')))(input) -/// } -/// -/// // anychar parses 'b', then char('a') parses 'a' -/// assert_eq!(parser("ba"), Ok(("", ('b', 'a')))); -/// -/// // anychar parses 'a', then char('a') fails on 'b', -/// // even though char('a') followed by anychar would succeed -/// assert_eq!(parser("ab"), Err(Err::Error(Error::new("b", ErrorKind::Char)))); -/// ``` -/// -pub fn permutation, List: Permutation>( - mut l: List, -) -> impl FnMut(I) -> IResult { - move |i: I| l.permutation(i) -} - -macro_rules! 
alt_trait( - ($first:ident $second:ident $($id: ident)+) => ( - alt_trait!(__impl $first $second; $($id)+); - ); - (__impl $($current:ident)*; $head:ident $($id: ident)+) => ( - alt_trait_impl!($($current)*); - - alt_trait!(__impl $($current)* $head; $($id)+); - ); - (__impl $($current:ident)*; $head:ident) => ( - alt_trait_impl!($($current)*); - alt_trait_impl!($($current)* $head); - ); -); - -macro_rules! alt_trait_impl( - ($($id:ident)+) => ( - impl< - Input: Clone, Output, Error: ParseError, - $($id: Parser),+ - > Alt for ( $($id),+ ) { - - fn choice(&mut self, input: Input) -> IResult { - match self.0.parse(input.clone()) { - Err(Err::Error(e)) => alt_trait_inner!(1, self, input, e, $($id)+), - res => res, - } - } - } - ); -); - -macro_rules! alt_trait_inner( - ($it:tt, $self:expr, $input:expr, $err:expr, $head:ident $($id:ident)+) => ( - match $self.$it.parse($input.clone()) { - Err(Err::Error(e)) => { - let err = $err.or(e); - succ!($it, alt_trait_inner!($self, $input, err, $($id)+)) - } - res => res, - } - ); - ($it:tt, $self:expr, $input:expr, $err:expr, $head:ident) => ( - Err(Err::Error(Error::append($input, ErrorKind::Alt, $err))) - ); -); - -alt_trait!(A B C D E F G H I J K L M N O P Q R S T U); - -// Manually implement Alt for (A,), the 1-tuple type -impl, A: Parser> - Alt for (A,) -{ - fn choice(&mut self, input: Input) -> IResult { - self.0.parse(input) - } -} - -macro_rules! permutation_trait( - ( - $name1:ident $ty1:ident $item1:ident - $name2:ident $ty2:ident $item2:ident - $($name3:ident $ty3:ident $item3:ident)* - ) => ( - permutation_trait!(__impl $name1 $ty1 $item1, $name2 $ty2 $item2; $($name3 $ty3 $item3)*); - ); - ( - __impl $($name:ident $ty:ident $item:ident),+; - $name1:ident $ty1:ident $item1:ident $($name2:ident $ty2:ident $item2:ident)* - ) => ( - permutation_trait_impl!($($name $ty $item),+); - permutation_trait!(__impl $($name $ty $item),+ , $name1 $ty1 $item1; $($name2 $ty2 $item2)*); - ); - (__impl $($name:ident $ty:ident $item:ident),+;) => ( - permutation_trait_impl!($($name $ty $item),+); - ); -); - -macro_rules! permutation_trait_impl( - ($($name:ident $ty:ident $item:ident),+) => ( - impl< - Input: Clone, $($ty),+ , Error: ParseError, - $($name: Parser),+ - > Permutation for ( $($name),+ ) { - - fn permutation(&mut self, mut input: Input) -> IResult { - let mut res = ($(Option::<$ty>::None),+); - - loop { - let mut err: Option = None; - permutation_trait_inner!(0, self, input, res, err, $($name)+); - - // If we reach here, every iterator has either been applied before, - // or errored on the remaining input - if let Some(err) = err { - // There are remaining parsers, and all errored on the remaining input - return Err(Err::Error(Error::append(input, ErrorKind::Permutation, err))); - } - - // All parsers were applied - match res { - ($(Some($item)),+) => return Ok((input, ($($item),+))), - _ => unreachable!(), - } - } - } - } - ); -); - -macro_rules! 
permutation_trait_inner( - ($it:tt, $self:expr, $input:ident, $res:expr, $err:expr, $head:ident $($id:ident)*) => ( - if $res.$it.is_none() { - match $self.$it.parse($input.clone()) { - Ok((i, o)) => { - $input = i; - $res.$it = Some(o); - continue; - } - Err(Err::Error(e)) => { - $err = Some(match $err { - Some(err) => err.or(e), - None => e, - }); - } - Err(e) => return Err(e), - }; - } - succ!($it, permutation_trait_inner!($self, $input, $res, $err, $($id)*)); - ); - ($it:tt, $self:expr, $input:ident, $res:expr, $err:expr,) => (); -); - -permutation_trait!( - FnA A a - FnB B b - FnC C c - FnD D d - FnE E e - FnF F f - FnG G g - FnH H h - FnI I i - FnJ J j - FnK K k - FnL L l - FnM M m - FnN N n - FnO O o - FnP P p - FnQ Q q - FnR R r - FnS S s - FnT T t - FnU U u -); diff --git a/vendor/nom/src/branch/tests.rs b/vendor/nom/src/branch/tests.rs deleted file mode 100644 index ecd44407e93f92..00000000000000 --- a/vendor/nom/src/branch/tests.rs +++ /dev/null @@ -1,142 +0,0 @@ -use crate::branch::{alt, permutation}; -use crate::bytes::streaming::tag; -use crate::error::ErrorKind; -use crate::internal::{Err, IResult, Needed}; -#[cfg(feature = "alloc")] -use crate::{ - error::ParseError, - lib::std::{ - fmt::Debug, - string::{String, ToString}, - }, -}; - -#[cfg(feature = "alloc")] -#[derive(Debug, Clone, PartialEq)] -pub struct ErrorStr(String); - -#[cfg(feature = "alloc")] -impl From for ErrorStr { - fn from(i: u32) -> Self { - ErrorStr(format!("custom error code: {}", i)) - } -} - -#[cfg(feature = "alloc")] -impl<'a> From<&'a str> for ErrorStr { - fn from(i: &'a str) -> Self { - ErrorStr(format!("custom error message: {}", i)) - } -} - -#[cfg(feature = "alloc")] -impl ParseError for ErrorStr { - fn from_error_kind(input: I, kind: ErrorKind) -> Self { - ErrorStr(format!("custom error message: ({:?}, {:?})", input, kind)) - } - - fn append(input: I, kind: ErrorKind, other: Self) -> Self { - ErrorStr(format!( - "custom error message: ({:?}, {:?}) - {:?}", - input, kind, other - )) - } -} - -#[cfg(feature = "alloc")] -#[test] -fn alt_test() { - fn work(input: &[u8]) -> IResult<&[u8], &[u8], ErrorStr> { - Ok((&b""[..], input)) - } - - #[allow(unused_variables)] - fn dont_work(input: &[u8]) -> IResult<&[u8], &[u8], ErrorStr> { - Err(Err::Error(ErrorStr("abcd".to_string()))) - } - - fn work2(input: &[u8]) -> IResult<&[u8], &[u8], ErrorStr> { - Ok((input, &b""[..])) - } - - fn alt1(i: &[u8]) -> IResult<&[u8], &[u8], ErrorStr> { - alt((dont_work, dont_work))(i) - } - fn alt2(i: &[u8]) -> IResult<&[u8], &[u8], ErrorStr> { - alt((dont_work, work))(i) - } - fn alt3(i: &[u8]) -> IResult<&[u8], &[u8], ErrorStr> { - alt((dont_work, dont_work, work2, dont_work))(i) - } - //named!(alt1, alt!(dont_work | dont_work)); - //named!(alt2, alt!(dont_work | work)); - //named!(alt3, alt!(dont_work | dont_work | work2 | dont_work)); - - let a = &b"abcd"[..]; - assert_eq!( - alt1(a), - Err(Err::Error(error_node_position!( - a, - ErrorKind::Alt, - ErrorStr("abcd".to_string()) - ))) - ); - assert_eq!(alt2(a), Ok((&b""[..], a))); - assert_eq!(alt3(a), Ok((a, &b""[..]))); - - fn alt4(i: &[u8]) -> IResult<&[u8], &[u8]> { - alt((tag("abcd"), tag("efgh")))(i) - } - let b = &b"efgh"[..]; - assert_eq!(alt4(a), Ok((&b""[..], a))); - assert_eq!(alt4(b), Ok((&b""[..], b))); -} - -#[test] -fn alt_incomplete() { - fn alt1(i: &[u8]) -> IResult<&[u8], &[u8]> { - alt((tag("a"), tag("bc"), tag("def")))(i) - } - - let a = &b""[..]; - assert_eq!(alt1(a), Err(Err::Incomplete(Needed::new(1)))); - let a = &b"b"[..]; - assert_eq!(alt1(a), 
Err(Err::Incomplete(Needed::new(1)))); - let a = &b"bcd"[..]; - assert_eq!(alt1(a), Ok((&b"d"[..], &b"bc"[..]))); - let a = &b"cde"[..]; - assert_eq!(alt1(a), Err(Err::Error(error_position!(a, ErrorKind::Tag)))); - let a = &b"de"[..]; - assert_eq!(alt1(a), Err(Err::Incomplete(Needed::new(1)))); - let a = &b"defg"[..]; - assert_eq!(alt1(a), Ok((&b"g"[..], &b"def"[..]))); -} - -#[test] -fn permutation_test() { - fn perm(i: &[u8]) -> IResult<&[u8], (&[u8], &[u8], &[u8])> { - permutation((tag("abcd"), tag("efg"), tag("hi")))(i) - } - - let expected = (&b"abcd"[..], &b"efg"[..], &b"hi"[..]); - - let a = &b"abcdefghijk"[..]; - assert_eq!(perm(a), Ok((&b"jk"[..], expected))); - let b = &b"efgabcdhijk"[..]; - assert_eq!(perm(b), Ok((&b"jk"[..], expected))); - let c = &b"hiefgabcdjk"[..]; - assert_eq!(perm(c), Ok((&b"jk"[..], expected))); - - let d = &b"efgxyzabcdefghi"[..]; - assert_eq!( - perm(d), - Err(Err::Error(error_node_position!( - &b"efgxyzabcdefghi"[..], - ErrorKind::Permutation, - error_position!(&b"xyzabcdefghi"[..], ErrorKind::Tag) - ))) - ); - - let e = &b"efgabc"[..]; - assert_eq!(perm(e), Err(Err::Incomplete(Needed::new(1)))); -} diff --git a/vendor/nom/src/bytes/complete.rs b/vendor/nom/src/bytes/complete.rs deleted file mode 100644 index a5442b53f7428b..00000000000000 --- a/vendor/nom/src/bytes/complete.rs +++ /dev/null @@ -1,756 +0,0 @@ -//! Parsers recognizing bytes streams, complete input version - -use crate::error::ErrorKind; -use crate::error::ParseError; -use crate::internal::{Err, IResult, Parser}; -use crate::lib::std::ops::RangeFrom; -use crate::lib::std::result::Result::*; -use crate::traits::{ - Compare, CompareResult, FindSubstring, FindToken, InputIter, InputLength, InputTake, - InputTakeAtPosition, Slice, ToUsize, -}; - -/// Recognizes a pattern -/// -/// The input data will be compared to the tag combinator's argument and will return the part of -/// the input that matches the argument -/// -/// It will return `Err(Err::Error((_, ErrorKind::Tag)))` if the input doesn't match the pattern -/// # Example -/// ```rust -/// # use nom::{Err, error::{Error, ErrorKind}, Needed, IResult}; -/// use nom::bytes::complete::tag; -/// -/// fn parser(s: &str) -> IResult<&str, &str> { -/// tag("Hello")(s) -/// } -/// -/// assert_eq!(parser("Hello, World!"), Ok((", World!", "Hello"))); -/// assert_eq!(parser("Something"), Err(Err::Error(Error::new("Something", ErrorKind::Tag)))); -/// assert_eq!(parser(""), Err(Err::Error(Error::new("", ErrorKind::Tag)))); -/// ``` -pub fn tag>( - tag: T, -) -> impl Fn(Input) -> IResult -where - Input: InputTake + Compare, - T: InputLength + Clone, -{ - move |i: Input| { - let tag_len = tag.input_len(); - let t = tag.clone(); - let res: IResult<_, _, Error> = match i.compare(t) { - CompareResult::Ok => Ok(i.take_split(tag_len)), - _ => { - let e: ErrorKind = ErrorKind::Tag; - Err(Err::Error(Error::from_error_kind(i, e))) - } - }; - res - } -} - -/// Recognizes a case insensitive pattern. -/// -/// The input data will be compared to the tag combinator's argument and will return the part of -/// the input that matches the argument with no regard to case. -/// -/// It will return `Err(Err::Error((_, ErrorKind::Tag)))` if the input doesn't match the pattern. 
-/// # Example -/// ```rust -/// # use nom::{Err, error::{Error, ErrorKind}, Needed, IResult}; -/// use nom::bytes::complete::tag_no_case; -/// -/// fn parser(s: &str) -> IResult<&str, &str> { -/// tag_no_case("hello")(s) -/// } -/// -/// assert_eq!(parser("Hello, World!"), Ok((", World!", "Hello"))); -/// assert_eq!(parser("hello, World!"), Ok((", World!", "hello"))); -/// assert_eq!(parser("HeLlO, World!"), Ok((", World!", "HeLlO"))); -/// assert_eq!(parser("Something"), Err(Err::Error(Error::new("Something", ErrorKind::Tag)))); -/// assert_eq!(parser(""), Err(Err::Error(Error::new("", ErrorKind::Tag)))); -/// ``` -pub fn tag_no_case>( - tag: T, -) -> impl Fn(Input) -> IResult -where - Input: InputTake + Compare, - T: InputLength + Clone, -{ - move |i: Input| { - let tag_len = tag.input_len(); - let t = tag.clone(); - - let res: IResult<_, _, Error> = match (i).compare_no_case(t) { - CompareResult::Ok => Ok(i.take_split(tag_len)), - _ => { - let e: ErrorKind = ErrorKind::Tag; - Err(Err::Error(Error::from_error_kind(i, e))) - } - }; - res - } -} - -/// Parse till certain characters are met. -/// -/// The parser will return the longest slice till one of the characters of the combinator's argument are met. -/// -/// It doesn't consume the matched character. -/// -/// It will return a `Err::Error(("", ErrorKind::IsNot))` if the pattern wasn't met. -/// # Example -/// ```rust -/// # use nom::{Err, error::{Error, ErrorKind}, Needed, IResult}; -/// use nom::bytes::complete::is_not; -/// -/// fn not_space(s: &str) -> IResult<&str, &str> { -/// is_not(" \t\r\n")(s) -/// } -/// -/// assert_eq!(not_space("Hello, World!"), Ok((" World!", "Hello,"))); -/// assert_eq!(not_space("Sometimes\t"), Ok(("\t", "Sometimes"))); -/// assert_eq!(not_space("Nospace"), Ok(("", "Nospace"))); -/// assert_eq!(not_space(""), Err(Err::Error(Error::new("", ErrorKind::IsNot)))); -/// ``` -pub fn is_not>( - arr: T, -) -> impl Fn(Input) -> IResult -where - Input: InputTakeAtPosition, - T: FindToken<::Item>, -{ - move |i: Input| { - let e: ErrorKind = ErrorKind::IsNot; - i.split_at_position1_complete(|c| arr.find_token(c), e) - } -} - -/// Returns the longest slice of the matches the pattern. -/// -/// The parser will return the longest slice consisting of the characters in provided in the -/// combinator's argument. -/// -/// It will return a `Err(Err::Error((_, ErrorKind::IsA)))` if the pattern wasn't met. -/// # Example -/// ```rust -/// # use nom::{Err, error::{Error, ErrorKind}, Needed, IResult}; -/// use nom::bytes::complete::is_a; -/// -/// fn hex(s: &str) -> IResult<&str, &str> { -/// is_a("1234567890ABCDEF")(s) -/// } -/// -/// assert_eq!(hex("123 and voila"), Ok((" and voila", "123"))); -/// assert_eq!(hex("DEADBEEF and others"), Ok((" and others", "DEADBEEF"))); -/// assert_eq!(hex("BADBABEsomething"), Ok(("something", "BADBABE"))); -/// assert_eq!(hex("D15EA5E"), Ok(("", "D15EA5E"))); -/// assert_eq!(hex(""), Err(Err::Error(Error::new("", ErrorKind::IsA)))); -/// ``` -pub fn is_a>( - arr: T, -) -> impl Fn(Input) -> IResult -where - Input: InputTakeAtPosition, - T: FindToken<::Item>, -{ - move |i: Input| { - let e: ErrorKind = ErrorKind::IsA; - i.split_at_position1_complete(|c| !arr.find_token(c), e) - } -} - -/// Returns the longest input slice (if any) that matches the predicate. -/// -/// The parser will return the longest slice that matches the given predicate *(a function that -/// takes the input and returns a bool)*. 
-/// # Example -/// ```rust -/// # use nom::{Err, error::ErrorKind, Needed, IResult}; -/// use nom::bytes::complete::take_while; -/// use nom::character::is_alphabetic; -/// -/// fn alpha(s: &[u8]) -> IResult<&[u8], &[u8]> { -/// take_while(is_alphabetic)(s) -/// } -/// -/// assert_eq!(alpha(b"latin123"), Ok((&b"123"[..], &b"latin"[..]))); -/// assert_eq!(alpha(b"12345"), Ok((&b"12345"[..], &b""[..]))); -/// assert_eq!(alpha(b"latin"), Ok((&b""[..], &b"latin"[..]))); -/// assert_eq!(alpha(b""), Ok((&b""[..], &b""[..]))); -/// ``` -pub fn take_while>( - cond: F, -) -> impl Fn(Input) -> IResult -where - Input: InputTakeAtPosition, - F: Fn(::Item) -> bool, -{ - move |i: Input| i.split_at_position_complete(|c| !cond(c)) -} - -/// Returns the longest (at least 1) input slice that matches the predicate. -/// -/// The parser will return the longest slice that matches the given predicate *(a function that -/// takes the input and returns a bool)*. -/// -/// It will return an `Err(Err::Error((_, ErrorKind::TakeWhile1)))` if the pattern wasn't met. -/// # Example -/// ```rust -/// # use nom::{Err, error::{Error, ErrorKind}, Needed, IResult}; -/// use nom::bytes::complete::take_while1; -/// use nom::character::is_alphabetic; -/// -/// fn alpha(s: &[u8]) -> IResult<&[u8], &[u8]> { -/// take_while1(is_alphabetic)(s) -/// } -/// -/// assert_eq!(alpha(b"latin123"), Ok((&b"123"[..], &b"latin"[..]))); -/// assert_eq!(alpha(b"latin"), Ok((&b""[..], &b"latin"[..]))); -/// assert_eq!(alpha(b"12345"), Err(Err::Error(Error::new(&b"12345"[..], ErrorKind::TakeWhile1)))); -/// ``` -pub fn take_while1>( - cond: F, -) -> impl Fn(Input) -> IResult -where - Input: InputTakeAtPosition, - F: Fn(::Item) -> bool, -{ - move |i: Input| { - let e: ErrorKind = ErrorKind::TakeWhile1; - i.split_at_position1_complete(|c| !cond(c), e) - } -} - -/// Returns the longest (m <= len <= n) input slice that matches the predicate. -/// -/// The parser will return the longest slice that matches the given predicate *(a function that -/// takes the input and returns a bool)*. -/// -/// It will return an `Err::Error((_, ErrorKind::TakeWhileMN))` if the pattern wasn't met or is out -/// of range (m <= len <= n). 
-/// # Example -/// ```rust -/// # use nom::{Err, error::{Error, ErrorKind}, Needed, IResult}; -/// use nom::bytes::complete::take_while_m_n; -/// use nom::character::is_alphabetic; -/// -/// fn short_alpha(s: &[u8]) -> IResult<&[u8], &[u8]> { -/// take_while_m_n(3, 6, is_alphabetic)(s) -/// } -/// -/// assert_eq!(short_alpha(b"latin123"), Ok((&b"123"[..], &b"latin"[..]))); -/// assert_eq!(short_alpha(b"lengthy"), Ok((&b"y"[..], &b"length"[..]))); -/// assert_eq!(short_alpha(b"latin"), Ok((&b""[..], &b"latin"[..]))); -/// assert_eq!(short_alpha(b"ed"), Err(Err::Error(Error::new(&b"ed"[..], ErrorKind::TakeWhileMN)))); -/// assert_eq!(short_alpha(b"12345"), Err(Err::Error(Error::new(&b"12345"[..], ErrorKind::TakeWhileMN)))); -/// ``` -pub fn take_while_m_n>( - m: usize, - n: usize, - cond: F, -) -> impl Fn(Input) -> IResult -where - Input: InputTake + InputIter + InputLength + Slice>, - F: Fn(::Item) -> bool, -{ - move |i: Input| { - let input = i; - - match input.position(|c| !cond(c)) { - Some(idx) => { - if idx >= m { - if idx <= n { - let res: IResult<_, _, Error> = if let Ok(index) = input.slice_index(idx) { - Ok(input.take_split(index)) - } else { - Err(Err::Error(Error::from_error_kind( - input, - ErrorKind::TakeWhileMN, - ))) - }; - res - } else { - let res: IResult<_, _, Error> = if let Ok(index) = input.slice_index(n) { - Ok(input.take_split(index)) - } else { - Err(Err::Error(Error::from_error_kind( - input, - ErrorKind::TakeWhileMN, - ))) - }; - res - } - } else { - let e = ErrorKind::TakeWhileMN; - Err(Err::Error(Error::from_error_kind(input, e))) - } - } - None => { - let len = input.input_len(); - if len >= n { - match input.slice_index(n) { - Ok(index) => Ok(input.take_split(index)), - Err(_needed) => Err(Err::Error(Error::from_error_kind( - input, - ErrorKind::TakeWhileMN, - ))), - } - } else if len >= m && len <= n { - let res: IResult<_, _, Error> = Ok((input.slice(len..), input)); - res - } else { - let e = ErrorKind::TakeWhileMN; - Err(Err::Error(Error::from_error_kind(input, e))) - } - } - } - } -} - -/// Returns the longest input slice (if any) till a predicate is met. -/// -/// The parser will return the longest slice till the given predicate *(a function that -/// takes the input and returns a bool)*. -/// # Example -/// ```rust -/// # use nom::{Err, error::ErrorKind, Needed, IResult}; -/// use nom::bytes::complete::take_till; -/// -/// fn till_colon(s: &str) -> IResult<&str, &str> { -/// take_till(|c| c == ':')(s) -/// } -/// -/// assert_eq!(till_colon("latin:123"), Ok((":123", "latin"))); -/// assert_eq!(till_colon(":empty matched"), Ok((":empty matched", ""))); //allowed -/// assert_eq!(till_colon("12345"), Ok(("", "12345"))); -/// assert_eq!(till_colon(""), Ok(("", ""))); -/// ``` -pub fn take_till>( - cond: F, -) -> impl Fn(Input) -> IResult -where - Input: InputTakeAtPosition, - F: Fn(::Item) -> bool, -{ - move |i: Input| i.split_at_position_complete(|c| cond(c)) -} - -/// Returns the longest (at least 1) input slice till a predicate is met. -/// -/// The parser will return the longest slice till the given predicate *(a function that -/// takes the input and returns a bool)*. -/// -/// It will return `Err(Err::Error((_, ErrorKind::TakeTill1)))` if the input is empty or the -/// predicate matches the first input. 
-/// # Example -/// ```rust -/// # use nom::{Err, error::{Error, ErrorKind}, Needed, IResult}; -/// use nom::bytes::complete::take_till1; -/// -/// fn till_colon(s: &str) -> IResult<&str, &str> { -/// take_till1(|c| c == ':')(s) -/// } -/// -/// assert_eq!(till_colon("latin:123"), Ok((":123", "latin"))); -/// assert_eq!(till_colon(":empty matched"), Err(Err::Error(Error::new(":empty matched", ErrorKind::TakeTill1)))); -/// assert_eq!(till_colon("12345"), Ok(("", "12345"))); -/// assert_eq!(till_colon(""), Err(Err::Error(Error::new("", ErrorKind::TakeTill1)))); -/// ``` -pub fn take_till1>( - cond: F, -) -> impl Fn(Input) -> IResult -where - Input: InputTakeAtPosition, - F: Fn(::Item) -> bool, -{ - move |i: Input| { - let e: ErrorKind = ErrorKind::TakeTill1; - i.split_at_position1_complete(|c| cond(c), e) - } -} - -/// Returns an input slice containing the first N input elements (Input[..N]). -/// -/// It will return `Err(Err::Error((_, ErrorKind::Eof)))` if the input is shorter than the argument. -/// # Example -/// ```rust -/// # use nom::{Err, error::{Error, ErrorKind}, Needed, IResult}; -/// use nom::bytes::complete::take; -/// -/// fn take6(s: &str) -> IResult<&str, &str> { -/// take(6usize)(s) -/// } -/// -/// assert_eq!(take6("1234567"), Ok(("7", "123456"))); -/// assert_eq!(take6("things"), Ok(("", "things"))); -/// assert_eq!(take6("short"), Err(Err::Error(Error::new("short", ErrorKind::Eof)))); -/// assert_eq!(take6(""), Err(Err::Error(Error::new("", ErrorKind::Eof)))); -/// ``` -/// -/// The units that are taken will depend on the input type. For example, for a -/// `&str` it will take a number of `char`'s, whereas for a `&[u8]` it will -/// take that many `u8`'s: -/// -/// ```rust -/// use nom::error::Error; -/// use nom::bytes::complete::take; -/// -/// assert_eq!(take::<_, _, Error<_>>(1usize)("💙"), Ok(("", "💙"))); -/// assert_eq!(take::<_, _, Error<_>>(1usize)("💙".as_bytes()), Ok((b"\x9F\x92\x99".as_ref(), b"\xF0".as_ref()))); -/// ``` -pub fn take>( - count: C, -) -> impl Fn(Input) -> IResult -where - Input: InputIter + InputTake, - C: ToUsize, -{ - let c = count.to_usize(); - move |i: Input| match i.slice_index(c) { - Err(_needed) => Err(Err::Error(Error::from_error_kind(i, ErrorKind::Eof))), - Ok(index) => Ok(i.take_split(index)), - } -} - -/// Returns the input slice up to the first occurrence of the pattern. -/// -/// It doesn't consume the pattern. It will return `Err(Err::Error((_, ErrorKind::TakeUntil)))` -/// if the pattern wasn't met. -/// # Example -/// ```rust -/// # use nom::{Err, error::{Error, ErrorKind}, Needed, IResult}; -/// use nom::bytes::complete::take_until; -/// -/// fn until_eof(s: &str) -> IResult<&str, &str> { -/// take_until("eof")(s) -/// } -/// -/// assert_eq!(until_eof("hello, worldeof"), Ok(("eof", "hello, world"))); -/// assert_eq!(until_eof("hello, world"), Err(Err::Error(Error::new("hello, world", ErrorKind::TakeUntil)))); -/// assert_eq!(until_eof(""), Err(Err::Error(Error::new("", ErrorKind::TakeUntil)))); -/// assert_eq!(until_eof("1eof2eof"), Ok(("eof2eof", "1"))); -/// ``` -pub fn take_until>( - tag: T, -) -> impl Fn(Input) -> IResult -where - Input: InputTake + FindSubstring, - T: InputLength + Clone, -{ - move |i: Input| { - let t = tag.clone(); - let res: IResult<_, _, Error> = match i.find_substring(t) { - None => Err(Err::Error(Error::from_error_kind(i, ErrorKind::TakeUntil))), - Some(index) => Ok(i.take_split(index)), - }; - res - } -} - -/// Returns the non empty input slice up to the first occurrence of the pattern. 
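Because `take_until` leaves the pattern in the remainder, it is usually paired with `tag` when the delimiter itself should be consumed as well. A minimal sketch using `terminated` from `nom::sequence`; `header_line` is an illustrative name, not part of nom.

```rust
use nom::bytes::complete::{tag, take_until};
use nom::sequence::terminated;
use nom::IResult;

// take_until stops in front of "\r\n"; the trailing tag then consumes it,
// so the delimiter never appears in either the output or the remainder.
fn header_line(i: &str) -> IResult<&str, &str> {
    terminated(take_until("\r\n"), tag("\r\n"))(i)
}

fn main() {
    assert_eq!(
        header_line("Host: example.org\r\nAccept: */*\r\n"),
        Ok(("Accept: */*\r\n", "Host: example.org"))
    );
}
```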
-/// -/// It doesn't consume the pattern. It will return `Err(Err::Error((_, ErrorKind::TakeUntil)))` -/// if the pattern wasn't met. -/// # Example -/// ```rust -/// # use nom::{Err, error::{Error, ErrorKind}, Needed, IResult}; -/// use nom::bytes::complete::take_until1; -/// -/// fn until_eof(s: &str) -> IResult<&str, &str> { -/// take_until1("eof")(s) -/// } -/// -/// assert_eq!(until_eof("hello, worldeof"), Ok(("eof", "hello, world"))); -/// assert_eq!(until_eof("hello, world"), Err(Err::Error(Error::new("hello, world", ErrorKind::TakeUntil)))); -/// assert_eq!(until_eof(""), Err(Err::Error(Error::new("", ErrorKind::TakeUntil)))); -/// assert_eq!(until_eof("1eof2eof"), Ok(("eof2eof", "1"))); -/// assert_eq!(until_eof("eof"), Err(Err::Error(Error::new("eof", ErrorKind::TakeUntil)))); -/// ``` -pub fn take_until1>( - tag: T, -) -> impl Fn(Input) -> IResult -where - Input: InputTake + FindSubstring, - T: InputLength + Clone, -{ - move |i: Input| { - let t = tag.clone(); - let res: IResult<_, _, Error> = match i.find_substring(t) { - None => Err(Err::Error(Error::from_error_kind(i, ErrorKind::TakeUntil))), - Some(0) => Err(Err::Error(Error::from_error_kind(i, ErrorKind::TakeUntil))), - Some(index) => Ok(i.take_split(index)), - }; - res - } -} - -/// Matches a byte string with escaped characters. -/// -/// * The first argument matches the normal characters (it must not accept the control character) -/// * The second argument is the control character (like `\` in most languages) -/// * The third argument matches the escaped characters -/// # Example -/// ``` -/// # use nom::{Err, error::ErrorKind, Needed, IResult}; -/// # use nom::character::complete::digit1; -/// use nom::bytes::complete::escaped; -/// use nom::character::complete::one_of; -/// -/// fn esc(s: &str) -> IResult<&str, &str> { -/// escaped(digit1, '\\', one_of(r#""n\"#))(s) -/// } -/// -/// assert_eq!(esc("123;"), Ok((";", "123"))); -/// assert_eq!(esc(r#"12\"34;"#), Ok((";", r#"12\"34"#))); -/// ``` -/// -pub fn escaped<'a, Input: 'a, Error, F, G, O1, O2>( - mut normal: F, - control_char: char, - mut escapable: G, -) -> impl FnMut(Input) -> IResult -where - Input: Clone - + crate::traits::Offset - + InputLength - + InputTake - + InputTakeAtPosition - + Slice> - + InputIter, - ::Item: crate::traits::AsChar, - F: Parser, - G: Parser, - Error: ParseError, -{ - use crate::traits::AsChar; - - move |input: Input| { - let mut i = input.clone(); - - while i.input_len() > 0 { - let current_len = i.input_len(); - - match normal.parse(i.clone()) { - Ok((i2, _)) => { - // return if we consumed everything or if the normal parser - // does not consume anything - if i2.input_len() == 0 { - return Ok((input.slice(input.input_len()..), input)); - } else if i2.input_len() == current_len { - let index = input.offset(&i2); - return Ok(input.take_split(index)); - } else { - i = i2; - } - } - Err(Err::Error(_)) => { - // unwrap() should be safe here since index < $i.input_len() - if i.iter_elements().next().unwrap().as_char() == control_char { - let next = control_char.len_utf8(); - if next >= i.input_len() { - return Err(Err::Error(Error::from_error_kind( - input, - ErrorKind::Escaped, - ))); - } else { - match escapable.parse(i.slice(next..)) { - Ok((i2, _)) => { - if i2.input_len() == 0 { - return Ok((input.slice(input.input_len()..), input)); - } else { - i = i2; - } - } - Err(e) => return Err(e), - } - } - } else { - let index = input.offset(&i); - if index == 0 { - return Err(Err::Error(Error::from_error_kind( - input, - ErrorKind::Escaped, - 
))); - } - return Ok(input.take_split(index)); - } - } - Err(e) => { - return Err(e); - } - } - } - - Ok((input.slice(input.input_len()..), input)) - } -} - -/// Matches a byte string with escaped characters. -/// -/// * The first argument matches the normal characters (it must not match the control character) -/// * The second argument is the control character (like `\` in most languages) -/// * The third argument matches the escaped characters and transforms them -/// -/// As an example, the chain `abc\tdef` could be `abc def` (it also consumes the control character) -/// -/// ``` -/// # use nom::{Err, error::ErrorKind, Needed, IResult}; -/// # use std::str::from_utf8; -/// use nom::bytes::complete::{escaped_transform, tag}; -/// use nom::character::complete::alpha1; -/// use nom::branch::alt; -/// use nom::combinator::value; -/// -/// fn parser(input: &str) -> IResult<&str, String> { -/// escaped_transform( -/// alpha1, -/// '\\', -/// alt(( -/// value("\\", tag("\\")), -/// value("\"", tag("\"")), -/// value("\n", tag("n")), -/// )) -/// )(input) -/// } -/// -/// assert_eq!(parser("ab\\\"cd"), Ok(("", String::from("ab\"cd")))); -/// assert_eq!(parser("ab\\ncd"), Ok(("", String::from("ab\ncd")))); -/// ``` -#[cfg(feature = "alloc")] -#[cfg_attr(feature = "docsrs", doc(cfg(feature = "alloc")))] -pub fn escaped_transform( - mut normal: F, - control_char: char, - mut transform: G, -) -> impl FnMut(Input) -> IResult -where - Input: Clone - + crate::traits::Offset - + InputLength - + InputTake - + InputTakeAtPosition - + Slice> - + InputIter, - Input: crate::traits::ExtendInto, - O1: crate::traits::ExtendInto, - O2: crate::traits::ExtendInto, - ::Item: crate::traits::AsChar, - F: Parser, - G: Parser, - Error: ParseError, -{ - use crate::traits::AsChar; - - move |input: Input| { - let mut index = 0; - let mut res = input.new_builder(); - - let i = input.clone(); - - while index < i.input_len() { - let current_len = i.input_len(); - let remainder = i.slice(index..); - match normal.parse(remainder.clone()) { - Ok((i2, o)) => { - o.extend_into(&mut res); - if i2.input_len() == 0 { - return Ok((i.slice(i.input_len()..), res)); - } else if i2.input_len() == current_len { - return Ok((remainder, res)); - } else { - index = input.offset(&i2); - } - } - Err(Err::Error(_)) => { - // unwrap() should be safe here since index < $i.input_len() - if remainder.iter_elements().next().unwrap().as_char() == control_char { - let next = index + control_char.len_utf8(); - let input_len = input.input_len(); - - if next >= input_len { - return Err(Err::Error(Error::from_error_kind( - remainder, - ErrorKind::EscapedTransform, - ))); - } else { - match transform.parse(i.slice(next..)) { - Ok((i2, o)) => { - o.extend_into(&mut res); - if i2.input_len() == 0 { - return Ok((i.slice(i.input_len()..), res)); - } else { - index = input.offset(&i2); - } - } - Err(e) => return Err(e), - } - } - } else { - if index == 0 { - return Err(Err::Error(Error::from_error_kind( - remainder, - ErrorKind::EscapedTransform, - ))); - } - return Ok((remainder, res)); - } - } - Err(e) => return Err(e), - } - } - Ok((input.slice(index..), res)) - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn complete_take_while_m_n_utf8_all_matching() { - let result: IResult<&str, &str> = - super::take_while_m_n(1, 4, |c: char| c.is_alphabetic())("øn"); - assert_eq!(result, Ok(("", "øn"))); - } - - #[test] - fn complete_take_while_m_n_utf8_all_matching_substring() { - let result: IResult<&str, &str> = - super::take_while_m_n(1, 1, |c: 
char| c.is_alphabetic())("øn"); - assert_eq!(result, Ok(("n", "ø"))); - } - - // issue #1336 "escaped hangs if normal parser accepts empty" - fn escaped_string(input: &str) -> IResult<&str, &str> { - use crate::character::complete::{alpha0, one_of}; - escaped(alpha0, '\\', one_of("n"))(input) - } - - // issue #1336 "escaped hangs if normal parser accepts empty" - #[test] - fn escaped_hang() { - escaped_string("7").unwrap(); - escaped_string("a7").unwrap(); - } - - // issue ##1118 escaped does not work with empty string - fn unquote<'a>(input: &'a str) -> IResult<&'a str, &'a str> { - use crate::bytes::complete::*; - use crate::character::complete::*; - use crate::combinator::opt; - use crate::sequence::delimited; - - delimited( - char('"'), - escaped(opt(none_of(r#"\""#)), '\\', one_of(r#"\"rnt"#)), - char('"'), - )(input) - } - - #[test] - fn escaped_hang_1118() { - assert_eq!(unquote(r#""""#), Ok(("", ""))); - } -} diff --git a/vendor/nom/src/bytes/mod.rs b/vendor/nom/src/bytes/mod.rs deleted file mode 100644 index 7bc2d15a79cb99..00000000000000 --- a/vendor/nom/src/bytes/mod.rs +++ /dev/null @@ -1,6 +0,0 @@ -//! Parsers recognizing bytes streams - -pub mod complete; -pub mod streaming; -#[cfg(test)] -mod tests; diff --git a/vendor/nom/src/bytes/streaming.rs b/vendor/nom/src/bytes/streaming.rs deleted file mode 100644 index e972760e21e47e..00000000000000 --- a/vendor/nom/src/bytes/streaming.rs +++ /dev/null @@ -1,700 +0,0 @@ -//! Parsers recognizing bytes streams, streaming version - -use crate::error::ErrorKind; -use crate::error::ParseError; -use crate::internal::{Err, IResult, Needed, Parser}; -use crate::lib::std::ops::RangeFrom; -use crate::lib::std::result::Result::*; -use crate::traits::{ - Compare, CompareResult, FindSubstring, FindToken, InputIter, InputLength, InputTake, - InputTakeAtPosition, Slice, ToUsize, -}; - -/// Recognizes a pattern. -/// -/// The input data will be compared to the tag combinator's argument and will return the part of -/// the input that matches the argument. -/// # Example -/// ```rust -/// # use nom::{Err, error::{Error, ErrorKind}, Needed, IResult}; -/// use nom::bytes::streaming::tag; -/// -/// fn parser(s: &str) -> IResult<&str, &str> { -/// tag("Hello")(s) -/// } -/// -/// assert_eq!(parser("Hello, World!"), Ok((", World!", "Hello"))); -/// assert_eq!(parser("Something"), Err(Err::Error(Error::new("Something", ErrorKind::Tag)))); -/// assert_eq!(parser("S"), Err(Err::Error(Error::new("S", ErrorKind::Tag)))); -/// assert_eq!(parser("H"), Err(Err::Incomplete(Needed::new(4)))); -/// ``` -pub fn tag>( - tag: T, -) -> impl Fn(Input) -> IResult -where - Input: InputTake + InputLength + Compare, - T: InputLength + Clone, -{ - move |i: Input| { - let tag_len = tag.input_len(); - let t = tag.clone(); - - let res: IResult<_, _, Error> = match i.compare(t) { - CompareResult::Ok => Ok(i.take_split(tag_len)), - CompareResult::Incomplete => Err(Err::Incomplete(Needed::new(tag_len - i.input_len()))), - CompareResult::Error => { - let e: ErrorKind = ErrorKind::Tag; - Err(Err::Error(Error::from_error_kind(i, e))) - } - }; - res - } -} - -/// Recognizes a case insensitive pattern. -/// -/// The input data will be compared to the tag combinator's argument and will return the part of -/// the input that matches the argument with no regard to case. 
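The only behavioral difference between the streaming `tag` above and `bytes::complete::tag` is what happens when the input ends in the middle of the pattern. A minimal sketch contrasting the two, assuming the nom 7 module paths:

```rust
use nom::bytes::{complete, streaming};
use nom::error::{Error, ErrorKind};
use nom::{Err, IResult, Needed};

fn main() {
    // Complete input: a partial match at the end of the data is a hard error.
    let complete_res: IResult<&str, &str> = complete::tag("Hello")("Hel");
    assert_eq!(
        complete_res,
        Err(Err::Error(Error::new("Hel", ErrorKind::Tag)))
    );

    // Streaming input: the same partial match only means "feed me more",
    // reported as Incomplete with the number of bytes still missing.
    let streaming_res: IResult<&str, &str> = streaming::tag("Hello")("Hel");
    assert_eq!(streaming_res, Err(Err::Incomplete(Needed::new(2))));
}
```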
-/// # Example -/// ```rust -/// # use nom::{Err, error::{Error, ErrorKind}, Needed, IResult}; -/// use nom::bytes::streaming::tag_no_case; -/// -/// fn parser(s: &str) -> IResult<&str, &str> { -/// tag_no_case("hello")(s) -/// } -/// -/// assert_eq!(parser("Hello, World!"), Ok((", World!", "Hello"))); -/// assert_eq!(parser("hello, World!"), Ok((", World!", "hello"))); -/// assert_eq!(parser("HeLlO, World!"), Ok((", World!", "HeLlO"))); -/// assert_eq!(parser("Something"), Err(Err::Error(Error::new("Something", ErrorKind::Tag)))); -/// assert_eq!(parser(""), Err(Err::Incomplete(Needed::new(5)))); -/// ``` -pub fn tag_no_case>( - tag: T, -) -> impl Fn(Input) -> IResult -where - Input: InputTake + InputLength + Compare, - T: InputLength + Clone, -{ - move |i: Input| { - let tag_len = tag.input_len(); - let t = tag.clone(); - - let res: IResult<_, _, Error> = match (i).compare_no_case(t) { - CompareResult::Ok => Ok(i.take_split(tag_len)), - CompareResult::Incomplete => Err(Err::Incomplete(Needed::new(tag_len - i.input_len()))), - CompareResult::Error => { - let e: ErrorKind = ErrorKind::Tag; - Err(Err::Error(Error::from_error_kind(i, e))) - } - }; - res - } -} - -/// Parse till certain characters are met. -/// -/// The parser will return the longest slice till one of the characters of the combinator's argument are met. -/// -/// It doesn't consume the matched character. -/// -/// It will return a `Err::Incomplete(Needed::new(1))` if the pattern wasn't met. -/// # Example -/// ```rust -/// # use nom::{Err, error::ErrorKind, Needed, IResult}; -/// use nom::bytes::streaming::is_not; -/// -/// fn not_space(s: &str) -> IResult<&str, &str> { -/// is_not(" \t\r\n")(s) -/// } -/// -/// assert_eq!(not_space("Hello, World!"), Ok((" World!", "Hello,"))); -/// assert_eq!(not_space("Sometimes\t"), Ok(("\t", "Sometimes"))); -/// assert_eq!(not_space("Nospace"), Err(Err::Incomplete(Needed::new(1)))); -/// assert_eq!(not_space(""), Err(Err::Incomplete(Needed::new(1)))); -/// ``` -pub fn is_not>( - arr: T, -) -> impl Fn(Input) -> IResult -where - Input: InputTakeAtPosition, - T: FindToken<::Item>, -{ - move |i: Input| { - let e: ErrorKind = ErrorKind::IsNot; - i.split_at_position1(|c| arr.find_token(c), e) - } -} - -/// Returns the longest slice of the matches the pattern. -/// -/// The parser will return the longest slice consisting of the characters in provided in the -/// combinator's argument. -/// -/// # Streaming specific -/// *Streaming version* will return a `Err::Incomplete(Needed::new(1))` if the pattern wasn't met -/// or if the pattern reaches the end of the input. -/// # Example -/// ```rust -/// # use nom::{Err, error::ErrorKind, Needed, IResult}; -/// use nom::bytes::streaming::is_a; -/// -/// fn hex(s: &str) -> IResult<&str, &str> { -/// is_a("1234567890ABCDEF")(s) -/// } -/// -/// assert_eq!(hex("123 and voila"), Ok((" and voila", "123"))); -/// assert_eq!(hex("DEADBEEF and others"), Ok((" and others", "DEADBEEF"))); -/// assert_eq!(hex("BADBABEsomething"), Ok(("something", "BADBABE"))); -/// assert_eq!(hex("D15EA5E"), Err(Err::Incomplete(Needed::new(1)))); -/// assert_eq!(hex(""), Err(Err::Incomplete(Needed::new(1)))); -/// ``` -pub fn is_a>( - arr: T, -) -> impl Fn(Input) -> IResult -where - Input: InputTakeAtPosition, - T: FindToken<::Item>, -{ - move |i: Input| { - let e: ErrorKind = ErrorKind::IsA; - i.split_at_position1(|c| !arr.find_token(c), e) - } -} - -/// Returns the longest input slice (if any) that matches the predicate. 
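The combinators in this streaming module share one signature shape, which lost its generic parameters above. For reference, a sketch of how `take_while` spells it out in nom 7.x (the vendored copy may differ in small details); the only difference from the complete version is the splitting primitive it delegates to.

```rust
use nom::error::ParseError;
use nom::{IResult, InputTakeAtPosition};

// Full generic signature of the streaming take_while in nom 7.x. `Input` is
// anything that can split itself at the first position where the predicate
// stops holding; the streaming split reports Incomplete at end of input
// instead of succeeding with the whole slice.
pub fn take_while<F, Input, Error: ParseError<Input>>(
    cond: F,
) -> impl Fn(Input) -> IResult<Input, Input, Error>
where
    Input: InputTakeAtPosition,
    F: Fn(<Input as InputTakeAtPosition>::Item) -> bool,
{
    move |i: Input| i.split_at_position(|c| !cond(c))
}
```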
-/// -/// The parser will return the longest slice that matches the given predicate *(a function that -/// takes the input and returns a bool)*. -/// -/// # Streaming Specific -/// *Streaming version* will return a `Err::Incomplete(Needed::new(1))` if the pattern reaches the end of the input. -/// # Example -/// ```rust -/// # use nom::{Err, error::ErrorKind, Needed, IResult}; -/// use nom::bytes::streaming::take_while; -/// use nom::character::is_alphabetic; -/// -/// fn alpha(s: &[u8]) -> IResult<&[u8], &[u8]> { -/// take_while(is_alphabetic)(s) -/// } -/// -/// assert_eq!(alpha(b"latin123"), Ok((&b"123"[..], &b"latin"[..]))); -/// assert_eq!(alpha(b"12345"), Ok((&b"12345"[..], &b""[..]))); -/// assert_eq!(alpha(b"latin"), Err(Err::Incomplete(Needed::new(1)))); -/// assert_eq!(alpha(b""), Err(Err::Incomplete(Needed::new(1)))); -/// ``` -pub fn take_while>( - cond: F, -) -> impl Fn(Input) -> IResult -where - Input: InputTakeAtPosition, - F: Fn(::Item) -> bool, -{ - move |i: Input| i.split_at_position(|c| !cond(c)) -} - -/// Returns the longest (at least 1) input slice that matches the predicate. -/// -/// The parser will return the longest slice that matches the given predicate *(a function that -/// takes the input and returns a bool)*. -/// -/// It will return an `Err(Err::Error((_, ErrorKind::TakeWhile1)))` if the pattern wasn't met. -/// -/// # Streaming Specific -/// *Streaming version* will return a `Err::Incomplete(Needed::new(1))` or if the pattern reaches the end of the input. -/// -/// # Example -/// ```rust -/// # use nom::{Err, error::{Error, ErrorKind}, Needed, IResult}; -/// use nom::bytes::streaming::take_while1; -/// use nom::character::is_alphabetic; -/// -/// fn alpha(s: &[u8]) -> IResult<&[u8], &[u8]> { -/// take_while1(is_alphabetic)(s) -/// } -/// -/// assert_eq!(alpha(b"latin123"), Ok((&b"123"[..], &b"latin"[..]))); -/// assert_eq!(alpha(b"latin"), Err(Err::Incomplete(Needed::new(1)))); -/// assert_eq!(alpha(b"12345"), Err(Err::Error(Error::new(&b"12345"[..], ErrorKind::TakeWhile1)))); -/// ``` -pub fn take_while1>( - cond: F, -) -> impl Fn(Input) -> IResult -where - Input: InputTakeAtPosition, - F: Fn(::Item) -> bool, -{ - move |i: Input| { - let e: ErrorKind = ErrorKind::TakeWhile1; - i.split_at_position1(|c| !cond(c), e) - } -} - -/// Returns the longest (m <= len <= n) input slice that matches the predicate. -/// -/// The parser will return the longest slice that matches the given predicate *(a function that -/// takes the input and returns a bool)*. -/// -/// It will return an `Err::Error((_, ErrorKind::TakeWhileMN))` if the pattern wasn't met. -/// # Streaming Specific -/// *Streaming version* will return a `Err::Incomplete(Needed::new(1))` if the pattern reaches the end of the input or is too short. 
-/// -/// # Example -/// ```rust -/// # use nom::{Err, error::{Error, ErrorKind}, Needed, IResult}; -/// use nom::bytes::streaming::take_while_m_n; -/// use nom::character::is_alphabetic; -/// -/// fn short_alpha(s: &[u8]) -> IResult<&[u8], &[u8]> { -/// take_while_m_n(3, 6, is_alphabetic)(s) -/// } -/// -/// assert_eq!(short_alpha(b"latin123"), Ok((&b"123"[..], &b"latin"[..]))); -/// assert_eq!(short_alpha(b"lengthy"), Ok((&b"y"[..], &b"length"[..]))); -/// assert_eq!(short_alpha(b"latin"), Err(Err::Incomplete(Needed::new(1)))); -/// assert_eq!(short_alpha(b"ed"), Err(Err::Incomplete(Needed::new(1)))); -/// assert_eq!(short_alpha(b"12345"), Err(Err::Error(Error::new(&b"12345"[..], ErrorKind::TakeWhileMN)))); -/// ``` -pub fn take_while_m_n>( - m: usize, - n: usize, - cond: F, -) -> impl Fn(Input) -> IResult -where - Input: InputTake + InputIter + InputLength, - F: Fn(::Item) -> bool, -{ - move |i: Input| { - let input = i; - - match input.position(|c| !cond(c)) { - Some(idx) => { - if idx >= m { - if idx <= n { - let res: IResult<_, _, Error> = if let Ok(index) = input.slice_index(idx) { - Ok(input.take_split(index)) - } else { - Err(Err::Error(Error::from_error_kind( - input, - ErrorKind::TakeWhileMN, - ))) - }; - res - } else { - let res: IResult<_, _, Error> = if let Ok(index) = input.slice_index(n) { - Ok(input.take_split(index)) - } else { - Err(Err::Error(Error::from_error_kind( - input, - ErrorKind::TakeWhileMN, - ))) - }; - res - } - } else { - let e = ErrorKind::TakeWhileMN; - Err(Err::Error(Error::from_error_kind(input, e))) - } - } - None => { - let len = input.input_len(); - if len >= n { - match input.slice_index(n) { - Ok(index) => Ok(input.take_split(index)), - Err(_needed) => Err(Err::Error(Error::from_error_kind( - input, - ErrorKind::TakeWhileMN, - ))), - } - } else { - let needed = if m > len { m - len } else { 1 }; - Err(Err::Incomplete(Needed::new(needed))) - } - } - } - } -} - -/// Returns the longest input slice (if any) till a predicate is met. -/// -/// The parser will return the longest slice till the given predicate *(a function that -/// takes the input and returns a bool)*. -/// -/// # Streaming Specific -/// *Streaming version* will return a `Err::Incomplete(Needed::new(1))` if the match reaches the -/// end of input or if there was not match. -/// -/// # Example -/// ```rust -/// # use nom::{Err, error::ErrorKind, Needed, IResult}; -/// use nom::bytes::streaming::take_till; -/// -/// fn till_colon(s: &str) -> IResult<&str, &str> { -/// take_till(|c| c == ':')(s) -/// } -/// -/// assert_eq!(till_colon("latin:123"), Ok((":123", "latin"))); -/// assert_eq!(till_colon(":empty matched"), Ok((":empty matched", ""))); //allowed -/// assert_eq!(till_colon("12345"), Err(Err::Incomplete(Needed::new(1)))); -/// assert_eq!(till_colon(""), Err(Err::Incomplete(Needed::new(1)))); -/// ``` -pub fn take_till>( - cond: F, -) -> impl Fn(Input) -> IResult -where - Input: InputTakeAtPosition, - F: Fn(::Item) -> bool, -{ - move |i: Input| i.split_at_position(|c| cond(c)) -} - -/// Returns the longest (at least 1) input slice till a predicate is met. -/// -/// The parser will return the longest slice till the given predicate *(a function that -/// takes the input and returns a bool)*. -/// -/// # Streaming Specific -/// *Streaming version* will return a `Err::Incomplete(Needed::new(1))` if the match reaches the -/// end of input or if there was not match. 
-/// # Example -/// ```rust -/// # use nom::{Err, error::{Error, ErrorKind}, Needed, IResult}; -/// use nom::bytes::streaming::take_till1; -/// -/// fn till_colon(s: &str) -> IResult<&str, &str> { -/// take_till1(|c| c == ':')(s) -/// } -/// -/// assert_eq!(till_colon("latin:123"), Ok((":123", "latin"))); -/// assert_eq!(till_colon(":empty matched"), Err(Err::Error(Error::new(":empty matched", ErrorKind::TakeTill1)))); -/// assert_eq!(till_colon("12345"), Err(Err::Incomplete(Needed::new(1)))); -/// assert_eq!(till_colon(""), Err(Err::Incomplete(Needed::new(1)))); -/// ``` -pub fn take_till1>( - cond: F, -) -> impl Fn(Input) -> IResult -where - Input: InputTakeAtPosition, - F: Fn(::Item) -> bool, -{ - move |i: Input| { - let e: ErrorKind = ErrorKind::TakeTill1; - i.split_at_position1(|c| cond(c), e) - } -} - -/// Returns an input slice containing the first N input elements (Input[..N]). -/// -/// # Streaming Specific -/// *Streaming version* if the input has less than N elements, `take` will -/// return a `Err::Incomplete(Needed::new(M))` where M is the number of -/// additional bytes the parser would need to succeed. -/// It is well defined for `&[u8]` as the number of elements is the byte size, -/// but for types like `&str`, we cannot know how many bytes correspond for -/// the next few chars, so the result will be `Err::Incomplete(Needed::Unknown)` -/// -/// # Example -/// ```rust -/// # use nom::{Err, error::ErrorKind, Needed, IResult}; -/// use nom::bytes::streaming::take; -/// -/// fn take6(s: &str) -> IResult<&str, &str> { -/// take(6usize)(s) -/// } -/// -/// assert_eq!(take6("1234567"), Ok(("7", "123456"))); -/// assert_eq!(take6("things"), Ok(("", "things"))); -/// assert_eq!(take6("short"), Err(Err::Incomplete(Needed::Unknown))); -/// ``` -pub fn take>( - count: C, -) -> impl Fn(Input) -> IResult -where - Input: InputIter + InputTake + InputLength, - C: ToUsize, -{ - let c = count.to_usize(); - move |i: Input| match i.slice_index(c) { - Err(i) => Err(Err::Incomplete(i)), - Ok(index) => Ok(i.take_split(index)), - } -} - -/// Returns the input slice up to the first occurrence of the pattern. -/// -/// It doesn't consume the pattern. -/// -/// # Streaming Specific -/// *Streaming version* will return a `Err::Incomplete(Needed::new(N))` if the input doesn't -/// contain the pattern or if the input is smaller than the pattern. -/// # Example -/// ```rust -/// # use nom::{Err, error::ErrorKind, Needed, IResult}; -/// use nom::bytes::streaming::take_until; -/// -/// fn until_eof(s: &str) -> IResult<&str, &str> { -/// take_until("eof")(s) -/// } -/// -/// assert_eq!(until_eof("hello, worldeof"), Ok(("eof", "hello, world"))); -/// assert_eq!(until_eof("hello, world"), Err(Err::Incomplete(Needed::Unknown))); -/// assert_eq!(until_eof("hello, worldeo"), Err(Err::Incomplete(Needed::Unknown))); -/// assert_eq!(until_eof("1eof2eof"), Ok(("eof2eof", "1"))); -/// ``` -pub fn take_until>( - tag: T, -) -> impl Fn(Input) -> IResult -where - Input: InputTake + InputLength + FindSubstring, - T: Clone, -{ - move |i: Input| { - let t = tag.clone(); - - let res: IResult<_, _, Error> = match i.find_substring(t) { - None => Err(Err::Incomplete(Needed::Unknown)), - Some(index) => Ok(i.take_split(index)), - }; - res - } -} - -/// Returns the non empty input slice up to the first occurrence of the pattern. -/// -/// It doesn't consume the pattern. 
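An `Incomplete` result from these streaming parsers is meant to drive a buffer-and-retry loop in the caller. A minimal sketch of that loop around the streaming `take_until` shown above; the input chunks are made up purely for illustration.

```rust
use nom::bytes::streaming::take_until;
use nom::{Err, IResult};

fn frame(i: &str) -> IResult<&str, &str> {
    take_until(";")(i)
}

fn main() {
    let chunks = ["hello, ", "world;rest"];
    let mut buffer = String::new();

    for chunk in chunks {
        buffer.push_str(chunk);
        match frame(&buffer) {
            // take_until does not consume the ";", so it stays in `rest`.
            Ok((rest, payload)) => {
                println!("payload = {payload:?}, rest = {rest:?}");
                return;
            }
            Err(Err::Incomplete(_)) => continue, // not enough data yet
            Err(e) => panic!("parse error: {e:?}"),
        }
    }
}
```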
-/// -/// # Streaming Specific -/// *Streaming version* will return a `Err::Incomplete(Needed::new(N))` if the input doesn't -/// contain the pattern or if the input is smaller than the pattern. -/// # Example -/// ```rust -/// # use nom::{Err, error::{Error, ErrorKind}, Needed, IResult}; -/// use nom::bytes::streaming::take_until1; -/// -/// fn until_eof(s: &str) -> IResult<&str, &str> { -/// take_until1("eof")(s) -/// } -/// -/// assert_eq!(until_eof("hello, worldeof"), Ok(("eof", "hello, world"))); -/// assert_eq!(until_eof("hello, world"), Err(Err::Incomplete(Needed::Unknown))); -/// assert_eq!(until_eof("hello, worldeo"), Err(Err::Incomplete(Needed::Unknown))); -/// assert_eq!(until_eof("1eof2eof"), Ok(("eof2eof", "1"))); -/// assert_eq!(until_eof("eof"), Err(Err::Error(Error::new("eof", ErrorKind::TakeUntil)))); -/// ``` -pub fn take_until1>( - tag: T, -) -> impl Fn(Input) -> IResult -where - Input: InputTake + InputLength + FindSubstring, - T: Clone, -{ - move |i: Input| { - let t = tag.clone(); - - let res: IResult<_, _, Error> = match i.find_substring(t) { - None => Err(Err::Incomplete(Needed::Unknown)), - Some(0) => Err(Err::Error(Error::from_error_kind(i, ErrorKind::TakeUntil))), - Some(index) => Ok(i.take_split(index)), - }; - res - } -} - -/// Matches a byte string with escaped characters. -/// -/// * The first argument matches the normal characters (it must not accept the control character) -/// * The second argument is the control character (like `\` in most languages) -/// * The third argument matches the escaped characters -/// # Example -/// ``` -/// # use nom::{Err, error::ErrorKind, Needed, IResult}; -/// # use nom::character::complete::digit1; -/// use nom::bytes::streaming::escaped; -/// use nom::character::streaming::one_of; -/// -/// fn esc(s: &str) -> IResult<&str, &str> { -/// escaped(digit1, '\\', one_of("\"n\\"))(s) -/// } -/// -/// assert_eq!(esc("123;"), Ok((";", "123"))); -/// assert_eq!(esc("12\\\"34;"), Ok((";", "12\\\"34"))); -/// ``` -/// -pub fn escaped( - mut normal: F, - control_char: char, - mut escapable: G, -) -> impl FnMut(Input) -> IResult -where - Input: Clone - + crate::traits::Offset - + InputLength - + InputTake - + InputTakeAtPosition - + Slice> - + InputIter, - ::Item: crate::traits::AsChar, - F: Parser, - G: Parser, - Error: ParseError, -{ - use crate::traits::AsChar; - - move |input: Input| { - let mut i = input.clone(); - - while i.input_len() > 0 { - let current_len = i.input_len(); - - match normal.parse(i.clone()) { - Ok((i2, _)) => { - if i2.input_len() == 0 { - return Err(Err::Incomplete(Needed::Unknown)); - } else if i2.input_len() == current_len { - let index = input.offset(&i2); - return Ok(input.take_split(index)); - } else { - i = i2; - } - } - Err(Err::Error(_)) => { - // unwrap() should be safe here since index < $i.input_len() - if i.iter_elements().next().unwrap().as_char() == control_char { - let next = control_char.len_utf8(); - if next >= i.input_len() { - return Err(Err::Incomplete(Needed::new(1))); - } else { - match escapable.parse(i.slice(next..)) { - Ok((i2, _)) => { - if i2.input_len() == 0 { - return Err(Err::Incomplete(Needed::Unknown)); - } else { - i = i2; - } - } - Err(e) => return Err(e), - } - } - } else { - let index = input.offset(&i); - return Ok(input.take_split(index)); - } - } - Err(e) => { - return Err(e); - } - } - } - - Err(Err::Incomplete(Needed::Unknown)) - } -} - -/// Matches a byte string with escaped characters. 
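`escaped` only recognizes the escaped region and returns it verbatim, while `escaped_transform` additionally rewrites each escape sequence. A minimal sketch contrasting the two on the same input; the complete variants are used to keep it short (the streaming ones differ only in their `Incomplete` handling), and `recognize_escaped`/`transform_escaped` are illustrative names.

```rust
use nom::bytes::complete::{escaped, escaped_transform, tag};
use nom::character::complete::alpha1;
use nom::combinator::value;
use nom::IResult;

// Recognize the escaped span as-is: the backslash and "n" stay in the output.
fn recognize_escaped(i: &str) -> IResult<&str, &str> {
    escaped(alpha1, '\\', tag("n"))(i)
}

// Rewrite each "\n" escape into an actual newline while parsing.
fn transform_escaped(i: &str) -> IResult<&str, String> {
    escaped_transform(alpha1, '\\', value("\n", tag("n")))(i)
}

fn main() {
    assert_eq!(recognize_escaped("ab\\ncd;"), Ok((";", "ab\\ncd")));
    assert_eq!(
        transform_escaped("ab\\ncd;"),
        Ok((";", String::from("ab\ncd")))
    );
}
```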
-/// -/// * The first argument matches the normal characters (it must not match the control character) -/// * The second argument is the control character (like `\` in most languages) -/// * The third argument matches the escaped characters and transforms them -/// -/// As an example, the chain `abc\tdef` could be `abc def` (it also consumes the control character) -/// -/// ``` -/// # use nom::{Err, error::ErrorKind, Needed, IResult}; -/// # use std::str::from_utf8; -/// use nom::bytes::streaming::{escaped_transform, tag}; -/// use nom::character::streaming::alpha1; -/// use nom::branch::alt; -/// use nom::combinator::value; -/// -/// fn parser(input: &str) -> IResult<&str, String> { -/// escaped_transform( -/// alpha1, -/// '\\', -/// alt(( -/// value("\\", tag("\\")), -/// value("\"", tag("\"")), -/// value("\n", tag("n")), -/// )) -/// )(input) -/// } -/// -/// assert_eq!(parser("ab\\\"cd\""), Ok(("\"", String::from("ab\"cd")))); -/// ``` -#[cfg(feature = "alloc")] -#[cfg_attr(feature = "docsrs", doc(cfg(feature = "alloc")))] -pub fn escaped_transform( - mut normal: F, - control_char: char, - mut transform: G, -) -> impl FnMut(Input) -> IResult -where - Input: Clone - + crate::traits::Offset - + InputLength - + InputTake - + InputTakeAtPosition - + Slice> - + InputIter, - Input: crate::traits::ExtendInto, - O1: crate::traits::ExtendInto, - O2: crate::traits::ExtendInto, - ::Item: crate::traits::AsChar, - F: Parser, - G: Parser, - Error: ParseError, -{ - use crate::traits::AsChar; - - move |input: Input| { - let mut index = 0; - let mut res = input.new_builder(); - - let i = input.clone(); - - while index < i.input_len() { - let current_len = i.input_len(); - let remainder = i.slice(index..); - match normal.parse(remainder.clone()) { - Ok((i2, o)) => { - o.extend_into(&mut res); - if i2.input_len() == 0 { - return Err(Err::Incomplete(Needed::Unknown)); - } else if i2.input_len() == current_len { - return Ok((remainder, res)); - } else { - index = input.offset(&i2); - } - } - Err(Err::Error(_)) => { - // unwrap() should be safe here since index < $i.input_len() - if remainder.iter_elements().next().unwrap().as_char() == control_char { - let next = index + control_char.len_utf8(); - let input_len = input.input_len(); - - if next >= input_len { - return Err(Err::Incomplete(Needed::Unknown)); - } else { - match transform.parse(i.slice(next..)) { - Ok((i2, o)) => { - o.extend_into(&mut res); - if i2.input_len() == 0 { - return Err(Err::Incomplete(Needed::Unknown)); - } else { - index = input.offset(&i2); - } - } - Err(e) => return Err(e), - } - } - } else { - return Ok((remainder, res)); - } - } - Err(e) => return Err(e), - } - } - Err(Err::Incomplete(Needed::Unknown)) - } -} diff --git a/vendor/nom/src/bytes/tests.rs b/vendor/nom/src/bytes/tests.rs deleted file mode 100644 index 159c4b4ffcc693..00000000000000 --- a/vendor/nom/src/bytes/tests.rs +++ /dev/null @@ -1,636 +0,0 @@ -use crate::character::is_alphabetic; -use crate::character::streaming::{ - alpha1 as alpha, alphanumeric1 as alphanumeric, digit1 as digit, hex_digit1 as hex_digit, - multispace1 as multispace, oct_digit1 as oct_digit, space1 as space, -}; -use crate::error::ErrorKind; -use crate::internal::{Err, IResult, Needed}; -#[cfg(feature = "alloc")] -use crate::{ - branch::alt, - bytes::complete::{escaped, escaped_transform, tag}, - combinator::{map, value}, - lib::std::string::String, - lib::std::vec::Vec, -}; - -#[test] -fn is_a() { - use crate::bytes::streaming::is_a; - - fn a_or_b(i: &[u8]) -> IResult<&[u8], &[u8]> { - 
is_a("ab")(i) - } - - let a = &b"abcd"[..]; - assert_eq!(a_or_b(a), Ok((&b"cd"[..], &b"ab"[..]))); - - let b = &b"bcde"[..]; - assert_eq!(a_or_b(b), Ok((&b"cde"[..], &b"b"[..]))); - - let c = &b"cdef"[..]; - assert_eq!( - a_or_b(c), - Err(Err::Error(error_position!(c, ErrorKind::IsA))) - ); - - let d = &b"bacdef"[..]; - assert_eq!(a_or_b(d), Ok((&b"cdef"[..], &b"ba"[..]))); -} - -#[test] -fn is_not() { - use crate::bytes::streaming::is_not; - - fn a_or_b(i: &[u8]) -> IResult<&[u8], &[u8]> { - is_not("ab")(i) - } - - let a = &b"cdab"[..]; - assert_eq!(a_or_b(a), Ok((&b"ab"[..], &b"cd"[..]))); - - let b = &b"cbde"[..]; - assert_eq!(a_or_b(b), Ok((&b"bde"[..], &b"c"[..]))); - - let c = &b"abab"[..]; - assert_eq!( - a_or_b(c), - Err(Err::Error(error_position!(c, ErrorKind::IsNot))) - ); - - let d = &b"cdefba"[..]; - assert_eq!(a_or_b(d), Ok((&b"ba"[..], &b"cdef"[..]))); - - let e = &b"e"[..]; - assert_eq!(a_or_b(e), Err(Err::Incomplete(Needed::new(1)))); -} - -#[cfg(feature = "alloc")] -#[allow(unused_variables)] -#[test] -fn escaping() { - use crate::character::streaming::one_of; - - fn esc(i: &[u8]) -> IResult<&[u8], &[u8]> { - escaped(alpha, '\\', one_of("\"n\\"))(i) - } - assert_eq!(esc(&b"abcd;"[..]), Ok((&b";"[..], &b"abcd"[..]))); - assert_eq!(esc(&b"ab\\\"cd;"[..]), Ok((&b";"[..], &b"ab\\\"cd"[..]))); - assert_eq!(esc(&b"\\\"abcd;"[..]), Ok((&b";"[..], &b"\\\"abcd"[..]))); - assert_eq!(esc(&b"\\n;"[..]), Ok((&b";"[..], &b"\\n"[..]))); - assert_eq!(esc(&b"ab\\\"12"[..]), Ok((&b"12"[..], &b"ab\\\""[..]))); - assert_eq!( - esc(&b"AB\\"[..]), - Err(Err::Error(error_position!( - &b"AB\\"[..], - ErrorKind::Escaped - ))) - ); - assert_eq!( - esc(&b"AB\\A"[..]), - Err(Err::Error(error_node_position!( - &b"AB\\A"[..], - ErrorKind::Escaped, - error_position!(&b"A"[..], ErrorKind::OneOf) - ))) - ); - - fn esc2(i: &[u8]) -> IResult<&[u8], &[u8]> { - escaped(digit, '\\', one_of("\"n\\"))(i) - } - assert_eq!(esc2(&b"12\\nnn34"[..]), Ok((&b"nn34"[..], &b"12\\n"[..]))); -} - -#[cfg(feature = "alloc")] -#[test] -fn escaping_str() { - use crate::character::streaming::one_of; - - fn esc(i: &str) -> IResult<&str, &str> { - escaped(alpha, '\\', one_of("\"n\\"))(i) - } - assert_eq!(esc("abcd;"), Ok((";", "abcd"))); - assert_eq!(esc("ab\\\"cd;"), Ok((";", "ab\\\"cd"))); - assert_eq!(esc("\\\"abcd;"), Ok((";", "\\\"abcd"))); - assert_eq!(esc("\\n;"), Ok((";", "\\n"))); - assert_eq!(esc("ab\\\"12"), Ok(("12", "ab\\\""))); - assert_eq!( - esc("AB\\"), - Err(Err::Error(error_position!("AB\\", ErrorKind::Escaped))) - ); - assert_eq!( - esc("AB\\A"), - Err(Err::Error(error_node_position!( - "AB\\A", - ErrorKind::Escaped, - error_position!("A", ErrorKind::OneOf) - ))) - ); - - fn esc2(i: &str) -> IResult<&str, &str> { - escaped(digit, '\\', one_of("\"n\\"))(i) - } - assert_eq!(esc2("12\\nnn34"), Ok(("nn34", "12\\n"))); - - fn esc3(i: &str) -> IResult<&str, &str> { - escaped(alpha, '\u{241b}', one_of("\"n"))(i) - } - assert_eq!(esc3("ab␛ncd;"), Ok((";", "ab␛ncd"))); -} - -#[cfg(feature = "alloc")] -fn to_s(i: Vec) -> String { - String::from_utf8_lossy(&i).into_owned() -} - -#[cfg(feature = "alloc")] -#[test] -fn escape_transform() { - fn esc(i: &[u8]) -> IResult<&[u8], String> { - map( - escaped_transform( - alpha, - '\\', - alt(( - value(&b"\\"[..], tag("\\")), - value(&b"\""[..], tag("\"")), - value(&b"\n"[..], tag("n")), - )), - ), - to_s, - )(i) - } - - assert_eq!(esc(&b"abcd;"[..]), Ok((&b";"[..], String::from("abcd")))); - assert_eq!( - esc(&b"ab\\\"cd;"[..]), - Ok((&b";"[..], String::from("ab\"cd"))) - ); - 
assert_eq!( - esc(&b"\\\"abcd;"[..]), - Ok((&b";"[..], String::from("\"abcd"))) - ); - assert_eq!(esc(&b"\\n;"[..]), Ok((&b";"[..], String::from("\n")))); - assert_eq!( - esc(&b"ab\\\"12"[..]), - Ok((&b"12"[..], String::from("ab\""))) - ); - assert_eq!( - esc(&b"AB\\"[..]), - Err(Err::Error(error_position!( - &b"\\"[..], - ErrorKind::EscapedTransform - ))) - ); - assert_eq!( - esc(&b"AB\\A"[..]), - Err(Err::Error(error_node_position!( - &b"AB\\A"[..], - ErrorKind::EscapedTransform, - error_position!(&b"A"[..], ErrorKind::Tag) - ))) - ); - - fn esc2(i: &[u8]) -> IResult<&[u8], String> { - map( - escaped_transform( - alpha, - '&', - alt(( - value("è".as_bytes(), tag("egrave;")), - value("à".as_bytes(), tag("agrave;")), - )), - ), - to_s, - )(i) - } - assert_eq!( - esc2(&b"abèDEF;"[..]), - Ok((&b";"[..], String::from("abèDEF"))) - ); - assert_eq!( - esc2(&b"abèDàEF;"[..]), - Ok((&b";"[..], String::from("abèDàEF"))) - ); -} - -#[cfg(feature = "std")] -#[test] -fn escape_transform_str() { - fn esc(i: &str) -> IResult<&str, String> { - escaped_transform( - alpha, - '\\', - alt(( - value("\\", tag("\\")), - value("\"", tag("\"")), - value("\n", tag("n")), - )), - )(i) - } - - assert_eq!(esc("abcd;"), Ok((";", String::from("abcd")))); - assert_eq!(esc("ab\\\"cd;"), Ok((";", String::from("ab\"cd")))); - assert_eq!(esc("\\\"abcd;"), Ok((";", String::from("\"abcd")))); - assert_eq!(esc("\\n;"), Ok((";", String::from("\n")))); - assert_eq!(esc("ab\\\"12"), Ok(("12", String::from("ab\"")))); - assert_eq!( - esc("AB\\"), - Err(Err::Error(error_position!( - "\\", - ErrorKind::EscapedTransform - ))) - ); - assert_eq!( - esc("AB\\A"), - Err(Err::Error(error_node_position!( - "AB\\A", - ErrorKind::EscapedTransform, - error_position!("A", ErrorKind::Tag) - ))) - ); - - fn esc2(i: &str) -> IResult<&str, String> { - escaped_transform( - alpha, - '&', - alt((value("è", tag("egrave;")), value("à", tag("agrave;")))), - )(i) - } - assert_eq!(esc2("abèDEF;"), Ok((";", String::from("abèDEF")))); - assert_eq!( - esc2("abèDàEF;"), - Ok((";", String::from("abèDàEF"))) - ); - - fn esc3(i: &str) -> IResult<&str, String> { - escaped_transform( - alpha, - '␛', - alt((value("\0", tag("0")), value("\n", tag("n")))), - )(i) - } - assert_eq!(esc3("a␛0bc␛n"), Ok(("", String::from("a\0bc\n")))); -} - -#[test] -fn take_until_incomplete() { - use crate::bytes::streaming::take_until; - fn y(i: &[u8]) -> IResult<&[u8], &[u8]> { - take_until("end")(i) - } - assert_eq!(y(&b"nd"[..]), Err(Err::Incomplete(Needed::Unknown))); - assert_eq!(y(&b"123"[..]), Err(Err::Incomplete(Needed::Unknown))); - assert_eq!(y(&b"123en"[..]), Err(Err::Incomplete(Needed::Unknown))); -} - -#[test] -fn take_until_incomplete_s() { - use crate::bytes::streaming::take_until; - fn ys(i: &str) -> IResult<&str, &str> { - take_until("end")(i) - } - assert_eq!(ys("123en"), Err(Err::Incomplete(Needed::Unknown))); -} - -#[test] -fn recognize() { - use crate::bytes::streaming::{tag, take}; - use crate::combinator::recognize; - use crate::sequence::delimited; - - fn x(i: &[u8]) -> IResult<&[u8], &[u8]> { - recognize(delimited(tag("")))(i) - } - let r = x(&b" aaa"[..]); - assert_eq!(r, Ok((&b" aaa"[..], &b""[..]))); - - let semicolon = &b";"[..]; - - fn ya(i: &[u8]) -> IResult<&[u8], &[u8]> { - recognize(alpha)(i) - } - let ra = ya(&b"abc;"[..]); - assert_eq!(ra, Ok((semicolon, &b"abc"[..]))); - - fn yd(i: &[u8]) -> IResult<&[u8], &[u8]> { - recognize(digit)(i) - } - let rd = yd(&b"123;"[..]); - assert_eq!(rd, Ok((semicolon, &b"123"[..]))); - - fn yhd(i: &[u8]) -> 
IResult<&[u8], &[u8]> { - recognize(hex_digit)(i) - } - let rhd = yhd(&b"123abcDEF;"[..]); - assert_eq!(rhd, Ok((semicolon, &b"123abcDEF"[..]))); - - fn yod(i: &[u8]) -> IResult<&[u8], &[u8]> { - recognize(oct_digit)(i) - } - let rod = yod(&b"1234567;"[..]); - assert_eq!(rod, Ok((semicolon, &b"1234567"[..]))); - - fn yan(i: &[u8]) -> IResult<&[u8], &[u8]> { - recognize(alphanumeric)(i) - } - let ran = yan(&b"123abc;"[..]); - assert_eq!(ran, Ok((semicolon, &b"123abc"[..]))); - - fn ys(i: &[u8]) -> IResult<&[u8], &[u8]> { - recognize(space)(i) - } - let rs = ys(&b" \t;"[..]); - assert_eq!(rs, Ok((semicolon, &b" \t"[..]))); - - fn yms(i: &[u8]) -> IResult<&[u8], &[u8]> { - recognize(multispace)(i) - } - let rms = yms(&b" \t\r\n;"[..]); - assert_eq!(rms, Ok((semicolon, &b" \t\r\n"[..]))); -} - -#[test] -fn take_while() { - use crate::bytes::streaming::take_while; - - fn f(i: &[u8]) -> IResult<&[u8], &[u8]> { - take_while(is_alphabetic)(i) - } - let a = b""; - let b = b"abcd"; - let c = b"abcd123"; - let d = b"123"; - - assert_eq!(f(&a[..]), Err(Err::Incomplete(Needed::new(1)))); - assert_eq!(f(&b[..]), Err(Err::Incomplete(Needed::new(1)))); - assert_eq!(f(&c[..]), Ok((&d[..], &b[..]))); - assert_eq!(f(&d[..]), Ok((&d[..], &a[..]))); -} - -#[test] -fn take_while1() { - use crate::bytes::streaming::take_while1; - - fn f(i: &[u8]) -> IResult<&[u8], &[u8]> { - take_while1(is_alphabetic)(i) - } - let a = b""; - let b = b"abcd"; - let c = b"abcd123"; - let d = b"123"; - - assert_eq!(f(&a[..]), Err(Err::Incomplete(Needed::new(1)))); - assert_eq!(f(&b[..]), Err(Err::Incomplete(Needed::new(1)))); - assert_eq!(f(&c[..]), Ok((&b"123"[..], &b[..]))); - assert_eq!( - f(&d[..]), - Err(Err::Error(error_position!(&d[..], ErrorKind::TakeWhile1))) - ); -} - -#[test] -fn take_while_m_n() { - use crate::bytes::streaming::take_while_m_n; - - fn x(i: &[u8]) -> IResult<&[u8], &[u8]> { - take_while_m_n(2, 4, is_alphabetic)(i) - } - let a = b""; - let b = b"a"; - let c = b"abc"; - let d = b"abc123"; - let e = b"abcde"; - let f = b"123"; - - assert_eq!(x(&a[..]), Err(Err::Incomplete(Needed::new(2)))); - assert_eq!(x(&b[..]), Err(Err::Incomplete(Needed::new(1)))); - assert_eq!(x(&c[..]), Err(Err::Incomplete(Needed::new(1)))); - assert_eq!(x(&d[..]), Ok((&b"123"[..], &c[..]))); - assert_eq!(x(&e[..]), Ok((&b"e"[..], &b"abcd"[..]))); - assert_eq!( - x(&f[..]), - Err(Err::Error(error_position!(&f[..], ErrorKind::TakeWhileMN))) - ); -} - -#[test] -fn take_till() { - use crate::bytes::streaming::take_till; - - fn f(i: &[u8]) -> IResult<&[u8], &[u8]> { - take_till(is_alphabetic)(i) - } - let a = b""; - let b = b"abcd"; - let c = b"123abcd"; - let d = b"123"; - - assert_eq!(f(&a[..]), Err(Err::Incomplete(Needed::new(1)))); - assert_eq!(f(&b[..]), Ok((&b"abcd"[..], &b""[..]))); - assert_eq!(f(&c[..]), Ok((&b"abcd"[..], &b"123"[..]))); - assert_eq!(f(&d[..]), Err(Err::Incomplete(Needed::new(1)))); -} - -#[test] -fn take_till1() { - use crate::bytes::streaming::take_till1; - - fn f(i: &[u8]) -> IResult<&[u8], &[u8]> { - take_till1(is_alphabetic)(i) - } - let a = b""; - let b = b"abcd"; - let c = b"123abcd"; - let d = b"123"; - - assert_eq!(f(&a[..]), Err(Err::Incomplete(Needed::new(1)))); - assert_eq!( - f(&b[..]), - Err(Err::Error(error_position!(&b[..], ErrorKind::TakeTill1))) - ); - assert_eq!(f(&c[..]), Ok((&b"abcd"[..], &b"123"[..]))); - assert_eq!(f(&d[..]), Err(Err::Incomplete(Needed::new(1)))); -} - -#[test] -fn take_while_utf8() { - use crate::bytes::streaming::take_while; - - fn f(i: &str) -> IResult<&str, &str> { - 
take_while(|c| c != '點')(i) - } - - assert_eq!(f(""), Err(Err::Incomplete(Needed::new(1)))); - assert_eq!(f("abcd"), Err(Err::Incomplete(Needed::new(1)))); - assert_eq!(f("abcd點"), Ok(("點", "abcd"))); - assert_eq!(f("abcd點a"), Ok(("點a", "abcd"))); - - fn g(i: &str) -> IResult<&str, &str> { - take_while(|c| c == '點')(i) - } - - assert_eq!(g(""), Err(Err::Incomplete(Needed::new(1)))); - assert_eq!(g("點abcd"), Ok(("abcd", "點"))); - assert_eq!(g("點點點a"), Ok(("a", "點點點"))); -} - -#[test] -fn take_till_utf8() { - use crate::bytes::streaming::take_till; - - fn f(i: &str) -> IResult<&str, &str> { - take_till(|c| c == '點')(i) - } - - assert_eq!(f(""), Err(Err::Incomplete(Needed::new(1)))); - assert_eq!(f("abcd"), Err(Err::Incomplete(Needed::new(1)))); - assert_eq!(f("abcd點"), Ok(("點", "abcd"))); - assert_eq!(f("abcd點a"), Ok(("點a", "abcd"))); - - fn g(i: &str) -> IResult<&str, &str> { - take_till(|c| c != '點')(i) - } - - assert_eq!(g(""), Err(Err::Incomplete(Needed::new(1)))); - assert_eq!(g("點abcd"), Ok(("abcd", "點"))); - assert_eq!(g("點點點a"), Ok(("a", "點點點"))); -} - -#[test] -fn take_utf8() { - use crate::bytes::streaming::{take, take_while}; - - fn f(i: &str) -> IResult<&str, &str> { - take(3_usize)(i) - } - - assert_eq!(f(""), Err(Err::Incomplete(Needed::Unknown))); - assert_eq!(f("ab"), Err(Err::Incomplete(Needed::Unknown))); - assert_eq!(f("點"), Err(Err::Incomplete(Needed::Unknown))); - assert_eq!(f("ab點cd"), Ok(("cd", "ab點"))); - assert_eq!(f("a點bcd"), Ok(("cd", "a點b"))); - assert_eq!(f("a點b"), Ok(("", "a點b"))); - - fn g(i: &str) -> IResult<&str, &str> { - take_while(|c| c == '點')(i) - } - - assert_eq!(g(""), Err(Err::Incomplete(Needed::new(1)))); - assert_eq!(g("點abcd"), Ok(("abcd", "點"))); - assert_eq!(g("點點點a"), Ok(("a", "點點點"))); -} - -#[test] -fn take_while_m_n_utf8() { - use crate::bytes::streaming::take_while_m_n; - - fn parser(i: &str) -> IResult<&str, &str> { - take_while_m_n(1, 1, |c| c == 'A' || c == '😃')(i) - } - assert_eq!(parser("A!"), Ok(("!", "A"))); - assert_eq!(parser("😃!"), Ok(("!", "😃"))); -} - -#[test] -fn take_while_m_n_utf8_full_match() { - use crate::bytes::streaming::take_while_m_n; - - fn parser(i: &str) -> IResult<&str, &str> { - take_while_m_n(1, 1, |c: char| c.is_alphabetic())(i) - } - assert_eq!(parser("øn"), Ok(("n", "ø"))); -} - -#[test] -#[cfg(feature = "std")] -fn recognize_take_while() { - use crate::bytes::streaming::take_while; - use crate::character::is_alphanumeric; - use crate::combinator::recognize; - - fn x(i: &[u8]) -> IResult<&[u8], &[u8]> { - take_while(is_alphanumeric)(i) - } - fn y(i: &[u8]) -> IResult<&[u8], &[u8]> { - recognize(x)(i) - } - assert_eq!(x(&b"ab."[..]), Ok((&b"."[..], &b"ab"[..]))); - println!("X: {:?}", x(&b"ab"[..])); - assert_eq!(y(&b"ab."[..]), Ok((&b"."[..], &b"ab"[..]))); -} - -#[test] -fn length_bytes() { - use crate::{bytes::streaming::tag, multi::length_data, number::streaming::le_u8}; - - fn x(i: &[u8]) -> IResult<&[u8], &[u8]> { - length_data(le_u8)(i) - } - assert_eq!(x(b"\x02..>>"), Ok((&b">>"[..], &b".."[..]))); - assert_eq!(x(b"\x02.."), Ok((&[][..], &b".."[..]))); - assert_eq!(x(b"\x02."), Err(Err::Incomplete(Needed::new(1)))); - assert_eq!(x(b"\x02"), Err(Err::Incomplete(Needed::new(2)))); - - fn y(i: &[u8]) -> IResult<&[u8], &[u8]> { - let (i, _) = tag("magic")(i)?; - length_data(le_u8)(i) - } - assert_eq!(y(b"magic\x02..>>"), Ok((&b">>"[..], &b".."[..]))); - assert_eq!(y(b"magic\x02.."), Ok((&[][..], &b".."[..]))); - assert_eq!(y(b"magic\x02."), Err(Err::Incomplete(Needed::new(1)))); - assert_eq!(y(b"magic\x02"), 
Err(Err::Incomplete(Needed::new(2)))); -} - -#[cfg(feature = "alloc")] -#[test] -fn case_insensitive() { - use crate::bytes::streaming::tag_no_case; - - fn test(i: &[u8]) -> IResult<&[u8], &[u8]> { - tag_no_case("ABcd")(i) - } - assert_eq!(test(&b"aBCdefgh"[..]), Ok((&b"efgh"[..], &b"aBCd"[..]))); - assert_eq!(test(&b"abcdefgh"[..]), Ok((&b"efgh"[..], &b"abcd"[..]))); - assert_eq!(test(&b"ABCDefgh"[..]), Ok((&b"efgh"[..], &b"ABCD"[..]))); - assert_eq!(test(&b"ab"[..]), Err(Err::Incomplete(Needed::new(2)))); - assert_eq!( - test(&b"Hello"[..]), - Err(Err::Error(error_position!(&b"Hello"[..], ErrorKind::Tag))) - ); - assert_eq!( - test(&b"Hel"[..]), - Err(Err::Error(error_position!(&b"Hel"[..], ErrorKind::Tag))) - ); - - fn test2(i: &str) -> IResult<&str, &str> { - tag_no_case("ABcd")(i) - } - assert_eq!(test2("aBCdefgh"), Ok(("efgh", "aBCd"))); - assert_eq!(test2("abcdefgh"), Ok(("efgh", "abcd"))); - assert_eq!(test2("ABCDefgh"), Ok(("efgh", "ABCD"))); - assert_eq!(test2("ab"), Err(Err::Incomplete(Needed::new(2)))); - assert_eq!( - test2("Hello"), - Err(Err::Error(error_position!(&"Hello"[..], ErrorKind::Tag))) - ); - assert_eq!( - test2("Hel"), - Err(Err::Error(error_position!(&"Hel"[..], ErrorKind::Tag))) - ); -} - -#[test] -fn tag_fixed_size_array() { - use crate::bytes::streaming::tag; - - fn test(i: &[u8]) -> IResult<&[u8], &[u8]> { - tag([0x42])(i) - } - fn test2(i: &[u8]) -> IResult<&[u8], &[u8]> { - tag(&[0x42])(i) - } - let input = [0x42, 0x00]; - assert_eq!(test(&input), Ok((&b"\x00"[..], &b"\x42"[..]))); - assert_eq!(test2(&input), Ok((&b"\x00"[..], &b"\x42"[..]))); -} diff --git a/vendor/nom/src/character/complete.rs b/vendor/nom/src/character/complete.rs deleted file mode 100644 index 7cb760a68361bc..00000000000000 --- a/vendor/nom/src/character/complete.rs +++ /dev/null @@ -1,1227 +0,0 @@ -//! Character specific parsers and combinators, complete input version. -//! -//! Functions recognizing specific characters. - -use crate::branch::alt; -use crate::combinator::opt; -use crate::error::ErrorKind; -use crate::error::ParseError; -use crate::internal::{Err, IResult}; -use crate::lib::std::ops::{Range, RangeFrom, RangeTo}; -use crate::traits::{ - AsChar, FindToken, InputIter, InputLength, InputTake, InputTakeAtPosition, Slice, -}; -use crate::traits::{Compare, CompareResult}; - -/// Recognizes one character. -/// -/// *Complete version*: Will return an error if there's not enough input data. -/// # Example -/// -/// ``` -/// # use nom::{Err, error::{ErrorKind, Error}, IResult}; -/// # use nom::character::complete::char; -/// fn parser(i: &str) -> IResult<&str, char> { -/// char('a')(i) -/// } -/// assert_eq!(parser("abc"), Ok(("bc", 'a'))); -/// assert_eq!(parser(" abc"), Err(Err::Error(Error::new(" abc", ErrorKind::Char)))); -/// assert_eq!(parser("bc"), Err(Err::Error(Error::new("bc", ErrorKind::Char)))); -/// assert_eq!(parser(""), Err(Err::Error(Error::new("", ErrorKind::Char)))); -/// ``` -pub fn char>(c: char) -> impl Fn(I) -> IResult -where - I: Slice> + InputIter, - ::Item: AsChar, -{ - move |i: I| match (i).iter_elements().next().map(|t| { - let b = t.as_char() == c; - (&c, b) - }) { - Some((c, true)) => Ok((i.slice(c.len()..), c.as_char())), - _ => Err(Err::Error(Error::from_char(i, c))), - } -} - -/// Recognizes one character and checks that it satisfies a predicate -/// -/// *Complete version*: Will return an error if there's not enough input data. 
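`char` and `satisfy` both consume exactly one character on success; `char` is essentially `satisfy` with an equality predicate. A minimal sketch, with `lower_a` and `hex_char` as illustrative names:

```rust
use nom::character::complete::{char, satisfy};
use nom::IResult;

// Match one specific character.
fn lower_a(i: &str) -> IResult<&str, char> {
    char('a')(i)
}

// Match any single character accepted by a predicate.
fn hex_char(i: &str) -> IResult<&str, char> {
    satisfy(|c| c.is_ascii_hexdigit())(i)
}

fn main() {
    assert_eq!(lower_a("abc"), Ok(("bc", 'a')));
    assert_eq!(hex_char("f00d"), Ok(("00d", 'f')));
}
```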
-/// # Example -/// -/// ``` -/// # use nom::{Err, error::{ErrorKind, Error}, Needed, IResult}; -/// # use nom::character::complete::satisfy; -/// fn parser(i: &str) -> IResult<&str, char> { -/// satisfy(|c| c == 'a' || c == 'b')(i) -/// } -/// assert_eq!(parser("abc"), Ok(("bc", 'a'))); -/// assert_eq!(parser("cd"), Err(Err::Error(Error::new("cd", ErrorKind::Satisfy)))); -/// assert_eq!(parser(""), Err(Err::Error(Error::new("", ErrorKind::Satisfy)))); -/// ``` -pub fn satisfy>(cond: F) -> impl Fn(I) -> IResult -where - I: Slice> + InputIter, - ::Item: AsChar, - F: Fn(char) -> bool, -{ - move |i: I| match (i).iter_elements().next().map(|t| { - let c = t.as_char(); - let b = cond(c); - (c, b) - }) { - Some((c, true)) => Ok((i.slice(c.len()..), c)), - _ => Err(Err::Error(Error::from_error_kind(i, ErrorKind::Satisfy))), - } -} - -/// Recognizes one of the provided characters. -/// -/// *Complete version*: Will return an error if there's not enough input data. -/// # Example -/// -/// ``` -/// # use nom::{Err, error::ErrorKind}; -/// # use nom::character::complete::one_of; -/// assert_eq!(one_of::<_, _, (&str, ErrorKind)>("abc")("b"), Ok(("", 'b'))); -/// assert_eq!(one_of::<_, _, (&str, ErrorKind)>("a")("bc"), Err(Err::Error(("bc", ErrorKind::OneOf)))); -/// assert_eq!(one_of::<_, _, (&str, ErrorKind)>("a")(""), Err(Err::Error(("", ErrorKind::OneOf)))); -/// ``` -pub fn one_of>(list: T) -> impl Fn(I) -> IResult -where - I: Slice> + InputIter, - ::Item: AsChar + Copy, - T: FindToken<::Item>, -{ - move |i: I| match (i).iter_elements().next().map(|c| (c, list.find_token(c))) { - Some((c, true)) => Ok((i.slice(c.len()..), c.as_char())), - _ => Err(Err::Error(Error::from_error_kind(i, ErrorKind::OneOf))), - } -} - -/// Recognizes a character that is not in the provided characters. -/// -/// *Complete version*: Will return an error if there's not enough input data. -/// # Example -/// -/// ``` -/// # use nom::{Err, error::ErrorKind}; -/// # use nom::character::complete::none_of; -/// assert_eq!(none_of::<_, _, (&str, ErrorKind)>("abc")("z"), Ok(("", 'z'))); -/// assert_eq!(none_of::<_, _, (&str, ErrorKind)>("ab")("a"), Err(Err::Error(("a", ErrorKind::NoneOf)))); -/// assert_eq!(none_of::<_, _, (&str, ErrorKind)>("a")(""), Err(Err::Error(("", ErrorKind::NoneOf)))); -/// ``` -pub fn none_of>(list: T) -> impl Fn(I) -> IResult -where - I: Slice> + InputIter, - ::Item: AsChar + Copy, - T: FindToken<::Item>, -{ - move |i: I| match (i).iter_elements().next().map(|c| (c, !list.find_token(c))) { - Some((c, true)) => Ok((i.slice(c.len()..), c.as_char())), - _ => Err(Err::Error(Error::from_error_kind(i, ErrorKind::NoneOf))), - } -} - -/// Recognizes the string "\r\n". -/// -/// *Complete version*: Will return an error if there's not enough input data. -/// # Example -/// -/// ``` -/// # use nom::{Err, error::{Error, ErrorKind}, IResult}; -/// # use nom::character::complete::crlf; -/// fn parser(input: &str) -> IResult<&str, &str> { -/// crlf(input) -/// } -/// -/// assert_eq!(parser("\r\nc"), Ok(("c", "\r\n"))); -/// assert_eq!(parser("ab\r\nc"), Err(Err::Error(Error::new("ab\r\nc", ErrorKind::CrLf)))); -/// assert_eq!(parser(""), Err(Err::Error(Error::new("", ErrorKind::CrLf)))); -/// ``` -pub fn crlf>(input: T) -> IResult -where - T: Slice> + Slice>, - T: InputIter, - T: Compare<&'static str>, -{ - match input.compare("\r\n") { - //FIXME: is this the right index? 
- CompareResult::Ok => Ok((input.slice(2..), input.slice(0..2))), - _ => { - let e: ErrorKind = ErrorKind::CrLf; - Err(Err::Error(E::from_error_kind(input, e))) - } - } -} - -//FIXME: there's still an incomplete -/// Recognizes a string of any char except '\r\n' or '\n'. -/// -/// *Complete version*: Will return an error if there's not enough input data. -/// # Example -/// -/// ``` -/// # use nom::{Err, error::{Error, ErrorKind}, IResult, Needed}; -/// # use nom::character::complete::not_line_ending; -/// fn parser(input: &str) -> IResult<&str, &str> { -/// not_line_ending(input) -/// } -/// -/// assert_eq!(parser("ab\r\nc"), Ok(("\r\nc", "ab"))); -/// assert_eq!(parser("ab\nc"), Ok(("\nc", "ab"))); -/// assert_eq!(parser("abc"), Ok(("", "abc"))); -/// assert_eq!(parser(""), Ok(("", ""))); -/// assert_eq!(parser("a\rb\nc"), Err(Err::Error(Error { input: "a\rb\nc", code: ErrorKind::Tag }))); -/// assert_eq!(parser("a\rbc"), Err(Err::Error(Error { input: "a\rbc", code: ErrorKind::Tag }))); -/// ``` -pub fn not_line_ending>(input: T) -> IResult -where - T: Slice> + Slice> + Slice>, - T: InputIter + InputLength, - T: Compare<&'static str>, - ::Item: AsChar, - ::Item: AsChar, -{ - match input.position(|item| { - let c = item.as_char(); - c == '\r' || c == '\n' - }) { - None => Ok((input.slice(input.input_len()..), input)), - Some(index) => { - let mut it = input.slice(index..).iter_elements(); - let nth = it.next().unwrap().as_char(); - if nth == '\r' { - let sliced = input.slice(index..); - let comp = sliced.compare("\r\n"); - match comp { - //FIXME: calculate the right index - CompareResult::Ok => Ok((input.slice(index..), input.slice(..index))), - _ => { - let e: ErrorKind = ErrorKind::Tag; - Err(Err::Error(E::from_error_kind(input, e))) - } - } - } else { - Ok((input.slice(index..), input.slice(..index))) - } - } - } -} - -/// Recognizes an end of line (both '\n' and '\r\n'). -/// -/// *Complete version*: Will return an error if there's not enough input data. -/// # Example -/// -/// ``` -/// # use nom::{Err, error::{Error, ErrorKind}, IResult, Needed}; -/// # use nom::character::complete::line_ending; -/// fn parser(input: &str) -> IResult<&str, &str> { -/// line_ending(input) -/// } -/// -/// assert_eq!(parser("\r\nc"), Ok(("c", "\r\n"))); -/// assert_eq!(parser("ab\r\nc"), Err(Err::Error(Error::new("ab\r\nc", ErrorKind::CrLf)))); -/// assert_eq!(parser(""), Err(Err::Error(Error::new("", ErrorKind::CrLf)))); -/// ``` -pub fn line_ending>(input: T) -> IResult -where - T: Slice> + Slice> + Slice>, - T: InputIter + InputLength, - T: Compare<&'static str>, -{ - match input.compare("\n") { - CompareResult::Ok => Ok((input.slice(1..), input.slice(0..1))), - CompareResult::Incomplete => Err(Err::Error(E::from_error_kind(input, ErrorKind::CrLf))), - CompareResult::Error => { - match input.compare("\r\n") { - //FIXME: is this the right index? - CompareResult::Ok => Ok((input.slice(2..), input.slice(0..2))), - _ => Err(Err::Error(E::from_error_kind(input, ErrorKind::CrLf))), - } - } - } -} - -/// Matches a newline character '\n'. -/// -/// *Complete version*: Will return an error if there's not enough input data. 
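`not_line_ending` and `line_ending` are designed to be combined. A minimal sketch of a line splitter that accepts both "\n" and "\r\n" terminators, using `separated_list0` from `nom::multi`; `lines` is an illustrative name.

```rust
use nom::character::complete::{line_ending, not_line_ending};
use nom::multi::separated_list0;
use nom::IResult;

// Each element is "everything up to the next line ending"; the line endings
// themselves act as separators and are discarded.
fn lines(i: &str) -> IResult<&str, Vec<&str>> {
    separated_list0(line_ending, not_line_ending)(i)
}

fn main() {
    assert_eq!(lines("a\r\nbb\nccc"), Ok(("", vec!["a", "bb", "ccc"])));
}
```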
-/// # Example -/// -/// ``` -/// # use nom::{Err, error::{Error, ErrorKind}, IResult, Needed}; -/// # use nom::character::complete::newline; -/// fn parser(input: &str) -> IResult<&str, char> { -/// newline(input) -/// } -/// -/// assert_eq!(parser("\nc"), Ok(("c", '\n'))); -/// assert_eq!(parser("\r\nc"), Err(Err::Error(Error::new("\r\nc", ErrorKind::Char)))); -/// assert_eq!(parser(""), Err(Err::Error(Error::new("", ErrorKind::Char)))); -/// ``` -pub fn newline>(input: I) -> IResult -where - I: Slice> + InputIter, - ::Item: AsChar, -{ - char('\n')(input) -} - -/// Matches a tab character '\t'. -/// -/// *Complete version*: Will return an error if there's not enough input data. -/// # Example -/// -/// ``` -/// # use nom::{Err, error::{Error, ErrorKind}, IResult, Needed}; -/// # use nom::character::complete::tab; -/// fn parser(input: &str) -> IResult<&str, char> { -/// tab(input) -/// } -/// -/// assert_eq!(parser("\tc"), Ok(("c", '\t'))); -/// assert_eq!(parser("\r\nc"), Err(Err::Error(Error::new("\r\nc", ErrorKind::Char)))); -/// assert_eq!(parser(""), Err(Err::Error(Error::new("", ErrorKind::Char)))); -/// ``` -pub fn tab>(input: I) -> IResult -where - I: Slice> + InputIter, - ::Item: AsChar, -{ - char('\t')(input) -} - -/// Matches one byte as a character. Note that the input type will -/// accept a `str`, but not a `&[u8]`, unlike many other nom parsers. -/// -/// *Complete version*: Will return an error if there's not enough input data. -/// # Example -/// -/// ``` -/// # use nom::{character::complete::anychar, Err, error::{Error, ErrorKind}, IResult}; -/// fn parser(input: &str) -> IResult<&str, char> { -/// anychar(input) -/// } -/// -/// assert_eq!(parser("abc"), Ok(("bc",'a'))); -/// assert_eq!(parser(""), Err(Err::Error(Error::new("", ErrorKind::Eof)))); -/// ``` -pub fn anychar>(input: T) -> IResult -where - T: InputIter + InputLength + Slice>, - ::Item: AsChar, -{ - let mut it = input.iter_indices(); - match it.next() { - None => Err(Err::Error(E::from_error_kind(input, ErrorKind::Eof))), - Some((_, c)) => match it.next() { - None => Ok((input.slice(input.input_len()..), c.as_char())), - Some((idx, _)) => Ok((input.slice(idx..), c.as_char())), - }, - } -} - -/// Recognizes zero or more lowercase and uppercase ASCII alphabetic characters: a-z, A-Z -/// -/// *Complete version*: Will return the whole input if no terminating token is found (a non -/// alphabetic character). -/// # Example -/// -/// ``` -/// # use nom::{Err, error::ErrorKind, IResult, Needed}; -/// # use nom::character::complete::alpha0; -/// fn parser(input: &str) -> IResult<&str, &str> { -/// alpha0(input) -/// } -/// -/// assert_eq!(parser("ab1c"), Ok(("1c", "ab"))); -/// assert_eq!(parser("1c"), Ok(("1c", ""))); -/// assert_eq!(parser(""), Ok(("", ""))); -/// ``` -pub fn alpha0>(input: T) -> IResult -where - T: InputTakeAtPosition, - ::Item: AsChar, -{ - input.split_at_position_complete(|item| !item.is_alpha()) -} - -/// Recognizes one or more lowercase and uppercase ASCII alphabetic characters: a-z, A-Z -/// -/// *Complete version*: Will return an error if there's not enough input data, -/// or the whole input if no terminating token is found (a non alphabetic character). 
-/// # Example -/// -/// ``` -/// # use nom::{Err, error::{Error, ErrorKind}, IResult, Needed}; -/// # use nom::character::complete::alpha1; -/// fn parser(input: &str) -> IResult<&str, &str> { -/// alpha1(input) -/// } -/// -/// assert_eq!(parser("aB1c"), Ok(("1c", "aB"))); -/// assert_eq!(parser("1c"), Err(Err::Error(Error::new("1c", ErrorKind::Alpha)))); -/// assert_eq!(parser(""), Err(Err::Error(Error::new("", ErrorKind::Alpha)))); -/// ``` -pub fn alpha1>(input: T) -> IResult -where - T: InputTakeAtPosition, - ::Item: AsChar, -{ - input.split_at_position1_complete(|item| !item.is_alpha(), ErrorKind::Alpha) -} - -/// Recognizes zero or more ASCII numerical characters: 0-9 -/// -/// *Complete version*: Will return an error if there's not enough input data, -/// or the whole input if no terminating token is found (a non digit character). -/// # Example -/// -/// ``` -/// # use nom::{Err, error::ErrorKind, IResult, Needed}; -/// # use nom::character::complete::digit0; -/// fn parser(input: &str) -> IResult<&str, &str> { -/// digit0(input) -/// } -/// -/// assert_eq!(parser("21c"), Ok(("c", "21"))); -/// assert_eq!(parser("21"), Ok(("", "21"))); -/// assert_eq!(parser("a21c"), Ok(("a21c", ""))); -/// assert_eq!(parser(""), Ok(("", ""))); -/// ``` -pub fn digit0>(input: T) -> IResult -where - T: InputTakeAtPosition, - ::Item: AsChar, -{ - input.split_at_position_complete(|item| !item.is_dec_digit()) -} - -/// Recognizes one or more ASCII numerical characters: 0-9 -/// -/// *Complete version*: Will return an error if there's not enough input data, -/// or the whole input if no terminating token is found (a non digit character). -/// # Example -/// -/// ``` -/// # use nom::{Err, error::{Error, ErrorKind}, IResult, Needed}; -/// # use nom::character::complete::digit1; -/// fn parser(input: &str) -> IResult<&str, &str> { -/// digit1(input) -/// } -/// -/// assert_eq!(parser("21c"), Ok(("c", "21"))); -/// assert_eq!(parser("c1"), Err(Err::Error(Error::new("c1", ErrorKind::Digit)))); -/// assert_eq!(parser(""), Err(Err::Error(Error::new("", ErrorKind::Digit)))); -/// ``` -/// -/// ## Parsing an integer -/// You can use `digit1` in combination with [`map_res`] to parse an integer: -/// -/// ``` -/// # use nom::{Err, error::{Error, ErrorKind}, IResult, Needed}; -/// # use nom::combinator::map_res; -/// # use nom::character::complete::digit1; -/// fn parser(input: &str) -> IResult<&str, u32> { -/// map_res(digit1, str::parse)(input) -/// } -/// -/// assert_eq!(parser("416"), Ok(("", 416))); -/// assert_eq!(parser("12b"), Ok(("b", 12))); -/// assert!(parser("b").is_err()); -/// ``` -/// -/// [`map_res`]: crate::combinator::map_res -pub fn digit1>(input: T) -> IResult -where - T: InputTakeAtPosition, - ::Item: AsChar, -{ - input.split_at_position1_complete(|item| !item.is_dec_digit(), ErrorKind::Digit) -} - -/// Recognizes zero or more ASCII hexadecimal numerical characters: 0-9, A-F, a-f -/// -/// *Complete version*: Will return the whole input if no terminating token is found (a non hexadecimal digit character). 
-/// # Example -/// -/// ``` -/// # use nom::{Err, error::ErrorKind, IResult, Needed}; -/// # use nom::character::complete::hex_digit0; -/// fn parser(input: &str) -> IResult<&str, &str> { -/// hex_digit0(input) -/// } -/// -/// assert_eq!(parser("21cZ"), Ok(("Z", "21c"))); -/// assert_eq!(parser("Z21c"), Ok(("Z21c", ""))); -/// assert_eq!(parser(""), Ok(("", ""))); -/// ``` -pub fn hex_digit0>(input: T) -> IResult -where - T: InputTakeAtPosition, - ::Item: AsChar, -{ - input.split_at_position_complete(|item| !item.is_hex_digit()) -} -/// Recognizes one or more ASCII hexadecimal numerical characters: 0-9, A-F, a-f -/// -/// *Complete version*: Will return an error if there's not enough input data, -/// or the whole input if no terminating token is found (a non hexadecimal digit character). -/// # Example -/// -/// ``` -/// # use nom::{Err, error::{Error, ErrorKind}, IResult, Needed}; -/// # use nom::character::complete::hex_digit1; -/// fn parser(input: &str) -> IResult<&str, &str> { -/// hex_digit1(input) -/// } -/// -/// assert_eq!(parser("21cZ"), Ok(("Z", "21c"))); -/// assert_eq!(parser("H2"), Err(Err::Error(Error::new("H2", ErrorKind::HexDigit)))); -/// assert_eq!(parser(""), Err(Err::Error(Error::new("", ErrorKind::HexDigit)))); -/// ``` -pub fn hex_digit1>(input: T) -> IResult -where - T: InputTakeAtPosition, - ::Item: AsChar, -{ - input.split_at_position1_complete(|item| !item.is_hex_digit(), ErrorKind::HexDigit) -} - -/// Recognizes zero or more octal characters: 0-7 -/// -/// *Complete version*: Will return the whole input if no terminating token is found (a non octal -/// digit character). -/// # Example -/// -/// ``` -/// # use nom::{Err, error::ErrorKind, IResult, Needed}; -/// # use nom::character::complete::oct_digit0; -/// fn parser(input: &str) -> IResult<&str, &str> { -/// oct_digit0(input) -/// } -/// -/// assert_eq!(parser("21cZ"), Ok(("cZ", "21"))); -/// assert_eq!(parser("Z21c"), Ok(("Z21c", ""))); -/// assert_eq!(parser(""), Ok(("", ""))); -/// ``` -pub fn oct_digit0>(input: T) -> IResult -where - T: InputTakeAtPosition, - ::Item: AsChar, -{ - input.split_at_position_complete(|item| !item.is_oct_digit()) -} - -/// Recognizes one or more octal characters: 0-7 -/// -/// *Complete version*: Will return an error if there's not enough input data, -/// or the whole input if no terminating token is found (a non octal digit character). -/// # Example -/// -/// ``` -/// # use nom::{Err, error::{Error, ErrorKind}, IResult, Needed}; -/// # use nom::character::complete::oct_digit1; -/// fn parser(input: &str) -> IResult<&str, &str> { -/// oct_digit1(input) -/// } -/// -/// assert_eq!(parser("21cZ"), Ok(("cZ", "21"))); -/// assert_eq!(parser("H2"), Err(Err::Error(Error::new("H2", ErrorKind::OctDigit)))); -/// assert_eq!(parser(""), Err(Err::Error(Error::new("", ErrorKind::OctDigit)))); -/// ``` -pub fn oct_digit1>(input: T) -> IResult -where - T: InputTakeAtPosition, - ::Item: AsChar, -{ - input.split_at_position1_complete(|item| !item.is_oct_digit(), ErrorKind::OctDigit) -} - -/// Recognizes zero or more ASCII numerical and alphabetic characters: 0-9, a-z, A-Z -/// -/// *Complete version*: Will return the whole input if no terminating token is found (a non -/// alphanumerical character). 
-/// # Example -/// -/// ``` -/// # use nom::{Err, error::ErrorKind, IResult, Needed}; -/// # use nom::character::complete::alphanumeric0; -/// fn parser(input: &str) -> IResult<&str, &str> { -/// alphanumeric0(input) -/// } -/// -/// assert_eq!(parser("21cZ%1"), Ok(("%1", "21cZ"))); -/// assert_eq!(parser("&Z21c"), Ok(("&Z21c", ""))); -/// assert_eq!(parser(""), Ok(("", ""))); -/// ``` -pub fn alphanumeric0>(input: T) -> IResult -where - T: InputTakeAtPosition, - ::Item: AsChar, -{ - input.split_at_position_complete(|item| !item.is_alphanum()) -} - -/// Recognizes one or more ASCII numerical and alphabetic characters: 0-9, a-z, A-Z -/// -/// *Complete version*: Will return an error if there's not enough input data, -/// or the whole input if no terminating token is found (a non alphanumerical character). -/// # Example -/// -/// ``` -/// # use nom::{Err, error::{Error, ErrorKind}, IResult, Needed}; -/// # use nom::character::complete::alphanumeric1; -/// fn parser(input: &str) -> IResult<&str, &str> { -/// alphanumeric1(input) -/// } -/// -/// assert_eq!(parser("21cZ%1"), Ok(("%1", "21cZ"))); -/// assert_eq!(parser("&H2"), Err(Err::Error(Error::new("&H2", ErrorKind::AlphaNumeric)))); -/// assert_eq!(parser(""), Err(Err::Error(Error::new("", ErrorKind::AlphaNumeric)))); -/// ``` -pub fn alphanumeric1>(input: T) -> IResult -where - T: InputTakeAtPosition, - ::Item: AsChar, -{ - input.split_at_position1_complete(|item| !item.is_alphanum(), ErrorKind::AlphaNumeric) -} - -/// Recognizes zero or more spaces and tabs. -/// -/// *Complete version*: Will return the whole input if no terminating token is found (a non space -/// character). -/// # Example -/// -/// ``` -/// # use nom::{Err, error::ErrorKind, IResult, Needed}; -/// # use nom::character::complete::space0; -/// fn parser(input: &str) -> IResult<&str, &str> { -/// space0(input) -/// } -/// -/// assert_eq!(parser(" \t21c"), Ok(("21c", " \t"))); -/// assert_eq!(parser("Z21c"), Ok(("Z21c", ""))); -/// assert_eq!(parser(""), Ok(("", ""))); -/// ``` -pub fn space0>(input: T) -> IResult -where - T: InputTakeAtPosition, - ::Item: AsChar + Clone, -{ - input.split_at_position_complete(|item| { - let c = item.as_char(); - !(c == ' ' || c == '\t') - }) -} - -/// Recognizes one or more spaces and tabs. -/// -/// *Complete version*: Will return an error if there's not enough input data, -/// or the whole input if no terminating token is found (a non space character). -/// # Example -/// -/// ``` -/// # use nom::{Err, error::{Error, ErrorKind}, IResult, Needed}; -/// # use nom::character::complete::space1; -/// fn parser(input: &str) -> IResult<&str, &str> { -/// space1(input) -/// } -/// -/// assert_eq!(parser(" \t21c"), Ok(("21c", " \t"))); -/// assert_eq!(parser("H2"), Err(Err::Error(Error::new("H2", ErrorKind::Space)))); -/// assert_eq!(parser(""), Err(Err::Error(Error::new("", ErrorKind::Space)))); -/// ``` -pub fn space1>(input: T) -> IResult -where - T: InputTakeAtPosition, - ::Item: AsChar + Clone, -{ - input.split_at_position1_complete( - |item| { - let c = item.as_char(); - !(c == ' ' || c == '\t') - }, - ErrorKind::Space, - ) -} - -/// Recognizes zero or more spaces, tabs, carriage returns and line feeds. -/// -/// *Complete version*: will return the whole input if no terminating token is found (a non space -/// character). 
-/// # Example -/// -/// ``` -/// # use nom::{Err, error::ErrorKind, IResult, Needed}; -/// # use nom::character::complete::multispace0; -/// fn parser(input: &str) -> IResult<&str, &str> { -/// multispace0(input) -/// } -/// -/// assert_eq!(parser(" \t\n\r21c"), Ok(("21c", " \t\n\r"))); -/// assert_eq!(parser("Z21c"), Ok(("Z21c", ""))); -/// assert_eq!(parser(""), Ok(("", ""))); -/// ``` -pub fn multispace0>(input: T) -> IResult -where - T: InputTakeAtPosition, - ::Item: AsChar + Clone, -{ - input.split_at_position_complete(|item| { - let c = item.as_char(); - !(c == ' ' || c == '\t' || c == '\r' || c == '\n') - }) -} - -/// Recognizes one or more spaces, tabs, carriage returns and line feeds. -/// -/// *Complete version*: will return an error if there's not enough input data, -/// or the whole input if no terminating token is found (a non space character). -/// # Example -/// -/// ``` -/// # use nom::{Err, error::{Error, ErrorKind}, IResult, Needed}; -/// # use nom::character::complete::multispace1; -/// fn parser(input: &str) -> IResult<&str, &str> { -/// multispace1(input) -/// } -/// -/// assert_eq!(parser(" \t\n\r21c"), Ok(("21c", " \t\n\r"))); -/// assert_eq!(parser("H2"), Err(Err::Error(Error::new("H2", ErrorKind::MultiSpace)))); -/// assert_eq!(parser(""), Err(Err::Error(Error::new("", ErrorKind::MultiSpace)))); -/// ``` -pub fn multispace1>(input: T) -> IResult -where - T: InputTakeAtPosition, - ::Item: AsChar + Clone, -{ - input.split_at_position1_complete( - |item| { - let c = item.as_char(); - !(c == ' ' || c == '\t' || c == '\r' || c == '\n') - }, - ErrorKind::MultiSpace, - ) -} - -pub(crate) fn sign>(input: T) -> IResult -where - T: Clone + InputTake, - T: for<'a> Compare<&'a [u8]>, -{ - use crate::bytes::complete::tag; - use crate::combinator::value; - - let (i, opt_sign) = opt(alt(( - value(false, tag(&b"-"[..])), - value(true, tag(&b"+"[..])), - )))(input)?; - let sign = opt_sign.unwrap_or(true); - - Ok((i, sign)) -} - -#[doc(hidden)] -macro_rules! ints { - ($($t:tt)+) => { - $( - /// will parse a number in text form to a number - /// - /// *Complete version*: can parse until the end of input. - pub fn $t>(input: T) -> IResult - where - T: InputIter + Slice> + InputLength + InputTake + Clone, - ::Item: AsChar, - T: for <'a> Compare<&'a[u8]>, - { - let (i, sign) = sign(input.clone())?; - - if i.input_len() == 0 { - return Err(Err::Error(E::from_error_kind(input, ErrorKind::Digit))); - } - - let mut value: $t = 0; - if sign { - for (pos, c) in i.iter_indices() { - match c.as_char().to_digit(10) { - None => { - if pos == 0 { - return Err(Err::Error(E::from_error_kind(input, ErrorKind::Digit))); - } else { - return Ok((i.slice(pos..), value)); - } - }, - Some(d) => match value.checked_mul(10).and_then(|v| v.checked_add(d as $t)) { - None => return Err(Err::Error(E::from_error_kind(input, ErrorKind::Digit))), - Some(v) => value = v, - } - } - } - } else { - for (pos, c) in i.iter_indices() { - match c.as_char().to_digit(10) { - None => { - if pos == 0 { - return Err(Err::Error(E::from_error_kind(input, ErrorKind::Digit))); - } else { - return Ok((i.slice(pos..), value)); - } - }, - Some(d) => match value.checked_mul(10).and_then(|v| v.checked_sub(d as $t)) { - None => return Err(Err::Error(E::from_error_kind(input, ErrorKind::Digit))), - Some(v) => value = v, - } - } - } - } - - Ok((i.slice(i.input_len()..), value)) - } - )+ - } -} - -ints! { i8 i16 i32 i64 i128 } - -#[doc(hidden)] -macro_rules! 
uints { - ($($t:tt)+) => { - $( - /// will parse a number in text form to a number - /// - /// *Complete version*: can parse until the end of input. - pub fn $t>(input: T) -> IResult - where - T: InputIter + Slice> + InputLength, - ::Item: AsChar, - { - let i = input; - - if i.input_len() == 0 { - return Err(Err::Error(E::from_error_kind(i, ErrorKind::Digit))); - } - - let mut value: $t = 0; - for (pos, c) in i.iter_indices() { - match c.as_char().to_digit(10) { - None => { - if pos == 0 { - return Err(Err::Error(E::from_error_kind(i, ErrorKind::Digit))); - } else { - return Ok((i.slice(pos..), value)); - } - }, - Some(d) => match value.checked_mul(10).and_then(|v| v.checked_add(d as $t)) { - None => return Err(Err::Error(E::from_error_kind(i, ErrorKind::Digit))), - Some(v) => value = v, - } - } - } - - Ok((i.slice(i.input_len()..), value)) - } - )+ - } -} - -uints! { u8 u16 u32 u64 u128 } - -#[cfg(test)] -mod tests { - use super::*; - use crate::internal::Err; - use crate::traits::ParseTo; - use proptest::prelude::*; - - macro_rules! assert_parse( - ($left: expr, $right: expr) => { - let res: $crate::IResult<_, _, (_, ErrorKind)> = $left; - assert_eq!(res, $right); - }; - ); - - #[test] - fn character() { - let empty: &[u8] = b""; - let a: &[u8] = b"abcd"; - let b: &[u8] = b"1234"; - let c: &[u8] = b"a123"; - let d: &[u8] = "azé12".as_bytes(); - let e: &[u8] = b" "; - let f: &[u8] = b" ;"; - //assert_eq!(alpha1::<_, (_, ErrorKind)>(a), Err(Err::Incomplete(Needed::Size(1)))); - assert_parse!(alpha1(a), Ok((empty, a))); - assert_eq!(alpha1(b), Err(Err::Error((b, ErrorKind::Alpha)))); - assert_eq!(alpha1::<_, (_, ErrorKind)>(c), Ok((&c[1..], &b"a"[..]))); - assert_eq!( - alpha1::<_, (_, ErrorKind)>(d), - Ok(("é12".as_bytes(), &b"az"[..])) - ); - assert_eq!(digit1(a), Err(Err::Error((a, ErrorKind::Digit)))); - assert_eq!(digit1::<_, (_, ErrorKind)>(b), Ok((empty, b))); - assert_eq!(digit1(c), Err(Err::Error((c, ErrorKind::Digit)))); - assert_eq!(digit1(d), Err(Err::Error((d, ErrorKind::Digit)))); - assert_eq!(hex_digit1::<_, (_, ErrorKind)>(a), Ok((empty, a))); - assert_eq!(hex_digit1::<_, (_, ErrorKind)>(b), Ok((empty, b))); - assert_eq!(hex_digit1::<_, (_, ErrorKind)>(c), Ok((empty, c))); - assert_eq!( - hex_digit1::<_, (_, ErrorKind)>(d), - Ok(("zé12".as_bytes(), &b"a"[..])) - ); - assert_eq!(hex_digit1(e), Err(Err::Error((e, ErrorKind::HexDigit)))); - assert_eq!(oct_digit1(a), Err(Err::Error((a, ErrorKind::OctDigit)))); - assert_eq!(oct_digit1::<_, (_, ErrorKind)>(b), Ok((empty, b))); - assert_eq!(oct_digit1(c), Err(Err::Error((c, ErrorKind::OctDigit)))); - assert_eq!(oct_digit1(d), Err(Err::Error((d, ErrorKind::OctDigit)))); - assert_eq!(alphanumeric1::<_, (_, ErrorKind)>(a), Ok((empty, a))); - //assert_eq!(fix_error!(b,(), alphanumeric), Ok((empty, b))); - assert_eq!(alphanumeric1::<_, (_, ErrorKind)>(c), Ok((empty, c))); - assert_eq!( - alphanumeric1::<_, (_, ErrorKind)>(d), - Ok(("é12".as_bytes(), &b"az"[..])) - ); - assert_eq!(space1::<_, (_, ErrorKind)>(e), Ok((empty, e))); - assert_eq!(space1::<_, (_, ErrorKind)>(f), Ok((&b";"[..], &b" "[..]))); - } - - #[cfg(feature = "alloc")] - #[test] - fn character_s() { - let empty = ""; - let a = "abcd"; - let b = "1234"; - let c = "a123"; - let d = "azé12"; - let e = " "; - assert_eq!(alpha1::<_, (_, ErrorKind)>(a), Ok((empty, a))); - assert_eq!(alpha1(b), Err(Err::Error((b, ErrorKind::Alpha)))); - assert_eq!(alpha1::<_, (_, ErrorKind)>(c), Ok((&c[1..], &"a"[..]))); - assert_eq!(alpha1::<_, (_, ErrorKind)>(d), Ok(("é12", &"az"[..]))); - 
assert_eq!(digit1(a), Err(Err::Error((a, ErrorKind::Digit)))); - assert_eq!(digit1::<_, (_, ErrorKind)>(b), Ok((empty, b))); - assert_eq!(digit1(c), Err(Err::Error((c, ErrorKind::Digit)))); - assert_eq!(digit1(d), Err(Err::Error((d, ErrorKind::Digit)))); - assert_eq!(hex_digit1::<_, (_, ErrorKind)>(a), Ok((empty, a))); - assert_eq!(hex_digit1::<_, (_, ErrorKind)>(b), Ok((empty, b))); - assert_eq!(hex_digit1::<_, (_, ErrorKind)>(c), Ok((empty, c))); - assert_eq!(hex_digit1::<_, (_, ErrorKind)>(d), Ok(("zé12", &"a"[..]))); - assert_eq!(hex_digit1(e), Err(Err::Error((e, ErrorKind::HexDigit)))); - assert_eq!(oct_digit1(a), Err(Err::Error((a, ErrorKind::OctDigit)))); - assert_eq!(oct_digit1::<_, (_, ErrorKind)>(b), Ok((empty, b))); - assert_eq!(oct_digit1(c), Err(Err::Error((c, ErrorKind::OctDigit)))); - assert_eq!(oct_digit1(d), Err(Err::Error((d, ErrorKind::OctDigit)))); - assert_eq!(alphanumeric1::<_, (_, ErrorKind)>(a), Ok((empty, a))); - //assert_eq!(fix_error!(b,(), alphanumeric), Ok((empty, b))); - assert_eq!(alphanumeric1::<_, (_, ErrorKind)>(c), Ok((empty, c))); - assert_eq!(alphanumeric1::<_, (_, ErrorKind)>(d), Ok(("é12", "az"))); - assert_eq!(space1::<_, (_, ErrorKind)>(e), Ok((empty, e))); - } - - use crate::traits::Offset; - #[test] - fn offset() { - let a = &b"abcd;"[..]; - let b = &b"1234;"[..]; - let c = &b"a123;"[..]; - let d = &b" \t;"[..]; - let e = &b" \t\r\n;"[..]; - let f = &b"123abcDEF;"[..]; - - match alpha1::<_, (_, ErrorKind)>(a) { - Ok((i, _)) => { - assert_eq!(a.offset(i) + i.len(), a.len()); - } - _ => panic!("wrong return type in offset test for alpha"), - } - match digit1::<_, (_, ErrorKind)>(b) { - Ok((i, _)) => { - assert_eq!(b.offset(i) + i.len(), b.len()); - } - _ => panic!("wrong return type in offset test for digit"), - } - match alphanumeric1::<_, (_, ErrorKind)>(c) { - Ok((i, _)) => { - assert_eq!(c.offset(i) + i.len(), c.len()); - } - _ => panic!("wrong return type in offset test for alphanumeric"), - } - match space1::<_, (_, ErrorKind)>(d) { - Ok((i, _)) => { - assert_eq!(d.offset(i) + i.len(), d.len()); - } - _ => panic!("wrong return type in offset test for space"), - } - match multispace1::<_, (_, ErrorKind)>(e) { - Ok((i, _)) => { - assert_eq!(e.offset(i) + i.len(), e.len()); - } - _ => panic!("wrong return type in offset test for multispace"), - } - match hex_digit1::<_, (_, ErrorKind)>(f) { - Ok((i, _)) => { - assert_eq!(f.offset(i) + i.len(), f.len()); - } - _ => panic!("wrong return type in offset test for hex_digit"), - } - match oct_digit1::<_, (_, ErrorKind)>(f) { - Ok((i, _)) => { - assert_eq!(f.offset(i) + i.len(), f.len()); - } - _ => panic!("wrong return type in offset test for oct_digit"), - } - } - - #[test] - fn is_not_line_ending_bytes() { - let a: &[u8] = b"ab12cd\nefgh"; - assert_eq!( - not_line_ending::<_, (_, ErrorKind)>(a), - Ok((&b"\nefgh"[..], &b"ab12cd"[..])) - ); - - let b: &[u8] = b"ab12cd\nefgh\nijkl"; - assert_eq!( - not_line_ending::<_, (_, ErrorKind)>(b), - Ok((&b"\nefgh\nijkl"[..], &b"ab12cd"[..])) - ); - - let c: &[u8] = b"ab12cd\r\nefgh\nijkl"; - assert_eq!( - not_line_ending::<_, (_, ErrorKind)>(c), - Ok((&b"\r\nefgh\nijkl"[..], &b"ab12cd"[..])) - ); - - let d: &[u8] = b"ab12cd"; - assert_eq!( - not_line_ending::<_, (_, ErrorKind)>(d), - Ok((&[][..], &d[..])) - ); - } - - #[test] - fn is_not_line_ending_str() { - /* - let a: &str = "ab12cd\nefgh"; - assert_eq!(not_line_ending(a), Ok((&"\nefgh"[..], &"ab12cd"[..]))); - - let b: &str = "ab12cd\nefgh\nijkl"; - assert_eq!(not_line_ending(b), Ok((&"\nefgh\nijkl"[..], 
&"ab12cd"[..]))); - - let c: &str = "ab12cd\r\nefgh\nijkl"; - assert_eq!(not_line_ending(c), Ok((&"\r\nefgh\nijkl"[..], &"ab12cd"[..]))); - - let d = "βèƒôřè\nÂßÇáƒƭèř"; - assert_eq!(not_line_ending(d), Ok((&"\nÂßÇáƒƭèř"[..], &"βèƒôřè"[..]))); - - let e = "βèƒôřè\r\nÂßÇáƒƭèř"; - assert_eq!(not_line_ending(e), Ok((&"\r\nÂßÇáƒƭèř"[..], &"βèƒôřè"[..]))); - */ - - let f = "βèƒôřè\rÂßÇáƒƭèř"; - assert_eq!(not_line_ending(f), Err(Err::Error((f, ErrorKind::Tag)))); - - let g2: &str = "ab12cd"; - assert_eq!(not_line_ending::<_, (_, ErrorKind)>(g2), Ok(("", g2))); - } - - #[test] - fn hex_digit_test() { - let i = &b"0123456789abcdefABCDEF;"[..]; - assert_parse!(hex_digit1(i), Ok((&b";"[..], &i[..i.len() - 1]))); - - let i = &b"g"[..]; - assert_parse!( - hex_digit1(i), - Err(Err::Error(error_position!(i, ErrorKind::HexDigit))) - ); - - let i = &b"G"[..]; - assert_parse!( - hex_digit1(i), - Err(Err::Error(error_position!(i, ErrorKind::HexDigit))) - ); - - assert!(crate::character::is_hex_digit(b'0')); - assert!(crate::character::is_hex_digit(b'9')); - assert!(crate::character::is_hex_digit(b'a')); - assert!(crate::character::is_hex_digit(b'f')); - assert!(crate::character::is_hex_digit(b'A')); - assert!(crate::character::is_hex_digit(b'F')); - assert!(!crate::character::is_hex_digit(b'g')); - assert!(!crate::character::is_hex_digit(b'G')); - assert!(!crate::character::is_hex_digit(b'/')); - assert!(!crate::character::is_hex_digit(b':')); - assert!(!crate::character::is_hex_digit(b'@')); - assert!(!crate::character::is_hex_digit(b'\x60')); - } - - #[test] - fn oct_digit_test() { - let i = &b"01234567;"[..]; - assert_parse!(oct_digit1(i), Ok((&b";"[..], &i[..i.len() - 1]))); - - let i = &b"8"[..]; - assert_parse!( - oct_digit1(i), - Err(Err::Error(error_position!(i, ErrorKind::OctDigit))) - ); - - assert!(crate::character::is_oct_digit(b'0')); - assert!(crate::character::is_oct_digit(b'7')); - assert!(!crate::character::is_oct_digit(b'8')); - assert!(!crate::character::is_oct_digit(b'9')); - assert!(!crate::character::is_oct_digit(b'a')); - assert!(!crate::character::is_oct_digit(b'A')); - assert!(!crate::character::is_oct_digit(b'/')); - assert!(!crate::character::is_oct_digit(b':')); - assert!(!crate::character::is_oct_digit(b'@')); - assert!(!crate::character::is_oct_digit(b'\x60')); - } - - #[test] - fn full_line_windows() { - use crate::sequence::pair; - fn take_full_line(i: &[u8]) -> IResult<&[u8], (&[u8], &[u8])> { - pair(not_line_ending, line_ending)(i) - } - let input = b"abc\r\n"; - let output = take_full_line(input); - assert_eq!(output, Ok((&b""[..], (&b"abc"[..], &b"\r\n"[..])))); - } - - #[test] - fn full_line_unix() { - use crate::sequence::pair; - fn take_full_line(i: &[u8]) -> IResult<&[u8], (&[u8], &[u8])> { - pair(not_line_ending, line_ending)(i) - } - let input = b"abc\n"; - let output = take_full_line(input); - assert_eq!(output, Ok((&b""[..], (&b"abc"[..], &b"\n"[..])))); - } - - #[test] - fn check_windows_lineending() { - let input = b"\r\n"; - let output = line_ending(&input[..]); - assert_parse!(output, Ok((&b""[..], &b"\r\n"[..]))); - } - - #[test] - fn check_unix_lineending() { - let input = b"\n"; - let output = line_ending(&input[..]); - assert_parse!(output, Ok((&b""[..], &b"\n"[..]))); - } - - #[test] - fn cr_lf() { - assert_parse!(crlf(&b"\r\na"[..]), Ok((&b"a"[..], &b"\r\n"[..]))); - assert_parse!( - crlf(&b"\r"[..]), - Err(Err::Error(error_position!(&b"\r"[..], ErrorKind::CrLf))) - ); - assert_parse!( - crlf(&b"\ra"[..]), - Err(Err::Error(error_position!(&b"\ra"[..], 
ErrorKind::CrLf))) - ); - - assert_parse!(crlf("\r\na"), Ok(("a", "\r\n"))); - assert_parse!( - crlf("\r"), - Err(Err::Error(error_position!(&"\r"[..], ErrorKind::CrLf))) - ); - assert_parse!( - crlf("\ra"), - Err(Err::Error(error_position!("\ra", ErrorKind::CrLf))) - ); - } - - #[test] - fn end_of_line() { - assert_parse!(line_ending(&b"\na"[..]), Ok((&b"a"[..], &b"\n"[..]))); - assert_parse!(line_ending(&b"\r\na"[..]), Ok((&b"a"[..], &b"\r\n"[..]))); - assert_parse!( - line_ending(&b"\r"[..]), - Err(Err::Error(error_position!(&b"\r"[..], ErrorKind::CrLf))) - ); - assert_parse!( - line_ending(&b"\ra"[..]), - Err(Err::Error(error_position!(&b"\ra"[..], ErrorKind::CrLf))) - ); - - assert_parse!(line_ending("\na"), Ok(("a", "\n"))); - assert_parse!(line_ending("\r\na"), Ok(("a", "\r\n"))); - assert_parse!( - line_ending("\r"), - Err(Err::Error(error_position!(&"\r"[..], ErrorKind::CrLf))) - ); - assert_parse!( - line_ending("\ra"), - Err(Err::Error(error_position!("\ra", ErrorKind::CrLf))) - ); - } - - fn digit_to_i16(input: &str) -> IResult<&str, i16> { - let i = input; - let (i, opt_sign) = opt(alt((char('+'), char('-'))))(i)?; - let sign = match opt_sign { - Some('+') => true, - Some('-') => false, - _ => true, - }; - - let (i, s) = match digit1::<_, crate::error::Error<_>>(i) { - Ok((i, s)) => (i, s), - Err(_) => { - return Err(Err::Error(crate::error::Error::from_error_kind( - input, - ErrorKind::Digit, - ))) - } - }; - - match s.parse_to() { - Some(n) => { - if sign { - Ok((i, n)) - } else { - Ok((i, -n)) - } - } - None => Err(Err::Error(crate::error::Error::from_error_kind( - i, - ErrorKind::Digit, - ))), - } - } - - fn digit_to_u32(i: &str) -> IResult<&str, u32> { - let (i, s) = digit1(i)?; - match s.parse_to() { - Some(n) => Ok((i, n)), - None => Err(Err::Error(crate::error::Error::from_error_kind( - i, - ErrorKind::Digit, - ))), - } - } - - proptest! { - #[test] - fn ints(s in "\\PC*") { - let res1 = digit_to_i16(&s); - let res2 = i16(s.as_str()); - assert_eq!(res1, res2); - } - - #[test] - fn uints(s in "\\PC*") { - let res1 = digit_to_u32(&s); - let res2 = u32(s.as_str()); - assert_eq!(res1, res2); - } - } -} diff --git a/vendor/nom/src/character/mod.rs b/vendor/nom/src/character/mod.rs deleted file mode 100644 index 2c5d3bc4ad833e..00000000000000 --- a/vendor/nom/src/character/mod.rs +++ /dev/null @@ -1,116 +0,0 @@ -//! Character specific parsers and combinators -//! -//! 
Functions recognizing specific characters
-
-#[cfg(test)]
-mod tests;
-
-pub mod complete;
-pub mod streaming;
-
-/// Tests if byte is ASCII alphabetic: A-Z, a-z
-///
-/// # Example
-///
-/// ```
-/// # use nom::character::is_alphabetic;
-/// assert_eq!(is_alphabetic(b'9'), false);
-/// assert_eq!(is_alphabetic(b'a'), true);
-/// ```
-#[inline]
-pub fn is_alphabetic(chr: u8) -> bool {
-  (chr >= 0x41 && chr <= 0x5A) || (chr >= 0x61 && chr <= 0x7A)
-}
-
-/// Tests if byte is ASCII digit: 0-9
-///
-/// # Example
-///
-/// ```
-/// # use nom::character::is_digit;
-/// assert_eq!(is_digit(b'a'), false);
-/// assert_eq!(is_digit(b'9'), true);
-/// ```
-#[inline]
-pub fn is_digit(chr: u8) -> bool {
-  chr >= 0x30 && chr <= 0x39
-}
-
-/// Tests if byte is ASCII hex digit: 0-9, A-F, a-f
-///
-/// # Example
-///
-/// ```
-/// # use nom::character::is_hex_digit;
-/// assert_eq!(is_hex_digit(b'a'), true);
-/// assert_eq!(is_hex_digit(b'9'), true);
-/// assert_eq!(is_hex_digit(b'A'), true);
-/// assert_eq!(is_hex_digit(b'x'), false);
-/// ```
-#[inline]
-pub fn is_hex_digit(chr: u8) -> bool {
-  (chr >= 0x30 && chr <= 0x39) || (chr >= 0x41 && chr <= 0x46) || (chr >= 0x61 && chr <= 0x66)
-}
-
-/// Tests if byte is ASCII octal digit: 0-7
-///
-/// # Example
-///
-/// ```
-/// # use nom::character::is_oct_digit;
-/// assert_eq!(is_oct_digit(b'a'), false);
-/// assert_eq!(is_oct_digit(b'9'), false);
-/// assert_eq!(is_oct_digit(b'6'), true);
-/// ```
-#[inline]
-pub fn is_oct_digit(chr: u8) -> bool {
-  chr >= 0x30 && chr <= 0x37
-}
-
-/// Tests if byte is ASCII alphanumeric: A-Z, a-z, 0-9
-///
-/// # Example
-///
-/// ```
-/// # use nom::character::is_alphanumeric;
-/// assert_eq!(is_alphanumeric(b'-'), false);
-/// assert_eq!(is_alphanumeric(b'a'), true);
-/// assert_eq!(is_alphanumeric(b'9'), true);
-/// assert_eq!(is_alphanumeric(b'A'), true);
-/// ```
-#[inline]
-pub fn is_alphanumeric(chr: u8) -> bool {
-  is_alphabetic(chr) || is_digit(chr)
-}
-
-/// Tests if byte is ASCII space or tab
-///
-/// # Example
-///
-/// ```
-/// # use nom::character::is_space;
-/// assert_eq!(is_space(b'\n'), false);
-/// assert_eq!(is_space(b'\r'), false);
-/// assert_eq!(is_space(b' '), true);
-/// assert_eq!(is_space(b'\t'), true);
-/// ```
-#[inline]
-pub fn is_space(chr: u8) -> bool {
-  chr == b' ' || chr == b'\t'
-}
-
-/// Tests if byte is ASCII newline: \n
-///
-/// # Example
-///
-/// ```
-/// # use nom::character::is_newline;
-/// assert_eq!(is_newline(b'\n'), true);
-/// assert_eq!(is_newline(b'\r'), false);
-/// assert_eq!(is_newline(b' '), false);
-/// assert_eq!(is_newline(b'\t'), false);
-/// ```
-#[inline]
-pub fn is_newline(chr: u8) -> bool {
-  chr == b'\n'
-}
diff --git a/vendor/nom/src/character/streaming.rs b/vendor/nom/src/character/streaming.rs
deleted file mode 100644
index 88aabba3560596..00000000000000
--- a/vendor/nom/src/character/streaming.rs
+++ /dev/null
@@ -1,1182 +0,0 @@
-//! Character specific parsers and combinators, streaming version
-//!
-//! Functions recognizing specific characters
-
-use crate::branch::alt;
-use crate::combinator::opt;
-use crate::error::ErrorKind;
-use crate::error::ParseError;
-use crate::internal::{Err, IResult, Needed};
-use crate::lib::std::ops::{Range, RangeFrom, RangeTo};
-use crate::traits::{
-  AsChar, FindToken, InputIter, InputLength, InputTake, InputTakeAtPosition, Slice,
-};
-use crate::traits::{Compare, CompareResult};
-
-/// Recognizes one character.
-///
-/// *Streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there's not enough input data.
-/// # Example -/// -/// ``` -/// # use nom::{Err, error::{ErrorKind, Error}, Needed, IResult}; -/// # use nom::character::streaming::char; -/// fn parser(i: &str) -> IResult<&str, char> { -/// char('a')(i) -/// } -/// assert_eq!(parser("abc"), Ok(("bc", 'a'))); -/// assert_eq!(parser("bc"), Err(Err::Error(Error::new("bc", ErrorKind::Char)))); -/// assert_eq!(parser(""), Err(Err::Incomplete(Needed::new(1)))); -/// ``` -pub fn char>(c: char) -> impl Fn(I) -> IResult -where - I: Slice> + InputIter + InputLength, - ::Item: AsChar, -{ - move |i: I| match (i).iter_elements().next().map(|t| { - let b = t.as_char() == c; - (&c, b) - }) { - None => Err(Err::Incomplete(Needed::new(c.len() - i.input_len()))), - Some((_, false)) => Err(Err::Error(Error::from_char(i, c))), - Some((c, true)) => Ok((i.slice(c.len()..), c.as_char())), - } -} - -/// Recognizes one character and checks that it satisfies a predicate -/// -/// *Streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there's not enough input data. -/// # Example -/// -/// ``` -/// # use nom::{Err, error::{ErrorKind, Error}, Needed, IResult}; -/// # use nom::character::streaming::satisfy; -/// fn parser(i: &str) -> IResult<&str, char> { -/// satisfy(|c| c == 'a' || c == 'b')(i) -/// } -/// assert_eq!(parser("abc"), Ok(("bc", 'a'))); -/// assert_eq!(parser("cd"), Err(Err::Error(Error::new("cd", ErrorKind::Satisfy)))); -/// assert_eq!(parser(""), Err(Err::Incomplete(Needed::Unknown))); -/// ``` -pub fn satisfy>(cond: F) -> impl Fn(I) -> IResult -where - I: Slice> + InputIter, - ::Item: AsChar, - F: Fn(char) -> bool, -{ - move |i: I| match (i).iter_elements().next().map(|t| { - let c = t.as_char(); - let b = cond(c); - (c, b) - }) { - None => Err(Err::Incomplete(Needed::Unknown)), - Some((_, false)) => Err(Err::Error(Error::from_error_kind(i, ErrorKind::Satisfy))), - Some((c, true)) => Ok((i.slice(c.len()..), c)), - } -} - -/// Recognizes one of the provided characters. -/// -/// *Streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there's not enough input data. -/// # Example -/// -/// ``` -/// # use nom::{Err, error::ErrorKind, Needed}; -/// # use nom::character::streaming::one_of; -/// assert_eq!(one_of::<_, _, (_, ErrorKind)>("abc")("b"), Ok(("", 'b'))); -/// assert_eq!(one_of::<_, _, (_, ErrorKind)>("a")("bc"), Err(Err::Error(("bc", ErrorKind::OneOf)))); -/// assert_eq!(one_of::<_, _, (_, ErrorKind)>("a")(""), Err(Err::Incomplete(Needed::new(1)))); -/// ``` -pub fn one_of>(list: T) -> impl Fn(I) -> IResult -where - I: Slice> + InputIter, - ::Item: AsChar + Copy, - T: FindToken<::Item>, -{ - move |i: I| match (i).iter_elements().next().map(|c| (c, list.find_token(c))) { - None => Err(Err::Incomplete(Needed::new(1))), - Some((_, false)) => Err(Err::Error(Error::from_error_kind(i, ErrorKind::OneOf))), - Some((c, true)) => Ok((i.slice(c.len()..), c.as_char())), - } -} - -/// Recognizes a character that is not in the provided characters. -/// -/// *Streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there's not enough input data. 
-/// # Example -/// -/// ``` -/// # use nom::{Err, error::ErrorKind, Needed}; -/// # use nom::character::streaming::none_of; -/// assert_eq!(none_of::<_, _, (_, ErrorKind)>("abc")("z"), Ok(("", 'z'))); -/// assert_eq!(none_of::<_, _, (_, ErrorKind)>("ab")("a"), Err(Err::Error(("a", ErrorKind::NoneOf)))); -/// assert_eq!(none_of::<_, _, (_, ErrorKind)>("a")(""), Err(Err::Incomplete(Needed::new(1)))); -/// ``` -pub fn none_of>(list: T) -> impl Fn(I) -> IResult -where - I: Slice> + InputIter, - ::Item: AsChar + Copy, - T: FindToken<::Item>, -{ - move |i: I| match (i).iter_elements().next().map(|c| (c, !list.find_token(c))) { - None => Err(Err::Incomplete(Needed::new(1))), - Some((_, false)) => Err(Err::Error(Error::from_error_kind(i, ErrorKind::NoneOf))), - Some((c, true)) => Ok((i.slice(c.len()..), c.as_char())), - } -} - -/// Recognizes the string "\r\n". -/// -/// *Streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there's not enough input data. -/// # Example -/// -/// ``` -/// # use nom::{Err, error::ErrorKind, IResult, Needed}; -/// # use nom::character::streaming::crlf; -/// assert_eq!(crlf::<_, (_, ErrorKind)>("\r\nc"), Ok(("c", "\r\n"))); -/// assert_eq!(crlf::<_, (_, ErrorKind)>("ab\r\nc"), Err(Err::Error(("ab\r\nc", ErrorKind::CrLf)))); -/// assert_eq!(crlf::<_, (_, ErrorKind)>(""), Err(Err::Incomplete(Needed::new(2)))); -/// ``` -pub fn crlf>(input: T) -> IResult -where - T: Slice> + Slice> + Slice>, - T: InputIter, - T: Compare<&'static str>, -{ - match input.compare("\r\n") { - //FIXME: is this the right index? - CompareResult::Ok => Ok((input.slice(2..), input.slice(0..2))), - CompareResult::Incomplete => Err(Err::Incomplete(Needed::new(2))), - CompareResult::Error => { - let e: ErrorKind = ErrorKind::CrLf; - Err(Err::Error(E::from_error_kind(input, e))) - } - } -} - -/// Recognizes a string of any char except '\r\n' or '\n'. -/// -/// *Streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there's not enough input data. 
-/// # Example -/// -/// ``` -/// # use nom::{Err, error::{Error, ErrorKind}, IResult, Needed}; -/// # use nom::character::streaming::not_line_ending; -/// assert_eq!(not_line_ending::<_, (_, ErrorKind)>("ab\r\nc"), Ok(("\r\nc", "ab"))); -/// assert_eq!(not_line_ending::<_, (_, ErrorKind)>("abc"), Err(Err::Incomplete(Needed::Unknown))); -/// assert_eq!(not_line_ending::<_, (_, ErrorKind)>(""), Err(Err::Incomplete(Needed::Unknown))); -/// assert_eq!(not_line_ending::<_, (_, ErrorKind)>("a\rb\nc"), Err(Err::Error(("a\rb\nc", ErrorKind::Tag )))); -/// assert_eq!(not_line_ending::<_, (_, ErrorKind)>("a\rbc"), Err(Err::Error(("a\rbc", ErrorKind::Tag )))); -/// ``` -pub fn not_line_ending>(input: T) -> IResult -where - T: Slice> + Slice> + Slice>, - T: InputIter + InputLength, - T: Compare<&'static str>, - ::Item: AsChar, - ::Item: AsChar, -{ - match input.position(|item| { - let c = item.as_char(); - c == '\r' || c == '\n' - }) { - None => Err(Err::Incomplete(Needed::Unknown)), - Some(index) => { - let mut it = input.slice(index..).iter_elements(); - let nth = it.next().unwrap().as_char(); - if nth == '\r' { - let sliced = input.slice(index..); - let comp = sliced.compare("\r\n"); - match comp { - //FIXME: calculate the right index - CompareResult::Incomplete => Err(Err::Incomplete(Needed::Unknown)), - CompareResult::Error => { - let e: ErrorKind = ErrorKind::Tag; - Err(Err::Error(E::from_error_kind(input, e))) - } - CompareResult::Ok => Ok((input.slice(index..), input.slice(..index))), - } - } else { - Ok((input.slice(index..), input.slice(..index))) - } - } - } -} - -/// Recognizes an end of line (both '\n' and '\r\n'). -/// -/// *Streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there's not enough input data. -/// # Example -/// -/// ``` -/// # use nom::{Err, error::ErrorKind, IResult, Needed}; -/// # use nom::character::streaming::line_ending; -/// assert_eq!(line_ending::<_, (_, ErrorKind)>("\r\nc"), Ok(("c", "\r\n"))); -/// assert_eq!(line_ending::<_, (_, ErrorKind)>("ab\r\nc"), Err(Err::Error(("ab\r\nc", ErrorKind::CrLf)))); -/// assert_eq!(line_ending::<_, (_, ErrorKind)>(""), Err(Err::Incomplete(Needed::new(1)))); -/// ``` -pub fn line_ending>(input: T) -> IResult -where - T: Slice> + Slice> + Slice>, - T: InputIter + InputLength, - T: Compare<&'static str>, -{ - match input.compare("\n") { - CompareResult::Ok => Ok((input.slice(1..), input.slice(0..1))), - CompareResult::Incomplete => Err(Err::Incomplete(Needed::new(1))), - CompareResult::Error => { - match input.compare("\r\n") { - //FIXME: is this the right index? - CompareResult::Ok => Ok((input.slice(2..), input.slice(0..2))), - CompareResult::Incomplete => Err(Err::Incomplete(Needed::new(2))), - CompareResult::Error => Err(Err::Error(E::from_error_kind(input, ErrorKind::CrLf))), - } - } - } -} - -/// Matches a newline character '\\n'. -/// -/// *Streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there's not enough input data. -/// # Example -/// -/// ``` -/// # use nom::{Err, error::ErrorKind, IResult, Needed}; -/// # use nom::character::streaming::newline; -/// assert_eq!(newline::<_, (_, ErrorKind)>("\nc"), Ok(("c", '\n'))); -/// assert_eq!(newline::<_, (_, ErrorKind)>("\r\nc"), Err(Err::Error(("\r\nc", ErrorKind::Char)))); -/// assert_eq!(newline::<_, (_, ErrorKind)>(""), Err(Err::Incomplete(Needed::new(1)))); -/// ``` -pub fn newline>(input: I) -> IResult -where - I: Slice> + InputIter + InputLength, - ::Item: AsChar, -{ - char('\n')(input) -} - -/// Matches a tab character '\t'. 
-/// -/// *Streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there's not enough input data. -/// # Example -/// -/// ``` -/// # use nom::{Err, error::ErrorKind, IResult, Needed}; -/// # use nom::character::streaming::tab; -/// assert_eq!(tab::<_, (_, ErrorKind)>("\tc"), Ok(("c", '\t'))); -/// assert_eq!(tab::<_, (_, ErrorKind)>("\r\nc"), Err(Err::Error(("\r\nc", ErrorKind::Char)))); -/// assert_eq!(tab::<_, (_, ErrorKind)>(""), Err(Err::Incomplete(Needed::new(1)))); -/// ``` -pub fn tab>(input: I) -> IResult -where - I: Slice> + InputIter + InputLength, - ::Item: AsChar, -{ - char('\t')(input) -} - -/// Matches one byte as a character. Note that the input type will -/// accept a `str`, but not a `&[u8]`, unlike many other nom parsers. -/// -/// *Streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there's not enough input data. -/// # Example -/// -/// ``` -/// # use nom::{character::streaming::anychar, Err, error::ErrorKind, IResult, Needed}; -/// assert_eq!(anychar::<_, (_, ErrorKind)>("abc"), Ok(("bc",'a'))); -/// assert_eq!(anychar::<_, (_, ErrorKind)>(""), Err(Err::Incomplete(Needed::new(1)))); -/// ``` -pub fn anychar>(input: T) -> IResult -where - T: InputIter + InputLength + Slice>, - ::Item: AsChar, -{ - let mut it = input.iter_indices(); - match it.next() { - None => Err(Err::Incomplete(Needed::new(1))), - Some((_, c)) => match it.next() { - None => Ok((input.slice(input.input_len()..), c.as_char())), - Some((idx, _)) => Ok((input.slice(idx..), c.as_char())), - }, - } -} - -/// Recognizes zero or more lowercase and uppercase ASCII alphabetic characters: a-z, A-Z -/// -/// *Streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there's not enough input data, -/// or if no terminating token is found (a non alphabetic character). -/// # Example -/// -/// ``` -/// # use nom::{Err, error::ErrorKind, IResult, Needed}; -/// # use nom::character::streaming::alpha0; -/// assert_eq!(alpha0::<_, (_, ErrorKind)>("ab1c"), Ok(("1c", "ab"))); -/// assert_eq!(alpha0::<_, (_, ErrorKind)>("1c"), Ok(("1c", ""))); -/// assert_eq!(alpha0::<_, (_, ErrorKind)>(""), Err(Err::Incomplete(Needed::new(1)))); -/// ``` -pub fn alpha0>(input: T) -> IResult -where - T: InputTakeAtPosition, - ::Item: AsChar, -{ - input.split_at_position(|item| !item.is_alpha()) -} - -/// Recognizes one or more lowercase and uppercase ASCII alphabetic characters: a-z, A-Z -/// -/// *Streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there's not enough input data, -/// or if no terminating token is found (a non alphabetic character). -/// # Example -/// -/// ``` -/// # use nom::{Err, error::ErrorKind, IResult, Needed}; -/// # use nom::character::streaming::alpha1; -/// assert_eq!(alpha1::<_, (_, ErrorKind)>("aB1c"), Ok(("1c", "aB"))); -/// assert_eq!(alpha1::<_, (_, ErrorKind)>("1c"), Err(Err::Error(("1c", ErrorKind::Alpha)))); -/// assert_eq!(alpha1::<_, (_, ErrorKind)>(""), Err(Err::Incomplete(Needed::new(1)))); -/// ``` -pub fn alpha1>(input: T) -> IResult -where - T: InputTakeAtPosition, - ::Item: AsChar, -{ - input.split_at_position1(|item| !item.is_alpha(), ErrorKind::Alpha) -} - -/// Recognizes zero or more ASCII numerical characters: 0-9 -/// -/// *Streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there's not enough input data, -/// or if no terminating token is found (a non digit character). 
-/// # Example -/// -/// ``` -/// # use nom::{Err, error::ErrorKind, IResult, Needed}; -/// # use nom::character::streaming::digit0; -/// assert_eq!(digit0::<_, (_, ErrorKind)>("21c"), Ok(("c", "21"))); -/// assert_eq!(digit0::<_, (_, ErrorKind)>("a21c"), Ok(("a21c", ""))); -/// assert_eq!(digit0::<_, (_, ErrorKind)>(""), Err(Err::Incomplete(Needed::new(1)))); -/// ``` -pub fn digit0>(input: T) -> IResult -where - T: InputTakeAtPosition, - ::Item: AsChar, -{ - input.split_at_position(|item| !item.is_dec_digit()) -} - -/// Recognizes one or more ASCII numerical characters: 0-9 -/// -/// *Streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there's not enough input data, -/// or if no terminating token is found (a non digit character). -/// # Example -/// -/// ``` -/// # use nom::{Err, error::ErrorKind, IResult, Needed}; -/// # use nom::character::streaming::digit1; -/// assert_eq!(digit1::<_, (_, ErrorKind)>("21c"), Ok(("c", "21"))); -/// assert_eq!(digit1::<_, (_, ErrorKind)>("c1"), Err(Err::Error(("c1", ErrorKind::Digit)))); -/// assert_eq!(digit1::<_, (_, ErrorKind)>(""), Err(Err::Incomplete(Needed::new(1)))); -/// ``` -pub fn digit1>(input: T) -> IResult -where - T: InputTakeAtPosition, - ::Item: AsChar, -{ - input.split_at_position1(|item| !item.is_dec_digit(), ErrorKind::Digit) -} - -/// Recognizes zero or more ASCII hexadecimal numerical characters: 0-9, A-F, a-f -/// -/// *Streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there's not enough input data, -/// or if no terminating token is found (a non hexadecimal digit character). -/// # Example -/// -/// ``` -/// # use nom::{Err, error::ErrorKind, IResult, Needed}; -/// # use nom::character::streaming::hex_digit0; -/// assert_eq!(hex_digit0::<_, (_, ErrorKind)>("21cZ"), Ok(("Z", "21c"))); -/// assert_eq!(hex_digit0::<_, (_, ErrorKind)>("Z21c"), Ok(("Z21c", ""))); -/// assert_eq!(hex_digit0::<_, (_, ErrorKind)>(""), Err(Err::Incomplete(Needed::new(1)))); -/// ``` -pub fn hex_digit0>(input: T) -> IResult -where - T: InputTakeAtPosition, - ::Item: AsChar, -{ - input.split_at_position(|item| !item.is_hex_digit()) -} - -/// Recognizes one or more ASCII hexadecimal numerical characters: 0-9, A-F, a-f -/// -/// *Streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there's not enough input data, -/// or if no terminating token is found (a non hexadecimal digit character). -/// # Example -/// -/// ``` -/// # use nom::{Err, error::ErrorKind, IResult, Needed}; -/// # use nom::character::streaming::hex_digit1; -/// assert_eq!(hex_digit1::<_, (_, ErrorKind)>("21cZ"), Ok(("Z", "21c"))); -/// assert_eq!(hex_digit1::<_, (_, ErrorKind)>("H2"), Err(Err::Error(("H2", ErrorKind::HexDigit)))); -/// assert_eq!(hex_digit1::<_, (_, ErrorKind)>(""), Err(Err::Incomplete(Needed::new(1)))); -/// ``` -pub fn hex_digit1>(input: T) -> IResult -where - T: InputTakeAtPosition, - ::Item: AsChar, -{ - input.split_at_position1(|item| !item.is_hex_digit(), ErrorKind::HexDigit) -} - -/// Recognizes zero or more octal characters: 0-7 -/// -/// *Streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there's not enough input data, -/// or if no terminating token is found (a non octal digit character). 
-/// # Example -/// -/// ``` -/// # use nom::{Err, error::ErrorKind, IResult, Needed}; -/// # use nom::character::streaming::oct_digit0; -/// assert_eq!(oct_digit0::<_, (_, ErrorKind)>("21cZ"), Ok(("cZ", "21"))); -/// assert_eq!(oct_digit0::<_, (_, ErrorKind)>("Z21c"), Ok(("Z21c", ""))); -/// assert_eq!(oct_digit0::<_, (_, ErrorKind)>(""), Err(Err::Incomplete(Needed::new(1)))); -/// ``` -pub fn oct_digit0>(input: T) -> IResult -where - T: InputTakeAtPosition, - ::Item: AsChar, -{ - input.split_at_position(|item| !item.is_oct_digit()) -} - -/// Recognizes one or more octal characters: 0-7 -/// -/// *Streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there's not enough input data, -/// or if no terminating token is found (a non octal digit character). -/// # Example -/// -/// ``` -/// # use nom::{Err, error::ErrorKind, IResult, Needed}; -/// # use nom::character::streaming::oct_digit1; -/// assert_eq!(oct_digit1::<_, (_, ErrorKind)>("21cZ"), Ok(("cZ", "21"))); -/// assert_eq!(oct_digit1::<_, (_, ErrorKind)>("H2"), Err(Err::Error(("H2", ErrorKind::OctDigit)))); -/// assert_eq!(oct_digit1::<_, (_, ErrorKind)>(""), Err(Err::Incomplete(Needed::new(1)))); -/// ``` -pub fn oct_digit1>(input: T) -> IResult -where - T: InputTakeAtPosition, - ::Item: AsChar, -{ - input.split_at_position1(|item| !item.is_oct_digit(), ErrorKind::OctDigit) -} - -/// Recognizes zero or more ASCII numerical and alphabetic characters: 0-9, a-z, A-Z -/// -/// *Streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there's not enough input data, -/// or if no terminating token is found (a non alphanumerical character). -/// # Example -/// -/// ``` -/// # use nom::{Err, error::ErrorKind, IResult, Needed}; -/// # use nom::character::streaming::alphanumeric0; -/// assert_eq!(alphanumeric0::<_, (_, ErrorKind)>("21cZ%1"), Ok(("%1", "21cZ"))); -/// assert_eq!(alphanumeric0::<_, (_, ErrorKind)>("&Z21c"), Ok(("&Z21c", ""))); -/// assert_eq!(alphanumeric0::<_, (_, ErrorKind)>(""), Err(Err::Incomplete(Needed::new(1)))); -/// ``` -pub fn alphanumeric0>(input: T) -> IResult -where - T: InputTakeAtPosition, - ::Item: AsChar, -{ - input.split_at_position(|item| !item.is_alphanum()) -} - -/// Recognizes one or more ASCII numerical and alphabetic characters: 0-9, a-z, A-Z -/// -/// *Streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there's not enough input data, -/// or if no terminating token is found (a non alphanumerical character). -/// # Example -/// -/// ``` -/// # use nom::{Err, error::ErrorKind, IResult, Needed}; -/// # use nom::character::streaming::alphanumeric1; -/// assert_eq!(alphanumeric1::<_, (_, ErrorKind)>("21cZ%1"), Ok(("%1", "21cZ"))); -/// assert_eq!(alphanumeric1::<_, (_, ErrorKind)>("&H2"), Err(Err::Error(("&H2", ErrorKind::AlphaNumeric)))); -/// assert_eq!(alphanumeric1::<_, (_, ErrorKind)>(""), Err(Err::Incomplete(Needed::new(1)))); -/// ``` -pub fn alphanumeric1>(input: T) -> IResult -where - T: InputTakeAtPosition, - ::Item: AsChar, -{ - input.split_at_position1(|item| !item.is_alphanum(), ErrorKind::AlphaNumeric) -} - -/// Recognizes zero or more spaces and tabs. -/// -/// *Streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there's not enough input data, -/// or if no terminating token is found (a non space character). 
-/// # Example -/// -/// ``` -/// # use nom::{Err, error::ErrorKind, IResult, Needed}; -/// # use nom::character::streaming::space0; -/// assert_eq!(space0::<_, (_, ErrorKind)>(" \t21c"), Ok(("21c", " \t"))); -/// assert_eq!(space0::<_, (_, ErrorKind)>("Z21c"), Ok(("Z21c", ""))); -/// assert_eq!(space0::<_, (_, ErrorKind)>(""), Err(Err::Incomplete(Needed::new(1)))); -/// ``` -pub fn space0>(input: T) -> IResult -where - T: InputTakeAtPosition, - ::Item: AsChar + Clone, -{ - input.split_at_position(|item| { - let c = item.as_char(); - !(c == ' ' || c == '\t') - }) -} -/// Recognizes one or more spaces and tabs. -/// -/// *Streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there's not enough input data, -/// or if no terminating token is found (a non space character). -/// # Example -/// -/// ``` -/// # use nom::{Err, error::ErrorKind, IResult, Needed}; -/// # use nom::character::streaming::space1; -/// assert_eq!(space1::<_, (_, ErrorKind)>(" \t21c"), Ok(("21c", " \t"))); -/// assert_eq!(space1::<_, (_, ErrorKind)>("H2"), Err(Err::Error(("H2", ErrorKind::Space)))); -/// assert_eq!(space1::<_, (_, ErrorKind)>(""), Err(Err::Incomplete(Needed::new(1)))); -/// ``` -pub fn space1>(input: T) -> IResult -where - T: InputTakeAtPosition, - ::Item: AsChar + Clone, -{ - input.split_at_position1( - |item| { - let c = item.as_char(); - !(c == ' ' || c == '\t') - }, - ErrorKind::Space, - ) -} - -/// Recognizes zero or more spaces, tabs, carriage returns and line feeds. -/// -/// *Streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there's not enough input data, -/// or if no terminating token is found (a non space character). -/// # Example -/// -/// ``` -/// # use nom::{Err, error::ErrorKind, IResult, Needed}; -/// # use nom::character::streaming::multispace0; -/// assert_eq!(multispace0::<_, (_, ErrorKind)>(" \t\n\r21c"), Ok(("21c", " \t\n\r"))); -/// assert_eq!(multispace0::<_, (_, ErrorKind)>("Z21c"), Ok(("Z21c", ""))); -/// assert_eq!(multispace0::<_, (_, ErrorKind)>(""), Err(Err::Incomplete(Needed::new(1)))); -/// ``` -pub fn multispace0>(input: T) -> IResult -where - T: InputTakeAtPosition, - ::Item: AsChar + Clone, -{ - input.split_at_position(|item| { - let c = item.as_char(); - !(c == ' ' || c == '\t' || c == '\r' || c == '\n') - }) -} - -/// Recognizes one or more spaces, tabs, carriage returns and line feeds. -/// -/// *Streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there's not enough input data, -/// or if no terminating token is found (a non space character). 
-/// # Example -/// -/// ``` -/// # use nom::{Err, error::ErrorKind, IResult, Needed}; -/// # use nom::character::streaming::multispace1; -/// assert_eq!(multispace1::<_, (_, ErrorKind)>(" \t\n\r21c"), Ok(("21c", " \t\n\r"))); -/// assert_eq!(multispace1::<_, (_, ErrorKind)>("H2"), Err(Err::Error(("H2", ErrorKind::MultiSpace)))); -/// assert_eq!(multispace1::<_, (_, ErrorKind)>(""), Err(Err::Incomplete(Needed::new(1)))); -/// ``` -pub fn multispace1>(input: T) -> IResult -where - T: InputTakeAtPosition, - ::Item: AsChar + Clone, -{ - input.split_at_position1( - |item| { - let c = item.as_char(); - !(c == ' ' || c == '\t' || c == '\r' || c == '\n') - }, - ErrorKind::MultiSpace, - ) -} - -pub(crate) fn sign>(input: T) -> IResult -where - T: Clone + InputTake + InputLength, - T: for<'a> Compare<&'a [u8]>, -{ - use crate::bytes::streaming::tag; - use crate::combinator::value; - - let (i, opt_sign) = opt(alt(( - value(false, tag(&b"-"[..])), - value(true, tag(&b"+"[..])), - )))(input)?; - let sign = opt_sign.unwrap_or(true); - - Ok((i, sign)) -} - -#[doc(hidden)] -macro_rules! ints { - ($($t:tt)+) => { - $( - /// will parse a number in text form to a number - /// - /// *Complete version*: can parse until the end of input. - pub fn $t>(input: T) -> IResult - where - T: InputIter + Slice> + InputLength + InputTake + Clone, - ::Item: AsChar, - T: for <'a> Compare<&'a[u8]>, - { - let (i, sign) = sign(input.clone())?; - - if i.input_len() == 0 { - return Err(Err::Incomplete(Needed::new(1))); - } - - let mut value: $t = 0; - if sign { - for (pos, c) in i.iter_indices() { - match c.as_char().to_digit(10) { - None => { - if pos == 0 { - return Err(Err::Error(E::from_error_kind(input, ErrorKind::Digit))); - } else { - return Ok((i.slice(pos..), value)); - } - }, - Some(d) => match value.checked_mul(10).and_then(|v| v.checked_add(d as $t)) { - None => return Err(Err::Error(E::from_error_kind(input, ErrorKind::Digit))), - Some(v) => value = v, - } - } - } - } else { - for (pos, c) in i.iter_indices() { - match c.as_char().to_digit(10) { - None => { - if pos == 0 { - return Err(Err::Error(E::from_error_kind(input, ErrorKind::Digit))); - } else { - return Ok((i.slice(pos..), value)); - } - }, - Some(d) => match value.checked_mul(10).and_then(|v| v.checked_sub(d as $t)) { - None => return Err(Err::Error(E::from_error_kind(input, ErrorKind::Digit))), - Some(v) => value = v, - } - } - } - } - - Err(Err::Incomplete(Needed::new(1))) - } - )+ - } -} - -ints! { i8 i16 i32 i64 i128 } - -#[doc(hidden)] -macro_rules! uints { - ($($t:tt)+) => { - $( - /// will parse a number in text form to a number - /// - /// *Complete version*: can parse until the end of input. - pub fn $t>(input: T) -> IResult - where - T: InputIter + Slice> + InputLength, - ::Item: AsChar, - { - let i = input; - - if i.input_len() == 0 { - return Err(Err::Incomplete(Needed::new(1))); - } - - let mut value: $t = 0; - for (pos, c) in i.iter_indices() { - match c.as_char().to_digit(10) { - None => { - if pos == 0 { - return Err(Err::Error(E::from_error_kind(i, ErrorKind::Digit))); - } else { - return Ok((i.slice(pos..), value)); - } - }, - Some(d) => match value.checked_mul(10).and_then(|v| v.checked_add(d as $t)) { - None => return Err(Err::Error(E::from_error_kind(i, ErrorKind::Digit))), - Some(v) => value = v, - } - } - } - - Err(Err::Incomplete(Needed::new(1))) - } - )+ - } -} - -uints! 
{ u8 u16 u32 u64 u128 } - -#[cfg(test)] -mod tests { - use super::*; - use crate::error::ErrorKind; - use crate::internal::{Err, Needed}; - use crate::sequence::pair; - use crate::traits::ParseTo; - use proptest::prelude::*; - - macro_rules! assert_parse( - ($left: expr, $right: expr) => { - let res: $crate::IResult<_, _, (_, ErrorKind)> = $left; - assert_eq!(res, $right); - }; - ); - - #[test] - fn anychar_str() { - use super::anychar; - assert_eq!(anychar::<_, (&str, ErrorKind)>("Ә"), Ok(("", 'Ә'))); - } - - #[test] - fn character() { - let a: &[u8] = b"abcd"; - let b: &[u8] = b"1234"; - let c: &[u8] = b"a123"; - let d: &[u8] = "azé12".as_bytes(); - let e: &[u8] = b" "; - let f: &[u8] = b" ;"; - //assert_eq!(alpha1::<_, (_, ErrorKind)>(a), Err(Err::Incomplete(Needed::new(1)))); - assert_parse!(alpha1(a), Err(Err::Incomplete(Needed::new(1)))); - assert_eq!(alpha1(b), Err(Err::Error((b, ErrorKind::Alpha)))); - assert_eq!(alpha1::<_, (_, ErrorKind)>(c), Ok((&c[1..], &b"a"[..]))); - assert_eq!( - alpha1::<_, (_, ErrorKind)>(d), - Ok(("é12".as_bytes(), &b"az"[..])) - ); - assert_eq!(digit1(a), Err(Err::Error((a, ErrorKind::Digit)))); - assert_eq!( - digit1::<_, (_, ErrorKind)>(b), - Err(Err::Incomplete(Needed::new(1))) - ); - assert_eq!(digit1(c), Err(Err::Error((c, ErrorKind::Digit)))); - assert_eq!(digit1(d), Err(Err::Error((d, ErrorKind::Digit)))); - assert_eq!( - hex_digit1::<_, (_, ErrorKind)>(a), - Err(Err::Incomplete(Needed::new(1))) - ); - assert_eq!( - hex_digit1::<_, (_, ErrorKind)>(b), - Err(Err::Incomplete(Needed::new(1))) - ); - assert_eq!( - hex_digit1::<_, (_, ErrorKind)>(c), - Err(Err::Incomplete(Needed::new(1))) - ); - assert_eq!( - hex_digit1::<_, (_, ErrorKind)>(d), - Ok(("zé12".as_bytes(), &b"a"[..])) - ); - assert_eq!(hex_digit1(e), Err(Err::Error((e, ErrorKind::HexDigit)))); - assert_eq!(oct_digit1(a), Err(Err::Error((a, ErrorKind::OctDigit)))); - assert_eq!( - oct_digit1::<_, (_, ErrorKind)>(b), - Err(Err::Incomplete(Needed::new(1))) - ); - assert_eq!(oct_digit1(c), Err(Err::Error((c, ErrorKind::OctDigit)))); - assert_eq!(oct_digit1(d), Err(Err::Error((d, ErrorKind::OctDigit)))); - assert_eq!( - alphanumeric1::<_, (_, ErrorKind)>(a), - Err(Err::Incomplete(Needed::new(1))) - ); - //assert_eq!(fix_error!(b,(), alphanumeric1), Ok((empty, b))); - assert_eq!( - alphanumeric1::<_, (_, ErrorKind)>(c), - Err(Err::Incomplete(Needed::new(1))) - ); - assert_eq!( - alphanumeric1::<_, (_, ErrorKind)>(d), - Ok(("é12".as_bytes(), &b"az"[..])) - ); - assert_eq!( - space1::<_, (_, ErrorKind)>(e), - Err(Err::Incomplete(Needed::new(1))) - ); - assert_eq!(space1::<_, (_, ErrorKind)>(f), Ok((&b";"[..], &b" "[..]))); - } - - #[cfg(feature = "alloc")] - #[test] - fn character_s() { - let a = "abcd"; - let b = "1234"; - let c = "a123"; - let d = "azé12"; - let e = " "; - assert_eq!( - alpha1::<_, (_, ErrorKind)>(a), - Err(Err::Incomplete(Needed::new(1))) - ); - assert_eq!(alpha1(b), Err(Err::Error((b, ErrorKind::Alpha)))); - assert_eq!(alpha1::<_, (_, ErrorKind)>(c), Ok((&c[1..], &"a"[..]))); - assert_eq!(alpha1::<_, (_, ErrorKind)>(d), Ok(("é12", &"az"[..]))); - assert_eq!(digit1(a), Err(Err::Error((a, ErrorKind::Digit)))); - assert_eq!( - digit1::<_, (_, ErrorKind)>(b), - Err(Err::Incomplete(Needed::new(1))) - ); - assert_eq!(digit1(c), Err(Err::Error((c, ErrorKind::Digit)))); - assert_eq!(digit1(d), Err(Err::Error((d, ErrorKind::Digit)))); - assert_eq!( - hex_digit1::<_, (_, ErrorKind)>(a), - Err(Err::Incomplete(Needed::new(1))) - ); - assert_eq!( - hex_digit1::<_, (_, ErrorKind)>(b), - 
Err(Err::Incomplete(Needed::new(1))) - ); - assert_eq!( - hex_digit1::<_, (_, ErrorKind)>(c), - Err(Err::Incomplete(Needed::new(1))) - ); - assert_eq!(hex_digit1::<_, (_, ErrorKind)>(d), Ok(("zé12", &"a"[..]))); - assert_eq!(hex_digit1(e), Err(Err::Error((e, ErrorKind::HexDigit)))); - assert_eq!(oct_digit1(a), Err(Err::Error((a, ErrorKind::OctDigit)))); - assert_eq!( - oct_digit1::<_, (_, ErrorKind)>(b), - Err(Err::Incomplete(Needed::new(1))) - ); - assert_eq!(oct_digit1(c), Err(Err::Error((c, ErrorKind::OctDigit)))); - assert_eq!(oct_digit1(d), Err(Err::Error((d, ErrorKind::OctDigit)))); - assert_eq!( - alphanumeric1::<_, (_, ErrorKind)>(a), - Err(Err::Incomplete(Needed::new(1))) - ); - //assert_eq!(fix_error!(b,(), alphanumeric1), Ok((empty, b))); - assert_eq!( - alphanumeric1::<_, (_, ErrorKind)>(c), - Err(Err::Incomplete(Needed::new(1))) - ); - assert_eq!(alphanumeric1::<_, (_, ErrorKind)>(d), Ok(("é12", "az"))); - assert_eq!( - space1::<_, (_, ErrorKind)>(e), - Err(Err::Incomplete(Needed::new(1))) - ); - } - - use crate::traits::Offset; - #[test] - fn offset() { - let a = &b"abcd;"[..]; - let b = &b"1234;"[..]; - let c = &b"a123;"[..]; - let d = &b" \t;"[..]; - let e = &b" \t\r\n;"[..]; - let f = &b"123abcDEF;"[..]; - - match alpha1::<_, (_, ErrorKind)>(a) { - Ok((i, _)) => { - assert_eq!(a.offset(i) + i.len(), a.len()); - } - _ => panic!("wrong return type in offset test for alpha"), - } - match digit1::<_, (_, ErrorKind)>(b) { - Ok((i, _)) => { - assert_eq!(b.offset(i) + i.len(), b.len()); - } - _ => panic!("wrong return type in offset test for digit"), - } - match alphanumeric1::<_, (_, ErrorKind)>(c) { - Ok((i, _)) => { - assert_eq!(c.offset(i) + i.len(), c.len()); - } - _ => panic!("wrong return type in offset test for alphanumeric"), - } - match space1::<_, (_, ErrorKind)>(d) { - Ok((i, _)) => { - assert_eq!(d.offset(i) + i.len(), d.len()); - } - _ => panic!("wrong return type in offset test for space"), - } - match multispace1::<_, (_, ErrorKind)>(e) { - Ok((i, _)) => { - assert_eq!(e.offset(i) + i.len(), e.len()); - } - _ => panic!("wrong return type in offset test for multispace"), - } - match hex_digit1::<_, (_, ErrorKind)>(f) { - Ok((i, _)) => { - assert_eq!(f.offset(i) + i.len(), f.len()); - } - _ => panic!("wrong return type in offset test for hex_digit"), - } - match oct_digit1::<_, (_, ErrorKind)>(f) { - Ok((i, _)) => { - assert_eq!(f.offset(i) + i.len(), f.len()); - } - _ => panic!("wrong return type in offset test for oct_digit"), - } - } - - #[test] - fn is_not_line_ending_bytes() { - let a: &[u8] = b"ab12cd\nefgh"; - assert_eq!( - not_line_ending::<_, (_, ErrorKind)>(a), - Ok((&b"\nefgh"[..], &b"ab12cd"[..])) - ); - - let b: &[u8] = b"ab12cd\nefgh\nijkl"; - assert_eq!( - not_line_ending::<_, (_, ErrorKind)>(b), - Ok((&b"\nefgh\nijkl"[..], &b"ab12cd"[..])) - ); - - let c: &[u8] = b"ab12cd\r\nefgh\nijkl"; - assert_eq!( - not_line_ending::<_, (_, ErrorKind)>(c), - Ok((&b"\r\nefgh\nijkl"[..], &b"ab12cd"[..])) - ); - - let d: &[u8] = b"ab12cd"; - assert_eq!( - not_line_ending::<_, (_, ErrorKind)>(d), - Err(Err::Incomplete(Needed::Unknown)) - ); - } - - #[test] - fn is_not_line_ending_str() { - /* - let a: &str = "ab12cd\nefgh"; - assert_eq!(not_line_ending(a), Ok((&"\nefgh"[..], &"ab12cd"[..]))); - - let b: &str = "ab12cd\nefgh\nijkl"; - assert_eq!(not_line_ending(b), Ok((&"\nefgh\nijkl"[..], &"ab12cd"[..]))); - - let c: &str = "ab12cd\r\nefgh\nijkl"; - assert_eq!(not_line_ending(c), Ok((&"\r\nefgh\nijkl"[..], &"ab12cd"[..]))); - - let d = "βèƒôřè\nÂßÇáƒƭèř"; - 
assert_eq!(not_line_ending(d), Ok((&"\nÂßÇáƒƭèř"[..], &"βèƒôřè"[..]))); - - let e = "βèƒôřè\r\nÂßÇáƒƭèř"; - assert_eq!(not_line_ending(e), Ok((&"\r\nÂßÇáƒƭèř"[..], &"βèƒôřè"[..]))); - */ - - let f = "βèƒôřè\rÂßÇáƒƭèř"; - assert_eq!(not_line_ending(f), Err(Err::Error((f, ErrorKind::Tag)))); - - let g2: &str = "ab12cd"; - assert_eq!( - not_line_ending::<_, (_, ErrorKind)>(g2), - Err(Err::Incomplete(Needed::Unknown)) - ); - } - - #[test] - fn hex_digit_test() { - let i = &b"0123456789abcdefABCDEF;"[..]; - assert_parse!(hex_digit1(i), Ok((&b";"[..], &i[..i.len() - 1]))); - - let i = &b"g"[..]; - assert_parse!( - hex_digit1(i), - Err(Err::Error(error_position!(i, ErrorKind::HexDigit))) - ); - - let i = &b"G"[..]; - assert_parse!( - hex_digit1(i), - Err(Err::Error(error_position!(i, ErrorKind::HexDigit))) - ); - - assert!(crate::character::is_hex_digit(b'0')); - assert!(crate::character::is_hex_digit(b'9')); - assert!(crate::character::is_hex_digit(b'a')); - assert!(crate::character::is_hex_digit(b'f')); - assert!(crate::character::is_hex_digit(b'A')); - assert!(crate::character::is_hex_digit(b'F')); - assert!(!crate::character::is_hex_digit(b'g')); - assert!(!crate::character::is_hex_digit(b'G')); - assert!(!crate::character::is_hex_digit(b'/')); - assert!(!crate::character::is_hex_digit(b':')); - assert!(!crate::character::is_hex_digit(b'@')); - assert!(!crate::character::is_hex_digit(b'\x60')); - } - - #[test] - fn oct_digit_test() { - let i = &b"01234567;"[..]; - assert_parse!(oct_digit1(i), Ok((&b";"[..], &i[..i.len() - 1]))); - - let i = &b"8"[..]; - assert_parse!( - oct_digit1(i), - Err(Err::Error(error_position!(i, ErrorKind::OctDigit))) - ); - - assert!(crate::character::is_oct_digit(b'0')); - assert!(crate::character::is_oct_digit(b'7')); - assert!(!crate::character::is_oct_digit(b'8')); - assert!(!crate::character::is_oct_digit(b'9')); - assert!(!crate::character::is_oct_digit(b'a')); - assert!(!crate::character::is_oct_digit(b'A')); - assert!(!crate::character::is_oct_digit(b'/')); - assert!(!crate::character::is_oct_digit(b':')); - assert!(!crate::character::is_oct_digit(b'@')); - assert!(!crate::character::is_oct_digit(b'\x60')); - } - - #[test] - fn full_line_windows() { - fn take_full_line(i: &[u8]) -> IResult<&[u8], (&[u8], &[u8])> { - pair(not_line_ending, line_ending)(i) - } - let input = b"abc\r\n"; - let output = take_full_line(input); - assert_eq!(output, Ok((&b""[..], (&b"abc"[..], &b"\r\n"[..])))); - } - - #[test] - fn full_line_unix() { - fn take_full_line(i: &[u8]) -> IResult<&[u8], (&[u8], &[u8])> { - pair(not_line_ending, line_ending)(i) - } - let input = b"abc\n"; - let output = take_full_line(input); - assert_eq!(output, Ok((&b""[..], (&b"abc"[..], &b"\n"[..])))); - } - - #[test] - fn check_windows_lineending() { - let input = b"\r\n"; - let output = line_ending(&input[..]); - assert_parse!(output, Ok((&b""[..], &b"\r\n"[..]))); - } - - #[test] - fn check_unix_lineending() { - let input = b"\n"; - let output = line_ending(&input[..]); - assert_parse!(output, Ok((&b""[..], &b"\n"[..]))); - } - - #[test] - fn cr_lf() { - assert_parse!(crlf(&b"\r\na"[..]), Ok((&b"a"[..], &b"\r\n"[..]))); - assert_parse!(crlf(&b"\r"[..]), Err(Err::Incomplete(Needed::new(2)))); - assert_parse!( - crlf(&b"\ra"[..]), - Err(Err::Error(error_position!(&b"\ra"[..], ErrorKind::CrLf))) - ); - - assert_parse!(crlf("\r\na"), Ok(("a", "\r\n"))); - assert_parse!(crlf("\r"), Err(Err::Incomplete(Needed::new(2)))); - assert_parse!( - crlf("\ra"), - Err(Err::Error(error_position!("\ra", 
ErrorKind::CrLf))) - ); - } - - #[test] - fn end_of_line() { - assert_parse!(line_ending(&b"\na"[..]), Ok((&b"a"[..], &b"\n"[..]))); - assert_parse!(line_ending(&b"\r\na"[..]), Ok((&b"a"[..], &b"\r\n"[..]))); - assert_parse!( - line_ending(&b"\r"[..]), - Err(Err::Incomplete(Needed::new(2))) - ); - assert_parse!( - line_ending(&b"\ra"[..]), - Err(Err::Error(error_position!(&b"\ra"[..], ErrorKind::CrLf))) - ); - - assert_parse!(line_ending("\na"), Ok(("a", "\n"))); - assert_parse!(line_ending("\r\na"), Ok(("a", "\r\n"))); - assert_parse!(line_ending("\r"), Err(Err::Incomplete(Needed::new(2)))); - assert_parse!( - line_ending("\ra"), - Err(Err::Error(error_position!("\ra", ErrorKind::CrLf))) - ); - } - - fn digit_to_i16(input: &str) -> IResult<&str, i16> { - let i = input; - let (i, opt_sign) = opt(alt((char('+'), char('-'))))(i)?; - let sign = match opt_sign { - Some('+') => true, - Some('-') => false, - _ => true, - }; - - let (i, s) = match digit1::<_, crate::error::Error<_>>(i) { - Ok((i, s)) => (i, s), - Err(Err::Incomplete(i)) => return Err(Err::Incomplete(i)), - Err(_) => { - return Err(Err::Error(crate::error::Error::from_error_kind( - input, - ErrorKind::Digit, - ))) - } - }; - match s.parse_to() { - Some(n) => { - if sign { - Ok((i, n)) - } else { - Ok((i, -n)) - } - } - None => Err(Err::Error(crate::error::Error::from_error_kind( - i, - ErrorKind::Digit, - ))), - } - } - - fn digit_to_u32(i: &str) -> IResult<&str, u32> { - let (i, s) = digit1(i)?; - match s.parse_to() { - Some(n) => Ok((i, n)), - None => Err(Err::Error(crate::error::Error::from_error_kind( - i, - ErrorKind::Digit, - ))), - } - } - - proptest! { - #[test] - fn ints(s in "\\PC*") { - let res1 = digit_to_i16(&s); - let res2 = i16(s.as_str()); - assert_eq!(res1, res2); - } - - #[test] - fn uints(s in "\\PC*") { - let res1 = digit_to_u32(&s); - let res2 = u32(s.as_str()); - assert_eq!(res1, res2); - } - } -} diff --git a/vendor/nom/src/character/tests.rs b/vendor/nom/src/character/tests.rs deleted file mode 100644 index 64c2a1c8a7c8c4..00000000000000 --- a/vendor/nom/src/character/tests.rs +++ /dev/null @@ -1,62 +0,0 @@ -use super::streaming::*; -use crate::error::ErrorKind; -use crate::internal::{Err, IResult}; - -#[test] -fn one_of_test() { - fn f(i: &[u8]) -> IResult<&[u8], char> { - one_of("ab")(i) - } - - let a = &b"abcd"[..]; - assert_eq!(f(a), Ok((&b"bcd"[..], 'a'))); - - let b = &b"cde"[..]; - assert_eq!(f(b), Err(Err::Error(error_position!(b, ErrorKind::OneOf)))); - - fn utf8(i: &str) -> IResult<&str, char> { - one_of("+\u{FF0B}")(i) - } - - assert!(utf8("+").is_ok()); - assert!(utf8("\u{FF0B}").is_ok()); -} - -#[test] -fn none_of_test() { - fn f(i: &[u8]) -> IResult<&[u8], char> { - none_of("ab")(i) - } - - let a = &b"abcd"[..]; - assert_eq!(f(a), Err(Err::Error(error_position!(a, ErrorKind::NoneOf)))); - - let b = &b"cde"[..]; - assert_eq!(f(b), Ok((&b"de"[..], 'c'))); -} - -#[test] -fn char_byteslice() { - fn f(i: &[u8]) -> IResult<&[u8], char> { - char('c')(i) - } - - let a = &b"abcd"[..]; - assert_eq!(f(a), Err(Err::Error(error_position!(a, ErrorKind::Char)))); - - let b = &b"cde"[..]; - assert_eq!(f(b), Ok((&b"de"[..], 'c'))); -} - -#[test] -fn char_str() { - fn f(i: &str) -> IResult<&str, char> { - char('c')(i) - } - - let a = &"abcd"[..]; - assert_eq!(f(a), Err(Err::Error(error_position!(a, ErrorKind::Char)))); - - let b = &"cde"[..]; - assert_eq!(f(b), Ok((&"de"[..], 'c'))); -} diff --git a/vendor/nom/src/combinator/mod.rs b/vendor/nom/src/combinator/mod.rs deleted file mode 100644 index 
fe08d4a1050b48..00000000000000 --- a/vendor/nom/src/combinator/mod.rs +++ /dev/null @@ -1,809 +0,0 @@ -//! General purpose combinators - -#![allow(unused_imports)] - -#[cfg(feature = "alloc")] -use crate::lib::std::boxed::Box; - -use crate::error::{ErrorKind, FromExternalError, ParseError}; -use crate::internal::*; -use crate::lib::std::borrow::Borrow; -use crate::lib::std::convert::Into; -#[cfg(feature = "std")] -use crate::lib::std::fmt::Debug; -use crate::lib::std::mem::transmute; -use crate::lib::std::ops::{Range, RangeFrom, RangeTo}; -use crate::traits::{AsChar, InputIter, InputLength, InputTakeAtPosition, ParseTo}; -use crate::traits::{Compare, CompareResult, Offset, Slice}; - -#[cfg(test)] -mod tests; - -/// Return the remaining input. -/// -/// ```rust -/// # use nom::error::ErrorKind; -/// use nom::combinator::rest; -/// assert_eq!(rest::<_,(_, ErrorKind)>("abc"), Ok(("", "abc"))); -/// assert_eq!(rest::<_,(_, ErrorKind)>(""), Ok(("", ""))); -/// ``` -#[inline] -pub fn rest>(input: T) -> IResult -where - T: Slice>, - T: InputLength, -{ - Ok((input.slice(input.input_len()..), input)) -} - -/// Return the length of the remaining input. -/// -/// ```rust -/// # use nom::error::ErrorKind; -/// use nom::combinator::rest_len; -/// assert_eq!(rest_len::<_,(_, ErrorKind)>("abc"), Ok(("abc", 3))); -/// assert_eq!(rest_len::<_,(_, ErrorKind)>(""), Ok(("", 0))); -/// ``` -#[inline] -pub fn rest_len>(input: T) -> IResult -where - T: InputLength, -{ - let len = input.input_len(); - Ok((input, len)) -} - -/// Maps a function on the result of a parser. -/// -/// ```rust -/// use nom::{Err,error::ErrorKind, IResult,Parser}; -/// use nom::character::complete::digit1; -/// use nom::combinator::map; -/// # fn main() { -/// -/// let mut parser = map(digit1, |s: &str| s.len()); -/// -/// // the parser will count how many characters were returned by digit1 -/// assert_eq!(parser.parse("123456"), Ok(("", 6))); -/// -/// // this will fail if digit1 fails -/// assert_eq!(parser.parse("abc"), Err(Err::Error(("abc", ErrorKind::Digit)))); -/// # } -/// ``` -pub fn map(mut parser: F, mut f: G) -> impl FnMut(I) -> IResult -where - F: Parser, - G: FnMut(O1) -> O2, -{ - move |input: I| { - let (input, o1) = parser.parse(input)?; - Ok((input, f(o1))) - } -} - -/// Applies a function returning a `Result` over the result of a parser. -/// -/// ```rust -/// # use nom::{Err,error::ErrorKind, IResult}; -/// use nom::character::complete::digit1; -/// use nom::combinator::map_res; -/// # fn main() { -/// -/// let mut parse = map_res(digit1, |s: &str| s.parse::()); -/// -/// // the parser will convert the result of digit1 to a number -/// assert_eq!(parse("123"), Ok(("", 123))); -/// -/// // this will fail if digit1 fails -/// assert_eq!(parse("abc"), Err(Err::Error(("abc", ErrorKind::Digit)))); -/// -/// // this will fail if the mapped function fails (a `u8` is too small to hold `123456`) -/// assert_eq!(parse("123456"), Err(Err::Error(("123456", ErrorKind::MapRes)))); -/// # } -/// ``` -pub fn map_res, E2, F, G>( - mut parser: F, - mut f: G, -) -> impl FnMut(I) -> IResult -where - F: Parser, - G: FnMut(O1) -> Result, -{ - move |input: I| { - let i = input.clone(); - let (input, o1) = parser.parse(input)?; - match f(o1) { - Ok(o2) => Ok((input, o2)), - Err(e) => Err(Err::Error(E::from_external_error(i, ErrorKind::MapRes, e))), - } - } -} - -/// Applies a function returning an `Option` over the result of a parser. 
-/// -/// ```rust -/// # use nom::{Err,error::ErrorKind, IResult}; -/// use nom::character::complete::digit1; -/// use nom::combinator::map_opt; -/// # fn main() { -/// -/// let mut parse = map_opt(digit1, |s: &str| s.parse::().ok()); -/// -/// // the parser will convert the result of digit1 to a number -/// assert_eq!(parse("123"), Ok(("", 123))); -/// -/// // this will fail if digit1 fails -/// assert_eq!(parse("abc"), Err(Err::Error(("abc", ErrorKind::Digit)))); -/// -/// // this will fail if the mapped function fails (a `u8` is too small to hold `123456`) -/// assert_eq!(parse("123456"), Err(Err::Error(("123456", ErrorKind::MapOpt)))); -/// # } -/// ``` -pub fn map_opt, F, G>( - mut parser: F, - mut f: G, -) -> impl FnMut(I) -> IResult -where - F: Parser, - G: FnMut(O1) -> Option, -{ - move |input: I| { - let i = input.clone(); - let (input, o1) = parser.parse(input)?; - match f(o1) { - Some(o2) => Ok((input, o2)), - None => Err(Err::Error(E::from_error_kind(i, ErrorKind::MapOpt))), - } - } -} - -/// Applies a parser over the result of another one. -/// -/// ```rust -/// # use nom::{Err,error::ErrorKind, IResult}; -/// use nom::character::complete::digit1; -/// use nom::bytes::complete::take; -/// use nom::combinator::map_parser; -/// # fn main() { -/// -/// let mut parse = map_parser(take(5u8), digit1); -/// -/// assert_eq!(parse("12345"), Ok(("", "12345"))); -/// assert_eq!(parse("123ab"), Ok(("", "123"))); -/// assert_eq!(parse("123"), Err(Err::Error(("123", ErrorKind::Eof)))); -/// # } -/// ``` -pub fn map_parser, F, G>( - mut parser: F, - mut applied_parser: G, -) -> impl FnMut(I) -> IResult -where - F: Parser, - G: Parser, -{ - move |input: I| { - let (input, o1) = parser.parse(input)?; - let (_, o2) = applied_parser.parse(o1)?; - Ok((input, o2)) - } -} - -/// Creates a new parser from the output of the first parser, then apply that parser over the rest of the input. -/// -/// ```rust -/// # use nom::{Err,error::ErrorKind, IResult}; -/// use nom::bytes::complete::take; -/// use nom::number::complete::u8; -/// use nom::combinator::flat_map; -/// # fn main() { -/// -/// let mut parse = flat_map(u8, take); -/// -/// assert_eq!(parse(&[2, 0, 1, 2][..]), Ok((&[2][..], &[0, 1][..]))); -/// assert_eq!(parse(&[4, 0, 1, 2][..]), Err(Err::Error((&[0, 1, 2][..], ErrorKind::Eof)))); -/// # } -/// ``` -pub fn flat_map, F, G, H>( - mut parser: F, - mut applied_parser: G, -) -> impl FnMut(I) -> IResult -where - F: Parser, - G: FnMut(O1) -> H, - H: Parser, -{ - move |input: I| { - let (input, o1) = parser.parse(input)?; - applied_parser(o1).parse(input) - } -} - -/// Optional parser, will return `None` on [`Err::Error`]. -/// -/// To chain an error up, see [`cut`]. -/// -/// ```rust -/// # use nom::{Err,error::ErrorKind, IResult}; -/// use nom::combinator::opt; -/// use nom::character::complete::alpha1; -/// # fn main() { -/// -/// fn parser(i: &str) -> IResult<&str, Option<&str>> { -/// opt(alpha1)(i) -/// } -/// -/// assert_eq!(parser("abcd;"), Ok((";", Some("abcd")))); -/// assert_eq!(parser("123;"), Ok(("123;", None))); -/// # } -/// ``` -pub fn opt, F>(mut f: F) -> impl FnMut(I) -> IResult, E> -where - F: Parser, -{ - move |input: I| { - let i = input.clone(); - match f.parse(input) { - Ok((i, o)) => Ok((i, Some(o))), - Err(Err::Error(_)) => Ok((i, None)), - Err(e) => Err(e), - } - } -} - -/// Calls the parser if the condition is met. 
-/// -/// ```rust -/// # use nom::{Err, error::{Error, ErrorKind}, IResult}; -/// use nom::combinator::cond; -/// use nom::character::complete::alpha1; -/// # fn main() { -/// -/// fn parser(b: bool, i: &str) -> IResult<&str, Option<&str>> { -/// cond(b, alpha1)(i) -/// } -/// -/// assert_eq!(parser(true, "abcd;"), Ok((";", Some("abcd")))); -/// assert_eq!(parser(false, "abcd;"), Ok(("abcd;", None))); -/// assert_eq!(parser(true, "123;"), Err(Err::Error(Error::new("123;", ErrorKind::Alpha)))); -/// assert_eq!(parser(false, "123;"), Ok(("123;", None))); -/// # } -/// ``` -pub fn cond, F>( - b: bool, - mut f: F, -) -> impl FnMut(I) -> IResult, E> -where - F: Parser, -{ - move |input: I| { - if b { - match f.parse(input) { - Ok((i, o)) => Ok((i, Some(o))), - Err(e) => Err(e), - } - } else { - Ok((input, None)) - } - } -} - -/// Tries to apply its parser without consuming the input. -/// -/// ```rust -/// # use nom::{Err,error::ErrorKind, IResult}; -/// use nom::combinator::peek; -/// use nom::character::complete::alpha1; -/// # fn main() { -/// -/// let mut parser = peek(alpha1); -/// -/// assert_eq!(parser("abcd;"), Ok(("abcd;", "abcd"))); -/// assert_eq!(parser("123;"), Err(Err::Error(("123;", ErrorKind::Alpha)))); -/// # } -/// ``` -pub fn peek, F>(mut f: F) -> impl FnMut(I) -> IResult -where - F: Parser, -{ - move |input: I| { - let i = input.clone(); - match f.parse(input) { - Ok((_, o)) => Ok((i, o)), - Err(e) => Err(e), - } - } -} - -/// returns its input if it is at the end of input data -/// -/// When we're at the end of the data, this combinator -/// will succeed -/// -/// ``` -/// # use std::str; -/// # use nom::{Err, error::ErrorKind, IResult}; -/// # use nom::combinator::eof; -/// -/// # fn main() { -/// let parser = eof; -/// assert_eq!(parser("abc"), Err(Err::Error(("abc", ErrorKind::Eof)))); -/// assert_eq!(parser(""), Ok(("", ""))); -/// # } -/// ``` -pub fn eof>(input: I) -> IResult { - if input.input_len() == 0 { - let clone = input.clone(); - Ok((input, clone)) - } else { - Err(Err::Error(E::from_error_kind(input, ErrorKind::Eof))) - } -} - -/// Transforms Incomplete into `Error`. -/// -/// ```rust -/// # use nom::{Err,error::ErrorKind, IResult}; -/// use nom::bytes::streaming::take; -/// use nom::combinator::complete; -/// # fn main() { -/// -/// let mut parser = complete(take(5u8)); -/// -/// assert_eq!(parser("abcdefg"), Ok(("fg", "abcde"))); -/// assert_eq!(parser("abcd"), Err(Err::Error(("abcd", ErrorKind::Complete)))); -/// # } -/// ``` -pub fn complete, F>(mut f: F) -> impl FnMut(I) -> IResult -where - F: Parser, -{ - move |input: I| { - let i = input.clone(); - match f.parse(input) { - Err(Err::Incomplete(_)) => Err(Err::Error(E::from_error_kind(i, ErrorKind::Complete))), - rest => rest, - } - } -} - -/// Succeeds if all the input has been consumed by its child parser. 
-/// -/// ```rust -/// # use nom::{Err,error::ErrorKind, IResult}; -/// use nom::combinator::all_consuming; -/// use nom::character::complete::alpha1; -/// # fn main() { -/// -/// let mut parser = all_consuming(alpha1); -/// -/// assert_eq!(parser("abcd"), Ok(("", "abcd"))); -/// assert_eq!(parser("abcd;"),Err(Err::Error((";", ErrorKind::Eof)))); -/// assert_eq!(parser("123abcd;"),Err(Err::Error(("123abcd;", ErrorKind::Alpha)))); -/// # } -/// ``` -pub fn all_consuming, F>(mut f: F) -> impl FnMut(I) -> IResult -where - I: InputLength, - F: Parser, -{ - move |input: I| { - let (input, res) = f.parse(input)?; - if input.input_len() == 0 { - Ok((input, res)) - } else { - Err(Err::Error(E::from_error_kind(input, ErrorKind::Eof))) - } - } -} - -/// Returns the result of the child parser if it satisfies a verification function. -/// -/// The verification function takes as argument a reference to the output of the -/// parser. -/// -/// ```rust -/// # use nom::{Err,error::ErrorKind, IResult}; -/// use nom::combinator::verify; -/// use nom::character::complete::alpha1; -/// # fn main() { -/// -/// let mut parser = verify(alpha1, |s: &str| s.len() == 4); -/// -/// assert_eq!(parser("abcd"), Ok(("", "abcd"))); -/// assert_eq!(parser("abcde"), Err(Err::Error(("abcde", ErrorKind::Verify)))); -/// assert_eq!(parser("123abcd;"),Err(Err::Error(("123abcd;", ErrorKind::Alpha)))); -/// # } -/// ``` -pub fn verify, F, G>( - mut first: F, - second: G, -) -> impl FnMut(I) -> IResult -where - F: Parser, - G: Fn(&O2) -> bool, - O1: Borrow, - O2: ?Sized, -{ - move |input: I| { - let i = input.clone(); - let (input, o) = first.parse(input)?; - - if second(o.borrow()) { - Ok((input, o)) - } else { - Err(Err::Error(E::from_error_kind(i, ErrorKind::Verify))) - } - } -} - -/// Returns the provided value if the child parser succeeds. -/// -/// ```rust -/// # use nom::{Err,error::ErrorKind, IResult}; -/// use nom::combinator::value; -/// use nom::character::complete::alpha1; -/// # fn main() { -/// -/// let mut parser = value(1234, alpha1); -/// -/// assert_eq!(parser("abcd"), Ok(("", 1234))); -/// assert_eq!(parser("123abcd;"), Err(Err::Error(("123abcd;", ErrorKind::Alpha)))); -/// # } -/// ``` -pub fn value, F>( - val: O1, - mut parser: F, -) -> impl FnMut(I) -> IResult -where - F: Parser, -{ - move |input: I| parser.parse(input).map(|(i, _)| (i, val.clone())) -} - -/// Succeeds if the child parser returns an error. -/// -/// ```rust -/// # use nom::{Err,error::ErrorKind, IResult}; -/// use nom::combinator::not; -/// use nom::character::complete::alpha1; -/// # fn main() { -/// -/// let mut parser = not(alpha1); -/// -/// assert_eq!(parser("123"), Ok(("123", ()))); -/// assert_eq!(parser("abcd"), Err(Err::Error(("abcd", ErrorKind::Not)))); -/// # } -/// ``` -pub fn not, F>(mut parser: F) -> impl FnMut(I) -> IResult -where - F: Parser, -{ - move |input: I| { - let i = input.clone(); - match parser.parse(input) { - Ok(_) => Err(Err::Error(E::from_error_kind(i, ErrorKind::Not))), - Err(Err::Error(_)) => Ok((i, ())), - Err(e) => Err(e), - } - } -} - -/// If the child parser was successful, return the consumed input as produced value. 
-/// -/// ```rust -/// # use nom::{Err,error::ErrorKind, IResult}; -/// use nom::combinator::recognize; -/// use nom::character::complete::{char, alpha1}; -/// use nom::sequence::separated_pair; -/// # fn main() { -/// -/// let mut parser = recognize(separated_pair(alpha1, char(','), alpha1)); -/// -/// assert_eq!(parser("abcd,efgh"), Ok(("", "abcd,efgh"))); -/// assert_eq!(parser("abcd;"),Err(Err::Error((";", ErrorKind::Char)))); -/// # } -/// ``` -pub fn recognize>, O, E: ParseError, F>( - mut parser: F, -) -> impl FnMut(I) -> IResult -where - F: Parser, -{ - move |input: I| { - let i = input.clone(); - match parser.parse(i) { - Ok((i, _)) => { - let index = input.offset(&i); - Ok((i, input.slice(..index))) - } - Err(e) => Err(e), - } - } -} - -/// if the child parser was successful, return the consumed input with the output -/// as a tuple. Functions similarly to [recognize](fn.recognize.html) except it -/// returns the parser output as well. -/// -/// This can be useful especially in cases where the output is not the same type -/// as the input, or the input is a user defined type. -/// -/// Returned tuple is of the format `(consumed input, produced output)`. -/// -/// ```rust -/// # use nom::{Err,error::ErrorKind, IResult}; -/// use nom::combinator::{consumed, value, recognize, map}; -/// use nom::character::complete::{char, alpha1}; -/// use nom::bytes::complete::tag; -/// use nom::sequence::separated_pair; -/// -/// fn inner_parser(input: &str) -> IResult<&str, bool> { -/// value(true, tag("1234"))(input) -/// } -/// -/// # fn main() { -/// -/// let mut consumed_parser = consumed(value(true, separated_pair(alpha1, char(','), alpha1))); -/// -/// assert_eq!(consumed_parser("abcd,efgh1"), Ok(("1", ("abcd,efgh", true)))); -/// assert_eq!(consumed_parser("abcd;"),Err(Err::Error((";", ErrorKind::Char)))); -/// -/// -/// // the first output (representing the consumed input) -/// // should be the same as that of the `recognize` parser. -/// let mut recognize_parser = recognize(inner_parser); -/// let mut consumed_parser = map(consumed(inner_parser), |(consumed, output)| consumed); -/// -/// assert_eq!(recognize_parser("1234"), consumed_parser("1234")); -/// assert_eq!(recognize_parser("abcd"), consumed_parser("abcd")); -/// # } -/// ``` -pub fn consumed(mut parser: F) -> impl FnMut(I) -> IResult -where - I: Clone + Offset + Slice>, - E: ParseError, - F: Parser, -{ - move |input: I| { - let i = input.clone(); - match parser.parse(i) { - Ok((remaining, result)) => { - let index = input.offset(&remaining); - let consumed = input.slice(..index); - Ok((remaining, (consumed, result))) - } - Err(e) => Err(e), - } - } -} - -/// Transforms an [`Err::Error`] (recoverable) to [`Err::Failure`] (unrecoverable) -/// -/// This commits the parse result, preventing alternative branch paths like with -/// [`nom::branch::alt`][crate::branch::alt]. 
-/// -/// # Example -/// -/// Without `cut`: -/// ```rust -/// # use nom::{Err,error::ErrorKind, IResult}; -/// # use nom::character::complete::{one_of, digit1}; -/// # use nom::combinator::rest; -/// # use nom::branch::alt; -/// # use nom::sequence::preceded; -/// # fn main() { -/// -/// fn parser(input: &str) -> IResult<&str, &str> { -/// alt(( -/// preceded(one_of("+-"), digit1), -/// rest -/// ))(input) -/// } -/// -/// assert_eq!(parser("+10 ab"), Ok((" ab", "10"))); -/// assert_eq!(parser("ab"), Ok(("", "ab"))); -/// assert_eq!(parser("+"), Ok(("", "+"))); -/// # } -/// ``` -/// -/// With `cut`: -/// ```rust -/// # use nom::{Err,error::ErrorKind, IResult, error::Error}; -/// # use nom::character::complete::{one_of, digit1}; -/// # use nom::combinator::rest; -/// # use nom::branch::alt; -/// # use nom::sequence::preceded; -/// use nom::combinator::cut; -/// # fn main() { -/// -/// fn parser(input: &str) -> IResult<&str, &str> { -/// alt(( -/// preceded(one_of("+-"), cut(digit1)), -/// rest -/// ))(input) -/// } -/// -/// assert_eq!(parser("+10 ab"), Ok((" ab", "10"))); -/// assert_eq!(parser("ab"), Ok(("", "ab"))); -/// assert_eq!(parser("+"), Err(Err::Failure(Error { input: "", code: ErrorKind::Digit }))); -/// # } -/// ``` -pub fn cut, F>(mut parser: F) -> impl FnMut(I) -> IResult -where - F: Parser, -{ - move |input: I| match parser.parse(input) { - Err(Err::Error(e)) => Err(Err::Failure(e)), - rest => rest, - } -} - -/// automatically converts the child parser's result to another type -/// -/// it will be able to convert the output value and the error value -/// as long as the `Into` implementations are available -/// -/// ```rust -/// # use nom::IResult; -/// use nom::combinator::into; -/// use nom::character::complete::alpha1; -/// # fn main() { -/// -/// fn parser1(i: &str) -> IResult<&str, &str> { -/// alpha1(i) -/// } -/// -/// let mut parser2 = into(parser1); -/// -/// // the parser converts the &str output of the child parser into a Vec -/// let bytes: IResult<&str, Vec> = parser2("abcd"); -/// assert_eq!(bytes, Ok(("", vec![97, 98, 99, 100]))); -/// # } -/// ``` -pub fn into(mut parser: F) -> impl FnMut(I) -> IResult -where - O1: Into, - E1: Into, - E1: ParseError, - E2: ParseError, - F: Parser, -{ - //map(parser, Into::into) - move |input: I| match parser.parse(input) { - Ok((i, o)) => Ok((i, o.into())), - Err(Err::Error(e)) => Err(Err::Error(e.into())), - Err(Err::Failure(e)) => Err(Err::Failure(e.into())), - Err(Err::Incomplete(e)) => Err(Err::Incomplete(e)), - } -} - -/// Creates an iterator from input data and a parser. -/// -/// Call the iterator's [ParserIterator::finish] method to get the remaining input if successful, -/// or the error value if we encountered an error. -/// -/// On [`Err::Error`], iteration will stop. To instead chain an error up, see [`cut`]. 
-/// -/// ```rust -/// use nom::{combinator::iterator, IResult, bytes::complete::tag, character::complete::alpha1, sequence::terminated}; -/// use std::collections::HashMap; -/// -/// let data = "abc|defg|hijkl|mnopqr|123"; -/// let mut it = iterator(data, terminated(alpha1, tag("|"))); -/// -/// let parsed = it.map(|v| (v, v.len())).collect::>(); -/// let res: IResult<_,_> = it.finish(); -/// -/// assert_eq!(parsed, [("abc", 3usize), ("defg", 4), ("hijkl", 5), ("mnopqr", 6)].iter().cloned().collect()); -/// assert_eq!(res, Ok(("123", ()))); -/// ``` -pub fn iterator(input: Input, f: F) -> ParserIterator -where - F: Parser, - Error: ParseError, -{ - ParserIterator { - iterator: f, - input, - state: Some(State::Running), - } -} - -/// Main structure associated to the [iterator] function. -pub struct ParserIterator { - iterator: F, - input: I, - state: Option>, -} - -impl ParserIterator { - /// Returns the remaining input if parsing was successful, or the error if we encountered an error. - pub fn finish(mut self) -> IResult { - match self.state.take().unwrap() { - State::Running | State::Done => Ok((self.input, ())), - State::Failure(e) => Err(Err::Failure(e)), - State::Incomplete(i) => Err(Err::Incomplete(i)), - } - } -} - -impl<'a, Input, Output, Error, F> core::iter::Iterator for &'a mut ParserIterator -where - F: FnMut(Input) -> IResult, - Input: Clone, -{ - type Item = Output; - - fn next(&mut self) -> Option { - if let State::Running = self.state.take().unwrap() { - let input = self.input.clone(); - - match (self.iterator)(input) { - Ok((i, o)) => { - self.input = i; - self.state = Some(State::Running); - Some(o) - } - Err(Err::Error(_)) => { - self.state = Some(State::Done); - None - } - Err(Err::Failure(e)) => { - self.state = Some(State::Failure(e)); - None - } - Err(Err::Incomplete(i)) => { - self.state = Some(State::Incomplete(i)); - None - } - } - } else { - None - } - } -} - -enum State { - Running, - Done, - Failure(E), - Incomplete(Needed), -} - -/// a parser which always succeeds with given value without consuming any input. -/// -/// It can be used for example as the last alternative in `alt` to -/// specify the default case. -/// -/// ```rust -/// # use nom::{Err,error::ErrorKind, IResult}; -/// use nom::branch::alt; -/// use nom::combinator::{success, value}; -/// use nom::character::complete::char; -/// # fn main() { -/// -/// let mut parser = success::<_,_,(_,ErrorKind)>(10); -/// assert_eq!(parser("xyz"), Ok(("xyz", 10))); -/// -/// let mut sign = alt((value(-1, char('-')), value(1, char('+')), success::<_,_,(_,ErrorKind)>(1))); -/// assert_eq!(sign("+10"), Ok(("10", 1))); -/// assert_eq!(sign("-10"), Ok(("10", -1))); -/// assert_eq!(sign("10"), Ok(("10", 1))); -/// # } -/// ``` -pub fn success>(val: O) -> impl Fn(I) -> IResult { - move |input: I| Ok((input, val.clone())) -} - -/// A parser which always fails. 
-/// -/// ```rust -/// # use nom::{Err, error::ErrorKind, IResult}; -/// use nom::combinator::fail; -/// -/// let s = "string"; -/// assert_eq!(fail::<_, &str, _>(s), Err(Err::Error((s, ErrorKind::Fail)))); -/// ``` -pub fn fail>(i: I) -> IResult { - Err(Err::Error(E::from_error_kind(i, ErrorKind::Fail))) -} diff --git a/vendor/nom/src/combinator/tests.rs b/vendor/nom/src/combinator/tests.rs deleted file mode 100644 index 15d32b8aae3322..00000000000000 --- a/vendor/nom/src/combinator/tests.rs +++ /dev/null @@ -1,275 +0,0 @@ -use super::*; -use crate::bytes::complete::take; -use crate::bytes::streaming::tag; -use crate::error::ErrorKind; -use crate::error::ParseError; -use crate::internal::{Err, IResult, Needed}; -#[cfg(feature = "alloc")] -use crate::lib::std::boxed::Box; -use crate::number::complete::u8; - -macro_rules! assert_parse( - ($left: expr, $right: expr) => { - let res: $crate::IResult<_, _, (_, ErrorKind)> = $left; - assert_eq!(res, $right); - }; -); - -/*#[test] -fn t1() { - let v1:Vec = vec![1,2,3]; - let v2:Vec = vec![4,5,6]; - let d = Ok((&v1[..], &v2[..])); - let res = d.flat_map(print); - assert_eq!(res, Ok((&v2[..], ()))); -}*/ - -#[test] -fn eof_on_slices() { - let not_over: &[u8] = &b"Hello, world!"[..]; - let is_over: &[u8] = &b""[..]; - - let res_not_over = eof(not_over); - assert_parse!( - res_not_over, - Err(Err::Error(error_position!(not_over, ErrorKind::Eof))) - ); - - let res_over = eof(is_over); - assert_parse!(res_over, Ok((is_over, is_over))); -} - -#[test] -fn eof_on_strs() { - let not_over: &str = "Hello, world!"; - let is_over: &str = ""; - - let res_not_over = eof(not_over); - assert_parse!( - res_not_over, - Err(Err::Error(error_position!(not_over, ErrorKind::Eof))) - ); - - let res_over = eof(is_over); - assert_parse!(res_over, Ok((is_over, is_over))); -} - -/* -#[test] -fn end_of_input() { - let not_over = &b"Hello, world!"[..]; - let is_over = &b""[..]; - named!(eof_test, eof!()); - - let res_not_over = eof_test(not_over); - assert_eq!(res_not_over, Err(Err::Error(error_position!(not_over, ErrorKind::Eof)))); - - let res_over = eof_test(is_over); - assert_eq!(res_over, Ok((is_over, is_over))); -} -*/ - -#[test] -fn rest_on_slices() { - let input: &[u8] = &b"Hello, world!"[..]; - let empty: &[u8] = &b""[..]; - assert_parse!(rest(input), Ok((empty, input))); -} - -#[test] -fn rest_on_strs() { - let input: &str = "Hello, world!"; - let empty: &str = ""; - assert_parse!(rest(input), Ok((empty, input))); -} - -#[test] -fn rest_len_on_slices() { - let input: &[u8] = &b"Hello, world!"[..]; - assert_parse!(rest_len(input), Ok((input, input.len()))); -} - -use crate::lib::std::convert::From; -impl From for CustomError { - fn from(_: u32) -> Self { - CustomError - } -} - -impl ParseError for CustomError { - fn from_error_kind(_: I, _: ErrorKind) -> Self { - CustomError - } - - fn append(_: I, _: ErrorKind, _: CustomError) -> Self { - CustomError - } -} - -struct CustomError; -#[allow(dead_code)] -fn custom_error(input: &[u8]) -> IResult<&[u8], &[u8], CustomError> { - //fix_error!(input, CustomError, alphanumeric) - crate::character::streaming::alphanumeric1(input) -} - -#[test] -fn test_flat_map() { - let input: &[u8] = &[3, 100, 101, 102, 103, 104][..]; - assert_parse!( - flat_map(u8, take)(input), - Ok((&[103, 104][..], &[100, 101, 102][..])) - ); -} - -#[test] -fn test_map_opt() { - let input: &[u8] = &[50][..]; - assert_parse!( - map_opt(u8, |u| if u < 20 { Some(u) } else { None })(input), - Err(Err::Error((&[50][..], ErrorKind::MapOpt))) - ); - 
assert_parse!( - map_opt(u8, |u| if u > 20 { Some(u) } else { None })(input), - Ok((&[][..], 50)) - ); -} - -#[test] -fn test_map_parser() { - let input: &[u8] = &[100, 101, 102, 103, 104][..]; - assert_parse!( - map_parser(take(4usize), take(2usize))(input), - Ok((&[104][..], &[100, 101][..])) - ); -} - -#[test] -fn test_all_consuming() { - let input: &[u8] = &[100, 101, 102][..]; - assert_parse!( - all_consuming(take(2usize))(input), - Err(Err::Error((&[102][..], ErrorKind::Eof))) - ); - assert_parse!( - all_consuming(take(3usize))(input), - Ok((&[][..], &[100, 101, 102][..])) - ); -} - -#[test] -#[allow(unused)] -fn test_verify_ref() { - use crate::bytes::complete::take; - - let mut parser1 = verify(take(3u8), |s: &[u8]| s == &b"abc"[..]); - - assert_eq!(parser1(&b"abcd"[..]), Ok((&b"d"[..], &b"abc"[..]))); - assert_eq!( - parser1(&b"defg"[..]), - Err(Err::Error((&b"defg"[..], ErrorKind::Verify))) - ); - - fn parser2(i: &[u8]) -> IResult<&[u8], u32> { - verify(crate::number::streaming::be_u32, |val: &u32| *val < 3)(i) - } -} - -#[test] -#[cfg(feature = "alloc")] -fn test_verify_alloc() { - use crate::bytes::complete::take; - let mut parser1 = verify(map(take(3u8), |s: &[u8]| s.to_vec()), |s: &[u8]| { - s == &b"abc"[..] - }); - - assert_eq!(parser1(&b"abcd"[..]), Ok((&b"d"[..], (&b"abc").to_vec()))); - assert_eq!( - parser1(&b"defg"[..]), - Err(Err::Error((&b"defg"[..], ErrorKind::Verify))) - ); -} - -#[test] -#[cfg(feature = "std")] -fn test_into() { - use crate::bytes::complete::take; - use crate::{ - error::{Error, ParseError}, - Err, - }; - - let mut parser = into(take::<_, _, Error<_>>(3u8)); - let result: IResult<&[u8], Vec> = parser(&b"abcdefg"[..]); - - assert_eq!(result, Ok((&b"defg"[..], vec![97, 98, 99]))); -} - -#[test] -fn opt_test() { - fn opt_abcd(i: &[u8]) -> IResult<&[u8], Option<&[u8]>> { - opt(tag("abcd"))(i) - } - - let a = &b"abcdef"[..]; - let b = &b"bcdefg"[..]; - let c = &b"ab"[..]; - assert_eq!(opt_abcd(a), Ok((&b"ef"[..], Some(&b"abcd"[..])))); - assert_eq!(opt_abcd(b), Ok((&b"bcdefg"[..], None))); - assert_eq!(opt_abcd(c), Err(Err::Incomplete(Needed::new(2)))); -} - -#[test] -fn peek_test() { - fn peek_tag(i: &[u8]) -> IResult<&[u8], &[u8]> { - peek(tag("abcd"))(i) - } - - assert_eq!(peek_tag(&b"abcdef"[..]), Ok((&b"abcdef"[..], &b"abcd"[..]))); - assert_eq!(peek_tag(&b"ab"[..]), Err(Err::Incomplete(Needed::new(2)))); - assert_eq!( - peek_tag(&b"xxx"[..]), - Err(Err::Error(error_position!(&b"xxx"[..], ErrorKind::Tag))) - ); -} - -#[test] -fn not_test() { - fn not_aaa(i: &[u8]) -> IResult<&[u8], ()> { - not(tag("aaa"))(i) - } - - assert_eq!( - not_aaa(&b"aaa"[..]), - Err(Err::Error(error_position!(&b"aaa"[..], ErrorKind::Not))) - ); - assert_eq!(not_aaa(&b"aa"[..]), Err(Err::Incomplete(Needed::new(1)))); - assert_eq!(not_aaa(&b"abcd"[..]), Ok((&b"abcd"[..], ()))); -} - -#[test] -fn verify_test() { - use crate::bytes::streaming::take; - - fn test(i: &[u8]) -> IResult<&[u8], &[u8]> { - verify(take(5u8), |slice: &[u8]| slice[0] == b'a')(i) - } - assert_eq!(test(&b"bcd"[..]), Err(Err::Incomplete(Needed::new(2)))); - assert_eq!( - test(&b"bcdefg"[..]), - Err(Err::Error(error_position!( - &b"bcdefg"[..], - ErrorKind::Verify - ))) - ); - assert_eq!(test(&b"abcdefg"[..]), Ok((&b"fg"[..], &b"abcde"[..]))); -} - -#[test] -fn fail_test() { - let a = "string"; - let b = "another string"; - - assert_eq!(fail::<_, &str, _>(a), Err(Err::Error((a, ErrorKind::Fail)))); - assert_eq!(fail::<_, &str, _>(b), Err(Err::Error((b, ErrorKind::Fail)))); -} diff --git 
a/vendor/nom/src/error.rs b/vendor/nom/src/error.rs deleted file mode 100644 index 498b5e135a0973..00000000000000 --- a/vendor/nom/src/error.rs +++ /dev/null @@ -1,831 +0,0 @@ -//! Error management -//! -//! Parsers are generic over their error type, requiring that it implements -//! the `error::ParseError` trait. - -use crate::internal::Parser; -use crate::lib::std::fmt; - -/// This trait must be implemented by the error type of a nom parser. -/// -/// There are already implementations of it for `(Input, ErrorKind)` -/// and `VerboseError`. -/// -/// It provides methods to create an error from some combinators, -/// and combine existing errors in combinators like `alt`. -pub trait ParseError: Sized { - /// Creates an error from the input position and an [ErrorKind] - fn from_error_kind(input: I, kind: ErrorKind) -> Self; - - /// Combines an existing error with a new one created from the input - /// position and an [ErrorKind]. This is useful when backtracking - /// through a parse tree, accumulating error context on the way - fn append(input: I, kind: ErrorKind, other: Self) -> Self; - - /// Creates an error from an input position and an expected character - fn from_char(input: I, _: char) -> Self { - Self::from_error_kind(input, ErrorKind::Char) - } - - /// Combines two existing errors. This function is used to compare errors - /// generated in various branches of `alt`. - fn or(self, other: Self) -> Self { - other - } -} - -/// This trait is required by the `context` combinator to add a static string -/// to an existing error -pub trait ContextError: Sized { - /// Creates a new error from an input position, a static string and an existing error. - /// This is used mainly in the [context] combinator, to add user friendly information - /// to errors when backtracking through a parse tree - fn add_context(_input: I, _ctx: &'static str, other: Self) -> Self { - other - } -} - -/// This trait is required by the `map_res` combinator to integrate -/// error types from external functions, like [std::str::FromStr] -pub trait FromExternalError { - /// Creates a new error from an input position, an [ErrorKind] indicating the - /// wrapping parser, and an external error - fn from_external_error(input: I, kind: ErrorKind, e: E) -> Self; -} - -/// default error type, only contains the error' location and code -#[derive(Debug, PartialEq)] -pub struct Error { - /// position of the error in the input data - pub input: I, - /// nom error code - pub code: ErrorKind, -} - -impl Error { - /// creates a new basic error - pub fn new(input: I, code: ErrorKind) -> Error { - Error { input, code } - } -} - -impl ParseError for Error { - fn from_error_kind(input: I, kind: ErrorKind) -> Self { - Error { input, code: kind } - } - - fn append(_: I, _: ErrorKind, other: Self) -> Self { - other - } -} - -impl ContextError for Error {} - -impl FromExternalError for Error { - /// Create a new error from an input position and an external error - fn from_external_error(input: I, kind: ErrorKind, _e: E) -> Self { - Error { input, code: kind } - } -} - -/// The Display implementation allows the std::error::Error implementation -impl fmt::Display for Error { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "error {:?} at: {}", self.code, self.input) - } -} - -#[cfg(feature = "std")] -impl std::error::Error for Error {} - -// for backward compatibility, keep those trait implementations -// for the previously used error type -impl ParseError for (I, ErrorKind) { - fn from_error_kind(input: I, kind: 
ErrorKind) -> Self { - (input, kind) - } - - fn append(_: I, _: ErrorKind, other: Self) -> Self { - other - } -} - -impl ContextError for (I, ErrorKind) {} - -impl FromExternalError for (I, ErrorKind) { - fn from_external_error(input: I, kind: ErrorKind, _e: E) -> Self { - (input, kind) - } -} - -impl ParseError for () { - fn from_error_kind(_: I, _: ErrorKind) -> Self {} - - fn append(_: I, _: ErrorKind, _: Self) -> Self {} -} - -impl ContextError for () {} - -impl FromExternalError for () { - fn from_external_error(_input: I, _kind: ErrorKind, _e: E) -> Self {} -} - -/// Creates an error from the input position and an [ErrorKind] -pub fn make_error>(input: I, kind: ErrorKind) -> E { - E::from_error_kind(input, kind) -} - -/// Combines an existing error with a new one created from the input -/// position and an [ErrorKind]. This is useful when backtracking -/// through a parse tree, accumulating error context on the way -pub fn append_error>(input: I, kind: ErrorKind, other: E) -> E { - E::append(input, kind, other) -} - -/// This error type accumulates errors and their position when backtracking -/// through a parse tree. With some post processing (cf `examples/json.rs`), -/// it can be used to display user friendly error messages -#[cfg(feature = "alloc")] -#[cfg_attr(feature = "docsrs", doc(cfg(feature = "alloc")))] -#[derive(Clone, Debug, PartialEq)] -pub struct VerboseError { - /// List of errors accumulated by `VerboseError`, containing the affected - /// part of input data, and some context - pub errors: crate::lib::std::vec::Vec<(I, VerboseErrorKind)>, -} - -#[cfg(feature = "alloc")] -#[cfg_attr(feature = "docsrs", doc(cfg(feature = "alloc")))] -#[derive(Clone, Debug, PartialEq)] -/// Error context for `VerboseError` -pub enum VerboseErrorKind { - /// Static string added by the `context` function - Context(&'static str), - /// Indicates which character was expected by the `char` function - Char(char), - /// Error kind given by various nom parsers - Nom(ErrorKind), -} - -#[cfg(feature = "alloc")] -#[cfg_attr(feature = "docsrs", doc(cfg(feature = "alloc")))] -impl ParseError for VerboseError { - fn from_error_kind(input: I, kind: ErrorKind) -> Self { - VerboseError { - errors: vec![(input, VerboseErrorKind::Nom(kind))], - } - } - - fn append(input: I, kind: ErrorKind, mut other: Self) -> Self { - other.errors.push((input, VerboseErrorKind::Nom(kind))); - other - } - - fn from_char(input: I, c: char) -> Self { - VerboseError { - errors: vec![(input, VerboseErrorKind::Char(c))], - } - } -} - -#[cfg(feature = "alloc")] -#[cfg_attr(feature = "docsrs", doc(cfg(feature = "alloc")))] -impl ContextError for VerboseError { - fn add_context(input: I, ctx: &'static str, mut other: Self) -> Self { - other.errors.push((input, VerboseErrorKind::Context(ctx))); - other - } -} - -#[cfg(feature = "alloc")] -#[cfg_attr(feature = "docsrs", doc(cfg(feature = "alloc")))] -impl FromExternalError for VerboseError { - /// Create a new error from an input position and an external error - fn from_external_error(input: I, kind: ErrorKind, _e: E) -> Self { - Self::from_error_kind(input, kind) - } -} - -#[cfg(feature = "alloc")] -impl fmt::Display for VerboseError { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - writeln!(f, "Parse error:")?; - for (input, error) in &self.errors { - match error { - VerboseErrorKind::Nom(e) => writeln!(f, "{:?} at: {}", e, input)?, - VerboseErrorKind::Char(c) => writeln!(f, "expected '{}' at: {}", c, input)?, - VerboseErrorKind::Context(s) => writeln!(f, "in 
section '{}', at: {}", s, input)?, - } - } - - Ok(()) - } -} - -#[cfg(feature = "std")] -impl std::error::Error for VerboseError {} - -use crate::internal::{Err, IResult}; - -/// Create a new error from an input position, a static string and an existing error. -/// This is used mainly in the [context] combinator, to add user friendly information -/// to errors when backtracking through a parse tree -pub fn context, F, O>( - context: &'static str, - mut f: F, -) -> impl FnMut(I) -> IResult -where - F: Parser, -{ - move |i: I| match f.parse(i.clone()) { - Ok(o) => Ok(o), - Err(Err::Incomplete(i)) => Err(Err::Incomplete(i)), - Err(Err::Error(e)) => Err(Err::Error(E::add_context(i, context, e))), - Err(Err::Failure(e)) => Err(Err::Failure(E::add_context(i, context, e))), - } -} - -/// Transforms a `VerboseError` into a trace with input position information -#[cfg(feature = "alloc")] -#[cfg_attr(feature = "docsrs", doc(cfg(feature = "alloc")))] -pub fn convert_error>( - input: I, - e: VerboseError, -) -> crate::lib::std::string::String { - use crate::lib::std::fmt::Write; - use crate::traits::Offset; - - let mut result = crate::lib::std::string::String::new(); - - for (i, (substring, kind)) in e.errors.iter().enumerate() { - let offset = input.offset(substring); - - if input.is_empty() { - match kind { - VerboseErrorKind::Char(c) => { - write!(&mut result, "{}: expected '{}', got empty input\n\n", i, c) - } - VerboseErrorKind::Context(s) => write!(&mut result, "{}: in {}, got empty input\n\n", i, s), - VerboseErrorKind::Nom(e) => write!(&mut result, "{}: in {:?}, got empty input\n\n", i, e), - } - } else { - let prefix = &input.as_bytes()[..offset]; - - // Count the number of newlines in the first `offset` bytes of input - let line_number = prefix.iter().filter(|&&b| b == b'\n').count() + 1; - - // Find the line that includes the subslice: - // Find the *last* newline before the substring starts - let line_begin = prefix - .iter() - .rev() - .position(|&b| b == b'\n') - .map(|pos| offset - pos) - .unwrap_or(0); - - // Find the full line after that newline - let line = input[line_begin..] 
- .lines() - .next() - .unwrap_or(&input[line_begin..]) - .trim_end(); - - // The (1-indexed) column number is the offset of our substring into that line - let column_number = line.offset(substring) + 1; - - match kind { - VerboseErrorKind::Char(c) => { - if let Some(actual) = substring.chars().next() { - write!( - &mut result, - "{i}: at line {line_number}:\n\ - {line}\n\ - {caret:>column$}\n\ - expected '{expected}', found {actual}\n\n", - i = i, - line_number = line_number, - line = line, - caret = '^', - column = column_number, - expected = c, - actual = actual, - ) - } else { - write!( - &mut result, - "{i}: at line {line_number}:\n\ - {line}\n\ - {caret:>column$}\n\ - expected '{expected}', got end of input\n\n", - i = i, - line_number = line_number, - line = line, - caret = '^', - column = column_number, - expected = c, - ) - } - } - VerboseErrorKind::Context(s) => write!( - &mut result, - "{i}: at line {line_number}, in {context}:\n\ - {line}\n\ - {caret:>column$}\n\n", - i = i, - line_number = line_number, - context = s, - line = line, - caret = '^', - column = column_number, - ), - VerboseErrorKind::Nom(e) => write!( - &mut result, - "{i}: at line {line_number}, in {nom_err:?}:\n\ - {line}\n\ - {caret:>column$}\n\n", - i = i, - line_number = line_number, - nom_err = e, - line = line, - caret = '^', - column = column_number, - ), - } - } - // Because `write!` to a `String` is infallible, this `unwrap` is fine. - .unwrap(); - } - - result -} - -/// Indicates which parser returned an error -#[rustfmt::skip] -#[derive(Debug,PartialEq,Eq,Hash,Clone,Copy)] -#[allow(deprecated,missing_docs)] -pub enum ErrorKind { - Tag, - MapRes, - MapOpt, - Alt, - IsNot, - IsA, - SeparatedList, - SeparatedNonEmptyList, - Many0, - Many1, - ManyTill, - Count, - TakeUntil, - LengthValue, - TagClosure, - Alpha, - Digit, - HexDigit, - OctDigit, - AlphaNumeric, - Space, - MultiSpace, - LengthValueFn, - Eof, - Switch, - TagBits, - OneOf, - NoneOf, - Char, - CrLf, - RegexpMatch, - RegexpMatches, - RegexpFind, - RegexpCapture, - RegexpCaptures, - TakeWhile1, - Complete, - Fix, - Escaped, - EscapedTransform, - NonEmpty, - ManyMN, - Not, - Permutation, - Verify, - TakeTill1, - TakeWhileMN, - TooLarge, - Many0Count, - Many1Count, - Float, - Satisfy, - Fail, -} - -#[rustfmt::skip] -#[allow(deprecated)] -/// Converts an ErrorKind to a number -pub fn error_to_u32(e: &ErrorKind) -> u32 { - match *e { - ErrorKind::Tag => 1, - ErrorKind::MapRes => 2, - ErrorKind::MapOpt => 3, - ErrorKind::Alt => 4, - ErrorKind::IsNot => 5, - ErrorKind::IsA => 6, - ErrorKind::SeparatedList => 7, - ErrorKind::SeparatedNonEmptyList => 8, - ErrorKind::Many1 => 9, - ErrorKind::Count => 10, - ErrorKind::TakeUntil => 12, - ErrorKind::LengthValue => 15, - ErrorKind::TagClosure => 16, - ErrorKind::Alpha => 17, - ErrorKind::Digit => 18, - ErrorKind::AlphaNumeric => 19, - ErrorKind::Space => 20, - ErrorKind::MultiSpace => 21, - ErrorKind::LengthValueFn => 22, - ErrorKind::Eof => 23, - ErrorKind::Switch => 27, - ErrorKind::TagBits => 28, - ErrorKind::OneOf => 29, - ErrorKind::NoneOf => 30, - ErrorKind::Char => 40, - ErrorKind::CrLf => 41, - ErrorKind::RegexpMatch => 42, - ErrorKind::RegexpMatches => 43, - ErrorKind::RegexpFind => 44, - ErrorKind::RegexpCapture => 45, - ErrorKind::RegexpCaptures => 46, - ErrorKind::TakeWhile1 => 47, - ErrorKind::Complete => 48, - ErrorKind::Fix => 49, - ErrorKind::Escaped => 50, - ErrorKind::EscapedTransform => 51, - ErrorKind::NonEmpty => 56, - ErrorKind::ManyMN => 57, - ErrorKind::HexDigit => 59, - 
ErrorKind::OctDigit => 61, - ErrorKind::Many0 => 62, - ErrorKind::Not => 63, - ErrorKind::Permutation => 64, - ErrorKind::ManyTill => 65, - ErrorKind::Verify => 66, - ErrorKind::TakeTill1 => 67, - ErrorKind::TakeWhileMN => 69, - ErrorKind::TooLarge => 70, - ErrorKind::Many0Count => 71, - ErrorKind::Many1Count => 72, - ErrorKind::Float => 73, - ErrorKind::Satisfy => 74, - ErrorKind::Fail => 75, - } -} - -impl ErrorKind { - #[rustfmt::skip] - #[allow(deprecated)] - /// Converts an ErrorKind to a text description - pub fn description(&self) -> &str { - match *self { - ErrorKind::Tag => "Tag", - ErrorKind::MapRes => "Map on Result", - ErrorKind::MapOpt => "Map on Option", - ErrorKind::Alt => "Alternative", - ErrorKind::IsNot => "IsNot", - ErrorKind::IsA => "IsA", - ErrorKind::SeparatedList => "Separated list", - ErrorKind::SeparatedNonEmptyList => "Separated non empty list", - ErrorKind::Many0 => "Many0", - ErrorKind::Many1 => "Many1", - ErrorKind::Count => "Count", - ErrorKind::TakeUntil => "Take until", - ErrorKind::LengthValue => "Length followed by value", - ErrorKind::TagClosure => "Tag closure", - ErrorKind::Alpha => "Alphabetic", - ErrorKind::Digit => "Digit", - ErrorKind::AlphaNumeric => "AlphaNumeric", - ErrorKind::Space => "Space", - ErrorKind::MultiSpace => "Multiple spaces", - ErrorKind::LengthValueFn => "LengthValueFn", - ErrorKind::Eof => "End of file", - ErrorKind::Switch => "Switch", - ErrorKind::TagBits => "Tag on bitstream", - ErrorKind::OneOf => "OneOf", - ErrorKind::NoneOf => "NoneOf", - ErrorKind::Char => "Char", - ErrorKind::CrLf => "CrLf", - ErrorKind::RegexpMatch => "RegexpMatch", - ErrorKind::RegexpMatches => "RegexpMatches", - ErrorKind::RegexpFind => "RegexpFind", - ErrorKind::RegexpCapture => "RegexpCapture", - ErrorKind::RegexpCaptures => "RegexpCaptures", - ErrorKind::TakeWhile1 => "TakeWhile1", - ErrorKind::Complete => "Complete", - ErrorKind::Fix => "Fix", - ErrorKind::Escaped => "Escaped", - ErrorKind::EscapedTransform => "EscapedTransform", - ErrorKind::NonEmpty => "NonEmpty", - ErrorKind::ManyMN => "Many(m, n)", - ErrorKind::HexDigit => "Hexadecimal Digit", - ErrorKind::OctDigit => "Octal digit", - ErrorKind::Not => "Negation", - ErrorKind::Permutation => "Permutation", - ErrorKind::ManyTill => "ManyTill", - ErrorKind::Verify => "predicate verification", - ErrorKind::TakeTill1 => "TakeTill1", - ErrorKind::TakeWhileMN => "TakeWhileMN", - ErrorKind::TooLarge => "Needed data size is too large", - ErrorKind::Many0Count => "Count occurrence of >=0 patterns", - ErrorKind::Many1Count => "Count occurrence of >=1 patterns", - ErrorKind::Float => "Float", - ErrorKind::Satisfy => "Satisfy", - ErrorKind::Fail => "Fail", - } - } -} - -/// Creates a parse error from a `nom::ErrorKind` -/// and the position in the input -#[allow(unused_variables)] -#[macro_export(local_inner_macros)] -macro_rules! error_position( - ($input:expr, $code:expr) => ({ - $crate::error::make_error($input, $code) - }); -); - -/// Creates a parse error from a `nom::ErrorKind`, -/// the position in the input and the next error in -/// the parsing tree -#[allow(unused_variables)] -#[macro_export(local_inner_macros)] -macro_rules! error_node_position( - ($input:expr, $code:expr, $next:expr) => ({ - $crate::error::append_error($input, $code, $next) - }); -); - -/// Prints a message and the input if the parser fails. -/// -/// The message prints the `Error` or `Incomplete` -/// and the parser's calling code. 
-/// -/// It also displays the input in hexdump format -/// -/// ```rust -/// use nom::{IResult, error::dbg_dmp, bytes::complete::tag}; -/// -/// fn f(i: &[u8]) -> IResult<&[u8], &[u8]> { -/// dbg_dmp(tag("abcd"), "tag")(i) -/// } -/// -/// let a = &b"efghijkl"[..]; -/// -/// // Will print the following message: -/// // Error(Position(0, [101, 102, 103, 104, 105, 106, 107, 108])) at l.5 by ' tag ! ( "abcd" ) ' -/// // 00000000 65 66 67 68 69 6a 6b 6c efghijkl -/// f(a); -/// ``` -#[cfg(feature = "std")] -#[cfg_attr(feature = "docsrs", doc(cfg(feature = "std")))] -pub fn dbg_dmp<'a, F, O, E: std::fmt::Debug>( - f: F, - context: &'static str, -) -> impl Fn(&'a [u8]) -> IResult<&'a [u8], O, E> -where - F: Fn(&'a [u8]) -> IResult<&'a [u8], O, E>, -{ - use crate::HexDisplay; - move |i: &'a [u8]| match f(i) { - Err(e) => { - println!("{}: Error({:?}) at:\n{}", context, e, i.to_hex(8)); - Err(e) - } - a => a, - } -} - -#[cfg(test)] -#[cfg(feature = "alloc")] -mod tests { - use super::*; - use crate::character::complete::char; - - #[test] - fn convert_error_panic() { - let input = ""; - - let _result: IResult<_, _, VerboseError<&str>> = char('x')(input); - } -} - -/* -#[cfg(feature = "alloc")] -use lib::std::{vec::Vec, collections::HashMap}; - -#[cfg(feature = "std")] -use lib::std::hash::Hash; - -#[cfg(feature = "std")] -pub fn add_error_pattern<'a, I: Clone + Hash + Eq, O, E: Clone + Hash + Eq>( - h: &mut HashMap, &'a str>, - e: VerboseError, - message: &'a str, -) -> bool { - h.insert(e, message); - true -} - -pub fn slice_to_offsets(input: &[u8], s: &[u8]) -> (usize, usize) { - let start = input.as_ptr(); - let off1 = s.as_ptr() as usize - start as usize; - let off2 = off1 + s.len(); - (off1, off2) -} - -#[cfg(feature = "std")] -pub fn prepare_errors(input: &[u8], e: VerboseError<&[u8]>) -> Option> { - let mut v: Vec<(ErrorKind, usize, usize)> = Vec::new(); - - for (p, kind) in e.errors.drain(..) 
{ - let (o1, o2) = slice_to_offsets(input, p); - v.push((kind, o1, o2)); - } - - v.reverse(); - Some(v) -} - -#[cfg(feature = "std")] -pub fn print_error(input: &[u8], res: VerboseError<&[u8]>) { - if let Some(v) = prepare_errors(input, res) { - let colors = generate_colors(&v); - println!("parser codes: {}", print_codes(&colors, &HashMap::new())); - println!("{}", print_offsets(input, 0, &v)); - } else { - println!("not an error"); - } -} - -#[cfg(feature = "std")] -pub fn generate_colors(v: &[(ErrorKind, usize, usize)]) -> HashMap { - let mut h: HashMap = HashMap::new(); - let mut color = 0; - - for &(ref c, _, _) in v.iter() { - h.insert(error_to_u32(c), color + 31); - color = color + 1 % 7; - } - - h -} - -pub fn code_from_offset(v: &[(ErrorKind, usize, usize)], offset: usize) -> Option { - let mut acc: Option<(u32, usize, usize)> = None; - for &(ref ek, s, e) in v.iter() { - let c = error_to_u32(ek); - if s <= offset && offset <= e { - if let Some((_, start, end)) = acc { - if start <= s && e <= end { - acc = Some((c, s, e)); - } - } else { - acc = Some((c, s, e)); - } - } - } - if let Some((code, _, _)) = acc { - return Some(code); - } else { - return None; - } -} - -#[cfg(feature = "alloc")] -pub fn reset_color(v: &mut Vec) { - v.push(0x1B); - v.push(b'['); - v.push(0); - v.push(b'm'); -} - -#[cfg(feature = "alloc")] -pub fn write_color(v: &mut Vec, color: u8) { - v.push(0x1B); - v.push(b'['); - v.push(1); - v.push(b';'); - let s = color.to_string(); - let bytes = s.as_bytes(); - v.extend(bytes.iter().cloned()); - v.push(b'm'); -} - -#[cfg(feature = "std")] -#[cfg_attr(feature = "cargo-clippy", allow(implicit_hasher))] -pub fn print_codes(colors: &HashMap, names: &HashMap) -> String { - let mut v = Vec::new(); - for (code, &color) in colors { - if let Some(&s) = names.get(code) { - let bytes = s.as_bytes(); - write_color(&mut v, color); - v.extend(bytes.iter().cloned()); - } else { - let s = code.to_string(); - let bytes = s.as_bytes(); - write_color(&mut v, color); - v.extend(bytes.iter().cloned()); - } - reset_color(&mut v); - v.push(b' '); - } - reset_color(&mut v); - - String::from_utf8_lossy(&v[..]).into_owned() -} - -#[cfg(feature = "std")] -pub fn print_offsets(input: &[u8], from: usize, offsets: &[(ErrorKind, usize, usize)]) -> String { - let mut v = Vec::with_capacity(input.len() * 3); - let mut i = from; - let chunk_size = 8; - let mut current_code: Option = None; - let mut current_code2: Option = None; - - let colors = generate_colors(&offsets); - - for chunk in input.chunks(chunk_size) { - let s = format!("{:08x}", i); - for &ch in s.as_bytes().iter() { - v.push(ch); - } - v.push(b'\t'); - - let mut k = i; - let mut l = i; - for &byte in chunk { - if let Some(code) = code_from_offset(&offsets, k) { - if let Some(current) = current_code { - if current != code { - reset_color(&mut v); - current_code = Some(code); - if let Some(&color) = colors.get(&code) { - write_color(&mut v, color); - } - } - } else { - current_code = Some(code); - if let Some(&color) = colors.get(&code) { - write_color(&mut v, color); - } - } - } - v.push(CHARS[(byte >> 4) as usize]); - v.push(CHARS[(byte & 0xf) as usize]); - v.push(b' '); - k = k + 1; - } - - reset_color(&mut v); - - if chunk_size > chunk.len() { - for _ in 0..(chunk_size - chunk.len()) { - v.push(b' '); - v.push(b' '); - v.push(b' '); - } - } - v.push(b'\t'); - - for &byte in chunk { - if let Some(code) = code_from_offset(&offsets, l) { - if let Some(current) = current_code2 { - if current != code { - reset_color(&mut v); - 
current_code2 = Some(code); - if let Some(&color) = colors.get(&code) { - write_color(&mut v, color); - } - } - } else { - current_code2 = Some(code); - if let Some(&color) = colors.get(&code) { - write_color(&mut v, color); - } - } - } - if (byte >= 32 && byte <= 126) || byte >= 128 { - v.push(byte); - } else { - v.push(b'.'); - } - l = l + 1; - } - reset_color(&mut v); - - v.push(b'\n'); - i = i + chunk_size; - } - - String::from_utf8_lossy(&v[..]).into_owned() -} -*/ diff --git a/vendor/nom/src/internal.rs b/vendor/nom/src/internal.rs deleted file mode 100644 index b7572fbd0a9429..00000000000000 --- a/vendor/nom/src/internal.rs +++ /dev/null @@ -1,489 +0,0 @@ -//! Basic types to build the parsers - -use self::Needed::*; -use crate::error::{self, ErrorKind}; -use crate::lib::std::fmt; -use core::num::NonZeroUsize; - -/// Holds the result of parsing functions -/// -/// It depends on the input type `I`, the output type `O`, and the error type `E` -/// (by default `(I, nom::ErrorKind)`) -/// -/// The `Ok` side is a pair containing the remainder of the input (the part of the data that -/// was not parsed) and the produced value. The `Err` side contains an instance of `nom::Err`. -/// -/// Outside of the parsing code, you can use the [Finish::finish] method to convert -/// it to a more common result type -pub type IResult> = Result<(I, O), Err>; - -/// Helper trait to convert a parser's result to a more manageable type -pub trait Finish { - /// converts the parser's result to a type that is more consumable by error - /// management libraries. It keeps the same `Ok` branch, and merges `Err::Error` - /// and `Err::Failure` into the `Err` side. - /// - /// *warning*: if the result is `Err(Err::Incomplete(_))`, this method will panic. - /// - "complete" parsers: It will not be an issue, `Incomplete` is never used - /// - "streaming" parsers: `Incomplete` will be returned if there's not enough data - /// for the parser to decide, and you should gather more data before parsing again. - /// Once the parser returns either `Ok(_)`, `Err(Err::Error(_))` or `Err(Err::Failure(_))`, - /// you can get out of the parsing loop and call `finish()` on the parser's result - fn finish(self) -> Result<(I, O), E>; -} - -impl Finish for IResult { - fn finish(self) -> Result<(I, O), E> { - match self { - Ok(res) => Ok(res), - Err(Err::Error(e)) | Err(Err::Failure(e)) => Err(e), - Err(Err::Incomplete(_)) => { - panic!("Cannot call `finish()` on `Err(Err::Incomplete(_))`: this result means that the parser does not have enough data to decide, you should gather more data and try to reapply the parser instead") - } - } - } -} - -/// Contains information on needed data if a parser returned `Incomplete` -#[derive(Debug, PartialEq, Eq, Clone, Copy)] -#[cfg_attr(nightly, warn(rustdoc::missing_doc_code_examples))] -pub enum Needed { - /// Needs more data, but we do not know how much - Unknown, - /// Contains the required data size in bytes - Size(NonZeroUsize), -} - -impl Needed { - /// Creates `Needed` instance, returns `Needed::Unknown` if the argument is zero - pub fn new(s: usize) -> Self { - match NonZeroUsize::new(s) { - Some(sz) => Needed::Size(sz), - None => Needed::Unknown, - } - } - - /// Indicates if we know how many bytes we need - pub fn is_known(&self) -> bool { - *self != Unknown - } - - /// Maps a `Needed` to `Needed` by applying a function to a contained `Size` value. 
- #[inline] - pub fn map usize>(self, f: F) -> Needed { - match self { - Unknown => Unknown, - Size(n) => Needed::new(f(n)), - } - } -} - -/// The `Err` enum indicates the parser was not successful -/// -/// It has three cases: -/// -/// * `Incomplete` indicates that more data is needed to decide. The `Needed` enum -/// can contain how many additional bytes are necessary. If you are sure your parser -/// is working on full data, you can wrap your parser with the `complete` combinator -/// to transform that case in `Error` -/// * `Error` means some parser did not succeed, but another one might (as an example, -/// when testing different branches of an `alt` combinator) -/// * `Failure` indicates an unrecoverable error. As an example, if you recognize a prefix -/// to decide on the next parser to apply, and that parser fails, you know there's no need -/// to try other parsers, you were already in the right branch, so the data is invalid -/// -#[derive(Debug, Clone, PartialEq)] -#[cfg_attr(nightly, warn(rustdoc::missing_doc_code_examples))] -pub enum Err { - /// There was not enough data - Incomplete(Needed), - /// The parser had an error (recoverable) - Error(E), - /// The parser had an unrecoverable error: we got to the right - /// branch and we know other branches won't work, so backtrack - /// as fast as possible - Failure(E), -} - -impl Err { - /// Tests if the result is Incomplete - pub fn is_incomplete(&self) -> bool { - if let Err::Incomplete(_) = self { - true - } else { - false - } - } - - /// Applies the given function to the inner error - pub fn map(self, f: F) -> Err - where - F: FnOnce(E) -> E2, - { - match self { - Err::Incomplete(n) => Err::Incomplete(n), - Err::Failure(t) => Err::Failure(f(t)), - Err::Error(t) => Err::Error(f(t)), - } - } - - /// Automatically converts between errors if the underlying type supports it - pub fn convert(e: Err) -> Self - where - E: From, - { - e.map(crate::lib::std::convert::Into::into) - } -} - -impl Err<(T, ErrorKind)> { - /// Maps `Err<(T, ErrorKind)>` to `Err<(U, ErrorKind)>` with the given `F: T -> U` - pub fn map_input(self, f: F) -> Err<(U, ErrorKind)> - where - F: FnOnce(T) -> U, - { - match self { - Err::Incomplete(n) => Err::Incomplete(n), - Err::Failure((input, k)) => Err::Failure((f(input), k)), - Err::Error((input, k)) => Err::Error((f(input), k)), - } - } -} - -impl Err> { - /// Maps `Err>` to `Err>` with the given `F: T -> U` - pub fn map_input(self, f: F) -> Err> - where - F: FnOnce(T) -> U, - { - match self { - Err::Incomplete(n) => Err::Incomplete(n), - Err::Failure(error::Error { input, code }) => Err::Failure(error::Error { - input: f(input), - code, - }), - Err::Error(error::Error { input, code }) => Err::Error(error::Error { - input: f(input), - code, - }), - } - } -} - -#[cfg(feature = "alloc")] -use crate::lib::std::{borrow::ToOwned, string::String, vec::Vec}; -#[cfg(feature = "alloc")] -impl Err<(&[u8], ErrorKind)> { - /// Obtaining ownership - #[cfg_attr(feature = "docsrs", doc(cfg(feature = "alloc")))] - pub fn to_owned(self) -> Err<(Vec, ErrorKind)> { - self.map_input(ToOwned::to_owned) - } -} - -#[cfg(feature = "alloc")] -impl Err<(&str, ErrorKind)> { - /// Obtaining ownership - #[cfg_attr(feature = "docsrs", doc(cfg(feature = "alloc")))] - pub fn to_owned(self) -> Err<(String, ErrorKind)> { - self.map_input(ToOwned::to_owned) - } -} - -#[cfg(feature = "alloc")] -impl Err> { - /// Obtaining ownership - #[cfg_attr(feature = "docsrs", doc(cfg(feature = "alloc")))] - pub fn to_owned(self) -> Err>> { - 
self.map_input(ToOwned::to_owned) - } -} - -#[cfg(feature = "alloc")] -impl Err> { - /// Obtaining ownership - #[cfg_attr(feature = "docsrs", doc(cfg(feature = "alloc")))] - pub fn to_owned(self) -> Err> { - self.map_input(ToOwned::to_owned) - } -} - -impl Eq for Err {} - -impl fmt::Display for Err -where - E: fmt::Debug, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match self { - Err::Incomplete(Needed::Size(u)) => write!(f, "Parsing requires {} bytes/chars", u), - Err::Incomplete(Needed::Unknown) => write!(f, "Parsing requires more data"), - Err::Failure(c) => write!(f, "Parsing Failure: {:?}", c), - Err::Error(c) => write!(f, "Parsing Error: {:?}", c), - } - } -} - -#[cfg(feature = "std")] -use std::error::Error; - -#[cfg(feature = "std")] -impl Error for Err -where - E: fmt::Debug, -{ - fn source(&self) -> Option<&(dyn Error + 'static)> { - None // no underlying error - } -} - -/// All nom parsers implement this trait -pub trait Parser { - /// A parser takes in input type, and returns a `Result` containing - /// either the remaining input and the output value, or an error - fn parse(&mut self, input: I) -> IResult; - - /// Maps a function over the result of a parser - fn map(self, g: G) -> Map - where - G: Fn(O) -> O2, - Self: core::marker::Sized, - { - Map { - f: self, - g, - phantom: core::marker::PhantomData, - } - } - - /// Creates a second parser from the output of the first one, then apply over the rest of the input - fn flat_map(self, g: G) -> FlatMap - where - G: FnMut(O) -> H, - H: Parser, - Self: core::marker::Sized, - { - FlatMap { - f: self, - g, - phantom: core::marker::PhantomData, - } - } - - /// Applies a second parser over the output of the first one - fn and_then(self, g: G) -> AndThen - where - G: Parser, - Self: core::marker::Sized, - { - AndThen { - f: self, - g, - phantom: core::marker::PhantomData, - } - } - - /// Applies a second parser after the first one, return their results as a tuple - fn and(self, g: G) -> And - where - G: Parser, - Self: core::marker::Sized, - { - And { f: self, g } - } - - /// Applies a second parser over the input if the first one failed - fn or(self, g: G) -> Or - where - G: Parser, - Self: core::marker::Sized, - { - Or { f: self, g } - } - - /// automatically converts the parser's output and error values to another type, as long as they - /// implement the `From` trait - fn into, E2: From>(self) -> Into - where - Self: core::marker::Sized, - { - Into { - f: self, - phantom_out1: core::marker::PhantomData, - phantom_err1: core::marker::PhantomData, - phantom_out2: core::marker::PhantomData, - phantom_err2: core::marker::PhantomData, - } - } -} - -impl<'a, I, O, E, F> Parser for F -where - F: FnMut(I) -> IResult + 'a, -{ - fn parse(&mut self, i: I) -> IResult { - self(i) - } -} - -#[cfg(feature = "alloc")] -use alloc::boxed::Box; - -#[cfg(feature = "alloc")] -impl<'a, I, O, E> Parser for Box + 'a> { - fn parse(&mut self, input: I) -> IResult { - (**self).parse(input) - } -} - -/// Implementation of `Parser::map` -#[cfg_attr(nightly, warn(rustdoc::missing_doc_code_examples))] -pub struct Map { - f: F, - g: G, - phantom: core::marker::PhantomData, -} - -impl<'a, I, O1, O2, E, F: Parser, G: Fn(O1) -> O2> Parser for Map { - fn parse(&mut self, i: I) -> IResult { - match self.f.parse(i) { - Err(e) => Err(e), - Ok((i, o)) => Ok((i, (self.g)(o))), - } - } -} - -/// Implementation of `Parser::flat_map` -#[cfg_attr(nightly, warn(rustdoc::missing_doc_code_examples))] -pub struct FlatMap { - f: F, - g: G, - phantom: 
core::marker::PhantomData, -} - -impl<'a, I, O1, O2, E, F: Parser, G: Fn(O1) -> H, H: Parser> Parser - for FlatMap -{ - fn parse(&mut self, i: I) -> IResult { - let (i, o1) = self.f.parse(i)?; - (self.g)(o1).parse(i) - } -} - -/// Implementation of `Parser::and_then` -#[cfg_attr(nightly, warn(rustdoc::missing_doc_code_examples))] -pub struct AndThen { - f: F, - g: G, - phantom: core::marker::PhantomData, -} - -impl<'a, I, O1, O2, E, F: Parser, G: Parser> Parser - for AndThen -{ - fn parse(&mut self, i: I) -> IResult { - let (i, o1) = self.f.parse(i)?; - let (_, o2) = self.g.parse(o1)?; - Ok((i, o2)) - } -} - -/// Implementation of `Parser::and` -#[cfg_attr(nightly, warn(rustdoc::missing_doc_code_examples))] -pub struct And { - f: F, - g: G, -} - -impl<'a, I, O1, O2, E, F: Parser, G: Parser> Parser - for And -{ - fn parse(&mut self, i: I) -> IResult { - let (i, o1) = self.f.parse(i)?; - let (i, o2) = self.g.parse(i)?; - Ok((i, (o1, o2))) - } -} - -/// Implementation of `Parser::or` -#[cfg_attr(nightly, warn(rustdoc::missing_doc_code_examples))] -pub struct Or { - f: F, - g: G, -} - -impl<'a, I: Clone, O, E: crate::error::ParseError, F: Parser, G: Parser> - Parser for Or -{ - fn parse(&mut self, i: I) -> IResult { - match self.f.parse(i.clone()) { - Err(Err::Error(e1)) => match self.g.parse(i) { - Err(Err::Error(e2)) => Err(Err::Error(e1.or(e2))), - res => res, - }, - res => res, - } - } -} - -/// Implementation of `Parser::into` -#[cfg_attr(nightly, warn(rustdoc::missing_doc_code_examples))] -pub struct Into, E1, E2: From> { - f: F, - phantom_out1: core::marker::PhantomData, - phantom_err1: core::marker::PhantomData, - phantom_out2: core::marker::PhantomData, - phantom_err2: core::marker::PhantomData, -} - -impl< - 'a, - I: Clone, - O1, - O2: From, - E1, - E2: crate::error::ParseError + From, - F: Parser, - > Parser for Into -{ - fn parse(&mut self, i: I) -> IResult { - match self.f.parse(i) { - Ok((i, o)) => Ok((i, o.into())), - Err(Err::Error(e)) => Err(Err::Error(e.into())), - Err(Err::Failure(e)) => Err(Err::Failure(e.into())), - Err(Err::Incomplete(e)) => Err(Err::Incomplete(e)), - } - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::error::ErrorKind; - - #[doc(hidden)] - #[macro_export] - macro_rules! assert_size ( - ($t:ty, $sz:expr) => ( - assert_eq!(crate::lib::std::mem::size_of::<$t>(), $sz); - ); - ); - - #[test] - #[cfg(target_pointer_width = "64")] - fn size_test() { - assert_size!(IResult<&[u8], &[u8], (&[u8], u32)>, 40); - //FIXME: since rust 1.65, this is now 32 bytes, likely thanks to https://github.com/rust-lang/rust/pull/94075 - // deactivating that test for now because it'll have different values depending on the rust version - // assert_size!(IResult<&str, &str, u32>, 40); - assert_size!(Needed, 8); - assert_size!(Err, 16); - assert_size!(ErrorKind, 1); - } - - #[test] - fn err_map_test() { - let e = Err::Error(1); - assert_eq!(e.map(|v| v + 1), Err::Error(2)); - } -} diff --git a/vendor/nom/src/lib.rs b/vendor/nom/src/lib.rs deleted file mode 100644 index 3beb2f4179d489..00000000000000 --- a/vendor/nom/src/lib.rs +++ /dev/null @@ -1,464 +0,0 @@ -//! # nom, eating data byte by byte -//! -//! nom is a parser combinator library with a focus on safe parsing, -//! streaming patterns, and as much as possible zero copy. -//! -//! ## Example -//! -//! ```rust -//! use nom::{ -//! IResult, -//! bytes::complete::{tag, take_while_m_n}, -//! combinator::map_res, -//! sequence::tuple}; -//! -//! #[derive(Debug,PartialEq)] -//! pub struct Color { -//! pub red: u8, -//! 
pub green: u8, -//! pub blue: u8, -//! } -//! -//! fn from_hex(input: &str) -> Result { -//! u8::from_str_radix(input, 16) -//! } -//! -//! fn is_hex_digit(c: char) -> bool { -//! c.is_digit(16) -//! } -//! -//! fn hex_primary(input: &str) -> IResult<&str, u8> { -//! map_res( -//! take_while_m_n(2, 2, is_hex_digit), -//! from_hex -//! )(input) -//! } -//! -//! fn hex_color(input: &str) -> IResult<&str, Color> { -//! let (input, _) = tag("#")(input)?; -//! let (input, (red, green, blue)) = tuple((hex_primary, hex_primary, hex_primary))(input)?; -//! -//! Ok((input, Color { red, green, blue })) -//! } -//! -//! fn main() { -//! assert_eq!(hex_color("#2F14DF"), Ok(("", Color { -//! red: 47, -//! green: 20, -//! blue: 223, -//! }))); -//! } -//! ``` -//! -//! The code is available on [Github](https://github.com/Geal/nom) -//! -//! There are a few [guides](https://github.com/Geal/nom/tree/main/doc) with more details -//! about [how to write parsers](https://github.com/Geal/nom/blob/main/doc/making_a_new_parser_from_scratch.md), -//! or the [error management system](https://github.com/Geal/nom/blob/main/doc/error_management.md). -//! You can also check out the [recipes] module that contains examples of common patterns. -//! -//! **Looking for a specific combinator? Read the -//! ["choose a combinator" guide](https://github.com/Geal/nom/blob/main/doc/choosing_a_combinator.md)** -//! -//! If you are upgrading to nom 5.0, please read the -//! [migration document](https://github.com/Geal/nom/blob/main/doc/upgrading_to_nom_5.md). -//! -//! ## Parser combinators -//! -//! Parser combinators are an approach to parsers that is very different from -//! software like [lex](https://en.wikipedia.org/wiki/Lex_(software)) and -//! [yacc](https://en.wikipedia.org/wiki/Yacc). Instead of writing the grammar -//! in a separate syntax and generating the corresponding code, you use very small -//! functions with very specific purposes, like "take 5 bytes", or "recognize the -//! word 'HTTP'", and assemble them in meaningful patterns like "recognize -//! 'HTTP', then a space, then a version". -//! The resulting code is small, and looks like the grammar you would have -//! written with other parser approaches. -//! -//! This gives us a few advantages: -//! -//! - The parsers are small and easy to write -//! - The parsers components are easy to reuse (if they're general enough, please add them to nom!) -//! - The parsers components are easy to test separately (unit tests and property-based tests) -//! - The parser combination code looks close to the grammar you would have written -//! - You can build partial parsers, specific to the data you need at the moment, and ignore the rest -//! -//! Here is an example of one such parser, to recognize text between parentheses: -//! -//! ```rust -//! use nom::{ -//! IResult, -//! sequence::delimited, -//! // see the "streaming/complete" paragraph lower for an explanation of these submodules -//! character::complete::char, -//! bytes::complete::is_not -//! }; -//! -//! fn parens(input: &str) -> IResult<&str, &str> { -//! delimited(char('('), is_not(")"), char(')'))(input) -//! } -//! ``` -//! -//! It defines a function named `parens` which will recognize a sequence of the -//! character `(`, the longest byte array not containing `)`, then the character -//! `)`, and will return the byte array in the middle. -//! -//! Here is another parser, written without using nom's combinators this time: -//! -//! ```rust -//! use nom::{IResult, Err, Needed}; -//! -//! # fn main() { -//! 
fn take4(i: &[u8]) -> IResult<&[u8], &[u8]>{ -//! if i.len() < 4 { -//! Err(Err::Incomplete(Needed::new(4))) -//! } else { -//! Ok((&i[4..], &i[0..4])) -//! } -//! } -//! # } -//! ``` -//! -//! This function takes a byte array as input, and tries to consume 4 bytes. -//! Writing all the parsers manually, like this, is dangerous, despite Rust's -//! safety features. There are still a lot of mistakes one can make. That's why -//! nom provides a list of functions to help in developing parsers. -//! -//! With functions, you would write it like this: -//! -//! ```rust -//! use nom::{IResult, bytes::streaming::take}; -//! fn take4(input: &str) -> IResult<&str, &str> { -//! take(4u8)(input) -//! } -//! ``` -//! -//! A parser in nom is a function which, for an input type `I`, an output type `O` -//! and an optional error type `E`, will have the following signature: -//! -//! ```rust,compile_fail -//! fn parser(input: I) -> IResult; -//! ``` -//! -//! Or like this, if you don't want to specify a custom error type (it will be `(I, ErrorKind)` by default): -//! -//! ```rust,compile_fail -//! fn parser(input: I) -> IResult; -//! ``` -//! -//! `IResult` is an alias for the `Result` type: -//! -//! ```rust -//! use nom::{Needed, error::Error}; -//! -//! type IResult> = Result<(I, O), Err>; -//! -//! enum Err { -//! Incomplete(Needed), -//! Error(E), -//! Failure(E), -//! } -//! ``` -//! -//! It can have the following values: -//! -//! - A correct result `Ok((I,O))` with the first element being the remaining of the input (not parsed yet), and the second the output value; -//! - An error `Err(Err::Error(c))` with `c` an error that can be built from the input position and a parser specific error -//! - An error `Err(Err::Incomplete(Needed))` indicating that more input is necessary. `Needed` can indicate how much data is needed -//! - An error `Err(Err::Failure(c))`. It works like the `Error` case, except it indicates an unrecoverable error: We cannot backtrack and test another parser -//! -//! Please refer to the ["choose a combinator" guide](https://github.com/Geal/nom/blob/main/doc/choosing_a_combinator.md) for an exhaustive list of parsers. -//! See also the rest of the documentation [here](https://github.com/Geal/nom/blob/main/doc). -//! -//! ## Making new parsers with function combinators -//! -//! nom is based on functions that generate parsers, with a signature like -//! this: `(arguments) -> impl Fn(Input) -> IResult`. -//! The arguments of a combinator can be direct values (like `take` which uses -//! a number of bytes or character as argument) or even other parsers (like -//! `delimited` which takes as argument 3 parsers, and returns the result of -//! the second one if all are successful). -//! -//! Here are some examples: -//! -//! ```rust -//! use nom::IResult; -//! use nom::bytes::complete::{tag, take}; -//! fn abcd_parser(i: &str) -> IResult<&str, &str> { -//! tag("abcd")(i) // will consume bytes if the input begins with "abcd" -//! } -//! -//! fn take_10(i: &[u8]) -> IResult<&[u8], &[u8]> { -//! take(10u8)(i) // will consume and return 10 bytes of input -//! } -//! ``` -//! -//! ## Combining parsers -//! -//! There are higher level patterns, like the **`alt`** combinator, which -//! provides a choice between multiple parsers. If one branch fails, it tries -//! the next, and returns the result of the first parser that succeeds: -//! -//! ```rust -//! use nom::IResult; -//! use nom::branch::alt; -//! use nom::bytes::complete::tag; -//! -//! 
let mut alt_tags = alt((tag("abcd"), tag("efgh"))); -//! -//! assert_eq!(alt_tags(&b"abcdxxx"[..]), Ok((&b"xxx"[..], &b"abcd"[..]))); -//! assert_eq!(alt_tags(&b"efghxxx"[..]), Ok((&b"xxx"[..], &b"efgh"[..]))); -//! assert_eq!(alt_tags(&b"ijklxxx"[..]), Err(nom::Err::Error((&b"ijklxxx"[..], nom::error::ErrorKind::Tag)))); -//! ``` -//! -//! The **`opt`** combinator makes a parser optional. If the child parser returns -//! an error, **`opt`** will still succeed and return None: -//! -//! ```rust -//! use nom::{IResult, combinator::opt, bytes::complete::tag}; -//! fn abcd_opt(i: &[u8]) -> IResult<&[u8], Option<&[u8]>> { -//! opt(tag("abcd"))(i) -//! } -//! -//! assert_eq!(abcd_opt(&b"abcdxxx"[..]), Ok((&b"xxx"[..], Some(&b"abcd"[..])))); -//! assert_eq!(abcd_opt(&b"efghxxx"[..]), Ok((&b"efghxxx"[..], None))); -//! ``` -//! -//! **`many0`** applies a parser 0 or more times, and returns a vector of the aggregated results: -//! -//! ```rust -//! # #[cfg(feature = "alloc")] -//! # fn main() { -//! use nom::{IResult, multi::many0, bytes::complete::tag}; -//! use std::str; -//! -//! fn multi(i: &str) -> IResult<&str, Vec<&str>> { -//! many0(tag("abcd"))(i) -//! } -//! -//! let a = "abcdef"; -//! let b = "abcdabcdef"; -//! let c = "azerty"; -//! assert_eq!(multi(a), Ok(("ef", vec!["abcd"]))); -//! assert_eq!(multi(b), Ok(("ef", vec!["abcd", "abcd"]))); -//! assert_eq!(multi(c), Ok(("azerty", Vec::new()))); -//! # } -//! # #[cfg(not(feature = "alloc"))] -//! # fn main() {} -//! ``` -//! -//! Here are some basic combinators available: -//! -//! - **`opt`**: Will make the parser optional (if it returns the `O` type, the new parser returns `Option`) -//! - **`many0`**: Will apply the parser 0 or more times (if it returns the `O` type, the new parser returns `Vec`) -//! - **`many1`**: Will apply the parser 1 or more times -//! -//! There are more complex (and more useful) parsers like `tuple`, which is -//! used to apply a series of parsers then assemble their results. -//! -//! Example with `tuple`: -//! -//! ```rust -//! # fn main() { -//! use nom::{error::ErrorKind, Needed, -//! number::streaming::be_u16, -//! bytes::streaming::{tag, take}, -//! sequence::tuple}; -//! -//! let mut tpl = tuple((be_u16, take(3u8), tag("fg"))); -//! -//! assert_eq!( -//! tpl(&b"abcdefgh"[..]), -//! Ok(( -//! &b"h"[..], -//! (0x6162u16, &b"cde"[..], &b"fg"[..]) -//! )) -//! ); -//! assert_eq!(tpl(&b"abcde"[..]), Err(nom::Err::Incomplete(Needed::new(2)))); -//! let input = &b"abcdejk"[..]; -//! assert_eq!(tpl(input), Err(nom::Err::Error((&input[5..], ErrorKind::Tag)))); -//! # } -//! ``` -//! -//! But you can also use a sequence of combinators written in imperative style, -//! thanks to the `?` operator: -//! -//! ```rust -//! # fn main() { -//! use nom::{IResult, bytes::complete::tag}; -//! -//! #[derive(Debug, PartialEq)] -//! struct A { -//! a: u8, -//! b: u8 -//! } -//! -//! fn ret_int1(i:&[u8]) -> IResult<&[u8], u8> { Ok((i,1)) } -//! fn ret_int2(i:&[u8]) -> IResult<&[u8], u8> { Ok((i,2)) } -//! -//! fn f(i: &[u8]) -> IResult<&[u8], A> { -//! // if successful, the parser returns `Ok((remaining_input, output_value))` that we can destructure -//! let (i, _) = tag("abcd")(i)?; -//! let (i, a) = ret_int1(i)?; -//! let (i, _) = tag("efgh")(i)?; -//! let (i, b) = ret_int2(i)?; -//! -//! Ok((i, A { a, b })) -//! } -//! -//! let r = f(b"abcdefghX"); -//! assert_eq!(r, Ok((&b"X"[..], A{a: 1, b: 2}))); -//! # } -//! ``` -//! -//! ## Streaming / Complete -//! -//! Some of nom's modules have `streaming` or `complete` submodules. 
They hold -//! different variants of the same combinators. -//! -//! A streaming parser assumes that we might not have all of the input data. -//! This can happen with some network protocol or large file parsers, where the -//! input buffer can be full and need to be resized or refilled. -//! -//! A complete parser assumes that we already have all of the input data. -//! This will be the common case with small files that can be read entirely to -//! memory. -//! -//! Here is how it works in practice: -//! -//! ```rust -//! use nom::{IResult, Err, Needed, error::{Error, ErrorKind}, bytes, character}; -//! -//! fn take_streaming(i: &[u8]) -> IResult<&[u8], &[u8]> { -//! bytes::streaming::take(4u8)(i) -//! } -//! -//! fn take_complete(i: &[u8]) -> IResult<&[u8], &[u8]> { -//! bytes::complete::take(4u8)(i) -//! } -//! -//! // both parsers will take 4 bytes as expected -//! assert_eq!(take_streaming(&b"abcde"[..]), Ok((&b"e"[..], &b"abcd"[..]))); -//! assert_eq!(take_complete(&b"abcde"[..]), Ok((&b"e"[..], &b"abcd"[..]))); -//! -//! // if the input is smaller than 4 bytes, the streaming parser -//! // will return `Incomplete` to indicate that we need more data -//! assert_eq!(take_streaming(&b"abc"[..]), Err(Err::Incomplete(Needed::new(1)))); -//! -//! // but the complete parser will return an error -//! assert_eq!(take_complete(&b"abc"[..]), Err(Err::Error(Error::new(&b"abc"[..], ErrorKind::Eof)))); -//! -//! // the alpha0 function recognizes 0 or more alphabetic characters -//! fn alpha0_streaming(i: &str) -> IResult<&str, &str> { -//! character::streaming::alpha0(i) -//! } -//! -//! fn alpha0_complete(i: &str) -> IResult<&str, &str> { -//! character::complete::alpha0(i) -//! } -//! -//! // if there's a clear limit to the recognized characters, both parsers work the same way -//! assert_eq!(alpha0_streaming("abcd;"), Ok((";", "abcd"))); -//! assert_eq!(alpha0_complete("abcd;"), Ok((";", "abcd"))); -//! -//! // but when there's no limit, the streaming version returns `Incomplete`, because it cannot -//! // know if more input data should be recognized. The whole input could be "abcd;", or -//! // "abcde;" -//! assert_eq!(alpha0_streaming("abcd"), Err(Err::Incomplete(Needed::new(1)))); -//! -//! // while the complete version knows that all of the data is there -//! assert_eq!(alpha0_complete("abcd"), Ok(("", "abcd"))); -//! ``` -//! **Going further:** Read the [guides](https://github.com/Geal/nom/tree/main/doc), -//! check out the [recipes]! -#![cfg_attr(not(feature = "std"), no_std)] -#![cfg_attr(feature = "cargo-clippy", allow(clippy::doc_markdown))] -#![cfg_attr(feature = "docsrs", feature(doc_cfg))] -#![cfg_attr(feature = "docsrs", feature(extended_key_value_attributes))] -#![deny(missing_docs)] -#[cfg_attr(nightly, warn(rustdoc::missing_doc_code_examples))] -#[cfg(feature = "alloc")] -#[macro_use] -extern crate alloc; -#[cfg(doctest)] -extern crate doc_comment; - -#[cfg(doctest)] -doc_comment::doctest!("../README.md"); - -/// Lib module to re-export everything needed from `std` or `core`/`alloc`. This is how `serde` does -/// it, albeit there it is not public. -#[cfg_attr(nightly, allow(rustdoc::missing_doc_code_examples))] -pub mod lib { - /// `std` facade allowing `std`/`core` to be interchangeable. 
Reexports `alloc` crate optionally, - /// as well as `core` or `std` - #[cfg(not(feature = "std"))] - #[cfg_attr(nightly, allow(rustdoc::missing_doc_code_examples))] - /// internal std exports for no_std compatibility - pub mod std { - #[doc(hidden)] - #[cfg(not(feature = "alloc"))] - pub use core::borrow; - - #[cfg(feature = "alloc")] - #[doc(hidden)] - pub use alloc::{borrow, boxed, string, vec}; - - #[doc(hidden)] - pub use core::{cmp, convert, fmt, iter, mem, ops, option, result, slice, str}; - - /// internal reproduction of std prelude - #[doc(hidden)] - pub mod prelude { - pub use core::prelude as v1; - } - } - - #[cfg(feature = "std")] - #[cfg_attr(nightly, allow(rustdoc::missing_doc_code_examples))] - /// internal std exports for no_std compatibility - pub mod std { - #[doc(hidden)] - pub use std::{ - alloc, borrow, boxed, cmp, collections, convert, fmt, hash, iter, mem, ops, option, result, - slice, str, string, vec, - }; - - /// internal reproduction of std prelude - #[doc(hidden)] - pub mod prelude { - pub use std::prelude as v1; - } - } -} - -pub use self::bits::*; -pub use self::internal::*; -pub use self::traits::*; - -pub use self::str::*; - -#[macro_use] -mod macros; -#[macro_use] -pub mod error; - -pub mod branch; -pub mod combinator; -mod internal; -pub mod multi; -pub mod sequence; -mod traits; - -pub mod bits; -pub mod bytes; - -pub mod character; - -mod str; - -pub mod number; - -#[cfg(feature = "docsrs")] -#[cfg_attr(feature = "docsrs", cfg_attr(feature = "docsrs", doc = include_str!("../doc/nom_recipes.md")))] -pub mod recipes {} diff --git a/vendor/nom/src/macros.rs b/vendor/nom/src/macros.rs deleted file mode 100644 index 980d2d90ed28a5..00000000000000 --- a/vendor/nom/src/macros.rs +++ /dev/null @@ -1,23 +0,0 @@ -macro_rules! succ ( - (0, $submac:ident ! ($($rest:tt)*)) => ($submac!(1, $($rest)*)); - (1, $submac:ident ! ($($rest:tt)*)) => ($submac!(2, $($rest)*)); - (2, $submac:ident ! ($($rest:tt)*)) => ($submac!(3, $($rest)*)); - (3, $submac:ident ! ($($rest:tt)*)) => ($submac!(4, $($rest)*)); - (4, $submac:ident ! ($($rest:tt)*)) => ($submac!(5, $($rest)*)); - (5, $submac:ident ! ($($rest:tt)*)) => ($submac!(6, $($rest)*)); - (6, $submac:ident ! ($($rest:tt)*)) => ($submac!(7, $($rest)*)); - (7, $submac:ident ! ($($rest:tt)*)) => ($submac!(8, $($rest)*)); - (8, $submac:ident ! ($($rest:tt)*)) => ($submac!(9, $($rest)*)); - (9, $submac:ident ! ($($rest:tt)*)) => ($submac!(10, $($rest)*)); - (10, $submac:ident ! ($($rest:tt)*)) => ($submac!(11, $($rest)*)); - (11, $submac:ident ! ($($rest:tt)*)) => ($submac!(12, $($rest)*)); - (12, $submac:ident ! ($($rest:tt)*)) => ($submac!(13, $($rest)*)); - (13, $submac:ident ! ($($rest:tt)*)) => ($submac!(14, $($rest)*)); - (14, $submac:ident ! ($($rest:tt)*)) => ($submac!(15, $($rest)*)); - (15, $submac:ident ! ($($rest:tt)*)) => ($submac!(16, $($rest)*)); - (16, $submac:ident ! ($($rest:tt)*)) => ($submac!(17, $($rest)*)); - (17, $submac:ident ! ($($rest:tt)*)) => ($submac!(18, $($rest)*)); - (18, $submac:ident ! ($($rest:tt)*)) => ($submac!(19, $($rest)*)); - (19, $submac:ident ! ($($rest:tt)*)) => ($submac!(20, $($rest)*)); - (20, $submac:ident ! ($($rest:tt)*)) => ($submac!(21, $($rest)*)); -); diff --git a/vendor/nom/src/multi/mod.rs b/vendor/nom/src/multi/mod.rs deleted file mode 100644 index 73129084e2a903..00000000000000 --- a/vendor/nom/src/multi/mod.rs +++ /dev/null @@ -1,1049 +0,0 @@ -//! 
Combinators applying their child parser multiple times - -#[cfg(test)] -mod tests; - -use crate::error::ErrorKind; -use crate::error::ParseError; -use crate::internal::{Err, IResult, Needed, Parser}; -#[cfg(feature = "alloc")] -use crate::lib::std::vec::Vec; -use crate::traits::{InputLength, InputTake, ToUsize}; -use core::num::NonZeroUsize; - -/// Don't pre-allocate more than 64KiB when calling `Vec::with_capacity`. -/// -/// Pre-allocating memory is a nice optimization but count fields can't -/// always be trusted. We should clamp initial capacities to some reasonable -/// amount. This reduces the risk of a bogus count value triggering a panic -/// due to an OOM error. -/// -/// This does not affect correctness. Nom will always read the full number -/// of elements regardless of the capacity cap. -#[cfg(feature = "alloc")] -const MAX_INITIAL_CAPACITY_BYTES: usize = 65536; - -/// Repeats the embedded parser, gathering the results in a `Vec`. -/// -/// This stops on [`Err::Error`] and returns the results that were accumulated. To instead chain an error up, see -/// [`cut`][crate::combinator::cut]. -/// -/// # Arguments -/// * `f` The parser to apply. -/// -/// *Note*: if the parser passed in accepts empty inputs (like `alpha0` or `digit0`), `many0` will -/// return an error, to prevent going into an infinite loop -/// -/// ```rust -/// # use nom::{Err, error::ErrorKind, Needed, IResult}; -/// use nom::multi::many0; -/// use nom::bytes::complete::tag; -/// -/// fn parser(s: &str) -> IResult<&str, Vec<&str>> { -/// many0(tag("abc"))(s) -/// } -/// -/// assert_eq!(parser("abcabc"), Ok(("", vec!["abc", "abc"]))); -/// assert_eq!(parser("abc123"), Ok(("123", vec!["abc"]))); -/// assert_eq!(parser("123123"), Ok(("123123", vec![]))); -/// assert_eq!(parser(""), Ok(("", vec![]))); -/// ``` -#[cfg(feature = "alloc")] -#[cfg_attr(feature = "docsrs", doc(cfg(feature = "alloc")))] -pub fn many0(mut f: F) -> impl FnMut(I) -> IResult, E> -where - I: Clone + InputLength, - F: Parser, - E: ParseError, -{ - move |mut i: I| { - let mut acc = crate::lib::std::vec::Vec::with_capacity(4); - loop { - let len = i.input_len(); - match f.parse(i.clone()) { - Err(Err::Error(_)) => return Ok((i, acc)), - Err(e) => return Err(e), - Ok((i1, o)) => { - // infinite loop check: the parser must always consume - if i1.input_len() == len { - return Err(Err::Error(E::from_error_kind(i, ErrorKind::Many0))); - } - - i = i1; - acc.push(o); - } - } - } - } -} - -/// Runs the embedded parser, gathering the results in a `Vec`. -/// -/// This stops on [`Err::Error`] if there is at least one result, and returns the results that were accumulated. To instead chain an error up, -/// see [`cut`][crate::combinator::cut]. -/// -/// # Arguments -/// * `f` The parser to apply. -/// -/// *Note*: If the parser passed to `many1` accepts empty inputs -/// (like `alpha0` or `digit0`), `many1` will return an error, -/// to prevent going into an infinite loop. 
-/// -/// ```rust -/// # use nom::{Err, error::{Error, ErrorKind}, Needed, IResult}; -/// use nom::multi::many1; -/// use nom::bytes::complete::tag; -/// -/// fn parser(s: &str) -> IResult<&str, Vec<&str>> { -/// many1(tag("abc"))(s) -/// } -/// -/// assert_eq!(parser("abcabc"), Ok(("", vec!["abc", "abc"]))); -/// assert_eq!(parser("abc123"), Ok(("123", vec!["abc"]))); -/// assert_eq!(parser("123123"), Err(Err::Error(Error::new("123123", ErrorKind::Tag)))); -/// assert_eq!(parser(""), Err(Err::Error(Error::new("", ErrorKind::Tag)))); -/// ``` -#[cfg(feature = "alloc")] -#[cfg_attr(feature = "docsrs", doc(cfg(feature = "alloc")))] -pub fn many1(mut f: F) -> impl FnMut(I) -> IResult, E> -where - I: Clone + InputLength, - F: Parser, - E: ParseError, -{ - move |mut i: I| match f.parse(i.clone()) { - Err(Err::Error(err)) => Err(Err::Error(E::append(i, ErrorKind::Many1, err))), - Err(e) => Err(e), - Ok((i1, o)) => { - let mut acc = crate::lib::std::vec::Vec::with_capacity(4); - acc.push(o); - i = i1; - - loop { - let len = i.input_len(); - match f.parse(i.clone()) { - Err(Err::Error(_)) => return Ok((i, acc)), - Err(e) => return Err(e), - Ok((i1, o)) => { - // infinite loop check: the parser must always consume - if i1.input_len() == len { - return Err(Err::Error(E::from_error_kind(i, ErrorKind::Many1))); - } - - i = i1; - acc.push(o); - } - } - } - } - } -} - -/// Applies the parser `f` until the parser `g` produces a result. -/// -/// Returns a tuple of the results of `f` in a `Vec` and the result of `g`. -/// -/// `f` keeps going so long as `g` produces [`Err::Error`]. To instead chain an error up, see [`cut`][crate::combinator::cut]. -/// -/// ```rust -/// # use nom::{Err, error::{Error, ErrorKind}, Needed, IResult}; -/// use nom::multi::many_till; -/// use nom::bytes::complete::tag; -/// -/// fn parser(s: &str) -> IResult<&str, (Vec<&str>, &str)> { -/// many_till(tag("abc"), tag("end"))(s) -/// }; -/// -/// assert_eq!(parser("abcabcend"), Ok(("", (vec!["abc", "abc"], "end")))); -/// assert_eq!(parser("abc123end"), Err(Err::Error(Error::new("123end", ErrorKind::Tag)))); -/// assert_eq!(parser("123123end"), Err(Err::Error(Error::new("123123end", ErrorKind::Tag)))); -/// assert_eq!(parser(""), Err(Err::Error(Error::new("", ErrorKind::Tag)))); -/// assert_eq!(parser("abcendefg"), Ok(("efg", (vec!["abc"], "end")))); -/// ``` -#[cfg(feature = "alloc")] -#[cfg_attr(feature = "docsrs", doc(cfg(feature = "alloc")))] -pub fn many_till( - mut f: F, - mut g: G, -) -> impl FnMut(I) -> IResult, P), E> -where - I: Clone + InputLength, - F: Parser, - G: Parser, - E: ParseError, -{ - move |mut i: I| { - let mut res = crate::lib::std::vec::Vec::new(); - loop { - let len = i.input_len(); - match g.parse(i.clone()) { - Ok((i1, o)) => return Ok((i1, (res, o))), - Err(Err::Error(_)) => { - match f.parse(i.clone()) { - Err(Err::Error(err)) => return Err(Err::Error(E::append(i, ErrorKind::ManyTill, err))), - Err(e) => return Err(e), - Ok((i1, o)) => { - // infinite loop check: the parser must always consume - if i1.input_len() == len { - return Err(Err::Error(E::from_error_kind(i1, ErrorKind::ManyTill))); - } - - res.push(o); - i = i1; - } - } - } - Err(e) => return Err(e), - } - } - } -} - -/// Alternates between two parsers to produce a list of elements. -/// -/// This stops when either parser returns [`Err::Error`] and returns the results that were accumulated. To instead chain an error up, see -/// [`cut`][crate::combinator::cut]. -/// -/// # Arguments -/// * `sep` Parses the separator between list elements. 
-/// * `f` Parses the elements of the list. -/// -/// ```rust -/// # use nom::{Err, error::ErrorKind, Needed, IResult}; -/// use nom::multi::separated_list0; -/// use nom::bytes::complete::tag; -/// -/// fn parser(s: &str) -> IResult<&str, Vec<&str>> { -/// separated_list0(tag("|"), tag("abc"))(s) -/// } -/// -/// assert_eq!(parser("abc|abc|abc"), Ok(("", vec!["abc", "abc", "abc"]))); -/// assert_eq!(parser("abc123abc"), Ok(("123abc", vec!["abc"]))); -/// assert_eq!(parser("abc|def"), Ok(("|def", vec!["abc"]))); -/// assert_eq!(parser(""), Ok(("", vec![]))); -/// assert_eq!(parser("def|abc"), Ok(("def|abc", vec![]))); -/// ``` -#[cfg(feature = "alloc")] -#[cfg_attr(feature = "docsrs", doc(cfg(feature = "alloc")))] -pub fn separated_list0( - mut sep: G, - mut f: F, -) -> impl FnMut(I) -> IResult, E> -where - I: Clone + InputLength, - F: Parser, - G: Parser, - E: ParseError, -{ - move |mut i: I| { - let mut res = Vec::new(); - - match f.parse(i.clone()) { - Err(Err::Error(_)) => return Ok((i, res)), - Err(e) => return Err(e), - Ok((i1, o)) => { - res.push(o); - i = i1; - } - } - - loop { - let len = i.input_len(); - match sep.parse(i.clone()) { - Err(Err::Error(_)) => return Ok((i, res)), - Err(e) => return Err(e), - Ok((i1, _)) => { - // infinite loop check: the parser must always consume - if i1.input_len() == len { - return Err(Err::Error(E::from_error_kind(i1, ErrorKind::SeparatedList))); - } - - match f.parse(i1.clone()) { - Err(Err::Error(_)) => return Ok((i, res)), - Err(e) => return Err(e), - Ok((i2, o)) => { - res.push(o); - i = i2; - } - } - } - } - } - } -} - -/// Alternates between two parsers to produce a list of elements until [`Err::Error`]. -/// -/// Fails if the element parser does not produce at least one element.$ -/// -/// This stops when either parser returns [`Err::Error`] and returns the results that were accumulated. To instead chain an error up, see -/// [`cut`][crate::combinator::cut]. -/// -/// # Arguments -/// * `sep` Parses the separator between list elements. -/// * `f` Parses the elements of the list. 
-/// ```rust -/// # use nom::{Err, error::{Error, ErrorKind}, Needed, IResult}; -/// use nom::multi::separated_list1; -/// use nom::bytes::complete::tag; -/// -/// fn parser(s: &str) -> IResult<&str, Vec<&str>> { -/// separated_list1(tag("|"), tag("abc"))(s) -/// } -/// -/// assert_eq!(parser("abc|abc|abc"), Ok(("", vec!["abc", "abc", "abc"]))); -/// assert_eq!(parser("abc123abc"), Ok(("123abc", vec!["abc"]))); -/// assert_eq!(parser("abc|def"), Ok(("|def", vec!["abc"]))); -/// assert_eq!(parser(""), Err(Err::Error(Error::new("", ErrorKind::Tag)))); -/// assert_eq!(parser("def|abc"), Err(Err::Error(Error::new("def|abc", ErrorKind::Tag)))); -/// ``` -#[cfg(feature = "alloc")] -#[cfg_attr(feature = "docsrs", doc(cfg(feature = "alloc")))] -pub fn separated_list1( - mut sep: G, - mut f: F, -) -> impl FnMut(I) -> IResult, E> -where - I: Clone + InputLength, - F: Parser, - G: Parser, - E: ParseError, -{ - move |mut i: I| { - let mut res = Vec::new(); - - // Parse the first element - match f.parse(i.clone()) { - Err(e) => return Err(e), - Ok((i1, o)) => { - res.push(o); - i = i1; - } - } - - loop { - let len = i.input_len(); - match sep.parse(i.clone()) { - Err(Err::Error(_)) => return Ok((i, res)), - Err(e) => return Err(e), - Ok((i1, _)) => { - // infinite loop check: the parser must always consume - if i1.input_len() == len { - return Err(Err::Error(E::from_error_kind(i1, ErrorKind::SeparatedList))); - } - - match f.parse(i1.clone()) { - Err(Err::Error(_)) => return Ok((i, res)), - Err(e) => return Err(e), - Ok((i2, o)) => { - res.push(o); - i = i2; - } - } - } - } - } - } -} - -/// Repeats the embedded parser `m..=n` times -/// -/// This stops before `n` when the parser returns [`Err::Error`] and returns the results that were accumulated. To instead chain an error up, see -/// [`cut`][crate::combinator::cut]. -/// -/// # Arguments -/// * `m` The minimum number of iterations. -/// * `n` The maximum number of iterations. -/// * `f` The parser to apply. -/// -/// *Note*: If the parser passed to `many1` accepts empty inputs -/// (like `alpha0` or `digit0`), `many1` will return an error, -/// to prevent going into an infinite loop. 
-/// -/// ```rust -/// # use nom::{Err, error::ErrorKind, Needed, IResult}; -/// use nom::multi::many_m_n; -/// use nom::bytes::complete::tag; -/// -/// fn parser(s: &str) -> IResult<&str, Vec<&str>> { -/// many_m_n(0, 2, tag("abc"))(s) -/// } -/// -/// assert_eq!(parser("abcabc"), Ok(("", vec!["abc", "abc"]))); -/// assert_eq!(parser("abc123"), Ok(("123", vec!["abc"]))); -/// assert_eq!(parser("123123"), Ok(("123123", vec![]))); -/// assert_eq!(parser(""), Ok(("", vec![]))); -/// assert_eq!(parser("abcabcabc"), Ok(("abc", vec!["abc", "abc"]))); -/// ``` -#[cfg(feature = "alloc")] -#[cfg_attr(feature = "docsrs", doc(cfg(feature = "alloc")))] -pub fn many_m_n( - min: usize, - max: usize, - mut parse: F, -) -> impl FnMut(I) -> IResult, E> -where - I: Clone + InputLength, - F: Parser, - E: ParseError, -{ - move |mut input: I| { - if min > max { - return Err(Err::Failure(E::from_error_kind(input, ErrorKind::ManyMN))); - } - - let max_initial_capacity = - MAX_INITIAL_CAPACITY_BYTES / crate::lib::std::mem::size_of::().max(1); - let mut res = crate::lib::std::vec::Vec::with_capacity(min.min(max_initial_capacity)); - for count in 0..max { - let len = input.input_len(); - match parse.parse(input.clone()) { - Ok((tail, value)) => { - // infinite loop check: the parser must always consume - if tail.input_len() == len { - return Err(Err::Error(E::from_error_kind(input, ErrorKind::ManyMN))); - } - - res.push(value); - input = tail; - } - Err(Err::Error(e)) => { - if count < min { - return Err(Err::Error(E::append(input, ErrorKind::ManyMN, e))); - } else { - return Ok((input, res)); - } - } - Err(e) => { - return Err(e); - } - } - } - - Ok((input, res)) - } -} - -/// Repeats the embedded parser, counting the results -/// -/// This stops on [`Err::Error`]. To instead chain an error up, see -/// [`cut`][crate::combinator::cut]. -/// -/// # Arguments -/// * `f` The parser to apply. -/// -/// *Note*: if the parser passed in accepts empty inputs (like `alpha0` or `digit0`), `many0` will -/// return an error, to prevent going into an infinite loop -/// -/// ```rust -/// # use nom::{Err, error::ErrorKind, Needed, IResult}; -/// use nom::multi::many0_count; -/// use nom::bytes::complete::tag; -/// -/// fn parser(s: &str) -> IResult<&str, usize> { -/// many0_count(tag("abc"))(s) -/// } -/// -/// assert_eq!(parser("abcabc"), Ok(("", 2))); -/// assert_eq!(parser("abc123"), Ok(("123", 1))); -/// assert_eq!(parser("123123"), Ok(("123123", 0))); -/// assert_eq!(parser(""), Ok(("", 0))); -/// ``` -pub fn many0_count(mut f: F) -> impl FnMut(I) -> IResult -where - I: Clone + InputLength, - F: Parser, - E: ParseError, -{ - move |i: I| { - let mut input = i; - let mut count = 0; - - loop { - let input_ = input.clone(); - let len = input.input_len(); - match f.parse(input_) { - Ok((i, _)) => { - // infinite loop check: the parser must always consume - if i.input_len() == len { - return Err(Err::Error(E::from_error_kind(input, ErrorKind::Many0Count))); - } - - input = i; - count += 1; - } - - Err(Err::Error(_)) => return Ok((input, count)), - - Err(e) => return Err(e), - } - } - } -} - -/// Runs the embedded parser, counting the results. -/// -/// This stops on [`Err::Error`] if there is at least one result. To instead chain an error up, -/// see [`cut`][crate::combinator::cut]. -/// -/// # Arguments -/// * `f` The parser to apply. -/// -/// *Note*: If the parser passed to `many1` accepts empty inputs -/// (like `alpha0` or `digit0`), `many1` will return an error, -/// to prevent going into an infinite loop. 
-/// -/// ```rust -/// # use nom::{Err, error::{Error, ErrorKind}, Needed, IResult}; -/// use nom::multi::many1_count; -/// use nom::bytes::complete::tag; -/// -/// fn parser(s: &str) -> IResult<&str, usize> { -/// many1_count(tag("abc"))(s) -/// } -/// -/// assert_eq!(parser("abcabc"), Ok(("", 2))); -/// assert_eq!(parser("abc123"), Ok(("123", 1))); -/// assert_eq!(parser("123123"), Err(Err::Error(Error::new("123123", ErrorKind::Many1Count)))); -/// assert_eq!(parser(""), Err(Err::Error(Error::new("", ErrorKind::Many1Count)))); -/// ``` -pub fn many1_count(mut f: F) -> impl FnMut(I) -> IResult -where - I: Clone + InputLength, - F: Parser, - E: ParseError, -{ - move |i: I| { - let i_ = i.clone(); - match f.parse(i_) { - Err(Err::Error(_)) => Err(Err::Error(E::from_error_kind(i, ErrorKind::Many1Count))), - Err(i) => Err(i), - Ok((i1, _)) => { - let mut count = 1; - let mut input = i1; - - loop { - let len = input.input_len(); - let input_ = input.clone(); - match f.parse(input_) { - Err(Err::Error(_)) => return Ok((input, count)), - Err(e) => return Err(e), - Ok((i, _)) => { - // infinite loop check: the parser must always consume - if i.input_len() == len { - return Err(Err::Error(E::from_error_kind(i, ErrorKind::Many1Count))); - } - - count += 1; - input = i; - } - } - } - } - } - } -} - -/// Runs the embedded parser `count` times, gathering the results in a `Vec` -/// -/// # Arguments -/// * `f` The parser to apply. -/// * `count` How often to apply the parser. -/// ```rust -/// # use nom::{Err, error::{Error, ErrorKind}, Needed, IResult}; -/// use nom::multi::count; -/// use nom::bytes::complete::tag; -/// -/// fn parser(s: &str) -> IResult<&str, Vec<&str>> { -/// count(tag("abc"), 2)(s) -/// } -/// -/// assert_eq!(parser("abcabc"), Ok(("", vec!["abc", "abc"]))); -/// assert_eq!(parser("abc123"), Err(Err::Error(Error::new("123", ErrorKind::Tag)))); -/// assert_eq!(parser("123123"), Err(Err::Error(Error::new("123123", ErrorKind::Tag)))); -/// assert_eq!(parser(""), Err(Err::Error(Error::new("", ErrorKind::Tag)))); -/// assert_eq!(parser("abcabcabc"), Ok(("abc", vec!["abc", "abc"]))); -/// ``` -#[cfg(feature = "alloc")] -#[cfg_attr(feature = "docsrs", doc(cfg(feature = "alloc")))] -pub fn count(mut f: F, count: usize) -> impl FnMut(I) -> IResult, E> -where - I: Clone + PartialEq, - F: Parser, - E: ParseError, -{ - move |i: I| { - let mut input = i.clone(); - let max_initial_capacity = - MAX_INITIAL_CAPACITY_BYTES / crate::lib::std::mem::size_of::().max(1); - let mut res = crate::lib::std::vec::Vec::with_capacity(count.min(max_initial_capacity)); - - for _ in 0..count { - let input_ = input.clone(); - match f.parse(input_) { - Ok((i, o)) => { - res.push(o); - input = i; - } - Err(Err::Error(e)) => { - return Err(Err::Error(E::append(i, ErrorKind::Count, e))); - } - Err(e) => { - return Err(e); - } - } - } - - Ok((input, res)) - } -} - -/// Runs the embedded parser repeatedly, filling the given slice with results. -/// -/// This parser fails if the input runs out before the given slice is full. -/// -/// # Arguments -/// * `f` The parser to apply. 
-/// * `buf` The slice to fill -/// ```rust -/// # use nom::{Err, error::{Error, ErrorKind}, Needed, IResult}; -/// use nom::multi::fill; -/// use nom::bytes::complete::tag; -/// -/// fn parser(s: &str) -> IResult<&str, [&str; 2]> { -/// let mut buf = ["", ""]; -/// let (rest, ()) = fill(tag("abc"), &mut buf)(s)?; -/// Ok((rest, buf)) -/// } -/// -/// assert_eq!(parser("abcabc"), Ok(("", ["abc", "abc"]))); -/// assert_eq!(parser("abc123"), Err(Err::Error(Error::new("123", ErrorKind::Tag)))); -/// assert_eq!(parser("123123"), Err(Err::Error(Error::new("123123", ErrorKind::Tag)))); -/// assert_eq!(parser(""), Err(Err::Error(Error::new("", ErrorKind::Tag)))); -/// assert_eq!(parser("abcabcabc"), Ok(("abc", ["abc", "abc"]))); -/// ``` -pub fn fill<'a, I, O, E, F>(f: F, buf: &'a mut [O]) -> impl FnMut(I) -> IResult + 'a -where - I: Clone + PartialEq, - F: Fn(I) -> IResult + 'a, - E: ParseError, -{ - move |i: I| { - let mut input = i.clone(); - - for elem in buf.iter_mut() { - let input_ = input.clone(); - match f(input_) { - Ok((i, o)) => { - *elem = o; - input = i; - } - Err(Err::Error(e)) => { - return Err(Err::Error(E::append(i, ErrorKind::Count, e))); - } - Err(e) => { - return Err(e); - } - } - } - - Ok((input, ())) - } -} - -/// Repeats the embedded parser, calling `g` to gather the results. -/// -/// This stops on [`Err::Error`]. To instead chain an error up, see -/// [`cut`][crate::combinator::cut]. -/// -/// # Arguments -/// * `f` The parser to apply. -/// * `init` A function returning the initial value. -/// * `g` The function that combines a result of `f` with -/// the current accumulator. -/// -/// *Note*: if the parser passed in accepts empty inputs (like `alpha0` or `digit0`), `many0` will -/// return an error, to prevent going into an infinite loop -/// -/// ```rust -/// # use nom::{Err, error::ErrorKind, Needed, IResult}; -/// use nom::multi::fold_many0; -/// use nom::bytes::complete::tag; -/// -/// fn parser(s: &str) -> IResult<&str, Vec<&str>> { -/// fold_many0( -/// tag("abc"), -/// Vec::new, -/// |mut acc: Vec<_>, item| { -/// acc.push(item); -/// acc -/// } -/// )(s) -/// } -/// -/// assert_eq!(parser("abcabc"), Ok(("", vec!["abc", "abc"]))); -/// assert_eq!(parser("abc123"), Ok(("123", vec!["abc"]))); -/// assert_eq!(parser("123123"), Ok(("123123", vec![]))); -/// assert_eq!(parser(""), Ok(("", vec![]))); -/// ``` -pub fn fold_many0( - mut f: F, - mut init: H, - mut g: G, -) -> impl FnMut(I) -> IResult -where - I: Clone + InputLength, - F: Parser, - G: FnMut(R, O) -> R, - H: FnMut() -> R, - E: ParseError, -{ - move |i: I| { - let mut res = init(); - let mut input = i; - - loop { - let i_ = input.clone(); - let len = input.input_len(); - match f.parse(i_) { - Ok((i, o)) => { - // infinite loop check: the parser must always consume - if i.input_len() == len { - return Err(Err::Error(E::from_error_kind(input, ErrorKind::Many0))); - } - - res = g(res, o); - input = i; - } - Err(Err::Error(_)) => { - return Ok((input, res)); - } - Err(e) => { - return Err(e); - } - } - } - } -} - -/// Repeats the embedded parser, calling `g` to gather the results. -/// -/// This stops on [`Err::Error`] if there is at least one result. To instead chain an error up, -/// see [`cut`][crate::combinator::cut]. -/// -/// # Arguments -/// * `f` The parser to apply. -/// * `init` A function returning the initial value. -/// * `g` The function that combines a result of `f` with -/// the current accumulator. 
-/// -/// *Note*: If the parser passed to `many1` accepts empty inputs -/// (like `alpha0` or `digit0`), `many1` will return an error, -/// to prevent going into an infinite loop. -/// -/// ```rust -/// # use nom::{Err, error::{Error, ErrorKind}, Needed, IResult}; -/// use nom::multi::fold_many1; -/// use nom::bytes::complete::tag; -/// -/// fn parser(s: &str) -> IResult<&str, Vec<&str>> { -/// fold_many1( -/// tag("abc"), -/// Vec::new, -/// |mut acc: Vec<_>, item| { -/// acc.push(item); -/// acc -/// } -/// )(s) -/// } -/// -/// assert_eq!(parser("abcabc"), Ok(("", vec!["abc", "abc"]))); -/// assert_eq!(parser("abc123"), Ok(("123", vec!["abc"]))); -/// assert_eq!(parser("123123"), Err(Err::Error(Error::new("123123", ErrorKind::Many1)))); -/// assert_eq!(parser(""), Err(Err::Error(Error::new("", ErrorKind::Many1)))); -/// ``` -pub fn fold_many1( - mut f: F, - mut init: H, - mut g: G, -) -> impl FnMut(I) -> IResult -where - I: Clone + InputLength, - F: Parser, - G: FnMut(R, O) -> R, - H: FnMut() -> R, - E: ParseError, -{ - move |i: I| { - let _i = i.clone(); - let init = init(); - match f.parse(_i) { - Err(Err::Error(_)) => Err(Err::Error(E::from_error_kind(i, ErrorKind::Many1))), - Err(e) => Err(e), - Ok((i1, o1)) => { - let mut acc = g(init, o1); - let mut input = i1; - - loop { - let _input = input.clone(); - let len = input.input_len(); - match f.parse(_input) { - Err(Err::Error(_)) => { - break; - } - Err(e) => return Err(e), - Ok((i, o)) => { - // infinite loop check: the parser must always consume - if i.input_len() == len { - return Err(Err::Failure(E::from_error_kind(i, ErrorKind::Many1))); - } - - acc = g(acc, o); - input = i; - } - } - } - - Ok((input, acc)) - } - } - } -} - -/// Repeats the embedded parser `m..=n` times, calling `g` to gather the results -/// -/// This stops before `n` when the parser returns [`Err::Error`]. To instead chain an error up, see -/// [`cut`][crate::combinator::cut]. -/// -/// # Arguments -/// * `m` The minimum number of iterations. -/// * `n` The maximum number of iterations. -/// * `f` The parser to apply. -/// * `init` A function returning the initial value. -/// * `g` The function that combines a result of `f` with -/// the current accumulator. -/// -/// *Note*: If the parser passed to `many1` accepts empty inputs -/// (like `alpha0` or `digit0`), `many1` will return an error, -/// to prevent going into an infinite loop. 
-/// -/// ```rust -/// # use nom::{Err, error::ErrorKind, Needed, IResult}; -/// use nom::multi::fold_many_m_n; -/// use nom::bytes::complete::tag; -/// -/// fn parser(s: &str) -> IResult<&str, Vec<&str>> { -/// fold_many_m_n( -/// 0, -/// 2, -/// tag("abc"), -/// Vec::new, -/// |mut acc: Vec<_>, item| { -/// acc.push(item); -/// acc -/// } -/// )(s) -/// } -/// -/// assert_eq!(parser("abcabc"), Ok(("", vec!["abc", "abc"]))); -/// assert_eq!(parser("abc123"), Ok(("123", vec!["abc"]))); -/// assert_eq!(parser("123123"), Ok(("123123", vec![]))); -/// assert_eq!(parser(""), Ok(("", vec![]))); -/// assert_eq!(parser("abcabcabc"), Ok(("abc", vec!["abc", "abc"]))); -/// ``` -pub fn fold_many_m_n( - min: usize, - max: usize, - mut parse: F, - mut init: H, - mut fold: G, -) -> impl FnMut(I) -> IResult -where - I: Clone + InputLength, - F: Parser, - G: FnMut(R, O) -> R, - H: FnMut() -> R, - E: ParseError, -{ - move |mut input: I| { - if min > max { - return Err(Err::Failure(E::from_error_kind(input, ErrorKind::ManyMN))); - } - - let mut acc = init(); - for count in 0..max { - let len = input.input_len(); - match parse.parse(input.clone()) { - Ok((tail, value)) => { - // infinite loop check: the parser must always consume - if tail.input_len() == len { - return Err(Err::Error(E::from_error_kind(tail, ErrorKind::ManyMN))); - } - - acc = fold(acc, value); - input = tail; - } - //FInputXMError: handle failure properly - Err(Err::Error(err)) => { - if count < min { - return Err(Err::Error(E::append(input, ErrorKind::ManyMN, err))); - } else { - break; - } - } - Err(e) => return Err(e), - } - } - - Ok((input, acc)) - } -} - -/// Gets a number from the parser and returns a -/// subslice of the input of that size. -/// If the parser returns `Incomplete`, -/// `length_data` will return an error. -/// # Arguments -/// * `f` The parser to apply. -/// ```rust -/// # use nom::{Err, error::ErrorKind, Needed, IResult}; -/// use nom::number::complete::be_u16; -/// use nom::multi::length_data; -/// use nom::bytes::complete::tag; -/// -/// fn parser(s: &[u8]) -> IResult<&[u8], &[u8]> { -/// length_data(be_u16)(s) -/// } -/// -/// assert_eq!(parser(b"\x00\x03abcefg"), Ok((&b"efg"[..], &b"abc"[..]))); -/// assert_eq!(parser(b"\x00\x03a"), Err(Err::Incomplete(Needed::new(2)))); -/// ``` -pub fn length_data(mut f: F) -> impl FnMut(I) -> IResult -where - I: InputLength + InputTake, - N: ToUsize, - F: Parser, - E: ParseError, -{ - move |i: I| { - let (i, length) = f.parse(i)?; - - let length: usize = length.to_usize(); - - if let Some(needed) = length - .checked_sub(i.input_len()) - .and_then(NonZeroUsize::new) - { - Err(Err::Incomplete(Needed::Size(needed))) - } else { - Ok(i.take_split(length)) - } - } -} - -/// Gets a number from the first parser, -/// takes a subslice of the input of that size, -/// then applies the second parser on that subslice. -/// If the second parser returns `Incomplete`, -/// `length_value` will return an error. -/// # Arguments -/// * `f` The parser to apply. -/// * `g` The parser to apply on the subslice. 
-/// ```rust -/// # use nom::{Err, error::{Error, ErrorKind}, Needed, IResult}; -/// use nom::number::complete::be_u16; -/// use nom::multi::length_value; -/// use nom::bytes::complete::tag; -/// -/// fn parser(s: &[u8]) -> IResult<&[u8], &[u8]> { -/// length_value(be_u16, tag("abc"))(s) -/// } -/// -/// assert_eq!(parser(b"\x00\x03abcefg"), Ok((&b"efg"[..], &b"abc"[..]))); -/// assert_eq!(parser(b"\x00\x03123123"), Err(Err::Error(Error::new(&b"123"[..], ErrorKind::Tag)))); -/// assert_eq!(parser(b"\x00\x03a"), Err(Err::Incomplete(Needed::new(2)))); -/// ``` -pub fn length_value(mut f: F, mut g: G) -> impl FnMut(I) -> IResult -where - I: Clone + InputLength + InputTake, - N: ToUsize, - F: Parser, - G: Parser, - E: ParseError, -{ - move |i: I| { - let (i, length) = f.parse(i)?; - - let length: usize = length.to_usize(); - - if let Some(needed) = length - .checked_sub(i.input_len()) - .and_then(NonZeroUsize::new) - { - Err(Err::Incomplete(Needed::Size(needed))) - } else { - let (rest, i) = i.take_split(length); - match g.parse(i.clone()) { - Err(Err::Incomplete(_)) => Err(Err::Error(E::from_error_kind(i, ErrorKind::Complete))), - Err(e) => Err(e), - Ok((_, o)) => Ok((rest, o)), - } - } - } -} - -/// Gets a number from the first parser, -/// then applies the second parser that many times. -/// # Arguments -/// * `f` The parser to apply to obtain the count. -/// * `g` The parser to apply repeatedly. -/// ```rust -/// # use nom::{Err, error::{Error, ErrorKind}, Needed, IResult}; -/// use nom::number::complete::u8; -/// use nom::multi::length_count; -/// use nom::bytes::complete::tag; -/// use nom::combinator::map; -/// -/// fn parser(s: &[u8]) -> IResult<&[u8], Vec<&[u8]>> { -/// length_count(map(u8, |i| { -/// println!("got number: {}", i); -/// i -/// }), tag("abc"))(s) -/// } -/// -/// assert_eq!(parser(&b"\x02abcabcabc"[..]), Ok(((&b"abc"[..], vec![&b"abc"[..], &b"abc"[..]])))); -/// assert_eq!(parser(b"\x03123123123"), Err(Err::Error(Error::new(&b"123123123"[..], ErrorKind::Tag)))); -/// ``` -#[cfg(feature = "alloc")] -pub fn length_count(mut f: F, mut g: G) -> impl FnMut(I) -> IResult, E> -where - I: Clone, - N: ToUsize, - F: Parser, - G: Parser, - E: ParseError, -{ - move |i: I| { - let (i, count) = f.parse(i)?; - let mut input = i.clone(); - let mut res = Vec::new(); - - for _ in 0..count.to_usize() { - let input_ = input.clone(); - match g.parse(input_) { - Ok((i, o)) => { - res.push(o); - input = i; - } - Err(Err::Error(e)) => { - return Err(Err::Error(E::append(i, ErrorKind::Count, e))); - } - Err(e) => { - return Err(e); - } - } - } - - Ok((input, res)) - } -} diff --git a/vendor/nom/src/multi/tests.rs b/vendor/nom/src/multi/tests.rs deleted file mode 100644 index 96a65181764e7f..00000000000000 --- a/vendor/nom/src/multi/tests.rs +++ /dev/null @@ -1,534 +0,0 @@ -use super::{length_data, length_value, many0_count, many1_count}; -use crate::{ - bytes::streaming::tag, - character::streaming::digit1 as digit, - error::{ErrorKind, ParseError}, - internal::{Err, IResult, Needed}, - lib::std::str::{self, FromStr}, - number::streaming::{be_u16, be_u8}, - sequence::{pair, tuple}, -}; -#[cfg(feature = "alloc")] -use crate::{ - lib::std::vec::Vec, - multi::{ - count, fold_many0, fold_many1, fold_many_m_n, length_count, many0, many1, many_m_n, many_till, - separated_list0, separated_list1, - }, -}; - -#[test] -#[cfg(feature = "alloc")] -fn separated_list0_test() { - fn multi(i: &[u8]) -> IResult<&[u8], Vec<&[u8]>> { - separated_list0(tag(","), tag("abcd"))(i) - } - fn multi_empty(i: &[u8]) -> 
IResult<&[u8], Vec<&[u8]>> { - separated_list0(tag(","), tag(""))(i) - } - fn empty_sep(i: &[u8]) -> IResult<&[u8], Vec<&[u8]>> { - separated_list0(tag(""), tag("abc"))(i) - } - fn multi_longsep(i: &[u8]) -> IResult<&[u8], Vec<&[u8]>> { - separated_list0(tag(".."), tag("abcd"))(i) - } - - let a = &b"abcdef"[..]; - let b = &b"abcd,abcdef"[..]; - let c = &b"azerty"[..]; - let d = &b",,abc"[..]; - let e = &b"abcd,abcd,ef"[..]; - let f = &b"abc"[..]; - let g = &b"abcd."[..]; - let h = &b"abcd,abc"[..]; - let i = &b"abcabc"[..]; - - let res1 = vec![&b"abcd"[..]]; - assert_eq!(multi(a), Ok((&b"ef"[..], res1))); - let res2 = vec![&b"abcd"[..], &b"abcd"[..]]; - assert_eq!(multi(b), Ok((&b"ef"[..], res2))); - assert_eq!(multi(c), Ok((&b"azerty"[..], Vec::new()))); - let res3 = vec![&b""[..], &b""[..], &b""[..]]; - assert_eq!(multi_empty(d), Ok((&b"abc"[..], res3))); - let i_err_pos = &i[3..]; - assert_eq!( - empty_sep(i), - Err(Err::Error(error_position!( - i_err_pos, - ErrorKind::SeparatedList - ))) - ); - let res4 = vec![&b"abcd"[..], &b"abcd"[..]]; - assert_eq!(multi(e), Ok((&b",ef"[..], res4))); - - assert_eq!(multi(f), Err(Err::Incomplete(Needed::new(1)))); - assert_eq!(multi_longsep(g), Err(Err::Incomplete(Needed::new(1)))); - assert_eq!(multi(h), Err(Err::Incomplete(Needed::new(1)))); -} - -#[test] -#[cfg(feature = "alloc")] -fn separated_list1_test() { - fn multi(i: &[u8]) -> IResult<&[u8], Vec<&[u8]>> { - separated_list1(tag(","), tag("abcd"))(i) - } - fn multi_longsep(i: &[u8]) -> IResult<&[u8], Vec<&[u8]>> { - separated_list1(tag(".."), tag("abcd"))(i) - } - - let a = &b"abcdef"[..]; - let b = &b"abcd,abcdef"[..]; - let c = &b"azerty"[..]; - let d = &b"abcd,abcd,ef"[..]; - - let f = &b"abc"[..]; - let g = &b"abcd."[..]; - let h = &b"abcd,abc"[..]; - - let res1 = vec![&b"abcd"[..]]; - assert_eq!(multi(a), Ok((&b"ef"[..], res1))); - let res2 = vec![&b"abcd"[..], &b"abcd"[..]]; - assert_eq!(multi(b), Ok((&b"ef"[..], res2))); - assert_eq!( - multi(c), - Err(Err::Error(error_position!(c, ErrorKind::Tag))) - ); - let res3 = vec![&b"abcd"[..], &b"abcd"[..]]; - assert_eq!(multi(d), Ok((&b",ef"[..], res3))); - - assert_eq!(multi(f), Err(Err::Incomplete(Needed::new(1)))); - assert_eq!(multi_longsep(g), Err(Err::Incomplete(Needed::new(1)))); - assert_eq!(multi(h), Err(Err::Incomplete(Needed::new(1)))); -} - -#[test] -#[cfg(feature = "alloc")] -fn many0_test() { - fn multi(i: &[u8]) -> IResult<&[u8], Vec<&[u8]>> { - many0(tag("abcd"))(i) - } - fn multi_empty(i: &[u8]) -> IResult<&[u8], Vec<&[u8]>> { - many0(tag(""))(i) - } - - assert_eq!(multi(&b"abcdef"[..]), Ok((&b"ef"[..], vec![&b"abcd"[..]]))); - assert_eq!( - multi(&b"abcdabcdefgh"[..]), - Ok((&b"efgh"[..], vec![&b"abcd"[..], &b"abcd"[..]])) - ); - assert_eq!(multi(&b"azerty"[..]), Ok((&b"azerty"[..], Vec::new()))); - assert_eq!(multi(&b"abcdab"[..]), Err(Err::Incomplete(Needed::new(2)))); - assert_eq!(multi(&b"abcd"[..]), Err(Err::Incomplete(Needed::new(4)))); - assert_eq!(multi(&b""[..]), Err(Err::Incomplete(Needed::new(4)))); - assert_eq!( - multi_empty(&b"abcdef"[..]), - Err(Err::Error(error_position!( - &b"abcdef"[..], - ErrorKind::Many0 - ))) - ); -} - -#[test] -#[cfg(feature = "alloc")] -fn many1_test() { - fn multi(i: &[u8]) -> IResult<&[u8], Vec<&[u8]>> { - many1(tag("abcd"))(i) - } - - let a = &b"abcdef"[..]; - let b = &b"abcdabcdefgh"[..]; - let c = &b"azerty"[..]; - let d = &b"abcdab"[..]; - - let res1 = vec![&b"abcd"[..]]; - assert_eq!(multi(a), Ok((&b"ef"[..], res1))); - let res2 = vec![&b"abcd"[..], &b"abcd"[..]]; - 
assert_eq!(multi(b), Ok((&b"efgh"[..], res2))); - assert_eq!( - multi(c), - Err(Err::Error(error_position!(c, ErrorKind::Tag))) - ); - assert_eq!(multi(d), Err(Err::Incomplete(Needed::new(2)))); -} - -#[test] -#[cfg(feature = "alloc")] -fn many_till_test() { - fn multi(i: &[u8]) -> IResult<&[u8], (Vec<&[u8]>, &[u8])> { - many_till(tag("abcd"), tag("efgh"))(i) - } - - let a = b"abcdabcdefghabcd"; - let b = b"efghabcd"; - let c = b"azerty"; - - let res_a = (vec![&b"abcd"[..], &b"abcd"[..]], &b"efgh"[..]); - let res_b: (Vec<&[u8]>, &[u8]) = (Vec::new(), &b"efgh"[..]); - assert_eq!(multi(&a[..]), Ok((&b"abcd"[..], res_a))); - assert_eq!(multi(&b[..]), Ok((&b"abcd"[..], res_b))); - assert_eq!( - multi(&c[..]), - Err(Err::Error(error_node_position!( - &c[..], - ErrorKind::ManyTill, - error_position!(&c[..], ErrorKind::Tag) - ))) - ); -} - -#[test] -#[cfg(feature = "std")] -fn infinite_many() { - fn tst(input: &[u8]) -> IResult<&[u8], &[u8]> { - println!("input: {:?}", input); - Err(Err::Error(error_position!(input, ErrorKind::Tag))) - } - - // should not go into an infinite loop - fn multi0(i: &[u8]) -> IResult<&[u8], Vec<&[u8]>> { - many0(tst)(i) - } - let a = &b"abcdef"[..]; - assert_eq!(multi0(a), Ok((a, Vec::new()))); - - fn multi1(i: &[u8]) -> IResult<&[u8], Vec<&[u8]>> { - many1(tst)(i) - } - let a = &b"abcdef"[..]; - assert_eq!( - multi1(a), - Err(Err::Error(error_position!(a, ErrorKind::Tag))) - ); -} - -#[test] -#[cfg(feature = "alloc")] -fn many_m_n_test() { - fn multi(i: &[u8]) -> IResult<&[u8], Vec<&[u8]>> { - many_m_n(2, 4, tag("Abcd"))(i) - } - - let a = &b"Abcdef"[..]; - let b = &b"AbcdAbcdefgh"[..]; - let c = &b"AbcdAbcdAbcdAbcdefgh"[..]; - let d = &b"AbcdAbcdAbcdAbcdAbcdefgh"[..]; - let e = &b"AbcdAb"[..]; - - assert_eq!( - multi(a), - Err(Err::Error(error_position!(&b"ef"[..], ErrorKind::Tag))) - ); - let res1 = vec![&b"Abcd"[..], &b"Abcd"[..]]; - assert_eq!(multi(b), Ok((&b"efgh"[..], res1))); - let res2 = vec![&b"Abcd"[..], &b"Abcd"[..], &b"Abcd"[..], &b"Abcd"[..]]; - assert_eq!(multi(c), Ok((&b"efgh"[..], res2))); - let res3 = vec![&b"Abcd"[..], &b"Abcd"[..], &b"Abcd"[..], &b"Abcd"[..]]; - assert_eq!(multi(d), Ok((&b"Abcdefgh"[..], res3))); - assert_eq!(multi(e), Err(Err::Incomplete(Needed::new(2)))); -} - -#[test] -#[cfg(feature = "alloc")] -fn count_test() { - const TIMES: usize = 2; - fn cnt_2(i: &[u8]) -> IResult<&[u8], Vec<&[u8]>> { - count(tag("abc"), TIMES)(i) - } - - assert_eq!( - cnt_2(&b"abcabcabcdef"[..]), - Ok((&b"abcdef"[..], vec![&b"abc"[..], &b"abc"[..]])) - ); - assert_eq!(cnt_2(&b"ab"[..]), Err(Err::Incomplete(Needed::new(1)))); - assert_eq!(cnt_2(&b"abcab"[..]), Err(Err::Incomplete(Needed::new(1)))); - assert_eq!( - cnt_2(&b"xxx"[..]), - Err(Err::Error(error_position!(&b"xxx"[..], ErrorKind::Tag))) - ); - assert_eq!( - cnt_2(&b"xxxabcabcdef"[..]), - Err(Err::Error(error_position!( - &b"xxxabcabcdef"[..], - ErrorKind::Tag - ))) - ); - assert_eq!( - cnt_2(&b"abcxxxabcdef"[..]), - Err(Err::Error(error_position!( - &b"xxxabcdef"[..], - ErrorKind::Tag - ))) - ); -} - -#[test] -#[cfg(feature = "alloc")] -fn count_zero() { - const TIMES: usize = 0; - fn counter_2(i: &[u8]) -> IResult<&[u8], Vec<&[u8]>> { - count(tag("abc"), TIMES)(i) - } - - let done = &b"abcabcabcdef"[..]; - let parsed_done = Vec::new(); - let rest = done; - let incomplete_1 = &b"ab"[..]; - let parsed_incompl_1 = Vec::new(); - let incomplete_2 = &b"abcab"[..]; - let parsed_incompl_2 = Vec::new(); - let error = &b"xxx"[..]; - let error_remain = &b"xxx"[..]; - let parsed_err = Vec::new(); - let 
error_1 = &b"xxxabcabcdef"[..]; - let parsed_err_1 = Vec::new(); - let error_1_remain = &b"xxxabcabcdef"[..]; - let error_2 = &b"abcxxxabcdef"[..]; - let parsed_err_2 = Vec::new(); - let error_2_remain = &b"abcxxxabcdef"[..]; - - assert_eq!(counter_2(done), Ok((rest, parsed_done))); - assert_eq!( - counter_2(incomplete_1), - Ok((incomplete_1, parsed_incompl_1)) - ); - assert_eq!( - counter_2(incomplete_2), - Ok((incomplete_2, parsed_incompl_2)) - ); - assert_eq!(counter_2(error), Ok((error_remain, parsed_err))); - assert_eq!(counter_2(error_1), Ok((error_1_remain, parsed_err_1))); - assert_eq!(counter_2(error_2), Ok((error_2_remain, parsed_err_2))); -} - -#[derive(Debug, Clone, PartialEq)] -pub struct NilError; - -impl From<(I, ErrorKind)> for NilError { - fn from(_: (I, ErrorKind)) -> Self { - NilError - } -} - -impl ParseError for NilError { - fn from_error_kind(_: I, _: ErrorKind) -> NilError { - NilError - } - fn append(_: I, _: ErrorKind, _: NilError) -> NilError { - NilError - } -} - -fn number(i: &[u8]) -> IResult<&[u8], u32> { - use crate::combinator::map_res; - - map_res(map_res(digit, str::from_utf8), FromStr::from_str)(i) -} - -#[test] -#[cfg(feature = "alloc")] -fn length_count_test() { - fn cnt(i: &[u8]) -> IResult<&[u8], Vec<&[u8]>> { - length_count(number, tag("abc"))(i) - } - - assert_eq!( - cnt(&b"2abcabcabcdef"[..]), - Ok((&b"abcdef"[..], vec![&b"abc"[..], &b"abc"[..]])) - ); - assert_eq!(cnt(&b"2ab"[..]), Err(Err::Incomplete(Needed::new(1)))); - assert_eq!(cnt(&b"3abcab"[..]), Err(Err::Incomplete(Needed::new(1)))); - assert_eq!( - cnt(&b"xxx"[..]), - Err(Err::Error(error_position!(&b"xxx"[..], ErrorKind::Digit))) - ); - assert_eq!( - cnt(&b"2abcxxx"[..]), - Err(Err::Error(error_position!(&b"xxx"[..], ErrorKind::Tag))) - ); -} - -#[test] -fn length_data_test() { - fn take(i: &[u8]) -> IResult<&[u8], &[u8]> { - length_data(number)(i) - } - - assert_eq!( - take(&b"6abcabcabcdef"[..]), - Ok((&b"abcdef"[..], &b"abcabc"[..])) - ); - assert_eq!(take(&b"3ab"[..]), Err(Err::Incomplete(Needed::new(1)))); - assert_eq!( - take(&b"xxx"[..]), - Err(Err::Error(error_position!(&b"xxx"[..], ErrorKind::Digit))) - ); - assert_eq!(take(&b"2abcxxx"[..]), Ok((&b"cxxx"[..], &b"ab"[..]))); -} - -#[test] -fn length_value_test() { - fn length_value_1(i: &[u8]) -> IResult<&[u8], u16> { - length_value(be_u8, be_u16)(i) - } - fn length_value_2(i: &[u8]) -> IResult<&[u8], (u8, u8)> { - length_value(be_u8, tuple((be_u8, be_u8)))(i) - } - - let i1 = [0, 5, 6]; - assert_eq!( - length_value_1(&i1), - Err(Err::Error(error_position!(&b""[..], ErrorKind::Complete))) - ); - assert_eq!( - length_value_2(&i1), - Err(Err::Error(error_position!(&b""[..], ErrorKind::Complete))) - ); - - let i2 = [1, 5, 6, 3]; - assert_eq!( - length_value_1(&i2), - Err(Err::Error(error_position!(&i2[1..2], ErrorKind::Complete))) - ); - assert_eq!( - length_value_2(&i2), - Err(Err::Error(error_position!(&i2[1..2], ErrorKind::Complete))) - ); - - let i3 = [2, 5, 6, 3, 4, 5, 7]; - assert_eq!(length_value_1(&i3), Ok((&i3[3..], 1286))); - assert_eq!(length_value_2(&i3), Ok((&i3[3..], (5, 6)))); - - let i4 = [3, 5, 6, 3, 4, 5]; - assert_eq!(length_value_1(&i4), Ok((&i4[4..], 1286))); - assert_eq!(length_value_2(&i4), Ok((&i4[4..], (5, 6)))); -} - -#[test] -#[cfg(feature = "alloc")] -fn fold_many0_test() { - fn fold_into_vec(mut acc: Vec, item: T) -> Vec { - acc.push(item); - acc - } - fn multi(i: &[u8]) -> IResult<&[u8], Vec<&[u8]>> { - fold_many0(tag("abcd"), Vec::new, fold_into_vec)(i) - } - fn multi_empty(i: &[u8]) -> IResult<&[u8], 
Vec<&[u8]>> { - fold_many0(tag(""), Vec::new, fold_into_vec)(i) - } - - assert_eq!(multi(&b"abcdef"[..]), Ok((&b"ef"[..], vec![&b"abcd"[..]]))); - assert_eq!( - multi(&b"abcdabcdefgh"[..]), - Ok((&b"efgh"[..], vec![&b"abcd"[..], &b"abcd"[..]])) - ); - assert_eq!(multi(&b"azerty"[..]), Ok((&b"azerty"[..], Vec::new()))); - assert_eq!(multi(&b"abcdab"[..]), Err(Err::Incomplete(Needed::new(2)))); - assert_eq!(multi(&b"abcd"[..]), Err(Err::Incomplete(Needed::new(4)))); - assert_eq!(multi(&b""[..]), Err(Err::Incomplete(Needed::new(4)))); - assert_eq!( - multi_empty(&b"abcdef"[..]), - Err(Err::Error(error_position!( - &b"abcdef"[..], - ErrorKind::Many0 - ))) - ); -} - -#[test] -#[cfg(feature = "alloc")] -fn fold_many1_test() { - fn fold_into_vec(mut acc: Vec, item: T) -> Vec { - acc.push(item); - acc - } - fn multi(i: &[u8]) -> IResult<&[u8], Vec<&[u8]>> { - fold_many1(tag("abcd"), Vec::new, fold_into_vec)(i) - } - - let a = &b"abcdef"[..]; - let b = &b"abcdabcdefgh"[..]; - let c = &b"azerty"[..]; - let d = &b"abcdab"[..]; - - let res1 = vec![&b"abcd"[..]]; - assert_eq!(multi(a), Ok((&b"ef"[..], res1))); - let res2 = vec![&b"abcd"[..], &b"abcd"[..]]; - assert_eq!(multi(b), Ok((&b"efgh"[..], res2))); - assert_eq!( - multi(c), - Err(Err::Error(error_position!(c, ErrorKind::Many1))) - ); - assert_eq!(multi(d), Err(Err::Incomplete(Needed::new(2)))); -} - -#[test] -#[cfg(feature = "alloc")] -fn fold_many_m_n_test() { - fn fold_into_vec(mut acc: Vec, item: T) -> Vec { - acc.push(item); - acc - } - fn multi(i: &[u8]) -> IResult<&[u8], Vec<&[u8]>> { - fold_many_m_n(2, 4, tag("Abcd"), Vec::new, fold_into_vec)(i) - } - - let a = &b"Abcdef"[..]; - let b = &b"AbcdAbcdefgh"[..]; - let c = &b"AbcdAbcdAbcdAbcdefgh"[..]; - let d = &b"AbcdAbcdAbcdAbcdAbcdefgh"[..]; - let e = &b"AbcdAb"[..]; - - assert_eq!( - multi(a), - Err(Err::Error(error_position!(&b"ef"[..], ErrorKind::Tag))) - ); - let res1 = vec![&b"Abcd"[..], &b"Abcd"[..]]; - assert_eq!(multi(b), Ok((&b"efgh"[..], res1))); - let res2 = vec![&b"Abcd"[..], &b"Abcd"[..], &b"Abcd"[..], &b"Abcd"[..]]; - assert_eq!(multi(c), Ok((&b"efgh"[..], res2))); - let res3 = vec![&b"Abcd"[..], &b"Abcd"[..], &b"Abcd"[..], &b"Abcd"[..]]; - assert_eq!(multi(d), Ok((&b"Abcdefgh"[..], res3))); - assert_eq!(multi(e), Err(Err::Incomplete(Needed::new(2)))); -} - -#[test] -fn many0_count_test() { - fn count0_nums(i: &[u8]) -> IResult<&[u8], usize> { - many0_count(pair(digit, tag(",")))(i) - } - - assert_eq!(count0_nums(&b"123,junk"[..]), Ok((&b"junk"[..], 1))); - - assert_eq!(count0_nums(&b"123,45,junk"[..]), Ok((&b"junk"[..], 2))); - - assert_eq!( - count0_nums(&b"1,2,3,4,5,6,7,8,9,0,junk"[..]), - Ok((&b"junk"[..], 10)) - ); - - assert_eq!(count0_nums(&b"hello"[..]), Ok((&b"hello"[..], 0))); -} - -#[test] -fn many1_count_test() { - fn count1_nums(i: &[u8]) -> IResult<&[u8], usize> { - many1_count(pair(digit, tag(",")))(i) - } - - assert_eq!(count1_nums(&b"123,45,junk"[..]), Ok((&b"junk"[..], 2))); - - assert_eq!( - count1_nums(&b"1,2,3,4,5,6,7,8,9,0,junk"[..]), - Ok((&b"junk"[..], 10)) - ); - - assert_eq!( - count1_nums(&b"hello"[..]), - Err(Err::Error(error_position!( - &b"hello"[..], - ErrorKind::Many1Count - ))) - ); -} diff --git a/vendor/nom/src/number/complete.rs b/vendor/nom/src/number/complete.rs deleted file mode 100644 index 98b8b3abf836c9..00000000000000 --- a/vendor/nom/src/number/complete.rs +++ /dev/null @@ -1,2126 +0,0 @@ -//! 
Parsers recognizing numbers, complete input version - -use crate::branch::alt; -use crate::bytes::complete::tag; -use crate::character::complete::{char, digit1, sign}; -use crate::combinator::{cut, map, opt, recognize}; -use crate::error::ParseError; -use crate::error::{make_error, ErrorKind}; -use crate::internal::*; -use crate::lib::std::ops::{Range, RangeFrom, RangeTo}; -use crate::sequence::{pair, tuple}; -use crate::traits::{ - AsBytes, AsChar, Compare, InputIter, InputLength, InputTake, InputTakeAtPosition, Offset, Slice, -}; - -/// Recognizes an unsigned 1 byte integer. -/// -/// *Complete version*: Returns an error if there is not enough input data. -/// ```rust -/// # use nom::{Err, error::ErrorKind, Needed}; -/// # use nom::Needed::Size; -/// use nom::number::complete::be_u8; -/// -/// let parser = |s| { -/// be_u8(s) -/// }; -/// -/// assert_eq!(parser(&b"\x00\x03abcefg"[..]), Ok((&b"\x03abcefg"[..], 0x00))); -/// assert_eq!(parser(&b""[..]), Err(Err::Error((&[][..], ErrorKind::Eof)))); -/// ``` -#[inline] -pub fn be_u8>(input: I) -> IResult -where - I: Slice> + InputIter + InputLength, -{ - let bound: usize = 1; - if input.input_len() < bound { - Err(Err::Error(make_error(input, ErrorKind::Eof))) - } else { - let res = input.iter_elements().next().unwrap(); - - Ok((input.slice(bound..), res)) - } -} - -/// Recognizes a big endian unsigned 2 bytes integer. -/// -/// *Complete version*: Returns an error if there is not enough input data. -/// ```rust -/// # use nom::{Err, error::ErrorKind, Needed}; -/// # use nom::Needed::Size; -/// use nom::number::complete::be_u16; -/// -/// let parser = |s| { -/// be_u16(s) -/// }; -/// -/// assert_eq!(parser(&b"\x00\x03abcefg"[..]), Ok((&b"abcefg"[..], 0x0003))); -/// assert_eq!(parser(&b"\x01"[..]), Err(Err::Error((&[0x01][..], ErrorKind::Eof)))); -/// ``` -#[inline] -pub fn be_u16>(input: I) -> IResult -where - I: Slice> + InputIter + InputLength, -{ - let bound: usize = 2; - if input.input_len() < bound { - Err(Err::Error(make_error(input, ErrorKind::Eof))) - } else { - let mut res = 0u16; - for byte in input.iter_elements().take(bound) { - res = (res << 8) + byte as u16; - } - - Ok((input.slice(bound..), res)) - } -} - -/// Recognizes a big endian unsigned 3 byte integer. -/// -/// *Complete version*: Returns an error if there is not enough input data. -/// ```rust -/// # use nom::{Err, error::ErrorKind, Needed}; -/// # use nom::Needed::Size; -/// use nom::number::complete::be_u24; -/// -/// let parser = |s| { -/// be_u24(s) -/// }; -/// -/// assert_eq!(parser(&b"\x00\x03\x05abcefg"[..]), Ok((&b"abcefg"[..], 0x000305))); -/// assert_eq!(parser(&b"\x01"[..]), Err(Err::Error((&[0x01][..], ErrorKind::Eof)))); -/// ``` -#[inline] -pub fn be_u24>(input: I) -> IResult -where - I: Slice> + InputIter + InputLength, -{ - let bound: usize = 3; - if input.input_len() < bound { - Err(Err::Error(make_error(input, ErrorKind::Eof))) - } else { - let mut res = 0u32; - for byte in input.iter_elements().take(bound) { - res = (res << 8) + byte as u32; - } - - Ok((input.slice(bound..), res)) - } -} - -/// Recognizes a big endian unsigned 4 bytes integer. -/// -/// *Complete version*: Returns an error if there is not enough input data. 
-/// ```rust -/// # use nom::{Err, error::ErrorKind, Needed}; -/// # use nom::Needed::Size; -/// use nom::number::complete::be_u32; -/// -/// let parser = |s| { -/// be_u32(s) -/// }; -/// -/// assert_eq!(parser(&b"\x00\x03\x05\x07abcefg"[..]), Ok((&b"abcefg"[..], 0x00030507))); -/// assert_eq!(parser(&b"\x01"[..]), Err(Err::Error((&[0x01][..], ErrorKind::Eof)))); -/// ``` -#[inline] -pub fn be_u32>(input: I) -> IResult -where - I: Slice> + InputIter + InputLength, -{ - let bound: usize = 4; - if input.input_len() < bound { - Err(Err::Error(make_error(input, ErrorKind::Eof))) - } else { - let mut res = 0u32; - for byte in input.iter_elements().take(bound) { - res = (res << 8) + byte as u32; - } - - Ok((input.slice(bound..), res)) - } -} - -/// Recognizes a big endian unsigned 8 bytes integer. -/// -/// *Complete version*: Returns an error if there is not enough input data. -/// ```rust -/// # use nom::{Err, error::ErrorKind, Needed}; -/// # use nom::Needed::Size; -/// use nom::number::complete::be_u64; -/// -/// let parser = |s| { -/// be_u64(s) -/// }; -/// -/// assert_eq!(parser(&b"\x00\x01\x02\x03\x04\x05\x06\x07abcefg"[..]), Ok((&b"abcefg"[..], 0x0001020304050607))); -/// assert_eq!(parser(&b"\x01"[..]), Err(Err::Error((&[0x01][..], ErrorKind::Eof)))); -/// ``` -#[inline] -pub fn be_u64>(input: I) -> IResult -where - I: Slice> + InputIter + InputLength, -{ - let bound: usize = 8; - if input.input_len() < bound { - Err(Err::Error(make_error(input, ErrorKind::Eof))) - } else { - let mut res = 0u64; - for byte in input.iter_elements().take(bound) { - res = (res << 8) + byte as u64; - } - - Ok((input.slice(bound..), res)) - } -} - -/// Recognizes a big endian unsigned 16 bytes integer. -/// -/// *Complete version*: Returns an error if there is not enough input data. -/// ```rust -/// # use nom::{Err, error::ErrorKind, Needed}; -/// # use nom::Needed::Size; -/// use nom::number::complete::be_u128; -/// -/// let parser = |s| { -/// be_u128(s) -/// }; -/// -/// assert_eq!(parser(&b"\x00\x01\x02\x03\x04\x05\x06\x07\x00\x01\x02\x03\x04\x05\x06\x07abcefg"[..]), Ok((&b"abcefg"[..], 0x00010203040506070001020304050607))); -/// assert_eq!(parser(&b"\x01"[..]), Err(Err::Error((&[0x01][..], ErrorKind::Eof)))); -/// ``` -#[inline] -pub fn be_u128>(input: I) -> IResult -where - I: Slice> + InputIter + InputLength, -{ - let bound: usize = 16; - if input.input_len() < bound { - Err(Err::Error(make_error(input, ErrorKind::Eof))) - } else { - let mut res = 0u128; - for byte in input.iter_elements().take(bound) { - res = (res << 8) + byte as u128; - } - - Ok((input.slice(bound..), res)) - } -} - -/// Recognizes a signed 1 byte integer. -/// -/// *Complete version*: Returns an error if there is not enough input data. -/// ```rust -/// # use nom::{Err, error::ErrorKind, Needed}; -/// # use nom::Needed::Size; -/// use nom::number::complete::be_i8; -/// -/// let parser = |s| { -/// be_i8(s) -/// }; -/// -/// assert_eq!(parser(&b"\x00\x03abcefg"[..]), Ok((&b"\x03abcefg"[..], 0x00))); -/// assert_eq!(parser(&b""[..]), Err(Err::Error((&[][..], ErrorKind::Eof)))); -/// ``` -#[inline] -pub fn be_i8>(input: I) -> IResult -where - I: Slice> + InputIter + InputLength, -{ - be_u8.map(|x| x as i8).parse(input) -} - -/// Recognizes a big endian signed 2 bytes integer. -/// -/// *Complete version*: Returns an error if there is not enough input data. 
-/// ```rust -/// # use nom::{Err, error::ErrorKind, Needed}; -/// # use nom::Needed::Size; -/// use nom::number::complete::be_i16; -/// -/// let parser = |s| { -/// be_i16(s) -/// }; -/// -/// assert_eq!(parser(&b"\x00\x03abcefg"[..]), Ok((&b"abcefg"[..], 0x0003))); -/// assert_eq!(parser(&b"\x01"[..]), Err(Err::Error((&[0x01][..], ErrorKind::Eof)))); -/// ``` -#[inline] -pub fn be_i16>(input: I) -> IResult -where - I: Slice> + InputIter + InputLength, -{ - be_u16.map(|x| x as i16).parse(input) -} - -/// Recognizes a big endian signed 3 bytes integer. -/// -/// *Complete version*: Returns an error if there is not enough input data. -/// ```rust -/// # use nom::{Err, error::ErrorKind, Needed}; -/// # use nom::Needed::Size; -/// use nom::number::complete::be_i24; -/// -/// let parser = |s| { -/// be_i24(s) -/// }; -/// -/// assert_eq!(parser(&b"\x00\x03\x05abcefg"[..]), Ok((&b"abcefg"[..], 0x000305))); -/// assert_eq!(parser(&b"\x01"[..]), Err(Err::Error((&[0x01][..], ErrorKind::Eof)))); -/// ``` -#[inline] -pub fn be_i24>(input: I) -> IResult -where - I: Slice> + InputIter + InputLength, -{ - // Same as the unsigned version but we need to sign-extend manually here - be_u24 - .map(|x| { - if x & 0x80_00_00 != 0 { - (x | 0xff_00_00_00) as i32 - } else { - x as i32 - } - }) - .parse(input) -} - -/// Recognizes a big endian signed 4 bytes integer. -/// -/// *Complete version*: Returns an error if there is not enough input data. -/// ```rust -/// # use nom::{Err, error::ErrorKind, Needed}; -/// # use nom::Needed::Size; -/// use nom::number::complete::be_i32; -/// -/// let parser = |s| { -/// be_i32(s) -/// }; -/// -/// assert_eq!(parser(&b"\x00\x03\x05\x07abcefg"[..]), Ok((&b"abcefg"[..], 0x00030507))); -/// assert_eq!(parser(&b"\x01"[..]), Err(Err::Error((&[0x01][..], ErrorKind::Eof)))); -/// ``` -#[inline] -pub fn be_i32>(input: I) -> IResult -where - I: Slice> + InputIter + InputLength, -{ - be_u32.map(|x| x as i32).parse(input) -} - -/// Recognizes a big endian signed 8 bytes integer. -/// -/// *Complete version*: Returns an error if there is not enough input data. -/// ```rust -/// # use nom::{Err, error::ErrorKind, Needed}; -/// # use nom::Needed::Size; -/// use nom::number::complete::be_i64; -/// -/// let parser = |s| { -/// be_i64(s) -/// }; -/// -/// assert_eq!(parser(&b"\x00\x01\x02\x03\x04\x05\x06\x07abcefg"[..]), Ok((&b"abcefg"[..], 0x0001020304050607))); -/// assert_eq!(parser(&b"\x01"[..]), Err(Err::Error((&[0x01][..], ErrorKind::Eof)))); -/// ``` -#[inline] -pub fn be_i64>(input: I) -> IResult -where - I: Slice> + InputIter + InputLength, -{ - be_u64.map(|x| x as i64).parse(input) -} - -/// Recognizes a big endian signed 16 bytes integer. -/// -/// *Complete version*: Returns an error if there is not enough input data. -/// ```rust -/// # use nom::{Err, error::ErrorKind, Needed}; -/// # use nom::Needed::Size; -/// use nom::number::complete::be_i128; -/// -/// let parser = |s| { -/// be_i128(s) -/// }; -/// -/// assert_eq!(parser(&b"\x00\x01\x02\x03\x04\x05\x06\x07\x00\x01\x02\x03\x04\x05\x06\x07abcefg"[..]), Ok((&b"abcefg"[..], 0x00010203040506070001020304050607))); -/// assert_eq!(parser(&b"\x01"[..]), Err(Err::Error((&[0x01][..], ErrorKind::Eof)))); -/// ``` -#[inline] -pub fn be_i128>(input: I) -> IResult -where - I: Slice> + InputIter + InputLength, -{ - be_u128.map(|x| x as i128).parse(input) -} - -/// Recognizes an unsigned 1 byte integer. -/// -/// *Complete version*: Returns an error if there is not enough input data. 
-/// ```rust -/// # use nom::{Err, error::ErrorKind, Needed}; -/// # use nom::Needed::Size; -/// use nom::number::complete::le_u8; -/// -/// let parser = |s| { -/// le_u8(s) -/// }; -/// -/// assert_eq!(parser(&b"\x00\x03abcefg"[..]), Ok((&b"\x03abcefg"[..], 0x00))); -/// assert_eq!(parser(&b""[..]), Err(Err::Error((&[][..], ErrorKind::Eof)))); -/// ``` -#[inline] -pub fn le_u8>(input: I) -> IResult -where - I: Slice> + InputIter + InputLength, -{ - let bound: usize = 1; - if input.input_len() < bound { - Err(Err::Error(make_error(input, ErrorKind::Eof))) - } else { - let res = input.iter_elements().next().unwrap(); - - Ok((input.slice(bound..), res)) - } -} - -/// Recognizes a little endian unsigned 2 bytes integer. -/// -/// *Complete version*: Returns an error if there is not enough input data. -/// ```rust -/// # use nom::{Err, error::ErrorKind, Needed}; -/// # use nom::Needed::Size; -/// use nom::number::complete::le_u16; -/// -/// let parser = |s| { -/// le_u16(s) -/// }; -/// -/// assert_eq!(parser(&b"\x00\x03abcefg"[..]), Ok((&b"abcefg"[..], 0x0300))); -/// assert_eq!(parser(&b"\x01"[..]), Err(Err::Error((&[0x01][..], ErrorKind::Eof)))); -/// ``` -#[inline] -pub fn le_u16>(input: I) -> IResult -where - I: Slice> + InputIter + InputLength, -{ - let bound: usize = 2; - if input.input_len() < bound { - Err(Err::Error(make_error(input, ErrorKind::Eof))) - } else { - let mut res = 0u16; - for (index, byte) in input.iter_indices().take(bound) { - res += (byte as u16) << (8 * index); - } - - Ok((input.slice(bound..), res)) - } -} - -/// Recognizes a little endian unsigned 3 byte integer. -/// -/// *Complete version*: Returns an error if there is not enough input data. -/// ```rust -/// # use nom::{Err, error::ErrorKind, Needed}; -/// # use nom::Needed::Size; -/// use nom::number::complete::le_u24; -/// -/// let parser = |s| { -/// le_u24(s) -/// }; -/// -/// assert_eq!(parser(&b"\x00\x03\x05abcefg"[..]), Ok((&b"abcefg"[..], 0x050300))); -/// assert_eq!(parser(&b"\x01"[..]), Err(Err::Error((&[0x01][..], ErrorKind::Eof)))); -/// ``` -#[inline] -pub fn le_u24>(input: I) -> IResult -where - I: Slice> + InputIter + InputLength, -{ - let bound: usize = 3; - if input.input_len() < bound { - Err(Err::Error(make_error(input, ErrorKind::Eof))) - } else { - let mut res = 0u32; - for (index, byte) in input.iter_indices().take(bound) { - res += (byte as u32) << (8 * index); - } - - Ok((input.slice(bound..), res)) - } -} - -/// Recognizes a little endian unsigned 4 bytes integer. -/// -/// *Complete version*: Returns an error if there is not enough input data. -/// ```rust -/// # use nom::{Err, error::ErrorKind, Needed}; -/// # use nom::Needed::Size; -/// use nom::number::complete::le_u32; -/// -/// let parser = |s| { -/// le_u32(s) -/// }; -/// -/// assert_eq!(parser(&b"\x00\x03\x05\x07abcefg"[..]), Ok((&b"abcefg"[..], 0x07050300))); -/// assert_eq!(parser(&b"\x01"[..]), Err(Err::Error((&[0x01][..], ErrorKind::Eof)))); -/// ``` -#[inline] -pub fn le_u32>(input: I) -> IResult -where - I: Slice> + InputIter + InputLength, -{ - let bound: usize = 4; - if input.input_len() < bound { - Err(Err::Error(make_error(input, ErrorKind::Eof))) - } else { - let mut res = 0u32; - for (index, byte) in input.iter_indices().take(bound) { - res += (byte as u32) << (8 * index); - } - - Ok((input.slice(bound..), res)) - } -} - -/// Recognizes a little endian unsigned 8 bytes integer. -/// -/// *Complete version*: Returns an error if there is not enough input data. 
-/// ```rust -/// # use nom::{Err, error::ErrorKind, Needed}; -/// # use nom::Needed::Size; -/// use nom::number::complete::le_u64; -/// -/// let parser = |s| { -/// le_u64(s) -/// }; -/// -/// assert_eq!(parser(&b"\x00\x01\x02\x03\x04\x05\x06\x07abcefg"[..]), Ok((&b"abcefg"[..], 0x0706050403020100))); -/// assert_eq!(parser(&b"\x01"[..]), Err(Err::Error((&[0x01][..], ErrorKind::Eof)))); -/// ``` -#[inline] -pub fn le_u64>(input: I) -> IResult -where - I: Slice> + InputIter + InputLength, -{ - let bound: usize = 8; - if input.input_len() < bound { - Err(Err::Error(make_error(input, ErrorKind::Eof))) - } else { - let mut res = 0u64; - for (index, byte) in input.iter_indices().take(bound) { - res += (byte as u64) << (8 * index); - } - - Ok((input.slice(bound..), res)) - } -} - -/// Recognizes a little endian unsigned 16 bytes integer. -/// -/// *Complete version*: Returns an error if there is not enough input data. -/// ```rust -/// # use nom::{Err, error::ErrorKind, Needed}; -/// # use nom::Needed::Size; -/// use nom::number::complete::le_u128; -/// -/// let parser = |s| { -/// le_u128(s) -/// }; -/// -/// assert_eq!(parser(&b"\x00\x01\x02\x03\x04\x05\x06\x07\x00\x01\x02\x03\x04\x05\x06\x07abcefg"[..]), Ok((&b"abcefg"[..], 0x07060504030201000706050403020100))); -/// assert_eq!(parser(&b"\x01"[..]), Err(Err::Error((&[0x01][..], ErrorKind::Eof)))); -/// ``` -#[inline] -pub fn le_u128>(input: I) -> IResult -where - I: Slice> + InputIter + InputLength, -{ - let bound: usize = 16; - if input.input_len() < bound { - Err(Err::Error(make_error(input, ErrorKind::Eof))) - } else { - let mut res = 0u128; - for (index, byte) in input.iter_indices().take(bound) { - res += (byte as u128) << (8 * index); - } - - Ok((input.slice(bound..), res)) - } -} - -/// Recognizes a signed 1 byte integer. -/// -/// *Complete version*: Returns an error if there is not enough input data. -/// ```rust -/// # use nom::{Err, error::ErrorKind, Needed}; -/// # use nom::Needed::Size; -/// use nom::number::complete::le_i8; -/// -/// let parser = |s| { -/// le_i8(s) -/// }; -/// -/// assert_eq!(parser(&b"\x00\x03abcefg"[..]), Ok((&b"\x03abcefg"[..], 0x00))); -/// assert_eq!(parser(&b""[..]), Err(Err::Error((&[][..], ErrorKind::Eof)))); -/// ``` -#[inline] -pub fn le_i8>(input: I) -> IResult -where - I: Slice> + InputIter + InputLength, -{ - be_u8.map(|x| x as i8).parse(input) -} - -/// Recognizes a little endian signed 2 bytes integer. -/// -/// *Complete version*: Returns an error if there is not enough input data. -/// ```rust -/// # use nom::{Err, error::ErrorKind, Needed}; -/// # use nom::Needed::Size; -/// use nom::number::complete::le_i16; -/// -/// let parser = |s| { -/// le_i16(s) -/// }; -/// -/// assert_eq!(parser(&b"\x00\x03abcefg"[..]), Ok((&b"abcefg"[..], 0x0300))); -/// assert_eq!(parser(&b"\x01"[..]), Err(Err::Error((&[0x01][..], ErrorKind::Eof)))); -/// ``` -#[inline] -pub fn le_i16>(input: I) -> IResult -where - I: Slice> + InputIter + InputLength, -{ - le_u16.map(|x| x as i16).parse(input) -} - -/// Recognizes a little endian signed 3 bytes integer. -/// -/// *Complete version*: Returns an error if there is not enough input data. 
-/// ```rust -/// # use nom::{Err, error::ErrorKind, Needed}; -/// # use nom::Needed::Size; -/// use nom::number::complete::le_i24; -/// -/// let parser = |s| { -/// le_i24(s) -/// }; -/// -/// assert_eq!(parser(&b"\x00\x03\x05abcefg"[..]), Ok((&b"abcefg"[..], 0x050300))); -/// assert_eq!(parser(&b"\x01"[..]), Err(Err::Error((&[0x01][..], ErrorKind::Eof)))); -/// ``` -#[inline] -pub fn le_i24>(input: I) -> IResult -where - I: Slice> + InputIter + InputLength, -{ - // Same as the unsigned version but we need to sign-extend manually here - le_u24 - .map(|x| { - if x & 0x80_00_00 != 0 { - (x | 0xff_00_00_00) as i32 - } else { - x as i32 - } - }) - .parse(input) -} - -/// Recognizes a little endian signed 4 bytes integer. -/// -/// *Complete version*: Returns an error if there is not enough input data. -/// ```rust -/// # use nom::{Err, error::ErrorKind, Needed}; -/// # use nom::Needed::Size; -/// use nom::number::complete::le_i32; -/// -/// let parser = |s| { -/// le_i32(s) -/// }; -/// -/// assert_eq!(parser(&b"\x00\x03\x05\x07abcefg"[..]), Ok((&b"abcefg"[..], 0x07050300))); -/// assert_eq!(parser(&b"\x01"[..]), Err(Err::Error((&[0x01][..], ErrorKind::Eof)))); -/// ``` -#[inline] -pub fn le_i32>(input: I) -> IResult -where - I: Slice> + InputIter + InputLength, -{ - le_u32.map(|x| x as i32).parse(input) -} - -/// Recognizes a little endian signed 8 bytes integer. -/// -/// *Complete version*: Returns an error if there is not enough input data. -/// ```rust -/// # use nom::{Err, error::ErrorKind, Needed}; -/// # use nom::Needed::Size; -/// use nom::number::complete::le_i64; -/// -/// let parser = |s| { -/// le_i64(s) -/// }; -/// -/// assert_eq!(parser(&b"\x00\x01\x02\x03\x04\x05\x06\x07abcefg"[..]), Ok((&b"abcefg"[..], 0x0706050403020100))); -/// assert_eq!(parser(&b"\x01"[..]), Err(Err::Error((&[0x01][..], ErrorKind::Eof)))); -/// ``` -#[inline] -pub fn le_i64>(input: I) -> IResult -where - I: Slice> + InputIter + InputLength, -{ - le_u64.map(|x| x as i64).parse(input) -} - -/// Recognizes a little endian signed 16 bytes integer. -/// -/// *Complete version*: Returns an error if there is not enough input data. -/// ```rust -/// # use nom::{Err, error::ErrorKind, Needed}; -/// # use nom::Needed::Size; -/// use nom::number::complete::le_i128; -/// -/// let parser = |s| { -/// le_i128(s) -/// }; -/// -/// assert_eq!(parser(&b"\x00\x01\x02\x03\x04\x05\x06\x07\x00\x01\x02\x03\x04\x05\x06\x07abcefg"[..]), Ok((&b"abcefg"[..], 0x07060504030201000706050403020100))); -/// assert_eq!(parser(&b"\x01"[..]), Err(Err::Error((&[0x01][..], ErrorKind::Eof)))); -/// ``` -#[inline] -pub fn le_i128>(input: I) -> IResult -where - I: Slice> + InputIter + InputLength, -{ - le_u128.map(|x| x as i128).parse(input) -} - -/// Recognizes an unsigned 1 byte integer -/// -/// Note that endianness does not apply to 1 byte numbers. 
-/// *complete version*: returns an error if there is not enough input data -/// ```rust -/// # use nom::{Err, error::ErrorKind, Needed}; -/// # use nom::Needed::Size; -/// use nom::number::complete::u8; -/// -/// let parser = |s| { -/// u8(s) -/// }; -/// -/// assert_eq!(parser(&b"\x00\x03abcefg"[..]), Ok((&b"\x03abcefg"[..], 0x00))); -/// assert_eq!(parser(&b""[..]), Err(Err::Error((&[][..], ErrorKind::Eof)))); -/// ``` -#[inline] -pub fn u8>(input: I) -> IResult -where - I: Slice> + InputIter + InputLength, -{ - let bound: usize = 1; - if input.input_len() < bound { - Err(Err::Error(make_error(input, ErrorKind::Eof))) - } else { - let res = input.iter_elements().next().unwrap(); - - Ok((input.slice(bound..), res)) - } -} - -/// Recognizes an unsigned 2 bytes integer -/// -/// If the parameter is `nom::number::Endianness::Big`, parse a big endian u16 integer, -/// otherwise if `nom::number::Endianness::Little` parse a little endian u16 integer. -/// *complete version*: returns an error if there is not enough input data -/// -/// ```rust -/// # use nom::{Err, error::ErrorKind, Needed}; -/// # use nom::Needed::Size; -/// use nom::number::complete::u16; -/// -/// let be_u16 = |s| { -/// u16(nom::number::Endianness::Big)(s) -/// }; -/// -/// assert_eq!(be_u16(&b"\x00\x03abcefg"[..]), Ok((&b"abcefg"[..], 0x0003))); -/// assert_eq!(be_u16(&b"\x01"[..]), Err(Err::Error((&[0x01][..], ErrorKind::Eof)))); -/// -/// let le_u16 = |s| { -/// u16(nom::number::Endianness::Little)(s) -/// }; -/// -/// assert_eq!(le_u16(&b"\x00\x03abcefg"[..]), Ok((&b"abcefg"[..], 0x0300))); -/// assert_eq!(le_u16(&b"\x01"[..]), Err(Err::Error((&[0x01][..], ErrorKind::Eof)))); -/// ``` -#[inline] -pub fn u16>(endian: crate::number::Endianness) -> fn(I) -> IResult -where - I: Slice> + InputIter + InputLength, -{ - match endian { - crate::number::Endianness::Big => be_u16, - crate::number::Endianness::Little => le_u16, - #[cfg(target_endian = "big")] - crate::number::Endianness::Native => be_u16, - #[cfg(target_endian = "little")] - crate::number::Endianness::Native => le_u16, - } -} - -/// Recognizes an unsigned 3 byte integer -/// -/// If the parameter is `nom::number::Endianness::Big`, parse a big endian u24 integer, -/// otherwise if `nom::number::Endianness::Little` parse a little endian u24 integer. 
-/// *complete version*: returns an error if there is not enough input data -/// ```rust -/// # use nom::{Err, error::ErrorKind, Needed}; -/// # use nom::Needed::Size; -/// use nom::number::complete::u24; -/// -/// let be_u24 = |s| { -/// u24(nom::number::Endianness::Big)(s) -/// }; -/// -/// assert_eq!(be_u24(&b"\x00\x03\x05abcefg"[..]), Ok((&b"abcefg"[..], 0x000305))); -/// assert_eq!(be_u24(&b"\x01"[..]), Err(Err::Error((&[0x01][..], ErrorKind::Eof)))); -/// -/// let le_u24 = |s| { -/// u24(nom::number::Endianness::Little)(s) -/// }; -/// -/// assert_eq!(le_u24(&b"\x00\x03\x05abcefg"[..]), Ok((&b"abcefg"[..], 0x050300))); -/// assert_eq!(le_u24(&b"\x01"[..]), Err(Err::Error((&[0x01][..], ErrorKind::Eof)))); -/// ``` -#[inline] -pub fn u24>(endian: crate::number::Endianness) -> fn(I) -> IResult -where - I: Slice> + InputIter + InputLength, -{ - match endian { - crate::number::Endianness::Big => be_u24, - crate::number::Endianness::Little => le_u24, - #[cfg(target_endian = "big")] - crate::number::Endianness::Native => be_u24, - #[cfg(target_endian = "little")] - crate::number::Endianness::Native => le_u24, - } -} - -/// Recognizes an unsigned 4 byte integer -/// -/// If the parameter is `nom::number::Endianness::Big`, parse a big endian u32 integer, -/// otherwise if `nom::number::Endianness::Little` parse a little endian u32 integer. -/// *complete version*: returns an error if there is not enough input data -/// ```rust -/// # use nom::{Err, error::ErrorKind, Needed}; -/// # use nom::Needed::Size; -/// use nom::number::complete::u32; -/// -/// let be_u32 = |s| { -/// u32(nom::number::Endianness::Big)(s) -/// }; -/// -/// assert_eq!(be_u32(&b"\x00\x03\x05\x07abcefg"[..]), Ok((&b"abcefg"[..], 0x00030507))); -/// assert_eq!(be_u32(&b"\x01"[..]), Err(Err::Error((&[0x01][..], ErrorKind::Eof)))); -/// -/// let le_u32 = |s| { -/// u32(nom::number::Endianness::Little)(s) -/// }; -/// -/// assert_eq!(le_u32(&b"\x00\x03\x05\x07abcefg"[..]), Ok((&b"abcefg"[..], 0x07050300))); -/// assert_eq!(le_u32(&b"\x01"[..]), Err(Err::Error((&[0x01][..], ErrorKind::Eof)))); -/// ``` -#[inline] -pub fn u32>(endian: crate::number::Endianness) -> fn(I) -> IResult -where - I: Slice> + InputIter + InputLength, -{ - match endian { - crate::number::Endianness::Big => be_u32, - crate::number::Endianness::Little => le_u32, - #[cfg(target_endian = "big")] - crate::number::Endianness::Native => be_u32, - #[cfg(target_endian = "little")] - crate::number::Endianness::Native => le_u32, - } -} - -/// Recognizes an unsigned 8 byte integer -/// -/// If the parameter is `nom::number::Endianness::Big`, parse a big endian u64 integer, -/// otherwise if `nom::number::Endianness::Little` parse a little endian u64 integer. 
-/// *complete version*: returns an error if there is not enough input data -/// ```rust -/// # use nom::{Err, error::ErrorKind, Needed}; -/// # use nom::Needed::Size; -/// use nom::number::complete::u64; -/// -/// let be_u64 = |s| { -/// u64(nom::number::Endianness::Big)(s) -/// }; -/// -/// assert_eq!(be_u64(&b"\x00\x01\x02\x03\x04\x05\x06\x07abcefg"[..]), Ok((&b"abcefg"[..], 0x0001020304050607))); -/// assert_eq!(be_u64(&b"\x01"[..]), Err(Err::Error((&[0x01][..], ErrorKind::Eof)))); -/// -/// let le_u64 = |s| { -/// u64(nom::number::Endianness::Little)(s) -/// }; -/// -/// assert_eq!(le_u64(&b"\x00\x01\x02\x03\x04\x05\x06\x07abcefg"[..]), Ok((&b"abcefg"[..], 0x0706050403020100))); -/// assert_eq!(le_u64(&b"\x01"[..]), Err(Err::Error((&[0x01][..], ErrorKind::Eof)))); -/// ``` -#[inline] -pub fn u64>(endian: crate::number::Endianness) -> fn(I) -> IResult -where - I: Slice> + InputIter + InputLength, -{ - match endian { - crate::number::Endianness::Big => be_u64, - crate::number::Endianness::Little => le_u64, - #[cfg(target_endian = "big")] - crate::number::Endianness::Native => be_u64, - #[cfg(target_endian = "little")] - crate::number::Endianness::Native => le_u64, - } -} - -/// Recognizes an unsigned 16 byte integer -/// -/// If the parameter is `nom::number::Endianness::Big`, parse a big endian u128 integer, -/// otherwise if `nom::number::Endianness::Little` parse a little endian u128 integer. -/// *complete version*: returns an error if there is not enough input data -/// ```rust -/// # use nom::{Err, error::ErrorKind, Needed}; -/// # use nom::Needed::Size; -/// use nom::number::complete::u128; -/// -/// let be_u128 = |s| { -/// u128(nom::number::Endianness::Big)(s) -/// }; -/// -/// assert_eq!(be_u128(&b"\x00\x01\x02\x03\x04\x05\x06\x07\x00\x01\x02\x03\x04\x05\x06\x07abcefg"[..]), Ok((&b"abcefg"[..], 0x00010203040506070001020304050607))); -/// assert_eq!(be_u128(&b"\x01"[..]), Err(Err::Error((&[0x01][..], ErrorKind::Eof)))); -/// -/// let le_u128 = |s| { -/// u128(nom::number::Endianness::Little)(s) -/// }; -/// -/// assert_eq!(le_u128(&b"\x00\x01\x02\x03\x04\x05\x06\x07\x00\x01\x02\x03\x04\x05\x06\x07abcefg"[..]), Ok((&b"abcefg"[..], 0x07060504030201000706050403020100))); -/// assert_eq!(le_u128(&b"\x01"[..]), Err(Err::Error((&[0x01][..], ErrorKind::Eof)))); -/// ``` -#[inline] -pub fn u128>(endian: crate::number::Endianness) -> fn(I) -> IResult -where - I: Slice> + InputIter + InputLength, -{ - match endian { - crate::number::Endianness::Big => be_u128, - crate::number::Endianness::Little => le_u128, - #[cfg(target_endian = "big")] - crate::number::Endianness::Native => be_u128, - #[cfg(target_endian = "little")] - crate::number::Endianness::Native => le_u128, - } -} - -/// Recognizes a signed 1 byte integer -/// -/// Note that endianness does not apply to 1 byte numbers. 
-/// *complete version*: returns an error if there is not enough input data -/// ```rust -/// # use nom::{Err, error::ErrorKind, Needed}; -/// # use nom::Needed::Size; -/// use nom::number::complete::i8; -/// -/// let parser = |s| { -/// i8(s) -/// }; -/// -/// assert_eq!(parser(&b"\x00\x03abcefg"[..]), Ok((&b"\x03abcefg"[..], 0x00))); -/// assert_eq!(parser(&b""[..]), Err(Err::Error((&[][..], ErrorKind::Eof)))); -/// ``` -#[inline] -pub fn i8>(i: I) -> IResult -where - I: Slice> + InputIter + InputLength, -{ - u8.map(|x| x as i8).parse(i) -} - -/// Recognizes a signed 2 byte integer -/// -/// If the parameter is `nom::number::Endianness::Big`, parse a big endian i16 integer, -/// otherwise if `nom::number::Endianness::Little` parse a little endian i16 integer. -/// *complete version*: returns an error if there is not enough input data -/// ```rust -/// # use nom::{Err, error::ErrorKind, Needed}; -/// # use nom::Needed::Size; -/// use nom::number::complete::i16; -/// -/// let be_i16 = |s| { -/// i16(nom::number::Endianness::Big)(s) -/// }; -/// -/// assert_eq!(be_i16(&b"\x00\x03abcefg"[..]), Ok((&b"abcefg"[..], 0x0003))); -/// assert_eq!(be_i16(&b"\x01"[..]), Err(Err::Error((&[0x01][..], ErrorKind::Eof)))); -/// -/// let le_i16 = |s| { -/// i16(nom::number::Endianness::Little)(s) -/// }; -/// -/// assert_eq!(le_i16(&b"\x00\x03abcefg"[..]), Ok((&b"abcefg"[..], 0x0300))); -/// assert_eq!(le_i16(&b"\x01"[..]), Err(Err::Error((&[0x01][..], ErrorKind::Eof)))); -/// ``` -#[inline] -pub fn i16>(endian: crate::number::Endianness) -> fn(I) -> IResult -where - I: Slice> + InputIter + InputLength, -{ - match endian { - crate::number::Endianness::Big => be_i16, - crate::number::Endianness::Little => le_i16, - #[cfg(target_endian = "big")] - crate::number::Endianness::Native => be_i16, - #[cfg(target_endian = "little")] - crate::number::Endianness::Native => le_i16, - } -} - -/// Recognizes a signed 3 byte integer -/// -/// If the parameter is `nom::number::Endianness::Big`, parse a big endian i24 integer, -/// otherwise if `nom::number::Endianness::Little` parse a little endian i24 integer. -/// *complete version*: returns an error if there is not enough input data -/// ```rust -/// # use nom::{Err, error::ErrorKind, Needed}; -/// # use nom::Needed::Size; -/// use nom::number::complete::i24; -/// -/// let be_i24 = |s| { -/// i24(nom::number::Endianness::Big)(s) -/// }; -/// -/// assert_eq!(be_i24(&b"\x00\x03\x05abcefg"[..]), Ok((&b"abcefg"[..], 0x000305))); -/// assert_eq!(be_i24(&b"\x01"[..]), Err(Err::Error((&[0x01][..], ErrorKind::Eof)))); -/// -/// let le_i24 = |s| { -/// i24(nom::number::Endianness::Little)(s) -/// }; -/// -/// assert_eq!(le_i24(&b"\x00\x03\x05abcefg"[..]), Ok((&b"abcefg"[..], 0x050300))); -/// assert_eq!(le_i24(&b"\x01"[..]), Err(Err::Error((&[0x01][..], ErrorKind::Eof)))); -/// ``` -#[inline] -pub fn i24>(endian: crate::number::Endianness) -> fn(I) -> IResult -where - I: Slice> + InputIter + InputLength, -{ - match endian { - crate::number::Endianness::Big => be_i24, - crate::number::Endianness::Little => le_i24, - #[cfg(target_endian = "big")] - crate::number::Endianness::Native => be_i24, - #[cfg(target_endian = "little")] - crate::number::Endianness::Native => le_i24, - } -} - -/// Recognizes a signed 4 byte integer -/// -/// If the parameter is `nom::number::Endianness::Big`, parse a big endian i32 integer, -/// otherwise if `nom::number::Endianness::Little` parse a little endian i32 integer. 
-/// *complete version*: returns an error if there is not enough input data -/// ```rust -/// # use nom::{Err, error::ErrorKind, Needed}; -/// # use nom::Needed::Size; -/// use nom::number::complete::i32; -/// -/// let be_i32 = |s| { -/// i32(nom::number::Endianness::Big)(s) -/// }; -/// -/// assert_eq!(be_i32(&b"\x00\x03\x05\x07abcefg"[..]), Ok((&b"abcefg"[..], 0x00030507))); -/// assert_eq!(be_i32(&b"\x01"[..]), Err(Err::Error((&[0x01][..], ErrorKind::Eof)))); -/// -/// let le_i32 = |s| { -/// i32(nom::number::Endianness::Little)(s) -/// }; -/// -/// assert_eq!(le_i32(&b"\x00\x03\x05\x07abcefg"[..]), Ok((&b"abcefg"[..], 0x07050300))); -/// assert_eq!(le_i32(&b"\x01"[..]), Err(Err::Error((&[0x01][..], ErrorKind::Eof)))); -/// ``` -#[inline] -pub fn i32>(endian: crate::number::Endianness) -> fn(I) -> IResult -where - I: Slice> + InputIter + InputLength, -{ - match endian { - crate::number::Endianness::Big => be_i32, - crate::number::Endianness::Little => le_i32, - #[cfg(target_endian = "big")] - crate::number::Endianness::Native => be_i32, - #[cfg(target_endian = "little")] - crate::number::Endianness::Native => le_i32, - } -} - -/// Recognizes a signed 8 byte integer -/// -/// If the parameter is `nom::number::Endianness::Big`, parse a big endian i64 integer, -/// otherwise if `nom::number::Endianness::Little` parse a little endian i64 integer. -/// *complete version*: returns an error if there is not enough input data -/// ```rust -/// # use nom::{Err, error::ErrorKind, Needed}; -/// # use nom::Needed::Size; -/// use nom::number::complete::i64; -/// -/// let be_i64 = |s| { -/// i64(nom::number::Endianness::Big)(s) -/// }; -/// -/// assert_eq!(be_i64(&b"\x00\x01\x02\x03\x04\x05\x06\x07abcefg"[..]), Ok((&b"abcefg"[..], 0x0001020304050607))); -/// assert_eq!(be_i64(&b"\x01"[..]), Err(Err::Error((&[0x01][..], ErrorKind::Eof)))); -/// -/// let le_i64 = |s| { -/// i64(nom::number::Endianness::Little)(s) -/// }; -/// -/// assert_eq!(le_i64(&b"\x00\x01\x02\x03\x04\x05\x06\x07abcefg"[..]), Ok((&b"abcefg"[..], 0x0706050403020100))); -/// assert_eq!(le_i64(&b"\x01"[..]), Err(Err::Error((&[0x01][..], ErrorKind::Eof)))); -/// ``` -#[inline] -pub fn i64>(endian: crate::number::Endianness) -> fn(I) -> IResult -where - I: Slice> + InputIter + InputLength, -{ - match endian { - crate::number::Endianness::Big => be_i64, - crate::number::Endianness::Little => le_i64, - #[cfg(target_endian = "big")] - crate::number::Endianness::Native => be_i64, - #[cfg(target_endian = "little")] - crate::number::Endianness::Native => le_i64, - } -} - -/// Recognizes a signed 16 byte integer -/// -/// If the parameter is `nom::number::Endianness::Big`, parse a big endian i128 integer, -/// otherwise if `nom::number::Endianness::Little` parse a little endian i128 integer. 
-/// *complete version*: returns an error if there is not enough input data -/// ```rust -/// # use nom::{Err, error::ErrorKind, Needed}; -/// # use nom::Needed::Size; -/// use nom::number::complete::i128; -/// -/// let be_i128 = |s| { -/// i128(nom::number::Endianness::Big)(s) -/// }; -/// -/// assert_eq!(be_i128(&b"\x00\x01\x02\x03\x04\x05\x06\x07\x00\x01\x02\x03\x04\x05\x06\x07abcefg"[..]), Ok((&b"abcefg"[..], 0x00010203040506070001020304050607))); -/// assert_eq!(be_i128(&b"\x01"[..]), Err(Err::Error((&[0x01][..], ErrorKind::Eof)))); -/// -/// let le_i128 = |s| { -/// i128(nom::number::Endianness::Little)(s) -/// }; -/// -/// assert_eq!(le_i128(&b"\x00\x01\x02\x03\x04\x05\x06\x07\x00\x01\x02\x03\x04\x05\x06\x07abcefg"[..]), Ok((&b"abcefg"[..], 0x07060504030201000706050403020100))); -/// assert_eq!(le_i128(&b"\x01"[..]), Err(Err::Error((&[0x01][..], ErrorKind::Eof)))); -/// ``` -#[inline] -pub fn i128>(endian: crate::number::Endianness) -> fn(I) -> IResult -where - I: Slice> + InputIter + InputLength, -{ - match endian { - crate::number::Endianness::Big => be_i128, - crate::number::Endianness::Little => le_i128, - #[cfg(target_endian = "big")] - crate::number::Endianness::Native => be_i128, - #[cfg(target_endian = "little")] - crate::number::Endianness::Native => le_i128, - } -} - -/// Recognizes a big endian 4 bytes floating point number. -/// -/// *Complete version*: Returns an error if there is not enough input data. -/// ```rust -/// # use nom::{Err, error::ErrorKind, Needed}; -/// # use nom::Needed::Size; -/// use nom::number::complete::be_f32; -/// -/// let parser = |s| { -/// be_f32(s) -/// }; -/// -/// assert_eq!(parser(&[0x41, 0x48, 0x00, 0x00][..]), Ok((&b""[..], 12.5))); -/// assert_eq!(parser(&b"abc"[..]), Err(Err::Error((&b"abc"[..], ErrorKind::Eof)))); -/// ``` -#[inline] -pub fn be_f32>(input: I) -> IResult -where - I: Slice> + InputIter + InputLength, -{ - match be_u32(input) { - Err(e) => Err(e), - Ok((i, o)) => Ok((i, f32::from_bits(o))), - } -} - -/// Recognizes a big endian 8 bytes floating point number. -/// -/// *Complete version*: Returns an error if there is not enough input data. -/// ```rust -/// # use nom::{Err, error::ErrorKind, Needed}; -/// # use nom::Needed::Size; -/// use nom::number::complete::be_f64; -/// -/// let parser = |s| { -/// be_f64(s) -/// }; -/// -/// assert_eq!(parser(&[0x40, 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00][..]), Ok((&b""[..], 12.5))); -/// assert_eq!(parser(&b"abc"[..]), Err(Err::Error((&b"abc"[..], ErrorKind::Eof)))); -/// ``` -#[inline] -pub fn be_f64>(input: I) -> IResult -where - I: Slice> + InputIter + InputLength, -{ - match be_u64(input) { - Err(e) => Err(e), - Ok((i, o)) => Ok((i, f64::from_bits(o))), - } -} - -/// Recognizes a little endian 4 bytes floating point number. -/// -/// *Complete version*: Returns an error if there is not enough input data. -/// ```rust -/// # use nom::{Err, error::ErrorKind, Needed}; -/// # use nom::Needed::Size; -/// use nom::number::complete::le_f32; -/// -/// let parser = |s| { -/// le_f32(s) -/// }; -/// -/// assert_eq!(parser(&[0x00, 0x00, 0x48, 0x41][..]), Ok((&b""[..], 12.5))); -/// assert_eq!(parser(&b"abc"[..]), Err(Err::Error((&b"abc"[..], ErrorKind::Eof)))); -/// ``` -#[inline] -pub fn le_f32>(input: I) -> IResult -where - I: Slice> + InputIter + InputLength, -{ - match le_u32(input) { - Err(e) => Err(e), - Ok((i, o)) => Ok((i, f32::from_bits(o))), - } -} - -/// Recognizes a little endian 8 bytes floating point number. 
-/// -/// *Complete version*: Returns an error if there is not enough input data. -/// ```rust -/// # use nom::{Err, error::ErrorKind, Needed}; -/// # use nom::Needed::Size; -/// use nom::number::complete::le_f64; -/// -/// let parser = |s| { -/// le_f64(s) -/// }; -/// -/// assert_eq!(parser(&[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x29, 0x40][..]), Ok((&b""[..], 12.5))); -/// assert_eq!(parser(&b"abc"[..]), Err(Err::Error((&b"abc"[..], ErrorKind::Eof)))); -/// ``` -#[inline] -pub fn le_f64>(input: I) -> IResult -where - I: Slice> + InputIter + InputLength, -{ - match le_u64(input) { - Err(e) => Err(e), - Ok((i, o)) => Ok((i, f64::from_bits(o))), - } -} - -/// Recognizes a 4 byte floating point number -/// -/// If the parameter is `nom::number::Endianness::Big`, parse a big endian f32 float, -/// otherwise if `nom::number::Endianness::Little` parse a little endian f32 float. -/// *complete version*: returns an error if there is not enough input data -/// ```rust -/// # use nom::{Err, error::ErrorKind, Needed}; -/// # use nom::Needed::Size; -/// use nom::number::complete::f32; -/// -/// let be_f32 = |s| { -/// f32(nom::number::Endianness::Big)(s) -/// }; -/// -/// assert_eq!(be_f32(&[0x41, 0x48, 0x00, 0x00][..]), Ok((&b""[..], 12.5))); -/// assert_eq!(be_f32(&b"abc"[..]), Err(Err::Error((&b"abc"[..], ErrorKind::Eof)))); -/// -/// let le_f32 = |s| { -/// f32(nom::number::Endianness::Little)(s) -/// }; -/// -/// assert_eq!(le_f32(&[0x00, 0x00, 0x48, 0x41][..]), Ok((&b""[..], 12.5))); -/// assert_eq!(le_f32(&b"abc"[..]), Err(Err::Error((&b"abc"[..], ErrorKind::Eof)))); -/// ``` -#[inline] -pub fn f32>(endian: crate::number::Endianness) -> fn(I) -> IResult -where - I: Slice> + InputIter + InputLength, -{ - match endian { - crate::number::Endianness::Big => be_f32, - crate::number::Endianness::Little => le_f32, - #[cfg(target_endian = "big")] - crate::number::Endianness::Native => be_f32, - #[cfg(target_endian = "little")] - crate::number::Endianness::Native => le_f32, - } -} - -/// Recognizes an 8 byte floating point number -/// -/// If the parameter is `nom::number::Endianness::Big`, parse a big endian f64 float, -/// otherwise if `nom::number::Endianness::Little` parse a little endian f64 float. -/// *complete version*: returns an error if there is not enough input data -/// ```rust -/// # use nom::{Err, error::ErrorKind, Needed}; -/// # use nom::Needed::Size; -/// use nom::number::complete::f64; -/// -/// let be_f64 = |s| { -/// f64(nom::number::Endianness::Big)(s) -/// }; -/// -/// assert_eq!(be_f64(&[0x40, 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00][..]), Ok((&b""[..], 12.5))); -/// assert_eq!(be_f64(&b"abc"[..]), Err(Err::Error((&b"abc"[..], ErrorKind::Eof)))); -/// -/// let le_f64 = |s| { -/// f64(nom::number::Endianness::Little)(s) -/// }; -/// -/// assert_eq!(le_f64(&[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x29, 0x40][..]), Ok((&b""[..], 12.5))); -/// assert_eq!(le_f64(&b"abc"[..]), Err(Err::Error((&b"abc"[..], ErrorKind::Eof)))); -/// ``` -#[inline] -pub fn f64>(endian: crate::number::Endianness) -> fn(I) -> IResult -where - I: Slice> + InputIter + InputLength, -{ - match endian { - crate::number::Endianness::Big => be_f64, - crate::number::Endianness::Little => le_f64, - #[cfg(target_endian = "big")] - crate::number::Endianness::Native => be_f64, - #[cfg(target_endian = "little")] - crate::number::Endianness::Native => le_f64, - } -} - -/// Recognizes a hex-encoded integer. -/// -/// *Complete version*: Will parse until the end of input if it has less than 8 bytes. 
-/// ```rust -/// # use nom::{Err, error::ErrorKind, Needed}; -/// # use nom::Needed::Size; -/// use nom::number::complete::hex_u32; -/// -/// let parser = |s| { -/// hex_u32(s) -/// }; -/// -/// assert_eq!(parser(&b"01AE"[..]), Ok((&b""[..], 0x01AE))); -/// assert_eq!(parser(&b"abc"[..]), Ok((&b""[..], 0x0ABC))); -/// assert_eq!(parser(&b"ggg"[..]), Err(Err::Error((&b"ggg"[..], ErrorKind::IsA)))); -/// ``` -#[inline] -pub fn hex_u32<'a, E: ParseError<&'a [u8]>>(input: &'a [u8]) -> IResult<&'a [u8], u32, E> { - let (i, o) = crate::bytes::complete::is_a(&b"0123456789abcdefABCDEF"[..])(input)?; - // Do not parse more than 8 characters for a u32 - let (parsed, remaining) = if o.len() <= 8 { - (o, i) - } else { - (&input[..8], &input[8..]) - }; - - let res = parsed - .iter() - .rev() - .enumerate() - .map(|(k, &v)| { - let digit = v as char; - digit.to_digit(16).unwrap_or(0) << (k * 4) - }) - .sum(); - - Ok((remaining, res)) -} - -/// Recognizes floating point number in a byte string and returns the corresponding slice. -/// -/// *Complete version*: Can parse until the end of input. -/// -/// ```rust -/// # use nom::{Err, error::ErrorKind, Needed}; -/// # use nom::Needed::Size; -/// use nom::number::complete::recognize_float; -/// -/// let parser = |s| { -/// recognize_float(s) -/// }; -/// -/// assert_eq!(parser("11e-1"), Ok(("", "11e-1"))); -/// assert_eq!(parser("123E-02"), Ok(("", "123E-02"))); -/// assert_eq!(parser("123K-01"), Ok(("K-01", "123"))); -/// assert_eq!(parser("abc"), Err(Err::Error(("abc", ErrorKind::Char)))); -/// ``` -#[rustfmt::skip] -pub fn recognize_float>(input: T) -> IResult -where - T: Slice> + Slice>, - T: Clone + Offset, - T: InputIter, - ::Item: AsChar, - T: InputTakeAtPosition, - ::Item: AsChar, -{ - recognize( - tuple(( - opt(alt((char('+'), char('-')))), - alt(( - map(tuple((digit1, opt(pair(char('.'), opt(digit1))))), |_| ()), - map(tuple((char('.'), digit1)), |_| ()) - )), - opt(tuple(( - alt((char('e'), char('E'))), - opt(alt((char('+'), char('-')))), - cut(digit1) - ))) - )) - )(input) -} - -// workaround until issues with minimal-lexical are fixed -#[doc(hidden)] -pub fn recognize_float_or_exceptions>(input: T) -> IResult -where - T: Slice> + Slice>, - T: Clone + Offset, - T: InputIter + InputTake + Compare<&'static str>, - ::Item: AsChar, - T: InputTakeAtPosition, - ::Item: AsChar, -{ - alt(( - |i: T| { - recognize_float::<_, E>(i.clone()).map_err(|e| match e { - crate::Err::Error(_) => crate::Err::Error(E::from_error_kind(i, ErrorKind::Float)), - crate::Err::Failure(_) => crate::Err::Failure(E::from_error_kind(i, ErrorKind::Float)), - crate::Err::Incomplete(needed) => crate::Err::Incomplete(needed), - }) - }, - |i: T| { - crate::bytes::complete::tag_no_case::<_, _, E>("nan")(i.clone()) - .map_err(|_| crate::Err::Error(E::from_error_kind(i, ErrorKind::Float))) - }, - |i: T| { - crate::bytes::complete::tag_no_case::<_, _, E>("inf")(i.clone()) - .map_err(|_| crate::Err::Error(E::from_error_kind(i, ErrorKind::Float))) - }, - |i: T| { - crate::bytes::complete::tag_no_case::<_, _, E>("infinity")(i.clone()) - .map_err(|_| crate::Err::Error(E::from_error_kind(i, ErrorKind::Float))) - }, - ))(input) -} - -/// Recognizes a floating point number in text format -/// -/// It returns a tuple of (`sign`, `integer part`, `fraction part` and `exponent`) of the input -/// data. -/// -/// *Complete version*: Can parse until the end of input. 
-/// -pub fn recognize_float_parts>(input: T) -> IResult -where - T: Slice> + Slice> + Slice>, - T: Clone + Offset, - T: InputIter + InputTake, - ::Item: AsChar + Copy, - T: InputTakeAtPosition + InputLength, - ::Item: AsChar, - T: for<'a> Compare<&'a [u8]>, - T: AsBytes, -{ - let (i, sign) = sign(input.clone())?; - - //let (i, zeroes) = take_while(|c: ::Item| c.as_char() == '0')(i)?; - let (i, zeroes) = match i.as_bytes().iter().position(|c| *c != b'0') { - Some(index) => i.take_split(index), - None => i.take_split(i.input_len()), - }; - //let (i, mut integer) = digit0(i)?; - let (i, mut integer) = match i - .as_bytes() - .iter() - .position(|c| !(*c >= b'0' && *c <= b'9')) - { - Some(index) => i.take_split(index), - None => i.take_split(i.input_len()), - }; - - if integer.input_len() == 0 && zeroes.input_len() > 0 { - // keep the last zero if integer is empty - integer = zeroes.slice(zeroes.input_len() - 1..); - } - - let (i, opt_dot) = opt(tag(&b"."[..]))(i)?; - let (i, fraction) = if opt_dot.is_none() { - let i2 = i.clone(); - (i2, i.slice(..0)) - } else { - // match number, trim right zeroes - let mut zero_count = 0usize; - let mut position = None; - for (pos, c) in i.as_bytes().iter().enumerate() { - if *c >= b'0' && *c <= b'9' { - if *c == b'0' { - zero_count += 1; - } else { - zero_count = 0; - } - } else { - position = Some(pos); - break; - } - } - - let position = position.unwrap_or(i.input_len()); - - let index = if zero_count == 0 { - position - } else if zero_count == position { - position - zero_count + 1 - } else { - position - zero_count - }; - - (i.slice(position..), i.slice(..index)) - }; - - if integer.input_len() == 0 && fraction.input_len() == 0 { - return Err(Err::Error(E::from_error_kind(input, ErrorKind::Float))); - } - - let i2 = i.clone(); - let (i, e) = match i.as_bytes().iter().next() { - Some(b'e') => (i.slice(1..), true), - Some(b'E') => (i.slice(1..), true), - _ => (i, false), - }; - - let (i, exp) = if e { - cut(crate::character::complete::i32)(i)? - } else { - (i2, 0) - }; - - Ok((i, (sign, integer, fraction, exp))) -} - -use crate::traits::ParseTo; - -/// Recognizes floating point number in text format and returns a f32. -/// -/// *Complete version*: Can parse until the end of input. -/// ```rust -/// # use nom::{Err, error::ErrorKind, Needed}; -/// # use nom::Needed::Size; -/// use nom::number::complete::float; -/// -/// let parser = |s| { -/// float(s) -/// }; -/// -/// assert_eq!(parser("11e-1"), Ok(("", 1.1))); -/// assert_eq!(parser("123E-02"), Ok(("", 1.23))); -/// assert_eq!(parser("123K-01"), Ok(("K-01", 123.0))); -/// assert_eq!(parser("abc"), Err(Err::Error(("abc", ErrorKind::Float)))); -/// ``` -pub fn float>(input: T) -> IResult -where - T: Slice> + Slice> + Slice>, - T: Clone + Offset + ParseTo + Compare<&'static str>, - T: InputIter + InputLength + InputTake, - ::Item: AsChar + Copy, - ::IterElem: Clone, - T: InputTakeAtPosition, - ::Item: AsChar, - T: AsBytes, - T: for<'a> Compare<&'a [u8]>, -{ - /* - let (i, (sign, integer, fraction, exponent)) = recognize_float_parts(input)?; - - let mut float: f32 = minimal_lexical::parse_float( - integer.as_bytes().iter(), - fraction.as_bytes().iter(), - exponent, - ); - if !sign { - float = -float; - } - - Ok((i, float)) - */ - let (i, s) = recognize_float_or_exceptions(input)?; - match s.parse_to() { - Some(f) => Ok((i, f)), - None => Err(crate::Err::Error(E::from_error_kind( - i, - crate::error::ErrorKind::Float, - ))), - } -} - -/// Recognizes floating point number in text format and returns a f64. 
-/// -/// *Complete version*: Can parse until the end of input. -/// ```rust -/// # use nom::{Err, error::ErrorKind, Needed}; -/// # use nom::Needed::Size; -/// use nom::number::complete::double; -/// -/// let parser = |s| { -/// double(s) -/// }; -/// -/// assert_eq!(parser("11e-1"), Ok(("", 1.1))); -/// assert_eq!(parser("123E-02"), Ok(("", 1.23))); -/// assert_eq!(parser("123K-01"), Ok(("K-01", 123.0))); -/// assert_eq!(parser("abc"), Err(Err::Error(("abc", ErrorKind::Float)))); -/// ``` -pub fn double>(input: T) -> IResult -where - T: Slice> + Slice> + Slice>, - T: Clone + Offset + ParseTo + Compare<&'static str>, - T: InputIter + InputLength + InputTake, - ::Item: AsChar + Copy, - ::IterElem: Clone, - T: InputTakeAtPosition, - ::Item: AsChar, - T: AsBytes, - T: for<'a> Compare<&'a [u8]>, -{ - /* - let (i, (sign, integer, fraction, exponent)) = recognize_float_parts(input)?; - - let mut float: f64 = minimal_lexical::parse_float( - integer.as_bytes().iter(), - fraction.as_bytes().iter(), - exponent, - ); - if !sign { - float = -float; - } - - Ok((i, float)) - */ - let (i, s) = recognize_float_or_exceptions(input)?; - match s.parse_to() { - Some(f) => Ok((i, f)), - None => Err(crate::Err::Error(E::from_error_kind( - i, - crate::error::ErrorKind::Float, - ))), - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::error::ErrorKind; - use crate::internal::Err; - use proptest::prelude::*; - - macro_rules! assert_parse( - ($left: expr, $right: expr) => { - let res: $crate::IResult<_, _, (_, ErrorKind)> = $left; - assert_eq!(res, $right); - }; - ); - - #[test] - fn i8_tests() { - assert_parse!(i8(&[0x00][..]), Ok((&b""[..], 0))); - assert_parse!(i8(&[0x7f][..]), Ok((&b""[..], 127))); - assert_parse!(i8(&[0xff][..]), Ok((&b""[..], -1))); - assert_parse!(i8(&[0x80][..]), Ok((&b""[..], -128))); - } - - #[test] - fn be_i8_tests() { - assert_parse!(be_i8(&[0x00][..]), Ok((&b""[..], 0))); - assert_parse!(be_i8(&[0x7f][..]), Ok((&b""[..], 127))); - assert_parse!(be_i8(&[0xff][..]), Ok((&b""[..], -1))); - assert_parse!(be_i8(&[0x80][..]), Ok((&b""[..], -128))); - } - - #[test] - fn be_i16_tests() { - assert_parse!(be_i16(&[0x00, 0x00][..]), Ok((&b""[..], 0))); - assert_parse!(be_i16(&[0x7f, 0xff][..]), Ok((&b""[..], 32_767_i16))); - assert_parse!(be_i16(&[0xff, 0xff][..]), Ok((&b""[..], -1))); - assert_parse!(be_i16(&[0x80, 0x00][..]), Ok((&b""[..], -32_768_i16))); - } - - #[test] - fn be_u24_tests() { - assert_parse!(be_u24(&[0x00, 0x00, 0x00][..]), Ok((&b""[..], 0))); - assert_parse!(be_u24(&[0x00, 0xFF, 0xFF][..]), Ok((&b""[..], 65_535_u32))); - assert_parse!( - be_u24(&[0x12, 0x34, 0x56][..]), - Ok((&b""[..], 1_193_046_u32)) - ); - } - - #[test] - fn be_i24_tests() { - assert_parse!(be_i24(&[0xFF, 0xFF, 0xFF][..]), Ok((&b""[..], -1_i32))); - assert_parse!(be_i24(&[0xFF, 0x00, 0x00][..]), Ok((&b""[..], -65_536_i32))); - assert_parse!( - be_i24(&[0xED, 0xCB, 0xAA][..]), - Ok((&b""[..], -1_193_046_i32)) - ); - } - - #[test] - fn be_i32_tests() { - assert_parse!(be_i32(&[0x00, 0x00, 0x00, 0x00][..]), Ok((&b""[..], 0))); - assert_parse!( - be_i32(&[0x7f, 0xff, 0xff, 0xff][..]), - Ok((&b""[..], 2_147_483_647_i32)) - ); - assert_parse!(be_i32(&[0xff, 0xff, 0xff, 0xff][..]), Ok((&b""[..], -1))); - assert_parse!( - be_i32(&[0x80, 0x00, 0x00, 0x00][..]), - Ok((&b""[..], -2_147_483_648_i32)) - ); - } - - #[test] - fn be_i64_tests() { - assert_parse!( - be_i64(&[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00][..]), - Ok((&b""[..], 0)) - ); - assert_parse!( - be_i64(&[0x7f, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff][..]), - Ok((&b""[..], 9_223_372_036_854_775_807_i64)) - ); - assert_parse!( - be_i64(&[0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff][..]), - Ok((&b""[..], -1)) - ); - assert_parse!( - be_i64(&[0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00][..]), - Ok((&b""[..], -9_223_372_036_854_775_808_i64)) - ); - } - - #[test] - fn be_i128_tests() { - assert_parse!( - be_i128( - &[ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00 - ][..] - ), - Ok((&b""[..], 0)) - ); - assert_parse!( - be_i128( - &[ - 0x7f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff - ][..] - ), - Ok(( - &b""[..], - 170_141_183_460_469_231_731_687_303_715_884_105_727_i128 - )) - ); - assert_parse!( - be_i128( - &[ - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff - ][..] - ), - Ok((&b""[..], -1)) - ); - assert_parse!( - be_i128( - &[ - 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00 - ][..] - ), - Ok(( - &b""[..], - -170_141_183_460_469_231_731_687_303_715_884_105_728_i128 - )) - ); - } - - #[test] - fn le_i8_tests() { - assert_parse!(le_i8(&[0x00][..]), Ok((&b""[..], 0))); - assert_parse!(le_i8(&[0x7f][..]), Ok((&b""[..], 127))); - assert_parse!(le_i8(&[0xff][..]), Ok((&b""[..], -1))); - assert_parse!(le_i8(&[0x80][..]), Ok((&b""[..], -128))); - } - - #[test] - fn le_i16_tests() { - assert_parse!(le_i16(&[0x00, 0x00][..]), Ok((&b""[..], 0))); - assert_parse!(le_i16(&[0xff, 0x7f][..]), Ok((&b""[..], 32_767_i16))); - assert_parse!(le_i16(&[0xff, 0xff][..]), Ok((&b""[..], -1))); - assert_parse!(le_i16(&[0x00, 0x80][..]), Ok((&b""[..], -32_768_i16))); - } - - #[test] - fn le_u24_tests() { - assert_parse!(le_u24(&[0x00, 0x00, 0x00][..]), Ok((&b""[..], 0))); - assert_parse!(le_u24(&[0xFF, 0xFF, 0x00][..]), Ok((&b""[..], 65_535_u32))); - assert_parse!( - le_u24(&[0x56, 0x34, 0x12][..]), - Ok((&b""[..], 1_193_046_u32)) - ); - } - - #[test] - fn le_i24_tests() { - assert_parse!(le_i24(&[0xFF, 0xFF, 0xFF][..]), Ok((&b""[..], -1_i32))); - assert_parse!(le_i24(&[0x00, 0x00, 0xFF][..]), Ok((&b""[..], -65_536_i32))); - assert_parse!( - le_i24(&[0xAA, 0xCB, 0xED][..]), - Ok((&b""[..], -1_193_046_i32)) - ); - } - - #[test] - fn le_i32_tests() { - assert_parse!(le_i32(&[0x00, 0x00, 0x00, 0x00][..]), Ok((&b""[..], 0))); - assert_parse!( - le_i32(&[0xff, 0xff, 0xff, 0x7f][..]), - Ok((&b""[..], 2_147_483_647_i32)) - ); - assert_parse!(le_i32(&[0xff, 0xff, 0xff, 0xff][..]), Ok((&b""[..], -1))); - assert_parse!( - le_i32(&[0x00, 0x00, 0x00, 0x80][..]), - Ok((&b""[..], -2_147_483_648_i32)) - ); - } - - #[test] - fn le_i64_tests() { - assert_parse!( - le_i64(&[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00][..]), - Ok((&b""[..], 0)) - ); - assert_parse!( - le_i64(&[0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f][..]), - Ok((&b""[..], 9_223_372_036_854_775_807_i64)) - ); - assert_parse!( - le_i64(&[0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff][..]), - Ok((&b""[..], -1)) - ); - assert_parse!( - le_i64(&[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80][..]), - Ok((&b""[..], -9_223_372_036_854_775_808_i64)) - ); - } - - #[test] - fn le_i128_tests() { - assert_parse!( - le_i128( - &[ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00 - ][..] 
- ), - Ok((&b""[..], 0)) - ); - assert_parse!( - le_i128( - &[ - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0x7f - ][..] - ), - Ok(( - &b""[..], - 170_141_183_460_469_231_731_687_303_715_884_105_727_i128 - )) - ); - assert_parse!( - le_i128( - &[ - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff - ][..] - ), - Ok((&b""[..], -1)) - ); - assert_parse!( - le_i128( - &[ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x80 - ][..] - ), - Ok(( - &b""[..], - -170_141_183_460_469_231_731_687_303_715_884_105_728_i128 - )) - ); - } - - #[test] - fn be_f32_tests() { - assert_parse!(be_f32(&[0x00, 0x00, 0x00, 0x00][..]), Ok((&b""[..], 0_f32))); - assert_parse!( - be_f32(&[0x4d, 0x31, 0x1f, 0xd8][..]), - Ok((&b""[..], 185_728_392_f32)) - ); - } - - #[test] - fn be_f64_tests() { - assert_parse!( - be_f64(&[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00][..]), - Ok((&b""[..], 0_f64)) - ); - assert_parse!( - be_f64(&[0x41, 0xa6, 0x23, 0xfb, 0x10, 0x00, 0x00, 0x00][..]), - Ok((&b""[..], 185_728_392_f64)) - ); - } - - #[test] - fn le_f32_tests() { - assert_parse!(le_f32(&[0x00, 0x00, 0x00, 0x00][..]), Ok((&b""[..], 0_f32))); - assert_parse!( - le_f32(&[0xd8, 0x1f, 0x31, 0x4d][..]), - Ok((&b""[..], 185_728_392_f32)) - ); - } - - #[test] - fn le_f64_tests() { - assert_parse!( - le_f64(&[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00][..]), - Ok((&b""[..], 0_f64)) - ); - assert_parse!( - le_f64(&[0x00, 0x00, 0x00, 0x10, 0xfb, 0x23, 0xa6, 0x41][..]), - Ok((&b""[..], 185_728_392_f64)) - ); - } - - #[test] - fn hex_u32_tests() { - assert_parse!( - hex_u32(&b";"[..]), - Err(Err::Error(error_position!(&b";"[..], ErrorKind::IsA))) - ); - assert_parse!(hex_u32(&b"ff;"[..]), Ok((&b";"[..], 255))); - assert_parse!(hex_u32(&b"1be2;"[..]), Ok((&b";"[..], 7138))); - assert_parse!(hex_u32(&b"c5a31be2;"[..]), Ok((&b";"[..], 3_315_801_058))); - assert_parse!(hex_u32(&b"C5A31be2;"[..]), Ok((&b";"[..], 3_315_801_058))); - assert_parse!(hex_u32(&b"00c5a31be2;"[..]), Ok((&b"e2;"[..], 12_952_347))); - assert_parse!( - hex_u32(&b"c5a31be201;"[..]), - Ok((&b"01;"[..], 3_315_801_058)) - ); - assert_parse!(hex_u32(&b"ffffffff;"[..]), Ok((&b";"[..], 4_294_967_295))); - assert_parse!(hex_u32(&b"0x1be2;"[..]), Ok((&b"x1be2;"[..], 0))); - assert_parse!(hex_u32(&b"12af"[..]), Ok((&b""[..], 0x12af))); - } - - #[test] - #[cfg(feature = "std")] - fn float_test() { - let mut test_cases = vec![ - "+3.14", - "3.14", - "-3.14", - "0", - "0.0", - "1.", - ".789", - "-.5", - "1e7", - "-1E-7", - ".3e-2", - "1.e4", - "1.2e4", - "12.34", - "-1.234E-12", - "-1.234e-12", - "0.00000000000000000087", - ]; - - for test in test_cases.drain(..) 
{ - let expected32 = str::parse::(test).unwrap(); - let expected64 = str::parse::(test).unwrap(); - - println!("now parsing: {} -> {}", test, expected32); - - let larger = format!("{}", test); - assert_parse!(recognize_float(&larger[..]), Ok(("", test))); - - assert_parse!(float(larger.as_bytes()), Ok((&b""[..], expected32))); - assert_parse!(float(&larger[..]), Ok(("", expected32))); - - assert_parse!(double(larger.as_bytes()), Ok((&b""[..], expected64))); - assert_parse!(double(&larger[..]), Ok(("", expected64))); - } - - let remaining_exponent = "-1.234E-"; - assert_parse!( - recognize_float(remaining_exponent), - Err(Err::Failure(("", ErrorKind::Digit))) - ); - - let (_i, nan) = float::<_, ()>("NaN").unwrap(); - assert!(nan.is_nan()); - - let (_i, inf) = float::<_, ()>("inf").unwrap(); - assert!(inf.is_infinite()); - let (_i, inf) = float::<_, ()>("infinite").unwrap(); - assert!(inf.is_infinite()); - } - - #[test] - fn configurable_endianness() { - use crate::number::Endianness; - - fn be_tst16(i: &[u8]) -> IResult<&[u8], u16> { - u16(Endianness::Big)(i) - } - fn le_tst16(i: &[u8]) -> IResult<&[u8], u16> { - u16(Endianness::Little)(i) - } - assert_eq!(be_tst16(&[0x80, 0x00]), Ok((&b""[..], 32_768_u16))); - assert_eq!(le_tst16(&[0x80, 0x00]), Ok((&b""[..], 128_u16))); - - fn be_tst32(i: &[u8]) -> IResult<&[u8], u32> { - u32(Endianness::Big)(i) - } - fn le_tst32(i: &[u8]) -> IResult<&[u8], u32> { - u32(Endianness::Little)(i) - } - assert_eq!( - be_tst32(&[0x12, 0x00, 0x60, 0x00]), - Ok((&b""[..], 302_014_464_u32)) - ); - assert_eq!( - le_tst32(&[0x12, 0x00, 0x60, 0x00]), - Ok((&b""[..], 6_291_474_u32)) - ); - - fn be_tst64(i: &[u8]) -> IResult<&[u8], u64> { - u64(Endianness::Big)(i) - } - fn le_tst64(i: &[u8]) -> IResult<&[u8], u64> { - u64(Endianness::Little)(i) - } - assert_eq!( - be_tst64(&[0x12, 0x00, 0x60, 0x00, 0x12, 0x00, 0x80, 0x00]), - Ok((&b""[..], 1_297_142_246_100_992_000_u64)) - ); - assert_eq!( - le_tst64(&[0x12, 0x00, 0x60, 0x00, 0x12, 0x00, 0x80, 0x00]), - Ok((&b""[..], 36_028_874_334_666_770_u64)) - ); - - fn be_tsti16(i: &[u8]) -> IResult<&[u8], i16> { - i16(Endianness::Big)(i) - } - fn le_tsti16(i: &[u8]) -> IResult<&[u8], i16> { - i16(Endianness::Little)(i) - } - assert_eq!(be_tsti16(&[0x00, 0x80]), Ok((&b""[..], 128_i16))); - assert_eq!(le_tsti16(&[0x00, 0x80]), Ok((&b""[..], -32_768_i16))); - - fn be_tsti32(i: &[u8]) -> IResult<&[u8], i32> { - i32(Endianness::Big)(i) - } - fn le_tsti32(i: &[u8]) -> IResult<&[u8], i32> { - i32(Endianness::Little)(i) - } - assert_eq!( - be_tsti32(&[0x00, 0x12, 0x60, 0x00]), - Ok((&b""[..], 1_204_224_i32)) - ); - assert_eq!( - le_tsti32(&[0x00, 0x12, 0x60, 0x00]), - Ok((&b""[..], 6_296_064_i32)) - ); - - fn be_tsti64(i: &[u8]) -> IResult<&[u8], i64> { - i64(Endianness::Big)(i) - } - fn le_tsti64(i: &[u8]) -> IResult<&[u8], i64> { - i64(Endianness::Little)(i) - } - assert_eq!( - be_tsti64(&[0x00, 0xFF, 0x60, 0x00, 0x12, 0x00, 0x80, 0x00]), - Ok((&b""[..], 71_881_672_479_506_432_i64)) - ); - assert_eq!( - le_tsti64(&[0x00, 0xFF, 0x60, 0x00, 0x12, 0x00, 0x80, 0x00]), - Ok((&b""[..], 36_028_874_334_732_032_i64)) - ); - } - - #[cfg(feature = "std")] - fn parse_f64(i: &str) -> IResult<&str, f64, ()> { - match recognize_float_or_exceptions(i) { - Err(e) => Err(e), - Ok((i, s)) => { - if s.is_empty() { - return Err(Err::Error(())); - } - match s.parse_to() { - Some(n) => Ok((i, n)), - None => Err(Err::Error(())), - } - } - } - } - - proptest! 
{
-    #[test]
-    #[cfg(feature = "std")]
-    fn floats(s in "\\PC*") {
-      println!("testing {}", s);
-      let res1 = parse_f64(&s);
-      let res2 = double::<_, ()>(s.as_str());
-      assert_eq!(res1, res2);
-    }
-  }
-}
diff --git a/vendor/nom/src/number/mod.rs b/vendor/nom/src/number/mod.rs
deleted file mode 100644
index 58c3d51b0bdb6a..00000000000000
--- a/vendor/nom/src/number/mod.rs
+++ /dev/null
@@ -1,15 +0,0 @@
-//! Parsers recognizing numbers
-
-pub mod complete;
-pub mod streaming;
-
-/// Configurable endianness
-#[derive(Debug, PartialEq, Eq, Clone, Copy)]
-pub enum Endianness {
-  /// Big endian
-  Big,
-  /// Little endian
-  Little,
-  /// Will match the host's endianness
-  Native,
-}
diff --git a/vendor/nom/src/number/streaming.rs b/vendor/nom/src/number/streaming.rs
deleted file mode 100644
index b4e856d2984424..00000000000000
--- a/vendor/nom/src/number/streaming.rs
+++ /dev/null
@@ -1,2206 +0,0 @@
-//! Parsers recognizing numbers, streaming version
-
-use crate::branch::alt;
-use crate::bytes::streaming::tag;
-use crate::character::streaming::{char, digit1, sign};
-use crate::combinator::{cut, map, opt, recognize};
-use crate::error::{ErrorKind, ParseError};
-use crate::internal::*;
-use crate::lib::std::ops::{RangeFrom, RangeTo};
-use crate::sequence::{pair, tuple};
-use crate::traits::{
-  AsBytes, AsChar, Compare, InputIter, InputLength, InputTake, InputTakeAtPosition, Offset, Slice,
-};
-
-/// Recognizes an unsigned 1 byte integer.
-///
-/// *Streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there is not enough data.
-/// ```rust
-/// # use nom::{Err, error::ErrorKind, Needed};
-/// use nom::number::streaming::be_u8;
-///
-/// let parser = |s| {
-///   be_u8::<_, (_, ErrorKind)>(s)
-/// };
-///
-/// assert_eq!(parser(&b"\x00\x01abcd"[..]), Ok((&b"\x01abcd"[..], 0x00)));
-/// assert_eq!(parser(&b""[..]), Err(Err::Incomplete(Needed::new(1))));
-/// ```
-#[inline]
-pub fn be_u8<I, E: ParseError<I>>(input: I) -> IResult<I, u8, E>
-where
-  I: Slice<RangeFrom<usize>> + InputIter<Item = u8> + InputLength,
-{
-  let bound: usize = 1;
-  if input.input_len() < bound {
-    Err(Err::Incomplete(Needed::new(1)))
-  } else {
-    let res = input.iter_elements().next().unwrap();
-
-    Ok((input.slice(bound..), res))
-  }
-}
-
-/// Recognizes a big endian unsigned 2 bytes integer.
-///
-/// *Streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there is not enough data.
-///
-/// ```rust
-/// # use nom::{Err, error::ErrorKind, Needed};
-/// use nom::number::streaming::be_u16;
-///
-/// let parser = |s| {
-///   be_u16::<_, (_, ErrorKind)>(s)
-/// };
-///
-/// assert_eq!(parser(&b"\x00\x01abcd"[..]), Ok((&b"abcd"[..], 0x0001)));
-/// assert_eq!(parser(&b"\x01"[..]), Err(Err::Incomplete(Needed::new(1))));
-/// ```
-#[inline]
-pub fn be_u16<I, E: ParseError<I>>(input: I) -> IResult<I, u16, E>
-where
-  I: Slice<RangeFrom<usize>> + InputIter<Item = u8> + InputLength,
-{
-  let bound: usize = 2;
-  if input.input_len() < bound {
-    Err(Err::Incomplete(Needed::new(bound - input.input_len())))
-  } else {
-    let mut res = 0u16;
-    for byte in input.iter_elements().take(bound) {
-      res = (res << 8) + byte as u16;
-    }
-
-    Ok((input.slice(bound..), res))
-  }
-}
-
-/// Recognizes a big endian unsigned 3 byte integer.
-///
-/// *Streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there is not enough data.
-/// -/// ```rust -/// # use nom::{Err, error::ErrorKind, Needed}; -/// use nom::number::streaming::be_u24; -/// -/// let parser = |s| { -/// be_u24::<_, (_, ErrorKind)>(s) -/// }; -/// -/// assert_eq!(parser(&b"\x00\x01\x02abcd"[..]), Ok((&b"abcd"[..], 0x000102))); -/// assert_eq!(parser(&b"\x01"[..]), Err(Err::Incomplete(Needed::new(2)))); -/// ``` -#[inline] -pub fn be_u24>(input: I) -> IResult -where - I: Slice> + InputIter + InputLength, -{ - let bound: usize = 3; - if input.input_len() < bound { - Err(Err::Incomplete(Needed::new(bound - input.input_len()))) - } else { - let mut res = 0u32; - for byte in input.iter_elements().take(bound) { - res = (res << 8) + byte as u32; - } - - Ok((input.slice(bound..), res)) - } -} - -/// Recognizes a big endian unsigned 4 bytes integer. -/// -/// *Streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there is not enough data. -/// -/// ```rust -/// # use nom::{Err, error::ErrorKind, Needed}; -/// use nom::number::streaming::be_u32; -/// -/// let parser = |s| { -/// be_u32::<_, (_, ErrorKind)>(s) -/// }; -/// -/// assert_eq!(parser(&b"\x00\x01\x02\x03abcd"[..]), Ok((&b"abcd"[..], 0x00010203))); -/// assert_eq!(parser(&b"\x01"[..]), Err(Err::Incomplete(Needed::new(3)))); -/// ``` -#[inline] -pub fn be_u32>(input: I) -> IResult -where - I: Slice> + InputIter + InputLength, -{ - let bound: usize = 4; - if input.input_len() < bound { - Err(Err::Incomplete(Needed::new(bound - input.input_len()))) - } else { - let mut res = 0u32; - for byte in input.iter_elements().take(bound) { - res = (res << 8) + byte as u32; - } - - Ok((input.slice(bound..), res)) - } -} - -/// Recognizes a big endian unsigned 8 bytes integer. -/// -/// *Streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there is not enough data. -/// -/// ```rust -/// # use nom::{Err, error::ErrorKind, Needed}; -/// use nom::number::streaming::be_u64; -/// -/// let parser = |s| { -/// be_u64::<_, (_, ErrorKind)>(s) -/// }; -/// -/// assert_eq!(parser(&b"\x00\x01\x02\x03\x04\x05\x06\x07abcd"[..]), Ok((&b"abcd"[..], 0x0001020304050607))); -/// assert_eq!(parser(&b"\x01"[..]), Err(Err::Incomplete(Needed::new(7)))); -/// ``` -#[inline] -pub fn be_u64>(input: I) -> IResult -where - I: Slice> + InputIter + InputLength, -{ - let bound: usize = 8; - if input.input_len() < bound { - Err(Err::Incomplete(Needed::new(bound - input.input_len()))) - } else { - let mut res = 0u64; - for byte in input.iter_elements().take(bound) { - res = (res << 8) + byte as u64; - } - - Ok((input.slice(bound..), res)) - } -} - -/// Recognizes a big endian unsigned 16 bytes integer. -/// -/// *Streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there is not enough data. 
-/// ```rust -/// # use nom::{Err, error::ErrorKind, Needed}; -/// use nom::number::streaming::be_u128; -/// -/// let parser = |s| { -/// be_u128::<_, (_, ErrorKind)>(s) -/// }; -/// -/// assert_eq!(parser(&b"\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x10\x11\x12\x13\x14\x15abcd"[..]), Ok((&b"abcd"[..], 0x00010203040506070809101112131415))); -/// assert_eq!(parser(&b"\x01"[..]), Err(Err::Incomplete(Needed::new(15)))); -/// ``` -#[inline] -pub fn be_u128>(input: I) -> IResult -where - I: Slice> + InputIter + InputLength, -{ - let bound: usize = 16; - if input.input_len() < bound { - Err(Err::Incomplete(Needed::new(bound - input.input_len()))) - } else { - let mut res = 0u128; - for byte in input.iter_elements().take(bound) { - res = (res << 8) + byte as u128; - } - - Ok((input.slice(bound..), res)) - } -} - -/// Recognizes a signed 1 byte integer. -/// -/// *Streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there is not enough data. -/// ```rust -/// # use nom::{Err, error::ErrorKind, Needed}; -/// use nom::number::streaming::be_i8; -/// -/// let parser = be_i8::<_, (_, ErrorKind)>; -/// -/// assert_eq!(parser(&b"\x00\x01abcd"[..]), Ok((&b"\x01abcd"[..], 0x00))); -/// assert_eq!(parser(&b""[..]), Err(Err::Incomplete(Needed::new(1)))); -/// ``` -#[inline] -pub fn be_i8>(input: I) -> IResult -where - I: Slice> + InputIter + InputLength, -{ - be_u8.map(|x| x as i8).parse(input) -} - -/// Recognizes a big endian signed 2 bytes integer. -/// -/// *Streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there is not enough data. -/// ```rust -/// # use nom::{Err, error::ErrorKind, Needed}; -/// use nom::number::streaming::be_i16; -/// -/// let parser = be_i16::<_, (_, ErrorKind)>; -/// -/// assert_eq!(parser(&b"\x00\x01abcd"[..]), Ok((&b"abcd"[..], 0x0001))); -/// assert_eq!(parser(&b""[..]), Err(Err::Incomplete(Needed::new(2)))); -/// ``` -#[inline] -pub fn be_i16>(input: I) -> IResult -where - I: Slice> + InputIter + InputLength, -{ - be_u16.map(|x| x as i16).parse(input) -} - -/// Recognizes a big endian signed 3 bytes integer. -/// -/// *Streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there is not enough data. -/// ```rust -/// # use nom::{Err, error::ErrorKind, Needed}; -/// use nom::number::streaming::be_i24; -/// -/// let parser = be_i24::<_, (_, ErrorKind)>; -/// -/// assert_eq!(parser(&b"\x00\x01\x02abcd"[..]), Ok((&b"abcd"[..], 0x000102))); -/// assert_eq!(parser(&b""[..]), Err(Err::Incomplete(Needed::new(3)))); -/// ``` -#[inline] -pub fn be_i24>(input: I) -> IResult -where - I: Slice> + InputIter + InputLength, -{ - // Same as the unsigned version but we need to sign-extend manually here - be_u24 - .map(|x| { - if x & 0x80_00_00 != 0 { - (x | 0xff_00_00_00) as i32 - } else { - x as i32 - } - }) - .parse(input) -} - -/// Recognizes a big endian signed 4 bytes integer. -/// -/// *Streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there is not enough data. -/// ```rust -/// # use nom::{Err, error::ErrorKind, Needed}; -/// use nom::number::streaming::be_i32; -/// -/// let parser = be_i32::<_, (_, ErrorKind)>; -/// -/// assert_eq!(parser(&b"\x00\x01\x02\x03abcd"[..]), Ok((&b"abcd"[..], 0x00010203))); -/// assert_eq!(parser(&b""[..]), Err(Err::Incomplete(Needed::new(4)))); -/// ``` -#[inline] -pub fn be_i32>(input: I) -> IResult -where - I: Slice> + InputIter + InputLength, -{ - be_u32.map(|x| x as i32).parse(input) -} - -/// Recognizes a big endian signed 8 bytes integer. 
-/// -/// *Streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there is not enough data. -/// -/// ```rust -/// # use nom::{Err, error::ErrorKind, Needed}; -/// use nom::number::streaming::be_i64; -/// -/// let parser = be_i64::<_, (_, ErrorKind)>; -/// -/// assert_eq!(parser(&b"\x00\x01\x02\x03\x04\x05\x06\x07abcd"[..]), Ok((&b"abcd"[..], 0x0001020304050607))); -/// assert_eq!(parser(&b"\x01"[..]), Err(Err::Incomplete(Needed::new(7)))); -/// ``` -#[inline] -pub fn be_i64>(input: I) -> IResult -where - I: Slice> + InputIter + InputLength, -{ - be_u64.map(|x| x as i64).parse(input) -} - -/// Recognizes a big endian signed 16 bytes integer. -/// -/// *Streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there is not enough data. -/// ```rust -/// # use nom::{Err, error::ErrorKind, Needed}; -/// use nom::number::streaming::be_i128; -/// -/// let parser = be_i128::<_, (_, ErrorKind)>; -/// -/// assert_eq!(parser(&b"\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x10\x11\x12\x13\x14\x15abcd"[..]), Ok((&b"abcd"[..], 0x00010203040506070809101112131415))); -/// assert_eq!(parser(&b"\x01"[..]), Err(Err::Incomplete(Needed::new(15)))); -/// ``` -#[inline] -pub fn be_i128>(input: I) -> IResult -where - I: Slice> + InputIter + InputLength, -{ - be_u128.map(|x| x as i128).parse(input) -} - -/// Recognizes an unsigned 1 byte integer. -/// -/// *Streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there is not enough data. -/// ```rust -/// # use nom::{Err, error::ErrorKind, Needed}; -/// use nom::number::streaming::le_u8; -/// -/// let parser = le_u8::<_, (_, ErrorKind)>; -/// -/// assert_eq!(parser(&b"\x00\x01abcd"[..]), Ok((&b"\x01abcd"[..], 0x00))); -/// assert_eq!(parser(&b""[..]), Err(Err::Incomplete(Needed::new(1)))); -/// ``` -#[inline] -pub fn le_u8>(input: I) -> IResult -where - I: Slice> + InputIter + InputLength, -{ - let bound: usize = 1; - if input.input_len() < bound { - Err(Err::Incomplete(Needed::new(1))) - } else { - let res = input.iter_elements().next().unwrap(); - - Ok((input.slice(bound..), res)) - } -} - -/// Recognizes a little endian unsigned 2 bytes integer. -/// -/// *Streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there is not enough data. -/// -/// ```rust -/// # use nom::{Err, error::ErrorKind, Needed}; -/// use nom::number::streaming::le_u16; -/// -/// let parser = |s| { -/// le_u16::<_, (_, ErrorKind)>(s) -/// }; -/// -/// assert_eq!(parser(&b"\x00\x01abcd"[..]), Ok((&b"abcd"[..], 0x0100))); -/// assert_eq!(parser(&b"\x01"[..]), Err(Err::Incomplete(Needed::new(1)))); -/// ``` -#[inline] -pub fn le_u16>(input: I) -> IResult -where - I: Slice> + InputIter + InputLength, -{ - let bound: usize = 2; - if input.input_len() < bound { - Err(Err::Incomplete(Needed::new(bound - input.input_len()))) - } else { - let mut res = 0u16; - for (index, byte) in input.iter_indices().take(bound) { - res += (byte as u16) << (8 * index); - } - - Ok((input.slice(bound..), res)) - } -} - -/// Recognizes a little endian unsigned 3 bytes integer. -/// -/// *Streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there is not enough data. 
-/// -/// ```rust -/// # use nom::{Err, error::ErrorKind, Needed}; -/// use nom::number::streaming::le_u24; -/// -/// let parser = |s| { -/// le_u24::<_, (_, ErrorKind)>(s) -/// }; -/// -/// assert_eq!(parser(&b"\x00\x01\x02abcd"[..]), Ok((&b"abcd"[..], 0x020100))); -/// assert_eq!(parser(&b"\x01"[..]), Err(Err::Incomplete(Needed::new(2)))); -/// ``` -#[inline] -pub fn le_u24>(input: I) -> IResult -where - I: Slice> + InputIter + InputLength, -{ - let bound: usize = 3; - if input.input_len() < bound { - Err(Err::Incomplete(Needed::new(bound - input.input_len()))) - } else { - let mut res = 0u32; - for (index, byte) in input.iter_indices().take(bound) { - res += (byte as u32) << (8 * index); - } - - Ok((input.slice(bound..), res)) - } -} - -/// Recognizes a little endian unsigned 4 bytes integer. -/// -/// *Streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there is not enough data. -/// -/// ```rust -/// # use nom::{Err, error::ErrorKind, Needed}; -/// use nom::number::streaming::le_u32; -/// -/// let parser = |s| { -/// le_u32::<_, (_, ErrorKind)>(s) -/// }; -/// -/// assert_eq!(parser(&b"\x00\x01\x02\x03abcd"[..]), Ok((&b"abcd"[..], 0x03020100))); -/// assert_eq!(parser(&b"\x01"[..]), Err(Err::Incomplete(Needed::new(3)))); -/// ``` -#[inline] -pub fn le_u32>(input: I) -> IResult -where - I: Slice> + InputIter + InputLength, -{ - let bound: usize = 4; - if input.input_len() < bound { - Err(Err::Incomplete(Needed::new(bound - input.input_len()))) - } else { - let mut res = 0u32; - for (index, byte) in input.iter_indices().take(bound) { - res += (byte as u32) << (8 * index); - } - - Ok((input.slice(bound..), res)) - } -} - -/// Recognizes a little endian unsigned 8 bytes integer. -/// -/// *Streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there is not enough data. -/// -/// ```rust -/// # use nom::{Err, error::ErrorKind, Needed}; -/// use nom::number::streaming::le_u64; -/// -/// let parser = |s| { -/// le_u64::<_, (_, ErrorKind)>(s) -/// }; -/// -/// assert_eq!(parser(&b"\x00\x01\x02\x03\x04\x05\x06\x07abcd"[..]), Ok((&b"abcd"[..], 0x0706050403020100))); -/// assert_eq!(parser(&b"\x01"[..]), Err(Err::Incomplete(Needed::new(7)))); -/// ``` -#[inline] -pub fn le_u64>(input: I) -> IResult -where - I: Slice> + InputIter + InputLength, -{ - let bound: usize = 8; - if input.input_len() < bound { - Err(Err::Incomplete(Needed::new(bound - input.input_len()))) - } else { - let mut res = 0u64; - for (index, byte) in input.iter_indices().take(bound) { - res += (byte as u64) << (8 * index); - } - - Ok((input.slice(bound..), res)) - } -} - -/// Recognizes a little endian unsigned 16 bytes integer. -/// -/// *Streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there is not enough data. 
-/// -/// ```rust -/// # use nom::{Err, error::ErrorKind, Needed}; -/// use nom::number::streaming::le_u128; -/// -/// let parser = |s| { -/// le_u128::<_, (_, ErrorKind)>(s) -/// }; -/// -/// assert_eq!(parser(&b"\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x10\x11\x12\x13\x14\x15abcd"[..]), Ok((&b"abcd"[..], 0x15141312111009080706050403020100))); -/// assert_eq!(parser(&b"\x01"[..]), Err(Err::Incomplete(Needed::new(15)))); -/// ``` -#[inline] -pub fn le_u128>(input: I) -> IResult -where - I: Slice> + InputIter + InputLength, -{ - let bound: usize = 16; - if input.input_len() < bound { - Err(Err::Incomplete(Needed::new(bound - input.input_len()))) - } else { - let mut res = 0u128; - for (index, byte) in input.iter_indices().take(bound) { - res += (byte as u128) << (8 * index); - } - - Ok((input.slice(bound..), res)) - } -} - -/// Recognizes a signed 1 byte integer. -/// -/// *Streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there is not enough data. -/// ```rust -/// # use nom::{Err, error::ErrorKind, Needed}; -/// use nom::number::streaming::le_i8; -/// -/// let parser = le_i8::<_, (_, ErrorKind)>; -/// -/// assert_eq!(parser(&b"\x00\x01abcd"[..]), Ok((&b"\x01abcd"[..], 0x00))); -/// assert_eq!(parser(&b""[..]), Err(Err::Incomplete(Needed::new(1)))); -/// ``` -#[inline] -pub fn le_i8>(input: I) -> IResult -where - I: Slice> + InputIter + InputLength, -{ - le_u8.map(|x| x as i8).parse(input) -} - -/// Recognizes a little endian signed 2 bytes integer. -/// -/// *Streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there is not enough data. -/// -/// ```rust -/// # use nom::{Err, error::ErrorKind, Needed}; -/// use nom::number::streaming::le_i16; -/// -/// let parser = |s| { -/// le_i16::<_, (_, ErrorKind)>(s) -/// }; -/// -/// assert_eq!(parser(&b"\x00\x01abcd"[..]), Ok((&b"abcd"[..], 0x0100))); -/// assert_eq!(parser(&b"\x01"[..]), Err(Err::Incomplete(Needed::new(1)))); -/// ``` -#[inline] -pub fn le_i16>(input: I) -> IResult -where - I: Slice> + InputIter + InputLength, -{ - le_u16.map(|x| x as i16).parse(input) -} - -/// Recognizes a little endian signed 3 bytes integer. -/// -/// *Streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there is not enough data. -/// -/// ```rust -/// # use nom::{Err, error::ErrorKind, Needed}; -/// use nom::number::streaming::le_i24; -/// -/// let parser = |s| { -/// le_i24::<_, (_, ErrorKind)>(s) -/// }; -/// -/// assert_eq!(parser(&b"\x00\x01\x02abcd"[..]), Ok((&b"abcd"[..], 0x020100))); -/// assert_eq!(parser(&b"\x01"[..]), Err(Err::Incomplete(Needed::new(2)))); -/// ``` -#[inline] -pub fn le_i24>(input: I) -> IResult -where - I: Slice> + InputIter + InputLength, -{ - // Same as the unsigned version but we need to sign-extend manually here - le_u24 - .map(|x| { - if x & 0x80_00_00 != 0 { - (x | 0xff_00_00_00) as i32 - } else { - x as i32 - } - }) - .parse(input) -} - -/// Recognizes a little endian signed 4 bytes integer. -/// -/// *Streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there is not enough data. 
-/// -/// ```rust -/// # use nom::{Err, error::ErrorKind, Needed}; -/// use nom::number::streaming::le_i32; -/// -/// let parser = |s| { -/// le_i32::<_, (_, ErrorKind)>(s) -/// }; -/// -/// assert_eq!(parser(&b"\x00\x01\x02\x03abcd"[..]), Ok((&b"abcd"[..], 0x03020100))); -/// assert_eq!(parser(&b"\x01"[..]), Err(Err::Incomplete(Needed::new(3)))); -/// ``` -#[inline] -pub fn le_i32>(input: I) -> IResult -where - I: Slice> + InputIter + InputLength, -{ - le_u32.map(|x| x as i32).parse(input) -} - -/// Recognizes a little endian signed 8 bytes integer. -/// -/// *Streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there is not enough data. -/// -/// ```rust -/// # use nom::{Err, error::ErrorKind, Needed}; -/// use nom::number::streaming::le_i64; -/// -/// let parser = |s| { -/// le_i64::<_, (_, ErrorKind)>(s) -/// }; -/// -/// assert_eq!(parser(&b"\x00\x01\x02\x03\x04\x05\x06\x07abcd"[..]), Ok((&b"abcd"[..], 0x0706050403020100))); -/// assert_eq!(parser(&b"\x01"[..]), Err(Err::Incomplete(Needed::new(7)))); -/// ``` -#[inline] -pub fn le_i64>(input: I) -> IResult -where - I: Slice> + InputIter + InputLength, -{ - le_u64.map(|x| x as i64).parse(input) -} - -/// Recognizes a little endian signed 16 bytes integer. -/// -/// *Streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there is not enough data. -/// -/// ```rust -/// # use nom::{Err, error::ErrorKind, Needed}; -/// use nom::number::streaming::le_i128; -/// -/// let parser = |s| { -/// le_i128::<_, (_, ErrorKind)>(s) -/// }; -/// -/// assert_eq!(parser(&b"\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x10\x11\x12\x13\x14\x15abcd"[..]), Ok((&b"abcd"[..], 0x15141312111009080706050403020100))); -/// assert_eq!(parser(&b"\x01"[..]), Err(Err::Incomplete(Needed::new(15)))); -/// ``` -#[inline] -pub fn le_i128>(input: I) -> IResult -where - I: Slice> + InputIter + InputLength, -{ - le_u128.map(|x| x as i128).parse(input) -} - -/// Recognizes an unsigned 1 byte integer -/// -/// Note that endianness does not apply to 1 byte numbers. -/// *Streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there is not enough data. -/// ```rust -/// # use nom::{Err, error::ErrorKind, Needed}; -/// # use nom::Needed::Size; -/// use nom::number::streaming::u8; -/// -/// let parser = |s| { -/// u8::<_, (_, ErrorKind)>(s) -/// }; -/// -/// assert_eq!(parser(&b"\x00\x03abcefg"[..]), Ok((&b"\x03abcefg"[..], 0x00))); -/// assert_eq!(parser(&b""[..]), Err(Err::Incomplete(Needed::new(1)))); -/// ``` -#[inline] -pub fn u8>(input: I) -> IResult -where - I: Slice> + InputIter + InputLength, -{ - let bound: usize = 1; - if input.input_len() < bound { - Err(Err::Incomplete(Needed::new(1))) - } else { - let res = input.iter_elements().next().unwrap(); - - Ok((input.slice(bound..), res)) - } -} - -/// Recognizes an unsigned 2 bytes integer -/// -/// If the parameter is `nom::number::Endianness::Big`, parse a big endian u16 integer, -/// otherwise if `nom::number::Endianness::Little` parse a little endian u16 integer. -/// *Streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there is not enough data. 
-/// -/// ```rust -/// # use nom::{Err, error::ErrorKind, Needed}; -/// # use nom::Needed::Size; -/// use nom::number::streaming::u16; -/// -/// let be_u16 = |s| { -/// u16::<_, (_, ErrorKind)>(nom::number::Endianness::Big)(s) -/// }; -/// -/// assert_eq!(be_u16(&b"\x00\x03abcefg"[..]), Ok((&b"abcefg"[..], 0x0003))); -/// assert_eq!(be_u16(&b"\x01"[..]), Err(Err::Incomplete(Needed::new(1)))); -/// -/// let le_u16 = |s| { -/// u16::<_, (_, ErrorKind)>(nom::number::Endianness::Little)(s) -/// }; -/// -/// assert_eq!(le_u16(&b"\x00\x03abcefg"[..]), Ok((&b"abcefg"[..], 0x0300))); -/// assert_eq!(le_u16(&b"\x01"[..]), Err(Err::Incomplete(Needed::new(1)))); -/// ``` -#[inline] -pub fn u16>(endian: crate::number::Endianness) -> fn(I) -> IResult -where - I: Slice> + InputIter + InputLength, -{ - match endian { - crate::number::Endianness::Big => be_u16, - crate::number::Endianness::Little => le_u16, - #[cfg(target_endian = "big")] - crate::number::Endianness::Native => be_u16, - #[cfg(target_endian = "little")] - crate::number::Endianness::Native => le_u16, - } -} - -/// Recognizes an unsigned 3 byte integer -/// -/// If the parameter is `nom::number::Endianness::Big`, parse a big endian u24 integer, -/// otherwise if `nom::number::Endianness::Little` parse a little endian u24 integer. -/// *Streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there is not enough data. -/// ```rust -/// # use nom::{Err, error::ErrorKind, Needed}; -/// # use nom::Needed::Size; -/// use nom::number::streaming::u24; -/// -/// let be_u24 = |s| { -/// u24::<_,(_, ErrorKind)>(nom::number::Endianness::Big)(s) -/// }; -/// -/// assert_eq!(be_u24(&b"\x00\x03\x05abcefg"[..]), Ok((&b"abcefg"[..], 0x000305))); -/// assert_eq!(be_u24(&b"\x01"[..]), Err(Err::Incomplete(Needed::new(2)))); -/// -/// let le_u24 = |s| { -/// u24::<_, (_, ErrorKind)>(nom::number::Endianness::Little)(s) -/// }; -/// -/// assert_eq!(le_u24(&b"\x00\x03\x05abcefg"[..]), Ok((&b"abcefg"[..], 0x050300))); -/// assert_eq!(le_u24(&b"\x01"[..]), Err(Err::Incomplete(Needed::new(2)))); -/// ``` -#[inline] -pub fn u24>(endian: crate::number::Endianness) -> fn(I) -> IResult -where - I: Slice> + InputIter + InputLength, -{ - match endian { - crate::number::Endianness::Big => be_u24, - crate::number::Endianness::Little => le_u24, - #[cfg(target_endian = "big")] - crate::number::Endianness::Native => be_u24, - #[cfg(target_endian = "little")] - crate::number::Endianness::Native => le_u24, - } -} - -/// Recognizes an unsigned 4 byte integer -/// -/// If the parameter is `nom::number::Endianness::Big`, parse a big endian u32 integer, -/// otherwise if `nom::number::Endianness::Little` parse a little endian u32 integer. -/// *Streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there is not enough data. 
-/// ```rust -/// # use nom::{Err, error::ErrorKind, Needed}; -/// # use nom::Needed::Size; -/// use nom::number::streaming::u32; -/// -/// let be_u32 = |s| { -/// u32::<_, (_, ErrorKind)>(nom::number::Endianness::Big)(s) -/// }; -/// -/// assert_eq!(be_u32(&b"\x00\x03\x05\x07abcefg"[..]), Ok((&b"abcefg"[..], 0x00030507))); -/// assert_eq!(be_u32(&b"\x01"[..]), Err(Err::Incomplete(Needed::new(3)))); -/// -/// let le_u32 = |s| { -/// u32::<_, (_, ErrorKind)>(nom::number::Endianness::Little)(s) -/// }; -/// -/// assert_eq!(le_u32(&b"\x00\x03\x05\x07abcefg"[..]), Ok((&b"abcefg"[..], 0x07050300))); -/// assert_eq!(le_u32(&b"\x01"[..]), Err(Err::Incomplete(Needed::new(3)))); -/// ``` -#[inline] -pub fn u32>(endian: crate::number::Endianness) -> fn(I) -> IResult -where - I: Slice> + InputIter + InputLength, -{ - match endian { - crate::number::Endianness::Big => be_u32, - crate::number::Endianness::Little => le_u32, - #[cfg(target_endian = "big")] - crate::number::Endianness::Native => be_u32, - #[cfg(target_endian = "little")] - crate::number::Endianness::Native => le_u32, - } -} - -/// Recognizes an unsigned 8 byte integer -/// -/// If the parameter is `nom::number::Endianness::Big`, parse a big endian u64 integer, -/// otherwise if `nom::number::Endianness::Little` parse a little endian u64 integer. -/// *Streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there is not enough data. -/// ```rust -/// # use nom::{Err, error::ErrorKind, Needed}; -/// # use nom::Needed::Size; -/// use nom::number::streaming::u64; -/// -/// let be_u64 = |s| { -/// u64::<_, (_, ErrorKind)>(nom::number::Endianness::Big)(s) -/// }; -/// -/// assert_eq!(be_u64(&b"\x00\x01\x02\x03\x04\x05\x06\x07abcefg"[..]), Ok((&b"abcefg"[..], 0x0001020304050607))); -/// assert_eq!(be_u64(&b"\x01"[..]), Err(Err::Incomplete(Needed::new(7)))); -/// -/// let le_u64 = |s| { -/// u64::<_, (_, ErrorKind)>(nom::number::Endianness::Little)(s) -/// }; -/// -/// assert_eq!(le_u64(&b"\x00\x01\x02\x03\x04\x05\x06\x07abcefg"[..]), Ok((&b"abcefg"[..], 0x0706050403020100))); -/// assert_eq!(le_u64(&b"\x01"[..]), Err(Err::Incomplete(Needed::new(7)))); -/// ``` -#[inline] -pub fn u64>(endian: crate::number::Endianness) -> fn(I) -> IResult -where - I: Slice> + InputIter + InputLength, -{ - match endian { - crate::number::Endianness::Big => be_u64, - crate::number::Endianness::Little => le_u64, - #[cfg(target_endian = "big")] - crate::number::Endianness::Native => be_u64, - #[cfg(target_endian = "little")] - crate::number::Endianness::Native => le_u64, - } -} - -/// Recognizes an unsigned 16 byte integer -/// -/// If the parameter is `nom::number::Endianness::Big`, parse a big endian u128 integer, -/// otherwise if `nom::number::Endianness::Little` parse a little endian u128 integer. -/// *Streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there is not enough data. 
-/// ```rust -/// # use nom::{Err, error::ErrorKind, Needed}; -/// # use nom::Needed::Size; -/// use nom::number::streaming::u128; -/// -/// let be_u128 = |s| { -/// u128::<_, (_, ErrorKind)>(nom::number::Endianness::Big)(s) -/// }; -/// -/// assert_eq!(be_u128(&b"\x00\x01\x02\x03\x04\x05\x06\x07\x00\x01\x02\x03\x04\x05\x06\x07abcefg"[..]), Ok((&b"abcefg"[..], 0x00010203040506070001020304050607))); -/// assert_eq!(be_u128(&b"\x01"[..]), Err(Err::Incomplete(Needed::new(15)))); -/// -/// let le_u128 = |s| { -/// u128::<_, (_, ErrorKind)>(nom::number::Endianness::Little)(s) -/// }; -/// -/// assert_eq!(le_u128(&b"\x00\x01\x02\x03\x04\x05\x06\x07\x00\x01\x02\x03\x04\x05\x06\x07abcefg"[..]), Ok((&b"abcefg"[..], 0x07060504030201000706050403020100))); -/// assert_eq!(le_u128(&b"\x01"[..]), Err(Err::Incomplete(Needed::new(15)))); -/// ``` -#[inline] -pub fn u128>(endian: crate::number::Endianness) -> fn(I) -> IResult -where - I: Slice> + InputIter + InputLength, -{ - match endian { - crate::number::Endianness::Big => be_u128, - crate::number::Endianness::Little => le_u128, - #[cfg(target_endian = "big")] - crate::number::Endianness::Native => be_u128, - #[cfg(target_endian = "little")] - crate::number::Endianness::Native => le_u128, - } -} - -/// Recognizes a signed 1 byte integer -/// -/// Note that endianness does not apply to 1 byte numbers. -/// *Streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there is not enough data. -/// ```rust -/// # use nom::{Err, error::ErrorKind, Needed}; -/// # use nom::Needed::Size; -/// use nom::number::streaming::i8; -/// -/// let parser = |s| { -/// i8::<_, (_, ErrorKind)>(s) -/// }; -/// -/// assert_eq!(parser(&b"\x00\x03abcefg"[..]), Ok((&b"\x03abcefg"[..], 0x00))); -/// assert_eq!(parser(&b""[..]), Err(Err::Incomplete(Needed::new(1)))); -/// ``` -#[inline] -pub fn i8>(i: I) -> IResult -where - I: Slice> + InputIter + InputLength, -{ - u8.map(|x| x as i8).parse(i) -} - -/// Recognizes a signed 2 byte integer -/// -/// If the parameter is `nom::number::Endianness::Big`, parse a big endian i16 integer, -/// otherwise if `nom::number::Endianness::Little` parse a little endian i16 integer. -/// *Streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there is not enough data. 
-/// ```rust -/// # use nom::{Err, error::ErrorKind, Needed}; -/// # use nom::Needed::Size; -/// use nom::number::streaming::i16; -/// -/// let be_i16 = |s| { -/// i16::<_, (_, ErrorKind)>(nom::number::Endianness::Big)(s) -/// }; -/// -/// assert_eq!(be_i16(&b"\x00\x03abcefg"[..]), Ok((&b"abcefg"[..], 0x0003))); -/// assert_eq!(be_i16(&b"\x01"[..]), Err(Err::Incomplete(Needed::new(1)))); -/// -/// let le_i16 = |s| { -/// i16::<_, (_, ErrorKind)>(nom::number::Endianness::Little)(s) -/// }; -/// -/// assert_eq!(le_i16(&b"\x00\x03abcefg"[..]), Ok((&b"abcefg"[..], 0x0300))); -/// assert_eq!(le_i16(&b"\x01"[..]), Err(Err::Incomplete(Needed::new(1)))); -/// ``` -#[inline] -pub fn i16>(endian: crate::number::Endianness) -> fn(I) -> IResult -where - I: Slice> + InputIter + InputLength, -{ - match endian { - crate::number::Endianness::Big => be_i16, - crate::number::Endianness::Little => le_i16, - #[cfg(target_endian = "big")] - crate::number::Endianness::Native => be_i16, - #[cfg(target_endian = "little")] - crate::number::Endianness::Native => le_i16, - } -} - -/// Recognizes a signed 3 byte integer -/// -/// If the parameter is `nom::number::Endianness::Big`, parse a big endian i24 integer, -/// otherwise if `nom::number::Endianness::Little` parse a little endian i24 integer. -/// *Streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there is not enough data. -/// ```rust -/// # use nom::{Err, error::ErrorKind, Needed}; -/// # use nom::Needed::Size; -/// use nom::number::streaming::i24; -/// -/// let be_i24 = |s| { -/// i24::<_, (_, ErrorKind)>(nom::number::Endianness::Big)(s) -/// }; -/// -/// assert_eq!(be_i24(&b"\x00\x03\x05abcefg"[..]), Ok((&b"abcefg"[..], 0x000305))); -/// assert_eq!(be_i24(&b"\x01"[..]), Err(Err::Incomplete(Needed::new(2)))); -/// -/// let le_i24 = |s| { -/// i24::<_, (_, ErrorKind)>(nom::number::Endianness::Little)(s) -/// }; -/// -/// assert_eq!(le_i24(&b"\x00\x03\x05abcefg"[..]), Ok((&b"abcefg"[..], 0x050300))); -/// assert_eq!(le_i24(&b"\x01"[..]), Err(Err::Incomplete(Needed::new(2)))); -/// ``` -#[inline] -pub fn i24>(endian: crate::number::Endianness) -> fn(I) -> IResult -where - I: Slice> + InputIter + InputLength, -{ - match endian { - crate::number::Endianness::Big => be_i24, - crate::number::Endianness::Little => le_i24, - #[cfg(target_endian = "big")] - crate::number::Endianness::Native => be_i24, - #[cfg(target_endian = "little")] - crate::number::Endianness::Native => le_i24, - } -} - -/// Recognizes a signed 4 byte integer -/// -/// If the parameter is `nom::number::Endianness::Big`, parse a big endian i32 integer, -/// otherwise if `nom::number::Endianness::Little` parse a little endian i32 integer. -/// *Streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there is not enough data. 
-/// ```rust -/// # use nom::{Err, error::ErrorKind, Needed}; -/// # use nom::Needed::Size; -/// use nom::number::streaming::i32; -/// -/// let be_i32 = |s| { -/// i32::<_, (_, ErrorKind)>(nom::number::Endianness::Big)(s) -/// }; -/// -/// assert_eq!(be_i32(&b"\x00\x03\x05\x07abcefg"[..]), Ok((&b"abcefg"[..], 0x00030507))); -/// assert_eq!(be_i32(&b"\x01"[..]), Err(Err::Incomplete(Needed::new(3)))); -/// -/// let le_i32 = |s| { -/// i32::<_, (_, ErrorKind)>(nom::number::Endianness::Little)(s) -/// }; -/// -/// assert_eq!(le_i32(&b"\x00\x03\x05\x07abcefg"[..]), Ok((&b"abcefg"[..], 0x07050300))); -/// assert_eq!(le_i32(&b"\x01"[..]), Err(Err::Incomplete(Needed::new(3)))); -/// ``` -#[inline] -pub fn i32>(endian: crate::number::Endianness) -> fn(I) -> IResult -where - I: Slice> + InputIter + InputLength, -{ - match endian { - crate::number::Endianness::Big => be_i32, - crate::number::Endianness::Little => le_i32, - #[cfg(target_endian = "big")] - crate::number::Endianness::Native => be_i32, - #[cfg(target_endian = "little")] - crate::number::Endianness::Native => le_i32, - } -} - -/// Recognizes a signed 8 byte integer -/// -/// If the parameter is `nom::number::Endianness::Big`, parse a big endian i64 integer, -/// otherwise if `nom::number::Endianness::Little` parse a little endian i64 integer. -/// *Streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there is not enough data. -/// ```rust -/// # use nom::{Err, error::ErrorKind, Needed}; -/// # use nom::Needed::Size; -/// use nom::number::streaming::i64; -/// -/// let be_i64 = |s| { -/// i64::<_, (_, ErrorKind)>(nom::number::Endianness::Big)(s) -/// }; -/// -/// assert_eq!(be_i64(&b"\x00\x01\x02\x03\x04\x05\x06\x07abcefg"[..]), Ok((&b"abcefg"[..], 0x0001020304050607))); -/// assert_eq!(be_i64(&b"\x01"[..]), Err(Err::Incomplete(Needed::new(7)))); -/// -/// let le_i64 = |s| { -/// i64::<_, (_, ErrorKind)>(nom::number::Endianness::Little)(s) -/// }; -/// -/// assert_eq!(le_i64(&b"\x00\x01\x02\x03\x04\x05\x06\x07abcefg"[..]), Ok((&b"abcefg"[..], 0x0706050403020100))); -/// assert_eq!(le_i64(&b"\x01"[..]), Err(Err::Incomplete(Needed::new(7)))); -/// ``` -#[inline] -pub fn i64>(endian: crate::number::Endianness) -> fn(I) -> IResult -where - I: Slice> + InputIter + InputLength, -{ - match endian { - crate::number::Endianness::Big => be_i64, - crate::number::Endianness::Little => le_i64, - #[cfg(target_endian = "big")] - crate::number::Endianness::Native => be_i64, - #[cfg(target_endian = "little")] - crate::number::Endianness::Native => le_i64, - } -} - -/// Recognizes a signed 16 byte integer -/// -/// If the parameter is `nom::number::Endianness::Big`, parse a big endian i128 integer, -/// otherwise if `nom::number::Endianness::Little` parse a little endian i128 integer. -/// *Streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there is not enough data. 
-/// ```rust -/// # use nom::{Err, error::ErrorKind, Needed}; -/// # use nom::Needed::Size; -/// use nom::number::streaming::i128; -/// -/// let be_i128 = |s| { -/// i128::<_, (_, ErrorKind)>(nom::number::Endianness::Big)(s) -/// }; -/// -/// assert_eq!(be_i128(&b"\x00\x01\x02\x03\x04\x05\x06\x07\x00\x01\x02\x03\x04\x05\x06\x07abcefg"[..]), Ok((&b"abcefg"[..], 0x00010203040506070001020304050607))); -/// assert_eq!(be_i128(&b"\x01"[..]), Err(Err::Incomplete(Needed::new(15)))); -/// -/// let le_i128 = |s| { -/// i128::<_, (_, ErrorKind)>(nom::number::Endianness::Little)(s) -/// }; -/// -/// assert_eq!(le_i128(&b"\x00\x01\x02\x03\x04\x05\x06\x07\x00\x01\x02\x03\x04\x05\x06\x07abcefg"[..]), Ok((&b"abcefg"[..], 0x07060504030201000706050403020100))); -/// assert_eq!(le_i128(&b"\x01"[..]), Err(Err::Incomplete(Needed::new(15)))); -/// ``` -#[inline] -pub fn i128>(endian: crate::number::Endianness) -> fn(I) -> IResult -where - I: Slice> + InputIter + InputLength, -{ - match endian { - crate::number::Endianness::Big => be_i128, - crate::number::Endianness::Little => le_i128, - #[cfg(target_endian = "big")] - crate::number::Endianness::Native => be_i128, - #[cfg(target_endian = "little")] - crate::number::Endianness::Native => le_i128, - } -} - -/// Recognizes a big endian 4 bytes floating point number. -/// -/// *Streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there is not enough data. -/// ```rust -/// # use nom::{Err, error::ErrorKind, Needed}; -/// use nom::number::streaming::be_f32; -/// -/// let parser = |s| { -/// be_f32::<_, (_, ErrorKind)>(s) -/// }; -/// -/// assert_eq!(parser(&[0x40, 0x29, 0x00, 0x00][..]), Ok((&b""[..], 2.640625))); -/// assert_eq!(parser(&[0x01][..]), Err(Err::Incomplete(Needed::new(3)))); -/// ``` -#[inline] -pub fn be_f32>(input: I) -> IResult -where - I: Slice> + InputIter + InputLength, -{ - match be_u32(input) { - Err(e) => Err(e), - Ok((i, o)) => Ok((i, f32::from_bits(o))), - } -} - -/// Recognizes a big endian 8 bytes floating point number. -/// -/// *Streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there is not enough data. -/// ```rust -/// # use nom::{Err, error::ErrorKind, Needed}; -/// use nom::number::streaming::be_f64; -/// -/// let parser = |s| { -/// be_f64::<_, (_, ErrorKind)>(s) -/// }; -/// -/// assert_eq!(parser(&[0x40, 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00][..]), Ok((&b""[..], 12.5))); -/// assert_eq!(parser(&[0x01][..]), Err(Err::Incomplete(Needed::new(7)))); -/// ``` -#[inline] -pub fn be_f64>(input: I) -> IResult -where - I: Slice> + InputIter + InputLength, -{ - match be_u64(input) { - Err(e) => Err(e), - Ok((i, o)) => Ok((i, f64::from_bits(o))), - } -} - -/// Recognizes a little endian 4 bytes floating point number. -/// -/// *Streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there is not enough data. -/// ```rust -/// # use nom::{Err, error::ErrorKind, Needed}; -/// use nom::number::streaming::le_f32; -/// -/// let parser = |s| { -/// le_f32::<_, (_, ErrorKind)>(s) -/// }; -/// -/// assert_eq!(parser(&[0x00, 0x00, 0x48, 0x41][..]), Ok((&b""[..], 12.5))); -/// assert_eq!(parser(&[0x01][..]), Err(Err::Incomplete(Needed::new(3)))); -/// ``` -#[inline] -pub fn le_f32>(input: I) -> IResult -where - I: Slice> + InputIter + InputLength, -{ - match le_u32(input) { - Err(e) => Err(e), - Ok((i, o)) => Ok((i, f32::from_bits(o))), - } -} - -/// Recognizes a little endian 8 bytes floating point number. -/// -/// *Streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there is not enough data. 
-/// ```rust -/// # use nom::{Err, error::ErrorKind, Needed}; -/// use nom::number::streaming::le_f64; -/// -/// let parser = |s| { -/// le_f64::<_, (_, ErrorKind)>(s) -/// }; -/// -/// assert_eq!(parser(&[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x48, 0x41][..]), Ok((&b""[..], 3145728.0))); -/// assert_eq!(parser(&[0x01][..]), Err(Err::Incomplete(Needed::new(7)))); -/// ``` -#[inline] -pub fn le_f64>(input: I) -> IResult -where - I: Slice> + InputIter + InputLength, -{ - match le_u64(input) { - Err(e) => Err(e), - Ok((i, o)) => Ok((i, f64::from_bits(o))), - } -} - -/// Recognizes a 4 byte floating point number -/// -/// If the parameter is `nom::number::Endianness::Big`, parse a big endian f32 float, -/// otherwise if `nom::number::Endianness::Little` parse a little endian f32 float. -/// *Streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there is not enough data. -/// ```rust -/// # use nom::{Err, error::ErrorKind, Needed}; -/// # use nom::Needed::Size; -/// use nom::number::streaming::f32; -/// -/// let be_f32 = |s| { -/// f32::<_, (_, ErrorKind)>(nom::number::Endianness::Big)(s) -/// }; -/// -/// assert_eq!(be_f32(&[0x41, 0x48, 0x00, 0x00][..]), Ok((&b""[..], 12.5))); -/// assert_eq!(be_f32(&b"abc"[..]), Err(Err::Incomplete(Needed::new(1)))); -/// -/// let le_f32 = |s| { -/// f32::<_, (_, ErrorKind)>(nom::number::Endianness::Little)(s) -/// }; -/// -/// assert_eq!(le_f32(&[0x00, 0x00, 0x48, 0x41][..]), Ok((&b""[..], 12.5))); -/// assert_eq!(le_f32(&b"abc"[..]), Err(Err::Incomplete(Needed::new(1)))); -/// ``` -#[inline] -pub fn f32>(endian: crate::number::Endianness) -> fn(I) -> IResult -where - I: Slice> + InputIter + InputLength, -{ - match endian { - crate::number::Endianness::Big => be_f32, - crate::number::Endianness::Little => le_f32, - #[cfg(target_endian = "big")] - crate::number::Endianness::Native => be_f32, - #[cfg(target_endian = "little")] - crate::number::Endianness::Native => le_f32, - } -} - -/// Recognizes an 8 byte floating point number -/// -/// If the parameter is `nom::number::Endianness::Big`, parse a big endian f64 float, -/// otherwise if `nom::number::Endianness::Little` parse a little endian f64 float. -/// *Streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there is not enough data. -/// ```rust -/// # use nom::{Err, error::ErrorKind, Needed}; -/// # use nom::Needed::Size; -/// use nom::number::streaming::f64; -/// -/// let be_f64 = |s| { -/// f64::<_, (_, ErrorKind)>(nom::number::Endianness::Big)(s) -/// }; -/// -/// assert_eq!(be_f64(&[0x40, 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00][..]), Ok((&b""[..], 12.5))); -/// assert_eq!(be_f64(&b"abc"[..]), Err(Err::Incomplete(Needed::new(5)))); -/// -/// let le_f64 = |s| { -/// f64::<_, (_, ErrorKind)>(nom::number::Endianness::Little)(s) -/// }; -/// -/// assert_eq!(le_f64(&[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x29, 0x40][..]), Ok((&b""[..], 12.5))); -/// assert_eq!(le_f64(&b"abc"[..]), Err(Err::Incomplete(Needed::new(5)))); -/// ``` -#[inline] -pub fn f64>(endian: crate::number::Endianness) -> fn(I) -> IResult -where - I: Slice> + InputIter + InputLength, -{ - match endian { - crate::number::Endianness::Big => be_f64, - crate::number::Endianness::Little => le_f64, - #[cfg(target_endian = "big")] - crate::number::Endianness::Native => be_f64, - #[cfg(target_endian = "little")] - crate::number::Endianness::Native => le_f64, - } -} - -/// Recognizes a hex-encoded integer. -/// -/// *Streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there is not enough data. 
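A minimal usage sketch of the endianness-parameterized parsers removed above, assuming the nom 7.x streaming API this vendored copy follows; the `header`, `version`, and `length` names are illustrative only, not part of the patch:

```rust
use nom::number::streaming::{i16, u32};
use nom::number::Endianness;
use nom::IResult;

// Each combinator takes an Endianness and returns a plain `fn(I) -> IResult<I, O, E>`
// that dispatches to the matching be_*/le_* parser; the streaming variants return
// Err::Incomplete when the input is too short.
fn header(input: &[u8]) -> IResult<&[u8], (i16, u32)> {
    let (input, version) = i16(Endianness::Big)(input)?;   // two big-endian bytes
    let (input, length) = u32(Endianness::Little)(input)?; // four little-endian bytes
    Ok((input, (version, length)))
}
```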
-/// ```rust -/// # use nom::{Err, error::ErrorKind, Needed}; -/// use nom::number::streaming::hex_u32; -/// -/// let parser = |s| { -/// hex_u32(s) -/// }; -/// -/// assert_eq!(parser(b"01AE;"), Ok((&b";"[..], 0x01AE))); -/// assert_eq!(parser(b"abc"), Err(Err::Incomplete(Needed::new(1)))); -/// assert_eq!(parser(b"ggg"), Err(Err::Error((&b"ggg"[..], ErrorKind::IsA)))); -/// ``` -#[inline] -pub fn hex_u32<'a, E: ParseError<&'a [u8]>>(input: &'a [u8]) -> IResult<&'a [u8], u32, E> { - let (i, o) = crate::bytes::streaming::is_a(&b"0123456789abcdefABCDEF"[..])(input)?; - - // Do not parse more than 8 characters for a u32 - let (parsed, remaining) = if o.len() <= 8 { - (o, i) - } else { - (&input[..8], &input[8..]) - }; - - let res = parsed - .iter() - .rev() - .enumerate() - .map(|(k, &v)| { - let digit = v as char; - digit.to_digit(16).unwrap_or(0) << (k * 4) - }) - .sum(); - - Ok((remaining, res)) -} - -/// Recognizes a floating point number in text format and returns the corresponding part of the input. -/// -/// *Streaming version*: Will return `Err(nom::Err::Incomplete(_))` if it reaches the end of input. -/// -/// ```rust -/// # use nom::{Err, error::ErrorKind, Needed}; -/// use nom::number::streaming::recognize_float; -/// -/// let parser = |s| { -/// recognize_float(s) -/// }; -/// -/// assert_eq!(parser("11e-1;"), Ok((";", "11e-1"))); -/// assert_eq!(parser("123E-02;"), Ok((";", "123E-02"))); -/// assert_eq!(parser("123K-01"), Ok(("K-01", "123"))); -/// assert_eq!(parser("abc"), Err(Err::Error(("abc", ErrorKind::Char)))); -/// ``` -#[rustfmt::skip] -pub fn recognize_float>(input: T) -> IResult -where - T: Slice> + Slice>, - T: Clone + Offset, - T: InputIter, - ::Item: AsChar, - T: InputTakeAtPosition + InputLength, - ::Item: AsChar -{ - recognize( - tuple(( - opt(alt((char('+'), char('-')))), - alt(( - map(tuple((digit1, opt(pair(char('.'), opt(digit1))))), |_| ()), - map(tuple((char('.'), digit1)), |_| ()) - )), - opt(tuple(( - alt((char('e'), char('E'))), - opt(alt((char('+'), char('-')))), - cut(digit1) - ))) - )) - )(input) -} - -// workaround until issues with minimal-lexical are fixed -#[doc(hidden)] -pub fn recognize_float_or_exceptions>(input: T) -> IResult -where - T: Slice> + Slice>, - T: Clone + Offset, - T: InputIter + InputTake + InputLength + Compare<&'static str>, - ::Item: AsChar, - T: InputTakeAtPosition, - ::Item: AsChar, -{ - alt(( - |i: T| { - recognize_float::<_, E>(i.clone()).map_err(|e| match e { - crate::Err::Error(_) => crate::Err::Error(E::from_error_kind(i, ErrorKind::Float)), - crate::Err::Failure(_) => crate::Err::Failure(E::from_error_kind(i, ErrorKind::Float)), - crate::Err::Incomplete(needed) => crate::Err::Incomplete(needed), - }) - }, - |i: T| { - crate::bytes::streaming::tag_no_case::<_, _, E>("nan")(i.clone()) - .map_err(|_| crate::Err::Error(E::from_error_kind(i, ErrorKind::Float))) - }, - |i: T| { - crate::bytes::streaming::tag_no_case::<_, _, E>("inf")(i.clone()) - .map_err(|_| crate::Err::Error(E::from_error_kind(i, ErrorKind::Float))) - }, - |i: T| { - crate::bytes::streaming::tag_no_case::<_, _, E>("infinity")(i.clone()) - .map_err(|_| crate::Err::Error(E::from_error_kind(i, ErrorKind::Float))) - }, - ))(input) -} - -/// Recognizes a floating point number in text format -/// -/// It returns a tuple of (`sign`, `integer part`, `fraction part` and `exponent`) of the input -/// data. -/// -/// *Streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there is not enough data. 
-/// -pub fn recognize_float_parts>(input: T) -> IResult -where - T: Slice> + Slice>, - T: Clone + Offset, - T: InputIter + crate::traits::ParseTo, - ::Item: AsChar, - T: InputTakeAtPosition + InputTake + InputLength, - ::Item: AsChar, - T: for<'a> Compare<&'a [u8]>, - T: AsBytes, -{ - let (i, sign) = sign(input.clone())?; - - //let (i, zeroes) = take_while(|c: ::Item| c.as_char() == '0')(i)?; - let (i, zeroes) = match i.as_bytes().iter().position(|c| *c != b'0') { - Some(index) => i.take_split(index), - None => i.take_split(i.input_len()), - }; - - //let (i, mut integer) = digit0(i)?; - let (i, mut integer) = match i - .as_bytes() - .iter() - .position(|c| !(*c >= b'0' && *c <= b'9')) - { - Some(index) => i.take_split(index), - None => i.take_split(i.input_len()), - }; - - if integer.input_len() == 0 && zeroes.input_len() > 0 { - // keep the last zero if integer is empty - integer = zeroes.slice(zeroes.input_len() - 1..); - } - - let (i, opt_dot) = opt(tag(&b"."[..]))(i)?; - let (i, fraction) = if opt_dot.is_none() { - let i2 = i.clone(); - (i2, i.slice(..0)) - } else { - // match number, trim right zeroes - let mut zero_count = 0usize; - let mut position = None; - for (pos, c) in i.as_bytes().iter().enumerate() { - if *c >= b'0' && *c <= b'9' { - if *c == b'0' { - zero_count += 1; - } else { - zero_count = 0; - } - } else { - position = Some(pos); - break; - } - } - - let position = match position { - Some(p) => p, - None => return Err(Err::Incomplete(Needed::new(1))), - }; - - let index = if zero_count == 0 { - position - } else if zero_count == position { - position - zero_count + 1 - } else { - position - zero_count - }; - - (i.slice(position..), i.slice(..index)) - }; - - if integer.input_len() == 0 && fraction.input_len() == 0 { - return Err(Err::Error(E::from_error_kind(input, ErrorKind::Float))); - } - - let i2 = i.clone(); - let (i, e) = match i.as_bytes().iter().next() { - Some(b'e') => (i.slice(1..), true), - Some(b'E') => (i.slice(1..), true), - _ => (i, false), - }; - - let (i, exp) = if e { - cut(crate::character::streaming::i32)(i)? - } else { - (i2, 0) - }; - - Ok((i, (sign, integer, fraction, exp))) -} - -/// Recognizes floating point number in text format and returns a f32. -/// -/// *Streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there is not enough data. 
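A minimal sketch of the textual float parsers defined in this hunk, assuming the nom 7.x API; the streaming variant needs a terminator or further input before it can decide the number is complete:

```rust
use nom::number::streaming::double;
use nom::IResult;

// `double` recognizes the textual form (sign, digits, optional fraction and
// exponent, or nan/inf) and converts it to f64 via ParseTo.
fn reading(input: &str) -> IResult<&str, f64> {
    double(input)
}

// reading("12.5;") parses to Ok((";", 12.5)).
// reading("12.5") is Err(Err::Incomplete(..)) because more digits could still follow.
```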
-/// -/// ```rust -/// # use nom::{Err, error::ErrorKind, Needed}; -/// # use nom::Needed::Size; -/// use nom::number::complete::float; -/// -/// let parser = |s| { -/// float(s) -/// }; -/// -/// assert_eq!(parser("11e-1"), Ok(("", 1.1))); -/// assert_eq!(parser("123E-02"), Ok(("", 1.23))); -/// assert_eq!(parser("123K-01"), Ok(("K-01", 123.0))); -/// assert_eq!(parser("abc"), Err(Err::Error(("abc", ErrorKind::Float)))); -/// ``` -pub fn float>(input: T) -> IResult -where - T: Slice> + Slice>, - T: Clone + Offset, - T: InputIter + InputLength + InputTake + crate::traits::ParseTo + Compare<&'static str>, - ::Item: AsChar, - ::IterElem: Clone, - T: InputTakeAtPosition, - ::Item: AsChar, - T: AsBytes, - T: for<'a> Compare<&'a [u8]>, -{ - /* - let (i, (sign, integer, fraction, exponent)) = recognize_float_parts(input)?; - - let mut float: f32 = minimal_lexical::parse_float( - integer.as_bytes().iter(), - fraction.as_bytes().iter(), - exponent, - ); - if !sign { - float = -float; - } - - Ok((i, float)) - */ - let (i, s) = recognize_float_or_exceptions(input)?; - match s.parse_to() { - Some(f) => Ok((i, f)), - None => Err(crate::Err::Error(E::from_error_kind( - i, - crate::error::ErrorKind::Float, - ))), - } -} - -/// Recognizes floating point number in text format and returns a f64. -/// -/// *Streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there is not enough data. -/// -/// ```rust -/// # use nom::{Err, error::ErrorKind, Needed}; -/// # use nom::Needed::Size; -/// use nom::number::complete::double; -/// -/// let parser = |s| { -/// double(s) -/// }; -/// -/// assert_eq!(parser("11e-1"), Ok(("", 1.1))); -/// assert_eq!(parser("123E-02"), Ok(("", 1.23))); -/// assert_eq!(parser("123K-01"), Ok(("K-01", 123.0))); -/// assert_eq!(parser("abc"), Err(Err::Error(("abc", ErrorKind::Float)))); -/// ``` -pub fn double>(input: T) -> IResult -where - T: Slice> + Slice>, - T: Clone + Offset, - T: InputIter + InputLength + InputTake + crate::traits::ParseTo + Compare<&'static str>, - ::Item: AsChar, - ::IterElem: Clone, - T: InputTakeAtPosition, - ::Item: AsChar, - T: AsBytes, - T: for<'a> Compare<&'a [u8]>, -{ - /* - let (i, (sign, integer, fraction, exponent)) = recognize_float_parts(input)?; - - let mut float: f64 = minimal_lexical::parse_float( - integer.as_bytes().iter(), - fraction.as_bytes().iter(), - exponent, - ); - if !sign { - float = -float; - } - - Ok((i, float)) - */ - let (i, s) = recognize_float_or_exceptions(input)?; - match s.parse_to() { - Some(f) => Ok((i, f)), - None => Err(crate::Err::Error(E::from_error_kind( - i, - crate::error::ErrorKind::Float, - ))), - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::error::ErrorKind; - use crate::internal::{Err, Needed}; - use proptest::prelude::*; - - macro_rules! 
assert_parse( - ($left: expr, $right: expr) => { - let res: $crate::IResult<_, _, (_, ErrorKind)> = $left; - assert_eq!(res, $right); - }; - ); - - #[test] - fn i8_tests() { - assert_parse!(be_i8(&[0x00][..]), Ok((&b""[..], 0))); - assert_parse!(be_i8(&[0x7f][..]), Ok((&b""[..], 127))); - assert_parse!(be_i8(&[0xff][..]), Ok((&b""[..], -1))); - assert_parse!(be_i8(&[0x80][..]), Ok((&b""[..], -128))); - assert_parse!(be_i8(&[][..]), Err(Err::Incomplete(Needed::new(1)))); - } - - #[test] - fn i16_tests() { - assert_parse!(be_i16(&[0x00, 0x00][..]), Ok((&b""[..], 0))); - assert_parse!(be_i16(&[0x7f, 0xff][..]), Ok((&b""[..], 32_767_i16))); - assert_parse!(be_i16(&[0xff, 0xff][..]), Ok((&b""[..], -1))); - assert_parse!(be_i16(&[0x80, 0x00][..]), Ok((&b""[..], -32_768_i16))); - assert_parse!(be_i16(&[][..]), Err(Err::Incomplete(Needed::new(2)))); - assert_parse!(be_i16(&[0x00][..]), Err(Err::Incomplete(Needed::new(1)))); - } - - #[test] - fn u24_tests() { - assert_parse!(be_u24(&[0x00, 0x00, 0x00][..]), Ok((&b""[..], 0))); - assert_parse!(be_u24(&[0x00, 0xFF, 0xFF][..]), Ok((&b""[..], 65_535_u32))); - assert_parse!( - be_u24(&[0x12, 0x34, 0x56][..]), - Ok((&b""[..], 1_193_046_u32)) - ); - assert_parse!(be_u24(&[][..]), Err(Err::Incomplete(Needed::new(3)))); - assert_parse!(be_u24(&[0x00][..]), Err(Err::Incomplete(Needed::new(2)))); - assert_parse!( - be_u24(&[0x00, 0x00][..]), - Err(Err::Incomplete(Needed::new(1))) - ); - } - - #[test] - fn i24_tests() { - assert_parse!(be_i24(&[0xFF, 0xFF, 0xFF][..]), Ok((&b""[..], -1_i32))); - assert_parse!(be_i24(&[0xFF, 0x00, 0x00][..]), Ok((&b""[..], -65_536_i32))); - assert_parse!( - be_i24(&[0xED, 0xCB, 0xAA][..]), - Ok((&b""[..], -1_193_046_i32)) - ); - assert_parse!(be_i24(&[][..]), Err(Err::Incomplete(Needed::new(3)))); - assert_parse!(be_i24(&[0x00][..]), Err(Err::Incomplete(Needed::new(2)))); - assert_parse!( - be_i24(&[0x00, 0x00][..]), - Err(Err::Incomplete(Needed::new(1))) - ); - } - - #[test] - fn i32_tests() { - assert_parse!(be_i32(&[0x00, 0x00, 0x00, 0x00][..]), Ok((&b""[..], 0))); - assert_parse!( - be_i32(&[0x7f, 0xff, 0xff, 0xff][..]), - Ok((&b""[..], 2_147_483_647_i32)) - ); - assert_parse!(be_i32(&[0xff, 0xff, 0xff, 0xff][..]), Ok((&b""[..], -1))); - assert_parse!( - be_i32(&[0x80, 0x00, 0x00, 0x00][..]), - Ok((&b""[..], -2_147_483_648_i32)) - ); - assert_parse!(be_i32(&[][..]), Err(Err::Incomplete(Needed::new(4)))); - assert_parse!(be_i32(&[0x00][..]), Err(Err::Incomplete(Needed::new(3)))); - assert_parse!( - be_i32(&[0x00, 0x00][..]), - Err(Err::Incomplete(Needed::new(2))) - ); - assert_parse!( - be_i32(&[0x00, 0x00, 0x00][..]), - Err(Err::Incomplete(Needed::new(1))) - ); - } - - #[test] - fn i64_tests() { - assert_parse!( - be_i64(&[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00][..]), - Ok((&b""[..], 0)) - ); - assert_parse!( - be_i64(&[0x7f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff][..]), - Ok((&b""[..], 9_223_372_036_854_775_807_i64)) - ); - assert_parse!( - be_i64(&[0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff][..]), - Ok((&b""[..], -1)) - ); - assert_parse!( - be_i64(&[0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00][..]), - Ok((&b""[..], -9_223_372_036_854_775_808_i64)) - ); - assert_parse!(be_i64(&[][..]), Err(Err::Incomplete(Needed::new(8)))); - assert_parse!(be_i64(&[0x00][..]), Err(Err::Incomplete(Needed::new(7)))); - assert_parse!( - be_i64(&[0x00, 0x00][..]), - Err(Err::Incomplete(Needed::new(6))) - ); - assert_parse!( - be_i64(&[0x00, 0x00, 0x00][..]), - Err(Err::Incomplete(Needed::new(5))) - ); - assert_parse!( - 
be_i64(&[0x00, 0x00, 0x00, 0x00][..]), - Err(Err::Incomplete(Needed::new(4))) - ); - assert_parse!( - be_i64(&[0x00, 0x00, 0x00, 0x00, 0x00][..]), - Err(Err::Incomplete(Needed::new(3))) - ); - assert_parse!( - be_i64(&[0x00, 0x00, 0x00, 0x00, 0x00, 0x00][..]), - Err(Err::Incomplete(Needed::new(2))) - ); - assert_parse!( - be_i64(&[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00][..]), - Err(Err::Incomplete(Needed::new(1))) - ); - } - - #[test] - fn i128_tests() { - assert_parse!( - be_i128( - &[ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00 - ][..] - ), - Ok((&b""[..], 0)) - ); - assert_parse!( - be_i128( - &[ - 0x7f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff - ][..] - ), - Ok(( - &b""[..], - 170_141_183_460_469_231_731_687_303_715_884_105_727_i128 - )) - ); - assert_parse!( - be_i128( - &[ - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff - ][..] - ), - Ok((&b""[..], -1)) - ); - assert_parse!( - be_i128( - &[ - 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00 - ][..] - ), - Ok(( - &b""[..], - -170_141_183_460_469_231_731_687_303_715_884_105_728_i128 - )) - ); - assert_parse!(be_i128(&[][..]), Err(Err::Incomplete(Needed::new(16)))); - assert_parse!(be_i128(&[0x00][..]), Err(Err::Incomplete(Needed::new(15)))); - assert_parse!( - be_i128(&[0x00, 0x00][..]), - Err(Err::Incomplete(Needed::new(14))) - ); - assert_parse!( - be_i128(&[0x00, 0x00, 0x00][..]), - Err(Err::Incomplete(Needed::new(13))) - ); - assert_parse!( - be_i128(&[0x00, 0x00, 0x00, 0x00][..]), - Err(Err::Incomplete(Needed::new(12))) - ); - assert_parse!( - be_i128(&[0x00, 0x00, 0x00, 0x00, 0x00][..]), - Err(Err::Incomplete(Needed::new(11))) - ); - assert_parse!( - be_i128(&[0x00, 0x00, 0x00, 0x00, 0x00, 0x00][..]), - Err(Err::Incomplete(Needed::new(10))) - ); - assert_parse!( - be_i128(&[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00][..]), - Err(Err::Incomplete(Needed::new(9))) - ); - assert_parse!( - be_i128(&[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00][..]), - Err(Err::Incomplete(Needed::new(8))) - ); - assert_parse!( - be_i128(&[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00][..]), - Err(Err::Incomplete(Needed::new(7))) - ); - assert_parse!( - be_i128(&[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00][..]), - Err(Err::Incomplete(Needed::new(6))) - ); - assert_parse!( - be_i128(&[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00][..]), - Err(Err::Incomplete(Needed::new(5))) - ); - assert_parse!( - be_i128(&[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00][..]), - Err(Err::Incomplete(Needed::new(4))) - ); - assert_parse!( - be_i128(&[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00][..]), - Err(Err::Incomplete(Needed::new(3))) - ); - assert_parse!( - be_i128( - &[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00][..] - ), - Err(Err::Incomplete(Needed::new(2))) - ); - assert_parse!( - be_i128( - &[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00] - [..] 
- ), - Err(Err::Incomplete(Needed::new(1))) - ); - } - - #[test] - fn le_i8_tests() { - assert_parse!(le_i8(&[0x00][..]), Ok((&b""[..], 0))); - assert_parse!(le_i8(&[0x7f][..]), Ok((&b""[..], 127))); - assert_parse!(le_i8(&[0xff][..]), Ok((&b""[..], -1))); - assert_parse!(le_i8(&[0x80][..]), Ok((&b""[..], -128))); - } - - #[test] - fn le_i16_tests() { - assert_parse!(le_i16(&[0x00, 0x00][..]), Ok((&b""[..], 0))); - assert_parse!(le_i16(&[0xff, 0x7f][..]), Ok((&b""[..], 32_767_i16))); - assert_parse!(le_i16(&[0xff, 0xff][..]), Ok((&b""[..], -1))); - assert_parse!(le_i16(&[0x00, 0x80][..]), Ok((&b""[..], -32_768_i16))); - } - - #[test] - fn le_u24_tests() { - assert_parse!(le_u24(&[0x00, 0x00, 0x00][..]), Ok((&b""[..], 0))); - assert_parse!(le_u24(&[0xFF, 0xFF, 0x00][..]), Ok((&b""[..], 65_535_u32))); - assert_parse!( - le_u24(&[0x56, 0x34, 0x12][..]), - Ok((&b""[..], 1_193_046_u32)) - ); - } - - #[test] - fn le_i24_tests() { - assert_parse!(le_i24(&[0xFF, 0xFF, 0xFF][..]), Ok((&b""[..], -1_i32))); - assert_parse!(le_i24(&[0x00, 0x00, 0xFF][..]), Ok((&b""[..], -65_536_i32))); - assert_parse!( - le_i24(&[0xAA, 0xCB, 0xED][..]), - Ok((&b""[..], -1_193_046_i32)) - ); - } - - #[test] - fn le_i32_tests() { - assert_parse!(le_i32(&[0x00, 0x00, 0x00, 0x00][..]), Ok((&b""[..], 0))); - assert_parse!( - le_i32(&[0xff, 0xff, 0xff, 0x7f][..]), - Ok((&b""[..], 2_147_483_647_i32)) - ); - assert_parse!(le_i32(&[0xff, 0xff, 0xff, 0xff][..]), Ok((&b""[..], -1))); - assert_parse!( - le_i32(&[0x00, 0x00, 0x00, 0x80][..]), - Ok((&b""[..], -2_147_483_648_i32)) - ); - } - - #[test] - fn le_i64_tests() { - assert_parse!( - le_i64(&[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00][..]), - Ok((&b""[..], 0)) - ); - assert_parse!( - le_i64(&[0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f][..]), - Ok((&b""[..], 9_223_372_036_854_775_807_i64)) - ); - assert_parse!( - le_i64(&[0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff][..]), - Ok((&b""[..], -1)) - ); - assert_parse!( - le_i64(&[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80][..]), - Ok((&b""[..], -9_223_372_036_854_775_808_i64)) - ); - } - - #[test] - fn le_i128_tests() { - assert_parse!( - le_i128( - &[ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00 - ][..] - ), - Ok((&b""[..], 0)) - ); - assert_parse!( - le_i128( - &[ - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0x7f - ][..] - ), - Ok(( - &b""[..], - 170_141_183_460_469_231_731_687_303_715_884_105_727_i128 - )) - ); - assert_parse!( - le_i128( - &[ - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff - ][..] - ), - Ok((&b""[..], -1)) - ); - assert_parse!( - le_i128( - &[ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x80 - ][..] 
- ), - Ok(( - &b""[..], - -170_141_183_460_469_231_731_687_303_715_884_105_728_i128 - )) - ); - } - - #[test] - fn be_f32_tests() { - assert_parse!(be_f32(&[0x00, 0x00, 0x00, 0x00][..]), Ok((&b""[..], 0_f32))); - assert_parse!( - be_f32(&[0x4d, 0x31, 0x1f, 0xd8][..]), - Ok((&b""[..], 185_728_392_f32)) - ); - } - - #[test] - fn be_f64_tests() { - assert_parse!( - be_f64(&[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00][..]), - Ok((&b""[..], 0_f64)) - ); - assert_parse!( - be_f64(&[0x41, 0xa6, 0x23, 0xfb, 0x10, 0x00, 0x00, 0x00][..]), - Ok((&b""[..], 185_728_392_f64)) - ); - } - - #[test] - fn le_f32_tests() { - assert_parse!(le_f32(&[0x00, 0x00, 0x00, 0x00][..]), Ok((&b""[..], 0_f32))); - assert_parse!( - le_f32(&[0xd8, 0x1f, 0x31, 0x4d][..]), - Ok((&b""[..], 185_728_392_f32)) - ); - } - - #[test] - fn le_f64_tests() { - assert_parse!( - le_f64(&[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00][..]), - Ok((&b""[..], 0_f64)) - ); - assert_parse!( - le_f64(&[0x00, 0x00, 0x00, 0x10, 0xfb, 0x23, 0xa6, 0x41][..]), - Ok((&b""[..], 185_728_392_f64)) - ); - } - - #[test] - fn hex_u32_tests() { - assert_parse!( - hex_u32(&b";"[..]), - Err(Err::Error(error_position!(&b";"[..], ErrorKind::IsA))) - ); - assert_parse!(hex_u32(&b"ff;"[..]), Ok((&b";"[..], 255))); - assert_parse!(hex_u32(&b"1be2;"[..]), Ok((&b";"[..], 7138))); - assert_parse!(hex_u32(&b"c5a31be2;"[..]), Ok((&b";"[..], 3_315_801_058))); - assert_parse!(hex_u32(&b"C5A31be2;"[..]), Ok((&b";"[..], 3_315_801_058))); - assert_parse!(hex_u32(&b"00c5a31be2;"[..]), Ok((&b"e2;"[..], 12_952_347))); - assert_parse!( - hex_u32(&b"c5a31be201;"[..]), - Ok((&b"01;"[..], 3_315_801_058)) - ); - assert_parse!(hex_u32(&b"ffffffff;"[..]), Ok((&b";"[..], 4_294_967_295))); - assert_parse!(hex_u32(&b"0x1be2;"[..]), Ok((&b"x1be2;"[..], 0))); - assert_parse!(hex_u32(&b"12af"[..]), Err(Err::Incomplete(Needed::new(1)))); - } - - #[test] - #[cfg(feature = "std")] - fn float_test() { - let mut test_cases = vec![ - "+3.14", - "3.14", - "-3.14", - "0", - "0.0", - "1.", - ".789", - "-.5", - "1e7", - "-1E-7", - ".3e-2", - "1.e4", - "1.2e4", - "12.34", - "-1.234E-12", - "-1.234e-12", - "0.00000000000000000087", - ]; - - for test in test_cases.drain(..) 
{ - let expected32 = str::parse::(test).unwrap(); - let expected64 = str::parse::(test).unwrap(); - - println!("now parsing: {} -> {}", test, expected32); - - let larger = format!("{};", test); - assert_parse!(recognize_float(&larger[..]), Ok((";", test))); - - assert_parse!(float(larger.as_bytes()), Ok((&b";"[..], expected32))); - assert_parse!(float(&larger[..]), Ok((";", expected32))); - - assert_parse!(double(larger.as_bytes()), Ok((&b";"[..], expected64))); - assert_parse!(double(&larger[..]), Ok((";", expected64))); - } - - let remaining_exponent = "-1.234E-"; - assert_parse!( - recognize_float(remaining_exponent), - Err(Err::Incomplete(Needed::new(1))) - ); - - let (_i, nan) = float::<_, ()>("NaN").unwrap(); - assert!(nan.is_nan()); - - let (_i, inf) = float::<_, ()>("inf").unwrap(); - assert!(inf.is_infinite()); - let (_i, inf) = float::<_, ()>("infinite").unwrap(); - assert!(inf.is_infinite()); - } - - #[test] - fn configurable_endianness() { - use crate::number::Endianness; - - fn be_tst16(i: &[u8]) -> IResult<&[u8], u16> { - u16(Endianness::Big)(i) - } - fn le_tst16(i: &[u8]) -> IResult<&[u8], u16> { - u16(Endianness::Little)(i) - } - assert_eq!(be_tst16(&[0x80, 0x00]), Ok((&b""[..], 32_768_u16))); - assert_eq!(le_tst16(&[0x80, 0x00]), Ok((&b""[..], 128_u16))); - - fn be_tst32(i: &[u8]) -> IResult<&[u8], u32> { - u32(Endianness::Big)(i) - } - fn le_tst32(i: &[u8]) -> IResult<&[u8], u32> { - u32(Endianness::Little)(i) - } - assert_eq!( - be_tst32(&[0x12, 0x00, 0x60, 0x00]), - Ok((&b""[..], 302_014_464_u32)) - ); - assert_eq!( - le_tst32(&[0x12, 0x00, 0x60, 0x00]), - Ok((&b""[..], 6_291_474_u32)) - ); - - fn be_tst64(i: &[u8]) -> IResult<&[u8], u64> { - u64(Endianness::Big)(i) - } - fn le_tst64(i: &[u8]) -> IResult<&[u8], u64> { - u64(Endianness::Little)(i) - } - assert_eq!( - be_tst64(&[0x12, 0x00, 0x60, 0x00, 0x12, 0x00, 0x80, 0x00]), - Ok((&b""[..], 1_297_142_246_100_992_000_u64)) - ); - assert_eq!( - le_tst64(&[0x12, 0x00, 0x60, 0x00, 0x12, 0x00, 0x80, 0x00]), - Ok((&b""[..], 36_028_874_334_666_770_u64)) - ); - - fn be_tsti16(i: &[u8]) -> IResult<&[u8], i16> { - i16(Endianness::Big)(i) - } - fn le_tsti16(i: &[u8]) -> IResult<&[u8], i16> { - i16(Endianness::Little)(i) - } - assert_eq!(be_tsti16(&[0x00, 0x80]), Ok((&b""[..], 128_i16))); - assert_eq!(le_tsti16(&[0x00, 0x80]), Ok((&b""[..], -32_768_i16))); - - fn be_tsti32(i: &[u8]) -> IResult<&[u8], i32> { - i32(Endianness::Big)(i) - } - fn le_tsti32(i: &[u8]) -> IResult<&[u8], i32> { - i32(Endianness::Little)(i) - } - assert_eq!( - be_tsti32(&[0x00, 0x12, 0x60, 0x00]), - Ok((&b""[..], 1_204_224_i32)) - ); - assert_eq!( - le_tsti32(&[0x00, 0x12, 0x60, 0x00]), - Ok((&b""[..], 6_296_064_i32)) - ); - - fn be_tsti64(i: &[u8]) -> IResult<&[u8], i64> { - i64(Endianness::Big)(i) - } - fn le_tsti64(i: &[u8]) -> IResult<&[u8], i64> { - i64(Endianness::Little)(i) - } - assert_eq!( - be_tsti64(&[0x00, 0xFF, 0x60, 0x00, 0x12, 0x00, 0x80, 0x00]), - Ok((&b""[..], 71_881_672_479_506_432_i64)) - ); - assert_eq!( - le_tsti64(&[0x00, 0xFF, 0x60, 0x00, 0x12, 0x00, 0x80, 0x00]), - Ok((&b""[..], 36_028_874_334_732_032_i64)) - ); - } - - #[cfg(feature = "std")] - fn parse_f64(i: &str) -> IResult<&str, f64, ()> { - use crate::traits::ParseTo; - match recognize_float_or_exceptions(i) { - Err(e) => Err(e), - Ok((i, s)) => { - if s.is_empty() { - return Err(Err::Error(())); - } - match s.parse_to() { - Some(n) => Ok((i, n)), - None => Err(Err::Error(())), - } - } - } - } - - proptest! 
{ - #[test] - #[cfg(feature = "std")] - fn floats(s in "\\PC*") { - println!("testing {}", s); - let res1 = parse_f64(&s); - let res2 = double::<_, ()>(s.as_str()); - assert_eq!(res1, res2); - } - } -} diff --git a/vendor/nom/src/sequence/mod.rs b/vendor/nom/src/sequence/mod.rs deleted file mode 100644 index 735ab45cc73485..00000000000000 --- a/vendor/nom/src/sequence/mod.rs +++ /dev/null @@ -1,279 +0,0 @@ -//! Combinators applying parsers in sequence - -#[cfg(test)] -mod tests; - -use crate::error::ParseError; -use crate::internal::{IResult, Parser}; - -/// Gets an object from the first parser, -/// then gets another object from the second parser. -/// -/// # Arguments -/// * `first` The first parser to apply. -/// * `second` The second parser to apply. -/// -/// ```rust -/// # use nom::{Err, error::ErrorKind, Needed}; -/// # use nom::Needed::Size; -/// use nom::sequence::pair; -/// use nom::bytes::complete::tag; -/// -/// let mut parser = pair(tag("abc"), tag("efg")); -/// -/// assert_eq!(parser("abcefg"), Ok(("", ("abc", "efg")))); -/// assert_eq!(parser("abcefghij"), Ok(("hij", ("abc", "efg")))); -/// assert_eq!(parser(""), Err(Err::Error(("", ErrorKind::Tag)))); -/// assert_eq!(parser("123"), Err(Err::Error(("123", ErrorKind::Tag)))); -/// ``` -pub fn pair, F, G>( - mut first: F, - mut second: G, -) -> impl FnMut(I) -> IResult -where - F: Parser, - G: Parser, -{ - move |input: I| { - let (input, o1) = first.parse(input)?; - second.parse(input).map(|(i, o2)| (i, (o1, o2))) - } -} - -/// Matches an object from the first parser and discards it, -/// then gets an object from the second parser. -/// -/// # Arguments -/// * `first` The opening parser. -/// * `second` The second parser to get object. -/// -/// ```rust -/// # use nom::{Err, error::ErrorKind, Needed}; -/// # use nom::Needed::Size; -/// use nom::sequence::preceded; -/// use nom::bytes::complete::tag; -/// -/// let mut parser = preceded(tag("abc"), tag("efg")); -/// -/// assert_eq!(parser("abcefg"), Ok(("", "efg"))); -/// assert_eq!(parser("abcefghij"), Ok(("hij", "efg"))); -/// assert_eq!(parser(""), Err(Err::Error(("", ErrorKind::Tag)))); -/// assert_eq!(parser("123"), Err(Err::Error(("123", ErrorKind::Tag)))); -/// ``` -pub fn preceded, F, G>( - mut first: F, - mut second: G, -) -> impl FnMut(I) -> IResult -where - F: Parser, - G: Parser, -{ - move |input: I| { - let (input, _) = first.parse(input)?; - second.parse(input) - } -} - -/// Gets an object from the first parser, -/// then matches an object from the second parser and discards it. -/// -/// # Arguments -/// * `first` The first parser to apply. -/// * `second` The second parser to match an object. 
-/// -/// ```rust -/// # use nom::{Err, error::ErrorKind, Needed}; -/// # use nom::Needed::Size; -/// use nom::sequence::terminated; -/// use nom::bytes::complete::tag; -/// -/// let mut parser = terminated(tag("abc"), tag("efg")); -/// -/// assert_eq!(parser("abcefg"), Ok(("", "abc"))); -/// assert_eq!(parser("abcefghij"), Ok(("hij", "abc"))); -/// assert_eq!(parser(""), Err(Err::Error(("", ErrorKind::Tag)))); -/// assert_eq!(parser("123"), Err(Err::Error(("123", ErrorKind::Tag)))); -/// ``` -pub fn terminated, F, G>( - mut first: F, - mut second: G, -) -> impl FnMut(I) -> IResult -where - F: Parser, - G: Parser, -{ - move |input: I| { - let (input, o1) = first.parse(input)?; - second.parse(input).map(|(i, _)| (i, o1)) - } -} - -/// Gets an object from the first parser, -/// then matches an object from the sep_parser and discards it, -/// then gets another object from the second parser. -/// -/// # Arguments -/// * `first` The first parser to apply. -/// * `sep` The separator parser to apply. -/// * `second` The second parser to apply. -/// -/// ```rust -/// # use nom::{Err, error::ErrorKind, Needed}; -/// # use nom::Needed::Size; -/// use nom::sequence::separated_pair; -/// use nom::bytes::complete::tag; -/// -/// let mut parser = separated_pair(tag("abc"), tag("|"), tag("efg")); -/// -/// assert_eq!(parser("abc|efg"), Ok(("", ("abc", "efg")))); -/// assert_eq!(parser("abc|efghij"), Ok(("hij", ("abc", "efg")))); -/// assert_eq!(parser(""), Err(Err::Error(("", ErrorKind::Tag)))); -/// assert_eq!(parser("123"), Err(Err::Error(("123", ErrorKind::Tag)))); -/// ``` -pub fn separated_pair, F, G, H>( - mut first: F, - mut sep: G, - mut second: H, -) -> impl FnMut(I) -> IResult -where - F: Parser, - G: Parser, - H: Parser, -{ - move |input: I| { - let (input, o1) = first.parse(input)?; - let (input, _) = sep.parse(input)?; - second.parse(input).map(|(i, o2)| (i, (o1, o2))) - } -} - -/// Matches an object from the first parser and discards it, -/// then gets an object from the second parser, -/// and finally matches an object from the third parser and discards it. -/// -/// # Arguments -/// * `first` The first parser to apply and discard. -/// * `second` The second parser to apply. -/// * `third` The third parser to apply and discard. -/// -/// ```rust -/// # use nom::{Err, error::ErrorKind, Needed}; -/// # use nom::Needed::Size; -/// use nom::sequence::delimited; -/// use nom::bytes::complete::tag; -/// -/// let mut parser = delimited(tag("("), tag("abc"), tag(")")); -/// -/// assert_eq!(parser("(abc)"), Ok(("", "abc"))); -/// assert_eq!(parser("(abc)def"), Ok(("def", "abc"))); -/// assert_eq!(parser(""), Err(Err::Error(("", ErrorKind::Tag)))); -/// assert_eq!(parser("123"), Err(Err::Error(("123", ErrorKind::Tag)))); -/// ``` -pub fn delimited, F, G, H>( - mut first: F, - mut second: G, - mut third: H, -) -> impl FnMut(I) -> IResult -where - F: Parser, - G: Parser, - H: Parser, -{ - move |input: I| { - let (input, _) = first.parse(input)?; - let (input, o2) = second.parse(input)?; - third.parse(input).map(|(i, _)| (i, o2)) - } -} - -/// Helper trait for the tuple combinator. -/// -/// This trait is implemented for tuples of parsers of up to 21 elements. -pub trait Tuple { - /// Parses the input and returns a tuple of results of each parser. - fn parse(&mut self, input: I) -> IResult; -} - -impl, F: Parser> - Tuple for (F,) -{ - fn parse(&mut self, input: Input) -> IResult { - self.0.parse(input).map(|(i, o)| (i, (o,))) - } -} - -macro_rules! 
tuple_trait( - ($name1:ident $ty1:ident, $name2: ident $ty2:ident, $($name:ident $ty:ident),*) => ( - tuple_trait!(__impl $name1 $ty1, $name2 $ty2; $($name $ty),*); - ); - (__impl $($name:ident $ty: ident),+; $name1:ident $ty1:ident, $($name2:ident $ty2:ident),*) => ( - tuple_trait_impl!($($name $ty),+); - tuple_trait!(__impl $($name $ty),+ , $name1 $ty1; $($name2 $ty2),*); - ); - (__impl $($name:ident $ty: ident),+; $name1:ident $ty1:ident) => ( - tuple_trait_impl!($($name $ty),+); - tuple_trait_impl!($($name $ty),+, $name1 $ty1); - ); -); - -macro_rules! tuple_trait_impl( - ($($name:ident $ty: ident),+) => ( - impl< - Input: Clone, $($ty),+ , Error: ParseError, - $($name: Parser),+ - > Tuple for ( $($name),+ ) { - - fn parse(&mut self, input: Input) -> IResult { - tuple_trait_inner!(0, self, input, (), $($name)+) - - } - } - ); -); - -macro_rules! tuple_trait_inner( - ($it:tt, $self:expr, $input:expr, (), $head:ident $($id:ident)+) => ({ - let (i, o) = $self.$it.parse($input.clone())?; - - succ!($it, tuple_trait_inner!($self, i, ( o ), $($id)+)) - }); - ($it:tt, $self:expr, $input:expr, ($($parsed:tt)*), $head:ident $($id:ident)+) => ({ - let (i, o) = $self.$it.parse($input.clone())?; - - succ!($it, tuple_trait_inner!($self, i, ($($parsed)* , o), $($id)+)) - }); - ($it:tt, $self:expr, $input:expr, ($($parsed:tt)*), $head:ident) => ({ - let (i, o) = $self.$it.parse($input.clone())?; - - Ok((i, ($($parsed)* , o))) - }); -); - -tuple_trait!(FnA A, FnB B, FnC C, FnD D, FnE E, FnF F, FnG G, FnH H, FnI I, FnJ J, FnK K, FnL L, - FnM M, FnN N, FnO O, FnP P, FnQ Q, FnR R, FnS S, FnT T, FnU U); - -// Special case: implement `Tuple` for `()`, the unit type. -// This can come up in macros which accept a variable number of arguments. -// Literally, `()` is an empty tuple, so it should simply parse nothing. -impl> Tuple for () { - fn parse(&mut self, input: I) -> IResult { - Ok((input, ())) - } -} - -///Applies a tuple of parsers one by one and returns their results as a tuple. 
-///There is a maximum of 21 parsers -/// ```rust -/// # use nom::{Err, error::ErrorKind}; -/// use nom::sequence::tuple; -/// use nom::character::complete::{alpha1, digit1}; -/// let mut parser = tuple((alpha1, digit1, alpha1)); -/// -/// assert_eq!(parser("abc123def"), Ok(("", ("abc", "123", "def")))); -/// assert_eq!(parser("123def"), Err(Err::Error(("123def", ErrorKind::Alpha)))); -/// ``` -pub fn tuple, List: Tuple>( - mut l: List, -) -> impl FnMut(I) -> IResult { - move |i: I| l.parse(i) -} diff --git a/vendor/nom/src/sequence/tests.rs b/vendor/nom/src/sequence/tests.rs deleted file mode 100644 index 30ad0d67833d81..00000000000000 --- a/vendor/nom/src/sequence/tests.rs +++ /dev/null @@ -1,290 +0,0 @@ -use super::*; -use crate::bytes::streaming::{tag, take}; -use crate::error::{Error, ErrorKind}; -use crate::internal::{Err, IResult, Needed}; -use crate::number::streaming::be_u16; - -#[test] -fn single_element_tuples() { - use crate::character::complete::alpha1; - use crate::{error::ErrorKind, Err}; - - let mut parser = tuple((alpha1,)); - assert_eq!(parser("abc123def"), Ok(("123def", ("abc",)))); - assert_eq!( - parser("123def"), - Err(Err::Error(("123def", ErrorKind::Alpha))) - ); -} - -#[derive(PartialEq, Eq, Debug)] -struct B { - a: u8, - b: u8, -} - -#[derive(PartialEq, Eq, Debug)] -struct C { - a: u8, - b: Option, -} - -/*FIXME: convert code examples to new error management -use util::{add_error_pattern, error_to_list, print_error}; - -#[cfg(feature = "std")] -#[rustfmt::skip] -fn error_to_string(e: &Context) -> &'static str { - let v: Vec<(P, ErrorKind)> = error_to_list(e); - // do it this way if you can use slice patterns - //match &v[..] { - // [ErrorKind::Custom(42), ErrorKind::Tag] => "missing `ijkl` tag", - // [ErrorKind::Custom(42), ErrorKind::Custom(128), ErrorKind::Tag] => "missing `mnop` tag after `ijkl`", - // _ => "unrecognized error" - //} - - let collected: Vec> = v.iter().map(|&(_, ref e)| e.clone()).collect(); - if &collected[..] == [ErrorKind::Custom(42), ErrorKind::Tag] { - "missing `ijkl` tag" - } else if &collected[..] 
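A minimal sketch combining the sequence combinators removed above (assuming the nom 7.x API); the `key_value` parser and its tiny grammar are illustrative only:

```rust
use nom::bytes::complete::tag;
use nom::character::complete::{alpha1, digit1};
use nom::sequence::{delimited, separated_pair};
use nom::IResult;

// Parses "(key=123)rest" into ("key", "123"), leaving "rest" unconsumed.
fn key_value(input: &str) -> IResult<&str, (&str, &str)> {
    delimited(
        tag("("),
        separated_pair(alpha1, tag("="), digit1),
        tag(")"),
    )(input)
}
```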
== [ErrorKind::Custom(42), ErrorKind::Custom(128), ErrorKind::Tag] { - "missing `mnop` tag after `ijkl`" - } else { - "unrecognized error" - } -} - -// do it this way if you can use box patterns -//use $crate::lib::std::str; -//fn error_to_string(e:Err) -> String -// match e { -// NodePosition(ErrorKind::Custom(42), i1, box Position(ErrorKind::Tag, i2)) => { -// format!("missing `ijkl` tag, found '{}' instead", str::from_utf8(i2).unwrap()) -// }, -// NodePosition(ErrorKind::Custom(42), i1, box NodePosition(ErrorKind::Custom(128), i2, box Position(ErrorKind::Tag, i3))) => { -// format!("missing `mnop` tag after `ijkl`, found '{}' instead", str::from_utf8(i3).unwrap()) -// }, -// _ => "unrecognized error".to_string() -// } -//} -*/ - -#[test] -fn complete() { - use crate::bytes::complete::tag; - fn err_test(i: &[u8]) -> IResult<&[u8], &[u8]> { - let (i, _) = tag("ijkl")(i)?; - tag("mnop")(i) - } - let a = &b"ijklmn"[..]; - - let res_a = err_test(a); - assert_eq!( - res_a, - Err(Err::Error(error_position!(&b"mn"[..], ErrorKind::Tag))) - ); -} - -#[test] -fn pair_test() { - fn pair_abc_def(i: &[u8]) -> IResult<&[u8], (&[u8], &[u8])> { - pair(tag("abc"), tag("def"))(i) - } - - assert_eq!( - pair_abc_def(&b"abcdefghijkl"[..]), - Ok((&b"ghijkl"[..], (&b"abc"[..], &b"def"[..]))) - ); - assert_eq!( - pair_abc_def(&b"ab"[..]), - Err(Err::Incomplete(Needed::new(1))) - ); - assert_eq!( - pair_abc_def(&b"abcd"[..]), - Err(Err::Incomplete(Needed::new(2))) - ); - assert_eq!( - pair_abc_def(&b"xxx"[..]), - Err(Err::Error(error_position!(&b"xxx"[..], ErrorKind::Tag))) - ); - assert_eq!( - pair_abc_def(&b"xxxdef"[..]), - Err(Err::Error(error_position!(&b"xxxdef"[..], ErrorKind::Tag))) - ); - assert_eq!( - pair_abc_def(&b"abcxxx"[..]), - Err(Err::Error(error_position!(&b"xxx"[..], ErrorKind::Tag))) - ); -} - -#[test] -fn separated_pair_test() { - fn sep_pair_abc_def(i: &[u8]) -> IResult<&[u8], (&[u8], &[u8])> { - separated_pair(tag("abc"), tag(","), tag("def"))(i) - } - - assert_eq!( - sep_pair_abc_def(&b"abc,defghijkl"[..]), - Ok((&b"ghijkl"[..], (&b"abc"[..], &b"def"[..]))) - ); - assert_eq!( - sep_pair_abc_def(&b"ab"[..]), - Err(Err::Incomplete(Needed::new(1))) - ); - assert_eq!( - sep_pair_abc_def(&b"abc,d"[..]), - Err(Err::Incomplete(Needed::new(2))) - ); - assert_eq!( - sep_pair_abc_def(&b"xxx"[..]), - Err(Err::Error(error_position!(&b"xxx"[..], ErrorKind::Tag))) - ); - assert_eq!( - sep_pair_abc_def(&b"xxx,def"[..]), - Err(Err::Error(error_position!(&b"xxx,def"[..], ErrorKind::Tag))) - ); - assert_eq!( - sep_pair_abc_def(&b"abc,xxx"[..]), - Err(Err::Error(error_position!(&b"xxx"[..], ErrorKind::Tag))) - ); -} - -#[test] -fn preceded_test() { - fn preceded_abcd_efgh(i: &[u8]) -> IResult<&[u8], &[u8]> { - preceded(tag("abcd"), tag("efgh"))(i) - } - - assert_eq!( - preceded_abcd_efgh(&b"abcdefghijkl"[..]), - Ok((&b"ijkl"[..], &b"efgh"[..])) - ); - assert_eq!( - preceded_abcd_efgh(&b"ab"[..]), - Err(Err::Incomplete(Needed::new(2))) - ); - assert_eq!( - preceded_abcd_efgh(&b"abcde"[..]), - Err(Err::Incomplete(Needed::new(3))) - ); - assert_eq!( - preceded_abcd_efgh(&b"xxx"[..]), - Err(Err::Error(error_position!(&b"xxx"[..], ErrorKind::Tag))) - ); - assert_eq!( - preceded_abcd_efgh(&b"xxxxdef"[..]), - Err(Err::Error(error_position!(&b"xxxxdef"[..], ErrorKind::Tag))) - ); - assert_eq!( - preceded_abcd_efgh(&b"abcdxxx"[..]), - Err(Err::Error(error_position!(&b"xxx"[..], ErrorKind::Tag))) - ); -} - -#[test] -fn terminated_test() { - fn terminated_abcd_efgh(i: &[u8]) -> IResult<&[u8], &[u8]> { - 
terminated(tag("abcd"), tag("efgh"))(i) - } - - assert_eq!( - terminated_abcd_efgh(&b"abcdefghijkl"[..]), - Ok((&b"ijkl"[..], &b"abcd"[..])) - ); - assert_eq!( - terminated_abcd_efgh(&b"ab"[..]), - Err(Err::Incomplete(Needed::new(2))) - ); - assert_eq!( - terminated_abcd_efgh(&b"abcde"[..]), - Err(Err::Incomplete(Needed::new(3))) - ); - assert_eq!( - terminated_abcd_efgh(&b"xxx"[..]), - Err(Err::Error(error_position!(&b"xxx"[..], ErrorKind::Tag))) - ); - assert_eq!( - terminated_abcd_efgh(&b"xxxxdef"[..]), - Err(Err::Error(error_position!(&b"xxxxdef"[..], ErrorKind::Tag))) - ); - assert_eq!( - terminated_abcd_efgh(&b"abcdxxxx"[..]), - Err(Err::Error(error_position!(&b"xxxx"[..], ErrorKind::Tag))) - ); -} - -#[test] -fn delimited_test() { - fn delimited_abc_def_ghi(i: &[u8]) -> IResult<&[u8], &[u8]> { - delimited(tag("abc"), tag("def"), tag("ghi"))(i) - } - - assert_eq!( - delimited_abc_def_ghi(&b"abcdefghijkl"[..]), - Ok((&b"jkl"[..], &b"def"[..])) - ); - assert_eq!( - delimited_abc_def_ghi(&b"ab"[..]), - Err(Err::Incomplete(Needed::new(1))) - ); - assert_eq!( - delimited_abc_def_ghi(&b"abcde"[..]), - Err(Err::Incomplete(Needed::new(1))) - ); - assert_eq!( - delimited_abc_def_ghi(&b"abcdefgh"[..]), - Err(Err::Incomplete(Needed::new(1))) - ); - assert_eq!( - delimited_abc_def_ghi(&b"xxx"[..]), - Err(Err::Error(error_position!(&b"xxx"[..], ErrorKind::Tag))) - ); - assert_eq!( - delimited_abc_def_ghi(&b"xxxdefghi"[..]), - Err(Err::Error(error_position!( - &b"xxxdefghi"[..], - ErrorKind::Tag - ),)) - ); - assert_eq!( - delimited_abc_def_ghi(&b"abcxxxghi"[..]), - Err(Err::Error(error_position!(&b"xxxghi"[..], ErrorKind::Tag))) - ); - assert_eq!( - delimited_abc_def_ghi(&b"abcdefxxx"[..]), - Err(Err::Error(error_position!(&b"xxx"[..], ErrorKind::Tag))) - ); -} - -#[test] -fn tuple_test() { - fn tuple_3(i: &[u8]) -> IResult<&[u8], (u16, &[u8], &[u8])> { - tuple((be_u16, take(3u8), tag("fg")))(i) - } - - assert_eq!( - tuple_3(&b"abcdefgh"[..]), - Ok((&b"h"[..], (0x6162u16, &b"cde"[..], &b"fg"[..]))) - ); - assert_eq!(tuple_3(&b"abcd"[..]), Err(Err::Incomplete(Needed::new(1)))); - assert_eq!(tuple_3(&b"abcde"[..]), Err(Err::Incomplete(Needed::new(2)))); - assert_eq!( - tuple_3(&b"abcdejk"[..]), - Err(Err::Error(error_position!(&b"jk"[..], ErrorKind::Tag))) - ); -} - -#[test] -fn unit_type() { - assert_eq!( - tuple::<&'static str, (), Error<&'static str>, ()>(())("abxsbsh"), - Ok(("abxsbsh", ())) - ); - assert_eq!( - tuple::<&'static str, (), Error<&'static str>, ()>(())("sdfjakdsas"), - Ok(("sdfjakdsas", ())) - ); - assert_eq!( - tuple::<&'static str, (), Error<&'static str>, ()>(())(""), - Ok(("", ())) - ); -} diff --git a/vendor/nom/src/str.rs b/vendor/nom/src/str.rs deleted file mode 100644 index 1a8b8ba2d4952c..00000000000000 --- a/vendor/nom/src/str.rs +++ /dev/null @@ -1,536 +0,0 @@ -#[cfg(test)] -mod test { - #[cfg(feature = "alloc")] - use crate::{branch::alt, bytes::complete::tag_no_case, combinator::recognize, multi::many1}; - use crate::{ - bytes::complete::{is_a, is_not, tag, take, take_till, take_until}, - error::{self, ErrorKind}, - Err, IResult, - }; - - #[test] - fn tagtr_succeed() { - const INPUT: &str = "Hello World!"; - const TAG: &str = "Hello"; - fn test(input: &str) -> IResult<&str, &str> { - tag(TAG)(input) - } - - match test(INPUT) { - Ok((extra, output)) => { - assert!(extra == " World!", "Parser `tag` consumed leftover input."); - assert!( - output == TAG, - "Parser `tag` doesn't return the tag it matched on success. 
\ - Expected `{}`, got `{}`.", - TAG, - output - ); - } - other => panic!( - "Parser `tag` didn't succeed when it should have. \ - Got `{:?}`.", - other - ), - }; - } - - #[test] - fn tagtr_incomplete() { - use crate::bytes::streaming::tag; - - const INPUT: &str = "Hello"; - const TAG: &str = "Hello World!"; - - let res: IResult<_, _, error::Error<_>> = tag(TAG)(INPUT); - match res { - Err(Err::Incomplete(_)) => (), - other => { - panic!( - "Parser `tag` didn't require more input when it should have. \ - Got `{:?}`.", - other - ); - } - }; - } - - #[test] - fn tagtr_error() { - const INPUT: &str = "Hello World!"; - const TAG: &str = "Random"; // TAG must be closer than INPUT. - - let res: IResult<_, _, error::Error<_>> = tag(TAG)(INPUT); - match res { - Err(Err::Error(_)) => (), - other => { - panic!( - "Parser `tag` didn't fail when it should have. Got `{:?}`.`", - other - ); - } - }; - } - - #[test] - fn take_s_succeed() { - const INPUT: &str = "βèƒôřèÂßÇáƒƭèř"; - const CONSUMED: &str = "βèƒôřèÂßÇ"; - const LEFTOVER: &str = "áƒƭèř"; - - let res: IResult<_, _, error::Error<_>> = take(9_usize)(INPUT); - match res { - Ok((extra, output)) => { - assert!( - extra == LEFTOVER, - "Parser `take_s` consumed leftover input. Leftover `{}`.", - extra - ); - assert!( - output == CONSUMED, - "Parser `take_s` doesn't return the string it consumed on success. Expected `{}`, got `{}`.", - CONSUMED, - output - ); - } - other => panic!( - "Parser `take_s` didn't succeed when it should have. \ - Got `{:?}`.", - other - ), - }; - } - - #[test] - fn take_until_succeed() { - const INPUT: &str = "βèƒôřèÂßÇ∂áƒƭèř"; - const FIND: &str = "ÂßÇ∂"; - const CONSUMED: &str = "βèƒôřè"; - const LEFTOVER: &str = "ÂßÇ∂áƒƭèř"; - - let res: IResult<_, _, (_, ErrorKind)> = take_until(FIND)(INPUT); - match res { - Ok((extra, output)) => { - assert!( - extra == LEFTOVER, - "Parser `take_until`\ - consumed leftover input. Leftover `{}`.", - extra - ); - assert!( - output == CONSUMED, - "Parser `take_until`\ - doesn't return the string it consumed on success. Expected `{}`, got `{}`.", - CONSUMED, - output - ); - } - other => panic!( - "Parser `take_until` didn't succeed when it should have. \ - Got `{:?}`.", - other - ), - }; - } - - #[test] - fn take_s_incomplete() { - use crate::bytes::streaming::take; - - const INPUT: &str = "βèƒôřèÂßÇá"; - - let res: IResult<_, _, (_, ErrorKind)> = take(13_usize)(INPUT); - match res { - Err(Err::Incomplete(_)) => (), - other => panic!( - "Parser `take` didn't require more input when it should have. 
\ - Got `{:?}`.", - other - ), - } - } - - use crate::internal::Needed; - - fn is_alphabetic(c: char) -> bool { - (c as u8 >= 0x41 && c as u8 <= 0x5A) || (c as u8 >= 0x61 && c as u8 <= 0x7A) - } - - #[test] - fn take_while() { - use crate::bytes::streaming::take_while; - - fn f(i: &str) -> IResult<&str, &str> { - take_while(is_alphabetic)(i) - } - let a = ""; - let b = "abcd"; - let c = "abcd123"; - let d = "123"; - - assert_eq!(f(&a[..]), Err(Err::Incomplete(Needed::new(1)))); - assert_eq!(f(&b[..]), Err(Err::Incomplete(Needed::new(1)))); - assert_eq!(f(&c[..]), Ok((&d[..], &b[..]))); - assert_eq!(f(&d[..]), Ok((&d[..], &a[..]))); - } - - #[test] - fn take_while1() { - use crate::bytes::streaming::take_while1; - - fn f(i: &str) -> IResult<&str, &str> { - take_while1(is_alphabetic)(i) - } - let a = ""; - let b = "abcd"; - let c = "abcd123"; - let d = "123"; - - assert_eq!(f(&a[..]), Err(Err::Incomplete(Needed::new(1)))); - assert_eq!(f(&b[..]), Err(Err::Incomplete(Needed::new(1)))); - assert_eq!(f(&c[..]), Ok((&"123"[..], &b[..]))); - assert_eq!( - f(&d[..]), - Err(Err::Error(error_position!(&d[..], ErrorKind::TakeWhile1))) - ); - } - - #[test] - fn take_till_s_succeed() { - const INPUT: &str = "βèƒôřèÂßÇáƒƭèř"; - const CONSUMED: &str = "βèƒôřèÂßÇ"; - const LEFTOVER: &str = "áƒƭèř"; - fn till_s(c: char) -> bool { - c == 'á' - } - fn test(input: &str) -> IResult<&str, &str> { - take_till(till_s)(input) - } - match test(INPUT) { - Ok((extra, output)) => { - assert!( - extra == LEFTOVER, - "Parser `take_till` consumed leftover input." - ); - assert!( - output == CONSUMED, - "Parser `take_till` doesn't return the string it consumed on success. \ - Expected `{}`, got `{}`.", - CONSUMED, - output - ); - } - other => panic!( - "Parser `take_till` didn't succeed when it should have. \ - Got `{:?}`.", - other - ), - }; - } - - #[test] - fn take_while_succeed_none() { - use crate::bytes::complete::take_while; - - const INPUT: &str = "βèƒôřèÂßÇáƒƭèř"; - const CONSUMED: &str = ""; - const LEFTOVER: &str = "βèƒôřèÂßÇáƒƭèř"; - fn while_s(c: char) -> bool { - c == '9' - } - fn test(input: &str) -> IResult<&str, &str> { - take_while(while_s)(input) - } - match test(INPUT) { - Ok((extra, output)) => { - assert!( - extra == LEFTOVER, - "Parser `take_while` consumed leftover input." - ); - assert!( - output == CONSUMED, - "Parser `take_while` doesn't return the string it consumed on success. \ - Expected `{}`, got `{}`.", - CONSUMED, - output - ); - } - other => panic!( - "Parser `take_while` didn't succeed when it should have. \ - Got `{:?}`.", - other - ), - }; - } - - #[test] - fn is_not_succeed() { - const INPUT: &str = "βèƒôřèÂßÇáƒƭèř"; - const AVOID: &str = "£úçƙ¥á"; - const CONSUMED: &str = "βèƒôřèÂßÇ"; - const LEFTOVER: &str = "áƒƭèř"; - fn test(input: &str) -> IResult<&str, &str> { - is_not(AVOID)(input) - } - match test(INPUT) { - Ok((extra, output)) => { - assert!( - extra == LEFTOVER, - "Parser `is_not` consumed leftover input. Leftover `{}`.", - extra - ); - assert!( - output == CONSUMED, - "Parser `is_not` doesn't return the string it consumed on success. Expected `{}`, got `{}`.", - CONSUMED, - output - ); - } - other => panic!( - "Parser `is_not` didn't succeed when it should have. 
\ - Got `{:?}`.", - other - ), - }; - } - - #[test] - fn take_while_succeed_some() { - use crate::bytes::complete::take_while; - - const INPUT: &str = "βèƒôřèÂßÇáƒƭèř"; - const CONSUMED: &str = "βèƒôřèÂßÇ"; - const LEFTOVER: &str = "áƒƭèř"; - fn while_s(c: char) -> bool { - c == 'β' - || c == 'è' - || c == 'ƒ' - || c == 'ô' - || c == 'ř' - || c == 'è' - || c == 'Â' - || c == 'ß' - || c == 'Ç' - } - fn test(input: &str) -> IResult<&str, &str> { - take_while(while_s)(input) - } - match test(INPUT) { - Ok((extra, output)) => { - assert!( - extra == LEFTOVER, - "Parser `take_while` consumed leftover input." - ); - assert!( - output == CONSUMED, - "Parser `take_while` doesn't return the string it consumed on success. \ - Expected `{}`, got `{}`.", - CONSUMED, - output - ); - } - other => panic!( - "Parser `take_while` didn't succeed when it should have. \ - Got `{:?}`.", - other - ), - }; - } - - #[test] - fn is_not_fail() { - const INPUT: &str = "βèƒôřèÂßÇáƒƭèř"; - const AVOID: &str = "βúçƙ¥"; - fn test(input: &str) -> IResult<&str, &str> { - is_not(AVOID)(input) - } - match test(INPUT) { - Err(Err::Error(_)) => (), - other => panic!( - "Parser `is_not` didn't fail when it should have. Got `{:?}`.", - other - ), - }; - } - - #[test] - fn take_while1_succeed() { - use crate::bytes::complete::take_while1; - - const INPUT: &str = "βèƒôřèÂßÇáƒƭèř"; - const CONSUMED: &str = "βèƒôřèÂßÇ"; - const LEFTOVER: &str = "áƒƭèř"; - fn while1_s(c: char) -> bool { - c == 'β' - || c == 'è' - || c == 'ƒ' - || c == 'ô' - || c == 'ř' - || c == 'è' - || c == 'Â' - || c == 'ß' - || c == 'Ç' - } - fn test(input: &str) -> IResult<&str, &str> { - take_while1(while1_s)(input) - } - match test(INPUT) { - Ok((extra, output)) => { - assert!( - extra == LEFTOVER, - "Parser `take_while1` consumed leftover input." - ); - assert!( - output == CONSUMED, - "Parser `take_while1` doesn't return the string it consumed on success. \ - Expected `{}`, got `{}`.", - CONSUMED, - output - ); - } - other => panic!( - "Parser `take_while1` didn't succeed when it should have. \ - Got `{:?}`.", - other - ), - }; - } - - #[test] - fn take_until_incomplete() { - use crate::bytes::streaming::take_until; - - const INPUT: &str = "βèƒôřè"; - const FIND: &str = "βèƒôřèÂßÇ"; - - let res: IResult<_, _, (_, ErrorKind)> = take_until(FIND)(INPUT); - match res { - Err(Err::Incomplete(_)) => (), - other => panic!( - "Parser `take_until` didn't require more input when it should have. \ - Got `{:?}`.", - other - ), - }; - } - - #[test] - fn is_a_succeed() { - const INPUT: &str = "βèƒôřèÂßÇáƒƭèř"; - const MATCH: &str = "βèƒôřèÂßÇ"; - const CONSUMED: &str = "βèƒôřèÂßÇ"; - const LEFTOVER: &str = "áƒƭèř"; - fn test(input: &str) -> IResult<&str, &str> { - is_a(MATCH)(input) - } - match test(INPUT) { - Ok((extra, output)) => { - assert!( - extra == LEFTOVER, - "Parser `is_a` consumed leftover input. Leftover `{}`.", - extra - ); - assert!( - output == CONSUMED, - "Parser `is_a` doesn't return the string it consumed on success. Expected `{}`, got `{}`.", - CONSUMED, - output - ); - } - other => panic!( - "Parser `is_a` didn't succeed when it should have. 
\ - Got `{:?}`.", - other - ), - }; - } - - #[test] - fn take_while1_fail() { - use crate::bytes::complete::take_while1; - - const INPUT: &str = "βèƒôřèÂßÇáƒƭèř"; - fn while1_s(c: char) -> bool { - c == '9' - } - fn test(input: &str) -> IResult<&str, &str> { - take_while1(while1_s)(input) - } - match test(INPUT) { - Err(Err::Error(_)) => (), - other => panic!( - "Parser `take_while1` didn't fail when it should have. \ - Got `{:?}`.", - other - ), - }; - } - - #[test] - fn is_a_fail() { - const INPUT: &str = "βèƒôřèÂßÇáƒƭèř"; - const MATCH: &str = "Ûñℓúçƙ¥"; - fn test(input: &str) -> IResult<&str, &str> { - is_a(MATCH)(input) - } - match test(INPUT) { - Err(Err::Error(_)) => (), - other => panic!( - "Parser `is_a` didn't fail when it should have. Got `{:?}`.", - other - ), - }; - } - - #[test] - fn take_until_error() { - use crate::bytes::streaming::take_until; - - const INPUT: &str = "βèƒôřèÂßÇáƒƭèř"; - const FIND: &str = "Ráñδô₥"; - - let res: IResult<_, _, (_, ErrorKind)> = take_until(FIND)(INPUT); - match res { - Err(Err::Incomplete(_)) => (), - other => panic!( - "Parser `take_until` didn't fail when it should have. \ - Got `{:?}`.", - other - ), - }; - } - - #[test] - #[cfg(feature = "alloc")] - fn recognize_is_a() { - let a = "aabbab"; - let b = "ababcd"; - - fn f(i: &str) -> IResult<&str, &str> { - recognize(many1(alt((tag("a"), tag("b")))))(i) - } - - assert_eq!(f(&a[..]), Ok((&a[6..], &a[..]))); - assert_eq!(f(&b[..]), Ok((&b[4..], &b[..4]))); - } - - #[test] - fn utf8_indexing() { - fn dot(i: &str) -> IResult<&str, &str> { - tag(".")(i) - } - - let _ = dot("點"); - } - - #[cfg(feature = "alloc")] - #[test] - fn case_insensitive() { - fn test(i: &str) -> IResult<&str, &str> { - tag_no_case("ABcd")(i) - } - assert_eq!(test("aBCdefgh"), Ok(("efgh", "aBCd"))); - assert_eq!(test("abcdefgh"), Ok(("efgh", "abcd"))); - assert_eq!(test("ABCDefgh"), Ok(("efgh", "ABCD"))); - } -} diff --git a/vendor/nom/src/traits.rs b/vendor/nom/src/traits.rs deleted file mode 100644 index 394e5bc3a59e04..00000000000000 --- a/vendor/nom/src/traits.rs +++ /dev/null @@ -1,1441 +0,0 @@ -//! 
Traits input types have to implement to work with nom combinators -use crate::error::{ErrorKind, ParseError}; -use crate::internal::{Err, IResult, Needed}; -use crate::lib::std::iter::{Copied, Enumerate}; -use crate::lib::std::ops::{Range, RangeFrom, RangeFull, RangeTo}; -use crate::lib::std::slice::Iter; -use crate::lib::std::str::from_utf8; -use crate::lib::std::str::CharIndices; -use crate::lib::std::str::Chars; -use crate::lib::std::str::FromStr; - -#[cfg(feature = "alloc")] -use crate::lib::std::string::String; -#[cfg(feature = "alloc")] -use crate::lib::std::vec::Vec; - -/// Abstract method to calculate the input length -pub trait InputLength { - /// Calculates the input length, as indicated by its name, - /// and the name of the trait itself - fn input_len(&self) -> usize; -} - -impl<'a, T> InputLength for &'a [T] { - #[inline] - fn input_len(&self) -> usize { - self.len() - } -} - -impl<'a> InputLength for &'a str { - #[inline] - fn input_len(&self) -> usize { - self.len() - } -} - -impl<'a> InputLength for (&'a [u8], usize) { - #[inline] - fn input_len(&self) -> usize { - //println!("bit input length for ({:?}, {}):", self.0, self.1); - //println!("-> {}", self.0.len() * 8 - self.1); - self.0.len() * 8 - self.1 - } -} - -/// Useful functions to calculate the offset between slices and show a hexdump of a slice -pub trait Offset { - /// Offset between the first byte of self and the first byte of the argument - fn offset(&self, second: &Self) -> usize; -} - -impl Offset for [u8] { - fn offset(&self, second: &Self) -> usize { - let fst = self.as_ptr(); - let snd = second.as_ptr(); - - snd as usize - fst as usize - } -} - -impl<'a> Offset for &'a [u8] { - fn offset(&self, second: &Self) -> usize { - let fst = self.as_ptr(); - let snd = second.as_ptr(); - - snd as usize - fst as usize - } -} - -impl Offset for str { - fn offset(&self, second: &Self) -> usize { - let fst = self.as_ptr(); - let snd = second.as_ptr(); - - snd as usize - fst as usize - } -} - -impl<'a> Offset for &'a str { - fn offset(&self, second: &Self) -> usize { - let fst = self.as_ptr(); - let snd = second.as_ptr(); - - snd as usize - fst as usize - } -} - -/// Helper trait for types that can be viewed as a byte slice -pub trait AsBytes { - /// Casts the input type to a byte slice - fn as_bytes(&self) -> &[u8]; -} - -impl<'a> AsBytes for &'a str { - #[inline(always)] - fn as_bytes(&self) -> &[u8] { - (*self).as_bytes() - } -} - -impl AsBytes for str { - #[inline(always)] - fn as_bytes(&self) -> &[u8] { - self.as_ref() - } -} - -impl<'a> AsBytes for &'a [u8] { - #[inline(always)] - fn as_bytes(&self) -> &[u8] { - *self - } -} - -impl AsBytes for [u8] { - #[inline(always)] - fn as_bytes(&self) -> &[u8] { - self - } -} - -macro_rules! as_bytes_array_impls { - ($($N:expr)+) => { - $( - impl<'a> AsBytes for &'a [u8; $N] { - #[inline(always)] - fn as_bytes(&self) -> &[u8] { - *self - } - } - - impl AsBytes for [u8; $N] { - #[inline(always)] - fn as_bytes(&self) -> &[u8] { - self - } - } - )+ - }; -} - -as_bytes_array_impls! 
{ - 0 1 2 3 4 5 6 7 8 9 - 10 11 12 13 14 15 16 17 18 19 - 20 21 22 23 24 25 26 27 28 29 - 30 31 32 -} - -/// Transforms common types to a char for basic token parsing -pub trait AsChar { - /// makes a char from self - fn as_char(self) -> char; - - /// Tests that self is an alphabetic character - /// - /// Warning: for `&str` it recognizes alphabetic - /// characters outside of the 52 ASCII letters - fn is_alpha(self) -> bool; - - /// Tests that self is an alphabetic character - /// or a decimal digit - fn is_alphanum(self) -> bool; - /// Tests that self is a decimal digit - fn is_dec_digit(self) -> bool; - /// Tests that self is an hex digit - fn is_hex_digit(self) -> bool; - /// Tests that self is an octal digit - fn is_oct_digit(self) -> bool; - /// Gets the len in bytes for self - fn len(self) -> usize; -} - -impl AsChar for u8 { - #[inline] - fn as_char(self) -> char { - self as char - } - #[inline] - fn is_alpha(self) -> bool { - (self >= 0x41 && self <= 0x5A) || (self >= 0x61 && self <= 0x7A) - } - #[inline] - fn is_alphanum(self) -> bool { - self.is_alpha() || self.is_dec_digit() - } - #[inline] - fn is_dec_digit(self) -> bool { - self >= 0x30 && self <= 0x39 - } - #[inline] - fn is_hex_digit(self) -> bool { - (self >= 0x30 && self <= 0x39) - || (self >= 0x41 && self <= 0x46) - || (self >= 0x61 && self <= 0x66) - } - #[inline] - fn is_oct_digit(self) -> bool { - self >= 0x30 && self <= 0x37 - } - #[inline] - fn len(self) -> usize { - 1 - } -} -impl<'a> AsChar for &'a u8 { - #[inline] - fn as_char(self) -> char { - *self as char - } - #[inline] - fn is_alpha(self) -> bool { - (*self >= 0x41 && *self <= 0x5A) || (*self >= 0x61 && *self <= 0x7A) - } - #[inline] - fn is_alphanum(self) -> bool { - self.is_alpha() || self.is_dec_digit() - } - #[inline] - fn is_dec_digit(self) -> bool { - *self >= 0x30 && *self <= 0x39 - } - #[inline] - fn is_hex_digit(self) -> bool { - (*self >= 0x30 && *self <= 0x39) - || (*self >= 0x41 && *self <= 0x46) - || (*self >= 0x61 && *self <= 0x66) - } - #[inline] - fn is_oct_digit(self) -> bool { - *self >= 0x30 && *self <= 0x37 - } - #[inline] - fn len(self) -> usize { - 1 - } -} - -impl AsChar for char { - #[inline] - fn as_char(self) -> char { - self - } - #[inline] - fn is_alpha(self) -> bool { - self.is_ascii_alphabetic() - } - #[inline] - fn is_alphanum(self) -> bool { - self.is_alpha() || self.is_dec_digit() - } - #[inline] - fn is_dec_digit(self) -> bool { - self.is_ascii_digit() - } - #[inline] - fn is_hex_digit(self) -> bool { - self.is_ascii_hexdigit() - } - #[inline] - fn is_oct_digit(self) -> bool { - self.is_digit(8) - } - #[inline] - fn len(self) -> usize { - self.len_utf8() - } -} - -impl<'a> AsChar for &'a char { - #[inline] - fn as_char(self) -> char { - *self - } - #[inline] - fn is_alpha(self) -> bool { - self.is_ascii_alphabetic() - } - #[inline] - fn is_alphanum(self) -> bool { - self.is_alpha() || self.is_dec_digit() - } - #[inline] - fn is_dec_digit(self) -> bool { - self.is_ascii_digit() - } - #[inline] - fn is_hex_digit(self) -> bool { - self.is_ascii_hexdigit() - } - #[inline] - fn is_oct_digit(self) -> bool { - self.is_digit(8) - } - #[inline] - fn len(self) -> usize { - self.len_utf8() - } -} - -/// Abstracts common iteration operations on the input type -pub trait InputIter { - /// The current input type is a sequence of that `Item` type. - /// - /// Example: `u8` for `&[u8]` or `char` for `&str` - type Item; - /// An iterator over the input type, producing the item and its position - /// for use with [Slice]. 
If we're iterating over `&str`, the position - /// corresponds to the byte index of the character - type Iter: Iterator; - - /// An iterator over the input type, producing the item - type IterElem: Iterator; - - /// Returns an iterator over the elements and their byte offsets - fn iter_indices(&self) -> Self::Iter; - /// Returns an iterator over the elements - fn iter_elements(&self) -> Self::IterElem; - /// Finds the byte position of the element - fn position

( - path: P - ) -> Result - where P: AsRef<::std::ffi::OsStr> { - let library = #library_new?; - #from_library - } - - pub unsafe fn from_library( - library: L - ) -> Result - where L: Into<::libloading::Library> { - let __library = library.into(); - #( #constructor_inits )* - Ok(#lib_ident { - __library, - #( #init_fields ),* - }) - } - - #( #struct_implementation )* - } - } - } - - #[allow(clippy::too_many_arguments)] - pub(crate) fn push_func( - &mut self, - ident: &Ident, - symbol: &str, - abi: ClangAbi, - is_variadic: bool, - is_required: bool, - args: &[TokenStream], - args_identifiers: &[TokenStream], - ret: &TokenStream, - ret_ty: &TokenStream, - attributes: &[TokenStream], - ctx: &BindgenContext, - ) { - if !is_variadic { - assert_eq!(args.len(), args_identifiers.len()); - } - - let signature = quote! { unsafe extern #abi fn ( #( #args),* ) #ret }; - let member = if is_required { - signature - } else { - quote! { Result<#signature, ::libloading::Error> } - }; - - self.struct_members.push(quote! { - pub #ident: #member, - }); - - // N.B: If the signature was required, it won't be wrapped in a Result<...> - // and we can simply call it directly. - let fn_ = if is_required { - quote! { self.#ident } - } else { - quote! { self.#ident.as_ref().expect("Expected function, got error.") } - }; - let call_body = if ctx.options().wrap_unsafe_ops { - quote!(unsafe { (#fn_)(#( #args_identifiers ),*) }) - } else { - quote!((#fn_)(#( #args_identifiers ),*) ) - }; - - // We can't implement variadic functions from C easily, so we allow to - // access the function pointer so that the user can call it just fine. - if !is_variadic { - self.struct_implementation.push(quote! { - #(#attributes)* - pub unsafe fn #ident ( &self, #( #args ),* ) #ret_ty { - #call_body - } - }); - } - - // N.B: Unwrap the signature upon construction if it is required to be resolved. - let symbol_cstr = - codegen::helpers::ast_ty::cstr_expr(symbol.to_string()); - let library_get = if ctx.options().wrap_unsafe_ops { - quote!(unsafe { __library.get(#symbol_cstr) }) - } else { - quote!(__library.get(#symbol_cstr)) - }; - - self.constructor_inits.push(if is_required { - quote! { - let #ident = #library_get.map(|sym| *sym)?; - } - } else { - quote! { - let #ident = #library_get.map(|sym| *sym); - } - }); - - self.init_fields.push(quote! { - #ident - }); - } - - pub fn push_var( - &mut self, - ident: &Ident, - symbol: &str, - ty: &TokenStream, - is_required: bool, - wrap_unsafe_ops: bool, - ) { - let member = if is_required { - quote! { *mut #ty } - } else { - quote! { Result<*mut #ty, ::libloading::Error> } - }; - - self.struct_members.push(quote! { - pub #ident: #member, - }); - - let deref = if is_required { - quote! { self.#ident } - } else { - quote! { *self.#ident.as_ref().expect("Expected variable, got error.") } - }; - self.struct_implementation.push(quote! { - pub unsafe fn #ident (&self) -> *mut #ty { - #deref - } - }); - - let symbol_cstr = - codegen::helpers::ast_ty::cstr_expr(symbol.to_string()); - - let library_get = if wrap_unsafe_ops { - quote!(unsafe { __library.get::<*mut #ty>(#symbol_cstr) }) - } else { - quote!(__library.get::<*mut #ty>(#symbol_cstr)) - }; - - let qmark = if is_required { quote!(?) } else { quote!() }; - - let var_get = quote! { - let #ident = #library_get.map(|sym| *sym)#qmark; - }; - - self.constructor_inits.push(var_get); - - self.init_fields.push(quote! 
{ - #ident - }); - } -} diff --git a/vendor/bindgen/codegen/error.rs b/vendor/bindgen/codegen/error.rs deleted file mode 100644 index b82ba2aef1c5e9..00000000000000 --- a/vendor/bindgen/codegen/error.rs +++ /dev/null @@ -1,52 +0,0 @@ -use std::error; -use std::fmt; - -/// Errors that can occur during code generation. -#[derive(Clone, Debug, PartialEq, Eq)] -pub(crate) enum Error { - /// Tried to generate an opaque blob for a type that did not have a layout. - NoLayoutForOpaqueBlob, - - /// Tried to instantiate an opaque template definition, or a template - /// definition that is too difficult for us to understand (like a partial - /// template specialization). - InstantiationOfOpaqueType, - - /// Function ABI is not supported. - UnsupportedAbi(&'static str), - - /// The pointer type size does not match the target's pointer size. - InvalidPointerSize { - ty_name: String, - ty_size: usize, - ptr_size: usize, - }, -} - -impl fmt::Display for Error { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match self { - Error::NoLayoutForOpaqueBlob => { - "Tried to generate an opaque blob, but had no layout.".fmt(f) - } - Error::InstantiationOfOpaqueType => { - "Instantiation of opaque template type or partial template specialization." - .fmt(f) - } - Error::UnsupportedAbi(abi) => { - write!( - f, - "{abi} ABI is not supported by the configured Rust target." - ) - } - Error::InvalidPointerSize { ty_name, ty_size, ptr_size } => { - write!(f, "The {ty_name} pointer type has size {ty_size} but the current target's pointer size is {ptr_size}.") - } - } - } -} - -impl error::Error for Error {} - -/// A `Result` of `T` or an error of `bindgen::codegen::error::Error`. -pub(crate) type Result = ::std::result::Result; diff --git a/vendor/bindgen/codegen/helpers.rs b/vendor/bindgen/codegen/helpers.rs deleted file mode 100644 index 82172f34884137..00000000000000 --- a/vendor/bindgen/codegen/helpers.rs +++ /dev/null @@ -1,395 +0,0 @@ -//! Helpers for code generation that don't need macro expansion. - -use proc_macro2::{Ident, Span}; - -use crate::ir::context::BindgenContext; -use crate::ir::layout::Layout; - -pub(crate) mod attributes { - use proc_macro2::{Ident, Span, TokenStream}; - use std::{borrow::Cow, str::FromStr}; - - pub(crate) fn repr(which: &str) -> TokenStream { - let which = Ident::new(which, Span::call_site()); - quote! { - #[repr( #which )] - } - } - - pub(crate) fn repr_list(which_ones: &[&str]) -> TokenStream { - let which_ones = which_ones - .iter() - .map(|one| TokenStream::from_str(one).expect("repr to be valid")); - quote! { - #[repr( #( #which_ones ),* )] - } - } - - pub(crate) fn derives(which_ones: &[&str]) -> TokenStream { - let which_ones = which_ones - .iter() - .map(|one| TokenStream::from_str(one).expect("derive to be valid")); - quote! { - #[derive( #( #which_ones ),* )] - } - } - - pub(crate) fn inline() -> TokenStream { - quote! { - #[inline] - } - } - - pub(crate) fn must_use() -> TokenStream { - quote! { - #[must_use] - } - } - - pub(crate) fn non_exhaustive() -> TokenStream { - quote! { - #[non_exhaustive] - } - } - - pub(crate) fn doc(comment: &str) -> TokenStream { - if comment.is_empty() { - quote!() - } else { - quote!(#[doc = #comment]) - } - } - - pub(crate) fn link_name(name: &str) -> TokenStream { - // LLVM mangles the name by default but it's already mangled. - // Prefixing the name with \u{1} should tell LLVM to not mangle it. - let name: Cow<'_, str> = if MANGLE { - name.into() - } else { - format!("\u{1}{name}").into() - }; - - quote! 
{ - #[link_name = #name] - } - } -} - -/// The `ffi_safe` argument should be true if this is a type that the user might -/// reasonably use, e.g. not struct padding, where the `__BindgenOpaqueArray` is -/// just noise. -/// TODO: Should this be `MaybeUninit`, since padding bytes are effectively -/// uninitialized? -pub(crate) fn blob( - ctx: &BindgenContext, - layout: Layout, - ffi_safe: bool, -) -> syn::Type { - let opaque = layout.opaque(); - - // FIXME(emilio, #412): We fall back to byte alignment, but there are - // some things that legitimately are more than 8-byte aligned. - // - // Eventually we should be able to `unwrap` here, but... - let ty = opaque.known_rust_type_for_array().unwrap_or_else(|| { - warn!("Found unknown alignment on code generation!"); - syn::parse_quote! { u8 } - }); - - let data_len = opaque.array_size().unwrap_or(layout.size); - - if data_len == 1 { - ty - } else if ffi_safe && ctx.options().rust_features().min_const_generics { - ctx.generated_opaque_array(); - if ctx.options().enable_cxx_namespaces { - syn::parse_quote! { root::__BindgenOpaqueArray<#ty, #data_len> } - } else { - syn::parse_quote! { __BindgenOpaqueArray<#ty, #data_len> } - } - } else { - // This is not FFI safe as an argument; the struct above is - // preferable. - syn::parse_quote! { [ #ty ; #data_len ] } - } -} - -/// Integer type of the same size as the given `Layout`. -pub(crate) fn integer_type(layout: Layout) -> Option { - Layout::known_type_for_size(layout.size) -} - -pub(crate) const BITFIELD_UNIT: &str = "__BindgenBitfieldUnit"; - -/// Generates a bitfield allocation unit type for a type with the given `Layout`. -pub(crate) fn bitfield_unit(ctx: &BindgenContext, layout: Layout) -> syn::Type { - let size = layout.size; - let bitfield_unit_name = Ident::new(BITFIELD_UNIT, Span::call_site()); - let ty = syn::parse_quote! { #bitfield_unit_name<[u8; #size]> }; - - if ctx.options().enable_cxx_namespaces { - return syn::parse_quote! { root::#ty }; - } - - ty -} - -pub(crate) mod ast_ty { - use crate::ir::context::BindgenContext; - use crate::ir::function::FunctionSig; - use crate::ir::layout::Layout; - use crate::ir::ty::{FloatKind, IntKind}; - use crate::RustTarget; - use proc_macro2::TokenStream; - use std::str::FromStr; - - pub(crate) fn c_void(ctx: &BindgenContext) -> syn::Type { - // ctypes_prefix takes precedence - match ctx.options().ctypes_prefix { - Some(ref prefix) => { - let prefix = TokenStream::from_str(prefix.as_str()).unwrap(); - syn::parse_quote! { #prefix::c_void } - } - None => { - if ctx.options().use_core { - syn::parse_quote! { ::core::ffi::c_void } - } else { - syn::parse_quote! { ::std::os::raw::c_void } - } - } - } - } - - pub(crate) fn raw_type(ctx: &BindgenContext, name: &str) -> syn::Type { - let ident = ctx.rust_ident_raw(name); - match ctx.options().ctypes_prefix { - Some(ref prefix) => { - let prefix = TokenStream::from_str(prefix.as_str()).unwrap(); - syn::parse_quote! { #prefix::#ident } - } - None => { - if ctx.options().use_core && - ctx.options().rust_features().core_ffi_c - { - syn::parse_quote! { ::core::ffi::#ident } - } else { - syn::parse_quote! { ::std::os::raw::#ident } - } - } - } - } - - pub(crate) fn int_kind_rust_type( - ctx: &BindgenContext, - ik: IntKind, - layout: Option, - ) -> syn::Type { - match ik { - IntKind::Bool => syn::parse_quote! { bool }, - IntKind::Char { .. } => raw_type(ctx, "c_char"), - // The following is used only when an unusual command-line - // argument is used. 
bindgen_cchar16_t is not a real type; - // but this allows downstream postprocessors to distinguish - // this case and do something special for C++ bindings - // containing the C++ type char16_t. - IntKind::Char16 => syn::parse_quote! { bindgen_cchar16_t }, - IntKind::SChar => raw_type(ctx, "c_schar"), - IntKind::UChar => raw_type(ctx, "c_uchar"), - IntKind::Short => raw_type(ctx, "c_short"), - IntKind::UShort => raw_type(ctx, "c_ushort"), - IntKind::Int => raw_type(ctx, "c_int"), - IntKind::UInt => raw_type(ctx, "c_uint"), - IntKind::Long => raw_type(ctx, "c_long"), - IntKind::ULong => raw_type(ctx, "c_ulong"), - IntKind::LongLong => raw_type(ctx, "c_longlong"), - IntKind::ULongLong => raw_type(ctx, "c_ulonglong"), - IntKind::WChar => { - let layout = - layout.expect("Couldn't compute wchar_t's layout?"); - Layout::known_type_for_size(layout.size) - .expect("Non-representable wchar_t?") - } - - IntKind::I8 => syn::parse_quote! { i8 }, - IntKind::U8 => syn::parse_quote! { u8 }, - IntKind::I16 => syn::parse_quote! { i16 }, - IntKind::U16 => syn::parse_quote! { u16 }, - IntKind::I32 => syn::parse_quote! { i32 }, - IntKind::U32 => syn::parse_quote! { u32 }, - IntKind::I64 => syn::parse_quote! { i64 }, - IntKind::U64 => syn::parse_quote! { u64 }, - IntKind::Custom { name, .. } => { - syn::parse_str(name).expect("Invalid integer type.") - } - IntKind::U128 => { - if true { - syn::parse_quote! { u128 } - } else { - // Best effort thing, but wrong alignment - // unfortunately. - syn::parse_quote! { [u64; 2] } - } - } - IntKind::I128 => { - if true { - syn::parse_quote! { i128 } - } else { - syn::parse_quote! { [u64; 2] } - } - } - } - } - - pub(crate) fn float_kind_rust_type( - ctx: &BindgenContext, - fk: FloatKind, - layout: Option, - ) -> syn::Type { - // TODO: we probably should take the type layout into account more - // often? - // - // Also, maybe this one shouldn't be the default? - match (fk, ctx.options().convert_floats) { - (FloatKind::Float16, _) => { - // TODO: do f16 when rust lands it - ctx.generated_bindgen_float16(); - if ctx.options().enable_cxx_namespaces { - syn::parse_quote! { root::__BindgenFloat16 } - } else { - syn::parse_quote! { __BindgenFloat16 } - } - } - (FloatKind::Float, true) => syn::parse_quote! { f32 }, - (FloatKind::Double, true) => syn::parse_quote! { f64 }, - (FloatKind::Float, false) => raw_type(ctx, "c_float"), - (FloatKind::Double, false) => raw_type(ctx, "c_double"), - (FloatKind::LongDouble, _) => { - if let Some(layout) = layout { - match layout.size { - 4 => syn::parse_quote! { f32 }, - 8 => syn::parse_quote! { f64 }, - // TODO(emilio): If rust ever gains f128 we should - // use it here and below. - _ => super::integer_type(layout) - .unwrap_or(syn::parse_quote! { f64 }), - } - } else { - debug_assert!( - false, - "How didn't we know the layout for a primitive type?" - ); - syn::parse_quote! { f64 } - } - } - (FloatKind::Float128, _) => { - if true { - syn::parse_quote! { u128 } - } else { - syn::parse_quote! { [u64; 2] } - } - } - } - } - - pub(crate) fn int_expr(val: i64) -> TokenStream { - // Don't use quote! { #val } because that adds the type suffix. - let val = proc_macro2::Literal::i64_unsuffixed(val); - quote!(#val) - } - - pub(crate) fn uint_expr(val: u64) -> TokenStream { - // Don't use quote! { #val } because that adds the type suffix. 
- let val = proc_macro2::Literal::u64_unsuffixed(val); - quote!(#val) - } - - pub(crate) fn cstr_expr(mut string: String) -> TokenStream { - string.push('\0'); - let b = proc_macro2::Literal::byte_string(string.as_bytes()); - quote! { - #b - } - } - - pub(crate) fn float_expr( - ctx: &BindgenContext, - f: f64, - ) -> Result { - if f.is_finite() { - let val = proc_macro2::Literal::f64_unsuffixed(f); - - return Ok(quote!(#val)); - } - - let prefix = ctx.trait_prefix(); - let rust_target = ctx.options().rust_target; - - if f.is_nan() { - // FIXME: This should be done behind a `RustFeature` instead - #[allow(deprecated)] - let tokens = if rust_target >= RustTarget::Stable_1_43 { - quote! { - f64::NAN - } - } else { - quote! { - ::#prefix::f64::NAN - } - }; - return Ok(tokens); - } - - if f.is_infinite() { - let tokens = if f.is_sign_positive() { - // FIXME: This should be done behind a `RustFeature` instead - #[allow(deprecated)] - if rust_target >= RustTarget::Stable_1_43 { - quote! { - f64::INFINITY - } - } else { - quote! { - ::#prefix::f64::INFINITY - } - } - } else { - // FIXME: This should be done behind a `RustFeature` instead - #[allow(deprecated)] - // Negative infinity - if rust_target >= RustTarget::Stable_1_43 { - quote! { - f64::NEG_INFINITY - } - } else { - quote! { - ::#prefix::f64::NEG_INFINITY - } - } - }; - return Ok(tokens); - } - - warn!("Unknown non-finite float number: {f:?}"); - Err(()) - } - - pub(crate) fn arguments_from_signature( - signature: &FunctionSig, - ctx: &BindgenContext, - ) -> Vec { - let mut unnamed_arguments = 0; - signature - .argument_types() - .iter() - .map(|&(ref name, _ty)| { - let name = if let Some(ref name) = *name { - ctx.rust_ident(name) - } else { - unnamed_arguments += 1; - ctx.rust_ident(format!("arg{unnamed_arguments}")) - }; - quote! { #name } - }) - .collect() - } -} diff --git a/vendor/bindgen/codegen/impl_debug.rs b/vendor/bindgen/codegen/impl_debug.rs deleted file mode 100644 index 058a73bd132157..00000000000000 --- a/vendor/bindgen/codegen/impl_debug.rs +++ /dev/null @@ -1,243 +0,0 @@ -use crate::ir::comp::{BitfieldUnit, CompKind, Field, FieldData, FieldMethods}; -use crate::ir::context::BindgenContext; -use crate::ir::item::{HasTypeParamInArray, IsOpaque, Item, ItemCanonicalName}; -use crate::ir::ty::{TypeKind, RUST_DERIVE_IN_ARRAY_LIMIT}; -use std::fmt::Write as _; - -pub(crate) fn gen_debug_impl( - ctx: &BindgenContext, - fields: &[Field], - item: &Item, - kind: CompKind, -) -> proc_macro2::TokenStream { - let struct_name = item.canonical_name(ctx); - let mut format_string = format!("{struct_name} {{{{ "); - let mut tokens = vec![]; - - if item.is_opaque(ctx, &()) { - format_string.push_str("opaque"); - } else { - match kind { - CompKind::Union => { - format_string.push_str("union"); - } - CompKind::Struct => { - let processed_fields = fields.iter().filter_map(|f| match f { - Field::DataMember(ref fd) => fd.impl_debug(ctx, ()), - Field::Bitfields(ref bu) => bu.impl_debug(ctx, ()), - }); - - for (i, (fstring, toks)) in processed_fields.enumerate() { - if i > 0 { - format_string.push_str(", "); - } - tokens.extend(toks); - format_string.push_str(&fstring); - } - } - } - } - - format_string.push_str(" }}"); - tokens.insert(0, quote! { #format_string }); - - let prefix = ctx.trait_prefix(); - - quote! { - fn fmt(&self, f: &mut ::#prefix::fmt::Formatter<'_>) -> ::#prefix ::fmt::Result { - write!(f, #( #tokens ),*) - } - } -} - -/// A trait for the things which we can codegen tokens that contribute towards a -/// generated `impl Debug`. 
-pub(crate) trait ImplDebug<'a> { - /// Any extra parameter required by this a particular `ImplDebug` implementation. - type Extra; - - /// Generate a format string snippet to be included in the larger `impl Debug` - /// format string, and the code to get the format string's interpolation values. - fn impl_debug( - &self, - ctx: &BindgenContext, - extra: Self::Extra, - ) -> Option<(String, Vec)>; -} - -impl ImplDebug<'_> for FieldData { - type Extra = (); - - fn impl_debug( - &self, - ctx: &BindgenContext, - _: Self::Extra, - ) -> Option<(String, Vec)> { - if let Some(name) = self.name() { - ctx.resolve_item(self.ty()).impl_debug(ctx, name) - } else { - None - } - } -} - -impl ImplDebug<'_> for BitfieldUnit { - type Extra = (); - - fn impl_debug( - &self, - ctx: &BindgenContext, - _: Self::Extra, - ) -> Option<(String, Vec)> { - let mut format_string = String::new(); - let mut tokens = vec![]; - for (i, bitfield) in self.bitfields().iter().enumerate() { - if i > 0 { - format_string.push_str(", "); - } - - if let Some(bitfield_name) = bitfield.name() { - let _ = write!(format_string, "{bitfield_name} : {{:?}}"); - let getter_name = bitfield.getter_name(); - let name_ident = ctx.rust_ident_raw(getter_name); - tokens.push(quote! { - self.#name_ident () - }); - } - } - - Some((format_string, tokens)) - } -} - -impl<'a> ImplDebug<'a> for Item { - type Extra = &'a str; - - fn impl_debug( - &self, - ctx: &BindgenContext, - name: &str, - ) -> Option<(String, Vec)> { - let name_ident = ctx.rust_ident(name); - - // We don't know if blocklisted items `impl Debug` or not, so we can't - // add them to the format string we're building up. - if !ctx.allowlisted_items().contains(&self.id()) { - return None; - } - - let ty = self.as_type()?; - - fn debug_print( - name: &str, - name_ident: &proc_macro2::TokenStream, - ) -> Option<(String, Vec)> { - Some(( - format!("{name}: {{:?}}"), - vec![quote! { - self.#name_ident - }], - )) - } - - match *ty.kind() { - // Handle the simple cases. - TypeKind::Void | - TypeKind::NullPtr | - TypeKind::Int(..) | - TypeKind::Float(..) | - TypeKind::Complex(..) | - TypeKind::Function(..) | - TypeKind::Enum(..) | - TypeKind::Reference(..) | - TypeKind::UnresolvedTypeRef(..) | - TypeKind::ObjCInterface(..) | - TypeKind::ObjCId | - TypeKind::Comp(..) | - TypeKind::ObjCSel => debug_print(name, "e! { #name_ident }), - - TypeKind::TemplateInstantiation(ref inst) => { - if inst.is_opaque(ctx, self) { - Some((format!("{name}: opaque"), vec![])) - } else { - debug_print(name, "e! { #name_ident }) - } - } - - // The generic is not required to implement Debug, so we can not debug print that type - TypeKind::TypeParam => { - Some((format!("{name}: Non-debuggable generic"), vec![])) - } - - TypeKind::Array(_, len) => { - // Generics are not required to implement Debug - if self.has_type_param_in_array(ctx) { - Some((format!("{name}: Array with length {len}"), vec![])) - } else if len < RUST_DERIVE_IN_ARRAY_LIMIT || - ctx.options().rust_features().larger_arrays - { - // The simple case - debug_print(name, "e! { #name_ident }) - } else if ctx.options().use_core { - // There is no String in core; reducing field visibility to avoid breaking - // no_std setups. - Some((format!("{name}: [...]"), vec![])) - } else { - // Let's implement our own print function - Some(( - format!("{name}: [{{}}]"), - vec![quote! 
{{ - use std::fmt::Write as _; - let mut output = String::new(); - let mut iter = self.#name_ident.iter(); - if let Some(value) = iter.next() { - let _ = write!(output, "{value:?}"); - for value in iter { - let _ = write!(output, ", {value:?}"); - } - } - output - }}], - )) - } - } - TypeKind::Vector(_, len) => { - if ctx.options().use_core { - // There is no format! in core; reducing field visibility to avoid breaking - // no_std setups. - Some((format!("{name}(...)"), vec![])) - } else { - let self_ids = 0..len; - Some(( - format!("{name}({{}})"), - vec![quote! { - #(format!("{:?}", self.#self_ids)),* - }], - )) - } - } - - TypeKind::ResolvedTypeRef(t) | - TypeKind::TemplateAlias(t, _) | - TypeKind::Alias(t) | - TypeKind::BlockPointer(t) => { - // We follow the aliases - ctx.resolve_item(t).impl_debug(ctx, name) - } - - TypeKind::Pointer(inner) => { - let inner_type = ctx.resolve_type(inner).canonical_type(ctx); - match *inner_type.kind() { - TypeKind::Function(ref sig) - if !sig.function_pointers_can_derive() => - { - Some((format!("{name}: FunctionPointer"), vec![])) - } - _ => debug_print(name, "e! { #name_ident }), - } - } - - TypeKind::Opaque => None, - } - } -} diff --git a/vendor/bindgen/codegen/impl_partialeq.rs b/vendor/bindgen/codegen/impl_partialeq.rs deleted file mode 100644 index c2787967d85230..00000000000000 --- a/vendor/bindgen/codegen/impl_partialeq.rs +++ /dev/null @@ -1,142 +0,0 @@ -use crate::ir::comp::{CompInfo, CompKind, Field, FieldMethods}; -use crate::ir::context::BindgenContext; -use crate::ir::item::{IsOpaque, Item}; -use crate::ir::ty::{TypeKind, RUST_DERIVE_IN_ARRAY_LIMIT}; - -/// Generate a manual implementation of `PartialEq` trait for the -/// specified compound type. -pub(crate) fn gen_partialeq_impl( - ctx: &BindgenContext, - comp_info: &CompInfo, - item: &Item, - ty_for_impl: &proc_macro2::TokenStream, -) -> Option { - let mut tokens = vec![]; - - if item.is_opaque(ctx, &()) { - tokens.push(quote! { - &self._bindgen_opaque_blob[..] == &other._bindgen_opaque_blob[..] - }); - } else if comp_info.kind() == CompKind::Union { - assert!(!ctx.options().untagged_union); - tokens.push(quote! { - &self.bindgen_union_field[..] == &other.bindgen_union_field[..] - }); - } else { - for base in comp_info.base_members() { - if !base.requires_storage(ctx) { - continue; - } - - let ty_item = ctx.resolve_item(base.ty); - let field_name = &base.field_name; - - if ty_item.is_opaque(ctx, &()) { - let field_name = ctx.rust_ident(field_name); - tokens.push(quote! { - &self. #field_name [..] == &other. #field_name [..] - }); - } else { - tokens.push(gen_field(ctx, ty_item, field_name)); - } - } - - for field in comp_info.fields() { - match *field { - Field::DataMember(ref fd) => { - let ty_item = ctx.resolve_item(fd.ty()); - let name = fd.name().unwrap(); - tokens.push(gen_field(ctx, ty_item, name)); - } - Field::Bitfields(ref bu) => { - for bitfield in bu.bitfields() { - if bitfield.name().is_some() { - let getter_name = bitfield.getter_name(); - let name_ident = ctx.rust_ident_raw(getter_name); - tokens.push(quote! { - self.#name_ident () == other.#name_ident () - }); - } - } - } - } - } - } - - Some(quote! { - fn eq(&self, other: & #ty_for_impl) -> bool { - #( #tokens )&&* - } - }) -} - -fn gen_field( - ctx: &BindgenContext, - ty_item: &Item, - name: &str, -) -> proc_macro2::TokenStream { - fn quote_equals( - name_ident: &proc_macro2::Ident, - ) -> proc_macro2::TokenStream { - quote! 
{ self.#name_ident == other.#name_ident } - } - - let name_ident = ctx.rust_ident(name); - let ty = ty_item.expect_type(); - - match *ty.kind() { - TypeKind::Void | - TypeKind::NullPtr | - TypeKind::Int(..) | - TypeKind::Complex(..) | - TypeKind::Float(..) | - TypeKind::Enum(..) | - TypeKind::TypeParam | - TypeKind::UnresolvedTypeRef(..) | - TypeKind::Reference(..) | - TypeKind::ObjCInterface(..) | - TypeKind::ObjCId | - TypeKind::ObjCSel | - TypeKind::Comp(..) | - TypeKind::Pointer(_) | - TypeKind::Function(..) | - TypeKind::Opaque => quote_equals(&name_ident), - - TypeKind::TemplateInstantiation(ref inst) => { - if inst.is_opaque(ctx, ty_item) { - quote! { - &self. #name_ident [..] == &other. #name_ident [..] - } - } else { - quote_equals(&name_ident) - } - } - - TypeKind::Array(_, len) => { - if len <= RUST_DERIVE_IN_ARRAY_LIMIT || - ctx.options().rust_features().larger_arrays - { - quote_equals(&name_ident) - } else { - quote! { - &self. #name_ident [..] == &other. #name_ident [..] - } - } - } - TypeKind::Vector(_, len) => { - let self_ids = 0..len; - let other_ids = 0..len; - quote! { - #(self.#self_ids == other.#other_ids &&)* true - } - } - - TypeKind::ResolvedTypeRef(t) | - TypeKind::TemplateAlias(t, _) | - TypeKind::Alias(t) | - TypeKind::BlockPointer(t) => { - let inner_item = ctx.resolve_item(t); - gen_field(ctx, inner_item, name) - } - } -} diff --git a/vendor/bindgen/codegen/mod.rs b/vendor/bindgen/codegen/mod.rs deleted file mode 100644 index 59f2265c09ed1f..00000000000000 --- a/vendor/bindgen/codegen/mod.rs +++ /dev/null @@ -1,5991 +0,0 @@ -mod dyngen; -pub(crate) mod error; - -mod helpers; -mod impl_debug; -mod impl_partialeq; -mod postprocessing; -mod serialize; -pub(crate) mod struct_layout; - -#[cfg(test)] -#[allow(warnings)] -pub(crate) mod bitfield_unit; -#[cfg(all(test, target_endian = "little"))] -mod bitfield_unit_tests; - -use self::dyngen::DynamicItems; -use self::helpers::attributes; -use self::struct_layout::StructLayoutTracker; - -use super::BindgenOptions; - -use crate::callbacks::{ - AttributeInfo, DeriveInfo, DiscoveredItem, DiscoveredItemId, FieldInfo, - TypeKind as DeriveTypeKind, -}; -use crate::codegen::error::Error; -use crate::ir::analysis::{HasVtable, Sizedness}; -use crate::ir::annotations::{ - Annotations, FieldAccessorKind, FieldVisibilityKind, -}; -use crate::ir::comp::{ - Bitfield, BitfieldUnit, CompInfo, CompKind, Field, FieldData, FieldMethods, - Method, MethodKind, -}; -use crate::ir::context::{BindgenContext, ItemId}; -use crate::ir::derive::{ - CanDerive, CanDeriveCopy, CanDeriveDebug, CanDeriveDefault, CanDeriveEq, - CanDeriveHash, CanDeriveOrd, CanDerivePartialEq, CanDerivePartialOrd, -}; -use crate::ir::dot; -use crate::ir::enum_ty::{Enum, EnumVariant, EnumVariantValue}; -use crate::ir::function::{ - ClangAbi, Function, FunctionKind, FunctionSig, Linkage, -}; -use crate::ir::int::IntKind; -use crate::ir::item::{IsOpaque, Item, ItemCanonicalName, ItemCanonicalPath}; -use crate::ir::item_kind::ItemKind; -use crate::ir::layout::Layout; -use crate::ir::module::Module; -use crate::ir::objc::{ObjCInterface, ObjCMethod}; -use crate::ir::template::{ - AsTemplateParam, TemplateInstantiation, TemplateParameters, -}; -use crate::ir::ty::{Type, TypeKind}; -use crate::ir::var::Var; - -use proc_macro2::{Ident, Span}; -use quote::{ToTokens, TokenStreamExt}; - -use crate::{Entry, HashMap, HashSet}; -use std::borrow::Cow; -use std::cell::Cell; -use std::collections::VecDeque; -use std::ffi::CStr; -use std::fmt::{self, Write}; -use std::ops; -use 
std::str::{self, FromStr}; - -#[derive(Debug, Clone, PartialEq, Eq, Hash)] -pub enum CodegenError { - Serialize { msg: String, loc: String }, - Io(String), -} - -impl From for CodegenError { - fn from(err: std::io::Error) -> Self { - Self::Io(err.to_string()) - } -} - -impl fmt::Display for CodegenError { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match self { - Self::Serialize { msg, loc } => { - write!(f, "serialization error at {loc}: {msg}") - } - Self::Io(err) => err.fmt(f), - } - } -} - -// Name of type defined in constified enum module -pub(crate) static CONSTIFIED_ENUM_MODULE_REPR_NAME: &str = "Type"; - -fn top_level_path( - ctx: &BindgenContext, - item: &Item, -) -> Vec { - let mut path = vec![quote! { self }]; - - if ctx.options().enable_cxx_namespaces { - for _ in 0..item.codegen_depth(ctx) { - path.push(quote! { super }); - } - } - - path -} - -fn root_import( - ctx: &BindgenContext, - module: &Item, -) -> proc_macro2::TokenStream { - assert!(ctx.options().enable_cxx_namespaces, "Somebody messed it up"); - assert!(module.is_module()); - - let mut path = top_level_path(ctx, module); - - let root = ctx.root_module().canonical_name(ctx); - let root_ident = ctx.rust_ident(root); - path.push(quote! { #root_ident }); - - let mut tokens = quote! {}; - tokens.append_separated(path, quote!(::)); - - quote! { - #[allow(unused_imports)] - use #tokens ; - } -} - -bitflags! { - #[derive(Copy, Clone, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] - struct DerivableTraits: u16 { - const DEBUG = 1 << 0; - const DEFAULT = 1 << 1; - const COPY = 1 << 2; - const CLONE = 1 << 3; - const HASH = 1 << 4; - const PARTIAL_ORD = 1 << 5; - const ORD = 1 << 6; - const PARTIAL_EQ = 1 << 7; - const EQ = 1 << 8; - } -} - -fn derives_of_item( - item: &Item, - ctx: &BindgenContext, - packed: bool, -) -> DerivableTraits { - let mut derivable_traits = DerivableTraits::empty(); - - if item.can_derive_copy(ctx) && !item.annotations().disallow_copy() { - derivable_traits |= DerivableTraits::COPY; - - // FIXME: This requires extra logic if you have a big array in a - // templated struct. The reason for this is that the magic: - // fn clone(&self) -> Self { *self } - // doesn't work for templates. - // - // It's not hard to fix though. - derivable_traits |= DerivableTraits::CLONE; - } else if packed { - // If the struct or union is packed, deriving from Copy is required for - // deriving from any other trait. 
- return derivable_traits; - } - - if item.can_derive_debug(ctx) && !item.annotations().disallow_debug() { - derivable_traits |= DerivableTraits::DEBUG; - } - - if item.can_derive_default(ctx) && !item.annotations().disallow_default() { - derivable_traits |= DerivableTraits::DEFAULT; - } - - if item.can_derive_hash(ctx) { - derivable_traits |= DerivableTraits::HASH; - } - - if item.can_derive_partialord(ctx) { - derivable_traits |= DerivableTraits::PARTIAL_ORD; - } - - if item.can_derive_ord(ctx) { - derivable_traits |= DerivableTraits::ORD; - } - - if item.can_derive_partialeq(ctx) { - derivable_traits |= DerivableTraits::PARTIAL_EQ; - } - - if item.can_derive_eq(ctx) { - derivable_traits |= DerivableTraits::EQ; - } - - derivable_traits -} - -impl From for Vec<&'static str> { - fn from(derivable_traits: DerivableTraits) -> Vec<&'static str> { - [ - (DerivableTraits::DEBUG, "Debug"), - (DerivableTraits::DEFAULT, "Default"), - (DerivableTraits::COPY, "Copy"), - (DerivableTraits::CLONE, "Clone"), - (DerivableTraits::HASH, "Hash"), - (DerivableTraits::PARTIAL_ORD, "PartialOrd"), - (DerivableTraits::ORD, "Ord"), - (DerivableTraits::PARTIAL_EQ, "PartialEq"), - (DerivableTraits::EQ, "Eq"), - ] - .iter() - .filter_map(|&(flag, derive)| { - Some(derive).filter(|_| derivable_traits.contains(flag)) - }) - .collect() - } -} - -struct WrapAsVariadic { - new_name: String, - idx_of_va_list_arg: usize, -} - -struct CodegenResult<'a> { - items: Vec, - dynamic_items: DynamicItems, - - /// A monotonic counter used to add stable unique ID's to stuff that doesn't - /// need to be referenced by anything. - codegen_id: &'a Cell, - - /// Whether a bindgen union has been generated at least once. - saw_bindgen_union: bool, - - /// Whether an incomplete array has been generated at least once. - saw_incomplete_array: bool, - - /// Whether Objective C types have been seen at least once. - saw_objc: bool, - - /// Whether Apple block types have been seen at least once. - saw_block: bool, - - /// Whether a bitfield allocation unit has been seen at least once. - saw_bitfield_unit: bool, - - items_seen: HashSet, - /// The set of generated function/var names, needed because in C/C++ is - /// legal to do something like: - /// - /// ```c++ - /// extern "C" { - /// void foo(); - /// extern int bar; - /// } - /// - /// extern "C" { - /// void foo(); - /// extern int bar; - /// } - /// ``` - /// - /// Being these two different declarations. - functions_seen: HashSet, - vars_seen: HashSet, - - /// Used for making bindings to overloaded functions. Maps from a canonical - /// function name to the number of overloads we have already codegen'd for - /// that name. This lets us give each overload a unique suffix. - overload_counters: HashMap, - - /// List of items to serialize. With optionally the argument for the wrap as - /// variadic transformation to be applied. 
- items_to_serialize: Vec<(ItemId, Option)>, -} - -impl<'a> CodegenResult<'a> { - fn new(codegen_id: &'a Cell) -> Self { - CodegenResult { - items: vec![], - dynamic_items: DynamicItems::new(), - saw_bindgen_union: false, - saw_incomplete_array: false, - saw_objc: false, - saw_block: false, - saw_bitfield_unit: false, - codegen_id, - items_seen: Default::default(), - functions_seen: Default::default(), - vars_seen: Default::default(), - overload_counters: Default::default(), - items_to_serialize: Default::default(), - } - } - - fn dynamic_items(&mut self) -> &mut DynamicItems { - &mut self.dynamic_items - } - - fn saw_bindgen_union(&mut self) { - self.saw_bindgen_union = true; - } - - fn saw_incomplete_array(&mut self) { - self.saw_incomplete_array = true; - } - - fn saw_objc(&mut self) { - self.saw_objc = true; - } - - fn saw_block(&mut self) { - self.saw_block = true; - } - - fn saw_bitfield_unit(&mut self) { - self.saw_bitfield_unit = true; - } - - fn seen>(&self, item: Id) -> bool { - self.items_seen.contains(&item.into()) - } - - fn set_seen>(&mut self, item: Id) { - self.items_seen.insert(item.into()); - } - - fn seen_function(&self, name: &str) -> bool { - self.functions_seen.contains(name) - } - - fn saw_function(&mut self, name: &str) { - self.functions_seen.insert(name.into()); - } - - /// Get the overload number for the given function name. Increments the - /// counter internally so the next time we ask for the overload for this - /// name, we get the incremented value, and so on. - fn overload_number(&mut self, name: &str) -> u32 { - let counter = self.overload_counters.entry(name.into()).or_insert(0); - let number = *counter; - *counter += 1; - number - } - - fn seen_var(&self, name: &str) -> bool { - self.vars_seen.contains(name) - } - - fn saw_var(&mut self, name: &str) { - self.vars_seen.insert(name.into()); - } - - fn inner(&mut self, cb: F) -> Vec - where - F: FnOnce(&mut Self), - { - let mut new = Self::new(self.codegen_id); - - cb(&mut new); - - self.saw_incomplete_array |= new.saw_incomplete_array; - self.saw_objc |= new.saw_objc; - self.saw_block |= new.saw_block; - self.saw_bitfield_unit |= new.saw_bitfield_unit; - self.saw_bindgen_union |= new.saw_bindgen_union; - - new.items - } -} - -impl ops::Deref for CodegenResult<'_> { - type Target = Vec; - - fn deref(&self) -> &Self::Target { - &self.items - } -} - -impl ops::DerefMut for CodegenResult<'_> { - fn deref_mut(&mut self) -> &mut Self::Target { - &mut self.items - } -} - -/// A trait to convert a rust type into a pointer, optionally const, to the same -/// type. -trait ToPtr { - fn to_ptr(self, is_const: bool) -> syn::Type; -} - -impl ToPtr for syn::Type { - fn to_ptr(self, is_const: bool) -> syn::Type { - if is_const { - syn::parse_quote! { *const #self } - } else { - syn::parse_quote! { *mut #self } - } - } -} - -/// An extension trait for `syn::Type` that lets us append any implicit -/// template parameters that exist for some type, if necessary. -trait WithImplicitTemplateParams { - fn with_implicit_template_params( - self, - ctx: &BindgenContext, - item: &Item, - ) -> Self; -} - -impl WithImplicitTemplateParams for syn::Type { - fn with_implicit_template_params( - self, - ctx: &BindgenContext, - item: &Item, - ) -> Self { - let item = item.id().into_resolver().through_type_refs().resolve(ctx); - - let params = match *item.expect_type().kind() { - TypeKind::UnresolvedTypeRef(..) => { - unreachable!("already resolved unresolved type refs") - } - TypeKind::ResolvedTypeRef(..) 
=> { - unreachable!("we resolved item through type refs") - } - // None of these types ever have implicit template parameters. - TypeKind::Void | - TypeKind::NullPtr | - TypeKind::Pointer(..) | - TypeKind::Reference(..) | - TypeKind::Int(..) | - TypeKind::Float(..) | - TypeKind::Complex(..) | - TypeKind::Array(..) | - TypeKind::TypeParam | - TypeKind::Opaque | - TypeKind::Function(..) | - TypeKind::Enum(..) | - TypeKind::ObjCId | - TypeKind::ObjCSel | - TypeKind::TemplateInstantiation(..) => None, - _ => { - let params = item.used_template_params(ctx); - if params.is_empty() { - None - } else { - Some(params.into_iter().map(|p| { - p.try_to_rust_ty(ctx, &()).expect( - "template params cannot fail to be a rust type", - ) - })) - } - } - }; - - if let Some(params) = params { - syn::parse_quote! { #self<#(#params),*> } - } else { - self - } - } -} - -trait CodeGenerator { - /// Extra information from the caller. - type Extra; - - /// Extra information returned to the caller. - type Return; - - fn codegen( - &self, - ctx: &BindgenContext, - result: &mut CodegenResult<'_>, - extra: &Self::Extra, - ) -> Self::Return; -} - -impl Item { - fn process_before_codegen( - &self, - ctx: &BindgenContext, - result: &mut CodegenResult, - ) -> bool { - if !self.is_enabled_for_codegen(ctx) { - return false; - } - - if self.is_blocklisted(ctx) || result.seen(self.id()) { - debug!( - "::process_before_codegen: Ignoring hidden or seen: \ - self = {:?}", - self - ); - return false; - } - - if !ctx.codegen_items().contains(&self.id()) { - // TODO(emilio, #453): Figure out what to do when this happens - // legitimately, we could track the opaque stuff and disable the - // assertion there I guess. - warn!("Found non-allowlisted item in code generation: {self:?}"); - } - - result.set_seen(self.id()); - true - } -} - -impl CodeGenerator for Item { - type Extra = (); - type Return = (); - - fn codegen( - &self, - ctx: &BindgenContext, - result: &mut CodegenResult<'_>, - _extra: &(), - ) { - debug!("::codegen: self = {self:?}"); - if !self.process_before_codegen(ctx, result) { - return; - } - - match *self.kind() { - ItemKind::Module(ref module) => { - module.codegen(ctx, result, self); - } - ItemKind::Function(ref fun) => { - fun.codegen(ctx, result, self); - } - ItemKind::Var(ref var) => { - var.codegen(ctx, result, self); - } - ItemKind::Type(ref ty) => { - ty.codegen(ctx, result, self); - } - } - } -} - -impl CodeGenerator for Module { - type Extra = Item; - type Return = (); - - fn codegen( - &self, - ctx: &BindgenContext, - result: &mut CodegenResult<'_>, - item: &Item, - ) { - debug!("::codegen: item = {item:?}"); - - let codegen_self = |result: &mut CodegenResult, - found_any: &mut bool| { - for child in self.children() { - if ctx.codegen_items().contains(child) { - *found_any = true; - ctx.resolve_item(*child).codegen(ctx, result, &()); - } - } - - if item.id() == ctx.root_module() { - if result.saw_block { - utils::prepend_block_header(ctx, &mut *result); - } - if result.saw_bindgen_union { - utils::prepend_union_types(ctx, &mut *result); - } - if result.saw_incomplete_array { - utils::prepend_incomplete_array_types(ctx, &mut *result); - } - if ctx.need_bindgen_float16_type() { - utils::prepend_float16_type(&mut *result); - } - if ctx.need_bindgen_complex_type() { - utils::prepend_complex_type(&mut *result); - } - if ctx.need_opaque_array_type() { - utils::prepend_opaque_array_type(&mut *result); - } - if result.saw_objc { - utils::prepend_objc_header(ctx, &mut *result); - } - if result.saw_bitfield_unit { - 
utils::prepend_bitfield_unit_type(ctx, &mut *result); - } - } - }; - - if !ctx.options().enable_cxx_namespaces || - (self.is_inline() && - !ctx.options().conservative_inline_namespaces) - { - codegen_self(result, &mut false); - return; - } - - let mut found_any = false; - let inner_items = result.inner(|result| { - result.push(root_import(ctx, item)); - - let path = item - .namespace_aware_canonical_path(ctx) - .join("::") - .into_boxed_str(); - if let Some(raw_lines) = ctx.options().module_lines.get(&path) { - for raw_line in raw_lines { - found_any = true; - result.push( - proc_macro2::TokenStream::from_str(raw_line).unwrap(), - ); - } - } - - codegen_self(result, &mut found_any); - }); - - // Don't bother creating an empty module. - if !found_any { - return; - } - - let name = item.canonical_name(ctx); - let ident = ctx.rust_ident(name); - result.push(if item.id() == ctx.root_module() { - quote! { - #[allow(non_snake_case, non_camel_case_types, non_upper_case_globals)] - pub mod #ident { - #( #inner_items )* - } - } - } else { - quote! { - pub mod #ident { - #( #inner_items )* - } - } - }); - } -} - -impl CodeGenerator for Var { - type Extra = Item; - type Return = (); - - fn codegen( - &self, - ctx: &BindgenContext, - result: &mut CodegenResult<'_>, - item: &Item, - ) { - use crate::ir::var::VarType; - debug!("::codegen: item = {item:?}"); - debug_assert!(item.is_enabled_for_codegen(ctx)); - - let canonical_name = item.canonical_name(ctx); - - if result.seen_var(&canonical_name) { - return; - } - result.saw_var(&canonical_name); - - let canonical_ident = ctx.rust_ident(&canonical_name); - - // We can't generate bindings to static variables of templates. The - // number of actual variables for a single declaration are open ended - // and we don't know what instantiations do or don't exist. - if !item.all_template_params(ctx).is_empty() { - return; - } - - let mut attrs = vec![]; - if let Some(comment) = item.comment(ctx) { - attrs.push(attributes::doc(&comment)); - } - - let var_ty = self.ty(); - let ty = var_ty.to_rust_ty_or_opaque(ctx, &()); - - if let Some(val) = self.val() { - match *val { - VarType::Bool(val) => { - result.push(quote! { - #(#attrs)* - pub const #canonical_ident : #ty = #val ; - }); - } - VarType::Int(val) => { - let int_kind = var_ty - .into_resolver() - .through_type_aliases() - .through_type_refs() - .resolve(ctx) - .expect_type() - .as_integer() - .unwrap(); - let val = if int_kind.is_signed() { - helpers::ast_ty::int_expr(val) - } else { - helpers::ast_ty::uint_expr(val as _) - }; - result.push(quote! { - #(#attrs)* - pub const #canonical_ident : #ty = #val ; - }); - } - VarType::String(ref bytes) => { - let prefix = ctx.trait_prefix(); - - let options = ctx.options(); - let rust_features = options.rust_features; - - let mut cstr_bytes = bytes.clone(); - cstr_bytes.push(0); - let len = proc_macro2::Literal::usize_unsuffixed( - cstr_bytes.len(), - ); - let cstr = - if options.generate_cstr && rust_features.const_cstr { - CStr::from_bytes_with_nul(&cstr_bytes).ok() - } else { - None - }; - - if let Some(cstr) = cstr { - let cstr_ty = quote! { ::#prefix::ffi::CStr }; - if rust_features.literal_cstr { - let cstr = proc_macro2::Literal::c_string(cstr); - result.push(quote! { - #(#attrs)* - pub const #canonical_ident: &#cstr_ty = #cstr; - }); - } else { - let bytes = - proc_macro2::Literal::byte_string(&cstr_bytes); - result.push(quote! 
{ - #(#attrs)* - #[allow(unsafe_code)] - pub const #canonical_ident: &#cstr_ty = unsafe { - #cstr_ty::from_bytes_with_nul_unchecked(#bytes) - }; - }); - } - } else { - // TODO: Here we ignore the type we just made up, probably - // we should refactor how the variable type and ty ID work. - let array_ty = quote! { [u8; #len] }; - let bytes = - proc_macro2::Literal::byte_string(&cstr_bytes); - let lifetime = - if true { None } else { Some(quote! { 'static }) } - .into_iter(); - - result.push(quote! { - #(#attrs)* - pub const #canonical_ident: &#(#lifetime )*#array_ty = #bytes ; - }); - } - } - VarType::Float(f) => { - if let Ok(expr) = helpers::ast_ty::float_expr(ctx, f) { - result.push(quote! { - #(#attrs)* - pub const #canonical_ident : #ty = #expr ; - }); - } - } - VarType::Char(c) => { - result.push(quote! { - #(#attrs)* - pub const #canonical_ident : #ty = #c ; - }); - } - } - } else { - let symbol: &str = self.link_name().unwrap_or_else(|| { - let link_name = - self.mangled_name().unwrap_or_else(|| self.name()); - if utils::names_will_be_identical_after_mangling( - &canonical_name, - link_name, - None, - ) { - canonical_name.as_str() - } else { - attrs.push(attributes::link_name::(link_name)); - link_name - } - }); - - let maybe_mut = if self.is_const() { - quote! {} - } else { - quote! { mut } - }; - - let safety = ctx - .options() - .rust_features - .unsafe_extern_blocks - .then(|| quote!(unsafe)); - - let tokens = quote!( - #safety extern "C" { - #(#attrs)* - pub static #maybe_mut #canonical_ident: #ty; - } - ); - - if ctx.options().dynamic_library_name.is_some() { - result.dynamic_items().push_var( - &canonical_ident, - symbol, - &self - .ty() - .to_rust_ty_or_opaque(ctx, &()) - .into_token_stream(), - ctx.options().dynamic_link_require_all, - ctx.options().wrap_unsafe_ops, - ); - } else { - result.push(tokens); - } - } - } -} - -impl CodeGenerator for Type { - type Extra = Item; - type Return = (); - - fn codegen( - &self, - ctx: &BindgenContext, - result: &mut CodegenResult<'_>, - item: &Item, - ) { - debug!("::codegen: item = {item:?}"); - debug_assert!(item.is_enabled_for_codegen(ctx)); - - match *self.kind() { - TypeKind::Void | - TypeKind::NullPtr | - TypeKind::Int(..) | - TypeKind::Float(..) | - TypeKind::Complex(..) | - TypeKind::Array(..) | - TypeKind::Vector(..) | - TypeKind::Pointer(..) | - TypeKind::Reference(..) | - TypeKind::Function(..) | - TypeKind::ResolvedTypeRef(..) | - TypeKind::Opaque | - TypeKind::TypeParam => { - // These items don't need code generation, they only need to be - // converted to rust types in fields, arguments, and such. - // NOTE(emilio): If you add to this list, make sure to also add - // it to BindgenContext::compute_allowlisted_and_codegen_items. - } - TypeKind::TemplateInstantiation(ref inst) => { - inst.codegen(ctx, result, item); - } - TypeKind::BlockPointer(inner) => { - if !ctx.options().generate_block { - return; - } - - let inner_item = - inner.into_resolver().through_type_refs().resolve(ctx); - let name = item.canonical_name(ctx); - - let inner_rust_type = { - if let TypeKind::Function(fnsig) = - inner_item.kind().expect_type().kind() - { - utils::fnsig_block(ctx, fnsig) - } else { - panic!("invalid block typedef: {inner_item:?}") - } - }; - - let rust_name = ctx.rust_ident(name); - - let mut tokens = if let Some(comment) = item.comment(ctx) { - attributes::doc(&comment) - } else { - quote! {} - }; - - tokens.append_all(quote! 
{ - pub type #rust_name = #inner_rust_type ; - }); - - result.push(tokens); - result.saw_block(); - } - TypeKind::Comp(ref ci) => ci.codegen(ctx, result, item), - TypeKind::TemplateAlias(inner, _) | TypeKind::Alias(inner) => { - let inner_item = - inner.into_resolver().through_type_refs().resolve(ctx); - let name = item.canonical_name(ctx); - let path = item.canonical_path(ctx); - - { - let through_type_aliases = inner - .into_resolver() - .through_type_refs() - .through_type_aliases() - .resolve(ctx); - - // Try to catch the common pattern: - // - // typedef struct foo { ... } foo; - // - // here, and also other more complex cases like #946. - if through_type_aliases.canonical_path(ctx) == path { - return; - } - } - - // If this is a known named type, disallow generating anything - // for it too. If size_t -> usize conversions are enabled, we - // need to check that these conversions are permissible, but - // nothing needs to be generated, still. - let spelling = self.name().expect("Unnamed alias?"); - if utils::type_from_named(ctx, spelling).is_some() { - if let "size_t" | "ssize_t" = spelling { - let layout = inner_item - .kind() - .expect_type() - .layout(ctx) - .expect("No layout?"); - assert_eq!( - layout.size, - ctx.target_pointer_size(), - "Target platform requires `--no-size_t-is-usize`. The size of `{spelling}` ({}) does not match the target pointer size ({})", - layout.size, - ctx.target_pointer_size(), - ); - assert_eq!( - layout.align, - ctx.target_pointer_size(), - "Target platform requires `--no-size_t-is-usize`. The alignment of `{spelling}` ({}) does not match the target pointer size ({})", - layout.align, - ctx.target_pointer_size(), - ); - } - return; - } - - let mut outer_params = item.used_template_params(ctx); - - let is_opaque = item.is_opaque(ctx, &()); - let inner_rust_type = if is_opaque { - outer_params = vec![]; - self.to_opaque(ctx, item) - } else { - // Its possible that we have better layout information than - // the inner type does, so fall back to an opaque blob based - // on our layout if converting the inner item fails. - inner_item - .try_to_rust_ty_or_opaque(ctx, &()) - .unwrap_or_else(|_| self.to_opaque(ctx, item)) - .with_implicit_template_params(ctx, inner_item) - }; - - { - // FIXME(emilio): This is a workaround to avoid generating - // incorrect type aliases because of types that we haven't - // been able to resolve (because, eg, they depend on a - // template parameter). - // - // It's kind of a shame not generating them even when they - // could be referenced, but we already do the same for items - // with invalid template parameters, and at least this way - // they can be replaced, instead of generating plain invalid - // code. - let inner_canon_type = - inner_item.expect_type().canonical_type(ctx); - if inner_canon_type.is_invalid_type_param() { - warn!( - "Item contained invalid named type, skipping: \ - {:?}, {:?}", - item, inner_item - ); - return; - } - } - - let rust_name = ctx.rust_ident(&name); - - ctx.options().for_each_callback(|cb| { - cb.new_item_found( - DiscoveredItemId::new(item.id().as_usize()), - DiscoveredItem::Alias { - alias_name: rust_name.to_string(), - alias_for: DiscoveredItemId::new( - inner_item.id().as_usize(), - ), - }, - ); - }); - - let mut tokens = if let Some(comment) = item.comment(ctx) { - attributes::doc(&comment) - } else { - quote! 
{} - }; - - let alias_style = if ctx.options().type_alias.matches(&name) { - AliasVariation::TypeAlias - } else if ctx.options().new_type_alias.matches(&name) { - AliasVariation::NewType - } else if ctx.options().new_type_alias_deref.matches(&name) { - AliasVariation::NewTypeDeref - } else { - ctx.options().default_alias_style - }; - - // We prefer using `pub use` over `pub type` because of: - // https://github.com/rust-lang/rust/issues/26264 - if matches!(inner_rust_type, syn::Type::Path(_)) && - outer_params.is_empty() && - !is_opaque && - alias_style == AliasVariation::TypeAlias && - inner_item.expect_type().canonical_type(ctx).is_enum() - { - tokens.append_all(quote! { - pub use - }); - let path = top_level_path(ctx, item); - tokens.append_separated(path, quote!(::)); - tokens.append_all(quote! { - :: #inner_rust_type as #rust_name ; - }); - result.push(tokens); - return; - } - - tokens.append_all(match alias_style { - AliasVariation::TypeAlias => quote! { - pub type #rust_name - }, - AliasVariation::NewType | AliasVariation::NewTypeDeref => { - let mut attributes = - vec![attributes::repr("transparent")]; - let packed = false; // Types can't be packed in Rust. - let derivable_traits = - derives_of_item(item, ctx, packed); - let mut derives: Vec<_> = derivable_traits.into(); - // The custom derives callback may return a list of derive attributes; - // add them to the end of the list. - let custom_derives = - ctx.options().all_callbacks(|cb| { - cb.add_derives(&DeriveInfo { - name: &name, - kind: DeriveTypeKind::Struct, - }) - }); - // In most cases this will be a no-op, since custom_derives will be empty. - derives - .extend(custom_derives.iter().map(|s| s.as_str())); - attributes.push(attributes::derives(&derives)); - - let custom_attributes = - ctx.options().all_callbacks(|cb| { - cb.add_attributes(&AttributeInfo { - name: &name, - kind: DeriveTypeKind::Struct, - }) - }); - attributes.extend( - custom_attributes - .iter() - .map(|s| s.parse().unwrap()), - ); - - quote! { - #( #attributes )* - pub struct #rust_name - } - } - }); - - let params: Vec<_> = outer_params - .into_iter() - .filter_map(|p| p.as_template_param(ctx, &())) - .collect(); - if params - .iter() - .any(|p| ctx.resolve_type(*p).is_invalid_type_param()) - { - warn!( - "Item contained invalid template \ - parameter: {:?}", - item - ); - return; - } - let params: Vec<_> = params - .iter() - .map(|p| { - p.try_to_rust_ty(ctx, &()).expect( - "type parameters can always convert to rust ty OK", - ) - }) - .collect(); - - if !params.is_empty() { - tokens.append_all(quote! { - < #( #params ),* > - }); - } - - tokens.append_all(match alias_style { - AliasVariation::TypeAlias => quote! { - = #inner_rust_type ; - }, - AliasVariation::NewType | AliasVariation::NewTypeDeref => { - let visibility = ctx - .options() - .last_callback(|cb| { - cb.field_visibility(FieldInfo { - type_name: &item.canonical_name(ctx), - field_name: "0", - field_type_name: inner_item - .expect_type() - .name(), - }) - }) - .unwrap_or(ctx.options().default_visibility); - let access_spec = access_specifier(visibility); - quote! { - (#access_spec #inner_rust_type) ; - } - } - }); - - if alias_style == AliasVariation::NewTypeDeref { - let prefix = ctx.trait_prefix(); - tokens.append_all(quote! 
{ - impl ::#prefix::ops::Deref for #rust_name { - type Target = #inner_rust_type; - #[inline] - fn deref(&self) -> &Self::Target { - &self.0 - } - } - impl ::#prefix::ops::DerefMut for #rust_name { - #[inline] - fn deref_mut(&mut self) -> &mut Self::Target { - &mut self.0 - } - } - }); - } - - result.push(tokens); - } - TypeKind::Enum(ref ei) => ei.codegen(ctx, result, item), - TypeKind::ObjCId | TypeKind::ObjCSel => { - result.saw_objc(); - } - TypeKind::ObjCInterface(ref interface) => { - interface.codegen(ctx, result, item); - } - ref u @ TypeKind::UnresolvedTypeRef(..) => { - unreachable!("Should have been resolved after parsing {u:?}!") - } - } - } -} - -struct Vtable<'a> { - item_id: ItemId, - /// A reference to the originating compound object. - #[allow(dead_code)] - comp_info: &'a CompInfo, -} - -impl<'a> Vtable<'a> { - fn new(item_id: ItemId, comp_info: &'a CompInfo) -> Self { - Vtable { item_id, comp_info } - } -} - -impl CodeGenerator for Vtable<'_> { - type Extra = Item; - type Return = (); - - fn codegen( - &self, - ctx: &BindgenContext, - result: &mut CodegenResult<'_>, - item: &Item, - ) { - assert_eq!(item.id(), self.item_id); - debug_assert!(item.is_enabled_for_codegen(ctx)); - let name = ctx.rust_ident(self.canonical_name(ctx)); - - // For now, we will only generate vtables for classes that: - // - do not inherit from others (compilers merge VTable from primary parent class). - // - do not contain a virtual destructor (requires ordering; platforms generate different vtables). - if ctx.options().vtable_generation && - self.comp_info.base_members().is_empty() && - self.comp_info.destructor().is_none() - { - let class_ident = ctx.rust_ident(self.item_id.canonical_name(ctx)); - - let methods = self - .comp_info - .methods() - .iter() - .filter_map(|m| { - if !m.is_virtual() { - return None; - } - - let function_item = ctx.resolve_item(m.signature()); - let function = function_item.expect_function(); - let signature_item = ctx.resolve_item(function.signature()); - let TypeKind::Function(ref signature) = signature_item.expect_type().kind() else { panic!("Function signature type mismatch") }; - - // FIXME: Is there a canonical name without the class prepended? - let function_name = function_item.canonical_name(ctx); - - // FIXME: Need to account for overloading with times_seen (separately from regular function path). - let function_name = ctx.rust_ident(function_name); - let mut args = utils::fnsig_arguments(ctx, signature); - let ret = utils::fnsig_return_ty(ctx, signature); - - args[0] = if m.is_const() { - quote! { this: *const #class_ident } - } else { - quote! { this: *mut #class_ident } - }; - - Some(quote! { - pub #function_name : unsafe extern "C" fn( #( #args ),* ) #ret - }) - }) - .collect::>(); - - result.push(quote! { - #[repr(C)] - pub struct #name { - #( #methods ),* - } - }); - } else { - // For the cases we don't support, simply generate an empty struct. - let void = helpers::ast_ty::c_void(ctx); - - result.push(quote! { - #[repr(C)] - pub struct #name ( #void ); - }); - } - } -} - -impl ItemCanonicalName for Vtable<'_> { - fn canonical_name(&self, ctx: &BindgenContext) -> String { - format!("{}__bindgen_vtable", self.item_id.canonical_name(ctx)) - } -} - -impl TryToRustTy for Vtable<'_> { - type Extra = (); - - fn try_to_rust_ty( - &self, - ctx: &BindgenContext, - _: &(), - ) -> error::Result { - let name = ctx.rust_ident(self.canonical_name(ctx)); - Ok(syn::parse_quote! 
{ #name })
-    }
-}
-
-impl CodeGenerator for TemplateInstantiation {
-    type Extra = Item;
-    type Return = ();
-
-    fn codegen(
-        &self,
-        ctx: &BindgenContext,
-        result: &mut CodegenResult<'_>,
-        item: &Item,
-    ) {
-        debug_assert!(item.is_enabled_for_codegen(ctx));
-
-        // Although uses of instantiations don't need code generation, and are
-        // just converted to rust types in fields, vars, etc, we take this
-        // opportunity to generate tests for their layout here. If the
-        // instantiation is opaque, then its presumably because we don't
-        // properly understand it (maybe because of specializations), and so we
-        // shouldn't emit layout tests either.
-        if !ctx.options().layout_tests || self.is_opaque(ctx, item) {
-            return;
-        }
-
-        // For consistency with other layout tests, gate this on offset_of.
-        let compile_time = ctx.options().rust_features().offset_of;
-
-        // If there are any unbound type parameters, then we can't generate a
-        // layout test because we aren't dealing with a concrete type with a
-        // concrete size and alignment.
-        if ctx.uses_any_template_parameters(item.id()) {
-            return;
-        }
-
-        let layout = item.kind().expect_type().layout(ctx);
-
-        if let Some(layout) = layout {
-            let size = layout.size;
-            let align = layout.align;
-
-            let name = item.full_disambiguated_name(ctx);
-            let fn_name = if compile_time {
-                None
-            } else {
-                let mut fn_name =
-                    format!("__bindgen_test_layout_{name}_instantiation");
-                let times_seen = result.overload_number(&fn_name);
-                if times_seen > 0 {
-                    write!(&mut fn_name, "_{times_seen}").unwrap();
-                }
-                Some(ctx.rust_ident_raw(fn_name))
-            };
-
-            let prefix = ctx.trait_prefix();
-            let ident = item.to_rust_ty_or_opaque(ctx, &());
-            let size_of_expr = quote! {
-                ::#prefix::mem::size_of::<#ident>()
-            };
-            let align_of_expr = quote! {
-                ::#prefix::mem::align_of::<#ident>()
-            };
-            let size_of_err =
-                format!("Size of template specialization: {name}");
-            let align_of_err =
-                format!("Align of template specialization: {name}");
-
-            if compile_time {
-                // In an ideal world this would be assert_eq!, but that is not
-                // supported in const fn due to the need for string formatting.
-                // If #size_of_expr > #size, this will index OOB, and if
-                // #size_of_expr < #size, the subtraction will overflow, both
-                // of which print enough information to see what has gone wrong.
-                result.push(quote! {
-                    #[allow(clippy::unnecessary_operation, clippy::identity_op)]
-                    const _: () = {
-                        [#size_of_err][#size_of_expr - #size];
-                        [#align_of_err][#align_of_expr - #align];
-                    };
-                });
-            } else {
-                result.push(quote! {
-                    #[test]
-                    fn #fn_name() {
-                        assert_eq!(#size_of_expr, #size, #size_of_err);
-                        assert_eq!(#align_of_expr, #align, #align_of_err);
-                    }
-                });
-            }
-        }
-    }
-}
-
-/// Trait for implementing the code generation of a struct or union field.
-trait FieldCodegen<'a> { - type Extra; - - #[allow(clippy::too_many_arguments)] - fn codegen( - &self, - ctx: &BindgenContext, - visibility_kind: FieldVisibilityKind, - accessor_kind: FieldAccessorKind, - parent: &CompInfo, - parent_item: &Item, - last_field: bool, - result: &mut CodegenResult, - struct_layout: &mut StructLayoutTracker, - fields: &mut F, - methods: &mut M, - extra: Self::Extra, - ) where - F: Extend, - M: Extend; -} - -impl FieldCodegen<'_> for Field { - type Extra = (); - - fn codegen( - &self, - ctx: &BindgenContext, - visibility_kind: FieldVisibilityKind, - accessor_kind: FieldAccessorKind, - parent: &CompInfo, - parent_item: &Item, - last_field: bool, - result: &mut CodegenResult, - struct_layout: &mut StructLayoutTracker, - fields: &mut F, - methods: &mut M, - _: (), - ) where - F: Extend, - M: Extend, - { - match *self { - Field::DataMember(ref data) => { - data.codegen( - ctx, - visibility_kind, - accessor_kind, - parent, - parent_item, - last_field, - result, - struct_layout, - fields, - methods, - (), - ); - } - Field::Bitfields(ref unit) => { - unit.codegen( - ctx, - visibility_kind, - accessor_kind, - parent, - parent_item, - last_field, - result, - struct_layout, - fields, - methods, - (), - ); - } - } - } -} - -fn wrap_union_field_if_needed( - ctx: &BindgenContext, - struct_layout: &StructLayoutTracker, - ty: syn::Type, - result: &mut CodegenResult, -) -> syn::Type { - if struct_layout.is_rust_union() { - if struct_layout.can_copy_union_fields() { - ty - } else { - let prefix = ctx.trait_prefix(); - syn::parse_quote! { ::#prefix::mem::ManuallyDrop<#ty> } - } - } else { - result.saw_bindgen_union(); - if ctx.options().enable_cxx_namespaces { - syn::parse_quote! { root::__BindgenUnionField<#ty> } - } else { - syn::parse_quote! { __BindgenUnionField<#ty> } - } - } -} - -impl FieldCodegen<'_> for FieldData { - type Extra = (); - - fn codegen( - &self, - ctx: &BindgenContext, - parent_visibility_kind: FieldVisibilityKind, - accessor_kind: FieldAccessorKind, - parent: &CompInfo, - parent_item: &Item, - last_field: bool, - result: &mut CodegenResult, - struct_layout: &mut StructLayoutTracker, - fields: &mut F, - methods: &mut M, - _: (), - ) where - F: Extend, - M: Extend, - { - // Bitfields are handled by `FieldCodegen` implementations for - // `BitfieldUnit` and `Bitfield`. - assert!(self.bitfield_width().is_none()); - - let field_item = - self.ty().into_resolver().through_type_refs().resolve(ctx); - let field_ty = field_item.expect_type(); - let ty = self - .ty() - .to_rust_ty_or_opaque(ctx, &()) - .with_implicit_template_params(ctx, field_item); - - // NB: If supported, we use proper `union` types. - let ty = if parent.is_union() { - wrap_union_field_if_needed(ctx, struct_layout, ty, result) - } else if let Some(item) = field_ty.is_incomplete_array(ctx) { - // Only FAM if its the last field - if ctx.options().flexarray_dst && last_field { - struct_layout.saw_flexible_array(); - syn::parse_quote! { FAM } - } else { - result.saw_incomplete_array(); - - let inner = item.to_rust_ty_or_opaque(ctx, &()); - - if ctx.options().enable_cxx_namespaces { - syn::parse_quote! { root::__IncompleteArrayField<#inner> } - } else { - syn::parse_quote! { __IncompleteArrayField<#inner> } - } - } - } else { - ty - }; - - let mut field = quote! 
{}; - if ctx.options().generate_comments { - if let Some(raw_comment) = self.comment() { - let comment = ctx.options().process_comment(raw_comment); - field = attributes::doc(&comment); - } - } - - let field_name = self - .name() - .map(|name| ctx.rust_mangle(name).into_owned()) - .expect("Each field should have a name in codegen!"); - let field_name = field_name.as_str(); - let field_ident = ctx.rust_ident_raw(field_name); - - if let Some(padding_field) = - struct_layout.saw_field(field_name, field_ty, self.offset()) - { - fields.extend(Some(padding_field)); - } - - let visibility = compute_visibility( - ctx, - self.is_public(), - ctx.options().last_callback(|cb| { - cb.field_visibility(FieldInfo { - type_name: &parent_item.canonical_name(ctx), - field_name, - field_type_name: field_ty.name(), - }) - }), - self.annotations(), - parent_visibility_kind, - ); - let accessor_kind = - self.annotations().accessor_kind().unwrap_or(accessor_kind); - - match visibility { - FieldVisibilityKind::Private => { - field.append_all(quote! { - #field_ident : #ty , - }); - } - FieldVisibilityKind::PublicCrate => { - field.append_all(quote! { - pub(crate) #field_ident : #ty , - }); - } - FieldVisibilityKind::Public => { - field.append_all(quote! { - pub #field_ident : #ty , - }); - } - } - - fields.extend(Some(field)); - - // TODO: Factor the following code out, please! - if accessor_kind == FieldAccessorKind::None { - return; - } - - let getter_name = ctx.rust_ident_raw(format!("get_{field_name}")); - let mutable_getter_name = - ctx.rust_ident_raw(format!("get_{field_name}_mut")); - - methods.extend(Some(match accessor_kind { - FieldAccessorKind::None => unreachable!(), - FieldAccessorKind::Regular => { - quote! { - #[inline] - pub fn #getter_name(&self) -> & #ty { - &self.#field_ident - } - - #[inline] - pub fn #mutable_getter_name(&mut self) -> &mut #ty { - &mut self.#field_ident - } - } - } - FieldAccessorKind::Unsafe => { - quote! { - #[inline] - pub unsafe fn #getter_name(&self) -> & #ty { - &self.#field_ident - } - - #[inline] - pub unsafe fn #mutable_getter_name(&mut self) -> &mut #ty { - &mut self.#field_ident - } - } - } - FieldAccessorKind::Immutable => { - quote! { - #[inline] - pub fn #getter_name(&self) -> & #ty { - &self.#field_ident - } - } - } - })); - } -} - -impl BitfieldUnit { - /// Get the constructor name for this bitfield unit. - fn ctor_name(&self) -> proc_macro2::TokenStream { - let ctor_name = Ident::new( - &format!("new_bitfield_{}", self.nth()), - Span::call_site(), - ); - quote! { - #ctor_name - } - } -} - -impl Bitfield { - /// Extend an under construction bitfield unit constructor with this - /// bitfield. This sets the relevant bits on the `__bindgen_bitfield_unit` - /// variable that's being constructed. - fn extend_ctor_impl( - &self, - ctx: &BindgenContext, - param_name: &proc_macro2::TokenStream, - mut ctor_impl: proc_macro2::TokenStream, - ) -> proc_macro2::TokenStream { - let bitfield_ty = ctx.resolve_type(self.ty()); - let bitfield_ty_layout = bitfield_ty - .layout(ctx) - .expect("Bitfield without layout? Gah!"); - let bitfield_int_ty = helpers::integer_type(bitfield_ty_layout).expect( - "Should already have verified that the bitfield is \ - representable as an int", - ); - - let offset = self.offset_into_unit(); - let width = self.width() as u8; - let prefix = ctx.trait_prefix(); - - ctor_impl.append_all(quote! 
{
-            __bindgen_bitfield_unit.set(
-                #offset,
-                #width,
-                {
-                    let #param_name: #bitfield_int_ty = unsafe {
-                        ::#prefix::mem::transmute(#param_name)
-                    };
-                    #param_name as u64
-                }
-            );
-        });
-
-        ctor_impl
-    }
-}
-
-fn access_specifier(
-    visibility: FieldVisibilityKind,
-) -> proc_macro2::TokenStream {
-    match visibility {
-        FieldVisibilityKind::Private => quote! {},
-        FieldVisibilityKind::PublicCrate => quote! { pub(crate) },
-        FieldVisibilityKind::Public => quote! { pub },
-    }
-}
-
-/// Compute a fields or structs visibility based on multiple conditions.
-/// 1. If the element was declared public, and we respect such CXX accesses specs
-///    (context option) => By default Public, but this can be overruled by an `annotation`.
-///
-/// 2. If the element was declared private, and we respect such CXX accesses specs
-///    (context option) => By default Private, but this can be overruled by an `annotation`.
-///
-/// 3. If we do not respect visibility modifiers, the result depends on the `annotation`,
-///    if any, or the passed `default_kind`.
-///
-fn compute_visibility(
-    ctx: &BindgenContext,
-    is_declared_public: bool,
-    callback_override: Option<FieldVisibilityKind>,
-    annotations: &Annotations,
-    default_kind: FieldVisibilityKind,
-) -> FieldVisibilityKind {
-    callback_override
-        .or_else(|| annotations.visibility_kind())
-        .unwrap_or_else(|| {
-            match (is_declared_public, ctx.options().respect_cxx_access_specs) {
-                (true, true) => {
-                    // declared as public, cxx specs are respected
-                    FieldVisibilityKind::Public
-                }
-                (false, true) => {
-                    // declared as private, cxx specs are respected
-                    FieldVisibilityKind::Private
-                }
-                (_, false) => {
-                    // cxx specs are not respected, declaration does not matter.
-                    default_kind
-                }
-            }
-        })
-}
-
-impl FieldCodegen<'_> for BitfieldUnit {
-    type Extra = ();
-
-    fn codegen<F, M>(
-        &self,
-        ctx: &BindgenContext,
-        visibility_kind: FieldVisibilityKind,
-        accessor_kind: FieldAccessorKind,
-        parent: &CompInfo,
-        parent_item: &Item,
-        last_field: bool,
-        result: &mut CodegenResult,
-        struct_layout: &mut StructLayoutTracker,
-        fields: &mut F,
-        methods: &mut M,
-        _: (),
-    ) where
-        F: Extend<proc_macro2::TokenStream>,
-        M: Extend<proc_macro2::TokenStream>,
-    {
-        use crate::ir::ty::RUST_DERIVE_IN_ARRAY_LIMIT;
-
-        result.saw_bitfield_unit();
-
-        let layout = self.layout();
-        let unit_field_ty = helpers::bitfield_unit(ctx, layout);
-        let field_ty = {
-            let unit_field_ty = unit_field_ty.clone();
-            if parent.is_union() {
-                wrap_union_field_if_needed(
-                    ctx,
-                    struct_layout,
-                    unit_field_ty,
-                    result,
-                )
-            } else {
-                unit_field_ty
-            }
-        };
-
-        {
-            let align_field_name = format!("_bitfield_align_{}", self.nth());
-            let align_field_ident = ctx.rust_ident(align_field_name);
-            let align_ty = match self.layout().align {
-                n if n >= 8 => quote! { u64 },
-                4 => quote! { u32 },
-                2 => quote! { u16 },
-                _ => quote! { u8 },
-            };
-            let access_spec = access_specifier(visibility_kind);
-            let align_field = quote! {
-                #access_spec #align_field_ident: [#align_ty; 0],
-            };
-            fields.extend(Some(align_field));
-        }
-
-        let unit_field_name = format!("_bitfield_{}", self.nth());
-        let unit_field_ident = ctx.rust_ident(&unit_field_name);
-
-        let ctor_name = self.ctor_name();
-        let mut ctor_params = vec![];
-        let mut ctor_impl = quote! {};
-
-        // We cannot generate any constructor if the underlying storage can't
-        // implement AsRef<[u8]> / AsMut<[u8]> / etc, or can't derive Default.
-        //
-        // We don't check `larger_arrays` here because Default does still have
-        // the 32 items limitation.
- let mut generate_ctor = layout.size <= RUST_DERIVE_IN_ARRAY_LIMIT; - - let mut unit_visibility = visibility_kind; - let bfields = self.bitfields(); - for (idx, bf) in bfields.iter().enumerate() { - // Codegen not allowed for anonymous bitfields - if bf.name().is_none() { - continue; - } - - if layout.size > RUST_DERIVE_IN_ARRAY_LIMIT && - !ctx.options().rust_features().larger_arrays - { - continue; - } - - let mut bitfield_representable_as_int = true; - let mut bitfield_visibility = visibility_kind; - bf.codegen( - ctx, - visibility_kind, - accessor_kind, - parent, - parent_item, - last_field && idx == bfields.len() - 1, - result, - struct_layout, - fields, - methods, - ( - &unit_field_name, - &unit_field_ty, - &mut bitfield_representable_as_int, - &mut bitfield_visibility, - ), - ); - if bitfield_visibility < unit_visibility { - unit_visibility = bitfield_visibility; - } - - // Generating a constructor requires the bitfield to be representable as an integer. - if !bitfield_representable_as_int { - generate_ctor = false; - continue; - } - - let param_name = bitfield_getter_name(ctx, bf); - let bitfield_ty_item = ctx.resolve_item(bf.ty()); - let bitfield_ty = bitfield_ty_item.expect_type(); - let bitfield_ty = - bitfield_ty.to_rust_ty_or_opaque(ctx, bitfield_ty_item); - - ctor_params.push(quote! { - #param_name : #bitfield_ty - }); - ctor_impl = bf.extend_ctor_impl(ctx, ¶m_name, ctor_impl); - } - - let access_spec = access_specifier(unit_visibility); - - let field = quote! { - #access_spec #unit_field_ident : #field_ty , - }; - fields.extend(Some(field)); - - if generate_ctor { - methods.extend(Some(quote! { - #[inline] - #access_spec fn #ctor_name ( #( #ctor_params ),* ) -> #unit_field_ty { - let mut __bindgen_bitfield_unit: #unit_field_ty = Default::default(); - #ctor_impl - __bindgen_bitfield_unit - } - })); - } - - struct_layout.saw_bitfield_unit(layout); - } -} - -fn bitfield_getter_name( - ctx: &BindgenContext, - bitfield: &Bitfield, -) -> proc_macro2::TokenStream { - let name = bitfield.getter_name(); - let name = ctx.rust_ident_raw(name); - quote! { #name } -} - -fn bitfield_raw_getter_name( - ctx: &BindgenContext, - bitfield: &Bitfield, -) -> proc_macro2::TokenStream { - let name = bitfield.getter_name(); - let name = ctx.rust_ident_raw(format!("{name}_raw")); - quote! { #name } -} - -fn bitfield_setter_name( - ctx: &BindgenContext, - bitfield: &Bitfield, -) -> proc_macro2::TokenStream { - let setter = bitfield.setter_name(); - let setter = ctx.rust_ident_raw(setter); - quote! { #setter } -} - -fn bitfield_raw_setter_name( - ctx: &BindgenContext, - bitfield: &Bitfield, -) -> proc_macro2::TokenStream { - let setter = bitfield.setter_name(); - let setter = ctx.rust_ident_raw(format!("{setter}_raw")); - quote! 
{ #setter } -} - -impl<'a> FieldCodegen<'a> for Bitfield { - type Extra = ( - &'a str, - &'a syn::Type, - &'a mut bool, - &'a mut FieldVisibilityKind, - ); - - fn codegen( - &self, - ctx: &BindgenContext, - visibility_kind: FieldVisibilityKind, - _accessor_kind: FieldAccessorKind, - parent: &CompInfo, - parent_item: &Item, - _last_field: bool, - _result: &mut CodegenResult, - struct_layout: &mut StructLayoutTracker, - _fields: &mut F, - methods: &mut M, - ( - unit_field_name, - unit_field_ty, - bitfield_representable_as_int, - bitfield_visibility, - ): ( - &'a str, - &'a syn::Type, - &mut bool, - &'a mut FieldVisibilityKind, - ), - ) where - F: Extend, - M: Extend, - { - let prefix = ctx.trait_prefix(); - let getter_name = bitfield_getter_name(ctx, self); - let setter_name = bitfield_setter_name(ctx, self); - let raw_getter_name = bitfield_raw_getter_name(ctx, self); - let raw_setter_name = bitfield_raw_setter_name(ctx, self); - let unit_field_ident = Ident::new(unit_field_name, Span::call_site()); - - let bitfield_ty_item = ctx.resolve_item(self.ty()); - let bitfield_ty = bitfield_ty_item.expect_type(); - let bitfield_ty_ident = bitfield_ty.name(); - - let bitfield_ty_layout = bitfield_ty - .layout(ctx) - .expect("Bitfield without layout? Gah!"); - let bitfield_int_ty = - if let Some(int_ty) = helpers::integer_type(bitfield_ty_layout) { - *bitfield_representable_as_int = true; - int_ty - } else { - *bitfield_representable_as_int = false; - return; - }; - - let bitfield_ty = - bitfield_ty.to_rust_ty_or_opaque(ctx, bitfield_ty_item); - - let offset = self.offset_into_unit(); - let width = self.width() as u8; - - let override_visibility = self.name().and_then(|field_name| { - ctx.options().last_callback(|cb| { - cb.field_visibility(FieldInfo { - type_name: &parent_item.canonical_name(ctx), - field_name, - field_type_name: bitfield_ty_ident, - }) - }) - }); - *bitfield_visibility = compute_visibility( - ctx, - self.is_public(), - override_visibility, - self.annotations(), - visibility_kind, - ); - let access_spec = access_specifier(*bitfield_visibility); - - if parent.is_union() && !struct_layout.is_rust_union() { - methods.extend(Some(quote! { - #[inline] - #access_spec fn #getter_name(&self) -> #bitfield_ty { - unsafe { - ::#prefix::mem::transmute( - self.#unit_field_ident.as_ref().get(#offset, #width) - as #bitfield_int_ty - ) - } - } - - #[inline] - #access_spec fn #setter_name(&mut self, val: #bitfield_ty) { - unsafe { - let val: #bitfield_int_ty = ::#prefix::mem::transmute(val); - self.#unit_field_ident.as_mut().set( - #offset, - #width, - val as u64 - ) - } - } - })); - - if ctx.options().rust_features.raw_ref_macros { - methods.extend(Some(quote! { - #[inline] - #access_spec unsafe fn #raw_getter_name(this: *const Self) -> #bitfield_ty { - unsafe { - ::#prefix::mem::transmute(<#unit_field_ty>::raw_get( - (*::#prefix::ptr::addr_of!((*this).#unit_field_ident)).as_ref() as *const _, - #offset, - #width, - ) as #bitfield_int_ty) - } - } - - #[inline] - #access_spec unsafe fn #raw_setter_name(this: *mut Self, val: #bitfield_ty) { - unsafe { - let val: #bitfield_int_ty = ::#prefix::mem::transmute(val); - <#unit_field_ty>::raw_set( - (*::#prefix::ptr::addr_of_mut!((*this).#unit_field_ident)).as_mut() as *mut _, - #offset, - #width, - val as u64, - ) - } - } - })); - } - } else { - methods.extend(Some(quote! 
{ - #[inline] - #access_spec fn #getter_name(&self) -> #bitfield_ty { - unsafe { - ::#prefix::mem::transmute( - self.#unit_field_ident.get(#offset, #width) - as #bitfield_int_ty - ) - } - } - - #[inline] - #access_spec fn #setter_name(&mut self, val: #bitfield_ty) { - unsafe { - let val: #bitfield_int_ty = ::#prefix::mem::transmute(val); - self.#unit_field_ident.set( - #offset, - #width, - val as u64 - ) - } - } - })); - - if ctx.options().rust_features.raw_ref_macros { - methods.extend(Some(quote! { - #[inline] - #access_spec unsafe fn #raw_getter_name(this: *const Self) -> #bitfield_ty { - unsafe { - ::#prefix::mem::transmute(<#unit_field_ty>::raw_get( - ::#prefix::ptr::addr_of!((*this).#unit_field_ident), - #offset, - #width, - ) as #bitfield_int_ty) - } - } - - #[inline] - #access_spec unsafe fn #raw_setter_name(this: *mut Self, val: #bitfield_ty) { - unsafe { - let val: #bitfield_int_ty = ::#prefix::mem::transmute(val); - <#unit_field_ty>::raw_set( - ::#prefix::ptr::addr_of_mut!((*this).#unit_field_ident), - #offset, - #width, - val as u64, - ) - } - } - })); - } - } - } -} - -impl CodeGenerator for CompInfo { - type Extra = Item; - type Return = (); - - fn codegen( - &self, - ctx: &BindgenContext, - result: &mut CodegenResult<'_>, - item: &Item, - ) { - debug!("::codegen: item = {item:?}"); - debug_assert!(item.is_enabled_for_codegen(ctx)); - - // Don't output classes with template parameters that aren't types, and - // also don't output template specializations, neither total or partial. - if self.has_non_type_template_params() { - return; - } - - let ty = item.expect_type(); - let layout = ty.layout(ctx); - let mut packed = self.is_packed(ctx, layout.as_ref()); - - let canonical_name = item.canonical_name(ctx); - let canonical_ident = ctx.rust_ident(&canonical_name); - - // Generate the vtable from the method list if appropriate. - // - // TODO: I don't know how this could play with virtual methods that are - // not in the list of methods found by us, we'll see. Also, could the - // order of the vtable pointers vary? - // - // FIXME: Once we generate proper vtables, we need to codegen the - // vtable, but *not* generate a field for it in the case that - // HasVtable::has_vtable_ptr is false but HasVtable::has_vtable is true. - // - // Also, we need to generate the vtable in such a way it "inherits" from - // the parent too. - let is_opaque = item.is_opaque(ctx, &()); - let mut fields = vec![]; - let visibility = item - .annotations() - .visibility_kind() - .unwrap_or(ctx.options().default_visibility); - let mut struct_layout = StructLayoutTracker::new( - ctx, - self, - ty, - &canonical_name, - visibility, - packed, - ); - - let mut generic_param_names = vec![]; - - for (idx, ty) in item.used_template_params(ctx).iter().enumerate() { - let param = ctx.resolve_type(*ty); - let name = param.name().unwrap(); - let ident = ctx.rust_ident(name); - generic_param_names.push(ident.clone()); - - let prefix = ctx.trait_prefix(); - let field_name = ctx.rust_ident(format!("_phantom_{idx}")); - fields.push(quote! { - pub #field_name : ::#prefix::marker::PhantomData< - ::#prefix::cell::UnsafeCell<#ident> - > , - }); - } - - if !is_opaque { - if item.has_vtable_ptr(ctx) { - let vtable = Vtable::new(item.id(), self); - vtable.codegen(ctx, result, item); - - let vtable_type = vtable - .try_to_rust_ty(ctx, &()) - .expect("vtable to Rust type conversion is infallible") - .to_ptr(true); - - fields.push(quote! 
{ - pub vtable_: #vtable_type , - }); - - struct_layout.saw_vtable(); - } - - for base in self.base_members() { - if !base.requires_storage(ctx) { - continue; - } - - let inner_item = ctx.resolve_item(base.ty); - let inner = inner_item - .to_rust_ty_or_opaque(ctx, &()) - .with_implicit_template_params(ctx, inner_item); - let field_name = ctx.rust_ident(&base.field_name); - - struct_layout.saw_base(inner_item.expect_type()); - - let visibility = match ( - base.is_public(), - ctx.options().respect_cxx_access_specs, - ) { - (true, true) => FieldVisibilityKind::Public, - (false, true) => FieldVisibilityKind::Private, - _ => ctx.options().default_visibility, - }; - - let access_spec = access_specifier(visibility); - fields.push(quote! { - #access_spec #field_name: #inner, - }); - } - } - - let mut methods = vec![]; - if !is_opaque { - let struct_accessor_kind = item - .annotations() - .accessor_kind() - .unwrap_or(FieldAccessorKind::None); - let field_decls = self.fields(); - for (idx, field) in field_decls.iter().enumerate() { - field.codegen( - ctx, - visibility, - struct_accessor_kind, - self, - item, - idx == field_decls.len() - 1, - result, - &mut struct_layout, - &mut fields, - &mut methods, - (), - ); - } - // Check whether an explicit padding field is needed - // at the end. - if let Some(comp_layout) = layout { - fields.extend( - struct_layout - .add_tail_padding(&canonical_name, comp_layout), - ); - } - } - - if is_opaque { - // Opaque item should not have generated methods, fields. - debug_assert!(fields.is_empty()); - debug_assert!(methods.is_empty()); - } - - let is_union = self.kind() == CompKind::Union; - let layout = item.kind().expect_type().layout(ctx); - let zero_sized = item.is_zero_sized(ctx); - let forward_decl = self.is_forward_declaration(); - - let mut explicit_align = None; - - // C++ requires every struct to be addressable, so what C++ compilers do - // is making the struct 1-byte sized. - // - // This is apparently not the case for C, see: - // https://github.com/rust-lang/rust-bindgen/issues/551 - // - // Just get the layout, and assume C++ if not. - // - // NOTE: This check is conveniently here to avoid the dummy fields we - // may add for unused template parameters. - if !forward_decl && zero_sized { - let has_address = if is_opaque { - // Generate the address field if it's an opaque type and - // couldn't determine the layout of the blob. - layout.is_none() - } else { - layout.map_or(true, |l| l.size != 0) - }; - - if has_address { - let layout = Layout::new(1, 1); - let ty = helpers::blob(ctx, Layout::new(1, 1), false); - struct_layout.saw_field_with_layout( - "_address", - layout, - /* offset = */ Some(0), - ); - fields.push(quote! { - pub _address: #ty, - }); - } - } - - if is_opaque { - match layout { - Some(l) => { - explicit_align = Some(l.align); - - let ty = helpers::blob(ctx, l, false); - fields.push(quote! { - pub _bindgen_opaque_blob: #ty , - }); - } - None => { - if !forward_decl { - warn!("Opaque type without layout! 
Expect dragons!"); - } - } - } - } else if !is_union && !zero_sized { - if let Some(padding_field) = - layout.and_then(|layout| struct_layout.pad_struct(layout)) - { - fields.push(padding_field); - } - - if let Some(layout) = layout { - if struct_layout.requires_explicit_align(layout) { - if layout.align == 1 { - packed = true; - } else { - explicit_align = Some(layout.align); - } - } - } - } else if is_union && !forward_decl { - if let Some(layout) = layout { - // TODO(emilio): It'd be nice to unify this with the struct path above somehow. - if struct_layout.requires_explicit_align(layout) { - explicit_align = Some(layout.align); - } - if !struct_layout.is_rust_union() { - let ty = helpers::blob(ctx, layout, false); - fields.push(quote! { - pub bindgen_union_field: #ty , - }); - } - } - } - - if forward_decl { - fields.push(quote! { - _unused: [u8; 0], - }); - } - - let (flex_array_generic, flex_inner_ty) = if ctx.options().flexarray_dst - { - match self.flex_array_member(ctx) { - Some(ty) => { - let inner = ty.to_rust_ty_or_opaque(ctx, &()); - ( - Some(quote! { FAM: ?Sized = [ #inner; 0 ] }), - Some(quote! { #inner }), - ) - } - None => (None, None), - } - } else { - (None, None) - }; - - // Generics, including the flexible array member. - // - // generics - generic parameters for the struct declaration - // impl_generics_labels - generic parameters for `impl<...>` - // impl_generics_params - generic parameters for `impl structname<...>` - // - // `impl` blocks are for non-FAM related impls like Default, etc - let (generics, impl_generics_labels, impl_generics_params) = - if !generic_param_names.is_empty() || flex_array_generic.is_some() { - let (flex_sized, flex_fam) = match flex_inner_ty.as_ref() { - None => (None, None), - Some(ty) => ( - Some(quote! { [ #ty; 0 ] }), - Some(quote! { FAM: ?Sized = [ #ty; 0 ] }), - ), - }; - - ( - quote! { - < #( #generic_param_names , )* #flex_fam > - }, - quote! { - < #( #generic_param_names , )* > - }, - quote! { - < #( #generic_param_names , )* #flex_sized > - }, - ) - } else { - (quote! {}, quote! {}, quote! {}) - }; - - let mut attributes = vec![]; - let mut needs_clone_impl = false; - let mut needs_default_impl = false; - let mut needs_debug_impl = false; - let mut needs_partialeq_impl = false; - let needs_flexarray_impl = flex_array_generic.is_some(); - if let Some(comment) = item.comment(ctx) { - attributes.push(attributes::doc(&comment)); - } - - // if a type has both a "packed" attribute and an "align(N)" attribute, then check if the - // "packed" attr is redundant, and do not include it if so. - if packed && - !is_opaque && - !(explicit_align.is_some() && - self.already_packed(ctx).unwrap_or(false)) - { - let n = layout.map_or(1, |l| l.align); - assert!(ctx.options().rust_features().repr_packed_n || n == 1); - let packed_repr = if n == 1 { - "packed".to_string() - } else { - format!("packed({n})") - }; - attributes.push(attributes::repr_list(&["C", &packed_repr])); - } else { - attributes.push(attributes::repr("C")); - } - - if true { - if let Some(explicit) = explicit_align { - // Ensure that the struct has the correct alignment even in - // presence of alignas. - let explicit = helpers::ast_ty::int_expr(explicit as i64); - attributes.push(quote! 
{ - #[repr(align(#explicit))] - }); - } - } - - let derivable_traits = derives_of_item(item, ctx, packed); - if !derivable_traits.contains(DerivableTraits::DEBUG) { - needs_debug_impl = ctx.options().derive_debug && - ctx.options().impl_debug && - !ctx.no_debug_by_name(item) && - !item.annotations().disallow_debug(); - } - - if !derivable_traits.contains(DerivableTraits::DEFAULT) { - needs_default_impl = ctx.options().derive_default && - !self.is_forward_declaration() && - !ctx.no_default_by_name(item) && - !item.annotations().disallow_default(); - } - - let all_template_params = item.all_template_params(ctx); - - if derivable_traits.contains(DerivableTraits::COPY) && - !derivable_traits.contains(DerivableTraits::CLONE) - { - needs_clone_impl = true; - } - - if !derivable_traits.contains(DerivableTraits::PARTIAL_EQ) { - needs_partialeq_impl = ctx.options().derive_partialeq && - ctx.options().impl_partialeq && - ctx.lookup_can_derive_partialeq_or_partialord(item.id()) == - CanDerive::Manually; - } - - let mut derives: Vec<_> = derivable_traits.into(); - derives.extend(item.annotations().derives().iter().map(String::as_str)); - - let is_rust_union = is_union && struct_layout.is_rust_union(); - - let discovered_id = DiscoveredItemId::new(item.id().as_usize()); - ctx.options().for_each_callback(|cb| { - let discovered_item = match self.kind() { - CompKind::Struct => DiscoveredItem::Struct { - original_name: item - .kind() - .expect_type() - .name() - .map(String::from), - final_name: canonical_ident.to_string(), - }, - CompKind::Union => DiscoveredItem::Union { - original_name: item - .kind() - .expect_type() - .name() - .map(String::from), - final_name: canonical_ident.to_string(), - }, - }; - - cb.new_item_found(discovered_id, discovered_item); - }); - - // The custom derives callback may return a list of derive attributes; - // add them to the end of the list. - let custom_derives = ctx.options().all_callbacks(|cb| { - cb.add_derives(&DeriveInfo { - name: &canonical_name, - kind: if is_rust_union { - DeriveTypeKind::Union - } else { - DeriveTypeKind::Struct - }, - }) - }); - // In most cases this will be a no-op, since custom_derives will be empty. - derives.extend(custom_derives.iter().map(|s| s.as_str())); - - if !derives.is_empty() { - attributes.push(attributes::derives(&derives)); - } - - attributes.extend( - item.annotations() - .attributes() - .iter() - .map(|s| s.parse().unwrap()), - ); - - let custom_attributes = ctx.options().all_callbacks(|cb| { - cb.add_attributes(&AttributeInfo { - name: &canonical_name, - kind: if is_rust_union { - DeriveTypeKind::Union - } else { - DeriveTypeKind::Struct - }, - }) - }); - attributes.extend(custom_attributes.iter().map(|s| s.parse().unwrap())); - - if item.must_use(ctx) { - attributes.push(attributes::must_use()); - } - - let mut tokens = if is_rust_union { - quote! { - #( #attributes )* - pub union #canonical_ident - } - } else { - quote! { - #( #attributes )* - pub struct #canonical_ident - } - }; - - tokens.append_all(quote! { - #generics { - #( #fields )* - } - }); - result.push(tokens); - - // Generate the inner types and all that stuff. - // - // TODO: In the future we might want to be smart, and use nested - // modules, and whatnot. 
- for ty in self.inner_types() { - let child_item = ctx.resolve_item(*ty); - // assert_eq!(child_item.parent_id(), item.id()); - child_item.codegen(ctx, result, &()); - } - - // NOTE: Some unexposed attributes (like alignment attributes) may - // affect layout, so we're bad and pray to the gods for avoid sending - // all the tests to shit when parsing things like max_align_t. - if self.found_unknown_attr() { - warn!("Type {canonical_ident} has an unknown attribute that may affect layout"); - } - - if all_template_params.is_empty() { - if !is_opaque { - for var in self.inner_vars() { - ctx.resolve_item(*var).codegen(ctx, result, &()); - } - } - - if ctx.options().layout_tests && !self.is_forward_declaration() { - if let Some(layout) = layout { - let compile_time = ctx.options().rust_features().offset_of; - let fn_name = if compile_time { - None - } else { - let fn_name = - format!("bindgen_test_layout_{canonical_ident}"); - Some(ctx.rust_ident_raw(fn_name)) - }; - let prefix = ctx.trait_prefix(); - let size_of_expr = quote! { - ::#prefix::mem::size_of::<#canonical_ident>() - }; - let align_of_expr = quote! { - ::#prefix::mem::align_of::<#canonical_ident>() - }; - let size = layout.size; - let align = layout.align; - let size_of_err = format!("Size of {canonical_ident}"); - let align_of_err = - format!("Alignment of {canonical_ident}"); - - let check_struct_align = if compile_time { - quote! { - [#align_of_err][#align_of_expr - #align]; - } - } else { - quote! { - assert_eq!(#align_of_expr, #align, #align_of_err); - } - }; - - let should_skip_field_offset_checks = is_opaque; - - let check_field_offset = if should_skip_field_offset_checks - { - vec![] - } else { - self.fields() - .iter() - .filter_map(|field| { - let Field::DataMember(field) = field else { return None }; - let name = field.name()?; - field.offset().map(|offset| { - let field_offset = offset / 8; - let field_name = ctx.rust_ident(name); - let offset_of_err = format!("Offset of field: {canonical_ident}::{field_name}"); - if compile_time { - quote! { - [#offset_of_err][ - ::#prefix::mem::offset_of!(#canonical_ident, #field_name) - #field_offset - ]; - } - } else { - quote! { - assert_eq!( - unsafe { - ::#prefix::ptr::addr_of!((*ptr).#field_name) as usize - ptr as usize - }, - #field_offset, - #offset_of_err - ); - } - } - }) - }) - .collect() - }; - - let uninit_decl = if check_field_offset.is_empty() || - compile_time - { - None - } else { - // FIXME: When MSRV >= 1.59.0, we can use - // > const PTR: *const #canonical_ident = ::#prefix::mem::MaybeUninit::uninit().as_ptr(); - Some(quote! { - // Use a shared MaybeUninit so that rustc with - // opt-level=0 doesn't take too much stack space, - // see #2218. - const UNINIT: ::#prefix::mem::MaybeUninit<#canonical_ident> = ::#prefix::mem::MaybeUninit::uninit(); - let ptr = UNINIT.as_ptr(); - }) - }; - - if compile_time { - result.push(quote! { - #[allow(clippy::unnecessary_operation, clippy::identity_op)] - const _: () = { - [#size_of_err][#size_of_expr - #size]; - #check_struct_align - #( #check_field_offset )* - }; - }); - } else { - result.push(quote! 
{ - #[test] - fn #fn_name() { - #uninit_decl - assert_eq!(#size_of_expr, #size, #size_of_err); - #check_struct_align - #( #check_field_offset )* - } - }); - } - } - } - - let mut method_names = Default::default(); - if ctx.options().codegen_config.methods() { - for method in self.methods() { - assert_ne!(method.kind(), MethodKind::Constructor); - method.codegen_method( - ctx, - &mut methods, - &mut method_names, - result, - self, - discovered_id, - ); - } - } - - if ctx.options().codegen_config.constructors() { - for sig in self.constructors() { - Method::new( - MethodKind::Constructor, - *sig, - /* const */ - false, - ) - .codegen_method( - ctx, - &mut methods, - &mut method_names, - result, - self, - discovered_id, - ); - } - } - - if ctx.options().codegen_config.destructors() { - if let Some((kind, destructor)) = self.destructor() { - debug_assert!(kind.is_destructor()); - Method::new(kind, destructor, false).codegen_method( - ctx, - &mut methods, - &mut method_names, - result, - self, - discovered_id, - ); - } - } - } - - // NB: We can't use to_rust_ty here since for opaque types this tries to - // use the specialization knowledge to generate a blob field. - let ty_for_impl = quote! { - #canonical_ident #impl_generics_params - }; - - if needs_clone_impl { - result.push(quote! { - impl #impl_generics_labels Clone for #ty_for_impl { - fn clone(&self) -> Self { *self } - } - }); - } - - if needs_flexarray_impl { - result.push(self.generate_flexarray( - ctx, - &canonical_ident, - flex_inner_ty.as_ref(), - &generic_param_names, - &impl_generics_labels, - )); - } - - if needs_default_impl { - let prefix = ctx.trait_prefix(); - let body = if ctx.options().rust_features().maybe_uninit { - quote! { - let mut s = ::#prefix::mem::MaybeUninit::::uninit(); - unsafe { - ::#prefix::ptr::write_bytes(s.as_mut_ptr(), 0, 1); - s.assume_init() - } - } - } else { - quote! { - unsafe { - let mut s: Self = ::#prefix::mem::uninitialized(); - ::#prefix::ptr::write_bytes(&mut s, 0, 1); - s - } - } - }; - // Note we use `ptr::write_bytes()` instead of `mem::zeroed()` because the latter does - // not necessarily ensure padding bytes are zeroed. Some C libraries are sensitive to - // non-zero padding bytes, especially when forwards/backwards compatibility is - // involved. - result.push(quote! { - impl #impl_generics_labels Default for #ty_for_impl { - fn default() -> Self { - #body - } - } - }); - } - - if needs_debug_impl { - let impl_ = impl_debug::gen_debug_impl( - ctx, - self.fields(), - item, - self.kind(), - ); - - let prefix = ctx.trait_prefix(); - - result.push(quote! { - impl #impl_generics_labels ::#prefix::fmt::Debug for #ty_for_impl { - #impl_ - } - }); - } - - if needs_partialeq_impl { - if let Some(impl_) = impl_partialeq::gen_partialeq_impl( - ctx, - self, - item, - &ty_for_impl, - ) { - let partialeq_bounds = if generic_param_names.is_empty() { - quote! {} - } else { - let bounds = generic_param_names.iter().map(|t| { - quote! { #t: PartialEq } - }); - quote! { where #( #bounds ),* } - }; - - let prefix = ctx.trait_prefix(); - result.push(quote! { - impl #impl_generics_labels ::#prefix::cmp::PartialEq for #ty_for_impl #partialeq_bounds { - #impl_ - } - }); - } - } - - if !methods.is_empty() { - result.push(quote! 
{ - impl #impl_generics_labels #ty_for_impl { - #( #methods )* - } - }); - } - } -} - -impl CompInfo { - fn generate_flexarray( - &self, - ctx: &BindgenContext, - canonical_ident: &Ident, - flex_inner_ty: Option<&proc_macro2::TokenStream>, - generic_param_names: &[Ident], - impl_generics_labels: &proc_macro2::TokenStream, - ) -> proc_macro2::TokenStream { - let prefix = ctx.trait_prefix(); - - let flex_array = flex_inner_ty.as_ref().map(|ty| quote! { [ #ty ] }); - - let dst_ty_for_impl = quote! { - #canonical_ident < #( #generic_param_names , )* #flex_array > - - }; - let sized_ty_for_impl = quote! { - #canonical_ident < #( #generic_param_names , )* [ #flex_inner_ty; 0 ] > - }; - - let layout = if ctx.options().rust_features().layout_for_ptr { - quote! { - pub fn layout(len: usize) -> ::#prefix::alloc::Layout { - // SAFETY: Null pointers are OK if we don't deref them - unsafe { - let p: *const Self = ::#prefix::ptr::from_raw_parts(::#prefix::ptr::null::<()>(), len); - ::#prefix::alloc::Layout::for_value_raw(p) - } - } - } - } else { - quote!() - }; - - let (from_ptr_dst, from_ptr_sized) = if ctx - .options() - .rust_features() - .ptr_metadata - { - let flex_ref_inner = ctx.wrap_unsafe_ops(quote! { - Self::flex_ptr(self, len) - }); - let flex_ref_mut_inner = ctx.wrap_unsafe_ops(quote! { - Self::flex_ptr_mut(self, len).assume_init() - }); - let flex_ptr_inner = ctx.wrap_unsafe_ops(quote! { - &*::#prefix::ptr::from_raw_parts(ptr as *const (), len) - }); - let flex_ptr_mut_inner = ctx.wrap_unsafe_ops(quote! { - // Initialize reference without ever exposing it, as its possibly uninitialized - let mut uninit = ::#prefix::mem::MaybeUninit::<&mut #dst_ty_for_impl>::uninit(); - (uninit.as_mut_ptr() as *mut *mut #dst_ty_for_impl) - .write(::#prefix::ptr::from_raw_parts_mut(ptr as *mut (), len)); - - uninit - }); - - ( - quote! { - #[inline] - pub fn fixed(&self) -> (& #sized_ty_for_impl, usize) { - unsafe { - let (ptr, len) = (self as *const Self).to_raw_parts(); - (&*(ptr as *const #sized_ty_for_impl), len) - } - } - - #[inline] - pub fn fixed_mut(&mut self) -> (&mut #sized_ty_for_impl, usize) { - unsafe { - let (ptr, len) = (self as *mut Self).to_raw_parts(); - (&mut *(ptr as *mut #sized_ty_for_impl), len) - } - } - }, - quote! { - /// Convert a sized prefix to an unsized structure with the given length. - /// - /// SAFETY: Underlying storage is initialized up to at least `len` elements. - pub unsafe fn flex_ref(&self, len: usize) -> &#dst_ty_for_impl { - // SAFETY: Reference is always valid as pointer. Caller is guaranteeing `len`. - #flex_ref_inner - } - - /// Convert a mutable sized prefix to an unsized structure with the given length. - /// - /// SAFETY: Underlying storage is initialized up to at least `len` elements. - #[inline] - pub unsafe fn flex_ref_mut(&mut self, len: usize) -> &mut #dst_ty_for_impl { - // SAFETY: Reference is always valid as pointer. Caller is guaranteeing `len`. - #flex_ref_mut_inner - } - - /// Construct DST variant from a pointer and a size. - /// - /// NOTE: lifetime of returned reference is not tied to any underlying storage. - /// SAFETY: `ptr` is valid. Underlying storage is fully initialized up to at least `len` elements. - #[inline] - pub unsafe fn flex_ptr<'unbounded>(ptr: *const Self, len: usize) -> &'unbounded #dst_ty_for_impl { - #flex_ptr_inner - } - - /// Construct mutable DST variant from a pointer and a - /// size. 
The returned `&mut` reference is initialized - /// pointing to memory referenced by `ptr`, but there's - /// no requirement that that memory be initialized. - /// - /// NOTE: lifetime of returned reference is not tied to any underlying storage. - /// SAFETY: `ptr` is valid. Underlying storage has space for at least `len` elements. - #[inline] - pub unsafe fn flex_ptr_mut<'unbounded>( - ptr: *mut Self, - len: usize, - ) -> ::#prefix::mem::MaybeUninit<&'unbounded mut #dst_ty_for_impl> { - #flex_ptr_mut_inner - } - }, - ) - } else { - (quote!(), quote!()) - }; - - quote! { - impl #impl_generics_labels #dst_ty_for_impl { - #layout - #from_ptr_dst - } - - impl #impl_generics_labels #sized_ty_for_impl { - #from_ptr_sized - } - } - } -} - -impl Method { - fn codegen_method( - &self, - ctx: &BindgenContext, - methods: &mut Vec, - method_names: &mut HashSet, - result: &mut CodegenResult<'_>, - _parent: &CompInfo, - parent_id: DiscoveredItemId, - ) { - assert!({ - let cc = &ctx.options().codegen_config; - match self.kind() { - MethodKind::Constructor => cc.constructors(), - MethodKind::Destructor | - MethodKind::VirtualDestructor { .. } => cc.destructors(), - MethodKind::Static | - MethodKind::Normal | - MethodKind::Virtual { .. } => cc.methods(), - } - }); - - // TODO(emilio): We could generate final stuff at least. - if self.is_virtual() { - return; // FIXME - } - - // First of all, output the actual function. - let function_item = ctx.resolve_item(self.signature()); - let id = DiscoveredItemId::new(function_item.id().as_usize()); - if !function_item.process_before_codegen(ctx, result) { - return; - } - let function = function_item.expect_function(); - let times_seen = function.codegen(ctx, result, function_item); - let Some(times_seen) = times_seen else { return }; - let signature_item = ctx.resolve_item(function.signature()); - let mut name = match self.kind() { - MethodKind::Constructor => "new".into(), - MethodKind::Destructor => "destruct".into(), - _ => function.name().to_owned(), - }; - - let TypeKind::Function(ref signature) = - *signature_item.expect_type().kind() - else { - panic!("How in the world?") - }; - - let supported_abi = signature.abi(ctx, Some(&*name)).is_ok(); - if !supported_abi { - return; - } - - // Do not generate variadic methods, since rust does not allow - // implementing them, and we don't do a good job at it anyway. - if signature.is_variadic() { - return; - } - - if method_names.contains(&name) { - let mut count = 1; - let mut new_name; - - while { - new_name = format!("{name}{count}"); - method_names.contains(&new_name) - } { - count += 1; - } - - name = new_name; - } - - method_names.insert(name.clone()); - - ctx.options().for_each_callback(|cb| { - cb.new_item_found( - id, - DiscoveredItem::Method { - parent: parent_id, - final_name: name.clone(), - }, - ); - }); - - let mut function_name = function_item.canonical_name(ctx); - if times_seen > 0 { - write!(&mut function_name, "{times_seen}").unwrap(); - } - let function_name = ctx.rust_ident(function_name); - let mut args = utils::fnsig_arguments(ctx, signature); - let mut ret = utils::fnsig_return_ty(ctx, signature); - - if !self.is_static() && !self.is_constructor() { - args[0] = if self.is_const() { - quote! { &self } - } else { - quote! { &mut self } - }; - } - - // If it's a constructor, we always return `Self`, and we inject the - // "this" parameter, so there's no need to ask the user for it. - // - // Note that constructors in Clang are represented as functions with - // return-type = void. 
- if self.is_constructor() { - args.remove(0); - ret = quote! { -> Self }; - } - - let mut exprs = - helpers::ast_ty::arguments_from_signature(signature, ctx); - - let mut stmts = vec![]; - - // If it's a constructor, we need to insert an extra parameter with a - // variable called `__bindgen_tmp` we're going to create. - if self.is_constructor() { - let prefix = ctx.trait_prefix(); - let tmp_variable_decl = if ctx - .options() - .rust_features() - .maybe_uninit - { - exprs[0] = quote! { - __bindgen_tmp.as_mut_ptr() - }; - quote! { - let mut __bindgen_tmp = ::#prefix::mem::MaybeUninit::uninit() - } - } else { - exprs[0] = quote! { - &mut __bindgen_tmp - }; - quote! { - let mut __bindgen_tmp = ::#prefix::mem::uninitialized() - } - }; - stmts.push(tmp_variable_decl); - } else if !self.is_static() { - assert!(!exprs.is_empty()); - exprs[0] = quote! { - self - }; - } - - let call = quote! { - #function_name (#( #exprs ),* ) - }; - - stmts.push(call); - - if self.is_constructor() { - stmts.push(if ctx.options().rust_features().maybe_uninit { - quote! { - __bindgen_tmp.assume_init() - } - } else { - quote! { - __bindgen_tmp - } - }); - } - - let block = ctx.wrap_unsafe_ops(quote! ( #( #stmts );*)); - - let mut attrs = vec![attributes::inline()]; - - if signature.must_use() { - attrs.push(attributes::must_use()); - } - - let name = ctx.rust_ident(&name); - methods.push(quote! { - #(#attrs)* - pub unsafe fn #name ( #( #args ),* ) #ret { - #block - } - }); - } -} - -/// A helper type that represents different enum variations. -#[derive(Copy, Clone, PartialEq, Eq, Debug, Default)] -pub enum EnumVariation { - /// The code for this enum will use a Rust enum. Note that creating this in unsafe code - /// (including FFI) with an invalid value will invoke undefined behaviour, whether or not - /// its marked as `#[non_exhaustive]`. - Rust { - /// Indicates whether the generated struct should be `#[non_exhaustive]` - non_exhaustive: bool, - }, - /// The code for this enum will use a newtype - NewType { - /// Indicates whether the newtype will have bitwise operators - is_bitfield: bool, - /// Indicates whether the variants will be represented as global constants - is_global: bool, - }, - /// The code for this enum will use consts - #[default] - Consts, - /// The code for this enum will use a module containing consts - ModuleConsts, -} - -impl EnumVariation { - fn is_rust(self) -> bool { - matches!(self, EnumVariation::Rust { .. }) - } - - /// Both the `Const` and `ModuleConsts` variants will cause this to return - /// true. - fn is_const(self) -> bool { - matches!(self, EnumVariation::Consts | EnumVariation::ModuleConsts) - } -} - -impl fmt::Display for EnumVariation { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - let s = match self { - Self::Rust { - non_exhaustive: false, - } => "rust", - Self::Rust { - non_exhaustive: true, - } => "rust_non_exhaustive", - Self::NewType { - is_bitfield: true, .. - } => "bitfield", - Self::NewType { - is_bitfield: false, - is_global, - } => { - if *is_global { - "newtype_global" - } else { - "newtype" - } - } - Self::Consts => "consts", - Self::ModuleConsts => "moduleconsts", - }; - s.fmt(f) - } -} - -impl FromStr for EnumVariation { - type Err = std::io::Error; - - /// Create a `EnumVariation` from a string. 
- fn from_str(s: &str) -> Result { - match s { - "rust" => Ok(EnumVariation::Rust { - non_exhaustive: false, - }), - "rust_non_exhaustive" => Ok(EnumVariation::Rust { - non_exhaustive: true, - }), - "bitfield" => Ok(EnumVariation::NewType { - is_bitfield: true, - is_global: false, - }), - "consts" => Ok(EnumVariation::Consts), - "moduleconsts" => Ok(EnumVariation::ModuleConsts), - "newtype" => Ok(EnumVariation::NewType { - is_bitfield: false, - is_global: false, - }), - "newtype_global" => Ok(EnumVariation::NewType { - is_bitfield: false, - is_global: true, - }), - _ => Err(std::io::Error::new( - std::io::ErrorKind::InvalidInput, - concat!( - "Got an invalid EnumVariation. Accepted values ", - "are 'rust', 'rust_non_exhaustive', 'bitfield', 'consts',", - "'moduleconsts', 'newtype' and 'newtype_global'." - ), - )), - } - } -} - -struct EnumBuilder { - /// Type identifier of the enum. - /// - /// This is the base name, i.e. for `ModuleConst` enums, this does not include the module name. - enum_type: Ident, - /// Attributes applying to the enum type - attrs: Vec, - /// The representation of the enum, e.g. `u32`. - repr: syn::Type, - /// The enum kind we are generating - kind: EnumBuilderKind, - /// A list of all variants this enum has. - enum_variants: Vec, -} - -/// A helper type to construct different enum variations. -enum EnumBuilderKind { - Rust { - non_exhaustive: bool, - }, - NewType { - is_bitfield: bool, - is_global: bool, - /// if the enum is named or not. - is_anonymous: bool, - }, - Consts { - needs_typedef: bool, - }, - ModuleConsts { - module_name: Ident, - }, -} - -impl EnumBuilder { - /// Returns true if the builder is for a rustified enum. - fn is_rust_enum(&self) -> bool { - matches!(self.kind, EnumBuilderKind::Rust { .. }) - } - - /// Create a new enum given an item builder, a canonical name, a name for - /// the representation, and which variation it should be generated as. - fn new( - name: &str, - attrs: Vec, - repr: &syn::Type, - enum_variation: EnumVariation, - has_typedef: bool, - enum_is_anonymous: bool, - ) -> Self { - let ident = Ident::new(name, Span::call_site()); - // For most variants this is the same - let mut enum_ty = ident.clone(); - - let kind = match enum_variation { - EnumVariation::NewType { - is_bitfield, - is_global, - } => EnumBuilderKind::NewType { - is_bitfield, - is_global, - is_anonymous: enum_is_anonymous, - }, - - EnumVariation::Rust { non_exhaustive } => { - EnumBuilderKind::Rust { non_exhaustive } - } - - EnumVariation::Consts => EnumBuilderKind::Consts { - needs_typedef: !has_typedef, - }, - - EnumVariation::ModuleConsts => { - enum_ty = Ident::new( - CONSTIFIED_ENUM_MODULE_REPR_NAME, - Span::call_site(), - ); - - EnumBuilderKind::ModuleConsts { - module_name: ident.clone(), - } - } - }; - EnumBuilder { - enum_type: enum_ty, - attrs, - repr: repr.clone(), - kind, - enum_variants: vec![], - } - } - - /// Add a variant to this enum. 
- fn with_variant( - mut self, - ctx: &BindgenContext, - variant: &EnumVariant, - variant_doc: proc_macro2::TokenStream, - mangling_prefix: Option<&str>, - rust_ty: &syn::Type, - is_ty_named: bool, - ) -> Self { - let variant_name = ctx.rust_mangle(variant.name()); - let is_rust_enum = self.is_rust_enum(); - let expr = match variant.val() { - EnumVariantValue::Boolean(v) if is_rust_enum => { - helpers::ast_ty::uint_expr(u64::from(v)) - } - EnumVariantValue::Boolean(v) => quote!(#v), - EnumVariantValue::Signed(v) => helpers::ast_ty::int_expr(v), - EnumVariantValue::Unsigned(v) => helpers::ast_ty::uint_expr(v), - }; - - match self.kind { - EnumBuilderKind::Rust { .. } => { - let name = ctx.rust_ident(variant_name); - self.enum_variants.push(EnumVariantInfo { - variant_name: name, - variant_doc, - value: expr, - }); - self - } - - EnumBuilderKind::NewType { is_global, .. } => { - let variant_ident = if is_ty_named && !is_global { - ctx.rust_ident(variant_name) - } else { - ctx.rust_ident(match mangling_prefix { - Some(prefix) => { - Cow::Owned(format!("{prefix}_{variant_name}")) - } - None => variant_name, - }) - }; - self.enum_variants.push(EnumVariantInfo { - variant_name: variant_ident, - variant_doc, - value: quote! { #rust_ty ( #expr )}, - }); - - self - } - - EnumBuilderKind::Consts { .. } => { - let constant_name = match mangling_prefix { - Some(prefix) => { - Cow::Owned(format!("{prefix}_{variant_name}")) - } - None => variant_name, - }; - - let ident = ctx.rust_ident(constant_name); - self.enum_variants.push(EnumVariantInfo { - variant_name: ident, - variant_doc, - value: quote! { #expr }, - }); - - self - } - EnumBuilderKind::ModuleConsts { .. } => { - let name = ctx.rust_ident(variant_name); - self.enum_variants.push(EnumVariantInfo { - variant_name: name, - variant_doc, - value: quote! { #expr }, - }); - self - } - } - } - - fn newtype_bitfield_impl( - prefix: &Ident, - rust_ty: &syn::Type, - ) -> proc_macro2::TokenStream { - let rust_ty_name = &rust_ty; - quote! { - impl ::#prefix::ops::BitOr<#rust_ty> for #rust_ty { - type Output = Self; - - #[inline] - fn bitor(self, other: Self) -> Self { - #rust_ty_name(self.0 | other.0) - } - } - impl ::#prefix::ops::BitOrAssign for #rust_ty { - #[inline] - fn bitor_assign(&mut self, rhs: #rust_ty) { - self.0 |= rhs.0; - } - } - impl ::#prefix::ops::BitAnd<#rust_ty> for #rust_ty { - type Output = Self; - - #[inline] - fn bitand(self, other: Self) -> Self { - #rust_ty_name(self.0 & other.0) - } - } - impl ::#prefix::ops::BitAndAssign for #rust_ty { - #[inline] - fn bitand_assign(&mut self, rhs: #rust_ty) { - self.0 &= rhs.0; - } - } - } - } - - fn build( - self, - ctx: &BindgenContext, - rust_ty: &syn::Type, - ) -> proc_macro2::TokenStream { - let enum_ident = self.enum_type; - - // 1. Construct a list of the enum variants - let variants = match self.kind { - EnumBuilderKind::Rust { .. } => { - let mut variants = vec![]; - - for v in self.enum_variants { - let variant_doc = &v.variant_doc; - let variant_ident = &v.variant_name; - let variant_value = &v.value; - - variants.push(quote! { - #variant_doc - #variant_ident = #variant_value, - }); - } - - if variants.is_empty() { - variants.push( - quote! {__bindgen_cannot_repr_c_on_empty_enum = 0,}, - ); - } - variants - } - EnumBuilderKind::NewType { .. } => { - let mut variants = vec![]; - - for v in self.enum_variants { - let variant_doc = &v.variant_doc; - let variant_ident = &v.variant_name; - let variant_value = &v.value; - - variants.push(quote! 
{ - #variant_doc - pub const #variant_ident: #enum_ident = #variant_value; - }); - } - variants - } - EnumBuilderKind::Consts { .. } | - EnumBuilderKind::ModuleConsts { .. } => { - let mut variants = vec![]; - - for v in self.enum_variants { - let variant_doc = &v.variant_doc; - let variant_ident = &v.variant_name; - let variant_value = &v.value; - - variants.push(quote! { - #variant_doc - pub const #variant_ident: #enum_ident = #variant_value; - }); - } - variants - } - }; - let attrs = self.attrs; - let enum_repr = &self.repr; - - // 2. Generate the enum representation - match self.kind { - EnumBuilderKind::Rust { non_exhaustive } => { - let non_exhaustive_opt = - non_exhaustive.then(attributes::non_exhaustive); - - quote! { - // Note: repr is on top of attrs to keep the test expectations diff small. - // a future commit could move it further down. - #[repr(#enum_repr)] - #non_exhaustive_opt - #( #attrs )* - pub enum #enum_ident { - #( #variants )* - } - } - } - EnumBuilderKind::NewType { - is_bitfield, - is_global, - is_anonymous, - } => { - // There doesn't seem to be a technical reason why we generate - // anon enum variants as global constants. - // We keep this behavior to avoid breaking changes in the bindings. - let impl_variants = if is_anonymous || is_global { - quote! { - #( #variants )* - } - } else { - quote! { - impl #enum_ident { - #( #variants )* - } - } - }; - - let prefix = ctx.trait_prefix(); - let bitfield_impl_opt = is_bitfield - .then(|| Self::newtype_bitfield_impl(&prefix, rust_ty)); - - quote! { - // Previously variant impls where before the enum definition. - // lets keep this as is for now, to reduce the diff in generated bindings. - #impl_variants - - #bitfield_impl_opt - - #[repr(transparent)] - #( #attrs )* - pub struct #enum_ident (pub #enum_repr); - } - } - EnumBuilderKind::Consts { needs_typedef } => { - let typedef_opt = needs_typedef.then(|| { - quote! { - #( #attrs )* - pub type #enum_ident = #enum_repr; - } - }); - quote! { - #( #variants )* - - #typedef_opt - } - } - EnumBuilderKind::ModuleConsts { module_name, .. } => { - quote! { - // todo: Probably some attributes, e.g. `cfg` should apply to the `mod`. - pub mod #module_name { - #( #attrs )* - pub type #enum_ident = #enum_repr; - - #( #variants )* - } - } - } - } - } -} - -impl CodeGenerator for Enum { - type Extra = Item; - type Return = (); - - fn codegen( - &self, - ctx: &BindgenContext, - result: &mut CodegenResult<'_>, - item: &Item, - ) { - debug!("::codegen: item = {item:?}"); - debug_assert!(item.is_enabled_for_codegen(ctx)); - - let name = item.canonical_name(ctx); - let ident = ctx.rust_ident(&name); - let enum_ty = item.expect_type(); - let layout = enum_ty.layout(ctx); - let variation = self.computed_enum_variation(ctx, item); - - let repr_translated; - let repr = match self.repr().map(|repr| ctx.resolve_type(repr)) { - Some(repr) - if !ctx.options().translate_enum_integer_types && - !variation.is_rust() => - { - repr - } - repr => { - // An enum's integer type is translated to a native Rust - // integer type in 3 cases: - // * the enum is Rustified and we need a translated type for - // the repr attribute - // * the representation couldn't be determined from the C source - // * it was explicitly requested as a bindgen option - - let kind = if let Some(repr) = repr { - match *repr.canonical_type(ctx).kind() { - TypeKind::Int(int_kind) => int_kind, - _ => panic!("Unexpected type as enum repr"), - } - } else { - warn!( - "Guessing type of enum! 
Forward declarations of enums \ - shouldn't be legal!" - ); - IntKind::Int - }; - - let signed = kind.is_signed(); - let size = layout - .map(|l| l.size) - .or_else(|| kind.known_size()) - .unwrap_or(0); - - let translated = match (signed, size) { - (true, 1) => IntKind::I8, - (false, 1) => IntKind::U8, - (true, 2) => IntKind::I16, - (false, 2) => IntKind::U16, - (true, 4) => IntKind::I32, - (false, 4) => IntKind::U32, - (true, 8) => IntKind::I64, - (false, 8) => IntKind::U64, - _ => { - warn!( - "invalid enum decl: signed: {signed}, size: {size}" - ); - IntKind::I32 - } - }; - - repr_translated = - Type::new(None, None, TypeKind::Int(translated), false); - &repr_translated - } - }; - - let mut attrs = vec![]; - - if let Some(comment) = item.comment(ctx) { - attrs.push(attributes::doc(&comment)); - } - - if item.must_use(ctx) { - attrs.push(attributes::must_use()); - } - - if !variation.is_const() { - let packed = false; // Enums can't be packed in Rust. - let mut derives = derives_of_item(item, ctx, packed); - // For backwards compat, enums always derive - // Clone/Eq/PartialEq/Hash, even if we don't generate those by - // default. - derives.insert( - DerivableTraits::CLONE | - DerivableTraits::HASH | - DerivableTraits::PARTIAL_EQ | - DerivableTraits::EQ, - ); - let mut derives: Vec<_> = derives.into(); - for derive in item.annotations().derives() { - if !derives.contains(&derive.as_str()) { - derives.push(derive); - } - } - - // The custom derives callback may return a list of derive attributes; - // add them to the end of the list. - let custom_derives = ctx.options().all_callbacks(|cb| { - cb.add_derives(&DeriveInfo { - name: &name, - kind: DeriveTypeKind::Enum, - }) - }); - // In most cases this will be a no-op, since custom_derives will be empty. - derives.extend(custom_derives.iter().map(|s| s.as_str())); - - attrs.extend( - item.annotations() - .attributes() - .iter() - .map(|s| s.parse().unwrap()), - ); - - // The custom attribute callback may return a list of attributes; - // add them to the end of the list. - let custom_attributes = ctx.options().all_callbacks(|cb| { - cb.add_attributes(&AttributeInfo { - name: &name, - kind: DeriveTypeKind::Enum, - }) - }); - attrs.extend(custom_attributes.iter().map(|s| s.parse().unwrap())); - - attrs.push(attributes::derives(&derives)); - } - - fn add_constant( - ctx: &BindgenContext, - enum_: &Type, - // Only to avoid recomputing every time. - enum_canonical_name: &Ident, - // May be the same as "variant" if it's because the - // enum is unnamed and we still haven't seen the - // value. - variant_name: &Ident, - referenced_name: &Ident, - enum_rust_ty: &syn::Type, - result: &mut CodegenResult<'_>, - ) { - let constant_name = if enum_.name().is_some() { - if ctx.options().prepend_enum_name { - format!("{enum_canonical_name}_{variant_name}") - } else { - format!("{variant_name}") - } - } else { - format!("{variant_name}") - }; - let constant_name = ctx.rust_ident(constant_name); - - result.push(quote! 
{ - pub const #constant_name : #enum_rust_ty = - #enum_canonical_name :: #referenced_name ; - }); - } - - let repr = repr.to_rust_ty_or_opaque(ctx, item); - let has_typedef = ctx.is_enum_typedef_combo(item.id()); - - ctx.options().for_each_callback(|cb| { - cb.new_item_found( - DiscoveredItemId::new(item.id().as_usize()), - DiscoveredItem::Enum { - final_name: name.to_string(), - }, - ); - }); - - let mut builder = EnumBuilder::new( - &name, - attrs, - &repr, - variation, - has_typedef, - enum_ty.name().is_none(), - ); - - // A map where we keep a value -> variant relation. - let mut seen_values = HashMap::<_, Ident>::default(); - let enum_rust_ty = item.to_rust_ty_or_opaque(ctx, &()); - let is_toplevel = item.is_toplevel(ctx); - - // Used to mangle the constants we generate in the unnamed-enum case. - let parent_canonical_name = if is_toplevel { - None - } else { - Some(item.parent_id().canonical_name(ctx)) - }; - - let constant_mangling_prefix = if ctx.options().prepend_enum_name { - if enum_ty.name().is_none() { - parent_canonical_name.as_deref() - } else { - Some(&*name) - } - } else { - None - }; - - // NB: We defer the creation of constified variants, in case we find - // another variant with the same value (which is the common thing to - // do). - let mut constified_variants = VecDeque::new(); - - let mut iter = self.variants().iter().peekable(); - while let Some(variant) = - iter.next().or_else(|| constified_variants.pop_front()) - { - if variant.hidden() { - continue; - } - - if variant.force_constification() && iter.peek().is_some() { - constified_variants.push_back(variant); - continue; - } - - let mut variant_doc = quote! {}; - if ctx.options().generate_comments { - if let Some(raw_comment) = variant.comment() { - let processed_comment = - ctx.options().process_comment(raw_comment); - variant_doc = attributes::doc(&processed_comment); - } - } - - match seen_values.entry(variant.val()) { - Entry::Occupied(ref entry) => { - if variation.is_rust() { - let variant_name = ctx.rust_mangle(variant.name()); - let mangled_name = if is_toplevel || - enum_ty.name().is_some() - { - variant_name - } else { - let parent_name = - parent_canonical_name.as_ref().unwrap(); - - Cow::Owned(format!("{parent_name}_{variant_name}")) - }; - - let existing_variant_name = entry.get(); - // Use associated constants for named enums. - if enum_ty.name().is_some() { - let enum_canonical_name = &ident; - let variant_name = - ctx.rust_ident_raw(&*mangled_name); - result.push(quote! { - impl #enum_rust_ty { - pub const #variant_name : #enum_rust_ty = - #enum_canonical_name :: #existing_variant_name ; - } - }); - } else { - add_constant( - ctx, - enum_ty, - &ident, - &Ident::new(&mangled_name, Span::call_site()), - existing_variant_name, - &enum_rust_ty, - result, - ); - } - } else { - builder = builder.with_variant( - ctx, - variant, - variant_doc, - constant_mangling_prefix, - &enum_rust_ty, - enum_ty.name().is_some(), - ); - } - } - Entry::Vacant(entry) => { - builder = builder.with_variant( - ctx, - variant, - variant_doc, - constant_mangling_prefix, - &enum_rust_ty, - enum_ty.name().is_some(), - ); - - let variant_name = ctx.rust_ident(variant.name()); - - // If it's an unnamed enum, or constification is enforced, - // we also generate a constant so it can be properly - // accessed. 
- if (variation.is_rust() && enum_ty.name().is_none()) || - variant.force_constification() - { - let mangled_name = if is_toplevel { - variant_name.clone() - } else { - let parent_name = - parent_canonical_name.as_ref().unwrap(); - - Ident::new( - &format!("{parent_name}_{variant_name}"), - Span::call_site(), - ) - }; - - add_constant( - ctx, - enum_ty, - &ident, - &mangled_name, - &variant_name, - &enum_rust_ty, - result, - ); - } - - entry.insert(variant_name); - } - } - } - - let item = builder.build(ctx, &enum_rust_ty); - result.push(item); - } -} - -struct EnumVariantInfo { - variant_name: Ident, - variant_doc: proc_macro2::TokenStream, - value: proc_macro2::TokenStream, -} - -/// Enum for the default type of macro constants. -#[derive(Copy, Clone, PartialEq, Eq, Debug, Default)] -pub enum MacroTypeVariation { - /// Use i32 or i64 - Signed, - /// Use u32 or u64 - #[default] - Unsigned, -} - -impl fmt::Display for MacroTypeVariation { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - let s = match self { - Self::Signed => "signed", - Self::Unsigned => "unsigned", - }; - s.fmt(f) - } -} - -impl FromStr for MacroTypeVariation { - type Err = std::io::Error; - - /// Create a `MacroTypeVariation` from a string. - fn from_str(s: &str) -> Result { - match s { - "signed" => Ok(MacroTypeVariation::Signed), - "unsigned" => Ok(MacroTypeVariation::Unsigned), - _ => Err(std::io::Error::new( - std::io::ErrorKind::InvalidInput, - concat!( - "Got an invalid MacroTypeVariation. Accepted values ", - "are 'signed' and 'unsigned'" - ), - )), - } - } -} - -/// Enum for how aliases should be translated. -#[derive(Copy, Clone, PartialEq, Eq, Debug, Default)] -pub enum AliasVariation { - /// Convert to regular Rust alias - #[default] - TypeAlias, - /// Create a new type by wrapping the old type in a struct and using #[repr(transparent)] - NewType, - /// Same as `NewType` but also impl Deref to be able to use the methods of the wrapped type - NewTypeDeref, -} - -impl fmt::Display for AliasVariation { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - let s = match self { - Self::TypeAlias => "type_alias", - Self::NewType => "new_type", - Self::NewTypeDeref => "new_type_deref", - }; - - s.fmt(f) - } -} - -impl FromStr for AliasVariation { - type Err = std::io::Error; - - /// Create an `AliasVariation` from a string. - fn from_str(s: &str) -> Result { - match s { - "type_alias" => Ok(AliasVariation::TypeAlias), - "new_type" => Ok(AliasVariation::NewType), - "new_type_deref" => Ok(AliasVariation::NewTypeDeref), - _ => Err(std::io::Error::new( - std::io::ErrorKind::InvalidInput, - concat!( - "Got an invalid AliasVariation. Accepted values ", - "are 'type_alias', 'new_type', and 'new_type_deref'" - ), - )), - } - } -} - -/// Enum for how non-`Copy` `union`s should be translated. -#[derive(Copy, Clone, PartialEq, Eq, Debug)] -pub enum NonCopyUnionStyle { - /// Wrap members in a type generated by `bindgen`. - BindgenWrapper, - /// Wrap members in [`::core::mem::ManuallyDrop`]. - /// - /// Note: `ManuallyDrop` was stabilized in Rust 1.20.0, do not use it if your - /// MSRV is lower. 
- ManuallyDrop, -} - -impl fmt::Display for NonCopyUnionStyle { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - let s = match self { - Self::BindgenWrapper => "bindgen_wrapper", - Self::ManuallyDrop => "manually_drop", - }; - - s.fmt(f) - } -} - -impl Default for NonCopyUnionStyle { - fn default() -> Self { - Self::BindgenWrapper - } -} - -impl FromStr for NonCopyUnionStyle { - type Err = std::io::Error; - - fn from_str(s: &str) -> Result { - match s { - "bindgen_wrapper" => Ok(Self::BindgenWrapper), - "manually_drop" => Ok(Self::ManuallyDrop), - _ => Err(std::io::Error::new( - std::io::ErrorKind::InvalidInput, - concat!( - "Got an invalid NonCopyUnionStyle. Accepted values ", - "are 'bindgen_wrapper' and 'manually_drop'" - ), - )), - } - } -} - -/// Fallible conversion to an opaque blob. -/// -/// Implementors of this trait should provide the `try_get_layout` method to -/// fallibly get this thing's layout, which the provided `try_to_opaque` trait -/// method will use to convert the `Layout` into an opaque blob Rust type. -pub(crate) trait TryToOpaque { - type Extra; - - /// Get the layout for this thing, if one is available. - fn try_get_layout( - &self, - ctx: &BindgenContext, - extra: &Self::Extra, - ) -> error::Result; - - /// Do not override this provided trait method. - fn try_to_opaque( - &self, - ctx: &BindgenContext, - extra: &Self::Extra, - ) -> error::Result { - self.try_get_layout(ctx, extra) - .map(|layout| helpers::blob(ctx, layout, true)) - } -} - -/// Infallible conversion of an IR thing to an opaque blob. -/// -/// The resulting layout is best effort, and is unfortunately not guaranteed to -/// be correct. When all else fails, we fall back to a single byte layout as a -/// last resort, because C++ does not permit zero-sized types. See the note in -/// the `ToRustTyOrOpaque` doc comment about fallible versus infallible traits -/// and when each is appropriate. -/// -/// Don't implement this directly. Instead implement `TryToOpaque`, and then -/// leverage the blanket impl for this trait. -pub(crate) trait ToOpaque: TryToOpaque { - fn get_layout(&self, ctx: &BindgenContext, extra: &Self::Extra) -> Layout { - self.try_get_layout(ctx, extra) - .unwrap_or_else(|_| Layout::for_size(ctx, 1)) - } - - fn to_opaque( - &self, - ctx: &BindgenContext, - extra: &Self::Extra, - ) -> syn::Type { - let layout = self.get_layout(ctx, extra); - helpers::blob(ctx, layout, true) - } -} - -impl ToOpaque for T where T: TryToOpaque {} - -/// Fallible conversion from an IR thing to an *equivalent* Rust type. -/// -/// If the C/C++ construct represented by the IR thing cannot (currently) be -/// represented in Rust (for example, instantiations of templates with -/// const-value generic parameters) then the impl should return an `Err`. It -/// should *not* attempt to return an opaque blob with the correct size and -/// alignment. That is the responsibility of the `TryToOpaque` trait. -pub(crate) trait TryToRustTy { - type Extra; - - fn try_to_rust_ty( - &self, - ctx: &BindgenContext, - extra: &Self::Extra, - ) -> error::Result; -} - -/// Fallible conversion to a Rust type or an opaque blob with the correct size -/// and alignment. -/// -/// Don't implement this directly. Instead implement `TryToRustTy` and -/// `TryToOpaque`, and then leverage the blanket impl for this trait below. 
-pub(crate) trait TryToRustTyOrOpaque: TryToRustTy + TryToOpaque { - type Extra; - - fn try_to_rust_ty_or_opaque( - &self, - ctx: &BindgenContext, - extra: &::Extra, - ) -> error::Result; -} - -impl TryToRustTyOrOpaque for T -where - T: TryToRustTy + TryToOpaque, -{ - type Extra = E; - - fn try_to_rust_ty_or_opaque( - &self, - ctx: &BindgenContext, - extra: &E, - ) -> error::Result { - self.try_to_rust_ty(ctx, extra).or_else(|_| { - if let Ok(layout) = self.try_get_layout(ctx, extra) { - Ok(helpers::blob(ctx, layout, true)) - } else { - Err(Error::NoLayoutForOpaqueBlob) - } - }) - } -} - -/// Infallible conversion to a Rust type, or an opaque blob with a best effort -/// of correct size and alignment. -/// -/// Don't implement this directly. Instead implement `TryToRustTy` and -/// `TryToOpaque`, and then leverage the blanket impl for this trait below. -/// -/// ### Fallible vs. Infallible Conversions to Rust Types -/// -/// When should one use this infallible `ToRustTyOrOpaque` trait versus the -/// fallible `TryTo{RustTy, Opaque, RustTyOrOpaque}` traits? All fallible trait -/// implementations that need to convert another thing into a Rust type or -/// opaque blob in a nested manner should also use fallible trait methods and -/// propagate failure up the stack. Only infallible functions and methods like -/// `CodeGenerator` implementations should use the infallible -/// `ToRustTyOrOpaque`. The further out we push error recovery, the more likely -/// we are to get a usable `Layout` even if we can't generate an equivalent Rust -/// type for a C++ construct. -pub(crate) trait ToRustTyOrOpaque: TryToRustTy + ToOpaque { - type Extra; - - fn to_rust_ty_or_opaque( - &self, - ctx: &BindgenContext, - extra: &::Extra, - ) -> syn::Type; -} - -impl ToRustTyOrOpaque for T -where - T: TryToRustTy + ToOpaque, -{ - type Extra = E; - - fn to_rust_ty_or_opaque( - &self, - ctx: &BindgenContext, - extra: &E, - ) -> syn::Type { - self.try_to_rust_ty(ctx, extra) - .unwrap_or_else(|_| self.to_opaque(ctx, extra)) - } -} - -impl TryToOpaque for T -where - T: Copy + Into, -{ - type Extra = (); - - fn try_get_layout( - &self, - ctx: &BindgenContext, - _: &(), - ) -> error::Result { - ctx.resolve_item((*self).into()).try_get_layout(ctx, &()) - } -} - -impl TryToRustTy for T -where - T: Copy + Into, -{ - type Extra = (); - - fn try_to_rust_ty( - &self, - ctx: &BindgenContext, - _: &(), - ) -> error::Result { - ctx.resolve_item((*self).into()).try_to_rust_ty(ctx, &()) - } -} - -impl TryToOpaque for Item { - type Extra = (); - - fn try_get_layout( - &self, - ctx: &BindgenContext, - _: &(), - ) -> error::Result { - self.kind().expect_type().try_get_layout(ctx, self) - } -} - -impl TryToRustTy for Item { - type Extra = (); - - fn try_to_rust_ty( - &self, - ctx: &BindgenContext, - _: &(), - ) -> error::Result { - self.kind().expect_type().try_to_rust_ty(ctx, self) - } -} - -impl TryToOpaque for Type { - type Extra = Item; - - fn try_get_layout( - &self, - ctx: &BindgenContext, - _: &Item, - ) -> error::Result { - self.layout(ctx).ok_or(Error::NoLayoutForOpaqueBlob) - } -} - -impl TryToRustTy for Type { - type Extra = Item; - - fn try_to_rust_ty( - &self, - ctx: &BindgenContext, - item: &Item, - ) -> error::Result { - use self::helpers::ast_ty::*; - - match *self.kind() { - TypeKind::Void => Ok(c_void(ctx)), - // TODO: we should do something smart with nullptr, or maybe *const - // c_void is enough? 
- TypeKind::NullPtr => Ok(c_void(ctx).to_ptr(true)), - TypeKind::Int(ik) => { - Ok(int_kind_rust_type(ctx, ik, self.layout(ctx))) - } - TypeKind::Float(fk) => { - Ok(float_kind_rust_type(ctx, fk, self.layout(ctx))) - } - TypeKind::Complex(fk) => { - let float_path = - float_kind_rust_type(ctx, fk, self.layout(ctx)); - - ctx.generated_bindgen_complex(); - Ok(if ctx.options().enable_cxx_namespaces { - syn::parse_quote! { root::__BindgenComplex<#float_path> } - } else { - syn::parse_quote! { __BindgenComplex<#float_path> } - }) - } - TypeKind::Function(ref signature) => { - // We can't rely on the sizeof(Option>) == - // sizeof(NonZero<_>) optimization with opaque blobs (because - // they aren't NonZero), so don't *ever* use an or_opaque - // variant here. - let ty = signature.try_to_rust_ty(ctx, item)?; - - let prefix = ctx.trait_prefix(); - Ok(syn::parse_quote! { ::#prefix::option::Option<#ty> }) - } - TypeKind::Array(item, len) | TypeKind::Vector(item, len) => { - let ty = item.try_to_rust_ty(ctx, &())?; - Ok(syn::parse_quote! { [ #ty ; #len ] }) - } - TypeKind::Enum(..) => { - let path = item.namespace_aware_canonical_path(ctx); - let path = proc_macro2::TokenStream::from_str(&path.join("::")) - .unwrap(); - Ok(syn::parse_quote!(#path)) - } - TypeKind::TemplateInstantiation(ref inst) => { - inst.try_to_rust_ty(ctx, item) - } - TypeKind::ResolvedTypeRef(inner) => inner.try_to_rust_ty(ctx, &()), - TypeKind::TemplateAlias(..) | - TypeKind::Alias(..) | - TypeKind::BlockPointer(..) => { - if self.is_block_pointer() && !ctx.options().generate_block { - let void = c_void(ctx); - return Ok(void.to_ptr(/* is_const = */ false)); - } - - if item.is_opaque(ctx, &()) && - item.used_template_params(ctx) - .into_iter() - .any(|param| param.is_template_param(ctx, &())) - { - self.try_to_opaque(ctx, item) - } else if let Some(ty) = self - .name() - .and_then(|name| utils::type_from_named(ctx, name)) - { - Ok(ty) - } else { - utils::build_path(item, ctx) - } - } - TypeKind::Comp(ref info) => { - let template_params = item.all_template_params(ctx); - if info.has_non_type_template_params() || - (item.is_opaque(ctx, &()) && !template_params.is_empty()) - { - return self.try_to_opaque(ctx, item); - } - - utils::build_path(item, ctx) - } - TypeKind::Opaque => self.try_to_opaque(ctx, item), - TypeKind::Pointer(inner) | TypeKind::Reference(inner) => { - // Check that this type has the same size as the target's pointer type. - let size = self.get_layout(ctx, item).size; - if size != ctx.target_pointer_size() { - return Err(Error::InvalidPointerSize { - ty_name: self.name().unwrap_or("unknown").into(), - ty_size: size, - ptr_size: ctx.target_pointer_size(), - }); - } - - let is_const = ctx.resolve_type(inner).is_const(); - - let inner = - inner.into_resolver().through_type_refs().resolve(ctx); - let inner_ty = inner.expect_type(); - - let is_objc_pointer = - matches!(inner_ty.kind(), TypeKind::ObjCInterface(..)); - - // Regardless if we can properly represent the inner type, we - // should always generate a proper pointer here, so use - // infallible conversion of the inner type. - let ty = inner - .to_rust_ty_or_opaque(ctx, &()) - .with_implicit_template_params(ctx, inner); - - // Avoid the first function pointer level, since it's already - // represented in Rust. - if inner_ty.canonical_type(ctx).is_function() || is_objc_pointer - { - Ok(ty) - } else { - Ok(ty.to_ptr(is_const)) - } - } - TypeKind::TypeParam => { - let name = item.canonical_name(ctx); - let ident = ctx.rust_ident(name); - Ok(syn::parse_quote! 
{ #ident }) - } - TypeKind::ObjCSel => Ok(syn::parse_quote! { objc::runtime::Sel }), - TypeKind::ObjCId => Ok(syn::parse_quote! { id }), - TypeKind::ObjCInterface(ref interface) => { - let name = ctx.rust_ident(interface.name()); - Ok(syn::parse_quote! { #name }) - } - ref u @ TypeKind::UnresolvedTypeRef(..) => { - unreachable!("Should have been resolved after parsing {u:?}!") - } - } - } -} - -impl TryToOpaque for TemplateInstantiation { - type Extra = Item; - - fn try_get_layout( - &self, - ctx: &BindgenContext, - item: &Item, - ) -> error::Result { - item.expect_type() - .layout(ctx) - .ok_or(Error::NoLayoutForOpaqueBlob) - } -} - -impl TryToRustTy for TemplateInstantiation { - type Extra = Item; - - fn try_to_rust_ty( - &self, - ctx: &BindgenContext, - item: &Item, - ) -> error::Result { - if self.is_opaque(ctx, item) { - return Err(Error::InstantiationOfOpaqueType); - } - - let def = self - .template_definition() - .into_resolver() - .through_type_refs() - .resolve(ctx); - - let mut ty = quote! {}; - let def_path = def.namespace_aware_canonical_path(ctx); - ty.append_separated( - def_path.into_iter().map(|p| ctx.rust_ident(p)), - quote!(::), - ); - - let def_params = def.self_template_params(ctx); - if def_params.is_empty() { - // This can happen if we generated an opaque type for a partial - // template specialization, and we've hit an instantiation of - // that partial specialization. - extra_assert!(def.is_opaque(ctx, &())); - return Err(Error::InstantiationOfOpaqueType); - } - - // TODO: If the definition type is a template class/struct - // definition's member template definition, it could rely on - // generic template parameters from its outer template - // class/struct. When we emit bindings for it, it could require - // *more* type arguments than we have here, and we will need to - // reconstruct them somehow. We don't have any means of doing - // that reconstruction at this time. - - let template_args = self - .template_arguments() - .iter() - .zip(def_params.iter()) - // Only pass type arguments for the type parameters that - // the def uses. - .filter(|&(_, param)| ctx.uses_template_parameter(def.id(), *param)) - .map(|(arg, _)| { - let arg = arg.into_resolver().through_type_refs().resolve(ctx); - let ty = arg - .try_to_rust_ty(ctx, &())? - .with_implicit_template_params(ctx, arg); - Ok(ty) - }) - .collect::>>()?; - - Ok(if template_args.is_empty() { - syn::parse_quote! { #ty } - } else { - syn::parse_quote! { #ty<#(#template_args),*> } - }) - } -} - -impl TryToRustTy for FunctionSig { - type Extra = Item; - - fn try_to_rust_ty( - &self, - ctx: &BindgenContext, - item: &Item, - ) -> error::Result { - // TODO: we might want to consider ignoring the reference return value. - let ret = utils::fnsig_return_ty(ctx, self); - let arguments = utils::fnsig_arguments(ctx, self); - - match self.abi(ctx, None) { - Ok(abi) => Ok( - syn::parse_quote! { unsafe extern #abi fn ( #( #arguments ),* ) #ret }, - ), - Err(err) => { - if matches!(err, Error::UnsupportedAbi(_)) { - unsupported_abi_diagnostic( - self.name(), - self.is_variadic(), - item.location(), - ctx, - &err, - ); - } - - Err(err) - } - } - } -} - -impl CodeGenerator for Function { - type Extra = Item; - - /// If we've actually generated the symbol, the number of times we've seen - /// it. 
- type Return = Option; - - fn codegen( - &self, - ctx: &BindgenContext, - result: &mut CodegenResult<'_>, - item: &Item, - ) -> Self::Return { - debug!("::codegen: item = {item:?}"); - debug_assert!(item.is_enabled_for_codegen(ctx)); - let id = DiscoveredItemId::new(item.id().as_usize()); - - let is_internal = matches!(self.linkage(), Linkage::Internal); - - let signature_item = ctx.resolve_item(self.signature()); - let signature = signature_item.kind().expect_type().canonical_type(ctx); - let TypeKind::Function(ref signature) = *signature.kind() else { - panic!("Signature kind is not a Function: {signature:?}") - }; - - if is_internal { - if !ctx.options().wrap_static_fns { - // We cannot do anything with internal functions if we are not wrapping them so - // just avoid generating anything for them. - return None; - } - - if signature.is_variadic() { - // We cannot generate wrappers for variadic static functions so we avoid - // generating any code for them. - variadic_fn_diagnostic(self.name(), item.location(), ctx); - return None; - } - } - - let is_pure_virtual = match self.kind() { - FunctionKind::Method(ref method_kind) => { - method_kind.is_pure_virtual() - } - FunctionKind::Function => false, - }; - if is_pure_virtual && !ctx.options().generate_pure_virtual_functions { - // Pure virtual methods have no actual symbol, so we can't generate - // something meaningful for them. Downstream code postprocessors - // might want to find out about them. - return None; - } - - let is_dynamic_function = match self.kind() { - FunctionKind::Function => { - ctx.options().dynamic_library_name.is_some() - } - FunctionKind::Method(_) => false, - }; - - // Similar to static member variables in a class template, we can't - // generate bindings to template functions, because the set of - // instantiations is open ended and we have no way of knowing which - // monomorphizations actually exist. - if !item.all_template_params(ctx).is_empty() { - return None; - } - - let name = self.name(); - let mut canonical_name = item.canonical_name(ctx); - let mangled_name = self.mangled_name(); - - { - let seen_symbol_name = mangled_name.unwrap_or(&canonical_name); - - // TODO: Maybe warn here if there's a type/argument mismatch, or - // something? - if result.seen_function(seen_symbol_name) { - return None; - } - result.saw_function(seen_symbol_name); - } - - let mut attributes = vec![]; - - if true { - let must_use = signature.must_use() || { - let ret_ty = signature - .return_type() - .into_resolver() - .through_type_refs() - .resolve(ctx); - ret_ty.must_use(ctx) - }; - - if must_use { - attributes.push(attributes::must_use()); - } - } - - if let Some(comment) = item.comment(ctx) { - attributes.push(attributes::doc(&comment)); - } - - let abi = match signature.abi(ctx, Some(name)) { - Err(err) => { - if matches!(err, Error::UnsupportedAbi(_)) { - unsupported_abi_diagnostic( - name, - signature.is_variadic(), - item.location(), - ctx, - &err, - ); - } - - return None; - } - Ok(ClangAbi::Unknown(unknown_abi)) => { - panic!( - "Invalid or unknown abi {unknown_abi:?} for function {canonical_name:?} ({self:?})" - ); - } - Ok(abi) => abi, - }; - - // Handle overloaded functions by giving each overload its own unique - // suffix. 
- let times_seen = result.overload_number(&canonical_name); - if times_seen > 0 { - write!(&mut canonical_name, "{times_seen}").unwrap(); - } - ctx.options().for_each_callback(|cb| { - cb.new_item_found( - id, - DiscoveredItem::Function { - final_name: canonical_name.to_string(), - }, - ); - }); - - let link_name_attr = self.link_name().or_else(|| { - let mangled_name = mangled_name.unwrap_or(name); - (!utils::names_will_be_identical_after_mangling( - &canonical_name, - mangled_name, - Some(abi), - )) - .then_some(mangled_name) - }); - - if let Some(link_name) = link_name_attr { - if !is_dynamic_function { - attributes.push(attributes::link_name::(link_name)); - } - } - - // Unfortunately this can't piggyback on the `attributes` list because - // the #[link(wasm_import_module)] needs to happen before the `extern - // "C"` block. It doesn't get picked up properly otherwise - let wasm_link_attribute = - ctx.options().wasm_import_module_name.as_ref().map(|name| { - quote! { #[link(wasm_import_module = #name)] } - }); - - let should_wrap = is_internal && - ctx.options().wrap_static_fns && - link_name_attr.is_none(); - - if should_wrap { - let name = canonical_name.clone() + ctx.wrap_static_fns_suffix(); - attributes.push(attributes::link_name::(&name)); - } - - let wrap_as_variadic = if should_wrap && !signature.is_variadic() { - utils::wrap_as_variadic_fn(ctx, signature, name) - } else { - None - }; - - let (ident, args) = if let Some(WrapAsVariadic { - idx_of_va_list_arg, - new_name, - }) = &wrap_as_variadic - { - ( - new_name, - utils::fnsig_arguments_iter( - ctx, - // Prune argument at index (idx_of_va_list_arg) - signature.argument_types().iter().enumerate().filter_map( - |(idx, t)| { - if idx == *idx_of_va_list_arg { - None - } else { - Some(t) - } - }, - ), - // and replace it by a `...` (variadic symbol and the end of the signature) - true, - ), - ) - } else { - (&canonical_name, utils::fnsig_arguments(ctx, signature)) - }; - let ret = utils::fnsig_return_ty(ctx, signature); - - let ident = ctx.rust_ident(ident); - - let safety = ctx - .options() - .rust_features - .unsafe_extern_blocks - .then(|| quote!(unsafe)); - - let tokens = quote! { - #wasm_link_attribute - #safety extern #abi { - #(#attributes)* - pub fn #ident ( #( #args ),* ) #ret; - } - }; - - // Add the item to the serialization list if necessary - if should_wrap { - result - .items_to_serialize - .push((item.id(), wrap_as_variadic)); - } - - // If we're doing dynamic binding generation, add to the dynamic items. 
- if is_dynamic_function { - let ident_str = ident.to_string(); - let symbol = link_name_attr.unwrap_or(&ident_str); - let args_identifiers = - utils::fnsig_argument_identifiers(ctx, signature); - let ret_ty = utils::fnsig_return_ty(ctx, signature); - result.dynamic_items().push_func( - &ident, - symbol, - abi, - signature.is_variadic(), - ctx.options().dynamic_link_require_all, - &args, - &args_identifiers, - &ret, - &ret_ty, - &attributes, - ctx, - ); - } else { - result.push(tokens); - } - Some(times_seen) - } -} - -#[cfg_attr(not(feature = "experimental"), allow(unused_variables))] -fn unsupported_abi_diagnostic( - fn_name: &str, - variadic: bool, - location: Option<&crate::clang::SourceLocation>, - ctx: &BindgenContext, - error: &Error, -) { - warn!( - "Skipping {}function `{fn_name}` because the {error}", - if variadic { "variadic " } else { "" }, - ); - - #[cfg(feature = "experimental")] - if ctx.options().emit_diagnostics { - use crate::diagnostics::{get_line, Diagnostic, Level, Slice}; - - let mut diag = Diagnostic::default(); - diag.with_title( - format!( - "Skipping {}function `{fn_name}` because the {error}", - if variadic { "variadic " } else { "" }, - ), - Level::Warning, - ) - .add_annotation( - "No code will be generated for this function.", - Level::Warning, - ) - .add_annotation( - format!( - "The configured Rust version is {}.", - ctx.options().rust_target - ), - Level::Note, - ); - - if let Some(loc) = location { - let (file, line, col, _) = loc.location(); - - if let Some(filename) = file.name() { - if let Ok(Some(source)) = get_line(&filename, line) { - let mut slice = Slice::default(); - slice - .with_source(source) - .with_location(filename, line, col); - diag.add_slice(slice); - } - } - } - - diag.display(); - } -} - -fn variadic_fn_diagnostic( - fn_name: &str, - _location: Option<&crate::clang::SourceLocation>, - _ctx: &BindgenContext, -) { - warn!( - "Cannot generate wrapper for the static variadic function `{fn_name}`." - ); - - #[cfg(feature = "experimental")] - if _ctx.options().emit_diagnostics { - use crate::diagnostics::{get_line, Diagnostic, Level, Slice}; - - let mut diag = Diagnostic::default(); - - diag.with_title(format!("Cannot generate wrapper for the static function `{fn_name}`."), Level::Warning) - .add_annotation("The `--wrap-static-fns` feature does not support variadic functions.", Level::Note) - .add_annotation("No code will be generated for this function.", Level::Note); - - if let Some(loc) = _location { - let (file, line, col, _) = loc.location(); - - if let Some(filename) = file.name() { - if let Ok(Some(source)) = get_line(&filename, line) { - let mut slice = Slice::default(); - slice - .with_source(source) - .with_location(filename, line, col); - diag.add_slice(slice); - } - } - } - - diag.display(); - } -} - -fn objc_method_codegen( - ctx: &BindgenContext, - method: &ObjCMethod, - methods: &mut Vec, - class_name: Option<&str>, - rust_class_name: &str, - prefix: &str, -) { - // This would ideally resolve the method into an Item, and use - // Item::process_before_codegen; however, ObjC methods are not currently - // made into function items. - let name = format!("{rust_class_name}::{prefix}{}", method.rust_name()); - if ctx.options().blocklisted_items.matches(name) { - return; - } - - let signature = method.signature(); - let fn_args = utils::fnsig_arguments(ctx, signature); - let fn_ret = utils::fnsig_return_ty(ctx, signature); - - let sig = if method.is_class_method() { - quote! 
{ - ( #( #fn_args ),* ) #fn_ret - } - } else { - let self_arr = [quote! { &self }]; - let args = self_arr.iter().chain(fn_args.iter()); - quote! { - ( #( #args ),* ) #fn_ret - } - }; - - let methods_and_args = method.format_method_call(&fn_args); - - let body = { - let body = if method.is_class_method() { - let class_name = ctx.rust_ident( - class_name - .expect("Generating a class method without class name?"), - ); - quote!(msg_send!(class!(#class_name), #methods_and_args)) - } else { - quote!(msg_send!(*self, #methods_and_args)) - }; - - ctx.wrap_unsafe_ops(body) - }; - - let method_name = ctx.rust_ident(format!("{prefix}{}", method.rust_name())); - - methods.push(quote! { - unsafe fn #method_name #sig where ::Target: objc::Message + Sized { - #body - } - }); -} - -impl CodeGenerator for ObjCInterface { - type Extra = Item; - type Return = (); - - fn codegen( - &self, - ctx: &BindgenContext, - result: &mut CodegenResult<'_>, - item: &Item, - ) { - debug_assert!(item.is_enabled_for_codegen(ctx)); - - let mut impl_items = vec![]; - let rust_class_name = item.path_for_allowlisting(ctx)[1..].join("::"); - - for method in self.methods() { - objc_method_codegen( - ctx, - method, - &mut impl_items, - None, - &rust_class_name, - "", - ); - } - - for class_method in self.class_methods() { - let ambiquity = self - .methods() - .iter() - .map(|m| m.rust_name()) - .any(|x| x == class_method.rust_name()); - let prefix = if ambiquity { "class_" } else { "" }; - objc_method_codegen( - ctx, - class_method, - &mut impl_items, - Some(self.name()), - &rust_class_name, - prefix, - ); - } - - let trait_name = ctx.rust_ident(self.rust_name()); - let trait_constraints = quote! { - Sized + std::ops::Deref - }; - let trait_block = if self.is_template() { - let template_names: Vec = self - .template_names - .iter() - .map(|g| ctx.rust_ident(g)) - .collect(); - - quote! { - pub trait #trait_name <#(#template_names:'static),*> : #trait_constraints { - #( #impl_items )* - } - } - } else { - quote! { - pub trait #trait_name : #trait_constraints { - #( #impl_items )* - } - } - }; - - let class_name = ctx.rust_ident(self.name()); - if !self.is_category() && !self.is_protocol() { - let struct_block = quote! { - #[repr(transparent)] - #[derive(Debug, Copy, Clone)] - pub struct #class_name(pub id); - impl std::ops::Deref for #class_name { - type Target = objc::runtime::Object; - fn deref(&self) -> &Self::Target { - unsafe { - &*self.0 - } - } - } - unsafe impl objc::Message for #class_name { } - impl #class_name { - pub fn alloc() -> Self { - Self(unsafe { - msg_send!(class!(#class_name), alloc) - }) - } - } - }; - result.push(struct_block); - let mut protocol_set: HashSet = Default::default(); - for protocol_id in &self.conforms_to { - protocol_set.insert(*protocol_id); - let protocol_name = ctx.rust_ident( - ctx.resolve_type(protocol_id.expect_type_id(ctx)) - .name() - .unwrap(), - ); - let impl_trait = quote! 
{ - impl #protocol_name for #class_name { } - }; - result.push(impl_trait); - } - let mut parent_class = self.parent_class; - while let Some(parent_id) = parent_class { - let parent = parent_id - .expect_type_id(ctx) - .into_resolver() - .through_type_refs() - .resolve(ctx) - .expect_type() - .kind(); - - let TypeKind::ObjCInterface(parent) = parent else { - break; - }; - parent_class = parent.parent_class; - - let parent_name = ctx.rust_ident(parent.rust_name()); - let impl_trait = if parent.is_template() { - let template_names: Vec = parent - .template_names - .iter() - .map(|g| ctx.rust_ident(g)) - .collect(); - quote! { - impl <#(#template_names :'static),*> #parent_name <#(#template_names),*> for #class_name { - } - } - } else { - quote! { - impl #parent_name for #class_name { } - } - }; - result.push(impl_trait); - for protocol_id in &parent.conforms_to { - if protocol_set.insert(*protocol_id) { - let protocol_name = ctx.rust_ident( - ctx.resolve_type(protocol_id.expect_type_id(ctx)) - .name() - .unwrap(), - ); - let impl_trait = quote! { - impl #protocol_name for #class_name { } - }; - result.push(impl_trait); - } - } - if !parent.is_template() { - let parent_struct_name = parent.name(); - let child_struct_name = self.name(); - let parent_struct = ctx.rust_ident(parent_struct_name); - let from_block = quote! { - impl From<#class_name> for #parent_struct { - fn from(child: #class_name) -> #parent_struct { - #parent_struct(child.0) - } - } - }; - result.push(from_block); - - let error_msg = format!( - "This {parent_struct_name} cannot be downcasted to {child_struct_name}" - ); - let try_into_block = quote! { - impl std::convert::TryFrom<#parent_struct> for #class_name { - type Error = &'static str; - fn try_from(parent: #parent_struct) -> Result<#class_name, Self::Error> { - let is_kind_of : bool = unsafe { msg_send!(parent, isKindOfClass:class!(#class_name))}; - if is_kind_of { - Ok(#class_name(parent.0)) - } else { - Err(#error_msg) - } - } - } - }; - result.push(try_into_block); - } - } - } - - if !self.is_protocol() { - let impl_block = if self.is_template() { - let template_names: Vec = self - .template_names - .iter() - .map(|g| ctx.rust_ident(g)) - .collect(); - quote! { - impl <#(#template_names :'static),*> #trait_name <#(#template_names),*> for #class_name { - } - } - } else { - quote! 
{ - impl #trait_name for #class_name { - } - } - }; - result.push(impl_block); - } - - result.push(trait_block); - result.saw_objc(); - } -} - -pub(crate) fn codegen( - context: BindgenContext, -) -> Result<(proc_macro2::TokenStream, BindgenOptions), CodegenError> { - context.gen(|context| { - let _t = context.timer("codegen"); - let counter = Cell::new(0); - let mut result = CodegenResult::new(&counter); - - debug!("codegen: {:?}", context.options()); - - if context.options().emit_ir { - let codegen_items = context.codegen_items(); - for (id, item) in context.items() { - if codegen_items.contains(&id) { - println!("ir: {id:?} = {item:#?}"); - } - } - } - - if let Some(path) = context.options().emit_ir_graphviz.as_ref() { - match dot::write_dot_file(context, path) { - Ok(()) => info!( - "Your dot file was generated successfully into: {path}" - ), - Err(e) => warn!("{e}"), - } - } - - if let Some(spec) = context.options().depfile.as_ref() { - match spec.write(context.deps()) { - Ok(()) => info!( - "Your depfile was generated successfully into: {}", - spec.depfile_path.display() - ), - Err(e) => warn!("{e}"), - } - } - - context.resolve_item(context.root_module()).codegen( - context, - &mut result, - &(), - ); - - if let Some(ref lib_name) = context.options().dynamic_library_name { - let lib_ident = context.rust_ident(lib_name); - let dynamic_items_tokens = - result.dynamic_items().get_tokens(&lib_ident, context); - result.push(dynamic_items_tokens); - } - - utils::serialize_items(&result, context)?; - - Ok(postprocessing::postprocessing( - result.items, - context.options(), - )) - }) -} - -pub(crate) mod utils { - use super::helpers::BITFIELD_UNIT; - use super::serialize::CSerialize; - use super::{error, CodegenError, CodegenResult, ToRustTyOrOpaque}; - use crate::ir::context::BindgenContext; - use crate::ir::context::TypeId; - use crate::ir::function::{Abi, ClangAbi, FunctionSig}; - use crate::ir::item::{Item, ItemCanonicalPath}; - use crate::ir::ty::TypeKind; - use crate::{args_are_cpp, file_is_cpp}; - use std::borrow::Cow; - use std::io::Write; - use std::mem; - use std::path::PathBuf; - use std::str::FromStr; - - pub(super) fn serialize_items( - result: &CodegenResult, - context: &BindgenContext, - ) -> Result<(), CodegenError> { - if result.items_to_serialize.is_empty() { - return Ok(()); - } - - let path = context.options().wrap_static_fns_path.as_ref().map_or_else( - || std::env::temp_dir().join("bindgen").join("extern"), - PathBuf::from, - ); - - let dir = path.parent().unwrap(); - - if !dir.exists() { - std::fs::create_dir_all(dir)?; - } - - let is_cpp = args_are_cpp(&context.options().clang_args) || - context - .options() - .input_headers - .iter() - .any(|h| file_is_cpp(h)); - - let source_path = path.with_extension(if is_cpp { "cpp" } else { "c" }); - - let mut code = Vec::new(); - - if !context.options().input_headers.is_empty() { - for header in &context.options().input_headers { - writeln!(code, "#include \"{header}\"")?; - } - - writeln!(code)?; - } - - if !context.options().input_header_contents.is_empty() { - for (name, contents) in &context.options().input_header_contents { - writeln!(code, "// {name}\n{contents}")?; - } - - writeln!(code)?; - } - - writeln!(code, "// Static wrappers\n")?; - - for (id, wrap_as_variadic) in &result.items_to_serialize { - let item = context.resolve_item(*id); - item.serialize(context, wrap_as_variadic, &mut vec![], &mut code)?; - } - - std::fs::write(source_path, code)?; - - Ok(()) - } - - pub(super) fn wrap_as_variadic_fn( - ctx: 
&BindgenContext, - signature: &FunctionSig, - name: &str, - ) -> Option { - // Fast path, exclude because: - // - with 0 args: no va_list possible, so no point searching for one - // - with 1 args: cannot have a `va_list` and another arg (required by va_start) - if signature.argument_types().len() <= 1 { - return None; - } - - let mut it = signature.argument_types().iter().enumerate().filter_map( - |(idx, (_name, mut type_id))| { - // Hand rolled visitor that checks for the presence of `va_list` - loop { - let ty = ctx.resolve_type(type_id); - if Some("__builtin_va_list") == ty.name() { - return Some(idx); - } - match ty.kind() { - TypeKind::Alias(type_id_alias) => { - type_id = *type_id_alias; - } - TypeKind::ResolvedTypeRef(type_id_typedef) => { - type_id = *type_id_typedef; - } - _ => break, - } - } - None - }, - ); - - // Return THE idx (by checking that there is no idx after) - // This is done since we cannot handle multiple `va_list` - it.next().filter(|_| it.next().is_none()).and_then(|idx| { - // Call the `wrap_as_variadic_fn` callback - #[cfg(feature = "experimental")] - { - ctx.options() - .last_callback(|c| c.wrap_as_variadic_fn(name)) - .map(|new_name| super::WrapAsVariadic { - new_name, - idx_of_va_list_arg: idx, - }) - } - #[cfg(not(feature = "experimental"))] - { - let _ = name; - let _ = idx; - None - } - }) - } - - pub(crate) fn prepend_bitfield_unit_type( - ctx: &BindgenContext, - result: &mut Vec, - ) { - if ctx.options().blocklisted_items.matches(BITFIELD_UNIT) || - ctx.options().blocklisted_types.matches(BITFIELD_UNIT) - { - return; - } - - let bitfield_unit_src = if ctx.options().rust_features().raw_ref_macros - { - include_str!("./bitfield_unit_raw_ref_macros.rs") - } else { - include_str!("./bitfield_unit.rs") - }; - let bitfield_unit_src = if true { - Cow::Borrowed(bitfield_unit_src) - } else { - Cow::Owned(bitfield_unit_src.replace("const fn ", "fn ")) - }; - let bitfield_unit_type = - proc_macro2::TokenStream::from_str(&bitfield_unit_src).unwrap(); - let bitfield_unit_type = quote!(#bitfield_unit_type); - - let items = vec![bitfield_unit_type]; - let old_items = mem::replace(result, items); - result.extend(old_items); - } - - pub(crate) fn prepend_objc_header( - ctx: &BindgenContext, - result: &mut Vec, - ) { - let use_objc = if ctx.options().objc_extern_crate { - quote! { - #[macro_use] - extern crate objc; - } - } else { - quote! { - use objc::{self, msg_send, sel, sel_impl, class}; - } - }; - - let id_type = quote! { - #[allow(non_camel_case_types)] - pub type id = *mut objc::runtime::Object; - }; - - let items = vec![use_objc, id_type]; - let old_items = mem::replace(result, items); - result.extend(old_items); - } - - pub(crate) fn prepend_block_header( - ctx: &BindgenContext, - result: &mut Vec, - ) { - let use_block = if ctx.options().block_extern_crate { - quote! { - extern crate block; - } - } else { - quote! { - use block; - } - }; - - let items = vec![use_block]; - let old_items = mem::replace(result, items); - result.extend(old_items); - } - - pub(crate) fn prepend_union_types( - ctx: &BindgenContext, - result: &mut Vec, - ) { - let prefix = ctx.trait_prefix(); - - // If the target supports `const fn`, declare eligible functions - // as `const fn` else just `fn`. - let const_fn = if true { - quote! { const fn } - } else { - quote! { fn } - }; - - // TODO(emilio): The fmt::Debug impl could be way nicer with - // std::intrinsics::type_name, but... - let union_field_decl = quote! 
{ - #[repr(C)] - pub struct __BindgenUnionField(::#prefix::marker::PhantomData); - }; - - let transmute = - ctx.wrap_unsafe_ops(quote!(::#prefix::mem::transmute(self))); - - let union_field_impl = quote! { - impl __BindgenUnionField { - #[inline] - pub #const_fn new() -> Self { - __BindgenUnionField(::#prefix::marker::PhantomData) - } - - #[inline] - pub unsafe fn as_ref(&self) -> &T { - #transmute - } - - #[inline] - pub unsafe fn as_mut(&mut self) -> &mut T { - #transmute - } - } - }; - - let union_field_default_impl = quote! { - impl ::#prefix::default::Default for __BindgenUnionField { - #[inline] - fn default() -> Self { - Self::new() - } - } - }; - - let union_field_clone_impl = quote! { - impl ::#prefix::clone::Clone for __BindgenUnionField { - #[inline] - fn clone(&self) -> Self { - *self - } - } - }; - - let union_field_copy_impl = quote! { - impl ::#prefix::marker::Copy for __BindgenUnionField {} - }; - - let union_field_debug_impl = quote! { - impl ::#prefix::fmt::Debug for __BindgenUnionField { - fn fmt(&self, fmt: &mut ::#prefix::fmt::Formatter<'_>) - -> ::#prefix::fmt::Result { - fmt.write_str("__BindgenUnionField") - } - } - }; - - // The actual memory of the filed will be hashed, so that's why these - // field doesn't do anything with the hash. - let union_field_hash_impl = quote! { - impl ::#prefix::hash::Hash for __BindgenUnionField { - fn hash(&self, _state: &mut H) { - } - } - }; - - let union_field_partialeq_impl = quote! { - impl ::#prefix::cmp::PartialEq for __BindgenUnionField { - fn eq(&self, _other: &__BindgenUnionField) -> bool { - true - } - } - }; - - let union_field_eq_impl = quote! { - impl ::#prefix::cmp::Eq for __BindgenUnionField { - } - }; - - let items = vec![ - union_field_decl, - union_field_impl, - union_field_default_impl, - union_field_clone_impl, - union_field_copy_impl, - union_field_debug_impl, - union_field_hash_impl, - union_field_partialeq_impl, - union_field_eq_impl, - ]; - - let old_items = mem::replace(result, items); - result.extend(old_items); - } - - pub(crate) fn prepend_incomplete_array_types( - ctx: &BindgenContext, - result: &mut Vec, - ) { - let prefix = ctx.trait_prefix(); - - // If the target supports `const fn`, declare eligible functions - // as `const fn` else just `fn`. - let const_fn = if true { - quote! { const fn } - } else { - quote! { fn } - }; - - let incomplete_array_decl = quote! { - #[repr(C)] - #[derive(Default)] - pub struct __IncompleteArrayField( - ::#prefix::marker::PhantomData, [T; 0]); - }; - - let from_raw_parts = ctx.wrap_unsafe_ops(quote! ( - ::#prefix::slice::from_raw_parts(self.as_ptr(), len) - )); - let from_raw_parts_mut = ctx.wrap_unsafe_ops(quote! ( - ::#prefix::slice::from_raw_parts_mut(self.as_mut_ptr(), len) - )); - - let incomplete_array_impl = quote! { - impl __IncompleteArrayField { - #[inline] - pub #const_fn new() -> Self { - __IncompleteArrayField(::#prefix::marker::PhantomData, []) - } - - #[inline] - pub fn as_ptr(&self) -> *const T { - self as *const _ as *const T - } - - #[inline] - pub fn as_mut_ptr(&mut self) -> *mut T { - self as *mut _ as *mut T - } - - #[inline] - pub unsafe fn as_slice(&self, len: usize) -> &[T] { - #from_raw_parts - } - - #[inline] - pub unsafe fn as_mut_slice(&mut self, len: usize) -> &mut [T] { - #from_raw_parts_mut - } - } - }; - - let incomplete_array_debug_impl = quote! 
{ - impl ::#prefix::fmt::Debug for __IncompleteArrayField { - fn fmt(&self, fmt: &mut ::#prefix::fmt::Formatter<'_>) - -> ::#prefix::fmt::Result { - fmt.write_str("__IncompleteArrayField") - } - } - }; - - let items = vec![ - incomplete_array_decl, - incomplete_array_impl, - incomplete_array_debug_impl, - ]; - - let old_items = mem::replace(result, items); - result.extend(old_items); - } - - pub(crate) fn prepend_float16_type( - result: &mut Vec, - ) { - let float16_type = quote! { - #[derive(PartialEq, Copy, Clone, Hash, Debug, Default)] - #[repr(transparent)] - pub struct __BindgenFloat16(pub u16); - }; - - let items = vec![float16_type]; - let old_items = mem::replace(result, items); - result.extend(old_items); - } - - pub(crate) fn prepend_complex_type( - result: &mut Vec, - ) { - let complex_type = quote! { - #[derive(PartialEq, Copy, Clone, Hash, Debug, Default)] - #[repr(C)] - pub struct __BindgenComplex { - pub re: T, - pub im: T - } - }; - - let items = vec![complex_type]; - let old_items = mem::replace(result, items); - result.extend(old_items); - } - - pub(crate) fn prepend_opaque_array_type( - result: &mut Vec, - ) { - let ty = quote! { - /// If Bindgen could only determine the size and alignment of a - /// type, it is represented like this. - #[derive(PartialEq, Copy, Clone, Debug, Hash)] - #[repr(C)] - pub struct __BindgenOpaqueArray(pub [T; N]); - impl Default for __BindgenOpaqueArray { - fn default() -> Self { - Self([::default(); N]) - } - } - }; - - result.insert(0, ty); - } - - pub(crate) fn build_path( - item: &Item, - ctx: &BindgenContext, - ) -> error::Result { - let path = item.namespace_aware_canonical_path(ctx); - let tokens = - proc_macro2::TokenStream::from_str(&path.join("::")).unwrap(); - - Ok(syn::parse_quote! { #tokens }) - } - - fn primitive_ty(ctx: &BindgenContext, name: &str) -> syn::Type { - let ident = ctx.rust_ident_raw(name); - syn::parse_quote! { #ident } - } - - pub(crate) fn type_from_named( - ctx: &BindgenContext, - name: &str, - ) -> Option { - // FIXME: We could use the inner item to check this is really a - // primitive type but, who the heck overrides these anyway? - Some(match name { - "int8_t" => primitive_ty(ctx, "i8"), - "uint8_t" => primitive_ty(ctx, "u8"), - "int16_t" => primitive_ty(ctx, "i16"), - "uint16_t" => primitive_ty(ctx, "u16"), - "int32_t" => primitive_ty(ctx, "i32"), - "uint32_t" => primitive_ty(ctx, "u32"), - "int64_t" => primitive_ty(ctx, "i64"), - "uint64_t" => primitive_ty(ctx, "u64"), - - "size_t" if ctx.options().size_t_is_usize => { - primitive_ty(ctx, "usize") - } - "uintptr_t" => primitive_ty(ctx, "usize"), - - "ssize_t" if ctx.options().size_t_is_usize => { - primitive_ty(ctx, "isize") - } - "intptr_t" | "ptrdiff_t" => primitive_ty(ctx, "isize"), - _ => return None, - }) - } - - fn fnsig_return_ty_internal( - ctx: &BindgenContext, - sig: &FunctionSig, - ) -> syn::Type { - if sig.is_divergent() { - return syn::parse_quote! { ! }; - } - - let canonical_type_kind = sig - .return_type() - .into_resolver() - .through_type_refs() - .through_type_aliases() - .resolve(ctx) - .kind() - .expect_type() - .kind(); - - match canonical_type_kind { - TypeKind::Void => syn::parse_quote! { () }, - _ => sig.return_type().to_rust_ty_or_opaque(ctx, &()), - } - } - - pub(crate) fn fnsig_return_ty( - ctx: &BindgenContext, - sig: &FunctionSig, - ) -> proc_macro2::TokenStream { - match fnsig_return_ty_internal(ctx, sig) { - syn::Type::Tuple(syn::TypeTuple { elems, .. }) - if elems.is_empty() => - { - quote! {} - } - ty => quote! 
{ -> #ty }, - } - } - - pub(crate) fn fnsig_argument_type( - ctx: &BindgenContext, - ty: TypeId, - ) -> syn::Type { - use super::ToPtr; - - let arg_item = ctx.resolve_item(ty); - let arg_ty = arg_item.kind().expect_type(); - - // From the C90 standard[1]: - // - // A declaration of a parameter as "array of type" shall be - // adjusted to "qualified pointer to type", where the type - // qualifiers (if any) are those specified within the [ and ] of - // the array type derivation. - // - // [1]: http://c0x.coding-guidelines.com/6.7.5.3.html - match *arg_ty.canonical_type(ctx).kind() { - TypeKind::Array(t, _) => { - let stream = if ctx.options().array_pointers_in_arguments { - arg_ty.to_rust_ty_or_opaque(ctx, arg_item) - } else { - t.to_rust_ty_or_opaque(ctx, &()) - }; - stream - .to_ptr(ctx.resolve_type(t).is_const() || arg_ty.is_const()) - } - TypeKind::Pointer(inner) => { - let inner = ctx.resolve_item(inner); - let inner_ty = inner.expect_type(); - if let TypeKind::ObjCInterface(ref interface) = - *inner_ty.canonical_type(ctx).kind() - { - let name = ctx.rust_ident(interface.name()); - syn::parse_quote! { #name } - } else { - arg_item.to_rust_ty_or_opaque(ctx, &()) - } - } - _ => arg_item.to_rust_ty_or_opaque(ctx, &()), - } - } - - pub(crate) fn fnsig_arguments_iter< - 'a, - I: Iterator, TypeId)>, - >( - ctx: &BindgenContext, - args_iter: I, - is_variadic: bool, - ) -> Vec { - let mut unnamed_arguments = 0; - let mut args = args_iter - .map(|(name, ty)| { - let arg_ty = fnsig_argument_type(ctx, *ty); - - let arg_name = if let Some(ref name) = *name { - ctx.rust_mangle(name).into_owned() - } else { - unnamed_arguments += 1; - format!("arg{unnamed_arguments}") - }; - - assert!(!arg_name.is_empty()); - let arg_name = ctx.rust_ident(arg_name); - - quote! { - #arg_name : #arg_ty - } - }) - .collect::>(); - - if is_variadic { - args.push(quote! { ... }); - } - - args - } - - pub(crate) fn fnsig_arguments( - ctx: &BindgenContext, - sig: &FunctionSig, - ) -> Vec { - fnsig_arguments_iter( - ctx, - sig.argument_types().iter(), - sig.is_variadic(), - ) - } - - pub(crate) fn fnsig_argument_identifiers( - ctx: &BindgenContext, - sig: &FunctionSig, - ) -> Vec { - let mut unnamed_arguments = 0; - let args = sig - .argument_types() - .iter() - .map(|&(ref name, _ty)| { - let arg_name = if let Some(ref name) = *name { - ctx.rust_mangle(name).into_owned() - } else { - unnamed_arguments += 1; - format!("arg{unnamed_arguments}") - }; - - assert!(!arg_name.is_empty()); - let arg_name = ctx.rust_ident(arg_name); - - quote! { - #arg_name - } - }) - .collect::>(); - - args - } - - pub(crate) fn fnsig_block( - ctx: &BindgenContext, - sig: &FunctionSig, - ) -> proc_macro2::TokenStream { - let args = sig.argument_types().iter().map(|&(_, ty)| { - let arg_item = ctx.resolve_item(ty); - - arg_item.to_rust_ty_or_opaque(ctx, &()) - }); - - let ret_ty = fnsig_return_ty_internal(ctx, sig); - quote! { - *const ::block::Block<(#(#args,)*), #ret_ty> - } - } - - // Returns true if `canonical_name` will end up as `mangled_name` at the - // machine code level, i.e. after LLVM has applied any target specific - // mangling. - pub(crate) fn names_will_be_identical_after_mangling( - canonical_name: &str, - mangled_name: &str, - call_conv: Option, - ) -> bool { - // If the mangled name and the canonical name are the same then no - // mangling can have happened between the two versions. 
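// Concrete decorations this check is written against (32-bit Windows style,
// illustrative names): a cdecl `int foo(int)` may show up as `_foo`, stdcall
// as `_foo@4`, and fastcall as `@foo@4`, the numeric suffix being the
// argument byte count. When the decorated symbol already matches what the
// target-level mangling will produce, the `#[link_name = "..."]` attribute
// can be skipped. A minimal standalone sketch of just the stdcall case,
// under those assumptions:
fn stdcall_names_match(canonical: &str, mangled: &str) -> bool {
    mangled
        .strip_prefix('_')
        .and_then(|rest| rest.strip_prefix(canonical))
        .and_then(|suffix| suffix.strip_prefix('@'))
        .is_some_and(|digits| {
            !digits.is_empty() && digits.bytes().all(|b| b.is_ascii_digit())
        })
}
// stdcall_names_match("foo", "_foo@4") == true;
// stdcall_names_match("foo", "foo") == false.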
- if canonical_name == mangled_name { - return true; - } - - // Working with &[u8] makes indexing simpler than with &str - let canonical_name = canonical_name.as_bytes(); - let mangled_name = mangled_name.as_bytes(); - - let (mangling_prefix, expect_suffix) = match call_conv { - Some(ClangAbi::Known(Abi::C)) | - // None is the case for global variables - None => { - (b'_', false) - } - Some(ClangAbi::Known(Abi::Stdcall)) => (b'_', true), - Some(ClangAbi::Known(Abi::Fastcall)) => (b'@', true), - - // This is something we don't recognize, stay on the safe side - // by emitting the `#[link_name]` attribute - Some(_) => return false, - }; - - // Check that the mangled name is long enough to at least contain the - // canonical name plus the expected prefix. - if mangled_name.len() < canonical_name.len() + 1 { - return false; - } - - // Return if the mangled name does not start with the prefix expected - // for the given calling convention. - if mangled_name[0] != mangling_prefix { - return false; - } - - // Check that the mangled name contains the canonical name after the - // prefix - if &mangled_name[1..=canonical_name.len()] != canonical_name { - return false; - } - - // If the given calling convention also prescribes a suffix, check that - // it exists too - if expect_suffix { - let suffix = &mangled_name[canonical_name.len() + 1..]; - - // The shortest suffix is "@0" - if suffix.len() < 2 { - return false; - } - - // Check that the suffix starts with '@' and is all ASCII decimals - // after that. - if suffix[0] != b'@' || !suffix[1..].iter().all(u8::is_ascii_digit) - { - return false; - } - } else if mangled_name.len() != canonical_name.len() + 1 { - // If we don't expect a prefix but there is one, we need the - // #[link_name] attribute - return false; - } - - true - } -} diff --git a/vendor/bindgen/codegen/postprocessing/merge_extern_blocks.rs b/vendor/bindgen/codegen/postprocessing/merge_extern_blocks.rs deleted file mode 100644 index e0f6a34baa2284..00000000000000 --- a/vendor/bindgen/codegen/postprocessing/merge_extern_blocks.rs +++ /dev/null @@ -1,72 +0,0 @@ -use syn::{ - visit_mut::{visit_file_mut, visit_item_mod_mut, VisitMut}, - File, Item, ItemForeignMod, ItemMod, -}; - -pub(super) fn merge_extern_blocks(file: &mut File) { - Visitor.visit_file_mut(file); -} - -struct Visitor; - -impl VisitMut for Visitor { - fn visit_file_mut(&mut self, file: &mut File) { - visit_items(&mut file.items); - visit_file_mut(self, file); - } - - fn visit_item_mod_mut(&mut self, item_mod: &mut ItemMod) { - if let Some((_, ref mut items)) = item_mod.content { - visit_items(items); - } - visit_item_mod_mut(self, item_mod); - } -} - -fn visit_items(items: &mut Vec) { - // Keep all the extern blocks in a different `Vec` for faster search. - let mut extern_blocks = Vec::::new(); - - for item in std::mem::take(items) { - if let Item::ForeignMod(ItemForeignMod { - attrs, - abi, - brace_token, - unsafety, - items: extern_block_items, - }) = item - { - let mut exists = false; - for extern_block in &mut extern_blocks { - // Check if there is a extern block with the same ABI and - // attributes. - if extern_block.attrs == attrs && extern_block.abi == abi { - // Merge the items of the two blocks. - extern_block.items.extend_from_slice(&extern_block_items); - exists = true; - break; - } - } - // If no existing extern block had the same ABI and attributes, store - // it. 
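// In source terms, the effect of this pass: two generated blocks such as
//
//     extern "C" { fn a(); }
//     extern "C" { fn b(); }
//
// collapse into a single `extern "C" { fn a(); fn b(); }`, but only when
// their attributes (e.g. `#[link(...)]`) and ABI strings match exactly.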
- if !exists { - extern_blocks.push(ItemForeignMod { - attrs, - abi, - brace_token, - unsafety, - items: extern_block_items, - }); - } - } else { - // If the item is not an extern block, we don't have to do anything and just - // push it back. - items.push(item); - } - } - - // Move all the extern blocks alongside the rest of the items. - for extern_block in extern_blocks { - items.push(Item::ForeignMod(extern_block)); - } -} diff --git a/vendor/bindgen/codegen/postprocessing/mod.rs b/vendor/bindgen/codegen/postprocessing/mod.rs deleted file mode 100644 index 964169852100a1..00000000000000 --- a/vendor/bindgen/codegen/postprocessing/mod.rs +++ /dev/null @@ -1,57 +0,0 @@ -use proc_macro2::TokenStream; -use quote::ToTokens; -use syn::{parse2, File}; - -use crate::BindgenOptions; - -mod merge_extern_blocks; -mod sort_semantically; - -use merge_extern_blocks::merge_extern_blocks; -use sort_semantically::sort_semantically; - -struct PostProcessingPass { - should_run: fn(&BindgenOptions) -> bool, - run: fn(&mut File), -} - -// TODO: This can be a const fn when mutable references are allowed in const -// context. -macro_rules! pass { - ($pass:ident) => { - PostProcessingPass { - should_run: |options| options.$pass, - run: |file| $pass(file), - } - }; -} - -const PASSES: &[PostProcessingPass] = - &[pass!(merge_extern_blocks), pass!(sort_semantically)]; - -pub(crate) fn postprocessing( - items: Vec, - options: &BindgenOptions, -) -> TokenStream { - let items = items.into_iter().collect(); - let require_syn = PASSES.iter().any(|pass| (pass.should_run)(options)); - - if !require_syn { - return items; - } - - // This syn business is a hack, for now. This means that we are re-parsing already - // generated code using `syn` (as opposed to `quote`) because `syn` provides us more - // control over the elements. - // The `unwrap` here is deliberate because bindgen should generate valid rust items at all - // times. 
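// A self-contained sketch of the same round trip, assuming the `proc-macro2`
// and `quote` crates plus `syn` built with its `full` feature (for
// `syn::File`) on top of the default `parsing`/`printing` features; the pass
// shown is a toy, not one of the passes registered above:
use proc_macro2::TokenStream;
use quote::{quote, ToTokens};

fn reemit(tokens: TokenStream) -> TokenStream {
    // Generated code is expected to always parse, hence the expect().
    let mut file: syn::File = syn::parse2(tokens).expect("valid Rust items");
    // Toy pass: move `use` items after everything else (stable sort).
    file.items
        .sort_by_key(|item| matches!(item, syn::Item::Use(_)) as u8);
    file.into_token_stream()
}
// reemit(quote! { use std::mem; fn f() {} }) re-emits the two items in the
// order `fn f() {}`, `use std::mem;`.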
- let mut file = parse2::(items).unwrap(); - - for pass in PASSES { - if (pass.should_run)(options) { - (pass.run)(&mut file); - } - } - - file.into_token_stream() -} diff --git a/vendor/bindgen/codegen/postprocessing/sort_semantically.rs b/vendor/bindgen/codegen/postprocessing/sort_semantically.rs deleted file mode 100644 index e9bb5dc308a3e6..00000000000000 --- a/vendor/bindgen/codegen/postprocessing/sort_semantically.rs +++ /dev/null @@ -1,46 +0,0 @@ -use syn::{ - visit_mut::{visit_file_mut, visit_item_mod_mut, VisitMut}, - File, Item, ItemMod, -}; - -pub(super) fn sort_semantically(file: &mut File) { - Visitor.visit_file_mut(file); -} - -struct Visitor; - -impl VisitMut for Visitor { - fn visit_file_mut(&mut self, file: &mut File) { - visit_items(&mut file.items); - visit_file_mut(self, file); - } - - fn visit_item_mod_mut(&mut self, item_mod: &mut ItemMod) { - if let Some((_, ref mut items)) = item_mod.content { - visit_items(items); - } - visit_item_mod_mut(self, item_mod); - } -} - -fn visit_items(items: &mut [Item]) { - items.sort_by_key(|item| match item { - Item::Type(_) => 0, - Item::Struct(_) => 1, - Item::Const(_) => 2, - Item::Fn(_) => 3, - Item::Enum(_) => 4, - Item::Union(_) => 5, - Item::Static(_) => 6, - Item::Trait(_) => 7, - Item::TraitAlias(_) => 8, - Item::Impl(_) => 9, - Item::Mod(_) => 10, - Item::Use(_) => 11, - Item::Verbatim(_) => 12, - Item::ExternCrate(_) => 13, - Item::ForeignMod(_) => 14, - Item::Macro(_) => 15, - _ => 18, - }); -} diff --git a/vendor/bindgen/codegen/serialize.rs b/vendor/bindgen/codegen/serialize.rs deleted file mode 100644 index 9af48aa8ffed80..00000000000000 --- a/vendor/bindgen/codegen/serialize.rs +++ /dev/null @@ -1,443 +0,0 @@ -use std::io::Write; - -use crate::callbacks::IntKind; - -use crate::ir::comp::CompKind; -use crate::ir::context::{BindgenContext, TypeId}; -use crate::ir::function::{Function, FunctionKind}; -use crate::ir::item::Item; -use crate::ir::item::ItemCanonicalName; -use crate::ir::item_kind::ItemKind; -use crate::ir::ty::{FloatKind, Type, TypeKind}; - -use super::{CodegenError, WrapAsVariadic}; - -fn get_loc(item: &Item) -> String { - item.location() - .map_or_else(|| "unknown".to_owned(), |x| x.to_string()) -} - -pub(super) trait CSerialize<'a> { - type Extra; - - fn serialize( - &self, - ctx: &BindgenContext, - extra: Self::Extra, - stack: &mut Vec, - writer: &mut W, - ) -> Result<(), CodegenError>; -} - -impl<'a> CSerialize<'a> for Item { - type Extra = &'a Option; - - fn serialize( - &self, - ctx: &BindgenContext, - extra: Self::Extra, - stack: &mut Vec, - writer: &mut W, - ) -> Result<(), CodegenError> { - match self.kind() { - ItemKind::Function(func) => { - func.serialize(ctx, (self, extra), stack, writer) - } - kind => Err(CodegenError::Serialize { - msg: format!("Cannot serialize item kind {kind:?}"), - loc: get_loc(self), - }), - } - } -} - -impl<'a> CSerialize<'a> for Function { - type Extra = (&'a Item, &'a Option); - - fn serialize( - &self, - ctx: &BindgenContext, - (item, wrap_as_variadic): Self::Extra, - stack: &mut Vec, - writer: &mut W, - ) -> Result<(), CodegenError> { - if self.kind() != FunctionKind::Function { - return Err(CodegenError::Serialize { - msg: format!( - "Cannot serialize function kind {:?}", - self.kind(), - ), - loc: get_loc(item), - }); - } - - let TypeKind::Function(signature) = - ctx.resolve_type(self.signature()).kind() - else { - unreachable!() - }; - - assert!(!signature.is_variadic()); - - let name = self.name(); - - // Function arguments stored as `(name, type_id)` tuples. 
- let args = { - let mut count = 0; - - let idx_to_prune = wrap_as_variadic.as_ref().map( - |WrapAsVariadic { - idx_of_va_list_arg, .. - }| *idx_of_va_list_arg, - ); - - signature - .argument_types() - .iter() - .cloned() - .enumerate() - .filter_map(|(idx, (opt_name, type_id))| { - if Some(idx) == idx_to_prune { - None - } else { - Some(( - opt_name.unwrap_or_else(|| { - let name = format!("arg_{count}"); - count += 1; - name - }), - type_id, - )) - } - }) - .collect::>() - }; - - // The name used for the wrapper self. - let wrap_name = format!("{name}{}", ctx.wrap_static_fns_suffix()); - - // The function's return type - let (ret_item, ret_ty) = { - let type_id = signature.return_type(); - let ret_item = ctx.resolve_item(type_id); - let ret_ty = ret_item.expect_type(); - - // Write `ret_ty`. - ret_ty.serialize(ctx, ret_item, stack, writer)?; - - (ret_item, ret_ty) - }; - - const INDENT: &str = " "; - - // Write `wrap_name(args`. - write!(writer, " {wrap_name}(")?; - serialize_args(&args, ctx, writer)?; - - if wrap_as_variadic.is_none() { - // Write `) { name(` if the function returns void and `) { return name(` if it does not. - if ret_ty.is_void() { - write!(writer, ") {{ {name}(")?; - } else { - write!(writer, ") {{ return {name}(")?; - } - } else { - // Write `, ...) {` - writeln!(writer, ", ...) {{")?; - - // Declare the return type `RET_TY ret;` if their is a need to do so - if !ret_ty.is_void() { - write!(writer, "{INDENT}")?; - ret_ty.serialize(ctx, ret_item, stack, writer)?; - writeln!(writer, " ret;")?; - } - - // Setup va_list - writeln!(writer, "{INDENT}va_list ap;\n")?; - writeln!( - writer, - "{INDENT}va_start(ap, {});", - args.last().unwrap().0 - )?; - - write!(writer, "{INDENT}")?; - // Write `ret = name(` or `name(` depending if the function returns something - if !ret_ty.is_void() { - write!(writer, "ret = ")?; - } - write!(writer, "{name}(")?; - } - - // Get the arguments names and insert at the right place if necessary `ap` - let mut args: Vec<_> = args.into_iter().map(|(name, _)| name).collect(); - if let Some(WrapAsVariadic { - idx_of_va_list_arg, .. - }) = wrap_as_variadic - { - args.insert(*idx_of_va_list_arg, "ap".to_owned()); - } - - // Write `arg_names);`. 
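// Putting the pieces together: for a header declaration such as
// `static inline int add(int a, int b);` and a hypothetical `__extern`
// wrapper suffix, the full line of C written out by this function is
//
//     int add__extern(int a, int b) { return add(a, b); }
//
// whereas the variadic branch above declares a local `va_list ap`, passes it
// where the original `va_list` parameter sat, and returns the saved result
// after `va_end(ap)`.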
- serialize_sep(", ", args.iter(), ctx, writer, |name, _, buf| { - write!(buf, "{name}").map_err(From::from) - })?; - #[rustfmt::skip] - write!(writer, ");{}", if wrap_as_variadic.is_none() { " " } else { "\n" })?; - - if wrap_as_variadic.is_some() { - // End va_list and return the result if their is one - writeln!(writer, "{INDENT}va_end(ap);")?; - if !ret_ty.is_void() { - writeln!(writer, "{INDENT}return ret;")?; - } - } - - writeln!(writer, "}}")?; - - Ok(()) - } -} - -impl CSerialize<'_> for TypeId { - type Extra = (); - - fn serialize( - &self, - ctx: &BindgenContext, - (): Self::Extra, - stack: &mut Vec, - writer: &mut W, - ) -> Result<(), CodegenError> { - let item = ctx.resolve_item(*self); - item.expect_type().serialize(ctx, item, stack, writer) - } -} - -impl<'a> CSerialize<'a> for Type { - type Extra = &'a Item; - - fn serialize( - &self, - ctx: &BindgenContext, - item: Self::Extra, - stack: &mut Vec, - writer: &mut W, - ) -> Result<(), CodegenError> { - match self.kind() { - TypeKind::Void => { - if self.is_const() { - write!(writer, "const ")?; - } - write!(writer, "void")?; - } - TypeKind::NullPtr => { - if self.is_const() { - write!(writer, "const ")?; - } - write!(writer, "nullptr_t")?; - } - TypeKind::Int(int_kind) => { - if self.is_const() { - write!(writer, "const ")?; - } - match int_kind { - IntKind::Bool => write!(writer, "bool")?, - IntKind::SChar => write!(writer, "signed char")?, - IntKind::UChar => write!(writer, "unsigned char")?, - IntKind::WChar => write!(writer, "wchar_t")?, - IntKind::Short => write!(writer, "short")?, - IntKind::UShort => write!(writer, "unsigned short")?, - IntKind::Int => write!(writer, "int")?, - IntKind::UInt => write!(writer, "unsigned int")?, - IntKind::Long => write!(writer, "long")?, - IntKind::ULong => write!(writer, "unsigned long")?, - IntKind::LongLong => write!(writer, "long long")?, - IntKind::ULongLong => write!(writer, "unsigned long long")?, - IntKind::Char { .. 
} => write!(writer, "char")?, - int_kind => { - return Err(CodegenError::Serialize { - msg: format!( - "Cannot serialize integer kind {int_kind:?}" - ), - loc: get_loc(item), - }) - } - } - } - TypeKind::Float(float_kind) => { - if self.is_const() { - write!(writer, "const ")?; - } - match float_kind { - FloatKind::Float16 => write!(writer, "_Float16")?, - FloatKind::Float => write!(writer, "float")?, - FloatKind::Double => write!(writer, "double")?, - FloatKind::LongDouble => write!(writer, "long double")?, - FloatKind::Float128 => write!(writer, "__float128")?, - } - } - TypeKind::Complex(float_kind) => { - if self.is_const() { - write!(writer, "const ")?; - } - match float_kind { - FloatKind::Float16 => write!(writer, "_Float16 complex")?, - FloatKind::Float => write!(writer, "float complex")?, - FloatKind::Double => write!(writer, "double complex")?, - FloatKind::LongDouble => { - write!(writer, "long double complex")?; - } - FloatKind::Float128 => write!(writer, "__complex128")?, - } - } - TypeKind::Alias(type_id) => { - if let Some(name) = self.name() { - if self.is_const() { - write!(writer, "const {name}")?; - } else { - write!(writer, "{name}")?; - } - } else { - type_id.serialize(ctx, (), stack, writer)?; - } - } - TypeKind::Array(type_id, length) => { - type_id.serialize(ctx, (), stack, writer)?; - write!(writer, " [{length}]")?; - } - TypeKind::Function(signature) => { - if self.is_const() { - stack.push("const ".to_string()); - } - - signature.return_type().serialize( - ctx, - (), - &mut vec![], - writer, - )?; - - write!(writer, " (")?; - while let Some(item) = stack.pop() { - write!(writer, "{item}")?; - } - write!(writer, ")")?; - - let args = signature.argument_types(); - if args.is_empty() { - write!(writer, " (void)")?; - } else { - write!(writer, " (")?; - serialize_sep( - ", ", - args.iter(), - ctx, - writer, - |(name, type_id), ctx, buf| { - let mut stack = vec![]; - if let Some(name) = name { - stack.push(name.clone()); - } - type_id.serialize(ctx, (), &mut stack, buf) - }, - )?; - write!(writer, ")")?; - } - } - TypeKind::ResolvedTypeRef(type_id) => { - if self.is_const() { - write!(writer, "const ")?; - } - type_id.serialize(ctx, (), stack, writer)?; - } - TypeKind::Pointer(type_id) => { - if self.is_const() { - stack.push("*const ".to_owned()); - } else { - stack.push("*".to_owned()); - } - type_id.serialize(ctx, (), stack, writer)?; - } - TypeKind::Comp(comp_info) => { - if self.is_const() { - write!(writer, "const ")?; - } - - let name = item.canonical_name(ctx); - - match comp_info.kind() { - CompKind::Struct => write!(writer, "struct {name}")?, - CompKind::Union => write!(writer, "union {name}")?, - } - } - TypeKind::Enum(_enum_ty) => { - if self.is_const() { - write!(writer, "const ")?; - } - - let name = item.canonical_name(ctx); - write!(writer, "enum {name}")?; - } - ty => { - return Err(CodegenError::Serialize { - msg: format!("Cannot serialize type kind {ty:?}"), - loc: get_loc(item), - }) - } - } - - if !stack.is_empty() { - write!(writer, " ")?; - while let Some(item) = stack.pop() { - write!(writer, "{item}")?; - } - } - - Ok(()) - } -} - -fn serialize_args( - args: &[(String, TypeId)], - ctx: &BindgenContext, - writer: &mut W, -) -> Result<(), CodegenError> { - if args.is_empty() { - write!(writer, "void")?; - } else { - serialize_sep( - ", ", - args.iter(), - ctx, - writer, - |(name, type_id), ctx, buf| { - type_id.serialize(ctx, (), &mut vec![name.clone()], buf) - }, - )?; - } - - Ok(()) -} - -fn serialize_sep< - W: Write, - F: FnMut(I::Item, 
&BindgenContext, &mut W) -> Result<(), CodegenError>, - I: Iterator, ->( - sep: &str, - mut iter: I, - ctx: &BindgenContext, - buf: &mut W, - mut f: F, -) -> Result<(), CodegenError> { - if let Some(item) = iter.next() { - f(item, ctx, buf)?; - let sep = sep.as_bytes(); - for item in iter { - buf.write_all(sep)?; - f(item, ctx, buf)?; - } - } - - Ok(()) -} diff --git a/vendor/bindgen/codegen/struct_layout.rs b/vendor/bindgen/codegen/struct_layout.rs deleted file mode 100644 index 0d2e6a05c57ac0..00000000000000 --- a/vendor/bindgen/codegen/struct_layout.rs +++ /dev/null @@ -1,458 +0,0 @@ -//! Helpers for code generation that need struct layout - -use super::helpers; - -use crate::ir::comp::CompInfo; -use crate::ir::context::BindgenContext; -use crate::ir::layout::Layout; -use crate::ir::ty::{Type, TypeKind}; -use crate::FieldVisibilityKind; -use proc_macro2::{Ident, Span}; -use std::cmp; - -const MAX_GUARANTEED_ALIGN: usize = 8; - -/// Trace the layout of struct. -#[derive(Debug)] -pub(crate) struct StructLayoutTracker<'a> { - name: &'a str, - ctx: &'a BindgenContext, - comp: &'a CompInfo, - is_packed: bool, - known_type_layout: Option, - is_rust_union: bool, - can_copy_union_fields: bool, - latest_offset: usize, - padding_count: usize, - latest_field_layout: Option, - max_field_align: usize, - last_field_was_bitfield: bool, - visibility: FieldVisibilityKind, - last_field_was_flexible_array: bool, -} - -/// Returns a size aligned to a given value. -pub(crate) fn align_to(size: usize, align: usize) -> usize { - if align == 0 { - return size; - } - - let rem = size % align; - if rem == 0 { - return size; - } - - size + align - rem -} - -/// Returns the lower power of two byte count that can hold at most n bits. -pub(crate) fn bytes_from_bits_pow2(mut n: usize) -> usize { - if n == 0 { - return 0; - } - - if n <= 8 { - return 1; - } - - if !n.is_power_of_two() { - n = n.next_power_of_two(); - } - - n / 8 -} - -#[test] -fn test_align_to() { - assert_eq!(align_to(1, 1), 1); - assert_eq!(align_to(1, 2), 2); - assert_eq!(align_to(1, 4), 4); - assert_eq!(align_to(5, 1), 5); - assert_eq!(align_to(17, 4), 20); -} - -#[test] -fn test_bytes_from_bits_pow2() { - assert_eq!(bytes_from_bits_pow2(0), 0); - for i in 1..9 { - assert_eq!(bytes_from_bits_pow2(i), 1); - } - for i in 9..17 { - assert_eq!(bytes_from_bits_pow2(i), 2); - } - for i in 17..33 { - assert_eq!(bytes_from_bits_pow2(i), 4); - } -} - -impl<'a> StructLayoutTracker<'a> { - pub(crate) fn new( - ctx: &'a BindgenContext, - comp: &'a CompInfo, - ty: &'a Type, - name: &'a str, - visibility: FieldVisibilityKind, - is_packed: bool, - ) -> Self { - let known_type_layout = ty.layout(ctx); - let (is_rust_union, can_copy_union_fields) = - comp.is_rust_union(ctx, known_type_layout.as_ref(), name); - StructLayoutTracker { - name, - ctx, - comp, - visibility, - is_packed, - known_type_layout, - is_rust_union, - can_copy_union_fields, - latest_offset: 0, - padding_count: 0, - latest_field_layout: None, - max_field_align: 0, - last_field_was_bitfield: false, - last_field_was_flexible_array: false, - } - } - - pub(crate) fn can_copy_union_fields(&self) -> bool { - self.can_copy_union_fields - } - - pub(crate) fn is_rust_union(&self) -> bool { - self.is_rust_union - } - - pub(crate) fn saw_flexible_array(&mut self) { - self.last_field_was_flexible_array = true; - } - - pub(crate) fn saw_vtable(&mut self) { - debug!("saw vtable for {}", self.name); - - let ptr_size = self.ctx.target_pointer_size(); - self.latest_offset += ptr_size; - self.latest_field_layout = 
Some(Layout::new(ptr_size, ptr_size)); - self.max_field_align = ptr_size; - } - - pub(crate) fn saw_base(&mut self, base_ty: &Type) { - debug!("saw base for {}", self.name); - if let Some(layout) = base_ty.layout(self.ctx) { - self.align_to_latest_field(layout); - - self.latest_offset += self.padding_bytes(layout) + layout.size; - self.latest_field_layout = Some(layout); - self.max_field_align = cmp::max(self.max_field_align, layout.align); - } - } - - pub(crate) fn saw_bitfield_unit(&mut self, layout: Layout) { - debug!("saw bitfield unit for {}: {layout:?}", self.name); - - self.align_to_latest_field(layout); - - self.latest_offset += layout.size; - - debug!( - "Offset: : {} -> {}", - self.latest_offset - layout.size, - self.latest_offset - ); - - self.latest_field_layout = Some(layout); - self.last_field_was_bitfield = true; - self.max_field_align = cmp::max(self.max_field_align, layout.align); - } - - /// Returns a padding field if necessary for a given new field _before_ - /// adding that field. - pub(crate) fn saw_field( - &mut self, - field_name: &str, - field_ty: &Type, - field_offset: Option, - ) -> Option { - let mut field_layout = field_ty.layout(self.ctx)?; - - if let TypeKind::Array(inner, len) = - *field_ty.canonical_type(self.ctx).kind() - { - // FIXME(emilio): As an _ultra_ hack, we correct the layout returned - // by arrays of structs that have a bigger alignment than what we - // can support. - // - // This means that the structs in the array are super-unsafe to - // access, since they won't be properly aligned, but there's not too - // much we can do about it. - if let Some(layout) = self.ctx.resolve_type(inner).layout(self.ctx) - { - if layout.align > MAX_GUARANTEED_ALIGN { - field_layout.size = - align_to(layout.size, layout.align) * len; - field_layout.align = MAX_GUARANTEED_ALIGN; - } - } - } - self.saw_field_with_layout(field_name, field_layout, field_offset) - } - - pub(crate) fn saw_field_with_layout( - &mut self, - field_name: &str, - field_layout: Layout, - field_offset: Option, - ) -> Option { - let will_merge_with_bitfield = self.align_to_latest_field(field_layout); - - let is_union = self.comp.is_union(); - let padding_bytes = match field_offset { - Some(offset) if offset / 8 > self.latest_offset => { - offset / 8 - self.latest_offset - } - _ => { - if will_merge_with_bitfield || - field_layout.align == 0 || - is_union - { - 0 - } else if !self.is_packed { - self.padding_bytes(field_layout) - } else if let Some(mut l) = self.known_type_layout { - if field_layout.align < l.align { - l.align = field_layout.align; - } - self.padding_bytes(l) - } else { - 0 - } - } - }; - - self.latest_offset += padding_bytes; - - let padding_layout = if self.is_packed || is_union { - None - } else { - let force_padding = self.ctx.options().force_explicit_padding; - - // Otherwise the padding is useless. 
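// Worked example of the padding computation (illustrative values): with
// latest_offset == 5 and an incoming field of Layout { size: 4, align: 4 },
// padding_bytes is align_to(5, 4) - 5 == 3. Since 3 is less than the field's
// alignment of 4, rustc's own field placement already covers it and no blob
// is emitted; an explicit `__bindgen_padding_N` field only appears when the
// condition below holds, i.e. padding_bytes reaches the field alignment, the
// alignment exceeds MAX_GUARANTEED_ALIGN, or force_explicit_padding is set.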
- let need_padding = force_padding || - padding_bytes >= field_layout.align || - field_layout.align > MAX_GUARANTEED_ALIGN; - - debug!( - "Offset: : {} -> {}", - self.latest_offset - padding_bytes, - self.latest_offset - ); - - debug!( - "align field {field_name} to {}/{} with {padding_bytes} padding bytes {field_layout:?}", - self.latest_offset, - field_offset.unwrap_or(0) / 8, - ); - - let padding_align = if force_padding { - 1 - } else { - cmp::min(field_layout.align, MAX_GUARANTEED_ALIGN) - }; - - if need_padding && padding_bytes != 0 { - Some(Layout::new(padding_bytes, padding_align)) - } else { - None - } - }; - - if is_union { - self.latest_offset = - cmp::max(self.latest_offset, field_layout.size); - } else { - self.latest_offset += field_layout.size; - } - self.latest_field_layout = Some(field_layout); - self.max_field_align = - cmp::max(self.max_field_align, field_layout.align); - self.last_field_was_bitfield = false; - - debug!( - "Offset: {field_name}: {} -> {}", - self.latest_offset - field_layout.size, - self.latest_offset - ); - - padding_layout.map(|layout| self.padding_field(layout)) - } - - pub(crate) fn add_tail_padding( - &mut self, - comp_name: &str, - comp_layout: Layout, - ) -> Option { - // Only emit an padding field at the end of a struct if the - // user configures explicit padding. - if !self.ctx.options().force_explicit_padding { - return None; - } - - // Padding doesn't make sense for rust unions. - if self.is_rust_union { - return None; - } - - // Also doesn't make sense for structs with flexible array members - if self.last_field_was_flexible_array { - return None; - } - - if self.latest_offset == comp_layout.size { - // This struct does not contain tail padding. - return None; - } - - trace!( - "need a tail padding field for {comp_name}: offset {} -> size {}", - self.latest_offset, - comp_layout.size - ); - let size = comp_layout.size - self.latest_offset; - Some(self.padding_field(Layout::new(size, 0))) - } - - pub(crate) fn pad_struct( - &mut self, - layout: Layout, - ) -> Option { - debug!("pad_struct:\n\tself = {self:#?}\n\tlayout = {layout:#?}"); - - if layout.size < self.latest_offset { - warn!( - "Calculated wrong layout for {}, too more {} bytes", - self.name, - self.latest_offset - layout.size - ); - return None; - } - - let padding_bytes = layout.size - self.latest_offset; - if padding_bytes == 0 { - return None; - } - - let repr_align = true; - - // We always pad to get to the correct size if the struct is one of - // those we can't align properly. - // - // Note that if the last field we saw was a bitfield, we may need to pad - // regardless, because bitfields don't respect alignment as strictly as - // other fields. - if padding_bytes >= layout.align || - (self.last_field_was_bitfield && - padding_bytes >= self.latest_field_layout.unwrap().align) || - (!repr_align && layout.align > MAX_GUARANTEED_ALIGN) - { - let layout = if self.is_packed { - Layout::new(padding_bytes, 1) - } else if self.last_field_was_bitfield || - layout.align > MAX_GUARANTEED_ALIGN - { - // We've already given up on alignment here. 
- Layout::for_size(self.ctx, padding_bytes) - } else { - Layout::new(padding_bytes, layout.align) - }; - - debug!("pad bytes to struct {}, {layout:?}", self.name); - - Some(self.padding_field(layout)) - } else { - None - } - } - - pub(crate) fn requires_explicit_align(&self, layout: Layout) -> bool { - let repr_align = true; - - // Always force explicit repr(align) for stuff more than 16-byte aligned - // to work-around https://github.com/rust-lang/rust/issues/54341. - // - // Worst-case this just generates redundant alignment attributes. - if repr_align && self.max_field_align >= 16 { - return true; - } - - if self.max_field_align >= layout.align { - return false; - } - - // We can only generate up-to a 8-bytes of alignment unless we support - // repr(align). - repr_align || layout.align <= MAX_GUARANTEED_ALIGN - } - - fn padding_bytes(&self, layout: Layout) -> usize { - align_to(self.latest_offset, layout.align) - self.latest_offset - } - - fn padding_field(&mut self, layout: Layout) -> proc_macro2::TokenStream { - let ty = helpers::blob(self.ctx, layout, false); - let padding_count = self.padding_count; - - self.padding_count += 1; - - let padding_field_name = Ident::new( - &format!("__bindgen_padding_{padding_count}"), - Span::call_site(), - ); - - self.max_field_align = cmp::max(self.max_field_align, layout.align); - - let vis = super::access_specifier(self.visibility); - - quote! { - #vis #padding_field_name : #ty , - } - } - - /// Returns whether the new field is known to merge with a bitfield. - /// - /// This is just to avoid doing the same check also in `pad_field`. - fn align_to_latest_field(&mut self, new_field_layout: Layout) -> bool { - if self.is_packed { - // Skip to align fields when packed. - return false; - } - - let Some(layout) = self.latest_field_layout else { - return false; - }; - - // If it was, we may or may not need to align, depending on what the - // current field alignment and the bitfield size and alignment are. - debug!( - "align_to_bitfield? {}: {layout:?} {new_field_layout:?}", - self.last_field_was_bitfield, - ); - - // Avoid divide-by-zero errors if align is 0. - let align = cmp::max(1, layout.align); - - if self.last_field_was_bitfield && - new_field_layout.align <= layout.size % align && - new_field_layout.size <= layout.size % align - { - // The new field will be coalesced into some of the remaining bits. - // - // FIXME(emilio): I think this may not catch everything? - debug!("Will merge with bitfield"); - return true; - } - - // Else, just align the obvious way. - self.latest_offset += self.padding_bytes(layout); - false - } -} diff --git a/vendor/bindgen/deps.rs b/vendor/bindgen/deps.rs deleted file mode 100644 index 3f95ac1e89e5ac..00000000000000 --- a/vendor/bindgen/deps.rs +++ /dev/null @@ -1,61 +0,0 @@ -/// Generating build depfiles from parsed bindings. -use std::{collections::BTreeSet, path::PathBuf}; - -#[derive(Clone, Debug)] -pub(crate) struct DepfileSpec { - pub output_module: String, - pub depfile_path: PathBuf, -} - -impl DepfileSpec { - pub fn write(&self, deps: &BTreeSet>) -> std::io::Result<()> { - std::fs::write(&self.depfile_path, self.to_string(deps)) - } - - fn to_string(&self, deps: &BTreeSet>) -> String { - // Transforms a string by escaping spaces and backslashes. 
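// e.g. an output module "Mod Name" with a single dependency
// "inc/my header.h" produces the one-line depfile
//
//     Mod\ Name: inc/my\ header.h
//
// which is the same escaping exercised by the test further down.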
- let escape = |s: &str| s.replace('\\', "\\\\").replace(' ', "\\ "); - - let mut buf = format!("{}:", escape(&self.output_module)); - for file in deps { - buf = format!("{buf} {}", escape(file)); - } - buf - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn escaping_depfile() { - let spec = DepfileSpec { - output_module: "Mod Name".to_owned(), - depfile_path: PathBuf::new(), - }; - - let deps: BTreeSet<_> = vec![ - r"/absolute/path".into(), - r"C:\win\absolute\path".into(), - r"../relative/path".into(), - r"..\win\relative\path".into(), - r"../path/with spaces/in/it".into(), - r"..\win\path\with spaces\in\it".into(), - r"path\with/mixed\separators".into(), - ] - .into_iter() - .collect(); - assert_eq!( - spec.to_string(&deps), - "Mod\\ Name: \ - ../path/with\\ spaces/in/it \ - ../relative/path \ - ..\\\\win\\\\path\\\\with\\ spaces\\\\in\\\\it \ - ..\\\\win\\\\relative\\\\path \ - /absolute/path \ - C:\\\\win\\\\absolute\\\\path \ - path\\\\with/mixed\\\\separators" - ); - } -} diff --git a/vendor/bindgen/diagnostics.rs b/vendor/bindgen/diagnostics.rs deleted file mode 100644 index f22402ac0e541a..00000000000000 --- a/vendor/bindgen/diagnostics.rs +++ /dev/null @@ -1,146 +0,0 @@ -//! Types and function used to emit pretty diagnostics for `bindgen`. -//! -//! The entry point of this module is the [`Diagnostic`] type. - -use std::fmt::Write; -use std::io::{self, BufRead, BufReader}; -use std::{borrow::Cow, fs::File}; - -use annotate_snippets::{Renderer, Snippet}; - -pub(crate) use annotate_snippets::Level; - -/// A `bindgen` diagnostic. -#[derive(Default)] -pub(crate) struct Diagnostic<'a> { - title: Option<(Cow<'a, str>, Level)>, - slices: Vec>, - footer: Vec<(Cow<'a, str>, Level)>, -} - -impl<'a> Diagnostic<'a> { - /// Add a title to the diagnostic and set its type. - pub(crate) fn with_title( - &mut self, - title: impl Into>, - level: Level, - ) -> &mut Self { - self.title = Some((title.into(), level)); - self - } - - /// Add a slice of source code to the diagnostic. - pub(crate) fn add_slice(&mut self, slice: Slice<'a>) -> &mut Self { - self.slices.push(slice); - self - } - - /// Add a footer annotation to the diagnostic. This annotation will have its own type. - pub(crate) fn add_annotation( - &mut self, - msg: impl Into>, - level: Level, - ) -> &mut Self { - self.footer.push((msg.into(), level)); - self - } - - /// Print this diagnostic. - /// - /// The diagnostic is printed using `cargo:warning` if `bindgen` is being invoked by a build - /// script or using `eprintln` otherwise. - pub(crate) fn display(&self) { - std::thread_local! 
{ - static INVOKED_BY_BUILD_SCRIPT: bool = std::env::var_os("CARGO_CFG_TARGET_ARCH").is_some(); - } - - let mut footer = vec![]; - let mut slices = vec![]; - let snippet = if let Some((msg, level)) = &self.title { - (*level).title(msg) - } else { - return; - }; - - for (msg, level) in &self.footer { - footer.push((*level).title(msg)); - } - - // add additional info that this is generated by bindgen - // so as to not confuse with rustc warnings - footer.push( - Level::Info.title("This diagnostic was generated by bindgen."), - ); - - for slice in &self.slices { - if let Some(source) = &slice.source { - let mut snippet = Snippet::source(source) - .line_start(slice.line.unwrap_or_default()); - if let Some(origin) = &slice.filename { - snippet = snippet.origin(origin); - } - slices.push(snippet); - } - } - - let renderer = Renderer::styled(); - let dl = renderer.render(snippet.snippets(slices).footers(footer)); - - if INVOKED_BY_BUILD_SCRIPT.with(Clone::clone) { - // This is just a hack which hides the `warning:` added by cargo at the beginning of - // every line. This should be fine as our diagnostics already have a colorful title. - // FIXME (pvdrz): Could it be that this doesn't work in other languages? - let hide_warning = "\r \r"; - let string = dl.to_string(); - for line in string.lines() { - println!("cargo:warning={hide_warning}{line}"); - } - } else { - eprintln!("{dl}\n"); - } - } -} - -/// A slice of source code. -#[derive(Default)] -pub(crate) struct Slice<'a> { - source: Option>, - filename: Option, - line: Option, -} - -impl<'a> Slice<'a> { - /// Set the source code. - pub(crate) fn with_source( - &mut self, - source: impl Into>, - ) -> &mut Self { - self.source = Some(source.into()); - self - } - - /// Set the file, line and column. - pub(crate) fn with_location( - &mut self, - mut name: String, - line: usize, - col: usize, - ) -> &mut Self { - write!(name, ":{line}:{col}").expect("Writing to a string cannot fail"); - self.filename = Some(name); - self.line = Some(line); - self - } -} - -pub(crate) fn get_line( - filename: &str, - line: usize, -) -> io::Result> { - let file = BufReader::new(File::open(filename)?); - if let Some(line) = file.lines().nth(line.wrapping_sub(1)) { - return line.map(Some); - } - - Ok(None) -} diff --git a/vendor/bindgen/extra_assertions.rs b/vendor/bindgen/extra_assertions.rs deleted file mode 100644 index 8526fd42d2e915..00000000000000 --- a/vendor/bindgen/extra_assertions.rs +++ /dev/null @@ -1,17 +0,0 @@ -//! Macros for defining extra assertions that should only be checked in testing -//! and/or CI when the `__testing_only_extra_assertions` feature is enabled. - -/// Simple macro that forwards to assert! when using -/// `__testing_only_extra_assertions`. -macro_rules! extra_assert { - ( $cond:expr ) => { - if cfg!(feature = "__testing_only_extra_assertions") { - assert!($cond); - } - }; - ( $cond:expr , $( $arg:tt )+ ) => { - if cfg!(feature = "__testing_only_extra_assertions") { - assert!($cond, $( $arg )* ) - } - }; -} diff --git a/vendor/bindgen/features.rs b/vendor/bindgen/features.rs deleted file mode 100644 index 45ea893947188a..00000000000000 --- a/vendor/bindgen/features.rs +++ /dev/null @@ -1,570 +0,0 @@ -//! Contains code for selecting features - -#![deny(unused_extern_crates)] -#![deny(clippy::missing_docs_in_private_items)] -#![allow(deprecated)] - -use std::str::FromStr; -use std::{fmt, io}; - -/// Represents the version of the Rust language to target. 
-#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord)] -#[repr(transparent)] -pub struct RustTarget(Version); - -impl RustTarget { - /// Create a new [`RustTarget`] for a stable release of Rust. - pub fn stable(minor: u64, patch: u64) -> Result { - let target = Self(Version::Stable(minor, patch)); - - if target < EARLIEST_STABLE_RUST { - return Err(InvalidRustTarget::TooEarly); - } - - Ok(target) - } - - const fn minor(&self) -> Option { - match self.0 { - Version::Nightly => None, - Version::Stable(minor, _) => Some(minor), - } - } - - const fn is_compatible(&self, other: &Self) -> bool { - match (self.0, other.0) { - (Version::Stable(minor, _), Version::Stable(other_minor, _)) => { - // We ignore the patch version number as they only include backwards compatible bug - // fixes. - minor >= other_minor - } - // Nightly is compatible with everything - (Version::Nightly, _) => true, - // No stable release is compatible with nightly - (Version::Stable { .. }, Version::Nightly) => false, - } - } -} - -impl Default for RustTarget { - fn default() -> Self { - // Bindgen from build script: default to generating bindings compatible - // with the Rust version currently performing this build. - #[cfg(not(feature = "__cli"))] - { - use std::env; - use std::iter; - use std::process::Command; - use std::sync::OnceLock; - - static CURRENT_RUST: OnceLock> = OnceLock::new(); - - if let Some(current_rust) = *CURRENT_RUST.get_or_init(|| { - let is_build_script = - env::var_os("CARGO_CFG_TARGET_ARCH").is_some(); - if !is_build_script { - return None; - } - - let rustc = env::var_os("RUSTC")?; - let rustc_wrapper = env::var_os("RUSTC_WRAPPER") - .filter(|wrapper| !wrapper.is_empty()); - let wrapped_rustc = - rustc_wrapper.iter().chain(iter::once(&rustc)); - - let mut is_clippy_driver = false; - loop { - let mut wrapped_rustc = wrapped_rustc.clone(); - let mut command = - Command::new(wrapped_rustc.next().unwrap()); - command.args(wrapped_rustc); - if is_clippy_driver { - command.arg("--rustc"); - } - command.arg("--version"); - - let output = command.output().ok()?; - let string = String::from_utf8(output.stdout).ok()?; - - // Version string like "rustc 1.100.0-beta.5 (f0e1d2c3b 2026-10-17)" - let last_line = string.lines().last().unwrap_or(&string); - let (program, rest) = last_line.trim().split_once(' ')?; - if program != "rustc" { - if program.starts_with("clippy") && !is_clippy_driver { - is_clippy_driver = true; - continue; - } - return None; - } - - let number = rest.split([' ', '-', '+']).next()?; - break RustTarget::from_str(number).ok(); - } - }) { - return current_rust; - } - } - - // Bindgen from CLI, or cannot determine compiler version: default to - // generating bindings compatible with the latest stable release of Rust - // that Bindgen knows about. 
- LATEST_STABLE_RUST - } -} - -impl fmt::Display for RustTarget { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match self.0 { - Version::Stable(minor, patch) => write!(f, "1.{minor}.{patch}"), - Version::Nightly => "nightly".fmt(f), - } - } -} - -#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord)] -enum Version { - Stable(u64, u64), - Nightly, -} - -#[derive(Debug, PartialEq, Eq, Hash)] -pub enum InvalidRustTarget { - TooEarly, -} - -impl fmt::Display for InvalidRustTarget { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match self { - Self::TooEarly => write!(f, "the earliest Rust version supported by bindgen is {EARLIEST_STABLE_RUST}"), - } - } -} - -/// This macro defines the Rust editions supported by bindgen. -macro_rules! define_rust_editions { - ($($variant:ident($value:literal) => $minor:literal,)*) => { - #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord)] - #[doc = "Represents Rust Edition for the generated bindings"] - pub enum RustEdition { - $( - #[doc = concat!("The ", stringify!($value), " edition of Rust.")] - $variant, - )* - } - - impl FromStr for RustEdition { - type Err = InvalidRustEdition; - - fn from_str(s: &str) -> Result { - match s { - $(stringify!($value) => Ok(Self::$variant),)* - _ => Err(InvalidRustEdition(s.to_owned())), - } - } - } - - impl fmt::Display for RustEdition { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match self { - $(Self::$variant => stringify!($value).fmt(f),)* - } - } - } - - impl RustEdition { - pub(crate) const ALL: [Self; [$($value,)*].len()] = [$(Self::$variant,)*]; - - pub(crate) fn is_available(self, target: RustTarget) -> bool { - let Some(minor) = target.minor() else { - return true; - }; - - match self { - $(Self::$variant => $minor <= minor,)* - } - } - } - } -} - -#[derive(Debug)] -pub struct InvalidRustEdition(String); - -impl fmt::Display for InvalidRustEdition { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "\"{}\" is not a valid Rust edition", self.0) - } -} - -impl std::error::Error for InvalidRustEdition {} - -define_rust_editions! { - Edition2018(2018) => 31, - Edition2021(2021) => 56, - Edition2024(2024) => 85, -} - -impl RustTarget { - /// Returns the latest edition supported by this target. - pub(crate) fn latest_edition(self) -> RustEdition { - RustEdition::ALL - .iter() - .rev() - .find(|edition| edition.is_available(self)) - .copied() - .expect("bindgen should always support at least one edition") - } -} - -impl Default for RustEdition { - fn default() -> Self { - RustTarget::default().latest_edition() - } -} - -/// This macro defines the [`RustTarget`] and [`RustFeatures`] types. -macro_rules! define_rust_targets { - ( - Nightly => {$($nightly_feature:ident $(($nightly_edition:literal))|* $(: #$issue:literal)?),* $(,)?} $(,)? - $( - $variant:ident($minor:literal) => {$($feature:ident $(($edition:literal))|* $(: #$pull:literal)?),* $(,)?}, - )* - $(,)? 
- ) => { - - impl RustTarget { - /// The nightly version of Rust, which introduces the following features:" - $(#[doc = concat!( - "- [`", stringify!($nightly_feature), "`]", - "(", $("https://github.com/rust-lang/rust/pull/", stringify!($issue),)* ")", - )])* - #[deprecated = "The use of this constant is deprecated, please use `RustTarget::nightly` instead."] - pub const Nightly: Self = Self::nightly(); - - /// The nightly version of Rust, which introduces the following features:" - $(#[doc = concat!( - "- [`", stringify!($nightly_feature), "`]", - "(", $("https://github.com/rust-lang/rust/pull/", stringify!($issue),)* ")", - )])* - pub const fn nightly() -> Self { - Self(Version::Nightly) - } - - $( - #[doc = concat!("Version 1.", stringify!($minor), " of Rust, which introduced the following features:")] - $(#[doc = concat!( - "- [`", stringify!($feature), "`]", - "(", $("https://github.com/rust-lang/rust/pull/", stringify!($pull),)* ")", - )])* - #[deprecated = "The use of this constant is deprecated, please use `RustTarget::stable` instead."] - pub const $variant: Self = Self(Version::Stable($minor, 0)); - )* - - const fn stable_releases() -> [(Self, u64); [$($minor,)*].len()] { - [$((Self::$variant, $minor),)*] - } - } - - #[derive(Debug, Copy, Clone, Eq, PartialEq, Hash)] - pub(crate) struct RustFeatures { - $($(pub(crate) $feature: bool,)*)* - $(pub(crate) $nightly_feature: bool,)* - } - - impl RustFeatures { - /// Compute the features that must be enabled in a specific Rust target with a specific edition. - pub(crate) fn new(target: RustTarget, edition: RustEdition) -> Self { - let mut features = Self { - $($($feature: false,)*)* - $($nightly_feature: false,)* - }; - - if target.is_compatible(&RustTarget::nightly()) { - $( - let editions: &[RustEdition] = &[$(stringify!($nightly_edition).parse::().ok().expect("invalid edition"),)*]; - - if editions.is_empty() || editions.contains(&edition) { - features.$nightly_feature = true; - } - )* - } - - $( - if target.is_compatible(&RustTarget::$variant) { - $( - let editions: &[RustEdition] = &[$(stringify!($edition).parse::().ok().expect("invalid edition"),)*]; - - if editions.is_empty() || editions.contains(&edition) { - features.$feature = true; - } - )* - } - )* - - features - } - } - }; -} - -// NOTE: When adding or removing features here, make sure to add the stabilization PR -// number for the feature if it has been stabilized or the tracking issue number if the feature is -// not stable. -define_rust_targets! 
{ - Nightly => { - vectorcall_abi: #124485, - ptr_metadata: #81513, - layout_for_ptr: #69835, - }, - Stable_1_82(82) => { - unsafe_extern_blocks: #127921, - }, - Stable_1_77(77) => { - offset_of: #106655, - literal_cstr(2021)|(2024): #117472, - }, - Stable_1_73(73) => { thiscall_abi: #42202 }, - Stable_1_71(71) => { c_unwind_abi: #106075 }, - Stable_1_68(68) => { abi_efiapi: #105795 }, - Stable_1_64(64) => { core_ffi_c: #94503 }, - Stable_1_51(51) => { - raw_ref_macros: #80886, - min_const_generics: #74878, - }, - Stable_1_59(59) => { const_cstr: #54745 }, - Stable_1_47(47) => { larger_arrays: #74060 }, - Stable_1_43(43) => { associated_constants: #68952 }, - Stable_1_40(40) => { non_exhaustive: #44109 }, - Stable_1_36(36) => { maybe_uninit: #60445 }, - Stable_1_33(33) => { repr_packed_n: #57049 }, -} - -/// Latest stable release of Rust that is supported by bindgen -pub const LATEST_STABLE_RUST: RustTarget = { - // FIXME: replace all this code by - // ``` - // RustTarget::stable_releases() - // .into_iter() - // .max_by_key(|(_, m)| m) - // .map(|(t, _)| t) - // .unwrap() - // ``` - // once those operations can be used in constants. - let targets = RustTarget::stable_releases(); - - let mut i = 0; - let mut latest_target = None; - let mut latest_minor = 0; - - while i < targets.len() { - let (target, minor) = targets[i]; - - if latest_minor < minor { - latest_minor = minor; - latest_target = Some(target); - } - - i += 1; - } - - match latest_target { - Some(target) => target, - None => unreachable!(), - } -}; - -/// Earliest stable release of Rust that is supported by bindgen -pub const EARLIEST_STABLE_RUST: RustTarget = { - // FIXME: replace all this code by - // ``` - // RustTarget::stable_releases() - // .into_iter() - // .min_by_key(|(_, m)| m) - // .map(|(t, _)| t) - // .unwrap_or(LATEST_STABLE_RUST) - // ``` - // once those operations can be used in constants. - let targets = RustTarget::stable_releases(); - - let mut i = 0; - let mut earliest_target = None; - let Some(mut earliest_minor) = LATEST_STABLE_RUST.minor() else { - unreachable!() - }; - - while i < targets.len() { - let (target, minor) = targets[i]; - - if earliest_minor > minor { - earliest_minor = minor; - earliest_target = Some(target); - } - - i += 1; - } - - match earliest_target { - Some(target) => target, - None => unreachable!(), - } -}; - -fn invalid_input(input: &str, msg: impl fmt::Display) -> io::Error { - io::Error::new( - io::ErrorKind::InvalidInput, - format!("\"{input}\" is not a valid Rust target, {msg}"), - ) -} - -impl FromStr for RustTarget { - type Err = io::Error; - - fn from_str(input: &str) -> Result { - if input == "nightly" { - return Ok(Self::Nightly); - } - - let Some((major_str, tail)) = input.split_once('.') else { - return Err(invalid_input(input, "accepted values are of the form \"1.71\", \"1.71.1\" or \"nightly\"." 
) ); - }; - - if major_str != "1" { - return Err(invalid_input( - input, - "The largest major version of Rust released is \"1\"", - )); - } - - let (minor, patch) = if let Some((minor_str, patch_str)) = - tail.split_once('.') - { - let Ok(minor) = minor_str.parse::() else { - return Err(invalid_input(input, "the minor version number must be an unsigned 64-bit integer")); - }; - let Ok(patch) = patch_str.parse::() else { - return Err(invalid_input(input, "the patch version number must be an unsigned 64-bit integer")); - }; - (minor, patch) - } else { - let Ok(minor) = tail.parse::() else { - return Err(invalid_input(input, "the minor version number must be an unsigned 64-bit integer")); - }; - (minor, 0) - }; - - Self::stable(minor, patch).map_err(|err| invalid_input(input, err)) - } -} - -impl RustFeatures { - /// Compute the features that must be enabled in a specific Rust target with the latest edition - /// available in that target. - pub(crate) fn new_with_latest_edition(target: RustTarget) -> Self { - Self::new(target, target.latest_edition()) - } -} - -impl Default for RustFeatures { - fn default() -> Self { - Self::new_with_latest_edition(RustTarget::default()) - } -} - -#[cfg(test)] -mod test { - use super::*; - - #[test] - fn release_versions_for_editions() { - assert_eq!( - "1.33".parse::().unwrap().latest_edition(), - RustEdition::Edition2018 - ); - assert_eq!( - "1.56".parse::().unwrap().latest_edition(), - RustEdition::Edition2021 - ); - assert_eq!( - "1.85".parse::().unwrap().latest_edition(), - RustEdition::Edition2024 - ); - assert_eq!( - "nightly".parse::().unwrap().latest_edition(), - RustEdition::Edition2024 - ); - } - - #[test] - fn target_features() { - let features = - RustFeatures::new_with_latest_edition(RustTarget::Stable_1_71); - assert!( - features.c_unwind_abi && - features.abi_efiapi && - !features.thiscall_abi - ); - - let features = RustFeatures::new( - RustTarget::Stable_1_77, - RustEdition::Edition2018, - ); - assert!(!features.literal_cstr); - - let features = - RustFeatures::new_with_latest_edition(RustTarget::Stable_1_77); - assert!(features.literal_cstr); - - let f_nightly = - RustFeatures::new_with_latest_edition(RustTarget::Nightly); - assert!( - f_nightly.vectorcall_abi && - f_nightly.ptr_metadata && - f_nightly.layout_for_ptr - ); - } - - fn test_target(input: &str, expected: RustTarget) { - // Two targets are equivalent if they enable the same set of features - let expected = RustFeatures::new_with_latest_edition(expected); - let found = RustFeatures::new_with_latest_edition( - input.parse::().unwrap(), - ); - assert_eq!( - expected, - found, - "target {input} enables features:\n{found:#?}\nand should enable features:\n{expected:#?}" - ); - } - - fn test_invalid_target(input: &str) { - assert!( - input.parse::().is_err(), - "{input} should be an invalid target" - ); - } - - #[test] - fn valid_targets() { - test_target("1.71", RustTarget::Stable_1_71); - test_target("1.71.0", RustTarget::Stable_1_71); - test_target("1.71.1", RustTarget::Stable_1_71); - test_target("1.72", RustTarget::Stable_1_71); - test_target("1.73", RustTarget::Stable_1_73); - test_target("1.18446744073709551615", LATEST_STABLE_RUST); - test_target("nightly", RustTarget::Nightly); - } - - #[test] - fn invalid_targets() { - test_invalid_target("2.0"); - test_invalid_target("1.cat"); - test_invalid_target("1.0.cat"); - test_invalid_target("1.18446744073709551616"); - test_invalid_target("1.0.18446744073709551616"); - test_invalid_target("1.-1.0"); - test_invalid_target("1.0.-1"); - 
test_invalid_target("beta"); - test_invalid_target("1.0.0"); - test_invalid_target("1.32.0"); - } -} diff --git a/vendor/bindgen/ir/analysis/derive.rs b/vendor/bindgen/ir/analysis/derive.rs deleted file mode 100644 index eaa20fff463c9d..00000000000000 --- a/vendor/bindgen/ir/analysis/derive.rs +++ /dev/null @@ -1,726 +0,0 @@ -//! Determining which types for which we cannot emit `#[derive(Trait)]`. - -use std::fmt; - -use super::{generate_dependencies, ConstrainResult, MonotoneFramework}; -use crate::ir::analysis::has_vtable::HasVtable; -use crate::ir::comp::CompKind; -use crate::ir::context::{BindgenContext, ItemId}; -use crate::ir::derive::CanDerive; -use crate::ir::function::FunctionSig; -use crate::ir::item::{IsOpaque, Item}; -use crate::ir::layout::Layout; -use crate::ir::template::TemplateParameters; -use crate::ir::traversal::{EdgeKind, Trace}; -use crate::ir::ty::RUST_DERIVE_IN_ARRAY_LIMIT; -use crate::ir::ty::{Type, TypeKind}; -use crate::{Entry, HashMap, HashSet}; - -/// Which trait to consider when doing the `CannotDerive` analysis. -#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] -pub enum DeriveTrait { - /// The `Copy` trait. - Copy, - /// The `Debug` trait. - Debug, - /// The `Default` trait. - Default, - /// The `Hash` trait. - Hash, - /// The `PartialEq` and `PartialOrd` traits. - PartialEqOrPartialOrd, -} - -/// An analysis that finds for each IR item whether a trait cannot be derived. -/// -/// We use the monotone constraint function `cannot_derive`, defined as follows -/// for type T: -/// -/// * If T is Opaque and the layout of the type is known, get this layout as an -/// opaquetype and check whether it can derive using trivial checks. -/// -/// * If T is Array, a trait cannot be derived if the array is incomplete, -/// if the length of the array is larger than the limit (unless the trait -/// allows it), or the trait cannot be derived for the type of data the array -/// contains. -/// -/// * If T is Vector, a trait cannot be derived if the trait cannot be derived -/// for the type of data the vector contains. -/// -/// * If T is a type alias, a templated alias or an indirection to another type, -/// the trait cannot be derived if the trait cannot be derived for type T -/// refers to. -/// -/// * If T is a compound type, the trait cannot be derived if the trait cannot -/// be derived for any of its base members or fields. -/// -/// * If T is an instantiation of an abstract template definition, the trait -/// cannot be derived if any of the template arguments or template definition -/// cannot derive the trait. -/// -/// * For all other (simple) types, compiler and standard library limitations -/// dictate whether the trait is implemented. -#[derive(Debug, Clone)] -pub(crate) struct CannotDerive<'ctx> { - ctx: &'ctx BindgenContext, - - derive_trait: DeriveTrait, - - // The incremental result of this analysis's computation. - // Contains information whether particular item can derive `derive_trait` - can_derive: HashMap, - - // Dependencies saying that if a key ItemId has been inserted into the - // `cannot_derive_partialeq_or_partialord` set, then each of the ids - // in Vec need to be considered again. - // - // This is a subset of the natural IR graph with reversed edges, where we - // only include the edges from the IR graph that can affect whether a type - // can derive `derive_trait`. 
- dependencies: HashMap>, -} - -type EdgePredicate = fn(EdgeKind) -> bool; - -fn consider_edge_default(kind: EdgeKind) -> bool { - match kind { - // These are the only edges that can affect whether a type can derive - EdgeKind::BaseMember | - EdgeKind::Field | - EdgeKind::TypeReference | - EdgeKind::VarType | - EdgeKind::TemplateArgument | - EdgeKind::TemplateDeclaration | - EdgeKind::TemplateParameterDefinition => true, - - EdgeKind::Constructor | - EdgeKind::Destructor | - EdgeKind::FunctionReturn | - EdgeKind::FunctionParameter | - EdgeKind::InnerType | - EdgeKind::InnerVar | - EdgeKind::Method | - EdgeKind::Generic => false, - } -} - -impl CannotDerive<'_> { - fn insert>( - &mut self, - id: Id, - can_derive: CanDerive, - ) -> ConstrainResult { - let id = id.into(); - trace!( - "inserting {id:?} can_derive<{}>={can_derive:?}", - self.derive_trait, - ); - - if let CanDerive::Yes = can_derive { - return ConstrainResult::Same; - } - - match self.can_derive.entry(id) { - Entry::Occupied(mut entry) => { - if *entry.get() < can_derive { - entry.insert(can_derive); - ConstrainResult::Changed - } else { - ConstrainResult::Same - } - } - Entry::Vacant(entry) => { - entry.insert(can_derive); - ConstrainResult::Changed - } - } - } - - fn constrain_type(&mut self, item: &Item, ty: &Type) -> CanDerive { - if !self.ctx.allowlisted_items().contains(&item.id()) { - let can_derive = self - .ctx - .blocklisted_type_implements_trait(item, self.derive_trait); - match can_derive { - CanDerive::Yes => trace!( - " blocklisted type explicitly implements {}", - self.derive_trait - ), - CanDerive::Manually => trace!( - " blocklisted type requires manual implementation of {}", - self.derive_trait - ), - CanDerive::No => trace!( - " cannot derive {} for blocklisted type", - self.derive_trait - ), - } - return can_derive; - } - - if self.derive_trait.not_by_name(self.ctx, item) { - trace!( - " cannot derive {} for explicitly excluded type", - self.derive_trait - ); - return CanDerive::No; - } - - trace!("ty: {ty:?}"); - if item.is_opaque(self.ctx, &()) { - if !self.derive_trait.can_derive_union() && - ty.is_union() && - self.ctx.options().untagged_union - { - trace!( - " cannot derive {} for Rust unions", - self.derive_trait - ); - return CanDerive::No; - } - - let layout_can_derive = - ty.layout(self.ctx).map_or(CanDerive::Yes, |l| { - l.opaque().array_size_within_derive_limit() - }); - - match layout_can_derive { - CanDerive::Yes => { - trace!( - " we can trivially derive {} for the layout", - self.derive_trait - ); - } - _ => { - trace!( - " we cannot derive {} for the layout", - self.derive_trait - ); - } - } - return layout_can_derive; - } - - match *ty.kind() { - // Handle the simple cases. These can derive traits without further - // information. - TypeKind::Void | - TypeKind::NullPtr | - TypeKind::Int(..) | - TypeKind::Complex(..) | - TypeKind::Float(..) | - TypeKind::Enum(..) | - TypeKind::TypeParam | - TypeKind::UnresolvedTypeRef(..) | - TypeKind::Reference(..) | - TypeKind::ObjCInterface(..) 
| - TypeKind::ObjCId | - TypeKind::ObjCSel => self.derive_trait.can_derive_simple(ty.kind()), - TypeKind::Pointer(inner) => { - let inner_type = - self.ctx.resolve_type(inner).canonical_type(self.ctx); - if let TypeKind::Function(ref sig) = *inner_type.kind() { - self.derive_trait.can_derive_fnptr(sig) - } else { - self.derive_trait.can_derive_pointer() - } - } - TypeKind::Function(ref sig) => { - self.derive_trait.can_derive_fnptr(sig) - } - - // Complex cases need more information - TypeKind::Array(t, len) => { - let inner_type = - self.can_derive.get(&t.into()).copied().unwrap_or_default(); - if inner_type != CanDerive::Yes { - trace!( - " arrays of T for which we cannot derive {} \ - also cannot derive {}", - self.derive_trait, - self.derive_trait - ); - return CanDerive::No; - } - - if len == 0 && !self.derive_trait.can_derive_incomplete_array() - { - trace!( - " cannot derive {} for incomplete arrays", - self.derive_trait - ); - return CanDerive::No; - } - - if self.derive_trait.can_derive_large_array(self.ctx) { - trace!(" array can derive {}", self.derive_trait); - return CanDerive::Yes; - } - - if len > RUST_DERIVE_IN_ARRAY_LIMIT { - trace!( - " array is too large to derive {}, but it may be implemented", self.derive_trait - ); - return CanDerive::Manually; - } - trace!( - " array is small enough to derive {}", - self.derive_trait - ); - CanDerive::Yes - } - TypeKind::Vector(t, len) => { - let inner_type = - self.can_derive.get(&t.into()).copied().unwrap_or_default(); - if inner_type != CanDerive::Yes { - trace!( - " vectors of T for which we cannot derive {} \ - also cannot derive {}", - self.derive_trait, - self.derive_trait - ); - return CanDerive::No; - } - assert_ne!(len, 0, "vectors cannot have zero length"); - self.derive_trait.can_derive_vector() - } - - TypeKind::Comp(ref info) => { - assert!( - !info.has_non_type_template_params(), - "The early ty.is_opaque check should have handled this case" - ); - - if !self.derive_trait.can_derive_compound_forward_decl() && - info.is_forward_declaration() - { - trace!( - " cannot derive {} for forward decls", - self.derive_trait - ); - return CanDerive::No; - } - - // NOTE: Take into account that while unions in C and C++ are copied by - // default, the may have an explicit destructor in C++, so we can't - // defer this check just for the union case. 
- if !self.derive_trait.can_derive_compound_with_destructor() && - self.ctx.lookup_has_destructor( - item.id().expect_type_id(self.ctx), - ) - { - trace!( - " comp has destructor which cannot derive {}", - self.derive_trait - ); - return CanDerive::No; - } - - if info.kind() == CompKind::Union { - if self.derive_trait.can_derive_union() { - if self.ctx.options().untagged_union && - // https://github.com/rust-lang/rust/issues/36640 - (!info.self_template_params(self.ctx).is_empty() || - !item.all_template_params(self.ctx).is_empty()) - { - trace!( - " cannot derive {} for Rust union because issue 36640", self.derive_trait - ); - return CanDerive::No; - } - // fall through to be same as non-union handling - } else { - if self.ctx.options().untagged_union { - trace!( - " cannot derive {} for Rust unions", - self.derive_trait - ); - return CanDerive::No; - } - - let layout_can_derive = - ty.layout(self.ctx).map_or(CanDerive::Yes, |l| { - l.opaque().array_size_within_derive_limit() - }); - match layout_can_derive { - CanDerive::Yes => { - trace!( - " union layout can trivially derive {}", - self.derive_trait - ); - } - _ => { - trace!( - " union layout cannot derive {}", - self.derive_trait - ); - } - } - return layout_can_derive; - } - } - - if !self.derive_trait.can_derive_compound_with_vtable() && - item.has_vtable(self.ctx) - { - trace!( - " cannot derive {} for comp with vtable", - self.derive_trait - ); - return CanDerive::No; - } - - // Bitfield units are always represented as arrays of u8, but - // they're not traced as arrays, so we need to check here - // instead. - if !self.derive_trait.can_derive_large_array(self.ctx) && - info.has_too_large_bitfield_unit() && - !item.is_opaque(self.ctx, &()) - { - trace!( - " cannot derive {} for comp with too large bitfield unit", - self.derive_trait - ); - return CanDerive::No; - } - - let pred = self.derive_trait.consider_edge_comp(); - self.constrain_join(item, pred) - } - - TypeKind::ResolvedTypeRef(..) | - TypeKind::TemplateAlias(..) | - TypeKind::Alias(..) | - TypeKind::BlockPointer(..) => { - let pred = self.derive_trait.consider_edge_typeref(); - self.constrain_join(item, pred) - } - - TypeKind::TemplateInstantiation(..) => { - let pred = self.derive_trait.consider_edge_tmpl_inst(); - self.constrain_join(item, pred) - } - - TypeKind::Opaque => unreachable!( - "The early ty.is_opaque check should have handled this case" - ), - } - } - - fn constrain_join( - &mut self, - item: &Item, - consider_edge: EdgePredicate, - ) -> CanDerive { - let mut candidate = None; - - item.trace( - self.ctx, - &mut |sub_id, edge_kind| { - // Ignore ourselves, since union with ourself is a - // no-op. Ignore edges that aren't relevant to the - // analysis. 
- if sub_id == item.id() || !consider_edge(edge_kind) { - return; - } - - let can_derive = self.can_derive - .get(&sub_id) - .copied() - .unwrap_or_default(); - - match can_derive { - CanDerive::Yes => trace!(" member {sub_id:?} can derive {}", self.derive_trait), - CanDerive::Manually => trace!(" member {sub_id:?} cannot derive {}, but it may be implemented", self.derive_trait), - CanDerive::No => trace!(" member {sub_id:?} cannot derive {}", self.derive_trait), - } - - *candidate.get_or_insert(CanDerive::Yes) |= can_derive; - }, - &(), - ); - - if candidate.is_none() { - trace!( - " can derive {} because there are no members", - self.derive_trait - ); - } - candidate.unwrap_or_default() - } -} - -impl DeriveTrait { - fn not_by_name(self, ctx: &BindgenContext, item: &Item) -> bool { - match self { - DeriveTrait::Copy => ctx.no_copy_by_name(item), - DeriveTrait::Debug => ctx.no_debug_by_name(item), - DeriveTrait::Default => ctx.no_default_by_name(item), - DeriveTrait::Hash => ctx.no_hash_by_name(item), - DeriveTrait::PartialEqOrPartialOrd => { - ctx.no_partialeq_by_name(item) - } - } - } - - fn consider_edge_comp(self) -> EdgePredicate { - match self { - DeriveTrait::PartialEqOrPartialOrd => consider_edge_default, - _ => |kind| matches!(kind, EdgeKind::BaseMember | EdgeKind::Field), - } - } - - fn consider_edge_typeref(self) -> EdgePredicate { - match self { - DeriveTrait::PartialEqOrPartialOrd => consider_edge_default, - _ => |kind| kind == EdgeKind::TypeReference, - } - } - - fn consider_edge_tmpl_inst(self) -> EdgePredicate { - match self { - DeriveTrait::PartialEqOrPartialOrd => consider_edge_default, - _ => |kind| { - matches!( - kind, - EdgeKind::TemplateArgument | EdgeKind::TemplateDeclaration - ) - }, - } - } - - fn can_derive_large_array(self, ctx: &BindgenContext) -> bool { - if ctx.options().rust_features().larger_arrays { - !matches!(self, DeriveTrait::Default) - } else { - matches!(self, DeriveTrait::Copy) - } - } - - fn can_derive_union(self) -> bool { - matches!(self, DeriveTrait::Copy) - } - - fn can_derive_compound_with_destructor(self) -> bool { - !matches!(self, DeriveTrait::Copy) - } - - fn can_derive_compound_with_vtable(self) -> bool { - !matches!(self, DeriveTrait::Default) - } - - fn can_derive_compound_forward_decl(self) -> bool { - matches!(self, DeriveTrait::Copy | DeriveTrait::Debug) - } - - fn can_derive_incomplete_array(self) -> bool { - !matches!( - self, - DeriveTrait::Copy | - DeriveTrait::Hash | - DeriveTrait::PartialEqOrPartialOrd - ) - } - - fn can_derive_fnptr(self, f: &FunctionSig) -> CanDerive { - match (self, f.function_pointers_can_derive()) { - (DeriveTrait::Copy | DeriveTrait::Default, _) | (_, true) => { - trace!(" function pointer can derive {self}"); - CanDerive::Yes - } - (DeriveTrait::Debug, false) => { - trace!(" function pointer cannot derive {self}, but it may be implemented"); - CanDerive::Manually - } - (_, false) => { - trace!(" function pointer cannot derive {self}"); - CanDerive::No - } - } - } - - fn can_derive_vector(self) -> CanDerive { - if self == DeriveTrait::PartialEqOrPartialOrd { - // FIXME: vectors always can derive PartialEq, but they should - // not derive PartialOrd: - // https://github.com/rust-lang-nursery/packed_simd/issues/48 - trace!(" vectors cannot derive PartialOrd"); - CanDerive::No - } else { - trace!(" vector can derive {self}"); - CanDerive::Yes - } - } - - fn can_derive_pointer(self) -> CanDerive { - if self == DeriveTrait::Default { - trace!(" pointer cannot derive Default"); - CanDerive::No - } else { - 
trace!(" pointer can derive {self}"); - CanDerive::Yes - } - } - - fn can_derive_simple(self, kind: &TypeKind) -> CanDerive { - match (self, kind) { - // === Default === - ( - DeriveTrait::Default, - TypeKind::Void | - TypeKind::NullPtr | - TypeKind::Enum(..) | - TypeKind::Reference(..) | - TypeKind::TypeParam | - TypeKind::ObjCInterface(..) | - TypeKind::ObjCId | - TypeKind::ObjCSel, - ) => { - trace!(" types that always cannot derive Default"); - CanDerive::No - } - (DeriveTrait::Default, TypeKind::UnresolvedTypeRef(..)) => { - unreachable!( - "Type with unresolved type ref can't reach derive default" - ) - } - // === Hash === - ( - DeriveTrait::Hash, - TypeKind::Float(..) | TypeKind::Complex(..), - ) => { - trace!(" float cannot derive Hash"); - CanDerive::No - } - // === others === - _ => { - trace!(" simple type that can always derive {self}"); - CanDerive::Yes - } - } - } -} - -impl fmt::Display for DeriveTrait { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - let s = match self { - DeriveTrait::Copy => "Copy", - DeriveTrait::Debug => "Debug", - DeriveTrait::Default => "Default", - DeriveTrait::Hash => "Hash", - DeriveTrait::PartialEqOrPartialOrd => "PartialEq/PartialOrd", - }; - s.fmt(f) - } -} - -impl<'ctx> MonotoneFramework for CannotDerive<'ctx> { - type Node = ItemId; - type Extra = (&'ctx BindgenContext, DeriveTrait); - type Output = HashMap; - - fn new( - (ctx, derive_trait): (&'ctx BindgenContext, DeriveTrait), - ) -> CannotDerive<'ctx> { - let can_derive = HashMap::default(); - let dependencies = generate_dependencies(ctx, consider_edge_default); - - CannotDerive { - ctx, - derive_trait, - can_derive, - dependencies, - } - } - - fn initial_worklist(&self) -> Vec { - // The transitive closure of all allowlisted items, including explicitly - // blocklisted items. - self.ctx - .allowlisted_items() - .iter() - .copied() - .flat_map(|i| { - let mut reachable = vec![i]; - i.trace( - self.ctx, - &mut |s, _| { - reachable.push(s); - }, - &(), - ); - reachable - }) - .collect() - } - - fn constrain(&mut self, id: ItemId) -> ConstrainResult { - trace!("constrain: {id:?}"); - - if let Some(CanDerive::No) = self.can_derive.get(&id) { - trace!(" already know it cannot derive {}", self.derive_trait); - return ConstrainResult::Same; - } - - let item = self.ctx.resolve_item(id); - let can_derive = match item.as_type() { - Some(ty) => { - let mut can_derive = self.constrain_type(item, ty); - if let CanDerive::Yes = can_derive { - let is_reached_limit = - |l: Layout| l.align > RUST_DERIVE_IN_ARRAY_LIMIT; - if !self.derive_trait.can_derive_large_array(self.ctx) && - ty.layout(self.ctx).is_some_and(is_reached_limit) - { - // We have to be conservative: the struct *could* have enough - // padding that we emit an array that is longer than - // `RUST_DERIVE_IN_ARRAY_LIMIT`. If we moved padding calculations - // into the IR and computed them before this analysis, then we could - // be precise rather than conservative here. 
- can_derive = CanDerive::Manually; - } - } - can_derive - } - None => self.constrain_join(item, consider_edge_default), - }; - - self.insert(id, can_derive) - } - - fn each_depending_on(&self, id: ItemId, mut f: F) - where - F: FnMut(ItemId), - { - if let Some(edges) = self.dependencies.get(&id) { - for item in edges { - trace!("enqueue {item:?} into worklist"); - f(*item); - } - } - } -} - -impl<'ctx> From> for HashMap { - fn from(analysis: CannotDerive<'ctx>) -> Self { - extra_assert!(analysis - .can_derive - .values() - .all(|v| *v != CanDerive::Yes)); - - analysis.can_derive - } -} - -/// Convert a `HashMap` into a `HashSet`. -/// -/// Elements that are not `CanDerive::Yes` are kept in the set, so that it -/// represents all items that cannot derive. -pub(crate) fn as_cannot_derive_set( - can_derive: HashMap, -) -> HashSet { - can_derive - .into_iter() - .filter_map(|(k, v)| if v == CanDerive::Yes { None } else { Some(k) }) - .collect() -} diff --git a/vendor/bindgen/ir/analysis/has_destructor.rs b/vendor/bindgen/ir/analysis/has_destructor.rs deleted file mode 100644 index 4893f8f8075db2..00000000000000 --- a/vendor/bindgen/ir/analysis/has_destructor.rs +++ /dev/null @@ -1,175 +0,0 @@ -//! Determining which types have destructors - -use super::{generate_dependencies, ConstrainResult, MonotoneFramework}; -use crate::ir::comp::{CompKind, Field, FieldMethods}; -use crate::ir::context::{BindgenContext, ItemId}; -use crate::ir::traversal::EdgeKind; -use crate::ir::ty::TypeKind; -use crate::{HashMap, HashSet}; - -/// An analysis that finds for each IR item whether it has a destructor or not -/// -/// We use the monotone function `has destructor`, defined as follows: -/// -/// * If T is a type alias, a templated alias, or an indirection to another type, -/// T has a destructor if the type T refers to has a destructor. -/// * If T is a compound type, T has a destructor if we saw a destructor when parsing it, -/// or if it's a struct, T has a destructor if any of its base members has a destructor, -/// or if any of its fields have a destructor. -/// * If T is an instantiation of an abstract template definition, T has -/// a destructor if its template definition has a destructor, -/// or if any of the template arguments has a destructor. -/// * If T is the type of a field, that field has a destructor if it's not a bitfield, -/// and if T has a destructor. -#[derive(Debug, Clone)] -pub(crate) struct HasDestructorAnalysis<'ctx> { - ctx: &'ctx BindgenContext, - - // The incremental result of this analysis's computation. Everything in this - // set definitely has a destructor. - have_destructor: HashSet, - - // Dependencies saying that if a key ItemId has been inserted into the - // `have_destructor` set, then each of the ids in Vec need to be - // considered again. - // - // This is a subset of the natural IR graph with reversed edges, where we - // only include the edges from the IR graph that can affect whether a type - // has a destructor or not. - dependencies: HashMap>, -} - -impl HasDestructorAnalysis<'_> { - fn consider_edge(kind: EdgeKind) -> bool { - // These are the only edges that can affect whether a type has a - // destructor or not. 
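// A tiny stand-alone sketch of the set-growing fix-point the destructor rules
// above describe. Type names and member edges are plain strings here, not
// bindgen `ItemId`s, and the real analysis uses a worklist over reversed
// dependency edges instead of repeated full sweeps; the idea is the same: a
// type enters the `has_destructor` set if it declares its own destructor or
// transitively contains a member that does, and once in the set it never
// leaves, which is what makes the analysis monotone.
use std::collections::{HashMap, HashSet};

fn main() {
    // Member edges: a type -> the types of its bases and fields.
    let members: HashMap<&str, Vec<&str>> = HashMap::from([
        ("String", vec![]),
        ("Wrapper", vec!["String"]),
        ("Pair", vec!["Wrapper", "i32"]),
        ("i32", vec![]),
    ]);
    // Types that declare a destructor of their own.
    let own_destructor: HashSet<&str> = HashSet::from(["String"]);

    let mut has_destructor = own_destructor.clone();
    let mut changed = true;
    while changed {
        changed = false;
        for (ty, deps) in &members {
            if !has_destructor.contains(ty)
                && deps.iter().any(|d| has_destructor.contains(d))
            {
                has_destructor.insert(*ty);
                changed = true;
            }
        }
    }

    assert!(has_destructor.contains("Pair")); // inherited via Wrapper -> String
    assert!(!has_destructor.contains("i32"));
}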
- matches!( - kind, - EdgeKind::TypeReference | - EdgeKind::BaseMember | - EdgeKind::Field | - EdgeKind::TemplateArgument | - EdgeKind::TemplateDeclaration - ) - } - - fn insert>(&mut self, id: Id) -> ConstrainResult { - let id = id.into(); - let was_not_already_in_set = self.have_destructor.insert(id); - assert!( - was_not_already_in_set, - "We shouldn't try and insert {id:?} twice because if it was \ - already in the set, `constrain` should have exited early." - ); - ConstrainResult::Changed - } -} - -impl<'ctx> MonotoneFramework for HasDestructorAnalysis<'ctx> { - type Node = ItemId; - type Extra = &'ctx BindgenContext; - type Output = HashSet; - - fn new(ctx: &'ctx BindgenContext) -> Self { - let have_destructor = HashSet::default(); - let dependencies = generate_dependencies(ctx, Self::consider_edge); - - HasDestructorAnalysis { - ctx, - have_destructor, - dependencies, - } - } - - fn initial_worklist(&self) -> Vec { - self.ctx.allowlisted_items().iter().copied().collect() - } - - fn constrain(&mut self, id: ItemId) -> ConstrainResult { - if self.have_destructor.contains(&id) { - // We've already computed that this type has a destructor and that can't - // change. - return ConstrainResult::Same; - } - - let item = self.ctx.resolve_item(id); - let ty = match item.as_type() { - None => return ConstrainResult::Same, - Some(ty) => ty, - }; - - match *ty.kind() { - TypeKind::TemplateAlias(t, _) | - TypeKind::Alias(t) | - TypeKind::ResolvedTypeRef(t) => { - if self.have_destructor.contains(&t.into()) { - self.insert(id) - } else { - ConstrainResult::Same - } - } - - TypeKind::Comp(ref info) => { - if info.has_own_destructor() { - return self.insert(id); - } - - match info.kind() { - CompKind::Union => ConstrainResult::Same, - CompKind::Struct => { - let base_or_field_destructor = - info.base_members().iter().any(|base| { - self.have_destructor.contains(&base.ty.into()) - }) || info.fields().iter().any( - |field| match *field { - Field::DataMember(ref data) => self - .have_destructor - .contains(&data.ty().into()), - Field::Bitfields(_) => false, - }, - ); - if base_or_field_destructor { - self.insert(id) - } else { - ConstrainResult::Same - } - } - } - } - - TypeKind::TemplateInstantiation(ref inst) => { - let definition_or_arg_destructor = self - .have_destructor - .contains(&inst.template_definition().into()) || - inst.template_arguments().iter().any(|arg| { - self.have_destructor.contains(&arg.into()) - }); - if definition_or_arg_destructor { - self.insert(id) - } else { - ConstrainResult::Same - } - } - - _ => ConstrainResult::Same, - } - } - - fn each_depending_on(&self, id: ItemId, mut f: F) - where - F: FnMut(ItemId), - { - if let Some(edges) = self.dependencies.get(&id) { - for item in edges { - trace!("enqueue {item:?} into worklist"); - f(*item); - } - } - } -} - -impl<'ctx> From> for HashSet { - fn from(analysis: HasDestructorAnalysis<'ctx>) -> Self { - analysis.have_destructor - } -} diff --git a/vendor/bindgen/ir/analysis/has_float.rs b/vendor/bindgen/ir/analysis/has_float.rs deleted file mode 100644 index e2463ccb96e262..00000000000000 --- a/vendor/bindgen/ir/analysis/has_float.rs +++ /dev/null @@ -1,248 +0,0 @@ -//! Determining which types has float. 
- -use super::{generate_dependencies, ConstrainResult, MonotoneFramework}; -use crate::ir::comp::Field; -use crate::ir::comp::FieldMethods; -use crate::ir::context::{BindgenContext, ItemId}; -use crate::ir::traversal::EdgeKind; -use crate::ir::ty::TypeKind; -use crate::{HashMap, HashSet}; - -/// An analysis that finds for each IR item whether it has float or not. -/// -/// We use the monotone constraint function `has_float`, -/// defined as follows: -/// -/// * If T is float or complex float, T trivially has. -/// * If T is a type alias, a templated alias or an indirection to another type, -/// it has float if the type T refers to has. -/// * If T is a compound type, it has float if any of base memter or field -/// has. -/// * If T is an instantiation of an abstract template definition, T has -/// float if any of the template arguments or template definition -/// has. -#[derive(Debug, Clone)] -pub(crate) struct HasFloat<'ctx> { - ctx: &'ctx BindgenContext, - - // The incremental result of this analysis's computation. Everything in this - // set has float. - has_float: HashSet, - - // Dependencies saying that if a key ItemId has been inserted into the - // `has_float` set, then each of the ids in Vec need to be - // considered again. - // - // This is a subset of the natural IR graph with reversed edges, where we - // only include the edges from the IR graph that can affect whether a type - // has float or not. - dependencies: HashMap>, -} - -impl HasFloat<'_> { - fn consider_edge(kind: EdgeKind) -> bool { - match kind { - EdgeKind::BaseMember | - EdgeKind::Field | - EdgeKind::TypeReference | - EdgeKind::VarType | - EdgeKind::TemplateArgument | - EdgeKind::TemplateDeclaration | - EdgeKind::TemplateParameterDefinition => true, - - EdgeKind::Constructor | - EdgeKind::Destructor | - EdgeKind::FunctionReturn | - EdgeKind::FunctionParameter | - EdgeKind::InnerType | - EdgeKind::InnerVar | - EdgeKind::Method | - EdgeKind::Generic => false, - } - } - - fn insert>(&mut self, id: Id) -> ConstrainResult { - let id = id.into(); - trace!("inserting {id:?} into the has_float set"); - - let was_not_already_in_set = self.has_float.insert(id); - assert!( - was_not_already_in_set, - "We shouldn't try and insert {id:?} twice because if it was \ - already in the set, `constrain` should have exited early." - ); - - ConstrainResult::Changed - } -} - -impl<'ctx> MonotoneFramework for HasFloat<'ctx> { - type Node = ItemId; - type Extra = &'ctx BindgenContext; - type Output = HashSet; - - fn new(ctx: &'ctx BindgenContext) -> HasFloat<'ctx> { - let has_float = HashSet::default(); - let dependencies = generate_dependencies(ctx, Self::consider_edge); - - HasFloat { - ctx, - has_float, - dependencies, - } - } - - fn initial_worklist(&self) -> Vec { - self.ctx.allowlisted_items().iter().copied().collect() - } - - fn constrain(&mut self, id: ItemId) -> ConstrainResult { - trace!("constrain: {id:?}"); - - if self.has_float.contains(&id) { - trace!(" already know it do not have float"); - return ConstrainResult::Same; - } - - let item = self.ctx.resolve_item(id); - let Some(ty) = item.as_type() else { - trace!(" not a type; ignoring"); - return ConstrainResult::Same; - }; - - match *ty.kind() { - TypeKind::Void | - TypeKind::NullPtr | - TypeKind::Int(..) | - TypeKind::Function(..) | - TypeKind::Enum(..) | - TypeKind::Reference(..) | - TypeKind::TypeParam | - TypeKind::Opaque | - TypeKind::Pointer(..) | - TypeKind::UnresolvedTypeRef(..) | - TypeKind::ObjCInterface(..) 
| - TypeKind::ObjCId | - TypeKind::ObjCSel => { - trace!(" simple type that do not have float"); - ConstrainResult::Same - } - - TypeKind::Float(..) | TypeKind::Complex(..) => { - trace!(" float type has float"); - self.insert(id) - } - - TypeKind::Array(t, _) => { - if self.has_float.contains(&t.into()) { - trace!( - " Array with type T that has float also has float" - ); - return self.insert(id); - } - trace!(" Array with type T that do not have float also do not have float"); - ConstrainResult::Same - } - TypeKind::Vector(t, _) => { - if self.has_float.contains(&t.into()) { - trace!( - " Vector with type T that has float also has float" - ); - return self.insert(id); - } - trace!(" Vector with type T that do not have float also do not have float"); - ConstrainResult::Same - } - - TypeKind::ResolvedTypeRef(t) | - TypeKind::TemplateAlias(t, _) | - TypeKind::Alias(t) | - TypeKind::BlockPointer(t) => { - if self.has_float.contains(&t.into()) { - trace!( - " aliases and type refs to T which have float \ - also have float" - ); - self.insert(id) - } else { - trace!(" aliases and type refs to T which do not have float \ - also do not have floaarrayt"); - ConstrainResult::Same - } - } - - TypeKind::Comp(ref info) => { - let bases_have = info - .base_members() - .iter() - .any(|base| self.has_float.contains(&base.ty.into())); - if bases_have { - trace!(" bases have float, so we also have"); - return self.insert(id); - } - let fields_have = info.fields().iter().any(|f| match *f { - Field::DataMember(ref data) => { - self.has_float.contains(&data.ty().into()) - } - Field::Bitfields(ref bfu) => bfu - .bitfields() - .iter() - .any(|b| self.has_float.contains(&b.ty().into())), - }); - if fields_have { - trace!(" fields have float, so we also have"); - return self.insert(id); - } - - trace!(" comp doesn't have float"); - ConstrainResult::Same - } - - TypeKind::TemplateInstantiation(ref template) => { - let args_have = template - .template_arguments() - .iter() - .any(|arg| self.has_float.contains(&arg.into())); - if args_have { - trace!( - " template args have float, so \ - instantiation also has float" - ); - return self.insert(id); - } - - let def_has = self - .has_float - .contains(&template.template_definition().into()); - if def_has { - trace!( - " template definition has float, so \ - instantiation also has" - ); - return self.insert(id); - } - - trace!(" template instantiation do not have float"); - ConstrainResult::Same - } - } - } - - fn each_depending_on(&self, id: ItemId, mut f: F) - where - F: FnMut(ItemId), - { - if let Some(edges) = self.dependencies.get(&id) { - for item in edges { - trace!("enqueue {item:?} into worklist"); - f(*item); - } - } - } -} - -impl<'ctx> From> for HashSet { - fn from(analysis: HasFloat<'ctx>) -> Self { - analysis.has_float - } -} diff --git a/vendor/bindgen/ir/analysis/has_type_param_in_array.rs b/vendor/bindgen/ir/analysis/has_type_param_in_array.rs deleted file mode 100644 index 687f81560c7783..00000000000000 --- a/vendor/bindgen/ir/analysis/has_type_param_in_array.rs +++ /dev/null @@ -1,242 +0,0 @@ -//! Determining which types has typed parameters in array. - -use super::{generate_dependencies, ConstrainResult, MonotoneFramework}; -use crate::ir::comp::Field; -use crate::ir::comp::FieldMethods; -use crate::ir::context::{BindgenContext, ItemId}; -use crate::ir::traversal::EdgeKind; -use crate::ir::ty::TypeKind; -use crate::{HashMap, HashSet}; - -/// An analysis that finds for each IR item whether it has array or not. 
-/// -/// We use the monotone constraint function `has_type_parameter_in_array`, -/// defined as follows: -/// -/// * If T is Array type with type parameter, T trivially has. -/// * If T is a type alias, a templated alias or an indirection to another type, -/// it has type parameter in array if the type T refers to has. -/// * If T is a compound type, it has array if any of base memter or field -/// has type parameter in array. -/// * If T is an instantiation of an abstract template definition, T has -/// type parameter in array if any of the template arguments or template definition -/// has. -#[derive(Debug, Clone)] -pub(crate) struct HasTypeParameterInArray<'ctx> { - ctx: &'ctx BindgenContext, - - // The incremental result of this analysis's computation. Everything in this - // set has array. - has_type_parameter_in_array: HashSet, - - // Dependencies saying that if a key ItemId has been inserted into the - // `has_type_parameter_in_array` set, then each of the ids in Vec need to be - // considered again. - // - // This is a subset of the natural IR graph with reversed edges, where we - // only include the edges from the IR graph that can affect whether a type - // has array or not. - dependencies: HashMap>, -} - -impl HasTypeParameterInArray<'_> { - fn consider_edge(kind: EdgeKind) -> bool { - match kind { - // These are the only edges that can affect whether a type has type parameter - // in array or not. - EdgeKind::BaseMember | - EdgeKind::Field | - EdgeKind::TypeReference | - EdgeKind::VarType | - EdgeKind::TemplateArgument | - EdgeKind::TemplateDeclaration | - EdgeKind::TemplateParameterDefinition => true, - - EdgeKind::Constructor | - EdgeKind::Destructor | - EdgeKind::FunctionReturn | - EdgeKind::FunctionParameter | - EdgeKind::InnerType | - EdgeKind::InnerVar | - EdgeKind::Method | - EdgeKind::Generic => false, - } - } - - fn insert>(&mut self, id: Id) -> ConstrainResult { - let id = id.into(); - trace!("inserting {id:?} into the has_type_parameter_in_array set"); - - let was_not_already_in_set = - self.has_type_parameter_in_array.insert(id); - assert!( - was_not_already_in_set, - "We shouldn't try and insert {id:?} twice because if it was \ - already in the set, `constrain` should have exited early." - ); - - ConstrainResult::Changed - } -} - -impl<'ctx> MonotoneFramework for HasTypeParameterInArray<'ctx> { - type Node = ItemId; - type Extra = &'ctx BindgenContext; - type Output = HashSet; - - fn new(ctx: &'ctx BindgenContext) -> HasTypeParameterInArray<'ctx> { - let has_type_parameter_in_array = HashSet::default(); - let dependencies = generate_dependencies(ctx, Self::consider_edge); - - HasTypeParameterInArray { - ctx, - has_type_parameter_in_array, - dependencies, - } - } - - fn initial_worklist(&self) -> Vec { - self.ctx.allowlisted_items().iter().copied().collect() - } - - fn constrain(&mut self, id: ItemId) -> ConstrainResult { - trace!("constrain: {id:?}"); - - if self.has_type_parameter_in_array.contains(&id) { - trace!(" already know it do not have array"); - return ConstrainResult::Same; - } - - let item = self.ctx.resolve_item(id); - let Some(ty) = item.as_type() else { - trace!(" not a type; ignoring"); - return ConstrainResult::Same; - }; - - match *ty.kind() { - // Handle the simple cases. These cannot have array in type parameter - // without further information. - TypeKind::Void | - TypeKind::NullPtr | - TypeKind::Int(..) | - TypeKind::Float(..) | - TypeKind::Vector(..) | - TypeKind::Complex(..) | - TypeKind::Function(..) | - TypeKind::Enum(..) 
| - TypeKind::Reference(..) | - TypeKind::TypeParam | - TypeKind::Opaque | - TypeKind::Pointer(..) | - TypeKind::UnresolvedTypeRef(..) | - TypeKind::ObjCInterface(..) | - TypeKind::ObjCId | - TypeKind::ObjCSel => { - trace!(" simple type that do not have array"); - ConstrainResult::Same - } - - TypeKind::Array(t, _) => { - let inner_ty = - self.ctx.resolve_type(t).canonical_type(self.ctx); - if let TypeKind::TypeParam = *inner_ty.kind() { - trace!(" Array with Named type has type parameter"); - self.insert(id) - } else { - trace!( - " Array without Named type does have type parameter" - ); - ConstrainResult::Same - } - } - - TypeKind::ResolvedTypeRef(t) | - TypeKind::TemplateAlias(t, _) | - TypeKind::Alias(t) | - TypeKind::BlockPointer(t) => { - if self.has_type_parameter_in_array.contains(&t.into()) { - trace!( - " aliases and type refs to T which have array \ - also have array" - ); - self.insert(id) - } else { - trace!( - " aliases and type refs to T which do not have array \ - also do not have array" - ); - ConstrainResult::Same - } - } - - TypeKind::Comp(ref info) => { - let bases_have = info.base_members().iter().any(|base| { - self.has_type_parameter_in_array.contains(&base.ty.into()) - }); - if bases_have { - trace!(" bases have array, so we also have"); - return self.insert(id); - } - let fields_have = info.fields().iter().any(|f| match *f { - Field::DataMember(ref data) => self - .has_type_parameter_in_array - .contains(&data.ty().into()), - Field::Bitfields(..) => false, - }); - if fields_have { - trace!(" fields have array, so we also have"); - return self.insert(id); - } - - trace!(" comp doesn't have array"); - ConstrainResult::Same - } - - TypeKind::TemplateInstantiation(ref template) => { - let args_have = - template.template_arguments().iter().any(|arg| { - self.has_type_parameter_in_array.contains(&arg.into()) - }); - if args_have { - trace!( - " template args have array, so \ - instantiation also has array" - ); - return self.insert(id); - } - - let def_has = self - .has_type_parameter_in_array - .contains(&template.template_definition().into()); - if def_has { - trace!( - " template definition has array, so \ - instantiation also has" - ); - return self.insert(id); - } - - trace!(" template instantiation do not have array"); - ConstrainResult::Same - } - } - } - - fn each_depending_on(&self, id: ItemId, mut f: F) - where - F: FnMut(ItemId), - { - if let Some(edges) = self.dependencies.get(&id) { - for item in edges { - trace!("enqueue {item:?} into worklist"); - f(*item); - } - } - } -} - -impl<'ctx> From> for HashSet { - fn from(analysis: HasTypeParameterInArray<'ctx>) -> Self { - analysis.has_type_parameter_in_array - } -} diff --git a/vendor/bindgen/ir/analysis/has_vtable.rs b/vendor/bindgen/ir/analysis/has_vtable.rs deleted file mode 100644 index 3ff64a6d2b1a49..00000000000000 --- a/vendor/bindgen/ir/analysis/has_vtable.rs +++ /dev/null @@ -1,235 +0,0 @@ -//! Determining which types has vtable - -use super::{generate_dependencies, ConstrainResult, MonotoneFramework}; -use crate::ir::context::{BindgenContext, ItemId}; -use crate::ir::traversal::EdgeKind; -use crate::ir::ty::TypeKind; -use crate::{Entry, HashMap}; -use std::cmp; -use std::ops; - -/// The result of the `HasVtableAnalysis` for an individual item. -#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Default)] -pub(crate) enum HasVtableResult { - /// The item does not have a vtable pointer. - #[default] - No, - - /// The item has a vtable and the actual vtable pointer is within this item. 
- SelfHasVtable, - - /// The item has a vtable, but the actual vtable pointer is in a base - /// member. - BaseHasVtable, -} - -impl HasVtableResult { - /// Take the least upper bound of `self` and `rhs`. - pub(crate) fn join(self, rhs: Self) -> Self { - cmp::max(self, rhs) - } -} - -impl ops::BitOr for HasVtableResult { - type Output = Self; - - fn bitor(self, rhs: HasVtableResult) -> Self::Output { - self.join(rhs) - } -} - -impl ops::BitOrAssign for HasVtableResult { - fn bitor_assign(&mut self, rhs: HasVtableResult) { - *self = self.join(rhs); - } -} - -/// An analysis that finds for each IR item whether it has vtable or not -/// -/// We use the monotone function `has vtable`, defined as follows: -/// -/// * If T is a type alias, a templated alias, an indirection to another type, -/// or a reference of a type, T has vtable if the type T refers to has vtable. -/// * If T is a compound type, T has vtable if we saw a virtual function when -/// parsing it or any of its base member has vtable. -/// * If T is an instantiation of an abstract template definition, T has -/// vtable if template definition has vtable -#[derive(Debug, Clone)] -pub(crate) struct HasVtableAnalysis<'ctx> { - ctx: &'ctx BindgenContext, - - // The incremental result of this analysis's computation. Everything in this - // set definitely has a vtable. - have_vtable: HashMap, - - // Dependencies saying that if a key ItemId has been inserted into the - // `have_vtable` set, then each of the ids in Vec need to be - // considered again. - // - // This is a subset of the natural IR graph with reversed edges, where we - // only include the edges from the IR graph that can affect whether a type - // has a vtable or not. - dependencies: HashMap>, -} - -impl HasVtableAnalysis<'_> { - fn consider_edge(kind: EdgeKind) -> bool { - // These are the only edges that can affect whether a type has a - // vtable or not. 
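// A short illustration of the chain lattice used above: `No` <
// `SelfHasVtable` < `BaseHasVtable`, joined with `max`. A type that both
// declares a virtual method and inherits one ends up recorded as
// `BaseHasVtable`, i.e. the vtable pointer lives in the base subobject.
// `VtableWhere` is a stand-in copy for illustration; the real
// `HasVtableResult` is the enum defined in this module.
#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
enum VtableWhere {
    No,
    SelfHasVtable,
    BaseHasVtable,
}

fn main() {
    let join = |a: VtableWhere, b: VtableWhere| std::cmp::max(a, b);
    assert_eq!(
        join(VtableWhere::No, VtableWhere::SelfHasVtable),
        VtableWhere::SelfHasVtable
    );
    assert_eq!(
        join(VtableWhere::SelfHasVtable, VtableWhere::BaseHasVtable),
        VtableWhere::BaseHasVtable
    );
}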
- matches!( - kind, - EdgeKind::TypeReference | - EdgeKind::BaseMember | - EdgeKind::TemplateDeclaration - ) - } - - fn insert>( - &mut self, - id: Id, - result: HasVtableResult, - ) -> ConstrainResult { - if let HasVtableResult::No = result { - return ConstrainResult::Same; - } - - let id = id.into(); - match self.have_vtable.entry(id) { - Entry::Occupied(mut entry) => { - if *entry.get() < result { - entry.insert(result); - ConstrainResult::Changed - } else { - ConstrainResult::Same - } - } - Entry::Vacant(entry) => { - entry.insert(result); - ConstrainResult::Changed - } - } - } - - fn forward(&mut self, from: Id1, to: Id2) -> ConstrainResult - where - Id1: Into, - Id2: Into, - { - let from = from.into(); - let to = to.into(); - - match self.have_vtable.get(&from) { - None => ConstrainResult::Same, - Some(r) => self.insert(to, *r), - } - } -} - -impl<'ctx> MonotoneFramework for HasVtableAnalysis<'ctx> { - type Node = ItemId; - type Extra = &'ctx BindgenContext; - type Output = HashMap; - - fn new(ctx: &'ctx BindgenContext) -> HasVtableAnalysis<'ctx> { - let have_vtable = HashMap::default(); - let dependencies = generate_dependencies(ctx, Self::consider_edge); - - HasVtableAnalysis { - ctx, - have_vtable, - dependencies, - } - } - - fn initial_worklist(&self) -> Vec { - self.ctx.allowlisted_items().iter().copied().collect() - } - - fn constrain(&mut self, id: ItemId) -> ConstrainResult { - trace!("constrain {id:?}"); - - let item = self.ctx.resolve_item(id); - let ty = match item.as_type() { - None => return ConstrainResult::Same, - Some(ty) => ty, - }; - - // TODO #851: figure out a way to handle deriving from template type parameters. - match *ty.kind() { - TypeKind::TemplateAlias(t, _) | - TypeKind::Alias(t) | - TypeKind::ResolvedTypeRef(t) | - TypeKind::Reference(t) => { - trace!( - " aliases and references forward to their inner type" - ); - self.forward(t, id) - } - - TypeKind::Comp(ref info) => { - trace!(" comp considers its own methods and bases"); - let mut result = HasVtableResult::No; - - if info.has_own_virtual_method() { - trace!(" comp has its own virtual method"); - result |= HasVtableResult::SelfHasVtable; - } - - let bases_has_vtable = info.base_members().iter().any(|base| { - trace!(" comp has a base with a vtable: {base:?}"); - self.have_vtable.contains_key(&base.ty.into()) - }); - if bases_has_vtable { - result |= HasVtableResult::BaseHasVtable; - } - - self.insert(id, result) - } - - TypeKind::TemplateInstantiation(ref inst) => { - self.forward(inst.template_definition(), id) - } - - _ => ConstrainResult::Same, - } - } - - fn each_depending_on(&self, id: ItemId, mut f: F) - where - F: FnMut(ItemId), - { - if let Some(edges) = self.dependencies.get(&id) { - for item in edges { - trace!("enqueue {item:?} into worklist"); - f(*item); - } - } - } -} - -impl<'ctx> From> for HashMap { - fn from(analysis: HasVtableAnalysis<'ctx>) -> Self { - // We let the lack of an entry mean "No" to save space. - extra_assert!(analysis - .have_vtable - .values() - .all(|v| { *v != HasVtableResult::No })); - - analysis.have_vtable - } -} - -/// A convenience trait for the things for which we might wonder if they have a -/// vtable during codegen. -/// -/// This is not for _computing_ whether the thing has a vtable, it is for -/// looking up the results of the `HasVtableAnalysis`'s computations for a -/// specific thing. -pub(crate) trait HasVtable { - /// Return `true` if this thing has vtable, `false` otherwise. 
- fn has_vtable(&self, ctx: &BindgenContext) -> bool; - - /// Return `true` if this thing has an actual vtable pointer in itself, as - /// opposed to transitively in a base member. - fn has_vtable_ptr(&self, ctx: &BindgenContext) -> bool; -} diff --git a/vendor/bindgen/ir/analysis/mod.rs b/vendor/bindgen/ir/analysis/mod.rs deleted file mode 100644 index 74a305edfb5ac5..00000000000000 --- a/vendor/bindgen/ir/analysis/mod.rs +++ /dev/null @@ -1,395 +0,0 @@ -//! Fix-point analyses on the IR using the "monotone framework". -//! -//! A lattice is a set with a partial ordering between elements, where there is -//! a single least upper bound and a single greatest least bound for every -//! subset. We are dealing with finite lattices, which means that it has a -//! finite number of elements, and it follows that there exists a single top and -//! a single bottom member of the lattice. For example, the power set of a -//! finite set forms a finite lattice where partial ordering is defined by set -//! inclusion, that is `a <= b` if `a` is a subset of `b`. Here is the finite -//! lattice constructed from the set {0,1,2}: -//! -//! ```text -//! .----- Top = {0,1,2} -----. -//! / | \ -//! / | \ -//! / | \ -//! {0,1} -------. {0,2} .--------- {1,2} -//! | \ / \ / | -//! | / \ | -//! | / \ / \ | -//! {0} --------' {1} `---------- {2} -//! \ | / -//! \ | / -//! \ | / -//! `------ Bottom = {} ------' -//! ``` -//! -//! A monotone function `f` is a function where if `x <= y`, then it holds that -//! `f(x) <= f(y)`. It should be clear that running a monotone function to a -//! fix-point on a finite lattice will always terminate: `f` can only "move" -//! along the lattice in a single direction, and therefore can only either find -//! a fix-point in the middle of the lattice or continue to the top or bottom -//! depending if it is ascending or descending the lattice respectively. -//! -//! For a deeper introduction to the general form of this kind of analysis, see -//! [Static Program Analysis by Anders Møller and Michael I. Schwartzbach][spa]. -//! -//! [spa]: https://cs.au.dk/~amoeller/spa/spa.pdf - -// Re-export individual analyses. -mod template_params; -pub(crate) use self::template_params::UsedTemplateParameters; -mod derive; -pub use self::derive::DeriveTrait; -pub(crate) use self::derive::{as_cannot_derive_set, CannotDerive}; -mod has_vtable; -pub(crate) use self::has_vtable::{ - HasVtable, HasVtableAnalysis, HasVtableResult, -}; -mod has_destructor; -pub(crate) use self::has_destructor::HasDestructorAnalysis; -mod has_type_param_in_array; -pub(crate) use self::has_type_param_in_array::HasTypeParameterInArray; -mod has_float; -pub(crate) use self::has_float::HasFloat; -mod sizedness; -pub(crate) use self::sizedness::{ - Sizedness, SizednessAnalysis, SizednessResult, -}; - -use crate::ir::context::{BindgenContext, ItemId}; - -use crate::ir::traversal::{EdgeKind, Trace}; -use crate::HashMap; -use std::fmt; -use std::ops; - -/// An analysis in the monotone framework. -/// -/// Implementors of this trait must maintain the following two invariants: -/// -/// 1. The concrete data must be a member of a finite-height lattice. -/// 2. The concrete `constrain` method must be monotone: that is, -/// if `x <= y`, then `constrain(x) <= constrain(y)`. -/// -/// If these invariants do not hold, iteration to a fix-point might never -/// complete. -/// -/// For a simple example analysis, see the `ReachableFrom` type in the `tests` -/// module below. 
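// The module-level comment above explains why a monotone function on a finite
// lattice always reaches a fix-point. Here is that claim made concrete for the
// power-set lattice of {0, 1, 2} drawn above, with set union as the join: the
// toy function `f` only ever adds elements, so repeated application can only
// climb the lattice and must stop once nothing new is added. This is a
// stand-alone sketch; the in-tree example analysis is `ReachableFrom` in the
// `tests` module below.
use std::collections::BTreeSet;

fn f(s: &BTreeSet<u8>) -> BTreeSet<u8> {
    let mut next = s.clone();
    next.insert(0); // always add 0
    if s.contains(&0) {
        next.insert(1); // adding 1 depends on 0 already being present
    }
    next
}

fn main() {
    let mut current = BTreeSet::new(); // start at the bottom element {}
    loop {
        let next = f(&current);
        if next == current {
            break; // fix-point reached
        }
        current = next;
    }
    // {} -> {0} -> {0, 1} -> {0, 1}
    assert_eq!(current, BTreeSet::from([0, 1]));
}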
-pub(crate) trait MonotoneFramework: Sized + fmt::Debug { - /// The type of node in our dependency graph. - /// - /// This is just generic (and not `ItemId`) so that we can easily unit test - /// without constructing real `Item`s and their `ItemId`s. - type Node: Copy; - - /// Any extra data that is needed during computation. - /// - /// Again, this is just generic (and not `&BindgenContext`) so that we can - /// easily unit test without constructing real `BindgenContext`s full of - /// real `Item`s and real `ItemId`s. - type Extra: Sized; - - /// The final output of this analysis. Once we have reached a fix-point, we - /// convert `self` into this type, and return it as the final result of the - /// analysis. - type Output: From + fmt::Debug; - - /// Construct a new instance of this analysis. - fn new(extra: Self::Extra) -> Self; - - /// Get the initial set of nodes from which to start the analysis. Unless - /// you are sure of some domain-specific knowledge, this should be the - /// complete set of nodes. - fn initial_worklist(&self) -> Vec; - - /// Update the analysis for the given node. - /// - /// If this results in changing our internal state (ie, we discovered that - /// we have not reached a fix-point and iteration should continue), return - /// `ConstrainResult::Changed`. Otherwise, return `ConstrainResult::Same`. - /// When `constrain` returns `ConstrainResult::Same` for all nodes in the - /// set, we have reached a fix-point and the analysis is complete. - fn constrain(&mut self, node: Self::Node) -> ConstrainResult; - - /// For each node `d` that depends on the given `node`'s current answer when - /// running `constrain(d)`, call `f(d)`. This informs us which new nodes to - /// queue up in the worklist when `constrain(node)` reports updated - /// information. - fn each_depending_on(&self, node: Self::Node, f: F) - where - F: FnMut(Self::Node); -} - -/// Whether an analysis's `constrain` function modified the incremental results -/// or not. -#[derive(Debug, Copy, Clone, PartialEq, Eq, Default)] -pub(crate) enum ConstrainResult { - /// The incremental results were updated, and the fix-point computation - /// should continue. - Changed, - - /// The incremental results were not updated. - #[default] - Same, -} - -impl ops::BitOr for ConstrainResult { - type Output = Self; - - fn bitor(self, rhs: ConstrainResult) -> Self::Output { - if self == ConstrainResult::Changed || rhs == ConstrainResult::Changed { - ConstrainResult::Changed - } else { - ConstrainResult::Same - } - } -} - -impl ops::BitOrAssign for ConstrainResult { - fn bitor_assign(&mut self, rhs: ConstrainResult) { - *self = *self | rhs; - } -} - -/// Run an analysis in the monotone framework. -pub(crate) fn analyze(extra: Analysis::Extra) -> Analysis::Output -where - Analysis: MonotoneFramework, -{ - let mut analysis = Analysis::new(extra); - let mut worklist = analysis.initial_worklist(); - - while let Some(node) = worklist.pop() { - if let ConstrainResult::Changed = analysis.constrain(node) { - analysis.each_depending_on(node, |needs_work| { - worklist.push(needs_work); - }); - } - } - - analysis.into() -} - -/// Generate the dependency map for analysis -pub(crate) fn generate_dependencies( - ctx: &BindgenContext, - consider_edge: F, -) -> HashMap> -where - F: Fn(EdgeKind) -> bool, -{ - let mut dependencies = HashMap::default(); - - for &item in ctx.allowlisted_items() { - dependencies.entry(item).or_insert_with(Vec::new); - - { - // We reverse our natural IR graph edges to find dependencies - // between nodes. 
- item.trace( - ctx, - &mut |sub_item: ItemId, edge_kind| { - if ctx.allowlisted_items().contains(&sub_item) && - consider_edge(edge_kind) - { - dependencies - .entry(sub_item) - .or_insert_with(Vec::new) - .push(item); - } - }, - &(), - ); - } - } - dependencies -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::HashSet; - - // Here we find the set of nodes that are reachable from any given - // node. This is a lattice mapping nodes to subsets of all nodes. Our join - // function is set union. - // - // This is our test graph: - // - // +---+ +---+ - // | | | | - // | 1 | .----| 2 | - // | | | | | - // +---+ | +---+ - // | | ^ - // | | | - // | +---+ '------' - // '----->| | - // | 3 | - // .------| |------. - // | +---+ | - // | ^ | - // v | v - // +---+ | +---+ +---+ - // | | | | | | | - // | 4 | | | 5 |--->| 6 | - // | | | | | | | - // +---+ | +---+ +---+ - // | | | | - // | | | v - // | +---+ | +---+ - // | | | | | | - // '----->| 7 |<-----' | 8 | - // | | | | - // +---+ +---+ - // - // And here is the mapping from a node to the set of nodes that are - // reachable from it within the test graph: - // - // 1: {3,4,5,6,7,8} - // 2: {2} - // 3: {3,4,5,6,7,8} - // 4: {3,4,5,6,7,8} - // 5: {3,4,5,6,7,8} - // 6: {8} - // 7: {3,4,5,6,7,8} - // 8: {} - - #[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)] - struct Node(usize); - - #[derive(Clone, Debug, Default, PartialEq, Eq)] - struct Graph(HashMap>); - - impl Graph { - fn make_test_graph() -> Graph { - let mut g = Graph::default(); - g.0.insert(Node(1), vec![Node(3)]); - g.0.insert(Node(2), vec![Node(2)]); - g.0.insert(Node(3), vec![Node(4), Node(5)]); - g.0.insert(Node(4), vec![Node(7)]); - g.0.insert(Node(5), vec![Node(6), Node(7)]); - g.0.insert(Node(6), vec![Node(8)]); - g.0.insert(Node(7), vec![Node(3)]); - g.0.insert(Node(8), vec![]); - g - } - - fn reverse(&self) -> Graph { - let mut reversed = Graph::default(); - for (node, edges) in &self.0 { - reversed.0.entry(*node).or_insert_with(Vec::new); - for referent in edges { - reversed - .0 - .entry(*referent) - .or_insert_with(Vec::new) - .push(*node); - } - } - reversed - } - } - - #[derive(Clone, Debug, PartialEq, Eq)] - struct ReachableFrom<'a> { - reachable: HashMap>, - graph: &'a Graph, - reversed: Graph, - } - - impl<'a> MonotoneFramework for ReachableFrom<'a> { - type Node = Node; - type Extra = &'a Graph; - type Output = HashMap>; - - fn new(graph: &'a Graph) -> Self { - let reversed = graph.reverse(); - ReachableFrom { - reachable: Default::default(), - graph, - reversed, - } - } - - fn initial_worklist(&self) -> Vec { - self.graph.0.keys().copied().collect() - } - - fn constrain(&mut self, node: Node) -> ConstrainResult { - // The set of nodes reachable from a node `x` is - // - // reachable(x) = s_0 U s_1 U ... U reachable(s_0) U reachable(s_1) U ... - // - // where there exist edges from `x` to each of `s_0, s_1, ...`. - // - // Yes, what follows is a **terribly** inefficient set union - // implementation. Don't copy this code outside of this test! 
- - let original_size = self.reachable.entry(node).or_default().len(); - - for sub_node in &self.graph.0[&node] { - self.reachable.get_mut(&node).unwrap().insert(*sub_node); - - let sub_reachable = - self.reachable.entry(*sub_node).or_default().clone(); - - for transitive in sub_reachable { - self.reachable.get_mut(&node).unwrap().insert(transitive); - } - } - - let new_size = self.reachable[&node].len(); - if original_size == new_size { - ConstrainResult::Same - } else { - ConstrainResult::Changed - } - } - - fn each_depending_on(&self, node: Node, mut f: F) - where - F: FnMut(Node), - { - for dep in &self.reversed.0[&node] { - f(*dep); - } - } - } - - impl<'a> From> for HashMap> { - fn from(reachable: ReachableFrom<'a>) -> Self { - reachable.reachable - } - } - - #[test] - fn monotone() { - let g = Graph::make_test_graph(); - let reachable = analyze::(&g); - println!("reachable = {reachable:#?}"); - - fn nodes(nodes: A) -> HashSet - where - A: AsRef<[usize]>, - { - nodes.as_ref().iter().copied().map(Node).collect() - } - - let mut expected = HashMap::default(); - expected.insert(Node(1), nodes([3, 4, 5, 6, 7, 8])); - expected.insert(Node(2), nodes([2])); - expected.insert(Node(3), nodes([3, 4, 5, 6, 7, 8])); - expected.insert(Node(4), nodes([3, 4, 5, 6, 7, 8])); - expected.insert(Node(5), nodes([3, 4, 5, 6, 7, 8])); - expected.insert(Node(6), nodes([8])); - expected.insert(Node(7), nodes([3, 4, 5, 6, 7, 8])); - expected.insert(Node(8), nodes([])); - println!("expected = {expected:#?}"); - - assert_eq!(reachable, expected); - } -} diff --git a/vendor/bindgen/ir/analysis/sizedness.rs b/vendor/bindgen/ir/analysis/sizedness.rs deleted file mode 100644 index ce3c2c3da15a47..00000000000000 --- a/vendor/bindgen/ir/analysis/sizedness.rs +++ /dev/null @@ -1,353 +0,0 @@ -//! Determining the sizedness of types (as base classes and otherwise). - -use super::{ - generate_dependencies, ConstrainResult, HasVtable, MonotoneFramework, -}; -use crate::ir::context::{BindgenContext, TypeId}; -use crate::ir::item::IsOpaque; -use crate::ir::traversal::EdgeKind; -use crate::ir::ty::TypeKind; -use crate::{Entry, HashMap}; -use std::{cmp, ops}; - -/// The result of the `Sizedness` analysis for an individual item. -/// -/// This is a chain lattice of the form: -/// -/// ```ignore -/// NonZeroSized -/// | -/// DependsOnTypeParam -/// | -/// ZeroSized -/// ``` -/// -/// We initially assume that all types are `ZeroSized` and then update our -/// understanding as we learn more about each type. -#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Default)] -pub(crate) enum SizednessResult { - /// The type is zero-sized. - /// - /// This means that if it is a C++ type, and is not being used as a base - /// member, then we must add an `_address` byte to enforce the - /// unique-address-per-distinct-object-instance rule. - #[default] - ZeroSized, - - /// Whether this type is zero-sized or not depends on whether a type - /// parameter is zero-sized or not. - /// - /// For example, given these definitions: - /// - /// ```c++ - /// template - /// class Flongo : public T {}; - /// - /// class Empty {}; - /// - /// class NonEmpty { int x; }; - /// ``` - /// - /// Then `Flongo` is zero-sized, and needs an `_address` byte - /// inserted, while `Flongo` is *not* zero-sized, and should *not* - /// have an `_address` byte inserted. - /// - /// We don't properly handle this situation correctly right now: - /// - DependsOnTypeParam, - - /// Has some size that is known to be greater than zero. 
That doesn't mean - /// it has a static size, but it is not zero sized for sure. In other words, - /// it might contain an incomplete array or some other dynamically sized - /// type. - NonZeroSized, -} - -impl SizednessResult { - /// Take the least upper bound of `self` and `rhs`. - pub(crate) fn join(self, rhs: Self) -> Self { - cmp::max(self, rhs) - } -} - -impl ops::BitOr for SizednessResult { - type Output = Self; - - fn bitor(self, rhs: SizednessResult) -> Self::Output { - self.join(rhs) - } -} - -impl ops::BitOrAssign for SizednessResult { - fn bitor_assign(&mut self, rhs: SizednessResult) { - *self = self.join(rhs); - } -} - -/// An analysis that computes the sizedness of all types. -/// -/// * For types with known sizes -- for example pointers, scalars, etc... -- -/// they are assigned `NonZeroSized`. -/// -/// * For compound structure types with one or more fields, they are assigned -/// `NonZeroSized`. -/// -/// * For compound structure types without any fields, the results of the bases -/// are `join`ed. -/// -/// * For type parameters, `DependsOnTypeParam` is assigned. -#[derive(Debug)] -pub(crate) struct SizednessAnalysis<'ctx> { - ctx: &'ctx BindgenContext, - dependencies: HashMap>, - // Incremental results of the analysis. Missing entries are implicitly - // considered `ZeroSized`. - sized: HashMap, -} - -impl SizednessAnalysis<'_> { - fn consider_edge(kind: EdgeKind) -> bool { - // These are the only edges that can affect whether a type is - // zero-sized or not. - matches!( - kind, - EdgeKind::TemplateArgument | - EdgeKind::TemplateParameterDefinition | - EdgeKind::TemplateDeclaration | - EdgeKind::TypeReference | - EdgeKind::BaseMember | - EdgeKind::Field - ) - } - - /// Insert an incremental result, and return whether this updated our - /// knowledge of types and we should continue the analysis. 
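// A stand-alone sketch of the "join the bases" rule described above for a
// compound type with no fields of its own, using a stand-in copy of the
// three-point chain lattice (`ZeroSized` < `DependsOnTypeParam` <
// `NonZeroSized`, join = `max`); the name `Sized3` is illustrative only.
// With the C++ example from the comment above, `Flongo<Empty>` stays
// zero-sized while `Flongo<NonEmpty>` does not (the comment also notes that
// the type-parameter case is not fully handled today).
#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
enum Sized3 {
    ZeroSized,
    DependsOnTypeParam,
    NonZeroSized,
}

fn join_bases(bases: &[Sized3]) -> Sized3 {
    bases.iter().copied().fold(Sized3::ZeroSized, std::cmp::max)
}

fn main() {
    let empty = Sized3::ZeroSized; //        class Empty {};
    let non_empty = Sized3::NonZeroSized; // class NonEmpty { int x; };

    assert_eq!(join_bases(&[empty]), Sized3::ZeroSized); //        Flongo<Empty>
    assert_eq!(join_bases(&[non_empty]), Sized3::NonZeroSized); // Flongo<NonEmpty>
}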
- fn insert( - &mut self, - id: TypeId, - result: SizednessResult, - ) -> ConstrainResult { - trace!("inserting {result:?} for {id:?}"); - - if let SizednessResult::ZeroSized = result { - return ConstrainResult::Same; - } - - match self.sized.entry(id) { - Entry::Occupied(mut entry) => { - if *entry.get() < result { - entry.insert(result); - ConstrainResult::Changed - } else { - ConstrainResult::Same - } - } - Entry::Vacant(entry) => { - entry.insert(result); - ConstrainResult::Changed - } - } - } - - fn forward(&mut self, from: TypeId, to: TypeId) -> ConstrainResult { - match self.sized.get(&from) { - None => ConstrainResult::Same, - Some(r) => self.insert(to, *r), - } - } -} - -impl<'ctx> MonotoneFramework for SizednessAnalysis<'ctx> { - type Node = TypeId; - type Extra = &'ctx BindgenContext; - type Output = HashMap; - - fn new(ctx: &'ctx BindgenContext) -> SizednessAnalysis<'ctx> { - let dependencies = generate_dependencies(ctx, Self::consider_edge) - .into_iter() - .filter_map(|(id, sub_ids)| { - id.as_type_id(ctx).map(|id| { - ( - id, - sub_ids - .into_iter() - .filter_map(|s| s.as_type_id(ctx)) - .collect::>(), - ) - }) - }) - .collect(); - - let sized = HashMap::default(); - - SizednessAnalysis { - ctx, - dependencies, - sized, - } - } - - fn initial_worklist(&self) -> Vec { - self.ctx - .allowlisted_items() - .iter() - .filter_map(|id| id.as_type_id(self.ctx)) - .collect() - } - - fn constrain(&mut self, id: TypeId) -> ConstrainResult { - trace!("constrain {id:?}"); - - if let Some(SizednessResult::NonZeroSized) = self.sized.get(&id) { - trace!(" already know it is not zero-sized"); - return ConstrainResult::Same; - } - - if id.has_vtable_ptr(self.ctx) { - trace!(" has an explicit vtable pointer, therefore is not zero-sized"); - return self.insert(id, SizednessResult::NonZeroSized); - } - - let ty = self.ctx.resolve_type(id); - - if id.is_opaque(self.ctx, &()) { - trace!(" type is opaque; checking layout..."); - let result = - ty.layout(self.ctx).map_or(SizednessResult::ZeroSized, |l| { - if l.size == 0 { - trace!(" ...layout has size == 0"); - SizednessResult::ZeroSized - } else { - trace!(" ...layout has size > 0"); - SizednessResult::NonZeroSized - } - }); - return self.insert(id, result); - } - - match *ty.kind() { - TypeKind::Void => { - trace!(" void is zero-sized"); - self.insert(id, SizednessResult::ZeroSized) - } - - TypeKind::TypeParam => { - trace!( - " type params sizedness depends on what they're \ - instantiated as" - ); - self.insert(id, SizednessResult::DependsOnTypeParam) - } - - TypeKind::Int(..) | - TypeKind::Float(..) | - TypeKind::Complex(..) | - TypeKind::Function(..) | - TypeKind::Enum(..) | - TypeKind::Reference(..) | - TypeKind::NullPtr | - TypeKind::ObjCId | - TypeKind::ObjCSel | - TypeKind::Pointer(..) => { - trace!(" {:?} is known not to be zero-sized", ty.kind()); - self.insert(id, SizednessResult::NonZeroSized) - } - - TypeKind::ObjCInterface(..) 
=> { - trace!(" obj-c interfaces always have at least the `isa` pointer"); - self.insert(id, SizednessResult::NonZeroSized) - } - - TypeKind::TemplateAlias(t, _) | - TypeKind::Alias(t) | - TypeKind::BlockPointer(t) | - TypeKind::ResolvedTypeRef(t) => { - trace!(" aliases and type refs forward to their inner type"); - self.forward(t, id) - } - - TypeKind::TemplateInstantiation(ref inst) => { - trace!( - " template instantiations are zero-sized if their \ - definition is zero-sized" - ); - self.forward(inst.template_definition(), id) - } - - TypeKind::Array(_, 0) => { - trace!(" arrays of zero elements are zero-sized"); - self.insert(id, SizednessResult::ZeroSized) - } - TypeKind::Array(..) => { - trace!(" arrays of > 0 elements are not zero-sized"); - self.insert(id, SizednessResult::NonZeroSized) - } - TypeKind::Vector(..) => { - trace!(" vectors are not zero-sized"); - self.insert(id, SizednessResult::NonZeroSized) - } - - TypeKind::Comp(ref info) => { - trace!(" comp considers its own fields and bases"); - - if !info.fields().is_empty() { - return self.insert(id, SizednessResult::NonZeroSized); - } - - let result = info - .base_members() - .iter() - .filter_map(|base| self.sized.get(&base.ty)) - .fold(SizednessResult::ZeroSized, |a, b| a.join(*b)); - - self.insert(id, result) - } - - TypeKind::Opaque => { - unreachable!("covered by the .is_opaque() check above") - } - - TypeKind::UnresolvedTypeRef(..) => { - unreachable!("Should have been resolved after parsing!"); - } - } - } - - fn each_depending_on(&self, id: TypeId, mut f: F) - where - F: FnMut(TypeId), - { - if let Some(edges) = self.dependencies.get(&id) { - for ty in edges { - trace!("enqueue {ty:?} into worklist"); - f(*ty); - } - } - } -} - -impl<'ctx> From> for HashMap { - fn from(analysis: SizednessAnalysis<'ctx>) -> Self { - // We let the lack of an entry mean "ZeroSized" to save space. - extra_assert!(analysis - .sized - .values() - .all(|v| { *v != SizednessResult::ZeroSized })); - - analysis.sized - } -} - -/// A convenience trait for querying whether some type or ID is sized. -/// -/// This is not for _computing_ whether the thing is sized, it is for looking up -/// the results of the `Sizedness` analysis's computations for a specific thing. -pub(crate) trait Sizedness { - /// Get the sizedness of this type. - fn sizedness(&self, ctx: &BindgenContext) -> SizednessResult; - - /// Is the sizedness for this type `SizednessResult::ZeroSized`? - fn is_zero_sized(&self, ctx: &BindgenContext) -> bool { - self.sizedness(ctx) == SizednessResult::ZeroSized - } -} diff --git a/vendor/bindgen/ir/analysis/template_params.rs b/vendor/bindgen/ir/analysis/template_params.rs deleted file mode 100644 index df8f861cfe88a4..00000000000000 --- a/vendor/bindgen/ir/analysis/template_params.rs +++ /dev/null @@ -1,601 +0,0 @@ -//! Discover which template type parameters are actually used. -//! -//! ### Why do we care? -//! -//! C++ allows ignoring template parameters, while Rust does not. Usually we can -//! blindly stick a `PhantomData` inside a generic Rust struct to make up for -//! this. That doesn't work for templated type aliases, however: -//! -//! ```C++ -//! template -//! using Fml = int; -//! ``` -//! -//! If we generate the naive Rust code for this alias, we get: -//! -//! ```ignore -//! pub(crate) type Fml = ::std::os::raw::int; -//! ``` -//! -//! And this is rejected by `rustc` due to the unused type parameter. -//! -//! (Aside: in these simple cases, `libclang` will often just give us the -//! 
aliased type directly, and we will never even know we were dealing with -//! aliases, let alone templated aliases. It's the more convoluted scenarios -//! where we get to have some fun...) -//! -//! For such problematic template aliases, we could generate a tuple whose -//! second member is a `PhantomData`. Or, if we wanted to go the extra mile, -//! we could even generate some smarter wrapper that implements `Deref`, -//! `DerefMut`, `From`, `Into`, `AsRef`, and `AsMut` to the actually aliased -//! type. However, this is still lackluster: -//! -//! 1. Even with a billion conversion-trait implementations, using the generated -//! bindings is rather un-ergonomic. -//! 2. With either of these solutions, we need to keep track of which aliases -//! we've transformed like this in order to generate correct uses of the -//! wrapped type. -//! -//! Given that we have to properly track which template parameters ended up used -//! for (2), we might as well leverage that information to make ergonomic -//! bindings that don't contain any unused type parameters at all, and -//! completely avoid the pain of (1). -//! -//! ### How do we determine which template parameters are used? -//! -//! Determining which template parameters are actually used is a trickier -//! problem than it might seem at a glance. On the one hand, trivial uses are -//! easy to detect: -//! -//! ```C++ -//! template -//! class Foo { -//! T trivial_use_of_t; -//! }; -//! ``` -//! -//! It gets harder when determining if one template parameter is used depends on -//! determining if another template parameter is used. In this example, whether -//! `U` is used depends on whether `T` is used. -//! -//! ```C++ -//! template -//! class DoesntUseT { -//! int x; -//! }; -//! -//! template -//! class Fml { -//! DoesntUseT lololol; -//! }; -//! ``` -//! -//! We can express the set of used template parameters as a constraint solving -//! problem (where the set of template parameters used by a given IR item is the -//! union of its sub-item's used template parameters) and iterate to a -//! fixed-point. -//! -//! We use the `ir::analysis::MonotoneFramework` infrastructure for this -//! fix-point analysis, where our lattice is the mapping from each IR item to -//! the powerset of the template parameters that appear in the input C++ header, -//! our join function is set union. The set of template parameters appearing in -//! the program is finite, as is the number of IR items. We start at our -//! lattice's bottom element: every item mapping to an empty set of template -//! parameters. Our analysis only adds members to each item's set of used -//! template parameters, never removes them, so it is monotone. Because our -//! lattice is finite and our constraint function is monotone, iteration to a -//! fix-point will terminate. -//! -//! See `src/ir/analysis.rs` for more. - -use super::{ConstrainResult, MonotoneFramework}; -use crate::ir::context::{BindgenContext, ItemId}; -use crate::ir::item::{Item, ItemSet}; -use crate::ir::template::{TemplateInstantiation, TemplateParameters}; -use crate::ir::traversal::{EdgeKind, Trace}; -use crate::ir::ty::TypeKind; -use crate::{HashMap, HashSet}; - -/// An analysis that finds for each IR item its set of template parameters that -/// it uses. 
-/// -/// We use the monotone constraint function `template_param_usage`, defined as -/// follows: -/// -/// * If `T` is a named template type parameter, it trivially uses itself: -/// -/// ```ignore -/// template_param_usage(T) = { T } -/// ``` -/// -/// * If `inst` is a template instantiation, `inst.args` are the template -/// instantiation's template arguments, `inst.def` is the template definition -/// being instantiated, and `inst.def.params` is the template definition's -/// template parameters, then the instantiation's usage is the union of each -/// of its arguments' usages *if* the corresponding template parameter is in -/// turn used by the template definition: -/// -/// ```ignore -/// template_param_usage(inst) = union( -/// template_param_usage(inst.args[i]) -/// for i in 0..length(inst.args.length) -/// if inst.def.params[i] in template_param_usage(inst.def) -/// ) -/// ``` -/// -/// * Finally, for all other IR item kinds, we use our lattice's `join` -/// operation: set union with each successor of the given item's template -/// parameter usage: -/// -/// ```ignore -/// template_param_usage(v) = -/// union(template_param_usage(w) for w in successors(v)) -/// ``` -/// -/// Note that we ignore certain edges in the graph, such as edges from a -/// template declaration to its template parameters' definitions for this -/// analysis. If we didn't, then we would mistakenly determine that ever -/// template parameter is always used. -/// -/// The final wrinkle is handling of blocklisted types. Normally, we say that -/// the set of allowlisted items is the transitive closure of items explicitly -/// called out for allowlisting, *without* any items explicitly called out as -/// blocklisted. However, for the purposes of this analysis's correctness, we -/// simplify and consider run the analysis on the full transitive closure of -/// allowlisted items. We do, however, treat instantiations of blocklisted items -/// specially; see `constrain_instantiation_of_blocklisted_template` and its -/// documentation for details. -#[derive(Debug, Clone)] -pub(crate) struct UsedTemplateParameters<'ctx> { - ctx: &'ctx BindgenContext, - - // The Option is only there for temporary moves out of the hash map. See the - // comments in `UsedTemplateParameters::constrain` below. - used: HashMap>, - - dependencies: HashMap>, - - // The set of allowlisted items, without any blocklisted items reachable - // from the allowlisted items which would otherwise be considered - // allowlisted as well. - allowlisted_items: HashSet, -} - -impl UsedTemplateParameters<'_> { - fn consider_edge(kind: EdgeKind) -> bool { - match kind { - // For each of these kinds of edges, if the referent uses a template - // parameter, then it should be considered that the origin of the - // edge also uses the template parameter. - EdgeKind::TemplateArgument | - EdgeKind::BaseMember | - EdgeKind::Field | - EdgeKind::Constructor | - EdgeKind::Destructor | - EdgeKind::VarType | - EdgeKind::FunctionReturn | - EdgeKind::FunctionParameter | - EdgeKind::TypeReference => true, - - // An inner var or type using a template parameter is orthogonal - // from whether we use it. See template-param-usage-{6,11}.hpp. - EdgeKind::InnerVar | EdgeKind::InnerType => false, - - // We can't emit machine code for new monomorphizations of class - // templates' methods (and don't detect explicit instantiations) so - // we must ignore template parameters that are only used by - // methods. 
This doesn't apply to a function type's return or - // parameter types, however, because of type aliases of function - // pointers that use template parameters, eg - // tests/headers/struct_with_typedef_template_arg.hpp - EdgeKind::Method => false, - - // If we considered these edges, we would end up mistakenly claiming - // that every template parameter always used. - EdgeKind::TemplateDeclaration | - EdgeKind::TemplateParameterDefinition => false, - - // Since we have to be careful about which edges we consider for - // this analysis to be correct, we ignore generic edges. We also - // avoid a `_` wild card to force authors of new edge kinds to - // determine whether they need to be considered by this analysis. - EdgeKind::Generic => false, - } - } - - fn take_this_id_usage_set>( - &mut self, - this_id: Id, - ) -> ItemSet { - let this_id = this_id.into(); - self.used - .get_mut(&this_id) - .expect( - "Should have a set of used template params for every item \ - id", - ) - .take() - .expect( - "Should maintain the invariant that all used template param \ - sets are `Some` upon entry of `constrain`", - ) - } - - /// We say that blocklisted items use all of their template parameters. The - /// blocklisted type is most likely implemented explicitly by the user, - /// since it won't be in the generated bindings, and we don't know exactly - /// what they'll to with template parameters, but we can push the issue down - /// the line to them. - fn constrain_instantiation_of_blocklisted_template( - &self, - this_id: ItemId, - used_by_this_id: &mut ItemSet, - instantiation: &TemplateInstantiation, - ) { - trace!( - " instantiation of blocklisted template, uses all template \ - arguments" - ); - - let args = instantiation - .template_arguments() - .iter() - .map(|a| { - a.into_resolver() - .through_type_refs() - .through_type_aliases() - .resolve(self.ctx) - .id() - }) - .filter(|a| *a != this_id) - .flat_map(|a| { - self.used - .get(&a) - .expect("Should have a used entry for the template arg") - .as_ref() - .expect( - "Because a != this_id, and all used template \ - param sets other than this_id's are `Some`, \ - a's used template param set should be `Some`", - ) - .iter() - }); - - used_by_this_id.extend(args); - } - - /// A template instantiation's concrete template argument is only used if - /// the template definition uses the corresponding template parameter. 
- fn constrain_instantiation( - &self, - this_id: ItemId, - used_by_this_id: &mut ItemSet, - instantiation: &TemplateInstantiation, - ) { - trace!(" template instantiation"); - - let decl = self.ctx.resolve_type(instantiation.template_definition()); - let args = instantiation.template_arguments(); - - let params = decl.self_template_params(self.ctx); - - debug_assert!(this_id != instantiation.template_definition()); - let used_by_def = self.used - .get(&instantiation.template_definition().into()) - .expect("Should have a used entry for instantiation's template definition") - .as_ref() - .expect("And it should be Some because only this_id's set is None, and an \ - instantiation's template definition should never be the \ - instantiation itself"); - - for (arg, param) in args.iter().zip(params.iter()) { - trace!( - " instantiation's argument {arg:?} is used if definition's \ - parameter {param:?} is used", - ); - - if used_by_def.contains(¶m.into()) { - trace!(" param is used by template definition"); - - let arg = arg - .into_resolver() - .through_type_refs() - .through_type_aliases() - .resolve(self.ctx) - .id(); - - if arg == this_id { - continue; - } - - let used_by_arg = self - .used - .get(&arg) - .expect("Should have a used entry for the template arg") - .as_ref() - .expect( - "Because arg != this_id, and all used template \ - param sets other than this_id's are `Some`, \ - arg's used template param set should be \ - `Some`", - ) - .iter(); - used_by_this_id.extend(used_by_arg); - } - } - } - - /// The join operation on our lattice: the set union of all of this ID's - /// successors. - fn constrain_join(&self, used_by_this_id: &mut ItemSet, item: &Item) { - trace!(" other item: join with successors' usage"); - - item.trace( - self.ctx, - &mut |sub_id, edge_kind| { - // Ignore ourselves, since union with ourself is a - // no-op. Ignore edges that aren't relevant to the - // analysis. - if sub_id == item.id() || !Self::consider_edge(edge_kind) { - return; - } - - let used_by_sub_id = self - .used - .get(&sub_id) - .expect("Should have a used set for the sub_id successor") - .as_ref() - .expect( - "Because sub_id != id, and all used template \ - param sets other than id's are `Some`, \ - sub_id's used template param set should be \ - `Some`", - ) - .iter(); - - trace!( - " union with {sub_id:?}'s usage: {:?}", - used_by_sub_id.clone().collect::>() - ); - - used_by_this_id.extend(used_by_sub_id); - }, - &(), - ); - } -} - -impl<'ctx> MonotoneFramework for UsedTemplateParameters<'ctx> { - type Node = ItemId; - type Extra = &'ctx BindgenContext; - type Output = HashMap; - - fn new(ctx: &'ctx BindgenContext) -> UsedTemplateParameters<'ctx> { - let mut used = HashMap::default(); - let mut dependencies = HashMap::default(); - let allowlisted_items: HashSet<_> = - ctx.allowlisted_items().iter().copied().collect(); - - let allowlisted_and_blocklisted_items: ItemSet = allowlisted_items - .iter() - .copied() - .flat_map(|i| { - let mut reachable = vec![i]; - i.trace( - ctx, - &mut |s, _| { - reachable.push(s); - }, - &(), - ); - reachable - }) - .collect(); - - for item in allowlisted_and_blocklisted_items { - dependencies.entry(item).or_insert_with(Vec::new); - used.entry(item).or_insert_with(|| Some(ItemSet::new())); - - { - // We reverse our natural IR graph edges to find dependencies - // between nodes. 
- item.trace( - ctx, - &mut |sub_item: ItemId, _| { - used.entry(sub_item) - .or_insert_with(|| Some(ItemSet::new())); - dependencies - .entry(sub_item) - .or_insert_with(Vec::new) - .push(item); - }, - &(), - ); - } - - // Additionally, whether a template instantiation's template - // arguments are used depends on whether the template declaration's - // generic template parameters are used. - let item_kind = - ctx.resolve_item(item).as_type().map(|ty| ty.kind()); - if let Some(TypeKind::TemplateInstantiation(inst)) = item_kind { - let decl = ctx.resolve_type(inst.template_definition()); - let args = inst.template_arguments(); - - // Although template definitions should always have - // template parameters, there is a single exception: - // opaque templates. Hence the unwrap_or. - let params = decl.self_template_params(ctx); - - for (arg, param) in args.iter().zip(params.iter()) { - let arg = arg - .into_resolver() - .through_type_aliases() - .through_type_refs() - .resolve(ctx) - .id(); - - let param = param - .into_resolver() - .through_type_aliases() - .through_type_refs() - .resolve(ctx) - .id(); - - used.entry(arg).or_insert_with(|| Some(ItemSet::new())); - used.entry(param).or_insert_with(|| Some(ItemSet::new())); - - dependencies - .entry(arg) - .or_insert_with(Vec::new) - .push(param); - } - } - } - - if cfg!(feature = "__testing_only_extra_assertions") { - // Invariant: The `used` map has an entry for every allowlisted - // item, as well as all explicitly blocklisted items that are - // reachable from allowlisted items. - // - // Invariant: the `dependencies` map has an entry for every - // allowlisted item. - // - // (This is so that every item we call `constrain` on is guaranteed - // to have a set of template parameters, and we can allow - // blocklisted templates to use all of their parameters). - for item in &allowlisted_items { - extra_assert!(used.contains_key(item)); - extra_assert!(dependencies.contains_key(item)); - item.trace( - ctx, - &mut |sub_item, _| { - extra_assert!(used.contains_key(&sub_item)); - extra_assert!(dependencies.contains_key(&sub_item)); - }, - &(), - ); - } - } - - UsedTemplateParameters { - ctx, - used, - dependencies, - allowlisted_items, - } - } - - fn initial_worklist(&self) -> Vec { - // The transitive closure of all allowlisted items, including explicitly - // blocklisted items. - self.ctx - .allowlisted_items() - .iter() - .copied() - .flat_map(|i| { - let mut reachable = vec![i]; - i.trace( - self.ctx, - &mut |s, _| { - reachable.push(s); - }, - &(), - ); - reachable - }) - .collect() - } - - fn constrain(&mut self, id: ItemId) -> ConstrainResult { - // Invariant: all hash map entries' values are `Some` upon entering and - // exiting this method. - extra_assert!(self.used.values().all(|v| v.is_some())); - - // Take the set for this ID out of the hash map while we mutate it based - // on other hash map entries. We *must* put it back into the hash map at - // the end of this method. This allows us to side-step HashMap's lack of - // an analog to slice::split_at_mut. - let mut used_by_this_id = self.take_this_id_usage_set(id); - - trace!("constrain {id:?}"); - trace!(" initially, used set is {used_by_this_id:?}"); - - let original_len = used_by_this_id.len(); - - let item = self.ctx.resolve_item(id); - let ty_kind = item.as_type().map(|ty| ty.kind()); - match ty_kind { - // Named template type parameters trivially use themselves. 
- Some(&TypeKind::TypeParam) => { - trace!(" named type, trivially uses itself"); - used_by_this_id.insert(id); - } - // Template instantiations only use their template arguments if the - // template definition uses the corresponding template parameter. - Some(TypeKind::TemplateInstantiation(inst)) => { - if self - .allowlisted_items - .contains(&inst.template_definition().into()) - { - self.constrain_instantiation( - id, - &mut used_by_this_id, - inst, - ); - } else { - self.constrain_instantiation_of_blocklisted_template( - id, - &mut used_by_this_id, - inst, - ); - } - } - // Otherwise, add the union of each of its referent item's template - // parameter usage. - _ => self.constrain_join(&mut used_by_this_id, item), - } - - trace!(" finally, used set is {used_by_this_id:?}"); - - let new_len = used_by_this_id.len(); - assert!( - new_len >= original_len, - "This is the property that ensures this function is monotone -- \ - if it doesn't hold, the analysis might never terminate!" - ); - - // Put the set back in the hash map and restore our invariant. - debug_assert!(self.used[&id].is_none()); - self.used.insert(id, Some(used_by_this_id)); - extra_assert!(self.used.values().all(|v| v.is_some())); - - if new_len == original_len { - ConstrainResult::Same - } else { - ConstrainResult::Changed - } - } - - fn each_depending_on(&self, item: ItemId, mut f: F) - where - F: FnMut(ItemId), - { - if let Some(edges) = self.dependencies.get(&item) { - for item in edges { - trace!("enqueue {item:?} into worklist"); - f(*item); - } - } - } -} - -impl<'ctx> From> for HashMap { - fn from(used_templ_params: UsedTemplateParameters<'ctx>) -> Self { - used_templ_params - .used - .into_iter() - .map(|(k, v)| (k, v.unwrap())) - .collect() - } -} diff --git a/vendor/bindgen/ir/annotations.rs b/vendor/bindgen/ir/annotations.rs deleted file mode 100644 index 7f5d74b3ee7549..00000000000000 --- a/vendor/bindgen/ir/annotations.rs +++ /dev/null @@ -1,259 +0,0 @@ -//! Types and functions related to bindgen annotation comments. -//! -//! Users can add annotations in doc comments to types that they would like to -//! replace other types with, mark as opaque, etc. This module deals with all of -//! that stuff. - -use std::str::FromStr; - -use crate::clang; - -/// What kind of visibility modifier should be used for a struct or field? -#[derive(Copy, PartialEq, Eq, PartialOrd, Ord, Clone, Debug, Default)] -pub enum FieldVisibilityKind { - /// Fields are marked as private, i.e., struct Foo {bar: bool} - Private, - /// Fields are marked as crate public, i.e., struct Foo {pub(crate) bar: bool} - PublicCrate, - /// Fields are marked as public, i.e., struct Foo {pub bar: bool} - #[default] - Public, -} - -impl FromStr for FieldVisibilityKind { - type Err = String; - - fn from_str(s: &str) -> Result { - match s { - "private" => Ok(Self::Private), - "crate" => Ok(Self::PublicCrate), - "public" => Ok(Self::Public), - _ => Err(format!("Invalid visibility kind: `{s}`")), - } - } -} - -impl std::fmt::Display for FieldVisibilityKind { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - let s = match self { - FieldVisibilityKind::Private => "private", - FieldVisibilityKind::PublicCrate => "crate", - FieldVisibilityKind::Public => "public", - }; - - s.fmt(f) - } -} - -/// What kind of accessor should we provide for a field? -#[derive(Copy, PartialEq, Eq, Clone, Debug)] -pub(crate) enum FieldAccessorKind { - /// No accessor. - None, - /// Plain accessor. - Regular, - /// Unsafe accessor. 
- Unsafe, - /// Immutable accessor. - Immutable, -} - -/// Annotations for a given item, or a field. -/// -/// You can see the kind of comments that are accepted in the [Doxygen documentation](https://www.doxygen.nl/manual/docblocks.html). -#[derive(Default, Clone, PartialEq, Eq, Debug)] -pub(crate) struct Annotations { - /// Whether this item is marked as opaque. Only applies to types. - opaque: bool, - /// Whether this item should be hidden from the output. Only applies to - /// types, or enum variants. - hide: bool, - /// Whether this type should be replaced by another. The name is a - /// namespace-aware path. - use_instead_of: Option>, - /// Manually disable deriving copy/clone on this type. Only applies to - /// struct or union types. - disallow_copy: bool, - /// Manually disable deriving debug on this type. - disallow_debug: bool, - /// Manually disable deriving/implement default on this type. - disallow_default: bool, - /// Whether to add a `#[must_use]` annotation to this type. - must_use_type: bool, - /// Visibility of struct fields. You can set this on - /// structs (it will apply to all the fields), or individual fields. - visibility_kind: Option, - /// The kind of accessor this field will have. Also can be applied to - /// structs so all the fields inside share it by default. - accessor_kind: Option, - /// Whether this enum variant should be constified. - /// - /// This is controlled by the `constant` attribute, this way: - /// - /// ```cpp - /// enum Foo { - /// Bar = 0, /**<

{ + #[cfg_attr(feature = "perf-inline", inline(always))] + fn find(&self, haystack: &[u8], span: Span) -> Option { + (**self).find(haystack, span) + } + + #[cfg_attr(feature = "perf-inline", inline(always))] + fn prefix(&self, haystack: &[u8], span: Span) -> Option { + (**self).prefix(haystack, span) + } + + #[cfg_attr(feature = "perf-inline", inline(always))] + fn memory_usage(&self) -> usize { + (**self).memory_usage() + } + + #[cfg_attr(feature = "perf-inline", inline(always))] + fn is_fast(&self) -> bool { + (&**self).is_fast() + } +} + +/// A type that encapsulates the selection of a prefilter algorithm from a +/// sequence of needles. +/// +/// The existence of this type is a little tricky, because we don't (currently) +/// use it for performing a search. Instead, we really only consume it by +/// converting the underlying prefilter into a trait object, whether that be +/// `dyn PrefilterI` or `dyn Strategy` (for the meta regex engine). In order +/// to avoid re-copying the prefilter selection logic, we isolate it here, and +/// then force anything downstream that wants to convert it to a trait object +/// to do trivial case analysis on it. +/// +/// One wonders whether we *should* use an enum instead of a trait object. +/// At time of writing, I chose trait objects based on instinct because 1) I +/// knew I wasn't going to inline anything and 2) there would potentially be +/// many different choices. However, as of time of writing, I haven't actually +/// compared the trait object approach to the enum approach. That probably +/// should be litigated, but I ran out of steam. +/// +/// Note that if the `alloc` feature is disabled, then values of this type +/// are (and should) never be constructed. Also, in practice, for any of the +/// prefilters to be selected, you'll need at least one of the `perf-literal-*` +/// features enabled. +#[derive(Clone, Debug)] +pub(crate) enum Choice { + Memchr(Memchr), + Memchr2(Memchr2), + Memchr3(Memchr3), + Memmem(Memmem), + Teddy(Teddy), + ByteSet(ByteSet), + AhoCorasick(AhoCorasick), +} + +impl Choice { + /// Select what is believed to be the best prefilter algorithm for the + /// match semantics and sequence of needles given. + /// + /// This selection algorithm uses the needles as given without any + /// modification. For example, if `[bar]` is given, then this doesn't + /// try to select `memchr` for `b`. Instead, it would select `memmem` + /// for `bar`. If callers would want `memchr` selected for `[bar]`, then + /// callers should massages the literals themselves. That is, callers are + /// responsible for heuristics surrounding which sequence of literals is + /// best. + /// + /// What this selection algorithm does is attempt to use the fastest + /// prefilter that works for the literals given. So if `[a, b]`, is given, + /// then `memchr2` is selected. + /// + /// Of course, which prefilter is selected is also subject to what + /// is available. For example, if `alloc` isn't enabled, then + /// that limits which prefilters can be selected. Similarly, if + /// `perf-literal-substring` isn't enabled, then nothing from the `memchr` + /// crate can be returned. + pub(crate) fn new>( + kind: MatchKind, + needles: &[B], + ) -> Option { + // An empty set means the regex matches nothing, so no sense in + // building a prefilter. 
+ if needles.len() == 0 { + debug!("prefilter building failed: found empty set of literals"); + return None; + } + // If the regex can match the empty string, then the prefilter + // will by definition match at every position. This is obviously + // completely ineffective. + if needles.iter().any(|n| n.as_ref().is_empty()) { + debug!("prefilter building failed: literals match empty string"); + return None; + } + // BREADCRUMBS: Perhaps the literal optimizer should special case + // sequences of length two or three if the leading bytes of each are + // "rare"? Or perhaps, if there are two or three total possible leading + // bytes, regardless of the number of literals, and all are rare... + // Then well, perhaps we should use memchr2 or memchr3 in those cases? + if let Some(pre) = Memchr::new(kind, needles) { + debug!("prefilter built: memchr"); + return Some(Choice::Memchr(pre)); + } + if let Some(pre) = Memchr2::new(kind, needles) { + debug!("prefilter built: memchr2"); + return Some(Choice::Memchr2(pre)); + } + if let Some(pre) = Memchr3::new(kind, needles) { + debug!("prefilter built: memchr3"); + return Some(Choice::Memchr3(pre)); + } + if let Some(pre) = Memmem::new(kind, needles) { + debug!("prefilter built: memmem"); + return Some(Choice::Memmem(pre)); + } + if let Some(pre) = Teddy::new(kind, needles) { + debug!("prefilter built: teddy"); + return Some(Choice::Teddy(pre)); + } + if let Some(pre) = ByteSet::new(kind, needles) { + debug!("prefilter built: byteset"); + return Some(Choice::ByteSet(pre)); + } + if let Some(pre) = AhoCorasick::new(kind, needles) { + debug!("prefilter built: aho-corasick"); + return Some(Choice::AhoCorasick(pre)); + } + debug!("prefilter building failed: no strategy could be found"); + None + } +} + +/// Extracts all of the prefix literals from the given HIR expressions into a +/// single `Seq`. The literals in the sequence are ordered with respect to the +/// order of the given HIR expressions and consistent with the match semantics +/// given. +/// +/// The sequence returned is "optimized." That is, they may be shrunk or even +/// truncated according to heuristics with the intent of making them more +/// useful as a prefilter. (Which translates to both using faster algorithms +/// and minimizing the false positive rate.) +/// +/// Note that this erases any connection between the literals and which pattern +/// (or patterns) they came from. +/// +/// The match kind given must correspond to the match semantics of the regex +/// that is represented by the HIRs given. The match semantics may change the +/// literal sequence returned. +#[cfg(feature = "syntax")] +pub(crate) fn prefixes(kind: MatchKind, hirs: &[H]) -> literal::Seq +where + H: core::borrow::Borrow, +{ + let mut extractor = literal::Extractor::new(); + extractor.kind(literal::ExtractKind::Prefix); + + let mut prefixes = literal::Seq::empty(); + for hir in hirs { + prefixes.union(&mut extractor.extract(hir.borrow())); + } + debug!( + "prefixes (len={:?}, exact={:?}) extracted before optimization: {:?}", + prefixes.len(), + prefixes.is_exact(), + prefixes + ); + match kind { + MatchKind::All => { + prefixes.sort(); + prefixes.dedup(); + } + MatchKind::LeftmostFirst => { + prefixes.optimize_for_prefix_by_preference(); + } + } + debug!( + "prefixes (len={:?}, exact={:?}) extracted after optimization: {:?}", + prefixes.len(), + prefixes.is_exact(), + prefixes + ); + prefixes +} + +/// Like `prefixes`, but for all suffixes of all matches for the given HIRs. 
+#[cfg(feature = "syntax")] +pub(crate) fn suffixes(kind: MatchKind, hirs: &[H]) -> literal::Seq +where + H: core::borrow::Borrow, +{ + let mut extractor = literal::Extractor::new(); + extractor.kind(literal::ExtractKind::Suffix); + + let mut suffixes = literal::Seq::empty(); + for hir in hirs { + suffixes.union(&mut extractor.extract(hir.borrow())); + } + debug!( + "suffixes (len={:?}, exact={:?}) extracted before optimization: {:?}", + suffixes.len(), + suffixes.is_exact(), + suffixes + ); + match kind { + MatchKind::All => { + suffixes.sort(); + suffixes.dedup(); + } + MatchKind::LeftmostFirst => { + suffixes.optimize_for_suffix_by_preference(); + } + } + debug!( + "suffixes (len={:?}, exact={:?}) extracted after optimization: {:?}", + suffixes.len(), + suffixes.is_exact(), + suffixes + ); + suffixes +} diff --git a/vendor/regex-automata/src/util/prefilter/teddy.rs b/vendor/regex-automata/src/util/prefilter/teddy.rs new file mode 100644 index 00000000000000..fc79f2b2f3f1d4 --- /dev/null +++ b/vendor/regex-automata/src/util/prefilter/teddy.rs @@ -0,0 +1,160 @@ +use crate::util::{ + prefilter::PrefilterI, + search::{MatchKind, Span}, +}; + +#[derive(Clone, Debug)] +pub(crate) struct Teddy { + #[cfg(not(feature = "perf-literal-multisubstring"))] + _unused: (), + /// The actual Teddy searcher. + /// + /// Technically, it's possible that Teddy doesn't actually get used, since + /// Teddy does require its haystack to at least be of a certain size + /// (usually around the size of whatever vector is being used, so ~16 + /// or ~32 bytes). For haystacks shorter than that, the implementation + /// currently uses Rabin-Karp. + #[cfg(feature = "perf-literal-multisubstring")] + searcher: aho_corasick::packed::Searcher, + /// When running an anchored search, the packed searcher can't handle it so + /// we defer to Aho-Corasick itself. Kind of sad, but changing the packed + /// searchers to support anchored search would be difficult at worst and + /// annoying at best. Since packed searchers only apply to small numbers of + /// literals, we content ourselves that this is not much of an added cost. + /// (That packed searchers only work with a small number of literals is + /// also why we use a DFA here. Otherwise, the memory usage of a DFA would + /// likely be unacceptable.) + #[cfg(feature = "perf-literal-multisubstring")] + anchored_ac: aho_corasick::dfa::DFA, + /// The length of the smallest literal we look for. + /// + /// We use this as a heuristic to figure out whether this will be "fast" or + /// not. Generally, the longer the better, because longer needles are more + /// discriminating and thus reduce false positive rate. + #[cfg(feature = "perf-literal-multisubstring")] + minimum_len: usize, +} + +impl Teddy { + pub(crate) fn new>( + kind: MatchKind, + needles: &[B], + ) -> Option { + #[cfg(not(feature = "perf-literal-multisubstring"))] + { + None + } + #[cfg(feature = "perf-literal-multisubstring")] + { + // We only really support leftmost-first semantics. In + // theory we could at least support leftmost-longest, as the + // aho-corasick crate does, but regex-automata doesn't know about + // leftmost-longest currently. + // + // And like the aho-corasick prefilter, if we're using `All` + // semantics, then we can still use leftmost semantics for a + // prefilter. (This might be a suspicious choice for the literal + // engine, which uses a prefilter as a regex engine directly, but + // that only happens when using leftmost-first semantics.) 
+ let (packed_match_kind, ac_match_kind) = match kind { + MatchKind::LeftmostFirst | MatchKind::All => ( + aho_corasick::packed::MatchKind::LeftmostFirst, + aho_corasick::MatchKind::LeftmostFirst, + ), + }; + let minimum_len = + needles.iter().map(|n| n.as_ref().len()).min().unwrap_or(0); + let packed = aho_corasick::packed::Config::new() + .match_kind(packed_match_kind) + .builder() + .extend(needles) + .build()?; + let anchored_ac = aho_corasick::dfa::DFA::builder() + .match_kind(ac_match_kind) + .start_kind(aho_corasick::StartKind::Anchored) + .prefilter(false) + .build(needles) + .ok()?; + Some(Teddy { searcher: packed, anchored_ac, minimum_len }) + } + } +} + +impl PrefilterI for Teddy { + fn find(&self, haystack: &[u8], span: Span) -> Option { + #[cfg(not(feature = "perf-literal-multisubstring"))] + { + unreachable!() + } + #[cfg(feature = "perf-literal-multisubstring")] + { + let ac_span = + aho_corasick::Span { start: span.start, end: span.end }; + self.searcher + .find_in(haystack, ac_span) + .map(|m| Span { start: m.start(), end: m.end() }) + } + } + + fn prefix(&self, haystack: &[u8], span: Span) -> Option { + #[cfg(not(feature = "perf-literal-multisubstring"))] + { + unreachable!() + } + #[cfg(feature = "perf-literal-multisubstring")] + { + use aho_corasick::automaton::Automaton; + let input = aho_corasick::Input::new(haystack) + .anchored(aho_corasick::Anchored::Yes) + .span(span.start..span.end); + self.anchored_ac + .try_find(&input) + // OK because we build the DFA with anchored support. + .expect("aho-corasick DFA should never fail") + .map(|m| Span { start: m.start(), end: m.end() }) + } + } + + fn memory_usage(&self) -> usize { + #[cfg(not(feature = "perf-literal-multisubstring"))] + { + unreachable!() + } + #[cfg(feature = "perf-literal-multisubstring")] + { + use aho_corasick::automaton::Automaton; + self.searcher.memory_usage() + self.anchored_ac.memory_usage() + } + } + + fn is_fast(&self) -> bool { + #[cfg(not(feature = "perf-literal-multisubstring"))] + { + unreachable!() + } + #[cfg(feature = "perf-literal-multisubstring")] + { + // Teddy is usually quite fast, but I have seen some cases where + // a large number of literals can overwhelm it and make it not so + // fast. We make an educated but conservative guess at a limit, at + // which point, we're not so comfortable thinking Teddy is "fast." + // + // Well... this used to incorporate a "limit" on the *number* + // of literals, but I have since changed it to a minimum on the + // *smallest* literal. Namely, when there is a very small literal + // (1 or 2 bytes), it is far more likely that it leads to a higher + // false positive rate. (Although, of course, not always. For + // example, 'zq' is likely to have a very low false positive rate.) + // But when we have 3 bytes, we have a really good chance of being + // quite discriminatory and thus fast. + // + // We may still want to add some kind of limit on the number of + // literals here, but keep in mind that Teddy already has its own + // somewhat small limit (64 at time of writing). The main issue + // here is that if 'is_fast' is false, it opens the door for the + // reverse inner optimization to kick in. We really only want to + // resort to the reverse inner optimization if we absolutely must. 
+ self.minimum_len >= 3 + } + } +} diff --git a/vendor/regex-automata/src/util/primitives.rs b/vendor/regex-automata/src/util/primitives.rs new file mode 100644 index 00000000000000..5c5d187b0e6ab8 --- /dev/null +++ b/vendor/regex-automata/src/util/primitives.rs @@ -0,0 +1,776 @@ +/*! +Lower level primitive types that are useful in a variety of circumstances. + +# Overview + +This list represents the principle types in this module and briefly describes +when you might want to use them. + +* [`PatternID`] - A type that represents the identifier of a regex pattern. +This is probably the most widely used type in this module (which is why it's +also re-exported in the crate root). +* [`StateID`] - A type the represents the identifier of a finite automaton +state. This is used for both NFAs and DFAs, with the notable exception of +the hybrid NFA/DFA. (The hybrid NFA/DFA uses a special purpose "lazy" state +identifier.) +* [`SmallIndex`] - The internal representation of both a `PatternID` and a +`StateID`. Its purpose is to serve as a type that can index memory without +being as big as a `usize` on 64-bit targets. The main idea behind this type +is that there are many things in regex engines that will, in practice, never +overflow a 32-bit integer. (For example, like the number of patterns in a regex +or the number of states in an NFA.) Thus, a `SmallIndex` can be used to index +memory without peppering `as` casts everywhere. Moreover, it forces callers +to handle errors in the case where, somehow, the value would otherwise overflow +either a 32-bit integer or a `usize` (e.g., on 16-bit targets). +* [`NonMaxUsize`] - Represents a `usize` that cannot be `usize::MAX`. As a +result, `Option` has the same size in memory as a `usize`. This +useful, for example, when representing the offsets of submatches since it +reduces memory usage by a factor of 2. It is a legal optimization since Rust +guarantees that slices never have a length that exceeds `isize::MAX`. +*/ + +use core::num::NonZeroUsize; + +#[cfg(feature = "alloc")] +use alloc::vec::Vec; + +use crate::util::int::{Usize, U16, U32, U64}; + +/// A `usize` that can never be `usize::MAX`. +/// +/// This is similar to `core::num::NonZeroUsize`, but instead of not permitting +/// a zero value, this does not permit a max value. +/// +/// This is useful in certain contexts where one wants to optimize the memory +/// usage of things that contain match offsets. Namely, since Rust slices +/// are guaranteed to never have a length exceeding `isize::MAX`, we can use +/// `usize::MAX` as a sentinel to indicate that no match was found. Indeed, +/// types like `Option` have exactly the same size in memory as a +/// `usize`. +/// +/// This type is defined to be `repr(transparent)` for +/// `core::num::NonZeroUsize`, which is in turn defined to be +/// `repr(transparent)` for `usize`. +#[derive(Clone, Copy, Eq, Hash, PartialEq, PartialOrd, Ord)] +#[repr(transparent)] +pub struct NonMaxUsize(NonZeroUsize); + +impl NonMaxUsize { + /// Create a new `NonMaxUsize` from the given value. + /// + /// This returns `None` only when the given value is equal to `usize::MAX`. + #[inline] + pub fn new(value: usize) -> Option { + NonZeroUsize::new(value.wrapping_add(1)).map(NonMaxUsize) + } + + /// Return the underlying `usize` value. The returned value is guaranteed + /// to not equal `usize::MAX`. 
+ #[inline] + pub fn get(self) -> usize { + self.0.get().wrapping_sub(1) + } +} + +// We provide our own Debug impl because seeing the internal repr can be quite +// surprising if you aren't expecting it. e.g., 'NonMaxUsize(5)' vs just '5'. +impl core::fmt::Debug for NonMaxUsize { + fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { + write!(f, "{:?}", self.get()) + } +} + +/// A type that represents a "small" index. +/// +/// The main idea of this type is to provide something that can index memory, +/// but uses less memory than `usize` on 64-bit systems. Specifically, its +/// representation is always a `u32` and has `repr(transparent)` enabled. (So +/// it is safe to transmute between a `u32` and a `SmallIndex`.) +/// +/// A small index is typically useful in cases where there is no practical way +/// that the index will overflow a 32-bit integer. A good example of this is +/// an NFA state. If you could somehow build an NFA with `2^30` states, its +/// memory usage would be exorbitant and its runtime execution would be so +/// slow as to be completely worthless. Therefore, this crate generally deems +/// it acceptable to return an error if it would otherwise build an NFA that +/// requires a slice longer than what a 32-bit integer can index. In exchange, +/// we can use 32-bit indices instead of 64-bit indices in various places. +/// +/// This type ensures this by providing a constructor that will return an error +/// if its argument cannot fit into the type. This makes it much easier to +/// handle these sorts of boundary cases that are otherwise extremely subtle. +/// +/// On all targets, this type guarantees that its value will fit in a `u32`, +/// `i32`, `usize` and an `isize`. This means that on 16-bit targets, for +/// example, this type's maximum value will never overflow an `isize`, +/// which means it will never overflow a `i16` even though its internal +/// representation is still a `u32`. +/// +/// The purpose for making the type fit into even signed integer types like +/// `isize` is to guarantee that the difference between any two small indices +/// is itself also a small index. This is useful in certain contexts, e.g., +/// for delta encoding. +/// +/// # Other types +/// +/// The following types wrap `SmallIndex` to provide a more focused use case: +/// +/// * [`PatternID`] is for representing the identifiers of patterns. +/// * [`StateID`] is for representing the identifiers of states in finite +/// automata. It is used for both NFAs and DFAs. +/// +/// # Representation +/// +/// This type is always represented internally by a `u32` and is marked as +/// `repr(transparent)`. Thus, this type always has the same representation as +/// a `u32`. It is thus safe to transmute between a `u32` and a `SmallIndex`. +/// +/// # Indexing +/// +/// For convenience, callers may use a `SmallIndex` to index slices. +/// +/// # Safety +/// +/// While a `SmallIndex` is meant to guarantee that its value fits into `usize` +/// without using as much space as a `usize` on all targets, callers must +/// not rely on this property for safety. Callers may choose to rely on this +/// property for correctness however. For example, creating a `SmallIndex` with +/// an invalid value can be done in entirely safe code. This may in turn result +/// in panics or silent logical errors. +#[derive( + Clone, Copy, Debug, Default, Eq, Hash, PartialEq, PartialOrd, Ord, +)] +#[repr(transparent)] +pub struct SmallIndex(u32); + +impl SmallIndex { + /// The maximum index value. 
+ #[cfg(any(target_pointer_width = "32", target_pointer_width = "64"))] + pub const MAX: SmallIndex = + // FIXME: Use as_usize() once const functions in traits are stable. + SmallIndex::new_unchecked(core::i32::MAX as usize - 1); + + /// The maximum index value. + #[cfg(target_pointer_width = "16")] + pub const MAX: SmallIndex = + SmallIndex::new_unchecked(core::isize::MAX - 1); + + /// The total number of values that can be represented as a small index. + pub const LIMIT: usize = SmallIndex::MAX.as_usize() + 1; + + /// The zero index value. + pub const ZERO: SmallIndex = SmallIndex::new_unchecked(0); + + /// The number of bytes that a single small index uses in memory. + pub const SIZE: usize = core::mem::size_of::(); + + /// Create a new small index. + /// + /// If the given index exceeds [`SmallIndex::MAX`], then this returns + /// an error. + #[inline] + pub fn new(index: usize) -> Result { + SmallIndex::try_from(index) + } + + /// Create a new small index without checking whether the given value + /// exceeds [`SmallIndex::MAX`]. + /// + /// Using this routine with an invalid index value will result in + /// unspecified behavior, but *not* undefined behavior. In particular, an + /// invalid index value is likely to cause panics or possibly even silent + /// logical errors. + /// + /// Callers must never rely on a `SmallIndex` to be within a certain range + /// for memory safety. + #[inline] + pub const fn new_unchecked(index: usize) -> SmallIndex { + // FIXME: Use as_u32() once const functions in traits are stable. + SmallIndex(index as u32) + } + + /// Like [`SmallIndex::new`], but panics if the given index is not valid. + #[inline] + pub fn must(index: usize) -> SmallIndex { + SmallIndex::new(index).expect("invalid small index") + } + + /// Return this small index as a `usize`. This is guaranteed to never + /// overflow `usize`. + #[inline] + pub const fn as_usize(&self) -> usize { + // FIXME: Use as_usize() once const functions in traits are stable. + self.0 as usize + } + + /// Return this small index as a `u64`. This is guaranteed to never + /// overflow. + #[inline] + pub const fn as_u64(&self) -> u64 { + // FIXME: Use u64::from() once const functions in traits are stable. + self.0 as u64 + } + + /// Return the internal `u32` of this small index. This is guaranteed to + /// never overflow `u32`. + #[inline] + pub const fn as_u32(&self) -> u32 { + self.0 + } + + /// Return the internal `u32` of this small index represented as an `i32`. + /// This is guaranteed to never overflow an `i32`. + #[inline] + pub const fn as_i32(&self) -> i32 { + // This is OK because we guarantee that our max value is <= i32::MAX. + self.0 as i32 + } + + /// Returns one more than this small index as a usize. + /// + /// Since a small index has constraints on its maximum value, adding `1` to + /// it will always fit in a `usize`, `u32` and a `i32`. + #[inline] + pub fn one_more(&self) -> usize { + self.as_usize() + 1 + } + + /// Decode this small index from the bytes given using the native endian + /// byte order for the current target. + /// + /// If the decoded integer is not representable as a small index for the + /// current target, then this returns an error. 
+ #[inline] + pub fn from_ne_bytes( + bytes: [u8; 4], + ) -> Result { + let id = u32::from_ne_bytes(bytes); + if id > SmallIndex::MAX.as_u32() { + return Err(SmallIndexError { attempted: u64::from(id) }); + } + Ok(SmallIndex::new_unchecked(id.as_usize())) + } + + /// Decode this small index from the bytes given using the native endian + /// byte order for the current target. + /// + /// This is analogous to [`SmallIndex::new_unchecked`] in that is does not + /// check whether the decoded integer is representable as a small index. + #[inline] + pub fn from_ne_bytes_unchecked(bytes: [u8; 4]) -> SmallIndex { + SmallIndex::new_unchecked(u32::from_ne_bytes(bytes).as_usize()) + } + + /// Return the underlying small index integer as raw bytes in native endian + /// format. + #[inline] + pub fn to_ne_bytes(&self) -> [u8; 4] { + self.0.to_ne_bytes() + } +} + +impl core::ops::Index for [T] { + type Output = T; + + #[inline] + fn index(&self, index: SmallIndex) -> &T { + &self[index.as_usize()] + } +} + +impl core::ops::IndexMut for [T] { + #[inline] + fn index_mut(&mut self, index: SmallIndex) -> &mut T { + &mut self[index.as_usize()] + } +} + +#[cfg(feature = "alloc")] +impl core::ops::Index for Vec { + type Output = T; + + #[inline] + fn index(&self, index: SmallIndex) -> &T { + &self[index.as_usize()] + } +} + +#[cfg(feature = "alloc")] +impl core::ops::IndexMut for Vec { + #[inline] + fn index_mut(&mut self, index: SmallIndex) -> &mut T { + &mut self[index.as_usize()] + } +} + +impl From for SmallIndex { + fn from(index: u8) -> SmallIndex { + SmallIndex::new_unchecked(usize::from(index)) + } +} + +impl TryFrom for SmallIndex { + type Error = SmallIndexError; + + fn try_from(index: u16) -> Result { + if u32::from(index) > SmallIndex::MAX.as_u32() { + return Err(SmallIndexError { attempted: u64::from(index) }); + } + Ok(SmallIndex::new_unchecked(index.as_usize())) + } +} + +impl TryFrom for SmallIndex { + type Error = SmallIndexError; + + fn try_from(index: u32) -> Result { + if index > SmallIndex::MAX.as_u32() { + return Err(SmallIndexError { attempted: u64::from(index) }); + } + Ok(SmallIndex::new_unchecked(index.as_usize())) + } +} + +impl TryFrom for SmallIndex { + type Error = SmallIndexError; + + fn try_from(index: u64) -> Result { + if index > SmallIndex::MAX.as_u64() { + return Err(SmallIndexError { attempted: index }); + } + Ok(SmallIndex::new_unchecked(index.as_usize())) + } +} + +impl TryFrom for SmallIndex { + type Error = SmallIndexError; + + fn try_from(index: usize) -> Result { + if index > SmallIndex::MAX.as_usize() { + return Err(SmallIndexError { attempted: index.as_u64() }); + } + Ok(SmallIndex::new_unchecked(index)) + } +} + +#[cfg(test)] +impl quickcheck::Arbitrary for SmallIndex { + fn arbitrary(gen: &mut quickcheck::Gen) -> SmallIndex { + use core::cmp::max; + + let id = max(i32::MIN + 1, i32::arbitrary(gen)).abs(); + if id > SmallIndex::MAX.as_i32() { + SmallIndex::MAX + } else { + SmallIndex::new(usize::try_from(id).unwrap()).unwrap() + } + } +} + +/// This error occurs when a small index could not be constructed. +/// +/// This occurs when given an integer exceeding the maximum small index value. +/// +/// When the `std` feature is enabled, this implements the `Error` trait. +#[derive(Clone, Debug, Eq, PartialEq)] +pub struct SmallIndexError { + attempted: u64, +} + +impl SmallIndexError { + /// Returns the value that could not be converted to a small index. 
+ pub fn attempted(&self) -> u64 { + self.attempted + } +} + +#[cfg(feature = "std")] +impl std::error::Error for SmallIndexError {} + +impl core::fmt::Display for SmallIndexError { + fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { + write!( + f, + "failed to create small index from {:?}, which exceeds {:?}", + self.attempted(), + SmallIndex::MAX, + ) + } +} + +#[derive(Clone, Debug)] +pub(crate) struct SmallIndexIter { + rng: core::ops::Range, +} + +impl Iterator for SmallIndexIter { + type Item = SmallIndex; + + fn next(&mut self) -> Option { + if self.rng.start >= self.rng.end { + return None; + } + let next_id = self.rng.start + 1; + let id = core::mem::replace(&mut self.rng.start, next_id); + // new_unchecked is OK since we asserted that the number of + // elements in this iterator will fit in an ID at construction. + Some(SmallIndex::new_unchecked(id)) + } +} + +macro_rules! index_type_impls { + ($name:ident, $err:ident, $iter:ident, $withiter:ident) => { + impl $name { + /// The maximum value. + pub const MAX: $name = $name(SmallIndex::MAX); + + /// The total number of values that can be represented. + pub const LIMIT: usize = SmallIndex::LIMIT; + + /// The zero value. + pub const ZERO: $name = $name(SmallIndex::ZERO); + + /// The number of bytes that a single value uses in memory. + pub const SIZE: usize = SmallIndex::SIZE; + + /// Create a new value that is represented by a "small index." + /// + /// If the given index exceeds the maximum allowed value, then this + /// returns an error. + #[inline] + pub fn new(value: usize) -> Result<$name, $err> { + SmallIndex::new(value).map($name).map_err($err) + } + + /// Create a new value without checking whether the given argument + /// exceeds the maximum. + /// + /// Using this routine with an invalid value will result in + /// unspecified behavior, but *not* undefined behavior. In + /// particular, an invalid ID value is likely to cause panics or + /// possibly even silent logical errors. + /// + /// Callers must never rely on this type to be within a certain + /// range for memory safety. + #[inline] + pub const fn new_unchecked(value: usize) -> $name { + $name(SmallIndex::new_unchecked(value)) + } + + /// Like `new`, but panics if the given value is not valid. + #[inline] + pub fn must(value: usize) -> $name { + $name::new(value).expect(concat!( + "invalid ", + stringify!($name), + " value" + )) + } + + /// Return the internal value as a `usize`. This is guaranteed to + /// never overflow `usize`. + #[inline] + pub const fn as_usize(&self) -> usize { + self.0.as_usize() + } + + /// Return the internal value as a `u64`. This is guaranteed to + /// never overflow. + #[inline] + pub const fn as_u64(&self) -> u64 { + self.0.as_u64() + } + + /// Return the internal value as a `u32`. This is guaranteed to + /// never overflow `u32`. + #[inline] + pub const fn as_u32(&self) -> u32 { + self.0.as_u32() + } + + /// Return the internal value as a i32`. This is guaranteed to + /// never overflow an `i32`. + #[inline] + pub const fn as_i32(&self) -> i32 { + self.0.as_i32() + } + + /// Returns one more than this value as a usize. + /// + /// Since values represented by a "small index" have constraints + /// on their maximum value, adding `1` to it will always fit in a + /// `usize`, `u32` and a `i32`. + #[inline] + pub fn one_more(&self) -> usize { + self.0.one_more() + } + + /// Decode this value from the bytes given using the native endian + /// byte order for the current target. 
+ /// + /// If the decoded integer is not representable as a small index + /// for the current target, then this returns an error. + #[inline] + pub fn from_ne_bytes(bytes: [u8; 4]) -> Result<$name, $err> { + SmallIndex::from_ne_bytes(bytes).map($name).map_err($err) + } + + /// Decode this value from the bytes given using the native endian + /// byte order for the current target. + /// + /// This is analogous to `new_unchecked` in that is does not check + /// whether the decoded integer is representable as a small index. + #[inline] + pub fn from_ne_bytes_unchecked(bytes: [u8; 4]) -> $name { + $name(SmallIndex::from_ne_bytes_unchecked(bytes)) + } + + /// Return the underlying integer as raw bytes in native endian + /// format. + #[inline] + pub fn to_ne_bytes(&self) -> [u8; 4] { + self.0.to_ne_bytes() + } + + /// Returns an iterator over all values from 0 up to and not + /// including the given length. + /// + /// If the given length exceeds this type's limit, then this + /// panics. + pub(crate) fn iter(len: usize) -> $iter { + $iter::new(len) + } + } + + // We write our own Debug impl so that we get things like PatternID(5) + // instead of PatternID(SmallIndex(5)). + impl core::fmt::Debug for $name { + fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { + f.debug_tuple(stringify!($name)).field(&self.as_u32()).finish() + } + } + + impl core::ops::Index<$name> for [T] { + type Output = T; + + #[inline] + fn index(&self, index: $name) -> &T { + &self[index.as_usize()] + } + } + + impl core::ops::IndexMut<$name> for [T] { + #[inline] + fn index_mut(&mut self, index: $name) -> &mut T { + &mut self[index.as_usize()] + } + } + + #[cfg(feature = "alloc")] + impl core::ops::Index<$name> for Vec { + type Output = T; + + #[inline] + fn index(&self, index: $name) -> &T { + &self[index.as_usize()] + } + } + + #[cfg(feature = "alloc")] + impl core::ops::IndexMut<$name> for Vec { + #[inline] + fn index_mut(&mut self, index: $name) -> &mut T { + &mut self[index.as_usize()] + } + } + + impl From for $name { + fn from(value: u8) -> $name { + $name(SmallIndex::from(value)) + } + } + + impl TryFrom for $name { + type Error = $err; + + fn try_from(value: u16) -> Result<$name, $err> { + SmallIndex::try_from(value).map($name).map_err($err) + } + } + + impl TryFrom for $name { + type Error = $err; + + fn try_from(value: u32) -> Result<$name, $err> { + SmallIndex::try_from(value).map($name).map_err($err) + } + } + + impl TryFrom for $name { + type Error = $err; + + fn try_from(value: u64) -> Result<$name, $err> { + SmallIndex::try_from(value).map($name).map_err($err) + } + } + + impl TryFrom for $name { + type Error = $err; + + fn try_from(value: usize) -> Result<$name, $err> { + SmallIndex::try_from(value).map($name).map_err($err) + } + } + + #[cfg(test)] + impl quickcheck::Arbitrary for $name { + fn arbitrary(gen: &mut quickcheck::Gen) -> $name { + $name(SmallIndex::arbitrary(gen)) + } + } + + /// This error occurs when a value could not be constructed. + /// + /// This occurs when given an integer exceeding the maximum allowed + /// value. + /// + /// When the `std` feature is enabled, this implements the `Error` + /// trait. + #[derive(Clone, Debug, Eq, PartialEq)] + pub struct $err(SmallIndexError); + + impl $err { + /// Returns the value that could not be converted to an ID. 
+ pub fn attempted(&self) -> u64 { + self.0.attempted() + } + } + + #[cfg(feature = "std")] + impl std::error::Error for $err {} + + impl core::fmt::Display for $err { + fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { + write!( + f, + "failed to create {} from {:?}, which exceeds {:?}", + stringify!($name), + self.attempted(), + $name::MAX, + ) + } + } + + #[derive(Clone, Debug)] + pub(crate) struct $iter(SmallIndexIter); + + impl $iter { + fn new(len: usize) -> $iter { + assert!( + len <= $name::LIMIT, + "cannot create iterator for {} when number of \ + elements exceed {:?}", + stringify!($name), + $name::LIMIT, + ); + $iter(SmallIndexIter { rng: 0..len }) + } + } + + impl Iterator for $iter { + type Item = $name; + + fn next(&mut self) -> Option<$name> { + self.0.next().map($name) + } + } + + /// An iterator adapter that is like std::iter::Enumerate, but attaches + /// small index values instead. It requires `ExactSizeIterator`. At + /// construction, it ensures that the index of each element in the + /// iterator is representable in the corresponding small index type. + #[derive(Clone, Debug)] + pub(crate) struct $withiter { + it: I, + ids: $iter, + } + + impl $withiter { + fn new(it: I) -> $withiter { + let ids = $name::iter(it.len()); + $withiter { it, ids } + } + } + + impl Iterator for $withiter { + type Item = ($name, I::Item); + + fn next(&mut self) -> Option<($name, I::Item)> { + let item = self.it.next()?; + // Number of elements in this iterator must match, according + // to contract of ExactSizeIterator. + let id = self.ids.next().unwrap(); + Some((id, item)) + } + } + }; +} + +/// The identifier of a regex pattern, represented by a [`SmallIndex`]. +/// +/// The identifier for a pattern corresponds to its relative position among +/// other patterns in a single finite state machine. Namely, when building +/// a multi-pattern regex engine, one must supply a sequence of patterns to +/// match. The position (starting at 0) of each pattern in that sequence +/// represents its identifier. This identifier is in turn used to identify and +/// report matches of that pattern in various APIs. +/// +/// See the [`SmallIndex`] type for more information about what it means for +/// a pattern ID to be a "small index." +/// +/// Note that this type is defined in the +/// [`util::primitives`](crate::util::primitives) module, but it is also +/// re-exported at the crate root due to how common it is. +#[derive(Clone, Copy, Default, Eq, Hash, PartialEq, PartialOrd, Ord)] +#[repr(transparent)] +pub struct PatternID(SmallIndex); + +/// The identifier of a finite automaton state, represented by a +/// [`SmallIndex`]. +/// +/// Most regex engines in this crate are built on top of finite automata. Each +/// state in a finite automaton defines transitions from its state to another. +/// Those transitions point to other states via their identifiers, i.e., a +/// `StateID`. Since finite automata tend to contain many transitions, it is +/// much more memory efficient to define state IDs as small indices. +/// +/// See the [`SmallIndex`] type for more information about what it means for +/// a state ID to be a "small index." 
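+///
+/// # Example
+///
+/// A brief sketch of constructing state IDs from `usize` values (the
+/// `StateID` type lives in the public `util::primitives` module, like
+/// [`PatternID`] above):
+///
+/// ```
+/// use regex_automata::util::primitives::StateID;
+///
+/// let sid = StateID::must(42);
+/// assert_eq!(42, sid.as_usize());
+/// // The checked constructor rejects values above the maximum.
+/// assert!(StateID::new(StateID::LIMIT).is_err());
+/// ```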
+#[derive(Clone, Copy, Default, Eq, Hash, PartialEq, PartialOrd, Ord)]
+#[repr(transparent)]
+pub struct StateID(SmallIndex);
+
+index_type_impls!(PatternID, PatternIDError, PatternIDIter, WithPatternIDIter);
+index_type_impls!(StateID, StateIDError, StateIDIter, WithStateIDIter);
+
+/// A utility trait that defines a couple of adapters for making it convenient
+/// to access indices as "small index" types. We require ExactSizeIterator so
+/// that iterator construction can do a single check to make sure the index of
+/// each element is representable by its small index type.
+pub(crate) trait IteratorIndexExt: Iterator {
+    fn with_pattern_ids(self) -> WithPatternIDIter<Self>
+    where
+        Self: Sized + ExactSizeIterator,
+    {
+        WithPatternIDIter::new(self)
+    }
+
+    fn with_state_ids(self) -> WithStateIDIter<Self>
+    where
+        Self: Sized + ExactSizeIterator,
+    {
+        WithStateIDIter::new(self)
+    }
+}
+
+impl<I: Iterator> IteratorIndexExt for I {}
diff --git a/vendor/regex-automata/src/util/search.rs b/vendor/regex-automata/src/util/search.rs
new file mode 100644
index 00000000000000..3ece11d155411e
--- /dev/null
+++ b/vendor/regex-automata/src/util/search.rs
@@ -0,0 +1,1988 @@
+/*!
+Types and routines that support the search APIs of most regex engines.
+
+This sub-module isn't exposed directly, but rather, its contents are exported
+at the crate root due to the universality of most of the types and routines in
+this module.
+*/
+
+use core::ops::{Range, RangeBounds};
+
+use crate::util::{escape::DebugByte, primitives::PatternID, utf8};
+
+/// The parameters for a regex search including the haystack to search.
+///
+/// It turns out that regex searches have a few parameters, and in most cases,
+/// those parameters have defaults that work in the vast majority of cases.
+/// This `Input` type exists to make that common case seamless while also
+/// providing an avenue for changing the parameters of a search. In particular,
+/// this type enables doing so without a combinatorial explosion of different
+/// methods and/or superfluous parameters in the common cases.
+///
+/// An `Input` permits configuring the following things:
+///
+/// * Search only a substring of a haystack, while taking the broader context
+/// into account for resolving look-around assertions.
+/// * Indicating whether to search for all patterns in a regex, or to
+/// only search for one pattern in particular.
+/// * Whether to perform an anchored or unanchored search.
+/// * Whether to report a match as early as possible.
+///
+/// All of these parameters, except for the haystack, have sensible default
+/// values. This means that the minimal search configuration is simply a call
+/// to [`Input::new`] with your haystack. Setting any other parameter is
+/// optional.
+///
+/// Moreover, for any `H` that implements `AsRef<[u8]>`, there exists a
+/// `From<&H> for Input` implementation. This is useful because many of the
+/// search APIs in this crate accept an `Into<Input>`. This means you can
+/// provide string or byte strings to these routines directly, and they'll
+/// automatically get converted into an `Input` for you.
+///
+/// The lifetime parameter `'h` refers to the lifetime of the haystack.
+///
+/// # Organization
+///
+/// The API of `Input` is split into a few different parts:
+///
+/// * A builder-like API that transforms an `Input` by value. Examples:
+/// [`Input::span`] and [`Input::anchored`].
+/// * A setter API that permits mutating parameters in place. Examples:
+/// [`Input::set_span`] and [`Input::set_anchored`].
+/// * A getter API that permits retrieving any of the search parameters.
+/// Examples: [`Input::get_span`] and [`Input::get_anchored`].
+/// * A few convenience getter routines that don't conform to the above naming
+/// pattern due to how common they are. Examples: [`Input::haystack`],
+/// [`Input::start`] and [`Input::end`].
+/// * Miscellaneous predicates and other helper routines that are useful
+/// in some contexts. Examples: [`Input::is_char_boundary`].
+///
+/// An `Input` exposes so much because it is meant to be used by both callers
+/// of regex engines _and_ implementors of regex engines. A constraining
+/// factor is that regex engines should accept a `&Input` as its lowest level
+/// API, which means that implementors should only use the "getter" APIs of an
+/// `Input`.
+///
+/// # Valid bounds and search termination
+///
+/// An `Input` permits setting the bounds of a search via either
+/// [`Input::span`] or [`Input::range`]. The bounds set must be valid, or
+/// else a panic will occur. Bounds are valid if and only if:
+///
+/// * The bounds represent a valid range into the input's haystack.
+/// * **or** the end bound is a valid ending bound for the haystack *and*
+/// the start bound is exactly one greater than the end bound.
+///
+/// In the latter case, [`Input::is_done`] will return true and indicates any
+/// search receiving such an input should immediately return with no match.
+///
+/// Note that while `Input` is used for reverse searches in this crate, the
+/// `Input::is_done` predicate assumes a forward search. Because unsigned
+/// offsets are used internally, there is no way to tell from only the offsets
+/// whether a reverse search is done or not.
+///
+/// # Regex engine support
+///
+/// Any regex engine accepting an `Input` must support at least the following
+/// things:
+///
+/// * Searching a `&[u8]` for matches.
+/// * Searching a substring of `&[u8]` for a match, such that any match
+/// reported must appear entirely within that substring.
+/// * For a forwards search, a match should never be reported when
+/// [`Input::is_done`] returns true. (For reverse searches, termination should
+/// be handled outside of `Input`.)
+///
+/// Supporting other aspects of an `Input` is optional, but regex engines
+/// should handle aspects they don't support gracefully. How this is done is
+/// generally up to the regex engine. This crate generally treats unsupported
+/// anchored modes as an error to report for example, but for simplicity, in
+/// the meta regex engine, trying to search with an invalid pattern ID just
+/// results in no match being reported.
+#[derive(Clone)]
+pub struct Input<'h> {
+    haystack: &'h [u8],
+    span: Span,
+    anchored: Anchored,
+    earliest: bool,
+}
+
+impl<'h> Input<'h> {
+    /// Create a new search configuration for the given haystack.
+    #[inline]
+    pub fn new<H: ?Sized + AsRef<[u8]>>(haystack: &'h H) -> Input<'h> {
+        // Perform only one call to `haystack.as_ref()` to protect from
+        // incorrect implementations that return different values from
+        // multiple calls. This is important because there's code that relies
+        // on `span` not being out of bounds with respect to the stored
+        // `haystack`.
+        let haystack = haystack.as_ref();
+        Input {
+            haystack,
+            span: Span { start: 0, end: haystack.len() },
+            anchored: Anchored::No,
+            earliest: false,
+        }
+    }
+
+    /// Set the span for this search.
+    ///
+    /// This routine does not panic if the span given is not a valid range for
+    /// this search's haystack.
If this search is run with an invalid range, + /// then the most likely outcome is that the actual search execution will + /// panic. + /// + /// This routine is generic over how a span is provided. While + /// a [`Span`] may be given directly, one may also provide a + /// `std::ops::Range`. To provide anything supported by range + /// syntax, use the [`Input::range`] method. + /// + /// The default span is the entire haystack. + /// + /// Note that [`Input::range`] overrides this method and vice versa. + /// + /// # Panics + /// + /// This panics if the given span does not correspond to valid bounds in + /// the haystack or the termination of a search. + /// + /// # Example + /// + /// This example shows how the span of the search can impact whether a + /// match is reported or not. This is particularly relevant for look-around + /// operators, which might take things outside of the span into account + /// when determining whether they match. + /// + /// ``` + /// # if cfg!(miri) { return Ok(()); } // miri takes too long + /// use regex_automata::{ + /// nfa::thompson::pikevm::PikeVM, + /// Match, Input, + /// }; + /// + /// // Look for 'at', but as a distinct word. + /// let re = PikeVM::new(r"\bat\b")?; + /// let mut cache = re.create_cache(); + /// let mut caps = re.create_captures(); + /// + /// // Our haystack contains 'at', but not as a distinct word. + /// let haystack = "batter"; + /// + /// // A standard search finds nothing, as expected. + /// let input = Input::new(haystack); + /// re.search(&mut cache, &input, &mut caps); + /// assert_eq!(None, caps.get_match()); + /// + /// // But if we wanted to search starting at position '1', we might + /// // slice the haystack. If we do this, it's impossible for the \b + /// // anchors to take the surrounding context into account! And thus, + /// // a match is produced. + /// let input = Input::new(&haystack[1..3]); + /// re.search(&mut cache, &input, &mut caps); + /// assert_eq!(Some(Match::must(0, 0..2)), caps.get_match()); + /// + /// // But if we specify the span of the search instead of slicing the + /// // haystack, then the regex engine can "see" outside of the span + /// // and resolve the anchors correctly. + /// let input = Input::new(haystack).span(1..3); + /// re.search(&mut cache, &input, &mut caps); + /// assert_eq!(None, caps.get_match()); + /// + /// # Ok::<(), Box>(()) + /// ``` + /// + /// This may seem a little ham-fisted, but this scenario tends to come up + /// if some other regex engine found the match span and now you need to + /// re-process that span to look for capturing groups. (e.g., Run a faster + /// DFA first, find a match, then run the PikeVM on just the match span to + /// resolve capturing groups.) In order to implement that sort of logic + /// correctly, you need to set the span on the search instead of slicing + /// the haystack directly. + /// + /// The other advantage of using this routine to specify the bounds of the + /// search is that the match offsets are still reported in terms of the + /// original haystack. For example, the second search in the example above + /// reported a match at position `0`, even though `at` starts at offset + /// `1` because we sliced the haystack. + #[inline] + pub fn span>(mut self, span: S) -> Input<'h> { + self.set_span(span); + self + } + + /// Like `Input::span`, but accepts any range instead. + /// + /// This routine does not panic if the range given is not a valid range for + /// this search's haystack. 
If this search is run with an invalid range, + /// then the most likely outcome is that the actual search execution will + /// panic. + /// + /// The default range is the entire haystack. + /// + /// Note that [`Input::span`] overrides this method and vice versa. + /// + /// # Panics + /// + /// This routine will panic if the given range could not be converted + /// to a valid [`Range`]. For example, this would panic when given + /// `0..=usize::MAX` since it cannot be represented using a half-open + /// interval in terms of `usize`. + /// + /// This also panics if the given range does not correspond to valid bounds + /// in the haystack or the termination of a search. + /// + /// # Example + /// + /// ``` + /// use regex_automata::Input; + /// + /// let input = Input::new("foobar"); + /// assert_eq!(0..6, input.get_range()); + /// + /// let input = Input::new("foobar").range(2..=4); + /// assert_eq!(2..5, input.get_range()); + /// ``` + #[inline] + pub fn range>(mut self, range: R) -> Input<'h> { + self.set_range(range); + self + } + + /// Sets the anchor mode of a search. + /// + /// When a search is anchored (so that's [`Anchored::Yes`] or + /// [`Anchored::Pattern`]), a match must begin at the start of a search. + /// When a search is not anchored (that's [`Anchored::No`]), regex engines + /// will behave as if the pattern started with a `(?s-u:.)*?`. This prefix + /// permits a match to appear anywhere. + /// + /// By default, the anchored mode is [`Anchored::No`]. + /// + /// **WARNING:** this is subtly different than using a `^` at the start of + /// your regex. A `^` forces a regex to match exclusively at the start of + /// a haystack, regardless of where you begin your search. In contrast, + /// anchoring a search will allow your regex to match anywhere in your + /// haystack, but the match must start at the beginning of a search. + /// + /// For example, consider the haystack `aba` and the following searches: + /// + /// 1. The regex `^a` is compiled with `Anchored::No` and searches `aba` + /// starting at position `2`. Since `^` requires the match to start at + /// the beginning of the haystack and `2 > 0`, no match is found. + /// 2. The regex `a` is compiled with `Anchored::Yes` and searches `aba` + /// starting at position `2`. This reports a match at `[2, 3]` since + /// the match starts where the search started. Since there is no `^`, + /// there is no requirement for the match to start at the beginning of + /// the haystack. + /// 3. The regex `a` is compiled with `Anchored::Yes` and searches `aba` + /// starting at position `1`. Since `b` corresponds to position `1` and + /// since the search is anchored, it finds no match. While the regex + /// matches at other positions, configuring the search to be anchored + /// requires that it only report a match that begins at the same offset + /// as the beginning of the search. + /// 4. The regex `a` is compiled with `Anchored::No` and searches `aba` + /// starting at position `1`. Since the search is not anchored and + /// the regex does not start with `^`, the search executes as if there + /// is a `(?s:.)*?` prefix that permits it to match anywhere. Thus, it + /// reports a match at `[2, 3]`. + /// + /// Note that the [`Anchored::Pattern`] mode is like `Anchored::Yes`, + /// except it only reports matches for a particular pattern. + /// + /// # Example + /// + /// This demonstrates the differences between an anchored search and + /// a pattern that begins with `^` (as described in the above warning + /// message). 
+ /// + /// ``` + /// use regex_automata::{ + /// nfa::thompson::pikevm::PikeVM, + /// Anchored, Match, Input, + /// }; + /// + /// let haystack = "aba"; + /// + /// let re = PikeVM::new(r"^a")?; + /// let (mut cache, mut caps) = (re.create_cache(), re.create_captures()); + /// let input = Input::new(haystack).span(2..3).anchored(Anchored::No); + /// re.search(&mut cache, &input, &mut caps); + /// // No match is found because 2 is not the beginning of the haystack, + /// // which is what ^ requires. + /// assert_eq!(None, caps.get_match()); + /// + /// let re = PikeVM::new(r"a")?; + /// let (mut cache, mut caps) = (re.create_cache(), re.create_captures()); + /// let input = Input::new(haystack).span(2..3).anchored(Anchored::Yes); + /// re.search(&mut cache, &input, &mut caps); + /// // An anchored search can still match anywhere in the haystack, it just + /// // must begin at the start of the search which is '2' in this case. + /// assert_eq!(Some(Match::must(0, 2..3)), caps.get_match()); + /// + /// let re = PikeVM::new(r"a")?; + /// let (mut cache, mut caps) = (re.create_cache(), re.create_captures()); + /// let input = Input::new(haystack).span(1..3).anchored(Anchored::Yes); + /// re.search(&mut cache, &input, &mut caps); + /// // No match is found since we start searching at offset 1 which + /// // corresponds to 'b'. Since there is no '(?s:.)*?' prefix, no match + /// // is found. + /// assert_eq!(None, caps.get_match()); + /// + /// let re = PikeVM::new(r"a")?; + /// let (mut cache, mut caps) = (re.create_cache(), re.create_captures()); + /// let input = Input::new(haystack).span(1..3).anchored(Anchored::No); + /// re.search(&mut cache, &input, &mut caps); + /// // Since anchored=no, an implicit '(?s:.)*?' prefix was added to the + /// // pattern. Even though the search starts at 'b', the 'match anything' + /// // prefix allows the search to match 'a'. + /// let expected = Some(Match::must(0, 2..3)); + /// assert_eq!(expected, caps.get_match()); + /// + /// # Ok::<(), Box>(()) + /// ``` + #[inline] + pub fn anchored(mut self, mode: Anchored) -> Input<'h> { + self.set_anchored(mode); + self + } + + /// Whether to execute an "earliest" search or not. + /// + /// When running a non-overlapping search, an "earliest" search will return + /// the match location as early as possible. For example, given a pattern + /// of `foo[0-9]+` and a haystack of `foo12345`, a normal leftmost search + /// will return `foo12345` as a match. But an "earliest" search for regex + /// engines that support "earliest" semantics will return `foo1` as a + /// match, since as soon as the first digit following `foo` is seen, it is + /// known to have found a match. + /// + /// Note that "earliest" semantics generally depend on the regex engine. + /// Different regex engines may determine there is a match at different + /// points. So there is no guarantee that "earliest" matches will always + /// return the same offsets for all regex engines. The "earliest" notion + /// is really about when the particular regex engine determines there is + /// a match rather than a consistent semantic unto itself. This is often + /// useful for implementing "did a match occur or not" predicates, but + /// sometimes the offset is useful as well. + /// + /// This is disabled by default. + /// + /// # Example + /// + /// This example shows the difference between "earliest" searching and + /// normal searching. 
+ /// + /// ``` + /// use regex_automata::{nfa::thompson::pikevm::PikeVM, Match, Input}; + /// + /// let re = PikeVM::new(r"foo[0-9]+")?; + /// let mut cache = re.create_cache(); + /// let mut caps = re.create_captures(); + /// + /// // A normal search implements greediness like you expect. + /// let input = Input::new("foo12345"); + /// re.search(&mut cache, &input, &mut caps); + /// assert_eq!(Some(Match::must(0, 0..8)), caps.get_match()); + /// + /// // When 'earliest' is enabled and the regex engine supports + /// // it, the search will bail once it knows a match has been + /// // found. + /// let input = Input::new("foo12345").earliest(true); + /// re.search(&mut cache, &input, &mut caps); + /// assert_eq!(Some(Match::must(0, 0..4)), caps.get_match()); + /// # Ok::<(), Box>(()) + /// ``` + #[inline] + pub fn earliest(mut self, yes: bool) -> Input<'h> { + self.set_earliest(yes); + self + } + + /// Set the span for this search configuration. + /// + /// This is like the [`Input::span`] method, except this mutates the + /// span in place. + /// + /// This routine is generic over how a span is provided. While + /// a [`Span`] may be given directly, one may also provide a + /// `std::ops::Range`. + /// + /// # Panics + /// + /// This panics if the given span does not correspond to valid bounds in + /// the haystack or the termination of a search. + /// + /// # Example + /// + /// ``` + /// use regex_automata::Input; + /// + /// let mut input = Input::new("foobar"); + /// assert_eq!(0..6, input.get_range()); + /// input.set_span(2..4); + /// assert_eq!(2..4, input.get_range()); + /// ``` + #[inline] + pub fn set_span>(&mut self, span: S) { + let span = span.into(); + assert!( + span.end <= self.haystack.len() + && span.start <= span.end.wrapping_add(1), + "invalid span {:?} for haystack of length {}", + span, + self.haystack.len(), + ); + self.span = span; + } + + /// Set the span for this search configuration given any range. + /// + /// This is like the [`Input::range`] method, except this mutates the + /// span in place. + /// + /// This routine does not panic if the range given is not a valid range for + /// this search's haystack. If this search is run with an invalid range, + /// then the most likely outcome is that the actual search execution will + /// panic. + /// + /// # Panics + /// + /// This routine will panic if the given range could not be converted + /// to a valid [`Range`]. For example, this would panic when given + /// `0..=usize::MAX` since it cannot be represented using a half-open + /// interval in terms of `usize`. + /// + /// This also panics if the given span does not correspond to valid bounds + /// in the haystack or the termination of a search. + /// + /// # Example + /// + /// ``` + /// use regex_automata::Input; + /// + /// let mut input = Input::new("foobar"); + /// assert_eq!(0..6, input.get_range()); + /// input.set_range(2..=4); + /// assert_eq!(2..5, input.get_range()); + /// ``` + #[inline] + pub fn set_range>(&mut self, range: R) { + use core::ops::Bound; + + // It's a little weird to convert ranges into spans, and then spans + // back into ranges when we actually slice the haystack. Because + // of that process, we always represent everything as a half-open + // internal. Therefore, handling things like m..=n is a little awkward. + let start = match range.start_bound() { + Bound::Included(&i) => i, + // Can this case ever happen? Range syntax doesn't support it... 
+ Bound::Excluded(&i) => i.checked_add(1).unwrap(), + Bound::Unbounded => 0, + }; + let end = match range.end_bound() { + Bound::Included(&i) => i.checked_add(1).unwrap(), + Bound::Excluded(&i) => i, + Bound::Unbounded => self.haystack().len(), + }; + self.set_span(Span { start, end }); + } + + /// Set the starting offset for the span for this search configuration. + /// + /// This is a convenience routine for only mutating the start of a span + /// without having to set the entire span. + /// + /// # Panics + /// + /// This panics if the span resulting from the new start position does not + /// correspond to valid bounds in the haystack or the termination of a + /// search. + /// + /// # Example + /// + /// ``` + /// use regex_automata::Input; + /// + /// let mut input = Input::new("foobar"); + /// assert_eq!(0..6, input.get_range()); + /// input.set_start(5); + /// assert_eq!(5..6, input.get_range()); + /// ``` + #[inline] + pub fn set_start(&mut self, start: usize) { + self.set_span(Span { start, ..self.get_span() }); + } + + /// Set the ending offset for the span for this search configuration. + /// + /// This is a convenience routine for only mutating the end of a span + /// without having to set the entire span. + /// + /// # Panics + /// + /// This panics if the span resulting from the new end position does not + /// correspond to valid bounds in the haystack or the termination of a + /// search. + /// + /// # Example + /// + /// ``` + /// use regex_automata::Input; + /// + /// let mut input = Input::new("foobar"); + /// assert_eq!(0..6, input.get_range()); + /// input.set_end(5); + /// assert_eq!(0..5, input.get_range()); + /// ``` + #[inline] + pub fn set_end(&mut self, end: usize) { + self.set_span(Span { end, ..self.get_span() }); + } + + /// Set the anchor mode of a search. + /// + /// This is like [`Input::anchored`], except it mutates the search + /// configuration in place. + /// + /// # Example + /// + /// ``` + /// use regex_automata::{Anchored, Input, PatternID}; + /// + /// let mut input = Input::new("foobar"); + /// assert_eq!(Anchored::No, input.get_anchored()); + /// + /// let pid = PatternID::must(5); + /// input.set_anchored(Anchored::Pattern(pid)); + /// assert_eq!(Anchored::Pattern(pid), input.get_anchored()); + /// ``` + #[inline] + pub fn set_anchored(&mut self, mode: Anchored) { + self.anchored = mode; + } + + /// Set whether the search should execute in "earliest" mode or not. + /// + /// This is like [`Input::earliest`], except it mutates the search + /// configuration in place. + /// + /// # Example + /// + /// ``` + /// use regex_automata::Input; + /// + /// let mut input = Input::new("foobar"); + /// assert!(!input.get_earliest()); + /// input.set_earliest(true); + /// assert!(input.get_earliest()); + /// ``` + #[inline] + pub fn set_earliest(&mut self, yes: bool) { + self.earliest = yes; + } + + /// Return a borrow of the underlying haystack as a slice of bytes. + /// + /// # Example + /// + /// ``` + /// use regex_automata::Input; + /// + /// let input = Input::new("foobar"); + /// assert_eq!(b"foobar", input.haystack()); + /// ``` + #[inline] + pub fn haystack(&self) -> &'h [u8] { + self.haystack + } + + /// Return the start position of this search. + /// + /// This is a convenience routine for `search.get_span().start()`. + /// + /// When [`Input::is_done`] is `false`, this is guaranteed to return + /// an offset that is less than or equal to [`Input::end`]. Otherwise, + /// the offset is one greater than [`Input::end`]. 
+ /// + /// # Example + /// + /// ``` + /// use regex_automata::Input; + /// + /// let input = Input::new("foobar"); + /// assert_eq!(0, input.start()); + /// + /// let input = Input::new("foobar").span(2..4); + /// assert_eq!(2, input.start()); + /// ``` + #[inline] + pub fn start(&self) -> usize { + self.get_span().start + } + + /// Return the end position of this search. + /// + /// This is a convenience routine for `search.get_span().end()`. + /// + /// This is guaranteed to return an offset that is a valid exclusive end + /// bound for this input's haystack. + /// + /// # Example + /// + /// ``` + /// use regex_automata::Input; + /// + /// let input = Input::new("foobar"); + /// assert_eq!(6, input.end()); + /// + /// let input = Input::new("foobar").span(2..4); + /// assert_eq!(4, input.end()); + /// ``` + #[inline] + pub fn end(&self) -> usize { + self.get_span().end + } + + /// Return the span for this search configuration. + /// + /// If one was not explicitly set, then the span corresponds to the entire + /// range of the haystack. + /// + /// When [`Input::is_done`] is `false`, the span returned is guaranteed + /// to correspond to valid bounds for this input's haystack. + /// + /// # Example + /// + /// ``` + /// use regex_automata::{Input, Span}; + /// + /// let input = Input::new("foobar"); + /// assert_eq!(Span { start: 0, end: 6 }, input.get_span()); + /// ``` + #[inline] + pub fn get_span(&self) -> Span { + self.span + } + + /// Return the span as a range for this search configuration. + /// + /// If one was not explicitly set, then the span corresponds to the entire + /// range of the haystack. + /// + /// When [`Input::is_done`] is `false`, the range returned is guaranteed + /// to correspond to valid bounds for this input's haystack. + /// + /// # Example + /// + /// ``` + /// use regex_automata::Input; + /// + /// let input = Input::new("foobar"); + /// assert_eq!(0..6, input.get_range()); + /// ``` + #[inline] + pub fn get_range(&self) -> Range { + self.get_span().range() + } + + /// Return the anchored mode for this search configuration. + /// + /// If no anchored mode was set, then it defaults to [`Anchored::No`]. + /// + /// # Example + /// + /// ``` + /// use regex_automata::{Anchored, Input, PatternID}; + /// + /// let mut input = Input::new("foobar"); + /// assert_eq!(Anchored::No, input.get_anchored()); + /// + /// let pid = PatternID::must(5); + /// input.set_anchored(Anchored::Pattern(pid)); + /// assert_eq!(Anchored::Pattern(pid), input.get_anchored()); + /// ``` + #[inline] + pub fn get_anchored(&self) -> Anchored { + self.anchored + } + + /// Return whether this search should execute in "earliest" mode. + /// + /// # Example + /// + /// ``` + /// use regex_automata::Input; + /// + /// let input = Input::new("foobar"); + /// assert!(!input.get_earliest()); + /// ``` + #[inline] + pub fn get_earliest(&self) -> bool { + self.earliest + } + + /// Return true if and only if this search can never return any other + /// matches. + /// + /// This occurs when the start position of this search is greater than the + /// end position of the search. 
+ /// + /// # Example + /// + /// ``` + /// use regex_automata::Input; + /// + /// let mut input = Input::new("foobar"); + /// assert!(!input.is_done()); + /// input.set_start(6); + /// assert!(!input.is_done()); + /// input.set_start(7); + /// assert!(input.is_done()); + /// ``` + #[inline] + pub fn is_done(&self) -> bool { + self.get_span().start > self.get_span().end + } + + /// Returns true if and only if the given offset in this search's haystack + /// falls on a valid UTF-8 encoded codepoint boundary. + /// + /// If the haystack is not valid UTF-8, then the behavior of this routine + /// is unspecified. + /// + /// # Example + /// + /// This shows where codepoint boundaries do and don't exist in valid + /// UTF-8. + /// + /// ``` + /// use regex_automata::Input; + /// + /// let input = Input::new("☃"); + /// assert!(input.is_char_boundary(0)); + /// assert!(!input.is_char_boundary(1)); + /// assert!(!input.is_char_boundary(2)); + /// assert!(input.is_char_boundary(3)); + /// assert!(!input.is_char_boundary(4)); + /// ``` + #[inline] + pub fn is_char_boundary(&self, offset: usize) -> bool { + utf8::is_boundary(self.haystack(), offset) + } +} + +impl<'h> core::fmt::Debug for Input<'h> { + fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { + use crate::util::escape::DebugHaystack; + + f.debug_struct("Input") + .field("haystack", &DebugHaystack(self.haystack())) + .field("span", &self.span) + .field("anchored", &self.anchored) + .field("earliest", &self.earliest) + .finish() + } +} + +impl<'h, H: ?Sized + AsRef<[u8]>> From<&'h H> for Input<'h> { + fn from(haystack: &'h H) -> Input<'h> { + Input::new(haystack) + } +} + +/// A representation of a span reported by a regex engine. +/// +/// A span corresponds to the starting and ending _byte offsets_ of a +/// contiguous region of bytes. The starting offset is inclusive while the +/// ending offset is exclusive. That is, a span is a half-open interval. +/// +/// A span is used to report the offsets of a match, but it is also used to +/// convey which region of a haystack should be searched via routines like +/// [`Input::span`]. +/// +/// This is basically equivalent to a `std::ops::Range`, except this +/// type implements `Copy` which makes it more ergonomic to use in the context +/// of this crate. Like a range, this implements `Index` for `[u8]` and `str`, +/// and `IndexMut` for `[u8]`. For convenience, this also impls `From`, +/// which means things like `Span::from(5..10)` work. +#[derive(Clone, Copy, Eq, Hash, PartialEq)] +pub struct Span { + /// The start offset of the span, inclusive. + pub start: usize, + /// The end offset of the span, exclusive. + pub end: usize, +} + +impl Span { + /// Returns this span as a range. + #[inline] + pub fn range(&self) -> Range { + Range::from(*self) + } + + /// Returns true when this span is empty. That is, when `start >= end`. + #[inline] + pub fn is_empty(&self) -> bool { + self.start >= self.end + } + + /// Returns the length of this span. + /// + /// This returns `0` in precisely the cases that `is_empty` returns `true`. + #[inline] + pub fn len(&self) -> usize { + self.end.saturating_sub(self.start) + } + + /// Returns true when the given offset is contained within this span. + /// + /// Note that an empty span contains no offsets and will always return + /// false. 
+ #[inline] + pub fn contains(&self, offset: usize) -> bool { + !self.is_empty() && self.start <= offset && offset <= self.end + } + + /// Returns a new span with `offset` added to this span's `start` and `end` + /// values. + #[inline] + pub fn offset(&self, offset: usize) -> Span { + Span { start: self.start + offset, end: self.end + offset } + } +} + +impl core::fmt::Debug for Span { + fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { + write!(f, "{}..{}", self.start, self.end) + } +} + +impl core::ops::Index for [u8] { + type Output = [u8]; + + #[inline] + fn index(&self, index: Span) -> &[u8] { + &self[index.range()] + } +} + +impl core::ops::IndexMut for [u8] { + #[inline] + fn index_mut(&mut self, index: Span) -> &mut [u8] { + &mut self[index.range()] + } +} + +impl core::ops::Index for str { + type Output = str; + + #[inline] + fn index(&self, index: Span) -> &str { + &self[index.range()] + } +} + +impl From> for Span { + #[inline] + fn from(range: Range) -> Span { + Span { start: range.start, end: range.end } + } +} + +impl From for Range { + #[inline] + fn from(span: Span) -> Range { + Range { start: span.start, end: span.end } + } +} + +impl PartialEq> for Span { + #[inline] + fn eq(&self, range: &Range) -> bool { + self.start == range.start && self.end == range.end + } +} + +impl PartialEq for Range { + #[inline] + fn eq(&self, span: &Span) -> bool { + self.start == span.start && self.end == span.end + } +} + +/// A representation of "half" of a match reported by a DFA. +/// +/// This is called a "half" match because it only includes the end location (or +/// start location for a reverse search) of a match. This corresponds to the +/// information that a single DFA scan can report. Getting the other half of +/// the match requires a second scan with a reversed DFA. +/// +/// A half match also includes the pattern that matched. The pattern is +/// identified by an ID, which corresponds to its position (starting from `0`) +/// relative to other patterns used to construct the corresponding DFA. If only +/// a single pattern is provided to the DFA, then all matches are guaranteed to +/// have a pattern ID of `0`. +#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)] +pub struct HalfMatch { + /// The pattern ID. + pattern: PatternID, + /// The offset of the match. + /// + /// For forward searches, the offset is exclusive. For reverse searches, + /// the offset is inclusive. + offset: usize, +} + +impl HalfMatch { + /// Create a new half match from a pattern ID and a byte offset. + #[inline] + pub fn new(pattern: PatternID, offset: usize) -> HalfMatch { + HalfMatch { pattern, offset } + } + + /// Create a new half match from a pattern ID and a byte offset. + /// + /// This is like [`HalfMatch::new`], but accepts a `usize` instead of a + /// [`PatternID`]. This panics if the given `usize` is not representable + /// as a `PatternID`. + #[inline] + pub fn must(pattern: usize, offset: usize) -> HalfMatch { + HalfMatch::new(PatternID::new(pattern).unwrap(), offset) + } + + /// Returns the ID of the pattern that matched. + /// + /// The ID of a pattern is derived from the position in which it was + /// originally inserted into the corresponding DFA. The first pattern has + /// identifier `0`, and each subsequent pattern is `1`, `2` and so on. + #[inline] + pub fn pattern(&self) -> PatternID { + self.pattern + } + + /// The position of the match. + /// + /// If this match was produced by a forward search, then the offset is + /// exclusive. 
If this match was produced by a reverse search, then the + /// offset is inclusive. + #[inline] + pub fn offset(&self) -> usize { + self.offset + } +} + +/// A representation of a match reported by a regex engine. +/// +/// A match has two essential pieces of information: the [`PatternID`] that +/// matches, and the [`Span`] of the match in a haystack. +/// +/// The pattern is identified by an ID, which corresponds to its position +/// (starting from `0`) relative to other patterns used to construct the +/// corresponding regex engine. If only a single pattern is provided, then all +/// matches are guaranteed to have a pattern ID of `0`. +/// +/// Every match reported by a regex engine guarantees that its span has its +/// start offset as less than or equal to its end offset. +#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)] +pub struct Match { + /// The pattern ID. + pattern: PatternID, + /// The underlying match span. + span: Span, +} + +impl Match { + /// Create a new match from a pattern ID and a span. + /// + /// This constructor is generic over how a span is provided. While + /// a [`Span`] may be given directly, one may also provide a + /// `std::ops::Range`. + /// + /// # Panics + /// + /// This panics if `end < start`. + /// + /// # Example + /// + /// This shows how to create a match for the first pattern in a regex + /// object using convenient range syntax. + /// + /// ``` + /// use regex_automata::{Match, PatternID}; + /// + /// let m = Match::new(PatternID::ZERO, 5..10); + /// assert_eq!(0, m.pattern().as_usize()); + /// assert_eq!(5, m.start()); + /// assert_eq!(10, m.end()); + /// ``` + #[inline] + pub fn new>(pattern: PatternID, span: S) -> Match { + let span: Span = span.into(); + assert!(span.start <= span.end, "invalid match span"); + Match { pattern, span } + } + + /// Create a new match from a pattern ID and a byte offset span. + /// + /// This constructor is generic over how a span is provided. While + /// a [`Span`] may be given directly, one may also provide a + /// `std::ops::Range`. + /// + /// This is like [`Match::new`], but accepts a `usize` instead of a + /// [`PatternID`]. This panics if the given `usize` is not representable + /// as a `PatternID`. + /// + /// # Panics + /// + /// This panics if `end < start` or if `pattern > PatternID::MAX`. + /// + /// # Example + /// + /// This shows how to create a match for the third pattern in a regex + /// object using convenient range syntax. + /// + /// ``` + /// use regex_automata::Match; + /// + /// let m = Match::must(3, 5..10); + /// assert_eq!(3, m.pattern().as_usize()); + /// assert_eq!(5, m.start()); + /// assert_eq!(10, m.end()); + /// ``` + #[inline] + pub fn must>(pattern: usize, span: S) -> Match { + Match::new(PatternID::must(pattern), span) + } + + /// Returns the ID of the pattern that matched. + /// + /// The ID of a pattern is derived from the position in which it was + /// originally inserted into the corresponding regex engine. The first + /// pattern has identifier `0`, and each subsequent pattern is `1`, `2` and + /// so on. + #[inline] + pub fn pattern(&self) -> PatternID { + self.pattern + } + + /// The starting position of the match. + /// + /// This is a convenience routine for `Match::span().start`. + #[inline] + pub fn start(&self) -> usize { + self.span().start + } + + /// The ending position of the match. + /// + /// This is a convenience routine for `Match::span().end`. + #[inline] + pub fn end(&self) -> usize { + self.span().end + } + + /// Returns the match span as a range. 
+ /// + /// This is a convenience routine for `Match::span().range()`. + #[inline] + pub fn range(&self) -> core::ops::Range { + self.span().range() + } + + /// Returns the span for this match. + #[inline] + pub fn span(&self) -> Span { + self.span + } + + /// Returns true when the span in this match is empty. + /// + /// An empty match can only be returned when the regex itself can match + /// the empty string. + #[inline] + pub fn is_empty(&self) -> bool { + self.span().is_empty() + } + + /// Returns the length of this match. + /// + /// This returns `0` in precisely the cases that `is_empty` returns `true`. + #[inline] + pub fn len(&self) -> usize { + self.span().len() + } +} + +/// A set of `PatternID`s. +/// +/// A set of pattern identifiers is useful for recording which patterns have +/// matched a particular haystack. A pattern set _only_ includes pattern +/// identifiers. It does not include offset information. +/// +/// # Example +/// +/// This shows basic usage of a set. +/// +/// ``` +/// use regex_automata::{PatternID, PatternSet}; +/// +/// let pid1 = PatternID::must(5); +/// let pid2 = PatternID::must(8); +/// // Create a new empty set. +/// let mut set = PatternSet::new(10); +/// // Insert pattern IDs. +/// set.insert(pid1); +/// set.insert(pid2); +/// // Test membership. +/// assert!(set.contains(pid1)); +/// assert!(set.contains(pid2)); +/// // Get all members. +/// assert_eq!( +/// vec![5, 8], +/// set.iter().map(|p| p.as_usize()).collect::>(), +/// ); +/// // Clear the set. +/// set.clear(); +/// // Test that it is indeed empty. +/// assert!(set.is_empty()); +/// ``` +#[cfg(feature = "alloc")] +#[derive(Clone, Debug, Eq, PartialEq)] +pub struct PatternSet { + /// The number of patterns set to 'true' in this set. + len: usize, + /// A map from PatternID to boolean of whether a pattern matches or not. + /// + /// This should probably be a bitset, but it's probably unlikely to matter + /// much in practice. + /// + /// The main downside of this representation (and similarly for a bitset) + /// is that iteration scales with the capacity of the set instead of + /// the length of the set. This doesn't seem likely to be a problem in + /// practice. + /// + /// Another alternative is to just use a 'SparseSet' for this. It does use + /// more memory (quite a bit more), but that seems fine I think compared + /// to the memory being used by the regex engine. The real hiccup with + /// it is that it yields pattern IDs in the order they were inserted. + /// Which is actually kind of nice, but at the time of writing, pattern + /// IDs are yielded in ascending order in the regex crate RegexSet API. + /// If we did change to 'SparseSet', we could provide an additional + /// 'iter_match_order' iterator, but keep the ascending order one for + /// compatibility. + which: alloc::boxed::Box<[bool]>, +} + +#[cfg(feature = "alloc")] +impl PatternSet { + /// Create a new set of pattern identifiers with the given capacity. + /// + /// The given capacity typically corresponds to (at least) the number of + /// patterns in a compiled regex object. + /// + /// # Panics + /// + /// This panics if the given capacity exceeds [`PatternID::LIMIT`]. This is + /// impossible if you use the `pattern_len()` method as defined on any of + /// the regex engines in this crate. Namely, a regex will fail to build by + /// returning an error if the number of patterns given to it exceeds the + /// limit. Therefore, the number of patterns in a valid regex is always + /// a correct capacity to provide here. 
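+    ///
+    /// # Example
+    ///
+    /// A brief sketch of creating a set with a fixed capacity and probing it:
+    ///
+    /// ```
+    /// use regex_automata::{PatternID, PatternSet};
+    ///
+    /// let mut set = PatternSet::new(4);
+    /// assert_eq!(4, set.capacity());
+    /// // An ID at or beyond the capacity is rejected by `try_insert`.
+    /// assert!(set.try_insert(PatternID::must(9)).is_err());
+    /// // Inserting an in-range ID reports whether it was newly added.
+    /// assert!(set.insert(PatternID::must(3)));
+    /// assert!(set.contains(PatternID::must(3)));
+    /// ```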
+ pub fn new(capacity: usize) -> PatternSet { + assert!( + capacity <= PatternID::LIMIT, + "pattern set capacity exceeds limit of {}", + PatternID::LIMIT, + ); + PatternSet { + len: 0, + which: alloc::vec![false; capacity].into_boxed_slice(), + } + } + + /// Clear this set such that it contains no pattern IDs. + pub fn clear(&mut self) { + self.len = 0; + for matched in self.which.iter_mut() { + *matched = false; + } + } + + /// Return true if and only if the given pattern identifier is in this set. + pub fn contains(&self, pid: PatternID) -> bool { + pid.as_usize() < self.capacity() && self.which[pid] + } + + /// Insert the given pattern identifier into this set and return `true` if + /// the given pattern ID was not previously in this set. + /// + /// If the pattern identifier is already in this set, then this is a no-op. + /// + /// Use [`PatternSet::try_insert`] for a fallible version of this routine. + /// + /// # Panics + /// + /// This panics if this pattern set has insufficient capacity to + /// store the given pattern ID. + pub fn insert(&mut self, pid: PatternID) -> bool { + self.try_insert(pid) + .expect("PatternSet should have sufficient capacity") + } + + /// Insert the given pattern identifier into this set and return `true` if + /// the given pattern ID was not previously in this set. + /// + /// If the pattern identifier is already in this set, then this is a no-op. + /// + /// # Errors + /// + /// This returns an error if this pattern set has insufficient capacity to + /// store the given pattern ID. + pub fn try_insert( + &mut self, + pid: PatternID, + ) -> Result { + if pid.as_usize() >= self.capacity() { + return Err(PatternSetInsertError { + attempted: pid, + capacity: self.capacity(), + }); + } + if self.which[pid] { + return Ok(false); + } + self.len += 1; + self.which[pid] = true; + Ok(true) + } + + /* + // This is currently commented out because it is unused and it is unclear + // whether it's useful or not. What's the harm in having it? When, if + // we ever wanted to change our representation to a 'SparseSet', then + // supporting this method would be a bit tricky. So in order to keep some + // API evolution flexibility, we leave it out for now. + + /// Remove the given pattern identifier from this set. + /// + /// If the pattern identifier was not previously in this set, then this + /// does not change the set and returns `false`. + /// + /// # Panics + /// + /// This panics if `pid` exceeds the capacity of this set. + pub fn remove(&mut self, pid: PatternID) -> bool { + if !self.which[pid] { + return false; + } + self.len -= 1; + self.which[pid] = false; + true + } + */ + + /// Return true if and only if this set has no pattern identifiers in it. + pub fn is_empty(&self) -> bool { + self.len() == 0 + } + + /// Return true if and only if this set has the maximum number of pattern + /// identifiers in the set. This occurs precisely when `PatternSet::len() + /// == PatternSet::capacity()`. + /// + /// This particular property is useful to test because it may allow one to + /// stop a search earlier than you might otherwise. Namely, if a search is + /// only reporting which patterns match a haystack and if you know all of + /// the patterns match at a given point, then there's no new information + /// that can be learned by continuing the search. (Because a pattern set + /// does not keep track of offset information.) + pub fn is_full(&self) -> bool { + self.len() == self.capacity() + } + + /// Returns the total number of pattern identifiers in this set. 
+ pub fn len(&self) -> usize { + self.len + } + + /// Returns the total number of pattern identifiers that may be stored + /// in this set. + /// + /// This is guaranteed to be less than or equal to [`PatternID::LIMIT`]. + /// + /// Typically, the capacity of a pattern set matches the number of patterns + /// in a regex object with which you are searching. + pub fn capacity(&self) -> usize { + self.which.len() + } + + /// Returns an iterator over all pattern identifiers in this set. + /// + /// The iterator yields pattern identifiers in ascending order, starting + /// at zero. + pub fn iter(&self) -> PatternSetIter<'_> { + PatternSetIter { it: self.which.iter().enumerate() } + } +} + +/// An error that occurs when a `PatternID` failed to insert into a +/// `PatternSet`. +/// +/// An insert fails when the given `PatternID` exceeds the configured capacity +/// of the `PatternSet`. +/// +/// This error is created by the [`PatternSet::try_insert`] routine. +#[cfg(feature = "alloc")] +#[derive(Clone, Debug)] +pub struct PatternSetInsertError { + attempted: PatternID, + capacity: usize, +} + +#[cfg(feature = "std")] +impl std::error::Error for PatternSetInsertError {} + +#[cfg(feature = "alloc")] +impl core::fmt::Display for PatternSetInsertError { + fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { + write!( + f, + "failed to insert pattern ID {} into pattern set \ + with insufficient capacity of {}", + self.attempted.as_usize(), + self.capacity, + ) + } +} + +/// An iterator over all pattern identifiers in a [`PatternSet`]. +/// +/// The lifetime parameter `'a` refers to the lifetime of the pattern set being +/// iterated over. +/// +/// This iterator is created by the [`PatternSet::iter`] method. +#[cfg(feature = "alloc")] +#[derive(Clone, Debug)] +pub struct PatternSetIter<'a> { + it: core::iter::Enumerate>, +} + +#[cfg(feature = "alloc")] +impl<'a> Iterator for PatternSetIter<'a> { + type Item = PatternID; + + fn next(&mut self) -> Option { + while let Some((index, &yes)) = self.it.next() { + if yes { + // Only valid 'PatternID' values can be inserted into the set + // and construction of the set panics if the capacity would + // permit storing invalid pattern IDs. Thus, 'yes' is only true + // precisely when 'index' corresponds to a valid 'PatternID'. + return Some(PatternID::new_unchecked(index)); + } + } + None + } + + fn size_hint(&self) -> (usize, Option) { + self.it.size_hint() + } +} + +#[cfg(feature = "alloc")] +impl<'a> DoubleEndedIterator for PatternSetIter<'a> { + fn next_back(&mut self) -> Option { + while let Some((index, &yes)) = self.it.next_back() { + if yes { + // Only valid 'PatternID' values can be inserted into the set + // and construction of the set panics if the capacity would + // permit storing invalid pattern IDs. Thus, 'yes' is only true + // precisely when 'index' corresponds to a valid 'PatternID'. + return Some(PatternID::new_unchecked(index)); + } + } + None + } +} + +/// The type of anchored search to perform. +/// +/// This is *almost* a boolean option. That is, you can either do an unanchored +/// search for any pattern in a regex, or you can do an anchored search for any +/// pattern in a regex. +/// +/// A third option exists that, assuming the regex engine supports it, permits +/// you to do an anchored search for a specific pattern. +/// +/// Note that there is no way to run an unanchored search for a specific +/// pattern. If you need that, you'll need to build separate regexes for each +/// pattern. 
+/// +/// # Errors +/// +/// If a regex engine does not support the anchored mode selected, then the +/// regex engine will return an error. While any non-trivial regex engine +/// should support at least one of the available anchored modes, there is no +/// singular mode that is guaranteed to be universally supported. Some regex +/// engines might only support unanchored searches (DFAs compiled without +/// anchored starting states) and some regex engines might only support +/// anchored searches (like the one-pass DFA). +/// +/// The specific error returned is a [`MatchError`] with a +/// [`MatchErrorKind::UnsupportedAnchored`] kind. The kind includes the +/// `Anchored` value given that is unsupported. +/// +/// Note that regex engines should report "no match" if, for example, an +/// `Anchored::Pattern` is provided with an invalid pattern ID _but_ where +/// anchored searches for a specific pattern are supported. This is smooths out +/// behavior such that it's possible to guarantee that an error never occurs +/// based on how the regex engine is configured. All regex engines in this +/// crate report "no match" when searching for an invalid pattern ID, but where +/// searching for a valid pattern ID is otherwise supported. +/// +/// # Example +/// +/// This example shows how to use the various `Anchored` modes to run a +/// search. We use the [`PikeVM`](crate::nfa::thompson::pikevm::PikeVM) +/// because it supports all modes unconditionally. Some regex engines, like +/// the [`onepass::DFA`](crate::dfa::onepass::DFA) cannot support unanchored +/// searches. +/// +/// ``` +/// # if cfg!(miri) { return Ok(()); } // miri takes too long +/// use regex_automata::{ +/// nfa::thompson::pikevm::PikeVM, +/// Anchored, Input, Match, PatternID, +/// }; +/// +/// let re = PikeVM::new_many(&[ +/// r"Mrs. \w+", +/// r"Miss \w+", +/// r"Mr. \w+", +/// r"Ms. \w+", +/// ])?; +/// let mut cache = re.create_cache(); +/// let hay = "Hello Mr. Springsteen!"; +/// +/// // The default is to do an unanchored search. +/// assert_eq!(Some(Match::must(2, 6..21)), re.find(&mut cache, hay)); +/// // Explicitly ask for an unanchored search. Same as above. +/// let input = Input::new(hay).anchored(Anchored::No); +/// assert_eq!(Some(Match::must(2, 6..21)), re.find(&mut cache, hay)); +/// +/// // Now try an anchored search. Since the match doesn't start at the +/// // beginning of the haystack, no match is found! +/// let input = Input::new(hay).anchored(Anchored::Yes); +/// assert_eq!(None, re.find(&mut cache, input)); +/// +/// // We can try an anchored search again, but move the location of where +/// // we start the search. Note that the offsets reported are still in +/// // terms of the overall haystack and not relative to where we started +/// // the search. +/// let input = Input::new(hay).anchored(Anchored::Yes).range(6..); +/// assert_eq!(Some(Match::must(2, 6..21)), re.find(&mut cache, input)); +/// +/// // Now try an anchored search for a specific pattern. We specifically +/// // choose a pattern that we know doesn't match to prove that the search +/// // only looks for the pattern we provide. +/// let input = Input::new(hay) +/// .anchored(Anchored::Pattern(PatternID::must(1))) +/// .range(6..); +/// assert_eq!(None, re.find(&mut cache, input)); +/// +/// // But if we switch it to the pattern that we know matches, then we find +/// // the match. 
+/// let input = Input::new(hay) +/// .anchored(Anchored::Pattern(PatternID::must(2))) +/// .range(6..); +/// assert_eq!(Some(Match::must(2, 6..21)), re.find(&mut cache, input)); +/// +/// # Ok::<(), Box>(()) +/// ``` +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub enum Anchored { + /// Run an unanchored search. This means a match may occur anywhere at or + /// after the start position of the search. + /// + /// This search can return a match for any pattern in the regex. + No, + /// Run an anchored search. This means that a match must begin at the + /// start position of the search. + /// + /// This search can return a match for any pattern in the regex. + Yes, + /// Run an anchored search for a specific pattern. This means that a match + /// must be for the given pattern and must begin at the start position of + /// the search. + Pattern(PatternID), +} + +impl Anchored { + /// Returns true if and only if this anchor mode corresponds to any kind of + /// anchored search. + /// + /// # Example + /// + /// This examples shows that both `Anchored::Yes` and `Anchored::Pattern` + /// are considered anchored searches. + /// + /// ``` + /// use regex_automata::{Anchored, PatternID}; + /// + /// assert!(!Anchored::No.is_anchored()); + /// assert!(Anchored::Yes.is_anchored()); + /// assert!(Anchored::Pattern(PatternID::ZERO).is_anchored()); + /// ``` + #[inline] + pub fn is_anchored(&self) -> bool { + matches!(*self, Anchored::Yes | Anchored::Pattern(_)) + } + + /// Returns the pattern ID associated with this configuration if it is an + /// anchored search for a specific pattern. Otherwise `None` is returned. + /// + /// # Example + /// + /// ``` + /// use regex_automata::{Anchored, PatternID}; + /// + /// assert_eq!(None, Anchored::No.pattern()); + /// assert_eq!(None, Anchored::Yes.pattern()); + /// + /// let pid = PatternID::must(5); + /// assert_eq!(Some(pid), Anchored::Pattern(pid).pattern()); + /// ``` + #[inline] + pub fn pattern(&self) -> Option { + match *self { + Anchored::Pattern(pid) => Some(pid), + _ => None, + } + } +} + +/// The kind of match semantics to use for a regex pattern. +/// +/// The default match kind is `LeftmostFirst`, and this corresponds to the +/// match semantics used by most backtracking engines, such as Perl. +/// +/// # Leftmost first or "preference order" match semantics +/// +/// Leftmost-first semantics determine which match to report when there are +/// multiple paths through a regex that match at the same position. The tie is +/// essentially broken by how a backtracker would behave. For example, consider +/// running the regex `foofoofoo|foofoo|foo` on the haystack `foofoo`. In this +/// case, both the `foofoo` and `foo` branches match at position `0`. So should +/// the end of the match be `3` or `6`? +/// +/// A backtracker will conceptually work by trying `foofoofoo` and failing. +/// Then it will try `foofoo`, find the match and stop there. Thus, the +/// leftmost-first match position is `6`. This is called "leftmost-first" or +/// "preference order" because the order of the branches as written in the +/// regex pattern is what determines how to break the tie. +/// +/// (Note that leftmost-longest match semantics, which break ties by always +/// taking the longest matching string, are not currently supported by this +/// crate. These match semantics tend to be found in POSIX regex engines.) 
+///
+/// This example shows how leftmost-first semantics work, and how it even
+/// applies to multi-pattern regexes:
+///
+/// ```
+/// use regex_automata::{
+///     nfa::thompson::pikevm::PikeVM,
+///     Match,
+/// };
+///
+/// let re = PikeVM::new_many(&[
+///     r"foofoofoo",
+///     r"foofoo",
+///     r"foo",
+/// ])?;
+/// let mut cache = re.create_cache();
+/// let got: Vec<Match> = re.find_iter(&mut cache, "foofoo").collect();
+/// let expected = vec![Match::must(1, 0..6)];
+/// assert_eq!(expected, got);
+///
+/// # Ok::<(), Box<dyn std::error::Error>>(())
+/// ```
+///
+/// # All matches
+///
+/// The `All` match semantics report any and all matches, and generally will
+/// attempt to match as much as possible. It doesn't respect any sort of match
+/// priority at all, so things like non-greedy matching don't work in this
+/// mode.
+///
+/// The fact that non-greedy matching doesn't work generally makes most forms
+/// of unanchored non-overlapping searches have unintuitive behavior. Namely,
+/// unanchored searches behave as if there is a `(?s-u:.)*?` prefix at the
+/// beginning of the pattern, which is specifically non-greedy. Since it will
+/// be treated as greedy in `All` match semantics, this generally means that
+/// it will first attempt to consume all of the haystack and is likely to wind
+/// up skipping matches.
+///
+/// Generally speaking, `All` should only be used in two circumstances:
+///
+/// * When running an anchored search and there is a desire to match as much as
+/// possible. For example, when building a reverse regex matcher to find the
+/// start of a match after finding the end. In this case, the reverse search
+/// is anchored to the end of the match found by the forward search.
+/// * When running overlapping searches. Since `All` encodes all possible
+/// matches, this is generally what you want for an overlapping search. If you
+/// try to use leftmost-first in an overlapping search, it is likely to produce
+/// counter-intuitive results since leftmost-first specifically excludes some
+/// matches from its underlying finite state machine.
+///
+/// This example demonstrates the counter-intuitive behavior of `All` semantics
+/// when using a standard leftmost unanchored search:
+///
+/// ```
+/// use regex_automata::{
+///     nfa::thompson::pikevm::PikeVM,
+///     Match, MatchKind,
+/// };
+///
+/// let re = PikeVM::builder()
+///     .configure(PikeVM::config().match_kind(MatchKind::All))
+///     .build("foo")?;
+/// let hay = "first foo second foo wat";
+/// let mut cache = re.create_cache();
+/// let got: Vec<Match> = re.find_iter(&mut cache, hay).collect();
+/// // Notice that it completely skips the first 'foo'!
+/// let expected = vec![Match::must(0, 17..20)];
+/// assert_eq!(expected, got);
+///
+/// # Ok::<(), Box<dyn std::error::Error>>(())
+/// ```
+///
+/// This second example shows how `All` semantics are useful for an overlapping
+/// search. Note that we use lower level lazy DFA APIs here since the NFA
+/// engines only currently support a very limited form of overlapping search.
+///
+/// ```
+/// use regex_automata::{
+///     hybrid::dfa::{DFA, OverlappingState},
+///     HalfMatch, Input, MatchKind,
+/// };
+///
+/// let re = DFA::builder()
+///     // If we didn't set 'All' semantics here, then the regex would only
+///     // match 'foo' at offset 3 and nothing else. Why? Because the state
+///     // machine implements preference order and knows that the 'foofoo' and
+///     // 'foofoofoo' branches can never match since 'foo' will always match
+///     // when they match and take priority.
+///     .configure(DFA::config().match_kind(MatchKind::All))
+///     .build(r"foo|foofoo|foofoofoo")?;
+/// let mut cache = re.create_cache();
+/// let mut state = OverlappingState::start();
+/// let input = Input::new("foofoofoo");
+/// let mut got = vec![];
+/// loop {
+///     re.try_search_overlapping_fwd(&mut cache, &input, &mut state)?;
+///     let m = match state.get_match() {
+///         None => break,
+///         Some(m) => m,
+///     };
+///     got.push(m);
+/// }
+/// let expected = vec![
+///     HalfMatch::must(0, 3),
+///     HalfMatch::must(0, 6),
+///     HalfMatch::must(0, 9),
+/// ];
+/// assert_eq!(expected, got);
+///
+/// # Ok::<(), Box<dyn std::error::Error>>(())
+/// ```
+#[non_exhaustive]
+#[derive(Clone, Copy, Default, Debug, Eq, PartialEq)]
+pub enum MatchKind {
+    /// Report all possible matches.
+    All,
+    /// Report only the leftmost matches. When multiple leftmost matches exist,
+    /// report the match corresponding to the part of the regex that appears
+    /// first in the syntax.
+    #[default]
+    LeftmostFirst,
+    // There is prior art in RE2 that shows that we should be able to add
+    // LeftmostLongest too. The tricky part of it is supporting ungreedy
+    // repetitions. Instead of treating all NFA states as having equivalent
+    // priority (as in 'All') or treating all NFA states as having distinct
+    // priority based on order (as in 'LeftmostFirst'), we instead group NFA
+    // states into sets, and treat members of each set as having equivalent
+    // priority, but having greater priority than all following members
+    // of different sets.
+    //
+    // However, it's not clear whether it's really worth adding this. After
+    // all, leftmost-longest can be emulated when using literals by using
+    // leftmost-first and sorting the literals by length in descending order.
+    // However, this won't work for arbitrary regexes. e.g., `\w|\w\w` will
+    // always match `a` in `ab` when using leftmost-first, but leftmost-longest
+    // would match `ab`.
+}
+
+impl MatchKind {
+    #[cfg(feature = "alloc")]
+    pub(crate) fn continue_past_first_match(&self) -> bool {
+        *self == MatchKind::All
+    }
+}
+
+/// An error indicating that a search stopped before reporting whether a
+/// match exists or not.
+///
+/// To be very clear, this error type implies that one cannot assume that no
+/// matches occur, since the search stopped before completing. That is, if
+/// you're looking for information about where a search determined that no
+/// match can occur, then this error type does *not* give you that. (Indeed, at
+/// the time of writing, if you need such a thing, you have to write your own
+/// search routine.)
+///
+/// Normally, when one searches for something, the response is either an
+/// affirmative "it was found at this location" or a negative "not found at
+/// all." However, in some cases, a regex engine can be configured to stop its
+/// search before concluding whether a match exists or not. When this happens,
+/// it may be important for the caller to know why the regex engine gave up and
+/// where in the input it gave up at. This error type exposes the 'why' and the
+/// 'where.'
+///
+/// For example, the DFAs provided by this library generally cannot correctly
+/// implement Unicode word boundaries. Instead, they provide an option to
+/// eagerly support them on ASCII text (since Unicode word boundaries are
+/// equivalent to ASCII word boundaries when searching ASCII text), but will
+/// "give up" if a non-ASCII byte is seen.
In such cases, one is usually
+/// required to either report the failure to the caller (unergonomic) or
+/// otherwise fall back to some other regex engine (ergonomic, but potentially
+/// costly).
+///
+/// More generally, some regex engines offer the ability for callers to specify
+/// certain bytes that will trigger the regex engine to automatically quit if
+/// they are seen.
+///
+/// Still yet, there may be other reasons for a failed match. For example,
+/// the hybrid DFA provided by this crate can be configured to give up if it
+/// believes that it is not efficient. This in turn permits callers to choose a
+/// different regex engine.
+///
+/// (Note that DFAs are configured by default to never quit or give up in this
+/// fashion. For example, by default, a DFA will fail to build if the regex
+/// pattern contains a Unicode word boundary. One needs to opt into the "quit"
+/// behavior via options, like
+/// [`hybrid::dfa::Config::unicode_word_boundary`](crate::hybrid::dfa::Config::unicode_word_boundary).)
+///
+/// There are a couple other ways a search
+/// can fail. For example, when using the
+/// [`BoundedBacktracker`](crate::nfa::thompson::backtrack::BoundedBacktracker)
+/// with a haystack that is too long, or trying to run an unanchored search
+/// with a [one-pass DFA](crate::dfa::onepass).
+#[derive(Clone, Debug, Eq, PartialEq)]
+pub struct MatchError(
+    #[cfg(feature = "alloc")] alloc::boxed::Box<MatchErrorKind>,
+    #[cfg(not(feature = "alloc"))] MatchErrorKind,
+);
+
+impl MatchError {
+    /// Create a new error value with the given kind.
+    ///
+    /// This is a more verbose version of the kind-specific constructors,
+    /// e.g., `MatchError::quit`.
+    pub fn new(kind: MatchErrorKind) -> MatchError {
+        #[cfg(feature = "alloc")]
+        {
+            MatchError(alloc::boxed::Box::new(kind))
+        }
+        #[cfg(not(feature = "alloc"))]
+        {
+            MatchError(kind)
+        }
+    }
+
+    /// Returns a reference to the underlying error kind.
+    pub fn kind(&self) -> &MatchErrorKind {
+        &self.0
+    }
+
+    /// Create a new "quit" error. The given `byte` corresponds to the value
+    /// that tripped a search's quit condition, and `offset` corresponds to the
+    /// location in the haystack at which the search quit.
+    ///
+    /// This is the same as calling `MatchError::new` with a
+    /// [`MatchErrorKind::Quit`] kind.
+    pub fn quit(byte: u8, offset: usize) -> MatchError {
+        MatchError::new(MatchErrorKind::Quit { byte, offset })
+    }
+
+    /// Create a new "gave up" error. The given `offset` corresponds to the
+    /// location in the haystack at which the search gave up.
+    ///
+    /// This is the same as calling `MatchError::new` with a
+    /// [`MatchErrorKind::GaveUp`] kind.
+    pub fn gave_up(offset: usize) -> MatchError {
+        MatchError::new(MatchErrorKind::GaveUp { offset })
+    }
+
+    /// Create a new "haystack too long" error. The given `len` corresponds to
+    /// the length of the haystack that was problematic.
+    ///
+    /// This is the same as calling `MatchError::new` with a
+    /// [`MatchErrorKind::HaystackTooLong`] kind.
+    pub fn haystack_too_long(len: usize) -> MatchError {
+        MatchError::new(MatchErrorKind::HaystackTooLong { len })
+    }
+
+    /// Create a new "unsupported anchored" error. This occurs when the caller
+    /// requests a search with an anchor mode that is not supported by the
+    /// regex engine.
+    ///
+    /// This is the same as calling `MatchError::new` with a
+    /// [`MatchErrorKind::UnsupportedAnchored`] kind.
+ pub fn unsupported_anchored(mode: Anchored) -> MatchError { + MatchError::new(MatchErrorKind::UnsupportedAnchored { mode }) + } +} + +/// The underlying kind of a [`MatchError`]. +/// +/// This is a **non-exhaustive** enum. That means new variants may be added in +/// a semver-compatible release. +#[non_exhaustive] +#[derive(Clone, Debug, Eq, PartialEq)] +pub enum MatchErrorKind { + /// The search saw a "quit" byte at which it was instructed to stop + /// searching. + Quit { + /// The "quit" byte that was observed that caused the search to stop. + byte: u8, + /// The offset at which the quit byte was observed. + offset: usize, + }, + /// The search, based on heuristics, determined that it would be better + /// to stop, typically to provide the caller an opportunity to use an + /// alternative regex engine. + /// + /// Currently, the only way for this to occur is via the lazy DFA and + /// only when it is configured to do so (it will not return this error by + /// default). + GaveUp { + /// The offset at which the search stopped. This corresponds to the + /// position immediately following the last byte scanned. + offset: usize, + }, + /// This error occurs if the haystack given to the regex engine was too + /// long to be searched. This occurs, for example, with regex engines + /// like the bounded backtracker that have a configurable fixed amount of + /// capacity that is tied to the length of the haystack. Anything beyond + /// that configured limit will result in an error at search time. + HaystackTooLong { + /// The length of the haystack that exceeded the limit. + len: usize, + }, + /// An error indicating that a particular type of anchored search was + /// requested, but that the regex engine does not support it. + /// + /// Note that this error should not be returned by a regex engine simply + /// because the pattern ID is invalid (i.e., equal to or exceeds the number + /// of patterns in the regex). In that case, the regex engine should report + /// a non-match. + UnsupportedAnchored { + /// The anchored mode given that is unsupported. + mode: Anchored, + }, +} + +#[cfg(feature = "std")] +impl std::error::Error for MatchError {} + +impl core::fmt::Display for MatchError { + fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { + match *self.kind() { + MatchErrorKind::Quit { byte, offset } => write!( + f, + "quit search after observing byte {:?} at offset {}", + DebugByte(byte), + offset, + ), + MatchErrorKind::GaveUp { offset } => { + write!(f, "gave up searching at offset {offset}") + } + MatchErrorKind::HaystackTooLong { len } => { + write!(f, "haystack of length {len} is too long") + } + MatchErrorKind::UnsupportedAnchored { mode: Anchored::Yes } => { + write!(f, "anchored searches are not supported or enabled") + } + MatchErrorKind::UnsupportedAnchored { mode: Anchored::No } => { + write!(f, "unanchored searches are not supported or enabled") + } + MatchErrorKind::UnsupportedAnchored { + mode: Anchored::Pattern(pid), + } => { + write!( + f, + "anchored searches for a specific pattern ({}) are \ + not supported or enabled", + pid.as_usize(), + ) + } + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + // We test that our 'MatchError' type is the size we expect. This isn't an + // API guarantee, but if the size increases, we really want to make sure we + // decide to do that intentionally. So this should be a speed bump. And in + // general, we should not increase the size without a very good reason. + // + // Why? 
Because low level search APIs return Result<.., MatchError>. When
+    // MatchError gets bigger, so too does the Result type.
+    //
+    // Now, when 'alloc' is enabled, we do box the error, which de-emphasizes
+    // the importance of keeping a small error type. But without 'alloc', we
+    // still want things to be small.
+    #[test]
+    fn match_error_size() {
+        let expected_size = if cfg!(feature = "alloc") {
+            core::mem::size_of::<usize>()
+        } else {
+            2 * core::mem::size_of::<usize>()
+        };
+        assert_eq!(expected_size, core::mem::size_of::<MatchError>());
+    }
+
+    // Same as above, but for the underlying match error kind.
+    #[cfg(target_pointer_width = "64")]
+    #[test]
+    fn match_error_kind_size() {
+        let expected_size = 2 * core::mem::size_of::<usize>();
+        assert_eq!(expected_size, core::mem::size_of::<MatchErrorKind>());
+    }
+
+    #[cfg(target_pointer_width = "32")]
+    #[test]
+    fn match_error_kind_size() {
+        let expected_size = 3 * core::mem::size_of::<usize>();
+        assert_eq!(expected_size, core::mem::size_of::<MatchErrorKind>());
+    }
+
+    #[test]
+    fn incorrect_asref_guard() {
+        struct Bad(std::cell::Cell<bool>);
+
+        impl AsRef<[u8]> for Bad {
+            fn as_ref(&self) -> &[u8] {
+                if self.0.replace(false) {
+                    &[]
+                } else {
+                    &[0; 1000]
+                }
+            }
+        }
+
+        let bad = Bad(std::cell::Cell::new(true));
+        let input = Input::new(&bad);
+        assert!(input.end() <= input.haystack().len());
+    }
+}
diff --git a/vendor/regex-automata/src/util/sparse_set.rs b/vendor/regex-automata/src/util/sparse_set.rs
new file mode 100644
index 00000000000000..e30d5b9b7f3fe6
--- /dev/null
+++ b/vendor/regex-automata/src/util/sparse_set.rs
@@ -0,0 +1,239 @@
+/*!
+This module defines a sparse set data structure. Its most interesting
+properties are:
+
+* They preserve insertion order.
+* Set membership testing is done in constant time.
+* Set insertion is done in constant time.
+* Clearing the set is done in constant time.
+
+The cost for doing this is that the capacity of the set needs to be known up
+front, and the elements in the set are limited to state identifiers.
+
+These sets are principally used when traversing an NFA state graph. This
+happens at search time, for example, in the PikeVM. It also happens during DFA
+determinization.
+*/
+
+use alloc::{vec, vec::Vec};
+
+use crate::util::primitives::StateID;
+
+/// A pair of sparse sets.
+///
+/// This is useful when one needs to compute NFA epsilon closures from a
+/// previous set of states derived from an epsilon closure. One set can be the
+/// starting states whereas the other set can be the destination states after
+/// following the transitions for a particular byte of input.
+///
+/// There is no significance to 'set1' or 'set2'. They are both sparse sets of
+/// the same size.
+///
+/// The members of this struct are exposed so that callers may borrow 'set1'
+/// and 'set2' individually without being forced to borrow both at the same
+/// time.
+#[derive(Clone, Debug)]
+pub(crate) struct SparseSets {
+    pub(crate) set1: SparseSet,
+    pub(crate) set2: SparseSet,
+}
+
+impl SparseSets {
+    /// Create a new pair of sparse sets where each set has the given capacity.
+    ///
+    /// This panics if the capacity given is bigger than `StateID::LIMIT`.
+    pub(crate) fn new(capacity: usize) -> SparseSets {
+        SparseSets {
+            set1: SparseSet::new(capacity),
+            set2: SparseSet::new(capacity),
+        }
+    }
+
+    /// Resizes these sparse sets to have the new capacity given.
+    ///
+    /// The sets are automatically cleared.
+    ///
+    /// This panics if the capacity given is bigger than `StateID::LIMIT`.
+    #[inline]
+    pub(crate) fn resize(&mut self, new_capacity: usize) {
+        self.set1.resize(new_capacity);
+        self.set2.resize(new_capacity);
+    }
+
+    /// Clear both sparse sets.
+    pub(crate) fn clear(&mut self) {
+        self.set1.clear();
+        self.set2.clear();
+    }
+
+    /// Swap set1 with set2.
+    pub(crate) fn swap(&mut self) {
+        core::mem::swap(&mut self.set1, &mut self.set2);
+    }
+
+    /// Returns the memory usage, in bytes, used by this pair of sparse sets.
+    pub(crate) fn memory_usage(&self) -> usize {
+        self.set1.memory_usage() + self.set2.memory_usage()
+    }
+}
+
+/// A sparse set used for representing ordered NFA states.
+///
+/// This supports constant time addition and membership testing. Clearing an
+/// entire set can also be done in constant time. Iteration yields elements
+/// in the order in which they were inserted.
+///
+/// The data structure is based on: https://research.swtch.com/sparse
+/// Note though that we don't actually use uninitialized memory. We generally
+/// reuse sparse sets, so the initial allocation cost is bearable. However, its
+/// other properties listed above are extremely useful.
+#[derive(Clone)]
+pub(crate) struct SparseSet {
+    /// The number of elements currently in this set.
+    len: usize,
+    /// Dense contains the ids in the order in which they were inserted.
+    dense: Vec<StateID>,
+    /// Sparse maps ids to their location in dense.
+    ///
+    /// A state ID is in the set if and only if
+    /// sparse[id] < len && id == dense[sparse[id]].
+    ///
+    /// Note that these are indices into 'dense'. It's a little weird to use
+    /// StateID here, but we know our length can never exceed the bounds of
+    /// StateID (enforced by 'resize') and StateID will be at most 4 bytes
+    /// whereas a usize is likely double that in most cases.
+    sparse: Vec<StateID>,
+}
+
+impl SparseSet {
+    /// Create a new sparse set with the given capacity.
+    ///
+    /// Sparse sets have a fixed size and they cannot grow. Attempting to
+    /// insert more distinct elements than the total capacity of the set will
+    /// result in a panic.
+    ///
+    /// This panics if the capacity given is bigger than `StateID::LIMIT`.
+    #[inline]
+    pub(crate) fn new(capacity: usize) -> SparseSet {
+        let mut set = SparseSet { len: 0, dense: vec![], sparse: vec![] };
+        set.resize(capacity);
+        set
+    }
+
+    /// Resizes this sparse set to have the new capacity given.
+    ///
+    /// This set is automatically cleared.
+    ///
+    /// This panics if the capacity given is bigger than `StateID::LIMIT`.
+    #[inline]
+    pub(crate) fn resize(&mut self, new_capacity: usize) {
+        assert!(
+            new_capacity <= StateID::LIMIT,
+            "sparse set capacity cannot exceed {:?}",
+            StateID::LIMIT
+        );
+        self.clear();
+        self.dense.resize(new_capacity, StateID::ZERO);
+        self.sparse.resize(new_capacity, StateID::ZERO);
+    }
+
+    /// Returns the capacity of this set.
+    ///
+    /// The capacity represents a fixed limit on the number of distinct
+    /// elements that are allowed in this set. The capacity cannot be changed.
+    #[inline]
+    pub(crate) fn capacity(&self) -> usize {
+        self.dense.len()
+    }
+
+    /// Returns the number of elements in this set.
+    #[inline]
+    pub(crate) fn len(&self) -> usize {
+        self.len
+    }
+
+    /// Returns true if and only if this set is empty.
+    #[inline]
+    pub(crate) fn is_empty(&self) -> bool {
+        self.len() == 0
+    }
+
+    /// Insert the state ID value into this set and return true if the given
+    /// state ID was not previously in this set.
+    ///
+    /// This operation is idempotent. If the given value is already in this
+    /// set, then this is a no-op.
+    ///
+    /// If more than `capacity` ids are inserted, then this panics.
+    ///
+    /// This is marked as inline(always) since the compiler won't inline it
+    /// otherwise, and it's a fairly hot piece of code in DFA determinization.
+    #[cfg_attr(feature = "perf-inline", inline(always))]
+    pub(crate) fn insert(&mut self, id: StateID) -> bool {
+        if self.contains(id) {
+            return false;
+        }
+
+        let i = self.len();
+        assert!(
+            i < self.capacity(),
+            "{:?} exceeds capacity of {:?} when inserting {:?}",
+            i,
+            self.capacity(),
+            id,
+        );
+        // OK since i < self.capacity() and self.capacity() is guaranteed to
+        // be <= StateID::LIMIT.
+        let index = StateID::new_unchecked(i);
+        self.dense[index] = id;
+        self.sparse[id] = index;
+        self.len += 1;
+        true
+    }
+
+    /// Returns true if and only if this set contains the given value.
+    #[inline]
+    pub(crate) fn contains(&self, id: StateID) -> bool {
+        let index = self.sparse[id];
+        index.as_usize() < self.len() && self.dense[index] == id
+    }
+
+    /// Clear this set such that it has no members.
+    #[inline]
+    pub(crate) fn clear(&mut self) {
+        self.len = 0;
+    }
+
+    #[inline]
+    pub(crate) fn iter(&self) -> SparseSetIter<'_> {
+        SparseSetIter(self.dense[..self.len()].iter())
+    }
+
+    /// Returns the heap memory usage, in bytes, used by this sparse set.
+    #[inline]
+    pub(crate) fn memory_usage(&self) -> usize {
+        self.dense.len() * StateID::SIZE + self.sparse.len() * StateID::SIZE
+    }
+}
+
+impl core::fmt::Debug for SparseSet {
+    fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
+        let elements: Vec<StateID> = self.iter().collect();
+        f.debug_tuple("SparseSet").field(&elements).finish()
+    }
+}
+
+/// An iterator over all elements in a sparse set.
+///
+/// The lifetime `'a` refers to the lifetime of the set being iterated over.
+#[derive(Debug)]
+pub(crate) struct SparseSetIter<'a>(core::slice::Iter<'a, StateID>);
+
+impl<'a> Iterator for SparseSetIter<'a> {
+    type Item = StateID;
+
+    #[cfg_attr(feature = "perf-inline", inline(always))]
+    fn next(&mut self) -> Option<StateID> {
+        self.0.next().copied()
+    }
+}
diff --git a/vendor/regex-automata/src/util/start.rs b/vendor/regex-automata/src/util/start.rs
new file mode 100644
index 00000000000000..c34b376a6405b0
--- /dev/null
+++ b/vendor/regex-automata/src/util/start.rs
@@ -0,0 +1,479 @@
+/*!
+Provides helpers for dealing with start state configurations in DFAs.
+*/
+
+use crate::util::{
+    look::LookMatcher,
+    search::{Anchored, Input},
+    wire::{self, DeserializeError, SerializeError},
+};
+
+/// The configuration used to determine a DFA's start state for a search.
+///
+/// A DFA has a single starting state in the typical textbook description. That
+/// is, it corresponds to the set of all starting states for the NFA that built
+/// it, along with their epsilon closures. In this crate, however, DFAs have
+/// many possible start states due to a few factors:
+///
+/// * DFAs support the ability to run either anchored or unanchored searches.
+/// Each type of search needs its own start state. For example, an unanchored
+/// search requires starting at a state corresponding to a regex with a
+/// `(?s-u:.)*?` prefix, which will match through anything.
+/// * DFAs also optionally support starting an anchored search for any one
+/// specific pattern. Each such pattern requires its own start state.
+/// * If a look-behind assertion like `^` or `\b` is used in the regex, then
+/// the DFA will need to inspect a single byte immediately before the start of
+/// the search to choose the correct start state.
+///
+/// Indeed, this configuration precisely encapsulates all of the above factors.
+/// The [`Config::anchored`] method sets which kind of anchored search to
+/// perform while the [`Config::look_behind`] method provides a way to set
+/// the byte that occurs immediately before the start of the search.
+///
+/// Generally speaking, this type is only useful when you want to run searches
+/// without using an [`Input`]. In particular, an `Input` wants a haystack
+/// slice, but callers may not have a contiguous sequence of bytes as a
+/// haystack in all cases. This type provides a lower level of control such
+/// that callers can provide their own anchored configuration and look-behind
+/// byte explicitly.
+///
+/// # Example
+///
+/// This shows basic usage that permits running a search with a DFA without
+/// using the `Input` abstraction.
+///
+/// ```
+/// use regex_automata::{
+///     dfa::{Automaton, dense},
+///     util::start,
+///     Anchored,
+/// };
+///
+/// let dfa = dense::DFA::new(r"(?-u)\b\w+\b")?;
+/// let haystack = "quartz";
+///
+/// let config = start::Config::new().anchored(Anchored::Yes);
+/// let mut state = dfa.start_state(&config)?;
+/// for &b in haystack.as_bytes().iter() {
+///     state = dfa.next_state(state, b);
+/// }
+/// state = dfa.next_eoi_state(state);
+/// assert!(dfa.is_match_state(state));
+///
+/// # Ok::<(), Box<dyn std::error::Error>>(())
+/// ```
+///
+/// This example shows how to correctly run a search that doesn't begin at
+/// the start of a haystack. Notice how we set the look-behind byte, and as
+/// a result, the `\b` assertion does not match.
+///
+/// ```
+/// use regex_automata::{
+///     dfa::{Automaton, dense},
+///     util::start,
+///     Anchored,
+/// };
+///
+/// let dfa = dense::DFA::new(r"(?-u)\b\w+\b")?;
+/// let haystack = "quartz";
+///
+/// let config = start::Config::new()
+///     .anchored(Anchored::Yes)
+///     .look_behind(Some(b'q'));
+/// let mut state = dfa.start_state(&config)?;
+/// for &b in haystack.as_bytes().iter().skip(1) {
+///     state = dfa.next_state(state, b);
+/// }
+/// state = dfa.next_eoi_state(state);
+/// // No match!
+/// assert!(!dfa.is_match_state(state));
+///
+/// # Ok::<(), Box<dyn std::error::Error>>(())
+/// ```
+///
+/// If we had instead not set a look-behind byte, then the DFA would assume
+/// that it was starting at the beginning of the haystack, and thus `\b` should
+/// match. This in turn would result in erroneously reporting a match:
+///
+/// ```
+/// use regex_automata::{
+///     dfa::{Automaton, dense},
+///     util::start,
+///     Anchored,
+/// };
+///
+/// let dfa = dense::DFA::new(r"(?-u)\b\w+\b")?;
+/// let haystack = "quartz";
+///
+/// // Whoops, forgot the look-behind byte...
+/// let config = start::Config::new().anchored(Anchored::Yes);
+/// let mut state = dfa.start_state(&config)?;
+/// for &b in haystack.as_bytes().iter().skip(1) {
+///     state = dfa.next_state(state, b);
+/// }
+/// state = dfa.next_eoi_state(state);
+/// // And now we get a match unexpectedly.
+/// assert!(dfa.is_match_state(state));
+///
+/// # Ok::<(), Box<dyn std::error::Error>>(())
+/// ```
+#[derive(Clone, Debug)]
+pub struct Config {
+    look_behind: Option<u8>,
+    anchored: Anchored,
+}
+
+impl Config {
+    /// Create a new default start configuration.
+    ///
+    /// The default is an unanchored search that starts at the beginning of the
+    /// haystack.
+    pub fn new() -> Config {
+        Config { anchored: Anchored::No, look_behind: None }
+    }
+
+    /// A convenience routine for building a start configuration from an
+    /// [`Input`] for a forward search.
+    ///
+    /// This automatically sets the look-behind byte to the byte immediately
+    /// preceding the start of the search. If the start of the search is at
+    /// offset `0`, then no look-behind byte is set.
+    pub fn from_input_forward(input: &Input<'_>) -> Config {
+        let look_behind = input
+            .start()
+            .checked_sub(1)
+            .and_then(|i| input.haystack().get(i).copied());
+        Config { look_behind, anchored: input.get_anchored() }
+    }
+
+    /// A convenience routine for building a start configuration from an
+    /// [`Input`] for a reverse search.
+    ///
+    /// This automatically sets the look-behind byte to the byte immediately
+    /// following the end of the search. If the end of the search is at
+    /// offset `haystack.len()`, then no look-behind byte is set.
+    pub fn from_input_reverse(input: &Input<'_>) -> Config {
+        let look_behind = input.haystack().get(input.end()).copied();
+        Config { look_behind, anchored: input.get_anchored() }
+    }
+
+    /// Set the look-behind byte at the start of a search.
+    ///
+    /// Unless the search is intended to logically start at the beginning of a
+    /// haystack, this should _always_ be set to the byte immediately preceding
+    /// the start of the search. If no look-behind byte is set, then the start
+    /// configuration will assume it is at the beginning of the haystack. For
+    /// example, the anchor `^` will match.
+    ///
+    /// The default is that no look-behind byte is set.
+    pub fn look_behind(mut self, byte: Option<u8>) -> Config {
+        self.look_behind = byte;
+        self
+    }
+
+    /// Set the anchored mode of a search.
+    ///
+    /// The default is an unanchored search.
+    pub fn anchored(mut self, mode: Anchored) -> Config {
+        self.anchored = mode;
+        self
+    }
+
+    /// Return the look-behind byte in this configuration, if one exists.
+    pub fn get_look_behind(&self) -> Option<u8> {
+        self.look_behind
+    }
+
+    /// Return the anchored mode in this configuration.
+    pub fn get_anchored(&self) -> Anchored {
+        self.anchored
+    }
+}
+
+/// A map from every possible byte value to its corresponding starting
+/// configuration.
+///
+/// This map is used in order to look up the start configuration for a
+/// particular position in a haystack. This start configuration is then used in
+/// combination with things like the anchored mode and pattern ID to fully
+/// determine the start state.
+///
+/// Generally speaking, this map is only used for fully compiled DFAs and lazy
+/// DFAs. For NFAs (including the one-pass DFA), the start state is generally
+/// selected by virtue of traversing the NFA state graph. DFAs do the same
+/// thing, but at build time and not search time. (Well, technically the lazy
+/// DFA does it at search time, but it does enough work to cache the full
+/// result of the epsilon closure that the NFA engines tend to need to do.)
+#[derive(Clone)]
+pub(crate) struct StartByteMap {
+    map: [Start; 256],
+}
+
+impl StartByteMap {
+    /// Create a new map from byte values to their corresponding starting
+    /// configurations. The map is determined, in part, by how look-around
+    /// assertions are matched via the matcher given.
+    pub(crate) fn new(lookm: &LookMatcher) -> StartByteMap {
+        let mut map = [Start::NonWordByte; 256];
+        map[usize::from(b'\n')] = Start::LineLF;
+        map[usize::from(b'\r')] = Start::LineCR;
+        map[usize::from(b'_')] = Start::WordByte;
+
+        let mut byte = b'0';
+        while byte <= b'9' {
+            map[usize::from(byte)] = Start::WordByte;
+            byte += 1;
+        }
+        byte = b'A';
+        while byte <= b'Z' {
+            map[usize::from(byte)] = Start::WordByte;
+            byte += 1;
+        }
+        byte = b'a';
+        while byte <= b'z' {
+            map[usize::from(byte)] = Start::WordByte;
+            byte += 1;
+        }
+
+        let lineterm = lookm.get_line_terminator();
+        // If our line terminator is normal, then it is already handled by
+        // the LineLF and LineCR configurations. But if it's weird, then we
+        // overwrite whatever was there before for that terminator with a
+        // special configuration. The trick here is that if the terminator
+        // is, say, a word byte like `a`, then callers seeing this start
+        // configuration need to account for that and build their DFA state as
+        // if it *also* came from a word byte.
+        if lineterm != b'\r' && lineterm != b'\n' {
+            map[usize::from(lineterm)] = Start::CustomLineTerminator;
+        }
+        StartByteMap { map }
+    }
+
+    /// Return the starting configuration for the given look-behind byte.
+    ///
+    /// If no look-behind exists, callers should use `Start::Text`.
+    #[cfg_attr(feature = "perf-inline", inline(always))]
+    pub(crate) fn get(&self, byte: u8) -> Start {
+        self.map[usize::from(byte)]
+    }
+
+    /// Deserializes a byte class map from the given slice. If the slice is of
+    /// insufficient length or otherwise contains an impossible mapping, then
+    /// an error is returned. Upon success, the number of bytes read along with
+    /// the map are returned. The number of bytes read is always a multiple of
+    /// 8.
+    pub(crate) fn from_bytes(
+        slice: &[u8],
+    ) -> Result<(StartByteMap, usize), DeserializeError> {
+        wire::check_slice_len(slice, 256, "start byte map")?;
+        let mut map = [Start::NonWordByte; 256];
+        for (i, &repr) in slice[..256].iter().enumerate() {
+            map[i] = match Start::from_usize(usize::from(repr)) {
+                Some(start) => start,
+                None => {
+                    return Err(DeserializeError::generic(
+                        "found invalid starting configuration",
+                    ))
+                }
+            };
+        }
+        Ok((StartByteMap { map }, 256))
+    }
+
+    /// Writes this map to the given byte buffer. If the given buffer is too
+    /// small, then an error is returned. Upon success, the total number of
+    /// bytes written is returned. The number of bytes written is guaranteed to
+    /// be a multiple of 8.
+    pub(crate) fn write_to(
+        &self,
+        dst: &mut [u8],
+    ) -> Result<usize, SerializeError> {
+        let nwrite = self.write_to_len();
+        if dst.len() < nwrite {
+            return Err(SerializeError::buffer_too_small("start byte map"));
+        }
+        for (i, &start) in self.map.iter().enumerate() {
+            dst[i] = start.as_u8();
+        }
+        Ok(nwrite)
+    }
+
+    /// Returns the total number of bytes written by `write_to`.
+    pub(crate) fn write_to_len(&self) -> usize {
+        256
+    }
+}
+
+impl core::fmt::Debug for StartByteMap {
+    fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
+        use crate::util::escape::DebugByte;
+
+        write!(f, "StartByteMap{{")?;
+        for byte in 0..=255 {
+            if byte > 0 {
+                write!(f, ", ")?;
+            }
+            let start = self.map[usize::from(byte)];
+            write!(f, "{:?} => {:?}", DebugByte(byte), start)?;
+        }
+        write!(f, "}}")?;
+        Ok(())
+    }
+}
+
+/// Represents the six possible starting configurations of a DFA search.
+///
+/// The starting configuration is determined by inspecting the beginning
+/// of the haystack (up to 1 byte).
Ultimately, this along with a pattern ID
+/// (if specified) and the type of search (anchored or not) is what selects the
+/// start state to use in a DFA.
+///
+/// As one example, if a DFA only supports unanchored searches and does not
+/// support anchored searches for each pattern, then it will have at most 6
+/// distinct start states. (Some start states may be reused if determinization
+/// can determine that they will be equivalent.) If the DFA supports both
+/// anchored and unanchored searches, then it will have a maximum of 12
+/// distinct start states. Finally, if the DFA also supports anchored searches
+/// for each pattern, then it can have up to `12 + (N * 6)` start states, where
+/// `N` is the number of patterns.
+///
+/// Handling each of these starting configurations in the context of DFA
+/// determinization can be *quite* tricky and subtle. But the code is small
+/// and can be found at `crate::util::determinize::set_lookbehind_from_start`.
+#[derive(Clone, Copy, Debug, Eq, PartialEq)]
+pub(crate) enum Start {
+    /// This occurs when the starting position is not any of the ones below.
+    NonWordByte = 0,
+    /// This occurs when the byte immediately preceding the start of the search
+    /// is an ASCII word byte.
+    WordByte = 1,
+    /// This occurs when the starting position of the search corresponds to the
+    /// beginning of the haystack.
+    Text = 2,
+    /// This occurs when the byte immediately preceding the start of the search
+    /// is a line terminator. Specifically, `\n`.
+    LineLF = 3,
+    /// This occurs when the byte immediately preceding the start of the search
+    /// is a line terminator. Specifically, `\r`.
+    LineCR = 4,
+    /// This occurs when a custom line terminator has been set via a
+    /// `LookMatcher`, and when that line terminator is neither a `\r` nor a
+    /// `\n`.
+    ///
+    /// If the custom line terminator is a word byte, then this start
+    /// configuration is still selected. DFAs that implement word boundary
+    /// assertions will likely need to check whether the custom line terminator
+    /// is a word byte, in which case, it should behave as if the byte
+    /// satisfies `\b` in addition to multi-line anchors.
+    CustomLineTerminator = 5,
+}
+
+impl Start {
+    /// Return the starting state corresponding to the given integer. If no
+    /// starting state exists for the given integer, then None is returned.
+    pub(crate) fn from_usize(n: usize) -> Option<Start> {
+        match n {
+            0 => Some(Start::NonWordByte),
+            1 => Some(Start::WordByte),
+            2 => Some(Start::Text),
+            3 => Some(Start::LineLF),
+            4 => Some(Start::LineCR),
+            5 => Some(Start::CustomLineTerminator),
+            _ => None,
+        }
+    }
+
+    /// Returns the total number of starting state configurations.
+    pub(crate) fn len() -> usize {
+        6
+    }
+
+    /// Return this starting configuration as a `u8` integer. It is guaranteed
+    /// to be less than `Start::len()`.
+    #[cfg_attr(feature = "perf-inline", inline(always))]
+    pub(crate) fn as_u8(&self) -> u8 {
+        // AFAIK, 'as' is the only way to zero-cost convert an int enum to an
+        // actual int.
+        *self as u8
+    }
+
+    /// Return this starting configuration as a `usize` integer. It is
+    /// guaranteed to be less than `Start::len()`.
+ #[cfg_attr(feature = "perf-inline", inline(always))] + pub(crate) fn as_usize(&self) -> usize { + usize::from(self.as_u8()) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn start_fwd_done_range() { + let smap = StartByteMap::new(&LookMatcher::default()); + let input = Input::new("").range(1..0); + let config = Config::from_input_forward(&input); + let start = + config.get_look_behind().map_or(Start::Text, |b| smap.get(b)); + assert_eq!(Start::Text, start); + } + + #[test] + fn start_rev_done_range() { + let smap = StartByteMap::new(&LookMatcher::default()); + let input = Input::new("").range(1..0); + let config = Config::from_input_reverse(&input); + let start = + config.get_look_behind().map_or(Start::Text, |b| smap.get(b)); + assert_eq!(Start::Text, start); + } + + #[test] + fn start_fwd() { + let f = |haystack, start, end| { + let smap = StartByteMap::new(&LookMatcher::default()); + let input = Input::new(haystack).range(start..end); + let config = Config::from_input_forward(&input); + let start = + config.get_look_behind().map_or(Start::Text, |b| smap.get(b)); + start + }; + + assert_eq!(Start::Text, f("", 0, 0)); + assert_eq!(Start::Text, f("abc", 0, 3)); + assert_eq!(Start::Text, f("\nabc", 0, 3)); + + assert_eq!(Start::LineLF, f("\nabc", 1, 3)); + + assert_eq!(Start::LineCR, f("\rabc", 1, 3)); + + assert_eq!(Start::WordByte, f("abc", 1, 3)); + + assert_eq!(Start::NonWordByte, f(" abc", 1, 3)); + } + + #[test] + fn start_rev() { + let f = |haystack, start, end| { + let smap = StartByteMap::new(&LookMatcher::default()); + let input = Input::new(haystack).range(start..end); + let config = Config::from_input_reverse(&input); + let start = + config.get_look_behind().map_or(Start::Text, |b| smap.get(b)); + start + }; + + assert_eq!(Start::Text, f("", 0, 0)); + assert_eq!(Start::Text, f("abc", 0, 3)); + assert_eq!(Start::Text, f("abc\n", 0, 4)); + + assert_eq!(Start::LineLF, f("abc\nz", 0, 3)); + + assert_eq!(Start::LineCR, f("abc\rz", 0, 3)); + + assert_eq!(Start::WordByte, f("abc", 0, 2)); + + assert_eq!(Start::NonWordByte, f("abc ", 0, 3)); + } +} diff --git a/vendor/regex-automata/src/util/syntax.rs b/vendor/regex-automata/src/util/syntax.rs new file mode 100644 index 00000000000000..3be07bc807581b --- /dev/null +++ b/vendor/regex-automata/src/util/syntax.rs @@ -0,0 +1,482 @@ +/*! +Utilities for dealing with the syntax of a regular expression. + +This module currently only exposes a [`Config`] type that +itself represents a wrapper around the configuration for a +[`regex-syntax::ParserBuilder`](regex_syntax::ParserBuilder). The purpose of +this wrapper is to make configuring syntax options very similar to how other +configuration is done throughout this crate. Namely, instead of duplicating +syntax options across every builder (of which there are many), we instead +create small config objects like this one that can be passed around and +composed. +*/ + +use alloc::{vec, vec::Vec}; + +use regex_syntax::{ + ast, + hir::{self, Hir}, + Error, ParserBuilder, +}; + +/// A convenience routine for parsing a pattern into an HIR value with the +/// default configuration. 
+///
+/// # Example
+///
+/// This shows how to parse a pattern into an HIR value:
+///
+/// ```
+/// use regex_automata::util::syntax;
+///
+/// let hir = syntax::parse(r"([a-z]+)|([0-9]+)")?;
+/// assert_eq!(Some(1), hir.properties().static_explicit_captures_len());
+///
+/// # Ok::<(), Box<dyn std::error::Error>>(())
+/// ```
+pub fn parse(pattern: &str) -> Result<Hir, Error> {
+    parse_with(pattern, &Config::default())
+}
+
+/// A convenience routine for parsing many patterns into HIR values with the
+/// default configuration.
+///
+/// # Example
+///
+/// This shows how to parse many patterns into corresponding HIR values:
+///
+/// ```
+/// use {
+///     regex_automata::util::syntax,
+///     regex_syntax::hir::Properties,
+/// };
+///
+/// let hirs = syntax::parse_many(&[
+///     r"([a-z]+)|([0-9]+)",
+///     r"foo(A-Z]+)bar",
+/// ])?;
+/// let props = Properties::union(hirs.iter().map(|h| h.properties()));
+/// assert_eq!(Some(1), props.static_explicit_captures_len());
+///
+/// # Ok::<(), Box<dyn std::error::Error>>(())
+/// ```
+pub fn parse_many<P: AsRef<str>>(patterns: &[P]) -> Result<Vec<Hir>, Error> {
+    parse_many_with(patterns, &Config::default())
+}
+
+/// A convenience routine for parsing a pattern into an HIR value using a
+/// `Config`.
+///
+/// # Example
+///
+/// This shows how to parse a pattern into an HIR value with a non-default
+/// configuration:
+///
+/// ```
+/// use regex_automata::util::syntax;
+///
+/// let hir = syntax::parse_with(
+///     r"^[a-z]+$",
+///     &syntax::Config::new().multi_line(true).crlf(true),
+/// )?;
+/// assert!(hir.properties().look_set().contains_anchor_crlf());
+///
+/// # Ok::<(), Box<dyn std::error::Error>>(())
+/// ```
+pub fn parse_with(pattern: &str, config: &Config) -> Result<Hir, Error> {
+    let mut builder = ParserBuilder::new();
+    config.apply(&mut builder);
+    builder.build().parse(pattern)
+}
+
+/// A convenience routine for parsing many patterns into HIR values using a
+/// `Config`.
+///
+/// # Example
+///
+/// This shows how to parse many patterns into corresponding HIR values
+/// with a non-default configuration:
+///
+/// ```
+/// use {
+///     regex_automata::util::syntax,
+///     regex_syntax::hir::Properties,
+/// };
+///
+/// let patterns = &[
+///     r"([a-z]+)|([0-9]+)",
+///     r"\W",
+///     r"foo(A-Z]+)bar",
+/// ];
+/// let config = syntax::Config::new().unicode(false).utf8(false);
+/// let hirs = syntax::parse_many_with(patterns, &config)?;
+/// let props = Properties::union(hirs.iter().map(|h| h.properties()));
+/// assert!(!props.is_utf8());
+///
+/// # Ok::<(), Box<dyn std::error::Error>>(())
+/// ```
+pub fn parse_many_with<P: AsRef<str>>(
+    patterns: &[P],
+    config: &Config,
+) -> Result<Vec<Hir>, Error> {
+    let mut builder = ParserBuilder::new();
+    config.apply(&mut builder);
+    let mut hirs = vec![];
+    for p in patterns.iter() {
+        hirs.push(builder.build().parse(p.as_ref())?);
+    }
+    Ok(hirs)
+}
+
+/// A common set of configuration options that apply to the syntax of a regex.
+///
+/// This represents a group of configuration options that specifically apply
+/// to how the concrete syntax of a regular expression is interpreted. In
+/// particular, they are generally forwarded to the
+/// [`ParserBuilder`](https://docs.rs/regex-syntax/*/regex_syntax/struct.ParserBuilder.html)
+/// in the
+/// [`regex-syntax`](https://docs.rs/regex-syntax)
+/// crate when building a regex from its concrete syntax directly.
+///
+/// These options are defined as a group since they apply to every regex engine
+/// in this crate. Instead of re-defining them on every engine's builder, they
+/// are instead provided here as one cohesive unit.
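+///
+/// # Example
+///
+/// A minimal sketch of composing a single `Config` value and reusing it with
+/// the parse routines above. It only exercises the setters and getters defined
+/// below; the pattern itself is arbitrary:
+///
+/// ```
+/// use regex_automata::util::syntax;
+///
+/// // Compose the options once...
+/// let config = syntax::Config::new()
+///     .case_insensitive(true)
+///     .multi_line(true);
+/// assert!(config.get_case_insensitive());
+/// assert!(config.get_multi_line());
+///
+/// // ...and hand the same value to any parse call that accepts a config.
+/// let _hir = syntax::parse_with(r"^foo$", &config)?;
+///
+/// # Ok::<(), Box<dyn std::error::Error>>(())
+/// ```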
+#[derive(Clone, Copy, Debug)] +pub struct Config { + case_insensitive: bool, + multi_line: bool, + dot_matches_new_line: bool, + crlf: bool, + line_terminator: u8, + swap_greed: bool, + ignore_whitespace: bool, + unicode: bool, + utf8: bool, + nest_limit: u32, + octal: bool, +} + +impl Config { + /// Return a new default syntax configuration. + pub fn new() -> Config { + // These defaults match the ones used in regex-syntax. + Config { + case_insensitive: false, + multi_line: false, + dot_matches_new_line: false, + crlf: false, + line_terminator: b'\n', + swap_greed: false, + ignore_whitespace: false, + unicode: true, + utf8: true, + nest_limit: 250, + octal: false, + } + } + + /// Enable or disable the case insensitive flag by default. + /// + /// When Unicode mode is enabled, case insensitivity is Unicode-aware. + /// Specifically, it will apply the "simple" case folding rules as + /// specified by Unicode. + /// + /// By default this is disabled. It may alternatively be selectively + /// enabled in the regular expression itself via the `i` flag. + pub fn case_insensitive(mut self, yes: bool) -> Config { + self.case_insensitive = yes; + self + } + + /// Enable or disable the multi-line matching flag by default. + /// + /// When this is enabled, the `^` and `$` look-around assertions will + /// match immediately after and immediately before a new line character, + /// respectively. Note that the `\A` and `\z` look-around assertions are + /// unaffected by this setting and always correspond to matching at the + /// beginning and end of the input. + /// + /// By default this is disabled. It may alternatively be selectively + /// enabled in the regular expression itself via the `m` flag. + pub fn multi_line(mut self, yes: bool) -> Config { + self.multi_line = yes; + self + } + + /// Enable or disable the "dot matches any character" flag by default. + /// + /// When this is enabled, `.` will match any character. When it's disabled, + /// then `.` will match any character except for a new line character. + /// + /// Note that `.` is impacted by whether the "unicode" setting is enabled + /// or not. When Unicode is enabled (the default), `.` will match any UTF-8 + /// encoding of any Unicode scalar value (sans a new line, depending on + /// whether this "dot matches new line" option is enabled). When Unicode + /// mode is disabled, `.` will match any byte instead. Because of this, + /// when Unicode mode is disabled, `.` can only be used when the "allow + /// invalid UTF-8" option is enabled, since `.` could otherwise match + /// invalid UTF-8. + /// + /// By default this is disabled. It may alternatively be selectively + /// enabled in the regular expression itself via the `s` flag. + pub fn dot_matches_new_line(mut self, yes: bool) -> Config { + self.dot_matches_new_line = yes; + self + } + + /// Enable or disable the "CRLF mode" flag by default. + /// + /// By default this is disabled. It may alternatively be selectively + /// enabled in the regular expression itself via the `R` flag. + /// + /// When CRLF mode is enabled, the following happens: + /// + /// * Unless `dot_matches_new_line` is enabled, `.` will match any character + /// except for `\r` and `\n`. + /// * When `multi_line` mode is enabled, `^` and `$` will treat `\r\n`, + /// `\r` and `\n` as line terminators. And in particular, neither will + /// match between a `\r` and a `\n`. + pub fn crlf(mut self, yes: bool) -> Config { + self.crlf = yes; + self + } + + /// Sets the line terminator for use with `(?u-s:.)` and `(?-us:.)`. 
+ /// + /// Namely, instead of `.` (by default) matching everything except for `\n`, + /// this will cause `.` to match everything except for the byte given. + /// + /// If `.` is used in a context where Unicode mode is enabled and this byte + /// isn't ASCII, then an error will be returned. When Unicode mode is + /// disabled, then any byte is permitted, but will return an error if UTF-8 + /// mode is enabled and it is a non-ASCII byte. + /// + /// In short, any ASCII value for a line terminator is always okay. But a + /// non-ASCII byte might result in an error depending on whether Unicode + /// mode or UTF-8 mode are enabled. + /// + /// Note that if `R` mode is enabled then it always takes precedence and + /// the line terminator will be treated as `\r` and `\n` simultaneously. + /// + /// Note also that this *doesn't* impact the look-around assertions + /// `(?m:^)` and `(?m:$)`. That's usually controlled by additional + /// configuration in the regex engine itself. + pub fn line_terminator(mut self, byte: u8) -> Config { + self.line_terminator = byte; + self + } + + /// Enable or disable the "swap greed" flag by default. + /// + /// When this is enabled, `.*` (for example) will become ungreedy and `.*?` + /// will become greedy. + /// + /// By default this is disabled. It may alternatively be selectively + /// enabled in the regular expression itself via the `U` flag. + pub fn swap_greed(mut self, yes: bool) -> Config { + self.swap_greed = yes; + self + } + + /// Enable verbose mode in the regular expression. + /// + /// When enabled, verbose mode permits insignificant whitespace in many + /// places in the regular expression, as well as comments. Comments are + /// started using `#` and continue until the end of the line. + /// + /// By default, this is disabled. It may be selectively enabled in the + /// regular expression by using the `x` flag regardless of this setting. + pub fn ignore_whitespace(mut self, yes: bool) -> Config { + self.ignore_whitespace = yes; + self + } + + /// Enable or disable the Unicode flag (`u`) by default. + /// + /// By default this is **enabled**. It may alternatively be selectively + /// disabled in the regular expression itself via the `u` flag. + /// + /// Note that unless "allow invalid UTF-8" is enabled (it's disabled by + /// default), a regular expression will fail to parse if Unicode mode is + /// disabled and a sub-expression could possibly match invalid UTF-8. + /// + /// **WARNING**: Unicode mode can greatly increase the size of the compiled + /// DFA, which can noticeably impact both memory usage and compilation + /// time. This is especially noticeable if your regex contains character + /// classes like `\w` that are impacted by whether Unicode is enabled or + /// not. If Unicode is not necessary, you are encouraged to disable it. + pub fn unicode(mut self, yes: bool) -> Config { + self.unicode = yes; + self + } + + /// When disabled, the builder will permit the construction of a regular + /// expression that may match invalid UTF-8. + /// + /// For example, when [`Config::unicode`] is disabled, then + /// expressions like `[^a]` may match invalid UTF-8 since they can match + /// any single byte that is not `a`. By default, these sub-expressions + /// are disallowed to avoid returning offsets that split a UTF-8 + /// encoded codepoint. However, in cases where matching at arbitrary + /// locations is desired, this option can be disabled to permit all such + /// sub-expressions. 
+ /// + /// When enabled (the default), the builder is guaranteed to produce a + /// regex that will only ever match valid UTF-8 (otherwise, the builder + /// will return an error). + pub fn utf8(mut self, yes: bool) -> Config { + self.utf8 = yes; + self + } + + /// Set the nesting limit used for the regular expression parser. + /// + /// The nesting limit controls how deep the abstract syntax tree is allowed + /// to be. If the AST exceeds the given limit (e.g., with too many nested + /// groups), then an error is returned by the parser. + /// + /// The purpose of this limit is to act as a heuristic to prevent stack + /// overflow when building a finite automaton from a regular expression's + /// abstract syntax tree. In particular, construction currently uses + /// recursion. In the future, the implementation may stop using recursion + /// and this option will no longer be necessary. + /// + /// This limit is not checked until the entire AST is parsed. Therefore, + /// if callers want to put a limit on the amount of heap space used, then + /// they should impose a limit on the length, in bytes, of the concrete + /// pattern string. In particular, this is viable since the parser will + /// limit itself to heap space proportional to the length of the pattern + /// string. + /// + /// Note that a nest limit of `0` will return a nest limit error for most + /// patterns but not all. For example, a nest limit of `0` permits `a` but + /// not `ab`, since `ab` requires a concatenation AST item, which results + /// in a nest depth of `1`. In general, a nest limit is not something that + /// manifests in an obvious way in the concrete syntax, therefore, it + /// should not be used in a granular way. + pub fn nest_limit(mut self, limit: u32) -> Config { + self.nest_limit = limit; + self + } + + /// Whether to support octal syntax or not. + /// + /// Octal syntax is a little-known way of uttering Unicode codepoints in + /// a regular expression. For example, `a`, `\x61`, `\u0061` and + /// `\141` are all equivalent regular expressions, where the last example + /// shows octal syntax. + /// + /// While supporting octal syntax isn't in and of itself a problem, it does + /// make good error messages harder. That is, in PCRE based regex engines, + /// syntax like `\1` invokes a backreference, which is explicitly + /// unsupported in Rust's regex engine. However, many users expect it to + /// be supported. Therefore, when octal support is disabled, the error + /// message will explicitly mention that backreferences aren't supported. + /// + /// Octal syntax is disabled by default. + pub fn octal(mut self, yes: bool) -> Config { + self.octal = yes; + self + } + + /// Returns whether "unicode" mode is enabled. + pub fn get_unicode(&self) -> bool { + self.unicode + } + + /// Returns whether "case insensitive" mode is enabled. + pub fn get_case_insensitive(&self) -> bool { + self.case_insensitive + } + + /// Returns whether "multi line" mode is enabled. + pub fn get_multi_line(&self) -> bool { + self.multi_line + } + + /// Returns whether "dot matches new line" mode is enabled. + pub fn get_dot_matches_new_line(&self) -> bool { + self.dot_matches_new_line + } + + /// Returns whether "CRLF" mode is enabled. + pub fn get_crlf(&self) -> bool { + self.crlf + } + + /// Returns the line terminator in this syntax configuration. + pub fn get_line_terminator(&self) -> u8 { + self.line_terminator + } + + /// Returns whether "swap greed" mode is enabled. 
+ pub fn get_swap_greed(&self) -> bool { + self.swap_greed + } + + /// Returns whether "ignore whitespace" mode is enabled. + pub fn get_ignore_whitespace(&self) -> bool { + self.ignore_whitespace + } + + /// Returns whether UTF-8 mode is enabled. + pub fn get_utf8(&self) -> bool { + self.utf8 + } + + /// Returns the "nest limit" setting. + pub fn get_nest_limit(&self) -> u32 { + self.nest_limit + } + + /// Returns whether "octal" mode is enabled. + pub fn get_octal(&self) -> bool { + self.octal + } + + /// Applies this configuration to the given parser. + pub(crate) fn apply(&self, builder: &mut ParserBuilder) { + builder + .unicode(self.unicode) + .case_insensitive(self.case_insensitive) + .multi_line(self.multi_line) + .dot_matches_new_line(self.dot_matches_new_line) + .crlf(self.crlf) + .line_terminator(self.line_terminator) + .swap_greed(self.swap_greed) + .ignore_whitespace(self.ignore_whitespace) + .utf8(self.utf8) + .nest_limit(self.nest_limit) + .octal(self.octal); + } + + /// Applies this configuration to the given AST parser. + pub(crate) fn apply_ast(&self, builder: &mut ast::parse::ParserBuilder) { + builder + .ignore_whitespace(self.ignore_whitespace) + .nest_limit(self.nest_limit) + .octal(self.octal); + } + + /// Applies this configuration to the given AST-to-HIR translator. + pub(crate) fn apply_hir( + &self, + builder: &mut hir::translate::TranslatorBuilder, + ) { + builder + .unicode(self.unicode) + .case_insensitive(self.case_insensitive) + .multi_line(self.multi_line) + .crlf(self.crlf) + .dot_matches_new_line(self.dot_matches_new_line) + .line_terminator(self.line_terminator) + .swap_greed(self.swap_greed) + .utf8(self.utf8); + } +} + +impl Default for Config { + fn default() -> Config { + Config::new() + } +} diff --git a/vendor/regex-automata/src/util/unicode_data/mod.rs b/vendor/regex-automata/src/util/unicode_data/mod.rs new file mode 100644 index 00000000000000..fc7b1c738ab3a1 --- /dev/null +++ b/vendor/regex-automata/src/util/unicode_data/mod.rs @@ -0,0 +1,17 @@ +// This cfg should match the one in src/util/look.rs that uses perl_word. +#[cfg(all( + // We have to explicitly want to support Unicode word boundaries. + feature = "unicode-word-boundary", + not(all( + // If we don't have regex-syntax at all, then we definitely need to + // bring our own \w data table. + feature = "syntax", + // If unicode-perl is enabled, then regex-syntax/unicode-perl is + // also enabled, which in turn means we can use regex-syntax's + // is_word_character routine (and thus use its data tables). But if + // unicode-perl is not enabled, even if syntax is, then we need to + // bring our own. + feature = "unicode-perl", + )), +))] +pub(crate) mod perl_word; diff --git a/vendor/regex-automata/src/util/unicode_data/perl_word.rs b/vendor/regex-automata/src/util/unicode_data/perl_word.rs new file mode 100644 index 00000000000000..21c8c0f9c839c8 --- /dev/null +++ b/vendor/regex-automata/src/util/unicode_data/perl_word.rs @@ -0,0 +1,806 @@ +// DO NOT EDIT THIS FILE. IT WAS AUTOMATICALLY GENERATED BY: +// +// ucd-generate perl-word ucd-16.0.0 --chars +// +// Unicode version: 16.0.0. +// +// ucd-generate 0.3.1 is available on crates.io. 
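+//
+// A membership test against this table is typically a binary search over the
+// inclusive `(start, end)` ranges below. The helper sketched here is only
+// illustrative (the name `is_perl_word` is not defined in this file):
+//
+//     fn is_perl_word(c: char) -> bool {
+//         PERL_WORD
+//             .binary_search_by(|&(lo, hi)| {
+//                 if hi < c {
+//                     core::cmp::Ordering::Less
+//                 } else if lo > c {
+//                     core::cmp::Ordering::Greater
+//                 } else {
+//                     core::cmp::Ordering::Equal
+//                 }
+//             })
+//             .is_ok()
+//     }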
+ +pub const PERL_WORD: &'static [(char, char)] = &[ + ('0', '9'), + ('A', 'Z'), + ('_', '_'), + ('a', 'z'), + ('ª', 'ª'), + ('µ', 'µ'), + ('º', 'º'), + ('À', 'Ö'), + ('Ø', 'ö'), + ('ø', 'ˁ'), + ('ˆ', 'ˑ'), + ('ˠ', 'ˤ'), + ('ˬ', 'ˬ'), + ('ˮ', 'ˮ'), + ('\u{300}', 'ʹ'), + ('Ͷ', 'ͷ'), + ('ͺ', 'ͽ'), + ('Ϳ', 'Ϳ'), + ('Ά', 'Ά'), + ('Έ', 'Ί'), + ('Ό', 'Ό'), + ('Ύ', 'Ρ'), + ('Σ', 'ϵ'), + ('Ϸ', 'ҁ'), + ('\u{483}', 'ԯ'), + ('Ա', 'Ֆ'), + ('ՙ', 'ՙ'), + ('ՠ', 'ֈ'), + ('\u{591}', '\u{5bd}'), + ('\u{5bf}', '\u{5bf}'), + ('\u{5c1}', '\u{5c2}'), + ('\u{5c4}', '\u{5c5}'), + ('\u{5c7}', '\u{5c7}'), + ('א', 'ת'), + ('ׯ', 'ײ'), + ('\u{610}', '\u{61a}'), + ('ؠ', '٩'), + ('ٮ', 'ۓ'), + ('ە', '\u{6dc}'), + ('\u{6df}', '\u{6e8}'), + ('\u{6ea}', 'ۼ'), + ('ۿ', 'ۿ'), + ('ܐ', '\u{74a}'), + ('ݍ', 'ޱ'), + ('߀', 'ߵ'), + ('ߺ', 'ߺ'), + ('\u{7fd}', '\u{7fd}'), + ('ࠀ', '\u{82d}'), + ('ࡀ', '\u{85b}'), + ('ࡠ', 'ࡪ'), + ('ࡰ', 'ࢇ'), + ('ࢉ', 'ࢎ'), + ('\u{897}', '\u{8e1}'), + ('\u{8e3}', '\u{963}'), + ('०', '९'), + ('ॱ', 'ঃ'), + ('অ', 'ঌ'), + ('এ', 'ঐ'), + ('ও', 'ন'), + ('প', 'র'), + ('ল', 'ল'), + ('শ', 'হ'), + ('\u{9bc}', '\u{9c4}'), + ('ে', 'ৈ'), + ('ো', 'ৎ'), + ('\u{9d7}', '\u{9d7}'), + ('ড়', 'ঢ়'), + ('য়', '\u{9e3}'), + ('০', 'ৱ'), + ('ৼ', 'ৼ'), + ('\u{9fe}', '\u{9fe}'), + ('\u{a01}', 'ਃ'), + ('ਅ', 'ਊ'), + ('ਏ', 'ਐ'), + ('ਓ', 'ਨ'), + ('ਪ', 'ਰ'), + ('ਲ', 'ਲ਼'), + ('ਵ', 'ਸ਼'), + ('ਸ', 'ਹ'), + ('\u{a3c}', '\u{a3c}'), + ('ਾ', '\u{a42}'), + ('\u{a47}', '\u{a48}'), + ('\u{a4b}', '\u{a4d}'), + ('\u{a51}', '\u{a51}'), + ('ਖ਼', 'ੜ'), + ('ਫ਼', 'ਫ਼'), + ('੦', '\u{a75}'), + ('\u{a81}', 'ઃ'), + ('અ', 'ઍ'), + ('એ', 'ઑ'), + ('ઓ', 'ન'), + ('પ', 'ર'), + ('લ', 'ળ'), + ('વ', 'હ'), + ('\u{abc}', '\u{ac5}'), + ('\u{ac7}', 'ૉ'), + ('ો', '\u{acd}'), + ('ૐ', 'ૐ'), + ('ૠ', '\u{ae3}'), + ('૦', '૯'), + ('ૹ', '\u{aff}'), + ('\u{b01}', 'ଃ'), + ('ଅ', 'ଌ'), + ('ଏ', 'ଐ'), + ('ଓ', 'ନ'), + ('ପ', 'ର'), + ('ଲ', 'ଳ'), + ('ଵ', 'ହ'), + ('\u{b3c}', '\u{b44}'), + ('େ', 'ୈ'), + ('ୋ', '\u{b4d}'), + ('\u{b55}', '\u{b57}'), + ('ଡ଼', 'ଢ଼'), + ('ୟ', '\u{b63}'), + ('୦', '୯'), + ('ୱ', 'ୱ'), + ('\u{b82}', 'ஃ'), + ('அ', 'ஊ'), + ('எ', 'ஐ'), + ('ஒ', 'க'), + ('ங', 'ச'), + ('ஜ', 'ஜ'), + ('ஞ', 'ட'), + ('ண', 'த'), + ('ந', 'ப'), + ('ம', 'ஹ'), + ('\u{bbe}', 'ூ'), + ('ெ', 'ை'), + ('ொ', '\u{bcd}'), + ('ௐ', 'ௐ'), + ('\u{bd7}', '\u{bd7}'), + ('௦', '௯'), + ('\u{c00}', 'ఌ'), + ('ఎ', 'ఐ'), + ('ఒ', 'న'), + ('ప', 'హ'), + ('\u{c3c}', 'ౄ'), + ('\u{c46}', '\u{c48}'), + ('\u{c4a}', '\u{c4d}'), + ('\u{c55}', '\u{c56}'), + ('ౘ', 'ౚ'), + ('ౝ', 'ౝ'), + ('ౠ', '\u{c63}'), + ('౦', '౯'), + ('ಀ', 'ಃ'), + ('ಅ', 'ಌ'), + ('ಎ', 'ಐ'), + ('ಒ', 'ನ'), + ('ಪ', 'ಳ'), + ('ವ', 'ಹ'), + ('\u{cbc}', 'ೄ'), + ('\u{cc6}', '\u{cc8}'), + ('\u{cca}', '\u{ccd}'), + ('\u{cd5}', '\u{cd6}'), + ('ೝ', 'ೞ'), + ('ೠ', '\u{ce3}'), + ('೦', '೯'), + ('ೱ', 'ೳ'), + ('\u{d00}', 'ഌ'), + ('എ', 'ഐ'), + ('ഒ', '\u{d44}'), + ('െ', 'ൈ'), + ('ൊ', 'ൎ'), + ('ൔ', '\u{d57}'), + ('ൟ', '\u{d63}'), + ('൦', '൯'), + ('ൺ', 'ൿ'), + ('\u{d81}', 'ඃ'), + ('අ', 'ඖ'), + ('ක', 'න'), + ('ඳ', 'ර'), + ('ල', 'ල'), + ('ව', 'ෆ'), + ('\u{dca}', '\u{dca}'), + ('\u{dcf}', '\u{dd4}'), + ('\u{dd6}', '\u{dd6}'), + ('ෘ', '\u{ddf}'), + ('෦', '෯'), + ('ෲ', 'ෳ'), + ('ก', '\u{e3a}'), + ('เ', '\u{e4e}'), + ('๐', '๙'), + ('ກ', 'ຂ'), + ('ຄ', 'ຄ'), + ('ຆ', 'ຊ'), + ('ຌ', 'ຣ'), + ('ລ', 'ລ'), + ('ວ', 'ຽ'), + ('ເ', 'ໄ'), + ('ໆ', 'ໆ'), + ('\u{ec8}', '\u{ece}'), + ('໐', '໙'), + ('ໜ', 'ໟ'), + ('ༀ', 'ༀ'), + ('\u{f18}', '\u{f19}'), + ('༠', '༩'), + ('\u{f35}', '\u{f35}'), + ('\u{f37}', '\u{f37}'), + ('\u{f39}', '\u{f39}'), + ('༾', 'ཇ'), + ('ཉ', 'ཬ'), + ('\u{f71}', '\u{f84}'), + ('\u{f86}', 
'\u{f97}'), + ('\u{f99}', '\u{fbc}'), + ('\u{fc6}', '\u{fc6}'), + ('က', '၉'), + ('ၐ', '\u{109d}'), + ('Ⴀ', 'Ⴥ'), + ('Ⴧ', 'Ⴧ'), + ('Ⴭ', 'Ⴭ'), + ('ა', 'ჺ'), + ('ჼ', 'ቈ'), + ('ቊ', 'ቍ'), + ('ቐ', 'ቖ'), + ('ቘ', 'ቘ'), + ('ቚ', 'ቝ'), + ('በ', 'ኈ'), + ('ኊ', 'ኍ'), + ('ነ', 'ኰ'), + ('ኲ', 'ኵ'), + ('ኸ', 'ኾ'), + ('ዀ', 'ዀ'), + ('ዂ', 'ዅ'), + ('ወ', 'ዖ'), + ('ዘ', 'ጐ'), + ('ጒ', 'ጕ'), + ('ጘ', 'ፚ'), + ('\u{135d}', '\u{135f}'), + ('ᎀ', 'ᎏ'), + ('Ꭰ', 'Ᏽ'), + ('ᏸ', 'ᏽ'), + ('ᐁ', 'ᙬ'), + ('ᙯ', 'ᙿ'), + ('ᚁ', 'ᚚ'), + ('ᚠ', 'ᛪ'), + ('ᛮ', 'ᛸ'), + ('ᜀ', '\u{1715}'), + ('ᜟ', '\u{1734}'), + ('ᝀ', '\u{1753}'), + ('ᝠ', 'ᝬ'), + ('ᝮ', 'ᝰ'), + ('\u{1772}', '\u{1773}'), + ('ក', '\u{17d3}'), + ('ៗ', 'ៗ'), + ('ៜ', '\u{17dd}'), + ('០', '៩'), + ('\u{180b}', '\u{180d}'), + ('\u{180f}', '᠙'), + ('ᠠ', 'ᡸ'), + ('ᢀ', 'ᢪ'), + ('ᢰ', 'ᣵ'), + ('ᤀ', 'ᤞ'), + ('\u{1920}', 'ᤫ'), + ('ᤰ', '\u{193b}'), + ('᥆', 'ᥭ'), + ('ᥰ', 'ᥴ'), + ('ᦀ', 'ᦫ'), + ('ᦰ', 'ᧉ'), + ('᧐', '᧙'), + ('ᨀ', '\u{1a1b}'), + ('ᨠ', '\u{1a5e}'), + ('\u{1a60}', '\u{1a7c}'), + ('\u{1a7f}', '᪉'), + ('᪐', '᪙'), + ('ᪧ', 'ᪧ'), + ('\u{1ab0}', '\u{1ace}'), + ('\u{1b00}', 'ᭌ'), + ('᭐', '᭙'), + ('\u{1b6b}', '\u{1b73}'), + ('\u{1b80}', '\u{1bf3}'), + ('ᰀ', '\u{1c37}'), + ('᱀', '᱉'), + ('ᱍ', 'ᱽ'), + ('ᲀ', 'ᲊ'), + ('Ა', 'Ჺ'), + ('Ჽ', 'Ჿ'), + ('\u{1cd0}', '\u{1cd2}'), + ('\u{1cd4}', 'ᳺ'), + ('ᴀ', 'ἕ'), + ('Ἐ', 'Ἕ'), + ('ἠ', 'ὅ'), + ('Ὀ', 'Ὅ'), + ('ὐ', 'ὗ'), + ('Ὑ', 'Ὑ'), + ('Ὓ', 'Ὓ'), + ('Ὕ', 'Ὕ'), + ('Ὗ', 'ώ'), + ('ᾀ', 'ᾴ'), + ('ᾶ', 'ᾼ'), + ('ι', 'ι'), + ('ῂ', 'ῄ'), + ('ῆ', 'ῌ'), + ('ῐ', 'ΐ'), + ('ῖ', 'Ί'), + ('ῠ', 'Ῥ'), + ('ῲ', 'ῴ'), + ('ῶ', 'ῼ'), + ('\u{200c}', '\u{200d}'), + ('‿', '⁀'), + ('⁔', '⁔'), + ('ⁱ', 'ⁱ'), + ('ⁿ', 'ⁿ'), + ('ₐ', 'ₜ'), + ('\u{20d0}', '\u{20f0}'), + ('ℂ', 'ℂ'), + ('ℇ', 'ℇ'), + ('ℊ', 'ℓ'), + ('ℕ', 'ℕ'), + ('ℙ', 'ℝ'), + ('ℤ', 'ℤ'), + ('Ω', 'Ω'), + ('ℨ', 'ℨ'), + ('K', 'ℭ'), + ('ℯ', 'ℹ'), + ('ℼ', 'ℿ'), + ('ⅅ', 'ⅉ'), + ('ⅎ', 'ⅎ'), + ('Ⅰ', 'ↈ'), + ('Ⓐ', 'ⓩ'), + ('Ⰰ', 'ⳤ'), + ('Ⳬ', 'ⳳ'), + ('ⴀ', 'ⴥ'), + ('ⴧ', 'ⴧ'), + ('ⴭ', 'ⴭ'), + ('ⴰ', 'ⵧ'), + ('ⵯ', 'ⵯ'), + ('\u{2d7f}', 'ⶖ'), + ('ⶠ', 'ⶦ'), + ('ⶨ', 'ⶮ'), + ('ⶰ', 'ⶶ'), + ('ⶸ', 'ⶾ'), + ('ⷀ', 'ⷆ'), + ('ⷈ', 'ⷎ'), + ('ⷐ', 'ⷖ'), + ('ⷘ', 'ⷞ'), + ('\u{2de0}', '\u{2dff}'), + ('ⸯ', 'ⸯ'), + ('々', '〇'), + ('〡', '\u{302f}'), + ('〱', '〵'), + ('〸', '〼'), + ('ぁ', 'ゖ'), + ('\u{3099}', '\u{309a}'), + ('ゝ', 'ゟ'), + ('ァ', 'ヺ'), + ('ー', 'ヿ'), + ('ㄅ', 'ㄯ'), + ('ㄱ', 'ㆎ'), + ('ㆠ', 'ㆿ'), + ('ㇰ', 'ㇿ'), + ('㐀', '䶿'), + ('一', 'ꒌ'), + ('ꓐ', 'ꓽ'), + ('ꔀ', 'ꘌ'), + ('ꘐ', 'ꘫ'), + ('Ꙁ', '\u{a672}'), + ('\u{a674}', '\u{a67d}'), + ('ꙿ', '\u{a6f1}'), + ('ꜗ', 'ꜟ'), + ('Ꜣ', 'ꞈ'), + ('Ꞌ', 'ꟍ'), + ('Ꟑ', 'ꟑ'), + ('ꟓ', 'ꟓ'), + ('ꟕ', 'Ƛ'), + ('ꟲ', 'ꠧ'), + ('\u{a82c}', '\u{a82c}'), + ('ꡀ', 'ꡳ'), + ('ꢀ', '\u{a8c5}'), + ('꣐', '꣙'), + ('\u{a8e0}', 'ꣷ'), + ('ꣻ', 'ꣻ'), + ('ꣽ', '\u{a92d}'), + ('ꤰ', '\u{a953}'), + ('ꥠ', 'ꥼ'), + ('\u{a980}', '\u{a9c0}'), + ('ꧏ', '꧙'), + ('ꧠ', 'ꧾ'), + ('ꨀ', '\u{aa36}'), + ('ꩀ', 'ꩍ'), + ('꩐', '꩙'), + ('ꩠ', 'ꩶ'), + ('ꩺ', 'ꫂ'), + ('ꫛ', 'ꫝ'), + ('ꫠ', 'ꫯ'), + ('ꫲ', '\u{aaf6}'), + ('ꬁ', 'ꬆ'), + ('ꬉ', 'ꬎ'), + ('ꬑ', 'ꬖ'), + ('ꬠ', 'ꬦ'), + ('ꬨ', 'ꬮ'), + ('ꬰ', 'ꭚ'), + ('ꭜ', 'ꭩ'), + ('ꭰ', 'ꯪ'), + ('꯬', '\u{abed}'), + ('꯰', '꯹'), + ('가', '힣'), + ('ힰ', 'ퟆ'), + ('ퟋ', 'ퟻ'), + ('豈', '舘'), + ('並', '龎'), + ('ff', 'st'), + ('ﬓ', 'ﬗ'), + ('יִ', 'ﬨ'), + ('שׁ', 'זּ'), + ('טּ', 'לּ'), + ('מּ', 'מּ'), + ('נּ', 'סּ'), + ('ףּ', 'פּ'), + ('צּ', 'ﮱ'), + ('ﯓ', 'ﴽ'), + ('ﵐ', 'ﶏ'), + ('ﶒ', 'ﷇ'), + ('ﷰ', 'ﷻ'), + ('\u{fe00}', '\u{fe0f}'), + ('\u{fe20}', '\u{fe2f}'), + ('︳', '︴'), + ('﹍', '﹏'), + ('ﹰ', 'ﹴ'), + ('ﹶ', 'ﻼ'), + ('0', '9'), + ('A', 'Z'), + ('_', '_'), + ('a', 'z'), + 
('ヲ', 'ᄒ'), + ('ᅡ', 'ᅦ'), + ('ᅧ', 'ᅬ'), + ('ᅭ', 'ᅲ'), + ('ᅳ', 'ᅵ'), + ('𐀀', '𐀋'), + ('𐀍', '𐀦'), + ('𐀨', '𐀺'), + ('𐀼', '𐀽'), + ('𐀿', '𐁍'), + ('𐁐', '𐁝'), + ('𐂀', '𐃺'), + ('𐅀', '𐅴'), + ('\u{101fd}', '\u{101fd}'), + ('𐊀', '𐊜'), + ('𐊠', '𐋐'), + ('\u{102e0}', '\u{102e0}'), + ('𐌀', '𐌟'), + ('𐌭', '𐍊'), + ('𐍐', '\u{1037a}'), + ('𐎀', '𐎝'), + ('𐎠', '𐏃'), + ('𐏈', '𐏏'), + ('𐏑', '𐏕'), + ('𐐀', '𐒝'), + ('𐒠', '𐒩'), + ('𐒰', '𐓓'), + ('𐓘', '𐓻'), + ('𐔀', '𐔧'), + ('𐔰', '𐕣'), + ('𐕰', '𐕺'), + ('𐕼', '𐖊'), + ('𐖌', '𐖒'), + ('𐖔', '𐖕'), + ('𐖗', '𐖡'), + ('𐖣', '𐖱'), + ('𐖳', '𐖹'), + ('𐖻', '𐖼'), + ('𐗀', '𐗳'), + ('𐘀', '𐜶'), + ('𐝀', '𐝕'), + ('𐝠', '𐝧'), + ('𐞀', '𐞅'), + ('𐞇', '𐞰'), + ('𐞲', '𐞺'), + ('𐠀', '𐠅'), + ('𐠈', '𐠈'), + ('𐠊', '𐠵'), + ('𐠷', '𐠸'), + ('𐠼', '𐠼'), + ('𐠿', '𐡕'), + ('𐡠', '𐡶'), + ('𐢀', '𐢞'), + ('𐣠', '𐣲'), + ('𐣴', '𐣵'), + ('𐤀', '𐤕'), + ('𐤠', '𐤹'), + ('𐦀', '𐦷'), + ('𐦾', '𐦿'), + ('𐨀', '\u{10a03}'), + ('\u{10a05}', '\u{10a06}'), + ('\u{10a0c}', '𐨓'), + ('𐨕', '𐨗'), + ('𐨙', '𐨵'), + ('\u{10a38}', '\u{10a3a}'), + ('\u{10a3f}', '\u{10a3f}'), + ('𐩠', '𐩼'), + ('𐪀', '𐪜'), + ('𐫀', '𐫇'), + ('𐫉', '\u{10ae6}'), + ('𐬀', '𐬵'), + ('𐭀', '𐭕'), + ('𐭠', '𐭲'), + ('𐮀', '𐮑'), + ('𐰀', '𐱈'), + ('𐲀', '𐲲'), + ('𐳀', '𐳲'), + ('𐴀', '\u{10d27}'), + ('𐴰', '𐴹'), + ('𐵀', '𐵥'), + ('\u{10d69}', '\u{10d6d}'), + ('𐵯', '𐶅'), + ('𐺀', '𐺩'), + ('\u{10eab}', '\u{10eac}'), + ('𐺰', '𐺱'), + ('𐻂', '𐻄'), + ('\u{10efc}', '𐼜'), + ('𐼧', '𐼧'), + ('𐼰', '\u{10f50}'), + ('𐽰', '\u{10f85}'), + ('𐾰', '𐿄'), + ('𐿠', '𐿶'), + ('𑀀', '\u{11046}'), + ('𑁦', '𑁵'), + ('\u{1107f}', '\u{110ba}'), + ('\u{110c2}', '\u{110c2}'), + ('𑃐', '𑃨'), + ('𑃰', '𑃹'), + ('\u{11100}', '\u{11134}'), + ('𑄶', '𑄿'), + ('𑅄', '𑅇'), + ('𑅐', '\u{11173}'), + ('𑅶', '𑅶'), + ('\u{11180}', '𑇄'), + ('\u{111c9}', '\u{111cc}'), + ('𑇎', '𑇚'), + ('𑇜', '𑇜'), + ('𑈀', '𑈑'), + ('𑈓', '\u{11237}'), + ('\u{1123e}', '\u{11241}'), + ('𑊀', '𑊆'), + ('𑊈', '𑊈'), + ('𑊊', '𑊍'), + ('𑊏', '𑊝'), + ('𑊟', '𑊨'), + ('𑊰', '\u{112ea}'), + ('𑋰', '𑋹'), + ('\u{11300}', '𑌃'), + ('𑌅', '𑌌'), + ('𑌏', '𑌐'), + ('𑌓', '𑌨'), + ('𑌪', '𑌰'), + ('𑌲', '𑌳'), + ('𑌵', '𑌹'), + ('\u{1133b}', '𑍄'), + ('𑍇', '𑍈'), + ('𑍋', '\u{1134d}'), + ('𑍐', '𑍐'), + ('\u{11357}', '\u{11357}'), + ('𑍝', '𑍣'), + ('\u{11366}', '\u{1136c}'), + ('\u{11370}', '\u{11374}'), + ('𑎀', '𑎉'), + ('𑎋', '𑎋'), + ('𑎎', '𑎎'), + ('𑎐', '𑎵'), + ('𑎷', '\u{113c0}'), + ('\u{113c2}', '\u{113c2}'), + ('\u{113c5}', '\u{113c5}'), + ('\u{113c7}', '𑏊'), + ('𑏌', '𑏓'), + ('\u{113e1}', '\u{113e2}'), + ('𑐀', '𑑊'), + ('𑑐', '𑑙'), + ('\u{1145e}', '𑑡'), + ('𑒀', '𑓅'), + ('𑓇', '𑓇'), + ('𑓐', '𑓙'), + ('𑖀', '\u{115b5}'), + ('𑖸', '\u{115c0}'), + ('𑗘', '\u{115dd}'), + ('𑘀', '\u{11640}'), + ('𑙄', '𑙄'), + ('𑙐', '𑙙'), + ('𑚀', '𑚸'), + ('𑛀', '𑛉'), + ('𑛐', '𑛣'), + ('𑜀', '𑜚'), + ('\u{1171d}', '\u{1172b}'), + ('𑜰', '𑜹'), + ('𑝀', '𑝆'), + ('𑠀', '\u{1183a}'), + ('𑢠', '𑣩'), + ('𑣿', '𑤆'), + ('𑤉', '𑤉'), + ('𑤌', '𑤓'), + ('𑤕', '𑤖'), + ('𑤘', '𑤵'), + ('𑤷', '𑤸'), + ('\u{1193b}', '\u{11943}'), + ('𑥐', '𑥙'), + ('𑦠', '𑦧'), + ('𑦪', '\u{119d7}'), + ('\u{119da}', '𑧡'), + ('𑧣', '𑧤'), + ('𑨀', '\u{11a3e}'), + ('\u{11a47}', '\u{11a47}'), + ('𑩐', '\u{11a99}'), + ('𑪝', '𑪝'), + ('𑪰', '𑫸'), + ('𑯀', '𑯠'), + ('𑯰', '𑯹'), + ('𑰀', '𑰈'), + ('𑰊', '\u{11c36}'), + ('\u{11c38}', '𑱀'), + ('𑱐', '𑱙'), + ('𑱲', '𑲏'), + ('\u{11c92}', '\u{11ca7}'), + ('𑲩', '\u{11cb6}'), + ('𑴀', '𑴆'), + ('𑴈', '𑴉'), + ('𑴋', '\u{11d36}'), + ('\u{11d3a}', '\u{11d3a}'), + ('\u{11d3c}', '\u{11d3d}'), + ('\u{11d3f}', '\u{11d47}'), + ('𑵐', '𑵙'), + ('𑵠', '𑵥'), + ('𑵧', '𑵨'), + ('𑵪', '𑶎'), + ('\u{11d90}', '\u{11d91}'), + ('𑶓', '𑶘'), + ('𑶠', '𑶩'), + ('𑻠', '𑻶'), + ('\u{11f00}', '𑼐'), + ('𑼒', 
'\u{11f3a}'), + ('𑼾', '\u{11f42}'), + ('𑽐', '\u{11f5a}'), + ('𑾰', '𑾰'), + ('𒀀', '𒎙'), + ('𒐀', '𒑮'), + ('𒒀', '𒕃'), + ('𒾐', '𒿰'), + ('𓀀', '𓐯'), + ('\u{13440}', '\u{13455}'), + ('𓑠', '𔏺'), + ('𔐀', '𔙆'), + ('𖄀', '𖄹'), + ('𖠀', '𖨸'), + ('𖩀', '𖩞'), + ('𖩠', '𖩩'), + ('𖩰', '𖪾'), + ('𖫀', '𖫉'), + ('𖫐', '𖫭'), + ('\u{16af0}', '\u{16af4}'), + ('𖬀', '\u{16b36}'), + ('𖭀', '𖭃'), + ('𖭐', '𖭙'), + ('𖭣', '𖭷'), + ('𖭽', '𖮏'), + ('𖵀', '𖵬'), + ('𖵰', '𖵹'), + ('𖹀', '𖹿'), + ('𖼀', '𖽊'), + ('\u{16f4f}', '𖾇'), + ('\u{16f8f}', '𖾟'), + ('𖿠', '𖿡'), + ('𖿣', '\u{16fe4}'), + ('\u{16ff0}', '\u{16ff1}'), + ('𗀀', '𘟷'), + ('𘠀', '𘳕'), + ('𘳿', '𘴈'), + ('𚿰', '𚿳'), + ('𚿵', '𚿻'), + ('𚿽', '𚿾'), + ('𛀀', '𛄢'), + ('𛄲', '𛄲'), + ('𛅐', '𛅒'), + ('𛅕', '𛅕'), + ('𛅤', '𛅧'), + ('𛅰', '𛋻'), + ('𛰀', '𛱪'), + ('𛱰', '𛱼'), + ('𛲀', '𛲈'), + ('𛲐', '𛲙'), + ('\u{1bc9d}', '\u{1bc9e}'), + ('𜳰', '𜳹'), + ('\u{1cf00}', '\u{1cf2d}'), + ('\u{1cf30}', '\u{1cf46}'), + ('\u{1d165}', '\u{1d169}'), + ('\u{1d16d}', '\u{1d172}'), + ('\u{1d17b}', '\u{1d182}'), + ('\u{1d185}', '\u{1d18b}'), + ('\u{1d1aa}', '\u{1d1ad}'), + ('\u{1d242}', '\u{1d244}'), + ('𝐀', '𝑔'), + ('𝑖', '𝒜'), + ('𝒞', '𝒟'), + ('𝒢', '𝒢'), + ('𝒥', '𝒦'), + ('𝒩', '𝒬'), + ('𝒮', '𝒹'), + ('𝒻', '𝒻'), + ('𝒽', '𝓃'), + ('𝓅', '𝔅'), + ('𝔇', '𝔊'), + ('𝔍', '𝔔'), + ('𝔖', '𝔜'), + ('𝔞', '𝔹'), + ('𝔻', '𝔾'), + ('𝕀', '𝕄'), + ('𝕆', '𝕆'), + ('𝕊', '𝕐'), + ('𝕒', '𝚥'), + ('𝚨', '𝛀'), + ('𝛂', '𝛚'), + ('𝛜', '𝛺'), + ('𝛼', '𝜔'), + ('𝜖', '𝜴'), + ('𝜶', '𝝎'), + ('𝝐', '𝝮'), + ('𝝰', '𝞈'), + ('𝞊', '𝞨'), + ('𝞪', '𝟂'), + ('𝟄', '𝟋'), + ('𝟎', '𝟿'), + ('\u{1da00}', '\u{1da36}'), + ('\u{1da3b}', '\u{1da6c}'), + ('\u{1da75}', '\u{1da75}'), + ('\u{1da84}', '\u{1da84}'), + ('\u{1da9b}', '\u{1da9f}'), + ('\u{1daa1}', '\u{1daaf}'), + ('𝼀', '𝼞'), + ('𝼥', '𝼪'), + ('\u{1e000}', '\u{1e006}'), + ('\u{1e008}', '\u{1e018}'), + ('\u{1e01b}', '\u{1e021}'), + ('\u{1e023}', '\u{1e024}'), + ('\u{1e026}', '\u{1e02a}'), + ('𞀰', '𞁭'), + ('\u{1e08f}', '\u{1e08f}'), + ('𞄀', '𞄬'), + ('\u{1e130}', '𞄽'), + ('𞅀', '𞅉'), + ('𞅎', '𞅎'), + ('𞊐', '\u{1e2ae}'), + ('𞋀', '𞋹'), + ('𞓐', '𞓹'), + ('𞗐', '𞗺'), + ('𞟠', '𞟦'), + ('𞟨', '𞟫'), + ('𞟭', '𞟮'), + ('𞟰', '𞟾'), + ('𞠀', '𞣄'), + ('\u{1e8d0}', '\u{1e8d6}'), + ('𞤀', '𞥋'), + ('𞥐', '𞥙'), + ('𞸀', '𞸃'), + ('𞸅', '𞸟'), + ('𞸡', '𞸢'), + ('𞸤', '𞸤'), + ('𞸧', '𞸧'), + ('𞸩', '𞸲'), + ('𞸴', '𞸷'), + ('𞸹', '𞸹'), + ('𞸻', '𞸻'), + ('𞹂', '𞹂'), + ('𞹇', '𞹇'), + ('𞹉', '𞹉'), + ('𞹋', '𞹋'), + ('𞹍', '𞹏'), + ('𞹑', '𞹒'), + ('𞹔', '𞹔'), + ('𞹗', '𞹗'), + ('𞹙', '𞹙'), + ('𞹛', '𞹛'), + ('𞹝', '𞹝'), + ('𞹟', '𞹟'), + ('𞹡', '𞹢'), + ('𞹤', '𞹤'), + ('𞹧', '𞹪'), + ('𞹬', '𞹲'), + ('𞹴', '𞹷'), + ('𞹹', '𞹼'), + ('𞹾', '𞹾'), + ('𞺀', '𞺉'), + ('𞺋', '𞺛'), + ('𞺡', '𞺣'), + ('𞺥', '𞺩'), + ('𞺫', '𞺻'), + ('🄰', '🅉'), + ('🅐', '🅩'), + ('🅰', '🆉'), + ('🯰', '🯹'), + ('𠀀', '𪛟'), + ('𪜀', '𫜹'), + ('𫝀', '𫠝'), + ('𫠠', '𬺡'), + ('𬺰', '𮯠'), + ('𮯰', '𮹝'), + ('丽', '𪘀'), + ('𰀀', '𱍊'), + ('𱍐', '𲎯'), + ('\u{e0100}', '\u{e01ef}'), +]; diff --git a/vendor/regex-automata/src/util/utf8.rs b/vendor/regex-automata/src/util/utf8.rs new file mode 100644 index 00000000000000..6c86e8d5fd8808 --- /dev/null +++ b/vendor/regex-automata/src/util/utf8.rs @@ -0,0 +1,191 @@ +/*! +Utilities for dealing with UTF-8. + +This module provides some UTF-8 related helper routines, including an +incremental decoder. +*/ + +/// Returns true if and only if the given byte is considered a word character. +/// This only applies to ASCII. +/// +/// This was copied from regex-syntax so that we can use it to determine the +/// starting DFA state while searching without depending on regex-syntax. 
The
+/// definition is never going to change, so there's no maintenance/bit-rot
+/// hazard here.
+#[cfg_attr(feature = "perf-inline", inline(always))]
+pub(crate) fn is_word_byte(b: u8) -> bool {
+    const fn mkwordset() -> [bool; 256] {
+        // FIXME: Use as_usize() once const functions in traits are stable.
+        let mut set = [false; 256];
+        set[b'_' as usize] = true;
+
+        let mut byte = b'0';
+        while byte <= b'9' {
+            set[byte as usize] = true;
+            byte += 1;
+        }
+        byte = b'A';
+        while byte <= b'Z' {
+            set[byte as usize] = true;
+            byte += 1;
+        }
+        byte = b'a';
+        while byte <= b'z' {
+            set[byte as usize] = true;
+            byte += 1;
+        }
+        set
+    }
+    const WORD: [bool; 256] = mkwordset();
+    WORD[b as usize]
+}
+
+/// Decodes the next UTF-8 encoded codepoint from the given byte slice.
+///
+/// If no valid encoding of a codepoint exists at the beginning of the given
+/// byte slice, then the first byte is returned instead.
+///
+/// This returns `None` if and only if `bytes` is empty.
+///
+/// This never panics.
+///
+/// *WARNING*: This is not designed for performance. If you're looking for a
+/// fast UTF-8 decoder, this is not it. If you feel like you need one in this
+/// crate, then please file an issue and discuss your use case.
+#[cfg_attr(feature = "perf-inline", inline(always))]
+pub(crate) fn decode(bytes: &[u8]) -> Option<Result<char, u8>> {
+    if bytes.is_empty() {
+        return None;
+    }
+    let len = match len(bytes[0]) {
+        None => return Some(Err(bytes[0])),
+        Some(len) if len > bytes.len() => return Some(Err(bytes[0])),
+        Some(1) => return Some(Ok(char::from(bytes[0]))),
+        Some(len) => len,
+    };
+    match core::str::from_utf8(&bytes[..len]) {
+        Ok(s) => Some(Ok(s.chars().next().unwrap())),
+        Err(_) => Some(Err(bytes[0])),
+    }
+}
+
+/// Decodes the last UTF-8 encoded codepoint from the given byte slice.
+///
+/// If no valid encoding of a codepoint exists at the end of the given byte
+/// slice, then the last byte is returned instead.
+///
+/// This returns `None` if and only if `bytes` is empty.
+#[cfg_attr(feature = "perf-inline", inline(always))]
+pub(crate) fn decode_last(bytes: &[u8]) -> Option<Result<char, u8>> {
+    if bytes.is_empty() {
+        return None;
+    }
+    let mut start = bytes.len() - 1;
+    let limit = bytes.len().saturating_sub(4);
+    while start > limit && !is_leading_or_invalid_byte(bytes[start]) {
+        start -= 1;
+    }
+    match decode(&bytes[start..]) {
+        None => None,
+        Some(Ok(ch)) => Some(Ok(ch)),
+        Some(Err(_)) => Some(Err(bytes[bytes.len() - 1])),
+    }
+}
+
+/// Given a UTF-8 leading byte, this returns the total number of code units
+/// in the following encoded codepoint.
+///
+/// If the given byte is not a valid UTF-8 leading byte, then this returns
+/// `None`.
+#[cfg_attr(feature = "perf-inline", inline(always))]
+fn len(byte: u8) -> Option<usize> {
+    match byte {
+        0b0000_0000..=0b0111_1111 => Some(1),
+        0b1000_0000..=0b1011_1111 => None,
+        0b1100_0000..=0b1101_1111 => Some(2),
+        0b1110_0000..=0b1110_1111 => Some(3),
+        0b1111_0000..=0b1111_0111 => Some(4),
+        _ => None,
+    }
+}
+
+/// Returns true if and only if the given offset in the given bytes falls on a
+/// valid UTF-8 encoded codepoint boundary.
+///
+/// If `bytes` is not valid UTF-8, then the behavior of this routine is
+/// unspecified.
+#[cfg_attr(feature = "perf-inline", inline(always))]
+pub(crate) fn is_boundary(bytes: &[u8], i: usize) -> bool {
+    match bytes.get(i) {
+        // The position at the end of the bytes always represents an empty
+        // string, which is a valid boundary. But anything after that doesn't
+        // make much sense to call valid a boundary.
+ None => i == bytes.len(), + // Other than ASCII (where the most significant bit is never set), + // valid starting bytes always have their most significant two bits + // set, where as continuation bytes never have their second most + // significant bit set. Therefore, this only returns true when bytes[i] + // corresponds to a byte that begins a valid UTF-8 encoding of a + // Unicode scalar value. + Some(&b) => b <= 0b0111_1111 || b >= 0b1100_0000, + } +} + +/// Returns true if and only if the given byte is either a valid leading UTF-8 +/// byte, or is otherwise an invalid byte that can never appear anywhere in a +/// valid UTF-8 sequence. +#[cfg_attr(feature = "perf-inline", inline(always))] +fn is_leading_or_invalid_byte(b: u8) -> bool { + // In the ASCII case, the most significant bit is never set. The leading + // byte of a 2/3/4-byte sequence always has the top two most significant + // bits set. For bytes that can never appear anywhere in valid UTF-8, this + // also returns true, since every such byte has its two most significant + // bits set: + // + // \xC0 :: 11000000 + // \xC1 :: 11000001 + // \xF5 :: 11110101 + // \xF6 :: 11110110 + // \xF7 :: 11110111 + // \xF8 :: 11111000 + // \xF9 :: 11111001 + // \xFA :: 11111010 + // \xFB :: 11111011 + // \xFC :: 11111100 + // \xFD :: 11111101 + // \xFE :: 11111110 + // \xFF :: 11111111 + (b & 0b1100_0000) != 0b1000_0000 +} + +/* +/// Returns the smallest possible index of the next valid UTF-8 sequence +/// starting after `i`. +/// +/// For all inputs, including invalid UTF-8 and any value of `i`, the return +/// value is guaranteed to be greater than `i`. (If there is no value greater +/// than `i` that fits in `usize`, then this panics.) +/// +/// Generally speaking, this should only be called on `text` when it is +/// permitted to assume that it is valid UTF-8 and where either `i >= +/// text.len()` or where `text[i]` is a leading byte of a UTF-8 sequence. +/// +/// NOTE: This method was used in a previous conception of iterators where we +/// specifically tried to skip over empty matches that split a codepoint by +/// simply requiring that our next search begin at the beginning of codepoint. +/// But we ended up changing that technique to always advance by 1 byte and +/// then filter out matches that split a codepoint after-the-fact. Thus, we no +/// longer use this method. But I've kept it around in case we want to switch +/// back to this approach. Its guarantees are a little subtle, so I'd prefer +/// not to rebuild it from whole cloth. +pub(crate) fn next(text: &[u8], i: usize) -> usize { + let b = match text.get(i) { + None => return i.checked_add(1).unwrap(), + Some(&b) => b, + }; + // For cases where we see an invalid UTF-8 byte, there isn't much we can do + // other than just start at the next byte. + let inc = len(b).unwrap_or(1); + i.checked_add(inc).unwrap() +} +*/ diff --git a/vendor/regex-automata/src/util/wire.rs b/vendor/regex-automata/src/util/wire.rs new file mode 100644 index 00000000000000..210ab6f4b24e17 --- /dev/null +++ b/vendor/regex-automata/src/util/wire.rs @@ -0,0 +1,947 @@ +/*! +Types and routines that support the wire format of finite automata. + +Currently, this module just exports a few error types and some small helpers +for deserializing [dense DFAs](crate::dfa::dense::DFA) using correct alignment. +*/ + +/* +A collection of helper functions, types and traits for serializing automata. 
+ +This crate defines its own bespoke serialization mechanism for some structures +provided in the public API, namely, DFAs. A bespoke mechanism was developed +primarily because structures like automata demand a specific binary format. +Attempting to encode their rich structure in an existing serialization +format is just not feasible. Moreover, the format for each structure is +generally designed such that deserialization is cheap. More specifically, that +deserialization can be done in constant time. (The idea being that you can +embed it into your binary or mmap it, and then use it immediately.) + +In order to achieve this, the dense and sparse DFAs in this crate use an +in-memory representation that very closely corresponds to its binary serialized +form. This pervades and complicates everything, and in some cases, requires +dealing with alignment and reasoning about safety. + +This technique does have major advantages. In particular, it permits doing +the potentially costly work of compiling a finite state machine in an offline +manner, and then loading it at runtime not only without having to re-compile +the regex, but even without the code required to do the compilation. This, for +example, permits one to use a pre-compiled DFA not only in environments without +Rust's standard library, but also in environments without a heap. + +In the code below, whenever we insert some kind of padding, it's to enforce a +4-byte alignment, unless otherwise noted. Namely, u32 is the only state ID type +supported. (In a previous version of this library, DFAs were generic over the +state ID representation.) + +Also, serialization generally requires the caller to specify endianness, +where as deserialization always assumes native endianness (otherwise cheap +deserialization would be impossible). This implies that serializing a structure +generally requires serializing both its big-endian and little-endian variants, +and then loading the correct one based on the target's endianness. +*/ + +use core::{cmp, mem::size_of}; + +#[cfg(feature = "alloc")] +use alloc::{vec, vec::Vec}; + +use crate::util::{ + int::Pointer, + primitives::{PatternID, PatternIDError, StateID, StateIDError}, +}; + +/// A hack to align a smaller type `B` with a bigger type `T`. +/// +/// The usual use of this is with `B = [u8]` and `T = u32`. That is, +/// it permits aligning a sequence of bytes on a 4-byte boundary. This +/// is useful in contexts where one wants to embed a serialized [dense +/// DFA](crate::dfa::dense::DFA) into a Rust a program while guaranteeing the +/// alignment required for the DFA. +/// +/// See [`dense::DFA::from_bytes`](crate::dfa::dense::DFA::from_bytes) for an +/// example of how to use this type. +#[repr(C)] +#[derive(Debug)] +pub struct AlignAs { + /// A zero-sized field indicating the alignment we want. + pub _align: [T; 0], + /// A possibly non-sized field containing a sequence of bytes. + pub bytes: B, +} + +/// An error that occurs when serializing an object from this crate. +/// +/// Serialization, as used in this crate, universally refers to the process +/// of transforming a structure (like a DFA) into a custom binary format +/// represented by `&[u8]`. To this end, serialization is generally infallible. +/// However, it can fail when caller provided buffer sizes are too small. When +/// that occurs, a serialization error is reported. +/// +/// A `SerializeError` provides no introspection capabilities. Its only +/// supported operation is conversion to a human readable error message. 
+/// +/// This error type implements the `std::error::Error` trait only when the +/// `std` feature is enabled. Otherwise, this type is defined in all +/// configurations. +#[derive(Debug)] +pub struct SerializeError { + /// The name of the thing that a buffer is too small for. + /// + /// Currently, the only kind of serialization error is one that is + /// committed by a caller: providing a destination buffer that is too + /// small to fit the serialized object. This makes sense conceptually, + /// since every valid inhabitant of a type should be serializable. + /// + /// This is somewhat exposed in the public API of this crate. For example, + /// the `to_bytes_{big,little}_endian` APIs return a `Vec` and are + /// guaranteed to never panic or error. This is only possible because the + /// implementation guarantees that it will allocate a `Vec` that is + /// big enough. + /// + /// In summary, if a new serialization error kind needs to be added, then + /// it will need careful consideration. + what: &'static str, +} + +impl SerializeError { + pub(crate) fn buffer_too_small(what: &'static str) -> SerializeError { + SerializeError { what } + } +} + +impl core::fmt::Display for SerializeError { + fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { + write!(f, "destination buffer is too small to write {}", self.what) + } +} + +#[cfg(feature = "std")] +impl std::error::Error for SerializeError {} + +/// An error that occurs when deserializing an object defined in this crate. +/// +/// Serialization, as used in this crate, universally refers to the process +/// of transforming a structure (like a DFA) into a custom binary format +/// represented by `&[u8]`. Deserialization, then, refers to the process of +/// cheaply converting this binary format back to the object's in-memory +/// representation as defined in this crate. To the extent possible, +/// deserialization will report this error whenever this process fails. +/// +/// A `DeserializeError` provides no introspection capabilities. Its only +/// supported operation is conversion to a human readable error message. +/// +/// This error type implements the `std::error::Error` trait only when the +/// `std` feature is enabled. Otherwise, this type is defined in all +/// configurations. 
+#[derive(Debug)] +pub struct DeserializeError(DeserializeErrorKind); + +#[derive(Debug)] +enum DeserializeErrorKind { + Generic { msg: &'static str }, + BufferTooSmall { what: &'static str }, + InvalidUsize { what: &'static str }, + VersionMismatch { expected: u32, found: u32 }, + EndianMismatch { expected: u32, found: u32 }, + AlignmentMismatch { alignment: usize, address: usize }, + LabelMismatch { expected: &'static str }, + ArithmeticOverflow { what: &'static str }, + PatternID { err: PatternIDError, what: &'static str }, + StateID { err: StateIDError, what: &'static str }, +} + +impl DeserializeError { + pub(crate) fn generic(msg: &'static str) -> DeserializeError { + DeserializeError(DeserializeErrorKind::Generic { msg }) + } + + pub(crate) fn buffer_too_small(what: &'static str) -> DeserializeError { + DeserializeError(DeserializeErrorKind::BufferTooSmall { what }) + } + + fn invalid_usize(what: &'static str) -> DeserializeError { + DeserializeError(DeserializeErrorKind::InvalidUsize { what }) + } + + fn version_mismatch(expected: u32, found: u32) -> DeserializeError { + DeserializeError(DeserializeErrorKind::VersionMismatch { + expected, + found, + }) + } + + fn endian_mismatch(expected: u32, found: u32) -> DeserializeError { + DeserializeError(DeserializeErrorKind::EndianMismatch { + expected, + found, + }) + } + + fn alignment_mismatch( + alignment: usize, + address: usize, + ) -> DeserializeError { + DeserializeError(DeserializeErrorKind::AlignmentMismatch { + alignment, + address, + }) + } + + fn label_mismatch(expected: &'static str) -> DeserializeError { + DeserializeError(DeserializeErrorKind::LabelMismatch { expected }) + } + + fn arithmetic_overflow(what: &'static str) -> DeserializeError { + DeserializeError(DeserializeErrorKind::ArithmeticOverflow { what }) + } + + fn pattern_id_error( + err: PatternIDError, + what: &'static str, + ) -> DeserializeError { + DeserializeError(DeserializeErrorKind::PatternID { err, what }) + } + + pub(crate) fn state_id_error( + err: StateIDError, + what: &'static str, + ) -> DeserializeError { + DeserializeError(DeserializeErrorKind::StateID { err, what }) + } +} + +#[cfg(feature = "std")] +impl std::error::Error for DeserializeError {} + +impl core::fmt::Display for DeserializeError { + fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { + use self::DeserializeErrorKind::*; + + match self.0 { + Generic { msg } => write!(f, "{msg}"), + BufferTooSmall { what } => { + write!(f, "buffer is too small to read {what}") + } + InvalidUsize { what } => { + write!(f, "{what} is too big to fit in a usize") + } + VersionMismatch { expected, found } => write!( + f, + "unsupported version: \ + expected version {expected} but found version {found}", + ), + EndianMismatch { expected, found } => write!( + f, + "endianness mismatch: expected 0x{expected:X} but \ + got 0x{found:X}. 
(Are you trying to load an object \
+                 serialized with a different endianness?)",
+            ),
+            AlignmentMismatch { alignment, address } => write!(
+                f,
+                "alignment mismatch: slice starts at address 0x{address:X}, \
+                 which is not aligned to a {alignment} byte boundary",
+            ),
+            LabelMismatch { expected } => write!(
+                f,
+                "label mismatch: start of serialized object should \
+                 contain a NUL terminated {expected:?} label, but a different \
+                 label was found",
+            ),
+            ArithmeticOverflow { what } => {
+                write!(f, "arithmetic overflow for {what}")
+            }
+            PatternID { ref err, what } => {
+                write!(f, "failed to read pattern ID for {what}: {err}")
+            }
+            StateID { ref err, what } => {
+                write!(f, "failed to read state ID for {what}: {err}")
+            }
+        }
+    }
+}
+
+/// Safely converts a `&[u32]` to `&[StateID]` with zero cost.
+#[cfg_attr(feature = "perf-inline", inline(always))]
+pub(crate) fn u32s_to_state_ids(slice: &[u32]) -> &[StateID] {
+    // SAFETY: This is safe because StateID is defined to have the same memory
+    // representation as a u32 (it is repr(transparent)). While not every u32
+    // is a "valid" StateID, callers are not permitted to rely on the validity
+    // of StateIDs for memory safety. It can only lead to logical errors. (This
+    // is why StateID::new_unchecked is safe.)
+    unsafe {
+        core::slice::from_raw_parts(
+            slice.as_ptr().cast::<StateID>(),
+            slice.len(),
+        )
+    }
+}
+
+/// Safely converts a `&mut [u32]` to `&mut [StateID]` with zero cost.
+pub(crate) fn u32s_to_state_ids_mut(slice: &mut [u32]) -> &mut [StateID] {
+    // SAFETY: This is safe because StateID is defined to have the same memory
+    // representation as a u32 (it is repr(transparent)). While not every u32
+    // is a "valid" StateID, callers are not permitted to rely on the validity
+    // of StateIDs for memory safety. It can only lead to logical errors. (This
+    // is why StateID::new_unchecked is safe.)
+    unsafe {
+        core::slice::from_raw_parts_mut(
+            slice.as_mut_ptr().cast::<StateID>(),
+            slice.len(),
+        )
+    }
+}
+
+/// Safely converts a `&[u32]` to `&[PatternID]` with zero cost.
+#[cfg_attr(feature = "perf-inline", inline(always))]
+pub(crate) fn u32s_to_pattern_ids(slice: &[u32]) -> &[PatternID] {
+    // SAFETY: This is safe because PatternID is defined to have the same
+    // memory representation as a u32 (it is repr(transparent)). While not
+    // every u32 is a "valid" PatternID, callers are not permitted to rely
+    // on the validity of PatternIDs for memory safety. It can only lead to
+    // logical errors. (This is why PatternID::new_unchecked is safe.)
+    unsafe {
+        core::slice::from_raw_parts(
+            slice.as_ptr().cast::<PatternID>(),
+            slice.len(),
+        )
+    }
+}
+
+/// Checks that the given slice has an alignment that matches `T`.
+///
+/// This is useful for checking that a slice has an appropriate alignment
+/// before casting it to a &[T]. Note though that alignment is not itself
+/// sufficient to perform the cast for any `T`.
+pub(crate) fn check_alignment<T>(
+    slice: &[u8],
+) -> Result<(), DeserializeError> {
+    let alignment = core::mem::align_of::<T>();
+    let address = slice.as_ptr().as_usize();
+    if address % alignment == 0 {
+        return Ok(());
+    }
+    Err(DeserializeError::alignment_mismatch(alignment, address))
+}
+
+/// Reads a possibly empty amount of padding, up to 7 bytes, from the beginning
+/// of the given slice. All padding bytes must be NUL bytes.
+///
+/// This is useful because it can be theoretically necessary to pad the
+/// beginning of a serialized object with NUL bytes to ensure that it starts
+/// at a correctly aligned address. These padding bytes should come immediately
+/// before the label.
+///
+/// This returns the number of bytes read from the given slice.
+pub(crate) fn skip_initial_padding(slice: &[u8]) -> usize {
+    let mut nread = 0;
+    while nread < 7 && nread < slice.len() && slice[nread] == 0 {
+        nread += 1;
+    }
+    nread
+}
+
+/// Allocate a byte buffer of the given size, along with some initial padding
+/// such that `buf[padding..]` has the same alignment as `T`, where the
+/// alignment of `T` must be at most `8`. In particular, callers should treat
+/// the first N bytes (second return value) as padding bytes that must not be
+/// overwritten. In all cases, the following identity holds:
+///
+/// ```ignore
+/// let (buf, padding) = alloc_aligned_buffer::<T>(SIZE);
+/// assert_eq!(SIZE, buf[padding..].len());
+/// ```
+///
+/// In practice, padding is often zero.
+///
+/// The requirement for `8` as a maximum here is somewhat arbitrary. In
+/// practice, we never need anything bigger in this crate, and so this function
+/// does some sanity asserts under the assumption of a max alignment of `8`.
+#[cfg(feature = "alloc")]
+pub(crate) fn alloc_aligned_buffer<T>(size: usize) -> (Vec<u8>, usize) {
+    // NOTE: This is a kludge because there's no easy way to allocate a Vec<u8>
+    // with an alignment guaranteed to be greater than 1. We could create a
+    // Vec<T>, but this cannot be safely transmuted to a Vec<u8> without
+    // concern, since reallocing or dropping the Vec<u8> is UB (different
+    // alignment than the initial allocation). We could define a wrapper type
+    // to manage this for us, but it seems like more machinery than it's worth.
+    let buf = vec![0; size];
+    let align = core::mem::align_of::<T>();
+    let address = buf.as_ptr().as_usize();
+    if address % align == 0 {
+        return (buf, 0);
+    }
+    // Let's try this again. We have to create a totally new alloc with
+    // the maximum amount of bytes we might need. We can't just extend our
+    // pre-existing 'buf' because that might create a new alloc with a
+    // different alignment.
+    let extra = align - 1;
+    let mut buf = vec![0; size + extra];
+    let address = buf.as_ptr().as_usize();
+    // The code below handles the case where 'address' is aligned to T, so if
+    // we got lucky and 'address' is now aligned to T (when it previously
+    // wasn't), then we're done.
+    if address % align == 0 {
+        buf.truncate(size);
+        return (buf, 0);
+    }
+    let padding = ((address & !(align - 1)).checked_add(align).unwrap())
+        .checked_sub(address)
+        .unwrap();
+    assert!(padding <= 7, "padding of {padding} is bigger than 7");
+    assert!(
+        padding <= extra,
+        "padding of {padding} is bigger than extra {extra} bytes",
+    );
+    buf.truncate(size + padding);
+    assert_eq!(size + padding, buf.len());
+    assert_eq!(
+        0,
+        buf[padding..].as_ptr().as_usize() % align,
+        "expected end of initial padding to be aligned to {align}",
+    );
+    (buf, padding)
+}
+
+/// Reads a NUL terminated label starting at the beginning of the given slice.
+///
+/// If a NUL terminated label could not be found, then an error is returned.
+/// Similarly, if a label is found but doesn't match the expected label, then
+/// an error is returned.
+///
+/// Upon success, the total number of bytes read (including padding bytes) is
+/// returned.
+pub(crate) fn read_label(
+    slice: &[u8],
+    expected_label: &'static str,
+) -> Result<usize, DeserializeError> {
+    // Set an upper bound on how many bytes we scan for a NUL. Since no label
+    // in this crate is longer than 256 bytes, if we can't find one within that
+    // range, then we have corrupted data.
+    let first_nul =
+        slice[..cmp::min(slice.len(), 256)].iter().position(|&b| b == 0);
+    let first_nul = match first_nul {
+        Some(first_nul) => first_nul,
+        None => {
+            return Err(DeserializeError::generic(
+                "could not find NUL terminated label \
+                 at start of serialized object",
+            ));
+        }
+    };
+    let len = first_nul + padding_len(first_nul);
+    if slice.len() < len {
+        return Err(DeserializeError::generic(
+            "could not find properly sized label at start of serialized object"
+        ));
+    }
+    if expected_label.as_bytes() != &slice[..first_nul] {
+        return Err(DeserializeError::label_mismatch(expected_label));
+    }
+    Ok(len)
+}
+
+/// Writes the given label to the buffer as a NUL terminated string. The label
+/// given must not contain NUL, otherwise this will panic. Similarly, the label
+/// must not be longer than 255 bytes, otherwise this will panic.
+///
+/// Additional NUL bytes are written as necessary to ensure that the number of
+/// bytes written is always a multiple of 4.
+///
+/// Upon success, the total number of bytes written (including padding) is
+/// returned.
+pub(crate) fn write_label(
+    label: &str,
+    dst: &mut [u8],
+) -> Result<usize, SerializeError> {
+    let nwrite = write_label_len(label);
+    if dst.len() < nwrite {
+        return Err(SerializeError::buffer_too_small("label"));
+    }
+    dst[..label.len()].copy_from_slice(label.as_bytes());
+    for i in 0..(nwrite - label.len()) {
+        dst[label.len() + i] = 0;
+    }
+    assert_eq!(nwrite % 4, 0);
+    Ok(nwrite)
+}
+
+/// Returns the total number of bytes (including padding) that would be written
+/// for the given label. This panics if the given label contains a NUL byte or
+/// is longer than 255 bytes. (The size restriction exists so that searching
+/// for a label during deserialization can be done in small bounded space.)
+pub(crate) fn write_label_len(label: &str) -> usize {
+    assert!(label.len() <= 255, "label must not be longer than 255 bytes");
+    assert!(label.bytes().all(|b| b != 0), "label must not contain NUL bytes");
+    let label_len = label.len() + 1; // +1 for the NUL terminator
+    label_len + padding_len(label_len)
+}
+
+/// Reads the endianness check from the beginning of the given slice and
+/// confirms that the endianness of the serialized object matches the expected
+/// endianness. If the slice is too small or if the endianness check fails,
+/// this returns an error.
+///
+/// Upon success, the total number of bytes read is returned.
+pub(crate) fn read_endianness_check(
+    slice: &[u8],
+) -> Result<usize, DeserializeError> {
+    let (n, nr) = try_read_u32(slice, "endianness check")?;
+    assert_eq!(nr, write_endianness_check_len());
+    if n != 0xFEFF {
+        return Err(DeserializeError::endian_mismatch(0xFEFF, n));
+    }
+    Ok(nr)
+}
+
+/// Writes 0xFEFF as an integer using the given endianness.
+///
+/// This is useful for writing into the header of a serialized object. It can
+/// be read during deserialization as a sanity check to ensure the proper
+/// endianness is used.
+///
+/// Upon success, the total number of bytes written is returned.
+pub(crate) fn write_endianness_check<E: Endian>(
+    dst: &mut [u8],
+) -> Result<usize, SerializeError> {
+    let nwrite = write_endianness_check_len();
+    if dst.len() < nwrite {
+        return Err(SerializeError::buffer_too_small("endianness check"));
+    }
+    E::write_u32(0xFEFF, dst);
+    Ok(nwrite)
+}
+
+/// Returns the number of bytes written by the endianness check.
+pub(crate) fn write_endianness_check_len() -> usize {
+    size_of::<u32>()
+}
+
+/// Reads a version number from the beginning of the given slice and confirms
+/// that it matches the expected version number given.
If the slice is too +/// small or if the version numbers aren't equivalent, this returns an error. +/// +/// Upon success, the total number of bytes read is returned. +/// +/// N.B. Currently, we require that the version number is exactly equivalent. +/// In the future, if we bump the version number without a semver bump, then +/// we'll need to relax this a bit and support older versions. +pub(crate) fn read_version( + slice: &[u8], + expected_version: u32, +) -> Result { + let (n, nr) = try_read_u32(slice, "version")?; + assert_eq!(nr, write_version_len()); + if n != expected_version { + return Err(DeserializeError::version_mismatch(expected_version, n)); + } + Ok(nr) +} + +/// Writes the given version number to the beginning of the given slice. +/// +/// This is useful for writing into the header of a serialized object. It can +/// be read during deserialization as a sanity check to ensure that the library +/// code supports the format of the serialized object. +/// +/// Upon success, the total number of bytes written is returned. +pub(crate) fn write_version( + version: u32, + dst: &mut [u8], +) -> Result { + let nwrite = write_version_len(); + if dst.len() < nwrite { + return Err(SerializeError::buffer_too_small("version number")); + } + E::write_u32(version, dst); + Ok(nwrite) +} + +/// Returns the number of bytes written by writing the version number. +pub(crate) fn write_version_len() -> usize { + size_of::() +} + +/// Reads a pattern ID from the given slice. If the slice has insufficient +/// length, then this panics. If the deserialized integer exceeds the pattern +/// ID limit for the current target, then this returns an error. +/// +/// Upon success, this also returns the number of bytes read. +pub(crate) fn read_pattern_id( + slice: &[u8], + what: &'static str, +) -> Result<(PatternID, usize), DeserializeError> { + let bytes: [u8; PatternID::SIZE] = + slice[..PatternID::SIZE].try_into().unwrap(); + let pid = PatternID::from_ne_bytes(bytes) + .map_err(|err| DeserializeError::pattern_id_error(err, what))?; + Ok((pid, PatternID::SIZE)) +} + +/// Reads a pattern ID from the given slice. If the slice has insufficient +/// length, then this panics. Otherwise, the deserialized integer is assumed +/// to be a valid pattern ID. +/// +/// This also returns the number of bytes read. +pub(crate) fn read_pattern_id_unchecked(slice: &[u8]) -> (PatternID, usize) { + let pid = PatternID::from_ne_bytes_unchecked( + slice[..PatternID::SIZE].try_into().unwrap(), + ); + (pid, PatternID::SIZE) +} + +/// Write the given pattern ID to the beginning of the given slice of bytes +/// using the specified endianness. The given slice must have length at least +/// `PatternID::SIZE`, or else this panics. Upon success, the total number of +/// bytes written is returned. +pub(crate) fn write_pattern_id( + pid: PatternID, + dst: &mut [u8], +) -> usize { + E::write_u32(pid.as_u32(), dst); + PatternID::SIZE +} + +/// Attempts to read a state ID from the given slice. If the slice has an +/// insufficient number of bytes or if the state ID exceeds the limit for +/// the current target, then this returns an error. +/// +/// Upon success, this also returns the number of bytes read. +pub(crate) fn try_read_state_id( + slice: &[u8], + what: &'static str, +) -> Result<(StateID, usize), DeserializeError> { + if slice.len() < StateID::SIZE { + return Err(DeserializeError::buffer_too_small(what)); + } + read_state_id(slice, what) +} + +/// Reads a state ID from the given slice. 
If the slice has insufficient +/// length, then this panics. If the deserialized integer exceeds the state ID +/// limit for the current target, then this returns an error. +/// +/// Upon success, this also returns the number of bytes read. +pub(crate) fn read_state_id( + slice: &[u8], + what: &'static str, +) -> Result<(StateID, usize), DeserializeError> { + let bytes: [u8; StateID::SIZE] = + slice[..StateID::SIZE].try_into().unwrap(); + let sid = StateID::from_ne_bytes(bytes) + .map_err(|err| DeserializeError::state_id_error(err, what))?; + Ok((sid, StateID::SIZE)) +} + +/// Reads a state ID from the given slice. If the slice has insufficient +/// length, then this panics. Otherwise, the deserialized integer is assumed +/// to be a valid state ID. +/// +/// This also returns the number of bytes read. +pub(crate) fn read_state_id_unchecked(slice: &[u8]) -> (StateID, usize) { + let sid = StateID::from_ne_bytes_unchecked( + slice[..StateID::SIZE].try_into().unwrap(), + ); + (sid, StateID::SIZE) +} + +/// Write the given state ID to the beginning of the given slice of bytes +/// using the specified endianness. The given slice must have length at least +/// `StateID::SIZE`, or else this panics. Upon success, the total number of +/// bytes written is returned. +pub(crate) fn write_state_id( + sid: StateID, + dst: &mut [u8], +) -> usize { + E::write_u32(sid.as_u32(), dst); + StateID::SIZE +} + +/// Try to read a u16 as a usize from the beginning of the given slice in +/// native endian format. If the slice has fewer than 2 bytes or if the +/// deserialized number cannot be represented by usize, then this returns an +/// error. The error message will include the `what` description of what is +/// being deserialized, for better error messages. `what` should be a noun in +/// singular form. +/// +/// Upon success, this also returns the number of bytes read. +pub(crate) fn try_read_u16_as_usize( + slice: &[u8], + what: &'static str, +) -> Result<(usize, usize), DeserializeError> { + try_read_u16(slice, what).and_then(|(n, nr)| { + usize::try_from(n) + .map(|n| (n, nr)) + .map_err(|_| DeserializeError::invalid_usize(what)) + }) +} + +/// Try to read a u32 as a usize from the beginning of the given slice in +/// native endian format. If the slice has fewer than 4 bytes or if the +/// deserialized number cannot be represented by usize, then this returns an +/// error. The error message will include the `what` description of what is +/// being deserialized, for better error messages. `what` should be a noun in +/// singular form. +/// +/// Upon success, this also returns the number of bytes read. +pub(crate) fn try_read_u32_as_usize( + slice: &[u8], + what: &'static str, +) -> Result<(usize, usize), DeserializeError> { + try_read_u32(slice, what).and_then(|(n, nr)| { + usize::try_from(n) + .map(|n| (n, nr)) + .map_err(|_| DeserializeError::invalid_usize(what)) + }) +} + +/// Try to read a u16 from the beginning of the given slice in native endian +/// format. If the slice has fewer than 2 bytes, then this returns an error. +/// The error message will include the `what` description of what is being +/// deserialized, for better error messages. `what` should be a noun in +/// singular form. +/// +/// Upon success, this also returns the number of bytes read. 
+pub(crate) fn try_read_u16(
+    slice: &[u8],
+    what: &'static str,
+) -> Result<(u16, usize), DeserializeError> {
+    check_slice_len(slice, size_of::<u16>(), what)?;
+    Ok((read_u16(slice), size_of::<u16>()))
+}
+
+/// Try to read a u32 from the beginning of the given slice in native endian
+/// format. If the slice has fewer than 4 bytes, then this returns an error.
+/// The error message will include the `what` description of what is being
+/// deserialized, for better error messages. `what` should be a noun in
+/// singular form.
+///
+/// Upon success, this also returns the number of bytes read.
+pub(crate) fn try_read_u32(
+    slice: &[u8],
+    what: &'static str,
+) -> Result<(u32, usize), DeserializeError> {
+    check_slice_len(slice, size_of::<u32>(), what)?;
+    Ok((read_u32(slice), size_of::<u32>()))
+}
+
+/// Try to read a u128 from the beginning of the given slice in native endian
+/// format. If the slice has fewer than 16 bytes, then this returns an error.
+/// The error message will include the `what` description of what is being
+/// deserialized, for better error messages. `what` should be a noun in
+/// singular form.
+///
+/// Upon success, this also returns the number of bytes read.
+pub(crate) fn try_read_u128(
+    slice: &[u8],
+    what: &'static str,
+) -> Result<(u128, usize), DeserializeError> {
+    check_slice_len(slice, size_of::<u128>(), what)?;
+    Ok((read_u128(slice), size_of::<u128>()))
+}
+
+/// Read a u16 from the beginning of the given slice in native endian format.
+/// If the slice has fewer than 2 bytes, then this panics.
+///
+/// Marked as inline to speed up sparse searching which decodes integers from
+/// its automaton at search time.
+#[cfg_attr(feature = "perf-inline", inline(always))]
+pub(crate) fn read_u16(slice: &[u8]) -> u16 {
+    let bytes: [u8; 2] = slice[..size_of::<u16>()].try_into().unwrap();
+    u16::from_ne_bytes(bytes)
+}
+
+/// Read a u32 from the beginning of the given slice in native endian format.
+/// If the slice has fewer than 4 bytes, then this panics.
+///
+/// Marked as inline to speed up sparse searching which decodes integers from
+/// its automaton at search time.
+#[cfg_attr(feature = "perf-inline", inline(always))]
+pub(crate) fn read_u32(slice: &[u8]) -> u32 {
+    let bytes: [u8; 4] = slice[..size_of::<u32>()].try_into().unwrap();
+    u32::from_ne_bytes(bytes)
+}
+
+/// Read a u128 from the beginning of the given slice in native endian format.
+/// If the slice has fewer than 16 bytes, then this panics.
+pub(crate) fn read_u128(slice: &[u8]) -> u128 {
+    let bytes: [u8; 16] = slice[..size_of::<u128>()].try_into().unwrap();
+    u128::from_ne_bytes(bytes)
+}
+
+/// Checks that the given slice has some minimal length. If it's smaller than
+/// the bound given, then a "buffer too small" error is returned with `what`
+/// describing what the buffer represents.
+pub(crate) fn check_slice_len<T>(
+    slice: &[T],
+    at_least_len: usize,
+    what: &'static str,
+) -> Result<(), DeserializeError> {
+    if slice.len() < at_least_len {
+        return Err(DeserializeError::buffer_too_small(what));
+    }
+    Ok(())
+}
+
+/// Multiply the given numbers, and on overflow, return an error that includes
+/// 'what' in the error message.
+///
+/// This is useful when doing arithmetic with untrusted data.
+pub(crate) fn mul(
+    a: usize,
+    b: usize,
+    what: &'static str,
+) -> Result<usize, DeserializeError> {
+    match a.checked_mul(b) {
+        Some(c) => Ok(c),
+        None => Err(DeserializeError::arithmetic_overflow(what)),
+    }
+}
+
+/// Add the given numbers, and on overflow, return an error that includes
+/// 'what' in the error message.
+/// +/// This is useful when doing arithmetic with untrusted data. +pub(crate) fn add( + a: usize, + b: usize, + what: &'static str, +) -> Result { + match a.checked_add(b) { + Some(c) => Ok(c), + None => Err(DeserializeError::arithmetic_overflow(what)), + } +} + +/// Shift `a` left by `b`, and on overflow, return an error that includes +/// 'what' in the error message. +/// +/// This is useful when doing arithmetic with untrusted data. +pub(crate) fn shl( + a: usize, + b: usize, + what: &'static str, +) -> Result { + let amount = u32::try_from(b) + .map_err(|_| DeserializeError::arithmetic_overflow(what))?; + match a.checked_shl(amount) { + Some(c) => Ok(c), + None => Err(DeserializeError::arithmetic_overflow(what)), + } +} + +/// Returns the number of additional bytes required to add to the given length +/// in order to make the total length a multiple of 4. The return value is +/// always less than 4. +pub(crate) fn padding_len(non_padding_len: usize) -> usize { + (4 - (non_padding_len & 0b11)) & 0b11 +} + +/// A simple trait for writing code generic over endianness. +/// +/// This is similar to what byteorder provides, but we only need a very small +/// subset. +pub(crate) trait Endian { + /// Writes a u16 to the given destination buffer in a particular + /// endianness. If the destination buffer has a length smaller than 2, then + /// this panics. + fn write_u16(n: u16, dst: &mut [u8]); + + /// Writes a u32 to the given destination buffer in a particular + /// endianness. If the destination buffer has a length smaller than 4, then + /// this panics. + fn write_u32(n: u32, dst: &mut [u8]); + + /// Writes a u128 to the given destination buffer in a particular + /// endianness. If the destination buffer has a length smaller than 16, + /// then this panics. + fn write_u128(n: u128, dst: &mut [u8]); +} + +/// Little endian writing. +pub(crate) enum LE {} +/// Big endian writing. 
+pub(crate) enum BE {} + +#[cfg(target_endian = "little")] +pub(crate) type NE = LE; +#[cfg(target_endian = "big")] +pub(crate) type NE = BE; + +impl Endian for LE { + fn write_u16(n: u16, dst: &mut [u8]) { + dst[..2].copy_from_slice(&n.to_le_bytes()); + } + + fn write_u32(n: u32, dst: &mut [u8]) { + dst[..4].copy_from_slice(&n.to_le_bytes()); + } + + fn write_u128(n: u128, dst: &mut [u8]) { + dst[..16].copy_from_slice(&n.to_le_bytes()); + } +} + +impl Endian for BE { + fn write_u16(n: u16, dst: &mut [u8]) { + dst[..2].copy_from_slice(&n.to_be_bytes()); + } + + fn write_u32(n: u32, dst: &mut [u8]) { + dst[..4].copy_from_slice(&n.to_be_bytes()); + } + + fn write_u128(n: u128, dst: &mut [u8]) { + dst[..16].copy_from_slice(&n.to_be_bytes()); + } +} + +#[cfg(all(test, feature = "alloc"))] +mod tests { + use super::*; + + #[test] + fn labels() { + let mut buf = [0; 1024]; + + let nwrite = write_label("fooba", &mut buf).unwrap(); + assert_eq!(nwrite, 8); + assert_eq!(&buf[..nwrite], b"fooba\x00\x00\x00"); + + let nread = read_label(&buf, "fooba").unwrap(); + assert_eq!(nread, 8); + } + + #[test] + #[should_panic] + fn bad_label_interior_nul() { + // interior NULs are not allowed + write_label("foo\x00bar", &mut [0; 1024]).unwrap(); + } + + #[test] + fn bad_label_almost_too_long() { + // ok + write_label(&"z".repeat(255), &mut [0; 1024]).unwrap(); + } + + #[test] + #[should_panic] + fn bad_label_too_long() { + // labels longer than 255 bytes are banned + write_label(&"z".repeat(256), &mut [0; 1024]).unwrap(); + } + + #[test] + fn padding() { + assert_eq!(0, padding_len(8)); + assert_eq!(3, padding_len(9)); + assert_eq!(2, padding_len(10)); + assert_eq!(1, padding_len(11)); + assert_eq!(0, padding_len(12)); + assert_eq!(3, padding_len(13)); + assert_eq!(2, padding_len(14)); + assert_eq!(1, padding_len(15)); + assert_eq!(0, padding_len(16)); + } +} diff --git a/vendor/regex-automata/test b/vendor/regex-automata/test new file mode 100755 index 00000000000000..df3e5ae98dea47 --- /dev/null +++ b/vendor/regex-automata/test @@ -0,0 +1,95 @@ +#!/bin/bash + +# This is a script that attempts to *approximately* exhaustively run the test +# suite for regex-automata. The main reason for why 'cargo test' isn't enough +# is because of crate features. regex-automata has a ton of them. This script +# tests many of those feature combinations (although not all) to try to get +# decent coverage in a finite amount of time. + +set -e + +# cd to the directory containing this crate's Cargo.toml so that we don't need +# to pass --manifest-path to every `cargo` command. +cd "$(dirname "$0")" + +echo "===== ALL FEATURES TEST ===" +cargo test --all-features + +# Man I don't *want* to have this many crate features, but... I really want +# folks to be able to slim the crate down to just the things they want. But +# the main downside is that I just can't feasibly test every combination of +# features because there are too many of them. Sad, but I'm not sure if there +# is a better alternative. 
+features=( + "" + "unicode-word-boundary" + "unicode-word-boundary,syntax,unicode-perl" + "unicode-word-boundary,syntax,dfa-build" + "nfa" + "dfa" + "hybrid" + "nfa,dfa" + "nfa,hybrid" + "dfa,hybrid" + "dfa-onepass" + "nfa-pikevm" + "nfa-backtrack" + "std" + "alloc" + "syntax" + "syntax,nfa-pikevm" + "syntax,hybrid" + "perf-literal-substring" + "perf-literal-multisubstring" + "meta" + "meta,nfa-backtrack" + "meta,hybrid" + "meta,dfa-build" + "meta,dfa-onepass" + "meta,nfa,dfa,hybrid,nfa-backtrack" + "meta,nfa,dfa,hybrid,nfa-backtrack,perf-literal-substring" + "meta,nfa,dfa,hybrid,nfa-backtrack,perf-literal-multisubstring" +) +for f in "${features[@]}"; do + echo "===== LIB FEATURES: $f ===" + # It's actually important to do a standard 'cargo build' in addition to a + # 'cargo test'. In particular, in the latter case, the dev-dependencies may + # wind up enabling features in dependencies (like memchr) that make it look + # like everything is well, but actually isn't. For example, the 'regex-test' + # dev-dependency uses 'bstr' and enables its 'std' feature, which in turn + # unconditionally enables 'memchr's 'std' feature. Since we're specifically + # looking to test that certain feature combinations work as expected, this + # can lead to things testing okay, but would actually fail to build. Yikes. + cargo build --no-default-features --lib --features "$f" + cargo test --no-default-features --lib --features "$f" +done + +# We can also run the integration test suite on stripped down features too. +# But the test suite doesn't do well with things like 'std' and 'unicode' +# disabled, so we always enable them. +features=( + "std,unicode,syntax,nfa-pikevm" + "std,unicode,syntax,nfa-backtrack" + "std,unicode,syntax,hybrid" + "std,unicode,syntax,dfa-onepass" + "std,unicode,syntax,dfa-search" + "std,unicode,syntax,dfa-build" + "std,unicode,meta" + # This one is a little tricky because it causes the backtracker to get used + # in more instances and results in failing tests for the 'earliest' tests. + # The actual results are semantically consistent with the API guarantee + # (the backtracker tends to report greater offsets because it isn't an FSM), + # but our tests are less flexible than the API guarantee and demand offsets + # reported by FSM regex engines. (Which is... all of them except for the + # backtracker.) + # "std,unicode,meta,nfa-backtrack" + "std,unicode,meta,hybrid" + "std,unicode,meta,dfa-onepass" + "std,unicode,meta,dfa-build" + "std,unicode,meta,nfa,dfa-onepass,hybrid" +) +for f in "${features[@]}"; do + echo "===== INTEGRATION FEATURES: $f ===" + cargo build --no-default-features --lib --features "$f" + cargo test --no-default-features --test integration --features "$f" +done diff --git a/vendor/regex-automata/tests/dfa/api.rs b/vendor/regex-automata/tests/dfa/api.rs new file mode 100644 index 00000000000000..8a015ad0fb8852 --- /dev/null +++ b/vendor/regex-automata/tests/dfa/api.rs @@ -0,0 +1,162 @@ +use std::error::Error; + +use regex_automata::{ + dfa::{dense, Automaton, OverlappingState}, + nfa::thompson, + Anchored, HalfMatch, Input, MatchError, +}; + +// Tests that quit bytes in the forward direction work correctly. 
+#[test] +fn quit_fwd() -> Result<(), Box> { + let dfa = dense::Builder::new() + .configure(dense::Config::new().quit(b'x', true)) + .build("[[:word:]]+$")?; + + assert_eq!( + Err(MatchError::quit(b'x', 3)), + dfa.try_search_fwd(&Input::new(b"abcxyz")) + ); + assert_eq!( + dfa.try_search_overlapping_fwd( + &Input::new(b"abcxyz"), + &mut OverlappingState::start() + ), + Err(MatchError::quit(b'x', 3)), + ); + + Ok(()) +} + +// Tests that quit bytes in the reverse direction work correctly. +#[test] +fn quit_rev() -> Result<(), Box> { + let dfa = dense::Builder::new() + .configure(dense::Config::new().quit(b'x', true)) + .thompson(thompson::Config::new().reverse(true)) + .build("^[[:word:]]+")?; + + assert_eq!( + Err(MatchError::quit(b'x', 3)), + dfa.try_search_rev(&Input::new(b"abcxyz")) + ); + + Ok(()) +} + +// Tests that if we heuristically enable Unicode word boundaries but then +// instruct that a non-ASCII byte should NOT be a quit byte, then the builder +// will panic. +#[test] +#[should_panic] +fn quit_panics() { + dense::Config::new().unicode_word_boundary(true).quit(b'\xFF', false); +} + +// This tests an intesting case where even if the Unicode word boundary option +// is disabled, setting all non-ASCII bytes to be quit bytes will cause Unicode +// word boundaries to be enabled. +#[test] +fn unicode_word_implicitly_works() -> Result<(), Box> { + let mut config = dense::Config::new(); + for b in 0x80..=0xFF { + config = config.quit(b, true); + } + let dfa = dense::Builder::new().configure(config).build(r"\b")?; + let expected = HalfMatch::must(0, 1); + assert_eq!(Ok(Some(expected)), dfa.try_search_fwd(&Input::new(b" a"))); + Ok(()) +} + +// A variant of [`Automaton::is_special_state`]'s doctest, but with universal +// start states. +// +// See: https://github.com/rust-lang/regex/pull/1195 +#[test] +fn universal_start_search() -> Result<(), Box> { + fn find( + dfa: &A, + haystack: &[u8], + ) -> Result, MatchError> { + let mut state = dfa + .universal_start_state(Anchored::No) + .expect("regex should not require lookbehind"); + let mut last_match = None; + // Walk all the bytes in the haystack. We can quit early if we see + // a dead or a quit state. The former means the automaton will + // never transition to any other state. The latter means that the + // automaton entered a condition in which its search failed. + for (i, &b) in haystack.iter().enumerate() { + state = dfa.next_state(state, b); + if dfa.is_special_state(state) { + if dfa.is_match_state(state) { + last_match = + Some(HalfMatch::new(dfa.match_pattern(state, 0), i)); + } else if dfa.is_dead_state(state) { + return Ok(last_match); + } else if dfa.is_quit_state(state) { + // It is possible to enter into a quit state after + // observing a match has occurred. In that case, we + // should return the match instead of an error. + if last_match.is_some() { + return Ok(last_match); + } + return Err(MatchError::quit(b, i)); + } + // Implementors may also want to check for start or accel + // states and handle them differently for performance + // reasons. But it is not necessary for correctness. + } + } + // Matches are always delayed by 1 byte, so we must explicitly walk + // the special "EOI" transition at the end of the search. 
+ state = dfa.next_eoi_state(state); + if dfa.is_match_state(state) { + last_match = Some(HalfMatch::new( + dfa.match_pattern(state, 0), + haystack.len(), + )); + } + Ok(last_match) + } + + fn check_impl( + dfa: impl Automaton, + haystack: &str, + pat: usize, + offset: usize, + ) -> Result<(), Box> { + let haystack = haystack.as_bytes(); + let mat = find(&dfa, haystack)?.unwrap(); + assert_eq!(mat.pattern().as_usize(), pat); + assert_eq!(mat.offset(), offset); + Ok(()) + } + + fn check( + dfa: &dense::DFA>, + haystack: &str, + pat: usize, + offset: usize, + ) -> Result<(), Box> { + check_impl(dfa, haystack, pat, offset)?; + check_impl(dfa.to_sparse()?, haystack, pat, offset)?; + Ok(()) + } + + let dfa = dense::DFA::new(r"[a-z]+")?; + let haystack = "123 foobar 4567"; + check(&dfa, haystack, 0, 10)?; + + let dfa = dense::DFA::new(r"[0-9]{4}")?; + let haystack = "123 foobar 4567"; + check(&dfa, haystack, 0, 15)?; + + let dfa = dense::DFA::new_many(&[r"[a-z]+", r"[0-9]+"])?; + let haystack = "123 foobar 4567"; + check(&dfa, haystack, 1, 3)?; + check(&dfa, &haystack[3..], 0, 7)?; + check(&dfa, &haystack[10..], 1, 5)?; + + Ok(()) +} diff --git a/vendor/regex-automata/tests/dfa/mod.rs b/vendor/regex-automata/tests/dfa/mod.rs new file mode 100644 index 00000000000000..0d8f539db63938 --- /dev/null +++ b/vendor/regex-automata/tests/dfa/mod.rs @@ -0,0 +1,8 @@ +#[cfg(all(feature = "dfa-build", feature = "dfa-search"))] +mod api; +#[cfg(feature = "dfa-onepass")] +mod onepass; +#[cfg(all(feature = "dfa-build", feature = "dfa-search"))] +mod regression; +#[cfg(all(not(miri), feature = "dfa-build", feature = "dfa-search"))] +mod suite; diff --git a/vendor/regex-automata/tests/dfa/onepass/mod.rs b/vendor/regex-automata/tests/dfa/onepass/mod.rs new file mode 100644 index 00000000000000..9d6ab475efef12 --- /dev/null +++ b/vendor/regex-automata/tests/dfa/onepass/mod.rs @@ -0,0 +1,2 @@ +#[cfg(not(miri))] +mod suite; diff --git a/vendor/regex-automata/tests/dfa/onepass/suite.rs b/vendor/regex-automata/tests/dfa/onepass/suite.rs new file mode 100644 index 00000000000000..aba46c86d1bf55 --- /dev/null +++ b/vendor/regex-automata/tests/dfa/onepass/suite.rs @@ -0,0 +1,197 @@ +use { + anyhow::Result, + regex_automata::{ + dfa::onepass::{self, DFA}, + nfa::thompson, + util::{iter, syntax}, + }, + regex_test::{ + CompiledRegex, Match, RegexTest, SearchKind, Span, TestResult, + TestRunner, + }, +}; + +use crate::{create_input, suite, testify_captures, untestify_kind}; + +const EXPANSIONS: &[&str] = &["is_match", "find", "captures"]; + +/// Tests the default configuration of the hybrid NFA/DFA. +#[test] +fn default() -> Result<()> { + let builder = DFA::builder(); + TestRunner::new()? + .expand(EXPANSIONS, |t| t.compiles()) + .test_iter(suite()?.iter(), compiler(builder)) + .assert(); + Ok(()) +} + +/// Tests the hybrid NFA/DFA when 'starts_for_each_pattern' is enabled for all +/// tests. +#[test] +fn starts_for_each_pattern() -> Result<()> { + let mut builder = DFA::builder(); + builder.configure(DFA::config().starts_for_each_pattern(true)); + TestRunner::new()? + .expand(EXPANSIONS, |t| t.compiles()) + .test_iter(suite()?.iter(), compiler(builder)) + .assert(); + Ok(()) +} + +/// Tests the hybrid NFA/DFA when byte classes are disabled. +/// +/// N.B. Disabling byte classes doesn't avoid any indirection at search time. +/// All it does is cause every byte value to be its own distinct equivalence +/// class. 
+#[test] +fn no_byte_classes() -> Result<()> { + let mut builder = DFA::builder(); + builder.configure(DFA::config().byte_classes(false)); + TestRunner::new()? + .expand(EXPANSIONS, |t| t.compiles()) + .test_iter(suite()?.iter(), compiler(builder)) + .assert(); + Ok(()) +} + +fn compiler( + mut builder: onepass::Builder, +) -> impl FnMut(&RegexTest, &[String]) -> Result { + move |test, regexes| { + // Check if our regex contains things that aren't supported by DFAs. + // That is, Unicode word boundaries when searching non-ASCII text. + if !configure_onepass_builder(test, &mut builder) { + return Ok(CompiledRegex::skip()); + } + let re = match builder.build_many(®exes) { + Ok(re) => re, + Err(err) => { + let msg = err.to_string(); + // This is pretty gross, but when a regex fails to compile as + // a one-pass regex, then we want to be OK with that and just + // skip the test. But we have to be careful to only skip it + // when the expected result is that the regex compiles. If + // the test is specifically checking that the regex does not + // compile, then we should bubble up that error and allow the + // test to pass. + // + // Since our error types are all generally opaque, we just + // look for an error string. Not great, but not the end of the + // world. + if test.compiles() && msg.contains("not one-pass") { + return Ok(CompiledRegex::skip()); + } + return Err(err.into()); + } + }; + let mut cache = re.create_cache(); + Ok(CompiledRegex::compiled(move |test| -> TestResult { + run_test(&re, &mut cache, test) + })) + } +} + +fn run_test( + re: &DFA, + cache: &mut onepass::Cache, + test: &RegexTest, +) -> TestResult { + let input = create_input(test); + match test.additional_name() { + "is_match" => { + TestResult::matched(re.is_match(cache, input.earliest(true))) + } + "find" => match test.search_kind() { + SearchKind::Earliest | SearchKind::Leftmost => { + let input = + input.earliest(test.search_kind() == SearchKind::Earliest); + let mut caps = re.create_captures(); + let it = iter::Searcher::new(input) + .into_matches_iter(|input| { + re.try_search(cache, input, &mut caps)?; + Ok(caps.get_match()) + }) + .infallible() + .take(test.match_limit().unwrap_or(std::usize::MAX)) + .map(|m| Match { + id: m.pattern().as_usize(), + span: Span { start: m.start(), end: m.end() }, + }); + TestResult::matches(it) + } + SearchKind::Overlapping => { + // The one-pass DFA does not support any kind of overlapping + // search. This is not just a matter of not having the API. + // It's fundamentally incompatible with the one-pass concept. + // If overlapping matches were possible, then the one-pass DFA + // would fail to build. + TestResult::skip() + } + }, + "captures" => match test.search_kind() { + SearchKind::Earliest | SearchKind::Leftmost => { + let input = + input.earliest(test.search_kind() == SearchKind::Earliest); + let it = iter::Searcher::new(input) + .into_captures_iter(re.create_captures(), |input, caps| { + re.try_search(cache, input, caps) + }) + .infallible() + .take(test.match_limit().unwrap_or(std::usize::MAX)) + .map(|caps| testify_captures(&caps)); + TestResult::captures(it) + } + SearchKind::Overlapping => { + // The one-pass DFA does not support any kind of overlapping + // search. This is not just a matter of not having the API. + // It's fundamentally incompatible with the one-pass concept. + // If overlapping matches were possible, then the one-pass DFA + // would fail to build. 
+ TestResult::skip() + } + }, + name => TestResult::fail(&format!("unrecognized test name: {name}")), + } +} + +/// Configures the given regex builder with all relevant settings on the given +/// regex test. +/// +/// If the regex test has a setting that is unsupported, then this returns +/// false (implying the test should be skipped). +fn configure_onepass_builder( + test: &RegexTest, + builder: &mut onepass::Builder, +) -> bool { + if !test.anchored() { + return false; + } + let match_kind = match untestify_kind(test.match_kind()) { + None => return false, + Some(k) => k, + }; + + let config = DFA::config().match_kind(match_kind); + builder + .configure(config) + .syntax(config_syntax(test)) + .thompson(config_thompson(test)); + true +} + +/// Configuration of a Thompson NFA compiler from a regex test. +fn config_thompson(test: &RegexTest) -> thompson::Config { + let mut lookm = regex_automata::util::look::LookMatcher::new(); + lookm.set_line_terminator(test.line_terminator()); + thompson::Config::new().utf8(test.utf8()).look_matcher(lookm) +} + +/// Configuration of the regex parser from a regex test. +fn config_syntax(test: &RegexTest) -> syntax::Config { + syntax::Config::new() + .case_insensitive(test.case_insensitive()) + .unicode(test.unicode()) + .utf8(test.utf8()) + .line_terminator(test.line_terminator()) +} diff --git a/vendor/regex-automata/tests/dfa/regression.rs b/vendor/regex-automata/tests/dfa/regression.rs new file mode 100644 index 00000000000000..09caffabcb1f16 --- /dev/null +++ b/vendor/regex-automata/tests/dfa/regression.rs @@ -0,0 +1,48 @@ +// A regression test for checking that minimization correctly translates +// whether a state is a match state or not. Previously, it was possible for +// minimization to mark a non-matching state as matching. +#[test] +#[cfg(not(miri))] +fn minimize_sets_correct_match_states() { + use regex_automata::{ + dfa::{dense::DFA, Automaton, StartKind}, + Anchored, Input, + }; + + let pattern = + // This is a subset of the grapheme matching regex. I couldn't seem + // to get a repro any smaller than this unfortunately. + r"(?x) + (?: + \p{gcb=Prepend}* + (?: + (?: + (?: + \p{gcb=L}* + (?:\p{gcb=V}+|\p{gcb=LV}\p{gcb=V}*|\p{gcb=LVT}) + \p{gcb=T}* + ) + | + \p{gcb=L}+ + | + \p{gcb=T}+ + ) + | + \p{Extended_Pictographic} + (?:\p{gcb=Extend}*\p{gcb=ZWJ}\p{Extended_Pictographic})* + | + [^\p{gcb=Control}\p{gcb=CR}\p{gcb=LF}] + ) + [\p{gcb=Extend}\p{gcb=ZWJ}\p{gcb=SpacingMark}]* + ) + "; + + let dfa = DFA::builder() + .configure( + DFA::config().start_kind(StartKind::Anchored).minimize(true), + ) + .build(pattern) + .unwrap(); + let input = Input::new(b"\xE2").anchored(Anchored::Yes); + assert_eq!(Ok(None), dfa.try_search_fwd(&input)); +} diff --git a/vendor/regex-automata/tests/dfa/suite.rs b/vendor/regex-automata/tests/dfa/suite.rs new file mode 100644 index 00000000000000..8368ffef49367c --- /dev/null +++ b/vendor/regex-automata/tests/dfa/suite.rs @@ -0,0 +1,443 @@ +use { + anyhow::Result, + regex_automata::{ + dfa::{ + self, dense, regex::Regex, sparse, Automaton, OverlappingState, + StartKind, + }, + nfa::thompson, + util::{prefilter::Prefilter, syntax}, + Anchored, Input, PatternSet, + }, + regex_test::{ + CompiledRegex, Match, RegexTest, SearchKind, Span, TestResult, + TestRunner, + }, +}; + +use crate::{create_input, suite, untestify_kind}; + +const EXPANSIONS: &[&str] = &["is_match", "find", "which"]; + +/// Runs the test suite with the default configuration. 
+#[test] +fn unminimized_default() -> Result<()> { + let builder = Regex::builder(); + TestRunner::new()? + .expand(EXPANSIONS, |t| t.compiles()) + .blacklist("expensive") + .test_iter(suite()?.iter(), dense_compiler(builder)) + .assert(); + Ok(()) +} + +/// Runs the test suite with the default configuration and a prefilter enabled, +/// if one can be built. +#[test] +fn unminimized_prefilter() -> Result<()> { + let my_compiler = |test: &RegexTest, regexes: &[String]| { + // Parse regexes as HIRs so we can get literals to build a prefilter. + let mut hirs = vec![]; + for pattern in regexes.iter() { + hirs.push(syntax::parse_with(pattern, &config_syntax(test))?); + } + let kind = match untestify_kind(test.match_kind()) { + None => return Ok(CompiledRegex::skip()), + Some(kind) => kind, + }; + let pre = Prefilter::from_hirs_prefix(kind, &hirs); + let mut builder = Regex::builder(); + builder.dense(dense::DFA::config().prefilter(pre)); + compiler(builder, |_, _, re| { + Ok(CompiledRegex::compiled(move |test| -> TestResult { + run_test(&re, test) + })) + })(test, regexes) + }; + TestRunner::new()? + .expand(EXPANSIONS, |t| t.compiles()) + .blacklist("expensive") + .test_iter(suite()?.iter(), my_compiler) + .assert(); + Ok(()) +} + +/// Runs the test suite with start states specialized. +#[test] +fn unminimized_specialized_start_states() -> Result<()> { + let mut builder = Regex::builder(); + builder.dense(dense::Config::new().specialize_start_states(true)); + + TestRunner::new()? + .expand(EXPANSIONS, |t| t.compiles()) + .blacklist("expensive") + .test_iter(suite()?.iter(), dense_compiler(builder)) + .assert(); + Ok(()) +} + +/// Runs the test suite with byte classes disabled. +#[test] +fn unminimized_no_byte_class() -> Result<()> { + let mut builder = Regex::builder(); + builder.dense(dense::Config::new().byte_classes(false)); + + TestRunner::new()? + .expand(EXPANSIONS, |t| t.compiles()) + .blacklist("expensive") + .test_iter(suite()?.iter(), dense_compiler(builder)) + .assert(); + Ok(()) +} + +/// Runs the test suite with NFA shrinking enabled. +#[test] +fn unminimized_nfa_shrink() -> Result<()> { + let mut builder = Regex::builder(); + builder.thompson(thompson::Config::new().shrink(true)); + + TestRunner::new()? + .expand(EXPANSIONS, |t| t.compiles()) + .blacklist("expensive") + .test_iter(suite()?.iter(), dense_compiler(builder)) + .assert(); + Ok(()) +} + +/// Runs the test suite on a minimized DFA with an otherwise default +/// configuration. +#[test] +fn minimized_default() -> Result<()> { + let mut builder = Regex::builder(); + builder.dense(dense::Config::new().minimize(true)); + TestRunner::new()? + .expand(EXPANSIONS, |t| t.compiles()) + .blacklist("expensive") + .test_iter(suite()?.iter(), dense_compiler(builder)) + .assert(); + Ok(()) +} + +/// Runs the test suite on a minimized DFA with byte classes disabled. +#[test] +fn minimized_no_byte_class() -> Result<()> { + let mut builder = Regex::builder(); + builder.dense(dense::Config::new().minimize(true).byte_classes(false)); + + TestRunner::new()? + .expand(EXPANSIONS, |t| t.compiles()) + .blacklist("expensive") + .test_iter(suite()?.iter(), dense_compiler(builder)) + .assert(); + Ok(()) +} + +/// Runs the test suite on a sparse unminimized DFA. +#[test] +fn sparse_unminimized_default() -> Result<()> { + let builder = Regex::builder(); + TestRunner::new()? 
+ .expand(EXPANSIONS, |t| t.compiles()) + .blacklist("expensive") + .test_iter(suite()?.iter(), sparse_compiler(builder)) + .assert(); + Ok(()) +} + +/// Runs the test suite on a sparse unminimized DFA with prefilters enabled. +#[test] +fn sparse_unminimized_prefilter() -> Result<()> { + let my_compiler = |test: &RegexTest, regexes: &[String]| { + // Parse regexes as HIRs so we can get literals to build a prefilter. + let mut hirs = vec![]; + for pattern in regexes.iter() { + hirs.push(syntax::parse_with(pattern, &config_syntax(test))?); + } + let kind = match untestify_kind(test.match_kind()) { + None => return Ok(CompiledRegex::skip()), + Some(kind) => kind, + }; + let pre = Prefilter::from_hirs_prefix(kind, &hirs); + let mut builder = Regex::builder(); + builder.dense(dense::DFA::config().prefilter(pre)); + compiler(builder, |builder, _, re| { + let fwd = re.forward().to_sparse()?; + let rev = re.reverse().to_sparse()?; + let re = builder.build_from_dfas(fwd, rev); + Ok(CompiledRegex::compiled(move |test| -> TestResult { + run_test(&re, test) + })) + })(test, regexes) + }; + TestRunner::new()? + .expand(EXPANSIONS, |t| t.compiles()) + .blacklist("expensive") + .test_iter(suite()?.iter(), my_compiler) + .assert(); + Ok(()) +} + +/// Another basic sanity test that checks we can serialize and then deserialize +/// a regex, and that the resulting regex can be used for searching correctly. +#[test] +fn serialization_unminimized_default() -> Result<()> { + let builder = Regex::builder(); + let my_compiler = |builder| { + compiler(builder, |builder, _, re| { + let builder = builder.clone(); + let (fwd_bytes, _) = re.forward().to_bytes_native_endian(); + let (rev_bytes, _) = re.reverse().to_bytes_native_endian(); + Ok(CompiledRegex::compiled(move |test| -> TestResult { + let fwd: dense::DFA<&[u32]> = + dense::DFA::from_bytes(&fwd_bytes).unwrap().0; + let rev: dense::DFA<&[u32]> = + dense::DFA::from_bytes(&rev_bytes).unwrap().0; + let re = builder.build_from_dfas(fwd, rev); + + run_test(&re, test) + })) + }) + }; + TestRunner::new()? + .expand(EXPANSIONS, |t| t.compiles()) + .blacklist("expensive") + .test_iter(suite()?.iter(), my_compiler(builder)) + .assert(); + Ok(()) +} + +/// A basic sanity test that checks we can serialize and then deserialize a +/// regex using sparse DFAs, and that the resulting regex can be used for +/// searching correctly. +#[test] +fn sparse_serialization_unminimized_default() -> Result<()> { + let builder = Regex::builder(); + let my_compiler = |builder| { + compiler(builder, |builder, _, re| { + let builder = builder.clone(); + let fwd_bytes = re.forward().to_sparse()?.to_bytes_native_endian(); + let rev_bytes = re.reverse().to_sparse()?.to_bytes_native_endian(); + Ok(CompiledRegex::compiled(move |test| -> TestResult { + let fwd: sparse::DFA<&[u8]> = + sparse::DFA::from_bytes(&fwd_bytes).unwrap().0; + let rev: sparse::DFA<&[u8]> = + sparse::DFA::from_bytes(&rev_bytes).unwrap().0; + let re = builder.build_from_dfas(fwd, rev); + run_test(&re, test) + })) + }) + }; + TestRunner::new()? 
+ .expand(EXPANSIONS, |t| t.compiles()) + .blacklist("expensive") + .test_iter(suite()?.iter(), my_compiler(builder)) + .assert(); + Ok(()) +} + +fn dense_compiler( + builder: dfa::regex::Builder, +) -> impl FnMut(&RegexTest, &[String]) -> Result { + compiler(builder, |_, _, re| { + Ok(CompiledRegex::compiled(move |test| -> TestResult { + run_test(&re, test) + })) + }) +} + +fn sparse_compiler( + builder: dfa::regex::Builder, +) -> impl FnMut(&RegexTest, &[String]) -> Result { + compiler(builder, |builder, _, re| { + let fwd = re.forward().to_sparse()?; + let rev = re.reverse().to_sparse()?; + let re = builder.build_from_dfas(fwd, rev); + Ok(CompiledRegex::compiled(move |test| -> TestResult { + run_test(&re, test) + })) + }) +} + +fn compiler( + mut builder: dfa::regex::Builder, + mut create_matcher: impl FnMut( + &dfa::regex::Builder, + Option, + Regex, + ) -> Result, +) -> impl FnMut(&RegexTest, &[String]) -> Result { + move |test, regexes| { + // Parse regexes as HIRs for some analysis below. + let mut hirs = vec![]; + for pattern in regexes.iter() { + hirs.push(syntax::parse_with(pattern, &config_syntax(test))?); + } + + // Get a prefilter in case the test wants it. + let kind = match untestify_kind(test.match_kind()) { + None => return Ok(CompiledRegex::skip()), + Some(kind) => kind, + }; + let pre = Prefilter::from_hirs_prefix(kind, &hirs); + + // Check if our regex contains things that aren't supported by DFAs. + // That is, Unicode word boundaries when searching non-ASCII text. + if !test.haystack().is_ascii() { + for hir in hirs.iter() { + if hir.properties().look_set().contains_word_unicode() { + return Ok(CompiledRegex::skip()); + } + } + } + if !configure_regex_builder(test, &mut builder) { + return Ok(CompiledRegex::skip()); + } + create_matcher(&builder, pre, builder.build_many(®exes)?) + } +} + +fn run_test(re: &Regex, test: &RegexTest) -> TestResult { + let input = create_input(test); + match test.additional_name() { + "is_match" => TestResult::matched(re.is_match(input.earliest(true))), + "find" => match test.search_kind() { + SearchKind::Earliest | SearchKind::Leftmost => { + let input = + input.earliest(test.search_kind() == SearchKind::Earliest); + TestResult::matches( + re.find_iter(input) + .take(test.match_limit().unwrap_or(std::usize::MAX)) + .map(|m| Match { + id: m.pattern().as_usize(), + span: Span { start: m.start(), end: m.end() }, + }), + ) + } + SearchKind::Overlapping => { + try_search_overlapping(re, &input).unwrap() + } + }, + "which" => match test.search_kind() { + SearchKind::Earliest | SearchKind::Leftmost => { + // There are no "which" APIs for standard searches. + TestResult::skip() + } + SearchKind::Overlapping => { + let dfa = re.forward(); + let mut patset = PatternSet::new(dfa.pattern_len()); + dfa.try_which_overlapping_matches(&input, &mut patset) + .unwrap(); + TestResult::which(patset.iter().map(|p| p.as_usize())) + } + }, + name => TestResult::fail(&format!("unrecognized test name: {name}")), + } +} + +/// Configures the given regex builder with all relevant settings on the given +/// regex test. +/// +/// If the regex test has a setting that is unsupported, then this returns +/// false (implying the test should be skipped). 
+fn configure_regex_builder( + test: &RegexTest, + builder: &mut dfa::regex::Builder, +) -> bool { + let match_kind = match untestify_kind(test.match_kind()) { + None => return false, + Some(k) => k, + }; + + let starts = if test.anchored() { + StartKind::Anchored + } else { + StartKind::Unanchored + }; + let mut dense_config = dense::Config::new() + .start_kind(starts) + .match_kind(match_kind) + .unicode_word_boundary(true); + // When doing an overlapping search, we might try to find the start of each + // match with a custom search routine. In that case, we need to tell the + // reverse search (for the start offset) which pattern to look for. The + // only way that API works is when anchored starting states are compiled + // for each pattern. This does technically also enable it for the forward + // DFA, but we're okay with that. + if test.search_kind() == SearchKind::Overlapping { + dense_config = dense_config.starts_for_each_pattern(true); + } + + builder + .syntax(config_syntax(test)) + .thompson(config_thompson(test)) + .dense(dense_config); + true +} + +/// Configuration of a Thompson NFA compiler from a regex test. +fn config_thompson(test: &RegexTest) -> thompson::Config { + let mut lookm = regex_automata::util::look::LookMatcher::new(); + lookm.set_line_terminator(test.line_terminator()); + thompson::Config::new().utf8(test.utf8()).look_matcher(lookm) +} + +/// Configuration of the regex syntax from a regex test. +fn config_syntax(test: &RegexTest) -> syntax::Config { + syntax::Config::new() + .case_insensitive(test.case_insensitive()) + .unicode(test.unicode()) + .utf8(test.utf8()) + .line_terminator(test.line_terminator()) +} + +/// Execute an overlapping search, and for each match found, also find its +/// overlapping starting positions. +/// +/// N.B. This routine used to be part of the crate API, but 1) it wasn't clear +/// to me how useful it was and 2) it wasn't clear to me what its semantics +/// should be. In particular, a potentially surprising footgun of this routine +/// that it is worst case *quadratic* in the size of the haystack. Namely, it's +/// possible to report a match at every position, and for every such position, +/// scan all the way to the beginning of the haystack to find the starting +/// position. Typical leftmost non-overlapping searches don't suffer from this +/// because, well, matches can't overlap. So subsequent searches after a match +/// is found don't revisit previously scanned parts of the haystack. +/// +/// Its semantics can be strange for other reasons too. For example, given +/// the regex '.*' and the haystack 'zz', the full set of overlapping matches +/// is: [0, 0], [1, 1], [0, 1], [2, 2], [1, 2], [0, 2]. The ordering of +/// those matches is quite strange, but makes sense when you think about the +/// implementation: an end offset is found left-to-right, and then one or more +/// starting offsets are found right-to-left. +/// +/// Nevertheless, we provide this routine in our test suite because it's +/// useful to test the low level DFA overlapping search and our test suite +/// is written in a way that requires starting offsets. 
+fn try_search_overlapping( + re: &Regex, + input: &Input<'_>, +) -> Result { + let mut matches = vec![]; + let mut fwd_state = OverlappingState::start(); + let (fwd_dfa, rev_dfa) = (re.forward(), re.reverse()); + while let Some(end) = { + fwd_dfa.try_search_overlapping_fwd(input, &mut fwd_state)?; + fwd_state.get_match() + } { + let revsearch = input + .clone() + .range(input.start()..end.offset()) + .anchored(Anchored::Pattern(end.pattern())) + .earliest(false); + let mut rev_state = OverlappingState::start(); + while let Some(start) = { + rev_dfa.try_search_overlapping_rev(&revsearch, &mut rev_state)?; + rev_state.get_match() + } { + let span = Span { start: start.offset(), end: end.offset() }; + let mat = Match { id: end.pattern().as_usize(), span }; + matches.push(mat); + } + } + Ok(TestResult::matches(matches)) +} diff --git a/vendor/regex-automata/tests/fuzz/dense.rs b/vendor/regex-automata/tests/fuzz/dense.rs new file mode 100644 index 00000000000000..213891b3e8b563 --- /dev/null +++ b/vendor/regex-automata/tests/fuzz/dense.rs @@ -0,0 +1,52 @@ +// This test was found by a fuzzer input that crafted a way to provide +// an invalid serialization of ByteClasses that passed our verification. +// Specifically, the verification step in the deserialization of ByteClasses +// used an iterator that depends on part of the serialized bytes being correct. +// (Specifically, the encoding of the number of classes.) +#[test] +fn invalid_byte_classes() { + let data = include_bytes!( + "testdata/deserialize_dense_crash-9486fb7c8a93b12c12a62166b43d31640c0208a9", + ); + let _ = fuzz_run(data); +} + +#[test] +fn invalid_byte_classes_min() { + let data = include_bytes!( + "testdata/deserialize_dense_minimized-from-9486fb7c8a93b12c12a62166b43d31640c0208a9", + ); + let _ = fuzz_run(data); +} + +// This is the code from the fuzz target. Kind of sucks to duplicate it here, +// but this is fundamentally how we interpret the date. +fn fuzz_run(given_data: &[u8]) -> Option<()> { + use regex_automata::dfa::Automaton; + + if given_data.len() < 2 { + return None; + } + let haystack_len = usize::from(given_data[0]); + let haystack = given_data.get(1..1 + haystack_len)?; + let given_dfa_bytes = given_data.get(1 + haystack_len..)?; + + // We help the fuzzer along by adding a preamble to the bytes that should + // at least make these first parts valid. The preamble expects a very + // specific sequence of bytes, so it makes sense to just force this. + let label = "rust-regex-automata-dfa-dense\x00\x00\x00"; + assert_eq!(0, label.len() % 4); + let endianness_check = 0xFEFFu32.to_ne_bytes().to_vec(); + let version_check = 2u32.to_ne_bytes().to_vec(); + let mut dfa_bytes: Vec = vec![]; + dfa_bytes.extend(label.as_bytes()); + dfa_bytes.extend(&endianness_check); + dfa_bytes.extend(&version_check); + dfa_bytes.extend(given_dfa_bytes); + // This is the real test: checking that any input we give to + // DFA::from_bytes will never result in a panic. 
+ let (dfa, _) = + regex_automata::dfa::dense::DFA::from_bytes(&dfa_bytes).ok()?; + let _ = dfa.try_search_fwd(®ex_automata::Input::new(haystack)); + Some(()) +} diff --git a/vendor/regex-automata/tests/fuzz/mod.rs b/vendor/regex-automata/tests/fuzz/mod.rs new file mode 100644 index 00000000000000..960cb4251ab007 --- /dev/null +++ b/vendor/regex-automata/tests/fuzz/mod.rs @@ -0,0 +1,2 @@ +mod dense; +mod sparse; diff --git a/vendor/regex-automata/tests/fuzz/sparse.rs b/vendor/regex-automata/tests/fuzz/sparse.rs new file mode 100644 index 00000000000000..837ad10147c016 --- /dev/null +++ b/vendor/regex-automata/tests/fuzz/sparse.rs @@ -0,0 +1,132 @@ +// This is a regression test for a bug in how special states are handled. The +// fuzzer found a case where a state returned true for 'is_special_state' but +// *didn't* return true for 'is_dead_state', 'is_quit_state', 'is_match_state', +// 'is_start_state' or 'is_accel_state'. This in turn tripped a debug assertion +// in the core matching loop that requires 'is_special_state' being true to +// imply that one of the other routines returns true. +// +// We fixed this by adding some validation to both dense and sparse DFAs that +// checks that this property is true for every state ID in the DFA. +#[test] +fn invalid_special_state() { + let data = include_bytes!( + "testdata/deserialize_sparse_crash-a1b839d899ced76d5d7d0f78f9edb7a421505838", + ); + let _ = fuzz_run(data); +} + +// This is an interesting case where a fuzzer generated a DFA with +// a transition to a state ID that decoded as a valid state, but +// where the ID itself did not point to one of the two existing +// states for this particular DFA. This combined with marking this +// transition's state ID as special but without actually making one of the +// 'is_{dead,quit,match,start,accel}_state' predicates return true ended up +// tripping the 'debug_assert(dfa.is_quit_state(sid))' code in the search +// routine. +// +// We fixed this in alloc mode by checking that every transition points to a +// valid state ID. Technically this bug still exists in core-only mode, but +// it's not clear how to fix it. And it's worth pointing out that the search +// routine won't panic in production. It will just provide invalid results. And +// that's acceptable within the contract of DFA::from_bytes. +#[test] +fn transition_to_invalid_but_valid_state() { + let data = include_bytes!( + "testdata/deserialize_sparse_crash-dbb8172d3984e7e7d03f4b5f8bb86ecd1460eff9", + ); + let _ = fuzz_run(data); +} + +// Another one caught by the fuzzer where it generated a DFA that reported a +// start state as a match state. Since matches are always delayed by one byte, +// start states specifically cannot be match states. And indeed, the search +// code relies on this. +#[test] +fn start_state_is_not_match_state() { + let data = include_bytes!( + "testdata/deserialize_sparse_crash-0da59c0434eaf35e5a6b470fa9244bb79c72b000", + ); + let _ = fuzz_run(data); +} + +// This is variation on 'transition_to_invalid_but_valid_state', but happens +// to a start state. Namely, the fuzz data here builds a DFA with a start +// state ID that is incorrect but points to a sequence of bytes that satisfies +// state decoding validation. This errant state in turn has a non-zero number +// of transitions, and its those transitions that point to a state that does +// *not* satisfy state decoding validation. But we never checked those. So the +// fix here was to add validation of the transitions off of the start state. 
+#[test] +fn start_state_has_valid_transitions() { + let data = include_bytes!( + "testdata/deserialize_sparse_crash-61fd8e3003bf9d99f6c1e5a8488727eefd234b98", + ); + let _ = fuzz_run(data); +} + +// This fuzz input generated a DFA with a state whose ID was in the match state +// ID range, but where the state itself was encoded with zero pattern IDs. We +// added validation code to check this case. +#[test] +fn match_state_inconsistency() { + let data = include_bytes!( + "testdata/deserialize_sparse_crash-c383ae07ec5e191422eadc492117439011816570", + ); + let _ = fuzz_run(data); +} + +// This fuzz input generated a DFA with a state whose ID was in the accelerator +// range, but who didn't have any accelerators. This violated an invariant that +// assumes that if 'dfa.is_accel_state(sid)' returns true, then the state must +// have some accelerators. +#[test] +fn invalid_accelerators() { + let data = include_bytes!( + "testdata/deserialize_sparse_crash-d07703ceb94b10dcd9e4acb809f2051420449e2b", + ); + let _ = fuzz_run(data); +} + +// This fuzz input generated a DFA with a state whose EOI transition led to +// a quit state, which is generally considered illegal. Why? Because the EOI +// transition is defined over a special sentinel alphabet element and one +// cannot configure a DFA to "quit" on that sentinel. +#[test] +fn eoi_transition_to_quit_state() { + let data = include_bytes!( + "testdata/deserialize_sparse_crash-18cfc246f2ddfc3dfc92b0c7893178c7cf65efa9", + ); + let _ = fuzz_run(data); +} + +// This is the code from the fuzz target. Kind of sucks to duplicate it here, +// but this is fundamentally how we interpret the date. +fn fuzz_run(given_data: &[u8]) -> Option<()> { + use regex_automata::dfa::Automaton; + + if given_data.len() < 2 { + return None; + } + let haystack_len = usize::from(given_data[0]); + let haystack = given_data.get(1..1 + haystack_len)?; + let given_dfa_bytes = given_data.get(1 + haystack_len..)?; + + // We help the fuzzer along by adding a preamble to the bytes that should + // at least make these first parts valid. The preamble expects a very + // specific sequence of bytes, so it makes sense to just force this. + let label = "rust-regex-automata-dfa-sparse\x00\x00"; + assert_eq!(0, label.len() % 4); + let endianness_check = 0xFEFFu32.to_ne_bytes().to_vec(); + let version_check = 2u32.to_ne_bytes().to_vec(); + let mut dfa_bytes: Vec = vec![]; + dfa_bytes.extend(label.as_bytes()); + dfa_bytes.extend(&endianness_check); + dfa_bytes.extend(&version_check); + dfa_bytes.extend(given_dfa_bytes); + // This is the real test: checking that any input we give to + // DFA::from_bytes will never result in a panic. 
+ let (dfa, _) = + regex_automata::dfa::sparse::DFA::from_bytes(&dfa_bytes).ok()?; + let _ = dfa.try_search_fwd(®ex_automata::Input::new(haystack)); + Some(()) +} diff --git a/vendor/regex-automata/tests/fuzz/testdata/deserialize_dense_crash-9486fb7c8a93b12c12a62166b43d31640c0208a9 b/vendor/regex-automata/tests/fuzz/testdata/deserialize_dense_crash-9486fb7c8a93b12c12a62166b43d31640c0208a9 new file mode 100644 index 0000000000000000000000000000000000000000..972bfb2cd405c68babf906df77a2ab452c01d10a GIT binary patch literal 1894 zcmd^AOHKnZ41F#IbqO~ZiPh}Nif!6ku;3W%D#2~I23M%WhQmN=JL5VtnTSlq4omsH z_<8Ku$q?`u4}IT*0BpEDbBo;e+*AZq>3}y}so)cM8erW2#Q189@%WvVQ3Uk<;cVF; z(v51j6F-tSXD1X1xY04|W^EA*=;(VKxKGl>OpT%n{KWpm0VPFt{`)FS*u{WJy~0kWN^slEmL_7mdmek zEHOE9VH{E@?O1(Tap^dB|JTBux&L~^Glt7g)e*U_mx(9%GJcIUI>ee%fj8A;!TN`Q Q7L&y7j}re3CkH_|0iGi2$p8QV literal 0 HcmV?d00001 diff --git a/vendor/regex-automata/tests/fuzz/testdata/deserialize_dense_minimized-from-9486fb7c8a93b12c12a62166b43d31640c0208a9 b/vendor/regex-automata/tests/fuzz/testdata/deserialize_dense_minimized-from-9486fb7c8a93b12c12a62166b43d31640c0208a9 new file mode 100644 index 0000000000000000000000000000000000000000..72dbdad825d233306b86959937492c336281016e GIT binary patch literal 1882 zcmd^AOHKnZ41F#IbqO~ZiPh}Nif!6ku;3W%D#2~I23M%WhQmN=JL5VtnTSlq4omsH z_<8Ku$q?`u4}IT*0BpEDbBo;e+*AZq>3}y}so)cM8erW2#Q189@%WvVQ3Uk<;cVF; z(v51j6F-tSXD1X1xY04|W^EA*=;(VKxKGl>OpT%n{KWpm0VPFt{`)FS*u{WJy~0kWN^slEmL_7mdmek zEHOE9VH{E@?O1(Tap^dB|JTBux&L~^Glt7g)e*U_mx(9%GJcIUI>ee%fj8A;!TKkF P7Ldg14-x+WC&3d=%n|Cz literal 0 HcmV?d00001 diff --git a/vendor/regex-automata/tests/fuzz/testdata/deserialize_sparse_crash-0da59c0434eaf35e5a6b470fa9244bb79c72b000 b/vendor/regex-automata/tests/fuzz/testdata/deserialize_sparse_crash-0da59c0434eaf35e5a6b470fa9244bb79c72b000 new file mode 100644 index 0000000000000000000000000000000000000000..5ce508803ef45322bdbd969c864237e04105a6bc GIT binary patch literal 941 zcmWe-WdH+S1||s2QqNQY5dxAda25l@GBEqke<1h?7Y6D!G%z%%g9~6~ur^~AW`Np> zLy}@)Mu-Xj5eBV+F&#i&(}nR+X+}s$FaSOJ4I~5dF_2Z{}%MVF5EZXrn9#@Q_ggzeqKcI4&K>9Wwbx2a#3=HpqTv5^g|KBqJ E0N%YJX#fBK literal 0 HcmV?d00001 diff --git a/vendor/regex-automata/tests/fuzz/testdata/deserialize_sparse_crash-18cfc246f2ddfc3dfc92b0c7893178c7cf65efa9 b/vendor/regex-automata/tests/fuzz/testdata/deserialize_sparse_crash-18cfc246f2ddfc3dfc92b0c7893178c7cf65efa9 new file mode 100644 index 0000000000000000000000000000000000000000..4fa13fbed47013e22bd6e9c9209ca0e8a61937fb GIT binary patch literal 924 zcmZSHK6fTF1H=FS{|_(#X$D3h1_B^IBR&HN3{ilfp`n2)k_boz7-Yj389<$IF!4Xw zr2ou7^C~MUA)pyZ0mVUp$-1)A7Ag#(fP!p5@nEPCXF)WIg31;sSAPPOQCC^nURha# zPzBZd2TU`9yaECsh5>~6|NpW741Ymvu)uGyTR_DB|At@{K&AkQ4+;!c5QD)1Otv#* zHB1Mya1d>1;>l=y5Em}S(g5T+Fq{CAPymyHMgju^cf;pzFiCQ09+EUNp{PdXfzrod w08n=@!h?x6jshk`oB)z4a7u$EegH|3Is+iS4Hm&e025X{RGOs$$Z!B+0PByNd;kCd literal 0 HcmV?d00001 diff --git a/vendor/regex-automata/tests/fuzz/testdata/deserialize_sparse_crash-61fd8e3003bf9d99f6c1e5a8488727eefd234b98 b/vendor/regex-automata/tests/fuzz/testdata/deserialize_sparse_crash-61fd8e3003bf9d99f6c1e5a8488727eefd234b98 new file mode 100644 index 0000000000000000000000000000000000000000..0f809f33f44d17fb4e2d04c100226208e407d109 GIT binary patch literal 933 zcmd;OVEFSN2tNI8V5> zA;$bin6?JSbO14QVLVit5$Y;nXnX_87=nTYNHaD-l*vOV<_rd?`5-=eKrsWw*dPj_ zB&G<8G?oD*FHbdj03yoGBN@)!T4Zv@p-1Nn_%68BTLxF@#eAJ4{9t1YjH-Gz(mO8k{xCfQJAZoJAhv2T+?1lGbD(`wJ2u U$^>R12#YBgn)-p11H*d;040QR*8l(j literal 0 HcmV?d00001 diff --git 
a/vendor/regex-automata/tests/fuzz/testdata/deserialize_sparse_crash-dbb8172d3984e7e7d03f4b5f8bb86ecd1460eff9 b/vendor/regex-automata/tests/fuzz/testdata/deserialize_sparse_crash-dbb8172d3984e7e7d03f4b5f8bb86ecd1460eff9 new file mode 100644 index 0000000000000000000000000000000000000000..aa72eb1dd6123880d4b062ec55f9adc2c0d2539d GIT binary patch literal 728 zcmZQ-V5qG8_y2!=<$opy1_nz91_K}k1pogV{)d4K6fk2t1Y}ggKy$lFTU!l`2hop2 zsvxm(Fd=#;fGI`>1_21a31(y^NT$8*KWhS11}*_}BAhmWvyd3?;cQVLZ~ze?AR&tE zoLDHw2uQPl*bX291f(Gpgk)-9Fl1oRgbD#2%b{_y2zY8Pa!; literal 0 HcmV?d00001 diff --git a/vendor/regex-automata/tests/gen/README.md b/vendor/regex-automata/tests/gen/README.md new file mode 100644 index 00000000000000..4b7ac1bc90f164 --- /dev/null +++ b/vendor/regex-automata/tests/gen/README.md @@ -0,0 +1,65 @@ +This directory contains tests for serialized objects from the regex-automata +crate. Currently, there are only two supported such objects: dense and sparse +DFAs. + +The idea behind these tests is to commit some serialized objects and run some +basic tests by deserializing them and running searches and ensuring they are +correct. We also make sure these are run under Miri, since deserialization is +one of the biggest places where undefined behavior might occur in this crate +(at the time of writing). + +The main thing we're testing is that the *current* code can still deserialize +*old* objects correctly. Generally speaking, compatibility extends to semver +compatible releases of this crate. Beyond that, no promises are made, although +in practice callers can at least depend on errors occurring. (The serialized +format always includes a version number, and incompatible changes increment +that version number such that an error will occur if an unsupported version is +detected.) + +To generate the dense DFAs, I used this command: + +``` +$ regex-cli generate serialize dense regex \ + MULTI_PATTERN_V2 \ + tests/gen/dense/ \ + --rustfmt \ + --safe \ + --starts-for-each-pattern \ + --specialize-start-states \ + --start-kind both \ + --unicode-word-boundary \ + --minimize \ + '\b[a-zA-Z]+\b' \ + '(?m)^\S+$' \ + '(?Rm)^\S+$' +``` + +And to generate the sparse DFAs, I used this command, which is the same as +above, but with `s/dense/sparse/g`. + +``` +$ regex-cli generate serialize sparse regex \ + MULTI_PATTERN_V2 \ + tests/gen/sparse/ \ + --rustfmt \ + --safe \ + --starts-for-each-pattern \ + --specialize-start-states \ + --start-kind both \ + --unicode-word-boundary \ + --minimize \ + '\b[a-zA-Z]+\b' \ + '(?m)^\S+$' \ + '(?Rm)^\S+$' +``` + +The idea is to try to enable as many of the DFA's options as possible in order +to test that serialization works for all of them. + +Arguably we should increase test coverage here, but this is a start. Note +that in particular, this does not need to test that serialization and +deserialization correctly round-trips on its own. Indeed, the normal regex test +suite has a test that does a serialization round trip for every test supported +by DFAs. So that has very good coverage. What we're interested in testing here +is our compatibility promise: do DFAs generated with an older revision of the +code still deserialize correctly? 
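+
+For a rough picture of how these committed files get consumed, here is a
+minimal sketch that deserializes the forward dense DFA and runs a search with
+it. It assumes it lives in a module sitting next to the `.dfa` files (as the
+generated `tests/gen/dense/multi_pattern_v2.rs` does) and it only approximates
+that generated loader, which remains the authoritative version:
+
+```
+use regex_automata::{
+    dfa::{dense::DFA, Automaton},
+    util::wire::AlignAs,
+    Input,
+};
+
+// Dense DFA bytes must be u32-aligned, so the raw file contents are wrapped
+// in `AlignAs` before deserializing.
+static ALIGNED: &AlignAs<[u8], u32> = &AlignAs {
+    _align: [],
+    #[cfg(target_endian = "big")]
+    bytes: *include_bytes!("multi_pattern_v2_fwd.bigendian.dfa"),
+    #[cfg(target_endian = "little")]
+    bytes: *include_bytes!("multi_pattern_v2_fwd.littleendian.dfa"),
+};
+
+#[test]
+fn deserialize_and_search() {
+    // `from_bytes` validates the serialized DFA, so an incompatible version
+    // number or corrupted data yields an error rather than undefined behavior.
+    let (dfa, _) = DFA::from_bytes(&ALIGNED.bytes)
+        .expect("committed forward DFA should still deserialize");
+    // The forward DFA alone reports only the end offset of the leftmost match.
+    let half = dfa.try_search_fwd(&Input::new("@ abcd @")).unwrap();
+    assert!(half.is_some());
+}
+```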
diff --git a/vendor/regex-automata/tests/gen/dense/mod.rs b/vendor/regex-automata/tests/gen/dense/mod.rs new file mode 100644 index 00000000000000..b4365d4e19d2d1 --- /dev/null +++ b/vendor/regex-automata/tests/gen/dense/mod.rs @@ -0,0 +1,22 @@ +use regex_automata::{Input, Match}; + +mod multi_pattern_v2; + +#[test] +fn multi_pattern_v2() { + use multi_pattern_v2::MULTI_PATTERN_V2 as RE; + + assert_eq!(Some(Match::must(0, 0..4)), RE.find("abcd")); + assert_eq!(Some(Match::must(0, 2..6)), RE.find("@ abcd @")); + assert_eq!(Some(Match::must(1, 0..6)), RE.find("@abcd@")); + assert_eq!(Some(Match::must(0, 1..5)), RE.find("\nabcd\n")); + assert_eq!(Some(Match::must(0, 1..5)), RE.find("\nabcd wxyz\n")); + assert_eq!(Some(Match::must(1, 1..7)), RE.find("\n@abcd@\n")); + assert_eq!(Some(Match::must(2, 0..6)), RE.find("@abcd@\r\n")); + assert_eq!(Some(Match::must(1, 2..8)), RE.find("\r\n@abcd@")); + assert_eq!(Some(Match::must(2, 2..8)), RE.find("\r\n@abcd@\r\n")); + + // Fails because we have heuristic support for Unicode word boundaries + // enabled. + assert!(RE.try_search(&Input::new(b"\xFF@abcd@\xFF")).is_err()); +} diff --git a/vendor/regex-automata/tests/gen/dense/multi_pattern_v2.rs b/vendor/regex-automata/tests/gen/dense/multi_pattern_v2.rs new file mode 100644 index 00000000000000..a95fd204b5ef87 --- /dev/null +++ b/vendor/regex-automata/tests/gen/dense/multi_pattern_v2.rs @@ -0,0 +1,43 @@ +// DO NOT EDIT THIS FILE. IT WAS AUTOMATICALLY GENERATED BY: +// +// regex-cli generate serialize dense regex MULTI_PATTERN_V2 tests/gen/dense/ --rustfmt --safe --starts-for-each-pattern --specialize-start-states --start-kind both --unicode-word-boundary --minimize \b[a-zA-Z]+\b (?m)^\S+$ (?Rm)^\S+$ +// +// regex-cli 0.0.1 is available on crates.io. + +use regex_automata::{ + dfa::{dense::DFA, regex::Regex}, + util::{lazy::Lazy, wire::AlignAs}, +}; + +pub static MULTI_PATTERN_V2: Lazy>> = + Lazy::new(|| { + let dfafwd = { + static ALIGNED: &AlignAs<[u8], u32> = &AlignAs { + _align: [], + #[cfg(target_endian = "big")] + bytes: *include_bytes!("multi_pattern_v2_fwd.bigendian.dfa"), + #[cfg(target_endian = "little")] + bytes: *include_bytes!( + "multi_pattern_v2_fwd.littleendian.dfa" + ), + }; + DFA::from_bytes(&ALIGNED.bytes) + .expect("serialized forward DFA should be valid") + .0 + }; + let dfarev = { + static ALIGNED: &AlignAs<[u8], u32> = &AlignAs { + _align: [], + #[cfg(target_endian = "big")] + bytes: *include_bytes!("multi_pattern_v2_rev.bigendian.dfa"), + #[cfg(target_endian = "little")] + bytes: *include_bytes!( + "multi_pattern_v2_rev.littleendian.dfa" + ), + }; + DFA::from_bytes(&ALIGNED.bytes) + .expect("serialized reverse DFA should be valid") + .0 + }; + Regex::builder().build_from_dfas(dfafwd, dfarev) + }); diff --git a/vendor/regex-automata/tests/gen/dense/multi_pattern_v2_fwd.bigendian.dfa b/vendor/regex-automata/tests/gen/dense/multi_pattern_v2_fwd.bigendian.dfa new file mode 100644 index 0000000000000000000000000000000000000000..6d6e040c36f08157a233d1fa08614f9856fbfa87 GIT binary patch literal 11100 zcmeHN+invv5cTG=n{X%Gp+LE}g#3elB@%%rKx$KoM}L~Xj55cbadw@{N~ytJwMUvB zpYg_?V>{VR)RVK*P5w%O!N)^3+ ze=uA_Sv4^V7<=5YuBlc_kpM%@gP0Xd4%`;m{7b!D_P!Jz0W6PTR*wbfYm03DrCu)kU(+y} zAZSe;IU=ejo6O-Ib0$JK|F!WX*Est+dM~ifviCH1D#&MDeLTrEKKpF+S;)HU{^ed* z_b<8ny>Dom_d?NE#eaP*>g}%km$P2qf5|n@zmCp7>#qCnTcr>K^5fWe4teGEn67dC zk-jSa2kblMuKOR!zUtNfOAb6H-;Sw&lkU3ziaf^WUvl6P`FBkIH|ehXFU`7t$$?8t zL0l=%u~wQoas*AA0tF4SD=-K8hs8rWBBSoiaQcvE(lG{ZH!UviGH3dyhUm z>$v@Aj&+W`$DFD5zs*0+fb(yeBi(iX@?B4nKL_K#=b_#B{;7BMzxC(7Qhfd+ 
zoy&i!^IvH13;Q3|f^+CAE#--E{x|7d{!^X*$|w)ZOe8sQTXfz(`NroSp8td^&Vj@C zAL`$vv-zibQb4@6`lrbn{#ke3e?dNW1@<5PBh4J?T>ep03KYis!v4E4!Q2Kr|6tC? zI+uUcn*#D%kKF$;uH+iO_i#S^9>hAEf2)!}ytewM*&6G?y6gUTz4y_F{dTOGBc02? zd#_S07e!(DeJ^ZCa~tgZgE=4TT>eu%|FrG@K=HoQ8sOSk=klLw|H)E7-`fj%bE630 z{-D5$kw!T-;eG-dys!QX=^HP9S}*!q7P?NP2r+6Cok%~TJenbDR&j-t;Y zZLn(_>f?ZRKe!ONgC++PgQ1Oyz;XbGwLI#V>!?}Bz>?AwhF)%o8Mxf_Ng}YUgYaL~ Ca~hcd literal 0 HcmV?d00001 diff --git a/vendor/regex-automata/tests/gen/dense/multi_pattern_v2_fwd.littleendian.dfa b/vendor/regex-automata/tests/gen/dense/multi_pattern_v2_fwd.littleendian.dfa new file mode 100644 index 0000000000000000000000000000000000000000..a1f4b3da157c729f7146d7e479d5be9e99564c0d GIT binary patch literal 11100 zcmeHL*=`gu5OnT@n{Y#ba3_TQhyImFEIa{H4v9y9n!k)th$z8#(%4nBVYhu@EfA%wGEAuK})>K?dG!?3ixva-51 zf%T1XY))8;Q9E<~D-bNigRSb18>2|6 z2xvZP9|iL~n2@&xWNj2BYA~w#sJ-NP2S(&<0{sX`8Z1)n!+_dD!4MB7cuYRk$R>49%*sT7|Dc4_p2^x6ap`u#b9A+Une9f(K$5UjiZ%?Gvr_r&V|KV}PtcrYPv3!Jxp z+lN{Oi|}AV-X_rV&$B<3xbF0eD(f$8f$71#R^(n3N$XAwgm#2#{2rn|+4Zk%LAM9=eOw1o9!0pb%_Y&#Wl-(k cvGjAEjYD$`T(`gz!NiFMOkm$M*gly425W{InE(I) literal 0 HcmV?d00001 diff --git a/vendor/regex-automata/tests/gen/dense/multi_pattern_v2_rev.bigendian.dfa b/vendor/regex-automata/tests/gen/dense/multi_pattern_v2_rev.bigendian.dfa new file mode 100644 index 0000000000000000000000000000000000000000..74f74ec2a95568ad24c31c70bafe2721122d6e14 GIT binary patch literal 7584 zcmeHL?QRn>5cFLxDJ_&j_$~#?Hznj9{3?+M`~svlmH6pLyCN0i zy{f;0!PX+~L~J+MX~kXO-tPTY>=6$h4u``>kBKM5(}-u!ffxHP8ypg`~7nkBQ=;de2G}jx^xPurDq@iRkAKu@ov*@FBi+ZPG^zY!u!Vq>+>3d~brq7sZ6YP`p9-Yf5~ z$yL0gC-$2I^ylH`Dm7{swfysUN)GUyUqr6K`+e8wN^Vh6AbZLIIqTx@DF?50Gjn?0 zwZ$6Vxn}IQX?5dkiMhC!r3=c~h8RlUC6wnsbvak_?@SgH@I?QG3(8n((5h~7o_DM- z=UV>RZx)pES$eQH&)T9QB0LfEm4FWvW~CepZA<*`IzR5-SwQ>Z*dNog%_ec9D#i{q z)6~5(7TQpEP?EVQ<2oU7vjY^+xiD+&nfX@8w?oc3`0Q?|Gu{g^pHGqI d$#>xAm35(&!ypSn%NiQMsVj%e3S=)u&jI9TR@eXl literal 0 HcmV?d00001 diff --git a/vendor/regex-automata/tests/gen/dense/multi_pattern_v2_rev.littleendian.dfa b/vendor/regex-automata/tests/gen/dense/multi_pattern_v2_rev.littleendian.dfa new file mode 100644 index 0000000000000000000000000000000000000000..663bdb9ead53e60f1c085c2a66b7ba12bd773aed GIT binary patch literal 7584 zcmeHL+foxj5S=7k1Vu335f#N7#{5J7N~(-LsZ|iFe4J04FB8w%ogN0V$(G4TM~hQ+ zINd#$?w;8Vfs?b-$?#d#EX|#aCrFY z^*kKCdHe3Y;=}Rr$H8FmX?Ou`|3+_4V389LEVAGtwf>DY7QHj)sz6YP1xxiG*6N)? 
zwqc54jP^VV)>$wh?8v2 z9vu@bv|wKEGPYcKuDO@SsIBCD10rP+goge#MpZievqbG`Jo3?j74wjJJ3)L8v z&wAxYo$|3v3|Q5Ryvw3`m$8YD8&NezZ6)U$&?9FN=>5-9Cc&7;Sx_rvz93-1R7+LR z;y(iW0s^0Zz=J8qXTF7k`4-fAk$x!%`{sQ{bx@5_*?;BZ^S{X#EVrO%y#*kArrCc~ z+8Uz<{_bOp{Wti6ofv;{kCcW7}9x@M@L3AKDnMl<62_&5UnGe=GXjgL8Lc hoc3>> = + Lazy::new(|| { + let dfafwd = { + #[cfg(target_endian = "big")] + static BYTES: &'static [u8] = + include_bytes!("multi_pattern_v2_fwd.bigendian.dfa"); + #[cfg(target_endian = "little")] + static BYTES: &'static [u8] = + include_bytes!("multi_pattern_v2_fwd.littleendian.dfa"); + DFA::from_bytes(BYTES) + .expect("serialized forward DFA should be valid") + .0 + }; + let dfarev = { + #[cfg(target_endian = "big")] + static BYTES: &'static [u8] = + include_bytes!("multi_pattern_v2_rev.bigendian.dfa"); + #[cfg(target_endian = "little")] + static BYTES: &'static [u8] = + include_bytes!("multi_pattern_v2_rev.littleendian.dfa"); + DFA::from_bytes(BYTES) + .expect("serialized reverse DFA should be valid") + .0 + }; + Regex::builder().build_from_dfas(dfafwd, dfarev) + }); diff --git a/vendor/regex-automata/tests/gen/sparse/multi_pattern_v2_fwd.bigendian.dfa b/vendor/regex-automata/tests/gen/sparse/multi_pattern_v2_fwd.bigendian.dfa new file mode 100644 index 0000000000000000000000000000000000000000..aa04f63162709f12d140017f42fdad986ecb4162 GIT binary patch literal 3476 zcmcguy>1gh5Z*iAkMEo~iTMwK1QH;@SmF_=LPtYEZ7G2Y5had9>53{9JOEEYfv6~W z2Rb@x9snhL-|WuWUf&%T5i!#G+nL?@*_oX^A08i#28a8v_TLVM$D=o|hoj+O@8xiC zbTB+T+BcxnGyH^{fr#4}Eiytu*lI=b93|;2wF}BD<(1R{Ei85`X^FJF((CmuT_#;2 zT`lO^b9koG5LuJiX^znI9wjc zG;;#(2u-OtC>I+uPM{OUak^xUCBI>0#~7l_X&k(BKTf$lLJd?QdWyxK9h?hKCCY5f zB}%7Qq*ZK+Fy}C{o=lYI-L4HP>S7CqYyo%&ha%$1^1OpWqFRy_HA{^9)`ggEMN38>mJx` zyd8}jxMM1cL`(8K0C~sDcnKBjwMOJ>(+E0DLdOjibi5jw54+|GsrN`lc;sUso;+SX zHoUG{JU(aIaw?lCD6VT6FYcgw!S3|l_^9q`Q$!`EhL=!M=p54ku3CP)tDKCjU>{75y-p|&@3&XN&THq+v}UduaPqJqu^yVIA(M|D?QGAglD ze5Qy#uO(|3vw3ID@QgGks4}3K2ex;yR>Et{4c+Pcu1!RDDw}!pzgv0n4RE};Yu-d^ z`mgTv)8e7JtE~uQWa={BZKB7Yw<2GxnWBwY8I=LWJP|3O+~*s`%uhFC<^Ket&uh$B zoe=(>M-#4U78K-m6#U$T8v^`SsG=$t|688GVe8D1eM;oN5C6De@XuH4Bli5laXJ{Q qZafE`Oypdj9#jEfQz6)uHRT00Ud2DC#r@p? 
literal 0 HcmV?d00001 diff --git a/vendor/regex-automata/tests/gen/sparse/multi_pattern_v2_fwd.littleendian.dfa b/vendor/regex-automata/tests/gen/sparse/multi_pattern_v2_fwd.littleendian.dfa new file mode 100644 index 0000000000000000000000000000000000000000..c27d92abe1d7bccfc30bcfdd12208ec2a27532a3 GIT binary patch literal 3476 zcmchay>1gh5XaY!?ej-sC+0f@2qZv^vBV>|A{`9{wWS0qM3guZWh$yvrqAA3ZkO>h+%V`~9bbb2@H1SF(NZi}KO( zz_~`MM;-6oq&oJPRL73BBe|8U)u=x+NbA3-+stZ0O^#2p#Pq zz%Sw~7CPFNfnzyl(L1UVGWxwXr(A<z*9)u8_jAaU>I$0T{|Y<9y?4&7&4Oa;D2R z@FZxc$)v+KxiDv*4huaioy}2*K3!2$a2l#4h7b&8f}nRR;u1CMbujHbzmm;ROvRwd zAb~+PBTx3#>i0QbQgiO19d8;;%Rtj*kNET8$eT=6_*`;2yJ55v0(@-UGO@X0I z5cG4_{2?{xF^8#_=|IARY)PK%YbHQEA-u1q&WPRbJy}~vOqad$e<*IAOgen$d^!s~ zE1kVsLiFj1I)c+sCGnmDLzy7x=dAf_YTiqcjWr!ec#tj1lSzkf;oZoqdng;e6Tna= z2>O8Zm7z;@7L`1umR_D%U{KQBOru$;{>$$*W#PZcSxO8p8^(tV%*WR!@o(9*^eMcW n@=M{d*z{nQv4y{qf{^{) literal 0 HcmV?d00001 diff --git a/vendor/regex-automata/tests/gen/sparse/multi_pattern_v2_rev.bigendian.dfa b/vendor/regex-automata/tests/gen/sparse/multi_pattern_v2_rev.bigendian.dfa new file mode 100644 index 0000000000000000000000000000000000000000..89867d30f605af4c8ac4294e76d6a9a8023cbfbf GIT binary patch literal 1920 zcmcgs$!^p@5bbVz7JIg2*h7G@1%|;tzyamLH*m!g11AKUnTSYSa^k>8pm63_IP)L; z0O7srF2^%oloL-XS9Q7Tb=P7qk5}t#xp==g$@1g%#}D~B&)&Yvv(=}3xmpQch z7C^lRiMSA07e#TWf^KgU{Q`+c+K2%#9E}?>A$E4B)9IC~#5Ll2fg3l0TYI-X_V@1) zckgZC;Nbp)HavXv`0$D1>1_6FKA%6&E?`W2Vr)@_V4S+0Ow1%WxWk!Rx2<;@bJ|6W z>vVb(Pv$;@WCkC!*d)g~9A6>VUg3ZqnPRj@xuOx`re0{vntb&6bQ<}PN0o1duvdW( zb_6lt6JGX zO$Qh~%$M|$=@fDn?4b1~^?{+bJi&cOU`e+ImhT=?c z3G*8ahoe%C<_EGrg8VLN^+8LKpqU2uZm;E5h^@{Ba&DTn#Ps}KHg0m{ZyfXP{-Msn77fX3YvPb~hBfz1*+EAzy>mHM9&YG#hSe)>3N5 z$$yv{z~>ZutLI6E^2>|EufV;8t$ymV ia4+Rs5Vh6+6Mhx=$R&;8wDDIMKu&q54$*pF)&BrJ9)}?S literal 0 HcmV?d00001 diff --git a/vendor/regex-automata/tests/gen/sparse/multi_pattern_v2_rev.littleendian.dfa b/vendor/regex-automata/tests/gen/sparse/multi_pattern_v2_rev.littleendian.dfa new file mode 100644 index 0000000000000000000000000000000000000000..c0ca807f8947442d59554983659dd07ccee99898 GIT binary patch literal 1920 zcmchXOKuZE5QclkkMY}yd4~Yu6)j-s;j!{@9L_anV!t%@pSTjauz2i^N$~rc@n>Um&CJA$#gbx?)n%8y1M$1XWj5gvX!TEJA*cv7yeCO6PF!Ww}Odk3HAUFDl8|CJKS zXN|yzuY^w#9&CHgi%r0lZ&h8Z)$7rO7fM4Q_Xu6F(XJ+^UD6t5&GPZZ0K2)}-Uq-e zhGwhP*3;Ab(y4YO^mj>7=apEHL+NGUoQ;Xbe>pOsk`4tI@BQK^V9>V z>w|1Gn%uA5>2&iu{t($%^1GnTho*Ei zaRsO>CG0|wBa6)KwFLozZc>I_NBx(5J_6m`oo6UJ^o=I-S@J8Bnws}gVt#Z{h(*3; bVr%pNl-~p&TnvU^AP><1Cwr@~dQJZW?U;ul literal 0 HcmV?d00001 diff --git a/vendor/regex-automata/tests/hybrid/api.rs b/vendor/regex-automata/tests/hybrid/api.rs new file mode 100644 index 00000000000000..4b04c4f8fd2337 --- /dev/null +++ b/vendor/regex-automata/tests/hybrid/api.rs @@ -0,0 +1,171 @@ +use std::error::Error; + +use regex_automata::{ + hybrid::dfa::{OverlappingState, DFA}, + nfa::thompson, + HalfMatch, Input, MatchError, +}; + +// Tests that too many cache resets cause the lazy DFA to quit. +// +// We only test this on 64-bit because the test is gingerly crafted based on +// implementation details of cache sizes. It's not a great test because of +// that, but it does check some interesting properties around how positions are +// reported when a search "gives up." +// +// NOTE: If you change something in lazy DFA implementation that causes this +// test to fail by reporting different "gave up" positions, then it's generally +// okay to update the positions in the test below as long as you're sure your +// changes are correct. 
Namely, it is expected that if there are changes in the +// cache size (or changes in how big things are inside the cache), then its +// utilization may change slightly and thus impact where a search gives up. +// Precisely where a search gives up is not an API guarantee, so changing the +// offsets here is OK. +#[test] +#[cfg(target_pointer_width = "64")] +#[cfg(not(miri))] +fn too_many_cache_resets_cause_quit() -> Result<(), Box> { + // This is a carefully chosen regex. The idea is to pick one that requires + // some decent number of states (hence the bounded repetition). But we + // specifically choose to create a class with an ASCII letter and a + // non-ASCII letter so that we can check that no new states are created + // once the cache is full. Namely, if we fill up the cache on a haystack + // of 'a's, then in order to match one 'β', a new state will need to be + // created since a 'β' is encoded with multiple bytes. + // + // So we proceed by "filling" up the cache by searching a haystack of just + // 'a's. The cache won't have enough room to add enough states to find the + // match (because of the bounded repetition), which should result in it + // giving up before it finds a match. + // + // Since there's now no more room to create states, we search a haystack + // of 'β' and confirm that it gives up immediately. + let pattern = r"[aβ]{99}"; + let dfa = DFA::builder() + .configure( + // Configure it so that we have the minimum cache capacity + // possible. And that if any resets occur, the search quits. + DFA::config() + .skip_cache_capacity_check(true) + .cache_capacity(0) + .minimum_cache_clear_count(Some(0)), + ) + .thompson(thompson::NFA::config()) + .build(pattern)?; + let mut cache = dfa.create_cache(); + + let haystack = "a".repeat(101).into_bytes(); + let err = MatchError::gave_up(24); + // Notice that we make the same amount of progress in each search! That's + // because the cache is reused and already has states to handle the first + // N bytes. + assert_eq!( + Err(err.clone()), + dfa.try_search_fwd(&mut cache, &Input::new(&haystack)) + ); + assert_eq!( + Err(err.clone()), + dfa.try_search_overlapping_fwd( + &mut cache, + &Input::new(&haystack), + &mut OverlappingState::start() + ), + ); + + let haystack = "β".repeat(101).into_bytes(); + let err = MatchError::gave_up(2); + assert_eq!( + Err(err), + dfa.try_search_fwd(&mut cache, &Input::new(&haystack)) + ); + // no need to test that other find routines quit, since we did that above + + // OK, if we reset the cache, then we should be able to create more states + // and make more progress with searching for betas. + cache.reset(&dfa); + let err = MatchError::gave_up(26); + assert_eq!( + Err(err), + dfa.try_search_fwd(&mut cache, &Input::new(&haystack)) + ); + + // ... switching back to ASCII still makes progress since it just needs to + // set transitions on existing states! + let haystack = "a".repeat(101).into_bytes(); + let err = MatchError::gave_up(13); + assert_eq!( + Err(err), + dfa.try_search_fwd(&mut cache, &Input::new(&haystack)) + ); + + Ok(()) +} + +// Tests that quit bytes in the forward direction work correctly. 
+#[test] +fn quit_fwd() -> Result<(), Box> { + let dfa = DFA::builder() + .configure(DFA::config().quit(b'x', true)) + .build("[[:word:]]+$")?; + let mut cache = dfa.create_cache(); + + assert_eq!( + dfa.try_search_fwd(&mut cache, &Input::new("abcxyz")), + Err(MatchError::quit(b'x', 3)), + ); + assert_eq!( + dfa.try_search_overlapping_fwd( + &mut cache, + &Input::new(b"abcxyz"), + &mut OverlappingState::start() + ), + Err(MatchError::quit(b'x', 3)), + ); + + Ok(()) +} + +// Tests that quit bytes in the reverse direction work correctly. +#[test] +fn quit_rev() -> Result<(), Box> { + let dfa = DFA::builder() + .configure(DFA::config().quit(b'x', true)) + .thompson(thompson::Config::new().reverse(true)) + .build("^[[:word:]]+")?; + let mut cache = dfa.create_cache(); + + assert_eq!( + dfa.try_search_rev(&mut cache, &Input::new("abcxyz")), + Err(MatchError::quit(b'x', 3)), + ); + + Ok(()) +} + +// Tests that if we heuristically enable Unicode word boundaries but then +// instruct that a non-ASCII byte should NOT be a quit byte, then the builder +// will panic. +#[test] +#[should_panic] +fn quit_panics() { + DFA::config().unicode_word_boundary(true).quit(b'\xFF', false); +} + +// This tests an intesting case where even if the Unicode word boundary option +// is disabled, setting all non-ASCII bytes to be quit bytes will cause Unicode +// word boundaries to be enabled. +#[test] +fn unicode_word_implicitly_works() -> Result<(), Box> { + let mut config = DFA::config(); + for b in 0x80..=0xFF { + config = config.quit(b, true); + } + let dfa = DFA::builder().configure(config).build(r"\b")?; + let mut cache = dfa.create_cache(); + let expected = HalfMatch::must(0, 1); + assert_eq!( + Ok(Some(expected)), + dfa.try_search_fwd(&mut cache, &Input::new(" a")), + ); + Ok(()) +} diff --git a/vendor/regex-automata/tests/hybrid/mod.rs b/vendor/regex-automata/tests/hybrid/mod.rs new file mode 100644 index 00000000000000..36667d09ccc37b --- /dev/null +++ b/vendor/regex-automata/tests/hybrid/mod.rs @@ -0,0 +1,3 @@ +mod api; +#[cfg(not(miri))] +mod suite; diff --git a/vendor/regex-automata/tests/hybrid/suite.rs b/vendor/regex-automata/tests/hybrid/suite.rs new file mode 100644 index 00000000000000..f0c3ebdbcaba7c --- /dev/null +++ b/vendor/regex-automata/tests/hybrid/suite.rs @@ -0,0 +1,347 @@ +use { + anyhow::Result, + regex_automata::{ + hybrid::{ + dfa::{OverlappingState, DFA}, + regex::{self, Regex}, + }, + nfa::thompson, + util::{prefilter::Prefilter, syntax}, + Anchored, Input, PatternSet, + }, + regex_test::{ + CompiledRegex, Match, RegexTest, SearchKind, Span, TestResult, + TestRunner, + }, +}; + +use crate::{create_input, suite, untestify_kind}; + +const EXPANSIONS: &[&str] = &["is_match", "find", "which"]; + +/// Tests the default configuration of the hybrid NFA/DFA. +#[test] +fn default() -> Result<()> { + let builder = Regex::builder(); + TestRunner::new()? + .expand(EXPANSIONS, |t| t.compiles()) + // Without NFA shrinking, this test blows the default cache capacity. + .blacklist("expensive/regression-many-repeat-no-stack-overflow") + .test_iter(suite()?.iter(), compiler(builder)) + .assert(); + Ok(()) +} + +/// Tests the hybrid NFA/DFA with prefilters enabled. +#[test] +fn prefilter() -> Result<()> { + let my_compiler = |test: &RegexTest, regexes: &[String]| { + // Parse regexes as HIRs so we can get literals to build a prefilter. 
+ let mut hirs = vec![]; + for pattern in regexes.iter() { + hirs.push(syntax::parse_with(pattern, &config_syntax(test))?); + } + let kind = match untestify_kind(test.match_kind()) { + None => return Ok(CompiledRegex::skip()), + Some(kind) => kind, + }; + let pre = Prefilter::from_hirs_prefix(kind, &hirs); + let mut builder = Regex::builder(); + builder.dfa(DFA::config().prefilter(pre)); + compiler(builder)(test, regexes) + }; + TestRunner::new()? + .expand(EXPANSIONS, |t| t.compiles()) + // Without NFA shrinking, this test blows the default cache capacity. + .blacklist("expensive/regression-many-repeat-no-stack-overflow") + .test_iter(suite()?.iter(), my_compiler) + .assert(); + Ok(()) +} + +/// Tests the hybrid NFA/DFA with NFA shrinking enabled. +/// +/// This is *usually* not the configuration one wants for a lazy DFA. NFA +/// shrinking is mostly only advantageous when building a full DFA since it +/// can sharply decrease the amount of time determinization takes. But NFA +/// shrinking is itself otherwise fairly expensive currently. Since a lazy DFA +/// has no compilation time (other than for building the NFA of course) before +/// executing a search, it's usually worth it to forgo NFA shrinking. +/// +/// Nevertheless, we test to make sure everything is OK with NFA shrinking. As +/// a bonus, there are some tests we don't need to skip because they now fit in +/// the default cache capacity. +#[test] +fn nfa_shrink() -> Result<()> { + let mut builder = Regex::builder(); + builder.thompson(thompson::Config::new().shrink(true)); + TestRunner::new()? + .expand(EXPANSIONS, |t| t.compiles()) + .test_iter(suite()?.iter(), compiler(builder)) + .assert(); + Ok(()) +} + +/// Tests the hybrid NFA/DFA when 'starts_for_each_pattern' is enabled for all +/// tests. +#[test] +fn starts_for_each_pattern() -> Result<()> { + let mut builder = Regex::builder(); + builder.dfa(DFA::config().starts_for_each_pattern(true)); + TestRunner::new()? + .expand(EXPANSIONS, |t| t.compiles()) + // Without NFA shrinking, this test blows the default cache capacity. + .blacklist("expensive/regression-many-repeat-no-stack-overflow") + .test_iter(suite()?.iter(), compiler(builder)) + .assert(); + Ok(()) +} + +/// Tests the hybrid NFA/DFA when 'specialize_start_states' is enabled. +#[test] +fn specialize_start_states() -> Result<()> { + let mut builder = Regex::builder(); + builder.dfa(DFA::config().specialize_start_states(true)); + TestRunner::new()? + .expand(EXPANSIONS, |t| t.compiles()) + // Without NFA shrinking, this test blows the default cache capacity. + .blacklist("expensive/regression-many-repeat-no-stack-overflow") + .test_iter(suite()?.iter(), compiler(builder)) + .assert(); + Ok(()) +} + +/// Tests the hybrid NFA/DFA when byte classes are disabled. +/// +/// N.B. Disabling byte classes doesn't avoid any indirection at search time. +/// All it does is cause every byte value to be its own distinct equivalence +/// class. +#[test] +fn no_byte_classes() -> Result<()> { + let mut builder = Regex::builder(); + builder.dfa(DFA::config().byte_classes(false)); + TestRunner::new()? + .expand(EXPANSIONS, |t| t.compiles()) + // Without NFA shrinking, this test blows the default cache capacity. + .blacklist("expensive/regression-many-repeat-no-stack-overflow") + .test_iter(suite()?.iter(), compiler(builder)) + .assert(); + Ok(()) +} + +/// Tests that hybrid NFA/DFA never clears its cache for any test with the +/// default capacity. +/// +/// N.B. 
If a regex suite test is added that causes the cache to be cleared, +/// then this should just skip that test. (Which can be done by calling the +/// 'blacklist' method on 'TestRunner'.) +#[test] +fn no_cache_clearing() -> Result<()> { + let mut builder = Regex::builder(); + builder.dfa(DFA::config().minimum_cache_clear_count(Some(0))); + TestRunner::new()? + .expand(EXPANSIONS, |t| t.compiles()) + // Without NFA shrinking, this test blows the default cache capacity. + .blacklist("expensive/regression-many-repeat-no-stack-overflow") + .test_iter(suite()?.iter(), compiler(builder)) + .assert(); + Ok(()) +} + +/// Tests the hybrid NFA/DFA when the minimum cache capacity is set. +#[test] +fn min_cache_capacity() -> Result<()> { + let mut builder = Regex::builder(); + builder + .dfa(DFA::config().cache_capacity(0).skip_cache_capacity_check(true)); + TestRunner::new()? + .expand(EXPANSIONS, |t| t.compiles()) + .test_iter(suite()?.iter(), compiler(builder)) + .assert(); + Ok(()) +} + +fn compiler( + mut builder: regex::Builder, +) -> impl FnMut(&RegexTest, &[String]) -> Result { + move |test, regexes| { + // Parse regexes as HIRs for some analysis below. + let mut hirs = vec![]; + for pattern in regexes.iter() { + hirs.push(syntax::parse_with(pattern, &config_syntax(test))?); + } + + // Check if our regex contains things that aren't supported by DFAs. + // That is, Unicode word boundaries when searching non-ASCII text. + if !test.haystack().is_ascii() { + for hir in hirs.iter() { + if hir.properties().look_set().contains_word_unicode() { + return Ok(CompiledRegex::skip()); + } + } + } + if !configure_regex_builder(test, &mut builder) { + return Ok(CompiledRegex::skip()); + } + let re = builder.build_many(®exes)?; + let mut cache = re.create_cache(); + Ok(CompiledRegex::compiled(move |test| -> TestResult { + run_test(&re, &mut cache, test) + })) + } +} + +fn run_test( + re: &Regex, + cache: &mut regex::Cache, + test: &RegexTest, +) -> TestResult { + let input = create_input(test); + match test.additional_name() { + "is_match" => { + TestResult::matched(re.is_match(cache, input.earliest(true))) + } + "find" => match test.search_kind() { + SearchKind::Earliest | SearchKind::Leftmost => { + let input = + input.earliest(test.search_kind() == SearchKind::Earliest); + TestResult::matches( + re.find_iter(cache, input) + .take(test.match_limit().unwrap_or(std::usize::MAX)) + .map(|m| Match { + id: m.pattern().as_usize(), + span: Span { start: m.start(), end: m.end() }, + }), + ) + } + SearchKind::Overlapping => { + try_search_overlapping(re, cache, &input).unwrap() + } + }, + "which" => match test.search_kind() { + SearchKind::Earliest | SearchKind::Leftmost => { + // There are no "which" APIs for standard searches. + TestResult::skip() + } + SearchKind::Overlapping => { + let dfa = re.forward(); + let cache = cache.as_parts_mut().0; + let mut patset = PatternSet::new(dfa.pattern_len()); + dfa.try_which_overlapping_matches(cache, &input, &mut patset) + .unwrap(); + TestResult::which(patset.iter().map(|p| p.as_usize())) + } + }, + name => TestResult::fail(&format!("unrecognized test name: {name}")), + } +} + +/// Configures the given regex builder with all relevant settings on the given +/// regex test. +/// +/// If the regex test has a setting that is unsupported, then this returns +/// false (implying the test should be skipped). 
+fn configure_regex_builder( + test: &RegexTest, + builder: &mut regex::Builder, +) -> bool { + let match_kind = match untestify_kind(test.match_kind()) { + None => return false, + Some(k) => k, + }; + + let mut dfa_config = + DFA::config().match_kind(match_kind).unicode_word_boundary(true); + // When doing an overlapping search, we might try to find the start of each + // match with a custom search routine. In that case, we need to tell the + // reverse search (for the start offset) which pattern to look for. The + // only way that API works is when anchored starting states are compiled + // for each pattern. This does technically also enable it for the forward + // DFA, but we're okay with that. + if test.search_kind() == SearchKind::Overlapping { + dfa_config = dfa_config.starts_for_each_pattern(true); + } + builder + .syntax(config_syntax(test)) + .thompson(config_thompson(test)) + .dfa(dfa_config); + true +} + +/// Configuration of a Thompson NFA compiler from a regex test. +fn config_thompson(test: &RegexTest) -> thompson::Config { + let mut lookm = regex_automata::util::look::LookMatcher::new(); + lookm.set_line_terminator(test.line_terminator()); + thompson::Config::new().utf8(test.utf8()).look_matcher(lookm) +} + +/// Configuration of the regex parser from a regex test. +fn config_syntax(test: &RegexTest) -> syntax::Config { + syntax::Config::new() + .case_insensitive(test.case_insensitive()) + .unicode(test.unicode()) + .utf8(test.utf8()) + .line_terminator(test.line_terminator()) +} + +/// Execute an overlapping search, and for each match found, also find its +/// overlapping starting positions. +/// +/// N.B. This routine used to be part of the crate API, but 1) it wasn't clear +/// to me how useful it was and 2) it wasn't clear to me what its semantics +/// should be. In particular, a potentially surprising footgun of this routine +/// that it is worst case *quadratic* in the size of the haystack. Namely, it's +/// possible to report a match at every position, and for every such position, +/// scan all the way to the beginning of the haystack to find the starting +/// position. Typical leftmost non-overlapping searches don't suffer from this +/// because, well, matches can't overlap. So subsequent searches after a match +/// is found don't revisit previously scanned parts of the haystack. +/// +/// Its semantics can be strange for other reasons too. For example, given +/// the regex '.*' and the haystack 'zz', the full set of overlapping matches +/// is: [0, 0], [1, 1], [0, 1], [2, 2], [1, 2], [0, 2]. The ordering of +/// those matches is quite strange, but makes sense when you think about the +/// implementation: an end offset is found left-to-right, and then one or more +/// starting offsets are found right-to-left. +/// +/// Nevertheless, we provide this routine in our test suite because it's +/// useful to test the low level DFA overlapping search and our test suite +/// is written in a way that requires starting offsets. 
+fn try_search_overlapping( + re: &Regex, + cache: &mut regex::Cache, + input: &Input<'_>, +) -> Result { + let mut matches = vec![]; + let mut fwd_state = OverlappingState::start(); + let (fwd_dfa, rev_dfa) = (re.forward(), re.reverse()); + let (fwd_cache, rev_cache) = cache.as_parts_mut(); + while let Some(end) = { + fwd_dfa.try_search_overlapping_fwd( + fwd_cache, + input, + &mut fwd_state, + )?; + fwd_state.get_match() + } { + let revsearch = input + .clone() + .range(input.start()..end.offset()) + .anchored(Anchored::Pattern(end.pattern())) + .earliest(false); + let mut rev_state = OverlappingState::start(); + while let Some(start) = { + rev_dfa.try_search_overlapping_rev( + rev_cache, + &revsearch, + &mut rev_state, + )?; + rev_state.get_match() + } { + let span = Span { start: start.offset(), end: end.offset() }; + let mat = Match { id: end.pattern().as_usize(), span }; + matches.push(mat); + } + } + Ok(TestResult::matches(matches)) +} diff --git a/vendor/regex-automata/tests/lib.rs b/vendor/regex-automata/tests/lib.rs new file mode 100644 index 00000000000000..67c979aa8dc7c7 --- /dev/null +++ b/vendor/regex-automata/tests/lib.rs @@ -0,0 +1,115 @@ +// We have a similar config in the regex-automata crate root. Basically, it is +// just too annoying to deal with dead code when a subset of features is +// enabled. +#![cfg_attr( + not(all( + feature = "std", + feature = "nfa", + feature = "dfa", + feature = "hybrid", + feature = "perf-literal-substring", + feature = "perf-literal-multisubstring", + )), + allow(dead_code, unused_imports, unused_variables) +)] +// Similar deal with Miri. Just let dead code warnings be. +#![cfg_attr(miri, allow(dead_code, unused_imports, unused_variables))] + +#[cfg(any(feature = "dfa-search", feature = "dfa-onepass"))] +mod dfa; +#[cfg(feature = "dfa-search")] +mod fuzz; +#[cfg(feature = "dfa-search")] +mod gen; +#[cfg(feature = "hybrid")] +mod hybrid; +#[cfg(feature = "meta")] +mod meta; +#[cfg(any(feature = "nfa-backtrack", feature = "nfa-pikevm"))] +mod nfa; + +fn suite() -> anyhow::Result { + let _ = env_logger::try_init(); + + let mut tests = regex_test::RegexTests::new(); + macro_rules! load { + ($name:expr) => {{ + const DATA: &[u8] = + include_bytes!(concat!("../../testdata/", $name, ".toml")); + tests.load_slice($name, DATA)?; + }}; + } + + load!("anchored"); + load!("bytes"); + load!("crazy"); + load!("crlf"); + load!("earliest"); + load!("empty"); + load!("expensive"); + load!("flags"); + load!("iter"); + load!("leftmost-all"); + load!("line-terminator"); + load!("misc"); + load!("multiline"); + load!("no-unicode"); + load!("overlapping"); + load!("regression"); + load!("set"); + load!("substring"); + load!("unicode"); + load!("utf8"); + load!("word-boundary"); + load!("word-boundary-special"); + load!("fowler/basic"); + load!("fowler/nullsubexpr"); + load!("fowler/repetition"); + + Ok(tests) +} + +/// Configure a regex_automata::Input with the given test configuration. +fn create_input<'h>( + test: &'h regex_test::RegexTest, +) -> regex_automata::Input<'h> { + use regex_automata::Anchored; + + let bounds = test.bounds(); + let anchored = if test.anchored() { Anchored::Yes } else { Anchored::No }; + regex_automata::Input::new(test.haystack()) + .range(bounds.start..bounds.end) + .anchored(anchored) +} + +/// Convert capture matches into the test suite's capture values. +/// +/// The given captures must represent a valid match, where the first capturing +/// group has a non-None span. Otherwise this panics. 
+fn testify_captures( + caps: ®ex_automata::util::captures::Captures, +) -> regex_test::Captures { + assert!(caps.is_match(), "expected captures to represent a match"); + let spans = caps.iter().map(|group| { + group.map(|m| regex_test::Span { start: m.start, end: m.end }) + }); + // These unwraps are OK because we assume our 'caps' represents a match, + // and a match always gives a non-zero number of groups with the first + // group being non-None. + regex_test::Captures::new(caps.pattern().unwrap().as_usize(), spans) + .unwrap() +} + +/// Convert a test harness match kind to a regex-automata match kind. If +/// regex-automata doesn't support the harness kind, then `None` is returned. +fn untestify_kind( + kind: regex_test::MatchKind, +) -> Option { + match kind { + regex_test::MatchKind::All => Some(regex_automata::MatchKind::All), + regex_test::MatchKind::LeftmostFirst => { + Some(regex_automata::MatchKind::LeftmostFirst) + } + regex_test::MatchKind::LeftmostLongest => None, + } +} diff --git a/vendor/regex-automata/tests/meta/mod.rs b/vendor/regex-automata/tests/meta/mod.rs new file mode 100644 index 00000000000000..9d6ab475efef12 --- /dev/null +++ b/vendor/regex-automata/tests/meta/mod.rs @@ -0,0 +1,2 @@ +#[cfg(not(miri))] +mod suite; diff --git a/vendor/regex-automata/tests/meta/suite.rs b/vendor/regex-automata/tests/meta/suite.rs new file mode 100644 index 00000000000000..2c3de64fb95663 --- /dev/null +++ b/vendor/regex-automata/tests/meta/suite.rs @@ -0,0 +1,200 @@ +use { + anyhow::Result, + regex_automata::{ + meta::{self, Regex}, + util::syntax, + MatchKind, PatternSet, + }, + regex_test::{ + CompiledRegex, Match, RegexTest, SearchKind, Span, TestResult, + TestRunner, + }, +}; + +use crate::{create_input, suite, testify_captures}; + +const BLACKLIST: &[&str] = &[ + // These 'earliest' tests are blacklisted because the meta searcher doesn't + // give the same offsets that the test expects. This is legal because the + // 'earliest' routines don't guarantee a particular match offset other + // than "the earliest the regex engine can report a match." Some regex + // engines will quit earlier than others. The backtracker, for example, + // can't really quit before finding the full leftmost-first match. Many of + // the literal searchers also don't have the ability to quit fully or it's + // otherwise not worth doing. (A literal searcher not quitting as early as + // possible usually means looking at a few more bytes. That's no biggie.) + "earliest/", +]; + +/// Tests the default configuration of the meta regex engine. +#[test] +fn default() -> Result<()> { + let builder = Regex::builder(); + let mut runner = TestRunner::new()?; + runner + .expand(&["is_match", "find", "captures"], |test| test.compiles()) + .blacklist_iter(BLACKLIST) + .test_iter(suite()?.iter(), compiler(builder)) + .assert(); + Ok(()) +} + +/// Tests the default configuration minus the full DFA. +#[test] +fn no_dfa() -> Result<()> { + let mut builder = Regex::builder(); + builder.configure(Regex::config().dfa(false)); + let mut runner = TestRunner::new()?; + runner + .expand(&["is_match", "find", "captures"], |test| test.compiles()) + .blacklist_iter(BLACKLIST) + .test_iter(suite()?.iter(), compiler(builder)) + .assert(); + Ok(()) +} + +/// Tests the default configuration minus the full DFA and lazy DFA. 
+#[test] +fn no_dfa_hybrid() -> Result<()> { + let mut builder = Regex::builder(); + builder.configure(Regex::config().dfa(false).hybrid(false)); + let mut runner = TestRunner::new()?; + runner + .expand(&["is_match", "find", "captures"], |test| test.compiles()) + .blacklist_iter(BLACKLIST) + .test_iter(suite()?.iter(), compiler(builder)) + .assert(); + Ok(()) +} + +/// Tests the default configuration minus the full DFA, lazy DFA and one-pass +/// DFA. +#[test] +fn no_dfa_hybrid_onepass() -> Result<()> { + let mut builder = Regex::builder(); + builder.configure(Regex::config().dfa(false).hybrid(false).onepass(false)); + let mut runner = TestRunner::new()?; + runner + .expand(&["is_match", "find", "captures"], |test| test.compiles()) + .blacklist_iter(BLACKLIST) + .test_iter(suite()?.iter(), compiler(builder)) + .assert(); + Ok(()) +} + +/// Tests the default configuration minus the full DFA, lazy DFA, one-pass +/// DFA and backtracker. +#[test] +fn no_dfa_hybrid_onepass_backtrack() -> Result<()> { + let mut builder = Regex::builder(); + builder.configure( + Regex::config() + .dfa(false) + .hybrid(false) + .onepass(false) + .backtrack(false), + ); + let mut runner = TestRunner::new()?; + runner + .expand(&["is_match", "find", "captures"], |test| test.compiles()) + .blacklist_iter(BLACKLIST) + .test_iter(suite()?.iter(), compiler(builder)) + .assert(); + Ok(()) +} + +fn compiler( + mut builder: meta::Builder, +) -> impl FnMut(&RegexTest, &[String]) -> Result { + move |test, regexes| { + if !configure_meta_builder(test, &mut builder) { + return Ok(CompiledRegex::skip()); + } + let re = builder.build_many(®exes)?; + Ok(CompiledRegex::compiled(move |test| -> TestResult { + run_test(&re, test) + })) + } +} + +fn run_test(re: &Regex, test: &RegexTest) -> TestResult { + let input = create_input(test); + match test.additional_name() { + "is_match" => TestResult::matched(re.is_match(input)), + "find" => match test.search_kind() { + SearchKind::Earliest => TestResult::matches( + re.find_iter(input.earliest(true)) + .take(test.match_limit().unwrap_or(std::usize::MAX)) + .map(|m| Match { + id: m.pattern().as_usize(), + span: Span { start: m.start(), end: m.end() }, + }), + ), + SearchKind::Leftmost => TestResult::matches( + re.find_iter(input) + .take(test.match_limit().unwrap_or(std::usize::MAX)) + .map(|m| Match { + id: m.pattern().as_usize(), + span: Span { start: m.start(), end: m.end() }, + }), + ), + SearchKind::Overlapping => { + let mut patset = PatternSet::new(re.pattern_len()); + re.which_overlapping_matches(&input, &mut patset); + TestResult::which(patset.iter().map(|p| p.as_usize())) + } + }, + "captures" => match test.search_kind() { + SearchKind::Earliest => { + let it = re + .captures_iter(input.earliest(true)) + .take(test.match_limit().unwrap_or(std::usize::MAX)) + .map(|caps| testify_captures(&caps)); + TestResult::captures(it) + } + SearchKind::Leftmost => { + let it = re + .captures_iter(input) + .take(test.match_limit().unwrap_or(std::usize::MAX)) + .map(|caps| testify_captures(&caps)); + TestResult::captures(it) + } + SearchKind::Overlapping => { + // There is no overlapping regex API that supports captures. + TestResult::skip() + } + }, + name => TestResult::fail(&format!("unrecognized test name: {name}")), + } +} + +/// Configures the given regex builder with all relevant settings on the given +/// regex test. +/// +/// If the regex test has a setting that is unsupported, then this returns +/// false (implying the test should be skipped). 
+fn configure_meta_builder( + test: &RegexTest, + builder: &mut meta::Builder, +) -> bool { + let match_kind = match test.match_kind() { + regex_test::MatchKind::All => MatchKind::All, + regex_test::MatchKind::LeftmostFirst => MatchKind::LeftmostFirst, + regex_test::MatchKind::LeftmostLongest => return false, + }; + let meta_config = Regex::config() + .match_kind(match_kind) + .utf8_empty(test.utf8()) + .line_terminator(test.line_terminator()); + builder.configure(meta_config).syntax(config_syntax(test)); + true +} + +/// Configuration of the regex parser from a regex test. +fn config_syntax(test: &RegexTest) -> syntax::Config { + syntax::Config::new() + .case_insensitive(test.case_insensitive()) + .unicode(test.unicode()) + .utf8(test.utf8()) + .line_terminator(test.line_terminator()) +} diff --git a/vendor/regex-automata/tests/nfa/mod.rs b/vendor/regex-automata/tests/nfa/mod.rs new file mode 100644 index 00000000000000..32686214737f4d --- /dev/null +++ b/vendor/regex-automata/tests/nfa/mod.rs @@ -0,0 +1 @@ +mod thompson; diff --git a/vendor/regex-automata/tests/nfa/thompson/backtrack/mod.rs b/vendor/regex-automata/tests/nfa/thompson/backtrack/mod.rs new file mode 100644 index 00000000000000..9d6ab475efef12 --- /dev/null +++ b/vendor/regex-automata/tests/nfa/thompson/backtrack/mod.rs @@ -0,0 +1,2 @@ +#[cfg(not(miri))] +mod suite; diff --git a/vendor/regex-automata/tests/nfa/thompson/backtrack/suite.rs b/vendor/regex-automata/tests/nfa/thompson/backtrack/suite.rs new file mode 100644 index 00000000000000..c6f3b9f1fc054c --- /dev/null +++ b/vendor/regex-automata/tests/nfa/thompson/backtrack/suite.rs @@ -0,0 +1,213 @@ +use { + anyhow::Result, + regex_automata::{ + nfa::thompson::{ + self, + backtrack::{self, BoundedBacktracker}, + NFA, + }, + util::{prefilter::Prefilter, syntax}, + Input, + }, + regex_test::{ + CompiledRegex, Match, MatchKind, RegexTest, SearchKind, Span, + TestResult, TestRunner, + }, +}; + +use crate::{create_input, suite, testify_captures}; + +/// Tests the default configuration of the bounded backtracker. +#[test] +fn default() -> Result<()> { + let builder = BoundedBacktracker::builder(); + let mut runner = TestRunner::new()?; + runner.expand(&["is_match", "find", "captures"], |test| test.compiles()); + // At the time of writing, every regex search in the test suite fits + // into the backtracker's default visited capacity (except for the + // blacklisted tests below). If regexes are added that blow that capacity, + // then they should be blacklisted here. A tempting alternative is to + // automatically skip them by checking the haystack length against + // BoundedBacktracker::max_haystack_len, but that could wind up hiding + // interesting failure modes. e.g., If the visited capacity is somehow + // wrong or smaller than it should be. + runner.blacklist("expensive/backtrack-blow-visited-capacity"); + runner.test_iter(suite()?.iter(), compiler(builder)).assert(); + Ok(()) +} + +/// Tests the backtracker with prefilters enabled. +#[test] +fn prefilter() -> Result<()> { + let my_compiler = |test: &RegexTest, regexes: &[String]| { + // Parse regexes as HIRs so we can get literals to build a prefilter. + let mut hirs = vec![]; + for pattern in regexes.iter() { + hirs.push(syntax::parse_with(pattern, &config_syntax(test))?); + } + // We can always select leftmost-first here because the backtracker + // only supports leftmost-first matching. 
+ let pre = Prefilter::from_hirs_prefix( + regex_automata::MatchKind::LeftmostFirst, + &hirs, + ); + let mut builder = BoundedBacktracker::builder(); + builder.configure(BoundedBacktracker::config().prefilter(pre)); + compiler(builder)(test, regexes) + }; + let mut runner = TestRunner::new()?; + runner.expand(&["is_match", "find", "captures"], |test| test.compiles()); + runner.blacklist("expensive/backtrack-blow-visited-capacity"); + runner.test_iter(suite()?.iter(), my_compiler).assert(); + Ok(()) +} + +/// Tests the bounded backtracker when its visited capacity is set to its +/// minimum amount. +#[test] +fn min_visited_capacity() -> Result<()> { + let mut runner = TestRunner::new()?; + runner.expand(&["is_match", "find", "captures"], |test| test.compiles()); + runner + .test_iter(suite()?.iter(), move |test, regexes| { + let nfa = NFA::compiler() + .configure(config_thompson(test)) + .syntax(config_syntax(test)) + .build_many(®exes)?; + let mut builder = BoundedBacktracker::builder(); + if !configure_backtrack_builder(test, &mut builder) { + return Ok(CompiledRegex::skip()); + } + // Setup the bounded backtracker so that its visited capacity is + // the absolute minimum required for the test's haystack. + builder.configure(BoundedBacktracker::config().visited_capacity( + backtrack::min_visited_capacity( + &nfa, + &Input::new(test.haystack()), + ), + )); + + let re = builder.build_from_nfa(nfa)?; + let mut cache = re.create_cache(); + Ok(CompiledRegex::compiled(move |test| -> TestResult { + run_test(&re, &mut cache, test) + })) + }) + .assert(); + Ok(()) +} + +fn compiler( + mut builder: backtrack::Builder, +) -> impl FnMut(&RegexTest, &[String]) -> Result { + move |test, regexes| { + if !configure_backtrack_builder(test, &mut builder) { + return Ok(CompiledRegex::skip()); + } + let re = builder.build_many(®exes)?; + let mut cache = re.create_cache(); + Ok(CompiledRegex::compiled(move |test| -> TestResult { + run_test(&re, &mut cache, test) + })) + } +} + +fn run_test( + re: &BoundedBacktracker, + cache: &mut backtrack::Cache, + test: &RegexTest, +) -> TestResult { + let input = create_input(test); + match test.additional_name() { + "is_match" => match test.search_kind() { + SearchKind::Earliest | SearchKind::Overlapping => { + TestResult::skip() + } + SearchKind::Leftmost => { + let input = input.earliest(true); + TestResult::matched(re.try_is_match(cache, input).unwrap()) + } + }, + "find" => match test.search_kind() { + SearchKind::Earliest | SearchKind::Overlapping => { + TestResult::skip() + } + SearchKind::Leftmost => TestResult::matches( + re.try_find_iter(cache, input) + .take(test.match_limit().unwrap_or(std::usize::MAX)) + .map(|result| result.unwrap()) + .map(|m| Match { + id: m.pattern().as_usize(), + span: Span { start: m.start(), end: m.end() }, + }), + ), + }, + "captures" => match test.search_kind() { + SearchKind::Earliest | SearchKind::Overlapping => { + TestResult::skip() + } + SearchKind::Leftmost => TestResult::captures( + re.try_captures_iter(cache, input) + .take(test.match_limit().unwrap_or(std::usize::MAX)) + .map(|result| result.unwrap()) + .map(|caps| testify_captures(&caps)), + ), + }, + name => TestResult::fail(&format!("unrecognized test name: {name}")), + } +} + +/// Configures the given regex builder with all relevant settings on the given +/// regex test. +/// +/// If the regex test has a setting that is unsupported, then this returns +/// false (implying the test should be skipped). 
+fn configure_backtrack_builder( + test: &RegexTest, + builder: &mut backtrack::Builder, +) -> bool { + match (test.search_kind(), test.match_kind()) { + // For testing the standard search APIs. This is the only supported + // configuration for the backtracker. + (SearchKind::Leftmost, MatchKind::LeftmostFirst) => {} + // Overlapping APIs not supported at all for backtracker. + (SearchKind::Overlapping, _) => return false, + // Backtracking doesn't really support the notion of 'earliest'. + // Namely, backtracking already works by returning as soon as it knows + // it has found a match. It just so happens that this corresponds to + // the standard 'leftmost' formulation. + // + // The 'earliest' definition in this crate does indeed permit this + // behavior, so this is "fine," but our test suite specifically looks + // for the earliest position at which a match is known, which our + // finite automata based regex engines have no problem providing. So + // for backtracking, we just skip these tests. + (SearchKind::Earliest, _) => return false, + // For backtracking, 'all' semantics don't really make sense. + (_, MatchKind::All) => return false, + // Not supported at all in regex-automata. + (_, MatchKind::LeftmostLongest) => return false, + }; + let backtrack_config = BoundedBacktracker::config(); + builder + .configure(backtrack_config) + .syntax(config_syntax(test)) + .thompson(config_thompson(test)); + true +} + +/// Configuration of a Thompson NFA compiler from a regex test. +fn config_thompson(test: &RegexTest) -> thompson::Config { + let mut lookm = regex_automata::util::look::LookMatcher::new(); + lookm.set_line_terminator(test.line_terminator()); + thompson::Config::new().utf8(test.utf8()).look_matcher(lookm) +} + +/// Configuration of the regex parser from a regex test. +fn config_syntax(test: &RegexTest) -> syntax::Config { + syntax::Config::new() + .case_insensitive(test.case_insensitive()) + .unicode(test.unicode()) + .utf8(test.utf8()) + .line_terminator(test.line_terminator()) +} diff --git a/vendor/regex-automata/tests/nfa/thompson/mod.rs b/vendor/regex-automata/tests/nfa/thompson/mod.rs new file mode 100644 index 00000000000000..b2558f7049c37d --- /dev/null +++ b/vendor/regex-automata/tests/nfa/thompson/mod.rs @@ -0,0 +1,4 @@ +#[cfg(feature = "nfa-backtrack")] +mod backtrack; +#[cfg(feature = "nfa-pikevm")] +mod pikevm; diff --git a/vendor/regex-automata/tests/nfa/thompson/pikevm/mod.rs b/vendor/regex-automata/tests/nfa/thompson/pikevm/mod.rs new file mode 100644 index 00000000000000..9d6ab475efef12 --- /dev/null +++ b/vendor/regex-automata/tests/nfa/thompson/pikevm/mod.rs @@ -0,0 +1,2 @@ +#[cfg(not(miri))] +mod suite; diff --git a/vendor/regex-automata/tests/nfa/thompson/pikevm/suite.rs b/vendor/regex-automata/tests/nfa/thompson/pikevm/suite.rs new file mode 100644 index 00000000000000..1fb3fec9f24464 --- /dev/null +++ b/vendor/regex-automata/tests/nfa/thompson/pikevm/suite.rs @@ -0,0 +1,162 @@ +use { + anyhow::Result, + regex_automata::{ + nfa::thompson::{ + self, + pikevm::{self, PikeVM}, + }, + util::{prefilter::Prefilter, syntax}, + PatternSet, + }, + regex_test::{ + CompiledRegex, Match, RegexTest, SearchKind, Span, TestResult, + TestRunner, + }, +}; + +use crate::{create_input, suite, testify_captures, untestify_kind}; + +/// Tests the default configuration of the hybrid NFA/DFA. 
+#[test] +fn default() -> Result<()> { + let builder = PikeVM::builder(); + let mut runner = TestRunner::new()?; + runner.expand(&["is_match", "find", "captures"], |test| test.compiles()); + runner.test_iter(suite()?.iter(), compiler(builder)).assert(); + Ok(()) +} + +/// Tests the PikeVM with prefilters enabled. +#[test] +fn prefilter() -> Result<()> { + let my_compiler = |test: &RegexTest, regexes: &[String]| { + // Parse regexes as HIRs so we can get literals to build a prefilter. + let mut hirs = vec![]; + for pattern in regexes.iter() { + hirs.push(syntax::parse_with(pattern, &config_syntax(test))?); + } + let kind = match untestify_kind(test.match_kind()) { + None => return Ok(CompiledRegex::skip()), + Some(kind) => kind, + }; + let pre = Prefilter::from_hirs_prefix(kind, &hirs); + let mut builder = PikeVM::builder(); + builder.configure(PikeVM::config().prefilter(pre)); + compiler(builder)(test, regexes) + }; + let mut runner = TestRunner::new()?; + runner.expand(&["is_match", "find", "captures"], |test| test.compiles()); + runner.test_iter(suite()?.iter(), my_compiler).assert(); + Ok(()) +} + +fn compiler( + mut builder: pikevm::Builder, +) -> impl FnMut(&RegexTest, &[String]) -> Result { + move |test, regexes| { + if !configure_pikevm_builder(test, &mut builder) { + return Ok(CompiledRegex::skip()); + } + let re = builder.build_many(®exes)?; + let mut cache = re.create_cache(); + Ok(CompiledRegex::compiled(move |test| -> TestResult { + run_test(&re, &mut cache, test) + })) + } +} + +fn run_test( + re: &PikeVM, + cache: &mut pikevm::Cache, + test: &RegexTest, +) -> TestResult { + let input = create_input(test); + match test.additional_name() { + "is_match" => TestResult::matched(re.is_match(cache, input)), + "find" => match test.search_kind() { + SearchKind::Earliest => { + let it = re + .find_iter(cache, input.earliest(true)) + .take(test.match_limit().unwrap_or(std::usize::MAX)) + .map(|m| Match { + id: m.pattern().as_usize(), + span: Span { start: m.start(), end: m.end() }, + }); + TestResult::matches(it) + } + SearchKind::Leftmost => { + let it = re + .find_iter(cache, input) + .take(test.match_limit().unwrap_or(std::usize::MAX)) + .map(|m| Match { + id: m.pattern().as_usize(), + span: Span { start: m.start(), end: m.end() }, + }); + TestResult::matches(it) + } + SearchKind::Overlapping => { + let mut patset = PatternSet::new(re.get_nfa().pattern_len()); + re.which_overlapping_matches(cache, &input, &mut patset); + TestResult::which(patset.iter().map(|p| p.as_usize())) + } + }, + "captures" => match test.search_kind() { + SearchKind::Earliest => { + let it = re + .captures_iter(cache, input.earliest(true)) + .take(test.match_limit().unwrap_or(std::usize::MAX)) + .map(|caps| testify_captures(&caps)); + TestResult::captures(it) + } + SearchKind::Leftmost => { + let it = re + .captures_iter(cache, input) + .take(test.match_limit().unwrap_or(std::usize::MAX)) + .map(|caps| testify_captures(&caps)); + TestResult::captures(it) + } + SearchKind::Overlapping => { + // There is no overlapping PikeVM API that supports captures. + TestResult::skip() + } + }, + name => TestResult::fail(&format!("unrecognized test name: {name}")), + } +} + +/// Configures the given regex builder with all relevant settings on the given +/// regex test. +/// +/// If the regex test has a setting that is unsupported, then this returns +/// false (implying the test should be skipped). 
+fn configure_pikevm_builder( + test: &RegexTest, + builder: &mut pikevm::Builder, +) -> bool { + let match_kind = match untestify_kind(test.match_kind()) { + None => return false, + Some(k) => k, + }; + let pikevm_config = PikeVM::config().match_kind(match_kind); + builder + .configure(pikevm_config) + .syntax(config_syntax(test)) + .thompson(config_thompson(test)); + true +} + +/// Configuration of a Thompson NFA compiler from a regex test. +fn config_thompson(test: &RegexTest) -> thompson::Config { + let mut lookm = regex_automata::util::look::LookMatcher::new(); + lookm.set_line_terminator(test.line_terminator()); + thompson::Config::new().utf8(test.utf8()).look_matcher(lookm) +} + +/// Configuration of the regex parser from a regex test. +fn config_syntax(test: &RegexTest) -> syntax::Config { + syntax::Config::new() + .case_insensitive(test.case_insensitive()) + .unicode(test.unicode()) + .utf8(test.utf8()) + .line_terminator(test.line_terminator()) +} diff --git a/vendor/regex-syntax/.cargo-checksum.json b/vendor/regex-syntax/.cargo-checksum.json new file mode 100644 index 00000000000000..8ddc619c981e6d --- /dev/null +++ b/vendor/regex-syntax/.cargo-checksum.json @@ -0,0 +1 @@ +{"files":{".cargo_vcs_info.json":"4d9740cca04e3e6eb255422e93538de6fde4a9723c3f3bd15562f6e502c246cc","Cargo.lock":"3285efe6948658ea24ca1cc194a2d56dac8422f57a72459daecfe38b2672dff5","Cargo.toml":"2633ef92fd0a0373037e587f23836288e2f965d578b1e02d01288b607252bc57","Cargo.toml.orig":"dc2b090e6ecd06b0ac9aad1a25b3d645e07b5d69d601ec7e8a48670ac0c4d568","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"6485b8ed310d3f0340bf1ad1f47645069ce4069dcc6bb46c7d5c6faf41de1fdb","README.md":"b2484aa7e66fb92d1378e9a7ce7605af18f77cb12c179866eaf92ba28cfec1d9","benches/bench.rs":"d2b6ae5b939abd6093064f144b981b7739d7f474ec0698a1268052fc92406635","src/ast/mod.rs":"38df06574a3816eae2796c757d9d268b7b42ce3fb1feee86750e7d04de7649c7","src/ast/parse.rs":"af6d82e62e97379a91840e99c10be1279c0ae1298a0d8e5da429e9e7bc3ec339","src/ast/print.rs":"6a681ce021a384c47dda04c71e2868b5dd61c633bb5b1a67628b9a8df16f0413","src/ast/visitor.rs":"2af2efd77727803b8d15a6244af92267e96edbfc1a211bcbea2b32e1b5483918","src/debug.rs":"91b2492394de05bb11ee75329115b33fb378c19e076370d1a1ae665ce1682777","src/either.rs":"1758e3edd056884eccadd995708d1e374ba9aa65846bd0e13b1aae852607c560","src/error.rs":"e308b3ccad0bea927f4e3957170302e9cfa743bfdf9376f3a5f4137b44ca6cfc","src/hir/interval.rs":"74d75837d24ab9a3cff33b375b70694cdd3b9a4610c799137533f365755ba604","src/hir/literal.rs":"61e9f54103c671694dd017c23c5c9263e032735921ef77527940e83b29ced540","src/hir/mod.rs":"13ee5b65fac1f2c9780ce48a500b1e9d198cb0bc07c0d7f4a4391aab87424563","src/hir/print.rs":"e1e1dfa71983c8fea64f500a0b9dfcbd258b4060e12b95d432468015a247a5cb","src/hir/translate.rs":"73bd3e27fe117a92abfaa0ce47fe86b70a9b456e2635e19efe099b94830b947a","src/hir/visitor.rs":"71ca9c93aa48a5ed445399659fa6455093a1bbd9ef44b66bc7095c1b08b2ec1f","src/lib.rs":"c51d1e55a8b6c4608e21a278ed0ef9480f73ab5b814b6ca6127f4a049c4d5007","src/parser.rs":"6b2f4f27e3331a01a25b87c89368dd2e54396bd425dac57941f9c1ebfd238ac8","src/rank.rs":"ff3d58b0cc5ffa69e2e8c56fc7d9ef41dd399d59a639a253a51551b858cb5bbd","src/unicode.rs":"b2084dcbd4331501b9a895fd7e7575d93ff96eb661c6e6adbc8c66bb72685cde","src/unicode_tables/LICENSE-UNICODE":"74db5baf44a41b1000312c673544b3374e4198af5605c7f9080a402cec42cfa3","src/unicode_tables/age.rs":"71b7cf52acdb4aa98b44145303b8efbfa94913235493521941ef1e0092a0ffe2","src/unicode_tables/case_folding_s
imple.rs":"7622c7f7f03ac0dc2f2bcd51c81a217d64de0cc912f62f1add5f676603a02456","src/unicode_tables/general_category.rs":"9488e3721f7c2ae20e1b77fcff9a59b4ed8f22954b8645ea6d8592eac1856423","src/unicode_tables/grapheme_cluster_break.rs":"0dd9d66bad598f4ec3451b6699f05c17c52079e37d463baf6385bbe51aa218f1","src/unicode_tables/mod.rs":"26c837099cd934c8062e24bc9a0aaecf15fe1de03f9c6da3f3e1e5ac3ca24bee","src/unicode_tables/perl_decimal.rs":"6a59143db81a0bcaf0e8d0af265e711d1a6472e1f091ee9ee4377da5d5d0cd1f","src/unicode_tables/perl_space.rs":"ec9bb22ed7e99feef292249c7e6f4673ee0af9635d4d158f93923494c14cd5ed","src/unicode_tables/perl_word.rs":"30f073baae28ea34c373c7778c00f20c1621c3e644404eff031f7d1cc8e9c9e2","src/unicode_tables/property_bool.rs":"66cf5bd2a1438bf9694152f077a285cf014fbd50b9dd63a97233b2ea61d64962","src/unicode_tables/property_names.rs":"8c93985d1bcb01735667a3c4cb92f7e260d267326bde9d7f048bc77cd7e07855","src/unicode_tables/property_values.rs":"ef9131ce0a575c7327ec6d466aafd8b7c25600d80c232b5a4110bbf0a5a59136","src/unicode_tables/script.rs":"41bd424f1e3a03290cf4995ced678dcf24c94b38c905c62f6819bf67e098a2ec","src/unicode_tables/script_extension.rs":"a314099ddbf50a07fe350bb0835bf2fe494ed5ad278b30e171e21506eb557906","src/unicode_tables/sentence_break.rs":"be84fbe8c5c67e761b16fe6c27f16664dbb145357835cd6b92bc2a4a4c52ee79","src/unicode_tables/word_break.rs":"c551681ad49ec28c7ae32bab1371945821c736ca8f0de410cb89f28066ec2ecf","src/utf8.rs":"193f280f3b48116ed6ca8b5fe80b9d6401b6e733dfa138caf64ec1b017b6f175","test":"c7de5fbc0010d9b5b758cd49956375a64b88601c068167fd366808950257f108"},"package":"7a2d987857b319362043e95f5353c0535c1f58eec5336fdfcf626430af7def58"} \ No newline at end of file diff --git a/vendor/regex-syntax/.cargo_vcs_info.json b/vendor/regex-syntax/.cargo_vcs_info.json new file mode 100644 index 00000000000000..2d47b3d9eaea88 --- /dev/null +++ b/vendor/regex-syntax/.cargo_vcs_info.json @@ -0,0 +1,6 @@ +{ + "git": { + "sha1": "691d51457db276bbdf9ca3de2cafe285c662c59f" + }, + "path_in_vcs": "regex-syntax" +} \ No newline at end of file diff --git a/vendor/regex-syntax/Cargo.lock b/vendor/regex-syntax/Cargo.lock new file mode 100644 index 00000000000000..a6c29c8389f76a --- /dev/null +++ b/vendor/regex-syntax/Cargo.lock @@ -0,0 +1,65 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. 
+version = 3 + +[[package]] +name = "arbitrary" +version = "1.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c3d036a3c4ab069c7b410a2ce876bd74808d2d0888a82667669f8e783a898bf1" +dependencies = [ + "derive_arbitrary", +] + +[[package]] +name = "derive_arbitrary" +version = "1.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e567bd82dcff979e4b03460c307b3cdc9e96fde3d73bed1496d2bc75d9dd62a" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "proc-macro2" +version = "1.0.101" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "89ae43fd86e4158d6db51ad8e2b80f313af9cc74f5c0e03ccb87de09998732de" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "quote" +version = "1.0.41" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ce25767e7b499d1b604768e7cde645d14cc8584231ea6b295e9c9eb22c02e1d1" +dependencies = [ + "proc-macro2", +] + +[[package]] +name = "regex-syntax" +version = "0.8.8" +dependencies = [ + "arbitrary", +] + +[[package]] +name = "syn" +version = "2.0.106" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ede7c438028d4436d71104916910f5bb611972c5cfd7f89b8300a8186e6fada6" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "unicode-ident" +version = "1.0.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f63a545481291138910575129486daeaf8ac54aee4387fe7906919f7830c7d9d" diff --git a/vendor/regex-syntax/Cargo.toml b/vendor/regex-syntax/Cargo.toml new file mode 100644 index 00000000000000..02277a31b9e171 --- /dev/null +++ b/vendor/regex-syntax/Cargo.toml @@ -0,0 +1,81 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies. +# +# If you are reading this file be aware that the original Cargo.toml +# will likely look very different (and much more reasonable). +# See Cargo.toml.orig for the original contents. + +[package] +edition = "2021" +rust-version = "1.65" +name = "regex-syntax" +version = "0.8.8" +authors = [ + "The Rust Project Developers", + "Andrew Gallant ", +] +build = false +autolib = false +autobins = false +autoexamples = false +autotests = false +autobenches = false +description = "A regular expression parser." 
+homepage = "https://github.com/rust-lang/regex/tree/master/regex-syntax" +documentation = "https://docs.rs/regex-syntax" +readme = "README.md" +license = "MIT OR Apache-2.0" +repository = "https://github.com/rust-lang/regex" + +[package.metadata.docs.rs] +all-features = true +rustdoc-args = [ + "--cfg", + "docsrs_regex", +] + +[features] +arbitrary = ["dep:arbitrary"] +default = [ + "std", + "unicode", +] +std = [] +unicode = [ + "unicode-age", + "unicode-bool", + "unicode-case", + "unicode-gencat", + "unicode-perl", + "unicode-script", + "unicode-segment", +] +unicode-age = [] +unicode-bool = [] +unicode-case = [] +unicode-gencat = [] +unicode-perl = [] +unicode-script = [] +unicode-segment = [] + +[lib] +name = "regex_syntax" +path = "src/lib.rs" + +[[bench]] +name = "bench" +path = "benches/bench.rs" + +[dependencies.arbitrary] +version = "1.3.0" +features = ["derive"] +optional = true + +[lints.rust.unexpected_cfgs] +level = "allow" +priority = 0 +check-cfg = ["cfg(docsrs_regex)"] diff --git a/vendor/regex-syntax/LICENSE-APACHE b/vendor/regex-syntax/LICENSE-APACHE new file mode 100644 index 00000000000000..16fe87b06e802f --- /dev/null +++ b/vendor/regex-syntax/LICENSE-APACHE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/vendor/regex-syntax/LICENSE-MIT b/vendor/regex-syntax/LICENSE-MIT new file mode 100644 index 00000000000000..39d4bdb5acd313 --- /dev/null +++ b/vendor/regex-syntax/LICENSE-MIT @@ -0,0 +1,25 @@ +Copyright (c) 2014 The Rust Project Developers + +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. diff --git a/vendor/regex-syntax/README.md b/vendor/regex-syntax/README.md new file mode 100644 index 00000000000000..529513b0c8e979 --- /dev/null +++ b/vendor/regex-syntax/README.md @@ -0,0 +1,96 @@ +regex-syntax +============ +This crate provides a robust regular expression parser. + +[![Build status](https://github.com/rust-lang/regex/workflows/ci/badge.svg)](https://github.com/rust-lang/regex/actions) +[![Crates.io](https://img.shields.io/crates/v/regex-syntax.svg)](https://crates.io/crates/regex-syntax) + + +### Documentation + +https://docs.rs/regex-syntax + + +### Overview + +There are two primary types exported by this crate: `Ast` and `Hir`. The former +is a faithful abstract syntax of a regular expression, and can convert regular +expressions back to their concrete syntax while mostly preserving its original +form. The latter type is a high level intermediate representation of a regular +expression that is amenable to analysis and compilation into byte codes or +automata. An `Hir` achieves this by drastically simplifying the syntactic +structure of the regular expression. While an `Hir` can be converted back to +its equivalent concrete syntax, the result is unlikely to resemble the original +concrete syntax that produced the `Hir`. + + +### Example + +This example shows how to parse a pattern string into its HIR: + +```rust +use regex_syntax::{hir::Hir, parse}; + +let hir = parse("a|b").unwrap(); +assert_eq!(hir, Hir::alternation(vec![ + Hir::literal("a".as_bytes()), + Hir::literal("b".as_bytes()), +])); +``` + + +### Safety + +This crate has no `unsafe` code and sets `forbid(unsafe_code)`. While it's +possible this crate could use `unsafe` code in the future, the standard +for doing so is extremely high. In general, most code in this crate is not +performance critical, since it tends to be dwarfed by the time it takes to +compile a regular expression into an automaton. Therefore, there is little need +for extreme optimization, and therefore, use of `unsafe`. + +The standard for using `unsafe` in this crate is extremely high because this +crate is intended to be reasonably safe to use with user supplied regular +expressions. Therefore, while there may be bugs in the regex parser itself, +they should _never_ result in memory unsafety unless there is either a bug +in the compiler or the standard library. (Since `regex-syntax` has zero +dependencies.) + + +### Crate features + +By default, this crate bundles a fairly large amount of Unicode data tables +(a source size of ~750KB). Because of their large size, one can disable some +or all of these data tables. If a regular expression attempts to use Unicode +data that is not available, then an error will occur when translating the `Ast` +to the `Hir`. + +The full set of features one can disable are +[in the "Crate features" section of the documentation](https://docs.rs/regex-syntax/*/#crate-features). + + +### Testing + +Simply running `cargo test` will give you very good coverage. 
However, because +of the large number of features exposed by this crate, a `test` script is +included in this directory which will test several feature combinations. This +is the same script that is run in CI. + + +### Motivation + +The primary purpose of this crate is to provide the parser used by `regex`. +Specifically, this crate is treated as an implementation detail of the `regex`, +and is primarily developed for the needs of `regex`. + +Since this crate is an implementation detail of `regex`, it may experience +breaking change releases at a different cadence from `regex`. This is only +possible because this crate is _not_ a public dependency of `regex`. + +Another consequence of this de-coupling is that there is no direct way to +compile a `regex::Regex` from a `regex_syntax::hir::Hir`. Instead, one must +first convert the `Hir` to a string (via its `std::fmt::Display`) and then +compile that via `Regex::new`. While this does repeat some work, compilation +typically takes much longer than parsing. + +Stated differently, the coupling between `regex` and `regex-syntax` exists only +at the level of the concrete syntax. diff --git a/vendor/regex-syntax/benches/bench.rs b/vendor/regex-syntax/benches/bench.rs new file mode 100644 index 00000000000000..d4703d4fc1ebf0 --- /dev/null +++ b/vendor/regex-syntax/benches/bench.rs @@ -0,0 +1,63 @@ +#![feature(test)] + +extern crate test; + +use regex_syntax::Parser; +use test::Bencher; + +#[bench] +fn parse_simple1(b: &mut Bencher) { + b.iter(|| { + let re = r"^bc(d|e)*$"; + Parser::new().parse(re).unwrap() + }); +} + +#[bench] +fn parse_simple2(b: &mut Bencher) { + b.iter(|| { + let re = r"'[a-zA-Z_][a-zA-Z0-9_]*(')\b"; + Parser::new().parse(re).unwrap() + }); +} + +#[bench] +fn parse_small1(b: &mut Bencher) { + b.iter(|| { + let re = r"\p{L}|\p{N}|\s|.|\d"; + Parser::new().parse(re).unwrap() + }); +} + +#[bench] +fn parse_medium1(b: &mut Bencher) { + b.iter(|| { + let re = r"\pL\p{Greek}\p{Hiragana}\p{Alphabetic}\p{Hebrew}\p{Arabic}"; + Parser::new().parse(re).unwrap() + }); +} + +#[bench] +fn parse_medium2(b: &mut Bencher) { + b.iter(|| { + let re = r"\s\S\w\W\d\D"; + Parser::new().parse(re).unwrap() + }); +} + +#[bench] +fn parse_medium3(b: &mut Bencher) { + b.iter(|| { + let re = + r"\p{age:3.2}\p{hira}\p{scx:hira}\p{alphabetic}\p{sc:Greek}\pL"; + Parser::new().parse(re).unwrap() + }); +} + +#[bench] +fn parse_huge(b: &mut Bencher) { + b.iter(|| { + let re = r"\p{L}{100}"; + Parser::new().parse(re).unwrap() + }); +} diff --git a/vendor/regex-syntax/src/ast/mod.rs b/vendor/regex-syntax/src/ast/mod.rs new file mode 100644 index 00000000000000..7e2426dc78fdca --- /dev/null +++ b/vendor/regex-syntax/src/ast/mod.rs @@ -0,0 +1,1807 @@ +/*! +Defines an abstract syntax for regular expressions. +*/ + +use core::cmp::Ordering; + +use alloc::{boxed::Box, string::String, vec, vec::Vec}; + +pub use crate::ast::visitor::{visit, Visitor}; + +pub mod parse; +pub mod print; +mod visitor; + +/// An error that occurred while parsing a regular expression into an abstract +/// syntax tree. +/// +/// Note that not all ASTs represents a valid regular expression. For example, +/// an AST is constructed without error for `\p{Quux}`, but `Quux` is not a +/// valid Unicode property name. That particular error is reported when +/// translating an AST to the high-level intermediate representation (`HIR`). +#[derive(Clone, Debug, Eq, PartialEq)] +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] +pub struct Error { + /// The kind of error. 
+ kind: ErrorKind, + /// The original pattern that the parser generated the error from. Every + /// span in an error is a valid range into this string. + pattern: String, + /// The span of this error. + span: Span, +} + +impl Error { + /// Return the type of this error. + pub fn kind(&self) -> &ErrorKind { + &self.kind + } + + /// The original pattern string in which this error occurred. + /// + /// Every span reported by this error is reported in terms of this string. + pub fn pattern(&self) -> &str { + &self.pattern + } + + /// Return the span at which this error occurred. + pub fn span(&self) -> &Span { + &self.span + } + + /// Return an auxiliary span. This span exists only for some errors that + /// benefit from being able to point to two locations in the original + /// regular expression. For example, "duplicate" errors will have the + /// main error position set to the duplicate occurrence while its + /// auxiliary span will be set to the initial occurrence. + pub fn auxiliary_span(&self) -> Option<&Span> { + use self::ErrorKind::*; + match self.kind { + FlagDuplicate { ref original } => Some(original), + FlagRepeatedNegation { ref original, .. } => Some(original), + GroupNameDuplicate { ref original, .. } => Some(original), + _ => None, + } + } +} + +/// The type of an error that occurred while building an AST. +/// +/// This error type is marked as `non_exhaustive`. This means that adding a +/// new variant is not considered a breaking change. +#[non_exhaustive] +#[derive(Clone, Debug, Eq, PartialEq)] +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] +pub enum ErrorKind { + /// The capturing group limit was exceeded. + /// + /// Note that this represents a limit on the total number of capturing + /// groups in a regex and not necessarily the number of nested capturing + /// groups. That is, the nest limit can be low and it is still possible for + /// this error to occur. + CaptureLimitExceeded, + /// An invalid escape sequence was found in a character class set. + ClassEscapeInvalid, + /// An invalid character class range was found. An invalid range is any + /// range where the start is greater than the end. + ClassRangeInvalid, + /// An invalid range boundary was found in a character class. Range + /// boundaries must be a single literal codepoint, but this error indicates + /// that something else was found, such as a nested class. + ClassRangeLiteral, + /// An opening `[` was found with no corresponding closing `]`. + ClassUnclosed, + /// Note that this error variant is no longer used. Namely, a decimal + /// number can only appear as a repetition quantifier. When the number + /// in a repetition quantifier is empty, then it gets its own specialized + /// error, `RepetitionCountDecimalEmpty`. + DecimalEmpty, + /// An invalid decimal number was given where one was expected. + DecimalInvalid, + /// A bracketed hex literal was empty. + EscapeHexEmpty, + /// A bracketed hex literal did not correspond to a Unicode scalar value. + EscapeHexInvalid, + /// An invalid hexadecimal digit was found. + EscapeHexInvalidDigit, + /// EOF was found before an escape sequence was completed. + EscapeUnexpectedEof, + /// An unrecognized escape sequence. + EscapeUnrecognized, + /// A dangling negation was used when setting flags, e.g., `i-`. + FlagDanglingNegation, + /// A flag was used twice, e.g., `i-i`. + FlagDuplicate { + /// The position of the original flag. The error position + /// points to the duplicate flag. 
+ original: Span, + }, + /// The negation operator was used twice, e.g., `-i-s`. + FlagRepeatedNegation { + /// The position of the original negation operator. The error position + /// points to the duplicate negation operator. + original: Span, + }, + /// Expected a flag but got EOF, e.g., `(?`. + FlagUnexpectedEof, + /// Unrecognized flag, e.g., `a`. + FlagUnrecognized, + /// A duplicate capture name was found. + GroupNameDuplicate { + /// The position of the initial occurrence of the capture name. The + /// error position itself points to the duplicate occurrence. + original: Span, + }, + /// A capture group name is empty, e.g., `(?P<>abc)`. + GroupNameEmpty, + /// An invalid character was seen for a capture group name. This includes + /// errors where the first character is a digit (even though subsequent + /// characters are allowed to be digits). + GroupNameInvalid, + /// A closing `>` could not be found for a capture group name. + GroupNameUnexpectedEof, + /// An unclosed group, e.g., `(ab`. + /// + /// The span of this error corresponds to the unclosed parenthesis. + GroupUnclosed, + /// An unopened group, e.g., `ab)`. + GroupUnopened, + /// The nest limit was exceeded. The limit stored here is the limit + /// configured in the parser. + NestLimitExceeded(u32), + /// The range provided in a counted repetition operator is invalid. The + /// range is invalid if the start is greater than the end. + RepetitionCountInvalid, + /// An opening `{` was not followed by a valid decimal value. + /// For example, `x{}` or `x{]}` would fail. + RepetitionCountDecimalEmpty, + /// An opening `{` was found with no corresponding closing `}`. + RepetitionCountUnclosed, + /// A repetition operator was applied to a missing sub-expression. This + /// occurs, for example, in the regex consisting of just a `*` or even + /// `(?i)*`. It is, however, possible to create a repetition operating on + /// an empty sub-expression. For example, `()*` is still considered valid. + RepetitionMissing, + /// The special word boundary syntax, `\b{something}`, was used, but + /// either EOF without `}` was seen, or an invalid character in the + /// braces was seen. + SpecialWordBoundaryUnclosed, + /// The special word boundary syntax, `\b{something}`, was used, but + /// `something` was not recognized as a valid word boundary kind. + SpecialWordBoundaryUnrecognized, + /// The syntax `\b{` was observed, but afterwards the end of the pattern + /// was observed without being able to tell whether it was meant to be a + /// bounded repetition on the `\b` or the beginning of a special word + /// boundary assertion. + SpecialWordOrRepetitionUnexpectedEof, + /// The Unicode class is not valid. This typically occurs when a `\p` is + /// followed by something other than a `{`. + UnicodeClassInvalid, + /// When octal support is disabled, this error is produced when an octal + /// escape is used. The octal escape is assumed to be an invocation of + /// a backreference, which is the common case. + UnsupportedBackreference, + /// When syntax similar to PCRE's look-around is used, this error is + /// returned. Some example syntaxes that are rejected include, but are + /// not necessarily limited to, `(?=re)`, `(?!re)`, `(?<=re)` and + /// `(?) 
-> core::fmt::Result { + crate::error::Formatter::from(self).fmt(f) + } +} + +impl core::fmt::Display for ErrorKind { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + use self::ErrorKind::*; + match *self { + CaptureLimitExceeded => write!( + f, + "exceeded the maximum number of \ + capturing groups ({})", + u32::MAX + ), + ClassEscapeInvalid => { + write!(f, "invalid escape sequence found in character class") + } + ClassRangeInvalid => write!( + f, + "invalid character class range, \ + the start must be <= the end" + ), + ClassRangeLiteral => { + write!(f, "invalid range boundary, must be a literal") + } + ClassUnclosed => write!(f, "unclosed character class"), + DecimalEmpty => write!(f, "decimal literal empty"), + DecimalInvalid => write!(f, "decimal literal invalid"), + EscapeHexEmpty => write!(f, "hexadecimal literal empty"), + EscapeHexInvalid => { + write!(f, "hexadecimal literal is not a Unicode scalar value") + } + EscapeHexInvalidDigit => write!(f, "invalid hexadecimal digit"), + EscapeUnexpectedEof => write!( + f, + "incomplete escape sequence, \ + reached end of pattern prematurely" + ), + EscapeUnrecognized => write!(f, "unrecognized escape sequence"), + FlagDanglingNegation => { + write!(f, "dangling flag negation operator") + } + FlagDuplicate { .. } => write!(f, "duplicate flag"), + FlagRepeatedNegation { .. } => { + write!(f, "flag negation operator repeated") + } + FlagUnexpectedEof => { + write!(f, "expected flag but got end of regex") + } + FlagUnrecognized => write!(f, "unrecognized flag"), + GroupNameDuplicate { .. } => { + write!(f, "duplicate capture group name") + } + GroupNameEmpty => write!(f, "empty capture group name"), + GroupNameInvalid => write!(f, "invalid capture group character"), + GroupNameUnexpectedEof => write!(f, "unclosed capture group name"), + GroupUnclosed => write!(f, "unclosed group"), + GroupUnopened => write!(f, "unopened group"), + NestLimitExceeded(limit) => write!( + f, + "exceed the maximum number of \ + nested parentheses/brackets ({})", + limit + ), + RepetitionCountInvalid => write!( + f, + "invalid repetition count range, \ + the start must be <= the end" + ), + RepetitionCountDecimalEmpty => { + write!(f, "repetition quantifier expects a valid decimal") + } + RepetitionCountUnclosed => { + write!(f, "unclosed counted repetition") + } + RepetitionMissing => { + write!(f, "repetition operator missing expression") + } + SpecialWordBoundaryUnclosed => { + write!( + f, + "special word boundary assertion is either \ + unclosed or contains an invalid character", + ) + } + SpecialWordBoundaryUnrecognized => { + write!( + f, + "unrecognized special word boundary assertion, \ + valid choices are: start, end, start-half \ + or end-half", + ) + } + SpecialWordOrRepetitionUnexpectedEof => { + write!( + f, + "found either the beginning of a special word \ + boundary or a bounded repetition on a \\b with \ + an opening brace, but no closing brace", + ) + } + UnicodeClassInvalid => { + write!(f, "invalid Unicode character class") + } + UnsupportedBackreference => { + write!(f, "backreferences are not supported") + } + UnsupportedLookAround => write!( + f, + "look-around, including look-ahead and look-behind, \ + is not supported" + ), + } + } +} + +/// Span represents the position information of a single AST item. +/// +/// All span positions are absolute byte offsets that can be used on the +/// original regular expression that was parsed. 
+#[derive(Clone, Copy, Eq, PartialEq)] +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] +pub struct Span { + /// The start byte offset. + pub start: Position, + /// The end byte offset. + pub end: Position, +} + +impl core::fmt::Debug for Span { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + write!(f, "Span({:?}, {:?})", self.start, self.end) + } +} + +impl Ord for Span { + fn cmp(&self, other: &Span) -> Ordering { + (&self.start, &self.end).cmp(&(&other.start, &other.end)) + } +} + +impl PartialOrd for Span { + fn partial_cmp(&self, other: &Span) -> Option { + Some(self.cmp(other)) + } +} + +/// A single position in a regular expression. +/// +/// A position encodes one half of a span, and include the byte offset, line +/// number and column number. +#[derive(Clone, Copy, Eq, PartialEq)] +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] +pub struct Position { + /// The absolute offset of this position, starting at `0` from the + /// beginning of the regular expression pattern string. + pub offset: usize, + /// The line number, starting at `1`. + pub line: usize, + /// The approximate column number, starting at `1`. + pub column: usize, +} + +impl core::fmt::Debug for Position { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + write!( + f, + "Position(o: {:?}, l: {:?}, c: {:?})", + self.offset, self.line, self.column + ) + } +} + +impl Ord for Position { + fn cmp(&self, other: &Position) -> Ordering { + self.offset.cmp(&other.offset) + } +} + +impl PartialOrd for Position { + fn partial_cmp(&self, other: &Position) -> Option { + Some(self.cmp(other)) + } +} + +impl Span { + /// Create a new span with the given positions. + pub fn new(start: Position, end: Position) -> Span { + Span { start, end } + } + + /// Create a new span using the given position as the start and end. + pub fn splat(pos: Position) -> Span { + Span::new(pos, pos) + } + + /// Create a new span by replacing the starting the position with the one + /// given. + pub fn with_start(self, pos: Position) -> Span { + Span { start: pos, ..self } + } + + /// Create a new span by replacing the ending the position with the one + /// given. + pub fn with_end(self, pos: Position) -> Span { + Span { end: pos, ..self } + } + + /// Returns true if and only if this span occurs on a single line. + pub fn is_one_line(&self) -> bool { + self.start.line == self.end.line + } + + /// Returns true if and only if this span is empty. That is, it points to + /// a single position in the concrete syntax of a regular expression. + pub fn is_empty(&self) -> bool { + self.start.offset == self.end.offset + } +} + +impl Position { + /// Create a new position with the given information. + /// + /// `offset` is the absolute offset of the position, starting at `0` from + /// the beginning of the regular expression pattern string. + /// + /// `line` is the line number, starting at `1`. + /// + /// `column` is the approximate column number, starting at `1`. + pub fn new(offset: usize, line: usize, column: usize) -> Position { + Position { offset, line, column } + } +} + +/// An abstract syntax tree for a singular expression along with comments +/// found. +/// +/// Comments are not stored in the tree itself to avoid complexity. Each +/// comment contains a span of precisely where it occurred in the original +/// regular expression. 
+#[derive(Clone, Debug, Eq, PartialEq)] +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] +pub struct WithComments { + /// The actual ast. + pub ast: Ast, + /// All comments found in the original regular expression. + pub comments: Vec, +} + +/// A comment from a regular expression with an associated span. +/// +/// A regular expression can only contain comments when the `x` flag is +/// enabled. +#[derive(Clone, Debug, Eq, PartialEq)] +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] +pub struct Comment { + /// The span of this comment, including the beginning `#` and ending `\n`. + pub span: Span, + /// The comment text, starting with the first character following the `#` + /// and ending with the last character preceding the `\n`. + pub comment: String, +} + +/// An abstract syntax tree for a single regular expression. +/// +/// An `Ast`'s `fmt::Display` implementation uses constant stack space and heap +/// space proportional to the size of the `Ast`. +/// +/// This type defines its own destructor that uses constant stack space and +/// heap space proportional to the size of the `Ast`. +#[derive(Clone, Debug, Eq, PartialEq)] +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] +pub enum Ast { + /// An empty regex that matches everything. + Empty(Box), + /// A set of flags, e.g., `(?is)`. + Flags(Box), + /// A single character literal, which includes escape sequences. + Literal(Box), + /// The "any character" class. + Dot(Box), + /// A single zero-width assertion. + Assertion(Box), + /// A single Unicode character class, e.g., `\pL` or `\p{Greek}`. + ClassUnicode(Box), + /// A single perl character class, e.g., `\d` or `\W`. + ClassPerl(Box), + /// A single bracketed character class set, which may contain zero or more + /// character ranges and/or zero or more nested classes. e.g., + /// `[a-zA-Z\pL]`. + ClassBracketed(Box), + /// A repetition operator applied to an arbitrary regular expression. + Repetition(Box), + /// A grouped regular expression. + Group(Box), + /// An alternation of regular expressions. + Alternation(Box), + /// A concatenation of regular expressions. + Concat(Box), +} + +impl Ast { + /// Create an "empty" AST item. + pub fn empty(span: Span) -> Ast { + Ast::Empty(Box::new(span)) + } + + /// Create a "flags" AST item. + pub fn flags(e: SetFlags) -> Ast { + Ast::Flags(Box::new(e)) + } + + /// Create a "literal" AST item. + pub fn literal(e: Literal) -> Ast { + Ast::Literal(Box::new(e)) + } + + /// Create a "dot" AST item. + pub fn dot(span: Span) -> Ast { + Ast::Dot(Box::new(span)) + } + + /// Create a "assertion" AST item. + pub fn assertion(e: Assertion) -> Ast { + Ast::Assertion(Box::new(e)) + } + + /// Create a "Unicode class" AST item. + pub fn class_unicode(e: ClassUnicode) -> Ast { + Ast::ClassUnicode(Box::new(e)) + } + + /// Create a "Perl class" AST item. + pub fn class_perl(e: ClassPerl) -> Ast { + Ast::ClassPerl(Box::new(e)) + } + + /// Create a "bracketed class" AST item. + pub fn class_bracketed(e: ClassBracketed) -> Ast { + Ast::ClassBracketed(Box::new(e)) + } + + /// Create a "repetition" AST item. + pub fn repetition(e: Repetition) -> Ast { + Ast::Repetition(Box::new(e)) + } + + /// Create a "group" AST item. + pub fn group(e: Group) -> Ast { + Ast::Group(Box::new(e)) + } + + /// Create a "alternation" AST item. + pub fn alternation(e: Alternation) -> Ast { + Ast::Alternation(Box::new(e)) + } + + /// Create a "concat" AST item. 
+ pub fn concat(e: Concat) -> Ast { + Ast::Concat(Box::new(e)) + } + + /// Return the span of this abstract syntax tree. + pub fn span(&self) -> &Span { + match *self { + Ast::Empty(ref span) => span, + Ast::Flags(ref x) => &x.span, + Ast::Literal(ref x) => &x.span, + Ast::Dot(ref span) => span, + Ast::Assertion(ref x) => &x.span, + Ast::ClassUnicode(ref x) => &x.span, + Ast::ClassPerl(ref x) => &x.span, + Ast::ClassBracketed(ref x) => &x.span, + Ast::Repetition(ref x) => &x.span, + Ast::Group(ref x) => &x.span, + Ast::Alternation(ref x) => &x.span, + Ast::Concat(ref x) => &x.span, + } + } + + /// Return true if and only if this Ast is empty. + pub fn is_empty(&self) -> bool { + match *self { + Ast::Empty(_) => true, + _ => false, + } + } + + /// Returns true if and only if this AST has any (including possibly empty) + /// subexpressions. + fn has_subexprs(&self) -> bool { + match *self { + Ast::Empty(_) + | Ast::Flags(_) + | Ast::Literal(_) + | Ast::Dot(_) + | Ast::Assertion(_) + | Ast::ClassUnicode(_) + | Ast::ClassPerl(_) => false, + Ast::ClassBracketed(_) + | Ast::Repetition(_) + | Ast::Group(_) + | Ast::Alternation(_) + | Ast::Concat(_) => true, + } + } +} + +/// Print a display representation of this Ast. +/// +/// This does not preserve any of the original whitespace formatting that may +/// have originally been present in the concrete syntax from which this Ast +/// was generated. +/// +/// This implementation uses constant stack space and heap space proportional +/// to the size of the `Ast`. +impl core::fmt::Display for Ast { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + use crate::ast::print::Printer; + Printer::new().print(self, f) + } +} + +/// An alternation of regular expressions. +#[derive(Clone, Debug, Eq, PartialEq)] +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] +pub struct Alternation { + /// The span of this alternation. + pub span: Span, + /// The alternate regular expressions. + pub asts: Vec, +} + +impl Alternation { + /// Return this alternation as an AST. + /// + /// If this alternation contains zero ASTs, then `Ast::empty` is returned. + /// If this alternation contains exactly 1 AST, then the corresponding AST + /// is returned. Otherwise, `Ast::alternation` is returned. + pub fn into_ast(mut self) -> Ast { + match self.asts.len() { + 0 => Ast::empty(self.span), + 1 => self.asts.pop().unwrap(), + _ => Ast::alternation(self), + } + } +} + +/// A concatenation of regular expressions. +#[derive(Clone, Debug, Eq, PartialEq)] +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] +pub struct Concat { + /// The span of this concatenation. + pub span: Span, + /// The concatenation regular expressions. + pub asts: Vec, +} + +impl Concat { + /// Return this concatenation as an AST. + /// + /// If this alternation contains zero ASTs, then `Ast::empty` is returned. + /// If this alternation contains exactly 1 AST, then the corresponding AST + /// is returned. Otherwise, `Ast::concat` is returned. + pub fn into_ast(mut self) -> Ast { + match self.asts.len() { + 0 => Ast::empty(self.span), + 1 => self.asts.pop().unwrap(), + _ => Ast::concat(self), + } + } +} + +/// A single literal expression. +/// +/// A literal corresponds to a single Unicode scalar value. Literals may be +/// represented in their literal form, e.g., `a` or in their escaped form, +/// e.g., `\x61`. 
+#[derive(Clone, Debug, Eq, PartialEq)] +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] +pub struct Literal { + /// The span of this literal. + pub span: Span, + /// The kind of this literal. + pub kind: LiteralKind, + /// The Unicode scalar value corresponding to this literal. + pub c: char, +} + +impl Literal { + /// If this literal was written as a `\x` hex escape, then this returns + /// the corresponding byte value. Otherwise, this returns `None`. + pub fn byte(&self) -> Option { + match self.kind { + LiteralKind::HexFixed(HexLiteralKind::X) => { + u8::try_from(self.c).ok() + } + _ => None, + } + } +} + +/// The kind of a single literal expression. +#[derive(Clone, Debug, Eq, PartialEq)] +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] +pub enum LiteralKind { + /// The literal is written verbatim, e.g., `a` or `☃`. + Verbatim, + /// The literal is written as an escape because it is otherwise a special + /// regex meta character, e.g., `\*` or `\[`. + Meta, + /// The literal is written as an escape despite the fact that the escape is + /// unnecessary, e.g., `\%` or `\/`. + Superfluous, + /// The literal is written as an octal escape, e.g., `\141`. + Octal, + /// The literal is written as a hex code with a fixed number of digits + /// depending on the type of the escape, e.g., `\x61` or `\u0061` or + /// `\U00000061`. + HexFixed(HexLiteralKind), + /// The literal is written as a hex code with a bracketed number of + /// digits. The only restriction is that the bracketed hex code must refer + /// to a valid Unicode scalar value. + HexBrace(HexLiteralKind), + /// The literal is written as a specially recognized escape, e.g., `\f` + /// or `\n`. + Special(SpecialLiteralKind), +} + +/// The type of a special literal. +/// +/// A special literal is a special escape sequence recognized by the regex +/// parser, e.g., `\f` or `\n`. +#[derive(Clone, Debug, Eq, PartialEq)] +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] +pub enum SpecialLiteralKind { + /// Bell, spelled `\a` (`\x07`). + Bell, + /// Form feed, spelled `\f` (`\x0C`). + FormFeed, + /// Tab, spelled `\t` (`\x09`). + Tab, + /// Line feed, spelled `\n` (`\x0A`). + LineFeed, + /// Carriage return, spelled `\r` (`\x0D`). + CarriageReturn, + /// Vertical tab, spelled `\v` (`\x0B`). + VerticalTab, + /// Space, spelled `\ ` (`\x20`). Note that this can only appear when + /// parsing in verbose mode. + Space, +} + +/// The type of a Unicode hex literal. +/// +/// Note that all variants behave the same when used with brackets. They only +/// differ when used without brackets in the number of hex digits that must +/// follow. +#[derive(Clone, Debug, Eq, PartialEq)] +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] +pub enum HexLiteralKind { + /// A `\x` prefix. When used without brackets, this form is limited to + /// two digits. + X, + /// A `\u` prefix. When used without brackets, this form is limited to + /// four digits. + UnicodeShort, + /// A `\U` prefix. When used without brackets, this form is limited to + /// eight digits. + UnicodeLong, +} + +impl HexLiteralKind { + /// The number of digits that must be used with this literal form when + /// used without brackets. When used with brackets, there is no + /// restriction on the number of digits. + pub fn digits(&self) -> u32 { + match *self { + HexLiteralKind::X => 2, + HexLiteralKind::UnicodeShort => 4, + HexLiteralKind::UnicodeLong => 8, + } + } +} + +/// A Perl character class. 
+#[derive(Clone, Debug, Eq, PartialEq)] +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] +pub struct ClassPerl { + /// The span of this class. + pub span: Span, + /// The kind of Perl class. + pub kind: ClassPerlKind, + /// Whether the class is negated or not. e.g., `\d` is not negated but + /// `\D` is. + pub negated: bool, +} + +/// The available Perl character classes. +#[derive(Clone, Debug, Eq, PartialEq)] +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] +pub enum ClassPerlKind { + /// Decimal numbers. + Digit, + /// Whitespace. + Space, + /// Word characters. + Word, +} + +/// An ASCII character class. +#[derive(Clone, Debug, Eq, PartialEq)] +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] +pub struct ClassAscii { + /// The span of this class. + pub span: Span, + /// The kind of ASCII class. + pub kind: ClassAsciiKind, + /// Whether the class is negated or not. e.g., `[[:alpha:]]` is not negated + /// but `[[:^alpha:]]` is. + pub negated: bool, +} + +/// The available ASCII character classes. +#[derive(Clone, Debug, Eq, PartialEq)] +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] +pub enum ClassAsciiKind { + /// `[0-9A-Za-z]` + Alnum, + /// `[A-Za-z]` + Alpha, + /// `[\x00-\x7F]` + Ascii, + /// `[ \t]` + Blank, + /// `[\x00-\x1F\x7F]` + Cntrl, + /// `[0-9]` + Digit, + /// `[!-~]` + Graph, + /// `[a-z]` + Lower, + /// `[ -~]` + Print, + /// ``[!-/:-@\[-`{-~]`` + Punct, + /// `[\t\n\v\f\r ]` + Space, + /// `[A-Z]` + Upper, + /// `[0-9A-Za-z_]` + Word, + /// `[0-9A-Fa-f]` + Xdigit, +} + +impl ClassAsciiKind { + /// Return the corresponding ClassAsciiKind variant for the given name. + /// + /// The name given should correspond to the lowercase version of the + /// variant name. e.g., `cntrl` is the name for `ClassAsciiKind::Cntrl`. + /// + /// If no variant with the corresponding name exists, then `None` is + /// returned. + pub fn from_name(name: &str) -> Option { + use self::ClassAsciiKind::*; + match name { + "alnum" => Some(Alnum), + "alpha" => Some(Alpha), + "ascii" => Some(Ascii), + "blank" => Some(Blank), + "cntrl" => Some(Cntrl), + "digit" => Some(Digit), + "graph" => Some(Graph), + "lower" => Some(Lower), + "print" => Some(Print), + "punct" => Some(Punct), + "space" => Some(Space), + "upper" => Some(Upper), + "word" => Some(Word), + "xdigit" => Some(Xdigit), + _ => None, + } + } +} + +/// A Unicode character class. +#[derive(Clone, Debug, Eq, PartialEq)] +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] +pub struct ClassUnicode { + /// The span of this class. + pub span: Span, + /// Whether this class is negated or not. + /// + /// Note: be careful when using this attribute. This specifically refers + /// to whether the class is written as `\p` or `\P`, where the latter + /// is `negated = true`. However, it also possible to write something like + /// `\P{scx!=Katakana}` which is actually equivalent to + /// `\p{scx=Katakana}` and is therefore not actually negated even though + /// `negated = true` here. To test whether this class is truly negated + /// or not, use the `is_negated` method. + pub negated: bool, + /// The kind of Unicode class. + pub kind: ClassUnicodeKind, +} + +impl ClassUnicode { + /// Returns true if this class has been negated. + /// + /// Note that this takes the Unicode op into account, if it's present. + /// e.g., `is_negated` for `\P{scx!=Katakana}` will return `false`. 
+ pub fn is_negated(&self) -> bool { + match self.kind { + ClassUnicodeKind::NamedValue { + op: ClassUnicodeOpKind::NotEqual, + .. + } => !self.negated, + _ => self.negated, + } + } +} + +/// The available forms of Unicode character classes. +#[derive(Clone, Debug, Eq, PartialEq)] +pub enum ClassUnicodeKind { + /// A one letter abbreviated class, e.g., `\pN`. + OneLetter(char), + /// A binary property, general category or script. The string may be + /// empty. + Named(String), + /// A property name and an associated value. + NamedValue { + /// The type of Unicode op used to associate `name` with `value`. + op: ClassUnicodeOpKind, + /// The property name (which may be empty). + name: String, + /// The property value (which may be empty). + value: String, + }, +} + +#[cfg(feature = "arbitrary")] +impl arbitrary::Arbitrary<'_> for ClassUnicodeKind { + fn arbitrary( + u: &mut arbitrary::Unstructured, + ) -> arbitrary::Result { + #[cfg(any( + feature = "unicode-age", + feature = "unicode-bool", + feature = "unicode-gencat", + feature = "unicode-perl", + feature = "unicode-script", + feature = "unicode-segment", + ))] + { + use alloc::string::ToString; + + use super::unicode_tables::{ + property_names::PROPERTY_NAMES, + property_values::PROPERTY_VALUES, + }; + + match u.choose_index(3)? { + 0 => { + let all = PROPERTY_VALUES + .iter() + .flat_map(|e| e.1.iter()) + .filter(|(name, _)| name.len() == 1) + .count(); + let idx = u.choose_index(all)?; + let value = PROPERTY_VALUES + .iter() + .flat_map(|e| e.1.iter()) + .take(idx + 1) + .last() + .unwrap() + .0 + .chars() + .next() + .unwrap(); + Ok(ClassUnicodeKind::OneLetter(value)) + } + 1 => { + let all = PROPERTY_VALUES + .iter() + .map(|e| e.1.len()) + .sum::() + + PROPERTY_NAMES.len(); + let idx = u.choose_index(all)?; + let name = PROPERTY_VALUES + .iter() + .flat_map(|e| e.1.iter()) + .chain(PROPERTY_NAMES) + .map(|(_, e)| e) + .take(idx + 1) + .last() + .unwrap(); + Ok(ClassUnicodeKind::Named(name.to_string())) + } + 2 => { + let all = PROPERTY_VALUES + .iter() + .map(|e| e.1.len()) + .sum::(); + let idx = u.choose_index(all)?; + let (prop, value) = PROPERTY_VALUES + .iter() + .flat_map(|e| { + e.1.iter().map(|(_, value)| (e.0, value)) + }) + .take(idx + 1) + .last() + .unwrap(); + Ok(ClassUnicodeKind::NamedValue { + op: u.arbitrary()?, + name: prop.to_string(), + value: value.to_string(), + }) + } + _ => unreachable!("index chosen is impossible"), + } + } + #[cfg(not(any( + feature = "unicode-age", + feature = "unicode-bool", + feature = "unicode-gencat", + feature = "unicode-perl", + feature = "unicode-script", + feature = "unicode-segment", + )))] + { + match u.choose_index(3)? 
{ + 0 => Ok(ClassUnicodeKind::OneLetter(u.arbitrary()?)), + 1 => Ok(ClassUnicodeKind::Named(u.arbitrary()?)), + 2 => Ok(ClassUnicodeKind::NamedValue { + op: u.arbitrary()?, + name: u.arbitrary()?, + value: u.arbitrary()?, + }), + _ => unreachable!("index chosen is impossible"), + } + } + } + + fn size_hint(depth: usize) -> (usize, Option) { + #[cfg(any( + feature = "unicode-age", + feature = "unicode-bool", + feature = "unicode-gencat", + feature = "unicode-perl", + feature = "unicode-script", + feature = "unicode-segment", + ))] + { + arbitrary::size_hint::and_all(&[ + usize::size_hint(depth), + usize::size_hint(depth), + arbitrary::size_hint::or( + (0, Some(0)), + ClassUnicodeOpKind::size_hint(depth), + ), + ]) + } + #[cfg(not(any( + feature = "unicode-age", + feature = "unicode-bool", + feature = "unicode-gencat", + feature = "unicode-perl", + feature = "unicode-script", + feature = "unicode-segment", + )))] + { + arbitrary::size_hint::and( + usize::size_hint(depth), + arbitrary::size_hint::or_all(&[ + char::size_hint(depth), + String::size_hint(depth), + arbitrary::size_hint::and_all(&[ + String::size_hint(depth), + String::size_hint(depth), + ClassUnicodeOpKind::size_hint(depth), + ]), + ]), + ) + } + } +} + +/// The type of op used in a Unicode character class. +#[derive(Clone, Debug, Eq, PartialEq)] +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] +pub enum ClassUnicodeOpKind { + /// A property set to a specific value, e.g., `\p{scx=Katakana}`. + Equal, + /// A property set to a specific value using a colon, e.g., + /// `\p{scx:Katakana}`. + Colon, + /// A property that isn't a particular value, e.g., `\p{scx!=Katakana}`. + NotEqual, +} + +impl ClassUnicodeOpKind { + /// Whether the op is an equality op or not. + pub fn is_equal(&self) -> bool { + match *self { + ClassUnicodeOpKind::Equal | ClassUnicodeOpKind::Colon => true, + _ => false, + } + } +} + +/// A bracketed character class, e.g., `[a-z0-9]`. +#[derive(Clone, Debug, Eq, PartialEq)] +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] +pub struct ClassBracketed { + /// The span of this class. + pub span: Span, + /// Whether this class is negated or not. e.g., `[a]` is not negated but + /// `[^a]` is. + pub negated: bool, + /// The type of this set. A set is either a normal union of things, e.g., + /// `[abc]` or a result of applying set operations, e.g., `[\pL--c]`. + pub kind: ClassSet, +} + +/// A character class set. +/// +/// This type corresponds to the internal structure of a bracketed character +/// class. That is, every bracketed character is one of two types: a union of +/// items (literals, ranges, other bracketed classes) or a tree of binary set +/// operations. +#[derive(Clone, Debug, Eq, PartialEq)] +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] +pub enum ClassSet { + /// An item, which can be a single literal, range, nested character class + /// or a union of items. + Item(ClassSetItem), + /// A single binary operation (i.e., &&, -- or ~~). + BinaryOp(ClassSetBinaryOp), +} + +impl ClassSet { + /// Build a set from a union. + pub fn union(ast: ClassSetUnion) -> ClassSet { + ClassSet::Item(ClassSetItem::Union(ast)) + } + + /// Return the span of this character class set. + pub fn span(&self) -> &Span { + match *self { + ClassSet::Item(ref x) => x.span(), + ClassSet::BinaryOp(ref x) => &x.span, + } + } + + /// Return true if and only if this class set is empty. 
+ fn is_empty(&self) -> bool { + match *self { + ClassSet::Item(ClassSetItem::Empty(_)) => true, + _ => false, + } + } +} + +/// A single component of a character class set. +#[derive(Clone, Debug, Eq, PartialEq)] +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] +pub enum ClassSetItem { + /// An empty item. + /// + /// Note that a bracketed character class cannot contain a single empty + /// item. Empty items can appear when using one of the binary operators. + /// For example, `[&&]` is the intersection of two empty classes. + Empty(Span), + /// A single literal. + Literal(Literal), + /// A range between two literals. + Range(ClassSetRange), + /// An ASCII character class, e.g., `[:alnum:]` or `[:punct:]`. + Ascii(ClassAscii), + /// A Unicode character class, e.g., `\pL` or `\p{Greek}`. + Unicode(ClassUnicode), + /// A perl character class, e.g., `\d` or `\W`. + Perl(ClassPerl), + /// A bracketed character class set, which may contain zero or more + /// character ranges and/or zero or more nested classes. e.g., + /// `[a-zA-Z\pL]`. + Bracketed(Box), + /// A union of items. + Union(ClassSetUnion), +} + +impl ClassSetItem { + /// Return the span of this character class set item. + pub fn span(&self) -> &Span { + match *self { + ClassSetItem::Empty(ref span) => span, + ClassSetItem::Literal(ref x) => &x.span, + ClassSetItem::Range(ref x) => &x.span, + ClassSetItem::Ascii(ref x) => &x.span, + ClassSetItem::Perl(ref x) => &x.span, + ClassSetItem::Unicode(ref x) => &x.span, + ClassSetItem::Bracketed(ref x) => &x.span, + ClassSetItem::Union(ref x) => &x.span, + } + } +} + +/// A single character class range in a set. +#[derive(Clone, Debug, Eq, PartialEq)] +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] +pub struct ClassSetRange { + /// The span of this range. + pub span: Span, + /// The start of this range. + pub start: Literal, + /// The end of this range. + pub end: Literal, +} + +impl ClassSetRange { + /// Returns true if and only if this character class range is valid. + /// + /// The only case where a range is invalid is if its start is greater than + /// its end. + pub fn is_valid(&self) -> bool { + self.start.c <= self.end.c + } +} + +/// A union of items inside a character class set. +#[derive(Clone, Debug, Eq, PartialEq)] +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] +pub struct ClassSetUnion { + /// The span of the items in this operation. e.g., the `a-z0-9` in + /// `[^a-z0-9]` + pub span: Span, + /// The sequence of items that make up this union. + pub items: Vec, +} + +impl ClassSetUnion { + /// Push a new item in this union. + /// + /// The ending position of this union's span is updated to the ending + /// position of the span of the item given. If the union is empty, then + /// the starting position of this union is set to the starting position + /// of this item. + /// + /// In other words, if you only use this method to add items to a union + /// and you set the spans on each item correctly, then you should never + /// need to adjust the span of the union directly. + pub fn push(&mut self, item: ClassSetItem) { + if self.items.is_empty() { + self.span.start = item.span().start; + } + self.span.end = item.span().end; + self.items.push(item); + } + + /// Return this union as a character class set item. + /// + /// If this union contains zero items, then an empty union is + /// returned. If this concatenation contains exactly 1 item, then the + /// corresponding item is returned. 
Otherwise, ClassSetItem::Union is + /// returned. + pub fn into_item(mut self) -> ClassSetItem { + match self.items.len() { + 0 => ClassSetItem::Empty(self.span), + 1 => self.items.pop().unwrap(), + _ => ClassSetItem::Union(self), + } + } +} + +/// A Unicode character class set operation. +#[derive(Clone, Debug, Eq, PartialEq)] +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] +pub struct ClassSetBinaryOp { + /// The span of this operation. e.g., the `a-z--[h-p]` in `[a-z--h-p]`. + pub span: Span, + /// The type of this set operation. + pub kind: ClassSetBinaryOpKind, + /// The left hand side of the operation. + pub lhs: Box, + /// The right hand side of the operation. + pub rhs: Box, +} + +/// The type of a Unicode character class set operation. +/// +/// Note that this doesn't explicitly represent union since there is no +/// explicit union operator. Concatenation inside a character class corresponds +/// to the union operation. +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] +pub enum ClassSetBinaryOpKind { + /// The intersection of two sets, e.g., `\pN&&[a-z]`. + Intersection, + /// The difference of two sets, e.g., `\pN--[0-9]`. + Difference, + /// The symmetric difference of two sets. The symmetric difference is the + /// set of elements belonging to one but not both sets. + /// e.g., `[\pL~~[:ascii:]]`. + SymmetricDifference, +} + +/// A single zero-width assertion. +#[derive(Clone, Debug, Eq, PartialEq)] +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] +pub struct Assertion { + /// The span of this assertion. + pub span: Span, + /// The assertion kind, e.g., `\b` or `^`. + pub kind: AssertionKind, +} + +/// An assertion kind. +#[derive(Clone, Debug, Eq, PartialEq)] +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] +pub enum AssertionKind { + /// `^` + StartLine, + /// `$` + EndLine, + /// `\A` + StartText, + /// `\z` + EndText, + /// `\b` + WordBoundary, + /// `\B` + NotWordBoundary, + /// `\b{start}` + WordBoundaryStart, + /// `\b{end}` + WordBoundaryEnd, + /// `\<` (alias for `\b{start}`) + WordBoundaryStartAngle, + /// `\>` (alias for `\b{end}`) + WordBoundaryEndAngle, + /// `\b{start-half}` + WordBoundaryStartHalf, + /// `\b{end-half}` + WordBoundaryEndHalf, +} + +/// A repetition operation applied to a regular expression. +#[derive(Clone, Debug, Eq, PartialEq)] +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] +pub struct Repetition { + /// The span of this operation. + pub span: Span, + /// The actual operation. + pub op: RepetitionOp, + /// Whether this operation was applied greedily or not. + pub greedy: bool, + /// The regular expression under repetition. + pub ast: Box, +} + +/// The repetition operator itself. +#[derive(Clone, Debug, Eq, PartialEq)] +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] +pub struct RepetitionOp { + /// The span of this operator. This includes things like `+`, `*?` and + /// `{m,n}`. + pub span: Span, + /// The type of operation. + pub kind: RepetitionKind, +} + +/// The kind of a repetition operator. +#[derive(Clone, Debug, Eq, PartialEq)] +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] +pub enum RepetitionKind { + /// `?` + ZeroOrOne, + /// `*` + ZeroOrMore, + /// `+` + OneOrMore, + /// `{m,n}` + Range(RepetitionRange), +} + +/// A range repetition operator. 
+#[derive(Clone, Debug, Eq, PartialEq)] +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] +pub enum RepetitionRange { + /// `{m}` + Exactly(u32), + /// `{m,}` + AtLeast(u32), + /// `{m,n}` + Bounded(u32, u32), +} + +impl RepetitionRange { + /// Returns true if and only if this repetition range is valid. + /// + /// The only case where a repetition range is invalid is if it is bounded + /// and its start is greater than its end. + pub fn is_valid(&self) -> bool { + match *self { + RepetitionRange::Bounded(s, e) if s > e => false, + _ => true, + } + } +} + +/// A grouped regular expression. +/// +/// This includes both capturing and non-capturing groups. This does **not** +/// include flag-only groups like `(?is)`, but does contain any group that +/// contains a sub-expression, e.g., `(a)`, `(?Pa)`, `(?:a)` and +/// `(?is:a)`. +#[derive(Clone, Debug, Eq, PartialEq)] +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] +pub struct Group { + /// The span of this group. + pub span: Span, + /// The kind of this group. + pub kind: GroupKind, + /// The regular expression in this group. + pub ast: Box, +} + +impl Group { + /// If this group is non-capturing, then this returns the (possibly empty) + /// set of flags. Otherwise, `None` is returned. + pub fn flags(&self) -> Option<&Flags> { + match self.kind { + GroupKind::NonCapturing(ref flags) => Some(flags), + _ => None, + } + } + + /// Returns true if and only if this group is capturing. + pub fn is_capturing(&self) -> bool { + match self.kind { + GroupKind::CaptureIndex(_) | GroupKind::CaptureName { .. } => true, + GroupKind::NonCapturing(_) => false, + } + } + + /// Returns the capture index of this group, if this is a capturing group. + /// + /// This returns a capture index precisely when `is_capturing` is `true`. + pub fn capture_index(&self) -> Option { + match self.kind { + GroupKind::CaptureIndex(i) => Some(i), + GroupKind::CaptureName { ref name, .. } => Some(name.index), + GroupKind::NonCapturing(_) => None, + } + } +} + +/// The kind of a group. +#[derive(Clone, Debug, Eq, PartialEq)] +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] +pub enum GroupKind { + /// `(a)` + CaptureIndex(u32), + /// `(?a)` or `(?Pa)` + CaptureName { + /// True if the `?P<` syntax is used and false if the `?<` syntax is used. + starts_with_p: bool, + /// The capture name. + name: CaptureName, + }, + /// `(?:a)` and `(?i:a)` + NonCapturing(Flags), +} + +/// A capture name. +/// +/// This corresponds to the name itself between the angle brackets in, e.g., +/// `(?Pexpr)`. +#[derive(Clone, Debug, Eq, PartialEq)] +pub struct CaptureName { + /// The span of this capture name. + pub span: Span, + /// The capture name. + pub name: String, + /// The capture index. + pub index: u32, +} + +#[cfg(feature = "arbitrary")] +impl arbitrary::Arbitrary<'_> for CaptureName { + fn arbitrary( + u: &mut arbitrary::Unstructured, + ) -> arbitrary::Result { + let len = u.arbitrary_len::()?; + if len == 0 { + return Err(arbitrary::Error::NotEnoughData); + } + let mut name: String = String::new(); + for _ in 0..len { + let ch: char = u.arbitrary()?; + let cp = u32::from(ch); + let ascii_letter_offset = u8::try_from(cp % 26).unwrap(); + let ascii_letter = b'a' + ascii_letter_offset; + name.push(char::from(ascii_letter)); + } + Ok(CaptureName { span: u.arbitrary()?, name, index: u.arbitrary()? 
}) + } + + fn size_hint(depth: usize) -> (usize, Option) { + arbitrary::size_hint::and_all(&[ + Span::size_hint(depth), + usize::size_hint(depth), + u32::size_hint(depth), + ]) + } +} + +/// A group of flags that is not applied to a particular regular expression. +#[derive(Clone, Debug, Eq, PartialEq)] +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] +pub struct SetFlags { + /// The span of these flags, including the grouping parentheses. + pub span: Span, + /// The actual sequence of flags. + pub flags: Flags, +} + +/// A group of flags. +/// +/// This corresponds only to the sequence of flags themselves, e.g., `is-u`. +#[derive(Clone, Debug, Eq, PartialEq)] +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] +pub struct Flags { + /// The span of this group of flags. + pub span: Span, + /// A sequence of flag items. Each item is either a flag or a negation + /// operator. + pub items: Vec, +} + +impl Flags { + /// Add the given item to this sequence of flags. + /// + /// If the item was added successfully, then `None` is returned. If the + /// given item is a duplicate, then `Some(i)` is returned, where + /// `items[i].kind == item.kind`. + pub fn add_item(&mut self, item: FlagsItem) -> Option { + for (i, x) in self.items.iter().enumerate() { + if x.kind == item.kind { + return Some(i); + } + } + self.items.push(item); + None + } + + /// Returns the state of the given flag in this set. + /// + /// If the given flag is in the set but is negated, then `Some(false)` is + /// returned. + /// + /// If the given flag is in the set and is not negated, then `Some(true)` + /// is returned. + /// + /// Otherwise, `None` is returned. + pub fn flag_state(&self, flag: Flag) -> Option { + let mut negated = false; + for x in &self.items { + match x.kind { + FlagsItemKind::Negation => { + negated = true; + } + FlagsItemKind::Flag(ref xflag) if xflag == &flag => { + return Some(!negated); + } + _ => {} + } + } + None + } +} + +/// A single item in a group of flags. +#[derive(Clone, Debug, Eq, PartialEq)] +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] +pub struct FlagsItem { + /// The span of this item. + pub span: Span, + /// The kind of this item. + pub kind: FlagsItemKind, +} + +/// The kind of an item in a group of flags. +#[derive(Clone, Debug, Eq, PartialEq)] +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] +pub enum FlagsItemKind { + /// A negation operator applied to all subsequent flags in the enclosing + /// group. + Negation, + /// A single flag in a group. + Flag(Flag), +} + +impl FlagsItemKind { + /// Returns true if and only if this item is a negation operator. + pub fn is_negation(&self) -> bool { + match *self { + FlagsItemKind::Negation => true, + _ => false, + } + } +} + +/// A single flag. +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] +pub enum Flag { + /// `i` + CaseInsensitive, + /// `m` + MultiLine, + /// `s` + DotMatchesNewLine, + /// `U` + SwapGreed, + /// `u` + Unicode, + /// `R` + CRLF, + /// `x` + IgnoreWhitespace, +} + +/// A custom `Drop` impl is used for `Ast` such that it uses constant stack +/// space but heap space proportional to the depth of the `Ast`. +impl Drop for Ast { + fn drop(&mut self) { + use core::mem; + + match *self { + Ast::Empty(_) + | Ast::Flags(_) + | Ast::Literal(_) + | Ast::Dot(_) + | Ast::Assertion(_) + | Ast::ClassUnicode(_) + | Ast::ClassPerl(_) + // Bracketed classes are recursive, they get their own Drop impl. 
+ | Ast::ClassBracketed(_) => return, + Ast::Repetition(ref x) if !x.ast.has_subexprs() => return, + Ast::Group(ref x) if !x.ast.has_subexprs() => return, + Ast::Alternation(ref x) if x.asts.is_empty() => return, + Ast::Concat(ref x) if x.asts.is_empty() => return, + _ => {} + } + + let empty_span = || Span::splat(Position::new(0, 0, 0)); + let empty_ast = || Ast::empty(empty_span()); + let mut stack = vec![mem::replace(self, empty_ast())]; + while let Some(mut ast) = stack.pop() { + match ast { + Ast::Empty(_) + | Ast::Flags(_) + | Ast::Literal(_) + | Ast::Dot(_) + | Ast::Assertion(_) + | Ast::ClassUnicode(_) + | Ast::ClassPerl(_) + // Bracketed classes are recursive, so they get their own Drop + // impl. + | Ast::ClassBracketed(_) => {} + Ast::Repetition(ref mut x) => { + stack.push(mem::replace(&mut x.ast, empty_ast())); + } + Ast::Group(ref mut x) => { + stack.push(mem::replace(&mut x.ast, empty_ast())); + } + Ast::Alternation(ref mut x) => { + stack.extend(x.asts.drain(..)); + } + Ast::Concat(ref mut x) => { + stack.extend(x.asts.drain(..)); + } + } + } + } +} + +/// A custom `Drop` impl is used for `ClassSet` such that it uses constant +/// stack space but heap space proportional to the depth of the `ClassSet`. +impl Drop for ClassSet { + fn drop(&mut self) { + use core::mem; + + match *self { + ClassSet::Item(ref item) => match *item { + ClassSetItem::Empty(_) + | ClassSetItem::Literal(_) + | ClassSetItem::Range(_) + | ClassSetItem::Ascii(_) + | ClassSetItem::Unicode(_) + | ClassSetItem::Perl(_) => return, + ClassSetItem::Bracketed(ref x) => { + if x.kind.is_empty() { + return; + } + } + ClassSetItem::Union(ref x) => { + if x.items.is_empty() { + return; + } + } + }, + ClassSet::BinaryOp(ref op) => { + if op.lhs.is_empty() && op.rhs.is_empty() { + return; + } + } + } + + let empty_span = || Span::splat(Position::new(0, 0, 0)); + let empty_set = || ClassSet::Item(ClassSetItem::Empty(empty_span())); + let mut stack = vec![mem::replace(self, empty_set())]; + while let Some(mut set) = stack.pop() { + match set { + ClassSet::Item(ref mut item) => match *item { + ClassSetItem::Empty(_) + | ClassSetItem::Literal(_) + | ClassSetItem::Range(_) + | ClassSetItem::Ascii(_) + | ClassSetItem::Unicode(_) + | ClassSetItem::Perl(_) => {} + ClassSetItem::Bracketed(ref mut x) => { + stack.push(mem::replace(&mut x.kind, empty_set())); + } + ClassSetItem::Union(ref mut x) => { + stack.extend(x.items.drain(..).map(ClassSet::Item)); + } + }, + ClassSet::BinaryOp(ref mut op) => { + stack.push(mem::replace(&mut op.lhs, empty_set())); + stack.push(mem::replace(&mut op.rhs, empty_set())); + } + } + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + // We use a thread with an explicit stack size to test that our destructor + // for Ast can handle arbitrarily sized expressions in constant stack + // space. In case we run on a platform without threads (WASM?), we limit + // this test to Windows/Unix. + #[test] + #[cfg(any(unix, windows))] + fn no_stack_overflow_on_drop() { + use std::thread; + + let run = || { + let span = || Span::splat(Position::new(0, 0, 0)); + let mut ast = Ast::empty(span()); + for i in 0..200 { + ast = Ast::group(Group { + span: span(), + kind: GroupKind::CaptureIndex(i), + ast: Box::new(ast), + }); + } + assert!(!ast.is_empty()); + }; + + // We run our test on a thread with a small stack size so we can + // force the issue more easily. + // + // NOTE(2023-03-21): It turns out that some platforms (like FreeBSD) + // will just barf with very small stack sizes. 
So we bump this up a bit + // to give more room to breathe. When I did this, I confirmed that if + // I remove the custom `Drop` impl for `Ast`, then this test does + // indeed still fail with a stack overflow. (At the time of writing, I + // had to bump it all the way up to 32K before the test would pass even + // without the custom `Drop` impl. So 16K seems like a safe number + // here.) + // + // See: https://github.com/rust-lang/regex/issues/967 + thread::Builder::new() + .stack_size(16 << 10) + .spawn(run) + .unwrap() + .join() + .unwrap(); + } + + // This tests that our `Ast` has a reasonable size. This isn't a hard rule + // and it can be increased if given a good enough reason. But this test + // exists because the size of `Ast` was at one point over 200 bytes on a + // 64-bit target. Wow. + #[test] + fn ast_size() { + let max = 2 * core::mem::size_of::<usize>(); + let size = core::mem::size_of::<Ast>(); + assert!( + size <= max, + "Ast size of {size} bytes is bigger than suggested max {max}", + ); + } +} diff --git a/vendor/regex-syntax/src/ast/parse.rs b/vendor/regex-syntax/src/ast/parse.rs new file mode 100644 index 00000000000000..bdaab72283857a --- /dev/null +++ b/vendor/regex-syntax/src/ast/parse.rs @@ -0,0 +1,6377 @@ +/*! +This module provides a regular expression parser. +*/ + +use core::{ + borrow::Borrow, + cell::{Cell, RefCell}, + mem, +}; + +use alloc::{ + boxed::Box, + string::{String, ToString}, + vec, + vec::Vec, +}; + +use crate::{ + ast::{self, Ast, Position, Span}, + either::Either, + is_escapeable_character, is_meta_character, +}; + +type Result<T> = core::result::Result<T, ast::Error>; + +/// A primitive is an expression with no sub-expressions. This includes +/// literals, assertions and non-set character classes. This representation +/// is used as intermediate state in the parser. +/// +/// This does not include ASCII character classes, since they can only appear +/// within a set character class. +#[derive(Clone, Debug, Eq, PartialEq)] +enum Primitive { + Literal(ast::Literal), + Assertion(ast::Assertion), + Dot(Span), + Perl(ast::ClassPerl), + Unicode(ast::ClassUnicode), +} + +impl Primitive { + /// Return the span of this primitive. + fn span(&self) -> &Span { + match *self { + Primitive::Literal(ref x) => &x.span, + Primitive::Assertion(ref x) => &x.span, + Primitive::Dot(ref span) => span, + Primitive::Perl(ref x) => &x.span, + Primitive::Unicode(ref x) => &x.span, + } + } + + /// Convert this primitive into a proper AST. + fn into_ast(self) -> Ast { + match self { + Primitive::Literal(lit) => Ast::literal(lit), + Primitive::Assertion(assert) => Ast::assertion(assert), + Primitive::Dot(span) => Ast::dot(span), + Primitive::Perl(cls) => Ast::class_perl(cls), + Primitive::Unicode(cls) => Ast::class_unicode(cls), + } + } + + /// Convert this primitive into an item in a character class. + /// + /// If this primitive is not a legal item (i.e., an assertion or a dot), + /// then return an error. + fn into_class_set_item<P: Borrow<Parser>>( + self, + p: &ParserI<'_, P>, + ) -> Result<ast::ClassSetItem> { + use self::Primitive::*; + use crate::ast::ClassSetItem; + + match self { + Literal(lit) => Ok(ClassSetItem::Literal(lit)), + Perl(cls) => Ok(ClassSetItem::Perl(cls)), + Unicode(cls) => Ok(ClassSetItem::Unicode(cls)), + x => Err(p.error(*x.span(), ast::ErrorKind::ClassEscapeInvalid)), + } + } + + /// Convert this primitive into a literal in a character class. In + /// particular, literals are the only valid items that can appear in + /// ranges.
+ /// + /// If this primitive is not a legal item (i.e., a class, assertion or a + /// dot), then return an error. + fn into_class_literal>( + self, + p: &ParserI<'_, P>, + ) -> Result { + use self::Primitive::*; + + match self { + Literal(lit) => Ok(lit), + x => Err(p.error(*x.span(), ast::ErrorKind::ClassRangeLiteral)), + } + } +} + +/// Returns true if the given character is a hexadecimal digit. +fn is_hex(c: char) -> bool { + ('0' <= c && c <= '9') || ('a' <= c && c <= 'f') || ('A' <= c && c <= 'F') +} + +/// Returns true if the given character is a valid in a capture group name. +/// +/// If `first` is true, then `c` is treated as the first character in the +/// group name (which must be alphabetic or underscore). +fn is_capture_char(c: char, first: bool) -> bool { + if first { + c == '_' || c.is_alphabetic() + } else { + c == '_' || c == '.' || c == '[' || c == ']' || c.is_alphanumeric() + } +} + +/// A builder for a regular expression parser. +/// +/// This builder permits modifying configuration options for the parser. +#[derive(Clone, Debug)] +pub struct ParserBuilder { + ignore_whitespace: bool, + nest_limit: u32, + octal: bool, + empty_min_range: bool, +} + +impl Default for ParserBuilder { + fn default() -> ParserBuilder { + ParserBuilder::new() + } +} + +impl ParserBuilder { + /// Create a new parser builder with a default configuration. + pub fn new() -> ParserBuilder { + ParserBuilder { + ignore_whitespace: false, + nest_limit: 250, + octal: false, + empty_min_range: false, + } + } + + /// Build a parser from this configuration with the given pattern. + pub fn build(&self) -> Parser { + Parser { + pos: Cell::new(Position { offset: 0, line: 1, column: 1 }), + capture_index: Cell::new(0), + nest_limit: self.nest_limit, + octal: self.octal, + empty_min_range: self.empty_min_range, + initial_ignore_whitespace: self.ignore_whitespace, + ignore_whitespace: Cell::new(self.ignore_whitespace), + comments: RefCell::new(vec![]), + stack_group: RefCell::new(vec![]), + stack_class: RefCell::new(vec![]), + capture_names: RefCell::new(vec![]), + scratch: RefCell::new(String::new()), + } + } + + /// Set the nesting limit for this parser. + /// + /// The nesting limit controls how deep the abstract syntax tree is allowed + /// to be. If the AST exceeds the given limit (e.g., with too many nested + /// groups), then an error is returned by the parser. + /// + /// The purpose of this limit is to act as a heuristic to prevent stack + /// overflow for consumers that do structural induction on an `Ast` using + /// explicit recursion. While this crate never does this (instead using + /// constant stack space and moving the call stack to the heap), other + /// crates may. + /// + /// This limit is not checked until the entire AST is parsed. Therefore, + /// if callers want to put a limit on the amount of heap space used, then + /// they should impose a limit on the length, in bytes, of the concrete + /// pattern string. In particular, this is viable since this parser + /// implementation will limit itself to heap space proportional to the + /// length of the pattern string. + /// + /// Note that a nest limit of `0` will return a nest limit error for most + /// patterns but not all. For example, a nest limit of `0` permits `a` but + /// not `ab`, since `ab` requires a concatenation, which results in a nest + /// depth of `1`. In general, a nest limit is not something that manifests + /// in an obvious way in the concrete syntax, therefore, it should not be + /// used in a granular way. 
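The behaviour described above can be exercised directly through the crate's public API. A minimal sketch, assuming the vendored parser is driven under its upstream crate name `regex_syntax` as an ordinary dependency:

    use regex_syntax::ast::parse::ParserBuilder;

    fn main() {
        // A nest limit of 0 still permits a bare literal...
        assert!(ParserBuilder::new().nest_limit(0).build().parse("a").is_ok());
        // ...but `ab` requires a concatenation, i.e. a nest depth of 1.
        assert!(ParserBuilder::new().nest_limit(0).build().parse("ab").is_err());
        // The default limit of 250 accepts ordinary patterns.
        assert!(ParserBuilder::new().build().parse("(a|b)+c{2,3}").is_ok());
    }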
+ pub fn nest_limit(&mut self, limit: u32) -> &mut ParserBuilder { + self.nest_limit = limit; + self + } + + /// Whether to support octal syntax or not. + /// + /// Octal syntax is a little-known way of uttering Unicode codepoints in + /// a regular expression. For example, `a`, `\x61`, `\u0061` and + /// `\141` are all equivalent regular expressions, where the last example + /// shows octal syntax. + /// + /// While supporting octal syntax isn't in and of itself a problem, it does + /// make good error messages harder. That is, in PCRE based regex engines, + /// syntax like `\0` invokes a backreference, which is explicitly + /// unsupported in Rust's regex engine. However, many users expect it to + /// be supported. Therefore, when octal support is disabled, the error + /// message will explicitly mention that backreferences aren't supported. + /// + /// Octal syntax is disabled by default. + pub fn octal(&mut self, yes: bool) -> &mut ParserBuilder { + self.octal = yes; + self + } + + /// Enable verbose mode in the regular expression. + /// + /// When enabled, verbose mode permits insignificant whitespace in many + /// places in the regular expression, as well as comments. Comments are + /// started using `#` and continue until the end of the line. + /// + /// By default, this is disabled. It may be selectively enabled in the + /// regular expression by using the `x` flag regardless of this setting. + pub fn ignore_whitespace(&mut self, yes: bool) -> &mut ParserBuilder { + self.ignore_whitespace = yes; + self + } + + /// Allow using `{,n}` as an equivalent to `{0,n}`. + /// + /// When enabled, the parser accepts `{,n}` as valid syntax for `{0,n}`. + /// Most regular expression engines don't support the `{,n}` syntax, but + /// some others do it, namely Python's `re` library. + /// + /// This is disabled by default. + pub fn empty_min_range(&mut self, yes: bool) -> &mut ParserBuilder { + self.empty_min_range = yes; + self + } +} + +/// A regular expression parser. +/// +/// This parses a string representation of a regular expression into an +/// abstract syntax tree. The size of the tree is proportional to the length +/// of the regular expression pattern. +/// +/// A `Parser` can be configured in more detail via a [`ParserBuilder`]. +#[derive(Clone, Debug)] +pub struct Parser { + /// The current position of the parser. + pos: Cell, + /// The current capture index. + capture_index: Cell, + /// The maximum number of open parens/brackets allowed. If the parser + /// exceeds this number, then an error is returned. + nest_limit: u32, + /// Whether to support octal syntax or not. When `false`, the parser will + /// return an error helpfully pointing out that backreferences are not + /// supported. + octal: bool, + /// The initial setting for `ignore_whitespace` as provided by + /// `ParserBuilder`. It is used when resetting the parser's state. + initial_ignore_whitespace: bool, + /// Whether the parser supports `{,n}` repetitions as an equivalent to + /// `{0,n}.` + empty_min_range: bool, + /// Whether whitespace should be ignored. When enabled, comments are + /// also permitted. + ignore_whitespace: Cell, + /// A list of comments, in order of appearance. + comments: RefCell>, + /// A stack of grouped sub-expressions, including alternations. + stack_group: RefCell>, + /// A stack of nested character classes. This is only non-empty when + /// parsing a class. + stack_class: RefCell>, + /// A sorted sequence of capture names. 
This is used to detect duplicate + /// capture names and report an error if one is detected. + capture_names: RefCell>, + /// A scratch buffer used in various places. Mostly this is used to + /// accumulate relevant characters from parts of a pattern. + scratch: RefCell, +} + +/// ParserI is the internal parser implementation. +/// +/// We use this separate type so that we can carry the provided pattern string +/// along with us. In particular, a `Parser` internal state is not tied to any +/// one pattern, but `ParserI` is. +/// +/// This type also lets us use `ParserI<&Parser>` in production code while +/// retaining the convenience of `ParserI` for tests, which sometimes +/// work against the internal interface of the parser. +#[derive(Clone, Debug)] +struct ParserI<'s, P> { + /// The parser state/configuration. + parser: P, + /// The full regular expression provided by the user. + pattern: &'s str, +} + +/// GroupState represents a single stack frame while parsing nested groups +/// and alternations. Each frame records the state up to an opening parenthesis +/// or a alternating bracket `|`. +#[derive(Clone, Debug)] +enum GroupState { + /// This state is pushed whenever an opening group is found. + Group { + /// The concatenation immediately preceding the opening group. + concat: ast::Concat, + /// The group that has been opened. Its sub-AST is always empty. + group: ast::Group, + /// Whether this group has the `x` flag enabled or not. + ignore_whitespace: bool, + }, + /// This state is pushed whenever a new alternation branch is found. If + /// an alternation branch is found and this state is at the top of the + /// stack, then this state should be modified to include the new + /// alternation. + Alternation(ast::Alternation), +} + +/// ClassState represents a single stack frame while parsing character classes. +/// Each frame records the state up to an intersection, difference, symmetric +/// difference or nested class. +/// +/// Note that a parser's character class stack is only non-empty when parsing +/// a character class. In all other cases, it is empty. +#[derive(Clone, Debug)] +enum ClassState { + /// This state is pushed whenever an opening bracket is found. + Open { + /// The union of class items immediately preceding this class. + union: ast::ClassSetUnion, + /// The class that has been opened. Typically this just corresponds + /// to the `[`, but it can also include `[^` since `^` indicates + /// negation of the class. + set: ast::ClassBracketed, + }, + /// This state is pushed when a operator is seen. When popped, the stored + /// set becomes the left hand side of the operator. + Op { + /// The type of the operation, i.e., &&, -- or ~~. + kind: ast::ClassSetBinaryOpKind, + /// The left-hand side of the operator. + lhs: ast::ClassSet, + }, +} + +impl Parser { + /// Create a new parser with a default configuration. + /// + /// The parser can be run with either the `parse` or `parse_with_comments` + /// methods. The parse methods return an abstract syntax tree. + /// + /// To set configuration options on the parser, use [`ParserBuilder`]. + pub fn new() -> Parser { + ParserBuilder::new().build() + } + + /// Parse the regular expression into an abstract syntax tree. + pub fn parse(&mut self, pattern: &str) -> Result { + ParserI::new(self, pattern).parse() + } + + /// Parse the regular expression and return an abstract syntax tree with + /// all of the comments found in the pattern. 
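For instance, with verbose mode switched on, comments are collected rather than discarded. A small sketch of that, again assuming the upstream `regex_syntax` crate name and the public `ast` and `comments` fields of `WithComments`:

    use regex_syntax::ast::parse::ParserBuilder;

    fn main() {
        let mut parser = ParserBuilder::new().ignore_whitespace(true).build();
        let parsed = parser
            .parse_with_comments("a b  # whitespace is insignificant here")
            .unwrap();
        // The spaces do not end up in the AST, and the trailing comment is
        // returned alongside it instead of being thrown away.
        assert!(!parsed.comments.is_empty());
    }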
+ pub fn parse_with_comments( + &mut self, + pattern: &str, + ) -> Result { + ParserI::new(self, pattern).parse_with_comments() + } + + /// Reset the internal state of a parser. + /// + /// This is called at the beginning of every parse. This prevents the + /// parser from running with inconsistent state (say, if a previous + /// invocation returned an error and the parser is reused). + fn reset(&self) { + // These settings should be in line with the construction + // in `ParserBuilder::build`. + self.pos.set(Position { offset: 0, line: 1, column: 1 }); + self.ignore_whitespace.set(self.initial_ignore_whitespace); + self.comments.borrow_mut().clear(); + self.stack_group.borrow_mut().clear(); + self.stack_class.borrow_mut().clear(); + } +} + +impl<'s, P: Borrow> ParserI<'s, P> { + /// Build an internal parser from a parser configuration and a pattern. + fn new(parser: P, pattern: &'s str) -> ParserI<'s, P> { + ParserI { parser, pattern } + } + + /// Return a reference to the parser state. + fn parser(&self) -> &Parser { + self.parser.borrow() + } + + /// Return a reference to the pattern being parsed. + fn pattern(&self) -> &str { + self.pattern + } + + /// Create a new error with the given span and error type. + fn error(&self, span: Span, kind: ast::ErrorKind) -> ast::Error { + ast::Error { kind, pattern: self.pattern().to_string(), span } + } + + /// Return the current offset of the parser. + /// + /// The offset starts at `0` from the beginning of the regular expression + /// pattern string. + fn offset(&self) -> usize { + self.parser().pos.get().offset + } + + /// Return the current line number of the parser. + /// + /// The line number starts at `1`. + fn line(&self) -> usize { + self.parser().pos.get().line + } + + /// Return the current column of the parser. + /// + /// The column number starts at `1` and is reset whenever a `\n` is seen. + fn column(&self) -> usize { + self.parser().pos.get().column + } + + /// Return the next capturing index. Each subsequent call increments the + /// internal index. + /// + /// The span given should correspond to the location of the opening + /// parenthesis. + /// + /// If the capture limit is exceeded, then an error is returned. + fn next_capture_index(&self, span: Span) -> Result { + let current = self.parser().capture_index.get(); + let i = current.checked_add(1).ok_or_else(|| { + self.error(span, ast::ErrorKind::CaptureLimitExceeded) + })?; + self.parser().capture_index.set(i); + Ok(i) + } + + /// Adds the given capture name to this parser. If this capture name has + /// already been used, then an error is returned. + fn add_capture_name(&self, cap: &ast::CaptureName) -> Result<()> { + let mut names = self.parser().capture_names.borrow_mut(); + match names + .binary_search_by_key(&cap.name.as_str(), |c| c.name.as_str()) + { + Err(i) => { + names.insert(i, cap.clone()); + Ok(()) + } + Ok(i) => Err(self.error( + cap.span, + ast::ErrorKind::GroupNameDuplicate { original: names[i].span }, + )), + } + } + + /// Return whether the parser should ignore whitespace or not. + fn ignore_whitespace(&self) -> bool { + self.parser().ignore_whitespace.get() + } + + /// Return the character at the current position of the parser. + /// + /// This panics if the current position does not point to a valid char. + fn char(&self) -> char { + self.char_at(self.offset()) + } + + /// Return the character at the given position. + /// + /// This panics if the given position does not point to a valid char. + fn char_at(&self, i: usize) -> char { + self.pattern()[i..] 
+ .chars() + .next() + .unwrap_or_else(|| panic!("expected char at offset {i}")) + } + + /// Bump the parser to the next Unicode scalar value. + /// + /// If the end of the input has been reached, then `false` is returned. + fn bump(&self) -> bool { + if self.is_eof() { + return false; + } + let Position { mut offset, mut line, mut column } = self.pos(); + if self.char() == '\n' { + line = line.checked_add(1).unwrap(); + column = 1; + } else { + column = column.checked_add(1).unwrap(); + } + offset += self.char().len_utf8(); + self.parser().pos.set(Position { offset, line, column }); + self.pattern()[self.offset()..].chars().next().is_some() + } + + /// If the substring starting at the current position of the parser has + /// the given prefix, then bump the parser to the character immediately + /// following the prefix and return true. Otherwise, don't bump the parser + /// and return false. + fn bump_if(&self, prefix: &str) -> bool { + if self.pattern()[self.offset()..].starts_with(prefix) { + for _ in 0..prefix.chars().count() { + self.bump(); + } + true + } else { + false + } + } + + /// Returns true if and only if the parser is positioned at a look-around + /// prefix. The conditions under which this returns true must always + /// correspond to a regular expression that would otherwise be considered + /// invalid. + /// + /// This should only be called immediately after parsing the opening of + /// a group or a set of flags. + fn is_lookaround_prefix(&self) -> bool { + self.bump_if("?=") + || self.bump_if("?!") + || self.bump_if("?<=") + || self.bump_if("?<!") + } + + /// Bump the parser, and if the `x` flag is enabled, bump through any + /// subsequent spaces. Return true if and only if the parser is not at + /// EOF. + fn bump_and_bump_space(&self) -> bool { + if !self.bump() { + return false; + } + self.bump_space(); + !self.is_eof() + } + + /// If the `x` flag is enabled (i.e., whitespace insensitivity with + /// comments), then this will advance the parser through all whitespace + /// and comments to the next non-whitespace non-comment byte. + /// + /// If the `x` flag is disabled, then this is a no-op. + /// + /// This should be used selectively throughout the parser where + /// arbitrary whitespace is permitted when the `x` flag is enabled. For + /// example, `{ 5 , 6}` is equivalent to `{5,6}`. + fn bump_space(&self) { + if !self.ignore_whitespace() { + return; + } + while !self.is_eof() { + if self.char().is_whitespace() { + self.bump(); + } else if self.char() == '#' { + let start = self.pos(); + let mut comment_text = String::new(); + self.bump(); + while !self.is_eof() { + let c = self.char(); + self.bump(); + if c == '\n' { + break; + } + comment_text.push(c); + } + let comment = ast::Comment { + span: Span::new(start, self.pos()), + comment: comment_text, + }; + self.parser().comments.borrow_mut().push(comment); + } else { + break; + } + } + } + + /// Peek at the next character in the input without advancing the parser. + /// + /// If the input has been exhausted, then this returns `None`. + fn peek(&self) -> Option<char> { + if self.is_eof() { + return None; + } + self.pattern()[self.offset() + self.char().len_utf8()..].chars().next() + } + + /// Like peek, but will ignore spaces when the parser is in whitespace + /// insensitive mode.
+ fn peek_space(&self) -> Option { + if !self.ignore_whitespace() { + return self.peek(); + } + if self.is_eof() { + return None; + } + let mut start = self.offset() + self.char().len_utf8(); + let mut in_comment = false; + for (i, c) in self.pattern()[start..].char_indices() { + if c.is_whitespace() { + continue; + } else if !in_comment && c == '#' { + in_comment = true; + } else if in_comment && c == '\n' { + in_comment = false; + } else { + start += i; + break; + } + } + self.pattern()[start..].chars().next() + } + + /// Returns true if the next call to `bump` would return false. + fn is_eof(&self) -> bool { + self.offset() == self.pattern().len() + } + + /// Return the current position of the parser, which includes the offset, + /// line and column. + fn pos(&self) -> Position { + self.parser().pos.get() + } + + /// Create a span at the current position of the parser. Both the start + /// and end of the span are set. + fn span(&self) -> Span { + Span::splat(self.pos()) + } + + /// Create a span that covers the current character. + fn span_char(&self) -> Span { + let mut next = Position { + offset: self.offset().checked_add(self.char().len_utf8()).unwrap(), + line: self.line(), + column: self.column().checked_add(1).unwrap(), + }; + if self.char() == '\n' { + next.line += 1; + next.column = 1; + } + Span::new(self.pos(), next) + } + + /// Parse and push a single alternation on to the parser's internal stack. + /// If the top of the stack already has an alternation, then add to that + /// instead of pushing a new one. + /// + /// The concatenation given corresponds to a single alternation branch. + /// The concatenation returned starts the next branch and is empty. + /// + /// This assumes the parser is currently positioned at `|` and will advance + /// the parser to the character following `|`. + #[inline(never)] + fn push_alternate(&self, mut concat: ast::Concat) -> Result { + assert_eq!(self.char(), '|'); + concat.span.end = self.pos(); + self.push_or_add_alternation(concat); + self.bump(); + Ok(ast::Concat { span: self.span(), asts: vec![] }) + } + + /// Pushes or adds the given branch of an alternation to the parser's + /// internal stack of state. + fn push_or_add_alternation(&self, concat: ast::Concat) { + use self::GroupState::*; + + let mut stack = self.parser().stack_group.borrow_mut(); + if let Some(&mut Alternation(ref mut alts)) = stack.last_mut() { + alts.asts.push(concat.into_ast()); + return; + } + stack.push(Alternation(ast::Alternation { + span: Span::new(concat.span.start, self.pos()), + asts: vec![concat.into_ast()], + })); + } + + /// Parse and push a group AST (and its parent concatenation) on to the + /// parser's internal stack. Return a fresh concatenation corresponding + /// to the group's sub-AST. + /// + /// If a set of flags was found (with no group), then the concatenation + /// is returned with that set of flags added. + /// + /// This assumes that the parser is currently positioned on the opening + /// parenthesis. It advances the parser to the character at the start + /// of the sub-expression (or adjoining expression). + /// + /// If there was a problem parsing the start of the group, then an error + /// is returned. + #[inline(never)] + fn push_group(&self, mut concat: ast::Concat) -> Result { + assert_eq!(self.char(), '('); + match self.parse_group()? 
{ + Either::Left(set) => { + let ignore = set.flags.flag_state(ast::Flag::IgnoreWhitespace); + if let Some(v) = ignore { + self.parser().ignore_whitespace.set(v); + } + + concat.asts.push(Ast::flags(set)); + Ok(concat) + } + Either::Right(group) => { + let old_ignore_whitespace = self.ignore_whitespace(); + let new_ignore_whitespace = group + .flags() + .and_then(|f| f.flag_state(ast::Flag::IgnoreWhitespace)) + .unwrap_or(old_ignore_whitespace); + self.parser().stack_group.borrow_mut().push( + GroupState::Group { + concat, + group, + ignore_whitespace: old_ignore_whitespace, + }, + ); + self.parser().ignore_whitespace.set(new_ignore_whitespace); + Ok(ast::Concat { span: self.span(), asts: vec![] }) + } + } + } + + /// Pop a group AST from the parser's internal stack and set the group's + /// AST to the given concatenation. Return the concatenation containing + /// the group. + /// + /// This assumes that the parser is currently positioned on the closing + /// parenthesis and advances the parser to the character following the `)`. + /// + /// If no such group could be popped, then an unopened group error is + /// returned. + #[inline(never)] + fn pop_group(&self, mut group_concat: ast::Concat) -> Result { + use self::GroupState::*; + + assert_eq!(self.char(), ')'); + let mut stack = self.parser().stack_group.borrow_mut(); + let (mut prior_concat, mut group, ignore_whitespace, alt) = match stack + .pop() + { + Some(Group { concat, group, ignore_whitespace }) => { + (concat, group, ignore_whitespace, None) + } + Some(Alternation(alt)) => match stack.pop() { + Some(Group { concat, group, ignore_whitespace }) => { + (concat, group, ignore_whitespace, Some(alt)) + } + None | Some(Alternation(_)) => { + return Err(self.error( + self.span_char(), + ast::ErrorKind::GroupUnopened, + )); + } + }, + None => { + return Err(self + .error(self.span_char(), ast::ErrorKind::GroupUnopened)); + } + }; + self.parser().ignore_whitespace.set(ignore_whitespace); + group_concat.span.end = self.pos(); + self.bump(); + group.span.end = self.pos(); + match alt { + Some(mut alt) => { + alt.span.end = group_concat.span.end; + alt.asts.push(group_concat.into_ast()); + group.ast = Box::new(alt.into_ast()); + } + None => { + group.ast = Box::new(group_concat.into_ast()); + } + } + prior_concat.asts.push(Ast::group(group)); + Ok(prior_concat) + } + + /// Pop the last state from the parser's internal stack, if it exists, and + /// add the given concatenation to it. There either must be no state or a + /// single alternation item on the stack. Any other scenario produces an + /// error. + /// + /// This assumes that the parser has advanced to the end. + #[inline(never)] + fn pop_group_end(&self, mut concat: ast::Concat) -> Result { + concat.span.end = self.pos(); + let mut stack = self.parser().stack_group.borrow_mut(); + let ast = match stack.pop() { + None => Ok(concat.into_ast()), + Some(GroupState::Alternation(mut alt)) => { + alt.span.end = self.pos(); + alt.asts.push(concat.into_ast()); + Ok(Ast::alternation(alt)) + } + Some(GroupState::Group { group, .. }) => { + return Err( + self.error(group.span, ast::ErrorKind::GroupUnclosed) + ); + } + }; + // If we try to pop again, there should be nothing. + match stack.pop() { + None => ast, + Some(GroupState::Alternation(_)) => { + // This unreachable is unfortunate. 
This case can't happen + // because the only way we can be here is if there were two + // `GroupState::Alternation`s adjacent in the parser's stack, + // which we guarantee to never happen because we never push a + // `GroupState::Alternation` if one is already at the top of + // the stack. + unreachable!() + } + Some(GroupState::Group { group, .. }) => { + Err(self.error(group.span, ast::ErrorKind::GroupUnclosed)) + } + } + } + + /// Parse the opening of a character class and push the current class + /// parsing context onto the parser's stack. This assumes that the parser + /// is positioned at an opening `[`. The given union should correspond to + /// the union of set items built up before seeing the `[`. + /// + /// If there was a problem parsing the opening of the class, then an error + /// is returned. Otherwise, a new union of set items for the class is + /// returned (which may be populated with either a `]` or a `-`). + #[inline(never)] + fn push_class_open( + &self, + parent_union: ast::ClassSetUnion, + ) -> Result { + assert_eq!(self.char(), '['); + + let (nested_set, nested_union) = self.parse_set_class_open()?; + self.parser() + .stack_class + .borrow_mut() + .push(ClassState::Open { union: parent_union, set: nested_set }); + Ok(nested_union) + } + + /// Parse the end of a character class set and pop the character class + /// parser stack. The union given corresponds to the last union built + /// before seeing the closing `]`. The union returned corresponds to the + /// parent character class set with the nested class added to it. + /// + /// This assumes that the parser is positioned at a `]` and will advance + /// the parser to the byte immediately following the `]`. + /// + /// If the stack is empty after popping, then this returns the final + /// "top-level" character class AST (where a "top-level" character class + /// is one that is not nested inside any other character class). + /// + /// If there is no corresponding opening bracket on the parser's stack, + /// then an error is returned. + #[inline(never)] + fn pop_class( + &self, + nested_union: ast::ClassSetUnion, + ) -> Result> { + assert_eq!(self.char(), ']'); + + let item = ast::ClassSet::Item(nested_union.into_item()); + let prevset = self.pop_class_op(item); + let mut stack = self.parser().stack_class.borrow_mut(); + match stack.pop() { + None => { + // We can never observe an empty stack: + // + // 1) We are guaranteed to start with a non-empty stack since + // the character class parser is only initiated when it sees + // a `[`. + // 2) If we ever observe an empty stack while popping after + // seeing a `]`, then we signal the character class parser + // to terminate. + panic!("unexpected empty character class stack") + } + Some(ClassState::Op { .. }) => { + // This panic is unfortunate, but this case is impossible + // since we already popped the Op state if one exists above. + // Namely, every push to the class parser stack is guarded by + // whether an existing Op is already on the top of the stack. + // If it is, the existing Op is modified. That is, the stack + // can never have consecutive Op states. 
+ panic!("unexpected ClassState::Op") + } + Some(ClassState::Open { mut union, mut set }) => { + self.bump(); + set.span.end = self.pos(); + set.kind = prevset; + if stack.is_empty() { + Ok(Either::Right(set)) + } else { + union.push(ast::ClassSetItem::Bracketed(Box::new(set))); + Ok(Either::Left(union)) + } + } + } + } + + /// Return an "unclosed class" error whose span points to the most + /// recently opened class. + /// + /// This should only be called while parsing a character class. + #[inline(never)] + fn unclosed_class_error(&self) -> ast::Error { + for state in self.parser().stack_class.borrow().iter().rev() { + if let ClassState::Open { ref set, .. } = *state { + return self.error(set.span, ast::ErrorKind::ClassUnclosed); + } + } + // We are guaranteed to have a non-empty stack with at least + // one open bracket, so we should never get here. + panic!("no open character class found") + } + + /// Push the current set of class items on to the class parser's stack as + /// the left hand side of the given operator. + /// + /// A fresh set union is returned, which should be used to build the right + /// hand side of this operator. + #[inline(never)] + fn push_class_op( + &self, + next_kind: ast::ClassSetBinaryOpKind, + next_union: ast::ClassSetUnion, + ) -> ast::ClassSetUnion { + let item = ast::ClassSet::Item(next_union.into_item()); + let new_lhs = self.pop_class_op(item); + self.parser() + .stack_class + .borrow_mut() + .push(ClassState::Op { kind: next_kind, lhs: new_lhs }); + ast::ClassSetUnion { span: self.span(), items: vec![] } + } + + /// Pop a character class set from the character class parser stack. If the + /// top of the stack is just an item (not an operation), then return the + /// given set unchanged. If the top of the stack is an operation, then the + /// given set will be used as the rhs of the operation on the top of the + /// stack. In that case, the binary operation is returned as a set. + #[inline(never)] + fn pop_class_op(&self, rhs: ast::ClassSet) -> ast::ClassSet { + let mut stack = self.parser().stack_class.borrow_mut(); + let (kind, lhs) = match stack.pop() { + Some(ClassState::Op { kind, lhs }) => (kind, lhs), + Some(state @ ClassState::Open { .. }) => { + stack.push(state); + return rhs; + } + None => unreachable!(), + }; + let span = Span::new(lhs.span().start, rhs.span().end); + ast::ClassSet::BinaryOp(ast::ClassSetBinaryOp { + span, + kind, + lhs: Box::new(lhs), + rhs: Box::new(rhs), + }) + } +} + +impl<'s, P: Borrow> ParserI<'s, P> { + /// Parse the regular expression into an abstract syntax tree. + fn parse(&self) -> Result { + self.parse_with_comments().map(|astc| astc.ast) + } + + /// Parse the regular expression and return an abstract syntax tree with + /// all of the comments found in the pattern. + fn parse_with_comments(&self) -> Result { + assert_eq!(self.offset(), 0, "parser can only be used once"); + self.parser().reset(); + let mut concat = ast::Concat { span: self.span(), asts: vec![] }; + loop { + self.bump_space(); + if self.is_eof() { + break; + } + match self.char() { + '(' => concat = self.push_group(concat)?, + ')' => concat = self.pop_group(concat)?, + '|' => concat = self.push_alternate(concat)?, + '[' => { + let class = self.parse_set_class()?; + concat.asts.push(Ast::class_bracketed(class)); + } + '?' 
=> { + concat = self.parse_uncounted_repetition( + concat, + ast::RepetitionKind::ZeroOrOne, + )?; + } + '*' => { + concat = self.parse_uncounted_repetition( + concat, + ast::RepetitionKind::ZeroOrMore, + )?; + } + '+' => { + concat = self.parse_uncounted_repetition( + concat, + ast::RepetitionKind::OneOrMore, + )?; + } + '{' => { + concat = self.parse_counted_repetition(concat)?; + } + _ => concat.asts.push(self.parse_primitive()?.into_ast()), + } + } + let ast = self.pop_group_end(concat)?; + NestLimiter::new(self).check(&ast)?; + Ok(ast::WithComments { + ast, + comments: mem::replace( + &mut *self.parser().comments.borrow_mut(), + vec![], + ), + }) + } + + /// Parses an uncounted repetition operation. An uncounted repetition + /// operator includes ?, * and +, but does not include the {m,n} syntax. + /// The given `kind` should correspond to the operator observed by the + /// caller. + /// + /// This assumes that the parser is currently positioned at the repetition + /// operator and advances the parser to the first character after the + /// operator. (Note that the operator may include a single additional `?`, + /// which makes the operator ungreedy.) + /// + /// The caller should include the concatenation that is being built. The + /// concatenation returned includes the repetition operator applied to the + /// last expression in the given concatenation. + #[inline(never)] + fn parse_uncounted_repetition( + &self, + mut concat: ast::Concat, + kind: ast::RepetitionKind, + ) -> Result { + assert!( + self.char() == '?' || self.char() == '*' || self.char() == '+' + ); + let op_start = self.pos(); + let ast = match concat.asts.pop() { + Some(ast) => ast, + None => { + return Err( + self.error(self.span(), ast::ErrorKind::RepetitionMissing) + ) + } + }; + match ast { + Ast::Empty(_) | Ast::Flags(_) => { + return Err( + self.error(self.span(), ast::ErrorKind::RepetitionMissing) + ) + } + _ => {} + } + let mut greedy = true; + if self.bump() && self.char() == '?' { + greedy = false; + self.bump(); + } + concat.asts.push(Ast::repetition(ast::Repetition { + span: ast.span().with_end(self.pos()), + op: ast::RepetitionOp { + span: Span::new(op_start, self.pos()), + kind, + }, + greedy, + ast: Box::new(ast), + })); + Ok(concat) + } + + /// Parses a counted repetition operation. A counted repetition operator + /// corresponds to the {m,n} syntax, and does not include the ?, * or + + /// operators. + /// + /// This assumes that the parser is currently positioned at the opening `{` + /// and advances the parser to the first character after the operator. + /// (Note that the operator may include a single additional `?`, which + /// makes the operator ungreedy.) + /// + /// The caller should include the concatenation that is being built. The + /// concatenation returned includes the repetition operator applied to the + /// last expression in the given concatenation. 
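A few concrete forms that this routine accepts and rejects, as a sketch against the crate's public parser (upstream name `regex_syntax` assumed):

    use regex_syntax::ast::parse::Parser;

    fn main() {
        // `{m}`, `{m,}` and `{m,n}` are all counted repetitions.
        assert!(Parser::new().parse("a{3}").is_ok());
        assert!(Parser::new().parse("a{2,}").is_ok());
        assert!(Parser::new().parse("a{2,5}?").is_ok()); // ungreedy variant
        // An inverted range is rejected as an invalid repetition count.
        assert!(Parser::new().parse("a{5,2}").is_err());
        // A repetition operator with nothing to repeat is also an error.
        assert!(Parser::new().parse("{2,5}").is_err());
    }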
+ #[inline(never)] + fn parse_counted_repetition( + &self, + mut concat: ast::Concat, + ) -> Result { + assert!(self.char() == '{'); + let start = self.pos(); + let ast = match concat.asts.pop() { + Some(ast) => ast, + None => { + return Err( + self.error(self.span(), ast::ErrorKind::RepetitionMissing) + ) + } + }; + match ast { + Ast::Empty(_) | Ast::Flags(_) => { + return Err( + self.error(self.span(), ast::ErrorKind::RepetitionMissing) + ) + } + _ => {} + } + if !self.bump_and_bump_space() { + return Err(self.error( + Span::new(start, self.pos()), + ast::ErrorKind::RepetitionCountUnclosed, + )); + } + let count_start = specialize_err( + self.parse_decimal(), + ast::ErrorKind::DecimalEmpty, + ast::ErrorKind::RepetitionCountDecimalEmpty, + ); + if self.is_eof() { + return Err(self.error( + Span::new(start, self.pos()), + ast::ErrorKind::RepetitionCountUnclosed, + )); + } + let range = if self.char() == ',' { + if !self.bump_and_bump_space() { + return Err(self.error( + Span::new(start, self.pos()), + ast::ErrorKind::RepetitionCountUnclosed, + )); + } + if self.char() != '}' { + let count_start = match count_start { + Ok(c) => c, + Err(err) + if err.kind + == ast::ErrorKind::RepetitionCountDecimalEmpty => + { + if self.parser().empty_min_range { + 0 + } else { + return Err(err); + } + } + err => err?, + }; + let count_end = specialize_err( + self.parse_decimal(), + ast::ErrorKind::DecimalEmpty, + ast::ErrorKind::RepetitionCountDecimalEmpty, + )?; + ast::RepetitionRange::Bounded(count_start, count_end) + } else { + ast::RepetitionRange::AtLeast(count_start?) + } + } else { + ast::RepetitionRange::Exactly(count_start?) + }; + + if self.is_eof() || self.char() != '}' { + return Err(self.error( + Span::new(start, self.pos()), + ast::ErrorKind::RepetitionCountUnclosed, + )); + } + + let mut greedy = true; + if self.bump_and_bump_space() && self.char() == '?' { + greedy = false; + self.bump(); + } + + let op_span = Span::new(start, self.pos()); + if !range.is_valid() { + return Err( + self.error(op_span, ast::ErrorKind::RepetitionCountInvalid) + ); + } + concat.asts.push(Ast::repetition(ast::Repetition { + span: ast.span().with_end(self.pos()), + op: ast::RepetitionOp { + span: op_span, + kind: ast::RepetitionKind::Range(range), + }, + greedy, + ast: Box::new(ast), + })); + Ok(concat) + } + + /// Parse a group (which contains a sub-expression) or a set of flags. + /// + /// If a group was found, then it is returned with an empty AST. If a set + /// of flags is found, then that set is returned. + /// + /// The parser should be positioned at the opening parenthesis. + /// + /// This advances the parser to the character before the start of the + /// sub-expression (in the case of a group) or to the closing parenthesis + /// immediately following the set of flags. + /// + /// # Errors + /// + /// If flags are given and incorrectly specified, then a corresponding + /// error is returned. + /// + /// If a capture name is given and it is incorrectly specified, then a + /// corresponding error is returned. 
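Group parsing covers several spellings; a brief sketch of accepted and rejected cases, assuming the upstream `regex_syntax` crate name:

    use regex_syntax::ast::parse::Parser;

    fn main() {
        // Both the `(?P<name>...)` and `(?<name>...)` spellings are accepted.
        assert!(Parser::new().parse(r"(?P<word>\w+)").is_ok());
        assert!(Parser::new().parse(r"(?<word>\w+)").is_ok());
        // Reusing a capture name is reported as a duplicate-name error.
        assert!(Parser::new().parse(r"(?<a>x)(?<a>y)").is_err());
        // An empty flag group `(?)` is rejected rather than ignored.
        assert!(Parser::new().parse("(?)").is_err());
    }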
+ #[inline(never)] + fn parse_group(&self) -> Result> { + assert_eq!(self.char(), '('); + let open_span = self.span_char(); + self.bump(); + self.bump_space(); + if self.is_lookaround_prefix() { + return Err(self.error( + Span::new(open_span.start, self.span().end), + ast::ErrorKind::UnsupportedLookAround, + )); + } + let inner_span = self.span(); + let mut starts_with_p = true; + if self.bump_if("?P<") || { + starts_with_p = false; + self.bump_if("?<") + } { + let capture_index = self.next_capture_index(open_span)?; + let name = self.parse_capture_name(capture_index)?; + Ok(Either::Right(ast::Group { + span: open_span, + kind: ast::GroupKind::CaptureName { starts_with_p, name }, + ast: Box::new(Ast::empty(self.span())), + })) + } else if self.bump_if("?") { + if self.is_eof() { + return Err( + self.error(open_span, ast::ErrorKind::GroupUnclosed) + ); + } + let flags = self.parse_flags()?; + let char_end = self.char(); + self.bump(); + if char_end == ')' { + // We don't allow empty flags, e.g., `(?)`. We instead + // interpret it as a repetition operator missing its argument. + if flags.items.is_empty() { + return Err(self.error( + inner_span, + ast::ErrorKind::RepetitionMissing, + )); + } + Ok(Either::Left(ast::SetFlags { + span: Span { end: self.pos(), ..open_span }, + flags, + })) + } else { + assert_eq!(char_end, ':'); + Ok(Either::Right(ast::Group { + span: open_span, + kind: ast::GroupKind::NonCapturing(flags), + ast: Box::new(Ast::empty(self.span())), + })) + } + } else { + let capture_index = self.next_capture_index(open_span)?; + Ok(Either::Right(ast::Group { + span: open_span, + kind: ast::GroupKind::CaptureIndex(capture_index), + ast: Box::new(Ast::empty(self.span())), + })) + } + } + + /// Parses a capture group name. Assumes that the parser is positioned at + /// the first character in the name following the opening `<` (and may + /// possibly be EOF). This advances the parser to the first character + /// following the closing `>`. + /// + /// The caller must provide the capture index of the group for this name. + #[inline(never)] + fn parse_capture_name( + &self, + capture_index: u32, + ) -> Result { + if self.is_eof() { + return Err(self + .error(self.span(), ast::ErrorKind::GroupNameUnexpectedEof)); + } + let start = self.pos(); + loop { + if self.char() == '>' { + break; + } + if !is_capture_char(self.char(), self.pos() == start) { + return Err(self.error( + self.span_char(), + ast::ErrorKind::GroupNameInvalid, + )); + } + if !self.bump() { + break; + } + } + let end = self.pos(); + if self.is_eof() { + return Err(self + .error(self.span(), ast::ErrorKind::GroupNameUnexpectedEof)); + } + assert_eq!(self.char(), '>'); + self.bump(); + let name = &self.pattern()[start.offset..end.offset]; + if name.is_empty() { + return Err(self.error( + Span::new(start, start), + ast::ErrorKind::GroupNameEmpty, + )); + } + let capname = ast::CaptureName { + span: Span::new(start, end), + name: name.to_string(), + index: capture_index, + }; + self.add_capture_name(&capname)?; + Ok(capname) + } + + /// Parse a sequence of flags starting at the current character. + /// + /// This advances the parser to the character immediately following the + /// flags, which is guaranteed to be either `:` or `)`. + /// + /// # Errors + /// + /// If any flags are duplicated, then an error is returned. + /// + /// If the negation operator is used more than once, then an error is + /// returned. 
+ /// + /// If no flags could be found or if the negation operation is not followed + /// by any flags, then an error is returned. + #[inline(never)] + fn parse_flags(&self) -> Result { + let mut flags = ast::Flags { span: self.span(), items: vec![] }; + let mut last_was_negation = None; + while self.char() != ':' && self.char() != ')' { + if self.char() == '-' { + last_was_negation = Some(self.span_char()); + let item = ast::FlagsItem { + span: self.span_char(), + kind: ast::FlagsItemKind::Negation, + }; + if let Some(i) = flags.add_item(item) { + return Err(self.error( + self.span_char(), + ast::ErrorKind::FlagRepeatedNegation { + original: flags.items[i].span, + }, + )); + } + } else { + last_was_negation = None; + let item = ast::FlagsItem { + span: self.span_char(), + kind: ast::FlagsItemKind::Flag(self.parse_flag()?), + }; + if let Some(i) = flags.add_item(item) { + return Err(self.error( + self.span_char(), + ast::ErrorKind::FlagDuplicate { + original: flags.items[i].span, + }, + )); + } + } + if !self.bump() { + return Err( + self.error(self.span(), ast::ErrorKind::FlagUnexpectedEof) + ); + } + } + if let Some(span) = last_was_negation { + return Err(self.error(span, ast::ErrorKind::FlagDanglingNegation)); + } + flags.span.end = self.pos(); + Ok(flags) + } + + /// Parse the current character as a flag. Do not advance the parser. + /// + /// # Errors + /// + /// If the flag is not recognized, then an error is returned. + #[inline(never)] + fn parse_flag(&self) -> Result { + match self.char() { + 'i' => Ok(ast::Flag::CaseInsensitive), + 'm' => Ok(ast::Flag::MultiLine), + 's' => Ok(ast::Flag::DotMatchesNewLine), + 'U' => Ok(ast::Flag::SwapGreed), + 'u' => Ok(ast::Flag::Unicode), + 'R' => Ok(ast::Flag::CRLF), + 'x' => Ok(ast::Flag::IgnoreWhitespace), + _ => { + Err(self + .error(self.span_char(), ast::ErrorKind::FlagUnrecognized)) + } + } + } + + /// Parse a primitive AST. e.g., A literal, non-set character class or + /// assertion. + /// + /// This assumes that the parser expects a primitive at the current + /// location. i.e., All other non-primitive cases have been handled. + /// For example, if the parser's position is at `|`, then `|` will be + /// treated as a literal (e.g., inside a character class). + /// + /// This advances the parser to the first character immediately following + /// the primitive. + fn parse_primitive(&self) -> Result { + match self.char() { + '\\' => self.parse_escape(), + '.' => { + let ast = Primitive::Dot(self.span_char()); + self.bump(); + Ok(ast) + } + '^' => { + let ast = Primitive::Assertion(ast::Assertion { + span: self.span_char(), + kind: ast::AssertionKind::StartLine, + }); + self.bump(); + Ok(ast) + } + '$' => { + let ast = Primitive::Assertion(ast::Assertion { + span: self.span_char(), + kind: ast::AssertionKind::EndLine, + }); + self.bump(); + Ok(ast) + } + c => { + let ast = Primitive::Literal(ast::Literal { + span: self.span_char(), + kind: ast::LiteralKind::Verbatim, + c, + }); + self.bump(); + Ok(ast) + } + } + } + + /// Parse an escape sequence as a primitive AST. + /// + /// This assumes the parser is positioned at the start of the escape + /// sequence, i.e., `\`. It advances the parser to the first position + /// immediately following the escape sequence. 
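The distinction between octal escapes and backreferences mentioned earlier shows up here as well; a sketch using the public parser (upstream `regex_syntax` name assumed):

    use regex_syntax::ast::parse::{Parser, ParserBuilder};

    fn main() {
        // Meta escapes, fixed-width hex and bracketed hex all parse.
        assert!(Parser::new().parse(r"\.\x61\u0061\x{1F600}").is_ok());
        // `\1` looks like a backreference, which is unsupported, so the
        // default configuration rejects it with a pointed error.
        assert!(Parser::new().parse(r"\1").is_err());
        // With octal syntax enabled it parses as the codepoint U+0001.
        assert!(ParserBuilder::new().octal(true).build().parse(r"\1").is_ok());
    }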
+ #[inline(never)] + fn parse_escape(&self) -> Result { + assert_eq!(self.char(), '\\'); + let start = self.pos(); + if !self.bump() { + return Err(self.error( + Span::new(start, self.pos()), + ast::ErrorKind::EscapeUnexpectedEof, + )); + } + let c = self.char(); + // Put some of the more complicated routines into helpers. + match c { + '0'..='7' => { + if !self.parser().octal { + return Err(self.error( + Span::new(start, self.span_char().end), + ast::ErrorKind::UnsupportedBackreference, + )); + } + let mut lit = self.parse_octal(); + lit.span.start = start; + return Ok(Primitive::Literal(lit)); + } + '8'..='9' if !self.parser().octal => { + return Err(self.error( + Span::new(start, self.span_char().end), + ast::ErrorKind::UnsupportedBackreference, + )); + } + 'x' | 'u' | 'U' => { + let mut lit = self.parse_hex()?; + lit.span.start = start; + return Ok(Primitive::Literal(lit)); + } + 'p' | 'P' => { + let mut cls = self.parse_unicode_class()?; + cls.span.start = start; + return Ok(Primitive::Unicode(cls)); + } + 'd' | 's' | 'w' | 'D' | 'S' | 'W' => { + let mut cls = self.parse_perl_class(); + cls.span.start = start; + return Ok(Primitive::Perl(cls)); + } + _ => {} + } + + // Handle all of the one letter sequences inline. + self.bump(); + let span = Span::new(start, self.pos()); + if is_meta_character(c) { + return Ok(Primitive::Literal(ast::Literal { + span, + kind: ast::LiteralKind::Meta, + c, + })); + } + if is_escapeable_character(c) { + return Ok(Primitive::Literal(ast::Literal { + span, + kind: ast::LiteralKind::Superfluous, + c, + })); + } + let special = |kind, c| { + Ok(Primitive::Literal(ast::Literal { + span, + kind: ast::LiteralKind::Special(kind), + c, + })) + }; + match c { + 'a' => special(ast::SpecialLiteralKind::Bell, '\x07'), + 'f' => special(ast::SpecialLiteralKind::FormFeed, '\x0C'), + 't' => special(ast::SpecialLiteralKind::Tab, '\t'), + 'n' => special(ast::SpecialLiteralKind::LineFeed, '\n'), + 'r' => special(ast::SpecialLiteralKind::CarriageReturn, '\r'), + 'v' => special(ast::SpecialLiteralKind::VerticalTab, '\x0B'), + 'A' => Ok(Primitive::Assertion(ast::Assertion { + span, + kind: ast::AssertionKind::StartText, + })), + 'z' => Ok(Primitive::Assertion(ast::Assertion { + span, + kind: ast::AssertionKind::EndText, + })), + 'b' => { + let mut wb = ast::Assertion { + span, + kind: ast::AssertionKind::WordBoundary, + }; + // After a \b, we "try" to parse things like \b{start} for + // special word boundary assertions. + if !self.is_eof() && self.char() == '{' { + if let Some(kind) = + self.maybe_parse_special_word_boundary(start)? + { + wb.kind = kind; + wb.span.end = self.pos(); + } + } + Ok(Primitive::Assertion(wb)) + } + 'B' => Ok(Primitive::Assertion(ast::Assertion { + span, + kind: ast::AssertionKind::NotWordBoundary, + })), + '<' => Ok(Primitive::Assertion(ast::Assertion { + span, + kind: ast::AssertionKind::WordBoundaryStartAngle, + })), + '>' => Ok(Primitive::Assertion(ast::Assertion { + span, + kind: ast::AssertionKind::WordBoundaryEndAngle, + })), + _ => Err(self.error(span, ast::ErrorKind::EscapeUnrecognized)), + } + } + + /// Attempt to parse a specialty word boundary. That is, `\b{start}`, + /// `\b{end}`, `\b{start-half}` or `\b{end-half}`. + /// + /// This is similar to `maybe_parse_ascii_class` in that, in most cases, + /// if it fails it will just return `None` with no error. This is done + /// because `\b{5}` is a valid expression and we want to let that be parsed + /// by the existing counted repetition parsing code. 
(I thought about just + /// invoking the counted repetition code from here, but it seemed a little + /// ham-fisted.) + /// + /// Unlike `maybe_parse_ascii_class` though, this can return an error. + /// Namely, if we definitely know it isn't a counted repetition, then we + /// return an error specific to the specialty word boundaries. + /// + /// This assumes the parser is positioned at a `{` immediately following + /// a `\b`. When `None` is returned, the parser is returned to the position + /// at which it started: pointing at a `{`. + /// + /// The position given should correspond to the start of the `\b`. + fn maybe_parse_special_word_boundary( + &self, + wb_start: Position, + ) -> Result> { + assert_eq!(self.char(), '{'); + + let is_valid_char = |c| match c { + 'A'..='Z' | 'a'..='z' | '-' => true, + _ => false, + }; + let start = self.pos(); + if !self.bump_and_bump_space() { + return Err(self.error( + Span::new(wb_start, self.pos()), + ast::ErrorKind::SpecialWordOrRepetitionUnexpectedEof, + )); + } + let start_contents = self.pos(); + // This is one of the critical bits: if the first non-whitespace + // character isn't in [-A-Za-z] (i.e., this can't be a special word + // boundary), then we bail and let the counted repetition parser deal + // with this. + if !is_valid_char(self.char()) { + self.parser().pos.set(start); + return Ok(None); + } + + // Now collect up our chars until we see a '}'. + let mut scratch = self.parser().scratch.borrow_mut(); + scratch.clear(); + while !self.is_eof() && is_valid_char(self.char()) { + scratch.push(self.char()); + self.bump_and_bump_space(); + } + if self.is_eof() || self.char() != '}' { + return Err(self.error( + Span::new(start, self.pos()), + ast::ErrorKind::SpecialWordBoundaryUnclosed, + )); + } + let end = self.pos(); + self.bump(); + let kind = match scratch.as_str() { + "start" => ast::AssertionKind::WordBoundaryStart, + "end" => ast::AssertionKind::WordBoundaryEnd, + "start-half" => ast::AssertionKind::WordBoundaryStartHalf, + "end-half" => ast::AssertionKind::WordBoundaryEndHalf, + _ => { + return Err(self.error( + Span::new(start_contents, end), + ast::ErrorKind::SpecialWordBoundaryUnrecognized, + )) + } + }; + Ok(Some(kind)) + } + + /// Parse an octal representation of a Unicode codepoint up to 3 digits + /// long. This expects the parser to be positioned at the first octal + /// digit and advances the parser to the first character immediately + /// following the octal number. This also assumes that parsing octal + /// escapes is enabled. + /// + /// Assuming the preconditions are met, this routine can never fail. + #[inline(never)] + fn parse_octal(&self) -> ast::Literal { + assert!(self.parser().octal); + assert!('0' <= self.char() && self.char() <= '7'); + let start = self.pos(); + // Parse up to two more digits. + while self.bump() + && '0' <= self.char() + && self.char() <= '7' + && self.pos().offset - start.offset <= 2 + {} + let end = self.pos(); + let octal = &self.pattern()[start.offset..end.offset]; + // Parsing the octal should never fail since the above guarantees a + // valid number. + let codepoint = + u32::from_str_radix(octal, 8).expect("valid octal number"); + // The max value for 3 digit octal is 0777 = 511 and [0, 511] has no + // invalid Unicode scalar values. + let c = char::from_u32(codepoint).expect("Unicode scalar value"); + ast::Literal { + span: Span::new(start, end), + kind: ast::LiteralKind::Octal, + c, + } + } + + /// Parse a hex representation of a Unicode codepoint. 
This handles both + /// hex notations, i.e., `\xFF` and `\x{FFFF}`. This expects the parser to + /// be positioned at the `x`, `u` or `U` prefix. The parser is advanced to + /// the first character immediately following the hexadecimal literal. + #[inline(never)] + fn parse_hex(&self) -> Result { + assert!( + self.char() == 'x' || self.char() == 'u' || self.char() == 'U' + ); + + let hex_kind = match self.char() { + 'x' => ast::HexLiteralKind::X, + 'u' => ast::HexLiteralKind::UnicodeShort, + _ => ast::HexLiteralKind::UnicodeLong, + }; + if !self.bump_and_bump_space() { + return Err( + self.error(self.span(), ast::ErrorKind::EscapeUnexpectedEof) + ); + } + if self.char() == '{' { + self.parse_hex_brace(hex_kind) + } else { + self.parse_hex_digits(hex_kind) + } + } + + /// Parse an N-digit hex representation of a Unicode codepoint. This + /// expects the parser to be positioned at the first digit and will advance + /// the parser to the first character immediately following the escape + /// sequence. + /// + /// The number of digits given must be 2 (for `\xNN`), 4 (for `\uNNNN`) + /// or 8 (for `\UNNNNNNNN`). + #[inline(never)] + fn parse_hex_digits( + &self, + kind: ast::HexLiteralKind, + ) -> Result { + let mut scratch = self.parser().scratch.borrow_mut(); + scratch.clear(); + + let start = self.pos(); + for i in 0..kind.digits() { + if i > 0 && !self.bump_and_bump_space() { + return Err(self + .error(self.span(), ast::ErrorKind::EscapeUnexpectedEof)); + } + if !is_hex(self.char()) { + return Err(self.error( + self.span_char(), + ast::ErrorKind::EscapeHexInvalidDigit, + )); + } + scratch.push(self.char()); + } + // The final bump just moves the parser past the literal, which may + // be EOF. + self.bump_and_bump_space(); + let end = self.pos(); + let hex = scratch.as_str(); + match u32::from_str_radix(hex, 16).ok().and_then(char::from_u32) { + None => Err(self.error( + Span::new(start, end), + ast::ErrorKind::EscapeHexInvalid, + )), + Some(c) => Ok(ast::Literal { + span: Span::new(start, end), + kind: ast::LiteralKind::HexFixed(kind), + c, + }), + } + } + + /// Parse a hex representation of any Unicode scalar value. This expects + /// the parser to be positioned at the opening brace `{` and will advance + /// the parser to the first character following the closing brace `}`. + #[inline(never)] + fn parse_hex_brace( + &self, + kind: ast::HexLiteralKind, + ) -> Result { + let mut scratch = self.parser().scratch.borrow_mut(); + scratch.clear(); + + let brace_pos = self.pos(); + let start = self.span_char().end; + while self.bump_and_bump_space() && self.char() != '}' { + if !is_hex(self.char()) { + return Err(self.error( + self.span_char(), + ast::ErrorKind::EscapeHexInvalidDigit, + )); + } + scratch.push(self.char()); + } + if self.is_eof() { + return Err(self.error( + Span::new(brace_pos, self.pos()), + ast::ErrorKind::EscapeUnexpectedEof, + )); + } + let end = self.pos(); + let hex = scratch.as_str(); + assert_eq!(self.char(), '}'); + self.bump_and_bump_space(); + + if hex.is_empty() { + return Err(self.error( + Span::new(brace_pos, self.pos()), + ast::ErrorKind::EscapeHexEmpty, + )); + } + match u32::from_str_radix(hex, 16).ok().and_then(char::from_u32) { + None => Err(self.error( + Span::new(start, end), + ast::ErrorKind::EscapeHexInvalid, + )), + Some(c) => Ok(ast::Literal { + span: Span::new(start, self.pos()), + kind: ast::LiteralKind::HexBrace(kind), + c, + }), + } + } + + /// Parse a decimal number into a u32 while trimming leading and trailing + /// whitespace. 
+ /// + /// This expects the parser to be positioned at the first position where + /// a decimal digit could occur. This will advance the parser to the byte + /// immediately following the last contiguous decimal digit. + /// + /// If no decimal digit could be found or if there was a problem parsing + /// the complete set of digits into a u32, then an error is returned. + fn parse_decimal(&self) -> Result { + let mut scratch = self.parser().scratch.borrow_mut(); + scratch.clear(); + + while !self.is_eof() && self.char().is_whitespace() { + self.bump(); + } + let start = self.pos(); + while !self.is_eof() && '0' <= self.char() && self.char() <= '9' { + scratch.push(self.char()); + self.bump_and_bump_space(); + } + let span = Span::new(start, self.pos()); + while !self.is_eof() && self.char().is_whitespace() { + self.bump_and_bump_space(); + } + let digits = scratch.as_str(); + if digits.is_empty() { + return Err(self.error(span, ast::ErrorKind::DecimalEmpty)); + } + match u32::from_str_radix(digits, 10).ok() { + Some(n) => Ok(n), + None => Err(self.error(span, ast::ErrorKind::DecimalInvalid)), + } + } + + /// Parse a standard character class consisting primarily of characters or + /// character ranges, but can also contain nested character classes of + /// any type (sans `.`). + /// + /// This assumes the parser is positioned at the opening `[`. If parsing + /// is successful, then the parser is advanced to the position immediately + /// following the closing `]`. + #[inline(never)] + fn parse_set_class(&self) -> Result { + assert_eq!(self.char(), '['); + + let mut union = + ast::ClassSetUnion { span: self.span(), items: vec![] }; + loop { + self.bump_space(); + if self.is_eof() { + return Err(self.unclosed_class_error()); + } + match self.char() { + '[' => { + // If we've already parsed the opening bracket, then + // attempt to treat this as the beginning of an ASCII + // class. If ASCII class parsing fails, then the parser + // backs up to `[`. + if !self.parser().stack_class.borrow().is_empty() { + if let Some(cls) = self.maybe_parse_ascii_class() { + union.push(ast::ClassSetItem::Ascii(cls)); + continue; + } + } + union = self.push_class_open(union)?; + } + ']' => match self.pop_class(union)? { + Either::Left(nested_union) => { + union = nested_union; + } + Either::Right(class) => return Ok(class), + }, + '&' if self.peek() == Some('&') => { + assert!(self.bump_if("&&")); + union = self.push_class_op( + ast::ClassSetBinaryOpKind::Intersection, + union, + ); + } + '-' if self.peek() == Some('-') => { + assert!(self.bump_if("--")); + union = self.push_class_op( + ast::ClassSetBinaryOpKind::Difference, + union, + ); + } + '~' if self.peek() == Some('~') => { + assert!(self.bump_if("~~")); + union = self.push_class_op( + ast::ClassSetBinaryOpKind::SymmetricDifference, + union, + ); + } + _ => { + union.push(self.parse_set_class_range()?); + } + } + } + } + + /// Parse a single primitive item in a character class set. The item to + /// be parsed can either be one of a simple literal character, a range + /// between two simple literal characters or a "primitive" character + /// class like \w or \p{Greek}. + /// + /// If an invalid escape is found, or if a character class is found where + /// a simple literal is expected (e.g., in a range), then an error is + /// returned. 
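+    ///
+    /// For example, in `[a-z0-9_]`, each of `a-z`, `0-9` and `_` is parsed
+    /// by this routine.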
+ #[inline(never)] + fn parse_set_class_range(&self) -> Result { + let prim1 = self.parse_set_class_item()?; + self.bump_space(); + if self.is_eof() { + return Err(self.unclosed_class_error()); + } + // If the next char isn't a `-`, then we don't have a range. + // There are two exceptions. If the char after a `-` is a `]`, then + // `-` is interpreted as a literal `-`. Alternatively, if the char + // after a `-` is a `-`, then `--` corresponds to a "difference" + // operation. + if self.char() != '-' + || self.peek_space() == Some(']') + || self.peek_space() == Some('-') + { + return prim1.into_class_set_item(self); + } + // OK, now we're parsing a range, so bump past the `-` and parse the + // second half of the range. + if !self.bump_and_bump_space() { + return Err(self.unclosed_class_error()); + } + let prim2 = self.parse_set_class_item()?; + let range = ast::ClassSetRange { + span: Span::new(prim1.span().start, prim2.span().end), + start: prim1.into_class_literal(self)?, + end: prim2.into_class_literal(self)?, + }; + if !range.is_valid() { + return Err( + self.error(range.span, ast::ErrorKind::ClassRangeInvalid) + ); + } + Ok(ast::ClassSetItem::Range(range)) + } + + /// Parse a single item in a character class as a primitive, where the + /// primitive either consists of a verbatim literal or a single escape + /// sequence. + /// + /// This assumes the parser is positioned at the beginning of a primitive, + /// and advances the parser to the first position after the primitive if + /// successful. + /// + /// Note that it is the caller's responsibility to report an error if an + /// illegal primitive was parsed. + #[inline(never)] + fn parse_set_class_item(&self) -> Result { + if self.char() == '\\' { + self.parse_escape() + } else { + let x = Primitive::Literal(ast::Literal { + span: self.span_char(), + kind: ast::LiteralKind::Verbatim, + c: self.char(), + }); + self.bump(); + Ok(x) + } + } + + /// Parses the opening of a character class set. This includes the opening + /// bracket along with `^` if present to indicate negation. This also + /// starts parsing the opening set of unioned items if applicable, since + /// there are special rules applied to certain characters in the opening + /// of a character class. For example, `[^]]` is the class of all + /// characters not equal to `]`. (`]` would need to be escaped in any other + /// position.) Similarly for `-`. + /// + /// In all cases, the op inside the returned `ast::ClassBracketed` is an + /// empty union. This empty union should be replaced with the actual item + /// when it is popped from the parser's stack. + /// + /// This assumes the parser is positioned at the opening `[` and advances + /// the parser to the first non-special byte of the character class. + /// + /// An error is returned if EOF is found. + #[inline(never)] + fn parse_set_class_open( + &self, + ) -> Result<(ast::ClassBracketed, ast::ClassSetUnion)> { + assert_eq!(self.char(), '['); + let start = self.pos(); + if !self.bump_and_bump_space() { + return Err(self.error( + Span::new(start, self.pos()), + ast::ErrorKind::ClassUnclosed, + )); + } + + let negated = if self.char() != '^' { + false + } else { + if !self.bump_and_bump_space() { + return Err(self.error( + Span::new(start, self.pos()), + ast::ErrorKind::ClassUnclosed, + )); + } + true + }; + // Accept any number of `-` as literal `-`. 
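+        // For example, the leading `-` in `[-a]` (or `[^-a]`) is the
+        // literal `-` and not the start of a range.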
+ let mut union = + ast::ClassSetUnion { span: self.span(), items: vec![] }; + while self.char() == '-' { + union.push(ast::ClassSetItem::Literal(ast::Literal { + span: self.span_char(), + kind: ast::LiteralKind::Verbatim, + c: '-', + })); + if !self.bump_and_bump_space() { + return Err(self.error( + Span::new(start, start), + ast::ErrorKind::ClassUnclosed, + )); + } + } + // If `]` is the *first* char in a set, then interpret it as a literal + // `]`. That is, an empty class is impossible to write. + if union.items.is_empty() && self.char() == ']' { + union.push(ast::ClassSetItem::Literal(ast::Literal { + span: self.span_char(), + kind: ast::LiteralKind::Verbatim, + c: ']', + })); + if !self.bump_and_bump_space() { + return Err(self.error( + Span::new(start, self.pos()), + ast::ErrorKind::ClassUnclosed, + )); + } + } + let set = ast::ClassBracketed { + span: Span::new(start, self.pos()), + negated, + kind: ast::ClassSet::union(ast::ClassSetUnion { + span: Span::new(union.span.start, union.span.start), + items: vec![], + }), + }; + Ok((set, union)) + } + + /// Attempt to parse an ASCII character class, e.g., `[:alnum:]`. + /// + /// This assumes the parser is positioned at the opening `[`. + /// + /// If no valid ASCII character class could be found, then this does not + /// advance the parser and `None` is returned. Otherwise, the parser is + /// advanced to the first byte following the closing `]` and the + /// corresponding ASCII class is returned. + #[inline(never)] + fn maybe_parse_ascii_class(&self) -> Option { + // ASCII character classes are interesting from a parsing perspective + // because parsing cannot fail with any interesting error. For example, + // in order to use an ASCII character class, it must be enclosed in + // double brackets, e.g., `[[:alnum:]]`. Alternatively, you might think + // of it as "ASCII character classes have the syntax `[:NAME:]` which + // can only appear within character brackets." This means that things + // like `[[:lower:]A]` are legal constructs. + // + // However, if one types an incorrect ASCII character class, e.g., + // `[[:loower:]]`, then we treat that as a normal nested character + // class containing the characters `:elorw`. One might argue that we + // should return an error instead since the repeated colons give away + // the intent to write an ASCII class. But what if the user typed + // `[[:lower]]` instead? How can we tell that was intended to be an + // ASCII class and not just a normal nested class? + // + // Reasonable people can probably disagree over this, but for better + // or worse, we implement semantics that never fails at the expense + // of better failure modes. + assert_eq!(self.char(), '['); + // If parsing fails, then we back up the parser to this starting point. 
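+        // For example, given `[[:upper:]]`, the outer class parser calls
+        // this routine when it sees the inner `[`, and on success the
+        // parser is left just past the closing `]`.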
+ let start = self.pos(); + let mut negated = false; + if !self.bump() || self.char() != ':' { + self.parser().pos.set(start); + return None; + } + if !self.bump() { + self.parser().pos.set(start); + return None; + } + if self.char() == '^' { + negated = true; + if !self.bump() { + self.parser().pos.set(start); + return None; + } + } + let name_start = self.offset(); + while self.char() != ':' && self.bump() {} + if self.is_eof() { + self.parser().pos.set(start); + return None; + } + let name = &self.pattern()[name_start..self.offset()]; + if !self.bump_if(":]") { + self.parser().pos.set(start); + return None; + } + let kind = match ast::ClassAsciiKind::from_name(name) { + Some(kind) => kind, + None => { + self.parser().pos.set(start); + return None; + } + }; + Some(ast::ClassAscii { + span: Span::new(start, self.pos()), + kind, + negated, + }) + } + + /// Parse a Unicode class in either the single character notation, `\pN` + /// or the multi-character bracketed notation, `\p{Greek}`. This assumes + /// the parser is positioned at the `p` (or `P` for negation) and will + /// advance the parser to the character immediately following the class. + /// + /// Note that this does not check whether the class name is valid or not. + #[inline(never)] + fn parse_unicode_class(&self) -> Result { + assert!(self.char() == 'p' || self.char() == 'P'); + + let mut scratch = self.parser().scratch.borrow_mut(); + scratch.clear(); + + let negated = self.char() == 'P'; + if !self.bump_and_bump_space() { + return Err( + self.error(self.span(), ast::ErrorKind::EscapeUnexpectedEof) + ); + } + let (start, kind) = if self.char() == '{' { + let start = self.span_char().end; + while self.bump_and_bump_space() && self.char() != '}' { + scratch.push(self.char()); + } + if self.is_eof() { + return Err(self + .error(self.span(), ast::ErrorKind::EscapeUnexpectedEof)); + } + assert_eq!(self.char(), '}'); + self.bump(); + + let name = scratch.as_str(); + if let Some(i) = name.find("!=") { + ( + start, + ast::ClassUnicodeKind::NamedValue { + op: ast::ClassUnicodeOpKind::NotEqual, + name: name[..i].to_string(), + value: name[i + 2..].to_string(), + }, + ) + } else if let Some(i) = name.find(':') { + ( + start, + ast::ClassUnicodeKind::NamedValue { + op: ast::ClassUnicodeOpKind::Colon, + name: name[..i].to_string(), + value: name[i + 1..].to_string(), + }, + ) + } else if let Some(i) = name.find('=') { + ( + start, + ast::ClassUnicodeKind::NamedValue { + op: ast::ClassUnicodeOpKind::Equal, + name: name[..i].to_string(), + value: name[i + 1..].to_string(), + }, + ) + } else { + (start, ast::ClassUnicodeKind::Named(name.to_string())) + } + } else { + let start = self.pos(); + let c = self.char(); + if c == '\\' { + return Err(self.error( + self.span_char(), + ast::ErrorKind::UnicodeClassInvalid, + )); + } + self.bump_and_bump_space(); + let kind = ast::ClassUnicodeKind::OneLetter(c); + (start, kind) + }; + Ok(ast::ClassUnicode { + span: Span::new(start, self.pos()), + negated, + kind, + }) + } + + /// Parse a Perl character class, e.g., `\d` or `\W`. This assumes the + /// parser is currently at a valid character class name and will be + /// advanced to the character immediately following the class. 
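+    ///
+    /// For example, `\d` yields the digit class and `\D` its negation.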
+ #[inline(never)] + fn parse_perl_class(&self) -> ast::ClassPerl { + let c = self.char(); + let span = self.span_char(); + self.bump(); + let (negated, kind) = match c { + 'd' => (false, ast::ClassPerlKind::Digit), + 'D' => (true, ast::ClassPerlKind::Digit), + 's' => (false, ast::ClassPerlKind::Space), + 'S' => (true, ast::ClassPerlKind::Space), + 'w' => (false, ast::ClassPerlKind::Word), + 'W' => (true, ast::ClassPerlKind::Word), + c => panic!("expected valid Perl class but got '{c}'"), + }; + ast::ClassPerl { span, kind, negated } + } +} + +/// A type that traverses a fully parsed Ast and checks whether its depth +/// exceeds the specified nesting limit. If it does, then an error is returned. +#[derive(Debug)] +struct NestLimiter<'p, 's, P> { + /// The parser that is checking the nest limit. + p: &'p ParserI<'s, P>, + /// The current depth while walking an Ast. + depth: u32, +} + +impl<'p, 's, P: Borrow> NestLimiter<'p, 's, P> { + fn new(p: &'p ParserI<'s, P>) -> NestLimiter<'p, 's, P> { + NestLimiter { p, depth: 0 } + } + + #[inline(never)] + fn check(self, ast: &Ast) -> Result<()> { + ast::visit(ast, self) + } + + fn increment_depth(&mut self, span: &Span) -> Result<()> { + let new = self.depth.checked_add(1).ok_or_else(|| { + self.p.error( + span.clone(), + ast::ErrorKind::NestLimitExceeded(u32::MAX), + ) + })?; + let limit = self.p.parser().nest_limit; + if new > limit { + return Err(self.p.error( + span.clone(), + ast::ErrorKind::NestLimitExceeded(limit), + )); + } + self.depth = new; + Ok(()) + } + + fn decrement_depth(&mut self) { + // Assuming the correctness of the visitor, this should never drop + // below 0. + self.depth = self.depth.checked_sub(1).unwrap(); + } +} + +impl<'p, 's, P: Borrow> ast::Visitor for NestLimiter<'p, 's, P> { + type Output = (); + type Err = ast::Error; + + fn finish(self) -> Result<()> { + Ok(()) + } + + fn visit_pre(&mut self, ast: &Ast) -> Result<()> { + let span = match *ast { + Ast::Empty(_) + | Ast::Flags(_) + | Ast::Literal(_) + | Ast::Dot(_) + | Ast::Assertion(_) + | Ast::ClassUnicode(_) + | Ast::ClassPerl(_) => { + // These are all base cases, so we don't increment depth. + return Ok(()); + } + Ast::ClassBracketed(ref x) => &x.span, + Ast::Repetition(ref x) => &x.span, + Ast::Group(ref x) => &x.span, + Ast::Alternation(ref x) => &x.span, + Ast::Concat(ref x) => &x.span, + }; + self.increment_depth(span) + } + + fn visit_post(&mut self, ast: &Ast) -> Result<()> { + match *ast { + Ast::Empty(_) + | Ast::Flags(_) + | Ast::Literal(_) + | Ast::Dot(_) + | Ast::Assertion(_) + | Ast::ClassUnicode(_) + | Ast::ClassPerl(_) => { + // These are all base cases, so we don't decrement depth. + Ok(()) + } + Ast::ClassBracketed(_) + | Ast::Repetition(_) + | Ast::Group(_) + | Ast::Alternation(_) + | Ast::Concat(_) => { + self.decrement_depth(); + Ok(()) + } + } + } + + fn visit_class_set_item_pre( + &mut self, + ast: &ast::ClassSetItem, + ) -> Result<()> { + let span = match *ast { + ast::ClassSetItem::Empty(_) + | ast::ClassSetItem::Literal(_) + | ast::ClassSetItem::Range(_) + | ast::ClassSetItem::Ascii(_) + | ast::ClassSetItem::Unicode(_) + | ast::ClassSetItem::Perl(_) => { + // These are all base cases, so we don't increment depth. 
+ return Ok(()); + } + ast::ClassSetItem::Bracketed(ref x) => &x.span, + ast::ClassSetItem::Union(ref x) => &x.span, + }; + self.increment_depth(span) + } + + fn visit_class_set_item_post( + &mut self, + ast: &ast::ClassSetItem, + ) -> Result<()> { + match *ast { + ast::ClassSetItem::Empty(_) + | ast::ClassSetItem::Literal(_) + | ast::ClassSetItem::Range(_) + | ast::ClassSetItem::Ascii(_) + | ast::ClassSetItem::Unicode(_) + | ast::ClassSetItem::Perl(_) => { + // These are all base cases, so we don't decrement depth. + Ok(()) + } + ast::ClassSetItem::Bracketed(_) | ast::ClassSetItem::Union(_) => { + self.decrement_depth(); + Ok(()) + } + } + } + + fn visit_class_set_binary_op_pre( + &mut self, + ast: &ast::ClassSetBinaryOp, + ) -> Result<()> { + self.increment_depth(&ast.span) + } + + fn visit_class_set_binary_op_post( + &mut self, + _ast: &ast::ClassSetBinaryOp, + ) -> Result<()> { + self.decrement_depth(); + Ok(()) + } +} + +/// When the result is an error, transforms the ast::ErrorKind from the source +/// Result into another one. This function is used to return clearer error +/// messages when possible. +fn specialize_err( + result: Result, + from: ast::ErrorKind, + to: ast::ErrorKind, +) -> Result { + if let Err(e) = result { + if e.kind == from { + Err(ast::Error { kind: to, pattern: e.pattern, span: e.span }) + } else { + Err(e) + } + } else { + result + } +} + +#[cfg(test)] +mod tests { + use core::ops::Range; + + use alloc::format; + + use super::*; + + // Our own assert_eq, which has slightly better formatting (but honestly + // still kind of crappy). + macro_rules! assert_eq { + ($left:expr, $right:expr) => {{ + match (&$left, &$right) { + (left_val, right_val) => { + if !(*left_val == *right_val) { + panic!( + "assertion failed: `(left == right)`\n\n\ + left: `{:?}`\nright: `{:?}`\n\n", + left_val, right_val + ) + } + } + } + }}; + } + + // We create these errors to compare with real ast::Errors in the tests. + // We define equality between TestError and ast::Error to disregard the + // pattern string in ast::Error, which is annoying to provide in tests. + #[derive(Clone, Debug)] + struct TestError { + span: Span, + kind: ast::ErrorKind, + } + + impl PartialEq for TestError { + fn eq(&self, other: &ast::Error) -> bool { + self.span == other.span && self.kind == other.kind + } + } + + impl PartialEq for ast::Error { + fn eq(&self, other: &TestError) -> bool { + self.span == other.span && self.kind == other.kind + } + } + + fn s(str: &str) -> String { + str.to_string() + } + + fn parser(pattern: &str) -> ParserI<'_, Parser> { + ParserI::new(Parser::new(), pattern) + } + + fn parser_octal(pattern: &str) -> ParserI<'_, Parser> { + let parser = ParserBuilder::new().octal(true).build(); + ParserI::new(parser, pattern) + } + + fn parser_empty_min_range(pattern: &str) -> ParserI<'_, Parser> { + let parser = ParserBuilder::new().empty_min_range(true).build(); + ParserI::new(parser, pattern) + } + + fn parser_nest_limit( + pattern: &str, + nest_limit: u32, + ) -> ParserI<'_, Parser> { + let p = ParserBuilder::new().nest_limit(nest_limit).build(); + ParserI::new(p, pattern) + } + + fn parser_ignore_whitespace(pattern: &str) -> ParserI<'_, Parser> { + let p = ParserBuilder::new().ignore_whitespace(true).build(); + ParserI::new(p, pattern) + } + + /// Short alias for creating a new span. + fn nspan(start: Position, end: Position) -> Span { + Span::new(start, end) + } + + /// Short alias for creating a new position. 
+    fn npos(offset: usize, line: usize, column: usize) -> Position {
+        Position::new(offset, line, column)
+    }
+
+    /// Create a new span from the given offset range. This assumes a single
+    /// line and sets the columns based on the offsets. i.e., This only works
+    /// out of the box for ASCII, which is fine for most tests.
+    fn span(range: Range<usize>) -> Span {
+        let start = Position::new(range.start, 1, range.start + 1);
+        let end = Position::new(range.end, 1, range.end + 1);
+        Span::new(start, end)
+    }
+
+    /// Create a new span for the corresponding byte range in the given string.
+    fn span_range(subject: &str, range: Range<usize>) -> Span {
+        let start = Position {
+            offset: range.start,
+            line: 1 + subject[..range.start].matches('\n').count(),
+            column: 1 + subject[..range.start]
+                .chars()
+                .rev()
+                .position(|c| c == '\n')
+                .unwrap_or(subject[..range.start].chars().count()),
+        };
+        let end = Position {
+            offset: range.end,
+            line: 1 + subject[..range.end].matches('\n').count(),
+            column: 1 + subject[..range.end]
+                .chars()
+                .rev()
+                .position(|c| c == '\n')
+                .unwrap_or(subject[..range.end].chars().count()),
+        };
+        Span::new(start, end)
+    }
+
+    /// Create a verbatim literal starting at the given position.
+    fn lit(c: char, start: usize) -> Ast {
+        lit_with(c, span(start..start + c.len_utf8()))
+    }
+
+    /// Create a meta literal starting at the given position.
+    fn meta_lit(c: char, span: Span) -> Ast {
+        Ast::literal(ast::Literal { span, kind: ast::LiteralKind::Meta, c })
+    }
+
+    /// Create a verbatim literal with the given span.
+    fn lit_with(c: char, span: Span) -> Ast {
+        Ast::literal(ast::Literal {
+            span,
+            kind: ast::LiteralKind::Verbatim,
+            c,
+        })
+    }
+
+    /// Create a concatenation with the given range.
+    fn concat(range: Range<usize>, asts: Vec<Ast>) -> Ast {
+        concat_with(span(range), asts)
+    }
+
+    /// Create a concatenation with the given span.
+    fn concat_with(span: Span, asts: Vec<Ast>) -> Ast {
+        Ast::concat(ast::Concat { span, asts })
+    }
+
+    /// Create an alternation with the given span.
+    fn alt(range: Range<usize>, asts: Vec<Ast>) -> Ast {
+        Ast::alternation(ast::Alternation { span: span(range), asts })
+    }
+
+    /// Create a capturing group with the given span.
+    fn group(range: Range<usize>, index: u32, ast: Ast) -> Ast {
+        Ast::group(ast::Group {
+            span: span(range),
+            kind: ast::GroupKind::CaptureIndex(index),
+            ast: Box::new(ast),
+        })
+    }
+
+    /// Create an ast::SetFlags.
+    ///
+    /// The given pattern should be the full pattern string. The range given
+    /// should correspond to the byte offsets where the flag set occurs.
+    ///
+    /// If negated is true, then the set is interpreted as beginning with a
+    /// negation.
+    fn flag_set(
+        pat: &str,
+        range: Range<usize>,
+        flag: ast::Flag,
+        negated: bool,
+    ) -> Ast {
+        let mut items = vec![ast::FlagsItem {
+            span: span_range(pat, (range.end - 2)..(range.end - 1)),
+            kind: ast::FlagsItemKind::Flag(flag),
+        }];
+        if negated {
+            items.insert(
+                0,
+                ast::FlagsItem {
+                    span: span_range(pat, (range.start + 2)..(range.end - 2)),
+                    kind: ast::FlagsItemKind::Negation,
+                },
+            );
+        }
+        Ast::flags(ast::SetFlags {
+            span: span_range(pat, range.clone()),
+            flags: ast::Flags {
+                span: span_range(pat, (range.start + 2)..(range.end - 1)),
+                items,
+            },
+        })
+    }
+
+    #[test]
+    fn parse_nest_limit() {
+        // A nest limit of 0 still allows some types of regexes.
+        assert_eq!(
+            parser_nest_limit("", 0).parse(),
+            Ok(Ast::empty(span(0..0)))
+        );
+        assert_eq!(parser_nest_limit("a", 0).parse(), Ok(lit('a', 0)));
+
+        // Test repetition operations, which require one level of nesting.
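+        // For example, `a+` nests the literal `a` inside a repetition, so it
+        // is rejected with a limit of 0 but accepted with a limit of 1.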
+ assert_eq!( + parser_nest_limit("a+", 0).parse().unwrap_err(), + TestError { + span: span(0..2), + kind: ast::ErrorKind::NestLimitExceeded(0), + } + ); + assert_eq!( + parser_nest_limit("a+", 1).parse(), + Ok(Ast::repetition(ast::Repetition { + span: span(0..2), + op: ast::RepetitionOp { + span: span(1..2), + kind: ast::RepetitionKind::OneOrMore, + }, + greedy: true, + ast: Box::new(lit('a', 0)), + })) + ); + assert_eq!( + parser_nest_limit("(a)+", 1).parse().unwrap_err(), + TestError { + span: span(0..3), + kind: ast::ErrorKind::NestLimitExceeded(1), + } + ); + assert_eq!( + parser_nest_limit("a+*", 1).parse().unwrap_err(), + TestError { + span: span(0..2), + kind: ast::ErrorKind::NestLimitExceeded(1), + } + ); + assert_eq!( + parser_nest_limit("a+*", 2).parse(), + Ok(Ast::repetition(ast::Repetition { + span: span(0..3), + op: ast::RepetitionOp { + span: span(2..3), + kind: ast::RepetitionKind::ZeroOrMore, + }, + greedy: true, + ast: Box::new(Ast::repetition(ast::Repetition { + span: span(0..2), + op: ast::RepetitionOp { + span: span(1..2), + kind: ast::RepetitionKind::OneOrMore, + }, + greedy: true, + ast: Box::new(lit('a', 0)), + })), + })) + ); + + // Test concatenations. A concatenation requires one level of nesting. + assert_eq!( + parser_nest_limit("ab", 0).parse().unwrap_err(), + TestError { + span: span(0..2), + kind: ast::ErrorKind::NestLimitExceeded(0), + } + ); + assert_eq!( + parser_nest_limit("ab", 1).parse(), + Ok(concat(0..2, vec![lit('a', 0), lit('b', 1)])) + ); + assert_eq!( + parser_nest_limit("abc", 1).parse(), + Ok(concat(0..3, vec![lit('a', 0), lit('b', 1), lit('c', 2)])) + ); + + // Test alternations. An alternation requires one level of nesting. + assert_eq!( + parser_nest_limit("a|b", 0).parse().unwrap_err(), + TestError { + span: span(0..3), + kind: ast::ErrorKind::NestLimitExceeded(0), + } + ); + assert_eq!( + parser_nest_limit("a|b", 1).parse(), + Ok(alt(0..3, vec![lit('a', 0), lit('b', 2)])) + ); + assert_eq!( + parser_nest_limit("a|b|c", 1).parse(), + Ok(alt(0..5, vec![lit('a', 0), lit('b', 2), lit('c', 4)])) + ); + + // Test character classes. Classes form their own mini-recursive + // syntax! + assert_eq!( + parser_nest_limit("[a]", 0).parse().unwrap_err(), + TestError { + span: span(0..3), + kind: ast::ErrorKind::NestLimitExceeded(0), + } + ); + assert_eq!( + parser_nest_limit("[a]", 1).parse(), + Ok(Ast::class_bracketed(ast::ClassBracketed { + span: span(0..3), + negated: false, + kind: ast::ClassSet::Item(ast::ClassSetItem::Literal( + ast::Literal { + span: span(1..2), + kind: ast::LiteralKind::Verbatim, + c: 'a', + } + )), + })) + ); + assert_eq!( + parser_nest_limit("[ab]", 1).parse().unwrap_err(), + TestError { + span: span(1..3), + kind: ast::ErrorKind::NestLimitExceeded(1), + } + ); + assert_eq!( + parser_nest_limit("[ab[cd]]", 2).parse().unwrap_err(), + TestError { + span: span(3..7), + kind: ast::ErrorKind::NestLimitExceeded(2), + } + ); + assert_eq!( + parser_nest_limit("[ab[cd]]", 3).parse().unwrap_err(), + TestError { + span: span(4..6), + kind: ast::ErrorKind::NestLimitExceeded(3), + } + ); + assert_eq!( + parser_nest_limit("[a--b]", 1).parse().unwrap_err(), + TestError { + span: span(1..5), + kind: ast::ErrorKind::NestLimitExceeded(1), + } + ); + assert_eq!( + parser_nest_limit("[a--bc]", 2).parse().unwrap_err(), + TestError { + span: span(4..6), + kind: ast::ErrorKind::NestLimitExceeded(2), + } + ); + } + + #[test] + fn parse_comments() { + let pat = "(?x) +# This is comment 1. +foo # This is comment 2. + # This is comment 3. 
+bar +# This is comment 4."; + let astc = parser(pat).parse_with_comments().unwrap(); + assert_eq!( + astc.ast, + concat_with( + span_range(pat, 0..pat.len()), + vec![ + flag_set(pat, 0..4, ast::Flag::IgnoreWhitespace, false), + lit_with('f', span_range(pat, 26..27)), + lit_with('o', span_range(pat, 27..28)), + lit_with('o', span_range(pat, 28..29)), + lit_with('b', span_range(pat, 74..75)), + lit_with('a', span_range(pat, 75..76)), + lit_with('r', span_range(pat, 76..77)), + ] + ) + ); + assert_eq!( + astc.comments, + vec![ + ast::Comment { + span: span_range(pat, 5..26), + comment: s(" This is comment 1."), + }, + ast::Comment { + span: span_range(pat, 30..51), + comment: s(" This is comment 2."), + }, + ast::Comment { + span: span_range(pat, 53..74), + comment: s(" This is comment 3."), + }, + ast::Comment { + span: span_range(pat, 78..98), + comment: s(" This is comment 4."), + }, + ] + ); + } + + #[test] + fn parse_holistic() { + assert_eq!(parser("]").parse(), Ok(lit(']', 0))); + assert_eq!( + parser(r"\\\.\+\*\?\(\)\|\[\]\{\}\^\$\#\&\-\~").parse(), + Ok(concat( + 0..36, + vec![ + meta_lit('\\', span(0..2)), + meta_lit('.', span(2..4)), + meta_lit('+', span(4..6)), + meta_lit('*', span(6..8)), + meta_lit('?', span(8..10)), + meta_lit('(', span(10..12)), + meta_lit(')', span(12..14)), + meta_lit('|', span(14..16)), + meta_lit('[', span(16..18)), + meta_lit(']', span(18..20)), + meta_lit('{', span(20..22)), + meta_lit('}', span(22..24)), + meta_lit('^', span(24..26)), + meta_lit('$', span(26..28)), + meta_lit('#', span(28..30)), + meta_lit('&', span(30..32)), + meta_lit('-', span(32..34)), + meta_lit('~', span(34..36)), + ] + )) + ); + } + + #[test] + fn parse_ignore_whitespace() { + // Test that basic whitespace insensitivity works. + let pat = "(?x)a b"; + assert_eq!( + parser(pat).parse(), + Ok(concat_with( + nspan(npos(0, 1, 1), npos(7, 1, 8)), + vec![ + flag_set(pat, 0..4, ast::Flag::IgnoreWhitespace, false), + lit_with('a', nspan(npos(4, 1, 5), npos(5, 1, 6))), + lit_with('b', nspan(npos(6, 1, 7), npos(7, 1, 8))), + ] + )) + ); + + // Test that we can toggle whitespace insensitivity. + let pat = "(?x)a b(?-x)a b"; + assert_eq!( + parser(pat).parse(), + Ok(concat_with( + nspan(npos(0, 1, 1), npos(15, 1, 16)), + vec![ + flag_set(pat, 0..4, ast::Flag::IgnoreWhitespace, false), + lit_with('a', nspan(npos(4, 1, 5), npos(5, 1, 6))), + lit_with('b', nspan(npos(6, 1, 7), npos(7, 1, 8))), + flag_set(pat, 7..12, ast::Flag::IgnoreWhitespace, true), + lit_with('a', nspan(npos(12, 1, 13), npos(13, 1, 14))), + lit_with(' ', nspan(npos(13, 1, 14), npos(14, 1, 15))), + lit_with('b', nspan(npos(14, 1, 15), npos(15, 1, 16))), + ] + )) + ); + + // Test that nesting whitespace insensitive flags works. + let pat = "a (?x:a )a "; + assert_eq!( + parser(pat).parse(), + Ok(concat_with( + span_range(pat, 0..11), + vec![ + lit_with('a', span_range(pat, 0..1)), + lit_with(' ', span_range(pat, 1..2)), + Ast::group(ast::Group { + span: span_range(pat, 2..9), + kind: ast::GroupKind::NonCapturing(ast::Flags { + span: span_range(pat, 4..5), + items: vec![ast::FlagsItem { + span: span_range(pat, 4..5), + kind: ast::FlagsItemKind::Flag( + ast::Flag::IgnoreWhitespace + ), + },], + }), + ast: Box::new(lit_with('a', span_range(pat, 6..7))), + }), + lit_with('a', span_range(pat, 9..10)), + lit_with(' ', span_range(pat, 10..11)), + ] + )) + ); + + // Test that whitespace after an opening paren is insignificant. 
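+        // For example, with the `x` flag set, `( ?P<foo> a )` is parsed the
+        // same way as `(?P<foo>a)`.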
+ let pat = "(?x)( ?P a )"; + assert_eq!( + parser(pat).parse(), + Ok(concat_with( + span_range(pat, 0..pat.len()), + vec![ + flag_set(pat, 0..4, ast::Flag::IgnoreWhitespace, false), + Ast::group(ast::Group { + span: span_range(pat, 4..pat.len()), + kind: ast::GroupKind::CaptureName { + starts_with_p: true, + name: ast::CaptureName { + span: span_range(pat, 9..12), + name: s("foo"), + index: 1, + } + }, + ast: Box::new(lit_with('a', span_range(pat, 14..15))), + }), + ] + )) + ); + let pat = "(?x)( a )"; + assert_eq!( + parser(pat).parse(), + Ok(concat_with( + span_range(pat, 0..pat.len()), + vec![ + flag_set(pat, 0..4, ast::Flag::IgnoreWhitespace, false), + Ast::group(ast::Group { + span: span_range(pat, 4..pat.len()), + kind: ast::GroupKind::CaptureIndex(1), + ast: Box::new(lit_with('a', span_range(pat, 7..8))), + }), + ] + )) + ); + let pat = "(?x)( ?: a )"; + assert_eq!( + parser(pat).parse(), + Ok(concat_with( + span_range(pat, 0..pat.len()), + vec![ + flag_set(pat, 0..4, ast::Flag::IgnoreWhitespace, false), + Ast::group(ast::Group { + span: span_range(pat, 4..pat.len()), + kind: ast::GroupKind::NonCapturing(ast::Flags { + span: span_range(pat, 8..8), + items: vec![], + }), + ast: Box::new(lit_with('a', span_range(pat, 11..12))), + }), + ] + )) + ); + let pat = r"(?x)\x { 53 }"; + assert_eq!( + parser(pat).parse(), + Ok(concat_with( + span_range(pat, 0..pat.len()), + vec![ + flag_set(pat, 0..4, ast::Flag::IgnoreWhitespace, false), + Ast::literal(ast::Literal { + span: span(4..13), + kind: ast::LiteralKind::HexBrace( + ast::HexLiteralKind::X + ), + c: 'S', + }), + ] + )) + ); + + // Test that whitespace after an escape is OK. + let pat = r"(?x)\ "; + assert_eq!( + parser(pat).parse(), + Ok(concat_with( + span_range(pat, 0..pat.len()), + vec![ + flag_set(pat, 0..4, ast::Flag::IgnoreWhitespace, false), + Ast::literal(ast::Literal { + span: span_range(pat, 4..6), + kind: ast::LiteralKind::Superfluous, + c: ' ', + }), + ] + )) + ); + } + + #[test] + fn parse_newlines() { + let pat = ".\n."; + assert_eq!( + parser(pat).parse(), + Ok(concat_with( + span_range(pat, 0..3), + vec![ + Ast::dot(span_range(pat, 0..1)), + lit_with('\n', span_range(pat, 1..2)), + Ast::dot(span_range(pat, 2..3)), + ] + )) + ); + + let pat = "foobar\nbaz\nquux\n"; + assert_eq!( + parser(pat).parse(), + Ok(concat_with( + span_range(pat, 0..pat.len()), + vec![ + lit_with('f', nspan(npos(0, 1, 1), npos(1, 1, 2))), + lit_with('o', nspan(npos(1, 1, 2), npos(2, 1, 3))), + lit_with('o', nspan(npos(2, 1, 3), npos(3, 1, 4))), + lit_with('b', nspan(npos(3, 1, 4), npos(4, 1, 5))), + lit_with('a', nspan(npos(4, 1, 5), npos(5, 1, 6))), + lit_with('r', nspan(npos(5, 1, 6), npos(6, 1, 7))), + lit_with('\n', nspan(npos(6, 1, 7), npos(7, 2, 1))), + lit_with('b', nspan(npos(7, 2, 1), npos(8, 2, 2))), + lit_with('a', nspan(npos(8, 2, 2), npos(9, 2, 3))), + lit_with('z', nspan(npos(9, 2, 3), npos(10, 2, 4))), + lit_with('\n', nspan(npos(10, 2, 4), npos(11, 3, 1))), + lit_with('q', nspan(npos(11, 3, 1), npos(12, 3, 2))), + lit_with('u', nspan(npos(12, 3, 2), npos(13, 3, 3))), + lit_with('u', nspan(npos(13, 3, 3), npos(14, 3, 4))), + lit_with('x', nspan(npos(14, 3, 4), npos(15, 3, 5))), + lit_with('\n', nspan(npos(15, 3, 5), npos(16, 4, 1))), + ] + )) + ); + } + + #[test] + fn parse_uncounted_repetition() { + assert_eq!( + parser(r"a*").parse(), + Ok(Ast::repetition(ast::Repetition { + span: span(0..2), + op: ast::RepetitionOp { + span: span(1..2), + kind: ast::RepetitionKind::ZeroOrMore, + }, + greedy: true, + ast: Box::new(lit('a', 0)), + 
})) + ); + assert_eq!( + parser(r"a+").parse(), + Ok(Ast::repetition(ast::Repetition { + span: span(0..2), + op: ast::RepetitionOp { + span: span(1..2), + kind: ast::RepetitionKind::OneOrMore, + }, + greedy: true, + ast: Box::new(lit('a', 0)), + })) + ); + + assert_eq!( + parser(r"a?").parse(), + Ok(Ast::repetition(ast::Repetition { + span: span(0..2), + op: ast::RepetitionOp { + span: span(1..2), + kind: ast::RepetitionKind::ZeroOrOne, + }, + greedy: true, + ast: Box::new(lit('a', 0)), + })) + ); + assert_eq!( + parser(r"a??").parse(), + Ok(Ast::repetition(ast::Repetition { + span: span(0..3), + op: ast::RepetitionOp { + span: span(1..3), + kind: ast::RepetitionKind::ZeroOrOne, + }, + greedy: false, + ast: Box::new(lit('a', 0)), + })) + ); + assert_eq!( + parser(r"a?").parse(), + Ok(Ast::repetition(ast::Repetition { + span: span(0..2), + op: ast::RepetitionOp { + span: span(1..2), + kind: ast::RepetitionKind::ZeroOrOne, + }, + greedy: true, + ast: Box::new(lit('a', 0)), + })) + ); + assert_eq!( + parser(r"a?b").parse(), + Ok(concat( + 0..3, + vec![ + Ast::repetition(ast::Repetition { + span: span(0..2), + op: ast::RepetitionOp { + span: span(1..2), + kind: ast::RepetitionKind::ZeroOrOne, + }, + greedy: true, + ast: Box::new(lit('a', 0)), + }), + lit('b', 2), + ] + )) + ); + assert_eq!( + parser(r"a??b").parse(), + Ok(concat( + 0..4, + vec![ + Ast::repetition(ast::Repetition { + span: span(0..3), + op: ast::RepetitionOp { + span: span(1..3), + kind: ast::RepetitionKind::ZeroOrOne, + }, + greedy: false, + ast: Box::new(lit('a', 0)), + }), + lit('b', 3), + ] + )) + ); + assert_eq!( + parser(r"ab?").parse(), + Ok(concat( + 0..3, + vec![ + lit('a', 0), + Ast::repetition(ast::Repetition { + span: span(1..3), + op: ast::RepetitionOp { + span: span(2..3), + kind: ast::RepetitionKind::ZeroOrOne, + }, + greedy: true, + ast: Box::new(lit('b', 1)), + }), + ] + )) + ); + assert_eq!( + parser(r"(ab)?").parse(), + Ok(Ast::repetition(ast::Repetition { + span: span(0..5), + op: ast::RepetitionOp { + span: span(4..5), + kind: ast::RepetitionKind::ZeroOrOne, + }, + greedy: true, + ast: Box::new(group( + 0..4, + 1, + concat(1..3, vec![lit('a', 1), lit('b', 2),]) + )), + })) + ); + assert_eq!( + parser(r"|a?").parse(), + Ok(alt( + 0..3, + vec![ + Ast::empty(span(0..0)), + Ast::repetition(ast::Repetition { + span: span(1..3), + op: ast::RepetitionOp { + span: span(2..3), + kind: ast::RepetitionKind::ZeroOrOne, + }, + greedy: true, + ast: Box::new(lit('a', 1)), + }), + ] + )) + ); + + assert_eq!( + parser(r"*").parse().unwrap_err(), + TestError { + span: span(0..0), + kind: ast::ErrorKind::RepetitionMissing, + } + ); + assert_eq!( + parser(r"(?i)*").parse().unwrap_err(), + TestError { + span: span(4..4), + kind: ast::ErrorKind::RepetitionMissing, + } + ); + assert_eq!( + parser(r"(*)").parse().unwrap_err(), + TestError { + span: span(1..1), + kind: ast::ErrorKind::RepetitionMissing, + } + ); + assert_eq!( + parser(r"(?:?)").parse().unwrap_err(), + TestError { + span: span(3..3), + kind: ast::ErrorKind::RepetitionMissing, + } + ); + assert_eq!( + parser(r"+").parse().unwrap_err(), + TestError { + span: span(0..0), + kind: ast::ErrorKind::RepetitionMissing, + } + ); + assert_eq!( + parser(r"?").parse().unwrap_err(), + TestError { + span: span(0..0), + kind: ast::ErrorKind::RepetitionMissing, + } + ); + assert_eq!( + parser(r"(?)").parse().unwrap_err(), + TestError { + span: span(1..1), + kind: ast::ErrorKind::RepetitionMissing, + } + ); + assert_eq!( + parser(r"|*").parse().unwrap_err(), + TestError { + span: 
span(1..1), + kind: ast::ErrorKind::RepetitionMissing, + } + ); + assert_eq!( + parser(r"|+").parse().unwrap_err(), + TestError { + span: span(1..1), + kind: ast::ErrorKind::RepetitionMissing, + } + ); + assert_eq!( + parser(r"|?").parse().unwrap_err(), + TestError { + span: span(1..1), + kind: ast::ErrorKind::RepetitionMissing, + } + ); + } + + #[test] + fn parse_counted_repetition() { + assert_eq!( + parser(r"a{5}").parse(), + Ok(Ast::repetition(ast::Repetition { + span: span(0..4), + op: ast::RepetitionOp { + span: span(1..4), + kind: ast::RepetitionKind::Range( + ast::RepetitionRange::Exactly(5) + ), + }, + greedy: true, + ast: Box::new(lit('a', 0)), + })) + ); + assert_eq!( + parser(r"a{5,}").parse(), + Ok(Ast::repetition(ast::Repetition { + span: span(0..5), + op: ast::RepetitionOp { + span: span(1..5), + kind: ast::RepetitionKind::Range( + ast::RepetitionRange::AtLeast(5) + ), + }, + greedy: true, + ast: Box::new(lit('a', 0)), + })) + ); + assert_eq!( + parser(r"a{5,9}").parse(), + Ok(Ast::repetition(ast::Repetition { + span: span(0..6), + op: ast::RepetitionOp { + span: span(1..6), + kind: ast::RepetitionKind::Range( + ast::RepetitionRange::Bounded(5, 9) + ), + }, + greedy: true, + ast: Box::new(lit('a', 0)), + })) + ); + assert_eq!( + parser(r"a{5}?").parse(), + Ok(Ast::repetition(ast::Repetition { + span: span(0..5), + op: ast::RepetitionOp { + span: span(1..5), + kind: ast::RepetitionKind::Range( + ast::RepetitionRange::Exactly(5) + ), + }, + greedy: false, + ast: Box::new(lit('a', 0)), + })) + ); + assert_eq!( + parser(r"ab{5}").parse(), + Ok(concat( + 0..5, + vec![ + lit('a', 0), + Ast::repetition(ast::Repetition { + span: span(1..5), + op: ast::RepetitionOp { + span: span(2..5), + kind: ast::RepetitionKind::Range( + ast::RepetitionRange::Exactly(5) + ), + }, + greedy: true, + ast: Box::new(lit('b', 1)), + }), + ] + )) + ); + assert_eq!( + parser(r"ab{5}c").parse(), + Ok(concat( + 0..6, + vec![ + lit('a', 0), + Ast::repetition(ast::Repetition { + span: span(1..5), + op: ast::RepetitionOp { + span: span(2..5), + kind: ast::RepetitionKind::Range( + ast::RepetitionRange::Exactly(5) + ), + }, + greedy: true, + ast: Box::new(lit('b', 1)), + }), + lit('c', 5), + ] + )) + ); + + assert_eq!( + parser(r"a{ 5 }").parse(), + Ok(Ast::repetition(ast::Repetition { + span: span(0..6), + op: ast::RepetitionOp { + span: span(1..6), + kind: ast::RepetitionKind::Range( + ast::RepetitionRange::Exactly(5) + ), + }, + greedy: true, + ast: Box::new(lit('a', 0)), + })) + ); + assert_eq!( + parser(r"a{ 5 , 9 }").parse(), + Ok(Ast::repetition(ast::Repetition { + span: span(0..10), + op: ast::RepetitionOp { + span: span(1..10), + kind: ast::RepetitionKind::Range( + ast::RepetitionRange::Bounded(5, 9) + ), + }, + greedy: true, + ast: Box::new(lit('a', 0)), + })) + ); + assert_eq!( + parser_empty_min_range(r"a{,9}").parse(), + Ok(Ast::repetition(ast::Repetition { + span: span(0..5), + op: ast::RepetitionOp { + span: span(1..5), + kind: ast::RepetitionKind::Range( + ast::RepetitionRange::Bounded(0, 9) + ), + }, + greedy: true, + ast: Box::new(lit('a', 0)), + })) + ); + assert_eq!( + parser_ignore_whitespace(r"a{5,9} ?").parse(), + Ok(Ast::repetition(ast::Repetition { + span: span(0..8), + op: ast::RepetitionOp { + span: span(1..8), + kind: ast::RepetitionKind::Range( + ast::RepetitionRange::Bounded(5, 9) + ), + }, + greedy: false, + ast: Box::new(lit('a', 0)), + })) + ); + assert_eq!( + parser(r"\b{5,9}").parse(), + Ok(Ast::repetition(ast::Repetition { + span: span(0..7), + op: ast::RepetitionOp { + span: 
span(2..7), + kind: ast::RepetitionKind::Range( + ast::RepetitionRange::Bounded(5, 9) + ), + }, + greedy: true, + ast: Box::new(Ast::assertion(ast::Assertion { + span: span(0..2), + kind: ast::AssertionKind::WordBoundary, + })), + })) + ); + + assert_eq!( + parser(r"(?i){0}").parse().unwrap_err(), + TestError { + span: span(4..4), + kind: ast::ErrorKind::RepetitionMissing, + } + ); + assert_eq!( + parser(r"(?m){1,1}").parse().unwrap_err(), + TestError { + span: span(4..4), + kind: ast::ErrorKind::RepetitionMissing, + } + ); + assert_eq!( + parser(r"a{]}").parse().unwrap_err(), + TestError { + span: span(2..2), + kind: ast::ErrorKind::RepetitionCountDecimalEmpty, + } + ); + assert_eq!( + parser(r"a{1,]}").parse().unwrap_err(), + TestError { + span: span(4..4), + kind: ast::ErrorKind::RepetitionCountDecimalEmpty, + } + ); + assert_eq!( + parser(r"a{").parse().unwrap_err(), + TestError { + span: span(1..2), + kind: ast::ErrorKind::RepetitionCountUnclosed, + } + ); + assert_eq!( + parser(r"a{}").parse().unwrap_err(), + TestError { + span: span(2..2), + kind: ast::ErrorKind::RepetitionCountDecimalEmpty, + } + ); + assert_eq!( + parser(r"a{a").parse().unwrap_err(), + TestError { + span: span(2..2), + kind: ast::ErrorKind::RepetitionCountDecimalEmpty, + } + ); + assert_eq!( + parser(r"a{9999999999}").parse().unwrap_err(), + TestError { + span: span(2..12), + kind: ast::ErrorKind::DecimalInvalid, + } + ); + assert_eq!( + parser(r"a{9").parse().unwrap_err(), + TestError { + span: span(1..3), + kind: ast::ErrorKind::RepetitionCountUnclosed, + } + ); + assert_eq!( + parser(r"a{9,a").parse().unwrap_err(), + TestError { + span: span(4..4), + kind: ast::ErrorKind::RepetitionCountDecimalEmpty, + } + ); + assert_eq!( + parser(r"a{9,9999999999}").parse().unwrap_err(), + TestError { + span: span(4..14), + kind: ast::ErrorKind::DecimalInvalid, + } + ); + assert_eq!( + parser(r"a{9,").parse().unwrap_err(), + TestError { + span: span(1..4), + kind: ast::ErrorKind::RepetitionCountUnclosed, + } + ); + assert_eq!( + parser(r"a{9,11").parse().unwrap_err(), + TestError { + span: span(1..6), + kind: ast::ErrorKind::RepetitionCountUnclosed, + } + ); + assert_eq!( + parser(r"a{2,1}").parse().unwrap_err(), + TestError { + span: span(1..6), + kind: ast::ErrorKind::RepetitionCountInvalid, + } + ); + assert_eq!( + parser(r"{5}").parse().unwrap_err(), + TestError { + span: span(0..0), + kind: ast::ErrorKind::RepetitionMissing, + } + ); + assert_eq!( + parser(r"|{5}").parse().unwrap_err(), + TestError { + span: span(1..1), + kind: ast::ErrorKind::RepetitionMissing, + } + ); + } + + #[test] + fn parse_alternate() { + assert_eq!( + parser(r"a|b").parse(), + Ok(Ast::alternation(ast::Alternation { + span: span(0..3), + asts: vec![lit('a', 0), lit('b', 2)], + })) + ); + assert_eq!( + parser(r"(a|b)").parse(), + Ok(group( + 0..5, + 1, + Ast::alternation(ast::Alternation { + span: span(1..4), + asts: vec![lit('a', 1), lit('b', 3)], + }) + )) + ); + + assert_eq!( + parser(r"a|b|c").parse(), + Ok(Ast::alternation(ast::Alternation { + span: span(0..5), + asts: vec![lit('a', 0), lit('b', 2), lit('c', 4)], + })) + ); + assert_eq!( + parser(r"ax|by|cz").parse(), + Ok(Ast::alternation(ast::Alternation { + span: span(0..8), + asts: vec![ + concat(0..2, vec![lit('a', 0), lit('x', 1)]), + concat(3..5, vec![lit('b', 3), lit('y', 4)]), + concat(6..8, vec![lit('c', 6), lit('z', 7)]), + ], + })) + ); + assert_eq!( + parser(r"(ax|by|cz)").parse(), + Ok(group( + 0..10, + 1, + Ast::alternation(ast::Alternation { + span: span(1..9), + asts: vec![ + 
concat(1..3, vec![lit('a', 1), lit('x', 2)]),
+                        concat(4..6, vec![lit('b', 4), lit('y', 5)]),
+                        concat(7..9, vec![lit('c', 7), lit('z', 8)]),
+                    ],
+                })
+            ))
+        );
+        assert_eq!(
+            parser(r"(ax|(by|(cz)))").parse(),
+            Ok(group(
+                0..14,
+                1,
+                alt(
+                    1..13,
+                    vec![
+                        concat(1..3, vec![lit('a', 1), lit('x', 2)]),
+                        group(
+                            4..13,
+                            2,
+                            alt(
+                                5..12,
+                                vec![
+                                    concat(
+                                        5..7,
+                                        vec![lit('b', 5), lit('y', 6)]
+                                    ),
+                                    group(
+                                        8..12,
+                                        3,
+                                        concat(
+                                            9..11,
+                                            vec![lit('c', 9), lit('z', 10),]
+                                        )
+                                    ),
+                                ]
+                            )
+                        ),
+                    ]
+                )
+            ))
+        );
+
+        assert_eq!(
+            parser(r"|").parse(),
+            Ok(alt(
+                0..1,
+                vec![Ast::empty(span(0..0)), Ast::empty(span(1..1)),]
+            ))
+        );
+        assert_eq!(
+            parser(r"||").parse(),
+            Ok(alt(
+                0..2,
+                vec![
+                    Ast::empty(span(0..0)),
+                    Ast::empty(span(1..1)),
+                    Ast::empty(span(2..2)),
+                ]
+            ))
+        );
+        assert_eq!(
+            parser(r"a|").parse(),
+            Ok(alt(0..2, vec![lit('a', 0), Ast::empty(span(2..2)),]))
+        );
+        assert_eq!(
+            parser(r"|a").parse(),
+            Ok(alt(0..2, vec![Ast::empty(span(0..0)), lit('a', 1),]))
+        );
+
+        assert_eq!(
+            parser(r"(|)").parse(),
+            Ok(group(
+                0..3,
+                1,
+                alt(
+                    1..2,
+                    vec![Ast::empty(span(1..1)), Ast::empty(span(2..2)),]
+                )
+            ))
+        );
+        assert_eq!(
+            parser(r"(a|)").parse(),
+            Ok(group(
+                0..4,
+                1,
+                alt(1..3, vec![lit('a', 1), Ast::empty(span(3..3)),])
+            ))
+        );
+        assert_eq!(
+            parser(r"(|a)").parse(),
+            Ok(group(
+                0..4,
+                1,
+                alt(1..3, vec![Ast::empty(span(1..1)), lit('a', 2),])
+            ))
+        );
+
+        assert_eq!(
+            parser(r"a|b)").parse().unwrap_err(),
+            TestError {
+                span: span(3..4),
+                kind: ast::ErrorKind::GroupUnopened,
+            }
+        );
+        assert_eq!(
+            parser(r"(a|b").parse().unwrap_err(),
+            TestError {
+                span: span(0..1),
+                kind: ast::ErrorKind::GroupUnclosed,
+            }
+        );
+    }
+
+    #[test]
+    fn parse_unsupported_lookaround() {
+        assert_eq!(
+            parser(r"(?=a)").parse().unwrap_err(),
+            TestError {
+                span: span(0..3),
+                kind: ast::ErrorKind::UnsupportedLookAround,
+            }
+        );
+        assert_eq!(
+            parser(r"(?!a)").parse().unwrap_err(),
+            TestError {
+                span: span(0..3),
+                kind: ast::ErrorKind::UnsupportedLookAround,
+            }
+        );
+        assert_eq!(
+            parser(r"(?<=a)").parse().unwrap_err(),
+            TestError {
+                span: span(0..4),
+                kind: ast::ErrorKind::UnsupportedLookAround,
+            }
+        );
+        assert_eq!(
+            parser(r"(?<!a)").parse().unwrap_err(),
+            TestError {
+                span: span(0..4),
+                kind: ast::ErrorKind::UnsupportedLookAround,
+            }
+        );
+    }
+
+    #[test]
+    fn parse_capture_name() {
+        assert_eq!(
+            parser("(?<a>z)").parse(),
+            Ok(Ast::group(ast::Group {
+                span: span(0..7),
+                kind: ast::GroupKind::CaptureName {
+                    starts_with_p: false,
+                    name: ast::CaptureName {
+                        span: span(3..4),
+                        name: s("a"),
+                        index: 1,
+                    }
+                },
+                ast: Box::new(lit('z', 5)),
+            }))
+        );
+        assert_eq!(
+            parser("(?P<a>z)").parse(),
+            Ok(Ast::group(ast::Group {
+                span: span(0..8),
+                kind: ast::GroupKind::CaptureName {
+                    starts_with_p: true,
+                    name: ast::CaptureName {
+                        span: span(4..5),
+                        name: s("a"),
+                        index: 1,
+                    }
+                },
+                ast: Box::new(lit('z', 6)),
+            }))
+        );
+        assert_eq!(
+            parser("(?P<abc>z)").parse(),
+            Ok(Ast::group(ast::Group {
+                span: span(0..10),
+                kind: ast::GroupKind::CaptureName {
+                    starts_with_p: true,
+                    name: ast::CaptureName {
+                        span: span(4..7),
+                        name: s("abc"),
+                        index: 1,
+                    }
+                },
+                ast: Box::new(lit('z', 8)),
+            }))
+        );
+
+        assert_eq!(
+            parser("(?P<a_1>z)").parse(),
+            Ok(Ast::group(ast::Group {
+                span: span(0..10),
+                kind: ast::GroupKind::CaptureName {
+                    starts_with_p: true,
+                    name: ast::CaptureName {
+                        span: span(4..7),
+                        name: s("a_1"),
+                        index: 1,
+                    }
+                },
+                ast: Box::new(lit('z', 8)),
+            }))
+        );
+
+        assert_eq!(
+            parser("(?P<a.1>z)").parse(),
+            Ok(Ast::group(ast::Group {
+                span: span(0..10),
+                kind: ast::GroupKind::CaptureName {
+                    starts_with_p: true,
+                    name: ast::CaptureName {
+                        span: span(4..7),
+                        name: s("a.1"),
+                        index: 1,
+                    }
+                },
+                ast: Box::new(lit('z', 8)),
+            }))
+        );
+
+        assert_eq!(
+            parser("(?P<a[1]>z)").parse(),
+            Ok(Ast::group(ast::Group {
+                span: span(0..11),
+                kind: ast::GroupKind::CaptureName {
+                    starts_with_p: true,
+                    name: ast::CaptureName {
+                        span: span(4..8),
+                        name: s("a[1]"),
+                        index: 1,
+                    }
+                },
+                ast: Box::new(lit('z', 9)),
+            }))
+        );
+
+        assert_eq!(
+            parser("(?P<a¾>)").parse(),
+            Ok(Ast::group(ast::Group {
+                span: Span::new(
+                    Position::new(0, 1, 1),
+                    Position::new(9, 1, 9),
+                ),
+                kind: ast::GroupKind::CaptureName {
+                    starts_with_p: true,
+                    name: ast::CaptureName {
+                        span: Span::new(
+                            Position::new(4, 1, 5),
+                            Position::new(7, 1, 7),
+                        ),
+                        name: s("a¾"),
+                        index: 1,
+                    }
+                },
+                ast: Box::new(Ast::empty(Span::new(
+                    Position::new(8, 1, 8),
+                    Position::new(8, 1, 8),
+                ))),
+            }))
+        );
+        assert_eq!(
+            parser("(?P<名字>)").parse(),
+            Ok(Ast::group(ast::Group {
+                span: Span::new(
+                    Position::new(0, 1, 1),
+                    Position::new(12, 1, 9),
+                ),
+                kind: ast::GroupKind::CaptureName {
+                    starts_with_p: true,
+                    name: ast::CaptureName {
+                        span: Span::new(
+                            Position::new(4, 1, 5),
+                            Position::new(10, 1, 7),
+                        ),
+                        name: s("名字"),
+                        index: 1,
+                    }
+                },
+                ast: Box::new(Ast::empty(Span::new(
+                    Position::new(11, 1, 8),
+                    Position::new(11, 1, 8),
+                ))),
+            }))
+        );
+
+        assert_eq!(
+            parser("(?P<").parse().unwrap_err(),
+            TestError {
+                span: span(4..4),
+                kind: ast::ErrorKind::GroupNameUnexpectedEof,
+            }
+        );
+        assert_eq!(
+            parser("(?P<>z)").parse().unwrap_err(),
+            TestError {
+                span: span(4..4),
+                kind: ast::ErrorKind::GroupNameEmpty,
+            }
+        );
+        assert_eq!(
+            parser("(?P<a>y)(?P<a>z)").parse().unwrap_err(),
+            TestError {
+                span: span(12..13),
+                kind: ast::ErrorKind::GroupNameDuplicate {
+                    original: span(4..5),
+                },
+            }
+        );
+        assert_eq!(
+            parser("(?P<5>)").parse().unwrap_err(),
+            TestError {
+                span: span(4..5),
+                kind: ast::ErrorKind::GroupNameInvalid,
+            }
+        );
+        assert_eq!(
+            parser("(?P<5a>)").parse().unwrap_err(),
+            TestError {
+                span: span(4..5),
+                kind: ast::ErrorKind::GroupNameInvalid,
+            }
+        );
+        assert_eq!(
+            parser("(?P<¾>)").parse().unwrap_err(),
+            TestError {
+                span: Span::new(
+                    Position::new(4, 1, 5),
+                    Position::new(6, 1, 6),
+                ),
+                kind: ast::ErrorKind::GroupNameInvalid,
+            }
+        );
+        assert_eq!(
+            parser("(?P<¾a>)").parse().unwrap_err(),
+            TestError {
+                span: Span::new(
+                    Position::new(4, 1, 5),
+                    Position::new(6, 1, 6),
+                ),
+                kind: ast::ErrorKind::GroupNameInvalid,
+            }
+        );
+        assert_eq!(
+            parser("(?P<☃>)").parse().unwrap_err(),
+            TestError {
+                span: Span::new(
+                    Position::new(4, 1, 5),
+                    Position::new(7, 1, 6),
+                ),
+                kind: ast::ErrorKind::GroupNameInvalid,
+            }
+        );
+        assert_eq!(
+            parser("(?P<a☃>)").parse().unwrap_err(),
+            TestError {
+                span: Span::new(
+                    Position::new(5, 1, 6),
+                    Position::new(8, 1, 7),
+                ),
+                kind: ast::ErrorKind::GroupNameInvalid,
+            }
+        );
+    }
+
+    #[test]
+    fn parse_flags() {
+        assert_eq!(
+            parser("i:").parse_flags(),
+            Ok(ast::Flags {
+                span: span(0..1),
+                items: vec![ast::FlagsItem {
+                    span: span(0..1),
+                    kind: ast::FlagsItemKind::Flag(ast::Flag::CaseInsensitive),
+                }],
+            })
+        );
+        assert_eq!(
+            parser("i)").parse_flags(),
+            Ok(ast::Flags {
+                span: span(0..1),
+                items: vec![ast::FlagsItem {
+                    span: span(0..1),
+                    kind: ast::FlagsItemKind::Flag(ast::Flag::CaseInsensitive),
+                }],
+            })
+        );
+
+        assert_eq!(
+            parser("isU:").parse_flags(),
+            Ok(ast::Flags {
+                span: span(0..3),
+                items: vec![
+                    ast::FlagsItem {
+                        span: span(0..1),
+                        kind: ast::FlagsItemKind::Flag(
+                            ast::Flag::CaseInsensitive
+                        ),
+                    },
+                    ast::FlagsItem {
+                        span: span(1..2),
+                        kind: ast::FlagsItemKind::Flag(
+                            ast::Flag::DotMatchesNewLine
+                        ),
+ }, + ast::FlagsItem { + span: span(2..3), + kind: ast::FlagsItemKind::Flag(ast::Flag::SwapGreed), + }, + ], + }) + ); + + assert_eq!( + parser("-isU:").parse_flags(), + Ok(ast::Flags { + span: span(0..4), + items: vec![ + ast::FlagsItem { + span: span(0..1), + kind: ast::FlagsItemKind::Negation, + }, + ast::FlagsItem { + span: span(1..2), + kind: ast::FlagsItemKind::Flag( + ast::Flag::CaseInsensitive + ), + }, + ast::FlagsItem { + span: span(2..3), + kind: ast::FlagsItemKind::Flag( + ast::Flag::DotMatchesNewLine + ), + }, + ast::FlagsItem { + span: span(3..4), + kind: ast::FlagsItemKind::Flag(ast::Flag::SwapGreed), + }, + ], + }) + ); + assert_eq!( + parser("i-sU:").parse_flags(), + Ok(ast::Flags { + span: span(0..4), + items: vec![ + ast::FlagsItem { + span: span(0..1), + kind: ast::FlagsItemKind::Flag( + ast::Flag::CaseInsensitive + ), + }, + ast::FlagsItem { + span: span(1..2), + kind: ast::FlagsItemKind::Negation, + }, + ast::FlagsItem { + span: span(2..3), + kind: ast::FlagsItemKind::Flag( + ast::Flag::DotMatchesNewLine + ), + }, + ast::FlagsItem { + span: span(3..4), + kind: ast::FlagsItemKind::Flag(ast::Flag::SwapGreed), + }, + ], + }) + ); + assert_eq!( + parser("i-sR:").parse_flags(), + Ok(ast::Flags { + span: span(0..4), + items: vec![ + ast::FlagsItem { + span: span(0..1), + kind: ast::FlagsItemKind::Flag( + ast::Flag::CaseInsensitive + ), + }, + ast::FlagsItem { + span: span(1..2), + kind: ast::FlagsItemKind::Negation, + }, + ast::FlagsItem { + span: span(2..3), + kind: ast::FlagsItemKind::Flag( + ast::Flag::DotMatchesNewLine + ), + }, + ast::FlagsItem { + span: span(3..4), + kind: ast::FlagsItemKind::Flag(ast::Flag::CRLF), + }, + ], + }) + ); + + assert_eq!( + parser("isU").parse_flags().unwrap_err(), + TestError { + span: span(3..3), + kind: ast::ErrorKind::FlagUnexpectedEof, + } + ); + assert_eq!( + parser("isUa:").parse_flags().unwrap_err(), + TestError { + span: span(3..4), + kind: ast::ErrorKind::FlagUnrecognized, + } + ); + assert_eq!( + parser("isUi:").parse_flags().unwrap_err(), + TestError { + span: span(3..4), + kind: ast::ErrorKind::FlagDuplicate { original: span(0..1) }, + } + ); + assert_eq!( + parser("i-sU-i:").parse_flags().unwrap_err(), + TestError { + span: span(4..5), + kind: ast::ErrorKind::FlagRepeatedNegation { + original: span(1..2), + }, + } + ); + assert_eq!( + parser("-)").parse_flags().unwrap_err(), + TestError { + span: span(0..1), + kind: ast::ErrorKind::FlagDanglingNegation, + } + ); + assert_eq!( + parser("i-)").parse_flags().unwrap_err(), + TestError { + span: span(1..2), + kind: ast::ErrorKind::FlagDanglingNegation, + } + ); + assert_eq!( + parser("iU-)").parse_flags().unwrap_err(), + TestError { + span: span(2..3), + kind: ast::ErrorKind::FlagDanglingNegation, + } + ); + } + + #[test] + fn parse_flag() { + assert_eq!(parser("i").parse_flag(), Ok(ast::Flag::CaseInsensitive)); + assert_eq!(parser("m").parse_flag(), Ok(ast::Flag::MultiLine)); + assert_eq!(parser("s").parse_flag(), Ok(ast::Flag::DotMatchesNewLine)); + assert_eq!(parser("U").parse_flag(), Ok(ast::Flag::SwapGreed)); + assert_eq!(parser("u").parse_flag(), Ok(ast::Flag::Unicode)); + assert_eq!(parser("R").parse_flag(), Ok(ast::Flag::CRLF)); + assert_eq!(parser("x").parse_flag(), Ok(ast::Flag::IgnoreWhitespace)); + + assert_eq!( + parser("a").parse_flag().unwrap_err(), + TestError { + span: span(0..1), + kind: ast::ErrorKind::FlagUnrecognized, + } + ); + assert_eq!( + parser("☃").parse_flag().unwrap_err(), + TestError { + span: span_range("☃", 0..3), + kind: 
ast::ErrorKind::FlagUnrecognized, + } + ); + } + + #[test] + fn parse_primitive_non_escape() { + assert_eq!( + parser(r".").parse_primitive(), + Ok(Primitive::Dot(span(0..1))) + ); + assert_eq!( + parser(r"^").parse_primitive(), + Ok(Primitive::Assertion(ast::Assertion { + span: span(0..1), + kind: ast::AssertionKind::StartLine, + })) + ); + assert_eq!( + parser(r"$").parse_primitive(), + Ok(Primitive::Assertion(ast::Assertion { + span: span(0..1), + kind: ast::AssertionKind::EndLine, + })) + ); + + assert_eq!( + parser(r"a").parse_primitive(), + Ok(Primitive::Literal(ast::Literal { + span: span(0..1), + kind: ast::LiteralKind::Verbatim, + c: 'a', + })) + ); + assert_eq!( + parser(r"|").parse_primitive(), + Ok(Primitive::Literal(ast::Literal { + span: span(0..1), + kind: ast::LiteralKind::Verbatim, + c: '|', + })) + ); + assert_eq!( + parser(r"☃").parse_primitive(), + Ok(Primitive::Literal(ast::Literal { + span: span_range("☃", 0..3), + kind: ast::LiteralKind::Verbatim, + c: '☃', + })) + ); + } + + #[test] + fn parse_escape() { + assert_eq!( + parser(r"\|").parse_primitive(), + Ok(Primitive::Literal(ast::Literal { + span: span(0..2), + kind: ast::LiteralKind::Meta, + c: '|', + })) + ); + let specials = &[ + (r"\a", '\x07', ast::SpecialLiteralKind::Bell), + (r"\f", '\x0C', ast::SpecialLiteralKind::FormFeed), + (r"\t", '\t', ast::SpecialLiteralKind::Tab), + (r"\n", '\n', ast::SpecialLiteralKind::LineFeed), + (r"\r", '\r', ast::SpecialLiteralKind::CarriageReturn), + (r"\v", '\x0B', ast::SpecialLiteralKind::VerticalTab), + ]; + for &(pat, c, ref kind) in specials { + assert_eq!( + parser(pat).parse_primitive(), + Ok(Primitive::Literal(ast::Literal { + span: span(0..2), + kind: ast::LiteralKind::Special(kind.clone()), + c, + })) + ); + } + assert_eq!( + parser(r"\A").parse_primitive(), + Ok(Primitive::Assertion(ast::Assertion { + span: span(0..2), + kind: ast::AssertionKind::StartText, + })) + ); + assert_eq!( + parser(r"\z").parse_primitive(), + Ok(Primitive::Assertion(ast::Assertion { + span: span(0..2), + kind: ast::AssertionKind::EndText, + })) + ); + assert_eq!( + parser(r"\b").parse_primitive(), + Ok(Primitive::Assertion(ast::Assertion { + span: span(0..2), + kind: ast::AssertionKind::WordBoundary, + })) + ); + assert_eq!( + parser(r"\b{start}").parse_primitive(), + Ok(Primitive::Assertion(ast::Assertion { + span: span(0..9), + kind: ast::AssertionKind::WordBoundaryStart, + })) + ); + assert_eq!( + parser(r"\b{end}").parse_primitive(), + Ok(Primitive::Assertion(ast::Assertion { + span: span(0..7), + kind: ast::AssertionKind::WordBoundaryEnd, + })) + ); + assert_eq!( + parser(r"\b{start-half}").parse_primitive(), + Ok(Primitive::Assertion(ast::Assertion { + span: span(0..14), + kind: ast::AssertionKind::WordBoundaryStartHalf, + })) + ); + assert_eq!( + parser(r"\b{end-half}").parse_primitive(), + Ok(Primitive::Assertion(ast::Assertion { + span: span(0..12), + kind: ast::AssertionKind::WordBoundaryEndHalf, + })) + ); + assert_eq!( + parser(r"\<").parse_primitive(), + Ok(Primitive::Assertion(ast::Assertion { + span: span(0..2), + kind: ast::AssertionKind::WordBoundaryStartAngle, + })) + ); + assert_eq!( + parser(r"\>").parse_primitive(), + Ok(Primitive::Assertion(ast::Assertion { + span: span(0..2), + kind: ast::AssertionKind::WordBoundaryEndAngle, + })) + ); + assert_eq!( + parser(r"\B").parse_primitive(), + Ok(Primitive::Assertion(ast::Assertion { + span: span(0..2), + kind: ast::AssertionKind::NotWordBoundary, + })) + ); + + // We also support superfluous escapes in most cases now too. 
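+        // For example, `\!` and `\ ` are accepted and simply denote the
+        // literals `!` and ` `.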
+ for c in ['!', '@', '%', '"', '\'', '/', ' '] { + let pat = format!(r"\{c}"); + assert_eq!( + parser(&pat).parse_primitive(), + Ok(Primitive::Literal(ast::Literal { + span: span(0..2), + kind: ast::LiteralKind::Superfluous, + c, + })) + ); + } + + // Some superfluous escapes, namely [0-9A-Za-z], are still banned. This + // gives flexibility for future evolution. + assert_eq!( + parser(r"\e").parse_escape().unwrap_err(), + TestError { + span: span(0..2), + kind: ast::ErrorKind::EscapeUnrecognized, + } + ); + assert_eq!( + parser(r"\y").parse_escape().unwrap_err(), + TestError { + span: span(0..2), + kind: ast::ErrorKind::EscapeUnrecognized, + } + ); + + // Starting a special word boundary without any non-whitespace chars + // after the brace makes it ambiguous whether the user meant to write + // a counted repetition (probably not?) or an actual special word + // boundary assertion. + assert_eq!( + parser(r"\b{").parse_escape().unwrap_err(), + TestError { + span: span(0..3), + kind: ast::ErrorKind::SpecialWordOrRepetitionUnexpectedEof, + } + ); + assert_eq!( + parser_ignore_whitespace(r"\b{ ").parse_escape().unwrap_err(), + TestError { + span: span(0..4), + kind: ast::ErrorKind::SpecialWordOrRepetitionUnexpectedEof, + } + ); + // When 'x' is not enabled, the space is seen as a non-[-A-Za-z] char, + // and thus causes the parser to treat it as a counted repetition. + assert_eq!( + parser(r"\b{ ").parse().unwrap_err(), + TestError { + span: span(2..4), + kind: ast::ErrorKind::RepetitionCountUnclosed, + } + ); + // In this case, we got some valid chars that makes it look like the + // user is writing one of the special word boundary assertions, but + // we forget to close the brace. + assert_eq!( + parser(r"\b{foo").parse_escape().unwrap_err(), + TestError { + span: span(2..6), + kind: ast::ErrorKind::SpecialWordBoundaryUnclosed, + } + ); + // We get the same error as above, except it is provoked by seeing a + // char that we know is invalid before seeing a closing brace. + assert_eq!( + parser(r"\b{foo!}").parse_escape().unwrap_err(), + TestError { + span: span(2..6), + kind: ast::ErrorKind::SpecialWordBoundaryUnclosed, + } + ); + // And this one occurs when, syntactically, everything looks okay, but + // we don't use a valid spelling of a word boundary assertion. + assert_eq!( + parser(r"\b{foo}").parse_escape().unwrap_err(), + TestError { + span: span(3..6), + kind: ast::ErrorKind::SpecialWordBoundaryUnrecognized, + } + ); + + // An unfinished escape is illegal. 
+ assert_eq!( + parser(r"\").parse_escape().unwrap_err(), + TestError { + span: span(0..1), + kind: ast::ErrorKind::EscapeUnexpectedEof, + } + ); + } + + #[test] + fn parse_unsupported_backreference() { + assert_eq!( + parser(r"\0").parse_escape().unwrap_err(), + TestError { + span: span(0..2), + kind: ast::ErrorKind::UnsupportedBackreference, + } + ); + assert_eq!( + parser(r"\9").parse_escape().unwrap_err(), + TestError { + span: span(0..2), + kind: ast::ErrorKind::UnsupportedBackreference, + } + ); + } + + #[test] + fn parse_octal() { + for i in 0..511 { + let pat = format!(r"\{i:o}"); + assert_eq!( + parser_octal(&pat).parse_escape(), + Ok(Primitive::Literal(ast::Literal { + span: span(0..pat.len()), + kind: ast::LiteralKind::Octal, + c: char::from_u32(i).unwrap(), + })) + ); + } + assert_eq!( + parser_octal(r"\778").parse_escape(), + Ok(Primitive::Literal(ast::Literal { + span: span(0..3), + kind: ast::LiteralKind::Octal, + c: '?', + })) + ); + assert_eq!( + parser_octal(r"\7777").parse_escape(), + Ok(Primitive::Literal(ast::Literal { + span: span(0..4), + kind: ast::LiteralKind::Octal, + c: '\u{01FF}', + })) + ); + assert_eq!( + parser_octal(r"\778").parse(), + Ok(Ast::concat(ast::Concat { + span: span(0..4), + asts: vec![ + Ast::literal(ast::Literal { + span: span(0..3), + kind: ast::LiteralKind::Octal, + c: '?', + }), + Ast::literal(ast::Literal { + span: span(3..4), + kind: ast::LiteralKind::Verbatim, + c: '8', + }), + ], + })) + ); + assert_eq!( + parser_octal(r"\7777").parse(), + Ok(Ast::concat(ast::Concat { + span: span(0..5), + asts: vec![ + Ast::literal(ast::Literal { + span: span(0..4), + kind: ast::LiteralKind::Octal, + c: '\u{01FF}', + }), + Ast::literal(ast::Literal { + span: span(4..5), + kind: ast::LiteralKind::Verbatim, + c: '7', + }), + ], + })) + ); + + assert_eq!( + parser_octal(r"\8").parse_escape().unwrap_err(), + TestError { + span: span(0..2), + kind: ast::ErrorKind::EscapeUnrecognized, + } + ); + } + + #[test] + fn parse_hex_two() { + for i in 0..256 { + let pat = format!(r"\x{i:02x}"); + assert_eq!( + parser(&pat).parse_escape(), + Ok(Primitive::Literal(ast::Literal { + span: span(0..pat.len()), + kind: ast::LiteralKind::HexFixed(ast::HexLiteralKind::X), + c: char::from_u32(i).unwrap(), + })) + ); + } + + assert_eq!( + parser(r"\xF").parse_escape().unwrap_err(), + TestError { + span: span(3..3), + kind: ast::ErrorKind::EscapeUnexpectedEof, + } + ); + assert_eq!( + parser(r"\xG").parse_escape().unwrap_err(), + TestError { + span: span(2..3), + kind: ast::ErrorKind::EscapeHexInvalidDigit, + } + ); + assert_eq!( + parser(r"\xFG").parse_escape().unwrap_err(), + TestError { + span: span(3..4), + kind: ast::ErrorKind::EscapeHexInvalidDigit, + } + ); + } + + #[test] + fn parse_hex_four() { + for i in 0..65536 { + let c = match char::from_u32(i) { + None => continue, + Some(c) => c, + }; + let pat = format!(r"\u{i:04x}"); + assert_eq!( + parser(&pat).parse_escape(), + Ok(Primitive::Literal(ast::Literal { + span: span(0..pat.len()), + kind: ast::LiteralKind::HexFixed( + ast::HexLiteralKind::UnicodeShort + ), + c, + })) + ); + } + + assert_eq!( + parser(r"\uF").parse_escape().unwrap_err(), + TestError { + span: span(3..3), + kind: ast::ErrorKind::EscapeUnexpectedEof, + } + ); + assert_eq!( + parser(r"\uG").parse_escape().unwrap_err(), + TestError { + span: span(2..3), + kind: ast::ErrorKind::EscapeHexInvalidDigit, + } + ); + assert_eq!( + parser(r"\uFG").parse_escape().unwrap_err(), + TestError { + span: span(3..4), + kind: ast::ErrorKind::EscapeHexInvalidDigit, + } + 
); + assert_eq!( + parser(r"\uFFG").parse_escape().unwrap_err(), + TestError { + span: span(4..5), + kind: ast::ErrorKind::EscapeHexInvalidDigit, + } + ); + assert_eq!( + parser(r"\uFFFG").parse_escape().unwrap_err(), + TestError { + span: span(5..6), + kind: ast::ErrorKind::EscapeHexInvalidDigit, + } + ); + assert_eq!( + parser(r"\uD800").parse_escape().unwrap_err(), + TestError { + span: span(2..6), + kind: ast::ErrorKind::EscapeHexInvalid, + } + ); + } + + #[test] + fn parse_hex_eight() { + for i in 0..65536 { + let c = match char::from_u32(i) { + None => continue, + Some(c) => c, + }; + let pat = format!(r"\U{i:08x}"); + assert_eq!( + parser(&pat).parse_escape(), + Ok(Primitive::Literal(ast::Literal { + span: span(0..pat.len()), + kind: ast::LiteralKind::HexFixed( + ast::HexLiteralKind::UnicodeLong + ), + c, + })) + ); + } + + assert_eq!( + parser(r"\UF").parse_escape().unwrap_err(), + TestError { + span: span(3..3), + kind: ast::ErrorKind::EscapeUnexpectedEof, + } + ); + assert_eq!( + parser(r"\UG").parse_escape().unwrap_err(), + TestError { + span: span(2..3), + kind: ast::ErrorKind::EscapeHexInvalidDigit, + } + ); + assert_eq!( + parser(r"\UFG").parse_escape().unwrap_err(), + TestError { + span: span(3..4), + kind: ast::ErrorKind::EscapeHexInvalidDigit, + } + ); + assert_eq!( + parser(r"\UFFG").parse_escape().unwrap_err(), + TestError { + span: span(4..5), + kind: ast::ErrorKind::EscapeHexInvalidDigit, + } + ); + assert_eq!( + parser(r"\UFFFG").parse_escape().unwrap_err(), + TestError { + span: span(5..6), + kind: ast::ErrorKind::EscapeHexInvalidDigit, + } + ); + assert_eq!( + parser(r"\UFFFFG").parse_escape().unwrap_err(), + TestError { + span: span(6..7), + kind: ast::ErrorKind::EscapeHexInvalidDigit, + } + ); + assert_eq!( + parser(r"\UFFFFFG").parse_escape().unwrap_err(), + TestError { + span: span(7..8), + kind: ast::ErrorKind::EscapeHexInvalidDigit, + } + ); + assert_eq!( + parser(r"\UFFFFFFG").parse_escape().unwrap_err(), + TestError { + span: span(8..9), + kind: ast::ErrorKind::EscapeHexInvalidDigit, + } + ); + assert_eq!( + parser(r"\UFFFFFFFG").parse_escape().unwrap_err(), + TestError { + span: span(9..10), + kind: ast::ErrorKind::EscapeHexInvalidDigit, + } + ); + } + + #[test] + fn parse_hex_brace() { + assert_eq!( + parser(r"\u{26c4}").parse_escape(), + Ok(Primitive::Literal(ast::Literal { + span: span(0..8), + kind: ast::LiteralKind::HexBrace( + ast::HexLiteralKind::UnicodeShort + ), + c: '⛄', + })) + ); + assert_eq!( + parser(r"\U{26c4}").parse_escape(), + Ok(Primitive::Literal(ast::Literal { + span: span(0..8), + kind: ast::LiteralKind::HexBrace( + ast::HexLiteralKind::UnicodeLong + ), + c: '⛄', + })) + ); + assert_eq!( + parser(r"\x{26c4}").parse_escape(), + Ok(Primitive::Literal(ast::Literal { + span: span(0..8), + kind: ast::LiteralKind::HexBrace(ast::HexLiteralKind::X), + c: '⛄', + })) + ); + assert_eq!( + parser(r"\x{26C4}").parse_escape(), + Ok(Primitive::Literal(ast::Literal { + span: span(0..8), + kind: ast::LiteralKind::HexBrace(ast::HexLiteralKind::X), + c: '⛄', + })) + ); + assert_eq!( + parser(r"\x{10fFfF}").parse_escape(), + Ok(Primitive::Literal(ast::Literal { + span: span(0..10), + kind: ast::LiteralKind::HexBrace(ast::HexLiteralKind::X), + c: '\u{10FFFF}', + })) + ); + + assert_eq!( + parser(r"\x").parse_escape().unwrap_err(), + TestError { + span: span(2..2), + kind: ast::ErrorKind::EscapeUnexpectedEof, + } + ); + assert_eq!( + parser(r"\x{").parse_escape().unwrap_err(), + TestError { + span: span(2..3), + kind: ast::ErrorKind::EscapeUnexpectedEof, + 
} + ); + assert_eq!( + parser(r"\x{FF").parse_escape().unwrap_err(), + TestError { + span: span(2..5), + kind: ast::ErrorKind::EscapeUnexpectedEof, + } + ); + assert_eq!( + parser(r"\x{}").parse_escape().unwrap_err(), + TestError { + span: span(2..4), + kind: ast::ErrorKind::EscapeHexEmpty, + } + ); + assert_eq!( + parser(r"\x{FGF}").parse_escape().unwrap_err(), + TestError { + span: span(4..5), + kind: ast::ErrorKind::EscapeHexInvalidDigit, + } + ); + assert_eq!( + parser(r"\x{FFFFFF}").parse_escape().unwrap_err(), + TestError { + span: span(3..9), + kind: ast::ErrorKind::EscapeHexInvalid, + } + ); + assert_eq!( + parser(r"\x{D800}").parse_escape().unwrap_err(), + TestError { + span: span(3..7), + kind: ast::ErrorKind::EscapeHexInvalid, + } + ); + assert_eq!( + parser(r"\x{FFFFFFFFF}").parse_escape().unwrap_err(), + TestError { + span: span(3..12), + kind: ast::ErrorKind::EscapeHexInvalid, + } + ); + } + + #[test] + fn parse_decimal() { + assert_eq!(parser("123").parse_decimal(), Ok(123)); + assert_eq!(parser("0").parse_decimal(), Ok(0)); + assert_eq!(parser("01").parse_decimal(), Ok(1)); + + assert_eq!( + parser("-1").parse_decimal().unwrap_err(), + TestError { span: span(0..0), kind: ast::ErrorKind::DecimalEmpty } + ); + assert_eq!( + parser("").parse_decimal().unwrap_err(), + TestError { span: span(0..0), kind: ast::ErrorKind::DecimalEmpty } + ); + assert_eq!( + parser("9999999999").parse_decimal().unwrap_err(), + TestError { + span: span(0..10), + kind: ast::ErrorKind::DecimalInvalid, + } + ); + } + + #[test] + fn parse_set_class() { + fn union(span: Span, items: Vec) -> ast::ClassSet { + ast::ClassSet::union(ast::ClassSetUnion { span, items }) + } + + fn intersection( + span: Span, + lhs: ast::ClassSet, + rhs: ast::ClassSet, + ) -> ast::ClassSet { + ast::ClassSet::BinaryOp(ast::ClassSetBinaryOp { + span, + kind: ast::ClassSetBinaryOpKind::Intersection, + lhs: Box::new(lhs), + rhs: Box::new(rhs), + }) + } + + fn difference( + span: Span, + lhs: ast::ClassSet, + rhs: ast::ClassSet, + ) -> ast::ClassSet { + ast::ClassSet::BinaryOp(ast::ClassSetBinaryOp { + span, + kind: ast::ClassSetBinaryOpKind::Difference, + lhs: Box::new(lhs), + rhs: Box::new(rhs), + }) + } + + fn symdifference( + span: Span, + lhs: ast::ClassSet, + rhs: ast::ClassSet, + ) -> ast::ClassSet { + ast::ClassSet::BinaryOp(ast::ClassSetBinaryOp { + span, + kind: ast::ClassSetBinaryOpKind::SymmetricDifference, + lhs: Box::new(lhs), + rhs: Box::new(rhs), + }) + } + + fn itemset(item: ast::ClassSetItem) -> ast::ClassSet { + ast::ClassSet::Item(item) + } + + fn item_ascii(cls: ast::ClassAscii) -> ast::ClassSetItem { + ast::ClassSetItem::Ascii(cls) + } + + fn item_unicode(cls: ast::ClassUnicode) -> ast::ClassSetItem { + ast::ClassSetItem::Unicode(cls) + } + + fn item_perl(cls: ast::ClassPerl) -> ast::ClassSetItem { + ast::ClassSetItem::Perl(cls) + } + + fn item_bracket(cls: ast::ClassBracketed) -> ast::ClassSetItem { + ast::ClassSetItem::Bracketed(Box::new(cls)) + } + + fn lit(span: Span, c: char) -> ast::ClassSetItem { + ast::ClassSetItem::Literal(ast::Literal { + span, + kind: ast::LiteralKind::Verbatim, + c, + }) + } + + fn empty(span: Span) -> ast::ClassSetItem { + ast::ClassSetItem::Empty(span) + } + + fn range(span: Span, start: char, end: char) -> ast::ClassSetItem { + let pos1 = Position { + offset: span.start.offset + start.len_utf8(), + column: span.start.column + 1, + ..span.start + }; + let pos2 = Position { + offset: span.end.offset - end.len_utf8(), + column: span.end.column - 1, + ..span.end + }; + 
ast::ClassSetItem::Range(ast::ClassSetRange { + span, + start: ast::Literal { + span: Span { end: pos1, ..span }, + kind: ast::LiteralKind::Verbatim, + c: start, + }, + end: ast::Literal { + span: Span { start: pos2, ..span }, + kind: ast::LiteralKind::Verbatim, + c: end, + }, + }) + } + + fn alnum(span: Span, negated: bool) -> ast::ClassAscii { + ast::ClassAscii { span, kind: ast::ClassAsciiKind::Alnum, negated } + } + + fn lower(span: Span, negated: bool) -> ast::ClassAscii { + ast::ClassAscii { span, kind: ast::ClassAsciiKind::Lower, negated } + } + + assert_eq!( + parser("[[:alnum:]]").parse(), + Ok(Ast::class_bracketed(ast::ClassBracketed { + span: span(0..11), + negated: false, + kind: itemset(item_ascii(alnum(span(1..10), false))), + })) + ); + assert_eq!( + parser("[[[:alnum:]]]").parse(), + Ok(Ast::class_bracketed(ast::ClassBracketed { + span: span(0..13), + negated: false, + kind: itemset(item_bracket(ast::ClassBracketed { + span: span(1..12), + negated: false, + kind: itemset(item_ascii(alnum(span(2..11), false))), + })), + })) + ); + assert_eq!( + parser("[[:alnum:]&&[:lower:]]").parse(), + Ok(Ast::class_bracketed(ast::ClassBracketed { + span: span(0..22), + negated: false, + kind: intersection( + span(1..21), + itemset(item_ascii(alnum(span(1..10), false))), + itemset(item_ascii(lower(span(12..21), false))), + ), + })) + ); + assert_eq!( + parser("[[:alnum:]--[:lower:]]").parse(), + Ok(Ast::class_bracketed(ast::ClassBracketed { + span: span(0..22), + negated: false, + kind: difference( + span(1..21), + itemset(item_ascii(alnum(span(1..10), false))), + itemset(item_ascii(lower(span(12..21), false))), + ), + })) + ); + assert_eq!( + parser("[[:alnum:]~~[:lower:]]").parse(), + Ok(Ast::class_bracketed(ast::ClassBracketed { + span: span(0..22), + negated: false, + kind: symdifference( + span(1..21), + itemset(item_ascii(alnum(span(1..10), false))), + itemset(item_ascii(lower(span(12..21), false))), + ), + })) + ); + + assert_eq!( + parser("[a]").parse(), + Ok(Ast::class_bracketed(ast::ClassBracketed { + span: span(0..3), + negated: false, + kind: itemset(lit(span(1..2), 'a')), + })) + ); + assert_eq!( + parser(r"[a\]]").parse(), + Ok(Ast::class_bracketed(ast::ClassBracketed { + span: span(0..5), + negated: false, + kind: union( + span(1..4), + vec![ + lit(span(1..2), 'a'), + ast::ClassSetItem::Literal(ast::Literal { + span: span(2..4), + kind: ast::LiteralKind::Meta, + c: ']', + }), + ] + ), + })) + ); + assert_eq!( + parser(r"[a\-z]").parse(), + Ok(Ast::class_bracketed(ast::ClassBracketed { + span: span(0..6), + negated: false, + kind: union( + span(1..5), + vec![ + lit(span(1..2), 'a'), + ast::ClassSetItem::Literal(ast::Literal { + span: span(2..4), + kind: ast::LiteralKind::Meta, + c: '-', + }), + lit(span(4..5), 'z'), + ] + ), + })) + ); + assert_eq!( + parser("[ab]").parse(), + Ok(Ast::class_bracketed(ast::ClassBracketed { + span: span(0..4), + negated: false, + kind: union( + span(1..3), + vec![lit(span(1..2), 'a'), lit(span(2..3), 'b'),] + ), + })) + ); + assert_eq!( + parser("[a-]").parse(), + Ok(Ast::class_bracketed(ast::ClassBracketed { + span: span(0..4), + negated: false, + kind: union( + span(1..3), + vec![lit(span(1..2), 'a'), lit(span(2..3), '-'),] + ), + })) + ); + assert_eq!( + parser("[-a]").parse(), + Ok(Ast::class_bracketed(ast::ClassBracketed { + span: span(0..4), + negated: false, + kind: union( + span(1..3), + vec![lit(span(1..2), '-'), lit(span(2..3), 'a'),] + ), + })) + ); + assert_eq!( + parser(r"[\pL]").parse(), + 
Ok(Ast::class_bracketed(ast::ClassBracketed { + span: span(0..5), + negated: false, + kind: itemset(item_unicode(ast::ClassUnicode { + span: span(1..4), + negated: false, + kind: ast::ClassUnicodeKind::OneLetter('L'), + })), + })) + ); + assert_eq!( + parser(r"[\w]").parse(), + Ok(Ast::class_bracketed(ast::ClassBracketed { + span: span(0..4), + negated: false, + kind: itemset(item_perl(ast::ClassPerl { + span: span(1..3), + kind: ast::ClassPerlKind::Word, + negated: false, + })), + })) + ); + assert_eq!( + parser(r"[a\wz]").parse(), + Ok(Ast::class_bracketed(ast::ClassBracketed { + span: span(0..6), + negated: false, + kind: union( + span(1..5), + vec![ + lit(span(1..2), 'a'), + item_perl(ast::ClassPerl { + span: span(2..4), + kind: ast::ClassPerlKind::Word, + negated: false, + }), + lit(span(4..5), 'z'), + ] + ), + })) + ); + + assert_eq!( + parser("[a-z]").parse(), + Ok(Ast::class_bracketed(ast::ClassBracketed { + span: span(0..5), + negated: false, + kind: itemset(range(span(1..4), 'a', 'z')), + })) + ); + assert_eq!( + parser("[a-cx-z]").parse(), + Ok(Ast::class_bracketed(ast::ClassBracketed { + span: span(0..8), + negated: false, + kind: union( + span(1..7), + vec![ + range(span(1..4), 'a', 'c'), + range(span(4..7), 'x', 'z'), + ] + ), + })) + ); + assert_eq!( + parser(r"[\w&&a-cx-z]").parse(), + Ok(Ast::class_bracketed(ast::ClassBracketed { + span: span(0..12), + negated: false, + kind: intersection( + span(1..11), + itemset(item_perl(ast::ClassPerl { + span: span(1..3), + kind: ast::ClassPerlKind::Word, + negated: false, + })), + union( + span(5..11), + vec![ + range(span(5..8), 'a', 'c'), + range(span(8..11), 'x', 'z'), + ] + ), + ), + })) + ); + assert_eq!( + parser(r"[a-cx-z&&\w]").parse(), + Ok(Ast::class_bracketed(ast::ClassBracketed { + span: span(0..12), + negated: false, + kind: intersection( + span(1..11), + union( + span(1..7), + vec![ + range(span(1..4), 'a', 'c'), + range(span(4..7), 'x', 'z'), + ] + ), + itemset(item_perl(ast::ClassPerl { + span: span(9..11), + kind: ast::ClassPerlKind::Word, + negated: false, + })), + ), + })) + ); + assert_eq!( + parser(r"[a--b--c]").parse(), + Ok(Ast::class_bracketed(ast::ClassBracketed { + span: span(0..9), + negated: false, + kind: difference( + span(1..8), + difference( + span(1..5), + itemset(lit(span(1..2), 'a')), + itemset(lit(span(4..5), 'b')), + ), + itemset(lit(span(7..8), 'c')), + ), + })) + ); + assert_eq!( + parser(r"[a~~b~~c]").parse(), + Ok(Ast::class_bracketed(ast::ClassBracketed { + span: span(0..9), + negated: false, + kind: symdifference( + span(1..8), + symdifference( + span(1..5), + itemset(lit(span(1..2), 'a')), + itemset(lit(span(4..5), 'b')), + ), + itemset(lit(span(7..8), 'c')), + ), + })) + ); + assert_eq!( + parser(r"[\^&&^]").parse(), + Ok(Ast::class_bracketed(ast::ClassBracketed { + span: span(0..7), + negated: false, + kind: intersection( + span(1..6), + itemset(ast::ClassSetItem::Literal(ast::Literal { + span: span(1..3), + kind: ast::LiteralKind::Meta, + c: '^', + })), + itemset(lit(span(5..6), '^')), + ), + })) + ); + assert_eq!( + parser(r"[\&&&&]").parse(), + Ok(Ast::class_bracketed(ast::ClassBracketed { + span: span(0..7), + negated: false, + kind: intersection( + span(1..6), + itemset(ast::ClassSetItem::Literal(ast::Literal { + span: span(1..3), + kind: ast::LiteralKind::Meta, + c: '&', + })), + itemset(lit(span(5..6), '&')), + ), + })) + ); + assert_eq!( + parser(r"[&&&&]").parse(), + Ok(Ast::class_bracketed(ast::ClassBracketed { + span: span(0..6), + negated: false, + kind: intersection( + 
span(1..5), + intersection( + span(1..3), + itemset(empty(span(1..1))), + itemset(empty(span(3..3))), + ), + itemset(empty(span(5..5))), + ), + })) + ); + + let pat = "[☃-⛄]"; + assert_eq!( + parser(pat).parse(), + Ok(Ast::class_bracketed(ast::ClassBracketed { + span: span_range(pat, 0..9), + negated: false, + kind: itemset(ast::ClassSetItem::Range(ast::ClassSetRange { + span: span_range(pat, 1..8), + start: ast::Literal { + span: span_range(pat, 1..4), + kind: ast::LiteralKind::Verbatim, + c: '☃', + }, + end: ast::Literal { + span: span_range(pat, 5..8), + kind: ast::LiteralKind::Verbatim, + c: '⛄', + }, + })), + })) + ); + + assert_eq!( + parser(r"[]]").parse(), + Ok(Ast::class_bracketed(ast::ClassBracketed { + span: span(0..3), + negated: false, + kind: itemset(lit(span(1..2), ']')), + })) + ); + assert_eq!( + parser(r"[]\[]").parse(), + Ok(Ast::class_bracketed(ast::ClassBracketed { + span: span(0..5), + negated: false, + kind: union( + span(1..4), + vec![ + lit(span(1..2), ']'), + ast::ClassSetItem::Literal(ast::Literal { + span: span(2..4), + kind: ast::LiteralKind::Meta, + c: '[', + }), + ] + ), + })) + ); + assert_eq!( + parser(r"[\[]]").parse(), + Ok(concat( + 0..5, + vec![ + Ast::class_bracketed(ast::ClassBracketed { + span: span(0..4), + negated: false, + kind: itemset(ast::ClassSetItem::Literal( + ast::Literal { + span: span(1..3), + kind: ast::LiteralKind::Meta, + c: '[', + } + )), + }), + Ast::literal(ast::Literal { + span: span(4..5), + kind: ast::LiteralKind::Verbatim, + c: ']', + }), + ] + )) + ); + + assert_eq!( + parser("[").parse().unwrap_err(), + TestError { + span: span(0..1), + kind: ast::ErrorKind::ClassUnclosed, + } + ); + assert_eq!( + parser("[[").parse().unwrap_err(), + TestError { + span: span(1..2), + kind: ast::ErrorKind::ClassUnclosed, + } + ); + assert_eq!( + parser("[[-]").parse().unwrap_err(), + TestError { + span: span(0..1), + kind: ast::ErrorKind::ClassUnclosed, + } + ); + assert_eq!( + parser("[[[:alnum:]").parse().unwrap_err(), + TestError { + span: span(1..2), + kind: ast::ErrorKind::ClassUnclosed, + } + ); + assert_eq!( + parser(r"[\b]").parse().unwrap_err(), + TestError { + span: span(1..3), + kind: ast::ErrorKind::ClassEscapeInvalid, + } + ); + assert_eq!( + parser(r"[\w-a]").parse().unwrap_err(), + TestError { + span: span(1..3), + kind: ast::ErrorKind::ClassRangeLiteral, + } + ); + assert_eq!( + parser(r"[a-\w]").parse().unwrap_err(), + TestError { + span: span(3..5), + kind: ast::ErrorKind::ClassRangeLiteral, + } + ); + assert_eq!( + parser(r"[z-a]").parse().unwrap_err(), + TestError { + span: span(1..4), + kind: ast::ErrorKind::ClassRangeInvalid, + } + ); + + assert_eq!( + parser_ignore_whitespace("[a ").parse().unwrap_err(), + TestError { + span: span(0..1), + kind: ast::ErrorKind::ClassUnclosed, + } + ); + assert_eq!( + parser_ignore_whitespace("[a- ").parse().unwrap_err(), + TestError { + span: span(0..1), + kind: ast::ErrorKind::ClassUnclosed, + } + ); + } + + #[test] + fn parse_set_class_open() { + assert_eq!(parser("[a]").parse_set_class_open(), { + let set = ast::ClassBracketed { + span: span(0..1), + negated: false, + kind: ast::ClassSet::union(ast::ClassSetUnion { + span: span(1..1), + items: vec![], + }), + }; + let union = ast::ClassSetUnion { span: span(1..1), items: vec![] }; + Ok((set, union)) + }); + assert_eq!( + parser_ignore_whitespace("[ a]").parse_set_class_open(), + { + let set = ast::ClassBracketed { + span: span(0..4), + negated: false, + kind: ast::ClassSet::union(ast::ClassSetUnion { + span: span(4..4), + items: 
vec![], + }), + }; + let union = + ast::ClassSetUnion { span: span(4..4), items: vec![] }; + Ok((set, union)) + } + ); + assert_eq!(parser("[^a]").parse_set_class_open(), { + let set = ast::ClassBracketed { + span: span(0..2), + negated: true, + kind: ast::ClassSet::union(ast::ClassSetUnion { + span: span(2..2), + items: vec![], + }), + }; + let union = ast::ClassSetUnion { span: span(2..2), items: vec![] }; + Ok((set, union)) + }); + assert_eq!( + parser_ignore_whitespace("[ ^ a]").parse_set_class_open(), + { + let set = ast::ClassBracketed { + span: span(0..4), + negated: true, + kind: ast::ClassSet::union(ast::ClassSetUnion { + span: span(4..4), + items: vec![], + }), + }; + let union = + ast::ClassSetUnion { span: span(4..4), items: vec![] }; + Ok((set, union)) + } + ); + assert_eq!(parser("[-a]").parse_set_class_open(), { + let set = ast::ClassBracketed { + span: span(0..2), + negated: false, + kind: ast::ClassSet::union(ast::ClassSetUnion { + span: span(1..1), + items: vec![], + }), + }; + let union = ast::ClassSetUnion { + span: span(1..2), + items: vec![ast::ClassSetItem::Literal(ast::Literal { + span: span(1..2), + kind: ast::LiteralKind::Verbatim, + c: '-', + })], + }; + Ok((set, union)) + }); + assert_eq!( + parser_ignore_whitespace("[ - a]").parse_set_class_open(), + { + let set = ast::ClassBracketed { + span: span(0..4), + negated: false, + kind: ast::ClassSet::union(ast::ClassSetUnion { + span: span(2..2), + items: vec![], + }), + }; + let union = ast::ClassSetUnion { + span: span(2..3), + items: vec![ast::ClassSetItem::Literal(ast::Literal { + span: span(2..3), + kind: ast::LiteralKind::Verbatim, + c: '-', + })], + }; + Ok((set, union)) + } + ); + assert_eq!(parser("[^-a]").parse_set_class_open(), { + let set = ast::ClassBracketed { + span: span(0..3), + negated: true, + kind: ast::ClassSet::union(ast::ClassSetUnion { + span: span(2..2), + items: vec![], + }), + }; + let union = ast::ClassSetUnion { + span: span(2..3), + items: vec![ast::ClassSetItem::Literal(ast::Literal { + span: span(2..3), + kind: ast::LiteralKind::Verbatim, + c: '-', + })], + }; + Ok((set, union)) + }); + assert_eq!(parser("[--a]").parse_set_class_open(), { + let set = ast::ClassBracketed { + span: span(0..3), + negated: false, + kind: ast::ClassSet::union(ast::ClassSetUnion { + span: span(1..1), + items: vec![], + }), + }; + let union = ast::ClassSetUnion { + span: span(1..3), + items: vec![ + ast::ClassSetItem::Literal(ast::Literal { + span: span(1..2), + kind: ast::LiteralKind::Verbatim, + c: '-', + }), + ast::ClassSetItem::Literal(ast::Literal { + span: span(2..3), + kind: ast::LiteralKind::Verbatim, + c: '-', + }), + ], + }; + Ok((set, union)) + }); + assert_eq!(parser("[]a]").parse_set_class_open(), { + let set = ast::ClassBracketed { + span: span(0..2), + negated: false, + kind: ast::ClassSet::union(ast::ClassSetUnion { + span: span(1..1), + items: vec![], + }), + }; + let union = ast::ClassSetUnion { + span: span(1..2), + items: vec![ast::ClassSetItem::Literal(ast::Literal { + span: span(1..2), + kind: ast::LiteralKind::Verbatim, + c: ']', + })], + }; + Ok((set, union)) + }); + assert_eq!( + parser_ignore_whitespace("[ ] a]").parse_set_class_open(), + { + let set = ast::ClassBracketed { + span: span(0..4), + negated: false, + kind: ast::ClassSet::union(ast::ClassSetUnion { + span: span(2..2), + items: vec![], + }), + }; + let union = ast::ClassSetUnion { + span: span(2..3), + items: vec![ast::ClassSetItem::Literal(ast::Literal { + span: span(2..3), + kind: ast::LiteralKind::Verbatim, + c: ']', + 
})], + }; + Ok((set, union)) + } + ); + assert_eq!(parser("[^]a]").parse_set_class_open(), { + let set = ast::ClassBracketed { + span: span(0..3), + negated: true, + kind: ast::ClassSet::union(ast::ClassSetUnion { + span: span(2..2), + items: vec![], + }), + }; + let union = ast::ClassSetUnion { + span: span(2..3), + items: vec![ast::ClassSetItem::Literal(ast::Literal { + span: span(2..3), + kind: ast::LiteralKind::Verbatim, + c: ']', + })], + }; + Ok((set, union)) + }); + assert_eq!(parser("[-]a]").parse_set_class_open(), { + let set = ast::ClassBracketed { + span: span(0..2), + negated: false, + kind: ast::ClassSet::union(ast::ClassSetUnion { + span: span(1..1), + items: vec![], + }), + }; + let union = ast::ClassSetUnion { + span: span(1..2), + items: vec![ast::ClassSetItem::Literal(ast::Literal { + span: span(1..2), + kind: ast::LiteralKind::Verbatim, + c: '-', + })], + }; + Ok((set, union)) + }); + + assert_eq!( + parser("[").parse_set_class_open().unwrap_err(), + TestError { + span: span(0..1), + kind: ast::ErrorKind::ClassUnclosed, + } + ); + assert_eq!( + parser_ignore_whitespace("[ ") + .parse_set_class_open() + .unwrap_err(), + TestError { + span: span(0..5), + kind: ast::ErrorKind::ClassUnclosed, + } + ); + assert_eq!( + parser("[^").parse_set_class_open().unwrap_err(), + TestError { + span: span(0..2), + kind: ast::ErrorKind::ClassUnclosed, + } + ); + assert_eq!( + parser("[]").parse_set_class_open().unwrap_err(), + TestError { + span: span(0..2), + kind: ast::ErrorKind::ClassUnclosed, + } + ); + assert_eq!( + parser("[-").parse_set_class_open().unwrap_err(), + TestError { + span: span(0..0), + kind: ast::ErrorKind::ClassUnclosed, + } + ); + assert_eq!( + parser("[--").parse_set_class_open().unwrap_err(), + TestError { + span: span(0..0), + kind: ast::ErrorKind::ClassUnclosed, + } + ); + + // See: https://github.com/rust-lang/regex/issues/792 + assert_eq!( + parser("(?x)[-#]").parse_with_comments().unwrap_err(), + TestError { + span: span(4..4), + kind: ast::ErrorKind::ClassUnclosed, + } + ); + } + + #[test] + fn maybe_parse_ascii_class() { + assert_eq!( + parser(r"[:alnum:]").maybe_parse_ascii_class(), + Some(ast::ClassAscii { + span: span(0..9), + kind: ast::ClassAsciiKind::Alnum, + negated: false, + }) + ); + assert_eq!( + parser(r"[:alnum:]A").maybe_parse_ascii_class(), + Some(ast::ClassAscii { + span: span(0..9), + kind: ast::ClassAsciiKind::Alnum, + negated: false, + }) + ); + assert_eq!( + parser(r"[:^alnum:]").maybe_parse_ascii_class(), + Some(ast::ClassAscii { + span: span(0..10), + kind: ast::ClassAsciiKind::Alnum, + negated: true, + }) + ); + + let p = parser(r"[:"); + assert_eq!(p.maybe_parse_ascii_class(), None); + assert_eq!(p.offset(), 0); + + let p = parser(r"[:^"); + assert_eq!(p.maybe_parse_ascii_class(), None); + assert_eq!(p.offset(), 0); + + let p = parser(r"[^:alnum:]"); + assert_eq!(p.maybe_parse_ascii_class(), None); + assert_eq!(p.offset(), 0); + + let p = parser(r"[:alnnum:]"); + assert_eq!(p.maybe_parse_ascii_class(), None); + assert_eq!(p.offset(), 0); + + let p = parser(r"[:alnum]"); + assert_eq!(p.maybe_parse_ascii_class(), None); + assert_eq!(p.offset(), 0); + + let p = parser(r"[:alnum:"); + assert_eq!(p.maybe_parse_ascii_class(), None); + assert_eq!(p.offset(), 0); + } + + #[test] + fn parse_unicode_class() { + assert_eq!( + parser(r"\pN").parse_escape(), + Ok(Primitive::Unicode(ast::ClassUnicode { + span: span(0..3), + negated: false, + kind: ast::ClassUnicodeKind::OneLetter('N'), + })) + ); + assert_eq!( + parser(r"\PN").parse_escape(), + 
Ok(Primitive::Unicode(ast::ClassUnicode { + span: span(0..3), + negated: true, + kind: ast::ClassUnicodeKind::OneLetter('N'), + })) + ); + assert_eq!( + parser(r"\p{N}").parse_escape(), + Ok(Primitive::Unicode(ast::ClassUnicode { + span: span(0..5), + negated: false, + kind: ast::ClassUnicodeKind::Named(s("N")), + })) + ); + assert_eq!( + parser(r"\P{N}").parse_escape(), + Ok(Primitive::Unicode(ast::ClassUnicode { + span: span(0..5), + negated: true, + kind: ast::ClassUnicodeKind::Named(s("N")), + })) + ); + assert_eq!( + parser(r"\p{Greek}").parse_escape(), + Ok(Primitive::Unicode(ast::ClassUnicode { + span: span(0..9), + negated: false, + kind: ast::ClassUnicodeKind::Named(s("Greek")), + })) + ); + + assert_eq!( + parser(r"\p{scx:Katakana}").parse_escape(), + Ok(Primitive::Unicode(ast::ClassUnicode { + span: span(0..16), + negated: false, + kind: ast::ClassUnicodeKind::NamedValue { + op: ast::ClassUnicodeOpKind::Colon, + name: s("scx"), + value: s("Katakana"), + }, + })) + ); + assert_eq!( + parser(r"\p{scx=Katakana}").parse_escape(), + Ok(Primitive::Unicode(ast::ClassUnicode { + span: span(0..16), + negated: false, + kind: ast::ClassUnicodeKind::NamedValue { + op: ast::ClassUnicodeOpKind::Equal, + name: s("scx"), + value: s("Katakana"), + }, + })) + ); + assert_eq!( + parser(r"\p{scx!=Katakana}").parse_escape(), + Ok(Primitive::Unicode(ast::ClassUnicode { + span: span(0..17), + negated: false, + kind: ast::ClassUnicodeKind::NamedValue { + op: ast::ClassUnicodeOpKind::NotEqual, + name: s("scx"), + value: s("Katakana"), + }, + })) + ); + + assert_eq!( + parser(r"\p{:}").parse_escape(), + Ok(Primitive::Unicode(ast::ClassUnicode { + span: span(0..5), + negated: false, + kind: ast::ClassUnicodeKind::NamedValue { + op: ast::ClassUnicodeOpKind::Colon, + name: s(""), + value: s(""), + }, + })) + ); + assert_eq!( + parser(r"\p{=}").parse_escape(), + Ok(Primitive::Unicode(ast::ClassUnicode { + span: span(0..5), + negated: false, + kind: ast::ClassUnicodeKind::NamedValue { + op: ast::ClassUnicodeOpKind::Equal, + name: s(""), + value: s(""), + }, + })) + ); + assert_eq!( + parser(r"\p{!=}").parse_escape(), + Ok(Primitive::Unicode(ast::ClassUnicode { + span: span(0..6), + negated: false, + kind: ast::ClassUnicodeKind::NamedValue { + op: ast::ClassUnicodeOpKind::NotEqual, + name: s(""), + value: s(""), + }, + })) + ); + + assert_eq!( + parser(r"\p").parse_escape().unwrap_err(), + TestError { + span: span(2..2), + kind: ast::ErrorKind::EscapeUnexpectedEof, + } + ); + assert_eq!( + parser(r"\p{").parse_escape().unwrap_err(), + TestError { + span: span(3..3), + kind: ast::ErrorKind::EscapeUnexpectedEof, + } + ); + assert_eq!( + parser(r"\p{N").parse_escape().unwrap_err(), + TestError { + span: span(4..4), + kind: ast::ErrorKind::EscapeUnexpectedEof, + } + ); + assert_eq!( + parser(r"\p{Greek").parse_escape().unwrap_err(), + TestError { + span: span(8..8), + kind: ast::ErrorKind::EscapeUnexpectedEof, + } + ); + + assert_eq!( + parser(r"\pNz").parse(), + Ok(Ast::concat(ast::Concat { + span: span(0..4), + asts: vec![ + Ast::class_unicode(ast::ClassUnicode { + span: span(0..3), + negated: false, + kind: ast::ClassUnicodeKind::OneLetter('N'), + }), + Ast::literal(ast::Literal { + span: span(3..4), + kind: ast::LiteralKind::Verbatim, + c: 'z', + }), + ], + })) + ); + assert_eq!( + parser(r"\p{Greek}z").parse(), + Ok(Ast::concat(ast::Concat { + span: span(0..10), + asts: vec![ + Ast::class_unicode(ast::ClassUnicode { + span: span(0..9), + negated: false, + kind: ast::ClassUnicodeKind::Named(s("Greek")), + }), 
+ Ast::literal(ast::Literal { + span: span(9..10), + kind: ast::LiteralKind::Verbatim, + c: 'z', + }), + ], + })) + ); + assert_eq!( + parser(r"\p\{").parse().unwrap_err(), + TestError { + span: span(2..3), + kind: ast::ErrorKind::UnicodeClassInvalid, + } + ); + assert_eq!( + parser(r"\P\{").parse().unwrap_err(), + TestError { + span: span(2..3), + kind: ast::ErrorKind::UnicodeClassInvalid, + } + ); + } + + #[test] + fn parse_perl_class() { + assert_eq!( + parser(r"\d").parse_escape(), + Ok(Primitive::Perl(ast::ClassPerl { + span: span(0..2), + kind: ast::ClassPerlKind::Digit, + negated: false, + })) + ); + assert_eq!( + parser(r"\D").parse_escape(), + Ok(Primitive::Perl(ast::ClassPerl { + span: span(0..2), + kind: ast::ClassPerlKind::Digit, + negated: true, + })) + ); + assert_eq!( + parser(r"\s").parse_escape(), + Ok(Primitive::Perl(ast::ClassPerl { + span: span(0..2), + kind: ast::ClassPerlKind::Space, + negated: false, + })) + ); + assert_eq!( + parser(r"\S").parse_escape(), + Ok(Primitive::Perl(ast::ClassPerl { + span: span(0..2), + kind: ast::ClassPerlKind::Space, + negated: true, + })) + ); + assert_eq!( + parser(r"\w").parse_escape(), + Ok(Primitive::Perl(ast::ClassPerl { + span: span(0..2), + kind: ast::ClassPerlKind::Word, + negated: false, + })) + ); + assert_eq!( + parser(r"\W").parse_escape(), + Ok(Primitive::Perl(ast::ClassPerl { + span: span(0..2), + kind: ast::ClassPerlKind::Word, + negated: true, + })) + ); + + assert_eq!( + parser(r"\d").parse(), + Ok(Ast::class_perl(ast::ClassPerl { + span: span(0..2), + kind: ast::ClassPerlKind::Digit, + negated: false, + })) + ); + assert_eq!( + parser(r"\dz").parse(), + Ok(Ast::concat(ast::Concat { + span: span(0..3), + asts: vec![ + Ast::class_perl(ast::ClassPerl { + span: span(0..2), + kind: ast::ClassPerlKind::Digit, + negated: false, + }), + Ast::literal(ast::Literal { + span: span(2..3), + kind: ast::LiteralKind::Verbatim, + c: 'z', + }), + ], + })) + ); + } + + // This tests a bug fix where the nest limit checker wasn't decrementing + // its depth during post-traversal, which causes long regexes to trip + // the default limit too aggressively. + #[test] + fn regression_454_nest_too_big() { + let pattern = r#" + 2(?: + [45]\d{3}| + 7(?: + 1[0-267]| + 2[0-289]| + 3[0-29]| + 4[01]| + 5[1-3]| + 6[013]| + 7[0178]| + 91 + )| + 8(?: + 0[125]| + [139][1-6]| + 2[0157-9]| + 41| + 6[1-35]| + 7[1-5]| + 8[1-8]| + 90 + )| + 9(?: + 0[0-2]| + 1[0-4]| + 2[568]| + 3[3-6]| + 5[5-7]| + 6[0167]| + 7[15]| + 8[0146-9] + ) + )\d{4} + "#; + assert!(parser_nest_limit(pattern, 50).parse().is_ok()); + } + + // This tests that we treat a trailing `-` in a character class as a + // literal `-` even when whitespace mode is enabled and there is whitespace + // after the trailing `-`. + #[test] + fn regression_455_trailing_dash_ignore_whitespace() { + assert!(parser("(?x)[ / - ]").parse().is_ok()); + assert!(parser("(?x)[ a - ]").parse().is_ok()); + assert!(parser( + "(?x)[ + a + - ] + " + ) + .parse() + .is_ok()); + assert!(parser( + "(?x)[ + a # wat + - ] + " + ) + .parse() + .is_ok()); + + assert!(parser("(?x)[ / -").parse().is_err()); + assert!(parser("(?x)[ / - ").parse().is_err()); + assert!(parser( + "(?x)[ + / - + " + ) + .parse() + .is_err()); + assert!(parser( + "(?x)[ + / - # wat + " + ) + .parse() + .is_err()); + } +} diff --git a/vendor/regex-syntax/src/ast/print.rs b/vendor/regex-syntax/src/ast/print.rs new file mode 100644 index 00000000000000..556d91f4a0087a --- /dev/null +++ b/vendor/regex-syntax/src/ast/print.rs @@ -0,0 +1,577 @@ +/*! 
+This module provides a regular expression printer for `Ast`.
+*/
+
+use core::fmt;
+
+use crate::ast::{
+ self,
+ visitor::{self, Visitor},
+ Ast,
+};
+
+/// A builder for constructing a printer.
+///
+/// Note that since a printer doesn't have any configuration knobs, this type
+/// remains unexported.
+#[derive(Clone, Debug)]
+struct PrinterBuilder {
+ _priv: (),
+}
+
+impl Default for PrinterBuilder {
+ fn default() -> PrinterBuilder {
+ PrinterBuilder::new()
+ }
+}
+
+impl PrinterBuilder {
+ fn new() -> PrinterBuilder {
+ PrinterBuilder { _priv: () }
+ }
+
+ fn build(&self) -> Printer {
+ Printer { _priv: () }
+ }
+}
+
+/// A printer for a regular expression abstract syntax tree.
+///
+/// A printer converts an abstract syntax tree (AST) to a regular expression
+/// pattern string. This particular printer uses constant stack space and heap
+/// space proportional to the size of the AST.
+///
+/// This printer will not necessarily preserve the original formatting of the
+/// regular expression pattern string. For example, all whitespace and comments
+/// are ignored.
+#[derive(Debug)]
+pub struct Printer {
+ _priv: (),
+}
+
+impl Printer {
+ /// Create a new printer.
+ pub fn new() -> Printer {
+ PrinterBuilder::new().build()
+ }
+
+ /// Print the given `Ast` to the given writer. The writer must implement
+ /// `fmt::Write`. Typical implementations of `fmt::Write` that can be used
+ /// here are a `fmt::Formatter` (which is available in `fmt::Display`
+ /// implementations) or a `&mut String`.
+ pub fn print<W: fmt::Write>(&mut self, ast: &Ast, wtr: W) -> fmt::Result {
+ visitor::visit(ast, Writer { wtr })
+ }
+}
+
+#[derive(Debug)]
+struct Writer<W> {
+ wtr: W,
+}
+
+impl<W: fmt::Write> Visitor for Writer<W> {
+ type Output = ();
+ type Err = fmt::Error;
+
+ fn finish(self) -> fmt::Result {
+ Ok(())
+ }
+
+ fn visit_pre(&mut self, ast: &Ast) -> fmt::Result {
+ match *ast {
+ Ast::Group(ref x) => self.fmt_group_pre(x),
+ Ast::ClassBracketed(ref x) => self.fmt_class_bracketed_pre(x),
+ _ => Ok(()),
+ }
+ }
+
+ fn visit_post(&mut self, ast: &Ast) -> fmt::Result {
+ match *ast {
+ Ast::Empty(_) => Ok(()),
+ Ast::Flags(ref x) => self.fmt_set_flags(x),
+ Ast::Literal(ref x) => self.fmt_literal(x),
+ Ast::Dot(_) => self.wtr.write_str("."),
+ Ast::Assertion(ref x) => self.fmt_assertion(x),
+ Ast::ClassPerl(ref x) => self.fmt_class_perl(x),
+ Ast::ClassUnicode(ref x) => self.fmt_class_unicode(x),
+ Ast::ClassBracketed(ref x) => self.fmt_class_bracketed_post(x),
+ Ast::Repetition(ref x) => self.fmt_repetition(x),
+ Ast::Group(ref x) => self.fmt_group_post(x),
+ Ast::Alternation(_) => Ok(()),
+ Ast::Concat(_) => Ok(()),
+ }
+ }
+
+ fn visit_alternation_in(&mut self) -> fmt::Result {
+ self.wtr.write_str("|")
+ }
+
+ fn visit_class_set_item_pre(
+ &mut self,
+ ast: &ast::ClassSetItem,
+ ) -> Result<(), Self::Err> {
+ match *ast {
+ ast::ClassSetItem::Bracketed(ref x) => {
+ self.fmt_class_bracketed_pre(x)
+ }
+ _ => Ok(()),
+ }
+ }
+
+ fn visit_class_set_item_post(
+ &mut self,
+ ast: &ast::ClassSetItem,
+ ) -> Result<(), Self::Err> {
+ use crate::ast::ClassSetItem::*;
+
+ match *ast {
+ Empty(_) => Ok(()),
+ Literal(ref x) => self.fmt_literal(x),
+ Range(ref x) => {
+ self.fmt_literal(&x.start)?;
+ self.wtr.write_str("-")?;
+ self.fmt_literal(&x.end)?;
+ Ok(())
+ }
+ Ascii(ref x) => self.fmt_class_ascii(x),
+ Unicode(ref x) => self.fmt_class_unicode(x),
+ Perl(ref x) => self.fmt_class_perl(x),
+ Bracketed(ref x) => self.fmt_class_bracketed_post(x),
+ Union(_) => Ok(()),
+ }
+ }
+
+ fn visit_class_set_binary_op_in(
+ &mut self,
+
ast: &ast::ClassSetBinaryOp, + ) -> Result<(), Self::Err> { + self.fmt_class_set_binary_op_kind(&ast.kind) + } +} + +impl Writer { + fn fmt_group_pre(&mut self, ast: &ast::Group) -> fmt::Result { + use crate::ast::GroupKind::*; + match ast.kind { + CaptureIndex(_) => self.wtr.write_str("("), + CaptureName { ref name, starts_with_p } => { + let start = if starts_with_p { "(?P<" } else { "(?<" }; + self.wtr.write_str(start)?; + self.wtr.write_str(&name.name)?; + self.wtr.write_str(">")?; + Ok(()) + } + NonCapturing(ref flags) => { + self.wtr.write_str("(?")?; + self.fmt_flags(flags)?; + self.wtr.write_str(":")?; + Ok(()) + } + } + } + + fn fmt_group_post(&mut self, _ast: &ast::Group) -> fmt::Result { + self.wtr.write_str(")") + } + + fn fmt_repetition(&mut self, ast: &ast::Repetition) -> fmt::Result { + use crate::ast::RepetitionKind::*; + match ast.op.kind { + ZeroOrOne if ast.greedy => self.wtr.write_str("?"), + ZeroOrOne => self.wtr.write_str("??"), + ZeroOrMore if ast.greedy => self.wtr.write_str("*"), + ZeroOrMore => self.wtr.write_str("*?"), + OneOrMore if ast.greedy => self.wtr.write_str("+"), + OneOrMore => self.wtr.write_str("+?"), + Range(ref x) => { + self.fmt_repetition_range(x)?; + if !ast.greedy { + self.wtr.write_str("?")?; + } + Ok(()) + } + } + } + + fn fmt_repetition_range( + &mut self, + ast: &ast::RepetitionRange, + ) -> fmt::Result { + use crate::ast::RepetitionRange::*; + match *ast { + Exactly(x) => write!(self.wtr, "{{{x}}}"), + AtLeast(x) => write!(self.wtr, "{{{x},}}"), + Bounded(x, y) => write!(self.wtr, "{{{x},{y}}}"), + } + } + + fn fmt_literal(&mut self, ast: &ast::Literal) -> fmt::Result { + use crate::ast::LiteralKind::*; + + match ast.kind { + Verbatim => self.wtr.write_char(ast.c), + Meta | Superfluous => write!(self.wtr, r"\{}", ast.c), + Octal => write!(self.wtr, r"\{:o}", u32::from(ast.c)), + HexFixed(ast::HexLiteralKind::X) => { + write!(self.wtr, r"\x{:02X}", u32::from(ast.c)) + } + HexFixed(ast::HexLiteralKind::UnicodeShort) => { + write!(self.wtr, r"\u{:04X}", u32::from(ast.c)) + } + HexFixed(ast::HexLiteralKind::UnicodeLong) => { + write!(self.wtr, r"\U{:08X}", u32::from(ast.c)) + } + HexBrace(ast::HexLiteralKind::X) => { + write!(self.wtr, r"\x{{{:X}}}", u32::from(ast.c)) + } + HexBrace(ast::HexLiteralKind::UnicodeShort) => { + write!(self.wtr, r"\u{{{:X}}}", u32::from(ast.c)) + } + HexBrace(ast::HexLiteralKind::UnicodeLong) => { + write!(self.wtr, r"\U{{{:X}}}", u32::from(ast.c)) + } + Special(ast::SpecialLiteralKind::Bell) => { + self.wtr.write_str(r"\a") + } + Special(ast::SpecialLiteralKind::FormFeed) => { + self.wtr.write_str(r"\f") + } + Special(ast::SpecialLiteralKind::Tab) => self.wtr.write_str(r"\t"), + Special(ast::SpecialLiteralKind::LineFeed) => { + self.wtr.write_str(r"\n") + } + Special(ast::SpecialLiteralKind::CarriageReturn) => { + self.wtr.write_str(r"\r") + } + Special(ast::SpecialLiteralKind::VerticalTab) => { + self.wtr.write_str(r"\v") + } + Special(ast::SpecialLiteralKind::Space) => { + self.wtr.write_str(r"\ ") + } + } + } + + fn fmt_assertion(&mut self, ast: &ast::Assertion) -> fmt::Result { + use crate::ast::AssertionKind::*; + match ast.kind { + StartLine => self.wtr.write_str("^"), + EndLine => self.wtr.write_str("$"), + StartText => self.wtr.write_str(r"\A"), + EndText => self.wtr.write_str(r"\z"), + WordBoundary => self.wtr.write_str(r"\b"), + NotWordBoundary => self.wtr.write_str(r"\B"), + WordBoundaryStart => self.wtr.write_str(r"\b{start}"), + WordBoundaryEnd => self.wtr.write_str(r"\b{end}"), + WordBoundaryStartAngle 
=> self.wtr.write_str(r"\<"), + WordBoundaryEndAngle => self.wtr.write_str(r"\>"), + WordBoundaryStartHalf => self.wtr.write_str(r"\b{start-half}"), + WordBoundaryEndHalf => self.wtr.write_str(r"\b{end-half}"), + } + } + + fn fmt_set_flags(&mut self, ast: &ast::SetFlags) -> fmt::Result { + self.wtr.write_str("(?")?; + self.fmt_flags(&ast.flags)?; + self.wtr.write_str(")")?; + Ok(()) + } + + fn fmt_flags(&mut self, ast: &ast::Flags) -> fmt::Result { + use crate::ast::{Flag, FlagsItemKind}; + + for item in &ast.items { + match item.kind { + FlagsItemKind::Negation => self.wtr.write_str("-"), + FlagsItemKind::Flag(ref flag) => match *flag { + Flag::CaseInsensitive => self.wtr.write_str("i"), + Flag::MultiLine => self.wtr.write_str("m"), + Flag::DotMatchesNewLine => self.wtr.write_str("s"), + Flag::SwapGreed => self.wtr.write_str("U"), + Flag::Unicode => self.wtr.write_str("u"), + Flag::CRLF => self.wtr.write_str("R"), + Flag::IgnoreWhitespace => self.wtr.write_str("x"), + }, + }?; + } + Ok(()) + } + + fn fmt_class_bracketed_pre( + &mut self, + ast: &ast::ClassBracketed, + ) -> fmt::Result { + if ast.negated { + self.wtr.write_str("[^") + } else { + self.wtr.write_str("[") + } + } + + fn fmt_class_bracketed_post( + &mut self, + _ast: &ast::ClassBracketed, + ) -> fmt::Result { + self.wtr.write_str("]") + } + + fn fmt_class_set_binary_op_kind( + &mut self, + ast: &ast::ClassSetBinaryOpKind, + ) -> fmt::Result { + use crate::ast::ClassSetBinaryOpKind::*; + match *ast { + Intersection => self.wtr.write_str("&&"), + Difference => self.wtr.write_str("--"), + SymmetricDifference => self.wtr.write_str("~~"), + } + } + + fn fmt_class_perl(&mut self, ast: &ast::ClassPerl) -> fmt::Result { + use crate::ast::ClassPerlKind::*; + match ast.kind { + Digit if ast.negated => self.wtr.write_str(r"\D"), + Digit => self.wtr.write_str(r"\d"), + Space if ast.negated => self.wtr.write_str(r"\S"), + Space => self.wtr.write_str(r"\s"), + Word if ast.negated => self.wtr.write_str(r"\W"), + Word => self.wtr.write_str(r"\w"), + } + } + + fn fmt_class_ascii(&mut self, ast: &ast::ClassAscii) -> fmt::Result { + use crate::ast::ClassAsciiKind::*; + match ast.kind { + Alnum if ast.negated => self.wtr.write_str("[:^alnum:]"), + Alnum => self.wtr.write_str("[:alnum:]"), + Alpha if ast.negated => self.wtr.write_str("[:^alpha:]"), + Alpha => self.wtr.write_str("[:alpha:]"), + Ascii if ast.negated => self.wtr.write_str("[:^ascii:]"), + Ascii => self.wtr.write_str("[:ascii:]"), + Blank if ast.negated => self.wtr.write_str("[:^blank:]"), + Blank => self.wtr.write_str("[:blank:]"), + Cntrl if ast.negated => self.wtr.write_str("[:^cntrl:]"), + Cntrl => self.wtr.write_str("[:cntrl:]"), + Digit if ast.negated => self.wtr.write_str("[:^digit:]"), + Digit => self.wtr.write_str("[:digit:]"), + Graph if ast.negated => self.wtr.write_str("[:^graph:]"), + Graph => self.wtr.write_str("[:graph:]"), + Lower if ast.negated => self.wtr.write_str("[:^lower:]"), + Lower => self.wtr.write_str("[:lower:]"), + Print if ast.negated => self.wtr.write_str("[:^print:]"), + Print => self.wtr.write_str("[:print:]"), + Punct if ast.negated => self.wtr.write_str("[:^punct:]"), + Punct => self.wtr.write_str("[:punct:]"), + Space if ast.negated => self.wtr.write_str("[:^space:]"), + Space => self.wtr.write_str("[:space:]"), + Upper if ast.negated => self.wtr.write_str("[:^upper:]"), + Upper => self.wtr.write_str("[:upper:]"), + Word if ast.negated => self.wtr.write_str("[:^word:]"), + Word => self.wtr.write_str("[:word:]"), + Xdigit if ast.negated => 
self.wtr.write_str("[:^xdigit:]"), + Xdigit => self.wtr.write_str("[:xdigit:]"), + } + } + + fn fmt_class_unicode(&mut self, ast: &ast::ClassUnicode) -> fmt::Result { + use crate::ast::ClassUnicodeKind::*; + use crate::ast::ClassUnicodeOpKind::*; + + if ast.negated { + self.wtr.write_str(r"\P")?; + } else { + self.wtr.write_str(r"\p")?; + } + match ast.kind { + OneLetter(c) => self.wtr.write_char(c), + Named(ref x) => write!(self.wtr, "{{{}}}", x), + NamedValue { op: Equal, ref name, ref value } => { + write!(self.wtr, "{{{}={}}}", name, value) + } + NamedValue { op: Colon, ref name, ref value } => { + write!(self.wtr, "{{{}:{}}}", name, value) + } + NamedValue { op: NotEqual, ref name, ref value } => { + write!(self.wtr, "{{{}!={}}}", name, value) + } + } + } +} + +#[cfg(test)] +mod tests { + use alloc::string::String; + + use crate::ast::parse::ParserBuilder; + + use super::*; + + fn roundtrip(given: &str) { + roundtrip_with(|b| b, given); + } + + fn roundtrip_with(mut f: F, given: &str) + where + F: FnMut(&mut ParserBuilder) -> &mut ParserBuilder, + { + let mut builder = ParserBuilder::new(); + f(&mut builder); + let ast = builder.build().parse(given).unwrap(); + + let mut printer = Printer::new(); + let mut dst = String::new(); + printer.print(&ast, &mut dst).unwrap(); + assert_eq!(given, dst); + } + + #[test] + fn print_literal() { + roundtrip("a"); + roundtrip(r"\["); + roundtrip_with(|b| b.octal(true), r"\141"); + roundtrip(r"\x61"); + roundtrip(r"\x7F"); + roundtrip(r"\u0061"); + roundtrip(r"\U00000061"); + roundtrip(r"\x{61}"); + roundtrip(r"\x{7F}"); + roundtrip(r"\u{61}"); + roundtrip(r"\U{61}"); + + roundtrip(r"\a"); + roundtrip(r"\f"); + roundtrip(r"\t"); + roundtrip(r"\n"); + roundtrip(r"\r"); + roundtrip(r"\v"); + roundtrip(r"(?x)\ "); + } + + #[test] + fn print_dot() { + roundtrip("."); + } + + #[test] + fn print_concat() { + roundtrip("ab"); + roundtrip("abcde"); + roundtrip("a(bcd)ef"); + } + + #[test] + fn print_alternation() { + roundtrip("a|b"); + roundtrip("a|b|c|d|e"); + roundtrip("|a|b|c|d|e"); + roundtrip("|a|b|c|d|e|"); + roundtrip("a(b|c|d)|e|f"); + } + + #[test] + fn print_assertion() { + roundtrip(r"^"); + roundtrip(r"$"); + roundtrip(r"\A"); + roundtrip(r"\z"); + roundtrip(r"\b"); + roundtrip(r"\B"); + } + + #[test] + fn print_repetition() { + roundtrip("a?"); + roundtrip("a??"); + roundtrip("a*"); + roundtrip("a*?"); + roundtrip("a+"); + roundtrip("a+?"); + roundtrip("a{5}"); + roundtrip("a{5}?"); + roundtrip("a{5,}"); + roundtrip("a{5,}?"); + roundtrip("a{5,10}"); + roundtrip("a{5,10}?"); + } + + #[test] + fn print_flags() { + roundtrip("(?i)"); + roundtrip("(?-i)"); + roundtrip("(?s-i)"); + roundtrip("(?-si)"); + roundtrip("(?siUmux)"); + } + + #[test] + fn print_group() { + roundtrip("(?i:a)"); + roundtrip("(?Pa)"); + roundtrip("(?a)"); + roundtrip("(a)"); + } + + #[test] + fn print_class() { + roundtrip(r"[abc]"); + roundtrip(r"[a-z]"); + roundtrip(r"[^a-z]"); + roundtrip(r"[a-z0-9]"); + roundtrip(r"[-a-z0-9]"); + roundtrip(r"[-a-z0-9]"); + roundtrip(r"[a-z0-9---]"); + roundtrip(r"[a-z&&m-n]"); + roundtrip(r"[[a-z&&m-n]]"); + roundtrip(r"[a-z--m-n]"); + roundtrip(r"[a-z~~m-n]"); + roundtrip(r"[a-z[0-9]]"); + roundtrip(r"[a-z[^0-9]]"); + + roundtrip(r"\d"); + roundtrip(r"\D"); + roundtrip(r"\s"); + roundtrip(r"\S"); + roundtrip(r"\w"); + roundtrip(r"\W"); + + roundtrip(r"[[:alnum:]]"); + roundtrip(r"[[:^alnum:]]"); + roundtrip(r"[[:alpha:]]"); + roundtrip(r"[[:^alpha:]]"); + roundtrip(r"[[:ascii:]]"); + roundtrip(r"[[:^ascii:]]"); + 
roundtrip(r"[[:blank:]]"); + roundtrip(r"[[:^blank:]]"); + roundtrip(r"[[:cntrl:]]"); + roundtrip(r"[[:^cntrl:]]"); + roundtrip(r"[[:digit:]]"); + roundtrip(r"[[:^digit:]]"); + roundtrip(r"[[:graph:]]"); + roundtrip(r"[[:^graph:]]"); + roundtrip(r"[[:lower:]]"); + roundtrip(r"[[:^lower:]]"); + roundtrip(r"[[:print:]]"); + roundtrip(r"[[:^print:]]"); + roundtrip(r"[[:punct:]]"); + roundtrip(r"[[:^punct:]]"); + roundtrip(r"[[:space:]]"); + roundtrip(r"[[:^space:]]"); + roundtrip(r"[[:upper:]]"); + roundtrip(r"[[:^upper:]]"); + roundtrip(r"[[:word:]]"); + roundtrip(r"[[:^word:]]"); + roundtrip(r"[[:xdigit:]]"); + roundtrip(r"[[:^xdigit:]]"); + + roundtrip(r"\pL"); + roundtrip(r"\PL"); + roundtrip(r"\p{L}"); + roundtrip(r"\P{L}"); + roundtrip(r"\p{X=Y}"); + roundtrip(r"\P{X=Y}"); + roundtrip(r"\p{X:Y}"); + roundtrip(r"\P{X:Y}"); + roundtrip(r"\p{X!=Y}"); + roundtrip(r"\P{X!=Y}"); + } +} diff --git a/vendor/regex-syntax/src/ast/visitor.rs b/vendor/regex-syntax/src/ast/visitor.rs new file mode 100644 index 00000000000000..36cd713c0f3dc6 --- /dev/null +++ b/vendor/regex-syntax/src/ast/visitor.rs @@ -0,0 +1,522 @@ +use alloc::{vec, vec::Vec}; + +use crate::ast::{self, Ast}; + +/// A trait for visiting an abstract syntax tree (AST) in depth first order. +/// +/// The principle aim of this trait is to enable callers to perform case +/// analysis on an abstract syntax tree without necessarily using recursion. +/// In particular, this permits callers to do case analysis with constant stack +/// usage, which can be important since the size of an abstract syntax tree +/// may be proportional to end user input. +/// +/// Typical usage of this trait involves providing an implementation and then +/// running it using the [`visit`] function. +/// +/// Note that the abstract syntax tree for a regular expression is quite +/// complex. Unless you specifically need it, you might be able to use the much +/// simpler [high-level intermediate representation](crate::hir::Hir) and its +/// [corresponding `Visitor` trait](crate::hir::Visitor) instead. +pub trait Visitor { + /// The result of visiting an AST. + type Output; + /// An error that visiting an AST might return. + type Err; + + /// All implementors of `Visitor` must provide a `finish` method, which + /// yields the result of visiting the AST or an error. + fn finish(self) -> Result; + + /// This method is called before beginning traversal of the AST. + fn start(&mut self) {} + + /// This method is called on an `Ast` before descending into child `Ast` + /// nodes. + fn visit_pre(&mut self, _ast: &Ast) -> Result<(), Self::Err> { + Ok(()) + } + + /// This method is called on an `Ast` after descending all of its child + /// `Ast` nodes. + fn visit_post(&mut self, _ast: &Ast) -> Result<(), Self::Err> { + Ok(()) + } + + /// This method is called between child nodes of an + /// [`Alternation`](ast::Alternation). + fn visit_alternation_in(&mut self) -> Result<(), Self::Err> { + Ok(()) + } + + /// This method is called between child nodes of a concatenation. + fn visit_concat_in(&mut self) -> Result<(), Self::Err> { + Ok(()) + } + + /// This method is called on every [`ClassSetItem`](ast::ClassSetItem) + /// before descending into child nodes. + fn visit_class_set_item_pre( + &mut self, + _ast: &ast::ClassSetItem, + ) -> Result<(), Self::Err> { + Ok(()) + } + + /// This method is called on every [`ClassSetItem`](ast::ClassSetItem) + /// after descending into child nodes. 
+ fn visit_class_set_item_post(
+ &mut self,
+ _ast: &ast::ClassSetItem,
+ ) -> Result<(), Self::Err> {
+ Ok(())
+ }
+
+ /// This method is called on every
+ /// [`ClassSetBinaryOp`](ast::ClassSetBinaryOp) before descending into
+ /// child nodes.
+ fn visit_class_set_binary_op_pre(
+ &mut self,
+ _ast: &ast::ClassSetBinaryOp,
+ ) -> Result<(), Self::Err> {
+ Ok(())
+ }
+
+ /// This method is called on every
+ /// [`ClassSetBinaryOp`](ast::ClassSetBinaryOp) after descending into child
+ /// nodes.
+ fn visit_class_set_binary_op_post(
+ &mut self,
+ _ast: &ast::ClassSetBinaryOp,
+ ) -> Result<(), Self::Err> {
+ Ok(())
+ }
+
+ /// This method is called between the left hand and right hand child nodes
+ /// of a [`ClassSetBinaryOp`](ast::ClassSetBinaryOp).
+ fn visit_class_set_binary_op_in(
+ &mut self,
+ _ast: &ast::ClassSetBinaryOp,
+ ) -> Result<(), Self::Err> {
+ Ok(())
+ }
+}
+
+/// Executes an implementation of `Visitor` in constant stack space.
+///
+/// This function will visit every node in the given `Ast` while calling the
+/// appropriate methods provided by the [`Visitor`] trait.
+///
+/// The primary use case for this method is when one wants to perform case
+/// analysis over an `Ast` without using a stack size proportional to the depth
+/// of the `Ast`. Namely, this method will instead use constant stack size, but
+/// will use heap space proportional to the size of the `Ast`. This may be
+/// desirable in cases where the size of `Ast` is proportional to end user
+/// input.
+///
+/// If the visitor returns an error at any point, then visiting is stopped and
+/// the error is returned.
+pub fn visit<V: Visitor>(ast: &Ast, visitor: V) -> Result<V::Output, V::Err> {
+ HeapVisitor::new().visit(ast, visitor)
+}
+
+/// HeapVisitor visits every item in an `Ast` recursively using constant stack
+/// size and a heap size proportional to the size of the `Ast`.
+struct HeapVisitor<'a> {
+ /// A stack of `Ast` nodes. This is roughly analogous to the call stack
+ /// used in a typical recursive visitor.
+ stack: Vec<(&'a Ast, Frame<'a>)>,
+ /// Similar to the `Ast` stack above, but is used only for character
+ /// classes. In particular, character classes embed their own mini
+ /// recursive syntax.
+ stack_class: Vec<(ClassInduct<'a>, ClassFrame<'a>)>,
+}
+
+/// Represents a single stack frame while performing structural induction over
+/// an `Ast`.
+enum Frame<'a> {
+ /// A stack frame allocated just before descending into a repetition
+ /// operator's child node.
+ Repetition(&'a ast::Repetition),
+ /// A stack frame allocated just before descending into a group's child
+ /// node.
+ Group(&'a ast::Group),
+ /// The stack frame used while visiting every child node of a concatenation
+ /// of expressions.
+ Concat {
+ /// The child node we are currently visiting.
+ head: &'a Ast,
+ /// The remaining child nodes to visit (which may be empty).
+ tail: &'a [Ast],
+ },
+ /// The stack frame used while visiting every child node of an alternation
+ /// of expressions.
+ Alternation {
+ /// The child node we are currently visiting.
+ head: &'a Ast,
+ /// The remaining child nodes to visit (which may be empty).
+ tail: &'a [Ast],
+ },
+}
+
+/// Represents a single stack frame while performing structural induction over
+/// a character class.
+enum ClassFrame<'a> {
+ /// The stack frame used while visiting every child node of a union of
+ /// character class items.
+ Union {
+ /// The child node we are currently visiting.
+ head: &'a ast::ClassSetItem,
+ /// The remaining child nodes to visit (which may be empty).
+ tail: &'a [ast::ClassSetItem], + }, + /// The stack frame used while a binary class operation. + Binary { op: &'a ast::ClassSetBinaryOp }, + /// A stack frame allocated just before descending into a binary operator's + /// left hand child node. + BinaryLHS { + op: &'a ast::ClassSetBinaryOp, + lhs: &'a ast::ClassSet, + rhs: &'a ast::ClassSet, + }, + /// A stack frame allocated just before descending into a binary operator's + /// right hand child node. + BinaryRHS { op: &'a ast::ClassSetBinaryOp, rhs: &'a ast::ClassSet }, +} + +/// A representation of the inductive step when performing structural induction +/// over a character class. +/// +/// Note that there is no analogous explicit type for the inductive step for +/// `Ast` nodes because the inductive step is just an `Ast`. For character +/// classes, the inductive step can produce one of two possible child nodes: +/// an item or a binary operation. (An item cannot be a binary operation +/// because that would imply binary operations can be unioned in the concrete +/// syntax, which is not possible.) +enum ClassInduct<'a> { + Item(&'a ast::ClassSetItem), + BinaryOp(&'a ast::ClassSetBinaryOp), +} + +impl<'a> HeapVisitor<'a> { + fn new() -> HeapVisitor<'a> { + HeapVisitor { stack: vec![], stack_class: vec![] } + } + + fn visit( + &mut self, + mut ast: &'a Ast, + mut visitor: V, + ) -> Result { + self.stack.clear(); + self.stack_class.clear(); + + visitor.start(); + loop { + visitor.visit_pre(ast)?; + if let Some(x) = self.induct(ast, &mut visitor)? { + let child = x.child(); + self.stack.push((ast, x)); + ast = child; + continue; + } + // No induction means we have a base case, so we can post visit + // it now. + visitor.visit_post(ast)?; + + // At this point, we now try to pop our call stack until it is + // either empty or we hit another inductive case. + loop { + let (post_ast, frame) = match self.stack.pop() { + None => return visitor.finish(), + Some((post_ast, frame)) => (post_ast, frame), + }; + // If this is a concat/alternate, then we might have additional + // inductive steps to process. + if let Some(x) = self.pop(frame) { + match x { + Frame::Alternation { .. } => { + visitor.visit_alternation_in()?; + } + Frame::Concat { .. } => { + visitor.visit_concat_in()?; + } + _ => {} + } + ast = x.child(); + self.stack.push((post_ast, x)); + break; + } + // Otherwise, we've finished visiting all the child nodes for + // this AST, so we can post visit it now. + visitor.visit_post(post_ast)?; + } + } + } + + /// Build a stack frame for the given AST if one is needed (which occurs if + /// and only if there are child nodes in the AST). Otherwise, return None. + /// + /// If this visits a class, then the underlying visitor implementation may + /// return an error which will be passed on here. + fn induct( + &mut self, + ast: &'a Ast, + visitor: &mut V, + ) -> Result>, V::Err> { + Ok(match *ast { + Ast::ClassBracketed(ref x) => { + self.visit_class(x, visitor)?; + None + } + Ast::Repetition(ref x) => Some(Frame::Repetition(x)), + Ast::Group(ref x) => Some(Frame::Group(x)), + Ast::Concat(ref x) if x.asts.is_empty() => None, + Ast::Concat(ref x) => { + Some(Frame::Concat { head: &x.asts[0], tail: &x.asts[1..] }) + } + Ast::Alternation(ref x) if x.asts.is_empty() => None, + Ast::Alternation(ref x) => Some(Frame::Alternation { + head: &x.asts[0], + tail: &x.asts[1..], + }), + _ => None, + }) + } + + /// Pops the given frame. If the frame has an additional inductive step, + /// then return it, otherwise return `None`. 
+ fn pop(&self, induct: Frame<'a>) -> Option> { + match induct { + Frame::Repetition(_) => None, + Frame::Group(_) => None, + Frame::Concat { tail, .. } => { + if tail.is_empty() { + None + } else { + Some(Frame::Concat { head: &tail[0], tail: &tail[1..] }) + } + } + Frame::Alternation { tail, .. } => { + if tail.is_empty() { + None + } else { + Some(Frame::Alternation { + head: &tail[0], + tail: &tail[1..], + }) + } + } + } + } + + fn visit_class( + &mut self, + ast: &'a ast::ClassBracketed, + visitor: &mut V, + ) -> Result<(), V::Err> { + let mut ast = ClassInduct::from_bracketed(ast); + loop { + self.visit_class_pre(&ast, visitor)?; + if let Some(x) = self.induct_class(&ast) { + let child = x.child(); + self.stack_class.push((ast, x)); + ast = child; + continue; + } + self.visit_class_post(&ast, visitor)?; + + // At this point, we now try to pop our call stack until it is + // either empty or we hit another inductive case. + loop { + let (post_ast, frame) = match self.stack_class.pop() { + None => return Ok(()), + Some((post_ast, frame)) => (post_ast, frame), + }; + // If this is a union or a binary op, then we might have + // additional inductive steps to process. + if let Some(x) = self.pop_class(frame) { + if let ClassFrame::BinaryRHS { ref op, .. } = x { + visitor.visit_class_set_binary_op_in(op)?; + } + ast = x.child(); + self.stack_class.push((post_ast, x)); + break; + } + // Otherwise, we've finished visiting all the child nodes for + // this class node, so we can post visit it now. + self.visit_class_post(&post_ast, visitor)?; + } + } + } + + /// Call the appropriate `Visitor` methods given an inductive step. + fn visit_class_pre( + &self, + ast: &ClassInduct<'a>, + visitor: &mut V, + ) -> Result<(), V::Err> { + match *ast { + ClassInduct::Item(item) => { + visitor.visit_class_set_item_pre(item)?; + } + ClassInduct::BinaryOp(op) => { + visitor.visit_class_set_binary_op_pre(op)?; + } + } + Ok(()) + } + + /// Call the appropriate `Visitor` methods given an inductive step. + fn visit_class_post( + &self, + ast: &ClassInduct<'a>, + visitor: &mut V, + ) -> Result<(), V::Err> { + match *ast { + ClassInduct::Item(item) => { + visitor.visit_class_set_item_post(item)?; + } + ClassInduct::BinaryOp(op) => { + visitor.visit_class_set_binary_op_post(op)?; + } + } + Ok(()) + } + + /// Build a stack frame for the given class node if one is needed (which + /// occurs if and only if there are child nodes). Otherwise, return None. + fn induct_class(&self, ast: &ClassInduct<'a>) -> Option> { + match *ast { + ClassInduct::Item(&ast::ClassSetItem::Bracketed(ref x)) => { + match x.kind { + ast::ClassSet::Item(ref item) => { + Some(ClassFrame::Union { head: item, tail: &[] }) + } + ast::ClassSet::BinaryOp(ref op) => { + Some(ClassFrame::Binary { op }) + } + } + } + ClassInduct::Item(&ast::ClassSetItem::Union(ref x)) => { + if x.items.is_empty() { + None + } else { + Some(ClassFrame::Union { + head: &x.items[0], + tail: &x.items[1..], + }) + } + } + ClassInduct::BinaryOp(op) => { + Some(ClassFrame::BinaryLHS { op, lhs: &op.lhs, rhs: &op.rhs }) + } + _ => None, + } + } + + /// Pops the given frame. If the frame has an additional inductive step, + /// then return it, otherwise return `None`. + fn pop_class(&self, induct: ClassFrame<'a>) -> Option> { + match induct { + ClassFrame::Union { tail, .. } => { + if tail.is_empty() { + None + } else { + Some(ClassFrame::Union { + head: &tail[0], + tail: &tail[1..], + }) + } + } + ClassFrame::Binary { .. } => None, + ClassFrame::BinaryLHS { op, rhs, .. 
} => { + Some(ClassFrame::BinaryRHS { op, rhs }) + } + ClassFrame::BinaryRHS { .. } => None, + } + } +} + +impl<'a> Frame<'a> { + /// Perform the next inductive step on this frame and return the next + /// child AST node to visit. + fn child(&self) -> &'a Ast { + match *self { + Frame::Repetition(rep) => &rep.ast, + Frame::Group(group) => &group.ast, + Frame::Concat { head, .. } => head, + Frame::Alternation { head, .. } => head, + } + } +} + +impl<'a> ClassFrame<'a> { + /// Perform the next inductive step on this frame and return the next + /// child class node to visit. + fn child(&self) -> ClassInduct<'a> { + match *self { + ClassFrame::Union { head, .. } => ClassInduct::Item(head), + ClassFrame::Binary { op, .. } => ClassInduct::BinaryOp(op), + ClassFrame::BinaryLHS { ref lhs, .. } => { + ClassInduct::from_set(lhs) + } + ClassFrame::BinaryRHS { ref rhs, .. } => { + ClassInduct::from_set(rhs) + } + } + } +} + +impl<'a> ClassInduct<'a> { + fn from_bracketed(ast: &'a ast::ClassBracketed) -> ClassInduct<'a> { + ClassInduct::from_set(&ast.kind) + } + + fn from_set(ast: &'a ast::ClassSet) -> ClassInduct<'a> { + match *ast { + ast::ClassSet::Item(ref item) => ClassInduct::Item(item), + ast::ClassSet::BinaryOp(ref op) => ClassInduct::BinaryOp(op), + } + } +} + +impl<'a> core::fmt::Debug for ClassFrame<'a> { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + let x = match *self { + ClassFrame::Union { .. } => "Union", + ClassFrame::Binary { .. } => "Binary", + ClassFrame::BinaryLHS { .. } => "BinaryLHS", + ClassFrame::BinaryRHS { .. } => "BinaryRHS", + }; + write!(f, "{x}") + } +} + +impl<'a> core::fmt::Debug for ClassInduct<'a> { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + let x = match *self { + ClassInduct::Item(it) => match *it { + ast::ClassSetItem::Empty(_) => "Item(Empty)", + ast::ClassSetItem::Literal(_) => "Item(Literal)", + ast::ClassSetItem::Range(_) => "Item(Range)", + ast::ClassSetItem::Ascii(_) => "Item(Ascii)", + ast::ClassSetItem::Perl(_) => "Item(Perl)", + ast::ClassSetItem::Unicode(_) => "Item(Unicode)", + ast::ClassSetItem::Bracketed(_) => "Item(Bracketed)", + ast::ClassSetItem::Union(_) => "Item(Union)", + }, + ClassInduct::BinaryOp(it) => match it.kind { + ast::ClassSetBinaryOpKind::Intersection => { + "BinaryOp(Intersection)" + } + ast::ClassSetBinaryOpKind::Difference => { + "BinaryOp(Difference)" + } + ast::ClassSetBinaryOpKind::SymmetricDifference => { + "BinaryOp(SymmetricDifference)" + } + }, + }; + write!(f, "{x}") + } +} diff --git a/vendor/regex-syntax/src/debug.rs b/vendor/regex-syntax/src/debug.rs new file mode 100644 index 00000000000000..7a47d9de8eb339 --- /dev/null +++ b/vendor/regex-syntax/src/debug.rs @@ -0,0 +1,107 @@ +/// A type that wraps a single byte with a convenient fmt::Debug impl that +/// escapes the byte. +pub(crate) struct Byte(pub(crate) u8); + +impl core::fmt::Debug for Byte { + fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { + // Special case ASCII space. It's too hard to read otherwise, so + // put quotes around it. I sometimes wonder whether just '\x20' would + // be better... + if self.0 == b' ' { + return write!(f, "' '"); + } + // 10 bytes is enough to cover any output from ascii::escape_default. 
+ let mut bytes = [0u8; 10]; + let mut len = 0; + for (i, mut b) in core::ascii::escape_default(self.0).enumerate() { + // capitalize \xab to \xAB + if i >= 2 && b'a' <= b && b <= b'f' { + b -= 32; + } + bytes[len] = b; + len += 1; + } + write!(f, "{}", core::str::from_utf8(&bytes[..len]).unwrap()) + } +} + +/// A type that provides a human readable debug impl for arbitrary bytes. +/// +/// This generally works best when the bytes are presumed to be mostly UTF-8, +/// but will work for anything. +/// +/// N.B. This is copied nearly verbatim from regex-automata. Sigh. +pub(crate) struct Bytes<'a>(pub(crate) &'a [u8]); + +impl<'a> core::fmt::Debug for Bytes<'a> { + fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { + write!(f, "\"")?; + // This is a sad re-implementation of a similar impl found in bstr. + let mut bytes = self.0; + while let Some(result) = utf8_decode(bytes) { + let ch = match result { + Ok(ch) => ch, + Err(byte) => { + write!(f, r"\x{byte:02x}")?; + bytes = &bytes[1..]; + continue; + } + }; + bytes = &bytes[ch.len_utf8()..]; + match ch { + '\0' => write!(f, "\\0")?, + // ASCII control characters except \0, \n, \r, \t + '\x01'..='\x08' + | '\x0b' + | '\x0c' + | '\x0e'..='\x19' + | '\x7f' => { + write!(f, "\\x{:02x}", u32::from(ch))?; + } + '\n' | '\r' | '\t' | _ => { + write!(f, "{}", ch.escape_debug())?; + } + } + } + write!(f, "\"")?; + Ok(()) + } +} + +/// Decodes the next UTF-8 encoded codepoint from the given byte slice. +/// +/// If no valid encoding of a codepoint exists at the beginning of the given +/// byte slice, then the first byte is returned instead. +/// +/// This returns `None` if and only if `bytes` is empty. +pub(crate) fn utf8_decode(bytes: &[u8]) -> Option> { + fn len(byte: u8) -> Option { + if byte <= 0x7F { + return Some(1); + } else if byte & 0b1100_0000 == 0b1000_0000 { + return None; + } else if byte <= 0b1101_1111 { + Some(2) + } else if byte <= 0b1110_1111 { + Some(3) + } else if byte <= 0b1111_0111 { + Some(4) + } else { + None + } + } + + if bytes.is_empty() { + return None; + } + let len = match len(bytes[0]) { + None => return Some(Err(bytes[0])), + Some(len) if len > bytes.len() => return Some(Err(bytes[0])), + Some(1) => return Some(Ok(char::from(bytes[0]))), + Some(len) => len, + }; + match core::str::from_utf8(&bytes[..len]) { + Ok(s) => Some(Ok(s.chars().next().unwrap())), + Err(_) => Some(Err(bytes[0])), + } +} diff --git a/vendor/regex-syntax/src/either.rs b/vendor/regex-syntax/src/either.rs new file mode 100644 index 00000000000000..7ae41e4ced7460 --- /dev/null +++ b/vendor/regex-syntax/src/either.rs @@ -0,0 +1,8 @@ +/// A simple binary sum type. +/// +/// This is occasionally useful in an ad hoc fashion. +#[derive(Clone, Debug, Eq, PartialEq)] +pub enum Either { + Left(Left), + Right(Right), +} diff --git a/vendor/regex-syntax/src/error.rs b/vendor/regex-syntax/src/error.rs new file mode 100644 index 00000000000000..21e484df96dcd9 --- /dev/null +++ b/vendor/regex-syntax/src/error.rs @@ -0,0 +1,311 @@ +use alloc::{ + format, + string::{String, ToString}, + vec, + vec::Vec, +}; + +use crate::{ast, hir}; + +/// This error type encompasses any error that can be returned by this crate. +/// +/// This error type is marked as `non_exhaustive`. This means that adding a +/// new variant is not considered a breaking change. +#[non_exhaustive] +#[derive(Clone, Debug, Eq, PartialEq)] +pub enum Error { + /// An error that occurred while translating concrete syntax into abstract + /// syntax (AST). 
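// A would-be unit test sketch for the `utf8_decode` helper above (it is
// crate-internal, so this assumes the test sits alongside that helper),
// illustrating the "return the first byte on invalid input" contract:

#[test]
fn utf8_decode_reports_first_bad_byte() {
    // 'a', then a stray 0xFF, then '☃' (U+2603, encoded as E2 98 83).
    let bytes = b"a\xFF\xE2\x98\x83";
    assert_eq!(utf8_decode(bytes), Some(Ok('a')));
    assert_eq!(utf8_decode(&bytes[1..]), Some(Err(0xFF)));
    assert_eq!(utf8_decode(&bytes[2..]), Some(Ok('☃')));
    assert_eq!(utf8_decode(&[]), None);
}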
+ Parse(ast::Error), + /// An error that occurred while translating abstract syntax into a high + /// level intermediate representation (HIR). + Translate(hir::Error), +} + +impl From for Error { + fn from(err: ast::Error) -> Error { + Error::Parse(err) + } +} + +impl From for Error { + fn from(err: hir::Error) -> Error { + Error::Translate(err) + } +} + +#[cfg(feature = "std")] +impl std::error::Error for Error {} + +impl core::fmt::Display for Error { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + match *self { + Error::Parse(ref x) => x.fmt(f), + Error::Translate(ref x) => x.fmt(f), + } + } +} + +/// A helper type for formatting nice error messages. +/// +/// This type is responsible for reporting regex parse errors in a nice human +/// readable format. Most of its complexity is from interspersing notational +/// markers pointing out the position where an error occurred. +#[derive(Debug)] +pub struct Formatter<'e, E> { + /// The original regex pattern in which the error occurred. + pattern: &'e str, + /// The error kind. It must impl fmt::Display. + err: &'e E, + /// The primary span of the error. + span: &'e ast::Span, + /// An auxiliary and optional span, in case the error needs to point to + /// two locations (e.g., when reporting a duplicate capture group name). + aux_span: Option<&'e ast::Span>, +} + +impl<'e> From<&'e ast::Error> for Formatter<'e, ast::ErrorKind> { + fn from(err: &'e ast::Error) -> Self { + Formatter { + pattern: err.pattern(), + err: err.kind(), + span: err.span(), + aux_span: err.auxiliary_span(), + } + } +} + +impl<'e> From<&'e hir::Error> for Formatter<'e, hir::ErrorKind> { + fn from(err: &'e hir::Error) -> Self { + Formatter { + pattern: err.pattern(), + err: err.kind(), + span: err.span(), + aux_span: None, + } + } +} + +impl<'e, E: core::fmt::Display> core::fmt::Display for Formatter<'e, E> { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + let spans = Spans::from_formatter(self); + if self.pattern.contains('\n') { + let divider = repeat_char('~', 79); + + writeln!(f, "regex parse error:")?; + writeln!(f, "{divider}")?; + let notated = spans.notate(); + write!(f, "{notated}")?; + writeln!(f, "{divider}")?; + // If we have error spans that cover multiple lines, then we just + // note the line numbers. + if !spans.multi_line.is_empty() { + let mut notes = vec![]; + for span in &spans.multi_line { + notes.push(format!( + "on line {} (column {}) through line {} (column {})", + span.start.line, + span.start.column, + span.end.line, + span.end.column - 1 + )); + } + writeln!(f, "{}", notes.join("\n"))?; + } + write!(f, "error: {}", self.err)?; + } else { + writeln!(f, "regex parse error:")?; + let notated = Spans::from_formatter(self).notate(); + write!(f, "{notated}")?; + write!(f, "error: {}", self.err)?; + } + Ok(()) + } +} + +/// This type represents an arbitrary number of error spans in a way that makes +/// it convenient to notate the regex pattern. ("Notate" means "point out +/// exactly where the error occurred in the regex pattern.") +/// +/// Technically, we can only ever have two spans given our current error +/// structure. However, after toiling with a specific algorithm for handling +/// two spans, it became obvious that an algorithm to handle an arbitrary +/// number of spans was actually much simpler. +struct Spans<'p> { + /// The original regex pattern string. + pattern: &'p str, + /// The total width that should be used for line numbers. 
The width is + /// used for left padding the line numbers for alignment. + /// + /// A value of `0` means line numbers should not be displayed. That is, + /// the pattern is itself only one line. + line_number_width: usize, + /// All error spans that occur on a single line. This sequence always has + /// length equivalent to the number of lines in `pattern`, where the index + /// of the sequence represents a line number, starting at `0`. The spans + /// in each line are sorted in ascending order. + by_line: Vec>, + /// All error spans that occur over one or more lines. That is, the start + /// and end position of the span have different line numbers. The spans are + /// sorted in ascending order. + multi_line: Vec, +} + +impl<'p> Spans<'p> { + /// Build a sequence of spans from a formatter. + fn from_formatter<'e, E: core::fmt::Display>( + fmter: &'p Formatter<'e, E>, + ) -> Spans<'p> { + let mut line_count = fmter.pattern.lines().count(); + // If the pattern ends with a `\n` literal, then our line count is + // off by one, since a span can occur immediately after the last `\n`, + // which is consider to be an additional line. + if fmter.pattern.ends_with('\n') { + line_count += 1; + } + let line_number_width = + if line_count <= 1 { 0 } else { line_count.to_string().len() }; + let mut spans = Spans { + pattern: &fmter.pattern, + line_number_width, + by_line: vec![vec![]; line_count], + multi_line: vec![], + }; + spans.add(fmter.span.clone()); + if let Some(span) = fmter.aux_span { + spans.add(span.clone()); + } + spans + } + + /// Add the given span to this sequence, putting it in the right place. + fn add(&mut self, span: ast::Span) { + // This is grossly inefficient since we sort after each add, but right + // now, we only ever add two spans at most. + if span.is_one_line() { + let i = span.start.line - 1; // because lines are 1-indexed + self.by_line[i].push(span); + self.by_line[i].sort(); + } else { + self.multi_line.push(span); + self.multi_line.sort(); + } + } + + /// Notate the pattern string with carets (`^`) pointing at each span + /// location. This only applies to spans that occur within a single line. + fn notate(&self) -> String { + let mut notated = String::new(); + for (i, line) in self.pattern.lines().enumerate() { + if self.line_number_width > 0 { + notated.push_str(&self.left_pad_line_number(i + 1)); + notated.push_str(": "); + } else { + notated.push_str(" "); + } + notated.push_str(line); + notated.push('\n'); + if let Some(notes) = self.notate_line(i) { + notated.push_str(¬es); + notated.push('\n'); + } + } + notated + } + + /// Return notes for the line indexed at `i` (zero-based). If there are no + /// spans for the given line, then `None` is returned. Otherwise, an + /// appropriately space padded string with correctly positioned `^` is + /// returned, accounting for line numbers. + fn notate_line(&self, i: usize) -> Option { + let spans = &self.by_line[i]; + if spans.is_empty() { + return None; + } + let mut notes = String::new(); + for _ in 0..self.line_number_padding() { + notes.push(' '); + } + let mut pos = 0; + for span in spans { + for _ in pos..(span.start.column - 1) { + notes.push(' '); + pos += 1; + } + let note_len = span.end.column.saturating_sub(span.start.column); + for _ in 0..core::cmp::max(1, note_len) { + notes.push('^'); + pos += 1; + } + } + Some(notes) + } + + /// Left pad the given line number with spaces such that it is aligned with + /// other line numbers. 
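// A brief sketch of what the `Formatter`/`Spans` machinery above produces,
// assuming the public parser API. A duplicate capture group name exercises the
// auxiliary span, so both occurrences are notated with carets:

fn show_duplicate_group_error() {
    let err = regex_syntax::ast::parse::Parser::new()
        .parse(r"(?P<name>a)(?P<name>b)")
        .unwrap_err();
    // `err` renders through the formatter above; the message shows the
    // pattern with `^` markers under both `name` groups and ends with
    // "error: duplicate capture group name".
    println!("{err}");
}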
+ fn left_pad_line_number(&self, n: usize) -> String { + let n = n.to_string(); + let pad = self.line_number_width.checked_sub(n.len()).unwrap(); + let mut result = repeat_char(' ', pad); + result.push_str(&n); + result + } + + /// Return the line number padding beginning at the start of each line of + /// the pattern. + /// + /// If the pattern is only one line, then this returns a fixed padding + /// for visual indentation. + fn line_number_padding(&self) -> usize { + if self.line_number_width == 0 { + 4 + } else { + 2 + self.line_number_width + } + } +} + +fn repeat_char(c: char, count: usize) -> String { + core::iter::repeat(c).take(count).collect() +} + +#[cfg(test)] +mod tests { + use alloc::string::ToString; + + use crate::ast::parse::Parser; + + fn assert_panic_message(pattern: &str, expected_msg: &str) { + let result = Parser::new().parse(pattern); + match result { + Ok(_) => { + panic!("regex should not have parsed"); + } + Err(err) => { + assert_eq!(err.to_string(), expected_msg.trim()); + } + } + } + + // See: https://github.com/rust-lang/regex/issues/464 + #[test] + fn regression_464() { + let err = Parser::new().parse("a{\n").unwrap_err(); + // This test checks that the error formatter doesn't panic. + assert!(!err.to_string().is_empty()); + } + + // See: https://github.com/rust-lang/regex/issues/545 + #[test] + fn repetition_quantifier_expects_a_valid_decimal() { + assert_panic_message( + r"\\u{[^}]*}", + r#" +regex parse error: + \\u{[^}]*} + ^ +error: repetition quantifier expects a valid decimal +"#, + ); + } +} diff --git a/vendor/regex-syntax/src/hir/interval.rs b/vendor/regex-syntax/src/hir/interval.rs new file mode 100644 index 00000000000000..d507ee724d3918 --- /dev/null +++ b/vendor/regex-syntax/src/hir/interval.rs @@ -0,0 +1,564 @@ +use core::{char, cmp, fmt::Debug, slice}; + +use alloc::vec::Vec; + +use crate::unicode; + +// This module contains an *internal* implementation of interval sets. +// +// The primary invariant that interval sets guards is canonical ordering. That +// is, every interval set contains an ordered sequence of intervals where +// no two intervals are overlapping or adjacent. While this invariant is +// occasionally broken within the implementation, it should be impossible for +// callers to observe it. +// +// Since case folding (as implemented below) breaks that invariant, we roll +// that into this API even though it is a little out of place in an otherwise +// generic interval set. (Hence the reason why the `unicode` module is imported +// here.) +// +// Some of the implementation complexity here is a result of me wanting to +// preserve the sequential representation without using additional memory. +// In many cases, we do use linear extra memory, but it is at most 2x and it +// is amortized. If we relaxed the memory requirements, this implementation +// could become much simpler. The extra memory is honestly probably OK, but +// character classes (especially of the Unicode variety) can become quite +// large, and it would be nice to keep regex compilation snappy even in debug +// builds. (In the past, I have been careless with this area of code and it has +// caused slow regex compilations in debug mode, so this isn't entirely +// unwarranted.) +// +// Tests on this are relegated to the public API of HIR in src/hir.rs. + +#[derive(Clone, Debug)] +pub struct IntervalSet { + /// A sorted set of non-overlapping ranges. 
+ ranges: Vec, + /// While not required at all for correctness, we keep track of whether an + /// interval set has been case folded or not. This helps us avoid doing + /// redundant work if, for example, a set has already been cased folded. + /// And note that whether a set is folded or not is preserved through + /// all of the pairwise set operations. That is, if both interval sets + /// have been case folded, then any of difference, union, intersection or + /// symmetric difference all produce a case folded set. + /// + /// Note that when this is true, it *must* be the case that the set is case + /// folded. But when it's false, the set *may* be case folded. In other + /// words, we only set this to true when we know it to be case, but we're + /// okay with it being false if it would otherwise be costly to determine + /// whether it should be true. This means code cannot assume that a false + /// value necessarily indicates that the set is not case folded. + /// + /// Bottom line: this is a performance optimization. + folded: bool, +} + +impl Eq for IntervalSet {} + +// We implement PartialEq manually so that we don't consider the set's internal +// 'folded' property to be part of its identity. The 'folded' property is +// strictly an optimization. +impl PartialEq for IntervalSet { + fn eq(&self, other: &IntervalSet) -> bool { + self.ranges.eq(&other.ranges) + } +} + +impl IntervalSet { + /// Create a new set from a sequence of intervals. Each interval is + /// specified as a pair of bounds, where both bounds are inclusive. + /// + /// The given ranges do not need to be in any specific order, and ranges + /// may overlap. + pub fn new>(intervals: T) -> IntervalSet { + let ranges: Vec = intervals.into_iter().collect(); + // An empty set is case folded. + let folded = ranges.is_empty(); + let mut set = IntervalSet { ranges, folded }; + set.canonicalize(); + set + } + + /// Add a new interval to this set. + pub fn push(&mut self, interval: I) { + // TODO: This could be faster. e.g., Push the interval such that + // it preserves canonicalization. + self.ranges.push(interval); + self.canonicalize(); + // We don't know whether the new interval added here is considered + // case folded, so we conservatively assume that the entire set is + // no longer case folded if it was previously. + self.folded = false; + } + + /// Return an iterator over all intervals in this set. + /// + /// The iterator yields intervals in ascending order. + pub fn iter(&self) -> IntervalSetIter<'_, I> { + IntervalSetIter(self.ranges.iter()) + } + + /// Return an immutable slice of intervals in this set. + /// + /// The sequence returned is in canonical ordering. + pub fn intervals(&self) -> &[I] { + &self.ranges + } + + /// Expand this interval set such that it contains all case folded + /// characters. For example, if this class consists of the range `a-z`, + /// then applying case folding will result in the class containing both the + /// ranges `a-z` and `A-Z`. + /// + /// This returns an error if the necessary case mapping data is not + /// available. + pub fn case_fold_simple(&mut self) -> Result<(), unicode::CaseFoldError> { + if self.folded { + return Ok(()); + } + let len = self.ranges.len(); + for i in 0..len { + let range = self.ranges[i]; + if let Err(err) = range.case_fold_simple(&mut self.ranges) { + self.canonicalize(); + return Err(err); + } + } + self.canonicalize(); + self.folded = true; + Ok(()) + } + + /// Union this set with the given set, in place. 
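// The interval set itself is crate-internal; its behavior is observable
// through the public HIR classes. A sketch of case folding plus union,
// assuming `regex_syntax::hir::{ClassUnicode, ClassUnicodeRange}`:

fn folded_alnum() -> regex_syntax::hir::ClassUnicode {
    use regex_syntax::hir::{ClassUnicode, ClassUnicodeRange};

    let mut letters = ClassUnicode::new([ClassUnicodeRange::new('a', 'z')]);
    // Simple case folding adds 'A'-'Z' (and the fold targets U+017F 'ſ'
    // and U+212A 'K').
    letters.case_fold_simple();
    let mut cls = ClassUnicode::new([ClassUnicodeRange::new('0', '9')]);
    // Union preserves the canonical (sorted, non-overlapping) form
    // maintained by the interval set above.
    cls.union(&letters);
    cls
}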
+ pub fn union(&mut self, other: &IntervalSet) { + if other.ranges.is_empty() || self.ranges == other.ranges { + return; + } + // This could almost certainly be done more efficiently. + self.ranges.extend(&other.ranges); + self.canonicalize(); + self.folded = self.folded && other.folded; + } + + /// Intersect this set with the given set, in place. + pub fn intersect(&mut self, other: &IntervalSet) { + if self.ranges.is_empty() { + return; + } + if other.ranges.is_empty() { + self.ranges.clear(); + // An empty set is case folded. + self.folded = true; + return; + } + + // There should be a way to do this in-place with constant memory, + // but I couldn't figure out a simple way to do it. So just append + // the intersection to the end of this range, and then drain it before + // we're done. + let drain_end = self.ranges.len(); + + let mut ita = 0..drain_end; + let mut itb = 0..other.ranges.len(); + let mut a = ita.next().unwrap(); + let mut b = itb.next().unwrap(); + loop { + if let Some(ab) = self.ranges[a].intersect(&other.ranges[b]) { + self.ranges.push(ab); + } + let (it, aorb) = + if self.ranges[a].upper() < other.ranges[b].upper() { + (&mut ita, &mut a) + } else { + (&mut itb, &mut b) + }; + match it.next() { + Some(v) => *aorb = v, + None => break, + } + } + self.ranges.drain(..drain_end); + self.folded = self.folded && other.folded; + } + + /// Subtract the given set from this set, in place. + pub fn difference(&mut self, other: &IntervalSet) { + if self.ranges.is_empty() || other.ranges.is_empty() { + return; + } + + // This algorithm is (to me) surprisingly complex. A search of the + // interwebs indicate that this is a potentially interesting problem. + // Folks seem to suggest interval or segment trees, but I'd like to + // avoid the overhead (both runtime and conceptual) of that. + // + // The following is basically my Shitty First Draft. Therefore, in + // order to grok it, you probably need to read each line carefully. + // Simplifications are most welcome! + // + // Remember, we can assume the canonical format invariant here, which + // says that all ranges are sorted, not overlapping and not adjacent in + // each class. + let drain_end = self.ranges.len(); + let (mut a, mut b) = (0, 0); + 'LOOP: while a < drain_end && b < other.ranges.len() { + // Basically, the easy cases are when neither range overlaps with + // each other. If the `b` range is less than our current `a` + // range, then we can skip it and move on. + if other.ranges[b].upper() < self.ranges[a].lower() { + b += 1; + continue; + } + // ... similarly for the `a` range. If it's less than the smallest + // `b` range, then we can add it as-is. + if self.ranges[a].upper() < other.ranges[b].lower() { + let range = self.ranges[a]; + self.ranges.push(range); + a += 1; + continue; + } + // Otherwise, we have overlapping ranges. + assert!(!self.ranges[a].is_intersection_empty(&other.ranges[b])); + + // This part is tricky and was non-obvious to me without looking + // at explicit examples (see the tests). The trickiness stems from + // two things: 1) subtracting a range from another range could + // yield two ranges and 2) after subtracting a range, it's possible + // that future ranges can have an impact. The loop below advances + // the `b` ranges until they can't possible impact the current + // range. + // + // For example, if our `a` range is `a-t` and our next three `b` + // ranges are `a-c`, `g-i`, `r-t` and `x-z`, then we need to apply + // subtraction three times before moving on to the next `a` range. 
+ let mut range = self.ranges[a]; + while b < other.ranges.len() + && !range.is_intersection_empty(&other.ranges[b]) + { + let old_range = range; + range = match range.difference(&other.ranges[b]) { + (None, None) => { + // We lost the entire range, so move on to the next + // without adding this one. + a += 1; + continue 'LOOP; + } + (Some(range1), None) | (None, Some(range1)) => range1, + (Some(range1), Some(range2)) => { + self.ranges.push(range1); + range2 + } + }; + // It's possible that the `b` range has more to contribute + // here. In particular, if it is greater than the original + // range, then it might impact the next `a` range *and* it + // has impacted the current `a` range as much as possible, + // so we can quit. We don't bump `b` so that the next `a` + // range can apply it. + if other.ranges[b].upper() > old_range.upper() { + break; + } + // Otherwise, the next `b` range might apply to the current + // `a` range. + b += 1; + } + self.ranges.push(range); + a += 1; + } + while a < drain_end { + let range = self.ranges[a]; + self.ranges.push(range); + a += 1; + } + self.ranges.drain(..drain_end); + self.folded = self.folded && other.folded; + } + + /// Compute the symmetric difference of the two sets, in place. + /// + /// This computes the symmetric difference of two interval sets. This + /// removes all elements in this set that are also in the given set, + /// but also adds all elements from the given set that aren't in this + /// set. That is, the set will contain all elements in either set, + /// but will not contain any elements that are in both sets. + pub fn symmetric_difference(&mut self, other: &IntervalSet) { + // TODO(burntsushi): Fix this so that it amortizes allocation. + let mut intersection = self.clone(); + intersection.intersect(other); + self.union(other); + self.difference(&intersection); + } + + /// Negate this interval set. + /// + /// For all `x` where `x` is any element, if `x` was in this set, then it + /// will not be in this set after negation. + pub fn negate(&mut self) { + if self.ranges.is_empty() { + let (min, max) = (I::Bound::min_value(), I::Bound::max_value()); + self.ranges.push(I::create(min, max)); + // The set containing everything must case folded. + self.folded = true; + return; + } + + // There should be a way to do this in-place with constant memory, + // but I couldn't figure out a simple way to do it. So just append + // the negation to the end of this range, and then drain it before + // we're done. + let drain_end = self.ranges.len(); + + // We do checked arithmetic below because of the canonical ordering + // invariant. + if self.ranges[0].lower() > I::Bound::min_value() { + let upper = self.ranges[0].lower().decrement(); + self.ranges.push(I::create(I::Bound::min_value(), upper)); + } + for i in 1..drain_end { + let lower = self.ranges[i - 1].upper().increment(); + let upper = self.ranges[i].lower().decrement(); + self.ranges.push(I::create(lower, upper)); + } + if self.ranges[drain_end - 1].upper() < I::Bound::max_value() { + let lower = self.ranges[drain_end - 1].upper().increment(); + self.ranges.push(I::create(lower, I::Bound::max_value())); + } + self.ranges.drain(..drain_end); + // We don't need to update whether this set is folded or not, because + // it is conservatively preserved through negation. Namely, if a set + // is not folded, then it is possible that its negation is folded, for + // example, [^☃]. But we're fine with assuming that the set is not + // folded in that case. 
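// A would-be test sketch checking the worked example from the difference
// comment above through the public class API: subtracting 'a-c', 'g-i',
// 'r-t' and 'x-z' from 'a-t' leaves exactly 'd-f' and 'j-q'.

#[cfg(test)]
mod difference_sketch {
    use alloc::{vec, vec::Vec};

    use crate::hir::{ClassUnicode, ClassUnicodeRange};

    fn r(start: char, end: char) -> ClassUnicodeRange {
        ClassUnicodeRange::new(start, end)
    }

    #[test]
    fn difference_can_split_ranges() {
        let mut cls = ClassUnicode::new([r('a', 't')]);
        let sub = ClassUnicode::new([
            r('a', 'c'),
            r('g', 'i'),
            r('r', 't'),
            r('x', 'z'),
        ]);
        cls.difference(&sub);
        let got: Vec<(char, char)> =
            cls.iter().map(|x| (x.start(), x.end())).collect();
        assert_eq!(got, vec![('d', 'f'), ('j', 'q')]);
    }
}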
(`folded` permits false negatives but not false + // positives.) + // + // But what about when a set is folded, is its negation also + // necessarily folded? Yes. Because if a set is folded, then for every + // character in the set, it necessarily included its equivalence class + // of case folded characters. Negating it in turn means that all + // equivalence classes in the set are negated, and any equivalence + // class that was previously not in the set is now entirely in the set. + } + + /// Converts this set into a canonical ordering. + fn canonicalize(&mut self) { + if self.is_canonical() { + return; + } + self.ranges.sort(); + assert!(!self.ranges.is_empty()); + + // Is there a way to do this in-place with constant memory? I couldn't + // figure out a way to do it. So just append the canonicalization to + // the end of this range, and then drain it before we're done. + let drain_end = self.ranges.len(); + for oldi in 0..drain_end { + // If we've added at least one new range, then check if we can + // merge this range in the previously added range. + if self.ranges.len() > drain_end { + let (last, rest) = self.ranges.split_last_mut().unwrap(); + if let Some(union) = last.union(&rest[oldi]) { + *last = union; + continue; + } + } + let range = self.ranges[oldi]; + self.ranges.push(range); + } + self.ranges.drain(..drain_end); + } + + /// Returns true if and only if this class is in a canonical ordering. + fn is_canonical(&self) -> bool { + for pair in self.ranges.windows(2) { + if pair[0] >= pair[1] { + return false; + } + if pair[0].is_contiguous(&pair[1]) { + return false; + } + } + true + } +} + +/// An iterator over intervals. +#[derive(Debug)] +pub struct IntervalSetIter<'a, I>(slice::Iter<'a, I>); + +impl<'a, I> Iterator for IntervalSetIter<'a, I> { + type Item = &'a I; + + fn next(&mut self) -> Option<&'a I> { + self.0.next() + } +} + +pub trait Interval: + Clone + Copy + Debug + Default + Eq + PartialEq + PartialOrd + Ord +{ + type Bound: Bound; + + fn lower(&self) -> Self::Bound; + fn upper(&self) -> Self::Bound; + fn set_lower(&mut self, bound: Self::Bound); + fn set_upper(&mut self, bound: Self::Bound); + fn case_fold_simple( + &self, + intervals: &mut Vec, + ) -> Result<(), unicode::CaseFoldError>; + + /// Create a new interval. + fn create(lower: Self::Bound, upper: Self::Bound) -> Self { + let mut int = Self::default(); + if lower <= upper { + int.set_lower(lower); + int.set_upper(upper); + } else { + int.set_lower(upper); + int.set_upper(lower); + } + int + } + + /// Union the given overlapping range into this range. + /// + /// If the two ranges aren't contiguous, then this returns `None`. + fn union(&self, other: &Self) -> Option { + if !self.is_contiguous(other) { + return None; + } + let lower = cmp::min(self.lower(), other.lower()); + let upper = cmp::max(self.upper(), other.upper()); + Some(Self::create(lower, upper)) + } + + /// Intersect this range with the given range and return the result. + /// + /// If the intersection is empty, then this returns `None`. + fn intersect(&self, other: &Self) -> Option { + let lower = cmp::max(self.lower(), other.lower()); + let upper = cmp::min(self.upper(), other.upper()); + if lower <= upper { + Some(Self::create(lower, upper)) + } else { + None + } + } + + /// Subtract the given range from this range and return the resulting + /// ranges. + /// + /// If subtraction would result in an empty range, then no ranges are + /// returned. 
+ fn difference(&self, other: &Self) -> (Option, Option) { + if self.is_subset(other) { + return (None, None); + } + if self.is_intersection_empty(other) { + return (Some(self.clone()), None); + } + let add_lower = other.lower() > self.lower(); + let add_upper = other.upper() < self.upper(); + // We know this because !self.is_subset(other) and the ranges have + // a non-empty intersection. + assert!(add_lower || add_upper); + let mut ret = (None, None); + if add_lower { + let upper = other.lower().decrement(); + ret.0 = Some(Self::create(self.lower(), upper)); + } + if add_upper { + let lower = other.upper().increment(); + let range = Self::create(lower, self.upper()); + if ret.0.is_none() { + ret.0 = Some(range); + } else { + ret.1 = Some(range); + } + } + ret + } + + /// Returns true if and only if the two ranges are contiguous. Two ranges + /// are contiguous if and only if the ranges are either overlapping or + /// adjacent. + fn is_contiguous(&self, other: &Self) -> bool { + let lower1 = self.lower().as_u32(); + let upper1 = self.upper().as_u32(); + let lower2 = other.lower().as_u32(); + let upper2 = other.upper().as_u32(); + cmp::max(lower1, lower2) <= cmp::min(upper1, upper2).saturating_add(1) + } + + /// Returns true if and only if the intersection of this range and the + /// other range is empty. + fn is_intersection_empty(&self, other: &Self) -> bool { + let (lower1, upper1) = (self.lower(), self.upper()); + let (lower2, upper2) = (other.lower(), other.upper()); + cmp::max(lower1, lower2) > cmp::min(upper1, upper2) + } + + /// Returns true if and only if this range is a subset of the other range. + fn is_subset(&self, other: &Self) -> bool { + let (lower1, upper1) = (self.lower(), self.upper()); + let (lower2, upper2) = (other.lower(), other.upper()); + (lower2 <= lower1 && lower1 <= upper2) + && (lower2 <= upper1 && upper1 <= upper2) + } +} + +pub trait Bound: + Copy + Clone + Debug + Eq + PartialEq + PartialOrd + Ord +{ + fn min_value() -> Self; + fn max_value() -> Self; + fn as_u32(self) -> u32; + fn increment(self) -> Self; + fn decrement(self) -> Self; +} + +impl Bound for u8 { + fn min_value() -> Self { + u8::MIN + } + fn max_value() -> Self { + u8::MAX + } + fn as_u32(self) -> u32 { + u32::from(self) + } + fn increment(self) -> Self { + self.checked_add(1).unwrap() + } + fn decrement(self) -> Self { + self.checked_sub(1).unwrap() + } +} + +impl Bound for char { + fn min_value() -> Self { + '\x00' + } + fn max_value() -> Self { + '\u{10FFFF}' + } + fn as_u32(self) -> u32 { + u32::from(self) + } + + fn increment(self) -> Self { + match self { + '\u{D7FF}' => '\u{E000}', + c => char::from_u32(u32::from(c).checked_add(1).unwrap()).unwrap(), + } + } + + fn decrement(self) -> Self { + match self { + '\u{E000}' => '\u{D7FF}', + c => char::from_u32(u32::from(c).checked_sub(1).unwrap()).unwrap(), + } + } +} + +// Tests for interval sets are written in src/hir.rs against the public API. diff --git a/vendor/regex-syntax/src/hir/literal.rs b/vendor/regex-syntax/src/hir/literal.rs new file mode 100644 index 00000000000000..2a6350e64663ce --- /dev/null +++ b/vendor/regex-syntax/src/hir/literal.rs @@ -0,0 +1,3214 @@ +/*! +Provides literal extraction from `Hir` expressions. + +An [`Extractor`] pulls literals out of [`Hir`] expressions and returns a +[`Seq`] of [`Literal`]s. + +The purpose of literal extraction is generally to provide avenues for +optimizing regex searches. The main idea is that substring searches can be an +order of magnitude faster than a regex search. 
Therefore, if one can execute +a substring search to find candidate match locations and only run the regex +search at those locations, then it is possible for huge improvements in +performance to be realized. + +With that said, literal optimizations are generally a black art because even +though substring search is generally faster, if the number of candidates +produced is high, then it can create a lot of overhead by ping-ponging between +the substring search and the regex search. + +Here are some heuristics that might be used to help increase the chances of +effective literal optimizations: + +* Stick to small [`Seq`]s. If you search for too many literals, it's likely +to lead to substring search that is only a little faster than a regex search, +and thus the overhead of using literal optimizations in the first place might +make things slower overall. +* The literals in your [`Seq`] shouldn't be too short. In general, longer is +better. A sequence corresponding to single bytes that occur frequently in the +haystack, for example, is probably a bad literal optimization because it's +likely to produce many false positive candidates. Longer literals are less +likely to match, and thus probably produce fewer false positives. +* If it's possible to estimate the approximate frequency of each byte according +to some pre-computed background distribution, it is possible to compute a score +of how "good" a `Seq` is. If a `Seq` isn't good enough, you might consider +skipping the literal optimization and just use the regex engine. + +(It should be noted that there are always pathological cases that can make +any kind of literal optimization be a net slower result. This is why it +might be a good idea to be conservative, or to even provide a means for +literal optimizations to be dynamically disabled if they are determined to be +ineffective according to some measure.) + +You're encouraged to explore the methods on [`Seq`], which permit shrinking +the size of sequences in a preference-order preserving fashion. + +Finally, note that it isn't strictly necessary to use an [`Extractor`]. Namely, +an `Extractor` only uses public APIs of the [`Seq`] and [`Literal`] types, +so it is possible to implement your own extractor. For example, for n-grams +or "inner" literals (i.e., not prefix or suffix literals). The `Extractor` +is mostly responsible for the case analysis over `Hir` expressions. Much of +the "trickier" parts are how to combine literal sequences, and that is all +implemented on [`Seq`]. +*/ + +use core::{cmp, mem, num::NonZeroUsize}; + +use alloc::{vec, vec::Vec}; + +use crate::hir::{self, Hir}; + +/// Extracts prefix or suffix literal sequences from [`Hir`] expressions. +/// +/// Literal extraction is based on the following observations: +/// +/// * Many regexes start with one or a small number of literals. +/// * Substring search for literals is often much faster (sometimes by an order +/// of magnitude) than a regex search. +/// +/// Thus, in many cases, one can search for literals to find candidate starting +/// locations of a match, and then only run the full regex engine at each such +/// location instead of over the full haystack. +/// +/// The main downside of literal extraction is that it can wind up causing a +/// search to be slower overall. For example, if there are many matches or if +/// there are many candidates that don't ultimately lead to a match, then a +/// lot of overhead will be spent in shuffling back-and-forth between substring +/// search and the regex engine. 
This is the fundamental reason why literal +/// optimizations for regex patterns is sometimes considered a "black art." +/// +/// # Look-around assertions +/// +/// Literal extraction treats all look-around assertions as-if they match every +/// empty string. So for example, the regex `\bquux\b` will yield a sequence +/// containing a single exact literal `quux`. However, not all occurrences +/// of `quux` correspond to a match a of the regex. For example, `\bquux\b` +/// does not match `ZquuxZ` anywhere because `quux` does not fall on a word +/// boundary. +/// +/// In effect, if your regex contains look-around assertions, then a match of +/// an exact literal does not necessarily mean the regex overall matches. So +/// you may still need to run the regex engine in such cases to confirm the +/// match. +/// +/// The precise guarantee you get from a literal sequence is: if every literal +/// in the sequence is exact and the original regex contains zero look-around +/// assertions, then a preference-order multi-substring search of those +/// literals will precisely match a preference-order search of the original +/// regex. +/// +/// # Example +/// +/// This shows how to extract prefixes: +/// +/// ``` +/// use regex_syntax::{hir::literal::{Extractor, Literal, Seq}, parse}; +/// +/// let hir = parse(r"(a|b|c)(x|y|z)[A-Z]+foo")?; +/// let got = Extractor::new().extract(&hir); +/// // All literals returned are "inexact" because none of them reach the +/// // match state. +/// let expected = Seq::from_iter([ +/// Literal::inexact("ax"), +/// Literal::inexact("ay"), +/// Literal::inexact("az"), +/// Literal::inexact("bx"), +/// Literal::inexact("by"), +/// Literal::inexact("bz"), +/// Literal::inexact("cx"), +/// Literal::inexact("cy"), +/// Literal::inexact("cz"), +/// ]); +/// assert_eq!(expected, got); +/// +/// # Ok::<(), Box>(()) +/// ``` +/// +/// This shows how to extract suffixes: +/// +/// ``` +/// use regex_syntax::{ +/// hir::literal::{Extractor, ExtractKind, Literal, Seq}, +/// parse, +/// }; +/// +/// let hir = parse(r"foo|[A-Z]+bar")?; +/// let got = Extractor::new().kind(ExtractKind::Suffix).extract(&hir); +/// // Since 'foo' gets to a match state, it is considered exact. But 'bar' +/// // does not because of the '[A-Z]+', and thus is marked inexact. +/// let expected = Seq::from_iter([ +/// Literal::exact("foo"), +/// Literal::inexact("bar"), +/// ]); +/// assert_eq!(expected, got); +/// +/// # Ok::<(), Box>(()) +/// ``` +#[derive(Clone, Debug)] +pub struct Extractor { + kind: ExtractKind, + limit_class: usize, + limit_repeat: usize, + limit_literal_len: usize, + limit_total: usize, +} + +impl Extractor { + /// Create a new extractor with a default configuration. + /// + /// The extractor can be optionally configured before calling + /// [`Extractor::extract`] to get a literal sequence. + pub fn new() -> Extractor { + Extractor { + kind: ExtractKind::Prefix, + limit_class: 10, + limit_repeat: 10, + limit_literal_len: 100, + limit_total: 250, + } + } + + /// Execute the extractor and return a sequence of literals. 
+ pub fn extract(&self, hir: &Hir) -> Seq { + use crate::hir::HirKind::*; + + match *hir.kind() { + Empty | Look(_) => Seq::singleton(self::Literal::exact(vec![])), + Literal(hir::Literal(ref bytes)) => { + let mut seq = + Seq::singleton(self::Literal::exact(bytes.to_vec())); + self.enforce_literal_len(&mut seq); + seq + } + Class(hir::Class::Unicode(ref cls)) => { + self.extract_class_unicode(cls) + } + Class(hir::Class::Bytes(ref cls)) => self.extract_class_bytes(cls), + Repetition(ref rep) => self.extract_repetition(rep), + Capture(hir::Capture { ref sub, .. }) => self.extract(sub), + Concat(ref hirs) => match self.kind { + ExtractKind::Prefix => self.extract_concat(hirs.iter()), + ExtractKind::Suffix => self.extract_concat(hirs.iter().rev()), + }, + Alternation(ref hirs) => { + // Unlike concat, we always union starting from the beginning, + // since the beginning corresponds to the highest preference, + // which doesn't change based on forwards vs reverse. + self.extract_alternation(hirs.iter()) + } + } + } + + /// Set the kind of literal sequence to extract from an [`Hir`] expression. + /// + /// The default is to extract prefixes, but suffixes can be selected + /// instead. The contract for prefixes is that every match of the + /// corresponding `Hir` must start with one of the literals in the sequence + /// returned. Moreover, the _order_ of the sequence returned corresponds to + /// the preference order. + /// + /// Suffixes satisfy a similar contract in that every match of the + /// corresponding `Hir` must end with one of the literals in the sequence + /// returned. However, there is no guarantee that the literals are in + /// preference order. + /// + /// Remember that a sequence can be infinite. For example, unless the + /// limits are configured to be impractically large, attempting to extract + /// prefixes (or suffixes) for the pattern `[A-Z]` will return an infinite + /// sequence. Generally speaking, if the sequence returned is infinite, + /// then it is presumed to be unwise to do prefix (or suffix) optimizations + /// for the pattern. + pub fn kind(&mut self, kind: ExtractKind) -> &mut Extractor { + self.kind = kind; + self + } + + /// Configure a limit on the length of the sequence that is permitted for + /// a character class. If a character class exceeds this limit, then the + /// sequence returned for it is infinite. + /// + /// This prevents classes like `[A-Z]` or `\pL` from getting turned into + /// huge and likely unproductive sequences of literals. + /// + /// # Example + /// + /// This example shows how this limit can be lowered to decrease the tolerance + /// for character classes being turned into literal sequences. + /// + /// ``` + /// use regex_syntax::{hir::literal::{Extractor, Seq}, parse}; + /// + /// let hir = parse(r"[0-9]")?; + /// + /// let got = Extractor::new().extract(&hir); + /// let expected = Seq::new([ + /// "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", + /// ]); + /// assert_eq!(expected, got); + /// + /// // Now let's shrink the limit and see how that changes things. + /// let got = Extractor::new().limit_class(4).extract(&hir); + /// let expected = Seq::infinite(); + /// assert_eq!(expected, got); + /// + /// # Ok::<(), Box>(()) + /// ``` + pub fn limit_class(&mut self, limit: usize) -> &mut Extractor { + self.limit_class = limit; + self + } + + /// Configure a limit on the total number of repetitions that is permitted + /// before literal extraction is stopped. 
+ /// + /// This is useful for limiting things like `(abcde){50}`, or more + /// insidiously, `(?:){1000000000}`. This limit prevents any one single + /// repetition from adding too much to a literal sequence. + /// + /// With this limit set, repetitions that exceed it will be stopped and any + /// literals extracted up to that point will be made inexact. + /// + /// # Example + /// + /// This shows how to decrease the limit and compares it with the default. + /// + /// ``` + /// use regex_syntax::{hir::literal::{Extractor, Literal, Seq}, parse}; + /// + /// let hir = parse(r"(abc){8}")?; + /// + /// let got = Extractor::new().extract(&hir); + /// let expected = Seq::new(["abcabcabcabcabcabcabcabc"]); + /// assert_eq!(expected, got); + /// + /// // Now let's shrink the limit and see how that changes things. + /// let got = Extractor::new().limit_repeat(4).extract(&hir); + /// let expected = Seq::from_iter([ + /// Literal::inexact("abcabcabcabc"), + /// ]); + /// assert_eq!(expected, got); + /// + /// # Ok::<(), Box>(()) + /// ``` + pub fn limit_repeat(&mut self, limit: usize) -> &mut Extractor { + self.limit_repeat = limit; + self + } + + /// Configure a limit on the maximum length of any literal in a sequence. + /// + /// This is useful for limiting things like `(abcde){5}{5}{5}{5}`. While + /// each repetition or literal in that regex is small, when all the + /// repetitions are applied, one ends up with a literal of length `5^4 = + /// 625`. + /// + /// With this limit set, literals that exceed it will be made inexact and + /// thus prevented from growing. + /// + /// # Example + /// + /// This shows how to decrease the limit and compares it with the default. + /// + /// ``` + /// use regex_syntax::{hir::literal::{Extractor, Literal, Seq}, parse}; + /// + /// let hir = parse(r"(abc){2}{2}{2}")?; + /// + /// let got = Extractor::new().extract(&hir); + /// let expected = Seq::new(["abcabcabcabcabcabcabcabc"]); + /// assert_eq!(expected, got); + /// + /// // Now let's shrink the limit and see how that changes things. + /// let got = Extractor::new().limit_literal_len(14).extract(&hir); + /// let expected = Seq::from_iter([ + /// Literal::inexact("abcabcabcabcab"), + /// ]); + /// assert_eq!(expected, got); + /// + /// # Ok::<(), Box>(()) + /// ``` + pub fn limit_literal_len(&mut self, limit: usize) -> &mut Extractor { + self.limit_literal_len = limit; + self + } + + /// Configure a limit on the total number of literals that will be + /// returned. + /// + /// This is useful as a practical measure for avoiding the creation of + /// large sequences of literals. While the extractor will automatically + /// handle local creations of large sequences (for example, `[A-Z]` yields + /// an infinite sequence by default), large sequences can be created + /// through non-local means as well. + /// + /// For example, `[ab]{3}{3}` would yield a sequence of length `512 = 2^9` + /// despite each of the repetitions being small on their own. This limit + /// thus represents a "catch all" for avoiding locally small sequences from + /// combining into large sequences. + /// + /// # Example + /// + /// This example shows how reducing the limit will change the literal + /// sequence returned. 
+ /// + /// ``` + /// use regex_syntax::{hir::literal::{Extractor, Literal, Seq}, parse}; + /// + /// let hir = parse(r"[ab]{2}{2}")?; + /// + /// let got = Extractor::new().extract(&hir); + /// let expected = Seq::new([ + /// "aaaa", "aaab", "aaba", "aabb", + /// "abaa", "abab", "abba", "abbb", + /// "baaa", "baab", "baba", "babb", + /// "bbaa", "bbab", "bbba", "bbbb", + /// ]); + /// assert_eq!(expected, got); + /// + /// // The default limit is not too big, but big enough to extract all + /// // literals from '[ab]{2}{2}'. If we shrink the limit to less than 16, + /// // then we'll get a truncated set. Notice that it returns a sequence of + /// // length 4 even though our limit was 10. This is because the sequence + /// // is difficult to increase without blowing the limit. Notice also + /// // that every literal in the sequence is now inexact because they were + /// // stripped of some suffix. + /// let got = Extractor::new().limit_total(10).extract(&hir); + /// let expected = Seq::from_iter([ + /// Literal::inexact("aa"), + /// Literal::inexact("ab"), + /// Literal::inexact("ba"), + /// Literal::inexact("bb"), + /// ]); + /// assert_eq!(expected, got); + /// + /// # Ok::<(), Box>(()) + /// ``` + pub fn limit_total(&mut self, limit: usize) -> &mut Extractor { + self.limit_total = limit; + self + } + + /// Extract a sequence from the given concatenation. Sequences from each of + /// the child HIR expressions are combined via cross product. + /// + /// This short circuits once the cross product turns into a sequence + /// containing only inexact literals. + fn extract_concat<'a, I: Iterator>(&self, it: I) -> Seq { + let mut seq = Seq::singleton(self::Literal::exact(vec![])); + for hir in it { + // If every element in the sequence is inexact, then a cross + // product will always be a no-op. Thus, there is nothing else we + // can add to it and can quit early. Note that this also includes + // infinite sequences. + if seq.is_inexact() { + break; + } + // Note that 'cross' also dispatches based on whether we're + // extracting prefixes or suffixes. + seq = self.cross(seq, &mut self.extract(hir)); + } + seq + } + + /// Extract a sequence from the given alternation. + /// + /// This short circuits once the union turns into an infinite sequence. + fn extract_alternation<'a, I: Iterator>( + &self, + it: I, + ) -> Seq { + let mut seq = Seq::empty(); + for hir in it { + // Once our 'seq' is infinite, every subsequent union + // operation on it will itself always result in an + // infinite sequence. Thus, it can never change and we can + // short-circuit. + if !seq.is_finite() { + break; + } + seq = self.union(seq, &mut self.extract(hir)); + } + seq + } + + /// Extract a sequence of literals from the given repetition. We do our + /// best, Some examples: + /// + /// 'a*' => [inexact(a), exact("")] + /// 'a*?' => [exact(""), inexact(a)] + /// 'a+' => [inexact(a)] + /// 'a{3}' => [exact(aaa)] + /// 'a{3,5} => [inexact(aaa)] + /// + /// The key here really is making sure we get the 'inexact' vs 'exact' + /// attributes correct on each of the literals we add. For example, the + /// fact that 'a*' gives us an inexact 'a' and an exact empty string means + /// that a regex like 'ab*c' will result in [inexact(ab), exact(ac)] + /// literals being extracted, which might actually be a better prefilter + /// than just 'a'. + fn extract_repetition(&self, rep: &hir::Repetition) -> Seq { + let mut subseq = self.extract(&rep.sub); + match *rep { + hir::Repetition { min: 0, max, greedy, .. 
} => { + // When 'max=1', we can retain exactness, since 'a?' is + // equivalent to 'a|'. Similarly below, 'a??' is equivalent to + // '|a'. + if max != Some(1) { + subseq.make_inexact(); + } + let mut empty = Seq::singleton(Literal::exact(vec![])); + if !greedy { + mem::swap(&mut subseq, &mut empty); + } + self.union(subseq, &mut empty) + } + hir::Repetition { min, max: Some(max), .. } if min == max => { + assert!(min > 0); // handled above + let limit = + u32::try_from(self.limit_repeat).unwrap_or(u32::MAX); + let mut seq = Seq::singleton(Literal::exact(vec![])); + for _ in 0..cmp::min(min, limit) { + if seq.is_inexact() { + break; + } + seq = self.cross(seq, &mut subseq.clone()); + } + if usize::try_from(min).is_err() || min > limit { + seq.make_inexact(); + } + seq + } + hir::Repetition { min, .. } => { + assert!(min > 0); // handled above + let limit = + u32::try_from(self.limit_repeat).unwrap_or(u32::MAX); + let mut seq = Seq::singleton(Literal::exact(vec![])); + for _ in 0..cmp::min(min, limit) { + if seq.is_inexact() { + break; + } + seq = self.cross(seq, &mut subseq.clone()); + } + seq.make_inexact(); + seq + } + } + } + + /// Convert the given Unicode class into a sequence of literals if the + /// class is small enough. If the class is too big, return an infinite + /// sequence. + fn extract_class_unicode(&self, cls: &hir::ClassUnicode) -> Seq { + if self.class_over_limit_unicode(cls) { + return Seq::infinite(); + } + let mut seq = Seq::empty(); + for r in cls.iter() { + for ch in r.start()..=r.end() { + seq.push(Literal::from(ch)); + } + } + self.enforce_literal_len(&mut seq); + seq + } + + /// Convert the given byte class into a sequence of literals if the class + /// is small enough. If the class is too big, return an infinite sequence. + fn extract_class_bytes(&self, cls: &hir::ClassBytes) -> Seq { + if self.class_over_limit_bytes(cls) { + return Seq::infinite(); + } + let mut seq = Seq::empty(); + for r in cls.iter() { + for b in r.start()..=r.end() { + seq.push(Literal::from(b)); + } + } + self.enforce_literal_len(&mut seq); + seq + } + + /// Returns true if the given Unicode class exceeds the configured limits + /// on this extractor. + fn class_over_limit_unicode(&self, cls: &hir::ClassUnicode) -> bool { + let mut count = 0; + for r in cls.iter() { + if count > self.limit_class { + return true; + } + count += r.len(); + } + count > self.limit_class + } + + /// Returns true if the given byte class exceeds the configured limits on + /// this extractor. + fn class_over_limit_bytes(&self, cls: &hir::ClassBytes) -> bool { + let mut count = 0; + for r in cls.iter() { + if count > self.limit_class { + return true; + } + count += r.len(); + } + count > self.limit_class + } + + /// Compute the cross product of the two sequences if the result would be + /// within configured limits. Otherwise, make `seq2` infinite and cross the + /// infinite sequence with `seq1`. + fn cross(&self, mut seq1: Seq, seq2: &mut Seq) -> Seq { + if seq1.max_cross_len(seq2).map_or(false, |len| len > self.limit_total) + { + seq2.make_infinite(); + } + if let ExtractKind::Suffix = self.kind { + seq1.cross_reverse(seq2); + } else { + seq1.cross_forward(seq2); + } + assert!(seq1.len().map_or(true, |x| x <= self.limit_total)); + self.enforce_literal_len(&mut seq1); + seq1 + } + + /// Union the two sequences if the result would be within configured + /// limits. Otherwise, make `seq2` infinite and union the infinite sequence + /// with `seq1`. 
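+    ///
+    /// Before falling back to an infinite sequence, this first tries to trim
+    /// both sequences down to short literals (see the comments in the body
+    /// below), since a small finite sequence usually still makes a better
+    /// prefilter than no sequence at all.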
+ fn union(&self, mut seq1: Seq, seq2: &mut Seq) -> Seq { + if seq1.max_union_len(seq2).map_or(false, |len| len > self.limit_total) + { + // We try to trim our literal sequences to see if we can make + // room for more literals. The idea is that we'd rather trim down + // literals already in our sequence if it means we can add a few + // more and retain a finite sequence. Otherwise, we'll union with + // an infinite sequence and that infects everything and effectively + // stops literal extraction in its tracks. + // + // We do we keep 4 bytes here? Well, it's a bit of an abstraction + // leakage. Downstream, the literals may wind up getting fed to + // the Teddy algorithm, which supports searching literals up to + // length 4. So that's why we pick that number here. Arguably this + // should be a tunable parameter, but it seems a little tricky to + // describe. And I'm still unsure if this is the right way to go + // about culling literal sequences. + match self.kind { + ExtractKind::Prefix => { + seq1.keep_first_bytes(4); + seq2.keep_first_bytes(4); + } + ExtractKind::Suffix => { + seq1.keep_last_bytes(4); + seq2.keep_last_bytes(4); + } + } + seq1.dedup(); + seq2.dedup(); + if seq1 + .max_union_len(seq2) + .map_or(false, |len| len > self.limit_total) + { + seq2.make_infinite(); + } + } + seq1.union(seq2); + assert!(seq1.len().map_or(true, |x| x <= self.limit_total)); + seq1 + } + + /// Applies the literal length limit to the given sequence. If none of the + /// literals in the sequence exceed the limit, then this is a no-op. + fn enforce_literal_len(&self, seq: &mut Seq) { + let len = self.limit_literal_len; + match self.kind { + ExtractKind::Prefix => seq.keep_first_bytes(len), + ExtractKind::Suffix => seq.keep_last_bytes(len), + } + } +} + +impl Default for Extractor { + fn default() -> Extractor { + Extractor::new() + } +} + +/// The kind of literals to extract from an [`Hir`] expression. +/// +/// The default extraction kind is `Prefix`. +#[non_exhaustive] +#[derive(Clone, Debug)] +pub enum ExtractKind { + /// Extracts only prefix literals from a regex. + Prefix, + /// Extracts only suffix literals from a regex. + /// + /// Note that the sequence returned by suffix literals currently may + /// not correctly represent leftmost-first or "preference" order match + /// semantics. + Suffix, +} + +impl ExtractKind { + /// Returns true if this kind is the `Prefix` variant. + pub fn is_prefix(&self) -> bool { + matches!(*self, ExtractKind::Prefix) + } + + /// Returns true if this kind is the `Suffix` variant. + pub fn is_suffix(&self) -> bool { + matches!(*self, ExtractKind::Suffix) + } +} + +impl Default for ExtractKind { + fn default() -> ExtractKind { + ExtractKind::Prefix + } +} + +/// A sequence of literals. +/// +/// A `Seq` is very much like a set in that it represents a union of its +/// members. That is, it corresponds to a set of literals where at least one +/// must match in order for a particular [`Hir`] expression to match. (Whether +/// this corresponds to the entire `Hir` expression, a prefix of it or a suffix +/// of it depends on how the `Seq` was extracted from the `Hir`.) +/// +/// It is also unlike a set in that multiple identical literals may appear, +/// and that the order of the literals in the `Seq` matters. For example, if +/// the sequence is `[sam, samwise]` and leftmost-first matching is used, then +/// `samwise` can never match and the sequence is equivalent to `[sam]`. 
+///
+/// # States of a sequence
+///
+/// A `Seq` has a few different logical states to consider:
+///
+/// * The sequence can represent "any" literal. When this happens, the set does
+/// not have a finite size. The purpose of this state is to inhibit callers
+/// from making assumptions about what literals are required in order to match
+/// a particular [`Hir`] expression. Generally speaking, when a set is in this
+/// state, literal optimizations are inhibited. A good example of a regex that
+/// will cause this sort of set to appear is `[A-Za-z]`. The character class
+/// is just too big (and also too narrow) to be usefully expanded into 52
+/// different literals. (Note that the decision for when a seq should become
+/// infinite is determined by the caller. A seq itself has no hard-coded
+/// limits.)
+/// * The sequence can be empty, in which case, it is an affirmative statement
+/// that there are no literals that can match the corresponding `Hir`.
+/// Consequently, the `Hir` never matches any input. For example, `[a&&b]`.
+/// * The sequence can be non-empty, in which case, at least one of the
+/// literals must match in order for the corresponding `Hir` to match.
+///
+/// # Example
+///
+/// This example shows how literal sequences can be simplified by stripping
+/// suffixes and minimizing while maintaining preference order.
+///
+/// ```
+/// use regex_syntax::hir::literal::{Literal, Seq};
+///
+/// let mut seq = Seq::new(&[
+///     "farm",
+///     "appliance",
+///     "faraway",
+///     "apple",
+///     "fare",
+///     "gap",
+///     "applicant",
+///     "applaud",
+/// ]);
+/// seq.keep_first_bytes(3);
+/// seq.minimize_by_preference();
+/// // Notice that 'far' comes before 'app', which matches the order in the
+/// // original sequence. This guarantees that leftmost-first semantics are
+/// // not altered by simplifying the set.
+/// let expected = Seq::from_iter([
+///     Literal::inexact("far"),
+///     Literal::inexact("app"),
+///     Literal::exact("gap"),
+/// ]);
+/// assert_eq!(expected, seq);
+/// ```
+#[derive(Clone, Eq, PartialEq)]
+pub struct Seq {
+    /// The members of this seq.
+    ///
+    /// When `None`, the seq represents all possible literals. That is, it
+    /// prevents one from making assumptions about specific literals in the
+    /// seq, and forces one to treat it as if any literal might be in the seq.
+    ///
+    /// Note that `Some(vec![])` is valid and corresponds to the empty seq of
+    /// literals, i.e., a regex that can never match. For example, `[a&&b]`.
+    /// It is distinct from `Some(vec![""])`, which corresponds to the seq
+    /// containing an empty string, which matches at every position.
+    literals: Option<Vec<Literal>>,
+}
+
+impl Seq {
+    /// Returns an empty sequence.
+    ///
+    /// An empty sequence matches zero literals, and thus corresponds to a
+    /// regex that itself can never match.
+    #[inline]
+    pub fn empty() -> Seq {
+        Seq { literals: Some(vec![]) }
+    }
+
+    /// Returns a sequence of literals without a finite size and may contain
+    /// any literal.
+    ///
+    /// A sequence without finite size does not reveal anything about the
+    /// characteristics of the literals in its set. There are no fixed prefixes
+    /// or suffixes, nor are lower or upper bounds on the length of the literals
+    /// in the set known.
+    ///
+    /// This is useful to represent constructs in a regex that are "too big"
+    /// to usefully represent as a sequence of literals. For example, `[A-Za-z]`.
+    /// When sequences get too big, they lose their discriminating nature and
+    /// are more likely to produce false positives, which in turn makes them
+    /// less likely to speed up searches.
+    ///
+    /// More pragmatically, for many regexes, enumerating all possible literals
+    /// is itself not possible or might otherwise use too many resources. So
+    /// constraining the size of sets during extraction is a practical trade
+    /// off to make.
+    #[inline]
+    pub fn infinite() -> Seq {
+        Seq { literals: None }
+    }
+
+    /// Returns a sequence containing a single literal.
+    #[inline]
+    pub fn singleton(lit: Literal) -> Seq {
+        Seq { literals: Some(vec![lit]) }
+    }
+
+    /// Returns a sequence of exact literals from the given byte strings.
+    #[inline]
+    pub fn new<I, B>(it: I) -> Seq
+    where
+        I: IntoIterator<Item = B>,
+        B: AsRef<[u8]>,
+    {
+        it.into_iter().map(|b| Literal::exact(b.as_ref())).collect()
+    }
+
+    /// If this is a finite sequence, return its members as a slice of
+    /// literals.
+    ///
+    /// The slice returned may be empty, in which case, there are no literals
+    /// that can match this sequence.
+    #[inline]
+    pub fn literals(&self) -> Option<&[Literal]> {
+        self.literals.as_deref()
+    }
+
+    /// Push a literal to the end of this sequence.
+    ///
+    /// If this sequence is not finite, then this is a no-op.
+    ///
+    /// Similarly, if the most recently added item of this sequence is
+    /// equivalent to the literal given, then it is not added. This reflects
+    /// a `Seq`'s "set like" behavior, and represents a practical trade off.
+    /// Namely, there is never any need to have two adjacent and equivalent
+    /// literals in the same sequence, _and_ it is easy to detect in some
+    /// cases.
+    #[inline]
+    pub fn push(&mut self, lit: Literal) {
+        let lits = match self.literals {
+            None => return,
+            Some(ref mut lits) => lits,
+        };
+        if lits.last().map_or(false, |m| m == &lit) {
+            return;
+        }
+        lits.push(lit);
+    }
+
+    /// Make all of the literals in this sequence inexact.
+    ///
+    /// This is a no-op if this sequence is not finite.
+    #[inline]
+    pub fn make_inexact(&mut self) {
+        let lits = match self.literals {
+            None => return,
+            Some(ref mut lits) => lits,
+        };
+        for lit in lits.iter_mut() {
+            lit.make_inexact();
+        }
+    }
+
+    /// Converts this sequence to an infinite sequence.
+    ///
+    /// This is a no-op if the sequence is already infinite.
+    #[inline]
+    pub fn make_infinite(&mut self) {
+        self.literals = None;
+    }
+
+    /// Modify this sequence to contain the cross product between it and the
+    /// sequence given.
+    ///
+    /// The cross product only considers literals in this sequence that are
+    /// exact. That is, inexact literals are not extended.
+    ///
+    /// The literals are always drained from `other`, even if none are used.
+    /// This permits callers to reuse the sequence allocation elsewhere.
+    ///
+    /// If this sequence is infinite, then this is a no-op, regardless of what
+    /// `other` contains (and in this case, the literals are still drained from
+    /// `other`). If `other` is infinite and this sequence is finite, then this
+    /// is a no-op, unless this sequence contains a zero-length literal. In
+    /// which case, the infiniteness of `other` infects this sequence, and this
+    /// sequence is itself made infinite.
+    ///
+    /// Like [`Seq::union`], this may attempt to deduplicate literals. See
+    /// [`Seq::dedup`] for how deduplication deals with exact and inexact
+    /// literals.
+    ///
+    /// # Example
+    ///
+    /// This example shows basic usage and how exact and inexact literals
+    /// interact.
+ /// + /// ``` + /// use regex_syntax::hir::literal::{Literal, Seq}; + /// + /// let mut seq1 = Seq::from_iter([ + /// Literal::exact("foo"), + /// Literal::inexact("bar"), + /// ]); + /// let mut seq2 = Seq::from_iter([ + /// Literal::inexact("quux"), + /// Literal::exact("baz"), + /// ]); + /// seq1.cross_forward(&mut seq2); + /// + /// // The literals are pulled out of seq2. + /// assert_eq!(Some(0), seq2.len()); + /// + /// let expected = Seq::from_iter([ + /// Literal::inexact("fooquux"), + /// Literal::exact("foobaz"), + /// Literal::inexact("bar"), + /// ]); + /// assert_eq!(expected, seq1); + /// ``` + /// + /// This example shows the behavior of when `other` is an infinite + /// sequence. + /// + /// ``` + /// use regex_syntax::hir::literal::{Literal, Seq}; + /// + /// let mut seq1 = Seq::from_iter([ + /// Literal::exact("foo"), + /// Literal::inexact("bar"), + /// ]); + /// let mut seq2 = Seq::infinite(); + /// seq1.cross_forward(&mut seq2); + /// + /// // When seq2 is infinite, cross product doesn't add anything, but + /// // ensures all members of seq1 are inexact. + /// let expected = Seq::from_iter([ + /// Literal::inexact("foo"), + /// Literal::inexact("bar"), + /// ]); + /// assert_eq!(expected, seq1); + /// ``` + /// + /// This example is like the one above, but shows what happens when this + /// sequence contains an empty string. In this case, an infinite `other` + /// sequence infects this sequence (because the empty string means that + /// there are no finite prefixes): + /// + /// ``` + /// use regex_syntax::hir::literal::{Literal, Seq}; + /// + /// let mut seq1 = Seq::from_iter([ + /// Literal::exact("foo"), + /// Literal::exact(""), // inexact provokes same behavior + /// Literal::inexact("bar"), + /// ]); + /// let mut seq2 = Seq::infinite(); + /// seq1.cross_forward(&mut seq2); + /// + /// // seq1 is now infinite! + /// assert!(!seq1.is_finite()); + /// ``` + /// + /// This example shows the behavior of this sequence is infinite. + /// + /// ``` + /// use regex_syntax::hir::literal::{Literal, Seq}; + /// + /// let mut seq1 = Seq::infinite(); + /// let mut seq2 = Seq::from_iter([ + /// Literal::exact("foo"), + /// Literal::inexact("bar"), + /// ]); + /// seq1.cross_forward(&mut seq2); + /// + /// // seq1 remains unchanged. + /// assert!(!seq1.is_finite()); + /// // Even though the literals in seq2 weren't used, it was still drained. + /// assert_eq!(Some(0), seq2.len()); + /// ``` + #[inline] + pub fn cross_forward(&mut self, other: &mut Seq) { + let (lits1, lits2) = match self.cross_preamble(other) { + None => return, + Some((lits1, lits2)) => (lits1, lits2), + }; + let newcap = lits1.len().saturating_mul(lits2.len()); + for selflit in mem::replace(lits1, Vec::with_capacity(newcap)) { + if !selflit.is_exact() { + lits1.push(selflit); + continue; + } + for otherlit in lits2.iter() { + let mut newlit = Literal::exact(Vec::with_capacity( + selflit.len() + otherlit.len(), + )); + newlit.extend(&selflit); + newlit.extend(&otherlit); + if !otherlit.is_exact() { + newlit.make_inexact(); + } + lits1.push(newlit); + } + } + lits2.drain(..); + self.dedup(); + } + + /// Modify this sequence to contain the cross product between it and + /// the sequence given, where the sequences are treated as suffixes + /// instead of prefixes. Namely, the sequence `other` is *prepended* + /// to `self` (as opposed to `other` being *appended* to `self` in + /// [`Seq::cross_forward`]). + /// + /// The cross product only considers literals in this sequence that are + /// exact. 
That is, inexact literals are not extended. + /// + /// The literals are always drained from `other`, even if none are used. + /// This permits callers to reuse the sequence allocation elsewhere. + /// + /// If this sequence is infinite, then this is a no-op, regardless of what + /// `other` contains (and in this case, the literals are still drained from + /// `other`). If `other` is infinite and this sequence is finite, then this + /// is a no-op, unless this sequence contains a zero-length literal. In + /// which case, the infiniteness of `other` infects this sequence, and this + /// sequence is itself made infinite. + /// + /// Like [`Seq::union`], this may attempt to deduplicate literals. See + /// [`Seq::dedup`] for how deduplication deals with exact and inexact + /// literals. + /// + /// # Example + /// + /// This example shows basic usage and how exact and inexact literals + /// interact. + /// + /// ``` + /// use regex_syntax::hir::literal::{Literal, Seq}; + /// + /// let mut seq1 = Seq::from_iter([ + /// Literal::exact("foo"), + /// Literal::inexact("bar"), + /// ]); + /// let mut seq2 = Seq::from_iter([ + /// Literal::inexact("quux"), + /// Literal::exact("baz"), + /// ]); + /// seq1.cross_reverse(&mut seq2); + /// + /// // The literals are pulled out of seq2. + /// assert_eq!(Some(0), seq2.len()); + /// + /// let expected = Seq::from_iter([ + /// Literal::inexact("quuxfoo"), + /// Literal::inexact("bar"), + /// Literal::exact("bazfoo"), + /// ]); + /// assert_eq!(expected, seq1); + /// ``` + /// + /// This example shows the behavior of when `other` is an infinite + /// sequence. + /// + /// ``` + /// use regex_syntax::hir::literal::{Literal, Seq}; + /// + /// let mut seq1 = Seq::from_iter([ + /// Literal::exact("foo"), + /// Literal::inexact("bar"), + /// ]); + /// let mut seq2 = Seq::infinite(); + /// seq1.cross_reverse(&mut seq2); + /// + /// // When seq2 is infinite, cross product doesn't add anything, but + /// // ensures all members of seq1 are inexact. + /// let expected = Seq::from_iter([ + /// Literal::inexact("foo"), + /// Literal::inexact("bar"), + /// ]); + /// assert_eq!(expected, seq1); + /// ``` + /// + /// This example is like the one above, but shows what happens when this + /// sequence contains an empty string. In this case, an infinite `other` + /// sequence infects this sequence (because the empty string means that + /// there are no finite suffixes): + /// + /// ``` + /// use regex_syntax::hir::literal::{Literal, Seq}; + /// + /// let mut seq1 = Seq::from_iter([ + /// Literal::exact("foo"), + /// Literal::exact(""), // inexact provokes same behavior + /// Literal::inexact("bar"), + /// ]); + /// let mut seq2 = Seq::infinite(); + /// seq1.cross_reverse(&mut seq2); + /// + /// // seq1 is now infinite! + /// assert!(!seq1.is_finite()); + /// ``` + /// + /// This example shows the behavior when this sequence is infinite. + /// + /// ``` + /// use regex_syntax::hir::literal::{Literal, Seq}; + /// + /// let mut seq1 = Seq::infinite(); + /// let mut seq2 = Seq::from_iter([ + /// Literal::exact("foo"), + /// Literal::inexact("bar"), + /// ]); + /// seq1.cross_reverse(&mut seq2); + /// + /// // seq1 remains unchanged. + /// assert!(!seq1.is_finite()); + /// // Even though the literals in seq2 weren't used, it was still drained. 
+    /// assert_eq!(Some(0), seq2.len());
+    /// ```
+    #[inline]
+    pub fn cross_reverse(&mut self, other: &mut Seq) {
+        let (lits1, lits2) = match self.cross_preamble(other) {
+            None => return,
+            Some((lits1, lits2)) => (lits1, lits2),
+        };
+        // We basically proceed as we do in 'cross_forward' at this point,
+        // except that the outer loop is now 'other' and the inner loop is now
+        // 'self'. That's because 'self' corresponds to suffixes and 'other'
+        // corresponds to the sequence we want to *prepend* to the suffixes.
+        let newcap = lits1.len().saturating_mul(lits2.len());
+        let selflits = mem::replace(lits1, Vec::with_capacity(newcap));
+        for (i, otherlit) in lits2.drain(..).enumerate() {
+            for selflit in selflits.iter() {
+                if !selflit.is_exact() {
+                    // If the suffix isn't exact, then we can't prepend
+                    // anything to it. However, we still want to keep it. But
+                    // we only want to keep one of them, to avoid duplication.
+                    // (The duplication is okay from a correctness perspective,
+                    // but wasteful.)
+                    if i == 0 {
+                        lits1.push(selflit.clone());
+                    }
+                    continue;
+                }
+                let mut newlit = Literal::exact(Vec::with_capacity(
+                    otherlit.len() + selflit.len(),
+                ));
+                newlit.extend(&otherlit);
+                newlit.extend(&selflit);
+                if !otherlit.is_exact() {
+                    newlit.make_inexact();
+                }
+                lits1.push(newlit);
+            }
+        }
+        self.dedup();
+    }
+
+    /// A helper function that corresponds to the subtle preamble for both
+    /// `cross_forward` and `cross_reverse`. In effect, it handles the cases
+    /// of infinite sequences for both `self` and `other`, as well as ensuring
+    /// that literals from `other` are drained even if they aren't used.
+    fn cross_preamble<'a>(
+        &'a mut self,
+        other: &'a mut Seq,
+    ) -> Option<(&'a mut Vec<Literal>, &'a mut Vec<Literal>)> {
+        let lits2 = match other.literals {
+            None => {
+                // If our current seq contains the empty string and the seq
+                // we're adding matches any literal, then it follows that the
+                // current seq must now also match any literal.
+                //
+                // Otherwise, we just have to make sure everything in this
+                // sequence is inexact.
+                if self.min_literal_len() == Some(0) {
+                    *self = Seq::infinite();
+                } else {
+                    self.make_inexact();
+                }
+                return None;
+            }
+            Some(ref mut lits) => lits,
+        };
+        let lits1 = match self.literals {
+            None => {
+                // If we aren't going to make it to the end of this routine
+                // where lits2 is drained, then we need to do it now.
+                lits2.drain(..);
+                return None;
+            }
+            Some(ref mut lits) => lits,
+        };
+        Some((lits1, lits2))
+    }
+
+    /// Unions the `other` sequence into this one.
+    ///
+    /// The literals are always drained out of the given `other` sequence,
+    /// even if they are being unioned into an infinite sequence. This permits
+    /// the caller to reuse the `other` sequence in another context.
+    ///
+    /// Some literal deduping may be performed. If any deduping happens,
+    /// any leftmost-first or "preference" order match semantics will be
+    /// preserved.
+    ///
+    /// # Example
+    ///
+    /// This example shows basic usage.
+    ///
+    /// ```
+    /// use regex_syntax::hir::literal::Seq;
+    ///
+    /// let mut seq1 = Seq::new(&["foo", "bar"]);
+    /// let mut seq2 = Seq::new(&["bar", "quux", "foo"]);
+    /// seq1.union(&mut seq2);
+    ///
+    /// // The literals are pulled out of seq2.
+    /// assert_eq!(Some(0), seq2.len());
+    ///
+    /// // Adjacent literals are deduped, but non-adjacent literals may not be.
+    /// assert_eq!(Seq::new(&["foo", "bar", "quux", "foo"]), seq1);
+    /// ```
+    ///
+    /// This example shows that literals are drained from `other` even when
+    /// they aren't necessarily used.
+ /// + /// ``` + /// use regex_syntax::hir::literal::Seq; + /// + /// let mut seq1 = Seq::infinite(); + /// // Infinite sequences have no finite length. + /// assert_eq!(None, seq1.len()); + /// + /// let mut seq2 = Seq::new(&["bar", "quux", "foo"]); + /// seq1.union(&mut seq2); + /// + /// // seq1 is still infinite and seq2 has been drained. + /// assert_eq!(None, seq1.len()); + /// assert_eq!(Some(0), seq2.len()); + /// ``` + #[inline] + pub fn union(&mut self, other: &mut Seq) { + let lits2 = match other.literals { + None => { + // Unioning with an infinite sequence always results in an + // infinite sequence. + self.make_infinite(); + return; + } + Some(ref mut lits) => lits.drain(..), + }; + let lits1 = match self.literals { + None => return, + Some(ref mut lits) => lits, + }; + lits1.extend(lits2); + self.dedup(); + } + + /// Unions the `other` sequence into this one by splice the `other` + /// sequence at the position of the first zero-length literal. + /// + /// This is useful for preserving preference order semantics when combining + /// two literal sequences. For example, in the regex `(a||f)+foo`, the + /// correct preference order prefix sequence is `[a, foo, f]`. + /// + /// The literals are always drained out of the given `other` sequence, + /// even if they are being unioned into an infinite sequence. This permits + /// the caller to reuse the `other` sequence in another context. Note that + /// the literals are drained even if no union is performed as well, i.e., + /// when this sequence does not contain a zero-length literal. + /// + /// Some literal deduping may be performed. If any deduping happens, + /// any leftmost-first or "preference" order match semantics will be + /// preserved. + /// + /// # Example + /// + /// This example shows basic usage. + /// + /// ``` + /// use regex_syntax::hir::literal::Seq; + /// + /// let mut seq1 = Seq::new(&["a", "", "f", ""]); + /// let mut seq2 = Seq::new(&["foo"]); + /// seq1.union_into_empty(&mut seq2); + /// + /// // The literals are pulled out of seq2. + /// assert_eq!(Some(0), seq2.len()); + /// // 'foo' gets spliced into seq1 where the first empty string occurs. + /// assert_eq!(Seq::new(&["a", "foo", "f"]), seq1); + /// ``` + /// + /// This example shows that literals are drained from `other` even when + /// they aren't necessarily used. + /// + /// ``` + /// use regex_syntax::hir::literal::Seq; + /// + /// let mut seq1 = Seq::new(&["foo", "bar"]); + /// let mut seq2 = Seq::new(&["bar", "quux", "foo"]); + /// seq1.union_into_empty(&mut seq2); + /// + /// // seq1 has no zero length literals, so no splicing happens. + /// assert_eq!(Seq::new(&["foo", "bar"]), seq1); + /// // Even though no splicing happens, seq2 is still drained. + /// assert_eq!(Some(0), seq2.len()); + /// ``` + #[inline] + pub fn union_into_empty(&mut self, other: &mut Seq) { + let lits2 = other.literals.as_mut().map(|lits| lits.drain(..)); + let lits1 = match self.literals { + None => return, + Some(ref mut lits) => lits, + }; + let first_empty = match lits1.iter().position(|m| m.is_empty()) { + None => return, + Some(i) => i, + }; + let lits2 = match lits2 { + None => { + // Note that we are only here if we've found an empty literal, + // which implies that an infinite sequence infects this seq and + // also turns it into an infinite sequence. + self.literals = None; + return; + } + Some(lits) => lits, + }; + // Clearing out the empties needs to come before the splice because + // the splice might add more empties that we don't want to get rid + // of. 
Since we're splicing into the position of the first empty, the + // 'first_empty' position computed above is still correct. + lits1.retain(|m| !m.is_empty()); + lits1.splice(first_empty..first_empty, lits2); + self.dedup(); + } + + /// Deduplicate adjacent equivalent literals in this sequence. + /// + /// If adjacent literals are equivalent strings but one is exact and the + /// other inexact, the inexact literal is kept and the exact one is + /// removed. + /// + /// Deduping an infinite sequence is a no-op. + /// + /// # Example + /// + /// This example shows how literals that are duplicate byte strings but + /// are not equivalent with respect to exactness are resolved. + /// + /// ``` + /// use regex_syntax::hir::literal::{Literal, Seq}; + /// + /// let mut seq = Seq::from_iter([ + /// Literal::exact("foo"), + /// Literal::inexact("foo"), + /// ]); + /// seq.dedup(); + /// + /// assert_eq!(Seq::from_iter([Literal::inexact("foo")]), seq); + /// ``` + #[inline] + pub fn dedup(&mut self) { + if let Some(ref mut lits) = self.literals { + lits.dedup_by(|lit1, lit2| { + if lit1.as_bytes() != lit2.as_bytes() { + return false; + } + if lit1.is_exact() != lit2.is_exact() { + lit1.make_inexact(); + lit2.make_inexact(); + } + true + }); + } + } + + /// Sorts this sequence of literals lexicographically. + /// + /// Note that if, before sorting, if a literal that is a prefix of another + /// literal appears after it, then after sorting, the sequence will not + /// represent the same preference order match semantics. For example, + /// sorting the sequence `[samwise, sam]` yields the sequence `[sam, + /// samwise]`. Under preference order semantics, the latter sequence will + /// never match `samwise` where as the first sequence can. + /// + /// # Example + /// + /// This example shows basic usage. + /// + /// ``` + /// use regex_syntax::hir::literal::Seq; + /// + /// let mut seq = Seq::new(&["foo", "quux", "bar"]); + /// seq.sort(); + /// + /// assert_eq!(Seq::new(&["bar", "foo", "quux"]), seq); + /// ``` + #[inline] + pub fn sort(&mut self) { + if let Some(ref mut lits) = self.literals { + lits.sort(); + } + } + + /// Reverses all of the literals in this sequence. + /// + /// The order of the sequence itself is preserved. + /// + /// # Example + /// + /// This example shows basic usage. + /// + /// ``` + /// use regex_syntax::hir::literal::Seq; + /// + /// let mut seq = Seq::new(&["oof", "rab"]); + /// seq.reverse_literals(); + /// assert_eq!(Seq::new(&["foo", "bar"]), seq); + /// ``` + #[inline] + pub fn reverse_literals(&mut self) { + if let Some(ref mut lits) = self.literals { + for lit in lits.iter_mut() { + lit.reverse(); + } + } + } + + /// Shrinks this seq to its minimal size while respecting the preference + /// order of its literals. + /// + /// While this routine will remove duplicate literals from this seq, it + /// will also remove literals that can never match in a leftmost-first or + /// "preference order" search. Similar to [`Seq::dedup`], if a literal is + /// deduped, then the one that remains is made inexact. + /// + /// This is a no-op on seqs that are empty or not finite. + /// + /// # Example + /// + /// This example shows the difference between `{sam, samwise}` and + /// `{samwise, sam}`. + /// + /// ``` + /// use regex_syntax::hir::literal::{Literal, Seq}; + /// + /// // If 'sam' comes before 'samwise' and a preference order search is + /// // executed, then 'samwise' can never match. 
+ /// let mut seq = Seq::new(&["sam", "samwise"]); + /// seq.minimize_by_preference(); + /// assert_eq!(Seq::from_iter([Literal::inexact("sam")]), seq); + /// + /// // But if they are reversed, then it's possible for 'samwise' to match + /// // since it is given higher preference. + /// let mut seq = Seq::new(&["samwise", "sam"]); + /// seq.minimize_by_preference(); + /// assert_eq!(Seq::new(&["samwise", "sam"]), seq); + /// ``` + /// + /// This example shows that if an empty string is in this seq, then + /// anything that comes after it can never match. + /// + /// ``` + /// use regex_syntax::hir::literal::{Literal, Seq}; + /// + /// // An empty string is a prefix of all strings, so it automatically + /// // inhibits any subsequent strings from matching. + /// let mut seq = Seq::new(&["foo", "bar", "", "quux", "fox"]); + /// seq.minimize_by_preference(); + /// let expected = Seq::from_iter([ + /// Literal::exact("foo"), + /// Literal::exact("bar"), + /// Literal::inexact(""), + /// ]); + /// assert_eq!(expected, seq); + /// + /// // And of course, if it's at the beginning, then it makes it impossible + /// // for anything else to match. + /// let mut seq = Seq::new(&["", "foo", "quux", "fox"]); + /// seq.minimize_by_preference(); + /// assert_eq!(Seq::from_iter([Literal::inexact("")]), seq); + /// ``` + #[inline] + pub fn minimize_by_preference(&mut self) { + if let Some(ref mut lits) = self.literals { + PreferenceTrie::minimize(lits, false); + } + } + + /// Trims all literals in this seq such that only the first `len` bytes + /// remain. If a literal has less than or equal to `len` bytes, then it + /// remains unchanged. Otherwise, it is trimmed and made inexact. + /// + /// # Example + /// + /// ``` + /// use regex_syntax::hir::literal::{Literal, Seq}; + /// + /// let mut seq = Seq::new(&["a", "foo", "quux"]); + /// seq.keep_first_bytes(2); + /// + /// let expected = Seq::from_iter([ + /// Literal::exact("a"), + /// Literal::inexact("fo"), + /// Literal::inexact("qu"), + /// ]); + /// assert_eq!(expected, seq); + /// ``` + #[inline] + pub fn keep_first_bytes(&mut self, len: usize) { + if let Some(ref mut lits) = self.literals { + for m in lits.iter_mut() { + m.keep_first_bytes(len); + } + } + } + + /// Trims all literals in this seq such that only the last `len` bytes + /// remain. If a literal has less than or equal to `len` bytes, then it + /// remains unchanged. Otherwise, it is trimmed and made inexact. + /// + /// # Example + /// + /// ``` + /// use regex_syntax::hir::literal::{Literal, Seq}; + /// + /// let mut seq = Seq::new(&["a", "foo", "quux"]); + /// seq.keep_last_bytes(2); + /// + /// let expected = Seq::from_iter([ + /// Literal::exact("a"), + /// Literal::inexact("oo"), + /// Literal::inexact("ux"), + /// ]); + /// assert_eq!(expected, seq); + /// ``` + #[inline] + pub fn keep_last_bytes(&mut self, len: usize) { + if let Some(ref mut lits) = self.literals { + for m in lits.iter_mut() { + m.keep_last_bytes(len); + } + } + } + + /// Returns true if this sequence is finite. + /// + /// When false, this sequence is infinite and must be treated as if it + /// contains every possible literal. + #[inline] + pub fn is_finite(&self) -> bool { + self.literals.is_some() + } + + /// Returns true if and only if this sequence is finite and empty. + /// + /// An empty sequence never matches anything. It can only be produced by + /// literal extraction when the corresponding regex itself cannot match. 
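+    ///
+    /// # Example
+    ///
+    /// A brief illustrative example: only the finite, zero-length sequence
+    /// counts as empty.
+    ///
+    /// ```
+    /// use regex_syntax::hir::literal::Seq;
+    ///
+    /// assert!(Seq::empty().is_empty());
+    /// // A sequence containing one empty string is not an empty sequence.
+    /// assert!(!Seq::new(&[""]).is_empty());
+    /// // An infinite sequence has no finite length, so it is not empty.
+    /// assert!(!Seq::infinite().is_empty());
+    /// ```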
+    #[inline]
+    pub fn is_empty(&self) -> bool {
+        self.len() == Some(0)
+    }
+
+    /// Returns the number of literals in this sequence if the sequence is
+    /// finite. If the sequence is infinite, then `None` is returned.
+    #[inline]
+    pub fn len(&self) -> Option<usize> {
+        self.literals.as_ref().map(|lits| lits.len())
+    }
+
+    /// Returns true if and only if all literals in this sequence are exact.
+    ///
+    /// This returns false if the sequence is infinite.
+    #[inline]
+    pub fn is_exact(&self) -> bool {
+        self.literals().map_or(false, |lits| lits.iter().all(|x| x.is_exact()))
+    }
+
+    /// Returns true if and only if all literals in this sequence are inexact.
+    ///
+    /// This returns true if the sequence is infinite.
+    #[inline]
+    pub fn is_inexact(&self) -> bool {
+        self.literals().map_or(true, |lits| lits.iter().all(|x| !x.is_exact()))
+    }
+
+    /// Return the maximum length of the sequence that would result from
+    /// unioning `self` with `other`. If either set is infinite, then this
+    /// returns `None`.
+    #[inline]
+    pub fn max_union_len(&self, other: &Seq) -> Option<usize> {
+        let len1 = self.len()?;
+        let len2 = other.len()?;
+        Some(len1.saturating_add(len2))
+    }
+
+    /// Return the maximum length of the sequence that would result from the
+    /// cross product of `self` with `other`. If either set is infinite, then
+    /// this returns `None`.
+    #[inline]
+    pub fn max_cross_len(&self, other: &Seq) -> Option<usize> {
+        let len1 = self.len()?;
+        let len2 = other.len()?;
+        Some(len1.saturating_mul(len2))
+    }
+
+    /// Returns the length of the shortest literal in this sequence.
+    ///
+    /// If the sequence is infinite or empty, then this returns `None`.
+    #[inline]
+    pub fn min_literal_len(&self) -> Option<usize> {
+        self.literals.as_ref()?.iter().map(|x| x.len()).min()
+    }
+
+    /// Returns the length of the longest literal in this sequence.
+    ///
+    /// If the sequence is infinite or empty, then this returns `None`.
+    #[inline]
+    pub fn max_literal_len(&self) -> Option<usize> {
+        self.literals.as_ref()?.iter().map(|x| x.len()).max()
+    }
+
+    /// Returns the longest common prefix from this seq.
+    ///
+    /// If the seq matches any literal or otherwise contains no literals, then
+    /// there is no meaningful prefix and this returns `None`.
+    ///
+    /// # Example
+    ///
+    /// This shows some example seqs and their longest common prefix.
+    ///
+    /// ```
+    /// use regex_syntax::hir::literal::Seq;
+    ///
+    /// let seq = Seq::new(&["foo", "foobar", "fo"]);
+    /// assert_eq!(Some(&b"fo"[..]), seq.longest_common_prefix());
+    /// let seq = Seq::new(&["foo", "foo"]);
+    /// assert_eq!(Some(&b"foo"[..]), seq.longest_common_prefix());
+    /// let seq = Seq::new(&["foo", "bar"]);
+    /// assert_eq!(Some(&b""[..]), seq.longest_common_prefix());
+    /// let seq = Seq::new(&[""]);
+    /// assert_eq!(Some(&b""[..]), seq.longest_common_prefix());
+    ///
+    /// let seq = Seq::infinite();
+    /// assert_eq!(None, seq.longest_common_prefix());
+    /// let seq = Seq::empty();
+    /// assert_eq!(None, seq.longest_common_prefix());
+    /// ```
+    #[inline]
+    pub fn longest_common_prefix(&self) -> Option<&[u8]> {
+        // If we match everything or match nothing, then there's no meaningful
+        // longest common prefix.
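+        // The loop below maintains `len` as the length of the prefix of the
+        // first literal (`base`) that is shared by every literal seen so far,
+        // shrinking it whenever a later literal diverges earlier.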
+ let lits = match self.literals { + None => return None, + Some(ref lits) => lits, + }; + if lits.len() == 0 { + return None; + } + let base = lits[0].as_bytes(); + let mut len = base.len(); + for m in lits.iter().skip(1) { + len = m + .as_bytes() + .iter() + .zip(base[..len].iter()) + .take_while(|&(a, b)| a == b) + .count(); + if len == 0 { + return Some(&[]); + } + } + Some(&base[..len]) + } + + /// Returns the longest common suffix from this seq. + /// + /// If the seq matches any literal or other contains no literals, then + /// there is no meaningful suffix and this returns `None`. + /// + /// # Example + /// + /// This shows some example seqs and their longest common suffix. + /// + /// ``` + /// use regex_syntax::hir::literal::Seq; + /// + /// let seq = Seq::new(&["oof", "raboof", "of"]); + /// assert_eq!(Some(&b"of"[..]), seq.longest_common_suffix()); + /// let seq = Seq::new(&["foo", "foo"]); + /// assert_eq!(Some(&b"foo"[..]), seq.longest_common_suffix()); + /// let seq = Seq::new(&["foo", "bar"]); + /// assert_eq!(Some(&b""[..]), seq.longest_common_suffix()); + /// let seq = Seq::new(&[""]); + /// assert_eq!(Some(&b""[..]), seq.longest_common_suffix()); + /// + /// let seq = Seq::infinite(); + /// assert_eq!(None, seq.longest_common_suffix()); + /// let seq = Seq::empty(); + /// assert_eq!(None, seq.longest_common_suffix()); + /// ``` + #[inline] + pub fn longest_common_suffix(&self) -> Option<&[u8]> { + // If we match everything or match nothing, then there's no meaningful + // longest common suffix. + let lits = match self.literals { + None => return None, + Some(ref lits) => lits, + }; + if lits.len() == 0 { + return None; + } + let base = lits[0].as_bytes(); + let mut len = base.len(); + for m in lits.iter().skip(1) { + len = m + .as_bytes() + .iter() + .rev() + .zip(base[base.len() - len..].iter().rev()) + .take_while(|&(a, b)| a == b) + .count(); + if len == 0 { + return Some(&[]); + } + } + Some(&base[base.len() - len..]) + } + + /// Optimizes this seq while treating its literals as prefixes and + /// respecting the preference order of its literals. + /// + /// The specific way "optimization" works is meant to be an implementation + /// detail, as it essentially represents a set of heuristics. The goal + /// that optimization tries to accomplish is to make the literals in this + /// set reflect inputs that will result in a more effective prefilter. + /// Principally by reducing the false positive rate of candidates found by + /// the literals in this sequence. That is, when a match of a literal is + /// found, we would like it to be a strong predictor of the overall match + /// of the regex. If it isn't, then much time will be spent starting and + /// stopping the prefilter search and attempting to confirm the match only + /// to have it fail. + /// + /// Some of those heuristics might be: + /// + /// * Identifying a common prefix from a larger sequence of literals, and + /// shrinking the sequence down to that single common prefix. + /// * Rejecting the sequence entirely if it is believed to result in very + /// high false positive rate. When this happens, the sequence is made + /// infinite. + /// * Shrinking the sequence to a smaller number of literals representing + /// prefixes, but not shrinking it so much as to make literals too short. + /// (A sequence with very short literals, of 1 or 2 bytes, will typically + /// result in a higher false positive rate.) + /// + /// Optimization should only be run once extraction is complete. 
Namely, + /// optimization may make assumptions that do not compose with other + /// operations in the middle of extraction. For example, optimization will + /// reduce `[E(sam), E(samwise)]` to `[E(sam)]`, but such a transformation + /// is only valid if no other extraction will occur. If other extraction + /// may occur, then the correct transformation would be to `[I(sam)]`. + /// + /// The [`Seq::optimize_for_suffix_by_preference`] does the same thing, but + /// for suffixes. + /// + /// # Example + /// + /// This shows how optimization might transform a sequence. Note that + /// the specific behavior is not a documented guarantee. The heuristics + /// used are an implementation detail and may change over time in semver + /// compatible releases. + /// + /// ``` + /// use regex_syntax::hir::literal::{Seq, Literal}; + /// + /// let mut seq = Seq::new(&[ + /// "samantha", + /// "sam", + /// "samwise", + /// "frodo", + /// ]); + /// seq.optimize_for_prefix_by_preference(); + /// assert_eq!(Seq::from_iter([ + /// Literal::exact("samantha"), + /// // Kept exact even though 'samwise' got pruned + /// // because optimization assumes literal extraction + /// // has finished. + /// Literal::exact("sam"), + /// Literal::exact("frodo"), + /// ]), seq); + /// ``` + /// + /// # Example: optimization may make the sequence infinite + /// + /// If the heuristics deem that the sequence could cause a very high false + /// positive rate, then it may make the sequence infinite, effectively + /// disabling its use as a prefilter. + /// + /// ``` + /// use regex_syntax::hir::literal::{Seq, Literal}; + /// + /// let mut seq = Seq::new(&[ + /// "samantha", + /// // An empty string matches at every position, + /// // thus rendering the prefilter completely + /// // ineffective. + /// "", + /// "sam", + /// "samwise", + /// "frodo", + /// ]); + /// seq.optimize_for_prefix_by_preference(); + /// assert!(!seq.is_finite()); + /// ``` + /// + /// Do note that just because there is a `" "` in the sequence, that + /// doesn't mean the sequence will always be made infinite after it is + /// optimized. Namely, if the sequence is considered exact (any match + /// corresponds to an overall match of the original regex), then any match + /// is an overall match, and so the false positive rate is always `0`. + /// + /// To demonstrate this, we remove `samwise` from our sequence. This + /// results in no optimization happening and all literals remain exact. + /// Thus the entire sequence is exact, and it is kept as-is, even though + /// one is an ASCII space: + /// + /// ``` + /// use regex_syntax::hir::literal::{Seq, Literal}; + /// + /// let mut seq = Seq::new(&[ + /// "samantha", + /// " ", + /// "sam", + /// "frodo", + /// ]); + /// seq.optimize_for_prefix_by_preference(); + /// assert!(seq.is_finite()); + /// ``` + #[inline] + pub fn optimize_for_prefix_by_preference(&mut self) { + self.optimize_by_preference(true); + } + + /// Optimizes this seq while treating its literals as suffixes and + /// respecting the preference order of its literals. + /// + /// Optimization should only be run once extraction is complete. + /// + /// The [`Seq::optimize_for_prefix_by_preference`] does the same thing, but + /// for prefixes. See its documentation for more explanation. 
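+    ///
+    /// # Example
+    ///
+    /// A small illustrative sketch; as with the prefix variant, the exact
+    /// heuristics are an implementation detail. An empty literal matches at
+    /// every position, so the sequence is made infinite:
+    ///
+    /// ```
+    /// use regex_syntax::hir::literal::Seq;
+    ///
+    /// let mut seq = Seq::new(&["samantha", "", "sam"]);
+    /// seq.optimize_for_suffix_by_preference();
+    /// assert!(!seq.is_finite());
+    /// ```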
+ #[inline] + pub fn optimize_for_suffix_by_preference(&mut self) { + self.optimize_by_preference(false); + } + + fn optimize_by_preference(&mut self, prefix: bool) { + let origlen = match self.len() { + None => return, + Some(len) => len, + }; + // Just give up now if our sequence contains an empty string. + if self.min_literal_len().map_or(false, |len| len == 0) { + // We squash the sequence so that nobody else gets any bright + // ideas to try and use it. An empty string implies a match at + // every position. A prefilter cannot help you here. + self.make_infinite(); + return; + } + // Make sure we start with the smallest sequence possible. We use a + // special version of preference minimization that retains exactness. + // This is legal because optimization is only expected to occur once + // extraction is complete. + if prefix { + if let Some(ref mut lits) = self.literals { + PreferenceTrie::minimize(lits, true); + } + } + + // Look for a common prefix (or suffix). If we found one of those and + // it's long enough, then it's a good bet that it will be our fastest + // possible prefilter since single-substring search is so fast. + let fix = if prefix { + self.longest_common_prefix() + } else { + self.longest_common_suffix() + }; + if let Some(fix) = fix { + // As a special case, if we have a common prefix and the leading + // byte of that prefix is one that we think probably occurs rarely, + // then strip everything down to just that single byte. This should + // promote the use of memchr. + // + // ... we only do this though if our sequence has more than one + // literal. Otherwise, we'd rather just stick with a single literal + // scan. That is, using memchr is probably better than looking + // for 2 or more literals, but probably not as good as a straight + // memmem search. + // + // ... and also only do this when the prefix is short and probably + // not too discriminatory anyway. If it's longer, then it's + // probably quite discriminatory and thus is likely to have a low + // false positive rate. + if prefix + && origlen > 1 + && fix.len() >= 1 + && fix.len() <= 3 + && rank(fix[0]) < 200 + { + self.keep_first_bytes(1); + self.dedup(); + return; + } + // We only strip down to the common prefix/suffix if we think + // the existing set of literals isn't great, or if the common + // prefix/suffix is expected to be particularly discriminatory. + let isfast = + self.is_exact() && self.len().map_or(false, |len| len <= 16); + let usefix = fix.len() > 4 || (fix.len() > 1 && !isfast); + if usefix { + // If we keep exactly the number of bytes equal to the length + // of the prefix (or suffix), then by the definition of a + // prefix, every literal in the sequence will be equivalent. + // Thus, 'dedup' will leave us with one literal. + // + // We do it this way to avoid an alloc, but also to make sure + // the exactness of literals is kept (or not). + if prefix { + self.keep_first_bytes(fix.len()); + } else { + self.keep_last_bytes(fix.len()); + } + self.dedup(); + assert_eq!(Some(1), self.len()); + // We still fall through here. In particular, we want our + // longest common prefix to be subject to the poison check. + } + } + // If we have an exact sequence, we *probably* just want to keep it + // as-is. But there are some cases where we don't. So we save a copy of + // the exact sequence now, and then try to do some more optimizations + // below. If those don't work out, we go back to this exact sequence. 
+        //
+        // The specific motivation for this is that we sometimes wind up with
+        // an exact sequence with a hefty number of literals. Say, 100. If we
+        // stuck with that, it would be too big for Teddy and would result in
+        // using Aho-Corasick. Which is fine... but the lazy DFA is plenty
+        // suitable in such cases. The real issue is that we will wind up not
+        // using a fast prefilter at all. So in cases like this, even though
+        // we have an exact sequence, it would be better to try and shrink the
+        // sequence (which we do below) and use it as a prefilter that can
+        // produce false positive matches.
+        //
+        // But if the shrinking below results in a sequence that "sucks," then
+        // we don't want to use that because we already have an exact sequence
+        // in hand.
+        let exact: Option<Seq> =
+            if self.is_exact() { Some(self.clone()) } else { None };
+        // Now we attempt to shorten the sequence. The idea here is that we
+        // don't want to look for too many literals, but we want to shorten
+        // our sequence enough to improve our odds of using better algorithms
+        // downstream (such as Teddy).
+        //
+        // The pair of numbers in this list corresponds to the maximal prefix
+        // (in bytes) to keep for all literals and the length of the sequence
+        // at which to do it.
+        //
+        // So for example, the pair (3, 500) would mean, "if we have more than
+        // 500 literals in our sequence, then truncate all of our literals
+        // such that they are at most 3 bytes in length and then minimize the
+        // sequence."
+        const ATTEMPTS: [(usize, usize); 5] =
+            [(5, 10), (4, 10), (3, 64), (2, 64), (1, 10)];
+        for (keep, limit) in ATTEMPTS {
+            let len = match self.len() {
+                None => break,
+                Some(len) => len,
+            };
+            if len <= limit {
+                break;
+            }
+            if prefix {
+                self.keep_first_bytes(keep);
+            } else {
+                self.keep_last_bytes(keep);
+            }
+            if prefix {
+                if let Some(ref mut lits) = self.literals {
+                    PreferenceTrie::minimize(lits, true);
+                }
+            }
+        }
+        // Check for a poison literal. A poison literal is one that is short
+        // and is believed to have a very high match count. These poisons
+        // generally lead to a prefilter with a very high false positive rate,
+        // and thus overall worse performance.
+        //
+        // We do this last because we could have gone from a non-poisonous
+        // sequence to a poisonous one. Perhaps we should add some code to
+        // prevent such transitions in the first place, but then again, we
+        // likely only made the transition in the first place if the sequence
+        // was itself huge. And huge sequences are themselves poisonous. So...
+        if let Some(lits) = self.literals() {
+            if lits.iter().any(|lit| lit.is_poisonous()) {
+                self.make_infinite();
+            }
+        }
+        // OK, if we had an exact sequence before attempting more optimizations
+        // above and our post-optimized sequence sucks for some reason or
+        // another, then we go back to the exact sequence.
+        if let Some(exact) = exact {
+            // If optimizing resulted in dropping our literals, then certainly
+            // backup and use the exact sequence that we had.
+            if !self.is_finite() {
+                *self = exact;
+                return;
+            }
+            // If our optimized sequence contains a short literal, then it's
+            // *probably* not so great. So throw it away and revert to the
+            // exact sequence.
+            if self.min_literal_len().map_or(true, |len| len <= 2) {
+                *self = exact;
+                return;
+            }
+            // Finally, if our optimized sequence is "big" (i.e., can't use
+            // Teddy), then also don't use it and rely on the exact sequence.
+            if self.len().map_or(true, |len| len > 64) {
+                *self = exact;
+                return;
+            }
+        }
+    }
+}
+
+impl core::fmt::Debug for Seq {
+    fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
+        write!(f, "Seq")?;
+        if let Some(lits) = self.literals() {
+            f.debug_list().entries(lits.iter()).finish()
+        } else {
+            write!(f, "[∞]")
+        }
+    }
+}
+
+impl FromIterator<Literal> for Seq {
+    fn from_iter<T: IntoIterator<Item = Literal>>(it: T) -> Seq {
+        let mut seq = Seq::empty();
+        for literal in it {
+            seq.push(literal);
+        }
+        seq
+    }
+}
+
+/// A single literal extracted from an [`Hir`] expression.
+///
+/// A literal is composed of two things:
+///
+/// * A sequence of bytes. No guarantees with respect to UTF-8 are provided.
+/// In particular, even if the regex a literal is extracted from is UTF-8, the
+/// literal extracted may not be valid UTF-8. (For example, if an [`Extractor`]
+/// limit resulted in trimming a literal in a way that splits a codepoint.)
+/// * Whether the literal is "exact" or not. An "exact" literal means that it
+/// has not been trimmed, and may continue to be extended. If a literal is
+/// "exact" after visiting the entire `Hir` expression, then this implies that
+/// the literal leads to a match state. (Although it doesn't necessarily imply
+/// all occurrences of the literal correspond to a match of the regex, since
+/// literal extraction ignores look-around assertions.)
+#[derive(Clone, Eq, PartialEq, PartialOrd, Ord)]
+pub struct Literal {
+    bytes: Vec<u8>,
+    exact: bool,
+}
+
+impl Literal {
+    /// Returns a new exact literal containing the bytes given.
+    #[inline]
+    pub fn exact<B: Into<Vec<u8>>>(bytes: B) -> Literal {
+        Literal { bytes: bytes.into(), exact: true }
+    }
+
+    /// Returns a new inexact literal containing the bytes given.
+    #[inline]
+    pub fn inexact<B: Into<Vec<u8>>>(bytes: B) -> Literal {
+        Literal { bytes: bytes.into(), exact: false }
+    }
+
+    /// Returns the bytes in this literal.
+    #[inline]
+    pub fn as_bytes(&self) -> &[u8] {
+        &self.bytes
+    }
+
+    /// Yields ownership of the bytes inside this literal.
+    ///
+    /// Note that this throws away whether the literal is "exact" or not.
+    #[inline]
+    pub fn into_bytes(self) -> Vec<u8> {
+        self.bytes
+    }
+
+    /// Returns the length of this literal in bytes.
+    #[inline]
+    pub fn len(&self) -> usize {
+        self.as_bytes().len()
+    }
+
+    /// Returns true if and only if this literal has zero bytes.
+    #[inline]
+    pub fn is_empty(&self) -> bool {
+        self.len() == 0
+    }
+
+    /// Returns true if and only if this literal is exact.
+    #[inline]
+    pub fn is_exact(&self) -> bool {
+        self.exact
+    }
+
+    /// Marks this literal as inexact.
+    ///
+    /// Inexact literals can never be extended. For example,
+    /// [`Seq::cross_forward`] will not extend inexact literals.
+    #[inline]
+    pub fn make_inexact(&mut self) {
+        self.exact = false;
+    }
+
+    /// Reverse the bytes in this literal.
+    #[inline]
+    pub fn reverse(&mut self) {
+        self.bytes.reverse();
+    }
+
+    /// Extend this literal with the literal given.
+    ///
+    /// If this literal is inexact, then this is a no-op.
+    #[inline]
+    pub fn extend(&mut self, lit: &Literal) {
+        if !self.is_exact() {
+            return;
+        }
+        self.bytes.extend_from_slice(&lit.bytes);
+    }
+
+    /// Trims this literal such that only the first `len` bytes remain. If
+    /// this literal has fewer than `len` bytes, then it remains unchanged.
+    /// Otherwise, the literal is marked as inexact.
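+    ///
+    /// # Example
+    ///
+    /// A brief illustrative example:
+    ///
+    /// ```
+    /// use regex_syntax::hir::literal::Literal;
+    ///
+    /// let mut lit = Literal::exact("foobar");
+    /// lit.keep_first_bytes(3);
+    /// // Trimming marks the literal as inexact.
+    /// assert_eq!(Literal::inexact("foo"), lit);
+    ///
+    /// // A literal that is already short enough is left unchanged.
+    /// let mut lit = Literal::exact("foo");
+    /// lit.keep_first_bytes(10);
+    /// assert_eq!(Literal::exact("foo"), lit);
+    /// ```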
+    #[inline]
+    pub fn keep_first_bytes(&mut self, len: usize) {
+        if len >= self.len() {
+            return;
+        }
+        self.make_inexact();
+        self.bytes.truncate(len);
+    }
+
+    /// Trims this literal such that only the last `len` bytes remain. If this
+    /// literal has fewer than `len` bytes, then it remains unchanged.
+    /// Otherwise, the literal is marked as inexact.
+    #[inline]
+    pub fn keep_last_bytes(&mut self, len: usize) {
+        if len >= self.len() {
+            return;
+        }
+        self.make_inexact();
+        self.bytes.drain(..self.len() - len);
+    }
+
+    /// Returns true if it is believed that this literal is likely to match very
+    /// frequently, and is thus not a good candidate for a prefilter.
+    fn is_poisonous(&self) -> bool {
+        self.is_empty() || (self.len() == 1 && rank(self.as_bytes()[0]) >= 250)
+    }
+}
+
+impl From<u8> for Literal {
+    fn from(byte: u8) -> Literal {
+        Literal::exact(vec![byte])
+    }
+}
+
+impl From<char> for Literal {
+    fn from(ch: char) -> Literal {
+        use alloc::string::ToString;
+        Literal::exact(ch.encode_utf8(&mut [0; 4]).to_string())
+    }
+}
+
+impl AsRef<[u8]> for Literal {
+    fn as_ref(&self) -> &[u8] {
+        self.as_bytes()
+    }
+}
+
+impl core::fmt::Debug for Literal {
+    fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
+        let tag = if self.exact { "E" } else { "I" };
+        f.debug_tuple(tag)
+            .field(&crate::debug::Bytes(self.as_bytes()))
+            .finish()
+    }
+}
+
+/// A "preference" trie that rejects literals that will never match when
+/// executing a leftmost first or "preference" search.
+///
+/// For example, if 'sam' is inserted, then trying to insert 'samwise' will be
+/// rejected because 'samwise' can never match since 'sam' will always take
+/// priority. However, if 'samwise' is inserted first, then inserting 'sam'
+/// after it is accepted. In this case, either 'samwise' or 'sam' can match in
+/// a "preference" search.
+///
+/// Note that we only use this trie as a "set." That is, given a sequence of
+/// literals, we insert each one in order. An `insert` will reject a literal
+/// if a prefix of that literal already exists in the trie. Thus, to rebuild
+/// the "minimal" sequence, we simply only keep literals that were successfully
+/// inserted. (Since we don't need traversal, one wonders whether we can make
+/// some simplifications here, but I haven't given it a ton of thought and I've
+/// never seen this show up on a profile. Because of the heuristic limits
+/// imposed on literal extractions, the size of the inputs here is usually
+/// very small.)
+#[derive(Debug)]
+struct PreferenceTrie {
+    /// The states in this trie. The index of a state in this vector is its ID.
+    states: Vec<State>,
+    /// This vec indicates which states are match states. It always has
+    /// the same length as `states` and is indexed by the same state ID.
+    /// A state with identifier `sid` is a match state if and only if
+    /// `matches[sid].is_some()`. The option contains the index of the literal
+    /// corresponding to the match. The index is offset by 1 so that it fits in
+    /// a NonZeroUsize.
+    matches: Vec<Option<NonZeroUsize>>,
+    /// The index to allocate to the next literal added to this trie. Starts at
+    /// 1 and increments by 1 for every literal successfully added to the trie.
+    next_literal_index: usize,
+}
+
+/// A single state in a trie. Uses a sparse representation for its transitions.
+#[derive(Debug, Default)]
+struct State {
+    /// Sparse representation of the transitions out of this state. Transitions
+    /// are sorted by byte. There is at most one such transition for any
+    /// particular byte.
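+    /// (Transitions are looked up with a binary search in `insert`; because
+    /// the inputs are heuristically limited, the number of transitions per
+    /// state stays small and a sorted sparse vec keeps the trie compact.)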
+    trans: Vec<(u8, usize)>,
+}
+
+impl PreferenceTrie {
+    /// Minimizes the given sequence of literals while preserving preference
+    /// order semantics.
+    ///
+    /// When `keep_exact` is true, the exactness of every literal retained is
+    /// kept. This is useful when dealing with a fully extracted `Seq` that
+    /// only contains exact literals. In that case, we can keep all retained
+    /// literals as exact because we know we'll never need to match anything
+    /// after them and because any removed literals are guaranteed to never
+    /// match.
+    fn minimize(literals: &mut Vec<Literal>, keep_exact: bool) {
+        let mut trie = PreferenceTrie {
+            states: vec![],
+            matches: vec![],
+            next_literal_index: 1,
+        };
+        let mut make_inexact = vec![];
+        literals.retain_mut(|lit| match trie.insert(lit.as_bytes()) {
+            Ok(_) => true,
+            Err(i) => {
+                if !keep_exact {
+                    make_inexact.push(i.checked_sub(1).unwrap());
+                }
+                false
+            }
+        });
+        for i in make_inexact {
+            literals[i].make_inexact();
+        }
+    }
+
+    /// Returns `Ok` if the given byte string is accepted into this trie and
+    /// `Err` otherwise. The index for the success case corresponds to the
+    /// index of the literal added. The index for the error case corresponds to
+    /// the index of the literal already in the trie that prevented the given
+    /// byte string from being added. (Which implies it is a prefix of the one
+    /// given.)
+    ///
+    /// In short, the byte string given is accepted into the trie if and only
+    /// if it is possible for it to match when executing a preference order
+    /// search.
+    fn insert(&mut self, bytes: &[u8]) -> Result<usize, usize> {
+        let mut prev = self.root();
+        if let Some(idx) = self.matches[prev] {
+            return Err(idx.get());
+        }
+        for &b in bytes.iter() {
+            match self.states[prev].trans.binary_search_by_key(&b, |t| t.0) {
+                Ok(i) => {
+                    prev = self.states[prev].trans[i].1;
+                    if let Some(idx) = self.matches[prev] {
+                        return Err(idx.get());
+                    }
+                }
+                Err(i) => {
+                    let next = self.create_state();
+                    self.states[prev].trans.insert(i, (b, next));
+                    prev = next;
+                }
+            }
+        }
+        let idx = self.next_literal_index;
+        self.next_literal_index += 1;
+        self.matches[prev] = NonZeroUsize::new(idx);
+        Ok(idx)
+    }
+
+    /// Returns the root state ID, and if it doesn't exist, creates it.
+    fn root(&mut self) -> usize {
+        if !self.states.is_empty() {
+            0
+        } else {
+            self.create_state()
+        }
+    }
+
+    /// Creates a new empty state and returns its ID.
+    fn create_state(&mut self) -> usize {
+        let id = self.states.len();
+        self.states.push(State::default());
+        self.matches.push(None);
+        id
+    }
+}
+
+/// Returns the "rank" of the given byte.
+///
+/// The minimum rank value is `0` and the maximum rank value is `255`.
+///
+/// The rank of a byte is derived from a heuristic background distribution of
+/// relative frequencies of bytes. The heuristic says that the lower the rank
+/// of a byte, the less likely that byte is to appear in any arbitrary
+/// haystack.
+pub fn rank(byte: u8) -> u8 { + crate::rank::BYTE_FREQUENCIES[usize::from(byte)] +} + +#[cfg(test)] +mod tests { + use super::*; + + fn parse(pattern: &str) -> Hir { + crate::ParserBuilder::new().utf8(false).build().parse(pattern).unwrap() + } + + fn prefixes(pattern: &str) -> Seq { + Extractor::new().kind(ExtractKind::Prefix).extract(&parse(pattern)) + } + + fn suffixes(pattern: &str) -> Seq { + Extractor::new().kind(ExtractKind::Suffix).extract(&parse(pattern)) + } + + fn e(pattern: &str) -> (Seq, Seq) { + (prefixes(pattern), suffixes(pattern)) + } + + #[allow(non_snake_case)] + fn E(x: &str) -> Literal { + Literal::exact(x.as_bytes()) + } + + #[allow(non_snake_case)] + fn I(x: &str) -> Literal { + Literal::inexact(x.as_bytes()) + } + + fn seq>(it: I) -> Seq { + Seq::from_iter(it) + } + + fn infinite() -> (Seq, Seq) { + (Seq::infinite(), Seq::infinite()) + } + + fn inexact(it1: I1, it2: I2) -> (Seq, Seq) + where + I1: IntoIterator, + I2: IntoIterator, + { + (Seq::from_iter(it1), Seq::from_iter(it2)) + } + + fn exact, I: IntoIterator>(it: I) -> (Seq, Seq) { + let s1 = Seq::new(it); + let s2 = s1.clone(); + (s1, s2) + } + + fn opt, I: IntoIterator>(it: I) -> (Seq, Seq) { + let (mut p, mut s) = exact(it); + p.optimize_for_prefix_by_preference(); + s.optimize_for_suffix_by_preference(); + (p, s) + } + + #[test] + fn literal() { + assert_eq!(exact(["a"]), e("a")); + assert_eq!(exact(["aaaaa"]), e("aaaaa")); + assert_eq!(exact(["A", "a"]), e("(?i-u)a")); + assert_eq!(exact(["AB", "Ab", "aB", "ab"]), e("(?i-u)ab")); + assert_eq!(exact(["abC", "abc"]), e("ab(?i-u)c")); + + assert_eq!(exact([b"\xFF"]), e(r"(?-u:\xFF)")); + + #[cfg(feature = "unicode-case")] + { + assert_eq!(exact(["☃"]), e("☃")); + assert_eq!(exact(["☃"]), e("(?i)☃")); + assert_eq!(exact(["☃☃☃☃☃"]), e("☃☃☃☃☃")); + + assert_eq!(exact(["Δ"]), e("Δ")); + assert_eq!(exact(["δ"]), e("δ")); + assert_eq!(exact(["Δ", "δ"]), e("(?i)Δ")); + assert_eq!(exact(["Δ", "δ"]), e("(?i)δ")); + + assert_eq!(exact(["S", "s", "ſ"]), e("(?i)S")); + assert_eq!(exact(["S", "s", "ſ"]), e("(?i)s")); + assert_eq!(exact(["S", "s", "ſ"]), e("(?i)ſ")); + } + + let letters = "ͱͳͷΐάέήίΰαβγδεζηθικλμνξοπρςστυφχψωϊϋ"; + assert_eq!(exact([letters]), e(letters)); + } + + #[test] + fn class() { + assert_eq!(exact(["a", "b", "c"]), e("[abc]")); + assert_eq!(exact(["a1b", "a2b", "a3b"]), e("a[123]b")); + assert_eq!(exact(["δ", "ε"]), e("[εδ]")); + #[cfg(feature = "unicode-case")] + { + assert_eq!(exact(["Δ", "Ε", "δ", "ε", "ϵ"]), e(r"(?i)[εδ]")); + } + } + + #[test] + fn look() { + assert_eq!(exact(["ab"]), e(r"a\Ab")); + assert_eq!(exact(["ab"]), e(r"a\zb")); + assert_eq!(exact(["ab"]), e(r"a(?m:^)b")); + assert_eq!(exact(["ab"]), e(r"a(?m:$)b")); + assert_eq!(exact(["ab"]), e(r"a\bb")); + assert_eq!(exact(["ab"]), e(r"a\Bb")); + assert_eq!(exact(["ab"]), e(r"a(?-u:\b)b")); + assert_eq!(exact(["ab"]), e(r"a(?-u:\B)b")); + + assert_eq!(exact(["ab"]), e(r"^ab")); + assert_eq!(exact(["ab"]), e(r"$ab")); + assert_eq!(exact(["ab"]), e(r"(?m:^)ab")); + assert_eq!(exact(["ab"]), e(r"(?m:$)ab")); + assert_eq!(exact(["ab"]), e(r"\bab")); + assert_eq!(exact(["ab"]), e(r"\Bab")); + assert_eq!(exact(["ab"]), e(r"(?-u:\b)ab")); + assert_eq!(exact(["ab"]), e(r"(?-u:\B)ab")); + + assert_eq!(exact(["ab"]), e(r"ab^")); + assert_eq!(exact(["ab"]), e(r"ab$")); + assert_eq!(exact(["ab"]), e(r"ab(?m:^)")); + assert_eq!(exact(["ab"]), e(r"ab(?m:$)")); + assert_eq!(exact(["ab"]), e(r"ab\b")); + assert_eq!(exact(["ab"]), e(r"ab\B")); + assert_eq!(exact(["ab"]), e(r"ab(?-u:\b)")); + 
assert_eq!(exact(["ab"]), e(r"ab(?-u:\B)")); + + let expected = (seq([I("aZ"), E("ab")]), seq([I("Zb"), E("ab")])); + assert_eq!(expected, e(r"^aZ*b")); + } + + #[test] + fn repetition() { + assert_eq!(exact(["a", ""]), e(r"a?")); + assert_eq!(exact(["", "a"]), e(r"a??")); + assert_eq!(inexact([I("a"), E("")], [I("a"), E("")]), e(r"a*")); + assert_eq!(inexact([E(""), I("a")], [E(""), I("a")]), e(r"a*?")); + assert_eq!(inexact([I("a")], [I("a")]), e(r"a+")); + assert_eq!(inexact([I("a")], [I("a")]), e(r"(a+)+")); + + assert_eq!(exact(["ab"]), e(r"aZ{0}b")); + assert_eq!(exact(["aZb", "ab"]), e(r"aZ?b")); + assert_eq!(exact(["ab", "aZb"]), e(r"aZ??b")); + assert_eq!( + inexact([I("aZ"), E("ab")], [I("Zb"), E("ab")]), + e(r"aZ*b") + ); + assert_eq!( + inexact([E("ab"), I("aZ")], [E("ab"), I("Zb")]), + e(r"aZ*?b") + ); + assert_eq!(inexact([I("aZ")], [I("Zb")]), e(r"aZ+b")); + assert_eq!(inexact([I("aZ")], [I("Zb")]), e(r"aZ+?b")); + + assert_eq!(exact(["aZZb"]), e(r"aZ{2}b")); + assert_eq!(inexact([I("aZZ")], [I("ZZb")]), e(r"aZ{2,3}b")); + + assert_eq!(exact(["abc", ""]), e(r"(abc)?")); + assert_eq!(exact(["", "abc"]), e(r"(abc)??")); + + assert_eq!(inexact([I("a"), E("b")], [I("ab"), E("b")]), e(r"a*b")); + assert_eq!(inexact([E("b"), I("a")], [E("b"), I("ab")]), e(r"a*?b")); + assert_eq!(inexact([I("ab")], [I("b")]), e(r"ab+")); + assert_eq!(inexact([I("a"), I("b")], [I("b")]), e(r"a*b+")); + + // FIXME: The suffixes for this don't look quite right to me. I think + // the right suffixes would be: [I(ac), I(bc), E(c)]. The main issue I + // think is that suffixes are computed by iterating over concatenations + // in reverse, and then [bc, ac, c] ordering is indeed correct from + // that perspective. We also test a few more equivalent regexes, and + // we get the same result, so it is consistent at least I suppose. + // + // The reason why this isn't an issue is that it only messes up + // preference order, and currently, suffixes are never used in a + // context where preference order matters. For prefixes it matters + // because we sometimes want to use prefilters without confirmation + // when all of the literals are exact (and there's no look-around). But + // we never do that for suffixes. Any time we use suffixes, we always + // include a confirmation step. If that ever changes, then it's likely + // this bug will need to be fixed, but last time I looked, it appears + // hard to do so. + assert_eq!( + inexact([I("a"), I("b"), E("c")], [I("bc"), I("ac"), E("c")]), + e(r"a*b*c") + ); + assert_eq!( + inexact([I("a"), I("b"), E("c")], [I("bc"), I("ac"), E("c")]), + e(r"(a+)?(b+)?c") + ); + assert_eq!( + inexact([I("a"), I("b"), E("c")], [I("bc"), I("ac"), E("c")]), + e(r"(a+|)(b+|)c") + ); + // A few more similarish but not identical regexes. These may have a + // similar problem as above. 
+ assert_eq!( + inexact( + [I("a"), I("b"), I("c"), E("")], + [I("c"), I("b"), I("a"), E("")] + ), + e(r"a*b*c*") + ); + assert_eq!(inexact([I("a"), I("b"), I("c")], [I("c")]), e(r"a*b*c+")); + assert_eq!(inexact([I("a"), I("b")], [I("bc")]), e(r"a*b+c")); + assert_eq!(inexact([I("a"), I("b")], [I("c"), I("b")]), e(r"a*b+c*")); + assert_eq!(inexact([I("ab"), E("a")], [I("b"), E("a")]), e(r"ab*")); + assert_eq!( + inexact([I("ab"), E("ac")], [I("bc"), E("ac")]), + e(r"ab*c") + ); + assert_eq!(inexact([I("ab")], [I("b")]), e(r"ab+")); + assert_eq!(inexact([I("ab")], [I("bc")]), e(r"ab+c")); + + assert_eq!( + inexact([I("z"), E("azb")], [I("zazb"), E("azb")]), + e(r"z*azb") + ); + + let expected = + exact(["aaa", "aab", "aba", "abb", "baa", "bab", "bba", "bbb"]); + assert_eq!(expected, e(r"[ab]{3}")); + let expected = inexact( + [ + I("aaa"), + I("aab"), + I("aba"), + I("abb"), + I("baa"), + I("bab"), + I("bba"), + I("bbb"), + ], + [ + I("aaa"), + I("aab"), + I("aba"), + I("abb"), + I("baa"), + I("bab"), + I("bba"), + I("bbb"), + ], + ); + assert_eq!(expected, e(r"[ab]{3,4}")); + } + + #[test] + fn concat() { + let empty: [&str; 0] = []; + + assert_eq!(exact(["abcxyz"]), e(r"abc()xyz")); + assert_eq!(exact(["abcxyz"]), e(r"(abc)(xyz)")); + assert_eq!(exact(["abcmnoxyz"]), e(r"abc()mno()xyz")); + assert_eq!(exact(empty), e(r"abc[a&&b]xyz")); + assert_eq!(exact(["abcxyz"]), e(r"abc[a&&b]*xyz")); + } + + #[test] + fn alternation() { + assert_eq!(exact(["abc", "mno", "xyz"]), e(r"abc|mno|xyz")); + assert_eq!( + inexact( + [E("abc"), I("mZ"), E("mo"), E("xyz")], + [E("abc"), I("Zo"), E("mo"), E("xyz")] + ), + e(r"abc|mZ*o|xyz") + ); + assert_eq!(exact(["abc", "xyz"]), e(r"abc|M[a&&b]N|xyz")); + assert_eq!(exact(["abc", "MN", "xyz"]), e(r"abc|M[a&&b]*N|xyz")); + + assert_eq!(exact(["aaa", "aaaaa"]), e(r"(?:|aa)aaa")); + assert_eq!( + inexact( + [I("aaa"), E(""), I("aaaaa"), E("aa")], + [I("aaa"), E(""), E("aa")] + ), + e(r"(?:|aa)(?:aaa)*") + ); + assert_eq!( + inexact( + [E(""), I("aaa"), E("aa"), I("aaaaa")], + [E(""), I("aaa"), E("aa")] + ), + e(r"(?:|aa)(?:aaa)*?") + ); + + assert_eq!( + inexact([E("a"), I("b"), E("")], [E("a"), I("b"), E("")]), + e(r"a|b*") + ); + assert_eq!(inexact([E("a"), I("b")], [E("a"), I("b")]), e(r"a|b+")); + + assert_eq!( + inexact([I("a"), E("b"), E("c")], [I("ab"), E("b"), E("c")]), + e(r"a*b|c") + ); + + assert_eq!( + inexact( + [E("a"), E("b"), I("c"), E("")], + [E("a"), E("b"), I("c"), E("")] + ), + e(r"a|(?:b|c*)") + ); + + assert_eq!( + inexact( + [I("a"), I("b"), E("c"), I("a"), I("ab"), E("c")], + [I("ac"), I("bc"), E("c"), I("ac"), I("abc"), E("c")], + ), + e(r"(a|b)*c|(a|ab)*c") + ); + + assert_eq!( + exact(["abef", "abgh", "cdef", "cdgh"]), + e(r"(ab|cd)(ef|gh)") + ); + assert_eq!( + exact([ + "abefij", "abefkl", "abghij", "abghkl", "cdefij", "cdefkl", + "cdghij", "cdghkl", + ]), + e(r"(ab|cd)(ef|gh)(ij|kl)") + ); + + assert_eq!(inexact([E("abab")], [E("abab")]), e(r"(ab){2}")); + + assert_eq!(inexact([I("abab")], [I("abab")]), e(r"(ab){2,3}")); + + assert_eq!(inexact([I("abab")], [I("abab")]), e(r"(ab){2,}")); + } + + #[test] + fn impossible() { + let empty: [&str; 0] = []; + + assert_eq!(exact(empty), e(r"[a&&b]")); + assert_eq!(exact(empty), e(r"a[a&&b]")); + assert_eq!(exact(empty), e(r"[a&&b]b")); + assert_eq!(exact(empty), e(r"a[a&&b]b")); + assert_eq!(exact(["a", "b"]), e(r"a|[a&&b]|b")); + assert_eq!(exact(["a", "b"]), e(r"a|c[a&&b]|b")); + assert_eq!(exact(["a", "b"]), e(r"a|[a&&b]d|b")); + assert_eq!(exact(["a", "b"]), e(r"a|c[a&&b]d|b")); + 
assert_eq!(exact([""]), e(r"[a&&b]*")); + assert_eq!(exact(["MN"]), e(r"M[a&&b]*N")); + } + + // This tests patterns that contain something that defeats literal + // detection, usually because it would blow some limit on the total number + // of literals that can be returned. + // + // The main idea is that when literal extraction sees something that + // it knows will blow a limit, it replaces it with a marker that says + // "any literal will match here." While not necessarily true, the + // over-estimation is just fine for the purposes of literal extraction, + // because the imprecision doesn't matter: too big is too big. + // + // This is one of the trickier parts of literal extraction, since we need + // to make sure all of our literal extraction operations correctly compose + // with the markers. + #[test] + fn anything() { + assert_eq!(infinite(), e(r".")); + assert_eq!(infinite(), e(r"(?s).")); + assert_eq!(infinite(), e(r"[A-Za-z]")); + assert_eq!(infinite(), e(r"[A-Z]")); + assert_eq!(exact([""]), e(r"[A-Z]{0}")); + assert_eq!(infinite(), e(r"[A-Z]?")); + assert_eq!(infinite(), e(r"[A-Z]*")); + assert_eq!(infinite(), e(r"[A-Z]+")); + assert_eq!((seq([I("1")]), Seq::infinite()), e(r"1[A-Z]")); + assert_eq!((seq([I("1")]), seq([I("2")])), e(r"1[A-Z]2")); + assert_eq!((Seq::infinite(), seq([I("123")])), e(r"[A-Z]+123")); + assert_eq!(infinite(), e(r"[A-Z]+123[A-Z]+")); + assert_eq!(infinite(), e(r"1|[A-Z]|3")); + assert_eq!( + (seq([E("1"), I("2"), E("3")]), Seq::infinite()), + e(r"1|2[A-Z]|3"), + ); + assert_eq!( + (Seq::infinite(), seq([E("1"), I("2"), E("3")])), + e(r"1|[A-Z]2|3"), + ); + assert_eq!( + (seq([E("1"), I("2"), E("4")]), seq([E("1"), I("3"), E("4")])), + e(r"1|2[A-Z]3|4"), + ); + assert_eq!((Seq::infinite(), seq([I("2")])), e(r"(?:|1)[A-Z]2")); + assert_eq!(inexact([I("a")], [I("z")]), e(r"a.z")); + } + + // Like the 'anything' test, but it uses smaller limits in order to test + // the logic for effectively aborting literal extraction when the seqs get + // too big. 
+ #[test] + fn anything_small_limits() { + fn prefixes(pattern: &str) -> Seq { + Extractor::new() + .kind(ExtractKind::Prefix) + .limit_total(10) + .extract(&parse(pattern)) + } + + fn suffixes(pattern: &str) -> Seq { + Extractor::new() + .kind(ExtractKind::Suffix) + .limit_total(10) + .extract(&parse(pattern)) + } + + fn e(pattern: &str) -> (Seq, Seq) { + (prefixes(pattern), suffixes(pattern)) + } + + assert_eq!( + ( + seq([ + I("aaa"), + I("aab"), + I("aba"), + I("abb"), + I("baa"), + I("bab"), + I("bba"), + I("bbb") + ]), + seq([ + I("aaa"), + I("aab"), + I("aba"), + I("abb"), + I("baa"), + I("bab"), + I("bba"), + I("bbb") + ]) + ), + e(r"[ab]{3}{3}") + ); + + assert_eq!(infinite(), e(r"ab|cd|ef|gh|ij|kl|mn|op|qr|st|uv|wx|yz")); + } + + #[test] + fn empty() { + assert_eq!(exact([""]), e(r"")); + assert_eq!(exact([""]), e(r"^")); + assert_eq!(exact([""]), e(r"$")); + assert_eq!(exact([""]), e(r"(?m:^)")); + assert_eq!(exact([""]), e(r"(?m:$)")); + assert_eq!(exact([""]), e(r"\b")); + assert_eq!(exact([""]), e(r"\B")); + assert_eq!(exact([""]), e(r"(?-u:\b)")); + assert_eq!(exact([""]), e(r"(?-u:\B)")); + } + + #[test] + fn odds_and_ends() { + assert_eq!((Seq::infinite(), seq([I("a")])), e(r".a")); + assert_eq!((seq([I("a")]), Seq::infinite()), e(r"a.")); + assert_eq!(infinite(), e(r"a|.")); + assert_eq!(infinite(), e(r".|a")); + + let pat = r"M[ou]'?am+[ae]r .*([AEae]l[- ])?[GKQ]h?[aeu]+([dtz][dhz]?)+af[iy]"; + let expected = inexact( + ["Mo'am", "Moam", "Mu'am", "Muam"].map(I), + [ + "ddafi", "ddafy", "dhafi", "dhafy", "dzafi", "dzafy", "dafi", + "dafy", "tdafi", "tdafy", "thafi", "thafy", "tzafi", "tzafy", + "tafi", "tafy", "zdafi", "zdafy", "zhafi", "zhafy", "zzafi", + "zzafy", "zafi", "zafy", + ] + .map(I), + ); + assert_eq!(expected, e(pat)); + + assert_eq!( + (seq(["fn is_", "fn as_"].map(I)), Seq::infinite()), + e(r"fn is_([A-Z]+)|fn as_([A-Z]+)"), + ); + assert_eq!( + inexact([I("foo")], [I("quux")]), + e(r"foo[A-Z]+bar[A-Z]+quux") + ); + assert_eq!(infinite(), e(r"[A-Z]+bar[A-Z]+")); + assert_eq!( + exact(["Sherlock Holmes"]), + e(r"(?m)^Sherlock Holmes|Sherlock Holmes$") + ); + + assert_eq!(exact(["sa", "sb"]), e(r"\bs(?:[ab])")); + } + + // This tests a specific regex along with some heuristic steps to reduce + // the sequences extracted. This is meant to roughly correspond to the + // types of heuristics used to shrink literal sets in practice. (Shrinking + // is done because you want to balance "spend too much work looking for + // too many literals" and "spend too much work processing false positive + // matches from short literals.") + #[test] + #[cfg(feature = "unicode-case")] + fn holmes() { + let expected = inexact( + ["HOL", "HOl", "HoL", "Hol", "hOL", "hOl", "hoL", "hol"].map(I), + [ + "MES", "MEs", "Eſ", "MeS", "Mes", "eſ", "mES", "mEs", "meS", + "mes", + ] + .map(I), + ); + let (mut prefixes, mut suffixes) = e(r"(?i)Holmes"); + prefixes.keep_first_bytes(3); + suffixes.keep_last_bytes(3); + prefixes.minimize_by_preference(); + suffixes.minimize_by_preference(); + assert_eq!(expected, (prefixes, suffixes)); + } + + // This tests that we get some kind of literals extracted for a beefier + // alternation with case insensitive mode enabled. At one point during + // development, this returned nothing, and motivated some special case + // code in Extractor::union to try and trim down the literal sequences + // if the union would blow the limits set. 
+ #[test] + #[cfg(feature = "unicode-case")] + fn holmes_alt() { + let mut pre = + prefixes(r"(?i)Sherlock|Holmes|Watson|Irene|Adler|John|Baker"); + assert!(pre.len().unwrap() > 0); + pre.optimize_for_prefix_by_preference(); + assert!(pre.len().unwrap() > 0); + } + + // See: https://github.com/rust-lang/regex/security/advisories/GHSA-m5pq-gvj9-9vr8 + // See: CVE-2022-24713 + // + // We test this here to ensure literal extraction completes in reasonable + // time and isn't materially impacted by these sorts of pathological + // repeats. + #[test] + fn crazy_repeats() { + assert_eq!(inexact([E("")], [E("")]), e(r"(?:){4294967295}")); + assert_eq!( + inexact([E("")], [E("")]), + e(r"(?:){64}{64}{64}{64}{64}{64}") + ); + assert_eq!(inexact([E("")], [E("")]), e(r"x{0}{4294967295}")); + assert_eq!(inexact([E("")], [E("")]), e(r"(?:|){4294967295}")); + + assert_eq!( + inexact([E("")], [E("")]), + e(r"(?:){8}{8}{8}{8}{8}{8}{8}{8}{8}{8}{8}{8}{8}{8}") + ); + let repa = "a".repeat(100); + assert_eq!( + inexact([I(&repa)], [I(&repa)]), + e(r"a{8}{8}{8}{8}{8}{8}{8}{8}{8}{8}{8}{8}{8}{8}") + ); + } + + #[test] + fn huge() { + let pat = r#"(?-u) + 2(?: + [45]\d{3}| + 7(?: + 1[0-267]| + 2[0-289]| + 3[0-29]| + 4[01]| + 5[1-3]| + 6[013]| + 7[0178]| + 91 + )| + 8(?: + 0[125]| + [139][1-6]| + 2[0157-9]| + 41| + 6[1-35]| + 7[1-5]| + 8[1-8]| + 90 + )| + 9(?: + 0[0-2]| + 1[0-4]| + 2[568]| + 3[3-6]| + 5[5-7]| + 6[0167]| + 7[15]| + 8[0146-9] + ) + )\d{4}| + 3(?: + 12?[5-7]\d{2}| + 0(?: + 2(?: + [025-79]\d| + [348]\d{1,2} + )| + 3(?: + [2-4]\d| + [56]\d? + ) + )| + 2(?: + 1\d{2}| + 2(?: + [12]\d| + [35]\d{1,2}| + 4\d? + ) + )| + 3(?: + 1\d{2}| + 2(?: + [2356]\d| + 4\d{1,2} + ) + )| + 4(?: + 1\d{2}| + 2(?: + 2\d{1,2}| + [47]| + 5\d{2} + ) + )| + 5(?: + 1\d{2}| + 29 + )| + [67]1\d{2}| + 8(?: + 1\d{2}| + 2(?: + 2\d{2}| + 3| + 4\d + ) + ) + )\d{3}| + 4(?: + 0(?: + 2(?: + [09]\d| + 7 + )| + 33\d{2} + )| + 1\d{3}| + 2(?: + 1\d{2}| + 2(?: + [25]\d?| + [348]\d| + [67]\d{1,2} + ) + )| + 3(?: + 1\d{2}(?: + \d{2} + )?| + 2(?: + [045]\d| + [236-9]\d{1,2} + )| + 32\d{2} + )| + 4(?: + [18]\d{2}| + 2(?: + [2-46]\d{2}| + 3 + )| + 5[25]\d{2} + )| + 5(?: + 1\d{2}| + 2(?: + 3\d| + 5 + ) + )| + 6(?: + [18]\d{2}| + 2(?: + 3(?: + \d{2} + )?| + [46]\d{1,2}| + 5\d{2}| + 7\d + )| + 5(?: + 3\d?| + 4\d| + [57]\d{1,2}| + 6\d{2}| + 8 + ) + )| + 71\d{2}| + 8(?: + [18]\d{2}| + 23\d{2}| + 54\d{2} + )| + 9(?: + [18]\d{2}| + 2[2-5]\d{2}| + 53\d{1,2} + ) + )\d{3}| + 5(?: + 02[03489]\d{2}| + 1\d{2}| + 2(?: + 1\d{2}| + 2(?: + 2(?: + \d{2} + )?| + [457]\d{2} + ) + )| + 3(?: + 1\d{2}| + 2(?: + [37](?: + \d{2} + )?| + [569]\d{2} + ) + )| + 4(?: + 1\d{2}| + 2[46]\d{2} + )| + 5(?: + 1\d{2}| + 26\d{1,2} + )| + 6(?: + [18]\d{2}| + 2| + 53\d{2} + )| + 7(?: + 1| + 24 + )\d{2}| + 8(?: + 1| + 26 + )\d{2}| + 91\d{2} + )\d{3}| + 6(?: + 0(?: + 1\d{2}| + 2(?: + 3\d{2}| + 4\d{1,2} + ) + )| + 2(?: + 2[2-5]\d{2}| + 5(?: + [3-5]\d{2}| + 7 + )| + 8\d{2} + )| + 3(?: + 1| + 2[3478] + )\d{2}| + 4(?: + 1| + 2[34] + )\d{2}| + 5(?: + 1| + 2[47] + )\d{2}| + 6(?: + [18]\d{2}| + 6(?: + 2(?: + 2\d| + [34]\d{2} + )| + 5(?: + [24]\d{2}| + 3\d| + 5\d{1,2} + ) + ) + )| + 72[2-5]\d{2}| + 8(?: + 1\d{2}| + 2[2-5]\d{2} + )| + 9(?: + 1\d{2}| + 2[2-6]\d{2} + ) + )\d{3}| + 7(?: + (?: + 02| + [3-589]1| + 6[12]| + 72[24] + )\d{2}| + 21\d{3}| + 32 + )\d{3}| + 8(?: + (?: + 4[12]| + [5-7]2| + 1\d? 
+ )| + (?: + 0| + 3[12]| + [5-7]1| + 217 + )\d + )\d{4}| + 9(?: + [35]1| + (?: + [024]2| + 81 + )\d| + (?: + 1| + [24]1 + )\d{2} + )\d{3} + "#; + // TODO: This is a good candidate of a seq of literals that could be + // shrunk quite a bit and still be very productive with respect to + // literal optimizations. + let (prefixes, suffixes) = e(pat); + assert!(!suffixes.is_finite()); + assert_eq!(Some(243), prefixes.len()); + } + + #[test] + fn optimize() { + // This gets a common prefix that isn't too short. + let (p, s) = + opt(["foobarfoobar", "foobar", "foobarzfoobar", "foobarfoobar"]); + assert_eq!(seq([I("foobar")]), p); + assert_eq!(seq([I("foobar")]), s); + + // This also finds a common prefix, but since it's only one byte, it + // prefers the multiple literals. + let (p, s) = opt(["abba", "akka", "abccba"]); + assert_eq!(exact(["abba", "akka", "abccba"]), (p, s)); + + let (p, s) = opt(["sam", "samwise"]); + assert_eq!((seq([E("sam")]), seq([E("sam"), E("samwise")])), (p, s)); + + // The empty string is poisonous, so our seq becomes infinite, even + // though all literals are exact. + let (p, s) = opt(["foobarfoo", "foo", "", "foozfoo", "foofoo"]); + assert!(!p.is_finite()); + assert!(!s.is_finite()); + + // A space is also poisonous, so our seq becomes infinite. But this + // only gets triggered when we don't have a completely exact sequence. + // When the sequence is exact, spaces are okay, since we presume that + // any prefilter will match a space more quickly than the regex engine. + // (When the sequence is exact, there's a chance of the prefilter being + // used without needing the regex engine at all.) + let mut p = seq([E("foobarfoo"), I("foo"), E(" "), E("foofoo")]); + p.optimize_for_prefix_by_preference(); + assert!(!p.is_finite()); + } +} diff --git a/vendor/regex-syntax/src/hir/mod.rs b/vendor/regex-syntax/src/hir/mod.rs new file mode 100644 index 00000000000000..6d57fe3fd537c4 --- /dev/null +++ b/vendor/regex-syntax/src/hir/mod.rs @@ -0,0 +1,3873 @@ +/*! +Defines a high-level intermediate (HIR) representation for regular expressions. + +The HIR is represented by the [`Hir`] type, and it principally constructed via +[translation](translate) from an [`Ast`](crate::ast::Ast). Alternatively, users +may use the smart constructors defined on `Hir` to build their own by hand. The +smart constructors simultaneously simplify and "optimize" the HIR, and are also +the same routines used by translation. + +Most regex engines only have an HIR like this, and usually construct it +directly from the concrete syntax. This crate however first parses the +concrete syntax into an `Ast`, and only then creates the HIR from the `Ast`, +as mentioned above. It's done this way to facilitate better error reporting, +and to have a structured representation of a regex that faithfully represents +its concrete syntax. Namely, while an `Hir` value can be converted back to an +equivalent regex pattern string, it is unlikely to look like the original due +to its simplified structure. +*/ + +use core::{char, cmp}; + +use alloc::{ + boxed::Box, + format, + string::{String, ToString}, + vec, + vec::Vec, +}; + +use crate::{ + ast::Span, + hir::interval::{Interval, IntervalSet, IntervalSetIter}, + unicode, +}; + +pub use crate::{ + hir::visitor::{visit, Visitor}, + unicode::CaseFoldError, +}; + +mod interval; +pub mod literal; +pub mod print; +pub mod translate; +mod visitor; + +/// An error that can occur while translating an `Ast` to a `Hir`. 
+#[derive(Clone, Debug, Eq, PartialEq)] +pub struct Error { + /// The kind of error. + kind: ErrorKind, + /// The original pattern that the translator's Ast was parsed from. Every + /// span in an error is a valid range into this string. + pattern: String, + /// The span of this error, derived from the Ast given to the translator. + span: Span, +} + +impl Error { + /// Return the type of this error. + pub fn kind(&self) -> &ErrorKind { + &self.kind + } + + /// The original pattern string in which this error occurred. + /// + /// Every span reported by this error is reported in terms of this string. + pub fn pattern(&self) -> &str { + &self.pattern + } + + /// Return the span at which this error occurred. + pub fn span(&self) -> &Span { + &self.span + } +} + +/// The type of an error that occurred while building an `Hir`. +/// +/// This error type is marked as `non_exhaustive`. This means that adding a +/// new variant is not considered a breaking change. +#[non_exhaustive] +#[derive(Clone, Debug, Eq, PartialEq)] +pub enum ErrorKind { + /// This error occurs when a Unicode feature is used when Unicode + /// support is disabled. For example `(?-u:\pL)` would trigger this error. + UnicodeNotAllowed, + /// This error occurs when translating a pattern that could match a byte + /// sequence that isn't UTF-8 and `utf8` was enabled. + InvalidUtf8, + /// This error occurs when one uses a non-ASCII byte for a line terminator, + /// but where Unicode mode is enabled and UTF-8 mode is disabled. + InvalidLineTerminator, + /// This occurs when an unrecognized Unicode property name could not + /// be found. + UnicodePropertyNotFound, + /// This occurs when an unrecognized Unicode property value could not + /// be found. + UnicodePropertyValueNotFound, + /// This occurs when a Unicode-aware Perl character class (`\w`, `\s` or + /// `\d`) could not be found. This can occur when the `unicode-perl` + /// crate feature is not enabled. + UnicodePerlClassNotFound, + /// This occurs when the Unicode simple case mapping tables are not + /// available, and the regular expression required Unicode aware case + /// insensitivity. + UnicodeCaseUnavailable, +} + +#[cfg(feature = "std")] +impl std::error::Error for Error {} + +impl core::fmt::Display for Error { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + crate::error::Formatter::from(self).fmt(f) + } +} + +impl core::fmt::Display for ErrorKind { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + use self::ErrorKind::*; + + let msg = match *self { + UnicodeNotAllowed => "Unicode not allowed here", + InvalidUtf8 => "pattern can match invalid UTF-8", + InvalidLineTerminator => "invalid line terminator, must be ASCII", + UnicodePropertyNotFound => "Unicode property not found", + UnicodePropertyValueNotFound => "Unicode property value not found", + UnicodePerlClassNotFound => { + "Unicode-aware Perl class not found \ + (make sure the unicode-perl feature is enabled)" + } + UnicodeCaseUnavailable => { + "Unicode-aware case insensitivity matching is not available \ + (make sure the unicode-case feature is enabled)" + } + }; + f.write_str(msg) + } +} + +/// A high-level intermediate representation (HIR) for a regular expression. +/// +/// An HIR value is a combination of a [`HirKind`] and a set of [`Properties`]. +/// An `HirKind` indicates what kind of regular expression it is (a literal, +/// a repetition, a look-around assertion, etc.), where as a `Properties` +/// describes various facts about the regular expression. 
For example, whether +/// it matches UTF-8 or if it matches the empty string. +/// +/// The HIR of a regular expression represents an intermediate step between +/// its abstract syntax (a structured description of the concrete syntax) and +/// an actual regex matcher. The purpose of HIR is to make regular expressions +/// easier to analyze. In particular, the AST is much more complex than the +/// HIR. For example, while an AST supports arbitrarily nested character +/// classes, the HIR will flatten all nested classes into a single set. The HIR +/// will also "compile away" every flag present in the concrete syntax. For +/// example, users of HIR expressions never need to worry about case folding; +/// it is handled automatically by the translator (e.g., by translating +/// `(?i:A)` to `[aA]`). +/// +/// The specific type of an HIR expression can be accessed via its `kind` +/// or `into_kind` methods. This extra level of indirection exists for two +/// reasons: +/// +/// 1. Construction of an HIR expression *must* use the constructor methods on +/// this `Hir` type instead of building the `HirKind` values directly. This +/// permits construction to enforce invariants like "concatenations always +/// consist of two or more sub-expressions." +/// 2. Every HIR expression contains attributes that are defined inductively, +/// and can be computed cheaply during the construction process. For example, +/// one such attribute is whether the expression must match at the beginning of +/// the haystack. +/// +/// In particular, if you have an `HirKind` value, then there is intentionally +/// no way to build an `Hir` value from it. You instead need to do case +/// analysis on the `HirKind` value and build the `Hir` value using its smart +/// constructors. +/// +/// # UTF-8 +/// +/// If the HIR was produced by a translator with +/// [`TranslatorBuilder::utf8`](translate::TranslatorBuilder::utf8) enabled, +/// then the HIR is guaranteed to match UTF-8 exclusively for all non-empty +/// matches. +/// +/// For empty matches, those can occur at any position. It is the +/// responsibility of the regex engine to determine whether empty matches are +/// permitted between the code units of a single codepoint. +/// +/// # Stack space +/// +/// This type defines its own destructor that uses constant stack space and +/// heap space proportional to the size of the HIR. +/// +/// Also, an `Hir`'s `fmt::Display` implementation prints an HIR as a regular +/// expression pattern string, and uses constant stack space and heap space +/// proportional to the size of the `Hir`. The regex it prints is guaranteed to +/// be _semantically_ equivalent to the original concrete syntax, but it may +/// look very different. (And potentially not practically readable by a human.) +/// +/// An `Hir`'s `fmt::Debug` implementation currently does not use constant +/// stack space. The implementation will also suppress some details (such as +/// the `Properties` inlined into every `Hir` value to make it less noisy). +#[derive(Clone, Eq, PartialEq)] +pub struct Hir { + /// The underlying HIR kind. + kind: HirKind, + /// Analysis info about this HIR, computed during construction. + props: Properties, +} + +/// Methods for accessing the underlying `HirKind` and `Properties`. +impl Hir { + /// Returns a reference to the underlying HIR kind. + pub fn kind(&self) -> &HirKind { + &self.kind + } + + /// Consumes ownership of this HIR expression and returns its underlying + /// `HirKind`. 
+ pub fn into_kind(mut self) -> HirKind { + core::mem::replace(&mut self.kind, HirKind::Empty) + } + + /// Returns the properties computed for this `Hir`. + pub fn properties(&self) -> &Properties { + &self.props + } + + /// Splits this HIR into its constituent parts. + /// + /// This is useful because `let Hir { kind, props } = hir;` does not work + /// because of `Hir`'s custom `Drop` implementation. + fn into_parts(mut self) -> (HirKind, Properties) { + ( + core::mem::replace(&mut self.kind, HirKind::Empty), + core::mem::replace(&mut self.props, Properties::empty()), + ) + } +} + +/// Smart constructors for HIR values. +/// +/// These constructors are called "smart" because they do inductive work or +/// simplifications. For example, calling `Hir::repetition` with a repetition +/// like `a{0}` will actually return a `Hir` with a `HirKind::Empty` kind +/// since it is equivalent to an empty regex. Another example is calling +/// `Hir::concat(vec![expr])`. Instead of getting a `HirKind::Concat`, you'll +/// just get back the original `expr` since it's precisely equivalent. +/// +/// Smart constructors enable maintaining invariants about the HIR data type +/// while also simultaneously keeping the representation as simple as possible. +impl Hir { + /// Returns an empty HIR expression. + /// + /// An empty HIR expression always matches, including the empty string. + #[inline] + pub fn empty() -> Hir { + let props = Properties::empty(); + Hir { kind: HirKind::Empty, props } + } + + /// Returns an HIR expression that can never match anything. That is, + /// the size of the set of strings in the language described by the HIR + /// returned is `0`. + /// + /// This is distinct from [`Hir::empty`] in that the empty string matches + /// the HIR returned by `Hir::empty`. That is, the set of strings in the + /// language describe described by `Hir::empty` is non-empty. + /// + /// Note that currently, the HIR returned uses an empty character class to + /// indicate that nothing can match. An equivalent expression that cannot + /// match is an empty alternation, but all such "fail" expressions are + /// normalized (via smart constructors) to empty character classes. This is + /// because empty character classes can be spelled in the concrete syntax + /// of a regex (e.g., `\P{any}` or `(?-u:[^\x00-\xFF])` or `[a&&b]`), but + /// empty alternations cannot. + #[inline] + pub fn fail() -> Hir { + let class = Class::Bytes(ClassBytes::empty()); + let props = Properties::class(&class); + // We can't just call Hir::class here because it defers to Hir::fail + // in order to canonicalize the Hir value used to represent "cannot + // match." + Hir { kind: HirKind::Class(class), props } + } + + /// Creates a literal HIR expression. + /// + /// This accepts anything that can be converted into a `Box<[u8]>`. + /// + /// Note that there is no mechanism for storing a `char` or a `Box` + /// in an HIR. Everything is "just bytes." Whether a `Literal` (or + /// any HIR node) matches valid UTF-8 exclusively can be queried via + /// [`Properties::is_utf8`]. + /// + /// # Example + /// + /// This example shows that concatenations of `Literal` HIR values will + /// automatically get flattened and combined together. So for example, even + /// if you concat multiple `Literal` values that are themselves not valid + /// UTF-8, they might add up to valid UTF-8. This also demonstrates just + /// how "smart" Hir's smart constructors are. 
+ /// + /// ``` + /// use regex_syntax::hir::{Hir, HirKind, Literal}; + /// + /// let literals = vec![ + /// Hir::literal([0xE2]), + /// Hir::literal([0x98]), + /// Hir::literal([0x83]), + /// ]; + /// // Each literal, on its own, is invalid UTF-8. + /// assert!(literals.iter().all(|hir| !hir.properties().is_utf8())); + /// + /// let concat = Hir::concat(literals); + /// // But the concatenation is valid UTF-8! + /// assert!(concat.properties().is_utf8()); + /// + /// // And also notice that the literals have been concatenated into a + /// // single `Literal`, to the point where there is no explicit `Concat`! + /// let expected = HirKind::Literal(Literal(Box::from("☃".as_bytes()))); + /// assert_eq!(&expected, concat.kind()); + /// ``` + /// + /// # Example: building a literal from a `char` + /// + /// This example shows how to build a single `Hir` literal from a `char` + /// value. Since a [`Literal`] is just bytes, we just need to UTF-8 + /// encode a `char` value: + /// + /// ``` + /// use regex_syntax::hir::{Hir, HirKind, Literal}; + /// + /// let ch = '☃'; + /// let got = Hir::literal(ch.encode_utf8(&mut [0; 4]).as_bytes()); + /// + /// let expected = HirKind::Literal(Literal(Box::from("☃".as_bytes()))); + /// assert_eq!(&expected, got.kind()); + /// ``` + #[inline] + pub fn literal>>(lit: B) -> Hir { + let bytes = lit.into(); + if bytes.is_empty() { + return Hir::empty(); + } + + let lit = Literal(bytes); + let props = Properties::literal(&lit); + Hir { kind: HirKind::Literal(lit), props } + } + + /// Creates a class HIR expression. The class may either be defined over + /// ranges of Unicode codepoints or ranges of raw byte values. + /// + /// Note that an empty class is permitted. An empty class is equivalent to + /// `Hir::fail()`. + #[inline] + pub fn class(class: Class) -> Hir { + if class.is_empty() { + return Hir::fail(); + } else if let Some(bytes) = class.literal() { + return Hir::literal(bytes); + } + let props = Properties::class(&class); + Hir { kind: HirKind::Class(class), props } + } + + /// Creates a look-around assertion HIR expression. + #[inline] + pub fn look(look: Look) -> Hir { + let props = Properties::look(look); + Hir { kind: HirKind::Look(look), props } + } + + /// Creates a repetition HIR expression. + #[inline] + pub fn repetition(mut rep: Repetition) -> Hir { + // If the sub-expression of a repetition can only match the empty + // string, then we force its maximum to be at most 1. + if rep.sub.properties().maximum_len() == Some(0) { + rep.min = cmp::min(rep.min, 1); + rep.max = rep.max.map(|n| cmp::min(n, 1)).or(Some(1)); + } + // The regex 'a{0}' is always equivalent to the empty regex. This is + // true even when 'a' is an expression that never matches anything + // (like '\P{any}'). + // + // Additionally, the regex 'a{1}' is always equivalent to 'a'. + if rep.min == 0 && rep.max == Some(0) { + return Hir::empty(); + } else if rep.min == 1 && rep.max == Some(1) { + return *rep.sub; + } + let props = Properties::repetition(&rep); + Hir { kind: HirKind::Repetition(rep), props } + } + + /// Creates a capture HIR expression. + /// + /// Note that there is no explicit HIR value for a non-capturing group. + /// Since a non-capturing group only exists to override precedence in the + /// concrete syntax and since an HIR already does its own grouping based on + /// what is parsed, there is no need to explicitly represent non-capturing + /// groups in the HIR. 
+ #[inline] + pub fn capture(capture: Capture) -> Hir { + let props = Properties::capture(&capture); + Hir { kind: HirKind::Capture(capture), props } + } + + /// Returns the concatenation of the given expressions. + /// + /// This attempts to flatten and simplify the concatenation as appropriate. + /// + /// # Example + /// + /// This shows a simple example of basic flattening of both concatenations + /// and literals. + /// + /// ``` + /// use regex_syntax::hir::Hir; + /// + /// let hir = Hir::concat(vec![ + /// Hir::concat(vec![ + /// Hir::literal([b'a']), + /// Hir::literal([b'b']), + /// Hir::literal([b'c']), + /// ]), + /// Hir::concat(vec![ + /// Hir::literal([b'x']), + /// Hir::literal([b'y']), + /// Hir::literal([b'z']), + /// ]), + /// ]); + /// let expected = Hir::literal("abcxyz".as_bytes()); + /// assert_eq!(expected, hir); + /// ``` + pub fn concat(subs: Vec) -> Hir { + // We rebuild the concatenation by simplifying it. Would be nice to do + // it in place, but that seems a little tricky? + let mut new = vec![]; + // This gobbles up any adjacent literals in a concatenation and smushes + // them together. Basically, when we see a literal, we add its bytes + // to 'prior_lit', and whenever we see anything else, we first take + // any bytes in 'prior_lit' and add it to the 'new' concatenation. + let mut prior_lit: Option> = None; + for sub in subs { + let (kind, props) = sub.into_parts(); + match kind { + HirKind::Literal(Literal(bytes)) => { + if let Some(ref mut prior_bytes) = prior_lit { + prior_bytes.extend_from_slice(&bytes); + } else { + prior_lit = Some(bytes.to_vec()); + } + } + // We also flatten concats that are direct children of another + // concat. We only need to do this one level deep since + // Hir::concat is the only way to build concatenations, and so + // flattening happens inductively. + HirKind::Concat(subs2) => { + for sub2 in subs2 { + let (kind2, props2) = sub2.into_parts(); + match kind2 { + HirKind::Literal(Literal(bytes)) => { + if let Some(ref mut prior_bytes) = prior_lit { + prior_bytes.extend_from_slice(&bytes); + } else { + prior_lit = Some(bytes.to_vec()); + } + } + kind2 => { + if let Some(prior_bytes) = prior_lit.take() { + new.push(Hir::literal(prior_bytes)); + } + new.push(Hir { kind: kind2, props: props2 }); + } + } + } + } + // We can just skip empty HIRs. + HirKind::Empty => {} + kind => { + if let Some(prior_bytes) = prior_lit.take() { + new.push(Hir::literal(prior_bytes)); + } + new.push(Hir { kind, props }); + } + } + } + if let Some(prior_bytes) = prior_lit.take() { + new.push(Hir::literal(prior_bytes)); + } + if new.is_empty() { + return Hir::empty(); + } else if new.len() == 1 { + return new.pop().unwrap(); + } + let props = Properties::concat(&new); + Hir { kind: HirKind::Concat(new), props } + } + + /// Returns the alternation of the given expressions. + /// + /// This flattens and simplifies the alternation as appropriate. This may + /// include factoring out common prefixes or even rewriting the alternation + /// as a character class. + /// + /// Note that an empty alternation is equivalent to `Hir::fail()`. (It + /// is not possible for one to write an empty alternation, or even an + /// alternation with a single sub-expression, in the concrete syntax of a + /// regex.) + /// + /// # Example + /// + /// This is a simple example showing how an alternation might get + /// simplified. 
+ /// + /// ``` + /// use regex_syntax::hir::{Hir, Class, ClassUnicode, ClassUnicodeRange}; + /// + /// let hir = Hir::alternation(vec![ + /// Hir::literal([b'a']), + /// Hir::literal([b'b']), + /// Hir::literal([b'c']), + /// Hir::literal([b'd']), + /// Hir::literal([b'e']), + /// Hir::literal([b'f']), + /// ]); + /// let expected = Hir::class(Class::Unicode(ClassUnicode::new([ + /// ClassUnicodeRange::new('a', 'f'), + /// ]))); + /// assert_eq!(expected, hir); + /// ``` + /// + /// And another example showing how common prefixes might get factored + /// out. + /// + /// ``` + /// use regex_syntax::hir::{Hir, Class, ClassUnicode, ClassUnicodeRange}; + /// + /// let hir = Hir::alternation(vec![ + /// Hir::concat(vec![ + /// Hir::literal("abc".as_bytes()), + /// Hir::class(Class::Unicode(ClassUnicode::new([ + /// ClassUnicodeRange::new('A', 'Z'), + /// ]))), + /// ]), + /// Hir::concat(vec![ + /// Hir::literal("abc".as_bytes()), + /// Hir::class(Class::Unicode(ClassUnicode::new([ + /// ClassUnicodeRange::new('a', 'z'), + /// ]))), + /// ]), + /// ]); + /// let expected = Hir::concat(vec![ + /// Hir::literal("abc".as_bytes()), + /// Hir::alternation(vec![ + /// Hir::class(Class::Unicode(ClassUnicode::new([ + /// ClassUnicodeRange::new('A', 'Z'), + /// ]))), + /// Hir::class(Class::Unicode(ClassUnicode::new([ + /// ClassUnicodeRange::new('a', 'z'), + /// ]))), + /// ]), + /// ]); + /// assert_eq!(expected, hir); + /// ``` + /// + /// Note that these sorts of simplifications are not guaranteed. + pub fn alternation(subs: Vec) -> Hir { + // We rebuild the alternation by simplifying it. We proceed similarly + // as the concatenation case. But in this case, there's no literal + // simplification happening. We're just flattening alternations. + let mut new = Vec::with_capacity(subs.len()); + for sub in subs { + let (kind, props) = sub.into_parts(); + match kind { + HirKind::Alternation(subs2) => { + new.extend(subs2); + } + kind => { + new.push(Hir { kind, props }); + } + } + } + if new.is_empty() { + return Hir::fail(); + } else if new.len() == 1 { + return new.pop().unwrap(); + } + // Now that it's completely flattened, look for the special case of + // 'char1|char2|...|charN' and collapse that into a class. Note that + // we look for 'char' first and then bytes. The issue here is that if + // we find both non-ASCII codepoints and non-ASCII singleton bytes, + // then it isn't actually possible to smush them into a single class. + // (Because classes are either "all codepoints" or "all bytes." You + // can have a class that both matches non-ASCII but valid UTF-8 and + // invalid UTF-8.) So we look for all chars and then all bytes, and + // don't handle anything else. + if let Some(singletons) = singleton_chars(&new) { + let it = singletons + .into_iter() + .map(|ch| ClassUnicodeRange { start: ch, end: ch }); + return Hir::class(Class::Unicode(ClassUnicode::new(it))); + } + if let Some(singletons) = singleton_bytes(&new) { + let it = singletons + .into_iter() + .map(|b| ClassBytesRange { start: b, end: b }); + return Hir::class(Class::Bytes(ClassBytes::new(it))); + } + // Similar to singleton chars, we can also look for alternations of + // classes. Those can be smushed into a single class. + if let Some(cls) = class_chars(&new) { + return Hir::class(cls); + } + if let Some(cls) = class_bytes(&new) { + return Hir::class(cls); + } + // Factor out a common prefix if we can, which might potentially + // simplify the expression and unlock other optimizations downstream. 
+ // It also might generally make NFA matching and DFA construction + // faster by reducing the scope of branching in the regex. + new = match lift_common_prefix(new) { + Ok(hir) => return hir, + Err(unchanged) => unchanged, + }; + let props = Properties::alternation(&new); + Hir { kind: HirKind::Alternation(new), props } + } + + /// Returns an HIR expression for `.`. + /// + /// * [`Dot::AnyChar`] maps to `(?su-R:.)`. + /// * [`Dot::AnyByte`] maps to `(?s-Ru:.)`. + /// * [`Dot::AnyCharExceptLF`] maps to `(?u-Rs:.)`. + /// * [`Dot::AnyCharExceptCRLF`] maps to `(?Ru-s:.)`. + /// * [`Dot::AnyByteExceptLF`] maps to `(?-Rsu:.)`. + /// * [`Dot::AnyByteExceptCRLF`] maps to `(?R-su:.)`. + /// + /// # Example + /// + /// Note that this is a convenience routine for constructing the correct + /// character class based on the value of `Dot`. There is no explicit "dot" + /// HIR value. It is just an abbreviation for a common character class. + /// + /// ``` + /// use regex_syntax::hir::{Hir, Dot, Class, ClassBytes, ClassBytesRange}; + /// + /// let hir = Hir::dot(Dot::AnyByte); + /// let expected = Hir::class(Class::Bytes(ClassBytes::new([ + /// ClassBytesRange::new(0x00, 0xFF), + /// ]))); + /// assert_eq!(expected, hir); + /// ``` + #[inline] + pub fn dot(dot: Dot) -> Hir { + match dot { + Dot::AnyChar => Hir::class(Class::Unicode(ClassUnicode::new([ + ClassUnicodeRange::new('\0', '\u{10FFFF}'), + ]))), + Dot::AnyByte => Hir::class(Class::Bytes(ClassBytes::new([ + ClassBytesRange::new(b'\0', b'\xFF'), + ]))), + Dot::AnyCharExcept(ch) => { + let mut cls = + ClassUnicode::new([ClassUnicodeRange::new(ch, ch)]); + cls.negate(); + Hir::class(Class::Unicode(cls)) + } + Dot::AnyCharExceptLF => { + Hir::class(Class::Unicode(ClassUnicode::new([ + ClassUnicodeRange::new('\0', '\x09'), + ClassUnicodeRange::new('\x0B', '\u{10FFFF}'), + ]))) + } + Dot::AnyCharExceptCRLF => { + Hir::class(Class::Unicode(ClassUnicode::new([ + ClassUnicodeRange::new('\0', '\x09'), + ClassUnicodeRange::new('\x0B', '\x0C'), + ClassUnicodeRange::new('\x0E', '\u{10FFFF}'), + ]))) + } + Dot::AnyByteExcept(byte) => { + let mut cls = + ClassBytes::new([ClassBytesRange::new(byte, byte)]); + cls.negate(); + Hir::class(Class::Bytes(cls)) + } + Dot::AnyByteExceptLF => { + Hir::class(Class::Bytes(ClassBytes::new([ + ClassBytesRange::new(b'\0', b'\x09'), + ClassBytesRange::new(b'\x0B', b'\xFF'), + ]))) + } + Dot::AnyByteExceptCRLF => { + Hir::class(Class::Bytes(ClassBytes::new([ + ClassBytesRange::new(b'\0', b'\x09'), + ClassBytesRange::new(b'\x0B', b'\x0C'), + ClassBytesRange::new(b'\x0E', b'\xFF'), + ]))) + } + } + } +} + +/// The underlying kind of an arbitrary [`Hir`] expression. +/// +/// An `HirKind` is principally useful for doing case analysis on the type +/// of a regular expression. If you're looking to build new `Hir` values, +/// then you _must_ use the smart constructors defined on `Hir`, like +/// [`Hir::repetition`], to build new `Hir` values. The API intentionally does +/// not expose any way of building an `Hir` directly from an `HirKind`. +#[derive(Clone, Debug, Eq, PartialEq)] +pub enum HirKind { + /// The empty regular expression, which matches everything, including the + /// empty string. + Empty, + /// A literal string that matches exactly these bytes. + Literal(Literal), + /// A single character class that matches any of the characters in the + /// class. A class can either consist of Unicode scalar values as + /// characters, or it can use bytes. + /// + /// A class may be empty. In which case, it matches nothing. 
+ Class(Class), + /// A look-around assertion. A look-around match always has zero length. + Look(Look), + /// A repetition operation applied to a sub-expression. + Repetition(Repetition), + /// A capturing group, which contains a sub-expression. + Capture(Capture), + /// A concatenation of expressions. + /// + /// A concatenation matches only if each of its sub-expressions match one + /// after the other. + /// + /// Concatenations are guaranteed by `Hir`'s smart constructors to always + /// have at least two sub-expressions. + Concat(Vec), + /// An alternation of expressions. + /// + /// An alternation matches only if at least one of its sub-expressions + /// match. If multiple sub-expressions match, then the leftmost is + /// preferred. + /// + /// Alternations are guaranteed by `Hir`'s smart constructors to always + /// have at least two sub-expressions. + Alternation(Vec), +} + +impl HirKind { + /// Returns a slice of this kind's sub-expressions, if any. + pub fn subs(&self) -> &[Hir] { + use core::slice::from_ref; + + match *self { + HirKind::Empty + | HirKind::Literal(_) + | HirKind::Class(_) + | HirKind::Look(_) => &[], + HirKind::Repetition(Repetition { ref sub, .. }) => from_ref(sub), + HirKind::Capture(Capture { ref sub, .. }) => from_ref(sub), + HirKind::Concat(ref subs) => subs, + HirKind::Alternation(ref subs) => subs, + } + } +} + +impl core::fmt::Debug for Hir { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + self.kind.fmt(f) + } +} + +/// Print a display representation of this Hir. +/// +/// The result of this is a valid regular expression pattern string. +/// +/// This implementation uses constant stack space and heap space proportional +/// to the size of the `Hir`. +impl core::fmt::Display for Hir { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + crate::hir::print::Printer::new().print(self, f) + } +} + +/// The high-level intermediate representation of a literal. +/// +/// A literal corresponds to `0` or more bytes that should be matched +/// literally. The smart constructors defined on `Hir` will automatically +/// concatenate adjacent literals into one literal, and will even automatically +/// replace empty literals with `Hir::empty()`. +/// +/// Note that despite a literal being represented by a sequence of bytes, its +/// `Debug` implementation will attempt to print it as a normal string. (That +/// is, not a sequence of decimal numbers.) +#[derive(Clone, Eq, PartialEq)] +pub struct Literal(pub Box<[u8]>); + +impl core::fmt::Debug for Literal { + fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { + crate::debug::Bytes(&self.0).fmt(f) + } +} + +/// The high-level intermediate representation of a character class. +/// +/// A character class corresponds to a set of characters. A character is either +/// defined by a Unicode scalar value or a byte. +/// +/// A character class, regardless of its character type, is represented by a +/// sequence of non-overlapping non-adjacent ranges of characters. +/// +/// There are no guarantees about which class variant is used. Generally +/// speaking, the Unicode variant is used whenever a class needs to contain +/// non-ASCII Unicode scalar values. But the Unicode variant can be used even +/// when Unicode mode is disabled. For example, at the time of writing, the +/// regex `(?-u:a|\xc2\xa0)` will compile down to HIR for the Unicode class +/// `[a\u00A0]` due to optimizations. 
+/// +/// Note that `Bytes` variant may be produced even when it exclusively matches +/// valid UTF-8. This is because a `Bytes` variant represents an intention by +/// the author of the regular expression to disable Unicode mode, which in turn +/// impacts the semantics of case insensitive matching. For example, `(?i)k` +/// and `(?i-u)k` will not match the same set of strings. +#[derive(Clone, Eq, PartialEq)] +pub enum Class { + /// A set of characters represented by Unicode scalar values. + Unicode(ClassUnicode), + /// A set of characters represented by arbitrary bytes (one byte per + /// character). + Bytes(ClassBytes), +} + +impl Class { + /// Apply Unicode simple case folding to this character class, in place. + /// The character class will be expanded to include all simple case folded + /// character variants. + /// + /// If this is a byte oriented character class, then this will be limited + /// to the ASCII ranges `A-Z` and `a-z`. + /// + /// # Panics + /// + /// This routine panics when the case mapping data necessary for this + /// routine to complete is unavailable. This occurs when the `unicode-case` + /// feature is not enabled and the underlying class is Unicode oriented. + /// + /// Callers should prefer using `try_case_fold_simple` instead, which will + /// return an error instead of panicking. + pub fn case_fold_simple(&mut self) { + match *self { + Class::Unicode(ref mut x) => x.case_fold_simple(), + Class::Bytes(ref mut x) => x.case_fold_simple(), + } + } + + /// Apply Unicode simple case folding to this character class, in place. + /// The character class will be expanded to include all simple case folded + /// character variants. + /// + /// If this is a byte oriented character class, then this will be limited + /// to the ASCII ranges `A-Z` and `a-z`. + /// + /// # Error + /// + /// This routine returns an error when the case mapping data necessary + /// for this routine to complete is unavailable. This occurs when the + /// `unicode-case` feature is not enabled and the underlying class is + /// Unicode oriented. + pub fn try_case_fold_simple( + &mut self, + ) -> core::result::Result<(), CaseFoldError> { + match *self { + Class::Unicode(ref mut x) => x.try_case_fold_simple()?, + Class::Bytes(ref mut x) => x.case_fold_simple(), + } + Ok(()) + } + + /// Negate this character class in place. + /// + /// After completion, this character class will contain precisely the + /// characters that weren't previously in the class. + pub fn negate(&mut self) { + match *self { + Class::Unicode(ref mut x) => x.negate(), + Class::Bytes(ref mut x) => x.negate(), + } + } + + /// Returns true if and only if this character class will only ever match + /// valid UTF-8. + /// + /// A character class can match invalid UTF-8 only when the following + /// conditions are met: + /// + /// 1. The translator was configured to permit generating an expression + /// that can match invalid UTF-8. (By default, this is disabled.) + /// 2. Unicode mode (via the `u` flag) was disabled either in the concrete + /// syntax or in the parser builder. By default, Unicode mode is + /// enabled. + pub fn is_utf8(&self) -> bool { + match *self { + Class::Unicode(_) => true, + Class::Bytes(ref x) => x.is_ascii(), + } + } + + /// Returns the length, in bytes, of the smallest string matched by this + /// character class. + /// + /// For non-empty byte oriented classes, this always returns `1`. For + /// non-empty Unicode oriented classes, this can return `1`, `2`, `3` or + /// `4`. 
For empty classes, `None` is returned. It is impossible for `0` to + /// be returned. + /// + /// # Example + /// + /// This example shows some examples of regexes and their corresponding + /// minimum length, if any. + /// + /// ``` + /// use regex_syntax::{hir::Properties, parse}; + /// + /// // The empty string has a min length of 0. + /// let hir = parse(r"")?; + /// assert_eq!(Some(0), hir.properties().minimum_len()); + /// // As do other types of regexes that only match the empty string. + /// let hir = parse(r"^$\b\B")?; + /// assert_eq!(Some(0), hir.properties().minimum_len()); + /// // A regex that can match the empty string but match more is still 0. + /// let hir = parse(r"a*")?; + /// assert_eq!(Some(0), hir.properties().minimum_len()); + /// // A regex that matches nothing has no minimum defined. + /// let hir = parse(r"[a&&b]")?; + /// assert_eq!(None, hir.properties().minimum_len()); + /// // Character classes usually have a minimum length of 1. + /// let hir = parse(r"\w")?; + /// assert_eq!(Some(1), hir.properties().minimum_len()); + /// // But sometimes Unicode classes might be bigger! + /// let hir = parse(r"\p{Cyrillic}")?; + /// assert_eq!(Some(2), hir.properties().minimum_len()); + /// + /// # Ok::<(), Box>(()) + /// ``` + pub fn minimum_len(&self) -> Option { + match *self { + Class::Unicode(ref x) => x.minimum_len(), + Class::Bytes(ref x) => x.minimum_len(), + } + } + + /// Returns the length, in bytes, of the longest string matched by this + /// character class. + /// + /// For non-empty byte oriented classes, this always returns `1`. For + /// non-empty Unicode oriented classes, this can return `1`, `2`, `3` or + /// `4`. For empty classes, `None` is returned. It is impossible for `0` to + /// be returned. + /// + /// # Example + /// + /// This example shows some examples of regexes and their corresponding + /// maximum length, if any. + /// + /// ``` + /// use regex_syntax::{hir::Properties, parse}; + /// + /// // The empty string has a max length of 0. + /// let hir = parse(r"")?; + /// assert_eq!(Some(0), hir.properties().maximum_len()); + /// // As do other types of regexes that only match the empty string. + /// let hir = parse(r"^$\b\B")?; + /// assert_eq!(Some(0), hir.properties().maximum_len()); + /// // A regex that matches nothing has no maximum defined. + /// let hir = parse(r"[a&&b]")?; + /// assert_eq!(None, hir.properties().maximum_len()); + /// // Bounded repeats work as you expect. + /// let hir = parse(r"x{2,10}")?; + /// assert_eq!(Some(10), hir.properties().maximum_len()); + /// // An unbounded repeat means there is no maximum. + /// let hir = parse(r"x{2,}")?; + /// assert_eq!(None, hir.properties().maximum_len()); + /// // With Unicode enabled, \w can match up to 4 bytes! + /// let hir = parse(r"\w")?; + /// assert_eq!(Some(4), hir.properties().maximum_len()); + /// // Without Unicode enabled, \w matches at most 1 byte. + /// let hir = parse(r"(?-u)\w")?; + /// assert_eq!(Some(1), hir.properties().maximum_len()); + /// + /// # Ok::<(), Box>(()) + /// ``` + pub fn maximum_len(&self) -> Option { + match *self { + Class::Unicode(ref x) => x.maximum_len(), + Class::Bytes(ref x) => x.maximum_len(), + } + } + + /// Returns true if and only if this character class is empty. That is, + /// it has no elements. + /// + /// An empty character can never match anything, including an empty string. 
+ pub fn is_empty(&self) -> bool { + match *self { + Class::Unicode(ref x) => x.ranges().is_empty(), + Class::Bytes(ref x) => x.ranges().is_empty(), + } + } + + /// If this class consists of exactly one element (whether a codepoint or a + /// byte), then return it as a literal byte string. + /// + /// If this class is empty or contains more than one element, then `None` + /// is returned. + pub fn literal(&self) -> Option> { + match *self { + Class::Unicode(ref x) => x.literal(), + Class::Bytes(ref x) => x.literal(), + } + } +} + +impl core::fmt::Debug for Class { + fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { + use crate::debug::Byte; + + let mut fmter = f.debug_set(); + match *self { + Class::Unicode(ref cls) => { + for r in cls.ranges().iter() { + fmter.entry(&(r.start..=r.end)); + } + } + Class::Bytes(ref cls) => { + for r in cls.ranges().iter() { + fmter.entry(&(Byte(r.start)..=Byte(r.end))); + } + } + } + fmter.finish() + } +} + +/// A set of characters represented by Unicode scalar values. +#[derive(Clone, Debug, Eq, PartialEq)] +pub struct ClassUnicode { + set: IntervalSet, +} + +impl ClassUnicode { + /// Create a new class from a sequence of ranges. + /// + /// The given ranges do not need to be in any specific order, and ranges + /// may overlap. Ranges will automatically be sorted into a canonical + /// non-overlapping order. + pub fn new(ranges: I) -> ClassUnicode + where + I: IntoIterator, + { + ClassUnicode { set: IntervalSet::new(ranges) } + } + + /// Create a new class with no ranges. + /// + /// An empty class matches nothing. That is, it is equivalent to + /// [`Hir::fail`]. + pub fn empty() -> ClassUnicode { + ClassUnicode::new(vec![]) + } + + /// Add a new range to this set. + pub fn push(&mut self, range: ClassUnicodeRange) { + self.set.push(range); + } + + /// Return an iterator over all ranges in this class. + /// + /// The iterator yields ranges in ascending order. + pub fn iter(&self) -> ClassUnicodeIter<'_> { + ClassUnicodeIter(self.set.iter()) + } + + /// Return the underlying ranges as a slice. + pub fn ranges(&self) -> &[ClassUnicodeRange] { + self.set.intervals() + } + + /// Expand this character class such that it contains all case folded + /// characters, according to Unicode's "simple" mapping. For example, if + /// this class consists of the range `a-z`, then applying case folding will + /// result in the class containing both the ranges `a-z` and `A-Z`. + /// + /// # Panics + /// + /// This routine panics when the case mapping data necessary for this + /// routine to complete is unavailable. This occurs when the `unicode-case` + /// feature is not enabled. + /// + /// Callers should prefer using `try_case_fold_simple` instead, which will + /// return an error instead of panicking. + pub fn case_fold_simple(&mut self) { + self.set + .case_fold_simple() + .expect("unicode-case feature must be enabled"); + } + + /// Expand this character class such that it contains all case folded + /// characters, according to Unicode's "simple" mapping. For example, if + /// this class consists of the range `a-z`, then applying case folding will + /// result in the class containing both the ranges `a-z` and `A-Z`. + /// + /// # Error + /// + /// This routine returns an error when the case mapping data necessary + /// for this routine to complete is unavailable. This occurs when the + /// `unicode-case` feature is not enabled. 
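+    ///
+    /// # Example
+    ///
+    /// A minimal sketch, assuming the `unicode-case` feature is enabled
+    /// (it is part of this crate's default feature set):
+    ///
+    /// ```
+    /// use regex_syntax::hir::{ClassUnicode, ClassUnicodeRange};
+    ///
+    /// let mut cls = ClassUnicode::new([ClassUnicodeRange::new('a', 'z')]);
+    /// // With case mapping data available, this succeeds instead of panicking.
+    /// assert!(cls.try_case_fold_simple().is_ok());
+    /// ```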
+ pub fn try_case_fold_simple( + &mut self, + ) -> core::result::Result<(), CaseFoldError> { + self.set.case_fold_simple() + } + + /// Negate this character class. + /// + /// For all `c` where `c` is a Unicode scalar value, if `c` was in this + /// set, then it will not be in this set after negation. + pub fn negate(&mut self) { + self.set.negate(); + } + + /// Union this character class with the given character class, in place. + pub fn union(&mut self, other: &ClassUnicode) { + self.set.union(&other.set); + } + + /// Intersect this character class with the given character class, in + /// place. + pub fn intersect(&mut self, other: &ClassUnicode) { + self.set.intersect(&other.set); + } + + /// Subtract the given character class from this character class, in place. + pub fn difference(&mut self, other: &ClassUnicode) { + self.set.difference(&other.set); + } + + /// Compute the symmetric difference of the given character classes, in + /// place. + /// + /// This computes the symmetric difference of two character classes. This + /// removes all elements in this class that are also in the given class, + /// but all adds all elements from the given class that aren't in this + /// class. That is, the class will contain all elements in either class, + /// but will not contain any elements that are in both classes. + pub fn symmetric_difference(&mut self, other: &ClassUnicode) { + self.set.symmetric_difference(&other.set); + } + + /// Returns true if and only if this character class will either match + /// nothing or only ASCII bytes. Stated differently, this returns false + /// if and only if this class contains a non-ASCII codepoint. + pub fn is_ascii(&self) -> bool { + self.set.intervals().last().map_or(true, |r| r.end <= '\x7F') + } + + /// Returns the length, in bytes, of the smallest string matched by this + /// character class. + /// + /// Returns `None` when the class is empty. + pub fn minimum_len(&self) -> Option { + let first = self.ranges().get(0)?; + // Correct because c1 < c2 implies c1.len_utf8() < c2.len_utf8(). + Some(first.start.len_utf8()) + } + + /// Returns the length, in bytes, of the longest string matched by this + /// character class. + /// + /// Returns `None` when the class is empty. + pub fn maximum_len(&self) -> Option { + let last = self.ranges().last()?; + // Correct because c1 < c2 implies c1.len_utf8() < c2.len_utf8(). + Some(last.end.len_utf8()) + } + + /// If this class consists of exactly one codepoint, then return it as + /// a literal byte string. + /// + /// If this class is empty or contains more than one codepoint, then `None` + /// is returned. + pub fn literal(&self) -> Option> { + let rs = self.ranges(); + if rs.len() == 1 && rs[0].start == rs[0].end { + Some(rs[0].start.encode_utf8(&mut [0; 4]).to_string().into_bytes()) + } else { + None + } + } + + /// If this class consists of only ASCII ranges, then return its + /// corresponding and equivalent byte class. + pub fn to_byte_class(&self) -> Option { + if !self.is_ascii() { + return None; + } + Some(ClassBytes::new(self.ranges().iter().map(|r| { + // Since we are guaranteed that our codepoint range is ASCII, the + // 'u8::try_from' calls below are guaranteed to be correct. + ClassBytesRange { + start: u8::try_from(r.start).unwrap(), + end: u8::try_from(r.end).unwrap(), + } + }))) + } +} + +/// An iterator over all ranges in a Unicode character class. +/// +/// The lifetime `'a` refers to the lifetime of the underlying class. 
+#[derive(Debug)] +pub struct ClassUnicodeIter<'a>(IntervalSetIter<'a, ClassUnicodeRange>); + +impl<'a> Iterator for ClassUnicodeIter<'a> { + type Item = &'a ClassUnicodeRange; + + fn next(&mut self) -> Option<&'a ClassUnicodeRange> { + self.0.next() + } +} + +/// A single range of characters represented by Unicode scalar values. +/// +/// The range is closed. That is, the start and end of the range are included +/// in the range. +#[derive(Clone, Copy, Default, Eq, PartialEq, PartialOrd, Ord)] +pub struct ClassUnicodeRange { + start: char, + end: char, +} + +impl core::fmt::Debug for ClassUnicodeRange { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + let start = if !self.start.is_whitespace() && !self.start.is_control() + { + self.start.to_string() + } else { + format!("0x{:X}", u32::from(self.start)) + }; + let end = if !self.end.is_whitespace() && !self.end.is_control() { + self.end.to_string() + } else { + format!("0x{:X}", u32::from(self.end)) + }; + f.debug_struct("ClassUnicodeRange") + .field("start", &start) + .field("end", &end) + .finish() + } +} + +impl Interval for ClassUnicodeRange { + type Bound = char; + + #[inline] + fn lower(&self) -> char { + self.start + } + #[inline] + fn upper(&self) -> char { + self.end + } + #[inline] + fn set_lower(&mut self, bound: char) { + self.start = bound; + } + #[inline] + fn set_upper(&mut self, bound: char) { + self.end = bound; + } + + /// Apply simple case folding to this Unicode scalar value range. + /// + /// Additional ranges are appended to the given vector. Canonical ordering + /// is *not* maintained in the given vector. + fn case_fold_simple( + &self, + ranges: &mut Vec, + ) -> Result<(), unicode::CaseFoldError> { + let mut folder = unicode::SimpleCaseFolder::new()?; + if !folder.overlaps(self.start, self.end) { + return Ok(()); + } + let (start, end) = (u32::from(self.start), u32::from(self.end)); + for cp in (start..=end).filter_map(char::from_u32) { + for &cp_folded in folder.mapping(cp) { + ranges.push(ClassUnicodeRange::new(cp_folded, cp_folded)); + } + } + Ok(()) + } +} + +impl ClassUnicodeRange { + /// Create a new Unicode scalar value range for a character class. + /// + /// The returned range is always in a canonical form. That is, the range + /// returned always satisfies the invariant that `start <= end`. + pub fn new(start: char, end: char) -> ClassUnicodeRange { + ClassUnicodeRange::create(start, end) + } + + /// Return the start of this range. + /// + /// The start of a range is always less than or equal to the end of the + /// range. + pub fn start(&self) -> char { + self.start + } + + /// Return the end of this range. + /// + /// The end of a range is always greater than or equal to the start of the + /// range. + pub fn end(&self) -> char { + self.end + } + + /// Returns the number of codepoints in this range. + pub fn len(&self) -> usize { + let diff = 1 + u32::from(self.end) - u32::from(self.start); + // This is likely to panic in 16-bit targets since a usize can only fit + // 2^16. It's not clear what to do here, other than to return an error + // when building a Unicode class that contains a range whose length + // overflows usize. (Which, to be honest, is probably quite common on + // 16-bit targets. For example, this would imply that '.' and '\p{any}' + // would be impossible to build.) + usize::try_from(diff).expect("char class len fits in usize") + } +} + +/// A set of characters represented by arbitrary bytes. +/// +/// Each byte corresponds to one character. 
+#[derive(Clone, Debug, Eq, PartialEq)] +pub struct ClassBytes { + set: IntervalSet, +} + +impl ClassBytes { + /// Create a new class from a sequence of ranges. + /// + /// The given ranges do not need to be in any specific order, and ranges + /// may overlap. Ranges will automatically be sorted into a canonical + /// non-overlapping order. + pub fn new(ranges: I) -> ClassBytes + where + I: IntoIterator, + { + ClassBytes { set: IntervalSet::new(ranges) } + } + + /// Create a new class with no ranges. + /// + /// An empty class matches nothing. That is, it is equivalent to + /// [`Hir::fail`]. + pub fn empty() -> ClassBytes { + ClassBytes::new(vec![]) + } + + /// Add a new range to this set. + pub fn push(&mut self, range: ClassBytesRange) { + self.set.push(range); + } + + /// Return an iterator over all ranges in this class. + /// + /// The iterator yields ranges in ascending order. + pub fn iter(&self) -> ClassBytesIter<'_> { + ClassBytesIter(self.set.iter()) + } + + /// Return the underlying ranges as a slice. + pub fn ranges(&self) -> &[ClassBytesRange] { + self.set.intervals() + } + + /// Expand this character class such that it contains all case folded + /// characters. For example, if this class consists of the range `a-z`, + /// then applying case folding will result in the class containing both the + /// ranges `a-z` and `A-Z`. + /// + /// Note that this only applies ASCII case folding, which is limited to the + /// characters `a-z` and `A-Z`. + pub fn case_fold_simple(&mut self) { + self.set.case_fold_simple().expect("ASCII case folding never fails"); + } + + /// Negate this byte class. + /// + /// For all `b` where `b` is a any byte, if `b` was in this set, then it + /// will not be in this set after negation. + pub fn negate(&mut self) { + self.set.negate(); + } + + /// Union this byte class with the given byte class, in place. + pub fn union(&mut self, other: &ClassBytes) { + self.set.union(&other.set); + } + + /// Intersect this byte class with the given byte class, in place. + pub fn intersect(&mut self, other: &ClassBytes) { + self.set.intersect(&other.set); + } + + /// Subtract the given byte class from this byte class, in place. + pub fn difference(&mut self, other: &ClassBytes) { + self.set.difference(&other.set); + } + + /// Compute the symmetric difference of the given byte classes, in place. + /// + /// This computes the symmetric difference of two byte classes. This + /// removes all elements in this class that are also in the given class, + /// but all adds all elements from the given class that aren't in this + /// class. That is, the class will contain all elements in either class, + /// but will not contain any elements that are in both classes. + pub fn symmetric_difference(&mut self, other: &ClassBytes) { + self.set.symmetric_difference(&other.set); + } + + /// Returns true if and only if this character class will either match + /// nothing or only ASCII bytes. Stated differently, this returns false + /// if and only if this class contains a non-ASCII byte. + pub fn is_ascii(&self) -> bool { + self.set.intervals().last().map_or(true, |r| r.end <= 0x7F) + } + + /// Returns the length, in bytes, of the smallest string matched by this + /// character class. + /// + /// Returns `None` when the class is empty. + pub fn minimum_len(&self) -> Option { + if self.ranges().is_empty() { + None + } else { + Some(1) + } + } + + /// Returns the length, in bytes, of the longest string matched by this + /// character class. + /// + /// Returns `None` when the class is empty. 
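+    ///
+    /// # Example
+    ///
+    /// A minimal sketch, assuming only the public `regex_syntax::hir` API
+    /// defined in this module:
+    ///
+    /// ```
+    /// use regex_syntax::hir::{ClassBytes, ClassBytesRange};
+    ///
+    /// // Byte classes match exactly one byte, so the maximum is 1.
+    /// let cls = ClassBytes::new([ClassBytesRange::new(b'a', b'z')]);
+    /// assert_eq!(Some(1), cls.maximum_len());
+    /// // An empty class matches nothing, so there is no maximum.
+    /// assert_eq!(None, ClassBytes::empty().maximum_len());
+    /// ```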
+ pub fn maximum_len(&self) -> Option { + if self.ranges().is_empty() { + None + } else { + Some(1) + } + } + + /// If this class consists of exactly one byte, then return it as + /// a literal byte string. + /// + /// If this class is empty or contains more than one byte, then `None` + /// is returned. + pub fn literal(&self) -> Option> { + let rs = self.ranges(); + if rs.len() == 1 && rs[0].start == rs[0].end { + Some(vec![rs[0].start]) + } else { + None + } + } + + /// If this class consists of only ASCII ranges, then return its + /// corresponding and equivalent Unicode class. + pub fn to_unicode_class(&self) -> Option { + if !self.is_ascii() { + return None; + } + Some(ClassUnicode::new(self.ranges().iter().map(|r| { + // Since we are guaranteed that our byte range is ASCII, the + // 'char::from' calls below are correct and will not erroneously + // convert a raw byte value into its corresponding codepoint. + ClassUnicodeRange { + start: char::from(r.start), + end: char::from(r.end), + } + }))) + } +} + +/// An iterator over all ranges in a byte character class. +/// +/// The lifetime `'a` refers to the lifetime of the underlying class. +#[derive(Debug)] +pub struct ClassBytesIter<'a>(IntervalSetIter<'a, ClassBytesRange>); + +impl<'a> Iterator for ClassBytesIter<'a> { + type Item = &'a ClassBytesRange; + + fn next(&mut self) -> Option<&'a ClassBytesRange> { + self.0.next() + } +} + +/// A single range of characters represented by arbitrary bytes. +/// +/// The range is closed. That is, the start and end of the range are included +/// in the range. +#[derive(Clone, Copy, Default, Eq, PartialEq, PartialOrd, Ord)] +pub struct ClassBytesRange { + start: u8, + end: u8, +} + +impl Interval for ClassBytesRange { + type Bound = u8; + + #[inline] + fn lower(&self) -> u8 { + self.start + } + #[inline] + fn upper(&self) -> u8 { + self.end + } + #[inline] + fn set_lower(&mut self, bound: u8) { + self.start = bound; + } + #[inline] + fn set_upper(&mut self, bound: u8) { + self.end = bound; + } + + /// Apply simple case folding to this byte range. Only ASCII case mappings + /// (for a-z) are applied. + /// + /// Additional ranges are appended to the given vector. Canonical ordering + /// is *not* maintained in the given vector. + fn case_fold_simple( + &self, + ranges: &mut Vec, + ) -> Result<(), unicode::CaseFoldError> { + if !ClassBytesRange::new(b'a', b'z').is_intersection_empty(self) { + let lower = cmp::max(self.start, b'a'); + let upper = cmp::min(self.end, b'z'); + ranges.push(ClassBytesRange::new(lower - 32, upper - 32)); + } + if !ClassBytesRange::new(b'A', b'Z').is_intersection_empty(self) { + let lower = cmp::max(self.start, b'A'); + let upper = cmp::min(self.end, b'Z'); + ranges.push(ClassBytesRange::new(lower + 32, upper + 32)); + } + Ok(()) + } +} + +impl ClassBytesRange { + /// Create a new byte range for a character class. + /// + /// The returned range is always in a canonical form. That is, the range + /// returned always satisfies the invariant that `start <= end`. + pub fn new(start: u8, end: u8) -> ClassBytesRange { + ClassBytesRange::create(start, end) + } + + /// Return the start of this range. + /// + /// The start of a range is always less than or equal to the end of the + /// range. + pub fn start(&self) -> u8 { + self.start + } + + /// Return the end of this range. + /// + /// The end of a range is always greater than or equal to the start of the + /// range. + pub fn end(&self) -> u8 { + self.end + } + + /// Returns the number of bytes in this range. 
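+    ///
+    /// # Example
+    ///
+    /// A minimal sketch, assuming only the public `regex_syntax::hir` API
+    /// defined in this module:
+    ///
+    /// ```
+    /// use regex_syntax::hir::ClassBytesRange;
+    ///
+    /// // The range is closed, so both endpoints are counted.
+    /// assert_eq!(26, ClassBytesRange::new(b'a', b'z').len());
+    /// assert_eq!(1, ClassBytesRange::new(b'x', b'x').len());
+    /// ```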
+ pub fn len(&self) -> usize { + usize::from(self.end.checked_sub(self.start).unwrap()) + .checked_add(1) + .unwrap() + } +} + +impl core::fmt::Debug for ClassBytesRange { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + f.debug_struct("ClassBytesRange") + .field("start", &crate::debug::Byte(self.start)) + .field("end", &crate::debug::Byte(self.end)) + .finish() + } +} + +/// The high-level intermediate representation for a look-around assertion. +/// +/// An assertion match is always zero-length. Also called an "empty match." +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub enum Look { + /// Match the beginning of text. Specifically, this matches at the starting + /// position of the input. + Start = 1 << 0, + /// Match the end of text. Specifically, this matches at the ending + /// position of the input. + End = 1 << 1, + /// Match the beginning of a line or the beginning of text. Specifically, + /// this matches at the starting position of the input, or at the position + /// immediately following a `\n` character. + StartLF = 1 << 2, + /// Match the end of a line or the end of text. Specifically, this matches + /// at the end position of the input, or at the position immediately + /// preceding a `\n` character. + EndLF = 1 << 3, + /// Match the beginning of a line or the beginning of text. Specifically, + /// this matches at the starting position of the input, or at the position + /// immediately following either a `\r` or `\n` character, but never after + /// a `\r` when a `\n` follows. + StartCRLF = 1 << 4, + /// Match the end of a line or the end of text. Specifically, this matches + /// at the end position of the input, or at the position immediately + /// preceding a `\r` or `\n` character, but never before a `\n` when a `\r` + /// precedes it. + EndCRLF = 1 << 5, + /// Match an ASCII-only word boundary. That is, this matches a position + /// where the left adjacent character and right adjacent character + /// correspond to a word and non-word or a non-word and word character. + WordAscii = 1 << 6, + /// Match an ASCII-only negation of a word boundary. + WordAsciiNegate = 1 << 7, + /// Match a Unicode-aware word boundary. That is, this matches a position + /// where the left adjacent character and right adjacent character + /// correspond to a word and non-word or a non-word and word character. + WordUnicode = 1 << 8, + /// Match a Unicode-aware negation of a word boundary. + WordUnicodeNegate = 1 << 9, + /// Match the start of an ASCII-only word boundary. That is, this matches a + /// position at either the beginning of the haystack or where the previous + /// character is not a word character and the following character is a word + /// character. + WordStartAscii = 1 << 10, + /// Match the end of an ASCII-only word boundary. That is, this matches + /// a position at either the end of the haystack or where the previous + /// character is a word character and the following character is not a word + /// character. + WordEndAscii = 1 << 11, + /// Match the start of a Unicode word boundary. That is, this matches a + /// position at either the beginning of the haystack or where the previous + /// character is not a word character and the following character is a word + /// character. + WordStartUnicode = 1 << 12, + /// Match the end of a Unicode word boundary. That is, this matches a + /// position at either the end of the haystack or where the previous + /// character is a word character and the following character is not a word + /// character. 
+ WordEndUnicode = 1 << 13, + /// Match the start half of an ASCII-only word boundary. That is, this + /// matches a position at either the beginning of the haystack or where the + /// previous character is not a word character. + WordStartHalfAscii = 1 << 14, + /// Match the end half of an ASCII-only word boundary. That is, this + /// matches a position at either the end of the haystack or where the + /// following character is not a word character. + WordEndHalfAscii = 1 << 15, + /// Match the start half of a Unicode word boundary. That is, this matches + /// a position at either the beginning of the haystack or where the + /// previous character is not a word character. + WordStartHalfUnicode = 1 << 16, + /// Match the end half of a Unicode word boundary. That is, this matches + /// a position at either the end of the haystack or where the following + /// character is not a word character. + WordEndHalfUnicode = 1 << 17, +} + +impl Look { + /// Flip the look-around assertion to its equivalent for reverse searches. + /// For example, `StartLF` gets translated to `EndLF`. + /// + /// Some assertions, such as `WordUnicode`, remain the same since they + /// match the same positions regardless of the direction of the search. + #[inline] + pub const fn reversed(self) -> Look { + match self { + Look::Start => Look::End, + Look::End => Look::Start, + Look::StartLF => Look::EndLF, + Look::EndLF => Look::StartLF, + Look::StartCRLF => Look::EndCRLF, + Look::EndCRLF => Look::StartCRLF, + Look::WordAscii => Look::WordAscii, + Look::WordAsciiNegate => Look::WordAsciiNegate, + Look::WordUnicode => Look::WordUnicode, + Look::WordUnicodeNegate => Look::WordUnicodeNegate, + Look::WordStartAscii => Look::WordEndAscii, + Look::WordEndAscii => Look::WordStartAscii, + Look::WordStartUnicode => Look::WordEndUnicode, + Look::WordEndUnicode => Look::WordStartUnicode, + Look::WordStartHalfAscii => Look::WordEndHalfAscii, + Look::WordEndHalfAscii => Look::WordStartHalfAscii, + Look::WordStartHalfUnicode => Look::WordEndHalfUnicode, + Look::WordEndHalfUnicode => Look::WordStartHalfUnicode, + } + } + + /// Return the underlying representation of this look-around enumeration + /// as an integer. Giving the return value to the [`Look::from_repr`] + /// constructor is guaranteed to return the same look-around variant that + /// one started with within a semver compatible release of this crate. + #[inline] + pub const fn as_repr(self) -> u32 { + // AFAIK, 'as' is the only way to zero-cost convert an int enum to an + // actual int. + self as u32 + } + + /// Given the underlying representation of a `Look` value, return the + /// corresponding `Look` value if the representation is valid. Otherwise + /// `None` is returned. 
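+    ///
+    /// # Example
+    ///
+    /// A minimal sketch, assuming only the public `regex_syntax::hir::Look`
+    /// API defined in this module:
+    ///
+    /// ```
+    /// use regex_syntax::hir::Look;
+    ///
+    /// // Round-tripping through the representation preserves the variant.
+    /// assert_eq!(Some(Look::Start), Look::from_repr(Look::Start.as_repr()));
+    /// assert_eq!(Some(Look::WordUnicode), Look::from_repr(1 << 8));
+    /// // Bits that don't correspond to any assertion yield `None`.
+    /// assert_eq!(None, Look::from_repr(1 << 31));
+    /// ```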
+    #[inline]
+    pub const fn from_repr(repr: u32) -> Option<Look> {
+        match repr {
+            0b00_0000_0000_0000_0001 => Some(Look::Start),
+            0b00_0000_0000_0000_0010 => Some(Look::End),
+            0b00_0000_0000_0000_0100 => Some(Look::StartLF),
+            0b00_0000_0000_0000_1000 => Some(Look::EndLF),
+            0b00_0000_0000_0001_0000 => Some(Look::StartCRLF),
+            0b00_0000_0000_0010_0000 => Some(Look::EndCRLF),
+            0b00_0000_0000_0100_0000 => Some(Look::WordAscii),
+            0b00_0000_0000_1000_0000 => Some(Look::WordAsciiNegate),
+            0b00_0000_0001_0000_0000 => Some(Look::WordUnicode),
+            0b00_0000_0010_0000_0000 => Some(Look::WordUnicodeNegate),
+            0b00_0000_0100_0000_0000 => Some(Look::WordStartAscii),
+            0b00_0000_1000_0000_0000 => Some(Look::WordEndAscii),
+            0b00_0001_0000_0000_0000 => Some(Look::WordStartUnicode),
+            0b00_0010_0000_0000_0000 => Some(Look::WordEndUnicode),
+            0b00_0100_0000_0000_0000 => Some(Look::WordStartHalfAscii),
+            0b00_1000_0000_0000_0000 => Some(Look::WordEndHalfAscii),
+            0b01_0000_0000_0000_0000 => Some(Look::WordStartHalfUnicode),
+            0b10_0000_0000_0000_0000 => Some(Look::WordEndHalfUnicode),
+            _ => None,
+        }
+    }
+
+    /// Returns a convenient single codepoint representation of this
+    /// look-around assertion. Each assertion is guaranteed to be represented
+    /// by a distinct character.
+    ///
+    /// This is useful for succinctly representing a look-around assertion in
+    /// human friendly but succinct output intended for a programmer working on
+    /// regex internals.
+    #[inline]
+    pub const fn as_char(self) -> char {
+        match self {
+            Look::Start => 'A',
+            Look::End => 'z',
+            Look::StartLF => '^',
+            Look::EndLF => '$',
+            Look::StartCRLF => 'r',
+            Look::EndCRLF => 'R',
+            Look::WordAscii => 'b',
+            Look::WordAsciiNegate => 'B',
+            Look::WordUnicode => '𝛃',
+            Look::WordUnicodeNegate => '𝚩',
+            Look::WordStartAscii => '<',
+            Look::WordEndAscii => '>',
+            Look::WordStartUnicode => '〈',
+            Look::WordEndUnicode => '〉',
+            Look::WordStartHalfAscii => '◁',
+            Look::WordEndHalfAscii => '▷',
+            Look::WordStartHalfUnicode => '◀',
+            Look::WordEndHalfUnicode => '▶',
+        }
+    }
+}
+
+/// The high-level intermediate representation for a capturing group.
+///
+/// A capturing group always has an index and a child expression. It may
+/// also have a name associated with it (e.g., `(?P<foo>\w)`), but it's not
+/// necessary.
+///
+/// Note that there is no explicit representation of a non-capturing group
+/// in a `Hir`. Instead, non-capturing grouping is handled automatically by
+/// the recursive structure of the `Hir` itself.
+#[derive(Clone, Debug, Eq, PartialEq)]
+pub struct Capture {
+    /// The capture index of the capture.
+    pub index: u32,
+    /// The name of the capture, if it exists.
+    pub name: Option<Box<str>>,
+    /// The expression inside the capturing group, which may be empty.
+    pub sub: Box<Hir>,
+}
+
+/// The high-level intermediate representation of a repetition operator.
+///
+/// A repetition operator permits the repetition of an arbitrary
+/// sub-expression.
+#[derive(Clone, Debug, Eq, PartialEq)]
+pub struct Repetition {
+    /// The minimum range of the repetition.
+    ///
+    /// Note that special cases like `?`, `+` and `*` all get translated into
+    /// the ranges `{0,1}`, `{1,}` and `{0,}`, respectively.
+    ///
+    /// When `min` is zero, this expression can match the empty string
+    /// regardless of what its sub-expression is.
+    pub min: u32,
+    /// The maximum range of the repetition.
+    ///
+    /// Note that when `max` is `None`, `min` acts as a lower bound but where
+    /// there is no upper bound. For something like `x{5}` where the min and
+    /// max are equivalent, `min` will be set to `5` and `max` will be set to
+    /// `Some(5)`.
+    pub max: Option<u32>,
+    /// Whether this repetition operator is greedy or not. A greedy operator
+    /// will match as much as it can. A non-greedy operator will match as
+    /// little as it can.
+    ///
+    /// Typically, operators are greedy by default and are only non-greedy when
+    /// a `?` suffix is used, e.g., `(expr)*` is greedy while `(expr)*?` is
+    /// not. However, this can be inverted via the `U` "ungreedy" flag.
+    pub greedy: bool,
+    /// The expression being repeated.
+    pub sub: Box<Hir>,
+}
+
+impl Repetition {
+    /// Returns a new repetition with the same `min`, `max` and `greedy`
+    /// values, but with its sub-expression replaced with the one given.
+    pub fn with(&self, sub: Hir) -> Repetition {
+        Repetition {
+            min: self.min,
+            max: self.max,
+            greedy: self.greedy,
+            sub: Box::new(sub),
+        }
+    }
+}
+
+/// A type describing the different flavors of `.`.
+///
+/// This type is meant to be used with [`Hir::dot`], which is a convenience
+/// routine for building HIR values derived from the `.` regex.
+#[non_exhaustive]
+#[derive(Clone, Copy, Debug, Eq, PartialEq)]
+pub enum Dot {
+    /// Matches the UTF-8 encoding of any Unicode scalar value.
+    ///
+    /// This is equivalent to `(?su:.)` and also `\p{any}`.
+    AnyChar,
+    /// Matches any byte value.
+    ///
+    /// This is equivalent to `(?s-u:.)` and also `(?-u:[\x00-\xFF])`.
+    AnyByte,
+    /// Matches the UTF-8 encoding of any Unicode scalar value except for the
+    /// `char` given.
+    ///
+    /// This is equivalent to using `(?u-s:.)` with the line terminator set
+    /// to a particular ASCII byte. (Because of peculiarities in the regex
+    /// engines, a line terminator must be a single byte. It follows that when
+    /// UTF-8 mode is enabled, this single byte must also be a Unicode scalar
+    /// value. That is, it must be ASCII.)
+    ///
+    /// (This and `AnyCharExceptLF` both exist because of legacy reasons.
+    /// `AnyCharExceptLF` will be dropped in the next breaking change release.)
+    AnyCharExcept(char),
+    /// Matches the UTF-8 encoding of any Unicode scalar value except for `\n`.
+    ///
+    /// This is equivalent to `(?u-s:.)` and also `[\p{any}--\n]`.
+    AnyCharExceptLF,
+    /// Matches the UTF-8 encoding of any Unicode scalar value except for `\r`
+    /// and `\n`.
+    ///
+    /// This is equivalent to `(?uR-s:.)` and also `[\p{any}--\r\n]`.
+    AnyCharExceptCRLF,
+    /// Matches any byte value except for the `u8` given.
+    ///
+    /// This is equivalent to using `(?-us:.)` with the line terminator set
+    /// to a particular ASCII byte. (Because of peculiarities in the regex
+    /// engines, a line terminator must be a single byte. It follows that when
+    /// UTF-8 mode is enabled, this single byte must also be a Unicode scalar
+    /// value. That is, it must be ASCII.)
+    ///
+    /// (This and `AnyByteExceptLF` both exist because of legacy reasons.
+    /// `AnyByteExceptLF` will be dropped in the next breaking change release.)
+    AnyByteExcept(u8),
+    /// Matches any byte value except for `\n`.
+    ///
+    /// This is equivalent to `(?-su:.)` and also `(?-u:[[\x00-\xFF]--\n])`.
+    AnyByteExceptLF,
+    /// Matches any byte value except for `\r` and `\n`.
+    ///
+    /// This is equivalent to `(?R-su:.)` and also `(?-u:[[\x00-\xFF]--\r\n])`.
+    AnyByteExceptCRLF,
+}
+
+/// A custom `Drop` impl is used for `HirKind` such that it uses constant stack
+/// space but heap space proportional to the depth of the total `Hir`.
+impl Drop for Hir {
+    fn drop(&mut self) {
+        use core::mem;
+
+        match *self.kind() {
+            HirKind::Empty
+            | HirKind::Literal(_)
+            | HirKind::Class(_)
+            | HirKind::Look(_) => return,
+            HirKind::Capture(ref x) if x.sub.kind.subs().is_empty() => return,
+            HirKind::Repetition(ref x) if x.sub.kind.subs().is_empty() => {
+                return
+            }
+            HirKind::Concat(ref x) if x.is_empty() => return,
+            HirKind::Alternation(ref x) if x.is_empty() => return,
+            _ => {}
+        }
+
+        let mut stack = vec![mem::replace(self, Hir::empty())];
+        while let Some(mut expr) = stack.pop() {
+            match expr.kind {
+                HirKind::Empty
+                | HirKind::Literal(_)
+                | HirKind::Class(_)
+                | HirKind::Look(_) => {}
+                HirKind::Capture(ref mut x) => {
+                    stack.push(mem::replace(&mut x.sub, Hir::empty()));
+                }
+                HirKind::Repetition(ref mut x) => {
+                    stack.push(mem::replace(&mut x.sub, Hir::empty()));
+                }
+                HirKind::Concat(ref mut x) => {
+                    stack.extend(x.drain(..));
+                }
+                HirKind::Alternation(ref mut x) => {
+                    stack.extend(x.drain(..));
+                }
+            }
+        }
+    }
+}
+
+/// A type that collects various properties of an HIR value.
+///
+/// Properties are always scalar values and represent meta data that is
+/// computed inductively on an HIR value. Properties are defined for all
+/// HIR values.
+///
+/// All methods on a `Properties` value take constant time and are meant to
+/// be cheap to call.
+#[derive(Clone, Debug, Eq, PartialEq)]
+pub struct Properties(Box<PropertiesI>);
+
+/// The property definition. It is split out so that we can box it, and
+/// thereby make `Properties` use less stack size. This is kind-of important
+/// because every HIR value has a `Properties` attached to it.
+///
+/// This does have the unfortunate consequence that creating any HIR value
+/// always leads to at least one alloc for properties, but this is generally
+/// true anyway (for pretty much all HirKinds except for look-arounds).
+#[derive(Clone, Debug, Eq, PartialEq)]
+struct PropertiesI {
+    minimum_len: Option<usize>,
+    maximum_len: Option<usize>,
+    look_set: LookSet,
+    look_set_prefix: LookSet,
+    look_set_suffix: LookSet,
+    look_set_prefix_any: LookSet,
+    look_set_suffix_any: LookSet,
+    utf8: bool,
+    explicit_captures_len: usize,
+    static_explicit_captures_len: Option<usize>,
+    literal: bool,
+    alternation_literal: bool,
+}
+
+impl Properties {
+    /// Returns the length (in bytes) of the smallest string matched by this
+    /// HIR.
+    ///
+    /// A return value of `0` is possible and occurs when the HIR can match an
+    /// empty string.
+    ///
+    /// `None` is returned when there is no minimum length. This occurs in
+    /// precisely the cases where the HIR matches nothing. i.e., The language
+    /// the regex matches is empty. An example of such a regex is `\P{any}`.
+    #[inline]
+    pub fn minimum_len(&self) -> Option<usize> {
+        self.0.minimum_len
+    }
+
+    /// Returns the length (in bytes) of the longest string matched by this
+    /// HIR.
+    ///
+    /// A return value of `0` is possible and occurs when nothing longer than
+    /// the empty string is in the language described by this HIR.
+    ///
+    /// `None` is returned when there is no longest matching string. This
+    /// occurs when the HIR matches nothing or when there is no upper bound on
+    /// the length of matching strings. Examples of such regexes are `\P{any}`
+    /// (matches nothing) and `a+` (has no upper bound).
+    #[inline]
+    pub fn maximum_len(&self) -> Option<usize> {
+        self.0.maximum_len
+    }
+
+    /// Returns a set of all look-around assertions that appear at least once
+    /// in this HIR value.
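+    ///
+    /// # Example
+    ///
+    /// A minimal sketch, assuming the crate-level `parse` helper used by the
+    /// other examples in this module:
+    ///
+    /// ```
+    /// use regex_syntax::{hir::Look, parse};
+    ///
+    /// let hir = parse(r"^\w+$")?;
+    /// let set = hir.properties().look_set();
+    /// assert!(set.contains(Look::Start));
+    /// assert!(set.contains(Look::End));
+    ///
+    /// # Ok::<(), Box<dyn std::error::Error>>(())
+    /// ```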
+ #[inline] + pub fn look_set(&self) -> LookSet { + self.0.look_set + } + + /// Returns a set of all look-around assertions that appear as a prefix for + /// this HIR value. That is, the set returned corresponds to the set of + /// assertions that must be passed before matching any bytes in a haystack. + /// + /// For example, `hir.look_set_prefix().contains(Look::Start)` returns true + /// if and only if the HIR is fully anchored at the start. + #[inline] + pub fn look_set_prefix(&self) -> LookSet { + self.0.look_set_prefix + } + + /// Returns a set of all look-around assertions that appear as a _possible_ + /// prefix for this HIR value. That is, the set returned corresponds to the + /// set of assertions that _may_ be passed before matching any bytes in a + /// haystack. + /// + /// For example, `hir.look_set_prefix_any().contains(Look::Start)` returns + /// true if and only if it's possible for the regex to match through a + /// anchored assertion before consuming any input. + #[inline] + pub fn look_set_prefix_any(&self) -> LookSet { + self.0.look_set_prefix_any + } + + /// Returns a set of all look-around assertions that appear as a suffix for + /// this HIR value. That is, the set returned corresponds to the set of + /// assertions that must be passed in order to be considered a match after + /// all other consuming HIR expressions. + /// + /// For example, `hir.look_set_suffix().contains(Look::End)` returns true + /// if and only if the HIR is fully anchored at the end. + #[inline] + pub fn look_set_suffix(&self) -> LookSet { + self.0.look_set_suffix + } + + /// Returns a set of all look-around assertions that appear as a _possible_ + /// suffix for this HIR value. That is, the set returned corresponds to the + /// set of assertions that _may_ be passed before matching any bytes in a + /// haystack. + /// + /// For example, `hir.look_set_suffix_any().contains(Look::End)` returns + /// true if and only if it's possible for the regex to match through a + /// anchored assertion at the end of a match without consuming any input. + #[inline] + pub fn look_set_suffix_any(&self) -> LookSet { + self.0.look_set_suffix_any + } + + /// Return true if and only if the corresponding HIR will always match + /// valid UTF-8. + /// + /// When this returns false, then it is possible for this HIR expression to + /// match invalid UTF-8, including by matching between the code units of + /// a single UTF-8 encoded codepoint. + /// + /// Note that this returns true even when the corresponding HIR can match + /// the empty string. Since an empty string can technically appear between + /// UTF-8 code units, it is possible for a match to be reported that splits + /// a codepoint which could in turn be considered matching invalid UTF-8. + /// However, it is generally assumed that such empty matches are handled + /// specially by the search routine if it is absolutely required that + /// matches not split a codepoint. + /// + /// # Example + /// + /// This code example shows the UTF-8 property of a variety of patterns. + /// + /// ``` + /// use regex_syntax::{ParserBuilder, parse}; + /// + /// // Examples of 'is_utf8() == true'. 
+ /// assert!(parse(r"a")?.properties().is_utf8()); + /// assert!(parse(r"[^a]")?.properties().is_utf8()); + /// assert!(parse(r".")?.properties().is_utf8()); + /// assert!(parse(r"\W")?.properties().is_utf8()); + /// assert!(parse(r"\b")?.properties().is_utf8()); + /// assert!(parse(r"\B")?.properties().is_utf8()); + /// assert!(parse(r"(?-u)\b")?.properties().is_utf8()); + /// assert!(parse(r"(?-u)\B")?.properties().is_utf8()); + /// // Unicode mode is enabled by default, and in + /// // that mode, all \x hex escapes are treated as + /// // codepoints. So this actually matches the UTF-8 + /// // encoding of U+00FF. + /// assert!(parse(r"\xFF")?.properties().is_utf8()); + /// + /// // Now we show examples of 'is_utf8() == false'. + /// // The only way to do this is to force the parser + /// // to permit invalid UTF-8, otherwise all of these + /// // would fail to parse! + /// let parse = |pattern| { + /// ParserBuilder::new().utf8(false).build().parse(pattern) + /// }; + /// assert!(!parse(r"(?-u)[^a]")?.properties().is_utf8()); + /// assert!(!parse(r"(?-u).")?.properties().is_utf8()); + /// assert!(!parse(r"(?-u)\W")?.properties().is_utf8()); + /// // Conversely to the equivalent example above, + /// // when Unicode mode is disabled, \x hex escapes + /// // are treated as their raw byte values. + /// assert!(!parse(r"(?-u)\xFF")?.properties().is_utf8()); + /// // Note that just because we disabled UTF-8 in the + /// // parser doesn't mean we still can't use Unicode. + /// // It is enabled by default, so \xFF is still + /// // equivalent to matching the UTF-8 encoding of + /// // U+00FF by default. + /// assert!(parse(r"\xFF")?.properties().is_utf8()); + /// // Even though we use raw bytes that individually + /// // are not valid UTF-8, when combined together, the + /// // overall expression *does* match valid UTF-8! + /// assert!(parse(r"(?-u)\xE2\x98\x83")?.properties().is_utf8()); + /// + /// # Ok::<(), Box>(()) + /// ``` + #[inline] + pub fn is_utf8(&self) -> bool { + self.0.utf8 + } + + /// Returns the total number of explicit capturing groups in the + /// corresponding HIR. + /// + /// Note that this does not include the implicit capturing group + /// corresponding to the entire match that is typically included by regex + /// engines. + /// + /// # Example + /// + /// This method will return `0` for `a` and `1` for `(a)`: + /// + /// ``` + /// use regex_syntax::parse; + /// + /// assert_eq!(0, parse("a")?.properties().explicit_captures_len()); + /// assert_eq!(1, parse("(a)")?.properties().explicit_captures_len()); + /// + /// # Ok::<(), Box>(()) + /// ``` + #[inline] + pub fn explicit_captures_len(&self) -> usize { + self.0.explicit_captures_len + } + + /// Returns the total number of explicit capturing groups that appear in + /// every possible match. + /// + /// If the number of capture groups can vary depending on the match, then + /// this returns `None`. That is, a value is only returned when the number + /// of matching groups is invariant or "static." + /// + /// Note that this does not include the implicit capturing group + /// corresponding to the entire match. + /// + /// # Example + /// + /// This shows a few cases where a static number of capture groups is + /// available and a few cases where it is not. 
+ /// + /// ``` + /// use regex_syntax::parse; + /// + /// let len = |pattern| { + /// parse(pattern).map(|h| { + /// h.properties().static_explicit_captures_len() + /// }) + /// }; + /// + /// assert_eq!(Some(0), len("a")?); + /// assert_eq!(Some(1), len("(a)")?); + /// assert_eq!(Some(1), len("(a)|(b)")?); + /// assert_eq!(Some(2), len("(a)(b)|(c)(d)")?); + /// assert_eq!(None, len("(a)|b")?); + /// assert_eq!(None, len("a|(b)")?); + /// assert_eq!(None, len("(b)*")?); + /// assert_eq!(Some(1), len("(b)+")?); + /// + /// # Ok::<(), Box>(()) + /// ``` + #[inline] + pub fn static_explicit_captures_len(&self) -> Option { + self.0.static_explicit_captures_len + } + + /// Return true if and only if this HIR is a simple literal. This is + /// only true when this HIR expression is either itself a `Literal` or a + /// concatenation of only `Literal`s. + /// + /// For example, `f` and `foo` are literals, but `f+`, `(foo)`, `foo()` and + /// the empty string are not (even though they contain sub-expressions that + /// are literals). + #[inline] + pub fn is_literal(&self) -> bool { + self.0.literal + } + + /// Return true if and only if this HIR is either a simple literal or an + /// alternation of simple literals. This is only + /// true when this HIR expression is either itself a `Literal` or a + /// concatenation of only `Literal`s or an alternation of only `Literal`s. + /// + /// For example, `f`, `foo`, `a|b|c`, and `foo|bar|baz` are alternation + /// literals, but `f+`, `(foo)`, `foo()`, and the empty pattern are not + /// (even though that contain sub-expressions that are literals). + #[inline] + pub fn is_alternation_literal(&self) -> bool { + self.0.alternation_literal + } + + /// Returns the total amount of heap memory usage, in bytes, used by this + /// `Properties` value. + #[inline] + pub fn memory_usage(&self) -> usize { + core::mem::size_of::() + } + + /// Returns a new set of properties that corresponds to the union of the + /// iterator of properties given. + /// + /// This is useful when one has multiple `Hir` expressions and wants + /// to combine them into a single alternation without constructing the + /// corresponding `Hir`. This routine provides a way of combining the + /// properties of each `Hir` expression into one set of properties + /// representing the union of those expressions. + /// + /// # Example: union with HIRs that never match + /// + /// This example shows that unioning properties together with one that + /// represents a regex that never matches will "poison" certain attributes, + /// like the minimum and maximum lengths. + /// + /// ``` + /// use regex_syntax::{hir::Properties, parse}; + /// + /// let hir1 = parse("ab?c?")?; + /// assert_eq!(Some(1), hir1.properties().minimum_len()); + /// assert_eq!(Some(3), hir1.properties().maximum_len()); + /// + /// let hir2 = parse(r"[a&&b]")?; + /// assert_eq!(None, hir2.properties().minimum_len()); + /// assert_eq!(None, hir2.properties().maximum_len()); + /// + /// let hir3 = parse(r"wxy?z?")?; + /// assert_eq!(Some(2), hir3.properties().minimum_len()); + /// assert_eq!(Some(4), hir3.properties().maximum_len()); + /// + /// let unioned = Properties::union([ + /// hir1.properties(), + /// hir2.properties(), + /// hir3.properties(), + /// ]); + /// assert_eq!(None, unioned.minimum_len()); + /// assert_eq!(None, unioned.maximum_len()); + /// + /// # Ok::<(), Box>(()) + /// ``` + /// + /// The maximum length can also be "poisoned" by a pattern that has no + /// upper bound on the length of a match. 
The minimum length remains
+    /// unaffected:
+    ///
+    /// ```
+    /// use regex_syntax::{hir::Properties, parse};
+    ///
+    /// let hir1 = parse("ab?c?")?;
+    /// assert_eq!(Some(1), hir1.properties().minimum_len());
+    /// assert_eq!(Some(3), hir1.properties().maximum_len());
+    ///
+    /// let hir2 = parse(r"a+")?;
+    /// assert_eq!(Some(1), hir2.properties().minimum_len());
+    /// assert_eq!(None, hir2.properties().maximum_len());
+    ///
+    /// let hir3 = parse(r"wxy?z?")?;
+    /// assert_eq!(Some(2), hir3.properties().minimum_len());
+    /// assert_eq!(Some(4), hir3.properties().maximum_len());
+    ///
+    /// let unioned = Properties::union([
+    ///     hir1.properties(),
+    ///     hir2.properties(),
+    ///     hir3.properties(),
+    /// ]);
+    /// assert_eq!(Some(1), unioned.minimum_len());
+    /// assert_eq!(None, unioned.maximum_len());
+    ///
+    /// # Ok::<(), Box<dyn std::error::Error>>(())
+    /// ```
+    pub fn union<I, P>(props: I) -> Properties
+    where
+        I: IntoIterator<Item = P>,
+        P: core::borrow::Borrow<Properties>,
+    {
+        let mut it = props.into_iter().peekable();
+        // While empty alternations aren't possible, we still behave as if they
+        // are. When we have an empty alternate, then clearly the look-around
+        // prefix and suffix is empty. Otherwise, it is the intersection of all
+        // prefixes and suffixes (respectively) of the branches.
+        let fix = if it.peek().is_none() {
+            LookSet::empty()
+        } else {
+            LookSet::full()
+        };
+        // And also, an empty alternate means we have 0 static capture groups,
+        // but we otherwise start with the number corresponding to the first
+        // alternate. If any subsequent alternate has a different number of
+        // static capture groups, then we overall have a variation and not a
+        // static number of groups.
+        let static_explicit_captures_len =
+            it.peek().and_then(|p| p.borrow().static_explicit_captures_len());
+        // The base case is an empty alternation, which matches nothing.
+        // Note though that empty alternations aren't possible, because the
+        // Hir::alternation smart constructor rewrites those as empty character
+        // classes.
+        let mut props = PropertiesI {
+            minimum_len: None,
+            maximum_len: None,
+            look_set: LookSet::empty(),
+            look_set_prefix: fix,
+            look_set_suffix: fix,
+            look_set_prefix_any: LookSet::empty(),
+            look_set_suffix_any: LookSet::empty(),
+            utf8: true,
+            explicit_captures_len: 0,
+            static_explicit_captures_len,
+            literal: false,
+            alternation_literal: true,
+        };
+        let (mut min_poisoned, mut max_poisoned) = (false, false);
+        // Handle properties that need to visit every child hir.
+ for prop in it { + let p = prop.borrow(); + props.look_set.set_union(p.look_set()); + props.look_set_prefix.set_intersect(p.look_set_prefix()); + props.look_set_suffix.set_intersect(p.look_set_suffix()); + props.look_set_prefix_any.set_union(p.look_set_prefix_any()); + props.look_set_suffix_any.set_union(p.look_set_suffix_any()); + props.utf8 = props.utf8 && p.is_utf8(); + props.explicit_captures_len = props + .explicit_captures_len + .saturating_add(p.explicit_captures_len()); + if props.static_explicit_captures_len + != p.static_explicit_captures_len() + { + props.static_explicit_captures_len = None; + } + props.alternation_literal = + props.alternation_literal && p.is_literal(); + if !min_poisoned { + if let Some(xmin) = p.minimum_len() { + if props.minimum_len.map_or(true, |pmin| xmin < pmin) { + props.minimum_len = Some(xmin); + } + } else { + props.minimum_len = None; + min_poisoned = true; + } + } + if !max_poisoned { + if let Some(xmax) = p.maximum_len() { + if props.maximum_len.map_or(true, |pmax| xmax > pmax) { + props.maximum_len = Some(xmax); + } + } else { + props.maximum_len = None; + max_poisoned = true; + } + } + } + Properties(Box::new(props)) + } +} + +impl Properties { + /// Create a new set of HIR properties for an empty regex. + fn empty() -> Properties { + let inner = PropertiesI { + minimum_len: Some(0), + maximum_len: Some(0), + look_set: LookSet::empty(), + look_set_prefix: LookSet::empty(), + look_set_suffix: LookSet::empty(), + look_set_prefix_any: LookSet::empty(), + look_set_suffix_any: LookSet::empty(), + // It is debatable whether an empty regex always matches at valid + // UTF-8 boundaries. Strictly speaking, at a byte oriented view, + // it is clearly false. There are, for example, many empty strings + // between the bytes encoding a '☃'. + // + // However, when Unicode mode is enabled, the fundamental atom + // of matching is really a codepoint. And in that scenario, an + // empty regex is defined to only match at valid UTF-8 boundaries + // and to never split a codepoint. It just so happens that this + // enforcement is somewhat tricky to do for regexes that match + // the empty string inside regex engines themselves. It usually + // requires some layer above the regex engine to filter out such + // matches. + // + // In any case, 'true' is really the only coherent option. If it + // were false, for example, then 'a*' would also need to be false + // since it too can match the empty string. + utf8: true, + explicit_captures_len: 0, + static_explicit_captures_len: Some(0), + literal: false, + alternation_literal: false, + }; + Properties(Box::new(inner)) + } + + /// Create a new set of HIR properties for a literal regex. + fn literal(lit: &Literal) -> Properties { + let inner = PropertiesI { + minimum_len: Some(lit.0.len()), + maximum_len: Some(lit.0.len()), + look_set: LookSet::empty(), + look_set_prefix: LookSet::empty(), + look_set_suffix: LookSet::empty(), + look_set_prefix_any: LookSet::empty(), + look_set_suffix_any: LookSet::empty(), + utf8: core::str::from_utf8(&lit.0).is_ok(), + explicit_captures_len: 0, + static_explicit_captures_len: Some(0), + literal: true, + alternation_literal: true, + }; + Properties(Box::new(inner)) + } + + /// Create a new set of HIR properties for a character class. 
+ fn class(class: &Class) -> Properties { + let inner = PropertiesI { + minimum_len: class.minimum_len(), + maximum_len: class.maximum_len(), + look_set: LookSet::empty(), + look_set_prefix: LookSet::empty(), + look_set_suffix: LookSet::empty(), + look_set_prefix_any: LookSet::empty(), + look_set_suffix_any: LookSet::empty(), + utf8: class.is_utf8(), + explicit_captures_len: 0, + static_explicit_captures_len: Some(0), + literal: false, + alternation_literal: false, + }; + Properties(Box::new(inner)) + } + + /// Create a new set of HIR properties for a look-around assertion. + fn look(look: Look) -> Properties { + let inner = PropertiesI { + minimum_len: Some(0), + maximum_len: Some(0), + look_set: LookSet::singleton(look), + look_set_prefix: LookSet::singleton(look), + look_set_suffix: LookSet::singleton(look), + look_set_prefix_any: LookSet::singleton(look), + look_set_suffix_any: LookSet::singleton(look), + // This requires a little explanation. Basically, we don't consider + // matching an empty string to be equivalent to matching invalid + // UTF-8, even though technically matching every empty string will + // split the UTF-8 encoding of a single codepoint when treating a + // UTF-8 encoded string as a sequence of bytes. Our defense here is + // that in such a case, a codepoint should logically be treated as + // the fundamental atom for matching, and thus the only valid match + // points are between codepoints and not bytes. + // + // More practically, this is true here because it's also true + // for 'Hir::empty()', otherwise something like 'a*' would be + // considered to match invalid UTF-8. That in turn makes this + // property borderline useless. + utf8: true, + explicit_captures_len: 0, + static_explicit_captures_len: Some(0), + literal: false, + alternation_literal: false, + }; + Properties(Box::new(inner)) + } + + /// Create a new set of HIR properties for a repetition. + fn repetition(rep: &Repetition) -> Properties { + let p = rep.sub.properties(); + let minimum_len = p.minimum_len().map(|child_min| { + let rep_min = usize::try_from(rep.min).unwrap_or(usize::MAX); + child_min.saturating_mul(rep_min) + }); + let maximum_len = rep.max.and_then(|rep_max| { + let rep_max = usize::try_from(rep_max).ok()?; + let child_max = p.maximum_len()?; + child_max.checked_mul(rep_max) + }); + + let mut inner = PropertiesI { + minimum_len, + maximum_len, + look_set: p.look_set(), + look_set_prefix: LookSet::empty(), + look_set_suffix: LookSet::empty(), + look_set_prefix_any: p.look_set_prefix_any(), + look_set_suffix_any: p.look_set_suffix_any(), + utf8: p.is_utf8(), + explicit_captures_len: p.explicit_captures_len(), + static_explicit_captures_len: p.static_explicit_captures_len(), + literal: false, + alternation_literal: false, + }; + // If the repetition operator can match the empty string, then its + // lookset prefix and suffixes themselves remain empty since they are + // no longer required to match. + if rep.min > 0 { + inner.look_set_prefix = p.look_set_prefix(); + inner.look_set_suffix = p.look_set_suffix(); + } + // If the static captures len of the sub-expression is not known or + // is greater than zero, then it automatically propagates to the + // repetition, regardless of the repetition. Otherwise, it might + // change, but only when the repetition can match 0 times. + if rep.min == 0 + && inner.static_explicit_captures_len.map_or(false, |len| len > 0) + { + // If we require a match 0 times, then our captures len is + // guaranteed to be zero. 
Otherwise, if we *can* match the empty + // string, then it's impossible to know how many captures will be + // in the resulting match. + if rep.max == Some(0) { + inner.static_explicit_captures_len = Some(0); + } else { + inner.static_explicit_captures_len = None; + } + } + Properties(Box::new(inner)) + } + + /// Create a new set of HIR properties for a capture. + fn capture(capture: &Capture) -> Properties { + let p = capture.sub.properties(); + Properties(Box::new(PropertiesI { + explicit_captures_len: p.explicit_captures_len().saturating_add(1), + static_explicit_captures_len: p + .static_explicit_captures_len() + .map(|len| len.saturating_add(1)), + literal: false, + alternation_literal: false, + ..*p.0.clone() + })) + } + + /// Create a new set of HIR properties for a concatenation. + fn concat(concat: &[Hir]) -> Properties { + // The base case is an empty concatenation, which matches the empty + // string. Note though that empty concatenations aren't possible, + // because the Hir::concat smart constructor rewrites those as + // Hir::empty. + let mut props = PropertiesI { + minimum_len: Some(0), + maximum_len: Some(0), + look_set: LookSet::empty(), + look_set_prefix: LookSet::empty(), + look_set_suffix: LookSet::empty(), + look_set_prefix_any: LookSet::empty(), + look_set_suffix_any: LookSet::empty(), + utf8: true, + explicit_captures_len: 0, + static_explicit_captures_len: Some(0), + literal: true, + alternation_literal: true, + }; + // Handle properties that need to visit every child hir. + for x in concat.iter() { + let p = x.properties(); + props.look_set.set_union(p.look_set()); + props.utf8 = props.utf8 && p.is_utf8(); + props.explicit_captures_len = props + .explicit_captures_len + .saturating_add(p.explicit_captures_len()); + props.static_explicit_captures_len = p + .static_explicit_captures_len() + .and_then(|len1| { + Some((len1, props.static_explicit_captures_len?)) + }) + .and_then(|(len1, len2)| Some(len1.saturating_add(len2))); + props.literal = props.literal && p.is_literal(); + props.alternation_literal = + props.alternation_literal && p.is_alternation_literal(); + if let Some(minimum_len) = props.minimum_len { + match p.minimum_len() { + None => props.minimum_len = None, + Some(len) => { + // We use saturating arithmetic here because the + // minimum is just a lower bound. We can't go any + // higher than what our number types permit. + props.minimum_len = + Some(minimum_len.saturating_add(len)); + } + } + } + if let Some(maximum_len) = props.maximum_len { + match p.maximum_len() { + None => props.maximum_len = None, + Some(len) => { + props.maximum_len = maximum_len.checked_add(len) + } + } + } + } + // Handle the prefix properties, which only requires visiting + // child exprs until one matches more than the empty string. + let mut it = concat.iter(); + while let Some(x) = it.next() { + props.look_set_prefix.set_union(x.properties().look_set_prefix()); + props + .look_set_prefix_any + .set_union(x.properties().look_set_prefix_any()); + if x.properties().maximum_len().map_or(true, |x| x > 0) { + break; + } + } + // Same thing for the suffix properties, but in reverse. + let mut it = concat.iter().rev(); + while let Some(x) = it.next() { + props.look_set_suffix.set_union(x.properties().look_set_suffix()); + props + .look_set_suffix_any + .set_union(x.properties().look_set_suffix_any()); + if x.properties().maximum_len().map_or(true, |x| x > 0) { + break; + } + } + Properties(Box::new(props)) + } + + /// Create a new set of HIR properties for a concatenation. 
+ fn alternation(alts: &[Hir]) -> Properties { + Properties::union(alts.iter().map(|hir| hir.properties())) + } +} + +/// A set of look-around assertions. +/// +/// This is useful for efficiently tracking look-around assertions. For +/// example, an [`Hir`] provides properties that return `LookSet`s. +#[derive(Clone, Copy, Default, Eq, PartialEq)] +pub struct LookSet { + /// The underlying representation this set is exposed to make it possible + /// to store it somewhere efficiently. The representation is that + /// of a bitset, where each assertion occupies bit `i` where `i = + /// Look::as_repr()`. + /// + /// Note that users of this internal representation must permit the full + /// range of `u16` values to be represented. For example, even if the + /// current implementation only makes use of the 10 least significant bits, + /// it may use more bits in a future semver compatible release. + pub bits: u32, +} + +impl LookSet { + /// Create an empty set of look-around assertions. + #[inline] + pub fn empty() -> LookSet { + LookSet { bits: 0 } + } + + /// Create a full set of look-around assertions. + /// + /// This set contains all possible look-around assertions. + #[inline] + pub fn full() -> LookSet { + LookSet { bits: !0 } + } + + /// Create a look-around set containing the look-around assertion given. + /// + /// This is a convenience routine for creating an empty set and inserting + /// one look-around assertions. + #[inline] + pub fn singleton(look: Look) -> LookSet { + LookSet::empty().insert(look) + } + + /// Returns the total number of look-around assertions in this set. + #[inline] + pub fn len(self) -> usize { + // OK because max value always fits in a u8, which in turn always + // fits in a usize, regardless of target. + usize::try_from(self.bits.count_ones()).unwrap() + } + + /// Returns true if and only if this set is empty. + #[inline] + pub fn is_empty(self) -> bool { + self.len() == 0 + } + + /// Returns true if and only if the given look-around assertion is in this + /// set. + #[inline] + pub fn contains(self, look: Look) -> bool { + self.bits & look.as_repr() != 0 + } + + /// Returns true if and only if this set contains any anchor assertions. + /// This includes both "start/end of haystack" and "start/end of line." + #[inline] + pub fn contains_anchor(&self) -> bool { + self.contains_anchor_haystack() || self.contains_anchor_line() + } + + /// Returns true if and only if this set contains any "start/end of + /// haystack" anchors. This doesn't include "start/end of line" anchors. + #[inline] + pub fn contains_anchor_haystack(&self) -> bool { + self.contains(Look::Start) || self.contains(Look::End) + } + + /// Returns true if and only if this set contains any "start/end of line" + /// anchors. This doesn't include "start/end of haystack" anchors. This + /// includes both `\n` line anchors and CRLF (`\r\n`) aware line anchors. + #[inline] + pub fn contains_anchor_line(&self) -> bool { + self.contains(Look::StartLF) + || self.contains(Look::EndLF) + || self.contains(Look::StartCRLF) + || self.contains(Look::EndCRLF) + } + + /// Returns true if and only if this set contains any "start/end of line" + /// anchors that only treat `\n` as line terminators. This does not include + /// haystack anchors or CRLF aware line anchors. + #[inline] + pub fn contains_anchor_lf(&self) -> bool { + self.contains(Look::StartLF) || self.contains(Look::EndLF) + } + + /// Returns true if and only if this set contains any "start/end of line" + /// anchors that are CRLF-aware. 
This doesn't include "start/end of + /// haystack" or "start/end of line-feed" anchors. + #[inline] + pub fn contains_anchor_crlf(&self) -> bool { + self.contains(Look::StartCRLF) || self.contains(Look::EndCRLF) + } + + /// Returns true if and only if this set contains any word boundary or + /// negated word boundary assertions. This include both Unicode and ASCII + /// word boundaries. + #[inline] + pub fn contains_word(self) -> bool { + self.contains_word_unicode() || self.contains_word_ascii() + } + + /// Returns true if and only if this set contains any Unicode word boundary + /// or negated Unicode word boundary assertions. + #[inline] + pub fn contains_word_unicode(self) -> bool { + self.contains(Look::WordUnicode) + || self.contains(Look::WordUnicodeNegate) + || self.contains(Look::WordStartUnicode) + || self.contains(Look::WordEndUnicode) + || self.contains(Look::WordStartHalfUnicode) + || self.contains(Look::WordEndHalfUnicode) + } + + /// Returns true if and only if this set contains any ASCII word boundary + /// or negated ASCII word boundary assertions. + #[inline] + pub fn contains_word_ascii(self) -> bool { + self.contains(Look::WordAscii) + || self.contains(Look::WordAsciiNegate) + || self.contains(Look::WordStartAscii) + || self.contains(Look::WordEndAscii) + || self.contains(Look::WordStartHalfAscii) + || self.contains(Look::WordEndHalfAscii) + } + + /// Returns an iterator over all of the look-around assertions in this set. + #[inline] + pub fn iter(self) -> LookSetIter { + LookSetIter { set: self } + } + + /// Return a new set that is equivalent to the original, but with the given + /// assertion added to it. If the assertion is already in the set, then the + /// returned set is equivalent to the original. + #[inline] + pub fn insert(self, look: Look) -> LookSet { + LookSet { bits: self.bits | look.as_repr() } + } + + /// Updates this set in place with the result of inserting the given + /// assertion into this set. + #[inline] + pub fn set_insert(&mut self, look: Look) { + *self = self.insert(look); + } + + /// Return a new set that is equivalent to the original, but with the given + /// assertion removed from it. If the assertion is not in the set, then the + /// returned set is equivalent to the original. + #[inline] + pub fn remove(self, look: Look) -> LookSet { + LookSet { bits: self.bits & !look.as_repr() } + } + + /// Updates this set in place with the result of removing the given + /// assertion from this set. + #[inline] + pub fn set_remove(&mut self, look: Look) { + *self = self.remove(look); + } + + /// Returns a new set that is the result of subtracting the given set from + /// this set. + #[inline] + pub fn subtract(self, other: LookSet) -> LookSet { + LookSet { bits: self.bits & !other.bits } + } + + /// Updates this set in place with the result of subtracting the given set + /// from this set. + #[inline] + pub fn set_subtract(&mut self, other: LookSet) { + *self = self.subtract(other); + } + + /// Returns a new set that is the union of this and the one given. + #[inline] + pub fn union(self, other: LookSet) -> LookSet { + LookSet { bits: self.bits | other.bits } + } + + /// Updates this set in place with the result of unioning it with the one + /// given. + #[inline] + pub fn set_union(&mut self, other: LookSet) { + *self = self.union(other); + } + + /// Returns a new set that is the intersection of this and the one given. 
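+    // A quick illustration of how the operations above compose. Because the
+    // set is a plain bitset, `insert`/`remove`/`union`/`subtract`/`contains`
+    // are each a single bitwise operation on `bits`:
+    //
+    //     let a = LookSet::singleton(Look::Start);
+    //     let b = a.insert(Look::WordUnicode);
+    //     assert!(b.contains(Look::Start) && b.contains(Look::WordUnicode));
+    //     assert_eq!(1, b.subtract(a).len());
+    //     assert_eq!(b.bits, a.bits | LookSet::singleton(Look::WordUnicode).bits);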
+ #[inline] + pub fn intersect(self, other: LookSet) -> LookSet { + LookSet { bits: self.bits & other.bits } + } + + /// Updates this set in place with the result of intersecting it with the + /// one given. + #[inline] + pub fn set_intersect(&mut self, other: LookSet) { + *self = self.intersect(other); + } + + /// Return a `LookSet` from the slice given as a native endian 32-bit + /// integer. + /// + /// # Panics + /// + /// This panics if `slice.len() < 4`. + #[inline] + pub fn read_repr(slice: &[u8]) -> LookSet { + let bits = u32::from_ne_bytes(slice[..4].try_into().unwrap()); + LookSet { bits } + } + + /// Write a `LookSet` as a native endian 32-bit integer to the beginning + /// of the slice given. + /// + /// # Panics + /// + /// This panics if `slice.len() < 4`. + #[inline] + pub fn write_repr(self, slice: &mut [u8]) { + let raw = self.bits.to_ne_bytes(); + slice[0] = raw[0]; + slice[1] = raw[1]; + slice[2] = raw[2]; + slice[3] = raw[3]; + } +} + +impl core::fmt::Debug for LookSet { + fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { + if self.is_empty() { + return write!(f, "∅"); + } + for look in self.iter() { + write!(f, "{}", look.as_char())?; + } + Ok(()) + } +} + +/// An iterator over all look-around assertions in a [`LookSet`]. +/// +/// This iterator is created by [`LookSet::iter`]. +#[derive(Clone, Debug)] +pub struct LookSetIter { + set: LookSet, +} + +impl Iterator for LookSetIter { + type Item = Look; + + #[inline] + fn next(&mut self) -> Option { + if self.set.is_empty() { + return None; + } + // We'll never have more than u8::MAX distinct look-around assertions, + // so 'bit' will always fit into a u16. + let bit = u16::try_from(self.set.bits.trailing_zeros()).unwrap(); + let look = Look::from_repr(1 << bit)?; + self.set = self.set.remove(look); + Some(look) + } +} + +/// Given a sequence of HIR values where each value corresponds to a Unicode +/// class (or an all-ASCII byte class), return a single Unicode class +/// corresponding to the union of the classes found. +fn class_chars(hirs: &[Hir]) -> Option { + let mut cls = ClassUnicode::new(vec![]); + for hir in hirs.iter() { + match *hir.kind() { + HirKind::Class(Class::Unicode(ref cls2)) => { + cls.union(cls2); + } + HirKind::Class(Class::Bytes(ref cls2)) => { + cls.union(&cls2.to_unicode_class()?); + } + _ => return None, + }; + } + Some(Class::Unicode(cls)) +} + +/// Given a sequence of HIR values where each value corresponds to a byte class +/// (or an all-ASCII Unicode class), return a single byte class corresponding +/// to the union of the classes found. +fn class_bytes(hirs: &[Hir]) -> Option { + let mut cls = ClassBytes::new(vec![]); + for hir in hirs.iter() { + match *hir.kind() { + HirKind::Class(Class::Unicode(ref cls2)) => { + cls.union(&cls2.to_byte_class()?); + } + HirKind::Class(Class::Bytes(ref cls2)) => { + cls.union(cls2); + } + _ => return None, + }; + } + Some(Class::Bytes(cls)) +} + +/// Given a sequence of HIR values where each value corresponds to a literal +/// that is a single `char`, return that sequence of `char`s. Otherwise return +/// None. No deduplication is done. 
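+// For example (illustrative): a slice of literal HIRs for "a" and "b" yields
+// Some(vec!['a', 'b']); a single literal "ab" yields None, because that
+// literal decodes to more than one char; any non-literal HIR also yields None.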
+fn singleton_chars(hirs: &[Hir]) -> Option> { + let mut singletons = vec![]; + for hir in hirs.iter() { + let literal = match *hir.kind() { + HirKind::Literal(Literal(ref bytes)) => bytes, + _ => return None, + }; + let ch = match crate::debug::utf8_decode(literal) { + None => return None, + Some(Err(_)) => return None, + Some(Ok(ch)) => ch, + }; + if literal.len() != ch.len_utf8() { + return None; + } + singletons.push(ch); + } + Some(singletons) +} + +/// Given a sequence of HIR values where each value corresponds to a literal +/// that is a single byte, return that sequence of bytes. Otherwise return +/// None. No deduplication is done. +fn singleton_bytes(hirs: &[Hir]) -> Option> { + let mut singletons = vec![]; + for hir in hirs.iter() { + let literal = match *hir.kind() { + HirKind::Literal(Literal(ref bytes)) => bytes, + _ => return None, + }; + if literal.len() != 1 { + return None; + } + singletons.push(literal[0]); + } + Some(singletons) +} + +/// Looks for a common prefix in the list of alternation branches given. If one +/// is found, then an equivalent but (hopefully) simplified Hir is returned. +/// Otherwise, the original given list of branches is returned unmodified. +/// +/// This is not quite as good as it could be. Right now, it requires that +/// all branches are 'Concat' expressions. It also doesn't do well with +/// literals. For example, given 'foofoo|foobar', it will not refactor it to +/// 'foo(?:foo|bar)' because literals are flattened into their own special +/// concatenation. (One wonders if perhaps 'Literal' should be a single atom +/// instead of a string of bytes because of this. Otherwise, handling the +/// current representation in this routine will be pretty gnarly. Sigh.) +fn lift_common_prefix(hirs: Vec) -> Result> { + if hirs.len() <= 1 { + return Err(hirs); + } + let mut prefix = match hirs[0].kind() { + HirKind::Concat(ref xs) => &**xs, + _ => return Err(hirs), + }; + if prefix.is_empty() { + return Err(hirs); + } + for h in hirs.iter().skip(1) { + let concat = match h.kind() { + HirKind::Concat(ref xs) => xs, + _ => return Err(hirs), + }; + let common_len = prefix + .iter() + .zip(concat.iter()) + .take_while(|(x, y)| x == y) + .count(); + prefix = &prefix[..common_len]; + if prefix.is_empty() { + return Err(hirs); + } + } + let len = prefix.len(); + assert_ne!(0, len); + let mut prefix_concat = vec![]; + let mut suffix_alts = vec![]; + for h in hirs { + let mut concat = match h.into_kind() { + HirKind::Concat(xs) => xs, + // We required all sub-expressions to be + // concats above, so we're only here if we + // have a concat. 
+ _ => unreachable!(), + }; + suffix_alts.push(Hir::concat(concat.split_off(len))); + if prefix_concat.is_empty() { + prefix_concat = concat; + } + } + let mut concat = prefix_concat; + concat.push(Hir::alternation(suffix_alts)); + Ok(Hir::concat(concat)) +} + +#[cfg(test)] +mod tests { + use super::*; + + fn uclass(ranges: &[(char, char)]) -> ClassUnicode { + let ranges: Vec = ranges + .iter() + .map(|&(s, e)| ClassUnicodeRange::new(s, e)) + .collect(); + ClassUnicode::new(ranges) + } + + fn bclass(ranges: &[(u8, u8)]) -> ClassBytes { + let ranges: Vec = + ranges.iter().map(|&(s, e)| ClassBytesRange::new(s, e)).collect(); + ClassBytes::new(ranges) + } + + fn uranges(cls: &ClassUnicode) -> Vec<(char, char)> { + cls.iter().map(|x| (x.start(), x.end())).collect() + } + + #[cfg(feature = "unicode-case")] + fn ucasefold(cls: &ClassUnicode) -> ClassUnicode { + let mut cls_ = cls.clone(); + cls_.case_fold_simple(); + cls_ + } + + fn uunion(cls1: &ClassUnicode, cls2: &ClassUnicode) -> ClassUnicode { + let mut cls_ = cls1.clone(); + cls_.union(cls2); + cls_ + } + + fn uintersect(cls1: &ClassUnicode, cls2: &ClassUnicode) -> ClassUnicode { + let mut cls_ = cls1.clone(); + cls_.intersect(cls2); + cls_ + } + + fn udifference(cls1: &ClassUnicode, cls2: &ClassUnicode) -> ClassUnicode { + let mut cls_ = cls1.clone(); + cls_.difference(cls2); + cls_ + } + + fn usymdifference( + cls1: &ClassUnicode, + cls2: &ClassUnicode, + ) -> ClassUnicode { + let mut cls_ = cls1.clone(); + cls_.symmetric_difference(cls2); + cls_ + } + + fn unegate(cls: &ClassUnicode) -> ClassUnicode { + let mut cls_ = cls.clone(); + cls_.negate(); + cls_ + } + + fn branges(cls: &ClassBytes) -> Vec<(u8, u8)> { + cls.iter().map(|x| (x.start(), x.end())).collect() + } + + fn bcasefold(cls: &ClassBytes) -> ClassBytes { + let mut cls_ = cls.clone(); + cls_.case_fold_simple(); + cls_ + } + + fn bunion(cls1: &ClassBytes, cls2: &ClassBytes) -> ClassBytes { + let mut cls_ = cls1.clone(); + cls_.union(cls2); + cls_ + } + + fn bintersect(cls1: &ClassBytes, cls2: &ClassBytes) -> ClassBytes { + let mut cls_ = cls1.clone(); + cls_.intersect(cls2); + cls_ + } + + fn bdifference(cls1: &ClassBytes, cls2: &ClassBytes) -> ClassBytes { + let mut cls_ = cls1.clone(); + cls_.difference(cls2); + cls_ + } + + fn bsymdifference(cls1: &ClassBytes, cls2: &ClassBytes) -> ClassBytes { + let mut cls_ = cls1.clone(); + cls_.symmetric_difference(cls2); + cls_ + } + + fn bnegate(cls: &ClassBytes) -> ClassBytes { + let mut cls_ = cls.clone(); + cls_.negate(); + cls_ + } + + #[test] + fn class_range_canonical_unicode() { + let range = ClassUnicodeRange::new('\u{00FF}', '\0'); + assert_eq!('\0', range.start()); + assert_eq!('\u{00FF}', range.end()); + } + + #[test] + fn class_range_canonical_bytes() { + let range = ClassBytesRange::new(b'\xFF', b'\0'); + assert_eq!(b'\0', range.start()); + assert_eq!(b'\xFF', range.end()); + } + + #[test] + fn class_canonicalize_unicode() { + let cls = uclass(&[('a', 'c'), ('x', 'z')]); + let expected = vec![('a', 'c'), ('x', 'z')]; + assert_eq!(expected, uranges(&cls)); + + let cls = uclass(&[('x', 'z'), ('a', 'c')]); + let expected = vec![('a', 'c'), ('x', 'z')]; + assert_eq!(expected, uranges(&cls)); + + let cls = uclass(&[('x', 'z'), ('w', 'y')]); + let expected = vec![('w', 'z')]; + assert_eq!(expected, uranges(&cls)); + + let cls = uclass(&[ + ('c', 'f'), + ('a', 'g'), + ('d', 'j'), + ('a', 'c'), + ('m', 'p'), + ('l', 's'), + ]); + let expected = vec![('a', 'j'), ('l', 's')]; + assert_eq!(expected, uranges(&cls)); + + let cls = 
uclass(&[('x', 'z'), ('u', 'w')]); + let expected = vec![('u', 'z')]; + assert_eq!(expected, uranges(&cls)); + + let cls = uclass(&[('\x00', '\u{10FFFF}'), ('\x00', '\u{10FFFF}')]); + let expected = vec![('\x00', '\u{10FFFF}')]; + assert_eq!(expected, uranges(&cls)); + + let cls = uclass(&[('a', 'a'), ('b', 'b')]); + let expected = vec![('a', 'b')]; + assert_eq!(expected, uranges(&cls)); + } + + #[test] + fn class_canonicalize_bytes() { + let cls = bclass(&[(b'a', b'c'), (b'x', b'z')]); + let expected = vec![(b'a', b'c'), (b'x', b'z')]; + assert_eq!(expected, branges(&cls)); + + let cls = bclass(&[(b'x', b'z'), (b'a', b'c')]); + let expected = vec![(b'a', b'c'), (b'x', b'z')]; + assert_eq!(expected, branges(&cls)); + + let cls = bclass(&[(b'x', b'z'), (b'w', b'y')]); + let expected = vec![(b'w', b'z')]; + assert_eq!(expected, branges(&cls)); + + let cls = bclass(&[ + (b'c', b'f'), + (b'a', b'g'), + (b'd', b'j'), + (b'a', b'c'), + (b'm', b'p'), + (b'l', b's'), + ]); + let expected = vec![(b'a', b'j'), (b'l', b's')]; + assert_eq!(expected, branges(&cls)); + + let cls = bclass(&[(b'x', b'z'), (b'u', b'w')]); + let expected = vec![(b'u', b'z')]; + assert_eq!(expected, branges(&cls)); + + let cls = bclass(&[(b'\x00', b'\xFF'), (b'\x00', b'\xFF')]); + let expected = vec![(b'\x00', b'\xFF')]; + assert_eq!(expected, branges(&cls)); + + let cls = bclass(&[(b'a', b'a'), (b'b', b'b')]); + let expected = vec![(b'a', b'b')]; + assert_eq!(expected, branges(&cls)); + } + + #[test] + #[cfg(feature = "unicode-case")] + fn class_case_fold_unicode() { + let cls = uclass(&[ + ('C', 'F'), + ('A', 'G'), + ('D', 'J'), + ('A', 'C'), + ('M', 'P'), + ('L', 'S'), + ('c', 'f'), + ]); + let expected = uclass(&[ + ('A', 'J'), + ('L', 'S'), + ('a', 'j'), + ('l', 's'), + ('\u{17F}', '\u{17F}'), + ]); + assert_eq!(expected, ucasefold(&cls)); + + let cls = uclass(&[('A', 'Z')]); + let expected = uclass(&[ + ('A', 'Z'), + ('a', 'z'), + ('\u{17F}', '\u{17F}'), + ('\u{212A}', '\u{212A}'), + ]); + assert_eq!(expected, ucasefold(&cls)); + + let cls = uclass(&[('a', 'z')]); + let expected = uclass(&[ + ('A', 'Z'), + ('a', 'z'), + ('\u{17F}', '\u{17F}'), + ('\u{212A}', '\u{212A}'), + ]); + assert_eq!(expected, ucasefold(&cls)); + + let cls = uclass(&[('A', 'A'), ('_', '_')]); + let expected = uclass(&[('A', 'A'), ('_', '_'), ('a', 'a')]); + assert_eq!(expected, ucasefold(&cls)); + + let cls = uclass(&[('A', 'A'), ('=', '=')]); + let expected = uclass(&[('=', '='), ('A', 'A'), ('a', 'a')]); + assert_eq!(expected, ucasefold(&cls)); + + let cls = uclass(&[('\x00', '\x10')]); + assert_eq!(cls, ucasefold(&cls)); + + let cls = uclass(&[('k', 'k')]); + let expected = + uclass(&[('K', 'K'), ('k', 'k'), ('\u{212A}', '\u{212A}')]); + assert_eq!(expected, ucasefold(&cls)); + + let cls = uclass(&[('@', '@')]); + assert_eq!(cls, ucasefold(&cls)); + } + + #[test] + #[cfg(not(feature = "unicode-case"))] + fn class_case_fold_unicode_disabled() { + let mut cls = uclass(&[ + ('C', 'F'), + ('A', 'G'), + ('D', 'J'), + ('A', 'C'), + ('M', 'P'), + ('L', 'S'), + ('c', 'f'), + ]); + assert!(cls.try_case_fold_simple().is_err()); + } + + #[test] + #[should_panic] + #[cfg(not(feature = "unicode-case"))] + fn class_case_fold_unicode_disabled_panics() { + let mut cls = uclass(&[ + ('C', 'F'), + ('A', 'G'), + ('D', 'J'), + ('A', 'C'), + ('M', 'P'), + ('L', 'S'), + ('c', 'f'), + ]); + cls.case_fold_simple(); + } + + #[test] + fn class_case_fold_bytes() { + let cls = bclass(&[ + (b'C', b'F'), + (b'A', b'G'), + (b'D', b'J'), + (b'A', b'C'), + (b'M', b'P'), + 
(b'L', b'S'), + (b'c', b'f'), + ]); + let expected = + bclass(&[(b'A', b'J'), (b'L', b'S'), (b'a', b'j'), (b'l', b's')]); + assert_eq!(expected, bcasefold(&cls)); + + let cls = bclass(&[(b'A', b'Z')]); + let expected = bclass(&[(b'A', b'Z'), (b'a', b'z')]); + assert_eq!(expected, bcasefold(&cls)); + + let cls = bclass(&[(b'a', b'z')]); + let expected = bclass(&[(b'A', b'Z'), (b'a', b'z')]); + assert_eq!(expected, bcasefold(&cls)); + + let cls = bclass(&[(b'A', b'A'), (b'_', b'_')]); + let expected = bclass(&[(b'A', b'A'), (b'_', b'_'), (b'a', b'a')]); + assert_eq!(expected, bcasefold(&cls)); + + let cls = bclass(&[(b'A', b'A'), (b'=', b'=')]); + let expected = bclass(&[(b'=', b'='), (b'A', b'A'), (b'a', b'a')]); + assert_eq!(expected, bcasefold(&cls)); + + let cls = bclass(&[(b'\x00', b'\x10')]); + assert_eq!(cls, bcasefold(&cls)); + + let cls = bclass(&[(b'k', b'k')]); + let expected = bclass(&[(b'K', b'K'), (b'k', b'k')]); + assert_eq!(expected, bcasefold(&cls)); + + let cls = bclass(&[(b'@', b'@')]); + assert_eq!(cls, bcasefold(&cls)); + } + + #[test] + fn class_negate_unicode() { + let cls = uclass(&[('a', 'a')]); + let expected = uclass(&[('\x00', '\x60'), ('\x62', '\u{10FFFF}')]); + assert_eq!(expected, unegate(&cls)); + + let cls = uclass(&[('a', 'a'), ('b', 'b')]); + let expected = uclass(&[('\x00', '\x60'), ('\x63', '\u{10FFFF}')]); + assert_eq!(expected, unegate(&cls)); + + let cls = uclass(&[('a', 'c'), ('x', 'z')]); + let expected = uclass(&[ + ('\x00', '\x60'), + ('\x64', '\x77'), + ('\x7B', '\u{10FFFF}'), + ]); + assert_eq!(expected, unegate(&cls)); + + let cls = uclass(&[('\x00', 'a')]); + let expected = uclass(&[('\x62', '\u{10FFFF}')]); + assert_eq!(expected, unegate(&cls)); + + let cls = uclass(&[('a', '\u{10FFFF}')]); + let expected = uclass(&[('\x00', '\x60')]); + assert_eq!(expected, unegate(&cls)); + + let cls = uclass(&[('\x00', '\u{10FFFF}')]); + let expected = uclass(&[]); + assert_eq!(expected, unegate(&cls)); + + let cls = uclass(&[]); + let expected = uclass(&[('\x00', '\u{10FFFF}')]); + assert_eq!(expected, unegate(&cls)); + + let cls = + uclass(&[('\x00', '\u{10FFFD}'), ('\u{10FFFF}', '\u{10FFFF}')]); + let expected = uclass(&[('\u{10FFFE}', '\u{10FFFE}')]); + assert_eq!(expected, unegate(&cls)); + + let cls = uclass(&[('\x00', '\u{D7FF}')]); + let expected = uclass(&[('\u{E000}', '\u{10FFFF}')]); + assert_eq!(expected, unegate(&cls)); + + let cls = uclass(&[('\x00', '\u{D7FE}')]); + let expected = uclass(&[('\u{D7FF}', '\u{10FFFF}')]); + assert_eq!(expected, unegate(&cls)); + + let cls = uclass(&[('\u{E000}', '\u{10FFFF}')]); + let expected = uclass(&[('\x00', '\u{D7FF}')]); + assert_eq!(expected, unegate(&cls)); + + let cls = uclass(&[('\u{E001}', '\u{10FFFF}')]); + let expected = uclass(&[('\x00', '\u{E000}')]); + assert_eq!(expected, unegate(&cls)); + } + + #[test] + fn class_negate_bytes() { + let cls = bclass(&[(b'a', b'a')]); + let expected = bclass(&[(b'\x00', b'\x60'), (b'\x62', b'\xFF')]); + assert_eq!(expected, bnegate(&cls)); + + let cls = bclass(&[(b'a', b'a'), (b'b', b'b')]); + let expected = bclass(&[(b'\x00', b'\x60'), (b'\x63', b'\xFF')]); + assert_eq!(expected, bnegate(&cls)); + + let cls = bclass(&[(b'a', b'c'), (b'x', b'z')]); + let expected = bclass(&[ + (b'\x00', b'\x60'), + (b'\x64', b'\x77'), + (b'\x7B', b'\xFF'), + ]); + assert_eq!(expected, bnegate(&cls)); + + let cls = bclass(&[(b'\x00', b'a')]); + let expected = bclass(&[(b'\x62', b'\xFF')]); + assert_eq!(expected, bnegate(&cls)); + + let cls = bclass(&[(b'a', b'\xFF')]); + let 
expected = bclass(&[(b'\x00', b'\x60')]); + assert_eq!(expected, bnegate(&cls)); + + let cls = bclass(&[(b'\x00', b'\xFF')]); + let expected = bclass(&[]); + assert_eq!(expected, bnegate(&cls)); + + let cls = bclass(&[]); + let expected = bclass(&[(b'\x00', b'\xFF')]); + assert_eq!(expected, bnegate(&cls)); + + let cls = bclass(&[(b'\x00', b'\xFD'), (b'\xFF', b'\xFF')]); + let expected = bclass(&[(b'\xFE', b'\xFE')]); + assert_eq!(expected, bnegate(&cls)); + } + + #[test] + fn class_union_unicode() { + let cls1 = uclass(&[('a', 'g'), ('m', 't'), ('A', 'C')]); + let cls2 = uclass(&[('a', 'z')]); + let expected = uclass(&[('a', 'z'), ('A', 'C')]); + assert_eq!(expected, uunion(&cls1, &cls2)); + } + + #[test] + fn class_union_bytes() { + let cls1 = bclass(&[(b'a', b'g'), (b'm', b't'), (b'A', b'C')]); + let cls2 = bclass(&[(b'a', b'z')]); + let expected = bclass(&[(b'a', b'z'), (b'A', b'C')]); + assert_eq!(expected, bunion(&cls1, &cls2)); + } + + #[test] + fn class_intersect_unicode() { + let cls1 = uclass(&[]); + let cls2 = uclass(&[('a', 'a')]); + let expected = uclass(&[]); + assert_eq!(expected, uintersect(&cls1, &cls2)); + + let cls1 = uclass(&[('a', 'a')]); + let cls2 = uclass(&[('a', 'a')]); + let expected = uclass(&[('a', 'a')]); + assert_eq!(expected, uintersect(&cls1, &cls2)); + + let cls1 = uclass(&[('a', 'a')]); + let cls2 = uclass(&[('b', 'b')]); + let expected = uclass(&[]); + assert_eq!(expected, uintersect(&cls1, &cls2)); + + let cls1 = uclass(&[('a', 'a')]); + let cls2 = uclass(&[('a', 'c')]); + let expected = uclass(&[('a', 'a')]); + assert_eq!(expected, uintersect(&cls1, &cls2)); + + let cls1 = uclass(&[('a', 'b')]); + let cls2 = uclass(&[('a', 'c')]); + let expected = uclass(&[('a', 'b')]); + assert_eq!(expected, uintersect(&cls1, &cls2)); + + let cls1 = uclass(&[('a', 'b')]); + let cls2 = uclass(&[('b', 'c')]); + let expected = uclass(&[('b', 'b')]); + assert_eq!(expected, uintersect(&cls1, &cls2)); + + let cls1 = uclass(&[('a', 'b')]); + let cls2 = uclass(&[('c', 'd')]); + let expected = uclass(&[]); + assert_eq!(expected, uintersect(&cls1, &cls2)); + + let cls1 = uclass(&[('b', 'c')]); + let cls2 = uclass(&[('a', 'd')]); + let expected = uclass(&[('b', 'c')]); + assert_eq!(expected, uintersect(&cls1, &cls2)); + + let cls1 = uclass(&[('a', 'b'), ('d', 'e'), ('g', 'h')]); + let cls2 = uclass(&[('a', 'h')]); + let expected = uclass(&[('a', 'b'), ('d', 'e'), ('g', 'h')]); + assert_eq!(expected, uintersect(&cls1, &cls2)); + + let cls1 = uclass(&[('a', 'b'), ('d', 'e'), ('g', 'h')]); + let cls2 = uclass(&[('a', 'b'), ('d', 'e'), ('g', 'h')]); + let expected = uclass(&[('a', 'b'), ('d', 'e'), ('g', 'h')]); + assert_eq!(expected, uintersect(&cls1, &cls2)); + + let cls1 = uclass(&[('a', 'b'), ('g', 'h')]); + let cls2 = uclass(&[('d', 'e'), ('k', 'l')]); + let expected = uclass(&[]); + assert_eq!(expected, uintersect(&cls1, &cls2)); + + let cls1 = uclass(&[('a', 'b'), ('d', 'e'), ('g', 'h')]); + let cls2 = uclass(&[('h', 'h')]); + let expected = uclass(&[('h', 'h')]); + assert_eq!(expected, uintersect(&cls1, &cls2)); + + let cls1 = uclass(&[('a', 'b'), ('e', 'f'), ('i', 'j')]); + let cls2 = uclass(&[('c', 'd'), ('g', 'h'), ('k', 'l')]); + let expected = uclass(&[]); + assert_eq!(expected, uintersect(&cls1, &cls2)); + + let cls1 = uclass(&[('a', 'b'), ('c', 'd'), ('e', 'f')]); + let cls2 = uclass(&[('b', 'c'), ('d', 'e'), ('f', 'g')]); + let expected = uclass(&[('b', 'f')]); + assert_eq!(expected, uintersect(&cls1, &cls2)); + } + + #[test] + fn class_intersect_bytes() { + let cls1 
= bclass(&[]); + let cls2 = bclass(&[(b'a', b'a')]); + let expected = bclass(&[]); + assert_eq!(expected, bintersect(&cls1, &cls2)); + + let cls1 = bclass(&[(b'a', b'a')]); + let cls2 = bclass(&[(b'a', b'a')]); + let expected = bclass(&[(b'a', b'a')]); + assert_eq!(expected, bintersect(&cls1, &cls2)); + + let cls1 = bclass(&[(b'a', b'a')]); + let cls2 = bclass(&[(b'b', b'b')]); + let expected = bclass(&[]); + assert_eq!(expected, bintersect(&cls1, &cls2)); + + let cls1 = bclass(&[(b'a', b'a')]); + let cls2 = bclass(&[(b'a', b'c')]); + let expected = bclass(&[(b'a', b'a')]); + assert_eq!(expected, bintersect(&cls1, &cls2)); + + let cls1 = bclass(&[(b'a', b'b')]); + let cls2 = bclass(&[(b'a', b'c')]); + let expected = bclass(&[(b'a', b'b')]); + assert_eq!(expected, bintersect(&cls1, &cls2)); + + let cls1 = bclass(&[(b'a', b'b')]); + let cls2 = bclass(&[(b'b', b'c')]); + let expected = bclass(&[(b'b', b'b')]); + assert_eq!(expected, bintersect(&cls1, &cls2)); + + let cls1 = bclass(&[(b'a', b'b')]); + let cls2 = bclass(&[(b'c', b'd')]); + let expected = bclass(&[]); + assert_eq!(expected, bintersect(&cls1, &cls2)); + + let cls1 = bclass(&[(b'b', b'c')]); + let cls2 = bclass(&[(b'a', b'd')]); + let expected = bclass(&[(b'b', b'c')]); + assert_eq!(expected, bintersect(&cls1, &cls2)); + + let cls1 = bclass(&[(b'a', b'b'), (b'd', b'e'), (b'g', b'h')]); + let cls2 = bclass(&[(b'a', b'h')]); + let expected = bclass(&[(b'a', b'b'), (b'd', b'e'), (b'g', b'h')]); + assert_eq!(expected, bintersect(&cls1, &cls2)); + + let cls1 = bclass(&[(b'a', b'b'), (b'd', b'e'), (b'g', b'h')]); + let cls2 = bclass(&[(b'a', b'b'), (b'd', b'e'), (b'g', b'h')]); + let expected = bclass(&[(b'a', b'b'), (b'd', b'e'), (b'g', b'h')]); + assert_eq!(expected, bintersect(&cls1, &cls2)); + + let cls1 = bclass(&[(b'a', b'b'), (b'g', b'h')]); + let cls2 = bclass(&[(b'd', b'e'), (b'k', b'l')]); + let expected = bclass(&[]); + assert_eq!(expected, bintersect(&cls1, &cls2)); + + let cls1 = bclass(&[(b'a', b'b'), (b'd', b'e'), (b'g', b'h')]); + let cls2 = bclass(&[(b'h', b'h')]); + let expected = bclass(&[(b'h', b'h')]); + assert_eq!(expected, bintersect(&cls1, &cls2)); + + let cls1 = bclass(&[(b'a', b'b'), (b'e', b'f'), (b'i', b'j')]); + let cls2 = bclass(&[(b'c', b'd'), (b'g', b'h'), (b'k', b'l')]); + let expected = bclass(&[]); + assert_eq!(expected, bintersect(&cls1, &cls2)); + + let cls1 = bclass(&[(b'a', b'b'), (b'c', b'd'), (b'e', b'f')]); + let cls2 = bclass(&[(b'b', b'c'), (b'd', b'e'), (b'f', b'g')]); + let expected = bclass(&[(b'b', b'f')]); + assert_eq!(expected, bintersect(&cls1, &cls2)); + } + + #[test] + fn class_difference_unicode() { + let cls1 = uclass(&[('a', 'a')]); + let cls2 = uclass(&[('a', 'a')]); + let expected = uclass(&[]); + assert_eq!(expected, udifference(&cls1, &cls2)); + + let cls1 = uclass(&[('a', 'a')]); + let cls2 = uclass(&[]); + let expected = uclass(&[('a', 'a')]); + assert_eq!(expected, udifference(&cls1, &cls2)); + + let cls1 = uclass(&[]); + let cls2 = uclass(&[('a', 'a')]); + let expected = uclass(&[]); + assert_eq!(expected, udifference(&cls1, &cls2)); + + let cls1 = uclass(&[('a', 'z')]); + let cls2 = uclass(&[('a', 'a')]); + let expected = uclass(&[('b', 'z')]); + assert_eq!(expected, udifference(&cls1, &cls2)); + + let cls1 = uclass(&[('a', 'z')]); + let cls2 = uclass(&[('z', 'z')]); + let expected = uclass(&[('a', 'y')]); + assert_eq!(expected, udifference(&cls1, &cls2)); + + let cls1 = uclass(&[('a', 'z')]); + let cls2 = uclass(&[('m', 'm')]); + let expected = uclass(&[('a', 'l'), ('n', 
'z')]); + assert_eq!(expected, udifference(&cls1, &cls2)); + + let cls1 = uclass(&[('a', 'c'), ('g', 'i'), ('r', 't')]); + let cls2 = uclass(&[('a', 'z')]); + let expected = uclass(&[]); + assert_eq!(expected, udifference(&cls1, &cls2)); + + let cls1 = uclass(&[('a', 'c'), ('g', 'i'), ('r', 't')]); + let cls2 = uclass(&[('d', 'v')]); + let expected = uclass(&[('a', 'c')]); + assert_eq!(expected, udifference(&cls1, &cls2)); + + let cls1 = uclass(&[('a', 'c'), ('g', 'i'), ('r', 't')]); + let cls2 = uclass(&[('b', 'g'), ('s', 'u')]); + let expected = uclass(&[('a', 'a'), ('h', 'i'), ('r', 'r')]); + assert_eq!(expected, udifference(&cls1, &cls2)); + + let cls1 = uclass(&[('a', 'c'), ('g', 'i'), ('r', 't')]); + let cls2 = uclass(&[('b', 'd'), ('e', 'g'), ('s', 'u')]); + let expected = uclass(&[('a', 'a'), ('h', 'i'), ('r', 'r')]); + assert_eq!(expected, udifference(&cls1, &cls2)); + + let cls1 = uclass(&[('x', 'z')]); + let cls2 = uclass(&[('a', 'c'), ('e', 'g'), ('s', 'u')]); + let expected = uclass(&[('x', 'z')]); + assert_eq!(expected, udifference(&cls1, &cls2)); + + let cls1 = uclass(&[('a', 'z')]); + let cls2 = uclass(&[('a', 'c'), ('e', 'g'), ('s', 'u')]); + let expected = uclass(&[('d', 'd'), ('h', 'r'), ('v', 'z')]); + assert_eq!(expected, udifference(&cls1, &cls2)); + } + + #[test] + fn class_difference_bytes() { + let cls1 = bclass(&[(b'a', b'a')]); + let cls2 = bclass(&[(b'a', b'a')]); + let expected = bclass(&[]); + assert_eq!(expected, bdifference(&cls1, &cls2)); + + let cls1 = bclass(&[(b'a', b'a')]); + let cls2 = bclass(&[]); + let expected = bclass(&[(b'a', b'a')]); + assert_eq!(expected, bdifference(&cls1, &cls2)); + + let cls1 = bclass(&[]); + let cls2 = bclass(&[(b'a', b'a')]); + let expected = bclass(&[]); + assert_eq!(expected, bdifference(&cls1, &cls2)); + + let cls1 = bclass(&[(b'a', b'z')]); + let cls2 = bclass(&[(b'a', b'a')]); + let expected = bclass(&[(b'b', b'z')]); + assert_eq!(expected, bdifference(&cls1, &cls2)); + + let cls1 = bclass(&[(b'a', b'z')]); + let cls2 = bclass(&[(b'z', b'z')]); + let expected = bclass(&[(b'a', b'y')]); + assert_eq!(expected, bdifference(&cls1, &cls2)); + + let cls1 = bclass(&[(b'a', b'z')]); + let cls2 = bclass(&[(b'm', b'm')]); + let expected = bclass(&[(b'a', b'l'), (b'n', b'z')]); + assert_eq!(expected, bdifference(&cls1, &cls2)); + + let cls1 = bclass(&[(b'a', b'c'), (b'g', b'i'), (b'r', b't')]); + let cls2 = bclass(&[(b'a', b'z')]); + let expected = bclass(&[]); + assert_eq!(expected, bdifference(&cls1, &cls2)); + + let cls1 = bclass(&[(b'a', b'c'), (b'g', b'i'), (b'r', b't')]); + let cls2 = bclass(&[(b'd', b'v')]); + let expected = bclass(&[(b'a', b'c')]); + assert_eq!(expected, bdifference(&cls1, &cls2)); + + let cls1 = bclass(&[(b'a', b'c'), (b'g', b'i'), (b'r', b't')]); + let cls2 = bclass(&[(b'b', b'g'), (b's', b'u')]); + let expected = bclass(&[(b'a', b'a'), (b'h', b'i'), (b'r', b'r')]); + assert_eq!(expected, bdifference(&cls1, &cls2)); + + let cls1 = bclass(&[(b'a', b'c'), (b'g', b'i'), (b'r', b't')]); + let cls2 = bclass(&[(b'b', b'd'), (b'e', b'g'), (b's', b'u')]); + let expected = bclass(&[(b'a', b'a'), (b'h', b'i'), (b'r', b'r')]); + assert_eq!(expected, bdifference(&cls1, &cls2)); + + let cls1 = bclass(&[(b'x', b'z')]); + let cls2 = bclass(&[(b'a', b'c'), (b'e', b'g'), (b's', b'u')]); + let expected = bclass(&[(b'x', b'z')]); + assert_eq!(expected, bdifference(&cls1, &cls2)); + + let cls1 = bclass(&[(b'a', b'z')]); + let cls2 = bclass(&[(b'a', b'c'), (b'e', b'g'), (b's', b'u')]); + let expected = bclass(&[(b'd', 
b'd'), (b'h', b'r'), (b'v', b'z')]); + assert_eq!(expected, bdifference(&cls1, &cls2)); + } + + #[test] + fn class_symmetric_difference_unicode() { + let cls1 = uclass(&[('a', 'm')]); + let cls2 = uclass(&[('g', 't')]); + let expected = uclass(&[('a', 'f'), ('n', 't')]); + assert_eq!(expected, usymdifference(&cls1, &cls2)); + } + + #[test] + fn class_symmetric_difference_bytes() { + let cls1 = bclass(&[(b'a', b'm')]); + let cls2 = bclass(&[(b'g', b't')]); + let expected = bclass(&[(b'a', b'f'), (b'n', b't')]); + assert_eq!(expected, bsymdifference(&cls1, &cls2)); + } + + // We use a thread with an explicit stack size to test that our destructor + // for Hir can handle arbitrarily sized expressions in constant stack + // space. In case we run on a platform without threads (WASM?), we limit + // this test to Windows/Unix. + #[test] + #[cfg(any(unix, windows))] + fn no_stack_overflow_on_drop() { + use std::thread; + + let run = || { + let mut expr = Hir::empty(); + for _ in 0..100 { + expr = Hir::capture(Capture { + index: 1, + name: None, + sub: Box::new(expr), + }); + expr = Hir::repetition(Repetition { + min: 0, + max: Some(1), + greedy: true, + sub: Box::new(expr), + }); + + expr = Hir { + kind: HirKind::Concat(vec![expr]), + props: Properties::empty(), + }; + expr = Hir { + kind: HirKind::Alternation(vec![expr]), + props: Properties::empty(), + }; + } + assert!(!matches!(*expr.kind(), HirKind::Empty)); + }; + + // We run our test on a thread with a small stack size so we can + // force the issue more easily. + // + // NOTE(2023-03-21): See the corresponding test in 'crate::ast::tests' + // for context on the specific stack size chosen here. + thread::Builder::new() + .stack_size(16 << 10) + .spawn(run) + .unwrap() + .join() + .unwrap(); + } + + #[test] + fn look_set_iter() { + let set = LookSet::empty(); + assert_eq!(0, set.iter().count()); + + let set = LookSet::full(); + assert_eq!(18, set.iter().count()); + + let set = + LookSet::empty().insert(Look::StartLF).insert(Look::WordUnicode); + assert_eq!(2, set.iter().count()); + + let set = LookSet::empty().insert(Look::StartLF); + assert_eq!(1, set.iter().count()); + + let set = LookSet::empty().insert(Look::WordAsciiNegate); + assert_eq!(1, set.iter().count()); + } + + #[test] + fn look_set_debug() { + let res = format!("{:?}", LookSet::empty()); + assert_eq!("∅", res); + let res = format!("{:?}", LookSet::full()); + assert_eq!("Az^$rRbB𝛃𝚩<>〈〉◁▷◀▶", res); + } +} diff --git a/vendor/regex-syntax/src/hir/print.rs b/vendor/regex-syntax/src/hir/print.rs new file mode 100644 index 00000000000000..89db08c25bfaf8 --- /dev/null +++ b/vendor/regex-syntax/src/hir/print.rs @@ -0,0 +1,608 @@ +/*! +This module provides a regular expression printer for `Hir`. +*/ + +use core::fmt; + +use crate::{ + hir::{ + self, + visitor::{self, Visitor}, + Hir, HirKind, + }, + is_meta_character, +}; + +/// A builder for constructing a printer. +/// +/// Note that since a printer doesn't have any configuration knobs, this type +/// remains unexported. +#[derive(Clone, Debug)] +struct PrinterBuilder { + _priv: (), +} + +impl Default for PrinterBuilder { + fn default() -> PrinterBuilder { + PrinterBuilder::new() + } +} + +impl PrinterBuilder { + fn new() -> PrinterBuilder { + PrinterBuilder { _priv: () } + } + + fn build(&self) -> Printer { + Printer { _priv: () } + } +} + +/// A printer for a regular expression's high-level intermediate +/// representation. 
+/// +/// A printer converts a high-level intermediate representation (HIR) to a +/// regular expression pattern string. This particular printer uses constant +/// stack space and heap space proportional to the size of the HIR. +/// +/// Since this printer is only using the HIR, the pattern it prints will likely +/// not resemble the original pattern at all. For example, a pattern like +/// `\pL` will have its entire class written out. +/// +/// The purpose of this printer is to provide a means to mutate an HIR and then +/// build a regular expression from the result of that mutation. (A regex +/// library could provide a constructor from this HIR explicitly, but that +/// creates an unnecessary public coupling between the regex library and this +/// specific HIR representation.) +#[derive(Debug)] +pub struct Printer { + _priv: (), +} + +impl Printer { + /// Create a new printer. + pub fn new() -> Printer { + PrinterBuilder::new().build() + } + + /// Print the given `Ast` to the given writer. The writer must implement + /// `fmt::Write`. Typical implementations of `fmt::Write` that can be used + /// here are a `fmt::Formatter` (which is available in `fmt::Display` + /// implementations) or a `&mut String`. + pub fn print(&mut self, hir: &Hir, wtr: W) -> fmt::Result { + visitor::visit(hir, Writer { wtr }) + } +} + +#[derive(Debug)] +struct Writer { + wtr: W, +} + +impl Visitor for Writer { + type Output = (); + type Err = fmt::Error; + + fn finish(self) -> fmt::Result { + Ok(()) + } + + fn visit_pre(&mut self, hir: &Hir) -> fmt::Result { + match *hir.kind() { + HirKind::Empty => { + // Technically an empty sub-expression could be "printed" by + // just ignoring it, but in practice, you could have a + // repetition operator attached to an empty expression, and you + // really need something in the concrete syntax to make that + // work as you'd expect. + self.wtr.write_str(r"(?:)")?; + } + // Repetition operators are strictly suffix oriented. + HirKind::Repetition(_) => {} + HirKind::Literal(hir::Literal(ref bytes)) => { + // See the comment on the 'Concat' and 'Alternation' case below + // for why we put parens here. Literals are, conceptually, + // a special case of concatenation where each element is a + // character. The HIR flattens this into a Box<[u8]>, but we + // still need to treat it like a concatenation for correct + // printing. As a special case, we don't write parens if there + // is only one character. One character means there is no + // concat so we don't need parens. Adding parens would still be + // correct, but we drop them here because it tends to create + // rather noisy regexes even in simple cases. 
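+                // For instance (illustrative): the literal "abc" prints as
+                // "(?:abc)", while the single-character literal "a" prints
+                // as just "a".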
+ let result = core::str::from_utf8(bytes); + let len = result.map_or(bytes.len(), |s| s.chars().count()); + if len > 1 { + self.wtr.write_str(r"(?:")?; + } + match result { + Ok(string) => { + for c in string.chars() { + self.write_literal_char(c)?; + } + } + Err(_) => { + for &b in bytes.iter() { + self.write_literal_byte(b)?; + } + } + } + if len > 1 { + self.wtr.write_str(r")")?; + } + } + HirKind::Class(hir::Class::Unicode(ref cls)) => { + if cls.ranges().is_empty() { + return self.wtr.write_str("[a&&b]"); + } + self.wtr.write_str("[")?; + for range in cls.iter() { + if range.start() == range.end() { + self.write_literal_char(range.start())?; + } else if u32::from(range.start()) + 1 + == u32::from(range.end()) + { + self.write_literal_char(range.start())?; + self.write_literal_char(range.end())?; + } else { + self.write_literal_char(range.start())?; + self.wtr.write_str("-")?; + self.write_literal_char(range.end())?; + } + } + self.wtr.write_str("]")?; + } + HirKind::Class(hir::Class::Bytes(ref cls)) => { + if cls.ranges().is_empty() { + return self.wtr.write_str("[a&&b]"); + } + self.wtr.write_str("(?-u:[")?; + for range in cls.iter() { + if range.start() == range.end() { + self.write_literal_class_byte(range.start())?; + } else if range.start() + 1 == range.end() { + self.write_literal_class_byte(range.start())?; + self.write_literal_class_byte(range.end())?; + } else { + self.write_literal_class_byte(range.start())?; + self.wtr.write_str("-")?; + self.write_literal_class_byte(range.end())?; + } + } + self.wtr.write_str("])")?; + } + HirKind::Look(ref look) => match *look { + hir::Look::Start => { + self.wtr.write_str(r"\A")?; + } + hir::Look::End => { + self.wtr.write_str(r"\z")?; + } + hir::Look::StartLF => { + self.wtr.write_str("(?m:^)")?; + } + hir::Look::EndLF => { + self.wtr.write_str("(?m:$)")?; + } + hir::Look::StartCRLF => { + self.wtr.write_str("(?mR:^)")?; + } + hir::Look::EndCRLF => { + self.wtr.write_str("(?mR:$)")?; + } + hir::Look::WordAscii => { + self.wtr.write_str(r"(?-u:\b)")?; + } + hir::Look::WordAsciiNegate => { + self.wtr.write_str(r"(?-u:\B)")?; + } + hir::Look::WordUnicode => { + self.wtr.write_str(r"\b")?; + } + hir::Look::WordUnicodeNegate => { + self.wtr.write_str(r"\B")?; + } + hir::Look::WordStartAscii => { + self.wtr.write_str(r"(?-u:\b{start})")?; + } + hir::Look::WordEndAscii => { + self.wtr.write_str(r"(?-u:\b{end})")?; + } + hir::Look::WordStartUnicode => { + self.wtr.write_str(r"\b{start}")?; + } + hir::Look::WordEndUnicode => { + self.wtr.write_str(r"\b{end}")?; + } + hir::Look::WordStartHalfAscii => { + self.wtr.write_str(r"(?-u:\b{start-half})")?; + } + hir::Look::WordEndHalfAscii => { + self.wtr.write_str(r"(?-u:\b{end-half})")?; + } + hir::Look::WordStartHalfUnicode => { + self.wtr.write_str(r"\b{start-half}")?; + } + hir::Look::WordEndHalfUnicode => { + self.wtr.write_str(r"\b{end-half}")?; + } + }, + HirKind::Capture(hir::Capture { ref name, .. }) => { + self.wtr.write_str("(")?; + if let Some(ref name) = *name { + write!(self.wtr, "?P<{name}>")?; + } + } + // Why do this? Wrapping concats and alts in non-capturing groups + // is not *always* necessary, but is sometimes necessary. For + // example, 'concat(a, alt(b, c))' should be written as 'a(?:b|c)' + // and not 'ab|c'. The former is clearly the intended meaning, but + // the latter is actually 'alt(concat(a, b), c)'. + // + // It would be possible to only group these things in cases where + // it's strictly necessary, but it requires knowing the parent + // expression. 
And since this technique is simpler and always + // correct, we take this route. More to the point, it is a non-goal + // of an HIR printer to show a nice easy-to-read regex. Indeed, + // its construction forbids it from doing so. Therefore, inserting + // extra groups where they aren't necessary is perfectly okay. + HirKind::Concat(_) | HirKind::Alternation(_) => { + self.wtr.write_str(r"(?:")?; + } + } + Ok(()) + } + + fn visit_post(&mut self, hir: &Hir) -> fmt::Result { + match *hir.kind() { + // Handled during visit_pre + HirKind::Empty + | HirKind::Literal(_) + | HirKind::Class(_) + | HirKind::Look(_) => {} + HirKind::Repetition(ref x) => { + match (x.min, x.max) { + (0, Some(1)) => { + self.wtr.write_str("?")?; + } + (0, None) => { + self.wtr.write_str("*")?; + } + (1, None) => { + self.wtr.write_str("+")?; + } + (1, Some(1)) => { + // 'a{1}' and 'a{1}?' are exactly equivalent to 'a'. + return Ok(()); + } + (m, None) => { + write!(self.wtr, "{{{m},}}")?; + } + (m, Some(n)) if m == n => { + write!(self.wtr, "{{{m}}}")?; + // a{m} and a{m}? are always exactly equivalent. + return Ok(()); + } + (m, Some(n)) => { + write!(self.wtr, "{{{m},{n}}}")?; + } + } + if !x.greedy { + self.wtr.write_str("?")?; + } + } + HirKind::Capture(_) + | HirKind::Concat(_) + | HirKind::Alternation(_) => { + self.wtr.write_str(r")")?; + } + } + Ok(()) + } + + fn visit_alternation_in(&mut self) -> fmt::Result { + self.wtr.write_str("|") + } +} + +impl Writer { + fn write_literal_char(&mut self, c: char) -> fmt::Result { + if is_meta_character(c) { + self.wtr.write_str("\\")?; + } + self.wtr.write_char(c) + } + + fn write_literal_byte(&mut self, b: u8) -> fmt::Result { + if b <= 0x7F && !b.is_ascii_control() && !b.is_ascii_whitespace() { + self.write_literal_char(char::try_from(b).unwrap()) + } else { + write!(self.wtr, "(?-u:\\x{b:02X})") + } + } + + fn write_literal_class_byte(&mut self, b: u8) -> fmt::Result { + if b <= 0x7F && !b.is_ascii_control() && !b.is_ascii_whitespace() { + self.write_literal_char(char::try_from(b).unwrap()) + } else { + write!(self.wtr, "\\x{b:02X}") + } + } +} + +#[cfg(test)] +mod tests { + use alloc::{ + boxed::Box, + string::{String, ToString}, + }; + + use crate::ParserBuilder; + + use super::*; + + fn roundtrip(given: &str, expected: &str) { + roundtrip_with(|b| b, given, expected); + } + + fn roundtrip_bytes(given: &str, expected: &str) { + roundtrip_with(|b| b.utf8(false), given, expected); + } + + fn roundtrip_with(mut f: F, given: &str, expected: &str) + where + F: FnMut(&mut ParserBuilder) -> &mut ParserBuilder, + { + let mut builder = ParserBuilder::new(); + f(&mut builder); + let hir = builder.build().parse(given).unwrap(); + + let mut printer = Printer::new(); + let mut dst = String::new(); + printer.print(&hir, &mut dst).unwrap(); + + // Check that the result is actually valid. 
+ builder.build().parse(&dst).unwrap(); + + assert_eq!(expected, dst); + } + + #[test] + fn print_literal() { + roundtrip("a", "a"); + roundtrip(r"\xff", "\u{FF}"); + roundtrip_bytes(r"\xff", "\u{FF}"); + roundtrip_bytes(r"(?-u)\xff", r"(?-u:\xFF)"); + roundtrip("☃", "☃"); + } + + #[test] + fn print_class() { + roundtrip(r"[a]", r"a"); + roundtrip(r"[ab]", r"[ab]"); + roundtrip(r"[a-z]", r"[a-z]"); + roundtrip(r"[a-z--b-c--x-y]", r"[ad-wz]"); + roundtrip(r"[^\x01-\u{10FFFF}]", "\u{0}"); + roundtrip(r"[-]", r"\-"); + roundtrip(r"[☃-⛄]", r"[☃-⛄]"); + + roundtrip(r"(?-u)[a]", r"a"); + roundtrip(r"(?-u)[ab]", r"(?-u:[ab])"); + roundtrip(r"(?-u)[a-z]", r"(?-u:[a-z])"); + roundtrip_bytes(r"(?-u)[a-\xFF]", r"(?-u:[a-\xFF])"); + + // The following test that the printer escapes meta characters + // in character classes. + roundtrip(r"[\[]", r"\["); + roundtrip(r"[Z-_]", r"[Z-_]"); + roundtrip(r"[Z-_--Z]", r"[\[-_]"); + + // The following test that the printer escapes meta characters + // in byte oriented character classes. + roundtrip_bytes(r"(?-u)[\[]", r"\["); + roundtrip_bytes(r"(?-u)[Z-_]", r"(?-u:[Z-_])"); + roundtrip_bytes(r"(?-u)[Z-_--Z]", r"(?-u:[\[-_])"); + + // This tests that an empty character class is correctly roundtripped. + #[cfg(feature = "unicode-gencat")] + roundtrip(r"\P{any}", r"[a&&b]"); + roundtrip_bytes(r"(?-u)[^\x00-\xFF]", r"[a&&b]"); + } + + #[test] + fn print_anchor() { + roundtrip(r"^", r"\A"); + roundtrip(r"$", r"\z"); + roundtrip(r"(?m)^", r"(?m:^)"); + roundtrip(r"(?m)$", r"(?m:$)"); + } + + #[test] + fn print_word_boundary() { + roundtrip(r"\b", r"\b"); + roundtrip(r"\B", r"\B"); + roundtrip(r"(?-u)\b", r"(?-u:\b)"); + roundtrip_bytes(r"(?-u)\B", r"(?-u:\B)"); + } + + #[test] + fn print_repetition() { + roundtrip("a?", "a?"); + roundtrip("a??", "a??"); + roundtrip("(?U)a?", "a??"); + + roundtrip("a*", "a*"); + roundtrip("a*?", "a*?"); + roundtrip("(?U)a*", "a*?"); + + roundtrip("a+", "a+"); + roundtrip("a+?", "a+?"); + roundtrip("(?U)a+", "a+?"); + + roundtrip("a{1}", "a"); + roundtrip("a{2}", "a{2}"); + roundtrip("a{1,}", "a+"); + roundtrip("a{1,5}", "a{1,5}"); + roundtrip("a{1}?", "a"); + roundtrip("a{2}?", "a{2}"); + roundtrip("a{1,}?", "a+?"); + roundtrip("a{1,5}?", "a{1,5}?"); + roundtrip("(?U)a{1}", "a"); + roundtrip("(?U)a{2}", "a{2}"); + roundtrip("(?U)a{1,}", "a+?"); + roundtrip("(?U)a{1,5}", "a{1,5}?"); + + // Test that various zero-length repetitions always translate to an + // empty regex. This is more a property of HIR's smart constructors + // than the printer though. + roundtrip("a{0}", "(?:)"); + roundtrip("(?:ab){0}", "(?:)"); + #[cfg(feature = "unicode-gencat")] + { + roundtrip(r"\p{any}{0}", "(?:)"); + roundtrip(r"\P{any}{0}", "(?:)"); + } + } + + #[test] + fn print_group() { + roundtrip("()", "((?:))"); + roundtrip("(?P)", "(?P(?:))"); + roundtrip("(?:)", "(?:)"); + + roundtrip("(a)", "(a)"); + roundtrip("(?Pa)", "(?Pa)"); + roundtrip("(?:a)", "a"); + + roundtrip("((((a))))", "((((a))))"); + } + + #[test] + fn print_alternation() { + roundtrip("|", "(?:(?:)|(?:))"); + roundtrip("||", "(?:(?:)|(?:)|(?:))"); + + roundtrip("a|b", "[ab]"); + roundtrip("ab|cd", "(?:(?:ab)|(?:cd))"); + roundtrip("a|b|c", "[a-c]"); + roundtrip("ab|cd|ef", "(?:(?:ab)|(?:cd)|(?:ef))"); + roundtrip("foo|bar|quux", "(?:(?:foo)|(?:bar)|(?:quux))"); + } + + // This is a regression test that stresses a peculiarity of how the HIR + // is both constructed and printed. Namely, it is legal for a repetition + // to directly contain a concatenation. 
This particular construct isn't + // really possible to build from the concrete syntax directly, since you'd + // be forced to put the concatenation into (at least) a non-capturing + // group. Concurrently, the printer doesn't consider this case and just + // kind of naively prints the child expression and tacks on the repetition + // operator. + // + // As a result, if you attached '+' to a 'concat(a, b)', the printer gives + // you 'ab+', but clearly it really should be '(?:ab)+'. + // + // This bug isn't easy to surface because most ways of building an HIR + // come directly from the concrete syntax, and as mentioned above, it just + // isn't possible to build this kind of HIR from the concrete syntax. + // Nevertheless, this is definitely a bug. + // + // See: https://github.com/rust-lang/regex/issues/731 + #[test] + fn regression_repetition_concat() { + let expr = Hir::concat(alloc::vec![ + Hir::literal("x".as_bytes()), + Hir::repetition(hir::Repetition { + min: 1, + max: None, + greedy: true, + sub: Box::new(Hir::literal("ab".as_bytes())), + }), + Hir::literal("y".as_bytes()), + ]); + assert_eq!(r"(?:x(?:ab)+y)", expr.to_string()); + + let expr = Hir::concat(alloc::vec![ + Hir::look(hir::Look::Start), + Hir::repetition(hir::Repetition { + min: 1, + max: None, + greedy: true, + sub: Box::new(Hir::concat(alloc::vec![ + Hir::look(hir::Look::Start), + Hir::look(hir::Look::End), + ])), + }), + Hir::look(hir::Look::End), + ]); + assert_eq!(r"(?:\A\A\z\z)", expr.to_string()); + } + + // Just like regression_repetition_concat, but with the repetition using + // an alternation as a child expression instead. + // + // See: https://github.com/rust-lang/regex/issues/731 + #[test] + fn regression_repetition_alternation() { + let expr = Hir::concat(alloc::vec![ + Hir::literal("ab".as_bytes()), + Hir::repetition(hir::Repetition { + min: 1, + max: None, + greedy: true, + sub: Box::new(Hir::alternation(alloc::vec![ + Hir::literal("cd".as_bytes()), + Hir::literal("ef".as_bytes()), + ])), + }), + Hir::literal("gh".as_bytes()), + ]); + assert_eq!(r"(?:(?:ab)(?:(?:cd)|(?:ef))+(?:gh))", expr.to_string()); + + let expr = Hir::concat(alloc::vec![ + Hir::look(hir::Look::Start), + Hir::repetition(hir::Repetition { + min: 1, + max: None, + greedy: true, + sub: Box::new(Hir::alternation(alloc::vec![ + Hir::look(hir::Look::Start), + Hir::look(hir::Look::End), + ])), + }), + Hir::look(hir::Look::End), + ]); + assert_eq!(r"(?:\A(?:\A|\z)\z)", expr.to_string()); + } + + // This regression test is very similar in flavor to + // regression_repetition_concat in that the root of the issue lies in a + // peculiarity of how the HIR is represented and how the printer writes it + // out. Like the other regression, this one is also rooted in the fact that + // you can't produce the peculiar HIR from the concrete syntax. Namely, you + // just can't have a 'concat(a, alt(b, c))' because the 'alt' will normally + // be in (at least) a non-capturing group. Why? Because the '|' has very + // low precedence (lower that concatenation), and so something like 'ab|c' + // is actually 'alt(ab, c)'. 
+ // + // See: https://github.com/rust-lang/regex/issues/516 + #[test] + fn regression_alternation_concat() { + let expr = Hir::concat(alloc::vec![ + Hir::literal("ab".as_bytes()), + Hir::alternation(alloc::vec![ + Hir::literal("mn".as_bytes()), + Hir::literal("xy".as_bytes()), + ]), + ]); + assert_eq!(r"(?:(?:ab)(?:(?:mn)|(?:xy)))", expr.to_string()); + + let expr = Hir::concat(alloc::vec![ + Hir::look(hir::Look::Start), + Hir::alternation(alloc::vec![ + Hir::look(hir::Look::Start), + Hir::look(hir::Look::End), + ]), + ]); + assert_eq!(r"(?:\A(?:\A|\z))", expr.to_string()); + } +} diff --git a/vendor/regex-syntax/src/hir/translate.rs b/vendor/regex-syntax/src/hir/translate.rs new file mode 100644 index 00000000000000..48469f9e1615d0 --- /dev/null +++ b/vendor/regex-syntax/src/hir/translate.rs @@ -0,0 +1,3740 @@ +/*! +Defines a translator that converts an `Ast` to an `Hir`. +*/ + +use core::cell::{Cell, RefCell}; + +use alloc::{boxed::Box, string::ToString, vec, vec::Vec}; + +use crate::{ + ast::{self, Ast, Span, Visitor}, + either::Either, + hir::{self, Error, ErrorKind, Hir, HirKind}, + unicode::{self, ClassQuery}, +}; + +type Result = core::result::Result; + +/// A builder for constructing an AST->HIR translator. +#[derive(Clone, Debug)] +pub struct TranslatorBuilder { + utf8: bool, + line_terminator: u8, + flags: Flags, +} + +impl Default for TranslatorBuilder { + fn default() -> TranslatorBuilder { + TranslatorBuilder::new() + } +} + +impl TranslatorBuilder { + /// Create a new translator builder with a default configuration. + pub fn new() -> TranslatorBuilder { + TranslatorBuilder { + utf8: true, + line_terminator: b'\n', + flags: Flags::default(), + } + } + + /// Build a translator using the current configuration. + pub fn build(&self) -> Translator { + Translator { + stack: RefCell::new(vec![]), + flags: Cell::new(self.flags), + utf8: self.utf8, + line_terminator: self.line_terminator, + } + } + + /// When disabled, translation will permit the construction of a regular + /// expression that may match invalid UTF-8. + /// + /// When enabled (the default), the translator is guaranteed to produce an + /// expression that, for non-empty matches, will only ever produce spans + /// that are entirely valid UTF-8 (otherwise, the translator will return an + /// error). + /// + /// Perhaps surprisingly, when UTF-8 is enabled, an empty regex or even + /// a negated ASCII word boundary (uttered as `(?-u:\B)` in the concrete + /// syntax) will be allowed even though they can produce matches that split + /// a UTF-8 encoded codepoint. This only applies to zero-width or "empty" + /// matches, and it is expected that the regex engine itself must handle + /// these cases if necessary (perhaps by suppressing any zero-width matches + /// that split a codepoint). + pub fn utf8(&mut self, yes: bool) -> &mut TranslatorBuilder { + self.utf8 = yes; + self + } + + /// Sets the line terminator for use with `(?u-s:.)` and `(?-us:.)`. + /// + /// Namely, instead of `.` (by default) matching everything except for `\n`, + /// this will cause `.` to match everything except for the byte given. + /// + /// If `.` is used in a context where Unicode mode is enabled and this byte + /// isn't ASCII, then an error will be returned. When Unicode mode is + /// disabled, then any byte is permitted, but will return an error if UTF-8 + /// mode is enabled and it is a non-ASCII byte. + /// + /// In short, any ASCII value for a line terminator is always okay. 
But a + /// non-ASCII byte might result in an error depending on whether Unicode + /// mode or UTF-8 mode are enabled. + /// + /// Note that if `R` mode is enabled then it always takes precedence and + /// the line terminator will be treated as `\r` and `\n` simultaneously. + /// + /// Note also that this *doesn't* impact the look-around assertions + /// `(?m:^)` and `(?m:$)`. That's usually controlled by additional + /// configuration in the regex engine itself. + pub fn line_terminator(&mut self, byte: u8) -> &mut TranslatorBuilder { + self.line_terminator = byte; + self + } + + /// Enable or disable the case insensitive flag (`i`) by default. + pub fn case_insensitive(&mut self, yes: bool) -> &mut TranslatorBuilder { + self.flags.case_insensitive = if yes { Some(true) } else { None }; + self + } + + /// Enable or disable the multi-line matching flag (`m`) by default. + pub fn multi_line(&mut self, yes: bool) -> &mut TranslatorBuilder { + self.flags.multi_line = if yes { Some(true) } else { None }; + self + } + + /// Enable or disable the "dot matches any character" flag (`s`) by + /// default. + pub fn dot_matches_new_line( + &mut self, + yes: bool, + ) -> &mut TranslatorBuilder { + self.flags.dot_matches_new_line = if yes { Some(true) } else { None }; + self + } + + /// Enable or disable the CRLF mode flag (`R`) by default. + pub fn crlf(&mut self, yes: bool) -> &mut TranslatorBuilder { + self.flags.crlf = if yes { Some(true) } else { None }; + self + } + + /// Enable or disable the "swap greed" flag (`U`) by default. + pub fn swap_greed(&mut self, yes: bool) -> &mut TranslatorBuilder { + self.flags.swap_greed = if yes { Some(true) } else { None }; + self + } + + /// Enable or disable the Unicode flag (`u`) by default. + pub fn unicode(&mut self, yes: bool) -> &mut TranslatorBuilder { + self.flags.unicode = if yes { None } else { Some(false) }; + self + } +} + +/// A translator maps abstract syntax to a high level intermediate +/// representation. +/// +/// A translator may be benefit from reuse. That is, a translator can translate +/// many abstract syntax trees. +/// +/// A `Translator` can be configured in more detail via a +/// [`TranslatorBuilder`]. +#[derive(Clone, Debug)] +pub struct Translator { + /// Our call stack, but on the heap. + stack: RefCell>, + /// The current flag settings. + flags: Cell, + /// Whether we're allowed to produce HIR that can match arbitrary bytes. + utf8: bool, + /// The line terminator to use for `.`. + line_terminator: u8, +} + +impl Translator { + /// Create a new translator using the default configuration. + pub fn new() -> Translator { + TranslatorBuilder::new().build() + } + + /// Translate the given abstract syntax tree (AST) into a high level + /// intermediate representation (HIR). + /// + /// If there was a problem doing the translation, then an HIR-specific + /// error is returned. + /// + /// The original pattern string used to produce the `Ast` *must* also be + /// provided. The translator does not use the pattern string during any + /// correct translation, but is used for error reporting. + pub fn translate(&mut self, pattern: &str, ast: &Ast) -> Result { + ast::visit(ast, TranslatorI::new(self, pattern)) + } +} + +/// An HirFrame is a single stack frame, represented explicitly, which is +/// created for each item in the Ast that we traverse. +/// +/// Note that technically, this type doesn't represent our entire stack +/// frame. In particular, the Ast visitor represents any state associated with +/// traversing the Ast itself. 
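+// An illustrative sketch of driving the `Translator` defined above end to
+// end, assuming the AST parser from this crate's `ast::parse` module. The
+// same pattern string is handed to both stages, since `translate` only uses
+// it for error reporting:
+//
+//     let pattern = r"(?i)a+";
+//     let ast = ast::parse::Parser::new().parse(pattern)?;
+//     let mut translator = Translator::new();
+//     let hir = translator.translate(pattern, &ast)?;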
+#[derive(Clone, Debug)] +enum HirFrame { + /// An arbitrary HIR expression. These get pushed whenever we hit a base + /// case in the Ast. They get popped after an inductive (i.e., recursive) + /// step is complete. + Expr(Hir), + /// A literal that is being constructed, character by character, from the + /// AST. We need this because the AST gives each individual character its + /// own node. So as we see characters, we peek at the top-most HirFrame. + /// If it's a literal, then we add to it. Otherwise, we push a new literal. + /// When it comes time to pop it, we convert it to an Hir via Hir::literal. + Literal(Vec), + /// A Unicode character class. This frame is mutated as we descend into + /// the Ast of a character class (which is itself its own mini recursive + /// structure). + ClassUnicode(hir::ClassUnicode), + /// A byte-oriented character class. This frame is mutated as we descend + /// into the Ast of a character class (which is itself its own mini + /// recursive structure). + /// + /// Byte character classes are created when Unicode mode (`u`) is disabled. + /// If `utf8` is enabled (the default), then a byte character is only + /// permitted to match ASCII text. + ClassBytes(hir::ClassBytes), + /// This is pushed whenever a repetition is observed. After visiting every + /// sub-expression in the repetition, the translator's stack is expected to + /// have this sentinel at the top. + /// + /// This sentinel only exists to stop other things (like flattening + /// literals) from reaching across repetition operators. + Repetition, + /// This is pushed on to the stack upon first seeing any kind of capture, + /// indicated by parentheses (including non-capturing groups). It is popped + /// upon leaving a group. + Group { + /// The old active flags when this group was opened. + /// + /// If this group sets flags, then the new active flags are set to the + /// result of merging the old flags with the flags introduced by this + /// group. If the group doesn't set any flags, then this is simply + /// equivalent to whatever flags were set when the group was opened. + /// + /// When this group is popped, the active flags should be restored to + /// the flags set here. + /// + /// The "active" flags correspond to whatever flags are set in the + /// Translator. + old_flags: Flags, + }, + /// This is pushed whenever a concatenation is observed. After visiting + /// every sub-expression in the concatenation, the translator's stack is + /// popped until it sees a Concat frame. + Concat, + /// This is pushed whenever an alternation is observed. After visiting + /// every sub-expression in the alternation, the translator's stack is + /// popped until it sees an Alternation frame. + Alternation, + /// This is pushed immediately before each sub-expression in an + /// alternation. This separates the branches of an alternation on the + /// stack and prevents literal flattening from reaching across alternation + /// branches. + /// + /// It is popped after each expression in a branch until an 'Alternation' + /// frame is observed when doing a post visit on an alternation. + AlternationBranch, +} + +impl HirFrame { + /// Assert that the current stack frame is an Hir expression and return it. + fn unwrap_expr(self) -> Hir { + match self { + HirFrame::Expr(expr) => expr, + HirFrame::Literal(lit) => Hir::literal(lit), + _ => panic!("tried to unwrap expr from HirFrame, got: {self:?}"), + } + } + + /// Assert that the current stack frame is a Unicode class expression and + /// return it. 
+ fn unwrap_class_unicode(self) -> hir::ClassUnicode { + match self { + HirFrame::ClassUnicode(cls) => cls, + _ => panic!( + "tried to unwrap Unicode class \ + from HirFrame, got: {:?}", + self + ), + } + } + + /// Assert that the current stack frame is a byte class expression and + /// return it. + fn unwrap_class_bytes(self) -> hir::ClassBytes { + match self { + HirFrame::ClassBytes(cls) => cls, + _ => panic!( + "tried to unwrap byte class \ + from HirFrame, got: {:?}", + self + ), + } + } + + /// Assert that the current stack frame is a repetition sentinel. If it + /// isn't, then panic. + fn unwrap_repetition(self) { + match self { + HirFrame::Repetition => {} + _ => { + panic!( + "tried to unwrap repetition from HirFrame, got: {self:?}" + ) + } + } + } + + /// Assert that the current stack frame is a group indicator and return + /// its corresponding flags (the flags that were active at the time the + /// group was entered). + fn unwrap_group(self) -> Flags { + match self { + HirFrame::Group { old_flags } => old_flags, + _ => { + panic!("tried to unwrap group from HirFrame, got: {self:?}") + } + } + } + + /// Assert that the current stack frame is an alternation pipe sentinel. If + /// it isn't, then panic. + fn unwrap_alternation_pipe(self) { + match self { + HirFrame::AlternationBranch => {} + _ => { + panic!("tried to unwrap alt pipe from HirFrame, got: {self:?}") + } + } + } +} + +impl<'t, 'p> Visitor for TranslatorI<'t, 'p> { + type Output = Hir; + type Err = Error; + + fn finish(self) -> Result { + // ... otherwise, we should have exactly one HIR on the stack. + assert_eq!(self.trans().stack.borrow().len(), 1); + Ok(self.pop().unwrap().unwrap_expr()) + } + + fn visit_pre(&mut self, ast: &Ast) -> Result<()> { + match *ast { + Ast::ClassBracketed(_) => { + if self.flags().unicode() { + let cls = hir::ClassUnicode::empty(); + self.push(HirFrame::ClassUnicode(cls)); + } else { + let cls = hir::ClassBytes::empty(); + self.push(HirFrame::ClassBytes(cls)); + } + } + Ast::Repetition(_) => self.push(HirFrame::Repetition), + Ast::Group(ref x) => { + let old_flags = x + .flags() + .map(|ast| self.set_flags(ast)) + .unwrap_or_else(|| self.flags()); + self.push(HirFrame::Group { old_flags }); + } + Ast::Concat(_) => { + self.push(HirFrame::Concat); + } + Ast::Alternation(ref x) => { + self.push(HirFrame::Alternation); + if !x.asts.is_empty() { + self.push(HirFrame::AlternationBranch); + } + } + _ => {} + } + Ok(()) + } + + fn visit_post(&mut self, ast: &Ast) -> Result<()> { + match *ast { + Ast::Empty(_) => { + self.push(HirFrame::Expr(Hir::empty())); + } + Ast::Flags(ref x) => { + self.set_flags(&x.flags); + // Flags in the AST are generally considered directives and + // not actual sub-expressions. However, they can be used in + // the concrete syntax like `((?i))`, and we need some kind of + // indication of an expression there, and Empty is the correct + // choice. + // + // There can also be things like `(?i)+`, but we rule those out + // in the parser. In the future, we might allow them for + // consistency sake. + self.push(HirFrame::Expr(Hir::empty())); + } + Ast::Literal(ref x) => match self.ast_literal_to_scalar(x)? { + Either::Right(byte) => self.push_byte(byte), + Either::Left(ch) => match self.case_fold_char(x.span, ch)? 
{ + None => self.push_char(ch), + Some(expr) => self.push(HirFrame::Expr(expr)), + }, + }, + Ast::Dot(ref span) => { + self.push(HirFrame::Expr(self.hir_dot(**span)?)); + } + Ast::Assertion(ref x) => { + self.push(HirFrame::Expr(self.hir_assertion(x)?)); + } + Ast::ClassPerl(ref x) => { + if self.flags().unicode() { + let cls = self.hir_perl_unicode_class(x)?; + let hcls = hir::Class::Unicode(cls); + self.push(HirFrame::Expr(Hir::class(hcls))); + } else { + let cls = self.hir_perl_byte_class(x)?; + let hcls = hir::Class::Bytes(cls); + self.push(HirFrame::Expr(Hir::class(hcls))); + } + } + Ast::ClassUnicode(ref x) => { + let cls = hir::Class::Unicode(self.hir_unicode_class(x)?); + self.push(HirFrame::Expr(Hir::class(cls))); + } + Ast::ClassBracketed(ref ast) => { + if self.flags().unicode() { + let mut cls = self.pop().unwrap().unwrap_class_unicode(); + self.unicode_fold_and_negate( + &ast.span, + ast.negated, + &mut cls, + )?; + let expr = Hir::class(hir::Class::Unicode(cls)); + self.push(HirFrame::Expr(expr)); + } else { + let mut cls = self.pop().unwrap().unwrap_class_bytes(); + self.bytes_fold_and_negate( + &ast.span, + ast.negated, + &mut cls, + )?; + let expr = Hir::class(hir::Class::Bytes(cls)); + self.push(HirFrame::Expr(expr)); + } + } + Ast::Repetition(ref x) => { + let expr = self.pop().unwrap().unwrap_expr(); + self.pop().unwrap().unwrap_repetition(); + self.push(HirFrame::Expr(self.hir_repetition(x, expr))); + } + Ast::Group(ref x) => { + let expr = self.pop().unwrap().unwrap_expr(); + let old_flags = self.pop().unwrap().unwrap_group(); + self.trans().flags.set(old_flags); + self.push(HirFrame::Expr(self.hir_capture(x, expr))); + } + Ast::Concat(_) => { + let mut exprs = vec![]; + while let Some(expr) = self.pop_concat_expr() { + if !matches!(*expr.kind(), HirKind::Empty) { + exprs.push(expr); + } + } + exprs.reverse(); + self.push(HirFrame::Expr(Hir::concat(exprs))); + } + Ast::Alternation(_) => { + let mut exprs = vec![]; + while let Some(expr) = self.pop_alt_expr() { + self.pop().unwrap().unwrap_alternation_pipe(); + exprs.push(expr); + } + exprs.reverse(); + self.push(HirFrame::Expr(Hir::alternation(exprs))); + } + } + Ok(()) + } + + fn visit_alternation_in(&mut self) -> Result<()> { + self.push(HirFrame::AlternationBranch); + Ok(()) + } + + fn visit_class_set_item_pre( + &mut self, + ast: &ast::ClassSetItem, + ) -> Result<()> { + match *ast { + ast::ClassSetItem::Bracketed(_) => { + if self.flags().unicode() { + let cls = hir::ClassUnicode::empty(); + self.push(HirFrame::ClassUnicode(cls)); + } else { + let cls = hir::ClassBytes::empty(); + self.push(HirFrame::ClassBytes(cls)); + } + } + // We needn't handle the Union case here since the visitor will + // do it for us. 
+ _ => {} + } + Ok(()) + } + + fn visit_class_set_item_post( + &mut self, + ast: &ast::ClassSetItem, + ) -> Result<()> { + match *ast { + ast::ClassSetItem::Empty(_) => {} + ast::ClassSetItem::Literal(ref x) => { + if self.flags().unicode() { + let mut cls = self.pop().unwrap().unwrap_class_unicode(); + cls.push(hir::ClassUnicodeRange::new(x.c, x.c)); + self.push(HirFrame::ClassUnicode(cls)); + } else { + let mut cls = self.pop().unwrap().unwrap_class_bytes(); + let byte = self.class_literal_byte(x)?; + cls.push(hir::ClassBytesRange::new(byte, byte)); + self.push(HirFrame::ClassBytes(cls)); + } + } + ast::ClassSetItem::Range(ref x) => { + if self.flags().unicode() { + let mut cls = self.pop().unwrap().unwrap_class_unicode(); + cls.push(hir::ClassUnicodeRange::new(x.start.c, x.end.c)); + self.push(HirFrame::ClassUnicode(cls)); + } else { + let mut cls = self.pop().unwrap().unwrap_class_bytes(); + let start = self.class_literal_byte(&x.start)?; + let end = self.class_literal_byte(&x.end)?; + cls.push(hir::ClassBytesRange::new(start, end)); + self.push(HirFrame::ClassBytes(cls)); + } + } + ast::ClassSetItem::Ascii(ref x) => { + if self.flags().unicode() { + let xcls = self.hir_ascii_unicode_class(x)?; + let mut cls = self.pop().unwrap().unwrap_class_unicode(); + cls.union(&xcls); + self.push(HirFrame::ClassUnicode(cls)); + } else { + let xcls = self.hir_ascii_byte_class(x)?; + let mut cls = self.pop().unwrap().unwrap_class_bytes(); + cls.union(&xcls); + self.push(HirFrame::ClassBytes(cls)); + } + } + ast::ClassSetItem::Unicode(ref x) => { + let xcls = self.hir_unicode_class(x)?; + let mut cls = self.pop().unwrap().unwrap_class_unicode(); + cls.union(&xcls); + self.push(HirFrame::ClassUnicode(cls)); + } + ast::ClassSetItem::Perl(ref x) => { + if self.flags().unicode() { + let xcls = self.hir_perl_unicode_class(x)?; + let mut cls = self.pop().unwrap().unwrap_class_unicode(); + cls.union(&xcls); + self.push(HirFrame::ClassUnicode(cls)); + } else { + let xcls = self.hir_perl_byte_class(x)?; + let mut cls = self.pop().unwrap().unwrap_class_bytes(); + cls.union(&xcls); + self.push(HirFrame::ClassBytes(cls)); + } + } + ast::ClassSetItem::Bracketed(ref ast) => { + if self.flags().unicode() { + let mut cls1 = self.pop().unwrap().unwrap_class_unicode(); + self.unicode_fold_and_negate( + &ast.span, + ast.negated, + &mut cls1, + )?; + + let mut cls2 = self.pop().unwrap().unwrap_class_unicode(); + cls2.union(&cls1); + self.push(HirFrame::ClassUnicode(cls2)); + } else { + let mut cls1 = self.pop().unwrap().unwrap_class_bytes(); + self.bytes_fold_and_negate( + &ast.span, + ast.negated, + &mut cls1, + )?; + + let mut cls2 = self.pop().unwrap().unwrap_class_bytes(); + cls2.union(&cls1); + self.push(HirFrame::ClassBytes(cls2)); + } + } + // This is handled automatically by the visitor. 
+ ast::ClassSetItem::Union(_) => {} + } + Ok(()) + } + + fn visit_class_set_binary_op_pre( + &mut self, + _op: &ast::ClassSetBinaryOp, + ) -> Result<()> { + if self.flags().unicode() { + let cls = hir::ClassUnicode::empty(); + self.push(HirFrame::ClassUnicode(cls)); + } else { + let cls = hir::ClassBytes::empty(); + self.push(HirFrame::ClassBytes(cls)); + } + Ok(()) + } + + fn visit_class_set_binary_op_in( + &mut self, + _op: &ast::ClassSetBinaryOp, + ) -> Result<()> { + if self.flags().unicode() { + let cls = hir::ClassUnicode::empty(); + self.push(HirFrame::ClassUnicode(cls)); + } else { + let cls = hir::ClassBytes::empty(); + self.push(HirFrame::ClassBytes(cls)); + } + Ok(()) + } + + fn visit_class_set_binary_op_post( + &mut self, + op: &ast::ClassSetBinaryOp, + ) -> Result<()> { + use crate::ast::ClassSetBinaryOpKind::*; + + if self.flags().unicode() { + let mut rhs = self.pop().unwrap().unwrap_class_unicode(); + let mut lhs = self.pop().unwrap().unwrap_class_unicode(); + let mut cls = self.pop().unwrap().unwrap_class_unicode(); + if self.flags().case_insensitive() { + rhs.try_case_fold_simple().map_err(|_| { + self.error( + op.rhs.span().clone(), + ErrorKind::UnicodeCaseUnavailable, + ) + })?; + lhs.try_case_fold_simple().map_err(|_| { + self.error( + op.lhs.span().clone(), + ErrorKind::UnicodeCaseUnavailable, + ) + })?; + } + match op.kind { + Intersection => lhs.intersect(&rhs), + Difference => lhs.difference(&rhs), + SymmetricDifference => lhs.symmetric_difference(&rhs), + } + cls.union(&lhs); + self.push(HirFrame::ClassUnicode(cls)); + } else { + let mut rhs = self.pop().unwrap().unwrap_class_bytes(); + let mut lhs = self.pop().unwrap().unwrap_class_bytes(); + let mut cls = self.pop().unwrap().unwrap_class_bytes(); + if self.flags().case_insensitive() { + rhs.case_fold_simple(); + lhs.case_fold_simple(); + } + match op.kind { + Intersection => lhs.intersect(&rhs), + Difference => lhs.difference(&rhs), + SymmetricDifference => lhs.symmetric_difference(&rhs), + } + cls.union(&lhs); + self.push(HirFrame::ClassBytes(cls)); + } + Ok(()) + } +} + +/// The internal implementation of a translator. +/// +/// This type is responsible for carrying around the original pattern string, +/// which is not tied to the internal state of a translator. +/// +/// A TranslatorI exists for the time it takes to translate a single Ast. +#[derive(Clone, Debug)] +struct TranslatorI<'t, 'p> { + trans: &'t Translator, + pattern: &'p str, +} + +impl<'t, 'p> TranslatorI<'t, 'p> { + /// Build a new internal translator. + fn new(trans: &'t Translator, pattern: &'p str) -> TranslatorI<'t, 'p> { + TranslatorI { trans, pattern } + } + + /// Return a reference to the underlying translator. + fn trans(&self) -> &Translator { + &self.trans + } + + /// Push the given frame on to the call stack. + fn push(&self, frame: HirFrame) { + self.trans().stack.borrow_mut().push(frame); + } + + /// Push the given literal char on to the call stack. + /// + /// If the top-most element of the stack is a literal, then the char + /// is appended to the end of that literal. Otherwise, a new literal + /// containing just the given char is pushed to the top of the stack. 
+ fn push_char(&self, ch: char) { + let mut buf = [0; 4]; + let bytes = ch.encode_utf8(&mut buf).as_bytes(); + let mut stack = self.trans().stack.borrow_mut(); + if let Some(HirFrame::Literal(ref mut literal)) = stack.last_mut() { + literal.extend_from_slice(bytes); + } else { + stack.push(HirFrame::Literal(bytes.to_vec())); + } + } + + /// Push the given literal byte on to the call stack. + /// + /// If the top-most element of the stack is a literal, then the byte + /// is appended to the end of that literal. Otherwise, a new literal + /// containing just the given byte is pushed to the top of the stack. + fn push_byte(&self, byte: u8) { + let mut stack = self.trans().stack.borrow_mut(); + if let Some(HirFrame::Literal(ref mut literal)) = stack.last_mut() { + literal.push(byte); + } else { + stack.push(HirFrame::Literal(vec![byte])); + } + } + + /// Pop the top of the call stack. If the call stack is empty, return None. + fn pop(&self) -> Option { + self.trans().stack.borrow_mut().pop() + } + + /// Pop an HIR expression from the top of the stack for a concatenation. + /// + /// This returns None if the stack is empty or when a concat frame is seen. + /// Otherwise, it panics if it could not find an HIR expression. + fn pop_concat_expr(&self) -> Option { + let frame = self.pop()?; + match frame { + HirFrame::Concat => None, + HirFrame::Expr(expr) => Some(expr), + HirFrame::Literal(lit) => Some(Hir::literal(lit)), + HirFrame::ClassUnicode(_) => { + unreachable!("expected expr or concat, got Unicode class") + } + HirFrame::ClassBytes(_) => { + unreachable!("expected expr or concat, got byte class") + } + HirFrame::Repetition => { + unreachable!("expected expr or concat, got repetition") + } + HirFrame::Group { .. } => { + unreachable!("expected expr or concat, got group") + } + HirFrame::Alternation => { + unreachable!("expected expr or concat, got alt marker") + } + HirFrame::AlternationBranch => { + unreachable!("expected expr or concat, got alt branch marker") + } + } + } + + /// Pop an HIR expression from the top of the stack for an alternation. + /// + /// This returns None if the stack is empty or when an alternation frame is + /// seen. Otherwise, it panics if it could not find an HIR expression. + fn pop_alt_expr(&self) -> Option { + let frame = self.pop()?; + match frame { + HirFrame::Alternation => None, + HirFrame::Expr(expr) => Some(expr), + HirFrame::Literal(lit) => Some(Hir::literal(lit)), + HirFrame::ClassUnicode(_) => { + unreachable!("expected expr or alt, got Unicode class") + } + HirFrame::ClassBytes(_) => { + unreachable!("expected expr or alt, got byte class") + } + HirFrame::Repetition => { + unreachable!("expected expr or alt, got repetition") + } + HirFrame::Group { .. } => { + unreachable!("expected expr or alt, got group") + } + HirFrame::Concat => { + unreachable!("expected expr or alt, got concat marker") + } + HirFrame::AlternationBranch => { + unreachable!("expected expr or alt, got alt branch marker") + } + } + } + + /// Create a new error with the given span and error type. + fn error(&self, span: Span, kind: ErrorKind) -> Error { + Error { kind, pattern: self.pattern.to_string(), span } + } + + /// Return a copy of the active flags. + fn flags(&self) -> Flags { + self.trans().flags.get() + } + + /// Set the flags of this translator from the flags set in the given AST. + /// Then, return the old flags. 
+    fn set_flags(&self, ast_flags: &ast::Flags) -> Flags {
+        let old_flags = self.flags();
+        let mut new_flags = Flags::from_ast(ast_flags);
+        new_flags.merge(&old_flags);
+        self.trans().flags.set(new_flags);
+        old_flags
+    }
+
+    /// Convert an Ast literal to its scalar representation.
+    ///
+    /// When Unicode mode is enabled, then this always succeeds and returns a
+    /// `char` (Unicode scalar value).
+    ///
+    /// When Unicode mode is disabled, then a `char` will still be returned
+    /// whenever possible. A byte is returned only when invalid UTF-8 is
+    /// allowed and when the byte is not ASCII. Otherwise, a non-ASCII byte
+    /// will result in an error when invalid UTF-8 is not allowed.
+    fn ast_literal_to_scalar(
+        &self,
+        lit: &ast::Literal,
+    ) -> Result<Either<char, u8>> {
+        if self.flags().unicode() {
+            return Ok(Either::Left(lit.c));
+        }
+        let byte = match lit.byte() {
+            None => return Ok(Either::Left(lit.c)),
+            Some(byte) => byte,
+        };
+        if byte <= 0x7F {
+            return Ok(Either::Left(char::try_from(byte).unwrap()));
+        }
+        if self.trans().utf8 {
+            return Err(self.error(lit.span, ErrorKind::InvalidUtf8));
+        }
+        Ok(Either::Right(byte))
+    }
+
+    fn case_fold_char(&self, span: Span, c: char) -> Result<Option<Hir>> {
+        if !self.flags().case_insensitive() {
+            return Ok(None);
+        }
+        if self.flags().unicode() {
+            // If case folding won't do anything, then don't bother trying.
+            let map = unicode::SimpleCaseFolder::new()
+                .map(|f| f.overlaps(c, c))
+                .map_err(|_| {
+                    self.error(span, ErrorKind::UnicodeCaseUnavailable)
+                })?;
+            if !map {
+                return Ok(None);
+            }
+            let mut cls =
+                hir::ClassUnicode::new(vec![hir::ClassUnicodeRange::new(
+                    c, c,
+                )]);
+            cls.try_case_fold_simple().map_err(|_| {
+                self.error(span, ErrorKind::UnicodeCaseUnavailable)
+            })?;
+            Ok(Some(Hir::class(hir::Class::Unicode(cls))))
+        } else {
+            if !c.is_ascii() {
+                return Ok(None);
+            }
+            // If case folding won't do anything, then don't bother trying.
+            match c {
+                'A'..='Z' | 'a'..='z' => {}
+                _ => return Ok(None),
+            }
+            let mut cls =
+                hir::ClassBytes::new(vec![hir::ClassBytesRange::new(
+                    // OK because 'c.len_utf8() == 1' which in turn implies
+                    // that 'c' is ASCII.
+ u8::try_from(c).unwrap(), + u8::try_from(c).unwrap(), + )]); + cls.case_fold_simple(); + Ok(Some(Hir::class(hir::Class::Bytes(cls)))) + } + } + + fn hir_dot(&self, span: Span) -> Result { + let (utf8, lineterm, flags) = + (self.trans().utf8, self.trans().line_terminator, self.flags()); + if utf8 && (!flags.unicode() || !lineterm.is_ascii()) { + return Err(self.error(span, ErrorKind::InvalidUtf8)); + } + let dot = if flags.dot_matches_new_line() { + if flags.unicode() { + hir::Dot::AnyChar + } else { + hir::Dot::AnyByte + } + } else { + if flags.unicode() { + if flags.crlf() { + hir::Dot::AnyCharExceptCRLF + } else { + if !lineterm.is_ascii() { + return Err( + self.error(span, ErrorKind::InvalidLineTerminator) + ); + } + hir::Dot::AnyCharExcept(char::from(lineterm)) + } + } else { + if flags.crlf() { + hir::Dot::AnyByteExceptCRLF + } else { + hir::Dot::AnyByteExcept(lineterm) + } + } + }; + Ok(Hir::dot(dot)) + } + + fn hir_assertion(&self, asst: &ast::Assertion) -> Result { + let unicode = self.flags().unicode(); + let multi_line = self.flags().multi_line(); + let crlf = self.flags().crlf(); + Ok(match asst.kind { + ast::AssertionKind::StartLine => Hir::look(if multi_line { + if crlf { + hir::Look::StartCRLF + } else { + hir::Look::StartLF + } + } else { + hir::Look::Start + }), + ast::AssertionKind::EndLine => Hir::look(if multi_line { + if crlf { + hir::Look::EndCRLF + } else { + hir::Look::EndLF + } + } else { + hir::Look::End + }), + ast::AssertionKind::StartText => Hir::look(hir::Look::Start), + ast::AssertionKind::EndText => Hir::look(hir::Look::End), + ast::AssertionKind::WordBoundary => Hir::look(if unicode { + hir::Look::WordUnicode + } else { + hir::Look::WordAscii + }), + ast::AssertionKind::NotWordBoundary => Hir::look(if unicode { + hir::Look::WordUnicodeNegate + } else { + hir::Look::WordAsciiNegate + }), + ast::AssertionKind::WordBoundaryStart + | ast::AssertionKind::WordBoundaryStartAngle => { + Hir::look(if unicode { + hir::Look::WordStartUnicode + } else { + hir::Look::WordStartAscii + }) + } + ast::AssertionKind::WordBoundaryEnd + | ast::AssertionKind::WordBoundaryEndAngle => { + Hir::look(if unicode { + hir::Look::WordEndUnicode + } else { + hir::Look::WordEndAscii + }) + } + ast::AssertionKind::WordBoundaryStartHalf => { + Hir::look(if unicode { + hir::Look::WordStartHalfUnicode + } else { + hir::Look::WordStartHalfAscii + }) + } + ast::AssertionKind::WordBoundaryEndHalf => Hir::look(if unicode { + hir::Look::WordEndHalfUnicode + } else { + hir::Look::WordEndHalfAscii + }), + }) + } + + fn hir_capture(&self, group: &ast::Group, expr: Hir) -> Hir { + let (index, name) = match group.kind { + ast::GroupKind::CaptureIndex(index) => (index, None), + ast::GroupKind::CaptureName { ref name, .. } => { + (name.index, Some(name.name.clone().into_boxed_str())) + } + // The HIR doesn't need to use non-capturing groups, since the way + // in which the data type is defined handles this automatically. 
+ ast::GroupKind::NonCapturing(_) => return expr, + }; + Hir::capture(hir::Capture { index, name, sub: Box::new(expr) }) + } + + fn hir_repetition(&self, rep: &ast::Repetition, expr: Hir) -> Hir { + let (min, max) = match rep.op.kind { + ast::RepetitionKind::ZeroOrOne => (0, Some(1)), + ast::RepetitionKind::ZeroOrMore => (0, None), + ast::RepetitionKind::OneOrMore => (1, None), + ast::RepetitionKind::Range(ast::RepetitionRange::Exactly(m)) => { + (m, Some(m)) + } + ast::RepetitionKind::Range(ast::RepetitionRange::AtLeast(m)) => { + (m, None) + } + ast::RepetitionKind::Range(ast::RepetitionRange::Bounded( + m, + n, + )) => (m, Some(n)), + }; + let greedy = + if self.flags().swap_greed() { !rep.greedy } else { rep.greedy }; + Hir::repetition(hir::Repetition { + min, + max, + greedy, + sub: Box::new(expr), + }) + } + + fn hir_unicode_class( + &self, + ast_class: &ast::ClassUnicode, + ) -> Result { + use crate::ast::ClassUnicodeKind::*; + + if !self.flags().unicode() { + return Err( + self.error(ast_class.span, ErrorKind::UnicodeNotAllowed) + ); + } + let query = match ast_class.kind { + OneLetter(name) => ClassQuery::OneLetter(name), + Named(ref name) => ClassQuery::Binary(name), + NamedValue { ref name, ref value, .. } => ClassQuery::ByValue { + property_name: name, + property_value: value, + }, + }; + let mut result = self.convert_unicode_class_error( + &ast_class.span, + unicode::class(query), + ); + if let Ok(ref mut class) = result { + self.unicode_fold_and_negate( + &ast_class.span, + ast_class.negated, + class, + )?; + } + result + } + + fn hir_ascii_unicode_class( + &self, + ast: &ast::ClassAscii, + ) -> Result { + let mut cls = hir::ClassUnicode::new( + ascii_class_as_chars(&ast.kind) + .map(|(s, e)| hir::ClassUnicodeRange::new(s, e)), + ); + self.unicode_fold_and_negate(&ast.span, ast.negated, &mut cls)?; + Ok(cls) + } + + fn hir_ascii_byte_class( + &self, + ast: &ast::ClassAscii, + ) -> Result { + let mut cls = hir::ClassBytes::new( + ascii_class(&ast.kind) + .map(|(s, e)| hir::ClassBytesRange::new(s, e)), + ); + self.bytes_fold_and_negate(&ast.span, ast.negated, &mut cls)?; + Ok(cls) + } + + fn hir_perl_unicode_class( + &self, + ast_class: &ast::ClassPerl, + ) -> Result { + use crate::ast::ClassPerlKind::*; + + assert!(self.flags().unicode()); + let result = match ast_class.kind { + Digit => unicode::perl_digit(), + Space => unicode::perl_space(), + Word => unicode::perl_word(), + }; + let mut class = + self.convert_unicode_class_error(&ast_class.span, result)?; + // We needn't apply case folding here because the Perl Unicode classes + // are already closed under Unicode simple case folding. + if ast_class.negated { + class.negate(); + } + Ok(class) + } + + fn hir_perl_byte_class( + &self, + ast_class: &ast::ClassPerl, + ) -> Result { + use crate::ast::ClassPerlKind::*; + + assert!(!self.flags().unicode()); + let mut class = match ast_class.kind { + Digit => hir_ascii_class_bytes(&ast::ClassAsciiKind::Digit), + Space => hir_ascii_class_bytes(&ast::ClassAsciiKind::Space), + Word => hir_ascii_class_bytes(&ast::ClassAsciiKind::Word), + }; + // We needn't apply case folding here because the Perl ASCII classes + // are already closed (under ASCII case folding). + if ast_class.negated { + class.negate(); + } + // Negating a Perl byte class is likely to cause it to match invalid + // UTF-8. That's only OK if the translator is configured to allow such + // things. 
+        if self.trans().utf8 && !class.is_ascii() {
+            return Err(self.error(ast_class.span, ErrorKind::InvalidUtf8));
+        }
+        Ok(class)
+    }
+
+    /// Converts the given Unicode specific error to an HIR translation error.
+    ///
+    /// The span given should approximate the position at which an error would
+    /// occur.
+    fn convert_unicode_class_error(
+        &self,
+        span: &Span,
+        result: core::result::Result<hir::ClassUnicode, unicode::Error>,
+    ) -> Result<hir::ClassUnicode> {
+        result.map_err(|err| {
+            let sp = span.clone();
+            match err {
+                unicode::Error::PropertyNotFound => {
+                    self.error(sp, ErrorKind::UnicodePropertyNotFound)
+                }
+                unicode::Error::PropertyValueNotFound => {
+                    self.error(sp, ErrorKind::UnicodePropertyValueNotFound)
+                }
+                unicode::Error::PerlClassNotFound => {
+                    self.error(sp, ErrorKind::UnicodePerlClassNotFound)
+                }
+            }
+        })
+    }
+
+    fn unicode_fold_and_negate(
+        &self,
+        span: &Span,
+        negated: bool,
+        class: &mut hir::ClassUnicode,
+    ) -> Result<()> {
+        // Note that we must apply case folding before negation!
+        // Consider `(?i)[^x]`. If we applied negation first, then
+        // the result would be the character class that matched any
+        // Unicode scalar value.
+        if self.flags().case_insensitive() {
+            class.try_case_fold_simple().map_err(|_| {
+                self.error(span.clone(), ErrorKind::UnicodeCaseUnavailable)
+            })?;
+        }
+        if negated {
+            class.negate();
+        }
+        Ok(())
+    }
+
+    fn bytes_fold_and_negate(
+        &self,
+        span: &Span,
+        negated: bool,
+        class: &mut hir::ClassBytes,
+    ) -> Result<()> {
+        // Note that we must apply case folding before negation!
+        // Consider `(?i)[^x]`. If we applied negation first, then
+        // the result would be the character class that matched any
+        // Unicode scalar value.
+        if self.flags().case_insensitive() {
+            class.case_fold_simple();
+        }
+        if negated {
+            class.negate();
+        }
+        if self.trans().utf8 && !class.is_ascii() {
+            return Err(self.error(span.clone(), ErrorKind::InvalidUtf8));
+        }
+        Ok(())
+    }
+
+    /// Return a scalar byte value suitable for use as a literal in a byte
+    /// character class.
+    fn class_literal_byte(&self, ast: &ast::Literal) -> Result<u8> {
+        match self.ast_literal_to_scalar(ast)? {
+            Either::Right(byte) => Ok(byte),
+            Either::Left(ch) => {
+                if ch.is_ascii() {
+                    Ok(u8::try_from(ch).unwrap())
+                } else {
+                    // We can't feasibly support Unicode in
+                    // byte oriented classes. Byte classes don't
+                    // do Unicode case folding.
+                    Err(self.error(ast.span, ErrorKind::UnicodeNotAllowed))
+                }
+            }
+        }
+    }
+}
+
+/// A translator's representation of a regular expression's flags at any given
+/// moment in time.
+///
+/// Each flag can be in one of three states: absent, present but disabled or
+/// present but enabled.
+#[derive(Clone, Copy, Debug, Default)]
+struct Flags {
+    case_insensitive: Option<bool>,
+    multi_line: Option<bool>,
+    dot_matches_new_line: Option<bool>,
+    swap_greed: Option<bool>,
+    unicode: Option<bool>,
+    crlf: Option<bool>,
+    // Note that `ignore_whitespace` is omitted here because it is handled
+    // entirely in the parser.
+} + +impl Flags { + fn from_ast(ast: &ast::Flags) -> Flags { + let mut flags = Flags::default(); + let mut enable = true; + for item in &ast.items { + match item.kind { + ast::FlagsItemKind::Negation => { + enable = false; + } + ast::FlagsItemKind::Flag(ast::Flag::CaseInsensitive) => { + flags.case_insensitive = Some(enable); + } + ast::FlagsItemKind::Flag(ast::Flag::MultiLine) => { + flags.multi_line = Some(enable); + } + ast::FlagsItemKind::Flag(ast::Flag::DotMatchesNewLine) => { + flags.dot_matches_new_line = Some(enable); + } + ast::FlagsItemKind::Flag(ast::Flag::SwapGreed) => { + flags.swap_greed = Some(enable); + } + ast::FlagsItemKind::Flag(ast::Flag::Unicode) => { + flags.unicode = Some(enable); + } + ast::FlagsItemKind::Flag(ast::Flag::CRLF) => { + flags.crlf = Some(enable); + } + ast::FlagsItemKind::Flag(ast::Flag::IgnoreWhitespace) => {} + } + } + flags + } + + fn merge(&mut self, previous: &Flags) { + if self.case_insensitive.is_none() { + self.case_insensitive = previous.case_insensitive; + } + if self.multi_line.is_none() { + self.multi_line = previous.multi_line; + } + if self.dot_matches_new_line.is_none() { + self.dot_matches_new_line = previous.dot_matches_new_line; + } + if self.swap_greed.is_none() { + self.swap_greed = previous.swap_greed; + } + if self.unicode.is_none() { + self.unicode = previous.unicode; + } + if self.crlf.is_none() { + self.crlf = previous.crlf; + } + } + + fn case_insensitive(&self) -> bool { + self.case_insensitive.unwrap_or(false) + } + + fn multi_line(&self) -> bool { + self.multi_line.unwrap_or(false) + } + + fn dot_matches_new_line(&self) -> bool { + self.dot_matches_new_line.unwrap_or(false) + } + + fn swap_greed(&self) -> bool { + self.swap_greed.unwrap_or(false) + } + + fn unicode(&self) -> bool { + self.unicode.unwrap_or(true) + } + + fn crlf(&self) -> bool { + self.crlf.unwrap_or(false) + } +} + +fn hir_ascii_class_bytes(kind: &ast::ClassAsciiKind) -> hir::ClassBytes { + let ranges: Vec<_> = ascii_class(kind) + .map(|(s, e)| hir::ClassBytesRange::new(s, e)) + .collect(); + hir::ClassBytes::new(ranges) +} + +fn ascii_class(kind: &ast::ClassAsciiKind) -> impl Iterator { + use crate::ast::ClassAsciiKind::*; + + let slice: &'static [(u8, u8)] = match *kind { + Alnum => &[(b'0', b'9'), (b'A', b'Z'), (b'a', b'z')], + Alpha => &[(b'A', b'Z'), (b'a', b'z')], + Ascii => &[(b'\x00', b'\x7F')], + Blank => &[(b'\t', b'\t'), (b' ', b' ')], + Cntrl => &[(b'\x00', b'\x1F'), (b'\x7F', b'\x7F')], + Digit => &[(b'0', b'9')], + Graph => &[(b'!', b'~')], + Lower => &[(b'a', b'z')], + Print => &[(b' ', b'~')], + Punct => &[(b'!', b'/'), (b':', b'@'), (b'[', b'`'), (b'{', b'~')], + Space => &[ + (b'\t', b'\t'), + (b'\n', b'\n'), + (b'\x0B', b'\x0B'), + (b'\x0C', b'\x0C'), + (b'\r', b'\r'), + (b' ', b' '), + ], + Upper => &[(b'A', b'Z')], + Word => &[(b'0', b'9'), (b'A', b'Z'), (b'_', b'_'), (b'a', b'z')], + Xdigit => &[(b'0', b'9'), (b'A', b'F'), (b'a', b'f')], + }; + slice.iter().copied() +} + +fn ascii_class_as_chars( + kind: &ast::ClassAsciiKind, +) -> impl Iterator { + ascii_class(kind).map(|(s, e)| (char::from(s), char::from(e))) +} + +#[cfg(test)] +mod tests { + use crate::{ + ast::{parse::ParserBuilder, Position}, + hir::{Look, Properties}, + }; + + use super::*; + + // We create these errors to compare with real hir::Errors in the tests. + // We define equality between TestError and hir::Error to disregard the + // pattern string in hir::Error, which is annoying to provide in tests. 
+ #[derive(Clone, Debug)] + struct TestError { + span: Span, + kind: hir::ErrorKind, + } + + impl PartialEq for TestError { + fn eq(&self, other: &hir::Error) -> bool { + self.span == other.span && self.kind == other.kind + } + } + + impl PartialEq for hir::Error { + fn eq(&self, other: &TestError) -> bool { + self.span == other.span && self.kind == other.kind + } + } + + fn parse(pattern: &str) -> Ast { + ParserBuilder::new().octal(true).build().parse(pattern).unwrap() + } + + fn t(pattern: &str) -> Hir { + TranslatorBuilder::new() + .utf8(true) + .build() + .translate(pattern, &parse(pattern)) + .unwrap() + } + + fn t_err(pattern: &str) -> hir::Error { + TranslatorBuilder::new() + .utf8(true) + .build() + .translate(pattern, &parse(pattern)) + .unwrap_err() + } + + fn t_bytes(pattern: &str) -> Hir { + TranslatorBuilder::new() + .utf8(false) + .build() + .translate(pattern, &parse(pattern)) + .unwrap() + } + + fn props(pattern: &str) -> Properties { + t(pattern).properties().clone() + } + + fn props_bytes(pattern: &str) -> Properties { + t_bytes(pattern).properties().clone() + } + + fn hir_lit(s: &str) -> Hir { + hir_blit(s.as_bytes()) + } + + fn hir_blit(s: &[u8]) -> Hir { + Hir::literal(s) + } + + fn hir_capture(index: u32, expr: Hir) -> Hir { + Hir::capture(hir::Capture { index, name: None, sub: Box::new(expr) }) + } + + fn hir_capture_name(index: u32, name: &str, expr: Hir) -> Hir { + Hir::capture(hir::Capture { + index, + name: Some(name.into()), + sub: Box::new(expr), + }) + } + + fn hir_quest(greedy: bool, expr: Hir) -> Hir { + Hir::repetition(hir::Repetition { + min: 0, + max: Some(1), + greedy, + sub: Box::new(expr), + }) + } + + fn hir_star(greedy: bool, expr: Hir) -> Hir { + Hir::repetition(hir::Repetition { + min: 0, + max: None, + greedy, + sub: Box::new(expr), + }) + } + + fn hir_plus(greedy: bool, expr: Hir) -> Hir { + Hir::repetition(hir::Repetition { + min: 1, + max: None, + greedy, + sub: Box::new(expr), + }) + } + + fn hir_range(greedy: bool, min: u32, max: Option, expr: Hir) -> Hir { + Hir::repetition(hir::Repetition { + min, + max, + greedy, + sub: Box::new(expr), + }) + } + + fn hir_alt(alts: Vec) -> Hir { + Hir::alternation(alts) + } + + fn hir_cat(exprs: Vec) -> Hir { + Hir::concat(exprs) + } + + #[allow(dead_code)] + fn hir_uclass_query(query: ClassQuery<'_>) -> Hir { + Hir::class(hir::Class::Unicode(unicode::class(query).unwrap())) + } + + #[allow(dead_code)] + fn hir_uclass_perl_word() -> Hir { + Hir::class(hir::Class::Unicode(unicode::perl_word().unwrap())) + } + + fn hir_ascii_uclass(kind: &ast::ClassAsciiKind) -> Hir { + Hir::class(hir::Class::Unicode(hir::ClassUnicode::new( + ascii_class_as_chars(kind) + .map(|(s, e)| hir::ClassUnicodeRange::new(s, e)), + ))) + } + + fn hir_ascii_bclass(kind: &ast::ClassAsciiKind) -> Hir { + Hir::class(hir::Class::Bytes(hir::ClassBytes::new( + ascii_class(kind).map(|(s, e)| hir::ClassBytesRange::new(s, e)), + ))) + } + + fn hir_uclass(ranges: &[(char, char)]) -> Hir { + Hir::class(uclass(ranges)) + } + + fn hir_bclass(ranges: &[(u8, u8)]) -> Hir { + Hir::class(bclass(ranges)) + } + + fn hir_case_fold(expr: Hir) -> Hir { + match expr.into_kind() { + HirKind::Class(mut cls) => { + cls.case_fold_simple(); + Hir::class(cls) + } + _ => panic!("cannot case fold non-class Hir expr"), + } + } + + fn hir_negate(expr: Hir) -> Hir { + match expr.into_kind() { + HirKind::Class(mut cls) => { + cls.negate(); + Hir::class(cls) + } + _ => panic!("cannot negate non-class Hir expr"), + } + } + + fn uclass(ranges: &[(char, char)]) -> 
hir::Class { + let ranges: Vec = ranges + .iter() + .map(|&(s, e)| hir::ClassUnicodeRange::new(s, e)) + .collect(); + hir::Class::Unicode(hir::ClassUnicode::new(ranges)) + } + + fn bclass(ranges: &[(u8, u8)]) -> hir::Class { + let ranges: Vec = ranges + .iter() + .map(|&(s, e)| hir::ClassBytesRange::new(s, e)) + .collect(); + hir::Class::Bytes(hir::ClassBytes::new(ranges)) + } + + #[cfg(feature = "unicode-case")] + fn class_case_fold(mut cls: hir::Class) -> Hir { + cls.case_fold_simple(); + Hir::class(cls) + } + + fn class_negate(mut cls: hir::Class) -> Hir { + cls.negate(); + Hir::class(cls) + } + + #[allow(dead_code)] + fn hir_union(expr1: Hir, expr2: Hir) -> Hir { + use crate::hir::Class::{Bytes, Unicode}; + + match (expr1.into_kind(), expr2.into_kind()) { + (HirKind::Class(Unicode(mut c1)), HirKind::Class(Unicode(c2))) => { + c1.union(&c2); + Hir::class(hir::Class::Unicode(c1)) + } + (HirKind::Class(Bytes(mut c1)), HirKind::Class(Bytes(c2))) => { + c1.union(&c2); + Hir::class(hir::Class::Bytes(c1)) + } + _ => panic!("cannot union non-class Hir exprs"), + } + } + + #[allow(dead_code)] + fn hir_difference(expr1: Hir, expr2: Hir) -> Hir { + use crate::hir::Class::{Bytes, Unicode}; + + match (expr1.into_kind(), expr2.into_kind()) { + (HirKind::Class(Unicode(mut c1)), HirKind::Class(Unicode(c2))) => { + c1.difference(&c2); + Hir::class(hir::Class::Unicode(c1)) + } + (HirKind::Class(Bytes(mut c1)), HirKind::Class(Bytes(c2))) => { + c1.difference(&c2); + Hir::class(hir::Class::Bytes(c1)) + } + _ => panic!("cannot difference non-class Hir exprs"), + } + } + + fn hir_look(look: hir::Look) -> Hir { + Hir::look(look) + } + + #[test] + fn empty() { + assert_eq!(t(""), Hir::empty()); + assert_eq!(t("(?i)"), Hir::empty()); + assert_eq!(t("()"), hir_capture(1, Hir::empty())); + assert_eq!(t("(?:)"), Hir::empty()); + assert_eq!(t("(?P)"), hir_capture_name(1, "wat", Hir::empty())); + assert_eq!(t("|"), hir_alt(vec![Hir::empty(), Hir::empty()])); + assert_eq!( + t("()|()"), + hir_alt(vec![ + hir_capture(1, Hir::empty()), + hir_capture(2, Hir::empty()), + ]) + ); + assert_eq!( + t("(|b)"), + hir_capture(1, hir_alt(vec![Hir::empty(), hir_lit("b"),])) + ); + assert_eq!( + t("(a|)"), + hir_capture(1, hir_alt(vec![hir_lit("a"), Hir::empty(),])) + ); + assert_eq!( + t("(a||c)"), + hir_capture( + 1, + hir_alt(vec![hir_lit("a"), Hir::empty(), hir_lit("c"),]) + ) + ); + assert_eq!( + t("(||)"), + hir_capture( + 1, + hir_alt(vec![Hir::empty(), Hir::empty(), Hir::empty(),]) + ) + ); + } + + #[test] + fn literal() { + assert_eq!(t("a"), hir_lit("a")); + assert_eq!(t("(?-u)a"), hir_lit("a")); + assert_eq!(t("☃"), hir_lit("☃")); + assert_eq!(t("abcd"), hir_lit("abcd")); + + assert_eq!(t_bytes("(?-u)a"), hir_lit("a")); + assert_eq!(t_bytes("(?-u)\x61"), hir_lit("a")); + assert_eq!(t_bytes(r"(?-u)\x61"), hir_lit("a")); + assert_eq!(t_bytes(r"(?-u)\xFF"), hir_blit(b"\xFF")); + + assert_eq!(t("(?-u)☃"), hir_lit("☃")); + assert_eq!( + t_err(r"(?-u)\xFF"), + TestError { + kind: hir::ErrorKind::InvalidUtf8, + span: Span::new( + Position::new(5, 1, 6), + Position::new(9, 1, 10) + ), + } + ); + } + + #[test] + fn literal_case_insensitive() { + #[cfg(feature = "unicode-case")] + assert_eq!(t("(?i)a"), hir_uclass(&[('A', 'A'), ('a', 'a'),])); + #[cfg(feature = "unicode-case")] + assert_eq!(t("(?i:a)"), hir_uclass(&[('A', 'A'), ('a', 'a')])); + #[cfg(feature = "unicode-case")] + assert_eq!( + t("a(?i)a(?-i)a"), + hir_cat(vec![ + hir_lit("a"), + hir_uclass(&[('A', 'A'), ('a', 'a')]), + hir_lit("a"), + ]) + ); + #[cfg(feature = 
"unicode-case")] + assert_eq!( + t("(?i)ab@c"), + hir_cat(vec![ + hir_uclass(&[('A', 'A'), ('a', 'a')]), + hir_uclass(&[('B', 'B'), ('b', 'b')]), + hir_lit("@"), + hir_uclass(&[('C', 'C'), ('c', 'c')]), + ]) + ); + #[cfg(feature = "unicode-case")] + assert_eq!( + t("(?i)β"), + hir_uclass(&[('Β', 'Β'), ('β', 'β'), ('ϐ', 'ϐ'),]) + ); + + assert_eq!(t("(?i-u)a"), hir_bclass(&[(b'A', b'A'), (b'a', b'a'),])); + #[cfg(feature = "unicode-case")] + assert_eq!( + t("(?-u)a(?i)a(?-i)a"), + hir_cat(vec![ + hir_lit("a"), + hir_bclass(&[(b'A', b'A'), (b'a', b'a')]), + hir_lit("a"), + ]) + ); + assert_eq!( + t("(?i-u)ab@c"), + hir_cat(vec![ + hir_bclass(&[(b'A', b'A'), (b'a', b'a')]), + hir_bclass(&[(b'B', b'B'), (b'b', b'b')]), + hir_lit("@"), + hir_bclass(&[(b'C', b'C'), (b'c', b'c')]), + ]) + ); + + assert_eq!( + t_bytes("(?i-u)a"), + hir_bclass(&[(b'A', b'A'), (b'a', b'a'),]) + ); + assert_eq!( + t_bytes("(?i-u)\x61"), + hir_bclass(&[(b'A', b'A'), (b'a', b'a'),]) + ); + assert_eq!( + t_bytes(r"(?i-u)\x61"), + hir_bclass(&[(b'A', b'A'), (b'a', b'a'),]) + ); + assert_eq!(t_bytes(r"(?i-u)\xFF"), hir_blit(b"\xFF")); + + assert_eq!(t("(?i-u)β"), hir_lit("β"),); + } + + #[test] + fn dot() { + assert_eq!( + t("."), + hir_uclass(&[('\0', '\t'), ('\x0B', '\u{10FFFF}')]) + ); + assert_eq!( + t("(?R)."), + hir_uclass(&[ + ('\0', '\t'), + ('\x0B', '\x0C'), + ('\x0E', '\u{10FFFF}'), + ]) + ); + assert_eq!(t("(?s)."), hir_uclass(&[('\0', '\u{10FFFF}')])); + assert_eq!(t("(?Rs)."), hir_uclass(&[('\0', '\u{10FFFF}')])); + assert_eq!( + t_bytes("(?-u)."), + hir_bclass(&[(b'\0', b'\t'), (b'\x0B', b'\xFF')]) + ); + assert_eq!( + t_bytes("(?R-u)."), + hir_bclass(&[ + (b'\0', b'\t'), + (b'\x0B', b'\x0C'), + (b'\x0E', b'\xFF'), + ]) + ); + assert_eq!(t_bytes("(?s-u)."), hir_bclass(&[(b'\0', b'\xFF'),])); + assert_eq!(t_bytes("(?Rs-u)."), hir_bclass(&[(b'\0', b'\xFF'),])); + + // If invalid UTF-8 isn't allowed, then non-Unicode `.` isn't allowed. 
+ assert_eq!( + t_err("(?-u)."), + TestError { + kind: hir::ErrorKind::InvalidUtf8, + span: Span::new( + Position::new(5, 1, 6), + Position::new(6, 1, 7) + ), + } + ); + assert_eq!( + t_err("(?R-u)."), + TestError { + kind: hir::ErrorKind::InvalidUtf8, + span: Span::new( + Position::new(6, 1, 7), + Position::new(7, 1, 8) + ), + } + ); + assert_eq!( + t_err("(?s-u)."), + TestError { + kind: hir::ErrorKind::InvalidUtf8, + span: Span::new( + Position::new(6, 1, 7), + Position::new(7, 1, 8) + ), + } + ); + assert_eq!( + t_err("(?Rs-u)."), + TestError { + kind: hir::ErrorKind::InvalidUtf8, + span: Span::new( + Position::new(7, 1, 8), + Position::new(8, 1, 9) + ), + } + ); + } + + #[test] + fn assertions() { + assert_eq!(t("^"), hir_look(hir::Look::Start)); + assert_eq!(t("$"), hir_look(hir::Look::End)); + assert_eq!(t(r"\A"), hir_look(hir::Look::Start)); + assert_eq!(t(r"\z"), hir_look(hir::Look::End)); + assert_eq!(t("(?m)^"), hir_look(hir::Look::StartLF)); + assert_eq!(t("(?m)$"), hir_look(hir::Look::EndLF)); + assert_eq!(t(r"(?m)\A"), hir_look(hir::Look::Start)); + assert_eq!(t(r"(?m)\z"), hir_look(hir::Look::End)); + + assert_eq!(t(r"\b"), hir_look(hir::Look::WordUnicode)); + assert_eq!(t(r"\B"), hir_look(hir::Look::WordUnicodeNegate)); + assert_eq!(t(r"(?-u)\b"), hir_look(hir::Look::WordAscii)); + assert_eq!(t(r"(?-u)\B"), hir_look(hir::Look::WordAsciiNegate)); + } + + #[test] + fn group() { + assert_eq!(t("(a)"), hir_capture(1, hir_lit("a"))); + assert_eq!( + t("(a)(b)"), + hir_cat(vec![ + hir_capture(1, hir_lit("a")), + hir_capture(2, hir_lit("b")), + ]) + ); + assert_eq!( + t("(a)|(b)"), + hir_alt(vec![ + hir_capture(1, hir_lit("a")), + hir_capture(2, hir_lit("b")), + ]) + ); + assert_eq!(t("(?P)"), hir_capture_name(1, "foo", Hir::empty())); + assert_eq!(t("(?Pa)"), hir_capture_name(1, "foo", hir_lit("a"))); + assert_eq!( + t("(?Pa)(?Pb)"), + hir_cat(vec![ + hir_capture_name(1, "foo", hir_lit("a")), + hir_capture_name(2, "bar", hir_lit("b")), + ]) + ); + assert_eq!(t("(?:)"), Hir::empty()); + assert_eq!(t("(?:a)"), hir_lit("a")); + assert_eq!( + t("(?:a)(b)"), + hir_cat(vec![hir_lit("a"), hir_capture(1, hir_lit("b")),]) + ); + assert_eq!( + t("(a)(?:b)(c)"), + hir_cat(vec![ + hir_capture(1, hir_lit("a")), + hir_lit("b"), + hir_capture(2, hir_lit("c")), + ]) + ); + assert_eq!( + t("(a)(?Pb)(c)"), + hir_cat(vec![ + hir_capture(1, hir_lit("a")), + hir_capture_name(2, "foo", hir_lit("b")), + hir_capture(3, hir_lit("c")), + ]) + ); + assert_eq!(t("()"), hir_capture(1, Hir::empty())); + assert_eq!(t("((?i))"), hir_capture(1, Hir::empty())); + assert_eq!(t("((?x))"), hir_capture(1, Hir::empty())); + assert_eq!( + t("(((?x)))"), + hir_capture(1, hir_capture(2, Hir::empty())) + ); + } + + #[test] + fn line_anchors() { + assert_eq!(t("^"), hir_look(hir::Look::Start)); + assert_eq!(t("$"), hir_look(hir::Look::End)); + assert_eq!(t(r"\A"), hir_look(hir::Look::Start)); + assert_eq!(t(r"\z"), hir_look(hir::Look::End)); + + assert_eq!(t(r"(?m)\A"), hir_look(hir::Look::Start)); + assert_eq!(t(r"(?m)\z"), hir_look(hir::Look::End)); + assert_eq!(t("(?m)^"), hir_look(hir::Look::StartLF)); + assert_eq!(t("(?m)$"), hir_look(hir::Look::EndLF)); + + assert_eq!(t(r"(?R)\A"), hir_look(hir::Look::Start)); + assert_eq!(t(r"(?R)\z"), hir_look(hir::Look::End)); + assert_eq!(t("(?R)^"), hir_look(hir::Look::Start)); + assert_eq!(t("(?R)$"), hir_look(hir::Look::End)); + + assert_eq!(t(r"(?Rm)\A"), hir_look(hir::Look::Start)); + assert_eq!(t(r"(?Rm)\z"), hir_look(hir::Look::End)); + assert_eq!(t("(?Rm)^"), 
hir_look(hir::Look::StartCRLF)); + assert_eq!(t("(?Rm)$"), hir_look(hir::Look::EndCRLF)); + } + + #[test] + fn flags() { + #[cfg(feature = "unicode-case")] + assert_eq!( + t("(?i:a)a"), + hir_cat( + vec![hir_uclass(&[('A', 'A'), ('a', 'a')]), hir_lit("a"),] + ) + ); + assert_eq!( + t("(?i-u:a)β"), + hir_cat(vec![ + hir_bclass(&[(b'A', b'A'), (b'a', b'a')]), + hir_lit("β"), + ]) + ); + assert_eq!( + t("(?:(?i-u)a)b"), + hir_cat(vec![ + hir_bclass(&[(b'A', b'A'), (b'a', b'a')]), + hir_lit("b"), + ]) + ); + assert_eq!( + t("((?i-u)a)b"), + hir_cat(vec![ + hir_capture(1, hir_bclass(&[(b'A', b'A'), (b'a', b'a')])), + hir_lit("b"), + ]) + ); + #[cfg(feature = "unicode-case")] + assert_eq!( + t("(?i)(?-i:a)a"), + hir_cat( + vec![hir_lit("a"), hir_uclass(&[('A', 'A'), ('a', 'a')]),] + ) + ); + #[cfg(feature = "unicode-case")] + assert_eq!( + t("(?im)a^"), + hir_cat(vec![ + hir_uclass(&[('A', 'A'), ('a', 'a')]), + hir_look(hir::Look::StartLF), + ]) + ); + #[cfg(feature = "unicode-case")] + assert_eq!( + t("(?im)a^(?i-m)a^"), + hir_cat(vec![ + hir_uclass(&[('A', 'A'), ('a', 'a')]), + hir_look(hir::Look::StartLF), + hir_uclass(&[('A', 'A'), ('a', 'a')]), + hir_look(hir::Look::Start), + ]) + ); + assert_eq!( + t("(?U)a*a*?(?-U)a*a*?"), + hir_cat(vec![ + hir_star(false, hir_lit("a")), + hir_star(true, hir_lit("a")), + hir_star(true, hir_lit("a")), + hir_star(false, hir_lit("a")), + ]) + ); + #[cfg(feature = "unicode-case")] + assert_eq!( + t("(?:a(?i)a)a"), + hir_cat(vec![ + hir_cat(vec![ + hir_lit("a"), + hir_uclass(&[('A', 'A'), ('a', 'a')]), + ]), + hir_lit("a"), + ]) + ); + #[cfg(feature = "unicode-case")] + assert_eq!( + t("(?i)(?:a(?-i)a)a"), + hir_cat(vec![ + hir_cat(vec![ + hir_uclass(&[('A', 'A'), ('a', 'a')]), + hir_lit("a"), + ]), + hir_uclass(&[('A', 'A'), ('a', 'a')]), + ]) + ); + } + + #[test] + fn escape() { + assert_eq!( + t(r"\\\.\+\*\?\(\)\|\[\]\{\}\^\$\#"), + hir_lit(r"\.+*?()|[]{}^$#") + ); + } + + #[test] + fn repetition() { + assert_eq!(t("a?"), hir_quest(true, hir_lit("a"))); + assert_eq!(t("a*"), hir_star(true, hir_lit("a"))); + assert_eq!(t("a+"), hir_plus(true, hir_lit("a"))); + assert_eq!(t("a??"), hir_quest(false, hir_lit("a"))); + assert_eq!(t("a*?"), hir_star(false, hir_lit("a"))); + assert_eq!(t("a+?"), hir_plus(false, hir_lit("a"))); + + assert_eq!(t("a{1}"), hir_range(true, 1, Some(1), hir_lit("a"),)); + assert_eq!(t("a{1,}"), hir_range(true, 1, None, hir_lit("a"),)); + assert_eq!(t("a{1,2}"), hir_range(true, 1, Some(2), hir_lit("a"),)); + assert_eq!(t("a{1}?"), hir_range(false, 1, Some(1), hir_lit("a"),)); + assert_eq!(t("a{1,}?"), hir_range(false, 1, None, hir_lit("a"),)); + assert_eq!(t("a{1,2}?"), hir_range(false, 1, Some(2), hir_lit("a"),)); + + assert_eq!( + t("ab?"), + hir_cat(vec![hir_lit("a"), hir_quest(true, hir_lit("b")),]) + ); + assert_eq!(t("(ab)?"), hir_quest(true, hir_capture(1, hir_lit("ab")))); + assert_eq!( + t("a|b?"), + hir_alt(vec![hir_lit("a"), hir_quest(true, hir_lit("b")),]) + ); + } + + #[test] + fn cat_alt() { + let a = || hir_look(hir::Look::Start); + let b = || hir_look(hir::Look::End); + let c = || hir_look(hir::Look::WordUnicode); + let d = || hir_look(hir::Look::WordUnicodeNegate); + + assert_eq!(t("(^$)"), hir_capture(1, hir_cat(vec![a(), b()]))); + assert_eq!(t("^|$"), hir_alt(vec![a(), b()])); + assert_eq!(t(r"^|$|\b"), hir_alt(vec![a(), b(), c()])); + assert_eq!( + t(r"^$|$\b|\b\B"), + hir_alt(vec![ + hir_cat(vec![a(), b()]), + hir_cat(vec![b(), c()]), + hir_cat(vec![c(), d()]), + ]) + ); + assert_eq!(t("(^|$)"), hir_capture(1, 
hir_alt(vec![a(), b()]))); + assert_eq!( + t(r"(^|$|\b)"), + hir_capture(1, hir_alt(vec![a(), b(), c()])) + ); + assert_eq!( + t(r"(^$|$\b|\b\B)"), + hir_capture( + 1, + hir_alt(vec![ + hir_cat(vec![a(), b()]), + hir_cat(vec![b(), c()]), + hir_cat(vec![c(), d()]), + ]) + ) + ); + assert_eq!( + t(r"(^$|($\b|(\b\B)))"), + hir_capture( + 1, + hir_alt(vec![ + hir_cat(vec![a(), b()]), + hir_capture( + 2, + hir_alt(vec![ + hir_cat(vec![b(), c()]), + hir_capture(3, hir_cat(vec![c(), d()])), + ]) + ), + ]) + ) + ); + } + + // Tests the HIR transformation of things like '[a-z]|[A-Z]' into + // '[A-Za-z]'. In other words, an alternation of just classes is always + // equivalent to a single class corresponding to the union of the branches + // in that class. (Unless some branches match invalid UTF-8 and others + // match non-ASCII Unicode.) + #[test] + fn cat_class_flattened() { + assert_eq!(t(r"[a-z]|[A-Z]"), hir_uclass(&[('A', 'Z'), ('a', 'z')])); + // Combining all of the letter properties should give us the one giant + // letter property. + #[cfg(feature = "unicode-gencat")] + assert_eq!( + t(r"(?x) + \p{Lowercase_Letter} + |\p{Uppercase_Letter} + |\p{Titlecase_Letter} + |\p{Modifier_Letter} + |\p{Other_Letter} + "), + hir_uclass_query(ClassQuery::Binary("letter")) + ); + // Byte classes that can truly match invalid UTF-8 cannot be combined + // with Unicode classes. + assert_eq!( + t_bytes(r"[Δδ]|(?-u:[\x90-\xFF])|[Λλ]"), + hir_alt(vec![ + hir_uclass(&[('Δ', 'Δ'), ('δ', 'δ')]), + hir_bclass(&[(b'\x90', b'\xFF')]), + hir_uclass(&[('Λ', 'Λ'), ('λ', 'λ')]), + ]) + ); + // Byte classes on their own can be combined, even if some are ASCII + // and others are invalid UTF-8. + assert_eq!( + t_bytes(r"[a-z]|(?-u:[\x90-\xFF])|[A-Z]"), + hir_bclass(&[(b'A', b'Z'), (b'a', b'z'), (b'\x90', b'\xFF')]), + ); + } + + #[test] + fn class_ascii() { + assert_eq!( + t("[[:alnum:]]"), + hir_ascii_uclass(&ast::ClassAsciiKind::Alnum) + ); + assert_eq!( + t("[[:alpha:]]"), + hir_ascii_uclass(&ast::ClassAsciiKind::Alpha) + ); + assert_eq!( + t("[[:ascii:]]"), + hir_ascii_uclass(&ast::ClassAsciiKind::Ascii) + ); + assert_eq!( + t("[[:blank:]]"), + hir_ascii_uclass(&ast::ClassAsciiKind::Blank) + ); + assert_eq!( + t("[[:cntrl:]]"), + hir_ascii_uclass(&ast::ClassAsciiKind::Cntrl) + ); + assert_eq!( + t("[[:digit:]]"), + hir_ascii_uclass(&ast::ClassAsciiKind::Digit) + ); + assert_eq!( + t("[[:graph:]]"), + hir_ascii_uclass(&ast::ClassAsciiKind::Graph) + ); + assert_eq!( + t("[[:lower:]]"), + hir_ascii_uclass(&ast::ClassAsciiKind::Lower) + ); + assert_eq!( + t("[[:print:]]"), + hir_ascii_uclass(&ast::ClassAsciiKind::Print) + ); + assert_eq!( + t("[[:punct:]]"), + hir_ascii_uclass(&ast::ClassAsciiKind::Punct) + ); + assert_eq!( + t("[[:space:]]"), + hir_ascii_uclass(&ast::ClassAsciiKind::Space) + ); + assert_eq!( + t("[[:upper:]]"), + hir_ascii_uclass(&ast::ClassAsciiKind::Upper) + ); + assert_eq!( + t("[[:word:]]"), + hir_ascii_uclass(&ast::ClassAsciiKind::Word) + ); + assert_eq!( + t("[[:xdigit:]]"), + hir_ascii_uclass(&ast::ClassAsciiKind::Xdigit) + ); + + assert_eq!( + t("[[:^lower:]]"), + hir_negate(hir_ascii_uclass(&ast::ClassAsciiKind::Lower)) + ); + #[cfg(feature = "unicode-case")] + assert_eq!( + t("(?i)[[:lower:]]"), + hir_uclass(&[ + ('A', 'Z'), + ('a', 'z'), + ('\u{17F}', '\u{17F}'), + ('\u{212A}', '\u{212A}'), + ]) + ); + + assert_eq!( + t("(?-u)[[:lower:]]"), + hir_ascii_bclass(&ast::ClassAsciiKind::Lower) + ); + assert_eq!( + t("(?i-u)[[:lower:]]"), + hir_case_fold(hir_ascii_bclass(&ast::ClassAsciiKind::Lower)) 
+ ); + + assert_eq!( + t_err("(?-u)[[:^lower:]]"), + TestError { + kind: hir::ErrorKind::InvalidUtf8, + span: Span::new( + Position::new(6, 1, 7), + Position::new(16, 1, 17) + ), + } + ); + assert_eq!( + t_err("(?i-u)[[:^lower:]]"), + TestError { + kind: hir::ErrorKind::InvalidUtf8, + span: Span::new( + Position::new(7, 1, 8), + Position::new(17, 1, 18) + ), + } + ); + } + + #[test] + fn class_ascii_multiple() { + // See: https://github.com/rust-lang/regex/issues/680 + assert_eq!( + t("[[:alnum:][:^ascii:]]"), + hir_union( + hir_ascii_uclass(&ast::ClassAsciiKind::Alnum), + hir_uclass(&[('\u{80}', '\u{10FFFF}')]), + ), + ); + assert_eq!( + t_bytes("(?-u)[[:alnum:][:^ascii:]]"), + hir_union( + hir_ascii_bclass(&ast::ClassAsciiKind::Alnum), + hir_bclass(&[(0x80, 0xFF)]), + ), + ); + } + + #[test] + #[cfg(feature = "unicode-perl")] + fn class_perl_unicode() { + // Unicode + assert_eq!(t(r"\d"), hir_uclass_query(ClassQuery::Binary("digit"))); + assert_eq!(t(r"\s"), hir_uclass_query(ClassQuery::Binary("space"))); + assert_eq!(t(r"\w"), hir_uclass_perl_word()); + #[cfg(feature = "unicode-case")] + assert_eq!( + t(r"(?i)\d"), + hir_uclass_query(ClassQuery::Binary("digit")) + ); + #[cfg(feature = "unicode-case")] + assert_eq!( + t(r"(?i)\s"), + hir_uclass_query(ClassQuery::Binary("space")) + ); + #[cfg(feature = "unicode-case")] + assert_eq!(t(r"(?i)\w"), hir_uclass_perl_word()); + + // Unicode, negated + assert_eq!( + t(r"\D"), + hir_negate(hir_uclass_query(ClassQuery::Binary("digit"))) + ); + assert_eq!( + t(r"\S"), + hir_negate(hir_uclass_query(ClassQuery::Binary("space"))) + ); + assert_eq!(t(r"\W"), hir_negate(hir_uclass_perl_word())); + #[cfg(feature = "unicode-case")] + assert_eq!( + t(r"(?i)\D"), + hir_negate(hir_uclass_query(ClassQuery::Binary("digit"))) + ); + #[cfg(feature = "unicode-case")] + assert_eq!( + t(r"(?i)\S"), + hir_negate(hir_uclass_query(ClassQuery::Binary("space"))) + ); + #[cfg(feature = "unicode-case")] + assert_eq!(t(r"(?i)\W"), hir_negate(hir_uclass_perl_word())); + } + + #[test] + fn class_perl_ascii() { + // ASCII only + assert_eq!( + t(r"(?-u)\d"), + hir_ascii_bclass(&ast::ClassAsciiKind::Digit) + ); + assert_eq!( + t(r"(?-u)\s"), + hir_ascii_bclass(&ast::ClassAsciiKind::Space) + ); + assert_eq!( + t(r"(?-u)\w"), + hir_ascii_bclass(&ast::ClassAsciiKind::Word) + ); + assert_eq!( + t(r"(?i-u)\d"), + hir_ascii_bclass(&ast::ClassAsciiKind::Digit) + ); + assert_eq!( + t(r"(?i-u)\s"), + hir_ascii_bclass(&ast::ClassAsciiKind::Space) + ); + assert_eq!( + t(r"(?i-u)\w"), + hir_ascii_bclass(&ast::ClassAsciiKind::Word) + ); + + // ASCII only, negated + assert_eq!( + t_bytes(r"(?-u)\D"), + hir_negate(hir_ascii_bclass(&ast::ClassAsciiKind::Digit)) + ); + assert_eq!( + t_bytes(r"(?-u)\S"), + hir_negate(hir_ascii_bclass(&ast::ClassAsciiKind::Space)) + ); + assert_eq!( + t_bytes(r"(?-u)\W"), + hir_negate(hir_ascii_bclass(&ast::ClassAsciiKind::Word)) + ); + assert_eq!( + t_bytes(r"(?i-u)\D"), + hir_negate(hir_ascii_bclass(&ast::ClassAsciiKind::Digit)) + ); + assert_eq!( + t_bytes(r"(?i-u)\S"), + hir_negate(hir_ascii_bclass(&ast::ClassAsciiKind::Space)) + ); + assert_eq!( + t_bytes(r"(?i-u)\W"), + hir_negate(hir_ascii_bclass(&ast::ClassAsciiKind::Word)) + ); + + // ASCII only, negated, with UTF-8 mode enabled. + // In this case, negating any Perl class results in an error because + // all such classes can match invalid UTF-8. 
+ assert_eq!( + t_err(r"(?-u)\D"), + TestError { + kind: hir::ErrorKind::InvalidUtf8, + span: Span::new( + Position::new(5, 1, 6), + Position::new(7, 1, 8), + ), + }, + ); + assert_eq!( + t_err(r"(?-u)\S"), + TestError { + kind: hir::ErrorKind::InvalidUtf8, + span: Span::new( + Position::new(5, 1, 6), + Position::new(7, 1, 8), + ), + }, + ); + assert_eq!( + t_err(r"(?-u)\W"), + TestError { + kind: hir::ErrorKind::InvalidUtf8, + span: Span::new( + Position::new(5, 1, 6), + Position::new(7, 1, 8), + ), + }, + ); + assert_eq!( + t_err(r"(?i-u)\D"), + TestError { + kind: hir::ErrorKind::InvalidUtf8, + span: Span::new( + Position::new(6, 1, 7), + Position::new(8, 1, 9), + ), + }, + ); + assert_eq!( + t_err(r"(?i-u)\S"), + TestError { + kind: hir::ErrorKind::InvalidUtf8, + span: Span::new( + Position::new(6, 1, 7), + Position::new(8, 1, 9), + ), + }, + ); + assert_eq!( + t_err(r"(?i-u)\W"), + TestError { + kind: hir::ErrorKind::InvalidUtf8, + span: Span::new( + Position::new(6, 1, 7), + Position::new(8, 1, 9), + ), + }, + ); + } + + #[test] + #[cfg(not(feature = "unicode-perl"))] + fn class_perl_word_disabled() { + assert_eq!( + t_err(r"\w"), + TestError { + kind: hir::ErrorKind::UnicodePerlClassNotFound, + span: Span::new( + Position::new(0, 1, 1), + Position::new(2, 1, 3) + ), + } + ); + } + + #[test] + #[cfg(all(not(feature = "unicode-perl"), not(feature = "unicode-bool")))] + fn class_perl_space_disabled() { + assert_eq!( + t_err(r"\s"), + TestError { + kind: hir::ErrorKind::UnicodePerlClassNotFound, + span: Span::new( + Position::new(0, 1, 1), + Position::new(2, 1, 3) + ), + } + ); + } + + #[test] + #[cfg(all( + not(feature = "unicode-perl"), + not(feature = "unicode-gencat") + ))] + fn class_perl_digit_disabled() { + assert_eq!( + t_err(r"\d"), + TestError { + kind: hir::ErrorKind::UnicodePerlClassNotFound, + span: Span::new( + Position::new(0, 1, 1), + Position::new(2, 1, 3) + ), + } + ); + } + + #[test] + #[cfg(feature = "unicode-gencat")] + fn class_unicode_gencat() { + assert_eq!(t(r"\pZ"), hir_uclass_query(ClassQuery::Binary("Z"))); + assert_eq!(t(r"\pz"), hir_uclass_query(ClassQuery::Binary("Z"))); + assert_eq!( + t(r"\p{Separator}"), + hir_uclass_query(ClassQuery::Binary("Z")) + ); + assert_eq!( + t(r"\p{se PaRa ToR}"), + hir_uclass_query(ClassQuery::Binary("Z")) + ); + assert_eq!( + t(r"\p{gc:Separator}"), + hir_uclass_query(ClassQuery::Binary("Z")) + ); + assert_eq!( + t(r"\p{gc=Separator}"), + hir_uclass_query(ClassQuery::Binary("Z")) + ); + assert_eq!( + t(r"\p{Other}"), + hir_uclass_query(ClassQuery::Binary("Other")) + ); + assert_eq!(t(r"\pC"), hir_uclass_query(ClassQuery::Binary("Other"))); + + assert_eq!( + t(r"\PZ"), + hir_negate(hir_uclass_query(ClassQuery::Binary("Z"))) + ); + assert_eq!( + t(r"\P{separator}"), + hir_negate(hir_uclass_query(ClassQuery::Binary("Z"))) + ); + assert_eq!( + t(r"\P{gc!=separator}"), + hir_negate(hir_uclass_query(ClassQuery::Binary("Z"))) + ); + + assert_eq!(t(r"\p{any}"), hir_uclass_query(ClassQuery::Binary("Any"))); + assert_eq!( + t(r"\p{assigned}"), + hir_uclass_query(ClassQuery::Binary("Assigned")) + ); + assert_eq!( + t(r"\p{ascii}"), + hir_uclass_query(ClassQuery::Binary("ASCII")) + ); + assert_eq!( + t(r"\p{gc:any}"), + hir_uclass_query(ClassQuery::Binary("Any")) + ); + assert_eq!( + t(r"\p{gc:assigned}"), + hir_uclass_query(ClassQuery::Binary("Assigned")) + ); + assert_eq!( + t(r"\p{gc:ascii}"), + hir_uclass_query(ClassQuery::Binary("ASCII")) + ); + + assert_eq!( + t_err(r"(?-u)\pZ"), + TestError { + kind: 
hir::ErrorKind::UnicodeNotAllowed, + span: Span::new( + Position::new(5, 1, 6), + Position::new(8, 1, 9) + ), + } + ); + assert_eq!( + t_err(r"(?-u)\p{Separator}"), + TestError { + kind: hir::ErrorKind::UnicodeNotAllowed, + span: Span::new( + Position::new(5, 1, 6), + Position::new(18, 1, 19) + ), + } + ); + assert_eq!( + t_err(r"\pE"), + TestError { + kind: hir::ErrorKind::UnicodePropertyNotFound, + span: Span::new( + Position::new(0, 1, 1), + Position::new(3, 1, 4) + ), + } + ); + assert_eq!( + t_err(r"\p{Foo}"), + TestError { + kind: hir::ErrorKind::UnicodePropertyNotFound, + span: Span::new( + Position::new(0, 1, 1), + Position::new(7, 1, 8) + ), + } + ); + assert_eq!( + t_err(r"\p{gc:Foo}"), + TestError { + kind: hir::ErrorKind::UnicodePropertyValueNotFound, + span: Span::new( + Position::new(0, 1, 1), + Position::new(10, 1, 11) + ), + } + ); + } + + #[test] + #[cfg(not(feature = "unicode-gencat"))] + fn class_unicode_gencat_disabled() { + assert_eq!( + t_err(r"\p{Separator}"), + TestError { + kind: hir::ErrorKind::UnicodePropertyNotFound, + span: Span::new( + Position::new(0, 1, 1), + Position::new(13, 1, 14) + ), + } + ); + + assert_eq!( + t_err(r"\p{Any}"), + TestError { + kind: hir::ErrorKind::UnicodePropertyNotFound, + span: Span::new( + Position::new(0, 1, 1), + Position::new(7, 1, 8) + ), + } + ); + } + + #[test] + #[cfg(feature = "unicode-script")] + fn class_unicode_script() { + assert_eq!( + t(r"\p{Greek}"), + hir_uclass_query(ClassQuery::Binary("Greek")) + ); + #[cfg(feature = "unicode-case")] + assert_eq!( + t(r"(?i)\p{Greek}"), + hir_case_fold(hir_uclass_query(ClassQuery::Binary("Greek"))) + ); + #[cfg(feature = "unicode-case")] + assert_eq!( + t(r"(?i)\P{Greek}"), + hir_negate(hir_case_fold(hir_uclass_query(ClassQuery::Binary( + "Greek" + )))) + ); + + assert_eq!( + t_err(r"\p{sc:Foo}"), + TestError { + kind: hir::ErrorKind::UnicodePropertyValueNotFound, + span: Span::new( + Position::new(0, 1, 1), + Position::new(10, 1, 11) + ), + } + ); + assert_eq!( + t_err(r"\p{scx:Foo}"), + TestError { + kind: hir::ErrorKind::UnicodePropertyValueNotFound, + span: Span::new( + Position::new(0, 1, 1), + Position::new(11, 1, 12) + ), + } + ); + } + + #[test] + #[cfg(not(feature = "unicode-script"))] + fn class_unicode_script_disabled() { + assert_eq!( + t_err(r"\p{Greek}"), + TestError { + kind: hir::ErrorKind::UnicodePropertyNotFound, + span: Span::new( + Position::new(0, 1, 1), + Position::new(9, 1, 10) + ), + } + ); + + assert_eq!( + t_err(r"\p{scx:Greek}"), + TestError { + kind: hir::ErrorKind::UnicodePropertyNotFound, + span: Span::new( + Position::new(0, 1, 1), + Position::new(13, 1, 14) + ), + } + ); + } + + #[test] + #[cfg(feature = "unicode-age")] + fn class_unicode_age() { + assert_eq!( + t_err(r"\p{age:Foo}"), + TestError { + kind: hir::ErrorKind::UnicodePropertyValueNotFound, + span: Span::new( + Position::new(0, 1, 1), + Position::new(11, 1, 12) + ), + } + ); + } + + #[test] + #[cfg(feature = "unicode-gencat")] + fn class_unicode_any_empty() { + assert_eq!(t(r"\P{any}"), hir_uclass(&[]),); + } + + #[test] + #[cfg(not(feature = "unicode-age"))] + fn class_unicode_age_disabled() { + assert_eq!( + t_err(r"\p{age:3.0}"), + TestError { + kind: hir::ErrorKind::UnicodePropertyNotFound, + span: Span::new( + Position::new(0, 1, 1), + Position::new(11, 1, 12) + ), + } + ); + } + + #[test] + fn class_bracketed() { + assert_eq!(t("[a]"), hir_lit("a")); + assert_eq!(t("[ab]"), hir_uclass(&[('a', 'b')])); + assert_eq!(t("[^[a]]"), class_negate(uclass(&[('a', 'a')]))); + 
assert_eq!(t("[a-z]"), hir_uclass(&[('a', 'z')])); + assert_eq!(t("[a-fd-h]"), hir_uclass(&[('a', 'h')])); + assert_eq!(t("[a-fg-m]"), hir_uclass(&[('a', 'm')])); + assert_eq!(t(r"[\x00]"), hir_uclass(&[('\0', '\0')])); + assert_eq!(t(r"[\n]"), hir_uclass(&[('\n', '\n')])); + assert_eq!(t("[\n]"), hir_uclass(&[('\n', '\n')])); + #[cfg(any(feature = "unicode-perl", feature = "unicode-gencat"))] + assert_eq!(t(r"[\d]"), hir_uclass_query(ClassQuery::Binary("digit"))); + #[cfg(feature = "unicode-gencat")] + assert_eq!( + t(r"[\pZ]"), + hir_uclass_query(ClassQuery::Binary("separator")) + ); + #[cfg(feature = "unicode-gencat")] + assert_eq!( + t(r"[\p{separator}]"), + hir_uclass_query(ClassQuery::Binary("separator")) + ); + #[cfg(any(feature = "unicode-perl", feature = "unicode-gencat"))] + assert_eq!(t(r"[^\D]"), hir_uclass_query(ClassQuery::Binary("digit"))); + #[cfg(feature = "unicode-gencat")] + assert_eq!( + t(r"[^\PZ]"), + hir_uclass_query(ClassQuery::Binary("separator")) + ); + #[cfg(feature = "unicode-gencat")] + assert_eq!( + t(r"[^\P{separator}]"), + hir_uclass_query(ClassQuery::Binary("separator")) + ); + #[cfg(all( + feature = "unicode-case", + any(feature = "unicode-perl", feature = "unicode-gencat") + ))] + assert_eq!( + t(r"(?i)[^\D]"), + hir_uclass_query(ClassQuery::Binary("digit")) + ); + #[cfg(all(feature = "unicode-case", feature = "unicode-script"))] + assert_eq!( + t(r"(?i)[^\P{greek}]"), + hir_case_fold(hir_uclass_query(ClassQuery::Binary("greek"))) + ); + + assert_eq!(t("(?-u)[a]"), hir_bclass(&[(b'a', b'a')])); + assert_eq!(t(r"(?-u)[\x00]"), hir_bclass(&[(b'\0', b'\0')])); + assert_eq!(t_bytes(r"(?-u)[\xFF]"), hir_bclass(&[(b'\xFF', b'\xFF')])); + + #[cfg(feature = "unicode-case")] + assert_eq!(t("(?i)[a]"), hir_uclass(&[('A', 'A'), ('a', 'a')])); + #[cfg(feature = "unicode-case")] + assert_eq!( + t("(?i)[k]"), + hir_uclass(&[('K', 'K'), ('k', 'k'), ('\u{212A}', '\u{212A}'),]) + ); + #[cfg(feature = "unicode-case")] + assert_eq!( + t("(?i)[β]"), + hir_uclass(&[('Β', 'Β'), ('β', 'β'), ('ϐ', 'ϐ'),]) + ); + assert_eq!(t("(?i-u)[k]"), hir_bclass(&[(b'K', b'K'), (b'k', b'k'),])); + + assert_eq!(t("[^a]"), class_negate(uclass(&[('a', 'a')]))); + assert_eq!(t(r"[^\x00]"), class_negate(uclass(&[('\0', '\0')]))); + assert_eq!( + t_bytes("(?-u)[^a]"), + class_negate(bclass(&[(b'a', b'a')])) + ); + #[cfg(any(feature = "unicode-perl", feature = "unicode-gencat"))] + assert_eq!( + t(r"[^\d]"), + hir_negate(hir_uclass_query(ClassQuery::Binary("digit"))) + ); + #[cfg(feature = "unicode-gencat")] + assert_eq!( + t(r"[^\pZ]"), + hir_negate(hir_uclass_query(ClassQuery::Binary("separator"))) + ); + #[cfg(feature = "unicode-gencat")] + assert_eq!( + t(r"[^\p{separator}]"), + hir_negate(hir_uclass_query(ClassQuery::Binary("separator"))) + ); + #[cfg(all(feature = "unicode-case", feature = "unicode-script"))] + assert_eq!( + t(r"(?i)[^\p{greek}]"), + hir_negate(hir_case_fold(hir_uclass_query(ClassQuery::Binary( + "greek" + )))) + ); + #[cfg(all(feature = "unicode-case", feature = "unicode-script"))] + assert_eq!( + t(r"(?i)[\P{greek}]"), + hir_negate(hir_case_fold(hir_uclass_query(ClassQuery::Binary( + "greek" + )))) + ); + + // Test some weird cases. 
+ assert_eq!(t(r"[\[]"), hir_uclass(&[('[', '[')])); + + assert_eq!(t(r"[&]"), hir_uclass(&[('&', '&')])); + assert_eq!(t(r"[\&]"), hir_uclass(&[('&', '&')])); + assert_eq!(t(r"[\&\&]"), hir_uclass(&[('&', '&')])); + assert_eq!(t(r"[\x00-&]"), hir_uclass(&[('\0', '&')])); + assert_eq!(t(r"[&-\xFF]"), hir_uclass(&[('&', '\u{FF}')])); + + assert_eq!(t(r"[~]"), hir_uclass(&[('~', '~')])); + assert_eq!(t(r"[\~]"), hir_uclass(&[('~', '~')])); + assert_eq!(t(r"[\~\~]"), hir_uclass(&[('~', '~')])); + assert_eq!(t(r"[\x00-~]"), hir_uclass(&[('\0', '~')])); + assert_eq!(t(r"[~-\xFF]"), hir_uclass(&[('~', '\u{FF}')])); + + assert_eq!(t(r"[-]"), hir_uclass(&[('-', '-')])); + assert_eq!(t(r"[\-]"), hir_uclass(&[('-', '-')])); + assert_eq!(t(r"[\-\-]"), hir_uclass(&[('-', '-')])); + assert_eq!(t(r"[\x00-\-]"), hir_uclass(&[('\0', '-')])); + assert_eq!(t(r"[\--\xFF]"), hir_uclass(&[('-', '\u{FF}')])); + + assert_eq!( + t_err("(?-u)[^a]"), + TestError { + kind: hir::ErrorKind::InvalidUtf8, + span: Span::new( + Position::new(5, 1, 6), + Position::new(9, 1, 10) + ), + } + ); + #[cfg(any(feature = "unicode-perl", feature = "unicode-bool"))] + assert_eq!(t(r"[^\s\S]"), hir_uclass(&[]),); + #[cfg(any(feature = "unicode-perl", feature = "unicode-bool"))] + assert_eq!(t_bytes(r"(?-u)[^\s\S]"), hir_bclass(&[]),); + } + + #[test] + fn class_bracketed_union() { + assert_eq!(t("[a-zA-Z]"), hir_uclass(&[('A', 'Z'), ('a', 'z')])); + #[cfg(feature = "unicode-gencat")] + assert_eq!( + t(r"[a\pZb]"), + hir_union( + hir_uclass(&[('a', 'b')]), + hir_uclass_query(ClassQuery::Binary("separator")) + ) + ); + #[cfg(all(feature = "unicode-gencat", feature = "unicode-script"))] + assert_eq!( + t(r"[\pZ\p{Greek}]"), + hir_union( + hir_uclass_query(ClassQuery::Binary("greek")), + hir_uclass_query(ClassQuery::Binary("separator")) + ) + ); + #[cfg(all( + feature = "unicode-age", + feature = "unicode-gencat", + feature = "unicode-script" + ))] + assert_eq!( + t(r"[\p{age:3.0}\pZ\p{Greek}]"), + hir_union( + hir_uclass_query(ClassQuery::ByValue { + property_name: "age", + property_value: "3.0", + }), + hir_union( + hir_uclass_query(ClassQuery::Binary("greek")), + hir_uclass_query(ClassQuery::Binary("separator")) + ) + ) + ); + #[cfg(all( + feature = "unicode-age", + feature = "unicode-gencat", + feature = "unicode-script" + ))] + assert_eq!( + t(r"[[[\p{age:3.0}\pZ]\p{Greek}][\p{Cyrillic}]]"), + hir_union( + hir_uclass_query(ClassQuery::ByValue { + property_name: "age", + property_value: "3.0", + }), + hir_union( + hir_uclass_query(ClassQuery::Binary("cyrillic")), + hir_union( + hir_uclass_query(ClassQuery::Binary("greek")), + hir_uclass_query(ClassQuery::Binary("separator")) + ) + ) + ) + ); + + #[cfg(all( + feature = "unicode-age", + feature = "unicode-case", + feature = "unicode-gencat", + feature = "unicode-script" + ))] + assert_eq!( + t(r"(?i)[\p{age:3.0}\pZ\p{Greek}]"), + hir_case_fold(hir_union( + hir_uclass_query(ClassQuery::ByValue { + property_name: "age", + property_value: "3.0", + }), + hir_union( + hir_uclass_query(ClassQuery::Binary("greek")), + hir_uclass_query(ClassQuery::Binary("separator")) + ) + )) + ); + #[cfg(all( + feature = "unicode-age", + feature = "unicode-gencat", + feature = "unicode-script" + ))] + assert_eq!( + t(r"[^\p{age:3.0}\pZ\p{Greek}]"), + hir_negate(hir_union( + hir_uclass_query(ClassQuery::ByValue { + property_name: "age", + property_value: "3.0", + }), + hir_union( + hir_uclass_query(ClassQuery::Binary("greek")), + hir_uclass_query(ClassQuery::Binary("separator")) + ) + )) + ); + #[cfg(all( + 
feature = "unicode-age", + feature = "unicode-case", + feature = "unicode-gencat", + feature = "unicode-script" + ))] + assert_eq!( + t(r"(?i)[^\p{age:3.0}\pZ\p{Greek}]"), + hir_negate(hir_case_fold(hir_union( + hir_uclass_query(ClassQuery::ByValue { + property_name: "age", + property_value: "3.0", + }), + hir_union( + hir_uclass_query(ClassQuery::Binary("greek")), + hir_uclass_query(ClassQuery::Binary("separator")) + ) + ))) + ); + } + + #[test] + fn class_bracketed_nested() { + assert_eq!(t(r"[a[^c]]"), class_negate(uclass(&[('c', 'c')]))); + assert_eq!(t(r"[a-b[^c]]"), class_negate(uclass(&[('c', 'c')]))); + assert_eq!(t(r"[a-c[^c]]"), class_negate(uclass(&[]))); + + assert_eq!(t(r"[^a[^c]]"), hir_uclass(&[('c', 'c')])); + assert_eq!(t(r"[^a-b[^c]]"), hir_uclass(&[('c', 'c')])); + + #[cfg(feature = "unicode-case")] + assert_eq!( + t(r"(?i)[a[^c]]"), + hir_negate(class_case_fold(uclass(&[('c', 'c')]))) + ); + #[cfg(feature = "unicode-case")] + assert_eq!( + t(r"(?i)[a-b[^c]]"), + hir_negate(class_case_fold(uclass(&[('c', 'c')]))) + ); + + #[cfg(feature = "unicode-case")] + assert_eq!(t(r"(?i)[^a[^c]]"), hir_uclass(&[('C', 'C'), ('c', 'c')])); + #[cfg(feature = "unicode-case")] + assert_eq!( + t(r"(?i)[^a-b[^c]]"), + hir_uclass(&[('C', 'C'), ('c', 'c')]) + ); + + assert_eq!(t(r"[^a-c[^c]]"), hir_uclass(&[]),); + #[cfg(feature = "unicode-case")] + assert_eq!(t(r"(?i)[^a-c[^c]]"), hir_uclass(&[]),); + } + + #[test] + fn class_bracketed_intersect() { + assert_eq!(t("[abc&&b-c]"), hir_uclass(&[('b', 'c')])); + assert_eq!(t("[abc&&[b-c]]"), hir_uclass(&[('b', 'c')])); + assert_eq!(t("[[abc]&&[b-c]]"), hir_uclass(&[('b', 'c')])); + assert_eq!(t("[a-z&&b-y&&c-x]"), hir_uclass(&[('c', 'x')])); + assert_eq!(t("[c-da-b&&a-d]"), hir_uclass(&[('a', 'd')])); + assert_eq!(t("[a-d&&c-da-b]"), hir_uclass(&[('a', 'd')])); + assert_eq!(t(r"[a-z&&a-c]"), hir_uclass(&[('a', 'c')])); + assert_eq!(t(r"[[a-z&&a-c]]"), hir_uclass(&[('a', 'c')])); + assert_eq!(t(r"[^[a-z&&a-c]]"), hir_negate(hir_uclass(&[('a', 'c')]))); + + assert_eq!(t("(?-u)[abc&&b-c]"), hir_bclass(&[(b'b', b'c')])); + assert_eq!(t("(?-u)[abc&&[b-c]]"), hir_bclass(&[(b'b', b'c')])); + assert_eq!(t("(?-u)[[abc]&&[b-c]]"), hir_bclass(&[(b'b', b'c')])); + assert_eq!(t("(?-u)[a-z&&b-y&&c-x]"), hir_bclass(&[(b'c', b'x')])); + assert_eq!(t("(?-u)[c-da-b&&a-d]"), hir_bclass(&[(b'a', b'd')])); + assert_eq!(t("(?-u)[a-d&&c-da-b]"), hir_bclass(&[(b'a', b'd')])); + + #[cfg(feature = "unicode-case")] + assert_eq!( + t("(?i)[abc&&b-c]"), + hir_case_fold(hir_uclass(&[('b', 'c')])) + ); + #[cfg(feature = "unicode-case")] + assert_eq!( + t("(?i)[abc&&[b-c]]"), + hir_case_fold(hir_uclass(&[('b', 'c')])) + ); + #[cfg(feature = "unicode-case")] + assert_eq!( + t("(?i)[[abc]&&[b-c]]"), + hir_case_fold(hir_uclass(&[('b', 'c')])) + ); + #[cfg(feature = "unicode-case")] + assert_eq!( + t("(?i)[a-z&&b-y&&c-x]"), + hir_case_fold(hir_uclass(&[('c', 'x')])) + ); + #[cfg(feature = "unicode-case")] + assert_eq!( + t("(?i)[c-da-b&&a-d]"), + hir_case_fold(hir_uclass(&[('a', 'd')])) + ); + #[cfg(feature = "unicode-case")] + assert_eq!( + t("(?i)[a-d&&c-da-b]"), + hir_case_fold(hir_uclass(&[('a', 'd')])) + ); + + assert_eq!( + t("(?i-u)[abc&&b-c]"), + hir_case_fold(hir_bclass(&[(b'b', b'c')])) + ); + assert_eq!( + t("(?i-u)[abc&&[b-c]]"), + hir_case_fold(hir_bclass(&[(b'b', b'c')])) + ); + assert_eq!( + t("(?i-u)[[abc]&&[b-c]]"), + hir_case_fold(hir_bclass(&[(b'b', b'c')])) + ); + assert_eq!( + t("(?i-u)[a-z&&b-y&&c-x]"), + hir_case_fold(hir_bclass(&[(b'c', b'x')])) + ); + 
assert_eq!( + t("(?i-u)[c-da-b&&a-d]"), + hir_case_fold(hir_bclass(&[(b'a', b'd')])) + ); + assert_eq!( + t("(?i-u)[a-d&&c-da-b]"), + hir_case_fold(hir_bclass(&[(b'a', b'd')])) + ); + + // In `[a^]`, `^` does not need to be escaped, so it makes sense that + // `^` is also allowed to be unescaped after `&&`. + assert_eq!(t(r"[\^&&^]"), hir_uclass(&[('^', '^')])); + // `]` needs to be escaped after `&&` since it's not at start of class. + assert_eq!(t(r"[]&&\]]"), hir_uclass(&[(']', ']')])); + assert_eq!(t(r"[-&&-]"), hir_uclass(&[('-', '-')])); + assert_eq!(t(r"[\&&&&]"), hir_uclass(&[('&', '&')])); + assert_eq!(t(r"[\&&&\&]"), hir_uclass(&[('&', '&')])); + // Test precedence. + assert_eq!( + t(r"[a-w&&[^c-g]z]"), + hir_uclass(&[('a', 'b'), ('h', 'w')]) + ); + } + + #[test] + fn class_bracketed_intersect_negate() { + #[cfg(feature = "unicode-perl")] + assert_eq!( + t(r"[^\w&&\d]"), + hir_negate(hir_uclass_query(ClassQuery::Binary("digit"))) + ); + assert_eq!(t(r"[^[a-z&&a-c]]"), hir_negate(hir_uclass(&[('a', 'c')]))); + #[cfg(feature = "unicode-perl")] + assert_eq!( + t(r"[^[\w&&\d]]"), + hir_negate(hir_uclass_query(ClassQuery::Binary("digit"))) + ); + #[cfg(feature = "unicode-perl")] + assert_eq!( + t(r"[^[^\w&&\d]]"), + hir_uclass_query(ClassQuery::Binary("digit")) + ); + #[cfg(feature = "unicode-perl")] + assert_eq!(t(r"[[[^\w]&&[^\d]]]"), hir_negate(hir_uclass_perl_word())); + + #[cfg(feature = "unicode-perl")] + assert_eq!( + t_bytes(r"(?-u)[^\w&&\d]"), + hir_negate(hir_ascii_bclass(&ast::ClassAsciiKind::Digit)) + ); + assert_eq!( + t_bytes(r"(?-u)[^[a-z&&a-c]]"), + hir_negate(hir_bclass(&[(b'a', b'c')])) + ); + assert_eq!( + t_bytes(r"(?-u)[^[\w&&\d]]"), + hir_negate(hir_ascii_bclass(&ast::ClassAsciiKind::Digit)) + ); + assert_eq!( + t_bytes(r"(?-u)[^[^\w&&\d]]"), + hir_ascii_bclass(&ast::ClassAsciiKind::Digit) + ); + assert_eq!( + t_bytes(r"(?-u)[[[^\w]&&[^\d]]]"), + hir_negate(hir_ascii_bclass(&ast::ClassAsciiKind::Word)) + ); + } + + #[test] + fn class_bracketed_difference() { + #[cfg(feature = "unicode-gencat")] + assert_eq!( + t(r"[\pL--[:ascii:]]"), + hir_difference( + hir_uclass_query(ClassQuery::Binary("letter")), + hir_uclass(&[('\0', '\x7F')]) + ) + ); + + assert_eq!( + t(r"(?-u)[[:alpha:]--[:lower:]]"), + hir_bclass(&[(b'A', b'Z')]) + ); + } + + #[test] + fn class_bracketed_symmetric_difference() { + #[cfg(feature = "unicode-script")] + assert_eq!( + t(r"[\p{sc:Greek}~~\p{scx:Greek}]"), + // Class({ + // '·'..='·', + // '\u{300}'..='\u{301}', + // '\u{304}'..='\u{304}', + // '\u{306}'..='\u{306}', + // '\u{308}'..='\u{308}', + // '\u{313}'..='\u{313}', + // '\u{342}'..='\u{342}', + // '\u{345}'..='\u{345}', + // 'ʹ'..='ʹ', + // '\u{1dc0}'..='\u{1dc1}', + // '⁝'..='⁝', + // }) + hir_uclass(&[ + ('·', '·'), + ('\u{0300}', '\u{0301}'), + ('\u{0304}', '\u{0304}'), + ('\u{0306}', '\u{0306}'), + ('\u{0308}', '\u{0308}'), + ('\u{0313}', '\u{0313}'), + ('\u{0342}', '\u{0342}'), + ('\u{0345}', '\u{0345}'), + ('ʹ', 'ʹ'), + ('\u{1DC0}', '\u{1DC1}'), + ('⁝', '⁝'), + ]) + ); + assert_eq!(t(r"[a-g~~c-j]"), hir_uclass(&[('a', 'b'), ('h', 'j')])); + + assert_eq!( + t(r"(?-u)[a-g~~c-j]"), + hir_bclass(&[(b'a', b'b'), (b'h', b'j')]) + ); + } + + #[test] + fn ignore_whitespace() { + assert_eq!(t(r"(?x)\12 3"), hir_lit("\n3")); + assert_eq!(t(r"(?x)\x { 53 }"), hir_lit("S")); + assert_eq!( + t(r"(?x)\x # comment +{ # comment + 53 # comment +} #comment"), + hir_lit("S") + ); + + assert_eq!(t(r"(?x)\x 53"), hir_lit("S")); + assert_eq!( + t(r"(?x)\x # comment + 53 # comment"), + hir_lit("S") + ); 
+ assert_eq!(t(r"(?x)\x5 3"), hir_lit("S")); + + #[cfg(feature = "unicode-gencat")] + assert_eq!( + t(r"(?x)\p # comment +{ # comment + Separator # comment +} # comment"), + hir_uclass_query(ClassQuery::Binary("separator")) + ); + + assert_eq!( + t(r"(?x)a # comment +{ # comment + 5 # comment + , # comment + 10 # comment +} # comment"), + hir_range(true, 5, Some(10), hir_lit("a")) + ); + + assert_eq!(t(r"(?x)a\ # hi there"), hir_lit("a ")); + } + + #[test] + fn analysis_is_utf8() { + // Positive examples. + assert!(props_bytes(r"a").is_utf8()); + assert!(props_bytes(r"ab").is_utf8()); + assert!(props_bytes(r"(?-u)a").is_utf8()); + assert!(props_bytes(r"(?-u)ab").is_utf8()); + assert!(props_bytes(r"\xFF").is_utf8()); + assert!(props_bytes(r"\xFF\xFF").is_utf8()); + assert!(props_bytes(r"[^a]").is_utf8()); + assert!(props_bytes(r"[^a][^a]").is_utf8()); + assert!(props_bytes(r"\b").is_utf8()); + assert!(props_bytes(r"\B").is_utf8()); + assert!(props_bytes(r"(?-u)\b").is_utf8()); + assert!(props_bytes(r"(?-u)\B").is_utf8()); + + // Negative examples. + assert!(!props_bytes(r"(?-u)\xFF").is_utf8()); + assert!(!props_bytes(r"(?-u)\xFF\xFF").is_utf8()); + assert!(!props_bytes(r"(?-u)[^a]").is_utf8()); + assert!(!props_bytes(r"(?-u)[^a][^a]").is_utf8()); + } + + #[test] + fn analysis_captures_len() { + assert_eq!(0, props(r"a").explicit_captures_len()); + assert_eq!(0, props(r"(?:a)").explicit_captures_len()); + assert_eq!(0, props(r"(?i-u:a)").explicit_captures_len()); + assert_eq!(0, props(r"(?i-u)a").explicit_captures_len()); + assert_eq!(1, props(r"(a)").explicit_captures_len()); + assert_eq!(1, props(r"(?Pa)").explicit_captures_len()); + assert_eq!(1, props(r"()").explicit_captures_len()); + assert_eq!(1, props(r"()a").explicit_captures_len()); + assert_eq!(1, props(r"(a)+").explicit_captures_len()); + assert_eq!(2, props(r"(a)(b)").explicit_captures_len()); + assert_eq!(2, props(r"(a)|(b)").explicit_captures_len()); + assert_eq!(2, props(r"((a))").explicit_captures_len()); + assert_eq!(1, props(r"([a&&b])").explicit_captures_len()); + } + + #[test] + fn analysis_static_captures_len() { + let len = |pattern| props(pattern).static_explicit_captures_len(); + assert_eq!(Some(0), len(r"")); + assert_eq!(Some(0), len(r"foo|bar")); + assert_eq!(None, len(r"(foo)|bar")); + assert_eq!(None, len(r"foo|(bar)")); + assert_eq!(Some(1), len(r"(foo|bar)")); + assert_eq!(Some(1), len(r"(a|b|c|d|e|f)")); + assert_eq!(Some(1), len(r"(a)|(b)|(c)|(d)|(e)|(f)")); + assert_eq!(Some(2), len(r"(a)(b)|(c)(d)|(e)(f)")); + assert_eq!(Some(6), len(r"(a)(b)(c)(d)(e)(f)")); + assert_eq!(Some(3), len(r"(a)(b)(extra)|(a)(b)()")); + assert_eq!(Some(3), len(r"(a)(b)((?:extra)?)")); + assert_eq!(None, len(r"(a)(b)(extra)?")); + assert_eq!(Some(1), len(r"(foo)|(bar)")); + assert_eq!(Some(2), len(r"(foo)(bar)")); + assert_eq!(Some(2), len(r"(foo)+(bar)")); + assert_eq!(None, len(r"(foo)*(bar)")); + assert_eq!(Some(0), len(r"(foo)?{0}")); + assert_eq!(None, len(r"(foo)?{1}")); + assert_eq!(Some(1), len(r"(foo){1}")); + assert_eq!(Some(1), len(r"(foo){1,}")); + assert_eq!(Some(1), len(r"(foo){1,}?")); + assert_eq!(None, len(r"(foo){1,}??")); + assert_eq!(None, len(r"(foo){0,}")); + assert_eq!(Some(1), len(r"(foo)(?:bar)")); + assert_eq!(Some(2), len(r"(foo(?:bar)+)(?:baz(boo))")); + assert_eq!(Some(2), len(r"(?Pfoo)(?:bar)(bal|loon)")); + assert_eq!( + Some(2), + len(r#"<(a)[^>]+href="([^"]+)"|<(img)[^>]+src="([^"]+)""#) + ); + } + + #[test] + fn analysis_is_all_assertions() { + // Positive examples. 
+ let p = props(r"\b"); + assert!(!p.look_set().is_empty()); + assert_eq!(p.minimum_len(), Some(0)); + + let p = props(r"\B"); + assert!(!p.look_set().is_empty()); + assert_eq!(p.minimum_len(), Some(0)); + + let p = props(r"^"); + assert!(!p.look_set().is_empty()); + assert_eq!(p.minimum_len(), Some(0)); + + let p = props(r"$"); + assert!(!p.look_set().is_empty()); + assert_eq!(p.minimum_len(), Some(0)); + + let p = props(r"\A"); + assert!(!p.look_set().is_empty()); + assert_eq!(p.minimum_len(), Some(0)); + + let p = props(r"\z"); + assert!(!p.look_set().is_empty()); + assert_eq!(p.minimum_len(), Some(0)); + + let p = props(r"$^\z\A\b\B"); + assert!(!p.look_set().is_empty()); + assert_eq!(p.minimum_len(), Some(0)); + + let p = props(r"$|^|\z|\A|\b|\B"); + assert!(!p.look_set().is_empty()); + assert_eq!(p.minimum_len(), Some(0)); + + let p = props(r"^$|$^"); + assert!(!p.look_set().is_empty()); + assert_eq!(p.minimum_len(), Some(0)); + + let p = props(r"((\b)+())*^"); + assert!(!p.look_set().is_empty()); + assert_eq!(p.minimum_len(), Some(0)); + + // Negative examples. + let p = props(r"^a"); + assert!(!p.look_set().is_empty()); + assert_eq!(p.minimum_len(), Some(1)); + } + + #[test] + fn analysis_look_set_prefix_any() { + let p = props(r"(?-u)(?i:(?:\b|_)win(?:32|64|dows)?(?:\b|_))"); + assert!(p.look_set_prefix_any().contains(Look::WordAscii)); + } + + #[test] + fn analysis_is_anchored() { + let is_start = |p| props(p).look_set_prefix().contains(Look::Start); + let is_end = |p| props(p).look_set_suffix().contains(Look::End); + + // Positive examples. + assert!(is_start(r"^")); + assert!(is_end(r"$")); + + assert!(is_start(r"^^")); + assert!(props(r"$$").look_set_suffix().contains(Look::End)); + + assert!(is_start(r"^$")); + assert!(is_end(r"^$")); + + assert!(is_start(r"^foo")); + assert!(is_end(r"foo$")); + + assert!(is_start(r"^foo|^bar")); + assert!(is_end(r"foo$|bar$")); + + assert!(is_start(r"^(foo|bar)")); + assert!(is_end(r"(foo|bar)$")); + + assert!(is_start(r"^+")); + assert!(is_end(r"$+")); + assert!(is_start(r"^++")); + assert!(is_end(r"$++")); + assert!(is_start(r"(^)+")); + assert!(is_end(r"($)+")); + + assert!(is_start(r"$^")); + assert!(is_start(r"$^")); + assert!(is_start(r"$^|^$")); + assert!(is_end(r"$^|^$")); + + assert!(is_start(r"\b^")); + assert!(is_end(r"$\b")); + assert!(is_start(r"^(?m:^)")); + assert!(is_end(r"(?m:$)$")); + assert!(is_start(r"(?m:^)^")); + assert!(is_end(r"$(?m:$)")); + + // Negative examples. + assert!(!is_start(r"(?m)^")); + assert!(!is_end(r"(?m)$")); + assert!(!is_start(r"(?m:^$)|$^")); + assert!(!is_end(r"(?m:^$)|$^")); + assert!(!is_start(r"$^|(?m:^$)")); + assert!(!is_end(r"$^|(?m:^$)")); + + assert!(!is_start(r"a^")); + assert!(!is_start(r"$a")); + + assert!(!is_end(r"a^")); + assert!(!is_end(r"$a")); + + assert!(!is_start(r"^foo|bar")); + assert!(!is_end(r"foo|bar$")); + + assert!(!is_start(r"^*")); + assert!(!is_end(r"$*")); + assert!(!is_start(r"^*+")); + assert!(!is_end(r"$*+")); + assert!(!is_start(r"^+*")); + assert!(!is_end(r"$+*")); + assert!(!is_start(r"(^)*")); + assert!(!is_end(r"($)*")); + } + + #[test] + fn analysis_is_any_anchored() { + let is_start = |p| props(p).look_set().contains(Look::Start); + let is_end = |p| props(p).look_set().contains(Look::End); + + // Positive examples. + assert!(is_start(r"^")); + assert!(is_end(r"$")); + assert!(is_start(r"\A")); + assert!(is_end(r"\z")); + + // Negative examples. 
+ assert!(!is_start(r"(?m)^")); + assert!(!is_end(r"(?m)$")); + assert!(!is_start(r"$")); + assert!(!is_end(r"^")); + } + + #[test] + fn analysis_can_empty() { + // Positive examples. + let assert_empty = + |p| assert_eq!(Some(0), props_bytes(p).minimum_len()); + assert_empty(r""); + assert_empty(r"()"); + assert_empty(r"()*"); + assert_empty(r"()+"); + assert_empty(r"()?"); + assert_empty(r"a*"); + assert_empty(r"a?"); + assert_empty(r"a{0}"); + assert_empty(r"a{0,}"); + assert_empty(r"a{0,1}"); + assert_empty(r"a{0,10}"); + #[cfg(feature = "unicode-gencat")] + assert_empty(r"\pL*"); + assert_empty(r"a*|b"); + assert_empty(r"b|a*"); + assert_empty(r"a|"); + assert_empty(r"|a"); + assert_empty(r"a||b"); + assert_empty(r"a*a?(abcd)*"); + assert_empty(r"^"); + assert_empty(r"$"); + assert_empty(r"(?m)^"); + assert_empty(r"(?m)$"); + assert_empty(r"\A"); + assert_empty(r"\z"); + assert_empty(r"\B"); + assert_empty(r"(?-u)\B"); + assert_empty(r"\b"); + assert_empty(r"(?-u)\b"); + + // Negative examples. + let assert_non_empty = + |p| assert_ne!(Some(0), props_bytes(p).minimum_len()); + assert_non_empty(r"a+"); + assert_non_empty(r"a{1}"); + assert_non_empty(r"a{1,}"); + assert_non_empty(r"a{1,2}"); + assert_non_empty(r"a{1,10}"); + assert_non_empty(r"b|a"); + assert_non_empty(r"a*a+(abcd)*"); + #[cfg(feature = "unicode-gencat")] + assert_non_empty(r"\P{any}"); + assert_non_empty(r"[a--a]"); + assert_non_empty(r"[a&&b]"); + } + + #[test] + fn analysis_is_literal() { + // Positive examples. + assert!(props(r"a").is_literal()); + assert!(props(r"ab").is_literal()); + assert!(props(r"abc").is_literal()); + assert!(props(r"(?m)abc").is_literal()); + assert!(props(r"(?:a)").is_literal()); + assert!(props(r"foo(?:a)").is_literal()); + assert!(props(r"(?:a)foo").is_literal()); + assert!(props(r"[a]").is_literal()); + + // Negative examples. + assert!(!props(r"").is_literal()); + assert!(!props(r"^").is_literal()); + assert!(!props(r"a|b").is_literal()); + assert!(!props(r"(a)").is_literal()); + assert!(!props(r"a+").is_literal()); + assert!(!props(r"foo(a)").is_literal()); + assert!(!props(r"(a)foo").is_literal()); + assert!(!props(r"[ab]").is_literal()); + } + + #[test] + fn analysis_is_alternation_literal() { + // Positive examples. + assert!(props(r"a").is_alternation_literal()); + assert!(props(r"ab").is_alternation_literal()); + assert!(props(r"abc").is_alternation_literal()); + assert!(props(r"(?m)abc").is_alternation_literal()); + assert!(props(r"foo|bar").is_alternation_literal()); + assert!(props(r"foo|bar|baz").is_alternation_literal()); + assert!(props(r"[a]").is_alternation_literal()); + assert!(props(r"(?:ab)|cd").is_alternation_literal()); + assert!(props(r"ab|(?:cd)").is_alternation_literal()); + + // Negative examples. 
+ assert!(!props(r"").is_alternation_literal()); + assert!(!props(r"^").is_alternation_literal()); + assert!(!props(r"(a)").is_alternation_literal()); + assert!(!props(r"a+").is_alternation_literal()); + assert!(!props(r"foo(a)").is_alternation_literal()); + assert!(!props(r"(a)foo").is_alternation_literal()); + assert!(!props(r"[ab]").is_alternation_literal()); + assert!(!props(r"[ab]|b").is_alternation_literal()); + assert!(!props(r"a|[ab]").is_alternation_literal()); + assert!(!props(r"(a)|b").is_alternation_literal()); + assert!(!props(r"a|(b)").is_alternation_literal()); + assert!(!props(r"a|b").is_alternation_literal()); + assert!(!props(r"a|b|c").is_alternation_literal()); + assert!(!props(r"[a]|b").is_alternation_literal()); + assert!(!props(r"a|[b]").is_alternation_literal()); + assert!(!props(r"(?:a)|b").is_alternation_literal()); + assert!(!props(r"a|(?:b)").is_alternation_literal()); + assert!(!props(r"(?:z|xx)@|xx").is_alternation_literal()); + } + + // This tests that the smart Hir::repetition constructors does some basic + // simplifications. + #[test] + fn smart_repetition() { + assert_eq!(t(r"a{0}"), Hir::empty()); + assert_eq!(t(r"a{1}"), hir_lit("a")); + assert_eq!(t(r"\B{32111}"), hir_look(hir::Look::WordUnicodeNegate)); + } + + // This tests that the smart Hir::concat constructor simplifies the given + // exprs in a way we expect. + #[test] + fn smart_concat() { + assert_eq!(t(""), Hir::empty()); + assert_eq!(t("(?:)"), Hir::empty()); + assert_eq!(t("abc"), hir_lit("abc")); + assert_eq!(t("(?:foo)(?:bar)"), hir_lit("foobar")); + assert_eq!(t("quux(?:foo)(?:bar)baz"), hir_lit("quuxfoobarbaz")); + assert_eq!( + t("foo(?:bar^baz)quux"), + hir_cat(vec![ + hir_lit("foobar"), + hir_look(hir::Look::Start), + hir_lit("bazquux"), + ]) + ); + assert_eq!( + t("foo(?:ba(?:r^b)az)quux"), + hir_cat(vec![ + hir_lit("foobar"), + hir_look(hir::Look::Start), + hir_lit("bazquux"), + ]) + ); + } + + // This tests that the smart Hir::alternation constructor simplifies the + // given exprs in a way we expect. + #[test] + fn smart_alternation() { + assert_eq!( + t("(?:foo)|(?:bar)"), + hir_alt(vec![hir_lit("foo"), hir_lit("bar")]) + ); + assert_eq!( + t("quux|(?:abc|def|xyz)|baz"), + hir_alt(vec![ + hir_lit("quux"), + hir_lit("abc"), + hir_lit("def"), + hir_lit("xyz"), + hir_lit("baz"), + ]) + ); + assert_eq!( + t("quux|(?:abc|(?:def|mno)|xyz)|baz"), + hir_alt(vec![ + hir_lit("quux"), + hir_lit("abc"), + hir_lit("def"), + hir_lit("mno"), + hir_lit("xyz"), + hir_lit("baz"), + ]) + ); + assert_eq!( + t("a|b|c|d|e|f|x|y|z"), + hir_uclass(&[('a', 'f'), ('x', 'z')]), + ); + // Tests that we lift common prefixes out of an alternation. 
+ assert_eq!( + t("[A-Z]foo|[A-Z]quux"), + hir_cat(vec![ + hir_uclass(&[('A', 'Z')]), + hir_alt(vec![hir_lit("foo"), hir_lit("quux")]), + ]), + ); + assert_eq!( + t("[A-Z][A-Z]|[A-Z]quux"), + hir_cat(vec![ + hir_uclass(&[('A', 'Z')]), + hir_alt(vec![hir_uclass(&[('A', 'Z')]), hir_lit("quux")]), + ]), + ); + assert_eq!( + t("[A-Z][A-Z]|[A-Z][A-Z]quux"), + hir_cat(vec![ + hir_uclass(&[('A', 'Z')]), + hir_uclass(&[('A', 'Z')]), + hir_alt(vec![Hir::empty(), hir_lit("quux")]), + ]), + ); + assert_eq!( + t("[A-Z]foo|[A-Z]foobar"), + hir_cat(vec![ + hir_uclass(&[('A', 'Z')]), + hir_alt(vec![hir_lit("foo"), hir_lit("foobar")]), + ]), + ); + } + + #[test] + fn regression_alt_empty_concat() { + use crate::ast::{self, Ast}; + + let span = Span::splat(Position::new(0, 0, 0)); + let ast = Ast::alternation(ast::Alternation { + span, + asts: vec![Ast::concat(ast::Concat { span, asts: vec![] })], + }); + + let mut t = Translator::new(); + assert_eq!(Ok(Hir::empty()), t.translate("", &ast)); + } + + #[test] + fn regression_empty_alt() { + use crate::ast::{self, Ast}; + + let span = Span::splat(Position::new(0, 0, 0)); + let ast = Ast::concat(ast::Concat { + span, + asts: vec![Ast::alternation(ast::Alternation { + span, + asts: vec![], + })], + }); + + let mut t = Translator::new(); + assert_eq!(Ok(Hir::fail()), t.translate("", &ast)); + } + + #[test] + fn regression_singleton_alt() { + use crate::{ + ast::{self, Ast}, + hir::Dot, + }; + + let span = Span::splat(Position::new(0, 0, 0)); + let ast = Ast::concat(ast::Concat { + span, + asts: vec![Ast::alternation(ast::Alternation { + span, + asts: vec![Ast::dot(span)], + })], + }); + + let mut t = Translator::new(); + assert_eq!(Ok(Hir::dot(Dot::AnyCharExceptLF)), t.translate("", &ast)); + } + + // See: https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=63168 + #[test] + fn regression_fuzz_match() { + let pat = "[(\u{6} \0-\u{afdf5}] \0 "; + let ast = ParserBuilder::new() + .octal(false) + .ignore_whitespace(true) + .build() + .parse(pat) + .unwrap(); + let hir = TranslatorBuilder::new() + .utf8(true) + .case_insensitive(false) + .multi_line(false) + .dot_matches_new_line(false) + .swap_greed(true) + .unicode(true) + .build() + .translate(pat, &ast) + .unwrap(); + assert_eq!( + hir, + Hir::concat(vec![ + hir_uclass(&[('\0', '\u{afdf5}')]), + hir_lit("\0"), + ]) + ); + } + + // See: https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=63155 + #[cfg(feature = "unicode")] + #[test] + fn regression_fuzz_difference1() { + let pat = r"\W\W|\W[^\v--\W\W\P{Script_Extensions:Pau_Cin_Hau}\u10A1A1-\U{3E3E3}--~~~~--~~~~~~~~------~~~~~~--~~~~~~]*"; + let _ = t(pat); // shouldn't panic + } + + // See: https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=63153 + #[test] + fn regression_fuzz_char_decrement1() { + let pat = "w[w[^w?\rw\rw[^w?\rw[^w?\rw[^w?\rw[^w?\rw[^w?\rw[^w?\r\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0w?\rw[^w?\rw[^w?\rw[^w\0\0\u{1}\0]\0\0-*\0]\0\0\0\0\0\0\u{1}\0]\0\0-*\0]\0\0\0\0\0\u{1}\0]\0\0\0\0\0\0\0\0\0*\0\0\u{1}\0]\0\0-*\0][^w?\rw[^w?\rw[^w?\rw[^w?\rw[^w?\rw[^w?\rw[^w\0\0\u{1}\0]\0\0-*\0]\0\0\0\0\0\0\u{1}\0]\0\0-*\0]\0\0\0\0\0\u{1}\0]\0\0\0\0\0\0\0\0\0x\0\0\u{1}\0]\0\0-*\0]\0\0\0\0\0\0\0\0\0*??\0\u{7f}{2}\u{10}??\0\0\0\0\0\0\0\0\0\u{3}\0\0\0}\0-*\0]\0\0\0\0\0\0\u{1}\0]\0\0-*\0]\0\0\0\0\0\0\u{1}\0]\0\0-*\0]\0\0\0\0\0\u{1}\0]\0\0-*\0]\0\0\0\0\0\0\0\u{1}\0]\0\u{1}\u{1}H-i]-]\0\0\0\0\u{1}\0]\0\0\0\u{1}\0]\0\0-*\0\0\0\0\u{1}9-\u{7f}]\0'|-\u{7f}]\0'|(?i-ux)[-\u{7f}]\0'\u{3}\0\0\0}\0-*\0] Result; + + /// This method is called before 
beginning traversal of the HIR. + fn start(&mut self) {} + + /// This method is called on an `Hir` before descending into child `Hir` + /// nodes. + fn visit_pre(&mut self, _hir: &Hir) -> Result<(), Self::Err> { + Ok(()) + } + + /// This method is called on an `Hir` after descending all of its child + /// `Hir` nodes. + fn visit_post(&mut self, _hir: &Hir) -> Result<(), Self::Err> { + Ok(()) + } + + /// This method is called between child nodes of an alternation. + fn visit_alternation_in(&mut self) -> Result<(), Self::Err> { + Ok(()) + } + + /// This method is called between child nodes of a concatenation. + fn visit_concat_in(&mut self) -> Result<(), Self::Err> { + Ok(()) + } +} + +/// Executes an implementation of `Visitor` in constant stack space. +/// +/// This function will visit every node in the given `Hir` while calling +/// appropriate methods provided by the [`Visitor`] trait. +/// +/// The primary use case for this method is when one wants to perform case +/// analysis over an `Hir` without using a stack size proportional to the depth +/// of the `Hir`. Namely, this method will instead use constant stack space, +/// but will use heap space proportional to the size of the `Hir`. This may be +/// desirable in cases where the size of `Hir` is proportional to end user +/// input. +/// +/// If the visitor returns an error at any point, then visiting is stopped and +/// the error is returned. +pub fn visit<V: Visitor>(hir: &Hir, visitor: V) -> Result<V::Output, V::Err> { + HeapVisitor::new().visit(hir, visitor) +} + +/// HeapVisitor visits every item in an `Hir` recursively using constant stack +/// size and a heap size proportional to the size of the `Hir`. +struct HeapVisitor<'a> { + /// A stack of `Hir` nodes. This is roughly analogous to the call stack + /// used in a typical recursive visitor. + stack: Vec<(&'a Hir, Frame<'a>)>, +} + +/// Represents a single stack frame while performing structural induction over +/// an `Hir`. +enum Frame<'a> { + /// A stack frame allocated just before descending into a repetition + /// operator's child node. + Repetition(&'a hir::Repetition), + /// A stack frame allocated just before descending into a capture's child + /// node. + Capture(&'a hir::Capture), + /// The stack frame used while visiting every child node of a concatenation + /// of expressions. + Concat { + /// The child node we are currently visiting. + head: &'a Hir, + /// The remaining child nodes to visit (which may be empty). + tail: &'a [Hir], + }, + /// The stack frame used while visiting every child node of an alternation + /// of expressions. + Alternation { + /// The child node we are currently visiting. + head: &'a Hir, + /// The remaining child nodes to visit (which may be empty). + tail: &'a [Hir], + }, +} + +impl<'a> HeapVisitor<'a> { + fn new() -> HeapVisitor<'a> { + HeapVisitor { stack: vec![] } + } + + fn visit<V: Visitor>( + &mut self, + mut hir: &'a Hir, + mut visitor: V, + ) -> Result<V::Output, V::Err> { + self.stack.clear(); + + visitor.start(); + loop { + visitor.visit_pre(hir)?; + if let Some(x) = self.induct(hir) { + let child = x.child(); + self.stack.push((hir, x)); + hir = child; + continue; + } + // No induction means we have a base case, so we can post visit + // it now. + visitor.visit_post(hir)?; + + // At this point, we now try to pop our call stack until it is + // either empty or we hit another inductive case.
+ loop { + let (post_hir, frame) = match self.stack.pop() { + None => return visitor.finish(), + Some((post_hir, frame)) => (post_hir, frame), + }; + // If this is a concat/alternate, then we might have additional + // inductive steps to process. + if let Some(x) = self.pop(frame) { + match x { + Frame::Alternation { .. } => { + visitor.visit_alternation_in()?; + } + Frame::Concat { .. } => { + visitor.visit_concat_in()?; + } + _ => {} + } + hir = x.child(); + self.stack.push((post_hir, x)); + break; + } + // Otherwise, we've finished visiting all the child nodes for + // this HIR, so we can post visit it now. + visitor.visit_post(post_hir)?; + } + } + } + + /// Build a stack frame for the given HIR if one is needed (which occurs if + /// and only if there are child nodes in the HIR). Otherwise, return None. + fn induct(&mut self, hir: &'a Hir) -> Option<Frame<'a>> { + match *hir.kind() { + HirKind::Repetition(ref x) => Some(Frame::Repetition(x)), + HirKind::Capture(ref x) => Some(Frame::Capture(x)), + HirKind::Concat(ref x) if x.is_empty() => None, + HirKind::Concat(ref x) => { + Some(Frame::Concat { head: &x[0], tail: &x[1..] }) + } + HirKind::Alternation(ref x) if x.is_empty() => None, + HirKind::Alternation(ref x) => { + Some(Frame::Alternation { head: &x[0], tail: &x[1..] }) + } + _ => None, + } + } + + /// Pops the given frame. If the frame has an additional inductive step, + /// then return it, otherwise return `None`. + fn pop(&self, induct: Frame<'a>) -> Option<Frame<'a>> { + match induct { + Frame::Repetition(_) => None, + Frame::Capture(_) => None, + Frame::Concat { tail, .. } => { + if tail.is_empty() { + None + } else { + Some(Frame::Concat { head: &tail[0], tail: &tail[1..] }) + } + } + Frame::Alternation { tail, .. } => { + if tail.is_empty() { + None + } else { + Some(Frame::Alternation { + head: &tail[0], + tail: &tail[1..], + }) + } + } + } + } +} + +impl<'a> Frame<'a> { + /// Perform the next inductive step on this frame and return the next + /// child HIR node to visit. + fn child(&self) -> &'a Hir { + match *self { + Frame::Repetition(rep) => &rep.sub, + Frame::Capture(capture) => &capture.sub, + Frame::Concat { head, .. } => head, + Frame::Alternation { head, .. } => head, + } + } +} diff --git a/vendor/regex-syntax/src/lib.rs b/vendor/regex-syntax/src/lib.rs new file mode 100644 index 00000000000000..a4512e23de360d --- /dev/null +++ b/vendor/regex-syntax/src/lib.rs @@ -0,0 +1,433 @@ +/*! +This crate provides a robust regular expression parser. + +This crate defines two primary types: + +* [`Ast`](ast::Ast) is the abstract syntax of a regular expression. + An abstract syntax corresponds to a *structured representation* of the + concrete syntax of a regular expression, where the concrete syntax is the + pattern string itself (e.g., `foo(bar)+`). Given some abstract syntax, it + can be converted back to the original concrete syntax (modulo some details, + like whitespace). To a first approximation, the abstract syntax is complex + and difficult to analyze. +* [`Hir`](hir::Hir) is the high-level intermediate representation + ("HIR" or "high-level IR" for short) of a regular expression. It corresponds to + an intermediate state of a regular expression that sits between the abstract + syntax and the low level compiled opcodes that are eventually responsible for + executing a regular expression search.
Given some high-level IR, it is not + possible to produce the original concrete syntax (although it is possible to + produce an equivalent concrete syntax, but it will likely scarcely resemble + the original pattern). To a first approximation, the high-level IR is simple + and easy to analyze. + +These two types come with conversion routines: + +* An [`ast::parse::Parser`] converts concrete syntax (a `&str`) to an +[`Ast`](ast::Ast). +* A [`hir::translate::Translator`] converts an [`Ast`](ast::Ast) to a +[`Hir`](hir::Hir). + +As a convenience, the above two conversion routines are combined into one via +the top-level [`Parser`] type. This `Parser` will first convert your pattern to +an `Ast` and then convert the `Ast` to an `Hir`. It's also exposed as top-level +[`parse`] free function. + + +# Example + +This example shows how to parse a pattern string into its HIR: + +``` +use regex_syntax::{hir::Hir, parse}; + +let hir = parse("a|b")?; +assert_eq!(hir, Hir::alternation(vec![ + Hir::literal("a".as_bytes()), + Hir::literal("b".as_bytes()), +])); +# Ok::<(), Box>(()) +``` + + +# Concrete syntax supported + +The concrete syntax is documented as part of the public API of the +[`regex` crate](https://docs.rs/regex/%2A/regex/#syntax). + + +# Input safety + +A key feature of this library is that it is safe to use with end user facing +input. This plays a significant role in the internal implementation. In +particular: + +1. Parsers provide a `nest_limit` option that permits callers to control how + deeply nested a regular expression is allowed to be. This makes it possible + to do case analysis over an `Ast` or an `Hir` using recursion without + worrying about stack overflow. +2. Since relying on a particular stack size is brittle, this crate goes to + great lengths to ensure that all interactions with both the `Ast` and the + `Hir` do not use recursion. Namely, they use constant stack space and heap + space proportional to the size of the original pattern string (in bytes). + This includes the type's corresponding destructors. (One exception to this + is literal extraction, but this will eventually get fixed.) + + +# Error reporting + +The `Display` implementations on all `Error` types exposed in this library +provide nice human readable errors that are suitable for showing to end users +in a monospace font. + + +# Literal extraction + +This crate provides limited support for [literal extraction from `Hir` +values](hir::literal). Be warned that literal extraction uses recursion, and +therefore, stack size proportional to the size of the `Hir`. + +The purpose of literal extraction is to speed up searches. That is, if you +know a regular expression must match a prefix or suffix literal, then it is +often quicker to search for instances of that literal, and then confirm or deny +the match using the full regular expression engine. These optimizations are +done automatically in the `regex` crate. + + +# Crate features + +An important feature provided by this crate is its Unicode support. This +includes things like case folding, boolean properties, general categories, +scripts and Unicode-aware support for the Perl classes `\w`, `\s` and `\d`. +However, a downside of this support is that it requires bundling several +Unicode data tables that are substantial in size. + +A fair number of use cases do not require full Unicode support. For this +reason, this crate exposes a number of features to control which Unicode +data is available. 
+ +If a regular expression attempts to use a Unicode feature that is not available +because the corresponding crate feature was disabled, then translating that +regular expression to an `Hir` will return an error. (It is still possible +construct an `Ast` for such a regular expression, since Unicode data is not +used until translation to an `Hir`.) Stated differently, enabling or disabling +any of the features below can only add or subtract from the total set of valid +regular expressions. Enabling or disabling a feature will never modify the +match semantics of a regular expression. + +The following features are available: + +* **std** - + Enables support for the standard library. This feature is enabled by default. + When disabled, only `core` and `alloc` are used. Otherwise, enabling `std` + generally just enables `std::error::Error` trait impls for the various error + types. +* **unicode** - + Enables all Unicode features. This feature is enabled by default, and will + always cover all Unicode features, even if more are added in the future. +* **unicode-age** - + Provide the data for the + [Unicode `Age` property](https://www.unicode.org/reports/tr44/tr44-24.html#Character_Age). + This makes it possible to use classes like `\p{Age:6.0}` to refer to all + codepoints first introduced in Unicode 6.0 +* **unicode-bool** - + Provide the data for numerous Unicode boolean properties. The full list + is not included here, but contains properties like `Alphabetic`, `Emoji`, + `Lowercase`, `Math`, `Uppercase` and `White_Space`. +* **unicode-case** - + Provide the data for case insensitive matching using + [Unicode's "simple loose matches" specification](https://www.unicode.org/reports/tr18/#Simple_Loose_Matches). +* **unicode-gencat** - + Provide the data for + [Unicode general categories](https://www.unicode.org/reports/tr44/tr44-24.html#General_Category_Values). + This includes, but is not limited to, `Decimal_Number`, `Letter`, + `Math_Symbol`, `Number` and `Punctuation`. +* **unicode-perl** - + Provide the data for supporting the Unicode-aware Perl character classes, + corresponding to `\w`, `\s` and `\d`. This is also necessary for using + Unicode-aware word boundary assertions. Note that if this feature is + disabled, the `\s` and `\d` character classes are still available if the + `unicode-bool` and `unicode-gencat` features are enabled, respectively. +* **unicode-script** - + Provide the data for + [Unicode scripts and script extensions](https://www.unicode.org/reports/tr24/). + This includes, but is not limited to, `Arabic`, `Cyrillic`, `Hebrew`, + `Latin` and `Thai`. +* **unicode-segment** - + Provide the data necessary to provide the properties used to implement the + [Unicode text segmentation algorithms](https://www.unicode.org/reports/tr29/). + This enables using classes like `\p{gcb=Extend}`, `\p{wb=Katakana}` and + `\p{sb=ATerm}`. +* **arbitrary** - + Enabling this feature introduces a public dependency on the + [`arbitrary`](https://crates.io/crates/arbitrary) + crate. Namely, it implements the `Arbitrary` trait from that crate for the + [`Ast`](crate::ast::Ast) type. This feature is disabled by default. +*/ + +#![no_std] +#![forbid(unsafe_code)] +#![deny(missing_docs, rustdoc::broken_intra_doc_links)] +#![warn(missing_debug_implementations)] +// This adds Cargo feature annotations to items in the rustdoc output. Which is +// sadly hugely beneficial for this crate due to the number of features. 
+#![cfg_attr(docsrs_regex, feature(doc_cfg))] + +#[cfg(any(test, feature = "std"))] +extern crate std; + +extern crate alloc; + +pub use crate::{ + error::Error, + parser::{parse, Parser, ParserBuilder}, + unicode::UnicodeWordError, +}; + +use alloc::string::String; + +pub mod ast; +mod debug; +mod either; +mod error; +pub mod hir; +mod parser; +mod rank; +mod unicode; +mod unicode_tables; +pub mod utf8; + +/// Escapes all regular expression meta characters in `text`. +/// +/// The string returned may be safely used as a literal in a regular +/// expression. +pub fn escape(text: &str) -> String { + let mut quoted = String::new(); + escape_into(text, &mut quoted); + quoted +} + +/// Escapes all meta characters in `text` and writes the result into `buf`. +/// +/// This will append escape characters into the given buffer. The characters +/// that are appended are safe to use as a literal in a regular expression. +pub fn escape_into(text: &str, buf: &mut String) { + buf.reserve(text.len()); + for c in text.chars() { + if is_meta_character(c) { + buf.push('\\'); + } + buf.push(c); + } +} + +/// Returns true if the given character has significance in a regex. +/// +/// Generally speaking, these are the only characters which _must_ be escaped +/// in order to match their literal meaning. For example, to match a literal +/// `|`, one could write `\|`. Sometimes escaping isn't always necessary. For +/// example, `-` is treated as a meta character because of its significance +/// for writing ranges inside of character classes, but the regex `-` will +/// match a literal `-` because `-` has no special meaning outside of character +/// classes. +/// +/// In order to determine whether a character may be escaped at all, the +/// [`is_escapeable_character`] routine should be used. The difference between +/// `is_meta_character` and `is_escapeable_character` is that the latter will +/// return true for some characters that are _not_ meta characters. For +/// example, `%` and `\%` both match a literal `%` in all contexts. In other +/// words, `is_escapeable_character` includes "superfluous" escapes. +/// +/// Note that the set of characters for which this function returns `true` or +/// `false` is fixed and won't change in a semver compatible release. (In this +/// case, "semver compatible release" actually refers to the `regex` crate +/// itself, since reducing or expanding the set of meta characters would be a +/// breaking change for not just `regex-syntax` but also `regex` itself.) +/// +/// # Example +/// +/// ``` +/// use regex_syntax::is_meta_character; +/// +/// assert!(is_meta_character('?')); +/// assert!(is_meta_character('-')); +/// assert!(is_meta_character('&')); +/// assert!(is_meta_character('#')); +/// +/// assert!(!is_meta_character('%')); +/// assert!(!is_meta_character('/')); +/// assert!(!is_meta_character('!')); +/// assert!(!is_meta_character('"')); +/// assert!(!is_meta_character('e')); +/// ``` +pub fn is_meta_character(c: char) -> bool { + match c { + '\\' | '.' | '+' | '*' | '?' | '(' | ')' | '|' | '[' | ']' | '{' + | '}' | '^' | '$' | '#' | '&' | '-' | '~' => true, + _ => false, + } +} + +/// Returns true if the given character can be escaped in a regex. +/// +/// This returns true in all cases that `is_meta_character` returns true, but +/// also returns true in some cases where `is_meta_character` returns false. +/// For example, `%` is not a meta character, but it is escapable. That is, +/// `%` and `\%` both match a literal `%` in all contexts. 
+/// +/// The purpose of this routine is to provide knowledge about what characters +/// may be escaped. Namely, most regex engines permit "superfluous" escapes +/// where characters without any special significance may be escaped even +/// though there is no actual _need_ to do so. +/// +/// This will return false for some characters. For example, `e` is not +/// escapable. Therefore, `\e` will either result in a parse error (which is +/// true today), or it could backwards compatibly evolve into a new construct +/// with its own meaning. Indeed, that is the purpose of banning _some_ +/// superfluous escapes: it provides a way to evolve the syntax in a compatible +/// manner. +/// +/// # Example +/// +/// ``` +/// use regex_syntax::is_escapeable_character; +/// +/// assert!(is_escapeable_character('?')); +/// assert!(is_escapeable_character('-')); +/// assert!(is_escapeable_character('&')); +/// assert!(is_escapeable_character('#')); +/// assert!(is_escapeable_character('%')); +/// assert!(is_escapeable_character('/')); +/// assert!(is_escapeable_character('!')); +/// assert!(is_escapeable_character('"')); +/// +/// assert!(!is_escapeable_character('e')); +/// ``` +pub fn is_escapeable_character(c: char) -> bool { + // Certainly escapable if it's a meta character. + if is_meta_character(c) { + return true; + } + // Any character that isn't ASCII is definitely not escapable. There's + // no real need to allow things like \☃ right? + if !c.is_ascii() { + return false; + } + // Otherwise, we basically say that everything is escapable unless it's a + // letter or digit. Things like \3 are either octal (when enabled) or an + // error, and we should keep it that way. Otherwise, letters are reserved + // for adding new syntax in a backwards compatible way. + match c { + '0'..='9' | 'A'..='Z' | 'a'..='z' => false, + // While not currently supported, we keep these as not escapable to + // give us some flexibility with respect to supporting the \< and + // \> word boundary assertions in the future. By rejecting them as + // escapable, \< and \> will result in a parse error. Thus, we can + // turn them into something else in the future without it being a + // backwards incompatible change. + // + // OK, now we support \< and \>, and we need to retain them as *not* + // escapable here since the escape sequence is significant. + '<' | '>' => false, + _ => true, + } +} + +/// Returns true if and only if the given character is a Unicode word +/// character. +/// +/// A Unicode word character is defined by +/// [UTS#18 Annex C](https://unicode.org/reports/tr18/#Compatibility_Properties). +/// In particular, a character +/// is considered a word character if it is in either of the `Alphabetic` or +/// `Join_Control` properties, or is in one of the `Decimal_Number`, `Mark` +/// or `Connector_Punctuation` general categories. +/// +/// # Panics +/// +/// If the `unicode-perl` feature is not enabled, then this function +/// panics. For this reason, it is recommended that callers use +/// [`try_is_word_character`] instead. +pub fn is_word_character(c: char) -> bool { + try_is_word_character(c).expect("unicode-perl feature must be enabled") +} + +/// Returns true if and only if the given character is a Unicode word +/// character. +/// +/// A Unicode word character is defined by +/// [UTS#18 Annex C](https://unicode.org/reports/tr18/#Compatibility_Properties). 
+/// In particular, a character
+/// is considered a word character if it is in either of the `Alphabetic` or
+/// `Join_Control` properties, or is in one of the `Decimal_Number`, `Mark`
+/// or `Connector_Punctuation` general categories.
+///
+/// # Errors
+///
+/// If the `unicode-perl` feature is not enabled, then this function always
+/// returns an error.
+pub fn try_is_word_character(
+    c: char,
+) -> core::result::Result<bool, UnicodeWordError> {
+    unicode::is_word_character(c)
+}
+
+/// Returns true if and only if the given character is an ASCII word character.
+///
+/// An ASCII word character is defined by the following character class:
+/// `[_0-9a-zA-Z]`.
+pub fn is_word_byte(c: u8) -> bool {
+    match c {
+        b'_' | b'0'..=b'9' | b'a'..=b'z' | b'A'..=b'Z' => true,
+        _ => false,
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use alloc::string::ToString;
+
+    use super::*;
+
+    #[test]
+    fn escape_meta() {
+        assert_eq!(
+            escape(r"\.+*?()|[]{}^$#&-~"),
+            r"\\\.\+\*\?\(\)\|\[\]\{\}\^\$\#\&\-\~".to_string()
+        );
+    }
+
+    #[test]
+    fn word_byte() {
+        assert!(is_word_byte(b'a'));
+        assert!(!is_word_byte(b'-'));
+    }
+
+    #[test]
+    #[cfg(feature = "unicode-perl")]
+    fn word_char() {
+        assert!(is_word_character('a'), "ASCII");
+        assert!(is_word_character('à'), "Latin-1");
+        assert!(is_word_character('β'), "Greek");
+        assert!(is_word_character('\u{11011}'), "Brahmi (Unicode 6.0)");
+        assert!(is_word_character('\u{11611}'), "Modi (Unicode 7.0)");
+        assert!(is_word_character('\u{11711}'), "Ahom (Unicode 8.0)");
+        assert!(is_word_character('\u{17828}'), "Tangut (Unicode 9.0)");
+        assert!(is_word_character('\u{1B1B1}'), "Nushu (Unicode 10.0)");
+        assert!(is_word_character('\u{16E40}'), "Medefaidrin (Unicode 11.0)");
+        assert!(!is_word_character('-'));
+        assert!(!is_word_character('☃'));
+    }
+
+    #[test]
+    #[should_panic]
+    #[cfg(not(feature = "unicode-perl"))]
+    fn word_char_disabled_panic() {
+        assert!(is_word_character('a'));
+    }
+
+    #[test]
+    #[cfg(not(feature = "unicode-perl"))]
+    fn word_char_disabled_error() {
+        assert!(try_is_word_character('a').is_err());
+    }
+}
diff --git a/vendor/regex-syntax/src/parser.rs b/vendor/regex-syntax/src/parser.rs
new file mode 100644
index 00000000000000..f482b84667a7aa
--- /dev/null
+++ b/vendor/regex-syntax/src/parser.rs
@@ -0,0 +1,254 @@
+use crate::{ast, hir, Error};
+
+/// A convenience routine for parsing a regex using default options.
+///
+/// This is equivalent to `Parser::new().parse(pattern)`.
+///
+/// If you need to set non-default options, then use a [`ParserBuilder`].
+///
+/// This routine returns an [`Hir`](hir::Hir) value. Namely, it automatically
+/// parses the pattern as an [`Ast`](ast::Ast) and then invokes the translator
+/// to convert the `Ast` into an `Hir`. If you need access to the `Ast`, then
+/// you should use a [`ast::parse::Parser`].
+pub fn parse(pattern: &str) -> Result<hir::Hir, Error> {
+    Parser::new().parse(pattern)
+}
+
+/// A builder for a regular expression parser.
+///
+/// This builder permits modifying configuration options for the parser.
+///
+/// This type combines the builder options for both the [AST
+/// `ParserBuilder`](ast::parse::ParserBuilder) and the [HIR
+/// `TranslatorBuilder`](hir::translate::TranslatorBuilder).
+#[derive(Clone, Debug, Default)]
+pub struct ParserBuilder {
+    ast: ast::parse::ParserBuilder,
+    hir: hir::translate::TranslatorBuilder,
+}
+
+impl ParserBuilder {
+    /// Create a new parser builder with a default configuration.
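+    // -----------------------------------------------------------------
+    // Editorial note, not part of the vendored regex-syntax sources: a
+    // minimal usage sketch of the API documented above (`parse` plus a
+    // configured `ParserBuilder`), assuming the crate is used under its
+    // usual name `regex_syntax`:
+    //
+    //     use regex_syntax::{parse, ParserBuilder};
+    //
+    //     // Default options: concrete syntax straight to an `Hir`.
+    //     let hir = parse(r"a|b").unwrap();
+    //
+    //     // Non-default options go through the builder first.
+    //     let hir_ci = ParserBuilder::new()
+    //         .case_insensitive(true)
+    //         .build()
+    //         .parse(r"a|b")
+    //         .unwrap();
+    // -----------------------------------------------------------------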
+ pub fn new() -> ParserBuilder { + ParserBuilder::default() + } + + /// Build a parser from this configuration with the given pattern. + pub fn build(&self) -> Parser { + Parser { ast: self.ast.build(), hir: self.hir.build() } + } + + /// Set the nesting limit for this parser. + /// + /// The nesting limit controls how deep the abstract syntax tree is allowed + /// to be. If the AST exceeds the given limit (e.g., with too many nested + /// groups), then an error is returned by the parser. + /// + /// The purpose of this limit is to act as a heuristic to prevent stack + /// overflow for consumers that do structural induction on an `Ast` using + /// explicit recursion. While this crate never does this (instead using + /// constant stack space and moving the call stack to the heap), other + /// crates may. + /// + /// This limit is not checked until the entire Ast is parsed. Therefore, + /// if callers want to put a limit on the amount of heap space used, then + /// they should impose a limit on the length, in bytes, of the concrete + /// pattern string. In particular, this is viable since this parser + /// implementation will limit itself to heap space proportional to the + /// length of the pattern string. + /// + /// Note that a nest limit of `0` will return a nest limit error for most + /// patterns but not all. For example, a nest limit of `0` permits `a` but + /// not `ab`, since `ab` requires a concatenation, which results in a nest + /// depth of `1`. In general, a nest limit is not something that manifests + /// in an obvious way in the concrete syntax, therefore, it should not be + /// used in a granular way. + pub fn nest_limit(&mut self, limit: u32) -> &mut ParserBuilder { + self.ast.nest_limit(limit); + self + } + + /// Whether to support octal syntax or not. + /// + /// Octal syntax is a little-known way of uttering Unicode codepoints in + /// a regular expression. For example, `a`, `\x61`, `\u0061` and + /// `\141` are all equivalent regular expressions, where the last example + /// shows octal syntax. + /// + /// While supporting octal syntax isn't in and of itself a problem, it does + /// make good error messages harder. That is, in PCRE based regex engines, + /// syntax like `\0` invokes a backreference, which is explicitly + /// unsupported in Rust's regex engine. However, many users expect it to + /// be supported. Therefore, when octal support is disabled, the error + /// message will explicitly mention that backreferences aren't supported. + /// + /// Octal syntax is disabled by default. + pub fn octal(&mut self, yes: bool) -> &mut ParserBuilder { + self.ast.octal(yes); + self + } + + /// When disabled, translation will permit the construction of a regular + /// expression that may match invalid UTF-8. + /// + /// When enabled (the default), the translator is guaranteed to produce an + /// expression that, for non-empty matches, will only ever produce spans + /// that are entirely valid UTF-8 (otherwise, the translator will return an + /// error). + /// + /// Perhaps surprisingly, when UTF-8 is enabled, an empty regex or even + /// a negated ASCII word boundary (uttered as `(?-u:\B)` in the concrete + /// syntax) will be allowed even though they can produce matches that split + /// a UTF-8 encoded codepoint. This only applies to zero-width or "empty" + /// matches, and it is expected that the regex engine itself must handle + /// these cases if necessary (perhaps by suppressing any zero-width matches + /// that split a codepoint). 
+ pub fn utf8(&mut self, yes: bool) -> &mut ParserBuilder { + self.hir.utf8(yes); + self + } + + /// Enable verbose mode in the regular expression. + /// + /// When enabled, verbose mode permits insignificant whitespace in many + /// places in the regular expression, as well as comments. Comments are + /// started using `#` and continue until the end of the line. + /// + /// By default, this is disabled. It may be selectively enabled in the + /// regular expression by using the `x` flag regardless of this setting. + pub fn ignore_whitespace(&mut self, yes: bool) -> &mut ParserBuilder { + self.ast.ignore_whitespace(yes); + self + } + + /// Enable or disable the case insensitive flag by default. + /// + /// By default this is disabled. It may alternatively be selectively + /// enabled in the regular expression itself via the `i` flag. + pub fn case_insensitive(&mut self, yes: bool) -> &mut ParserBuilder { + self.hir.case_insensitive(yes); + self + } + + /// Enable or disable the multi-line matching flag by default. + /// + /// By default this is disabled. It may alternatively be selectively + /// enabled in the regular expression itself via the `m` flag. + pub fn multi_line(&mut self, yes: bool) -> &mut ParserBuilder { + self.hir.multi_line(yes); + self + } + + /// Enable or disable the "dot matches any character" flag by default. + /// + /// By default this is disabled. It may alternatively be selectively + /// enabled in the regular expression itself via the `s` flag. + pub fn dot_matches_new_line(&mut self, yes: bool) -> &mut ParserBuilder { + self.hir.dot_matches_new_line(yes); + self + } + + /// Enable or disable the CRLF mode flag by default. + /// + /// By default this is disabled. It may alternatively be selectively + /// enabled in the regular expression itself via the `R` flag. + /// + /// When CRLF mode is enabled, the following happens: + /// + /// * Unless `dot_matches_new_line` is enabled, `.` will match any character + /// except for `\r` and `\n`. + /// * When `multi_line` mode is enabled, `^` and `$` will treat `\r\n`, + /// `\r` and `\n` as line terminators. And in particular, neither will + /// match between a `\r` and a `\n`. + pub fn crlf(&mut self, yes: bool) -> &mut ParserBuilder { + self.hir.crlf(yes); + self + } + + /// Sets the line terminator for use with `(?u-s:.)` and `(?-us:.)`. + /// + /// Namely, instead of `.` (by default) matching everything except for `\n`, + /// this will cause `.` to match everything except for the byte given. + /// + /// If `.` is used in a context where Unicode mode is enabled and this byte + /// isn't ASCII, then an error will be returned. When Unicode mode is + /// disabled, then any byte is permitted, but will return an error if UTF-8 + /// mode is enabled and it is a non-ASCII byte. + /// + /// In short, any ASCII value for a line terminator is always okay. But a + /// non-ASCII byte might result in an error depending on whether Unicode + /// mode or UTF-8 mode are enabled. + /// + /// Note that if `R` mode is enabled then it always takes precedence and + /// the line terminator will be treated as `\r` and `\n` simultaneously. + /// + /// Note also that this *doesn't* impact the look-around assertions + /// `(?m:^)` and `(?m:$)`. That's usually controlled by additional + /// configuration in the regex engine itself. + pub fn line_terminator(&mut self, byte: u8) -> &mut ParserBuilder { + self.hir.line_terminator(byte); + self + } + + /// Enable or disable the "swap greed" flag by default. + /// + /// By default this is disabled. 
It may alternatively be selectively + /// enabled in the regular expression itself via the `U` flag. + pub fn swap_greed(&mut self, yes: bool) -> &mut ParserBuilder { + self.hir.swap_greed(yes); + self + } + + /// Enable or disable the Unicode flag (`u`) by default. + /// + /// By default this is **enabled**. It may alternatively be selectively + /// disabled in the regular expression itself via the `u` flag. + /// + /// Note that unless `utf8` is disabled (it's enabled by default), a + /// regular expression will fail to parse if Unicode mode is disabled and a + /// sub-expression could possibly match invalid UTF-8. + pub fn unicode(&mut self, yes: bool) -> &mut ParserBuilder { + self.hir.unicode(yes); + self + } +} + +/// A convenience parser for regular expressions. +/// +/// This parser takes as input a regular expression pattern string (the +/// "concrete syntax") and returns a high-level intermediate representation +/// (the HIR) suitable for most types of analysis. In particular, this parser +/// hides the intermediate state of producing an AST (the "abstract syntax"). +/// The AST is itself far more complex than the HIR, so this parser serves as a +/// convenience for never having to deal with it at all. +/// +/// If callers have more fine grained use cases that need an AST, then please +/// see the [`ast::parse`] module. +/// +/// A `Parser` can be configured in more detail via a [`ParserBuilder`]. +#[derive(Clone, Debug)] +pub struct Parser { + ast: ast::parse::Parser, + hir: hir::translate::Translator, +} + +impl Parser { + /// Create a new parser with a default configuration. + /// + /// The parser can be run with `parse` method. The parse method returns + /// a high level intermediate representation of the given regular + /// expression. + /// + /// To set configuration options on the parser, use [`ParserBuilder`]. + pub fn new() -> Parser { + ParserBuilder::new().build() + } + + /// Parse the regular expression into a high level intermediate + /// representation. + pub fn parse(&mut self, pattern: &str) -> Result { + let ast = self.ast.parse(pattern)?; + let hir = self.hir.translate(pattern, &ast)?; + Ok(hir) + } +} diff --git a/vendor/regex-syntax/src/rank.rs b/vendor/regex-syntax/src/rank.rs new file mode 100644 index 00000000000000..ccb25a20aedcdf --- /dev/null +++ b/vendor/regex-syntax/src/rank.rs @@ -0,0 +1,258 @@ +pub(crate) const BYTE_FREQUENCIES: [u8; 256] = [ + 55, // '\x00' + 52, // '\x01' + 51, // '\x02' + 50, // '\x03' + 49, // '\x04' + 48, // '\x05' + 47, // '\x06' + 46, // '\x07' + 45, // '\x08' + 103, // '\t' + 242, // '\n' + 66, // '\x0b' + 67, // '\x0c' + 229, // '\r' + 44, // '\x0e' + 43, // '\x0f' + 42, // '\x10' + 41, // '\x11' + 40, // '\x12' + 39, // '\x13' + 38, // '\x14' + 37, // '\x15' + 36, // '\x16' + 35, // '\x17' + 34, // '\x18' + 33, // '\x19' + 56, // '\x1a' + 32, // '\x1b' + 31, // '\x1c' + 30, // '\x1d' + 29, // '\x1e' + 28, // '\x1f' + 255, // ' ' + 148, // '!' + 164, // '"' + 149, // '#' + 136, // '$' + 160, // '%' + 155, // '&' + 173, // "'" + 221, // '(' + 222, // ')' + 134, // '*' + 122, // '+' + 232, // ',' + 202, // '-' + 215, // '.' + 224, // '/' + 208, // '0' + 220, // '1' + 204, // '2' + 187, // '3' + 183, // '4' + 179, // '5' + 177, // '6' + 168, // '7' + 178, // '8' + 200, // '9' + 226, // ':' + 195, // ';' + 154, // '<' + 184, // '=' + 174, // '>' + 126, // '?' 
+ 120, // '@' + 191, // 'A' + 157, // 'B' + 194, // 'C' + 170, // 'D' + 189, // 'E' + 162, // 'F' + 161, // 'G' + 150, // 'H' + 193, // 'I' + 142, // 'J' + 137, // 'K' + 171, // 'L' + 176, // 'M' + 185, // 'N' + 167, // 'O' + 186, // 'P' + 112, // 'Q' + 175, // 'R' + 192, // 'S' + 188, // 'T' + 156, // 'U' + 140, // 'V' + 143, // 'W' + 123, // 'X' + 133, // 'Y' + 128, // 'Z' + 147, // '[' + 138, // '\\' + 146, // ']' + 114, // '^' + 223, // '_' + 151, // '`' + 249, // 'a' + 216, // 'b' + 238, // 'c' + 236, // 'd' + 253, // 'e' + 227, // 'f' + 218, // 'g' + 230, // 'h' + 247, // 'i' + 135, // 'j' + 180, // 'k' + 241, // 'l' + 233, // 'm' + 246, // 'n' + 244, // 'o' + 231, // 'p' + 139, // 'q' + 245, // 'r' + 243, // 's' + 251, // 't' + 235, // 'u' + 201, // 'v' + 196, // 'w' + 240, // 'x' + 214, // 'y' + 152, // 'z' + 182, // '{' + 205, // '|' + 181, // '}' + 127, // '~' + 27, // '\x7f' + 212, // '\x80' + 211, // '\x81' + 210, // '\x82' + 213, // '\x83' + 228, // '\x84' + 197, // '\x85' + 169, // '\x86' + 159, // '\x87' + 131, // '\x88' + 172, // '\x89' + 105, // '\x8a' + 80, // '\x8b' + 98, // '\x8c' + 96, // '\x8d' + 97, // '\x8e' + 81, // '\x8f' + 207, // '\x90' + 145, // '\x91' + 116, // '\x92' + 115, // '\x93' + 144, // '\x94' + 130, // '\x95' + 153, // '\x96' + 121, // '\x97' + 107, // '\x98' + 132, // '\x99' + 109, // '\x9a' + 110, // '\x9b' + 124, // '\x9c' + 111, // '\x9d' + 82, // '\x9e' + 108, // '\x9f' + 118, // '\xa0' + 141, // '¡' + 113, // '¢' + 129, // '£' + 119, // '¤' + 125, // '¥' + 165, // '¦' + 117, // '§' + 92, // '¨' + 106, // '©' + 83, // 'ª' + 72, // '«' + 99, // '¬' + 93, // '\xad' + 65, // '®' + 79, // '¯' + 166, // '°' + 237, // '±' + 163, // '²' + 199, // '³' + 190, // '´' + 225, // 'µ' + 209, // '¶' + 203, // '·' + 198, // '¸' + 217, // '¹' + 219, // 'º' + 206, // '»' + 234, // '¼' + 248, // '½' + 158, // '¾' + 239, // '¿' + 255, // 'À' + 255, // 'Á' + 255, // 'Â' + 255, // 'Ã' + 255, // 'Ä' + 255, // 'Å' + 255, // 'Æ' + 255, // 'Ç' + 255, // 'È' + 255, // 'É' + 255, // 'Ê' + 255, // 'Ë' + 255, // 'Ì' + 255, // 'Í' + 255, // 'Î' + 255, // 'Ï' + 255, // 'Ð' + 255, // 'Ñ' + 255, // 'Ò' + 255, // 'Ó' + 255, // 'Ô' + 255, // 'Õ' + 255, // 'Ö' + 255, // '×' + 255, // 'Ø' + 255, // 'Ù' + 255, // 'Ú' + 255, // 'Û' + 255, // 'Ü' + 255, // 'Ý' + 255, // 'Þ' + 255, // 'ß' + 255, // 'à' + 255, // 'á' + 255, // 'â' + 255, // 'ã' + 255, // 'ä' + 255, // 'å' + 255, // 'æ' + 255, // 'ç' + 255, // 'è' + 255, // 'é' + 255, // 'ê' + 255, // 'ë' + 255, // 'ì' + 255, // 'í' + 255, // 'î' + 255, // 'ï' + 255, // 'ð' + 255, // 'ñ' + 255, // 'ò' + 255, // 'ó' + 255, // 'ô' + 255, // 'õ' + 255, // 'ö' + 255, // '÷' + 255, // 'ø' + 255, // 'ù' + 255, // 'ú' + 255, // 'û' + 255, // 'ü' + 255, // 'ý' + 255, // 'þ' + 255, // 'ÿ' +]; diff --git a/vendor/regex-syntax/src/unicode.rs b/vendor/regex-syntax/src/unicode.rs new file mode 100644 index 00000000000000..07f78194b21eaf --- /dev/null +++ b/vendor/regex-syntax/src/unicode.rs @@ -0,0 +1,1041 @@ +use alloc::{ + string::{String, ToString}, + vec::Vec, +}; + +use crate::hir; + +/// An inclusive range of codepoints from a generated file (hence the static +/// lifetime). +type Range = &'static [(char, char)]; + +/// An error that occurs when dealing with Unicode. +/// +/// We don't impl the Error trait here because these always get converted +/// into other public errors. (This error type isn't exported.) +#[derive(Debug)] +pub enum Error { + PropertyNotFound, + PropertyValueNotFound, + // Not used when unicode-perl is enabled. 
+ #[allow(dead_code)] + PerlClassNotFound, +} + +/// An error that occurs when Unicode-aware simple case folding fails. +/// +/// This error can occur when the case mapping tables necessary for Unicode +/// aware case folding are unavailable. This only occurs when the +/// `unicode-case` feature is disabled. (The feature is enabled by default.) +#[derive(Debug)] +pub struct CaseFoldError(()); + +#[cfg(feature = "std")] +impl std::error::Error for CaseFoldError {} + +impl core::fmt::Display for CaseFoldError { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + write!( + f, + "Unicode-aware case folding is not available \ + (probably because the unicode-case feature is not enabled)" + ) + } +} + +/// An error that occurs when the Unicode-aware `\w` class is unavailable. +/// +/// This error can occur when the data tables necessary for the Unicode aware +/// Perl character class `\w` are unavailable. This only occurs when the +/// `unicode-perl` feature is disabled. (The feature is enabled by default.) +#[derive(Debug)] +pub struct UnicodeWordError(()); + +#[cfg(feature = "std")] +impl std::error::Error for UnicodeWordError {} + +impl core::fmt::Display for UnicodeWordError { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + write!( + f, + "Unicode-aware \\w class is not available \ + (probably because the unicode-perl feature is not enabled)" + ) + } +} + +/// A state oriented traverser of the simple case folding table. +/// +/// A case folder can be constructed via `SimpleCaseFolder::new()`, which will +/// return an error if the underlying case folding table is unavailable. +/// +/// After construction, it is expected that callers will use +/// `SimpleCaseFolder::mapping` by calling it with codepoints in strictly +/// increasing order. For example, calling it on `b` and then on `a` is illegal +/// and will result in a panic. +/// +/// The main idea of this type is that it tries hard to make mapping lookups +/// fast by exploiting the structure of the underlying table, and the ordering +/// assumption enables this. +#[derive(Debug)] +pub struct SimpleCaseFolder { + /// The simple case fold table. It's a sorted association list, where the + /// keys are Unicode scalar values and the values are the corresponding + /// equivalence class (not including the key) of the "simple" case folded + /// Unicode scalar values. + table: &'static [(char, &'static [char])], + /// The last codepoint that was used for a lookup. + last: Option, + /// The index to the entry in `table` corresponding to the smallest key `k` + /// such that `k > k0`, where `k0` is the most recent key lookup. Note that + /// in particular, `k0` may not be in the table! + next: usize, +} + +impl SimpleCaseFolder { + /// Create a new simple case folder, returning an error if the underlying + /// case folding table is unavailable. + pub fn new() -> Result { + #[cfg(not(feature = "unicode-case"))] + { + Err(CaseFoldError(())) + } + #[cfg(feature = "unicode-case")] + { + Ok(SimpleCaseFolder { + table: crate::unicode_tables::case_folding_simple::CASE_FOLDING_SIMPLE, + last: None, + next: 0, + }) + } + } + + /// Return the equivalence class of case folded codepoints for the given + /// codepoint. The equivalence class returned never includes the codepoint + /// given. If the given codepoint has no case folded codepoints (i.e., + /// no entry in the underlying case folding table), then this returns an + /// empty slice. 
+ /// + /// # Panics + /// + /// This panics when called with a `c` that is less than or equal to the + /// previous call. In other words, callers need to use this method with + /// strictly increasing values of `c`. + pub fn mapping(&mut self, c: char) -> &'static [char] { + if let Some(last) = self.last { + assert!( + last < c, + "got codepoint U+{:X} which occurs before \ + last codepoint U+{:X}", + u32::from(c), + u32::from(last), + ); + } + self.last = Some(c); + if self.next >= self.table.len() { + return &[]; + } + let (k, v) = self.table[self.next]; + if k == c { + self.next += 1; + return v; + } + match self.get(c) { + Err(i) => { + self.next = i; + &[] + } + Ok(i) => { + // Since we require lookups to proceed + // in order, anything we find should be + // after whatever we thought might be + // next. Otherwise, the caller is either + // going out of order or we would have + // found our next key at 'self.next'. + assert!(i > self.next); + self.next = i + 1; + self.table[i].1 + } + } + } + + /// Returns true if and only if the given range overlaps with any region + /// of the underlying case folding table. That is, when true, there exists + /// at least one codepoint in the inclusive range `[start, end]` that has + /// a non-trivial equivalence class of case folded codepoints. Conversely, + /// when this returns false, all codepoints in the range `[start, end]` + /// correspond to the trivial equivalence class of case folded codepoints, + /// i.e., itself. + /// + /// This is useful to call before iterating over the codepoints in the + /// range and looking up the mapping for each. If you know none of the + /// mappings will return anything, then you might be able to skip doing it + /// altogether. + /// + /// # Panics + /// + /// This panics when `end < start`. + pub fn overlaps(&self, start: char, end: char) -> bool { + use core::cmp::Ordering; + + assert!(start <= end); + self.table + .binary_search_by(|&(c, _)| { + if start <= c && c <= end { + Ordering::Equal + } else if c > end { + Ordering::Greater + } else { + Ordering::Less + } + }) + .is_ok() + } + + /// Returns the index at which `c` occurs in the simple case fold table. If + /// `c` does not occur, then this returns an `i` such that `table[i-1].0 < + /// c` and `table[i].0 > c`. + fn get(&self, c: char) -> Result { + self.table.binary_search_by_key(&c, |&(c1, _)| c1) + } +} + +/// A query for finding a character class defined by Unicode. This supports +/// either use of a property name directly, or lookup by property value. The +/// former generally refers to Binary properties (see UTS#44, Table 8), but +/// as a special exception (see UTS#18, Section 1.2) both general categories +/// (an enumeration) and scripts (a catalog) are supported as if each of their +/// possible values were a binary property. +/// +/// In all circumstances, property names and values are normalized and +/// canonicalized. That is, `GC == gc == GeneralCategory == general_category`. +/// +/// The lifetime `'a` refers to the shorter of the lifetimes of property name +/// and property value. +#[derive(Debug)] +pub enum ClassQuery<'a> { + /// Return a class corresponding to a Unicode binary property, named by + /// a single letter. + OneLetter(char), + /// Return a class corresponding to a Unicode binary property. + /// + /// Note that, by special exception (see UTS#18, Section 1.2), both + /// general category values and script values are permitted here as if + /// they were a binary property. 
+ Binary(&'a str), + /// Return a class corresponding to all codepoints whose property + /// (identified by `property_name`) corresponds to the given value + /// (identified by `property_value`). + ByValue { + /// A property name. + property_name: &'a str, + /// A property value. + property_value: &'a str, + }, +} + +impl<'a> ClassQuery<'a> { + fn canonicalize(&self) -> Result { + match *self { + ClassQuery::OneLetter(c) => self.canonical_binary(&c.to_string()), + ClassQuery::Binary(name) => self.canonical_binary(name), + ClassQuery::ByValue { property_name, property_value } => { + let property_name = symbolic_name_normalize(property_name); + let property_value = symbolic_name_normalize(property_value); + + let canon_name = match canonical_prop(&property_name)? { + None => return Err(Error::PropertyNotFound), + Some(canon_name) => canon_name, + }; + Ok(match canon_name { + "General_Category" => { + let canon = match canonical_gencat(&property_value)? { + None => return Err(Error::PropertyValueNotFound), + Some(canon) => canon, + }; + CanonicalClassQuery::GeneralCategory(canon) + } + "Script" => { + let canon = match canonical_script(&property_value)? { + None => return Err(Error::PropertyValueNotFound), + Some(canon) => canon, + }; + CanonicalClassQuery::Script(canon) + } + _ => { + let vals = match property_values(canon_name)? { + None => return Err(Error::PropertyValueNotFound), + Some(vals) => vals, + }; + let canon_val = + match canonical_value(vals, &property_value) { + None => { + return Err(Error::PropertyValueNotFound) + } + Some(canon_val) => canon_val, + }; + CanonicalClassQuery::ByValue { + property_name: canon_name, + property_value: canon_val, + } + } + }) + } + } + } + + fn canonical_binary( + &self, + name: &str, + ) -> Result { + let norm = symbolic_name_normalize(name); + + // This is a special case where 'cf' refers to the 'Format' general + // category, but where the 'cf' abbreviation is also an abbreviation + // for the 'Case_Folding' property. But we want to treat it as + // a general category. (Currently, we don't even support the + // 'Case_Folding' property. But if we do in the future, users will be + // required to spell it out.) + // + // Also 'sc' refers to the 'Currency_Symbol' general category, but is + // also the abbreviation for the 'Script' property. So we avoid calling + // 'canonical_prop' for it too, which would erroneously normalize it + // to 'Script'. + // + // Another case: 'lc' is an abbreviation for the 'Cased_Letter' + // general category, but is also an abbreviation for the 'Lowercase_Mapping' + // property. We don't currently support the latter, so as with 'cf' + // above, we treat 'lc' as 'Cased_Letter'. + if norm != "cf" && norm != "sc" && norm != "lc" { + if let Some(canon) = canonical_prop(&norm)? { + return Ok(CanonicalClassQuery::Binary(canon)); + } + } + if let Some(canon) = canonical_gencat(&norm)? { + return Ok(CanonicalClassQuery::GeneralCategory(canon)); + } + if let Some(canon) = canonical_script(&norm)? { + return Ok(CanonicalClassQuery::Script(canon)); + } + Err(Error::PropertyNotFound) + } +} + +/// Like ClassQuery, but its parameters have been canonicalized. This also +/// differentiates binary properties from flattened general categories and +/// scripts. +#[derive(Debug, Eq, PartialEq)] +enum CanonicalClassQuery { + /// The canonical binary property name. + Binary(&'static str), + /// The canonical general category name. + GeneralCategory(&'static str), + /// The canonical script name. 
+    Script(&'static str),
+    /// An arbitrary association between property and value, both of which
+    /// have been canonicalized.
+    ///
+    /// Note that by construction, the property name of ByValue will never
+    /// be General_Category or Script. Those two cases are subsumed by the
+    /// eponymous variants.
+    ByValue {
+        /// The canonical property name.
+        property_name: &'static str,
+        /// The canonical property value.
+        property_value: &'static str,
+    },
+}
+
+/// Looks up a Unicode class given a query. If one doesn't exist, then
+/// `None` is returned.
+pub fn class(query: ClassQuery<'_>) -> Result<hir::ClassUnicode, Error> {
+    use self::CanonicalClassQuery::*;
+
+    match query.canonicalize()? {
+        Binary(name) => bool_property(name),
+        GeneralCategory(name) => gencat(name),
+        Script(name) => script(name),
+        ByValue { property_name: "Age", property_value } => {
+            let mut class = hir::ClassUnicode::empty();
+            for set in ages(property_value)? {
+                class.union(&hir_class(set));
+            }
+            Ok(class)
+        }
+        ByValue { property_name: "Script_Extensions", property_value } => {
+            script_extension(property_value)
+        }
+        ByValue {
+            property_name: "Grapheme_Cluster_Break",
+            property_value,
+        } => gcb(property_value),
+        ByValue { property_name: "Sentence_Break", property_value } => {
+            sb(property_value)
+        }
+        ByValue { property_name: "Word_Break", property_value } => {
+            wb(property_value)
+        }
+        _ => {
+            // What else should we support?
+            Err(Error::PropertyNotFound)
+        }
+    }
+}
+
+/// Returns a Unicode aware class for \w.
+///
+/// This returns an error if the data is not available for \w.
+pub fn perl_word() -> Result<hir::ClassUnicode, Error> {
+    #[cfg(not(feature = "unicode-perl"))]
+    fn imp() -> Result<hir::ClassUnicode, Error> {
+        Err(Error::PerlClassNotFound)
+    }
+
+    #[cfg(feature = "unicode-perl")]
+    fn imp() -> Result<hir::ClassUnicode, Error> {
+        use crate::unicode_tables::perl_word::PERL_WORD;
+        Ok(hir_class(PERL_WORD))
+    }
+
+    imp()
+}
+
+/// Returns a Unicode aware class for \s.
+///
+/// This returns an error if the data is not available for \s.
+pub fn perl_space() -> Result<hir::ClassUnicode, Error> {
+    #[cfg(not(any(feature = "unicode-perl", feature = "unicode-bool")))]
+    fn imp() -> Result<hir::ClassUnicode, Error> {
+        Err(Error::PerlClassNotFound)
+    }
+
+    #[cfg(all(feature = "unicode-perl", not(feature = "unicode-bool")))]
+    fn imp() -> Result<hir::ClassUnicode, Error> {
+        use crate::unicode_tables::perl_space::WHITE_SPACE;
+        Ok(hir_class(WHITE_SPACE))
+    }
+
+    #[cfg(feature = "unicode-bool")]
+    fn imp() -> Result<hir::ClassUnicode, Error> {
+        use crate::unicode_tables::property_bool::WHITE_SPACE;
+        Ok(hir_class(WHITE_SPACE))
+    }
+
+    imp()
+}
+
+/// Returns a Unicode aware class for \d.
+///
+/// This returns an error if the data is not available for \d.
+pub fn perl_digit() -> Result<hir::ClassUnicode, Error> {
+    #[cfg(not(any(feature = "unicode-perl", feature = "unicode-gencat")))]
+    fn imp() -> Result<hir::ClassUnicode, Error> {
+        Err(Error::PerlClassNotFound)
+    }
+
+    #[cfg(all(feature = "unicode-perl", not(feature = "unicode-gencat")))]
+    fn imp() -> Result<hir::ClassUnicode, Error> {
+        use crate::unicode_tables::perl_decimal::DECIMAL_NUMBER;
+        Ok(hir_class(DECIMAL_NUMBER))
+    }
+
+    #[cfg(feature = "unicode-gencat")]
+    fn imp() -> Result<hir::ClassUnicode, Error> {
+        use crate::unicode_tables::general_category::DECIMAL_NUMBER;
+        Ok(hir_class(DECIMAL_NUMBER))
+    }
+
+    imp()
+}
+
+/// Build a Unicode HIR class from a sequence of Unicode scalar value ranges.
+pub fn hir_class(ranges: &[(char, char)]) -> hir::ClassUnicode {
+    let hir_ranges: Vec<hir::ClassUnicodeRange> = ranges
+        .iter()
+        .map(|&(s, e)| hir::ClassUnicodeRange::new(s, e))
+        .collect();
+    hir::ClassUnicode::new(hir_ranges)
+}
+
+/// Returns true only if the given codepoint is in the `\w` character class.
+///
+/// If the `unicode-perl` feature is not enabled, then this returns an error.
+pub fn is_word_character(c: char) -> Result<bool, UnicodeWordError> {
+    #[cfg(not(feature = "unicode-perl"))]
+    fn imp(_: char) -> Result<bool, UnicodeWordError> {
+        Err(UnicodeWordError(()))
+    }
+
+    #[cfg(feature = "unicode-perl")]
+    fn imp(c: char) -> Result<bool, UnicodeWordError> {
+        use crate::{is_word_byte, unicode_tables::perl_word::PERL_WORD};
+
+        if u8::try_from(c).map_or(false, is_word_byte) {
+            return Ok(true);
+        }
+        Ok(PERL_WORD
+            .binary_search_by(|&(start, end)| {
+                use core::cmp::Ordering;
+
+                if start <= c && c <= end {
+                    Ordering::Equal
+                } else if start > c {
+                    Ordering::Greater
+                } else {
+                    Ordering::Less
+                }
+            })
+            .is_ok())
+    }
+
+    imp(c)
+}
+
+/// A mapping of property values for a specific property.
+///
+/// The first element of each tuple is a normalized property value while the
+/// second element of each tuple is the corresponding canonical property
+/// value.
+type PropertyValues = &'static [(&'static str, &'static str)];
+
+fn canonical_gencat(
+    normalized_value: &str,
+) -> Result<Option<&'static str>, Error> {
+    Ok(match normalized_value {
+        "any" => Some("Any"),
+        "assigned" => Some("Assigned"),
+        "ascii" => Some("ASCII"),
+        _ => {
+            let gencats = property_values("General_Category")?.unwrap();
+            canonical_value(gencats, normalized_value)
+        }
+    })
+}
+
+fn canonical_script(
+    normalized_value: &str,
+) -> Result<Option<&'static str>, Error> {
+    let scripts = property_values("Script")?.unwrap();
+    Ok(canonical_value(scripts, normalized_value))
+}
+
+/// Find the canonical property name for the given normalized property name.
+///
+/// If no such property exists, then `None` is returned.
+///
+/// The normalized property name must have been normalized according to
+/// UAX44 LM3, which can be done using `symbolic_name_normalize`.
+///
+/// If the property names data is not available, then an error is returned.
+fn canonical_prop(
+    normalized_name: &str,
+) -> Result<Option<&'static str>, Error> {
+    #[cfg(not(any(
+        feature = "unicode-age",
+        feature = "unicode-bool",
+        feature = "unicode-gencat",
+        feature = "unicode-perl",
+        feature = "unicode-script",
+        feature = "unicode-segment",
+    )))]
+    fn imp(_: &str) -> Result<Option<&'static str>, Error> {
+        Err(Error::PropertyNotFound)
+    }
+
+    #[cfg(any(
+        feature = "unicode-age",
+        feature = "unicode-bool",
+        feature = "unicode-gencat",
+        feature = "unicode-perl",
+        feature = "unicode-script",
+        feature = "unicode-segment",
+    ))]
+    fn imp(name: &str) -> Result<Option<&'static str>, Error> {
+        use crate::unicode_tables::property_names::PROPERTY_NAMES;
+
+        Ok(PROPERTY_NAMES
+            .binary_search_by_key(&name, |&(n, _)| n)
+            .ok()
+            .map(|i| PROPERTY_NAMES[i].1))
+    }
+
+    imp(normalized_name)
+}
+
+/// Find the canonical property value for the given normalized property
+/// value.
+///
+/// The given property values should correspond to the values for the property
+/// under question, which can be found using `property_values`.
+///
+/// If no such property value exists, then `None` is returned.
+///
+/// The normalized property value must have been normalized according to
+/// UAX44 LM3, which can be done using `symbolic_name_normalize`.
+fn canonical_value(
+    vals: PropertyValues,
+    normalized_value: &str,
+) -> Option<&'static str> {
+    vals.binary_search_by_key(&normalized_value, |&(n, _)| n)
+        .ok()
+        .map(|i| vals[i].1)
+}
+
+/// Return the table of property values for the given property name.
+///
+/// If the property values data is not available, then an error is returned.
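+// ---------------------------------------------------------------------------
+// Editorial note, not part of the vendored regex-syntax sources: the lookup
+// helpers in this file share one pattern -- a binary search over a sorted
+// `&'static` table. A minimal sketch of that pattern for an inclusive
+// `(char, char)` range table such as `PERL_WORD` above (the helper name
+// `in_range_table` is hypothetical, for illustration only):
+//
+//     fn in_range_table(table: &'static [(char, char)], c: char) -> bool {
+//         use core::cmp::Ordering;
+//         table
+//             .binary_search_by(|&(start, end)| {
+//                 if start <= c && c <= end {
+//                     Ordering::Equal
+//                 } else if start > c {
+//                     Ordering::Greater
+//                 } else {
+//                     Ordering::Less
+//                 }
+//             })
+//             .is_ok()
+//     }
+//
+// The name tables (`PROPERTY_NAMES`, `PROPERTY_VALUES`) follow the same idea
+// with `binary_search_by_key` over UAX44-LM3-normalized names.
+// ---------------------------------------------------------------------------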
+fn property_values( + canonical_property_name: &'static str, +) -> Result, Error> { + #[cfg(not(any( + feature = "unicode-age", + feature = "unicode-bool", + feature = "unicode-gencat", + feature = "unicode-perl", + feature = "unicode-script", + feature = "unicode-segment", + )))] + fn imp(_: &'static str) -> Result, Error> { + Err(Error::PropertyValueNotFound) + } + + #[cfg(any( + feature = "unicode-age", + feature = "unicode-bool", + feature = "unicode-gencat", + feature = "unicode-perl", + feature = "unicode-script", + feature = "unicode-segment", + ))] + fn imp(name: &'static str) -> Result, Error> { + use crate::unicode_tables::property_values::PROPERTY_VALUES; + + Ok(PROPERTY_VALUES + .binary_search_by_key(&name, |&(n, _)| n) + .ok() + .map(|i| PROPERTY_VALUES[i].1)) + } + + imp(canonical_property_name) +} + +// This is only used in some cases, but small enough to just let it be dead +// instead of figuring out (and maintaining) the right set of features. +#[allow(dead_code)] +fn property_set( + name_map: &'static [(&'static str, Range)], + canonical: &'static str, +) -> Option { + name_map + .binary_search_by_key(&canonical, |x| x.0) + .ok() + .map(|i| name_map[i].1) +} + +/// Returns an iterator over Unicode Age sets. Each item corresponds to a set +/// of codepoints that were added in a particular revision of Unicode. The +/// iterator yields items in chronological order. +/// +/// If the given age value isn't valid or if the data isn't available, then an +/// error is returned instead. +fn ages(canonical_age: &str) -> Result, Error> { + #[cfg(not(feature = "unicode-age"))] + fn imp(_: &str) -> Result, Error> { + use core::option::IntoIter; + Err::, _>(Error::PropertyNotFound) + } + + #[cfg(feature = "unicode-age")] + fn imp(canonical_age: &str) -> Result, Error> { + use crate::unicode_tables::age; + + const AGES: &[(&str, Range)] = &[ + ("V1_1", age::V1_1), + ("V2_0", age::V2_0), + ("V2_1", age::V2_1), + ("V3_0", age::V3_0), + ("V3_1", age::V3_1), + ("V3_2", age::V3_2), + ("V4_0", age::V4_0), + ("V4_1", age::V4_1), + ("V5_0", age::V5_0), + ("V5_1", age::V5_1), + ("V5_2", age::V5_2), + ("V6_0", age::V6_0), + ("V6_1", age::V6_1), + ("V6_2", age::V6_2), + ("V6_3", age::V6_3), + ("V7_0", age::V7_0), + ("V8_0", age::V8_0), + ("V9_0", age::V9_0), + ("V10_0", age::V10_0), + ("V11_0", age::V11_0), + ("V12_0", age::V12_0), + ("V12_1", age::V12_1), + ("V13_0", age::V13_0), + ("V14_0", age::V14_0), + ("V15_0", age::V15_0), + ("V15_1", age::V15_1), + ("V16_0", age::V16_0), + ]; + assert_eq!(AGES.len(), age::BY_NAME.len(), "ages are out of sync"); + + let pos = AGES.iter().position(|&(age, _)| canonical_age == age); + match pos { + None => Err(Error::PropertyValueNotFound), + Some(i) => Ok(AGES[..=i].iter().map(|&(_, classes)| classes)), + } + } + + imp(canonical_age) +} + +/// Returns the Unicode HIR class corresponding to the given general category. +/// +/// Name canonicalization is assumed to be performed by the caller. +/// +/// If the given general category could not be found, or if the general +/// category data is not available, then an error is returned. 
+fn gencat(canonical_name: &'static str) -> Result { + #[cfg(not(feature = "unicode-gencat"))] + fn imp(_: &'static str) -> Result { + Err(Error::PropertyNotFound) + } + + #[cfg(feature = "unicode-gencat")] + fn imp(name: &'static str) -> Result { + use crate::unicode_tables::general_category::BY_NAME; + match name { + "ASCII" => Ok(hir_class(&[('\0', '\x7F')])), + "Any" => Ok(hir_class(&[('\0', '\u{10FFFF}')])), + "Assigned" => { + let mut cls = gencat("Unassigned")?; + cls.negate(); + Ok(cls) + } + name => property_set(BY_NAME, name) + .map(hir_class) + .ok_or(Error::PropertyValueNotFound), + } + } + + match canonical_name { + "Decimal_Number" => perl_digit(), + name => imp(name), + } +} + +/// Returns the Unicode HIR class corresponding to the given script. +/// +/// Name canonicalization is assumed to be performed by the caller. +/// +/// If the given script could not be found, or if the script data is not +/// available, then an error is returned. +fn script(canonical_name: &'static str) -> Result { + #[cfg(not(feature = "unicode-script"))] + fn imp(_: &'static str) -> Result { + Err(Error::PropertyNotFound) + } + + #[cfg(feature = "unicode-script")] + fn imp(name: &'static str) -> Result { + use crate::unicode_tables::script::BY_NAME; + property_set(BY_NAME, name) + .map(hir_class) + .ok_or(Error::PropertyValueNotFound) + } + + imp(canonical_name) +} + +/// Returns the Unicode HIR class corresponding to the given script extension. +/// +/// Name canonicalization is assumed to be performed by the caller. +/// +/// If the given script extension could not be found, or if the script data is +/// not available, then an error is returned. +fn script_extension( + canonical_name: &'static str, +) -> Result { + #[cfg(not(feature = "unicode-script"))] + fn imp(_: &'static str) -> Result { + Err(Error::PropertyNotFound) + } + + #[cfg(feature = "unicode-script")] + fn imp(name: &'static str) -> Result { + use crate::unicode_tables::script_extension::BY_NAME; + property_set(BY_NAME, name) + .map(hir_class) + .ok_or(Error::PropertyValueNotFound) + } + + imp(canonical_name) +} + +/// Returns the Unicode HIR class corresponding to the given Unicode boolean +/// property. +/// +/// Name canonicalization is assumed to be performed by the caller. +/// +/// If the given boolean property could not be found, or if the boolean +/// property data is not available, then an error is returned. +fn bool_property( + canonical_name: &'static str, +) -> Result { + #[cfg(not(feature = "unicode-bool"))] + fn imp(_: &'static str) -> Result { + Err(Error::PropertyNotFound) + } + + #[cfg(feature = "unicode-bool")] + fn imp(name: &'static str) -> Result { + use crate::unicode_tables::property_bool::BY_NAME; + property_set(BY_NAME, name) + .map(hir_class) + .ok_or(Error::PropertyNotFound) + } + + match canonical_name { + "Decimal_Number" => perl_digit(), + "White_Space" => perl_space(), + name => imp(name), + } +} + +/// Returns the Unicode HIR class corresponding to the given grapheme cluster +/// break property. +/// +/// Name canonicalization is assumed to be performed by the caller. +/// +/// If the given property could not be found, or if the corresponding data is +/// not available, then an error is returned. 
+fn gcb(canonical_name: &'static str) -> Result { + #[cfg(not(feature = "unicode-segment"))] + fn imp(_: &'static str) -> Result { + Err(Error::PropertyNotFound) + } + + #[cfg(feature = "unicode-segment")] + fn imp(name: &'static str) -> Result { + use crate::unicode_tables::grapheme_cluster_break::BY_NAME; + property_set(BY_NAME, name) + .map(hir_class) + .ok_or(Error::PropertyValueNotFound) + } + + imp(canonical_name) +} + +/// Returns the Unicode HIR class corresponding to the given word break +/// property. +/// +/// Name canonicalization is assumed to be performed by the caller. +/// +/// If the given property could not be found, or if the corresponding data is +/// not available, then an error is returned. +fn wb(canonical_name: &'static str) -> Result { + #[cfg(not(feature = "unicode-segment"))] + fn imp(_: &'static str) -> Result { + Err(Error::PropertyNotFound) + } + + #[cfg(feature = "unicode-segment")] + fn imp(name: &'static str) -> Result { + use crate::unicode_tables::word_break::BY_NAME; + property_set(BY_NAME, name) + .map(hir_class) + .ok_or(Error::PropertyValueNotFound) + } + + imp(canonical_name) +} + +/// Returns the Unicode HIR class corresponding to the given sentence +/// break property. +/// +/// Name canonicalization is assumed to be performed by the caller. +/// +/// If the given property could not be found, or if the corresponding data is +/// not available, then an error is returned. +fn sb(canonical_name: &'static str) -> Result { + #[cfg(not(feature = "unicode-segment"))] + fn imp(_: &'static str) -> Result { + Err(Error::PropertyNotFound) + } + + #[cfg(feature = "unicode-segment")] + fn imp(name: &'static str) -> Result { + use crate::unicode_tables::sentence_break::BY_NAME; + property_set(BY_NAME, name) + .map(hir_class) + .ok_or(Error::PropertyValueNotFound) + } + + imp(canonical_name) +} + +/// Like symbolic_name_normalize_bytes, but operates on a string. +fn symbolic_name_normalize(x: &str) -> String { + let mut tmp = x.as_bytes().to_vec(); + let len = symbolic_name_normalize_bytes(&mut tmp).len(); + tmp.truncate(len); + // This should always succeed because `symbolic_name_normalize_bytes` + // guarantees that `&tmp[..len]` is always valid UTF-8. + // + // N.B. We could avoid the additional UTF-8 check here, but it's unlikely + // to be worth skipping the additional safety check. A benchmark must + // justify it first. + String::from_utf8(tmp).unwrap() +} + +/// Normalize the given symbolic name in place according to UAX44-LM3. +/// +/// A "symbolic name" typically corresponds to property names and property +/// value aliases. Note, though, that it should not be applied to property +/// string values. +/// +/// The slice returned is guaranteed to be valid UTF-8 for all possible values +/// of `slice`. +/// +/// See: https://unicode.org/reports/tr44/#UAX44-LM3 +fn symbolic_name_normalize_bytes(slice: &mut [u8]) -> &mut [u8] { + // I couldn't find a place in the standard that specified that property + // names/aliases had a particular structure (unlike character names), but + // we assume that it's ASCII only and drop anything that isn't ASCII. + let mut start = 0; + let mut starts_with_is = false; + if slice.len() >= 2 { + // Ignore any "is" prefix. + starts_with_is = slice[0..2] == b"is"[..] + || slice[0..2] == b"IS"[..] + || slice[0..2] == b"iS"[..] 
+ || slice[0..2] == b"Is"[..]; + if starts_with_is { + start = 2; + } + } + let mut next_write = 0; + for i in start..slice.len() { + // VALIDITY ARGUMENT: To guarantee that the resulting slice is valid + // UTF-8, we ensure that the slice contains only ASCII bytes. In + // particular, we drop every non-ASCII byte from the normalized string. + let b = slice[i]; + if b == b' ' || b == b'_' || b == b'-' { + continue; + } else if b'A' <= b && b <= b'Z' { + slice[next_write] = b + (b'a' - b'A'); + next_write += 1; + } else if b <= 0x7F { + slice[next_write] = b; + next_write += 1; + } + } + // Special case: ISO_Comment has a 'isc' abbreviation. Since we generally + // ignore 'is' prefixes, the 'isc' abbreviation gets caught in the cross + // fire and ends up creating an alias for 'c' to 'ISO_Comment', but it + // is actually an alias for the 'Other' general category. + if starts_with_is && next_write == 1 && slice[0] == b'c' { + slice[0] = b'i'; + slice[1] = b's'; + slice[2] = b'c'; + next_write = 3; + } + &mut slice[..next_write] +} + +#[cfg(test)] +mod tests { + use super::*; + + #[cfg(feature = "unicode-case")] + fn simple_fold_ok(c: char) -> impl Iterator { + SimpleCaseFolder::new().unwrap().mapping(c).iter().copied() + } + + #[cfg(feature = "unicode-case")] + fn contains_case_map(start: char, end: char) -> bool { + SimpleCaseFolder::new().unwrap().overlaps(start, end) + } + + #[test] + #[cfg(feature = "unicode-case")] + fn simple_fold_k() { + let xs: Vec = simple_fold_ok('k').collect(); + assert_eq!(xs, alloc::vec!['K', 'K']); + + let xs: Vec = simple_fold_ok('K').collect(); + assert_eq!(xs, alloc::vec!['k', 'K']); + + let xs: Vec = simple_fold_ok('K').collect(); + assert_eq!(xs, alloc::vec!['K', 'k']); + } + + #[test] + #[cfg(feature = "unicode-case")] + fn simple_fold_a() { + let xs: Vec = simple_fold_ok('a').collect(); + assert_eq!(xs, alloc::vec!['A']); + + let xs: Vec = simple_fold_ok('A').collect(); + assert_eq!(xs, alloc::vec!['a']); + } + + #[test] + #[cfg(not(feature = "unicode-case"))] + fn simple_fold_disabled() { + assert!(SimpleCaseFolder::new().is_err()); + } + + #[test] + #[cfg(feature = "unicode-case")] + fn range_contains() { + assert!(contains_case_map('A', 'A')); + assert!(contains_case_map('Z', 'Z')); + assert!(contains_case_map('A', 'Z')); + assert!(contains_case_map('@', 'A')); + assert!(contains_case_map('Z', '[')); + assert!(contains_case_map('☃', 'Ⰰ')); + + assert!(!contains_case_map('[', '[')); + assert!(!contains_case_map('[', '`')); + + assert!(!contains_case_map('☃', '☃')); + } + + #[test] + #[cfg(feature = "unicode-gencat")] + fn regression_466() { + use super::{CanonicalClassQuery, ClassQuery}; + + let q = ClassQuery::OneLetter('C'); + assert_eq!( + q.canonicalize().unwrap(), + CanonicalClassQuery::GeneralCategory("Other") + ); + } + + #[test] + fn sym_normalize() { + let sym_norm = symbolic_name_normalize; + + assert_eq!(sym_norm("Line_Break"), "linebreak"); + assert_eq!(sym_norm("Line-break"), "linebreak"); + assert_eq!(sym_norm("linebreak"), "linebreak"); + assert_eq!(sym_norm("BA"), "ba"); + assert_eq!(sym_norm("ba"), "ba"); + assert_eq!(sym_norm("Greek"), "greek"); + assert_eq!(sym_norm("isGreek"), "greek"); + assert_eq!(sym_norm("IS_Greek"), "greek"); + assert_eq!(sym_norm("isc"), "isc"); + assert_eq!(sym_norm("is c"), "isc"); + assert_eq!(sym_norm("is_c"), "isc"); + } + + #[test] + fn valid_utf8_symbolic() { + let mut x = b"abc\xFFxyz".to_vec(); + let y = symbolic_name_normalize_bytes(&mut x); + assert_eq!(y, b"abcxyz"); + } +} diff --git 
a/vendor/regex-syntax/src/unicode_tables/LICENSE-UNICODE b/vendor/regex-syntax/src/unicode_tables/LICENSE-UNICODE new file mode 100644 index 00000000000000..b82826bdbdd2c3 --- /dev/null +++ b/vendor/regex-syntax/src/unicode_tables/LICENSE-UNICODE @@ -0,0 +1,57 @@ +UNICODE, INC. LICENSE AGREEMENT - DATA FILES AND SOFTWARE + +Unicode Data Files include all data files under the directories +http://www.unicode.org/Public/, http://www.unicode.org/reports/, +http://www.unicode.org/cldr/data/, http://source.icu-project.org/repos/icu/, and +http://www.unicode.org/utility/trac/browser/. + +Unicode Data Files do not include PDF online code charts under the +directory http://www.unicode.org/Public/. + +Software includes any source code published in the Unicode Standard +or under the directories +http://www.unicode.org/Public/, http://www.unicode.org/reports/, +http://www.unicode.org/cldr/data/, http://source.icu-project.org/repos/icu/, and +http://www.unicode.org/utility/trac/browser/. + +NOTICE TO USER: Carefully read the following legal agreement. +BY DOWNLOADING, INSTALLING, COPYING OR OTHERWISE USING UNICODE INC.'S +DATA FILES ("DATA FILES"), AND/OR SOFTWARE ("SOFTWARE"), +YOU UNEQUIVOCALLY ACCEPT, AND AGREE TO BE BOUND BY, ALL OF THE +TERMS AND CONDITIONS OF THIS AGREEMENT. +IF YOU DO NOT AGREE, DO NOT DOWNLOAD, INSTALL, COPY, DISTRIBUTE OR USE +THE DATA FILES OR SOFTWARE. + +COPYRIGHT AND PERMISSION NOTICE + +Copyright © 1991-2018 Unicode, Inc. All rights reserved. +Distributed under the Terms of Use in http://www.unicode.org/copyright.html. + +Permission is hereby granted, free of charge, to any person obtaining +a copy of the Unicode data files and any associated documentation +(the "Data Files") or Unicode software and any associated documentation +(the "Software") to deal in the Data Files or Software +without restriction, including without limitation the rights to use, +copy, modify, merge, publish, distribute, and/or sell copies of +the Data Files or Software, and to permit persons to whom the Data Files +or Software are furnished to do so, provided that either +(a) this copyright and permission notice appear with all copies +of the Data Files or Software, or +(b) this copyright and permission notice appear in associated +Documentation. + +THE DATA FILES AND SOFTWARE ARE PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE +WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT OF THIRD PARTY RIGHTS. +IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS INCLUDED IN THIS +NOTICE BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL +DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, +DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER +TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR +PERFORMANCE OF THE DATA FILES OR SOFTWARE. + +Except as contained in this notice, the name of a copyright holder +shall not be used in advertising or otherwise to promote the sale, +use or other dealings in these Data Files or Software without prior +written authorization of the copyright holder. diff --git a/vendor/regex-syntax/src/unicode_tables/age.rs b/vendor/regex-syntax/src/unicode_tables/age.rs new file mode 100644 index 00000000000000..466510c9e6131e --- /dev/null +++ b/vendor/regex-syntax/src/unicode_tables/age.rs @@ -0,0 +1,1846 @@ +// DO NOT EDIT THIS FILE. 
IT WAS AUTOMATICALLY GENERATED BY: +// +// ucd-generate age ucd-16.0.0 --chars +// +// Unicode version: 16.0.0. +// +// ucd-generate 0.3.1 is available on crates.io. + +pub const BY_NAME: &'static [(&'static str, &'static [(char, char)])] = &[ + ("V10_0", V10_0), + ("V11_0", V11_0), + ("V12_0", V12_0), + ("V12_1", V12_1), + ("V13_0", V13_0), + ("V14_0", V14_0), + ("V15_0", V15_0), + ("V15_1", V15_1), + ("V16_0", V16_0), + ("V1_1", V1_1), + ("V2_0", V2_0), + ("V2_1", V2_1), + ("V3_0", V3_0), + ("V3_1", V3_1), + ("V3_2", V3_2), + ("V4_0", V4_0), + ("V4_1", V4_1), + ("V5_0", V5_0), + ("V5_1", V5_1), + ("V5_2", V5_2), + ("V6_0", V6_0), + ("V6_1", V6_1), + ("V6_2", V6_2), + ("V6_3", V6_3), + ("V7_0", V7_0), + ("V8_0", V8_0), + ("V9_0", V9_0), +]; + +pub const V10_0: &'static [(char, char)] = &[ + ('ࡠ', 'ࡪ'), + ('ৼ', '৽'), + ('\u{afa}', '\u{aff}'), + ('\u{d00}', '\u{d00}'), + ('\u{d3b}', '\u{d3c}'), + ('᳷', '᳷'), + ('\u{1df6}', '\u{1df9}'), + ('₿', '₿'), + ('⏿', '⏿'), + ('⯒', '⯒'), + ('⹅', '⹉'), + ('ㄮ', 'ㄮ'), + ('鿖', '鿪'), + ('𐌭', '𐌯'), + ('𑨀', '\u{11a47}'), + ('𑩐', '𑪃'), + ('𑪆', '𑪜'), + ('𑪞', '𑪢'), + ('𑴀', '𑴆'), + ('𑴈', '𑴉'), + ('𑴋', '\u{11d36}'), + ('\u{11d3a}', '\u{11d3a}'), + ('\u{11d3c}', '\u{11d3d}'), + ('\u{11d3f}', '\u{11d47}'), + ('𑵐', '𑵙'), + ('𖿡', '𖿡'), + ('𛀂', '𛄞'), + ('𛅰', '𛋻'), + ('🉠', '🉥'), + ('🛓', '🛔'), + ('🛷', '🛸'), + ('🤀', '🤋'), + ('🤟', '🤟'), + ('🤨', '🤯'), + ('🤱', '🤲'), + ('🥌', '🥌'), + ('🥟', '🥫'), + ('🦒', '🦗'), + ('🧐', '🧦'), + ('𬺰', '𮯠'), +]; + +pub const V11_0: &'static [(char, char)] = &[ + ('ՠ', 'ՠ'), + ('ֈ', 'ֈ'), + ('ׯ', 'ׯ'), + ('\u{7fd}', '߿'), + ('\u{8d3}', '\u{8d3}'), + ('\u{9fe}', '\u{9fe}'), + ('੶', '੶'), + ('\u{c04}', '\u{c04}'), + ('಄', '಄'), + ('ᡸ', 'ᡸ'), + ('Ა', 'Ჺ'), + ('Ჽ', 'Ჿ'), + ('⮺', '⮼'), + ('⯓', '⯫'), + ('⯰', '⯾'), + ('⹊', '⹎'), + ('ㄯ', 'ㄯ'), + ('鿫', '鿯'), + ('ꞯ', 'ꞯ'), + ('Ꞹ', 'ꞹ'), + ('ꣾ', '\u{a8ff}'), + ('𐨴', '𐨵'), + ('𐩈', '𐩈'), + ('𐴀', '\u{10d27}'), + ('𐴰', '𐴹'), + ('𐼀', '𐼧'), + ('𐼰', '𐽙'), + ('\u{110cd}', '\u{110cd}'), + ('𑅄', '𑅆'), + ('\u{1133b}', '\u{1133b}'), + ('\u{1145e}', '\u{1145e}'), + ('𑜚', '𑜚'), + ('𑠀', '𑠻'), + ('𑪝', '𑪝'), + ('𑵠', '𑵥'), + ('𑵧', '𑵨'), + ('𑵪', '𑶎'), + ('\u{11d90}', '\u{11d91}'), + ('𑶓', '𑶘'), + ('𑶠', '𑶩'), + ('𑻠', '𑻸'), + ('𖹀', '𖺚'), + ('𘟭', '𘟱'), + ('𝋠', '𝋳'), + ('𝍲', '𝍸'), + ('𞱱', '𞲴'), + ('🄯', '🄯'), + ('🛹', '🛹'), + ('🟕', '🟘'), + ('🥍', '🥏'), + ('🥬', '🥰'), + ('🥳', '🥶'), + ('🥺', '🥺'), + ('🥼', '🥿'), + ('🦘', '🦢'), + ('🦰', '🦹'), + ('🧁', '🧂'), + ('🧧', '🧿'), + ('🩠', '🩭'), +]; + +pub const V12_0: &'static [(char, char)] = &[ + ('౷', '౷'), + ('ຆ', 'ຆ'), + ('ຉ', 'ຉ'), + ('ຌ', 'ຌ'), + ('ຎ', 'ຓ'), + ('ຘ', 'ຘ'), + ('ຠ', 'ຠ'), + ('ຨ', 'ຩ'), + ('ຬ', 'ຬ'), + ('\u{eba}', '\u{eba}'), + ('ᳺ', 'ᳺ'), + ('⯉', '⯉'), + ('⯿', '⯿'), + ('⹏', '⹏'), + ('Ꞻ', 'ꞿ'), + ('Ꟃ', 'Ᶎ'), + ('ꭦ', 'ꭧ'), + ('𐿠', '𐿶'), + ('𑑟', '𑑟'), + ('𑚸', '𑚸'), + ('𑦠', '𑦧'), + ('𑦪', '\u{119d7}'), + ('\u{119da}', '𑧤'), + ('𑪄', '𑪅'), + ('𑿀', '𑿱'), + ('𑿿', '𑿿'), + ('\u{13430}', '\u{13438}'), + ('𖽅', '𖽊'), + ('\u{16f4f}', '\u{16f4f}'), + ('𖽿', '𖾇'), + ('𖿢', '𖿣'), + ('𘟲', '𘟷'), + ('𛅐', '𛅒'), + ('𛅤', '𛅧'), + ('𞄀', '𞄬'), + ('\u{1e130}', '𞄽'), + ('𞅀', '𞅉'), + ('𞅎', '𞅏'), + ('𞋀', '𞋹'), + ('𞋿', '𞋿'), + ('𞥋', '𞥋'), + ('𞴁', '𞴽'), + ('🅬', '🅬'), + ('🛕', '🛕'), + ('🛺', '🛺'), + ('🟠', '🟫'), + ('🤍', '🤏'), + ('🤿', '🤿'), + ('🥱', '🥱'), + ('🥻', '🥻'), + ('🦥', '🦪'), + ('🦮', '🦯'), + ('🦺', '🦿'), + ('🧃', '🧊'), + ('🧍', '🧏'), + ('🨀', '🩓'), + ('🩰', '🩳'), + ('🩸', '🩺'), + ('🪀', '🪂'), + ('🪐', '🪕'), +]; + +pub const V12_1: &'static [(char, char)] = &[('㋿', '㋿')]; + +pub const V13_0: &'static [(char, char)] = &[ + 
('ࢾ', 'ࣇ'), + ('\u{b55}', '\u{b55}'), + ('ഄ', 'ഄ'), + ('\u{d81}', '\u{d81}'), + ('\u{1abf}', '\u{1ac0}'), + ('⮗', '⮗'), + ('⹐', '⹒'), + ('ㆻ', 'ㆿ'), + ('䶶', '䶿'), + ('鿰', '鿼'), + ('Ꟈ', 'ꟊ'), + ('Ꟶ', 'ꟶ'), + ('\u{a82c}', '\u{a82c}'), + ('ꭨ', '꭫'), + ('𐆜', '𐆜'), + ('𐺀', '𐺩'), + ('\u{10eab}', '𐺭'), + ('𐺰', '𐺱'), + ('𐾰', '𐿋'), + ('𑅇', '𑅇'), + ('𑇎', '\u{111cf}'), + ('𑑚', '𑑚'), + ('𑑠', '𑑡'), + ('𑤀', '𑤆'), + ('𑤉', '𑤉'), + ('𑤌', '𑤓'), + ('𑤕', '𑤖'), + ('𑤘', '𑤵'), + ('𑤷', '𑤸'), + ('\u{1193b}', '𑥆'), + ('𑥐', '𑥙'), + ('𑾰', '𑾰'), + ('\u{16fe4}', '\u{16fe4}'), + ('\u{16ff0}', '\u{16ff1}'), + ('𘫳', '𘳕'), + ('𘴀', '𘴈'), + ('🄍', '🄏'), + ('🅭', '🅯'), + ('🆭', '🆭'), + ('🛖', '🛗'), + ('🛻', '🛼'), + ('🢰', '🢱'), + ('🤌', '🤌'), + ('🥲', '🥲'), + ('🥷', '🥸'), + ('🦣', '🦤'), + ('🦫', '🦭'), + ('🧋', '🧋'), + ('🩴', '🩴'), + ('🪃', '🪆'), + ('🪖', '🪨'), + ('🪰', '🪶'), + ('🫀', '🫂'), + ('🫐', '🫖'), + ('🬀', '🮒'), + ('🮔', '🯊'), + ('🯰', '🯹'), + ('𪛗', '𪛝'), + ('𰀀', '𱍊'), +]; + +pub const V14_0: &'static [(char, char)] = &[ + ('؝', '؝'), + ('ࡰ', 'ࢎ'), + ('\u{890}', '\u{891}'), + ('\u{898}', '\u{89f}'), + ('ࢵ', 'ࢵ'), + ('ࣈ', '\u{8d2}'), + ('\u{c3c}', '\u{c3c}'), + ('ౝ', 'ౝ'), + ('ೝ', 'ೝ'), + ('ᜍ', 'ᜍ'), + ('\u{1715}', '\u{1715}'), + ('ᜟ', 'ᜟ'), + ('\u{180f}', '\u{180f}'), + ('\u{1ac1}', '\u{1ace}'), + ('ᭌ', 'ᭌ'), + ('᭽', '᭾'), + ('\u{1dfa}', '\u{1dfa}'), + ('⃀', '⃀'), + ('Ⱟ', 'Ⱟ'), + ('ⱟ', 'ⱟ'), + ('⹓', '⹝'), + ('鿽', '鿿'), + ('Ꟁ', 'ꟁ'), + ('Ꟑ', 'ꟑ'), + ('ꟓ', 'ꟓ'), + ('ꟕ', 'ꟙ'), + ('ꟲ', 'ꟴ'), + ('﯂', '﯂'), + ('﵀', '﵏'), + ('﷏', '﷏'), + ('﷾', '﷿'), + ('𐕰', '𐕺'), + ('𐕼', '𐖊'), + ('𐖌', '𐖒'), + ('𐖔', '𐖕'), + ('𐖗', '𐖡'), + ('𐖣', '𐖱'), + ('𐖳', '𐖹'), + ('𐖻', '𐖼'), + ('𐞀', '𐞅'), + ('𐞇', '𐞰'), + ('𐞲', '𐞺'), + ('𐽰', '𐾉'), + ('\u{11070}', '𑁵'), + ('\u{110c2}', '\u{110c2}'), + ('𑚹', '𑚹'), + ('𑝀', '𑝆'), + ('𑪰', '𑪿'), + ('𒾐', '𒿲'), + ('𖩰', '𖪾'), + ('𖫀', '𖫉'), + ('𚿰', '𚿳'), + ('𚿵', '𚿻'), + ('𚿽', '𚿾'), + ('𛄟', '𛄢'), + ('\u{1cf00}', '\u{1cf2d}'), + ('\u{1cf30}', '\u{1cf46}'), + ('𜽐', '𜿃'), + ('𝇩', '𝇪'), + ('𝼀', '𝼞'), + ('𞊐', '\u{1e2ae}'), + ('𞟠', '𞟦'), + ('𞟨', '𞟫'), + ('𞟭', '𞟮'), + ('𞟰', '𞟾'), + ('🛝', '🛟'), + ('🟰', '🟰'), + ('🥹', '🥹'), + ('🧌', '🧌'), + ('🩻', '🩼'), + ('🪩', '🪬'), + ('🪷', '🪺'), + ('🫃', '🫅'), + ('🫗', '🫙'), + ('🫠', '🫧'), + ('🫰', '🫶'), + ('𪛞', '𪛟'), + ('𫜵', '𫜸'), +]; + +pub const V15_0: &'static [(char, char)] = &[ + ('ೳ', 'ೳ'), + ('\u{ece}', '\u{ece}'), + ('\u{10efd}', '\u{10eff}'), + ('𑈿', '\u{11241}'), + ('𑬀', '𑬉'), + ('\u{11f00}', '𑼐'), + ('𑼒', '\u{11f3a}'), + ('𑼾', '𑽙'), + ('𓐯', '𓐯'), + ('\u{13439}', '\u{13455}'), + ('𛄲', '𛄲'), + ('𛅕', '𛅕'), + ('𝋀', '𝋓'), + ('𝼥', '𝼪'), + ('𞀰', '𞁭'), + ('\u{1e08f}', '\u{1e08f}'), + ('𞓐', '𞓹'), + ('🛜', '🛜'), + ('🝴', '🝶'), + ('🝻', '🝿'), + ('🟙', '🟙'), + ('🩵', '🩷'), + ('🪇', '🪈'), + ('🪭', '🪯'), + ('🪻', '🪽'), + ('🪿', '🪿'), + ('🫎', '🫏'), + ('🫚', '🫛'), + ('🫨', '🫨'), + ('🫷', '🫸'), + ('𫜹', '𫜹'), + ('𱍐', '𲎯'), +]; + +pub const V15_1: &'static [(char, char)] = + &[('⿼', '⿿'), ('㇯', '㇯'), ('𮯰', '𮹝')]; + +pub const V16_0: &'static [(char, char)] = &[ + ('\u{897}', '\u{897}'), + ('᭎', '᭏'), + ('᭿', '᭿'), + ('Ᲊ', 'ᲊ'), + ('␧', '␩'), + ('㇤', '㇥'), + ('Ɤ', 'ꟍ'), + ('Ꟛ', 'Ƛ'), + ('𐗀', '𐗳'), + ('𐵀', '𐵥'), + ('\u{10d69}', '𐶅'), + ('𐶎', '𐶏'), + ('𐻂', '𐻄'), + ('\u{10efc}', '\u{10efc}'), + ('𑎀', '𑎉'), + ('𑎋', '𑎋'), + ('𑎎', '𑎎'), + ('𑎐', '𑎵'), + ('𑎷', '\u{113c0}'), + ('\u{113c2}', '\u{113c2}'), + ('\u{113c5}', '\u{113c5}'), + ('\u{113c7}', '𑏊'), + ('𑏌', '𑏕'), + ('𑏗', '𑏘'), + ('\u{113e1}', '\u{113e2}'), + ('𑛐', '𑛣'), + ('𑯀', '𑯡'), + ('𑯰', '𑯹'), + ('\u{11f5a}', '\u{11f5a}'), + ('𓑠', '𔏺'), + ('𖄀', '𖄹'), + ('𖵀', '𖵹'), + ('𘳿', '𘳿'), + ('𜰀', 
'𜳹'), + ('𜴀', '𜺳'), + ('𞗐', '𞗺'), + ('𞗿', '𞗿'), + ('🢲', '🢻'), + ('🣀', '🣁'), + ('🪉', '🪉'), + ('🪏', '🪏'), + ('🪾', '🪾'), + ('🫆', '🫆'), + ('🫜', '🫜'), + ('🫟', '🫟'), + ('🫩', '🫩'), + ('🯋', '🯯'), +]; + +pub const V1_1: &'static [(char, char)] = &[ + ('\0', 'ǵ'), + ('Ǻ', 'ȗ'), + ('ɐ', 'ʨ'), + ('ʰ', '˞'), + ('ˠ', '˩'), + ('\u{300}', '\u{345}'), + ('\u{360}', '\u{361}'), + ('ʹ', '͵'), + ('ͺ', 'ͺ'), + (';', ';'), + ('΄', 'Ί'), + ('Ό', 'Ό'), + ('Ύ', 'Ρ'), + ('Σ', 'ώ'), + ('ϐ', 'ϖ'), + ('Ϛ', 'Ϛ'), + ('Ϝ', 'Ϝ'), + ('Ϟ', 'Ϟ'), + ('Ϡ', 'Ϡ'), + ('Ϣ', 'ϳ'), + ('Ё', 'Ќ'), + ('Ў', 'я'), + ('ё', 'ќ'), + ('ў', '\u{486}'), + ('Ґ', 'ӄ'), + ('Ӈ', 'ӈ'), + ('Ӌ', 'ӌ'), + ('Ӑ', 'ӫ'), + ('Ӯ', 'ӵ'), + ('Ӹ', 'ӹ'), + ('Ա', 'Ֆ'), + ('ՙ', '՟'), + ('ա', 'և'), + ('։', '։'), + ('\u{5b0}', '\u{5b9}'), + ('\u{5bb}', '׃'), + ('א', 'ת'), + ('װ', '״'), + ('،', '،'), + ('؛', '؛'), + ('؟', '؟'), + ('ء', 'غ'), + ('ـ', '\u{652}'), + ('٠', '٭'), + ('\u{670}', 'ڷ'), + ('ں', 'ھ'), + ('ۀ', 'ێ'), + ('ې', '\u{6ed}'), + ('۰', '۹'), + ('\u{901}', 'ः'), + ('अ', 'ह'), + ('\u{93c}', '\u{94d}'), + ('ॐ', '\u{954}'), + ('क़', '॰'), + ('\u{981}', 'ঃ'), + ('অ', 'ঌ'), + ('এ', 'ঐ'), + ('ও', 'ন'), + ('প', 'র'), + ('ল', 'ল'), + ('শ', 'হ'), + ('\u{9bc}', '\u{9bc}'), + ('\u{9be}', '\u{9c4}'), + ('ে', 'ৈ'), + ('ো', '\u{9cd}'), + ('\u{9d7}', '\u{9d7}'), + ('ড়', 'ঢ়'), + ('য়', '\u{9e3}'), + ('০', '৺'), + ('\u{a02}', '\u{a02}'), + ('ਅ', 'ਊ'), + ('ਏ', 'ਐ'), + ('ਓ', 'ਨ'), + ('ਪ', 'ਰ'), + ('ਲ', 'ਲ਼'), + ('ਵ', 'ਸ਼'), + ('ਸ', 'ਹ'), + ('\u{a3c}', '\u{a3c}'), + ('ਾ', '\u{a42}'), + ('\u{a47}', '\u{a48}'), + ('\u{a4b}', '\u{a4d}'), + ('ਖ਼', 'ੜ'), + ('ਫ਼', 'ਫ਼'), + ('੦', 'ੴ'), + ('\u{a81}', 'ઃ'), + ('અ', 'ઋ'), + ('ઍ', 'ઍ'), + ('એ', 'ઑ'), + ('ઓ', 'ન'), + ('પ', 'ર'), + ('લ', 'ળ'), + ('વ', 'હ'), + ('\u{abc}', '\u{ac5}'), + ('\u{ac7}', 'ૉ'), + ('ો', '\u{acd}'), + ('ૐ', 'ૐ'), + ('ૠ', 'ૠ'), + ('૦', '૯'), + ('\u{b01}', 'ଃ'), + ('ଅ', 'ଌ'), + ('ଏ', 'ଐ'), + ('ଓ', 'ନ'), + ('ପ', 'ର'), + ('ଲ', 'ଳ'), + ('ଶ', 'ହ'), + ('\u{b3c}', '\u{b43}'), + ('େ', 'ୈ'), + ('ୋ', '\u{b4d}'), + ('\u{b56}', '\u{b57}'), + ('ଡ଼', 'ଢ଼'), + ('ୟ', 'ୡ'), + ('୦', '୰'), + ('\u{b82}', 'ஃ'), + ('அ', 'ஊ'), + ('எ', 'ஐ'), + ('ஒ', 'க'), + ('ங', 'ச'), + ('ஜ', 'ஜ'), + ('ஞ', 'ட'), + ('ண', 'த'), + ('ந', 'ப'), + ('ம', 'வ'), + ('ஷ', 'ஹ'), + ('\u{bbe}', 'ூ'), + ('ெ', 'ை'), + ('ொ', '\u{bcd}'), + ('\u{bd7}', '\u{bd7}'), + ('௧', '௲'), + ('ఁ', 'ః'), + ('అ', 'ఌ'), + ('ఎ', 'ఐ'), + ('ఒ', 'న'), + ('ప', 'ళ'), + ('వ', 'హ'), + ('\u{c3e}', 'ౄ'), + ('\u{c46}', '\u{c48}'), + ('\u{c4a}', '\u{c4d}'), + ('\u{c55}', '\u{c56}'), + ('ౠ', 'ౡ'), + ('౦', '౯'), + ('ಂ', 'ಃ'), + ('ಅ', 'ಌ'), + ('ಎ', 'ಐ'), + ('ಒ', 'ನ'), + ('ಪ', 'ಳ'), + ('ವ', 'ಹ'), + ('ಾ', 'ೄ'), + ('\u{cc6}', '\u{cc8}'), + ('\u{cca}', '\u{ccd}'), + ('\u{cd5}', '\u{cd6}'), + ('ೞ', 'ೞ'), + ('ೠ', 'ೡ'), + ('೦', '೯'), + ('ം', 'ഃ'), + ('അ', 'ഌ'), + ('എ', 'ഐ'), + ('ഒ', 'ന'), + ('പ', 'ഹ'), + ('\u{d3e}', '\u{d43}'), + ('െ', 'ൈ'), + ('ൊ', '\u{d4d}'), + ('\u{d57}', '\u{d57}'), + ('ൠ', 'ൡ'), + ('൦', '൯'), + ('ก', '\u{e3a}'), + ('฿', '๛'), + ('ກ', 'ຂ'), + ('ຄ', 'ຄ'), + ('ງ', 'ຈ'), + ('ຊ', 'ຊ'), + ('ຍ', 'ຍ'), + ('ດ', 'ທ'), + ('ນ', 'ຟ'), + ('ມ', 'ຣ'), + ('ລ', 'ລ'), + ('ວ', 'ວ'), + ('ສ', 'ຫ'), + ('ອ', '\u{eb9}'), + ('\u{ebb}', 'ຽ'), + ('ເ', 'ໄ'), + ('ໆ', 'ໆ'), + ('\u{ec8}', '\u{ecd}'), + ('໐', '໙'), + ('ໜ', 'ໝ'), + ('Ⴀ', 'Ⴥ'), + ('ა', 'ჶ'), + ('჻', '჻'), + ('ᄀ', 'ᅙ'), + ('ᅟ', 'ᆢ'), + ('ᆨ', 'ᇹ'), + ('Ḁ', 'ẚ'), + ('Ạ', 'ỹ'), + ('ἀ', 'ἕ'), + ('Ἐ', 'Ἕ'), + ('ἠ', 'ὅ'), + ('Ὀ', 'Ὅ'), + ('ὐ', 'ὗ'), + ('Ὑ', 'Ὑ'), + ('Ὓ', 'Ὓ'), + ('Ὕ', 'Ὕ'), + ('Ὗ', 'ώ'), + ('ᾀ', 'ᾴ'), + ('ᾶ', 'ῄ'), + ('ῆ', 'ΐ'), + ('ῖ', 'Ί'), 
+ ('῝', '`'), + ('ῲ', 'ῴ'), + ('ῶ', '῾'), + ('\u{2000}', '\u{202e}'), + ('‰', '⁆'), + ('\u{206a}', '⁰'), + ('⁴', '₎'), + ('₠', '₪'), + ('\u{20d0}', '\u{20e1}'), + ('℀', 'ℸ'), + ('⅓', 'ↂ'), + ('←', '⇪'), + ('∀', '⋱'), + ('⌀', '⌀'), + ('⌂', '⍺'), + ('␀', '␤'), + ('⑀', '⑊'), + ('①', '⓪'), + ('─', '▕'), + ('■', '◯'), + ('☀', '☓'), + ('☚', '♯'), + ('✁', '✄'), + ('✆', '✉'), + ('✌', '✧'), + ('✩', '❋'), + ('❍', '❍'), + ('❏', '❒'), + ('❖', '❖'), + ('❘', '❞'), + ('❡', '❧'), + ('❶', '➔'), + ('➘', '➯'), + ('➱', '➾'), + ('\u{3000}', '〷'), + ('〿', '〿'), + ('ぁ', 'ゔ'), + ('\u{3099}', 'ゞ'), + ('ァ', 'ヾ'), + ('ㄅ', 'ㄬ'), + ('ㄱ', 'ㆎ'), + ('㆐', '㆟'), + ('㈀', '㈜'), + ('㈠', '㉃'), + ('㉠', '㉻'), + ('㉿', '㊰'), + ('㋀', '㋋'), + ('㋐', '㋾'), + ('㌀', '㍶'), + ('㍻', '㏝'), + ('㏠', '㏾'), + ('一', '龥'), + ('\u{e000}', '鶴'), + ('ff', 'st'), + ('ﬓ', 'ﬗ'), + ('\u{fb1e}', 'זּ'), + ('טּ', 'לּ'), + ('מּ', 'מּ'), + ('נּ', 'סּ'), + ('ףּ', 'פּ'), + ('צּ', 'ﮱ'), + ('ﯓ', '﴿'), + ('ﵐ', 'ﶏ'), + ('ﶒ', 'ﷇ'), + ('ﷰ', 'ﷻ'), + ('\u{fe20}', '\u{fe23}'), + ('︰', '﹄'), + ('﹉', '﹒'), + ('﹔', '﹦'), + ('﹨', '﹫'), + ('ﹰ', 'ﹲ'), + ('ﹴ', 'ﹴ'), + ('ﹶ', 'ﻼ'), + ('\u{feff}', '\u{feff}'), + ('!', '~'), + ('。', 'ᄒ'), + ('ᅡ', 'ᅦ'), + ('ᅧ', 'ᅬ'), + ('ᅭ', 'ᅲ'), + ('ᅳ', 'ᅵ'), + ('¢', '₩'), + ('│', '○'), + ('�', '\u{ffff}'), +]; + +pub const V2_0: &'static [(char, char)] = &[ + ('\u{591}', '\u{5a1}'), + ('\u{5a3}', '\u{5af}'), + ('\u{5c4}', '\u{5c4}'), + ('ༀ', 'ཇ'), + ('ཉ', 'ཀྵ'), + ('\u{f71}', 'ྋ'), + ('\u{f90}', '\u{f95}'), + ('\u{f97}', '\u{f97}'), + ('\u{f99}', '\u{fad}'), + ('\u{fb1}', '\u{fb7}'), + ('\u{fb9}', '\u{fb9}'), + ('ẛ', 'ẛ'), + ('₫', '₫'), + ('가', '힣'), + ('\u{1fffe}', '\u{1ffff}'), + ('\u{2fffe}', '\u{2ffff}'), + ('\u{3fffe}', '\u{3ffff}'), + ('\u{4fffe}', '\u{4ffff}'), + ('\u{5fffe}', '\u{5ffff}'), + ('\u{6fffe}', '\u{6ffff}'), + ('\u{7fffe}', '\u{7ffff}'), + ('\u{8fffe}', '\u{8ffff}'), + ('\u{9fffe}', '\u{9ffff}'), + ('\u{afffe}', '\u{affff}'), + ('\u{bfffe}', '\u{bffff}'), + ('\u{cfffe}', '\u{cffff}'), + ('\u{dfffe}', '\u{dffff}'), + ('\u{efffe}', '\u{10ffff}'), +]; + +pub const V2_1: &'static [(char, char)] = &[('€', '€'), ('', '')]; + +pub const V3_0: &'static [(char, char)] = &[ + ('Ƕ', 'ǹ'), + ('Ș', 'ȟ'), + ('Ȣ', 'ȳ'), + ('ʩ', 'ʭ'), + ('˟', '˟'), + ('˪', 'ˮ'), + ('\u{346}', '\u{34e}'), + ('\u{362}', '\u{362}'), + ('ϗ', 'ϗ'), + ('ϛ', 'ϛ'), + ('ϝ', 'ϝ'), + ('ϟ', 'ϟ'), + ('ϡ', 'ϡ'), + ('Ѐ', 'Ѐ'), + ('Ѝ', 'Ѝ'), + ('ѐ', 'ѐ'), + ('ѝ', 'ѝ'), + ('\u{488}', '\u{489}'), + ('Ҍ', 'ҏ'), + ('Ӭ', 'ӭ'), + ('֊', '֊'), + ('\u{653}', '\u{655}'), + ('ڸ', 'ڹ'), + ('ڿ', 'ڿ'), + ('ۏ', 'ۏ'), + ('ۺ', '۾'), + ('܀', '܍'), + ('\u{70f}', 'ܬ'), + ('\u{730}', '\u{74a}'), + ('ހ', '\u{7b0}'), + ('ං', 'ඃ'), + ('අ', 'ඖ'), + ('ක', 'න'), + ('ඳ', 'ර'), + ('ල', 'ල'), + ('ව', 'ෆ'), + ('\u{dca}', '\u{dca}'), + ('\u{dcf}', '\u{dd4}'), + ('\u{dd6}', '\u{dd6}'), + ('ෘ', '\u{ddf}'), + ('ෲ', '෴'), + ('ཪ', 'ཪ'), + ('\u{f96}', '\u{f96}'), + ('\u{fae}', '\u{fb0}'), + ('\u{fb8}', '\u{fb8}'), + ('\u{fba}', '\u{fbc}'), + ('྾', '࿌'), + ('࿏', '࿏'), + ('က', 'အ'), + ('ဣ', 'ဧ'), + ('ဩ', 'ဪ'), + ('ာ', '\u{1032}'), + ('\u{1036}', '\u{1039}'), + ('၀', '\u{1059}'), + ('ሀ', 'ሆ'), + ('ለ', 'ቆ'), + ('ቈ', 'ቈ'), + ('ቊ', 'ቍ'), + ('ቐ', 'ቖ'), + ('ቘ', 'ቘ'), + ('ቚ', 'ቝ'), + ('በ', 'ኆ'), + ('ኈ', 'ኈ'), + ('ኊ', 'ኍ'), + ('ነ', 'ኮ'), + ('ኰ', 'ኰ'), + ('ኲ', 'ኵ'), + ('ኸ', 'ኾ'), + ('ዀ', 'ዀ'), + ('ዂ', 'ዅ'), + ('ወ', 'ዎ'), + ('ዐ', 'ዖ'), + ('ዘ', 'ዮ'), + ('ደ', 'ጎ'), + ('ጐ', 'ጐ'), + ('ጒ', 'ጕ'), + ('ጘ', 'ጞ'), + ('ጠ', 'ፆ'), + ('ፈ', 'ፚ'), + ('፡', '፼'), + ('Ꭰ', 'Ᏼ'), + ('ᐁ', 'ᙶ'), + ('\u{1680}', '᚜'), + ('ᚠ', 'ᛰ'), + ('ក', 'ៜ'), + 
('០', '៩'), + ('᠀', '\u{180e}'), + ('᠐', '᠙'), + ('ᠠ', 'ᡷ'), + ('ᢀ', '\u{18a9}'), + ('\u{202f}', '\u{202f}'), + ('⁈', '⁍'), + ('₭', '₯'), + ('\u{20e2}', '\u{20e3}'), + ('ℹ', '℺'), + ('Ↄ', 'Ↄ'), + ('⇫', '⇳'), + ('⌁', '⌁'), + ('⍻', '⍻'), + ('⍽', '⎚'), + ('␥', '␦'), + ('◰', '◷'), + ('☙', '☙'), + ('♰', '♱'), + ('⠀', '⣿'), + ('⺀', '⺙'), + ('⺛', '⻳'), + ('⼀', '⿕'), + ('⿰', '⿻'), + ('〸', '〺'), + ('〾', '〾'), + ('ㆠ', 'ㆷ'), + ('㐀', '䶵'), + ('ꀀ', 'ꒌ'), + ('꒐', '꒡'), + ('꒤', '꒳'), + ('꒵', '꓀'), + ('꓂', '꓄'), + ('꓆', '꓆'), + ('יִ', 'יִ'), + ('\u{fff9}', '\u{fffb}'), +]; + +pub const V3_1: &'static [(char, char)] = &[ + ('ϴ', 'ϵ'), + ('\u{fdd0}', '\u{fdef}'), + ('𐌀', '𐌞'), + ('𐌠', '𐌣'), + ('𐌰', '𐍊'), + ('𐐀', '𐐥'), + ('𐐨', '𐑍'), + ('𝀀', '𝃵'), + ('𝄀', '𝄦'), + ('𝄪', '𝇝'), + ('𝐀', '𝑔'), + ('𝑖', '𝒜'), + ('𝒞', '𝒟'), + ('𝒢', '𝒢'), + ('𝒥', '𝒦'), + ('𝒩', '𝒬'), + ('𝒮', '𝒹'), + ('𝒻', '𝒻'), + ('𝒽', '𝓀'), + ('𝓂', '𝓃'), + ('𝓅', '𝔅'), + ('𝔇', '𝔊'), + ('𝔍', '𝔔'), + ('𝔖', '𝔜'), + ('𝔞', '𝔹'), + ('𝔻', '𝔾'), + ('𝕀', '𝕄'), + ('𝕆', '𝕆'), + ('𝕊', '𝕐'), + ('𝕒', '𝚣'), + ('𝚨', '𝟉'), + ('𝟎', '𝟿'), + ('𠀀', '𪛖'), + ('丽', '𪘀'), + ('\u{e0001}', '\u{e0001}'), + ('\u{e0020}', '\u{e007f}'), +]; + +pub const V3_2: &'static [(char, char)] = &[ + ('Ƞ', 'Ƞ'), + ('\u{34f}', '\u{34f}'), + ('\u{363}', '\u{36f}'), + ('Ϙ', 'ϙ'), + ('϶', '϶'), + ('Ҋ', 'ҋ'), + ('Ӆ', 'ӆ'), + ('Ӊ', 'ӊ'), + ('Ӎ', 'ӎ'), + ('Ԁ', 'ԏ'), + ('ٮ', 'ٯ'), + ('ޱ', 'ޱ'), + ('ჷ', 'ჸ'), + ('ᜀ', 'ᜌ'), + ('ᜎ', '\u{1714}'), + ('ᜠ', '᜶'), + ('ᝀ', '\u{1753}'), + ('ᝠ', 'ᝬ'), + ('ᝮ', 'ᝰ'), + ('\u{1772}', '\u{1773}'), + ('⁇', '⁇'), + ('⁎', '⁒'), + ('⁗', '⁗'), + ('\u{205f}', '\u{2063}'), + ('ⁱ', 'ⁱ'), + ('₰', '₱'), + ('\u{20e4}', '\u{20ea}'), + ('ℽ', '⅋'), + ('⇴', '⇿'), + ('⋲', '⋿'), + ('⍼', '⍼'), + ('⎛', '⏎'), + ('⓫', '⓾'), + ('▖', '▟'), + ('◸', '◿'), + ('☖', '☗'), + ('♲', '♽'), + ('⚀', '⚉'), + ('❨', '❵'), + ('⟐', '⟫'), + ('⟰', '⟿'), + ('⤀', '⫿'), + ('〻', '〽'), + ('ゕ', 'ゖ'), + ('ゟ', '゠'), + ('ヿ', 'ヿ'), + ('ㇰ', 'ㇿ'), + ('㉑', '㉟'), + ('㊱', '㊿'), + ('꒢', '꒣'), + ('꒴', '꒴'), + ('꓁', '꓁'), + ('꓅', '꓅'), + ('侮', '頻'), + ('﷼', '﷼'), + ('\u{fe00}', '\u{fe0f}'), + ('﹅', '﹆'), + ('ﹳ', 'ﹳ'), + ('⦅', '⦆'), +]; + +pub const V4_0: &'static [(char, char)] = &[ + ('ȡ', 'ȡ'), + ('ȴ', 'ȶ'), + ('ʮ', 'ʯ'), + ('˯', '˿'), + ('\u{350}', '\u{357}'), + ('\u{35d}', '\u{35f}'), + ('Ϸ', 'ϻ'), + ('\u{600}', '\u{603}'), + ('؍', '\u{615}'), + ('\u{656}', '\u{658}'), + ('ۮ', 'ۯ'), + ('ۿ', 'ۿ'), + ('ܭ', 'ܯ'), + ('ݍ', 'ݏ'), + ('ऄ', 'ऄ'), + ('ঽ', 'ঽ'), + ('\u{a01}', '\u{a01}'), + ('ਃ', 'ਃ'), + ('ઌ', 'ઌ'), + ('ૡ', '\u{ae3}'), + ('૱', '૱'), + ('ଵ', 'ଵ'), + ('ୱ', 'ୱ'), + ('௳', '௺'), + ('\u{cbc}', 'ಽ'), + ('\u{17dd}', '\u{17dd}'), + ('៰', '៹'), + ('ᤀ', 'ᤜ'), + ('\u{1920}', 'ᤫ'), + ('ᤰ', '\u{193b}'), + ('᥀', '᥀'), + ('᥄', 'ᥭ'), + ('ᥰ', 'ᥴ'), + ('᧠', '᧿'), + ('ᴀ', 'ᵫ'), + ('⁓', '⁔'), + ('℻', '℻'), + ('⏏', '⏐'), + ('⓿', '⓿'), + ('☔', '☕'), + ('⚊', '⚑'), + ('⚠', '⚡'), + ('⬀', '⬍'), + ('㈝', '㈞'), + ('㉐', '㉐'), + ('㉼', '㉽'), + ('㋌', '㋏'), + ('㍷', '㍺'), + ('㏞', '㏟'), + ('㏿', '㏿'), + ('䷀', '䷿'), + ('﷽', '﷽'), + ('﹇', '﹈'), + ('𐀀', '𐀋'), + ('𐀍', '𐀦'), + ('𐀨', '𐀺'), + ('𐀼', '𐀽'), + ('𐀿', '𐁍'), + ('𐁐', '𐁝'), + ('𐂀', '𐃺'), + ('𐄀', '𐄂'), + ('𐄇', '𐄳'), + ('𐄷', '𐄿'), + ('𐎀', '𐎝'), + ('𐎟', '𐎟'), + ('𐐦', '𐐧'), + ('𐑎', '𐒝'), + ('𐒠', '𐒩'), + ('𐠀', '𐠅'), + ('𐠈', '𐠈'), + ('𐠊', '𐠵'), + ('𐠷', '𐠸'), + ('𐠼', '𐠼'), + ('𐠿', '𐠿'), + ('𝌀', '𝍖'), + ('𝓁', '𝓁'), + ('\u{e0100}', '\u{e01ef}'), +]; + +pub const V4_1: &'static [(char, char)] = &[ + ('ȷ', 'Ɂ'), + ('\u{358}', '\u{35c}'), + ('ϼ', 'Ͽ'), + ('Ӷ', 'ӷ'), + ('\u{5a2}', '\u{5a2}'), + ('\u{5c5}', 
'\u{5c7}'), + ('؋', '؋'), + ('؞', '؞'), + ('\u{659}', '\u{65e}'), + ('ݐ', 'ݭ'), + ('ॽ', 'ॽ'), + ('ৎ', 'ৎ'), + ('ஶ', 'ஶ'), + ('௦', '௦'), + ('࿐', '࿑'), + ('ჹ', 'ჺ'), + ('ჼ', 'ჼ'), + ('ሇ', 'ሇ'), + ('ቇ', 'ቇ'), + ('ኇ', 'ኇ'), + ('ኯ', 'ኯ'), + ('ዏ', 'ዏ'), + ('ዯ', 'ዯ'), + ('ጏ', 'ጏ'), + ('ጟ', 'ጟ'), + ('ፇ', 'ፇ'), + ('\u{135f}', '፠'), + ('ᎀ', '᎙'), + ('ᦀ', 'ᦩ'), + ('ᦰ', 'ᧉ'), + ('᧐', '᧙'), + ('᧞', '᧟'), + ('ᨀ', '\u{1a1b}'), + ('᨞', '᨟'), + ('ᵬ', '\u{1dc3}'), + ('⁕', '⁖'), + ('⁘', '⁞'), + ('ₐ', 'ₔ'), + ('₲', '₵'), + ('\u{20eb}', '\u{20eb}'), + ('ℼ', 'ℼ'), + ('⅌', '⅌'), + ('⏑', '⏛'), + ('☘', '☘'), + ('♾', '♿'), + ('⚒', '⚜'), + ('⚢', '⚱'), + ('⟀', '⟆'), + ('⬎', '⬓'), + ('Ⰰ', 'Ⱞ'), + ('ⰰ', 'ⱞ'), + ('Ⲁ', '⳪'), + ('⳹', 'ⴥ'), + ('ⴰ', 'ⵥ'), + ('ⵯ', 'ⵯ'), + ('ⶀ', 'ⶖ'), + ('ⶠ', 'ⶦ'), + ('ⶨ', 'ⶮ'), + ('ⶰ', 'ⶶ'), + ('ⶸ', 'ⶾ'), + ('ⷀ', 'ⷆ'), + ('ⷈ', 'ⷎ'), + ('ⷐ', 'ⷖ'), + ('ⷘ', 'ⷞ'), + ('⸀', '⸗'), + ('⸜', '⸝'), + ('㇀', '㇏'), + ('㉾', '㉾'), + ('龦', '龻'), + ('꜀', '꜖'), + ('ꠀ', '꠫'), + ('並', '龎'), + ('︐', '︙'), + ('𐅀', '𐆊'), + ('𐎠', '𐏃'), + ('𐏈', '𐏕'), + ('𐨀', '\u{10a03}'), + ('\u{10a05}', '\u{10a06}'), + ('\u{10a0c}', '𐨓'), + ('𐨕', '𐨗'), + ('𐨙', '𐨳'), + ('\u{10a38}', '\u{10a3a}'), + ('\u{10a3f}', '𐩇'), + ('𐩐', '𐩘'), + ('𝈀', '𝉅'), + ('𝚤', '𝚥'), +]; + +pub const V5_0: &'static [(char, char)] = &[ + ('ɂ', 'ɏ'), + ('ͻ', 'ͽ'), + ('ӏ', 'ӏ'), + ('Ӻ', 'ӿ'), + ('Ԑ', 'ԓ'), + ('\u{5ba}', '\u{5ba}'), + ('߀', 'ߺ'), + ('ॻ', 'ॼ'), + ('ॾ', 'ॿ'), + ('\u{ce2}', '\u{ce3}'), + ('ೱ', 'ೲ'), + ('\u{1b00}', 'ᭋ'), + ('᭐', '᭼'), + ('\u{1dc4}', '\u{1dca}'), + ('\u{1dfe}', '\u{1dff}'), + ('\u{20ec}', '\u{20ef}'), + ('⅍', 'ⅎ'), + ('ↄ', 'ↄ'), + ('⏜', '⏧'), + ('⚲', '⚲'), + ('⟇', '⟊'), + ('⬔', '⬚'), + ('⬠', '⬣'), + ('Ⱡ', 'ⱬ'), + ('ⱴ', 'ⱷ'), + ('ꜗ', 'ꜚ'), + ('꜠', '꜡'), + ('ꡀ', '꡷'), + ('𐤀', '𐤙'), + ('𐤟', '𐤟'), + ('𒀀', '𒍮'), + ('𒐀', '𒑢'), + ('𒑰', '𒑳'), + ('𝍠', '𝍱'), + ('𝟊', '𝟋'), +]; + +pub const V5_1: &'static [(char, char)] = &[ + ('Ͱ', 'ͳ'), + ('Ͷ', 'ͷ'), + ('Ϗ', 'Ϗ'), + ('\u{487}', '\u{487}'), + ('Ԕ', 'ԣ'), + ('؆', '؊'), + ('\u{616}', '\u{61a}'), + ('ػ', 'ؿ'), + ('ݮ', 'ݿ'), + ('ॱ', 'ॲ'), + ('\u{a51}', '\u{a51}'), + ('\u{a75}', '\u{a75}'), + ('\u{b44}', '\u{b44}'), + ('\u{b62}', '\u{b63}'), + ('ௐ', 'ௐ'), + ('ఽ', 'ఽ'), + ('ౘ', 'ౙ'), + ('\u{c62}', '\u{c63}'), + ('౸', '౿'), + ('ഽ', 'ഽ'), + ('\u{d44}', '\u{d44}'), + ('\u{d62}', '\u{d63}'), + ('൰', '൵'), + ('൹', 'ൿ'), + ('ཫ', 'ཬ'), + ('࿎', '࿎'), + ('࿒', '࿔'), + ('ဢ', 'ဢ'), + ('ဨ', 'ဨ'), + ('ါ', 'ါ'), + ('\u{1033}', '\u{1035}'), + ('\u{103a}', 'ဿ'), + ('ၚ', '႙'), + ('႞', '႟'), + ('ᢪ', 'ᢪ'), + ('\u{1b80}', '\u{1baa}'), + ('ᮮ', '᮹'), + ('ᰀ', '\u{1c37}'), + ('᰻', '᱉'), + ('ᱍ', '᱿'), + ('\u{1dcb}', '\u{1de6}'), + ('ẜ', 'ẟ'), + ('Ỻ', 'ỿ'), + ('\u{2064}', '\u{2064}'), + ('\u{20f0}', '\u{20f0}'), + ('⅏', '⅏'), + ('ↅ', 'ↈ'), + ('⚝', '⚝'), + ('⚳', '⚼'), + ('⛀', '⛃'), + ('⟌', '⟌'), + ('⟬', '⟯'), + ('⬛', '⬟'), + ('⬤', '⭌'), + ('⭐', '⭔'), + ('Ɑ', 'Ɐ'), + ('ⱱ', 'ⱳ'), + ('ⱸ', 'ⱽ'), + ('\u{2de0}', '\u{2dff}'), + ('⸘', '⸛'), + ('⸞', '⸰'), + ('ㄭ', 'ㄭ'), + ('㇐', '㇣'), + ('龼', '鿃'), + ('ꔀ', 'ꘫ'), + ('Ꙁ', 'ꙟ'), + ('Ꙣ', '꙳'), + ('\u{a67c}', 'ꚗ'), + ('ꜛ', 'ꜟ'), + ('Ꜣ', 'ꞌ'), + ('ꟻ', 'ꟿ'), + ('ꢀ', '\u{a8c4}'), + ('꣎', '꣙'), + ('꤀', '\u{a953}'), + ('꥟', '꥟'), + ('ꨀ', '\u{aa36}'), + ('ꩀ', 'ꩍ'), + ('꩐', '꩙'), + ('꩜', '꩟'), + ('\u{fe24}', '\u{fe26}'), + ('𐆐', '𐆛'), + ('𐇐', '\u{101fd}'), + ('𐊀', '𐊜'), + ('𐊠', '𐋐'), + ('𐤠', '𐤹'), + ('𐤿', '𐤿'), + ('𝄩', '𝄩'), + ('🀀', '🀫'), + ('🀰', '🂓'), +]; + +pub const V5_2: &'static [(char, char)] = &[ + ('Ԥ', 'ԥ'), + ('ࠀ', '\u{82d}'), + ('࠰', '࠾'), + ('\u{900}', '\u{900}'), + ('ॎ', 'ॎ'), + 
('\u{955}', '\u{955}'), + ('ॹ', 'ॺ'), + ('৻', '৻'), + ('࿕', '࿘'), + ('ႚ', '\u{109d}'), + ('ᅚ', 'ᅞ'), + ('ᆣ', 'ᆧ'), + ('ᇺ', 'ᇿ'), + ('᐀', '᐀'), + ('ᙷ', 'ᙿ'), + ('ᢰ', 'ᣵ'), + ('ᦪ', 'ᦫ'), + ('᧚', '᧚'), + ('ᨠ', '\u{1a5e}'), + ('\u{1a60}', '\u{1a7c}'), + ('\u{1a7f}', '᪉'), + ('᪐', '᪙'), + ('᪠', '᪭'), + ('\u{1cd0}', 'ᳲ'), + ('\u{1dfd}', '\u{1dfd}'), + ('₶', '₸'), + ('⅐', '⅒'), + ('↉', '↉'), + ('⏨', '⏨'), + ('⚞', '⚟'), + ('⚽', '⚿'), + ('⛄', '⛍'), + ('⛏', '⛡'), + ('⛣', '⛣'), + ('⛨', '⛿'), + ('❗', '❗'), + ('⭕', '⭙'), + ('Ɒ', 'Ɒ'), + ('Ȿ', 'Ɀ'), + ('Ⳬ', '\u{2cf1}'), + ('⸱', '⸱'), + ('㉄', '㉏'), + ('鿄', '鿋'), + ('ꓐ', '꓿'), + ('ꚠ', '꛷'), + ('꠰', '꠹'), + ('\u{a8e0}', 'ꣻ'), + ('ꥠ', 'ꥼ'), + ('\u{a980}', '꧍'), + ('ꧏ', '꧙'), + ('꧞', '꧟'), + ('ꩠ', 'ꩻ'), + ('ꪀ', 'ꫂ'), + ('ꫛ', '꫟'), + ('ꯀ', '\u{abed}'), + ('꯰', '꯹'), + ('ힰ', 'ퟆ'), + ('ퟋ', 'ퟻ'), + ('恵', '舘'), + ('𐡀', '𐡕'), + ('𐡗', '𐡟'), + ('𐤚', '𐤛'), + ('𐩠', '𐩿'), + ('𐬀', '𐬵'), + ('𐬹', '𐭕'), + ('𐭘', '𐭲'), + ('𐭸', '𐭿'), + ('𐰀', '𐱈'), + ('𐹠', '𐹾'), + ('\u{11080}', '𑃁'), + ('𓀀', '𓐮'), + ('🄀', '🄊'), + ('🄐', '🄮'), + ('🄱', '🄱'), + ('🄽', '🄽'), + ('🄿', '🄿'), + ('🅂', '🅂'), + ('🅆', '🅆'), + ('🅊', '🅎'), + ('🅗', '🅗'), + ('🅟', '🅟'), + ('🅹', '🅹'), + ('🅻', '🅼'), + ('🅿', '🅿'), + ('🆊', '🆍'), + ('🆐', '🆐'), + ('🈀', '🈀'), + ('🈐', '🈱'), + ('🉀', '🉈'), + ('𪜀', '𫜴'), +]; + +pub const V6_0: &'static [(char, char)] = &[ + ('Ԧ', 'ԧ'), + ('ؠ', 'ؠ'), + ('\u{65f}', '\u{65f}'), + ('ࡀ', '\u{85b}'), + ('࡞', '࡞'), + ('\u{93a}', 'ऻ'), + ('ॏ', 'ॏ'), + ('\u{956}', '\u{957}'), + ('ॳ', 'ॷ'), + ('୲', '୷'), + ('ഩ', 'ഩ'), + ('ഺ', 'ഺ'), + ('ൎ', 'ൎ'), + ('ྌ', '\u{f8f}'), + ('࿙', '࿚'), + ('\u{135d}', '\u{135e}'), + ('ᯀ', '\u{1bf3}'), + ('᯼', '᯿'), + ('\u{1dfc}', '\u{1dfc}'), + ('ₕ', 'ₜ'), + ('₹', '₹'), + ('⏩', '⏳'), + ('⛎', '⛎'), + ('⛢', '⛢'), + ('⛤', '⛧'), + ('✅', '✅'), + ('✊', '✋'), + ('✨', '✨'), + ('❌', '❌'), + ('❎', '❎'), + ('❓', '❕'), + ('❟', '❠'), + ('➕', '➗'), + ('➰', '➰'), + ('➿', '➿'), + ('⟎', '⟏'), + ('⵰', '⵰'), + ('\u{2d7f}', '\u{2d7f}'), + ('ㆸ', 'ㆺ'), + ('Ꙡ', 'ꙡ'), + ('Ɥ', 'ꞎ'), + ('Ꞑ', 'ꞑ'), + ('Ꞡ', 'ꞩ'), + ('ꟺ', 'ꟺ'), + ('ꬁ', 'ꬆ'), + ('ꬉ', 'ꬎ'), + ('ꬑ', 'ꬖ'), + ('ꬠ', 'ꬦ'), + ('ꬨ', 'ꬮ'), + ('﮲', '﯁'), + ('𑀀', '𑁍'), + ('𑁒', '𑁯'), + ('𖠀', '𖨸'), + ('𛀀', '𛀁'), + ('🂠', '🂮'), + ('🂱', '🂾'), + ('🃁', '🃏'), + ('🃑', '🃟'), + ('🄰', '🄰'), + ('🄲', '🄼'), + ('🄾', '🄾'), + ('🅀', '🅁'), + ('🅃', '🅅'), + ('🅇', '🅉'), + ('🅏', '🅖'), + ('🅘', '🅞'), + ('🅠', '🅩'), + ('🅰', '🅸'), + ('🅺', '🅺'), + ('🅽', '🅾'), + ('🆀', '🆉'), + ('🆎', '🆏'), + ('🆑', '🆚'), + ('🇦', '🇿'), + ('🈁', '🈂'), + ('🈲', '🈺'), + ('🉐', '🉑'), + ('🌀', '🌠'), + ('🌰', '🌵'), + ('🌷', '🍼'), + ('🎀', '🎓'), + ('🎠', '🏄'), + ('🏆', '🏊'), + ('🏠', '🏰'), + ('🐀', '🐾'), + ('👀', '👀'), + ('👂', '📷'), + ('📹', '📼'), + ('🔀', '🔽'), + ('🕐', '🕧'), + ('🗻', '🗿'), + ('😁', '😐'), + ('😒', '😔'), + ('😖', '😖'), + ('😘', '😘'), + ('😚', '😚'), + ('😜', '😞'), + ('😠', '😥'), + ('😨', '😫'), + ('😭', '😭'), + ('😰', '😳'), + ('😵', '🙀'), + ('🙅', '🙏'), + ('🚀', '🛅'), + ('🜀', '🝳'), + ('𫝀', '𫠝'), +]; + +pub const V6_1: &'static [(char, char)] = &[ + ('֏', '֏'), + ('\u{604}', '\u{604}'), + ('ࢠ', 'ࢠ'), + ('ࢢ', 'ࢬ'), + ('\u{8e4}', '\u{8fe}'), + ('૰', '૰'), + ('ໞ', 'ໟ'), + ('Ⴧ', 'Ⴧ'), + ('Ⴭ', 'Ⴭ'), + ('ჽ', 'ჿ'), + ('\u{1bab}', '\u{1bad}'), + ('ᮺ', 'ᮿ'), + ('᳀', '᳇'), + ('ᳳ', 'ᳶ'), + ('⟋', '⟋'), + ('⟍', '⟍'), + ('Ⳳ', 'ⳳ'), + ('ⴧ', 'ⴧ'), + ('ⴭ', 'ⴭ'), + ('ⵦ', 'ⵧ'), + ('⸲', '⸻'), + ('鿌', '鿌'), + ('\u{a674}', '\u{a67b}'), + ('\u{a69f}', '\u{a69f}'), + ('Ꞓ', 'ꞓ'), + ('Ɦ', 'Ɦ'), + ('ꟸ', 'ꟹ'), + ('ꫠ', '\u{aaf6}'), + ('郞', '隷'), + ('𐦀', '𐦷'), + ('𐦾', '𐦿'), + ('𑃐', '𑃨'), + ('𑃰', '𑃹'), + ('\u{11100}', '\u{11134}'), + ('𑄶', '𑅃'), + 
('\u{11180}', '𑇈'), + ('𑇐', '𑇙'), + ('𑚀', '\u{116b7}'), + ('𑛀', '𑛉'), + ('𖼀', '𖽄'), + ('𖽐', '𖽾'), + ('\u{16f8f}', '𖾟'), + ('𞸀', '𞸃'), + ('𞸅', '𞸟'), + ('𞸡', '𞸢'), + ('𞸤', '𞸤'), + ('𞸧', '𞸧'), + ('𞸩', '𞸲'), + ('𞸴', '𞸷'), + ('𞸹', '𞸹'), + ('𞸻', '𞸻'), + ('𞹂', '𞹂'), + ('𞹇', '𞹇'), + ('𞹉', '𞹉'), + ('𞹋', '𞹋'), + ('𞹍', '𞹏'), + ('𞹑', '𞹒'), + ('𞹔', '𞹔'), + ('𞹗', '𞹗'), + ('𞹙', '𞹙'), + ('𞹛', '𞹛'), + ('𞹝', '𞹝'), + ('𞹟', '𞹟'), + ('𞹡', '𞹢'), + ('𞹤', '𞹤'), + ('𞹧', '𞹪'), + ('𞹬', '𞹲'), + ('𞹴', '𞹷'), + ('𞹹', '𞹼'), + ('𞹾', '𞹾'), + ('𞺀', '𞺉'), + ('𞺋', '𞺛'), + ('𞺡', '𞺣'), + ('𞺥', '𞺩'), + ('𞺫', '𞺻'), + ('𞻰', '𞻱'), + ('🅪', '🅫'), + ('🕀', '🕃'), + ('😀', '😀'), + ('😑', '😑'), + ('😕', '😕'), + ('😗', '😗'), + ('😙', '😙'), + ('😛', '😛'), + ('😟', '😟'), + ('😦', '😧'), + ('😬', '😬'), + ('😮', '😯'), + ('😴', '😴'), +]; + +pub const V6_2: &'static [(char, char)] = &[('₺', '₺')]; + +pub const V6_3: &'static [(char, char)] = + &[('\u{61c}', '\u{61c}'), ('\u{2066}', '\u{2069}')]; + +pub const V7_0: &'static [(char, char)] = &[ + ('Ϳ', 'Ϳ'), + ('Ԩ', 'ԯ'), + ('֍', '֎'), + ('\u{605}', '\u{605}'), + ('ࢡ', 'ࢡ'), + ('ࢭ', 'ࢲ'), + ('\u{8ff}', '\u{8ff}'), + ('ॸ', 'ॸ'), + ('ঀ', 'ঀ'), + ('\u{c00}', '\u{c00}'), + ('ఴ', 'ఴ'), + ('\u{c81}', '\u{c81}'), + ('\u{d01}', '\u{d01}'), + ('෦', '෯'), + ('ᛱ', 'ᛸ'), + ('ᤝ', 'ᤞ'), + ('\u{1ab0}', '\u{1abe}'), + ('\u{1cf8}', '\u{1cf9}'), + ('\u{1de7}', '\u{1df5}'), + ('₻', '₽'), + ('⏴', '⏺'), + ('✀', '✀'), + ('⭍', '⭏'), + ('⭚', '⭳'), + ('⭶', '⮕'), + ('⮘', '⮹'), + ('⮽', '⯈'), + ('⯊', '⯑'), + ('⸼', '⹂'), + ('Ꚙ', 'ꚝ'), + ('ꞔ', 'ꞟ'), + ('Ɜ', 'Ɬ'), + ('Ʞ', 'Ʇ'), + ('ꟷ', 'ꟷ'), + ('ꧠ', 'ꧾ'), + ('\u{aa7c}', 'ꩿ'), + ('ꬰ', 'ꭟ'), + ('ꭤ', 'ꭥ'), + ('\u{fe27}', '\u{fe2d}'), + ('𐆋', '𐆌'), + ('𐆠', '𐆠'), + ('\u{102e0}', '𐋻'), + ('𐌟', '𐌟'), + ('𐍐', '\u{1037a}'), + ('𐔀', '𐔧'), + ('𐔰', '𐕣'), + ('𐕯', '𐕯'), + ('𐘀', '𐜶'), + ('𐝀', '𐝕'), + ('𐝠', '𐝧'), + ('𐡠', '𐢞'), + ('𐢧', '𐢯'), + ('𐪀', '𐪟'), + ('𐫀', '\u{10ae6}'), + ('𐫫', '𐫶'), + ('𐮀', '𐮑'), + ('𐮙', '𐮜'), + ('𐮩', '𐮯'), + ('\u{1107f}', '\u{1107f}'), + ('𑅐', '𑅶'), + ('𑇍', '𑇍'), + ('𑇚', '𑇚'), + ('𑇡', '𑇴'), + ('𑈀', '𑈑'), + ('𑈓', '𑈽'), + ('𑊰', '\u{112ea}'), + ('𑋰', '𑋹'), + ('\u{11301}', '𑌃'), + ('𑌅', '𑌌'), + ('𑌏', '𑌐'), + ('𑌓', '𑌨'), + ('𑌪', '𑌰'), + ('𑌲', '𑌳'), + ('𑌵', '𑌹'), + ('\u{1133c}', '𑍄'), + ('𑍇', '𑍈'), + ('𑍋', '\u{1134d}'), + ('\u{11357}', '\u{11357}'), + ('𑍝', '𑍣'), + ('\u{11366}', '\u{1136c}'), + ('\u{11370}', '\u{11374}'), + ('𑒀', '𑓇'), + ('𑓐', '𑓙'), + ('𑖀', '\u{115b5}'), + ('𑖸', '𑗉'), + ('𑘀', '𑙄'), + ('𑙐', '𑙙'), + ('𑢠', '𑣲'), + ('𑣿', '𑣿'), + ('𑫀', '𑫸'), + ('𒍯', '𒎘'), + ('𒑣', '𒑮'), + ('𒑴', '𒑴'), + ('𖩀', '𖩞'), + ('𖩠', '𖩩'), + ('𖩮', '𖩯'), + ('𖫐', '𖫭'), + ('\u{16af0}', '𖫵'), + ('𖬀', '𖭅'), + ('𖭐', '𖭙'), + ('𖭛', '𖭡'), + ('𖭣', '𖭷'), + ('𖭽', '𖮏'), + ('𛰀', '𛱪'), + ('𛱰', '𛱼'), + ('𛲀', '𛲈'), + ('𛲐', '𛲙'), + ('𛲜', '\u{1bca3}'), + ('𞠀', '𞣄'), + ('𞣇', '\u{1e8d6}'), + ('🂿', '🂿'), + ('🃠', '🃵'), + ('🄋', '🄌'), + ('🌡', '🌬'), + ('🌶', '🌶'), + ('🍽', '🍽'), + ('🎔', '🎟'), + ('🏅', '🏅'), + ('🏋', '🏎'), + ('🏔', '🏟'), + ('🏱', '🏷'), + ('🐿', '🐿'), + ('👁', '👁'), + ('📸', '📸'), + ('📽', '📾'), + ('🔾', '🔿'), + ('🕄', '🕊'), + ('🕨', '🕹'), + ('🕻', '🖣'), + ('🖥', '🗺'), + ('🙁', '🙂'), + ('🙐', '🙿'), + ('🛆', '🛏'), + ('🛠', '🛬'), + ('🛰', '🛳'), + ('🞀', '🟔'), + ('🠀', '🠋'), + ('🠐', '🡇'), + ('🡐', '🡙'), + ('🡠', '🢇'), + ('🢐', '🢭'), +]; + +pub const V8_0: &'static [(char, char)] = &[ + ('ࢳ', 'ࢴ'), + ('\u{8e3}', '\u{8e3}'), + ('ૹ', 'ૹ'), + ('ౚ', 'ౚ'), + ('ൟ', 'ൟ'), + ('Ᏽ', 'Ᏽ'), + ('ᏸ', 'ᏽ'), + ('₾', '₾'), + ('↊', '↋'), + ('⯬', '⯯'), + ('鿍', '鿕'), + ('\u{a69e}', '\u{a69e}'), + ('ꞏ', 'ꞏ'), + ('Ʝ', 'ꞷ'), + ('꣼', 'ꣽ'), + ('ꭠ', 'ꭣ'), + ('ꭰ', 'ꮿ'), 
+ ('\u{fe2e}', '\u{fe2f}'), + ('𐣠', '𐣲'), + ('𐣴', '𐣵'), + ('𐣻', '𐣿'), + ('𐦼', '𐦽'), + ('𐧀', '𐧏'), + ('𐧒', '𐧿'), + ('𐲀', '𐲲'), + ('𐳀', '𐳲'), + ('𐳺', '𐳿'), + ('\u{111c9}', '\u{111cc}'), + ('𑇛', '𑇟'), + ('𑊀', '𑊆'), + ('𑊈', '𑊈'), + ('𑊊', '𑊍'), + ('𑊏', '𑊝'), + ('𑊟', '𑊩'), + ('\u{11300}', '\u{11300}'), + ('𑍐', '𑍐'), + ('𑗊', '\u{115dd}'), + ('𑜀', '𑜙'), + ('\u{1171d}', '\u{1172b}'), + ('𑜰', '𑜿'), + ('𒎙', '𒎙'), + ('𒒀', '𒕃'), + ('𔐀', '𔙆'), + ('𝇞', '𝇨'), + ('𝠀', '𝪋'), + ('\u{1da9b}', '\u{1da9f}'), + ('\u{1daa1}', '\u{1daaf}'), + ('🌭', '🌯'), + ('🍾', '🍿'), + ('🏏', '🏓'), + ('🏸', '🏿'), + ('📿', '📿'), + ('🕋', '🕏'), + ('🙃', '🙄'), + ('🛐', '🛐'), + ('🤐', '🤘'), + ('🦀', '🦄'), + ('🧀', '🧀'), + ('𫠠', '𬺡'), +]; + +pub const V9_0: &'static [(char, char)] = &[ + ('ࢶ', 'ࢽ'), + ('\u{8d4}', '\u{8e2}'), + ('ಀ', 'ಀ'), + ('൏', '൏'), + ('ൔ', 'ൖ'), + ('൘', '൞'), + ('൶', '൸'), + ('ᲀ', 'ᲈ'), + ('\u{1dfb}', '\u{1dfb}'), + ('⏻', '⏾'), + ('⹃', '⹄'), + ('Ɪ', 'Ɪ'), + ('\u{a8c5}', '\u{a8c5}'), + ('𐆍', '𐆎'), + ('𐒰', '𐓓'), + ('𐓘', '𐓻'), + ('\u{1123e}', '\u{1123e}'), + ('𑐀', '𑑙'), + ('𑑛', '𑑛'), + ('𑑝', '𑑝'), + ('𑙠', '𑙬'), + ('𑰀', '𑰈'), + ('𑰊', '\u{11c36}'), + ('\u{11c38}', '𑱅'), + ('𑱐', '𑱬'), + ('𑱰', '𑲏'), + ('\u{11c92}', '\u{11ca7}'), + ('𑲩', '\u{11cb6}'), + ('𖿠', '𖿠'), + ('𗀀', '𘟬'), + ('𘠀', '𘫲'), + ('\u{1e000}', '\u{1e006}'), + ('\u{1e008}', '\u{1e018}'), + ('\u{1e01b}', '\u{1e021}'), + ('\u{1e023}', '\u{1e024}'), + ('\u{1e026}', '\u{1e02a}'), + ('𞤀', '\u{1e94a}'), + ('𞥐', '𞥙'), + ('𞥞', '𞥟'), + ('🆛', '🆬'), + ('🈻', '🈻'), + ('🕺', '🕺'), + ('🖤', '🖤'), + ('🛑', '🛒'), + ('🛴', '🛶'), + ('🤙', '🤞'), + ('🤠', '🤧'), + ('🤰', '🤰'), + ('🤳', '🤾'), + ('🥀', '🥋'), + ('🥐', '🥞'), + ('🦅', '🦑'), +]; diff --git a/vendor/regex-syntax/src/unicode_tables/case_folding_simple.rs b/vendor/regex-syntax/src/unicode_tables/case_folding_simple.rs new file mode 100644 index 00000000000000..07f6ff2f5af7f8 --- /dev/null +++ b/vendor/regex-syntax/src/unicode_tables/case_folding_simple.rs @@ -0,0 +1,2948 @@ +// DO NOT EDIT THIS FILE. IT WAS AUTOMATICALLY GENERATED BY: +// +// ucd-generate case-folding-simple ucd-16.0.0 --chars --all-pairs +// +// Unicode version: 16.0.0. +// +// ucd-generate 0.3.1 is available on crates.io. 
+ +pub const CASE_FOLDING_SIMPLE: &'static [(char, &'static [char])] = &[ + ('A', &['a']), + ('B', &['b']), + ('C', &['c']), + ('D', &['d']), + ('E', &['e']), + ('F', &['f']), + ('G', &['g']), + ('H', &['h']), + ('I', &['i']), + ('J', &['j']), + ('K', &['k', 'K']), + ('L', &['l']), + ('M', &['m']), + ('N', &['n']), + ('O', &['o']), + ('P', &['p']), + ('Q', &['q']), + ('R', &['r']), + ('S', &['s', 'ſ']), + ('T', &['t']), + ('U', &['u']), + ('V', &['v']), + ('W', &['w']), + ('X', &['x']), + ('Y', &['y']), + ('Z', &['z']), + ('a', &['A']), + ('b', &['B']), + ('c', &['C']), + ('d', &['D']), + ('e', &['E']), + ('f', &['F']), + ('g', &['G']), + ('h', &['H']), + ('i', &['I']), + ('j', &['J']), + ('k', &['K', 'K']), + ('l', &['L']), + ('m', &['M']), + ('n', &['N']), + ('o', &['O']), + ('p', &['P']), + ('q', &['Q']), + ('r', &['R']), + ('s', &['S', 'ſ']), + ('t', &['T']), + ('u', &['U']), + ('v', &['V']), + ('w', &['W']), + ('x', &['X']), + ('y', &['Y']), + ('z', &['Z']), + ('µ', &['Μ', 'μ']), + ('À', &['à']), + ('Á', &['á']), + ('Â', &['â']), + ('Ã', &['ã']), + ('Ä', &['ä']), + ('Å', &['å', 'Å']), + ('Æ', &['æ']), + ('Ç', &['ç']), + ('È', &['è']), + ('É', &['é']), + ('Ê', &['ê']), + ('Ë', &['ë']), + ('Ì', &['ì']), + ('Í', &['í']), + ('Î', &['î']), + ('Ï', &['ï']), + ('Ð', &['ð']), + ('Ñ', &['ñ']), + ('Ò', &['ò']), + ('Ó', &['ó']), + ('Ô', &['ô']), + ('Õ', &['õ']), + ('Ö', &['ö']), + ('Ø', &['ø']), + ('Ù', &['ù']), + ('Ú', &['ú']), + ('Û', &['û']), + ('Ü', &['ü']), + ('Ý', &['ý']), + ('Þ', &['þ']), + ('ß', &['ẞ']), + ('à', &['À']), + ('á', &['Á']), + ('â', &['Â']), + ('ã', &['Ã']), + ('ä', &['Ä']), + ('å', &['Å', 'Å']), + ('æ', &['Æ']), + ('ç', &['Ç']), + ('è', &['È']), + ('é', &['É']), + ('ê', &['Ê']), + ('ë', &['Ë']), + ('ì', &['Ì']), + ('í', &['Í']), + ('î', &['Î']), + ('ï', &['Ï']), + ('ð', &['Ð']), + ('ñ', &['Ñ']), + ('ò', &['Ò']), + ('ó', &['Ó']), + ('ô', &['Ô']), + ('õ', &['Õ']), + ('ö', &['Ö']), + ('ø', &['Ø']), + ('ù', &['Ù']), + ('ú', &['Ú']), + ('û', &['Û']), + ('ü', &['Ü']), + ('ý', &['Ý']), + ('þ', &['Þ']), + ('ÿ', &['Ÿ']), + ('Ā', &['ā']), + ('ā', &['Ā']), + ('Ă', &['ă']), + ('ă', &['Ă']), + ('Ą', &['ą']), + ('ą', &['Ą']), + ('Ć', &['ć']), + ('ć', &['Ć']), + ('Ĉ', &['ĉ']), + ('ĉ', &['Ĉ']), + ('Ċ', &['ċ']), + ('ċ', &['Ċ']), + ('Č', &['č']), + ('č', &['Č']), + ('Ď', &['ď']), + ('ď', &['Ď']), + ('Đ', &['đ']), + ('đ', &['Đ']), + ('Ē', &['ē']), + ('ē', &['Ē']), + ('Ĕ', &['ĕ']), + ('ĕ', &['Ĕ']), + ('Ė', &['ė']), + ('ė', &['Ė']), + ('Ę', &['ę']), + ('ę', &['Ę']), + ('Ě', &['ě']), + ('ě', &['Ě']), + ('Ĝ', &['ĝ']), + ('ĝ', &['Ĝ']), + ('Ğ', &['ğ']), + ('ğ', &['Ğ']), + ('Ġ', &['ġ']), + ('ġ', &['Ġ']), + ('Ģ', &['ģ']), + ('ģ', &['Ģ']), + ('Ĥ', &['ĥ']), + ('ĥ', &['Ĥ']), + ('Ħ', &['ħ']), + ('ħ', &['Ħ']), + ('Ĩ', &['ĩ']), + ('ĩ', &['Ĩ']), + ('Ī', &['ī']), + ('ī', &['Ī']), + ('Ĭ', &['ĭ']), + ('ĭ', &['Ĭ']), + ('Į', &['į']), + ('į', &['Į']), + ('IJ', &['ij']), + ('ij', &['IJ']), + ('Ĵ', &['ĵ']), + ('ĵ', &['Ĵ']), + ('Ķ', &['ķ']), + ('ķ', &['Ķ']), + ('Ĺ', &['ĺ']), + ('ĺ', &['Ĺ']), + ('Ļ', &['ļ']), + ('ļ', &['Ļ']), + ('Ľ', &['ľ']), + ('ľ', &['Ľ']), + ('Ŀ', &['ŀ']), + ('ŀ', &['Ŀ']), + ('Ł', &['ł']), + ('ł', &['Ł']), + ('Ń', &['ń']), + ('ń', &['Ń']), + ('Ņ', &['ņ']), + ('ņ', &['Ņ']), + ('Ň', &['ň']), + ('ň', &['Ň']), + ('Ŋ', &['ŋ']), + ('ŋ', &['Ŋ']), + ('Ō', &['ō']), + ('ō', &['Ō']), + ('Ŏ', &['ŏ']), + ('ŏ', &['Ŏ']), + ('Ő', &['ő']), + ('ő', &['Ő']), + ('Œ', &['œ']), + ('œ', &['Œ']), + ('Ŕ', &['ŕ']), + ('ŕ', &['Ŕ']), + ('Ŗ', &['ŗ']), + ('ŗ', &['Ŗ']), + ('Ř', &['ř']), + ('ř', &['Ř']), + ('Ś', &['ś']), + ('ś', 
&['Ś']), + ('Ŝ', &['ŝ']), + ('ŝ', &['Ŝ']), + ('Ş', &['ş']), + ('ş', &['Ş']), + ('Š', &['š']), + ('š', &['Š']), + ('Ţ', &['ţ']), + ('ţ', &['Ţ']), + ('Ť', &['ť']), + ('ť', &['Ť']), + ('Ŧ', &['ŧ']), + ('ŧ', &['Ŧ']), + ('Ũ', &['ũ']), + ('ũ', &['Ũ']), + ('Ū', &['ū']), + ('ū', &['Ū']), + ('Ŭ', &['ŭ']), + ('ŭ', &['Ŭ']), + ('Ů', &['ů']), + ('ů', &['Ů']), + ('Ű', &['ű']), + ('ű', &['Ű']), + ('Ų', &['ų']), + ('ų', &['Ų']), + ('Ŵ', &['ŵ']), + ('ŵ', &['Ŵ']), + ('Ŷ', &['ŷ']), + ('ŷ', &['Ŷ']), + ('Ÿ', &['ÿ']), + ('Ź', &['ź']), + ('ź', &['Ź']), + ('Ż', &['ż']), + ('ż', &['Ż']), + ('Ž', &['ž']), + ('ž', &['Ž']), + ('ſ', &['S', 's']), + ('ƀ', &['Ƀ']), + ('Ɓ', &['ɓ']), + ('Ƃ', &['ƃ']), + ('ƃ', &['Ƃ']), + ('Ƅ', &['ƅ']), + ('ƅ', &['Ƅ']), + ('Ɔ', &['ɔ']), + ('Ƈ', &['ƈ']), + ('ƈ', &['Ƈ']), + ('Ɖ', &['ɖ']), + ('Ɗ', &['ɗ']), + ('Ƌ', &['ƌ']), + ('ƌ', &['Ƌ']), + ('Ǝ', &['ǝ']), + ('Ə', &['ə']), + ('Ɛ', &['ɛ']), + ('Ƒ', &['ƒ']), + ('ƒ', &['Ƒ']), + ('Ɠ', &['ɠ']), + ('Ɣ', &['ɣ']), + ('ƕ', &['Ƕ']), + ('Ɩ', &['ɩ']), + ('Ɨ', &['ɨ']), + ('Ƙ', &['ƙ']), + ('ƙ', &['Ƙ']), + ('ƚ', &['Ƚ']), + ('ƛ', &['Ƛ']), + ('Ɯ', &['ɯ']), + ('Ɲ', &['ɲ']), + ('ƞ', &['Ƞ']), + ('Ɵ', &['ɵ']), + ('Ơ', &['ơ']), + ('ơ', &['Ơ']), + ('Ƣ', &['ƣ']), + ('ƣ', &['Ƣ']), + ('Ƥ', &['ƥ']), + ('ƥ', &['Ƥ']), + ('Ʀ', &['ʀ']), + ('Ƨ', &['ƨ']), + ('ƨ', &['Ƨ']), + ('Ʃ', &['ʃ']), + ('Ƭ', &['ƭ']), + ('ƭ', &['Ƭ']), + ('Ʈ', &['ʈ']), + ('Ư', &['ư']), + ('ư', &['Ư']), + ('Ʊ', &['ʊ']), + ('Ʋ', &['ʋ']), + ('Ƴ', &['ƴ']), + ('ƴ', &['Ƴ']), + ('Ƶ', &['ƶ']), + ('ƶ', &['Ƶ']), + ('Ʒ', &['ʒ']), + ('Ƹ', &['ƹ']), + ('ƹ', &['Ƹ']), + ('Ƽ', &['ƽ']), + ('ƽ', &['Ƽ']), + ('ƿ', &['Ƿ']), + ('DŽ', &['Dž', 'dž']), + ('Dž', &['DŽ', 'dž']), + ('dž', &['DŽ', 'Dž']), + ('LJ', &['Lj', 'lj']), + ('Lj', &['LJ', 'lj']), + ('lj', &['LJ', 'Lj']), + ('NJ', &['Nj', 'nj']), + ('Nj', &['NJ', 'nj']), + ('nj', &['NJ', 'Nj']), + ('Ǎ', &['ǎ']), + ('ǎ', &['Ǎ']), + ('Ǐ', &['ǐ']), + ('ǐ', &['Ǐ']), + ('Ǒ', &['ǒ']), + ('ǒ', &['Ǒ']), + ('Ǔ', &['ǔ']), + ('ǔ', &['Ǔ']), + ('Ǖ', &['ǖ']), + ('ǖ', &['Ǖ']), + ('Ǘ', &['ǘ']), + ('ǘ', &['Ǘ']), + ('Ǚ', &['ǚ']), + ('ǚ', &['Ǚ']), + ('Ǜ', &['ǜ']), + ('ǜ', &['Ǜ']), + ('ǝ', &['Ǝ']), + ('Ǟ', &['ǟ']), + ('ǟ', &['Ǟ']), + ('Ǡ', &['ǡ']), + ('ǡ', &['Ǡ']), + ('Ǣ', &['ǣ']), + ('ǣ', &['Ǣ']), + ('Ǥ', &['ǥ']), + ('ǥ', &['Ǥ']), + ('Ǧ', &['ǧ']), + ('ǧ', &['Ǧ']), + ('Ǩ', &['ǩ']), + ('ǩ', &['Ǩ']), + ('Ǫ', &['ǫ']), + ('ǫ', &['Ǫ']), + ('Ǭ', &['ǭ']), + ('ǭ', &['Ǭ']), + ('Ǯ', &['ǯ']), + ('ǯ', &['Ǯ']), + ('DZ', &['Dz', 'dz']), + ('Dz', &['DZ', 'dz']), + ('dz', &['DZ', 'Dz']), + ('Ǵ', &['ǵ']), + ('ǵ', &['Ǵ']), + ('Ƕ', &['ƕ']), + ('Ƿ', &['ƿ']), + ('Ǹ', &['ǹ']), + ('ǹ', &['Ǹ']), + ('Ǻ', &['ǻ']), + ('ǻ', &['Ǻ']), + ('Ǽ', &['ǽ']), + ('ǽ', &['Ǽ']), + ('Ǿ', &['ǿ']), + ('ǿ', &['Ǿ']), + ('Ȁ', &['ȁ']), + ('ȁ', &['Ȁ']), + ('Ȃ', &['ȃ']), + ('ȃ', &['Ȃ']), + ('Ȅ', &['ȅ']), + ('ȅ', &['Ȅ']), + ('Ȇ', &['ȇ']), + ('ȇ', &['Ȇ']), + ('Ȉ', &['ȉ']), + ('ȉ', &['Ȉ']), + ('Ȋ', &['ȋ']), + ('ȋ', &['Ȋ']), + ('Ȍ', &['ȍ']), + ('ȍ', &['Ȍ']), + ('Ȏ', &['ȏ']), + ('ȏ', &['Ȏ']), + ('Ȑ', &['ȑ']), + ('ȑ', &['Ȑ']), + ('Ȓ', &['ȓ']), + ('ȓ', &['Ȓ']), + ('Ȕ', &['ȕ']), + ('ȕ', &['Ȕ']), + ('Ȗ', &['ȗ']), + ('ȗ', &['Ȗ']), + ('Ș', &['ș']), + ('ș', &['Ș']), + ('Ț', &['ț']), + ('ț', &['Ț']), + ('Ȝ', &['ȝ']), + ('ȝ', &['Ȝ']), + ('Ȟ', &['ȟ']), + ('ȟ', &['Ȟ']), + ('Ƞ', &['ƞ']), + ('Ȣ', &['ȣ']), + ('ȣ', &['Ȣ']), + ('Ȥ', &['ȥ']), + ('ȥ', &['Ȥ']), + ('Ȧ', &['ȧ']), + ('ȧ', &['Ȧ']), + ('Ȩ', &['ȩ']), + ('ȩ', &['Ȩ']), + ('Ȫ', &['ȫ']), + ('ȫ', &['Ȫ']), + ('Ȭ', &['ȭ']), + ('ȭ', &['Ȭ']), + ('Ȯ', &['ȯ']), + ('ȯ', &['Ȯ']), + ('Ȱ', &['ȱ']), + ('ȱ', &['Ȱ']), + ('Ȳ', 
&['ȳ']), + ('ȳ', &['Ȳ']), + ('Ⱥ', &['ⱥ']), + ('Ȼ', &['ȼ']), + ('ȼ', &['Ȼ']), + ('Ƚ', &['ƚ']), + ('Ⱦ', &['ⱦ']), + ('ȿ', &['Ȿ']), + ('ɀ', &['Ɀ']), + ('Ɂ', &['ɂ']), + ('ɂ', &['Ɂ']), + ('Ƀ', &['ƀ']), + ('Ʉ', &['ʉ']), + ('Ʌ', &['ʌ']), + ('Ɇ', &['ɇ']), + ('ɇ', &['Ɇ']), + ('Ɉ', &['ɉ']), + ('ɉ', &['Ɉ']), + ('Ɋ', &['ɋ']), + ('ɋ', &['Ɋ']), + ('Ɍ', &['ɍ']), + ('ɍ', &['Ɍ']), + ('Ɏ', &['ɏ']), + ('ɏ', &['Ɏ']), + ('ɐ', &['Ɐ']), + ('ɑ', &['Ɑ']), + ('ɒ', &['Ɒ']), + ('ɓ', &['Ɓ']), + ('ɔ', &['Ɔ']), + ('ɖ', &['Ɖ']), + ('ɗ', &['Ɗ']), + ('ə', &['Ə']), + ('ɛ', &['Ɛ']), + ('ɜ', &['Ɜ']), + ('ɠ', &['Ɠ']), + ('ɡ', &['Ɡ']), + ('ɣ', &['Ɣ']), + ('ɤ', &['Ɤ']), + ('ɥ', &['Ɥ']), + ('ɦ', &['Ɦ']), + ('ɨ', &['Ɨ']), + ('ɩ', &['Ɩ']), + ('ɪ', &['Ɪ']), + ('ɫ', &['Ɫ']), + ('ɬ', &['Ɬ']), + ('ɯ', &['Ɯ']), + ('ɱ', &['Ɱ']), + ('ɲ', &['Ɲ']), + ('ɵ', &['Ɵ']), + ('ɽ', &['Ɽ']), + ('ʀ', &['Ʀ']), + ('ʂ', &['Ʂ']), + ('ʃ', &['Ʃ']), + ('ʇ', &['Ʇ']), + ('ʈ', &['Ʈ']), + ('ʉ', &['Ʉ']), + ('ʊ', &['Ʊ']), + ('ʋ', &['Ʋ']), + ('ʌ', &['Ʌ']), + ('ʒ', &['Ʒ']), + ('ʝ', &['Ʝ']), + ('ʞ', &['Ʞ']), + ('\u{345}', &['Ι', 'ι', 'ι']), + ('Ͱ', &['ͱ']), + ('ͱ', &['Ͱ']), + ('Ͳ', &['ͳ']), + ('ͳ', &['Ͳ']), + ('Ͷ', &['ͷ']), + ('ͷ', &['Ͷ']), + ('ͻ', &['Ͻ']), + ('ͼ', &['Ͼ']), + ('ͽ', &['Ͽ']), + ('Ϳ', &['ϳ']), + ('Ά', &['ά']), + ('Έ', &['έ']), + ('Ή', &['ή']), + ('Ί', &['ί']), + ('Ό', &['ό']), + ('Ύ', &['ύ']), + ('Ώ', &['ώ']), + ('ΐ', &['ΐ']), + ('Α', &['α']), + ('Β', &['β', 'ϐ']), + ('Γ', &['γ']), + ('Δ', &['δ']), + ('Ε', &['ε', 'ϵ']), + ('Ζ', &['ζ']), + ('Η', &['η']), + ('Θ', &['θ', 'ϑ', 'ϴ']), + ('Ι', &['\u{345}', 'ι', 'ι']), + ('Κ', &['κ', 'ϰ']), + ('Λ', &['λ']), + ('Μ', &['µ', 'μ']), + ('Ν', &['ν']), + ('Ξ', &['ξ']), + ('Ο', &['ο']), + ('Π', &['π', 'ϖ']), + ('Ρ', &['ρ', 'ϱ']), + ('Σ', &['ς', 'σ']), + ('Τ', &['τ']), + ('Υ', &['υ']), + ('Φ', &['φ', 'ϕ']), + ('Χ', &['χ']), + ('Ψ', &['ψ']), + ('Ω', &['ω', 'Ω']), + ('Ϊ', &['ϊ']), + ('Ϋ', &['ϋ']), + ('ά', &['Ά']), + ('έ', &['Έ']), + ('ή', &['Ή']), + ('ί', &['Ί']), + ('ΰ', &['ΰ']), + ('α', &['Α']), + ('β', &['Β', 'ϐ']), + ('γ', &['Γ']), + ('δ', &['Δ']), + ('ε', &['Ε', 'ϵ']), + ('ζ', &['Ζ']), + ('η', &['Η']), + ('θ', &['Θ', 'ϑ', 'ϴ']), + ('ι', &['\u{345}', 'Ι', 'ι']), + ('κ', &['Κ', 'ϰ']), + ('λ', &['Λ']), + ('μ', &['µ', 'Μ']), + ('ν', &['Ν']), + ('ξ', &['Ξ']), + ('ο', &['Ο']), + ('π', &['Π', 'ϖ']), + ('ρ', &['Ρ', 'ϱ']), + ('ς', &['Σ', 'σ']), + ('σ', &['Σ', 'ς']), + ('τ', &['Τ']), + ('υ', &['Υ']), + ('φ', &['Φ', 'ϕ']), + ('χ', &['Χ']), + ('ψ', &['Ψ']), + ('ω', &['Ω', 'Ω']), + ('ϊ', &['Ϊ']), + ('ϋ', &['Ϋ']), + ('ό', &['Ό']), + ('ύ', &['Ύ']), + ('ώ', &['Ώ']), + ('Ϗ', &['ϗ']), + ('ϐ', &['Β', 'β']), + ('ϑ', &['Θ', 'θ', 'ϴ']), + ('ϕ', &['Φ', 'φ']), + ('ϖ', &['Π', 'π']), + ('ϗ', &['Ϗ']), + ('Ϙ', &['ϙ']), + ('ϙ', &['Ϙ']), + ('Ϛ', &['ϛ']), + ('ϛ', &['Ϛ']), + ('Ϝ', &['ϝ']), + ('ϝ', &['Ϝ']), + ('Ϟ', &['ϟ']), + ('ϟ', &['Ϟ']), + ('Ϡ', &['ϡ']), + ('ϡ', &['Ϡ']), + ('Ϣ', &['ϣ']), + ('ϣ', &['Ϣ']), + ('Ϥ', &['ϥ']), + ('ϥ', &['Ϥ']), + ('Ϧ', &['ϧ']), + ('ϧ', &['Ϧ']), + ('Ϩ', &['ϩ']), + ('ϩ', &['Ϩ']), + ('Ϫ', &['ϫ']), + ('ϫ', &['Ϫ']), + ('Ϭ', &['ϭ']), + ('ϭ', &['Ϭ']), + ('Ϯ', &['ϯ']), + ('ϯ', &['Ϯ']), + ('ϰ', &['Κ', 'κ']), + ('ϱ', &['Ρ', 'ρ']), + ('ϲ', &['Ϲ']), + ('ϳ', &['Ϳ']), + ('ϴ', &['Θ', 'θ', 'ϑ']), + ('ϵ', &['Ε', 'ε']), + ('Ϸ', &['ϸ']), + ('ϸ', &['Ϸ']), + ('Ϲ', &['ϲ']), + ('Ϻ', &['ϻ']), + ('ϻ', &['Ϻ']), + ('Ͻ', &['ͻ']), + ('Ͼ', &['ͼ']), + ('Ͽ', &['ͽ']), + ('Ѐ', &['ѐ']), + ('Ё', &['ё']), + ('Ђ', &['ђ']), + ('Ѓ', &['ѓ']), + ('Є', &['є']), + ('Ѕ', &['ѕ']), + ('І', &['і']), + ('Ї', &['ї']), + ('Ј', &['ј']), + ('Љ', &['љ']), + ('Њ', &['њ']), 
+ ('Ћ', &['ћ']), + ('Ќ', &['ќ']), + ('Ѝ', &['ѝ']), + ('Ў', &['ў']), + ('Џ', &['џ']), + ('А', &['а']), + ('Б', &['б']), + ('В', &['в', 'ᲀ']), + ('Г', &['г']), + ('Д', &['д', 'ᲁ']), + ('Е', &['е']), + ('Ж', &['ж']), + ('З', &['з']), + ('И', &['и']), + ('Й', &['й']), + ('К', &['к']), + ('Л', &['л']), + ('М', &['м']), + ('Н', &['н']), + ('О', &['о', 'ᲂ']), + ('П', &['п']), + ('Р', &['р']), + ('С', &['с', 'ᲃ']), + ('Т', &['т', 'ᲄ', 'ᲅ']), + ('У', &['у']), + ('Ф', &['ф']), + ('Х', &['х']), + ('Ц', &['ц']), + ('Ч', &['ч']), + ('Ш', &['ш']), + ('Щ', &['щ']), + ('Ъ', &['ъ', 'ᲆ']), + ('Ы', &['ы']), + ('Ь', &['ь']), + ('Э', &['э']), + ('Ю', &['ю']), + ('Я', &['я']), + ('а', &['А']), + ('б', &['Б']), + ('в', &['В', 'ᲀ']), + ('г', &['Г']), + ('д', &['Д', 'ᲁ']), + ('е', &['Е']), + ('ж', &['Ж']), + ('з', &['З']), + ('и', &['И']), + ('й', &['Й']), + ('к', &['К']), + ('л', &['Л']), + ('м', &['М']), + ('н', &['Н']), + ('о', &['О', 'ᲂ']), + ('п', &['П']), + ('р', &['Р']), + ('с', &['С', 'ᲃ']), + ('т', &['Т', 'ᲄ', 'ᲅ']), + ('у', &['У']), + ('ф', &['Ф']), + ('х', &['Х']), + ('ц', &['Ц']), + ('ч', &['Ч']), + ('ш', &['Ш']), + ('щ', &['Щ']), + ('ъ', &['Ъ', 'ᲆ']), + ('ы', &['Ы']), + ('ь', &['Ь']), + ('э', &['Э']), + ('ю', &['Ю']), + ('я', &['Я']), + ('ѐ', &['Ѐ']), + ('ё', &['Ё']), + ('ђ', &['Ђ']), + ('ѓ', &['Ѓ']), + ('є', &['Є']), + ('ѕ', &['Ѕ']), + ('і', &['І']), + ('ї', &['Ї']), + ('ј', &['Ј']), + ('љ', &['Љ']), + ('њ', &['Њ']), + ('ћ', &['Ћ']), + ('ќ', &['Ќ']), + ('ѝ', &['Ѝ']), + ('ў', &['Ў']), + ('џ', &['Џ']), + ('Ѡ', &['ѡ']), + ('ѡ', &['Ѡ']), + ('Ѣ', &['ѣ', 'ᲇ']), + ('ѣ', &['Ѣ', 'ᲇ']), + ('Ѥ', &['ѥ']), + ('ѥ', &['Ѥ']), + ('Ѧ', &['ѧ']), + ('ѧ', &['Ѧ']), + ('Ѩ', &['ѩ']), + ('ѩ', &['Ѩ']), + ('Ѫ', &['ѫ']), + ('ѫ', &['Ѫ']), + ('Ѭ', &['ѭ']), + ('ѭ', &['Ѭ']), + ('Ѯ', &['ѯ']), + ('ѯ', &['Ѯ']), + ('Ѱ', &['ѱ']), + ('ѱ', &['Ѱ']), + ('Ѳ', &['ѳ']), + ('ѳ', &['Ѳ']), + ('Ѵ', &['ѵ']), + ('ѵ', &['Ѵ']), + ('Ѷ', &['ѷ']), + ('ѷ', &['Ѷ']), + ('Ѹ', &['ѹ']), + ('ѹ', &['Ѹ']), + ('Ѻ', &['ѻ']), + ('ѻ', &['Ѻ']), + ('Ѽ', &['ѽ']), + ('ѽ', &['Ѽ']), + ('Ѿ', &['ѿ']), + ('ѿ', &['Ѿ']), + ('Ҁ', &['ҁ']), + ('ҁ', &['Ҁ']), + ('Ҋ', &['ҋ']), + ('ҋ', &['Ҋ']), + ('Ҍ', &['ҍ']), + ('ҍ', &['Ҍ']), + ('Ҏ', &['ҏ']), + ('ҏ', &['Ҏ']), + ('Ґ', &['ґ']), + ('ґ', &['Ґ']), + ('Ғ', &['ғ']), + ('ғ', &['Ғ']), + ('Ҕ', &['ҕ']), + ('ҕ', &['Ҕ']), + ('Җ', &['җ']), + ('җ', &['Җ']), + ('Ҙ', &['ҙ']), + ('ҙ', &['Ҙ']), + ('Қ', &['қ']), + ('қ', &['Қ']), + ('Ҝ', &['ҝ']), + ('ҝ', &['Ҝ']), + ('Ҟ', &['ҟ']), + ('ҟ', &['Ҟ']), + ('Ҡ', &['ҡ']), + ('ҡ', &['Ҡ']), + ('Ң', &['ң']), + ('ң', &['Ң']), + ('Ҥ', &['ҥ']), + ('ҥ', &['Ҥ']), + ('Ҧ', &['ҧ']), + ('ҧ', &['Ҧ']), + ('Ҩ', &['ҩ']), + ('ҩ', &['Ҩ']), + ('Ҫ', &['ҫ']), + ('ҫ', &['Ҫ']), + ('Ҭ', &['ҭ']), + ('ҭ', &['Ҭ']), + ('Ү', &['ү']), + ('ү', &['Ү']), + ('Ұ', &['ұ']), + ('ұ', &['Ұ']), + ('Ҳ', &['ҳ']), + ('ҳ', &['Ҳ']), + ('Ҵ', &['ҵ']), + ('ҵ', &['Ҵ']), + ('Ҷ', &['ҷ']), + ('ҷ', &['Ҷ']), + ('Ҹ', &['ҹ']), + ('ҹ', &['Ҹ']), + ('Һ', &['һ']), + ('һ', &['Һ']), + ('Ҽ', &['ҽ']), + ('ҽ', &['Ҽ']), + ('Ҿ', &['ҿ']), + ('ҿ', &['Ҿ']), + ('Ӏ', &['ӏ']), + ('Ӂ', &['ӂ']), + ('ӂ', &['Ӂ']), + ('Ӄ', &['ӄ']), + ('ӄ', &['Ӄ']), + ('Ӆ', &['ӆ']), + ('ӆ', &['Ӆ']), + ('Ӈ', &['ӈ']), + ('ӈ', &['Ӈ']), + ('Ӊ', &['ӊ']), + ('ӊ', &['Ӊ']), + ('Ӌ', &['ӌ']), + ('ӌ', &['Ӌ']), + ('Ӎ', &['ӎ']), + ('ӎ', &['Ӎ']), + ('ӏ', &['Ӏ']), + ('Ӑ', &['ӑ']), + ('ӑ', &['Ӑ']), + ('Ӓ', &['ӓ']), + ('ӓ', &['Ӓ']), + ('Ӕ', &['ӕ']), + ('ӕ', &['Ӕ']), + ('Ӗ', &['ӗ']), + ('ӗ', &['Ӗ']), + ('Ә', &['ә']), + ('ә', &['Ә']), + ('Ӛ', &['ӛ']), + ('ӛ', &['Ӛ']), + ('Ӝ', &['ӝ']), + ('ӝ', &['Ӝ']), + ('Ӟ', &['ӟ']), + 
('ӟ', &['Ӟ']), + ('Ӡ', &['ӡ']), + ('ӡ', &['Ӡ']), + ('Ӣ', &['ӣ']), + ('ӣ', &['Ӣ']), + ('Ӥ', &['ӥ']), + ('ӥ', &['Ӥ']), + ('Ӧ', &['ӧ']), + ('ӧ', &['Ӧ']), + ('Ө', &['ө']), + ('ө', &['Ө']), + ('Ӫ', &['ӫ']), + ('ӫ', &['Ӫ']), + ('Ӭ', &['ӭ']), + ('ӭ', &['Ӭ']), + ('Ӯ', &['ӯ']), + ('ӯ', &['Ӯ']), + ('Ӱ', &['ӱ']), + ('ӱ', &['Ӱ']), + ('Ӳ', &['ӳ']), + ('ӳ', &['Ӳ']), + ('Ӵ', &['ӵ']), + ('ӵ', &['Ӵ']), + ('Ӷ', &['ӷ']), + ('ӷ', &['Ӷ']), + ('Ӹ', &['ӹ']), + ('ӹ', &['Ӹ']), + ('Ӻ', &['ӻ']), + ('ӻ', &['Ӻ']), + ('Ӽ', &['ӽ']), + ('ӽ', &['Ӽ']), + ('Ӿ', &['ӿ']), + ('ӿ', &['Ӿ']), + ('Ԁ', &['ԁ']), + ('ԁ', &['Ԁ']), + ('Ԃ', &['ԃ']), + ('ԃ', &['Ԃ']), + ('Ԅ', &['ԅ']), + ('ԅ', &['Ԅ']), + ('Ԇ', &['ԇ']), + ('ԇ', &['Ԇ']), + ('Ԉ', &['ԉ']), + ('ԉ', &['Ԉ']), + ('Ԋ', &['ԋ']), + ('ԋ', &['Ԋ']), + ('Ԍ', &['ԍ']), + ('ԍ', &['Ԍ']), + ('Ԏ', &['ԏ']), + ('ԏ', &['Ԏ']), + ('Ԑ', &['ԑ']), + ('ԑ', &['Ԑ']), + ('Ԓ', &['ԓ']), + ('ԓ', &['Ԓ']), + ('Ԕ', &['ԕ']), + ('ԕ', &['Ԕ']), + ('Ԗ', &['ԗ']), + ('ԗ', &['Ԗ']), + ('Ԙ', &['ԙ']), + ('ԙ', &['Ԙ']), + ('Ԛ', &['ԛ']), + ('ԛ', &['Ԛ']), + ('Ԝ', &['ԝ']), + ('ԝ', &['Ԝ']), + ('Ԟ', &['ԟ']), + ('ԟ', &['Ԟ']), + ('Ԡ', &['ԡ']), + ('ԡ', &['Ԡ']), + ('Ԣ', &['ԣ']), + ('ԣ', &['Ԣ']), + ('Ԥ', &['ԥ']), + ('ԥ', &['Ԥ']), + ('Ԧ', &['ԧ']), + ('ԧ', &['Ԧ']), + ('Ԩ', &['ԩ']), + ('ԩ', &['Ԩ']), + ('Ԫ', &['ԫ']), + ('ԫ', &['Ԫ']), + ('Ԭ', &['ԭ']), + ('ԭ', &['Ԭ']), + ('Ԯ', &['ԯ']), + ('ԯ', &['Ԯ']), + ('Ա', &['ա']), + ('Բ', &['բ']), + ('Գ', &['գ']), + ('Դ', &['դ']), + ('Ե', &['ե']), + ('Զ', &['զ']), + ('Է', &['է']), + ('Ը', &['ը']), + ('Թ', &['թ']), + ('Ժ', &['ժ']), + ('Ի', &['ի']), + ('Լ', &['լ']), + ('Խ', &['խ']), + ('Ծ', &['ծ']), + ('Կ', &['կ']), + ('Հ', &['հ']), + ('Ձ', &['ձ']), + ('Ղ', &['ղ']), + ('Ճ', &['ճ']), + ('Մ', &['մ']), + ('Յ', &['յ']), + ('Ն', &['ն']), + ('Շ', &['շ']), + ('Ո', &['ո']), + ('Չ', &['չ']), + ('Պ', &['պ']), + ('Ջ', &['ջ']), + ('Ռ', &['ռ']), + ('Ս', &['ս']), + ('Վ', &['վ']), + ('Տ', &['տ']), + ('Ր', &['ր']), + ('Ց', &['ց']), + ('Ւ', &['ւ']), + ('Փ', &['փ']), + ('Ք', &['ք']), + ('Օ', &['օ']), + ('Ֆ', &['ֆ']), + ('ա', &['Ա']), + ('բ', &['Բ']), + ('գ', &['Գ']), + ('դ', &['Դ']), + ('ե', &['Ե']), + ('զ', &['Զ']), + ('է', &['Է']), + ('ը', &['Ը']), + ('թ', &['Թ']), + ('ժ', &['Ժ']), + ('ի', &['Ի']), + ('լ', &['Լ']), + ('խ', &['Խ']), + ('ծ', &['Ծ']), + ('կ', &['Կ']), + ('հ', &['Հ']), + ('ձ', &['Ձ']), + ('ղ', &['Ղ']), + ('ճ', &['Ճ']), + ('մ', &['Մ']), + ('յ', &['Յ']), + ('ն', &['Ն']), + ('շ', &['Շ']), + ('ո', &['Ո']), + ('չ', &['Չ']), + ('պ', &['Պ']), + ('ջ', &['Ջ']), + ('ռ', &['Ռ']), + ('ս', &['Ս']), + ('վ', &['Վ']), + ('տ', &['Տ']), + ('ր', &['Ր']), + ('ց', &['Ց']), + ('ւ', &['Ւ']), + ('փ', &['Փ']), + ('ք', &['Ք']), + ('օ', &['Օ']), + ('ֆ', &['Ֆ']), + ('Ⴀ', &['ⴀ']), + ('Ⴁ', &['ⴁ']), + ('Ⴂ', &['ⴂ']), + ('Ⴃ', &['ⴃ']), + ('Ⴄ', &['ⴄ']), + ('Ⴅ', &['ⴅ']), + ('Ⴆ', &['ⴆ']), + ('Ⴇ', &['ⴇ']), + ('Ⴈ', &['ⴈ']), + ('Ⴉ', &['ⴉ']), + ('Ⴊ', &['ⴊ']), + ('Ⴋ', &['ⴋ']), + ('Ⴌ', &['ⴌ']), + ('Ⴍ', &['ⴍ']), + ('Ⴎ', &['ⴎ']), + ('Ⴏ', &['ⴏ']), + ('Ⴐ', &['ⴐ']), + ('Ⴑ', &['ⴑ']), + ('Ⴒ', &['ⴒ']), + ('Ⴓ', &['ⴓ']), + ('Ⴔ', &['ⴔ']), + ('Ⴕ', &['ⴕ']), + ('Ⴖ', &['ⴖ']), + ('Ⴗ', &['ⴗ']), + ('Ⴘ', &['ⴘ']), + ('Ⴙ', &['ⴙ']), + ('Ⴚ', &['ⴚ']), + ('Ⴛ', &['ⴛ']), + ('Ⴜ', &['ⴜ']), + ('Ⴝ', &['ⴝ']), + ('Ⴞ', &['ⴞ']), + ('Ⴟ', &['ⴟ']), + ('Ⴠ', &['ⴠ']), + ('Ⴡ', &['ⴡ']), + ('Ⴢ', &['ⴢ']), + ('Ⴣ', &['ⴣ']), + ('Ⴤ', &['ⴤ']), + ('Ⴥ', &['ⴥ']), + ('Ⴧ', &['ⴧ']), + ('Ⴭ', &['ⴭ']), + ('ა', &['Ა']), + ('ბ', &['Ბ']), + ('გ', &['Გ']), + ('დ', &['Დ']), + ('ე', &['Ე']), + ('ვ', &['Ვ']), + ('ზ', &['Ზ']), + ('თ', &['Თ']), + ('ი', &['Ი']), + ('კ', &['Კ']), + ('ლ', &['Ლ']), + ('მ', &['Მ']), + 
('ნ', &['Ნ']), + ('ო', &['Ო']), + ('პ', &['Პ']), + ('ჟ', &['Ჟ']), + ('რ', &['Რ']), + ('ს', &['Ს']), + ('ტ', &['Ტ']), + ('უ', &['Უ']), + ('ფ', &['Ფ']), + ('ქ', &['Ქ']), + ('ღ', &['Ღ']), + ('ყ', &['Ყ']), + ('შ', &['Შ']), + ('ჩ', &['Ჩ']), + ('ც', &['Ც']), + ('ძ', &['Ძ']), + ('წ', &['Წ']), + ('ჭ', &['Ჭ']), + ('ხ', &['Ხ']), + ('ჯ', &['Ჯ']), + ('ჰ', &['Ჰ']), + ('ჱ', &['Ჱ']), + ('ჲ', &['Ჲ']), + ('ჳ', &['Ჳ']), + ('ჴ', &['Ჴ']), + ('ჵ', &['Ჵ']), + ('ჶ', &['Ჶ']), + ('ჷ', &['Ჷ']), + ('ჸ', &['Ჸ']), + ('ჹ', &['Ჹ']), + ('ჺ', &['Ჺ']), + ('ჽ', &['Ჽ']), + ('ჾ', &['Ჾ']), + ('ჿ', &['Ჿ']), + ('Ꭰ', &['ꭰ']), + ('Ꭱ', &['ꭱ']), + ('Ꭲ', &['ꭲ']), + ('Ꭳ', &['ꭳ']), + ('Ꭴ', &['ꭴ']), + ('Ꭵ', &['ꭵ']), + ('Ꭶ', &['ꭶ']), + ('Ꭷ', &['ꭷ']), + ('Ꭸ', &['ꭸ']), + ('Ꭹ', &['ꭹ']), + ('Ꭺ', &['ꭺ']), + ('Ꭻ', &['ꭻ']), + ('Ꭼ', &['ꭼ']), + ('Ꭽ', &['ꭽ']), + ('Ꭾ', &['ꭾ']), + ('Ꭿ', &['ꭿ']), + ('Ꮀ', &['ꮀ']), + ('Ꮁ', &['ꮁ']), + ('Ꮂ', &['ꮂ']), + ('Ꮃ', &['ꮃ']), + ('Ꮄ', &['ꮄ']), + ('Ꮅ', &['ꮅ']), + ('Ꮆ', &['ꮆ']), + ('Ꮇ', &['ꮇ']), + ('Ꮈ', &['ꮈ']), + ('Ꮉ', &['ꮉ']), + ('Ꮊ', &['ꮊ']), + ('Ꮋ', &['ꮋ']), + ('Ꮌ', &['ꮌ']), + ('Ꮍ', &['ꮍ']), + ('Ꮎ', &['ꮎ']), + ('Ꮏ', &['ꮏ']), + ('Ꮐ', &['ꮐ']), + ('Ꮑ', &['ꮑ']), + ('Ꮒ', &['ꮒ']), + ('Ꮓ', &['ꮓ']), + ('Ꮔ', &['ꮔ']), + ('Ꮕ', &['ꮕ']), + ('Ꮖ', &['ꮖ']), + ('Ꮗ', &['ꮗ']), + ('Ꮘ', &['ꮘ']), + ('Ꮙ', &['ꮙ']), + ('Ꮚ', &['ꮚ']), + ('Ꮛ', &['ꮛ']), + ('Ꮜ', &['ꮜ']), + ('Ꮝ', &['ꮝ']), + ('Ꮞ', &['ꮞ']), + ('Ꮟ', &['ꮟ']), + ('Ꮠ', &['ꮠ']), + ('Ꮡ', &['ꮡ']), + ('Ꮢ', &['ꮢ']), + ('Ꮣ', &['ꮣ']), + ('Ꮤ', &['ꮤ']), + ('Ꮥ', &['ꮥ']), + ('Ꮦ', &['ꮦ']), + ('Ꮧ', &['ꮧ']), + ('Ꮨ', &['ꮨ']), + ('Ꮩ', &['ꮩ']), + ('Ꮪ', &['ꮪ']), + ('Ꮫ', &['ꮫ']), + ('Ꮬ', &['ꮬ']), + ('Ꮭ', &['ꮭ']), + ('Ꮮ', &['ꮮ']), + ('Ꮯ', &['ꮯ']), + ('Ꮰ', &['ꮰ']), + ('Ꮱ', &['ꮱ']), + ('Ꮲ', &['ꮲ']), + ('Ꮳ', &['ꮳ']), + ('Ꮴ', &['ꮴ']), + ('Ꮵ', &['ꮵ']), + ('Ꮶ', &['ꮶ']), + ('Ꮷ', &['ꮷ']), + ('Ꮸ', &['ꮸ']), + ('Ꮹ', &['ꮹ']), + ('Ꮺ', &['ꮺ']), + ('Ꮻ', &['ꮻ']), + ('Ꮼ', &['ꮼ']), + ('Ꮽ', &['ꮽ']), + ('Ꮾ', &['ꮾ']), + ('Ꮿ', &['ꮿ']), + ('Ᏸ', &['ᏸ']), + ('Ᏹ', &['ᏹ']), + ('Ᏺ', &['ᏺ']), + ('Ᏻ', &['ᏻ']), + ('Ᏼ', &['ᏼ']), + ('Ᏽ', &['ᏽ']), + ('ᏸ', &['Ᏸ']), + ('ᏹ', &['Ᏹ']), + ('ᏺ', &['Ᏺ']), + ('ᏻ', &['Ᏻ']), + ('ᏼ', &['Ᏼ']), + ('ᏽ', &['Ᏽ']), + ('ᲀ', &['В', 'в']), + ('ᲁ', &['Д', 'д']), + ('ᲂ', &['О', 'о']), + ('ᲃ', &['С', 'с']), + ('ᲄ', &['Т', 'т', 'ᲅ']), + ('ᲅ', &['Т', 'т', 'ᲄ']), + ('ᲆ', &['Ъ', 'ъ']), + ('ᲇ', &['Ѣ', 'ѣ']), + ('ᲈ', &['Ꙋ', 'ꙋ']), + ('Ᲊ', &['ᲊ']), + ('ᲊ', &['Ᲊ']), + ('Ა', &['ა']), + ('Ბ', &['ბ']), + ('Გ', &['გ']), + ('Დ', &['დ']), + ('Ე', &['ე']), + ('Ვ', &['ვ']), + ('Ზ', &['ზ']), + ('Თ', &['თ']), + ('Ი', &['ი']), + ('Კ', &['კ']), + ('Ლ', &['ლ']), + ('Მ', &['მ']), + ('Ნ', &['ნ']), + ('Ო', &['ო']), + ('Პ', &['პ']), + ('Ჟ', &['ჟ']), + ('Რ', &['რ']), + ('Ს', &['ს']), + ('Ტ', &['ტ']), + ('Უ', &['უ']), + ('Ფ', &['ფ']), + ('Ქ', &['ქ']), + ('Ღ', &['ღ']), + ('Ყ', &['ყ']), + ('Შ', &['შ']), + ('Ჩ', &['ჩ']), + ('Ც', &['ც']), + ('Ძ', &['ძ']), + ('Წ', &['წ']), + ('Ჭ', &['ჭ']), + ('Ხ', &['ხ']), + ('Ჯ', &['ჯ']), + ('Ჰ', &['ჰ']), + ('Ჱ', &['ჱ']), + ('Ჲ', &['ჲ']), + ('Ჳ', &['ჳ']), + ('Ჴ', &['ჴ']), + ('Ჵ', &['ჵ']), + ('Ჶ', &['ჶ']), + ('Ჷ', &['ჷ']), + ('Ჸ', &['ჸ']), + ('Ჹ', &['ჹ']), + ('Ჺ', &['ჺ']), + ('Ჽ', &['ჽ']), + ('Ჾ', &['ჾ']), + ('Ჿ', &['ჿ']), + ('ᵹ', &['Ᵹ']), + ('ᵽ', &['Ᵽ']), + ('ᶎ', &['Ᶎ']), + ('Ḁ', &['ḁ']), + ('ḁ', &['Ḁ']), + ('Ḃ', &['ḃ']), + ('ḃ', &['Ḃ']), + ('Ḅ', &['ḅ']), + ('ḅ', &['Ḅ']), + ('Ḇ', &['ḇ']), + ('ḇ', &['Ḇ']), + ('Ḉ', &['ḉ']), + ('ḉ', &['Ḉ']), + ('Ḋ', &['ḋ']), + ('ḋ', &['Ḋ']), + ('Ḍ', &['ḍ']), + ('ḍ', &['Ḍ']), + ('Ḏ', &['ḏ']), + ('ḏ', &['Ḏ']), + ('Ḑ', &['ḑ']), + ('ḑ', &['Ḑ']), + ('Ḓ', &['ḓ']), + ('ḓ', &['Ḓ']), 
+ ('Ḕ', &['ḕ']), + ('ḕ', &['Ḕ']), + ('Ḗ', &['ḗ']), + ('ḗ', &['Ḗ']), + ('Ḙ', &['ḙ']), + ('ḙ', &['Ḙ']), + ('Ḛ', &['ḛ']), + ('ḛ', &['Ḛ']), + ('Ḝ', &['ḝ']), + ('ḝ', &['Ḝ']), + ('Ḟ', &['ḟ']), + ('ḟ', &['Ḟ']), + ('Ḡ', &['ḡ']), + ('ḡ', &['Ḡ']), + ('Ḣ', &['ḣ']), + ('ḣ', &['Ḣ']), + ('Ḥ', &['ḥ']), + ('ḥ', &['Ḥ']), + ('Ḧ', &['ḧ']), + ('ḧ', &['Ḧ']), + ('Ḩ', &['ḩ']), + ('ḩ', &['Ḩ']), + ('Ḫ', &['ḫ']), + ('ḫ', &['Ḫ']), + ('Ḭ', &['ḭ']), + ('ḭ', &['Ḭ']), + ('Ḯ', &['ḯ']), + ('ḯ', &['Ḯ']), + ('Ḱ', &['ḱ']), + ('ḱ', &['Ḱ']), + ('Ḳ', &['ḳ']), + ('ḳ', &['Ḳ']), + ('Ḵ', &['ḵ']), + ('ḵ', &['Ḵ']), + ('Ḷ', &['ḷ']), + ('ḷ', &['Ḷ']), + ('Ḹ', &['ḹ']), + ('ḹ', &['Ḹ']), + ('Ḻ', &['ḻ']), + ('ḻ', &['Ḻ']), + ('Ḽ', &['ḽ']), + ('ḽ', &['Ḽ']), + ('Ḿ', &['ḿ']), + ('ḿ', &['Ḿ']), + ('Ṁ', &['ṁ']), + ('ṁ', &['Ṁ']), + ('Ṃ', &['ṃ']), + ('ṃ', &['Ṃ']), + ('Ṅ', &['ṅ']), + ('ṅ', &['Ṅ']), + ('Ṇ', &['ṇ']), + ('ṇ', &['Ṇ']), + ('Ṉ', &['ṉ']), + ('ṉ', &['Ṉ']), + ('Ṋ', &['ṋ']), + ('ṋ', &['Ṋ']), + ('Ṍ', &['ṍ']), + ('ṍ', &['Ṍ']), + ('Ṏ', &['ṏ']), + ('ṏ', &['Ṏ']), + ('Ṑ', &['ṑ']), + ('ṑ', &['Ṑ']), + ('Ṓ', &['ṓ']), + ('ṓ', &['Ṓ']), + ('Ṕ', &['ṕ']), + ('ṕ', &['Ṕ']), + ('Ṗ', &['ṗ']), + ('ṗ', &['Ṗ']), + ('Ṙ', &['ṙ']), + ('ṙ', &['Ṙ']), + ('Ṛ', &['ṛ']), + ('ṛ', &['Ṛ']), + ('Ṝ', &['ṝ']), + ('ṝ', &['Ṝ']), + ('Ṟ', &['ṟ']), + ('ṟ', &['Ṟ']), + ('Ṡ', &['ṡ', 'ẛ']), + ('ṡ', &['Ṡ', 'ẛ']), + ('Ṣ', &['ṣ']), + ('ṣ', &['Ṣ']), + ('Ṥ', &['ṥ']), + ('ṥ', &['Ṥ']), + ('Ṧ', &['ṧ']), + ('ṧ', &['Ṧ']), + ('Ṩ', &['ṩ']), + ('ṩ', &['Ṩ']), + ('Ṫ', &['ṫ']), + ('ṫ', &['Ṫ']), + ('Ṭ', &['ṭ']), + ('ṭ', &['Ṭ']), + ('Ṯ', &['ṯ']), + ('ṯ', &['Ṯ']), + ('Ṱ', &['ṱ']), + ('ṱ', &['Ṱ']), + ('Ṳ', &['ṳ']), + ('ṳ', &['Ṳ']), + ('Ṵ', &['ṵ']), + ('ṵ', &['Ṵ']), + ('Ṷ', &['ṷ']), + ('ṷ', &['Ṷ']), + ('Ṹ', &['ṹ']), + ('ṹ', &['Ṹ']), + ('Ṻ', &['ṻ']), + ('ṻ', &['Ṻ']), + ('Ṽ', &['ṽ']), + ('ṽ', &['Ṽ']), + ('Ṿ', &['ṿ']), + ('ṿ', &['Ṿ']), + ('Ẁ', &['ẁ']), + ('ẁ', &['Ẁ']), + ('Ẃ', &['ẃ']), + ('ẃ', &['Ẃ']), + ('Ẅ', &['ẅ']), + ('ẅ', &['Ẅ']), + ('Ẇ', &['ẇ']), + ('ẇ', &['Ẇ']), + ('Ẉ', &['ẉ']), + ('ẉ', &['Ẉ']), + ('Ẋ', &['ẋ']), + ('ẋ', &['Ẋ']), + ('Ẍ', &['ẍ']), + ('ẍ', &['Ẍ']), + ('Ẏ', &['ẏ']), + ('ẏ', &['Ẏ']), + ('Ẑ', &['ẑ']), + ('ẑ', &['Ẑ']), + ('Ẓ', &['ẓ']), + ('ẓ', &['Ẓ']), + ('Ẕ', &['ẕ']), + ('ẕ', &['Ẕ']), + ('ẛ', &['Ṡ', 'ṡ']), + ('ẞ', &['ß']), + ('Ạ', &['ạ']), + ('ạ', &['Ạ']), + ('Ả', &['ả']), + ('ả', &['Ả']), + ('Ấ', &['ấ']), + ('ấ', &['Ấ']), + ('Ầ', &['ầ']), + ('ầ', &['Ầ']), + ('Ẩ', &['ẩ']), + ('ẩ', &['Ẩ']), + ('Ẫ', &['ẫ']), + ('ẫ', &['Ẫ']), + ('Ậ', &['ậ']), + ('ậ', &['Ậ']), + ('Ắ', &['ắ']), + ('ắ', &['Ắ']), + ('Ằ', &['ằ']), + ('ằ', &['Ằ']), + ('Ẳ', &['ẳ']), + ('ẳ', &['Ẳ']), + ('Ẵ', &['ẵ']), + ('ẵ', &['Ẵ']), + ('Ặ', &['ặ']), + ('ặ', &['Ặ']), + ('Ẹ', &['ẹ']), + ('ẹ', &['Ẹ']), + ('Ẻ', &['ẻ']), + ('ẻ', &['Ẻ']), + ('Ẽ', &['ẽ']), + ('ẽ', &['Ẽ']), + ('Ế', &['ế']), + ('ế', &['Ế']), + ('Ề', &['ề']), + ('ề', &['Ề']), + ('Ể', &['ể']), + ('ể', &['Ể']), + ('Ễ', &['ễ']), + ('ễ', &['Ễ']), + ('Ệ', &['ệ']), + ('ệ', &['Ệ']), + ('Ỉ', &['ỉ']), + ('ỉ', &['Ỉ']), + ('Ị', &['ị']), + ('ị', &['Ị']), + ('Ọ', &['ọ']), + ('ọ', &['Ọ']), + ('Ỏ', &['ỏ']), + ('ỏ', &['Ỏ']), + ('Ố', &['ố']), + ('ố', &['Ố']), + ('Ồ', &['ồ']), + ('ồ', &['Ồ']), + ('Ổ', &['ổ']), + ('ổ', &['Ổ']), + ('Ỗ', &['ỗ']), + ('ỗ', &['Ỗ']), + ('Ộ', &['ộ']), + ('ộ', &['Ộ']), + ('Ớ', &['ớ']), + ('ớ', &['Ớ']), + ('Ờ', &['ờ']), + ('ờ', &['Ờ']), + ('Ở', &['ở']), + ('ở', &['Ở']), + ('Ỡ', &['ỡ']), + ('ỡ', &['Ỡ']), + ('Ợ', &['ợ']), + ('ợ', &['Ợ']), + ('Ụ', &['ụ']), + ('ụ', &['Ụ']), + ('Ủ', &['ủ']), + ('ủ', &['Ủ']), + ('Ứ', &['ứ']), + ('ứ', &['Ứ']), + ('Ừ', &['ừ']), + ('ừ', &['Ừ']), + 
('Ử', &['ử']), + ('ử', &['Ử']), + ('Ữ', &['ữ']), + ('ữ', &['Ữ']), + ('Ự', &['ự']), + ('ự', &['Ự']), + ('Ỳ', &['ỳ']), + ('ỳ', &['Ỳ']), + ('Ỵ', &['ỵ']), + ('ỵ', &['Ỵ']), + ('Ỷ', &['ỷ']), + ('ỷ', &['Ỷ']), + ('Ỹ', &['ỹ']), + ('ỹ', &['Ỹ']), + ('Ỻ', &['ỻ']), + ('ỻ', &['Ỻ']), + ('Ỽ', &['ỽ']), + ('ỽ', &['Ỽ']), + ('Ỿ', &['ỿ']), + ('ỿ', &['Ỿ']), + ('ἀ', &['Ἀ']), + ('ἁ', &['Ἁ']), + ('ἂ', &['Ἂ']), + ('ἃ', &['Ἃ']), + ('ἄ', &['Ἄ']), + ('ἅ', &['Ἅ']), + ('ἆ', &['Ἆ']), + ('ἇ', &['Ἇ']), + ('Ἀ', &['ἀ']), + ('Ἁ', &['ἁ']), + ('Ἂ', &['ἂ']), + ('Ἃ', &['ἃ']), + ('Ἄ', &['ἄ']), + ('Ἅ', &['ἅ']), + ('Ἆ', &['ἆ']), + ('Ἇ', &['ἇ']), + ('ἐ', &['Ἐ']), + ('ἑ', &['Ἑ']), + ('ἒ', &['Ἒ']), + ('ἓ', &['Ἓ']), + ('ἔ', &['Ἔ']), + ('ἕ', &['Ἕ']), + ('Ἐ', &['ἐ']), + ('Ἑ', &['ἑ']), + ('Ἒ', &['ἒ']), + ('Ἓ', &['ἓ']), + ('Ἔ', &['ἔ']), + ('Ἕ', &['ἕ']), + ('ἠ', &['Ἠ']), + ('ἡ', &['Ἡ']), + ('ἢ', &['Ἢ']), + ('ἣ', &['Ἣ']), + ('ἤ', &['Ἤ']), + ('ἥ', &['Ἥ']), + ('ἦ', &['Ἦ']), + ('ἧ', &['Ἧ']), + ('Ἠ', &['ἠ']), + ('Ἡ', &['ἡ']), + ('Ἢ', &['ἢ']), + ('Ἣ', &['ἣ']), + ('Ἤ', &['ἤ']), + ('Ἥ', &['ἥ']), + ('Ἦ', &['ἦ']), + ('Ἧ', &['ἧ']), + ('ἰ', &['Ἰ']), + ('ἱ', &['Ἱ']), + ('ἲ', &['Ἲ']), + ('ἳ', &['Ἳ']), + ('ἴ', &['Ἴ']), + ('ἵ', &['Ἵ']), + ('ἶ', &['Ἶ']), + ('ἷ', &['Ἷ']), + ('Ἰ', &['ἰ']), + ('Ἱ', &['ἱ']), + ('Ἲ', &['ἲ']), + ('Ἳ', &['ἳ']), + ('Ἴ', &['ἴ']), + ('Ἵ', &['ἵ']), + ('Ἶ', &['ἶ']), + ('Ἷ', &['ἷ']), + ('ὀ', &['Ὀ']), + ('ὁ', &['Ὁ']), + ('ὂ', &['Ὂ']), + ('ὃ', &['Ὃ']), + ('ὄ', &['Ὄ']), + ('ὅ', &['Ὅ']), + ('Ὀ', &['ὀ']), + ('Ὁ', &['ὁ']), + ('Ὂ', &['ὂ']), + ('Ὃ', &['ὃ']), + ('Ὄ', &['ὄ']), + ('Ὅ', &['ὅ']), + ('ὑ', &['Ὑ']), + ('ὓ', &['Ὓ']), + ('ὕ', &['Ὕ']), + ('ὗ', &['Ὗ']), + ('Ὑ', &['ὑ']), + ('Ὓ', &['ὓ']), + ('Ὕ', &['ὕ']), + ('Ὗ', &['ὗ']), + ('ὠ', &['Ὠ']), + ('ὡ', &['Ὡ']), + ('ὢ', &['Ὢ']), + ('ὣ', &['Ὣ']), + ('ὤ', &['Ὤ']), + ('ὥ', &['Ὥ']), + ('ὦ', &['Ὦ']), + ('ὧ', &['Ὧ']), + ('Ὠ', &['ὠ']), + ('Ὡ', &['ὡ']), + ('Ὢ', &['ὢ']), + ('Ὣ', &['ὣ']), + ('Ὤ', &['ὤ']), + ('Ὥ', &['ὥ']), + ('Ὦ', &['ὦ']), + ('Ὧ', &['ὧ']), + ('ὰ', &['Ὰ']), + ('ά', &['Ά']), + ('ὲ', &['Ὲ']), + ('έ', &['Έ']), + ('ὴ', &['Ὴ']), + ('ή', &['Ή']), + ('ὶ', &['Ὶ']), + ('ί', &['Ί']), + ('ὸ', &['Ὸ']), + ('ό', &['Ό']), + ('ὺ', &['Ὺ']), + ('ύ', &['Ύ']), + ('ὼ', &['Ὼ']), + ('ώ', &['Ώ']), + ('ᾀ', &['ᾈ']), + ('ᾁ', &['ᾉ']), + ('ᾂ', &['ᾊ']), + ('ᾃ', &['ᾋ']), + ('ᾄ', &['ᾌ']), + ('ᾅ', &['ᾍ']), + ('ᾆ', &['ᾎ']), + ('ᾇ', &['ᾏ']), + ('ᾈ', &['ᾀ']), + ('ᾉ', &['ᾁ']), + ('ᾊ', &['ᾂ']), + ('ᾋ', &['ᾃ']), + ('ᾌ', &['ᾄ']), + ('ᾍ', &['ᾅ']), + ('ᾎ', &['ᾆ']), + ('ᾏ', &['ᾇ']), + ('ᾐ', &['ᾘ']), + ('ᾑ', &['ᾙ']), + ('ᾒ', &['ᾚ']), + ('ᾓ', &['ᾛ']), + ('ᾔ', &['ᾜ']), + ('ᾕ', &['ᾝ']), + ('ᾖ', &['ᾞ']), + ('ᾗ', &['ᾟ']), + ('ᾘ', &['ᾐ']), + ('ᾙ', &['ᾑ']), + ('ᾚ', &['ᾒ']), + ('ᾛ', &['ᾓ']), + ('ᾜ', &['ᾔ']), + ('ᾝ', &['ᾕ']), + ('ᾞ', &['ᾖ']), + ('ᾟ', &['ᾗ']), + ('ᾠ', &['ᾨ']), + ('ᾡ', &['ᾩ']), + ('ᾢ', &['ᾪ']), + ('ᾣ', &['ᾫ']), + ('ᾤ', &['ᾬ']), + ('ᾥ', &['ᾭ']), + ('ᾦ', &['ᾮ']), + ('ᾧ', &['ᾯ']), + ('ᾨ', &['ᾠ']), + ('ᾩ', &['ᾡ']), + ('ᾪ', &['ᾢ']), + ('ᾫ', &['ᾣ']), + ('ᾬ', &['ᾤ']), + ('ᾭ', &['ᾥ']), + ('ᾮ', &['ᾦ']), + ('ᾯ', &['ᾧ']), + ('ᾰ', &['Ᾰ']), + ('ᾱ', &['Ᾱ']), + ('ᾳ', &['ᾼ']), + ('Ᾰ', &['ᾰ']), + ('Ᾱ', &['ᾱ']), + ('Ὰ', &['ὰ']), + ('Ά', &['ά']), + ('ᾼ', &['ᾳ']), + ('ι', &['\u{345}', 'Ι', 'ι']), + ('ῃ', &['ῌ']), + ('Ὲ', &['ὲ']), + ('Έ', &['έ']), + ('Ὴ', &['ὴ']), + ('Ή', &['ή']), + ('ῌ', &['ῃ']), + ('ῐ', &['Ῐ']), + ('ῑ', &['Ῑ']), + ('ΐ', &['ΐ']), + ('Ῐ', &['ῐ']), + ('Ῑ', &['ῑ']), + ('Ὶ', &['ὶ']), + ('Ί', &['ί']), + ('ῠ', &['Ῠ']), + ('ῡ', &['Ῡ']), + ('ΰ', &['ΰ']), + ('ῥ', &['Ῥ']), + ('Ῠ', &['ῠ']), + ('Ῡ', &['ῡ']), + ('Ὺ', &['ὺ']), + ('Ύ', &['ύ']), + 
('Ῥ', &['ῥ']), + ('ῳ', &['ῼ']), + ('Ὸ', &['ὸ']), + ('Ό', &['ό']), + ('Ὼ', &['ὼ']), + ('Ώ', &['ώ']), + ('ῼ', &['ῳ']), + ('Ω', &['Ω', 'ω']), + ('K', &['K', 'k']), + ('Å', &['Å', 'å']), + ('Ⅎ', &['ⅎ']), + ('ⅎ', &['Ⅎ']), + ('Ⅰ', &['ⅰ']), + ('Ⅱ', &['ⅱ']), + ('Ⅲ', &['ⅲ']), + ('Ⅳ', &['ⅳ']), + ('Ⅴ', &['ⅴ']), + ('Ⅵ', &['ⅵ']), + ('Ⅶ', &['ⅶ']), + ('Ⅷ', &['ⅷ']), + ('Ⅸ', &['ⅸ']), + ('Ⅹ', &['ⅹ']), + ('Ⅺ', &['ⅺ']), + ('Ⅻ', &['ⅻ']), + ('Ⅼ', &['ⅼ']), + ('Ⅽ', &['ⅽ']), + ('Ⅾ', &['ⅾ']), + ('Ⅿ', &['ⅿ']), + ('ⅰ', &['Ⅰ']), + ('ⅱ', &['Ⅱ']), + ('ⅲ', &['Ⅲ']), + ('ⅳ', &['Ⅳ']), + ('ⅴ', &['Ⅴ']), + ('ⅵ', &['Ⅵ']), + ('ⅶ', &['Ⅶ']), + ('ⅷ', &['Ⅷ']), + ('ⅸ', &['Ⅸ']), + ('ⅹ', &['Ⅹ']), + ('ⅺ', &['Ⅺ']), + ('ⅻ', &['Ⅻ']), + ('ⅼ', &['Ⅼ']), + ('ⅽ', &['Ⅽ']), + ('ⅾ', &['Ⅾ']), + ('ⅿ', &['Ⅿ']), + ('Ↄ', &['ↄ']), + ('ↄ', &['Ↄ']), + ('Ⓐ', &['ⓐ']), + ('Ⓑ', &['ⓑ']), + ('Ⓒ', &['ⓒ']), + ('Ⓓ', &['ⓓ']), + ('Ⓔ', &['ⓔ']), + ('Ⓕ', &['ⓕ']), + ('Ⓖ', &['ⓖ']), + ('Ⓗ', &['ⓗ']), + ('Ⓘ', &['ⓘ']), + ('Ⓙ', &['ⓙ']), + ('Ⓚ', &['ⓚ']), + ('Ⓛ', &['ⓛ']), + ('Ⓜ', &['ⓜ']), + ('Ⓝ', &['ⓝ']), + ('Ⓞ', &['ⓞ']), + ('Ⓟ', &['ⓟ']), + ('Ⓠ', &['ⓠ']), + ('Ⓡ', &['ⓡ']), + ('Ⓢ', &['ⓢ']), + ('Ⓣ', &['ⓣ']), + ('Ⓤ', &['ⓤ']), + ('Ⓥ', &['ⓥ']), + ('Ⓦ', &['ⓦ']), + ('Ⓧ', &['ⓧ']), + ('Ⓨ', &['ⓨ']), + ('Ⓩ', &['ⓩ']), + ('ⓐ', &['Ⓐ']), + ('ⓑ', &['Ⓑ']), + ('ⓒ', &['Ⓒ']), + ('ⓓ', &['Ⓓ']), + ('ⓔ', &['Ⓔ']), + ('ⓕ', &['Ⓕ']), + ('ⓖ', &['Ⓖ']), + ('ⓗ', &['Ⓗ']), + ('ⓘ', &['Ⓘ']), + ('ⓙ', &['Ⓙ']), + ('ⓚ', &['Ⓚ']), + ('ⓛ', &['Ⓛ']), + ('ⓜ', &['Ⓜ']), + ('ⓝ', &['Ⓝ']), + ('ⓞ', &['Ⓞ']), + ('ⓟ', &['Ⓟ']), + ('ⓠ', &['Ⓠ']), + ('ⓡ', &['Ⓡ']), + ('ⓢ', &['Ⓢ']), + ('ⓣ', &['Ⓣ']), + ('ⓤ', &['Ⓤ']), + ('ⓥ', &['Ⓥ']), + ('ⓦ', &['Ⓦ']), + ('ⓧ', &['Ⓧ']), + ('ⓨ', &['Ⓨ']), + ('ⓩ', &['Ⓩ']), + ('Ⰰ', &['ⰰ']), + ('Ⰱ', &['ⰱ']), + ('Ⰲ', &['ⰲ']), + ('Ⰳ', &['ⰳ']), + ('Ⰴ', &['ⰴ']), + ('Ⰵ', &['ⰵ']), + ('Ⰶ', &['ⰶ']), + ('Ⰷ', &['ⰷ']), + ('Ⰸ', &['ⰸ']), + ('Ⰹ', &['ⰹ']), + ('Ⰺ', &['ⰺ']), + ('Ⰻ', &['ⰻ']), + ('Ⰼ', &['ⰼ']), + ('Ⰽ', &['ⰽ']), + ('Ⰾ', &['ⰾ']), + ('Ⰿ', &['ⰿ']), + ('Ⱀ', &['ⱀ']), + ('Ⱁ', &['ⱁ']), + ('Ⱂ', &['ⱂ']), + ('Ⱃ', &['ⱃ']), + ('Ⱄ', &['ⱄ']), + ('Ⱅ', &['ⱅ']), + ('Ⱆ', &['ⱆ']), + ('Ⱇ', &['ⱇ']), + ('Ⱈ', &['ⱈ']), + ('Ⱉ', &['ⱉ']), + ('Ⱊ', &['ⱊ']), + ('Ⱋ', &['ⱋ']), + ('Ⱌ', &['ⱌ']), + ('Ⱍ', &['ⱍ']), + ('Ⱎ', &['ⱎ']), + ('Ⱏ', &['ⱏ']), + ('Ⱐ', &['ⱐ']), + ('Ⱑ', &['ⱑ']), + ('Ⱒ', &['ⱒ']), + ('Ⱓ', &['ⱓ']), + ('Ⱔ', &['ⱔ']), + ('Ⱕ', &['ⱕ']), + ('Ⱖ', &['ⱖ']), + ('Ⱗ', &['ⱗ']), + ('Ⱘ', &['ⱘ']), + ('Ⱙ', &['ⱙ']), + ('Ⱚ', &['ⱚ']), + ('Ⱛ', &['ⱛ']), + ('Ⱜ', &['ⱜ']), + ('Ⱝ', &['ⱝ']), + ('Ⱞ', &['ⱞ']), + ('Ⱟ', &['ⱟ']), + ('ⰰ', &['Ⰰ']), + ('ⰱ', &['Ⰱ']), + ('ⰲ', &['Ⰲ']), + ('ⰳ', &['Ⰳ']), + ('ⰴ', &['Ⰴ']), + ('ⰵ', &['Ⰵ']), + ('ⰶ', &['Ⰶ']), + ('ⰷ', &['Ⰷ']), + ('ⰸ', &['Ⰸ']), + ('ⰹ', &['Ⰹ']), + ('ⰺ', &['Ⰺ']), + ('ⰻ', &['Ⰻ']), + ('ⰼ', &['Ⰼ']), + ('ⰽ', &['Ⰽ']), + ('ⰾ', &['Ⰾ']), + ('ⰿ', &['Ⰿ']), + ('ⱀ', &['Ⱀ']), + ('ⱁ', &['Ⱁ']), + ('ⱂ', &['Ⱂ']), + ('ⱃ', &['Ⱃ']), + ('ⱄ', &['Ⱄ']), + ('ⱅ', &['Ⱅ']), + ('ⱆ', &['Ⱆ']), + ('ⱇ', &['Ⱇ']), + ('ⱈ', &['Ⱈ']), + ('ⱉ', &['Ⱉ']), + ('ⱊ', &['Ⱊ']), + ('ⱋ', &['Ⱋ']), + ('ⱌ', &['Ⱌ']), + ('ⱍ', &['Ⱍ']), + ('ⱎ', &['Ⱎ']), + ('ⱏ', &['Ⱏ']), + ('ⱐ', &['Ⱐ']), + ('ⱑ', &['Ⱑ']), + ('ⱒ', &['Ⱒ']), + ('ⱓ', &['Ⱓ']), + ('ⱔ', &['Ⱔ']), + ('ⱕ', &['Ⱕ']), + ('ⱖ', &['Ⱖ']), + ('ⱗ', &['Ⱗ']), + ('ⱘ', &['Ⱘ']), + ('ⱙ', &['Ⱙ']), + ('ⱚ', &['Ⱚ']), + ('ⱛ', &['Ⱛ']), + ('ⱜ', &['Ⱜ']), + ('ⱝ', &['Ⱝ']), + ('ⱞ', &['Ⱞ']), + ('ⱟ', &['Ⱟ']), + ('Ⱡ', &['ⱡ']), + ('ⱡ', &['Ⱡ']), + ('Ɫ', &['ɫ']), + ('Ᵽ', &['ᵽ']), + ('Ɽ', &['ɽ']), + ('ⱥ', &['Ⱥ']), + ('ⱦ', &['Ⱦ']), + ('Ⱨ', &['ⱨ']), + ('ⱨ', &['Ⱨ']), + ('Ⱪ', &['ⱪ']), + ('ⱪ', &['Ⱪ']), + ('Ⱬ', &['ⱬ']), + ('ⱬ', &['Ⱬ']), + ('Ɑ', &['ɑ']), + 
('Ɱ', &['ɱ']), + ('Ɐ', &['ɐ']), + ('Ɒ', &['ɒ']), + ('Ⱳ', &['ⱳ']), + ('ⱳ', &['Ⱳ']), + ('Ⱶ', &['ⱶ']), + ('ⱶ', &['Ⱶ']), + ('Ȿ', &['ȿ']), + ('Ɀ', &['ɀ']), + ('Ⲁ', &['ⲁ']), + ('ⲁ', &['Ⲁ']), + ('Ⲃ', &['ⲃ']), + ('ⲃ', &['Ⲃ']), + ('Ⲅ', &['ⲅ']), + ('ⲅ', &['Ⲅ']), + ('Ⲇ', &['ⲇ']), + ('ⲇ', &['Ⲇ']), + ('Ⲉ', &['ⲉ']), + ('ⲉ', &['Ⲉ']), + ('Ⲋ', &['ⲋ']), + ('ⲋ', &['Ⲋ']), + ('Ⲍ', &['ⲍ']), + ('ⲍ', &['Ⲍ']), + ('Ⲏ', &['ⲏ']), + ('ⲏ', &['Ⲏ']), + ('Ⲑ', &['ⲑ']), + ('ⲑ', &['Ⲑ']), + ('Ⲓ', &['ⲓ']), + ('ⲓ', &['Ⲓ']), + ('Ⲕ', &['ⲕ']), + ('ⲕ', &['Ⲕ']), + ('Ⲗ', &['ⲗ']), + ('ⲗ', &['Ⲗ']), + ('Ⲙ', &['ⲙ']), + ('ⲙ', &['Ⲙ']), + ('Ⲛ', &['ⲛ']), + ('ⲛ', &['Ⲛ']), + ('Ⲝ', &['ⲝ']), + ('ⲝ', &['Ⲝ']), + ('Ⲟ', &['ⲟ']), + ('ⲟ', &['Ⲟ']), + ('Ⲡ', &['ⲡ']), + ('ⲡ', &['Ⲡ']), + ('Ⲣ', &['ⲣ']), + ('ⲣ', &['Ⲣ']), + ('Ⲥ', &['ⲥ']), + ('ⲥ', &['Ⲥ']), + ('Ⲧ', &['ⲧ']), + ('ⲧ', &['Ⲧ']), + ('Ⲩ', &['ⲩ']), + ('ⲩ', &['Ⲩ']), + ('Ⲫ', &['ⲫ']), + ('ⲫ', &['Ⲫ']), + ('Ⲭ', &['ⲭ']), + ('ⲭ', &['Ⲭ']), + ('Ⲯ', &['ⲯ']), + ('ⲯ', &['Ⲯ']), + ('Ⲱ', &['ⲱ']), + ('ⲱ', &['Ⲱ']), + ('Ⲳ', &['ⲳ']), + ('ⲳ', &['Ⲳ']), + ('Ⲵ', &['ⲵ']), + ('ⲵ', &['Ⲵ']), + ('Ⲷ', &['ⲷ']), + ('ⲷ', &['Ⲷ']), + ('Ⲹ', &['ⲹ']), + ('ⲹ', &['Ⲹ']), + ('Ⲻ', &['ⲻ']), + ('ⲻ', &['Ⲻ']), + ('Ⲽ', &['ⲽ']), + ('ⲽ', &['Ⲽ']), + ('Ⲿ', &['ⲿ']), + ('ⲿ', &['Ⲿ']), + ('Ⳁ', &['ⳁ']), + ('ⳁ', &['Ⳁ']), + ('Ⳃ', &['ⳃ']), + ('ⳃ', &['Ⳃ']), + ('Ⳅ', &['ⳅ']), + ('ⳅ', &['Ⳅ']), + ('Ⳇ', &['ⳇ']), + ('ⳇ', &['Ⳇ']), + ('Ⳉ', &['ⳉ']), + ('ⳉ', &['Ⳉ']), + ('Ⳋ', &['ⳋ']), + ('ⳋ', &['Ⳋ']), + ('Ⳍ', &['ⳍ']), + ('ⳍ', &['Ⳍ']), + ('Ⳏ', &['ⳏ']), + ('ⳏ', &['Ⳏ']), + ('Ⳑ', &['ⳑ']), + ('ⳑ', &['Ⳑ']), + ('Ⳓ', &['ⳓ']), + ('ⳓ', &['Ⳓ']), + ('Ⳕ', &['ⳕ']), + ('ⳕ', &['Ⳕ']), + ('Ⳗ', &['ⳗ']), + ('ⳗ', &['Ⳗ']), + ('Ⳙ', &['ⳙ']), + ('ⳙ', &['Ⳙ']), + ('Ⳛ', &['ⳛ']), + ('ⳛ', &['Ⳛ']), + ('Ⳝ', &['ⳝ']), + ('ⳝ', &['Ⳝ']), + ('Ⳟ', &['ⳟ']), + ('ⳟ', &['Ⳟ']), + ('Ⳡ', &['ⳡ']), + ('ⳡ', &['Ⳡ']), + ('Ⳣ', &['ⳣ']), + ('ⳣ', &['Ⳣ']), + ('Ⳬ', &['ⳬ']), + ('ⳬ', &['Ⳬ']), + ('Ⳮ', &['ⳮ']), + ('ⳮ', &['Ⳮ']), + ('Ⳳ', &['ⳳ']), + ('ⳳ', &['Ⳳ']), + ('ⴀ', &['Ⴀ']), + ('ⴁ', &['Ⴁ']), + ('ⴂ', &['Ⴂ']), + ('ⴃ', &['Ⴃ']), + ('ⴄ', &['Ⴄ']), + ('ⴅ', &['Ⴅ']), + ('ⴆ', &['Ⴆ']), + ('ⴇ', &['Ⴇ']), + ('ⴈ', &['Ⴈ']), + ('ⴉ', &['Ⴉ']), + ('ⴊ', &['Ⴊ']), + ('ⴋ', &['Ⴋ']), + ('ⴌ', &['Ⴌ']), + ('ⴍ', &['Ⴍ']), + ('ⴎ', &['Ⴎ']), + ('ⴏ', &['Ⴏ']), + ('ⴐ', &['Ⴐ']), + ('ⴑ', &['Ⴑ']), + ('ⴒ', &['Ⴒ']), + ('ⴓ', &['Ⴓ']), + ('ⴔ', &['Ⴔ']), + ('ⴕ', &['Ⴕ']), + ('ⴖ', &['Ⴖ']), + ('ⴗ', &['Ⴗ']), + ('ⴘ', &['Ⴘ']), + ('ⴙ', &['Ⴙ']), + ('ⴚ', &['Ⴚ']), + ('ⴛ', &['Ⴛ']), + ('ⴜ', &['Ⴜ']), + ('ⴝ', &['Ⴝ']), + ('ⴞ', &['Ⴞ']), + ('ⴟ', &['Ⴟ']), + ('ⴠ', &['Ⴠ']), + ('ⴡ', &['Ⴡ']), + ('ⴢ', &['Ⴢ']), + ('ⴣ', &['Ⴣ']), + ('ⴤ', &['Ⴤ']), + ('ⴥ', &['Ⴥ']), + ('ⴧ', &['Ⴧ']), + ('ⴭ', &['Ⴭ']), + ('Ꙁ', &['ꙁ']), + ('ꙁ', &['Ꙁ']), + ('Ꙃ', &['ꙃ']), + ('ꙃ', &['Ꙃ']), + ('Ꙅ', &['ꙅ']), + ('ꙅ', &['Ꙅ']), + ('Ꙇ', &['ꙇ']), + ('ꙇ', &['Ꙇ']), + ('Ꙉ', &['ꙉ']), + ('ꙉ', &['Ꙉ']), + ('Ꙋ', &['ᲈ', 'ꙋ']), + ('ꙋ', &['ᲈ', 'Ꙋ']), + ('Ꙍ', &['ꙍ']), + ('ꙍ', &['Ꙍ']), + ('Ꙏ', &['ꙏ']), + ('ꙏ', &['Ꙏ']), + ('Ꙑ', &['ꙑ']), + ('ꙑ', &['Ꙑ']), + ('Ꙓ', &['ꙓ']), + ('ꙓ', &['Ꙓ']), + ('Ꙕ', &['ꙕ']), + ('ꙕ', &['Ꙕ']), + ('Ꙗ', &['ꙗ']), + ('ꙗ', &['Ꙗ']), + ('Ꙙ', &['ꙙ']), + ('ꙙ', &['Ꙙ']), + ('Ꙛ', &['ꙛ']), + ('ꙛ', &['Ꙛ']), + ('Ꙝ', &['ꙝ']), + ('ꙝ', &['Ꙝ']), + ('Ꙟ', &['ꙟ']), + ('ꙟ', &['Ꙟ']), + ('Ꙡ', &['ꙡ']), + ('ꙡ', &['Ꙡ']), + ('Ꙣ', &['ꙣ']), + ('ꙣ', &['Ꙣ']), + ('Ꙥ', &['ꙥ']), + ('ꙥ', &['Ꙥ']), + ('Ꙧ', &['ꙧ']), + ('ꙧ', &['Ꙧ']), + ('Ꙩ', &['ꙩ']), + ('ꙩ', &['Ꙩ']), + ('Ꙫ', &['ꙫ']), + ('ꙫ', &['Ꙫ']), + ('Ꙭ', &['ꙭ']), + ('ꙭ', &['Ꙭ']), + ('Ꚁ', &['ꚁ']), + ('ꚁ', &['Ꚁ']), + ('Ꚃ', &['ꚃ']), + ('ꚃ', &['Ꚃ']), + ('Ꚅ', &['ꚅ']), + ('ꚅ', &['Ꚅ']), + ('Ꚇ', &['ꚇ']), + ('ꚇ', 
&['Ꚇ']), + ('Ꚉ', &['ꚉ']), + ('ꚉ', &['Ꚉ']), + ('Ꚋ', &['ꚋ']), + ('ꚋ', &['Ꚋ']), + ('Ꚍ', &['ꚍ']), + ('ꚍ', &['Ꚍ']), + ('Ꚏ', &['ꚏ']), + ('ꚏ', &['Ꚏ']), + ('Ꚑ', &['ꚑ']), + ('ꚑ', &['Ꚑ']), + ('Ꚓ', &['ꚓ']), + ('ꚓ', &['Ꚓ']), + ('Ꚕ', &['ꚕ']), + ('ꚕ', &['Ꚕ']), + ('Ꚗ', &['ꚗ']), + ('ꚗ', &['Ꚗ']), + ('Ꚙ', &['ꚙ']), + ('ꚙ', &['Ꚙ']), + ('Ꚛ', &['ꚛ']), + ('ꚛ', &['Ꚛ']), + ('Ꜣ', &['ꜣ']), + ('ꜣ', &['Ꜣ']), + ('Ꜥ', &['ꜥ']), + ('ꜥ', &['Ꜥ']), + ('Ꜧ', &['ꜧ']), + ('ꜧ', &['Ꜧ']), + ('Ꜩ', &['ꜩ']), + ('ꜩ', &['Ꜩ']), + ('Ꜫ', &['ꜫ']), + ('ꜫ', &['Ꜫ']), + ('Ꜭ', &['ꜭ']), + ('ꜭ', &['Ꜭ']), + ('Ꜯ', &['ꜯ']), + ('ꜯ', &['Ꜯ']), + ('Ꜳ', &['ꜳ']), + ('ꜳ', &['Ꜳ']), + ('Ꜵ', &['ꜵ']), + ('ꜵ', &['Ꜵ']), + ('Ꜷ', &['ꜷ']), + ('ꜷ', &['Ꜷ']), + ('Ꜹ', &['ꜹ']), + ('ꜹ', &['Ꜹ']), + ('Ꜻ', &['ꜻ']), + ('ꜻ', &['Ꜻ']), + ('Ꜽ', &['ꜽ']), + ('ꜽ', &['Ꜽ']), + ('Ꜿ', &['ꜿ']), + ('ꜿ', &['Ꜿ']), + ('Ꝁ', &['ꝁ']), + ('ꝁ', &['Ꝁ']), + ('Ꝃ', &['ꝃ']), + ('ꝃ', &['Ꝃ']), + ('Ꝅ', &['ꝅ']), + ('ꝅ', &['Ꝅ']), + ('Ꝇ', &['ꝇ']), + ('ꝇ', &['Ꝇ']), + ('Ꝉ', &['ꝉ']), + ('ꝉ', &['Ꝉ']), + ('Ꝋ', &['ꝋ']), + ('ꝋ', &['Ꝋ']), + ('Ꝍ', &['ꝍ']), + ('ꝍ', &['Ꝍ']), + ('Ꝏ', &['ꝏ']), + ('ꝏ', &['Ꝏ']), + ('Ꝑ', &['ꝑ']), + ('ꝑ', &['Ꝑ']), + ('Ꝓ', &['ꝓ']), + ('ꝓ', &['Ꝓ']), + ('Ꝕ', &['ꝕ']), + ('ꝕ', &['Ꝕ']), + ('Ꝗ', &['ꝗ']), + ('ꝗ', &['Ꝗ']), + ('Ꝙ', &['ꝙ']), + ('ꝙ', &['Ꝙ']), + ('Ꝛ', &['ꝛ']), + ('ꝛ', &['Ꝛ']), + ('Ꝝ', &['ꝝ']), + ('ꝝ', &['Ꝝ']), + ('Ꝟ', &['ꝟ']), + ('ꝟ', &['Ꝟ']), + ('Ꝡ', &['ꝡ']), + ('ꝡ', &['Ꝡ']), + ('Ꝣ', &['ꝣ']), + ('ꝣ', &['Ꝣ']), + ('Ꝥ', &['ꝥ']), + ('ꝥ', &['Ꝥ']), + ('Ꝧ', &['ꝧ']), + ('ꝧ', &['Ꝧ']), + ('Ꝩ', &['ꝩ']), + ('ꝩ', &['Ꝩ']), + ('Ꝫ', &['ꝫ']), + ('ꝫ', &['Ꝫ']), + ('Ꝭ', &['ꝭ']), + ('ꝭ', &['Ꝭ']), + ('Ꝯ', &['ꝯ']), + ('ꝯ', &['Ꝯ']), + ('Ꝺ', &['ꝺ']), + ('ꝺ', &['Ꝺ']), + ('Ꝼ', &['ꝼ']), + ('ꝼ', &['Ꝼ']), + ('Ᵹ', &['ᵹ']), + ('Ꝿ', &['ꝿ']), + ('ꝿ', &['Ꝿ']), + ('Ꞁ', &['ꞁ']), + ('ꞁ', &['Ꞁ']), + ('Ꞃ', &['ꞃ']), + ('ꞃ', &['Ꞃ']), + ('Ꞅ', &['ꞅ']), + ('ꞅ', &['Ꞅ']), + ('Ꞇ', &['ꞇ']), + ('ꞇ', &['Ꞇ']), + ('Ꞌ', &['ꞌ']), + ('ꞌ', &['Ꞌ']), + ('Ɥ', &['ɥ']), + ('Ꞑ', &['ꞑ']), + ('ꞑ', &['Ꞑ']), + ('Ꞓ', &['ꞓ']), + ('ꞓ', &['Ꞓ']), + ('ꞔ', &['Ꞔ']), + ('Ꞗ', &['ꞗ']), + ('ꞗ', &['Ꞗ']), + ('Ꞙ', &['ꞙ']), + ('ꞙ', &['Ꞙ']), + ('Ꞛ', &['ꞛ']), + ('ꞛ', &['Ꞛ']), + ('Ꞝ', &['ꞝ']), + ('ꞝ', &['Ꞝ']), + ('Ꞟ', &['ꞟ']), + ('ꞟ', &['Ꞟ']), + ('Ꞡ', &['ꞡ']), + ('ꞡ', &['Ꞡ']), + ('Ꞣ', &['ꞣ']), + ('ꞣ', &['Ꞣ']), + ('Ꞥ', &['ꞥ']), + ('ꞥ', &['Ꞥ']), + ('Ꞧ', &['ꞧ']), + ('ꞧ', &['Ꞧ']), + ('Ꞩ', &['ꞩ']), + ('ꞩ', &['Ꞩ']), + ('Ɦ', &['ɦ']), + ('Ɜ', &['ɜ']), + ('Ɡ', &['ɡ']), + ('Ɬ', &['ɬ']), + ('Ɪ', &['ɪ']), + ('Ʞ', &['ʞ']), + ('Ʇ', &['ʇ']), + ('Ʝ', &['ʝ']), + ('Ꭓ', &['ꭓ']), + ('Ꞵ', &['ꞵ']), + ('ꞵ', &['Ꞵ']), + ('Ꞷ', &['ꞷ']), + ('ꞷ', &['Ꞷ']), + ('Ꞹ', &['ꞹ']), + ('ꞹ', &['Ꞹ']), + ('Ꞻ', &['ꞻ']), + ('ꞻ', &['Ꞻ']), + ('Ꞽ', &['ꞽ']), + ('ꞽ', &['Ꞽ']), + ('Ꞿ', &['ꞿ']), + ('ꞿ', &['Ꞿ']), + ('Ꟁ', &['ꟁ']), + ('ꟁ', &['Ꟁ']), + ('Ꟃ', &['ꟃ']), + ('ꟃ', &['Ꟃ']), + ('Ꞔ', &['ꞔ']), + ('Ʂ', &['ʂ']), + ('Ᶎ', &['ᶎ']), + ('Ꟈ', &['ꟈ']), + ('ꟈ', &['Ꟈ']), + ('Ꟊ', &['ꟊ']), + ('ꟊ', &['Ꟊ']), + ('Ɤ', &['ɤ']), + ('Ꟍ', &['ꟍ']), + ('ꟍ', &['Ꟍ']), + ('Ꟑ', &['ꟑ']), + ('ꟑ', &['Ꟑ']), + ('Ꟗ', &['ꟗ']), + ('ꟗ', &['Ꟗ']), + ('Ꟙ', &['ꟙ']), + ('ꟙ', &['Ꟙ']), + ('Ꟛ', &['ꟛ']), + ('ꟛ', &['Ꟛ']), + ('Ƛ', &['ƛ']), + ('Ꟶ', &['ꟶ']), + ('ꟶ', &['Ꟶ']), + ('ꭓ', &['Ꭓ']), + ('ꭰ', &['Ꭰ']), + ('ꭱ', &['Ꭱ']), + ('ꭲ', &['Ꭲ']), + ('ꭳ', &['Ꭳ']), + ('ꭴ', &['Ꭴ']), + ('ꭵ', &['Ꭵ']), + ('ꭶ', &['Ꭶ']), + ('ꭷ', &['Ꭷ']), + ('ꭸ', &['Ꭸ']), + ('ꭹ', &['Ꭹ']), + ('ꭺ', &['Ꭺ']), + ('ꭻ', &['Ꭻ']), + ('ꭼ', &['Ꭼ']), + ('ꭽ', &['Ꭽ']), + ('ꭾ', &['Ꭾ']), + ('ꭿ', &['Ꭿ']), + ('ꮀ', &['Ꮀ']), + ('ꮁ', &['Ꮁ']), + ('ꮂ', &['Ꮂ']), + ('ꮃ', &['Ꮃ']), + ('ꮄ', &['Ꮄ']), + ('ꮅ', &['Ꮅ']), + ('ꮆ', 
&['Ꮆ']), + ('ꮇ', &['Ꮇ']), + ('ꮈ', &['Ꮈ']), + ('ꮉ', &['Ꮉ']), + ('ꮊ', &['Ꮊ']), + ('ꮋ', &['Ꮋ']), + ('ꮌ', &['Ꮌ']), + ('ꮍ', &['Ꮍ']), + ('ꮎ', &['Ꮎ']), + ('ꮏ', &['Ꮏ']), + ('ꮐ', &['Ꮐ']), + ('ꮑ', &['Ꮑ']), + ('ꮒ', &['Ꮒ']), + ('ꮓ', &['Ꮓ']), + ('ꮔ', &['Ꮔ']), + ('ꮕ', &['Ꮕ']), + ('ꮖ', &['Ꮖ']), + ('ꮗ', &['Ꮗ']), + ('ꮘ', &['Ꮘ']), + ('ꮙ', &['Ꮙ']), + ('ꮚ', &['Ꮚ']), + ('ꮛ', &['Ꮛ']), + ('ꮜ', &['Ꮜ']), + ('ꮝ', &['Ꮝ']), + ('ꮞ', &['Ꮞ']), + ('ꮟ', &['Ꮟ']), + ('ꮠ', &['Ꮠ']), + ('ꮡ', &['Ꮡ']), + ('ꮢ', &['Ꮢ']), + ('ꮣ', &['Ꮣ']), + ('ꮤ', &['Ꮤ']), + ('ꮥ', &['Ꮥ']), + ('ꮦ', &['Ꮦ']), + ('ꮧ', &['Ꮧ']), + ('ꮨ', &['Ꮨ']), + ('ꮩ', &['Ꮩ']), + ('ꮪ', &['Ꮪ']), + ('ꮫ', &['Ꮫ']), + ('ꮬ', &['Ꮬ']), + ('ꮭ', &['Ꮭ']), + ('ꮮ', &['Ꮮ']), + ('ꮯ', &['Ꮯ']), + ('ꮰ', &['Ꮰ']), + ('ꮱ', &['Ꮱ']), + ('ꮲ', &['Ꮲ']), + ('ꮳ', &['Ꮳ']), + ('ꮴ', &['Ꮴ']), + ('ꮵ', &['Ꮵ']), + ('ꮶ', &['Ꮶ']), + ('ꮷ', &['Ꮷ']), + ('ꮸ', &['Ꮸ']), + ('ꮹ', &['Ꮹ']), + ('ꮺ', &['Ꮺ']), + ('ꮻ', &['Ꮻ']), + ('ꮼ', &['Ꮼ']), + ('ꮽ', &['Ꮽ']), + ('ꮾ', &['Ꮾ']), + ('ꮿ', &['Ꮿ']), + ('ſt', &['st']), + ('st', &['ſt']), + ('A', &['a']), + ('B', &['b']), + ('C', &['c']), + ('D', &['d']), + ('E', &['e']), + ('F', &['f']), + ('G', &['g']), + ('H', &['h']), + ('I', &['i']), + ('J', &['j']), + ('K', &['k']), + ('L', &['l']), + ('M', &['m']), + ('N', &['n']), + ('O', &['o']), + ('P', &['p']), + ('Q', &['q']), + ('R', &['r']), + ('S', &['s']), + ('T', &['t']), + ('U', &['u']), + ('V', &['v']), + ('W', &['w']), + ('X', &['x']), + ('Y', &['y']), + ('Z', &['z']), + ('a', &['A']), + ('b', &['B']), + ('c', &['C']), + ('d', &['D']), + ('e', &['E']), + ('f', &['F']), + ('g', &['G']), + ('h', &['H']), + ('i', &['I']), + ('j', &['J']), + ('k', &['K']), + ('l', &['L']), + ('m', &['M']), + ('n', &['N']), + ('o', &['O']), + ('p', &['P']), + ('q', &['Q']), + ('r', &['R']), + ('s', &['S']), + ('t', &['T']), + ('u', &['U']), + ('v', &['V']), + ('w', &['W']), + ('x', &['X']), + ('y', &['Y']), + ('z', &['Z']), + ('𐐀', &['𐐨']), + ('𐐁', &['𐐩']), + ('𐐂', &['𐐪']), + ('𐐃', &['𐐫']), + ('𐐄', &['𐐬']), + ('𐐅', &['𐐭']), + ('𐐆', &['𐐮']), + ('𐐇', &['𐐯']), + ('𐐈', &['𐐰']), + ('𐐉', &['𐐱']), + ('𐐊', &['𐐲']), + ('𐐋', &['𐐳']), + ('𐐌', &['𐐴']), + ('𐐍', &['𐐵']), + ('𐐎', &['𐐶']), + ('𐐏', &['𐐷']), + ('𐐐', &['𐐸']), + ('𐐑', &['𐐹']), + ('𐐒', &['𐐺']), + ('𐐓', &['𐐻']), + ('𐐔', &['𐐼']), + ('𐐕', &['𐐽']), + ('𐐖', &['𐐾']), + ('𐐗', &['𐐿']), + ('𐐘', &['𐑀']), + ('𐐙', &['𐑁']), + ('𐐚', &['𐑂']), + ('𐐛', &['𐑃']), + ('𐐜', &['𐑄']), + ('𐐝', &['𐑅']), + ('𐐞', &['𐑆']), + ('𐐟', &['𐑇']), + ('𐐠', &['𐑈']), + ('𐐡', &['𐑉']), + ('𐐢', &['𐑊']), + ('𐐣', &['𐑋']), + ('𐐤', &['𐑌']), + ('𐐥', &['𐑍']), + ('𐐦', &['𐑎']), + ('𐐧', &['𐑏']), + ('𐐨', &['𐐀']), + ('𐐩', &['𐐁']), + ('𐐪', &['𐐂']), + ('𐐫', &['𐐃']), + ('𐐬', &['𐐄']), + ('𐐭', &['𐐅']), + ('𐐮', &['𐐆']), + ('𐐯', &['𐐇']), + ('𐐰', &['𐐈']), + ('𐐱', &['𐐉']), + ('𐐲', &['𐐊']), + ('𐐳', &['𐐋']), + ('𐐴', &['𐐌']), + ('𐐵', &['𐐍']), + ('𐐶', &['𐐎']), + ('𐐷', &['𐐏']), + ('𐐸', &['𐐐']), + ('𐐹', &['𐐑']), + ('𐐺', &['𐐒']), + ('𐐻', &['𐐓']), + ('𐐼', &['𐐔']), + ('𐐽', &['𐐕']), + ('𐐾', &['𐐖']), + ('𐐿', &['𐐗']), + ('𐑀', &['𐐘']), + ('𐑁', &['𐐙']), + ('𐑂', &['𐐚']), + ('𐑃', &['𐐛']), + ('𐑄', &['𐐜']), + ('𐑅', &['𐐝']), + ('𐑆', &['𐐞']), + ('𐑇', &['𐐟']), + ('𐑈', &['𐐠']), + ('𐑉', &['𐐡']), + ('𐑊', &['𐐢']), + ('𐑋', &['𐐣']), + ('𐑌', &['𐐤']), + ('𐑍', &['𐐥']), + ('𐑎', &['𐐦']), + ('𐑏', &['𐐧']), + ('𐒰', &['𐓘']), + ('𐒱', &['𐓙']), + ('𐒲', &['𐓚']), + ('𐒳', &['𐓛']), + ('𐒴', &['𐓜']), + ('𐒵', &['𐓝']), + ('𐒶', &['𐓞']), + ('𐒷', &['𐓟']), + ('𐒸', &['𐓠']), + ('𐒹', &['𐓡']), + ('𐒺', &['𐓢']), + ('𐒻', &['𐓣']), + ('𐒼', &['𐓤']), + ('𐒽', &['𐓥']), + ('𐒾', &['𐓦']), + ('𐒿', &['𐓧']), + ('𐓀', &['𐓨']), + 
('𐓁', &['𐓩']), + ('𐓂', &['𐓪']), + ('𐓃', &['𐓫']), + ('𐓄', &['𐓬']), + ('𐓅', &['𐓭']), + ('𐓆', &['𐓮']), + ('𐓇', &['𐓯']), + ('𐓈', &['𐓰']), + ('𐓉', &['𐓱']), + ('𐓊', &['𐓲']), + ('𐓋', &['𐓳']), + ('𐓌', &['𐓴']), + ('𐓍', &['𐓵']), + ('𐓎', &['𐓶']), + ('𐓏', &['𐓷']), + ('𐓐', &['𐓸']), + ('𐓑', &['𐓹']), + ('𐓒', &['𐓺']), + ('𐓓', &['𐓻']), + ('𐓘', &['𐒰']), + ('𐓙', &['𐒱']), + ('𐓚', &['𐒲']), + ('𐓛', &['𐒳']), + ('𐓜', &['𐒴']), + ('𐓝', &['𐒵']), + ('𐓞', &['𐒶']), + ('𐓟', &['𐒷']), + ('𐓠', &['𐒸']), + ('𐓡', &['𐒹']), + ('𐓢', &['𐒺']), + ('𐓣', &['𐒻']), + ('𐓤', &['𐒼']), + ('𐓥', &['𐒽']), + ('𐓦', &['𐒾']), + ('𐓧', &['𐒿']), + ('𐓨', &['𐓀']), + ('𐓩', &['𐓁']), + ('𐓪', &['𐓂']), + ('𐓫', &['𐓃']), + ('𐓬', &['𐓄']), + ('𐓭', &['𐓅']), + ('𐓮', &['𐓆']), + ('𐓯', &['𐓇']), + ('𐓰', &['𐓈']), + ('𐓱', &['𐓉']), + ('𐓲', &['𐓊']), + ('𐓳', &['𐓋']), + ('𐓴', &['𐓌']), + ('𐓵', &['𐓍']), + ('𐓶', &['𐓎']), + ('𐓷', &['𐓏']), + ('𐓸', &['𐓐']), + ('𐓹', &['𐓑']), + ('𐓺', &['𐓒']), + ('𐓻', &['𐓓']), + ('𐕰', &['𐖗']), + ('𐕱', &['𐖘']), + ('𐕲', &['𐖙']), + ('𐕳', &['𐖚']), + ('𐕴', &['𐖛']), + ('𐕵', &['𐖜']), + ('𐕶', &['𐖝']), + ('𐕷', &['𐖞']), + ('𐕸', &['𐖟']), + ('𐕹', &['𐖠']), + ('𐕺', &['𐖡']), + ('𐕼', &['𐖣']), + ('𐕽', &['𐖤']), + ('𐕾', &['𐖥']), + ('𐕿', &['𐖦']), + ('𐖀', &['𐖧']), + ('𐖁', &['𐖨']), + ('𐖂', &['𐖩']), + ('𐖃', &['𐖪']), + ('𐖄', &['𐖫']), + ('𐖅', &['𐖬']), + ('𐖆', &['𐖭']), + ('𐖇', &['𐖮']), + ('𐖈', &['𐖯']), + ('𐖉', &['𐖰']), + ('𐖊', &['𐖱']), + ('𐖌', &['𐖳']), + ('𐖍', &['𐖴']), + ('𐖎', &['𐖵']), + ('𐖏', &['𐖶']), + ('𐖐', &['𐖷']), + ('𐖑', &['𐖸']), + ('𐖒', &['𐖹']), + ('𐖔', &['𐖻']), + ('𐖕', &['𐖼']), + ('𐖗', &['𐕰']), + ('𐖘', &['𐕱']), + ('𐖙', &['𐕲']), + ('𐖚', &['𐕳']), + ('𐖛', &['𐕴']), + ('𐖜', &['𐕵']), + ('𐖝', &['𐕶']), + ('𐖞', &['𐕷']), + ('𐖟', &['𐕸']), + ('𐖠', &['𐕹']), + ('𐖡', &['𐕺']), + ('𐖣', &['𐕼']), + ('𐖤', &['𐕽']), + ('𐖥', &['𐕾']), + ('𐖦', &['𐕿']), + ('𐖧', &['𐖀']), + ('𐖨', &['𐖁']), + ('𐖩', &['𐖂']), + ('𐖪', &['𐖃']), + ('𐖫', &['𐖄']), + ('𐖬', &['𐖅']), + ('𐖭', &['𐖆']), + ('𐖮', &['𐖇']), + ('𐖯', &['𐖈']), + ('𐖰', &['𐖉']), + ('𐖱', &['𐖊']), + ('𐖳', &['𐖌']), + ('𐖴', &['𐖍']), + ('𐖵', &['𐖎']), + ('𐖶', &['𐖏']), + ('𐖷', &['𐖐']), + ('𐖸', &['𐖑']), + ('𐖹', &['𐖒']), + ('𐖻', &['𐖔']), + ('𐖼', &['𐖕']), + ('𐲀', &['𐳀']), + ('𐲁', &['𐳁']), + ('𐲂', &['𐳂']), + ('𐲃', &['𐳃']), + ('𐲄', &['𐳄']), + ('𐲅', &['𐳅']), + ('𐲆', &['𐳆']), + ('𐲇', &['𐳇']), + ('𐲈', &['𐳈']), + ('𐲉', &['𐳉']), + ('𐲊', &['𐳊']), + ('𐲋', &['𐳋']), + ('𐲌', &['𐳌']), + ('𐲍', &['𐳍']), + ('𐲎', &['𐳎']), + ('𐲏', &['𐳏']), + ('𐲐', &['𐳐']), + ('𐲑', &['𐳑']), + ('𐲒', &['𐳒']), + ('𐲓', &['𐳓']), + ('𐲔', &['𐳔']), + ('𐲕', &['𐳕']), + ('𐲖', &['𐳖']), + ('𐲗', &['𐳗']), + ('𐲘', &['𐳘']), + ('𐲙', &['𐳙']), + ('𐲚', &['𐳚']), + ('𐲛', &['𐳛']), + ('𐲜', &['𐳜']), + ('𐲝', &['𐳝']), + ('𐲞', &['𐳞']), + ('𐲟', &['𐳟']), + ('𐲠', &['𐳠']), + ('𐲡', &['𐳡']), + ('𐲢', &['𐳢']), + ('𐲣', &['𐳣']), + ('𐲤', &['𐳤']), + ('𐲥', &['𐳥']), + ('𐲦', &['𐳦']), + ('𐲧', &['𐳧']), + ('𐲨', &['𐳨']), + ('𐲩', &['𐳩']), + ('𐲪', &['𐳪']), + ('𐲫', &['𐳫']), + ('𐲬', &['𐳬']), + ('𐲭', &['𐳭']), + ('𐲮', &['𐳮']), + ('𐲯', &['𐳯']), + ('𐲰', &['𐳰']), + ('𐲱', &['𐳱']), + ('𐲲', &['𐳲']), + ('𐳀', &['𐲀']), + ('𐳁', &['𐲁']), + ('𐳂', &['𐲂']), + ('𐳃', &['𐲃']), + ('𐳄', &['𐲄']), + ('𐳅', &['𐲅']), + ('𐳆', &['𐲆']), + ('𐳇', &['𐲇']), + ('𐳈', &['𐲈']), + ('𐳉', &['𐲉']), + ('𐳊', &['𐲊']), + ('𐳋', &['𐲋']), + ('𐳌', &['𐲌']), + ('𐳍', &['𐲍']), + ('𐳎', &['𐲎']), + ('𐳏', &['𐲏']), + ('𐳐', &['𐲐']), + ('𐳑', &['𐲑']), + ('𐳒', &['𐲒']), + ('𐳓', &['𐲓']), + ('𐳔', &['𐲔']), + ('𐳕', &['𐲕']), + ('𐳖', &['𐲖']), + ('𐳗', &['𐲗']), + ('𐳘', &['𐲘']), + ('𐳙', &['𐲙']), + ('𐳚', &['𐲚']), + ('𐳛', &['𐲛']), + ('𐳜', &['𐲜']), + ('𐳝', &['𐲝']), + ('𐳞', &['𐲞']), + ('𐳟', &['𐲟']), + ('𐳠', &['𐲠']), + 
('𐳡', &['𐲡']), + ('𐳢', &['𐲢']), + ('𐳣', &['𐲣']), + ('𐳤', &['𐲤']), + ('𐳥', &['𐲥']), + ('𐳦', &['𐲦']), + ('𐳧', &['𐲧']), + ('𐳨', &['𐲨']), + ('𐳩', &['𐲩']), + ('𐳪', &['𐲪']), + ('𐳫', &['𐲫']), + ('𐳬', &['𐲬']), + ('𐳭', &['𐲭']), + ('𐳮', &['𐲮']), + ('𐳯', &['𐲯']), + ('𐳰', &['𐲰']), + ('𐳱', &['𐲱']), + ('𐳲', &['𐲲']), + ('𐵐', &['𐵰']), + ('𐵑', &['𐵱']), + ('𐵒', &['𐵲']), + ('𐵓', &['𐵳']), + ('𐵔', &['𐵴']), + ('𐵕', &['𐵵']), + ('𐵖', &['𐵶']), + ('𐵗', &['𐵷']), + ('𐵘', &['𐵸']), + ('𐵙', &['𐵹']), + ('𐵚', &['𐵺']), + ('𐵛', &['𐵻']), + ('𐵜', &['𐵼']), + ('𐵝', &['𐵽']), + ('𐵞', &['𐵾']), + ('𐵟', &['𐵿']), + ('𐵠', &['𐶀']), + ('𐵡', &['𐶁']), + ('𐵢', &['𐶂']), + ('𐵣', &['𐶃']), + ('𐵤', &['𐶄']), + ('𐵥', &['𐶅']), + ('𐵰', &['𐵐']), + ('𐵱', &['𐵑']), + ('𐵲', &['𐵒']), + ('𐵳', &['𐵓']), + ('𐵴', &['𐵔']), + ('𐵵', &['𐵕']), + ('𐵶', &['𐵖']), + ('𐵷', &['𐵗']), + ('𐵸', &['𐵘']), + ('𐵹', &['𐵙']), + ('𐵺', &['𐵚']), + ('𐵻', &['𐵛']), + ('𐵼', &['𐵜']), + ('𐵽', &['𐵝']), + ('𐵾', &['𐵞']), + ('𐵿', &['𐵟']), + ('𐶀', &['𐵠']), + ('𐶁', &['𐵡']), + ('𐶂', &['𐵢']), + ('𐶃', &['𐵣']), + ('𐶄', &['𐵤']), + ('𐶅', &['𐵥']), + ('𑢠', &['𑣀']), + ('𑢡', &['𑣁']), + ('𑢢', &['𑣂']), + ('𑢣', &['𑣃']), + ('𑢤', &['𑣄']), + ('𑢥', &['𑣅']), + ('𑢦', &['𑣆']), + ('𑢧', &['𑣇']), + ('𑢨', &['𑣈']), + ('𑢩', &['𑣉']), + ('𑢪', &['𑣊']), + ('𑢫', &['𑣋']), + ('𑢬', &['𑣌']), + ('𑢭', &['𑣍']), + ('𑢮', &['𑣎']), + ('𑢯', &['𑣏']), + ('𑢰', &['𑣐']), + ('𑢱', &['𑣑']), + ('𑢲', &['𑣒']), + ('𑢳', &['𑣓']), + ('𑢴', &['𑣔']), + ('𑢵', &['𑣕']), + ('𑢶', &['𑣖']), + ('𑢷', &['𑣗']), + ('𑢸', &['𑣘']), + ('𑢹', &['𑣙']), + ('𑢺', &['𑣚']), + ('𑢻', &['𑣛']), + ('𑢼', &['𑣜']), + ('𑢽', &['𑣝']), + ('𑢾', &['𑣞']), + ('𑢿', &['𑣟']), + ('𑣀', &['𑢠']), + ('𑣁', &['𑢡']), + ('𑣂', &['𑢢']), + ('𑣃', &['𑢣']), + ('𑣄', &['𑢤']), + ('𑣅', &['𑢥']), + ('𑣆', &['𑢦']), + ('𑣇', &['𑢧']), + ('𑣈', &['𑢨']), + ('𑣉', &['𑢩']), + ('𑣊', &['𑢪']), + ('𑣋', &['𑢫']), + ('𑣌', &['𑢬']), + ('𑣍', &['𑢭']), + ('𑣎', &['𑢮']), + ('𑣏', &['𑢯']), + ('𑣐', &['𑢰']), + ('𑣑', &['𑢱']), + ('𑣒', &['𑢲']), + ('𑣓', &['𑢳']), + ('𑣔', &['𑢴']), + ('𑣕', &['𑢵']), + ('𑣖', &['𑢶']), + ('𑣗', &['𑢷']), + ('𑣘', &['𑢸']), + ('𑣙', &['𑢹']), + ('𑣚', &['𑢺']), + ('𑣛', &['𑢻']), + ('𑣜', &['𑢼']), + ('𑣝', &['𑢽']), + ('𑣞', &['𑢾']), + ('𑣟', &['𑢿']), + ('𖹀', &['𖹠']), + ('𖹁', &['𖹡']), + ('𖹂', &['𖹢']), + ('𖹃', &['𖹣']), + ('𖹄', &['𖹤']), + ('𖹅', &['𖹥']), + ('𖹆', &['𖹦']), + ('𖹇', &['𖹧']), + ('𖹈', &['𖹨']), + ('𖹉', &['𖹩']), + ('𖹊', &['𖹪']), + ('𖹋', &['𖹫']), + ('𖹌', &['𖹬']), + ('𖹍', &['𖹭']), + ('𖹎', &['𖹮']), + ('𖹏', &['𖹯']), + ('𖹐', &['𖹰']), + ('𖹑', &['𖹱']), + ('𖹒', &['𖹲']), + ('𖹓', &['𖹳']), + ('𖹔', &['𖹴']), + ('𖹕', &['𖹵']), + ('𖹖', &['𖹶']), + ('𖹗', &['𖹷']), + ('𖹘', &['𖹸']), + ('𖹙', &['𖹹']), + ('𖹚', &['𖹺']), + ('𖹛', &['𖹻']), + ('𖹜', &['𖹼']), + ('𖹝', &['𖹽']), + ('𖹞', &['𖹾']), + ('𖹟', &['𖹿']), + ('𖹠', &['𖹀']), + ('𖹡', &['𖹁']), + ('𖹢', &['𖹂']), + ('𖹣', &['𖹃']), + ('𖹤', &['𖹄']), + ('𖹥', &['𖹅']), + ('𖹦', &['𖹆']), + ('𖹧', &['𖹇']), + ('𖹨', &['𖹈']), + ('𖹩', &['𖹉']), + ('𖹪', &['𖹊']), + ('𖹫', &['𖹋']), + ('𖹬', &['𖹌']), + ('𖹭', &['𖹍']), + ('𖹮', &['𖹎']), + ('𖹯', &['𖹏']), + ('𖹰', &['𖹐']), + ('𖹱', &['𖹑']), + ('𖹲', &['𖹒']), + ('𖹳', &['𖹓']), + ('𖹴', &['𖹔']), + ('𖹵', &['𖹕']), + ('𖹶', &['𖹖']), + ('𖹷', &['𖹗']), + ('𖹸', &['𖹘']), + ('𖹹', &['𖹙']), + ('𖹺', &['𖹚']), + ('𖹻', &['𖹛']), + ('𖹼', &['𖹜']), + ('𖹽', &['𖹝']), + ('𖹾', &['𖹞']), + ('𖹿', &['𖹟']), + ('𞤀', &['𞤢']), + ('𞤁', &['𞤣']), + ('𞤂', &['𞤤']), + ('𞤃', &['𞤥']), + ('𞤄', &['𞤦']), + ('𞤅', &['𞤧']), + ('𞤆', &['𞤨']), + ('𞤇', &['𞤩']), + ('𞤈', &['𞤪']), + ('𞤉', &['𞤫']), + ('𞤊', &['𞤬']), + ('𞤋', &['𞤭']), + ('𞤌', &['𞤮']), + ('𞤍', &['𞤯']), + ('𞤎', &['𞤰']), + ('𞤏', &['𞤱']), + ('𞤐', &['𞤲']), + ('𞤑', &['𞤳']), + ('𞤒', &['𞤴']), + 
('𞤓', &['𞤵']), + ('𞤔', &['𞤶']), + ('𞤕', &['𞤷']), + ('𞤖', &['𞤸']), + ('𞤗', &['𞤹']), + ('𞤘', &['𞤺']), + ('𞤙', &['𞤻']), + ('𞤚', &['𞤼']), + ('𞤛', &['𞤽']), + ('𞤜', &['𞤾']), + ('𞤝', &['𞤿']), + ('𞤞', &['𞥀']), + ('𞤟', &['𞥁']), + ('𞤠', &['𞥂']), + ('𞤡', &['𞥃']), + ('𞤢', &['𞤀']), + ('𞤣', &['𞤁']), + ('𞤤', &['𞤂']), + ('𞤥', &['𞤃']), + ('𞤦', &['𞤄']), + ('𞤧', &['𞤅']), + ('𞤨', &['𞤆']), + ('𞤩', &['𞤇']), + ('𞤪', &['𞤈']), + ('𞤫', &['𞤉']), + ('𞤬', &['𞤊']), + ('𞤭', &['𞤋']), + ('𞤮', &['𞤌']), + ('𞤯', &['𞤍']), + ('𞤰', &['𞤎']), + ('𞤱', &['𞤏']), + ('𞤲', &['𞤐']), + ('𞤳', &['𞤑']), + ('𞤴', &['𞤒']), + ('𞤵', &['𞤓']), + ('𞤶', &['𞤔']), + ('𞤷', &['𞤕']), + ('𞤸', &['𞤖']), + ('𞤹', &['𞤗']), + ('𞤺', &['𞤘']), + ('𞤻', &['𞤙']), + ('𞤼', &['𞤚']), + ('𞤽', &['𞤛']), + ('𞤾', &['𞤜']), + ('𞤿', &['𞤝']), + ('𞥀', &['𞤞']), + ('𞥁', &['𞤟']), + ('𞥂', &['𞤠']), + ('𞥃', &['𞤡']), +]; diff --git a/vendor/regex-syntax/src/unicode_tables/general_category.rs b/vendor/regex-syntax/src/unicode_tables/general_category.rs new file mode 100644 index 00000000000000..6ff6b5384db836 --- /dev/null +++ b/vendor/regex-syntax/src/unicode_tables/general_category.rs @@ -0,0 +1,6717 @@ +// DO NOT EDIT THIS FILE. IT WAS AUTOMATICALLY GENERATED BY: +// +// ucd-generate general-category ucd-16.0.0 --chars --exclude surrogate +// +// Unicode version: 16.0.0. +// +// ucd-generate 0.3.1 is available on crates.io. + +pub const BY_NAME: &'static [(&'static str, &'static [(char, char)])] = &[ + ("Cased_Letter", CASED_LETTER), + ("Close_Punctuation", CLOSE_PUNCTUATION), + ("Connector_Punctuation", CONNECTOR_PUNCTUATION), + ("Control", CONTROL), + ("Currency_Symbol", CURRENCY_SYMBOL), + ("Dash_Punctuation", DASH_PUNCTUATION), + ("Decimal_Number", DECIMAL_NUMBER), + ("Enclosing_Mark", ENCLOSING_MARK), + ("Final_Punctuation", FINAL_PUNCTUATION), + ("Format", FORMAT), + ("Initial_Punctuation", INITIAL_PUNCTUATION), + ("Letter", LETTER), + ("Letter_Number", LETTER_NUMBER), + ("Line_Separator", LINE_SEPARATOR), + ("Lowercase_Letter", LOWERCASE_LETTER), + ("Mark", MARK), + ("Math_Symbol", MATH_SYMBOL), + ("Modifier_Letter", MODIFIER_LETTER), + ("Modifier_Symbol", MODIFIER_SYMBOL), + ("Nonspacing_Mark", NONSPACING_MARK), + ("Number", NUMBER), + ("Open_Punctuation", OPEN_PUNCTUATION), + ("Other", OTHER), + ("Other_Letter", OTHER_LETTER), + ("Other_Number", OTHER_NUMBER), + ("Other_Punctuation", OTHER_PUNCTUATION), + ("Other_Symbol", OTHER_SYMBOL), + ("Paragraph_Separator", PARAGRAPH_SEPARATOR), + ("Private_Use", PRIVATE_USE), + ("Punctuation", PUNCTUATION), + ("Separator", SEPARATOR), + ("Space_Separator", SPACE_SEPARATOR), + ("Spacing_Mark", SPACING_MARK), + ("Symbol", SYMBOL), + ("Titlecase_Letter", TITLECASE_LETTER), + ("Unassigned", UNASSIGNED), + ("Uppercase_Letter", UPPERCASE_LETTER), +]; + +pub const CASED_LETTER: &'static [(char, char)] = &[ + ('A', 'Z'), + ('a', 'z'), + ('µ', 'µ'), + ('À', 'Ö'), + ('Ø', 'ö'), + ('ø', 'ƺ'), + ('Ƽ', 'ƿ'), + ('DŽ', 'ʓ'), + ('ʕ', 'ʯ'), + ('Ͱ', 'ͳ'), + ('Ͷ', 'ͷ'), + ('ͻ', 'ͽ'), + ('Ϳ', 'Ϳ'), + ('Ά', 'Ά'), + ('Έ', 'Ί'), + ('Ό', 'Ό'), + ('Ύ', 'Ρ'), + ('Σ', 'ϵ'), + ('Ϸ', 'ҁ'), + ('Ҋ', 'ԯ'), + ('Ա', 'Ֆ'), + ('ՠ', 'ֈ'), + ('Ⴀ', 'Ⴥ'), + ('Ⴧ', 'Ⴧ'), + ('Ⴭ', 'Ⴭ'), + ('ა', 'ჺ'), + ('ჽ', 'ჿ'), + ('Ꭰ', 'Ᏽ'), + ('ᏸ', 'ᏽ'), + ('ᲀ', 'ᲊ'), + ('Ა', 'Ჺ'), + ('Ჽ', 'Ჿ'), + ('ᴀ', 'ᴫ'), + ('ᵫ', 'ᵷ'), + ('ᵹ', 'ᶚ'), + ('Ḁ', 'ἕ'), + ('Ἐ', 'Ἕ'), + ('ἠ', 'ὅ'), + ('Ὀ', 'Ὅ'), + ('ὐ', 'ὗ'), + ('Ὑ', 'Ὑ'), + ('Ὓ', 'Ὓ'), + ('Ὕ', 'Ὕ'), + ('Ὗ', 'ώ'), + ('ᾀ', 'ᾴ'), + ('ᾶ', 'ᾼ'), + ('ι', 'ι'), + ('ῂ', 'ῄ'), + ('ῆ', 'ῌ'), + ('ῐ', 'ΐ'), + ('ῖ', 'Ί'), + ('ῠ', 'Ῥ'), + ('ῲ', 'ῴ'), + ('ῶ', 
'ῼ'), + ('ℂ', 'ℂ'), + ('ℇ', 'ℇ'), + ('ℊ', 'ℓ'), + ('ℕ', 'ℕ'), + ('ℙ', 'ℝ'), + ('ℤ', 'ℤ'), + ('Ω', 'Ω'), + ('ℨ', 'ℨ'), + ('K', 'ℭ'), + ('ℯ', 'ℴ'), + ('ℹ', 'ℹ'), + ('ℼ', 'ℿ'), + ('ⅅ', 'ⅉ'), + ('ⅎ', 'ⅎ'), + ('Ↄ', 'ↄ'), + ('Ⰰ', 'ⱻ'), + ('Ȿ', 'ⳤ'), + ('Ⳬ', 'ⳮ'), + ('Ⳳ', 'ⳳ'), + ('ⴀ', 'ⴥ'), + ('ⴧ', 'ⴧ'), + ('ⴭ', 'ⴭ'), + ('Ꙁ', 'ꙭ'), + ('Ꚁ', 'ꚛ'), + ('Ꜣ', 'ꝯ'), + ('ꝱ', 'ꞇ'), + ('Ꞌ', 'ꞎ'), + ('Ꞑ', 'ꟍ'), + ('Ꟑ', 'ꟑ'), + ('ꟓ', 'ꟓ'), + ('ꟕ', 'Ƛ'), + ('Ꟶ', 'ꟶ'), + ('ꟺ', 'ꟺ'), + ('ꬰ', 'ꭚ'), + ('ꭠ', 'ꭨ'), + ('ꭰ', 'ꮿ'), + ('ff', 'st'), + ('ﬓ', 'ﬗ'), + ('A', 'Z'), + ('a', 'z'), + ('𐐀', '𐑏'), + ('𐒰', '𐓓'), + ('𐓘', '𐓻'), + ('𐕰', '𐕺'), + ('𐕼', '𐖊'), + ('𐖌', '𐖒'), + ('𐖔', '𐖕'), + ('𐖗', '𐖡'), + ('𐖣', '𐖱'), + ('𐖳', '𐖹'), + ('𐖻', '𐖼'), + ('𐲀', '𐲲'), + ('𐳀', '𐳲'), + ('𐵐', '𐵥'), + ('𐵰', '𐶅'), + ('𑢠', '𑣟'), + ('𖹀', '𖹿'), + ('𝐀', '𝑔'), + ('𝑖', '𝒜'), + ('𝒞', '𝒟'), + ('𝒢', '𝒢'), + ('𝒥', '𝒦'), + ('𝒩', '𝒬'), + ('𝒮', '𝒹'), + ('𝒻', '𝒻'), + ('𝒽', '𝓃'), + ('𝓅', '𝔅'), + ('𝔇', '𝔊'), + ('𝔍', '𝔔'), + ('𝔖', '𝔜'), + ('𝔞', '𝔹'), + ('𝔻', '𝔾'), + ('𝕀', '𝕄'), + ('𝕆', '𝕆'), + ('𝕊', '𝕐'), + ('𝕒', '𝚥'), + ('𝚨', '𝛀'), + ('𝛂', '𝛚'), + ('𝛜', '𝛺'), + ('𝛼', '𝜔'), + ('𝜖', '𝜴'), + ('𝜶', '𝝎'), + ('𝝐', '𝝮'), + ('𝝰', '𝞈'), + ('𝞊', '𝞨'), + ('𝞪', '𝟂'), + ('𝟄', '𝟋'), + ('𝼀', '𝼉'), + ('𝼋', '𝼞'), + ('𝼥', '𝼪'), + ('𞤀', '𞥃'), +]; + +pub const CLOSE_PUNCTUATION: &'static [(char, char)] = &[ + (')', ')'), + (']', ']'), + ('}', '}'), + ('༻', '༻'), + ('༽', '༽'), + ('᚜', '᚜'), + ('⁆', '⁆'), + ('⁾', '⁾'), + ('₎', '₎'), + ('⌉', '⌉'), + ('⌋', '⌋'), + ('〉', '〉'), + ('❩', '❩'), + ('❫', '❫'), + ('❭', '❭'), + ('❯', '❯'), + ('❱', '❱'), + ('❳', '❳'), + ('❵', '❵'), + ('⟆', '⟆'), + ('⟧', '⟧'), + ('⟩', '⟩'), + ('⟫', '⟫'), + ('⟭', '⟭'), + ('⟯', '⟯'), + ('⦄', '⦄'), + ('⦆', '⦆'), + ('⦈', '⦈'), + ('⦊', '⦊'), + ('⦌', '⦌'), + ('⦎', '⦎'), + ('⦐', '⦐'), + ('⦒', '⦒'), + ('⦔', '⦔'), + ('⦖', '⦖'), + ('⦘', '⦘'), + ('⧙', '⧙'), + ('⧛', '⧛'), + ('⧽', '⧽'), + ('⸣', '⸣'), + ('⸥', '⸥'), + ('⸧', '⸧'), + ('⸩', '⸩'), + ('⹖', '⹖'), + ('⹘', '⹘'), + ('⹚', '⹚'), + ('⹜', '⹜'), + ('〉', '〉'), + ('》', '》'), + ('」', '」'), + ('』', '』'), + ('】', '】'), + ('〕', '〕'), + ('〗', '〗'), + ('〙', '〙'), + ('〛', '〛'), + ('〞', '〟'), + ('﴾', '﴾'), + ('︘', '︘'), + ('︶', '︶'), + ('︸', '︸'), + ('︺', '︺'), + ('︼', '︼'), + ('︾', '︾'), + ('﹀', '﹀'), + ('﹂', '﹂'), + ('﹄', '﹄'), + ('﹈', '﹈'), + ('﹚', '﹚'), + ('﹜', '﹜'), + ('﹞', '﹞'), + (')', ')'), + (']', ']'), + ('}', '}'), + ('⦆', '⦆'), + ('」', '」'), +]; + +pub const CONNECTOR_PUNCTUATION: &'static [(char, char)] = &[ + ('_', '_'), + ('‿', '⁀'), + ('⁔', '⁔'), + ('︳', '︴'), + ('﹍', '﹏'), + ('_', '_'), +]; + +pub const CONTROL: &'static [(char, char)] = + &[('\0', '\u{1f}'), ('\u{7f}', '\u{9f}')]; + +pub const CURRENCY_SYMBOL: &'static [(char, char)] = &[ + ('$', '$'), + ('¢', '¥'), + ('֏', '֏'), + ('؋', '؋'), + ('߾', '߿'), + ('৲', '৳'), + ('৻', '৻'), + ('૱', '૱'), + ('௹', '௹'), + ('฿', '฿'), + ('៛', '៛'), + ('₠', '⃀'), + ('꠸', '꠸'), + ('﷼', '﷼'), + ('﹩', '﹩'), + ('$', '$'), + ('¢', '£'), + ('¥', '₩'), + ('𑿝', '𑿠'), + ('𞋿', '𞋿'), + ('𞲰', '𞲰'), +]; + +pub const DASH_PUNCTUATION: &'static [(char, char)] = &[ + ('-', '-'), + ('֊', '֊'), + ('־', '־'), + ('᐀', '᐀'), + ('᠆', '᠆'), + ('‐', '―'), + ('⸗', '⸗'), + ('⸚', '⸚'), + ('⸺', '⸻'), + ('⹀', '⹀'), + ('⹝', '⹝'), + ('〜', '〜'), + ('〰', '〰'), + ('゠', '゠'), + ('︱', '︲'), + ('﹘', '﹘'), + ('﹣', '﹣'), + ('-', '-'), + ('𐵮', '𐵮'), + ('𐺭', '𐺭'), +]; + +pub const DECIMAL_NUMBER: &'static [(char, char)] = &[ + ('0', '9'), + ('٠', '٩'), + ('۰', '۹'), + ('߀', '߉'), + ('०', '९'), + ('০', '৯'), + ('੦', '੯'), + ('૦', '૯'), + ('୦', '୯'), + ('௦', 
'௯'), + ('౦', '౯'), + ('೦', '೯'), + ('൦', '൯'), + ('෦', '෯'), + ('๐', '๙'), + ('໐', '໙'), + ('༠', '༩'), + ('၀', '၉'), + ('႐', '႙'), + ('០', '៩'), + ('᠐', '᠙'), + ('᥆', '᥏'), + ('᧐', '᧙'), + ('᪀', '᪉'), + ('᪐', '᪙'), + ('᭐', '᭙'), + ('᮰', '᮹'), + ('᱀', '᱉'), + ('᱐', '᱙'), + ('꘠', '꘩'), + ('꣐', '꣙'), + ('꤀', '꤉'), + ('꧐', '꧙'), + ('꧰', '꧹'), + ('꩐', '꩙'), + ('꯰', '꯹'), + ('0', '9'), + ('𐒠', '𐒩'), + ('𐴰', '𐴹'), + ('𐵀', '𐵉'), + ('𑁦', '𑁯'), + ('𑃰', '𑃹'), + ('𑄶', '𑄿'), + ('𑇐', '𑇙'), + ('𑋰', '𑋹'), + ('𑑐', '𑑙'), + ('𑓐', '𑓙'), + ('𑙐', '𑙙'), + ('𑛀', '𑛉'), + ('𑛐', '𑛣'), + ('𑜰', '𑜹'), + ('𑣠', '𑣩'), + ('𑥐', '𑥙'), + ('𑯰', '𑯹'), + ('𑱐', '𑱙'), + ('𑵐', '𑵙'), + ('𑶠', '𑶩'), + ('𑽐', '𑽙'), + ('𖄰', '𖄹'), + ('𖩠', '𖩩'), + ('𖫀', '𖫉'), + ('𖭐', '𖭙'), + ('𖵰', '𖵹'), + ('𜳰', '𜳹'), + ('𝟎', '𝟿'), + ('𞅀', '𞅉'), + ('𞋰', '𞋹'), + ('𞓰', '𞓹'), + ('𞗱', '𞗺'), + ('𞥐', '𞥙'), + ('🯰', '🯹'), +]; + +pub const ENCLOSING_MARK: &'static [(char, char)] = &[ + ('\u{488}', '\u{489}'), + ('\u{1abe}', '\u{1abe}'), + ('\u{20dd}', '\u{20e0}'), + ('\u{20e2}', '\u{20e4}'), + ('\u{a670}', '\u{a672}'), +]; + +pub const FINAL_PUNCTUATION: &'static [(char, char)] = &[ + ('»', '»'), + ('’', '’'), + ('”', '”'), + ('›', '›'), + ('⸃', '⸃'), + ('⸅', '⸅'), + ('⸊', '⸊'), + ('⸍', '⸍'), + ('⸝', '⸝'), + ('⸡', '⸡'), +]; + +pub const FORMAT: &'static [(char, char)] = &[ + ('\u{ad}', '\u{ad}'), + ('\u{600}', '\u{605}'), + ('\u{61c}', '\u{61c}'), + ('\u{6dd}', '\u{6dd}'), + ('\u{70f}', '\u{70f}'), + ('\u{890}', '\u{891}'), + ('\u{8e2}', '\u{8e2}'), + ('\u{180e}', '\u{180e}'), + ('\u{200b}', '\u{200f}'), + ('\u{202a}', '\u{202e}'), + ('\u{2060}', '\u{2064}'), + ('\u{2066}', '\u{206f}'), + ('\u{feff}', '\u{feff}'), + ('\u{fff9}', '\u{fffb}'), + ('\u{110bd}', '\u{110bd}'), + ('\u{110cd}', '\u{110cd}'), + ('\u{13430}', '\u{1343f}'), + ('\u{1bca0}', '\u{1bca3}'), + ('\u{1d173}', '\u{1d17a}'), + ('\u{e0001}', '\u{e0001}'), + ('\u{e0020}', '\u{e007f}'), +]; + +pub const INITIAL_PUNCTUATION: &'static [(char, char)] = &[ + ('«', '«'), + ('‘', '‘'), + ('‛', '“'), + ('‟', '‟'), + ('‹', '‹'), + ('⸂', '⸂'), + ('⸄', '⸄'), + ('⸉', '⸉'), + ('⸌', '⸌'), + ('⸜', '⸜'), + ('⸠', '⸠'), +]; + +pub const LETTER: &'static [(char, char)] = &[ + ('A', 'Z'), + ('a', 'z'), + ('ª', 'ª'), + ('µ', 'µ'), + ('º', 'º'), + ('À', 'Ö'), + ('Ø', 'ö'), + ('ø', 'ˁ'), + ('ˆ', 'ˑ'), + ('ˠ', 'ˤ'), + ('ˬ', 'ˬ'), + ('ˮ', 'ˮ'), + ('Ͱ', 'ʹ'), + ('Ͷ', 'ͷ'), + ('ͺ', 'ͽ'), + ('Ϳ', 'Ϳ'), + ('Ά', 'Ά'), + ('Έ', 'Ί'), + ('Ό', 'Ό'), + ('Ύ', 'Ρ'), + ('Σ', 'ϵ'), + ('Ϸ', 'ҁ'), + ('Ҋ', 'ԯ'), + ('Ա', 'Ֆ'), + ('ՙ', 'ՙ'), + ('ՠ', 'ֈ'), + ('א', 'ת'), + ('ׯ', 'ײ'), + ('ؠ', 'ي'), + ('ٮ', 'ٯ'), + ('ٱ', 'ۓ'), + ('ە', 'ە'), + ('ۥ', 'ۦ'), + ('ۮ', 'ۯ'), + ('ۺ', 'ۼ'), + ('ۿ', 'ۿ'), + ('ܐ', 'ܐ'), + ('ܒ', 'ܯ'), + ('ݍ', 'ޥ'), + ('ޱ', 'ޱ'), + ('ߊ', 'ߪ'), + ('ߴ', 'ߵ'), + ('ߺ', 'ߺ'), + ('ࠀ', 'ࠕ'), + ('ࠚ', 'ࠚ'), + ('ࠤ', 'ࠤ'), + ('ࠨ', 'ࠨ'), + ('ࡀ', 'ࡘ'), + ('ࡠ', 'ࡪ'), + ('ࡰ', 'ࢇ'), + ('ࢉ', 'ࢎ'), + ('ࢠ', 'ࣉ'), + ('ऄ', 'ह'), + ('ऽ', 'ऽ'), + ('ॐ', 'ॐ'), + ('क़', 'ॡ'), + ('ॱ', 'ঀ'), + ('অ', 'ঌ'), + ('এ', 'ঐ'), + ('ও', 'ন'), + ('প', 'র'), + ('ল', 'ল'), + ('শ', 'হ'), + ('ঽ', 'ঽ'), + ('ৎ', 'ৎ'), + ('ড়', 'ঢ়'), + ('য়', 'ৡ'), + ('ৰ', 'ৱ'), + ('ৼ', 'ৼ'), + ('ਅ', 'ਊ'), + ('ਏ', 'ਐ'), + ('ਓ', 'ਨ'), + ('ਪ', 'ਰ'), + ('ਲ', 'ਲ਼'), + ('ਵ', 'ਸ਼'), + ('ਸ', 'ਹ'), + ('ਖ਼', 'ੜ'), + ('ਫ਼', 'ਫ਼'), + ('ੲ', 'ੴ'), + ('અ', 'ઍ'), + ('એ', 'ઑ'), + ('ઓ', 'ન'), + ('પ', 'ર'), + ('લ', 'ળ'), + ('વ', 'હ'), + ('ઽ', 'ઽ'), + ('ૐ', 'ૐ'), + ('ૠ', 'ૡ'), + ('ૹ', 'ૹ'), + ('ଅ', 'ଌ'), + ('ଏ', 'ଐ'), + ('ଓ', 'ନ'), + ('ପ', 'ର'), + ('ଲ', 'ଳ'), + ('ଵ', 'ହ'), + ('ଽ', 'ଽ'), + ('ଡ଼', 'ଢ଼'), + 
('ୟ', 'ୡ'), + ('ୱ', 'ୱ'), + ('ஃ', 'ஃ'), + ('அ', 'ஊ'), + ('எ', 'ஐ'), + ('ஒ', 'க'), + ('ங', 'ச'), + ('ஜ', 'ஜ'), + ('ஞ', 'ட'), + ('ண', 'த'), + ('ந', 'ப'), + ('ம', 'ஹ'), + ('ௐ', 'ௐ'), + ('అ', 'ఌ'), + ('ఎ', 'ఐ'), + ('ఒ', 'న'), + ('ప', 'హ'), + ('ఽ', 'ఽ'), + ('ౘ', 'ౚ'), + ('ౝ', 'ౝ'), + ('ౠ', 'ౡ'), + ('ಀ', 'ಀ'), + ('ಅ', 'ಌ'), + ('ಎ', 'ಐ'), + ('ಒ', 'ನ'), + ('ಪ', 'ಳ'), + ('ವ', 'ಹ'), + ('ಽ', 'ಽ'), + ('ೝ', 'ೞ'), + ('ೠ', 'ೡ'), + ('ೱ', 'ೲ'), + ('ഄ', 'ഌ'), + ('എ', 'ഐ'), + ('ഒ', 'ഺ'), + ('ഽ', 'ഽ'), + ('ൎ', 'ൎ'), + ('ൔ', 'ൖ'), + ('ൟ', 'ൡ'), + ('ൺ', 'ൿ'), + ('අ', 'ඖ'), + ('ක', 'න'), + ('ඳ', 'ර'), + ('ල', 'ල'), + ('ව', 'ෆ'), + ('ก', 'ะ'), + ('า', 'ำ'), + ('เ', 'ๆ'), + ('ກ', 'ຂ'), + ('ຄ', 'ຄ'), + ('ຆ', 'ຊ'), + ('ຌ', 'ຣ'), + ('ລ', 'ລ'), + ('ວ', 'ະ'), + ('າ', 'ຳ'), + ('ຽ', 'ຽ'), + ('ເ', 'ໄ'), + ('ໆ', 'ໆ'), + ('ໜ', 'ໟ'), + ('ༀ', 'ༀ'), + ('ཀ', 'ཇ'), + ('ཉ', 'ཬ'), + ('ྈ', 'ྌ'), + ('က', 'ဪ'), + ('ဿ', 'ဿ'), + ('ၐ', 'ၕ'), + ('ၚ', 'ၝ'), + ('ၡ', 'ၡ'), + ('ၥ', 'ၦ'), + ('ၮ', 'ၰ'), + ('ၵ', 'ႁ'), + ('ႎ', 'ႎ'), + ('Ⴀ', 'Ⴥ'), + ('Ⴧ', 'Ⴧ'), + ('Ⴭ', 'Ⴭ'), + ('ა', 'ჺ'), + ('ჼ', 'ቈ'), + ('ቊ', 'ቍ'), + ('ቐ', 'ቖ'), + ('ቘ', 'ቘ'), + ('ቚ', 'ቝ'), + ('በ', 'ኈ'), + ('ኊ', 'ኍ'), + ('ነ', 'ኰ'), + ('ኲ', 'ኵ'), + ('ኸ', 'ኾ'), + ('ዀ', 'ዀ'), + ('ዂ', 'ዅ'), + ('ወ', 'ዖ'), + ('ዘ', 'ጐ'), + ('ጒ', 'ጕ'), + ('ጘ', 'ፚ'), + ('ᎀ', 'ᎏ'), + ('Ꭰ', 'Ᏽ'), + ('ᏸ', 'ᏽ'), + ('ᐁ', 'ᙬ'), + ('ᙯ', 'ᙿ'), + ('ᚁ', 'ᚚ'), + ('ᚠ', 'ᛪ'), + ('ᛱ', 'ᛸ'), + ('ᜀ', 'ᜑ'), + ('ᜟ', 'ᜱ'), + ('ᝀ', 'ᝑ'), + ('ᝠ', 'ᝬ'), + ('ᝮ', 'ᝰ'), + ('ក', 'ឳ'), + ('ៗ', 'ៗ'), + ('ៜ', 'ៜ'), + ('ᠠ', 'ᡸ'), + ('ᢀ', 'ᢄ'), + ('ᢇ', 'ᢨ'), + ('ᢪ', 'ᢪ'), + ('ᢰ', 'ᣵ'), + ('ᤀ', 'ᤞ'), + ('ᥐ', 'ᥭ'), + ('ᥰ', 'ᥴ'), + ('ᦀ', 'ᦫ'), + ('ᦰ', 'ᧉ'), + ('ᨀ', 'ᨖ'), + ('ᨠ', 'ᩔ'), + ('ᪧ', 'ᪧ'), + ('ᬅ', 'ᬳ'), + ('ᭅ', 'ᭌ'), + ('ᮃ', 'ᮠ'), + ('ᮮ', 'ᮯ'), + ('ᮺ', 'ᯥ'), + ('ᰀ', 'ᰣ'), + ('ᱍ', 'ᱏ'), + ('ᱚ', 'ᱽ'), + ('ᲀ', 'ᲊ'), + ('Ა', 'Ჺ'), + ('Ჽ', 'Ჿ'), + ('ᳩ', 'ᳬ'), + ('ᳮ', 'ᳳ'), + ('ᳵ', 'ᳶ'), + ('ᳺ', 'ᳺ'), + ('ᴀ', 'ᶿ'), + ('Ḁ', 'ἕ'), + ('Ἐ', 'Ἕ'), + ('ἠ', 'ὅ'), + ('Ὀ', 'Ὅ'), + ('ὐ', 'ὗ'), + ('Ὑ', 'Ὑ'), + ('Ὓ', 'Ὓ'), + ('Ὕ', 'Ὕ'), + ('Ὗ', 'ώ'), + ('ᾀ', 'ᾴ'), + ('ᾶ', 'ᾼ'), + ('ι', 'ι'), + ('ῂ', 'ῄ'), + ('ῆ', 'ῌ'), + ('ῐ', 'ΐ'), + ('ῖ', 'Ί'), + ('ῠ', 'Ῥ'), + ('ῲ', 'ῴ'), + ('ῶ', 'ῼ'), + ('ⁱ', 'ⁱ'), + ('ⁿ', 'ⁿ'), + ('ₐ', 'ₜ'), + ('ℂ', 'ℂ'), + ('ℇ', 'ℇ'), + ('ℊ', 'ℓ'), + ('ℕ', 'ℕ'), + ('ℙ', 'ℝ'), + ('ℤ', 'ℤ'), + ('Ω', 'Ω'), + ('ℨ', 'ℨ'), + ('K', 'ℭ'), + ('ℯ', 'ℹ'), + ('ℼ', 'ℿ'), + ('ⅅ', 'ⅉ'), + ('ⅎ', 'ⅎ'), + ('Ↄ', 'ↄ'), + ('Ⰰ', 'ⳤ'), + ('Ⳬ', 'ⳮ'), + ('Ⳳ', 'ⳳ'), + ('ⴀ', 'ⴥ'), + ('ⴧ', 'ⴧ'), + ('ⴭ', 'ⴭ'), + ('ⴰ', 'ⵧ'), + ('ⵯ', 'ⵯ'), + ('ⶀ', 'ⶖ'), + ('ⶠ', 'ⶦ'), + ('ⶨ', 'ⶮ'), + ('ⶰ', 'ⶶ'), + ('ⶸ', 'ⶾ'), + ('ⷀ', 'ⷆ'), + ('ⷈ', 'ⷎ'), + ('ⷐ', 'ⷖ'), + ('ⷘ', 'ⷞ'), + ('ⸯ', 'ⸯ'), + ('々', '〆'), + ('〱', '〵'), + ('〻', '〼'), + ('ぁ', 'ゖ'), + ('ゝ', 'ゟ'), + ('ァ', 'ヺ'), + ('ー', 'ヿ'), + ('ㄅ', 'ㄯ'), + ('ㄱ', 'ㆎ'), + ('ㆠ', 'ㆿ'), + ('ㇰ', 'ㇿ'), + ('㐀', '䶿'), + ('一', 'ꒌ'), + ('ꓐ', 'ꓽ'), + ('ꔀ', 'ꘌ'), + ('ꘐ', 'ꘟ'), + ('ꘪ', 'ꘫ'), + ('Ꙁ', 'ꙮ'), + ('ꙿ', 'ꚝ'), + ('ꚠ', 'ꛥ'), + ('ꜗ', 'ꜟ'), + ('Ꜣ', 'ꞈ'), + ('Ꞌ', 'ꟍ'), + ('Ꟑ', 'ꟑ'), + ('ꟓ', 'ꟓ'), + ('ꟕ', 'Ƛ'), + ('ꟲ', 'ꠁ'), + ('ꠃ', 'ꠅ'), + ('ꠇ', 'ꠊ'), + ('ꠌ', 'ꠢ'), + ('ꡀ', 'ꡳ'), + ('ꢂ', 'ꢳ'), + ('ꣲ', 'ꣷ'), + ('ꣻ', 'ꣻ'), + ('ꣽ', 'ꣾ'), + ('ꤊ', 'ꤥ'), + ('ꤰ', 'ꥆ'), + ('ꥠ', 'ꥼ'), + ('ꦄ', 'ꦲ'), + ('ꧏ', 'ꧏ'), + ('ꧠ', 'ꧤ'), + ('ꧦ', 'ꧯ'), + ('ꧺ', 'ꧾ'), + ('ꨀ', 'ꨨ'), + ('ꩀ', 'ꩂ'), + ('ꩄ', 'ꩋ'), + ('ꩠ', 'ꩶ'), + ('ꩺ', 'ꩺ'), + ('ꩾ', 'ꪯ'), + ('ꪱ', 'ꪱ'), + ('ꪵ', 'ꪶ'), + ('ꪹ', 'ꪽ'), + ('ꫀ', 'ꫀ'), + ('ꫂ', 'ꫂ'), + ('ꫛ', 'ꫝ'), + ('ꫠ', 'ꫪ'), + ('ꫲ', 'ꫴ'), + ('ꬁ', 'ꬆ'), + ('ꬉ', 'ꬎ'), + ('ꬑ', 'ꬖ'), + ('ꬠ', 'ꬦ'), + ('ꬨ', 'ꬮ'), + ('ꬰ', 'ꭚ'), + ('ꭜ', 'ꭩ'), 
+ ('ꭰ', 'ꯢ'), + ('가', '힣'), + ('ힰ', 'ퟆ'), + ('ퟋ', 'ퟻ'), + ('豈', '舘'), + ('並', '龎'), + ('ff', 'st'), + ('ﬓ', 'ﬗ'), + ('יִ', 'יִ'), + ('ײַ', 'ﬨ'), + ('שׁ', 'זּ'), + ('טּ', 'לּ'), + ('מּ', 'מּ'), + ('נּ', 'סּ'), + ('ףּ', 'פּ'), + ('צּ', 'ﮱ'), + ('ﯓ', 'ﴽ'), + ('ﵐ', 'ﶏ'), + ('ﶒ', 'ﷇ'), + ('ﷰ', 'ﷻ'), + ('ﹰ', 'ﹴ'), + ('ﹶ', 'ﻼ'), + ('A', 'Z'), + ('a', 'z'), + ('ヲ', 'ᄒ'), + ('ᅡ', 'ᅦ'), + ('ᅧ', 'ᅬ'), + ('ᅭ', 'ᅲ'), + ('ᅳ', 'ᅵ'), + ('𐀀', '𐀋'), + ('𐀍', '𐀦'), + ('𐀨', '𐀺'), + ('𐀼', '𐀽'), + ('𐀿', '𐁍'), + ('𐁐', '𐁝'), + ('𐂀', '𐃺'), + ('𐊀', '𐊜'), + ('𐊠', '𐋐'), + ('𐌀', '𐌟'), + ('𐌭', '𐍀'), + ('𐍂', '𐍉'), + ('𐍐', '𐍵'), + ('𐎀', '𐎝'), + ('𐎠', '𐏃'), + ('𐏈', '𐏏'), + ('𐐀', '𐒝'), + ('𐒰', '𐓓'), + ('𐓘', '𐓻'), + ('𐔀', '𐔧'), + ('𐔰', '𐕣'), + ('𐕰', '𐕺'), + ('𐕼', '𐖊'), + ('𐖌', '𐖒'), + ('𐖔', '𐖕'), + ('𐖗', '𐖡'), + ('𐖣', '𐖱'), + ('𐖳', '𐖹'), + ('𐖻', '𐖼'), + ('𐗀', '𐗳'), + ('𐘀', '𐜶'), + ('𐝀', '𐝕'), + ('𐝠', '𐝧'), + ('𐞀', '𐞅'), + ('𐞇', '𐞰'), + ('𐞲', '𐞺'), + ('𐠀', '𐠅'), + ('𐠈', '𐠈'), + ('𐠊', '𐠵'), + ('𐠷', '𐠸'), + ('𐠼', '𐠼'), + ('𐠿', '𐡕'), + ('𐡠', '𐡶'), + ('𐢀', '𐢞'), + ('𐣠', '𐣲'), + ('𐣴', '𐣵'), + ('𐤀', '𐤕'), + ('𐤠', '𐤹'), + ('𐦀', '𐦷'), + ('𐦾', '𐦿'), + ('𐨀', '𐨀'), + ('𐨐', '𐨓'), + ('𐨕', '𐨗'), + ('𐨙', '𐨵'), + ('𐩠', '𐩼'), + ('𐪀', '𐪜'), + ('𐫀', '𐫇'), + ('𐫉', '𐫤'), + ('𐬀', '𐬵'), + ('𐭀', '𐭕'), + ('𐭠', '𐭲'), + ('𐮀', '𐮑'), + ('𐰀', '𐱈'), + ('𐲀', '𐲲'), + ('𐳀', '𐳲'), + ('𐴀', '𐴣'), + ('𐵊', '𐵥'), + ('𐵯', '𐶅'), + ('𐺀', '𐺩'), + ('𐺰', '𐺱'), + ('𐻂', '𐻄'), + ('𐼀', '𐼜'), + ('𐼧', '𐼧'), + ('𐼰', '𐽅'), + ('𐽰', '𐾁'), + ('𐾰', '𐿄'), + ('𐿠', '𐿶'), + ('𑀃', '𑀷'), + ('𑁱', '𑁲'), + ('𑁵', '𑁵'), + ('𑂃', '𑂯'), + ('𑃐', '𑃨'), + ('𑄃', '𑄦'), + ('𑅄', '𑅄'), + ('𑅇', '𑅇'), + ('𑅐', '𑅲'), + ('𑅶', '𑅶'), + ('𑆃', '𑆲'), + ('𑇁', '𑇄'), + ('𑇚', '𑇚'), + ('𑇜', '𑇜'), + ('𑈀', '𑈑'), + ('𑈓', '𑈫'), + ('𑈿', '𑉀'), + ('𑊀', '𑊆'), + ('𑊈', '𑊈'), + ('𑊊', '𑊍'), + ('𑊏', '𑊝'), + ('𑊟', '𑊨'), + ('𑊰', '𑋞'), + ('𑌅', '𑌌'), + ('𑌏', '𑌐'), + ('𑌓', '𑌨'), + ('𑌪', '𑌰'), + ('𑌲', '𑌳'), + ('𑌵', '𑌹'), + ('𑌽', '𑌽'), + ('𑍐', '𑍐'), + ('𑍝', '𑍡'), + ('𑎀', '𑎉'), + ('𑎋', '𑎋'), + ('𑎎', '𑎎'), + ('𑎐', '𑎵'), + ('𑎷', '𑎷'), + ('𑏑', '𑏑'), + ('𑏓', '𑏓'), + ('𑐀', '𑐴'), + ('𑑇', '𑑊'), + ('𑑟', '𑑡'), + ('𑒀', '𑒯'), + ('𑓄', '𑓅'), + ('𑓇', '𑓇'), + ('𑖀', '𑖮'), + ('𑗘', '𑗛'), + ('𑘀', '𑘯'), + ('𑙄', '𑙄'), + ('𑚀', '𑚪'), + ('𑚸', '𑚸'), + ('𑜀', '𑜚'), + ('𑝀', '𑝆'), + ('𑠀', '𑠫'), + ('𑢠', '𑣟'), + ('𑣿', '𑤆'), + ('𑤉', '𑤉'), + ('𑤌', '𑤓'), + ('𑤕', '𑤖'), + ('𑤘', '𑤯'), + ('𑤿', '𑤿'), + ('𑥁', '𑥁'), + ('𑦠', '𑦧'), + ('𑦪', '𑧐'), + ('𑧡', '𑧡'), + ('𑧣', '𑧣'), + ('𑨀', '𑨀'), + ('𑨋', '𑨲'), + ('𑨺', '𑨺'), + ('𑩐', '𑩐'), + ('𑩜', '𑪉'), + ('𑪝', '𑪝'), + ('𑪰', '𑫸'), + ('𑯀', '𑯠'), + ('𑰀', '𑰈'), + ('𑰊', '𑰮'), + ('𑱀', '𑱀'), + ('𑱲', '𑲏'), + ('𑴀', '𑴆'), + ('𑴈', '𑴉'), + ('𑴋', '𑴰'), + ('𑵆', '𑵆'), + ('𑵠', '𑵥'), + ('𑵧', '𑵨'), + ('𑵪', '𑶉'), + ('𑶘', '𑶘'), + ('𑻠', '𑻲'), + ('𑼂', '𑼂'), + ('𑼄', '𑼐'), + ('𑼒', '𑼳'), + ('𑾰', '𑾰'), + ('𒀀', '𒎙'), + ('𒒀', '𒕃'), + ('𒾐', '𒿰'), + ('𓀀', '𓐯'), + ('𓑁', '𓑆'), + ('𓑠', '𔏺'), + ('𔐀', '𔙆'), + ('𖄀', '𖄝'), + ('𖠀', '𖨸'), + ('𖩀', '𖩞'), + ('𖩰', '𖪾'), + ('𖫐', '𖫭'), + ('𖬀', '𖬯'), + ('𖭀', '𖭃'), + ('𖭣', '𖭷'), + ('𖭽', '𖮏'), + ('𖵀', '𖵬'), + ('𖹀', '𖹿'), + ('𖼀', '𖽊'), + ('𖽐', '𖽐'), + ('𖾓', '𖾟'), + ('𖿠', '𖿡'), + ('𖿣', '𖿣'), + ('𗀀', '𘟷'), + ('𘠀', '𘳕'), + ('𘳿', '𘴈'), + ('𚿰', '𚿳'), + ('𚿵', '𚿻'), + ('𚿽', '𚿾'), + ('𛀀', '𛄢'), + ('𛄲', '𛄲'), + ('𛅐', '𛅒'), + ('𛅕', '𛅕'), + ('𛅤', '𛅧'), + ('𛅰', '𛋻'), + ('𛰀', '𛱪'), + ('𛱰', '𛱼'), + ('𛲀', '𛲈'), + ('𛲐', '𛲙'), + ('𝐀', '𝑔'), + ('𝑖', '𝒜'), + ('𝒞', '𝒟'), + ('𝒢', '𝒢'), + ('𝒥', '𝒦'), + ('𝒩', '𝒬'), + ('𝒮', '𝒹'), + ('𝒻', '𝒻'), + ('𝒽', '𝓃'), + ('𝓅', '𝔅'), + ('𝔇', '𝔊'), + ('𝔍', '𝔔'), + ('𝔖', '𝔜'), + ('𝔞', '𝔹'), + ('𝔻', '𝔾'), + ('𝕀', '𝕄'), + ('𝕆', 
'𝕆'), + ('𝕊', '𝕐'), + ('𝕒', '𝚥'), + ('𝚨', '𝛀'), + ('𝛂', '𝛚'), + ('𝛜', '𝛺'), + ('𝛼', '𝜔'), + ('𝜖', '𝜴'), + ('𝜶', '𝝎'), + ('𝝐', '𝝮'), + ('𝝰', '𝞈'), + ('𝞊', '𝞨'), + ('𝞪', '𝟂'), + ('𝟄', '𝟋'), + ('𝼀', '𝼞'), + ('𝼥', '𝼪'), + ('𞀰', '𞁭'), + ('𞄀', '𞄬'), + ('𞄷', '𞄽'), + ('𞅎', '𞅎'), + ('𞊐', '𞊭'), + ('𞋀', '𞋫'), + ('𞓐', '𞓫'), + ('𞗐', '𞗭'), + ('𞗰', '𞗰'), + ('𞟠', '𞟦'), + ('𞟨', '𞟫'), + ('𞟭', '𞟮'), + ('𞟰', '𞟾'), + ('𞠀', '𞣄'), + ('𞤀', '𞥃'), + ('𞥋', '𞥋'), + ('𞸀', '𞸃'), + ('𞸅', '𞸟'), + ('𞸡', '𞸢'), + ('𞸤', '𞸤'), + ('𞸧', '𞸧'), + ('𞸩', '𞸲'), + ('𞸴', '𞸷'), + ('𞸹', '𞸹'), + ('𞸻', '𞸻'), + ('𞹂', '𞹂'), + ('𞹇', '𞹇'), + ('𞹉', '𞹉'), + ('𞹋', '𞹋'), + ('𞹍', '𞹏'), + ('𞹑', '𞹒'), + ('𞹔', '𞹔'), + ('𞹗', '𞹗'), + ('𞹙', '𞹙'), + ('𞹛', '𞹛'), + ('𞹝', '𞹝'), + ('𞹟', '𞹟'), + ('𞹡', '𞹢'), + ('𞹤', '𞹤'), + ('𞹧', '𞹪'), + ('𞹬', '𞹲'), + ('𞹴', '𞹷'), + ('𞹹', '𞹼'), + ('𞹾', '𞹾'), + ('𞺀', '𞺉'), + ('𞺋', '𞺛'), + ('𞺡', '𞺣'), + ('𞺥', '𞺩'), + ('𞺫', '𞺻'), + ('𠀀', '𪛟'), + ('𪜀', '𫜹'), + ('𫝀', '𫠝'), + ('𫠠', '𬺡'), + ('𬺰', '𮯠'), + ('𮯰', '𮹝'), + ('丽', '𪘀'), + ('𰀀', '𱍊'), + ('𱍐', '𲎯'), +]; + +pub const LETTER_NUMBER: &'static [(char, char)] = &[ + ('ᛮ', 'ᛰ'), + ('Ⅰ', 'ↂ'), + ('ↅ', 'ↈ'), + ('〇', '〇'), + ('〡', '〩'), + ('〸', '〺'), + ('ꛦ', 'ꛯ'), + ('𐅀', '𐅴'), + ('𐍁', '𐍁'), + ('𐍊', '𐍊'), + ('𐏑', '𐏕'), + ('𒐀', '𒑮'), +]; + +pub const LINE_SEPARATOR: &'static [(char, char)] = + &[('\u{2028}', '\u{2028}')]; + +pub const LOWERCASE_LETTER: &'static [(char, char)] = &[ + ('a', 'z'), + ('µ', 'µ'), + ('ß', 'ö'), + ('ø', 'ÿ'), + ('ā', 'ā'), + ('ă', 'ă'), + ('ą', 'ą'), + ('ć', 'ć'), + ('ĉ', 'ĉ'), + ('ċ', 'ċ'), + ('č', 'č'), + ('ď', 'ď'), + ('đ', 'đ'), + ('ē', 'ē'), + ('ĕ', 'ĕ'), + ('ė', 'ė'), + ('ę', 'ę'), + ('ě', 'ě'), + ('ĝ', 'ĝ'), + ('ğ', 'ğ'), + ('ġ', 'ġ'), + ('ģ', 'ģ'), + ('ĥ', 'ĥ'), + ('ħ', 'ħ'), + ('ĩ', 'ĩ'), + ('ī', 'ī'), + ('ĭ', 'ĭ'), + ('į', 'į'), + ('ı', 'ı'), + ('ij', 'ij'), + ('ĵ', 'ĵ'), + ('ķ', 'ĸ'), + ('ĺ', 'ĺ'), + ('ļ', 'ļ'), + ('ľ', 'ľ'), + ('ŀ', 'ŀ'), + ('ł', 'ł'), + ('ń', 'ń'), + ('ņ', 'ņ'), + ('ň', 'ʼn'), + ('ŋ', 'ŋ'), + ('ō', 'ō'), + ('ŏ', 'ŏ'), + ('ő', 'ő'), + ('œ', 'œ'), + ('ŕ', 'ŕ'), + ('ŗ', 'ŗ'), + ('ř', 'ř'), + ('ś', 'ś'), + ('ŝ', 'ŝ'), + ('ş', 'ş'), + ('š', 'š'), + ('ţ', 'ţ'), + ('ť', 'ť'), + ('ŧ', 'ŧ'), + ('ũ', 'ũ'), + ('ū', 'ū'), + ('ŭ', 'ŭ'), + ('ů', 'ů'), + ('ű', 'ű'), + ('ų', 'ų'), + ('ŵ', 'ŵ'), + ('ŷ', 'ŷ'), + ('ź', 'ź'), + ('ż', 'ż'), + ('ž', 'ƀ'), + ('ƃ', 'ƃ'), + ('ƅ', 'ƅ'), + ('ƈ', 'ƈ'), + ('ƌ', 'ƍ'), + ('ƒ', 'ƒ'), + ('ƕ', 'ƕ'), + ('ƙ', 'ƛ'), + ('ƞ', 'ƞ'), + ('ơ', 'ơ'), + ('ƣ', 'ƣ'), + ('ƥ', 'ƥ'), + ('ƨ', 'ƨ'), + ('ƪ', 'ƫ'), + ('ƭ', 'ƭ'), + ('ư', 'ư'), + ('ƴ', 'ƴ'), + ('ƶ', 'ƶ'), + ('ƹ', 'ƺ'), + ('ƽ', 'ƿ'), + ('dž', 'dž'), + ('lj', 'lj'), + ('nj', 'nj'), + ('ǎ', 'ǎ'), + ('ǐ', 'ǐ'), + ('ǒ', 'ǒ'), + ('ǔ', 'ǔ'), + ('ǖ', 'ǖ'), + ('ǘ', 'ǘ'), + ('ǚ', 'ǚ'), + ('ǜ', 'ǝ'), + ('ǟ', 'ǟ'), + ('ǡ', 'ǡ'), + ('ǣ', 'ǣ'), + ('ǥ', 'ǥ'), + ('ǧ', 'ǧ'), + ('ǩ', 'ǩ'), + ('ǫ', 'ǫ'), + ('ǭ', 'ǭ'), + ('ǯ', 'ǰ'), + ('dz', 'dz'), + ('ǵ', 'ǵ'), + ('ǹ', 'ǹ'), + ('ǻ', 'ǻ'), + ('ǽ', 'ǽ'), + ('ǿ', 'ǿ'), + ('ȁ', 'ȁ'), + ('ȃ', 'ȃ'), + ('ȅ', 'ȅ'), + ('ȇ', 'ȇ'), + ('ȉ', 'ȉ'), + ('ȋ', 'ȋ'), + ('ȍ', 'ȍ'), + ('ȏ', 'ȏ'), + ('ȑ', 'ȑ'), + ('ȓ', 'ȓ'), + ('ȕ', 'ȕ'), + ('ȗ', 'ȗ'), + ('ș', 'ș'), + ('ț', 'ț'), + ('ȝ', 'ȝ'), + ('ȟ', 'ȟ'), + ('ȡ', 'ȡ'), + ('ȣ', 'ȣ'), + ('ȥ', 'ȥ'), + ('ȧ', 'ȧ'), + ('ȩ', 'ȩ'), + ('ȫ', 'ȫ'), + ('ȭ', 'ȭ'), + ('ȯ', 'ȯ'), + ('ȱ', 'ȱ'), + ('ȳ', 'ȹ'), + ('ȼ', 'ȼ'), + ('ȿ', 'ɀ'), + ('ɂ', 'ɂ'), + ('ɇ', 'ɇ'), + ('ɉ', 'ɉ'), + ('ɋ', 'ɋ'), + ('ɍ', 'ɍ'), + ('ɏ', 'ʓ'), + ('ʕ', 'ʯ'), + ('ͱ', 'ͱ'), + ('ͳ', 'ͳ'), + ('ͷ', 'ͷ'), + ('ͻ', 'ͽ'), + ('ΐ', 'ΐ'), + ('ά', 'ώ'), + ('ϐ', 
'ϑ'), + ('ϕ', 'ϗ'), + ('ϙ', 'ϙ'), + ('ϛ', 'ϛ'), + ('ϝ', 'ϝ'), + ('ϟ', 'ϟ'), + ('ϡ', 'ϡ'), + ('ϣ', 'ϣ'), + ('ϥ', 'ϥ'), + ('ϧ', 'ϧ'), + ('ϩ', 'ϩ'), + ('ϫ', 'ϫ'), + ('ϭ', 'ϭ'), + ('ϯ', 'ϳ'), + ('ϵ', 'ϵ'), + ('ϸ', 'ϸ'), + ('ϻ', 'ϼ'), + ('а', 'џ'), + ('ѡ', 'ѡ'), + ('ѣ', 'ѣ'), + ('ѥ', 'ѥ'), + ('ѧ', 'ѧ'), + ('ѩ', 'ѩ'), + ('ѫ', 'ѫ'), + ('ѭ', 'ѭ'), + ('ѯ', 'ѯ'), + ('ѱ', 'ѱ'), + ('ѳ', 'ѳ'), + ('ѵ', 'ѵ'), + ('ѷ', 'ѷ'), + ('ѹ', 'ѹ'), + ('ѻ', 'ѻ'), + ('ѽ', 'ѽ'), + ('ѿ', 'ѿ'), + ('ҁ', 'ҁ'), + ('ҋ', 'ҋ'), + ('ҍ', 'ҍ'), + ('ҏ', 'ҏ'), + ('ґ', 'ґ'), + ('ғ', 'ғ'), + ('ҕ', 'ҕ'), + ('җ', 'җ'), + ('ҙ', 'ҙ'), + ('қ', 'қ'), + ('ҝ', 'ҝ'), + ('ҟ', 'ҟ'), + ('ҡ', 'ҡ'), + ('ң', 'ң'), + ('ҥ', 'ҥ'), + ('ҧ', 'ҧ'), + ('ҩ', 'ҩ'), + ('ҫ', 'ҫ'), + ('ҭ', 'ҭ'), + ('ү', 'ү'), + ('ұ', 'ұ'), + ('ҳ', 'ҳ'), + ('ҵ', 'ҵ'), + ('ҷ', 'ҷ'), + ('ҹ', 'ҹ'), + ('һ', 'һ'), + ('ҽ', 'ҽ'), + ('ҿ', 'ҿ'), + ('ӂ', 'ӂ'), + ('ӄ', 'ӄ'), + ('ӆ', 'ӆ'), + ('ӈ', 'ӈ'), + ('ӊ', 'ӊ'), + ('ӌ', 'ӌ'), + ('ӎ', 'ӏ'), + ('ӑ', 'ӑ'), + ('ӓ', 'ӓ'), + ('ӕ', 'ӕ'), + ('ӗ', 'ӗ'), + ('ә', 'ә'), + ('ӛ', 'ӛ'), + ('ӝ', 'ӝ'), + ('ӟ', 'ӟ'), + ('ӡ', 'ӡ'), + ('ӣ', 'ӣ'), + ('ӥ', 'ӥ'), + ('ӧ', 'ӧ'), + ('ө', 'ө'), + ('ӫ', 'ӫ'), + ('ӭ', 'ӭ'), + ('ӯ', 'ӯ'), + ('ӱ', 'ӱ'), + ('ӳ', 'ӳ'), + ('ӵ', 'ӵ'), + ('ӷ', 'ӷ'), + ('ӹ', 'ӹ'), + ('ӻ', 'ӻ'), + ('ӽ', 'ӽ'), + ('ӿ', 'ӿ'), + ('ԁ', 'ԁ'), + ('ԃ', 'ԃ'), + ('ԅ', 'ԅ'), + ('ԇ', 'ԇ'), + ('ԉ', 'ԉ'), + ('ԋ', 'ԋ'), + ('ԍ', 'ԍ'), + ('ԏ', 'ԏ'), + ('ԑ', 'ԑ'), + ('ԓ', 'ԓ'), + ('ԕ', 'ԕ'), + ('ԗ', 'ԗ'), + ('ԙ', 'ԙ'), + ('ԛ', 'ԛ'), + ('ԝ', 'ԝ'), + ('ԟ', 'ԟ'), + ('ԡ', 'ԡ'), + ('ԣ', 'ԣ'), + ('ԥ', 'ԥ'), + ('ԧ', 'ԧ'), + ('ԩ', 'ԩ'), + ('ԫ', 'ԫ'), + ('ԭ', 'ԭ'), + ('ԯ', 'ԯ'), + ('ՠ', 'ֈ'), + ('ა', 'ჺ'), + ('ჽ', 'ჿ'), + ('ᏸ', 'ᏽ'), + ('ᲀ', 'ᲈ'), + ('ᲊ', 'ᲊ'), + ('ᴀ', 'ᴫ'), + ('ᵫ', 'ᵷ'), + ('ᵹ', 'ᶚ'), + ('ḁ', 'ḁ'), + ('ḃ', 'ḃ'), + ('ḅ', 'ḅ'), + ('ḇ', 'ḇ'), + ('ḉ', 'ḉ'), + ('ḋ', 'ḋ'), + ('ḍ', 'ḍ'), + ('ḏ', 'ḏ'), + ('ḑ', 'ḑ'), + ('ḓ', 'ḓ'), + ('ḕ', 'ḕ'), + ('ḗ', 'ḗ'), + ('ḙ', 'ḙ'), + ('ḛ', 'ḛ'), + ('ḝ', 'ḝ'), + ('ḟ', 'ḟ'), + ('ḡ', 'ḡ'), + ('ḣ', 'ḣ'), + ('ḥ', 'ḥ'), + ('ḧ', 'ḧ'), + ('ḩ', 'ḩ'), + ('ḫ', 'ḫ'), + ('ḭ', 'ḭ'), + ('ḯ', 'ḯ'), + ('ḱ', 'ḱ'), + ('ḳ', 'ḳ'), + ('ḵ', 'ḵ'), + ('ḷ', 'ḷ'), + ('ḹ', 'ḹ'), + ('ḻ', 'ḻ'), + ('ḽ', 'ḽ'), + ('ḿ', 'ḿ'), + ('ṁ', 'ṁ'), + ('ṃ', 'ṃ'), + ('ṅ', 'ṅ'), + ('ṇ', 'ṇ'), + ('ṉ', 'ṉ'), + ('ṋ', 'ṋ'), + ('ṍ', 'ṍ'), + ('ṏ', 'ṏ'), + ('ṑ', 'ṑ'), + ('ṓ', 'ṓ'), + ('ṕ', 'ṕ'), + ('ṗ', 'ṗ'), + ('ṙ', 'ṙ'), + ('ṛ', 'ṛ'), + ('ṝ', 'ṝ'), + ('ṟ', 'ṟ'), + ('ṡ', 'ṡ'), + ('ṣ', 'ṣ'), + ('ṥ', 'ṥ'), + ('ṧ', 'ṧ'), + ('ṩ', 'ṩ'), + ('ṫ', 'ṫ'), + ('ṭ', 'ṭ'), + ('ṯ', 'ṯ'), + ('ṱ', 'ṱ'), + ('ṳ', 'ṳ'), + ('ṵ', 'ṵ'), + ('ṷ', 'ṷ'), + ('ṹ', 'ṹ'), + ('ṻ', 'ṻ'), + ('ṽ', 'ṽ'), + ('ṿ', 'ṿ'), + ('ẁ', 'ẁ'), + ('ẃ', 'ẃ'), + ('ẅ', 'ẅ'), + ('ẇ', 'ẇ'), + ('ẉ', 'ẉ'), + ('ẋ', 'ẋ'), + ('ẍ', 'ẍ'), + ('ẏ', 'ẏ'), + ('ẑ', 'ẑ'), + ('ẓ', 'ẓ'), + ('ẕ', 'ẝ'), + ('ẟ', 'ẟ'), + ('ạ', 'ạ'), + ('ả', 'ả'), + ('ấ', 'ấ'), + ('ầ', 'ầ'), + ('ẩ', 'ẩ'), + ('ẫ', 'ẫ'), + ('ậ', 'ậ'), + ('ắ', 'ắ'), + ('ằ', 'ằ'), + ('ẳ', 'ẳ'), + ('ẵ', 'ẵ'), + ('ặ', 'ặ'), + ('ẹ', 'ẹ'), + ('ẻ', 'ẻ'), + ('ẽ', 'ẽ'), + ('ế', 'ế'), + ('ề', 'ề'), + ('ể', 'ể'), + ('ễ', 'ễ'), + ('ệ', 'ệ'), + ('ỉ', 'ỉ'), + ('ị', 'ị'), + ('ọ', 'ọ'), + ('ỏ', 'ỏ'), + ('ố', 'ố'), + ('ồ', 'ồ'), + ('ổ', 'ổ'), + ('ỗ', 'ỗ'), + ('ộ', 'ộ'), + ('ớ', 'ớ'), + ('ờ', 'ờ'), + ('ở', 'ở'), + ('ỡ', 'ỡ'), + ('ợ', 'ợ'), + ('ụ', 'ụ'), + ('ủ', 'ủ'), + ('ứ', 'ứ'), + ('ừ', 'ừ'), + ('ử', 'ử'), + ('ữ', 'ữ'), + ('ự', 'ự'), + ('ỳ', 'ỳ'), + ('ỵ', 'ỵ'), + ('ỷ', 'ỷ'), + ('ỹ', 'ỹ'), + ('ỻ', 'ỻ'), + ('ỽ', 'ỽ'), + ('ỿ', 'ἇ'), + ('ἐ', 'ἕ'), + ('ἠ', 'ἧ'), + ('ἰ', 'ἷ'), + ('ὀ', 'ὅ'), + 
('ὐ', 'ὗ'), + ('ὠ', 'ὧ'), + ('ὰ', 'ώ'), + ('ᾀ', 'ᾇ'), + ('ᾐ', 'ᾗ'), + ('ᾠ', 'ᾧ'), + ('ᾰ', 'ᾴ'), + ('ᾶ', 'ᾷ'), + ('ι', 'ι'), + ('ῂ', 'ῄ'), + ('ῆ', 'ῇ'), + ('ῐ', 'ΐ'), + ('ῖ', 'ῗ'), + ('ῠ', 'ῧ'), + ('ῲ', 'ῴ'), + ('ῶ', 'ῷ'), + ('ℊ', 'ℊ'), + ('ℎ', 'ℏ'), + ('ℓ', 'ℓ'), + ('ℯ', 'ℯ'), + ('ℴ', 'ℴ'), + ('ℹ', 'ℹ'), + ('ℼ', 'ℽ'), + ('ⅆ', 'ⅉ'), + ('ⅎ', 'ⅎ'), + ('ↄ', 'ↄ'), + ('ⰰ', 'ⱟ'), + ('ⱡ', 'ⱡ'), + ('ⱥ', 'ⱦ'), + ('ⱨ', 'ⱨ'), + ('ⱪ', 'ⱪ'), + ('ⱬ', 'ⱬ'), + ('ⱱ', 'ⱱ'), + ('ⱳ', 'ⱴ'), + ('ⱶ', 'ⱻ'), + ('ⲁ', 'ⲁ'), + ('ⲃ', 'ⲃ'), + ('ⲅ', 'ⲅ'), + ('ⲇ', 'ⲇ'), + ('ⲉ', 'ⲉ'), + ('ⲋ', 'ⲋ'), + ('ⲍ', 'ⲍ'), + ('ⲏ', 'ⲏ'), + ('ⲑ', 'ⲑ'), + ('ⲓ', 'ⲓ'), + ('ⲕ', 'ⲕ'), + ('ⲗ', 'ⲗ'), + ('ⲙ', 'ⲙ'), + ('ⲛ', 'ⲛ'), + ('ⲝ', 'ⲝ'), + ('ⲟ', 'ⲟ'), + ('ⲡ', 'ⲡ'), + ('ⲣ', 'ⲣ'), + ('ⲥ', 'ⲥ'), + ('ⲧ', 'ⲧ'), + ('ⲩ', 'ⲩ'), + ('ⲫ', 'ⲫ'), + ('ⲭ', 'ⲭ'), + ('ⲯ', 'ⲯ'), + ('ⲱ', 'ⲱ'), + ('ⲳ', 'ⲳ'), + ('ⲵ', 'ⲵ'), + ('ⲷ', 'ⲷ'), + ('ⲹ', 'ⲹ'), + ('ⲻ', 'ⲻ'), + ('ⲽ', 'ⲽ'), + ('ⲿ', 'ⲿ'), + ('ⳁ', 'ⳁ'), + ('ⳃ', 'ⳃ'), + ('ⳅ', 'ⳅ'), + ('ⳇ', 'ⳇ'), + ('ⳉ', 'ⳉ'), + ('ⳋ', 'ⳋ'), + ('ⳍ', 'ⳍ'), + ('ⳏ', 'ⳏ'), + ('ⳑ', 'ⳑ'), + ('ⳓ', 'ⳓ'), + ('ⳕ', 'ⳕ'), + ('ⳗ', 'ⳗ'), + ('ⳙ', 'ⳙ'), + ('ⳛ', 'ⳛ'), + ('ⳝ', 'ⳝ'), + ('ⳟ', 'ⳟ'), + ('ⳡ', 'ⳡ'), + ('ⳣ', 'ⳤ'), + ('ⳬ', 'ⳬ'), + ('ⳮ', 'ⳮ'), + ('ⳳ', 'ⳳ'), + ('ⴀ', 'ⴥ'), + ('ⴧ', 'ⴧ'), + ('ⴭ', 'ⴭ'), + ('ꙁ', 'ꙁ'), + ('ꙃ', 'ꙃ'), + ('ꙅ', 'ꙅ'), + ('ꙇ', 'ꙇ'), + ('ꙉ', 'ꙉ'), + ('ꙋ', 'ꙋ'), + ('ꙍ', 'ꙍ'), + ('ꙏ', 'ꙏ'), + ('ꙑ', 'ꙑ'), + ('ꙓ', 'ꙓ'), + ('ꙕ', 'ꙕ'), + ('ꙗ', 'ꙗ'), + ('ꙙ', 'ꙙ'), + ('ꙛ', 'ꙛ'), + ('ꙝ', 'ꙝ'), + ('ꙟ', 'ꙟ'), + ('ꙡ', 'ꙡ'), + ('ꙣ', 'ꙣ'), + ('ꙥ', 'ꙥ'), + ('ꙧ', 'ꙧ'), + ('ꙩ', 'ꙩ'), + ('ꙫ', 'ꙫ'), + ('ꙭ', 'ꙭ'), + ('ꚁ', 'ꚁ'), + ('ꚃ', 'ꚃ'), + ('ꚅ', 'ꚅ'), + ('ꚇ', 'ꚇ'), + ('ꚉ', 'ꚉ'), + ('ꚋ', 'ꚋ'), + ('ꚍ', 'ꚍ'), + ('ꚏ', 'ꚏ'), + ('ꚑ', 'ꚑ'), + ('ꚓ', 'ꚓ'), + ('ꚕ', 'ꚕ'), + ('ꚗ', 'ꚗ'), + ('ꚙ', 'ꚙ'), + ('ꚛ', 'ꚛ'), + ('ꜣ', 'ꜣ'), + ('ꜥ', 'ꜥ'), + ('ꜧ', 'ꜧ'), + ('ꜩ', 'ꜩ'), + ('ꜫ', 'ꜫ'), + ('ꜭ', 'ꜭ'), + ('ꜯ', 'ꜱ'), + ('ꜳ', 'ꜳ'), + ('ꜵ', 'ꜵ'), + ('ꜷ', 'ꜷ'), + ('ꜹ', 'ꜹ'), + ('ꜻ', 'ꜻ'), + ('ꜽ', 'ꜽ'), + ('ꜿ', 'ꜿ'), + ('ꝁ', 'ꝁ'), + ('ꝃ', 'ꝃ'), + ('ꝅ', 'ꝅ'), + ('ꝇ', 'ꝇ'), + ('ꝉ', 'ꝉ'), + ('ꝋ', 'ꝋ'), + ('ꝍ', 'ꝍ'), + ('ꝏ', 'ꝏ'), + ('ꝑ', 'ꝑ'), + ('ꝓ', 'ꝓ'), + ('ꝕ', 'ꝕ'), + ('ꝗ', 'ꝗ'), + ('ꝙ', 'ꝙ'), + ('ꝛ', 'ꝛ'), + ('ꝝ', 'ꝝ'), + ('ꝟ', 'ꝟ'), + ('ꝡ', 'ꝡ'), + ('ꝣ', 'ꝣ'), + ('ꝥ', 'ꝥ'), + ('ꝧ', 'ꝧ'), + ('ꝩ', 'ꝩ'), + ('ꝫ', 'ꝫ'), + ('ꝭ', 'ꝭ'), + ('ꝯ', 'ꝯ'), + ('ꝱ', 'ꝸ'), + ('ꝺ', 'ꝺ'), + ('ꝼ', 'ꝼ'), + ('ꝿ', 'ꝿ'), + ('ꞁ', 'ꞁ'), + ('ꞃ', 'ꞃ'), + ('ꞅ', 'ꞅ'), + ('ꞇ', 'ꞇ'), + ('ꞌ', 'ꞌ'), + ('ꞎ', 'ꞎ'), + ('ꞑ', 'ꞑ'), + ('ꞓ', 'ꞕ'), + ('ꞗ', 'ꞗ'), + ('ꞙ', 'ꞙ'), + ('ꞛ', 'ꞛ'), + ('ꞝ', 'ꞝ'), + ('ꞟ', 'ꞟ'), + ('ꞡ', 'ꞡ'), + ('ꞣ', 'ꞣ'), + ('ꞥ', 'ꞥ'), + ('ꞧ', 'ꞧ'), + ('ꞩ', 'ꞩ'), + ('ꞯ', 'ꞯ'), + ('ꞵ', 'ꞵ'), + ('ꞷ', 'ꞷ'), + ('ꞹ', 'ꞹ'), + ('ꞻ', 'ꞻ'), + ('ꞽ', 'ꞽ'), + ('ꞿ', 'ꞿ'), + ('ꟁ', 'ꟁ'), + ('ꟃ', 'ꟃ'), + ('ꟈ', 'ꟈ'), + ('ꟊ', 'ꟊ'), + ('ꟍ', 'ꟍ'), + ('ꟑ', 'ꟑ'), + ('ꟓ', 'ꟓ'), + ('ꟕ', 'ꟕ'), + ('ꟗ', 'ꟗ'), + ('ꟙ', 'ꟙ'), + ('ꟛ', 'ꟛ'), + ('ꟶ', 'ꟶ'), + ('ꟺ', 'ꟺ'), + ('ꬰ', 'ꭚ'), + ('ꭠ', 'ꭨ'), + ('ꭰ', 'ꮿ'), + ('ff', 'st'), + ('ﬓ', 'ﬗ'), + ('a', 'z'), + ('𐐨', '𐑏'), + ('𐓘', '𐓻'), + ('𐖗', '𐖡'), + ('𐖣', '𐖱'), + ('𐖳', '𐖹'), + ('𐖻', '𐖼'), + ('𐳀', '𐳲'), + ('𐵰', '𐶅'), + ('𑣀', '𑣟'), + ('𖹠', '𖹿'), + ('𝐚', '𝐳'), + ('𝑎', '𝑔'), + ('𝑖', '𝑧'), + ('𝒂', '𝒛'), + ('𝒶', '𝒹'), + ('𝒻', '𝒻'), + ('𝒽', '𝓃'), + ('𝓅', '𝓏'), + ('𝓪', '𝔃'), + ('𝔞', '𝔷'), + ('𝕒', '𝕫'), + ('𝖆', '𝖟'), + ('𝖺', '𝗓'), + ('𝗮', '𝘇'), + ('𝘢', '𝘻'), + ('𝙖', '𝙯'), + ('𝚊', '𝚥'), + ('𝛂', '𝛚'), + ('𝛜', '𝛡'), + ('𝛼', '𝜔'), + ('𝜖', '𝜛'), + ('𝜶', '𝝎'), + ('𝝐', '𝝕'), + ('𝝰', '𝞈'), + ('𝞊', '𝞏'), + ('𝞪', '𝟂'), + ('𝟄', '𝟉'), + ('𝟋', '𝟋'), + ('𝼀', '𝼉'), + ('𝼋', 
'𝼞'), + ('𝼥', '𝼪'), + ('𞤢', '𞥃'), +]; + +pub const MARK: &'static [(char, char)] = &[ + ('\u{300}', '\u{36f}'), + ('\u{483}', '\u{489}'), + ('\u{591}', '\u{5bd}'), + ('\u{5bf}', '\u{5bf}'), + ('\u{5c1}', '\u{5c2}'), + ('\u{5c4}', '\u{5c5}'), + ('\u{5c7}', '\u{5c7}'), + ('\u{610}', '\u{61a}'), + ('\u{64b}', '\u{65f}'), + ('\u{670}', '\u{670}'), + ('\u{6d6}', '\u{6dc}'), + ('\u{6df}', '\u{6e4}'), + ('\u{6e7}', '\u{6e8}'), + ('\u{6ea}', '\u{6ed}'), + ('\u{711}', '\u{711}'), + ('\u{730}', '\u{74a}'), + ('\u{7a6}', '\u{7b0}'), + ('\u{7eb}', '\u{7f3}'), + ('\u{7fd}', '\u{7fd}'), + ('\u{816}', '\u{819}'), + ('\u{81b}', '\u{823}'), + ('\u{825}', '\u{827}'), + ('\u{829}', '\u{82d}'), + ('\u{859}', '\u{85b}'), + ('\u{897}', '\u{89f}'), + ('\u{8ca}', '\u{8e1}'), + ('\u{8e3}', 'ः'), + ('\u{93a}', '\u{93c}'), + ('ा', 'ॏ'), + ('\u{951}', '\u{957}'), + ('\u{962}', '\u{963}'), + ('\u{981}', 'ঃ'), + ('\u{9bc}', '\u{9bc}'), + ('\u{9be}', '\u{9c4}'), + ('ে', 'ৈ'), + ('ো', '\u{9cd}'), + ('\u{9d7}', '\u{9d7}'), + ('\u{9e2}', '\u{9e3}'), + ('\u{9fe}', '\u{9fe}'), + ('\u{a01}', 'ਃ'), + ('\u{a3c}', '\u{a3c}'), + ('ਾ', '\u{a42}'), + ('\u{a47}', '\u{a48}'), + ('\u{a4b}', '\u{a4d}'), + ('\u{a51}', '\u{a51}'), + ('\u{a70}', '\u{a71}'), + ('\u{a75}', '\u{a75}'), + ('\u{a81}', 'ઃ'), + ('\u{abc}', '\u{abc}'), + ('ા', '\u{ac5}'), + ('\u{ac7}', 'ૉ'), + ('ો', '\u{acd}'), + ('\u{ae2}', '\u{ae3}'), + ('\u{afa}', '\u{aff}'), + ('\u{b01}', 'ଃ'), + ('\u{b3c}', '\u{b3c}'), + ('\u{b3e}', '\u{b44}'), + ('େ', 'ୈ'), + ('ୋ', '\u{b4d}'), + ('\u{b55}', '\u{b57}'), + ('\u{b62}', '\u{b63}'), + ('\u{b82}', '\u{b82}'), + ('\u{bbe}', 'ூ'), + ('ெ', 'ை'), + ('ொ', '\u{bcd}'), + ('\u{bd7}', '\u{bd7}'), + ('\u{c00}', '\u{c04}'), + ('\u{c3c}', '\u{c3c}'), + ('\u{c3e}', 'ౄ'), + ('\u{c46}', '\u{c48}'), + ('\u{c4a}', '\u{c4d}'), + ('\u{c55}', '\u{c56}'), + ('\u{c62}', '\u{c63}'), + ('\u{c81}', 'ಃ'), + ('\u{cbc}', '\u{cbc}'), + ('ಾ', 'ೄ'), + ('\u{cc6}', '\u{cc8}'), + ('\u{cca}', '\u{ccd}'), + ('\u{cd5}', '\u{cd6}'), + ('\u{ce2}', '\u{ce3}'), + ('ೳ', 'ೳ'), + ('\u{d00}', 'ഃ'), + ('\u{d3b}', '\u{d3c}'), + ('\u{d3e}', '\u{d44}'), + ('െ', 'ൈ'), + ('ൊ', '\u{d4d}'), + ('\u{d57}', '\u{d57}'), + ('\u{d62}', '\u{d63}'), + ('\u{d81}', 'ඃ'), + ('\u{dca}', '\u{dca}'), + ('\u{dcf}', '\u{dd4}'), + ('\u{dd6}', '\u{dd6}'), + ('ෘ', '\u{ddf}'), + ('ෲ', 'ෳ'), + ('\u{e31}', '\u{e31}'), + ('\u{e34}', '\u{e3a}'), + ('\u{e47}', '\u{e4e}'), + ('\u{eb1}', '\u{eb1}'), + ('\u{eb4}', '\u{ebc}'), + ('\u{ec8}', '\u{ece}'), + ('\u{f18}', '\u{f19}'), + ('\u{f35}', '\u{f35}'), + ('\u{f37}', '\u{f37}'), + ('\u{f39}', '\u{f39}'), + ('༾', '༿'), + ('\u{f71}', '\u{f84}'), + ('\u{f86}', '\u{f87}'), + ('\u{f8d}', '\u{f97}'), + ('\u{f99}', '\u{fbc}'), + ('\u{fc6}', '\u{fc6}'), + ('ါ', '\u{103e}'), + ('ၖ', '\u{1059}'), + ('\u{105e}', '\u{1060}'), + ('ၢ', 'ၤ'), + ('ၧ', 'ၭ'), + ('\u{1071}', '\u{1074}'), + ('\u{1082}', '\u{108d}'), + ('ႏ', 'ႏ'), + ('ႚ', '\u{109d}'), + ('\u{135d}', '\u{135f}'), + ('\u{1712}', '\u{1715}'), + ('\u{1732}', '\u{1734}'), + ('\u{1752}', '\u{1753}'), + ('\u{1772}', '\u{1773}'), + ('\u{17b4}', '\u{17d3}'), + ('\u{17dd}', '\u{17dd}'), + ('\u{180b}', '\u{180d}'), + ('\u{180f}', '\u{180f}'), + ('\u{1885}', '\u{1886}'), + ('\u{18a9}', '\u{18a9}'), + ('\u{1920}', 'ᤫ'), + ('ᤰ', '\u{193b}'), + ('\u{1a17}', '\u{1a1b}'), + ('ᩕ', '\u{1a5e}'), + ('\u{1a60}', '\u{1a7c}'), + ('\u{1a7f}', '\u{1a7f}'), + ('\u{1ab0}', '\u{1ace}'), + ('\u{1b00}', 'ᬄ'), + ('\u{1b34}', '\u{1b44}'), + ('\u{1b6b}', '\u{1b73}'), + ('\u{1b80}', 'ᮂ'), + ('ᮡ', '\u{1bad}'), + ('\u{1be6}', '\u{1bf3}'), + ('ᰤ', 
'\u{1c37}'), + ('\u{1cd0}', '\u{1cd2}'), + ('\u{1cd4}', '\u{1ce8}'), + ('\u{1ced}', '\u{1ced}'), + ('\u{1cf4}', '\u{1cf4}'), + ('᳷', '\u{1cf9}'), + ('\u{1dc0}', '\u{1dff}'), + ('\u{20d0}', '\u{20f0}'), + ('\u{2cef}', '\u{2cf1}'), + ('\u{2d7f}', '\u{2d7f}'), + ('\u{2de0}', '\u{2dff}'), + ('\u{302a}', '\u{302f}'), + ('\u{3099}', '\u{309a}'), + ('\u{a66f}', '\u{a672}'), + ('\u{a674}', '\u{a67d}'), + ('\u{a69e}', '\u{a69f}'), + ('\u{a6f0}', '\u{a6f1}'), + ('\u{a802}', '\u{a802}'), + ('\u{a806}', '\u{a806}'), + ('\u{a80b}', '\u{a80b}'), + ('ꠣ', 'ꠧ'), + ('\u{a82c}', '\u{a82c}'), + ('ꢀ', 'ꢁ'), + ('ꢴ', '\u{a8c5}'), + ('\u{a8e0}', '\u{a8f1}'), + ('\u{a8ff}', '\u{a8ff}'), + ('\u{a926}', '\u{a92d}'), + ('\u{a947}', '\u{a953}'), + ('\u{a980}', 'ꦃ'), + ('\u{a9b3}', '\u{a9c0}'), + ('\u{a9e5}', '\u{a9e5}'), + ('\u{aa29}', '\u{aa36}'), + ('\u{aa43}', '\u{aa43}'), + ('\u{aa4c}', 'ꩍ'), + ('ꩻ', 'ꩽ'), + ('\u{aab0}', '\u{aab0}'), + ('\u{aab2}', '\u{aab4}'), + ('\u{aab7}', '\u{aab8}'), + ('\u{aabe}', '\u{aabf}'), + ('\u{aac1}', '\u{aac1}'), + ('ꫫ', 'ꫯ'), + ('ꫵ', '\u{aaf6}'), + ('ꯣ', 'ꯪ'), + ('꯬', '\u{abed}'), + ('\u{fb1e}', '\u{fb1e}'), + ('\u{fe00}', '\u{fe0f}'), + ('\u{fe20}', '\u{fe2f}'), + ('\u{101fd}', '\u{101fd}'), + ('\u{102e0}', '\u{102e0}'), + ('\u{10376}', '\u{1037a}'), + ('\u{10a01}', '\u{10a03}'), + ('\u{10a05}', '\u{10a06}'), + ('\u{10a0c}', '\u{10a0f}'), + ('\u{10a38}', '\u{10a3a}'), + ('\u{10a3f}', '\u{10a3f}'), + ('\u{10ae5}', '\u{10ae6}'), + ('\u{10d24}', '\u{10d27}'), + ('\u{10d69}', '\u{10d6d}'), + ('\u{10eab}', '\u{10eac}'), + ('\u{10efc}', '\u{10eff}'), + ('\u{10f46}', '\u{10f50}'), + ('\u{10f82}', '\u{10f85}'), + ('𑀀', '𑀂'), + ('\u{11038}', '\u{11046}'), + ('\u{11070}', '\u{11070}'), + ('\u{11073}', '\u{11074}'), + ('\u{1107f}', '𑂂'), + ('𑂰', '\u{110ba}'), + ('\u{110c2}', '\u{110c2}'), + ('\u{11100}', '\u{11102}'), + ('\u{11127}', '\u{11134}'), + ('𑅅', '𑅆'), + ('\u{11173}', '\u{11173}'), + ('\u{11180}', '𑆂'), + ('𑆳', '\u{111c0}'), + ('\u{111c9}', '\u{111cc}'), + ('𑇎', '\u{111cf}'), + ('𑈬', '\u{11237}'), + ('\u{1123e}', '\u{1123e}'), + ('\u{11241}', '\u{11241}'), + ('\u{112df}', '\u{112ea}'), + ('\u{11300}', '𑌃'), + ('\u{1133b}', '\u{1133c}'), + ('\u{1133e}', '𑍄'), + ('𑍇', '𑍈'), + ('𑍋', '\u{1134d}'), + ('\u{11357}', '\u{11357}'), + ('𑍢', '𑍣'), + ('\u{11366}', '\u{1136c}'), + ('\u{11370}', '\u{11374}'), + ('\u{113b8}', '\u{113c0}'), + ('\u{113c2}', '\u{113c2}'), + ('\u{113c5}', '\u{113c5}'), + ('\u{113c7}', '𑏊'), + ('𑏌', '\u{113d0}'), + ('\u{113d2}', '\u{113d2}'), + ('\u{113e1}', '\u{113e2}'), + ('𑐵', '\u{11446}'), + ('\u{1145e}', '\u{1145e}'), + ('\u{114b0}', '\u{114c3}'), + ('\u{115af}', '\u{115b5}'), + ('𑖸', '\u{115c0}'), + ('\u{115dc}', '\u{115dd}'), + ('𑘰', '\u{11640}'), + ('\u{116ab}', '\u{116b7}'), + ('\u{1171d}', '\u{1172b}'), + ('𑠬', '\u{1183a}'), + ('\u{11930}', '𑤵'), + ('𑤷', '𑤸'), + ('\u{1193b}', '\u{1193e}'), + ('𑥀', '𑥀'), + ('𑥂', '\u{11943}'), + ('𑧑', '\u{119d7}'), + ('\u{119da}', '\u{119e0}'), + ('𑧤', '𑧤'), + ('\u{11a01}', '\u{11a0a}'), + ('\u{11a33}', '𑨹'), + ('\u{11a3b}', '\u{11a3e}'), + ('\u{11a47}', '\u{11a47}'), + ('\u{11a51}', '\u{11a5b}'), + ('\u{11a8a}', '\u{11a99}'), + ('𑰯', '\u{11c36}'), + ('\u{11c38}', '\u{11c3f}'), + ('\u{11c92}', '\u{11ca7}'), + ('𑲩', '\u{11cb6}'), + ('\u{11d31}', '\u{11d36}'), + ('\u{11d3a}', '\u{11d3a}'), + ('\u{11d3c}', '\u{11d3d}'), + ('\u{11d3f}', '\u{11d45}'), + ('\u{11d47}', '\u{11d47}'), + ('𑶊', '𑶎'), + ('\u{11d90}', '\u{11d91}'), + ('𑶓', '\u{11d97}'), + ('\u{11ef3}', '𑻶'), + ('\u{11f00}', '\u{11f01}'), + ('𑼃', '𑼃'), + ('𑼴', '\u{11f3a}'), + 
('𑼾', '\u{11f42}'), + ('\u{11f5a}', '\u{11f5a}'), + ('\u{13440}', '\u{13440}'), + ('\u{13447}', '\u{13455}'), + ('\u{1611e}', '\u{1612f}'), + ('\u{16af0}', '\u{16af4}'), + ('\u{16b30}', '\u{16b36}'), + ('\u{16f4f}', '\u{16f4f}'), + ('𖽑', '𖾇'), + ('\u{16f8f}', '\u{16f92}'), + ('\u{16fe4}', '\u{16fe4}'), + ('\u{16ff0}', '\u{16ff1}'), + ('\u{1bc9d}', '\u{1bc9e}'), + ('\u{1cf00}', '\u{1cf2d}'), + ('\u{1cf30}', '\u{1cf46}'), + ('\u{1d165}', '\u{1d169}'), + ('\u{1d16d}', '\u{1d172}'), + ('\u{1d17b}', '\u{1d182}'), + ('\u{1d185}', '\u{1d18b}'), + ('\u{1d1aa}', '\u{1d1ad}'), + ('\u{1d242}', '\u{1d244}'), + ('\u{1da00}', '\u{1da36}'), + ('\u{1da3b}', '\u{1da6c}'), + ('\u{1da75}', '\u{1da75}'), + ('\u{1da84}', '\u{1da84}'), + ('\u{1da9b}', '\u{1da9f}'), + ('\u{1daa1}', '\u{1daaf}'), + ('\u{1e000}', '\u{1e006}'), + ('\u{1e008}', '\u{1e018}'), + ('\u{1e01b}', '\u{1e021}'), + ('\u{1e023}', '\u{1e024}'), + ('\u{1e026}', '\u{1e02a}'), + ('\u{1e08f}', '\u{1e08f}'), + ('\u{1e130}', '\u{1e136}'), + ('\u{1e2ae}', '\u{1e2ae}'), + ('\u{1e2ec}', '\u{1e2ef}'), + ('\u{1e4ec}', '\u{1e4ef}'), + ('\u{1e5ee}', '\u{1e5ef}'), + ('\u{1e8d0}', '\u{1e8d6}'), + ('\u{1e944}', '\u{1e94a}'), + ('\u{e0100}', '\u{e01ef}'), +]; + +pub const MATH_SYMBOL: &'static [(char, char)] = &[ + ('+', '+'), + ('<', '>'), + ('|', '|'), + ('~', '~'), + ('¬', '¬'), + ('±', '±'), + ('×', '×'), + ('÷', '÷'), + ('϶', '϶'), + ('؆', '؈'), + ('⁄', '⁄'), + ('⁒', '⁒'), + ('⁺', '⁼'), + ('₊', '₌'), + ('℘', '℘'), + ('⅀', '⅄'), + ('⅋', '⅋'), + ('←', '↔'), + ('↚', '↛'), + ('↠', '↠'), + ('↣', '↣'), + ('↦', '↦'), + ('↮', '↮'), + ('⇎', '⇏'), + ('⇒', '⇒'), + ('⇔', '⇔'), + ('⇴', '⋿'), + ('⌠', '⌡'), + ('⍼', '⍼'), + ('⎛', '⎳'), + ('⏜', '⏡'), + ('▷', '▷'), + ('◁', '◁'), + ('◸', '◿'), + ('♯', '♯'), + ('⟀', '⟄'), + ('⟇', '⟥'), + ('⟰', '⟿'), + ('⤀', '⦂'), + ('⦙', '⧗'), + ('⧜', '⧻'), + ('⧾', '⫿'), + ('⬰', '⭄'), + ('⭇', '⭌'), + ('﬩', '﬩'), + ('﹢', '﹢'), + ('﹤', '﹦'), + ('+', '+'), + ('<', '>'), + ('|', '|'), + ('~', '~'), + ('¬', '¬'), + ('←', '↓'), + ('𐶎', '𐶏'), + ('𝛁', '𝛁'), + ('𝛛', '𝛛'), + ('𝛻', '𝛻'), + ('𝜕', '𝜕'), + ('𝜵', '𝜵'), + ('𝝏', '𝝏'), + ('𝝯', '𝝯'), + ('𝞉', '𝞉'), + ('𝞩', '𝞩'), + ('𝟃', '𝟃'), + ('𞻰', '𞻱'), +]; + +pub const MODIFIER_LETTER: &'static [(char, char)] = &[ + ('ʰ', 'ˁ'), + ('ˆ', 'ˑ'), + ('ˠ', 'ˤ'), + ('ˬ', 'ˬ'), + ('ˮ', 'ˮ'), + ('ʹ', 'ʹ'), + ('ͺ', 'ͺ'), + ('ՙ', 'ՙ'), + ('ـ', 'ـ'), + ('ۥ', 'ۦ'), + ('ߴ', 'ߵ'), + ('ߺ', 'ߺ'), + ('ࠚ', 'ࠚ'), + ('ࠤ', 'ࠤ'), + ('ࠨ', 'ࠨ'), + ('ࣉ', 'ࣉ'), + ('ॱ', 'ॱ'), + ('ๆ', 'ๆ'), + ('ໆ', 'ໆ'), + ('ჼ', 'ჼ'), + ('ៗ', 'ៗ'), + ('ᡃ', 'ᡃ'), + ('ᪧ', 'ᪧ'), + ('ᱸ', 'ᱽ'), + ('ᴬ', 'ᵪ'), + ('ᵸ', 'ᵸ'), + ('ᶛ', 'ᶿ'), + ('ⁱ', 'ⁱ'), + ('ⁿ', 'ⁿ'), + ('ₐ', 'ₜ'), + ('ⱼ', 'ⱽ'), + ('ⵯ', 'ⵯ'), + ('ⸯ', 'ⸯ'), + ('々', '々'), + ('〱', '〵'), + ('〻', '〻'), + ('ゝ', 'ゞ'), + ('ー', 'ヾ'), + ('ꀕ', 'ꀕ'), + ('ꓸ', 'ꓽ'), + ('ꘌ', 'ꘌ'), + ('ꙿ', 'ꙿ'), + ('ꚜ', 'ꚝ'), + ('ꜗ', 'ꜟ'), + ('ꝰ', 'ꝰ'), + ('ꞈ', 'ꞈ'), + ('ꟲ', 'ꟴ'), + ('ꟸ', 'ꟹ'), + ('ꧏ', 'ꧏ'), + ('ꧦ', 'ꧦ'), + ('ꩰ', 'ꩰ'), + ('ꫝ', 'ꫝ'), + ('ꫳ', 'ꫴ'), + ('ꭜ', 'ꭟ'), + ('ꭩ', 'ꭩ'), + ('ー', 'ー'), + ('\u{ff9e}', '\u{ff9f}'), + ('𐞀', '𐞅'), + ('𐞇', '𐞰'), + ('𐞲', '𐞺'), + ('𐵎', '𐵎'), + ('𐵯', '𐵯'), + ('𖭀', '𖭃'), + ('𖵀', '𖵂'), + ('𖵫', '𖵬'), + ('𖾓', '𖾟'), + ('𖿠', '𖿡'), + ('𖿣', '𖿣'), + ('𚿰', '𚿳'), + ('𚿵', '𚿻'), + ('𚿽', '𚿾'), + ('𞀰', '𞁭'), + ('𞄷', '𞄽'), + ('𞓫', '𞓫'), + ('𞥋', '𞥋'), +]; + +pub const MODIFIER_SYMBOL: &'static [(char, char)] = &[ + ('^', '^'), + ('`', '`'), + ('¨', '¨'), + ('¯', '¯'), + ('´', '´'), + ('¸', '¸'), + ('˂', '˅'), + ('˒', '˟'), + ('˥', '˫'), + ('˭', '˭'), + ('˯', '˿'), + ('͵', '͵'), + ('΄', '΅'), + ('࢈', 
'࢈'), + ('᾽', '᾽'), + ('᾿', '῁'), + ('῍', '῏'), + ('῝', '῟'), + ('῭', '`'), + ('´', '῾'), + ('゛', '゜'), + ('꜀', '꜖'), + ('꜠', '꜡'), + ('꞉', '꞊'), + ('꭛', '꭛'), + ('꭪', '꭫'), + ('﮲', '﯂'), + ('^', '^'), + ('`', '`'), + (' ̄', ' ̄'), + ('🏻', '🏿'), +]; + +pub const NONSPACING_MARK: &'static [(char, char)] = &[ + ('\u{300}', '\u{36f}'), + ('\u{483}', '\u{487}'), + ('\u{591}', '\u{5bd}'), + ('\u{5bf}', '\u{5bf}'), + ('\u{5c1}', '\u{5c2}'), + ('\u{5c4}', '\u{5c5}'), + ('\u{5c7}', '\u{5c7}'), + ('\u{610}', '\u{61a}'), + ('\u{64b}', '\u{65f}'), + ('\u{670}', '\u{670}'), + ('\u{6d6}', '\u{6dc}'), + ('\u{6df}', '\u{6e4}'), + ('\u{6e7}', '\u{6e8}'), + ('\u{6ea}', '\u{6ed}'), + ('\u{711}', '\u{711}'), + ('\u{730}', '\u{74a}'), + ('\u{7a6}', '\u{7b0}'), + ('\u{7eb}', '\u{7f3}'), + ('\u{7fd}', '\u{7fd}'), + ('\u{816}', '\u{819}'), + ('\u{81b}', '\u{823}'), + ('\u{825}', '\u{827}'), + ('\u{829}', '\u{82d}'), + ('\u{859}', '\u{85b}'), + ('\u{897}', '\u{89f}'), + ('\u{8ca}', '\u{8e1}'), + ('\u{8e3}', '\u{902}'), + ('\u{93a}', '\u{93a}'), + ('\u{93c}', '\u{93c}'), + ('\u{941}', '\u{948}'), + ('\u{94d}', '\u{94d}'), + ('\u{951}', '\u{957}'), + ('\u{962}', '\u{963}'), + ('\u{981}', '\u{981}'), + ('\u{9bc}', '\u{9bc}'), + ('\u{9c1}', '\u{9c4}'), + ('\u{9cd}', '\u{9cd}'), + ('\u{9e2}', '\u{9e3}'), + ('\u{9fe}', '\u{9fe}'), + ('\u{a01}', '\u{a02}'), + ('\u{a3c}', '\u{a3c}'), + ('\u{a41}', '\u{a42}'), + ('\u{a47}', '\u{a48}'), + ('\u{a4b}', '\u{a4d}'), + ('\u{a51}', '\u{a51}'), + ('\u{a70}', '\u{a71}'), + ('\u{a75}', '\u{a75}'), + ('\u{a81}', '\u{a82}'), + ('\u{abc}', '\u{abc}'), + ('\u{ac1}', '\u{ac5}'), + ('\u{ac7}', '\u{ac8}'), + ('\u{acd}', '\u{acd}'), + ('\u{ae2}', '\u{ae3}'), + ('\u{afa}', '\u{aff}'), + ('\u{b01}', '\u{b01}'), + ('\u{b3c}', '\u{b3c}'), + ('\u{b3f}', '\u{b3f}'), + ('\u{b41}', '\u{b44}'), + ('\u{b4d}', '\u{b4d}'), + ('\u{b55}', '\u{b56}'), + ('\u{b62}', '\u{b63}'), + ('\u{b82}', '\u{b82}'), + ('\u{bc0}', '\u{bc0}'), + ('\u{bcd}', '\u{bcd}'), + ('\u{c00}', '\u{c00}'), + ('\u{c04}', '\u{c04}'), + ('\u{c3c}', '\u{c3c}'), + ('\u{c3e}', '\u{c40}'), + ('\u{c46}', '\u{c48}'), + ('\u{c4a}', '\u{c4d}'), + ('\u{c55}', '\u{c56}'), + ('\u{c62}', '\u{c63}'), + ('\u{c81}', '\u{c81}'), + ('\u{cbc}', '\u{cbc}'), + ('\u{cbf}', '\u{cbf}'), + ('\u{cc6}', '\u{cc6}'), + ('\u{ccc}', '\u{ccd}'), + ('\u{ce2}', '\u{ce3}'), + ('\u{d00}', '\u{d01}'), + ('\u{d3b}', '\u{d3c}'), + ('\u{d41}', '\u{d44}'), + ('\u{d4d}', '\u{d4d}'), + ('\u{d62}', '\u{d63}'), + ('\u{d81}', '\u{d81}'), + ('\u{dca}', '\u{dca}'), + ('\u{dd2}', '\u{dd4}'), + ('\u{dd6}', '\u{dd6}'), + ('\u{e31}', '\u{e31}'), + ('\u{e34}', '\u{e3a}'), + ('\u{e47}', '\u{e4e}'), + ('\u{eb1}', '\u{eb1}'), + ('\u{eb4}', '\u{ebc}'), + ('\u{ec8}', '\u{ece}'), + ('\u{f18}', '\u{f19}'), + ('\u{f35}', '\u{f35}'), + ('\u{f37}', '\u{f37}'), + ('\u{f39}', '\u{f39}'), + ('\u{f71}', '\u{f7e}'), + ('\u{f80}', '\u{f84}'), + ('\u{f86}', '\u{f87}'), + ('\u{f8d}', '\u{f97}'), + ('\u{f99}', '\u{fbc}'), + ('\u{fc6}', '\u{fc6}'), + ('\u{102d}', '\u{1030}'), + ('\u{1032}', '\u{1037}'), + ('\u{1039}', '\u{103a}'), + ('\u{103d}', '\u{103e}'), + ('\u{1058}', '\u{1059}'), + ('\u{105e}', '\u{1060}'), + ('\u{1071}', '\u{1074}'), + ('\u{1082}', '\u{1082}'), + ('\u{1085}', '\u{1086}'), + ('\u{108d}', '\u{108d}'), + ('\u{109d}', '\u{109d}'), + ('\u{135d}', '\u{135f}'), + ('\u{1712}', '\u{1714}'), + ('\u{1732}', '\u{1733}'), + ('\u{1752}', '\u{1753}'), + ('\u{1772}', '\u{1773}'), + ('\u{17b4}', '\u{17b5}'), + ('\u{17b7}', '\u{17bd}'), + ('\u{17c6}', '\u{17c6}'), + ('\u{17c9}', '\u{17d3}'), + 
('\u{17dd}', '\u{17dd}'), + ('\u{180b}', '\u{180d}'), + ('\u{180f}', '\u{180f}'), + ('\u{1885}', '\u{1886}'), + ('\u{18a9}', '\u{18a9}'), + ('\u{1920}', '\u{1922}'), + ('\u{1927}', '\u{1928}'), + ('\u{1932}', '\u{1932}'), + ('\u{1939}', '\u{193b}'), + ('\u{1a17}', '\u{1a18}'), + ('\u{1a1b}', '\u{1a1b}'), + ('\u{1a56}', '\u{1a56}'), + ('\u{1a58}', '\u{1a5e}'), + ('\u{1a60}', '\u{1a60}'), + ('\u{1a62}', '\u{1a62}'), + ('\u{1a65}', '\u{1a6c}'), + ('\u{1a73}', '\u{1a7c}'), + ('\u{1a7f}', '\u{1a7f}'), + ('\u{1ab0}', '\u{1abd}'), + ('\u{1abf}', '\u{1ace}'), + ('\u{1b00}', '\u{1b03}'), + ('\u{1b34}', '\u{1b34}'), + ('\u{1b36}', '\u{1b3a}'), + ('\u{1b3c}', '\u{1b3c}'), + ('\u{1b42}', '\u{1b42}'), + ('\u{1b6b}', '\u{1b73}'), + ('\u{1b80}', '\u{1b81}'), + ('\u{1ba2}', '\u{1ba5}'), + ('\u{1ba8}', '\u{1ba9}'), + ('\u{1bab}', '\u{1bad}'), + ('\u{1be6}', '\u{1be6}'), + ('\u{1be8}', '\u{1be9}'), + ('\u{1bed}', '\u{1bed}'), + ('\u{1bef}', '\u{1bf1}'), + ('\u{1c2c}', '\u{1c33}'), + ('\u{1c36}', '\u{1c37}'), + ('\u{1cd0}', '\u{1cd2}'), + ('\u{1cd4}', '\u{1ce0}'), + ('\u{1ce2}', '\u{1ce8}'), + ('\u{1ced}', '\u{1ced}'), + ('\u{1cf4}', '\u{1cf4}'), + ('\u{1cf8}', '\u{1cf9}'), + ('\u{1dc0}', '\u{1dff}'), + ('\u{20d0}', '\u{20dc}'), + ('\u{20e1}', '\u{20e1}'), + ('\u{20e5}', '\u{20f0}'), + ('\u{2cef}', '\u{2cf1}'), + ('\u{2d7f}', '\u{2d7f}'), + ('\u{2de0}', '\u{2dff}'), + ('\u{302a}', '\u{302d}'), + ('\u{3099}', '\u{309a}'), + ('\u{a66f}', '\u{a66f}'), + ('\u{a674}', '\u{a67d}'), + ('\u{a69e}', '\u{a69f}'), + ('\u{a6f0}', '\u{a6f1}'), + ('\u{a802}', '\u{a802}'), + ('\u{a806}', '\u{a806}'), + ('\u{a80b}', '\u{a80b}'), + ('\u{a825}', '\u{a826}'), + ('\u{a82c}', '\u{a82c}'), + ('\u{a8c4}', '\u{a8c5}'), + ('\u{a8e0}', '\u{a8f1}'), + ('\u{a8ff}', '\u{a8ff}'), + ('\u{a926}', '\u{a92d}'), + ('\u{a947}', '\u{a951}'), + ('\u{a980}', '\u{a982}'), + ('\u{a9b3}', '\u{a9b3}'), + ('\u{a9b6}', '\u{a9b9}'), + ('\u{a9bc}', '\u{a9bd}'), + ('\u{a9e5}', '\u{a9e5}'), + ('\u{aa29}', '\u{aa2e}'), + ('\u{aa31}', '\u{aa32}'), + ('\u{aa35}', '\u{aa36}'), + ('\u{aa43}', '\u{aa43}'), + ('\u{aa4c}', '\u{aa4c}'), + ('\u{aa7c}', '\u{aa7c}'), + ('\u{aab0}', '\u{aab0}'), + ('\u{aab2}', '\u{aab4}'), + ('\u{aab7}', '\u{aab8}'), + ('\u{aabe}', '\u{aabf}'), + ('\u{aac1}', '\u{aac1}'), + ('\u{aaec}', '\u{aaed}'), + ('\u{aaf6}', '\u{aaf6}'), + ('\u{abe5}', '\u{abe5}'), + ('\u{abe8}', '\u{abe8}'), + ('\u{abed}', '\u{abed}'), + ('\u{fb1e}', '\u{fb1e}'), + ('\u{fe00}', '\u{fe0f}'), + ('\u{fe20}', '\u{fe2f}'), + ('\u{101fd}', '\u{101fd}'), + ('\u{102e0}', '\u{102e0}'), + ('\u{10376}', '\u{1037a}'), + ('\u{10a01}', '\u{10a03}'), + ('\u{10a05}', '\u{10a06}'), + ('\u{10a0c}', '\u{10a0f}'), + ('\u{10a38}', '\u{10a3a}'), + ('\u{10a3f}', '\u{10a3f}'), + ('\u{10ae5}', '\u{10ae6}'), + ('\u{10d24}', '\u{10d27}'), + ('\u{10d69}', '\u{10d6d}'), + ('\u{10eab}', '\u{10eac}'), + ('\u{10efc}', '\u{10eff}'), + ('\u{10f46}', '\u{10f50}'), + ('\u{10f82}', '\u{10f85}'), + ('\u{11001}', '\u{11001}'), + ('\u{11038}', '\u{11046}'), + ('\u{11070}', '\u{11070}'), + ('\u{11073}', '\u{11074}'), + ('\u{1107f}', '\u{11081}'), + ('\u{110b3}', '\u{110b6}'), + ('\u{110b9}', '\u{110ba}'), + ('\u{110c2}', '\u{110c2}'), + ('\u{11100}', '\u{11102}'), + ('\u{11127}', '\u{1112b}'), + ('\u{1112d}', '\u{11134}'), + ('\u{11173}', '\u{11173}'), + ('\u{11180}', '\u{11181}'), + ('\u{111b6}', '\u{111be}'), + ('\u{111c9}', '\u{111cc}'), + ('\u{111cf}', '\u{111cf}'), + ('\u{1122f}', '\u{11231}'), + ('\u{11234}', '\u{11234}'), + ('\u{11236}', '\u{11237}'), + ('\u{1123e}', '\u{1123e}'), + 
('\u{11241}', '\u{11241}'), + ('\u{112df}', '\u{112df}'), + ('\u{112e3}', '\u{112ea}'), + ('\u{11300}', '\u{11301}'), + ('\u{1133b}', '\u{1133c}'), + ('\u{11340}', '\u{11340}'), + ('\u{11366}', '\u{1136c}'), + ('\u{11370}', '\u{11374}'), + ('\u{113bb}', '\u{113c0}'), + ('\u{113ce}', '\u{113ce}'), + ('\u{113d0}', '\u{113d0}'), + ('\u{113d2}', '\u{113d2}'), + ('\u{113e1}', '\u{113e2}'), + ('\u{11438}', '\u{1143f}'), + ('\u{11442}', '\u{11444}'), + ('\u{11446}', '\u{11446}'), + ('\u{1145e}', '\u{1145e}'), + ('\u{114b3}', '\u{114b8}'), + ('\u{114ba}', '\u{114ba}'), + ('\u{114bf}', '\u{114c0}'), + ('\u{114c2}', '\u{114c3}'), + ('\u{115b2}', '\u{115b5}'), + ('\u{115bc}', '\u{115bd}'), + ('\u{115bf}', '\u{115c0}'), + ('\u{115dc}', '\u{115dd}'), + ('\u{11633}', '\u{1163a}'), + ('\u{1163d}', '\u{1163d}'), + ('\u{1163f}', '\u{11640}'), + ('\u{116ab}', '\u{116ab}'), + ('\u{116ad}', '\u{116ad}'), + ('\u{116b0}', '\u{116b5}'), + ('\u{116b7}', '\u{116b7}'), + ('\u{1171d}', '\u{1171d}'), + ('\u{1171f}', '\u{1171f}'), + ('\u{11722}', '\u{11725}'), + ('\u{11727}', '\u{1172b}'), + ('\u{1182f}', '\u{11837}'), + ('\u{11839}', '\u{1183a}'), + ('\u{1193b}', '\u{1193c}'), + ('\u{1193e}', '\u{1193e}'), + ('\u{11943}', '\u{11943}'), + ('\u{119d4}', '\u{119d7}'), + ('\u{119da}', '\u{119db}'), + ('\u{119e0}', '\u{119e0}'), + ('\u{11a01}', '\u{11a0a}'), + ('\u{11a33}', '\u{11a38}'), + ('\u{11a3b}', '\u{11a3e}'), + ('\u{11a47}', '\u{11a47}'), + ('\u{11a51}', '\u{11a56}'), + ('\u{11a59}', '\u{11a5b}'), + ('\u{11a8a}', '\u{11a96}'), + ('\u{11a98}', '\u{11a99}'), + ('\u{11c30}', '\u{11c36}'), + ('\u{11c38}', '\u{11c3d}'), + ('\u{11c3f}', '\u{11c3f}'), + ('\u{11c92}', '\u{11ca7}'), + ('\u{11caa}', '\u{11cb0}'), + ('\u{11cb2}', '\u{11cb3}'), + ('\u{11cb5}', '\u{11cb6}'), + ('\u{11d31}', '\u{11d36}'), + ('\u{11d3a}', '\u{11d3a}'), + ('\u{11d3c}', '\u{11d3d}'), + ('\u{11d3f}', '\u{11d45}'), + ('\u{11d47}', '\u{11d47}'), + ('\u{11d90}', '\u{11d91}'), + ('\u{11d95}', '\u{11d95}'), + ('\u{11d97}', '\u{11d97}'), + ('\u{11ef3}', '\u{11ef4}'), + ('\u{11f00}', '\u{11f01}'), + ('\u{11f36}', '\u{11f3a}'), + ('\u{11f40}', '\u{11f40}'), + ('\u{11f42}', '\u{11f42}'), + ('\u{11f5a}', '\u{11f5a}'), + ('\u{13440}', '\u{13440}'), + ('\u{13447}', '\u{13455}'), + ('\u{1611e}', '\u{16129}'), + ('\u{1612d}', '\u{1612f}'), + ('\u{16af0}', '\u{16af4}'), + ('\u{16b30}', '\u{16b36}'), + ('\u{16f4f}', '\u{16f4f}'), + ('\u{16f8f}', '\u{16f92}'), + ('\u{16fe4}', '\u{16fe4}'), + ('\u{1bc9d}', '\u{1bc9e}'), + ('\u{1cf00}', '\u{1cf2d}'), + ('\u{1cf30}', '\u{1cf46}'), + ('\u{1d167}', '\u{1d169}'), + ('\u{1d17b}', '\u{1d182}'), + ('\u{1d185}', '\u{1d18b}'), + ('\u{1d1aa}', '\u{1d1ad}'), + ('\u{1d242}', '\u{1d244}'), + ('\u{1da00}', '\u{1da36}'), + ('\u{1da3b}', '\u{1da6c}'), + ('\u{1da75}', '\u{1da75}'), + ('\u{1da84}', '\u{1da84}'), + ('\u{1da9b}', '\u{1da9f}'), + ('\u{1daa1}', '\u{1daaf}'), + ('\u{1e000}', '\u{1e006}'), + ('\u{1e008}', '\u{1e018}'), + ('\u{1e01b}', '\u{1e021}'), + ('\u{1e023}', '\u{1e024}'), + ('\u{1e026}', '\u{1e02a}'), + ('\u{1e08f}', '\u{1e08f}'), + ('\u{1e130}', '\u{1e136}'), + ('\u{1e2ae}', '\u{1e2ae}'), + ('\u{1e2ec}', '\u{1e2ef}'), + ('\u{1e4ec}', '\u{1e4ef}'), + ('\u{1e5ee}', '\u{1e5ef}'), + ('\u{1e8d0}', '\u{1e8d6}'), + ('\u{1e944}', '\u{1e94a}'), + ('\u{e0100}', '\u{e01ef}'), +]; + +pub const NUMBER: &'static [(char, char)] = &[ + ('0', '9'), + ('²', '³'), + ('¹', '¹'), + ('¼', '¾'), + ('٠', '٩'), + ('۰', '۹'), + ('߀', '߉'), + ('०', '९'), + ('০', '৯'), + ('৴', '৹'), + ('੦', '੯'), + ('૦', '૯'), + ('୦', '୯'), + ('୲', '୷'), + 
('௦', '௲'), + ('౦', '౯'), + ('౸', '౾'), + ('೦', '೯'), + ('൘', '൞'), + ('൦', '൸'), + ('෦', '෯'), + ('๐', '๙'), + ('໐', '໙'), + ('༠', '༳'), + ('၀', '၉'), + ('႐', '႙'), + ('፩', '፼'), + ('ᛮ', 'ᛰ'), + ('០', '៩'), + ('៰', '៹'), + ('᠐', '᠙'), + ('᥆', '᥏'), + ('᧐', '᧚'), + ('᪀', '᪉'), + ('᪐', '᪙'), + ('᭐', '᭙'), + ('᮰', '᮹'), + ('᱀', '᱉'), + ('᱐', '᱙'), + ('⁰', '⁰'), + ('⁴', '⁹'), + ('₀', '₉'), + ('⅐', 'ↂ'), + ('ↅ', '↉'), + ('①', '⒛'), + ('⓪', '⓿'), + ('❶', '➓'), + ('⳽', '⳽'), + ('〇', '〇'), + ('〡', '〩'), + ('〸', '〺'), + ('㆒', '㆕'), + ('㈠', '㈩'), + ('㉈', '㉏'), + ('㉑', '㉟'), + ('㊀', '㊉'), + ('㊱', '㊿'), + ('꘠', '꘩'), + ('ꛦ', 'ꛯ'), + ('꠰', '꠵'), + ('꣐', '꣙'), + ('꤀', '꤉'), + ('꧐', '꧙'), + ('꧰', '꧹'), + ('꩐', '꩙'), + ('꯰', '꯹'), + ('0', '9'), + ('𐄇', '𐄳'), + ('𐅀', '𐅸'), + ('𐆊', '𐆋'), + ('𐋡', '𐋻'), + ('𐌠', '𐌣'), + ('𐍁', '𐍁'), + ('𐍊', '𐍊'), + ('𐏑', '𐏕'), + ('𐒠', '𐒩'), + ('𐡘', '𐡟'), + ('𐡹', '𐡿'), + ('𐢧', '𐢯'), + ('𐣻', '𐣿'), + ('𐤖', '𐤛'), + ('𐦼', '𐦽'), + ('𐧀', '𐧏'), + ('𐧒', '𐧿'), + ('𐩀', '𐩈'), + ('𐩽', '𐩾'), + ('𐪝', '𐪟'), + ('𐫫', '𐫯'), + ('𐭘', '𐭟'), + ('𐭸', '𐭿'), + ('𐮩', '𐮯'), + ('𐳺', '𐳿'), + ('𐴰', '𐴹'), + ('𐵀', '𐵉'), + ('𐹠', '𐹾'), + ('𐼝', '𐼦'), + ('𐽑', '𐽔'), + ('𐿅', '𐿋'), + ('𑁒', '𑁯'), + ('𑃰', '𑃹'), + ('𑄶', '𑄿'), + ('𑇐', '𑇙'), + ('𑇡', '𑇴'), + ('𑋰', '𑋹'), + ('𑑐', '𑑙'), + ('𑓐', '𑓙'), + ('𑙐', '𑙙'), + ('𑛀', '𑛉'), + ('𑛐', '𑛣'), + ('𑜰', '𑜻'), + ('𑣠', '𑣲'), + ('𑥐', '𑥙'), + ('𑯰', '𑯹'), + ('𑱐', '𑱬'), + ('𑵐', '𑵙'), + ('𑶠', '𑶩'), + ('𑽐', '𑽙'), + ('𑿀', '𑿔'), + ('𒐀', '𒑮'), + ('𖄰', '𖄹'), + ('𖩠', '𖩩'), + ('𖫀', '𖫉'), + ('𖭐', '𖭙'), + ('𖭛', '𖭡'), + ('𖵰', '𖵹'), + ('𖺀', '𖺖'), + ('𜳰', '𜳹'), + ('𝋀', '𝋓'), + ('𝋠', '𝋳'), + ('𝍠', '𝍸'), + ('𝟎', '𝟿'), + ('𞅀', '𞅉'), + ('𞋰', '𞋹'), + ('𞓰', '𞓹'), + ('𞗱', '𞗺'), + ('𞣇', '𞣏'), + ('𞥐', '𞥙'), + ('𞱱', '𞲫'), + ('𞲭', '𞲯'), + ('𞲱', '𞲴'), + ('𞴁', '𞴭'), + ('𞴯', '𞴽'), + ('🄀', '🄌'), + ('🯰', '🯹'), +]; + +pub const OPEN_PUNCTUATION: &'static [(char, char)] = &[ + ('(', '('), + ('[', '['), + ('{', '{'), + ('༺', '༺'), + ('༼', '༼'), + ('᚛', '᚛'), + ('‚', '‚'), + ('„', '„'), + ('⁅', '⁅'), + ('⁽', '⁽'), + ('₍', '₍'), + ('⌈', '⌈'), + ('⌊', '⌊'), + ('〈', '〈'), + ('❨', '❨'), + ('❪', '❪'), + ('❬', '❬'), + ('❮', '❮'), + ('❰', '❰'), + ('❲', '❲'), + ('❴', '❴'), + ('⟅', '⟅'), + ('⟦', '⟦'), + ('⟨', '⟨'), + ('⟪', '⟪'), + ('⟬', '⟬'), + ('⟮', '⟮'), + ('⦃', '⦃'), + ('⦅', '⦅'), + ('⦇', '⦇'), + ('⦉', '⦉'), + ('⦋', '⦋'), + ('⦍', '⦍'), + ('⦏', '⦏'), + ('⦑', '⦑'), + ('⦓', '⦓'), + ('⦕', '⦕'), + ('⦗', '⦗'), + ('⧘', '⧘'), + ('⧚', '⧚'), + ('⧼', '⧼'), + ('⸢', '⸢'), + ('⸤', '⸤'), + ('⸦', '⸦'), + ('⸨', '⸨'), + ('⹂', '⹂'), + ('⹕', '⹕'), + ('⹗', '⹗'), + ('⹙', '⹙'), + ('⹛', '⹛'), + ('〈', '〈'), + ('《', '《'), + ('「', '「'), + ('『', '『'), + ('【', '【'), + ('〔', '〔'), + ('〖', '〖'), + ('〘', '〘'), + ('〚', '〚'), + ('〝', '〝'), + ('﴿', '﴿'), + ('︗', '︗'), + ('︵', '︵'), + ('︷', '︷'), + ('︹', '︹'), + ('︻', '︻'), + ('︽', '︽'), + ('︿', '︿'), + ('﹁', '﹁'), + ('﹃', '﹃'), + ('﹇', '﹇'), + ('﹙', '﹙'), + ('﹛', '﹛'), + ('﹝', '﹝'), + ('(', '('), + ('[', '['), + ('{', '{'), + ('⦅', '⦅'), + ('「', '「'), +]; + +pub const OTHER: &'static [(char, char)] = &[ + ('\0', '\u{1f}'), + ('\u{7f}', '\u{9f}'), + ('\u{ad}', '\u{ad}'), + ('\u{378}', '\u{379}'), + ('\u{380}', '\u{383}'), + ('\u{38b}', '\u{38b}'), + ('\u{38d}', '\u{38d}'), + ('\u{3a2}', '\u{3a2}'), + ('\u{530}', '\u{530}'), + ('\u{557}', '\u{558}'), + ('\u{58b}', '\u{58c}'), + ('\u{590}', '\u{590}'), + ('\u{5c8}', '\u{5cf}'), + ('\u{5eb}', '\u{5ee}'), + ('\u{5f5}', '\u{605}'), + ('\u{61c}', '\u{61c}'), + ('\u{6dd}', '\u{6dd}'), + ('\u{70e}', '\u{70f}'), + ('\u{74b}', '\u{74c}'), + ('\u{7b2}', '\u{7bf}'), + 
('\u{7fb}', '\u{7fc}'), + ('\u{82e}', '\u{82f}'), + ('\u{83f}', '\u{83f}'), + ('\u{85c}', '\u{85d}'), + ('\u{85f}', '\u{85f}'), + ('\u{86b}', '\u{86f}'), + ('\u{88f}', '\u{896}'), + ('\u{8e2}', '\u{8e2}'), + ('\u{984}', '\u{984}'), + ('\u{98d}', '\u{98e}'), + ('\u{991}', '\u{992}'), + ('\u{9a9}', '\u{9a9}'), + ('\u{9b1}', '\u{9b1}'), + ('\u{9b3}', '\u{9b5}'), + ('\u{9ba}', '\u{9bb}'), + ('\u{9c5}', '\u{9c6}'), + ('\u{9c9}', '\u{9ca}'), + ('\u{9cf}', '\u{9d6}'), + ('\u{9d8}', '\u{9db}'), + ('\u{9de}', '\u{9de}'), + ('\u{9e4}', '\u{9e5}'), + ('\u{9ff}', '\u{a00}'), + ('\u{a04}', '\u{a04}'), + ('\u{a0b}', '\u{a0e}'), + ('\u{a11}', '\u{a12}'), + ('\u{a29}', '\u{a29}'), + ('\u{a31}', '\u{a31}'), + ('\u{a34}', '\u{a34}'), + ('\u{a37}', '\u{a37}'), + ('\u{a3a}', '\u{a3b}'), + ('\u{a3d}', '\u{a3d}'), + ('\u{a43}', '\u{a46}'), + ('\u{a49}', '\u{a4a}'), + ('\u{a4e}', '\u{a50}'), + ('\u{a52}', '\u{a58}'), + ('\u{a5d}', '\u{a5d}'), + ('\u{a5f}', '\u{a65}'), + ('\u{a77}', '\u{a80}'), + ('\u{a84}', '\u{a84}'), + ('\u{a8e}', '\u{a8e}'), + ('\u{a92}', '\u{a92}'), + ('\u{aa9}', '\u{aa9}'), + ('\u{ab1}', '\u{ab1}'), + ('\u{ab4}', '\u{ab4}'), + ('\u{aba}', '\u{abb}'), + ('\u{ac6}', '\u{ac6}'), + ('\u{aca}', '\u{aca}'), + ('\u{ace}', '\u{acf}'), + ('\u{ad1}', '\u{adf}'), + ('\u{ae4}', '\u{ae5}'), + ('\u{af2}', '\u{af8}'), + ('\u{b00}', '\u{b00}'), + ('\u{b04}', '\u{b04}'), + ('\u{b0d}', '\u{b0e}'), + ('\u{b11}', '\u{b12}'), + ('\u{b29}', '\u{b29}'), + ('\u{b31}', '\u{b31}'), + ('\u{b34}', '\u{b34}'), + ('\u{b3a}', '\u{b3b}'), + ('\u{b45}', '\u{b46}'), + ('\u{b49}', '\u{b4a}'), + ('\u{b4e}', '\u{b54}'), + ('\u{b58}', '\u{b5b}'), + ('\u{b5e}', '\u{b5e}'), + ('\u{b64}', '\u{b65}'), + ('\u{b78}', '\u{b81}'), + ('\u{b84}', '\u{b84}'), + ('\u{b8b}', '\u{b8d}'), + ('\u{b91}', '\u{b91}'), + ('\u{b96}', '\u{b98}'), + ('\u{b9b}', '\u{b9b}'), + ('\u{b9d}', '\u{b9d}'), + ('\u{ba0}', '\u{ba2}'), + ('\u{ba5}', '\u{ba7}'), + ('\u{bab}', '\u{bad}'), + ('\u{bba}', '\u{bbd}'), + ('\u{bc3}', '\u{bc5}'), + ('\u{bc9}', '\u{bc9}'), + ('\u{bce}', '\u{bcf}'), + ('\u{bd1}', '\u{bd6}'), + ('\u{bd8}', '\u{be5}'), + ('\u{bfb}', '\u{bff}'), + ('\u{c0d}', '\u{c0d}'), + ('\u{c11}', '\u{c11}'), + ('\u{c29}', '\u{c29}'), + ('\u{c3a}', '\u{c3b}'), + ('\u{c45}', '\u{c45}'), + ('\u{c49}', '\u{c49}'), + ('\u{c4e}', '\u{c54}'), + ('\u{c57}', '\u{c57}'), + ('\u{c5b}', '\u{c5c}'), + ('\u{c5e}', '\u{c5f}'), + ('\u{c64}', '\u{c65}'), + ('\u{c70}', '\u{c76}'), + ('\u{c8d}', '\u{c8d}'), + ('\u{c91}', '\u{c91}'), + ('\u{ca9}', '\u{ca9}'), + ('\u{cb4}', '\u{cb4}'), + ('\u{cba}', '\u{cbb}'), + ('\u{cc5}', '\u{cc5}'), + ('\u{cc9}', '\u{cc9}'), + ('\u{cce}', '\u{cd4}'), + ('\u{cd7}', '\u{cdc}'), + ('\u{cdf}', '\u{cdf}'), + ('\u{ce4}', '\u{ce5}'), + ('\u{cf0}', '\u{cf0}'), + ('\u{cf4}', '\u{cff}'), + ('\u{d0d}', '\u{d0d}'), + ('\u{d11}', '\u{d11}'), + ('\u{d45}', '\u{d45}'), + ('\u{d49}', '\u{d49}'), + ('\u{d50}', '\u{d53}'), + ('\u{d64}', '\u{d65}'), + ('\u{d80}', '\u{d80}'), + ('\u{d84}', '\u{d84}'), + ('\u{d97}', '\u{d99}'), + ('\u{db2}', '\u{db2}'), + ('\u{dbc}', '\u{dbc}'), + ('\u{dbe}', '\u{dbf}'), + ('\u{dc7}', '\u{dc9}'), + ('\u{dcb}', '\u{dce}'), + ('\u{dd5}', '\u{dd5}'), + ('\u{dd7}', '\u{dd7}'), + ('\u{de0}', '\u{de5}'), + ('\u{df0}', '\u{df1}'), + ('\u{df5}', '\u{e00}'), + ('\u{e3b}', '\u{e3e}'), + ('\u{e5c}', '\u{e80}'), + ('\u{e83}', '\u{e83}'), + ('\u{e85}', '\u{e85}'), + ('\u{e8b}', '\u{e8b}'), + ('\u{ea4}', '\u{ea4}'), + ('\u{ea6}', '\u{ea6}'), + ('\u{ebe}', '\u{ebf}'), + ('\u{ec5}', '\u{ec5}'), + ('\u{ec7}', '\u{ec7}'), + ('\u{ecf}', 
'\u{ecf}'), + ('\u{eda}', '\u{edb}'), + ('\u{ee0}', '\u{eff}'), + ('\u{f48}', '\u{f48}'), + ('\u{f6d}', '\u{f70}'), + ('\u{f98}', '\u{f98}'), + ('\u{fbd}', '\u{fbd}'), + ('\u{fcd}', '\u{fcd}'), + ('\u{fdb}', '\u{fff}'), + ('\u{10c6}', '\u{10c6}'), + ('\u{10c8}', '\u{10cc}'), + ('\u{10ce}', '\u{10cf}'), + ('\u{1249}', '\u{1249}'), + ('\u{124e}', '\u{124f}'), + ('\u{1257}', '\u{1257}'), + ('\u{1259}', '\u{1259}'), + ('\u{125e}', '\u{125f}'), + ('\u{1289}', '\u{1289}'), + ('\u{128e}', '\u{128f}'), + ('\u{12b1}', '\u{12b1}'), + ('\u{12b6}', '\u{12b7}'), + ('\u{12bf}', '\u{12bf}'), + ('\u{12c1}', '\u{12c1}'), + ('\u{12c6}', '\u{12c7}'), + ('\u{12d7}', '\u{12d7}'), + ('\u{1311}', '\u{1311}'), + ('\u{1316}', '\u{1317}'), + ('\u{135b}', '\u{135c}'), + ('\u{137d}', '\u{137f}'), + ('\u{139a}', '\u{139f}'), + ('\u{13f6}', '\u{13f7}'), + ('\u{13fe}', '\u{13ff}'), + ('\u{169d}', '\u{169f}'), + ('\u{16f9}', '\u{16ff}'), + ('\u{1716}', '\u{171e}'), + ('\u{1737}', '\u{173f}'), + ('\u{1754}', '\u{175f}'), + ('\u{176d}', '\u{176d}'), + ('\u{1771}', '\u{1771}'), + ('\u{1774}', '\u{177f}'), + ('\u{17de}', '\u{17df}'), + ('\u{17ea}', '\u{17ef}'), + ('\u{17fa}', '\u{17ff}'), + ('\u{180e}', '\u{180e}'), + ('\u{181a}', '\u{181f}'), + ('\u{1879}', '\u{187f}'), + ('\u{18ab}', '\u{18af}'), + ('\u{18f6}', '\u{18ff}'), + ('\u{191f}', '\u{191f}'), + ('\u{192c}', '\u{192f}'), + ('\u{193c}', '\u{193f}'), + ('\u{1941}', '\u{1943}'), + ('\u{196e}', '\u{196f}'), + ('\u{1975}', '\u{197f}'), + ('\u{19ac}', '\u{19af}'), + ('\u{19ca}', '\u{19cf}'), + ('\u{19db}', '\u{19dd}'), + ('\u{1a1c}', '\u{1a1d}'), + ('\u{1a5f}', '\u{1a5f}'), + ('\u{1a7d}', '\u{1a7e}'), + ('\u{1a8a}', '\u{1a8f}'), + ('\u{1a9a}', '\u{1a9f}'), + ('\u{1aae}', '\u{1aaf}'), + ('\u{1acf}', '\u{1aff}'), + ('\u{1b4d}', '\u{1b4d}'), + ('\u{1bf4}', '\u{1bfb}'), + ('\u{1c38}', '\u{1c3a}'), + ('\u{1c4a}', '\u{1c4c}'), + ('\u{1c8b}', '\u{1c8f}'), + ('\u{1cbb}', '\u{1cbc}'), + ('\u{1cc8}', '\u{1ccf}'), + ('\u{1cfb}', '\u{1cff}'), + ('\u{1f16}', '\u{1f17}'), + ('\u{1f1e}', '\u{1f1f}'), + ('\u{1f46}', '\u{1f47}'), + ('\u{1f4e}', '\u{1f4f}'), + ('\u{1f58}', '\u{1f58}'), + ('\u{1f5a}', '\u{1f5a}'), + ('\u{1f5c}', '\u{1f5c}'), + ('\u{1f5e}', '\u{1f5e}'), + ('\u{1f7e}', '\u{1f7f}'), + ('\u{1fb5}', '\u{1fb5}'), + ('\u{1fc5}', '\u{1fc5}'), + ('\u{1fd4}', '\u{1fd5}'), + ('\u{1fdc}', '\u{1fdc}'), + ('\u{1ff0}', '\u{1ff1}'), + ('\u{1ff5}', '\u{1ff5}'), + ('\u{1fff}', '\u{1fff}'), + ('\u{200b}', '\u{200f}'), + ('\u{202a}', '\u{202e}'), + ('\u{2060}', '\u{206f}'), + ('\u{2072}', '\u{2073}'), + ('\u{208f}', '\u{208f}'), + ('\u{209d}', '\u{209f}'), + ('\u{20c1}', '\u{20cf}'), + ('\u{20f1}', '\u{20ff}'), + ('\u{218c}', '\u{218f}'), + ('\u{242a}', '\u{243f}'), + ('\u{244b}', '\u{245f}'), + ('\u{2b74}', '\u{2b75}'), + ('\u{2b96}', '\u{2b96}'), + ('\u{2cf4}', '\u{2cf8}'), + ('\u{2d26}', '\u{2d26}'), + ('\u{2d28}', '\u{2d2c}'), + ('\u{2d2e}', '\u{2d2f}'), + ('\u{2d68}', '\u{2d6e}'), + ('\u{2d71}', '\u{2d7e}'), + ('\u{2d97}', '\u{2d9f}'), + ('\u{2da7}', '\u{2da7}'), + ('\u{2daf}', '\u{2daf}'), + ('\u{2db7}', '\u{2db7}'), + ('\u{2dbf}', '\u{2dbf}'), + ('\u{2dc7}', '\u{2dc7}'), + ('\u{2dcf}', '\u{2dcf}'), + ('\u{2dd7}', '\u{2dd7}'), + ('\u{2ddf}', '\u{2ddf}'), + ('\u{2e5e}', '\u{2e7f}'), + ('\u{2e9a}', '\u{2e9a}'), + ('\u{2ef4}', '\u{2eff}'), + ('\u{2fd6}', '\u{2fef}'), + ('\u{3040}', '\u{3040}'), + ('\u{3097}', '\u{3098}'), + ('\u{3100}', '\u{3104}'), + ('\u{3130}', '\u{3130}'), + ('\u{318f}', '\u{318f}'), + ('\u{31e6}', '\u{31ee}'), + ('\u{321f}', '\u{321f}'), + ('\u{a48d}', '\u{a48f}'), + 
('\u{a4c7}', '\u{a4cf}'), + ('\u{a62c}', '\u{a63f}'), + ('\u{a6f8}', '\u{a6ff}'), + ('\u{a7ce}', '\u{a7cf}'), + ('\u{a7d2}', '\u{a7d2}'), + ('\u{a7d4}', '\u{a7d4}'), + ('\u{a7dd}', '\u{a7f1}'), + ('\u{a82d}', '\u{a82f}'), + ('\u{a83a}', '\u{a83f}'), + ('\u{a878}', '\u{a87f}'), + ('\u{a8c6}', '\u{a8cd}'), + ('\u{a8da}', '\u{a8df}'), + ('\u{a954}', '\u{a95e}'), + ('\u{a97d}', '\u{a97f}'), + ('\u{a9ce}', '\u{a9ce}'), + ('\u{a9da}', '\u{a9dd}'), + ('\u{a9ff}', '\u{a9ff}'), + ('\u{aa37}', '\u{aa3f}'), + ('\u{aa4e}', '\u{aa4f}'), + ('\u{aa5a}', '\u{aa5b}'), + ('\u{aac3}', '\u{aada}'), + ('\u{aaf7}', '\u{ab00}'), + ('\u{ab07}', '\u{ab08}'), + ('\u{ab0f}', '\u{ab10}'), + ('\u{ab17}', '\u{ab1f}'), + ('\u{ab27}', '\u{ab27}'), + ('\u{ab2f}', '\u{ab2f}'), + ('\u{ab6c}', '\u{ab6f}'), + ('\u{abee}', '\u{abef}'), + ('\u{abfa}', '\u{abff}'), + ('\u{d7a4}', '\u{d7af}'), + ('\u{d7c7}', '\u{d7ca}'), + ('\u{d7fc}', '\u{f8ff}'), + ('\u{fa6e}', '\u{fa6f}'), + ('\u{fada}', '\u{faff}'), + ('\u{fb07}', '\u{fb12}'), + ('\u{fb18}', '\u{fb1c}'), + ('\u{fb37}', '\u{fb37}'), + ('\u{fb3d}', '\u{fb3d}'), + ('\u{fb3f}', '\u{fb3f}'), + ('\u{fb42}', '\u{fb42}'), + ('\u{fb45}', '\u{fb45}'), + ('\u{fbc3}', '\u{fbd2}'), + ('\u{fd90}', '\u{fd91}'), + ('\u{fdc8}', '\u{fdce}'), + ('\u{fdd0}', '\u{fdef}'), + ('\u{fe1a}', '\u{fe1f}'), + ('\u{fe53}', '\u{fe53}'), + ('\u{fe67}', '\u{fe67}'), + ('\u{fe6c}', '\u{fe6f}'), + ('\u{fe75}', '\u{fe75}'), + ('\u{fefd}', '\u{ff00}'), + ('\u{ffbf}', '\u{ffc1}'), + ('\u{ffc8}', '\u{ffc9}'), + ('\u{ffd0}', '\u{ffd1}'), + ('\u{ffd8}', '\u{ffd9}'), + ('\u{ffdd}', '\u{ffdf}'), + ('\u{ffe7}', '\u{ffe7}'), + ('\u{ffef}', '\u{fffb}'), + ('\u{fffe}', '\u{ffff}'), + ('\u{1000c}', '\u{1000c}'), + ('\u{10027}', '\u{10027}'), + ('\u{1003b}', '\u{1003b}'), + ('\u{1003e}', '\u{1003e}'), + ('\u{1004e}', '\u{1004f}'), + ('\u{1005e}', '\u{1007f}'), + ('\u{100fb}', '\u{100ff}'), + ('\u{10103}', '\u{10106}'), + ('\u{10134}', '\u{10136}'), + ('\u{1018f}', '\u{1018f}'), + ('\u{1019d}', '\u{1019f}'), + ('\u{101a1}', '\u{101cf}'), + ('\u{101fe}', '\u{1027f}'), + ('\u{1029d}', '\u{1029f}'), + ('\u{102d1}', '\u{102df}'), + ('\u{102fc}', '\u{102ff}'), + ('\u{10324}', '\u{1032c}'), + ('\u{1034b}', '\u{1034f}'), + ('\u{1037b}', '\u{1037f}'), + ('\u{1039e}', '\u{1039e}'), + ('\u{103c4}', '\u{103c7}'), + ('\u{103d6}', '\u{103ff}'), + ('\u{1049e}', '\u{1049f}'), + ('\u{104aa}', '\u{104af}'), + ('\u{104d4}', '\u{104d7}'), + ('\u{104fc}', '\u{104ff}'), + ('\u{10528}', '\u{1052f}'), + ('\u{10564}', '\u{1056e}'), + ('\u{1057b}', '\u{1057b}'), + ('\u{1058b}', '\u{1058b}'), + ('\u{10593}', '\u{10593}'), + ('\u{10596}', '\u{10596}'), + ('\u{105a2}', '\u{105a2}'), + ('\u{105b2}', '\u{105b2}'), + ('\u{105ba}', '\u{105ba}'), + ('\u{105bd}', '\u{105bf}'), + ('\u{105f4}', '\u{105ff}'), + ('\u{10737}', '\u{1073f}'), + ('\u{10756}', '\u{1075f}'), + ('\u{10768}', '\u{1077f}'), + ('\u{10786}', '\u{10786}'), + ('\u{107b1}', '\u{107b1}'), + ('\u{107bb}', '\u{107ff}'), + ('\u{10806}', '\u{10807}'), + ('\u{10809}', '\u{10809}'), + ('\u{10836}', '\u{10836}'), + ('\u{10839}', '\u{1083b}'), + ('\u{1083d}', '\u{1083e}'), + ('\u{10856}', '\u{10856}'), + ('\u{1089f}', '\u{108a6}'), + ('\u{108b0}', '\u{108df}'), + ('\u{108f3}', '\u{108f3}'), + ('\u{108f6}', '\u{108fa}'), + ('\u{1091c}', '\u{1091e}'), + ('\u{1093a}', '\u{1093e}'), + ('\u{10940}', '\u{1097f}'), + ('\u{109b8}', '\u{109bb}'), + ('\u{109d0}', '\u{109d1}'), + ('\u{10a04}', '\u{10a04}'), + ('\u{10a07}', '\u{10a0b}'), + ('\u{10a14}', '\u{10a14}'), + ('\u{10a18}', '\u{10a18}'), + ('\u{10a36}', 
'\u{10a37}'), + ('\u{10a3b}', '\u{10a3e}'), + ('\u{10a49}', '\u{10a4f}'), + ('\u{10a59}', '\u{10a5f}'), + ('\u{10aa0}', '\u{10abf}'), + ('\u{10ae7}', '\u{10aea}'), + ('\u{10af7}', '\u{10aff}'), + ('\u{10b36}', '\u{10b38}'), + ('\u{10b56}', '\u{10b57}'), + ('\u{10b73}', '\u{10b77}'), + ('\u{10b92}', '\u{10b98}'), + ('\u{10b9d}', '\u{10ba8}'), + ('\u{10bb0}', '\u{10bff}'), + ('\u{10c49}', '\u{10c7f}'), + ('\u{10cb3}', '\u{10cbf}'), + ('\u{10cf3}', '\u{10cf9}'), + ('\u{10d28}', '\u{10d2f}'), + ('\u{10d3a}', '\u{10d3f}'), + ('\u{10d66}', '\u{10d68}'), + ('\u{10d86}', '\u{10d8d}'), + ('\u{10d90}', '\u{10e5f}'), + ('\u{10e7f}', '\u{10e7f}'), + ('\u{10eaa}', '\u{10eaa}'), + ('\u{10eae}', '\u{10eaf}'), + ('\u{10eb2}', '\u{10ec1}'), + ('\u{10ec5}', '\u{10efb}'), + ('\u{10f28}', '\u{10f2f}'), + ('\u{10f5a}', '\u{10f6f}'), + ('\u{10f8a}', '\u{10faf}'), + ('\u{10fcc}', '\u{10fdf}'), + ('\u{10ff7}', '\u{10fff}'), + ('\u{1104e}', '\u{11051}'), + ('\u{11076}', '\u{1107e}'), + ('\u{110bd}', '\u{110bd}'), + ('\u{110c3}', '\u{110cf}'), + ('\u{110e9}', '\u{110ef}'), + ('\u{110fa}', '\u{110ff}'), + ('\u{11135}', '\u{11135}'), + ('\u{11148}', '\u{1114f}'), + ('\u{11177}', '\u{1117f}'), + ('\u{111e0}', '\u{111e0}'), + ('\u{111f5}', '\u{111ff}'), + ('\u{11212}', '\u{11212}'), + ('\u{11242}', '\u{1127f}'), + ('\u{11287}', '\u{11287}'), + ('\u{11289}', '\u{11289}'), + ('\u{1128e}', '\u{1128e}'), + ('\u{1129e}', '\u{1129e}'), + ('\u{112aa}', '\u{112af}'), + ('\u{112eb}', '\u{112ef}'), + ('\u{112fa}', '\u{112ff}'), + ('\u{11304}', '\u{11304}'), + ('\u{1130d}', '\u{1130e}'), + ('\u{11311}', '\u{11312}'), + ('\u{11329}', '\u{11329}'), + ('\u{11331}', '\u{11331}'), + ('\u{11334}', '\u{11334}'), + ('\u{1133a}', '\u{1133a}'), + ('\u{11345}', '\u{11346}'), + ('\u{11349}', '\u{1134a}'), + ('\u{1134e}', '\u{1134f}'), + ('\u{11351}', '\u{11356}'), + ('\u{11358}', '\u{1135c}'), + ('\u{11364}', '\u{11365}'), + ('\u{1136d}', '\u{1136f}'), + ('\u{11375}', '\u{1137f}'), + ('\u{1138a}', '\u{1138a}'), + ('\u{1138c}', '\u{1138d}'), + ('\u{1138f}', '\u{1138f}'), + ('\u{113b6}', '\u{113b6}'), + ('\u{113c1}', '\u{113c1}'), + ('\u{113c3}', '\u{113c4}'), + ('\u{113c6}', '\u{113c6}'), + ('\u{113cb}', '\u{113cb}'), + ('\u{113d6}', '\u{113d6}'), + ('\u{113d9}', '\u{113e0}'), + ('\u{113e3}', '\u{113ff}'), + ('\u{1145c}', '\u{1145c}'), + ('\u{11462}', '\u{1147f}'), + ('\u{114c8}', '\u{114cf}'), + ('\u{114da}', '\u{1157f}'), + ('\u{115b6}', '\u{115b7}'), + ('\u{115de}', '\u{115ff}'), + ('\u{11645}', '\u{1164f}'), + ('\u{1165a}', '\u{1165f}'), + ('\u{1166d}', '\u{1167f}'), + ('\u{116ba}', '\u{116bf}'), + ('\u{116ca}', '\u{116cf}'), + ('\u{116e4}', '\u{116ff}'), + ('\u{1171b}', '\u{1171c}'), + ('\u{1172c}', '\u{1172f}'), + ('\u{11747}', '\u{117ff}'), + ('\u{1183c}', '\u{1189f}'), + ('\u{118f3}', '\u{118fe}'), + ('\u{11907}', '\u{11908}'), + ('\u{1190a}', '\u{1190b}'), + ('\u{11914}', '\u{11914}'), + ('\u{11917}', '\u{11917}'), + ('\u{11936}', '\u{11936}'), + ('\u{11939}', '\u{1193a}'), + ('\u{11947}', '\u{1194f}'), + ('\u{1195a}', '\u{1199f}'), + ('\u{119a8}', '\u{119a9}'), + ('\u{119d8}', '\u{119d9}'), + ('\u{119e5}', '\u{119ff}'), + ('\u{11a48}', '\u{11a4f}'), + ('\u{11aa3}', '\u{11aaf}'), + ('\u{11af9}', '\u{11aff}'), + ('\u{11b0a}', '\u{11bbf}'), + ('\u{11be2}', '\u{11bef}'), + ('\u{11bfa}', '\u{11bff}'), + ('\u{11c09}', '\u{11c09}'), + ('\u{11c37}', '\u{11c37}'), + ('\u{11c46}', '\u{11c4f}'), + ('\u{11c6d}', '\u{11c6f}'), + ('\u{11c90}', '\u{11c91}'), + ('\u{11ca8}', '\u{11ca8}'), + ('\u{11cb7}', '\u{11cff}'), + ('\u{11d07}', '\u{11d07}'), 
+ ('\u{11d0a}', '\u{11d0a}'), + ('\u{11d37}', '\u{11d39}'), + ('\u{11d3b}', '\u{11d3b}'), + ('\u{11d3e}', '\u{11d3e}'), + ('\u{11d48}', '\u{11d4f}'), + ('\u{11d5a}', '\u{11d5f}'), + ('\u{11d66}', '\u{11d66}'), + ('\u{11d69}', '\u{11d69}'), + ('\u{11d8f}', '\u{11d8f}'), + ('\u{11d92}', '\u{11d92}'), + ('\u{11d99}', '\u{11d9f}'), + ('\u{11daa}', '\u{11edf}'), + ('\u{11ef9}', '\u{11eff}'), + ('\u{11f11}', '\u{11f11}'), + ('\u{11f3b}', '\u{11f3d}'), + ('\u{11f5b}', '\u{11faf}'), + ('\u{11fb1}', '\u{11fbf}'), + ('\u{11ff2}', '\u{11ffe}'), + ('\u{1239a}', '\u{123ff}'), + ('\u{1246f}', '\u{1246f}'), + ('\u{12475}', '\u{1247f}'), + ('\u{12544}', '\u{12f8f}'), + ('\u{12ff3}', '\u{12fff}'), + ('\u{13430}', '\u{1343f}'), + ('\u{13456}', '\u{1345f}'), + ('\u{143fb}', '\u{143ff}'), + ('\u{14647}', '\u{160ff}'), + ('\u{1613a}', '\u{167ff}'), + ('\u{16a39}', '\u{16a3f}'), + ('\u{16a5f}', '\u{16a5f}'), + ('\u{16a6a}', '\u{16a6d}'), + ('\u{16abf}', '\u{16abf}'), + ('\u{16aca}', '\u{16acf}'), + ('\u{16aee}', '\u{16aef}'), + ('\u{16af6}', '\u{16aff}'), + ('\u{16b46}', '\u{16b4f}'), + ('\u{16b5a}', '\u{16b5a}'), + ('\u{16b62}', '\u{16b62}'), + ('\u{16b78}', '\u{16b7c}'), + ('\u{16b90}', '\u{16d3f}'), + ('\u{16d7a}', '\u{16e3f}'), + ('\u{16e9b}', '\u{16eff}'), + ('\u{16f4b}', '\u{16f4e}'), + ('\u{16f88}', '\u{16f8e}'), + ('\u{16fa0}', '\u{16fdf}'), + ('\u{16fe5}', '\u{16fef}'), + ('\u{16ff2}', '\u{16fff}'), + ('\u{187f8}', '\u{187ff}'), + ('\u{18cd6}', '\u{18cfe}'), + ('\u{18d09}', '\u{1afef}'), + ('\u{1aff4}', '\u{1aff4}'), + ('\u{1affc}', '\u{1affc}'), + ('\u{1afff}', '\u{1afff}'), + ('\u{1b123}', '\u{1b131}'), + ('\u{1b133}', '\u{1b14f}'), + ('\u{1b153}', '\u{1b154}'), + ('\u{1b156}', '\u{1b163}'), + ('\u{1b168}', '\u{1b16f}'), + ('\u{1b2fc}', '\u{1bbff}'), + ('\u{1bc6b}', '\u{1bc6f}'), + ('\u{1bc7d}', '\u{1bc7f}'), + ('\u{1bc89}', '\u{1bc8f}'), + ('\u{1bc9a}', '\u{1bc9b}'), + ('\u{1bca0}', '\u{1cbff}'), + ('\u{1ccfa}', '\u{1ccff}'), + ('\u{1ceb4}', '\u{1ceff}'), + ('\u{1cf2e}', '\u{1cf2f}'), + ('\u{1cf47}', '\u{1cf4f}'), + ('\u{1cfc4}', '\u{1cfff}'), + ('\u{1d0f6}', '\u{1d0ff}'), + ('\u{1d127}', '\u{1d128}'), + ('\u{1d173}', '\u{1d17a}'), + ('\u{1d1eb}', '\u{1d1ff}'), + ('\u{1d246}', '\u{1d2bf}'), + ('\u{1d2d4}', '\u{1d2df}'), + ('\u{1d2f4}', '\u{1d2ff}'), + ('\u{1d357}', '\u{1d35f}'), + ('\u{1d379}', '\u{1d3ff}'), + ('\u{1d455}', '\u{1d455}'), + ('\u{1d49d}', '\u{1d49d}'), + ('\u{1d4a0}', '\u{1d4a1}'), + ('\u{1d4a3}', '\u{1d4a4}'), + ('\u{1d4a7}', '\u{1d4a8}'), + ('\u{1d4ad}', '\u{1d4ad}'), + ('\u{1d4ba}', '\u{1d4ba}'), + ('\u{1d4bc}', '\u{1d4bc}'), + ('\u{1d4c4}', '\u{1d4c4}'), + ('\u{1d506}', '\u{1d506}'), + ('\u{1d50b}', '\u{1d50c}'), + ('\u{1d515}', '\u{1d515}'), + ('\u{1d51d}', '\u{1d51d}'), + ('\u{1d53a}', '\u{1d53a}'), + ('\u{1d53f}', '\u{1d53f}'), + ('\u{1d545}', '\u{1d545}'), + ('\u{1d547}', '\u{1d549}'), + ('\u{1d551}', '\u{1d551}'), + ('\u{1d6a6}', '\u{1d6a7}'), + ('\u{1d7cc}', '\u{1d7cd}'), + ('\u{1da8c}', '\u{1da9a}'), + ('\u{1daa0}', '\u{1daa0}'), + ('\u{1dab0}', '\u{1deff}'), + ('\u{1df1f}', '\u{1df24}'), + ('\u{1df2b}', '\u{1dfff}'), + ('\u{1e007}', '\u{1e007}'), + ('\u{1e019}', '\u{1e01a}'), + ('\u{1e022}', '\u{1e022}'), + ('\u{1e025}', '\u{1e025}'), + ('\u{1e02b}', '\u{1e02f}'), + ('\u{1e06e}', '\u{1e08e}'), + ('\u{1e090}', '\u{1e0ff}'), + ('\u{1e12d}', '\u{1e12f}'), + ('\u{1e13e}', '\u{1e13f}'), + ('\u{1e14a}', '\u{1e14d}'), + ('\u{1e150}', '\u{1e28f}'), + ('\u{1e2af}', '\u{1e2bf}'), + ('\u{1e2fa}', '\u{1e2fe}'), + ('\u{1e300}', '\u{1e4cf}'), + ('\u{1e4fa}', '\u{1e5cf}'), + 
('\u{1e5fb}', '\u{1e5fe}'), + ('\u{1e600}', '\u{1e7df}'), + ('\u{1e7e7}', '\u{1e7e7}'), + ('\u{1e7ec}', '\u{1e7ec}'), + ('\u{1e7ef}', '\u{1e7ef}'), + ('\u{1e7ff}', '\u{1e7ff}'), + ('\u{1e8c5}', '\u{1e8c6}'), + ('\u{1e8d7}', '\u{1e8ff}'), + ('\u{1e94c}', '\u{1e94f}'), + ('\u{1e95a}', '\u{1e95d}'), + ('\u{1e960}', '\u{1ec70}'), + ('\u{1ecb5}', '\u{1ed00}'), + ('\u{1ed3e}', '\u{1edff}'), + ('\u{1ee04}', '\u{1ee04}'), + ('\u{1ee20}', '\u{1ee20}'), + ('\u{1ee23}', '\u{1ee23}'), + ('\u{1ee25}', '\u{1ee26}'), + ('\u{1ee28}', '\u{1ee28}'), + ('\u{1ee33}', '\u{1ee33}'), + ('\u{1ee38}', '\u{1ee38}'), + ('\u{1ee3a}', '\u{1ee3a}'), + ('\u{1ee3c}', '\u{1ee41}'), + ('\u{1ee43}', '\u{1ee46}'), + ('\u{1ee48}', '\u{1ee48}'), + ('\u{1ee4a}', '\u{1ee4a}'), + ('\u{1ee4c}', '\u{1ee4c}'), + ('\u{1ee50}', '\u{1ee50}'), + ('\u{1ee53}', '\u{1ee53}'), + ('\u{1ee55}', '\u{1ee56}'), + ('\u{1ee58}', '\u{1ee58}'), + ('\u{1ee5a}', '\u{1ee5a}'), + ('\u{1ee5c}', '\u{1ee5c}'), + ('\u{1ee5e}', '\u{1ee5e}'), + ('\u{1ee60}', '\u{1ee60}'), + ('\u{1ee63}', '\u{1ee63}'), + ('\u{1ee65}', '\u{1ee66}'), + ('\u{1ee6b}', '\u{1ee6b}'), + ('\u{1ee73}', '\u{1ee73}'), + ('\u{1ee78}', '\u{1ee78}'), + ('\u{1ee7d}', '\u{1ee7d}'), + ('\u{1ee7f}', '\u{1ee7f}'), + ('\u{1ee8a}', '\u{1ee8a}'), + ('\u{1ee9c}', '\u{1eea0}'), + ('\u{1eea4}', '\u{1eea4}'), + ('\u{1eeaa}', '\u{1eeaa}'), + ('\u{1eebc}', '\u{1eeef}'), + ('\u{1eef2}', '\u{1efff}'), + ('\u{1f02c}', '\u{1f02f}'), + ('\u{1f094}', '\u{1f09f}'), + ('\u{1f0af}', '\u{1f0b0}'), + ('\u{1f0c0}', '\u{1f0c0}'), + ('\u{1f0d0}', '\u{1f0d0}'), + ('\u{1f0f6}', '\u{1f0ff}'), + ('\u{1f1ae}', '\u{1f1e5}'), + ('\u{1f203}', '\u{1f20f}'), + ('\u{1f23c}', '\u{1f23f}'), + ('\u{1f249}', '\u{1f24f}'), + ('\u{1f252}', '\u{1f25f}'), + ('\u{1f266}', '\u{1f2ff}'), + ('\u{1f6d8}', '\u{1f6db}'), + ('\u{1f6ed}', '\u{1f6ef}'), + ('\u{1f6fd}', '\u{1f6ff}'), + ('\u{1f777}', '\u{1f77a}'), + ('\u{1f7da}', '\u{1f7df}'), + ('\u{1f7ec}', '\u{1f7ef}'), + ('\u{1f7f1}', '\u{1f7ff}'), + ('\u{1f80c}', '\u{1f80f}'), + ('\u{1f848}', '\u{1f84f}'), + ('\u{1f85a}', '\u{1f85f}'), + ('\u{1f888}', '\u{1f88f}'), + ('\u{1f8ae}', '\u{1f8af}'), + ('\u{1f8bc}', '\u{1f8bf}'), + ('\u{1f8c2}', '\u{1f8ff}'), + ('\u{1fa54}', '\u{1fa5f}'), + ('\u{1fa6e}', '\u{1fa6f}'), + ('\u{1fa7d}', '\u{1fa7f}'), + ('\u{1fa8a}', '\u{1fa8e}'), + ('\u{1fac7}', '\u{1facd}'), + ('\u{1fadd}', '\u{1fade}'), + ('\u{1faea}', '\u{1faef}'), + ('\u{1faf9}', '\u{1faff}'), + ('\u{1fb93}', '\u{1fb93}'), + ('\u{1fbfa}', '\u{1ffff}'), + ('\u{2a6e0}', '\u{2a6ff}'), + ('\u{2b73a}', '\u{2b73f}'), + ('\u{2b81e}', '\u{2b81f}'), + ('\u{2cea2}', '\u{2ceaf}'), + ('\u{2ebe1}', '\u{2ebef}'), + ('\u{2ee5e}', '\u{2f7ff}'), + ('\u{2fa1e}', '\u{2ffff}'), + ('\u{3134b}', '\u{3134f}'), + ('\u{323b0}', '\u{e00ff}'), + ('\u{e01f0}', '\u{10ffff}'), +]; + +pub const OTHER_LETTER: &'static [(char, char)] = &[ + ('ª', 'ª'), + ('º', 'º'), + ('ƻ', 'ƻ'), + ('ǀ', 'ǃ'), + ('ʔ', 'ʔ'), + ('א', 'ת'), + ('ׯ', 'ײ'), + ('ؠ', 'ؿ'), + ('ف', 'ي'), + ('ٮ', 'ٯ'), + ('ٱ', 'ۓ'), + ('ە', 'ە'), + ('ۮ', 'ۯ'), + ('ۺ', 'ۼ'), + ('ۿ', 'ۿ'), + ('ܐ', 'ܐ'), + ('ܒ', 'ܯ'), + ('ݍ', 'ޥ'), + ('ޱ', 'ޱ'), + ('ߊ', 'ߪ'), + ('ࠀ', 'ࠕ'), + ('ࡀ', 'ࡘ'), + ('ࡠ', 'ࡪ'), + ('ࡰ', 'ࢇ'), + ('ࢉ', 'ࢎ'), + ('ࢠ', 'ࣈ'), + ('ऄ', 'ह'), + ('ऽ', 'ऽ'), + ('ॐ', 'ॐ'), + ('क़', 'ॡ'), + ('ॲ', 'ঀ'), + ('অ', 'ঌ'), + ('এ', 'ঐ'), + ('ও', 'ন'), + ('প', 'র'), + ('ল', 'ল'), + ('শ', 'হ'), + ('ঽ', 'ঽ'), + ('ৎ', 'ৎ'), + ('ড়', 'ঢ়'), + ('য়', 'ৡ'), + ('ৰ', 'ৱ'), + ('ৼ', 'ৼ'), + ('ਅ', 'ਊ'), + ('ਏ', 'ਐ'), + ('ਓ', 'ਨ'), + ('ਪ', 'ਰ'), + ('ਲ', 'ਲ਼'), + ('ਵ', 'ਸ਼'), + ('ਸ', 'ਹ'), 
+ ('ਖ਼', 'ੜ'), + ('ਫ਼', 'ਫ਼'), + ('ੲ', 'ੴ'), + ('અ', 'ઍ'), + ('એ', 'ઑ'), + ('ઓ', 'ન'), + ('પ', 'ર'), + ('લ', 'ળ'), + ('વ', 'હ'), + ('ઽ', 'ઽ'), + ('ૐ', 'ૐ'), + ('ૠ', 'ૡ'), + ('ૹ', 'ૹ'), + ('ଅ', 'ଌ'), + ('ଏ', 'ଐ'), + ('ଓ', 'ନ'), + ('ପ', 'ର'), + ('ଲ', 'ଳ'), + ('ଵ', 'ହ'), + ('ଽ', 'ଽ'), + ('ଡ଼', 'ଢ଼'), + ('ୟ', 'ୡ'), + ('ୱ', 'ୱ'), + ('ஃ', 'ஃ'), + ('அ', 'ஊ'), + ('எ', 'ஐ'), + ('ஒ', 'க'), + ('ங', 'ச'), + ('ஜ', 'ஜ'), + ('ஞ', 'ட'), + ('ண', 'த'), + ('ந', 'ப'), + ('ம', 'ஹ'), + ('ௐ', 'ௐ'), + ('అ', 'ఌ'), + ('ఎ', 'ఐ'), + ('ఒ', 'న'), + ('ప', 'హ'), + ('ఽ', 'ఽ'), + ('ౘ', 'ౚ'), + ('ౝ', 'ౝ'), + ('ౠ', 'ౡ'), + ('ಀ', 'ಀ'), + ('ಅ', 'ಌ'), + ('ಎ', 'ಐ'), + ('ಒ', 'ನ'), + ('ಪ', 'ಳ'), + ('ವ', 'ಹ'), + ('ಽ', 'ಽ'), + ('ೝ', 'ೞ'), + ('ೠ', 'ೡ'), + ('ೱ', 'ೲ'), + ('ഄ', 'ഌ'), + ('എ', 'ഐ'), + ('ഒ', 'ഺ'), + ('ഽ', 'ഽ'), + ('ൎ', 'ൎ'), + ('ൔ', 'ൖ'), + ('ൟ', 'ൡ'), + ('ൺ', 'ൿ'), + ('අ', 'ඖ'), + ('ක', 'න'), + ('ඳ', 'ර'), + ('ල', 'ල'), + ('ව', 'ෆ'), + ('ก', 'ะ'), + ('า', 'ำ'), + ('เ', 'ๅ'), + ('ກ', 'ຂ'), + ('ຄ', 'ຄ'), + ('ຆ', 'ຊ'), + ('ຌ', 'ຣ'), + ('ລ', 'ລ'), + ('ວ', 'ະ'), + ('າ', 'ຳ'), + ('ຽ', 'ຽ'), + ('ເ', 'ໄ'), + ('ໜ', 'ໟ'), + ('ༀ', 'ༀ'), + ('ཀ', 'ཇ'), + ('ཉ', 'ཬ'), + ('ྈ', 'ྌ'), + ('က', 'ဪ'), + ('ဿ', 'ဿ'), + ('ၐ', 'ၕ'), + ('ၚ', 'ၝ'), + ('ၡ', 'ၡ'), + ('ၥ', 'ၦ'), + ('ၮ', 'ၰ'), + ('ၵ', 'ႁ'), + ('ႎ', 'ႎ'), + ('ᄀ', 'ቈ'), + ('ቊ', 'ቍ'), + ('ቐ', 'ቖ'), + ('ቘ', 'ቘ'), + ('ቚ', 'ቝ'), + ('በ', 'ኈ'), + ('ኊ', 'ኍ'), + ('ነ', 'ኰ'), + ('ኲ', 'ኵ'), + ('ኸ', 'ኾ'), + ('ዀ', 'ዀ'), + ('ዂ', 'ዅ'), + ('ወ', 'ዖ'), + ('ዘ', 'ጐ'), + ('ጒ', 'ጕ'), + ('ጘ', 'ፚ'), + ('ᎀ', 'ᎏ'), + ('ᐁ', 'ᙬ'), + ('ᙯ', 'ᙿ'), + ('ᚁ', 'ᚚ'), + ('ᚠ', 'ᛪ'), + ('ᛱ', 'ᛸ'), + ('ᜀ', 'ᜑ'), + ('ᜟ', 'ᜱ'), + ('ᝀ', 'ᝑ'), + ('ᝠ', 'ᝬ'), + ('ᝮ', 'ᝰ'), + ('ក', 'ឳ'), + ('ៜ', 'ៜ'), + ('ᠠ', 'ᡂ'), + ('ᡄ', 'ᡸ'), + ('ᢀ', 'ᢄ'), + ('ᢇ', 'ᢨ'), + ('ᢪ', 'ᢪ'), + ('ᢰ', 'ᣵ'), + ('ᤀ', 'ᤞ'), + ('ᥐ', 'ᥭ'), + ('ᥰ', 'ᥴ'), + ('ᦀ', 'ᦫ'), + ('ᦰ', 'ᧉ'), + ('ᨀ', 'ᨖ'), + ('ᨠ', 'ᩔ'), + ('ᬅ', 'ᬳ'), + ('ᭅ', 'ᭌ'), + ('ᮃ', 'ᮠ'), + ('ᮮ', 'ᮯ'), + ('ᮺ', 'ᯥ'), + ('ᰀ', 'ᰣ'), + ('ᱍ', 'ᱏ'), + ('ᱚ', 'ᱷ'), + ('ᳩ', 'ᳬ'), + ('ᳮ', 'ᳳ'), + ('ᳵ', 'ᳶ'), + ('ᳺ', 'ᳺ'), + ('ℵ', 'ℸ'), + ('ⴰ', 'ⵧ'), + ('ⶀ', 'ⶖ'), + ('ⶠ', 'ⶦ'), + ('ⶨ', 'ⶮ'), + ('ⶰ', 'ⶶ'), + ('ⶸ', 'ⶾ'), + ('ⷀ', 'ⷆ'), + ('ⷈ', 'ⷎ'), + ('ⷐ', 'ⷖ'), + ('ⷘ', 'ⷞ'), + ('〆', '〆'), + ('〼', '〼'), + ('ぁ', 'ゖ'), + ('ゟ', 'ゟ'), + ('ァ', 'ヺ'), + ('ヿ', 'ヿ'), + ('ㄅ', 'ㄯ'), + ('ㄱ', 'ㆎ'), + ('ㆠ', 'ㆿ'), + ('ㇰ', 'ㇿ'), + ('㐀', '䶿'), + ('一', 'ꀔ'), + ('ꀖ', 'ꒌ'), + ('ꓐ', 'ꓷ'), + ('ꔀ', 'ꘋ'), + ('ꘐ', 'ꘟ'), + ('ꘪ', 'ꘫ'), + ('ꙮ', 'ꙮ'), + ('ꚠ', 'ꛥ'), + ('ꞏ', 'ꞏ'), + ('ꟷ', 'ꟷ'), + ('ꟻ', 'ꠁ'), + ('ꠃ', 'ꠅ'), + ('ꠇ', 'ꠊ'), + ('ꠌ', 'ꠢ'), + ('ꡀ', 'ꡳ'), + ('ꢂ', 'ꢳ'), + ('ꣲ', 'ꣷ'), + ('ꣻ', 'ꣻ'), + ('ꣽ', 'ꣾ'), + ('ꤊ', 'ꤥ'), + ('ꤰ', 'ꥆ'), + ('ꥠ', 'ꥼ'), + ('ꦄ', 'ꦲ'), + ('ꧠ', 'ꧤ'), + ('ꧧ', 'ꧯ'), + ('ꧺ', 'ꧾ'), + ('ꨀ', 'ꨨ'), + ('ꩀ', 'ꩂ'), + ('ꩄ', 'ꩋ'), + ('ꩠ', 'ꩯ'), + ('ꩱ', 'ꩶ'), + ('ꩺ', 'ꩺ'), + ('ꩾ', 'ꪯ'), + ('ꪱ', 'ꪱ'), + ('ꪵ', 'ꪶ'), + ('ꪹ', 'ꪽ'), + ('ꫀ', 'ꫀ'), + ('ꫂ', 'ꫂ'), + ('ꫛ', 'ꫜ'), + ('ꫠ', 'ꫪ'), + ('ꫲ', 'ꫲ'), + ('ꬁ', 'ꬆ'), + ('ꬉ', 'ꬎ'), + ('ꬑ', 'ꬖ'), + ('ꬠ', 'ꬦ'), + ('ꬨ', 'ꬮ'), + ('ꯀ', 'ꯢ'), + ('가', '힣'), + ('ힰ', 'ퟆ'), + ('ퟋ', 'ퟻ'), + ('豈', '舘'), + ('並', '龎'), + ('יִ', 'יִ'), + ('ײַ', 'ﬨ'), + ('שׁ', 'זּ'), + ('טּ', 'לּ'), + ('מּ', 'מּ'), + ('נּ', 'סּ'), + ('ףּ', 'פּ'), + ('צּ', 'ﮱ'), + ('ﯓ', 'ﴽ'), + ('ﵐ', 'ﶏ'), + ('ﶒ', 'ﷇ'), + ('ﷰ', 'ﷻ'), + ('ﹰ', 'ﹴ'), + ('ﹶ', 'ﻼ'), + ('ヲ', 'ッ'), + ('ア', 'ン'), + ('ᅠ', 'ᄒ'), + ('ᅡ', 'ᅦ'), + ('ᅧ', 'ᅬ'), + ('ᅭ', 'ᅲ'), + ('ᅳ', 'ᅵ'), + ('𐀀', '𐀋'), + ('𐀍', '𐀦'), + ('𐀨', '𐀺'), + ('𐀼', '𐀽'), + ('𐀿', '𐁍'), + ('𐁐', '𐁝'), + ('𐂀', '𐃺'), + ('𐊀', '𐊜'), + ('𐊠', '𐋐'), + ('𐌀', '𐌟'), + ('𐌭', '𐍀'), + ('𐍂', '𐍉'), + ('𐍐', 
'𐍵'), + ('𐎀', '𐎝'), + ('𐎠', '𐏃'), + ('𐏈', '𐏏'), + ('𐑐', '𐒝'), + ('𐔀', '𐔧'), + ('𐔰', '𐕣'), + ('𐗀', '𐗳'), + ('𐘀', '𐜶'), + ('𐝀', '𐝕'), + ('𐝠', '𐝧'), + ('𐠀', '𐠅'), + ('𐠈', '𐠈'), + ('𐠊', '𐠵'), + ('𐠷', '𐠸'), + ('𐠼', '𐠼'), + ('𐠿', '𐡕'), + ('𐡠', '𐡶'), + ('𐢀', '𐢞'), + ('𐣠', '𐣲'), + ('𐣴', '𐣵'), + ('𐤀', '𐤕'), + ('𐤠', '𐤹'), + ('𐦀', '𐦷'), + ('𐦾', '𐦿'), + ('𐨀', '𐨀'), + ('𐨐', '𐨓'), + ('𐨕', '𐨗'), + ('𐨙', '𐨵'), + ('𐩠', '𐩼'), + ('𐪀', '𐪜'), + ('𐫀', '𐫇'), + ('𐫉', '𐫤'), + ('𐬀', '𐬵'), + ('𐭀', '𐭕'), + ('𐭠', '𐭲'), + ('𐮀', '𐮑'), + ('𐰀', '𐱈'), + ('𐴀', '𐴣'), + ('𐵊', '𐵍'), + ('𐵏', '𐵏'), + ('𐺀', '𐺩'), + ('𐺰', '𐺱'), + ('𐻂', '𐻄'), + ('𐼀', '𐼜'), + ('𐼧', '𐼧'), + ('𐼰', '𐽅'), + ('𐽰', '𐾁'), + ('𐾰', '𐿄'), + ('𐿠', '𐿶'), + ('𑀃', '𑀷'), + ('𑁱', '𑁲'), + ('𑁵', '𑁵'), + ('𑂃', '𑂯'), + ('𑃐', '𑃨'), + ('𑄃', '𑄦'), + ('𑅄', '𑅄'), + ('𑅇', '𑅇'), + ('𑅐', '𑅲'), + ('𑅶', '𑅶'), + ('𑆃', '𑆲'), + ('𑇁', '𑇄'), + ('𑇚', '𑇚'), + ('𑇜', '𑇜'), + ('𑈀', '𑈑'), + ('𑈓', '𑈫'), + ('𑈿', '𑉀'), + ('𑊀', '𑊆'), + ('𑊈', '𑊈'), + ('𑊊', '𑊍'), + ('𑊏', '𑊝'), + ('𑊟', '𑊨'), + ('𑊰', '𑋞'), + ('𑌅', '𑌌'), + ('𑌏', '𑌐'), + ('𑌓', '𑌨'), + ('𑌪', '𑌰'), + ('𑌲', '𑌳'), + ('𑌵', '𑌹'), + ('𑌽', '𑌽'), + ('𑍐', '𑍐'), + ('𑍝', '𑍡'), + ('𑎀', '𑎉'), + ('𑎋', '𑎋'), + ('𑎎', '𑎎'), + ('𑎐', '𑎵'), + ('𑎷', '𑎷'), + ('𑏑', '𑏑'), + ('𑏓', '𑏓'), + ('𑐀', '𑐴'), + ('𑑇', '𑑊'), + ('𑑟', '𑑡'), + ('𑒀', '𑒯'), + ('𑓄', '𑓅'), + ('𑓇', '𑓇'), + ('𑖀', '𑖮'), + ('𑗘', '𑗛'), + ('𑘀', '𑘯'), + ('𑙄', '𑙄'), + ('𑚀', '𑚪'), + ('𑚸', '𑚸'), + ('𑜀', '𑜚'), + ('𑝀', '𑝆'), + ('𑠀', '𑠫'), + ('𑣿', '𑤆'), + ('𑤉', '𑤉'), + ('𑤌', '𑤓'), + ('𑤕', '𑤖'), + ('𑤘', '𑤯'), + ('𑤿', '𑤿'), + ('𑥁', '𑥁'), + ('𑦠', '𑦧'), + ('𑦪', '𑧐'), + ('𑧡', '𑧡'), + ('𑧣', '𑧣'), + ('𑨀', '𑨀'), + ('𑨋', '𑨲'), + ('𑨺', '𑨺'), + ('𑩐', '𑩐'), + ('𑩜', '𑪉'), + ('𑪝', '𑪝'), + ('𑪰', '𑫸'), + ('𑯀', '𑯠'), + ('𑰀', '𑰈'), + ('𑰊', '𑰮'), + ('𑱀', '𑱀'), + ('𑱲', '𑲏'), + ('𑴀', '𑴆'), + ('𑴈', '𑴉'), + ('𑴋', '𑴰'), + ('𑵆', '𑵆'), + ('𑵠', '𑵥'), + ('𑵧', '𑵨'), + ('𑵪', '𑶉'), + ('𑶘', '𑶘'), + ('𑻠', '𑻲'), + ('𑼂', '𑼂'), + ('𑼄', '𑼐'), + ('𑼒', '𑼳'), + ('𑾰', '𑾰'), + ('𒀀', '𒎙'), + ('𒒀', '𒕃'), + ('𒾐', '𒿰'), + ('𓀀', '𓐯'), + ('𓑁', '𓑆'), + ('𓑠', '𔏺'), + ('𔐀', '𔙆'), + ('𖄀', '𖄝'), + ('𖠀', '𖨸'), + ('𖩀', '𖩞'), + ('𖩰', '𖪾'), + ('𖫐', '𖫭'), + ('𖬀', '𖬯'), + ('𖭣', '𖭷'), + ('𖭽', '𖮏'), + ('𖵃', '𖵪'), + ('𖼀', '𖽊'), + ('𖽐', '𖽐'), + ('𗀀', '𘟷'), + ('𘠀', '𘳕'), + ('𘳿', '𘴈'), + ('𛀀', '𛄢'), + ('𛄲', '𛄲'), + ('𛅐', '𛅒'), + ('𛅕', '𛅕'), + ('𛅤', '𛅧'), + ('𛅰', '𛋻'), + ('𛰀', '𛱪'), + ('𛱰', '𛱼'), + ('𛲀', '𛲈'), + ('𛲐', '𛲙'), + ('𝼊', '𝼊'), + ('𞄀', '𞄬'), + ('𞅎', '𞅎'), + ('𞊐', '𞊭'), + ('𞋀', '𞋫'), + ('𞓐', '𞓪'), + ('𞗐', '𞗭'), + ('𞗰', '𞗰'), + ('𞟠', '𞟦'), + ('𞟨', '𞟫'), + ('𞟭', '𞟮'), + ('𞟰', '𞟾'), + ('𞠀', '𞣄'), + ('𞸀', '𞸃'), + ('𞸅', '𞸟'), + ('𞸡', '𞸢'), + ('𞸤', '𞸤'), + ('𞸧', '𞸧'), + ('𞸩', '𞸲'), + ('𞸴', '𞸷'), + ('𞸹', '𞸹'), + ('𞸻', '𞸻'), + ('𞹂', '𞹂'), + ('𞹇', '𞹇'), + ('𞹉', '𞹉'), + ('𞹋', '𞹋'), + ('𞹍', '𞹏'), + ('𞹑', '𞹒'), + ('𞹔', '𞹔'), + ('𞹗', '𞹗'), + ('𞹙', '𞹙'), + ('𞹛', '𞹛'), + ('𞹝', '𞹝'), + ('𞹟', '𞹟'), + ('𞹡', '𞹢'), + ('𞹤', '𞹤'), + ('𞹧', '𞹪'), + ('𞹬', '𞹲'), + ('𞹴', '𞹷'), + ('𞹹', '𞹼'), + ('𞹾', '𞹾'), + ('𞺀', '𞺉'), + ('𞺋', '𞺛'), + ('𞺡', '𞺣'), + ('𞺥', '𞺩'), + ('𞺫', '𞺻'), + ('𠀀', '𪛟'), + ('𪜀', '𫜹'), + ('𫝀', '𫠝'), + ('𫠠', '𬺡'), + ('𬺰', '𮯠'), + ('𮯰', '𮹝'), + ('丽', '𪘀'), + ('𰀀', '𱍊'), + ('𱍐', '𲎯'), +]; + +pub const OTHER_NUMBER: &'static [(char, char)] = &[ + ('²', '³'), + ('¹', '¹'), + ('¼', '¾'), + ('৴', '৹'), + ('୲', '୷'), + ('௰', '௲'), + ('౸', '౾'), + ('൘', '൞'), + ('൰', '൸'), + ('༪', '༳'), + ('፩', '፼'), + ('៰', '៹'), + ('᧚', '᧚'), + ('⁰', '⁰'), + ('⁴', '⁹'), + ('₀', '₉'), + ('⅐', '⅟'), + ('↉', '↉'), + ('①', '⒛'), + ('⓪', '⓿'), + ('❶', '➓'), + ('⳽', '⳽'), + ('㆒', '㆕'), + ('㈠', '㈩'), + 
('㉈', '㉏'), + ('㉑', '㉟'), + ('㊀', '㊉'), + ('㊱', '㊿'), + ('꠰', '꠵'), + ('𐄇', '𐄳'), + ('𐅵', '𐅸'), + ('𐆊', '𐆋'), + ('𐋡', '𐋻'), + ('𐌠', '𐌣'), + ('𐡘', '𐡟'), + ('𐡹', '𐡿'), + ('𐢧', '𐢯'), + ('𐣻', '𐣿'), + ('𐤖', '𐤛'), + ('𐦼', '𐦽'), + ('𐧀', '𐧏'), + ('𐧒', '𐧿'), + ('𐩀', '𐩈'), + ('𐩽', '𐩾'), + ('𐪝', '𐪟'), + ('𐫫', '𐫯'), + ('𐭘', '𐭟'), + ('𐭸', '𐭿'), + ('𐮩', '𐮯'), + ('𐳺', '𐳿'), + ('𐹠', '𐹾'), + ('𐼝', '𐼦'), + ('𐽑', '𐽔'), + ('𐿅', '𐿋'), + ('𑁒', '𑁥'), + ('𑇡', '𑇴'), + ('𑜺', '𑜻'), + ('𑣪', '𑣲'), + ('𑱚', '𑱬'), + ('𑿀', '𑿔'), + ('𖭛', '𖭡'), + ('𖺀', '𖺖'), + ('𝋀', '𝋓'), + ('𝋠', '𝋳'), + ('𝍠', '𝍸'), + ('𞣇', '𞣏'), + ('𞱱', '𞲫'), + ('𞲭', '𞲯'), + ('𞲱', '𞲴'), + ('𞴁', '𞴭'), + ('𞴯', '𞴽'), + ('🄀', '🄌'), +]; + +pub const OTHER_PUNCTUATION: &'static [(char, char)] = &[ + ('!', '#'), + ('%', '\''), + ('*', '*'), + (',', ','), + ('.', '/'), + (':', ';'), + ('?', '@'), + ('\\', '\\'), + ('¡', '¡'), + ('§', '§'), + ('¶', '·'), + ('¿', '¿'), + (';', ';'), + ('·', '·'), + ('՚', '՟'), + ('։', '։'), + ('׀', '׀'), + ('׃', '׃'), + ('׆', '׆'), + ('׳', '״'), + ('؉', '؊'), + ('،', '؍'), + ('؛', '؛'), + ('؝', '؟'), + ('٪', '٭'), + ('۔', '۔'), + ('܀', '܍'), + ('߷', '߹'), + ('࠰', '࠾'), + ('࡞', '࡞'), + ('।', '॥'), + ('॰', '॰'), + ('৽', '৽'), + ('੶', '੶'), + ('૰', '૰'), + ('౷', '౷'), + ('಄', '಄'), + ('෴', '෴'), + ('๏', '๏'), + ('๚', '๛'), + ('༄', '༒'), + ('༔', '༔'), + ('྅', '྅'), + ('࿐', '࿔'), + ('࿙', '࿚'), + ('၊', '၏'), + ('჻', '჻'), + ('፠', '፨'), + ('᙮', '᙮'), + ('᛫', '᛭'), + ('᜵', '᜶'), + ('។', '៖'), + ('៘', '៚'), + ('᠀', '᠅'), + ('᠇', '᠊'), + ('᥄', '᥅'), + ('᨞', '᨟'), + ('᪠', '᪦'), + ('᪨', '᪭'), + ('᭎', '᭏'), + ('᭚', '᭠'), + ('᭽', '᭿'), + ('᯼', '᯿'), + ('᰻', '᰿'), + ('᱾', '᱿'), + ('᳀', '᳇'), + ('᳓', '᳓'), + ('‖', '‗'), + ('†', '‧'), + ('‰', '‸'), + ('※', '‾'), + ('⁁', '⁃'), + ('⁇', '⁑'), + ('⁓', '⁓'), + ('⁕', '⁞'), + ('⳹', '⳼'), + ('⳾', '⳿'), + ('⵰', '⵰'), + ('⸀', '⸁'), + ('⸆', '⸈'), + ('⸋', '⸋'), + ('⸎', '⸖'), + ('⸘', '⸙'), + ('⸛', '⸛'), + ('⸞', '⸟'), + ('⸪', '⸮'), + ('⸰', '⸹'), + ('⸼', '⸿'), + ('⹁', '⹁'), + ('⹃', '⹏'), + ('⹒', '⹔'), + ('、', '〃'), + ('〽', '〽'), + ('・', '・'), + ('꓾', '꓿'), + ('꘍', '꘏'), + ('꙳', '꙳'), + ('꙾', '꙾'), + ('꛲', '꛷'), + ('꡴', '꡷'), + ('꣎', '꣏'), + ('꣸', '꣺'), + ('꣼', '꣼'), + ('꤮', '꤯'), + ('꥟', '꥟'), + ('꧁', '꧍'), + ('꧞', '꧟'), + ('꩜', '꩟'), + ('꫞', '꫟'), + ('꫰', '꫱'), + ('꯫', '꯫'), + ('︐', '︖'), + ('︙', '︙'), + ('︰', '︰'), + ('﹅', '﹆'), + ('﹉', '﹌'), + ('﹐', '﹒'), + ('﹔', '﹗'), + ('﹟', '﹡'), + ('﹨', '﹨'), + ('﹪', '﹫'), + ('!', '#'), + ('%', '''), + ('*', '*'), + (',', ','), + ('.', '/'), + (':', ';'), + ('?', '@'), + ('\', '\'), + ('。', '。'), + ('、', '・'), + ('𐄀', '𐄂'), + ('𐎟', '𐎟'), + ('𐏐', '𐏐'), + ('𐕯', '𐕯'), + ('𐡗', '𐡗'), + ('𐤟', '𐤟'), + ('𐤿', '𐤿'), + ('𐩐', '𐩘'), + ('𐩿', '𐩿'), + ('𐫰', '𐫶'), + ('𐬹', '𐬿'), + ('𐮙', '𐮜'), + ('𐽕', '𐽙'), + ('𐾆', '𐾉'), + ('𑁇', '𑁍'), + ('𑂻', '𑂼'), + ('𑂾', '𑃁'), + ('𑅀', '𑅃'), + ('𑅴', '𑅵'), + ('𑇅', '𑇈'), + ('𑇍', '𑇍'), + ('𑇛', '𑇛'), + ('𑇝', '𑇟'), + ('𑈸', '𑈽'), + ('𑊩', '𑊩'), + ('𑏔', '𑏕'), + ('𑏗', '𑏘'), + ('𑑋', '𑑏'), + ('𑑚', '𑑛'), + ('𑑝', '𑑝'), + ('𑓆', '𑓆'), + ('𑗁', '𑗗'), + ('𑙁', '𑙃'), + ('𑙠', '𑙬'), + ('𑚹', '𑚹'), + ('𑜼', '𑜾'), + ('𑠻', '𑠻'), + ('𑥄', '𑥆'), + ('𑧢', '𑧢'), + ('𑨿', '𑩆'), + ('𑪚', '𑪜'), + ('𑪞', '𑪢'), + ('𑬀', '𑬉'), + ('𑯡', '𑯡'), + ('𑱁', '𑱅'), + ('𑱰', '𑱱'), + ('𑻷', '𑻸'), + ('𑽃', '𑽏'), + ('𑿿', '𑿿'), + ('𒑰', '𒑴'), + ('𒿱', '𒿲'), + ('𖩮', '𖩯'), + ('𖫵', '𖫵'), + ('𖬷', '𖬻'), + ('𖭄', '𖭄'), + ('𖵭', '𖵯'), + ('𖺗', '𖺚'), + ('𖿢', '𖿢'), + ('𛲟', '𛲟'), + ('𝪇', '𝪋'), + ('𞗿', '𞗿'), + ('𞥞', '𞥟'), +]; + +pub const OTHER_SYMBOL: &'static [(char, char)] = &[ + ('¦', '¦'), + ('©', '©'), + ('®', '®'), + ('°', 
'°'), + ('҂', '҂'), + ('֍', '֎'), + ('؎', '؏'), + ('۞', '۞'), + ('۩', '۩'), + ('۽', '۾'), + ('߶', '߶'), + ('৺', '৺'), + ('୰', '୰'), + ('௳', '௸'), + ('௺', '௺'), + ('౿', '౿'), + ('൏', '൏'), + ('൹', '൹'), + ('༁', '༃'), + ('༓', '༓'), + ('༕', '༗'), + ('༚', '༟'), + ('༴', '༴'), + ('༶', '༶'), + ('༸', '༸'), + ('྾', '࿅'), + ('࿇', '࿌'), + ('࿎', '࿏'), + ('࿕', '࿘'), + ('႞', '႟'), + ('᎐', '᎙'), + ('᙭', '᙭'), + ('᥀', '᥀'), + ('᧞', '᧿'), + ('᭡', '᭪'), + ('᭴', '᭼'), + ('℀', '℁'), + ('℃', '℆'), + ('℈', '℉'), + ('℔', '℔'), + ('№', '℗'), + ('℞', '℣'), + ('℥', '℥'), + ('℧', '℧'), + ('℩', '℩'), + ('℮', '℮'), + ('℺', '℻'), + ('⅊', '⅊'), + ('⅌', '⅍'), + ('⅏', '⅏'), + ('↊', '↋'), + ('↕', '↙'), + ('↜', '↟'), + ('↡', '↢'), + ('↤', '↥'), + ('↧', '↭'), + ('↯', '⇍'), + ('⇐', '⇑'), + ('⇓', '⇓'), + ('⇕', '⇳'), + ('⌀', '⌇'), + ('⌌', '⌟'), + ('⌢', '⌨'), + ('⌫', '⍻'), + ('⍽', '⎚'), + ('⎴', '⏛'), + ('⏢', '␩'), + ('⑀', '⑊'), + ('⒜', 'ⓩ'), + ('─', '▶'), + ('▸', '◀'), + ('◂', '◷'), + ('☀', '♮'), + ('♰', '❧'), + ('➔', '➿'), + ('⠀', '⣿'), + ('⬀', '⬯'), + ('⭅', '⭆'), + ('⭍', '⭳'), + ('⭶', '⮕'), + ('⮗', '⯿'), + ('⳥', '⳪'), + ('⹐', '⹑'), + ('⺀', '⺙'), + ('⺛', '⻳'), + ('⼀', '⿕'), + ('⿰', '⿿'), + ('〄', '〄'), + ('〒', '〓'), + ('〠', '〠'), + ('〶', '〷'), + ('〾', '〿'), + ('㆐', '㆑'), + ('㆖', '㆟'), + ('㇀', '㇥'), + ('㇯', '㇯'), + ('㈀', '㈞'), + ('㈪', '㉇'), + ('㉐', '㉐'), + ('㉠', '㉿'), + ('㊊', '㊰'), + ('㋀', '㏿'), + ('䷀', '䷿'), + ('꒐', '꓆'), + ('꠨', '꠫'), + ('꠶', '꠷'), + ('꠹', '꠹'), + ('꩷', '꩹'), + ('﵀', '﵏'), + ('﷏', '﷏'), + ('﷽', '﷿'), + ('¦', '¦'), + ('│', '│'), + ('■', '○'), + ('', '�'), + ('𐄷', '𐄿'), + ('𐅹', '𐆉'), + ('𐆌', '𐆎'), + ('𐆐', '𐆜'), + ('𐆠', '𐆠'), + ('𐇐', '𐇼'), + ('𐡷', '𐡸'), + ('𐫈', '𐫈'), + ('𑜿', '𑜿'), + ('𑿕', '𑿜'), + ('𑿡', '𑿱'), + ('𖬼', '𖬿'), + ('𖭅', '𖭅'), + ('𛲜', '𛲜'), + ('𜰀', '𜳯'), + ('𜴀', '𜺳'), + ('𜽐', '𜿃'), + ('𝀀', '𝃵'), + ('𝄀', '𝄦'), + ('𝄩', '𝅘𝅥𝅲'), + ('𝅪', '𝅬'), + ('𝆃', '𝆄'), + ('𝆌', '𝆩'), + ('𝆮', '𝇪'), + ('𝈀', '𝉁'), + ('𝉅', '𝉅'), + ('𝌀', '𝍖'), + ('𝠀', '𝧿'), + ('𝨷', '𝨺'), + ('𝩭', '𝩴'), + ('𝩶', '𝪃'), + ('𝪅', '𝪆'), + ('𞅏', '𞅏'), + ('𞲬', '𞲬'), + ('𞴮', '𞴮'), + ('🀀', '🀫'), + ('🀰', '🂓'), + ('🂠', '🂮'), + ('🂱', '🂿'), + ('🃁', '🃏'), + ('🃑', '🃵'), + ('🄍', '🆭'), + ('🇦', '🈂'), + ('🈐', '🈻'), + ('🉀', '🉈'), + ('🉐', '🉑'), + ('🉠', '🉥'), + ('🌀', '🏺'), + ('🐀', '🛗'), + ('🛜', '🛬'), + ('🛰', '🛼'), + ('🜀', '🝶'), + ('🝻', '🟙'), + ('🟠', '🟫'), + ('🟰', '🟰'), + ('🠀', '🠋'), + ('🠐', '🡇'), + ('🡐', '🡙'), + ('🡠', '🢇'), + ('🢐', '🢭'), + ('🢰', '🢻'), + ('🣀', '🣁'), + ('🤀', '🩓'), + ('🩠', '🩭'), + ('🩰', '🩼'), + ('🪀', '🪉'), + ('🪏', '🫆'), + ('🫎', '🫜'), + ('🫟', '🫩'), + ('🫰', '🫸'), + ('🬀', '🮒'), + ('🮔', '🯯'), +]; + +pub const PARAGRAPH_SEPARATOR: &'static [(char, char)] = + &[('\u{2029}', '\u{2029}')]; + +pub const PRIVATE_USE: &'static [(char, char)] = &[ + ('\u{e000}', '\u{f8ff}'), + ('\u{f0000}', '\u{ffffd}'), + ('\u{100000}', '\u{10fffd}'), +]; + +pub const PUNCTUATION: &'static [(char, char)] = &[ + ('!', '#'), + ('%', '*'), + (',', '/'), + (':', ';'), + ('?', '@'), + ('[', ']'), + ('_', '_'), + ('{', '{'), + ('}', '}'), + ('¡', '¡'), + ('§', '§'), + ('«', '«'), + ('¶', '·'), + ('»', '»'), + ('¿', '¿'), + (';', ';'), + ('·', '·'), + ('՚', '՟'), + ('։', '֊'), + ('־', '־'), + ('׀', '׀'), + ('׃', '׃'), + ('׆', '׆'), + ('׳', '״'), + ('؉', '؊'), + ('،', '؍'), + ('؛', '؛'), + ('؝', '؟'), + ('٪', '٭'), + ('۔', '۔'), + ('܀', '܍'), + ('߷', '߹'), + ('࠰', '࠾'), + ('࡞', '࡞'), + ('।', '॥'), + ('॰', '॰'), + ('৽', '৽'), + ('੶', '੶'), + ('૰', '૰'), + ('౷', '౷'), + ('಄', '಄'), + ('෴', '෴'), + ('๏', '๏'), + ('๚', '๛'), + ('༄', '༒'), + ('༔', '༔'), + ('༺', '༽'), + ('྅', '྅'), + ('࿐', '࿔'), 
+ ('࿙', '࿚'), + ('၊', '၏'), + ('჻', '჻'), + ('፠', '፨'), + ('᐀', '᐀'), + ('᙮', '᙮'), + ('᚛', '᚜'), + ('᛫', '᛭'), + ('᜵', '᜶'), + ('។', '៖'), + ('៘', '៚'), + ('᠀', '᠊'), + ('᥄', '᥅'), + ('᨞', '᨟'), + ('᪠', '᪦'), + ('᪨', '᪭'), + ('᭎', '᭏'), + ('᭚', '᭠'), + ('᭽', '᭿'), + ('᯼', '᯿'), + ('᰻', '᰿'), + ('᱾', '᱿'), + ('᳀', '᳇'), + ('᳓', '᳓'), + ('‐', '‧'), + ('‰', '⁃'), + ('⁅', '⁑'), + ('⁓', '⁞'), + ('⁽', '⁾'), + ('₍', '₎'), + ('⌈', '⌋'), + ('〈', '〉'), + ('❨', '❵'), + ('⟅', '⟆'), + ('⟦', '⟯'), + ('⦃', '⦘'), + ('⧘', '⧛'), + ('⧼', '⧽'), + ('⳹', '⳼'), + ('⳾', '⳿'), + ('⵰', '⵰'), + ('⸀', '⸮'), + ('⸰', '⹏'), + ('⹒', '⹝'), + ('、', '〃'), + ('〈', '】'), + ('〔', '〟'), + ('〰', '〰'), + ('〽', '〽'), + ('゠', '゠'), + ('・', '・'), + ('꓾', '꓿'), + ('꘍', '꘏'), + ('꙳', '꙳'), + ('꙾', '꙾'), + ('꛲', '꛷'), + ('꡴', '꡷'), + ('꣎', '꣏'), + ('꣸', '꣺'), + ('꣼', '꣼'), + ('꤮', '꤯'), + ('꥟', '꥟'), + ('꧁', '꧍'), + ('꧞', '꧟'), + ('꩜', '꩟'), + ('꫞', '꫟'), + ('꫰', '꫱'), + ('꯫', '꯫'), + ('﴾', '﴿'), + ('︐', '︙'), + ('︰', '﹒'), + ('﹔', '﹡'), + ('﹣', '﹣'), + ('﹨', '﹨'), + ('﹪', '﹫'), + ('!', '#'), + ('%', '*'), + (',', '/'), + (':', ';'), + ('?', '@'), + ('[', ']'), + ('_', '_'), + ('{', '{'), + ('}', '}'), + ('⦅', '・'), + ('𐄀', '𐄂'), + ('𐎟', '𐎟'), + ('𐏐', '𐏐'), + ('𐕯', '𐕯'), + ('𐡗', '𐡗'), + ('𐤟', '𐤟'), + ('𐤿', '𐤿'), + ('𐩐', '𐩘'), + ('𐩿', '𐩿'), + ('𐫰', '𐫶'), + ('𐬹', '𐬿'), + ('𐮙', '𐮜'), + ('𐵮', '𐵮'), + ('𐺭', '𐺭'), + ('𐽕', '𐽙'), + ('𐾆', '𐾉'), + ('𑁇', '𑁍'), + ('𑂻', '𑂼'), + ('𑂾', '𑃁'), + ('𑅀', '𑅃'), + ('𑅴', '𑅵'), + ('𑇅', '𑇈'), + ('𑇍', '𑇍'), + ('𑇛', '𑇛'), + ('𑇝', '𑇟'), + ('𑈸', '𑈽'), + ('𑊩', '𑊩'), + ('𑏔', '𑏕'), + ('𑏗', '𑏘'), + ('𑑋', '𑑏'), + ('𑑚', '𑑛'), + ('𑑝', '𑑝'), + ('𑓆', '𑓆'), + ('𑗁', '𑗗'), + ('𑙁', '𑙃'), + ('𑙠', '𑙬'), + ('𑚹', '𑚹'), + ('𑜼', '𑜾'), + ('𑠻', '𑠻'), + ('𑥄', '𑥆'), + ('𑧢', '𑧢'), + ('𑨿', '𑩆'), + ('𑪚', '𑪜'), + ('𑪞', '𑪢'), + ('𑬀', '𑬉'), + ('𑯡', '𑯡'), + ('𑱁', '𑱅'), + ('𑱰', '𑱱'), + ('𑻷', '𑻸'), + ('𑽃', '𑽏'), + ('𑿿', '𑿿'), + ('𒑰', '𒑴'), + ('𒿱', '𒿲'), + ('𖩮', '𖩯'), + ('𖫵', '𖫵'), + ('𖬷', '𖬻'), + ('𖭄', '𖭄'), + ('𖵭', '𖵯'), + ('𖺗', '𖺚'), + ('𖿢', '𖿢'), + ('𛲟', '𛲟'), + ('𝪇', '𝪋'), + ('𞗿', '𞗿'), + ('𞥞', '𞥟'), +]; + +pub const SEPARATOR: &'static [(char, char)] = &[ + (' ', ' '), + ('\u{a0}', '\u{a0}'), + ('\u{1680}', '\u{1680}'), + ('\u{2000}', '\u{200a}'), + ('\u{2028}', '\u{2029}'), + ('\u{202f}', '\u{202f}'), + ('\u{205f}', '\u{205f}'), + ('\u{3000}', '\u{3000}'), +]; + +pub const SPACE_SEPARATOR: &'static [(char, char)] = &[ + (' ', ' '), + ('\u{a0}', '\u{a0}'), + ('\u{1680}', '\u{1680}'), + ('\u{2000}', '\u{200a}'), + ('\u{202f}', '\u{202f}'), + ('\u{205f}', '\u{205f}'), + ('\u{3000}', '\u{3000}'), +]; + +pub const SPACING_MARK: &'static [(char, char)] = &[ + ('ः', 'ः'), + ('ऻ', 'ऻ'), + ('ा', 'ी'), + ('ॉ', 'ौ'), + ('ॎ', 'ॏ'), + ('ং', 'ঃ'), + ('\u{9be}', 'ী'), + ('ে', 'ৈ'), + ('ো', 'ৌ'), + ('\u{9d7}', '\u{9d7}'), + ('ਃ', 'ਃ'), + ('ਾ', 'ੀ'), + ('ઃ', 'ઃ'), + ('ા', 'ી'), + ('ૉ', 'ૉ'), + ('ો', 'ૌ'), + ('ଂ', 'ଃ'), + ('\u{b3e}', '\u{b3e}'), + ('ୀ', 'ୀ'), + ('େ', 'ୈ'), + ('ୋ', 'ୌ'), + ('\u{b57}', '\u{b57}'), + ('\u{bbe}', 'ி'), + ('ு', 'ூ'), + ('ெ', 'ை'), + ('ொ', 'ௌ'), + ('\u{bd7}', '\u{bd7}'), + ('ఁ', 'ః'), + ('ు', 'ౄ'), + ('ಂ', 'ಃ'), + ('ಾ', 'ಾ'), + ('\u{cc0}', 'ೄ'), + ('\u{cc7}', '\u{cc8}'), + ('\u{cca}', '\u{ccb}'), + ('\u{cd5}', '\u{cd6}'), + ('ೳ', 'ೳ'), + ('ം', 'ഃ'), + ('\u{d3e}', 'ീ'), + ('െ', 'ൈ'), + ('ൊ', 'ൌ'), + ('\u{d57}', '\u{d57}'), + ('ං', 'ඃ'), + ('\u{dcf}', 'ෑ'), + ('ෘ', '\u{ddf}'), + ('ෲ', 'ෳ'), + ('༾', '༿'), + ('ཿ', 'ཿ'), + ('ါ', 'ာ'), + ('ေ', 'ေ'), + ('း', 'း'), + ('ျ', 'ြ'), + ('ၖ', 'ၗ'), + ('ၢ', 'ၤ'), + ('ၧ', 'ၭ'), + ('ႃ', 'ႄ'), + 
('ႇ', 'ႌ'), + ('ႏ', 'ႏ'), + ('ႚ', 'ႜ'), + ('\u{1715}', '\u{1715}'), + ('\u{1734}', '\u{1734}'), + ('ា', 'ា'), + ('ើ', 'ៅ'), + ('ះ', 'ៈ'), + ('ᤣ', 'ᤦ'), + ('ᤩ', 'ᤫ'), + ('ᤰ', 'ᤱ'), + ('ᤳ', 'ᤸ'), + ('ᨙ', 'ᨚ'), + ('ᩕ', 'ᩕ'), + ('ᩗ', 'ᩗ'), + ('ᩡ', 'ᩡ'), + ('ᩣ', 'ᩤ'), + ('ᩭ', 'ᩲ'), + ('ᬄ', 'ᬄ'), + ('\u{1b35}', '\u{1b35}'), + ('\u{1b3b}', '\u{1b3b}'), + ('\u{1b3d}', 'ᭁ'), + ('\u{1b43}', '\u{1b44}'), + ('ᮂ', 'ᮂ'), + ('ᮡ', 'ᮡ'), + ('ᮦ', 'ᮧ'), + ('\u{1baa}', '\u{1baa}'), + ('ᯧ', 'ᯧ'), + ('ᯪ', 'ᯬ'), + ('ᯮ', 'ᯮ'), + ('\u{1bf2}', '\u{1bf3}'), + ('ᰤ', 'ᰫ'), + ('ᰴ', 'ᰵ'), + ('᳡', '᳡'), + ('᳷', '᳷'), + ('\u{302e}', '\u{302f}'), + ('ꠣ', 'ꠤ'), + ('ꠧ', 'ꠧ'), + ('ꢀ', 'ꢁ'), + ('ꢴ', 'ꣃ'), + ('ꥒ', '\u{a953}'), + ('ꦃ', 'ꦃ'), + ('ꦴ', 'ꦵ'), + ('ꦺ', 'ꦻ'), + ('ꦾ', '\u{a9c0}'), + ('ꨯ', 'ꨰ'), + ('ꨳ', 'ꨴ'), + ('ꩍ', 'ꩍ'), + ('ꩻ', 'ꩻ'), + ('ꩽ', 'ꩽ'), + ('ꫫ', 'ꫫ'), + ('ꫮ', 'ꫯ'), + ('ꫵ', 'ꫵ'), + ('ꯣ', 'ꯤ'), + ('ꯦ', 'ꯧ'), + ('ꯩ', 'ꯪ'), + ('꯬', '꯬'), + ('𑀀', '𑀀'), + ('𑀂', '𑀂'), + ('𑂂', '𑂂'), + ('𑂰', '𑂲'), + ('𑂷', '𑂸'), + ('𑄬', '𑄬'), + ('𑅅', '𑅆'), + ('𑆂', '𑆂'), + ('𑆳', '𑆵'), + ('𑆿', '\u{111c0}'), + ('𑇎', '𑇎'), + ('𑈬', '𑈮'), + ('𑈲', '𑈳'), + ('\u{11235}', '\u{11235}'), + ('𑋠', '𑋢'), + ('𑌂', '𑌃'), + ('\u{1133e}', '𑌿'), + ('𑍁', '𑍄'), + ('𑍇', '𑍈'), + ('𑍋', '\u{1134d}'), + ('\u{11357}', '\u{11357}'), + ('𑍢', '𑍣'), + ('\u{113b8}', '𑎺'), + ('\u{113c2}', '\u{113c2}'), + ('\u{113c5}', '\u{113c5}'), + ('\u{113c7}', '𑏊'), + ('𑏌', '𑏍'), + ('\u{113cf}', '\u{113cf}'), + ('𑐵', '𑐷'), + ('𑑀', '𑑁'), + ('𑑅', '𑑅'), + ('\u{114b0}', '𑒲'), + ('𑒹', '𑒹'), + ('𑒻', '𑒾'), + ('𑓁', '𑓁'), + ('\u{115af}', '𑖱'), + ('𑖸', '𑖻'), + ('𑖾', '𑖾'), + ('𑘰', '𑘲'), + ('𑘻', '𑘼'), + ('𑘾', '𑘾'), + ('𑚬', '𑚬'), + ('𑚮', '𑚯'), + ('\u{116b6}', '\u{116b6}'), + ('𑜞', '𑜞'), + ('𑜠', '𑜡'), + ('𑜦', '𑜦'), + ('𑠬', '𑠮'), + ('𑠸', '𑠸'), + ('\u{11930}', '𑤵'), + ('𑤷', '𑤸'), + ('\u{1193d}', '\u{1193d}'), + ('𑥀', '𑥀'), + ('𑥂', '𑥂'), + ('𑧑', '𑧓'), + ('𑧜', '𑧟'), + ('𑧤', '𑧤'), + ('𑨹', '𑨹'), + ('𑩗', '𑩘'), + ('𑪗', '𑪗'), + ('𑰯', '𑰯'), + ('𑰾', '𑰾'), + ('𑲩', '𑲩'), + ('𑲱', '𑲱'), + ('𑲴', '𑲴'), + ('𑶊', '𑶎'), + ('𑶓', '𑶔'), + ('𑶖', '𑶖'), + ('𑻵', '𑻶'), + ('𑼃', '𑼃'), + ('𑼴', '𑼵'), + ('𑼾', '𑼿'), + ('\u{11f41}', '\u{11f41}'), + ('𖄪', '𖄬'), + ('𖽑', '𖾇'), + ('\u{16ff0}', '\u{16ff1}'), + ('\u{1d165}', '\u{1d166}'), + ('\u{1d16d}', '\u{1d172}'), +]; + +pub const SYMBOL: &'static [(char, char)] = &[ + ('$', '$'), + ('+', '+'), + ('<', '>'), + ('^', '^'), + ('`', '`'), + ('|', '|'), + ('~', '~'), + ('¢', '¦'), + ('¨', '©'), + ('¬', '¬'), + ('®', '±'), + ('´', '´'), + ('¸', '¸'), + ('×', '×'), + ('÷', '÷'), + ('˂', '˅'), + ('˒', '˟'), + ('˥', '˫'), + ('˭', '˭'), + ('˯', '˿'), + ('͵', '͵'), + ('΄', '΅'), + ('϶', '϶'), + ('҂', '҂'), + ('֍', '֏'), + ('؆', '؈'), + ('؋', '؋'), + ('؎', '؏'), + ('۞', '۞'), + ('۩', '۩'), + ('۽', '۾'), + ('߶', '߶'), + ('߾', '߿'), + ('࢈', '࢈'), + ('৲', '৳'), + ('৺', '৻'), + ('૱', '૱'), + ('୰', '୰'), + ('௳', '௺'), + ('౿', '౿'), + ('൏', '൏'), + ('൹', '൹'), + ('฿', '฿'), + ('༁', '༃'), + ('༓', '༓'), + ('༕', '༗'), + ('༚', '༟'), + ('༴', '༴'), + ('༶', '༶'), + ('༸', '༸'), + ('྾', '࿅'), + ('࿇', '࿌'), + ('࿎', '࿏'), + ('࿕', '࿘'), + ('႞', '႟'), + ('᎐', '᎙'), + ('᙭', '᙭'), + ('៛', '៛'), + ('᥀', '᥀'), + ('᧞', '᧿'), + ('᭡', '᭪'), + ('᭴', '᭼'), + ('᾽', '᾽'), + ('᾿', '῁'), + ('῍', '῏'), + ('῝', '῟'), + ('῭', '`'), + ('´', '῾'), + ('⁄', '⁄'), + ('⁒', '⁒'), + ('⁺', '⁼'), + ('₊', '₌'), + ('₠', '⃀'), + ('℀', '℁'), + ('℃', '℆'), + ('℈', '℉'), + ('℔', '℔'), + ('№', '℘'), + ('℞', '℣'), + ('℥', '℥'), + ('℧', '℧'), + ('℩', '℩'), + ('℮', '℮'), + ('℺', '℻'), + ('⅀', '⅄'), + ('⅊', '⅍'), + ('⅏', '⅏'), + ('↊', '↋'), + ('←', 
'⌇'), + ('⌌', '⌨'), + ('⌫', '␩'), + ('⑀', '⑊'), + ('⒜', 'ⓩ'), + ('─', '❧'), + ('➔', '⟄'), + ('⟇', '⟥'), + ('⟰', '⦂'), + ('⦙', '⧗'), + ('⧜', '⧻'), + ('⧾', '⭳'), + ('⭶', '⮕'), + ('⮗', '⯿'), + ('⳥', '⳪'), + ('⹐', '⹑'), + ('⺀', '⺙'), + ('⺛', '⻳'), + ('⼀', '⿕'), + ('⿰', '⿿'), + ('〄', '〄'), + ('〒', '〓'), + ('〠', '〠'), + ('〶', '〷'), + ('〾', '〿'), + ('゛', '゜'), + ('㆐', '㆑'), + ('㆖', '㆟'), + ('㇀', '㇥'), + ('㇯', '㇯'), + ('㈀', '㈞'), + ('㈪', '㉇'), + ('㉐', '㉐'), + ('㉠', '㉿'), + ('㊊', '㊰'), + ('㋀', '㏿'), + ('䷀', '䷿'), + ('꒐', '꓆'), + ('꜀', '꜖'), + ('꜠', '꜡'), + ('꞉', '꞊'), + ('꠨', '꠫'), + ('꠶', '꠹'), + ('꩷', '꩹'), + ('꭛', '꭛'), + ('꭪', '꭫'), + ('﬩', '﬩'), + ('﮲', '﯂'), + ('﵀', '﵏'), + ('﷏', '﷏'), + ('﷼', '﷿'), + ('﹢', '﹢'), + ('﹤', '﹦'), + ('﹩', '﹩'), + ('$', '$'), + ('+', '+'), + ('<', '>'), + ('^', '^'), + ('`', '`'), + ('|', '|'), + ('~', '~'), + ('¢', '₩'), + ('│', '○'), + ('', '�'), + ('𐄷', '𐄿'), + ('𐅹', '𐆉'), + ('𐆌', '𐆎'), + ('𐆐', '𐆜'), + ('𐆠', '𐆠'), + ('𐇐', '𐇼'), + ('𐡷', '𐡸'), + ('𐫈', '𐫈'), + ('𐶎', '𐶏'), + ('𑜿', '𑜿'), + ('𑿕', '𑿱'), + ('𖬼', '𖬿'), + ('𖭅', '𖭅'), + ('𛲜', '𛲜'), + ('𜰀', '𜳯'), + ('𜴀', '𜺳'), + ('𜽐', '𜿃'), + ('𝀀', '𝃵'), + ('𝄀', '𝄦'), + ('𝄩', '𝅘𝅥𝅲'), + ('𝅪', '𝅬'), + ('𝆃', '𝆄'), + ('𝆌', '𝆩'), + ('𝆮', '𝇪'), + ('𝈀', '𝉁'), + ('𝉅', '𝉅'), + ('𝌀', '𝍖'), + ('𝛁', '𝛁'), + ('𝛛', '𝛛'), + ('𝛻', '𝛻'), + ('𝜕', '𝜕'), + ('𝜵', '𝜵'), + ('𝝏', '𝝏'), + ('𝝯', '𝝯'), + ('𝞉', '𝞉'), + ('𝞩', '𝞩'), + ('𝟃', '𝟃'), + ('𝠀', '𝧿'), + ('𝨷', '𝨺'), + ('𝩭', '𝩴'), + ('𝩶', '𝪃'), + ('𝪅', '𝪆'), + ('𞅏', '𞅏'), + ('𞋿', '𞋿'), + ('𞲬', '𞲬'), + ('𞲰', '𞲰'), + ('𞴮', '𞴮'), + ('𞻰', '𞻱'), + ('🀀', '🀫'), + ('🀰', '🂓'), + ('🂠', '🂮'), + ('🂱', '🂿'), + ('🃁', '🃏'), + ('🃑', '🃵'), + ('🄍', '🆭'), + ('🇦', '🈂'), + ('🈐', '🈻'), + ('🉀', '🉈'), + ('🉐', '🉑'), + ('🉠', '🉥'), + ('🌀', '🛗'), + ('🛜', '🛬'), + ('🛰', '🛼'), + ('🜀', '🝶'), + ('🝻', '🟙'), + ('🟠', '🟫'), + ('🟰', '🟰'), + ('🠀', '🠋'), + ('🠐', '🡇'), + ('🡐', '🡙'), + ('🡠', '🢇'), + ('🢐', '🢭'), + ('🢰', '🢻'), + ('🣀', '🣁'), + ('🤀', '🩓'), + ('🩠', '🩭'), + ('🩰', '🩼'), + ('🪀', '🪉'), + ('🪏', '🫆'), + ('🫎', '🫜'), + ('🫟', '🫩'), + ('🫰', '🫸'), + ('🬀', '🮒'), + ('🮔', '🯯'), +]; + +pub const TITLECASE_LETTER: &'static [(char, char)] = &[ + ('Dž', 'Dž'), + ('Lj', 'Lj'), + ('Nj', 'Nj'), + ('Dz', 'Dz'), + ('ᾈ', 'ᾏ'), + ('ᾘ', 'ᾟ'), + ('ᾨ', 'ᾯ'), + ('ᾼ', 'ᾼ'), + ('ῌ', 'ῌ'), + ('ῼ', 'ῼ'), +]; + +pub const UNASSIGNED: &'static [(char, char)] = &[ + ('\u{378}', '\u{379}'), + ('\u{380}', '\u{383}'), + ('\u{38b}', '\u{38b}'), + ('\u{38d}', '\u{38d}'), + ('\u{3a2}', '\u{3a2}'), + ('\u{530}', '\u{530}'), + ('\u{557}', '\u{558}'), + ('\u{58b}', '\u{58c}'), + ('\u{590}', '\u{590}'), + ('\u{5c8}', '\u{5cf}'), + ('\u{5eb}', '\u{5ee}'), + ('\u{5f5}', '\u{5ff}'), + ('\u{70e}', '\u{70e}'), + ('\u{74b}', '\u{74c}'), + ('\u{7b2}', '\u{7bf}'), + ('\u{7fb}', '\u{7fc}'), + ('\u{82e}', '\u{82f}'), + ('\u{83f}', '\u{83f}'), + ('\u{85c}', '\u{85d}'), + ('\u{85f}', '\u{85f}'), + ('\u{86b}', '\u{86f}'), + ('\u{88f}', '\u{88f}'), + ('\u{892}', '\u{896}'), + ('\u{984}', '\u{984}'), + ('\u{98d}', '\u{98e}'), + ('\u{991}', '\u{992}'), + ('\u{9a9}', '\u{9a9}'), + ('\u{9b1}', '\u{9b1}'), + ('\u{9b3}', '\u{9b5}'), + ('\u{9ba}', '\u{9bb}'), + ('\u{9c5}', '\u{9c6}'), + ('\u{9c9}', '\u{9ca}'), + ('\u{9cf}', '\u{9d6}'), + ('\u{9d8}', '\u{9db}'), + ('\u{9de}', '\u{9de}'), + ('\u{9e4}', '\u{9e5}'), + ('\u{9ff}', '\u{a00}'), + ('\u{a04}', '\u{a04}'), + ('\u{a0b}', '\u{a0e}'), + ('\u{a11}', '\u{a12}'), + ('\u{a29}', '\u{a29}'), + ('\u{a31}', '\u{a31}'), + ('\u{a34}', '\u{a34}'), + ('\u{a37}', '\u{a37}'), + ('\u{a3a}', '\u{a3b}'), + ('\u{a3d}', '\u{a3d}'), + ('\u{a43}', 
'\u{a46}'), + ('\u{a49}', '\u{a4a}'), + ('\u{a4e}', '\u{a50}'), + ('\u{a52}', '\u{a58}'), + ('\u{a5d}', '\u{a5d}'), + ('\u{a5f}', '\u{a65}'), + ('\u{a77}', '\u{a80}'), + ('\u{a84}', '\u{a84}'), + ('\u{a8e}', '\u{a8e}'), + ('\u{a92}', '\u{a92}'), + ('\u{aa9}', '\u{aa9}'), + ('\u{ab1}', '\u{ab1}'), + ('\u{ab4}', '\u{ab4}'), + ('\u{aba}', '\u{abb}'), + ('\u{ac6}', '\u{ac6}'), + ('\u{aca}', '\u{aca}'), + ('\u{ace}', '\u{acf}'), + ('\u{ad1}', '\u{adf}'), + ('\u{ae4}', '\u{ae5}'), + ('\u{af2}', '\u{af8}'), + ('\u{b00}', '\u{b00}'), + ('\u{b04}', '\u{b04}'), + ('\u{b0d}', '\u{b0e}'), + ('\u{b11}', '\u{b12}'), + ('\u{b29}', '\u{b29}'), + ('\u{b31}', '\u{b31}'), + ('\u{b34}', '\u{b34}'), + ('\u{b3a}', '\u{b3b}'), + ('\u{b45}', '\u{b46}'), + ('\u{b49}', '\u{b4a}'), + ('\u{b4e}', '\u{b54}'), + ('\u{b58}', '\u{b5b}'), + ('\u{b5e}', '\u{b5e}'), + ('\u{b64}', '\u{b65}'), + ('\u{b78}', '\u{b81}'), + ('\u{b84}', '\u{b84}'), + ('\u{b8b}', '\u{b8d}'), + ('\u{b91}', '\u{b91}'), + ('\u{b96}', '\u{b98}'), + ('\u{b9b}', '\u{b9b}'), + ('\u{b9d}', '\u{b9d}'), + ('\u{ba0}', '\u{ba2}'), + ('\u{ba5}', '\u{ba7}'), + ('\u{bab}', '\u{bad}'), + ('\u{bba}', '\u{bbd}'), + ('\u{bc3}', '\u{bc5}'), + ('\u{bc9}', '\u{bc9}'), + ('\u{bce}', '\u{bcf}'), + ('\u{bd1}', '\u{bd6}'), + ('\u{bd8}', '\u{be5}'), + ('\u{bfb}', '\u{bff}'), + ('\u{c0d}', '\u{c0d}'), + ('\u{c11}', '\u{c11}'), + ('\u{c29}', '\u{c29}'), + ('\u{c3a}', '\u{c3b}'), + ('\u{c45}', '\u{c45}'), + ('\u{c49}', '\u{c49}'), + ('\u{c4e}', '\u{c54}'), + ('\u{c57}', '\u{c57}'), + ('\u{c5b}', '\u{c5c}'), + ('\u{c5e}', '\u{c5f}'), + ('\u{c64}', '\u{c65}'), + ('\u{c70}', '\u{c76}'), + ('\u{c8d}', '\u{c8d}'), + ('\u{c91}', '\u{c91}'), + ('\u{ca9}', '\u{ca9}'), + ('\u{cb4}', '\u{cb4}'), + ('\u{cba}', '\u{cbb}'), + ('\u{cc5}', '\u{cc5}'), + ('\u{cc9}', '\u{cc9}'), + ('\u{cce}', '\u{cd4}'), + ('\u{cd7}', '\u{cdc}'), + ('\u{cdf}', '\u{cdf}'), + ('\u{ce4}', '\u{ce5}'), + ('\u{cf0}', '\u{cf0}'), + ('\u{cf4}', '\u{cff}'), + ('\u{d0d}', '\u{d0d}'), + ('\u{d11}', '\u{d11}'), + ('\u{d45}', '\u{d45}'), + ('\u{d49}', '\u{d49}'), + ('\u{d50}', '\u{d53}'), + ('\u{d64}', '\u{d65}'), + ('\u{d80}', '\u{d80}'), + ('\u{d84}', '\u{d84}'), + ('\u{d97}', '\u{d99}'), + ('\u{db2}', '\u{db2}'), + ('\u{dbc}', '\u{dbc}'), + ('\u{dbe}', '\u{dbf}'), + ('\u{dc7}', '\u{dc9}'), + ('\u{dcb}', '\u{dce}'), + ('\u{dd5}', '\u{dd5}'), + ('\u{dd7}', '\u{dd7}'), + ('\u{de0}', '\u{de5}'), + ('\u{df0}', '\u{df1}'), + ('\u{df5}', '\u{e00}'), + ('\u{e3b}', '\u{e3e}'), + ('\u{e5c}', '\u{e80}'), + ('\u{e83}', '\u{e83}'), + ('\u{e85}', '\u{e85}'), + ('\u{e8b}', '\u{e8b}'), + ('\u{ea4}', '\u{ea4}'), + ('\u{ea6}', '\u{ea6}'), + ('\u{ebe}', '\u{ebf}'), + ('\u{ec5}', '\u{ec5}'), + ('\u{ec7}', '\u{ec7}'), + ('\u{ecf}', '\u{ecf}'), + ('\u{eda}', '\u{edb}'), + ('\u{ee0}', '\u{eff}'), + ('\u{f48}', '\u{f48}'), + ('\u{f6d}', '\u{f70}'), + ('\u{f98}', '\u{f98}'), + ('\u{fbd}', '\u{fbd}'), + ('\u{fcd}', '\u{fcd}'), + ('\u{fdb}', '\u{fff}'), + ('\u{10c6}', '\u{10c6}'), + ('\u{10c8}', '\u{10cc}'), + ('\u{10ce}', '\u{10cf}'), + ('\u{1249}', '\u{1249}'), + ('\u{124e}', '\u{124f}'), + ('\u{1257}', '\u{1257}'), + ('\u{1259}', '\u{1259}'), + ('\u{125e}', '\u{125f}'), + ('\u{1289}', '\u{1289}'), + ('\u{128e}', '\u{128f}'), + ('\u{12b1}', '\u{12b1}'), + ('\u{12b6}', '\u{12b7}'), + ('\u{12bf}', '\u{12bf}'), + ('\u{12c1}', '\u{12c1}'), + ('\u{12c6}', '\u{12c7}'), + ('\u{12d7}', '\u{12d7}'), + ('\u{1311}', '\u{1311}'), + ('\u{1316}', '\u{1317}'), + ('\u{135b}', '\u{135c}'), + ('\u{137d}', '\u{137f}'), + ('\u{139a}', '\u{139f}'), + ('\u{13f6}', 
'\u{13f7}'), + ('\u{13fe}', '\u{13ff}'), + ('\u{169d}', '\u{169f}'), + ('\u{16f9}', '\u{16ff}'), + ('\u{1716}', '\u{171e}'), + ('\u{1737}', '\u{173f}'), + ('\u{1754}', '\u{175f}'), + ('\u{176d}', '\u{176d}'), + ('\u{1771}', '\u{1771}'), + ('\u{1774}', '\u{177f}'), + ('\u{17de}', '\u{17df}'), + ('\u{17ea}', '\u{17ef}'), + ('\u{17fa}', '\u{17ff}'), + ('\u{181a}', '\u{181f}'), + ('\u{1879}', '\u{187f}'), + ('\u{18ab}', '\u{18af}'), + ('\u{18f6}', '\u{18ff}'), + ('\u{191f}', '\u{191f}'), + ('\u{192c}', '\u{192f}'), + ('\u{193c}', '\u{193f}'), + ('\u{1941}', '\u{1943}'), + ('\u{196e}', '\u{196f}'), + ('\u{1975}', '\u{197f}'), + ('\u{19ac}', '\u{19af}'), + ('\u{19ca}', '\u{19cf}'), + ('\u{19db}', '\u{19dd}'), + ('\u{1a1c}', '\u{1a1d}'), + ('\u{1a5f}', '\u{1a5f}'), + ('\u{1a7d}', '\u{1a7e}'), + ('\u{1a8a}', '\u{1a8f}'), + ('\u{1a9a}', '\u{1a9f}'), + ('\u{1aae}', '\u{1aaf}'), + ('\u{1acf}', '\u{1aff}'), + ('\u{1b4d}', '\u{1b4d}'), + ('\u{1bf4}', '\u{1bfb}'), + ('\u{1c38}', '\u{1c3a}'), + ('\u{1c4a}', '\u{1c4c}'), + ('\u{1c8b}', '\u{1c8f}'), + ('\u{1cbb}', '\u{1cbc}'), + ('\u{1cc8}', '\u{1ccf}'), + ('\u{1cfb}', '\u{1cff}'), + ('\u{1f16}', '\u{1f17}'), + ('\u{1f1e}', '\u{1f1f}'), + ('\u{1f46}', '\u{1f47}'), + ('\u{1f4e}', '\u{1f4f}'), + ('\u{1f58}', '\u{1f58}'), + ('\u{1f5a}', '\u{1f5a}'), + ('\u{1f5c}', '\u{1f5c}'), + ('\u{1f5e}', '\u{1f5e}'), + ('\u{1f7e}', '\u{1f7f}'), + ('\u{1fb5}', '\u{1fb5}'), + ('\u{1fc5}', '\u{1fc5}'), + ('\u{1fd4}', '\u{1fd5}'), + ('\u{1fdc}', '\u{1fdc}'), + ('\u{1ff0}', '\u{1ff1}'), + ('\u{1ff5}', '\u{1ff5}'), + ('\u{1fff}', '\u{1fff}'), + ('\u{2065}', '\u{2065}'), + ('\u{2072}', '\u{2073}'), + ('\u{208f}', '\u{208f}'), + ('\u{209d}', '\u{209f}'), + ('\u{20c1}', '\u{20cf}'), + ('\u{20f1}', '\u{20ff}'), + ('\u{218c}', '\u{218f}'), + ('\u{242a}', '\u{243f}'), + ('\u{244b}', '\u{245f}'), + ('\u{2b74}', '\u{2b75}'), + ('\u{2b96}', '\u{2b96}'), + ('\u{2cf4}', '\u{2cf8}'), + ('\u{2d26}', '\u{2d26}'), + ('\u{2d28}', '\u{2d2c}'), + ('\u{2d2e}', '\u{2d2f}'), + ('\u{2d68}', '\u{2d6e}'), + ('\u{2d71}', '\u{2d7e}'), + ('\u{2d97}', '\u{2d9f}'), + ('\u{2da7}', '\u{2da7}'), + ('\u{2daf}', '\u{2daf}'), + ('\u{2db7}', '\u{2db7}'), + ('\u{2dbf}', '\u{2dbf}'), + ('\u{2dc7}', '\u{2dc7}'), + ('\u{2dcf}', '\u{2dcf}'), + ('\u{2dd7}', '\u{2dd7}'), + ('\u{2ddf}', '\u{2ddf}'), + ('\u{2e5e}', '\u{2e7f}'), + ('\u{2e9a}', '\u{2e9a}'), + ('\u{2ef4}', '\u{2eff}'), + ('\u{2fd6}', '\u{2fef}'), + ('\u{3040}', '\u{3040}'), + ('\u{3097}', '\u{3098}'), + ('\u{3100}', '\u{3104}'), + ('\u{3130}', '\u{3130}'), + ('\u{318f}', '\u{318f}'), + ('\u{31e6}', '\u{31ee}'), + ('\u{321f}', '\u{321f}'), + ('\u{a48d}', '\u{a48f}'), + ('\u{a4c7}', '\u{a4cf}'), + ('\u{a62c}', '\u{a63f}'), + ('\u{a6f8}', '\u{a6ff}'), + ('\u{a7ce}', '\u{a7cf}'), + ('\u{a7d2}', '\u{a7d2}'), + ('\u{a7d4}', '\u{a7d4}'), + ('\u{a7dd}', '\u{a7f1}'), + ('\u{a82d}', '\u{a82f}'), + ('\u{a83a}', '\u{a83f}'), + ('\u{a878}', '\u{a87f}'), + ('\u{a8c6}', '\u{a8cd}'), + ('\u{a8da}', '\u{a8df}'), + ('\u{a954}', '\u{a95e}'), + ('\u{a97d}', '\u{a97f}'), + ('\u{a9ce}', '\u{a9ce}'), + ('\u{a9da}', '\u{a9dd}'), + ('\u{a9ff}', '\u{a9ff}'), + ('\u{aa37}', '\u{aa3f}'), + ('\u{aa4e}', '\u{aa4f}'), + ('\u{aa5a}', '\u{aa5b}'), + ('\u{aac3}', '\u{aada}'), + ('\u{aaf7}', '\u{ab00}'), + ('\u{ab07}', '\u{ab08}'), + ('\u{ab0f}', '\u{ab10}'), + ('\u{ab17}', '\u{ab1f}'), + ('\u{ab27}', '\u{ab27}'), + ('\u{ab2f}', '\u{ab2f}'), + ('\u{ab6c}', '\u{ab6f}'), + ('\u{abee}', '\u{abef}'), + ('\u{abfa}', '\u{abff}'), + ('\u{d7a4}', '\u{d7af}'), + ('\u{d7c7}', '\u{d7ca}'), + 
('\u{d7fc}', '\u{d7ff}'), + ('\u{fa6e}', '\u{fa6f}'), + ('\u{fada}', '\u{faff}'), + ('\u{fb07}', '\u{fb12}'), + ('\u{fb18}', '\u{fb1c}'), + ('\u{fb37}', '\u{fb37}'), + ('\u{fb3d}', '\u{fb3d}'), + ('\u{fb3f}', '\u{fb3f}'), + ('\u{fb42}', '\u{fb42}'), + ('\u{fb45}', '\u{fb45}'), + ('\u{fbc3}', '\u{fbd2}'), + ('\u{fd90}', '\u{fd91}'), + ('\u{fdc8}', '\u{fdce}'), + ('\u{fdd0}', '\u{fdef}'), + ('\u{fe1a}', '\u{fe1f}'), + ('\u{fe53}', '\u{fe53}'), + ('\u{fe67}', '\u{fe67}'), + ('\u{fe6c}', '\u{fe6f}'), + ('\u{fe75}', '\u{fe75}'), + ('\u{fefd}', '\u{fefe}'), + ('\u{ff00}', '\u{ff00}'), + ('\u{ffbf}', '\u{ffc1}'), + ('\u{ffc8}', '\u{ffc9}'), + ('\u{ffd0}', '\u{ffd1}'), + ('\u{ffd8}', '\u{ffd9}'), + ('\u{ffdd}', '\u{ffdf}'), + ('\u{ffe7}', '\u{ffe7}'), + ('\u{ffef}', '\u{fff8}'), + ('\u{fffe}', '\u{ffff}'), + ('\u{1000c}', '\u{1000c}'), + ('\u{10027}', '\u{10027}'), + ('\u{1003b}', '\u{1003b}'), + ('\u{1003e}', '\u{1003e}'), + ('\u{1004e}', '\u{1004f}'), + ('\u{1005e}', '\u{1007f}'), + ('\u{100fb}', '\u{100ff}'), + ('\u{10103}', '\u{10106}'), + ('\u{10134}', '\u{10136}'), + ('\u{1018f}', '\u{1018f}'), + ('\u{1019d}', '\u{1019f}'), + ('\u{101a1}', '\u{101cf}'), + ('\u{101fe}', '\u{1027f}'), + ('\u{1029d}', '\u{1029f}'), + ('\u{102d1}', '\u{102df}'), + ('\u{102fc}', '\u{102ff}'), + ('\u{10324}', '\u{1032c}'), + ('\u{1034b}', '\u{1034f}'), + ('\u{1037b}', '\u{1037f}'), + ('\u{1039e}', '\u{1039e}'), + ('\u{103c4}', '\u{103c7}'), + ('\u{103d6}', '\u{103ff}'), + ('\u{1049e}', '\u{1049f}'), + ('\u{104aa}', '\u{104af}'), + ('\u{104d4}', '\u{104d7}'), + ('\u{104fc}', '\u{104ff}'), + ('\u{10528}', '\u{1052f}'), + ('\u{10564}', '\u{1056e}'), + ('\u{1057b}', '\u{1057b}'), + ('\u{1058b}', '\u{1058b}'), + ('\u{10593}', '\u{10593}'), + ('\u{10596}', '\u{10596}'), + ('\u{105a2}', '\u{105a2}'), + ('\u{105b2}', '\u{105b2}'), + ('\u{105ba}', '\u{105ba}'), + ('\u{105bd}', '\u{105bf}'), + ('\u{105f4}', '\u{105ff}'), + ('\u{10737}', '\u{1073f}'), + ('\u{10756}', '\u{1075f}'), + ('\u{10768}', '\u{1077f}'), + ('\u{10786}', '\u{10786}'), + ('\u{107b1}', '\u{107b1}'), + ('\u{107bb}', '\u{107ff}'), + ('\u{10806}', '\u{10807}'), + ('\u{10809}', '\u{10809}'), + ('\u{10836}', '\u{10836}'), + ('\u{10839}', '\u{1083b}'), + ('\u{1083d}', '\u{1083e}'), + ('\u{10856}', '\u{10856}'), + ('\u{1089f}', '\u{108a6}'), + ('\u{108b0}', '\u{108df}'), + ('\u{108f3}', '\u{108f3}'), + ('\u{108f6}', '\u{108fa}'), + ('\u{1091c}', '\u{1091e}'), + ('\u{1093a}', '\u{1093e}'), + ('\u{10940}', '\u{1097f}'), + ('\u{109b8}', '\u{109bb}'), + ('\u{109d0}', '\u{109d1}'), + ('\u{10a04}', '\u{10a04}'), + ('\u{10a07}', '\u{10a0b}'), + ('\u{10a14}', '\u{10a14}'), + ('\u{10a18}', '\u{10a18}'), + ('\u{10a36}', '\u{10a37}'), + ('\u{10a3b}', '\u{10a3e}'), + ('\u{10a49}', '\u{10a4f}'), + ('\u{10a59}', '\u{10a5f}'), + ('\u{10aa0}', '\u{10abf}'), + ('\u{10ae7}', '\u{10aea}'), + ('\u{10af7}', '\u{10aff}'), + ('\u{10b36}', '\u{10b38}'), + ('\u{10b56}', '\u{10b57}'), + ('\u{10b73}', '\u{10b77}'), + ('\u{10b92}', '\u{10b98}'), + ('\u{10b9d}', '\u{10ba8}'), + ('\u{10bb0}', '\u{10bff}'), + ('\u{10c49}', '\u{10c7f}'), + ('\u{10cb3}', '\u{10cbf}'), + ('\u{10cf3}', '\u{10cf9}'), + ('\u{10d28}', '\u{10d2f}'), + ('\u{10d3a}', '\u{10d3f}'), + ('\u{10d66}', '\u{10d68}'), + ('\u{10d86}', '\u{10d8d}'), + ('\u{10d90}', '\u{10e5f}'), + ('\u{10e7f}', '\u{10e7f}'), + ('\u{10eaa}', '\u{10eaa}'), + ('\u{10eae}', '\u{10eaf}'), + ('\u{10eb2}', '\u{10ec1}'), + ('\u{10ec5}', '\u{10efb}'), + ('\u{10f28}', '\u{10f2f}'), + ('\u{10f5a}', '\u{10f6f}'), + ('\u{10f8a}', '\u{10faf}'), + 
('\u{10fcc}', '\u{10fdf}'), + ('\u{10ff7}', '\u{10fff}'), + ('\u{1104e}', '\u{11051}'), + ('\u{11076}', '\u{1107e}'), + ('\u{110c3}', '\u{110cc}'), + ('\u{110ce}', '\u{110cf}'), + ('\u{110e9}', '\u{110ef}'), + ('\u{110fa}', '\u{110ff}'), + ('\u{11135}', '\u{11135}'), + ('\u{11148}', '\u{1114f}'), + ('\u{11177}', '\u{1117f}'), + ('\u{111e0}', '\u{111e0}'), + ('\u{111f5}', '\u{111ff}'), + ('\u{11212}', '\u{11212}'), + ('\u{11242}', '\u{1127f}'), + ('\u{11287}', '\u{11287}'), + ('\u{11289}', '\u{11289}'), + ('\u{1128e}', '\u{1128e}'), + ('\u{1129e}', '\u{1129e}'), + ('\u{112aa}', '\u{112af}'), + ('\u{112eb}', '\u{112ef}'), + ('\u{112fa}', '\u{112ff}'), + ('\u{11304}', '\u{11304}'), + ('\u{1130d}', '\u{1130e}'), + ('\u{11311}', '\u{11312}'), + ('\u{11329}', '\u{11329}'), + ('\u{11331}', '\u{11331}'), + ('\u{11334}', '\u{11334}'), + ('\u{1133a}', '\u{1133a}'), + ('\u{11345}', '\u{11346}'), + ('\u{11349}', '\u{1134a}'), + ('\u{1134e}', '\u{1134f}'), + ('\u{11351}', '\u{11356}'), + ('\u{11358}', '\u{1135c}'), + ('\u{11364}', '\u{11365}'), + ('\u{1136d}', '\u{1136f}'), + ('\u{11375}', '\u{1137f}'), + ('\u{1138a}', '\u{1138a}'), + ('\u{1138c}', '\u{1138d}'), + ('\u{1138f}', '\u{1138f}'), + ('\u{113b6}', '\u{113b6}'), + ('\u{113c1}', '\u{113c1}'), + ('\u{113c3}', '\u{113c4}'), + ('\u{113c6}', '\u{113c6}'), + ('\u{113cb}', '\u{113cb}'), + ('\u{113d6}', '\u{113d6}'), + ('\u{113d9}', '\u{113e0}'), + ('\u{113e3}', '\u{113ff}'), + ('\u{1145c}', '\u{1145c}'), + ('\u{11462}', '\u{1147f}'), + ('\u{114c8}', '\u{114cf}'), + ('\u{114da}', '\u{1157f}'), + ('\u{115b6}', '\u{115b7}'), + ('\u{115de}', '\u{115ff}'), + ('\u{11645}', '\u{1164f}'), + ('\u{1165a}', '\u{1165f}'), + ('\u{1166d}', '\u{1167f}'), + ('\u{116ba}', '\u{116bf}'), + ('\u{116ca}', '\u{116cf}'), + ('\u{116e4}', '\u{116ff}'), + ('\u{1171b}', '\u{1171c}'), + ('\u{1172c}', '\u{1172f}'), + ('\u{11747}', '\u{117ff}'), + ('\u{1183c}', '\u{1189f}'), + ('\u{118f3}', '\u{118fe}'), + ('\u{11907}', '\u{11908}'), + ('\u{1190a}', '\u{1190b}'), + ('\u{11914}', '\u{11914}'), + ('\u{11917}', '\u{11917}'), + ('\u{11936}', '\u{11936}'), + ('\u{11939}', '\u{1193a}'), + ('\u{11947}', '\u{1194f}'), + ('\u{1195a}', '\u{1199f}'), + ('\u{119a8}', '\u{119a9}'), + ('\u{119d8}', '\u{119d9}'), + ('\u{119e5}', '\u{119ff}'), + ('\u{11a48}', '\u{11a4f}'), + ('\u{11aa3}', '\u{11aaf}'), + ('\u{11af9}', '\u{11aff}'), + ('\u{11b0a}', '\u{11bbf}'), + ('\u{11be2}', '\u{11bef}'), + ('\u{11bfa}', '\u{11bff}'), + ('\u{11c09}', '\u{11c09}'), + ('\u{11c37}', '\u{11c37}'), + ('\u{11c46}', '\u{11c4f}'), + ('\u{11c6d}', '\u{11c6f}'), + ('\u{11c90}', '\u{11c91}'), + ('\u{11ca8}', '\u{11ca8}'), + ('\u{11cb7}', '\u{11cff}'), + ('\u{11d07}', '\u{11d07}'), + ('\u{11d0a}', '\u{11d0a}'), + ('\u{11d37}', '\u{11d39}'), + ('\u{11d3b}', '\u{11d3b}'), + ('\u{11d3e}', '\u{11d3e}'), + ('\u{11d48}', '\u{11d4f}'), + ('\u{11d5a}', '\u{11d5f}'), + ('\u{11d66}', '\u{11d66}'), + ('\u{11d69}', '\u{11d69}'), + ('\u{11d8f}', '\u{11d8f}'), + ('\u{11d92}', '\u{11d92}'), + ('\u{11d99}', '\u{11d9f}'), + ('\u{11daa}', '\u{11edf}'), + ('\u{11ef9}', '\u{11eff}'), + ('\u{11f11}', '\u{11f11}'), + ('\u{11f3b}', '\u{11f3d}'), + ('\u{11f5b}', '\u{11faf}'), + ('\u{11fb1}', '\u{11fbf}'), + ('\u{11ff2}', '\u{11ffe}'), + ('\u{1239a}', '\u{123ff}'), + ('\u{1246f}', '\u{1246f}'), + ('\u{12475}', '\u{1247f}'), + ('\u{12544}', '\u{12f8f}'), + ('\u{12ff3}', '\u{12fff}'), + ('\u{13456}', '\u{1345f}'), + ('\u{143fb}', '\u{143ff}'), + ('\u{14647}', '\u{160ff}'), + ('\u{1613a}', '\u{167ff}'), + ('\u{16a39}', '\u{16a3f}'), + ('\u{16a5f}', 
'\u{16a5f}'), + ('\u{16a6a}', '\u{16a6d}'), + ('\u{16abf}', '\u{16abf}'), + ('\u{16aca}', '\u{16acf}'), + ('\u{16aee}', '\u{16aef}'), + ('\u{16af6}', '\u{16aff}'), + ('\u{16b46}', '\u{16b4f}'), + ('\u{16b5a}', '\u{16b5a}'), + ('\u{16b62}', '\u{16b62}'), + ('\u{16b78}', '\u{16b7c}'), + ('\u{16b90}', '\u{16d3f}'), + ('\u{16d7a}', '\u{16e3f}'), + ('\u{16e9b}', '\u{16eff}'), + ('\u{16f4b}', '\u{16f4e}'), + ('\u{16f88}', '\u{16f8e}'), + ('\u{16fa0}', '\u{16fdf}'), + ('\u{16fe5}', '\u{16fef}'), + ('\u{16ff2}', '\u{16fff}'), + ('\u{187f8}', '\u{187ff}'), + ('\u{18cd6}', '\u{18cfe}'), + ('\u{18d09}', '\u{1afef}'), + ('\u{1aff4}', '\u{1aff4}'), + ('\u{1affc}', '\u{1affc}'), + ('\u{1afff}', '\u{1afff}'), + ('\u{1b123}', '\u{1b131}'), + ('\u{1b133}', '\u{1b14f}'), + ('\u{1b153}', '\u{1b154}'), + ('\u{1b156}', '\u{1b163}'), + ('\u{1b168}', '\u{1b16f}'), + ('\u{1b2fc}', '\u{1bbff}'), + ('\u{1bc6b}', '\u{1bc6f}'), + ('\u{1bc7d}', '\u{1bc7f}'), + ('\u{1bc89}', '\u{1bc8f}'), + ('\u{1bc9a}', '\u{1bc9b}'), + ('\u{1bca4}', '\u{1cbff}'), + ('\u{1ccfa}', '\u{1ccff}'), + ('\u{1ceb4}', '\u{1ceff}'), + ('\u{1cf2e}', '\u{1cf2f}'), + ('\u{1cf47}', '\u{1cf4f}'), + ('\u{1cfc4}', '\u{1cfff}'), + ('\u{1d0f6}', '\u{1d0ff}'), + ('\u{1d127}', '\u{1d128}'), + ('\u{1d1eb}', '\u{1d1ff}'), + ('\u{1d246}', '\u{1d2bf}'), + ('\u{1d2d4}', '\u{1d2df}'), + ('\u{1d2f4}', '\u{1d2ff}'), + ('\u{1d357}', '\u{1d35f}'), + ('\u{1d379}', '\u{1d3ff}'), + ('\u{1d455}', '\u{1d455}'), + ('\u{1d49d}', '\u{1d49d}'), + ('\u{1d4a0}', '\u{1d4a1}'), + ('\u{1d4a3}', '\u{1d4a4}'), + ('\u{1d4a7}', '\u{1d4a8}'), + ('\u{1d4ad}', '\u{1d4ad}'), + ('\u{1d4ba}', '\u{1d4ba}'), + ('\u{1d4bc}', '\u{1d4bc}'), + ('\u{1d4c4}', '\u{1d4c4}'), + ('\u{1d506}', '\u{1d506}'), + ('\u{1d50b}', '\u{1d50c}'), + ('\u{1d515}', '\u{1d515}'), + ('\u{1d51d}', '\u{1d51d}'), + ('\u{1d53a}', '\u{1d53a}'), + ('\u{1d53f}', '\u{1d53f}'), + ('\u{1d545}', '\u{1d545}'), + ('\u{1d547}', '\u{1d549}'), + ('\u{1d551}', '\u{1d551}'), + ('\u{1d6a6}', '\u{1d6a7}'), + ('\u{1d7cc}', '\u{1d7cd}'), + ('\u{1da8c}', '\u{1da9a}'), + ('\u{1daa0}', '\u{1daa0}'), + ('\u{1dab0}', '\u{1deff}'), + ('\u{1df1f}', '\u{1df24}'), + ('\u{1df2b}', '\u{1dfff}'), + ('\u{1e007}', '\u{1e007}'), + ('\u{1e019}', '\u{1e01a}'), + ('\u{1e022}', '\u{1e022}'), + ('\u{1e025}', '\u{1e025}'), + ('\u{1e02b}', '\u{1e02f}'), + ('\u{1e06e}', '\u{1e08e}'), + ('\u{1e090}', '\u{1e0ff}'), + ('\u{1e12d}', '\u{1e12f}'), + ('\u{1e13e}', '\u{1e13f}'), + ('\u{1e14a}', '\u{1e14d}'), + ('\u{1e150}', '\u{1e28f}'), + ('\u{1e2af}', '\u{1e2bf}'), + ('\u{1e2fa}', '\u{1e2fe}'), + ('\u{1e300}', '\u{1e4cf}'), + ('\u{1e4fa}', '\u{1e5cf}'), + ('\u{1e5fb}', '\u{1e5fe}'), + ('\u{1e600}', '\u{1e7df}'), + ('\u{1e7e7}', '\u{1e7e7}'), + ('\u{1e7ec}', '\u{1e7ec}'), + ('\u{1e7ef}', '\u{1e7ef}'), + ('\u{1e7ff}', '\u{1e7ff}'), + ('\u{1e8c5}', '\u{1e8c6}'), + ('\u{1e8d7}', '\u{1e8ff}'), + ('\u{1e94c}', '\u{1e94f}'), + ('\u{1e95a}', '\u{1e95d}'), + ('\u{1e960}', '\u{1ec70}'), + ('\u{1ecb5}', '\u{1ed00}'), + ('\u{1ed3e}', '\u{1edff}'), + ('\u{1ee04}', '\u{1ee04}'), + ('\u{1ee20}', '\u{1ee20}'), + ('\u{1ee23}', '\u{1ee23}'), + ('\u{1ee25}', '\u{1ee26}'), + ('\u{1ee28}', '\u{1ee28}'), + ('\u{1ee33}', '\u{1ee33}'), + ('\u{1ee38}', '\u{1ee38}'), + ('\u{1ee3a}', '\u{1ee3a}'), + ('\u{1ee3c}', '\u{1ee41}'), + ('\u{1ee43}', '\u{1ee46}'), + ('\u{1ee48}', '\u{1ee48}'), + ('\u{1ee4a}', '\u{1ee4a}'), + ('\u{1ee4c}', '\u{1ee4c}'), + ('\u{1ee50}', '\u{1ee50}'), + ('\u{1ee53}', '\u{1ee53}'), + ('\u{1ee55}', '\u{1ee56}'), + ('\u{1ee58}', '\u{1ee58}'), + ('\u{1ee5a}', '\u{1ee5a}'), 
+ ('\u{1ee5c}', '\u{1ee5c}'), + ('\u{1ee5e}', '\u{1ee5e}'), + ('\u{1ee60}', '\u{1ee60}'), + ('\u{1ee63}', '\u{1ee63}'), + ('\u{1ee65}', '\u{1ee66}'), + ('\u{1ee6b}', '\u{1ee6b}'), + ('\u{1ee73}', '\u{1ee73}'), + ('\u{1ee78}', '\u{1ee78}'), + ('\u{1ee7d}', '\u{1ee7d}'), + ('\u{1ee7f}', '\u{1ee7f}'), + ('\u{1ee8a}', '\u{1ee8a}'), + ('\u{1ee9c}', '\u{1eea0}'), + ('\u{1eea4}', '\u{1eea4}'), + ('\u{1eeaa}', '\u{1eeaa}'), + ('\u{1eebc}', '\u{1eeef}'), + ('\u{1eef2}', '\u{1efff}'), + ('\u{1f02c}', '\u{1f02f}'), + ('\u{1f094}', '\u{1f09f}'), + ('\u{1f0af}', '\u{1f0b0}'), + ('\u{1f0c0}', '\u{1f0c0}'), + ('\u{1f0d0}', '\u{1f0d0}'), + ('\u{1f0f6}', '\u{1f0ff}'), + ('\u{1f1ae}', '\u{1f1e5}'), + ('\u{1f203}', '\u{1f20f}'), + ('\u{1f23c}', '\u{1f23f}'), + ('\u{1f249}', '\u{1f24f}'), + ('\u{1f252}', '\u{1f25f}'), + ('\u{1f266}', '\u{1f2ff}'), + ('\u{1f6d8}', '\u{1f6db}'), + ('\u{1f6ed}', '\u{1f6ef}'), + ('\u{1f6fd}', '\u{1f6ff}'), + ('\u{1f777}', '\u{1f77a}'), + ('\u{1f7da}', '\u{1f7df}'), + ('\u{1f7ec}', '\u{1f7ef}'), + ('\u{1f7f1}', '\u{1f7ff}'), + ('\u{1f80c}', '\u{1f80f}'), + ('\u{1f848}', '\u{1f84f}'), + ('\u{1f85a}', '\u{1f85f}'), + ('\u{1f888}', '\u{1f88f}'), + ('\u{1f8ae}', '\u{1f8af}'), + ('\u{1f8bc}', '\u{1f8bf}'), + ('\u{1f8c2}', '\u{1f8ff}'), + ('\u{1fa54}', '\u{1fa5f}'), + ('\u{1fa6e}', '\u{1fa6f}'), + ('\u{1fa7d}', '\u{1fa7f}'), + ('\u{1fa8a}', '\u{1fa8e}'), + ('\u{1fac7}', '\u{1facd}'), + ('\u{1fadd}', '\u{1fade}'), + ('\u{1faea}', '\u{1faef}'), + ('\u{1faf9}', '\u{1faff}'), + ('\u{1fb93}', '\u{1fb93}'), + ('\u{1fbfa}', '\u{1ffff}'), + ('\u{2a6e0}', '\u{2a6ff}'), + ('\u{2b73a}', '\u{2b73f}'), + ('\u{2b81e}', '\u{2b81f}'), + ('\u{2cea2}', '\u{2ceaf}'), + ('\u{2ebe1}', '\u{2ebef}'), + ('\u{2ee5e}', '\u{2f7ff}'), + ('\u{2fa1e}', '\u{2ffff}'), + ('\u{3134b}', '\u{3134f}'), + ('\u{323b0}', '\u{e0000}'), + ('\u{e0002}', '\u{e001f}'), + ('\u{e0080}', '\u{e00ff}'), + ('\u{e01f0}', '\u{effff}'), + ('\u{ffffe}', '\u{fffff}'), + ('\u{10fffe}', '\u{10ffff}'), +]; + +pub const UPPERCASE_LETTER: &'static [(char, char)] = &[ + ('A', 'Z'), + ('À', 'Ö'), + ('Ø', 'Þ'), + ('Ā', 'Ā'), + ('Ă', 'Ă'), + ('Ą', 'Ą'), + ('Ć', 'Ć'), + ('Ĉ', 'Ĉ'), + ('Ċ', 'Ċ'), + ('Č', 'Č'), + ('Ď', 'Ď'), + ('Đ', 'Đ'), + ('Ē', 'Ē'), + ('Ĕ', 'Ĕ'), + ('Ė', 'Ė'), + ('Ę', 'Ę'), + ('Ě', 'Ě'), + ('Ĝ', 'Ĝ'), + ('Ğ', 'Ğ'), + ('Ġ', 'Ġ'), + ('Ģ', 'Ģ'), + ('Ĥ', 'Ĥ'), + ('Ħ', 'Ħ'), + ('Ĩ', 'Ĩ'), + ('Ī', 'Ī'), + ('Ĭ', 'Ĭ'), + ('Į', 'Į'), + ('İ', 'İ'), + ('IJ', 'IJ'), + ('Ĵ', 'Ĵ'), + ('Ķ', 'Ķ'), + ('Ĺ', 'Ĺ'), + ('Ļ', 'Ļ'), + ('Ľ', 'Ľ'), + ('Ŀ', 'Ŀ'), + ('Ł', 'Ł'), + ('Ń', 'Ń'), + ('Ņ', 'Ņ'), + ('Ň', 'Ň'), + ('Ŋ', 'Ŋ'), + ('Ō', 'Ō'), + ('Ŏ', 'Ŏ'), + ('Ő', 'Ő'), + ('Œ', 'Œ'), + ('Ŕ', 'Ŕ'), + ('Ŗ', 'Ŗ'), + ('Ř', 'Ř'), + ('Ś', 'Ś'), + ('Ŝ', 'Ŝ'), + ('Ş', 'Ş'), + ('Š', 'Š'), + ('Ţ', 'Ţ'), + ('Ť', 'Ť'), + ('Ŧ', 'Ŧ'), + ('Ũ', 'Ũ'), + ('Ū', 'Ū'), + ('Ŭ', 'Ŭ'), + ('Ů', 'Ů'), + ('Ű', 'Ű'), + ('Ų', 'Ų'), + ('Ŵ', 'Ŵ'), + ('Ŷ', 'Ŷ'), + ('Ÿ', 'Ź'), + ('Ż', 'Ż'), + ('Ž', 'Ž'), + ('Ɓ', 'Ƃ'), + ('Ƅ', 'Ƅ'), + ('Ɔ', 'Ƈ'), + ('Ɖ', 'Ƌ'), + ('Ǝ', 'Ƒ'), + ('Ɠ', 'Ɣ'), + ('Ɩ', 'Ƙ'), + ('Ɯ', 'Ɲ'), + ('Ɵ', 'Ơ'), + ('Ƣ', 'Ƣ'), + ('Ƥ', 'Ƥ'), + ('Ʀ', 'Ƨ'), + ('Ʃ', 'Ʃ'), + ('Ƭ', 'Ƭ'), + ('Ʈ', 'Ư'), + ('Ʊ', 'Ƴ'), + ('Ƶ', 'Ƶ'), + ('Ʒ', 'Ƹ'), + ('Ƽ', 'Ƽ'), + ('DŽ', 'DŽ'), + ('LJ', 'LJ'), + ('NJ', 'NJ'), + ('Ǎ', 'Ǎ'), + ('Ǐ', 'Ǐ'), + ('Ǒ', 'Ǒ'), + ('Ǔ', 'Ǔ'), + ('Ǖ', 'Ǖ'), + ('Ǘ', 'Ǘ'), + ('Ǚ', 'Ǚ'), + ('Ǜ', 'Ǜ'), + ('Ǟ', 'Ǟ'), + ('Ǡ', 'Ǡ'), + ('Ǣ', 'Ǣ'), + ('Ǥ', 'Ǥ'), + ('Ǧ', 'Ǧ'), + ('Ǩ', 'Ǩ'), + ('Ǫ', 'Ǫ'), + ('Ǭ', 'Ǭ'), + ('Ǯ', 'Ǯ'), + ('DZ', 'DZ'), + ('Ǵ', 'Ǵ'), + ('Ƕ', 'Ǹ'), 
+ ('Ǻ', 'Ǻ'), + ('Ǽ', 'Ǽ'), + ('Ǿ', 'Ǿ'), + ('Ȁ', 'Ȁ'), + ('Ȃ', 'Ȃ'), + ('Ȅ', 'Ȅ'), + ('Ȇ', 'Ȇ'), + ('Ȉ', 'Ȉ'), + ('Ȋ', 'Ȋ'), + ('Ȍ', 'Ȍ'), + ('Ȏ', 'Ȏ'), + ('Ȑ', 'Ȑ'), + ('Ȓ', 'Ȓ'), + ('Ȕ', 'Ȕ'), + ('Ȗ', 'Ȗ'), + ('Ș', 'Ș'), + ('Ț', 'Ț'), + ('Ȝ', 'Ȝ'), + ('Ȟ', 'Ȟ'), + ('Ƞ', 'Ƞ'), + ('Ȣ', 'Ȣ'), + ('Ȥ', 'Ȥ'), + ('Ȧ', 'Ȧ'), + ('Ȩ', 'Ȩ'), + ('Ȫ', 'Ȫ'), + ('Ȭ', 'Ȭ'), + ('Ȯ', 'Ȯ'), + ('Ȱ', 'Ȱ'), + ('Ȳ', 'Ȳ'), + ('Ⱥ', 'Ȼ'), + ('Ƚ', 'Ⱦ'), + ('Ɂ', 'Ɂ'), + ('Ƀ', 'Ɇ'), + ('Ɉ', 'Ɉ'), + ('Ɋ', 'Ɋ'), + ('Ɍ', 'Ɍ'), + ('Ɏ', 'Ɏ'), + ('Ͱ', 'Ͱ'), + ('Ͳ', 'Ͳ'), + ('Ͷ', 'Ͷ'), + ('Ϳ', 'Ϳ'), + ('Ά', 'Ά'), + ('Έ', 'Ί'), + ('Ό', 'Ό'), + ('Ύ', 'Ώ'), + ('Α', 'Ρ'), + ('Σ', 'Ϋ'), + ('Ϗ', 'Ϗ'), + ('ϒ', 'ϔ'), + ('Ϙ', 'Ϙ'), + ('Ϛ', 'Ϛ'), + ('Ϝ', 'Ϝ'), + ('Ϟ', 'Ϟ'), + ('Ϡ', 'Ϡ'), + ('Ϣ', 'Ϣ'), + ('Ϥ', 'Ϥ'), + ('Ϧ', 'Ϧ'), + ('Ϩ', 'Ϩ'), + ('Ϫ', 'Ϫ'), + ('Ϭ', 'Ϭ'), + ('Ϯ', 'Ϯ'), + ('ϴ', 'ϴ'), + ('Ϸ', 'Ϸ'), + ('Ϲ', 'Ϻ'), + ('Ͻ', 'Я'), + ('Ѡ', 'Ѡ'), + ('Ѣ', 'Ѣ'), + ('Ѥ', 'Ѥ'), + ('Ѧ', 'Ѧ'), + ('Ѩ', 'Ѩ'), + ('Ѫ', 'Ѫ'), + ('Ѭ', 'Ѭ'), + ('Ѯ', 'Ѯ'), + ('Ѱ', 'Ѱ'), + ('Ѳ', 'Ѳ'), + ('Ѵ', 'Ѵ'), + ('Ѷ', 'Ѷ'), + ('Ѹ', 'Ѹ'), + ('Ѻ', 'Ѻ'), + ('Ѽ', 'Ѽ'), + ('Ѿ', 'Ѿ'), + ('Ҁ', 'Ҁ'), + ('Ҋ', 'Ҋ'), + ('Ҍ', 'Ҍ'), + ('Ҏ', 'Ҏ'), + ('Ґ', 'Ґ'), + ('Ғ', 'Ғ'), + ('Ҕ', 'Ҕ'), + ('Җ', 'Җ'), + ('Ҙ', 'Ҙ'), + ('Қ', 'Қ'), + ('Ҝ', 'Ҝ'), + ('Ҟ', 'Ҟ'), + ('Ҡ', 'Ҡ'), + ('Ң', 'Ң'), + ('Ҥ', 'Ҥ'), + ('Ҧ', 'Ҧ'), + ('Ҩ', 'Ҩ'), + ('Ҫ', 'Ҫ'), + ('Ҭ', 'Ҭ'), + ('Ү', 'Ү'), + ('Ұ', 'Ұ'), + ('Ҳ', 'Ҳ'), + ('Ҵ', 'Ҵ'), + ('Ҷ', 'Ҷ'), + ('Ҹ', 'Ҹ'), + ('Һ', 'Һ'), + ('Ҽ', 'Ҽ'), + ('Ҿ', 'Ҿ'), + ('Ӏ', 'Ӂ'), + ('Ӄ', 'Ӄ'), + ('Ӆ', 'Ӆ'), + ('Ӈ', 'Ӈ'), + ('Ӊ', 'Ӊ'), + ('Ӌ', 'Ӌ'), + ('Ӎ', 'Ӎ'), + ('Ӑ', 'Ӑ'), + ('Ӓ', 'Ӓ'), + ('Ӕ', 'Ӕ'), + ('Ӗ', 'Ӗ'), + ('Ә', 'Ә'), + ('Ӛ', 'Ӛ'), + ('Ӝ', 'Ӝ'), + ('Ӟ', 'Ӟ'), + ('Ӡ', 'Ӡ'), + ('Ӣ', 'Ӣ'), + ('Ӥ', 'Ӥ'), + ('Ӧ', 'Ӧ'), + ('Ө', 'Ө'), + ('Ӫ', 'Ӫ'), + ('Ӭ', 'Ӭ'), + ('Ӯ', 'Ӯ'), + ('Ӱ', 'Ӱ'), + ('Ӳ', 'Ӳ'), + ('Ӵ', 'Ӵ'), + ('Ӷ', 'Ӷ'), + ('Ӹ', 'Ӹ'), + ('Ӻ', 'Ӻ'), + ('Ӽ', 'Ӽ'), + ('Ӿ', 'Ӿ'), + ('Ԁ', 'Ԁ'), + ('Ԃ', 'Ԃ'), + ('Ԅ', 'Ԅ'), + ('Ԇ', 'Ԇ'), + ('Ԉ', 'Ԉ'), + ('Ԋ', 'Ԋ'), + ('Ԍ', 'Ԍ'), + ('Ԏ', 'Ԏ'), + ('Ԑ', 'Ԑ'), + ('Ԓ', 'Ԓ'), + ('Ԕ', 'Ԕ'), + ('Ԗ', 'Ԗ'), + ('Ԙ', 'Ԙ'), + ('Ԛ', 'Ԛ'), + ('Ԝ', 'Ԝ'), + ('Ԟ', 'Ԟ'), + ('Ԡ', 'Ԡ'), + ('Ԣ', 'Ԣ'), + ('Ԥ', 'Ԥ'), + ('Ԧ', 'Ԧ'), + ('Ԩ', 'Ԩ'), + ('Ԫ', 'Ԫ'), + ('Ԭ', 'Ԭ'), + ('Ԯ', 'Ԯ'), + ('Ա', 'Ֆ'), + ('Ⴀ', 'Ⴥ'), + ('Ⴧ', 'Ⴧ'), + ('Ⴭ', 'Ⴭ'), + ('Ꭰ', 'Ᏽ'), + ('Ᲊ', 'Ᲊ'), + ('Ა', 'Ჺ'), + ('Ჽ', 'Ჿ'), + ('Ḁ', 'Ḁ'), + ('Ḃ', 'Ḃ'), + ('Ḅ', 'Ḅ'), + ('Ḇ', 'Ḇ'), + ('Ḉ', 'Ḉ'), + ('Ḋ', 'Ḋ'), + ('Ḍ', 'Ḍ'), + ('Ḏ', 'Ḏ'), + ('Ḑ', 'Ḑ'), + ('Ḓ', 'Ḓ'), + ('Ḕ', 'Ḕ'), + ('Ḗ', 'Ḗ'), + ('Ḙ', 'Ḙ'), + ('Ḛ', 'Ḛ'), + ('Ḝ', 'Ḝ'), + ('Ḟ', 'Ḟ'), + ('Ḡ', 'Ḡ'), + ('Ḣ', 'Ḣ'), + ('Ḥ', 'Ḥ'), + ('Ḧ', 'Ḧ'), + ('Ḩ', 'Ḩ'), + ('Ḫ', 'Ḫ'), + ('Ḭ', 'Ḭ'), + ('Ḯ', 'Ḯ'), + ('Ḱ', 'Ḱ'), + ('Ḳ', 'Ḳ'), + ('Ḵ', 'Ḵ'), + ('Ḷ', 'Ḷ'), + ('Ḹ', 'Ḹ'), + ('Ḻ', 'Ḻ'), + ('Ḽ', 'Ḽ'), + ('Ḿ', 'Ḿ'), + ('Ṁ', 'Ṁ'), + ('Ṃ', 'Ṃ'), + ('Ṅ', 'Ṅ'), + ('Ṇ', 'Ṇ'), + ('Ṉ', 'Ṉ'), + ('Ṋ', 'Ṋ'), + ('Ṍ', 'Ṍ'), + ('Ṏ', 'Ṏ'), + ('Ṑ', 'Ṑ'), + ('Ṓ', 'Ṓ'), + ('Ṕ', 'Ṕ'), + ('Ṗ', 'Ṗ'), + ('Ṙ', 'Ṙ'), + ('Ṛ', 'Ṛ'), + ('Ṝ', 'Ṝ'), + ('Ṟ', 'Ṟ'), + ('Ṡ', 'Ṡ'), + ('Ṣ', 'Ṣ'), + ('Ṥ', 'Ṥ'), + ('Ṧ', 'Ṧ'), + ('Ṩ', 'Ṩ'), + ('Ṫ', 'Ṫ'), + ('Ṭ', 'Ṭ'), + ('Ṯ', 'Ṯ'), + ('Ṱ', 'Ṱ'), + ('Ṳ', 'Ṳ'), + ('Ṵ', 'Ṵ'), + ('Ṷ', 'Ṷ'), + ('Ṹ', 'Ṹ'), + ('Ṻ', 'Ṻ'), + ('Ṽ', 'Ṽ'), + ('Ṿ', 'Ṿ'), + ('Ẁ', 'Ẁ'), + ('Ẃ', 'Ẃ'), + ('Ẅ', 'Ẅ'), + ('Ẇ', 'Ẇ'), + ('Ẉ', 'Ẉ'), + ('Ẋ', 'Ẋ'), + ('Ẍ', 'Ẍ'), + ('Ẏ', 'Ẏ'), + ('Ẑ', 'Ẑ'), + ('Ẓ', 'Ẓ'), + ('Ẕ', 'Ẕ'), + ('ẞ', 'ẞ'), + ('Ạ', 'Ạ'), + ('Ả', 'Ả'), + ('Ấ', 'Ấ'), + ('Ầ', 'Ầ'), + ('Ẩ', 'Ẩ'), + ('Ẫ', 
'Ẫ'), + ('Ậ', 'Ậ'), + ('Ắ', 'Ắ'), + ('Ằ', 'Ằ'), + ('Ẳ', 'Ẳ'), + ('Ẵ', 'Ẵ'), + ('Ặ', 'Ặ'), + ('Ẹ', 'Ẹ'), + ('Ẻ', 'Ẻ'), + ('Ẽ', 'Ẽ'), + ('Ế', 'Ế'), + ('Ề', 'Ề'), + ('Ể', 'Ể'), + ('Ễ', 'Ễ'), + ('Ệ', 'Ệ'), + ('Ỉ', 'Ỉ'), + ('Ị', 'Ị'), + ('Ọ', 'Ọ'), + ('Ỏ', 'Ỏ'), + ('Ố', 'Ố'), + ('Ồ', 'Ồ'), + ('Ổ', 'Ổ'), + ('Ỗ', 'Ỗ'), + ('Ộ', 'Ộ'), + ('Ớ', 'Ớ'), + ('Ờ', 'Ờ'), + ('Ở', 'Ở'), + ('Ỡ', 'Ỡ'), + ('Ợ', 'Ợ'), + ('Ụ', 'Ụ'), + ('Ủ', 'Ủ'), + ('Ứ', 'Ứ'), + ('Ừ', 'Ừ'), + ('Ử', 'Ử'), + ('Ữ', 'Ữ'), + ('Ự', 'Ự'), + ('Ỳ', 'Ỳ'), + ('Ỵ', 'Ỵ'), + ('Ỷ', 'Ỷ'), + ('Ỹ', 'Ỹ'), + ('Ỻ', 'Ỻ'), + ('Ỽ', 'Ỽ'), + ('Ỿ', 'Ỿ'), + ('Ἀ', 'Ἇ'), + ('Ἐ', 'Ἕ'), + ('Ἠ', 'Ἧ'), + ('Ἰ', 'Ἷ'), + ('Ὀ', 'Ὅ'), + ('Ὑ', 'Ὑ'), + ('Ὓ', 'Ὓ'), + ('Ὕ', 'Ὕ'), + ('Ὗ', 'Ὗ'), + ('Ὠ', 'Ὧ'), + ('Ᾰ', 'Ά'), + ('Ὲ', 'Ή'), + ('Ῐ', 'Ί'), + ('Ῠ', 'Ῥ'), + ('Ὸ', 'Ώ'), + ('ℂ', 'ℂ'), + ('ℇ', 'ℇ'), + ('ℋ', 'ℍ'), + ('ℐ', 'ℒ'), + ('ℕ', 'ℕ'), + ('ℙ', 'ℝ'), + ('ℤ', 'ℤ'), + ('Ω', 'Ω'), + ('ℨ', 'ℨ'), + ('K', 'ℭ'), + ('ℰ', 'ℳ'), + ('ℾ', 'ℿ'), + ('ⅅ', 'ⅅ'), + ('Ↄ', 'Ↄ'), + ('Ⰰ', 'Ⱟ'), + ('Ⱡ', 'Ⱡ'), + ('Ɫ', 'Ɽ'), + ('Ⱨ', 'Ⱨ'), + ('Ⱪ', 'Ⱪ'), + ('Ⱬ', 'Ⱬ'), + ('Ɑ', 'Ɒ'), + ('Ⱳ', 'Ⱳ'), + ('Ⱶ', 'Ⱶ'), + ('Ȿ', 'Ⲁ'), + ('Ⲃ', 'Ⲃ'), + ('Ⲅ', 'Ⲅ'), + ('Ⲇ', 'Ⲇ'), + ('Ⲉ', 'Ⲉ'), + ('Ⲋ', 'Ⲋ'), + ('Ⲍ', 'Ⲍ'), + ('Ⲏ', 'Ⲏ'), + ('Ⲑ', 'Ⲑ'), + ('Ⲓ', 'Ⲓ'), + ('Ⲕ', 'Ⲕ'), + ('Ⲗ', 'Ⲗ'), + ('Ⲙ', 'Ⲙ'), + ('Ⲛ', 'Ⲛ'), + ('Ⲝ', 'Ⲝ'), + ('Ⲟ', 'Ⲟ'), + ('Ⲡ', 'Ⲡ'), + ('Ⲣ', 'Ⲣ'), + ('Ⲥ', 'Ⲥ'), + ('Ⲧ', 'Ⲧ'), + ('Ⲩ', 'Ⲩ'), + ('Ⲫ', 'Ⲫ'), + ('Ⲭ', 'Ⲭ'), + ('Ⲯ', 'Ⲯ'), + ('Ⲱ', 'Ⲱ'), + ('Ⲳ', 'Ⲳ'), + ('Ⲵ', 'Ⲵ'), + ('Ⲷ', 'Ⲷ'), + ('Ⲹ', 'Ⲹ'), + ('Ⲻ', 'Ⲻ'), + ('Ⲽ', 'Ⲽ'), + ('Ⲿ', 'Ⲿ'), + ('Ⳁ', 'Ⳁ'), + ('Ⳃ', 'Ⳃ'), + ('Ⳅ', 'Ⳅ'), + ('Ⳇ', 'Ⳇ'), + ('Ⳉ', 'Ⳉ'), + ('Ⳋ', 'Ⳋ'), + ('Ⳍ', 'Ⳍ'), + ('Ⳏ', 'Ⳏ'), + ('Ⳑ', 'Ⳑ'), + ('Ⳓ', 'Ⳓ'), + ('Ⳕ', 'Ⳕ'), + ('Ⳗ', 'Ⳗ'), + ('Ⳙ', 'Ⳙ'), + ('Ⳛ', 'Ⳛ'), + ('Ⳝ', 'Ⳝ'), + ('Ⳟ', 'Ⳟ'), + ('Ⳡ', 'Ⳡ'), + ('Ⳣ', 'Ⳣ'), + ('Ⳬ', 'Ⳬ'), + ('Ⳮ', 'Ⳮ'), + ('Ⳳ', 'Ⳳ'), + ('Ꙁ', 'Ꙁ'), + ('Ꙃ', 'Ꙃ'), + ('Ꙅ', 'Ꙅ'), + ('Ꙇ', 'Ꙇ'), + ('Ꙉ', 'Ꙉ'), + ('Ꙋ', 'Ꙋ'), + ('Ꙍ', 'Ꙍ'), + ('Ꙏ', 'Ꙏ'), + ('Ꙑ', 'Ꙑ'), + ('Ꙓ', 'Ꙓ'), + ('Ꙕ', 'Ꙕ'), + ('Ꙗ', 'Ꙗ'), + ('Ꙙ', 'Ꙙ'), + ('Ꙛ', 'Ꙛ'), + ('Ꙝ', 'Ꙝ'), + ('Ꙟ', 'Ꙟ'), + ('Ꙡ', 'Ꙡ'), + ('Ꙣ', 'Ꙣ'), + ('Ꙥ', 'Ꙥ'), + ('Ꙧ', 'Ꙧ'), + ('Ꙩ', 'Ꙩ'), + ('Ꙫ', 'Ꙫ'), + ('Ꙭ', 'Ꙭ'), + ('Ꚁ', 'Ꚁ'), + ('Ꚃ', 'Ꚃ'), + ('Ꚅ', 'Ꚅ'), + ('Ꚇ', 'Ꚇ'), + ('Ꚉ', 'Ꚉ'), + ('Ꚋ', 'Ꚋ'), + ('Ꚍ', 'Ꚍ'), + ('Ꚏ', 'Ꚏ'), + ('Ꚑ', 'Ꚑ'), + ('Ꚓ', 'Ꚓ'), + ('Ꚕ', 'Ꚕ'), + ('Ꚗ', 'Ꚗ'), + ('Ꚙ', 'Ꚙ'), + ('Ꚛ', 'Ꚛ'), + ('Ꜣ', 'Ꜣ'), + ('Ꜥ', 'Ꜥ'), + ('Ꜧ', 'Ꜧ'), + ('Ꜩ', 'Ꜩ'), + ('Ꜫ', 'Ꜫ'), + ('Ꜭ', 'Ꜭ'), + ('Ꜯ', 'Ꜯ'), + ('Ꜳ', 'Ꜳ'), + ('Ꜵ', 'Ꜵ'), + ('Ꜷ', 'Ꜷ'), + ('Ꜹ', 'Ꜹ'), + ('Ꜻ', 'Ꜻ'), + ('Ꜽ', 'Ꜽ'), + ('Ꜿ', 'Ꜿ'), + ('Ꝁ', 'Ꝁ'), + ('Ꝃ', 'Ꝃ'), + ('Ꝅ', 'Ꝅ'), + ('Ꝇ', 'Ꝇ'), + ('Ꝉ', 'Ꝉ'), + ('Ꝋ', 'Ꝋ'), + ('Ꝍ', 'Ꝍ'), + ('Ꝏ', 'Ꝏ'), + ('Ꝑ', 'Ꝑ'), + ('Ꝓ', 'Ꝓ'), + ('Ꝕ', 'Ꝕ'), + ('Ꝗ', 'Ꝗ'), + ('Ꝙ', 'Ꝙ'), + ('Ꝛ', 'Ꝛ'), + ('Ꝝ', 'Ꝝ'), + ('Ꝟ', 'Ꝟ'), + ('Ꝡ', 'Ꝡ'), + ('Ꝣ', 'Ꝣ'), + ('Ꝥ', 'Ꝥ'), + ('Ꝧ', 'Ꝧ'), + ('Ꝩ', 'Ꝩ'), + ('Ꝫ', 'Ꝫ'), + ('Ꝭ', 'Ꝭ'), + ('Ꝯ', 'Ꝯ'), + ('Ꝺ', 'Ꝺ'), + ('Ꝼ', 'Ꝼ'), + ('Ᵹ', 'Ꝿ'), + ('Ꞁ', 'Ꞁ'), + ('Ꞃ', 'Ꞃ'), + ('Ꞅ', 'Ꞅ'), + ('Ꞇ', 'Ꞇ'), + ('Ꞌ', 'Ꞌ'), + ('Ɥ', 'Ɥ'), + ('Ꞑ', 'Ꞑ'), + ('Ꞓ', 'Ꞓ'), + ('Ꞗ', 'Ꞗ'), + ('Ꞙ', 'Ꞙ'), + ('Ꞛ', 'Ꞛ'), + ('Ꞝ', 'Ꞝ'), + ('Ꞟ', 'Ꞟ'), + ('Ꞡ', 'Ꞡ'), + ('Ꞣ', 'Ꞣ'), + ('Ꞥ', 'Ꞥ'), + ('Ꞧ', 'Ꞧ'), + ('Ꞩ', 'Ꞩ'), + ('Ɦ', 'Ɪ'), + ('Ʞ', 'Ꞵ'), + ('Ꞷ', 'Ꞷ'), + ('Ꞹ', 'Ꞹ'), + ('Ꞻ', 'Ꞻ'), + ('Ꞽ', 'Ꞽ'), + ('Ꞿ', 'Ꞿ'), + ('Ꟁ', 'Ꟁ'), + ('Ꟃ', 'Ꟃ'), + ('Ꞔ', 'Ꟈ'), + ('Ꟊ', 'Ꟊ'), + ('Ɤ', 'Ꟍ'), + ('Ꟑ', 'Ꟑ'), + ('Ꟗ', 'Ꟗ'), + ('Ꟙ', 'Ꟙ'), + ('Ꟛ', 'Ꟛ'), + ('Ƛ', 'Ƛ'), + ('Ꟶ', 'Ꟶ'), + ('A', 'Z'), + ('𐐀', '𐐧'), + ('𐒰', '𐓓'), + ('𐕰', '𐕺'), + ('𐕼', '𐖊'), + ('𐖌', '𐖒'), + 
('𐖔', '𐖕'), + ('𐲀', '𐲲'), + ('𐵐', '𐵥'), + ('𑢠', '𑢿'), + ('𖹀', '𖹟'), + ('𝐀', '𝐙'), + ('𝐴', '𝑍'), + ('𝑨', '𝒁'), + ('𝒜', '𝒜'), + ('𝒞', '𝒟'), + ('𝒢', '𝒢'), + ('𝒥', '𝒦'), + ('𝒩', '𝒬'), + ('𝒮', '𝒵'), + ('𝓐', '𝓩'), + ('𝔄', '𝔅'), + ('𝔇', '𝔊'), + ('𝔍', '𝔔'), + ('𝔖', '𝔜'), + ('𝔸', '𝔹'), + ('𝔻', '𝔾'), + ('𝕀', '𝕄'), + ('𝕆', '𝕆'), + ('𝕊', '𝕐'), + ('𝕬', '𝖅'), + ('𝖠', '𝖹'), + ('𝗔', '𝗭'), + ('𝘈', '𝘡'), + ('𝘼', '𝙕'), + ('𝙰', '𝚉'), + ('𝚨', '𝛀'), + ('𝛢', '𝛺'), + ('𝜜', '𝜴'), + ('𝝖', '𝝮'), + ('𝞐', '𝞨'), + ('𝟊', '𝟊'), + ('𞤀', '𞤡'), +]; diff --git a/vendor/regex-syntax/src/unicode_tables/grapheme_cluster_break.rs b/vendor/regex-syntax/src/unicode_tables/grapheme_cluster_break.rs new file mode 100644 index 00000000000000..6a6ec2af5f25fa --- /dev/null +++ b/vendor/regex-syntax/src/unicode_tables/grapheme_cluster_break.rs @@ -0,0 +1,1420 @@ +// DO NOT EDIT THIS FILE. IT WAS AUTOMATICALLY GENERATED BY: +// +// ucd-generate grapheme-cluster-break ucd-16.0.0 --chars +// +// Unicode version: 16.0.0. +// +// ucd-generate 0.3.1 is available on crates.io. + +pub const BY_NAME: &'static [(&'static str, &'static [(char, char)])] = &[ + ("CR", CR), + ("Control", CONTROL), + ("Extend", EXTEND), + ("L", L), + ("LF", LF), + ("LV", LV), + ("LVT", LVT), + ("Prepend", PREPEND), + ("Regional_Indicator", REGIONAL_INDICATOR), + ("SpacingMark", SPACINGMARK), + ("T", T), + ("V", V), + ("ZWJ", ZWJ), +]; + +pub const CR: &'static [(char, char)] = &[('\r', '\r')]; + +pub const CONTROL: &'static [(char, char)] = &[ + ('\0', '\t'), + ('\u{b}', '\u{c}'), + ('\u{e}', '\u{1f}'), + ('\u{7f}', '\u{9f}'), + ('\u{ad}', '\u{ad}'), + ('\u{61c}', '\u{61c}'), + ('\u{180e}', '\u{180e}'), + ('\u{200b}', '\u{200b}'), + ('\u{200e}', '\u{200f}'), + ('\u{2028}', '\u{202e}'), + ('\u{2060}', '\u{206f}'), + ('\u{feff}', '\u{feff}'), + ('\u{fff0}', '\u{fffb}'), + ('\u{13430}', '\u{1343f}'), + ('\u{1bca0}', '\u{1bca3}'), + ('\u{1d173}', '\u{1d17a}'), + ('\u{e0000}', '\u{e001f}'), + ('\u{e0080}', '\u{e00ff}'), + ('\u{e01f0}', '\u{e0fff}'), +]; + +pub const EXTEND: &'static [(char, char)] = &[ + ('\u{300}', '\u{36f}'), + ('\u{483}', '\u{489}'), + ('\u{591}', '\u{5bd}'), + ('\u{5bf}', '\u{5bf}'), + ('\u{5c1}', '\u{5c2}'), + ('\u{5c4}', '\u{5c5}'), + ('\u{5c7}', '\u{5c7}'), + ('\u{610}', '\u{61a}'), + ('\u{64b}', '\u{65f}'), + ('\u{670}', '\u{670}'), + ('\u{6d6}', '\u{6dc}'), + ('\u{6df}', '\u{6e4}'), + ('\u{6e7}', '\u{6e8}'), + ('\u{6ea}', '\u{6ed}'), + ('\u{711}', '\u{711}'), + ('\u{730}', '\u{74a}'), + ('\u{7a6}', '\u{7b0}'), + ('\u{7eb}', '\u{7f3}'), + ('\u{7fd}', '\u{7fd}'), + ('\u{816}', '\u{819}'), + ('\u{81b}', '\u{823}'), + ('\u{825}', '\u{827}'), + ('\u{829}', '\u{82d}'), + ('\u{859}', '\u{85b}'), + ('\u{897}', '\u{89f}'), + ('\u{8ca}', '\u{8e1}'), + ('\u{8e3}', '\u{902}'), + ('\u{93a}', '\u{93a}'), + ('\u{93c}', '\u{93c}'), + ('\u{941}', '\u{948}'), + ('\u{94d}', '\u{94d}'), + ('\u{951}', '\u{957}'), + ('\u{962}', '\u{963}'), + ('\u{981}', '\u{981}'), + ('\u{9bc}', '\u{9bc}'), + ('\u{9be}', '\u{9be}'), + ('\u{9c1}', '\u{9c4}'), + ('\u{9cd}', '\u{9cd}'), + ('\u{9d7}', '\u{9d7}'), + ('\u{9e2}', '\u{9e3}'), + ('\u{9fe}', '\u{9fe}'), + ('\u{a01}', '\u{a02}'), + ('\u{a3c}', '\u{a3c}'), + ('\u{a41}', '\u{a42}'), + ('\u{a47}', '\u{a48}'), + ('\u{a4b}', '\u{a4d}'), + ('\u{a51}', '\u{a51}'), + ('\u{a70}', '\u{a71}'), + ('\u{a75}', '\u{a75}'), + ('\u{a81}', '\u{a82}'), + ('\u{abc}', '\u{abc}'), + ('\u{ac1}', '\u{ac5}'), + ('\u{ac7}', '\u{ac8}'), + ('\u{acd}', '\u{acd}'), + ('\u{ae2}', '\u{ae3}'), + ('\u{afa}', '\u{aff}'), + ('\u{b01}', '\u{b01}'), + ('\u{b3c}', 
'\u{b3c}'), + ('\u{b3e}', '\u{b3f}'), + ('\u{b41}', '\u{b44}'), + ('\u{b4d}', '\u{b4d}'), + ('\u{b55}', '\u{b57}'), + ('\u{b62}', '\u{b63}'), + ('\u{b82}', '\u{b82}'), + ('\u{bbe}', '\u{bbe}'), + ('\u{bc0}', '\u{bc0}'), + ('\u{bcd}', '\u{bcd}'), + ('\u{bd7}', '\u{bd7}'), + ('\u{c00}', '\u{c00}'), + ('\u{c04}', '\u{c04}'), + ('\u{c3c}', '\u{c3c}'), + ('\u{c3e}', '\u{c40}'), + ('\u{c46}', '\u{c48}'), + ('\u{c4a}', '\u{c4d}'), + ('\u{c55}', '\u{c56}'), + ('\u{c62}', '\u{c63}'), + ('\u{c81}', '\u{c81}'), + ('\u{cbc}', '\u{cbc}'), + ('\u{cbf}', '\u{cc0}'), + ('\u{cc2}', '\u{cc2}'), + ('\u{cc6}', '\u{cc8}'), + ('\u{cca}', '\u{ccd}'), + ('\u{cd5}', '\u{cd6}'), + ('\u{ce2}', '\u{ce3}'), + ('\u{d00}', '\u{d01}'), + ('\u{d3b}', '\u{d3c}'), + ('\u{d3e}', '\u{d3e}'), + ('\u{d41}', '\u{d44}'), + ('\u{d4d}', '\u{d4d}'), + ('\u{d57}', '\u{d57}'), + ('\u{d62}', '\u{d63}'), + ('\u{d81}', '\u{d81}'), + ('\u{dca}', '\u{dca}'), + ('\u{dcf}', '\u{dcf}'), + ('\u{dd2}', '\u{dd4}'), + ('\u{dd6}', '\u{dd6}'), + ('\u{ddf}', '\u{ddf}'), + ('\u{e31}', '\u{e31}'), + ('\u{e34}', '\u{e3a}'), + ('\u{e47}', '\u{e4e}'), + ('\u{eb1}', '\u{eb1}'), + ('\u{eb4}', '\u{ebc}'), + ('\u{ec8}', '\u{ece}'), + ('\u{f18}', '\u{f19}'), + ('\u{f35}', '\u{f35}'), + ('\u{f37}', '\u{f37}'), + ('\u{f39}', '\u{f39}'), + ('\u{f71}', '\u{f7e}'), + ('\u{f80}', '\u{f84}'), + ('\u{f86}', '\u{f87}'), + ('\u{f8d}', '\u{f97}'), + ('\u{f99}', '\u{fbc}'), + ('\u{fc6}', '\u{fc6}'), + ('\u{102d}', '\u{1030}'), + ('\u{1032}', '\u{1037}'), + ('\u{1039}', '\u{103a}'), + ('\u{103d}', '\u{103e}'), + ('\u{1058}', '\u{1059}'), + ('\u{105e}', '\u{1060}'), + ('\u{1071}', '\u{1074}'), + ('\u{1082}', '\u{1082}'), + ('\u{1085}', '\u{1086}'), + ('\u{108d}', '\u{108d}'), + ('\u{109d}', '\u{109d}'), + ('\u{135d}', '\u{135f}'), + ('\u{1712}', '\u{1715}'), + ('\u{1732}', '\u{1734}'), + ('\u{1752}', '\u{1753}'), + ('\u{1772}', '\u{1773}'), + ('\u{17b4}', '\u{17b5}'), + ('\u{17b7}', '\u{17bd}'), + ('\u{17c6}', '\u{17c6}'), + ('\u{17c9}', '\u{17d3}'), + ('\u{17dd}', '\u{17dd}'), + ('\u{180b}', '\u{180d}'), + ('\u{180f}', '\u{180f}'), + ('\u{1885}', '\u{1886}'), + ('\u{18a9}', '\u{18a9}'), + ('\u{1920}', '\u{1922}'), + ('\u{1927}', '\u{1928}'), + ('\u{1932}', '\u{1932}'), + ('\u{1939}', '\u{193b}'), + ('\u{1a17}', '\u{1a18}'), + ('\u{1a1b}', '\u{1a1b}'), + ('\u{1a56}', '\u{1a56}'), + ('\u{1a58}', '\u{1a5e}'), + ('\u{1a60}', '\u{1a60}'), + ('\u{1a62}', '\u{1a62}'), + ('\u{1a65}', '\u{1a6c}'), + ('\u{1a73}', '\u{1a7c}'), + ('\u{1a7f}', '\u{1a7f}'), + ('\u{1ab0}', '\u{1ace}'), + ('\u{1b00}', '\u{1b03}'), + ('\u{1b34}', '\u{1b3d}'), + ('\u{1b42}', '\u{1b44}'), + ('\u{1b6b}', '\u{1b73}'), + ('\u{1b80}', '\u{1b81}'), + ('\u{1ba2}', '\u{1ba5}'), + ('\u{1ba8}', '\u{1bad}'), + ('\u{1be6}', '\u{1be6}'), + ('\u{1be8}', '\u{1be9}'), + ('\u{1bed}', '\u{1bed}'), + ('\u{1bef}', '\u{1bf3}'), + ('\u{1c2c}', '\u{1c33}'), + ('\u{1c36}', '\u{1c37}'), + ('\u{1cd0}', '\u{1cd2}'), + ('\u{1cd4}', '\u{1ce0}'), + ('\u{1ce2}', '\u{1ce8}'), + ('\u{1ced}', '\u{1ced}'), + ('\u{1cf4}', '\u{1cf4}'), + ('\u{1cf8}', '\u{1cf9}'), + ('\u{1dc0}', '\u{1dff}'), + ('\u{200c}', '\u{200c}'), + ('\u{20d0}', '\u{20f0}'), + ('\u{2cef}', '\u{2cf1}'), + ('\u{2d7f}', '\u{2d7f}'), + ('\u{2de0}', '\u{2dff}'), + ('\u{302a}', '\u{302f}'), + ('\u{3099}', '\u{309a}'), + ('\u{a66f}', '\u{a672}'), + ('\u{a674}', '\u{a67d}'), + ('\u{a69e}', '\u{a69f}'), + ('\u{a6f0}', '\u{a6f1}'), + ('\u{a802}', '\u{a802}'), + ('\u{a806}', '\u{a806}'), + ('\u{a80b}', '\u{a80b}'), + ('\u{a825}', '\u{a826}'), + ('\u{a82c}', '\u{a82c}'), + 
('\u{a8c4}', '\u{a8c5}'), + ('\u{a8e0}', '\u{a8f1}'), + ('\u{a8ff}', '\u{a8ff}'), + ('\u{a926}', '\u{a92d}'), + ('\u{a947}', '\u{a951}'), + ('\u{a953}', '\u{a953}'), + ('\u{a980}', '\u{a982}'), + ('\u{a9b3}', '\u{a9b3}'), + ('\u{a9b6}', '\u{a9b9}'), + ('\u{a9bc}', '\u{a9bd}'), + ('\u{a9c0}', '\u{a9c0}'), + ('\u{a9e5}', '\u{a9e5}'), + ('\u{aa29}', '\u{aa2e}'), + ('\u{aa31}', '\u{aa32}'), + ('\u{aa35}', '\u{aa36}'), + ('\u{aa43}', '\u{aa43}'), + ('\u{aa4c}', '\u{aa4c}'), + ('\u{aa7c}', '\u{aa7c}'), + ('\u{aab0}', '\u{aab0}'), + ('\u{aab2}', '\u{aab4}'), + ('\u{aab7}', '\u{aab8}'), + ('\u{aabe}', '\u{aabf}'), + ('\u{aac1}', '\u{aac1}'), + ('\u{aaec}', '\u{aaed}'), + ('\u{aaf6}', '\u{aaf6}'), + ('\u{abe5}', '\u{abe5}'), + ('\u{abe8}', '\u{abe8}'), + ('\u{abed}', '\u{abed}'), + ('\u{fb1e}', '\u{fb1e}'), + ('\u{fe00}', '\u{fe0f}'), + ('\u{fe20}', '\u{fe2f}'), + ('\u{ff9e}', '\u{ff9f}'), + ('\u{101fd}', '\u{101fd}'), + ('\u{102e0}', '\u{102e0}'), + ('\u{10376}', '\u{1037a}'), + ('\u{10a01}', '\u{10a03}'), + ('\u{10a05}', '\u{10a06}'), + ('\u{10a0c}', '\u{10a0f}'), + ('\u{10a38}', '\u{10a3a}'), + ('\u{10a3f}', '\u{10a3f}'), + ('\u{10ae5}', '\u{10ae6}'), + ('\u{10d24}', '\u{10d27}'), + ('\u{10d69}', '\u{10d6d}'), + ('\u{10eab}', '\u{10eac}'), + ('\u{10efc}', '\u{10eff}'), + ('\u{10f46}', '\u{10f50}'), + ('\u{10f82}', '\u{10f85}'), + ('\u{11001}', '\u{11001}'), + ('\u{11038}', '\u{11046}'), + ('\u{11070}', '\u{11070}'), + ('\u{11073}', '\u{11074}'), + ('\u{1107f}', '\u{11081}'), + ('\u{110b3}', '\u{110b6}'), + ('\u{110b9}', '\u{110ba}'), + ('\u{110c2}', '\u{110c2}'), + ('\u{11100}', '\u{11102}'), + ('\u{11127}', '\u{1112b}'), + ('\u{1112d}', '\u{11134}'), + ('\u{11173}', '\u{11173}'), + ('\u{11180}', '\u{11181}'), + ('\u{111b6}', '\u{111be}'), + ('\u{111c0}', '\u{111c0}'), + ('\u{111c9}', '\u{111cc}'), + ('\u{111cf}', '\u{111cf}'), + ('\u{1122f}', '\u{11231}'), + ('\u{11234}', '\u{11237}'), + ('\u{1123e}', '\u{1123e}'), + ('\u{11241}', '\u{11241}'), + ('\u{112df}', '\u{112df}'), + ('\u{112e3}', '\u{112ea}'), + ('\u{11300}', '\u{11301}'), + ('\u{1133b}', '\u{1133c}'), + ('\u{1133e}', '\u{1133e}'), + ('\u{11340}', '\u{11340}'), + ('\u{1134d}', '\u{1134d}'), + ('\u{11357}', '\u{11357}'), + ('\u{11366}', '\u{1136c}'), + ('\u{11370}', '\u{11374}'), + ('\u{113b8}', '\u{113b8}'), + ('\u{113bb}', '\u{113c0}'), + ('\u{113c2}', '\u{113c2}'), + ('\u{113c5}', '\u{113c5}'), + ('\u{113c7}', '\u{113c9}'), + ('\u{113ce}', '\u{113d0}'), + ('\u{113d2}', '\u{113d2}'), + ('\u{113e1}', '\u{113e2}'), + ('\u{11438}', '\u{1143f}'), + ('\u{11442}', '\u{11444}'), + ('\u{11446}', '\u{11446}'), + ('\u{1145e}', '\u{1145e}'), + ('\u{114b0}', '\u{114b0}'), + ('\u{114b3}', '\u{114b8}'), + ('\u{114ba}', '\u{114ba}'), + ('\u{114bd}', '\u{114bd}'), + ('\u{114bf}', '\u{114c0}'), + ('\u{114c2}', '\u{114c3}'), + ('\u{115af}', '\u{115af}'), + ('\u{115b2}', '\u{115b5}'), + ('\u{115bc}', '\u{115bd}'), + ('\u{115bf}', '\u{115c0}'), + ('\u{115dc}', '\u{115dd}'), + ('\u{11633}', '\u{1163a}'), + ('\u{1163d}', '\u{1163d}'), + ('\u{1163f}', '\u{11640}'), + ('\u{116ab}', '\u{116ab}'), + ('\u{116ad}', '\u{116ad}'), + ('\u{116b0}', '\u{116b7}'), + ('\u{1171d}', '\u{1171d}'), + ('\u{1171f}', '\u{1171f}'), + ('\u{11722}', '\u{11725}'), + ('\u{11727}', '\u{1172b}'), + ('\u{1182f}', '\u{11837}'), + ('\u{11839}', '\u{1183a}'), + ('\u{11930}', '\u{11930}'), + ('\u{1193b}', '\u{1193e}'), + ('\u{11943}', '\u{11943}'), + ('\u{119d4}', '\u{119d7}'), + ('\u{119da}', '\u{119db}'), + ('\u{119e0}', '\u{119e0}'), + ('\u{11a01}', '\u{11a0a}'), + ('\u{11a33}', 
'\u{11a38}'), + ('\u{11a3b}', '\u{11a3e}'), + ('\u{11a47}', '\u{11a47}'), + ('\u{11a51}', '\u{11a56}'), + ('\u{11a59}', '\u{11a5b}'), + ('\u{11a8a}', '\u{11a96}'), + ('\u{11a98}', '\u{11a99}'), + ('\u{11c30}', '\u{11c36}'), + ('\u{11c38}', '\u{11c3d}'), + ('\u{11c3f}', '\u{11c3f}'), + ('\u{11c92}', '\u{11ca7}'), + ('\u{11caa}', '\u{11cb0}'), + ('\u{11cb2}', '\u{11cb3}'), + ('\u{11cb5}', '\u{11cb6}'), + ('\u{11d31}', '\u{11d36}'), + ('\u{11d3a}', '\u{11d3a}'), + ('\u{11d3c}', '\u{11d3d}'), + ('\u{11d3f}', '\u{11d45}'), + ('\u{11d47}', '\u{11d47}'), + ('\u{11d90}', '\u{11d91}'), + ('\u{11d95}', '\u{11d95}'), + ('\u{11d97}', '\u{11d97}'), + ('\u{11ef3}', '\u{11ef4}'), + ('\u{11f00}', '\u{11f01}'), + ('\u{11f36}', '\u{11f3a}'), + ('\u{11f40}', '\u{11f42}'), + ('\u{11f5a}', '\u{11f5a}'), + ('\u{13440}', '\u{13440}'), + ('\u{13447}', '\u{13455}'), + ('\u{1611e}', '\u{16129}'), + ('\u{1612d}', '\u{1612f}'), + ('\u{16af0}', '\u{16af4}'), + ('\u{16b30}', '\u{16b36}'), + ('\u{16f4f}', '\u{16f4f}'), + ('\u{16f8f}', '\u{16f92}'), + ('\u{16fe4}', '\u{16fe4}'), + ('\u{16ff0}', '\u{16ff1}'), + ('\u{1bc9d}', '\u{1bc9e}'), + ('\u{1cf00}', '\u{1cf2d}'), + ('\u{1cf30}', '\u{1cf46}'), + ('\u{1d165}', '\u{1d169}'), + ('\u{1d16d}', '\u{1d172}'), + ('\u{1d17b}', '\u{1d182}'), + ('\u{1d185}', '\u{1d18b}'), + ('\u{1d1aa}', '\u{1d1ad}'), + ('\u{1d242}', '\u{1d244}'), + ('\u{1da00}', '\u{1da36}'), + ('\u{1da3b}', '\u{1da6c}'), + ('\u{1da75}', '\u{1da75}'), + ('\u{1da84}', '\u{1da84}'), + ('\u{1da9b}', '\u{1da9f}'), + ('\u{1daa1}', '\u{1daaf}'), + ('\u{1e000}', '\u{1e006}'), + ('\u{1e008}', '\u{1e018}'), + ('\u{1e01b}', '\u{1e021}'), + ('\u{1e023}', '\u{1e024}'), + ('\u{1e026}', '\u{1e02a}'), + ('\u{1e08f}', '\u{1e08f}'), + ('\u{1e130}', '\u{1e136}'), + ('\u{1e2ae}', '\u{1e2ae}'), + ('\u{1e2ec}', '\u{1e2ef}'), + ('\u{1e4ec}', '\u{1e4ef}'), + ('\u{1e5ee}', '\u{1e5ef}'), + ('\u{1e8d0}', '\u{1e8d6}'), + ('\u{1e944}', '\u{1e94a}'), + ('🏻', '🏿'), + ('\u{e0020}', '\u{e007f}'), + ('\u{e0100}', '\u{e01ef}'), +]; + +pub const L: &'static [(char, char)] = &[('ᄀ', 'ᅟ'), ('ꥠ', 'ꥼ')]; + +pub const LF: &'static [(char, char)] = &[('\n', '\n')]; + +pub const LV: &'static [(char, char)] = &[ + ('가', '가'), + ('개', '개'), + ('갸', '갸'), + ('걔', '걔'), + ('거', '거'), + ('게', '게'), + ('겨', '겨'), + ('계', '계'), + ('고', '고'), + ('과', '과'), + ('괘', '괘'), + ('괴', '괴'), + ('교', '교'), + ('구', '구'), + ('궈', '궈'), + ('궤', '궤'), + ('귀', '귀'), + ('규', '규'), + ('그', '그'), + ('긔', '긔'), + ('기', '기'), + ('까', '까'), + ('깨', '깨'), + ('꺄', '꺄'), + ('꺠', '꺠'), + ('꺼', '꺼'), + ('께', '께'), + ('껴', '껴'), + ('꼐', '꼐'), + ('꼬', '꼬'), + ('꽈', '꽈'), + ('꽤', '꽤'), + ('꾀', '꾀'), + ('꾜', '꾜'), + ('꾸', '꾸'), + ('꿔', '꿔'), + ('꿰', '꿰'), + ('뀌', '뀌'), + ('뀨', '뀨'), + ('끄', '끄'), + ('끠', '끠'), + ('끼', '끼'), + ('나', '나'), + ('내', '내'), + ('냐', '냐'), + ('냬', '냬'), + ('너', '너'), + ('네', '네'), + ('녀', '녀'), + ('녜', '녜'), + ('노', '노'), + ('놔', '놔'), + ('놰', '놰'), + ('뇌', '뇌'), + ('뇨', '뇨'), + ('누', '누'), + ('눠', '눠'), + ('눼', '눼'), + ('뉘', '뉘'), + ('뉴', '뉴'), + ('느', '느'), + ('늬', '늬'), + ('니', '니'), + ('다', '다'), + ('대', '대'), + ('댜', '댜'), + ('댸', '댸'), + ('더', '더'), + ('데', '데'), + ('뎌', '뎌'), + ('뎨', '뎨'), + ('도', '도'), + ('돠', '돠'), + ('돼', '돼'), + ('되', '되'), + ('됴', '됴'), + ('두', '두'), + ('둬', '둬'), + ('뒈', '뒈'), + ('뒤', '뒤'), + ('듀', '듀'), + ('드', '드'), + ('듸', '듸'), + ('디', '디'), + ('따', '따'), + ('때', '때'), + ('땨', '땨'), + ('떄', '떄'), + ('떠', '떠'), + ('떼', '떼'), + ('뗘', '뗘'), + ('뗴', '뗴'), + ('또', '또'), + ('똬', '똬'), + ('뙈', '뙈'), + ('뙤', '뙤'), + ('뚀', '뚀'), + ('뚜', 
'뚜'), + ('뚸', '뚸'), + ('뛔', '뛔'), + ('뛰', '뛰'), + ('뜌', '뜌'), + ('뜨', '뜨'), + ('띄', '띄'), + ('띠', '띠'), + ('라', '라'), + ('래', '래'), + ('랴', '랴'), + ('럐', '럐'), + ('러', '러'), + ('레', '레'), + ('려', '려'), + ('례', '례'), + ('로', '로'), + ('롸', '롸'), + ('뢔', '뢔'), + ('뢰', '뢰'), + ('료', '료'), + ('루', '루'), + ('뤄', '뤄'), + ('뤠', '뤠'), + ('뤼', '뤼'), + ('류', '류'), + ('르', '르'), + ('릐', '릐'), + ('리', '리'), + ('마', '마'), + ('매', '매'), + ('먀', '먀'), + ('먜', '먜'), + ('머', '머'), + ('메', '메'), + ('며', '며'), + ('몌', '몌'), + ('모', '모'), + ('뫄', '뫄'), + ('뫠', '뫠'), + ('뫼', '뫼'), + ('묘', '묘'), + ('무', '무'), + ('뭐', '뭐'), + ('뭬', '뭬'), + ('뮈', '뮈'), + ('뮤', '뮤'), + ('므', '므'), + ('믜', '믜'), + ('미', '미'), + ('바', '바'), + ('배', '배'), + ('뱌', '뱌'), + ('뱨', '뱨'), + ('버', '버'), + ('베', '베'), + ('벼', '벼'), + ('볘', '볘'), + ('보', '보'), + ('봐', '봐'), + ('봬', '봬'), + ('뵈', '뵈'), + ('뵤', '뵤'), + ('부', '부'), + ('붜', '붜'), + ('붸', '붸'), + ('뷔', '뷔'), + ('뷰', '뷰'), + ('브', '브'), + ('븨', '븨'), + ('비', '비'), + ('빠', '빠'), + ('빼', '빼'), + ('뺘', '뺘'), + ('뺴', '뺴'), + ('뻐', '뻐'), + ('뻬', '뻬'), + ('뼈', '뼈'), + ('뼤', '뼤'), + ('뽀', '뽀'), + ('뽜', '뽜'), + ('뽸', '뽸'), + ('뾔', '뾔'), + ('뾰', '뾰'), + ('뿌', '뿌'), + ('뿨', '뿨'), + ('쀄', '쀄'), + ('쀠', '쀠'), + ('쀼', '쀼'), + ('쁘', '쁘'), + ('쁴', '쁴'), + ('삐', '삐'), + ('사', '사'), + ('새', '새'), + ('샤', '샤'), + ('섀', '섀'), + ('서', '서'), + ('세', '세'), + ('셔', '셔'), + ('셰', '셰'), + ('소', '소'), + ('솨', '솨'), + ('쇄', '쇄'), + ('쇠', '쇠'), + ('쇼', '쇼'), + ('수', '수'), + ('숴', '숴'), + ('쉐', '쉐'), + ('쉬', '쉬'), + ('슈', '슈'), + ('스', '스'), + ('싀', '싀'), + ('시', '시'), + ('싸', '싸'), + ('쌔', '쌔'), + ('쌰', '쌰'), + ('썌', '썌'), + ('써', '써'), + ('쎄', '쎄'), + ('쎠', '쎠'), + ('쎼', '쎼'), + ('쏘', '쏘'), + ('쏴', '쏴'), + ('쐐', '쐐'), + ('쐬', '쐬'), + ('쑈', '쑈'), + ('쑤', '쑤'), + ('쒀', '쒀'), + ('쒜', '쒜'), + ('쒸', '쒸'), + ('쓔', '쓔'), + ('쓰', '쓰'), + ('씌', '씌'), + ('씨', '씨'), + ('아', '아'), + ('애', '애'), + ('야', '야'), + ('얘', '얘'), + ('어', '어'), + ('에', '에'), + ('여', '여'), + ('예', '예'), + ('오', '오'), + ('와', '와'), + ('왜', '왜'), + ('외', '외'), + ('요', '요'), + ('우', '우'), + ('워', '워'), + ('웨', '웨'), + ('위', '위'), + ('유', '유'), + ('으', '으'), + ('의', '의'), + ('이', '이'), + ('자', '자'), + ('재', '재'), + ('쟈', '쟈'), + ('쟤', '쟤'), + ('저', '저'), + ('제', '제'), + ('져', '져'), + ('졔', '졔'), + ('조', '조'), + ('좌', '좌'), + ('좨', '좨'), + ('죄', '죄'), + ('죠', '죠'), + ('주', '주'), + ('줘', '줘'), + ('줴', '줴'), + ('쥐', '쥐'), + ('쥬', '쥬'), + ('즈', '즈'), + ('즤', '즤'), + ('지', '지'), + ('짜', '짜'), + ('째', '째'), + ('쨔', '쨔'), + ('쨰', '쨰'), + ('쩌', '쩌'), + ('쩨', '쩨'), + ('쪄', '쪄'), + ('쪠', '쪠'), + ('쪼', '쪼'), + ('쫘', '쫘'), + ('쫴', '쫴'), + ('쬐', '쬐'), + ('쬬', '쬬'), + ('쭈', '쭈'), + ('쭤', '쭤'), + ('쮀', '쮀'), + ('쮜', '쮜'), + ('쮸', '쮸'), + ('쯔', '쯔'), + ('쯰', '쯰'), + ('찌', '찌'), + ('차', '차'), + ('채', '채'), + ('챠', '챠'), + ('챼', '챼'), + ('처', '처'), + ('체', '체'), + ('쳐', '쳐'), + ('쳬', '쳬'), + ('초', '초'), + ('촤', '촤'), + ('쵀', '쵀'), + ('최', '최'), + ('쵸', '쵸'), + ('추', '추'), + ('춰', '춰'), + ('췌', '췌'), + ('취', '취'), + ('츄', '츄'), + ('츠', '츠'), + ('츼', '츼'), + ('치', '치'), + ('카', '카'), + ('캐', '캐'), + ('캬', '캬'), + ('컈', '컈'), + ('커', '커'), + ('케', '케'), + ('켜', '켜'), + ('켸', '켸'), + ('코', '코'), + ('콰', '콰'), + ('쾌', '쾌'), + ('쾨', '쾨'), + ('쿄', '쿄'), + ('쿠', '쿠'), + ('쿼', '쿼'), + ('퀘', '퀘'), + ('퀴', '퀴'), + ('큐', '큐'), + ('크', '크'), + ('킈', '킈'), + ('키', '키'), + ('타', '타'), + ('태', '태'), + ('탸', '탸'), + ('턔', '턔'), + ('터', '터'), + ('테', '테'), + ('텨', '텨'), + ('톄', '톄'), + ('토', '토'), + ('톼', '톼'), + ('퇘', '퇘'), + ('퇴', '퇴'), + ('툐', '툐'), + ('투', '투'), + ('퉈', '퉈'), + 
('퉤', '퉤'), + ('튀', '튀'), + ('튜', '튜'), + ('트', '트'), + ('틔', '틔'), + ('티', '티'), + ('파', '파'), + ('패', '패'), + ('퍄', '퍄'), + ('퍠', '퍠'), + ('퍼', '퍼'), + ('페', '페'), + ('펴', '펴'), + ('폐', '폐'), + ('포', '포'), + ('퐈', '퐈'), + ('퐤', '퐤'), + ('푀', '푀'), + ('표', '표'), + ('푸', '푸'), + ('풔', '풔'), + ('풰', '풰'), + ('퓌', '퓌'), + ('퓨', '퓨'), + ('프', '프'), + ('픠', '픠'), + ('피', '피'), + ('하', '하'), + ('해', '해'), + ('햐', '햐'), + ('햬', '햬'), + ('허', '허'), + ('헤', '헤'), + ('혀', '혀'), + ('혜', '혜'), + ('호', '호'), + ('화', '화'), + ('홰', '홰'), + ('회', '회'), + ('효', '효'), + ('후', '후'), + ('훠', '훠'), + ('훼', '훼'), + ('휘', '휘'), + ('휴', '휴'), + ('흐', '흐'), + ('희', '희'), + ('히', '히'), +]; + +pub const LVT: &'static [(char, char)] = &[ + ('각', '갛'), + ('객', '갷'), + ('갹', '걓'), + ('걕', '걯'), + ('걱', '겋'), + ('겍', '겧'), + ('격', '곃'), + ('곅', '곟'), + ('곡', '곻'), + ('곽', '괗'), + ('괙', '괳'), + ('괵', '굏'), + ('굑', '굫'), + ('국', '궇'), + ('궉', '궣'), + ('궥', '궿'), + ('귁', '귛'), + ('귝', '귷'), + ('극', '긓'), + ('긕', '긯'), + ('긱', '깋'), + ('깍', '깧'), + ('깩', '꺃'), + ('꺅', '꺟'), + ('꺡', '꺻'), + ('꺽', '껗'), + ('껙', '껳'), + ('껵', '꼏'), + ('꼑', '꼫'), + ('꼭', '꽇'), + ('꽉', '꽣'), + ('꽥', '꽿'), + ('꾁', '꾛'), + ('꾝', '꾷'), + ('꾹', '꿓'), + ('꿕', '꿯'), + ('꿱', '뀋'), + ('뀍', '뀧'), + ('뀩', '끃'), + ('끅', '끟'), + ('끡', '끻'), + ('끽', '낗'), + ('낙', '낳'), + ('낵', '냏'), + ('냑', '냫'), + ('냭', '넇'), + ('넉', '넣'), + ('넥', '넿'), + ('녁', '녛'), + ('녝', '녷'), + ('녹', '놓'), + ('놕', '놯'), + ('놱', '뇋'), + ('뇍', '뇧'), + ('뇩', '눃'), + ('눅', '눟'), + ('눡', '눻'), + ('눽', '뉗'), + ('뉙', '뉳'), + ('뉵', '늏'), + ('늑', '늫'), + ('늭', '닇'), + ('닉', '닣'), + ('닥', '닿'), + ('댁', '댛'), + ('댝', '댷'), + ('댹', '덓'), + ('덕', '덯'), + ('덱', '뎋'), + ('뎍', '뎧'), + ('뎩', '돃'), + ('독', '돟'), + ('돡', '돻'), + ('돽', '됗'), + ('됙', '됳'), + ('됵', '둏'), + ('둑', '둫'), + ('둭', '뒇'), + ('뒉', '뒣'), + ('뒥', '뒿'), + ('듁', '듛'), + ('득', '듷'), + ('듹', '딓'), + ('딕', '딯'), + ('딱', '땋'), + ('땍', '땧'), + ('땩', '떃'), + ('떅', '떟'), + ('떡', '떻'), + ('떽', '뗗'), + ('뗙', '뗳'), + ('뗵', '똏'), + ('똑', '똫'), + ('똭', '뙇'), + ('뙉', '뙣'), + ('뙥', '뙿'), + ('뚁', '뚛'), + ('뚝', '뚷'), + ('뚹', '뛓'), + ('뛕', '뛯'), + ('뛱', '뜋'), + ('뜍', '뜧'), + ('뜩', '띃'), + ('띅', '띟'), + ('띡', '띻'), + ('락', '랗'), + ('랙', '랳'), + ('략', '럏'), + ('럑', '럫'), + ('럭', '렇'), + ('렉', '렣'), + ('력', '렿'), + ('롁', '롛'), + ('록', '롷'), + ('롹', '뢓'), + ('뢕', '뢯'), + ('뢱', '룋'), + ('룍', '룧'), + ('룩', '뤃'), + ('뤅', '뤟'), + ('뤡', '뤻'), + ('뤽', '륗'), + ('륙', '륳'), + ('륵', '릏'), + ('릑', '릫'), + ('릭', '맇'), + ('막', '맣'), + ('맥', '맿'), + ('먁', '먛'), + ('먝', '먷'), + ('먹', '멓'), + ('멕', '멯'), + ('멱', '몋'), + ('몍', '몧'), + ('목', '뫃'), + ('뫅', '뫟'), + ('뫡', '뫻'), + ('뫽', '묗'), + ('묙', '묳'), + ('묵', '뭏'), + ('뭑', '뭫'), + ('뭭', '뮇'), + ('뮉', '뮣'), + ('뮥', '뮿'), + ('믁', '믛'), + ('믝', '믷'), + ('믹', '밓'), + ('박', '밯'), + ('백', '뱋'), + ('뱍', '뱧'), + ('뱩', '벃'), + ('벅', '벟'), + ('벡', '벻'), + ('벽', '볗'), + ('볙', '볳'), + ('복', '봏'), + ('봑', '봫'), + ('봭', '뵇'), + ('뵉', '뵣'), + ('뵥', '뵿'), + ('북', '붛'), + ('붝', '붷'), + ('붹', '뷓'), + ('뷕', '뷯'), + ('뷱', '븋'), + ('븍', '븧'), + ('븩', '빃'), + ('빅', '빟'), + ('빡', '빻'), + ('빽', '뺗'), + ('뺙', '뺳'), + ('뺵', '뻏'), + ('뻑', '뻫'), + ('뻭', '뼇'), + ('뼉', '뼣'), + ('뼥', '뼿'), + ('뽁', '뽛'), + ('뽝', '뽷'), + ('뽹', '뾓'), + ('뾕', '뾯'), + ('뾱', '뿋'), + ('뿍', '뿧'), + ('뿩', '쀃'), + ('쀅', '쀟'), + ('쀡', '쀻'), + ('쀽', '쁗'), + ('쁙', '쁳'), + ('쁵', '삏'), + ('삑', '삫'), + ('삭', '샇'), + ('색', '샣'), + ('샥', '샿'), + ('섁', '섛'), + ('석', '섷'), + ('섹', '셓'), + ('셕', '셯'), + ('셱', '솋'), + ('속', '솧'), + ('솩', '쇃'), + ('쇅', '쇟'), + ('쇡', '쇻'), + ('쇽', '숗'), + 
('숙', '숳'), + ('숵', '쉏'), + ('쉑', '쉫'), + ('쉭', '슇'), + ('슉', '슣'), + ('슥', '슿'), + ('싁', '싛'), + ('식', '싷'), + ('싹', '쌓'), + ('쌕', '쌯'), + ('쌱', '썋'), + ('썍', '썧'), + ('썩', '쎃'), + ('쎅', '쎟'), + ('쎡', '쎻'), + ('쎽', '쏗'), + ('쏙', '쏳'), + ('쏵', '쐏'), + ('쐑', '쐫'), + ('쐭', '쑇'), + ('쑉', '쑣'), + ('쑥', '쑿'), + ('쒁', '쒛'), + ('쒝', '쒷'), + ('쒹', '쓓'), + ('쓕', '쓯'), + ('쓱', '씋'), + ('씍', '씧'), + ('씩', '앃'), + ('악', '앟'), + ('액', '앻'), + ('약', '얗'), + ('얙', '얳'), + ('억', '엏'), + ('엑', '엫'), + ('역', '옇'), + ('옉', '옣'), + ('옥', '옿'), + ('왁', '왛'), + ('왝', '왷'), + ('왹', '욓'), + ('욕', '욯'), + ('욱', '웋'), + ('웍', '웧'), + ('웩', '윃'), + ('윅', '윟'), + ('육', '윻'), + ('윽', '읗'), + ('읙', '읳'), + ('익', '잏'), + ('작', '잫'), + ('잭', '쟇'), + ('쟉', '쟣'), + ('쟥', '쟿'), + ('적', '젛'), + ('젝', '젷'), + ('젹', '졓'), + ('졕', '졯'), + ('족', '좋'), + ('좍', '좧'), + ('좩', '죃'), + ('죅', '죟'), + ('죡', '죻'), + ('죽', '줗'), + ('줙', '줳'), + ('줵', '쥏'), + ('쥑', '쥫'), + ('쥭', '즇'), + ('즉', '즣'), + ('즥', '즿'), + ('직', '짛'), + ('짝', '짷'), + ('짹', '쨓'), + ('쨕', '쨯'), + ('쨱', '쩋'), + ('쩍', '쩧'), + ('쩩', '쪃'), + ('쪅', '쪟'), + ('쪡', '쪻'), + ('쪽', '쫗'), + ('쫙', '쫳'), + ('쫵', '쬏'), + ('쬑', '쬫'), + ('쬭', '쭇'), + ('쭉', '쭣'), + ('쭥', '쭿'), + ('쮁', '쮛'), + ('쮝', '쮷'), + ('쮹', '쯓'), + ('쯕', '쯯'), + ('쯱', '찋'), + ('찍', '찧'), + ('착', '챃'), + ('책', '챟'), + ('챡', '챻'), + ('챽', '첗'), + ('척', '첳'), + ('첵', '쳏'), + ('쳑', '쳫'), + ('쳭', '촇'), + ('촉', '촣'), + ('촥', '촿'), + ('쵁', '쵛'), + ('쵝', '쵷'), + ('쵹', '춓'), + ('축', '춯'), + ('춱', '췋'), + ('췍', '췧'), + ('췩', '츃'), + ('츅', '츟'), + ('측', '츻'), + ('츽', '칗'), + ('칙', '칳'), + ('칵', '캏'), + ('캑', '캫'), + ('캭', '컇'), + ('컉', '컣'), + ('컥', '컿'), + ('켁', '켛'), + ('켝', '켷'), + ('켹', '콓'), + ('콕', '콯'), + ('콱', '쾋'), + ('쾍', '쾧'), + ('쾩', '쿃'), + ('쿅', '쿟'), + ('쿡', '쿻'), + ('쿽', '퀗'), + ('퀙', '퀳'), + ('퀵', '큏'), + ('큑', '큫'), + ('큭', '킇'), + ('킉', '킣'), + ('킥', '킿'), + ('탁', '탛'), + ('택', '탷'), + ('탹', '턓'), + ('턕', '턯'), + ('턱', '텋'), + ('텍', '텧'), + ('텩', '톃'), + ('톅', '톟'), + ('톡', '톻'), + ('톽', '퇗'), + ('퇙', '퇳'), + ('퇵', '툏'), + ('툑', '툫'), + ('툭', '퉇'), + ('퉉', '퉣'), + ('퉥', '퉿'), + ('튁', '튛'), + ('튝', '튷'), + ('특', '틓'), + ('틕', '틯'), + ('틱', '팋'), + ('팍', '팧'), + ('팩', '퍃'), + ('퍅', '퍟'), + ('퍡', '퍻'), + ('퍽', '펗'), + ('펙', '펳'), + ('펵', '폏'), + ('폑', '폫'), + ('폭', '퐇'), + ('퐉', '퐣'), + ('퐥', '퐿'), + ('푁', '푛'), + ('푝', '푷'), + ('푹', '풓'), + ('풕', '풯'), + ('풱', '퓋'), + ('퓍', '퓧'), + ('퓩', '픃'), + ('픅', '픟'), + ('픡', '픻'), + ('픽', '핗'), + ('학', '핳'), + ('핵', '햏'), + ('햑', '햫'), + ('햭', '헇'), + ('헉', '헣'), + ('헥', '헿'), + ('혁', '혛'), + ('혝', '혷'), + ('혹', '홓'), + ('확', '홯'), + ('홱', '횋'), + ('획', '횧'), + ('횩', '훃'), + ('훅', '훟'), + ('훡', '훻'), + ('훽', '휗'), + ('휙', '휳'), + ('휵', '흏'), + ('흑', '흫'), + ('흭', '힇'), + ('힉', '힣'), +]; + +pub const PREPEND: &'static [(char, char)] = &[ + ('\u{600}', '\u{605}'), + ('\u{6dd}', '\u{6dd}'), + ('\u{70f}', '\u{70f}'), + ('\u{890}', '\u{891}'), + ('\u{8e2}', '\u{8e2}'), + ('ൎ', 'ൎ'), + ('\u{110bd}', '\u{110bd}'), + ('\u{110cd}', '\u{110cd}'), + ('𑇂', '𑇃'), + ('𑏑', '𑏑'), + ('𑤿', '𑤿'), + ('𑥁', '𑥁'), + ('𑨺', '𑨺'), + ('𑪄', '𑪉'), + ('𑵆', '𑵆'), + ('𑼂', '𑼂'), +]; + +pub const REGIONAL_INDICATOR: &'static [(char, char)] = &[('🇦', '🇿')]; + +pub const SPACINGMARK: &'static [(char, char)] = &[ + ('ः', 'ः'), + ('ऻ', 'ऻ'), + ('ा', 'ी'), + ('ॉ', 'ौ'), + ('ॎ', 'ॏ'), + ('ং', 'ঃ'), + ('ি', 'ী'), + ('ে', 'ৈ'), + ('ো', 'ৌ'), + ('ਃ', 'ਃ'), + ('ਾ', 'ੀ'), + ('ઃ', 'ઃ'), + ('ા', 'ી'), + ('ૉ', 'ૉ'), + ('ો', 'ૌ'), + ('ଂ', 'ଃ'), + ('ୀ', 'ୀ'), + ('େ', 'ୈ'), + ('ୋ', 'ୌ'), + ('ி', 'ி'), + ('ு', 'ூ'), 
+ ('ெ', 'ை'), + ('ொ', 'ௌ'), + ('ఁ', 'ః'), + ('ు', 'ౄ'), + ('ಂ', 'ಃ'), + ('ಾ', 'ಾ'), + ('ು', 'ು'), + ('ೃ', 'ೄ'), + ('ೳ', 'ೳ'), + ('ം', 'ഃ'), + ('ി', 'ീ'), + ('െ', 'ൈ'), + ('ൊ', 'ൌ'), + ('ං', 'ඃ'), + ('ැ', 'ෑ'), + ('ෘ', 'ෞ'), + ('ෲ', 'ෳ'), + ('ำ', 'ำ'), + ('ຳ', 'ຳ'), + ('༾', '༿'), + ('ཿ', 'ཿ'), + ('ေ', 'ေ'), + ('ျ', 'ြ'), + ('ၖ', 'ၗ'), + ('ႄ', 'ႄ'), + ('ា', 'ា'), + ('ើ', 'ៅ'), + ('ះ', 'ៈ'), + ('ᤣ', 'ᤦ'), + ('ᤩ', 'ᤫ'), + ('ᤰ', 'ᤱ'), + ('ᤳ', 'ᤸ'), + ('ᨙ', 'ᨚ'), + ('ᩕ', 'ᩕ'), + ('ᩗ', 'ᩗ'), + ('ᩭ', 'ᩲ'), + ('ᬄ', 'ᬄ'), + ('ᬾ', 'ᭁ'), + ('ᮂ', 'ᮂ'), + ('ᮡ', 'ᮡ'), + ('ᮦ', 'ᮧ'), + ('ᯧ', 'ᯧ'), + ('ᯪ', 'ᯬ'), + ('ᯮ', 'ᯮ'), + ('ᰤ', 'ᰫ'), + ('ᰴ', 'ᰵ'), + ('᳡', '᳡'), + ('᳷', '᳷'), + ('ꠣ', 'ꠤ'), + ('ꠧ', 'ꠧ'), + ('ꢀ', 'ꢁ'), + ('ꢴ', 'ꣃ'), + ('ꥒ', 'ꥒ'), + ('ꦃ', 'ꦃ'), + ('ꦴ', 'ꦵ'), + ('ꦺ', 'ꦻ'), + ('ꦾ', 'ꦿ'), + ('ꨯ', 'ꨰ'), + ('ꨳ', 'ꨴ'), + ('ꩍ', 'ꩍ'), + ('ꫫ', 'ꫫ'), + ('ꫮ', 'ꫯ'), + ('ꫵ', 'ꫵ'), + ('ꯣ', 'ꯤ'), + ('ꯦ', 'ꯧ'), + ('ꯩ', 'ꯪ'), + ('꯬', '꯬'), + ('𑀀', '𑀀'), + ('𑀂', '𑀂'), + ('𑂂', '𑂂'), + ('𑂰', '𑂲'), + ('𑂷', '𑂸'), + ('𑄬', '𑄬'), + ('𑅅', '𑅆'), + ('𑆂', '𑆂'), + ('𑆳', '𑆵'), + ('𑆿', '𑆿'), + ('𑇎', '𑇎'), + ('𑈬', '𑈮'), + ('𑈲', '𑈳'), + ('𑋠', '𑋢'), + ('𑌂', '𑌃'), + ('𑌿', '𑌿'), + ('𑍁', '𑍄'), + ('𑍇', '𑍈'), + ('𑍋', '𑍌'), + ('𑍢', '𑍣'), + ('𑎹', '𑎺'), + ('𑏊', '𑏊'), + ('𑏌', '𑏍'), + ('𑐵', '𑐷'), + ('𑑀', '𑑁'), + ('𑑅', '𑑅'), + ('𑒱', '𑒲'), + ('𑒹', '𑒹'), + ('𑒻', '𑒼'), + ('𑒾', '𑒾'), + ('𑓁', '𑓁'), + ('𑖰', '𑖱'), + ('𑖸', '𑖻'), + ('𑖾', '𑖾'), + ('𑘰', '𑘲'), + ('𑘻', '𑘼'), + ('𑘾', '𑘾'), + ('𑚬', '𑚬'), + ('𑚮', '𑚯'), + ('𑜞', '𑜞'), + ('𑜦', '𑜦'), + ('𑠬', '𑠮'), + ('𑠸', '𑠸'), + ('𑤱', '𑤵'), + ('𑤷', '𑤸'), + ('𑥀', '𑥀'), + ('𑥂', '𑥂'), + ('𑧑', '𑧓'), + ('𑧜', '𑧟'), + ('𑧤', '𑧤'), + ('𑨹', '𑨹'), + ('𑩗', '𑩘'), + ('𑪗', '𑪗'), + ('𑰯', '𑰯'), + ('𑰾', '𑰾'), + ('𑲩', '𑲩'), + ('𑲱', '𑲱'), + ('𑲴', '𑲴'), + ('𑶊', '𑶎'), + ('𑶓', '𑶔'), + ('𑶖', '𑶖'), + ('𑻵', '𑻶'), + ('𑼃', '𑼃'), + ('𑼴', '𑼵'), + ('𑼾', '𑼿'), + ('𖄪', '𖄬'), + ('𖽑', '𖾇'), +]; + +pub const T: &'static [(char, char)] = &[('ᆨ', 'ᇿ'), ('ퟋ', 'ퟻ')]; + +pub const V: &'static [(char, char)] = + &[('ᅠ', 'ᆧ'), ('ힰ', 'ퟆ'), ('𖵣', '𖵣'), ('𖵧', '𖵪')]; + +pub const ZWJ: &'static [(char, char)] = &[('\u{200d}', '\u{200d}')]; diff --git a/vendor/regex-syntax/src/unicode_tables/mod.rs b/vendor/regex-syntax/src/unicode_tables/mod.rs new file mode 100644 index 00000000000000..20736c7ac813e4 --- /dev/null +++ b/vendor/regex-syntax/src/unicode_tables/mod.rs @@ -0,0 +1,57 @@ +#[cfg(feature = "unicode-age")] +pub mod age; + +#[cfg(feature = "unicode-case")] +pub mod case_folding_simple; + +#[cfg(feature = "unicode-gencat")] +pub mod general_category; + +#[cfg(feature = "unicode-segment")] +pub mod grapheme_cluster_break; + +#[cfg(all(feature = "unicode-perl", not(feature = "unicode-gencat")))] +#[allow(dead_code)] +pub mod perl_decimal; + +#[cfg(all(feature = "unicode-perl", not(feature = "unicode-bool")))] +#[allow(dead_code)] +pub mod perl_space; + +#[cfg(feature = "unicode-perl")] +pub mod perl_word; + +#[cfg(feature = "unicode-bool")] +pub mod property_bool; + +#[cfg(any( + feature = "unicode-age", + feature = "unicode-bool", + feature = "unicode-gencat", + feature = "unicode-perl", + feature = "unicode-script", + feature = "unicode-segment", +))] +pub mod property_names; + +#[cfg(any( + feature = "unicode-age", + feature = "unicode-bool", + feature = "unicode-gencat", + feature = "unicode-perl", + feature = "unicode-script", + feature = "unicode-segment", +))] +pub mod property_values; + +#[cfg(feature = "unicode-script")] +pub mod script; + +#[cfg(feature = "unicode-script")] +pub mod script_extension; + +#[cfg(feature = 
"unicode-segment")] +pub mod sentence_break; + +#[cfg(feature = "unicode-segment")] +pub mod word_break; diff --git a/vendor/regex-syntax/src/unicode_tables/perl_decimal.rs b/vendor/regex-syntax/src/unicode_tables/perl_decimal.rs new file mode 100644 index 00000000000000..18996c2bfcb0f4 --- /dev/null +++ b/vendor/regex-syntax/src/unicode_tables/perl_decimal.rs @@ -0,0 +1,84 @@ +// DO NOT EDIT THIS FILE. IT WAS AUTOMATICALLY GENERATED BY: +// +// ucd-generate general-category ucd-16.0.0 --chars --include decimalnumber +// +// Unicode version: 16.0.0. +// +// ucd-generate 0.3.1 is available on crates.io. + +pub const BY_NAME: &'static [(&'static str, &'static [(char, char)])] = + &[("Decimal_Number", DECIMAL_NUMBER)]; + +pub const DECIMAL_NUMBER: &'static [(char, char)] = &[ + ('0', '9'), + ('٠', '٩'), + ('۰', '۹'), + ('߀', '߉'), + ('०', '९'), + ('০', '৯'), + ('੦', '੯'), + ('૦', '૯'), + ('୦', '୯'), + ('௦', '௯'), + ('౦', '౯'), + ('೦', '೯'), + ('൦', '൯'), + ('෦', '෯'), + ('๐', '๙'), + ('໐', '໙'), + ('༠', '༩'), + ('၀', '၉'), + ('႐', '႙'), + ('០', '៩'), + ('᠐', '᠙'), + ('᥆', '᥏'), + ('᧐', '᧙'), + ('᪀', '᪉'), + ('᪐', '᪙'), + ('᭐', '᭙'), + ('᮰', '᮹'), + ('᱀', '᱉'), + ('᱐', '᱙'), + ('꘠', '꘩'), + ('꣐', '꣙'), + ('꤀', '꤉'), + ('꧐', '꧙'), + ('꧰', '꧹'), + ('꩐', '꩙'), + ('꯰', '꯹'), + ('0', '9'), + ('𐒠', '𐒩'), + ('𐴰', '𐴹'), + ('𐵀', '𐵉'), + ('𑁦', '𑁯'), + ('𑃰', '𑃹'), + ('𑄶', '𑄿'), + ('𑇐', '𑇙'), + ('𑋰', '𑋹'), + ('𑑐', '𑑙'), + ('𑓐', '𑓙'), + ('𑙐', '𑙙'), + ('𑛀', '𑛉'), + ('𑛐', '𑛣'), + ('𑜰', '𑜹'), + ('𑣠', '𑣩'), + ('𑥐', '𑥙'), + ('𑯰', '𑯹'), + ('𑱐', '𑱙'), + ('𑵐', '𑵙'), + ('𑶠', '𑶩'), + ('𑽐', '𑽙'), + ('𖄰', '𖄹'), + ('𖩠', '𖩩'), + ('𖫀', '𖫉'), + ('𖭐', '𖭙'), + ('𖵰', '𖵹'), + ('𜳰', '𜳹'), + ('𝟎', '𝟿'), + ('𞅀', '𞅉'), + ('𞋰', '𞋹'), + ('𞓰', '𞓹'), + ('𞗱', '𞗺'), + ('𞥐', '𞥙'), + ('🯰', '🯹'), +]; diff --git a/vendor/regex-syntax/src/unicode_tables/perl_space.rs b/vendor/regex-syntax/src/unicode_tables/perl_space.rs new file mode 100644 index 00000000000000..c969e3733add9a --- /dev/null +++ b/vendor/regex-syntax/src/unicode_tables/perl_space.rs @@ -0,0 +1,23 @@ +// DO NOT EDIT THIS FILE. IT WAS AUTOMATICALLY GENERATED BY: +// +// ucd-generate property-bool ucd-16.0.0 --chars --include whitespace +// +// Unicode version: 16.0.0. +// +// ucd-generate 0.3.1 is available on crates.io. + +pub const BY_NAME: &'static [(&'static str, &'static [(char, char)])] = + &[("White_Space", WHITE_SPACE)]; + +pub const WHITE_SPACE: &'static [(char, char)] = &[ + ('\t', '\r'), + (' ', ' '), + ('\u{85}', '\u{85}'), + ('\u{a0}', '\u{a0}'), + ('\u{1680}', '\u{1680}'), + ('\u{2000}', '\u{200a}'), + ('\u{2028}', '\u{2029}'), + ('\u{202f}', '\u{202f}'), + ('\u{205f}', '\u{205f}'), + ('\u{3000}', '\u{3000}'), +]; diff --git a/vendor/regex-syntax/src/unicode_tables/perl_word.rs b/vendor/regex-syntax/src/unicode_tables/perl_word.rs new file mode 100644 index 00000000000000..21c8c0f9c839c8 --- /dev/null +++ b/vendor/regex-syntax/src/unicode_tables/perl_word.rs @@ -0,0 +1,806 @@ +// DO NOT EDIT THIS FILE. IT WAS AUTOMATICALLY GENERATED BY: +// +// ucd-generate perl-word ucd-16.0.0 --chars +// +// Unicode version: 16.0.0. +// +// ucd-generate 0.3.1 is available on crates.io. 
+ +pub const PERL_WORD: &'static [(char, char)] = &[ + ('0', '9'), + ('A', 'Z'), + ('_', '_'), + ('a', 'z'), + ('ª', 'ª'), + ('µ', 'µ'), + ('º', 'º'), + ('À', 'Ö'), + ('Ø', 'ö'), + ('ø', 'ˁ'), + ('ˆ', 'ˑ'), + ('ˠ', 'ˤ'), + ('ˬ', 'ˬ'), + ('ˮ', 'ˮ'), + ('\u{300}', 'ʹ'), + ('Ͷ', 'ͷ'), + ('ͺ', 'ͽ'), + ('Ϳ', 'Ϳ'), + ('Ά', 'Ά'), + ('Έ', 'Ί'), + ('Ό', 'Ό'), + ('Ύ', 'Ρ'), + ('Σ', 'ϵ'), + ('Ϸ', 'ҁ'), + ('\u{483}', 'ԯ'), + ('Ա', 'Ֆ'), + ('ՙ', 'ՙ'), + ('ՠ', 'ֈ'), + ('\u{591}', '\u{5bd}'), + ('\u{5bf}', '\u{5bf}'), + ('\u{5c1}', '\u{5c2}'), + ('\u{5c4}', '\u{5c5}'), + ('\u{5c7}', '\u{5c7}'), + ('א', 'ת'), + ('ׯ', 'ײ'), + ('\u{610}', '\u{61a}'), + ('ؠ', '٩'), + ('ٮ', 'ۓ'), + ('ە', '\u{6dc}'), + ('\u{6df}', '\u{6e8}'), + ('\u{6ea}', 'ۼ'), + ('ۿ', 'ۿ'), + ('ܐ', '\u{74a}'), + ('ݍ', 'ޱ'), + ('߀', 'ߵ'), + ('ߺ', 'ߺ'), + ('\u{7fd}', '\u{7fd}'), + ('ࠀ', '\u{82d}'), + ('ࡀ', '\u{85b}'), + ('ࡠ', 'ࡪ'), + ('ࡰ', 'ࢇ'), + ('ࢉ', 'ࢎ'), + ('\u{897}', '\u{8e1}'), + ('\u{8e3}', '\u{963}'), + ('०', '९'), + ('ॱ', 'ঃ'), + ('অ', 'ঌ'), + ('এ', 'ঐ'), + ('ও', 'ন'), + ('প', 'র'), + ('ল', 'ল'), + ('শ', 'হ'), + ('\u{9bc}', '\u{9c4}'), + ('ে', 'ৈ'), + ('ো', 'ৎ'), + ('\u{9d7}', '\u{9d7}'), + ('ড়', 'ঢ়'), + ('য়', '\u{9e3}'), + ('০', 'ৱ'), + ('ৼ', 'ৼ'), + ('\u{9fe}', '\u{9fe}'), + ('\u{a01}', 'ਃ'), + ('ਅ', 'ਊ'), + ('ਏ', 'ਐ'), + ('ਓ', 'ਨ'), + ('ਪ', 'ਰ'), + ('ਲ', 'ਲ਼'), + ('ਵ', 'ਸ਼'), + ('ਸ', 'ਹ'), + ('\u{a3c}', '\u{a3c}'), + ('ਾ', '\u{a42}'), + ('\u{a47}', '\u{a48}'), + ('\u{a4b}', '\u{a4d}'), + ('\u{a51}', '\u{a51}'), + ('ਖ਼', 'ੜ'), + ('ਫ਼', 'ਫ਼'), + ('੦', '\u{a75}'), + ('\u{a81}', 'ઃ'), + ('અ', 'ઍ'), + ('એ', 'ઑ'), + ('ઓ', 'ન'), + ('પ', 'ર'), + ('લ', 'ળ'), + ('વ', 'હ'), + ('\u{abc}', '\u{ac5}'), + ('\u{ac7}', 'ૉ'), + ('ો', '\u{acd}'), + ('ૐ', 'ૐ'), + ('ૠ', '\u{ae3}'), + ('૦', '૯'), + ('ૹ', '\u{aff}'), + ('\u{b01}', 'ଃ'), + ('ଅ', 'ଌ'), + ('ଏ', 'ଐ'), + ('ଓ', 'ନ'), + ('ପ', 'ର'), + ('ଲ', 'ଳ'), + ('ଵ', 'ହ'), + ('\u{b3c}', '\u{b44}'), + ('େ', 'ୈ'), + ('ୋ', '\u{b4d}'), + ('\u{b55}', '\u{b57}'), + ('ଡ଼', 'ଢ଼'), + ('ୟ', '\u{b63}'), + ('୦', '୯'), + ('ୱ', 'ୱ'), + ('\u{b82}', 'ஃ'), + ('அ', 'ஊ'), + ('எ', 'ஐ'), + ('ஒ', 'க'), + ('ங', 'ச'), + ('ஜ', 'ஜ'), + ('ஞ', 'ட'), + ('ண', 'த'), + ('ந', 'ப'), + ('ம', 'ஹ'), + ('\u{bbe}', 'ூ'), + ('ெ', 'ை'), + ('ொ', '\u{bcd}'), + ('ௐ', 'ௐ'), + ('\u{bd7}', '\u{bd7}'), + ('௦', '௯'), + ('\u{c00}', 'ఌ'), + ('ఎ', 'ఐ'), + ('ఒ', 'న'), + ('ప', 'హ'), + ('\u{c3c}', 'ౄ'), + ('\u{c46}', '\u{c48}'), + ('\u{c4a}', '\u{c4d}'), + ('\u{c55}', '\u{c56}'), + ('ౘ', 'ౚ'), + ('ౝ', 'ౝ'), + ('ౠ', '\u{c63}'), + ('౦', '౯'), + ('ಀ', 'ಃ'), + ('ಅ', 'ಌ'), + ('ಎ', 'ಐ'), + ('ಒ', 'ನ'), + ('ಪ', 'ಳ'), + ('ವ', 'ಹ'), + ('\u{cbc}', 'ೄ'), + ('\u{cc6}', '\u{cc8}'), + ('\u{cca}', '\u{ccd}'), + ('\u{cd5}', '\u{cd6}'), + ('ೝ', 'ೞ'), + ('ೠ', '\u{ce3}'), + ('೦', '೯'), + ('ೱ', 'ೳ'), + ('\u{d00}', 'ഌ'), + ('എ', 'ഐ'), + ('ഒ', '\u{d44}'), + ('െ', 'ൈ'), + ('ൊ', 'ൎ'), + ('ൔ', '\u{d57}'), + ('ൟ', '\u{d63}'), + ('൦', '൯'), + ('ൺ', 'ൿ'), + ('\u{d81}', 'ඃ'), + ('අ', 'ඖ'), + ('ක', 'න'), + ('ඳ', 'ර'), + ('ල', 'ල'), + ('ව', 'ෆ'), + ('\u{dca}', '\u{dca}'), + ('\u{dcf}', '\u{dd4}'), + ('\u{dd6}', '\u{dd6}'), + ('ෘ', '\u{ddf}'), + ('෦', '෯'), + ('ෲ', 'ෳ'), + ('ก', '\u{e3a}'), + ('เ', '\u{e4e}'), + ('๐', '๙'), + ('ກ', 'ຂ'), + ('ຄ', 'ຄ'), + ('ຆ', 'ຊ'), + ('ຌ', 'ຣ'), + ('ລ', 'ລ'), + ('ວ', 'ຽ'), + ('ເ', 'ໄ'), + ('ໆ', 'ໆ'), + ('\u{ec8}', '\u{ece}'), + ('໐', '໙'), + ('ໜ', 'ໟ'), + ('ༀ', 'ༀ'), + ('\u{f18}', '\u{f19}'), + ('༠', '༩'), + ('\u{f35}', '\u{f35}'), + ('\u{f37}', '\u{f37}'), + ('\u{f39}', '\u{f39}'), + ('༾', 'ཇ'), + ('ཉ', 'ཬ'), + ('\u{f71}', '\u{f84}'), + ('\u{f86}', 
'\u{f97}'), + ('\u{f99}', '\u{fbc}'), + ('\u{fc6}', '\u{fc6}'), + ('က', '၉'), + ('ၐ', '\u{109d}'), + ('Ⴀ', 'Ⴥ'), + ('Ⴧ', 'Ⴧ'), + ('Ⴭ', 'Ⴭ'), + ('ა', 'ჺ'), + ('ჼ', 'ቈ'), + ('ቊ', 'ቍ'), + ('ቐ', 'ቖ'), + ('ቘ', 'ቘ'), + ('ቚ', 'ቝ'), + ('በ', 'ኈ'), + ('ኊ', 'ኍ'), + ('ነ', 'ኰ'), + ('ኲ', 'ኵ'), + ('ኸ', 'ኾ'), + ('ዀ', 'ዀ'), + ('ዂ', 'ዅ'), + ('ወ', 'ዖ'), + ('ዘ', 'ጐ'), + ('ጒ', 'ጕ'), + ('ጘ', 'ፚ'), + ('\u{135d}', '\u{135f}'), + ('ᎀ', 'ᎏ'), + ('Ꭰ', 'Ᏽ'), + ('ᏸ', 'ᏽ'), + ('ᐁ', 'ᙬ'), + ('ᙯ', 'ᙿ'), + ('ᚁ', 'ᚚ'), + ('ᚠ', 'ᛪ'), + ('ᛮ', 'ᛸ'), + ('ᜀ', '\u{1715}'), + ('ᜟ', '\u{1734}'), + ('ᝀ', '\u{1753}'), + ('ᝠ', 'ᝬ'), + ('ᝮ', 'ᝰ'), + ('\u{1772}', '\u{1773}'), + ('ក', '\u{17d3}'), + ('ៗ', 'ៗ'), + ('ៜ', '\u{17dd}'), + ('០', '៩'), + ('\u{180b}', '\u{180d}'), + ('\u{180f}', '᠙'), + ('ᠠ', 'ᡸ'), + ('ᢀ', 'ᢪ'), + ('ᢰ', 'ᣵ'), + ('ᤀ', 'ᤞ'), + ('\u{1920}', 'ᤫ'), + ('ᤰ', '\u{193b}'), + ('᥆', 'ᥭ'), + ('ᥰ', 'ᥴ'), + ('ᦀ', 'ᦫ'), + ('ᦰ', 'ᧉ'), + ('᧐', '᧙'), + ('ᨀ', '\u{1a1b}'), + ('ᨠ', '\u{1a5e}'), + ('\u{1a60}', '\u{1a7c}'), + ('\u{1a7f}', '᪉'), + ('᪐', '᪙'), + ('ᪧ', 'ᪧ'), + ('\u{1ab0}', '\u{1ace}'), + ('\u{1b00}', 'ᭌ'), + ('᭐', '᭙'), + ('\u{1b6b}', '\u{1b73}'), + ('\u{1b80}', '\u{1bf3}'), + ('ᰀ', '\u{1c37}'), + ('᱀', '᱉'), + ('ᱍ', 'ᱽ'), + ('ᲀ', 'ᲊ'), + ('Ა', 'Ჺ'), + ('Ჽ', 'Ჿ'), + ('\u{1cd0}', '\u{1cd2}'), + ('\u{1cd4}', 'ᳺ'), + ('ᴀ', 'ἕ'), + ('Ἐ', 'Ἕ'), + ('ἠ', 'ὅ'), + ('Ὀ', 'Ὅ'), + ('ὐ', 'ὗ'), + ('Ὑ', 'Ὑ'), + ('Ὓ', 'Ὓ'), + ('Ὕ', 'Ὕ'), + ('Ὗ', 'ώ'), + ('ᾀ', 'ᾴ'), + ('ᾶ', 'ᾼ'), + ('ι', 'ι'), + ('ῂ', 'ῄ'), + ('ῆ', 'ῌ'), + ('ῐ', 'ΐ'), + ('ῖ', 'Ί'), + ('ῠ', 'Ῥ'), + ('ῲ', 'ῴ'), + ('ῶ', 'ῼ'), + ('\u{200c}', '\u{200d}'), + ('‿', '⁀'), + ('⁔', '⁔'), + ('ⁱ', 'ⁱ'), + ('ⁿ', 'ⁿ'), + ('ₐ', 'ₜ'), + ('\u{20d0}', '\u{20f0}'), + ('ℂ', 'ℂ'), + ('ℇ', 'ℇ'), + ('ℊ', 'ℓ'), + ('ℕ', 'ℕ'), + ('ℙ', 'ℝ'), + ('ℤ', 'ℤ'), + ('Ω', 'Ω'), + ('ℨ', 'ℨ'), + ('K', 'ℭ'), + ('ℯ', 'ℹ'), + ('ℼ', 'ℿ'), + ('ⅅ', 'ⅉ'), + ('ⅎ', 'ⅎ'), + ('Ⅰ', 'ↈ'), + ('Ⓐ', 'ⓩ'), + ('Ⰰ', 'ⳤ'), + ('Ⳬ', 'ⳳ'), + ('ⴀ', 'ⴥ'), + ('ⴧ', 'ⴧ'), + ('ⴭ', 'ⴭ'), + ('ⴰ', 'ⵧ'), + ('ⵯ', 'ⵯ'), + ('\u{2d7f}', 'ⶖ'), + ('ⶠ', 'ⶦ'), + ('ⶨ', 'ⶮ'), + ('ⶰ', 'ⶶ'), + ('ⶸ', 'ⶾ'), + ('ⷀ', 'ⷆ'), + ('ⷈ', 'ⷎ'), + ('ⷐ', 'ⷖ'), + ('ⷘ', 'ⷞ'), + ('\u{2de0}', '\u{2dff}'), + ('ⸯ', 'ⸯ'), + ('々', '〇'), + ('〡', '\u{302f}'), + ('〱', '〵'), + ('〸', '〼'), + ('ぁ', 'ゖ'), + ('\u{3099}', '\u{309a}'), + ('ゝ', 'ゟ'), + ('ァ', 'ヺ'), + ('ー', 'ヿ'), + ('ㄅ', 'ㄯ'), + ('ㄱ', 'ㆎ'), + ('ㆠ', 'ㆿ'), + ('ㇰ', 'ㇿ'), + ('㐀', '䶿'), + ('一', 'ꒌ'), + ('ꓐ', 'ꓽ'), + ('ꔀ', 'ꘌ'), + ('ꘐ', 'ꘫ'), + ('Ꙁ', '\u{a672}'), + ('\u{a674}', '\u{a67d}'), + ('ꙿ', '\u{a6f1}'), + ('ꜗ', 'ꜟ'), + ('Ꜣ', 'ꞈ'), + ('Ꞌ', 'ꟍ'), + ('Ꟑ', 'ꟑ'), + ('ꟓ', 'ꟓ'), + ('ꟕ', 'Ƛ'), + ('ꟲ', 'ꠧ'), + ('\u{a82c}', '\u{a82c}'), + ('ꡀ', 'ꡳ'), + ('ꢀ', '\u{a8c5}'), + ('꣐', '꣙'), + ('\u{a8e0}', 'ꣷ'), + ('ꣻ', 'ꣻ'), + ('ꣽ', '\u{a92d}'), + ('ꤰ', '\u{a953}'), + ('ꥠ', 'ꥼ'), + ('\u{a980}', '\u{a9c0}'), + ('ꧏ', '꧙'), + ('ꧠ', 'ꧾ'), + ('ꨀ', '\u{aa36}'), + ('ꩀ', 'ꩍ'), + ('꩐', '꩙'), + ('ꩠ', 'ꩶ'), + ('ꩺ', 'ꫂ'), + ('ꫛ', 'ꫝ'), + ('ꫠ', 'ꫯ'), + ('ꫲ', '\u{aaf6}'), + ('ꬁ', 'ꬆ'), + ('ꬉ', 'ꬎ'), + ('ꬑ', 'ꬖ'), + ('ꬠ', 'ꬦ'), + ('ꬨ', 'ꬮ'), + ('ꬰ', 'ꭚ'), + ('ꭜ', 'ꭩ'), + ('ꭰ', 'ꯪ'), + ('꯬', '\u{abed}'), + ('꯰', '꯹'), + ('가', '힣'), + ('ힰ', 'ퟆ'), + ('ퟋ', 'ퟻ'), + ('豈', '舘'), + ('並', '龎'), + ('ff', 'st'), + ('ﬓ', 'ﬗ'), + ('יִ', 'ﬨ'), + ('שׁ', 'זּ'), + ('טּ', 'לּ'), + ('מּ', 'מּ'), + ('נּ', 'סּ'), + ('ףּ', 'פּ'), + ('צּ', 'ﮱ'), + ('ﯓ', 'ﴽ'), + ('ﵐ', 'ﶏ'), + ('ﶒ', 'ﷇ'), + ('ﷰ', 'ﷻ'), + ('\u{fe00}', '\u{fe0f}'), + ('\u{fe20}', '\u{fe2f}'), + ('︳', '︴'), + ('﹍', '﹏'), + ('ﹰ', 'ﹴ'), + ('ﹶ', 'ﻼ'), + ('0', '9'), + ('A', 'Z'), + ('_', '_'), + ('a', 'z'), + 
('ヲ', 'ᄒ'), + ('ᅡ', 'ᅦ'), + ('ᅧ', 'ᅬ'), + ('ᅭ', 'ᅲ'), + ('ᅳ', 'ᅵ'), + ('𐀀', '𐀋'), + ('𐀍', '𐀦'), + ('𐀨', '𐀺'), + ('𐀼', '𐀽'), + ('𐀿', '𐁍'), + ('𐁐', '𐁝'), + ('𐂀', '𐃺'), + ('𐅀', '𐅴'), + ('\u{101fd}', '\u{101fd}'), + ('𐊀', '𐊜'), + ('𐊠', '𐋐'), + ('\u{102e0}', '\u{102e0}'), + ('𐌀', '𐌟'), + ('𐌭', '𐍊'), + ('𐍐', '\u{1037a}'), + ('𐎀', '𐎝'), + ('𐎠', '𐏃'), + ('𐏈', '𐏏'), + ('𐏑', '𐏕'), + ('𐐀', '𐒝'), + ('𐒠', '𐒩'), + ('𐒰', '𐓓'), + ('𐓘', '𐓻'), + ('𐔀', '𐔧'), + ('𐔰', '𐕣'), + ('𐕰', '𐕺'), + ('𐕼', '𐖊'), + ('𐖌', '𐖒'), + ('𐖔', '𐖕'), + ('𐖗', '𐖡'), + ('𐖣', '𐖱'), + ('𐖳', '𐖹'), + ('𐖻', '𐖼'), + ('𐗀', '𐗳'), + ('𐘀', '𐜶'), + ('𐝀', '𐝕'), + ('𐝠', '𐝧'), + ('𐞀', '𐞅'), + ('𐞇', '𐞰'), + ('𐞲', '𐞺'), + ('𐠀', '𐠅'), + ('𐠈', '𐠈'), + ('𐠊', '𐠵'), + ('𐠷', '𐠸'), + ('𐠼', '𐠼'), + ('𐠿', '𐡕'), + ('𐡠', '𐡶'), + ('𐢀', '𐢞'), + ('𐣠', '𐣲'), + ('𐣴', '𐣵'), + ('𐤀', '𐤕'), + ('𐤠', '𐤹'), + ('𐦀', '𐦷'), + ('𐦾', '𐦿'), + ('𐨀', '\u{10a03}'), + ('\u{10a05}', '\u{10a06}'), + ('\u{10a0c}', '𐨓'), + ('𐨕', '𐨗'), + ('𐨙', '𐨵'), + ('\u{10a38}', '\u{10a3a}'), + ('\u{10a3f}', '\u{10a3f}'), + ('𐩠', '𐩼'), + ('𐪀', '𐪜'), + ('𐫀', '𐫇'), + ('𐫉', '\u{10ae6}'), + ('𐬀', '𐬵'), + ('𐭀', '𐭕'), + ('𐭠', '𐭲'), + ('𐮀', '𐮑'), + ('𐰀', '𐱈'), + ('𐲀', '𐲲'), + ('𐳀', '𐳲'), + ('𐴀', '\u{10d27}'), + ('𐴰', '𐴹'), + ('𐵀', '𐵥'), + ('\u{10d69}', '\u{10d6d}'), + ('𐵯', '𐶅'), + ('𐺀', '𐺩'), + ('\u{10eab}', '\u{10eac}'), + ('𐺰', '𐺱'), + ('𐻂', '𐻄'), + ('\u{10efc}', '𐼜'), + ('𐼧', '𐼧'), + ('𐼰', '\u{10f50}'), + ('𐽰', '\u{10f85}'), + ('𐾰', '𐿄'), + ('𐿠', '𐿶'), + ('𑀀', '\u{11046}'), + ('𑁦', '𑁵'), + ('\u{1107f}', '\u{110ba}'), + ('\u{110c2}', '\u{110c2}'), + ('𑃐', '𑃨'), + ('𑃰', '𑃹'), + ('\u{11100}', '\u{11134}'), + ('𑄶', '𑄿'), + ('𑅄', '𑅇'), + ('𑅐', '\u{11173}'), + ('𑅶', '𑅶'), + ('\u{11180}', '𑇄'), + ('\u{111c9}', '\u{111cc}'), + ('𑇎', '𑇚'), + ('𑇜', '𑇜'), + ('𑈀', '𑈑'), + ('𑈓', '\u{11237}'), + ('\u{1123e}', '\u{11241}'), + ('𑊀', '𑊆'), + ('𑊈', '𑊈'), + ('𑊊', '𑊍'), + ('𑊏', '𑊝'), + ('𑊟', '𑊨'), + ('𑊰', '\u{112ea}'), + ('𑋰', '𑋹'), + ('\u{11300}', '𑌃'), + ('𑌅', '𑌌'), + ('𑌏', '𑌐'), + ('𑌓', '𑌨'), + ('𑌪', '𑌰'), + ('𑌲', '𑌳'), + ('𑌵', '𑌹'), + ('\u{1133b}', '𑍄'), + ('𑍇', '𑍈'), + ('𑍋', '\u{1134d}'), + ('𑍐', '𑍐'), + ('\u{11357}', '\u{11357}'), + ('𑍝', '𑍣'), + ('\u{11366}', '\u{1136c}'), + ('\u{11370}', '\u{11374}'), + ('𑎀', '𑎉'), + ('𑎋', '𑎋'), + ('𑎎', '𑎎'), + ('𑎐', '𑎵'), + ('𑎷', '\u{113c0}'), + ('\u{113c2}', '\u{113c2}'), + ('\u{113c5}', '\u{113c5}'), + ('\u{113c7}', '𑏊'), + ('𑏌', '𑏓'), + ('\u{113e1}', '\u{113e2}'), + ('𑐀', '𑑊'), + ('𑑐', '𑑙'), + ('\u{1145e}', '𑑡'), + ('𑒀', '𑓅'), + ('𑓇', '𑓇'), + ('𑓐', '𑓙'), + ('𑖀', '\u{115b5}'), + ('𑖸', '\u{115c0}'), + ('𑗘', '\u{115dd}'), + ('𑘀', '\u{11640}'), + ('𑙄', '𑙄'), + ('𑙐', '𑙙'), + ('𑚀', '𑚸'), + ('𑛀', '𑛉'), + ('𑛐', '𑛣'), + ('𑜀', '𑜚'), + ('\u{1171d}', '\u{1172b}'), + ('𑜰', '𑜹'), + ('𑝀', '𑝆'), + ('𑠀', '\u{1183a}'), + ('𑢠', '𑣩'), + ('𑣿', '𑤆'), + ('𑤉', '𑤉'), + ('𑤌', '𑤓'), + ('𑤕', '𑤖'), + ('𑤘', '𑤵'), + ('𑤷', '𑤸'), + ('\u{1193b}', '\u{11943}'), + ('𑥐', '𑥙'), + ('𑦠', '𑦧'), + ('𑦪', '\u{119d7}'), + ('\u{119da}', '𑧡'), + ('𑧣', '𑧤'), + ('𑨀', '\u{11a3e}'), + ('\u{11a47}', '\u{11a47}'), + ('𑩐', '\u{11a99}'), + ('𑪝', '𑪝'), + ('𑪰', '𑫸'), + ('𑯀', '𑯠'), + ('𑯰', '𑯹'), + ('𑰀', '𑰈'), + ('𑰊', '\u{11c36}'), + ('\u{11c38}', '𑱀'), + ('𑱐', '𑱙'), + ('𑱲', '𑲏'), + ('\u{11c92}', '\u{11ca7}'), + ('𑲩', '\u{11cb6}'), + ('𑴀', '𑴆'), + ('𑴈', '𑴉'), + ('𑴋', '\u{11d36}'), + ('\u{11d3a}', '\u{11d3a}'), + ('\u{11d3c}', '\u{11d3d}'), + ('\u{11d3f}', '\u{11d47}'), + ('𑵐', '𑵙'), + ('𑵠', '𑵥'), + ('𑵧', '𑵨'), + ('𑵪', '𑶎'), + ('\u{11d90}', '\u{11d91}'), + ('𑶓', '𑶘'), + ('𑶠', '𑶩'), + ('𑻠', '𑻶'), + ('\u{11f00}', '𑼐'), + ('𑼒', 
'\u{11f3a}'), + ('𑼾', '\u{11f42}'), + ('𑽐', '\u{11f5a}'), + ('𑾰', '𑾰'), + ('𒀀', '𒎙'), + ('𒐀', '𒑮'), + ('𒒀', '𒕃'), + ('𒾐', '𒿰'), + ('𓀀', '𓐯'), + ('\u{13440}', '\u{13455}'), + ('𓑠', '𔏺'), + ('𔐀', '𔙆'), + ('𖄀', '𖄹'), + ('𖠀', '𖨸'), + ('𖩀', '𖩞'), + ('𖩠', '𖩩'), + ('𖩰', '𖪾'), + ('𖫀', '𖫉'), + ('𖫐', '𖫭'), + ('\u{16af0}', '\u{16af4}'), + ('𖬀', '\u{16b36}'), + ('𖭀', '𖭃'), + ('𖭐', '𖭙'), + ('𖭣', '𖭷'), + ('𖭽', '𖮏'), + ('𖵀', '𖵬'), + ('𖵰', '𖵹'), + ('𖹀', '𖹿'), + ('𖼀', '𖽊'), + ('\u{16f4f}', '𖾇'), + ('\u{16f8f}', '𖾟'), + ('𖿠', '𖿡'), + ('𖿣', '\u{16fe4}'), + ('\u{16ff0}', '\u{16ff1}'), + ('𗀀', '𘟷'), + ('𘠀', '𘳕'), + ('𘳿', '𘴈'), + ('𚿰', '𚿳'), + ('𚿵', '𚿻'), + ('𚿽', '𚿾'), + ('𛀀', '𛄢'), + ('𛄲', '𛄲'), + ('𛅐', '𛅒'), + ('𛅕', '𛅕'), + ('𛅤', '𛅧'), + ('𛅰', '𛋻'), + ('𛰀', '𛱪'), + ('𛱰', '𛱼'), + ('𛲀', '𛲈'), + ('𛲐', '𛲙'), + ('\u{1bc9d}', '\u{1bc9e}'), + ('𜳰', '𜳹'), + ('\u{1cf00}', '\u{1cf2d}'), + ('\u{1cf30}', '\u{1cf46}'), + ('\u{1d165}', '\u{1d169}'), + ('\u{1d16d}', '\u{1d172}'), + ('\u{1d17b}', '\u{1d182}'), + ('\u{1d185}', '\u{1d18b}'), + ('\u{1d1aa}', '\u{1d1ad}'), + ('\u{1d242}', '\u{1d244}'), + ('𝐀', '𝑔'), + ('𝑖', '𝒜'), + ('𝒞', '𝒟'), + ('𝒢', '𝒢'), + ('𝒥', '𝒦'), + ('𝒩', '𝒬'), + ('𝒮', '𝒹'), + ('𝒻', '𝒻'), + ('𝒽', '𝓃'), + ('𝓅', '𝔅'), + ('𝔇', '𝔊'), + ('𝔍', '𝔔'), + ('𝔖', '𝔜'), + ('𝔞', '𝔹'), + ('𝔻', '𝔾'), + ('𝕀', '𝕄'), + ('𝕆', '𝕆'), + ('𝕊', '𝕐'), + ('𝕒', '𝚥'), + ('𝚨', '𝛀'), + ('𝛂', '𝛚'), + ('𝛜', '𝛺'), + ('𝛼', '𝜔'), + ('𝜖', '𝜴'), + ('𝜶', '𝝎'), + ('𝝐', '𝝮'), + ('𝝰', '𝞈'), + ('𝞊', '𝞨'), + ('𝞪', '𝟂'), + ('𝟄', '𝟋'), + ('𝟎', '𝟿'), + ('\u{1da00}', '\u{1da36}'), + ('\u{1da3b}', '\u{1da6c}'), + ('\u{1da75}', '\u{1da75}'), + ('\u{1da84}', '\u{1da84}'), + ('\u{1da9b}', '\u{1da9f}'), + ('\u{1daa1}', '\u{1daaf}'), + ('𝼀', '𝼞'), + ('𝼥', '𝼪'), + ('\u{1e000}', '\u{1e006}'), + ('\u{1e008}', '\u{1e018}'), + ('\u{1e01b}', '\u{1e021}'), + ('\u{1e023}', '\u{1e024}'), + ('\u{1e026}', '\u{1e02a}'), + ('𞀰', '𞁭'), + ('\u{1e08f}', '\u{1e08f}'), + ('𞄀', '𞄬'), + ('\u{1e130}', '𞄽'), + ('𞅀', '𞅉'), + ('𞅎', '𞅎'), + ('𞊐', '\u{1e2ae}'), + ('𞋀', '𞋹'), + ('𞓐', '𞓹'), + ('𞗐', '𞗺'), + ('𞟠', '𞟦'), + ('𞟨', '𞟫'), + ('𞟭', '𞟮'), + ('𞟰', '𞟾'), + ('𞠀', '𞣄'), + ('\u{1e8d0}', '\u{1e8d6}'), + ('𞤀', '𞥋'), + ('𞥐', '𞥙'), + ('𞸀', '𞸃'), + ('𞸅', '𞸟'), + ('𞸡', '𞸢'), + ('𞸤', '𞸤'), + ('𞸧', '𞸧'), + ('𞸩', '𞸲'), + ('𞸴', '𞸷'), + ('𞸹', '𞸹'), + ('𞸻', '𞸻'), + ('𞹂', '𞹂'), + ('𞹇', '𞹇'), + ('𞹉', '𞹉'), + ('𞹋', '𞹋'), + ('𞹍', '𞹏'), + ('𞹑', '𞹒'), + ('𞹔', '𞹔'), + ('𞹗', '𞹗'), + ('𞹙', '𞹙'), + ('𞹛', '𞹛'), + ('𞹝', '𞹝'), + ('𞹟', '𞹟'), + ('𞹡', '𞹢'), + ('𞹤', '𞹤'), + ('𞹧', '𞹪'), + ('𞹬', '𞹲'), + ('𞹴', '𞹷'), + ('𞹹', '𞹼'), + ('𞹾', '𞹾'), + ('𞺀', '𞺉'), + ('𞺋', '𞺛'), + ('𞺡', '𞺣'), + ('𞺥', '𞺩'), + ('𞺫', '𞺻'), + ('🄰', '🅉'), + ('🅐', '🅩'), + ('🅰', '🆉'), + ('🯰', '🯹'), + ('𠀀', '𪛟'), + ('𪜀', '𫜹'), + ('𫝀', '𫠝'), + ('𫠠', '𬺡'), + ('𬺰', '𮯠'), + ('𮯰', '𮹝'), + ('丽', '𪘀'), + ('𰀀', '𱍊'), + ('𱍐', '𲎯'), + ('\u{e0100}', '\u{e01ef}'), +]; diff --git a/vendor/regex-syntax/src/unicode_tables/property_bool.rs b/vendor/regex-syntax/src/unicode_tables/property_bool.rs new file mode 100644 index 00000000000000..3d62edc42317ba --- /dev/null +++ b/vendor/regex-syntax/src/unicode_tables/property_bool.rs @@ -0,0 +1,12095 @@ +// DO NOT EDIT THIS FILE. IT WAS AUTOMATICALLY GENERATED BY: +// +// ucd-generate property-bool ucd-16.0.0 --chars +// +// Unicode version: 16.0.0. +// +// ucd-generate 0.3.1 is available on crates.io. 
+ +pub const BY_NAME: &'static [(&'static str, &'static [(char, char)])] = &[ + ("ASCII_Hex_Digit", ASCII_HEX_DIGIT), + ("Alphabetic", ALPHABETIC), + ("Bidi_Control", BIDI_CONTROL), + ("Bidi_Mirrored", BIDI_MIRRORED), + ("Case_Ignorable", CASE_IGNORABLE), + ("Cased", CASED), + ("Changes_When_Casefolded", CHANGES_WHEN_CASEFOLDED), + ("Changes_When_Casemapped", CHANGES_WHEN_CASEMAPPED), + ("Changes_When_Lowercased", CHANGES_WHEN_LOWERCASED), + ("Changes_When_Titlecased", CHANGES_WHEN_TITLECASED), + ("Changes_When_Uppercased", CHANGES_WHEN_UPPERCASED), + ("Dash", DASH), + ("Default_Ignorable_Code_Point", DEFAULT_IGNORABLE_CODE_POINT), + ("Deprecated", DEPRECATED), + ("Diacritic", DIACRITIC), + ("Emoji", EMOJI), + ("Emoji_Component", EMOJI_COMPONENT), + ("Emoji_Modifier", EMOJI_MODIFIER), + ("Emoji_Modifier_Base", EMOJI_MODIFIER_BASE), + ("Emoji_Presentation", EMOJI_PRESENTATION), + ("Extended_Pictographic", EXTENDED_PICTOGRAPHIC), + ("Extender", EXTENDER), + ("Grapheme_Base", GRAPHEME_BASE), + ("Grapheme_Extend", GRAPHEME_EXTEND), + ("Grapheme_Link", GRAPHEME_LINK), + ("Hex_Digit", HEX_DIGIT), + ("Hyphen", HYPHEN), + ("IDS_Binary_Operator", IDS_BINARY_OPERATOR), + ("IDS_Trinary_Operator", IDS_TRINARY_OPERATOR), + ("IDS_Unary_Operator", IDS_UNARY_OPERATOR), + ("ID_Compat_Math_Continue", ID_COMPAT_MATH_CONTINUE), + ("ID_Compat_Math_Start", ID_COMPAT_MATH_START), + ("ID_Continue", ID_CONTINUE), + ("ID_Start", ID_START), + ("Ideographic", IDEOGRAPHIC), + ("InCB", INCB), + ("Join_Control", JOIN_CONTROL), + ("Logical_Order_Exception", LOGICAL_ORDER_EXCEPTION), + ("Lowercase", LOWERCASE), + ("Math", MATH), + ("Modifier_Combining_Mark", MODIFIER_COMBINING_MARK), + ("Noncharacter_Code_Point", NONCHARACTER_CODE_POINT), + ("Other_Alphabetic", OTHER_ALPHABETIC), + ("Other_Default_Ignorable_Code_Point", OTHER_DEFAULT_IGNORABLE_CODE_POINT), + ("Other_Grapheme_Extend", OTHER_GRAPHEME_EXTEND), + ("Other_ID_Continue", OTHER_ID_CONTINUE), + ("Other_ID_Start", OTHER_ID_START), + ("Other_Lowercase", OTHER_LOWERCASE), + ("Other_Math", OTHER_MATH), + ("Other_Uppercase", OTHER_UPPERCASE), + ("Pattern_Syntax", PATTERN_SYNTAX), + ("Pattern_White_Space", PATTERN_WHITE_SPACE), + ("Prepended_Concatenation_Mark", PREPENDED_CONCATENATION_MARK), + ("Quotation_Mark", QUOTATION_MARK), + ("Radical", RADICAL), + ("Regional_Indicator", REGIONAL_INDICATOR), + ("Sentence_Terminal", SENTENCE_TERMINAL), + ("Soft_Dotted", SOFT_DOTTED), + ("Terminal_Punctuation", TERMINAL_PUNCTUATION), + ("Unified_Ideograph", UNIFIED_IDEOGRAPH), + ("Uppercase", UPPERCASE), + ("Variation_Selector", VARIATION_SELECTOR), + ("White_Space", WHITE_SPACE), + ("XID_Continue", XID_CONTINUE), + ("XID_Start", XID_START), +]; + +pub const ASCII_HEX_DIGIT: &'static [(char, char)] = + &[('0', '9'), ('A', 'F'), ('a', 'f')]; + +pub const ALPHABETIC: &'static [(char, char)] = &[ + ('A', 'Z'), + ('a', 'z'), + ('ª', 'ª'), + ('µ', 'µ'), + ('º', 'º'), + ('À', 'Ö'), + ('Ø', 'ö'), + ('ø', 'ˁ'), + ('ˆ', 'ˑ'), + ('ˠ', 'ˤ'), + ('ˬ', 'ˬ'), + ('ˮ', 'ˮ'), + ('\u{345}', '\u{345}'), + ('\u{363}', 'ʹ'), + ('Ͷ', 'ͷ'), + ('ͺ', 'ͽ'), + ('Ϳ', 'Ϳ'), + ('Ά', 'Ά'), + ('Έ', 'Ί'), + ('Ό', 'Ό'), + ('Ύ', 'Ρ'), + ('Σ', 'ϵ'), + ('Ϸ', 'ҁ'), + ('Ҋ', 'ԯ'), + ('Ա', 'Ֆ'), + ('ՙ', 'ՙ'), + ('ՠ', 'ֈ'), + ('\u{5b0}', '\u{5bd}'), + ('\u{5bf}', '\u{5bf}'), + ('\u{5c1}', '\u{5c2}'), + ('\u{5c4}', '\u{5c5}'), + ('\u{5c7}', '\u{5c7}'), + ('א', 'ת'), + ('ׯ', 'ײ'), + ('\u{610}', '\u{61a}'), + ('ؠ', '\u{657}'), + ('\u{659}', '\u{65f}'), + ('ٮ', 'ۓ'), + ('ە', '\u{6dc}'), + ('\u{6e1}', '\u{6e8}'), + ('\u{6ed}', 
'ۯ'), + ('ۺ', 'ۼ'), + ('ۿ', 'ۿ'), + ('ܐ', '\u{73f}'), + ('ݍ', 'ޱ'), + ('ߊ', 'ߪ'), + ('ߴ', 'ߵ'), + ('ߺ', 'ߺ'), + ('ࠀ', '\u{817}'), + ('ࠚ', '\u{82c}'), + ('ࡀ', 'ࡘ'), + ('ࡠ', 'ࡪ'), + ('ࡰ', 'ࢇ'), + ('ࢉ', 'ࢎ'), + ('\u{897}', '\u{897}'), + ('ࢠ', 'ࣉ'), + ('\u{8d4}', '\u{8df}'), + ('\u{8e3}', '\u{8e9}'), + ('\u{8f0}', 'ऻ'), + ('ऽ', 'ौ'), + ('ॎ', 'ॐ'), + ('\u{955}', '\u{963}'), + ('ॱ', 'ঃ'), + ('অ', 'ঌ'), + ('এ', 'ঐ'), + ('ও', 'ন'), + ('প', 'র'), + ('ল', 'ল'), + ('শ', 'হ'), + ('ঽ', '\u{9c4}'), + ('ে', 'ৈ'), + ('ো', 'ৌ'), + ('ৎ', 'ৎ'), + ('\u{9d7}', '\u{9d7}'), + ('ড়', 'ঢ়'), + ('য়', '\u{9e3}'), + ('ৰ', 'ৱ'), + ('ৼ', 'ৼ'), + ('\u{a01}', 'ਃ'), + ('ਅ', 'ਊ'), + ('ਏ', 'ਐ'), + ('ਓ', 'ਨ'), + ('ਪ', 'ਰ'), + ('ਲ', 'ਲ਼'), + ('ਵ', 'ਸ਼'), + ('ਸ', 'ਹ'), + ('ਾ', '\u{a42}'), + ('\u{a47}', '\u{a48}'), + ('\u{a4b}', '\u{a4c}'), + ('\u{a51}', '\u{a51}'), + ('ਖ਼', 'ੜ'), + ('ਫ਼', 'ਫ਼'), + ('\u{a70}', '\u{a75}'), + ('\u{a81}', 'ઃ'), + ('અ', 'ઍ'), + ('એ', 'ઑ'), + ('ઓ', 'ન'), + ('પ', 'ર'), + ('લ', 'ળ'), + ('વ', 'હ'), + ('ઽ', '\u{ac5}'), + ('\u{ac7}', 'ૉ'), + ('ો', 'ૌ'), + ('ૐ', 'ૐ'), + ('ૠ', '\u{ae3}'), + ('ૹ', '\u{afc}'), + ('\u{b01}', 'ଃ'), + ('ଅ', 'ଌ'), + ('ଏ', 'ଐ'), + ('ଓ', 'ନ'), + ('ପ', 'ର'), + ('ଲ', 'ଳ'), + ('ଵ', 'ହ'), + ('ଽ', '\u{b44}'), + ('େ', 'ୈ'), + ('ୋ', 'ୌ'), + ('\u{b56}', '\u{b57}'), + ('ଡ଼', 'ଢ଼'), + ('ୟ', '\u{b63}'), + ('ୱ', 'ୱ'), + ('\u{b82}', 'ஃ'), + ('அ', 'ஊ'), + ('எ', 'ஐ'), + ('ஒ', 'க'), + ('ங', 'ச'), + ('ஜ', 'ஜ'), + ('ஞ', 'ட'), + ('ண', 'த'), + ('ந', 'ப'), + ('ம', 'ஹ'), + ('\u{bbe}', 'ூ'), + ('ெ', 'ை'), + ('ொ', 'ௌ'), + ('ௐ', 'ௐ'), + ('\u{bd7}', '\u{bd7}'), + ('\u{c00}', 'ఌ'), + ('ఎ', 'ఐ'), + ('ఒ', 'న'), + ('ప', 'హ'), + ('ఽ', 'ౄ'), + ('\u{c46}', '\u{c48}'), + ('\u{c4a}', '\u{c4c}'), + ('\u{c55}', '\u{c56}'), + ('ౘ', 'ౚ'), + ('ౝ', 'ౝ'), + ('ౠ', '\u{c63}'), + ('ಀ', 'ಃ'), + ('ಅ', 'ಌ'), + ('ಎ', 'ಐ'), + ('ಒ', 'ನ'), + ('ಪ', 'ಳ'), + ('ವ', 'ಹ'), + ('ಽ', 'ೄ'), + ('\u{cc6}', '\u{cc8}'), + ('\u{cca}', '\u{ccc}'), + ('\u{cd5}', '\u{cd6}'), + ('ೝ', 'ೞ'), + ('ೠ', '\u{ce3}'), + ('ೱ', 'ೳ'), + ('\u{d00}', 'ഌ'), + ('എ', 'ഐ'), + ('ഒ', 'ഺ'), + ('ഽ', '\u{d44}'), + ('െ', 'ൈ'), + ('ൊ', 'ൌ'), + ('ൎ', 'ൎ'), + ('ൔ', '\u{d57}'), + ('ൟ', '\u{d63}'), + ('ൺ', 'ൿ'), + ('\u{d81}', 'ඃ'), + ('අ', 'ඖ'), + ('ක', 'න'), + ('ඳ', 'ර'), + ('ල', 'ල'), + ('ව', 'ෆ'), + ('\u{dcf}', '\u{dd4}'), + ('\u{dd6}', '\u{dd6}'), + ('ෘ', '\u{ddf}'), + ('ෲ', 'ෳ'), + ('ก', '\u{e3a}'), + ('เ', 'ๆ'), + ('\u{e4d}', '\u{e4d}'), + ('ກ', 'ຂ'), + ('ຄ', 'ຄ'), + ('ຆ', 'ຊ'), + ('ຌ', 'ຣ'), + ('ລ', 'ລ'), + ('ວ', '\u{eb9}'), + ('\u{ebb}', 'ຽ'), + ('ເ', 'ໄ'), + ('ໆ', 'ໆ'), + ('\u{ecd}', '\u{ecd}'), + ('ໜ', 'ໟ'), + ('ༀ', 'ༀ'), + ('ཀ', 'ཇ'), + ('ཉ', 'ཬ'), + ('\u{f71}', '\u{f83}'), + ('ྈ', '\u{f97}'), + ('\u{f99}', '\u{fbc}'), + ('က', '\u{1036}'), + ('း', 'း'), + ('ျ', 'ဿ'), + ('ၐ', 'ႏ'), + ('ႚ', '\u{109d}'), + ('Ⴀ', 'Ⴥ'), + ('Ⴧ', 'Ⴧ'), + ('Ⴭ', 'Ⴭ'), + ('ა', 'ჺ'), + ('ჼ', 'ቈ'), + ('ቊ', 'ቍ'), + ('ቐ', 'ቖ'), + ('ቘ', 'ቘ'), + ('ቚ', 'ቝ'), + ('በ', 'ኈ'), + ('ኊ', 'ኍ'), + ('ነ', 'ኰ'), + ('ኲ', 'ኵ'), + ('ኸ', 'ኾ'), + ('ዀ', 'ዀ'), + ('ዂ', 'ዅ'), + ('ወ', 'ዖ'), + ('ዘ', 'ጐ'), + ('ጒ', 'ጕ'), + ('ጘ', 'ፚ'), + ('ᎀ', 'ᎏ'), + ('Ꭰ', 'Ᏽ'), + ('ᏸ', 'ᏽ'), + ('ᐁ', 'ᙬ'), + ('ᙯ', 'ᙿ'), + ('ᚁ', 'ᚚ'), + ('ᚠ', 'ᛪ'), + ('ᛮ', 'ᛸ'), + ('ᜀ', '\u{1713}'), + ('ᜟ', '\u{1733}'), + ('ᝀ', '\u{1753}'), + ('ᝠ', 'ᝬ'), + ('ᝮ', 'ᝰ'), + ('\u{1772}', '\u{1773}'), + ('ក', 'ឳ'), + ('ា', 'ៈ'), + ('ៗ', 'ៗ'), + ('ៜ', 'ៜ'), + ('ᠠ', 'ᡸ'), + ('ᢀ', 'ᢪ'), + ('ᢰ', 'ᣵ'), + ('ᤀ', 'ᤞ'), + ('\u{1920}', 'ᤫ'), + ('ᤰ', 'ᤸ'), + ('ᥐ', 'ᥭ'), + ('ᥰ', 'ᥴ'), + ('ᦀ', 'ᦫ'), + ('ᦰ', 'ᧉ'), + ('ᨀ', '\u{1a1b}'), + ('ᨠ', '\u{1a5e}'), + ('ᩡ', '\u{1a74}'), + 
('ᪧ', 'ᪧ'), + ('\u{1abf}', '\u{1ac0}'), + ('\u{1acc}', '\u{1ace}'), + ('\u{1b00}', 'ᬳ'), + ('\u{1b35}', '\u{1b43}'), + ('ᭅ', 'ᭌ'), + ('\u{1b80}', '\u{1ba9}'), + ('\u{1bac}', 'ᮯ'), + ('ᮺ', 'ᯥ'), + ('ᯧ', '\u{1bf1}'), + ('ᰀ', '\u{1c36}'), + ('ᱍ', 'ᱏ'), + ('ᱚ', 'ᱽ'), + ('ᲀ', 'ᲊ'), + ('Ა', 'Ჺ'), + ('Ჽ', 'Ჿ'), + ('ᳩ', 'ᳬ'), + ('ᳮ', 'ᳳ'), + ('ᳵ', 'ᳶ'), + ('ᳺ', 'ᳺ'), + ('ᴀ', 'ᶿ'), + ('\u{1dd3}', '\u{1df4}'), + ('Ḁ', 'ἕ'), + ('Ἐ', 'Ἕ'), + ('ἠ', 'ὅ'), + ('Ὀ', 'Ὅ'), + ('ὐ', 'ὗ'), + ('Ὑ', 'Ὑ'), + ('Ὓ', 'Ὓ'), + ('Ὕ', 'Ὕ'), + ('Ὗ', 'ώ'), + ('ᾀ', 'ᾴ'), + ('ᾶ', 'ᾼ'), + ('ι', 'ι'), + ('ῂ', 'ῄ'), + ('ῆ', 'ῌ'), + ('ῐ', 'ΐ'), + ('ῖ', 'Ί'), + ('ῠ', 'Ῥ'), + ('ῲ', 'ῴ'), + ('ῶ', 'ῼ'), + ('ⁱ', 'ⁱ'), + ('ⁿ', 'ⁿ'), + ('ₐ', 'ₜ'), + ('ℂ', 'ℂ'), + ('ℇ', 'ℇ'), + ('ℊ', 'ℓ'), + ('ℕ', 'ℕ'), + ('ℙ', 'ℝ'), + ('ℤ', 'ℤ'), + ('Ω', 'Ω'), + ('ℨ', 'ℨ'), + ('K', 'ℭ'), + ('ℯ', 'ℹ'), + ('ℼ', 'ℿ'), + ('ⅅ', 'ⅉ'), + ('ⅎ', 'ⅎ'), + ('Ⅰ', 'ↈ'), + ('Ⓐ', 'ⓩ'), + ('Ⰰ', 'ⳤ'), + ('Ⳬ', 'ⳮ'), + ('Ⳳ', 'ⳳ'), + ('ⴀ', 'ⴥ'), + ('ⴧ', 'ⴧ'), + ('ⴭ', 'ⴭ'), + ('ⴰ', 'ⵧ'), + ('ⵯ', 'ⵯ'), + ('ⶀ', 'ⶖ'), + ('ⶠ', 'ⶦ'), + ('ⶨ', 'ⶮ'), + ('ⶰ', 'ⶶ'), + ('ⶸ', 'ⶾ'), + ('ⷀ', 'ⷆ'), + ('ⷈ', 'ⷎ'), + ('ⷐ', 'ⷖ'), + ('ⷘ', 'ⷞ'), + ('\u{2de0}', '\u{2dff}'), + ('ⸯ', 'ⸯ'), + ('々', '〇'), + ('〡', '〩'), + ('〱', '〵'), + ('〸', '〼'), + ('ぁ', 'ゖ'), + ('ゝ', 'ゟ'), + ('ァ', 'ヺ'), + ('ー', 'ヿ'), + ('ㄅ', 'ㄯ'), + ('ㄱ', 'ㆎ'), + ('ㆠ', 'ㆿ'), + ('ㇰ', 'ㇿ'), + ('㐀', '䶿'), + ('一', 'ꒌ'), + ('ꓐ', 'ꓽ'), + ('ꔀ', 'ꘌ'), + ('ꘐ', 'ꘟ'), + ('ꘪ', 'ꘫ'), + ('Ꙁ', 'ꙮ'), + ('\u{a674}', '\u{a67b}'), + ('ꙿ', 'ꛯ'), + ('ꜗ', 'ꜟ'), + ('Ꜣ', 'ꞈ'), + ('Ꞌ', 'ꟍ'), + ('Ꟑ', 'ꟑ'), + ('ꟓ', 'ꟓ'), + ('ꟕ', 'Ƛ'), + ('ꟲ', 'ꠅ'), + ('ꠇ', 'ꠧ'), + ('ꡀ', 'ꡳ'), + ('ꢀ', 'ꣃ'), + ('\u{a8c5}', '\u{a8c5}'), + ('ꣲ', 'ꣷ'), + ('ꣻ', 'ꣻ'), + ('ꣽ', '\u{a8ff}'), + ('ꤊ', '\u{a92a}'), + ('ꤰ', 'ꥒ'), + ('ꥠ', 'ꥼ'), + ('\u{a980}', 'ꦲ'), + ('ꦴ', 'ꦿ'), + ('ꧏ', 'ꧏ'), + ('ꧠ', 'ꧯ'), + ('ꧺ', 'ꧾ'), + ('ꨀ', '\u{aa36}'), + ('ꩀ', 'ꩍ'), + ('ꩠ', 'ꩶ'), + ('ꩺ', '\u{aabe}'), + ('ꫀ', 'ꫀ'), + ('ꫂ', 'ꫂ'), + ('ꫛ', 'ꫝ'), + ('ꫠ', 'ꫯ'), + ('ꫲ', 'ꫵ'), + ('ꬁ', 'ꬆ'), + ('ꬉ', 'ꬎ'), + ('ꬑ', 'ꬖ'), + ('ꬠ', 'ꬦ'), + ('ꬨ', 'ꬮ'), + ('ꬰ', 'ꭚ'), + ('ꭜ', 'ꭩ'), + ('ꭰ', 'ꯪ'), + ('가', '힣'), + ('ힰ', 'ퟆ'), + ('ퟋ', 'ퟻ'), + ('豈', '舘'), + ('並', '龎'), + ('ff', 'st'), + ('ﬓ', 'ﬗ'), + ('יִ', 'ﬨ'), + ('שׁ', 'זּ'), + ('טּ', 'לּ'), + ('מּ', 'מּ'), + ('נּ', 'סּ'), + ('ףּ', 'פּ'), + ('צּ', 'ﮱ'), + ('ﯓ', 'ﴽ'), + ('ﵐ', 'ﶏ'), + ('ﶒ', 'ﷇ'), + ('ﷰ', 'ﷻ'), + ('ﹰ', 'ﹴ'), + ('ﹶ', 'ﻼ'), + ('A', 'Z'), + ('a', 'z'), + ('ヲ', 'ᄒ'), + ('ᅡ', 'ᅦ'), + ('ᅧ', 'ᅬ'), + ('ᅭ', 'ᅲ'), + ('ᅳ', 'ᅵ'), + ('𐀀', '𐀋'), + ('𐀍', '𐀦'), + ('𐀨', '𐀺'), + ('𐀼', '𐀽'), + ('𐀿', '𐁍'), + ('𐁐', '𐁝'), + ('𐂀', '𐃺'), + ('𐅀', '𐅴'), + ('𐊀', '𐊜'), + ('𐊠', '𐋐'), + ('𐌀', '𐌟'), + ('𐌭', '𐍊'), + ('𐍐', '\u{1037a}'), + ('𐎀', '𐎝'), + ('𐎠', '𐏃'), + ('𐏈', '𐏏'), + ('𐏑', '𐏕'), + ('𐐀', '𐒝'), + ('𐒰', '𐓓'), + ('𐓘', '𐓻'), + ('𐔀', '𐔧'), + ('𐔰', '𐕣'), + ('𐕰', '𐕺'), + ('𐕼', '𐖊'), + ('𐖌', '𐖒'), + ('𐖔', '𐖕'), + ('𐖗', '𐖡'), + ('𐖣', '𐖱'), + ('𐖳', '𐖹'), + ('𐖻', '𐖼'), + ('𐗀', '𐗳'), + ('𐘀', '𐜶'), + ('𐝀', '𐝕'), + ('𐝠', '𐝧'), + ('𐞀', '𐞅'), + ('𐞇', '𐞰'), + ('𐞲', '𐞺'), + ('𐠀', '𐠅'), + ('𐠈', '𐠈'), + ('𐠊', '𐠵'), + ('𐠷', '𐠸'), + ('𐠼', '𐠼'), + ('𐠿', '𐡕'), + ('𐡠', '𐡶'), + ('𐢀', '𐢞'), + ('𐣠', '𐣲'), + ('𐣴', '𐣵'), + ('𐤀', '𐤕'), + ('𐤠', '𐤹'), + ('𐦀', '𐦷'), + ('𐦾', '𐦿'), + ('𐨀', '\u{10a03}'), + ('\u{10a05}', '\u{10a06}'), + ('\u{10a0c}', '𐨓'), + ('𐨕', '𐨗'), + ('𐨙', '𐨵'), + ('𐩠', '𐩼'), + ('𐪀', '𐪜'), + ('𐫀', '𐫇'), + ('𐫉', '𐫤'), + ('𐬀', '𐬵'), + ('𐭀', '𐭕'), + ('𐭠', '𐭲'), + ('𐮀', '𐮑'), + ('𐰀', '𐱈'), + ('𐲀', '𐲲'), + ('𐳀', '𐳲'), + ('𐴀', '\u{10d27}'), + ('𐵊', '𐵥'), + ('\u{10d69}', '\u{10d69}'), + ('𐵯', '𐶅'), 
+ ('𐺀', '𐺩'), + ('\u{10eab}', '\u{10eac}'), + ('𐺰', '𐺱'), + ('𐻂', '𐻄'), + ('\u{10efc}', '\u{10efc}'), + ('𐼀', '𐼜'), + ('𐼧', '𐼧'), + ('𐼰', '𐽅'), + ('𐽰', '𐾁'), + ('𐾰', '𐿄'), + ('𐿠', '𐿶'), + ('𑀀', '\u{11045}'), + ('𑁱', '𑁵'), + ('\u{11080}', '𑂸'), + ('\u{110c2}', '\u{110c2}'), + ('𑃐', '𑃨'), + ('\u{11100}', '\u{11132}'), + ('𑅄', '𑅇'), + ('𑅐', '𑅲'), + ('𑅶', '𑅶'), + ('\u{11180}', '𑆿'), + ('𑇁', '𑇄'), + ('𑇎', '\u{111cf}'), + ('𑇚', '𑇚'), + ('𑇜', '𑇜'), + ('𑈀', '𑈑'), + ('𑈓', '\u{11234}'), + ('\u{11237}', '\u{11237}'), + ('\u{1123e}', '\u{11241}'), + ('𑊀', '𑊆'), + ('𑊈', '𑊈'), + ('𑊊', '𑊍'), + ('𑊏', '𑊝'), + ('𑊟', '𑊨'), + ('𑊰', '\u{112e8}'), + ('\u{11300}', '𑌃'), + ('𑌅', '𑌌'), + ('𑌏', '𑌐'), + ('𑌓', '𑌨'), + ('𑌪', '𑌰'), + ('𑌲', '𑌳'), + ('𑌵', '𑌹'), + ('𑌽', '𑍄'), + ('𑍇', '𑍈'), + ('𑍋', '𑍌'), + ('𑍐', '𑍐'), + ('\u{11357}', '\u{11357}'), + ('𑍝', '𑍣'), + ('𑎀', '𑎉'), + ('𑎋', '𑎋'), + ('𑎎', '𑎎'), + ('𑎐', '𑎵'), + ('𑎷', '\u{113c0}'), + ('\u{113c2}', '\u{113c2}'), + ('\u{113c5}', '\u{113c5}'), + ('\u{113c7}', '𑏊'), + ('𑏌', '𑏍'), + ('𑏑', '𑏑'), + ('𑏓', '𑏓'), + ('𑐀', '𑑁'), + ('\u{11443}', '𑑅'), + ('𑑇', '𑑊'), + ('𑑟', '𑑡'), + ('𑒀', '𑓁'), + ('𑓄', '𑓅'), + ('𑓇', '𑓇'), + ('𑖀', '\u{115b5}'), + ('𑖸', '𑖾'), + ('𑗘', '\u{115dd}'), + ('𑘀', '𑘾'), + ('\u{11640}', '\u{11640}'), + ('𑙄', '𑙄'), + ('𑚀', '\u{116b5}'), + ('𑚸', '𑚸'), + ('𑜀', '𑜚'), + ('\u{1171d}', '\u{1172a}'), + ('𑝀', '𑝆'), + ('𑠀', '𑠸'), + ('𑢠', '𑣟'), + ('𑣿', '𑤆'), + ('𑤉', '𑤉'), + ('𑤌', '𑤓'), + ('𑤕', '𑤖'), + ('𑤘', '𑤵'), + ('𑤷', '𑤸'), + ('\u{1193b}', '\u{1193c}'), + ('𑤿', '𑥂'), + ('𑦠', '𑦧'), + ('𑦪', '\u{119d7}'), + ('\u{119da}', '𑧟'), + ('𑧡', '𑧡'), + ('𑧣', '𑧤'), + ('𑨀', '𑨲'), + ('\u{11a35}', '\u{11a3e}'), + ('𑩐', '𑪗'), + ('𑪝', '𑪝'), + ('𑪰', '𑫸'), + ('𑯀', '𑯠'), + ('𑰀', '𑰈'), + ('𑰊', '\u{11c36}'), + ('\u{11c38}', '𑰾'), + ('𑱀', '𑱀'), + ('𑱲', '𑲏'), + ('\u{11c92}', '\u{11ca7}'), + ('𑲩', '\u{11cb6}'), + ('𑴀', '𑴆'), + ('𑴈', '𑴉'), + ('𑴋', '\u{11d36}'), + ('\u{11d3a}', '\u{11d3a}'), + ('\u{11d3c}', '\u{11d3d}'), + ('\u{11d3f}', '\u{11d41}'), + ('\u{11d43}', '\u{11d43}'), + ('𑵆', '\u{11d47}'), + ('𑵠', '𑵥'), + ('𑵧', '𑵨'), + ('𑵪', '𑶎'), + ('\u{11d90}', '\u{11d91}'), + ('𑶓', '𑶖'), + ('𑶘', '𑶘'), + ('𑻠', '𑻶'), + ('\u{11f00}', '𑼐'), + ('𑼒', '\u{11f3a}'), + ('𑼾', '\u{11f40}'), + ('𑾰', '𑾰'), + ('𒀀', '𒎙'), + ('𒐀', '𒑮'), + ('𒒀', '𒕃'), + ('𒾐', '𒿰'), + ('𓀀', '𓐯'), + ('𓑁', '𓑆'), + ('𓑠', '𔏺'), + ('𔐀', '𔙆'), + ('𖄀', '\u{1612e}'), + ('𖠀', '𖨸'), + ('𖩀', '𖩞'), + ('𖩰', '𖪾'), + ('𖫐', '𖫭'), + ('𖬀', '𖬯'), + ('𖭀', '𖭃'), + ('𖭣', '𖭷'), + ('𖭽', '𖮏'), + ('𖵀', '𖵬'), + ('𖹀', '𖹿'), + ('𖼀', '𖽊'), + ('\u{16f4f}', '𖾇'), + ('\u{16f8f}', '𖾟'), + ('𖿠', '𖿡'), + ('𖿣', '𖿣'), + ('\u{16ff0}', '\u{16ff1}'), + ('𗀀', '𘟷'), + ('𘠀', '𘳕'), + ('𘳿', '𘴈'), + ('𚿰', '𚿳'), + ('𚿵', '𚿻'), + ('𚿽', '𚿾'), + ('𛀀', '𛄢'), + ('𛄲', '𛄲'), + ('𛅐', '𛅒'), + ('𛅕', '𛅕'), + ('𛅤', '𛅧'), + ('𛅰', '𛋻'), + ('𛰀', '𛱪'), + ('𛱰', '𛱼'), + ('𛲀', '𛲈'), + ('𛲐', '𛲙'), + ('\u{1bc9e}', '\u{1bc9e}'), + ('𝐀', '𝑔'), + ('𝑖', '𝒜'), + ('𝒞', '𝒟'), + ('𝒢', '𝒢'), + ('𝒥', '𝒦'), + ('𝒩', '𝒬'), + ('𝒮', '𝒹'), + ('𝒻', '𝒻'), + ('𝒽', '𝓃'), + ('𝓅', '𝔅'), + ('𝔇', '𝔊'), + ('𝔍', '𝔔'), + ('𝔖', '𝔜'), + ('𝔞', '𝔹'), + ('𝔻', '𝔾'), + ('𝕀', '𝕄'), + ('𝕆', '𝕆'), + ('𝕊', '𝕐'), + ('𝕒', '𝚥'), + ('𝚨', '𝛀'), + ('𝛂', '𝛚'), + ('𝛜', '𝛺'), + ('𝛼', '𝜔'), + ('𝜖', '𝜴'), + ('𝜶', '𝝎'), + ('𝝐', '𝝮'), + ('𝝰', '𝞈'), + ('𝞊', '𝞨'), + ('𝞪', '𝟂'), + ('𝟄', '𝟋'), + ('𝼀', '𝼞'), + ('𝼥', '𝼪'), + ('\u{1e000}', '\u{1e006}'), + ('\u{1e008}', '\u{1e018}'), + ('\u{1e01b}', '\u{1e021}'), + ('\u{1e023}', '\u{1e024}'), + ('\u{1e026}', '\u{1e02a}'), + ('𞀰', '𞁭'), + ('\u{1e08f}', '\u{1e08f}'), + ('𞄀', '𞄬'), + ('𞄷', '𞄽'), + ('𞅎', '𞅎'), + 
('𞊐', '𞊭'), + ('𞋀', '𞋫'), + ('𞓐', '𞓫'), + ('𞗐', '𞗭'), + ('𞗰', '𞗰'), + ('𞟠', '𞟦'), + ('𞟨', '𞟫'), + ('𞟭', '𞟮'), + ('𞟰', '𞟾'), + ('𞠀', '𞣄'), + ('𞤀', '𞥃'), + ('\u{1e947}', '\u{1e947}'), + ('𞥋', '𞥋'), + ('𞸀', '𞸃'), + ('𞸅', '𞸟'), + ('𞸡', '𞸢'), + ('𞸤', '𞸤'), + ('𞸧', '𞸧'), + ('𞸩', '𞸲'), + ('𞸴', '𞸷'), + ('𞸹', '𞸹'), + ('𞸻', '𞸻'), + ('𞹂', '𞹂'), + ('𞹇', '𞹇'), + ('𞹉', '𞹉'), + ('𞹋', '𞹋'), + ('𞹍', '𞹏'), + ('𞹑', '𞹒'), + ('𞹔', '𞹔'), + ('𞹗', '𞹗'), + ('𞹙', '𞹙'), + ('𞹛', '𞹛'), + ('𞹝', '𞹝'), + ('𞹟', '𞹟'), + ('𞹡', '𞹢'), + ('𞹤', '𞹤'), + ('𞹧', '𞹪'), + ('𞹬', '𞹲'), + ('𞹴', '𞹷'), + ('𞹹', '𞹼'), + ('𞹾', '𞹾'), + ('𞺀', '𞺉'), + ('𞺋', '𞺛'), + ('𞺡', '𞺣'), + ('𞺥', '𞺩'), + ('𞺫', '𞺻'), + ('🄰', '🅉'), + ('🅐', '🅩'), + ('🅰', '🆉'), + ('𠀀', '𪛟'), + ('𪜀', '𫜹'), + ('𫝀', '𫠝'), + ('𫠠', '𬺡'), + ('𬺰', '𮯠'), + ('𮯰', '𮹝'), + ('丽', '𪘀'), + ('𰀀', '𱍊'), + ('𱍐', '𲎯'), +]; + +pub const BIDI_CONTROL: &'static [(char, char)] = &[ + ('\u{61c}', '\u{61c}'), + ('\u{200e}', '\u{200f}'), + ('\u{202a}', '\u{202e}'), + ('\u{2066}', '\u{2069}'), +]; + +pub const BIDI_MIRRORED: &'static [(char, char)] = &[ + ('(', ')'), + ('<', '<'), + ('>', '>'), + ('[', '['), + (']', ']'), + ('{', '{'), + ('}', '}'), + ('«', '«'), + ('»', '»'), + ('༺', '༽'), + ('᚛', '᚜'), + ('‹', '›'), + ('⁅', '⁆'), + ('⁽', '⁾'), + ('₍', '₎'), + ('⅀', '⅀'), + ('∁', '∄'), + ('∈', '∍'), + ('∑', '∑'), + ('∕', '∖'), + ('√', '∝'), + ('∟', '∢'), + ('∤', '∤'), + ('∦', '∦'), + ('∫', '∳'), + ('∹', '∹'), + ('∻', '≌'), + ('≒', '≕'), + ('≟', '≠'), + ('≢', '≢'), + ('≤', '≫'), + ('≭', '⊌'), + ('⊏', '⊒'), + ('⊘', '⊘'), + ('⊢', '⊣'), + ('⊦', '⊸'), + ('⊾', '⊿'), + ('⋉', '⋍'), + ('⋐', '⋑'), + ('⋖', '⋭'), + ('⋰', '⋿'), + ('⌈', '⌋'), + ('⌠', '⌡'), + ('〈', '〉'), + ('❨', '❵'), + ('⟀', '⟀'), + ('⟃', '⟆'), + ('⟈', '⟉'), + ('⟋', '⟍'), + ('⟓', '⟖'), + ('⟜', '⟞'), + ('⟢', '⟯'), + ('⦃', '⦘'), + ('⦛', '⦠'), + ('⦢', '⦯'), + ('⦸', '⦸'), + ('⧀', '⧅'), + ('⧉', '⧉'), + ('⧎', '⧒'), + ('⧔', '⧕'), + ('⧘', '⧜'), + ('⧡', '⧡'), + ('⧣', '⧥'), + ('⧨', '⧩'), + ('⧴', '⧹'), + ('⧼', '⧽'), + ('⨊', '⨜'), + ('⨞', '⨡'), + ('⨤', '⨤'), + ('⨦', '⨦'), + ('⨩', '⨩'), + ('⨫', '⨮'), + ('⨴', '⨵'), + ('⨼', '⨾'), + ('⩗', '⩘'), + ('⩤', '⩥'), + ('⩪', '⩭'), + ('⩯', '⩰'), + ('⩳', '⩴'), + ('⩹', '⪣'), + ('⪦', '⪭'), + ('⪯', '⫖'), + ('⫝̸', '⫝̸'), + ('⫞', '⫞'), + ('⫢', '⫦'), + ('⫬', '⫮'), + ('⫳', '⫳'), + ('⫷', '⫻'), + ('⫽', '⫽'), + ('⯾', '⯾'), + ('⸂', '⸅'), + ('⸉', '⸊'), + ('⸌', '⸍'), + ('⸜', '⸝'), + ('⸠', '⸩'), + ('⹕', '⹜'), + ('〈', '】'), + ('〔', '〛'), + ('﹙', '﹞'), + ('﹤', '﹥'), + ('(', ')'), + ('<', '<'), + ('>', '>'), + ('[', '['), + (']', ']'), + ('{', '{'), + ('}', '}'), + ('⦅', '⦆'), + ('「', '」'), + ('𝛛', '𝛛'), + ('𝜕', '𝜕'), + ('𝝏', '𝝏'), + ('𝞉', '𝞉'), + ('𝟃', '𝟃'), +]; + +pub const CASE_IGNORABLE: &'static [(char, char)] = &[ + ('\'', '\''), + ('.', '.'), + (':', ':'), + ('^', '^'), + ('`', '`'), + ('¨', '¨'), + ('\u{ad}', '\u{ad}'), + ('¯', '¯'), + ('´', '´'), + ('·', '¸'), + ('ʰ', '\u{36f}'), + ('ʹ', '͵'), + ('ͺ', 'ͺ'), + ('΄', '΅'), + ('·', '·'), + ('\u{483}', '\u{489}'), + ('ՙ', 'ՙ'), + ('՟', '՟'), + ('\u{591}', '\u{5bd}'), + ('\u{5bf}', '\u{5bf}'), + ('\u{5c1}', '\u{5c2}'), + ('\u{5c4}', '\u{5c5}'), + ('\u{5c7}', '\u{5c7}'), + ('״', '״'), + ('\u{600}', '\u{605}'), + ('\u{610}', '\u{61a}'), + ('\u{61c}', '\u{61c}'), + ('ـ', 'ـ'), + ('\u{64b}', '\u{65f}'), + ('\u{670}', '\u{670}'), + ('\u{6d6}', '\u{6dd}'), + ('\u{6df}', '\u{6e8}'), + ('\u{6ea}', '\u{6ed}'), + ('\u{70f}', '\u{70f}'), + ('\u{711}', '\u{711}'), + ('\u{730}', '\u{74a}'), + ('\u{7a6}', '\u{7b0}'), + ('\u{7eb}', 'ߵ'), + ('ߺ', 'ߺ'), + ('\u{7fd}', '\u{7fd}'), + ('\u{816}', '\u{82d}'), 
+ ('\u{859}', '\u{85b}'), + ('࢈', '࢈'), + ('\u{890}', '\u{891}'), + ('\u{897}', '\u{89f}'), + ('ࣉ', '\u{902}'), + ('\u{93a}', '\u{93a}'), + ('\u{93c}', '\u{93c}'), + ('\u{941}', '\u{948}'), + ('\u{94d}', '\u{94d}'), + ('\u{951}', '\u{957}'), + ('\u{962}', '\u{963}'), + ('ॱ', 'ॱ'), + ('\u{981}', '\u{981}'), + ('\u{9bc}', '\u{9bc}'), + ('\u{9c1}', '\u{9c4}'), + ('\u{9cd}', '\u{9cd}'), + ('\u{9e2}', '\u{9e3}'), + ('\u{9fe}', '\u{9fe}'), + ('\u{a01}', '\u{a02}'), + ('\u{a3c}', '\u{a3c}'), + ('\u{a41}', '\u{a42}'), + ('\u{a47}', '\u{a48}'), + ('\u{a4b}', '\u{a4d}'), + ('\u{a51}', '\u{a51}'), + ('\u{a70}', '\u{a71}'), + ('\u{a75}', '\u{a75}'), + ('\u{a81}', '\u{a82}'), + ('\u{abc}', '\u{abc}'), + ('\u{ac1}', '\u{ac5}'), + ('\u{ac7}', '\u{ac8}'), + ('\u{acd}', '\u{acd}'), + ('\u{ae2}', '\u{ae3}'), + ('\u{afa}', '\u{aff}'), + ('\u{b01}', '\u{b01}'), + ('\u{b3c}', '\u{b3c}'), + ('\u{b3f}', '\u{b3f}'), + ('\u{b41}', '\u{b44}'), + ('\u{b4d}', '\u{b4d}'), + ('\u{b55}', '\u{b56}'), + ('\u{b62}', '\u{b63}'), + ('\u{b82}', '\u{b82}'), + ('\u{bc0}', '\u{bc0}'), + ('\u{bcd}', '\u{bcd}'), + ('\u{c00}', '\u{c00}'), + ('\u{c04}', '\u{c04}'), + ('\u{c3c}', '\u{c3c}'), + ('\u{c3e}', '\u{c40}'), + ('\u{c46}', '\u{c48}'), + ('\u{c4a}', '\u{c4d}'), + ('\u{c55}', '\u{c56}'), + ('\u{c62}', '\u{c63}'), + ('\u{c81}', '\u{c81}'), + ('\u{cbc}', '\u{cbc}'), + ('\u{cbf}', '\u{cbf}'), + ('\u{cc6}', '\u{cc6}'), + ('\u{ccc}', '\u{ccd}'), + ('\u{ce2}', '\u{ce3}'), + ('\u{d00}', '\u{d01}'), + ('\u{d3b}', '\u{d3c}'), + ('\u{d41}', '\u{d44}'), + ('\u{d4d}', '\u{d4d}'), + ('\u{d62}', '\u{d63}'), + ('\u{d81}', '\u{d81}'), + ('\u{dca}', '\u{dca}'), + ('\u{dd2}', '\u{dd4}'), + ('\u{dd6}', '\u{dd6}'), + ('\u{e31}', '\u{e31}'), + ('\u{e34}', '\u{e3a}'), + ('ๆ', '\u{e4e}'), + ('\u{eb1}', '\u{eb1}'), + ('\u{eb4}', '\u{ebc}'), + ('ໆ', 'ໆ'), + ('\u{ec8}', '\u{ece}'), + ('\u{f18}', '\u{f19}'), + ('\u{f35}', '\u{f35}'), + ('\u{f37}', '\u{f37}'), + ('\u{f39}', '\u{f39}'), + ('\u{f71}', '\u{f7e}'), + ('\u{f80}', '\u{f84}'), + ('\u{f86}', '\u{f87}'), + ('\u{f8d}', '\u{f97}'), + ('\u{f99}', '\u{fbc}'), + ('\u{fc6}', '\u{fc6}'), + ('\u{102d}', '\u{1030}'), + ('\u{1032}', '\u{1037}'), + ('\u{1039}', '\u{103a}'), + ('\u{103d}', '\u{103e}'), + ('\u{1058}', '\u{1059}'), + ('\u{105e}', '\u{1060}'), + ('\u{1071}', '\u{1074}'), + ('\u{1082}', '\u{1082}'), + ('\u{1085}', '\u{1086}'), + ('\u{108d}', '\u{108d}'), + ('\u{109d}', '\u{109d}'), + ('ჼ', 'ჼ'), + ('\u{135d}', '\u{135f}'), + ('\u{1712}', '\u{1714}'), + ('\u{1732}', '\u{1733}'), + ('\u{1752}', '\u{1753}'), + ('\u{1772}', '\u{1773}'), + ('\u{17b4}', '\u{17b5}'), + ('\u{17b7}', '\u{17bd}'), + ('\u{17c6}', '\u{17c6}'), + ('\u{17c9}', '\u{17d3}'), + ('ៗ', 'ៗ'), + ('\u{17dd}', '\u{17dd}'), + ('\u{180b}', '\u{180f}'), + ('ᡃ', 'ᡃ'), + ('\u{1885}', '\u{1886}'), + ('\u{18a9}', '\u{18a9}'), + ('\u{1920}', '\u{1922}'), + ('\u{1927}', '\u{1928}'), + ('\u{1932}', '\u{1932}'), + ('\u{1939}', '\u{193b}'), + ('\u{1a17}', '\u{1a18}'), + ('\u{1a1b}', '\u{1a1b}'), + ('\u{1a56}', '\u{1a56}'), + ('\u{1a58}', '\u{1a5e}'), + ('\u{1a60}', '\u{1a60}'), + ('\u{1a62}', '\u{1a62}'), + ('\u{1a65}', '\u{1a6c}'), + ('\u{1a73}', '\u{1a7c}'), + ('\u{1a7f}', '\u{1a7f}'), + ('ᪧ', 'ᪧ'), + ('\u{1ab0}', '\u{1ace}'), + ('\u{1b00}', '\u{1b03}'), + ('\u{1b34}', '\u{1b34}'), + ('\u{1b36}', '\u{1b3a}'), + ('\u{1b3c}', '\u{1b3c}'), + ('\u{1b42}', '\u{1b42}'), + ('\u{1b6b}', '\u{1b73}'), + ('\u{1b80}', '\u{1b81}'), + ('\u{1ba2}', '\u{1ba5}'), + ('\u{1ba8}', '\u{1ba9}'), + ('\u{1bab}', '\u{1bad}'), + ('\u{1be6}', '\u{1be6}'), + ('\u{1be8}', 
'\u{1be9}'), + ('\u{1bed}', '\u{1bed}'), + ('\u{1bef}', '\u{1bf1}'), + ('\u{1c2c}', '\u{1c33}'), + ('\u{1c36}', '\u{1c37}'), + ('ᱸ', 'ᱽ'), + ('\u{1cd0}', '\u{1cd2}'), + ('\u{1cd4}', '\u{1ce0}'), + ('\u{1ce2}', '\u{1ce8}'), + ('\u{1ced}', '\u{1ced}'), + ('\u{1cf4}', '\u{1cf4}'), + ('\u{1cf8}', '\u{1cf9}'), + ('ᴬ', 'ᵪ'), + ('ᵸ', 'ᵸ'), + ('ᶛ', '\u{1dff}'), + ('᾽', '᾽'), + ('᾿', '῁'), + ('῍', '῏'), + ('῝', '῟'), + ('῭', '`'), + ('´', '῾'), + ('\u{200b}', '\u{200f}'), + ('‘', '’'), + ('․', '․'), + ('‧', '‧'), + ('\u{202a}', '\u{202e}'), + ('\u{2060}', '\u{2064}'), + ('\u{2066}', '\u{206f}'), + ('ⁱ', 'ⁱ'), + ('ⁿ', 'ⁿ'), + ('ₐ', 'ₜ'), + ('\u{20d0}', '\u{20f0}'), + ('ⱼ', 'ⱽ'), + ('\u{2cef}', '\u{2cf1}'), + ('ⵯ', 'ⵯ'), + ('\u{2d7f}', '\u{2d7f}'), + ('\u{2de0}', '\u{2dff}'), + ('ⸯ', 'ⸯ'), + ('々', '々'), + ('\u{302a}', '\u{302d}'), + ('〱', '〵'), + ('〻', '〻'), + ('\u{3099}', 'ゞ'), + ('ー', 'ヾ'), + ('ꀕ', 'ꀕ'), + ('ꓸ', 'ꓽ'), + ('ꘌ', 'ꘌ'), + ('\u{a66f}', '\u{a672}'), + ('\u{a674}', '\u{a67d}'), + ('ꙿ', 'ꙿ'), + ('ꚜ', '\u{a69f}'), + ('\u{a6f0}', '\u{a6f1}'), + ('꜀', '꜡'), + ('ꝰ', 'ꝰ'), + ('ꞈ', '꞊'), + ('ꟲ', 'ꟴ'), + ('ꟸ', 'ꟹ'), + ('\u{a802}', '\u{a802}'), + ('\u{a806}', '\u{a806}'), + ('\u{a80b}', '\u{a80b}'), + ('\u{a825}', '\u{a826}'), + ('\u{a82c}', '\u{a82c}'), + ('\u{a8c4}', '\u{a8c5}'), + ('\u{a8e0}', '\u{a8f1}'), + ('\u{a8ff}', '\u{a8ff}'), + ('\u{a926}', '\u{a92d}'), + ('\u{a947}', '\u{a951}'), + ('\u{a980}', '\u{a982}'), + ('\u{a9b3}', '\u{a9b3}'), + ('\u{a9b6}', '\u{a9b9}'), + ('\u{a9bc}', '\u{a9bd}'), + ('ꧏ', 'ꧏ'), + ('\u{a9e5}', 'ꧦ'), + ('\u{aa29}', '\u{aa2e}'), + ('\u{aa31}', '\u{aa32}'), + ('\u{aa35}', '\u{aa36}'), + ('\u{aa43}', '\u{aa43}'), + ('\u{aa4c}', '\u{aa4c}'), + ('ꩰ', 'ꩰ'), + ('\u{aa7c}', '\u{aa7c}'), + ('\u{aab0}', '\u{aab0}'), + ('\u{aab2}', '\u{aab4}'), + ('\u{aab7}', '\u{aab8}'), + ('\u{aabe}', '\u{aabf}'), + ('\u{aac1}', '\u{aac1}'), + ('ꫝ', 'ꫝ'), + ('\u{aaec}', '\u{aaed}'), + ('ꫳ', 'ꫴ'), + ('\u{aaf6}', '\u{aaf6}'), + ('꭛', 'ꭟ'), + ('ꭩ', '꭫'), + ('\u{abe5}', '\u{abe5}'), + ('\u{abe8}', '\u{abe8}'), + ('\u{abed}', '\u{abed}'), + ('\u{fb1e}', '\u{fb1e}'), + ('﮲', '﯂'), + ('\u{fe00}', '\u{fe0f}'), + ('︓', '︓'), + ('\u{fe20}', '\u{fe2f}'), + ('﹒', '﹒'), + ('﹕', '﹕'), + ('\u{feff}', '\u{feff}'), + (''', '''), + ('.', '.'), + (':', ':'), + ('^', '^'), + ('`', '`'), + ('ー', 'ー'), + ('\u{ff9e}', '\u{ff9f}'), + (' ̄', ' ̄'), + ('\u{fff9}', '\u{fffb}'), + ('\u{101fd}', '\u{101fd}'), + ('\u{102e0}', '\u{102e0}'), + ('\u{10376}', '\u{1037a}'), + ('𐞀', '𐞅'), + ('𐞇', '𐞰'), + ('𐞲', '𐞺'), + ('\u{10a01}', '\u{10a03}'), + ('\u{10a05}', '\u{10a06}'), + ('\u{10a0c}', '\u{10a0f}'), + ('\u{10a38}', '\u{10a3a}'), + ('\u{10a3f}', '\u{10a3f}'), + ('\u{10ae5}', '\u{10ae6}'), + ('\u{10d24}', '\u{10d27}'), + ('𐵎', '𐵎'), + ('\u{10d69}', '\u{10d6d}'), + ('𐵯', '𐵯'), + ('\u{10eab}', '\u{10eac}'), + ('\u{10efc}', '\u{10eff}'), + ('\u{10f46}', '\u{10f50}'), + ('\u{10f82}', '\u{10f85}'), + ('\u{11001}', '\u{11001}'), + ('\u{11038}', '\u{11046}'), + ('\u{11070}', '\u{11070}'), + ('\u{11073}', '\u{11074}'), + ('\u{1107f}', '\u{11081}'), + ('\u{110b3}', '\u{110b6}'), + ('\u{110b9}', '\u{110ba}'), + ('\u{110bd}', '\u{110bd}'), + ('\u{110c2}', '\u{110c2}'), + ('\u{110cd}', '\u{110cd}'), + ('\u{11100}', '\u{11102}'), + ('\u{11127}', '\u{1112b}'), + ('\u{1112d}', '\u{11134}'), + ('\u{11173}', '\u{11173}'), + ('\u{11180}', '\u{11181}'), + ('\u{111b6}', '\u{111be}'), + ('\u{111c9}', '\u{111cc}'), + ('\u{111cf}', '\u{111cf}'), + ('\u{1122f}', '\u{11231}'), + ('\u{11234}', '\u{11234}'), + ('\u{11236}', '\u{11237}'), + 
('\u{1123e}', '\u{1123e}'), + ('\u{11241}', '\u{11241}'), + ('\u{112df}', '\u{112df}'), + ('\u{112e3}', '\u{112ea}'), + ('\u{11300}', '\u{11301}'), + ('\u{1133b}', '\u{1133c}'), + ('\u{11340}', '\u{11340}'), + ('\u{11366}', '\u{1136c}'), + ('\u{11370}', '\u{11374}'), + ('\u{113bb}', '\u{113c0}'), + ('\u{113ce}', '\u{113ce}'), + ('\u{113d0}', '\u{113d0}'), + ('\u{113d2}', '\u{113d2}'), + ('\u{113e1}', '\u{113e2}'), + ('\u{11438}', '\u{1143f}'), + ('\u{11442}', '\u{11444}'), + ('\u{11446}', '\u{11446}'), + ('\u{1145e}', '\u{1145e}'), + ('\u{114b3}', '\u{114b8}'), + ('\u{114ba}', '\u{114ba}'), + ('\u{114bf}', '\u{114c0}'), + ('\u{114c2}', '\u{114c3}'), + ('\u{115b2}', '\u{115b5}'), + ('\u{115bc}', '\u{115bd}'), + ('\u{115bf}', '\u{115c0}'), + ('\u{115dc}', '\u{115dd}'), + ('\u{11633}', '\u{1163a}'), + ('\u{1163d}', '\u{1163d}'), + ('\u{1163f}', '\u{11640}'), + ('\u{116ab}', '\u{116ab}'), + ('\u{116ad}', '\u{116ad}'), + ('\u{116b0}', '\u{116b5}'), + ('\u{116b7}', '\u{116b7}'), + ('\u{1171d}', '\u{1171d}'), + ('\u{1171f}', '\u{1171f}'), + ('\u{11722}', '\u{11725}'), + ('\u{11727}', '\u{1172b}'), + ('\u{1182f}', '\u{11837}'), + ('\u{11839}', '\u{1183a}'), + ('\u{1193b}', '\u{1193c}'), + ('\u{1193e}', '\u{1193e}'), + ('\u{11943}', '\u{11943}'), + ('\u{119d4}', '\u{119d7}'), + ('\u{119da}', '\u{119db}'), + ('\u{119e0}', '\u{119e0}'), + ('\u{11a01}', '\u{11a0a}'), + ('\u{11a33}', '\u{11a38}'), + ('\u{11a3b}', '\u{11a3e}'), + ('\u{11a47}', '\u{11a47}'), + ('\u{11a51}', '\u{11a56}'), + ('\u{11a59}', '\u{11a5b}'), + ('\u{11a8a}', '\u{11a96}'), + ('\u{11a98}', '\u{11a99}'), + ('\u{11c30}', '\u{11c36}'), + ('\u{11c38}', '\u{11c3d}'), + ('\u{11c3f}', '\u{11c3f}'), + ('\u{11c92}', '\u{11ca7}'), + ('\u{11caa}', '\u{11cb0}'), + ('\u{11cb2}', '\u{11cb3}'), + ('\u{11cb5}', '\u{11cb6}'), + ('\u{11d31}', '\u{11d36}'), + ('\u{11d3a}', '\u{11d3a}'), + ('\u{11d3c}', '\u{11d3d}'), + ('\u{11d3f}', '\u{11d45}'), + ('\u{11d47}', '\u{11d47}'), + ('\u{11d90}', '\u{11d91}'), + ('\u{11d95}', '\u{11d95}'), + ('\u{11d97}', '\u{11d97}'), + ('\u{11ef3}', '\u{11ef4}'), + ('\u{11f00}', '\u{11f01}'), + ('\u{11f36}', '\u{11f3a}'), + ('\u{11f40}', '\u{11f40}'), + ('\u{11f42}', '\u{11f42}'), + ('\u{11f5a}', '\u{11f5a}'), + ('\u{13430}', '\u{13440}'), + ('\u{13447}', '\u{13455}'), + ('\u{1611e}', '\u{16129}'), + ('\u{1612d}', '\u{1612f}'), + ('\u{16af0}', '\u{16af4}'), + ('\u{16b30}', '\u{16b36}'), + ('𖭀', '𖭃'), + ('𖵀', '𖵂'), + ('𖵫', '𖵬'), + ('\u{16f4f}', '\u{16f4f}'), + ('\u{16f8f}', '𖾟'), + ('𖿠', '𖿡'), + ('𖿣', '\u{16fe4}'), + ('𚿰', '𚿳'), + ('𚿵', '𚿻'), + ('𚿽', '𚿾'), + ('\u{1bc9d}', '\u{1bc9e}'), + ('\u{1bca0}', '\u{1bca3}'), + ('\u{1cf00}', '\u{1cf2d}'), + ('\u{1cf30}', '\u{1cf46}'), + ('\u{1d167}', '\u{1d169}'), + ('\u{1d173}', '\u{1d182}'), + ('\u{1d185}', '\u{1d18b}'), + ('\u{1d1aa}', '\u{1d1ad}'), + ('\u{1d242}', '\u{1d244}'), + ('\u{1da00}', '\u{1da36}'), + ('\u{1da3b}', '\u{1da6c}'), + ('\u{1da75}', '\u{1da75}'), + ('\u{1da84}', '\u{1da84}'), + ('\u{1da9b}', '\u{1da9f}'), + ('\u{1daa1}', '\u{1daaf}'), + ('\u{1e000}', '\u{1e006}'), + ('\u{1e008}', '\u{1e018}'), + ('\u{1e01b}', '\u{1e021}'), + ('\u{1e023}', '\u{1e024}'), + ('\u{1e026}', '\u{1e02a}'), + ('𞀰', '𞁭'), + ('\u{1e08f}', '\u{1e08f}'), + ('\u{1e130}', '𞄽'), + ('\u{1e2ae}', '\u{1e2ae}'), + ('\u{1e2ec}', '\u{1e2ef}'), + ('𞓫', '\u{1e4ef}'), + ('\u{1e5ee}', '\u{1e5ef}'), + ('\u{1e8d0}', '\u{1e8d6}'), + ('\u{1e944}', '𞥋'), + ('🏻', '🏿'), + ('\u{e0001}', '\u{e0001}'), + ('\u{e0020}', '\u{e007f}'), + ('\u{e0100}', '\u{e01ef}'), +]; + +pub const CASED: &'static [(char, char)] = 
&[ + ('A', 'Z'), + ('a', 'z'), + ('ª', 'ª'), + ('µ', 'µ'), + ('º', 'º'), + ('À', 'Ö'), + ('Ø', 'ö'), + ('ø', 'ƺ'), + ('Ƽ', 'ƿ'), + ('DŽ', 'ʓ'), + ('ʕ', 'ʸ'), + ('ˀ', 'ˁ'), + ('ˠ', 'ˤ'), + ('\u{345}', '\u{345}'), + ('Ͱ', 'ͳ'), + ('Ͷ', 'ͷ'), + ('ͺ', 'ͽ'), + ('Ϳ', 'Ϳ'), + ('Ά', 'Ά'), + ('Έ', 'Ί'), + ('Ό', 'Ό'), + ('Ύ', 'Ρ'), + ('Σ', 'ϵ'), + ('Ϸ', 'ҁ'), + ('Ҋ', 'ԯ'), + ('Ա', 'Ֆ'), + ('ՠ', 'ֈ'), + ('Ⴀ', 'Ⴥ'), + ('Ⴧ', 'Ⴧ'), + ('Ⴭ', 'Ⴭ'), + ('ა', 'ჺ'), + ('ჼ', 'ჿ'), + ('Ꭰ', 'Ᏽ'), + ('ᏸ', 'ᏽ'), + ('ᲀ', 'ᲊ'), + ('Ა', 'Ჺ'), + ('Ჽ', 'Ჿ'), + ('ᴀ', 'ᶿ'), + ('Ḁ', 'ἕ'), + ('Ἐ', 'Ἕ'), + ('ἠ', 'ὅ'), + ('Ὀ', 'Ὅ'), + ('ὐ', 'ὗ'), + ('Ὑ', 'Ὑ'), + ('Ὓ', 'Ὓ'), + ('Ὕ', 'Ὕ'), + ('Ὗ', 'ώ'), + ('ᾀ', 'ᾴ'), + ('ᾶ', 'ᾼ'), + ('ι', 'ι'), + ('ῂ', 'ῄ'), + ('ῆ', 'ῌ'), + ('ῐ', 'ΐ'), + ('ῖ', 'Ί'), + ('ῠ', 'Ῥ'), + ('ῲ', 'ῴ'), + ('ῶ', 'ῼ'), + ('ⁱ', 'ⁱ'), + ('ⁿ', 'ⁿ'), + ('ₐ', 'ₜ'), + ('ℂ', 'ℂ'), + ('ℇ', 'ℇ'), + ('ℊ', 'ℓ'), + ('ℕ', 'ℕ'), + ('ℙ', 'ℝ'), + ('ℤ', 'ℤ'), + ('Ω', 'Ω'), + ('ℨ', 'ℨ'), + ('K', 'ℭ'), + ('ℯ', 'ℴ'), + ('ℹ', 'ℹ'), + ('ℼ', 'ℿ'), + ('ⅅ', 'ⅉ'), + ('ⅎ', 'ⅎ'), + ('Ⅰ', 'ⅿ'), + ('Ↄ', 'ↄ'), + ('Ⓐ', 'ⓩ'), + ('Ⰰ', 'ⳤ'), + ('Ⳬ', 'ⳮ'), + ('Ⳳ', 'ⳳ'), + ('ⴀ', 'ⴥ'), + ('ⴧ', 'ⴧ'), + ('ⴭ', 'ⴭ'), + ('Ꙁ', 'ꙭ'), + ('Ꚁ', 'ꚝ'), + ('Ꜣ', 'ꞇ'), + ('Ꞌ', 'ꞎ'), + ('Ꞑ', 'ꟍ'), + ('Ꟑ', 'ꟑ'), + ('ꟓ', 'ꟓ'), + ('ꟕ', 'Ƛ'), + ('ꟲ', 'ꟶ'), + ('ꟸ', 'ꟺ'), + ('ꬰ', 'ꭚ'), + ('ꭜ', 'ꭩ'), + ('ꭰ', 'ꮿ'), + ('ff', 'st'), + ('ﬓ', 'ﬗ'), + ('A', 'Z'), + ('a', 'z'), + ('𐐀', '𐑏'), + ('𐒰', '𐓓'), + ('𐓘', '𐓻'), + ('𐕰', '𐕺'), + ('𐕼', '𐖊'), + ('𐖌', '𐖒'), + ('𐖔', '𐖕'), + ('𐖗', '𐖡'), + ('𐖣', '𐖱'), + ('𐖳', '𐖹'), + ('𐖻', '𐖼'), + ('𐞀', '𐞀'), + ('𐞃', '𐞅'), + ('𐞇', '𐞰'), + ('𐞲', '𐞺'), + ('𐲀', '𐲲'), + ('𐳀', '𐳲'), + ('𐵐', '𐵥'), + ('𐵰', '𐶅'), + ('𑢠', '𑣟'), + ('𖹀', '𖹿'), + ('𝐀', '𝑔'), + ('𝑖', '𝒜'), + ('𝒞', '𝒟'), + ('𝒢', '𝒢'), + ('𝒥', '𝒦'), + ('𝒩', '𝒬'), + ('𝒮', '𝒹'), + ('𝒻', '𝒻'), + ('𝒽', '𝓃'), + ('𝓅', '𝔅'), + ('𝔇', '𝔊'), + ('𝔍', '𝔔'), + ('𝔖', '𝔜'), + ('𝔞', '𝔹'), + ('𝔻', '𝔾'), + ('𝕀', '𝕄'), + ('𝕆', '𝕆'), + ('𝕊', '𝕐'), + ('𝕒', '𝚥'), + ('𝚨', '𝛀'), + ('𝛂', '𝛚'), + ('𝛜', '𝛺'), + ('𝛼', '𝜔'), + ('𝜖', '𝜴'), + ('𝜶', '𝝎'), + ('𝝐', '𝝮'), + ('𝝰', '𝞈'), + ('𝞊', '𝞨'), + ('𝞪', '𝟂'), + ('𝟄', '𝟋'), + ('𝼀', '𝼉'), + ('𝼋', '𝼞'), + ('𝼥', '𝼪'), + ('𞀰', '𞁭'), + ('𞤀', '𞥃'), + ('🄰', '🅉'), + ('🅐', '🅩'), + ('🅰', '🆉'), +]; + +pub const CHANGES_WHEN_CASEFOLDED: &'static [(char, char)] = &[ + ('A', 'Z'), + ('µ', 'µ'), + ('À', 'Ö'), + ('Ø', 'ß'), + ('Ā', 'Ā'), + ('Ă', 'Ă'), + ('Ą', 'Ą'), + ('Ć', 'Ć'), + ('Ĉ', 'Ĉ'), + ('Ċ', 'Ċ'), + ('Č', 'Č'), + ('Ď', 'Ď'), + ('Đ', 'Đ'), + ('Ē', 'Ē'), + ('Ĕ', 'Ĕ'), + ('Ė', 'Ė'), + ('Ę', 'Ę'), + ('Ě', 'Ě'), + ('Ĝ', 'Ĝ'), + ('Ğ', 'Ğ'), + ('Ġ', 'Ġ'), + ('Ģ', 'Ģ'), + ('Ĥ', 'Ĥ'), + ('Ħ', 'Ħ'), + ('Ĩ', 'Ĩ'), + ('Ī', 'Ī'), + ('Ĭ', 'Ĭ'), + ('Į', 'Į'), + ('İ', 'İ'), + ('IJ', 'IJ'), + ('Ĵ', 'Ĵ'), + ('Ķ', 'Ķ'), + ('Ĺ', 'Ĺ'), + ('Ļ', 'Ļ'), + ('Ľ', 'Ľ'), + ('Ŀ', 'Ŀ'), + ('Ł', 'Ł'), + ('Ń', 'Ń'), + ('Ņ', 'Ņ'), + ('Ň', 'Ň'), + ('ʼn', 'Ŋ'), + ('Ō', 'Ō'), + ('Ŏ', 'Ŏ'), + ('Ő', 'Ő'), + ('Œ', 'Œ'), + ('Ŕ', 'Ŕ'), + ('Ŗ', 'Ŗ'), + ('Ř', 'Ř'), + ('Ś', 'Ś'), + ('Ŝ', 'Ŝ'), + ('Ş', 'Ş'), + ('Š', 'Š'), + ('Ţ', 'Ţ'), + ('Ť', 'Ť'), + ('Ŧ', 'Ŧ'), + ('Ũ', 'Ũ'), + ('Ū', 'Ū'), + ('Ŭ', 'Ŭ'), + ('Ů', 'Ů'), + ('Ű', 'Ű'), + ('Ų', 'Ų'), + ('Ŵ', 'Ŵ'), + ('Ŷ', 'Ŷ'), + ('Ÿ', 'Ź'), + ('Ż', 'Ż'), + ('Ž', 'Ž'), + ('ſ', 'ſ'), + ('Ɓ', 'Ƃ'), + ('Ƅ', 'Ƅ'), + ('Ɔ', 'Ƈ'), + ('Ɖ', 'Ƌ'), + ('Ǝ', 'Ƒ'), + ('Ɠ', 'Ɣ'), + ('Ɩ', 'Ƙ'), + ('Ɯ', 'Ɲ'), + ('Ɵ', 'Ơ'), + ('Ƣ', 'Ƣ'), + ('Ƥ', 'Ƥ'), + ('Ʀ', 'Ƨ'), + ('Ʃ', 'Ʃ'), + ('Ƭ', 'Ƭ'), + ('Ʈ', 'Ư'), + ('Ʊ', 'Ƴ'), + ('Ƶ', 'Ƶ'), + ('Ʒ', 'Ƹ'), + ('Ƽ', 'Ƽ'), + ('DŽ', 'Dž'), + ('LJ', 'Lj'), 
+ ('NJ', 'Nj'), + ('Ǎ', 'Ǎ'), + ('Ǐ', 'Ǐ'), + ('Ǒ', 'Ǒ'), + ('Ǔ', 'Ǔ'), + ('Ǖ', 'Ǖ'), + ('Ǘ', 'Ǘ'), + ('Ǚ', 'Ǚ'), + ('Ǜ', 'Ǜ'), + ('Ǟ', 'Ǟ'), + ('Ǡ', 'Ǡ'), + ('Ǣ', 'Ǣ'), + ('Ǥ', 'Ǥ'), + ('Ǧ', 'Ǧ'), + ('Ǩ', 'Ǩ'), + ('Ǫ', 'Ǫ'), + ('Ǭ', 'Ǭ'), + ('Ǯ', 'Ǯ'), + ('DZ', 'Dz'), + ('Ǵ', 'Ǵ'), + ('Ƕ', 'Ǹ'), + ('Ǻ', 'Ǻ'), + ('Ǽ', 'Ǽ'), + ('Ǿ', 'Ǿ'), + ('Ȁ', 'Ȁ'), + ('Ȃ', 'Ȃ'), + ('Ȅ', 'Ȅ'), + ('Ȇ', 'Ȇ'), + ('Ȉ', 'Ȉ'), + ('Ȋ', 'Ȋ'), + ('Ȍ', 'Ȍ'), + ('Ȏ', 'Ȏ'), + ('Ȑ', 'Ȑ'), + ('Ȓ', 'Ȓ'), + ('Ȕ', 'Ȕ'), + ('Ȗ', 'Ȗ'), + ('Ș', 'Ș'), + ('Ț', 'Ț'), + ('Ȝ', 'Ȝ'), + ('Ȟ', 'Ȟ'), + ('Ƞ', 'Ƞ'), + ('Ȣ', 'Ȣ'), + ('Ȥ', 'Ȥ'), + ('Ȧ', 'Ȧ'), + ('Ȩ', 'Ȩ'), + ('Ȫ', 'Ȫ'), + ('Ȭ', 'Ȭ'), + ('Ȯ', 'Ȯ'), + ('Ȱ', 'Ȱ'), + ('Ȳ', 'Ȳ'), + ('Ⱥ', 'Ȼ'), + ('Ƚ', 'Ⱦ'), + ('Ɂ', 'Ɂ'), + ('Ƀ', 'Ɇ'), + ('Ɉ', 'Ɉ'), + ('Ɋ', 'Ɋ'), + ('Ɍ', 'Ɍ'), + ('Ɏ', 'Ɏ'), + ('\u{345}', '\u{345}'), + ('Ͱ', 'Ͱ'), + ('Ͳ', 'Ͳ'), + ('Ͷ', 'Ͷ'), + ('Ϳ', 'Ϳ'), + ('Ά', 'Ά'), + ('Έ', 'Ί'), + ('Ό', 'Ό'), + ('Ύ', 'Ώ'), + ('Α', 'Ρ'), + ('Σ', 'Ϋ'), + ('ς', 'ς'), + ('Ϗ', 'ϑ'), + ('ϕ', 'ϖ'), + ('Ϙ', 'Ϙ'), + ('Ϛ', 'Ϛ'), + ('Ϝ', 'Ϝ'), + ('Ϟ', 'Ϟ'), + ('Ϡ', 'Ϡ'), + ('Ϣ', 'Ϣ'), + ('Ϥ', 'Ϥ'), + ('Ϧ', 'Ϧ'), + ('Ϩ', 'Ϩ'), + ('Ϫ', 'Ϫ'), + ('Ϭ', 'Ϭ'), + ('Ϯ', 'Ϯ'), + ('ϰ', 'ϱ'), + ('ϴ', 'ϵ'), + ('Ϸ', 'Ϸ'), + ('Ϲ', 'Ϻ'), + ('Ͻ', 'Я'), + ('Ѡ', 'Ѡ'), + ('Ѣ', 'Ѣ'), + ('Ѥ', 'Ѥ'), + ('Ѧ', 'Ѧ'), + ('Ѩ', 'Ѩ'), + ('Ѫ', 'Ѫ'), + ('Ѭ', 'Ѭ'), + ('Ѯ', 'Ѯ'), + ('Ѱ', 'Ѱ'), + ('Ѳ', 'Ѳ'), + ('Ѵ', 'Ѵ'), + ('Ѷ', 'Ѷ'), + ('Ѹ', 'Ѹ'), + ('Ѻ', 'Ѻ'), + ('Ѽ', 'Ѽ'), + ('Ѿ', 'Ѿ'), + ('Ҁ', 'Ҁ'), + ('Ҋ', 'Ҋ'), + ('Ҍ', 'Ҍ'), + ('Ҏ', 'Ҏ'), + ('Ґ', 'Ґ'), + ('Ғ', 'Ғ'), + ('Ҕ', 'Ҕ'), + ('Җ', 'Җ'), + ('Ҙ', 'Ҙ'), + ('Қ', 'Қ'), + ('Ҝ', 'Ҝ'), + ('Ҟ', 'Ҟ'), + ('Ҡ', 'Ҡ'), + ('Ң', 'Ң'), + ('Ҥ', 'Ҥ'), + ('Ҧ', 'Ҧ'), + ('Ҩ', 'Ҩ'), + ('Ҫ', 'Ҫ'), + ('Ҭ', 'Ҭ'), + ('Ү', 'Ү'), + ('Ұ', 'Ұ'), + ('Ҳ', 'Ҳ'), + ('Ҵ', 'Ҵ'), + ('Ҷ', 'Ҷ'), + ('Ҹ', 'Ҹ'), + ('Һ', 'Һ'), + ('Ҽ', 'Ҽ'), + ('Ҿ', 'Ҿ'), + ('Ӏ', 'Ӂ'), + ('Ӄ', 'Ӄ'), + ('Ӆ', 'Ӆ'), + ('Ӈ', 'Ӈ'), + ('Ӊ', 'Ӊ'), + ('Ӌ', 'Ӌ'), + ('Ӎ', 'Ӎ'), + ('Ӑ', 'Ӑ'), + ('Ӓ', 'Ӓ'), + ('Ӕ', 'Ӕ'), + ('Ӗ', 'Ӗ'), + ('Ә', 'Ә'), + ('Ӛ', 'Ӛ'), + ('Ӝ', 'Ӝ'), + ('Ӟ', 'Ӟ'), + ('Ӡ', 'Ӡ'), + ('Ӣ', 'Ӣ'), + ('Ӥ', 'Ӥ'), + ('Ӧ', 'Ӧ'), + ('Ө', 'Ө'), + ('Ӫ', 'Ӫ'), + ('Ӭ', 'Ӭ'), + ('Ӯ', 'Ӯ'), + ('Ӱ', 'Ӱ'), + ('Ӳ', 'Ӳ'), + ('Ӵ', 'Ӵ'), + ('Ӷ', 'Ӷ'), + ('Ӹ', 'Ӹ'), + ('Ӻ', 'Ӻ'), + ('Ӽ', 'Ӽ'), + ('Ӿ', 'Ӿ'), + ('Ԁ', 'Ԁ'), + ('Ԃ', 'Ԃ'), + ('Ԅ', 'Ԅ'), + ('Ԇ', 'Ԇ'), + ('Ԉ', 'Ԉ'), + ('Ԋ', 'Ԋ'), + ('Ԍ', 'Ԍ'), + ('Ԏ', 'Ԏ'), + ('Ԑ', 'Ԑ'), + ('Ԓ', 'Ԓ'), + ('Ԕ', 'Ԕ'), + ('Ԗ', 'Ԗ'), + ('Ԙ', 'Ԙ'), + ('Ԛ', 'Ԛ'), + ('Ԝ', 'Ԝ'), + ('Ԟ', 'Ԟ'), + ('Ԡ', 'Ԡ'), + ('Ԣ', 'Ԣ'), + ('Ԥ', 'Ԥ'), + ('Ԧ', 'Ԧ'), + ('Ԩ', 'Ԩ'), + ('Ԫ', 'Ԫ'), + ('Ԭ', 'Ԭ'), + ('Ԯ', 'Ԯ'), + ('Ա', 'Ֆ'), + ('և', 'և'), + ('Ⴀ', 'Ⴥ'), + ('Ⴧ', 'Ⴧ'), + ('Ⴭ', 'Ⴭ'), + ('ᏸ', 'ᏽ'), + ('ᲀ', 'Ᲊ'), + ('Ა', 'Ჺ'), + ('Ჽ', 'Ჿ'), + ('Ḁ', 'Ḁ'), + ('Ḃ', 'Ḃ'), + ('Ḅ', 'Ḅ'), + ('Ḇ', 'Ḇ'), + ('Ḉ', 'Ḉ'), + ('Ḋ', 'Ḋ'), + ('Ḍ', 'Ḍ'), + ('Ḏ', 'Ḏ'), + ('Ḑ', 'Ḑ'), + ('Ḓ', 'Ḓ'), + ('Ḕ', 'Ḕ'), + ('Ḗ', 'Ḗ'), + ('Ḙ', 'Ḙ'), + ('Ḛ', 'Ḛ'), + ('Ḝ', 'Ḝ'), + ('Ḟ', 'Ḟ'), + ('Ḡ', 'Ḡ'), + ('Ḣ', 'Ḣ'), + ('Ḥ', 'Ḥ'), + ('Ḧ', 'Ḧ'), + ('Ḩ', 'Ḩ'), + ('Ḫ', 'Ḫ'), + ('Ḭ', 'Ḭ'), + ('Ḯ', 'Ḯ'), + ('Ḱ', 'Ḱ'), + ('Ḳ', 'Ḳ'), + ('Ḵ', 'Ḵ'), + ('Ḷ', 'Ḷ'), + ('Ḹ', 'Ḹ'), + ('Ḻ', 'Ḻ'), + ('Ḽ', 'Ḽ'), + ('Ḿ', 'Ḿ'), + ('Ṁ', 'Ṁ'), + ('Ṃ', 'Ṃ'), + ('Ṅ', 'Ṅ'), + ('Ṇ', 'Ṇ'), + ('Ṉ', 'Ṉ'), + ('Ṋ', 'Ṋ'), + ('Ṍ', 'Ṍ'), + ('Ṏ', 'Ṏ'), + ('Ṑ', 'Ṑ'), + ('Ṓ', 'Ṓ'), + ('Ṕ', 'Ṕ'), + ('Ṗ', 'Ṗ'), + ('Ṙ', 'Ṙ'), + ('Ṛ', 'Ṛ'), + ('Ṝ', 'Ṝ'), + ('Ṟ', 'Ṟ'), + ('Ṡ', 'Ṡ'), + ('Ṣ', 'Ṣ'), + ('Ṥ', 'Ṥ'), + ('Ṧ', 'Ṧ'), + ('Ṩ', 'Ṩ'), + ('Ṫ', 'Ṫ'), + ('Ṭ', 'Ṭ'), + ('Ṯ', 
'Ṯ'), + ('Ṱ', 'Ṱ'), + ('Ṳ', 'Ṳ'), + ('Ṵ', 'Ṵ'), + ('Ṷ', 'Ṷ'), + ('Ṹ', 'Ṹ'), + ('Ṻ', 'Ṻ'), + ('Ṽ', 'Ṽ'), + ('Ṿ', 'Ṿ'), + ('Ẁ', 'Ẁ'), + ('Ẃ', 'Ẃ'), + ('Ẅ', 'Ẅ'), + ('Ẇ', 'Ẇ'), + ('Ẉ', 'Ẉ'), + ('Ẋ', 'Ẋ'), + ('Ẍ', 'Ẍ'), + ('Ẏ', 'Ẏ'), + ('Ẑ', 'Ẑ'), + ('Ẓ', 'Ẓ'), + ('Ẕ', 'Ẕ'), + ('ẚ', 'ẛ'), + ('ẞ', 'ẞ'), + ('Ạ', 'Ạ'), + ('Ả', 'Ả'), + ('Ấ', 'Ấ'), + ('Ầ', 'Ầ'), + ('Ẩ', 'Ẩ'), + ('Ẫ', 'Ẫ'), + ('Ậ', 'Ậ'), + ('Ắ', 'Ắ'), + ('Ằ', 'Ằ'), + ('Ẳ', 'Ẳ'), + ('Ẵ', 'Ẵ'), + ('Ặ', 'Ặ'), + ('Ẹ', 'Ẹ'), + ('Ẻ', 'Ẻ'), + ('Ẽ', 'Ẽ'), + ('Ế', 'Ế'), + ('Ề', 'Ề'), + ('Ể', 'Ể'), + ('Ễ', 'Ễ'), + ('Ệ', 'Ệ'), + ('Ỉ', 'Ỉ'), + ('Ị', 'Ị'), + ('Ọ', 'Ọ'), + ('Ỏ', 'Ỏ'), + ('Ố', 'Ố'), + ('Ồ', 'Ồ'), + ('Ổ', 'Ổ'), + ('Ỗ', 'Ỗ'), + ('Ộ', 'Ộ'), + ('Ớ', 'Ớ'), + ('Ờ', 'Ờ'), + ('Ở', 'Ở'), + ('Ỡ', 'Ỡ'), + ('Ợ', 'Ợ'), + ('Ụ', 'Ụ'), + ('Ủ', 'Ủ'), + ('Ứ', 'Ứ'), + ('Ừ', 'Ừ'), + ('Ử', 'Ử'), + ('Ữ', 'Ữ'), + ('Ự', 'Ự'), + ('Ỳ', 'Ỳ'), + ('Ỵ', 'Ỵ'), + ('Ỷ', 'Ỷ'), + ('Ỹ', 'Ỹ'), + ('Ỻ', 'Ỻ'), + ('Ỽ', 'Ỽ'), + ('Ỿ', 'Ỿ'), + ('Ἀ', 'Ἇ'), + ('Ἐ', 'Ἕ'), + ('Ἠ', 'Ἧ'), + ('Ἰ', 'Ἷ'), + ('Ὀ', 'Ὅ'), + ('Ὑ', 'Ὑ'), + ('Ὓ', 'Ὓ'), + ('Ὕ', 'Ὕ'), + ('Ὗ', 'Ὗ'), + ('Ὠ', 'Ὧ'), + ('ᾀ', 'ᾯ'), + ('ᾲ', 'ᾴ'), + ('ᾷ', 'ᾼ'), + ('ῂ', 'ῄ'), + ('ῇ', 'ῌ'), + ('Ῐ', 'Ί'), + ('Ῠ', 'Ῥ'), + ('ῲ', 'ῴ'), + ('ῷ', 'ῼ'), + ('Ω', 'Ω'), + ('K', 'Å'), + ('Ⅎ', 'Ⅎ'), + ('Ⅰ', 'Ⅿ'), + ('Ↄ', 'Ↄ'), + ('Ⓐ', 'Ⓩ'), + ('Ⰰ', 'Ⱟ'), + ('Ⱡ', 'Ⱡ'), + ('Ɫ', 'Ɽ'), + ('Ⱨ', 'Ⱨ'), + ('Ⱪ', 'Ⱪ'), + ('Ⱬ', 'Ⱬ'), + ('Ɑ', 'Ɒ'), + ('Ⱳ', 'Ⱳ'), + ('Ⱶ', 'Ⱶ'), + ('Ȿ', 'Ⲁ'), + ('Ⲃ', 'Ⲃ'), + ('Ⲅ', 'Ⲅ'), + ('Ⲇ', 'Ⲇ'), + ('Ⲉ', 'Ⲉ'), + ('Ⲋ', 'Ⲋ'), + ('Ⲍ', 'Ⲍ'), + ('Ⲏ', 'Ⲏ'), + ('Ⲑ', 'Ⲑ'), + ('Ⲓ', 'Ⲓ'), + ('Ⲕ', 'Ⲕ'), + ('Ⲗ', 'Ⲗ'), + ('Ⲙ', 'Ⲙ'), + ('Ⲛ', 'Ⲛ'), + ('Ⲝ', 'Ⲝ'), + ('Ⲟ', 'Ⲟ'), + ('Ⲡ', 'Ⲡ'), + ('Ⲣ', 'Ⲣ'), + ('Ⲥ', 'Ⲥ'), + ('Ⲧ', 'Ⲧ'), + ('Ⲩ', 'Ⲩ'), + ('Ⲫ', 'Ⲫ'), + ('Ⲭ', 'Ⲭ'), + ('Ⲯ', 'Ⲯ'), + ('Ⲱ', 'Ⲱ'), + ('Ⲳ', 'Ⲳ'), + ('Ⲵ', 'Ⲵ'), + ('Ⲷ', 'Ⲷ'), + ('Ⲹ', 'Ⲹ'), + ('Ⲻ', 'Ⲻ'), + ('Ⲽ', 'Ⲽ'), + ('Ⲿ', 'Ⲿ'), + ('Ⳁ', 'Ⳁ'), + ('Ⳃ', 'Ⳃ'), + ('Ⳅ', 'Ⳅ'), + ('Ⳇ', 'Ⳇ'), + ('Ⳉ', 'Ⳉ'), + ('Ⳋ', 'Ⳋ'), + ('Ⳍ', 'Ⳍ'), + ('Ⳏ', 'Ⳏ'), + ('Ⳑ', 'Ⳑ'), + ('Ⳓ', 'Ⳓ'), + ('Ⳕ', 'Ⳕ'), + ('Ⳗ', 'Ⳗ'), + ('Ⳙ', 'Ⳙ'), + ('Ⳛ', 'Ⳛ'), + ('Ⳝ', 'Ⳝ'), + ('Ⳟ', 'Ⳟ'), + ('Ⳡ', 'Ⳡ'), + ('Ⳣ', 'Ⳣ'), + ('Ⳬ', 'Ⳬ'), + ('Ⳮ', 'Ⳮ'), + ('Ⳳ', 'Ⳳ'), + ('Ꙁ', 'Ꙁ'), + ('Ꙃ', 'Ꙃ'), + ('Ꙅ', 'Ꙅ'), + ('Ꙇ', 'Ꙇ'), + ('Ꙉ', 'Ꙉ'), + ('Ꙋ', 'Ꙋ'), + ('Ꙍ', 'Ꙍ'), + ('Ꙏ', 'Ꙏ'), + ('Ꙑ', 'Ꙑ'), + ('Ꙓ', 'Ꙓ'), + ('Ꙕ', 'Ꙕ'), + ('Ꙗ', 'Ꙗ'), + ('Ꙙ', 'Ꙙ'), + ('Ꙛ', 'Ꙛ'), + ('Ꙝ', 'Ꙝ'), + ('Ꙟ', 'Ꙟ'), + ('Ꙡ', 'Ꙡ'), + ('Ꙣ', 'Ꙣ'), + ('Ꙥ', 'Ꙥ'), + ('Ꙧ', 'Ꙧ'), + ('Ꙩ', 'Ꙩ'), + ('Ꙫ', 'Ꙫ'), + ('Ꙭ', 'Ꙭ'), + ('Ꚁ', 'Ꚁ'), + ('Ꚃ', 'Ꚃ'), + ('Ꚅ', 'Ꚅ'), + ('Ꚇ', 'Ꚇ'), + ('Ꚉ', 'Ꚉ'), + ('Ꚋ', 'Ꚋ'), + ('Ꚍ', 'Ꚍ'), + ('Ꚏ', 'Ꚏ'), + ('Ꚑ', 'Ꚑ'), + ('Ꚓ', 'Ꚓ'), + ('Ꚕ', 'Ꚕ'), + ('Ꚗ', 'Ꚗ'), + ('Ꚙ', 'Ꚙ'), + ('Ꚛ', 'Ꚛ'), + ('Ꜣ', 'Ꜣ'), + ('Ꜥ', 'Ꜥ'), + ('Ꜧ', 'Ꜧ'), + ('Ꜩ', 'Ꜩ'), + ('Ꜫ', 'Ꜫ'), + ('Ꜭ', 'Ꜭ'), + ('Ꜯ', 'Ꜯ'), + ('Ꜳ', 'Ꜳ'), + ('Ꜵ', 'Ꜵ'), + ('Ꜷ', 'Ꜷ'), + ('Ꜹ', 'Ꜹ'), + ('Ꜻ', 'Ꜻ'), + ('Ꜽ', 'Ꜽ'), + ('Ꜿ', 'Ꜿ'), + ('Ꝁ', 'Ꝁ'), + ('Ꝃ', 'Ꝃ'), + ('Ꝅ', 'Ꝅ'), + ('Ꝇ', 'Ꝇ'), + ('Ꝉ', 'Ꝉ'), + ('Ꝋ', 'Ꝋ'), + ('Ꝍ', 'Ꝍ'), + ('Ꝏ', 'Ꝏ'), + ('Ꝑ', 'Ꝑ'), + ('Ꝓ', 'Ꝓ'), + ('Ꝕ', 'Ꝕ'), + ('Ꝗ', 'Ꝗ'), + ('Ꝙ', 'Ꝙ'), + ('Ꝛ', 'Ꝛ'), + ('Ꝝ', 'Ꝝ'), + ('Ꝟ', 'Ꝟ'), + ('Ꝡ', 'Ꝡ'), + ('Ꝣ', 'Ꝣ'), + ('Ꝥ', 'Ꝥ'), + ('Ꝧ', 'Ꝧ'), + ('Ꝩ', 'Ꝩ'), + ('Ꝫ', 'Ꝫ'), + ('Ꝭ', 'Ꝭ'), + ('Ꝯ', 'Ꝯ'), + ('Ꝺ', 'Ꝺ'), + ('Ꝼ', 'Ꝼ'), + ('Ᵹ', 'Ꝿ'), + ('Ꞁ', 'Ꞁ'), + ('Ꞃ', 'Ꞃ'), + ('Ꞅ', 'Ꞅ'), + ('Ꞇ', 'Ꞇ'), + ('Ꞌ', 'Ꞌ'), + ('Ɥ', 'Ɥ'), + ('Ꞑ', 'Ꞑ'), + ('Ꞓ', 'Ꞓ'), + ('Ꞗ', 'Ꞗ'), + ('Ꞙ', 'Ꞙ'), + ('Ꞛ', 'Ꞛ'), + ('Ꞝ', 'Ꞝ'), + ('Ꞟ', 'Ꞟ'), + ('Ꞡ', 'Ꞡ'), + ('Ꞣ', 'Ꞣ'), + ('Ꞥ', 'Ꞥ'), + ('Ꞧ', 'Ꞧ'), + ('Ꞩ', 'Ꞩ'), + ('Ɦ', 'Ɪ'), + 
('Ʞ', 'Ꞵ'), + ('Ꞷ', 'Ꞷ'), + ('Ꞹ', 'Ꞹ'), + ('Ꞻ', 'Ꞻ'), + ('Ꞽ', 'Ꞽ'), + ('Ꞿ', 'Ꞿ'), + ('Ꟁ', 'Ꟁ'), + ('Ꟃ', 'Ꟃ'), + ('Ꞔ', 'Ꟈ'), + ('Ꟊ', 'Ꟊ'), + ('Ɤ', 'Ꟍ'), + ('Ꟑ', 'Ꟑ'), + ('Ꟗ', 'Ꟗ'), + ('Ꟙ', 'Ꟙ'), + ('Ꟛ', 'Ꟛ'), + ('Ƛ', 'Ƛ'), + ('Ꟶ', 'Ꟶ'), + ('ꭰ', 'ꮿ'), + ('ff', 'st'), + ('ﬓ', 'ﬗ'), + ('A', 'Z'), + ('𐐀', '𐐧'), + ('𐒰', '𐓓'), + ('𐕰', '𐕺'), + ('𐕼', '𐖊'), + ('𐖌', '𐖒'), + ('𐖔', '𐖕'), + ('𐲀', '𐲲'), + ('𐵐', '𐵥'), + ('𑢠', '𑢿'), + ('𖹀', '𖹟'), + ('𞤀', '𞤡'), +]; + +pub const CHANGES_WHEN_CASEMAPPED: &'static [(char, char)] = &[ + ('A', 'Z'), + ('a', 'z'), + ('µ', 'µ'), + ('À', 'Ö'), + ('Ø', 'ö'), + ('ø', 'ķ'), + ('Ĺ', 'ƌ'), + ('Ǝ', 'Ʃ'), + ('Ƭ', 'ƹ'), + ('Ƽ', 'ƽ'), + ('ƿ', 'ƿ'), + ('DŽ', 'Ƞ'), + ('Ȣ', 'ȳ'), + ('Ⱥ', 'ɔ'), + ('ɖ', 'ɗ'), + ('ə', 'ə'), + ('ɛ', 'ɜ'), + ('ɠ', 'ɡ'), + ('ɣ', 'ɦ'), + ('ɨ', 'ɬ'), + ('ɯ', 'ɯ'), + ('ɱ', 'ɲ'), + ('ɵ', 'ɵ'), + ('ɽ', 'ɽ'), + ('ʀ', 'ʀ'), + ('ʂ', 'ʃ'), + ('ʇ', 'ʌ'), + ('ʒ', 'ʒ'), + ('ʝ', 'ʞ'), + ('\u{345}', '\u{345}'), + ('Ͱ', 'ͳ'), + ('Ͷ', 'ͷ'), + ('ͻ', 'ͽ'), + ('Ϳ', 'Ϳ'), + ('Ά', 'Ά'), + ('Έ', 'Ί'), + ('Ό', 'Ό'), + ('Ύ', 'Ρ'), + ('Σ', 'ϑ'), + ('ϕ', 'ϵ'), + ('Ϸ', 'ϻ'), + ('Ͻ', 'ҁ'), + ('Ҋ', 'ԯ'), + ('Ա', 'Ֆ'), + ('ա', 'և'), + ('Ⴀ', 'Ⴥ'), + ('Ⴧ', 'Ⴧ'), + ('Ⴭ', 'Ⴭ'), + ('ა', 'ჺ'), + ('ჽ', 'ჿ'), + ('Ꭰ', 'Ᏽ'), + ('ᏸ', 'ᏽ'), + ('ᲀ', 'ᲊ'), + ('Ა', 'Ჺ'), + ('Ჽ', 'Ჿ'), + ('ᵹ', 'ᵹ'), + ('ᵽ', 'ᵽ'), + ('ᶎ', 'ᶎ'), + ('Ḁ', 'ẛ'), + ('ẞ', 'ẞ'), + ('Ạ', 'ἕ'), + ('Ἐ', 'Ἕ'), + ('ἠ', 'ὅ'), + ('Ὀ', 'Ὅ'), + ('ὐ', 'ὗ'), + ('Ὑ', 'Ὑ'), + ('Ὓ', 'Ὓ'), + ('Ὕ', 'Ὕ'), + ('Ὗ', 'ώ'), + ('ᾀ', 'ᾴ'), + ('ᾶ', 'ᾼ'), + ('ι', 'ι'), + ('ῂ', 'ῄ'), + ('ῆ', 'ῌ'), + ('ῐ', 'ΐ'), + ('ῖ', 'Ί'), + ('ῠ', 'Ῥ'), + ('ῲ', 'ῴ'), + ('ῶ', 'ῼ'), + ('Ω', 'Ω'), + ('K', 'Å'), + ('Ⅎ', 'Ⅎ'), + ('ⅎ', 'ⅎ'), + ('Ⅰ', 'ⅿ'), + ('Ↄ', 'ↄ'), + ('Ⓐ', 'ⓩ'), + ('Ⰰ', 'Ɒ'), + ('Ⱳ', 'ⱳ'), + ('Ⱶ', 'ⱶ'), + ('Ȿ', 'ⳣ'), + ('Ⳬ', 'ⳮ'), + ('Ⳳ', 'ⳳ'), + ('ⴀ', 'ⴥ'), + ('ⴧ', 'ⴧ'), + ('ⴭ', 'ⴭ'), + ('Ꙁ', 'ꙭ'), + ('Ꚁ', 'ꚛ'), + ('Ꜣ', 'ꜯ'), + ('Ꜳ', 'ꝯ'), + ('Ꝺ', 'ꞇ'), + ('Ꞌ', 'Ɥ'), + ('Ꞑ', 'ꞔ'), + ('Ꞗ', 'Ɪ'), + ('Ʞ', 'ꟍ'), + ('Ꟑ', 'ꟑ'), + ('Ꟗ', 'Ƛ'), + ('Ꟶ', 'ꟶ'), + ('ꭓ', 'ꭓ'), + ('ꭰ', 'ꮿ'), + ('ff', 'st'), + ('ﬓ', 'ﬗ'), + ('A', 'Z'), + ('a', 'z'), + ('𐐀', '𐑏'), + ('𐒰', '𐓓'), + ('𐓘', '𐓻'), + ('𐕰', '𐕺'), + ('𐕼', '𐖊'), + ('𐖌', '𐖒'), + ('𐖔', '𐖕'), + ('𐖗', '𐖡'), + ('𐖣', '𐖱'), + ('𐖳', '𐖹'), + ('𐖻', '𐖼'), + ('𐲀', '𐲲'), + ('𐳀', '𐳲'), + ('𐵐', '𐵥'), + ('𐵰', '𐶅'), + ('𑢠', '𑣟'), + ('𖹀', '𖹿'), + ('𞤀', '𞥃'), +]; + +pub const CHANGES_WHEN_LOWERCASED: &'static [(char, char)] = &[ + ('A', 'Z'), + ('À', 'Ö'), + ('Ø', 'Þ'), + ('Ā', 'Ā'), + ('Ă', 'Ă'), + ('Ą', 'Ą'), + ('Ć', 'Ć'), + ('Ĉ', 'Ĉ'), + ('Ċ', 'Ċ'), + ('Č', 'Č'), + ('Ď', 'Ď'), + ('Đ', 'Đ'), + ('Ē', 'Ē'), + ('Ĕ', 'Ĕ'), + ('Ė', 'Ė'), + ('Ę', 'Ę'), + ('Ě', 'Ě'), + ('Ĝ', 'Ĝ'), + ('Ğ', 'Ğ'), + ('Ġ', 'Ġ'), + ('Ģ', 'Ģ'), + ('Ĥ', 'Ĥ'), + ('Ħ', 'Ħ'), + ('Ĩ', 'Ĩ'), + ('Ī', 'Ī'), + ('Ĭ', 'Ĭ'), + ('Į', 'Į'), + ('İ', 'İ'), + ('IJ', 'IJ'), + ('Ĵ', 'Ĵ'), + ('Ķ', 'Ķ'), + ('Ĺ', 'Ĺ'), + ('Ļ', 'Ļ'), + ('Ľ', 'Ľ'), + ('Ŀ', 'Ŀ'), + ('Ł', 'Ł'), + ('Ń', 'Ń'), + ('Ņ', 'Ņ'), + ('Ň', 'Ň'), + ('Ŋ', 'Ŋ'), + ('Ō', 'Ō'), + ('Ŏ', 'Ŏ'), + ('Ő', 'Ő'), + ('Œ', 'Œ'), + ('Ŕ', 'Ŕ'), + ('Ŗ', 'Ŗ'), + ('Ř', 'Ř'), + ('Ś', 'Ś'), + ('Ŝ', 'Ŝ'), + ('Ş', 'Ş'), + ('Š', 'Š'), + ('Ţ', 'Ţ'), + ('Ť', 'Ť'), + ('Ŧ', 'Ŧ'), + ('Ũ', 'Ũ'), + ('Ū', 'Ū'), + ('Ŭ', 'Ŭ'), + ('Ů', 'Ů'), + ('Ű', 'Ű'), + ('Ų', 'Ų'), + ('Ŵ', 'Ŵ'), + ('Ŷ', 'Ŷ'), + ('Ÿ', 'Ź'), + ('Ż', 'Ż'), + ('Ž', 'Ž'), + ('Ɓ', 'Ƃ'), + ('Ƅ', 'Ƅ'), + ('Ɔ', 'Ƈ'), + ('Ɖ', 'Ƌ'), + ('Ǝ', 'Ƒ'), + ('Ɠ', 'Ɣ'), + ('Ɩ', 'Ƙ'), + ('Ɯ', 'Ɲ'), + ('Ɵ', 'Ơ'), + ('Ƣ', 'Ƣ'), + ('Ƥ', 'Ƥ'), + ('Ʀ', 'Ƨ'), + ('Ʃ', 'Ʃ'), + ('Ƭ', 'Ƭ'), + ('Ʈ', 
'Ư'), + ('Ʊ', 'Ƴ'), + ('Ƶ', 'Ƶ'), + ('Ʒ', 'Ƹ'), + ('Ƽ', 'Ƽ'), + ('DŽ', 'Dž'), + ('LJ', 'Lj'), + ('NJ', 'Nj'), + ('Ǎ', 'Ǎ'), + ('Ǐ', 'Ǐ'), + ('Ǒ', 'Ǒ'), + ('Ǔ', 'Ǔ'), + ('Ǖ', 'Ǖ'), + ('Ǘ', 'Ǘ'), + ('Ǚ', 'Ǚ'), + ('Ǜ', 'Ǜ'), + ('Ǟ', 'Ǟ'), + ('Ǡ', 'Ǡ'), + ('Ǣ', 'Ǣ'), + ('Ǥ', 'Ǥ'), + ('Ǧ', 'Ǧ'), + ('Ǩ', 'Ǩ'), + ('Ǫ', 'Ǫ'), + ('Ǭ', 'Ǭ'), + ('Ǯ', 'Ǯ'), + ('DZ', 'Dz'), + ('Ǵ', 'Ǵ'), + ('Ƕ', 'Ǹ'), + ('Ǻ', 'Ǻ'), + ('Ǽ', 'Ǽ'), + ('Ǿ', 'Ǿ'), + ('Ȁ', 'Ȁ'), + ('Ȃ', 'Ȃ'), + ('Ȅ', 'Ȅ'), + ('Ȇ', 'Ȇ'), + ('Ȉ', 'Ȉ'), + ('Ȋ', 'Ȋ'), + ('Ȍ', 'Ȍ'), + ('Ȏ', 'Ȏ'), + ('Ȑ', 'Ȑ'), + ('Ȓ', 'Ȓ'), + ('Ȕ', 'Ȕ'), + ('Ȗ', 'Ȗ'), + ('Ș', 'Ș'), + ('Ț', 'Ț'), + ('Ȝ', 'Ȝ'), + ('Ȟ', 'Ȟ'), + ('Ƞ', 'Ƞ'), + ('Ȣ', 'Ȣ'), + ('Ȥ', 'Ȥ'), + ('Ȧ', 'Ȧ'), + ('Ȩ', 'Ȩ'), + ('Ȫ', 'Ȫ'), + ('Ȭ', 'Ȭ'), + ('Ȯ', 'Ȯ'), + ('Ȱ', 'Ȱ'), + ('Ȳ', 'Ȳ'), + ('Ⱥ', 'Ȼ'), + ('Ƚ', 'Ⱦ'), + ('Ɂ', 'Ɂ'), + ('Ƀ', 'Ɇ'), + ('Ɉ', 'Ɉ'), + ('Ɋ', 'Ɋ'), + ('Ɍ', 'Ɍ'), + ('Ɏ', 'Ɏ'), + ('Ͱ', 'Ͱ'), + ('Ͳ', 'Ͳ'), + ('Ͷ', 'Ͷ'), + ('Ϳ', 'Ϳ'), + ('Ά', 'Ά'), + ('Έ', 'Ί'), + ('Ό', 'Ό'), + ('Ύ', 'Ώ'), + ('Α', 'Ρ'), + ('Σ', 'Ϋ'), + ('Ϗ', 'Ϗ'), + ('Ϙ', 'Ϙ'), + ('Ϛ', 'Ϛ'), + ('Ϝ', 'Ϝ'), + ('Ϟ', 'Ϟ'), + ('Ϡ', 'Ϡ'), + ('Ϣ', 'Ϣ'), + ('Ϥ', 'Ϥ'), + ('Ϧ', 'Ϧ'), + ('Ϩ', 'Ϩ'), + ('Ϫ', 'Ϫ'), + ('Ϭ', 'Ϭ'), + ('Ϯ', 'Ϯ'), + ('ϴ', 'ϴ'), + ('Ϸ', 'Ϸ'), + ('Ϲ', 'Ϻ'), + ('Ͻ', 'Я'), + ('Ѡ', 'Ѡ'), + ('Ѣ', 'Ѣ'), + ('Ѥ', 'Ѥ'), + ('Ѧ', 'Ѧ'), + ('Ѩ', 'Ѩ'), + ('Ѫ', 'Ѫ'), + ('Ѭ', 'Ѭ'), + ('Ѯ', 'Ѯ'), + ('Ѱ', 'Ѱ'), + ('Ѳ', 'Ѳ'), + ('Ѵ', 'Ѵ'), + ('Ѷ', 'Ѷ'), + ('Ѹ', 'Ѹ'), + ('Ѻ', 'Ѻ'), + ('Ѽ', 'Ѽ'), + ('Ѿ', 'Ѿ'), + ('Ҁ', 'Ҁ'), + ('Ҋ', 'Ҋ'), + ('Ҍ', 'Ҍ'), + ('Ҏ', 'Ҏ'), + ('Ґ', 'Ґ'), + ('Ғ', 'Ғ'), + ('Ҕ', 'Ҕ'), + ('Җ', 'Җ'), + ('Ҙ', 'Ҙ'), + ('Қ', 'Қ'), + ('Ҝ', 'Ҝ'), + ('Ҟ', 'Ҟ'), + ('Ҡ', 'Ҡ'), + ('Ң', 'Ң'), + ('Ҥ', 'Ҥ'), + ('Ҧ', 'Ҧ'), + ('Ҩ', 'Ҩ'), + ('Ҫ', 'Ҫ'), + ('Ҭ', 'Ҭ'), + ('Ү', 'Ү'), + ('Ұ', 'Ұ'), + ('Ҳ', 'Ҳ'), + ('Ҵ', 'Ҵ'), + ('Ҷ', 'Ҷ'), + ('Ҹ', 'Ҹ'), + ('Һ', 'Һ'), + ('Ҽ', 'Ҽ'), + ('Ҿ', 'Ҿ'), + ('Ӏ', 'Ӂ'), + ('Ӄ', 'Ӄ'), + ('Ӆ', 'Ӆ'), + ('Ӈ', 'Ӈ'), + ('Ӊ', 'Ӊ'), + ('Ӌ', 'Ӌ'), + ('Ӎ', 'Ӎ'), + ('Ӑ', 'Ӑ'), + ('Ӓ', 'Ӓ'), + ('Ӕ', 'Ӕ'), + ('Ӗ', 'Ӗ'), + ('Ә', 'Ә'), + ('Ӛ', 'Ӛ'), + ('Ӝ', 'Ӝ'), + ('Ӟ', 'Ӟ'), + ('Ӡ', 'Ӡ'), + ('Ӣ', 'Ӣ'), + ('Ӥ', 'Ӥ'), + ('Ӧ', 'Ӧ'), + ('Ө', 'Ө'), + ('Ӫ', 'Ӫ'), + ('Ӭ', 'Ӭ'), + ('Ӯ', 'Ӯ'), + ('Ӱ', 'Ӱ'), + ('Ӳ', 'Ӳ'), + ('Ӵ', 'Ӵ'), + ('Ӷ', 'Ӷ'), + ('Ӹ', 'Ӹ'), + ('Ӻ', 'Ӻ'), + ('Ӽ', 'Ӽ'), + ('Ӿ', 'Ӿ'), + ('Ԁ', 'Ԁ'), + ('Ԃ', 'Ԃ'), + ('Ԅ', 'Ԅ'), + ('Ԇ', 'Ԇ'), + ('Ԉ', 'Ԉ'), + ('Ԋ', 'Ԋ'), + ('Ԍ', 'Ԍ'), + ('Ԏ', 'Ԏ'), + ('Ԑ', 'Ԑ'), + ('Ԓ', 'Ԓ'), + ('Ԕ', 'Ԕ'), + ('Ԗ', 'Ԗ'), + ('Ԙ', 'Ԙ'), + ('Ԛ', 'Ԛ'), + ('Ԝ', 'Ԝ'), + ('Ԟ', 'Ԟ'), + ('Ԡ', 'Ԡ'), + ('Ԣ', 'Ԣ'), + ('Ԥ', 'Ԥ'), + ('Ԧ', 'Ԧ'), + ('Ԩ', 'Ԩ'), + ('Ԫ', 'Ԫ'), + ('Ԭ', 'Ԭ'), + ('Ԯ', 'Ԯ'), + ('Ա', 'Ֆ'), + ('Ⴀ', 'Ⴥ'), + ('Ⴧ', 'Ⴧ'), + ('Ⴭ', 'Ⴭ'), + ('Ꭰ', 'Ᏽ'), + ('Ᲊ', 'Ᲊ'), + ('Ა', 'Ჺ'), + ('Ჽ', 'Ჿ'), + ('Ḁ', 'Ḁ'), + ('Ḃ', 'Ḃ'), + ('Ḅ', 'Ḅ'), + ('Ḇ', 'Ḇ'), + ('Ḉ', 'Ḉ'), + ('Ḋ', 'Ḋ'), + ('Ḍ', 'Ḍ'), + ('Ḏ', 'Ḏ'), + ('Ḑ', 'Ḑ'), + ('Ḓ', 'Ḓ'), + ('Ḕ', 'Ḕ'), + ('Ḗ', 'Ḗ'), + ('Ḙ', 'Ḙ'), + ('Ḛ', 'Ḛ'), + ('Ḝ', 'Ḝ'), + ('Ḟ', 'Ḟ'), + ('Ḡ', 'Ḡ'), + ('Ḣ', 'Ḣ'), + ('Ḥ', 'Ḥ'), + ('Ḧ', 'Ḧ'), + ('Ḩ', 'Ḩ'), + ('Ḫ', 'Ḫ'), + ('Ḭ', 'Ḭ'), + ('Ḯ', 'Ḯ'), + ('Ḱ', 'Ḱ'), + ('Ḳ', 'Ḳ'), + ('Ḵ', 'Ḵ'), + ('Ḷ', 'Ḷ'), + ('Ḹ', 'Ḹ'), + ('Ḻ', 'Ḻ'), + ('Ḽ', 'Ḽ'), + ('Ḿ', 'Ḿ'), + ('Ṁ', 'Ṁ'), + ('Ṃ', 'Ṃ'), + ('Ṅ', 'Ṅ'), + ('Ṇ', 'Ṇ'), + ('Ṉ', 'Ṉ'), + ('Ṋ', 'Ṋ'), + ('Ṍ', 'Ṍ'), + ('Ṏ', 'Ṏ'), + ('Ṑ', 'Ṑ'), + ('Ṓ', 'Ṓ'), + ('Ṕ', 'Ṕ'), + ('Ṗ', 'Ṗ'), + ('Ṙ', 'Ṙ'), + ('Ṛ', 'Ṛ'), + ('Ṝ', 'Ṝ'), + ('Ṟ', 'Ṟ'), + ('Ṡ', 'Ṡ'), + ('Ṣ', 'Ṣ'), + ('Ṥ', 'Ṥ'), + ('Ṧ', 'Ṧ'), + ('Ṩ', 'Ṩ'), + ('Ṫ', 'Ṫ'), + ('Ṭ', 
'Ṭ'), + ('Ṯ', 'Ṯ'), + ('Ṱ', 'Ṱ'), + ('Ṳ', 'Ṳ'), + ('Ṵ', 'Ṵ'), + ('Ṷ', 'Ṷ'), + ('Ṹ', 'Ṹ'), + ('Ṻ', 'Ṻ'), + ('Ṽ', 'Ṽ'), + ('Ṿ', 'Ṿ'), + ('Ẁ', 'Ẁ'), + ('Ẃ', 'Ẃ'), + ('Ẅ', 'Ẅ'), + ('Ẇ', 'Ẇ'), + ('Ẉ', 'Ẉ'), + ('Ẋ', 'Ẋ'), + ('Ẍ', 'Ẍ'), + ('Ẏ', 'Ẏ'), + ('Ẑ', 'Ẑ'), + ('Ẓ', 'Ẓ'), + ('Ẕ', 'Ẕ'), + ('ẞ', 'ẞ'), + ('Ạ', 'Ạ'), + ('Ả', 'Ả'), + ('Ấ', 'Ấ'), + ('Ầ', 'Ầ'), + ('Ẩ', 'Ẩ'), + ('Ẫ', 'Ẫ'), + ('Ậ', 'Ậ'), + ('Ắ', 'Ắ'), + ('Ằ', 'Ằ'), + ('Ẳ', 'Ẳ'), + ('Ẵ', 'Ẵ'), + ('Ặ', 'Ặ'), + ('Ẹ', 'Ẹ'), + ('Ẻ', 'Ẻ'), + ('Ẽ', 'Ẽ'), + ('Ế', 'Ế'), + ('Ề', 'Ề'), + ('Ể', 'Ể'), + ('Ễ', 'Ễ'), + ('Ệ', 'Ệ'), + ('Ỉ', 'Ỉ'), + ('Ị', 'Ị'), + ('Ọ', 'Ọ'), + ('Ỏ', 'Ỏ'), + ('Ố', 'Ố'), + ('Ồ', 'Ồ'), + ('Ổ', 'Ổ'), + ('Ỗ', 'Ỗ'), + ('Ộ', 'Ộ'), + ('Ớ', 'Ớ'), + ('Ờ', 'Ờ'), + ('Ở', 'Ở'), + ('Ỡ', 'Ỡ'), + ('Ợ', 'Ợ'), + ('Ụ', 'Ụ'), + ('Ủ', 'Ủ'), + ('Ứ', 'Ứ'), + ('Ừ', 'Ừ'), + ('Ử', 'Ử'), + ('Ữ', 'Ữ'), + ('Ự', 'Ự'), + ('Ỳ', 'Ỳ'), + ('Ỵ', 'Ỵ'), + ('Ỷ', 'Ỷ'), + ('Ỹ', 'Ỹ'), + ('Ỻ', 'Ỻ'), + ('Ỽ', 'Ỽ'), + ('Ỿ', 'Ỿ'), + ('Ἀ', 'Ἇ'), + ('Ἐ', 'Ἕ'), + ('Ἠ', 'Ἧ'), + ('Ἰ', 'Ἷ'), + ('Ὀ', 'Ὅ'), + ('Ὑ', 'Ὑ'), + ('Ὓ', 'Ὓ'), + ('Ὕ', 'Ὕ'), + ('Ὗ', 'Ὗ'), + ('Ὠ', 'Ὧ'), + ('ᾈ', 'ᾏ'), + ('ᾘ', 'ᾟ'), + ('ᾨ', 'ᾯ'), + ('Ᾰ', 'ᾼ'), + ('Ὲ', 'ῌ'), + ('Ῐ', 'Ί'), + ('Ῠ', 'Ῥ'), + ('Ὸ', 'ῼ'), + ('Ω', 'Ω'), + ('K', 'Å'), + ('Ⅎ', 'Ⅎ'), + ('Ⅰ', 'Ⅿ'), + ('Ↄ', 'Ↄ'), + ('Ⓐ', 'Ⓩ'), + ('Ⰰ', 'Ⱟ'), + ('Ⱡ', 'Ⱡ'), + ('Ɫ', 'Ɽ'), + ('Ⱨ', 'Ⱨ'), + ('Ⱪ', 'Ⱪ'), + ('Ⱬ', 'Ⱬ'), + ('Ɑ', 'Ɒ'), + ('Ⱳ', 'Ⱳ'), + ('Ⱶ', 'Ⱶ'), + ('Ȿ', 'Ⲁ'), + ('Ⲃ', 'Ⲃ'), + ('Ⲅ', 'Ⲅ'), + ('Ⲇ', 'Ⲇ'), + ('Ⲉ', 'Ⲉ'), + ('Ⲋ', 'Ⲋ'), + ('Ⲍ', 'Ⲍ'), + ('Ⲏ', 'Ⲏ'), + ('Ⲑ', 'Ⲑ'), + ('Ⲓ', 'Ⲓ'), + ('Ⲕ', 'Ⲕ'), + ('Ⲗ', 'Ⲗ'), + ('Ⲙ', 'Ⲙ'), + ('Ⲛ', 'Ⲛ'), + ('Ⲝ', 'Ⲝ'), + ('Ⲟ', 'Ⲟ'), + ('Ⲡ', 'Ⲡ'), + ('Ⲣ', 'Ⲣ'), + ('Ⲥ', 'Ⲥ'), + ('Ⲧ', 'Ⲧ'), + ('Ⲩ', 'Ⲩ'), + ('Ⲫ', 'Ⲫ'), + ('Ⲭ', 'Ⲭ'), + ('Ⲯ', 'Ⲯ'), + ('Ⲱ', 'Ⲱ'), + ('Ⲳ', 'Ⲳ'), + ('Ⲵ', 'Ⲵ'), + ('Ⲷ', 'Ⲷ'), + ('Ⲹ', 'Ⲹ'), + ('Ⲻ', 'Ⲻ'), + ('Ⲽ', 'Ⲽ'), + ('Ⲿ', 'Ⲿ'), + ('Ⳁ', 'Ⳁ'), + ('Ⳃ', 'Ⳃ'), + ('Ⳅ', 'Ⳅ'), + ('Ⳇ', 'Ⳇ'), + ('Ⳉ', 'Ⳉ'), + ('Ⳋ', 'Ⳋ'), + ('Ⳍ', 'Ⳍ'), + ('Ⳏ', 'Ⳏ'), + ('Ⳑ', 'Ⳑ'), + ('Ⳓ', 'Ⳓ'), + ('Ⳕ', 'Ⳕ'), + ('Ⳗ', 'Ⳗ'), + ('Ⳙ', 'Ⳙ'), + ('Ⳛ', 'Ⳛ'), + ('Ⳝ', 'Ⳝ'), + ('Ⳟ', 'Ⳟ'), + ('Ⳡ', 'Ⳡ'), + ('Ⳣ', 'Ⳣ'), + ('Ⳬ', 'Ⳬ'), + ('Ⳮ', 'Ⳮ'), + ('Ⳳ', 'Ⳳ'), + ('Ꙁ', 'Ꙁ'), + ('Ꙃ', 'Ꙃ'), + ('Ꙅ', 'Ꙅ'), + ('Ꙇ', 'Ꙇ'), + ('Ꙉ', 'Ꙉ'), + ('Ꙋ', 'Ꙋ'), + ('Ꙍ', 'Ꙍ'), + ('Ꙏ', 'Ꙏ'), + ('Ꙑ', 'Ꙑ'), + ('Ꙓ', 'Ꙓ'), + ('Ꙕ', 'Ꙕ'), + ('Ꙗ', 'Ꙗ'), + ('Ꙙ', 'Ꙙ'), + ('Ꙛ', 'Ꙛ'), + ('Ꙝ', 'Ꙝ'), + ('Ꙟ', 'Ꙟ'), + ('Ꙡ', 'Ꙡ'), + ('Ꙣ', 'Ꙣ'), + ('Ꙥ', 'Ꙥ'), + ('Ꙧ', 'Ꙧ'), + ('Ꙩ', 'Ꙩ'), + ('Ꙫ', 'Ꙫ'), + ('Ꙭ', 'Ꙭ'), + ('Ꚁ', 'Ꚁ'), + ('Ꚃ', 'Ꚃ'), + ('Ꚅ', 'Ꚅ'), + ('Ꚇ', 'Ꚇ'), + ('Ꚉ', 'Ꚉ'), + ('Ꚋ', 'Ꚋ'), + ('Ꚍ', 'Ꚍ'), + ('Ꚏ', 'Ꚏ'), + ('Ꚑ', 'Ꚑ'), + ('Ꚓ', 'Ꚓ'), + ('Ꚕ', 'Ꚕ'), + ('Ꚗ', 'Ꚗ'), + ('Ꚙ', 'Ꚙ'), + ('Ꚛ', 'Ꚛ'), + ('Ꜣ', 'Ꜣ'), + ('Ꜥ', 'Ꜥ'), + ('Ꜧ', 'Ꜧ'), + ('Ꜩ', 'Ꜩ'), + ('Ꜫ', 'Ꜫ'), + ('Ꜭ', 'Ꜭ'), + ('Ꜯ', 'Ꜯ'), + ('Ꜳ', 'Ꜳ'), + ('Ꜵ', 'Ꜵ'), + ('Ꜷ', 'Ꜷ'), + ('Ꜹ', 'Ꜹ'), + ('Ꜻ', 'Ꜻ'), + ('Ꜽ', 'Ꜽ'), + ('Ꜿ', 'Ꜿ'), + ('Ꝁ', 'Ꝁ'), + ('Ꝃ', 'Ꝃ'), + ('Ꝅ', 'Ꝅ'), + ('Ꝇ', 'Ꝇ'), + ('Ꝉ', 'Ꝉ'), + ('Ꝋ', 'Ꝋ'), + ('Ꝍ', 'Ꝍ'), + ('Ꝏ', 'Ꝏ'), + ('Ꝑ', 'Ꝑ'), + ('Ꝓ', 'Ꝓ'), + ('Ꝕ', 'Ꝕ'), + ('Ꝗ', 'Ꝗ'), + ('Ꝙ', 'Ꝙ'), + ('Ꝛ', 'Ꝛ'), + ('Ꝝ', 'Ꝝ'), + ('Ꝟ', 'Ꝟ'), + ('Ꝡ', 'Ꝡ'), + ('Ꝣ', 'Ꝣ'), + ('Ꝥ', 'Ꝥ'), + ('Ꝧ', 'Ꝧ'), + ('Ꝩ', 'Ꝩ'), + ('Ꝫ', 'Ꝫ'), + ('Ꝭ', 'Ꝭ'), + ('Ꝯ', 'Ꝯ'), + ('Ꝺ', 'Ꝺ'), + ('Ꝼ', 'Ꝼ'), + ('Ᵹ', 'Ꝿ'), + ('Ꞁ', 'Ꞁ'), + ('Ꞃ', 'Ꞃ'), + ('Ꞅ', 'Ꞅ'), + ('Ꞇ', 'Ꞇ'), + ('Ꞌ', 'Ꞌ'), + ('Ɥ', 'Ɥ'), + ('Ꞑ', 'Ꞑ'), + ('Ꞓ', 'Ꞓ'), + ('Ꞗ', 'Ꞗ'), + ('Ꞙ', 'Ꞙ'), + ('Ꞛ', 'Ꞛ'), + ('Ꞝ', 'Ꞝ'), + ('Ꞟ', 'Ꞟ'), + ('Ꞡ', 'Ꞡ'), + ('Ꞣ', 'Ꞣ'), + ('Ꞥ', 'Ꞥ'), + ('Ꞧ', 'Ꞧ'), + ('Ꞩ', 'Ꞩ'), + ('Ɦ', 'Ɪ'), + ('Ʞ', 'Ꞵ'), + 
('Ꞷ', 'Ꞷ'), + ('Ꞹ', 'Ꞹ'), + ('Ꞻ', 'Ꞻ'), + ('Ꞽ', 'Ꞽ'), + ('Ꞿ', 'Ꞿ'), + ('Ꟁ', 'Ꟁ'), + ('Ꟃ', 'Ꟃ'), + ('Ꞔ', 'Ꟈ'), + ('Ꟊ', 'Ꟊ'), + ('Ɤ', 'Ꟍ'), + ('Ꟑ', 'Ꟑ'), + ('Ꟗ', 'Ꟗ'), + ('Ꟙ', 'Ꟙ'), + ('Ꟛ', 'Ꟛ'), + ('Ƛ', 'Ƛ'), + ('Ꟶ', 'Ꟶ'), + ('A', 'Z'), + ('𐐀', '𐐧'), + ('𐒰', '𐓓'), + ('𐕰', '𐕺'), + ('𐕼', '𐖊'), + ('𐖌', '𐖒'), + ('𐖔', '𐖕'), + ('𐲀', '𐲲'), + ('𐵐', '𐵥'), + ('𑢠', '𑢿'), + ('𖹀', '𖹟'), + ('𞤀', '𞤡'), +]; + +pub const CHANGES_WHEN_TITLECASED: &'static [(char, char)] = &[ + ('a', 'z'), + ('µ', 'µ'), + ('ß', 'ö'), + ('ø', 'ÿ'), + ('ā', 'ā'), + ('ă', 'ă'), + ('ą', 'ą'), + ('ć', 'ć'), + ('ĉ', 'ĉ'), + ('ċ', 'ċ'), + ('č', 'č'), + ('ď', 'ď'), + ('đ', 'đ'), + ('ē', 'ē'), + ('ĕ', 'ĕ'), + ('ė', 'ė'), + ('ę', 'ę'), + ('ě', 'ě'), + ('ĝ', 'ĝ'), + ('ğ', 'ğ'), + ('ġ', 'ġ'), + ('ģ', 'ģ'), + ('ĥ', 'ĥ'), + ('ħ', 'ħ'), + ('ĩ', 'ĩ'), + ('ī', 'ī'), + ('ĭ', 'ĭ'), + ('į', 'į'), + ('ı', 'ı'), + ('ij', 'ij'), + ('ĵ', 'ĵ'), + ('ķ', 'ķ'), + ('ĺ', 'ĺ'), + ('ļ', 'ļ'), + ('ľ', 'ľ'), + ('ŀ', 'ŀ'), + ('ł', 'ł'), + ('ń', 'ń'), + ('ņ', 'ņ'), + ('ň', 'ʼn'), + ('ŋ', 'ŋ'), + ('ō', 'ō'), + ('ŏ', 'ŏ'), + ('ő', 'ő'), + ('œ', 'œ'), + ('ŕ', 'ŕ'), + ('ŗ', 'ŗ'), + ('ř', 'ř'), + ('ś', 'ś'), + ('ŝ', 'ŝ'), + ('ş', 'ş'), + ('š', 'š'), + ('ţ', 'ţ'), + ('ť', 'ť'), + ('ŧ', 'ŧ'), + ('ũ', 'ũ'), + ('ū', 'ū'), + ('ŭ', 'ŭ'), + ('ů', 'ů'), + ('ű', 'ű'), + ('ų', 'ų'), + ('ŵ', 'ŵ'), + ('ŷ', 'ŷ'), + ('ź', 'ź'), + ('ż', 'ż'), + ('ž', 'ƀ'), + ('ƃ', 'ƃ'), + ('ƅ', 'ƅ'), + ('ƈ', 'ƈ'), + ('ƌ', 'ƌ'), + ('ƒ', 'ƒ'), + ('ƕ', 'ƕ'), + ('ƙ', 'ƛ'), + ('ƞ', 'ƞ'), + ('ơ', 'ơ'), + ('ƣ', 'ƣ'), + ('ƥ', 'ƥ'), + ('ƨ', 'ƨ'), + ('ƭ', 'ƭ'), + ('ư', 'ư'), + ('ƴ', 'ƴ'), + ('ƶ', 'ƶ'), + ('ƹ', 'ƹ'), + ('ƽ', 'ƽ'), + ('ƿ', 'ƿ'), + ('DŽ', 'DŽ'), + ('dž', 'LJ'), + ('lj', 'NJ'), + ('nj', 'nj'), + ('ǎ', 'ǎ'), + ('ǐ', 'ǐ'), + ('ǒ', 'ǒ'), + ('ǔ', 'ǔ'), + ('ǖ', 'ǖ'), + ('ǘ', 'ǘ'), + ('ǚ', 'ǚ'), + ('ǜ', 'ǝ'), + ('ǟ', 'ǟ'), + ('ǡ', 'ǡ'), + ('ǣ', 'ǣ'), + ('ǥ', 'ǥ'), + ('ǧ', 'ǧ'), + ('ǩ', 'ǩ'), + ('ǫ', 'ǫ'), + ('ǭ', 'ǭ'), + ('ǯ', 'DZ'), + ('dz', 'dz'), + ('ǵ', 'ǵ'), + ('ǹ', 'ǹ'), + ('ǻ', 'ǻ'), + ('ǽ', 'ǽ'), + ('ǿ', 'ǿ'), + ('ȁ', 'ȁ'), + ('ȃ', 'ȃ'), + ('ȅ', 'ȅ'), + ('ȇ', 'ȇ'), + ('ȉ', 'ȉ'), + ('ȋ', 'ȋ'), + ('ȍ', 'ȍ'), + ('ȏ', 'ȏ'), + ('ȑ', 'ȑ'), + ('ȓ', 'ȓ'), + ('ȕ', 'ȕ'), + ('ȗ', 'ȗ'), + ('ș', 'ș'), + ('ț', 'ț'), + ('ȝ', 'ȝ'), + ('ȟ', 'ȟ'), + ('ȣ', 'ȣ'), + ('ȥ', 'ȥ'), + ('ȧ', 'ȧ'), + ('ȩ', 'ȩ'), + ('ȫ', 'ȫ'), + ('ȭ', 'ȭ'), + ('ȯ', 'ȯ'), + ('ȱ', 'ȱ'), + ('ȳ', 'ȳ'), + ('ȼ', 'ȼ'), + ('ȿ', 'ɀ'), + ('ɂ', 'ɂ'), + ('ɇ', 'ɇ'), + ('ɉ', 'ɉ'), + ('ɋ', 'ɋ'), + ('ɍ', 'ɍ'), + ('ɏ', 'ɔ'), + ('ɖ', 'ɗ'), + ('ə', 'ə'), + ('ɛ', 'ɜ'), + ('ɠ', 'ɡ'), + ('ɣ', 'ɦ'), + ('ɨ', 'ɬ'), + ('ɯ', 'ɯ'), + ('ɱ', 'ɲ'), + ('ɵ', 'ɵ'), + ('ɽ', 'ɽ'), + ('ʀ', 'ʀ'), + ('ʂ', 'ʃ'), + ('ʇ', 'ʌ'), + ('ʒ', 'ʒ'), + ('ʝ', 'ʞ'), + ('\u{345}', '\u{345}'), + ('ͱ', 'ͱ'), + ('ͳ', 'ͳ'), + ('ͷ', 'ͷ'), + ('ͻ', 'ͽ'), + ('ΐ', 'ΐ'), + ('ά', 'ώ'), + ('ϐ', 'ϑ'), + ('ϕ', 'ϗ'), + ('ϙ', 'ϙ'), + ('ϛ', 'ϛ'), + ('ϝ', 'ϝ'), + ('ϟ', 'ϟ'), + ('ϡ', 'ϡ'), + ('ϣ', 'ϣ'), + ('ϥ', 'ϥ'), + ('ϧ', 'ϧ'), + ('ϩ', 'ϩ'), + ('ϫ', 'ϫ'), + ('ϭ', 'ϭ'), + ('ϯ', 'ϳ'), + ('ϵ', 'ϵ'), + ('ϸ', 'ϸ'), + ('ϻ', 'ϻ'), + ('а', 'џ'), + ('ѡ', 'ѡ'), + ('ѣ', 'ѣ'), + ('ѥ', 'ѥ'), + ('ѧ', 'ѧ'), + ('ѩ', 'ѩ'), + ('ѫ', 'ѫ'), + ('ѭ', 'ѭ'), + ('ѯ', 'ѯ'), + ('ѱ', 'ѱ'), + ('ѳ', 'ѳ'), + ('ѵ', 'ѵ'), + ('ѷ', 'ѷ'), + ('ѹ', 'ѹ'), + ('ѻ', 'ѻ'), + ('ѽ', 'ѽ'), + ('ѿ', 'ѿ'), + ('ҁ', 'ҁ'), + ('ҋ', 'ҋ'), + ('ҍ', 'ҍ'), + ('ҏ', 'ҏ'), + ('ґ', 'ґ'), + ('ғ', 'ғ'), + ('ҕ', 'ҕ'), + ('җ', 'җ'), + ('ҙ', 'ҙ'), + ('қ', 'қ'), + ('ҝ', 'ҝ'), + ('ҟ', 'ҟ'), + ('ҡ', 'ҡ'), + ('ң', 'ң'), + ('ҥ', 'ҥ'), + ('ҧ', 'ҧ'), + ('ҩ', 'ҩ'), + ('ҫ', 'ҫ'), + 
('ҭ', 'ҭ'), + ('ү', 'ү'), + ('ұ', 'ұ'), + ('ҳ', 'ҳ'), + ('ҵ', 'ҵ'), + ('ҷ', 'ҷ'), + ('ҹ', 'ҹ'), + ('һ', 'һ'), + ('ҽ', 'ҽ'), + ('ҿ', 'ҿ'), + ('ӂ', 'ӂ'), + ('ӄ', 'ӄ'), + ('ӆ', 'ӆ'), + ('ӈ', 'ӈ'), + ('ӊ', 'ӊ'), + ('ӌ', 'ӌ'), + ('ӎ', 'ӏ'), + ('ӑ', 'ӑ'), + ('ӓ', 'ӓ'), + ('ӕ', 'ӕ'), + ('ӗ', 'ӗ'), + ('ә', 'ә'), + ('ӛ', 'ӛ'), + ('ӝ', 'ӝ'), + ('ӟ', 'ӟ'), + ('ӡ', 'ӡ'), + ('ӣ', 'ӣ'), + ('ӥ', 'ӥ'), + ('ӧ', 'ӧ'), + ('ө', 'ө'), + ('ӫ', 'ӫ'), + ('ӭ', 'ӭ'), + ('ӯ', 'ӯ'), + ('ӱ', 'ӱ'), + ('ӳ', 'ӳ'), + ('ӵ', 'ӵ'), + ('ӷ', 'ӷ'), + ('ӹ', 'ӹ'), + ('ӻ', 'ӻ'), + ('ӽ', 'ӽ'), + ('ӿ', 'ӿ'), + ('ԁ', 'ԁ'), + ('ԃ', 'ԃ'), + ('ԅ', 'ԅ'), + ('ԇ', 'ԇ'), + ('ԉ', 'ԉ'), + ('ԋ', 'ԋ'), + ('ԍ', 'ԍ'), + ('ԏ', 'ԏ'), + ('ԑ', 'ԑ'), + ('ԓ', 'ԓ'), + ('ԕ', 'ԕ'), + ('ԗ', 'ԗ'), + ('ԙ', 'ԙ'), + ('ԛ', 'ԛ'), + ('ԝ', 'ԝ'), + ('ԟ', 'ԟ'), + ('ԡ', 'ԡ'), + ('ԣ', 'ԣ'), + ('ԥ', 'ԥ'), + ('ԧ', 'ԧ'), + ('ԩ', 'ԩ'), + ('ԫ', 'ԫ'), + ('ԭ', 'ԭ'), + ('ԯ', 'ԯ'), + ('ա', 'և'), + ('ᏸ', 'ᏽ'), + ('ᲀ', 'ᲈ'), + ('ᲊ', 'ᲊ'), + ('ᵹ', 'ᵹ'), + ('ᵽ', 'ᵽ'), + ('ᶎ', 'ᶎ'), + ('ḁ', 'ḁ'), + ('ḃ', 'ḃ'), + ('ḅ', 'ḅ'), + ('ḇ', 'ḇ'), + ('ḉ', 'ḉ'), + ('ḋ', 'ḋ'), + ('ḍ', 'ḍ'), + ('ḏ', 'ḏ'), + ('ḑ', 'ḑ'), + ('ḓ', 'ḓ'), + ('ḕ', 'ḕ'), + ('ḗ', 'ḗ'), + ('ḙ', 'ḙ'), + ('ḛ', 'ḛ'), + ('ḝ', 'ḝ'), + ('ḟ', 'ḟ'), + ('ḡ', 'ḡ'), + ('ḣ', 'ḣ'), + ('ḥ', 'ḥ'), + ('ḧ', 'ḧ'), + ('ḩ', 'ḩ'), + ('ḫ', 'ḫ'), + ('ḭ', 'ḭ'), + ('ḯ', 'ḯ'), + ('ḱ', 'ḱ'), + ('ḳ', 'ḳ'), + ('ḵ', 'ḵ'), + ('ḷ', 'ḷ'), + ('ḹ', 'ḹ'), + ('ḻ', 'ḻ'), + ('ḽ', 'ḽ'), + ('ḿ', 'ḿ'), + ('ṁ', 'ṁ'), + ('ṃ', 'ṃ'), + ('ṅ', 'ṅ'), + ('ṇ', 'ṇ'), + ('ṉ', 'ṉ'), + ('ṋ', 'ṋ'), + ('ṍ', 'ṍ'), + ('ṏ', 'ṏ'), + ('ṑ', 'ṑ'), + ('ṓ', 'ṓ'), + ('ṕ', 'ṕ'), + ('ṗ', 'ṗ'), + ('ṙ', 'ṙ'), + ('ṛ', 'ṛ'), + ('ṝ', 'ṝ'), + ('ṟ', 'ṟ'), + ('ṡ', 'ṡ'), + ('ṣ', 'ṣ'), + ('ṥ', 'ṥ'), + ('ṧ', 'ṧ'), + ('ṩ', 'ṩ'), + ('ṫ', 'ṫ'), + ('ṭ', 'ṭ'), + ('ṯ', 'ṯ'), + ('ṱ', 'ṱ'), + ('ṳ', 'ṳ'), + ('ṵ', 'ṵ'), + ('ṷ', 'ṷ'), + ('ṹ', 'ṹ'), + ('ṻ', 'ṻ'), + ('ṽ', 'ṽ'), + ('ṿ', 'ṿ'), + ('ẁ', 'ẁ'), + ('ẃ', 'ẃ'), + ('ẅ', 'ẅ'), + ('ẇ', 'ẇ'), + ('ẉ', 'ẉ'), + ('ẋ', 'ẋ'), + ('ẍ', 'ẍ'), + ('ẏ', 'ẏ'), + ('ẑ', 'ẑ'), + ('ẓ', 'ẓ'), + ('ẕ', 'ẛ'), + ('ạ', 'ạ'), + ('ả', 'ả'), + ('ấ', 'ấ'), + ('ầ', 'ầ'), + ('ẩ', 'ẩ'), + ('ẫ', 'ẫ'), + ('ậ', 'ậ'), + ('ắ', 'ắ'), + ('ằ', 'ằ'), + ('ẳ', 'ẳ'), + ('ẵ', 'ẵ'), + ('ặ', 'ặ'), + ('ẹ', 'ẹ'), + ('ẻ', 'ẻ'), + ('ẽ', 'ẽ'), + ('ế', 'ế'), + ('ề', 'ề'), + ('ể', 'ể'), + ('ễ', 'ễ'), + ('ệ', 'ệ'), + ('ỉ', 'ỉ'), + ('ị', 'ị'), + ('ọ', 'ọ'), + ('ỏ', 'ỏ'), + ('ố', 'ố'), + ('ồ', 'ồ'), + ('ổ', 'ổ'), + ('ỗ', 'ỗ'), + ('ộ', 'ộ'), + ('ớ', 'ớ'), + ('ờ', 'ờ'), + ('ở', 'ở'), + ('ỡ', 'ỡ'), + ('ợ', 'ợ'), + ('ụ', 'ụ'), + ('ủ', 'ủ'), + ('ứ', 'ứ'), + ('ừ', 'ừ'), + ('ử', 'ử'), + ('ữ', 'ữ'), + ('ự', 'ự'), + ('ỳ', 'ỳ'), + ('ỵ', 'ỵ'), + ('ỷ', 'ỷ'), + ('ỹ', 'ỹ'), + ('ỻ', 'ỻ'), + ('ỽ', 'ỽ'), + ('ỿ', 'ἇ'), + ('ἐ', 'ἕ'), + ('ἠ', 'ἧ'), + ('ἰ', 'ἷ'), + ('ὀ', 'ὅ'), + ('ὐ', 'ὗ'), + ('ὠ', 'ὧ'), + ('ὰ', 'ώ'), + ('ᾀ', 'ᾇ'), + ('ᾐ', 'ᾗ'), + ('ᾠ', 'ᾧ'), + ('ᾰ', 'ᾴ'), + ('ᾶ', 'ᾷ'), + ('ι', 'ι'), + ('ῂ', 'ῄ'), + ('ῆ', 'ῇ'), + ('ῐ', 'ΐ'), + ('ῖ', 'ῗ'), + ('ῠ', 'ῧ'), + ('ῲ', 'ῴ'), + ('ῶ', 'ῷ'), + ('ⅎ', 'ⅎ'), + ('ⅰ', 'ⅿ'), + ('ↄ', 'ↄ'), + ('ⓐ', 'ⓩ'), + ('ⰰ', 'ⱟ'), + ('ⱡ', 'ⱡ'), + ('ⱥ', 'ⱦ'), + ('ⱨ', 'ⱨ'), + ('ⱪ', 'ⱪ'), + ('ⱬ', 'ⱬ'), + ('ⱳ', 'ⱳ'), + ('ⱶ', 'ⱶ'), + ('ⲁ', 'ⲁ'), + ('ⲃ', 'ⲃ'), + ('ⲅ', 'ⲅ'), + ('ⲇ', 'ⲇ'), + ('ⲉ', 'ⲉ'), + ('ⲋ', 'ⲋ'), + ('ⲍ', 'ⲍ'), + ('ⲏ', 'ⲏ'), + ('ⲑ', 'ⲑ'), + ('ⲓ', 'ⲓ'), + ('ⲕ', 'ⲕ'), + ('ⲗ', 'ⲗ'), + ('ⲙ', 'ⲙ'), + ('ⲛ', 'ⲛ'), + ('ⲝ', 'ⲝ'), + ('ⲟ', 'ⲟ'), + ('ⲡ', 'ⲡ'), + ('ⲣ', 'ⲣ'), + ('ⲥ', 'ⲥ'), + ('ⲧ', 'ⲧ'), + ('ⲩ', 'ⲩ'), + ('ⲫ', 'ⲫ'), + ('ⲭ', 'ⲭ'), + ('ⲯ', 'ⲯ'), + ('ⲱ', 'ⲱ'), + ('ⲳ', 'ⲳ'), + ('ⲵ', 'ⲵ'), 
+ ('ⲷ', 'ⲷ'), + ('ⲹ', 'ⲹ'), + ('ⲻ', 'ⲻ'), + ('ⲽ', 'ⲽ'), + ('ⲿ', 'ⲿ'), + ('ⳁ', 'ⳁ'), + ('ⳃ', 'ⳃ'), + ('ⳅ', 'ⳅ'), + ('ⳇ', 'ⳇ'), + ('ⳉ', 'ⳉ'), + ('ⳋ', 'ⳋ'), + ('ⳍ', 'ⳍ'), + ('ⳏ', 'ⳏ'), + ('ⳑ', 'ⳑ'), + ('ⳓ', 'ⳓ'), + ('ⳕ', 'ⳕ'), + ('ⳗ', 'ⳗ'), + ('ⳙ', 'ⳙ'), + ('ⳛ', 'ⳛ'), + ('ⳝ', 'ⳝ'), + ('ⳟ', 'ⳟ'), + ('ⳡ', 'ⳡ'), + ('ⳣ', 'ⳣ'), + ('ⳬ', 'ⳬ'), + ('ⳮ', 'ⳮ'), + ('ⳳ', 'ⳳ'), + ('ⴀ', 'ⴥ'), + ('ⴧ', 'ⴧ'), + ('ⴭ', 'ⴭ'), + ('ꙁ', 'ꙁ'), + ('ꙃ', 'ꙃ'), + ('ꙅ', 'ꙅ'), + ('ꙇ', 'ꙇ'), + ('ꙉ', 'ꙉ'), + ('ꙋ', 'ꙋ'), + ('ꙍ', 'ꙍ'), + ('ꙏ', 'ꙏ'), + ('ꙑ', 'ꙑ'), + ('ꙓ', 'ꙓ'), + ('ꙕ', 'ꙕ'), + ('ꙗ', 'ꙗ'), + ('ꙙ', 'ꙙ'), + ('ꙛ', 'ꙛ'), + ('ꙝ', 'ꙝ'), + ('ꙟ', 'ꙟ'), + ('ꙡ', 'ꙡ'), + ('ꙣ', 'ꙣ'), + ('ꙥ', 'ꙥ'), + ('ꙧ', 'ꙧ'), + ('ꙩ', 'ꙩ'), + ('ꙫ', 'ꙫ'), + ('ꙭ', 'ꙭ'), + ('ꚁ', 'ꚁ'), + ('ꚃ', 'ꚃ'), + ('ꚅ', 'ꚅ'), + ('ꚇ', 'ꚇ'), + ('ꚉ', 'ꚉ'), + ('ꚋ', 'ꚋ'), + ('ꚍ', 'ꚍ'), + ('ꚏ', 'ꚏ'), + ('ꚑ', 'ꚑ'), + ('ꚓ', 'ꚓ'), + ('ꚕ', 'ꚕ'), + ('ꚗ', 'ꚗ'), + ('ꚙ', 'ꚙ'), + ('ꚛ', 'ꚛ'), + ('ꜣ', 'ꜣ'), + ('ꜥ', 'ꜥ'), + ('ꜧ', 'ꜧ'), + ('ꜩ', 'ꜩ'), + ('ꜫ', 'ꜫ'), + ('ꜭ', 'ꜭ'), + ('ꜯ', 'ꜯ'), + ('ꜳ', 'ꜳ'), + ('ꜵ', 'ꜵ'), + ('ꜷ', 'ꜷ'), + ('ꜹ', 'ꜹ'), + ('ꜻ', 'ꜻ'), + ('ꜽ', 'ꜽ'), + ('ꜿ', 'ꜿ'), + ('ꝁ', 'ꝁ'), + ('ꝃ', 'ꝃ'), + ('ꝅ', 'ꝅ'), + ('ꝇ', 'ꝇ'), + ('ꝉ', 'ꝉ'), + ('ꝋ', 'ꝋ'), + ('ꝍ', 'ꝍ'), + ('ꝏ', 'ꝏ'), + ('ꝑ', 'ꝑ'), + ('ꝓ', 'ꝓ'), + ('ꝕ', 'ꝕ'), + ('ꝗ', 'ꝗ'), + ('ꝙ', 'ꝙ'), + ('ꝛ', 'ꝛ'), + ('ꝝ', 'ꝝ'), + ('ꝟ', 'ꝟ'), + ('ꝡ', 'ꝡ'), + ('ꝣ', 'ꝣ'), + ('ꝥ', 'ꝥ'), + ('ꝧ', 'ꝧ'), + ('ꝩ', 'ꝩ'), + ('ꝫ', 'ꝫ'), + ('ꝭ', 'ꝭ'), + ('ꝯ', 'ꝯ'), + ('ꝺ', 'ꝺ'), + ('ꝼ', 'ꝼ'), + ('ꝿ', 'ꝿ'), + ('ꞁ', 'ꞁ'), + ('ꞃ', 'ꞃ'), + ('ꞅ', 'ꞅ'), + ('ꞇ', 'ꞇ'), + ('ꞌ', 'ꞌ'), + ('ꞑ', 'ꞑ'), + ('ꞓ', 'ꞔ'), + ('ꞗ', 'ꞗ'), + ('ꞙ', 'ꞙ'), + ('ꞛ', 'ꞛ'), + ('ꞝ', 'ꞝ'), + ('ꞟ', 'ꞟ'), + ('ꞡ', 'ꞡ'), + ('ꞣ', 'ꞣ'), + ('ꞥ', 'ꞥ'), + ('ꞧ', 'ꞧ'), + ('ꞩ', 'ꞩ'), + ('ꞵ', 'ꞵ'), + ('ꞷ', 'ꞷ'), + ('ꞹ', 'ꞹ'), + ('ꞻ', 'ꞻ'), + ('ꞽ', 'ꞽ'), + ('ꞿ', 'ꞿ'), + ('ꟁ', 'ꟁ'), + ('ꟃ', 'ꟃ'), + ('ꟈ', 'ꟈ'), + ('ꟊ', 'ꟊ'), + ('ꟍ', 'ꟍ'), + ('ꟑ', 'ꟑ'), + ('ꟗ', 'ꟗ'), + ('ꟙ', 'ꟙ'), + ('ꟛ', 'ꟛ'), + ('ꟶ', 'ꟶ'), + ('ꭓ', 'ꭓ'), + ('ꭰ', 'ꮿ'), + ('ff', 'st'), + ('ﬓ', 'ﬗ'), + ('a', 'z'), + ('𐐨', '𐑏'), + ('𐓘', '𐓻'), + ('𐖗', '𐖡'), + ('𐖣', '𐖱'), + ('𐖳', '𐖹'), + ('𐖻', '𐖼'), + ('𐳀', '𐳲'), + ('𐵰', '𐶅'), + ('𑣀', '𑣟'), + ('𖹠', '𖹿'), + ('𞤢', '𞥃'), +]; + +pub const CHANGES_WHEN_UPPERCASED: &'static [(char, char)] = &[ + ('a', 'z'), + ('µ', 'µ'), + ('ß', 'ö'), + ('ø', 'ÿ'), + ('ā', 'ā'), + ('ă', 'ă'), + ('ą', 'ą'), + ('ć', 'ć'), + ('ĉ', 'ĉ'), + ('ċ', 'ċ'), + ('č', 'č'), + ('ď', 'ď'), + ('đ', 'đ'), + ('ē', 'ē'), + ('ĕ', 'ĕ'), + ('ė', 'ė'), + ('ę', 'ę'), + ('ě', 'ě'), + ('ĝ', 'ĝ'), + ('ğ', 'ğ'), + ('ġ', 'ġ'), + ('ģ', 'ģ'), + ('ĥ', 'ĥ'), + ('ħ', 'ħ'), + ('ĩ', 'ĩ'), + ('ī', 'ī'), + ('ĭ', 'ĭ'), + ('į', 'į'), + ('ı', 'ı'), + ('ij', 'ij'), + ('ĵ', 'ĵ'), + ('ķ', 'ķ'), + ('ĺ', 'ĺ'), + ('ļ', 'ļ'), + ('ľ', 'ľ'), + ('ŀ', 'ŀ'), + ('ł', 'ł'), + ('ń', 'ń'), + ('ņ', 'ņ'), + ('ň', 'ʼn'), + ('ŋ', 'ŋ'), + ('ō', 'ō'), + ('ŏ', 'ŏ'), + ('ő', 'ő'), + ('œ', 'œ'), + ('ŕ', 'ŕ'), + ('ŗ', 'ŗ'), + ('ř', 'ř'), + ('ś', 'ś'), + ('ŝ', 'ŝ'), + ('ş', 'ş'), + ('š', 'š'), + ('ţ', 'ţ'), + ('ť', 'ť'), + ('ŧ', 'ŧ'), + ('ũ', 'ũ'), + ('ū', 'ū'), + ('ŭ', 'ŭ'), + ('ů', 'ů'), + ('ű', 'ű'), + ('ų', 'ų'), + ('ŵ', 'ŵ'), + ('ŷ', 'ŷ'), + ('ź', 'ź'), + ('ż', 'ż'), + ('ž', 'ƀ'), + ('ƃ', 'ƃ'), + ('ƅ', 'ƅ'), + ('ƈ', 'ƈ'), + ('ƌ', 'ƌ'), + ('ƒ', 'ƒ'), + ('ƕ', 'ƕ'), + ('ƙ', 'ƛ'), + ('ƞ', 'ƞ'), + ('ơ', 'ơ'), + ('ƣ', 'ƣ'), + ('ƥ', 'ƥ'), + ('ƨ', 'ƨ'), + ('ƭ', 'ƭ'), + ('ư', 'ư'), + ('ƴ', 'ƴ'), + ('ƶ', 'ƶ'), + ('ƹ', 'ƹ'), + ('ƽ', 'ƽ'), + ('ƿ', 'ƿ'), + ('Dž', 'dž'), + ('Lj', 'lj'), + ('Nj', 'nj'), + ('ǎ', 'ǎ'), + ('ǐ', 'ǐ'), + ('ǒ', 'ǒ'), + ('ǔ', 'ǔ'), 
+ ('ǖ', 'ǖ'), + ('ǘ', 'ǘ'), + ('ǚ', 'ǚ'), + ('ǜ', 'ǝ'), + ('ǟ', 'ǟ'), + ('ǡ', 'ǡ'), + ('ǣ', 'ǣ'), + ('ǥ', 'ǥ'), + ('ǧ', 'ǧ'), + ('ǩ', 'ǩ'), + ('ǫ', 'ǫ'), + ('ǭ', 'ǭ'), + ('ǯ', 'ǰ'), + ('Dz', 'dz'), + ('ǵ', 'ǵ'), + ('ǹ', 'ǹ'), + ('ǻ', 'ǻ'), + ('ǽ', 'ǽ'), + ('ǿ', 'ǿ'), + ('ȁ', 'ȁ'), + ('ȃ', 'ȃ'), + ('ȅ', 'ȅ'), + ('ȇ', 'ȇ'), + ('ȉ', 'ȉ'), + ('ȋ', 'ȋ'), + ('ȍ', 'ȍ'), + ('ȏ', 'ȏ'), + ('ȑ', 'ȑ'), + ('ȓ', 'ȓ'), + ('ȕ', 'ȕ'), + ('ȗ', 'ȗ'), + ('ș', 'ș'), + ('ț', 'ț'), + ('ȝ', 'ȝ'), + ('ȟ', 'ȟ'), + ('ȣ', 'ȣ'), + ('ȥ', 'ȥ'), + ('ȧ', 'ȧ'), + ('ȩ', 'ȩ'), + ('ȫ', 'ȫ'), + ('ȭ', 'ȭ'), + ('ȯ', 'ȯ'), + ('ȱ', 'ȱ'), + ('ȳ', 'ȳ'), + ('ȼ', 'ȼ'), + ('ȿ', 'ɀ'), + ('ɂ', 'ɂ'), + ('ɇ', 'ɇ'), + ('ɉ', 'ɉ'), + ('ɋ', 'ɋ'), + ('ɍ', 'ɍ'), + ('ɏ', 'ɔ'), + ('ɖ', 'ɗ'), + ('ə', 'ə'), + ('ɛ', 'ɜ'), + ('ɠ', 'ɡ'), + ('ɣ', 'ɦ'), + ('ɨ', 'ɬ'), + ('ɯ', 'ɯ'), + ('ɱ', 'ɲ'), + ('ɵ', 'ɵ'), + ('ɽ', 'ɽ'), + ('ʀ', 'ʀ'), + ('ʂ', 'ʃ'), + ('ʇ', 'ʌ'), + ('ʒ', 'ʒ'), + ('ʝ', 'ʞ'), + ('\u{345}', '\u{345}'), + ('ͱ', 'ͱ'), + ('ͳ', 'ͳ'), + ('ͷ', 'ͷ'), + ('ͻ', 'ͽ'), + ('ΐ', 'ΐ'), + ('ά', 'ώ'), + ('ϐ', 'ϑ'), + ('ϕ', 'ϗ'), + ('ϙ', 'ϙ'), + ('ϛ', 'ϛ'), + ('ϝ', 'ϝ'), + ('ϟ', 'ϟ'), + ('ϡ', 'ϡ'), + ('ϣ', 'ϣ'), + ('ϥ', 'ϥ'), + ('ϧ', 'ϧ'), + ('ϩ', 'ϩ'), + ('ϫ', 'ϫ'), + ('ϭ', 'ϭ'), + ('ϯ', 'ϳ'), + ('ϵ', 'ϵ'), + ('ϸ', 'ϸ'), + ('ϻ', 'ϻ'), + ('а', 'џ'), + ('ѡ', 'ѡ'), + ('ѣ', 'ѣ'), + ('ѥ', 'ѥ'), + ('ѧ', 'ѧ'), + ('ѩ', 'ѩ'), + ('ѫ', 'ѫ'), + ('ѭ', 'ѭ'), + ('ѯ', 'ѯ'), + ('ѱ', 'ѱ'), + ('ѳ', 'ѳ'), + ('ѵ', 'ѵ'), + ('ѷ', 'ѷ'), + ('ѹ', 'ѹ'), + ('ѻ', 'ѻ'), + ('ѽ', 'ѽ'), + ('ѿ', 'ѿ'), + ('ҁ', 'ҁ'), + ('ҋ', 'ҋ'), + ('ҍ', 'ҍ'), + ('ҏ', 'ҏ'), + ('ґ', 'ґ'), + ('ғ', 'ғ'), + ('ҕ', 'ҕ'), + ('җ', 'җ'), + ('ҙ', 'ҙ'), + ('қ', 'қ'), + ('ҝ', 'ҝ'), + ('ҟ', 'ҟ'), + ('ҡ', 'ҡ'), + ('ң', 'ң'), + ('ҥ', 'ҥ'), + ('ҧ', 'ҧ'), + ('ҩ', 'ҩ'), + ('ҫ', 'ҫ'), + ('ҭ', 'ҭ'), + ('ү', 'ү'), + ('ұ', 'ұ'), + ('ҳ', 'ҳ'), + ('ҵ', 'ҵ'), + ('ҷ', 'ҷ'), + ('ҹ', 'ҹ'), + ('һ', 'һ'), + ('ҽ', 'ҽ'), + ('ҿ', 'ҿ'), + ('ӂ', 'ӂ'), + ('ӄ', 'ӄ'), + ('ӆ', 'ӆ'), + ('ӈ', 'ӈ'), + ('ӊ', 'ӊ'), + ('ӌ', 'ӌ'), + ('ӎ', 'ӏ'), + ('ӑ', 'ӑ'), + ('ӓ', 'ӓ'), + ('ӕ', 'ӕ'), + ('ӗ', 'ӗ'), + ('ә', 'ә'), + ('ӛ', 'ӛ'), + ('ӝ', 'ӝ'), + ('ӟ', 'ӟ'), + ('ӡ', 'ӡ'), + ('ӣ', 'ӣ'), + ('ӥ', 'ӥ'), + ('ӧ', 'ӧ'), + ('ө', 'ө'), + ('ӫ', 'ӫ'), + ('ӭ', 'ӭ'), + ('ӯ', 'ӯ'), + ('ӱ', 'ӱ'), + ('ӳ', 'ӳ'), + ('ӵ', 'ӵ'), + ('ӷ', 'ӷ'), + ('ӹ', 'ӹ'), + ('ӻ', 'ӻ'), + ('ӽ', 'ӽ'), + ('ӿ', 'ӿ'), + ('ԁ', 'ԁ'), + ('ԃ', 'ԃ'), + ('ԅ', 'ԅ'), + ('ԇ', 'ԇ'), + ('ԉ', 'ԉ'), + ('ԋ', 'ԋ'), + ('ԍ', 'ԍ'), + ('ԏ', 'ԏ'), + ('ԑ', 'ԑ'), + ('ԓ', 'ԓ'), + ('ԕ', 'ԕ'), + ('ԗ', 'ԗ'), + ('ԙ', 'ԙ'), + ('ԛ', 'ԛ'), + ('ԝ', 'ԝ'), + ('ԟ', 'ԟ'), + ('ԡ', 'ԡ'), + ('ԣ', 'ԣ'), + ('ԥ', 'ԥ'), + ('ԧ', 'ԧ'), + ('ԩ', 'ԩ'), + ('ԫ', 'ԫ'), + ('ԭ', 'ԭ'), + ('ԯ', 'ԯ'), + ('ա', 'և'), + ('ა', 'ჺ'), + ('ჽ', 'ჿ'), + ('ᏸ', 'ᏽ'), + ('ᲀ', 'ᲈ'), + ('ᲊ', 'ᲊ'), + ('ᵹ', 'ᵹ'), + ('ᵽ', 'ᵽ'), + ('ᶎ', 'ᶎ'), + ('ḁ', 'ḁ'), + ('ḃ', 'ḃ'), + ('ḅ', 'ḅ'), + ('ḇ', 'ḇ'), + ('ḉ', 'ḉ'), + ('ḋ', 'ḋ'), + ('ḍ', 'ḍ'), + ('ḏ', 'ḏ'), + ('ḑ', 'ḑ'), + ('ḓ', 'ḓ'), + ('ḕ', 'ḕ'), + ('ḗ', 'ḗ'), + ('ḙ', 'ḙ'), + ('ḛ', 'ḛ'), + ('ḝ', 'ḝ'), + ('ḟ', 'ḟ'), + ('ḡ', 'ḡ'), + ('ḣ', 'ḣ'), + ('ḥ', 'ḥ'), + ('ḧ', 'ḧ'), + ('ḩ', 'ḩ'), + ('ḫ', 'ḫ'), + ('ḭ', 'ḭ'), + ('ḯ', 'ḯ'), + ('ḱ', 'ḱ'), + ('ḳ', 'ḳ'), + ('ḵ', 'ḵ'), + ('ḷ', 'ḷ'), + ('ḹ', 'ḹ'), + ('ḻ', 'ḻ'), + ('ḽ', 'ḽ'), + ('ḿ', 'ḿ'), + ('ṁ', 'ṁ'), + ('ṃ', 'ṃ'), + ('ṅ', 'ṅ'), + ('ṇ', 'ṇ'), + ('ṉ', 'ṉ'), + ('ṋ', 'ṋ'), + ('ṍ', 'ṍ'), + ('ṏ', 'ṏ'), + ('ṑ', 'ṑ'), + ('ṓ', 'ṓ'), + ('ṕ', 'ṕ'), + ('ṗ', 'ṗ'), + ('ṙ', 'ṙ'), + ('ṛ', 'ṛ'), + ('ṝ', 'ṝ'), + ('ṟ', 'ṟ'), + ('ṡ', 'ṡ'), + ('ṣ', 'ṣ'), + ('ṥ', 'ṥ'), + ('ṧ', 'ṧ'), + ('ṩ', 
'ṩ'), + ('ṫ', 'ṫ'), + ('ṭ', 'ṭ'), + ('ṯ', 'ṯ'), + ('ṱ', 'ṱ'), + ('ṳ', 'ṳ'), + ('ṵ', 'ṵ'), + ('ṷ', 'ṷ'), + ('ṹ', 'ṹ'), + ('ṻ', 'ṻ'), + ('ṽ', 'ṽ'), + ('ṿ', 'ṿ'), + ('ẁ', 'ẁ'), + ('ẃ', 'ẃ'), + ('ẅ', 'ẅ'), + ('ẇ', 'ẇ'), + ('ẉ', 'ẉ'), + ('ẋ', 'ẋ'), + ('ẍ', 'ẍ'), + ('ẏ', 'ẏ'), + ('ẑ', 'ẑ'), + ('ẓ', 'ẓ'), + ('ẕ', 'ẛ'), + ('ạ', 'ạ'), + ('ả', 'ả'), + ('ấ', 'ấ'), + ('ầ', 'ầ'), + ('ẩ', 'ẩ'), + ('ẫ', 'ẫ'), + ('ậ', 'ậ'), + ('ắ', 'ắ'), + ('ằ', 'ằ'), + ('ẳ', 'ẳ'), + ('ẵ', 'ẵ'), + ('ặ', 'ặ'), + ('ẹ', 'ẹ'), + ('ẻ', 'ẻ'), + ('ẽ', 'ẽ'), + ('ế', 'ế'), + ('ề', 'ề'), + ('ể', 'ể'), + ('ễ', 'ễ'), + ('ệ', 'ệ'), + ('ỉ', 'ỉ'), + ('ị', 'ị'), + ('ọ', 'ọ'), + ('ỏ', 'ỏ'), + ('ố', 'ố'), + ('ồ', 'ồ'), + ('ổ', 'ổ'), + ('ỗ', 'ỗ'), + ('ộ', 'ộ'), + ('ớ', 'ớ'), + ('ờ', 'ờ'), + ('ở', 'ở'), + ('ỡ', 'ỡ'), + ('ợ', 'ợ'), + ('ụ', 'ụ'), + ('ủ', 'ủ'), + ('ứ', 'ứ'), + ('ừ', 'ừ'), + ('ử', 'ử'), + ('ữ', 'ữ'), + ('ự', 'ự'), + ('ỳ', 'ỳ'), + ('ỵ', 'ỵ'), + ('ỷ', 'ỷ'), + ('ỹ', 'ỹ'), + ('ỻ', 'ỻ'), + ('ỽ', 'ỽ'), + ('ỿ', 'ἇ'), + ('ἐ', 'ἕ'), + ('ἠ', 'ἧ'), + ('ἰ', 'ἷ'), + ('ὀ', 'ὅ'), + ('ὐ', 'ὗ'), + ('ὠ', 'ὧ'), + ('ὰ', 'ώ'), + ('ᾀ', 'ᾴ'), + ('ᾶ', 'ᾷ'), + ('ᾼ', 'ᾼ'), + ('ι', 'ι'), + ('ῂ', 'ῄ'), + ('ῆ', 'ῇ'), + ('ῌ', 'ῌ'), + ('ῐ', 'ΐ'), + ('ῖ', 'ῗ'), + ('ῠ', 'ῧ'), + ('ῲ', 'ῴ'), + ('ῶ', 'ῷ'), + ('ῼ', 'ῼ'), + ('ⅎ', 'ⅎ'), + ('ⅰ', 'ⅿ'), + ('ↄ', 'ↄ'), + ('ⓐ', 'ⓩ'), + ('ⰰ', 'ⱟ'), + ('ⱡ', 'ⱡ'), + ('ⱥ', 'ⱦ'), + ('ⱨ', 'ⱨ'), + ('ⱪ', 'ⱪ'), + ('ⱬ', 'ⱬ'), + ('ⱳ', 'ⱳ'), + ('ⱶ', 'ⱶ'), + ('ⲁ', 'ⲁ'), + ('ⲃ', 'ⲃ'), + ('ⲅ', 'ⲅ'), + ('ⲇ', 'ⲇ'), + ('ⲉ', 'ⲉ'), + ('ⲋ', 'ⲋ'), + ('ⲍ', 'ⲍ'), + ('ⲏ', 'ⲏ'), + ('ⲑ', 'ⲑ'), + ('ⲓ', 'ⲓ'), + ('ⲕ', 'ⲕ'), + ('ⲗ', 'ⲗ'), + ('ⲙ', 'ⲙ'), + ('ⲛ', 'ⲛ'), + ('ⲝ', 'ⲝ'), + ('ⲟ', 'ⲟ'), + ('ⲡ', 'ⲡ'), + ('ⲣ', 'ⲣ'), + ('ⲥ', 'ⲥ'), + ('ⲧ', 'ⲧ'), + ('ⲩ', 'ⲩ'), + ('ⲫ', 'ⲫ'), + ('ⲭ', 'ⲭ'), + ('ⲯ', 'ⲯ'), + ('ⲱ', 'ⲱ'), + ('ⲳ', 'ⲳ'), + ('ⲵ', 'ⲵ'), + ('ⲷ', 'ⲷ'), + ('ⲹ', 'ⲹ'), + ('ⲻ', 'ⲻ'), + ('ⲽ', 'ⲽ'), + ('ⲿ', 'ⲿ'), + ('ⳁ', 'ⳁ'), + ('ⳃ', 'ⳃ'), + ('ⳅ', 'ⳅ'), + ('ⳇ', 'ⳇ'), + ('ⳉ', 'ⳉ'), + ('ⳋ', 'ⳋ'), + ('ⳍ', 'ⳍ'), + ('ⳏ', 'ⳏ'), + ('ⳑ', 'ⳑ'), + ('ⳓ', 'ⳓ'), + ('ⳕ', 'ⳕ'), + ('ⳗ', 'ⳗ'), + ('ⳙ', 'ⳙ'), + ('ⳛ', 'ⳛ'), + ('ⳝ', 'ⳝ'), + ('ⳟ', 'ⳟ'), + ('ⳡ', 'ⳡ'), + ('ⳣ', 'ⳣ'), + ('ⳬ', 'ⳬ'), + ('ⳮ', 'ⳮ'), + ('ⳳ', 'ⳳ'), + ('ⴀ', 'ⴥ'), + ('ⴧ', 'ⴧ'), + ('ⴭ', 'ⴭ'), + ('ꙁ', 'ꙁ'), + ('ꙃ', 'ꙃ'), + ('ꙅ', 'ꙅ'), + ('ꙇ', 'ꙇ'), + ('ꙉ', 'ꙉ'), + ('ꙋ', 'ꙋ'), + ('ꙍ', 'ꙍ'), + ('ꙏ', 'ꙏ'), + ('ꙑ', 'ꙑ'), + ('ꙓ', 'ꙓ'), + ('ꙕ', 'ꙕ'), + ('ꙗ', 'ꙗ'), + ('ꙙ', 'ꙙ'), + ('ꙛ', 'ꙛ'), + ('ꙝ', 'ꙝ'), + ('ꙟ', 'ꙟ'), + ('ꙡ', 'ꙡ'), + ('ꙣ', 'ꙣ'), + ('ꙥ', 'ꙥ'), + ('ꙧ', 'ꙧ'), + ('ꙩ', 'ꙩ'), + ('ꙫ', 'ꙫ'), + ('ꙭ', 'ꙭ'), + ('ꚁ', 'ꚁ'), + ('ꚃ', 'ꚃ'), + ('ꚅ', 'ꚅ'), + ('ꚇ', 'ꚇ'), + ('ꚉ', 'ꚉ'), + ('ꚋ', 'ꚋ'), + ('ꚍ', 'ꚍ'), + ('ꚏ', 'ꚏ'), + ('ꚑ', 'ꚑ'), + ('ꚓ', 'ꚓ'), + ('ꚕ', 'ꚕ'), + ('ꚗ', 'ꚗ'), + ('ꚙ', 'ꚙ'), + ('ꚛ', 'ꚛ'), + ('ꜣ', 'ꜣ'), + ('ꜥ', 'ꜥ'), + ('ꜧ', 'ꜧ'), + ('ꜩ', 'ꜩ'), + ('ꜫ', 'ꜫ'), + ('ꜭ', 'ꜭ'), + ('ꜯ', 'ꜯ'), + ('ꜳ', 'ꜳ'), + ('ꜵ', 'ꜵ'), + ('ꜷ', 'ꜷ'), + ('ꜹ', 'ꜹ'), + ('ꜻ', 'ꜻ'), + ('ꜽ', 'ꜽ'), + ('ꜿ', 'ꜿ'), + ('ꝁ', 'ꝁ'), + ('ꝃ', 'ꝃ'), + ('ꝅ', 'ꝅ'), + ('ꝇ', 'ꝇ'), + ('ꝉ', 'ꝉ'), + ('ꝋ', 'ꝋ'), + ('ꝍ', 'ꝍ'), + ('ꝏ', 'ꝏ'), + ('ꝑ', 'ꝑ'), + ('ꝓ', 'ꝓ'), + ('ꝕ', 'ꝕ'), + ('ꝗ', 'ꝗ'), + ('ꝙ', 'ꝙ'), + ('ꝛ', 'ꝛ'), + ('ꝝ', 'ꝝ'), + ('ꝟ', 'ꝟ'), + ('ꝡ', 'ꝡ'), + ('ꝣ', 'ꝣ'), + ('ꝥ', 'ꝥ'), + ('ꝧ', 'ꝧ'), + ('ꝩ', 'ꝩ'), + ('ꝫ', 'ꝫ'), + ('ꝭ', 'ꝭ'), + ('ꝯ', 'ꝯ'), + ('ꝺ', 'ꝺ'), + ('ꝼ', 'ꝼ'), + ('ꝿ', 'ꝿ'), + ('ꞁ', 'ꞁ'), + ('ꞃ', 'ꞃ'), + ('ꞅ', 'ꞅ'), + ('ꞇ', 'ꞇ'), + ('ꞌ', 'ꞌ'), + ('ꞑ', 'ꞑ'), + ('ꞓ', 'ꞔ'), + ('ꞗ', 'ꞗ'), + ('ꞙ', 'ꞙ'), + ('ꞛ', 'ꞛ'), + ('ꞝ', 'ꞝ'), + ('ꞟ', 'ꞟ'), + ('ꞡ', 'ꞡ'), + ('ꞣ', 'ꞣ'), + ('ꞥ', 'ꞥ'), + ('ꞧ', 'ꞧ'), + ('ꞩ', 'ꞩ'), + 
('ꞵ', 'ꞵ'), + ('ꞷ', 'ꞷ'), + ('ꞹ', 'ꞹ'), + ('ꞻ', 'ꞻ'), + ('ꞽ', 'ꞽ'), + ('ꞿ', 'ꞿ'), + ('ꟁ', 'ꟁ'), + ('ꟃ', 'ꟃ'), + ('ꟈ', 'ꟈ'), + ('ꟊ', 'ꟊ'), + ('ꟍ', 'ꟍ'), + ('ꟑ', 'ꟑ'), + ('ꟗ', 'ꟗ'), + ('ꟙ', 'ꟙ'), + ('ꟛ', 'ꟛ'), + ('ꟶ', 'ꟶ'), + ('ꭓ', 'ꭓ'), + ('ꭰ', 'ꮿ'), + ('ff', 'st'), + ('ﬓ', 'ﬗ'), + ('a', 'z'), + ('𐐨', '𐑏'), + ('𐓘', '𐓻'), + ('𐖗', '𐖡'), + ('𐖣', '𐖱'), + ('𐖳', '𐖹'), + ('𐖻', '𐖼'), + ('𐳀', '𐳲'), + ('𐵰', '𐶅'), + ('𑣀', '𑣟'), + ('𖹠', '𖹿'), + ('𞤢', '𞥃'), +]; + +pub const DASH: &'static [(char, char)] = &[ + ('-', '-'), + ('֊', '֊'), + ('־', '־'), + ('᐀', '᐀'), + ('᠆', '᠆'), + ('‐', '―'), + ('⁓', '⁓'), + ('⁻', '⁻'), + ('₋', '₋'), + ('−', '−'), + ('⸗', '⸗'), + ('⸚', '⸚'), + ('⸺', '⸻'), + ('⹀', '⹀'), + ('⹝', '⹝'), + ('〜', '〜'), + ('〰', '〰'), + ('゠', '゠'), + ('︱', '︲'), + ('﹘', '﹘'), + ('﹣', '﹣'), + ('-', '-'), + ('𐵮', '𐵮'), + ('𐺭', '𐺭'), +]; + +pub const DEFAULT_IGNORABLE_CODE_POINT: &'static [(char, char)] = &[ + ('\u{ad}', '\u{ad}'), + ('\u{34f}', '\u{34f}'), + ('\u{61c}', '\u{61c}'), + ('ᅟ', 'ᅠ'), + ('\u{17b4}', '\u{17b5}'), + ('\u{180b}', '\u{180f}'), + ('\u{200b}', '\u{200f}'), + ('\u{202a}', '\u{202e}'), + ('\u{2060}', '\u{206f}'), + ('ㅤ', 'ㅤ'), + ('\u{fe00}', '\u{fe0f}'), + ('\u{feff}', '\u{feff}'), + ('ᅠ', 'ᅠ'), + ('\u{fff0}', '\u{fff8}'), + ('\u{1bca0}', '\u{1bca3}'), + ('\u{1d173}', '\u{1d17a}'), + ('\u{e0000}', '\u{e0fff}'), +]; + +pub const DEPRECATED: &'static [(char, char)] = &[ + ('ʼn', 'ʼn'), + ('ٳ', 'ٳ'), + ('\u{f77}', '\u{f77}'), + ('\u{f79}', '\u{f79}'), + ('ឣ', 'ឤ'), + ('\u{206a}', '\u{206f}'), + ('〈', '〉'), + ('\u{e0001}', '\u{e0001}'), +]; + +pub const DIACRITIC: &'static [(char, char)] = &[ + ('^', '^'), + ('`', '`'), + ('¨', '¨'), + ('¯', '¯'), + ('´', '´'), + ('·', '¸'), + ('ʰ', '\u{34e}'), + ('\u{350}', '\u{357}'), + ('\u{35d}', '\u{362}'), + ('ʹ', '͵'), + ('ͺ', 'ͺ'), + ('΄', '΅'), + ('\u{483}', '\u{487}'), + ('ՙ', 'ՙ'), + ('\u{591}', '\u{5a1}'), + ('\u{5a3}', '\u{5bd}'), + ('\u{5bf}', '\u{5bf}'), + ('\u{5c1}', '\u{5c2}'), + ('\u{5c4}', '\u{5c4}'), + ('\u{64b}', '\u{652}'), + ('\u{657}', '\u{658}'), + ('\u{6df}', '\u{6e0}'), + ('ۥ', 'ۦ'), + ('\u{6ea}', '\u{6ec}'), + ('\u{730}', '\u{74a}'), + ('\u{7a6}', '\u{7b0}'), + ('\u{7eb}', 'ߵ'), + ('\u{818}', '\u{819}'), + ('\u{898}', '\u{89f}'), + ('ࣉ', '\u{8d2}'), + ('\u{8e3}', '\u{8fe}'), + ('\u{93c}', '\u{93c}'), + ('\u{94d}', '\u{94d}'), + ('\u{951}', '\u{954}'), + ('ॱ', 'ॱ'), + ('\u{9bc}', '\u{9bc}'), + ('\u{9cd}', '\u{9cd}'), + ('\u{a3c}', '\u{a3c}'), + ('\u{a4d}', '\u{a4d}'), + ('\u{abc}', '\u{abc}'), + ('\u{acd}', '\u{acd}'), + ('\u{afd}', '\u{aff}'), + ('\u{b3c}', '\u{b3c}'), + ('\u{b4d}', '\u{b4d}'), + ('\u{b55}', '\u{b55}'), + ('\u{bcd}', '\u{bcd}'), + ('\u{c3c}', '\u{c3c}'), + ('\u{c4d}', '\u{c4d}'), + ('\u{cbc}', '\u{cbc}'), + ('\u{ccd}', '\u{ccd}'), + ('\u{d3b}', '\u{d3c}'), + ('\u{d4d}', '\u{d4d}'), + ('\u{dca}', '\u{dca}'), + ('\u{e3a}', '\u{e3a}'), + ('\u{e47}', '\u{e4c}'), + ('\u{e4e}', '\u{e4e}'), + ('\u{eba}', '\u{eba}'), + ('\u{ec8}', '\u{ecc}'), + ('\u{f18}', '\u{f19}'), + ('\u{f35}', '\u{f35}'), + ('\u{f37}', '\u{f37}'), + ('\u{f39}', '\u{f39}'), + ('༾', '༿'), + ('\u{f82}', '\u{f84}'), + ('\u{f86}', '\u{f87}'), + ('\u{fc6}', '\u{fc6}'), + ('\u{1037}', '\u{1037}'), + ('\u{1039}', '\u{103a}'), + ('ၣ', 'ၤ'), + ('ၩ', 'ၭ'), + ('ႇ', '\u{108d}'), + ('ႏ', 'ႏ'), + ('ႚ', 'ႛ'), + ('\u{135d}', '\u{135f}'), + ('\u{1714}', '\u{1715}'), + ('\u{1734}', '\u{1734}'), + ('\u{17c9}', '\u{17d3}'), + ('\u{17dd}', '\u{17dd}'), + ('\u{1939}', '\u{193b}'), + ('\u{1a60}', '\u{1a60}'), + ('\u{1a75}', '\u{1a7c}'), + ('\u{1a7f}', '\u{1a7f}'), 
+ ('\u{1ab0}', '\u{1abe}'), + ('\u{1ac1}', '\u{1acb}'), + ('\u{1b34}', '\u{1b34}'), + ('\u{1b44}', '\u{1b44}'), + ('\u{1b6b}', '\u{1b73}'), + ('\u{1baa}', '\u{1bab}'), + ('\u{1be6}', '\u{1be6}'), + ('\u{1bf2}', '\u{1bf3}'), + ('\u{1c36}', '\u{1c37}'), + ('ᱸ', 'ᱽ'), + ('\u{1cd0}', '\u{1ce8}'), + ('\u{1ced}', '\u{1ced}'), + ('\u{1cf4}', '\u{1cf4}'), + ('᳷', '\u{1cf9}'), + ('ᴬ', 'ᵪ'), + ('\u{1dc4}', '\u{1dcf}'), + ('\u{1df5}', '\u{1dff}'), + ('᾽', '᾽'), + ('᾿', '῁'), + ('῍', '῏'), + ('῝', '῟'), + ('῭', '`'), + ('´', '῾'), + ('\u{2cef}', '\u{2cf1}'), + ('ⸯ', 'ⸯ'), + ('\u{302a}', '\u{302f}'), + ('\u{3099}', '゜'), + ('ー', 'ー'), + ('\u{a66f}', '\u{a66f}'), + ('\u{a67c}', '\u{a67d}'), + ('ꙿ', 'ꙿ'), + ('ꚜ', 'ꚝ'), + ('\u{a6f0}', '\u{a6f1}'), + ('꜀', '꜡'), + ('ꞈ', '꞊'), + ('ꟸ', 'ꟹ'), + ('\u{a806}', '\u{a806}'), + ('\u{a82c}', '\u{a82c}'), + ('\u{a8c4}', '\u{a8c4}'), + ('\u{a8e0}', '\u{a8f1}'), + ('\u{a92b}', '꤮'), + ('\u{a953}', '\u{a953}'), + ('\u{a9b3}', '\u{a9b3}'), + ('\u{a9c0}', '\u{a9c0}'), + ('\u{a9e5}', '\u{a9e5}'), + ('ꩻ', 'ꩽ'), + ('\u{aabf}', 'ꫂ'), + ('\u{aaf6}', '\u{aaf6}'), + ('꭛', 'ꭟ'), + ('ꭩ', '꭫'), + ('꯬', '\u{abed}'), + ('\u{fb1e}', '\u{fb1e}'), + ('\u{fe20}', '\u{fe2f}'), + ('^', '^'), + ('`', '`'), + ('ー', 'ー'), + ('\u{ff9e}', '\u{ff9f}'), + (' ̄', ' ̄'), + ('\u{102e0}', '\u{102e0}'), + ('𐞀', '𐞅'), + ('𐞇', '𐞰'), + ('𐞲', '𐞺'), + ('\u{10a38}', '\u{10a3a}'), + ('\u{10a3f}', '\u{10a3f}'), + ('\u{10ae5}', '\u{10ae6}'), + ('𐴢', '\u{10d27}'), + ('𐵎', '𐵎'), + ('\u{10d69}', '\u{10d6d}'), + ('\u{10efd}', '\u{10eff}'), + ('\u{10f46}', '\u{10f50}'), + ('\u{10f82}', '\u{10f85}'), + ('\u{11046}', '\u{11046}'), + ('\u{11070}', '\u{11070}'), + ('\u{110b9}', '\u{110ba}'), + ('\u{11133}', '\u{11134}'), + ('\u{11173}', '\u{11173}'), + ('\u{111c0}', '\u{111c0}'), + ('\u{111ca}', '\u{111cc}'), + ('\u{11235}', '\u{11236}'), + ('\u{112e9}', '\u{112ea}'), + ('\u{1133b}', '\u{1133c}'), + ('\u{1134d}', '\u{1134d}'), + ('\u{11366}', '\u{1136c}'), + ('\u{11370}', '\u{11374}'), + ('\u{113ce}', '\u{113d0}'), + ('\u{113d2}', '𑏓'), + ('\u{113e1}', '\u{113e2}'), + ('\u{11442}', '\u{11442}'), + ('\u{11446}', '\u{11446}'), + ('\u{114c2}', '\u{114c3}'), + ('\u{115bf}', '\u{115c0}'), + ('\u{1163f}', '\u{1163f}'), + ('\u{116b6}', '\u{116b7}'), + ('\u{1172b}', '\u{1172b}'), + ('\u{11839}', '\u{1183a}'), + ('\u{1193d}', '\u{1193e}'), + ('\u{11943}', '\u{11943}'), + ('\u{119e0}', '\u{119e0}'), + ('\u{11a34}', '\u{11a34}'), + ('\u{11a47}', '\u{11a47}'), + ('\u{11a99}', '\u{11a99}'), + ('\u{11c3f}', '\u{11c3f}'), + ('\u{11d42}', '\u{11d42}'), + ('\u{11d44}', '\u{11d45}'), + ('\u{11d97}', '\u{11d97}'), + ('\u{11f41}', '\u{11f42}'), + ('\u{11f5a}', '\u{11f5a}'), + ('\u{13447}', '\u{13455}'), + ('\u{1612f}', '\u{1612f}'), + ('\u{16af0}', '\u{16af4}'), + ('\u{16b30}', '\u{16b36}'), + ('𖵫', '𖵬'), + ('\u{16f8f}', '𖾟'), + ('\u{16ff0}', '\u{16ff1}'), + ('𚿰', '𚿳'), + ('𚿵', '𚿻'), + ('𚿽', '𚿾'), + ('\u{1cf00}', '\u{1cf2d}'), + ('\u{1cf30}', '\u{1cf46}'), + ('\u{1d167}', '\u{1d169}'), + ('\u{1d16d}', '\u{1d172}'), + ('\u{1d17b}', '\u{1d182}'), + ('\u{1d185}', '\u{1d18b}'), + ('\u{1d1aa}', '\u{1d1ad}'), + ('𞀰', '𞁭'), + ('\u{1e130}', '\u{1e136}'), + ('\u{1e2ae}', '\u{1e2ae}'), + ('\u{1e2ec}', '\u{1e2ef}'), + ('\u{1e5ee}', '\u{1e5ef}'), + ('\u{1e8d0}', '\u{1e8d6}'), + ('\u{1e944}', '\u{1e946}'), + ('\u{1e948}', '\u{1e94a}'), +]; + +pub const EMOJI: &'static [(char, char)] = &[ + ('#', '#'), + ('*', '*'), + ('0', '9'), + ('©', '©'), + ('®', '®'), + ('‼', '‼'), + ('⁉', '⁉'), + ('™', '™'), + ('ℹ', 'ℹ'), + ('↔', '↙'), + ('↩', '↪'), + ('⌚', 
'⌛'), + ('⌨', '⌨'), + ('⏏', '⏏'), + ('⏩', '⏳'), + ('⏸', '⏺'), + ('Ⓜ', 'Ⓜ'), + ('▪', '▫'), + ('▶', '▶'), + ('◀', '◀'), + ('◻', '◾'), + ('☀', '☄'), + ('☎', '☎'), + ('☑', '☑'), + ('☔', '☕'), + ('☘', '☘'), + ('☝', '☝'), + ('☠', '☠'), + ('☢', '☣'), + ('☦', '☦'), + ('☪', '☪'), + ('☮', '☯'), + ('☸', '☺'), + ('♀', '♀'), + ('♂', '♂'), + ('♈', '♓'), + ('♟', '♠'), + ('♣', '♣'), + ('♥', '♦'), + ('♨', '♨'), + ('♻', '♻'), + ('♾', '♿'), + ('⚒', '⚗'), + ('⚙', '⚙'), + ('⚛', '⚜'), + ('⚠', '⚡'), + ('⚧', '⚧'), + ('⚪', '⚫'), + ('⚰', '⚱'), + ('⚽', '⚾'), + ('⛄', '⛅'), + ('⛈', '⛈'), + ('⛎', '⛏'), + ('⛑', '⛑'), + ('⛓', '⛔'), + ('⛩', '⛪'), + ('⛰', '⛵'), + ('⛷', '⛺'), + ('⛽', '⛽'), + ('✂', '✂'), + ('✅', '✅'), + ('✈', '✍'), + ('✏', '✏'), + ('✒', '✒'), + ('✔', '✔'), + ('✖', '✖'), + ('✝', '✝'), + ('✡', '✡'), + ('✨', '✨'), + ('✳', '✴'), + ('❄', '❄'), + ('❇', '❇'), + ('❌', '❌'), + ('❎', '❎'), + ('❓', '❕'), + ('❗', '❗'), + ('❣', '❤'), + ('➕', '➗'), + ('➡', '➡'), + ('➰', '➰'), + ('➿', '➿'), + ('⤴', '⤵'), + ('⬅', '⬇'), + ('⬛', '⬜'), + ('⭐', '⭐'), + ('⭕', '⭕'), + ('〰', '〰'), + ('〽', '〽'), + ('㊗', '㊗'), + ('㊙', '㊙'), + ('🀄', '🀄'), + ('🃏', '🃏'), + ('🅰', '🅱'), + ('🅾', '🅿'), + ('🆎', '🆎'), + ('🆑', '🆚'), + ('🇦', '🇿'), + ('🈁', '🈂'), + ('🈚', '🈚'), + ('🈯', '🈯'), + ('🈲', '🈺'), + ('🉐', '🉑'), + ('🌀', '🌡'), + ('🌤', '🎓'), + ('🎖', '🎗'), + ('🎙', '🎛'), + ('🎞', '🏰'), + ('🏳', '🏵'), + ('🏷', '📽'), + ('📿', '🔽'), + ('🕉', '🕎'), + ('🕐', '🕧'), + ('🕯', '🕰'), + ('🕳', '🕺'), + ('🖇', '🖇'), + ('🖊', '🖍'), + ('🖐', '🖐'), + ('🖕', '🖖'), + ('🖤', '🖥'), + ('🖨', '🖨'), + ('🖱', '🖲'), + ('🖼', '🖼'), + ('🗂', '🗄'), + ('🗑', '🗓'), + ('🗜', '🗞'), + ('🗡', '🗡'), + ('🗣', '🗣'), + ('🗨', '🗨'), + ('🗯', '🗯'), + ('🗳', '🗳'), + ('🗺', '🙏'), + ('🚀', '🛅'), + ('🛋', '🛒'), + ('🛕', '🛗'), + ('🛜', '🛥'), + ('🛩', '🛩'), + ('🛫', '🛬'), + ('🛰', '🛰'), + ('🛳', '🛼'), + ('🟠', '🟫'), + ('🟰', '🟰'), + ('🤌', '🤺'), + ('🤼', '🥅'), + ('🥇', '🧿'), + ('🩰', '🩼'), + ('🪀', '🪉'), + ('🪏', '🫆'), + ('🫎', '🫜'), + ('🫟', '🫩'), + ('🫰', '🫸'), +]; + +pub const EMOJI_COMPONENT: &'static [(char, char)] = &[ + ('#', '#'), + ('*', '*'), + ('0', '9'), + ('\u{200d}', '\u{200d}'), + ('\u{20e3}', '\u{20e3}'), + ('\u{fe0f}', '\u{fe0f}'), + ('🇦', '🇿'), + ('🏻', '🏿'), + ('🦰', '🦳'), + ('\u{e0020}', '\u{e007f}'), +]; + +pub const EMOJI_MODIFIER: &'static [(char, char)] = &[('🏻', '🏿')]; + +pub const EMOJI_MODIFIER_BASE: &'static [(char, char)] = &[ + ('☝', '☝'), + ('⛹', '⛹'), + ('✊', '✍'), + ('🎅', '🎅'), + ('🏂', '🏄'), + ('🏇', '🏇'), + ('🏊', '🏌'), + ('👂', '👃'), + ('👆', '👐'), + ('👦', '👸'), + ('👼', '👼'), + ('💁', '💃'), + ('💅', '💇'), + ('💏', '💏'), + ('💑', '💑'), + ('💪', '💪'), + ('🕴', '🕵'), + ('🕺', '🕺'), + ('🖐', '🖐'), + ('🖕', '🖖'), + ('🙅', '🙇'), + ('🙋', '🙏'), + ('🚣', '🚣'), + ('🚴', '🚶'), + ('🛀', '🛀'), + ('🛌', '🛌'), + ('🤌', '🤌'), + ('🤏', '🤏'), + ('🤘', '🤟'), + ('🤦', '🤦'), + ('🤰', '🤹'), + ('🤼', '🤾'), + ('🥷', '🥷'), + ('🦵', '🦶'), + ('🦸', '🦹'), + ('🦻', '🦻'), + ('🧍', '🧏'), + ('🧑', '🧝'), + ('🫃', '🫅'), + ('🫰', '🫸'), +]; + +pub const EMOJI_PRESENTATION: &'static [(char, char)] = &[ + ('⌚', '⌛'), + ('⏩', '⏬'), + ('⏰', '⏰'), + ('⏳', '⏳'), + ('◽', '◾'), + ('☔', '☕'), + ('♈', '♓'), + ('♿', '♿'), + ('⚓', '⚓'), + ('⚡', '⚡'), + ('⚪', '⚫'), + ('⚽', '⚾'), + ('⛄', '⛅'), + ('⛎', '⛎'), + ('⛔', '⛔'), + ('⛪', '⛪'), + ('⛲', '⛳'), + ('⛵', '⛵'), + ('⛺', '⛺'), + ('⛽', '⛽'), + ('✅', '✅'), + ('✊', '✋'), + ('✨', '✨'), + ('❌', '❌'), + ('❎', '❎'), + ('❓', '❕'), + ('❗', '❗'), + ('➕', '➗'), + ('➰', '➰'), + ('➿', '➿'), + ('⬛', '⬜'), + ('⭐', '⭐'), + ('⭕', '⭕'), + ('🀄', '🀄'), + ('🃏', '🃏'), + ('🆎', '🆎'), + ('🆑', '🆚'), + ('🇦', '🇿'), + ('🈁', '🈁'), + ('🈚', '🈚'), + ('🈯', '🈯'), + ('🈲', '🈶'), + 
('🈸', '🈺'), + ('🉐', '🉑'), + ('🌀', '🌠'), + ('🌭', '🌵'), + ('🌷', '🍼'), + ('🍾', '🎓'), + ('🎠', '🏊'), + ('🏏', '🏓'), + ('🏠', '🏰'), + ('🏴', '🏴'), + ('🏸', '🐾'), + ('👀', '👀'), + ('👂', '📼'), + ('📿', '🔽'), + ('🕋', '🕎'), + ('🕐', '🕧'), + ('🕺', '🕺'), + ('🖕', '🖖'), + ('🖤', '🖤'), + ('🗻', '🙏'), + ('🚀', '🛅'), + ('🛌', '🛌'), + ('🛐', '🛒'), + ('🛕', '🛗'), + ('🛜', '🛟'), + ('🛫', '🛬'), + ('🛴', '🛼'), + ('🟠', '🟫'), + ('🟰', '🟰'), + ('🤌', '🤺'), + ('🤼', '🥅'), + ('🥇', '🧿'), + ('🩰', '🩼'), + ('🪀', '🪉'), + ('🪏', '🫆'), + ('🫎', '🫜'), + ('🫟', '🫩'), + ('🫰', '🫸'), +]; + +pub const EXTENDED_PICTOGRAPHIC: &'static [(char, char)] = &[ + ('©', '©'), + ('®', '®'), + ('‼', '‼'), + ('⁉', '⁉'), + ('™', '™'), + ('ℹ', 'ℹ'), + ('↔', '↙'), + ('↩', '↪'), + ('⌚', '⌛'), + ('⌨', '⌨'), + ('⎈', '⎈'), + ('⏏', '⏏'), + ('⏩', '⏳'), + ('⏸', '⏺'), + ('Ⓜ', 'Ⓜ'), + ('▪', '▫'), + ('▶', '▶'), + ('◀', '◀'), + ('◻', '◾'), + ('☀', '★'), + ('☇', '☒'), + ('☔', '⚅'), + ('⚐', '✅'), + ('✈', '✒'), + ('✔', '✔'), + ('✖', '✖'), + ('✝', '✝'), + ('✡', '✡'), + ('✨', '✨'), + ('✳', '✴'), + ('❄', '❄'), + ('❇', '❇'), + ('❌', '❌'), + ('❎', '❎'), + ('❓', '❕'), + ('❗', '❗'), + ('❣', '❧'), + ('➕', '➗'), + ('➡', '➡'), + ('➰', '➰'), + ('➿', '➿'), + ('⤴', '⤵'), + ('⬅', '⬇'), + ('⬛', '⬜'), + ('⭐', '⭐'), + ('⭕', '⭕'), + ('〰', '〰'), + ('〽', '〽'), + ('㊗', '㊗'), + ('㊙', '㊙'), + ('🀀', '\u{1f0ff}'), + ('🄍', '🄏'), + ('🄯', '🄯'), + ('🅬', '🅱'), + ('🅾', '🅿'), + ('🆎', '🆎'), + ('🆑', '🆚'), + ('🆭', '\u{1f1e5}'), + ('🈁', '\u{1f20f}'), + ('🈚', '🈚'), + ('🈯', '🈯'), + ('🈲', '🈺'), + ('\u{1f23c}', '\u{1f23f}'), + ('\u{1f249}', '🏺'), + ('🐀', '🔽'), + ('🕆', '🙏'), + ('🚀', '\u{1f6ff}'), + ('🝴', '🝿'), + ('🟕', '\u{1f7ff}'), + ('\u{1f80c}', '\u{1f80f}'), + ('\u{1f848}', '\u{1f84f}'), + ('\u{1f85a}', '\u{1f85f}'), + ('\u{1f888}', '\u{1f88f}'), + ('\u{1f8ae}', '\u{1f8ff}'), + ('🤌', '🤺'), + ('🤼', '🥅'), + ('🥇', '\u{1faff}'), + ('\u{1fc00}', '\u{1fffd}'), +]; + +pub const EXTENDER: &'static [(char, char)] = &[ + ('·', '·'), + ('ː', 'ˑ'), + ('ـ', 'ـ'), + ('ߺ', 'ߺ'), + ('\u{a71}', '\u{a71}'), + ('\u{afb}', '\u{afb}'), + ('\u{b55}', '\u{b55}'), + ('ๆ', 'ๆ'), + ('ໆ', 'ໆ'), + ('᠊', '᠊'), + ('ᡃ', 'ᡃ'), + ('ᪧ', 'ᪧ'), + ('\u{1c36}', '\u{1c36}'), + ('ᱻ', 'ᱻ'), + ('々', '々'), + ('〱', '〵'), + ('ゝ', 'ゞ'), + ('ー', 'ヾ'), + ('ꀕ', 'ꀕ'), + ('ꘌ', 'ꘌ'), + ('ꧏ', 'ꧏ'), + ('ꧦ', 'ꧦ'), + ('ꩰ', 'ꩰ'), + ('ꫝ', 'ꫝ'), + ('ꫳ', 'ꫴ'), + ('ー', 'ー'), + ('𐞁', '𐞂'), + ('𐵎', '𐵎'), + ('\u{10d6a}', '\u{10d6a}'), + ('𐵯', '𐵯'), + ('\u{11237}', '\u{11237}'), + ('𑍝', '𑍝'), + ('\u{113d2}', '𑏓'), + ('𑗆', '𑗈'), + ('\u{11a98}', '\u{11a98}'), + ('𖭂', '𖭃'), + ('𖿠', '𖿡'), + ('𖿣', '𖿣'), + ('𞄼', '𞄽'), + ('\u{1e5ef}', '\u{1e5ef}'), + ('\u{1e944}', '\u{1e946}'), +]; + +pub const GRAPHEME_BASE: &'static [(char, char)] = &[ + (' ', '~'), + ('\u{a0}', '¬'), + ('®', '˿'), + ('Ͱ', 'ͷ'), + ('ͺ', 'Ϳ'), + ('΄', 'Ί'), + ('Ό', 'Ό'), + ('Ύ', 'Ρ'), + ('Σ', '҂'), + ('Ҋ', 'ԯ'), + ('Ա', 'Ֆ'), + ('ՙ', '֊'), + ('֍', '֏'), + ('־', '־'), + ('׀', '׀'), + ('׃', '׃'), + ('׆', '׆'), + ('א', 'ת'), + ('ׯ', '״'), + ('؆', '؏'), + ('؛', '؛'), + ('؝', 'ي'), + ('٠', 'ٯ'), + ('ٱ', 'ە'), + ('۞', '۞'), + ('ۥ', 'ۦ'), + ('۩', '۩'), + ('ۮ', '܍'), + ('ܐ', 'ܐ'), + ('ܒ', 'ܯ'), + ('ݍ', 'ޥ'), + ('ޱ', 'ޱ'), + ('߀', 'ߪ'), + ('ߴ', 'ߺ'), + ('߾', 'ࠕ'), + ('ࠚ', 'ࠚ'), + ('ࠤ', 'ࠤ'), + ('ࠨ', 'ࠨ'), + ('࠰', '࠾'), + ('ࡀ', 'ࡘ'), + ('࡞', '࡞'), + ('ࡠ', 'ࡪ'), + ('ࡰ', 'ࢎ'), + ('ࢠ', 'ࣉ'), + ('ः', 'ह'), + ('ऻ', 'ऻ'), + ('ऽ', 'ी'), + ('ॉ', 'ौ'), + ('ॎ', 'ॐ'), + ('क़', 'ॡ'), + ('।', 'ঀ'), + ('ং', 'ঃ'), + ('অ', 'ঌ'), + ('এ', 'ঐ'), + ('ও', 'ন'), + ('প', 'র'), + ('ল', 'ল'), + ('শ', 'হ'), + ('ঽ', 'ঽ'), + ('ি', 'ী'), + ('ে', 'ৈ'), + 
('ো', 'ৌ'), + ('ৎ', 'ৎ'), + ('ড়', 'ঢ়'), + ('য়', 'ৡ'), + ('০', '৽'), + ('ਃ', 'ਃ'), + ('ਅ', 'ਊ'), + ('ਏ', 'ਐ'), + ('ਓ', 'ਨ'), + ('ਪ', 'ਰ'), + ('ਲ', 'ਲ਼'), + ('ਵ', 'ਸ਼'), + ('ਸ', 'ਹ'), + ('ਾ', 'ੀ'), + ('ਖ਼', 'ੜ'), + ('ਫ਼', 'ਫ਼'), + ('੦', '੯'), + ('ੲ', 'ੴ'), + ('੶', '੶'), + ('ઃ', 'ઃ'), + ('અ', 'ઍ'), + ('એ', 'ઑ'), + ('ઓ', 'ન'), + ('પ', 'ર'), + ('લ', 'ળ'), + ('વ', 'હ'), + ('ઽ', 'ી'), + ('ૉ', 'ૉ'), + ('ો', 'ૌ'), + ('ૐ', 'ૐ'), + ('ૠ', 'ૡ'), + ('૦', '૱'), + ('ૹ', 'ૹ'), + ('ଂ', 'ଃ'), + ('ଅ', 'ଌ'), + ('ଏ', 'ଐ'), + ('ଓ', 'ନ'), + ('ପ', 'ର'), + ('ଲ', 'ଳ'), + ('ଵ', 'ହ'), + ('ଽ', 'ଽ'), + ('ୀ', 'ୀ'), + ('େ', 'ୈ'), + ('ୋ', 'ୌ'), + ('ଡ଼', 'ଢ଼'), + ('ୟ', 'ୡ'), + ('୦', '୷'), + ('ஃ', 'ஃ'), + ('அ', 'ஊ'), + ('எ', 'ஐ'), + ('ஒ', 'க'), + ('ங', 'ச'), + ('ஜ', 'ஜ'), + ('ஞ', 'ட'), + ('ண', 'த'), + ('ந', 'ப'), + ('ம', 'ஹ'), + ('ி', 'ி'), + ('ு', 'ூ'), + ('ெ', 'ை'), + ('ொ', 'ௌ'), + ('ௐ', 'ௐ'), + ('௦', '௺'), + ('ఁ', 'ః'), + ('అ', 'ఌ'), + ('ఎ', 'ఐ'), + ('ఒ', 'న'), + ('ప', 'హ'), + ('ఽ', 'ఽ'), + ('ు', 'ౄ'), + ('ౘ', 'ౚ'), + ('ౝ', 'ౝ'), + ('ౠ', 'ౡ'), + ('౦', '౯'), + ('౷', 'ಀ'), + ('ಂ', 'ಌ'), + ('ಎ', 'ಐ'), + ('ಒ', 'ನ'), + ('ಪ', 'ಳ'), + ('ವ', 'ಹ'), + ('ಽ', 'ಾ'), + ('ು', 'ು'), + ('ೃ', 'ೄ'), + ('ೝ', 'ೞ'), + ('ೠ', 'ೡ'), + ('೦', '೯'), + ('ೱ', 'ೳ'), + ('ം', 'ഌ'), + ('എ', 'ഐ'), + ('ഒ', 'ഺ'), + ('ഽ', 'ഽ'), + ('ി', 'ീ'), + ('െ', 'ൈ'), + ('ൊ', 'ൌ'), + ('ൎ', '൏'), + ('ൔ', 'ൖ'), + ('൘', 'ൡ'), + ('൦', 'ൿ'), + ('ං', 'ඃ'), + ('අ', 'ඖ'), + ('ක', 'න'), + ('ඳ', 'ර'), + ('ල', 'ල'), + ('ව', 'ෆ'), + ('ැ', 'ෑ'), + ('ෘ', 'ෞ'), + ('෦', '෯'), + ('ෲ', '෴'), + ('ก', 'ะ'), + ('า', 'ำ'), + ('฿', 'ๆ'), + ('๏', '๛'), + ('ກ', 'ຂ'), + ('ຄ', 'ຄ'), + ('ຆ', 'ຊ'), + ('ຌ', 'ຣ'), + ('ລ', 'ລ'), + ('ວ', 'ະ'), + ('າ', 'ຳ'), + ('ຽ', 'ຽ'), + ('ເ', 'ໄ'), + ('ໆ', 'ໆ'), + ('໐', '໙'), + ('ໜ', 'ໟ'), + ('ༀ', '༗'), + ('༚', '༴'), + ('༶', '༶'), + ('༸', '༸'), + ('༺', 'ཇ'), + ('ཉ', 'ཬ'), + ('ཿ', 'ཿ'), + ('྅', '྅'), + ('ྈ', 'ྌ'), + ('྾', '࿅'), + ('࿇', '࿌'), + ('࿎', '࿚'), + ('က', 'ာ'), + ('ေ', 'ေ'), + ('း', 'း'), + ('ျ', 'ြ'), + ('ဿ', 'ၗ'), + ('ၚ', 'ၝ'), + ('ၡ', 'ၰ'), + ('ၵ', 'ႁ'), + ('ႃ', 'ႄ'), + ('ႇ', 'ႌ'), + ('ႎ', 'ႜ'), + ('႞', 'Ⴥ'), + ('Ⴧ', 'Ⴧ'), + ('Ⴭ', 'Ⴭ'), + ('ა', 'ቈ'), + ('ቊ', 'ቍ'), + ('ቐ', 'ቖ'), + ('ቘ', 'ቘ'), + ('ቚ', 'ቝ'), + ('በ', 'ኈ'), + ('ኊ', 'ኍ'), + ('ነ', 'ኰ'), + ('ኲ', 'ኵ'), + ('ኸ', 'ኾ'), + ('ዀ', 'ዀ'), + ('ዂ', 'ዅ'), + ('ወ', 'ዖ'), + ('ዘ', 'ጐ'), + ('ጒ', 'ጕ'), + ('ጘ', 'ፚ'), + ('፠', '፼'), + ('ᎀ', '᎙'), + ('Ꭰ', 'Ᏽ'), + ('ᏸ', 'ᏽ'), + ('᐀', '᚜'), + ('ᚠ', 'ᛸ'), + ('ᜀ', 'ᜑ'), + ('ᜟ', 'ᜱ'), + ('᜵', '᜶'), + ('ᝀ', 'ᝑ'), + ('ᝠ', 'ᝬ'), + ('ᝮ', 'ᝰ'), + ('ក', 'ឳ'), + ('ា', 'ា'), + ('ើ', 'ៅ'), + ('ះ', 'ៈ'), + ('។', 'ៜ'), + ('០', '៩'), + ('៰', '៹'), + ('᠀', '᠊'), + ('᠐', '᠙'), + ('ᠠ', 'ᡸ'), + ('ᢀ', 'ᢄ'), + ('ᢇ', 'ᢨ'), + ('ᢪ', 'ᢪ'), + ('ᢰ', 'ᣵ'), + ('ᤀ', 'ᤞ'), + ('ᤣ', 'ᤦ'), + ('ᤩ', 'ᤫ'), + ('ᤰ', 'ᤱ'), + ('ᤳ', 'ᤸ'), + ('᥀', '᥀'), + ('᥄', 'ᥭ'), + ('ᥰ', 'ᥴ'), + ('ᦀ', 'ᦫ'), + ('ᦰ', 'ᧉ'), + ('᧐', '᧚'), + ('᧞', 'ᨖ'), + ('ᨙ', 'ᨚ'), + ('᨞', 'ᩕ'), + ('ᩗ', 'ᩗ'), + ('ᩡ', 'ᩡ'), + ('ᩣ', 'ᩤ'), + ('ᩭ', 'ᩲ'), + ('᪀', '᪉'), + ('᪐', '᪙'), + ('᪠', '᪭'), + ('ᬄ', 'ᬳ'), + ('ᬾ', 'ᭁ'), + ('ᭅ', 'ᭌ'), + ('᭎', '᭪'), + ('᭴', '᭿'), + ('ᮂ', 'ᮡ'), + ('ᮦ', 'ᮧ'), + ('ᮮ', 'ᯥ'), + ('ᯧ', 'ᯧ'), + ('ᯪ', 'ᯬ'), + ('ᯮ', 'ᯮ'), + ('᯼', 'ᰫ'), + ('ᰴ', 'ᰵ'), + ('᰻', '᱉'), + ('ᱍ', 'ᲊ'), + ('Ა', 'Ჺ'), + ('Ჽ', '᳇'), + ('᳓', '᳓'), + ('᳡', '᳡'), + ('ᳩ', 'ᳬ'), + ('ᳮ', 'ᳳ'), + ('ᳵ', '᳷'), + ('ᳺ', 'ᳺ'), + ('ᴀ', 'ᶿ'), + ('Ḁ', 'ἕ'), + ('Ἐ', 'Ἕ'), + ('ἠ', 'ὅ'), + ('Ὀ', 'Ὅ'), + ('ὐ', 'ὗ'), + ('Ὑ', 'Ὑ'), + ('Ὓ', 'Ὓ'), + ('Ὕ', 'Ὕ'), + ('Ὗ', 'ώ'), + ('ᾀ', 'ᾴ'), + ('ᾶ', 'ῄ'), + ('ῆ', 'ΐ'), + ('ῖ', 'Ί'), + ('῝', '`'), + ('ῲ', 'ῴ'), + ('ῶ', '῾'), + 
('\u{2000}', '\u{200a}'), + ('‐', '‧'), + ('\u{202f}', '\u{205f}'), + ('⁰', 'ⁱ'), + ('⁴', '₎'), + ('ₐ', 'ₜ'), + ('₠', '⃀'), + ('℀', '↋'), + ('←', '␩'), + ('⑀', '⑊'), + ('①', '⭳'), + ('⭶', '⮕'), + ('⮗', 'ⳮ'), + ('Ⳳ', 'ⳳ'), + ('⳹', 'ⴥ'), + ('ⴧ', 'ⴧ'), + ('ⴭ', 'ⴭ'), + ('ⴰ', 'ⵧ'), + ('ⵯ', '⵰'), + ('ⶀ', 'ⶖ'), + ('ⶠ', 'ⶦ'), + ('ⶨ', 'ⶮ'), + ('ⶰ', 'ⶶ'), + ('ⶸ', 'ⶾ'), + ('ⷀ', 'ⷆ'), + ('ⷈ', 'ⷎ'), + ('ⷐ', 'ⷖ'), + ('ⷘ', 'ⷞ'), + ('⸀', '⹝'), + ('⺀', '⺙'), + ('⺛', '⻳'), + ('⼀', '⿕'), + ('⿰', '〩'), + ('〰', '〿'), + ('ぁ', 'ゖ'), + ('゛', 'ヿ'), + ('ㄅ', 'ㄯ'), + ('ㄱ', 'ㆎ'), + ('㆐', '㇥'), + ('㇯', '㈞'), + ('㈠', 'ꒌ'), + ('꒐', '꓆'), + ('ꓐ', 'ꘫ'), + ('Ꙁ', 'ꙮ'), + ('꙳', '꙳'), + ('꙾', 'ꚝ'), + ('ꚠ', 'ꛯ'), + ('꛲', '꛷'), + ('꜀', 'ꟍ'), + ('Ꟑ', 'ꟑ'), + ('ꟓ', 'ꟓ'), + ('ꟕ', 'Ƛ'), + ('ꟲ', 'ꠁ'), + ('ꠃ', 'ꠅ'), + ('ꠇ', 'ꠊ'), + ('ꠌ', 'ꠤ'), + ('ꠧ', '꠫'), + ('꠰', '꠹'), + ('ꡀ', '꡷'), + ('ꢀ', 'ꣃ'), + ('꣎', '꣙'), + ('ꣲ', 'ꣾ'), + ('꤀', 'ꤥ'), + ('꤮', 'ꥆ'), + ('ꥒ', 'ꥒ'), + ('꥟', 'ꥼ'), + ('ꦃ', 'ꦲ'), + ('ꦴ', 'ꦵ'), + ('ꦺ', 'ꦻ'), + ('ꦾ', 'ꦿ'), + ('꧁', '꧍'), + ('ꧏ', '꧙'), + ('꧞', 'ꧤ'), + ('ꧦ', 'ꧾ'), + ('ꨀ', 'ꨨ'), + ('ꨯ', 'ꨰ'), + ('ꨳ', 'ꨴ'), + ('ꩀ', 'ꩂ'), + ('ꩄ', 'ꩋ'), + ('ꩍ', 'ꩍ'), + ('꩐', '꩙'), + ('꩜', 'ꩻ'), + ('ꩽ', 'ꪯ'), + ('ꪱ', 'ꪱ'), + ('ꪵ', 'ꪶ'), + ('ꪹ', 'ꪽ'), + ('ꫀ', 'ꫀ'), + ('ꫂ', 'ꫂ'), + ('ꫛ', 'ꫫ'), + ('ꫮ', 'ꫵ'), + ('ꬁ', 'ꬆ'), + ('ꬉ', 'ꬎ'), + ('ꬑ', 'ꬖ'), + ('ꬠ', 'ꬦ'), + ('ꬨ', 'ꬮ'), + ('ꬰ', '꭫'), + ('ꭰ', 'ꯤ'), + ('ꯦ', 'ꯧ'), + ('ꯩ', '꯬'), + ('꯰', '꯹'), + ('가', '힣'), + ('ힰ', 'ퟆ'), + ('ퟋ', 'ퟻ'), + ('豈', '舘'), + ('並', '龎'), + ('ff', 'st'), + ('ﬓ', 'ﬗ'), + ('יִ', 'יִ'), + ('ײַ', 'זּ'), + ('טּ', 'לּ'), + ('מּ', 'מּ'), + ('נּ', 'סּ'), + ('ףּ', 'פּ'), + ('צּ', '﯂'), + ('ﯓ', 'ﶏ'), + ('ﶒ', 'ﷇ'), + ('﷏', '﷏'), + ('ﷰ', '﷿'), + ('︐', '︙'), + ('︰', '﹒'), + ('﹔', '﹦'), + ('﹨', '﹫'), + ('ﹰ', 'ﹴ'), + ('ﹶ', 'ﻼ'), + ('!', 'ン'), + ('ᅠ', 'ᄒ'), + ('ᅡ', 'ᅦ'), + ('ᅧ', 'ᅬ'), + ('ᅭ', 'ᅲ'), + ('ᅳ', 'ᅵ'), + ('¢', '₩'), + ('│', '○'), + ('', '�'), + ('𐀀', '𐀋'), + ('𐀍', '𐀦'), + ('𐀨', '𐀺'), + ('𐀼', '𐀽'), + ('𐀿', '𐁍'), + ('𐁐', '𐁝'), + ('𐂀', '𐃺'), + ('𐄀', '𐄂'), + ('𐄇', '𐄳'), + ('𐄷', '𐆎'), + ('𐆐', '𐆜'), + ('𐆠', '𐆠'), + ('𐇐', '𐇼'), + ('𐊀', '𐊜'), + ('𐊠', '𐋐'), + ('𐋡', '𐋻'), + ('𐌀', '𐌣'), + ('𐌭', '𐍊'), + ('𐍐', '𐍵'), + ('𐎀', '𐎝'), + ('𐎟', '𐏃'), + ('𐏈', '𐏕'), + ('𐐀', '𐒝'), + ('𐒠', '𐒩'), + ('𐒰', '𐓓'), + ('𐓘', '𐓻'), + ('𐔀', '𐔧'), + ('𐔰', '𐕣'), + ('𐕯', '𐕺'), + ('𐕼', '𐖊'), + ('𐖌', '𐖒'), + ('𐖔', '𐖕'), + ('𐖗', '𐖡'), + ('𐖣', '𐖱'), + ('𐖳', '𐖹'), + ('𐖻', '𐖼'), + ('𐗀', '𐗳'), + ('𐘀', '𐜶'), + ('𐝀', '𐝕'), + ('𐝠', '𐝧'), + ('𐞀', '𐞅'), + ('𐞇', '𐞰'), + ('𐞲', '𐞺'), + ('𐠀', '𐠅'), + ('𐠈', '𐠈'), + ('𐠊', '𐠵'), + ('𐠷', '𐠸'), + ('𐠼', '𐠼'), + ('𐠿', '𐡕'), + ('𐡗', '𐢞'), + ('𐢧', '𐢯'), + ('𐣠', '𐣲'), + ('𐣴', '𐣵'), + ('𐣻', '𐤛'), + ('𐤟', '𐤹'), + ('𐤿', '𐤿'), + ('𐦀', '𐦷'), + ('𐦼', '𐧏'), + ('𐧒', '𐨀'), + ('𐨐', '𐨓'), + ('𐨕', '𐨗'), + ('𐨙', '𐨵'), + ('𐩀', '𐩈'), + ('𐩐', '𐩘'), + ('𐩠', '𐪟'), + ('𐫀', '𐫤'), + ('𐫫', '𐫶'), + ('𐬀', '𐬵'), + ('𐬹', '𐭕'), + ('𐭘', '𐭲'), + ('𐭸', '𐮑'), + ('𐮙', '𐮜'), + ('𐮩', '𐮯'), + ('𐰀', '𐱈'), + ('𐲀', '𐲲'), + ('𐳀', '𐳲'), + ('𐳺', '𐴣'), + ('𐴰', '𐴹'), + ('𐵀', '𐵥'), + ('𐵮', '𐶅'), + ('𐶎', '𐶏'), + ('𐹠', '𐹾'), + ('𐺀', '𐺩'), + ('𐺭', '𐺭'), + ('𐺰', '𐺱'), + ('𐻂', '𐻄'), + ('𐼀', '𐼧'), + ('𐼰', '𐽅'), + ('𐽑', '𐽙'), + ('𐽰', '𐾁'), + ('𐾆', '𐾉'), + ('𐾰', '𐿋'), + ('𐿠', '𐿶'), + ('𑀀', '𑀀'), + ('𑀂', '𑀷'), + ('𑁇', '𑁍'), + ('𑁒', '𑁯'), + ('𑁱', '𑁲'), + ('𑁵', '𑁵'), + ('𑂂', '𑂲'), + ('𑂷', '𑂸'), + ('𑂻', '𑂼'), + ('𑂾', '𑃁'), + ('𑃐', '𑃨'), + ('𑃰', '𑃹'), + ('𑄃', '𑄦'), + ('𑄬', '𑄬'), + ('𑄶', '𑅇'), + ('𑅐', '𑅲'), + ('𑅴', '𑅶'), + ('𑆂', '𑆵'), + ('𑆿', '𑆿'), + ('𑇁', '𑇈'), + ('𑇍', '𑇎'), + ('𑇐', '𑇟'), + ('𑇡', '𑇴'), + ('𑈀', '𑈑'), + ('𑈓', '𑈮'), 
+ ('𑈲', '𑈳'), + ('𑈸', '𑈽'), + ('𑈿', '𑉀'), + ('𑊀', '𑊆'), + ('𑊈', '𑊈'), + ('𑊊', '𑊍'), + ('𑊏', '𑊝'), + ('𑊟', '𑊩'), + ('𑊰', '𑋞'), + ('𑋠', '𑋢'), + ('𑋰', '𑋹'), + ('𑌂', '𑌃'), + ('𑌅', '𑌌'), + ('𑌏', '𑌐'), + ('𑌓', '𑌨'), + ('𑌪', '𑌰'), + ('𑌲', '𑌳'), + ('𑌵', '𑌹'), + ('𑌽', '𑌽'), + ('𑌿', '𑌿'), + ('𑍁', '𑍄'), + ('𑍇', '𑍈'), + ('𑍋', '𑍌'), + ('𑍐', '𑍐'), + ('𑍝', '𑍣'), + ('𑎀', '𑎉'), + ('𑎋', '𑎋'), + ('𑎎', '𑎎'), + ('𑎐', '𑎵'), + ('𑎷', '𑎷'), + ('𑎹', '𑎺'), + ('𑏊', '𑏊'), + ('𑏌', '𑏍'), + ('𑏑', '𑏑'), + ('𑏓', '𑏕'), + ('𑏗', '𑏘'), + ('𑐀', '𑐷'), + ('𑑀', '𑑁'), + ('𑑅', '𑑅'), + ('𑑇', '𑑛'), + ('𑑝', '𑑝'), + ('𑑟', '𑑡'), + ('𑒀', '𑒯'), + ('𑒱', '𑒲'), + ('𑒹', '𑒹'), + ('𑒻', '𑒼'), + ('𑒾', '𑒾'), + ('𑓁', '𑓁'), + ('𑓄', '𑓇'), + ('𑓐', '𑓙'), + ('𑖀', '𑖮'), + ('𑖰', '𑖱'), + ('𑖸', '𑖻'), + ('𑖾', '𑖾'), + ('𑗁', '𑗛'), + ('𑘀', '𑘲'), + ('𑘻', '𑘼'), + ('𑘾', '𑘾'), + ('𑙁', '𑙄'), + ('𑙐', '𑙙'), + ('𑙠', '𑙬'), + ('𑚀', '𑚪'), + ('𑚬', '𑚬'), + ('𑚮', '𑚯'), + ('𑚸', '𑚹'), + ('𑛀', '𑛉'), + ('𑛐', '𑛣'), + ('𑜀', '𑜚'), + ('𑜞', '𑜞'), + ('𑜠', '𑜡'), + ('𑜦', '𑜦'), + ('𑜰', '𑝆'), + ('𑠀', '𑠮'), + ('𑠸', '𑠸'), + ('𑠻', '𑠻'), + ('𑢠', '𑣲'), + ('𑣿', '𑤆'), + ('𑤉', '𑤉'), + ('𑤌', '𑤓'), + ('𑤕', '𑤖'), + ('𑤘', '𑤯'), + ('𑤱', '𑤵'), + ('𑤷', '𑤸'), + ('𑤿', '𑥂'), + ('𑥄', '𑥆'), + ('𑥐', '𑥙'), + ('𑦠', '𑦧'), + ('𑦪', '𑧓'), + ('𑧜', '𑧟'), + ('𑧡', '𑧤'), + ('𑨀', '𑨀'), + ('𑨋', '𑨲'), + ('𑨹', '𑨺'), + ('𑨿', '𑩆'), + ('𑩐', '𑩐'), + ('𑩗', '𑩘'), + ('𑩜', '𑪉'), + ('𑪗', '𑪗'), + ('𑪚', '𑪢'), + ('𑪰', '𑫸'), + ('𑬀', '𑬉'), + ('𑯀', '𑯡'), + ('𑯰', '𑯹'), + ('𑰀', '𑰈'), + ('𑰊', '𑰯'), + ('𑰾', '𑰾'), + ('𑱀', '𑱅'), + ('𑱐', '𑱬'), + ('𑱰', '𑲏'), + ('𑲩', '𑲩'), + ('𑲱', '𑲱'), + ('𑲴', '𑲴'), + ('𑴀', '𑴆'), + ('𑴈', '𑴉'), + ('𑴋', '𑴰'), + ('𑵆', '𑵆'), + ('𑵐', '𑵙'), + ('𑵠', '𑵥'), + ('𑵧', '𑵨'), + ('𑵪', '𑶎'), + ('𑶓', '𑶔'), + ('𑶖', '𑶖'), + ('𑶘', '𑶘'), + ('𑶠', '𑶩'), + ('𑻠', '𑻲'), + ('𑻵', '𑻸'), + ('𑼂', '𑼐'), + ('𑼒', '𑼵'), + ('𑼾', '𑼿'), + ('𑽃', '𑽙'), + ('𑾰', '𑾰'), + ('𑿀', '𑿱'), + ('𑿿', '𒎙'), + ('𒐀', '𒑮'), + ('𒑰', '𒑴'), + ('𒒀', '𒕃'), + ('𒾐', '𒿲'), + ('𓀀', '𓐯'), + ('𓑁', '𓑆'), + ('𓑠', '𔏺'), + ('𔐀', '𔙆'), + ('𖄀', '𖄝'), + ('𖄪', '𖄬'), + ('𖄰', '𖄹'), + ('𖠀', '𖨸'), + ('𖩀', '𖩞'), + ('𖩠', '𖩩'), + ('𖩮', '𖪾'), + ('𖫀', '𖫉'), + ('𖫐', '𖫭'), + ('𖫵', '𖫵'), + ('𖬀', '𖬯'), + ('𖬷', '𖭅'), + ('𖭐', '𖭙'), + ('𖭛', '𖭡'), + ('𖭣', '𖭷'), + ('𖭽', '𖮏'), + ('𖵀', '𖵹'), + ('𖹀', '𖺚'), + ('𖼀', '𖽊'), + ('𖽐', '𖾇'), + ('𖾓', '𖾟'), + ('𖿠', '𖿣'), + ('𗀀', '𘟷'), + ('𘠀', '𘳕'), + ('𘳿', '𘴈'), + ('𚿰', '𚿳'), + ('𚿵', '𚿻'), + ('𚿽', '𚿾'), + ('𛀀', '𛄢'), + ('𛄲', '𛄲'), + ('𛅐', '𛅒'), + ('𛅕', '𛅕'), + ('𛅤', '𛅧'), + ('𛅰', '𛋻'), + ('𛰀', '𛱪'), + ('𛱰', '𛱼'), + ('𛲀', '𛲈'), + ('𛲐', '𛲙'), + ('𛲜', '𛲜'), + ('𛲟', '𛲟'), + ('𜰀', '𜳹'), + ('𜴀', '𜺳'), + ('𜽐', '𜿃'), + ('𝀀', '𝃵'), + ('𝄀', '𝄦'), + ('𝄩', '𝅘𝅥𝅲'), + ('𝅪', '𝅬'), + ('𝆃', '𝆄'), + ('𝆌', '𝆩'), + ('𝆮', '𝇪'), + ('𝈀', '𝉁'), + ('𝉅', '𝉅'), + ('𝋀', '𝋓'), + ('𝋠', '𝋳'), + ('𝌀', '𝍖'), + ('𝍠', '𝍸'), + ('𝐀', '𝑔'), + ('𝑖', '𝒜'), + ('𝒞', '𝒟'), + ('𝒢', '𝒢'), + ('𝒥', '𝒦'), + ('𝒩', '𝒬'), + ('𝒮', '𝒹'), + ('𝒻', '𝒻'), + ('𝒽', '𝓃'), + ('𝓅', '𝔅'), + ('𝔇', '𝔊'), + ('𝔍', '𝔔'), + ('𝔖', '𝔜'), + ('𝔞', '𝔹'), + ('𝔻', '𝔾'), + ('𝕀', '𝕄'), + ('𝕆', '𝕆'), + ('𝕊', '𝕐'), + ('𝕒', '𝚥'), + ('𝚨', '𝟋'), + ('𝟎', '𝧿'), + ('𝨷', '𝨺'), + ('𝩭', '𝩴'), + ('𝩶', '𝪃'), + ('𝪅', '𝪋'), + ('𝼀', '𝼞'), + ('𝼥', '𝼪'), + ('𞀰', '𞁭'), + ('𞄀', '𞄬'), + ('𞄷', '𞄽'), + ('𞅀', '𞅉'), + ('𞅎', '𞅏'), + ('𞊐', '𞊭'), + ('𞋀', '𞋫'), + ('𞋰', '𞋹'), + ('𞋿', '𞋿'), + ('𞓐', '𞓫'), + ('𞓰', '𞓹'), + ('𞗐', '𞗭'), + ('𞗰', '𞗺'), + ('𞗿', '𞗿'), + ('𞟠', '𞟦'), + ('𞟨', '𞟫'), + ('𞟭', '𞟮'), + ('𞟰', '𞟾'), + ('𞠀', '𞣄'), + ('𞣇', '𞣏'), + ('𞤀', '𞥃'), + ('𞥋', '𞥋'), + ('𞥐', '𞥙'), + ('𞥞', '𞥟'), + ('𞱱', '𞲴'), + ('𞴁', '𞴽'), + ('𞸀', '𞸃'), + ('𞸅', '𞸟'), + ('𞸡', '𞸢'), + ('𞸤', 
'𞸤'), + ('𞸧', '𞸧'), + ('𞸩', '𞸲'), + ('𞸴', '𞸷'), + ('𞸹', '𞸹'), + ('𞸻', '𞸻'), + ('𞹂', '𞹂'), + ('𞹇', '𞹇'), + ('𞹉', '𞹉'), + ('𞹋', '𞹋'), + ('𞹍', '𞹏'), + ('𞹑', '𞹒'), + ('𞹔', '𞹔'), + ('𞹗', '𞹗'), + ('𞹙', '𞹙'), + ('𞹛', '𞹛'), + ('𞹝', '𞹝'), + ('𞹟', '𞹟'), + ('𞹡', '𞹢'), + ('𞹤', '𞹤'), + ('𞹧', '𞹪'), + ('𞹬', '𞹲'), + ('𞹴', '𞹷'), + ('𞹹', '𞹼'), + ('𞹾', '𞹾'), + ('𞺀', '𞺉'), + ('𞺋', '𞺛'), + ('𞺡', '𞺣'), + ('𞺥', '𞺩'), + ('𞺫', '𞺻'), + ('𞻰', '𞻱'), + ('🀀', '🀫'), + ('🀰', '🂓'), + ('🂠', '🂮'), + ('🂱', '🂿'), + ('🃁', '🃏'), + ('🃑', '🃵'), + ('🄀', '🆭'), + ('🇦', '🈂'), + ('🈐', '🈻'), + ('🉀', '🉈'), + ('🉐', '🉑'), + ('🉠', '🉥'), + ('🌀', '🛗'), + ('🛜', '🛬'), + ('🛰', '🛼'), + ('🜀', '🝶'), + ('🝻', '🟙'), + ('🟠', '🟫'), + ('🟰', '🟰'), + ('🠀', '🠋'), + ('🠐', '🡇'), + ('🡐', '🡙'), + ('🡠', '🢇'), + ('🢐', '🢭'), + ('🢰', '🢻'), + ('🣀', '🣁'), + ('🤀', '🩓'), + ('🩠', '🩭'), + ('🩰', '🩼'), + ('🪀', '🪉'), + ('🪏', '🫆'), + ('🫎', '🫜'), + ('🫟', '🫩'), + ('🫰', '🫸'), + ('🬀', '🮒'), + ('🮔', '🯹'), + ('𠀀', '𪛟'), + ('𪜀', '𫜹'), + ('𫝀', '𫠝'), + ('𫠠', '𬺡'), + ('𬺰', '𮯠'), + ('𮯰', '𮹝'), + ('丽', '𪘀'), + ('𰀀', '𱍊'), + ('𱍐', '𲎯'), +]; + +pub const GRAPHEME_EXTEND: &'static [(char, char)] = &[ + ('\u{300}', '\u{36f}'), + ('\u{483}', '\u{489}'), + ('\u{591}', '\u{5bd}'), + ('\u{5bf}', '\u{5bf}'), + ('\u{5c1}', '\u{5c2}'), + ('\u{5c4}', '\u{5c5}'), + ('\u{5c7}', '\u{5c7}'), + ('\u{610}', '\u{61a}'), + ('\u{64b}', '\u{65f}'), + ('\u{670}', '\u{670}'), + ('\u{6d6}', '\u{6dc}'), + ('\u{6df}', '\u{6e4}'), + ('\u{6e7}', '\u{6e8}'), + ('\u{6ea}', '\u{6ed}'), + ('\u{711}', '\u{711}'), + ('\u{730}', '\u{74a}'), + ('\u{7a6}', '\u{7b0}'), + ('\u{7eb}', '\u{7f3}'), + ('\u{7fd}', '\u{7fd}'), + ('\u{816}', '\u{819}'), + ('\u{81b}', '\u{823}'), + ('\u{825}', '\u{827}'), + ('\u{829}', '\u{82d}'), + ('\u{859}', '\u{85b}'), + ('\u{897}', '\u{89f}'), + ('\u{8ca}', '\u{8e1}'), + ('\u{8e3}', '\u{902}'), + ('\u{93a}', '\u{93a}'), + ('\u{93c}', '\u{93c}'), + ('\u{941}', '\u{948}'), + ('\u{94d}', '\u{94d}'), + ('\u{951}', '\u{957}'), + ('\u{962}', '\u{963}'), + ('\u{981}', '\u{981}'), + ('\u{9bc}', '\u{9bc}'), + ('\u{9be}', '\u{9be}'), + ('\u{9c1}', '\u{9c4}'), + ('\u{9cd}', '\u{9cd}'), + ('\u{9d7}', '\u{9d7}'), + ('\u{9e2}', '\u{9e3}'), + ('\u{9fe}', '\u{9fe}'), + ('\u{a01}', '\u{a02}'), + ('\u{a3c}', '\u{a3c}'), + ('\u{a41}', '\u{a42}'), + ('\u{a47}', '\u{a48}'), + ('\u{a4b}', '\u{a4d}'), + ('\u{a51}', '\u{a51}'), + ('\u{a70}', '\u{a71}'), + ('\u{a75}', '\u{a75}'), + ('\u{a81}', '\u{a82}'), + ('\u{abc}', '\u{abc}'), + ('\u{ac1}', '\u{ac5}'), + ('\u{ac7}', '\u{ac8}'), + ('\u{acd}', '\u{acd}'), + ('\u{ae2}', '\u{ae3}'), + ('\u{afa}', '\u{aff}'), + ('\u{b01}', '\u{b01}'), + ('\u{b3c}', '\u{b3c}'), + ('\u{b3e}', '\u{b3f}'), + ('\u{b41}', '\u{b44}'), + ('\u{b4d}', '\u{b4d}'), + ('\u{b55}', '\u{b57}'), + ('\u{b62}', '\u{b63}'), + ('\u{b82}', '\u{b82}'), + ('\u{bbe}', '\u{bbe}'), + ('\u{bc0}', '\u{bc0}'), + ('\u{bcd}', '\u{bcd}'), + ('\u{bd7}', '\u{bd7}'), + ('\u{c00}', '\u{c00}'), + ('\u{c04}', '\u{c04}'), + ('\u{c3c}', '\u{c3c}'), + ('\u{c3e}', '\u{c40}'), + ('\u{c46}', '\u{c48}'), + ('\u{c4a}', '\u{c4d}'), + ('\u{c55}', '\u{c56}'), + ('\u{c62}', '\u{c63}'), + ('\u{c81}', '\u{c81}'), + ('\u{cbc}', '\u{cbc}'), + ('\u{cbf}', '\u{cc0}'), + ('\u{cc2}', '\u{cc2}'), + ('\u{cc6}', '\u{cc8}'), + ('\u{cca}', '\u{ccd}'), + ('\u{cd5}', '\u{cd6}'), + ('\u{ce2}', '\u{ce3}'), + ('\u{d00}', '\u{d01}'), + ('\u{d3b}', '\u{d3c}'), + ('\u{d3e}', '\u{d3e}'), + ('\u{d41}', '\u{d44}'), + ('\u{d4d}', '\u{d4d}'), + ('\u{d57}', '\u{d57}'), + ('\u{d62}', '\u{d63}'), + ('\u{d81}', '\u{d81}'), + ('\u{dca}', '\u{dca}'), + ('\u{dcf}', 
'\u{dcf}'), + ('\u{dd2}', '\u{dd4}'), + ('\u{dd6}', '\u{dd6}'), + ('\u{ddf}', '\u{ddf}'), + ('\u{e31}', '\u{e31}'), + ('\u{e34}', '\u{e3a}'), + ('\u{e47}', '\u{e4e}'), + ('\u{eb1}', '\u{eb1}'), + ('\u{eb4}', '\u{ebc}'), + ('\u{ec8}', '\u{ece}'), + ('\u{f18}', '\u{f19}'), + ('\u{f35}', '\u{f35}'), + ('\u{f37}', '\u{f37}'), + ('\u{f39}', '\u{f39}'), + ('\u{f71}', '\u{f7e}'), + ('\u{f80}', '\u{f84}'), + ('\u{f86}', '\u{f87}'), + ('\u{f8d}', '\u{f97}'), + ('\u{f99}', '\u{fbc}'), + ('\u{fc6}', '\u{fc6}'), + ('\u{102d}', '\u{1030}'), + ('\u{1032}', '\u{1037}'), + ('\u{1039}', '\u{103a}'), + ('\u{103d}', '\u{103e}'), + ('\u{1058}', '\u{1059}'), + ('\u{105e}', '\u{1060}'), + ('\u{1071}', '\u{1074}'), + ('\u{1082}', '\u{1082}'), + ('\u{1085}', '\u{1086}'), + ('\u{108d}', '\u{108d}'), + ('\u{109d}', '\u{109d}'), + ('\u{135d}', '\u{135f}'), + ('\u{1712}', '\u{1715}'), + ('\u{1732}', '\u{1734}'), + ('\u{1752}', '\u{1753}'), + ('\u{1772}', '\u{1773}'), + ('\u{17b4}', '\u{17b5}'), + ('\u{17b7}', '\u{17bd}'), + ('\u{17c6}', '\u{17c6}'), + ('\u{17c9}', '\u{17d3}'), + ('\u{17dd}', '\u{17dd}'), + ('\u{180b}', '\u{180d}'), + ('\u{180f}', '\u{180f}'), + ('\u{1885}', '\u{1886}'), + ('\u{18a9}', '\u{18a9}'), + ('\u{1920}', '\u{1922}'), + ('\u{1927}', '\u{1928}'), + ('\u{1932}', '\u{1932}'), + ('\u{1939}', '\u{193b}'), + ('\u{1a17}', '\u{1a18}'), + ('\u{1a1b}', '\u{1a1b}'), + ('\u{1a56}', '\u{1a56}'), + ('\u{1a58}', '\u{1a5e}'), + ('\u{1a60}', '\u{1a60}'), + ('\u{1a62}', '\u{1a62}'), + ('\u{1a65}', '\u{1a6c}'), + ('\u{1a73}', '\u{1a7c}'), + ('\u{1a7f}', '\u{1a7f}'), + ('\u{1ab0}', '\u{1ace}'), + ('\u{1b00}', '\u{1b03}'), + ('\u{1b34}', '\u{1b3d}'), + ('\u{1b42}', '\u{1b44}'), + ('\u{1b6b}', '\u{1b73}'), + ('\u{1b80}', '\u{1b81}'), + ('\u{1ba2}', '\u{1ba5}'), + ('\u{1ba8}', '\u{1bad}'), + ('\u{1be6}', '\u{1be6}'), + ('\u{1be8}', '\u{1be9}'), + ('\u{1bed}', '\u{1bed}'), + ('\u{1bef}', '\u{1bf3}'), + ('\u{1c2c}', '\u{1c33}'), + ('\u{1c36}', '\u{1c37}'), + ('\u{1cd0}', '\u{1cd2}'), + ('\u{1cd4}', '\u{1ce0}'), + ('\u{1ce2}', '\u{1ce8}'), + ('\u{1ced}', '\u{1ced}'), + ('\u{1cf4}', '\u{1cf4}'), + ('\u{1cf8}', '\u{1cf9}'), + ('\u{1dc0}', '\u{1dff}'), + ('\u{200c}', '\u{200c}'), + ('\u{20d0}', '\u{20f0}'), + ('\u{2cef}', '\u{2cf1}'), + ('\u{2d7f}', '\u{2d7f}'), + ('\u{2de0}', '\u{2dff}'), + ('\u{302a}', '\u{302f}'), + ('\u{3099}', '\u{309a}'), + ('\u{a66f}', '\u{a672}'), + ('\u{a674}', '\u{a67d}'), + ('\u{a69e}', '\u{a69f}'), + ('\u{a6f0}', '\u{a6f1}'), + ('\u{a802}', '\u{a802}'), + ('\u{a806}', '\u{a806}'), + ('\u{a80b}', '\u{a80b}'), + ('\u{a825}', '\u{a826}'), + ('\u{a82c}', '\u{a82c}'), + ('\u{a8c4}', '\u{a8c5}'), + ('\u{a8e0}', '\u{a8f1}'), + ('\u{a8ff}', '\u{a8ff}'), + ('\u{a926}', '\u{a92d}'), + ('\u{a947}', '\u{a951}'), + ('\u{a953}', '\u{a953}'), + ('\u{a980}', '\u{a982}'), + ('\u{a9b3}', '\u{a9b3}'), + ('\u{a9b6}', '\u{a9b9}'), + ('\u{a9bc}', '\u{a9bd}'), + ('\u{a9c0}', '\u{a9c0}'), + ('\u{a9e5}', '\u{a9e5}'), + ('\u{aa29}', '\u{aa2e}'), + ('\u{aa31}', '\u{aa32}'), + ('\u{aa35}', '\u{aa36}'), + ('\u{aa43}', '\u{aa43}'), + ('\u{aa4c}', '\u{aa4c}'), + ('\u{aa7c}', '\u{aa7c}'), + ('\u{aab0}', '\u{aab0}'), + ('\u{aab2}', '\u{aab4}'), + ('\u{aab7}', '\u{aab8}'), + ('\u{aabe}', '\u{aabf}'), + ('\u{aac1}', '\u{aac1}'), + ('\u{aaec}', '\u{aaed}'), + ('\u{aaf6}', '\u{aaf6}'), + ('\u{abe5}', '\u{abe5}'), + ('\u{abe8}', '\u{abe8}'), + ('\u{abed}', '\u{abed}'), + ('\u{fb1e}', '\u{fb1e}'), + ('\u{fe00}', '\u{fe0f}'), + ('\u{fe20}', '\u{fe2f}'), + ('\u{ff9e}', '\u{ff9f}'), + ('\u{101fd}', '\u{101fd}'), + ('\u{102e0}', 
'\u{102e0}'), + ('\u{10376}', '\u{1037a}'), + ('\u{10a01}', '\u{10a03}'), + ('\u{10a05}', '\u{10a06}'), + ('\u{10a0c}', '\u{10a0f}'), + ('\u{10a38}', '\u{10a3a}'), + ('\u{10a3f}', '\u{10a3f}'), + ('\u{10ae5}', '\u{10ae6}'), + ('\u{10d24}', '\u{10d27}'), + ('\u{10d69}', '\u{10d6d}'), + ('\u{10eab}', '\u{10eac}'), + ('\u{10efc}', '\u{10eff}'), + ('\u{10f46}', '\u{10f50}'), + ('\u{10f82}', '\u{10f85}'), + ('\u{11001}', '\u{11001}'), + ('\u{11038}', '\u{11046}'), + ('\u{11070}', '\u{11070}'), + ('\u{11073}', '\u{11074}'), + ('\u{1107f}', '\u{11081}'), + ('\u{110b3}', '\u{110b6}'), + ('\u{110b9}', '\u{110ba}'), + ('\u{110c2}', '\u{110c2}'), + ('\u{11100}', '\u{11102}'), + ('\u{11127}', '\u{1112b}'), + ('\u{1112d}', '\u{11134}'), + ('\u{11173}', '\u{11173}'), + ('\u{11180}', '\u{11181}'), + ('\u{111b6}', '\u{111be}'), + ('\u{111c0}', '\u{111c0}'), + ('\u{111c9}', '\u{111cc}'), + ('\u{111cf}', '\u{111cf}'), + ('\u{1122f}', '\u{11231}'), + ('\u{11234}', '\u{11237}'), + ('\u{1123e}', '\u{1123e}'), + ('\u{11241}', '\u{11241}'), + ('\u{112df}', '\u{112df}'), + ('\u{112e3}', '\u{112ea}'), + ('\u{11300}', '\u{11301}'), + ('\u{1133b}', '\u{1133c}'), + ('\u{1133e}', '\u{1133e}'), + ('\u{11340}', '\u{11340}'), + ('\u{1134d}', '\u{1134d}'), + ('\u{11357}', '\u{11357}'), + ('\u{11366}', '\u{1136c}'), + ('\u{11370}', '\u{11374}'), + ('\u{113b8}', '\u{113b8}'), + ('\u{113bb}', '\u{113c0}'), + ('\u{113c2}', '\u{113c2}'), + ('\u{113c5}', '\u{113c5}'), + ('\u{113c7}', '\u{113c9}'), + ('\u{113ce}', '\u{113d0}'), + ('\u{113d2}', '\u{113d2}'), + ('\u{113e1}', '\u{113e2}'), + ('\u{11438}', '\u{1143f}'), + ('\u{11442}', '\u{11444}'), + ('\u{11446}', '\u{11446}'), + ('\u{1145e}', '\u{1145e}'), + ('\u{114b0}', '\u{114b0}'), + ('\u{114b3}', '\u{114b8}'), + ('\u{114ba}', '\u{114ba}'), + ('\u{114bd}', '\u{114bd}'), + ('\u{114bf}', '\u{114c0}'), + ('\u{114c2}', '\u{114c3}'), + ('\u{115af}', '\u{115af}'), + ('\u{115b2}', '\u{115b5}'), + ('\u{115bc}', '\u{115bd}'), + ('\u{115bf}', '\u{115c0}'), + ('\u{115dc}', '\u{115dd}'), + ('\u{11633}', '\u{1163a}'), + ('\u{1163d}', '\u{1163d}'), + ('\u{1163f}', '\u{11640}'), + ('\u{116ab}', '\u{116ab}'), + ('\u{116ad}', '\u{116ad}'), + ('\u{116b0}', '\u{116b7}'), + ('\u{1171d}', '\u{1171d}'), + ('\u{1171f}', '\u{1171f}'), + ('\u{11722}', '\u{11725}'), + ('\u{11727}', '\u{1172b}'), + ('\u{1182f}', '\u{11837}'), + ('\u{11839}', '\u{1183a}'), + ('\u{11930}', '\u{11930}'), + ('\u{1193b}', '\u{1193e}'), + ('\u{11943}', '\u{11943}'), + ('\u{119d4}', '\u{119d7}'), + ('\u{119da}', '\u{119db}'), + ('\u{119e0}', '\u{119e0}'), + ('\u{11a01}', '\u{11a0a}'), + ('\u{11a33}', '\u{11a38}'), + ('\u{11a3b}', '\u{11a3e}'), + ('\u{11a47}', '\u{11a47}'), + ('\u{11a51}', '\u{11a56}'), + ('\u{11a59}', '\u{11a5b}'), + ('\u{11a8a}', '\u{11a96}'), + ('\u{11a98}', '\u{11a99}'), + ('\u{11c30}', '\u{11c36}'), + ('\u{11c38}', '\u{11c3d}'), + ('\u{11c3f}', '\u{11c3f}'), + ('\u{11c92}', '\u{11ca7}'), + ('\u{11caa}', '\u{11cb0}'), + ('\u{11cb2}', '\u{11cb3}'), + ('\u{11cb5}', '\u{11cb6}'), + ('\u{11d31}', '\u{11d36}'), + ('\u{11d3a}', '\u{11d3a}'), + ('\u{11d3c}', '\u{11d3d}'), + ('\u{11d3f}', '\u{11d45}'), + ('\u{11d47}', '\u{11d47}'), + ('\u{11d90}', '\u{11d91}'), + ('\u{11d95}', '\u{11d95}'), + ('\u{11d97}', '\u{11d97}'), + ('\u{11ef3}', '\u{11ef4}'), + ('\u{11f00}', '\u{11f01}'), + ('\u{11f36}', '\u{11f3a}'), + ('\u{11f40}', '\u{11f42}'), + ('\u{11f5a}', '\u{11f5a}'), + ('\u{13440}', '\u{13440}'), + ('\u{13447}', '\u{13455}'), + ('\u{1611e}', '\u{16129}'), + ('\u{1612d}', '\u{1612f}'), + ('\u{16af0}', '\u{16af4}'), 
+ ('\u{16b30}', '\u{16b36}'), + ('\u{16f4f}', '\u{16f4f}'), + ('\u{16f8f}', '\u{16f92}'), + ('\u{16fe4}', '\u{16fe4}'), + ('\u{16ff0}', '\u{16ff1}'), + ('\u{1bc9d}', '\u{1bc9e}'), + ('\u{1cf00}', '\u{1cf2d}'), + ('\u{1cf30}', '\u{1cf46}'), + ('\u{1d165}', '\u{1d169}'), + ('\u{1d16d}', '\u{1d172}'), + ('\u{1d17b}', '\u{1d182}'), + ('\u{1d185}', '\u{1d18b}'), + ('\u{1d1aa}', '\u{1d1ad}'), + ('\u{1d242}', '\u{1d244}'), + ('\u{1da00}', '\u{1da36}'), + ('\u{1da3b}', '\u{1da6c}'), + ('\u{1da75}', '\u{1da75}'), + ('\u{1da84}', '\u{1da84}'), + ('\u{1da9b}', '\u{1da9f}'), + ('\u{1daa1}', '\u{1daaf}'), + ('\u{1e000}', '\u{1e006}'), + ('\u{1e008}', '\u{1e018}'), + ('\u{1e01b}', '\u{1e021}'), + ('\u{1e023}', '\u{1e024}'), + ('\u{1e026}', '\u{1e02a}'), + ('\u{1e08f}', '\u{1e08f}'), + ('\u{1e130}', '\u{1e136}'), + ('\u{1e2ae}', '\u{1e2ae}'), + ('\u{1e2ec}', '\u{1e2ef}'), + ('\u{1e4ec}', '\u{1e4ef}'), + ('\u{1e5ee}', '\u{1e5ef}'), + ('\u{1e8d0}', '\u{1e8d6}'), + ('\u{1e944}', '\u{1e94a}'), + ('\u{e0020}', '\u{e007f}'), + ('\u{e0100}', '\u{e01ef}'), +]; + +pub const GRAPHEME_LINK: &'static [(char, char)] = &[ + ('\u{94d}', '\u{94d}'), + ('\u{9cd}', '\u{9cd}'), + ('\u{a4d}', '\u{a4d}'), + ('\u{acd}', '\u{acd}'), + ('\u{b4d}', '\u{b4d}'), + ('\u{bcd}', '\u{bcd}'), + ('\u{c4d}', '\u{c4d}'), + ('\u{ccd}', '\u{ccd}'), + ('\u{d3b}', '\u{d3c}'), + ('\u{d4d}', '\u{d4d}'), + ('\u{dca}', '\u{dca}'), + ('\u{e3a}', '\u{e3a}'), + ('\u{eba}', '\u{eba}'), + ('\u{f84}', '\u{f84}'), + ('\u{1039}', '\u{103a}'), + ('\u{1714}', '\u{1715}'), + ('\u{1734}', '\u{1734}'), + ('\u{17d2}', '\u{17d2}'), + ('\u{1a60}', '\u{1a60}'), + ('\u{1b44}', '\u{1b44}'), + ('\u{1baa}', '\u{1bab}'), + ('\u{1bf2}', '\u{1bf3}'), + ('\u{2d7f}', '\u{2d7f}'), + ('\u{a806}', '\u{a806}'), + ('\u{a82c}', '\u{a82c}'), + ('\u{a8c4}', '\u{a8c4}'), + ('\u{a953}', '\u{a953}'), + ('\u{a9c0}', '\u{a9c0}'), + ('\u{aaf6}', '\u{aaf6}'), + ('\u{abed}', '\u{abed}'), + ('\u{10a3f}', '\u{10a3f}'), + ('\u{11046}', '\u{11046}'), + ('\u{11070}', '\u{11070}'), + ('\u{1107f}', '\u{1107f}'), + ('\u{110b9}', '\u{110b9}'), + ('\u{11133}', '\u{11134}'), + ('\u{111c0}', '\u{111c0}'), + ('\u{11235}', '\u{11235}'), + ('\u{112ea}', '\u{112ea}'), + ('\u{1134d}', '\u{1134d}'), + ('\u{113ce}', '\u{113d0}'), + ('\u{11442}', '\u{11442}'), + ('\u{114c2}', '\u{114c2}'), + ('\u{115bf}', '\u{115bf}'), + ('\u{1163f}', '\u{1163f}'), + ('\u{116b6}', '\u{116b6}'), + ('\u{1172b}', '\u{1172b}'), + ('\u{11839}', '\u{11839}'), + ('\u{1193d}', '\u{1193e}'), + ('\u{119e0}', '\u{119e0}'), + ('\u{11a34}', '\u{11a34}'), + ('\u{11a47}', '\u{11a47}'), + ('\u{11a99}', '\u{11a99}'), + ('\u{11c3f}', '\u{11c3f}'), + ('\u{11d44}', '\u{11d45}'), + ('\u{11d97}', '\u{11d97}'), + ('\u{11f41}', '\u{11f42}'), + ('\u{1612f}', '\u{1612f}'), +]; + +pub const HEX_DIGIT: &'static [(char, char)] = &[ + ('0', '9'), + ('A', 'F'), + ('a', 'f'), + ('0', '9'), + ('A', 'F'), + ('a', 'f'), +]; + +pub const HYPHEN: &'static [(char, char)] = &[ + ('-', '-'), + ('\u{ad}', '\u{ad}'), + ('֊', '֊'), + ('᠆', '᠆'), + ('‐', '‑'), + ('⸗', '⸗'), + ('・', '・'), + ('﹣', '﹣'), + ('-', '-'), + ('・', '・'), +]; + +pub const IDS_BINARY_OPERATOR: &'static [(char, char)] = + &[('⿰', '⿱'), ('⿴', '⿽'), ('㇯', '㇯')]; + +pub const IDS_TRINARY_OPERATOR: &'static [(char, char)] = &[('⿲', '⿳')]; + +pub const IDS_UNARY_OPERATOR: &'static [(char, char)] = &[('⿾', '⿿')]; + +pub const ID_COMPAT_MATH_CONTINUE: &'static [(char, char)] = &[ + ('²', '³'), + ('¹', '¹'), + ('⁰', '⁰'), + ('⁴', '⁾'), + ('₀', '₎'), + ('∂', '∂'), + ('∇', '∇'), + ('∞', '∞'), + ('𝛁', 
'𝛁'), + ('𝛛', '𝛛'), + ('𝛻', '𝛻'), + ('𝜕', '𝜕'), + ('𝜵', '𝜵'), + ('𝝏', '𝝏'), + ('𝝯', '𝝯'), + ('𝞉', '𝞉'), + ('𝞩', '𝞩'), + ('𝟃', '𝟃'), +]; + +pub const ID_COMPAT_MATH_START: &'static [(char, char)] = &[ + ('∂', '∂'), + ('∇', '∇'), + ('∞', '∞'), + ('𝛁', '𝛁'), + ('𝛛', '𝛛'), + ('𝛻', '𝛻'), + ('𝜕', '𝜕'), + ('𝜵', '𝜵'), + ('𝝏', '𝝏'), + ('𝝯', '𝝯'), + ('𝞉', '𝞉'), + ('𝞩', '𝞩'), + ('𝟃', '𝟃'), +]; + +pub const ID_CONTINUE: &'static [(char, char)] = &[ + ('0', '9'), + ('A', 'Z'), + ('_', '_'), + ('a', 'z'), + ('ª', 'ª'), + ('µ', 'µ'), + ('·', '·'), + ('º', 'º'), + ('À', 'Ö'), + ('Ø', 'ö'), + ('ø', 'ˁ'), + ('ˆ', 'ˑ'), + ('ˠ', 'ˤ'), + ('ˬ', 'ˬ'), + ('ˮ', 'ˮ'), + ('\u{300}', 'ʹ'), + ('Ͷ', 'ͷ'), + ('ͺ', 'ͽ'), + ('Ϳ', 'Ϳ'), + ('Ά', 'Ί'), + ('Ό', 'Ό'), + ('Ύ', 'Ρ'), + ('Σ', 'ϵ'), + ('Ϸ', 'ҁ'), + ('\u{483}', '\u{487}'), + ('Ҋ', 'ԯ'), + ('Ա', 'Ֆ'), + ('ՙ', 'ՙ'), + ('ՠ', 'ֈ'), + ('\u{591}', '\u{5bd}'), + ('\u{5bf}', '\u{5bf}'), + ('\u{5c1}', '\u{5c2}'), + ('\u{5c4}', '\u{5c5}'), + ('\u{5c7}', '\u{5c7}'), + ('א', 'ת'), + ('ׯ', 'ײ'), + ('\u{610}', '\u{61a}'), + ('ؠ', '٩'), + ('ٮ', 'ۓ'), + ('ە', '\u{6dc}'), + ('\u{6df}', '\u{6e8}'), + ('\u{6ea}', 'ۼ'), + ('ۿ', 'ۿ'), + ('ܐ', '\u{74a}'), + ('ݍ', 'ޱ'), + ('߀', 'ߵ'), + ('ߺ', 'ߺ'), + ('\u{7fd}', '\u{7fd}'), + ('ࠀ', '\u{82d}'), + ('ࡀ', '\u{85b}'), + ('ࡠ', 'ࡪ'), + ('ࡰ', 'ࢇ'), + ('ࢉ', 'ࢎ'), + ('\u{897}', '\u{8e1}'), + ('\u{8e3}', '\u{963}'), + ('०', '९'), + ('ॱ', 'ঃ'), + ('অ', 'ঌ'), + ('এ', 'ঐ'), + ('ও', 'ন'), + ('প', 'র'), + ('ল', 'ল'), + ('শ', 'হ'), + ('\u{9bc}', '\u{9c4}'), + ('ে', 'ৈ'), + ('ো', 'ৎ'), + ('\u{9d7}', '\u{9d7}'), + ('ড়', 'ঢ়'), + ('য়', '\u{9e3}'), + ('০', 'ৱ'), + ('ৼ', 'ৼ'), + ('\u{9fe}', '\u{9fe}'), + ('\u{a01}', 'ਃ'), + ('ਅ', 'ਊ'), + ('ਏ', 'ਐ'), + ('ਓ', 'ਨ'), + ('ਪ', 'ਰ'), + ('ਲ', 'ਲ਼'), + ('ਵ', 'ਸ਼'), + ('ਸ', 'ਹ'), + ('\u{a3c}', '\u{a3c}'), + ('ਾ', '\u{a42}'), + ('\u{a47}', '\u{a48}'), + ('\u{a4b}', '\u{a4d}'), + ('\u{a51}', '\u{a51}'), + ('ਖ਼', 'ੜ'), + ('ਫ਼', 'ਫ਼'), + ('੦', '\u{a75}'), + ('\u{a81}', 'ઃ'), + ('અ', 'ઍ'), + ('એ', 'ઑ'), + ('ઓ', 'ન'), + ('પ', 'ર'), + ('લ', 'ળ'), + ('વ', 'હ'), + ('\u{abc}', '\u{ac5}'), + ('\u{ac7}', 'ૉ'), + ('ો', '\u{acd}'), + ('ૐ', 'ૐ'), + ('ૠ', '\u{ae3}'), + ('૦', '૯'), + ('ૹ', '\u{aff}'), + ('\u{b01}', 'ଃ'), + ('ଅ', 'ଌ'), + ('ଏ', 'ଐ'), + ('ଓ', 'ନ'), + ('ପ', 'ର'), + ('ଲ', 'ଳ'), + ('ଵ', 'ହ'), + ('\u{b3c}', '\u{b44}'), + ('େ', 'ୈ'), + ('ୋ', '\u{b4d}'), + ('\u{b55}', '\u{b57}'), + ('ଡ଼', 'ଢ଼'), + ('ୟ', '\u{b63}'), + ('୦', '୯'), + ('ୱ', 'ୱ'), + ('\u{b82}', 'ஃ'), + ('அ', 'ஊ'), + ('எ', 'ஐ'), + ('ஒ', 'க'), + ('ங', 'ச'), + ('ஜ', 'ஜ'), + ('ஞ', 'ட'), + ('ண', 'த'), + ('ந', 'ப'), + ('ம', 'ஹ'), + ('\u{bbe}', 'ூ'), + ('ெ', 'ை'), + ('ொ', '\u{bcd}'), + ('ௐ', 'ௐ'), + ('\u{bd7}', '\u{bd7}'), + ('௦', '௯'), + ('\u{c00}', 'ఌ'), + ('ఎ', 'ఐ'), + ('ఒ', 'న'), + ('ప', 'హ'), + ('\u{c3c}', 'ౄ'), + ('\u{c46}', '\u{c48}'), + ('\u{c4a}', '\u{c4d}'), + ('\u{c55}', '\u{c56}'), + ('ౘ', 'ౚ'), + ('ౝ', 'ౝ'), + ('ౠ', '\u{c63}'), + ('౦', '౯'), + ('ಀ', 'ಃ'), + ('ಅ', 'ಌ'), + ('ಎ', 'ಐ'), + ('ಒ', 'ನ'), + ('ಪ', 'ಳ'), + ('ವ', 'ಹ'), + ('\u{cbc}', 'ೄ'), + ('\u{cc6}', '\u{cc8}'), + ('\u{cca}', '\u{ccd}'), + ('\u{cd5}', '\u{cd6}'), + ('ೝ', 'ೞ'), + ('ೠ', '\u{ce3}'), + ('೦', '೯'), + ('ೱ', 'ೳ'), + ('\u{d00}', 'ഌ'), + ('എ', 'ഐ'), + ('ഒ', '\u{d44}'), + ('െ', 'ൈ'), + ('ൊ', 'ൎ'), + ('ൔ', '\u{d57}'), + ('ൟ', '\u{d63}'), + ('൦', '൯'), + ('ൺ', 'ൿ'), + ('\u{d81}', 'ඃ'), + ('අ', 'ඖ'), + ('ක', 'න'), + ('ඳ', 'ර'), + ('ල', 'ල'), + ('ව', 'ෆ'), + ('\u{dca}', '\u{dca}'), + ('\u{dcf}', '\u{dd4}'), + ('\u{dd6}', '\u{dd6}'), + ('ෘ', '\u{ddf}'), + ('෦', '෯'), + ('ෲ', 'ෳ'), + ('ก', '\u{e3a}'), 
+ ('เ', '\u{e4e}'), + ('๐', '๙'), + ('ກ', 'ຂ'), + ('ຄ', 'ຄ'), + ('ຆ', 'ຊ'), + ('ຌ', 'ຣ'), + ('ລ', 'ລ'), + ('ວ', 'ຽ'), + ('ເ', 'ໄ'), + ('ໆ', 'ໆ'), + ('\u{ec8}', '\u{ece}'), + ('໐', '໙'), + ('ໜ', 'ໟ'), + ('ༀ', 'ༀ'), + ('\u{f18}', '\u{f19}'), + ('༠', '༩'), + ('\u{f35}', '\u{f35}'), + ('\u{f37}', '\u{f37}'), + ('\u{f39}', '\u{f39}'), + ('༾', 'ཇ'), + ('ཉ', 'ཬ'), + ('\u{f71}', '\u{f84}'), + ('\u{f86}', '\u{f97}'), + ('\u{f99}', '\u{fbc}'), + ('\u{fc6}', '\u{fc6}'), + ('က', '၉'), + ('ၐ', '\u{109d}'), + ('Ⴀ', 'Ⴥ'), + ('Ⴧ', 'Ⴧ'), + ('Ⴭ', 'Ⴭ'), + ('ა', 'ჺ'), + ('ჼ', 'ቈ'), + ('ቊ', 'ቍ'), + ('ቐ', 'ቖ'), + ('ቘ', 'ቘ'), + ('ቚ', 'ቝ'), + ('በ', 'ኈ'), + ('ኊ', 'ኍ'), + ('ነ', 'ኰ'), + ('ኲ', 'ኵ'), + ('ኸ', 'ኾ'), + ('ዀ', 'ዀ'), + ('ዂ', 'ዅ'), + ('ወ', 'ዖ'), + ('ዘ', 'ጐ'), + ('ጒ', 'ጕ'), + ('ጘ', 'ፚ'), + ('\u{135d}', '\u{135f}'), + ('፩', '፱'), + ('ᎀ', 'ᎏ'), + ('Ꭰ', 'Ᏽ'), + ('ᏸ', 'ᏽ'), + ('ᐁ', 'ᙬ'), + ('ᙯ', 'ᙿ'), + ('ᚁ', 'ᚚ'), + ('ᚠ', 'ᛪ'), + ('ᛮ', 'ᛸ'), + ('ᜀ', '\u{1715}'), + ('ᜟ', '\u{1734}'), + ('ᝀ', '\u{1753}'), + ('ᝠ', 'ᝬ'), + ('ᝮ', 'ᝰ'), + ('\u{1772}', '\u{1773}'), + ('ក', '\u{17d3}'), + ('ៗ', 'ៗ'), + ('ៜ', '\u{17dd}'), + ('០', '៩'), + ('\u{180b}', '\u{180d}'), + ('\u{180f}', '᠙'), + ('ᠠ', 'ᡸ'), + ('ᢀ', 'ᢪ'), + ('ᢰ', 'ᣵ'), + ('ᤀ', 'ᤞ'), + ('\u{1920}', 'ᤫ'), + ('ᤰ', '\u{193b}'), + ('᥆', 'ᥭ'), + ('ᥰ', 'ᥴ'), + ('ᦀ', 'ᦫ'), + ('ᦰ', 'ᧉ'), + ('᧐', '᧚'), + ('ᨀ', '\u{1a1b}'), + ('ᨠ', '\u{1a5e}'), + ('\u{1a60}', '\u{1a7c}'), + ('\u{1a7f}', '᪉'), + ('᪐', '᪙'), + ('ᪧ', 'ᪧ'), + ('\u{1ab0}', '\u{1abd}'), + ('\u{1abf}', '\u{1ace}'), + ('\u{1b00}', 'ᭌ'), + ('᭐', '᭙'), + ('\u{1b6b}', '\u{1b73}'), + ('\u{1b80}', '\u{1bf3}'), + ('ᰀ', '\u{1c37}'), + ('᱀', '᱉'), + ('ᱍ', 'ᱽ'), + ('ᲀ', 'ᲊ'), + ('Ა', 'Ჺ'), + ('Ჽ', 'Ჿ'), + ('\u{1cd0}', '\u{1cd2}'), + ('\u{1cd4}', 'ᳺ'), + ('ᴀ', 'ἕ'), + ('Ἐ', 'Ἕ'), + ('ἠ', 'ὅ'), + ('Ὀ', 'Ὅ'), + ('ὐ', 'ὗ'), + ('Ὑ', 'Ὑ'), + ('Ὓ', 'Ὓ'), + ('Ὕ', 'Ὕ'), + ('Ὗ', 'ώ'), + ('ᾀ', 'ᾴ'), + ('ᾶ', 'ᾼ'), + ('ι', 'ι'), + ('ῂ', 'ῄ'), + ('ῆ', 'ῌ'), + ('ῐ', 'ΐ'), + ('ῖ', 'Ί'), + ('ῠ', 'Ῥ'), + ('ῲ', 'ῴ'), + ('ῶ', 'ῼ'), + ('\u{200c}', '\u{200d}'), + ('‿', '⁀'), + ('⁔', '⁔'), + ('ⁱ', 'ⁱ'), + ('ⁿ', 'ⁿ'), + ('ₐ', 'ₜ'), + ('\u{20d0}', '\u{20dc}'), + ('\u{20e1}', '\u{20e1}'), + ('\u{20e5}', '\u{20f0}'), + ('ℂ', 'ℂ'), + ('ℇ', 'ℇ'), + ('ℊ', 'ℓ'), + ('ℕ', 'ℕ'), + ('℘', 'ℝ'), + ('ℤ', 'ℤ'), + ('Ω', 'Ω'), + ('ℨ', 'ℨ'), + ('K', 'ℹ'), + ('ℼ', 'ℿ'), + ('ⅅ', 'ⅉ'), + ('ⅎ', 'ⅎ'), + ('Ⅰ', 'ↈ'), + ('Ⰰ', 'ⳤ'), + ('Ⳬ', 'ⳳ'), + ('ⴀ', 'ⴥ'), + ('ⴧ', 'ⴧ'), + ('ⴭ', 'ⴭ'), + ('ⴰ', 'ⵧ'), + ('ⵯ', 'ⵯ'), + ('\u{2d7f}', 'ⶖ'), + ('ⶠ', 'ⶦ'), + ('ⶨ', 'ⶮ'), + ('ⶰ', 'ⶶ'), + ('ⶸ', 'ⶾ'), + ('ⷀ', 'ⷆ'), + ('ⷈ', 'ⷎ'), + ('ⷐ', 'ⷖ'), + ('ⷘ', 'ⷞ'), + ('\u{2de0}', '\u{2dff}'), + ('々', '〇'), + ('〡', '\u{302f}'), + ('〱', '〵'), + ('〸', '〼'), + ('ぁ', 'ゖ'), + ('\u{3099}', 'ゟ'), + ('ァ', 'ヿ'), + ('ㄅ', 'ㄯ'), + ('ㄱ', 'ㆎ'), + ('ㆠ', 'ㆿ'), + ('ㇰ', 'ㇿ'), + ('㐀', '䶿'), + ('一', 'ꒌ'), + ('ꓐ', 'ꓽ'), + ('ꔀ', 'ꘌ'), + ('ꘐ', 'ꘫ'), + ('Ꙁ', '\u{a66f}'), + ('\u{a674}', '\u{a67d}'), + ('ꙿ', '\u{a6f1}'), + ('ꜗ', 'ꜟ'), + ('Ꜣ', 'ꞈ'), + ('Ꞌ', 'ꟍ'), + ('Ꟑ', 'ꟑ'), + ('ꟓ', 'ꟓ'), + ('ꟕ', 'Ƛ'), + ('ꟲ', 'ꠧ'), + ('\u{a82c}', '\u{a82c}'), + ('ꡀ', 'ꡳ'), + ('ꢀ', '\u{a8c5}'), + ('꣐', '꣙'), + ('\u{a8e0}', 'ꣷ'), + ('ꣻ', 'ꣻ'), + ('ꣽ', '\u{a92d}'), + ('ꤰ', '\u{a953}'), + ('ꥠ', 'ꥼ'), + ('\u{a980}', '\u{a9c0}'), + ('ꧏ', '꧙'), + ('ꧠ', 'ꧾ'), + ('ꨀ', '\u{aa36}'), + ('ꩀ', 'ꩍ'), + ('꩐', '꩙'), + ('ꩠ', 'ꩶ'), + ('ꩺ', 'ꫂ'), + ('ꫛ', 'ꫝ'), + ('ꫠ', 'ꫯ'), + ('ꫲ', '\u{aaf6}'), + ('ꬁ', 'ꬆ'), + ('ꬉ', 'ꬎ'), + ('ꬑ', 'ꬖ'), + ('ꬠ', 'ꬦ'), + ('ꬨ', 'ꬮ'), + ('ꬰ', 'ꭚ'), + ('ꭜ', 'ꭩ'), + ('ꭰ', 'ꯪ'), + ('꯬', '\u{abed}'), + ('꯰', '꯹'), + ('가', '힣'), + 
('ힰ', 'ퟆ'), + ('ퟋ', 'ퟻ'), + ('豈', '舘'), + ('並', '龎'), + ('ff', 'st'), + ('ﬓ', 'ﬗ'), + ('יִ', 'ﬨ'), + ('שׁ', 'זּ'), + ('טּ', 'לּ'), + ('מּ', 'מּ'), + ('נּ', 'סּ'), + ('ףּ', 'פּ'), + ('צּ', 'ﮱ'), + ('ﯓ', 'ﴽ'), + ('ﵐ', 'ﶏ'), + ('ﶒ', 'ﷇ'), + ('ﷰ', 'ﷻ'), + ('\u{fe00}', '\u{fe0f}'), + ('\u{fe20}', '\u{fe2f}'), + ('︳', '︴'), + ('﹍', '﹏'), + ('ﹰ', 'ﹴ'), + ('ﹶ', 'ﻼ'), + ('0', '9'), + ('A', 'Z'), + ('_', '_'), + ('a', 'z'), + ('・', 'ᄒ'), + ('ᅡ', 'ᅦ'), + ('ᅧ', 'ᅬ'), + ('ᅭ', 'ᅲ'), + ('ᅳ', 'ᅵ'), + ('𐀀', '𐀋'), + ('𐀍', '𐀦'), + ('𐀨', '𐀺'), + ('𐀼', '𐀽'), + ('𐀿', '𐁍'), + ('𐁐', '𐁝'), + ('𐂀', '𐃺'), + ('𐅀', '𐅴'), + ('\u{101fd}', '\u{101fd}'), + ('𐊀', '𐊜'), + ('𐊠', '𐋐'), + ('\u{102e0}', '\u{102e0}'), + ('𐌀', '𐌟'), + ('𐌭', '𐍊'), + ('𐍐', '\u{1037a}'), + ('𐎀', '𐎝'), + ('𐎠', '𐏃'), + ('𐏈', '𐏏'), + ('𐏑', '𐏕'), + ('𐐀', '𐒝'), + ('𐒠', '𐒩'), + ('𐒰', '𐓓'), + ('𐓘', '𐓻'), + ('𐔀', '𐔧'), + ('𐔰', '𐕣'), + ('𐕰', '𐕺'), + ('𐕼', '𐖊'), + ('𐖌', '𐖒'), + ('𐖔', '𐖕'), + ('𐖗', '𐖡'), + ('𐖣', '𐖱'), + ('𐖳', '𐖹'), + ('𐖻', '𐖼'), + ('𐗀', '𐗳'), + ('𐘀', '𐜶'), + ('𐝀', '𐝕'), + ('𐝠', '𐝧'), + ('𐞀', '𐞅'), + ('𐞇', '𐞰'), + ('𐞲', '𐞺'), + ('𐠀', '𐠅'), + ('𐠈', '𐠈'), + ('𐠊', '𐠵'), + ('𐠷', '𐠸'), + ('𐠼', '𐠼'), + ('𐠿', '𐡕'), + ('𐡠', '𐡶'), + ('𐢀', '𐢞'), + ('𐣠', '𐣲'), + ('𐣴', '𐣵'), + ('𐤀', '𐤕'), + ('𐤠', '𐤹'), + ('𐦀', '𐦷'), + ('𐦾', '𐦿'), + ('𐨀', '\u{10a03}'), + ('\u{10a05}', '\u{10a06}'), + ('\u{10a0c}', '𐨓'), + ('𐨕', '𐨗'), + ('𐨙', '𐨵'), + ('\u{10a38}', '\u{10a3a}'), + ('\u{10a3f}', '\u{10a3f}'), + ('𐩠', '𐩼'), + ('𐪀', '𐪜'), + ('𐫀', '𐫇'), + ('𐫉', '\u{10ae6}'), + ('𐬀', '𐬵'), + ('𐭀', '𐭕'), + ('𐭠', '𐭲'), + ('𐮀', '𐮑'), + ('𐰀', '𐱈'), + ('𐲀', '𐲲'), + ('𐳀', '𐳲'), + ('𐴀', '\u{10d27}'), + ('𐴰', '𐴹'), + ('𐵀', '𐵥'), + ('\u{10d69}', '\u{10d6d}'), + ('𐵯', '𐶅'), + ('𐺀', '𐺩'), + ('\u{10eab}', '\u{10eac}'), + ('𐺰', '𐺱'), + ('𐻂', '𐻄'), + ('\u{10efc}', '𐼜'), + ('𐼧', '𐼧'), + ('𐼰', '\u{10f50}'), + ('𐽰', '\u{10f85}'), + ('𐾰', '𐿄'), + ('𐿠', '𐿶'), + ('𑀀', '\u{11046}'), + ('𑁦', '𑁵'), + ('\u{1107f}', '\u{110ba}'), + ('\u{110c2}', '\u{110c2}'), + ('𑃐', '𑃨'), + ('𑃰', '𑃹'), + ('\u{11100}', '\u{11134}'), + ('𑄶', '𑄿'), + ('𑅄', '𑅇'), + ('𑅐', '\u{11173}'), + ('𑅶', '𑅶'), + ('\u{11180}', '𑇄'), + ('\u{111c9}', '\u{111cc}'), + ('𑇎', '𑇚'), + ('𑇜', '𑇜'), + ('𑈀', '𑈑'), + ('𑈓', '\u{11237}'), + ('\u{1123e}', '\u{11241}'), + ('𑊀', '𑊆'), + ('𑊈', '𑊈'), + ('𑊊', '𑊍'), + ('𑊏', '𑊝'), + ('𑊟', '𑊨'), + ('𑊰', '\u{112ea}'), + ('𑋰', '𑋹'), + ('\u{11300}', '𑌃'), + ('𑌅', '𑌌'), + ('𑌏', '𑌐'), + ('𑌓', '𑌨'), + ('𑌪', '𑌰'), + ('𑌲', '𑌳'), + ('𑌵', '𑌹'), + ('\u{1133b}', '𑍄'), + ('𑍇', '𑍈'), + ('𑍋', '\u{1134d}'), + ('𑍐', '𑍐'), + ('\u{11357}', '\u{11357}'), + ('𑍝', '𑍣'), + ('\u{11366}', '\u{1136c}'), + ('\u{11370}', '\u{11374}'), + ('𑎀', '𑎉'), + ('𑎋', '𑎋'), + ('𑎎', '𑎎'), + ('𑎐', '𑎵'), + ('𑎷', '\u{113c0}'), + ('\u{113c2}', '\u{113c2}'), + ('\u{113c5}', '\u{113c5}'), + ('\u{113c7}', '𑏊'), + ('𑏌', '𑏓'), + ('\u{113e1}', '\u{113e2}'), + ('𑐀', '𑑊'), + ('𑑐', '𑑙'), + ('\u{1145e}', '𑑡'), + ('𑒀', '𑓅'), + ('𑓇', '𑓇'), + ('𑓐', '𑓙'), + ('𑖀', '\u{115b5}'), + ('𑖸', '\u{115c0}'), + ('𑗘', '\u{115dd}'), + ('𑘀', '\u{11640}'), + ('𑙄', '𑙄'), + ('𑙐', '𑙙'), + ('𑚀', '𑚸'), + ('𑛀', '𑛉'), + ('𑛐', '𑛣'), + ('𑜀', '𑜚'), + ('\u{1171d}', '\u{1172b}'), + ('𑜰', '𑜹'), + ('𑝀', '𑝆'), + ('𑠀', '\u{1183a}'), + ('𑢠', '𑣩'), + ('𑣿', '𑤆'), + ('𑤉', '𑤉'), + ('𑤌', '𑤓'), + ('𑤕', '𑤖'), + ('𑤘', '𑤵'), + ('𑤷', '𑤸'), + ('\u{1193b}', '\u{11943}'), + ('𑥐', '𑥙'), + ('𑦠', '𑦧'), + ('𑦪', '\u{119d7}'), + ('\u{119da}', '𑧡'), + ('𑧣', '𑧤'), + ('𑨀', '\u{11a3e}'), + ('\u{11a47}', '\u{11a47}'), + ('𑩐', '\u{11a99}'), + ('𑪝', '𑪝'), + ('𑪰', '𑫸'), + ('𑯀', '𑯠'), + ('𑯰', '𑯹'), + ('𑰀', '𑰈'), + ('𑰊', 
'\u{11c36}'), + ('\u{11c38}', '𑱀'), + ('𑱐', '𑱙'), + ('𑱲', '𑲏'), + ('\u{11c92}', '\u{11ca7}'), + ('𑲩', '\u{11cb6}'), + ('𑴀', '𑴆'), + ('𑴈', '𑴉'), + ('𑴋', '\u{11d36}'), + ('\u{11d3a}', '\u{11d3a}'), + ('\u{11d3c}', '\u{11d3d}'), + ('\u{11d3f}', '\u{11d47}'), + ('𑵐', '𑵙'), + ('𑵠', '𑵥'), + ('𑵧', '𑵨'), + ('𑵪', '𑶎'), + ('\u{11d90}', '\u{11d91}'), + ('𑶓', '𑶘'), + ('𑶠', '𑶩'), + ('𑻠', '𑻶'), + ('\u{11f00}', '𑼐'), + ('𑼒', '\u{11f3a}'), + ('𑼾', '\u{11f42}'), + ('𑽐', '\u{11f5a}'), + ('𑾰', '𑾰'), + ('𒀀', '𒎙'), + ('𒐀', '𒑮'), + ('𒒀', '𒕃'), + ('𒾐', '𒿰'), + ('𓀀', '𓐯'), + ('\u{13440}', '\u{13455}'), + ('𓑠', '𔏺'), + ('𔐀', '𔙆'), + ('𖄀', '𖄹'), + ('𖠀', '𖨸'), + ('𖩀', '𖩞'), + ('𖩠', '𖩩'), + ('𖩰', '𖪾'), + ('𖫀', '𖫉'), + ('𖫐', '𖫭'), + ('\u{16af0}', '\u{16af4}'), + ('𖬀', '\u{16b36}'), + ('𖭀', '𖭃'), + ('𖭐', '𖭙'), + ('𖭣', '𖭷'), + ('𖭽', '𖮏'), + ('𖵀', '𖵬'), + ('𖵰', '𖵹'), + ('𖹀', '𖹿'), + ('𖼀', '𖽊'), + ('\u{16f4f}', '𖾇'), + ('\u{16f8f}', '𖾟'), + ('𖿠', '𖿡'), + ('𖿣', '\u{16fe4}'), + ('\u{16ff0}', '\u{16ff1}'), + ('𗀀', '𘟷'), + ('𘠀', '𘳕'), + ('𘳿', '𘴈'), + ('𚿰', '𚿳'), + ('𚿵', '𚿻'), + ('𚿽', '𚿾'), + ('𛀀', '𛄢'), + ('𛄲', '𛄲'), + ('𛅐', '𛅒'), + ('𛅕', '𛅕'), + ('𛅤', '𛅧'), + ('𛅰', '𛋻'), + ('𛰀', '𛱪'), + ('𛱰', '𛱼'), + ('𛲀', '𛲈'), + ('𛲐', '𛲙'), + ('\u{1bc9d}', '\u{1bc9e}'), + ('𜳰', '𜳹'), + ('\u{1cf00}', '\u{1cf2d}'), + ('\u{1cf30}', '\u{1cf46}'), + ('\u{1d165}', '\u{1d169}'), + ('\u{1d16d}', '\u{1d172}'), + ('\u{1d17b}', '\u{1d182}'), + ('\u{1d185}', '\u{1d18b}'), + ('\u{1d1aa}', '\u{1d1ad}'), + ('\u{1d242}', '\u{1d244}'), + ('𝐀', '𝑔'), + ('𝑖', '𝒜'), + ('𝒞', '𝒟'), + ('𝒢', '𝒢'), + ('𝒥', '𝒦'), + ('𝒩', '𝒬'), + ('𝒮', '𝒹'), + ('𝒻', '𝒻'), + ('𝒽', '𝓃'), + ('𝓅', '𝔅'), + ('𝔇', '𝔊'), + ('𝔍', '𝔔'), + ('𝔖', '𝔜'), + ('𝔞', '𝔹'), + ('𝔻', '𝔾'), + ('𝕀', '𝕄'), + ('𝕆', '𝕆'), + ('𝕊', '𝕐'), + ('𝕒', '𝚥'), + ('𝚨', '𝛀'), + ('𝛂', '𝛚'), + ('𝛜', '𝛺'), + ('𝛼', '𝜔'), + ('𝜖', '𝜴'), + ('𝜶', '𝝎'), + ('𝝐', '𝝮'), + ('𝝰', '𝞈'), + ('𝞊', '𝞨'), + ('𝞪', '𝟂'), + ('𝟄', '𝟋'), + ('𝟎', '𝟿'), + ('\u{1da00}', '\u{1da36}'), + ('\u{1da3b}', '\u{1da6c}'), + ('\u{1da75}', '\u{1da75}'), + ('\u{1da84}', '\u{1da84}'), + ('\u{1da9b}', '\u{1da9f}'), + ('\u{1daa1}', '\u{1daaf}'), + ('𝼀', '𝼞'), + ('𝼥', '𝼪'), + ('\u{1e000}', '\u{1e006}'), + ('\u{1e008}', '\u{1e018}'), + ('\u{1e01b}', '\u{1e021}'), + ('\u{1e023}', '\u{1e024}'), + ('\u{1e026}', '\u{1e02a}'), + ('𞀰', '𞁭'), + ('\u{1e08f}', '\u{1e08f}'), + ('𞄀', '𞄬'), + ('\u{1e130}', '𞄽'), + ('𞅀', '𞅉'), + ('𞅎', '𞅎'), + ('𞊐', '\u{1e2ae}'), + ('𞋀', '𞋹'), + ('𞓐', '𞓹'), + ('𞗐', '𞗺'), + ('𞟠', '𞟦'), + ('𞟨', '𞟫'), + ('𞟭', '𞟮'), + ('𞟰', '𞟾'), + ('𞠀', '𞣄'), + ('\u{1e8d0}', '\u{1e8d6}'), + ('𞤀', '𞥋'), + ('𞥐', '𞥙'), + ('𞸀', '𞸃'), + ('𞸅', '𞸟'), + ('𞸡', '𞸢'), + ('𞸤', '𞸤'), + ('𞸧', '𞸧'), + ('𞸩', '𞸲'), + ('𞸴', '𞸷'), + ('𞸹', '𞸹'), + ('𞸻', '𞸻'), + ('𞹂', '𞹂'), + ('𞹇', '𞹇'), + ('𞹉', '𞹉'), + ('𞹋', '𞹋'), + ('𞹍', '𞹏'), + ('𞹑', '𞹒'), + ('𞹔', '𞹔'), + ('𞹗', '𞹗'), + ('𞹙', '𞹙'), + ('𞹛', '𞹛'), + ('𞹝', '𞹝'), + ('𞹟', '𞹟'), + ('𞹡', '𞹢'), + ('𞹤', '𞹤'), + ('𞹧', '𞹪'), + ('𞹬', '𞹲'), + ('𞹴', '𞹷'), + ('𞹹', '𞹼'), + ('𞹾', '𞹾'), + ('𞺀', '𞺉'), + ('𞺋', '𞺛'), + ('𞺡', '𞺣'), + ('𞺥', '𞺩'), + ('𞺫', '𞺻'), + ('🯰', '🯹'), + ('𠀀', '𪛟'), + ('𪜀', '𫜹'), + ('𫝀', '𫠝'), + ('𫠠', '𬺡'), + ('𬺰', '𮯠'), + ('𮯰', '𮹝'), + ('丽', '𪘀'), + ('𰀀', '𱍊'), + ('𱍐', '𲎯'), + ('\u{e0100}', '\u{e01ef}'), +]; + +pub const ID_START: &'static [(char, char)] = &[ + ('A', 'Z'), + ('a', 'z'), + ('ª', 'ª'), + ('µ', 'µ'), + ('º', 'º'), + ('À', 'Ö'), + ('Ø', 'ö'), + ('ø', 'ˁ'), + ('ˆ', 'ˑ'), + ('ˠ', 'ˤ'), + ('ˬ', 'ˬ'), + ('ˮ', 'ˮ'), + ('Ͱ', 'ʹ'), + ('Ͷ', 'ͷ'), + ('ͺ', 'ͽ'), + ('Ϳ', 'Ϳ'), + ('Ά', 'Ά'), + ('Έ', 'Ί'), + ('Ό', 'Ό'), + ('Ύ', 'Ρ'), 
+ ('Σ', 'ϵ'), + ('Ϸ', 'ҁ'), + ('Ҋ', 'ԯ'), + ('Ա', 'Ֆ'), + ('ՙ', 'ՙ'), + ('ՠ', 'ֈ'), + ('א', 'ת'), + ('ׯ', 'ײ'), + ('ؠ', 'ي'), + ('ٮ', 'ٯ'), + ('ٱ', 'ۓ'), + ('ە', 'ە'), + ('ۥ', 'ۦ'), + ('ۮ', 'ۯ'), + ('ۺ', 'ۼ'), + ('ۿ', 'ۿ'), + ('ܐ', 'ܐ'), + ('ܒ', 'ܯ'), + ('ݍ', 'ޥ'), + ('ޱ', 'ޱ'), + ('ߊ', 'ߪ'), + ('ߴ', 'ߵ'), + ('ߺ', 'ߺ'), + ('ࠀ', 'ࠕ'), + ('ࠚ', 'ࠚ'), + ('ࠤ', 'ࠤ'), + ('ࠨ', 'ࠨ'), + ('ࡀ', 'ࡘ'), + ('ࡠ', 'ࡪ'), + ('ࡰ', 'ࢇ'), + ('ࢉ', 'ࢎ'), + ('ࢠ', 'ࣉ'), + ('ऄ', 'ह'), + ('ऽ', 'ऽ'), + ('ॐ', 'ॐ'), + ('क़', 'ॡ'), + ('ॱ', 'ঀ'), + ('অ', 'ঌ'), + ('এ', 'ঐ'), + ('ও', 'ন'), + ('প', 'র'), + ('ল', 'ল'), + ('শ', 'হ'), + ('ঽ', 'ঽ'), + ('ৎ', 'ৎ'), + ('ড়', 'ঢ়'), + ('য়', 'ৡ'), + ('ৰ', 'ৱ'), + ('ৼ', 'ৼ'), + ('ਅ', 'ਊ'), + ('ਏ', 'ਐ'), + ('ਓ', 'ਨ'), + ('ਪ', 'ਰ'), + ('ਲ', 'ਲ਼'), + ('ਵ', 'ਸ਼'), + ('ਸ', 'ਹ'), + ('ਖ਼', 'ੜ'), + ('ਫ਼', 'ਫ਼'), + ('ੲ', 'ੴ'), + ('અ', 'ઍ'), + ('એ', 'ઑ'), + ('ઓ', 'ન'), + ('પ', 'ર'), + ('લ', 'ળ'), + ('વ', 'હ'), + ('ઽ', 'ઽ'), + ('ૐ', 'ૐ'), + ('ૠ', 'ૡ'), + ('ૹ', 'ૹ'), + ('ଅ', 'ଌ'), + ('ଏ', 'ଐ'), + ('ଓ', 'ନ'), + ('ପ', 'ର'), + ('ଲ', 'ଳ'), + ('ଵ', 'ହ'), + ('ଽ', 'ଽ'), + ('ଡ଼', 'ଢ଼'), + ('ୟ', 'ୡ'), + ('ୱ', 'ୱ'), + ('ஃ', 'ஃ'), + ('அ', 'ஊ'), + ('எ', 'ஐ'), + ('ஒ', 'க'), + ('ங', 'ச'), + ('ஜ', 'ஜ'), + ('ஞ', 'ட'), + ('ண', 'த'), + ('ந', 'ப'), + ('ம', 'ஹ'), + ('ௐ', 'ௐ'), + ('అ', 'ఌ'), + ('ఎ', 'ఐ'), + ('ఒ', 'న'), + ('ప', 'హ'), + ('ఽ', 'ఽ'), + ('ౘ', 'ౚ'), + ('ౝ', 'ౝ'), + ('ౠ', 'ౡ'), + ('ಀ', 'ಀ'), + ('ಅ', 'ಌ'), + ('ಎ', 'ಐ'), + ('ಒ', 'ನ'), + ('ಪ', 'ಳ'), + ('ವ', 'ಹ'), + ('ಽ', 'ಽ'), + ('ೝ', 'ೞ'), + ('ೠ', 'ೡ'), + ('ೱ', 'ೲ'), + ('ഄ', 'ഌ'), + ('എ', 'ഐ'), + ('ഒ', 'ഺ'), + ('ഽ', 'ഽ'), + ('ൎ', 'ൎ'), + ('ൔ', 'ൖ'), + ('ൟ', 'ൡ'), + ('ൺ', 'ൿ'), + ('අ', 'ඖ'), + ('ක', 'න'), + ('ඳ', 'ර'), + ('ල', 'ල'), + ('ව', 'ෆ'), + ('ก', 'ะ'), + ('า', 'ำ'), + ('เ', 'ๆ'), + ('ກ', 'ຂ'), + ('ຄ', 'ຄ'), + ('ຆ', 'ຊ'), + ('ຌ', 'ຣ'), + ('ລ', 'ລ'), + ('ວ', 'ະ'), + ('າ', 'ຳ'), + ('ຽ', 'ຽ'), + ('ເ', 'ໄ'), + ('ໆ', 'ໆ'), + ('ໜ', 'ໟ'), + ('ༀ', 'ༀ'), + ('ཀ', 'ཇ'), + ('ཉ', 'ཬ'), + ('ྈ', 'ྌ'), + ('က', 'ဪ'), + ('ဿ', 'ဿ'), + ('ၐ', 'ၕ'), + ('ၚ', 'ၝ'), + ('ၡ', 'ၡ'), + ('ၥ', 'ၦ'), + ('ၮ', 'ၰ'), + ('ၵ', 'ႁ'), + ('ႎ', 'ႎ'), + ('Ⴀ', 'Ⴥ'), + ('Ⴧ', 'Ⴧ'), + ('Ⴭ', 'Ⴭ'), + ('ა', 'ჺ'), + ('ჼ', 'ቈ'), + ('ቊ', 'ቍ'), + ('ቐ', 'ቖ'), + ('ቘ', 'ቘ'), + ('ቚ', 'ቝ'), + ('በ', 'ኈ'), + ('ኊ', 'ኍ'), + ('ነ', 'ኰ'), + ('ኲ', 'ኵ'), + ('ኸ', 'ኾ'), + ('ዀ', 'ዀ'), + ('ዂ', 'ዅ'), + ('ወ', 'ዖ'), + ('ዘ', 'ጐ'), + ('ጒ', 'ጕ'), + ('ጘ', 'ፚ'), + ('ᎀ', 'ᎏ'), + ('Ꭰ', 'Ᏽ'), + ('ᏸ', 'ᏽ'), + ('ᐁ', 'ᙬ'), + ('ᙯ', 'ᙿ'), + ('ᚁ', 'ᚚ'), + ('ᚠ', 'ᛪ'), + ('ᛮ', 'ᛸ'), + ('ᜀ', 'ᜑ'), + ('ᜟ', 'ᜱ'), + ('ᝀ', 'ᝑ'), + ('ᝠ', 'ᝬ'), + ('ᝮ', 'ᝰ'), + ('ក', 'ឳ'), + ('ៗ', 'ៗ'), + ('ៜ', 'ៜ'), + ('ᠠ', 'ᡸ'), + ('ᢀ', 'ᢨ'), + ('ᢪ', 'ᢪ'), + ('ᢰ', 'ᣵ'), + ('ᤀ', 'ᤞ'), + ('ᥐ', 'ᥭ'), + ('ᥰ', 'ᥴ'), + ('ᦀ', 'ᦫ'), + ('ᦰ', 'ᧉ'), + ('ᨀ', 'ᨖ'), + ('ᨠ', 'ᩔ'), + ('ᪧ', 'ᪧ'), + ('ᬅ', 'ᬳ'), + ('ᭅ', 'ᭌ'), + ('ᮃ', 'ᮠ'), + ('ᮮ', 'ᮯ'), + ('ᮺ', 'ᯥ'), + ('ᰀ', 'ᰣ'), + ('ᱍ', 'ᱏ'), + ('ᱚ', 'ᱽ'), + ('ᲀ', 'ᲊ'), + ('Ა', 'Ჺ'), + ('Ჽ', 'Ჿ'), + ('ᳩ', 'ᳬ'), + ('ᳮ', 'ᳳ'), + ('ᳵ', 'ᳶ'), + ('ᳺ', 'ᳺ'), + ('ᴀ', 'ᶿ'), + ('Ḁ', 'ἕ'), + ('Ἐ', 'Ἕ'), + ('ἠ', 'ὅ'), + ('Ὀ', 'Ὅ'), + ('ὐ', 'ὗ'), + ('Ὑ', 'Ὑ'), + ('Ὓ', 'Ὓ'), + ('Ὕ', 'Ὕ'), + ('Ὗ', 'ώ'), + ('ᾀ', 'ᾴ'), + ('ᾶ', 'ᾼ'), + ('ι', 'ι'), + ('ῂ', 'ῄ'), + ('ῆ', 'ῌ'), + ('ῐ', 'ΐ'), + ('ῖ', 'Ί'), + ('ῠ', 'Ῥ'), + ('ῲ', 'ῴ'), + ('ῶ', 'ῼ'), + ('ⁱ', 'ⁱ'), + ('ⁿ', 'ⁿ'), + ('ₐ', 'ₜ'), + ('ℂ', 'ℂ'), + ('ℇ', 'ℇ'), + ('ℊ', 'ℓ'), + ('ℕ', 'ℕ'), + ('℘', 'ℝ'), + ('ℤ', 'ℤ'), + ('Ω', 'Ω'), + ('ℨ', 'ℨ'), + ('K', 'ℹ'), + ('ℼ', 'ℿ'), + ('ⅅ', 'ⅉ'), + ('ⅎ', 'ⅎ'), + ('Ⅰ', 'ↈ'), + ('Ⰰ', 'ⳤ'), + ('Ⳬ', 'ⳮ'), + ('Ⳳ', 'ⳳ'), + ('ⴀ', 'ⴥ'), + ('ⴧ', 'ⴧ'), + ('ⴭ', 'ⴭ'), + 
('ⴰ', 'ⵧ'), + ('ⵯ', 'ⵯ'), + ('ⶀ', 'ⶖ'), + ('ⶠ', 'ⶦ'), + ('ⶨ', 'ⶮ'), + ('ⶰ', 'ⶶ'), + ('ⶸ', 'ⶾ'), + ('ⷀ', 'ⷆ'), + ('ⷈ', 'ⷎ'), + ('ⷐ', 'ⷖ'), + ('ⷘ', 'ⷞ'), + ('々', '〇'), + ('〡', '〩'), + ('〱', '〵'), + ('〸', '〼'), + ('ぁ', 'ゖ'), + ('゛', 'ゟ'), + ('ァ', 'ヺ'), + ('ー', 'ヿ'), + ('ㄅ', 'ㄯ'), + ('ㄱ', 'ㆎ'), + ('ㆠ', 'ㆿ'), + ('ㇰ', 'ㇿ'), + ('㐀', '䶿'), + ('一', 'ꒌ'), + ('ꓐ', 'ꓽ'), + ('ꔀ', 'ꘌ'), + ('ꘐ', 'ꘟ'), + ('ꘪ', 'ꘫ'), + ('Ꙁ', 'ꙮ'), + ('ꙿ', 'ꚝ'), + ('ꚠ', 'ꛯ'), + ('ꜗ', 'ꜟ'), + ('Ꜣ', 'ꞈ'), + ('Ꞌ', 'ꟍ'), + ('Ꟑ', 'ꟑ'), + ('ꟓ', 'ꟓ'), + ('ꟕ', 'Ƛ'), + ('ꟲ', 'ꠁ'), + ('ꠃ', 'ꠅ'), + ('ꠇ', 'ꠊ'), + ('ꠌ', 'ꠢ'), + ('ꡀ', 'ꡳ'), + ('ꢂ', 'ꢳ'), + ('ꣲ', 'ꣷ'), + ('ꣻ', 'ꣻ'), + ('ꣽ', 'ꣾ'), + ('ꤊ', 'ꤥ'), + ('ꤰ', 'ꥆ'), + ('ꥠ', 'ꥼ'), + ('ꦄ', 'ꦲ'), + ('ꧏ', 'ꧏ'), + ('ꧠ', 'ꧤ'), + ('ꧦ', 'ꧯ'), + ('ꧺ', 'ꧾ'), + ('ꨀ', 'ꨨ'), + ('ꩀ', 'ꩂ'), + ('ꩄ', 'ꩋ'), + ('ꩠ', 'ꩶ'), + ('ꩺ', 'ꩺ'), + ('ꩾ', 'ꪯ'), + ('ꪱ', 'ꪱ'), + ('ꪵ', 'ꪶ'), + ('ꪹ', 'ꪽ'), + ('ꫀ', 'ꫀ'), + ('ꫂ', 'ꫂ'), + ('ꫛ', 'ꫝ'), + ('ꫠ', 'ꫪ'), + ('ꫲ', 'ꫴ'), + ('ꬁ', 'ꬆ'), + ('ꬉ', 'ꬎ'), + ('ꬑ', 'ꬖ'), + ('ꬠ', 'ꬦ'), + ('ꬨ', 'ꬮ'), + ('ꬰ', 'ꭚ'), + ('ꭜ', 'ꭩ'), + ('ꭰ', 'ꯢ'), + ('가', '힣'), + ('ힰ', 'ퟆ'), + ('ퟋ', 'ퟻ'), + ('豈', '舘'), + ('並', '龎'), + ('ff', 'st'), + ('ﬓ', 'ﬗ'), + ('יִ', 'יִ'), + ('ײַ', 'ﬨ'), + ('שׁ', 'זּ'), + ('טּ', 'לּ'), + ('מּ', 'מּ'), + ('נּ', 'סּ'), + ('ףּ', 'פּ'), + ('צּ', 'ﮱ'), + ('ﯓ', 'ﴽ'), + ('ﵐ', 'ﶏ'), + ('ﶒ', 'ﷇ'), + ('ﷰ', 'ﷻ'), + ('ﹰ', 'ﹴ'), + ('ﹶ', 'ﻼ'), + ('A', 'Z'), + ('a', 'z'), + ('ヲ', 'ᄒ'), + ('ᅡ', 'ᅦ'), + ('ᅧ', 'ᅬ'), + ('ᅭ', 'ᅲ'), + ('ᅳ', 'ᅵ'), + ('𐀀', '𐀋'), + ('𐀍', '𐀦'), + ('𐀨', '𐀺'), + ('𐀼', '𐀽'), + ('𐀿', '𐁍'), + ('𐁐', '𐁝'), + ('𐂀', '𐃺'), + ('𐅀', '𐅴'), + ('𐊀', '𐊜'), + ('𐊠', '𐋐'), + ('𐌀', '𐌟'), + ('𐌭', '𐍊'), + ('𐍐', '𐍵'), + ('𐎀', '𐎝'), + ('𐎠', '𐏃'), + ('𐏈', '𐏏'), + ('𐏑', '𐏕'), + ('𐐀', '𐒝'), + ('𐒰', '𐓓'), + ('𐓘', '𐓻'), + ('𐔀', '𐔧'), + ('𐔰', '𐕣'), + ('𐕰', '𐕺'), + ('𐕼', '𐖊'), + ('𐖌', '𐖒'), + ('𐖔', '𐖕'), + ('𐖗', '𐖡'), + ('𐖣', '𐖱'), + ('𐖳', '𐖹'), + ('𐖻', '𐖼'), + ('𐗀', '𐗳'), + ('𐘀', '𐜶'), + ('𐝀', '𐝕'), + ('𐝠', '𐝧'), + ('𐞀', '𐞅'), + ('𐞇', '𐞰'), + ('𐞲', '𐞺'), + ('𐠀', '𐠅'), + ('𐠈', '𐠈'), + ('𐠊', '𐠵'), + ('𐠷', '𐠸'), + ('𐠼', '𐠼'), + ('𐠿', '𐡕'), + ('𐡠', '𐡶'), + ('𐢀', '𐢞'), + ('𐣠', '𐣲'), + ('𐣴', '𐣵'), + ('𐤀', '𐤕'), + ('𐤠', '𐤹'), + ('𐦀', '𐦷'), + ('𐦾', '𐦿'), + ('𐨀', '𐨀'), + ('𐨐', '𐨓'), + ('𐨕', '𐨗'), + ('𐨙', '𐨵'), + ('𐩠', '𐩼'), + ('𐪀', '𐪜'), + ('𐫀', '𐫇'), + ('𐫉', '𐫤'), + ('𐬀', '𐬵'), + ('𐭀', '𐭕'), + ('𐭠', '𐭲'), + ('𐮀', '𐮑'), + ('𐰀', '𐱈'), + ('𐲀', '𐲲'), + ('𐳀', '𐳲'), + ('𐴀', '𐴣'), + ('𐵊', '𐵥'), + ('𐵯', '𐶅'), + ('𐺀', '𐺩'), + ('𐺰', '𐺱'), + ('𐻂', '𐻄'), + ('𐼀', '𐼜'), + ('𐼧', '𐼧'), + ('𐼰', '𐽅'), + ('𐽰', '𐾁'), + ('𐾰', '𐿄'), + ('𐿠', '𐿶'), + ('𑀃', '𑀷'), + ('𑁱', '𑁲'), + ('𑁵', '𑁵'), + ('𑂃', '𑂯'), + ('𑃐', '𑃨'), + ('𑄃', '𑄦'), + ('𑅄', '𑅄'), + ('𑅇', '𑅇'), + ('𑅐', '𑅲'), + ('𑅶', '𑅶'), + ('𑆃', '𑆲'), + ('𑇁', '𑇄'), + ('𑇚', '𑇚'), + ('𑇜', '𑇜'), + ('𑈀', '𑈑'), + ('𑈓', '𑈫'), + ('𑈿', '𑉀'), + ('𑊀', '𑊆'), + ('𑊈', '𑊈'), + ('𑊊', '𑊍'), + ('𑊏', '𑊝'), + ('𑊟', '𑊨'), + ('𑊰', '𑋞'), + ('𑌅', '𑌌'), + ('𑌏', '𑌐'), + ('𑌓', '𑌨'), + ('𑌪', '𑌰'), + ('𑌲', '𑌳'), + ('𑌵', '𑌹'), + ('𑌽', '𑌽'), + ('𑍐', '𑍐'), + ('𑍝', '𑍡'), + ('𑎀', '𑎉'), + ('𑎋', '𑎋'), + ('𑎎', '𑎎'), + ('𑎐', '𑎵'), + ('𑎷', '𑎷'), + ('𑏑', '𑏑'), + ('𑏓', '𑏓'), + ('𑐀', '𑐴'), + ('𑑇', '𑑊'), + ('𑑟', '𑑡'), + ('𑒀', '𑒯'), + ('𑓄', '𑓅'), + ('𑓇', '𑓇'), + ('𑖀', '𑖮'), + ('𑗘', '𑗛'), + ('𑘀', '𑘯'), + ('𑙄', '𑙄'), + ('𑚀', '𑚪'), + ('𑚸', '𑚸'), + ('𑜀', '𑜚'), + ('𑝀', '𑝆'), + ('𑠀', '𑠫'), + ('𑢠', '𑣟'), + ('𑣿', '𑤆'), + ('𑤉', '𑤉'), + ('𑤌', '𑤓'), + ('𑤕', '𑤖'), + ('𑤘', '𑤯'), + ('𑤿', '𑤿'), + ('𑥁', '𑥁'), + ('𑦠', '𑦧'), + ('𑦪', '𑧐'), + ('𑧡', '𑧡'), + ('𑧣', '𑧣'), + ('𑨀', '𑨀'), + ('𑨋', '𑨲'), + ('𑨺', '𑨺'), + ('𑩐', 
'𑩐'), + ('𑩜', '𑪉'), + ('𑪝', '𑪝'), + ('𑪰', '𑫸'), + ('𑯀', '𑯠'), + ('𑰀', '𑰈'), + ('𑰊', '𑰮'), + ('𑱀', '𑱀'), + ('𑱲', '𑲏'), + ('𑴀', '𑴆'), + ('𑴈', '𑴉'), + ('𑴋', '𑴰'), + ('𑵆', '𑵆'), + ('𑵠', '𑵥'), + ('𑵧', '𑵨'), + ('𑵪', '𑶉'), + ('𑶘', '𑶘'), + ('𑻠', '𑻲'), + ('𑼂', '𑼂'), + ('𑼄', '𑼐'), + ('𑼒', '𑼳'), + ('𑾰', '𑾰'), + ('𒀀', '𒎙'), + ('𒐀', '𒑮'), + ('𒒀', '𒕃'), + ('𒾐', '𒿰'), + ('𓀀', '𓐯'), + ('𓑁', '𓑆'), + ('𓑠', '𔏺'), + ('𔐀', '𔙆'), + ('𖄀', '𖄝'), + ('𖠀', '𖨸'), + ('𖩀', '𖩞'), + ('𖩰', '𖪾'), + ('𖫐', '𖫭'), + ('𖬀', '𖬯'), + ('𖭀', '𖭃'), + ('𖭣', '𖭷'), + ('𖭽', '𖮏'), + ('𖵀', '𖵬'), + ('𖹀', '𖹿'), + ('𖼀', '𖽊'), + ('𖽐', '𖽐'), + ('𖾓', '𖾟'), + ('𖿠', '𖿡'), + ('𖿣', '𖿣'), + ('𗀀', '𘟷'), + ('𘠀', '𘳕'), + ('𘳿', '𘴈'), + ('𚿰', '𚿳'), + ('𚿵', '𚿻'), + ('𚿽', '𚿾'), + ('𛀀', '𛄢'), + ('𛄲', '𛄲'), + ('𛅐', '𛅒'), + ('𛅕', '𛅕'), + ('𛅤', '𛅧'), + ('𛅰', '𛋻'), + ('𛰀', '𛱪'), + ('𛱰', '𛱼'), + ('𛲀', '𛲈'), + ('𛲐', '𛲙'), + ('𝐀', '𝑔'), + ('𝑖', '𝒜'), + ('𝒞', '𝒟'), + ('𝒢', '𝒢'), + ('𝒥', '𝒦'), + ('𝒩', '𝒬'), + ('𝒮', '𝒹'), + ('𝒻', '𝒻'), + ('𝒽', '𝓃'), + ('𝓅', '𝔅'), + ('𝔇', '𝔊'), + ('𝔍', '𝔔'), + ('𝔖', '𝔜'), + ('𝔞', '𝔹'), + ('𝔻', '𝔾'), + ('𝕀', '𝕄'), + ('𝕆', '𝕆'), + ('𝕊', '𝕐'), + ('𝕒', '𝚥'), + ('𝚨', '𝛀'), + ('𝛂', '𝛚'), + ('𝛜', '𝛺'), + ('𝛼', '𝜔'), + ('𝜖', '𝜴'), + ('𝜶', '𝝎'), + ('𝝐', '𝝮'), + ('𝝰', '𝞈'), + ('𝞊', '𝞨'), + ('𝞪', '𝟂'), + ('𝟄', '𝟋'), + ('𝼀', '𝼞'), + ('𝼥', '𝼪'), + ('𞀰', '𞁭'), + ('𞄀', '𞄬'), + ('𞄷', '𞄽'), + ('𞅎', '𞅎'), + ('𞊐', '𞊭'), + ('𞋀', '𞋫'), + ('𞓐', '𞓫'), + ('𞗐', '𞗭'), + ('𞗰', '𞗰'), + ('𞟠', '𞟦'), + ('𞟨', '𞟫'), + ('𞟭', '𞟮'), + ('𞟰', '𞟾'), + ('𞠀', '𞣄'), + ('𞤀', '𞥃'), + ('𞥋', '𞥋'), + ('𞸀', '𞸃'), + ('𞸅', '𞸟'), + ('𞸡', '𞸢'), + ('𞸤', '𞸤'), + ('𞸧', '𞸧'), + ('𞸩', '𞸲'), + ('𞸴', '𞸷'), + ('𞸹', '𞸹'), + ('𞸻', '𞸻'), + ('𞹂', '𞹂'), + ('𞹇', '𞹇'), + ('𞹉', '𞹉'), + ('𞹋', '𞹋'), + ('𞹍', '𞹏'), + ('𞹑', '𞹒'), + ('𞹔', '𞹔'), + ('𞹗', '𞹗'), + ('𞹙', '𞹙'), + ('𞹛', '𞹛'), + ('𞹝', '𞹝'), + ('𞹟', '𞹟'), + ('𞹡', '𞹢'), + ('𞹤', '𞹤'), + ('𞹧', '𞹪'), + ('𞹬', '𞹲'), + ('𞹴', '𞹷'), + ('𞹹', '𞹼'), + ('𞹾', '𞹾'), + ('𞺀', '𞺉'), + ('𞺋', '𞺛'), + ('𞺡', '𞺣'), + ('𞺥', '𞺩'), + ('𞺫', '𞺻'), + ('𠀀', '𪛟'), + ('𪜀', '𫜹'), + ('𫝀', '𫠝'), + ('𫠠', '𬺡'), + ('𬺰', '𮯠'), + ('𮯰', '𮹝'), + ('丽', '𪘀'), + ('𰀀', '𱍊'), + ('𱍐', '𲎯'), +]; + +pub const IDEOGRAPHIC: &'static [(char, char)] = &[ + ('〆', '〇'), + ('〡', '〩'), + ('〸', '〺'), + ('㐀', '䶿'), + ('一', '鿿'), + ('豈', '舘'), + ('並', '龎'), + ('\u{16fe4}', '\u{16fe4}'), + ('𗀀', '𘟷'), + ('𘠀', '𘳕'), + ('𘳿', '𘴈'), + ('𛅰', '𛋻'), + ('𠀀', '𪛟'), + ('𪜀', '𫜹'), + ('𫝀', '𫠝'), + ('𫠠', '𬺡'), + ('𬺰', '𮯠'), + ('𮯰', '𮹝'), + ('丽', '𪘀'), + ('𰀀', '𱍊'), + ('𱍐', '𲎯'), +]; + +pub const INCB: &'static [(char, char)] = &[ + ('\u{300}', '\u{36f}'), + ('\u{483}', '\u{489}'), + ('\u{591}', '\u{5bd}'), + ('\u{5bf}', '\u{5bf}'), + ('\u{5c1}', '\u{5c2}'), + ('\u{5c4}', '\u{5c5}'), + ('\u{5c7}', '\u{5c7}'), + ('\u{610}', '\u{61a}'), + ('\u{64b}', '\u{65f}'), + ('\u{670}', '\u{670}'), + ('\u{6d6}', '\u{6dc}'), + ('\u{6df}', '\u{6e4}'), + ('\u{6e7}', '\u{6e8}'), + ('\u{6ea}', '\u{6ed}'), + ('\u{711}', '\u{711}'), + ('\u{730}', '\u{74a}'), + ('\u{7a6}', '\u{7b0}'), + ('\u{7eb}', '\u{7f3}'), + ('\u{7fd}', '\u{7fd}'), + ('\u{816}', '\u{819}'), + ('\u{81b}', '\u{823}'), + ('\u{825}', '\u{827}'), + ('\u{829}', '\u{82d}'), + ('\u{859}', '\u{85b}'), + ('\u{897}', '\u{89f}'), + ('\u{8ca}', '\u{8e1}'), + ('\u{8e3}', '\u{902}'), + ('क', '\u{93a}'), + ('\u{93c}', '\u{93c}'), + ('\u{941}', '\u{948}'), + ('\u{94d}', '\u{94d}'), + ('\u{951}', 'य़'), + ('\u{962}', '\u{963}'), + ('ॸ', 'ॿ'), + ('\u{981}', '\u{981}'), + ('ক', 'ন'), + ('প', 'র'), + ('ল', 'ল'), + ('শ', 'হ'), + ('\u{9bc}', '\u{9bc}'), + ('\u{9be}', '\u{9be}'), + ('\u{9c1}', 
'\u{9c4}'), + ('\u{9cd}', '\u{9cd}'), + ('\u{9d7}', '\u{9d7}'), + ('ড়', 'ঢ়'), + ('য়', 'য়'), + ('\u{9e2}', '\u{9e3}'), + ('ৰ', 'ৱ'), + ('\u{9fe}', '\u{9fe}'), + ('\u{a01}', '\u{a02}'), + ('\u{a3c}', '\u{a3c}'), + ('\u{a41}', '\u{a42}'), + ('\u{a47}', '\u{a48}'), + ('\u{a4b}', '\u{a4d}'), + ('\u{a51}', '\u{a51}'), + ('\u{a70}', '\u{a71}'), + ('\u{a75}', '\u{a75}'), + ('\u{a81}', '\u{a82}'), + ('ક', 'ન'), + ('પ', 'ર'), + ('લ', 'ળ'), + ('વ', 'હ'), + ('\u{abc}', '\u{abc}'), + ('\u{ac1}', '\u{ac5}'), + ('\u{ac7}', '\u{ac8}'), + ('\u{acd}', '\u{acd}'), + ('\u{ae2}', '\u{ae3}'), + ('ૹ', '\u{aff}'), + ('\u{b01}', '\u{b01}'), + ('କ', 'ନ'), + ('ପ', 'ର'), + ('ଲ', 'ଳ'), + ('ଵ', 'ହ'), + ('\u{b3c}', '\u{b3c}'), + ('\u{b3e}', '\u{b3f}'), + ('\u{b41}', '\u{b44}'), + ('\u{b4d}', '\u{b4d}'), + ('\u{b55}', '\u{b57}'), + ('ଡ଼', 'ଢ଼'), + ('ୟ', 'ୟ'), + ('\u{b62}', '\u{b63}'), + ('ୱ', 'ୱ'), + ('\u{b82}', '\u{b82}'), + ('\u{bbe}', '\u{bbe}'), + ('\u{bc0}', '\u{bc0}'), + ('\u{bcd}', '\u{bcd}'), + ('\u{bd7}', '\u{bd7}'), + ('\u{c00}', '\u{c00}'), + ('\u{c04}', '\u{c04}'), + ('క', 'న'), + ('ప', 'హ'), + ('\u{c3c}', '\u{c3c}'), + ('\u{c3e}', '\u{c40}'), + ('\u{c46}', '\u{c48}'), + ('\u{c4a}', '\u{c4d}'), + ('\u{c55}', '\u{c56}'), + ('ౘ', 'ౚ'), + ('\u{c62}', '\u{c63}'), + ('\u{c81}', '\u{c81}'), + ('\u{cbc}', '\u{cbc}'), + ('\u{cbf}', '\u{cc0}'), + ('\u{cc2}', '\u{cc2}'), + ('\u{cc6}', '\u{cc8}'), + ('\u{cca}', '\u{ccd}'), + ('\u{cd5}', '\u{cd6}'), + ('\u{ce2}', '\u{ce3}'), + ('\u{d00}', '\u{d01}'), + ('ക', '\u{d3c}'), + ('\u{d3e}', '\u{d3e}'), + ('\u{d41}', '\u{d44}'), + ('\u{d4d}', '\u{d4d}'), + ('\u{d57}', '\u{d57}'), + ('\u{d62}', '\u{d63}'), + ('\u{d81}', '\u{d81}'), + ('\u{dca}', '\u{dca}'), + ('\u{dcf}', '\u{dcf}'), + ('\u{dd2}', '\u{dd4}'), + ('\u{dd6}', '\u{dd6}'), + ('\u{ddf}', '\u{ddf}'), + ('\u{e31}', '\u{e31}'), + ('\u{e34}', '\u{e3a}'), + ('\u{e47}', '\u{e4e}'), + ('\u{eb1}', '\u{eb1}'), + ('\u{eb4}', '\u{ebc}'), + ('\u{ec8}', '\u{ece}'), + ('\u{f18}', '\u{f19}'), + ('\u{f35}', '\u{f35}'), + ('\u{f37}', '\u{f37}'), + ('\u{f39}', '\u{f39}'), + ('\u{f71}', '\u{f7e}'), + ('\u{f80}', '\u{f84}'), + ('\u{f86}', '\u{f87}'), + ('\u{f8d}', '\u{f97}'), + ('\u{f99}', '\u{fbc}'), + ('\u{fc6}', '\u{fc6}'), + ('\u{102d}', '\u{1030}'), + ('\u{1032}', '\u{1037}'), + ('\u{1039}', '\u{103a}'), + ('\u{103d}', '\u{103e}'), + ('\u{1058}', '\u{1059}'), + ('\u{105e}', '\u{1060}'), + ('\u{1071}', '\u{1074}'), + ('\u{1082}', '\u{1082}'), + ('\u{1085}', '\u{1086}'), + ('\u{108d}', '\u{108d}'), + ('\u{109d}', '\u{109d}'), + ('\u{135d}', '\u{135f}'), + ('\u{1712}', '\u{1715}'), + ('\u{1732}', '\u{1734}'), + ('\u{1752}', '\u{1753}'), + ('\u{1772}', '\u{1773}'), + ('\u{17b4}', '\u{17b5}'), + ('\u{17b7}', '\u{17bd}'), + ('\u{17c6}', '\u{17c6}'), + ('\u{17c9}', '\u{17d3}'), + ('\u{17dd}', '\u{17dd}'), + ('\u{180b}', '\u{180d}'), + ('\u{180f}', '\u{180f}'), + ('\u{1885}', '\u{1886}'), + ('\u{18a9}', '\u{18a9}'), + ('\u{1920}', '\u{1922}'), + ('\u{1927}', '\u{1928}'), + ('\u{1932}', '\u{1932}'), + ('\u{1939}', '\u{193b}'), + ('\u{1a17}', '\u{1a18}'), + ('\u{1a1b}', '\u{1a1b}'), + ('\u{1a56}', '\u{1a56}'), + ('\u{1a58}', '\u{1a5e}'), + ('\u{1a60}', '\u{1a60}'), + ('\u{1a62}', '\u{1a62}'), + ('\u{1a65}', '\u{1a6c}'), + ('\u{1a73}', '\u{1a7c}'), + ('\u{1a7f}', '\u{1a7f}'), + ('\u{1ab0}', '\u{1ace}'), + ('\u{1b00}', '\u{1b03}'), + ('\u{1b34}', '\u{1b3d}'), + ('\u{1b42}', '\u{1b44}'), + ('\u{1b6b}', '\u{1b73}'), + ('\u{1b80}', '\u{1b81}'), + ('\u{1ba2}', '\u{1ba5}'), + ('\u{1ba8}', '\u{1bad}'), + ('\u{1be6}', '\u{1be6}'), + ('\u{1be8}', 
'\u{1be9}'), + ('\u{1bed}', '\u{1bed}'), + ('\u{1bef}', '\u{1bf3}'), + ('\u{1c2c}', '\u{1c33}'), + ('\u{1c36}', '\u{1c37}'), + ('\u{1cd0}', '\u{1cd2}'), + ('\u{1cd4}', '\u{1ce0}'), + ('\u{1ce2}', '\u{1ce8}'), + ('\u{1ced}', '\u{1ced}'), + ('\u{1cf4}', '\u{1cf4}'), + ('\u{1cf8}', '\u{1cf9}'), + ('\u{1dc0}', '\u{1dff}'), + ('\u{200d}', '\u{200d}'), + ('\u{20d0}', '\u{20f0}'), + ('\u{2cef}', '\u{2cf1}'), + ('\u{2d7f}', '\u{2d7f}'), + ('\u{2de0}', '\u{2dff}'), + ('\u{302a}', '\u{302f}'), + ('\u{3099}', '\u{309a}'), + ('\u{a66f}', '\u{a672}'), + ('\u{a674}', '\u{a67d}'), + ('\u{a69e}', '\u{a69f}'), + ('\u{a6f0}', '\u{a6f1}'), + ('\u{a802}', '\u{a802}'), + ('\u{a806}', '\u{a806}'), + ('\u{a80b}', '\u{a80b}'), + ('\u{a825}', '\u{a826}'), + ('\u{a82c}', '\u{a82c}'), + ('\u{a8c4}', '\u{a8c5}'), + ('\u{a8e0}', '\u{a8f1}'), + ('\u{a8ff}', '\u{a8ff}'), + ('\u{a926}', '\u{a92d}'), + ('\u{a947}', '\u{a951}'), + ('\u{a953}', '\u{a953}'), + ('\u{a980}', '\u{a982}'), + ('\u{a9b3}', '\u{a9b3}'), + ('\u{a9b6}', '\u{a9b9}'), + ('\u{a9bc}', '\u{a9bd}'), + ('\u{a9c0}', '\u{a9c0}'), + ('\u{a9e5}', '\u{a9e5}'), + ('\u{aa29}', '\u{aa2e}'), + ('\u{aa31}', '\u{aa32}'), + ('\u{aa35}', '\u{aa36}'), + ('\u{aa43}', '\u{aa43}'), + ('\u{aa4c}', '\u{aa4c}'), + ('\u{aa7c}', '\u{aa7c}'), + ('\u{aab0}', '\u{aab0}'), + ('\u{aab2}', '\u{aab4}'), + ('\u{aab7}', '\u{aab8}'), + ('\u{aabe}', '\u{aabf}'), + ('\u{aac1}', '\u{aac1}'), + ('\u{aaec}', '\u{aaed}'), + ('\u{aaf6}', '\u{aaf6}'), + ('\u{abe5}', '\u{abe5}'), + ('\u{abe8}', '\u{abe8}'), + ('\u{abed}', '\u{abed}'), + ('\u{fb1e}', '\u{fb1e}'), + ('\u{fe00}', '\u{fe0f}'), + ('\u{fe20}', '\u{fe2f}'), + ('\u{ff9e}', '\u{ff9f}'), + ('\u{101fd}', '\u{101fd}'), + ('\u{102e0}', '\u{102e0}'), + ('\u{10376}', '\u{1037a}'), + ('\u{10a01}', '\u{10a03}'), + ('\u{10a05}', '\u{10a06}'), + ('\u{10a0c}', '\u{10a0f}'), + ('\u{10a38}', '\u{10a3a}'), + ('\u{10a3f}', '\u{10a3f}'), + ('\u{10ae5}', '\u{10ae6}'), + ('\u{10d24}', '\u{10d27}'), + ('\u{10d69}', '\u{10d6d}'), + ('\u{10eab}', '\u{10eac}'), + ('\u{10efc}', '\u{10eff}'), + ('\u{10f46}', '\u{10f50}'), + ('\u{10f82}', '\u{10f85}'), + ('\u{11001}', '\u{11001}'), + ('\u{11038}', '\u{11046}'), + ('\u{11070}', '\u{11070}'), + ('\u{11073}', '\u{11074}'), + ('\u{1107f}', '\u{11081}'), + ('\u{110b3}', '\u{110b6}'), + ('\u{110b9}', '\u{110ba}'), + ('\u{110c2}', '\u{110c2}'), + ('\u{11100}', '\u{11102}'), + ('\u{11127}', '\u{1112b}'), + ('\u{1112d}', '\u{11134}'), + ('\u{11173}', '\u{11173}'), + ('\u{11180}', '\u{11181}'), + ('\u{111b6}', '\u{111be}'), + ('\u{111c0}', '\u{111c0}'), + ('\u{111c9}', '\u{111cc}'), + ('\u{111cf}', '\u{111cf}'), + ('\u{1122f}', '\u{11231}'), + ('\u{11234}', '\u{11237}'), + ('\u{1123e}', '\u{1123e}'), + ('\u{11241}', '\u{11241}'), + ('\u{112df}', '\u{112df}'), + ('\u{112e3}', '\u{112ea}'), + ('\u{11300}', '\u{11301}'), + ('\u{1133b}', '\u{1133c}'), + ('\u{1133e}', '\u{1133e}'), + ('\u{11340}', '\u{11340}'), + ('\u{1134d}', '\u{1134d}'), + ('\u{11357}', '\u{11357}'), + ('\u{11366}', '\u{1136c}'), + ('\u{11370}', '\u{11374}'), + ('\u{113b8}', '\u{113b8}'), + ('\u{113bb}', '\u{113c0}'), + ('\u{113c2}', '\u{113c2}'), + ('\u{113c5}', '\u{113c5}'), + ('\u{113c7}', '\u{113c9}'), + ('\u{113ce}', '\u{113d0}'), + ('\u{113d2}', '\u{113d2}'), + ('\u{113e1}', '\u{113e2}'), + ('\u{11438}', '\u{1143f}'), + ('\u{11442}', '\u{11444}'), + ('\u{11446}', '\u{11446}'), + ('\u{1145e}', '\u{1145e}'), + ('\u{114b0}', '\u{114b0}'), + ('\u{114b3}', '\u{114b8}'), + ('\u{114ba}', '\u{114ba}'), + ('\u{114bd}', '\u{114bd}'), + ('\u{114bf}', '\u{114c0}'), 
+ ('\u{114c2}', '\u{114c3}'), + ('\u{115af}', '\u{115af}'), + ('\u{115b2}', '\u{115b5}'), + ('\u{115bc}', '\u{115bd}'), + ('\u{115bf}', '\u{115c0}'), + ('\u{115dc}', '\u{115dd}'), + ('\u{11633}', '\u{1163a}'), + ('\u{1163d}', '\u{1163d}'), + ('\u{1163f}', '\u{11640}'), + ('\u{116ab}', '\u{116ab}'), + ('\u{116ad}', '\u{116ad}'), + ('\u{116b0}', '\u{116b7}'), + ('\u{1171d}', '\u{1171d}'), + ('\u{1171f}', '\u{1171f}'), + ('\u{11722}', '\u{11725}'), + ('\u{11727}', '\u{1172b}'), + ('\u{1182f}', '\u{11837}'), + ('\u{11839}', '\u{1183a}'), + ('\u{11930}', '\u{11930}'), + ('\u{1193b}', '\u{1193e}'), + ('\u{11943}', '\u{11943}'), + ('\u{119d4}', '\u{119d7}'), + ('\u{119da}', '\u{119db}'), + ('\u{119e0}', '\u{119e0}'), + ('\u{11a01}', '\u{11a0a}'), + ('\u{11a33}', '\u{11a38}'), + ('\u{11a3b}', '\u{11a3e}'), + ('\u{11a47}', '\u{11a47}'), + ('\u{11a51}', '\u{11a56}'), + ('\u{11a59}', '\u{11a5b}'), + ('\u{11a8a}', '\u{11a96}'), + ('\u{11a98}', '\u{11a99}'), + ('\u{11c30}', '\u{11c36}'), + ('\u{11c38}', '\u{11c3d}'), + ('\u{11c3f}', '\u{11c3f}'), + ('\u{11c92}', '\u{11ca7}'), + ('\u{11caa}', '\u{11cb0}'), + ('\u{11cb2}', '\u{11cb3}'), + ('\u{11cb5}', '\u{11cb6}'), + ('\u{11d31}', '\u{11d36}'), + ('\u{11d3a}', '\u{11d3a}'), + ('\u{11d3c}', '\u{11d3d}'), + ('\u{11d3f}', '\u{11d45}'), + ('\u{11d47}', '\u{11d47}'), + ('\u{11d90}', '\u{11d91}'), + ('\u{11d95}', '\u{11d95}'), + ('\u{11d97}', '\u{11d97}'), + ('\u{11ef3}', '\u{11ef4}'), + ('\u{11f00}', '\u{11f01}'), + ('\u{11f36}', '\u{11f3a}'), + ('\u{11f40}', '\u{11f42}'), + ('\u{11f5a}', '\u{11f5a}'), + ('\u{13440}', '\u{13440}'), + ('\u{13447}', '\u{13455}'), + ('\u{1611e}', '\u{16129}'), + ('\u{1612d}', '\u{1612f}'), + ('\u{16af0}', '\u{16af4}'), + ('\u{16b30}', '\u{16b36}'), + ('\u{16f4f}', '\u{16f4f}'), + ('\u{16f8f}', '\u{16f92}'), + ('\u{16fe4}', '\u{16fe4}'), + ('\u{16ff0}', '\u{16ff1}'), + ('\u{1bc9d}', '\u{1bc9e}'), + ('\u{1cf00}', '\u{1cf2d}'), + ('\u{1cf30}', '\u{1cf46}'), + ('\u{1d165}', '\u{1d169}'), + ('\u{1d16d}', '\u{1d172}'), + ('\u{1d17b}', '\u{1d182}'), + ('\u{1d185}', '\u{1d18b}'), + ('\u{1d1aa}', '\u{1d1ad}'), + ('\u{1d242}', '\u{1d244}'), + ('\u{1da00}', '\u{1da36}'), + ('\u{1da3b}', '\u{1da6c}'), + ('\u{1da75}', '\u{1da75}'), + ('\u{1da84}', '\u{1da84}'), + ('\u{1da9b}', '\u{1da9f}'), + ('\u{1daa1}', '\u{1daaf}'), + ('\u{1e000}', '\u{1e006}'), + ('\u{1e008}', '\u{1e018}'), + ('\u{1e01b}', '\u{1e021}'), + ('\u{1e023}', '\u{1e024}'), + ('\u{1e026}', '\u{1e02a}'), + ('\u{1e08f}', '\u{1e08f}'), + ('\u{1e130}', '\u{1e136}'), + ('\u{1e2ae}', '\u{1e2ae}'), + ('\u{1e2ec}', '\u{1e2ef}'), + ('\u{1e4ec}', '\u{1e4ef}'), + ('\u{1e5ee}', '\u{1e5ef}'), + ('\u{1e8d0}', '\u{1e8d6}'), + ('\u{1e944}', '\u{1e94a}'), + ('🏻', '🏿'), + ('\u{e0020}', '\u{e007f}'), + ('\u{e0100}', '\u{e01ef}'), +]; + +pub const JOIN_CONTROL: &'static [(char, char)] = &[('\u{200c}', '\u{200d}')]; + +pub const LOGICAL_ORDER_EXCEPTION: &'static [(char, char)] = &[ + ('เ', 'ไ'), + ('ເ', 'ໄ'), + ('ᦵ', 'ᦷ'), + ('ᦺ', 'ᦺ'), + ('ꪵ', 'ꪶ'), + ('ꪹ', 'ꪹ'), + ('ꪻ', 'ꪼ'), +]; + +pub const LOWERCASE: &'static [(char, char)] = &[ + ('a', 'z'), + ('ª', 'ª'), + ('µ', 'µ'), + ('º', 'º'), + ('ß', 'ö'), + ('ø', 'ÿ'), + ('ā', 'ā'), + ('ă', 'ă'), + ('ą', 'ą'), + ('ć', 'ć'), + ('ĉ', 'ĉ'), + ('ċ', 'ċ'), + ('č', 'č'), + ('ď', 'ď'), + ('đ', 'đ'), + ('ē', 'ē'), + ('ĕ', 'ĕ'), + ('ė', 'ė'), + ('ę', 'ę'), + ('ě', 'ě'), + ('ĝ', 'ĝ'), + ('ğ', 'ğ'), + ('ġ', 'ġ'), + ('ģ', 'ģ'), + ('ĥ', 'ĥ'), + ('ħ', 'ħ'), + ('ĩ', 'ĩ'), + ('ī', 'ī'), + ('ĭ', 'ĭ'), + ('į', 'į'), + ('ı', 'ı'), + ('ij', 'ij'), + ('ĵ', 'ĵ'), + ('ķ', 
'ĸ'), + ('ĺ', 'ĺ'), + ('ļ', 'ļ'), + ('ľ', 'ľ'), + ('ŀ', 'ŀ'), + ('ł', 'ł'), + ('ń', 'ń'), + ('ņ', 'ņ'), + ('ň', 'ʼn'), + ('ŋ', 'ŋ'), + ('ō', 'ō'), + ('ŏ', 'ŏ'), + ('ő', 'ő'), + ('œ', 'œ'), + ('ŕ', 'ŕ'), + ('ŗ', 'ŗ'), + ('ř', 'ř'), + ('ś', 'ś'), + ('ŝ', 'ŝ'), + ('ş', 'ş'), + ('š', 'š'), + ('ţ', 'ţ'), + ('ť', 'ť'), + ('ŧ', 'ŧ'), + ('ũ', 'ũ'), + ('ū', 'ū'), + ('ŭ', 'ŭ'), + ('ů', 'ů'), + ('ű', 'ű'), + ('ų', 'ų'), + ('ŵ', 'ŵ'), + ('ŷ', 'ŷ'), + ('ź', 'ź'), + ('ż', 'ż'), + ('ž', 'ƀ'), + ('ƃ', 'ƃ'), + ('ƅ', 'ƅ'), + ('ƈ', 'ƈ'), + ('ƌ', 'ƍ'), + ('ƒ', 'ƒ'), + ('ƕ', 'ƕ'), + ('ƙ', 'ƛ'), + ('ƞ', 'ƞ'), + ('ơ', 'ơ'), + ('ƣ', 'ƣ'), + ('ƥ', 'ƥ'), + ('ƨ', 'ƨ'), + ('ƪ', 'ƫ'), + ('ƭ', 'ƭ'), + ('ư', 'ư'), + ('ƴ', 'ƴ'), + ('ƶ', 'ƶ'), + ('ƹ', 'ƺ'), + ('ƽ', 'ƿ'), + ('dž', 'dž'), + ('lj', 'lj'), + ('nj', 'nj'), + ('ǎ', 'ǎ'), + ('ǐ', 'ǐ'), + ('ǒ', 'ǒ'), + ('ǔ', 'ǔ'), + ('ǖ', 'ǖ'), + ('ǘ', 'ǘ'), + ('ǚ', 'ǚ'), + ('ǜ', 'ǝ'), + ('ǟ', 'ǟ'), + ('ǡ', 'ǡ'), + ('ǣ', 'ǣ'), + ('ǥ', 'ǥ'), + ('ǧ', 'ǧ'), + ('ǩ', 'ǩ'), + ('ǫ', 'ǫ'), + ('ǭ', 'ǭ'), + ('ǯ', 'ǰ'), + ('dz', 'dz'), + ('ǵ', 'ǵ'), + ('ǹ', 'ǹ'), + ('ǻ', 'ǻ'), + ('ǽ', 'ǽ'), + ('ǿ', 'ǿ'), + ('ȁ', 'ȁ'), + ('ȃ', 'ȃ'), + ('ȅ', 'ȅ'), + ('ȇ', 'ȇ'), + ('ȉ', 'ȉ'), + ('ȋ', 'ȋ'), + ('ȍ', 'ȍ'), + ('ȏ', 'ȏ'), + ('ȑ', 'ȑ'), + ('ȓ', 'ȓ'), + ('ȕ', 'ȕ'), + ('ȗ', 'ȗ'), + ('ș', 'ș'), + ('ț', 'ț'), + ('ȝ', 'ȝ'), + ('ȟ', 'ȟ'), + ('ȡ', 'ȡ'), + ('ȣ', 'ȣ'), + ('ȥ', 'ȥ'), + ('ȧ', 'ȧ'), + ('ȩ', 'ȩ'), + ('ȫ', 'ȫ'), + ('ȭ', 'ȭ'), + ('ȯ', 'ȯ'), + ('ȱ', 'ȱ'), + ('ȳ', 'ȹ'), + ('ȼ', 'ȼ'), + ('ȿ', 'ɀ'), + ('ɂ', 'ɂ'), + ('ɇ', 'ɇ'), + ('ɉ', 'ɉ'), + ('ɋ', 'ɋ'), + ('ɍ', 'ɍ'), + ('ɏ', 'ʓ'), + ('ʕ', 'ʸ'), + ('ˀ', 'ˁ'), + ('ˠ', 'ˤ'), + ('\u{345}', '\u{345}'), + ('ͱ', 'ͱ'), + ('ͳ', 'ͳ'), + ('ͷ', 'ͷ'), + ('ͺ', 'ͽ'), + ('ΐ', 'ΐ'), + ('ά', 'ώ'), + ('ϐ', 'ϑ'), + ('ϕ', 'ϗ'), + ('ϙ', 'ϙ'), + ('ϛ', 'ϛ'), + ('ϝ', 'ϝ'), + ('ϟ', 'ϟ'), + ('ϡ', 'ϡ'), + ('ϣ', 'ϣ'), + ('ϥ', 'ϥ'), + ('ϧ', 'ϧ'), + ('ϩ', 'ϩ'), + ('ϫ', 'ϫ'), + ('ϭ', 'ϭ'), + ('ϯ', 'ϳ'), + ('ϵ', 'ϵ'), + ('ϸ', 'ϸ'), + ('ϻ', 'ϼ'), + ('а', 'џ'), + ('ѡ', 'ѡ'), + ('ѣ', 'ѣ'), + ('ѥ', 'ѥ'), + ('ѧ', 'ѧ'), + ('ѩ', 'ѩ'), + ('ѫ', 'ѫ'), + ('ѭ', 'ѭ'), + ('ѯ', 'ѯ'), + ('ѱ', 'ѱ'), + ('ѳ', 'ѳ'), + ('ѵ', 'ѵ'), + ('ѷ', 'ѷ'), + ('ѹ', 'ѹ'), + ('ѻ', 'ѻ'), + ('ѽ', 'ѽ'), + ('ѿ', 'ѿ'), + ('ҁ', 'ҁ'), + ('ҋ', 'ҋ'), + ('ҍ', 'ҍ'), + ('ҏ', 'ҏ'), + ('ґ', 'ґ'), + ('ғ', 'ғ'), + ('ҕ', 'ҕ'), + ('җ', 'җ'), + ('ҙ', 'ҙ'), + ('қ', 'қ'), + ('ҝ', 'ҝ'), + ('ҟ', 'ҟ'), + ('ҡ', 'ҡ'), + ('ң', 'ң'), + ('ҥ', 'ҥ'), + ('ҧ', 'ҧ'), + ('ҩ', 'ҩ'), + ('ҫ', 'ҫ'), + ('ҭ', 'ҭ'), + ('ү', 'ү'), + ('ұ', 'ұ'), + ('ҳ', 'ҳ'), + ('ҵ', 'ҵ'), + ('ҷ', 'ҷ'), + ('ҹ', 'ҹ'), + ('һ', 'һ'), + ('ҽ', 'ҽ'), + ('ҿ', 'ҿ'), + ('ӂ', 'ӂ'), + ('ӄ', 'ӄ'), + ('ӆ', 'ӆ'), + ('ӈ', 'ӈ'), + ('ӊ', 'ӊ'), + ('ӌ', 'ӌ'), + ('ӎ', 'ӏ'), + ('ӑ', 'ӑ'), + ('ӓ', 'ӓ'), + ('ӕ', 'ӕ'), + ('ӗ', 'ӗ'), + ('ә', 'ә'), + ('ӛ', 'ӛ'), + ('ӝ', 'ӝ'), + ('ӟ', 'ӟ'), + ('ӡ', 'ӡ'), + ('ӣ', 'ӣ'), + ('ӥ', 'ӥ'), + ('ӧ', 'ӧ'), + ('ө', 'ө'), + ('ӫ', 'ӫ'), + ('ӭ', 'ӭ'), + ('ӯ', 'ӯ'), + ('ӱ', 'ӱ'), + ('ӳ', 'ӳ'), + ('ӵ', 'ӵ'), + ('ӷ', 'ӷ'), + ('ӹ', 'ӹ'), + ('ӻ', 'ӻ'), + ('ӽ', 'ӽ'), + ('ӿ', 'ӿ'), + ('ԁ', 'ԁ'), + ('ԃ', 'ԃ'), + ('ԅ', 'ԅ'), + ('ԇ', 'ԇ'), + ('ԉ', 'ԉ'), + ('ԋ', 'ԋ'), + ('ԍ', 'ԍ'), + ('ԏ', 'ԏ'), + ('ԑ', 'ԑ'), + ('ԓ', 'ԓ'), + ('ԕ', 'ԕ'), + ('ԗ', 'ԗ'), + ('ԙ', 'ԙ'), + ('ԛ', 'ԛ'), + ('ԝ', 'ԝ'), + ('ԟ', 'ԟ'), + ('ԡ', 'ԡ'), + ('ԣ', 'ԣ'), + ('ԥ', 'ԥ'), + ('ԧ', 'ԧ'), + ('ԩ', 'ԩ'), + ('ԫ', 'ԫ'), + ('ԭ', 'ԭ'), + ('ԯ', 'ԯ'), + ('ՠ', 'ֈ'), + ('ა', 'ჺ'), + ('ჼ', 'ჿ'), + ('ᏸ', 'ᏽ'), + ('ᲀ', 'ᲈ'), + ('ᲊ', 'ᲊ'), + ('ᴀ', 'ᶿ'), + ('ḁ', 'ḁ'), + ('ḃ', 'ḃ'), + ('ḅ', 'ḅ'), + ('ḇ', 'ḇ'), + ('ḉ', 'ḉ'), 
+ ('ḋ', 'ḋ'), + ('ḍ', 'ḍ'), + ('ḏ', 'ḏ'), + ('ḑ', 'ḑ'), + ('ḓ', 'ḓ'), + ('ḕ', 'ḕ'), + ('ḗ', 'ḗ'), + ('ḙ', 'ḙ'), + ('ḛ', 'ḛ'), + ('ḝ', 'ḝ'), + ('ḟ', 'ḟ'), + ('ḡ', 'ḡ'), + ('ḣ', 'ḣ'), + ('ḥ', 'ḥ'), + ('ḧ', 'ḧ'), + ('ḩ', 'ḩ'), + ('ḫ', 'ḫ'), + ('ḭ', 'ḭ'), + ('ḯ', 'ḯ'), + ('ḱ', 'ḱ'), + ('ḳ', 'ḳ'), + ('ḵ', 'ḵ'), + ('ḷ', 'ḷ'), + ('ḹ', 'ḹ'), + ('ḻ', 'ḻ'), + ('ḽ', 'ḽ'), + ('ḿ', 'ḿ'), + ('ṁ', 'ṁ'), + ('ṃ', 'ṃ'), + ('ṅ', 'ṅ'), + ('ṇ', 'ṇ'), + ('ṉ', 'ṉ'), + ('ṋ', 'ṋ'), + ('ṍ', 'ṍ'), + ('ṏ', 'ṏ'), + ('ṑ', 'ṑ'), + ('ṓ', 'ṓ'), + ('ṕ', 'ṕ'), + ('ṗ', 'ṗ'), + ('ṙ', 'ṙ'), + ('ṛ', 'ṛ'), + ('ṝ', 'ṝ'), + ('ṟ', 'ṟ'), + ('ṡ', 'ṡ'), + ('ṣ', 'ṣ'), + ('ṥ', 'ṥ'), + ('ṧ', 'ṧ'), + ('ṩ', 'ṩ'), + ('ṫ', 'ṫ'), + ('ṭ', 'ṭ'), + ('ṯ', 'ṯ'), + ('ṱ', 'ṱ'), + ('ṳ', 'ṳ'), + ('ṵ', 'ṵ'), + ('ṷ', 'ṷ'), + ('ṹ', 'ṹ'), + ('ṻ', 'ṻ'), + ('ṽ', 'ṽ'), + ('ṿ', 'ṿ'), + ('ẁ', 'ẁ'), + ('ẃ', 'ẃ'), + ('ẅ', 'ẅ'), + ('ẇ', 'ẇ'), + ('ẉ', 'ẉ'), + ('ẋ', 'ẋ'), + ('ẍ', 'ẍ'), + ('ẏ', 'ẏ'), + ('ẑ', 'ẑ'), + ('ẓ', 'ẓ'), + ('ẕ', 'ẝ'), + ('ẟ', 'ẟ'), + ('ạ', 'ạ'), + ('ả', 'ả'), + ('ấ', 'ấ'), + ('ầ', 'ầ'), + ('ẩ', 'ẩ'), + ('ẫ', 'ẫ'), + ('ậ', 'ậ'), + ('ắ', 'ắ'), + ('ằ', 'ằ'), + ('ẳ', 'ẳ'), + ('ẵ', 'ẵ'), + ('ặ', 'ặ'), + ('ẹ', 'ẹ'), + ('ẻ', 'ẻ'), + ('ẽ', 'ẽ'), + ('ế', 'ế'), + ('ề', 'ề'), + ('ể', 'ể'), + ('ễ', 'ễ'), + ('ệ', 'ệ'), + ('ỉ', 'ỉ'), + ('ị', 'ị'), + ('ọ', 'ọ'), + ('ỏ', 'ỏ'), + ('ố', 'ố'), + ('ồ', 'ồ'), + ('ổ', 'ổ'), + ('ỗ', 'ỗ'), + ('ộ', 'ộ'), + ('ớ', 'ớ'), + ('ờ', 'ờ'), + ('ở', 'ở'), + ('ỡ', 'ỡ'), + ('ợ', 'ợ'), + ('ụ', 'ụ'), + ('ủ', 'ủ'), + ('ứ', 'ứ'), + ('ừ', 'ừ'), + ('ử', 'ử'), + ('ữ', 'ữ'), + ('ự', 'ự'), + ('ỳ', 'ỳ'), + ('ỵ', 'ỵ'), + ('ỷ', 'ỷ'), + ('ỹ', 'ỹ'), + ('ỻ', 'ỻ'), + ('ỽ', 'ỽ'), + ('ỿ', 'ἇ'), + ('ἐ', 'ἕ'), + ('ἠ', 'ἧ'), + ('ἰ', 'ἷ'), + ('ὀ', 'ὅ'), + ('ὐ', 'ὗ'), + ('ὠ', 'ὧ'), + ('ὰ', 'ώ'), + ('ᾀ', 'ᾇ'), + ('ᾐ', 'ᾗ'), + ('ᾠ', 'ᾧ'), + ('ᾰ', 'ᾴ'), + ('ᾶ', 'ᾷ'), + ('ι', 'ι'), + ('ῂ', 'ῄ'), + ('ῆ', 'ῇ'), + ('ῐ', 'ΐ'), + ('ῖ', 'ῗ'), + ('ῠ', 'ῧ'), + ('ῲ', 'ῴ'), + ('ῶ', 'ῷ'), + ('ⁱ', 'ⁱ'), + ('ⁿ', 'ⁿ'), + ('ₐ', 'ₜ'), + ('ℊ', 'ℊ'), + ('ℎ', 'ℏ'), + ('ℓ', 'ℓ'), + ('ℯ', 'ℯ'), + ('ℴ', 'ℴ'), + ('ℹ', 'ℹ'), + ('ℼ', 'ℽ'), + ('ⅆ', 'ⅉ'), + ('ⅎ', 'ⅎ'), + ('ⅰ', 'ⅿ'), + ('ↄ', 'ↄ'), + ('ⓐ', 'ⓩ'), + ('ⰰ', 'ⱟ'), + ('ⱡ', 'ⱡ'), + ('ⱥ', 'ⱦ'), + ('ⱨ', 'ⱨ'), + ('ⱪ', 'ⱪ'), + ('ⱬ', 'ⱬ'), + ('ⱱ', 'ⱱ'), + ('ⱳ', 'ⱴ'), + ('ⱶ', 'ⱽ'), + ('ⲁ', 'ⲁ'), + ('ⲃ', 'ⲃ'), + ('ⲅ', 'ⲅ'), + ('ⲇ', 'ⲇ'), + ('ⲉ', 'ⲉ'), + ('ⲋ', 'ⲋ'), + ('ⲍ', 'ⲍ'), + ('ⲏ', 'ⲏ'), + ('ⲑ', 'ⲑ'), + ('ⲓ', 'ⲓ'), + ('ⲕ', 'ⲕ'), + ('ⲗ', 'ⲗ'), + ('ⲙ', 'ⲙ'), + ('ⲛ', 'ⲛ'), + ('ⲝ', 'ⲝ'), + ('ⲟ', 'ⲟ'), + ('ⲡ', 'ⲡ'), + ('ⲣ', 'ⲣ'), + ('ⲥ', 'ⲥ'), + ('ⲧ', 'ⲧ'), + ('ⲩ', 'ⲩ'), + ('ⲫ', 'ⲫ'), + ('ⲭ', 'ⲭ'), + ('ⲯ', 'ⲯ'), + ('ⲱ', 'ⲱ'), + ('ⲳ', 'ⲳ'), + ('ⲵ', 'ⲵ'), + ('ⲷ', 'ⲷ'), + ('ⲹ', 'ⲹ'), + ('ⲻ', 'ⲻ'), + ('ⲽ', 'ⲽ'), + ('ⲿ', 'ⲿ'), + ('ⳁ', 'ⳁ'), + ('ⳃ', 'ⳃ'), + ('ⳅ', 'ⳅ'), + ('ⳇ', 'ⳇ'), + ('ⳉ', 'ⳉ'), + ('ⳋ', 'ⳋ'), + ('ⳍ', 'ⳍ'), + ('ⳏ', 'ⳏ'), + ('ⳑ', 'ⳑ'), + ('ⳓ', 'ⳓ'), + ('ⳕ', 'ⳕ'), + ('ⳗ', 'ⳗ'), + ('ⳙ', 'ⳙ'), + ('ⳛ', 'ⳛ'), + ('ⳝ', 'ⳝ'), + ('ⳟ', 'ⳟ'), + ('ⳡ', 'ⳡ'), + ('ⳣ', 'ⳤ'), + ('ⳬ', 'ⳬ'), + ('ⳮ', 'ⳮ'), + ('ⳳ', 'ⳳ'), + ('ⴀ', 'ⴥ'), + ('ⴧ', 'ⴧ'), + ('ⴭ', 'ⴭ'), + ('ꙁ', 'ꙁ'), + ('ꙃ', 'ꙃ'), + ('ꙅ', 'ꙅ'), + ('ꙇ', 'ꙇ'), + ('ꙉ', 'ꙉ'), + ('ꙋ', 'ꙋ'), + ('ꙍ', 'ꙍ'), + ('ꙏ', 'ꙏ'), + ('ꙑ', 'ꙑ'), + ('ꙓ', 'ꙓ'), + ('ꙕ', 'ꙕ'), + ('ꙗ', 'ꙗ'), + ('ꙙ', 'ꙙ'), + ('ꙛ', 'ꙛ'), + ('ꙝ', 'ꙝ'), + ('ꙟ', 'ꙟ'), + ('ꙡ', 'ꙡ'), + ('ꙣ', 'ꙣ'), + ('ꙥ', 'ꙥ'), + ('ꙧ', 'ꙧ'), + ('ꙩ', 'ꙩ'), + ('ꙫ', 'ꙫ'), + ('ꙭ', 'ꙭ'), + ('ꚁ', 'ꚁ'), + ('ꚃ', 'ꚃ'), + ('ꚅ', 'ꚅ'), + ('ꚇ', 'ꚇ'), + ('ꚉ', 'ꚉ'), + ('ꚋ', 'ꚋ'), + ('ꚍ', 'ꚍ'), + ('ꚏ', 'ꚏ'), + ('ꚑ', 'ꚑ'), + ('ꚓ', 'ꚓ'), + ('ꚕ', 'ꚕ'), + ('ꚗ', 
'ꚗ'), + ('ꚙ', 'ꚙ'), + ('ꚛ', 'ꚝ'), + ('ꜣ', 'ꜣ'), + ('ꜥ', 'ꜥ'), + ('ꜧ', 'ꜧ'), + ('ꜩ', 'ꜩ'), + ('ꜫ', 'ꜫ'), + ('ꜭ', 'ꜭ'), + ('ꜯ', 'ꜱ'), + ('ꜳ', 'ꜳ'), + ('ꜵ', 'ꜵ'), + ('ꜷ', 'ꜷ'), + ('ꜹ', 'ꜹ'), + ('ꜻ', 'ꜻ'), + ('ꜽ', 'ꜽ'), + ('ꜿ', 'ꜿ'), + ('ꝁ', 'ꝁ'), + ('ꝃ', 'ꝃ'), + ('ꝅ', 'ꝅ'), + ('ꝇ', 'ꝇ'), + ('ꝉ', 'ꝉ'), + ('ꝋ', 'ꝋ'), + ('ꝍ', 'ꝍ'), + ('ꝏ', 'ꝏ'), + ('ꝑ', 'ꝑ'), + ('ꝓ', 'ꝓ'), + ('ꝕ', 'ꝕ'), + ('ꝗ', 'ꝗ'), + ('ꝙ', 'ꝙ'), + ('ꝛ', 'ꝛ'), + ('ꝝ', 'ꝝ'), + ('ꝟ', 'ꝟ'), + ('ꝡ', 'ꝡ'), + ('ꝣ', 'ꝣ'), + ('ꝥ', 'ꝥ'), + ('ꝧ', 'ꝧ'), + ('ꝩ', 'ꝩ'), + ('ꝫ', 'ꝫ'), + ('ꝭ', 'ꝭ'), + ('ꝯ', 'ꝸ'), + ('ꝺ', 'ꝺ'), + ('ꝼ', 'ꝼ'), + ('ꝿ', 'ꝿ'), + ('ꞁ', 'ꞁ'), + ('ꞃ', 'ꞃ'), + ('ꞅ', 'ꞅ'), + ('ꞇ', 'ꞇ'), + ('ꞌ', 'ꞌ'), + ('ꞎ', 'ꞎ'), + ('ꞑ', 'ꞑ'), + ('ꞓ', 'ꞕ'), + ('ꞗ', 'ꞗ'), + ('ꞙ', 'ꞙ'), + ('ꞛ', 'ꞛ'), + ('ꞝ', 'ꞝ'), + ('ꞟ', 'ꞟ'), + ('ꞡ', 'ꞡ'), + ('ꞣ', 'ꞣ'), + ('ꞥ', 'ꞥ'), + ('ꞧ', 'ꞧ'), + ('ꞩ', 'ꞩ'), + ('ꞯ', 'ꞯ'), + ('ꞵ', 'ꞵ'), + ('ꞷ', 'ꞷ'), + ('ꞹ', 'ꞹ'), + ('ꞻ', 'ꞻ'), + ('ꞽ', 'ꞽ'), + ('ꞿ', 'ꞿ'), + ('ꟁ', 'ꟁ'), + ('ꟃ', 'ꟃ'), + ('ꟈ', 'ꟈ'), + ('ꟊ', 'ꟊ'), + ('ꟍ', 'ꟍ'), + ('ꟑ', 'ꟑ'), + ('ꟓ', 'ꟓ'), + ('ꟕ', 'ꟕ'), + ('ꟗ', 'ꟗ'), + ('ꟙ', 'ꟙ'), + ('ꟛ', 'ꟛ'), + ('ꟲ', 'ꟴ'), + ('ꟶ', 'ꟶ'), + ('ꟸ', 'ꟺ'), + ('ꬰ', 'ꭚ'), + ('ꭜ', 'ꭩ'), + ('ꭰ', 'ꮿ'), + ('ff', 'st'), + ('ﬓ', 'ﬗ'), + ('a', 'z'), + ('𐐨', '𐑏'), + ('𐓘', '𐓻'), + ('𐖗', '𐖡'), + ('𐖣', '𐖱'), + ('𐖳', '𐖹'), + ('𐖻', '𐖼'), + ('𐞀', '𐞀'), + ('𐞃', '𐞅'), + ('𐞇', '𐞰'), + ('𐞲', '𐞺'), + ('𐳀', '𐳲'), + ('𐵰', '𐶅'), + ('𑣀', '𑣟'), + ('𖹠', '𖹿'), + ('𝐚', '𝐳'), + ('𝑎', '𝑔'), + ('𝑖', '𝑧'), + ('𝒂', '𝒛'), + ('𝒶', '𝒹'), + ('𝒻', '𝒻'), + ('𝒽', '𝓃'), + ('𝓅', '𝓏'), + ('𝓪', '𝔃'), + ('𝔞', '𝔷'), + ('𝕒', '𝕫'), + ('𝖆', '𝖟'), + ('𝖺', '𝗓'), + ('𝗮', '𝘇'), + ('𝘢', '𝘻'), + ('𝙖', '𝙯'), + ('𝚊', '𝚥'), + ('𝛂', '𝛚'), + ('𝛜', '𝛡'), + ('𝛼', '𝜔'), + ('𝜖', '𝜛'), + ('𝜶', '𝝎'), + ('𝝐', '𝝕'), + ('𝝰', '𝞈'), + ('𝞊', '𝞏'), + ('𝞪', '𝟂'), + ('𝟄', '𝟉'), + ('𝟋', '𝟋'), + ('𝼀', '𝼉'), + ('𝼋', '𝼞'), + ('𝼥', '𝼪'), + ('𞀰', '𞁭'), + ('𞤢', '𞥃'), +]; + +pub const MATH: &'static [(char, char)] = &[ + ('+', '+'), + ('<', '>'), + ('^', '^'), + ('|', '|'), + ('~', '~'), + ('¬', '¬'), + ('±', '±'), + ('×', '×'), + ('÷', '÷'), + ('ϐ', 'ϒ'), + ('ϕ', 'ϕ'), + ('ϰ', 'ϱ'), + ('ϴ', '϶'), + ('؆', '؈'), + ('‖', '‖'), + ('′', '‴'), + ('⁀', '⁀'), + ('⁄', '⁄'), + ('⁒', '⁒'), + ('\u{2061}', '\u{2064}'), + ('⁺', '⁾'), + ('₊', '₎'), + ('\u{20d0}', '\u{20dc}'), + ('\u{20e1}', '\u{20e1}'), + ('\u{20e5}', '\u{20e6}'), + ('\u{20eb}', '\u{20ef}'), + ('ℂ', 'ℂ'), + ('ℇ', 'ℇ'), + ('ℊ', 'ℓ'), + ('ℕ', 'ℕ'), + ('℘', 'ℝ'), + ('ℤ', 'ℤ'), + ('ℨ', '℩'), + ('ℬ', 'ℭ'), + ('ℯ', 'ℱ'), + ('ℳ', 'ℸ'), + ('ℼ', 'ⅉ'), + ('⅋', '⅋'), + ('←', '↧'), + ('↩', '↮'), + ('↰', '↱'), + ('↶', '↷'), + ('↼', '⇛'), + ('⇝', '⇝'), + ('⇤', '⇥'), + ('⇴', '⋿'), + ('⌈', '⌋'), + ('⌠', '⌡'), + ('⍼', '⍼'), + ('⎛', '⎵'), + ('⎷', '⎷'), + ('⏐', '⏐'), + ('⏜', '⏢'), + ('■', '□'), + ('▮', '▷'), + ('▼', '◁'), + ('◆', '◇'), + ('◊', '○'), + ('●', '◓'), + ('◢', '◢'), + ('◤', '◤'), + ('◧', '◬'), + ('◸', '◿'), + ('★', '☆'), + ('♀', '♀'), + ('♂', '♂'), + ('♠', '♣'), + ('♭', '♯'), + ('⟀', '⟿'), + ('⤀', '⫿'), + ('⬰', '⭄'), + ('⭇', '⭌'), + ('﬩', '﬩'), + ('﹡', '﹦'), + ('﹨', '﹨'), + ('+', '+'), + ('<', '>'), + ('\', '\'), + ('^', '^'), + ('|', '|'), + ('~', '~'), + ('¬', '¬'), + ('←', '↓'), + ('𐶎', '𐶏'), + ('𝐀', '𝑔'), + ('𝑖', '𝒜'), + ('𝒞', '𝒟'), + ('𝒢', '𝒢'), + ('𝒥', '𝒦'), + ('𝒩', '𝒬'), + ('𝒮', '𝒹'), + ('𝒻', '𝒻'), + ('𝒽', '𝓃'), + ('𝓅', '𝔅'), + ('𝔇', '𝔊'), + ('𝔍', '𝔔'), + ('𝔖', '𝔜'), + ('𝔞', '𝔹'), + ('𝔻', '𝔾'), + ('𝕀', '𝕄'), + ('𝕆', '𝕆'), + ('𝕊', '𝕐'), + ('𝕒', '𝚥'), + ('𝚨', '𝟋'), + ('𝟎', '𝟿'), + ('𞸀', '𞸃'), + ('𞸅', '𞸟'), + ('𞸡', '𞸢'), + ('𞸤', '𞸤'), + ('𞸧', 
'𞸧'), + ('𞸩', '𞸲'), + ('𞸴', '𞸷'), + ('𞸹', '𞸹'), + ('𞸻', '𞸻'), + ('𞹂', '𞹂'), + ('𞹇', '𞹇'), + ('𞹉', '𞹉'), + ('𞹋', '𞹋'), + ('𞹍', '𞹏'), + ('𞹑', '𞹒'), + ('𞹔', '𞹔'), + ('𞹗', '𞹗'), + ('𞹙', '𞹙'), + ('𞹛', '𞹛'), + ('𞹝', '𞹝'), + ('𞹟', '𞹟'), + ('𞹡', '𞹢'), + ('𞹤', '𞹤'), + ('𞹧', '𞹪'), + ('𞹬', '𞹲'), + ('𞹴', '𞹷'), + ('𞹹', '𞹼'), + ('𞹾', '𞹾'), + ('𞺀', '𞺉'), + ('𞺋', '𞺛'), + ('𞺡', '𞺣'), + ('𞺥', '𞺩'), + ('𞺫', '𞺻'), + ('𞻰', '𞻱'), +]; + +pub const MODIFIER_COMBINING_MARK: &'static [(char, char)] = &[ + ('\u{654}', '\u{655}'), + ('\u{658}', '\u{658}'), + ('\u{6dc}', '\u{6dc}'), + ('\u{6e3}', '\u{6e3}'), + ('\u{6e7}', '\u{6e8}'), + ('\u{8ca}', '\u{8cb}'), + ('\u{8cd}', '\u{8cf}'), + ('\u{8d3}', '\u{8d3}'), + ('\u{8f3}', '\u{8f3}'), +]; + +pub const NONCHARACTER_CODE_POINT: &'static [(char, char)] = &[ + ('\u{fdd0}', '\u{fdef}'), + ('\u{fffe}', '\u{ffff}'), + ('\u{1fffe}', '\u{1ffff}'), + ('\u{2fffe}', '\u{2ffff}'), + ('\u{3fffe}', '\u{3ffff}'), + ('\u{4fffe}', '\u{4ffff}'), + ('\u{5fffe}', '\u{5ffff}'), + ('\u{6fffe}', '\u{6ffff}'), + ('\u{7fffe}', '\u{7ffff}'), + ('\u{8fffe}', '\u{8ffff}'), + ('\u{9fffe}', '\u{9ffff}'), + ('\u{afffe}', '\u{affff}'), + ('\u{bfffe}', '\u{bffff}'), + ('\u{cfffe}', '\u{cffff}'), + ('\u{dfffe}', '\u{dffff}'), + ('\u{efffe}', '\u{effff}'), + ('\u{ffffe}', '\u{fffff}'), + ('\u{10fffe}', '\u{10ffff}'), +]; + +pub const OTHER_ALPHABETIC: &'static [(char, char)] = &[ + ('\u{345}', '\u{345}'), + ('\u{363}', '\u{36f}'), + ('\u{5b0}', '\u{5bd}'), + ('\u{5bf}', '\u{5bf}'), + ('\u{5c1}', '\u{5c2}'), + ('\u{5c4}', '\u{5c5}'), + ('\u{5c7}', '\u{5c7}'), + ('\u{610}', '\u{61a}'), + ('\u{64b}', '\u{657}'), + ('\u{659}', '\u{65f}'), + ('\u{670}', '\u{670}'), + ('\u{6d6}', '\u{6dc}'), + ('\u{6e1}', '\u{6e4}'), + ('\u{6e7}', '\u{6e8}'), + ('\u{6ed}', '\u{6ed}'), + ('\u{711}', '\u{711}'), + ('\u{730}', '\u{73f}'), + ('\u{7a6}', '\u{7b0}'), + ('\u{816}', '\u{817}'), + ('\u{81b}', '\u{823}'), + ('\u{825}', '\u{827}'), + ('\u{829}', '\u{82c}'), + ('\u{897}', '\u{897}'), + ('\u{8d4}', '\u{8df}'), + ('\u{8e3}', '\u{8e9}'), + ('\u{8f0}', 'ः'), + ('\u{93a}', 'ऻ'), + ('ा', 'ौ'), + ('ॎ', 'ॏ'), + ('\u{955}', '\u{957}'), + ('\u{962}', '\u{963}'), + ('\u{981}', 'ঃ'), + ('\u{9be}', '\u{9c4}'), + ('ে', 'ৈ'), + ('ো', 'ৌ'), + ('\u{9d7}', '\u{9d7}'), + ('\u{9e2}', '\u{9e3}'), + ('\u{a01}', 'ਃ'), + ('ਾ', '\u{a42}'), + ('\u{a47}', '\u{a48}'), + ('\u{a4b}', '\u{a4c}'), + ('\u{a51}', '\u{a51}'), + ('\u{a70}', '\u{a71}'), + ('\u{a75}', '\u{a75}'), + ('\u{a81}', 'ઃ'), + ('ા', '\u{ac5}'), + ('\u{ac7}', 'ૉ'), + ('ો', 'ૌ'), + ('\u{ae2}', '\u{ae3}'), + ('\u{afa}', '\u{afc}'), + ('\u{b01}', 'ଃ'), + ('\u{b3e}', '\u{b44}'), + ('େ', 'ୈ'), + ('ୋ', 'ୌ'), + ('\u{b56}', '\u{b57}'), + ('\u{b62}', '\u{b63}'), + ('\u{b82}', '\u{b82}'), + ('\u{bbe}', 'ூ'), + ('ெ', 'ை'), + ('ொ', 'ௌ'), + ('\u{bd7}', '\u{bd7}'), + ('\u{c00}', '\u{c04}'), + ('\u{c3e}', 'ౄ'), + ('\u{c46}', '\u{c48}'), + ('\u{c4a}', '\u{c4c}'), + ('\u{c55}', '\u{c56}'), + ('\u{c62}', '\u{c63}'), + ('\u{c81}', 'ಃ'), + ('ಾ', 'ೄ'), + ('\u{cc6}', '\u{cc8}'), + ('\u{cca}', '\u{ccc}'), + ('\u{cd5}', '\u{cd6}'), + ('\u{ce2}', '\u{ce3}'), + ('ೳ', 'ೳ'), + ('\u{d00}', 'ഃ'), + ('\u{d3e}', '\u{d44}'), + ('െ', 'ൈ'), + ('ൊ', 'ൌ'), + ('\u{d57}', '\u{d57}'), + ('\u{d62}', '\u{d63}'), + ('\u{d81}', 'ඃ'), + ('\u{dcf}', '\u{dd4}'), + ('\u{dd6}', '\u{dd6}'), + ('ෘ', '\u{ddf}'), + ('ෲ', 'ෳ'), + ('\u{e31}', '\u{e31}'), + ('\u{e34}', '\u{e3a}'), + ('\u{e4d}', '\u{e4d}'), + ('\u{eb1}', '\u{eb1}'), + ('\u{eb4}', '\u{eb9}'), + ('\u{ebb}', '\u{ebc}'), + ('\u{ecd}', '\u{ecd}'), + ('\u{f71}', '\u{f83}'), + 
('\u{f8d}', '\u{f97}'), + ('\u{f99}', '\u{fbc}'), + ('ါ', '\u{1036}'), + ('း', 'း'), + ('ျ', '\u{103e}'), + ('ၖ', '\u{1059}'), + ('\u{105e}', '\u{1060}'), + ('ၢ', 'ၤ'), + ('ၧ', 'ၭ'), + ('\u{1071}', '\u{1074}'), + ('\u{1082}', '\u{108d}'), + ('ႏ', 'ႏ'), + ('ႚ', '\u{109d}'), + ('\u{1712}', '\u{1713}'), + ('\u{1732}', '\u{1733}'), + ('\u{1752}', '\u{1753}'), + ('\u{1772}', '\u{1773}'), + ('ា', 'ៈ'), + ('\u{1885}', '\u{1886}'), + ('\u{18a9}', '\u{18a9}'), + ('\u{1920}', 'ᤫ'), + ('ᤰ', 'ᤸ'), + ('\u{1a17}', '\u{1a1b}'), + ('ᩕ', '\u{1a5e}'), + ('ᩡ', '\u{1a74}'), + ('\u{1abf}', '\u{1ac0}'), + ('\u{1acc}', '\u{1ace}'), + ('\u{1b00}', 'ᬄ'), + ('\u{1b35}', '\u{1b43}'), + ('\u{1b80}', 'ᮂ'), + ('ᮡ', '\u{1ba9}'), + ('\u{1bac}', '\u{1bad}'), + ('ᯧ', '\u{1bf1}'), + ('ᰤ', '\u{1c36}'), + ('\u{1dd3}', '\u{1df4}'), + ('Ⓐ', 'ⓩ'), + ('\u{2de0}', '\u{2dff}'), + ('\u{a674}', '\u{a67b}'), + ('\u{a69e}', '\u{a69f}'), + ('\u{a802}', '\u{a802}'), + ('\u{a80b}', '\u{a80b}'), + ('ꠣ', 'ꠧ'), + ('ꢀ', 'ꢁ'), + ('ꢴ', 'ꣃ'), + ('\u{a8c5}', '\u{a8c5}'), + ('\u{a8ff}', '\u{a8ff}'), + ('\u{a926}', '\u{a92a}'), + ('\u{a947}', 'ꥒ'), + ('\u{a980}', 'ꦃ'), + ('ꦴ', 'ꦿ'), + ('\u{a9e5}', '\u{a9e5}'), + ('\u{aa29}', '\u{aa36}'), + ('\u{aa43}', '\u{aa43}'), + ('\u{aa4c}', 'ꩍ'), + ('ꩻ', 'ꩽ'), + ('\u{aab0}', '\u{aab0}'), + ('\u{aab2}', '\u{aab4}'), + ('\u{aab7}', '\u{aab8}'), + ('\u{aabe}', '\u{aabe}'), + ('ꫫ', 'ꫯ'), + ('ꫵ', 'ꫵ'), + ('ꯣ', 'ꯪ'), + ('\u{fb1e}', '\u{fb1e}'), + ('\u{10376}', '\u{1037a}'), + ('\u{10a01}', '\u{10a03}'), + ('\u{10a05}', '\u{10a06}'), + ('\u{10a0c}', '\u{10a0f}'), + ('\u{10d24}', '\u{10d27}'), + ('\u{10d69}', '\u{10d69}'), + ('\u{10eab}', '\u{10eac}'), + ('\u{10efc}', '\u{10efc}'), + ('𑀀', '𑀂'), + ('\u{11038}', '\u{11045}'), + ('\u{11073}', '\u{11074}'), + ('\u{11080}', '𑂂'), + ('𑂰', '𑂸'), + ('\u{110c2}', '\u{110c2}'), + ('\u{11100}', '\u{11102}'), + ('\u{11127}', '\u{11132}'), + ('𑅅', '𑅆'), + ('\u{11180}', '𑆂'), + ('𑆳', '𑆿'), + ('𑇎', '\u{111cf}'), + ('𑈬', '\u{11234}'), + ('\u{11237}', '\u{11237}'), + ('\u{1123e}', '\u{1123e}'), + ('\u{11241}', '\u{11241}'), + ('\u{112df}', '\u{112e8}'), + ('\u{11300}', '𑌃'), + ('\u{1133e}', '𑍄'), + ('𑍇', '𑍈'), + ('𑍋', '𑍌'), + ('\u{11357}', '\u{11357}'), + ('𑍢', '𑍣'), + ('\u{113b8}', '\u{113c0}'), + ('\u{113c2}', '\u{113c2}'), + ('\u{113c5}', '\u{113c5}'), + ('\u{113c7}', '𑏊'), + ('𑏌', '𑏍'), + ('𑐵', '𑑁'), + ('\u{11443}', '𑑅'), + ('\u{114b0}', '𑓁'), + ('\u{115af}', '\u{115b5}'), + ('𑖸', '𑖾'), + ('\u{115dc}', '\u{115dd}'), + ('𑘰', '𑘾'), + ('\u{11640}', '\u{11640}'), + ('\u{116ab}', '\u{116b5}'), + ('\u{1171d}', '\u{1172a}'), + ('𑠬', '𑠸'), + ('\u{11930}', '𑤵'), + ('𑤷', '𑤸'), + ('\u{1193b}', '\u{1193c}'), + ('𑥀', '𑥀'), + ('𑥂', '𑥂'), + ('𑧑', '\u{119d7}'), + ('\u{119da}', '𑧟'), + ('𑧤', '𑧤'), + ('\u{11a01}', '\u{11a0a}'), + ('\u{11a35}', '𑨹'), + ('\u{11a3b}', '\u{11a3e}'), + ('\u{11a51}', '\u{11a5b}'), + ('\u{11a8a}', '𑪗'), + ('𑰯', '\u{11c36}'), + ('\u{11c38}', '𑰾'), + ('\u{11c92}', '\u{11ca7}'), + ('𑲩', '\u{11cb6}'), + ('\u{11d31}', '\u{11d36}'), + ('\u{11d3a}', '\u{11d3a}'), + ('\u{11d3c}', '\u{11d3d}'), + ('\u{11d3f}', '\u{11d41}'), + ('\u{11d43}', '\u{11d43}'), + ('\u{11d47}', '\u{11d47}'), + ('𑶊', '𑶎'), + ('\u{11d90}', '\u{11d91}'), + ('𑶓', '𑶖'), + ('\u{11ef3}', '𑻶'), + ('\u{11f00}', '\u{11f01}'), + ('𑼃', '𑼃'), + ('𑼴', '\u{11f3a}'), + ('𑼾', '\u{11f40}'), + ('\u{1611e}', '\u{1612e}'), + ('\u{16f4f}', '\u{16f4f}'), + ('𖽑', '𖾇'), + ('\u{16f8f}', '\u{16f92}'), + ('\u{16ff0}', '\u{16ff1}'), + ('\u{1bc9e}', '\u{1bc9e}'), + ('\u{1e000}', '\u{1e006}'), + ('\u{1e008}', '\u{1e018}'), + 
('\u{1e01b}', '\u{1e021}'), + ('\u{1e023}', '\u{1e024}'), + ('\u{1e026}', '\u{1e02a}'), + ('\u{1e08f}', '\u{1e08f}'), + ('\u{1e947}', '\u{1e947}'), + ('🄰', '🅉'), + ('🅐', '🅩'), + ('🅰', '🆉'), +]; + +pub const OTHER_DEFAULT_IGNORABLE_CODE_POINT: &'static [(char, char)] = &[ + ('\u{34f}', '\u{34f}'), + ('ᅟ', 'ᅠ'), + ('\u{17b4}', '\u{17b5}'), + ('\u{2065}', '\u{2065}'), + ('ㅤ', 'ㅤ'), + ('ᅠ', 'ᅠ'), + ('\u{fff0}', '\u{fff8}'), + ('\u{e0000}', '\u{e0000}'), + ('\u{e0002}', '\u{e001f}'), + ('\u{e0080}', '\u{e00ff}'), + ('\u{e01f0}', '\u{e0fff}'), +]; + +pub const OTHER_GRAPHEME_EXTEND: &'static [(char, char)] = &[ + ('\u{9be}', '\u{9be}'), + ('\u{9d7}', '\u{9d7}'), + ('\u{b3e}', '\u{b3e}'), + ('\u{b57}', '\u{b57}'), + ('\u{bbe}', '\u{bbe}'), + ('\u{bd7}', '\u{bd7}'), + ('\u{cc0}', '\u{cc0}'), + ('\u{cc2}', '\u{cc2}'), + ('\u{cc7}', '\u{cc8}'), + ('\u{cca}', '\u{ccb}'), + ('\u{cd5}', '\u{cd6}'), + ('\u{d3e}', '\u{d3e}'), + ('\u{d57}', '\u{d57}'), + ('\u{dcf}', '\u{dcf}'), + ('\u{ddf}', '\u{ddf}'), + ('\u{1715}', '\u{1715}'), + ('\u{1734}', '\u{1734}'), + ('\u{1b35}', '\u{1b35}'), + ('\u{1b3b}', '\u{1b3b}'), + ('\u{1b3d}', '\u{1b3d}'), + ('\u{1b43}', '\u{1b44}'), + ('\u{1baa}', '\u{1baa}'), + ('\u{1bf2}', '\u{1bf3}'), + ('\u{200c}', '\u{200c}'), + ('\u{302e}', '\u{302f}'), + ('\u{a953}', '\u{a953}'), + ('\u{a9c0}', '\u{a9c0}'), + ('\u{ff9e}', '\u{ff9f}'), + ('\u{111c0}', '\u{111c0}'), + ('\u{11235}', '\u{11235}'), + ('\u{1133e}', '\u{1133e}'), + ('\u{1134d}', '\u{1134d}'), + ('\u{11357}', '\u{11357}'), + ('\u{113b8}', '\u{113b8}'), + ('\u{113c2}', '\u{113c2}'), + ('\u{113c5}', '\u{113c5}'), + ('\u{113c7}', '\u{113c9}'), + ('\u{113cf}', '\u{113cf}'), + ('\u{114b0}', '\u{114b0}'), + ('\u{114bd}', '\u{114bd}'), + ('\u{115af}', '\u{115af}'), + ('\u{116b6}', '\u{116b6}'), + ('\u{11930}', '\u{11930}'), + ('\u{1193d}', '\u{1193d}'), + ('\u{11f41}', '\u{11f41}'), + ('\u{16ff0}', '\u{16ff1}'), + ('\u{1d165}', '\u{1d166}'), + ('\u{1d16d}', '\u{1d172}'), + ('\u{e0020}', '\u{e007f}'), +]; + +pub const OTHER_ID_CONTINUE: &'static [(char, char)] = &[ + ('·', '·'), + ('·', '·'), + ('፩', '፱'), + ('᧚', '᧚'), + ('\u{200c}', '\u{200d}'), + ('・', '・'), + ('・', '・'), +]; + +pub const OTHER_ID_START: &'static [(char, char)] = + &[('\u{1885}', '\u{1886}'), ('℘', '℘'), ('℮', '℮'), ('゛', '゜')]; + +pub const OTHER_LOWERCASE: &'static [(char, char)] = &[ + ('ª', 'ª'), + ('º', 'º'), + ('ʰ', 'ʸ'), + ('ˀ', 'ˁ'), + ('ˠ', 'ˤ'), + ('\u{345}', '\u{345}'), + ('ͺ', 'ͺ'), + ('ჼ', 'ჼ'), + ('ᴬ', 'ᵪ'), + ('ᵸ', 'ᵸ'), + ('ᶛ', 'ᶿ'), + ('ⁱ', 'ⁱ'), + ('ⁿ', 'ⁿ'), + ('ₐ', 'ₜ'), + ('ⅰ', 'ⅿ'), + ('ⓐ', 'ⓩ'), + ('ⱼ', 'ⱽ'), + ('ꚜ', 'ꚝ'), + ('ꝰ', 'ꝰ'), + ('ꟲ', 'ꟴ'), + ('ꟸ', 'ꟹ'), + ('ꭜ', 'ꭟ'), + ('ꭩ', 'ꭩ'), + ('𐞀', '𐞀'), + ('𐞃', '𐞅'), + ('𐞇', '𐞰'), + ('𐞲', '𐞺'), + ('𞀰', '𞁭'), +]; + +pub const OTHER_MATH: &'static [(char, char)] = &[ + ('^', '^'), + ('ϐ', 'ϒ'), + ('ϕ', 'ϕ'), + ('ϰ', 'ϱ'), + ('ϴ', 'ϵ'), + ('‖', '‖'), + ('′', '‴'), + ('⁀', '⁀'), + ('\u{2061}', '\u{2064}'), + ('⁽', '⁾'), + ('₍', '₎'), + ('\u{20d0}', '\u{20dc}'), + ('\u{20e1}', '\u{20e1}'), + ('\u{20e5}', '\u{20e6}'), + ('\u{20eb}', '\u{20ef}'), + ('ℂ', 'ℂ'), + ('ℇ', 'ℇ'), + ('ℊ', 'ℓ'), + ('ℕ', 'ℕ'), + ('ℙ', 'ℝ'), + ('ℤ', 'ℤ'), + ('ℨ', '℩'), + ('ℬ', 'ℭ'), + ('ℯ', 'ℱ'), + ('ℳ', 'ℸ'), + ('ℼ', 'ℿ'), + ('ⅅ', 'ⅉ'), + ('↕', '↙'), + ('↜', '↟'), + ('↡', '↢'), + ('↤', '↥'), + ('↧', '↧'), + ('↩', '↭'), + ('↰', '↱'), + ('↶', '↷'), + ('↼', '⇍'), + ('⇐', '⇑'), + ('⇓', '⇓'), + ('⇕', '⇛'), + ('⇝', '⇝'), + ('⇤', '⇥'), + ('⌈', '⌋'), + ('⎴', '⎵'), + ('⎷', '⎷'), + ('⏐', '⏐'), + ('⏢', '⏢'), + ('■', '□'), + 
('▮', '▶'), + ('▼', '◀'), + ('◆', '◇'), + ('◊', '○'), + ('●', '◓'), + ('◢', '◢'), + ('◤', '◤'), + ('◧', '◬'), + ('★', '☆'), + ('♀', '♀'), + ('♂', '♂'), + ('♠', '♣'), + ('♭', '♮'), + ('⟅', '⟆'), + ('⟦', '⟯'), + ('⦃', '⦘'), + ('⧘', '⧛'), + ('⧼', '⧽'), + ('﹡', '﹡'), + ('﹣', '﹣'), + ('﹨', '﹨'), + ('\', '\'), + ('^', '^'), + ('𝐀', '𝑔'), + ('𝑖', '𝒜'), + ('𝒞', '𝒟'), + ('𝒢', '𝒢'), + ('𝒥', '𝒦'), + ('𝒩', '𝒬'), + ('𝒮', '𝒹'), + ('𝒻', '𝒻'), + ('𝒽', '𝓃'), + ('𝓅', '𝔅'), + ('𝔇', '𝔊'), + ('𝔍', '𝔔'), + ('𝔖', '𝔜'), + ('𝔞', '𝔹'), + ('𝔻', '𝔾'), + ('𝕀', '𝕄'), + ('𝕆', '𝕆'), + ('𝕊', '𝕐'), + ('𝕒', '𝚥'), + ('𝚨', '𝛀'), + ('𝛂', '𝛚'), + ('𝛜', '𝛺'), + ('𝛼', '𝜔'), + ('𝜖', '𝜴'), + ('𝜶', '𝝎'), + ('𝝐', '𝝮'), + ('𝝰', '𝞈'), + ('𝞊', '𝞨'), + ('𝞪', '𝟂'), + ('𝟄', '𝟋'), + ('𝟎', '𝟿'), + ('𞸀', '𞸃'), + ('𞸅', '𞸟'), + ('𞸡', '𞸢'), + ('𞸤', '𞸤'), + ('𞸧', '𞸧'), + ('𞸩', '𞸲'), + ('𞸴', '𞸷'), + ('𞸹', '𞸹'), + ('𞸻', '𞸻'), + ('𞹂', '𞹂'), + ('𞹇', '𞹇'), + ('𞹉', '𞹉'), + ('𞹋', '𞹋'), + ('𞹍', '𞹏'), + ('𞹑', '𞹒'), + ('𞹔', '𞹔'), + ('𞹗', '𞹗'), + ('𞹙', '𞹙'), + ('𞹛', '𞹛'), + ('𞹝', '𞹝'), + ('𞹟', '𞹟'), + ('𞹡', '𞹢'), + ('𞹤', '𞹤'), + ('𞹧', '𞹪'), + ('𞹬', '𞹲'), + ('𞹴', '𞹷'), + ('𞹹', '𞹼'), + ('𞹾', '𞹾'), + ('𞺀', '𞺉'), + ('𞺋', '𞺛'), + ('𞺡', '𞺣'), + ('𞺥', '𞺩'), + ('𞺫', '𞺻'), +]; + +pub const OTHER_UPPERCASE: &'static [(char, char)] = + &[('Ⅰ', 'Ⅿ'), ('Ⓐ', 'Ⓩ'), ('🄰', '🅉'), ('🅐', '🅩'), ('🅰', '🆉')]; + +pub const PATTERN_SYNTAX: &'static [(char, char)] = &[ + ('!', '/'), + (':', '@'), + ('[', '^'), + ('`', '`'), + ('{', '~'), + ('¡', '§'), + ('©', '©'), + ('«', '¬'), + ('®', '®'), + ('°', '±'), + ('¶', '¶'), + ('»', '»'), + ('¿', '¿'), + ('×', '×'), + ('÷', '÷'), + ('‐', '‧'), + ('‰', '‾'), + ('⁁', '⁓'), + ('⁕', '⁞'), + ('←', '\u{245f}'), + ('─', '❵'), + ('➔', '⯿'), + ('⸀', '\u{2e7f}'), + ('、', '〃'), + ('〈', '〠'), + ('〰', '〰'), + ('﴾', '﴿'), + ('﹅', '﹆'), +]; + +pub const PATTERN_WHITE_SPACE: &'static [(char, char)] = &[ + ('\t', '\r'), + (' ', ' '), + ('\u{85}', '\u{85}'), + ('\u{200e}', '\u{200f}'), + ('\u{2028}', '\u{2029}'), +]; + +pub const PREPENDED_CONCATENATION_MARK: &'static [(char, char)] = &[ + ('\u{600}', '\u{605}'), + ('\u{6dd}', '\u{6dd}'), + ('\u{70f}', '\u{70f}'), + ('\u{890}', '\u{891}'), + ('\u{8e2}', '\u{8e2}'), + ('\u{110bd}', '\u{110bd}'), + ('\u{110cd}', '\u{110cd}'), +]; + +pub const QUOTATION_MARK: &'static [(char, char)] = &[ + ('"', '"'), + ('\'', '\''), + ('«', '«'), + ('»', '»'), + ('‘', '‟'), + ('‹', '›'), + ('⹂', '⹂'), + ('「', '』'), + ('〝', '〟'), + ('﹁', '﹄'), + ('"', '"'), + (''', '''), + ('「', '」'), +]; + +pub const RADICAL: &'static [(char, char)] = + &[('⺀', '⺙'), ('⺛', '⻳'), ('⼀', '⿕')]; + +pub const REGIONAL_INDICATOR: &'static [(char, char)] = &[('🇦', '🇿')]; + +pub const SENTENCE_TERMINAL: &'static [(char, char)] = &[ + ('!', '!'), + ('.', '.'), + ('?', '?'), + ('։', '։'), + ('؝', '؟'), + ('۔', '۔'), + ('܀', '܂'), + ('߹', '߹'), + ('࠷', '࠷'), + ('࠹', '࠹'), + ('࠽', '࠾'), + ('।', '॥'), + ('၊', '။'), + ('።', '።'), + ('፧', '፨'), + ('᙮', '᙮'), + ('᜵', '᜶'), + ('។', '៕'), + ('᠃', '᠃'), + ('᠉', '᠉'), + ('᥄', '᥅'), + ('᪨', '᪫'), + ('᭎', '᭏'), + ('᭚', '᭛'), + ('᭞', '᭟'), + ('᭽', '᭿'), + ('᰻', '᰼'), + ('᱾', '᱿'), + ('․', '․'), + ('‼', '‽'), + ('⁇', '⁉'), + ('⳹', '⳻'), + ('⸮', '⸮'), + ('⸼', '⸼'), + ('⹓', '⹔'), + ('。', '。'), + ('꓿', '꓿'), + ('꘎', '꘏'), + ('꛳', '꛳'), + ('꛷', '꛷'), + ('꡶', '꡷'), + ('꣎', '꣏'), + ('꤯', '꤯'), + ('꧈', '꧉'), + ('꩝', '꩟'), + ('꫰', '꫱'), + ('꯫', '꯫'), + ('︒', '︒'), + ('︕', '︖'), + ('﹒', '﹒'), + ('﹖', '﹗'), + ('!', '!'), + ('.', '.'), + ('?', '?'), + ('。', '。'), + ('𐩖', '𐩗'), + ('𐽕', '𐽙'), + ('𐾆', '𐾉'), + ('𑁇', '𑁈'), + 
('𑂾', '𑃁'), + ('𑅁', '𑅃'), + ('𑇅', '𑇆'), + ('𑇍', '𑇍'), + ('𑇞', '𑇟'), + ('𑈸', '𑈹'), + ('𑈻', '𑈼'), + ('𑊩', '𑊩'), + ('𑏔', '𑏕'), + ('𑑋', '𑑌'), + ('𑗂', '𑗃'), + ('𑗉', '𑗗'), + ('𑙁', '𑙂'), + ('𑜼', '𑜾'), + ('𑥄', '𑥄'), + ('𑥆', '𑥆'), + ('𑩂', '𑩃'), + ('𑪛', '𑪜'), + ('𑱁', '𑱂'), + ('𑻷', '𑻸'), + ('𑽃', '𑽄'), + ('𖩮', '𖩯'), + ('𖫵', '𖫵'), + ('𖬷', '𖬸'), + ('𖭄', '𖭄'), + ('𖵮', '𖵯'), + ('𖺘', '𖺘'), + ('𛲟', '𛲟'), + ('𝪈', '𝪈'), +]; + +pub const SOFT_DOTTED: &'static [(char, char)] = &[ + ('i', 'j'), + ('į', 'į'), + ('ɉ', 'ɉ'), + ('ɨ', 'ɨ'), + ('ʝ', 'ʝ'), + ('ʲ', 'ʲ'), + ('ϳ', 'ϳ'), + ('і', 'і'), + ('ј', 'ј'), + ('ᵢ', 'ᵢ'), + ('ᶖ', 'ᶖ'), + ('ᶤ', 'ᶤ'), + ('ᶨ', 'ᶨ'), + ('ḭ', 'ḭ'), + ('ị', 'ị'), + ('ⁱ', 'ⁱ'), + ('ⅈ', 'ⅉ'), + ('ⱼ', 'ⱼ'), + ('𝐢', '𝐣'), + ('𝑖', '𝑗'), + ('𝒊', '𝒋'), + ('𝒾', '𝒿'), + ('𝓲', '𝓳'), + ('𝔦', '𝔧'), + ('𝕚', '𝕛'), + ('𝖎', '𝖏'), + ('𝗂', '𝗃'), + ('𝗶', '𝗷'), + ('𝘪', '𝘫'), + ('𝙞', '𝙟'), + ('𝚒', '𝚓'), + ('𝼚', '𝼚'), + ('𞁌', '𞁍'), + ('𞁨', '𞁨'), +]; + +pub const TERMINAL_PUNCTUATION: &'static [(char, char)] = &[ + ('!', '!'), + (',', ','), + ('.', '.'), + (':', ';'), + ('?', '?'), + (';', ';'), + ('·', '·'), + ('։', '։'), + ('׃', '׃'), + ('،', '،'), + ('؛', '؛'), + ('؝', '؟'), + ('۔', '۔'), + ('܀', '܊'), + ('܌', '܌'), + ('߸', '߹'), + ('࠰', '࠵'), + ('࠷', '࠾'), + ('࡞', '࡞'), + ('।', '॥'), + ('๚', '๛'), + ('༈', '༈'), + ('།', '༒'), + ('၊', '။'), + ('፡', '፨'), + ('᙮', '᙮'), + ('᛫', '᛭'), + ('᜵', '᜶'), + ('។', '៖'), + ('៚', '៚'), + ('᠂', '᠅'), + ('᠈', '᠉'), + ('᥄', '᥅'), + ('᪨', '᪫'), + ('᭎', '᭏'), + ('᭚', '᭛'), + ('᭝', '᭟'), + ('᭽', '᭿'), + ('᰻', '᰿'), + ('᱾', '᱿'), + ('․', '․'), + ('‼', '‽'), + ('⁇', '⁉'), + ('⳹', '⳻'), + ('⸮', '⸮'), + ('⸼', '⸼'), + ('⹁', '⹁'), + ('⹌', '⹌'), + ('⹎', '⹏'), + ('⹓', '⹔'), + ('、', '。'), + ('꓾', '꓿'), + ('꘍', '꘏'), + ('꛳', '꛷'), + ('꡶', '꡷'), + ('꣎', '꣏'), + ('꤯', '꤯'), + ('꧇', '꧉'), + ('꩝', '꩟'), + ('꫟', '꫟'), + ('꫰', '꫱'), + ('꯫', '꯫'), + ('︒', '︒'), + ('︕', '︖'), + ('﹐', '﹒'), + ('﹔', '﹗'), + ('!', '!'), + (',', ','), + ('.', '.'), + (':', ';'), + ('?', '?'), + ('。', '。'), + ('、', '、'), + ('𐎟', '𐎟'), + ('𐏐', '𐏐'), + ('𐡗', '𐡗'), + ('𐤟', '𐤟'), + ('𐩖', '𐩗'), + ('𐫰', '𐫵'), + ('𐬺', '𐬿'), + ('𐮙', '𐮜'), + ('𐽕', '𐽙'), + ('𐾆', '𐾉'), + ('𑁇', '𑁍'), + ('𑂾', '𑃁'), + ('𑅁', '𑅃'), + ('𑇅', '𑇆'), + ('𑇍', '𑇍'), + ('𑇞', '𑇟'), + ('𑈸', '𑈼'), + ('𑊩', '𑊩'), + ('𑏔', '𑏕'), + ('𑑋', '𑑍'), + ('𑑚', '𑑛'), + ('𑗂', '𑗅'), + ('𑗉', '𑗗'), + ('𑙁', '𑙂'), + ('𑜼', '𑜾'), + ('𑥄', '𑥄'), + ('𑥆', '𑥆'), + ('𑩂', '𑩃'), + ('𑪛', '𑪜'), + ('𑪡', '𑪢'), + ('𑱁', '𑱃'), + ('𑱱', '𑱱'), + ('𑻷', '𑻸'), + ('𑽃', '𑽄'), + ('𒑰', '𒑴'), + ('𖩮', '𖩯'), + ('𖫵', '𖫵'), + ('𖬷', '𖬹'), + ('𖭄', '𖭄'), + ('𖵮', '𖵯'), + ('𖺗', '𖺘'), + ('𛲟', '𛲟'), + ('𝪇', '𝪊'), +]; + +pub const UNIFIED_IDEOGRAPH: &'static [(char, char)] = &[ + ('㐀', '䶿'), + ('一', '鿿'), + ('﨎', '﨏'), + ('﨑', '﨑'), + ('﨓', '﨔'), + ('﨟', '﨟'), + ('﨡', '﨡'), + ('﨣', '﨤'), + ('﨧', '﨩'), + ('𠀀', '𪛟'), + ('𪜀', '𫜹'), + ('𫝀', '𫠝'), + ('𫠠', '𬺡'), + ('𬺰', '𮯠'), + ('𮯰', '𮹝'), + ('𰀀', '𱍊'), + ('𱍐', '𲎯'), +]; + +pub const UPPERCASE: &'static [(char, char)] = &[ + ('A', 'Z'), + ('À', 'Ö'), + ('Ø', 'Þ'), + ('Ā', 'Ā'), + ('Ă', 'Ă'), + ('Ą', 'Ą'), + ('Ć', 'Ć'), + ('Ĉ', 'Ĉ'), + ('Ċ', 'Ċ'), + ('Č', 'Č'), + ('Ď', 'Ď'), + ('Đ', 'Đ'), + ('Ē', 'Ē'), + ('Ĕ', 'Ĕ'), + ('Ė', 'Ė'), + ('Ę', 'Ę'), + ('Ě', 'Ě'), + ('Ĝ', 'Ĝ'), + ('Ğ', 'Ğ'), + ('Ġ', 'Ġ'), + ('Ģ', 'Ģ'), + ('Ĥ', 'Ĥ'), + ('Ħ', 'Ħ'), + ('Ĩ', 'Ĩ'), + ('Ī', 'Ī'), + ('Ĭ', 'Ĭ'), + ('Į', 'Į'), + ('İ', 'İ'), + ('IJ', 'IJ'), + ('Ĵ', 'Ĵ'), + ('Ķ', 'Ķ'), + ('Ĺ', 'Ĺ'), + ('Ļ', 'Ļ'), + ('Ľ', 'Ľ'), + ('Ŀ', 'Ŀ'), + ('Ł', 'Ł'), + ('Ń', 'Ń'), + ('Ņ', 'Ņ'), + ('Ň', 'Ň'), + ('Ŋ', 'Ŋ'), + 
('Ō', 'Ō'), + ('Ŏ', 'Ŏ'), + ('Ő', 'Ő'), + ('Œ', 'Œ'), + ('Ŕ', 'Ŕ'), + ('Ŗ', 'Ŗ'), + ('Ř', 'Ř'), + ('Ś', 'Ś'), + ('Ŝ', 'Ŝ'), + ('Ş', 'Ş'), + ('Š', 'Š'), + ('Ţ', 'Ţ'), + ('Ť', 'Ť'), + ('Ŧ', 'Ŧ'), + ('Ũ', 'Ũ'), + ('Ū', 'Ū'), + ('Ŭ', 'Ŭ'), + ('Ů', 'Ů'), + ('Ű', 'Ű'), + ('Ų', 'Ų'), + ('Ŵ', 'Ŵ'), + ('Ŷ', 'Ŷ'), + ('Ÿ', 'Ź'), + ('Ż', 'Ż'), + ('Ž', 'Ž'), + ('Ɓ', 'Ƃ'), + ('Ƅ', 'Ƅ'), + ('Ɔ', 'Ƈ'), + ('Ɖ', 'Ƌ'), + ('Ǝ', 'Ƒ'), + ('Ɠ', 'Ɣ'), + ('Ɩ', 'Ƙ'), + ('Ɯ', 'Ɲ'), + ('Ɵ', 'Ơ'), + ('Ƣ', 'Ƣ'), + ('Ƥ', 'Ƥ'), + ('Ʀ', 'Ƨ'), + ('Ʃ', 'Ʃ'), + ('Ƭ', 'Ƭ'), + ('Ʈ', 'Ư'), + ('Ʊ', 'Ƴ'), + ('Ƶ', 'Ƶ'), + ('Ʒ', 'Ƹ'), + ('Ƽ', 'Ƽ'), + ('DŽ', 'DŽ'), + ('LJ', 'LJ'), + ('NJ', 'NJ'), + ('Ǎ', 'Ǎ'), + ('Ǐ', 'Ǐ'), + ('Ǒ', 'Ǒ'), + ('Ǔ', 'Ǔ'), + ('Ǖ', 'Ǖ'), + ('Ǘ', 'Ǘ'), + ('Ǚ', 'Ǚ'), + ('Ǜ', 'Ǜ'), + ('Ǟ', 'Ǟ'), + ('Ǡ', 'Ǡ'), + ('Ǣ', 'Ǣ'), + ('Ǥ', 'Ǥ'), + ('Ǧ', 'Ǧ'), + ('Ǩ', 'Ǩ'), + ('Ǫ', 'Ǫ'), + ('Ǭ', 'Ǭ'), + ('Ǯ', 'Ǯ'), + ('DZ', 'DZ'), + ('Ǵ', 'Ǵ'), + ('Ƕ', 'Ǹ'), + ('Ǻ', 'Ǻ'), + ('Ǽ', 'Ǽ'), + ('Ǿ', 'Ǿ'), + ('Ȁ', 'Ȁ'), + ('Ȃ', 'Ȃ'), + ('Ȅ', 'Ȅ'), + ('Ȇ', 'Ȇ'), + ('Ȉ', 'Ȉ'), + ('Ȋ', 'Ȋ'), + ('Ȍ', 'Ȍ'), + ('Ȏ', 'Ȏ'), + ('Ȑ', 'Ȑ'), + ('Ȓ', 'Ȓ'), + ('Ȕ', 'Ȕ'), + ('Ȗ', 'Ȗ'), + ('Ș', 'Ș'), + ('Ț', 'Ț'), + ('Ȝ', 'Ȝ'), + ('Ȟ', 'Ȟ'), + ('Ƞ', 'Ƞ'), + ('Ȣ', 'Ȣ'), + ('Ȥ', 'Ȥ'), + ('Ȧ', 'Ȧ'), + ('Ȩ', 'Ȩ'), + ('Ȫ', 'Ȫ'), + ('Ȭ', 'Ȭ'), + ('Ȯ', 'Ȯ'), + ('Ȱ', 'Ȱ'), + ('Ȳ', 'Ȳ'), + ('Ⱥ', 'Ȼ'), + ('Ƚ', 'Ⱦ'), + ('Ɂ', 'Ɂ'), + ('Ƀ', 'Ɇ'), + ('Ɉ', 'Ɉ'), + ('Ɋ', 'Ɋ'), + ('Ɍ', 'Ɍ'), + ('Ɏ', 'Ɏ'), + ('Ͱ', 'Ͱ'), + ('Ͳ', 'Ͳ'), + ('Ͷ', 'Ͷ'), + ('Ϳ', 'Ϳ'), + ('Ά', 'Ά'), + ('Έ', 'Ί'), + ('Ό', 'Ό'), + ('Ύ', 'Ώ'), + ('Α', 'Ρ'), + ('Σ', 'Ϋ'), + ('Ϗ', 'Ϗ'), + ('ϒ', 'ϔ'), + ('Ϙ', 'Ϙ'), + ('Ϛ', 'Ϛ'), + ('Ϝ', 'Ϝ'), + ('Ϟ', 'Ϟ'), + ('Ϡ', 'Ϡ'), + ('Ϣ', 'Ϣ'), + ('Ϥ', 'Ϥ'), + ('Ϧ', 'Ϧ'), + ('Ϩ', 'Ϩ'), + ('Ϫ', 'Ϫ'), + ('Ϭ', 'Ϭ'), + ('Ϯ', 'Ϯ'), + ('ϴ', 'ϴ'), + ('Ϸ', 'Ϸ'), + ('Ϲ', 'Ϻ'), + ('Ͻ', 'Я'), + ('Ѡ', 'Ѡ'), + ('Ѣ', 'Ѣ'), + ('Ѥ', 'Ѥ'), + ('Ѧ', 'Ѧ'), + ('Ѩ', 'Ѩ'), + ('Ѫ', 'Ѫ'), + ('Ѭ', 'Ѭ'), + ('Ѯ', 'Ѯ'), + ('Ѱ', 'Ѱ'), + ('Ѳ', 'Ѳ'), + ('Ѵ', 'Ѵ'), + ('Ѷ', 'Ѷ'), + ('Ѹ', 'Ѹ'), + ('Ѻ', 'Ѻ'), + ('Ѽ', 'Ѽ'), + ('Ѿ', 'Ѿ'), + ('Ҁ', 'Ҁ'), + ('Ҋ', 'Ҋ'), + ('Ҍ', 'Ҍ'), + ('Ҏ', 'Ҏ'), + ('Ґ', 'Ґ'), + ('Ғ', 'Ғ'), + ('Ҕ', 'Ҕ'), + ('Җ', 'Җ'), + ('Ҙ', 'Ҙ'), + ('Қ', 'Қ'), + ('Ҝ', 'Ҝ'), + ('Ҟ', 'Ҟ'), + ('Ҡ', 'Ҡ'), + ('Ң', 'Ң'), + ('Ҥ', 'Ҥ'), + ('Ҧ', 'Ҧ'), + ('Ҩ', 'Ҩ'), + ('Ҫ', 'Ҫ'), + ('Ҭ', 'Ҭ'), + ('Ү', 'Ү'), + ('Ұ', 'Ұ'), + ('Ҳ', 'Ҳ'), + ('Ҵ', 'Ҵ'), + ('Ҷ', 'Ҷ'), + ('Ҹ', 'Ҹ'), + ('Һ', 'Һ'), + ('Ҽ', 'Ҽ'), + ('Ҿ', 'Ҿ'), + ('Ӏ', 'Ӂ'), + ('Ӄ', 'Ӄ'), + ('Ӆ', 'Ӆ'), + ('Ӈ', 'Ӈ'), + ('Ӊ', 'Ӊ'), + ('Ӌ', 'Ӌ'), + ('Ӎ', 'Ӎ'), + ('Ӑ', 'Ӑ'), + ('Ӓ', 'Ӓ'), + ('Ӕ', 'Ӕ'), + ('Ӗ', 'Ӗ'), + ('Ә', 'Ә'), + ('Ӛ', 'Ӛ'), + ('Ӝ', 'Ӝ'), + ('Ӟ', 'Ӟ'), + ('Ӡ', 'Ӡ'), + ('Ӣ', 'Ӣ'), + ('Ӥ', 'Ӥ'), + ('Ӧ', 'Ӧ'), + ('Ө', 'Ө'), + ('Ӫ', 'Ӫ'), + ('Ӭ', 'Ӭ'), + ('Ӯ', 'Ӯ'), + ('Ӱ', 'Ӱ'), + ('Ӳ', 'Ӳ'), + ('Ӵ', 'Ӵ'), + ('Ӷ', 'Ӷ'), + ('Ӹ', 'Ӹ'), + ('Ӻ', 'Ӻ'), + ('Ӽ', 'Ӽ'), + ('Ӿ', 'Ӿ'), + ('Ԁ', 'Ԁ'), + ('Ԃ', 'Ԃ'), + ('Ԅ', 'Ԅ'), + ('Ԇ', 'Ԇ'), + ('Ԉ', 'Ԉ'), + ('Ԋ', 'Ԋ'), + ('Ԍ', 'Ԍ'), + ('Ԏ', 'Ԏ'), + ('Ԑ', 'Ԑ'), + ('Ԓ', 'Ԓ'), + ('Ԕ', 'Ԕ'), + ('Ԗ', 'Ԗ'), + ('Ԙ', 'Ԙ'), + ('Ԛ', 'Ԛ'), + ('Ԝ', 'Ԝ'), + ('Ԟ', 'Ԟ'), + ('Ԡ', 'Ԡ'), + ('Ԣ', 'Ԣ'), + ('Ԥ', 'Ԥ'), + ('Ԧ', 'Ԧ'), + ('Ԩ', 'Ԩ'), + ('Ԫ', 'Ԫ'), + ('Ԭ', 'Ԭ'), + ('Ԯ', 'Ԯ'), + ('Ա', 'Ֆ'), + ('Ⴀ', 'Ⴥ'), + ('Ⴧ', 'Ⴧ'), + ('Ⴭ', 'Ⴭ'), + ('Ꭰ', 'Ᏽ'), + ('Ᲊ', 'Ᲊ'), + ('Ა', 'Ჺ'), + ('Ჽ', 'Ჿ'), + ('Ḁ', 'Ḁ'), + ('Ḃ', 'Ḃ'), + ('Ḅ', 'Ḅ'), + ('Ḇ', 'Ḇ'), + ('Ḉ', 'Ḉ'), + ('Ḋ', 'Ḋ'), + ('Ḍ', 'Ḍ'), + ('Ḏ', 'Ḏ'), + ('Ḑ', 'Ḑ'), + ('Ḓ', 'Ḓ'), + ('Ḕ', 'Ḕ'), + ('Ḗ', 'Ḗ'), + ('Ḙ', 'Ḙ'), + ('Ḛ', 'Ḛ'), + 
('Ḝ', 'Ḝ'), + ('Ḟ', 'Ḟ'), + ('Ḡ', 'Ḡ'), + ('Ḣ', 'Ḣ'), + ('Ḥ', 'Ḥ'), + ('Ḧ', 'Ḧ'), + ('Ḩ', 'Ḩ'), + ('Ḫ', 'Ḫ'), + ('Ḭ', 'Ḭ'), + ('Ḯ', 'Ḯ'), + ('Ḱ', 'Ḱ'), + ('Ḳ', 'Ḳ'), + ('Ḵ', 'Ḵ'), + ('Ḷ', 'Ḷ'), + ('Ḹ', 'Ḹ'), + ('Ḻ', 'Ḻ'), + ('Ḽ', 'Ḽ'), + ('Ḿ', 'Ḿ'), + ('Ṁ', 'Ṁ'), + ('Ṃ', 'Ṃ'), + ('Ṅ', 'Ṅ'), + ('Ṇ', 'Ṇ'), + ('Ṉ', 'Ṉ'), + ('Ṋ', 'Ṋ'), + ('Ṍ', 'Ṍ'), + ('Ṏ', 'Ṏ'), + ('Ṑ', 'Ṑ'), + ('Ṓ', 'Ṓ'), + ('Ṕ', 'Ṕ'), + ('Ṗ', 'Ṗ'), + ('Ṙ', 'Ṙ'), + ('Ṛ', 'Ṛ'), + ('Ṝ', 'Ṝ'), + ('Ṟ', 'Ṟ'), + ('Ṡ', 'Ṡ'), + ('Ṣ', 'Ṣ'), + ('Ṥ', 'Ṥ'), + ('Ṧ', 'Ṧ'), + ('Ṩ', 'Ṩ'), + ('Ṫ', 'Ṫ'), + ('Ṭ', 'Ṭ'), + ('Ṯ', 'Ṯ'), + ('Ṱ', 'Ṱ'), + ('Ṳ', 'Ṳ'), + ('Ṵ', 'Ṵ'), + ('Ṷ', 'Ṷ'), + ('Ṹ', 'Ṹ'), + ('Ṻ', 'Ṻ'), + ('Ṽ', 'Ṽ'), + ('Ṿ', 'Ṿ'), + ('Ẁ', 'Ẁ'), + ('Ẃ', 'Ẃ'), + ('Ẅ', 'Ẅ'), + ('Ẇ', 'Ẇ'), + ('Ẉ', 'Ẉ'), + ('Ẋ', 'Ẋ'), + ('Ẍ', 'Ẍ'), + ('Ẏ', 'Ẏ'), + ('Ẑ', 'Ẑ'), + ('Ẓ', 'Ẓ'), + ('Ẕ', 'Ẕ'), + ('ẞ', 'ẞ'), + ('Ạ', 'Ạ'), + ('Ả', 'Ả'), + ('Ấ', 'Ấ'), + ('Ầ', 'Ầ'), + ('Ẩ', 'Ẩ'), + ('Ẫ', 'Ẫ'), + ('Ậ', 'Ậ'), + ('Ắ', 'Ắ'), + ('Ằ', 'Ằ'), + ('Ẳ', 'Ẳ'), + ('Ẵ', 'Ẵ'), + ('Ặ', 'Ặ'), + ('Ẹ', 'Ẹ'), + ('Ẻ', 'Ẻ'), + ('Ẽ', 'Ẽ'), + ('Ế', 'Ế'), + ('Ề', 'Ề'), + ('Ể', 'Ể'), + ('Ễ', 'Ễ'), + ('Ệ', 'Ệ'), + ('Ỉ', 'Ỉ'), + ('Ị', 'Ị'), + ('Ọ', 'Ọ'), + ('Ỏ', 'Ỏ'), + ('Ố', 'Ố'), + ('Ồ', 'Ồ'), + ('Ổ', 'Ổ'), + ('Ỗ', 'Ỗ'), + ('Ộ', 'Ộ'), + ('Ớ', 'Ớ'), + ('Ờ', 'Ờ'), + ('Ở', 'Ở'), + ('Ỡ', 'Ỡ'), + ('Ợ', 'Ợ'), + ('Ụ', 'Ụ'), + ('Ủ', 'Ủ'), + ('Ứ', 'Ứ'), + ('Ừ', 'Ừ'), + ('Ử', 'Ử'), + ('Ữ', 'Ữ'), + ('Ự', 'Ự'), + ('Ỳ', 'Ỳ'), + ('Ỵ', 'Ỵ'), + ('Ỷ', 'Ỷ'), + ('Ỹ', 'Ỹ'), + ('Ỻ', 'Ỻ'), + ('Ỽ', 'Ỽ'), + ('Ỿ', 'Ỿ'), + ('Ἀ', 'Ἇ'), + ('Ἐ', 'Ἕ'), + ('Ἠ', 'Ἧ'), + ('Ἰ', 'Ἷ'), + ('Ὀ', 'Ὅ'), + ('Ὑ', 'Ὑ'), + ('Ὓ', 'Ὓ'), + ('Ὕ', 'Ὕ'), + ('Ὗ', 'Ὗ'), + ('Ὠ', 'Ὧ'), + ('Ᾰ', 'Ά'), + ('Ὲ', 'Ή'), + ('Ῐ', 'Ί'), + ('Ῠ', 'Ῥ'), + ('Ὸ', 'Ώ'), + ('ℂ', 'ℂ'), + ('ℇ', 'ℇ'), + ('ℋ', 'ℍ'), + ('ℐ', 'ℒ'), + ('ℕ', 'ℕ'), + ('ℙ', 'ℝ'), + ('ℤ', 'ℤ'), + ('Ω', 'Ω'), + ('ℨ', 'ℨ'), + ('K', 'ℭ'), + ('ℰ', 'ℳ'), + ('ℾ', 'ℿ'), + ('ⅅ', 'ⅅ'), + ('Ⅰ', 'Ⅿ'), + ('Ↄ', 'Ↄ'), + ('Ⓐ', 'Ⓩ'), + ('Ⰰ', 'Ⱟ'), + ('Ⱡ', 'Ⱡ'), + ('Ɫ', 'Ɽ'), + ('Ⱨ', 'Ⱨ'), + ('Ⱪ', 'Ⱪ'), + ('Ⱬ', 'Ⱬ'), + ('Ɑ', 'Ɒ'), + ('Ⱳ', 'Ⱳ'), + ('Ⱶ', 'Ⱶ'), + ('Ȿ', 'Ⲁ'), + ('Ⲃ', 'Ⲃ'), + ('Ⲅ', 'Ⲅ'), + ('Ⲇ', 'Ⲇ'), + ('Ⲉ', 'Ⲉ'), + ('Ⲋ', 'Ⲋ'), + ('Ⲍ', 'Ⲍ'), + ('Ⲏ', 'Ⲏ'), + ('Ⲑ', 'Ⲑ'), + ('Ⲓ', 'Ⲓ'), + ('Ⲕ', 'Ⲕ'), + ('Ⲗ', 'Ⲗ'), + ('Ⲙ', 'Ⲙ'), + ('Ⲛ', 'Ⲛ'), + ('Ⲝ', 'Ⲝ'), + ('Ⲟ', 'Ⲟ'), + ('Ⲡ', 'Ⲡ'), + ('Ⲣ', 'Ⲣ'), + ('Ⲥ', 'Ⲥ'), + ('Ⲧ', 'Ⲧ'), + ('Ⲩ', 'Ⲩ'), + ('Ⲫ', 'Ⲫ'), + ('Ⲭ', 'Ⲭ'), + ('Ⲯ', 'Ⲯ'), + ('Ⲱ', 'Ⲱ'), + ('Ⲳ', 'Ⲳ'), + ('Ⲵ', 'Ⲵ'), + ('Ⲷ', 'Ⲷ'), + ('Ⲹ', 'Ⲹ'), + ('Ⲻ', 'Ⲻ'), + ('Ⲽ', 'Ⲽ'), + ('Ⲿ', 'Ⲿ'), + ('Ⳁ', 'Ⳁ'), + ('Ⳃ', 'Ⳃ'), + ('Ⳅ', 'Ⳅ'), + ('Ⳇ', 'Ⳇ'), + ('Ⳉ', 'Ⳉ'), + ('Ⳋ', 'Ⳋ'), + ('Ⳍ', 'Ⳍ'), + ('Ⳏ', 'Ⳏ'), + ('Ⳑ', 'Ⳑ'), + ('Ⳓ', 'Ⳓ'), + ('Ⳕ', 'Ⳕ'), + ('Ⳗ', 'Ⳗ'), + ('Ⳙ', 'Ⳙ'), + ('Ⳛ', 'Ⳛ'), + ('Ⳝ', 'Ⳝ'), + ('Ⳟ', 'Ⳟ'), + ('Ⳡ', 'Ⳡ'), + ('Ⳣ', 'Ⳣ'), + ('Ⳬ', 'Ⳬ'), + ('Ⳮ', 'Ⳮ'), + ('Ⳳ', 'Ⳳ'), + ('Ꙁ', 'Ꙁ'), + ('Ꙃ', 'Ꙃ'), + ('Ꙅ', 'Ꙅ'), + ('Ꙇ', 'Ꙇ'), + ('Ꙉ', 'Ꙉ'), + ('Ꙋ', 'Ꙋ'), + ('Ꙍ', 'Ꙍ'), + ('Ꙏ', 'Ꙏ'), + ('Ꙑ', 'Ꙑ'), + ('Ꙓ', 'Ꙓ'), + ('Ꙕ', 'Ꙕ'), + ('Ꙗ', 'Ꙗ'), + ('Ꙙ', 'Ꙙ'), + ('Ꙛ', 'Ꙛ'), + ('Ꙝ', 'Ꙝ'), + ('Ꙟ', 'Ꙟ'), + ('Ꙡ', 'Ꙡ'), + ('Ꙣ', 'Ꙣ'), + ('Ꙥ', 'Ꙥ'), + ('Ꙧ', 'Ꙧ'), + ('Ꙩ', 'Ꙩ'), + ('Ꙫ', 'Ꙫ'), + ('Ꙭ', 'Ꙭ'), + ('Ꚁ', 'Ꚁ'), + ('Ꚃ', 'Ꚃ'), + ('Ꚅ', 'Ꚅ'), + ('Ꚇ', 'Ꚇ'), + ('Ꚉ', 'Ꚉ'), + ('Ꚋ', 'Ꚋ'), + ('Ꚍ', 'Ꚍ'), + ('Ꚏ', 'Ꚏ'), + ('Ꚑ', 'Ꚑ'), + ('Ꚓ', 'Ꚓ'), + ('Ꚕ', 'Ꚕ'), + ('Ꚗ', 'Ꚗ'), + ('Ꚙ', 'Ꚙ'), + ('Ꚛ', 'Ꚛ'), + ('Ꜣ', 'Ꜣ'), + ('Ꜥ', 'Ꜥ'), + ('Ꜧ', 'Ꜧ'), + ('Ꜩ', 'Ꜩ'), + ('Ꜫ', 'Ꜫ'), + ('Ꜭ', 'Ꜭ'), + ('Ꜯ', 'Ꜯ'), + ('Ꜳ', 'Ꜳ'), + ('Ꜵ', 'Ꜵ'), + ('Ꜷ', 'Ꜷ'), + ('Ꜹ', 'Ꜹ'), + ('Ꜻ', 'Ꜻ'), + ('Ꜽ', 'Ꜽ'), + ('Ꜿ', 'Ꜿ'), 
+ ('Ꝁ', 'Ꝁ'), + ('Ꝃ', 'Ꝃ'), + ('Ꝅ', 'Ꝅ'), + ('Ꝇ', 'Ꝇ'), + ('Ꝉ', 'Ꝉ'), + ('Ꝋ', 'Ꝋ'), + ('Ꝍ', 'Ꝍ'), + ('Ꝏ', 'Ꝏ'), + ('Ꝑ', 'Ꝑ'), + ('Ꝓ', 'Ꝓ'), + ('Ꝕ', 'Ꝕ'), + ('Ꝗ', 'Ꝗ'), + ('Ꝙ', 'Ꝙ'), + ('Ꝛ', 'Ꝛ'), + ('Ꝝ', 'Ꝝ'), + ('Ꝟ', 'Ꝟ'), + ('Ꝡ', 'Ꝡ'), + ('Ꝣ', 'Ꝣ'), + ('Ꝥ', 'Ꝥ'), + ('Ꝧ', 'Ꝧ'), + ('Ꝩ', 'Ꝩ'), + ('Ꝫ', 'Ꝫ'), + ('Ꝭ', 'Ꝭ'), + ('Ꝯ', 'Ꝯ'), + ('Ꝺ', 'Ꝺ'), + ('Ꝼ', 'Ꝼ'), + ('Ᵹ', 'Ꝿ'), + ('Ꞁ', 'Ꞁ'), + ('Ꞃ', 'Ꞃ'), + ('Ꞅ', 'Ꞅ'), + ('Ꞇ', 'Ꞇ'), + ('Ꞌ', 'Ꞌ'), + ('Ɥ', 'Ɥ'), + ('Ꞑ', 'Ꞑ'), + ('Ꞓ', 'Ꞓ'), + ('Ꞗ', 'Ꞗ'), + ('Ꞙ', 'Ꞙ'), + ('Ꞛ', 'Ꞛ'), + ('Ꞝ', 'Ꞝ'), + ('Ꞟ', 'Ꞟ'), + ('Ꞡ', 'Ꞡ'), + ('Ꞣ', 'Ꞣ'), + ('Ꞥ', 'Ꞥ'), + ('Ꞧ', 'Ꞧ'), + ('Ꞩ', 'Ꞩ'), + ('Ɦ', 'Ɪ'), + ('Ʞ', 'Ꞵ'), + ('Ꞷ', 'Ꞷ'), + ('Ꞹ', 'Ꞹ'), + ('Ꞻ', 'Ꞻ'), + ('Ꞽ', 'Ꞽ'), + ('Ꞿ', 'Ꞿ'), + ('Ꟁ', 'Ꟁ'), + ('Ꟃ', 'Ꟃ'), + ('Ꞔ', 'Ꟈ'), + ('Ꟊ', 'Ꟊ'), + ('Ɤ', 'Ꟍ'), + ('Ꟑ', 'Ꟑ'), + ('Ꟗ', 'Ꟗ'), + ('Ꟙ', 'Ꟙ'), + ('Ꟛ', 'Ꟛ'), + ('Ƛ', 'Ƛ'), + ('Ꟶ', 'Ꟶ'), + ('A', 'Z'), + ('𐐀', '𐐧'), + ('𐒰', '𐓓'), + ('𐕰', '𐕺'), + ('𐕼', '𐖊'), + ('𐖌', '𐖒'), + ('𐖔', '𐖕'), + ('𐲀', '𐲲'), + ('𐵐', '𐵥'), + ('𑢠', '𑢿'), + ('𖹀', '𖹟'), + ('𝐀', '𝐙'), + ('𝐴', '𝑍'), + ('𝑨', '𝒁'), + ('𝒜', '𝒜'), + ('𝒞', '𝒟'), + ('𝒢', '𝒢'), + ('𝒥', '𝒦'), + ('𝒩', '𝒬'), + ('𝒮', '𝒵'), + ('𝓐', '𝓩'), + ('𝔄', '𝔅'), + ('𝔇', '𝔊'), + ('𝔍', '𝔔'), + ('𝔖', '𝔜'), + ('𝔸', '𝔹'), + ('𝔻', '𝔾'), + ('𝕀', '𝕄'), + ('𝕆', '𝕆'), + ('𝕊', '𝕐'), + ('𝕬', '𝖅'), + ('𝖠', '𝖹'), + ('𝗔', '𝗭'), + ('𝘈', '𝘡'), + ('𝘼', '𝙕'), + ('𝙰', '𝚉'), + ('𝚨', '𝛀'), + ('𝛢', '𝛺'), + ('𝜜', '𝜴'), + ('𝝖', '𝝮'), + ('𝞐', '𝞨'), + ('𝟊', '𝟊'), + ('𞤀', '𞤡'), + ('🄰', '🅉'), + ('🅐', '🅩'), + ('🅰', '🆉'), +]; + +pub const VARIATION_SELECTOR: &'static [(char, char)] = &[ + ('\u{180b}', '\u{180d}'), + ('\u{180f}', '\u{180f}'), + ('\u{fe00}', '\u{fe0f}'), + ('\u{e0100}', '\u{e01ef}'), +]; + +pub const WHITE_SPACE: &'static [(char, char)] = &[ + ('\t', '\r'), + (' ', ' '), + ('\u{85}', '\u{85}'), + ('\u{a0}', '\u{a0}'), + ('\u{1680}', '\u{1680}'), + ('\u{2000}', '\u{200a}'), + ('\u{2028}', '\u{2029}'), + ('\u{202f}', '\u{202f}'), + ('\u{205f}', '\u{205f}'), + ('\u{3000}', '\u{3000}'), +]; + +pub const XID_CONTINUE: &'static [(char, char)] = &[ + ('0', '9'), + ('A', 'Z'), + ('_', '_'), + ('a', 'z'), + ('ª', 'ª'), + ('µ', 'µ'), + ('·', '·'), + ('º', 'º'), + ('À', 'Ö'), + ('Ø', 'ö'), + ('ø', 'ˁ'), + ('ˆ', 'ˑ'), + ('ˠ', 'ˤ'), + ('ˬ', 'ˬ'), + ('ˮ', 'ˮ'), + ('\u{300}', 'ʹ'), + ('Ͷ', 'ͷ'), + ('ͻ', 'ͽ'), + ('Ϳ', 'Ϳ'), + ('Ά', 'Ί'), + ('Ό', 'Ό'), + ('Ύ', 'Ρ'), + ('Σ', 'ϵ'), + ('Ϸ', 'ҁ'), + ('\u{483}', '\u{487}'), + ('Ҋ', 'ԯ'), + ('Ա', 'Ֆ'), + ('ՙ', 'ՙ'), + ('ՠ', 'ֈ'), + ('\u{591}', '\u{5bd}'), + ('\u{5bf}', '\u{5bf}'), + ('\u{5c1}', '\u{5c2}'), + ('\u{5c4}', '\u{5c5}'), + ('\u{5c7}', '\u{5c7}'), + ('א', 'ת'), + ('ׯ', 'ײ'), + ('\u{610}', '\u{61a}'), + ('ؠ', '٩'), + ('ٮ', 'ۓ'), + ('ە', '\u{6dc}'), + ('\u{6df}', '\u{6e8}'), + ('\u{6ea}', 'ۼ'), + ('ۿ', 'ۿ'), + ('ܐ', '\u{74a}'), + ('ݍ', 'ޱ'), + ('߀', 'ߵ'), + ('ߺ', 'ߺ'), + ('\u{7fd}', '\u{7fd}'), + ('ࠀ', '\u{82d}'), + ('ࡀ', '\u{85b}'), + ('ࡠ', 'ࡪ'), + ('ࡰ', 'ࢇ'), + ('ࢉ', 'ࢎ'), + ('\u{897}', '\u{8e1}'), + ('\u{8e3}', '\u{963}'), + ('०', '९'), + ('ॱ', 'ঃ'), + ('অ', 'ঌ'), + ('এ', 'ঐ'), + ('ও', 'ন'), + ('প', 'র'), + ('ল', 'ল'), + ('শ', 'হ'), + ('\u{9bc}', '\u{9c4}'), + ('ে', 'ৈ'), + ('ো', 'ৎ'), + ('\u{9d7}', '\u{9d7}'), + ('ড়', 'ঢ়'), + ('য়', '\u{9e3}'), + ('০', 'ৱ'), + ('ৼ', 'ৼ'), + ('\u{9fe}', '\u{9fe}'), + ('\u{a01}', 'ਃ'), + ('ਅ', 'ਊ'), + ('ਏ', 'ਐ'), + ('ਓ', 'ਨ'), + ('ਪ', 'ਰ'), + ('ਲ', 'ਲ਼'), + ('ਵ', 'ਸ਼'), + ('ਸ', 'ਹ'), + ('\u{a3c}', '\u{a3c}'), + ('ਾ', '\u{a42}'), + ('\u{a47}', '\u{a48}'), + ('\u{a4b}', '\u{a4d}'), + ('\u{a51}', '\u{a51}'), + ('ਖ਼', 'ੜ'), + 
('ਫ਼', 'ਫ਼'), + ('੦', '\u{a75}'), + ('\u{a81}', 'ઃ'), + ('અ', 'ઍ'), + ('એ', 'ઑ'), + ('ઓ', 'ન'), + ('પ', 'ર'), + ('લ', 'ળ'), + ('વ', 'હ'), + ('\u{abc}', '\u{ac5}'), + ('\u{ac7}', 'ૉ'), + ('ો', '\u{acd}'), + ('ૐ', 'ૐ'), + ('ૠ', '\u{ae3}'), + ('૦', '૯'), + ('ૹ', '\u{aff}'), + ('\u{b01}', 'ଃ'), + ('ଅ', 'ଌ'), + ('ଏ', 'ଐ'), + ('ଓ', 'ନ'), + ('ପ', 'ର'), + ('ଲ', 'ଳ'), + ('ଵ', 'ହ'), + ('\u{b3c}', '\u{b44}'), + ('େ', 'ୈ'), + ('ୋ', '\u{b4d}'), + ('\u{b55}', '\u{b57}'), + ('ଡ଼', 'ଢ଼'), + ('ୟ', '\u{b63}'), + ('୦', '୯'), + ('ୱ', 'ୱ'), + ('\u{b82}', 'ஃ'), + ('அ', 'ஊ'), + ('எ', 'ஐ'), + ('ஒ', 'க'), + ('ங', 'ச'), + ('ஜ', 'ஜ'), + ('ஞ', 'ட'), + ('ண', 'த'), + ('ந', 'ப'), + ('ம', 'ஹ'), + ('\u{bbe}', 'ூ'), + ('ெ', 'ை'), + ('ொ', '\u{bcd}'), + ('ௐ', 'ௐ'), + ('\u{bd7}', '\u{bd7}'), + ('௦', '௯'), + ('\u{c00}', 'ఌ'), + ('ఎ', 'ఐ'), + ('ఒ', 'న'), + ('ప', 'హ'), + ('\u{c3c}', 'ౄ'), + ('\u{c46}', '\u{c48}'), + ('\u{c4a}', '\u{c4d}'), + ('\u{c55}', '\u{c56}'), + ('ౘ', 'ౚ'), + ('ౝ', 'ౝ'), + ('ౠ', '\u{c63}'), + ('౦', '౯'), + ('ಀ', 'ಃ'), + ('ಅ', 'ಌ'), + ('ಎ', 'ಐ'), + ('ಒ', 'ನ'), + ('ಪ', 'ಳ'), + ('ವ', 'ಹ'), + ('\u{cbc}', 'ೄ'), + ('\u{cc6}', '\u{cc8}'), + ('\u{cca}', '\u{ccd}'), + ('\u{cd5}', '\u{cd6}'), + ('ೝ', 'ೞ'), + ('ೠ', '\u{ce3}'), + ('೦', '೯'), + ('ೱ', 'ೳ'), + ('\u{d00}', 'ഌ'), + ('എ', 'ഐ'), + ('ഒ', '\u{d44}'), + ('െ', 'ൈ'), + ('ൊ', 'ൎ'), + ('ൔ', '\u{d57}'), + ('ൟ', '\u{d63}'), + ('൦', '൯'), + ('ൺ', 'ൿ'), + ('\u{d81}', 'ඃ'), + ('අ', 'ඖ'), + ('ක', 'න'), + ('ඳ', 'ර'), + ('ල', 'ල'), + ('ව', 'ෆ'), + ('\u{dca}', '\u{dca}'), + ('\u{dcf}', '\u{dd4}'), + ('\u{dd6}', '\u{dd6}'), + ('ෘ', '\u{ddf}'), + ('෦', '෯'), + ('ෲ', 'ෳ'), + ('ก', '\u{e3a}'), + ('เ', '\u{e4e}'), + ('๐', '๙'), + ('ກ', 'ຂ'), + ('ຄ', 'ຄ'), + ('ຆ', 'ຊ'), + ('ຌ', 'ຣ'), + ('ລ', 'ລ'), + ('ວ', 'ຽ'), + ('ເ', 'ໄ'), + ('ໆ', 'ໆ'), + ('\u{ec8}', '\u{ece}'), + ('໐', '໙'), + ('ໜ', 'ໟ'), + ('ༀ', 'ༀ'), + ('\u{f18}', '\u{f19}'), + ('༠', '༩'), + ('\u{f35}', '\u{f35}'), + ('\u{f37}', '\u{f37}'), + ('\u{f39}', '\u{f39}'), + ('༾', 'ཇ'), + ('ཉ', 'ཬ'), + ('\u{f71}', '\u{f84}'), + ('\u{f86}', '\u{f97}'), + ('\u{f99}', '\u{fbc}'), + ('\u{fc6}', '\u{fc6}'), + ('က', '၉'), + ('ၐ', '\u{109d}'), + ('Ⴀ', 'Ⴥ'), + ('Ⴧ', 'Ⴧ'), + ('Ⴭ', 'Ⴭ'), + ('ა', 'ჺ'), + ('ჼ', 'ቈ'), + ('ቊ', 'ቍ'), + ('ቐ', 'ቖ'), + ('ቘ', 'ቘ'), + ('ቚ', 'ቝ'), + ('በ', 'ኈ'), + ('ኊ', 'ኍ'), + ('ነ', 'ኰ'), + ('ኲ', 'ኵ'), + ('ኸ', 'ኾ'), + ('ዀ', 'ዀ'), + ('ዂ', 'ዅ'), + ('ወ', 'ዖ'), + ('ዘ', 'ጐ'), + ('ጒ', 'ጕ'), + ('ጘ', 'ፚ'), + ('\u{135d}', '\u{135f}'), + ('፩', '፱'), + ('ᎀ', 'ᎏ'), + ('Ꭰ', 'Ᏽ'), + ('ᏸ', 'ᏽ'), + ('ᐁ', 'ᙬ'), + ('ᙯ', 'ᙿ'), + ('ᚁ', 'ᚚ'), + ('ᚠ', 'ᛪ'), + ('ᛮ', 'ᛸ'), + ('ᜀ', '\u{1715}'), + ('ᜟ', '\u{1734}'), + ('ᝀ', '\u{1753}'), + ('ᝠ', 'ᝬ'), + ('ᝮ', 'ᝰ'), + ('\u{1772}', '\u{1773}'), + ('ក', '\u{17d3}'), + ('ៗ', 'ៗ'), + ('ៜ', '\u{17dd}'), + ('០', '៩'), + ('\u{180b}', '\u{180d}'), + ('\u{180f}', '᠙'), + ('ᠠ', 'ᡸ'), + ('ᢀ', 'ᢪ'), + ('ᢰ', 'ᣵ'), + ('ᤀ', 'ᤞ'), + ('\u{1920}', 'ᤫ'), + ('ᤰ', '\u{193b}'), + ('᥆', 'ᥭ'), + ('ᥰ', 'ᥴ'), + ('ᦀ', 'ᦫ'), + ('ᦰ', 'ᧉ'), + ('᧐', '᧚'), + ('ᨀ', '\u{1a1b}'), + ('ᨠ', '\u{1a5e}'), + ('\u{1a60}', '\u{1a7c}'), + ('\u{1a7f}', '᪉'), + ('᪐', '᪙'), + ('ᪧ', 'ᪧ'), + ('\u{1ab0}', '\u{1abd}'), + ('\u{1abf}', '\u{1ace}'), + ('\u{1b00}', 'ᭌ'), + ('᭐', '᭙'), + ('\u{1b6b}', '\u{1b73}'), + ('\u{1b80}', '\u{1bf3}'), + ('ᰀ', '\u{1c37}'), + ('᱀', '᱉'), + ('ᱍ', 'ᱽ'), + ('ᲀ', 'ᲊ'), + ('Ა', 'Ჺ'), + ('Ჽ', 'Ჿ'), + ('\u{1cd0}', '\u{1cd2}'), + ('\u{1cd4}', 'ᳺ'), + ('ᴀ', 'ἕ'), + ('Ἐ', 'Ἕ'), + ('ἠ', 'ὅ'), + ('Ὀ', 'Ὅ'), + ('ὐ', 'ὗ'), + ('Ὑ', 'Ὑ'), + ('Ὓ', 'Ὓ'), + ('Ὕ', 'Ὕ'), + ('Ὗ', 'ώ'), + ('ᾀ', 'ᾴ'), + ('ᾶ', 'ᾼ'), + ('ι', 'ι'), + ('ῂ', 
'ῄ'), + ('ῆ', 'ῌ'), + ('ῐ', 'ΐ'), + ('ῖ', 'Ί'), + ('ῠ', 'Ῥ'), + ('ῲ', 'ῴ'), + ('ῶ', 'ῼ'), + ('\u{200c}', '\u{200d}'), + ('‿', '⁀'), + ('⁔', '⁔'), + ('ⁱ', 'ⁱ'), + ('ⁿ', 'ⁿ'), + ('ₐ', 'ₜ'), + ('\u{20d0}', '\u{20dc}'), + ('\u{20e1}', '\u{20e1}'), + ('\u{20e5}', '\u{20f0}'), + ('ℂ', 'ℂ'), + ('ℇ', 'ℇ'), + ('ℊ', 'ℓ'), + ('ℕ', 'ℕ'), + ('℘', 'ℝ'), + ('ℤ', 'ℤ'), + ('Ω', 'Ω'), + ('ℨ', 'ℨ'), + ('K', 'ℹ'), + ('ℼ', 'ℿ'), + ('ⅅ', 'ⅉ'), + ('ⅎ', 'ⅎ'), + ('Ⅰ', 'ↈ'), + ('Ⰰ', 'ⳤ'), + ('Ⳬ', 'ⳳ'), + ('ⴀ', 'ⴥ'), + ('ⴧ', 'ⴧ'), + ('ⴭ', 'ⴭ'), + ('ⴰ', 'ⵧ'), + ('ⵯ', 'ⵯ'), + ('\u{2d7f}', 'ⶖ'), + ('ⶠ', 'ⶦ'), + ('ⶨ', 'ⶮ'), + ('ⶰ', 'ⶶ'), + ('ⶸ', 'ⶾ'), + ('ⷀ', 'ⷆ'), + ('ⷈ', 'ⷎ'), + ('ⷐ', 'ⷖ'), + ('ⷘ', 'ⷞ'), + ('\u{2de0}', '\u{2dff}'), + ('々', '〇'), + ('〡', '\u{302f}'), + ('〱', '〵'), + ('〸', '〼'), + ('ぁ', 'ゖ'), + ('\u{3099}', '\u{309a}'), + ('ゝ', 'ゟ'), + ('ァ', 'ヿ'), + ('ㄅ', 'ㄯ'), + ('ㄱ', 'ㆎ'), + ('ㆠ', 'ㆿ'), + ('ㇰ', 'ㇿ'), + ('㐀', '䶿'), + ('一', 'ꒌ'), + ('ꓐ', 'ꓽ'), + ('ꔀ', 'ꘌ'), + ('ꘐ', 'ꘫ'), + ('Ꙁ', '\u{a66f}'), + ('\u{a674}', '\u{a67d}'), + ('ꙿ', '\u{a6f1}'), + ('ꜗ', 'ꜟ'), + ('Ꜣ', 'ꞈ'), + ('Ꞌ', 'ꟍ'), + ('Ꟑ', 'ꟑ'), + ('ꟓ', 'ꟓ'), + ('ꟕ', 'Ƛ'), + ('ꟲ', 'ꠧ'), + ('\u{a82c}', '\u{a82c}'), + ('ꡀ', 'ꡳ'), + ('ꢀ', '\u{a8c5}'), + ('꣐', '꣙'), + ('\u{a8e0}', 'ꣷ'), + ('ꣻ', 'ꣻ'), + ('ꣽ', '\u{a92d}'), + ('ꤰ', '\u{a953}'), + ('ꥠ', 'ꥼ'), + ('\u{a980}', '\u{a9c0}'), + ('ꧏ', '꧙'), + ('ꧠ', 'ꧾ'), + ('ꨀ', '\u{aa36}'), + ('ꩀ', 'ꩍ'), + ('꩐', '꩙'), + ('ꩠ', 'ꩶ'), + ('ꩺ', 'ꫂ'), + ('ꫛ', 'ꫝ'), + ('ꫠ', 'ꫯ'), + ('ꫲ', '\u{aaf6}'), + ('ꬁ', 'ꬆ'), + ('ꬉ', 'ꬎ'), + ('ꬑ', 'ꬖ'), + ('ꬠ', 'ꬦ'), + ('ꬨ', 'ꬮ'), + ('ꬰ', 'ꭚ'), + ('ꭜ', 'ꭩ'), + ('ꭰ', 'ꯪ'), + ('꯬', '\u{abed}'), + ('꯰', '꯹'), + ('가', '힣'), + ('ힰ', 'ퟆ'), + ('ퟋ', 'ퟻ'), + ('豈', '舘'), + ('並', '龎'), + ('ff', 'st'), + ('ﬓ', 'ﬗ'), + ('יִ', 'ﬨ'), + ('שׁ', 'זּ'), + ('טּ', 'לּ'), + ('מּ', 'מּ'), + ('נּ', 'סּ'), + ('ףּ', 'פּ'), + ('צּ', 'ﮱ'), + ('ﯓ', 'ﱝ'), + ('ﱤ', 'ﴽ'), + ('ﵐ', 'ﶏ'), + ('ﶒ', 'ﷇ'), + ('ﷰ', 'ﷹ'), + ('\u{fe00}', '\u{fe0f}'), + ('\u{fe20}', '\u{fe2f}'), + ('︳', '︴'), + ('﹍', '﹏'), + ('ﹱ', 'ﹱ'), + ('ﹳ', 'ﹳ'), + ('ﹷ', 'ﹷ'), + ('ﹹ', 'ﹹ'), + ('ﹻ', 'ﹻ'), + ('ﹽ', 'ﹽ'), + ('ﹿ', 'ﻼ'), + ('0', '9'), + ('A', 'Z'), + ('_', '_'), + ('a', 'z'), + ('・', 'ᄒ'), + ('ᅡ', 'ᅦ'), + ('ᅧ', 'ᅬ'), + ('ᅭ', 'ᅲ'), + ('ᅳ', 'ᅵ'), + ('𐀀', '𐀋'), + ('𐀍', '𐀦'), + ('𐀨', '𐀺'), + ('𐀼', '𐀽'), + ('𐀿', '𐁍'), + ('𐁐', '𐁝'), + ('𐂀', '𐃺'), + ('𐅀', '𐅴'), + ('\u{101fd}', '\u{101fd}'), + ('𐊀', '𐊜'), + ('𐊠', '𐋐'), + ('\u{102e0}', '\u{102e0}'), + ('𐌀', '𐌟'), + ('𐌭', '𐍊'), + ('𐍐', '\u{1037a}'), + ('𐎀', '𐎝'), + ('𐎠', '𐏃'), + ('𐏈', '𐏏'), + ('𐏑', '𐏕'), + ('𐐀', '𐒝'), + ('𐒠', '𐒩'), + ('𐒰', '𐓓'), + ('𐓘', '𐓻'), + ('𐔀', '𐔧'), + ('𐔰', '𐕣'), + ('𐕰', '𐕺'), + ('𐕼', '𐖊'), + ('𐖌', '𐖒'), + ('𐖔', '𐖕'), + ('𐖗', '𐖡'), + ('𐖣', '𐖱'), + ('𐖳', '𐖹'), + ('𐖻', '𐖼'), + ('𐗀', '𐗳'), + ('𐘀', '𐜶'), + ('𐝀', '𐝕'), + ('𐝠', '𐝧'), + ('𐞀', '𐞅'), + ('𐞇', '𐞰'), + ('𐞲', '𐞺'), + ('𐠀', '𐠅'), + ('𐠈', '𐠈'), + ('𐠊', '𐠵'), + ('𐠷', '𐠸'), + ('𐠼', '𐠼'), + ('𐠿', '𐡕'), + ('𐡠', '𐡶'), + ('𐢀', '𐢞'), + ('𐣠', '𐣲'), + ('𐣴', '𐣵'), + ('𐤀', '𐤕'), + ('𐤠', '𐤹'), + ('𐦀', '𐦷'), + ('𐦾', '𐦿'), + ('𐨀', '\u{10a03}'), + ('\u{10a05}', '\u{10a06}'), + ('\u{10a0c}', '𐨓'), + ('𐨕', '𐨗'), + ('𐨙', '𐨵'), + ('\u{10a38}', '\u{10a3a}'), + ('\u{10a3f}', '\u{10a3f}'), + ('𐩠', '𐩼'), + ('𐪀', '𐪜'), + ('𐫀', '𐫇'), + ('𐫉', '\u{10ae6}'), + ('𐬀', '𐬵'), + ('𐭀', '𐭕'), + ('𐭠', '𐭲'), + ('𐮀', '𐮑'), + ('𐰀', '𐱈'), + ('𐲀', '𐲲'), + ('𐳀', '𐳲'), + ('𐴀', '\u{10d27}'), + ('𐴰', '𐴹'), + ('𐵀', '𐵥'), + ('\u{10d69}', '\u{10d6d}'), + ('𐵯', '𐶅'), + ('𐺀', '𐺩'), + ('\u{10eab}', '\u{10eac}'), + ('𐺰', '𐺱'), + ('𐻂', '𐻄'), + ('\u{10efc}', '𐼜'), + ('𐼧', '𐼧'), + ('𐼰', 
'\u{10f50}'), + ('𐽰', '\u{10f85}'), + ('𐾰', '𐿄'), + ('𐿠', '𐿶'), + ('𑀀', '\u{11046}'), + ('𑁦', '𑁵'), + ('\u{1107f}', '\u{110ba}'), + ('\u{110c2}', '\u{110c2}'), + ('𑃐', '𑃨'), + ('𑃰', '𑃹'), + ('\u{11100}', '\u{11134}'), + ('𑄶', '𑄿'), + ('𑅄', '𑅇'), + ('𑅐', '\u{11173}'), + ('𑅶', '𑅶'), + ('\u{11180}', '𑇄'), + ('\u{111c9}', '\u{111cc}'), + ('𑇎', '𑇚'), + ('𑇜', '𑇜'), + ('𑈀', '𑈑'), + ('𑈓', '\u{11237}'), + ('\u{1123e}', '\u{11241}'), + ('𑊀', '𑊆'), + ('𑊈', '𑊈'), + ('𑊊', '𑊍'), + ('𑊏', '𑊝'), + ('𑊟', '𑊨'), + ('𑊰', '\u{112ea}'), + ('𑋰', '𑋹'), + ('\u{11300}', '𑌃'), + ('𑌅', '𑌌'), + ('𑌏', '𑌐'), + ('𑌓', '𑌨'), + ('𑌪', '𑌰'), + ('𑌲', '𑌳'), + ('𑌵', '𑌹'), + ('\u{1133b}', '𑍄'), + ('𑍇', '𑍈'), + ('𑍋', '\u{1134d}'), + ('𑍐', '𑍐'), + ('\u{11357}', '\u{11357}'), + ('𑍝', '𑍣'), + ('\u{11366}', '\u{1136c}'), + ('\u{11370}', '\u{11374}'), + ('𑎀', '𑎉'), + ('𑎋', '𑎋'), + ('𑎎', '𑎎'), + ('𑎐', '𑎵'), + ('𑎷', '\u{113c0}'), + ('\u{113c2}', '\u{113c2}'), + ('\u{113c5}', '\u{113c5}'), + ('\u{113c7}', '𑏊'), + ('𑏌', '𑏓'), + ('\u{113e1}', '\u{113e2}'), + ('𑐀', '𑑊'), + ('𑑐', '𑑙'), + ('\u{1145e}', '𑑡'), + ('𑒀', '𑓅'), + ('𑓇', '𑓇'), + ('𑓐', '𑓙'), + ('𑖀', '\u{115b5}'), + ('𑖸', '\u{115c0}'), + ('𑗘', '\u{115dd}'), + ('𑘀', '\u{11640}'), + ('𑙄', '𑙄'), + ('𑙐', '𑙙'), + ('𑚀', '𑚸'), + ('𑛀', '𑛉'), + ('𑛐', '𑛣'), + ('𑜀', '𑜚'), + ('\u{1171d}', '\u{1172b}'), + ('𑜰', '𑜹'), + ('𑝀', '𑝆'), + ('𑠀', '\u{1183a}'), + ('𑢠', '𑣩'), + ('𑣿', '𑤆'), + ('𑤉', '𑤉'), + ('𑤌', '𑤓'), + ('𑤕', '𑤖'), + ('𑤘', '𑤵'), + ('𑤷', '𑤸'), + ('\u{1193b}', '\u{11943}'), + ('𑥐', '𑥙'), + ('𑦠', '𑦧'), + ('𑦪', '\u{119d7}'), + ('\u{119da}', '𑧡'), + ('𑧣', '𑧤'), + ('𑨀', '\u{11a3e}'), + ('\u{11a47}', '\u{11a47}'), + ('𑩐', '\u{11a99}'), + ('𑪝', '𑪝'), + ('𑪰', '𑫸'), + ('𑯀', '𑯠'), + ('𑯰', '𑯹'), + ('𑰀', '𑰈'), + ('𑰊', '\u{11c36}'), + ('\u{11c38}', '𑱀'), + ('𑱐', '𑱙'), + ('𑱲', '𑲏'), + ('\u{11c92}', '\u{11ca7}'), + ('𑲩', '\u{11cb6}'), + ('𑴀', '𑴆'), + ('𑴈', '𑴉'), + ('𑴋', '\u{11d36}'), + ('\u{11d3a}', '\u{11d3a}'), + ('\u{11d3c}', '\u{11d3d}'), + ('\u{11d3f}', '\u{11d47}'), + ('𑵐', '𑵙'), + ('𑵠', '𑵥'), + ('𑵧', '𑵨'), + ('𑵪', '𑶎'), + ('\u{11d90}', '\u{11d91}'), + ('𑶓', '𑶘'), + ('𑶠', '𑶩'), + ('𑻠', '𑻶'), + ('\u{11f00}', '𑼐'), + ('𑼒', '\u{11f3a}'), + ('𑼾', '\u{11f42}'), + ('𑽐', '\u{11f5a}'), + ('𑾰', '𑾰'), + ('𒀀', '𒎙'), + ('𒐀', '𒑮'), + ('𒒀', '𒕃'), + ('𒾐', '𒿰'), + ('𓀀', '𓐯'), + ('\u{13440}', '\u{13455}'), + ('𓑠', '𔏺'), + ('𔐀', '𔙆'), + ('𖄀', '𖄹'), + ('𖠀', '𖨸'), + ('𖩀', '𖩞'), + ('𖩠', '𖩩'), + ('𖩰', '𖪾'), + ('𖫀', '𖫉'), + ('𖫐', '𖫭'), + ('\u{16af0}', '\u{16af4}'), + ('𖬀', '\u{16b36}'), + ('𖭀', '𖭃'), + ('𖭐', '𖭙'), + ('𖭣', '𖭷'), + ('𖭽', '𖮏'), + ('𖵀', '𖵬'), + ('𖵰', '𖵹'), + ('𖹀', '𖹿'), + ('𖼀', '𖽊'), + ('\u{16f4f}', '𖾇'), + ('\u{16f8f}', '𖾟'), + ('𖿠', '𖿡'), + ('𖿣', '\u{16fe4}'), + ('\u{16ff0}', '\u{16ff1}'), + ('𗀀', '𘟷'), + ('𘠀', '𘳕'), + ('𘳿', '𘴈'), + ('𚿰', '𚿳'), + ('𚿵', '𚿻'), + ('𚿽', '𚿾'), + ('𛀀', '𛄢'), + ('𛄲', '𛄲'), + ('𛅐', '𛅒'), + ('𛅕', '𛅕'), + ('𛅤', '𛅧'), + ('𛅰', '𛋻'), + ('𛰀', '𛱪'), + ('𛱰', '𛱼'), + ('𛲀', '𛲈'), + ('𛲐', '𛲙'), + ('\u{1bc9d}', '\u{1bc9e}'), + ('𜳰', '𜳹'), + ('\u{1cf00}', '\u{1cf2d}'), + ('\u{1cf30}', '\u{1cf46}'), + ('\u{1d165}', '\u{1d169}'), + ('\u{1d16d}', '\u{1d172}'), + ('\u{1d17b}', '\u{1d182}'), + ('\u{1d185}', '\u{1d18b}'), + ('\u{1d1aa}', '\u{1d1ad}'), + ('\u{1d242}', '\u{1d244}'), + ('𝐀', '𝑔'), + ('𝑖', '𝒜'), + ('𝒞', '𝒟'), + ('𝒢', '𝒢'), + ('𝒥', '𝒦'), + ('𝒩', '𝒬'), + ('𝒮', '𝒹'), + ('𝒻', '𝒻'), + ('𝒽', '𝓃'), + ('𝓅', '𝔅'), + ('𝔇', '𝔊'), + ('𝔍', '𝔔'), + ('𝔖', '𝔜'), + ('𝔞', '𝔹'), + ('𝔻', '𝔾'), + ('𝕀', '𝕄'), + ('𝕆', '𝕆'), + ('𝕊', '𝕐'), + ('𝕒', '𝚥'), + ('𝚨', '𝛀'), + ('𝛂', '𝛚'), + ('𝛜', '𝛺'), + ('𝛼', '𝜔'), + ('𝜖', 
'𝜴'), + ('𝜶', '𝝎'), + ('𝝐', '𝝮'), + ('𝝰', '𝞈'), + ('𝞊', '𝞨'), + ('𝞪', '𝟂'), + ('𝟄', '𝟋'), + ('𝟎', '𝟿'), + ('\u{1da00}', '\u{1da36}'), + ('\u{1da3b}', '\u{1da6c}'), + ('\u{1da75}', '\u{1da75}'), + ('\u{1da84}', '\u{1da84}'), + ('\u{1da9b}', '\u{1da9f}'), + ('\u{1daa1}', '\u{1daaf}'), + ('𝼀', '𝼞'), + ('𝼥', '𝼪'), + ('\u{1e000}', '\u{1e006}'), + ('\u{1e008}', '\u{1e018}'), + ('\u{1e01b}', '\u{1e021}'), + ('\u{1e023}', '\u{1e024}'), + ('\u{1e026}', '\u{1e02a}'), + ('𞀰', '𞁭'), + ('\u{1e08f}', '\u{1e08f}'), + ('𞄀', '𞄬'), + ('\u{1e130}', '𞄽'), + ('𞅀', '𞅉'), + ('𞅎', '𞅎'), + ('𞊐', '\u{1e2ae}'), + ('𞋀', '𞋹'), + ('𞓐', '𞓹'), + ('𞗐', '𞗺'), + ('𞟠', '𞟦'), + ('𞟨', '𞟫'), + ('𞟭', '𞟮'), + ('𞟰', '𞟾'), + ('𞠀', '𞣄'), + ('\u{1e8d0}', '\u{1e8d6}'), + ('𞤀', '𞥋'), + ('𞥐', '𞥙'), + ('𞸀', '𞸃'), + ('𞸅', '𞸟'), + ('𞸡', '𞸢'), + ('𞸤', '𞸤'), + ('𞸧', '𞸧'), + ('𞸩', '𞸲'), + ('𞸴', '𞸷'), + ('𞸹', '𞸹'), + ('𞸻', '𞸻'), + ('𞹂', '𞹂'), + ('𞹇', '𞹇'), + ('𞹉', '𞹉'), + ('𞹋', '𞹋'), + ('𞹍', '𞹏'), + ('𞹑', '𞹒'), + ('𞹔', '𞹔'), + ('𞹗', '𞹗'), + ('𞹙', '𞹙'), + ('𞹛', '𞹛'), + ('𞹝', '𞹝'), + ('𞹟', '𞹟'), + ('𞹡', '𞹢'), + ('𞹤', '𞹤'), + ('𞹧', '𞹪'), + ('𞹬', '𞹲'), + ('𞹴', '𞹷'), + ('𞹹', '𞹼'), + ('𞹾', '𞹾'), + ('𞺀', '𞺉'), + ('𞺋', '𞺛'), + ('𞺡', '𞺣'), + ('𞺥', '𞺩'), + ('𞺫', '𞺻'), + ('🯰', '🯹'), + ('𠀀', '𪛟'), + ('𪜀', '𫜹'), + ('𫝀', '𫠝'), + ('𫠠', '𬺡'), + ('𬺰', '𮯠'), + ('𮯰', '𮹝'), + ('丽', '𪘀'), + ('𰀀', '𱍊'), + ('𱍐', '𲎯'), + ('\u{e0100}', '\u{e01ef}'), +]; + +pub const XID_START: &'static [(char, char)] = &[ + ('A', 'Z'), + ('a', 'z'), + ('ª', 'ª'), + ('µ', 'µ'), + ('º', 'º'), + ('À', 'Ö'), + ('Ø', 'ö'), + ('ø', 'ˁ'), + ('ˆ', 'ˑ'), + ('ˠ', 'ˤ'), + ('ˬ', 'ˬ'), + ('ˮ', 'ˮ'), + ('Ͱ', 'ʹ'), + ('Ͷ', 'ͷ'), + ('ͻ', 'ͽ'), + ('Ϳ', 'Ϳ'), + ('Ά', 'Ά'), + ('Έ', 'Ί'), + ('Ό', 'Ό'), + ('Ύ', 'Ρ'), + ('Σ', 'ϵ'), + ('Ϸ', 'ҁ'), + ('Ҋ', 'ԯ'), + ('Ա', 'Ֆ'), + ('ՙ', 'ՙ'), + ('ՠ', 'ֈ'), + ('א', 'ת'), + ('ׯ', 'ײ'), + ('ؠ', 'ي'), + ('ٮ', 'ٯ'), + ('ٱ', 'ۓ'), + ('ە', 'ە'), + ('ۥ', 'ۦ'), + ('ۮ', 'ۯ'), + ('ۺ', 'ۼ'), + ('ۿ', 'ۿ'), + ('ܐ', 'ܐ'), + ('ܒ', 'ܯ'), + ('ݍ', 'ޥ'), + ('ޱ', 'ޱ'), + ('ߊ', 'ߪ'), + ('ߴ', 'ߵ'), + ('ߺ', 'ߺ'), + ('ࠀ', 'ࠕ'), + ('ࠚ', 'ࠚ'), + ('ࠤ', 'ࠤ'), + ('ࠨ', 'ࠨ'), + ('ࡀ', 'ࡘ'), + ('ࡠ', 'ࡪ'), + ('ࡰ', 'ࢇ'), + ('ࢉ', 'ࢎ'), + ('ࢠ', 'ࣉ'), + ('ऄ', 'ह'), + ('ऽ', 'ऽ'), + ('ॐ', 'ॐ'), + ('क़', 'ॡ'), + ('ॱ', 'ঀ'), + ('অ', 'ঌ'), + ('এ', 'ঐ'), + ('ও', 'ন'), + ('প', 'র'), + ('ল', 'ল'), + ('শ', 'হ'), + ('ঽ', 'ঽ'), + ('ৎ', 'ৎ'), + ('ড়', 'ঢ়'), + ('য়', 'ৡ'), + ('ৰ', 'ৱ'), + ('ৼ', 'ৼ'), + ('ਅ', 'ਊ'), + ('ਏ', 'ਐ'), + ('ਓ', 'ਨ'), + ('ਪ', 'ਰ'), + ('ਲ', 'ਲ਼'), + ('ਵ', 'ਸ਼'), + ('ਸ', 'ਹ'), + ('ਖ਼', 'ੜ'), + ('ਫ਼', 'ਫ਼'), + ('ੲ', 'ੴ'), + ('અ', 'ઍ'), + ('એ', 'ઑ'), + ('ઓ', 'ન'), + ('પ', 'ર'), + ('લ', 'ળ'), + ('વ', 'હ'), + ('ઽ', 'ઽ'), + ('ૐ', 'ૐ'), + ('ૠ', 'ૡ'), + ('ૹ', 'ૹ'), + ('ଅ', 'ଌ'), + ('ଏ', 'ଐ'), + ('ଓ', 'ନ'), + ('ପ', 'ର'), + ('ଲ', 'ଳ'), + ('ଵ', 'ହ'), + ('ଽ', 'ଽ'), + ('ଡ଼', 'ଢ଼'), + ('ୟ', 'ୡ'), + ('ୱ', 'ୱ'), + ('ஃ', 'ஃ'), + ('அ', 'ஊ'), + ('எ', 'ஐ'), + ('ஒ', 'க'), + ('ங', 'ச'), + ('ஜ', 'ஜ'), + ('ஞ', 'ட'), + ('ண', 'த'), + ('ந', 'ப'), + ('ம', 'ஹ'), + ('ௐ', 'ௐ'), + ('అ', 'ఌ'), + ('ఎ', 'ఐ'), + ('ఒ', 'న'), + ('ప', 'హ'), + ('ఽ', 'ఽ'), + ('ౘ', 'ౚ'), + ('ౝ', 'ౝ'), + ('ౠ', 'ౡ'), + ('ಀ', 'ಀ'), + ('ಅ', 'ಌ'), + ('ಎ', 'ಐ'), + ('ಒ', 'ನ'), + ('ಪ', 'ಳ'), + ('ವ', 'ಹ'), + ('ಽ', 'ಽ'), + ('ೝ', 'ೞ'), + ('ೠ', 'ೡ'), + ('ೱ', 'ೲ'), + ('ഄ', 'ഌ'), + ('എ', 'ഐ'), + ('ഒ', 'ഺ'), + ('ഽ', 'ഽ'), + ('ൎ', 'ൎ'), + ('ൔ', 'ൖ'), + ('ൟ', 'ൡ'), + ('ൺ', 'ൿ'), + ('අ', 'ඖ'), + ('ක', 'න'), + ('ඳ', 'ර'), + ('ල', 'ල'), + ('ව', 'ෆ'), + ('ก', 'ะ'), + ('า', 'า'), + ('เ', 'ๆ'), + ('ກ', 'ຂ'), + ('ຄ', 'ຄ'), + ('ຆ', 'ຊ'), + ('ຌ', 'ຣ'), + ('ລ', 'ລ'), + 
('ວ', 'ະ'), + ('າ', 'າ'), + ('ຽ', 'ຽ'), + ('ເ', 'ໄ'), + ('ໆ', 'ໆ'), + ('ໜ', 'ໟ'), + ('ༀ', 'ༀ'), + ('ཀ', 'ཇ'), + ('ཉ', 'ཬ'), + ('ྈ', 'ྌ'), + ('က', 'ဪ'), + ('ဿ', 'ဿ'), + ('ၐ', 'ၕ'), + ('ၚ', 'ၝ'), + ('ၡ', 'ၡ'), + ('ၥ', 'ၦ'), + ('ၮ', 'ၰ'), + ('ၵ', 'ႁ'), + ('ႎ', 'ႎ'), + ('Ⴀ', 'Ⴥ'), + ('Ⴧ', 'Ⴧ'), + ('Ⴭ', 'Ⴭ'), + ('ა', 'ჺ'), + ('ჼ', 'ቈ'), + ('ቊ', 'ቍ'), + ('ቐ', 'ቖ'), + ('ቘ', 'ቘ'), + ('ቚ', 'ቝ'), + ('በ', 'ኈ'), + ('ኊ', 'ኍ'), + ('ነ', 'ኰ'), + ('ኲ', 'ኵ'), + ('ኸ', 'ኾ'), + ('ዀ', 'ዀ'), + ('ዂ', 'ዅ'), + ('ወ', 'ዖ'), + ('ዘ', 'ጐ'), + ('ጒ', 'ጕ'), + ('ጘ', 'ፚ'), + ('ᎀ', 'ᎏ'), + ('Ꭰ', 'Ᏽ'), + ('ᏸ', 'ᏽ'), + ('ᐁ', 'ᙬ'), + ('ᙯ', 'ᙿ'), + ('ᚁ', 'ᚚ'), + ('ᚠ', 'ᛪ'), + ('ᛮ', 'ᛸ'), + ('ᜀ', 'ᜑ'), + ('ᜟ', 'ᜱ'), + ('ᝀ', 'ᝑ'), + ('ᝠ', 'ᝬ'), + ('ᝮ', 'ᝰ'), + ('ក', 'ឳ'), + ('ៗ', 'ៗ'), + ('ៜ', 'ៜ'), + ('ᠠ', 'ᡸ'), + ('ᢀ', 'ᢨ'), + ('ᢪ', 'ᢪ'), + ('ᢰ', 'ᣵ'), + ('ᤀ', 'ᤞ'), + ('ᥐ', 'ᥭ'), + ('ᥰ', 'ᥴ'), + ('ᦀ', 'ᦫ'), + ('ᦰ', 'ᧉ'), + ('ᨀ', 'ᨖ'), + ('ᨠ', 'ᩔ'), + ('ᪧ', 'ᪧ'), + ('ᬅ', 'ᬳ'), + ('ᭅ', 'ᭌ'), + ('ᮃ', 'ᮠ'), + ('ᮮ', 'ᮯ'), + ('ᮺ', 'ᯥ'), + ('ᰀ', 'ᰣ'), + ('ᱍ', 'ᱏ'), + ('ᱚ', 'ᱽ'), + ('ᲀ', 'ᲊ'), + ('Ა', 'Ჺ'), + ('Ჽ', 'Ჿ'), + ('ᳩ', 'ᳬ'), + ('ᳮ', 'ᳳ'), + ('ᳵ', 'ᳶ'), + ('ᳺ', 'ᳺ'), + ('ᴀ', 'ᶿ'), + ('Ḁ', 'ἕ'), + ('Ἐ', 'Ἕ'), + ('ἠ', 'ὅ'), + ('Ὀ', 'Ὅ'), + ('ὐ', 'ὗ'), + ('Ὑ', 'Ὑ'), + ('Ὓ', 'Ὓ'), + ('Ὕ', 'Ὕ'), + ('Ὗ', 'ώ'), + ('ᾀ', 'ᾴ'), + ('ᾶ', 'ᾼ'), + ('ι', 'ι'), + ('ῂ', 'ῄ'), + ('ῆ', 'ῌ'), + ('ῐ', 'ΐ'), + ('ῖ', 'Ί'), + ('ῠ', 'Ῥ'), + ('ῲ', 'ῴ'), + ('ῶ', 'ῼ'), + ('ⁱ', 'ⁱ'), + ('ⁿ', 'ⁿ'), + ('ₐ', 'ₜ'), + ('ℂ', 'ℂ'), + ('ℇ', 'ℇ'), + ('ℊ', 'ℓ'), + ('ℕ', 'ℕ'), + ('℘', 'ℝ'), + ('ℤ', 'ℤ'), + ('Ω', 'Ω'), + ('ℨ', 'ℨ'), + ('K', 'ℹ'), + ('ℼ', 'ℿ'), + ('ⅅ', 'ⅉ'), + ('ⅎ', 'ⅎ'), + ('Ⅰ', 'ↈ'), + ('Ⰰ', 'ⳤ'), + ('Ⳬ', 'ⳮ'), + ('Ⳳ', 'ⳳ'), + ('ⴀ', 'ⴥ'), + ('ⴧ', 'ⴧ'), + ('ⴭ', 'ⴭ'), + ('ⴰ', 'ⵧ'), + ('ⵯ', 'ⵯ'), + ('ⶀ', 'ⶖ'), + ('ⶠ', 'ⶦ'), + ('ⶨ', 'ⶮ'), + ('ⶰ', 'ⶶ'), + ('ⶸ', 'ⶾ'), + ('ⷀ', 'ⷆ'), + ('ⷈ', 'ⷎ'), + ('ⷐ', 'ⷖ'), + ('ⷘ', 'ⷞ'), + ('々', '〇'), + ('〡', '〩'), + ('〱', '〵'), + ('〸', '〼'), + ('ぁ', 'ゖ'), + ('ゝ', 'ゟ'), + ('ァ', 'ヺ'), + ('ー', 'ヿ'), + ('ㄅ', 'ㄯ'), + ('ㄱ', 'ㆎ'), + ('ㆠ', 'ㆿ'), + ('ㇰ', 'ㇿ'), + ('㐀', '䶿'), + ('一', 'ꒌ'), + ('ꓐ', 'ꓽ'), + ('ꔀ', 'ꘌ'), + ('ꘐ', 'ꘟ'), + ('ꘪ', 'ꘫ'), + ('Ꙁ', 'ꙮ'), + ('ꙿ', 'ꚝ'), + ('ꚠ', 'ꛯ'), + ('ꜗ', 'ꜟ'), + ('Ꜣ', 'ꞈ'), + ('Ꞌ', 'ꟍ'), + ('Ꟑ', 'ꟑ'), + ('ꟓ', 'ꟓ'), + ('ꟕ', 'Ƛ'), + ('ꟲ', 'ꠁ'), + ('ꠃ', 'ꠅ'), + ('ꠇ', 'ꠊ'), + ('ꠌ', 'ꠢ'), + ('ꡀ', 'ꡳ'), + ('ꢂ', 'ꢳ'), + ('ꣲ', 'ꣷ'), + ('ꣻ', 'ꣻ'), + ('ꣽ', 'ꣾ'), + ('ꤊ', 'ꤥ'), + ('ꤰ', 'ꥆ'), + ('ꥠ', 'ꥼ'), + ('ꦄ', 'ꦲ'), + ('ꧏ', 'ꧏ'), + ('ꧠ', 'ꧤ'), + ('ꧦ', 'ꧯ'), + ('ꧺ', 'ꧾ'), + ('ꨀ', 'ꨨ'), + ('ꩀ', 'ꩂ'), + ('ꩄ', 'ꩋ'), + ('ꩠ', 'ꩶ'), + ('ꩺ', 'ꩺ'), + ('ꩾ', 'ꪯ'), + ('ꪱ', 'ꪱ'), + ('ꪵ', 'ꪶ'), + ('ꪹ', 'ꪽ'), + ('ꫀ', 'ꫀ'), + ('ꫂ', 'ꫂ'), + ('ꫛ', 'ꫝ'), + ('ꫠ', 'ꫪ'), + ('ꫲ', 'ꫴ'), + ('ꬁ', 'ꬆ'), + ('ꬉ', 'ꬎ'), + ('ꬑ', 'ꬖ'), + ('ꬠ', 'ꬦ'), + ('ꬨ', 'ꬮ'), + ('ꬰ', 'ꭚ'), + ('ꭜ', 'ꭩ'), + ('ꭰ', 'ꯢ'), + ('가', '힣'), + ('ힰ', 'ퟆ'), + ('ퟋ', 'ퟻ'), + ('豈', '舘'), + ('並', '龎'), + ('ff', 'st'), + ('ﬓ', 'ﬗ'), + ('יִ', 'יִ'), + ('ײַ', 'ﬨ'), + ('שׁ', 'זּ'), + ('טּ', 'לּ'), + ('מּ', 'מּ'), + ('נּ', 'סּ'), + ('ףּ', 'פּ'), + ('צּ', 'ﮱ'), + ('ﯓ', 'ﱝ'), + ('ﱤ', 'ﴽ'), + ('ﵐ', 'ﶏ'), + ('ﶒ', 'ﷇ'), + ('ﷰ', 'ﷹ'), + ('ﹱ', 'ﹱ'), + ('ﹳ', 'ﹳ'), + ('ﹷ', 'ﹷ'), + ('ﹹ', 'ﹹ'), + ('ﹻ', 'ﹻ'), + ('ﹽ', 'ﹽ'), + ('ﹿ', 'ﻼ'), + ('A', 'Z'), + ('a', 'z'), + ('ヲ', 'ン'), + ('ᅠ', 'ᄒ'), + ('ᅡ', 'ᅦ'), + ('ᅧ', 'ᅬ'), + ('ᅭ', 'ᅲ'), + ('ᅳ', 'ᅵ'), + ('𐀀', '𐀋'), + ('𐀍', '𐀦'), + ('𐀨', '𐀺'), + ('𐀼', '𐀽'), + ('𐀿', '𐁍'), + ('𐁐', '𐁝'), + ('𐂀', '𐃺'), + ('𐅀', '𐅴'), + ('𐊀', '𐊜'), + ('𐊠', '𐋐'), + ('𐌀', '𐌟'), + ('𐌭', '𐍊'), + ('𐍐', '𐍵'), + ('𐎀', '𐎝'), + ('𐎠', '𐏃'), + ('𐏈', '𐏏'), + ('𐏑', 
'𐏕'), + ('𐐀', '𐒝'), + ('𐒰', '𐓓'), + ('𐓘', '𐓻'), + ('𐔀', '𐔧'), + ('𐔰', '𐕣'), + ('𐕰', '𐕺'), + ('𐕼', '𐖊'), + ('𐖌', '𐖒'), + ('𐖔', '𐖕'), + ('𐖗', '𐖡'), + ('𐖣', '𐖱'), + ('𐖳', '𐖹'), + ('𐖻', '𐖼'), + ('𐗀', '𐗳'), + ('𐘀', '𐜶'), + ('𐝀', '𐝕'), + ('𐝠', '𐝧'), + ('𐞀', '𐞅'), + ('𐞇', '𐞰'), + ('𐞲', '𐞺'), + ('𐠀', '𐠅'), + ('𐠈', '𐠈'), + ('𐠊', '𐠵'), + ('𐠷', '𐠸'), + ('𐠼', '𐠼'), + ('𐠿', '𐡕'), + ('𐡠', '𐡶'), + ('𐢀', '𐢞'), + ('𐣠', '𐣲'), + ('𐣴', '𐣵'), + ('𐤀', '𐤕'), + ('𐤠', '𐤹'), + ('𐦀', '𐦷'), + ('𐦾', '𐦿'), + ('𐨀', '𐨀'), + ('𐨐', '𐨓'), + ('𐨕', '𐨗'), + ('𐨙', '𐨵'), + ('𐩠', '𐩼'), + ('𐪀', '𐪜'), + ('𐫀', '𐫇'), + ('𐫉', '𐫤'), + ('𐬀', '𐬵'), + ('𐭀', '𐭕'), + ('𐭠', '𐭲'), + ('𐮀', '𐮑'), + ('𐰀', '𐱈'), + ('𐲀', '𐲲'), + ('𐳀', '𐳲'), + ('𐴀', '𐴣'), + ('𐵊', '𐵥'), + ('𐵯', '𐶅'), + ('𐺀', '𐺩'), + ('𐺰', '𐺱'), + ('𐻂', '𐻄'), + ('𐼀', '𐼜'), + ('𐼧', '𐼧'), + ('𐼰', '𐽅'), + ('𐽰', '𐾁'), + ('𐾰', '𐿄'), + ('𐿠', '𐿶'), + ('𑀃', '𑀷'), + ('𑁱', '𑁲'), + ('𑁵', '𑁵'), + ('𑂃', '𑂯'), + ('𑃐', '𑃨'), + ('𑄃', '𑄦'), + ('𑅄', '𑅄'), + ('𑅇', '𑅇'), + ('𑅐', '𑅲'), + ('𑅶', '𑅶'), + ('𑆃', '𑆲'), + ('𑇁', '𑇄'), + ('𑇚', '𑇚'), + ('𑇜', '𑇜'), + ('𑈀', '𑈑'), + ('𑈓', '𑈫'), + ('𑈿', '𑉀'), + ('𑊀', '𑊆'), + ('𑊈', '𑊈'), + ('𑊊', '𑊍'), + ('𑊏', '𑊝'), + ('𑊟', '𑊨'), + ('𑊰', '𑋞'), + ('𑌅', '𑌌'), + ('𑌏', '𑌐'), + ('𑌓', '𑌨'), + ('𑌪', '𑌰'), + ('𑌲', '𑌳'), + ('𑌵', '𑌹'), + ('𑌽', '𑌽'), + ('𑍐', '𑍐'), + ('𑍝', '𑍡'), + ('𑎀', '𑎉'), + ('𑎋', '𑎋'), + ('𑎎', '𑎎'), + ('𑎐', '𑎵'), + ('𑎷', '𑎷'), + ('𑏑', '𑏑'), + ('𑏓', '𑏓'), + ('𑐀', '𑐴'), + ('𑑇', '𑑊'), + ('𑑟', '𑑡'), + ('𑒀', '𑒯'), + ('𑓄', '𑓅'), + ('𑓇', '𑓇'), + ('𑖀', '𑖮'), + ('𑗘', '𑗛'), + ('𑘀', '𑘯'), + ('𑙄', '𑙄'), + ('𑚀', '𑚪'), + ('𑚸', '𑚸'), + ('𑜀', '𑜚'), + ('𑝀', '𑝆'), + ('𑠀', '𑠫'), + ('𑢠', '𑣟'), + ('𑣿', '𑤆'), + ('𑤉', '𑤉'), + ('𑤌', '𑤓'), + ('𑤕', '𑤖'), + ('𑤘', '𑤯'), + ('𑤿', '𑤿'), + ('𑥁', '𑥁'), + ('𑦠', '𑦧'), + ('𑦪', '𑧐'), + ('𑧡', '𑧡'), + ('𑧣', '𑧣'), + ('𑨀', '𑨀'), + ('𑨋', '𑨲'), + ('𑨺', '𑨺'), + ('𑩐', '𑩐'), + ('𑩜', '𑪉'), + ('𑪝', '𑪝'), + ('𑪰', '𑫸'), + ('𑯀', '𑯠'), + ('𑰀', '𑰈'), + ('𑰊', '𑰮'), + ('𑱀', '𑱀'), + ('𑱲', '𑲏'), + ('𑴀', '𑴆'), + ('𑴈', '𑴉'), + ('𑴋', '𑴰'), + ('𑵆', '𑵆'), + ('𑵠', '𑵥'), + ('𑵧', '𑵨'), + ('𑵪', '𑶉'), + ('𑶘', '𑶘'), + ('𑻠', '𑻲'), + ('𑼂', '𑼂'), + ('𑼄', '𑼐'), + ('𑼒', '𑼳'), + ('𑾰', '𑾰'), + ('𒀀', '𒎙'), + ('𒐀', '𒑮'), + ('𒒀', '𒕃'), + ('𒾐', '𒿰'), + ('𓀀', '𓐯'), + ('𓑁', '𓑆'), + ('𓑠', '𔏺'), + ('𔐀', '𔙆'), + ('𖄀', '𖄝'), + ('𖠀', '𖨸'), + ('𖩀', '𖩞'), + ('𖩰', '𖪾'), + ('𖫐', '𖫭'), + ('𖬀', '𖬯'), + ('𖭀', '𖭃'), + ('𖭣', '𖭷'), + ('𖭽', '𖮏'), + ('𖵀', '𖵬'), + ('𖹀', '𖹿'), + ('𖼀', '𖽊'), + ('𖽐', '𖽐'), + ('𖾓', '𖾟'), + ('𖿠', '𖿡'), + ('𖿣', '𖿣'), + ('𗀀', '𘟷'), + ('𘠀', '𘳕'), + ('𘳿', '𘴈'), + ('𚿰', '𚿳'), + ('𚿵', '𚿻'), + ('𚿽', '𚿾'), + ('𛀀', '𛄢'), + ('𛄲', '𛄲'), + ('𛅐', '𛅒'), + ('𛅕', '𛅕'), + ('𛅤', '𛅧'), + ('𛅰', '𛋻'), + ('𛰀', '𛱪'), + ('𛱰', '𛱼'), + ('𛲀', '𛲈'), + ('𛲐', '𛲙'), + ('𝐀', '𝑔'), + ('𝑖', '𝒜'), + ('𝒞', '𝒟'), + ('𝒢', '𝒢'), + ('𝒥', '𝒦'), + ('𝒩', '𝒬'), + ('𝒮', '𝒹'), + ('𝒻', '𝒻'), + ('𝒽', '𝓃'), + ('𝓅', '𝔅'), + ('𝔇', '𝔊'), + ('𝔍', '𝔔'), + ('𝔖', '𝔜'), + ('𝔞', '𝔹'), + ('𝔻', '𝔾'), + ('𝕀', '𝕄'), + ('𝕆', '𝕆'), + ('𝕊', '𝕐'), + ('𝕒', '𝚥'), + ('𝚨', '𝛀'), + ('𝛂', '𝛚'), + ('𝛜', '𝛺'), + ('𝛼', '𝜔'), + ('𝜖', '𝜴'), + ('𝜶', '𝝎'), + ('𝝐', '𝝮'), + ('𝝰', '𝞈'), + ('𝞊', '𝞨'), + ('𝞪', '𝟂'), + ('𝟄', '𝟋'), + ('𝼀', '𝼞'), + ('𝼥', '𝼪'), + ('𞀰', '𞁭'), + ('𞄀', '𞄬'), + ('𞄷', '𞄽'), + ('𞅎', '𞅎'), + ('𞊐', '𞊭'), + ('𞋀', '𞋫'), + ('𞓐', '𞓫'), + ('𞗐', '𞗭'), + ('𞗰', '𞗰'), + ('𞟠', '𞟦'), + ('𞟨', '𞟫'), + ('𞟭', '𞟮'), + ('𞟰', '𞟾'), + ('𞠀', '𞣄'), + ('𞤀', '𞥃'), + ('𞥋', '𞥋'), + ('𞸀', '𞸃'), + ('𞸅', '𞸟'), + ('𞸡', '𞸢'), + ('𞸤', '𞸤'), + ('𞸧', '𞸧'), + ('𞸩', '𞸲'), + ('𞸴', '𞸷'), + ('𞸹', '𞸹'), + ('𞸻', '𞸻'), + ('𞹂', '𞹂'), + ('𞹇', '𞹇'), + ('𞹉', '𞹉'), + ('𞹋', '𞹋'), + 
('𞹍', '𞹏'), + ('𞹑', '𞹒'), + ('𞹔', '𞹔'), + ('𞹗', '𞹗'), + ('𞹙', '𞹙'), + ('𞹛', '𞹛'), + ('𞹝', '𞹝'), + ('𞹟', '𞹟'), + ('𞹡', '𞹢'), + ('𞹤', '𞹤'), + ('𞹧', '𞹪'), + ('𞹬', '𞹲'), + ('𞹴', '𞹷'), + ('𞹹', '𞹼'), + ('𞹾', '𞹾'), + ('𞺀', '𞺉'), + ('𞺋', '𞺛'), + ('𞺡', '𞺣'), + ('𞺥', '𞺩'), + ('𞺫', '𞺻'), + ('𠀀', '𪛟'), + ('𪜀', '𫜹'), + ('𫝀', '𫠝'), + ('𫠠', '𬺡'), + ('𬺰', '𮯠'), + ('𮯰', '𮹝'), + ('丽', '𪘀'), + ('𰀀', '𱍊'), + ('𱍐', '𲎯'), +]; diff --git a/vendor/regex-syntax/src/unicode_tables/property_names.rs b/vendor/regex-syntax/src/unicode_tables/property_names.rs new file mode 100644 index 00000000000000..a27b49133d33ac --- /dev/null +++ b/vendor/regex-syntax/src/unicode_tables/property_names.rs @@ -0,0 +1,281 @@ +// DO NOT EDIT THIS FILE. IT WAS AUTOMATICALLY GENERATED BY: +// +// ucd-generate property-names ucd-16.0.0 +// +// Unicode version: 16.0.0. +// +// ucd-generate 0.3.1 is available on crates.io. + +pub const PROPERTY_NAMES: &'static [(&'static str, &'static str)] = &[ + ("age", "Age"), + ("ahex", "ASCII_Hex_Digit"), + ("alpha", "Alphabetic"), + ("alphabetic", "Alphabetic"), + ("asciihexdigit", "ASCII_Hex_Digit"), + ("bc", "Bidi_Class"), + ("bidic", "Bidi_Control"), + ("bidiclass", "Bidi_Class"), + ("bidicontrol", "Bidi_Control"), + ("bidim", "Bidi_Mirrored"), + ("bidimirrored", "Bidi_Mirrored"), + ("bidimirroringglyph", "Bidi_Mirroring_Glyph"), + ("bidipairedbracket", "Bidi_Paired_Bracket"), + ("bidipairedbrackettype", "Bidi_Paired_Bracket_Type"), + ("blk", "Block"), + ("block", "Block"), + ("bmg", "Bidi_Mirroring_Glyph"), + ("bpb", "Bidi_Paired_Bracket"), + ("bpt", "Bidi_Paired_Bracket_Type"), + ("canonicalcombiningclass", "Canonical_Combining_Class"), + ("cased", "Cased"), + ("casefolding", "Case_Folding"), + ("caseignorable", "Case_Ignorable"), + ("ccc", "Canonical_Combining_Class"), + ("ce", "Composition_Exclusion"), + ("cf", "Case_Folding"), + ("changeswhencasefolded", "Changes_When_Casefolded"), + ("changeswhencasemapped", "Changes_When_Casemapped"), + ("changeswhenlowercased", "Changes_When_Lowercased"), + ("changeswhennfkccasefolded", "Changes_When_NFKC_Casefolded"), + ("changeswhentitlecased", "Changes_When_Titlecased"), + ("changeswhenuppercased", "Changes_When_Uppercased"), + ("ci", "Case_Ignorable"), + ("cjkaccountingnumeric", "kAccountingNumeric"), + ("cjkcompatibilityvariant", "kCompatibilityVariant"), + ("cjkiicore", "kIICore"), + ("cjkirggsource", "kIRG_GSource"), + ("cjkirghsource", "kIRG_HSource"), + ("cjkirgjsource", "kIRG_JSource"), + ("cjkirgkpsource", "kIRG_KPSource"), + ("cjkirgksource", "kIRG_KSource"), + ("cjkirgmsource", "kIRG_MSource"), + ("cjkirgssource", "kIRG_SSource"), + ("cjkirgtsource", "kIRG_TSource"), + ("cjkirguksource", "kIRG_UKSource"), + ("cjkirgusource", "kIRG_USource"), + ("cjkirgvsource", "kIRG_VSource"), + ("cjkothernumeric", "kOtherNumeric"), + ("cjkprimarynumeric", "kPrimaryNumeric"), + ("cjkrsunicode", "kRSUnicode"), + ("compex", "Full_Composition_Exclusion"), + ("compositionexclusion", "Composition_Exclusion"), + ("cwcf", "Changes_When_Casefolded"), + ("cwcm", "Changes_When_Casemapped"), + ("cwkcf", "Changes_When_NFKC_Casefolded"), + ("cwl", "Changes_When_Lowercased"), + ("cwt", "Changes_When_Titlecased"), + ("cwu", "Changes_When_Uppercased"), + ("dash", "Dash"), + ("decompositionmapping", "Decomposition_Mapping"), + ("decompositiontype", "Decomposition_Type"), + ("defaultignorablecodepoint", "Default_Ignorable_Code_Point"), + ("dep", "Deprecated"), + ("deprecated", "Deprecated"), + ("di", "Default_Ignorable_Code_Point"), + ("dia", "Diacritic"), + ("diacritic", 
"Diacritic"), + ("dm", "Decomposition_Mapping"), + ("dt", "Decomposition_Type"), + ("ea", "East_Asian_Width"), + ("eastasianwidth", "East_Asian_Width"), + ("ebase", "Emoji_Modifier_Base"), + ("ecomp", "Emoji_Component"), + ("emod", "Emoji_Modifier"), + ("emoji", "Emoji"), + ("emojicomponent", "Emoji_Component"), + ("emojimodifier", "Emoji_Modifier"), + ("emojimodifierbase", "Emoji_Modifier_Base"), + ("emojipresentation", "Emoji_Presentation"), + ("epres", "Emoji_Presentation"), + ("equideo", "Equivalent_Unified_Ideograph"), + ("equivalentunifiedideograph", "Equivalent_Unified_Ideograph"), + ("expandsonnfc", "Expands_On_NFC"), + ("expandsonnfd", "Expands_On_NFD"), + ("expandsonnfkc", "Expands_On_NFKC"), + ("expandsonnfkd", "Expands_On_NFKD"), + ("ext", "Extender"), + ("extendedpictographic", "Extended_Pictographic"), + ("extender", "Extender"), + ("extpict", "Extended_Pictographic"), + ("fcnfkc", "FC_NFKC_Closure"), + ("fcnfkcclosure", "FC_NFKC_Closure"), + ("fullcompositionexclusion", "Full_Composition_Exclusion"), + ("gc", "General_Category"), + ("gcb", "Grapheme_Cluster_Break"), + ("generalcategory", "General_Category"), + ("graphemebase", "Grapheme_Base"), + ("graphemeclusterbreak", "Grapheme_Cluster_Break"), + ("graphemeextend", "Grapheme_Extend"), + ("graphemelink", "Grapheme_Link"), + ("grbase", "Grapheme_Base"), + ("grext", "Grapheme_Extend"), + ("grlink", "Grapheme_Link"), + ("hangulsyllabletype", "Hangul_Syllable_Type"), + ("hex", "Hex_Digit"), + ("hexdigit", "Hex_Digit"), + ("hst", "Hangul_Syllable_Type"), + ("hyphen", "Hyphen"), + ("idc", "ID_Continue"), + ("idcompatmathcontinue", "ID_Compat_Math_Continue"), + ("idcompatmathstart", "ID_Compat_Math_Start"), + ("idcontinue", "ID_Continue"), + ("ideo", "Ideographic"), + ("ideographic", "Ideographic"), + ("ids", "ID_Start"), + ("idsb", "IDS_Binary_Operator"), + ("idsbinaryoperator", "IDS_Binary_Operator"), + ("idst", "IDS_Trinary_Operator"), + ("idstart", "ID_Start"), + ("idstrinaryoperator", "IDS_Trinary_Operator"), + ("idsu", "IDS_Unary_Operator"), + ("idsunaryoperator", "IDS_Unary_Operator"), + ("incb", "Indic_Conjunct_Break"), + ("indicconjunctbreak", "Indic_Conjunct_Break"), + ("indicpositionalcategory", "Indic_Positional_Category"), + ("indicsyllabiccategory", "Indic_Syllabic_Category"), + ("inpc", "Indic_Positional_Category"), + ("insc", "Indic_Syllabic_Category"), + ("isc", "ISO_Comment"), + ("jamoshortname", "Jamo_Short_Name"), + ("jg", "Joining_Group"), + ("joinc", "Join_Control"), + ("joincontrol", "Join_Control"), + ("joininggroup", "Joining_Group"), + ("joiningtype", "Joining_Type"), + ("jsn", "Jamo_Short_Name"), + ("jt", "Joining_Type"), + ("kaccountingnumeric", "kAccountingNumeric"), + ("kcompatibilityvariant", "kCompatibilityVariant"), + ("kehcat", "kEH_Cat"), + ("kehdesc", "kEH_Desc"), + ("kehhg", "kEH_HG"), + ("kehifao", "kEH_IFAO"), + ("kehjsesh", "kEH_JSesh"), + ("kehnomirror", "kEH_NoMirror"), + ("kehnorotate", "kEH_NoRotate"), + ("kiicore", "kIICore"), + ("kirggsource", "kIRG_GSource"), + ("kirghsource", "kIRG_HSource"), + ("kirgjsource", "kIRG_JSource"), + ("kirgkpsource", "kIRG_KPSource"), + ("kirgksource", "kIRG_KSource"), + ("kirgmsource", "kIRG_MSource"), + ("kirgssource", "kIRG_SSource"), + ("kirgtsource", "kIRG_TSource"), + ("kirguksource", "kIRG_UKSource"), + ("kirgusource", "kIRG_USource"), + ("kirgvsource", "kIRG_VSource"), + ("kothernumeric", "kOtherNumeric"), + ("kprimarynumeric", "kPrimaryNumeric"), + ("krsunicode", "kRSUnicode"), + ("lb", "Line_Break"), + ("lc", "Lowercase_Mapping"), + 
("linebreak", "Line_Break"), + ("loe", "Logical_Order_Exception"), + ("logicalorderexception", "Logical_Order_Exception"), + ("lower", "Lowercase"), + ("lowercase", "Lowercase"), + ("lowercasemapping", "Lowercase_Mapping"), + ("math", "Math"), + ("mcm", "Modifier_Combining_Mark"), + ("modifiercombiningmark", "Modifier_Combining_Mark"), + ("na", "Name"), + ("na1", "Unicode_1_Name"), + ("name", "Name"), + ("namealias", "Name_Alias"), + ("nchar", "Noncharacter_Code_Point"), + ("nfcqc", "NFC_Quick_Check"), + ("nfcquickcheck", "NFC_Quick_Check"), + ("nfdqc", "NFD_Quick_Check"), + ("nfdquickcheck", "NFD_Quick_Check"), + ("nfkccasefold", "NFKC_Casefold"), + ("nfkccf", "NFKC_Casefold"), + ("nfkcqc", "NFKC_Quick_Check"), + ("nfkcquickcheck", "NFKC_Quick_Check"), + ("nfkcscf", "NFKC_Simple_Casefold"), + ("nfkcsimplecasefold", "NFKC_Simple_Casefold"), + ("nfkdqc", "NFKD_Quick_Check"), + ("nfkdquickcheck", "NFKD_Quick_Check"), + ("noncharactercodepoint", "Noncharacter_Code_Point"), + ("nt", "Numeric_Type"), + ("numerictype", "Numeric_Type"), + ("numericvalue", "Numeric_Value"), + ("nv", "Numeric_Value"), + ("oalpha", "Other_Alphabetic"), + ("ocomment", "ISO_Comment"), + ("odi", "Other_Default_Ignorable_Code_Point"), + ("ogrext", "Other_Grapheme_Extend"), + ("oidc", "Other_ID_Continue"), + ("oids", "Other_ID_Start"), + ("olower", "Other_Lowercase"), + ("omath", "Other_Math"), + ("otheralphabetic", "Other_Alphabetic"), + ("otherdefaultignorablecodepoint", "Other_Default_Ignorable_Code_Point"), + ("othergraphemeextend", "Other_Grapheme_Extend"), + ("otheridcontinue", "Other_ID_Continue"), + ("otheridstart", "Other_ID_Start"), + ("otherlowercase", "Other_Lowercase"), + ("othermath", "Other_Math"), + ("otheruppercase", "Other_Uppercase"), + ("oupper", "Other_Uppercase"), + ("patsyn", "Pattern_Syntax"), + ("patternsyntax", "Pattern_Syntax"), + ("patternwhitespace", "Pattern_White_Space"), + ("patws", "Pattern_White_Space"), + ("pcm", "Prepended_Concatenation_Mark"), + ("prependedconcatenationmark", "Prepended_Concatenation_Mark"), + ("qmark", "Quotation_Mark"), + ("quotationmark", "Quotation_Mark"), + ("radical", "Radical"), + ("regionalindicator", "Regional_Indicator"), + ("ri", "Regional_Indicator"), + ("sb", "Sentence_Break"), + ("sc", "Script"), + ("scf", "Simple_Case_Folding"), + ("script", "Script"), + ("scriptextensions", "Script_Extensions"), + ("scx", "Script_Extensions"), + ("sd", "Soft_Dotted"), + ("sentencebreak", "Sentence_Break"), + ("sentenceterminal", "Sentence_Terminal"), + ("sfc", "Simple_Case_Folding"), + ("simplecasefolding", "Simple_Case_Folding"), + ("simplelowercasemapping", "Simple_Lowercase_Mapping"), + ("simpletitlecasemapping", "Simple_Titlecase_Mapping"), + ("simpleuppercasemapping", "Simple_Uppercase_Mapping"), + ("slc", "Simple_Lowercase_Mapping"), + ("softdotted", "Soft_Dotted"), + ("space", "White_Space"), + ("stc", "Simple_Titlecase_Mapping"), + ("sterm", "Sentence_Terminal"), + ("suc", "Simple_Uppercase_Mapping"), + ("tc", "Titlecase_Mapping"), + ("term", "Terminal_Punctuation"), + ("terminalpunctuation", "Terminal_Punctuation"), + ("titlecasemapping", "Titlecase_Mapping"), + ("uc", "Uppercase_Mapping"), + ("uideo", "Unified_Ideograph"), + ("unicode1name", "Unicode_1_Name"), + ("unicoderadicalstroke", "kRSUnicode"), + ("unifiedideograph", "Unified_Ideograph"), + ("upper", "Uppercase"), + ("uppercase", "Uppercase"), + ("uppercasemapping", "Uppercase_Mapping"), + ("urs", "kRSUnicode"), + ("variationselector", "Variation_Selector"), + ("verticalorientation", 
"Vertical_Orientation"), + ("vo", "Vertical_Orientation"), + ("vs", "Variation_Selector"), + ("wb", "Word_Break"), + ("whitespace", "White_Space"), + ("wordbreak", "Word_Break"), + ("wspace", "White_Space"), + ("xidc", "XID_Continue"), + ("xidcontinue", "XID_Continue"), + ("xids", "XID_Start"), + ("xidstart", "XID_Start"), + ("xonfc", "Expands_On_NFC"), + ("xonfd", "Expands_On_NFD"), + ("xonfkc", "Expands_On_NFKC"), + ("xonfkd", "Expands_On_NFKD"), +]; diff --git a/vendor/regex-syntax/src/unicode_tables/property_values.rs b/vendor/regex-syntax/src/unicode_tables/property_values.rs new file mode 100644 index 00000000000000..2270d66383735d --- /dev/null +++ b/vendor/regex-syntax/src/unicode_tables/property_values.rs @@ -0,0 +1,956 @@ +// DO NOT EDIT THIS FILE. IT WAS AUTOMATICALLY GENERATED BY: +// +// ucd-generate property-values ucd-16.0.0 --include gc,script,scx,age,gcb,wb,sb +// +// Unicode version: 16.0.0. +// +// ucd-generate 0.3.1 is available on crates.io. + +pub const PROPERTY_VALUES: &'static [( + &'static str, + &'static [(&'static str, &'static str)], +)] = &[ + ( + "Age", + &[ + ("1.1", "V1_1"), + ("10.0", "V10_0"), + ("11.0", "V11_0"), + ("12.0", "V12_0"), + ("12.1", "V12_1"), + ("13.0", "V13_0"), + ("14.0", "V14_0"), + ("15.0", "V15_0"), + ("15.1", "V15_1"), + ("16.0", "V16_0"), + ("2.0", "V2_0"), + ("2.1", "V2_1"), + ("3.0", "V3_0"), + ("3.1", "V3_1"), + ("3.2", "V3_2"), + ("4.0", "V4_0"), + ("4.1", "V4_1"), + ("5.0", "V5_0"), + ("5.1", "V5_1"), + ("5.2", "V5_2"), + ("6.0", "V6_0"), + ("6.1", "V6_1"), + ("6.2", "V6_2"), + ("6.3", "V6_3"), + ("7.0", "V7_0"), + ("8.0", "V8_0"), + ("9.0", "V9_0"), + ("na", "Unassigned"), + ("unassigned", "Unassigned"), + ("v100", "V10_0"), + ("v11", "V1_1"), + ("v110", "V11_0"), + ("v120", "V12_0"), + ("v121", "V12_1"), + ("v130", "V13_0"), + ("v140", "V14_0"), + ("v150", "V15_0"), + ("v151", "V15_1"), + ("v160", "V16_0"), + ("v20", "V2_0"), + ("v21", "V2_1"), + ("v30", "V3_0"), + ("v31", "V3_1"), + ("v32", "V3_2"), + ("v40", "V4_0"), + ("v41", "V4_1"), + ("v50", "V5_0"), + ("v51", "V5_1"), + ("v52", "V5_2"), + ("v60", "V6_0"), + ("v61", "V6_1"), + ("v62", "V6_2"), + ("v63", "V6_3"), + ("v70", "V7_0"), + ("v80", "V8_0"), + ("v90", "V9_0"), + ], + ), + ( + "General_Category", + &[ + ("c", "Other"), + ("casedletter", "Cased_Letter"), + ("cc", "Control"), + ("cf", "Format"), + ("closepunctuation", "Close_Punctuation"), + ("cn", "Unassigned"), + ("cntrl", "Control"), + ("co", "Private_Use"), + ("combiningmark", "Mark"), + ("connectorpunctuation", "Connector_Punctuation"), + ("control", "Control"), + ("cs", "Surrogate"), + ("currencysymbol", "Currency_Symbol"), + ("dashpunctuation", "Dash_Punctuation"), + ("decimalnumber", "Decimal_Number"), + ("digit", "Decimal_Number"), + ("enclosingmark", "Enclosing_Mark"), + ("finalpunctuation", "Final_Punctuation"), + ("format", "Format"), + ("initialpunctuation", "Initial_Punctuation"), + ("l", "Letter"), + ("lc", "Cased_Letter"), + ("letter", "Letter"), + ("letternumber", "Letter_Number"), + ("lineseparator", "Line_Separator"), + ("ll", "Lowercase_Letter"), + ("lm", "Modifier_Letter"), + ("lo", "Other_Letter"), + ("lowercaseletter", "Lowercase_Letter"), + ("lt", "Titlecase_Letter"), + ("lu", "Uppercase_Letter"), + ("m", "Mark"), + ("mark", "Mark"), + ("mathsymbol", "Math_Symbol"), + ("mc", "Spacing_Mark"), + ("me", "Enclosing_Mark"), + ("mn", "Nonspacing_Mark"), + ("modifierletter", "Modifier_Letter"), + ("modifiersymbol", "Modifier_Symbol"), + ("n", "Number"), + ("nd", "Decimal_Number"), + ("nl", 
"Letter_Number"), + ("no", "Other_Number"), + ("nonspacingmark", "Nonspacing_Mark"), + ("number", "Number"), + ("openpunctuation", "Open_Punctuation"), + ("other", "Other"), + ("otherletter", "Other_Letter"), + ("othernumber", "Other_Number"), + ("otherpunctuation", "Other_Punctuation"), + ("othersymbol", "Other_Symbol"), + ("p", "Punctuation"), + ("paragraphseparator", "Paragraph_Separator"), + ("pc", "Connector_Punctuation"), + ("pd", "Dash_Punctuation"), + ("pe", "Close_Punctuation"), + ("pf", "Final_Punctuation"), + ("pi", "Initial_Punctuation"), + ("po", "Other_Punctuation"), + ("privateuse", "Private_Use"), + ("ps", "Open_Punctuation"), + ("punct", "Punctuation"), + ("punctuation", "Punctuation"), + ("s", "Symbol"), + ("sc", "Currency_Symbol"), + ("separator", "Separator"), + ("sk", "Modifier_Symbol"), + ("sm", "Math_Symbol"), + ("so", "Other_Symbol"), + ("spaceseparator", "Space_Separator"), + ("spacingmark", "Spacing_Mark"), + ("surrogate", "Surrogate"), + ("symbol", "Symbol"), + ("titlecaseletter", "Titlecase_Letter"), + ("unassigned", "Unassigned"), + ("uppercaseletter", "Uppercase_Letter"), + ("z", "Separator"), + ("zl", "Line_Separator"), + ("zp", "Paragraph_Separator"), + ("zs", "Space_Separator"), + ], + ), + ( + "Grapheme_Cluster_Break", + &[ + ("cn", "Control"), + ("control", "Control"), + ("cr", "CR"), + ("eb", "E_Base"), + ("ebase", "E_Base"), + ("ebasegaz", "E_Base_GAZ"), + ("ebg", "E_Base_GAZ"), + ("em", "E_Modifier"), + ("emodifier", "E_Modifier"), + ("ex", "Extend"), + ("extend", "Extend"), + ("gaz", "Glue_After_Zwj"), + ("glueafterzwj", "Glue_After_Zwj"), + ("l", "L"), + ("lf", "LF"), + ("lv", "LV"), + ("lvt", "LVT"), + ("other", "Other"), + ("pp", "Prepend"), + ("prepend", "Prepend"), + ("regionalindicator", "Regional_Indicator"), + ("ri", "Regional_Indicator"), + ("sm", "SpacingMark"), + ("spacingmark", "SpacingMark"), + ("t", "T"), + ("v", "V"), + ("xx", "Other"), + ("zwj", "ZWJ"), + ], + ), + ( + "Script", + &[ + ("adlam", "Adlam"), + ("adlm", "Adlam"), + ("aghb", "Caucasian_Albanian"), + ("ahom", "Ahom"), + ("anatolianhieroglyphs", "Anatolian_Hieroglyphs"), + ("arab", "Arabic"), + ("arabic", "Arabic"), + ("armenian", "Armenian"), + ("armi", "Imperial_Aramaic"), + ("armn", "Armenian"), + ("avestan", "Avestan"), + ("avst", "Avestan"), + ("bali", "Balinese"), + ("balinese", "Balinese"), + ("bamu", "Bamum"), + ("bamum", "Bamum"), + ("bass", "Bassa_Vah"), + ("bassavah", "Bassa_Vah"), + ("batak", "Batak"), + ("batk", "Batak"), + ("beng", "Bengali"), + ("bengali", "Bengali"), + ("bhaiksuki", "Bhaiksuki"), + ("bhks", "Bhaiksuki"), + ("bopo", "Bopomofo"), + ("bopomofo", "Bopomofo"), + ("brah", "Brahmi"), + ("brahmi", "Brahmi"), + ("brai", "Braille"), + ("braille", "Braille"), + ("bugi", "Buginese"), + ("buginese", "Buginese"), + ("buhd", "Buhid"), + ("buhid", "Buhid"), + ("cakm", "Chakma"), + ("canadianaboriginal", "Canadian_Aboriginal"), + ("cans", "Canadian_Aboriginal"), + ("cari", "Carian"), + ("carian", "Carian"), + ("caucasianalbanian", "Caucasian_Albanian"), + ("chakma", "Chakma"), + ("cham", "Cham"), + ("cher", "Cherokee"), + ("cherokee", "Cherokee"), + ("chorasmian", "Chorasmian"), + ("chrs", "Chorasmian"), + ("common", "Common"), + ("copt", "Coptic"), + ("coptic", "Coptic"), + ("cpmn", "Cypro_Minoan"), + ("cprt", "Cypriot"), + ("cuneiform", "Cuneiform"), + ("cypriot", "Cypriot"), + ("cyprominoan", "Cypro_Minoan"), + ("cyrillic", "Cyrillic"), + ("cyrl", "Cyrillic"), + ("deseret", "Deseret"), + ("deva", "Devanagari"), + ("devanagari", "Devanagari"), + ("diak", 
"Dives_Akuru"), + ("divesakuru", "Dives_Akuru"), + ("dogr", "Dogra"), + ("dogra", "Dogra"), + ("dsrt", "Deseret"), + ("dupl", "Duployan"), + ("duployan", "Duployan"), + ("egyp", "Egyptian_Hieroglyphs"), + ("egyptianhieroglyphs", "Egyptian_Hieroglyphs"), + ("elba", "Elbasan"), + ("elbasan", "Elbasan"), + ("elym", "Elymaic"), + ("elymaic", "Elymaic"), + ("ethi", "Ethiopic"), + ("ethiopic", "Ethiopic"), + ("gara", "Garay"), + ("garay", "Garay"), + ("geor", "Georgian"), + ("georgian", "Georgian"), + ("glag", "Glagolitic"), + ("glagolitic", "Glagolitic"), + ("gong", "Gunjala_Gondi"), + ("gonm", "Masaram_Gondi"), + ("goth", "Gothic"), + ("gothic", "Gothic"), + ("gran", "Grantha"), + ("grantha", "Grantha"), + ("greek", "Greek"), + ("grek", "Greek"), + ("gujarati", "Gujarati"), + ("gujr", "Gujarati"), + ("gukh", "Gurung_Khema"), + ("gunjalagondi", "Gunjala_Gondi"), + ("gurmukhi", "Gurmukhi"), + ("guru", "Gurmukhi"), + ("gurungkhema", "Gurung_Khema"), + ("han", "Han"), + ("hang", "Hangul"), + ("hangul", "Hangul"), + ("hani", "Han"), + ("hanifirohingya", "Hanifi_Rohingya"), + ("hano", "Hanunoo"), + ("hanunoo", "Hanunoo"), + ("hatr", "Hatran"), + ("hatran", "Hatran"), + ("hebr", "Hebrew"), + ("hebrew", "Hebrew"), + ("hira", "Hiragana"), + ("hiragana", "Hiragana"), + ("hluw", "Anatolian_Hieroglyphs"), + ("hmng", "Pahawh_Hmong"), + ("hmnp", "Nyiakeng_Puachue_Hmong"), + ("hrkt", "Katakana_Or_Hiragana"), + ("hung", "Old_Hungarian"), + ("imperialaramaic", "Imperial_Aramaic"), + ("inherited", "Inherited"), + ("inscriptionalpahlavi", "Inscriptional_Pahlavi"), + ("inscriptionalparthian", "Inscriptional_Parthian"), + ("ital", "Old_Italic"), + ("java", "Javanese"), + ("javanese", "Javanese"), + ("kaithi", "Kaithi"), + ("kali", "Kayah_Li"), + ("kana", "Katakana"), + ("kannada", "Kannada"), + ("katakana", "Katakana"), + ("katakanaorhiragana", "Katakana_Or_Hiragana"), + ("kawi", "Kawi"), + ("kayahli", "Kayah_Li"), + ("khar", "Kharoshthi"), + ("kharoshthi", "Kharoshthi"), + ("khitansmallscript", "Khitan_Small_Script"), + ("khmer", "Khmer"), + ("khmr", "Khmer"), + ("khoj", "Khojki"), + ("khojki", "Khojki"), + ("khudawadi", "Khudawadi"), + ("kiratrai", "Kirat_Rai"), + ("kits", "Khitan_Small_Script"), + ("knda", "Kannada"), + ("krai", "Kirat_Rai"), + ("kthi", "Kaithi"), + ("lana", "Tai_Tham"), + ("lao", "Lao"), + ("laoo", "Lao"), + ("latin", "Latin"), + ("latn", "Latin"), + ("lepc", "Lepcha"), + ("lepcha", "Lepcha"), + ("limb", "Limbu"), + ("limbu", "Limbu"), + ("lina", "Linear_A"), + ("linb", "Linear_B"), + ("lineara", "Linear_A"), + ("linearb", "Linear_B"), + ("lisu", "Lisu"), + ("lyci", "Lycian"), + ("lycian", "Lycian"), + ("lydi", "Lydian"), + ("lydian", "Lydian"), + ("mahajani", "Mahajani"), + ("mahj", "Mahajani"), + ("maka", "Makasar"), + ("makasar", "Makasar"), + ("malayalam", "Malayalam"), + ("mand", "Mandaic"), + ("mandaic", "Mandaic"), + ("mani", "Manichaean"), + ("manichaean", "Manichaean"), + ("marc", "Marchen"), + ("marchen", "Marchen"), + ("masaramgondi", "Masaram_Gondi"), + ("medefaidrin", "Medefaidrin"), + ("medf", "Medefaidrin"), + ("meeteimayek", "Meetei_Mayek"), + ("mend", "Mende_Kikakui"), + ("mendekikakui", "Mende_Kikakui"), + ("merc", "Meroitic_Cursive"), + ("mero", "Meroitic_Hieroglyphs"), + ("meroiticcursive", "Meroitic_Cursive"), + ("meroitichieroglyphs", "Meroitic_Hieroglyphs"), + ("miao", "Miao"), + ("mlym", "Malayalam"), + ("modi", "Modi"), + ("mong", "Mongolian"), + ("mongolian", "Mongolian"), + ("mro", "Mro"), + ("mroo", "Mro"), + ("mtei", "Meetei_Mayek"), + ("mult", "Multani"), + 
("multani", "Multani"), + ("myanmar", "Myanmar"), + ("mymr", "Myanmar"), + ("nabataean", "Nabataean"), + ("nagm", "Nag_Mundari"), + ("nagmundari", "Nag_Mundari"), + ("nand", "Nandinagari"), + ("nandinagari", "Nandinagari"), + ("narb", "Old_North_Arabian"), + ("nbat", "Nabataean"), + ("newa", "Newa"), + ("newtailue", "New_Tai_Lue"), + ("nko", "Nko"), + ("nkoo", "Nko"), + ("nshu", "Nushu"), + ("nushu", "Nushu"), + ("nyiakengpuachuehmong", "Nyiakeng_Puachue_Hmong"), + ("ogam", "Ogham"), + ("ogham", "Ogham"), + ("olchiki", "Ol_Chiki"), + ("olck", "Ol_Chiki"), + ("oldhungarian", "Old_Hungarian"), + ("olditalic", "Old_Italic"), + ("oldnortharabian", "Old_North_Arabian"), + ("oldpermic", "Old_Permic"), + ("oldpersian", "Old_Persian"), + ("oldsogdian", "Old_Sogdian"), + ("oldsoutharabian", "Old_South_Arabian"), + ("oldturkic", "Old_Turkic"), + ("olduyghur", "Old_Uyghur"), + ("olonal", "Ol_Onal"), + ("onao", "Ol_Onal"), + ("oriya", "Oriya"), + ("orkh", "Old_Turkic"), + ("orya", "Oriya"), + ("osage", "Osage"), + ("osge", "Osage"), + ("osma", "Osmanya"), + ("osmanya", "Osmanya"), + ("ougr", "Old_Uyghur"), + ("pahawhhmong", "Pahawh_Hmong"), + ("palm", "Palmyrene"), + ("palmyrene", "Palmyrene"), + ("pauc", "Pau_Cin_Hau"), + ("paucinhau", "Pau_Cin_Hau"), + ("perm", "Old_Permic"), + ("phag", "Phags_Pa"), + ("phagspa", "Phags_Pa"), + ("phli", "Inscriptional_Pahlavi"), + ("phlp", "Psalter_Pahlavi"), + ("phnx", "Phoenician"), + ("phoenician", "Phoenician"), + ("plrd", "Miao"), + ("prti", "Inscriptional_Parthian"), + ("psalterpahlavi", "Psalter_Pahlavi"), + ("qaac", "Coptic"), + ("qaai", "Inherited"), + ("rejang", "Rejang"), + ("rjng", "Rejang"), + ("rohg", "Hanifi_Rohingya"), + ("runic", "Runic"), + ("runr", "Runic"), + ("samaritan", "Samaritan"), + ("samr", "Samaritan"), + ("sarb", "Old_South_Arabian"), + ("saur", "Saurashtra"), + ("saurashtra", "Saurashtra"), + ("sgnw", "SignWriting"), + ("sharada", "Sharada"), + ("shavian", "Shavian"), + ("shaw", "Shavian"), + ("shrd", "Sharada"), + ("sidd", "Siddham"), + ("siddham", "Siddham"), + ("signwriting", "SignWriting"), + ("sind", "Khudawadi"), + ("sinh", "Sinhala"), + ("sinhala", "Sinhala"), + ("sogd", "Sogdian"), + ("sogdian", "Sogdian"), + ("sogo", "Old_Sogdian"), + ("sora", "Sora_Sompeng"), + ("sorasompeng", "Sora_Sompeng"), + ("soyo", "Soyombo"), + ("soyombo", "Soyombo"), + ("sund", "Sundanese"), + ("sundanese", "Sundanese"), + ("sunu", "Sunuwar"), + ("sunuwar", "Sunuwar"), + ("sylo", "Syloti_Nagri"), + ("sylotinagri", "Syloti_Nagri"), + ("syrc", "Syriac"), + ("syriac", "Syriac"), + ("tagalog", "Tagalog"), + ("tagb", "Tagbanwa"), + ("tagbanwa", "Tagbanwa"), + ("taile", "Tai_Le"), + ("taitham", "Tai_Tham"), + ("taiviet", "Tai_Viet"), + ("takr", "Takri"), + ("takri", "Takri"), + ("tale", "Tai_Le"), + ("talu", "New_Tai_Lue"), + ("tamil", "Tamil"), + ("taml", "Tamil"), + ("tang", "Tangut"), + ("tangsa", "Tangsa"), + ("tangut", "Tangut"), + ("tavt", "Tai_Viet"), + ("telu", "Telugu"), + ("telugu", "Telugu"), + ("tfng", "Tifinagh"), + ("tglg", "Tagalog"), + ("thaa", "Thaana"), + ("thaana", "Thaana"), + ("thai", "Thai"), + ("tibetan", "Tibetan"), + ("tibt", "Tibetan"), + ("tifinagh", "Tifinagh"), + ("tirh", "Tirhuta"), + ("tirhuta", "Tirhuta"), + ("tnsa", "Tangsa"), + ("todhri", "Todhri"), + ("todr", "Todhri"), + ("toto", "Toto"), + ("tulutigalari", "Tulu_Tigalari"), + ("tutg", "Tulu_Tigalari"), + ("ugar", "Ugaritic"), + ("ugaritic", "Ugaritic"), + ("unknown", "Unknown"), + ("vai", "Vai"), + ("vaii", "Vai"), + ("vith", "Vithkuqi"), + ("vithkuqi", "Vithkuqi"), + 
("wancho", "Wancho"), + ("wara", "Warang_Citi"), + ("warangciti", "Warang_Citi"), + ("wcho", "Wancho"), + ("xpeo", "Old_Persian"), + ("xsux", "Cuneiform"), + ("yezi", "Yezidi"), + ("yezidi", "Yezidi"), + ("yi", "Yi"), + ("yiii", "Yi"), + ("zanabazarsquare", "Zanabazar_Square"), + ("zanb", "Zanabazar_Square"), + ("zinh", "Inherited"), + ("zyyy", "Common"), + ("zzzz", "Unknown"), + ], + ), + ( + "Script_Extensions", + &[ + ("adlam", "Adlam"), + ("adlm", "Adlam"), + ("aghb", "Caucasian_Albanian"), + ("ahom", "Ahom"), + ("anatolianhieroglyphs", "Anatolian_Hieroglyphs"), + ("arab", "Arabic"), + ("arabic", "Arabic"), + ("armenian", "Armenian"), + ("armi", "Imperial_Aramaic"), + ("armn", "Armenian"), + ("avestan", "Avestan"), + ("avst", "Avestan"), + ("bali", "Balinese"), + ("balinese", "Balinese"), + ("bamu", "Bamum"), + ("bamum", "Bamum"), + ("bass", "Bassa_Vah"), + ("bassavah", "Bassa_Vah"), + ("batak", "Batak"), + ("batk", "Batak"), + ("beng", "Bengali"), + ("bengali", "Bengali"), + ("bhaiksuki", "Bhaiksuki"), + ("bhks", "Bhaiksuki"), + ("bopo", "Bopomofo"), + ("bopomofo", "Bopomofo"), + ("brah", "Brahmi"), + ("brahmi", "Brahmi"), + ("brai", "Braille"), + ("braille", "Braille"), + ("bugi", "Buginese"), + ("buginese", "Buginese"), + ("buhd", "Buhid"), + ("buhid", "Buhid"), + ("cakm", "Chakma"), + ("canadianaboriginal", "Canadian_Aboriginal"), + ("cans", "Canadian_Aboriginal"), + ("cari", "Carian"), + ("carian", "Carian"), + ("caucasianalbanian", "Caucasian_Albanian"), + ("chakma", "Chakma"), + ("cham", "Cham"), + ("cher", "Cherokee"), + ("cherokee", "Cherokee"), + ("chorasmian", "Chorasmian"), + ("chrs", "Chorasmian"), + ("common", "Common"), + ("copt", "Coptic"), + ("coptic", "Coptic"), + ("cpmn", "Cypro_Minoan"), + ("cprt", "Cypriot"), + ("cuneiform", "Cuneiform"), + ("cypriot", "Cypriot"), + ("cyprominoan", "Cypro_Minoan"), + ("cyrillic", "Cyrillic"), + ("cyrl", "Cyrillic"), + ("deseret", "Deseret"), + ("deva", "Devanagari"), + ("devanagari", "Devanagari"), + ("diak", "Dives_Akuru"), + ("divesakuru", "Dives_Akuru"), + ("dogr", "Dogra"), + ("dogra", "Dogra"), + ("dsrt", "Deseret"), + ("dupl", "Duployan"), + ("duployan", "Duployan"), + ("egyp", "Egyptian_Hieroglyphs"), + ("egyptianhieroglyphs", "Egyptian_Hieroglyphs"), + ("elba", "Elbasan"), + ("elbasan", "Elbasan"), + ("elym", "Elymaic"), + ("elymaic", "Elymaic"), + ("ethi", "Ethiopic"), + ("ethiopic", "Ethiopic"), + ("gara", "Garay"), + ("garay", "Garay"), + ("geor", "Georgian"), + ("georgian", "Georgian"), + ("glag", "Glagolitic"), + ("glagolitic", "Glagolitic"), + ("gong", "Gunjala_Gondi"), + ("gonm", "Masaram_Gondi"), + ("goth", "Gothic"), + ("gothic", "Gothic"), + ("gran", "Grantha"), + ("grantha", "Grantha"), + ("greek", "Greek"), + ("grek", "Greek"), + ("gujarati", "Gujarati"), + ("gujr", "Gujarati"), + ("gukh", "Gurung_Khema"), + ("gunjalagondi", "Gunjala_Gondi"), + ("gurmukhi", "Gurmukhi"), + ("guru", "Gurmukhi"), + ("gurungkhema", "Gurung_Khema"), + ("han", "Han"), + ("hang", "Hangul"), + ("hangul", "Hangul"), + ("hani", "Han"), + ("hanifirohingya", "Hanifi_Rohingya"), + ("hano", "Hanunoo"), + ("hanunoo", "Hanunoo"), + ("hatr", "Hatran"), + ("hatran", "Hatran"), + ("hebr", "Hebrew"), + ("hebrew", "Hebrew"), + ("hira", "Hiragana"), + ("hiragana", "Hiragana"), + ("hluw", "Anatolian_Hieroglyphs"), + ("hmng", "Pahawh_Hmong"), + ("hmnp", "Nyiakeng_Puachue_Hmong"), + ("hrkt", "Katakana_Or_Hiragana"), + ("hung", "Old_Hungarian"), + ("imperialaramaic", "Imperial_Aramaic"), + ("inherited", "Inherited"), + ("inscriptionalpahlavi", 
"Inscriptional_Pahlavi"), + ("inscriptionalparthian", "Inscriptional_Parthian"), + ("ital", "Old_Italic"), + ("java", "Javanese"), + ("javanese", "Javanese"), + ("kaithi", "Kaithi"), + ("kali", "Kayah_Li"), + ("kana", "Katakana"), + ("kannada", "Kannada"), + ("katakana", "Katakana"), + ("katakanaorhiragana", "Katakana_Or_Hiragana"), + ("kawi", "Kawi"), + ("kayahli", "Kayah_Li"), + ("khar", "Kharoshthi"), + ("kharoshthi", "Kharoshthi"), + ("khitansmallscript", "Khitan_Small_Script"), + ("khmer", "Khmer"), + ("khmr", "Khmer"), + ("khoj", "Khojki"), + ("khojki", "Khojki"), + ("khudawadi", "Khudawadi"), + ("kiratrai", "Kirat_Rai"), + ("kits", "Khitan_Small_Script"), + ("knda", "Kannada"), + ("krai", "Kirat_Rai"), + ("kthi", "Kaithi"), + ("lana", "Tai_Tham"), + ("lao", "Lao"), + ("laoo", "Lao"), + ("latin", "Latin"), + ("latn", "Latin"), + ("lepc", "Lepcha"), + ("lepcha", "Lepcha"), + ("limb", "Limbu"), + ("limbu", "Limbu"), + ("lina", "Linear_A"), + ("linb", "Linear_B"), + ("lineara", "Linear_A"), + ("linearb", "Linear_B"), + ("lisu", "Lisu"), + ("lyci", "Lycian"), + ("lycian", "Lycian"), + ("lydi", "Lydian"), + ("lydian", "Lydian"), + ("mahajani", "Mahajani"), + ("mahj", "Mahajani"), + ("maka", "Makasar"), + ("makasar", "Makasar"), + ("malayalam", "Malayalam"), + ("mand", "Mandaic"), + ("mandaic", "Mandaic"), + ("mani", "Manichaean"), + ("manichaean", "Manichaean"), + ("marc", "Marchen"), + ("marchen", "Marchen"), + ("masaramgondi", "Masaram_Gondi"), + ("medefaidrin", "Medefaidrin"), + ("medf", "Medefaidrin"), + ("meeteimayek", "Meetei_Mayek"), + ("mend", "Mende_Kikakui"), + ("mendekikakui", "Mende_Kikakui"), + ("merc", "Meroitic_Cursive"), + ("mero", "Meroitic_Hieroglyphs"), + ("meroiticcursive", "Meroitic_Cursive"), + ("meroitichieroglyphs", "Meroitic_Hieroglyphs"), + ("miao", "Miao"), + ("mlym", "Malayalam"), + ("modi", "Modi"), + ("mong", "Mongolian"), + ("mongolian", "Mongolian"), + ("mro", "Mro"), + ("mroo", "Mro"), + ("mtei", "Meetei_Mayek"), + ("mult", "Multani"), + ("multani", "Multani"), + ("myanmar", "Myanmar"), + ("mymr", "Myanmar"), + ("nabataean", "Nabataean"), + ("nagm", "Nag_Mundari"), + ("nagmundari", "Nag_Mundari"), + ("nand", "Nandinagari"), + ("nandinagari", "Nandinagari"), + ("narb", "Old_North_Arabian"), + ("nbat", "Nabataean"), + ("newa", "Newa"), + ("newtailue", "New_Tai_Lue"), + ("nko", "Nko"), + ("nkoo", "Nko"), + ("nshu", "Nushu"), + ("nushu", "Nushu"), + ("nyiakengpuachuehmong", "Nyiakeng_Puachue_Hmong"), + ("ogam", "Ogham"), + ("ogham", "Ogham"), + ("olchiki", "Ol_Chiki"), + ("olck", "Ol_Chiki"), + ("oldhungarian", "Old_Hungarian"), + ("olditalic", "Old_Italic"), + ("oldnortharabian", "Old_North_Arabian"), + ("oldpermic", "Old_Permic"), + ("oldpersian", "Old_Persian"), + ("oldsogdian", "Old_Sogdian"), + ("oldsoutharabian", "Old_South_Arabian"), + ("oldturkic", "Old_Turkic"), + ("olduyghur", "Old_Uyghur"), + ("olonal", "Ol_Onal"), + ("onao", "Ol_Onal"), + ("oriya", "Oriya"), + ("orkh", "Old_Turkic"), + ("orya", "Oriya"), + ("osage", "Osage"), + ("osge", "Osage"), + ("osma", "Osmanya"), + ("osmanya", "Osmanya"), + ("ougr", "Old_Uyghur"), + ("pahawhhmong", "Pahawh_Hmong"), + ("palm", "Palmyrene"), + ("palmyrene", "Palmyrene"), + ("pauc", "Pau_Cin_Hau"), + ("paucinhau", "Pau_Cin_Hau"), + ("perm", "Old_Permic"), + ("phag", "Phags_Pa"), + ("phagspa", "Phags_Pa"), + ("phli", "Inscriptional_Pahlavi"), + ("phlp", "Psalter_Pahlavi"), + ("phnx", "Phoenician"), + ("phoenician", "Phoenician"), + ("plrd", "Miao"), + ("prti", "Inscriptional_Parthian"), + ("psalterpahlavi", 
"Psalter_Pahlavi"), + ("qaac", "Coptic"), + ("qaai", "Inherited"), + ("rejang", "Rejang"), + ("rjng", "Rejang"), + ("rohg", "Hanifi_Rohingya"), + ("runic", "Runic"), + ("runr", "Runic"), + ("samaritan", "Samaritan"), + ("samr", "Samaritan"), + ("sarb", "Old_South_Arabian"), + ("saur", "Saurashtra"), + ("saurashtra", "Saurashtra"), + ("sgnw", "SignWriting"), + ("sharada", "Sharada"), + ("shavian", "Shavian"), + ("shaw", "Shavian"), + ("shrd", "Sharada"), + ("sidd", "Siddham"), + ("siddham", "Siddham"), + ("signwriting", "SignWriting"), + ("sind", "Khudawadi"), + ("sinh", "Sinhala"), + ("sinhala", "Sinhala"), + ("sogd", "Sogdian"), + ("sogdian", "Sogdian"), + ("sogo", "Old_Sogdian"), + ("sora", "Sora_Sompeng"), + ("sorasompeng", "Sora_Sompeng"), + ("soyo", "Soyombo"), + ("soyombo", "Soyombo"), + ("sund", "Sundanese"), + ("sundanese", "Sundanese"), + ("sunu", "Sunuwar"), + ("sunuwar", "Sunuwar"), + ("sylo", "Syloti_Nagri"), + ("sylotinagri", "Syloti_Nagri"), + ("syrc", "Syriac"), + ("syriac", "Syriac"), + ("tagalog", "Tagalog"), + ("tagb", "Tagbanwa"), + ("tagbanwa", "Tagbanwa"), + ("taile", "Tai_Le"), + ("taitham", "Tai_Tham"), + ("taiviet", "Tai_Viet"), + ("takr", "Takri"), + ("takri", "Takri"), + ("tale", "Tai_Le"), + ("talu", "New_Tai_Lue"), + ("tamil", "Tamil"), + ("taml", "Tamil"), + ("tang", "Tangut"), + ("tangsa", "Tangsa"), + ("tangut", "Tangut"), + ("tavt", "Tai_Viet"), + ("telu", "Telugu"), + ("telugu", "Telugu"), + ("tfng", "Tifinagh"), + ("tglg", "Tagalog"), + ("thaa", "Thaana"), + ("thaana", "Thaana"), + ("thai", "Thai"), + ("tibetan", "Tibetan"), + ("tibt", "Tibetan"), + ("tifinagh", "Tifinagh"), + ("tirh", "Tirhuta"), + ("tirhuta", "Tirhuta"), + ("tnsa", "Tangsa"), + ("todhri", "Todhri"), + ("todr", "Todhri"), + ("toto", "Toto"), + ("tulutigalari", "Tulu_Tigalari"), + ("tutg", "Tulu_Tigalari"), + ("ugar", "Ugaritic"), + ("ugaritic", "Ugaritic"), + ("unknown", "Unknown"), + ("vai", "Vai"), + ("vaii", "Vai"), + ("vith", "Vithkuqi"), + ("vithkuqi", "Vithkuqi"), + ("wancho", "Wancho"), + ("wara", "Warang_Citi"), + ("warangciti", "Warang_Citi"), + ("wcho", "Wancho"), + ("xpeo", "Old_Persian"), + ("xsux", "Cuneiform"), + ("yezi", "Yezidi"), + ("yezidi", "Yezidi"), + ("yi", "Yi"), + ("yiii", "Yi"), + ("zanabazarsquare", "Zanabazar_Square"), + ("zanb", "Zanabazar_Square"), + ("zinh", "Inherited"), + ("zyyy", "Common"), + ("zzzz", "Unknown"), + ], + ), + ( + "Sentence_Break", + &[ + ("at", "ATerm"), + ("aterm", "ATerm"), + ("cl", "Close"), + ("close", "Close"), + ("cr", "CR"), + ("ex", "Extend"), + ("extend", "Extend"), + ("fo", "Format"), + ("format", "Format"), + ("le", "OLetter"), + ("lf", "LF"), + ("lo", "Lower"), + ("lower", "Lower"), + ("nu", "Numeric"), + ("numeric", "Numeric"), + ("oletter", "OLetter"), + ("other", "Other"), + ("sc", "SContinue"), + ("scontinue", "SContinue"), + ("se", "Sep"), + ("sep", "Sep"), + ("sp", "Sp"), + ("st", "STerm"), + ("sterm", "STerm"), + ("up", "Upper"), + ("upper", "Upper"), + ("xx", "Other"), + ], + ), + ( + "Word_Break", + &[ + ("aletter", "ALetter"), + ("cr", "CR"), + ("doublequote", "Double_Quote"), + ("dq", "Double_Quote"), + ("eb", "E_Base"), + ("ebase", "E_Base"), + ("ebasegaz", "E_Base_GAZ"), + ("ebg", "E_Base_GAZ"), + ("em", "E_Modifier"), + ("emodifier", "E_Modifier"), + ("ex", "ExtendNumLet"), + ("extend", "Extend"), + ("extendnumlet", "ExtendNumLet"), + ("fo", "Format"), + ("format", "Format"), + ("gaz", "Glue_After_Zwj"), + ("glueafterzwj", "Glue_After_Zwj"), + ("hebrewletter", "Hebrew_Letter"), + ("hl", "Hebrew_Letter"), + ("ka", 
"Katakana"), + ("katakana", "Katakana"), + ("le", "ALetter"), + ("lf", "LF"), + ("mb", "MidNumLet"), + ("midletter", "MidLetter"), + ("midnum", "MidNum"), + ("midnumlet", "MidNumLet"), + ("ml", "MidLetter"), + ("mn", "MidNum"), + ("newline", "Newline"), + ("nl", "Newline"), + ("nu", "Numeric"), + ("numeric", "Numeric"), + ("other", "Other"), + ("regionalindicator", "Regional_Indicator"), + ("ri", "Regional_Indicator"), + ("singlequote", "Single_Quote"), + ("sq", "Single_Quote"), + ("wsegspace", "WSegSpace"), + ("xx", "Other"), + ("zwj", "ZWJ"), + ], + ), +]; diff --git a/vendor/regex-syntax/src/unicode_tables/script.rs b/vendor/regex-syntax/src/unicode_tables/script.rs new file mode 100644 index 00000000000000..3e437ca9ca73e5 --- /dev/null +++ b/vendor/regex-syntax/src/unicode_tables/script.rs @@ -0,0 +1,1300 @@ +// DO NOT EDIT THIS FILE. IT WAS AUTOMATICALLY GENERATED BY: +// +// ucd-generate script ucd-16.0.0 --chars +// +// Unicode version: 16.0.0. +// +// ucd-generate 0.3.1 is available on crates.io. + +pub const BY_NAME: &'static [(&'static str, &'static [(char, char)])] = &[ + ("Adlam", ADLAM), + ("Ahom", AHOM), + ("Anatolian_Hieroglyphs", ANATOLIAN_HIEROGLYPHS), + ("Arabic", ARABIC), + ("Armenian", ARMENIAN), + ("Avestan", AVESTAN), + ("Balinese", BALINESE), + ("Bamum", BAMUM), + ("Bassa_Vah", BASSA_VAH), + ("Batak", BATAK), + ("Bengali", BENGALI), + ("Bhaiksuki", BHAIKSUKI), + ("Bopomofo", BOPOMOFO), + ("Brahmi", BRAHMI), + ("Braille", BRAILLE), + ("Buginese", BUGINESE), + ("Buhid", BUHID), + ("Canadian_Aboriginal", CANADIAN_ABORIGINAL), + ("Carian", CARIAN), + ("Caucasian_Albanian", CAUCASIAN_ALBANIAN), + ("Chakma", CHAKMA), + ("Cham", CHAM), + ("Cherokee", CHEROKEE), + ("Chorasmian", CHORASMIAN), + ("Common", COMMON), + ("Coptic", COPTIC), + ("Cuneiform", CUNEIFORM), + ("Cypriot", CYPRIOT), + ("Cypro_Minoan", CYPRO_MINOAN), + ("Cyrillic", CYRILLIC), + ("Deseret", DESERET), + ("Devanagari", DEVANAGARI), + ("Dives_Akuru", DIVES_AKURU), + ("Dogra", DOGRA), + ("Duployan", DUPLOYAN), + ("Egyptian_Hieroglyphs", EGYPTIAN_HIEROGLYPHS), + ("Elbasan", ELBASAN), + ("Elymaic", ELYMAIC), + ("Ethiopic", ETHIOPIC), + ("Garay", GARAY), + ("Georgian", GEORGIAN), + ("Glagolitic", GLAGOLITIC), + ("Gothic", GOTHIC), + ("Grantha", GRANTHA), + ("Greek", GREEK), + ("Gujarati", GUJARATI), + ("Gunjala_Gondi", GUNJALA_GONDI), + ("Gurmukhi", GURMUKHI), + ("Gurung_Khema", GURUNG_KHEMA), + ("Han", HAN), + ("Hangul", HANGUL), + ("Hanifi_Rohingya", HANIFI_ROHINGYA), + ("Hanunoo", HANUNOO), + ("Hatran", HATRAN), + ("Hebrew", HEBREW), + ("Hiragana", HIRAGANA), + ("Imperial_Aramaic", IMPERIAL_ARAMAIC), + ("Inherited", INHERITED), + ("Inscriptional_Pahlavi", INSCRIPTIONAL_PAHLAVI), + ("Inscriptional_Parthian", INSCRIPTIONAL_PARTHIAN), + ("Javanese", JAVANESE), + ("Kaithi", KAITHI), + ("Kannada", KANNADA), + ("Katakana", KATAKANA), + ("Kawi", KAWI), + ("Kayah_Li", KAYAH_LI), + ("Kharoshthi", KHAROSHTHI), + ("Khitan_Small_Script", KHITAN_SMALL_SCRIPT), + ("Khmer", KHMER), + ("Khojki", KHOJKI), + ("Khudawadi", KHUDAWADI), + ("Kirat_Rai", KIRAT_RAI), + ("Lao", LAO), + ("Latin", LATIN), + ("Lepcha", LEPCHA), + ("Limbu", LIMBU), + ("Linear_A", LINEAR_A), + ("Linear_B", LINEAR_B), + ("Lisu", LISU), + ("Lycian", LYCIAN), + ("Lydian", LYDIAN), + ("Mahajani", MAHAJANI), + ("Makasar", MAKASAR), + ("Malayalam", MALAYALAM), + ("Mandaic", MANDAIC), + ("Manichaean", MANICHAEAN), + ("Marchen", MARCHEN), + ("Masaram_Gondi", MASARAM_GONDI), + ("Medefaidrin", MEDEFAIDRIN), + ("Meetei_Mayek", MEETEI_MAYEK), + ("Mende_Kikakui", 
MENDE_KIKAKUI), + ("Meroitic_Cursive", MEROITIC_CURSIVE), + ("Meroitic_Hieroglyphs", MEROITIC_HIEROGLYPHS), + ("Miao", MIAO), + ("Modi", MODI), + ("Mongolian", MONGOLIAN), + ("Mro", MRO), + ("Multani", MULTANI), + ("Myanmar", MYANMAR), + ("Nabataean", NABATAEAN), + ("Nag_Mundari", NAG_MUNDARI), + ("Nandinagari", NANDINAGARI), + ("New_Tai_Lue", NEW_TAI_LUE), + ("Newa", NEWA), + ("Nko", NKO), + ("Nushu", NUSHU), + ("Nyiakeng_Puachue_Hmong", NYIAKENG_PUACHUE_HMONG), + ("Ogham", OGHAM), + ("Ol_Chiki", OL_CHIKI), + ("Ol_Onal", OL_ONAL), + ("Old_Hungarian", OLD_HUNGARIAN), + ("Old_Italic", OLD_ITALIC), + ("Old_North_Arabian", OLD_NORTH_ARABIAN), + ("Old_Permic", OLD_PERMIC), + ("Old_Persian", OLD_PERSIAN), + ("Old_Sogdian", OLD_SOGDIAN), + ("Old_South_Arabian", OLD_SOUTH_ARABIAN), + ("Old_Turkic", OLD_TURKIC), + ("Old_Uyghur", OLD_UYGHUR), + ("Oriya", ORIYA), + ("Osage", OSAGE), + ("Osmanya", OSMANYA), + ("Pahawh_Hmong", PAHAWH_HMONG), + ("Palmyrene", PALMYRENE), + ("Pau_Cin_Hau", PAU_CIN_HAU), + ("Phags_Pa", PHAGS_PA), + ("Phoenician", PHOENICIAN), + ("Psalter_Pahlavi", PSALTER_PAHLAVI), + ("Rejang", REJANG), + ("Runic", RUNIC), + ("Samaritan", SAMARITAN), + ("Saurashtra", SAURASHTRA), + ("Sharada", SHARADA), + ("Shavian", SHAVIAN), + ("Siddham", SIDDHAM), + ("SignWriting", SIGNWRITING), + ("Sinhala", SINHALA), + ("Sogdian", SOGDIAN), + ("Sora_Sompeng", SORA_SOMPENG), + ("Soyombo", SOYOMBO), + ("Sundanese", SUNDANESE), + ("Sunuwar", SUNUWAR), + ("Syloti_Nagri", SYLOTI_NAGRI), + ("Syriac", SYRIAC), + ("Tagalog", TAGALOG), + ("Tagbanwa", TAGBANWA), + ("Tai_Le", TAI_LE), + ("Tai_Tham", TAI_THAM), + ("Tai_Viet", TAI_VIET), + ("Takri", TAKRI), + ("Tamil", TAMIL), + ("Tangsa", TANGSA), + ("Tangut", TANGUT), + ("Telugu", TELUGU), + ("Thaana", THAANA), + ("Thai", THAI), + ("Tibetan", TIBETAN), + ("Tifinagh", TIFINAGH), + ("Tirhuta", TIRHUTA), + ("Todhri", TODHRI), + ("Toto", TOTO), + ("Tulu_Tigalari", TULU_TIGALARI), + ("Ugaritic", UGARITIC), + ("Vai", VAI), + ("Vithkuqi", VITHKUQI), + ("Wancho", WANCHO), + ("Warang_Citi", WARANG_CITI), + ("Yezidi", YEZIDI), + ("Yi", YI), + ("Zanabazar_Square", ZANABAZAR_SQUARE), +]; + +pub const ADLAM: &'static [(char, char)] = + &[('𞤀', '𞥋'), ('𞥐', '𞥙'), ('𞥞', '𞥟')]; + +pub const AHOM: &'static [(char, char)] = + &[('𑜀', '𑜚'), ('\u{1171d}', '\u{1172b}'), ('𑜰', '𑝆')]; + +pub const ANATOLIAN_HIEROGLYPHS: &'static [(char, char)] = &[('𔐀', '𔙆')]; + +pub const ARABIC: &'static [(char, char)] = &[ + ('\u{600}', '\u{604}'), + ('؆', '؋'), + ('؍', '\u{61a}'), + ('\u{61c}', '؞'), + ('ؠ', 'ؿ'), + ('ف', 'ي'), + ('\u{656}', 'ٯ'), + ('ٱ', '\u{6dc}'), + ('۞', 'ۿ'), + ('ݐ', 'ݿ'), + ('ࡰ', 'ࢎ'), + ('\u{890}', '\u{891}'), + ('\u{897}', '\u{8e1}'), + ('\u{8e3}', '\u{8ff}'), + ('ﭐ', '﯂'), + ('ﯓ', 'ﴽ'), + ('﵀', 'ﶏ'), + ('ﶒ', 'ﷇ'), + ('﷏', '﷏'), + ('ﷰ', '﷿'), + ('ﹰ', 'ﹴ'), + ('ﹶ', 'ﻼ'), + ('𐹠', '𐹾'), + ('𐻂', '𐻄'), + ('\u{10efc}', '\u{10eff}'), + ('𞸀', '𞸃'), + ('𞸅', '𞸟'), + ('𞸡', '𞸢'), + ('𞸤', '𞸤'), + ('𞸧', '𞸧'), + ('𞸩', '𞸲'), + ('𞸴', '𞸷'), + ('𞸹', '𞸹'), + ('𞸻', '𞸻'), + ('𞹂', '𞹂'), + ('𞹇', '𞹇'), + ('𞹉', '𞹉'), + ('𞹋', '𞹋'), + ('𞹍', '𞹏'), + ('𞹑', '𞹒'), + ('𞹔', '𞹔'), + ('𞹗', '𞹗'), + ('𞹙', '𞹙'), + ('𞹛', '𞹛'), + ('𞹝', '𞹝'), + ('𞹟', '𞹟'), + ('𞹡', '𞹢'), + ('𞹤', '𞹤'), + ('𞹧', '𞹪'), + ('𞹬', '𞹲'), + ('𞹴', '𞹷'), + ('𞹹', '𞹼'), + ('𞹾', '𞹾'), + ('𞺀', '𞺉'), + ('𞺋', '𞺛'), + ('𞺡', '𞺣'), + ('𞺥', '𞺩'), + ('𞺫', '𞺻'), + ('𞻰', '𞻱'), +]; + +pub const ARMENIAN: &'static [(char, char)] = + &[('Ա', 'Ֆ'), ('ՙ', '֊'), ('֍', '֏'), ('ﬓ', 'ﬗ')]; + +pub const AVESTAN: &'static [(char, char)] = &[('𐬀', '𐬵'), ('𐬹', '𐬿')]; + 
+pub const BALINESE: &'static [(char, char)] = &[('\u{1b00}', 'ᭌ'), ('᭎', '᭿')]; + +pub const BAMUM: &'static [(char, char)] = &[('ꚠ', '꛷'), ('𖠀', '𖨸')]; + +pub const BASSA_VAH: &'static [(char, char)] = + &[('𖫐', '𖫭'), ('\u{16af0}', '𖫵')]; + +pub const BATAK: &'static [(char, char)] = &[('ᯀ', '\u{1bf3}'), ('᯼', '᯿')]; + +pub const BENGALI: &'static [(char, char)] = &[ + ('ঀ', 'ঃ'), + ('অ', 'ঌ'), + ('এ', 'ঐ'), + ('ও', 'ন'), + ('প', 'র'), + ('ল', 'ল'), + ('শ', 'হ'), + ('\u{9bc}', '\u{9c4}'), + ('ে', 'ৈ'), + ('ো', 'ৎ'), + ('\u{9d7}', '\u{9d7}'), + ('ড়', 'ঢ়'), + ('য়', '\u{9e3}'), + ('০', '\u{9fe}'), +]; + +pub const BHAIKSUKI: &'static [(char, char)] = + &[('𑰀', '𑰈'), ('𑰊', '\u{11c36}'), ('\u{11c38}', '𑱅'), ('𑱐', '𑱬')]; + +pub const BOPOMOFO: &'static [(char, char)] = + &[('˪', '˫'), ('ㄅ', 'ㄯ'), ('ㆠ', 'ㆿ')]; + +pub const BRAHMI: &'static [(char, char)] = + &[('𑀀', '𑁍'), ('𑁒', '𑁵'), ('\u{1107f}', '\u{1107f}')]; + +pub const BRAILLE: &'static [(char, char)] = &[('⠀', '⣿')]; + +pub const BUGINESE: &'static [(char, char)] = &[('ᨀ', '\u{1a1b}'), ('᨞', '᨟')]; + +pub const BUHID: &'static [(char, char)] = &[('ᝀ', '\u{1753}')]; + +pub const CANADIAN_ABORIGINAL: &'static [(char, char)] = + &[('᐀', 'ᙿ'), ('ᢰ', 'ᣵ'), ('𑪰', '𑪿')]; + +pub const CARIAN: &'static [(char, char)] = &[('𐊠', '𐋐')]; + +pub const CAUCASIAN_ALBANIAN: &'static [(char, char)] = + &[('𐔰', '𐕣'), ('𐕯', '𐕯')]; + +pub const CHAKMA: &'static [(char, char)] = + &[('\u{11100}', '\u{11134}'), ('𑄶', '𑅇')]; + +pub const CHAM: &'static [(char, char)] = + &[('ꨀ', '\u{aa36}'), ('ꩀ', 'ꩍ'), ('꩐', '꩙'), ('꩜', '꩟')]; + +pub const CHEROKEE: &'static [(char, char)] = + &[('Ꭰ', 'Ᏽ'), ('ᏸ', 'ᏽ'), ('ꭰ', 'ꮿ')]; + +pub const CHORASMIAN: &'static [(char, char)] = &[('𐾰', '𐿋')]; + +pub const COMMON: &'static [(char, char)] = &[ + ('\0', '@'), + ('[', '`'), + ('{', '©'), + ('«', '¹'), + ('»', '¿'), + ('×', '×'), + ('÷', '÷'), + ('ʹ', '˟'), + ('˥', '˩'), + ('ˬ', '˿'), + ('ʹ', 'ʹ'), + (';', ';'), + ('΅', '΅'), + ('·', '·'), + ('\u{605}', '\u{605}'), + ('،', '،'), + ('؛', '؛'), + ('؟', '؟'), + ('ـ', 'ـ'), + ('\u{6dd}', '\u{6dd}'), + ('\u{8e2}', '\u{8e2}'), + ('।', '॥'), + ('฿', '฿'), + ('࿕', '࿘'), + ('჻', '჻'), + ('᛫', '᛭'), + ('᜵', '᜶'), + ('᠂', '᠃'), + ('᠅', '᠅'), + ('᳓', '᳓'), + ('᳡', '᳡'), + ('ᳩ', 'ᳬ'), + ('ᳮ', 'ᳳ'), + ('ᳵ', '᳷'), + ('ᳺ', 'ᳺ'), + ('\u{2000}', '\u{200b}'), + ('\u{200e}', '\u{2064}'), + ('\u{2066}', '⁰'), + ('⁴', '⁾'), + ('₀', '₎'), + ('₠', '⃀'), + ('℀', '℥'), + ('℧', '℩'), + ('ℬ', 'ℱ'), + ('ℳ', '⅍'), + ('⅏', '⅟'), + ('↉', '↋'), + ('←', '␩'), + ('⑀', '⑊'), + ('①', '⟿'), + ('⤀', '⭳'), + ('⭶', '⮕'), + ('⮗', '⯿'), + ('⸀', '⹝'), + ('⿰', '〄'), + ('〆', '〆'), + ('〈', '〠'), + ('〰', '〷'), + ('〼', '〿'), + ('゛', '゜'), + ('゠', '゠'), + ('・', 'ー'), + ('㆐', '㆟'), + ('㇀', '㇥'), + ('㇯', '㇯'), + ('㈠', '㉟'), + ('㉿', '㋏'), + ('㋿', '㋿'), + ('㍘', '㏿'), + ('䷀', '䷿'), + ('꜀', '꜡'), + ('ꞈ', '꞊'), + ('꠰', '꠹'), + ('꤮', '꤮'), + ('ꧏ', 'ꧏ'), + ('꭛', '꭛'), + ('꭪', '꭫'), + ('﴾', '﴿'), + ('︐', '︙'), + ('︰', '﹒'), + ('﹔', '﹦'), + ('﹨', '﹫'), + ('\u{feff}', '\u{feff}'), + ('!', '@'), + ('[', '`'), + ('{', '・'), + ('ー', 'ー'), + ('\u{ff9e}', '\u{ff9f}'), + ('¢', '₩'), + ('│', '○'), + ('\u{fff9}', '�'), + ('𐄀', '𐄂'), + ('𐄇', '𐄳'), + ('𐄷', '𐄿'), + ('𐆐', '𐆜'), + ('𐇐', '𐇼'), + ('𐋡', '𐋻'), + ('\u{1bca0}', '\u{1bca3}'), + ('𜰀', '𜳹'), + ('𜴀', '𜺳'), + ('𜽐', '𜿃'), + ('𝀀', '𝃵'), + ('𝄀', '𝄦'), + ('𝄩', '\u{1d166}'), + ('𝅪', '\u{1d17a}'), + ('𝆃', '𝆄'), + ('𝆌', '𝆩'), + ('𝆮', '𝇪'), + ('𝋀', '𝋓'), + ('𝋠', '𝋳'), + ('𝌀', '𝍖'), + ('𝍠', '𝍸'), + ('𝐀', '𝑔'), + ('𝑖', '𝒜'), + ('𝒞', '𝒟'), + ('𝒢', '𝒢'), 
+ ('𝒥', '𝒦'), + ('𝒩', '𝒬'), + ('𝒮', '𝒹'), + ('𝒻', '𝒻'), + ('𝒽', '𝓃'), + ('𝓅', '𝔅'), + ('𝔇', '𝔊'), + ('𝔍', '𝔔'), + ('𝔖', '𝔜'), + ('𝔞', '𝔹'), + ('𝔻', '𝔾'), + ('𝕀', '𝕄'), + ('𝕆', '𝕆'), + ('𝕊', '𝕐'), + ('𝕒', '𝚥'), + ('𝚨', '𝟋'), + ('𝟎', '𝟿'), + ('𞱱', '𞲴'), + ('𞴁', '𞴽'), + ('🀀', '🀫'), + ('🀰', '🂓'), + ('🂠', '🂮'), + ('🂱', '🂿'), + ('🃁', '🃏'), + ('🃑', '🃵'), + ('🄀', '🆭'), + ('🇦', '🇿'), + ('🈁', '🈂'), + ('🈐', '🈻'), + ('🉀', '🉈'), + ('🉐', '🉑'), + ('🉠', '🉥'), + ('🌀', '🛗'), + ('🛜', '🛬'), + ('🛰', '🛼'), + ('🜀', '🝶'), + ('🝻', '🟙'), + ('🟠', '🟫'), + ('🟰', '🟰'), + ('🠀', '🠋'), + ('🠐', '🡇'), + ('🡐', '🡙'), + ('🡠', '🢇'), + ('🢐', '🢭'), + ('🢰', '🢻'), + ('🣀', '🣁'), + ('🤀', '🩓'), + ('🩠', '🩭'), + ('🩰', '🩼'), + ('🪀', '🪉'), + ('🪏', '🫆'), + ('🫎', '🫜'), + ('🫟', '🫩'), + ('🫰', '🫸'), + ('🬀', '🮒'), + ('🮔', '🯹'), + ('\u{e0001}', '\u{e0001}'), + ('\u{e0020}', '\u{e007f}'), +]; + +pub const COPTIC: &'static [(char, char)] = + &[('Ϣ', 'ϯ'), ('Ⲁ', 'ⳳ'), ('⳹', '⳿')]; + +pub const CUNEIFORM: &'static [(char, char)] = + &[('𒀀', '𒎙'), ('𒐀', '𒑮'), ('𒑰', '𒑴'), ('𒒀', '𒕃')]; + +pub const CYPRIOT: &'static [(char, char)] = + &[('𐠀', '𐠅'), ('𐠈', '𐠈'), ('𐠊', '𐠵'), ('𐠷', '𐠸'), ('𐠼', '𐠼'), ('𐠿', '𐠿')]; + +pub const CYPRO_MINOAN: &'static [(char, char)] = &[('𒾐', '𒿲')]; + +pub const CYRILLIC: &'static [(char, char)] = &[ + ('Ѐ', '\u{484}'), + ('\u{487}', 'ԯ'), + ('ᲀ', 'ᲊ'), + ('ᴫ', 'ᴫ'), + ('ᵸ', 'ᵸ'), + ('\u{2de0}', '\u{2dff}'), + ('Ꙁ', '\u{a69f}'), + ('\u{fe2e}', '\u{fe2f}'), + ('𞀰', '𞁭'), + ('\u{1e08f}', '\u{1e08f}'), +]; + +pub const DESERET: &'static [(char, char)] = &[('𐐀', '𐑏')]; + +pub const DEVANAGARI: &'static [(char, char)] = &[ + ('\u{900}', 'ॐ'), + ('\u{955}', '\u{963}'), + ('०', 'ॿ'), + ('\u{a8e0}', '\u{a8ff}'), + ('𑬀', '𑬉'), +]; + +pub const DIVES_AKURU: &'static [(char, char)] = &[ + ('𑤀', '𑤆'), + ('𑤉', '𑤉'), + ('𑤌', '𑤓'), + ('𑤕', '𑤖'), + ('𑤘', '𑤵'), + ('𑤷', '𑤸'), + ('\u{1193b}', '𑥆'), + ('𑥐', '𑥙'), +]; + +pub const DOGRA: &'static [(char, char)] = &[('𑠀', '𑠻')]; + +pub const DUPLOYAN: &'static [(char, char)] = + &[('𛰀', '𛱪'), ('𛱰', '𛱼'), ('𛲀', '𛲈'), ('𛲐', '𛲙'), ('𛲜', '𛲟')]; + +pub const EGYPTIAN_HIEROGLYPHS: &'static [(char, char)] = + &[('𓀀', '\u{13455}'), ('𓑠', '𔏺')]; + +pub const ELBASAN: &'static [(char, char)] = &[('𐔀', '𐔧')]; + +pub const ELYMAIC: &'static [(char, char)] = &[('𐿠', '𐿶')]; + +pub const ETHIOPIC: &'static [(char, char)] = &[ + ('ሀ', 'ቈ'), + ('ቊ', 'ቍ'), + ('ቐ', 'ቖ'), + ('ቘ', 'ቘ'), + ('ቚ', 'ቝ'), + ('በ', 'ኈ'), + ('ኊ', 'ኍ'), + ('ነ', 'ኰ'), + ('ኲ', 'ኵ'), + ('ኸ', 'ኾ'), + ('ዀ', 'ዀ'), + ('ዂ', 'ዅ'), + ('ወ', 'ዖ'), + ('ዘ', 'ጐ'), + ('ጒ', 'ጕ'), + ('ጘ', 'ፚ'), + ('\u{135d}', '፼'), + ('ᎀ', '᎙'), + ('ⶀ', 'ⶖ'), + ('ⶠ', 'ⶦ'), + ('ⶨ', 'ⶮ'), + ('ⶰ', 'ⶶ'), + ('ⶸ', 'ⶾ'), + ('ⷀ', 'ⷆ'), + ('ⷈ', 'ⷎ'), + ('ⷐ', 'ⷖ'), + ('ⷘ', 'ⷞ'), + ('ꬁ', 'ꬆ'), + ('ꬉ', 'ꬎ'), + ('ꬑ', 'ꬖ'), + ('ꬠ', 'ꬦ'), + ('ꬨ', 'ꬮ'), + ('𞟠', '𞟦'), + ('𞟨', '𞟫'), + ('𞟭', '𞟮'), + ('𞟰', '𞟾'), +]; + +pub const GARAY: &'static [(char, char)] = + &[('𐵀', '𐵥'), ('\u{10d69}', '𐶅'), ('𐶎', '𐶏')]; + +pub const GEORGIAN: &'static [(char, char)] = &[ + ('Ⴀ', 'Ⴥ'), + ('Ⴧ', 'Ⴧ'), + ('Ⴭ', 'Ⴭ'), + ('ა', 'ჺ'), + ('ჼ', 'ჿ'), + ('Ა', 'Ჺ'), + ('Ჽ', 'Ჿ'), + ('ⴀ', 'ⴥ'), + ('ⴧ', 'ⴧ'), + ('ⴭ', 'ⴭ'), +]; + +pub const GLAGOLITIC: &'static [(char, char)] = &[ + ('Ⰰ', 'ⱟ'), + ('\u{1e000}', '\u{1e006}'), + ('\u{1e008}', '\u{1e018}'), + ('\u{1e01b}', '\u{1e021}'), + ('\u{1e023}', '\u{1e024}'), + ('\u{1e026}', '\u{1e02a}'), +]; + +pub const GOTHIC: &'static [(char, char)] = &[('𐌰', '𐍊')]; + +pub const GRANTHA: &'static [(char, char)] = &[ + ('\u{11300}', '𑌃'), + ('𑌅', '𑌌'), + ('𑌏', '𑌐'), + ('𑌓', '𑌨'), + 
('𑌪', '𑌰'), + ('𑌲', '𑌳'), + ('𑌵', '𑌹'), + ('\u{1133c}', '𑍄'), + ('𑍇', '𑍈'), + ('𑍋', '\u{1134d}'), + ('𑍐', '𑍐'), + ('\u{11357}', '\u{11357}'), + ('𑍝', '𑍣'), + ('\u{11366}', '\u{1136c}'), + ('\u{11370}', '\u{11374}'), +]; + +pub const GREEK: &'static [(char, char)] = &[ + ('Ͱ', 'ͳ'), + ('͵', 'ͷ'), + ('ͺ', 'ͽ'), + ('Ϳ', 'Ϳ'), + ('΄', '΄'), + ('Ά', 'Ά'), + ('Έ', 'Ί'), + ('Ό', 'Ό'), + ('Ύ', 'Ρ'), + ('Σ', 'ϡ'), + ('ϰ', 'Ͽ'), + ('ᴦ', 'ᴪ'), + ('ᵝ', 'ᵡ'), + ('ᵦ', 'ᵪ'), + ('ᶿ', 'ᶿ'), + ('ἀ', 'ἕ'), + ('Ἐ', 'Ἕ'), + ('ἠ', 'ὅ'), + ('Ὀ', 'Ὅ'), + ('ὐ', 'ὗ'), + ('Ὑ', 'Ὑ'), + ('Ὓ', 'Ὓ'), + ('Ὕ', 'Ὕ'), + ('Ὗ', 'ώ'), + ('ᾀ', 'ᾴ'), + ('ᾶ', 'ῄ'), + ('ῆ', 'ΐ'), + ('ῖ', 'Ί'), + ('῝', '`'), + ('ῲ', 'ῴ'), + ('ῶ', '῾'), + ('Ω', 'Ω'), + ('ꭥ', 'ꭥ'), + ('𐅀', '𐆎'), + ('𐆠', '𐆠'), + ('𝈀', '𝉅'), +]; + +pub const GUJARATI: &'static [(char, char)] = &[ + ('\u{a81}', 'ઃ'), + ('અ', 'ઍ'), + ('એ', 'ઑ'), + ('ઓ', 'ન'), + ('પ', 'ર'), + ('લ', 'ળ'), + ('વ', 'હ'), + ('\u{abc}', '\u{ac5}'), + ('\u{ac7}', 'ૉ'), + ('ો', '\u{acd}'), + ('ૐ', 'ૐ'), + ('ૠ', '\u{ae3}'), + ('૦', '૱'), + ('ૹ', '\u{aff}'), +]; + +pub const GUNJALA_GONDI: &'static [(char, char)] = &[ + ('𑵠', '𑵥'), + ('𑵧', '𑵨'), + ('𑵪', '𑶎'), + ('\u{11d90}', '\u{11d91}'), + ('𑶓', '𑶘'), + ('𑶠', '𑶩'), +]; + +pub const GURMUKHI: &'static [(char, char)] = &[ + ('\u{a01}', 'ਃ'), + ('ਅ', 'ਊ'), + ('ਏ', 'ਐ'), + ('ਓ', 'ਨ'), + ('ਪ', 'ਰ'), + ('ਲ', 'ਲ਼'), + ('ਵ', 'ਸ਼'), + ('ਸ', 'ਹ'), + ('\u{a3c}', '\u{a3c}'), + ('ਾ', '\u{a42}'), + ('\u{a47}', '\u{a48}'), + ('\u{a4b}', '\u{a4d}'), + ('\u{a51}', '\u{a51}'), + ('ਖ਼', 'ੜ'), + ('ਫ਼', 'ਫ਼'), + ('੦', '੶'), +]; + +pub const GURUNG_KHEMA: &'static [(char, char)] = &[('𖄀', '𖄹')]; + +pub const HAN: &'static [(char, char)] = &[ + ('⺀', '⺙'), + ('⺛', '⻳'), + ('⼀', '⿕'), + ('々', '々'), + ('〇', '〇'), + ('〡', '〩'), + ('〸', '〻'), + ('㐀', '䶿'), + ('一', '鿿'), + ('豈', '舘'), + ('並', '龎'), + ('𖿢', '𖿣'), + ('\u{16ff0}', '\u{16ff1}'), + ('𠀀', '𪛟'), + ('𪜀', '𫜹'), + ('𫝀', '𫠝'), + ('𫠠', '𬺡'), + ('𬺰', '𮯠'), + ('𮯰', '𮹝'), + ('丽', '𪘀'), + ('𰀀', '𱍊'), + ('𱍐', '𲎯'), +]; + +pub const HANGUL: &'static [(char, char)] = &[ + ('ᄀ', 'ᇿ'), + ('\u{302e}', '\u{302f}'), + ('ㄱ', 'ㆎ'), + ('㈀', '㈞'), + ('㉠', '㉾'), + ('ꥠ', 'ꥼ'), + ('가', '힣'), + ('ힰ', 'ퟆ'), + ('ퟋ', 'ퟻ'), + ('ᅠ', 'ᄒ'), + ('ᅡ', 'ᅦ'), + ('ᅧ', 'ᅬ'), + ('ᅭ', 'ᅲ'), + ('ᅳ', 'ᅵ'), +]; + +pub const HANIFI_ROHINGYA: &'static [(char, char)] = + &[('𐴀', '\u{10d27}'), ('𐴰', '𐴹')]; + +pub const HANUNOO: &'static [(char, char)] = &[('ᜠ', '\u{1734}')]; + +pub const HATRAN: &'static [(char, char)] = + &[('𐣠', '𐣲'), ('𐣴', '𐣵'), ('𐣻', '𐣿')]; + +pub const HEBREW: &'static [(char, char)] = &[ + ('\u{591}', '\u{5c7}'), + ('א', 'ת'), + ('ׯ', '״'), + ('יִ', 'זּ'), + ('טּ', 'לּ'), + ('מּ', 'מּ'), + ('נּ', 'סּ'), + ('ףּ', 'פּ'), + ('צּ', 'ﭏ'), +]; + +pub const HIRAGANA: &'static [(char, char)] = &[ + ('ぁ', 'ゖ'), + ('ゝ', 'ゟ'), + ('𛀁', '𛄟'), + ('𛄲', '𛄲'), + ('𛅐', '𛅒'), + ('🈀', '🈀'), +]; + +pub const IMPERIAL_ARAMAIC: &'static [(char, char)] = + &[('𐡀', '𐡕'), ('𐡗', '𐡟')]; + +pub const INHERITED: &'static [(char, char)] = &[ + ('\u{300}', '\u{36f}'), + ('\u{485}', '\u{486}'), + ('\u{64b}', '\u{655}'), + ('\u{670}', '\u{670}'), + ('\u{951}', '\u{954}'), + ('\u{1ab0}', '\u{1ace}'), + ('\u{1cd0}', '\u{1cd2}'), + ('\u{1cd4}', '\u{1ce0}'), + ('\u{1ce2}', '\u{1ce8}'), + ('\u{1ced}', '\u{1ced}'), + ('\u{1cf4}', '\u{1cf4}'), + ('\u{1cf8}', '\u{1cf9}'), + ('\u{1dc0}', '\u{1dff}'), + ('\u{200c}', '\u{200d}'), + ('\u{20d0}', '\u{20f0}'), + ('\u{302a}', '\u{302d}'), + ('\u{3099}', '\u{309a}'), + ('\u{fe00}', '\u{fe0f}'), + ('\u{fe20}', '\u{fe2d}'), + ('\u{101fd}', 
'\u{101fd}'), + ('\u{102e0}', '\u{102e0}'), + ('\u{1133b}', '\u{1133b}'), + ('\u{1cf00}', '\u{1cf2d}'), + ('\u{1cf30}', '\u{1cf46}'), + ('\u{1d167}', '\u{1d169}'), + ('\u{1d17b}', '\u{1d182}'), + ('\u{1d185}', '\u{1d18b}'), + ('\u{1d1aa}', '\u{1d1ad}'), + ('\u{e0100}', '\u{e01ef}'), +]; + +pub const INSCRIPTIONAL_PAHLAVI: &'static [(char, char)] = + &[('𐭠', '𐭲'), ('𐭸', '𐭿')]; + +pub const INSCRIPTIONAL_PARTHIAN: &'static [(char, char)] = + &[('𐭀', '𐭕'), ('𐭘', '𐭟')]; + +pub const JAVANESE: &'static [(char, char)] = + &[('\u{a980}', '꧍'), ('꧐', '꧙'), ('꧞', '꧟')]; + +pub const KAITHI: &'static [(char, char)] = + &[('\u{11080}', '\u{110c2}'), ('\u{110cd}', '\u{110cd}')]; + +pub const KANNADA: &'static [(char, char)] = &[ + ('ಀ', 'ಌ'), + ('ಎ', 'ಐ'), + ('ಒ', 'ನ'), + ('ಪ', 'ಳ'), + ('ವ', 'ಹ'), + ('\u{cbc}', 'ೄ'), + ('\u{cc6}', '\u{cc8}'), + ('\u{cca}', '\u{ccd}'), + ('\u{cd5}', '\u{cd6}'), + ('ೝ', 'ೞ'), + ('ೠ', '\u{ce3}'), + ('೦', '೯'), + ('ೱ', 'ೳ'), +]; + +pub const KATAKANA: &'static [(char, char)] = &[ + ('ァ', 'ヺ'), + ('ヽ', 'ヿ'), + ('ㇰ', 'ㇿ'), + ('㋐', '㋾'), + ('㌀', '㍗'), + ('ヲ', 'ッ'), + ('ア', 'ン'), + ('𚿰', '𚿳'), + ('𚿵', '𚿻'), + ('𚿽', '𚿾'), + ('𛀀', '𛀀'), + ('𛄠', '𛄢'), + ('𛅕', '𛅕'), + ('𛅤', '𛅧'), +]; + +pub const KAWI: &'static [(char, char)] = + &[('\u{11f00}', '𑼐'), ('𑼒', '\u{11f3a}'), ('𑼾', '\u{11f5a}')]; + +pub const KAYAH_LI: &'static [(char, char)] = &[('꤀', '\u{a92d}'), ('꤯', '꤯')]; + +pub const KHAROSHTHI: &'static [(char, char)] = &[ + ('𐨀', '\u{10a03}'), + ('\u{10a05}', '\u{10a06}'), + ('\u{10a0c}', '𐨓'), + ('𐨕', '𐨗'), + ('𐨙', '𐨵'), + ('\u{10a38}', '\u{10a3a}'), + ('\u{10a3f}', '𐩈'), + ('𐩐', '𐩘'), +]; + +pub const KHITAN_SMALL_SCRIPT: &'static [(char, char)] = + &[('\u{16fe4}', '\u{16fe4}'), ('𘬀', '𘳕'), ('𘳿', '𘳿')]; + +pub const KHMER: &'static [(char, char)] = + &[('ក', '\u{17dd}'), ('០', '៩'), ('៰', '៹'), ('᧠', '᧿')]; + +pub const KHOJKI: &'static [(char, char)] = &[('𑈀', '𑈑'), ('𑈓', '\u{11241}')]; + +pub const KHUDAWADI: &'static [(char, char)] = + &[('𑊰', '\u{112ea}'), ('𑋰', '𑋹')]; + +pub const KIRAT_RAI: &'static [(char, char)] = &[('𖵀', '𖵹')]; + +pub const LAO: &'static [(char, char)] = &[ + ('ກ', 'ຂ'), + ('ຄ', 'ຄ'), + ('ຆ', 'ຊ'), + ('ຌ', 'ຣ'), + ('ລ', 'ລ'), + ('ວ', 'ຽ'), + ('ເ', 'ໄ'), + ('ໆ', 'ໆ'), + ('\u{ec8}', '\u{ece}'), + ('໐', '໙'), + ('ໜ', 'ໟ'), +]; + +pub const LATIN: &'static [(char, char)] = &[ + ('A', 'Z'), + ('a', 'z'), + ('ª', 'ª'), + ('º', 'º'), + ('À', 'Ö'), + ('Ø', 'ö'), + ('ø', 'ʸ'), + ('ˠ', 'ˤ'), + ('ᴀ', 'ᴥ'), + ('ᴬ', 'ᵜ'), + ('ᵢ', 'ᵥ'), + ('ᵫ', 'ᵷ'), + ('ᵹ', 'ᶾ'), + ('Ḁ', 'ỿ'), + ('ⁱ', 'ⁱ'), + ('ⁿ', 'ⁿ'), + ('ₐ', 'ₜ'), + ('K', 'Å'), + ('Ⅎ', 'Ⅎ'), + ('ⅎ', 'ⅎ'), + ('Ⅰ', 'ↈ'), + ('Ⱡ', 'Ɀ'), + ('Ꜣ', 'ꞇ'), + ('Ꞌ', 'ꟍ'), + ('Ꟑ', 'ꟑ'), + ('ꟓ', 'ꟓ'), + ('ꟕ', 'Ƛ'), + ('ꟲ', 'ꟿ'), + ('ꬰ', 'ꭚ'), + ('ꭜ', 'ꭤ'), + ('ꭦ', 'ꭩ'), + ('ff', 'st'), + ('A', 'Z'), + ('a', 'z'), + ('𐞀', '𐞅'), + ('𐞇', '𐞰'), + ('𐞲', '𐞺'), + ('𝼀', '𝼞'), + ('𝼥', '𝼪'), +]; + +pub const LEPCHA: &'static [(char, char)] = + &[('ᰀ', '\u{1c37}'), ('᰻', '᱉'), ('ᱍ', 'ᱏ')]; + +pub const LIMBU: &'static [(char, char)] = &[ + ('ᤀ', 'ᤞ'), + ('\u{1920}', 'ᤫ'), + ('ᤰ', '\u{193b}'), + ('᥀', '᥀'), + ('᥄', '᥏'), +]; + +pub const LINEAR_A: &'static [(char, char)] = + &[('𐘀', '𐜶'), ('𐝀', '𐝕'), ('𐝠', '𐝧')]; + +pub const LINEAR_B: &'static [(char, char)] = &[ + ('𐀀', '𐀋'), + ('𐀍', '𐀦'), + ('𐀨', '𐀺'), + ('𐀼', '𐀽'), + ('𐀿', '𐁍'), + ('𐁐', '𐁝'), + ('𐂀', '𐃺'), +]; + +pub const LISU: &'static [(char, char)] = &[('ꓐ', '꓿'), ('𑾰', '𑾰')]; + +pub const LYCIAN: &'static [(char, char)] = &[('𐊀', '𐊜')]; + +pub const LYDIAN: &'static [(char, 
char)] = &[('𐤠', '𐤹'), ('𐤿', '𐤿')]; + +pub const MAHAJANI: &'static [(char, char)] = &[('𑅐', '𑅶')]; + +pub const MAKASAR: &'static [(char, char)] = &[('𑻠', '𑻸')]; + +pub const MALAYALAM: &'static [(char, char)] = &[ + ('\u{d00}', 'ഌ'), + ('എ', 'ഐ'), + ('ഒ', '\u{d44}'), + ('െ', 'ൈ'), + ('ൊ', '൏'), + ('ൔ', '\u{d63}'), + ('൦', 'ൿ'), +]; + +pub const MANDAIC: &'static [(char, char)] = &[('ࡀ', '\u{85b}'), ('࡞', '࡞')]; + +pub const MANICHAEAN: &'static [(char, char)] = + &[('𐫀', '\u{10ae6}'), ('𐫫', '𐫶')]; + +pub const MARCHEN: &'static [(char, char)] = + &[('𑱰', '𑲏'), ('\u{11c92}', '\u{11ca7}'), ('𑲩', '\u{11cb6}')]; + +pub const MASARAM_GONDI: &'static [(char, char)] = &[ + ('𑴀', '𑴆'), + ('𑴈', '𑴉'), + ('𑴋', '\u{11d36}'), + ('\u{11d3a}', '\u{11d3a}'), + ('\u{11d3c}', '\u{11d3d}'), + ('\u{11d3f}', '\u{11d47}'), + ('𑵐', '𑵙'), +]; + +pub const MEDEFAIDRIN: &'static [(char, char)] = &[('𖹀', '𖺚')]; + +pub const MEETEI_MAYEK: &'static [(char, char)] = + &[('ꫠ', '\u{aaf6}'), ('ꯀ', '\u{abed}'), ('꯰', '꯹')]; + +pub const MENDE_KIKAKUI: &'static [(char, char)] = + &[('𞠀', '𞣄'), ('𞣇', '\u{1e8d6}')]; + +pub const MEROITIC_CURSIVE: &'static [(char, char)] = + &[('𐦠', '𐦷'), ('𐦼', '𐧏'), ('𐧒', '𐧿')]; + +pub const MEROITIC_HIEROGLYPHS: &'static [(char, char)] = &[('𐦀', '𐦟')]; + +pub const MIAO: &'static [(char, char)] = + &[('𖼀', '𖽊'), ('\u{16f4f}', '𖾇'), ('\u{16f8f}', '𖾟')]; + +pub const MODI: &'static [(char, char)] = &[('𑘀', '𑙄'), ('𑙐', '𑙙')]; + +pub const MONGOLIAN: &'static [(char, char)] = + &[('᠀', '᠁'), ('᠄', '᠄'), ('᠆', '᠙'), ('ᠠ', 'ᡸ'), ('ᢀ', 'ᢪ'), ('𑙠', '𑙬')]; + +pub const MRO: &'static [(char, char)] = &[('𖩀', '𖩞'), ('𖩠', '𖩩'), ('𖩮', '𖩯')]; + +pub const MULTANI: &'static [(char, char)] = + &[('𑊀', '𑊆'), ('𑊈', '𑊈'), ('𑊊', '𑊍'), ('𑊏', '𑊝'), ('𑊟', '𑊩')]; + +pub const MYANMAR: &'static [(char, char)] = + &[('က', '႟'), ('ꧠ', 'ꧾ'), ('ꩠ', 'ꩿ'), ('𑛐', '𑛣')]; + +pub const NABATAEAN: &'static [(char, char)] = &[('𐢀', '𐢞'), ('𐢧', '𐢯')]; + +pub const NAG_MUNDARI: &'static [(char, char)] = &[('𞓐', '𞓹')]; + +pub const NANDINAGARI: &'static [(char, char)] = + &[('𑦠', '𑦧'), ('𑦪', '\u{119d7}'), ('\u{119da}', '𑧤')]; + +pub const NEW_TAI_LUE: &'static [(char, char)] = + &[('ᦀ', 'ᦫ'), ('ᦰ', 'ᧉ'), ('᧐', '᧚'), ('᧞', '᧟')]; + +pub const NEWA: &'static [(char, char)] = &[('𑐀', '𑑛'), ('𑑝', '𑑡')]; + +pub const NKO: &'static [(char, char)] = &[('߀', 'ߺ'), ('\u{7fd}', '߿')]; + +pub const NUSHU: &'static [(char, char)] = &[('𖿡', '𖿡'), ('𛅰', '𛋻')]; + +pub const NYIAKENG_PUACHUE_HMONG: &'static [(char, char)] = + &[('𞄀', '𞄬'), ('\u{1e130}', '𞄽'), ('𞅀', '𞅉'), ('𞅎', '𞅏')]; + +pub const OGHAM: &'static [(char, char)] = &[('\u{1680}', '᚜')]; + +pub const OL_CHIKI: &'static [(char, char)] = &[('᱐', '᱿')]; + +pub const OL_ONAL: &'static [(char, char)] = &[('𞗐', '𞗺'), ('𞗿', '𞗿')]; + +pub const OLD_HUNGARIAN: &'static [(char, char)] = + &[('𐲀', '𐲲'), ('𐳀', '𐳲'), ('𐳺', '𐳿')]; + +pub const OLD_ITALIC: &'static [(char, char)] = &[('𐌀', '𐌣'), ('𐌭', '𐌯')]; + +pub const OLD_NORTH_ARABIAN: &'static [(char, char)] = &[('𐪀', '𐪟')]; + +pub const OLD_PERMIC: &'static [(char, char)] = &[('𐍐', '\u{1037a}')]; + +pub const OLD_PERSIAN: &'static [(char, char)] = &[('𐎠', '𐏃'), ('𐏈', '𐏕')]; + +pub const OLD_SOGDIAN: &'static [(char, char)] = &[('𐼀', '𐼧')]; + +pub const OLD_SOUTH_ARABIAN: &'static [(char, char)] = &[('𐩠', '𐩿')]; + +pub const OLD_TURKIC: &'static [(char, char)] = &[('𐰀', '𐱈')]; + +pub const OLD_UYGHUR: &'static [(char, char)] = &[('𐽰', '𐾉')]; + +pub const ORIYA: &'static [(char, char)] = &[ + ('\u{b01}', 'ଃ'), + ('ଅ', 'ଌ'), + ('ଏ', 'ଐ'), + 
('ଓ', 'ନ'), + ('ପ', 'ର'), + ('ଲ', 'ଳ'), + ('ଵ', 'ହ'), + ('\u{b3c}', '\u{b44}'), + ('େ', 'ୈ'), + ('ୋ', '\u{b4d}'), + ('\u{b55}', '\u{b57}'), + ('ଡ଼', 'ଢ଼'), + ('ୟ', '\u{b63}'), + ('୦', '୷'), +]; + +pub const OSAGE: &'static [(char, char)] = &[('𐒰', '𐓓'), ('𐓘', '𐓻')]; + +pub const OSMANYA: &'static [(char, char)] = &[('𐒀', '𐒝'), ('𐒠', '𐒩')]; + +pub const PAHAWH_HMONG: &'static [(char, char)] = + &[('𖬀', '𖭅'), ('𖭐', '𖭙'), ('𖭛', '𖭡'), ('𖭣', '𖭷'), ('𖭽', '𖮏')]; + +pub const PALMYRENE: &'static [(char, char)] = &[('𐡠', '𐡿')]; + +pub const PAU_CIN_HAU: &'static [(char, char)] = &[('𑫀', '𑫸')]; + +pub const PHAGS_PA: &'static [(char, char)] = &[('ꡀ', '꡷')]; + +pub const PHOENICIAN: &'static [(char, char)] = &[('𐤀', '𐤛'), ('𐤟', '𐤟')]; + +pub const PSALTER_PAHLAVI: &'static [(char, char)] = + &[('𐮀', '𐮑'), ('𐮙', '𐮜'), ('𐮩', '𐮯')]; + +pub const REJANG: &'static [(char, char)] = &[('ꤰ', '\u{a953}'), ('꥟', '꥟')]; + +pub const RUNIC: &'static [(char, char)] = &[('ᚠ', 'ᛪ'), ('ᛮ', 'ᛸ')]; + +pub const SAMARITAN: &'static [(char, char)] = &[('ࠀ', '\u{82d}'), ('࠰', '࠾')]; + +pub const SAURASHTRA: &'static [(char, char)] = + &[('ꢀ', '\u{a8c5}'), ('꣎', '꣙')]; + +pub const SHARADA: &'static [(char, char)] = &[('\u{11180}', '𑇟')]; + +pub const SHAVIAN: &'static [(char, char)] = &[('𐑐', '𐑿')]; + +pub const SIDDHAM: &'static [(char, char)] = + &[('𑖀', '\u{115b5}'), ('𑖸', '\u{115dd}')]; + +pub const SIGNWRITING: &'static [(char, char)] = + &[('𝠀', '𝪋'), ('\u{1da9b}', '\u{1da9f}'), ('\u{1daa1}', '\u{1daaf}')]; + +pub const SINHALA: &'static [(char, char)] = &[ + ('\u{d81}', 'ඃ'), + ('අ', 'ඖ'), + ('ක', 'න'), + ('ඳ', 'ර'), + ('ල', 'ල'), + ('ව', 'ෆ'), + ('\u{dca}', '\u{dca}'), + ('\u{dcf}', '\u{dd4}'), + ('\u{dd6}', '\u{dd6}'), + ('ෘ', '\u{ddf}'), + ('෦', '෯'), + ('ෲ', '෴'), + ('𑇡', '𑇴'), +]; + +pub const SOGDIAN: &'static [(char, char)] = &[('𐼰', '𐽙')]; + +pub const SORA_SOMPENG: &'static [(char, char)] = &[('𑃐', '𑃨'), ('𑃰', '𑃹')]; + +pub const SOYOMBO: &'static [(char, char)] = &[('𑩐', '𑪢')]; + +pub const SUNDANESE: &'static [(char, char)] = + &[('\u{1b80}', 'ᮿ'), ('᳀', '᳇')]; + +pub const SUNUWAR: &'static [(char, char)] = &[('𑯀', '𑯡'), ('𑯰', '𑯹')]; + +pub const SYLOTI_NAGRI: &'static [(char, char)] = &[('ꠀ', '\u{a82c}')]; + +pub const SYRIAC: &'static [(char, char)] = + &[('܀', '܍'), ('\u{70f}', '\u{74a}'), ('ݍ', 'ݏ'), ('ࡠ', 'ࡪ')]; + +pub const TAGALOG: &'static [(char, char)] = &[('ᜀ', '\u{1715}'), ('ᜟ', 'ᜟ')]; + +pub const TAGBANWA: &'static [(char, char)] = + &[('ᝠ', 'ᝬ'), ('ᝮ', 'ᝰ'), ('\u{1772}', '\u{1773}')]; + +pub const TAI_LE: &'static [(char, char)] = &[('ᥐ', 'ᥭ'), ('ᥰ', 'ᥴ')]; + +pub const TAI_THAM: &'static [(char, char)] = &[ + ('ᨠ', '\u{1a5e}'), + ('\u{1a60}', '\u{1a7c}'), + ('\u{1a7f}', '᪉'), + ('᪐', '᪙'), + ('᪠', '᪭'), +]; + +pub const TAI_VIET: &'static [(char, char)] = &[('ꪀ', 'ꫂ'), ('ꫛ', '꫟')]; + +pub const TAKRI: &'static [(char, char)] = &[('𑚀', '𑚹'), ('𑛀', '𑛉')]; + +pub const TAMIL: &'static [(char, char)] = &[ + ('\u{b82}', 'ஃ'), + ('அ', 'ஊ'), + ('எ', 'ஐ'), + ('ஒ', 'க'), + ('ங', 'ச'), + ('ஜ', 'ஜ'), + ('ஞ', 'ட'), + ('ண', 'த'), + ('ந', 'ப'), + ('ம', 'ஹ'), + ('\u{bbe}', 'ூ'), + ('ெ', 'ை'), + ('ொ', '\u{bcd}'), + ('ௐ', 'ௐ'), + ('\u{bd7}', '\u{bd7}'), + ('௦', '௺'), + ('𑿀', '𑿱'), + ('𑿿', '𑿿'), +]; + +pub const TANGSA: &'static [(char, char)] = &[('𖩰', '𖪾'), ('𖫀', '𖫉')]; + +pub const TANGUT: &'static [(char, char)] = + &[('𖿠', '𖿠'), ('𗀀', '𘟷'), ('𘠀', '𘫿'), ('𘴀', '𘴈')]; + +pub const TELUGU: &'static [(char, char)] = &[ + ('\u{c00}', 'ఌ'), + ('ఎ', 'ఐ'), + ('ఒ', 'న'), + ('ప', 'హ'), + ('\u{c3c}', 'ౄ'), + 
('\u{c46}', '\u{c48}'), + ('\u{c4a}', '\u{c4d}'), + ('\u{c55}', '\u{c56}'), + ('ౘ', 'ౚ'), + ('ౝ', 'ౝ'), + ('ౠ', '\u{c63}'), + ('౦', '౯'), + ('౷', '౿'), +]; + +pub const THAANA: &'static [(char, char)] = &[('ހ', 'ޱ')]; + +pub const THAI: &'static [(char, char)] = &[('ก', '\u{e3a}'), ('เ', '๛')]; + +pub const TIBETAN: &'static [(char, char)] = &[ + ('ༀ', 'ཇ'), + ('ཉ', 'ཬ'), + ('\u{f71}', '\u{f97}'), + ('\u{f99}', '\u{fbc}'), + ('྾', '࿌'), + ('࿎', '࿔'), + ('࿙', '࿚'), +]; + +pub const TIFINAGH: &'static [(char, char)] = + &[('ⴰ', 'ⵧ'), ('ⵯ', '⵰'), ('\u{2d7f}', '\u{2d7f}')]; + +pub const TIRHUTA: &'static [(char, char)] = &[('𑒀', '𑓇'), ('𑓐', '𑓙')]; + +pub const TODHRI: &'static [(char, char)] = &[('𐗀', '𐗳')]; + +pub const TOTO: &'static [(char, char)] = &[('𞊐', '\u{1e2ae}')]; + +pub const TULU_TIGALARI: &'static [(char, char)] = &[ + ('𑎀', '𑎉'), + ('𑎋', '𑎋'), + ('𑎎', '𑎎'), + ('𑎐', '𑎵'), + ('𑎷', '\u{113c0}'), + ('\u{113c2}', '\u{113c2}'), + ('\u{113c5}', '\u{113c5}'), + ('\u{113c7}', '𑏊'), + ('𑏌', '𑏕'), + ('𑏗', '𑏘'), + ('\u{113e1}', '\u{113e2}'), +]; + +pub const UGARITIC: &'static [(char, char)] = &[('𐎀', '𐎝'), ('𐎟', '𐎟')]; + +pub const VAI: &'static [(char, char)] = &[('ꔀ', 'ꘫ')]; + +pub const VITHKUQI: &'static [(char, char)] = &[ + ('𐕰', '𐕺'), + ('𐕼', '𐖊'), + ('𐖌', '𐖒'), + ('𐖔', '𐖕'), + ('𐖗', '𐖡'), + ('𐖣', '𐖱'), + ('𐖳', '𐖹'), + ('𐖻', '𐖼'), +]; + +pub const WANCHO: &'static [(char, char)] = &[('𞋀', '𞋹'), ('𞋿', '𞋿')]; + +pub const WARANG_CITI: &'static [(char, char)] = &[('𑢠', '𑣲'), ('𑣿', '𑣿')]; + +pub const YEZIDI: &'static [(char, char)] = + &[('𐺀', '𐺩'), ('\u{10eab}', '𐺭'), ('𐺰', '𐺱')]; + +pub const YI: &'static [(char, char)] = &[('ꀀ', 'ꒌ'), ('꒐', '꓆')]; + +pub const ZANABAZAR_SQUARE: &'static [(char, char)] = &[('𑨀', '\u{11a47}')]; diff --git a/vendor/regex-syntax/src/unicode_tables/script_extension.rs b/vendor/regex-syntax/src/unicode_tables/script_extension.rs new file mode 100644 index 00000000000000..e3f492e2d6bee1 --- /dev/null +++ b/vendor/regex-syntax/src/unicode_tables/script_extension.rs @@ -0,0 +1,1718 @@ +// DO NOT EDIT THIS FILE. IT WAS AUTOMATICALLY GENERATED BY: +// +// ucd-generate script-extension ucd-16.0.0 --chars +// +// Unicode version: 16.0.0. +// +// ucd-generate 0.3.1 is available on crates.io. 
+ +pub const BY_NAME: &'static [(&'static str, &'static [(char, char)])] = &[ + ("Adlam", ADLAM), + ("Ahom", AHOM), + ("Anatolian_Hieroglyphs", ANATOLIAN_HIEROGLYPHS), + ("Arabic", ARABIC), + ("Armenian", ARMENIAN), + ("Avestan", AVESTAN), + ("Balinese", BALINESE), + ("Bamum", BAMUM), + ("Bassa_Vah", BASSA_VAH), + ("Batak", BATAK), + ("Bengali", BENGALI), + ("Bhaiksuki", BHAIKSUKI), + ("Bopomofo", BOPOMOFO), + ("Brahmi", BRAHMI), + ("Braille", BRAILLE), + ("Buginese", BUGINESE), + ("Buhid", BUHID), + ("Canadian_Aboriginal", CANADIAN_ABORIGINAL), + ("Carian", CARIAN), + ("Caucasian_Albanian", CAUCASIAN_ALBANIAN), + ("Chakma", CHAKMA), + ("Cham", CHAM), + ("Cherokee", CHEROKEE), + ("Chorasmian", CHORASMIAN), + ("Common", COMMON), + ("Coptic", COPTIC), + ("Cuneiform", CUNEIFORM), + ("Cypriot", CYPRIOT), + ("Cypro_Minoan", CYPRO_MINOAN), + ("Cyrillic", CYRILLIC), + ("Deseret", DESERET), + ("Devanagari", DEVANAGARI), + ("Dives_Akuru", DIVES_AKURU), + ("Dogra", DOGRA), + ("Duployan", DUPLOYAN), + ("Egyptian_Hieroglyphs", EGYPTIAN_HIEROGLYPHS), + ("Elbasan", ELBASAN), + ("Elymaic", ELYMAIC), + ("Ethiopic", ETHIOPIC), + ("Garay", GARAY), + ("Georgian", GEORGIAN), + ("Glagolitic", GLAGOLITIC), + ("Gothic", GOTHIC), + ("Grantha", GRANTHA), + ("Greek", GREEK), + ("Gujarati", GUJARATI), + ("Gunjala_Gondi", GUNJALA_GONDI), + ("Gurmukhi", GURMUKHI), + ("Gurung_Khema", GURUNG_KHEMA), + ("Han", HAN), + ("Hangul", HANGUL), + ("Hanifi_Rohingya", HANIFI_ROHINGYA), + ("Hanunoo", HANUNOO), + ("Hatran", HATRAN), + ("Hebrew", HEBREW), + ("Hiragana", HIRAGANA), + ("Imperial_Aramaic", IMPERIAL_ARAMAIC), + ("Inherited", INHERITED), + ("Inscriptional_Pahlavi", INSCRIPTIONAL_PAHLAVI), + ("Inscriptional_Parthian", INSCRIPTIONAL_PARTHIAN), + ("Javanese", JAVANESE), + ("Kaithi", KAITHI), + ("Kannada", KANNADA), + ("Katakana", KATAKANA), + ("Kawi", KAWI), + ("Kayah_Li", KAYAH_LI), + ("Kharoshthi", KHAROSHTHI), + ("Khitan_Small_Script", KHITAN_SMALL_SCRIPT), + ("Khmer", KHMER), + ("Khojki", KHOJKI), + ("Khudawadi", KHUDAWADI), + ("Kirat_Rai", KIRAT_RAI), + ("Lao", LAO), + ("Latin", LATIN), + ("Lepcha", LEPCHA), + ("Limbu", LIMBU), + ("Linear_A", LINEAR_A), + ("Linear_B", LINEAR_B), + ("Lisu", LISU), + ("Lycian", LYCIAN), + ("Lydian", LYDIAN), + ("Mahajani", MAHAJANI), + ("Makasar", MAKASAR), + ("Malayalam", MALAYALAM), + ("Mandaic", MANDAIC), + ("Manichaean", MANICHAEAN), + ("Marchen", MARCHEN), + ("Masaram_Gondi", MASARAM_GONDI), + ("Medefaidrin", MEDEFAIDRIN), + ("Meetei_Mayek", MEETEI_MAYEK), + ("Mende_Kikakui", MENDE_KIKAKUI), + ("Meroitic_Cursive", MEROITIC_CURSIVE), + ("Meroitic_Hieroglyphs", MEROITIC_HIEROGLYPHS), + ("Miao", MIAO), + ("Modi", MODI), + ("Mongolian", MONGOLIAN), + ("Mro", MRO), + ("Multani", MULTANI), + ("Myanmar", MYANMAR), + ("Nabataean", NABATAEAN), + ("Nag_Mundari", NAG_MUNDARI), + ("Nandinagari", NANDINAGARI), + ("New_Tai_Lue", NEW_TAI_LUE), + ("Newa", NEWA), + ("Nko", NKO), + ("Nushu", NUSHU), + ("Nyiakeng_Puachue_Hmong", NYIAKENG_PUACHUE_HMONG), + ("Ogham", OGHAM), + ("Ol_Chiki", OL_CHIKI), + ("Ol_Onal", OL_ONAL), + ("Old_Hungarian", OLD_HUNGARIAN), + ("Old_Italic", OLD_ITALIC), + ("Old_North_Arabian", OLD_NORTH_ARABIAN), + ("Old_Permic", OLD_PERMIC), + ("Old_Persian", OLD_PERSIAN), + ("Old_Sogdian", OLD_SOGDIAN), + ("Old_South_Arabian", OLD_SOUTH_ARABIAN), + ("Old_Turkic", OLD_TURKIC), + ("Old_Uyghur", OLD_UYGHUR), + ("Oriya", ORIYA), + ("Osage", OSAGE), + ("Osmanya", OSMANYA), + ("Pahawh_Hmong", PAHAWH_HMONG), + ("Palmyrene", PALMYRENE), + ("Pau_Cin_Hau", PAU_CIN_HAU), + ("Phags_Pa", 
PHAGS_PA), + ("Phoenician", PHOENICIAN), + ("Psalter_Pahlavi", PSALTER_PAHLAVI), + ("Rejang", REJANG), + ("Runic", RUNIC), + ("Samaritan", SAMARITAN), + ("Saurashtra", SAURASHTRA), + ("Sharada", SHARADA), + ("Shavian", SHAVIAN), + ("Siddham", SIDDHAM), + ("SignWriting", SIGNWRITING), + ("Sinhala", SINHALA), + ("Sogdian", SOGDIAN), + ("Sora_Sompeng", SORA_SOMPENG), + ("Soyombo", SOYOMBO), + ("Sundanese", SUNDANESE), + ("Sunuwar", SUNUWAR), + ("Syloti_Nagri", SYLOTI_NAGRI), + ("Syriac", SYRIAC), + ("Tagalog", TAGALOG), + ("Tagbanwa", TAGBANWA), + ("Tai_Le", TAI_LE), + ("Tai_Tham", TAI_THAM), + ("Tai_Viet", TAI_VIET), + ("Takri", TAKRI), + ("Tamil", TAMIL), + ("Tangsa", TANGSA), + ("Tangut", TANGUT), + ("Telugu", TELUGU), + ("Thaana", THAANA), + ("Thai", THAI), + ("Tibetan", TIBETAN), + ("Tifinagh", TIFINAGH), + ("Tirhuta", TIRHUTA), + ("Todhri", TODHRI), + ("Toto", TOTO), + ("Tulu_Tigalari", TULU_TIGALARI), + ("Ugaritic", UGARITIC), + ("Vai", VAI), + ("Vithkuqi", VITHKUQI), + ("Wancho", WANCHO), + ("Warang_Citi", WARANG_CITI), + ("Yezidi", YEZIDI), + ("Yi", YI), + ("Zanabazar_Square", ZANABAZAR_SQUARE), +]; + +pub const ADLAM: &'static [(char, char)] = &[ + ('؟', '؟'), + ('ـ', 'ـ'), + ('⁏', '⁏'), + ('⹁', '⹁'), + ('𞤀', '𞥋'), + ('𞥐', '𞥙'), + ('𞥞', '𞥟'), +]; + +pub const AHOM: &'static [(char, char)] = + &[('𑜀', '𑜚'), ('\u{1171d}', '\u{1172b}'), ('𑜰', '𑝆')]; + +pub const ANATOLIAN_HIEROGLYPHS: &'static [(char, char)] = &[('𔐀', '𔙆')]; + +pub const ARABIC: &'static [(char, char)] = &[ + ('\u{600}', '\u{604}'), + ('؆', '\u{6dc}'), + ('۞', 'ۿ'), + ('ݐ', 'ݿ'), + ('ࡰ', 'ࢎ'), + ('\u{890}', '\u{891}'), + ('\u{897}', '\u{8e1}'), + ('\u{8e3}', '\u{8ff}'), + ('⁏', '⁏'), + ('⹁', '⹁'), + ('ﭐ', '﯂'), + ('ﯓ', 'ﶏ'), + ('ﶒ', 'ﷇ'), + ('﷏', '﷏'), + ('ﷰ', '﷿'), + ('ﹰ', 'ﹴ'), + ('ﹶ', 'ﻼ'), + ('\u{102e0}', '𐋻'), + ('𐹠', '𐹾'), + ('𐻂', '𐻄'), + ('\u{10efc}', '\u{10eff}'), + ('𞸀', '𞸃'), + ('𞸅', '𞸟'), + ('𞸡', '𞸢'), + ('𞸤', '𞸤'), + ('𞸧', '𞸧'), + ('𞸩', '𞸲'), + ('𞸴', '𞸷'), + ('𞸹', '𞸹'), + ('𞸻', '𞸻'), + ('𞹂', '𞹂'), + ('𞹇', '𞹇'), + ('𞹉', '𞹉'), + ('𞹋', '𞹋'), + ('𞹍', '𞹏'), + ('𞹑', '𞹒'), + ('𞹔', '𞹔'), + ('𞹗', '𞹗'), + ('𞹙', '𞹙'), + ('𞹛', '𞹛'), + ('𞹝', '𞹝'), + ('𞹟', '𞹟'), + ('𞹡', '𞹢'), + ('𞹤', '𞹤'), + ('𞹧', '𞹪'), + ('𞹬', '𞹲'), + ('𞹴', '𞹷'), + ('𞹹', '𞹼'), + ('𞹾', '𞹾'), + ('𞺀', '𞺉'), + ('𞺋', '𞺛'), + ('𞺡', '𞺣'), + ('𞺥', '𞺩'), + ('𞺫', '𞺻'), + ('𞻰', '𞻱'), +]; + +pub const ARMENIAN: &'static [(char, char)] = + &[('\u{308}', '\u{308}'), ('Ա', 'Ֆ'), ('ՙ', '֊'), ('֍', '֏'), ('ﬓ', 'ﬗ')]; + +pub const AVESTAN: &'static [(char, char)] = + &[('·', '·'), ('⸰', '⸱'), ('𐬀', '𐬵'), ('𐬹', '𐬿')]; + +pub const BALINESE: &'static [(char, char)] = &[('\u{1b00}', 'ᭌ'), ('᭎', '᭿')]; + +pub const BAMUM: &'static [(char, char)] = &[('ꚠ', '꛷'), ('𖠀', '𖨸')]; + +pub const BASSA_VAH: &'static [(char, char)] = + &[('𖫐', '𖫭'), ('\u{16af0}', '𖫵')]; + +pub const BATAK: &'static [(char, char)] = &[('ᯀ', '\u{1bf3}'), ('᯼', '᯿')]; + +pub const BENGALI: &'static [(char, char)] = &[ + ('ʼ', 'ʼ'), + ('\u{951}', '\u{952}'), + ('।', '॥'), + ('ঀ', 'ঃ'), + ('অ', 'ঌ'), + ('এ', 'ঐ'), + ('ও', 'ন'), + ('প', 'র'), + ('ল', 'ল'), + ('শ', 'হ'), + ('\u{9bc}', '\u{9c4}'), + ('ে', 'ৈ'), + ('ো', 'ৎ'), + ('\u{9d7}', '\u{9d7}'), + ('ড়', 'ঢ়'), + ('য়', '\u{9e3}'), + ('০', '\u{9fe}'), + ('\u{1cd0}', '\u{1cd0}'), + ('\u{1cd2}', '\u{1cd2}'), + ('\u{1cd5}', '\u{1cd6}'), + ('\u{1cd8}', '\u{1cd8}'), + ('᳡', '᳡'), + ('ᳪ', 'ᳪ'), + ('\u{1ced}', '\u{1ced}'), + ('ᳲ', 'ᳲ'), + ('ᳵ', '᳷'), + ('\u{a8f1}', '\u{a8f1}'), +]; + +pub const BHAIKSUKI: &'static [(char, char)] = + &[('𑰀', '𑰈'), ('𑰊', 
'\u{11c36}'), ('\u{11c38}', '𑱅'), ('𑱐', '𑱬')]; + +pub const BOPOMOFO: &'static [(char, char)] = &[ + ('ˇ', 'ˇ'), + ('ˉ', 'ˋ'), + ('˙', '˙'), + ('˪', '˫'), + ('、', '〃'), + ('〈', '】'), + ('〓', '〟'), + ('\u{302a}', '\u{302d}'), + ('〰', '〰'), + ('〷', '〷'), + ('・', '・'), + ('ㄅ', 'ㄯ'), + ('ㆠ', 'ㆿ'), + ('﹅', '﹆'), + ('。', '・'), +]; + +pub const BRAHMI: &'static [(char, char)] = + &[('𑀀', '𑁍'), ('𑁒', '𑁵'), ('\u{1107f}', '\u{1107f}')]; + +pub const BRAILLE: &'static [(char, char)] = &[('⠀', '⣿')]; + +pub const BUGINESE: &'static [(char, char)] = + &[('ᨀ', '\u{1a1b}'), ('᨞', '᨟'), ('ꧏ', 'ꧏ')]; + +pub const BUHID: &'static [(char, char)] = &[('᜵', '᜶'), ('ᝀ', '\u{1753}')]; + +pub const CANADIAN_ABORIGINAL: &'static [(char, char)] = + &[('᐀', 'ᙿ'), ('ᢰ', 'ᣵ'), ('𑪰', '𑪿')]; + +pub const CARIAN: &'static [(char, char)] = + &[('·', '·'), ('⁚', '⁚'), ('⁝', '⁝'), ('⸱', '⸱'), ('𐊠', '𐋐')]; + +pub const CAUCASIAN_ALBANIAN: &'static [(char, char)] = &[ + ('\u{304}', '\u{304}'), + ('\u{331}', '\u{331}'), + ('\u{35e}', '\u{35e}'), + ('𐔰', '𐕣'), + ('𐕯', '𐕯'), +]; + +pub const CHAKMA: &'static [(char, char)] = + &[('০', '৯'), ('၀', '၉'), ('\u{11100}', '\u{11134}'), ('𑄶', '𑅇')]; + +pub const CHAM: &'static [(char, char)] = + &[('ꨀ', '\u{aa36}'), ('ꩀ', 'ꩍ'), ('꩐', '꩙'), ('꩜', '꩟')]; + +pub const CHEROKEE: &'static [(char, char)] = &[ + ('\u{300}', '\u{302}'), + ('\u{304}', '\u{304}'), + ('\u{30b}', '\u{30c}'), + ('\u{323}', '\u{324}'), + ('\u{330}', '\u{331}'), + ('Ꭰ', 'Ᏽ'), + ('ᏸ', 'ᏽ'), + ('ꭰ', 'ꮿ'), +]; + +pub const CHORASMIAN: &'static [(char, char)] = &[('𐾰', '𐿋')]; + +pub const COMMON: &'static [(char, char)] = &[ + ('\0', '@'), + ('[', '`'), + ('{', '©'), + ('«', '¶'), + ('¸', '¹'), + ('»', '¿'), + ('×', '×'), + ('÷', '÷'), + ('ʹ', 'ʻ'), + ('ʽ', 'ˆ'), + ('ˈ', 'ˈ'), + ('ˌ', 'ˌ'), + ('ˎ', '˖'), + ('˘', '˘'), + ('˚', '˟'), + ('˥', '˩'), + ('ˬ', '˿'), + (';', ';'), + ('΅', '΅'), + ('·', '·'), + ('\u{605}', '\u{605}'), + ('\u{6dd}', '\u{6dd}'), + ('\u{8e2}', '\u{8e2}'), + ('฿', '฿'), + ('࿕', '࿘'), + ('\u{2000}', '\u{200b}'), + ('\u{200e}', '\u{202e}'), + ('‰', '⁎'), + ('⁐', '⁙'), + ('⁛', '⁜'), + ('⁞', '\u{2064}'), + ('\u{2066}', '⁰'), + ('⁴', '⁾'), + ('₀', '₎'), + ('₠', '⃀'), + ('℀', '℥'), + ('℧', '℩'), + ('ℬ', 'ℱ'), + ('ℳ', '⅍'), + ('⅏', '⅟'), + ('↉', '↋'), + ('←', '␩'), + ('⑀', '⑊'), + ('①', '⟿'), + ('⤀', '⭳'), + ('⭶', '⮕'), + ('⮗', '⯿'), + ('⸀', '⸖'), + ('⸘', 'ⸯ'), + ('⸲', '⸻'), + ('⸽', '⹀'), + ('⹂', '⹂'), + ('⹄', '⹝'), + ('\u{3000}', '\u{3000}'), + ('〄', '〄'), + ('〒', '〒'), + ('〠', '〠'), + ('〶', '〶'), + ('㉈', '㉟'), + ('㉿', '㉿'), + ('㊱', '㊿'), + ('㋌', '㋏'), + ('㍱', '㍺'), + ('㎀', '㏟'), + ('㏿', '㏿'), + ('䷀', '䷿'), + ('꜈', '꜡'), + ('ꞈ', '꞊'), + ('꭛', '꭛'), + ('꭪', '꭫'), + ('︐', '︙'), + ('︰', '﹄'), + ('﹇', '﹒'), + ('﹔', '﹦'), + ('﹨', '﹫'), + ('\u{feff}', '\u{feff}'), + ('!', '@'), + ('[', '`'), + ('{', '⦆'), + ('¢', '₩'), + ('│', '○'), + ('\u{fff9}', '�'), + ('𐆐', '𐆜'), + ('𐇐', '𐇼'), + ('𜰀', '𜳹'), + ('𜴀', '𜺳'), + ('𜽐', '𜿃'), + ('𝀀', '𝃵'), + ('𝄀', '𝄦'), + ('𝄩', '\u{1d166}'), + ('𝅪', '\u{1d17a}'), + ('𝆃', '𝆄'), + ('𝆌', '𝆩'), + ('𝆮', '𝇪'), + ('𝋀', '𝋓'), + ('𝋠', '𝋳'), + ('𝌀', '𝍖'), + ('𝍲', '𝍸'), + ('𝐀', '𝑔'), + ('𝑖', '𝒜'), + ('𝒞', '𝒟'), + ('𝒢', '𝒢'), + ('𝒥', '𝒦'), + ('𝒩', '𝒬'), + ('𝒮', '𝒹'), + ('𝒻', '𝒻'), + ('𝒽', '𝓃'), + ('𝓅', '𝔅'), + ('𝔇', '𝔊'), + ('𝔍', '𝔔'), + ('𝔖', '𝔜'), + ('𝔞', '𝔹'), + ('𝔻', '𝔾'), + ('𝕀', '𝕄'), + ('𝕆', '𝕆'), + ('𝕊', '𝕐'), + ('𝕒', '𝚥'), + ('𝚨', '𝟋'), + ('𝟎', '𝟿'), + ('𞱱', '𞲴'), + ('𞴁', '𞴽'), + ('🀀', '🀫'), + ('🀰', '🂓'), + ('🂠', '🂮'), + ('🂱', '🂿'), + ('🃁', '🃏'), + ('🃑', '🃵'), + ('🄀', '🆭'), + ('🇦', 
'🇿'), + ('🈁', '🈂'), + ('🈐', '🈻'), + ('🉀', '🉈'), + ('🉠', '🉥'), + ('🌀', '🛗'), + ('🛜', '🛬'), + ('🛰', '🛼'), + ('🜀', '🝶'), + ('🝻', '🟙'), + ('🟠', '🟫'), + ('🟰', '🟰'), + ('🠀', '🠋'), + ('🠐', '🡇'), + ('🡐', '🡙'), + ('🡠', '🢇'), + ('🢐', '🢭'), + ('🢰', '🢻'), + ('🣀', '🣁'), + ('🤀', '🩓'), + ('🩠', '🩭'), + ('🩰', '🩼'), + ('🪀', '🪉'), + ('🪏', '🫆'), + ('🫎', '🫜'), + ('🫟', '🫩'), + ('🫰', '🫸'), + ('🬀', '🮒'), + ('🮔', '🯹'), + ('\u{e0001}', '\u{e0001}'), + ('\u{e0020}', '\u{e007f}'), +]; + +pub const COPTIC: &'static [(char, char)] = &[ + ('·', '·'), + ('\u{300}', '\u{300}'), + ('\u{304}', '\u{305}'), + ('\u{307}', '\u{307}'), + ('ʹ', '͵'), + ('Ϣ', 'ϯ'), + ('Ⲁ', 'ⳳ'), + ('⳹', '⳿'), + ('⸗', '⸗'), + ('\u{102e0}', '𐋻'), +]; + +pub const CUNEIFORM: &'static [(char, char)] = + &[('𒀀', '𒎙'), ('𒐀', '𒑮'), ('𒑰', '𒑴'), ('𒒀', '𒕃')]; + +pub const CYPRIOT: &'static [(char, char)] = &[ + ('𐄀', '𐄂'), + ('𐄇', '𐄳'), + ('𐄷', '𐄿'), + ('𐠀', '𐠅'), + ('𐠈', '𐠈'), + ('𐠊', '𐠵'), + ('𐠷', '𐠸'), + ('𐠼', '𐠼'), + ('𐠿', '𐠿'), +]; + +pub const CYPRO_MINOAN: &'static [(char, char)] = &[('𐄀', '𐄁'), ('𒾐', '𒿲')]; + +pub const CYRILLIC: &'static [(char, char)] = &[ + ('ʼ', 'ʼ'), + ('\u{300}', '\u{302}'), + ('\u{304}', '\u{304}'), + ('\u{306}', '\u{306}'), + ('\u{308}', '\u{308}'), + ('\u{30b}', '\u{30b}'), + ('\u{311}', '\u{311}'), + ('Ѐ', 'ԯ'), + ('ᲀ', 'ᲊ'), + ('ᴫ', 'ᴫ'), + ('ᵸ', 'ᵸ'), + ('\u{1df8}', '\u{1df8}'), + ('\u{2de0}', '\u{2dff}'), + ('⹃', '⹃'), + ('Ꙁ', '\u{a69f}'), + ('\u{fe2e}', '\u{fe2f}'), + ('𞀰', '𞁭'), + ('\u{1e08f}', '\u{1e08f}'), +]; + +pub const DESERET: &'static [(char, char)] = &[('𐐀', '𐑏')]; + +pub const DEVANAGARI: &'static [(char, char)] = &[ + ('ʼ', 'ʼ'), + ('\u{900}', '\u{952}'), + ('\u{955}', 'ॿ'), + ('\u{1cd0}', 'ᳶ'), + ('\u{1cf8}', '\u{1cf9}'), + ('\u{20f0}', '\u{20f0}'), + ('꠰', '꠹'), + ('\u{a8e0}', '\u{a8ff}'), + ('𑬀', '𑬉'), +]; + +pub const DIVES_AKURU: &'static [(char, char)] = &[ + ('𑤀', '𑤆'), + ('𑤉', '𑤉'), + ('𑤌', '𑤓'), + ('𑤕', '𑤖'), + ('𑤘', '𑤵'), + ('𑤷', '𑤸'), + ('\u{1193b}', '𑥆'), + ('𑥐', '𑥙'), +]; + +pub const DOGRA: &'static [(char, char)] = + &[('।', '९'), ('꠰', '꠹'), ('𑠀', '𑠻')]; + +pub const DUPLOYAN: &'static [(char, char)] = &[ + ('·', '·'), + ('\u{307}', '\u{308}'), + ('\u{30a}', '\u{30a}'), + ('\u{323}', '\u{324}'), + ('⸼', '⸼'), + ('𛰀', '𛱪'), + ('𛱰', '𛱼'), + ('𛲀', '𛲈'), + ('𛲐', '𛲙'), + ('𛲜', '\u{1bca3}'), +]; + +pub const EGYPTIAN_HIEROGLYPHS: &'static [(char, char)] = + &[('𓀀', '\u{13455}'), ('𓑠', '𔏺')]; + +pub const ELBASAN: &'static [(char, char)] = + &[('·', '·'), ('\u{305}', '\u{305}'), ('𐔀', '𐔧')]; + +pub const ELYMAIC: &'static [(char, char)] = &[('𐿠', '𐿶')]; + +pub const ETHIOPIC: &'static [(char, char)] = &[ + ('\u{30e}', '\u{30e}'), + ('ሀ', 'ቈ'), + ('ቊ', 'ቍ'), + ('ቐ', 'ቖ'), + ('ቘ', 'ቘ'), + ('ቚ', 'ቝ'), + ('በ', 'ኈ'), + ('ኊ', 'ኍ'), + ('ነ', 'ኰ'), + ('ኲ', 'ኵ'), + ('ኸ', 'ኾ'), + ('ዀ', 'ዀ'), + ('ዂ', 'ዅ'), + ('ወ', 'ዖ'), + ('ዘ', 'ጐ'), + ('ጒ', 'ጕ'), + ('ጘ', 'ፚ'), + ('\u{135d}', '፼'), + ('ᎀ', '᎙'), + ('ⶀ', 'ⶖ'), + ('ⶠ', 'ⶦ'), + ('ⶨ', 'ⶮ'), + ('ⶰ', 'ⶶ'), + ('ⶸ', 'ⶾ'), + ('ⷀ', 'ⷆ'), + ('ⷈ', 'ⷎ'), + ('ⷐ', 'ⷖ'), + ('ⷘ', 'ⷞ'), + ('ꬁ', 'ꬆ'), + ('ꬉ', 'ꬎ'), + ('ꬑ', 'ꬖ'), + ('ꬠ', 'ꬦ'), + ('ꬨ', 'ꬮ'), + ('𞟠', '𞟦'), + ('𞟨', '𞟫'), + ('𞟭', '𞟮'), + ('𞟰', '𞟾'), +]; + +pub const GARAY: &'static [(char, char)] = &[ + ('،', '،'), + ('؛', '؛'), + ('؟', '؟'), + ('𐵀', '𐵥'), + ('\u{10d69}', '𐶅'), + ('𐶎', '𐶏'), +]; + +pub const GEORGIAN: &'static [(char, char)] = &[ + ('·', '·'), + ('։', '։'), + ('Ⴀ', 'Ⴥ'), + ('Ⴧ', 'Ⴧ'), + ('Ⴭ', 'Ⴭ'), + ('ა', 'ჿ'), + ('Ა', 'Ჺ'), + ('Ჽ', 'Ჿ'), + ('⁚', '⁚'), + ('ⴀ', 'ⴥ'), + ('ⴧ', 'ⴧ'), + ('ⴭ', 'ⴭ'), + 
('⸱', '⸱'), +]; + +pub const GLAGOLITIC: &'static [(char, char)] = &[ + ('·', '·'), + ('\u{303}', '\u{303}'), + ('\u{305}', '\u{305}'), + ('\u{484}', '\u{484}'), + ('\u{487}', '\u{487}'), + ('։', '։'), + ('჻', '჻'), + ('⁚', '⁚'), + ('Ⰰ', 'ⱟ'), + ('⹃', '⹃'), + ('\u{a66f}', '\u{a66f}'), + ('\u{1e000}', '\u{1e006}'), + ('\u{1e008}', '\u{1e018}'), + ('\u{1e01b}', '\u{1e021}'), + ('\u{1e023}', '\u{1e024}'), + ('\u{1e026}', '\u{1e02a}'), +]; + +pub const GOTHIC: &'static [(char, char)] = &[ + ('·', '·'), + ('\u{304}', '\u{305}'), + ('\u{308}', '\u{308}'), + ('\u{331}', '\u{331}'), + ('𐌰', '𐍊'), +]; + +pub const GRANTHA: &'static [(char, char)] = &[ + ('\u{951}', '\u{952}'), + ('।', '॥'), + ('௦', '௳'), + ('\u{1cd0}', '\u{1cd0}'), + ('\u{1cd2}', '᳓'), + ('ᳲ', '\u{1cf4}'), + ('\u{1cf8}', '\u{1cf9}'), + ('\u{20f0}', '\u{20f0}'), + ('\u{11300}', '𑌃'), + ('𑌅', '𑌌'), + ('𑌏', '𑌐'), + ('𑌓', '𑌨'), + ('𑌪', '𑌰'), + ('𑌲', '𑌳'), + ('𑌵', '𑌹'), + ('\u{1133b}', '𑍄'), + ('𑍇', '𑍈'), + ('𑍋', '\u{1134d}'), + ('𑍐', '𑍐'), + ('\u{11357}', '\u{11357}'), + ('𑍝', '𑍣'), + ('\u{11366}', '\u{1136c}'), + ('\u{11370}', '\u{11374}'), + ('𑿐', '𑿑'), + ('𑿓', '𑿓'), +]; + +pub const GREEK: &'static [(char, char)] = &[ + ('·', '·'), + ('\u{300}', '\u{301}'), + ('\u{304}', '\u{304}'), + ('\u{306}', '\u{306}'), + ('\u{308}', '\u{308}'), + ('\u{313}', '\u{313}'), + ('\u{342}', '\u{342}'), + ('\u{345}', '\u{345}'), + ('Ͱ', 'ͷ'), + ('ͺ', 'ͽ'), + ('Ϳ', 'Ϳ'), + ('΄', '΄'), + ('Ά', 'Ά'), + ('Έ', 'Ί'), + ('Ό', 'Ό'), + ('Ύ', 'Ρ'), + ('Σ', 'ϡ'), + ('ϰ', 'Ͽ'), + ('ᴦ', 'ᴪ'), + ('ᵝ', 'ᵡ'), + ('ᵦ', 'ᵪ'), + ('ᶿ', '\u{1dc1}'), + ('ἀ', 'ἕ'), + ('Ἐ', 'Ἕ'), + ('ἠ', 'ὅ'), + ('Ὀ', 'Ὅ'), + ('ὐ', 'ὗ'), + ('Ὑ', 'Ὑ'), + ('Ὓ', 'Ὓ'), + ('Ὕ', 'Ὕ'), + ('Ὗ', 'ώ'), + ('ᾀ', 'ᾴ'), + ('ᾶ', 'ῄ'), + ('ῆ', 'ΐ'), + ('ῖ', 'Ί'), + ('῝', '`'), + ('ῲ', 'ῴ'), + ('ῶ', '῾'), + ('⁝', '⁝'), + ('Ω', 'Ω'), + ('ꭥ', 'ꭥ'), + ('𐅀', '𐆎'), + ('𐆠', '𐆠'), + ('𝈀', '𝉅'), +]; + +pub const GUJARATI: &'static [(char, char)] = &[ + ('\u{951}', '\u{952}'), + ('।', '॥'), + ('\u{a81}', 'ઃ'), + ('અ', 'ઍ'), + ('એ', 'ઑ'), + ('ઓ', 'ન'), + ('પ', 'ર'), + ('લ', 'ળ'), + ('વ', 'હ'), + ('\u{abc}', '\u{ac5}'), + ('\u{ac7}', 'ૉ'), + ('ો', '\u{acd}'), + ('ૐ', 'ૐ'), + ('ૠ', '\u{ae3}'), + ('૦', '૱'), + ('ૹ', '\u{aff}'), + ('꠰', '꠹'), +]; + +pub const GUNJALA_GONDI: &'static [(char, char)] = &[ + ('·', '·'), + ('।', '॥'), + ('𑵠', '𑵥'), + ('𑵧', '𑵨'), + ('𑵪', '𑶎'), + ('\u{11d90}', '\u{11d91}'), + ('𑶓', '𑶘'), + ('𑶠', '𑶩'), +]; + +pub const GURMUKHI: &'static [(char, char)] = &[ + ('\u{951}', '\u{952}'), + ('।', '॥'), + ('\u{a01}', 'ਃ'), + ('ਅ', 'ਊ'), + ('ਏ', 'ਐ'), + ('ਓ', 'ਨ'), + ('ਪ', 'ਰ'), + ('ਲ', 'ਲ਼'), + ('ਵ', 'ਸ਼'), + ('ਸ', 'ਹ'), + ('\u{a3c}', '\u{a3c}'), + ('ਾ', '\u{a42}'), + ('\u{a47}', '\u{a48}'), + ('\u{a4b}', '\u{a4d}'), + ('\u{a51}', '\u{a51}'), + ('ਖ਼', 'ੜ'), + ('ਫ਼', 'ਫ਼'), + ('੦', '੶'), + ('꠰', '꠹'), +]; + +pub const GURUNG_KHEMA: &'static [(char, char)] = &[('॥', '॥'), ('𖄀', '𖄹')]; + +pub const HAN: &'static [(char, char)] = &[ + ('·', '·'), + ('⺀', '⺙'), + ('⺛', '⻳'), + ('⼀', '⿕'), + ('⿰', '⿿'), + ('、', '〃'), + ('々', '】'), + ('〓', '〟'), + ('〡', '\u{302d}'), + ('〰', '〰'), + ('〷', '〿'), + ('・', '・'), + ('㆐', '㆟'), + ('㇀', '㇥'), + ('㇯', '㇯'), + ('㈠', '㉇'), + ('㊀', '㊰'), + ('㋀', '㋋'), + ('㋿', '㋿'), + ('㍘', '㍰'), + ('㍻', '㍿'), + ('㏠', '㏾'), + ('㐀', '䶿'), + ('一', '鿿'), + ('꜀', '꜇'), + ('豈', '舘'), + ('並', '龎'), + ('﹅', '﹆'), + ('。', '・'), + ('𖿢', '𖿣'), + ('\u{16ff0}', '\u{16ff1}'), + ('𝍠', '𝍱'), + ('🉐', '🉑'), + ('𠀀', '𪛟'), + ('𪜀', '𫜹'), + ('𫝀', '𫠝'), + ('𫠠', '𬺡'), + ('𬺰', '𮯠'), + ('𮯰', '𮹝'), + ('丽', '𪘀'), + 
('𰀀', '𱍊'), + ('𱍐', '𲎯'), +]; + +pub const HANGUL: &'static [(char, char)] = &[ + ('ᄀ', 'ᇿ'), + ('、', '〃'), + ('〈', '】'), + ('〓', '〟'), + ('\u{302e}', '〰'), + ('〷', '〷'), + ('・', '・'), + ('ㄱ', 'ㆎ'), + ('㈀', '㈞'), + ('㉠', '㉾'), + ('ꥠ', 'ꥼ'), + ('가', '힣'), + ('ힰ', 'ퟆ'), + ('ퟋ', 'ퟻ'), + ('﹅', '﹆'), + ('。', '・'), + ('ᅠ', 'ᄒ'), + ('ᅡ', 'ᅦ'), + ('ᅧ', 'ᅬ'), + ('ᅭ', 'ᅲ'), + ('ᅳ', 'ᅵ'), +]; + +pub const HANIFI_ROHINGYA: &'static [(char, char)] = &[ + ('،', '،'), + ('؛', '؛'), + ('؟', '؟'), + ('ـ', 'ـ'), + ('۔', '۔'), + ('𐴀', '\u{10d27}'), + ('𐴰', '𐴹'), +]; + +pub const HANUNOO: &'static [(char, char)] = &[('ᜠ', '᜶')]; + +pub const HATRAN: &'static [(char, char)] = + &[('𐣠', '𐣲'), ('𐣴', '𐣵'), ('𐣻', '𐣿')]; + +pub const HEBREW: &'static [(char, char)] = &[ + ('\u{307}', '\u{308}'), + ('\u{591}', '\u{5c7}'), + ('א', 'ת'), + ('ׯ', '״'), + ('יִ', 'זּ'), + ('טּ', 'לּ'), + ('מּ', 'מּ'), + ('נּ', 'סּ'), + ('ףּ', 'פּ'), + ('צּ', 'ﭏ'), +]; + +pub const HIRAGANA: &'static [(char, char)] = &[ + ('、', '〃'), + ('〈', '】'), + ('〓', '〟'), + ('〰', '〵'), + ('〷', '〷'), + ('〼', '〽'), + ('ぁ', 'ゖ'), + ('\u{3099}', '゠'), + ('・', 'ー'), + ('﹅', '﹆'), + ('。', '・'), + ('ー', 'ー'), + ('\u{ff9e}', '\u{ff9f}'), + ('𛀁', '𛄟'), + ('𛄲', '𛄲'), + ('𛅐', '𛅒'), + ('🈀', '🈀'), +]; + +pub const IMPERIAL_ARAMAIC: &'static [(char, char)] = + &[('𐡀', '𐡕'), ('𐡗', '𐡟')]; + +pub const INHERITED: &'static [(char, char)] = &[ + ('\u{30f}', '\u{30f}'), + ('\u{312}', '\u{312}'), + ('\u{314}', '\u{31f}'), + ('\u{321}', '\u{322}'), + ('\u{326}', '\u{32c}'), + ('\u{32f}', '\u{32f}'), + ('\u{332}', '\u{341}'), + ('\u{343}', '\u{344}'), + ('\u{346}', '\u{357}'), + ('\u{359}', '\u{35d}'), + ('\u{35f}', '\u{362}'), + ('\u{953}', '\u{954}'), + ('\u{1ab0}', '\u{1ace}'), + ('\u{1dc2}', '\u{1df7}'), + ('\u{1df9}', '\u{1df9}'), + ('\u{1dfb}', '\u{1dff}'), + ('\u{200c}', '\u{200d}'), + ('\u{20d0}', '\u{20ef}'), + ('\u{fe00}', '\u{fe0f}'), + ('\u{fe20}', '\u{fe2d}'), + ('\u{101fd}', '\u{101fd}'), + ('\u{1cf00}', '\u{1cf2d}'), + ('\u{1cf30}', '\u{1cf46}'), + ('\u{1d167}', '\u{1d169}'), + ('\u{1d17b}', '\u{1d182}'), + ('\u{1d185}', '\u{1d18b}'), + ('\u{1d1aa}', '\u{1d1ad}'), + ('\u{e0100}', '\u{e01ef}'), +]; + +pub const INSCRIPTIONAL_PAHLAVI: &'static [(char, char)] = + &[('𐭠', '𐭲'), ('𐭸', '𐭿')]; + +pub const INSCRIPTIONAL_PARTHIAN: &'static [(char, char)] = + &[('𐭀', '𐭕'), ('𐭘', '𐭟')]; + +pub const JAVANESE: &'static [(char, char)] = + &[('\u{a980}', '꧍'), ('ꧏ', '꧙'), ('꧞', '꧟')]; + +pub const KAITHI: &'static [(char, char)] = &[ + ('०', '९'), + ('⸱', '⸱'), + ('꠰', '꠹'), + ('\u{11080}', '\u{110c2}'), + ('\u{110cd}', '\u{110cd}'), +]; + +pub const KANNADA: &'static [(char, char)] = &[ + ('\u{951}', '\u{952}'), + ('।', '॥'), + ('ಀ', 'ಌ'), + ('ಎ', 'ಐ'), + ('ಒ', 'ನ'), + ('ಪ', 'ಳ'), + ('ವ', 'ಹ'), + ('\u{cbc}', 'ೄ'), + ('\u{cc6}', '\u{cc8}'), + ('\u{cca}', '\u{ccd}'), + ('\u{cd5}', '\u{cd6}'), + ('ೝ', 'ೞ'), + ('ೠ', '\u{ce3}'), + ('೦', '೯'), + ('ೱ', 'ೳ'), + ('\u{1cd0}', '\u{1cd0}'), + ('\u{1cd2}', '᳓'), + ('\u{1cda}', '\u{1cda}'), + ('ᳲ', 'ᳲ'), + ('\u{1cf4}', '\u{1cf4}'), + ('꠰', '꠵'), +]; + +pub const KATAKANA: &'static [(char, char)] = &[ + ('\u{305}', '\u{305}'), + ('\u{323}', '\u{323}'), + ('、', '〃'), + ('〈', '】'), + ('〓', '〟'), + ('〰', '〵'), + ('〷', '〷'), + ('〼', '〽'), + ('\u{3099}', '゜'), + ('゠', 'ヿ'), + ('ㇰ', 'ㇿ'), + ('㋐', '㋾'), + ('㌀', '㍗'), + ('﹅', '﹆'), + ('。', '\u{ff9f}'), + ('𚿰', '𚿳'), + ('𚿵', '𚿻'), + ('𚿽', '𚿾'), + ('𛀀', '𛀀'), + ('𛄠', '𛄢'), + ('𛅕', '𛅕'), + ('𛅤', '𛅧'), +]; + +pub const KAWI: &'static [(char, char)] = + &[('\u{11f00}', '𑼐'), ('𑼒', '\u{11f3a}'), 
('𑼾', '\u{11f5a}')]; + +pub const KAYAH_LI: &'static [(char, char)] = &[('꤀', '꤯')]; + +pub const KHAROSHTHI: &'static [(char, char)] = &[ + ('𐨀', '\u{10a03}'), + ('\u{10a05}', '\u{10a06}'), + ('\u{10a0c}', '𐨓'), + ('𐨕', '𐨗'), + ('𐨙', '𐨵'), + ('\u{10a38}', '\u{10a3a}'), + ('\u{10a3f}', '𐩈'), + ('𐩐', '𐩘'), +]; + +pub const KHITAN_SMALL_SCRIPT: &'static [(char, char)] = + &[('\u{16fe4}', '\u{16fe4}'), ('𘬀', '𘳕'), ('𘳿', '𘳿')]; + +pub const KHMER: &'static [(char, char)] = + &[('ក', '\u{17dd}'), ('០', '៩'), ('៰', '៹'), ('᧠', '᧿')]; + +pub const KHOJKI: &'static [(char, char)] = + &[('૦', '૯'), ('꠰', '꠹'), ('𑈀', '𑈑'), ('𑈓', '\u{11241}')]; + +pub const KHUDAWADI: &'static [(char, char)] = + &[('।', '॥'), ('꠰', '꠹'), ('𑊰', '\u{112ea}'), ('𑋰', '𑋹')]; + +pub const KIRAT_RAI: &'static [(char, char)] = &[('𖵀', '𖵹')]; + +pub const LAO: &'static [(char, char)] = &[ + ('ກ', 'ຂ'), + ('ຄ', 'ຄ'), + ('ຆ', 'ຊ'), + ('ຌ', 'ຣ'), + ('ລ', 'ລ'), + ('ວ', 'ຽ'), + ('ເ', 'ໄ'), + ('ໆ', 'ໆ'), + ('\u{ec8}', '\u{ece}'), + ('໐', '໙'), + ('ໜ', 'ໟ'), +]; + +pub const LATIN: &'static [(char, char)] = &[ + ('A', 'Z'), + ('a', 'z'), + ('ª', 'ª'), + ('·', '·'), + ('º', 'º'), + ('À', 'Ö'), + ('Ø', 'ö'), + ('ø', 'ʸ'), + ('ʼ', 'ʼ'), + ('ˇ', 'ˇ'), + ('ˉ', 'ˋ'), + ('ˍ', 'ˍ'), + ('˗', '˗'), + ('˙', '˙'), + ('ˠ', 'ˤ'), + ('\u{300}', '\u{30e}'), + ('\u{310}', '\u{311}'), + ('\u{313}', '\u{313}'), + ('\u{320}', '\u{320}'), + ('\u{323}', '\u{325}'), + ('\u{32d}', '\u{32e}'), + ('\u{330}', '\u{331}'), + ('\u{358}', '\u{358}'), + ('\u{35e}', '\u{35e}'), + ('\u{363}', '\u{36f}'), + ('\u{485}', '\u{486}'), + ('\u{951}', '\u{952}'), + ('჻', '჻'), + ('ᴀ', 'ᴥ'), + ('ᴬ', 'ᵜ'), + ('ᵢ', 'ᵥ'), + ('ᵫ', 'ᵷ'), + ('ᵹ', 'ᶾ'), + ('\u{1df8}', '\u{1df8}'), + ('Ḁ', 'ỿ'), + ('\u{202f}', '\u{202f}'), + ('ⁱ', 'ⁱ'), + ('ⁿ', 'ⁿ'), + ('ₐ', 'ₜ'), + ('\u{20f0}', '\u{20f0}'), + ('K', 'Å'), + ('Ⅎ', 'Ⅎ'), + ('ⅎ', 'ⅎ'), + ('Ⅰ', 'ↈ'), + ('Ⱡ', 'Ɀ'), + ('⸗', '⸗'), + ('꜀', '꜇'), + ('Ꜣ', 'ꞇ'), + ('Ꞌ', 'ꟍ'), + ('Ꟑ', 'ꟑ'), + ('ꟓ', 'ꟓ'), + ('ꟕ', 'Ƛ'), + ('ꟲ', 'ꟿ'), + ('꤮', '꤮'), + ('ꬰ', 'ꭚ'), + ('ꭜ', 'ꭤ'), + ('ꭦ', 'ꭩ'), + ('ff', 'st'), + ('A', 'Z'), + ('a', 'z'), + ('𐞀', '𐞅'), + ('𐞇', '𐞰'), + ('𐞲', '𐞺'), + ('𝼀', '𝼞'), + ('𝼥', '𝼪'), +]; + +pub const LEPCHA: &'static [(char, char)] = + &[('ᰀ', '\u{1c37}'), ('᰻', '᱉'), ('ᱍ', 'ᱏ')]; + +pub const LIMBU: &'static [(char, char)] = &[ + ('॥', '॥'), + ('ᤀ', 'ᤞ'), + ('\u{1920}', 'ᤫ'), + ('ᤰ', '\u{193b}'), + ('᥀', '᥀'), + ('᥄', '᥏'), +]; + +pub const LINEAR_A: &'static [(char, char)] = + &[('𐄇', '𐄳'), ('𐘀', '𐜶'), ('𐝀', '𐝕'), ('𐝠', '𐝧')]; + +pub const LINEAR_B: &'static [(char, char)] = &[ + ('𐀀', '𐀋'), + ('𐀍', '𐀦'), + ('𐀨', '𐀺'), + ('𐀼', '𐀽'), + ('𐀿', '𐁍'), + ('𐁐', '𐁝'), + ('𐂀', '𐃺'), + ('𐄀', '𐄂'), + ('𐄇', '𐄳'), + ('𐄷', '𐄿'), +]; + +pub const LISU: &'static [(char, char)] = + &[('ʼ', 'ʼ'), ('ˍ', 'ˍ'), ('《', '》'), ('ꓐ', '꓿'), ('𑾰', '𑾰')]; + +pub const LYCIAN: &'static [(char, char)] = &[('⁚', '⁚'), ('𐊀', '𐊜')]; + +pub const LYDIAN: &'static [(char, char)] = + &[('·', '·'), ('⸱', '⸱'), ('𐤠', '𐤹'), ('𐤿', '𐤿')]; + +pub const MAHAJANI: &'static [(char, char)] = + &[('·', '·'), ('।', '९'), ('꠰', '꠹'), ('𑅐', '𑅶')]; + +pub const MAKASAR: &'static [(char, char)] = &[('𑻠', '𑻸')]; + +pub const MALAYALAM: &'static [(char, char)] = &[ + ('\u{951}', '\u{952}'), + ('।', '॥'), + ('\u{d00}', 'ഌ'), + ('എ', 'ഐ'), + ('ഒ', '\u{d44}'), + ('െ', 'ൈ'), + ('ൊ', '൏'), + ('ൔ', '\u{d63}'), + ('൦', 'ൿ'), + ('\u{1cda}', '\u{1cda}'), + ('ᳲ', 'ᳲ'), + ('꠰', '꠲'), +]; + +pub const MANDAIC: &'static [(char, char)] = + &[('ـ', 'ـ'), ('ࡀ', '\u{85b}'), ('࡞', '࡞')]; + +pub 
const MANICHAEAN: &'static [(char, char)] = + &[('ـ', 'ـ'), ('𐫀', '\u{10ae6}'), ('𐫫', '𐫶')]; + +pub const MARCHEN: &'static [(char, char)] = + &[('𑱰', '𑲏'), ('\u{11c92}', '\u{11ca7}'), ('𑲩', '\u{11cb6}')]; + +pub const MASARAM_GONDI: &'static [(char, char)] = &[ + ('।', '॥'), + ('𑴀', '𑴆'), + ('𑴈', '𑴉'), + ('𑴋', '\u{11d36}'), + ('\u{11d3a}', '\u{11d3a}'), + ('\u{11d3c}', '\u{11d3d}'), + ('\u{11d3f}', '\u{11d47}'), + ('𑵐', '𑵙'), +]; + +pub const MEDEFAIDRIN: &'static [(char, char)] = &[('𖹀', '𖺚')]; + +pub const MEETEI_MAYEK: &'static [(char, char)] = + &[('ꫠ', '\u{aaf6}'), ('ꯀ', '\u{abed}'), ('꯰', '꯹')]; + +pub const MENDE_KIKAKUI: &'static [(char, char)] = + &[('𞠀', '𞣄'), ('𞣇', '\u{1e8d6}')]; + +pub const MEROITIC_CURSIVE: &'static [(char, char)] = + &[('𐦠', '𐦷'), ('𐦼', '𐧏'), ('𐧒', '𐧿')]; + +pub const MEROITIC_HIEROGLYPHS: &'static [(char, char)] = + &[('⁝', '⁝'), ('𐦀', '𐦟')]; + +pub const MIAO: &'static [(char, char)] = + &[('𖼀', '𖽊'), ('\u{16f4f}', '𖾇'), ('\u{16f8f}', '𖾟')]; + +pub const MODI: &'static [(char, char)] = + &[('꠰', '꠹'), ('𑘀', '𑙄'), ('𑙐', '𑙙')]; + +pub const MONGOLIAN: &'static [(char, char)] = &[ + ('᠀', '᠙'), + ('ᠠ', 'ᡸ'), + ('ᢀ', 'ᢪ'), + ('\u{202f}', '\u{202f}'), + ('、', '。'), + ('〈', '》'), + ('𑙠', '𑙬'), +]; + +pub const MRO: &'static [(char, char)] = &[('𖩀', '𖩞'), ('𖩠', '𖩩'), ('𖩮', '𖩯')]; + +pub const MULTANI: &'static [(char, char)] = + &[('੦', '੯'), ('𑊀', '𑊆'), ('𑊈', '𑊈'), ('𑊊', '𑊍'), ('𑊏', '𑊝'), ('𑊟', '𑊩')]; + +pub const MYANMAR: &'static [(char, char)] = + &[('က', '႟'), ('꤮', '꤮'), ('ꧠ', 'ꧾ'), ('ꩠ', 'ꩿ'), ('𑛐', '𑛣')]; + +pub const NABATAEAN: &'static [(char, char)] = &[('𐢀', '𐢞'), ('𐢧', '𐢯')]; + +pub const NAG_MUNDARI: &'static [(char, char)] = &[('𞓐', '𞓹')]; + +pub const NANDINAGARI: &'static [(char, char)] = &[ + ('।', '॥'), + ('೦', '೯'), + ('ᳩ', 'ᳩ'), + ('ᳲ', 'ᳲ'), + ('ᳺ', 'ᳺ'), + ('꠰', '꠵'), + ('𑦠', '𑦧'), + ('𑦪', '\u{119d7}'), + ('\u{119da}', '𑧤'), +]; + +pub const NEW_TAI_LUE: &'static [(char, char)] = + &[('ᦀ', 'ᦫ'), ('ᦰ', 'ᧉ'), ('᧐', '᧚'), ('᧞', '᧟')]; + +pub const NEWA: &'static [(char, char)] = &[('𑐀', '𑑛'), ('𑑝', '𑑡')]; + +pub const NKO: &'static [(char, char)] = &[ + ('،', '،'), + ('؛', '؛'), + ('؟', '؟'), + ('߀', 'ߺ'), + ('\u{7fd}', '߿'), + ('﴾', '﴿'), +]; + +pub const NUSHU: &'static [(char, char)] = &[('𖿡', '𖿡'), ('𛅰', '𛋻')]; + +pub const NYIAKENG_PUACHUE_HMONG: &'static [(char, char)] = + &[('𞄀', '𞄬'), ('\u{1e130}', '𞄽'), ('𞅀', '𞅉'), ('𞅎', '𞅏')]; + +pub const OGHAM: &'static [(char, char)] = &[('\u{1680}', '᚜')]; + +pub const OL_CHIKI: &'static [(char, char)] = &[('᱐', '᱿')]; + +pub const OL_ONAL: &'static [(char, char)] = + &[('।', '॥'), ('𞗐', '𞗺'), ('𞗿', '𞗿')]; + +pub const OLD_HUNGARIAN: &'static [(char, char)] = &[ + ('⁚', '⁚'), + ('⁝', '⁝'), + ('⸱', '⸱'), + ('⹁', '⹁'), + ('𐲀', '𐲲'), + ('𐳀', '𐳲'), + ('𐳺', '𐳿'), +]; + +pub const OLD_ITALIC: &'static [(char, char)] = &[('𐌀', '𐌣'), ('𐌭', '𐌯')]; + +pub const OLD_NORTH_ARABIAN: &'static [(char, char)] = &[('𐪀', '𐪟')]; + +pub const OLD_PERMIC: &'static [(char, char)] = &[ + ('·', '·'), + ('\u{300}', '\u{300}'), + ('\u{306}', '\u{308}'), + ('\u{313}', '\u{313}'), + ('\u{483}', '\u{483}'), + ('𐍐', '\u{1037a}'), +]; + +pub const OLD_PERSIAN: &'static [(char, char)] = &[('𐎠', '𐏃'), ('𐏈', '𐏕')]; + +pub const OLD_SOGDIAN: &'static [(char, char)] = &[('𐼀', '𐼧')]; + +pub const OLD_SOUTH_ARABIAN: &'static [(char, char)] = &[('𐩠', '𐩿')]; + +pub const OLD_TURKIC: &'static [(char, char)] = + &[('⁚', '⁚'), ('⸰', '⸰'), ('𐰀', '𐱈')]; + +pub const OLD_UYGHUR: &'static [(char, char)] = + &[('ـ', 'ـ'), ('𐫲', '𐫲'), ('𐽰', 
'𐾉')]; + +pub const ORIYA: &'static [(char, char)] = &[ + ('\u{951}', '\u{952}'), + ('।', '॥'), + ('\u{b01}', 'ଃ'), + ('ଅ', 'ଌ'), + ('ଏ', 'ଐ'), + ('ଓ', 'ନ'), + ('ପ', 'ର'), + ('ଲ', 'ଳ'), + ('ଵ', 'ହ'), + ('\u{b3c}', '\u{b44}'), + ('େ', 'ୈ'), + ('ୋ', '\u{b4d}'), + ('\u{b55}', '\u{b57}'), + ('ଡ଼', 'ଢ଼'), + ('ୟ', '\u{b63}'), + ('୦', '୷'), + ('\u{1cda}', '\u{1cda}'), + ('ᳲ', 'ᳲ'), +]; + +pub const OSAGE: &'static [(char, char)] = &[ + ('\u{301}', '\u{301}'), + ('\u{304}', '\u{304}'), + ('\u{30b}', '\u{30b}'), + ('\u{358}', '\u{358}'), + ('𐒰', '𐓓'), + ('𐓘', '𐓻'), +]; + +pub const OSMANYA: &'static [(char, char)] = &[('𐒀', '𐒝'), ('𐒠', '𐒩')]; + +pub const PAHAWH_HMONG: &'static [(char, char)] = + &[('𖬀', '𖭅'), ('𖭐', '𖭙'), ('𖭛', '𖭡'), ('𖭣', '𖭷'), ('𖭽', '𖮏')]; + +pub const PALMYRENE: &'static [(char, char)] = &[('𐡠', '𐡿')]; + +pub const PAU_CIN_HAU: &'static [(char, char)] = &[('𑫀', '𑫸')]; + +pub const PHAGS_PA: &'static [(char, char)] = &[ + ('᠂', '᠃'), + ('᠅', '᠅'), + ('\u{202f}', '\u{202f}'), + ('。', '。'), + ('ꡀ', '꡷'), +]; + +pub const PHOENICIAN: &'static [(char, char)] = &[('𐤀', '𐤛'), ('𐤟', '𐤟')]; + +pub const PSALTER_PAHLAVI: &'static [(char, char)] = + &[('ـ', 'ـ'), ('𐮀', '𐮑'), ('𐮙', '𐮜'), ('𐮩', '𐮯')]; + +pub const REJANG: &'static [(char, char)] = &[('ꤰ', '\u{a953}'), ('꥟', '꥟')]; + +pub const RUNIC: &'static [(char, char)] = &[('ᚠ', 'ᛸ')]; + +pub const SAMARITAN: &'static [(char, char)] = + &[('ࠀ', '\u{82d}'), ('࠰', '࠾'), ('⸱', '⸱')]; + +pub const SAURASHTRA: &'static [(char, char)] = + &[('ꢀ', '\u{a8c5}'), ('꣎', '꣙')]; + +pub const SHARADA: &'static [(char, char)] = &[ + ('\u{951}', '\u{951}'), + ('\u{1cd7}', '\u{1cd7}'), + ('\u{1cd9}', '\u{1cd9}'), + ('\u{1cdc}', '\u{1cdd}'), + ('\u{1ce0}', '\u{1ce0}'), + ('꠰', '꠵'), + ('꠸', '꠸'), + ('\u{11180}', '𑇟'), +]; + +pub const SHAVIAN: &'static [(char, char)] = &[('·', '·'), ('𐑐', '𐑿')]; + +pub const SIDDHAM: &'static [(char, char)] = + &[('𑖀', '\u{115b5}'), ('𑖸', '\u{115dd}')]; + +pub const SIGNWRITING: &'static [(char, char)] = + &[('𝠀', '𝪋'), ('\u{1da9b}', '\u{1da9f}'), ('\u{1daa1}', '\u{1daaf}')]; + +pub const SINHALA: &'static [(char, char)] = &[ + ('।', '॥'), + ('\u{d81}', 'ඃ'), + ('අ', 'ඖ'), + ('ක', 'න'), + ('ඳ', 'ර'), + ('ල', 'ල'), + ('ව', 'ෆ'), + ('\u{dca}', '\u{dca}'), + ('\u{dcf}', '\u{dd4}'), + ('\u{dd6}', '\u{dd6}'), + ('ෘ', '\u{ddf}'), + ('෦', '෯'), + ('ෲ', '෴'), + ('ᳲ', 'ᳲ'), + ('𑇡', '𑇴'), +]; + +pub const SOGDIAN: &'static [(char, char)] = &[('ـ', 'ـ'), ('𐼰', '𐽙')]; + +pub const SORA_SOMPENG: &'static [(char, char)] = &[('𑃐', '𑃨'), ('𑃰', '𑃹')]; + +pub const SOYOMBO: &'static [(char, char)] = &[('𑩐', '𑪢')]; + +pub const SUNDANESE: &'static [(char, char)] = + &[('\u{1b80}', 'ᮿ'), ('᳀', '᳇')]; + +pub const SUNUWAR: &'static [(char, char)] = &[ + ('\u{300}', '\u{301}'), + ('\u{303}', '\u{303}'), + ('\u{30d}', '\u{30d}'), + ('\u{310}', '\u{310}'), + ('\u{32d}', '\u{32d}'), + ('\u{331}', '\u{331}'), + ('𑯀', '𑯡'), + ('𑯰', '𑯹'), +]; + +pub const SYLOTI_NAGRI: &'static [(char, char)] = + &[('।', '॥'), ('০', '৯'), ('ꠀ', '\u{a82c}')]; + +pub const SYRIAC: &'static [(char, char)] = &[ + ('\u{303}', '\u{304}'), + ('\u{307}', '\u{308}'), + ('\u{30a}', '\u{30a}'), + ('\u{320}', '\u{320}'), + ('\u{323}', '\u{325}'), + ('\u{32d}', '\u{32e}'), + ('\u{330}', '\u{330}'), + ('،', '،'), + ('؛', '\u{61c}'), + ('؟', '؟'), + ('ـ', 'ـ'), + ('\u{64b}', '\u{655}'), + ('\u{670}', '\u{670}'), + ('܀', '܍'), + ('\u{70f}', '\u{74a}'), + ('ݍ', 'ݏ'), + ('ࡠ', 'ࡪ'), + ('\u{1df8}', '\u{1df8}'), + ('\u{1dfa}', '\u{1dfa}'), +]; + +pub const TAGALOG: &'static [(char, 
char)] = + &[('ᜀ', '\u{1715}'), ('ᜟ', 'ᜟ'), ('᜵', '᜶')]; + +pub const TAGBANWA: &'static [(char, char)] = + &[('᜵', '᜶'), ('ᝠ', 'ᝬ'), ('ᝮ', 'ᝰ'), ('\u{1772}', '\u{1773}')]; + +pub const TAI_LE: &'static [(char, char)] = &[ + ('\u{300}', '\u{301}'), + ('\u{307}', '\u{308}'), + ('\u{30c}', '\u{30c}'), + ('၀', '၉'), + ('ᥐ', 'ᥭ'), + ('ᥰ', 'ᥴ'), +]; + +pub const TAI_THAM: &'static [(char, char)] = &[ + ('ᨠ', '\u{1a5e}'), + ('\u{1a60}', '\u{1a7c}'), + ('\u{1a7f}', '᪉'), + ('᪐', '᪙'), + ('᪠', '᪭'), +]; + +pub const TAI_VIET: &'static [(char, char)] = &[('ꪀ', 'ꫂ'), ('ꫛ', '꫟')]; + +pub const TAKRI: &'static [(char, char)] = + &[('।', '॥'), ('꠰', '꠹'), ('𑚀', '𑚹'), ('𑛀', '𑛉')]; + +pub const TAMIL: &'static [(char, char)] = &[ + ('\u{951}', '\u{952}'), + ('।', '॥'), + ('\u{b82}', 'ஃ'), + ('அ', 'ஊ'), + ('எ', 'ஐ'), + ('ஒ', 'க'), + ('ங', 'ச'), + ('ஜ', 'ஜ'), + ('ஞ', 'ட'), + ('ண', 'த'), + ('ந', 'ப'), + ('ம', 'ஹ'), + ('\u{bbe}', 'ூ'), + ('ெ', 'ை'), + ('ொ', '\u{bcd}'), + ('ௐ', 'ௐ'), + ('\u{bd7}', '\u{bd7}'), + ('௦', '௺'), + ('\u{1cda}', '\u{1cda}'), + ('ꣳ', 'ꣳ'), + ('\u{11301}', '\u{11301}'), + ('𑌃', '𑌃'), + ('\u{1133b}', '\u{1133c}'), + ('𑿀', '𑿱'), + ('𑿿', '𑿿'), +]; + +pub const TANGSA: &'static [(char, char)] = &[('𖩰', '𖪾'), ('𖫀', '𖫉')]; + +pub const TANGUT: &'static [(char, char)] = &[ + ('⿰', '⿿'), + ('㇯', '㇯'), + ('𖿠', '𖿠'), + ('𗀀', '𘟷'), + ('𘠀', '𘫿'), + ('𘴀', '𘴈'), +]; + +pub const TELUGU: &'static [(char, char)] = &[ + ('\u{951}', '\u{952}'), + ('।', '॥'), + ('\u{c00}', 'ఌ'), + ('ఎ', 'ఐ'), + ('ఒ', 'న'), + ('ప', 'హ'), + ('\u{c3c}', 'ౄ'), + ('\u{c46}', '\u{c48}'), + ('\u{c4a}', '\u{c4d}'), + ('\u{c55}', '\u{c56}'), + ('ౘ', 'ౚ'), + ('ౝ', 'ౝ'), + ('ౠ', '\u{c63}'), + ('౦', '౯'), + ('౷', '౿'), + ('\u{1cda}', '\u{1cda}'), + ('ᳲ', 'ᳲ'), +]; + +pub const THAANA: &'static [(char, char)] = &[ + ('،', '،'), + ('؛', '\u{61c}'), + ('؟', '؟'), + ('٠', '٩'), + ('ހ', 'ޱ'), + ('ﷲ', 'ﷲ'), + ('﷽', '﷽'), +]; + +pub const THAI: &'static [(char, char)] = &[ + ('ʼ', 'ʼ'), + ('˗', '˗'), + ('\u{303}', '\u{303}'), + ('\u{331}', '\u{331}'), + ('ก', '\u{e3a}'), + ('เ', '๛'), +]; + +pub const TIBETAN: &'static [(char, char)] = &[ + ('ༀ', 'ཇ'), + ('ཉ', 'ཬ'), + ('\u{f71}', '\u{f97}'), + ('\u{f99}', '\u{fbc}'), + ('྾', '࿌'), + ('࿎', '࿔'), + ('࿙', '࿚'), + ('〈', '》'), +]; + +pub const TIFINAGH: &'static [(char, char)] = &[ + ('\u{302}', '\u{302}'), + ('\u{304}', '\u{304}'), + ('\u{307}', '\u{307}'), + ('\u{309}', '\u{309}'), + ('ⴰ', 'ⵧ'), + ('ⵯ', '⵰'), + ('\u{2d7f}', '\u{2d7f}'), +]; + +pub const TIRHUTA: &'static [(char, char)] = &[ + ('\u{951}', '\u{952}'), + ('।', '॥'), + ('ᳲ', 'ᳲ'), + ('꠰', '꠹'), + ('𑒀', '𑓇'), + ('𑓐', '𑓙'), +]; + +pub const TODHRI: &'static [(char, char)] = &[ + ('\u{301}', '\u{301}'), + ('\u{304}', '\u{304}'), + ('\u{307}', '\u{307}'), + ('\u{311}', '\u{311}'), + ('\u{313}', '\u{313}'), + ('\u{35e}', '\u{35e}'), + ('𐗀', '𐗳'), +]; + +pub const TOTO: &'static [(char, char)] = &[('ʼ', 'ʼ'), ('𞊐', '\u{1e2ae}')]; + +pub const TULU_TIGALARI: &'static [(char, char)] = &[ + ('೦', '೯'), + ('ᳲ', 'ᳲ'), + ('\u{1cf4}', '\u{1cf4}'), + ('꠰', '꠵'), + ('\u{a8f1}', '\u{a8f1}'), + ('𑎀', '𑎉'), + ('𑎋', '𑎋'), + ('𑎎', '𑎎'), + ('𑎐', '𑎵'), + ('𑎷', '\u{113c0}'), + ('\u{113c2}', '\u{113c2}'), + ('\u{113c5}', '\u{113c5}'), + ('\u{113c7}', '𑏊'), + ('𑏌', '𑏕'), + ('𑏗', '𑏘'), + ('\u{113e1}', '\u{113e2}'), +]; + +pub const UGARITIC: &'static [(char, char)] = &[('𐎀', '𐎝'), ('𐎟', '𐎟')]; + +pub const VAI: &'static [(char, char)] = &[('ꔀ', 'ꘫ')]; + +pub const VITHKUQI: &'static [(char, char)] = &[ + ('𐕰', '𐕺'), + ('𐕼', '𐖊'), + ('𐖌', '𐖒'), + ('𐖔', 
'𐖕'), + ('𐖗', '𐖡'), + ('𐖣', '𐖱'), + ('𐖳', '𐖹'), + ('𐖻', '𐖼'), +]; + +pub const WANCHO: &'static [(char, char)] = &[('𞋀', '𞋹'), ('𞋿', '𞋿')]; + +pub const WARANG_CITI: &'static [(char, char)] = &[('𑢠', '𑣲'), ('𑣿', '𑣿')]; + +pub const YEZIDI: &'static [(char, char)] = &[ + ('،', '،'), + ('؛', '؛'), + ('؟', '؟'), + ('٠', '٩'), + ('𐺀', '𐺩'), + ('\u{10eab}', '𐺭'), + ('𐺰', '𐺱'), +]; + +pub const YI: &'static [(char, char)] = &[ + ('、', '。'), + ('〈', '】'), + ('〔', '〛'), + ('・', '・'), + ('ꀀ', 'ꒌ'), + ('꒐', '꓆'), + ('。', '・'), +]; + +pub const ZANABAZAR_SQUARE: &'static [(char, char)] = &[('𑨀', '\u{11a47}')]; diff --git a/vendor/regex-syntax/src/unicode_tables/sentence_break.rs b/vendor/regex-syntax/src/unicode_tables/sentence_break.rs new file mode 100644 index 00000000000000..af1c5bea91b6d8 --- /dev/null +++ b/vendor/regex-syntax/src/unicode_tables/sentence_break.rs @@ -0,0 +1,2530 @@ +// DO NOT EDIT THIS FILE. IT WAS AUTOMATICALLY GENERATED BY: +// +// ucd-generate sentence-break ucd-16.0.0 --chars +// +// Unicode version: 16.0.0. +// +// ucd-generate 0.3.1 is available on crates.io. + +pub const BY_NAME: &'static [(&'static str, &'static [(char, char)])] = &[ + ("ATerm", ATERM), + ("CR", CR), + ("Close", CLOSE), + ("Extend", EXTEND), + ("Format", FORMAT), + ("LF", LF), + ("Lower", LOWER), + ("Numeric", NUMERIC), + ("OLetter", OLETTER), + ("SContinue", SCONTINUE), + ("STerm", STERM), + ("Sep", SEP), + ("Sp", SP), + ("Upper", UPPER), +]; + +pub const ATERM: &'static [(char, char)] = + &[('.', '.'), ('․', '․'), ('﹒', '﹒'), ('.', '.')]; + +pub const CR: &'static [(char, char)] = &[('\r', '\r')]; + +pub const CLOSE: &'static [(char, char)] = &[ + ('"', '"'), + ('\'', ')'), + ('[', '['), + (']', ']'), + ('{', '{'), + ('}', '}'), + ('«', '«'), + ('»', '»'), + ('༺', '༽'), + ('᚛', '᚜'), + ('‘', '‟'), + ('‹', '›'), + ('⁅', '⁆'), + ('⁽', '⁾'), + ('₍', '₎'), + ('⌈', '⌋'), + ('〈', '〉'), + ('❛', '❠'), + ('❨', '❵'), + ('⟅', '⟆'), + ('⟦', '⟯'), + ('⦃', '⦘'), + ('⧘', '⧛'), + ('⧼', '⧽'), + ('⸀', '⸍'), + ('⸜', '⸝'), + ('⸠', '⸩'), + ('⹂', '⹂'), + ('⹕', '⹜'), + ('〈', '】'), + ('〔', '〛'), + ('〝', '〟'), + ('﴾', '﴿'), + ('︗', '︘'), + ('︵', '﹄'), + ('﹇', '﹈'), + ('﹙', '﹞'), + ('(', ')'), + ('[', '['), + (']', ']'), + ('{', '{'), + ('}', '}'), + ('⦅', '⦆'), + ('「', '」'), + ('🙶', '🙸'), +]; + +pub const EXTEND: &'static [(char, char)] = &[ + ('\u{300}', '\u{36f}'), + ('\u{483}', '\u{489}'), + ('\u{591}', '\u{5bd}'), + ('\u{5bf}', '\u{5bf}'), + ('\u{5c1}', '\u{5c2}'), + ('\u{5c4}', '\u{5c5}'), + ('\u{5c7}', '\u{5c7}'), + ('\u{610}', '\u{61a}'), + ('\u{64b}', '\u{65f}'), + ('\u{670}', '\u{670}'), + ('\u{6d6}', '\u{6dc}'), + ('\u{6df}', '\u{6e4}'), + ('\u{6e7}', '\u{6e8}'), + ('\u{6ea}', '\u{6ed}'), + ('\u{711}', '\u{711}'), + ('\u{730}', '\u{74a}'), + ('\u{7a6}', '\u{7b0}'), + ('\u{7eb}', '\u{7f3}'), + ('\u{7fd}', '\u{7fd}'), + ('\u{816}', '\u{819}'), + ('\u{81b}', '\u{823}'), + ('\u{825}', '\u{827}'), + ('\u{829}', '\u{82d}'), + ('\u{859}', '\u{85b}'), + ('\u{897}', '\u{89f}'), + ('\u{8ca}', '\u{8e1}'), + ('\u{8e3}', 'ः'), + ('\u{93a}', '\u{93c}'), + ('ा', 'ॏ'), + ('\u{951}', '\u{957}'), + ('\u{962}', '\u{963}'), + ('\u{981}', 'ঃ'), + ('\u{9bc}', '\u{9bc}'), + ('\u{9be}', '\u{9c4}'), + ('ে', 'ৈ'), + ('ো', '\u{9cd}'), + ('\u{9d7}', '\u{9d7}'), + ('\u{9e2}', '\u{9e3}'), + ('\u{9fe}', '\u{9fe}'), + ('\u{a01}', 'ਃ'), + ('\u{a3c}', '\u{a3c}'), + ('ਾ', '\u{a42}'), + ('\u{a47}', '\u{a48}'), + ('\u{a4b}', '\u{a4d}'), + ('\u{a51}', '\u{a51}'), + ('\u{a70}', '\u{a71}'), + ('\u{a75}', '\u{a75}'), + ('\u{a81}', 'ઃ'), + ('\u{abc}', 
'\u{abc}'), + ('ા', '\u{ac5}'), + ('\u{ac7}', 'ૉ'), + ('ો', '\u{acd}'), + ('\u{ae2}', '\u{ae3}'), + ('\u{afa}', '\u{aff}'), + ('\u{b01}', 'ଃ'), + ('\u{b3c}', '\u{b3c}'), + ('\u{b3e}', '\u{b44}'), + ('େ', 'ୈ'), + ('ୋ', '\u{b4d}'), + ('\u{b55}', '\u{b57}'), + ('\u{b62}', '\u{b63}'), + ('\u{b82}', '\u{b82}'), + ('\u{bbe}', 'ூ'), + ('ெ', 'ை'), + ('ொ', '\u{bcd}'), + ('\u{bd7}', '\u{bd7}'), + ('\u{c00}', '\u{c04}'), + ('\u{c3c}', '\u{c3c}'), + ('\u{c3e}', 'ౄ'), + ('\u{c46}', '\u{c48}'), + ('\u{c4a}', '\u{c4d}'), + ('\u{c55}', '\u{c56}'), + ('\u{c62}', '\u{c63}'), + ('\u{c81}', 'ಃ'), + ('\u{cbc}', '\u{cbc}'), + ('ಾ', 'ೄ'), + ('\u{cc6}', '\u{cc8}'), + ('\u{cca}', '\u{ccd}'), + ('\u{cd5}', '\u{cd6}'), + ('\u{ce2}', '\u{ce3}'), + ('ೳ', 'ೳ'), + ('\u{d00}', 'ഃ'), + ('\u{d3b}', '\u{d3c}'), + ('\u{d3e}', '\u{d44}'), + ('െ', 'ൈ'), + ('ൊ', '\u{d4d}'), + ('\u{d57}', '\u{d57}'), + ('\u{d62}', '\u{d63}'), + ('\u{d81}', 'ඃ'), + ('\u{dca}', '\u{dca}'), + ('\u{dcf}', '\u{dd4}'), + ('\u{dd6}', '\u{dd6}'), + ('ෘ', '\u{ddf}'), + ('ෲ', 'ෳ'), + ('\u{e31}', '\u{e31}'), + ('\u{e34}', '\u{e3a}'), + ('\u{e47}', '\u{e4e}'), + ('\u{eb1}', '\u{eb1}'), + ('\u{eb4}', '\u{ebc}'), + ('\u{ec8}', '\u{ece}'), + ('\u{f18}', '\u{f19}'), + ('\u{f35}', '\u{f35}'), + ('\u{f37}', '\u{f37}'), + ('\u{f39}', '\u{f39}'), + ('༾', '༿'), + ('\u{f71}', '\u{f84}'), + ('\u{f86}', '\u{f87}'), + ('\u{f8d}', '\u{f97}'), + ('\u{f99}', '\u{fbc}'), + ('\u{fc6}', '\u{fc6}'), + ('ါ', '\u{103e}'), + ('ၖ', '\u{1059}'), + ('\u{105e}', '\u{1060}'), + ('ၢ', 'ၤ'), + ('ၧ', 'ၭ'), + ('\u{1071}', '\u{1074}'), + ('\u{1082}', '\u{108d}'), + ('ႏ', 'ႏ'), + ('ႚ', '\u{109d}'), + ('\u{135d}', '\u{135f}'), + ('\u{1712}', '\u{1715}'), + ('\u{1732}', '\u{1734}'), + ('\u{1752}', '\u{1753}'), + ('\u{1772}', '\u{1773}'), + ('\u{17b4}', '\u{17d3}'), + ('\u{17dd}', '\u{17dd}'), + ('\u{180b}', '\u{180d}'), + ('\u{180f}', '\u{180f}'), + ('\u{1885}', '\u{1886}'), + ('\u{18a9}', '\u{18a9}'), + ('\u{1920}', 'ᤫ'), + ('ᤰ', '\u{193b}'), + ('\u{1a17}', '\u{1a1b}'), + ('ᩕ', '\u{1a5e}'), + ('\u{1a60}', '\u{1a7c}'), + ('\u{1a7f}', '\u{1a7f}'), + ('\u{1ab0}', '\u{1ace}'), + ('\u{1b00}', 'ᬄ'), + ('\u{1b34}', '\u{1b44}'), + ('\u{1b6b}', '\u{1b73}'), + ('\u{1b80}', 'ᮂ'), + ('ᮡ', '\u{1bad}'), + ('\u{1be6}', '\u{1bf3}'), + ('ᰤ', '\u{1c37}'), + ('\u{1cd0}', '\u{1cd2}'), + ('\u{1cd4}', '\u{1ce8}'), + ('\u{1ced}', '\u{1ced}'), + ('\u{1cf4}', '\u{1cf4}'), + ('᳷', '\u{1cf9}'), + ('\u{1dc0}', '\u{1dff}'), + ('\u{200c}', '\u{200d}'), + ('\u{20d0}', '\u{20f0}'), + ('\u{2cef}', '\u{2cf1}'), + ('\u{2d7f}', '\u{2d7f}'), + ('\u{2de0}', '\u{2dff}'), + ('\u{302a}', '\u{302f}'), + ('\u{3099}', '\u{309a}'), + ('\u{a66f}', '\u{a672}'), + ('\u{a674}', '\u{a67d}'), + ('\u{a69e}', '\u{a69f}'), + ('\u{a6f0}', '\u{a6f1}'), + ('\u{a802}', '\u{a802}'), + ('\u{a806}', '\u{a806}'), + ('\u{a80b}', '\u{a80b}'), + ('ꠣ', 'ꠧ'), + ('\u{a82c}', '\u{a82c}'), + ('ꢀ', 'ꢁ'), + ('ꢴ', '\u{a8c5}'), + ('\u{a8e0}', '\u{a8f1}'), + ('\u{a8ff}', '\u{a8ff}'), + ('\u{a926}', '\u{a92d}'), + ('\u{a947}', '\u{a953}'), + ('\u{a980}', 'ꦃ'), + ('\u{a9b3}', '\u{a9c0}'), + ('\u{a9e5}', '\u{a9e5}'), + ('\u{aa29}', '\u{aa36}'), + ('\u{aa43}', '\u{aa43}'), + ('\u{aa4c}', 'ꩍ'), + ('ꩻ', 'ꩽ'), + ('\u{aab0}', '\u{aab0}'), + ('\u{aab2}', '\u{aab4}'), + ('\u{aab7}', '\u{aab8}'), + ('\u{aabe}', '\u{aabf}'), + ('\u{aac1}', '\u{aac1}'), + ('ꫫ', 'ꫯ'), + ('ꫵ', '\u{aaf6}'), + ('ꯣ', 'ꯪ'), + ('꯬', '\u{abed}'), + ('\u{fb1e}', '\u{fb1e}'), + ('\u{fe00}', '\u{fe0f}'), + ('\u{fe20}', '\u{fe2f}'), + ('\u{ff9e}', '\u{ff9f}'), + ('\u{101fd}', '\u{101fd}'), + ('\u{102e0}', 
'\u{102e0}'), + ('\u{10376}', '\u{1037a}'), + ('\u{10a01}', '\u{10a03}'), + ('\u{10a05}', '\u{10a06}'), + ('\u{10a0c}', '\u{10a0f}'), + ('\u{10a38}', '\u{10a3a}'), + ('\u{10a3f}', '\u{10a3f}'), + ('\u{10ae5}', '\u{10ae6}'), + ('\u{10d24}', '\u{10d27}'), + ('\u{10d69}', '\u{10d6d}'), + ('\u{10eab}', '\u{10eac}'), + ('\u{10efc}', '\u{10eff}'), + ('\u{10f46}', '\u{10f50}'), + ('\u{10f82}', '\u{10f85}'), + ('𑀀', '𑀂'), + ('\u{11038}', '\u{11046}'), + ('\u{11070}', '\u{11070}'), + ('\u{11073}', '\u{11074}'), + ('\u{1107f}', '𑂂'), + ('𑂰', '\u{110ba}'), + ('\u{110c2}', '\u{110c2}'), + ('\u{11100}', '\u{11102}'), + ('\u{11127}', '\u{11134}'), + ('𑅅', '𑅆'), + ('\u{11173}', '\u{11173}'), + ('\u{11180}', '𑆂'), + ('𑆳', '\u{111c0}'), + ('\u{111c9}', '\u{111cc}'), + ('𑇎', '\u{111cf}'), + ('𑈬', '\u{11237}'), + ('\u{1123e}', '\u{1123e}'), + ('\u{11241}', '\u{11241}'), + ('\u{112df}', '\u{112ea}'), + ('\u{11300}', '𑌃'), + ('\u{1133b}', '\u{1133c}'), + ('\u{1133e}', '𑍄'), + ('𑍇', '𑍈'), + ('𑍋', '\u{1134d}'), + ('\u{11357}', '\u{11357}'), + ('𑍢', '𑍣'), + ('\u{11366}', '\u{1136c}'), + ('\u{11370}', '\u{11374}'), + ('\u{113b8}', '\u{113c0}'), + ('\u{113c2}', '\u{113c2}'), + ('\u{113c5}', '\u{113c5}'), + ('\u{113c7}', '𑏊'), + ('𑏌', '\u{113d0}'), + ('\u{113d2}', '\u{113d2}'), + ('\u{113e1}', '\u{113e2}'), + ('𑐵', '\u{11446}'), + ('\u{1145e}', '\u{1145e}'), + ('\u{114b0}', '\u{114c3}'), + ('\u{115af}', '\u{115b5}'), + ('𑖸', '\u{115c0}'), + ('\u{115dc}', '\u{115dd}'), + ('𑘰', '\u{11640}'), + ('\u{116ab}', '\u{116b7}'), + ('\u{1171d}', '\u{1172b}'), + ('𑠬', '\u{1183a}'), + ('\u{11930}', '𑤵'), + ('𑤷', '𑤸'), + ('\u{1193b}', '\u{1193e}'), + ('𑥀', '𑥀'), + ('𑥂', '\u{11943}'), + ('𑧑', '\u{119d7}'), + ('\u{119da}', '\u{119e0}'), + ('𑧤', '𑧤'), + ('\u{11a01}', '\u{11a0a}'), + ('\u{11a33}', '𑨹'), + ('\u{11a3b}', '\u{11a3e}'), + ('\u{11a47}', '\u{11a47}'), + ('\u{11a51}', '\u{11a5b}'), + ('\u{11a8a}', '\u{11a99}'), + ('𑰯', '\u{11c36}'), + ('\u{11c38}', '\u{11c3f}'), + ('\u{11c92}', '\u{11ca7}'), + ('𑲩', '\u{11cb6}'), + ('\u{11d31}', '\u{11d36}'), + ('\u{11d3a}', '\u{11d3a}'), + ('\u{11d3c}', '\u{11d3d}'), + ('\u{11d3f}', '\u{11d45}'), + ('\u{11d47}', '\u{11d47}'), + ('𑶊', '𑶎'), + ('\u{11d90}', '\u{11d91}'), + ('𑶓', '\u{11d97}'), + ('\u{11ef3}', '𑻶'), + ('\u{11f00}', '\u{11f01}'), + ('𑼃', '𑼃'), + ('𑼴', '\u{11f3a}'), + ('𑼾', '\u{11f42}'), + ('\u{11f5a}', '\u{11f5a}'), + ('\u{13440}', '\u{13440}'), + ('\u{13447}', '\u{13455}'), + ('\u{1611e}', '\u{1612f}'), + ('\u{16af0}', '\u{16af4}'), + ('\u{16b30}', '\u{16b36}'), + ('\u{16f4f}', '\u{16f4f}'), + ('𖽑', '𖾇'), + ('\u{16f8f}', '\u{16f92}'), + ('\u{16fe4}', '\u{16fe4}'), + ('\u{16ff0}', '\u{16ff1}'), + ('\u{1bc9d}', '\u{1bc9e}'), + ('\u{1cf00}', '\u{1cf2d}'), + ('\u{1cf30}', '\u{1cf46}'), + ('\u{1d165}', '\u{1d169}'), + ('\u{1d16d}', '\u{1d172}'), + ('\u{1d17b}', '\u{1d182}'), + ('\u{1d185}', '\u{1d18b}'), + ('\u{1d1aa}', '\u{1d1ad}'), + ('\u{1d242}', '\u{1d244}'), + ('\u{1da00}', '\u{1da36}'), + ('\u{1da3b}', '\u{1da6c}'), + ('\u{1da75}', '\u{1da75}'), + ('\u{1da84}', '\u{1da84}'), + ('\u{1da9b}', '\u{1da9f}'), + ('\u{1daa1}', '\u{1daaf}'), + ('\u{1e000}', '\u{1e006}'), + ('\u{1e008}', '\u{1e018}'), + ('\u{1e01b}', '\u{1e021}'), + ('\u{1e023}', '\u{1e024}'), + ('\u{1e026}', '\u{1e02a}'), + ('\u{1e08f}', '\u{1e08f}'), + ('\u{1e130}', '\u{1e136}'), + ('\u{1e2ae}', '\u{1e2ae}'), + ('\u{1e2ec}', '\u{1e2ef}'), + ('\u{1e4ec}', '\u{1e4ef}'), + ('\u{1e5ee}', '\u{1e5ef}'), + ('\u{1e8d0}', '\u{1e8d6}'), + ('\u{1e944}', '\u{1e94a}'), + ('\u{e0020}', '\u{e007f}'), + ('\u{e0100}', '\u{e01ef}'), 
+]; + +pub const FORMAT: &'static [(char, char)] = &[ + ('\u{ad}', '\u{ad}'), + ('\u{61c}', '\u{61c}'), + ('\u{70f}', '\u{70f}'), + ('\u{180e}', '\u{180e}'), + ('\u{200b}', '\u{200b}'), + ('\u{200e}', '\u{200f}'), + ('\u{202a}', '\u{202e}'), + ('\u{2060}', '\u{2064}'), + ('\u{2066}', '\u{206f}'), + ('\u{feff}', '\u{feff}'), + ('\u{fff9}', '\u{fffb}'), + ('\u{13430}', '\u{1343f}'), + ('\u{1bca0}', '\u{1bca3}'), + ('\u{1d173}', '\u{1d17a}'), + ('\u{e0001}', '\u{e0001}'), +]; + +pub const LF: &'static [(char, char)] = &[('\n', '\n')]; + +pub const LOWER: &'static [(char, char)] = &[ + ('a', 'z'), + ('ª', 'ª'), + ('µ', 'µ'), + ('º', 'º'), + ('ß', 'ö'), + ('ø', 'ÿ'), + ('ā', 'ā'), + ('ă', 'ă'), + ('ą', 'ą'), + ('ć', 'ć'), + ('ĉ', 'ĉ'), + ('ċ', 'ċ'), + ('č', 'č'), + ('ď', 'ď'), + ('đ', 'đ'), + ('ē', 'ē'), + ('ĕ', 'ĕ'), + ('ė', 'ė'), + ('ę', 'ę'), + ('ě', 'ě'), + ('ĝ', 'ĝ'), + ('ğ', 'ğ'), + ('ġ', 'ġ'), + ('ģ', 'ģ'), + ('ĥ', 'ĥ'), + ('ħ', 'ħ'), + ('ĩ', 'ĩ'), + ('ī', 'ī'), + ('ĭ', 'ĭ'), + ('į', 'į'), + ('ı', 'ı'), + ('ij', 'ij'), + ('ĵ', 'ĵ'), + ('ķ', 'ĸ'), + ('ĺ', 'ĺ'), + ('ļ', 'ļ'), + ('ľ', 'ľ'), + ('ŀ', 'ŀ'), + ('ł', 'ł'), + ('ń', 'ń'), + ('ņ', 'ņ'), + ('ň', 'ʼn'), + ('ŋ', 'ŋ'), + ('ō', 'ō'), + ('ŏ', 'ŏ'), + ('ő', 'ő'), + ('œ', 'œ'), + ('ŕ', 'ŕ'), + ('ŗ', 'ŗ'), + ('ř', 'ř'), + ('ś', 'ś'), + ('ŝ', 'ŝ'), + ('ş', 'ş'), + ('š', 'š'), + ('ţ', 'ţ'), + ('ť', 'ť'), + ('ŧ', 'ŧ'), + ('ũ', 'ũ'), + ('ū', 'ū'), + ('ŭ', 'ŭ'), + ('ů', 'ů'), + ('ű', 'ű'), + ('ų', 'ų'), + ('ŵ', 'ŵ'), + ('ŷ', 'ŷ'), + ('ź', 'ź'), + ('ż', 'ż'), + ('ž', 'ƀ'), + ('ƃ', 'ƃ'), + ('ƅ', 'ƅ'), + ('ƈ', 'ƈ'), + ('ƌ', 'ƍ'), + ('ƒ', 'ƒ'), + ('ƕ', 'ƕ'), + ('ƙ', 'ƛ'), + ('ƞ', 'ƞ'), + ('ơ', 'ơ'), + ('ƣ', 'ƣ'), + ('ƥ', 'ƥ'), + ('ƨ', 'ƨ'), + ('ƪ', 'ƫ'), + ('ƭ', 'ƭ'), + ('ư', 'ư'), + ('ƴ', 'ƴ'), + ('ƶ', 'ƶ'), + ('ƹ', 'ƺ'), + ('ƽ', 'ƿ'), + ('dž', 'dž'), + ('lj', 'lj'), + ('nj', 'nj'), + ('ǎ', 'ǎ'), + ('ǐ', 'ǐ'), + ('ǒ', 'ǒ'), + ('ǔ', 'ǔ'), + ('ǖ', 'ǖ'), + ('ǘ', 'ǘ'), + ('ǚ', 'ǚ'), + ('ǜ', 'ǝ'), + ('ǟ', 'ǟ'), + ('ǡ', 'ǡ'), + ('ǣ', 'ǣ'), + ('ǥ', 'ǥ'), + ('ǧ', 'ǧ'), + ('ǩ', 'ǩ'), + ('ǫ', 'ǫ'), + ('ǭ', 'ǭ'), + ('ǯ', 'ǰ'), + ('dz', 'dz'), + ('ǵ', 'ǵ'), + ('ǹ', 'ǹ'), + ('ǻ', 'ǻ'), + ('ǽ', 'ǽ'), + ('ǿ', 'ǿ'), + ('ȁ', 'ȁ'), + ('ȃ', 'ȃ'), + ('ȅ', 'ȅ'), + ('ȇ', 'ȇ'), + ('ȉ', 'ȉ'), + ('ȋ', 'ȋ'), + ('ȍ', 'ȍ'), + ('ȏ', 'ȏ'), + ('ȑ', 'ȑ'), + ('ȓ', 'ȓ'), + ('ȕ', 'ȕ'), + ('ȗ', 'ȗ'), + ('ș', 'ș'), + ('ț', 'ț'), + ('ȝ', 'ȝ'), + ('ȟ', 'ȟ'), + ('ȡ', 'ȡ'), + ('ȣ', 'ȣ'), + ('ȥ', 'ȥ'), + ('ȧ', 'ȧ'), + ('ȩ', 'ȩ'), + ('ȫ', 'ȫ'), + ('ȭ', 'ȭ'), + ('ȯ', 'ȯ'), + ('ȱ', 'ȱ'), + ('ȳ', 'ȹ'), + ('ȼ', 'ȼ'), + ('ȿ', 'ɀ'), + ('ɂ', 'ɂ'), + ('ɇ', 'ɇ'), + ('ɉ', 'ɉ'), + ('ɋ', 'ɋ'), + ('ɍ', 'ɍ'), + ('ɏ', 'ʓ'), + ('ʕ', 'ʸ'), + ('ˀ', 'ˁ'), + ('ˠ', 'ˤ'), + ('ͱ', 'ͱ'), + ('ͳ', 'ͳ'), + ('ͷ', 'ͷ'), + ('ͺ', 'ͽ'), + ('ΐ', 'ΐ'), + ('ά', 'ώ'), + ('ϐ', 'ϑ'), + ('ϕ', 'ϗ'), + ('ϙ', 'ϙ'), + ('ϛ', 'ϛ'), + ('ϝ', 'ϝ'), + ('ϟ', 'ϟ'), + ('ϡ', 'ϡ'), + ('ϣ', 'ϣ'), + ('ϥ', 'ϥ'), + ('ϧ', 'ϧ'), + ('ϩ', 'ϩ'), + ('ϫ', 'ϫ'), + ('ϭ', 'ϭ'), + ('ϯ', 'ϳ'), + ('ϵ', 'ϵ'), + ('ϸ', 'ϸ'), + ('ϻ', 'ϼ'), + ('а', 'џ'), + ('ѡ', 'ѡ'), + ('ѣ', 'ѣ'), + ('ѥ', 'ѥ'), + ('ѧ', 'ѧ'), + ('ѩ', 'ѩ'), + ('ѫ', 'ѫ'), + ('ѭ', 'ѭ'), + ('ѯ', 'ѯ'), + ('ѱ', 'ѱ'), + ('ѳ', 'ѳ'), + ('ѵ', 'ѵ'), + ('ѷ', 'ѷ'), + ('ѹ', 'ѹ'), + ('ѻ', 'ѻ'), + ('ѽ', 'ѽ'), + ('ѿ', 'ѿ'), + ('ҁ', 'ҁ'), + ('ҋ', 'ҋ'), + ('ҍ', 'ҍ'), + ('ҏ', 'ҏ'), + ('ґ', 'ґ'), + ('ғ', 'ғ'), + ('ҕ', 'ҕ'), + ('җ', 'җ'), + ('ҙ', 'ҙ'), + ('қ', 'қ'), + ('ҝ', 'ҝ'), + ('ҟ', 'ҟ'), + ('ҡ', 'ҡ'), + ('ң', 'ң'), + ('ҥ', 'ҥ'), + ('ҧ', 'ҧ'), + ('ҩ', 'ҩ'), + ('ҫ', 'ҫ'), + ('ҭ', 'ҭ'), + ('ү', 'ү'), + ('ұ', 'ұ'), + 
('ҳ', 'ҳ'), + ('ҵ', 'ҵ'), + ('ҷ', 'ҷ'), + ('ҹ', 'ҹ'), + ('һ', 'һ'), + ('ҽ', 'ҽ'), + ('ҿ', 'ҿ'), + ('ӂ', 'ӂ'), + ('ӄ', 'ӄ'), + ('ӆ', 'ӆ'), + ('ӈ', 'ӈ'), + ('ӊ', 'ӊ'), + ('ӌ', 'ӌ'), + ('ӎ', 'ӏ'), + ('ӑ', 'ӑ'), + ('ӓ', 'ӓ'), + ('ӕ', 'ӕ'), + ('ӗ', 'ӗ'), + ('ә', 'ә'), + ('ӛ', 'ӛ'), + ('ӝ', 'ӝ'), + ('ӟ', 'ӟ'), + ('ӡ', 'ӡ'), + ('ӣ', 'ӣ'), + ('ӥ', 'ӥ'), + ('ӧ', 'ӧ'), + ('ө', 'ө'), + ('ӫ', 'ӫ'), + ('ӭ', 'ӭ'), + ('ӯ', 'ӯ'), + ('ӱ', 'ӱ'), + ('ӳ', 'ӳ'), + ('ӵ', 'ӵ'), + ('ӷ', 'ӷ'), + ('ӹ', 'ӹ'), + ('ӻ', 'ӻ'), + ('ӽ', 'ӽ'), + ('ӿ', 'ӿ'), + ('ԁ', 'ԁ'), + ('ԃ', 'ԃ'), + ('ԅ', 'ԅ'), + ('ԇ', 'ԇ'), + ('ԉ', 'ԉ'), + ('ԋ', 'ԋ'), + ('ԍ', 'ԍ'), + ('ԏ', 'ԏ'), + ('ԑ', 'ԑ'), + ('ԓ', 'ԓ'), + ('ԕ', 'ԕ'), + ('ԗ', 'ԗ'), + ('ԙ', 'ԙ'), + ('ԛ', 'ԛ'), + ('ԝ', 'ԝ'), + ('ԟ', 'ԟ'), + ('ԡ', 'ԡ'), + ('ԣ', 'ԣ'), + ('ԥ', 'ԥ'), + ('ԧ', 'ԧ'), + ('ԩ', 'ԩ'), + ('ԫ', 'ԫ'), + ('ԭ', 'ԭ'), + ('ԯ', 'ԯ'), + ('ՠ', 'ֈ'), + ('ჼ', 'ჼ'), + ('ᏸ', 'ᏽ'), + ('ᲀ', 'ᲈ'), + ('ᲊ', 'ᲊ'), + ('ᴀ', 'ᶿ'), + ('ḁ', 'ḁ'), + ('ḃ', 'ḃ'), + ('ḅ', 'ḅ'), + ('ḇ', 'ḇ'), + ('ḉ', 'ḉ'), + ('ḋ', 'ḋ'), + ('ḍ', 'ḍ'), + ('ḏ', 'ḏ'), + ('ḑ', 'ḑ'), + ('ḓ', 'ḓ'), + ('ḕ', 'ḕ'), + ('ḗ', 'ḗ'), + ('ḙ', 'ḙ'), + ('ḛ', 'ḛ'), + ('ḝ', 'ḝ'), + ('ḟ', 'ḟ'), + ('ḡ', 'ḡ'), + ('ḣ', 'ḣ'), + ('ḥ', 'ḥ'), + ('ḧ', 'ḧ'), + ('ḩ', 'ḩ'), + ('ḫ', 'ḫ'), + ('ḭ', 'ḭ'), + ('ḯ', 'ḯ'), + ('ḱ', 'ḱ'), + ('ḳ', 'ḳ'), + ('ḵ', 'ḵ'), + ('ḷ', 'ḷ'), + ('ḹ', 'ḹ'), + ('ḻ', 'ḻ'), + ('ḽ', 'ḽ'), + ('ḿ', 'ḿ'), + ('ṁ', 'ṁ'), + ('ṃ', 'ṃ'), + ('ṅ', 'ṅ'), + ('ṇ', 'ṇ'), + ('ṉ', 'ṉ'), + ('ṋ', 'ṋ'), + ('ṍ', 'ṍ'), + ('ṏ', 'ṏ'), + ('ṑ', 'ṑ'), + ('ṓ', 'ṓ'), + ('ṕ', 'ṕ'), + ('ṗ', 'ṗ'), + ('ṙ', 'ṙ'), + ('ṛ', 'ṛ'), + ('ṝ', 'ṝ'), + ('ṟ', 'ṟ'), + ('ṡ', 'ṡ'), + ('ṣ', 'ṣ'), + ('ṥ', 'ṥ'), + ('ṧ', 'ṧ'), + ('ṩ', 'ṩ'), + ('ṫ', 'ṫ'), + ('ṭ', 'ṭ'), + ('ṯ', 'ṯ'), + ('ṱ', 'ṱ'), + ('ṳ', 'ṳ'), + ('ṵ', 'ṵ'), + ('ṷ', 'ṷ'), + ('ṹ', 'ṹ'), + ('ṻ', 'ṻ'), + ('ṽ', 'ṽ'), + ('ṿ', 'ṿ'), + ('ẁ', 'ẁ'), + ('ẃ', 'ẃ'), + ('ẅ', 'ẅ'), + ('ẇ', 'ẇ'), + ('ẉ', 'ẉ'), + ('ẋ', 'ẋ'), + ('ẍ', 'ẍ'), + ('ẏ', 'ẏ'), + ('ẑ', 'ẑ'), + ('ẓ', 'ẓ'), + ('ẕ', 'ẝ'), + ('ẟ', 'ẟ'), + ('ạ', 'ạ'), + ('ả', 'ả'), + ('ấ', 'ấ'), + ('ầ', 'ầ'), + ('ẩ', 'ẩ'), + ('ẫ', 'ẫ'), + ('ậ', 'ậ'), + ('ắ', 'ắ'), + ('ằ', 'ằ'), + ('ẳ', 'ẳ'), + ('ẵ', 'ẵ'), + ('ặ', 'ặ'), + ('ẹ', 'ẹ'), + ('ẻ', 'ẻ'), + ('ẽ', 'ẽ'), + ('ế', 'ế'), + ('ề', 'ề'), + ('ể', 'ể'), + ('ễ', 'ễ'), + ('ệ', 'ệ'), + ('ỉ', 'ỉ'), + ('ị', 'ị'), + ('ọ', 'ọ'), + ('ỏ', 'ỏ'), + ('ố', 'ố'), + ('ồ', 'ồ'), + ('ổ', 'ổ'), + ('ỗ', 'ỗ'), + ('ộ', 'ộ'), + ('ớ', 'ớ'), + ('ờ', 'ờ'), + ('ở', 'ở'), + ('ỡ', 'ỡ'), + ('ợ', 'ợ'), + ('ụ', 'ụ'), + ('ủ', 'ủ'), + ('ứ', 'ứ'), + ('ừ', 'ừ'), + ('ử', 'ử'), + ('ữ', 'ữ'), + ('ự', 'ự'), + ('ỳ', 'ỳ'), + ('ỵ', 'ỵ'), + ('ỷ', 'ỷ'), + ('ỹ', 'ỹ'), + ('ỻ', 'ỻ'), + ('ỽ', 'ỽ'), + ('ỿ', 'ἇ'), + ('ἐ', 'ἕ'), + ('ἠ', 'ἧ'), + ('ἰ', 'ἷ'), + ('ὀ', 'ὅ'), + ('ὐ', 'ὗ'), + ('ὠ', 'ὧ'), + ('ὰ', 'ώ'), + ('ᾀ', 'ᾇ'), + ('ᾐ', 'ᾗ'), + ('ᾠ', 'ᾧ'), + ('ᾰ', 'ᾴ'), + ('ᾶ', 'ᾷ'), + ('ι', 'ι'), + ('ῂ', 'ῄ'), + ('ῆ', 'ῇ'), + ('ῐ', 'ΐ'), + ('ῖ', 'ῗ'), + ('ῠ', 'ῧ'), + ('ῲ', 'ῴ'), + ('ῶ', 'ῷ'), + ('ⁱ', 'ⁱ'), + ('ⁿ', 'ⁿ'), + ('ₐ', 'ₜ'), + ('ℊ', 'ℊ'), + ('ℎ', 'ℏ'), + ('ℓ', 'ℓ'), + ('ℯ', 'ℯ'), + ('ℴ', 'ℴ'), + ('ℹ', 'ℹ'), + ('ℼ', 'ℽ'), + ('ⅆ', 'ⅉ'), + ('ⅎ', 'ⅎ'), + ('ⅰ', 'ⅿ'), + ('ↄ', 'ↄ'), + ('ⓐ', 'ⓩ'), + ('ⰰ', 'ⱟ'), + ('ⱡ', 'ⱡ'), + ('ⱥ', 'ⱦ'), + ('ⱨ', 'ⱨ'), + ('ⱪ', 'ⱪ'), + ('ⱬ', 'ⱬ'), + ('ⱱ', 'ⱱ'), + ('ⱳ', 'ⱴ'), + ('ⱶ', 'ⱽ'), + ('ⲁ', 'ⲁ'), + ('ⲃ', 'ⲃ'), + ('ⲅ', 'ⲅ'), + ('ⲇ', 'ⲇ'), + ('ⲉ', 'ⲉ'), + ('ⲋ', 'ⲋ'), + ('ⲍ', 'ⲍ'), + ('ⲏ', 'ⲏ'), + ('ⲑ', 'ⲑ'), + ('ⲓ', 'ⲓ'), + ('ⲕ', 'ⲕ'), + ('ⲗ', 'ⲗ'), + ('ⲙ', 'ⲙ'), + ('ⲛ', 'ⲛ'), + ('ⲝ', 'ⲝ'), + ('ⲟ', 'ⲟ'), + ('ⲡ', 'ⲡ'), + ('ⲣ', 'ⲣ'), 
+ ('ⲥ', 'ⲥ'), + ('ⲧ', 'ⲧ'), + ('ⲩ', 'ⲩ'), + ('ⲫ', 'ⲫ'), + ('ⲭ', 'ⲭ'), + ('ⲯ', 'ⲯ'), + ('ⲱ', 'ⲱ'), + ('ⲳ', 'ⲳ'), + ('ⲵ', 'ⲵ'), + ('ⲷ', 'ⲷ'), + ('ⲹ', 'ⲹ'), + ('ⲻ', 'ⲻ'), + ('ⲽ', 'ⲽ'), + ('ⲿ', 'ⲿ'), + ('ⳁ', 'ⳁ'), + ('ⳃ', 'ⳃ'), + ('ⳅ', 'ⳅ'), + ('ⳇ', 'ⳇ'), + ('ⳉ', 'ⳉ'), + ('ⳋ', 'ⳋ'), + ('ⳍ', 'ⳍ'), + ('ⳏ', 'ⳏ'), + ('ⳑ', 'ⳑ'), + ('ⳓ', 'ⳓ'), + ('ⳕ', 'ⳕ'), + ('ⳗ', 'ⳗ'), + ('ⳙ', 'ⳙ'), + ('ⳛ', 'ⳛ'), + ('ⳝ', 'ⳝ'), + ('ⳟ', 'ⳟ'), + ('ⳡ', 'ⳡ'), + ('ⳣ', 'ⳤ'), + ('ⳬ', 'ⳬ'), + ('ⳮ', 'ⳮ'), + ('ⳳ', 'ⳳ'), + ('ⴀ', 'ⴥ'), + ('ⴧ', 'ⴧ'), + ('ⴭ', 'ⴭ'), + ('ꙁ', 'ꙁ'), + ('ꙃ', 'ꙃ'), + ('ꙅ', 'ꙅ'), + ('ꙇ', 'ꙇ'), + ('ꙉ', 'ꙉ'), + ('ꙋ', 'ꙋ'), + ('ꙍ', 'ꙍ'), + ('ꙏ', 'ꙏ'), + ('ꙑ', 'ꙑ'), + ('ꙓ', 'ꙓ'), + ('ꙕ', 'ꙕ'), + ('ꙗ', 'ꙗ'), + ('ꙙ', 'ꙙ'), + ('ꙛ', 'ꙛ'), + ('ꙝ', 'ꙝ'), + ('ꙟ', 'ꙟ'), + ('ꙡ', 'ꙡ'), + ('ꙣ', 'ꙣ'), + ('ꙥ', 'ꙥ'), + ('ꙧ', 'ꙧ'), + ('ꙩ', 'ꙩ'), + ('ꙫ', 'ꙫ'), + ('ꙭ', 'ꙭ'), + ('ꚁ', 'ꚁ'), + ('ꚃ', 'ꚃ'), + ('ꚅ', 'ꚅ'), + ('ꚇ', 'ꚇ'), + ('ꚉ', 'ꚉ'), + ('ꚋ', 'ꚋ'), + ('ꚍ', 'ꚍ'), + ('ꚏ', 'ꚏ'), + ('ꚑ', 'ꚑ'), + ('ꚓ', 'ꚓ'), + ('ꚕ', 'ꚕ'), + ('ꚗ', 'ꚗ'), + ('ꚙ', 'ꚙ'), + ('ꚛ', 'ꚝ'), + ('ꜣ', 'ꜣ'), + ('ꜥ', 'ꜥ'), + ('ꜧ', 'ꜧ'), + ('ꜩ', 'ꜩ'), + ('ꜫ', 'ꜫ'), + ('ꜭ', 'ꜭ'), + ('ꜯ', 'ꜱ'), + ('ꜳ', 'ꜳ'), + ('ꜵ', 'ꜵ'), + ('ꜷ', 'ꜷ'), + ('ꜹ', 'ꜹ'), + ('ꜻ', 'ꜻ'), + ('ꜽ', 'ꜽ'), + ('ꜿ', 'ꜿ'), + ('ꝁ', 'ꝁ'), + ('ꝃ', 'ꝃ'), + ('ꝅ', 'ꝅ'), + ('ꝇ', 'ꝇ'), + ('ꝉ', 'ꝉ'), + ('ꝋ', 'ꝋ'), + ('ꝍ', 'ꝍ'), + ('ꝏ', 'ꝏ'), + ('ꝑ', 'ꝑ'), + ('ꝓ', 'ꝓ'), + ('ꝕ', 'ꝕ'), + ('ꝗ', 'ꝗ'), + ('ꝙ', 'ꝙ'), + ('ꝛ', 'ꝛ'), + ('ꝝ', 'ꝝ'), + ('ꝟ', 'ꝟ'), + ('ꝡ', 'ꝡ'), + ('ꝣ', 'ꝣ'), + ('ꝥ', 'ꝥ'), + ('ꝧ', 'ꝧ'), + ('ꝩ', 'ꝩ'), + ('ꝫ', 'ꝫ'), + ('ꝭ', 'ꝭ'), + ('ꝯ', 'ꝸ'), + ('ꝺ', 'ꝺ'), + ('ꝼ', 'ꝼ'), + ('ꝿ', 'ꝿ'), + ('ꞁ', 'ꞁ'), + ('ꞃ', 'ꞃ'), + ('ꞅ', 'ꞅ'), + ('ꞇ', 'ꞇ'), + ('ꞌ', 'ꞌ'), + ('ꞎ', 'ꞎ'), + ('ꞑ', 'ꞑ'), + ('ꞓ', 'ꞕ'), + ('ꞗ', 'ꞗ'), + ('ꞙ', 'ꞙ'), + ('ꞛ', 'ꞛ'), + ('ꞝ', 'ꞝ'), + ('ꞟ', 'ꞟ'), + ('ꞡ', 'ꞡ'), + ('ꞣ', 'ꞣ'), + ('ꞥ', 'ꞥ'), + ('ꞧ', 'ꞧ'), + ('ꞩ', 'ꞩ'), + ('ꞯ', 'ꞯ'), + ('ꞵ', 'ꞵ'), + ('ꞷ', 'ꞷ'), + ('ꞹ', 'ꞹ'), + ('ꞻ', 'ꞻ'), + ('ꞽ', 'ꞽ'), + ('ꞿ', 'ꞿ'), + ('ꟁ', 'ꟁ'), + ('ꟃ', 'ꟃ'), + ('ꟈ', 'ꟈ'), + ('ꟊ', 'ꟊ'), + ('ꟍ', 'ꟍ'), + ('ꟑ', 'ꟑ'), + ('ꟓ', 'ꟓ'), + ('ꟕ', 'ꟕ'), + ('ꟗ', 'ꟗ'), + ('ꟙ', 'ꟙ'), + ('ꟛ', 'ꟛ'), + ('ꟲ', 'ꟴ'), + ('ꟶ', 'ꟶ'), + ('ꟸ', 'ꟺ'), + ('ꬰ', 'ꭚ'), + ('ꭜ', 'ꭩ'), + ('ꭰ', 'ꮿ'), + ('ff', 'st'), + ('ﬓ', 'ﬗ'), + ('a', 'z'), + ('𐐨', '𐑏'), + ('𐓘', '𐓻'), + ('𐖗', '𐖡'), + ('𐖣', '𐖱'), + ('𐖳', '𐖹'), + ('𐖻', '𐖼'), + ('𐞀', '𐞀'), + ('𐞃', '𐞅'), + ('𐞇', '𐞰'), + ('𐞲', '𐞺'), + ('𐳀', '𐳲'), + ('𐵰', '𐶅'), + ('𑣀', '𑣟'), + ('𖹠', '𖹿'), + ('𝐚', '𝐳'), + ('𝑎', '𝑔'), + ('𝑖', '𝑧'), + ('𝒂', '𝒛'), + ('𝒶', '𝒹'), + ('𝒻', '𝒻'), + ('𝒽', '𝓃'), + ('𝓅', '𝓏'), + ('𝓪', '𝔃'), + ('𝔞', '𝔷'), + ('𝕒', '𝕫'), + ('𝖆', '𝖟'), + ('𝖺', '𝗓'), + ('𝗮', '𝘇'), + ('𝘢', '𝘻'), + ('𝙖', '𝙯'), + ('𝚊', '𝚥'), + ('𝛂', '𝛚'), + ('𝛜', '𝛡'), + ('𝛼', '𝜔'), + ('𝜖', '𝜛'), + ('𝜶', '𝝎'), + ('𝝐', '𝝕'), + ('𝝰', '𝞈'), + ('𝞊', '𝞏'), + ('𝞪', '𝟂'), + ('𝟄', '𝟉'), + ('𝟋', '𝟋'), + ('𝼀', '𝼉'), + ('𝼋', '𝼞'), + ('𝼥', '𝼪'), + ('𞀰', '𞁭'), + ('𞤢', '𞥃'), +]; + +pub const NUMERIC: &'static [(char, char)] = &[ + ('0', '9'), + ('\u{600}', '\u{605}'), + ('٠', '٩'), + ('٫', '٬'), + ('\u{6dd}', '\u{6dd}'), + ('۰', '۹'), + ('߀', '߉'), + ('\u{890}', '\u{891}'), + ('\u{8e2}', '\u{8e2}'), + ('०', '९'), + ('০', '৯'), + ('੦', '੯'), + ('૦', '૯'), + ('୦', '୯'), + ('௦', '௯'), + ('౦', '౯'), + ('೦', '೯'), + ('൦', '൯'), + ('෦', '෯'), + ('๐', '๙'), + ('໐', '໙'), + ('༠', '༩'), + ('၀', '၉'), + ('႐', '႙'), + ('០', '៩'), + ('᠐', '᠙'), + ('᥆', '᥏'), + ('᧐', '᧚'), + ('᪀', '᪉'), + ('᪐', '᪙'), + ('᭐', '᭙'), + ('᮰', '᮹'), + ('᱀', '᱉'), + ('᱐', '᱙'), + ('꘠', '꘩'), + ('꣐', '꣙'), + ('꤀', '꤉'), + ('꧐', '꧙'), + 
('꧰', '꧹'), + ('꩐', '꩙'), + ('꯰', '꯹'), + ('0', '9'), + ('𐒠', '𐒩'), + ('𐴰', '𐴹'), + ('𐵀', '𐵉'), + ('𑁦', '𑁯'), + ('\u{110bd}', '\u{110bd}'), + ('\u{110cd}', '\u{110cd}'), + ('𑃰', '𑃹'), + ('𑄶', '𑄿'), + ('𑇐', '𑇙'), + ('𑋰', '𑋹'), + ('𑑐', '𑑙'), + ('𑓐', '𑓙'), + ('𑙐', '𑙙'), + ('𑛀', '𑛉'), + ('𑛐', '𑛣'), + ('𑜰', '𑜹'), + ('𑣠', '𑣩'), + ('𑥐', '𑥙'), + ('𑯰', '𑯹'), + ('𑱐', '𑱙'), + ('𑵐', '𑵙'), + ('𑶠', '𑶩'), + ('𑽐', '𑽙'), + ('𖄰', '𖄹'), + ('𖩠', '𖩩'), + ('𖫀', '𖫉'), + ('𖭐', '𖭙'), + ('𖵰', '𖵹'), + ('𜳰', '𜳹'), + ('𝟎', '𝟿'), + ('𞅀', '𞅉'), + ('𞋰', '𞋹'), + ('𞓰', '𞓹'), + ('𞗱', '𞗺'), + ('𞥐', '𞥙'), + ('🯰', '🯹'), +]; + +pub const OLETTER: &'static [(char, char)] = &[ + ('ƻ', 'ƻ'), + ('ǀ', 'ǃ'), + ('ʔ', 'ʔ'), + ('ʹ', 'ʿ'), + ('ˆ', 'ˑ'), + ('ˬ', 'ˬ'), + ('ˮ', 'ˮ'), + ('ʹ', 'ʹ'), + ('ՙ', 'ՙ'), + ('א', 'ת'), + ('ׯ', '׳'), + ('ؠ', 'ي'), + ('ٮ', 'ٯ'), + ('ٱ', 'ۓ'), + ('ە', 'ە'), + ('ۥ', 'ۦ'), + ('ۮ', 'ۯ'), + ('ۺ', 'ۼ'), + ('ۿ', 'ۿ'), + ('ܐ', 'ܐ'), + ('ܒ', 'ܯ'), + ('ݍ', 'ޥ'), + ('ޱ', 'ޱ'), + ('ߊ', 'ߪ'), + ('ߴ', 'ߵ'), + ('ߺ', 'ߺ'), + ('ࠀ', 'ࠕ'), + ('ࠚ', 'ࠚ'), + ('ࠤ', 'ࠤ'), + ('ࠨ', 'ࠨ'), + ('ࡀ', 'ࡘ'), + ('ࡠ', 'ࡪ'), + ('ࡰ', 'ࢇ'), + ('ࢉ', 'ࢎ'), + ('ࢠ', 'ࣉ'), + ('ऄ', 'ह'), + ('ऽ', 'ऽ'), + ('ॐ', 'ॐ'), + ('क़', 'ॡ'), + ('ॱ', 'ঀ'), + ('অ', 'ঌ'), + ('এ', 'ঐ'), + ('ও', 'ন'), + ('প', 'র'), + ('ল', 'ল'), + ('শ', 'হ'), + ('ঽ', 'ঽ'), + ('ৎ', 'ৎ'), + ('ড়', 'ঢ়'), + ('য়', 'ৡ'), + ('ৰ', 'ৱ'), + ('ৼ', 'ৼ'), + ('ਅ', 'ਊ'), + ('ਏ', 'ਐ'), + ('ਓ', 'ਨ'), + ('ਪ', 'ਰ'), + ('ਲ', 'ਲ਼'), + ('ਵ', 'ਸ਼'), + ('ਸ', 'ਹ'), + ('ਖ਼', 'ੜ'), + ('ਫ਼', 'ਫ਼'), + ('ੲ', 'ੴ'), + ('અ', 'ઍ'), + ('એ', 'ઑ'), + ('ઓ', 'ન'), + ('પ', 'ર'), + ('લ', 'ળ'), + ('વ', 'હ'), + ('ઽ', 'ઽ'), + ('ૐ', 'ૐ'), + ('ૠ', 'ૡ'), + ('ૹ', 'ૹ'), + ('ଅ', 'ଌ'), + ('ଏ', 'ଐ'), + ('ଓ', 'ନ'), + ('ପ', 'ର'), + ('ଲ', 'ଳ'), + ('ଵ', 'ହ'), + ('ଽ', 'ଽ'), + ('ଡ଼', 'ଢ଼'), + ('ୟ', 'ୡ'), + ('ୱ', 'ୱ'), + ('ஃ', 'ஃ'), + ('அ', 'ஊ'), + ('எ', 'ஐ'), + ('ஒ', 'க'), + ('ங', 'ச'), + ('ஜ', 'ஜ'), + ('ஞ', 'ட'), + ('ண', 'த'), + ('ந', 'ப'), + ('ம', 'ஹ'), + ('ௐ', 'ௐ'), + ('అ', 'ఌ'), + ('ఎ', 'ఐ'), + ('ఒ', 'న'), + ('ప', 'హ'), + ('ఽ', 'ఽ'), + ('ౘ', 'ౚ'), + ('ౝ', 'ౝ'), + ('ౠ', 'ౡ'), + ('ಀ', 'ಀ'), + ('ಅ', 'ಌ'), + ('ಎ', 'ಐ'), + ('ಒ', 'ನ'), + ('ಪ', 'ಳ'), + ('ವ', 'ಹ'), + ('ಽ', 'ಽ'), + ('ೝ', 'ೞ'), + ('ೠ', 'ೡ'), + ('ೱ', 'ೲ'), + ('ഄ', 'ഌ'), + ('എ', 'ഐ'), + ('ഒ', 'ഺ'), + ('ഽ', 'ഽ'), + ('ൎ', 'ൎ'), + ('ൔ', 'ൖ'), + ('ൟ', 'ൡ'), + ('ൺ', 'ൿ'), + ('අ', 'ඖ'), + ('ක', 'න'), + ('ඳ', 'ර'), + ('ල', 'ල'), + ('ව', 'ෆ'), + ('ก', 'ะ'), + ('า', 'ำ'), + ('เ', 'ๆ'), + ('ກ', 'ຂ'), + ('ຄ', 'ຄ'), + ('ຆ', 'ຊ'), + ('ຌ', 'ຣ'), + ('ລ', 'ລ'), + ('ວ', 'ະ'), + ('າ', 'ຳ'), + ('ຽ', 'ຽ'), + ('ເ', 'ໄ'), + ('ໆ', 'ໆ'), + ('ໜ', 'ໟ'), + ('ༀ', 'ༀ'), + ('ཀ', 'ཇ'), + ('ཉ', 'ཬ'), + ('ྈ', 'ྌ'), + ('က', 'ဪ'), + ('ဿ', 'ဿ'), + ('ၐ', 'ၕ'), + ('ၚ', 'ၝ'), + ('ၡ', 'ၡ'), + ('ၥ', 'ၦ'), + ('ၮ', 'ၰ'), + ('ၵ', 'ႁ'), + ('ႎ', 'ႎ'), + ('ა', 'ჺ'), + ('ჽ', 'ቈ'), + ('ቊ', 'ቍ'), + ('ቐ', 'ቖ'), + ('ቘ', 'ቘ'), + ('ቚ', 'ቝ'), + ('በ', 'ኈ'), + ('ኊ', 'ኍ'), + ('ነ', 'ኰ'), + ('ኲ', 'ኵ'), + ('ኸ', 'ኾ'), + ('ዀ', 'ዀ'), + ('ዂ', 'ዅ'), + ('ወ', 'ዖ'), + ('ዘ', 'ጐ'), + ('ጒ', 'ጕ'), + ('ጘ', 'ፚ'), + ('ᎀ', 'ᎏ'), + ('ᐁ', 'ᙬ'), + ('ᙯ', 'ᙿ'), + ('ᚁ', 'ᚚ'), + ('ᚠ', 'ᛪ'), + ('ᛮ', 'ᛸ'), + ('ᜀ', 'ᜑ'), + ('ᜟ', 'ᜱ'), + ('ᝀ', 'ᝑ'), + ('ᝠ', 'ᝬ'), + ('ᝮ', 'ᝰ'), + ('ក', 'ឳ'), + ('ៗ', 'ៗ'), + ('ៜ', 'ៜ'), + ('ᠠ', 'ᡸ'), + ('ᢀ', 'ᢄ'), + ('ᢇ', 'ᢨ'), + ('ᢪ', 'ᢪ'), + ('ᢰ', 'ᣵ'), + ('ᤀ', 'ᤞ'), + ('ᥐ', 'ᥭ'), + ('ᥰ', 'ᥴ'), + ('ᦀ', 'ᦫ'), + ('ᦰ', 'ᧉ'), + ('ᨀ', 'ᨖ'), + ('ᨠ', 'ᩔ'), + ('ᪧ', 'ᪧ'), + ('ᬅ', 'ᬳ'), + ('ᭅ', 'ᭌ'), + ('ᮃ', 'ᮠ'), + ('ᮮ', 'ᮯ'), + ('ᮺ', 'ᯥ'), + ('ᰀ', 'ᰣ'), + ('ᱍ', 'ᱏ'), + ('ᱚ', 'ᱽ'), + ('Ა', 'Ჺ'), + ('Ჽ', 'Ჿ'), + ('ᳩ', 'ᳬ'), + ('ᳮ', 'ᳳ'), 
+ ('ᳵ', 'ᳶ'), + ('ᳺ', 'ᳺ'), + ('ℵ', 'ℸ'), + ('ↀ', 'ↂ'), + ('ↅ', 'ↈ'), + ('ⴰ', 'ⵧ'), + ('ⵯ', 'ⵯ'), + ('ⶀ', 'ⶖ'), + ('ⶠ', 'ⶦ'), + ('ⶨ', 'ⶮ'), + ('ⶰ', 'ⶶ'), + ('ⶸ', 'ⶾ'), + ('ⷀ', 'ⷆ'), + ('ⷈ', 'ⷎ'), + ('ⷐ', 'ⷖ'), + ('ⷘ', 'ⷞ'), + ('ⸯ', 'ⸯ'), + ('々', '〇'), + ('〡', '〩'), + ('〱', '〵'), + ('〸', '〼'), + ('ぁ', 'ゖ'), + ('ゝ', 'ゟ'), + ('ァ', 'ヺ'), + ('ー', 'ヿ'), + ('ㄅ', 'ㄯ'), + ('ㄱ', 'ㆎ'), + ('ㆠ', 'ㆿ'), + ('ㇰ', 'ㇿ'), + ('㐀', '䶿'), + ('一', 'ꒌ'), + ('ꓐ', 'ꓽ'), + ('ꔀ', 'ꘌ'), + ('ꘐ', 'ꘟ'), + ('ꘪ', 'ꘫ'), + ('ꙮ', 'ꙮ'), + ('ꙿ', 'ꙿ'), + ('ꚠ', 'ꛯ'), + ('ꜗ', 'ꜟ'), + ('ꞈ', 'ꞈ'), + ('ꞏ', 'ꞏ'), + ('ꟷ', 'ꟷ'), + ('ꟻ', 'ꠁ'), + ('ꠃ', 'ꠅ'), + ('ꠇ', 'ꠊ'), + ('ꠌ', 'ꠢ'), + ('ꡀ', 'ꡳ'), + ('ꢂ', 'ꢳ'), + ('ꣲ', 'ꣷ'), + ('ꣻ', 'ꣻ'), + ('ꣽ', 'ꣾ'), + ('ꤊ', 'ꤥ'), + ('ꤰ', 'ꥆ'), + ('ꥠ', 'ꥼ'), + ('ꦄ', 'ꦲ'), + ('ꧏ', 'ꧏ'), + ('ꧠ', 'ꧤ'), + ('ꧦ', 'ꧯ'), + ('ꧺ', 'ꧾ'), + ('ꨀ', 'ꨨ'), + ('ꩀ', 'ꩂ'), + ('ꩄ', 'ꩋ'), + ('ꩠ', 'ꩶ'), + ('ꩺ', 'ꩺ'), + ('ꩾ', 'ꪯ'), + ('ꪱ', 'ꪱ'), + ('ꪵ', 'ꪶ'), + ('ꪹ', 'ꪽ'), + ('ꫀ', 'ꫀ'), + ('ꫂ', 'ꫂ'), + ('ꫛ', 'ꫝ'), + ('ꫠ', 'ꫪ'), + ('ꫲ', 'ꫴ'), + ('ꬁ', 'ꬆ'), + ('ꬉ', 'ꬎ'), + ('ꬑ', 'ꬖ'), + ('ꬠ', 'ꬦ'), + ('ꬨ', 'ꬮ'), + ('ꯀ', 'ꯢ'), + ('가', '힣'), + ('ힰ', 'ퟆ'), + ('ퟋ', 'ퟻ'), + ('豈', '舘'), + ('並', '龎'), + ('יִ', 'יִ'), + ('ײַ', 'ﬨ'), + ('שׁ', 'זּ'), + ('טּ', 'לּ'), + ('מּ', 'מּ'), + ('נּ', 'סּ'), + ('ףּ', 'פּ'), + ('צּ', 'ﮱ'), + ('ﯓ', 'ﴽ'), + ('ﵐ', 'ﶏ'), + ('ﶒ', 'ﷇ'), + ('ﷰ', 'ﷻ'), + ('ﹰ', 'ﹴ'), + ('ﹶ', 'ﻼ'), + ('ヲ', 'ン'), + ('ᅠ', 'ᄒ'), + ('ᅡ', 'ᅦ'), + ('ᅧ', 'ᅬ'), + ('ᅭ', 'ᅲ'), + ('ᅳ', 'ᅵ'), + ('𐀀', '𐀋'), + ('𐀍', '𐀦'), + ('𐀨', '𐀺'), + ('𐀼', '𐀽'), + ('𐀿', '𐁍'), + ('𐁐', '𐁝'), + ('𐂀', '𐃺'), + ('𐅀', '𐅴'), + ('𐊀', '𐊜'), + ('𐊠', '𐋐'), + ('𐌀', '𐌟'), + ('𐌭', '𐍊'), + ('𐍐', '𐍵'), + ('𐎀', '𐎝'), + ('𐎠', '𐏃'), + ('𐏈', '𐏏'), + ('𐏑', '𐏕'), + ('𐑐', '𐒝'), + ('𐔀', '𐔧'), + ('𐔰', '𐕣'), + ('𐗀', '𐗳'), + ('𐘀', '𐜶'), + ('𐝀', '𐝕'), + ('𐝠', '𐝧'), + ('𐞁', '𐞂'), + ('𐠀', '𐠅'), + ('𐠈', '𐠈'), + ('𐠊', '𐠵'), + ('𐠷', '𐠸'), + ('𐠼', '𐠼'), + ('𐠿', '𐡕'), + ('𐡠', '𐡶'), + ('𐢀', '𐢞'), + ('𐣠', '𐣲'), + ('𐣴', '𐣵'), + ('𐤀', '𐤕'), + ('𐤠', '𐤹'), + ('𐦀', '𐦷'), + ('𐦾', '𐦿'), + ('𐨀', '𐨀'), + ('𐨐', '𐨓'), + ('𐨕', '𐨗'), + ('𐨙', '𐨵'), + ('𐩠', '𐩼'), + ('𐪀', '𐪜'), + ('𐫀', '𐫇'), + ('𐫉', '𐫤'), + ('𐬀', '𐬵'), + ('𐭀', '𐭕'), + ('𐭠', '𐭲'), + ('𐮀', '𐮑'), + ('𐰀', '𐱈'), + ('𐴀', '𐴣'), + ('𐵊', '𐵏'), + ('𐵯', '𐵯'), + ('𐺀', '𐺩'), + ('𐺰', '𐺱'), + ('𐻂', '𐻄'), + ('𐼀', '𐼜'), + ('𐼧', '𐼧'), + ('𐼰', '𐽅'), + ('𐽰', '𐾁'), + ('𐾰', '𐿄'), + ('𐿠', '𐿶'), + ('𑀃', '𑀷'), + ('𑁱', '𑁲'), + ('𑁵', '𑁵'), + ('𑂃', '𑂯'), + ('𑃐', '𑃨'), + ('𑄃', '𑄦'), + ('𑅄', '𑅄'), + ('𑅇', '𑅇'), + ('𑅐', '𑅲'), + ('𑅶', '𑅶'), + ('𑆃', '𑆲'), + ('𑇁', '𑇄'), + ('𑇚', '𑇚'), + ('𑇜', '𑇜'), + ('𑈀', '𑈑'), + ('𑈓', '𑈫'), + ('𑈿', '𑉀'), + ('𑊀', '𑊆'), + ('𑊈', '𑊈'), + ('𑊊', '𑊍'), + ('𑊏', '𑊝'), + ('𑊟', '𑊨'), + ('𑊰', '𑋞'), + ('𑌅', '𑌌'), + ('𑌏', '𑌐'), + ('𑌓', '𑌨'), + ('𑌪', '𑌰'), + ('𑌲', '𑌳'), + ('𑌵', '𑌹'), + ('𑌽', '𑌽'), + ('𑍐', '𑍐'), + ('𑍝', '𑍡'), + ('𑎀', '𑎉'), + ('𑎋', '𑎋'), + ('𑎎', '𑎎'), + ('𑎐', '𑎵'), + ('𑎷', '𑎷'), + ('𑏑', '𑏑'), + ('𑏓', '𑏓'), + ('𑐀', '𑐴'), + ('𑑇', '𑑊'), + ('𑑟', '𑑡'), + ('𑒀', '𑒯'), + ('𑓄', '𑓅'), + ('𑓇', '𑓇'), + ('𑖀', '𑖮'), + ('𑗘', '𑗛'), + ('𑘀', '𑘯'), + ('𑙄', '𑙄'), + ('𑚀', '𑚪'), + ('𑚸', '𑚸'), + ('𑜀', '𑜚'), + ('𑝀', '𑝆'), + ('𑠀', '𑠫'), + ('𑣿', '𑤆'), + ('𑤉', '𑤉'), + ('𑤌', '𑤓'), + ('𑤕', '𑤖'), + ('𑤘', '𑤯'), + ('𑤿', '𑤿'), + ('𑥁', '𑥁'), + ('𑦠', '𑦧'), + ('𑦪', '𑧐'), + ('𑧡', '𑧡'), + ('𑧣', '𑧣'), + ('𑨀', '𑨀'), + ('𑨋', '𑨲'), + ('𑨺', '𑨺'), + ('𑩐', '𑩐'), + ('𑩜', '𑪉'), + ('𑪝', '𑪝'), + ('𑪰', '𑫸'), + ('𑯀', '𑯠'), + ('𑰀', '𑰈'), + ('𑰊', '𑰮'), + ('𑱀', '𑱀'), + ('𑱲', '𑲏'), + ('𑴀', '𑴆'), + ('𑴈', '𑴉'), + ('𑴋', '𑴰'), + ('𑵆', '𑵆'), + ('𑵠', '𑵥'), + ('𑵧', '𑵨'), + ('𑵪', '𑶉'), + ('𑶘', 
'𑶘'), + ('𑻠', '𑻲'), + ('𑼂', '𑼂'), + ('𑼄', '𑼐'), + ('𑼒', '𑼳'), + ('𑾰', '𑾰'), + ('𒀀', '𒎙'), + ('𒐀', '𒑮'), + ('𒒀', '𒕃'), + ('𒾐', '𒿰'), + ('𓀀', '𓐯'), + ('𓑁', '𓑆'), + ('𓑠', '𔏺'), + ('𔐀', '𔙆'), + ('𖄀', '𖄝'), + ('𖠀', '𖨸'), + ('𖩀', '𖩞'), + ('𖩰', '𖪾'), + ('𖫐', '𖫭'), + ('𖬀', '𖬯'), + ('𖭀', '𖭃'), + ('𖭣', '𖭷'), + ('𖭽', '𖮏'), + ('𖵀', '𖵬'), + ('𖼀', '𖽊'), + ('𖽐', '𖽐'), + ('𖾓', '𖾟'), + ('𖿠', '𖿡'), + ('𖿣', '𖿣'), + ('𗀀', '𘟷'), + ('𘠀', '𘳕'), + ('𘳿', '𘴈'), + ('𚿰', '𚿳'), + ('𚿵', '𚿻'), + ('𚿽', '𚿾'), + ('𛀀', '𛄢'), + ('𛄲', '𛄲'), + ('𛅐', '𛅒'), + ('𛅕', '𛅕'), + ('𛅤', '𛅧'), + ('𛅰', '𛋻'), + ('𛰀', '𛱪'), + ('𛱰', '𛱼'), + ('𛲀', '𛲈'), + ('𛲐', '𛲙'), + ('𝼊', '𝼊'), + ('𞄀', '𞄬'), + ('𞄷', '𞄽'), + ('𞅎', '𞅎'), + ('𞊐', '𞊭'), + ('𞋀', '𞋫'), + ('𞓐', '𞓫'), + ('𞗐', '𞗭'), + ('𞗰', '𞗰'), + ('𞟠', '𞟦'), + ('𞟨', '𞟫'), + ('𞟭', '𞟮'), + ('𞟰', '𞟾'), + ('𞠀', '𞣄'), + ('𞥋', '𞥋'), + ('𞸀', '𞸃'), + ('𞸅', '𞸟'), + ('𞸡', '𞸢'), + ('𞸤', '𞸤'), + ('𞸧', '𞸧'), + ('𞸩', '𞸲'), + ('𞸴', '𞸷'), + ('𞸹', '𞸹'), + ('𞸻', '𞸻'), + ('𞹂', '𞹂'), + ('𞹇', '𞹇'), + ('𞹉', '𞹉'), + ('𞹋', '𞹋'), + ('𞹍', '𞹏'), + ('𞹑', '𞹒'), + ('𞹔', '𞹔'), + ('𞹗', '𞹗'), + ('𞹙', '𞹙'), + ('𞹛', '𞹛'), + ('𞹝', '𞹝'), + ('𞹟', '𞹟'), + ('𞹡', '𞹢'), + ('𞹤', '𞹤'), + ('𞹧', '𞹪'), + ('𞹬', '𞹲'), + ('𞹴', '𞹷'), + ('𞹹', '𞹼'), + ('𞹾', '𞹾'), + ('𞺀', '𞺉'), + ('𞺋', '𞺛'), + ('𞺡', '𞺣'), + ('𞺥', '𞺩'), + ('𞺫', '𞺻'), + ('𠀀', '𪛟'), + ('𪜀', '𫜹'), + ('𫝀', '𫠝'), + ('𫠠', '𬺡'), + ('𬺰', '𮯠'), + ('𮯰', '𮹝'), + ('丽', '𪘀'), + ('𰀀', '𱍊'), + ('𱍐', '𲎯'), +]; + +pub const SCONTINUE: &'static [(char, char)] = &[ + (',', '-'), + (':', ';'), + (';', ';'), + ('՝', '՝'), + ('،', '؍'), + ('߸', '߸'), + ('᠂', '᠂'), + ('᠈', '᠈'), + ('–', '—'), + ('、', '、'), + ('︐', '︑'), + ('︓', '︔'), + ('︱', '︲'), + ('﹐', '﹑'), + ('﹔', '﹕'), + ('﹘', '﹘'), + ('﹣', '﹣'), + (',', '-'), + (':', ';'), + ('、', '、'), +]; + +pub const STERM: &'static [(char, char)] = &[ + ('!', '!'), + ('?', '?'), + ('։', '։'), + ('؝', '؟'), + ('۔', '۔'), + ('܀', '܂'), + ('߹', '߹'), + ('࠷', '࠷'), + ('࠹', '࠹'), + ('࠽', '࠾'), + ('।', '॥'), + ('၊', '။'), + ('።', '።'), + ('፧', '፨'), + ('᙮', '᙮'), + ('᜵', '᜶'), + ('។', '៕'), + ('᠃', '᠃'), + ('᠉', '᠉'), + ('᥄', '᥅'), + ('᪨', '᪫'), + ('᭎', '᭏'), + ('᭚', '᭛'), + ('᭞', '᭟'), + ('᭽', '᭿'), + ('᰻', '᰼'), + ('᱾', '᱿'), + ('‼', '‽'), + ('⁇', '⁉'), + ('⳹', '⳻'), + ('⸮', '⸮'), + ('⸼', '⸼'), + ('⹓', '⹔'), + ('。', '。'), + ('꓿', '꓿'), + ('꘎', '꘏'), + ('꛳', '꛳'), + ('꛷', '꛷'), + ('꡶', '꡷'), + ('꣎', '꣏'), + ('꤯', '꤯'), + ('꧈', '꧉'), + ('꩝', '꩟'), + ('꫰', '꫱'), + ('꯫', '꯫'), + ('︒', '︒'), + ('︕', '︖'), + ('﹖', '﹗'), + ('!', '!'), + ('?', '?'), + ('。', '。'), + ('𐩖', '𐩗'), + ('𐽕', '𐽙'), + ('𐾆', '𐾉'), + ('𑁇', '𑁈'), + ('𑂾', '𑃁'), + ('𑅁', '𑅃'), + ('𑇅', '𑇆'), + ('𑇍', '𑇍'), + ('𑇞', '𑇟'), + ('𑈸', '𑈹'), + ('𑈻', '𑈼'), + ('𑊩', '𑊩'), + ('𑏔', '𑏕'), + ('𑑋', '𑑌'), + ('𑗂', '𑗃'), + ('𑗉', '𑗗'), + ('𑙁', '𑙂'), + ('𑜼', '𑜾'), + ('𑥄', '𑥄'), + ('𑥆', '𑥆'), + ('𑩂', '𑩃'), + ('𑪛', '𑪜'), + ('𑱁', '𑱂'), + ('𑻷', '𑻸'), + ('𑽃', '𑽄'), + ('𖩮', '𖩯'), + ('𖫵', '𖫵'), + ('𖬷', '𖬸'), + ('𖭄', '𖭄'), + ('𖵮', '𖵯'), + ('𖺘', '𖺘'), + ('𛲟', '𛲟'), + ('𝪈', '𝪈'), +]; + +pub const SEP: &'static [(char, char)] = + &[('\u{85}', '\u{85}'), ('\u{2028}', '\u{2029}')]; + +pub const SP: &'static [(char, char)] = &[ + ('\t', '\t'), + ('\u{b}', '\u{c}'), + (' ', ' '), + ('\u{a0}', '\u{a0}'), + ('\u{1680}', '\u{1680}'), + ('\u{2000}', '\u{200a}'), + ('\u{202f}', '\u{202f}'), + ('\u{205f}', '\u{205f}'), + ('\u{3000}', '\u{3000}'), +]; + +pub const UPPER: &'static [(char, char)] = &[ + ('A', 'Z'), + ('À', 'Ö'), + ('Ø', 'Þ'), + ('Ā', 'Ā'), + ('Ă', 'Ă'), + ('Ą', 'Ą'), + ('Ć', 'Ć'), + ('Ĉ', 'Ĉ'), + ('Ċ', 'Ċ'), + ('Č', 'Č'), + ('Ď', 
'Ď'), + ('Đ', 'Đ'), + ('Ē', 'Ē'), + ('Ĕ', 'Ĕ'), + ('Ė', 'Ė'), + ('Ę', 'Ę'), + ('Ě', 'Ě'), + ('Ĝ', 'Ĝ'), + ('Ğ', 'Ğ'), + ('Ġ', 'Ġ'), + ('Ģ', 'Ģ'), + ('Ĥ', 'Ĥ'), + ('Ħ', 'Ħ'), + ('Ĩ', 'Ĩ'), + ('Ī', 'Ī'), + ('Ĭ', 'Ĭ'), + ('Į', 'Į'), + ('İ', 'İ'), + ('IJ', 'IJ'), + ('Ĵ', 'Ĵ'), + ('Ķ', 'Ķ'), + ('Ĺ', 'Ĺ'), + ('Ļ', 'Ļ'), + ('Ľ', 'Ľ'), + ('Ŀ', 'Ŀ'), + ('Ł', 'Ł'), + ('Ń', 'Ń'), + ('Ņ', 'Ņ'), + ('Ň', 'Ň'), + ('Ŋ', 'Ŋ'), + ('Ō', 'Ō'), + ('Ŏ', 'Ŏ'), + ('Ő', 'Ő'), + ('Œ', 'Œ'), + ('Ŕ', 'Ŕ'), + ('Ŗ', 'Ŗ'), + ('Ř', 'Ř'), + ('Ś', 'Ś'), + ('Ŝ', 'Ŝ'), + ('Ş', 'Ş'), + ('Š', 'Š'), + ('Ţ', 'Ţ'), + ('Ť', 'Ť'), + ('Ŧ', 'Ŧ'), + ('Ũ', 'Ũ'), + ('Ū', 'Ū'), + ('Ŭ', 'Ŭ'), + ('Ů', 'Ů'), + ('Ű', 'Ű'), + ('Ų', 'Ų'), + ('Ŵ', 'Ŵ'), + ('Ŷ', 'Ŷ'), + ('Ÿ', 'Ź'), + ('Ż', 'Ż'), + ('Ž', 'Ž'), + ('Ɓ', 'Ƃ'), + ('Ƅ', 'Ƅ'), + ('Ɔ', 'Ƈ'), + ('Ɖ', 'Ƌ'), + ('Ǝ', 'Ƒ'), + ('Ɠ', 'Ɣ'), + ('Ɩ', 'Ƙ'), + ('Ɯ', 'Ɲ'), + ('Ɵ', 'Ơ'), + ('Ƣ', 'Ƣ'), + ('Ƥ', 'Ƥ'), + ('Ʀ', 'Ƨ'), + ('Ʃ', 'Ʃ'), + ('Ƭ', 'Ƭ'), + ('Ʈ', 'Ư'), + ('Ʊ', 'Ƴ'), + ('Ƶ', 'Ƶ'), + ('Ʒ', 'Ƹ'), + ('Ƽ', 'Ƽ'), + ('DŽ', 'Dž'), + ('LJ', 'Lj'), + ('NJ', 'Nj'), + ('Ǎ', 'Ǎ'), + ('Ǐ', 'Ǐ'), + ('Ǒ', 'Ǒ'), + ('Ǔ', 'Ǔ'), + ('Ǖ', 'Ǖ'), + ('Ǘ', 'Ǘ'), + ('Ǚ', 'Ǚ'), + ('Ǜ', 'Ǜ'), + ('Ǟ', 'Ǟ'), + ('Ǡ', 'Ǡ'), + ('Ǣ', 'Ǣ'), + ('Ǥ', 'Ǥ'), + ('Ǧ', 'Ǧ'), + ('Ǩ', 'Ǩ'), + ('Ǫ', 'Ǫ'), + ('Ǭ', 'Ǭ'), + ('Ǯ', 'Ǯ'), + ('DZ', 'Dz'), + ('Ǵ', 'Ǵ'), + ('Ƕ', 'Ǹ'), + ('Ǻ', 'Ǻ'), + ('Ǽ', 'Ǽ'), + ('Ǿ', 'Ǿ'), + ('Ȁ', 'Ȁ'), + ('Ȃ', 'Ȃ'), + ('Ȅ', 'Ȅ'), + ('Ȇ', 'Ȇ'), + ('Ȉ', 'Ȉ'), + ('Ȋ', 'Ȋ'), + ('Ȍ', 'Ȍ'), + ('Ȏ', 'Ȏ'), + ('Ȑ', 'Ȑ'), + ('Ȓ', 'Ȓ'), + ('Ȕ', 'Ȕ'), + ('Ȗ', 'Ȗ'), + ('Ș', 'Ș'), + ('Ț', 'Ț'), + ('Ȝ', 'Ȝ'), + ('Ȟ', 'Ȟ'), + ('Ƞ', 'Ƞ'), + ('Ȣ', 'Ȣ'), + ('Ȥ', 'Ȥ'), + ('Ȧ', 'Ȧ'), + ('Ȩ', 'Ȩ'), + ('Ȫ', 'Ȫ'), + ('Ȭ', 'Ȭ'), + ('Ȯ', 'Ȯ'), + ('Ȱ', 'Ȱ'), + ('Ȳ', 'Ȳ'), + ('Ⱥ', 'Ȼ'), + ('Ƚ', 'Ⱦ'), + ('Ɂ', 'Ɂ'), + ('Ƀ', 'Ɇ'), + ('Ɉ', 'Ɉ'), + ('Ɋ', 'Ɋ'), + ('Ɍ', 'Ɍ'), + ('Ɏ', 'Ɏ'), + ('Ͱ', 'Ͱ'), + ('Ͳ', 'Ͳ'), + ('Ͷ', 'Ͷ'), + ('Ϳ', 'Ϳ'), + ('Ά', 'Ά'), + ('Έ', 'Ί'), + ('Ό', 'Ό'), + ('Ύ', 'Ώ'), + ('Α', 'Ρ'), + ('Σ', 'Ϋ'), + ('Ϗ', 'Ϗ'), + ('ϒ', 'ϔ'), + ('Ϙ', 'Ϙ'), + ('Ϛ', 'Ϛ'), + ('Ϝ', 'Ϝ'), + ('Ϟ', 'Ϟ'), + ('Ϡ', 'Ϡ'), + ('Ϣ', 'Ϣ'), + ('Ϥ', 'Ϥ'), + ('Ϧ', 'Ϧ'), + ('Ϩ', 'Ϩ'), + ('Ϫ', 'Ϫ'), + ('Ϭ', 'Ϭ'), + ('Ϯ', 'Ϯ'), + ('ϴ', 'ϴ'), + ('Ϸ', 'Ϸ'), + ('Ϲ', 'Ϻ'), + ('Ͻ', 'Я'), + ('Ѡ', 'Ѡ'), + ('Ѣ', 'Ѣ'), + ('Ѥ', 'Ѥ'), + ('Ѧ', 'Ѧ'), + ('Ѩ', 'Ѩ'), + ('Ѫ', 'Ѫ'), + ('Ѭ', 'Ѭ'), + ('Ѯ', 'Ѯ'), + ('Ѱ', 'Ѱ'), + ('Ѳ', 'Ѳ'), + ('Ѵ', 'Ѵ'), + ('Ѷ', 'Ѷ'), + ('Ѹ', 'Ѹ'), + ('Ѻ', 'Ѻ'), + ('Ѽ', 'Ѽ'), + ('Ѿ', 'Ѿ'), + ('Ҁ', 'Ҁ'), + ('Ҋ', 'Ҋ'), + ('Ҍ', 'Ҍ'), + ('Ҏ', 'Ҏ'), + ('Ґ', 'Ґ'), + ('Ғ', 'Ғ'), + ('Ҕ', 'Ҕ'), + ('Җ', 'Җ'), + ('Ҙ', 'Ҙ'), + ('Қ', 'Қ'), + ('Ҝ', 'Ҝ'), + ('Ҟ', 'Ҟ'), + ('Ҡ', 'Ҡ'), + ('Ң', 'Ң'), + ('Ҥ', 'Ҥ'), + ('Ҧ', 'Ҧ'), + ('Ҩ', 'Ҩ'), + ('Ҫ', 'Ҫ'), + ('Ҭ', 'Ҭ'), + ('Ү', 'Ү'), + ('Ұ', 'Ұ'), + ('Ҳ', 'Ҳ'), + ('Ҵ', 'Ҵ'), + ('Ҷ', 'Ҷ'), + ('Ҹ', 'Ҹ'), + ('Һ', 'Һ'), + ('Ҽ', 'Ҽ'), + ('Ҿ', 'Ҿ'), + ('Ӏ', 'Ӂ'), + ('Ӄ', 'Ӄ'), + ('Ӆ', 'Ӆ'), + ('Ӈ', 'Ӈ'), + ('Ӊ', 'Ӊ'), + ('Ӌ', 'Ӌ'), + ('Ӎ', 'Ӎ'), + ('Ӑ', 'Ӑ'), + ('Ӓ', 'Ӓ'), + ('Ӕ', 'Ӕ'), + ('Ӗ', 'Ӗ'), + ('Ә', 'Ә'), + ('Ӛ', 'Ӛ'), + ('Ӝ', 'Ӝ'), + ('Ӟ', 'Ӟ'), + ('Ӡ', 'Ӡ'), + ('Ӣ', 'Ӣ'), + ('Ӥ', 'Ӥ'), + ('Ӧ', 'Ӧ'), + ('Ө', 'Ө'), + ('Ӫ', 'Ӫ'), + ('Ӭ', 'Ӭ'), + ('Ӯ', 'Ӯ'), + ('Ӱ', 'Ӱ'), + ('Ӳ', 'Ӳ'), + ('Ӵ', 'Ӵ'), + ('Ӷ', 'Ӷ'), + ('Ӹ', 'Ӹ'), + ('Ӻ', 'Ӻ'), + ('Ӽ', 'Ӽ'), + ('Ӿ', 'Ӿ'), + ('Ԁ', 'Ԁ'), + ('Ԃ', 'Ԃ'), + ('Ԅ', 'Ԅ'), + ('Ԇ', 'Ԇ'), + ('Ԉ', 'Ԉ'), + ('Ԋ', 'Ԋ'), + ('Ԍ', 'Ԍ'), + ('Ԏ', 'Ԏ'), + ('Ԑ', 'Ԑ'), + ('Ԓ', 'Ԓ'), + ('Ԕ', 'Ԕ'), + ('Ԗ', 'Ԗ'), + ('Ԙ', 'Ԙ'), + ('Ԛ', 'Ԛ'), + ('Ԝ', 'Ԝ'), + ('Ԟ', 'Ԟ'), + ('Ԡ', 
'Ԡ'), + ('Ԣ', 'Ԣ'), + ('Ԥ', 'Ԥ'), + ('Ԧ', 'Ԧ'), + ('Ԩ', 'Ԩ'), + ('Ԫ', 'Ԫ'), + ('Ԭ', 'Ԭ'), + ('Ԯ', 'Ԯ'), + ('Ա', 'Ֆ'), + ('Ⴀ', 'Ⴥ'), + ('Ⴧ', 'Ⴧ'), + ('Ⴭ', 'Ⴭ'), + ('Ꭰ', 'Ᏽ'), + ('Ᲊ', 'Ᲊ'), + ('Ḁ', 'Ḁ'), + ('Ḃ', 'Ḃ'), + ('Ḅ', 'Ḅ'), + ('Ḇ', 'Ḇ'), + ('Ḉ', 'Ḉ'), + ('Ḋ', 'Ḋ'), + ('Ḍ', 'Ḍ'), + ('Ḏ', 'Ḏ'), + ('Ḑ', 'Ḑ'), + ('Ḓ', 'Ḓ'), + ('Ḕ', 'Ḕ'), + ('Ḗ', 'Ḗ'), + ('Ḙ', 'Ḙ'), + ('Ḛ', 'Ḛ'), + ('Ḝ', 'Ḝ'), + ('Ḟ', 'Ḟ'), + ('Ḡ', 'Ḡ'), + ('Ḣ', 'Ḣ'), + ('Ḥ', 'Ḥ'), + ('Ḧ', 'Ḧ'), + ('Ḩ', 'Ḩ'), + ('Ḫ', 'Ḫ'), + ('Ḭ', 'Ḭ'), + ('Ḯ', 'Ḯ'), + ('Ḱ', 'Ḱ'), + ('Ḳ', 'Ḳ'), + ('Ḵ', 'Ḵ'), + ('Ḷ', 'Ḷ'), + ('Ḹ', 'Ḹ'), + ('Ḻ', 'Ḻ'), + ('Ḽ', 'Ḽ'), + ('Ḿ', 'Ḿ'), + ('Ṁ', 'Ṁ'), + ('Ṃ', 'Ṃ'), + ('Ṅ', 'Ṅ'), + ('Ṇ', 'Ṇ'), + ('Ṉ', 'Ṉ'), + ('Ṋ', 'Ṋ'), + ('Ṍ', 'Ṍ'), + ('Ṏ', 'Ṏ'), + ('Ṑ', 'Ṑ'), + ('Ṓ', 'Ṓ'), + ('Ṕ', 'Ṕ'), + ('Ṗ', 'Ṗ'), + ('Ṙ', 'Ṙ'), + ('Ṛ', 'Ṛ'), + ('Ṝ', 'Ṝ'), + ('Ṟ', 'Ṟ'), + ('Ṡ', 'Ṡ'), + ('Ṣ', 'Ṣ'), + ('Ṥ', 'Ṥ'), + ('Ṧ', 'Ṧ'), + ('Ṩ', 'Ṩ'), + ('Ṫ', 'Ṫ'), + ('Ṭ', 'Ṭ'), + ('Ṯ', 'Ṯ'), + ('Ṱ', 'Ṱ'), + ('Ṳ', 'Ṳ'), + ('Ṵ', 'Ṵ'), + ('Ṷ', 'Ṷ'), + ('Ṹ', 'Ṹ'), + ('Ṻ', 'Ṻ'), + ('Ṽ', 'Ṽ'), + ('Ṿ', 'Ṿ'), + ('Ẁ', 'Ẁ'), + ('Ẃ', 'Ẃ'), + ('Ẅ', 'Ẅ'), + ('Ẇ', 'Ẇ'), + ('Ẉ', 'Ẉ'), + ('Ẋ', 'Ẋ'), + ('Ẍ', 'Ẍ'), + ('Ẏ', 'Ẏ'), + ('Ẑ', 'Ẑ'), + ('Ẓ', 'Ẓ'), + ('Ẕ', 'Ẕ'), + ('ẞ', 'ẞ'), + ('Ạ', 'Ạ'), + ('Ả', 'Ả'), + ('Ấ', 'Ấ'), + ('Ầ', 'Ầ'), + ('Ẩ', 'Ẩ'), + ('Ẫ', 'Ẫ'), + ('Ậ', 'Ậ'), + ('Ắ', 'Ắ'), + ('Ằ', 'Ằ'), + ('Ẳ', 'Ẳ'), + ('Ẵ', 'Ẵ'), + ('Ặ', 'Ặ'), + ('Ẹ', 'Ẹ'), + ('Ẻ', 'Ẻ'), + ('Ẽ', 'Ẽ'), + ('Ế', 'Ế'), + ('Ề', 'Ề'), + ('Ể', 'Ể'), + ('Ễ', 'Ễ'), + ('Ệ', 'Ệ'), + ('Ỉ', 'Ỉ'), + ('Ị', 'Ị'), + ('Ọ', 'Ọ'), + ('Ỏ', 'Ỏ'), + ('Ố', 'Ố'), + ('Ồ', 'Ồ'), + ('Ổ', 'Ổ'), + ('Ỗ', 'Ỗ'), + ('Ộ', 'Ộ'), + ('Ớ', 'Ớ'), + ('Ờ', 'Ờ'), + ('Ở', 'Ở'), + ('Ỡ', 'Ỡ'), + ('Ợ', 'Ợ'), + ('Ụ', 'Ụ'), + ('Ủ', 'Ủ'), + ('Ứ', 'Ứ'), + ('Ừ', 'Ừ'), + ('Ử', 'Ử'), + ('Ữ', 'Ữ'), + ('Ự', 'Ự'), + ('Ỳ', 'Ỳ'), + ('Ỵ', 'Ỵ'), + ('Ỷ', 'Ỷ'), + ('Ỹ', 'Ỹ'), + ('Ỻ', 'Ỻ'), + ('Ỽ', 'Ỽ'), + ('Ỿ', 'Ỿ'), + ('Ἀ', 'Ἇ'), + ('Ἐ', 'Ἕ'), + ('Ἠ', 'Ἧ'), + ('Ἰ', 'Ἷ'), + ('Ὀ', 'Ὅ'), + ('Ὑ', 'Ὑ'), + ('Ὓ', 'Ὓ'), + ('Ὕ', 'Ὕ'), + ('Ὗ', 'Ὗ'), + ('Ὠ', 'Ὧ'), + ('ᾈ', 'ᾏ'), + ('ᾘ', 'ᾟ'), + ('ᾨ', 'ᾯ'), + ('Ᾰ', 'ᾼ'), + ('Ὲ', 'ῌ'), + ('Ῐ', 'Ί'), + ('Ῠ', 'Ῥ'), + ('Ὸ', 'ῼ'), + ('ℂ', 'ℂ'), + ('ℇ', 'ℇ'), + ('ℋ', 'ℍ'), + ('ℐ', 'ℒ'), + ('ℕ', 'ℕ'), + ('ℙ', 'ℝ'), + ('ℤ', 'ℤ'), + ('Ω', 'Ω'), + ('ℨ', 'ℨ'), + ('K', 'ℭ'), + ('ℰ', 'ℳ'), + ('ℾ', 'ℿ'), + ('ⅅ', 'ⅅ'), + ('Ⅰ', 'Ⅿ'), + ('Ↄ', 'Ↄ'), + ('Ⓐ', 'Ⓩ'), + ('Ⰰ', 'Ⱟ'), + ('Ⱡ', 'Ⱡ'), + ('Ɫ', 'Ɽ'), + ('Ⱨ', 'Ⱨ'), + ('Ⱪ', 'Ⱪ'), + ('Ⱬ', 'Ⱬ'), + ('Ɑ', 'Ɒ'), + ('Ⱳ', 'Ⱳ'), + ('Ⱶ', 'Ⱶ'), + ('Ȿ', 'Ⲁ'), + ('Ⲃ', 'Ⲃ'), + ('Ⲅ', 'Ⲅ'), + ('Ⲇ', 'Ⲇ'), + ('Ⲉ', 'Ⲉ'), + ('Ⲋ', 'Ⲋ'), + ('Ⲍ', 'Ⲍ'), + ('Ⲏ', 'Ⲏ'), + ('Ⲑ', 'Ⲑ'), + ('Ⲓ', 'Ⲓ'), + ('Ⲕ', 'Ⲕ'), + ('Ⲗ', 'Ⲗ'), + ('Ⲙ', 'Ⲙ'), + ('Ⲛ', 'Ⲛ'), + ('Ⲝ', 'Ⲝ'), + ('Ⲟ', 'Ⲟ'), + ('Ⲡ', 'Ⲡ'), + ('Ⲣ', 'Ⲣ'), + ('Ⲥ', 'Ⲥ'), + ('Ⲧ', 'Ⲧ'), + ('Ⲩ', 'Ⲩ'), + ('Ⲫ', 'Ⲫ'), + ('Ⲭ', 'Ⲭ'), + ('Ⲯ', 'Ⲯ'), + ('Ⲱ', 'Ⲱ'), + ('Ⲳ', 'Ⲳ'), + ('Ⲵ', 'Ⲵ'), + ('Ⲷ', 'Ⲷ'), + ('Ⲹ', 'Ⲹ'), + ('Ⲻ', 'Ⲻ'), + ('Ⲽ', 'Ⲽ'), + ('Ⲿ', 'Ⲿ'), + ('Ⳁ', 'Ⳁ'), + ('Ⳃ', 'Ⳃ'), + ('Ⳅ', 'Ⳅ'), + ('Ⳇ', 'Ⳇ'), + ('Ⳉ', 'Ⳉ'), + ('Ⳋ', 'Ⳋ'), + ('Ⳍ', 'Ⳍ'), + ('Ⳏ', 'Ⳏ'), + ('Ⳑ', 'Ⳑ'), + ('Ⳓ', 'Ⳓ'), + ('Ⳕ', 'Ⳕ'), + ('Ⳗ', 'Ⳗ'), + ('Ⳙ', 'Ⳙ'), + ('Ⳛ', 'Ⳛ'), + ('Ⳝ', 'Ⳝ'), + ('Ⳟ', 'Ⳟ'), + ('Ⳡ', 'Ⳡ'), + ('Ⳣ', 'Ⳣ'), + ('Ⳬ', 'Ⳬ'), + ('Ⳮ', 'Ⳮ'), + ('Ⳳ', 'Ⳳ'), + ('Ꙁ', 'Ꙁ'), + ('Ꙃ', 'Ꙃ'), + ('Ꙅ', 'Ꙅ'), + ('Ꙇ', 'Ꙇ'), + ('Ꙉ', 'Ꙉ'), + ('Ꙋ', 'Ꙋ'), + ('Ꙍ', 'Ꙍ'), + ('Ꙏ', 'Ꙏ'), + ('Ꙑ', 'Ꙑ'), + ('Ꙓ', 'Ꙓ'), + ('Ꙕ', 'Ꙕ'), + ('Ꙗ', 'Ꙗ'), + ('Ꙙ', 'Ꙙ'), + ('Ꙛ', 'Ꙛ'), + ('Ꙝ', 'Ꙝ'), + ('Ꙟ', 'Ꙟ'), + ('Ꙡ', 'Ꙡ'), + ('Ꙣ', 'Ꙣ'), + ('Ꙥ', 'Ꙥ'), + ('Ꙧ', 'Ꙧ'), + 
('Ꙩ', 'Ꙩ'), + ('Ꙫ', 'Ꙫ'), + ('Ꙭ', 'Ꙭ'), + ('Ꚁ', 'Ꚁ'), + ('Ꚃ', 'Ꚃ'), + ('Ꚅ', 'Ꚅ'), + ('Ꚇ', 'Ꚇ'), + ('Ꚉ', 'Ꚉ'), + ('Ꚋ', 'Ꚋ'), + ('Ꚍ', 'Ꚍ'), + ('Ꚏ', 'Ꚏ'), + ('Ꚑ', 'Ꚑ'), + ('Ꚓ', 'Ꚓ'), + ('Ꚕ', 'Ꚕ'), + ('Ꚗ', 'Ꚗ'), + ('Ꚙ', 'Ꚙ'), + ('Ꚛ', 'Ꚛ'), + ('Ꜣ', 'Ꜣ'), + ('Ꜥ', 'Ꜥ'), + ('Ꜧ', 'Ꜧ'), + ('Ꜩ', 'Ꜩ'), + ('Ꜫ', 'Ꜫ'), + ('Ꜭ', 'Ꜭ'), + ('Ꜯ', 'Ꜯ'), + ('Ꜳ', 'Ꜳ'), + ('Ꜵ', 'Ꜵ'), + ('Ꜷ', 'Ꜷ'), + ('Ꜹ', 'Ꜹ'), + ('Ꜻ', 'Ꜻ'), + ('Ꜽ', 'Ꜽ'), + ('Ꜿ', 'Ꜿ'), + ('Ꝁ', 'Ꝁ'), + ('Ꝃ', 'Ꝃ'), + ('Ꝅ', 'Ꝅ'), + ('Ꝇ', 'Ꝇ'), + ('Ꝉ', 'Ꝉ'), + ('Ꝋ', 'Ꝋ'), + ('Ꝍ', 'Ꝍ'), + ('Ꝏ', 'Ꝏ'), + ('Ꝑ', 'Ꝑ'), + ('Ꝓ', 'Ꝓ'), + ('Ꝕ', 'Ꝕ'), + ('Ꝗ', 'Ꝗ'), + ('Ꝙ', 'Ꝙ'), + ('Ꝛ', 'Ꝛ'), + ('Ꝝ', 'Ꝝ'), + ('Ꝟ', 'Ꝟ'), + ('Ꝡ', 'Ꝡ'), + ('Ꝣ', 'Ꝣ'), + ('Ꝥ', 'Ꝥ'), + ('Ꝧ', 'Ꝧ'), + ('Ꝩ', 'Ꝩ'), + ('Ꝫ', 'Ꝫ'), + ('Ꝭ', 'Ꝭ'), + ('Ꝯ', 'Ꝯ'), + ('Ꝺ', 'Ꝺ'), + ('Ꝼ', 'Ꝼ'), + ('Ᵹ', 'Ꝿ'), + ('Ꞁ', 'Ꞁ'), + ('Ꞃ', 'Ꞃ'), + ('Ꞅ', 'Ꞅ'), + ('Ꞇ', 'Ꞇ'), + ('Ꞌ', 'Ꞌ'), + ('Ɥ', 'Ɥ'), + ('Ꞑ', 'Ꞑ'), + ('Ꞓ', 'Ꞓ'), + ('Ꞗ', 'Ꞗ'), + ('Ꞙ', 'Ꞙ'), + ('Ꞛ', 'Ꞛ'), + ('Ꞝ', 'Ꞝ'), + ('Ꞟ', 'Ꞟ'), + ('Ꞡ', 'Ꞡ'), + ('Ꞣ', 'Ꞣ'), + ('Ꞥ', 'Ꞥ'), + ('Ꞧ', 'Ꞧ'), + ('Ꞩ', 'Ꞩ'), + ('Ɦ', 'Ɪ'), + ('Ʞ', 'Ꞵ'), + ('Ꞷ', 'Ꞷ'), + ('Ꞹ', 'Ꞹ'), + ('Ꞻ', 'Ꞻ'), + ('Ꞽ', 'Ꞽ'), + ('Ꞿ', 'Ꞿ'), + ('Ꟁ', 'Ꟁ'), + ('Ꟃ', 'Ꟃ'), + ('Ꞔ', 'Ꟈ'), + ('Ꟊ', 'Ꟊ'), + ('Ɤ', 'Ꟍ'), + ('Ꟑ', 'Ꟑ'), + ('Ꟗ', 'Ꟗ'), + ('Ꟙ', 'Ꟙ'), + ('Ꟛ', 'Ꟛ'), + ('Ƛ', 'Ƛ'), + ('Ꟶ', 'Ꟶ'), + ('A', 'Z'), + ('𐐀', '𐐧'), + ('𐒰', '𐓓'), + ('𐕰', '𐕺'), + ('𐕼', '𐖊'), + ('𐖌', '𐖒'), + ('𐖔', '𐖕'), + ('𐲀', '𐲲'), + ('𐵐', '𐵥'), + ('𑢠', '𑢿'), + ('𖹀', '𖹟'), + ('𝐀', '𝐙'), + ('𝐴', '𝑍'), + ('𝑨', '𝒁'), + ('𝒜', '𝒜'), + ('𝒞', '𝒟'), + ('𝒢', '𝒢'), + ('𝒥', '𝒦'), + ('𝒩', '𝒬'), + ('𝒮', '𝒵'), + ('𝓐', '𝓩'), + ('𝔄', '𝔅'), + ('𝔇', '𝔊'), + ('𝔍', '𝔔'), + ('𝔖', '𝔜'), + ('𝔸', '𝔹'), + ('𝔻', '𝔾'), + ('𝕀', '𝕄'), + ('𝕆', '𝕆'), + ('𝕊', '𝕐'), + ('𝕬', '𝖅'), + ('𝖠', '𝖹'), + ('𝗔', '𝗭'), + ('𝘈', '𝘡'), + ('𝘼', '𝙕'), + ('𝙰', '𝚉'), + ('𝚨', '𝛀'), + ('𝛢', '𝛺'), + ('𝜜', '𝜴'), + ('𝝖', '𝝮'), + ('𝞐', '𝞨'), + ('𝟊', '𝟊'), + ('𞤀', '𞤡'), + ('🄰', '🅉'), + ('🅐', '🅩'), + ('🅰', '🆉'), +]; diff --git a/vendor/regex-syntax/src/unicode_tables/word_break.rs b/vendor/regex-syntax/src/unicode_tables/word_break.rs new file mode 100644 index 00000000000000..b764d34ac72451 --- /dev/null +++ b/vendor/regex-syntax/src/unicode_tables/word_break.rs @@ -0,0 +1,1152 @@ +// DO NOT EDIT THIS FILE. IT WAS AUTOMATICALLY GENERATED BY: +// +// ucd-generate word-break ucd-16.0.0 --chars +// +// Unicode version: 16.0.0. +// +// ucd-generate 0.3.1 is available on crates.io. 
+ +pub const BY_NAME: &'static [(&'static str, &'static [(char, char)])] = &[ + ("ALetter", ALETTER), + ("CR", CR), + ("Double_Quote", DOUBLE_QUOTE), + ("Extend", EXTEND), + ("ExtendNumLet", EXTENDNUMLET), + ("Format", FORMAT), + ("Hebrew_Letter", HEBREW_LETTER), + ("Katakana", KATAKANA), + ("LF", LF), + ("MidLetter", MIDLETTER), + ("MidNum", MIDNUM), + ("MidNumLet", MIDNUMLET), + ("Newline", NEWLINE), + ("Numeric", NUMERIC), + ("Regional_Indicator", REGIONAL_INDICATOR), + ("Single_Quote", SINGLE_QUOTE), + ("WSegSpace", WSEGSPACE), + ("ZWJ", ZWJ), +]; + +pub const ALETTER: &'static [(char, char)] = &[ + ('A', 'Z'), + ('a', 'z'), + ('ª', 'ª'), + ('µ', 'µ'), + ('º', 'º'), + ('À', 'Ö'), + ('Ø', 'ö'), + ('ø', '˗'), + ('˞', '˿'), + ('Ͱ', 'ʹ'), + ('Ͷ', 'ͷ'), + ('ͺ', 'ͽ'), + ('Ϳ', 'Ϳ'), + ('Ά', 'Ά'), + ('Έ', 'Ί'), + ('Ό', 'Ό'), + ('Ύ', 'Ρ'), + ('Σ', 'ϵ'), + ('Ϸ', 'ҁ'), + ('Ҋ', 'ԯ'), + ('Ա', 'Ֆ'), + ('ՙ', '՜'), + ('՞', '՞'), + ('ՠ', 'ֈ'), + ('֊', '֊'), + ('׳', '׳'), + ('ؠ', 'ي'), + ('ٮ', 'ٯ'), + ('ٱ', 'ۓ'), + ('ە', 'ە'), + ('ۥ', 'ۦ'), + ('ۮ', 'ۯ'), + ('ۺ', 'ۼ'), + ('ۿ', 'ۿ'), + ('\u{70f}', 'ܐ'), + ('ܒ', 'ܯ'), + ('ݍ', 'ޥ'), + ('ޱ', 'ޱ'), + ('ߊ', 'ߪ'), + ('ߴ', 'ߵ'), + ('ߺ', 'ߺ'), + ('ࠀ', 'ࠕ'), + ('ࠚ', 'ࠚ'), + ('ࠤ', 'ࠤ'), + ('ࠨ', 'ࠨ'), + ('ࡀ', 'ࡘ'), + ('ࡠ', 'ࡪ'), + ('ࡰ', 'ࢇ'), + ('ࢉ', 'ࢎ'), + ('ࢠ', 'ࣉ'), + ('ऄ', 'ह'), + ('ऽ', 'ऽ'), + ('ॐ', 'ॐ'), + ('क़', 'ॡ'), + ('ॱ', 'ঀ'), + ('অ', 'ঌ'), + ('এ', 'ঐ'), + ('ও', 'ন'), + ('প', 'র'), + ('ল', 'ল'), + ('শ', 'হ'), + ('ঽ', 'ঽ'), + ('ৎ', 'ৎ'), + ('ড়', 'ঢ়'), + ('য়', 'ৡ'), + ('ৰ', 'ৱ'), + ('ৼ', 'ৼ'), + ('ਅ', 'ਊ'), + ('ਏ', 'ਐ'), + ('ਓ', 'ਨ'), + ('ਪ', 'ਰ'), + ('ਲ', 'ਲ਼'), + ('ਵ', 'ਸ਼'), + ('ਸ', 'ਹ'), + ('ਖ਼', 'ੜ'), + ('ਫ਼', 'ਫ਼'), + ('ੲ', 'ੴ'), + ('અ', 'ઍ'), + ('એ', 'ઑ'), + ('ઓ', 'ન'), + ('પ', 'ર'), + ('લ', 'ળ'), + ('વ', 'હ'), + ('ઽ', 'ઽ'), + ('ૐ', 'ૐ'), + ('ૠ', 'ૡ'), + ('ૹ', 'ૹ'), + ('ଅ', 'ଌ'), + ('ଏ', 'ଐ'), + ('ଓ', 'ନ'), + ('ପ', 'ର'), + ('ଲ', 'ଳ'), + ('ଵ', 'ହ'), + ('ଽ', 'ଽ'), + ('ଡ଼', 'ଢ଼'), + ('ୟ', 'ୡ'), + ('ୱ', 'ୱ'), + ('ஃ', 'ஃ'), + ('அ', 'ஊ'), + ('எ', 'ஐ'), + ('ஒ', 'க'), + ('ங', 'ச'), + ('ஜ', 'ஜ'), + ('ஞ', 'ட'), + ('ண', 'த'), + ('ந', 'ப'), + ('ம', 'ஹ'), + ('ௐ', 'ௐ'), + ('అ', 'ఌ'), + ('ఎ', 'ఐ'), + ('ఒ', 'న'), + ('ప', 'హ'), + ('ఽ', 'ఽ'), + ('ౘ', 'ౚ'), + ('ౝ', 'ౝ'), + ('ౠ', 'ౡ'), + ('ಀ', 'ಀ'), + ('ಅ', 'ಌ'), + ('ಎ', 'ಐ'), + ('ಒ', 'ನ'), + ('ಪ', 'ಳ'), + ('ವ', 'ಹ'), + ('ಽ', 'ಽ'), + ('ೝ', 'ೞ'), + ('ೠ', 'ೡ'), + ('ೱ', 'ೲ'), + ('ഄ', 'ഌ'), + ('എ', 'ഐ'), + ('ഒ', 'ഺ'), + ('ഽ', 'ഽ'), + ('ൎ', 'ൎ'), + ('ൔ', 'ൖ'), + ('ൟ', 'ൡ'), + ('ൺ', 'ൿ'), + ('අ', 'ඖ'), + ('ක', 'න'), + ('ඳ', 'ර'), + ('ල', 'ල'), + ('ව', 'ෆ'), + ('ༀ', 'ༀ'), + ('ཀ', 'ཇ'), + ('ཉ', 'ཬ'), + ('ྈ', 'ྌ'), + ('Ⴀ', 'Ⴥ'), + ('Ⴧ', 'Ⴧ'), + ('Ⴭ', 'Ⴭ'), + ('ა', 'ჺ'), + ('ჼ', 'ቈ'), + ('ቊ', 'ቍ'), + ('ቐ', 'ቖ'), + ('ቘ', 'ቘ'), + ('ቚ', 'ቝ'), + ('በ', 'ኈ'), + ('ኊ', 'ኍ'), + ('ነ', 'ኰ'), + ('ኲ', 'ኵ'), + ('ኸ', 'ኾ'), + ('ዀ', 'ዀ'), + ('ዂ', 'ዅ'), + ('ወ', 'ዖ'), + ('ዘ', 'ጐ'), + ('ጒ', 'ጕ'), + ('ጘ', 'ፚ'), + ('ᎀ', 'ᎏ'), + ('Ꭰ', 'Ᏽ'), + ('ᏸ', 'ᏽ'), + ('ᐁ', 'ᙬ'), + ('ᙯ', 'ᙿ'), + ('ᚁ', 'ᚚ'), + ('ᚠ', 'ᛪ'), + ('ᛮ', 'ᛸ'), + ('ᜀ', 'ᜑ'), + ('ᜟ', 'ᜱ'), + ('ᝀ', 'ᝑ'), + ('ᝠ', 'ᝬ'), + ('ᝮ', 'ᝰ'), + ('ᠠ', 'ᡸ'), + ('ᢀ', 'ᢄ'), + ('ᢇ', 'ᢨ'), + ('ᢪ', 'ᢪ'), + ('ᢰ', 'ᣵ'), + ('ᤀ', 'ᤞ'), + ('ᨀ', 'ᨖ'), + ('ᬅ', 'ᬳ'), + ('ᭅ', 'ᭌ'), + ('ᮃ', 'ᮠ'), + ('ᮮ', 'ᮯ'), + ('ᮺ', 'ᯥ'), + ('ᰀ', 'ᰣ'), + ('ᱍ', 'ᱏ'), + ('ᱚ', 'ᱽ'), + ('ᲀ', 'ᲊ'), + ('Ა', 'Ჺ'), + ('Ჽ', 'Ჿ'), + ('ᳩ', 'ᳬ'), + ('ᳮ', 'ᳳ'), + ('ᳵ', 'ᳶ'), + ('ᳺ', 'ᳺ'), + ('ᴀ', 'ᶿ'), + ('Ḁ', 'ἕ'), + ('Ἐ', 'Ἕ'), + ('ἠ', 'ὅ'), + ('Ὀ', 'Ὅ'), + ('ὐ', 'ὗ'), + ('Ὑ', 'Ὑ'), + ('Ὓ', 'Ὓ'), + ('Ὕ', 'Ὕ'), + ('Ὗ', 'ώ'), + ('ᾀ', 'ᾴ'), + 
('ᾶ', 'ᾼ'), + ('ι', 'ι'), + ('ῂ', 'ῄ'), + ('ῆ', 'ῌ'), + ('ῐ', 'ΐ'), + ('ῖ', 'Ί'), + ('ῠ', 'Ῥ'), + ('ῲ', 'ῴ'), + ('ῶ', 'ῼ'), + ('ⁱ', 'ⁱ'), + ('ⁿ', 'ⁿ'), + ('ₐ', 'ₜ'), + ('ℂ', 'ℂ'), + ('ℇ', 'ℇ'), + ('ℊ', 'ℓ'), + ('ℕ', 'ℕ'), + ('ℙ', 'ℝ'), + ('ℤ', 'ℤ'), + ('Ω', 'Ω'), + ('ℨ', 'ℨ'), + ('K', 'ℭ'), + ('ℯ', 'ℹ'), + ('ℼ', 'ℿ'), + ('ⅅ', 'ⅉ'), + ('ⅎ', 'ⅎ'), + ('Ⅰ', 'ↈ'), + ('Ⓐ', 'ⓩ'), + ('Ⰰ', 'ⳤ'), + ('Ⳬ', 'ⳮ'), + ('Ⳳ', 'ⳳ'), + ('ⴀ', 'ⴥ'), + ('ⴧ', 'ⴧ'), + ('ⴭ', 'ⴭ'), + ('ⴰ', 'ⵧ'), + ('ⵯ', 'ⵯ'), + ('ⶀ', 'ⶖ'), + ('ⶠ', 'ⶦ'), + ('ⶨ', 'ⶮ'), + ('ⶰ', 'ⶶ'), + ('ⶸ', 'ⶾ'), + ('ⷀ', 'ⷆ'), + ('ⷈ', 'ⷎ'), + ('ⷐ', 'ⷖ'), + ('ⷘ', 'ⷞ'), + ('ⸯ', 'ⸯ'), + ('々', '々'), + ('〻', '〼'), + ('ㄅ', 'ㄯ'), + ('ㄱ', 'ㆎ'), + ('ㆠ', 'ㆿ'), + ('ꀀ', 'ꒌ'), + ('ꓐ', 'ꓽ'), + ('ꔀ', 'ꘌ'), + ('ꘐ', 'ꘟ'), + ('ꘪ', 'ꘫ'), + ('Ꙁ', 'ꙮ'), + ('ꙿ', 'ꚝ'), + ('ꚠ', 'ꛯ'), + ('꜈', 'ꟍ'), + ('Ꟑ', 'ꟑ'), + ('ꟓ', 'ꟓ'), + ('ꟕ', 'Ƛ'), + ('ꟲ', 'ꠁ'), + ('ꠃ', 'ꠅ'), + ('ꠇ', 'ꠊ'), + ('ꠌ', 'ꠢ'), + ('ꡀ', 'ꡳ'), + ('ꢂ', 'ꢳ'), + ('ꣲ', 'ꣷ'), + ('ꣻ', 'ꣻ'), + ('ꣽ', 'ꣾ'), + ('ꤊ', 'ꤥ'), + ('ꤰ', 'ꥆ'), + ('ꥠ', 'ꥼ'), + ('ꦄ', 'ꦲ'), + ('ꧏ', 'ꧏ'), + ('ꨀ', 'ꨨ'), + ('ꩀ', 'ꩂ'), + ('ꩄ', 'ꩋ'), + ('ꫠ', 'ꫪ'), + ('ꫲ', 'ꫴ'), + ('ꬁ', 'ꬆ'), + ('ꬉ', 'ꬎ'), + ('ꬑ', 'ꬖ'), + ('ꬠ', 'ꬦ'), + ('ꬨ', 'ꬮ'), + ('ꬰ', 'ꭩ'), + ('ꭰ', 'ꯢ'), + ('가', '힣'), + ('ힰ', 'ퟆ'), + ('ퟋ', 'ퟻ'), + ('ff', 'st'), + ('ﬓ', 'ﬗ'), + ('ﭐ', 'ﮱ'), + ('ﯓ', 'ﴽ'), + ('ﵐ', 'ﶏ'), + ('ﶒ', 'ﷇ'), + ('ﷰ', 'ﷻ'), + ('ﹰ', 'ﹴ'), + ('ﹶ', 'ﻼ'), + ('A', 'Z'), + ('a', 'z'), + ('ᅠ', 'ᄒ'), + ('ᅡ', 'ᅦ'), + ('ᅧ', 'ᅬ'), + ('ᅭ', 'ᅲ'), + ('ᅳ', 'ᅵ'), + ('𐀀', '𐀋'), + ('𐀍', '𐀦'), + ('𐀨', '𐀺'), + ('𐀼', '𐀽'), + ('𐀿', '𐁍'), + ('𐁐', '𐁝'), + ('𐂀', '𐃺'), + ('𐅀', '𐅴'), + ('𐊀', '𐊜'), + ('𐊠', '𐋐'), + ('𐌀', '𐌟'), + ('𐌭', '𐍊'), + ('𐍐', '𐍵'), + ('𐎀', '𐎝'), + ('𐎠', '𐏃'), + ('𐏈', '𐏏'), + ('𐏑', '𐏕'), + ('𐐀', '𐒝'), + ('𐒰', '𐓓'), + ('𐓘', '𐓻'), + ('𐔀', '𐔧'), + ('𐔰', '𐕣'), + ('𐕰', '𐕺'), + ('𐕼', '𐖊'), + ('𐖌', '𐖒'), + ('𐖔', '𐖕'), + ('𐖗', '𐖡'), + ('𐖣', '𐖱'), + ('𐖳', '𐖹'), + ('𐖻', '𐖼'), + ('𐗀', '𐗳'), + ('𐘀', '𐜶'), + ('𐝀', '𐝕'), + ('𐝠', '𐝧'), + ('𐞀', '𐞅'), + ('𐞇', '𐞰'), + ('𐞲', '𐞺'), + ('𐠀', '𐠅'), + ('𐠈', '𐠈'), + ('𐠊', '𐠵'), + ('𐠷', '𐠸'), + ('𐠼', '𐠼'), + ('𐠿', '𐡕'), + ('𐡠', '𐡶'), + ('𐢀', '𐢞'), + ('𐣠', '𐣲'), + ('𐣴', '𐣵'), + ('𐤀', '𐤕'), + ('𐤠', '𐤹'), + ('𐦀', '𐦷'), + ('𐦾', '𐦿'), + ('𐨀', '𐨀'), + ('𐨐', '𐨓'), + ('𐨕', '𐨗'), + ('𐨙', '𐨵'), + ('𐩠', '𐩼'), + ('𐪀', '𐪜'), + ('𐫀', '𐫇'), + ('𐫉', '𐫤'), + ('𐬀', '𐬵'), + ('𐭀', '𐭕'), + ('𐭠', '𐭲'), + ('𐮀', '𐮑'), + ('𐰀', '𐱈'), + ('𐲀', '𐲲'), + ('𐳀', '𐳲'), + ('𐴀', '𐴣'), + ('𐵊', '𐵥'), + ('𐵯', '𐶅'), + ('𐺀', '𐺩'), + ('𐺰', '𐺱'), + ('𐻂', '𐻄'), + ('𐼀', '𐼜'), + ('𐼧', '𐼧'), + ('𐼰', '𐽅'), + ('𐽰', '𐾁'), + ('𐾰', '𐿄'), + ('𐿠', '𐿶'), + ('𑀃', '𑀷'), + ('𑁱', '𑁲'), + ('𑁵', '𑁵'), + ('𑂃', '𑂯'), + ('𑃐', '𑃨'), + ('𑄃', '𑄦'), + ('𑅄', '𑅄'), + ('𑅇', '𑅇'), + ('𑅐', '𑅲'), + ('𑅶', '𑅶'), + ('𑆃', '𑆲'), + ('𑇁', '𑇄'), + ('𑇚', '𑇚'), + ('𑇜', '𑇜'), + ('𑈀', '𑈑'), + ('𑈓', '𑈫'), + ('𑈿', '𑉀'), + ('𑊀', '𑊆'), + ('𑊈', '𑊈'), + ('𑊊', '𑊍'), + ('𑊏', '𑊝'), + ('𑊟', '𑊨'), + ('𑊰', '𑋞'), + ('𑌅', '𑌌'), + ('𑌏', '𑌐'), + ('𑌓', '𑌨'), + ('𑌪', '𑌰'), + ('𑌲', '𑌳'), + ('𑌵', '𑌹'), + ('𑌽', '𑌽'), + ('𑍐', '𑍐'), + ('𑍝', '𑍡'), + ('𑎀', '𑎉'), + ('𑎋', '𑎋'), + ('𑎎', '𑎎'), + ('𑎐', '𑎵'), + ('𑎷', '𑎷'), + ('𑏑', '𑏑'), + ('𑏓', '𑏓'), + ('𑐀', '𑐴'), + ('𑑇', '𑑊'), + ('𑑟', '𑑡'), + ('𑒀', '𑒯'), + ('𑓄', '𑓅'), + ('𑓇', '𑓇'), + ('𑖀', '𑖮'), + ('𑗘', '𑗛'), + ('𑘀', '𑘯'), + ('𑙄', '𑙄'), + ('𑚀', '𑚪'), + ('𑚸', '𑚸'), + ('𑠀', '𑠫'), + ('𑢠', '𑣟'), + ('𑣿', '𑤆'), + ('𑤉', '𑤉'), + ('𑤌', '𑤓'), + ('𑤕', '𑤖'), + ('𑤘', '𑤯'), + ('𑤿', '𑤿'), + ('𑥁', '𑥁'), + ('𑦠', '𑦧'), + ('𑦪', '𑧐'), + ('𑧡', '𑧡'), + ('𑧣', '𑧣'), + ('𑨀', '𑨀'), + ('𑨋', '𑨲'), + ('𑨺', '𑨺'), + ('𑩐', '𑩐'), + ('𑩜', 
'𑪉'), + ('𑪝', '𑪝'), + ('𑪰', '𑫸'), + ('𑯀', '𑯠'), + ('𑰀', '𑰈'), + ('𑰊', '𑰮'), + ('𑱀', '𑱀'), + ('𑱲', '𑲏'), + ('𑴀', '𑴆'), + ('𑴈', '𑴉'), + ('𑴋', '𑴰'), + ('𑵆', '𑵆'), + ('𑵠', '𑵥'), + ('𑵧', '𑵨'), + ('𑵪', '𑶉'), + ('𑶘', '𑶘'), + ('𑻠', '𑻲'), + ('𑼂', '𑼂'), + ('𑼄', '𑼐'), + ('𑼒', '𑼳'), + ('𑾰', '𑾰'), + ('𒀀', '𒎙'), + ('𒐀', '𒑮'), + ('𒒀', '𒕃'), + ('𒾐', '𒿰'), + ('𓀀', '𓐯'), + ('𓑁', '𓑆'), + ('𓑠', '𔏺'), + ('𔐀', '𔙆'), + ('𖄀', '𖄝'), + ('𖠀', '𖨸'), + ('𖩀', '𖩞'), + ('𖩰', '𖪾'), + ('𖫐', '𖫭'), + ('𖬀', '𖬯'), + ('𖭀', '𖭃'), + ('𖭣', '𖭷'), + ('𖭽', '𖮏'), + ('𖵀', '𖵬'), + ('𖹀', '𖹿'), + ('𖼀', '𖽊'), + ('𖽐', '𖽐'), + ('𖾓', '𖾟'), + ('𖿠', '𖿡'), + ('𖿣', '𖿣'), + ('𛰀', '𛱪'), + ('𛱰', '𛱼'), + ('𛲀', '𛲈'), + ('𛲐', '𛲙'), + ('𝐀', '𝑔'), + ('𝑖', '𝒜'), + ('𝒞', '𝒟'), + ('𝒢', '𝒢'), + ('𝒥', '𝒦'), + ('𝒩', '𝒬'), + ('𝒮', '𝒹'), + ('𝒻', '𝒻'), + ('𝒽', '𝓃'), + ('𝓅', '𝔅'), + ('𝔇', '𝔊'), + ('𝔍', '𝔔'), + ('𝔖', '𝔜'), + ('𝔞', '𝔹'), + ('𝔻', '𝔾'), + ('𝕀', '𝕄'), + ('𝕆', '𝕆'), + ('𝕊', '𝕐'), + ('𝕒', '𝚥'), + ('𝚨', '𝛀'), + ('𝛂', '𝛚'), + ('𝛜', '𝛺'), + ('𝛼', '𝜔'), + ('𝜖', '𝜴'), + ('𝜶', '𝝎'), + ('𝝐', '𝝮'), + ('𝝰', '𝞈'), + ('𝞊', '𝞨'), + ('𝞪', '𝟂'), + ('𝟄', '𝟋'), + ('𝼀', '𝼞'), + ('𝼥', '𝼪'), + ('𞀰', '𞁭'), + ('𞄀', '𞄬'), + ('𞄷', '𞄽'), + ('𞅎', '𞅎'), + ('𞊐', '𞊭'), + ('𞋀', '𞋫'), + ('𞓐', '𞓫'), + ('𞗐', '𞗭'), + ('𞗰', '𞗰'), + ('𞟠', '𞟦'), + ('𞟨', '𞟫'), + ('𞟭', '𞟮'), + ('𞟰', '𞟾'), + ('𞠀', '𞣄'), + ('𞤀', '𞥃'), + ('𞥋', '𞥋'), + ('𞸀', '𞸃'), + ('𞸅', '𞸟'), + ('𞸡', '𞸢'), + ('𞸤', '𞸤'), + ('𞸧', '𞸧'), + ('𞸩', '𞸲'), + ('𞸴', '𞸷'), + ('𞸹', '𞸹'), + ('𞸻', '𞸻'), + ('𞹂', '𞹂'), + ('𞹇', '𞹇'), + ('𞹉', '𞹉'), + ('𞹋', '𞹋'), + ('𞹍', '𞹏'), + ('𞹑', '𞹒'), + ('𞹔', '𞹔'), + ('𞹗', '𞹗'), + ('𞹙', '𞹙'), + ('𞹛', '𞹛'), + ('𞹝', '𞹝'), + ('𞹟', '𞹟'), + ('𞹡', '𞹢'), + ('𞹤', '𞹤'), + ('𞹧', '𞹪'), + ('𞹬', '𞹲'), + ('𞹴', '𞹷'), + ('𞹹', '𞹼'), + ('𞹾', '𞹾'), + ('𞺀', '𞺉'), + ('𞺋', '𞺛'), + ('𞺡', '𞺣'), + ('𞺥', '𞺩'), + ('𞺫', '𞺻'), + ('🄰', '🅉'), + ('🅐', '🅩'), + ('🅰', '🆉'), +]; + +pub const CR: &'static [(char, char)] = &[('\r', '\r')]; + +pub const DOUBLE_QUOTE: &'static [(char, char)] = &[('"', '"')]; + +pub const EXTEND: &'static [(char, char)] = &[ + ('\u{300}', '\u{36f}'), + ('\u{483}', '\u{489}'), + ('\u{591}', '\u{5bd}'), + ('\u{5bf}', '\u{5bf}'), + ('\u{5c1}', '\u{5c2}'), + ('\u{5c4}', '\u{5c5}'), + ('\u{5c7}', '\u{5c7}'), + ('\u{610}', '\u{61a}'), + ('\u{64b}', '\u{65f}'), + ('\u{670}', '\u{670}'), + ('\u{6d6}', '\u{6dc}'), + ('\u{6df}', '\u{6e4}'), + ('\u{6e7}', '\u{6e8}'), + ('\u{6ea}', '\u{6ed}'), + ('\u{711}', '\u{711}'), + ('\u{730}', '\u{74a}'), + ('\u{7a6}', '\u{7b0}'), + ('\u{7eb}', '\u{7f3}'), + ('\u{7fd}', '\u{7fd}'), + ('\u{816}', '\u{819}'), + ('\u{81b}', '\u{823}'), + ('\u{825}', '\u{827}'), + ('\u{829}', '\u{82d}'), + ('\u{859}', '\u{85b}'), + ('\u{897}', '\u{89f}'), + ('\u{8ca}', '\u{8e1}'), + ('\u{8e3}', 'ः'), + ('\u{93a}', '\u{93c}'), + ('ा', 'ॏ'), + ('\u{951}', '\u{957}'), + ('\u{962}', '\u{963}'), + ('\u{981}', 'ঃ'), + ('\u{9bc}', '\u{9bc}'), + ('\u{9be}', '\u{9c4}'), + ('ে', 'ৈ'), + ('ো', '\u{9cd}'), + ('\u{9d7}', '\u{9d7}'), + ('\u{9e2}', '\u{9e3}'), + ('\u{9fe}', '\u{9fe}'), + ('\u{a01}', 'ਃ'), + ('\u{a3c}', '\u{a3c}'), + ('ਾ', '\u{a42}'), + ('\u{a47}', '\u{a48}'), + ('\u{a4b}', '\u{a4d}'), + ('\u{a51}', '\u{a51}'), + ('\u{a70}', '\u{a71}'), + ('\u{a75}', '\u{a75}'), + ('\u{a81}', 'ઃ'), + ('\u{abc}', '\u{abc}'), + ('ા', '\u{ac5}'), + ('\u{ac7}', 'ૉ'), + ('ો', '\u{acd}'), + ('\u{ae2}', '\u{ae3}'), + ('\u{afa}', '\u{aff}'), + ('\u{b01}', 'ଃ'), + ('\u{b3c}', '\u{b3c}'), + ('\u{b3e}', '\u{b44}'), + ('େ', 'ୈ'), + ('ୋ', '\u{b4d}'), + ('\u{b55}', '\u{b57}'), + ('\u{b62}', '\u{b63}'), + ('\u{b82}', '\u{b82}'), + 
('\u{bbe}', 'ூ'), + ('ெ', 'ை'), + ('ொ', '\u{bcd}'), + ('\u{bd7}', '\u{bd7}'), + ('\u{c00}', '\u{c04}'), + ('\u{c3c}', '\u{c3c}'), + ('\u{c3e}', 'ౄ'), + ('\u{c46}', '\u{c48}'), + ('\u{c4a}', '\u{c4d}'), + ('\u{c55}', '\u{c56}'), + ('\u{c62}', '\u{c63}'), + ('\u{c81}', 'ಃ'), + ('\u{cbc}', '\u{cbc}'), + ('ಾ', 'ೄ'), + ('\u{cc6}', '\u{cc8}'), + ('\u{cca}', '\u{ccd}'), + ('\u{cd5}', '\u{cd6}'), + ('\u{ce2}', '\u{ce3}'), + ('ೳ', 'ೳ'), + ('\u{d00}', 'ഃ'), + ('\u{d3b}', '\u{d3c}'), + ('\u{d3e}', '\u{d44}'), + ('െ', 'ൈ'), + ('ൊ', '\u{d4d}'), + ('\u{d57}', '\u{d57}'), + ('\u{d62}', '\u{d63}'), + ('\u{d81}', 'ඃ'), + ('\u{dca}', '\u{dca}'), + ('\u{dcf}', '\u{dd4}'), + ('\u{dd6}', '\u{dd6}'), + ('ෘ', '\u{ddf}'), + ('ෲ', 'ෳ'), + ('\u{e31}', '\u{e31}'), + ('\u{e34}', '\u{e3a}'), + ('\u{e47}', '\u{e4e}'), + ('\u{eb1}', '\u{eb1}'), + ('\u{eb4}', '\u{ebc}'), + ('\u{ec8}', '\u{ece}'), + ('\u{f18}', '\u{f19}'), + ('\u{f35}', '\u{f35}'), + ('\u{f37}', '\u{f37}'), + ('\u{f39}', '\u{f39}'), + ('༾', '༿'), + ('\u{f71}', '\u{f84}'), + ('\u{f86}', '\u{f87}'), + ('\u{f8d}', '\u{f97}'), + ('\u{f99}', '\u{fbc}'), + ('\u{fc6}', '\u{fc6}'), + ('ါ', '\u{103e}'), + ('ၖ', '\u{1059}'), + ('\u{105e}', '\u{1060}'), + ('ၢ', 'ၤ'), + ('ၧ', 'ၭ'), + ('\u{1071}', '\u{1074}'), + ('\u{1082}', '\u{108d}'), + ('ႏ', 'ႏ'), + ('ႚ', '\u{109d}'), + ('\u{135d}', '\u{135f}'), + ('\u{1712}', '\u{1715}'), + ('\u{1732}', '\u{1734}'), + ('\u{1752}', '\u{1753}'), + ('\u{1772}', '\u{1773}'), + ('\u{17b4}', '\u{17d3}'), + ('\u{17dd}', '\u{17dd}'), + ('\u{180b}', '\u{180d}'), + ('\u{180f}', '\u{180f}'), + ('\u{1885}', '\u{1886}'), + ('\u{18a9}', '\u{18a9}'), + ('\u{1920}', 'ᤫ'), + ('ᤰ', '\u{193b}'), + ('\u{1a17}', '\u{1a1b}'), + ('ᩕ', '\u{1a5e}'), + ('\u{1a60}', '\u{1a7c}'), + ('\u{1a7f}', '\u{1a7f}'), + ('\u{1ab0}', '\u{1ace}'), + ('\u{1b00}', 'ᬄ'), + ('\u{1b34}', '\u{1b44}'), + ('\u{1b6b}', '\u{1b73}'), + ('\u{1b80}', 'ᮂ'), + ('ᮡ', '\u{1bad}'), + ('\u{1be6}', '\u{1bf3}'), + ('ᰤ', '\u{1c37}'), + ('\u{1cd0}', '\u{1cd2}'), + ('\u{1cd4}', '\u{1ce8}'), + ('\u{1ced}', '\u{1ced}'), + ('\u{1cf4}', '\u{1cf4}'), + ('᳷', '\u{1cf9}'), + ('\u{1dc0}', '\u{1dff}'), + ('\u{200c}', '\u{200c}'), + ('\u{20d0}', '\u{20f0}'), + ('\u{2cef}', '\u{2cf1}'), + ('\u{2d7f}', '\u{2d7f}'), + ('\u{2de0}', '\u{2dff}'), + ('\u{302a}', '\u{302f}'), + ('\u{3099}', '\u{309a}'), + ('\u{a66f}', '\u{a672}'), + ('\u{a674}', '\u{a67d}'), + ('\u{a69e}', '\u{a69f}'), + ('\u{a6f0}', '\u{a6f1}'), + ('\u{a802}', '\u{a802}'), + ('\u{a806}', '\u{a806}'), + ('\u{a80b}', '\u{a80b}'), + ('ꠣ', 'ꠧ'), + ('\u{a82c}', '\u{a82c}'), + ('ꢀ', 'ꢁ'), + ('ꢴ', '\u{a8c5}'), + ('\u{a8e0}', '\u{a8f1}'), + ('\u{a8ff}', '\u{a8ff}'), + ('\u{a926}', '\u{a92d}'), + ('\u{a947}', '\u{a953}'), + ('\u{a980}', 'ꦃ'), + ('\u{a9b3}', '\u{a9c0}'), + ('\u{a9e5}', '\u{a9e5}'), + ('\u{aa29}', '\u{aa36}'), + ('\u{aa43}', '\u{aa43}'), + ('\u{aa4c}', 'ꩍ'), + ('ꩻ', 'ꩽ'), + ('\u{aab0}', '\u{aab0}'), + ('\u{aab2}', '\u{aab4}'), + ('\u{aab7}', '\u{aab8}'), + ('\u{aabe}', '\u{aabf}'), + ('\u{aac1}', '\u{aac1}'), + ('ꫫ', 'ꫯ'), + ('ꫵ', '\u{aaf6}'), + ('ꯣ', 'ꯪ'), + ('꯬', '\u{abed}'), + ('\u{fb1e}', '\u{fb1e}'), + ('\u{fe00}', '\u{fe0f}'), + ('\u{fe20}', '\u{fe2f}'), + ('\u{ff9e}', '\u{ff9f}'), + ('\u{101fd}', '\u{101fd}'), + ('\u{102e0}', '\u{102e0}'), + ('\u{10376}', '\u{1037a}'), + ('\u{10a01}', '\u{10a03}'), + ('\u{10a05}', '\u{10a06}'), + ('\u{10a0c}', '\u{10a0f}'), + ('\u{10a38}', '\u{10a3a}'), + ('\u{10a3f}', '\u{10a3f}'), + ('\u{10ae5}', '\u{10ae6}'), + ('\u{10d24}', '\u{10d27}'), + ('\u{10d69}', '\u{10d6d}'), + ('\u{10eab}', 
'\u{10eac}'), + ('\u{10efc}', '\u{10eff}'), + ('\u{10f46}', '\u{10f50}'), + ('\u{10f82}', '\u{10f85}'), + ('𑀀', '𑀂'), + ('\u{11038}', '\u{11046}'), + ('\u{11070}', '\u{11070}'), + ('\u{11073}', '\u{11074}'), + ('\u{1107f}', '𑂂'), + ('𑂰', '\u{110ba}'), + ('\u{110c2}', '\u{110c2}'), + ('\u{11100}', '\u{11102}'), + ('\u{11127}', '\u{11134}'), + ('𑅅', '𑅆'), + ('\u{11173}', '\u{11173}'), + ('\u{11180}', '𑆂'), + ('𑆳', '\u{111c0}'), + ('\u{111c9}', '\u{111cc}'), + ('𑇎', '\u{111cf}'), + ('𑈬', '\u{11237}'), + ('\u{1123e}', '\u{1123e}'), + ('\u{11241}', '\u{11241}'), + ('\u{112df}', '\u{112ea}'), + ('\u{11300}', '𑌃'), + ('\u{1133b}', '\u{1133c}'), + ('\u{1133e}', '𑍄'), + ('𑍇', '𑍈'), + ('𑍋', '\u{1134d}'), + ('\u{11357}', '\u{11357}'), + ('𑍢', '𑍣'), + ('\u{11366}', '\u{1136c}'), + ('\u{11370}', '\u{11374}'), + ('\u{113b8}', '\u{113c0}'), + ('\u{113c2}', '\u{113c2}'), + ('\u{113c5}', '\u{113c5}'), + ('\u{113c7}', '𑏊'), + ('𑏌', '\u{113d0}'), + ('\u{113d2}', '\u{113d2}'), + ('\u{113e1}', '\u{113e2}'), + ('𑐵', '\u{11446}'), + ('\u{1145e}', '\u{1145e}'), + ('\u{114b0}', '\u{114c3}'), + ('\u{115af}', '\u{115b5}'), + ('𑖸', '\u{115c0}'), + ('\u{115dc}', '\u{115dd}'), + ('𑘰', '\u{11640}'), + ('\u{116ab}', '\u{116b7}'), + ('\u{1171d}', '\u{1172b}'), + ('𑠬', '\u{1183a}'), + ('\u{11930}', '𑤵'), + ('𑤷', '𑤸'), + ('\u{1193b}', '\u{1193e}'), + ('𑥀', '𑥀'), + ('𑥂', '\u{11943}'), + ('𑧑', '\u{119d7}'), + ('\u{119da}', '\u{119e0}'), + ('𑧤', '𑧤'), + ('\u{11a01}', '\u{11a0a}'), + ('\u{11a33}', '𑨹'), + ('\u{11a3b}', '\u{11a3e}'), + ('\u{11a47}', '\u{11a47}'), + ('\u{11a51}', '\u{11a5b}'), + ('\u{11a8a}', '\u{11a99}'), + ('𑰯', '\u{11c36}'), + ('\u{11c38}', '\u{11c3f}'), + ('\u{11c92}', '\u{11ca7}'), + ('𑲩', '\u{11cb6}'), + ('\u{11d31}', '\u{11d36}'), + ('\u{11d3a}', '\u{11d3a}'), + ('\u{11d3c}', '\u{11d3d}'), + ('\u{11d3f}', '\u{11d45}'), + ('\u{11d47}', '\u{11d47}'), + ('𑶊', '𑶎'), + ('\u{11d90}', '\u{11d91}'), + ('𑶓', '\u{11d97}'), + ('\u{11ef3}', '𑻶'), + ('\u{11f00}', '\u{11f01}'), + ('𑼃', '𑼃'), + ('𑼴', '\u{11f3a}'), + ('𑼾', '\u{11f42}'), + ('\u{11f5a}', '\u{11f5a}'), + ('\u{13440}', '\u{13440}'), + ('\u{13447}', '\u{13455}'), + ('\u{1611e}', '\u{1612f}'), + ('\u{16af0}', '\u{16af4}'), + ('\u{16b30}', '\u{16b36}'), + ('\u{16f4f}', '\u{16f4f}'), + ('𖽑', '𖾇'), + ('\u{16f8f}', '\u{16f92}'), + ('\u{16fe4}', '\u{16fe4}'), + ('\u{16ff0}', '\u{16ff1}'), + ('\u{1bc9d}', '\u{1bc9e}'), + ('\u{1cf00}', '\u{1cf2d}'), + ('\u{1cf30}', '\u{1cf46}'), + ('\u{1d165}', '\u{1d169}'), + ('\u{1d16d}', '\u{1d172}'), + ('\u{1d17b}', '\u{1d182}'), + ('\u{1d185}', '\u{1d18b}'), + ('\u{1d1aa}', '\u{1d1ad}'), + ('\u{1d242}', '\u{1d244}'), + ('\u{1da00}', '\u{1da36}'), + ('\u{1da3b}', '\u{1da6c}'), + ('\u{1da75}', '\u{1da75}'), + ('\u{1da84}', '\u{1da84}'), + ('\u{1da9b}', '\u{1da9f}'), + ('\u{1daa1}', '\u{1daaf}'), + ('\u{1e000}', '\u{1e006}'), + ('\u{1e008}', '\u{1e018}'), + ('\u{1e01b}', '\u{1e021}'), + ('\u{1e023}', '\u{1e024}'), + ('\u{1e026}', '\u{1e02a}'), + ('\u{1e08f}', '\u{1e08f}'), + ('\u{1e130}', '\u{1e136}'), + ('\u{1e2ae}', '\u{1e2ae}'), + ('\u{1e2ec}', '\u{1e2ef}'), + ('\u{1e4ec}', '\u{1e4ef}'), + ('\u{1e5ee}', '\u{1e5ef}'), + ('\u{1e8d0}', '\u{1e8d6}'), + ('\u{1e944}', '\u{1e94a}'), + ('🏻', '🏿'), + ('\u{e0020}', '\u{e007f}'), + ('\u{e0100}', '\u{e01ef}'), +]; + +pub const EXTENDNUMLET: &'static [(char, char)] = &[ + ('_', '_'), + ('\u{202f}', '\u{202f}'), + ('‿', '⁀'), + ('⁔', '⁔'), + ('︳', '︴'), + ('﹍', '﹏'), + ('_', '_'), +]; + +pub const FORMAT: &'static [(char, char)] = &[ + ('\u{ad}', '\u{ad}'), + ('\u{61c}', '\u{61c}'), + 
('\u{180e}', '\u{180e}'), + ('\u{200e}', '\u{200f}'), + ('\u{202a}', '\u{202e}'), + ('\u{2060}', '\u{2064}'), + ('\u{2066}', '\u{206f}'), + ('\u{feff}', '\u{feff}'), + ('\u{fff9}', '\u{fffb}'), + ('\u{13430}', '\u{1343f}'), + ('\u{1bca0}', '\u{1bca3}'), + ('\u{1d173}', '\u{1d17a}'), + ('\u{e0001}', '\u{e0001}'), +]; + +pub const HEBREW_LETTER: &'static [(char, char)] = &[ + ('א', 'ת'), + ('ׯ', 'ײ'), + ('יִ', 'יִ'), + ('ײַ', 'ﬨ'), + ('שׁ', 'זּ'), + ('טּ', 'לּ'), + ('מּ', 'מּ'), + ('נּ', 'סּ'), + ('ףּ', 'פּ'), + ('צּ', 'ﭏ'), +]; + +pub const KATAKANA: &'static [(char, char)] = &[ + ('〱', '〵'), + ('゛', '゜'), + ('゠', 'ヺ'), + ('ー', 'ヿ'), + ('ㇰ', 'ㇿ'), + ('㋐', '㋾'), + ('㌀', '㍗'), + ('ヲ', 'ン'), + ('𚿰', '𚿳'), + ('𚿵', '𚿻'), + ('𚿽', '𚿾'), + ('𛀀', '𛀀'), + ('𛄠', '𛄢'), + ('𛅕', '𛅕'), + ('𛅤', '𛅧'), +]; + +pub const LF: &'static [(char, char)] = &[('\n', '\n')]; + +pub const MIDLETTER: &'static [(char, char)] = &[ + (':', ':'), + ('·', '·'), + ('·', '·'), + ('՟', '՟'), + ('״', '״'), + ('‧', '‧'), + ('︓', '︓'), + ('﹕', '﹕'), + (':', ':'), +]; + +pub const MIDNUM: &'static [(char, char)] = &[ + (',', ','), + (';', ';'), + (';', ';'), + ('։', '։'), + ('،', '؍'), + ('٬', '٬'), + ('߸', '߸'), + ('⁄', '⁄'), + ('﹐', '﹐'), + ('﹔', '﹔'), + (',', ','), + (';', ';'), +]; + +pub const MIDNUMLET: &'static [(char, char)] = &[ + ('.', '.'), + ('‘', '’'), + ('․', '․'), + ('﹒', '﹒'), + (''', '''), + ('.', '.'), +]; + +pub const NEWLINE: &'static [(char, char)] = + &[('\u{b}', '\u{c}'), ('\u{85}', '\u{85}'), ('\u{2028}', '\u{2029}')]; + +pub const NUMERIC: &'static [(char, char)] = &[ + ('0', '9'), + ('\u{600}', '\u{605}'), + ('٠', '٩'), + ('٫', '٫'), + ('\u{6dd}', '\u{6dd}'), + ('۰', '۹'), + ('߀', '߉'), + ('\u{890}', '\u{891}'), + ('\u{8e2}', '\u{8e2}'), + ('०', '९'), + ('০', '৯'), + ('੦', '੯'), + ('૦', '૯'), + ('୦', '୯'), + ('௦', '௯'), + ('౦', '౯'), + ('೦', '೯'), + ('൦', '൯'), + ('෦', '෯'), + ('๐', '๙'), + ('໐', '໙'), + ('༠', '༩'), + ('၀', '၉'), + ('႐', '႙'), + ('០', '៩'), + ('᠐', '᠙'), + ('᥆', '᥏'), + ('᧐', '᧚'), + ('᪀', '᪉'), + ('᪐', '᪙'), + ('᭐', '᭙'), + ('᮰', '᮹'), + ('᱀', '᱉'), + ('᱐', '᱙'), + ('꘠', '꘩'), + ('꣐', '꣙'), + ('꤀', '꤉'), + ('꧐', '꧙'), + ('꧰', '꧹'), + ('꩐', '꩙'), + ('꯰', '꯹'), + ('0', '9'), + ('𐒠', '𐒩'), + ('𐴰', '𐴹'), + ('𐵀', '𐵉'), + ('𑁦', '𑁯'), + ('\u{110bd}', '\u{110bd}'), + ('\u{110cd}', '\u{110cd}'), + ('𑃰', '𑃹'), + ('𑄶', '𑄿'), + ('𑇐', '𑇙'), + ('𑋰', '𑋹'), + ('𑑐', '𑑙'), + ('𑓐', '𑓙'), + ('𑙐', '𑙙'), + ('𑛀', '𑛉'), + ('𑛐', '𑛣'), + ('𑜰', '𑜹'), + ('𑣠', '𑣩'), + ('𑥐', '𑥙'), + ('𑯰', '𑯹'), + ('𑱐', '𑱙'), + ('𑵐', '𑵙'), + ('𑶠', '𑶩'), + ('𑽐', '𑽙'), + ('𖄰', '𖄹'), + ('𖩠', '𖩩'), + ('𖫀', '𖫉'), + ('𖭐', '𖭙'), + ('𖵰', '𖵹'), + ('𜳰', '𜳹'), + ('𝟎', '𝟿'), + ('𞅀', '𞅉'), + ('𞋰', '𞋹'), + ('𞓰', '𞓹'), + ('𞗱', '𞗺'), + ('𞥐', '𞥙'), + ('🯰', '🯹'), +]; + +pub const REGIONAL_INDICATOR: &'static [(char, char)] = &[('🇦', '🇿')]; + +pub const SINGLE_QUOTE: &'static [(char, char)] = &[('\'', '\'')]; + +pub const WSEGSPACE: &'static [(char, char)] = &[ + (' ', ' '), + ('\u{1680}', '\u{1680}'), + ('\u{2000}', '\u{2006}'), + ('\u{2008}', '\u{200a}'), + ('\u{205f}', '\u{205f}'), + ('\u{3000}', '\u{3000}'), +]; + +pub const ZWJ: &'static [(char, char)] = &[('\u{200d}', '\u{200d}')]; diff --git a/vendor/regex-syntax/src/utf8.rs b/vendor/regex-syntax/src/utf8.rs new file mode 100644 index 00000000000000..537035ed1d99b3 --- /dev/null +++ b/vendor/regex-syntax/src/utf8.rs @@ -0,0 +1,592 @@ +/*! +Converts ranges of Unicode scalar values to equivalent ranges of UTF-8 bytes. 
+
+This sub-module is useful for constructing byte based automatons that need
+to embed UTF-8 decoding. The most common use of this module is in conjunction
+with the [`hir::ClassUnicodeRange`](crate::hir::ClassUnicodeRange) type.
+
+See the documentation on the `Utf8Sequences` iterator for more details and
+an example.
+
+# Wait, what is this?
+
+This is simplest to explain with an example. Let's say you wanted to test
+whether a particular byte sequence was a Cyrillic character. One possible
+scalar value range is `[0400-04FF]`. The set of allowed bytes for this
+range can be expressed as a sequence of byte ranges:
+
+```text
+[D0-D3][80-BF]
+```
+
+This is simple enough: simply encode the boundaries, `0400` encodes to
+`D0 80` and `04FF` encodes to `D3 BF`, and create ranges from each
+corresponding pair of bytes: `D0` to `D3` and `80` to `BF`.
+
+However, what if you wanted to add the Cyrillic Supplementary characters to
+your range? Your range might then become `[0400-052F]`. The same procedure
+as above doesn't quite work because `052F` encodes to `D4 AF`. The byte ranges
+you'd get from the previous transformation would be `[D0-D4][80-AF]`. However,
+this isn't quite correct because this range doesn't capture many characters,
+for example, `04FF` (because its last byte, `BF` isn't in the range `80-AF`).
+
+Instead, you need multiple sequences of byte ranges:
+
+```text
+[D0-D3][80-BF] # matches codepoints 0400-04FF
+[D4][80-AF] # matches codepoints 0500-052F
+```
+
+This gets even more complicated if you want bigger ranges, particularly if
+they naively contain surrogate codepoints. For example, the sequence of byte
+ranges for the basic multilingual plane (`[0000-FFFF]`) look like this:
+
+```text
+[0-7F]
+[C2-DF][80-BF]
+[E0][A0-BF][80-BF]
+[E1-EC][80-BF][80-BF]
+[ED][80-9F][80-BF]
+[EE-EF][80-BF][80-BF]
+```
+
+Note that the byte ranges above will *not* match any erroneous encoding of
+UTF-8, including encodings of surrogate codepoints.
+
+And, of course, for all of Unicode (`[000000-10FFFF]`):
+
+```text
+[0-7F]
+[C2-DF][80-BF]
+[E0][A0-BF][80-BF]
+[E1-EC][80-BF][80-BF]
+[ED][80-9F][80-BF]
+[EE-EF][80-BF][80-BF]
+[F0][90-BF][80-BF][80-BF]
+[F1-F3][80-BF][80-BF][80-BF]
+[F4][80-8F][80-BF][80-BF]
+```
+
+This module automates the process of creating these byte ranges from ranges of
+Unicode scalar values.
+
+# Lineage
+
+I got the idea and general implementation strategy from Russ Cox in his
+[article on regexps](https://web.archive.org/web/20160404141123/https://swtch.com/~rsc/regexp/regexp3.html) and RE2.
+Russ Cox got it from Ken Thompson's `grep` (no source, folk lore?).
+I also got the idea from
+[Lucene](https://github.com/apache/lucene-solr/blob/ae93f4e7ac6a3908046391de35d4f50a0d3c59ca/lucene/core/src/java/org/apache/lucene/util/automaton/UTF32ToUTF8.java),
+which uses it for executing automata on their term index.
+*/
+
+use core::{char, fmt, iter::FusedIterator, slice};
+
+use alloc::{vec, vec::Vec};
+
+const MAX_UTF8_BYTES: usize = 4;
+
+/// Utf8Sequence represents a sequence of byte ranges.
+///
+/// To match a Utf8Sequence, a candidate byte sequence must match each
+/// successive range.
+///
+/// For example, if there are two ranges, `[C2-DF][80-BF]`, then the byte
+/// sequence `\xDD\x61` would not match because `0x61 < 0x80`.
+#[derive(Copy, Clone, Eq, PartialEq, PartialOrd, Ord)]
+pub enum Utf8Sequence {
+    /// One byte range.
+    One(Utf8Range),
+    /// Two successive byte ranges.
+    Two([Utf8Range; 2]),
+    /// Three successive byte ranges.
+ Three([Utf8Range; 3]), + /// Four successive byte ranges. + Four([Utf8Range; 4]), +} + +impl Utf8Sequence { + /// Creates a new UTF-8 sequence from the encoded bytes of a scalar value + /// range. + /// + /// This assumes that `start` and `end` have the same length. + fn from_encoded_range(start: &[u8], end: &[u8]) -> Self { + assert_eq!(start.len(), end.len()); + match start.len() { + 2 => Utf8Sequence::Two([ + Utf8Range::new(start[0], end[0]), + Utf8Range::new(start[1], end[1]), + ]), + 3 => Utf8Sequence::Three([ + Utf8Range::new(start[0], end[0]), + Utf8Range::new(start[1], end[1]), + Utf8Range::new(start[2], end[2]), + ]), + 4 => Utf8Sequence::Four([ + Utf8Range::new(start[0], end[0]), + Utf8Range::new(start[1], end[1]), + Utf8Range::new(start[2], end[2]), + Utf8Range::new(start[3], end[3]), + ]), + n => unreachable!("invalid encoded length: {n}"), + } + } + + /// Returns the underlying sequence of byte ranges as a slice. + pub fn as_slice(&self) -> &[Utf8Range] { + use self::Utf8Sequence::*; + match *self { + One(ref r) => slice::from_ref(r), + Two(ref r) => &r[..], + Three(ref r) => &r[..], + Four(ref r) => &r[..], + } + } + + /// Returns the number of byte ranges in this sequence. + /// + /// The length is guaranteed to be in the closed interval `[1, 4]`. + pub fn len(&self) -> usize { + self.as_slice().len() + } + + /// Reverses the ranges in this sequence. + /// + /// For example, if this corresponds to the following sequence: + /// + /// ```text + /// [D0-D3][80-BF] + /// ``` + /// + /// Then after reversal, it will be + /// + /// ```text + /// [80-BF][D0-D3] + /// ``` + /// + /// This is useful when one is constructing a UTF-8 automaton to match + /// character classes in reverse. + pub fn reverse(&mut self) { + match *self { + Utf8Sequence::One(_) => {} + Utf8Sequence::Two(ref mut x) => x.reverse(), + Utf8Sequence::Three(ref mut x) => x.reverse(), + Utf8Sequence::Four(ref mut x) => x.reverse(), + } + } + + /// Returns true if and only if a prefix of `bytes` matches this sequence + /// of byte ranges. + pub fn matches(&self, bytes: &[u8]) -> bool { + if bytes.len() < self.len() { + return false; + } + for (&b, r) in bytes.iter().zip(self) { + if !r.matches(b) { + return false; + } + } + true + } +} + +impl<'a> IntoIterator for &'a Utf8Sequence { + type IntoIter = slice::Iter<'a, Utf8Range>; + type Item = &'a Utf8Range; + + fn into_iter(self) -> Self::IntoIter { + self.as_slice().iter() + } +} + +impl fmt::Debug for Utf8Sequence { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + use self::Utf8Sequence::*; + match *self { + One(ref r) => write!(f, "{r:?}"), + Two(ref r) => write!(f, "{:?}{:?}", r[0], r[1]), + Three(ref r) => write!(f, "{:?}{:?}{:?}", r[0], r[1], r[2]), + Four(ref r) => { + write!(f, "{:?}{:?}{:?}{:?}", r[0], r[1], r[2], r[3]) + } + } + } +} + +/// A single inclusive range of UTF-8 bytes. +#[derive(Clone, Copy, Eq, PartialEq, PartialOrd, Ord)] +pub struct Utf8Range { + /// Start of byte range (inclusive). + pub start: u8, + /// End of byte range (inclusive). + pub end: u8, +} + +impl Utf8Range { + fn new(start: u8, end: u8) -> Self { + Utf8Range { start, end } + } + + /// Returns true if and only if the given byte is in this range. 
+    pub fn matches(&self, b: u8) -> bool {
+        self.start <= b && b <= self.end
+    }
+}
+
+impl fmt::Debug for Utf8Range {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        if self.start == self.end {
+            write!(f, "[{:X}]", self.start)
+        } else {
+            write!(f, "[{:X}-{:X}]", self.start, self.end)
+        }
+    }
+}
+
+/// An iterator over ranges of matching UTF-8 byte sequences.
+///
+/// The iteration represents an alternation of comprehensive byte sequences
+/// that match precisely the set of UTF-8 encoded scalar values.
+///
+/// A byte sequence corresponds to one of the scalar values in the range given
+/// if and only if it completely matches exactly one of the sequences of byte
+/// ranges produced by this iterator.
+///
+/// Each sequence of byte ranges matches a unique set of bytes. That is, no two
+/// sequences will match the same bytes.
+///
+/// # Example
+///
+/// This shows how to match an arbitrary byte sequence against a range of
+/// scalar values.
+///
+/// ```rust
+/// use regex_syntax::utf8::{Utf8Sequences, Utf8Sequence};
+///
+/// fn matches(seqs: &[Utf8Sequence], bytes: &[u8]) -> bool {
+///     for range in seqs {
+///         if range.matches(bytes) {
+///             return true;
+///         }
+///     }
+///     false
+/// }
+///
+/// // Test the basic multilingual plane.
+/// let seqs: Vec<_> = Utf8Sequences::new('\u{0}', '\u{FFFF}').collect();
+///
+/// // UTF-8 encoding of 'a'.
+/// assert!(matches(&seqs, &[0x61]));
+/// // UTF-8 encoding of '☃' (`\u{2603}`).
+/// assert!(matches(&seqs, &[0xE2, 0x98, 0x83]));
+/// // UTF-8 encoding of `\u{10348}` (outside the BMP).
+/// assert!(!matches(&seqs, &[0xF0, 0x90, 0x8D, 0x88]));
+/// // Tries to match against a UTF-8 encoding of a surrogate codepoint,
+/// // which is invalid UTF-8, and therefore fails, despite the fact that
+/// // the corresponding codepoint (0xD800) falls in the range given.
+/// assert!(!matches(&seqs, &[0xED, 0xA0, 0x80]));
+/// // And fails against plain old invalid UTF-8.
+/// assert!(!matches(&seqs, &[0xFF, 0xFF]));
+/// ```
+///
+/// If this example seems circuitous, that's because it is! It's meant to be
+/// illustrative. In practice, you could just try to decode your byte sequence
+/// and compare it with the scalar value range directly. However, this is not
+/// always possible (for example, in a byte based automaton).
+#[derive(Debug)]
+pub struct Utf8Sequences {
+    range_stack: Vec<ScalarRange>,
+}
+
+impl Utf8Sequences {
+    /// Create a new iterator over UTF-8 byte ranges for the scalar value range
+    /// given.
+    pub fn new(start: char, end: char) -> Self {
+        let range =
+            ScalarRange { start: u32::from(start), end: u32::from(end) };
+        Utf8Sequences { range_stack: vec![range] }
+    }
+
+    /// reset resets the scalar value range.
+    /// Any existing state is cleared, but resources may be reused.
+    ///
+    /// N.B. Benchmarks say that this method is dubious.
+    #[doc(hidden)]
+    pub fn reset(&mut self, start: char, end: char) {
+        self.range_stack.clear();
+        self.push(u32::from(start), u32::from(end));
+    }
+
+    fn push(&mut self, start: u32, end: u32) {
+        self.range_stack.push(ScalarRange { start, end });
+    }
+}
+
+struct ScalarRange {
+    start: u32,
+    end: u32,
+}
+
+impl fmt::Debug for ScalarRange {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(f, "ScalarRange({:X}, {:X})", self.start, self.end)
+    }
+}
+
+impl Iterator for Utf8Sequences {
+    type Item = Utf8Sequence;
+
+    fn next(&mut self) -> Option<Self::Item> {
+        'TOP: while let Some(mut r) = self.range_stack.pop() {
+            'INNER: loop {
+                if let Some((r1, r2)) = r.split() {
+                    self.push(r2.start, r2.end);
+                    r.start = r1.start;
+                    r.end = r1.end;
+                    continue 'INNER;
+                }
+                if !r.is_valid() {
+                    continue 'TOP;
+                }
+                for i in 1..MAX_UTF8_BYTES {
+                    let max = max_scalar_value(i);
+                    if r.start <= max && max < r.end {
+                        self.push(max + 1, r.end);
+                        r.end = max;
+                        continue 'INNER;
+                    }
+                }
+                if let Some(ascii_range) = r.as_ascii() {
+                    return Some(Utf8Sequence::One(ascii_range));
+                }
+                for i in 1..MAX_UTF8_BYTES {
+                    let m = (1 << (6 * i)) - 1;
+                    if (r.start & !m) != (r.end & !m) {
+                        if (r.start & m) != 0 {
+                            self.push((r.start | m) + 1, r.end);
+                            r.end = r.start | m;
+                            continue 'INNER;
+                        }
+                        if (r.end & m) != m {
+                            self.push(r.end & !m, r.end);
+                            r.end = (r.end & !m) - 1;
+                            continue 'INNER;
+                        }
+                    }
+                }
+                let mut start = [0; MAX_UTF8_BYTES];
+                let mut end = [0; MAX_UTF8_BYTES];
+                let n = r.encode(&mut start, &mut end);
+                return Some(Utf8Sequence::from_encoded_range(
+                    &start[0..n],
+                    &end[0..n],
+                ));
+            }
+        }
+        None
+    }
+}
+
+impl FusedIterator for Utf8Sequences {}
+
+impl ScalarRange {
+    /// split splits this range if it overlaps with a surrogate codepoint.
+    ///
+    /// Either or both ranges may be invalid.
+    fn split(&self) -> Option<(ScalarRange, ScalarRange)> {
+        if self.start < 0xE000 && self.end > 0xD7FF {
+            Some((
+                ScalarRange { start: self.start, end: 0xD7FF },
+                ScalarRange { start: 0xE000, end: self.end },
+            ))
+        } else {
+            None
+        }
+    }
+
+    /// is_valid returns true if and only if start <= end.
+    fn is_valid(&self) -> bool {
+        self.start <= self.end
+    }
+
+    /// as_ascii returns this range as a Utf8Range if and only if all scalar
+    /// values in this range can be encoded as a single byte.
+    fn as_ascii(&self) -> Option<Utf8Range> {
+        if self.is_ascii() {
+            let start = u8::try_from(self.start).unwrap();
+            let end = u8::try_from(self.end).unwrap();
+            Some(Utf8Range::new(start, end))
+        } else {
+            None
+        }
+    }
+
+    /// is_ascii returns true if the range is ASCII only (i.e., takes a single
+    /// byte to encode any scalar value).
+    fn is_ascii(&self) -> bool {
+        self.is_valid() && self.end <= 0x7f
+    }
+
+    /// encode writes the UTF-8 encoding of the start and end of this range
+    /// to the corresponding destination slices, and returns the number of
+    /// bytes written.
+    ///
+    /// The slices should have room for at least `MAX_UTF8_BYTES`.
+    fn encode(&self, start: &mut [u8], end: &mut [u8]) -> usize {
+        let cs = char::from_u32(self.start).unwrap();
+        let ce = char::from_u32(self.end).unwrap();
+        let ss = cs.encode_utf8(start);
+        let se = ce.encode_utf8(end);
+        assert_eq!(ss.len(), se.len());
+        ss.len()
+    }
+}
+
+fn max_scalar_value(nbytes: usize) -> u32 {
+    match nbytes {
+        1 => 0x007F,
+        2 => 0x07FF,
+        3 => 0xFFFF,
+        4 => 0x0010_FFFF,
+        _ => unreachable!("invalid UTF-8 byte sequence size"),
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use core::char;
+
+    use alloc::{vec, vec::Vec};
+
+    use crate::utf8::{Utf8Range, Utf8Sequences};
+
+    fn rutf8(s: u8, e: u8) -> Utf8Range {
+        Utf8Range::new(s, e)
+    }
+
+    fn never_accepts_surrogate_codepoints(start: char, end: char) {
+        for cp in 0xD800..0xE000 {
+            let buf = encode_surrogate(cp);
+            for r in Utf8Sequences::new(start, end) {
+                if r.matches(&buf) {
+                    panic!(
+                        "Sequence ({:X}, {:X}) contains range {:?}, \
+                         which matches surrogate code point {:X} \
+                         with encoded bytes {:?}",
+                        u32::from(start),
+                        u32::from(end),
+                        r,
+                        cp,
+                        buf,
+                    );
+                }
+            }
+        }
+    }
+
+    #[test]
+    fn codepoints_no_surrogates() {
+        never_accepts_surrogate_codepoints('\u{0}', '\u{FFFF}');
+        never_accepts_surrogate_codepoints('\u{0}', '\u{10FFFF}');
+        never_accepts_surrogate_codepoints('\u{0}', '\u{10FFFE}');
+        never_accepts_surrogate_codepoints('\u{80}', '\u{10FFFF}');
+        never_accepts_surrogate_codepoints('\u{D7FF}', '\u{E000}');
+    }
+
+    #[test]
+    fn single_codepoint_one_sequence() {
+        // Tests that every range of scalar values that contains a single
+        // scalar value is recognized by one sequence of byte ranges.
+        for i in 0x0..=0x0010_FFFF {
+            let c = match char::from_u32(i) {
+                None => continue,
+                Some(c) => c,
+            };
+            let seqs: Vec<_> = Utf8Sequences::new(c, c).collect();
+            assert_eq!(seqs.len(), 1);
+        }
+    }
+
+    #[test]
+    fn bmp() {
+        use crate::utf8::Utf8Sequence::*;
+
+        let seqs = Utf8Sequences::new('\u{0}', '\u{FFFF}').collect::<Vec<_>>();
+        assert_eq!(
+            seqs,
+            vec![
+                One(rutf8(0x0, 0x7F)),
+                Two([rutf8(0xC2, 0xDF), rutf8(0x80, 0xBF)]),
+                Three([
+                    rutf8(0xE0, 0xE0),
+                    rutf8(0xA0, 0xBF),
+                    rutf8(0x80, 0xBF)
+                ]),
+                Three([
+                    rutf8(0xE1, 0xEC),
+                    rutf8(0x80, 0xBF),
+                    rutf8(0x80, 0xBF)
+                ]),
+                Three([
+                    rutf8(0xED, 0xED),
+                    rutf8(0x80, 0x9F),
+                    rutf8(0x80, 0xBF)
+                ]),
+                Three([
+                    rutf8(0xEE, 0xEF),
+                    rutf8(0x80, 0xBF),
+                    rutf8(0x80, 0xBF)
+                ]),
+            ]
+        );
+    }
+
+    #[test]
+    fn reverse() {
+        use crate::utf8::Utf8Sequence::*;
+
+        let mut s = One(rutf8(0xA, 0xB));
+        s.reverse();
+        assert_eq!(s.as_slice(), &[rutf8(0xA, 0xB)]);
+
+        let mut s = Two([rutf8(0xA, 0xB), rutf8(0xB, 0xC)]);
+        s.reverse();
+        assert_eq!(s.as_slice(), &[rutf8(0xB, 0xC), rutf8(0xA, 0xB)]);
+
+        let mut s = Three([rutf8(0xA, 0xB), rutf8(0xB, 0xC), rutf8(0xC, 0xD)]);
+        s.reverse();
+        assert_eq!(
+            s.as_slice(),
+            &[rutf8(0xC, 0xD), rutf8(0xB, 0xC), rutf8(0xA, 0xB)]
+        );
+
+        let mut s = Four([
+            rutf8(0xA, 0xB),
+            rutf8(0xB, 0xC),
+            rutf8(0xC, 0xD),
+            rutf8(0xD, 0xE),
+        ]);
+        s.reverse();
+        assert_eq!(
+            s.as_slice(),
+            &[
+                rutf8(0xD, 0xE),
+                rutf8(0xC, 0xD),
+                rutf8(0xB, 0xC),
+                rutf8(0xA, 0xB)
+            ]
+        );
+    }
+
+    fn encode_surrogate(cp: u32) -> [u8; 3] {
+        const TAG_CONT: u8 = 0b1000_0000;
+        const TAG_THREE_B: u8 = 0b1110_0000;
+
+        assert!(0xD800 <= cp && cp < 0xE000);
+        let mut dst = [0; 3];
+        dst[0] = u8::try_from(cp >> 12 & 0x0F).unwrap() | TAG_THREE_B;
+        dst[1] = u8::try_from(cp >> 6 & 0x3F).unwrap() | TAG_CONT;
+        dst[2] = u8::try_from(cp & 0x3F).unwrap() | TAG_CONT;
+        dst
+    }
+}
diff --git a/vendor/regex-syntax/test b/vendor/regex-syntax/test
new file
mode 100755 index 00000000000000..8626c3bfccbabd --- /dev/null +++ b/vendor/regex-syntax/test @@ -0,0 +1,30 @@ +#!/bin/bash + +set -e + +# cd to the directory containing this crate's Cargo.toml so that we don't need +# to pass --manifest-path to every `cargo` command. +cd "$(dirname "$0")" + +# This is a convenience script for running a broad swath of the syntax tests. +echo "===== DEFAULT FEATURES ===" +cargo test + +features=( + std + unicode + unicode-age + unicode-bool + unicode-case + unicode-gencat + unicode-perl + unicode-script + unicode-segment +) +for f in "${features[@]}"; do + echo "=== FEATURE: $f ===" + # We only run library tests because I couldn't figure out how to easily + # make doc tests run in 'no_std' mode. In particular, without the Error + # trait, using '?' in doc tests seems tricky. + cargo test --no-default-features --lib --features "$f" +done diff --git a/vendor/regex/.cargo-checksum.json b/vendor/regex/.cargo-checksum.json new file mode 100644 index 00000000000000..334adc622d2168 --- /dev/null +++ b/vendor/regex/.cargo-checksum.json @@ -0,0 +1 @@ +{"files":{".cargo_vcs_info.json":"5b9b771da9f8ff576a830ff12bc819aa5fad56cb5c079f3d87cb5ddf7e79b9b2",".vim/coc-settings.json":"87b0e2edd6fc8170b3f918bfbf92a78cd77a15033f718a8733c6d6277bf3e1fe","CHANGELOG.md":"154fdf1ae0e8cbc50e8cb8457f61c403c5d9a1a53cef78f19e48660af6e5d22a","Cargo.lock":"b089faa224c30c8416766f4289c9c4319a2cf88c1884d20754e9845b9e1e0c71","Cargo.toml":"709b6ec1da93140957cb14d7b57367e0aa180c8efc26368f761c34682f67f0bc","Cargo.toml.orig":"8501f3490d81b4d822457510173df5d2edfbdd70851ff0fb798681adeaf6b9ae","Cross.toml":"4a11d6c63ecc919016b59fa0fe23674eb05682fb91ffbe677a4a7077e9e684ff","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"6485b8ed310d3f0340bf1ad1f47645069ce4069dcc6bb46c7d5c6faf41de1fdb","README.md":"2e5ffce9b5781a2c286517f0fb81e7e00d9736ffa938c9a34b5e92f30352a115","UNICODE.md":"91ee848bf40a67626940d242d3ef05e90c7d5ef72d23bcf626033b5394aee0ea","bench/README.md":"0aee42206b0e5edcb400a11faa2c536f512bcc6086e5ffdda001b9bfe4d19808","rustfmt.toml":"1ca600239a27401c4a43f363cf3f38183a212affc1f31bff3ae93234bbaec228","src/builders.rs":"d08f5867d8b994395546e318860d05e00cd70347223505b43d578b8d1477fe8f","src/bytes.rs":"cce2b7012f5896cf82fc3086bf8128dc9efe2b69bf6917d041c1a171eabacdc0","src/error.rs":"362c126a701852b355906acdb2c19ee31230570a408bbe52deb2803a1dc77039","src/find_byte.rs":"e17cd3b765467685946707840b92ea4e37d3c11081fbf316174a15858cd4bd99","src/lib.rs":"033460754d7a51fb9fa90ad096f76dbaaf10dc4c49f1195bb088fe23d35ded75","src/pattern.rs":"53971d02dde4f8e69055c36e7c56c6c872f0302161bf0977a02b97dc8a152d46","src/regex/bytes.rs":"fae9e125ff320e85fe5e59e2a32ae24d85f6ca9f38c737c4e929a8376b9b53b0","src/regex/mod.rs":"c220b6dd7a5e1945f8e743d1dcd796c5f782c91b0c34eb9915c588174a517fe8","src/regex/string.rs":"9f7686e10535fe385a767063132d39ee1a1af1a20a119d78df479f110822e274","src/regexset/bytes.rs":"25c8d896e4b9caf627cce46e3c305d2e640aeeacea96c40526699f86960d1868","src/regexset/mod.rs":"c220b6dd7a5e1945f8e743d1dcd796c5f782c91b0c34eb9915c588174a517fe8","src/regexset/string.rs":"ac3fc9c8d2d58379e63bcd92ab2f8ee1c32a1210dceec63925d0c23f1d9dfedd","test":"c0122c20a2c9b7ba6e9a8aaeb2b7d9910315ef31063539949f28d9501ef3193c","testdata/README.md":"8c06d771da52048ac5b67de8b61f386a4aa70c904a7da4efec1aa86c710b0be5","testdata/anchored.toml":"7a1b5cd81deed2099796a451bf764a3f9bd21f0d60c0fa46accd3a35666866f2","testdata/bytes.toml":"1d84179165fd25f3b94bd2bfbeb43fc8a162041f7bf98b717e0f85cef7fb65
2b","testdata/crazy.toml":"a146e2d2e23f1a57168979d9b1fc193c2ba38dca66294b61140d6d2a2958ec86","testdata/crlf.toml":"d19cf22756434d145dd20946c00af01c102a556a252070405c3c8294129d9ece","testdata/earliest.toml":"d561e643623ee1889b5b049fdcf3c7cb71b0c746d7eb822ddbd09d0acda2620b","testdata/empty.toml":"738dbe92fbd8971385a1cf3affb0e956e5b692c858b9b48439d718f10801c08e","testdata/expensive.toml":"5ce2f60209c99cdd2cdcb9d3069d1d5ca13d5e08a85e913efe57267b2f5f0e9d","testdata/flags.toml":"9a7e001808195c84f2a7d3e18bc0a82c7386e60f03a616e99af00c3f7f2c3fd4","testdata/fowler/basic.toml":"a82c7e233451cd7cfe0c3d817f3a1ab44478bb81ae62432efdd515fa8370275e","testdata/fowler/dat/README":"e53d6c37b5931cb26dc9ae4c40358eea63f7a469c4db6ca816c072a8ced6a61a","testdata/fowler/dat/basic.dat":"b1126dda59075c08f574987090273c9977790115f1e1941d0708c0b82b256905","testdata/fowler/dat/nullsubexpr.dat":"f880940907754dbfddee886605b65f9e743a820411c3955b31ddeb494d07e839","testdata/fowler/dat/repetition.dat":"2b8b2b191229a804fba49e6b888d8194bf488f7744057b550da9d95a2aa6617a","testdata/fowler/nullsubexpr.toml":"cd812e7e8fa0469253b34f0db93b5883c9d8b9740fc4f7825a38e7df880a4eed","testdata/fowler/repetition.toml":"8c09164f064b3db81309c53483863bdcec493781644de162416e9f485e772615","testdata/iter.toml":"6875460302974a5b3073a7304a865c45aba9653c54afea2c4d26e1ea248a81f7","testdata/leftmost-all.toml":"903bfbeff888b7664296f4d5aa367ce53d1dafe249ab0a3359223ae94d596396","testdata/line-terminator.toml":"02148068137b69d95587966917bdf0697bf7eb41ad6d47387f2eb30f67d04fd9","testdata/misc.toml":"32c9591655c6fb118dfefcb4de49a04820a63cb960533dfc2538cdaabf4f4047","testdata/multiline.toml":"eb07cf5427e6ddbcf61f4cc64c2d74ff41b5ef75ef857959651b20196f3cd157","testdata/no-unicode.toml":"d209da04506900fd5f69e48170cddaad0702355ac6176c3a75ab3ff96974457c","testdata/overlapping.toml":"5d96497a7233566d40b05ba22047e483fa8662e45515a9be86da45cf6c28703a","testdata/regex-lite.toml":"fecca7cc8c9cea2e1f84f846a89fd9b3ca7011c83698211a2eeda8924deb900c","testdata/regression.toml":"6006ef4fcfbfd7155ce5ce8b8427904f7261c5549396f20cb065c0294733686d","testdata/set.toml":"dfd265dc1aee80026e881616840df0236ae9abf12467d7ec0e141a52c236128c","testdata/substring.toml":"48122d9f3477ed81f95e3ad42c06e9bb25f849b66994601a75ceae0693b81866","testdata/unicode.toml":"7e4b013039b0cdd85fa73f32d15d096182fe901643d4e40c0910087a736cd46d","testdata/utf8.toml":"2eabce0582bcacb2073e08bbe7ca413f096d14d06e917b107949691e24f84b20","testdata/word-boundary-special.toml":"7d0ea2f796478d1ca2a6954430cb1cfbd04031a182f8611cb50a7c73e443ce33","testdata/word-boundary.toml":"51bc1c498ab825420340a2dd3e6623de4054937ba6d5020ff8cd14b1c1e45271","tests/lib.rs":"9bffc95568c09ac95b6a3e7ca64b6e858a0552d0c0b0fca2c447da3b9c0a45a2","tests/misc.rs":"1aeadbeb8860bd5f5b99a0adb459baf77dd3af4f23ac6c56ecf537f793407cca","tests/regression.rs":"3490aac99fdbf3f0949ba1f338d5184a84b505ebd96d0b6d6145c610587aa60b","tests/regression_fuzz.rs":"57e0bcba0fdfa7797865e35ae547cd7fe1c6132b80a7bfdfb06eb053a568b00d","tests/replace.rs":"78ff9bf7f78783ad83a78041bb7ee0705c7efc85b4d12301581d0ce5b2a59325","tests/searcher.rs":"04152e5c86431deec0c196d2564a11bc4ec36f14c77e8c16a2f9d1cbc9fc574e","tests/suite_bytes.rs":"75fb0a332527c36d31e126f6032c8ccf1f81ba47ee785affa834404bc1a79f4c","tests/suite_bytes_set.rs":"db85513e87429fc68904270a0f414e75ae0b7c6b7deb1c66f05eb4f98b09c67a","tests/suite_string.rs":"249d707dba99d23ada40558a7526f028c1d3fdf715d3866a106f3435da01bf66","tests/suite_string_set.rs":"c839fb3c08a23348230591a49118406373353bc6c9a87528f36e4e9635e7b9ac"},"package":"843
bc0191f75f3e22651ae5f1e72939ab2f72a4bc30fa80a066bd66edefc24d4"} \ No newline at end of file diff --git a/vendor/regex/.cargo_vcs_info.json b/vendor/regex/.cargo_vcs_info.json new file mode 100644 index 00000000000000..de15d531d9c53f --- /dev/null +++ b/vendor/regex/.cargo_vcs_info.json @@ -0,0 +1,6 @@ +{ + "git": { + "sha1": "5ea3eb1e95f0338e283f5f0b4681f0891a1cd836" + }, + "path_in_vcs": "" +} \ No newline at end of file diff --git a/vendor/regex/.vim/coc-settings.json b/vendor/regex/.vim/coc-settings.json new file mode 100644 index 00000000000000..d75676750938f6 --- /dev/null +++ b/vendor/regex/.vim/coc-settings.json @@ -0,0 +1,6 @@ +{ + "rust-analyzer.linkedProjects": [ + "fuzz/Cargo.toml", + "Cargo.toml" + ] +} diff --git a/vendor/regex/CHANGELOG.md b/vendor/regex/CHANGELOG.md new file mode 100644 index 00000000000000..1bd16a1e53deea --- /dev/null +++ b/vendor/regex/CHANGELOG.md @@ -0,0 +1,1742 @@ +1.12.2 (2025-10-13) +=================== +This release fixes a `cargo doc` breakage on nightly when `--cfg docsrs` is +enabled. This caused documentation to fail to build on docs.rs. + +Bug fixes: + +* [BUG #1305](https://github.com/rust-lang/regex/issues/1305): +Switches the `doc_auto_cfg` feature to `doc_cfg` on nightly for docs.rs builds. + + +1.12.1 (2025-10-10) +=================== +This release makes a bug fix in the new `regex::Captures::get_match` API +introduced in `1.12.0`. There was an oversight with the lifetime parameter +for the `Match` returned. This is technically a breaking change, but given +that it was caught almost immediately and I've yanked the `1.12.0` release, +I think this is fine. + + +1.12.0 (2025-10-10) +=================== +This release contains a smattering of bug fixes, a fix for excessive memory +consumption in some cases and a new `regex::Captures::get_match` API. + +Improvements: + +* [FEATURE #1146](https://github.com/rust-lang/regex/issues/1146): +Add `Capture::get_match` for returning the overall match without `unwrap()`. + +Bug fixes: + +* [BUG #1083](https://github.com/rust-lang/regex/issues/1083): +Fixes a panic in the lazy DFA (can only occur for especially large regexes). +* [BUG #1116](https://github.com/rust-lang/regex/issues/1116): +Fixes a memory usage regression for large regexes (introduced in `regex 1.9`). +* [BUG #1195](https://github.com/rust-lang/regex/issues/1195): +Fix universal start states in sparse DFA. +* [BUG #1295](https://github.com/rust-lang/regex/pull/1295): +Fixes a panic when deserializing a corrupted dense DFA. +* [BUG 8f5d9479](https://github.com/rust-lang/regex/commit/8f5d9479d0f1da5726488a530d7fd66a73d05b80): +Make `regex_automata::meta::Regex::find` consistently return `None` when +`WhichCaptures::None` is used. + + +1.11.3 (2025-09-25) +=================== +This is a small patch release with an improvement in memory usage in some +cases. + +Improvements: + +* [BUG #1297](https://github.com/rust-lang/regex/issues/1297): +Improve memory usage by trimming excess memory capacity in some spots. + + +1.11.2 (2025-08-24) +=================== +This is a new patch release of `regex` with some minor fixes. A larger number +of typo or lint fix patches were merged. Also, we now finally recommend using +`std::sync::LazyLock`. + +Improvements: + +* [BUG #1217](https://github.com/rust-lang/regex/issues/1217): +Switch recommendation from `once_cell` to `std::sync::LazyLock`. +* [BUG #1225](https://github.com/rust-lang/regex/issues/1225): +Add `DFA::set_prefilter` to `regex-automata`. 
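The `std::sync::LazyLock` recommendation in the 1.11.2 notes above replaces the older `lazy_static`/`once_cell` pattern for compiling a regex once and reusing it across a program. A minimal sketch of that pattern (assuming the `regex` crate as a dependency and Rust 1.80+, where `LazyLock` is stable):

```rust
use std::sync::LazyLock;

use regex::Regex;

// Compile the pattern once, on first use; the same Regex is then shared
// by every caller for the lifetime of the program.
static WORD: LazyLock<Regex> = LazyLock::new(|| Regex::new(r"\w+").unwrap());

fn main() {
    assert!(WORD.is_match("hello"));
}
```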
+ +Bug fixes: + +* [BUG #1165](https://github.com/rust-lang/regex/pull/1150): +Remove `std` dependency from `perf-literal-multisubstring` crate feature. +* [BUG #1165](https://github.com/rust-lang/regex/pull/1165): +Clarify the meaning of `(?R)$` in the documentation. +* [BUG #1281](https://github.com/rust-lang/regex/pull/1281): +Remove `fuzz/` and `record/` directories from published crate on crates.io. + + +1.11.1 (2024-10-24) +=================== +This is a new patch release of `regex` that fixes compilation on nightly +Rust when the unstable `pattern` crate feature is enabled. Users on nightly +Rust without this feature enabled are unaffected. + +Bug fixes: + +* [BUG #1231](https://github.com/rust-lang/regex/issues/1231): +Fix the `Pattern` trait implementation as a result of nightly API breakage. + + +1.11.0 (2024-09-29) +=================== +This is a new minor release of `regex` that brings in an update to the +Unicode Character Database. Specifically, this updates the Unicode data +used by `regex` internally to the version 16 release. + +New features: + +* [FEATURE #1228](https://github.com/rust-lang/regex/pull/1228): +Add new `regex::SetMatches::matched_all` method. +* [FEATURE #1229](https://github.com/rust-lang/regex/pull/1229): +Update to Unicode Character Database (UCD) version 16. + + +1.10.6 (2024-08-02) +=================== +This is a new patch release with a fix for the `unstable` crate feature that +enables `std::str::Pattern` trait integration. + +Bug fixes: + +* [BUG #1219](https://github.com/rust-lang/regex/pull/1219): +Fix the `Pattern` trait implementation as a result of nightly API breakage. + + +1.10.5 (2024-06-09) +=================== +This is a new patch release with some minor fixes. + +Bug fixes: + +* [BUG #1203](https://github.com/rust-lang/regex/pull/1203): +Escape invalid UTF-8 when in the `Debug` impl of `regex::bytes::Match`. + + +1.10.4 (2024-03-22) +=================== +This is a new patch release with some minor fixes. + +* [BUG #1169](https://github.com/rust-lang/regex/issues/1169): +Fixes a bug with compiling a reverse NFA automaton in `regex-automata`. +* [BUG #1178](https://github.com/rust-lang/regex/pull/1178): +Clarifies that when `Cow::Borrowed` is returned from replace APIs, it is +equivalent to the input. + + +1.10.3 (2024-01-21) +=================== +This is a new patch release that fixes the feature configuration of optional +dependencies, and fixes an unsound use of bounds check elision. + +Bug fixes: + +* [BUG #1147](https://github.com/rust-lang/regex/issues/1147): +Set `default-features=false` for the `memchr` and `aho-corasick` dependencies. +* [BUG #1154](https://github.com/rust-lang/regex/pull/1154): +Fix unsound bounds check elision. + + +1.10.2 (2023-10-16) +=================== +This is a new patch release that fixes a search regression where incorrect +matches could be reported. + +Bug fixes: + +* [BUG #1110](https://github.com/rust-lang/regex/issues/1110): +Revert broadening of reverse suffix literal optimization introduced in 1.10.1. + + +1.10.1 (2023-10-14) +=================== +This is a new patch release with a minor increase in the number of valid +patterns and a broadening of some literal optimizations. + +New features: + +* [FEATURE 04f5d7be](https://github.com/rust-lang/regex/commit/04f5d7be4efc542864cc400f5d43fbea4eb9bab6): +Loosen ASCII-compatible rules such that regexes like `(?-u:☃)` are now allowed. 
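The loosened ASCII-compatible rules in the last feature above mean that a non-ASCII literal may now appear inside a `(?-u:...)` group, where it simply matches its own UTF-8 bytes. A minimal sketch, using `regex::bytes::Regex` so the pattern is applied to raw bytes (the snowman pattern comes from the note above; the haystacks are illustrative):

```rust
use regex::bytes::Regex;

fn main() {
    // With Unicode mode disabled, the snowman literal matches the three
    // UTF-8 bytes 0xE2 0x98 0x83 rather than a Unicode-aware character class.
    let re = Regex::new(r"(?-u:☃)").unwrap();
    assert!(re.is_match("snow: ☃".as_bytes()));
    assert!(!re.is_match(b"no snowman here"));
}
```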
+ +Performance improvements: + +* [PERF 8a8d599f](https://github.com/rust-lang/regex/commit/8a8d599f9d2f2d78e9ad84e4084788c2d563afa5): +Broader the reverse suffix optimization to apply in more cases. + + +1.10.0 (2023-10-09) +=================== +This is a new minor release of `regex` that adds support for start and end +word boundary assertions. That is, `\<` and `\>`. The minimum supported Rust +version has also been raised to 1.65, which was released about one year ago. + +The new word boundary assertions are: + +* `\<` or `\b{start}`: a Unicode start-of-word boundary (`\W|\A` on the left, +`\w` on the right). +* `\>` or `\b{end}`: a Unicode end-of-word boundary (`\w` on the left, `\W|\z` +on the right). +* `\b{start-half}`: half of a Unicode start-of-word boundary (`\W|\A` on the +left). +* `\b{end-half}`: half of a Unicode end-of-word boundary (`\W|\z` on the +right). + +The `\<` and `\>` are GNU extensions to POSIX regexes. They have been added +to the `regex` crate because they enjoy somewhat broad support in other regex +engines as well (for example, vim). The `\b{start}` and `\b{end}` assertions +are aliases for `\<` and `\>`, respectively. + +The `\b{start-half}` and `\b{end-half}` assertions are not found in any +other regex engine (although regex engines with general look-around support +can certainly express them). They were added principally to support the +implementation of word matching in grep programs, where one generally wants to +be a bit more flexible in what is considered a word boundary. + +New features: + +* [FEATURE #469](https://github.com/rust-lang/regex/issues/469): +Add support for `\<` and `\>` word boundary assertions. +* [FEATURE(regex-automata) #1031](https://github.com/rust-lang/regex/pull/1031): +DFAs now have a `start_state` method that doesn't use an `Input`. + +Performance improvements: + +* [PERF #1051](https://github.com/rust-lang/regex/pull/1051): +Unicode character class operations have been optimized in `regex-syntax`. +* [PERF #1090](https://github.com/rust-lang/regex/issues/1090): +Make patterns containing lots of literal characters use less memory. + +Bug fixes: + +* [BUG #1046](https://github.com/rust-lang/regex/issues/1046): +Fix a bug that could result in incorrect match spans when using a Unicode word +boundary and searching non-ASCII strings. +* [BUG(regex-syntax) #1047](https://github.com/rust-lang/regex/issues/1047): +Fix panics that can occur in `Ast->Hir` translation (not reachable from `regex` +crate). +* [BUG(regex-syntax) #1088](https://github.com/rust-lang/regex/issues/1088): +Remove guarantees in the API that connect the `u` flag with a specific HIR +representation. + +`regex-automata` breaking change release: + +This release includes a `regex-automata 0.4.0` breaking change release, which +was necessary in order to support the new word boundary assertions. For +example, the `Look` enum has new variants and the `LookSet` type now uses `u32` +instead of `u16` to represent a bitset of look-around assertions. These are +overall very minor changes, and most users of `regex-automata` should be able +to move to `0.4` from `0.3` without any changes at all. + +`regex-syntax` breaking change release: + +This release also includes a `regex-syntax 0.8.0` breaking change release, +which, like `regex-automata`, was necessary in order to support the new word +boundary assertions. This release also includes some changes to the `Ast` +type to reduce heap usage in some cases. 
If you are using the `Ast` type
+directly, your code may require some minor modifications. Otherwise, users of
+`regex-syntax 0.7` should be able to migrate to `0.8` without any code changes.
+
+`regex-lite` release:
+
+The `regex-lite 0.1.1` release contains support for the new word boundary
+assertions. There are no breaking changes.
+
+
+1.9.6 (2023-09-30)
+==================
+This is a patch release that fixes a panic that can occur when the default
+regex size limit is increased to a large number.
+
+* [BUG aa4e4c71](https://github.com/rust-lang/regex/commit/aa4e4c7120b0090ce0624e3c42a2ed06dd8b918a):
+Fix a bug where computing the maximum haystack length for the bounded
+backtracker could result in an underflow and thus provoke a panic later in a
+search due to a broken invariant.
+
+
+1.9.5 (2023-09-02)
+==================
+This is a patch release that hopefully mostly fixes a performance bug that
+occurs when sharing a regex across multiple threads.
+
+Issue [#934](https://github.com/rust-lang/regex/issues/934)
+explains this in more detail. It is [also noted in the crate
+documentation](https://docs.rs/regex/latest/regex/#sharing-a-regex-across-threads-can-result-in-contention).
+The bug can appear when sharing a regex across multiple threads simultaneously,
+as might be the case when using a regex from a `OnceLock`, `lazy_static` or
+similar primitive. Usually high contention only results when using many threads
+to execute searches on small haystacks.
+
+One can avoid the contention problem entirely through one of two methods.
+The first is to use lower level APIs from `regex-automata` that require passing
+state explicitly, such as [`meta::Regex::search_with`](https://docs.rs/regex-automata/latest/regex_automata/meta/struct.Regex.html#method.search_with).
+The second is to clone a regex and send it to other threads explicitly. This
+will not use any additional memory compared to sharing the regex. The
+only downside of this approach is that it may be less convenient, for example,
+it won't work with things like `OnceLock` or `lazy_static` or `once_cell`.
+
+With that said, as of this release, the contention performance problems have
+been greatly reduced. This was achieved by changing the free-list so that it
+is sharded across threads, and by ensuring that each sharded mutex occupies a
+single cache line to mitigate false sharing. So while contention may still
+impact performance in some cases, it should be a lot better now.
+
+Because of the changes to how the free-list works, please report any issues you
+find with this release. That not only includes search time regressions but also
+significant regressions in memory usage. Reporting improvements is welcome
+as well! If possible, provide a reproduction.
+
+Bug fixes:
+
+* [BUG #934](https://github.com/rust-lang/regex/issues/934):
+Fix a performance bug where high contention on a single regex led to massive
+slow-downs.
+
+
+1.9.4 (2023-08-26)
+==================
+This is a patch release that fixes a bug where `RegexSet::is_match(..)` could
+incorrectly return false (even when `RegexSet::matches(..).matched_any()`
+returns true).
+
+Bug fixes:
+
+* [BUG #1070](https://github.com/rust-lang/regex/issues/1070):
+Fix a bug where a prefilter was incorrectly configured for a `RegexSet`.
+
+
+1.9.3 (2023-08-05)
+==================
+This is a patch release that fixes a bug where some searches could result in
+incorrect match offsets being reported. It is difficult to characterize the
+types of regexes susceptible to this bug.
They generally involve patterns +that contain no prefix or suffix literals, but have an inner literal along with +a regex prefix that can conditionally match. + +Bug fixes: + +* [BUG #1060](https://github.com/rust-lang/regex/issues/1060): +Fix a bug with the reverse inner literal optimization reporting incorrect match +offsets. + + +1.9.2 (2023-08-05) +================== +This is a patch release that fixes another memory usage regression. This +particular regression occurred only when using a `RegexSet`. In some cases, +much more heap memory (by one or two orders of magnitude) was allocated than in +versions prior to 1.9.0. + +Bug fixes: + +* [BUG #1059](https://github.com/rust-lang/regex/issues/1059): +Fix a memory usage regression when using a `RegexSet`. + + +1.9.1 (2023-07-07) +================== +This is a patch release which fixes a memory usage regression. In the regex +1.9 release, one of the internal engines used a more aggressive allocation +strategy than what was done previously. This patch release reverts to the +prior on-demand strategy. + +Bug fixes: + +* [BUG #1027](https://github.com/rust-lang/regex/issues/1027): +Change the allocation strategy for the backtracker to be less aggressive. + + +1.9.0 (2023-07-05) +================== +This release marks the end of a [years long rewrite of the regex crate +internals](https://github.com/rust-lang/regex/issues/656). Since this is +such a big release, please report any issues or regressions you find. We would +also love to hear about improvements as well. + +In addition to many internal improvements that should hopefully result in +"my regex searches are faster," there have also been a few API additions: + +* A new `Captures::extract` method for quickly accessing the substrings +that match each capture group in a regex. +* A new inline flag, `R`, which enables CRLF mode. This makes `.` match any +Unicode scalar value except for `\r` and `\n`, and also makes `(?m:^)` and +`(?m:$)` match after and before both `\r` and `\n`, respectively, but never +between a `\r` and `\n`. +* `RegexBuilder::line_terminator` was added to further customize the line +terminator used by `(?m:^)` and `(?m:$)` to be any arbitrary byte. +* The `std` Cargo feature is now actually optional. That is, the `regex` crate +can be used without the standard library. +* Because `regex 1.9` may make binary size and compile times even worse, a +new experimental crate called `regex-lite` has been published. It prioritizes +binary size and compile times over functionality (like Unicode) and +performance. It shares no code with the `regex` crate. + +New features: + +* [FEATURE #244](https://github.com/rust-lang/regex/issues/244): +One can opt into CRLF mode via the `R` flag. +e.g., `(?mR:$)` matches just before `\r\n`. +* [FEATURE #259](https://github.com/rust-lang/regex/issues/259): +Multi-pattern searches with offsets can be done with `regex-automata 0.3`. +* [FEATURE #476](https://github.com/rust-lang/regex/issues/476): +`std` is now an optional feature. `regex` may be used with only `alloc`. +* [FEATURE #644](https://github.com/rust-lang/regex/issues/644): +`RegexBuilder::line_terminator` configures how `(?m:^)` and `(?m:$)` behave. +* [FEATURE #675](https://github.com/rust-lang/regex/issues/675): +Anchored search APIs are now available in `regex-automata 0.3`. +* [FEATURE #824](https://github.com/rust-lang/regex/issues/824): +Add new `Captures::extract` method for easier capture group access. 
+* [FEATURE #961](https://github.com/rust-lang/regex/issues/961): +Add `regex-lite` crate with smaller binary sizes and faster compile times. +* [FEATURE #1022](https://github.com/rust-lang/regex/pull/1022): +Add `TryFrom` implementations for the `Regex` type. + +Performance improvements: + +* [PERF #68](https://github.com/rust-lang/regex/issues/68): +Added a one-pass DFA engine for faster capture group matching. +* [PERF #510](https://github.com/rust-lang/regex/issues/510): +Inner literals are now used to accelerate searches, e.g., `\w+@\w+` will scan +for `@`. +* [PERF #787](https://github.com/rust-lang/regex/issues/787), +[PERF #891](https://github.com/rust-lang/regex/issues/891): +Makes literal optimizations apply to regexes of the form `\b(foo|bar|quux)\b`. + +(There are many more performance improvements as well, but not all of them have +specific issues devoted to them.) + +Bug fixes: + +* [BUG #429](https://github.com/rust-lang/regex/issues/429): +Fix matching bugs related to `\B` and inconsistencies across internal engines. +* [BUG #517](https://github.com/rust-lang/regex/issues/517): +Fix matching bug with capture groups. +* [BUG #579](https://github.com/rust-lang/regex/issues/579): +Fix matching bug with word boundaries. +* [BUG #779](https://github.com/rust-lang/regex/issues/779): +Fix bug where some regexes like `(re)+` were not equivalent to `(re)(re)*`. +* [BUG #850](https://github.com/rust-lang/regex/issues/850): +Fix matching bug inconsistency between NFA and DFA engines. +* [BUG #921](https://github.com/rust-lang/regex/issues/921): +Fix matching bug where literal extraction got confused by `$`. +* [BUG #976](https://github.com/rust-lang/regex/issues/976): +Add documentation to replacement routines about dealing with fallibility. +* [BUG #1002](https://github.com/rust-lang/regex/issues/1002): +Use corpus rejection in fuzz testing. + + +1.8.4 (2023-06-05) +================== +This is a patch release that fixes a bug where `(?-u:\B)` was allowed in +Unicode regexes, despite the fact that the current matching engines can report +match offsets between the code units of a single UTF-8 encoded codepoint. That +in turn means that match offsets that split a codepoint could be reported, +which in turn results in panicking when one uses them to slice a `&str`. + +This bug occurred in the transition to `regex 1.8` because the underlying +syntactical error that prevented this regex from compiling was intentionally +removed. That's because `(?-u:\B)` will be permitted in Unicode regexes in +`regex 1.9`, but the matching engines will guarantee to never report match +offsets that split a codepoint. When the underlying syntactical error was +removed, no code was added to ensure that `(?-u:\B)` didn't compile in the +`regex 1.8` transition release. This release, `regex 1.8.4`, adds that code +such that `Regex::new(r"(?-u:\B)")` returns to the `regex <1.8` behavior of +not compiling. (A `bytes::Regex` can still of course compile it.) + +Bug fixes: + +* [BUG #1006](https://github.com/rust-lang/regex/issues/1006): +Fix a bug where `(?-u:\B)` was allowed in Unicode regexes, and in turn could +lead to match offsets that split a codepoint in `&str`. + + +1.8.3 (2023-05-25) +================== +This is a patch release that fixes a bug where the regex would report a +match at every position even when it shouldn't. This could occur in a very +small subset of regexes, usually an alternation of simple literals that +have particular properties. 
(See the issue linked below for a more precise +description.) + +Bug fixes: + +* [BUG #999](https://github.com/rust-lang/regex/issues/999): +Fix a bug where a match at every position is erroneously reported. + + +1.8.2 (2023-05-22) +================== +This is a patch release that fixes a bug where regex compilation could panic +in debug mode for regexes with large counted repetitions. For example, +`a{2147483516}{2147483416}{5}` resulted in an integer overflow that wrapped +in release mode but panicking in debug mode. Despite the unintended wrapping +arithmetic in release mode, it didn't cause any other logical bugs since the +errant code was for new analysis that wasn't used yet. + +Bug fixes: + +* [BUG #995](https://github.com/rust-lang/regex/issues/995): +Fix a bug where regex compilation with large counted repetitions could panic. + + +1.8.1 (2023-04-21) +================== +This is a patch release that fixes a bug where a regex match could be reported +where none was found. Specifically, the bug occurs when a pattern contains some +literal prefixes that could be extracted _and_ an optional word boundary in the +prefix. + +Bug fixes: + +* [BUG #981](https://github.com/rust-lang/regex/issues/981): +Fix a bug where a word boundary could interact with prefix literal +optimizations and lead to a false positive match. + + +1.8.0 (2023-04-20) +================== +This is a sizeable release that will be soon followed by another sizeable +release. Both of them will combined close over 40 existing issues and PRs. + +This first release, despite its size, essentially represents preparatory work +for the second release, which will be even bigger. Namely, this release: + +* Increases the MSRV to Rust 1.60.0, which was released about 1 year ago. +* Upgrades its dependency on `aho-corasick` to the recently released 1.0 +version. +* Upgrades its dependency on `regex-syntax` to the simultaneously released +`0.7` version. The changes to `regex-syntax` principally revolve around a +rewrite of its literal extraction code and a number of simplifications and +optimizations to its high-level intermediate representation (HIR). + +The second release, which will follow ~shortly after the release above, will +contain a soup-to-nuts rewrite of every regex engine. This will be done by +bringing [`regex-automata`](https://github.com/BurntSushi/regex-automata) into +this repository, and then changing the `regex` crate to be nothing but an API +shim layer on top of `regex-automata`'s API. + +These tandem releases are the culmination of about 3 +years of on-and-off work that [began in earnest in March +2020](https://github.com/rust-lang/regex/issues/656). + +Because of the scale of changes involved in these releases, I would love to +hear about your experience. Especially if you notice undocumented changes in +behavior or performance changes (positive *or* negative). + +Most changes in the first release are listed below. For more details, please +see the commit log, which reflects a linear and decently documented history +of all changes. + +New features: + +* [FEATURE #501](https://github.com/rust-lang/regex/issues/501): +Permit many more characters to be escaped, even if they have no significance. +More specifically, any ASCII character except for `[0-9A-Za-z<>]` can now be +escaped. Also, a new routine, `is_escapeable_character`, has been added to +`regex-syntax` to query whether a character is escapable or not. +* [FEATURE #547](https://github.com/rust-lang/regex/issues/547): +Add `Regex::captures_at`. 
This fills a hole in the API, but doesn't otherwise
+introduce any new expressive power.
+* [FEATURE #595](https://github.com/rust-lang/regex/issues/595):
+Capture group names are now Unicode-aware. They can now begin with either a `_`
+or any "alphabetic" codepoint. After the first codepoint, subsequent codepoints
+can be any sequence of alphanumeric codepoints, along with `_`, `.`, `[` and
+`]`. Note that replacement syntax has not changed.
+* [FEATURE #810](https://github.com/rust-lang/regex/issues/810):
+Add `Match::is_empty` and `Match::len` APIs.
+* [FEATURE #905](https://github.com/rust-lang/regex/issues/905):
+Add an `impl Default for RegexSet`, with the default being the empty set.
+* [FEATURE #908](https://github.com/rust-lang/regex/issues/908):
+A new method, `Regex::static_captures_len`, has been added which returns the
+number of capture groups in the pattern if and only if every possible match
+always contains the same number of matching groups.
+* [FEATURE #955](https://github.com/rust-lang/regex/issues/955):
+Named captures can now be written as `(?<name>re)` in addition to
+`(?P<name>re)`.
+* FEATURE: `regex-syntax` now supports empty character classes.
+* FEATURE: `regex-syntax` now has an optional `std` feature. (This will come
+to `regex` in the second release.)
+* FEATURE: The `Hir` type in `regex-syntax` has had a number of simplifications
+made to it.
+* FEATURE: `regex-syntax` has support for a new `R` flag for enabling CRLF
+mode. This will be supported in `regex` proper in the second release.
+* FEATURE: `regex-syntax` now has proper support for "regex that never
+matches" via `Hir::fail()`.
+* FEATURE: The `hir::literal` module of `regex-syntax` has been completely
+re-worked. It now has more documentation, examples and advice.
+* FEATURE: The `allow_invalid_utf8` option in `regex-syntax` has been renamed
+to `utf8`, and the meaning of the boolean has been flipped.
+
+Performance improvements:
+
+* PERF: The upgrade to `aho-corasick 1.0` may improve performance in some
+cases. It's difficult to characterize exactly which patterns this might impact,
+but if there are a small number of longish (>= 4 bytes) prefix literals, then
+it might be faster than before.
+
+Bug fixes:
+
+* [BUG #514](https://github.com/rust-lang/regex/issues/514):
+Improve `Debug` impl for `Match` so that it doesn't show the entire haystack.
+* BUGS [#516](https://github.com/rust-lang/regex/issues/516),
+[#731](https://github.com/rust-lang/regex/issues/731):
+Fix a number of issues with printing `Hir` values as regex patterns.
+* [BUG #610](https://github.com/rust-lang/regex/issues/610):
+Add explicit example of `foo|bar` in the regex syntax docs.
+* [BUG #625](https://github.com/rust-lang/regex/issues/625):
+Clarify that `SetMatches::len` does not (regrettably) refer to the number of
+matches in the set.
+* [BUG #660](https://github.com/rust-lang/regex/issues/660):
+Clarify "verbose mode" in regex syntax documentation.
+* BUG [#738](https://github.com/rust-lang/regex/issues/738),
+[#950](https://github.com/rust-lang/regex/issues/950):
+Fix `CaptureLocations::get` so that it never panics.
+* [BUG #747](https://github.com/rust-lang/regex/issues/747):
+Clarify documentation for `Regex::shortest_match`.
+* [BUG #835](https://github.com/rust-lang/regex/issues/835):
+Fix `\p{Sc}` so that it is equivalent to `\p{Currency_Symbol}`.
+* [BUG #846](https://github.com/rust-lang/regex/issues/846):
+Add more clarifying documentation to the `CompiledTooBig` error variant.
+* [BUG #854](https://github.com/rust-lang/regex/issues/854): +Clarify that `regex::Regex` searches as if the haystack is a sequence of +Unicode scalar values. +* [BUG #884](https://github.com/rust-lang/regex/issues/884): +Replace `__Nonexhaustive` variants with `#[non_exhaustive]` attribute. +* [BUG #893](https://github.com/rust-lang/regex/pull/893): +Optimize case folding since it can get quite slow in some pathological cases. +* [BUG #895](https://github.com/rust-lang/regex/issues/895): +Reject `(?-u:\W)` in `regex::Regex` APIs. +* [BUG #942](https://github.com/rust-lang/regex/issues/942): +Add a missing `void` keyword to indicate "no parameters" in C API. +* [BUG #965](https://github.com/rust-lang/regex/issues/965): +Fix `\p{Lc}` so that it is equivalent to `\p{Cased_Letter}`. +* [BUG #975](https://github.com/rust-lang/regex/issues/975): +Clarify documentation for `\pX` syntax. + + +1.7.3 (2023-03-24) +================== +This is a small release that fixes a bug in `Regex::shortest_match_at` that +could cause it to panic, even when the offset given is valid. + +Bug fixes: + +* [BUG #969](https://github.com/rust-lang/regex/issues/969): + Fix a bug in how the reverse DFA was called for `Regex::shortest_match_at`. + + +1.7.2 (2023-03-21) +================== +This is a small release that fixes a failing test on FreeBSD. + +Bug fixes: + +* [BUG #967](https://github.com/rust-lang/regex/issues/967): + Fix "no stack overflow" test which can fail due to the small stack size. + + +1.7.1 (2023-01-09) +================== +This release was done principally to try and fix the doc.rs rendering for the +regex crate. + +Performance improvements: + +* [PERF #930](https://github.com/rust-lang/regex/pull/930): + Optimize `replacen`. This also applies to `replace`, but not `replace_all`. + +Bug fixes: + +* [BUG #945](https://github.com/rust-lang/regex/issues/945): + Maybe fix rustdoc rendering by just bumping a new release? + + +1.7.0 (2022-11-05) +================== +This release principally includes an upgrade to Unicode 15. + +New features: + +* [FEATURE #832](https://github.com/rust-lang/regex/issues/916): + Upgrade to Unicode 15. + + +1.6.0 (2022-07-05) +================== +This release principally includes an upgrade to Unicode 14. + +New features: + +* [FEATURE #832](https://github.com/rust-lang/regex/pull/832): + Clarify that `Captures::len` includes all groups, not just matching groups. +* [FEATURE #857](https://github.com/rust-lang/regex/pull/857): + Add an `ExactSizeIterator` impl for `SubCaptureMatches`. +* [FEATURE #861](https://github.com/rust-lang/regex/pull/861): + Improve `RegexSet` documentation examples. +* [FEATURE #877](https://github.com/rust-lang/regex/issues/877): + Upgrade to Unicode 14. + +Bug fixes: + +* [BUG #792](https://github.com/rust-lang/regex/issues/792): + Fix error message rendering bug. + + +1.5.6 (2022-05-20) +================== +This release includes a few bug fixes, including a bug that produced incorrect +matches when a non-greedy `?` operator was used. + +* [BUG #680](https://github.com/rust-lang/regex/issues/680): + Fixes a bug where `[[:alnum:][:^ascii:]]` dropped `[:alnum:]` from the class. +* [BUG #859](https://github.com/rust-lang/regex/issues/859): + Fixes a bug where `Hir::is_match_empty` returned `false` for `\b`. +* [BUG #862](https://github.com/rust-lang/regex/issues/862): + Fixes a bug where 'ab??' matches 'ab' instead of 'a' in 'ab'. + + +1.5.5 (2022-03-08) +================== +This releases fixes a security bug in the regex compiler. 
This bug permits a +vector for a denial-of-service attack in cases where the regex being compiled +is untrusted. There are no known problems where the regex is itself trusted, +including in cases of untrusted haystacks. + +* [SECURITY #GHSA-m5pq-gvj9-9vr8](https://github.com/rust-lang/regex/security/advisories/GHSA-m5pq-gvj9-9vr8): + Fixes a bug in the regex compiler where empty sub-expressions subverted the + existing mitigations in place to enforce a size limit on compiled regexes. + The Rust Security Response WG published an advisory about this: + https://groups.google.com/g/rustlang-security-announcements/c/NcNNL1Jq7Yw + + +1.5.4 (2021-05-06) +================== +This release fixes another compilation failure when building regex. This time, +the fix is for when the `pattern` feature is enabled, which only works on +nightly Rust. CI has been updated to test this case. + +* [BUG #772](https://github.com/rust-lang/regex/pull/772): + Fix build when `pattern` feature is enabled. + + +1.5.3 (2021-05-01) +================== +This releases fixes a bug when building regex with only the `unicode-perl` +feature. It turns out that while CI was building this configuration, it wasn't +actually failing the overall build on a failed compilation. + +* [BUG #769](https://github.com/rust-lang/regex/issues/769): + Fix build in `regex-syntax` when only the `unicode-perl` feature is enabled. + + +1.5.2 (2021-05-01) +================== +This release fixes a performance bug when Unicode word boundaries are used. +Namely, for certain regexes on certain inputs, it's possible for the lazy DFA +to stop searching (causing a fallback to a slower engine) when it doesn't +actually need to. + +[PR #768](https://github.com/rust-lang/regex/pull/768) fixes the bug, which was +originally reported in +[ripgrep#1860](https://github.com/BurntSushi/ripgrep/issues/1860). + + +1.5.1 (2021-04-30) +================== +This is a patch release that fixes a compilation error when the `perf-literal` +feature is not enabled. + + +1.5.0 (2021-04-30) +================== +This release primarily updates to Rust 2018 (finally) and bumps the MSRV to +Rust 1.41 (from Rust 1.28). Rust 1.41 was chosen because it's still reasonably +old, and is what's in Debian stable at the time of writing. + +This release also drops this crate's own bespoke substring search algorithms +in favor of a new +[`memmem` implementation provided by the `memchr` crate](https://docs.rs/memchr/2.4.0/memchr/memmem/index.html). +This will change the performance profile of some regexes, sometimes getting a +little worse, and hopefully more frequently, getting a lot better. Please +report any serious performance regressions if you find them. + + +1.4.6 (2021-04-22) +================== +This is a small patch release that fixes the compiler's size check on how much +heap memory a regex uses. Previously, the compiler did not account for the +heap usage of Unicode character classes. Now it does. It's possible that this +may make some regexes fail to compile that previously did compile. If that +happens, please file an issue. + +* [BUG OSS-fuzz#33579](https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=33579): + Some regexes can use more heap memory than one would expect. + + +1.4.5 (2021-03-14) +================== +This is a small patch release that fixes a regression in the size of a `Regex` +in the 1.4.4 release. Prior to 1.4.4, a `Regex` was 552 bytes. In the 1.4.4 +release, it was 856 bytes due to internal changes. In this release, a `Regex` +is now 16 bytes. 
In general, the size of a `Regex` was never something that was
+on my radar, but this increased size in the 1.4.4 release seems to have crossed
+a threshold and resulted in stack overflows in some programs.
+
+* [BUG #750](https://github.com/rust-lang/regex/pull/750):
+ Fixes stack overflows seemingly caused by a large `Regex` size by decreasing
+ its size.
+
+
+1.4.4 (2021-03-11)
+==================
+This is a small patch release that contains some bug fixes. Notably, it also
+drops the `thread_local` (and `lazy_static`, via transitivity) dependencies.
+
+Bug fixes:
+
+* [BUG #362](https://github.com/rust-lang/regex/pull/362):
+ Memory leaks caused by an internal caching strategy should now be fixed.
+* [BUG #576](https://github.com/rust-lang/regex/pull/576):
+ All regex types now implement `UnwindSafe` and `RefUnwindSafe`.
+* [BUG #728](https://github.com/rust-lang/regex/pull/749):
+ Add missing `Replacer` impls for `Vec<u8>`, `String`, `Cow`, etc.
+
+
+1.4.3 (2021-01-08)
+==================
+This is a small patch release that adds some missing standard trait
+implementations for some types in the public API.
+
+Bug fixes:
+
+* [BUG #734](https://github.com/rust-lang/regex/pull/734):
+ Add `FusedIterator` and `ExactSizeIterator` impls to iterator types.
+* [BUG #735](https://github.com/rust-lang/regex/pull/735):
+ Add missing `Debug` impls to public API types.
+
+
+1.4.2 (2020-11-01)
+==================
+This is a small bug fix release that bans `\P{any}`. We previously banned empty
+classes like `[^\w\W]`, but missed the `\P{any}` case. In the future, we hope
+to permit empty classes.
+
+* [BUG #722](https://github.com/rust-lang/regex/issues/722):
+ Ban `\P{any}` to avoid a panic in the regex compiler. Found by OSS-Fuzz.
+
+
+1.4.1 (2020-10-13)
+==================
+This is a small bug fix release that makes `\p{cf}` work. Previously, it would
+report "property not found" even though `cf` is a valid abbreviation for the
+`Format` general category.
+
+* [BUG #719](https://github.com/rust-lang/regex/issues/719):
+ Fixes bug that prevented `\p{cf}` from working.
+
+
+1.4.0 (2020-10-11)
+==================
+This release has a few minor documentation fixes as well as some very minor
+API additions. The MSRV remains at Rust 1.28 for now, but this is intended to
+increase to at least Rust 1.41.1 soon.
+
+This release also adds support for OSS-Fuzz. Kudos to
+[@DavidKorczynski](https://github.com/DavidKorczynski)
+for doing the heavy lifting for that!
+
+New features:
+
+* [FEATURE #649](https://github.com/rust-lang/regex/issues/649):
+ Support `[`, `]` and `.` in capture group names.
+* [FEATURE #687](https://github.com/rust-lang/regex/issues/687):
+ Add `is_empty` predicate to `RegexSet`.
+* [FEATURE #689](https://github.com/rust-lang/regex/issues/689):
+ Implement `Clone` for `SubCaptureMatches`.
+* [FEATURE #715](https://github.com/rust-lang/regex/issues/715):
+ Add `empty` constructor to `RegexSet` for convenience.
+
+Bug fixes:
+
+* [BUG #694](https://github.com/rust-lang/regex/issues/694):
+ Fix doc example for `Replacer::replace_append`.
+* [BUG #698](https://github.com/rust-lang/regex/issues/698):
+ Clarify docs for `s` flag when using a `bytes::Regex`.
+* [BUG #711](https://github.com/rust-lang/regex/issues/711):
+ Clarify `is_match` docs to indicate that it can match anywhere in string.
+
+
+1.3.9 (2020-05-28)
+==================
+This release fixes an MSRV (Minimum Supported Rust Version) regression in the
+1.3.8 release.
Namely, while 1.3.8 compiles on Rust 1.28, it actually does not
+compile on other Rust versions, such as Rust 1.39.
+
+Bug fixes:
+
+* [BUG #685](https://github.com/rust-lang/regex/issues/685):
+ Remove use of `doc_comment` crate, which cannot be used before Rust 1.43.
+
+
+1.3.8 (2020-05-28)
+==================
+This release contains a couple of important bug fixes driven
+by better support for empty sub-expressions in regexes. For
+example, regexes like `b|` are now allowed. Major thanks to
+[@sliquister](https://github.com/sliquister) for implementing support for this
+in [#677](https://github.com/rust-lang/regex/pull/677).
+
+Bug fixes:
+
+* [BUG #523](https://github.com/rust-lang/regex/pull/523):
+ Add note to documentation that spaces can be escaped in `x` mode.
+* [BUG #524](https://github.com/rust-lang/regex/issues/524):
+ Add support for empty sub-expressions, including empty alternations.
+* [BUG #659](https://github.com/rust-lang/regex/issues/659):
+ Fix match bug caused by an empty sub-expression miscompilation.
+
+
+1.3.7 (2020-04-17)
+==================
+This release contains a small bug fix that fixes how `regex` forwards crate
+features to `regex-syntax`. In particular, this will reduce recompilations in
+some cases.
+
+Bug fixes:
+
+* [BUG #665](https://github.com/rust-lang/regex/pull/665):
+ Fix feature forwarding to `regex-syntax`.
+
+
+1.3.6 (2020-03-24)
+==================
+This release contains a sizable (~30%) performance improvement when compiling
+some kinds of large regular expressions.
+
+Performance improvements:
+
+* [PERF #657](https://github.com/rust-lang/regex/pull/657):
+ Improve performance of compiling large regular expressions.
+
+
+1.3.5 (2020-03-12)
+==================
+This release updates this crate to Unicode 13.
+
+New features:
+
+* [FEATURE #653](https://github.com/rust-lang/regex/pull/653):
+ Update `regex-syntax` to Unicode 13.
+
+
+1.3.4 (2020-01-30)
+==================
+This is a small bug fix release that fixes a bug related to the scoping of
+flags in a regex. Namely, before this fix, a regex like `((?i)a)b` would
+match `aB` despite the fact that `b` should not be matched case insensitively.
+
+Bug fixes:
+
+* [BUG #640](https://github.com/rust-lang/regex/issues/640):
+ Fix bug related to the scoping of flags in a regex.
+
+
+1.3.3 (2020-01-09)
+==================
+This is a small maintenance release that upgrades the dependency on
+`thread_local` from `0.3` to `1.0`. The minimum supported Rust version remains
+at Rust 1.28.
+
+
+1.3.2 (2020-01-09)
+==================
+This is a small maintenance release with some house cleaning and bug fixes.
+
+New features:
+
+* [FEATURE #631](https://github.com/rust-lang/regex/issues/631):
+ Add a `Match::range` method and a `From<Match> for Range<usize>` impl.
+
+Bug fixes:
+
+* [BUG #521](https://github.com/rust-lang/regex/issues/521):
+ Corrects `/-/.splitn("a", 2)` to return `["a"]` instead of `["a", ""]`.
+* [BUG #594](https://github.com/rust-lang/regex/pull/594):
+ Improve error reporting when writing `\p\`.
+* [BUG #627](https://github.com/rust-lang/regex/issues/627):
+ Corrects `/-/.split("a-")` to return `["a", ""]` instead of `["a"]`.
+* [BUG #633](https://github.com/rust-lang/regex/pull/633):
+ Squash deprecation warnings for the `std::error::Error::description` method.
+
+
+1.3.1 (2019-09-04)
+==================
+This is a maintenance release with no changes in order to try to work around
+a [docs.rs/Cargo issue](https://github.com/rust-lang/docs.rs/issues/400).
+ + +1.3.0 (2019-09-03) +================== +This release adds a plethora of new crate features that permit users of regex +to shrink its size considerably, in exchange for giving up either functionality +(such as Unicode support) or runtime performance. When all such features are +disabled, the dependency tree for `regex` shrinks to exactly 1 crate +(`regex-syntax`). More information about the new crate features can be +[found in the docs](https://docs.rs/regex/*/#crate-features). + +Note that while this is a new minor version release, the minimum supported +Rust version for this crate remains at `1.28.0`. + +New features: + +* [FEATURE #474](https://github.com/rust-lang/regex/issues/474): + The `use_std` feature has been deprecated in favor of the `std` feature. + The `use_std` feature will be removed in regex 2. Until then, `use_std` will + remain as an alias for the `std` feature. +* [FEATURE #583](https://github.com/rust-lang/regex/issues/583): + Add a substantial number of crate features shrinking `regex`. + + +1.2.1 (2019-08-03) +================== +This release does a bit of house cleaning. Namely: + +* This repository is now using rustfmt. +* License headers have been removed from all files, in following suit with the + Rust project. +* Teddy has been removed from the `regex` crate, and is now part of the + `aho-corasick` crate. + [See `aho-corasick`'s new `packed` submodule for details](https://docs.rs/aho-corasick/0.7.6/aho_corasick/packed/index.html). +* The `utf8-ranges` crate has been deprecated, with its functionality moving + into the + [`utf8` sub-module of `regex-syntax`](https://docs.rs/regex-syntax/0.6.11/regex_syntax/utf8/index.html). +* The `ucd-util` dependency has been dropped, in favor of implementing what + little we need inside of `regex-syntax` itself. + +In general, this is part of an ongoing (long term) effort to make optimizations +in the regex engine easier to reason about. The current code is too convoluted, +and thus it is very easy to introduce new bugs. This simplification effort is +the primary motivation behind re-working the `aho-corasick` crate to not only +bundle algorithms like Teddy, but to also provide regex-like match semantics +automatically. + +Moving forward, the plan is to join up with the `bstr` and `regex-automata` +crates, with the former providing more sophisticated substring search +algorithms (thereby deleting existing code in `regex`) and the latter providing +ahead-of-time compiled DFAs for cases where they are inexpensive to compute. + + +1.2.0 (2019-07-20) +================== +This release updates regex's minimum supported Rust version to 1.28, which was +release almost 1 year ago. This release also updates regex's Unicode data +tables to 12.1.0. + + +1.1.9 (2019-07-06) +================== +This release contains a bug fix that caused regex's tests to fail, due to a +dependency on an unreleased behavior in regex-syntax. + +* [BUG #593](https://github.com/rust-lang/regex/issues/593): + Move an integration-style test on error messages into regex-syntax. + + +1.1.8 (2019-07-04) +================== +This release contains a few small internal refactorings. One of which fixes +an instance of undefined behavior in a part of the SIMD code. + +Bug fixes: + +* [BUG #545](https://github.com/rust-lang/regex/issues/545): + Improves error messages when a repetition operator is used without a number. +* [BUG #588](https://github.com/rust-lang/regex/issues/588): + Removes use of a repr(Rust) union used for type punning in the Teddy matcher. 
+* [BUG #591](https://github.com/rust-lang/regex/issues/591): + Update docs for running benchmarks and improve failure modes. + + +1.1.7 (2019-06-09) +================== +This release fixes up a few warnings as a result of recent deprecations. + + +1.1.6 (2019-04-16) +================== +This release fixes a regression introduced by a bug fix (for +[BUG #557](https://github.com/rust-lang/regex/issues/557)) which could cause +the regex engine to enter an infinite loop. This bug was originally +[reported against ripgrep](https://github.com/BurntSushi/ripgrep/issues/1247). + + +1.1.5 (2019-04-01) +================== +This release fixes a bug in regex's dependency specification where it requires +a newer version of regex-syntax, but this wasn't communicated correctly in the +Cargo.toml. This would have been caught by a minimal version check, but this +check was disabled because the `rand` crate itself advertises incorrect +dependency specifications. + +Bug fixes: + +* [BUG #570](https://github.com/rust-lang/regex/pull/570): + Fix regex-syntax minimal version. + + +1.1.4 (2019-03-31) +================== +This release fixes a backwards compatibility regression where Regex was no +longer UnwindSafe. This was caused by the upgrade to aho-corasick 0.7, whose +AhoCorasick type was itself not UnwindSafe. This has been fixed in aho-corasick +0.7.4, which we now require. + +Bug fixes: + +* [BUG #568](https://github.com/rust-lang/regex/pull/568): + Fix an API regression where Regex was no longer UnwindSafe. + + +1.1.3 (2019-03-30) +================== +This releases fixes a few bugs and adds a performance improvement when a regex +is a simple alternation of literals. + +Performance improvements: + +* [OPT #566](https://github.com/rust-lang/regex/pull/566): + Upgrades `aho-corasick` to 0.7 and uses it for `foo|bar|...|quux` regexes. + +Bug fixes: + +* [BUG #527](https://github.com/rust-lang/regex/issues/527): + Fix a bug where the parser would panic on patterns like `((?x))`. +* [BUG #555](https://github.com/rust-lang/regex/issues/555): + Fix a bug where the parser would panic on patterns like `(?m){1,1}`. +* [BUG #557](https://github.com/rust-lang/regex/issues/557): + Fix a bug where captures could lead to an incorrect match. + + +1.1.2 (2019-02-27) +================== +This release fixes a bug found in the fix introduced in 1.1.1. + +Bug fixes: + +* [BUG edf45e6f](https://github.com/rust-lang/regex/commit/edf45e6f): + Fix bug introduced in reverse suffix literal matcher in the 1.1.1 release. + + +1.1.1 (2019-02-27) +================== +This is a small release with one fix for a bug caused by literal optimizations. + +Bug fixes: + +* [BUG 661bf53d](https://github.com/rust-lang/regex/commit/661bf53d): + Fixes a bug in the reverse suffix literal optimization. This was originally + reported + [against ripgrep](https://github.com/BurntSushi/ripgrep/issues/1203). + + +1.1.0 (2018-11-30) +================== +This is a small release with a couple small enhancements. This release also +increases the minimal supported Rust version (MSRV) to 1.24.1 (from 1.20.0). In +accordance with this crate's MSRV policy, this release bumps the minor version +number. + +Performance improvements: + +* [OPT #511](https://github.com/rust-lang/regex/pull/511), + [OPT #540](https://github.com/rust-lang/regex/pull/540): + Improve lazy DFA construction for large regex sets. + +New features: + +* [FEATURE #538](https://github.com/rust-lang/regex/pull/538): + Add Emoji and "break" Unicode properties. See [UNICODE.md](UNICODE.md). 
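+
+As a rough illustration of these property classes, a minimal sketch (the
+patterns and inputs below are arbitrary examples and assume the crate's
+default Unicode support is enabled):
+
+```rust
+use regex::Regex;
+
+fn main() {
+    // `Emoji` is one of the boolean properties referenced above.
+    let emoji = Regex::new(r"\p{Emoji}").unwrap();
+    assert!(emoji.is_match("nice 🦀"));
+
+    // `gcb` (Grapheme_Cluster_Break) is one of the "break" properties; two
+    // regional indicator codepoints make up a flag.
+    let flag = Regex::new(r"\p{gcb=Regional_Indicator}{2}").unwrap();
+    assert!(flag.is_match("🇺🇸"));
+}
+```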
+ +Bug fixes: + +* [BUG #530](https://github.com/rust-lang/regex/pull/530): + Add Unicode license (for data tables). +* Various typo/doc fixups. + + +1.0.6 (2018-11-06) +================== +This is a small release. + +Performance improvements: + +* [OPT #513](https://github.com/rust-lang/regex/pull/513): + Improve performance of compiling large Unicode classes by 8-10%. + +Bug fixes: + +* [BUG #533](https://github.com/rust-lang/regex/issues/533): + Fix definition of `[[:blank:]]` class that regressed in `regex-syntax 0.5`. + + +1.0.5 (2018-09-06) +================== +This is a small release with an API enhancement. + +New features: + +* [FEATURE #509](https://github.com/rust-lang/regex/pull/509): + Generalize impls of the `Replacer` trait. + + +1.0.4 (2018-08-25) +================== +This is a small release that bumps the quickcheck dependency. + + +1.0.3 (2018-08-24) +================== +This is a small bug fix release. + +Bug fixes: + +* [BUG #504](https://github.com/rust-lang/regex/pull/504): + Fix for Cargo's "minimal version" support. +* [BUG 1e39165f](https://github.com/rust-lang/regex/commit/1e39165f): + Fix doc examples for byte regexes. + + +1.0.2 (2018-07-18) +================== +This release exposes some new lower level APIs on `Regex` that permit +amortizing allocation and controlling the location at which a search is +performed in a more granular way. Most users of the regex crate will not +need or want to use these APIs. + +New features: + +* [FEATURE #493](https://github.com/rust-lang/regex/pull/493): + Add a few lower level APIs for amortizing allocation and more fine-grained + searching. + +Bug fixes: + +* [BUG 3981d2ad](https://github.com/rust-lang/regex/commit/3981d2ad): + Correct outdated documentation on `RegexBuilder::dot_matches_new_line`. +* [BUG 7ebe4ae0](https://github.com/rust-lang/regex/commit/7ebe4ae0): + Correct outdated documentation on `Parser::allow_invalid_utf8` in the + `regex-syntax` crate. +* [BUG 24c7770b](https://github.com/rust-lang/regex/commit/24c7770b): + Fix a bug in the HIR printer where it wouldn't correctly escape meta + characters in character classes. + + +1.0.1 (2018-06-19) +================== +This release upgrades regex's Unicode tables to Unicode 11, and enables SIMD +optimizations automatically on Rust stable (1.27 or newer). + +New features: + +* [FEATURE #486](https://github.com/rust-lang/regex/pull/486): + Implement `size_hint` on `RegexSet` match iterators. +* [FEATURE #488](https://github.com/rust-lang/regex/pull/488): + Update Unicode tables for Unicode 11. +* [FEATURE #490](https://github.com/rust-lang/regex/pull/490): + SIMD optimizations are now enabled automatically in Rust stable, for versions + 1.27 and up. No compilation flags or features need to be set. CPU support + SIMD is detected automatically at runtime. + +Bug fixes: + +* [BUG #482](https://github.com/rust-lang/regex/pull/482): + Present a better compilation error when the `use_std` feature isn't used. + + +1.0.0 (2018-05-01) +================== +This release marks the 1.0 release of regex. + +While this release includes some breaking changes, most users of older versions +of the regex library should be able to migrate to 1.0 by simply bumping the +version number. The important changes are as follows: + +* We adopt Rust 1.20 as the new minimum supported version of Rust for regex. + We also tentatively adopt a policy that permits bumping the minimum supported + version of Rust in minor version releases of regex, but no patch releases. 
+ That is, with respect to semver, we do not strictly consider bumping the + minimum version of Rust to be a breaking change, but adopt a conservative + stance as a compromise. +* Octal syntax in regular expressions has been disabled by default. This + permits better error messages that inform users that backreferences aren't + available. Octal syntax can be re-enabled via the corresponding option on + `RegexBuilder`. +* `(?-u:\B)` is no longer allowed in Unicode regexes since it can match at + invalid UTF-8 code unit boundaries. `(?-u:\b)` is still allowed in Unicode + regexes. +* The `From` impl has been removed. This formally removes + the public dependency on `regex-syntax`. +* A new feature, `use_std`, has been added and enabled by default. Disabling + the feature will result in a compilation error. In the future, this may + permit us to support `no_std` environments (w/ `alloc`) in a backwards + compatible way. + +For more information and discussion, please see +[1.0 release tracking issue](https://github.com/rust-lang/regex/issues/457). + + +0.2.11 (2018-05-01) +=================== +This release primarily contains bug fixes. Some of them resolve bugs where +the parser could panic. + +New features: + +* [FEATURE #459](https://github.com/rust-lang/regex/pull/459): + Include C++'s standard regex library and Boost's regex library in the + benchmark harness. We now include D/libphobos, C++/std, C++/boost, Oniguruma, + PCRE1, PCRE2, RE2 and Tcl in the harness. + +Bug fixes: + +* [BUG #445](https://github.com/rust-lang/regex/issues/445): + Clarify order of indices returned by RegexSet match iterator. +* [BUG #461](https://github.com/rust-lang/regex/issues/461): + Improve error messages for invalid regexes like `[\d-a]`. +* [BUG #464](https://github.com/rust-lang/regex/issues/464): + Fix a bug in the error message pretty printer that could cause a panic when + a regex contained a literal `\n` character. +* [BUG #465](https://github.com/rust-lang/regex/issues/465): + Fix a panic in the parser that was caused by applying a repetition operator + to `(?flags)`. +* [BUG #466](https://github.com/rust-lang/regex/issues/466): + Fix a bug where `\pC` was not recognized as an alias for `\p{Other}`. +* [BUG #470](https://github.com/rust-lang/regex/pull/470): + Fix a bug where literal searches did more work than necessary for anchored + regexes. + + +0.2.10 (2018-03-16) +=================== +This release primarily updates the regex crate to changes made in `std::arch` +on nightly Rust. + +New features: + +* [FEATURE #458](https://github.com/rust-lang/regex/pull/458): + The `Hir` type in `regex-syntax` now has a printer. + + +0.2.9 (2018-03-12) +================== +This release introduces a new nightly only feature, `unstable`, which enables +SIMD optimizations for certain types of regexes. No additional compile time +options are necessary, and the regex crate will automatically choose the +best CPU features at run time. As a result, the `simd` (nightly only) crate +dependency has been dropped. + +New features: + +* [FEATURE #456](https://github.com/rust-lang/regex/pull/456): + The regex crate now includes AVX2 optimizations in addition to the extant + SSSE3 optimization. + +Bug fixes: + +* [BUG #455](https://github.com/rust-lang/regex/pull/455): + Fix a bug where `(?x)[ / - ]` failed to parse. + + +0.2.8 (2018-03-12) +================== +Bug fixes: + +* [BUG #454](https://github.com/rust-lang/regex/pull/454): + Fix a bug in the nest limit checker being too aggressive. 
+
+
+0.2.7 (2018-03-07)
+==================
+This release includes a ground-up rewrite of the regex-syntax crate, which has
+been in development for over a year.
+
+New features:
+
+* Error messages for invalid regexes have been greatly improved. You get these
+ automatically; you don't need to do anything. In addition to better
+ formatting, error messages will now explicitly call out the use of look
+ around. When regex 1.0 is released, this will happen for backreferences as
+ well.
+* Full support for intersection, difference and symmetric difference of
+ character classes. These can be used via the `&&`, `--` and `~~` binary
+ operators within classes.
+* A Unicode Level 1 conformant implementation of `\p{..}` character classes.
+ Things like `\p{scx:Hira}`, `\p{age:3.2}` or `\p{Changes_When_Casefolded}`
+ now work. All property name and value aliases are supported, and properties
+ are selected via loose matching. e.g., `\p{Greek}` is the same as
+ `\p{G r E e K}`.
+* A new `UNICODE.md` document has been added to this repository that
+ exhaustively documents support for UTS#18.
+* Empty sub-expressions are now permitted in most places. That is, `()+` is
+ now a valid regex.
+* Almost everything in regex-syntax now uses constant stack space, even when
+ performing analysis that requires structural induction. This reduces the risk
+ of a user provided regular expression causing a stack overflow.
+* [FEATURE #174](https://github.com/rust-lang/regex/issues/174):
+ The `Ast` type in `regex-syntax` now contains span information.
+* [FEATURE #424](https://github.com/rust-lang/regex/issues/424):
+ Support `\u`, `\u{...}`, `\U` and `\U{...}` syntax for specifying code points
+ in a regular expression.
+* [FEATURE #449](https://github.com/rust-lang/regex/pull/449):
+ Add a `Replacer::by_ref` adapter for use of a replacer without consuming it.
+
+Bug fixes:
+
+* [BUG #446](https://github.com/rust-lang/regex/issues/446):
+ We re-enable the Boyer-Moore literal matcher.
+
+
+0.2.6 (2018-02-08)
+==================
+Bug fixes:
+
+* [BUG #446](https://github.com/rust-lang/regex/issues/446):
+ Fixes a bug in the new Boyer-Moore searcher that results in a match failure.
+ We fix this bug by temporarily disabling Boyer-Moore.
+
+
+0.2.5 (2017-12-30)
+==================
+Bug fixes:
+
+* [BUG #437](https://github.com/rust-lang/regex/issues/437):
+ Fixes a bug in the new Boyer-Moore searcher that results in a panic.
+
+
+0.2.4 (2017-12-30)
+==================
+New features:
+
+* [FEATURE #348](https://github.com/rust-lang/regex/pull/348):
+ Improve performance for capture searches on anchored regex.
+ (Contributed by @ethanpailes. Nice work!)
+* [FEATURE #419](https://github.com/rust-lang/regex/pull/419):
+ Expand literal searching to include Tuned Boyer-Moore in some cases.
+ (Contributed by @ethanpailes. Nice work!)
+
+Bug fixes:
+
+* [BUG](https://github.com/rust-lang/regex/pull/436):
+ The regex compiler plugin has been removed.
+* [BUG](https://github.com/rust-lang/regex/pull/436):
+ `simd` has been bumped to `0.2.1`, which fixes a Rust nightly build error.
+* [BUG](https://github.com/rust-lang/regex/pull/436):
+ Bring the benchmark harness up to date.
+
+
+0.2.3 (2017-11-30)
+==================
+New features:
+
+* [FEATURE #374](https://github.com/rust-lang/regex/pull/374):
+ Add `impl From<Match> for &str` (see the sketch after this list).
+* [FEATURE #380](https://github.com/rust-lang/regex/pull/380):
+ Derive `Clone` and `PartialEq` on `Error`.
+* [FEATURE #400](https://github.com/rust-lang/regex/pull/400):
+ Update to Unicode 10.
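+
+For illustration, a minimal sketch of the `From` conversion added in the first
+item above (the pattern and haystack here are arbitrary examples):
+
+```rust
+use regex::Regex;
+
+fn main() {
+    let re = Regex::new(r"\w+").unwrap();
+    let m = re.find("hello world").unwrap();
+    // The conversion turns a Match directly into the matched &str.
+    let s: &str = m.into();
+    assert_eq!(s, "hello");
+}
+```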
+ +Bug fixes: + +* [BUG #375](https://github.com/rust-lang/regex/issues/375): + Fix a bug that prevented the bounded backtracker from terminating. +* [BUG #393](https://github.com/rust-lang/regex/issues/393), + [BUG #394](https://github.com/rust-lang/regex/issues/394): + Fix bug with `replace` methods for empty matches. + + +0.2.2 (2017-05-21) +================== +New features: + +* [FEATURE #341](https://github.com/rust-lang/regex/issues/341): + Support nested character classes and intersection operation. + For example, `[\p{Greek}&&\pL]` matches greek letters and + `[[0-9]&&[^4]]` matches every decimal digit except `4`. + (Much thanks to @robinst, who contributed this awesome feature.) + +Bug fixes: + +* [BUG #321](https://github.com/rust-lang/regex/issues/321): + Fix bug in literal extraction and UTF-8 decoding. +* [BUG #326](https://github.com/rust-lang/regex/issues/326): + Add documentation tip about the `(?x)` flag. +* [BUG #333](https://github.com/rust-lang/regex/issues/333): + Show additional replacement example using curly braces. +* [BUG #334](https://github.com/rust-lang/regex/issues/334): + Fix bug when resolving captures after a match. +* [BUG #338](https://github.com/rust-lang/regex/issues/338): + Add example that uses `Captures::get` to API documentation. +* [BUG #353](https://github.com/rust-lang/regex/issues/353): + Fix RegexSet bug that caused match failure in some cases. +* [BUG #354](https://github.com/rust-lang/regex/pull/354): + Fix panic in parser when `(?x)` is used. +* [BUG #358](https://github.com/rust-lang/regex/issues/358): + Fix literal optimization bug with RegexSet. +* [BUG #359](https://github.com/rust-lang/regex/issues/359): + Fix example code in README. +* [BUG #365](https://github.com/rust-lang/regex/pull/365): + Fix bug in `rure_captures_len` in the C binding. +* [BUG #367](https://github.com/rust-lang/regex/issues/367): + Fix byte class bug that caused a panic. + + +0.2.1 +===== +One major bug with `replace_all` has been fixed along with a couple of other +touch-ups. + +* [BUG #312](https://github.com/rust-lang/regex/issues/312): + Fix documentation for `NoExpand` to reference correct lifetime parameter. +* [BUG #314](https://github.com/rust-lang/regex/issues/314): + Fix a bug with `replace_all` when replacing a match with the empty string. +* [BUG #316](https://github.com/rust-lang/regex/issues/316): + Note a missing breaking change from the `0.2.0` CHANGELOG entry. + (`RegexBuilder::compile` was renamed to `RegexBuilder::build`.) +* [BUG #324](https://github.com/rust-lang/regex/issues/324): + Compiling `regex` should only require one version of `memchr` crate. + + +0.2.0 +===== +This is a new major release of the regex crate, and is an implementation of the +[regex 1.0 RFC](https://github.com/rust-lang/rfcs/blob/master/text/1620-regex-1.0.md). +We are releasing a `0.2` first, and if there are no major problems, we will +release a `1.0` shortly. For `0.2`, the minimum *supported* Rust version is +1.12. + +There are a number of **breaking changes** in `0.2`. They are split into two +types. The first type correspond to breaking changes in regular expression +syntax. The second type correspond to breaking changes in the API. + +Breaking changes for regex syntax: + +* POSIX character classes now require double bracketing. Previously, the regex + `[:upper:]` would parse as the `upper` POSIX character class. Now it parses + as the character class containing the characters `:upper:`. The fix to this + change is to use `[[:upper:]]` instead. 
Note that variants like + `[[:upper:][:blank:]]` continue to work. +* The character `[` must always be escaped inside a character class. +* The characters `&`, `-` and `~` must be escaped if any one of them are + repeated consecutively. For example, `[&]`, `[\&]`, `[\&\&]`, `[&-&]` are all + equivalent while `[&&]` is illegal. (The motivation for this and the prior + change is to provide a backwards compatible path for adding character class + set notation.) +* A `bytes::Regex` now has Unicode mode enabled by default (like the main + `Regex` type). This means regexes compiled with `bytes::Regex::new` that + don't have the Unicode flag set should add `(?-u)` to recover the original + behavior. + +Breaking changes for the regex API: + +* `find` and `find_iter` now **return `Match` values instead of + `(usize, usize)`.** `Match` values have `start` and `end` methods, which + return the match offsets. `Match` values also have an `as_str` method, + which returns the text of the match itself. +* The `Captures` type now only provides a single iterator over all capturing + matches, which should replace uses of `iter` and `iter_pos`. Uses of + `iter_named` should use the `capture_names` method on `Regex`. +* The `at` method on the `Captures` type has been renamed to `get`, and it + now returns a `Match`. Similarly, the `name` method on `Captures` now returns + a `Match`. +* The `replace` methods now return `Cow` values. The `Cow::Borrowed` variant + is returned when no replacements are made. +* The `Replacer` trait has been completely overhauled. This should only + impact clients that implement this trait explicitly. Standard uses of + the `replace` methods should continue to work unchanged. If you implement + the `Replacer` trait, please consult the new documentation. +* The `quote` free function has been renamed to `escape`. +* The `Regex::with_size_limit` method has been removed. It is replaced by + `RegexBuilder::size_limit`. +* The `RegexBuilder` type has switched from owned `self` method receivers to + `&mut self` method receivers. Most uses will continue to work unchanged, but + some code may require naming an intermediate variable to hold the builder. +* The `compile` method on `RegexBuilder` has been renamed to `build`. +* The free `is_match` function has been removed. It is replaced by compiling + a `Regex` and calling its `is_match` method. +* The `PartialEq` and `Eq` impls on `Regex` have been dropped. If you relied + on these impls, the fix is to define a wrapper type around `Regex`, impl + `Deref` on it and provide the necessary impls. +* The `is_empty` method on `Captures` has been removed. This always returns + `false`, so its use is superfluous. +* The `Syntax` variant of the `Error` type now contains a string instead of + a `regex_syntax::Error`. If you were examining syntax errors more closely, + you'll need to explicitly use the `regex_syntax` crate to re-parse the regex. +* The `InvalidSet` variant of the `Error` type has been removed since it is + no longer used. +* Most of the iterator types have been renamed to match conventions. If you + were using these iterator types explicitly, please consult the documentation + for its new name. For example, `RegexSplits` has been renamed to `Split`. + +A number of bugs have been fixed: + +* [BUG #151](https://github.com/rust-lang/regex/issues/151): + The `Replacer` trait has been changed to permit the caller to control + allocation. +* [BUG #165](https://github.com/rust-lang/regex/issues/165): + Remove the free `is_match` function. 
+* [BUG #166](https://github.com/rust-lang/regex/issues/166): + Expose more knobs (available in `0.1`) and remove `with_size_limit`. +* [BUG #168](https://github.com/rust-lang/regex/issues/168): + Iterators produced by `Captures` now have the correct lifetime parameters. +* [BUG #175](https://github.com/rust-lang/regex/issues/175): + Fix a corner case in the parsing of POSIX character classes. +* [BUG #178](https://github.com/rust-lang/regex/issues/178): + Drop the `PartialEq` and `Eq` impls on `Regex`. +* [BUG #179](https://github.com/rust-lang/regex/issues/179): + Remove `is_empty` from `Captures` since it always returns false. +* [BUG #276](https://github.com/rust-lang/regex/issues/276): + Position of named capture can now be retrieved from a `Captures`. +* [BUG #296](https://github.com/rust-lang/regex/issues/296): + Remove winapi/kernel32-sys dependency on UNIX. +* [BUG #307](https://github.com/rust-lang/regex/issues/307): + Fix error on emscripten. + + +0.1.80 +====== +* [PR #292](https://github.com/rust-lang/regex/pull/292): + Fixes bug #291, which was introduced by PR #290. + +0.1.79 +====== +* Require regex-syntax 0.3.8. + +0.1.78 +====== +* [PR #290](https://github.com/rust-lang/regex/pull/290): + Fixes bug #289, which caused some regexes with a certain combination + of literals to match incorrectly. + +0.1.77 +====== +* [PR #281](https://github.com/rust-lang/regex/pull/281): + Fixes bug #280 by disabling all literal optimizations when a pattern + is partially anchored. + +0.1.76 +====== +* Tweak criteria for using the Teddy literal matcher. + +0.1.75 +====== +* [PR #275](https://github.com/rust-lang/regex/pull/275): + Improves match verification performance in the Teddy SIMD searcher. +* [PR #278](https://github.com/rust-lang/regex/pull/278): + Replaces slow substring loop in the Teddy SIMD searcher with Aho-Corasick. +* Implemented DoubleEndedIterator on regex set match iterators. + +0.1.74 +====== +* Release regex-syntax 0.3.5 with a minor bug fix. +* Fix bug #272. +* Fix bug #277. +* [PR #270](https://github.com/rust-lang/regex/pull/270): + Fixes bugs #264, #268 and an unreported where the DFA cache size could be + drastically underestimated in some cases (leading to high unexpected memory + usage). + +0.1.73 +====== +* Release `regex-syntax 0.3.4`. +* Bump `regex-syntax` dependency version for `regex` to `0.3.4`. + +0.1.72 +====== +* [PR #262](https://github.com/rust-lang/regex/pull/262): + Fixes a number of small bugs caught by fuzz testing (AFL). + +0.1.71 +====== +* [PR #236](https://github.com/rust-lang/regex/pull/236): + Fix a bug in how suffix literals were extracted, which could lead + to invalid match behavior in some cases. + +0.1.70 +====== +* [PR #231](https://github.com/rust-lang/regex/pull/231): + Add SIMD accelerated multiple pattern search. +* [PR #228](https://github.com/rust-lang/regex/pull/228): + Reintroduce the reverse suffix literal optimization. +* [PR #226](https://github.com/rust-lang/regex/pull/226): + Implements NFA state compression in the lazy DFA. +* [PR #223](https://github.com/rust-lang/regex/pull/223): + A fully anchored RegexSet can now short-circuit. + +0.1.69 +====== +* [PR #216](https://github.com/rust-lang/regex/pull/216): + Tweak the threshold for running backtracking. +* [PR #217](https://github.com/rust-lang/regex/pull/217): + Add upper limit (from the DFA) to capture search (for the NFA). +* [PR #218](https://github.com/rust-lang/regex/pull/218): + Add rure, a C API. 
+
+0.1.68
+======
+* [PR #210](https://github.com/rust-lang/regex/pull/210):
+ Fixed a performance bug in `bytes::Regex::replace` where `extend` was used
+ instead of `extend_from_slice`.
+* [PR #211](https://github.com/rust-lang/regex/pull/211):
+ Fixed a bug in the handling of word boundaries in the DFA.
+* [PR #213](https://github.com/rust-lang/regex/pull/213):
+ Added RE2 and Tcl to the benchmark harness. Also added a CLI utility for
+ running regexes using any of the following regex engines: PCRE1, PCRE2,
+ Oniguruma, RE2, Tcl and of course Rust's own regexes.
+
+0.1.67
+======
+* [PR #201](https://github.com/rust-lang/regex/pull/201):
+ Fix undefined behavior in the `regex!` compiler plugin macro.
+* [PR #205](https://github.com/rust-lang/regex/pull/205):
+ More improvements to DFA performance. Competitive with RE2. See PR for
+ benchmarks.
+* [PR #209](https://github.com/rust-lang/regex/pull/209):
+ Release 0.1.66 was semver incompatible since it required a newer version
+ of Rust than previous releases. This PR fixes that. (And `0.1.66` was
+ yanked.)
+
+0.1.66
+======
+* Speculative support for Unicode word boundaries was added to the DFA. This
+ should remove the last common case that disqualified use of the DFA.
+* An optimization that scanned for suffix literals and then matched the regular
+ expression in reverse was removed because it had worst case quadratic time
+ complexity. It was replaced with a more limited optimization where, given any
+ regex of the form `re$`, it will be matched in reverse from the end of the
+ haystack.
+* [PR #202](https://github.com/rust-lang/regex/pull/202):
+ The inner loop of the DFA was heavily optimized to improve cache locality
+ and reduce the overall number of instructions run on each iteration. This
+ represents the first use of `unsafe` in `regex` (to elide bounds checks).
+* [PR #200](https://github.com/rust-lang/regex/pull/200):
+ Use of the `mempool` crate (which used thread local storage) was replaced
+ with a faster version of a similar API in @Amanieu's `thread_local` crate.
+ It should reduce contention when using a regex from multiple threads
+ simultaneously.
+* PCRE2 JIT benchmarks were added. A benchmark comparison can be found
+ [here](https://gist.github.com/anonymous/14683c01993e91689f7206a18675901b).
+ (Includes a comparison with PCRE1's JIT and Oniguruma.)
+* A bug where word boundaries weren't being matched correctly in the DFA was
+ fixed. This only affected use of `bytes::Regex`.
+* [#160](https://github.com/rust-lang/regex/issues/160):
+ `Captures` now has a `Debug` impl.
diff --git a/vendor/regex/Cargo.lock b/vendor/regex/Cargo.lock
new file mode 100644
index 00000000000000..5e119bb19d0d98
--- /dev/null
+++ b/vendor/regex/Cargo.lock
@@ -0,0 +1,383 @@
+# This file is automatically @generated by Cargo.
+# It is not intended for manual editing.
+version = 3 + +[[package]] +name = "aho-corasick" +version = "1.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916" +dependencies = [ + "log", + "memchr", +] + +[[package]] +name = "anyhow" +version = "1.0.100" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a23eb6b1614318a8071c9b2521f36b424b2c83db5eb3a0fead4a6c0809af6e61" + +[[package]] +name = "atty" +version = "0.2.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8" +dependencies = [ + "hermit-abi", + "libc", + "winapi", +] + +[[package]] +name = "bstr" +version = "1.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "234113d19d0d7d613b40e86fb654acf958910802bcceab913a4f9e7cda03b1a4" +dependencies = [ + "memchr", + "serde", +] + +[[package]] +name = "cfg-if" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2fd1289c04a9ea8cb22300a459a72a385d7c73d3259e2ed7dcb2af674838cfa9" + +[[package]] +name = "doc-comment" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fea41bba32d969b513997752735605054bc0dfa92b4c56bf1189f2e174be7a10" + +[[package]] +name = "env_logger" +version = "0.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a12e6657c4c97ebab115a42dcee77225f7f482cdd841cf7088c657a42e9e00e7" +dependencies = [ + "atty", + "humantime", + "log", + "termcolor", +] + +[[package]] +name = "equivalent" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "877a4ace8713b0bcf2a4e7eec82529c029f1d0619886d18145fea96c3ffe5c0f" + +[[package]] +name = "getrandom" +version = "0.2.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "335ff9f135e4384c8150d6f27c6daed433577f86b4750418338c01a1a2528592" +dependencies = [ + "cfg-if", + "libc", + "wasi", +] + +[[package]] +name = "hashbrown" +version = "0.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5419bdc4f6a9207fbeba6d11b604d481addf78ecd10c11ad51e76c2f6482748d" + +[[package]] +name = "hermit-abi" +version = "0.1.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33" +dependencies = [ + "libc", +] + +[[package]] +name = "humantime" +version = "2.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "135b12329e5e3ce057a9f972339ea52bc954fe1e9358ef27f95e89716fbc5424" + +[[package]] +name = "indexmap" +version = "2.11.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4b0f83760fb341a774ed326568e19f5a863af4a952def8c39f9ab92fd95b88e5" +dependencies = [ + "equivalent", + "hashbrown", +] + +[[package]] +name = "libc" +version = "0.2.177" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2874a2af47a2325c2001a6e6fad9b16a53b802102b528163885171cf92b15976" + +[[package]] +name = "log" +version = "0.4.28" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "34080505efa8e45a4b816c349525ebe327ceaa8559756f0356cba97ef3bf7432" + +[[package]] +name = "memchr" +version = "2.7.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f52b00d39961fc5b2736ea853c9cc86238e165017a493d1d5c8eac6bdc4cc273" +dependencies = [ 
+ "log", +] + +[[package]] +name = "proc-macro2" +version = "1.0.101" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "89ae43fd86e4158d6db51ad8e2b80f313af9cc74f5c0e03ccb87de09998732de" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "quickcheck" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "588f6378e4dd99458b60ec275b4477add41ce4fa9f64dcba6f15adccb19b50d6" +dependencies = [ + "rand", +] + +[[package]] +name = "quote" +version = "1.0.41" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ce25767e7b499d1b604768e7cde645d14cc8584231ea6b295e9c9eb22c02e1d1" +dependencies = [ + "proc-macro2", +] + +[[package]] +name = "rand" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" +dependencies = [ + "rand_core", +] + +[[package]] +name = "rand_core" +version = "0.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" +dependencies = [ + "getrandom", +] + +[[package]] +name = "regex" +version = "1.12.2" +dependencies = [ + "aho-corasick", + "anyhow", + "doc-comment", + "env_logger", + "memchr", + "quickcheck", + "regex-automata", + "regex-syntax", + "regex-test", +] + +[[package]] +name = "regex-automata" +version = "0.4.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5276caf25ac86c8d810222b3dbb938e512c55c6831a10f3e6ed1c93b84041f1c" +dependencies = [ + "aho-corasick", + "log", + "memchr", + "regex-syntax", +] + +[[package]] +name = "regex-syntax" +version = "0.8.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a2d987857b319362043e95f5353c0535c1f58eec5336fdfcf626430af7def58" + +[[package]] +name = "regex-test" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "da40f0939bc4c598b4326abdbb363a8987aa43d0526e5624aefcf3ed90344e62" +dependencies = [ + "anyhow", + "bstr", + "serde", + "toml", +] + +[[package]] +name = "serde" +version = "1.0.228" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a8e94ea7f378bd32cbbd37198a4a91436180c5bb472411e48b5ec2e2124ae9e" +dependencies = [ + "serde_core", + "serde_derive", +] + +[[package]] +name = "serde_core" +version = "1.0.228" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41d385c7d4ca58e59fc732af25c3983b67ac852c1a25000afe1175de458b67ad" +dependencies = [ + "serde_derive", +] + +[[package]] +name = "serde_derive" +version = "1.0.228" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d540f220d3187173da220f885ab66608367b6574e925011a9353e4badda91d79" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "serde_spanned" +version = "0.6.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bf41e0cfaf7226dca15e8197172c295a782857fcb97fad1808a166870dee75a3" +dependencies = [ + "serde", +] + +[[package]] +name = "syn" +version = "2.0.106" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ede7c438028d4436d71104916910f5bb611972c5cfd7f89b8300a8186e6fada6" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "termcolor" +version = "1.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"06794f8f6c5c898b3275aebefa6b8a1cb24cd2c6c79397ab15774837a0bc5755" +dependencies = [ + "winapi-util", +] + +[[package]] +name = "toml" +version = "0.8.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc1beb996b9d83529a9e75c17a1686767d148d70663143c7854d8b4a09ced362" +dependencies = [ + "serde", + "serde_spanned", + "toml_datetime", + "toml_edit", +] + +[[package]] +name = "toml_datetime" +version = "0.6.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "22cddaf88f4fbc13c51aebbf5f8eceb5c7c5a9da2ac40a13519eb5b0a0e8f11c" +dependencies = [ + "serde", +] + +[[package]] +name = "toml_edit" +version = "0.22.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41fe8c660ae4257887cf66394862d21dbca4a6ddd26f04a3560410406a2f819a" +dependencies = [ + "indexmap", + "serde", + "serde_spanned", + "toml_datetime", + "winnow", +] + +[[package]] +name = "unicode-ident" +version = "1.0.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f63a545481291138910575129486daeaf8ac54aee4387fe7906919f7830c7d9d" + +[[package]] +name = "wasi" +version = "0.11.1+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ccf3ec651a847eb01de73ccad15eb7d99f80485de043efb2f370cd654f4ea44b" + +[[package]] +name = "winapi" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" +dependencies = [ + "winapi-i686-pc-windows-gnu", + "winapi-x86_64-pc-windows-gnu", +] + +[[package]] +name = "winapi-i686-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" + +[[package]] +name = "winapi-util" +version = "0.1.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c2a7b1c03c876122aa43f3020e6c3c3ee5c05081c9a00739faf7503aeba10d22" +dependencies = [ + "windows-sys", +] + +[[package]] +name = "winapi-x86_64-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" + +[[package]] +name = "windows-link" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0805222e57f7521d6a62e36fa9163bc891acd422f971defe97d64e70d0a4fe5" + +[[package]] +name = "windows-sys" +version = "0.61.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ae137229bcbd6cdf0f7b80a31df61766145077ddf49416a728b02cb3921ff3fc" +dependencies = [ + "windows-link", +] + +[[package]] +name = "winnow" +version = "0.7.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "21a0236b59786fed61e2a80582dd500fe61f18b5dca67a4a067d0bc9039339cf" +dependencies = [ + "memchr", +] diff --git a/vendor/regex/Cargo.toml b/vendor/regex/Cargo.toml new file mode 100644 index 00000000000000..31fd135a8fd905 --- /dev/null +++ b/vendor/regex/Cargo.toml @@ -0,0 +1,207 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies. 
+# +# If you are reading this file be aware that the original Cargo.toml +# will likely look very different (and much more reasonable). +# See Cargo.toml.orig for the original contents. + +[package] +edition = "2021" +rust-version = "1.65" +name = "regex" +version = "1.12.2" +authors = [ + "The Rust Project Developers", + "Andrew Gallant ", +] +build = false +exclude = [ + "/fuzz/*", + "/record/*", + "/scripts/*", + "tests/fuzz/*", + "/.github/*", +] +autolib = false +autobins = false +autoexamples = false +autotests = false +autobenches = false +description = """ +An implementation of regular expressions for Rust. This implementation uses +finite automata and guarantees linear time matching on all inputs. +""" +homepage = "https://github.com/rust-lang/regex" +documentation = "https://docs.rs/regex" +readme = "README.md" +categories = ["text-processing"] +license = "MIT OR Apache-2.0" +repository = "https://github.com/rust-lang/regex" + +[package.metadata.docs.rs] +all-features = true +rustdoc-args = [ + "--cfg", + "docsrs_regex", +] + +[features] +default = [ + "std", + "perf", + "unicode", + "regex-syntax/default", +] +logging = [ + "aho-corasick?/logging", + "memchr?/logging", + "regex-automata/logging", +] +pattern = [] +perf = [ + "perf-cache", + "perf-dfa", + "perf-onepass", + "perf-backtrack", + "perf-inline", + "perf-literal", +] +perf-backtrack = ["regex-automata/nfa-backtrack"] +perf-cache = [] +perf-dfa = ["regex-automata/hybrid"] +perf-dfa-full = [ + "regex-automata/dfa-build", + "regex-automata/dfa-search", +] +perf-inline = ["regex-automata/perf-inline"] +perf-literal = [ + "dep:aho-corasick", + "dep:memchr", + "regex-automata/perf-literal", +] +perf-onepass = ["regex-automata/dfa-onepass"] +std = [ + "aho-corasick?/std", + "memchr?/std", + "regex-automata/std", + "regex-syntax/std", +] +unicode = [ + "unicode-age", + "unicode-bool", + "unicode-case", + "unicode-gencat", + "unicode-perl", + "unicode-script", + "unicode-segment", + "regex-automata/unicode", + "regex-syntax/unicode", +] +unicode-age = [ + "regex-automata/unicode-age", + "regex-syntax/unicode-age", +] +unicode-bool = [ + "regex-automata/unicode-bool", + "regex-syntax/unicode-bool", +] +unicode-case = [ + "regex-automata/unicode-case", + "regex-syntax/unicode-case", +] +unicode-gencat = [ + "regex-automata/unicode-gencat", + "regex-syntax/unicode-gencat", +] +unicode-perl = [ + "regex-automata/unicode-perl", + "regex-automata/unicode-word-boundary", + "regex-syntax/unicode-perl", +] +unicode-script = [ + "regex-automata/unicode-script", + "regex-syntax/unicode-script", +] +unicode-segment = [ + "regex-automata/unicode-segment", + "regex-syntax/unicode-segment", +] +unstable = ["pattern"] +use_std = ["std"] + +[lib] +name = "regex" +path = "src/lib.rs" + +[[test]] +name = "integration" +path = "tests/lib.rs" + +[dependencies.aho-corasick] +version = "1.0.0" +optional = true +default-features = false + +[dependencies.memchr] +version = "2.6.0" +optional = true +default-features = false + +[dependencies.regex-automata] +version = "0.4.12" +features = [ + "alloc", + "syntax", + "meta", + "nfa-pikevm", +] +default-features = false + +[dependencies.regex-syntax] +version = "0.8.5" +default-features = false + +[dev-dependencies.anyhow] +version = "1.0.69" + +[dev-dependencies.doc-comment] +version = "0.3" + +[dev-dependencies.env_logger] +version = "0.9.3" +features = [ + "atty", + "humantime", + "termcolor", +] +default-features = false + +[dev-dependencies.quickcheck] +version = "1.0.3" +default-features = false + 
+[dev-dependencies.regex-test] +version = "0.1.0" + +[lints.rust.unexpected_cfgs] +level = "allow" +priority = 0 +check-cfg = ["cfg(docsrs_regex)"] + +[profile.bench] +debug = 2 + +[profile.dev] +opt-level = 3 +debug = 2 + +[profile.release] +debug = 2 + +[profile.test] +opt-level = 3 +debug = 2 diff --git a/vendor/regex/Cross.toml b/vendor/regex/Cross.toml new file mode 100644 index 00000000000000..5415e7a45195f6 --- /dev/null +++ b/vendor/regex/Cross.toml @@ -0,0 +1,7 @@ +[build.env] +passthrough = [ + "RUST_BACKTRACE", + "RUST_LOG", + "REGEX_TEST", + "REGEX_TEST_VERBOSE", +] diff --git a/vendor/regex/LICENSE-APACHE b/vendor/regex/LICENSE-APACHE new file mode 100644 index 00000000000000..16fe87b06e802f --- /dev/null +++ b/vendor/regex/LICENSE-APACHE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/vendor/regex/LICENSE-MIT b/vendor/regex/LICENSE-MIT new file mode 100644 index 00000000000000..39d4bdb5acd313 --- /dev/null +++ b/vendor/regex/LICENSE-MIT @@ -0,0 +1,25 @@ +Copyright (c) 2014 The Rust Project Developers + +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. diff --git a/vendor/regex/README.md b/vendor/regex/README.md new file mode 100644 index 00000000000000..0af313dea4676f --- /dev/null +++ b/vendor/regex/README.md @@ -0,0 +1,336 @@ +regex +===== +This crate provides routines for searching strings for matches of a [regular +expression] (aka "regex"). The regex syntax supported by this crate is similar +to other regex engines, but it lacks several features that are not known how to +implement efficiently. This includes, but is not limited to, look-around and +backreferences. In exchange, all regex searches in this crate have worst case +`O(m * n)` time complexity, where `m` is proportional to the size of the regex +and `n` is proportional to the size of the string being searched. + +[regular expression]: https://en.wikipedia.org/wiki/Regular_expression + +[![Build status](https://github.com/rust-lang/regex/workflows/ci/badge.svg)](https://github.com/rust-lang/regex/actions) +[![Crates.io](https://img.shields.io/crates/v/regex.svg)](https://crates.io/crates/regex) + +### Documentation + +[Module documentation with examples](https://docs.rs/regex). 
+The module documentation also includes a comprehensive description of the +syntax supported. + +Documentation with examples for the various matching functions and iterators +can be found on the +[`Regex` type](https://docs.rs/regex/*/regex/struct.Regex.html). + +### Usage + +To bring this crate into your repository, either add `regex` to your +`Cargo.toml`, or run `cargo add regex`. + +Here's a simple example that matches a date in YYYY-MM-DD format and prints the +year, month and day: + +```rust +use regex::Regex; + +fn main() { + let re = Regex::new(r"(?x) +(?P\d{4}) # the year +- +(?P\d{2}) # the month +- +(?P\d{2}) # the day +").unwrap(); + + let caps = re.captures("2010-03-14").unwrap(); + assert_eq!("2010", &caps["year"]); + assert_eq!("03", &caps["month"]); + assert_eq!("14", &caps["day"]); +} +``` + +If you have lots of dates in text that you'd like to iterate over, then it's +easy to adapt the above example with an iterator: + +```rust +use regex::Regex; + +fn main() { + let re = Regex::new(r"(\d{4})-(\d{2})-(\d{2})").unwrap(); + let hay = "On 2010-03-14, foo happened. On 2014-10-14, bar happened."; + + let mut dates = vec![]; + for (_, [year, month, day]) in re.captures_iter(hay).map(|c| c.extract()) { + dates.push((year, month, day)); + } + assert_eq!(dates, vec![ + ("2010", "03", "14"), + ("2014", "10", "14"), + ]); +} +``` + +### Usage: Avoid compiling the same regex in a loop + +It is an anti-pattern to compile the same regular expression in a loop since +compilation is typically expensive. (It takes anywhere from a few microseconds +to a few **milliseconds** depending on the size of the regex.) Not only is +compilation itself expensive, but this also prevents optimizations that reuse +allocations internally to the matching engines. + +In Rust, it can sometimes be a pain to pass regular expressions around if +they're used from inside a helper function. Instead, we recommend using +[`std::sync::LazyLock`], or the [`once_cell`] crate, +if you can't use the standard library. + +This example shows how to use `std::sync::LazyLock`: + +```rust +use std::sync::LazyLock; + +use regex::Regex; + +fn some_helper_function(haystack: &str) -> bool { + static RE: LazyLock = LazyLock::new(|| Regex::new(r"...").unwrap()); + RE.is_match(haystack) +} + +fn main() { + assert!(some_helper_function("abc")); + assert!(!some_helper_function("ac")); +} +``` + +Specifically, in this example, the regex will be compiled when it is used for +the first time. On subsequent uses, it will reuse the previous compilation. + +[`std::sync::LazyLock`]: https://doc.rust-lang.org/std/sync/struct.LazyLock.html +[`once_cell`]: https://crates.io/crates/once_cell + +### Usage: match regular expressions on `&[u8]` + +The main API of this crate (`regex::Regex`) requires the caller to pass a +`&str` for searching. In Rust, an `&str` is required to be valid UTF-8, which +means the main API can't be used for searching arbitrary bytes. + +To match on arbitrary bytes, use the `regex::bytes::Regex` API. The API is +identical to the main API, except that it takes an `&[u8]` to search on instead +of an `&str`. The `&[u8]` APIs also permit disabling Unicode mode in the regex +even when the pattern would match invalid UTF-8. For example, `(?-u:.)` is +not allowed in `regex::Regex` but is allowed in `regex::bytes::Regex` since +`(?-u:.)` matches any byte except for `\n`. Conversely, `.` will match the +UTF-8 encoding of any Unicode scalar value except for `\n`. 
+ +This example shows how to find all null-terminated strings in a slice of bytes: + +```rust +use regex::bytes::Regex; + +let re = Regex::new(r"(?-u)(?[^\x00]+)\x00").unwrap(); +let text = b"foo\xFFbar\x00baz\x00"; + +// Extract all of the strings without the null terminator from each match. +// The unwrap is OK here since a match requires the `cstr` capture to match. +let cstrs: Vec<&[u8]> = + re.captures_iter(text) + .map(|c| c.name("cstr").unwrap().as_bytes()) + .collect(); +assert_eq!(vec![&b"foo\xFFbar"[..], &b"baz"[..]], cstrs); +``` + +Notice here that the `[^\x00]+` will match any *byte* except for `NUL`, +including bytes like `\xFF` which are not valid UTF-8. When using the main API, +`[^\x00]+` would instead match any valid UTF-8 sequence except for `NUL`. + +### Usage: match multiple regular expressions simultaneously + +This demonstrates how to use a `RegexSet` to match multiple (possibly +overlapping) regular expressions in a single scan of the search text: + +```rust +use regex::RegexSet; + +let set = RegexSet::new(&[ + r"\w+", + r"\d+", + r"\pL+", + r"foo", + r"bar", + r"barfoo", + r"foobar", +]).unwrap(); + +// Iterate over and collect all of the matches. +let matches: Vec<_> = set.matches("foobar").into_iter().collect(); +assert_eq!(matches, vec![0, 2, 3, 4, 6]); + +// You can also test whether a particular regex matched: +let matches = set.matches("foobar"); +assert!(!matches.matched(5)); +assert!(matches.matched(6)); +``` + + +### Usage: regex internals as a library + +The [`regex-automata` directory](./regex-automata/) contains a crate that +exposes all the internal matching engines used by the `regex` crate. The +idea is that the `regex` crate exposes a simple API for 99% of use cases, but +`regex-automata` exposes oodles of customizable behaviors. + +[Documentation for `regex-automata`.](https://docs.rs/regex-automata) + + +### Usage: a regular expression parser + +This repository contains a crate that provides a well tested regular expression +parser, abstract syntax and a high-level intermediate representation for +convenient analysis. It provides no facilities for compilation or execution. +This may be useful if you're implementing your own regex engine or otherwise +need to do analysis on the syntax of a regular expression. It is otherwise not +recommended for general use. + +[Documentation for `regex-syntax`.](https://docs.rs/regex-syntax) + + +### Crate features + +This crate comes with several features that permit tweaking the trade-off +between binary size, compilation time and runtime performance. Users of this +crate can selectively disable Unicode tables, or choose from a variety of +optimizations performed by this crate to disable. + +When all of these features are disabled, runtime match performance may be much +worse, but if you're matching on short strings, or if high performance isn't +necessary, then such a configuration is perfectly serviceable. To disable +all such features, use the following `Cargo.toml` dependency configuration: + +```toml +[dependencies.regex] +version = "1.3" +default-features = false +# Unless you have a specific reason not to, it's good sense to enable standard +# library support. It enables several optimizations and avoids spin locks. It +# also shouldn't meaningfully impact compile times or binary size. +features = ["std"] +``` + +This will reduce the dependency tree of `regex` down to two crates: +`regex-syntax` and `regex-automata`. 
+ +The full set of features one can disable are +[in the "Crate features" section of the documentation](https://docs.rs/regex/1.*/#crate-features). + + +### Performance + +One of the goals of this crate is for the regex engine to be "fast." What that +is a somewhat nebulous goal, it is usually interpreted in one of two ways. +First, it means that all searches take worst case `O(m * n)` time, where +`m` is proportional to `len(regex)` and `n` is proportional to `len(haystack)`. +Second, it means that even aside from the time complexity constraint, regex +searches are "fast" in practice. + +While the first interpretation is pretty unambiguous, the second one remains +nebulous. While nebulous, it guides this crate's architecture and the sorts of +the trade-offs it makes. For example, here are some general architectural +statements that follow as a result of the goal to be "fast": + +* When given the choice between faster regex searches and faster _Rust compile +times_, this crate will generally choose faster regex searches. +* When given the choice between faster regex searches and faster _regex compile +times_, this crate will generally choose faster regex searches. That is, it is +generally acceptable for `Regex::new` to get a little slower if it means that +searches get faster. (This is a somewhat delicate balance to strike, because +the speed of `Regex::new` needs to remain somewhat reasonable. But this is why +one should avoid re-compiling the same regex over and over again.) +* When given the choice between faster regex searches and simpler API +design, this crate will generally choose faster regex searches. For example, +if one didn't care about performance, we could like get rid of both of +the `Regex::is_match` and `Regex::find` APIs and instead just rely on +`Regex::captures`. + +There are perhaps more ways that being "fast" influences things. + +While this repository used to provide its own benchmark suite, it has since +been moved to [rebar](https://github.com/BurntSushi/rebar). The benchmarks are +quite extensive, and there are many more than what is shown in rebar's README +(which is just limited to a "curated" set meant to compare performance between +regex engines). To run all of this crate's benchmarks, first start by cloning +and installing `rebar`: + +```text +$ git clone https://github.com/BurntSushi/rebar +$ cd rebar +$ cargo install --path ./ +``` + +Then build the benchmark harness for just this crate: + +```text +$ rebar build -e '^rust/regex$' +``` + +Run all benchmarks for this crate as tests (each benchmark is executed once to +ensure it works): + +```text +$ rebar measure -e '^rust/regex$' -t +``` + +Record measurements for all benchmarks and save them to a CSV file: + +```text +$ rebar measure -e '^rust/regex$' | tee results.csv +``` + +Explore benchmark timings: + +```text +$ rebar cmp results.csv +``` + +See the `rebar` documentation for more details on how it works and how to +compare results with other regex engines. + + +### Hacking + +The `regex` crate is, for the most part, a pretty thin wrapper around the +[`meta::Regex`](https://docs.rs/regex-automata/latest/regex_automata/meta/struct.Regex.html) +from the +[`regex-automata` crate](https://docs.rs/regex-automata/latest/regex_automata/). +Therefore, if you're looking to work on the internals of this crate, you'll +likely either want to look in `regex-syntax` (for parsing) or `regex-automata` +(for construction of finite automata and the search routines). 
+ +My [blog on regex internals](https://burntsushi.net/regex-internals/) +goes into more depth. + + +### Minimum Rust version policy + +This crate's minimum supported `rustc` version is `1.65.0`. + +The policy is that the minimum Rust version required to use this crate can be +increased in minor version updates. For example, if regex 1.0 requires Rust +1.20.0, then regex 1.0.z for all values of `z` will also require Rust 1.20.0 or +newer. However, regex 1.y for `y > 0` may require a newer minimum version of +Rust. + + +### License + +This project is licensed under either of + + * Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or + https://www.apache.org/licenses/LICENSE-2.0) + * MIT license ([LICENSE-MIT](LICENSE-MIT) or + https://opensource.org/licenses/MIT) + +at your option. + +The data in `regex-syntax/src/unicode_tables/` is licensed under the Unicode +License Agreement +([LICENSE-UNICODE](https://www.unicode.org/copyright.html#License)). diff --git a/vendor/regex/UNICODE.md b/vendor/regex/UNICODE.md new file mode 100644 index 00000000000000..2b62567f12c6e6 --- /dev/null +++ b/vendor/regex/UNICODE.md @@ -0,0 +1,258 @@ +# Unicode conformance + +This document describes the regex crate's conformance to Unicode's +[UTS#18](https://unicode.org/reports/tr18/) +report, which lays out 3 levels of support: Basic, Extended and Tailored. + +Full support for Level 1 ("Basic Unicode Support") is provided with two +exceptions: + +1. Line boundaries are not Unicode aware. Namely, only the `\n` + (`END OF LINE`) character is recognized as a line boundary by default. + One can opt into `\r\n|\r|\n` being a line boundary via CRLF mode. +2. The compatibility properties specified by + [RL1.2a](https://unicode.org/reports/tr18/#RL1.2a) + are ASCII-only definitions. + +Little to no support is provided for either Level 2 or Level 3. For the most +part, this is because the features are either complex/hard to implement, or at +the very least, very difficult to implement without sacrificing performance. +For example, tackling canonical equivalence such that matching worked as one +would expect regardless of normalization form would be a significant +undertaking. This is at least partially a result of the fact that this regex +engine is based on finite automata, which admits less flexibility normally +associated with backtracking implementations. + + +## RL1.1 Hex Notation + +[UTS#18 RL1.1](https://unicode.org/reports/tr18/#Hex_notation) + +Hex Notation refers to the ability to specify a Unicode code point in a regular +expression via its hexadecimal code point representation. This is useful in +environments that have poor Unicode font rendering or if you need to express a +code point that is not normally displayable. All forms of hexadecimal notation +are supported + + \x7F hex character code (exactly two digits) + \x{10FFFF} any hex character code corresponding to a Unicode code point + \u007F hex character code (exactly four digits) + \u{7F} any hex character code corresponding to a Unicode code point + \U0000007F hex character code (exactly eight digits) + \U{7F} any hex character code corresponding to a Unicode code point + +Briefly, the `\x{...}`, `\u{...}` and `\U{...}` are all exactly equivalent ways +of expressing hexadecimal code points. Any number of digits can be written +within the brackets. In contrast, `\xNN`, `\uNNNN`, `\UNNNNNNNN` are all +fixed-width variants of the same idea. + +Note that when Unicode mode is disabled, any non-ASCII Unicode codepoint is +banned. 
Additionally, the `\xNN` syntax represents arbitrary bytes when Unicode +mode is disabled. That is, the regex `\xFF` matches the Unicode codepoint +U+00FF (encoded as `\xC3\xBF` in UTF-8) while the regex `(?-u)\xFF` matches +the literal byte `\xFF`. + + +## RL1.2 Properties + +[UTS#18 RL1.2](https://unicode.org/reports/tr18/#Categories) + +Full support for Unicode property syntax is provided. Unicode properties +provide a convenient way to construct character classes of groups of code +points specified by Unicode. The regex crate does not provide exhaustive +support, but covers a useful subset. In particular: + +* [General categories](https://unicode.org/reports/tr18/#General_Category_Property) +* [Scripts and Script Extensions](https://unicode.org/reports/tr18/#Script_Property) +* [Age](https://unicode.org/reports/tr18/#Age) +* A smattering of boolean properties, including all of those specified by + [RL1.2](https://unicode.org/reports/tr18/#RL1.2) explicitly. + +In all cases, property name and value abbreviations are supported, and all +names/values are matched loosely without regard for case, whitespace or +underscores. Property name aliases can be found in Unicode's +[`PropertyAliases.txt`](https://www.unicode.org/Public/UCD/latest/ucd/PropertyAliases.txt) +file, while property value aliases can be found in Unicode's +[`PropertyValueAliases.txt`](https://www.unicode.org/Public/UCD/latest/ucd/PropertyValueAliases.txt) +file. + +The syntax supported is also consistent with the UTS#18 recommendation: + +* `\p{Greek}` selects the `Greek` script. Equivalent expressions follow: + `\p{sc:Greek}`, `\p{Script:Greek}`, `\p{Sc=Greek}`, `\p{script=Greek}`, + `\P{sc!=Greek}`. Similarly for `General_Category` (or `gc` for short) and + `Script_Extensions` (or `scx` for short). +* `\p{age:3.2}` selects all code points in Unicode 3.2. +* `\p{Alphabetic}` selects the "alphabetic" property and can be abbreviated + via `\p{alpha}` (for example). +* Single letter variants for properties with single letter abbreviations. + For example, `\p{Letter}` can be equivalently written as `\pL`. 
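+
+As a brief illustration of the property syntax above (this example is an
+editorial addition and is not part of the upstream document), the following
+matches a run of Greek-script text using one of the equivalent spellings:
+
+```rust
+use regex::Regex;
+
+// `\p{Greek}`, `\p{sc:Greek}` and `\p{Script:Greek}` all select the same
+// script, so any of them could be used here.
+let re = Regex::new(r"\p{sc:Greek}+").unwrap();
+assert_eq!(Some("λόγος"), re.find("word: λόγος").map(|m| m.as_str()));
+```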
+ +The following is a list of all properties supported by the regex crate (starred +properties correspond to properties required by RL1.2): + +* `General_Category` \* (including `Any`, `ASCII` and `Assigned`) +* `Script` \* +* `Script_Extensions` \* +* `Age` +* `ASCII_Hex_Digit` +* `Alphabetic` \* +* `Bidi_Control` +* `Case_Ignorable` +* `Cased` +* `Changes_When_Casefolded` +* `Changes_When_Casemapped` +* `Changes_When_Lowercased` +* `Changes_When_Titlecased` +* `Changes_When_Uppercased` +* `Dash` +* `Default_Ignorable_Code_Point` \* +* `Deprecated` +* `Diacritic` +* `Emoji` +* `Emoji_Presentation` +* `Emoji_Modifier` +* `Emoji_Modifier_Base` +* `Emoji_Component` +* `Extended_Pictographic` +* `Extender` +* `Grapheme_Base` +* `Grapheme_Cluster_Break` +* `Grapheme_Extend` +* `Hex_Digit` +* `IDS_Binary_Operator` +* `IDS_Trinary_Operator` +* `ID_Continue` +* `ID_Start` +* `Join_Control` +* `Logical_Order_Exception` +* `Lowercase` \* +* `Math` +* `Noncharacter_Code_Point` \* +* `Pattern_Syntax` +* `Pattern_White_Space` +* `Prepended_Concatenation_Mark` +* `Quotation_Mark` +* `Radical` +* `Regional_Indicator` +* `Sentence_Break` +* `Sentence_Terminal` +* `Soft_Dotted` +* `Terminal_Punctuation` +* `Unified_Ideograph` +* `Uppercase` \* +* `Variation_Selector` +* `White_Space` \* +* `Word_Break` +* `XID_Continue` +* `XID_Start` + + +## RL1.2a Compatibility Properties + +[UTS#18 RL1.2a](https://unicode.org/reports/tr18/#RL1.2a) + +The regex crate only provides ASCII definitions of the +[compatibility properties documented in UTS#18 Annex C](https://unicode.org/reports/tr18/#Compatibility_Properties) +(sans the `\X` class, for matching grapheme clusters, which isn't provided +at all). This is because it seems to be consistent with most other regular +expression engines, and in particular, because these are often referred to as +"ASCII" or "POSIX" character classes. + +Note that the `\w`, `\s` and `\d` character classes **are** Unicode aware. +Their traditional ASCII definition can be used by disabling Unicode. That is, +`[[:word:]]` and `(?-u)\w` are equivalent. + + +## RL1.3 Subtraction and Intersection + +[UTS#18 RL1.3](https://unicode.org/reports/tr18/#Subtraction_and_Intersection) + +The regex crate provides full support for nested character classes, along with +union, intersection (`&&`), difference (`--`) and symmetric difference (`~~`) +operations on arbitrary character classes. + +For example, to match all non-ASCII letters, you could use either +`[\p{Letter}--\p{Ascii}]` (difference) or `[\p{Letter}&&[^\p{Ascii}]]` +(intersecting the negation). + + +## RL1.4 Simple Word Boundaries + +[UTS#18 RL1.4](https://unicode.org/reports/tr18/#Simple_Word_Boundaries) + +The regex crate provides basic Unicode aware word boundary assertions. A word +boundary assertion can be written as `\b`, or `\B` as its negation. A word +boundary negation corresponds to a zero-width match, where its adjacent +characters correspond to word and non-word, or non-word and word characters. 
+ +Conformance in this case chooses to define word character in the same way that +the `\w` character class is defined: a code point that is a member of one of +the following classes: + +* `\p{Alphabetic}` +* `\p{Join_Control}` +* `\p{gc:Mark}` +* `\p{gc:Decimal_Number}` +* `\p{gc:Connector_Punctuation}` + +In particular, this differs slightly from the +[prescription given in RL1.4](https://unicode.org/reports/tr18/#Simple_Word_Boundaries) +but is permissible according to +[UTS#18 Annex C](https://unicode.org/reports/tr18/#Compatibility_Properties). +Namely, it is convenient and simpler to have `\w` and `\b` be in sync with +one another. + +Finally, Unicode word boundaries can be disabled, which will cause ASCII word +boundaries to be used instead. That is, `\b` is a Unicode word boundary while +`(?-u)\b` is an ASCII-only word boundary. This can occasionally be beneficial +if performance is important, since the implementation of Unicode word +boundaries is currently suboptimal on non-ASCII text. + + +## RL1.5 Simple Loose Matches + +[UTS#18 RL1.5](https://unicode.org/reports/tr18/#Simple_Loose_Matches) + +The regex crate provides full support for case-insensitive matching in +accordance with RL1.5. That is, it uses the "simple" case folding mapping. The +"simple" mapping was chosen because of a key convenient property: every +"simple" mapping is a mapping from exactly one code point to exactly one other +code point. This makes case-insensitive matching of character classes, for +example, straight-forward to implement. + +When case-insensitive mode is enabled (e.g., `(?i)[a]` is equivalent to `a|A`), +then all characters classes are case folded as well. + + +## RL1.6 Line Boundaries + +[UTS#18 RL1.6](https://unicode.org/reports/tr18/#Line_Boundaries) + +The regex crate only provides support for recognizing the `\n` (`END OF LINE`) +character as a line boundary by default. One can also opt into treating +`\r\n|\r|\n` as a line boundary via CRLF mode. This choice was made mostly for +implementation convenience, and to avoid performance cliffs that Unicode word +boundaries are subject to. + + +## RL1.7 Code Points + +[UTS#18 RL1.7](https://unicode.org/reports/tr18/#Supplementary_Characters) + +The regex crate provides full support for Unicode code point matching. Namely, +the fundamental atom of any match is always a single code point. + +Given Rust's strong ties to UTF-8, the following guarantees are also provided: + +* All matches are reported on valid UTF-8 code unit boundaries. That is, any + match range returned by the public regex API is guaranteed to successfully + slice the string that was searched. +* By consequence of the above, it is impossible to match surrogate code points. + No support for UTF-16 is provided, so this is never necessary. + +Note that when Unicode mode is disabled, the fundamental atom of matching is +no longer a code point but a single byte. When Unicode mode is disabled, many +Unicode features are disabled as well. For example, `(?-u)\pL` is not a valid +regex but `\pL(?-u)\xFF` (matches any Unicode `Letter` followed by the literal +byte `\xFF`) is, for example. 
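+
+To make the code point versus byte distinction concrete, here is a minimal
+example (an editorial addition, not part of the upstream document) using the
+byte-oriented `regex::bytes::Regex` API:
+
+```rust
+use regex::bytes::Regex;
+
+// With Unicode mode disabled, `\xFF` denotes the literal byte 0xFF.
+let re = Regex::new(r"(?-u)\xFF").unwrap();
+assert!(re.is_match(b"\xFF"));
+
+// With Unicode mode enabled (the default), the fundamental atom of a match
+// is a code point, so `.` matches the 4-byte UTF-8 encoding of '💩' once.
+let re = Regex::new(r".").unwrap();
+assert_eq!(1, re.find_iter("💩".as_bytes()).count());
+```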
diff --git a/vendor/regex/bench/README.md b/vendor/regex/bench/README.md new file mode 100644 index 00000000000000..3cc6a1a7afa60e --- /dev/null +++ b/vendor/regex/bench/README.md @@ -0,0 +1,2 @@ +Benchmarks for this crate have been moved into the rebar project: +https://github.com/BurntSushi/rebar diff --git a/vendor/regex/rustfmt.toml b/vendor/regex/rustfmt.toml new file mode 100644 index 00000000000000..aa37a218b97e5f --- /dev/null +++ b/vendor/regex/rustfmt.toml @@ -0,0 +1,2 @@ +max_width = 79 +use_small_heuristics = "max" diff --git a/vendor/regex/src/builders.rs b/vendor/regex/src/builders.rs new file mode 100644 index 00000000000000..3bb08de8bfe5e5 --- /dev/null +++ b/vendor/regex/src/builders.rs @@ -0,0 +1,2539 @@ +#![allow(warnings)] + +// This module defines an internal builder that encapsulates all interaction +// with meta::Regex construction, and then 4 public API builders that wrap +// around it. The docs are essentially repeated on each of the 4 public +// builders, with tweaks to the examples as needed. +// +// The reason why there are so many builders is partially because of a misstep +// in the initial API design: the builder constructor takes in the pattern +// strings instead of using the `build` method to accept the pattern strings. +// This means `new` has a different signature for each builder. It probably +// would have been nicer to to use one builder with `fn new()`, and then add +// `build(pat)` and `build_many(pats)` constructors. +// +// The other reason is because I think the `bytes` module should probably +// have its own builder type. That way, it is completely isolated from the +// top-level API. +// +// If I could do it again, I'd probably have a `regex::Builder` and a +// `regex::bytes::Builder`. Each would have `build` and `build_set` (or +// `build_many`) methods for constructing a single pattern `Regex` and a +// multi-pattern `RegexSet`, respectively. + +use alloc::{ + string::{String, ToString}, + sync::Arc, + vec, + vec::Vec, +}; + +use regex_automata::{ + meta, nfa::thompson::WhichCaptures, util::syntax, MatchKind, +}; + +use crate::error::Error; + +/// A builder for constructing a `Regex`, `bytes::Regex`, `RegexSet` or a +/// `bytes::RegexSet`. +/// +/// This is essentially the implementation of the four different builder types +/// in the public API: `RegexBuilder`, `bytes::RegexBuilder`, `RegexSetBuilder` +/// and `bytes::RegexSetBuilder`. 
+#[derive(Clone, Debug)] +struct Builder { + pats: Vec, + metac: meta::Config, + syntaxc: syntax::Config, +} + +impl Default for Builder { + fn default() -> Builder { + let metac = meta::Config::new() + .nfa_size_limit(Some(10 * (1 << 20))) + .hybrid_cache_capacity(2 * (1 << 20)); + Builder { pats: vec![], metac, syntaxc: syntax::Config::default() } + } +} + +impl Builder { + fn new(patterns: I) -> Builder + where + S: AsRef, + I: IntoIterator, + { + let mut b = Builder::default(); + b.pats.extend(patterns.into_iter().map(|p| p.as_ref().to_string())); + b + } + + fn build_one_string(&self) -> Result { + assert_eq!(1, self.pats.len()); + let metac = self + .metac + .clone() + .match_kind(MatchKind::LeftmostFirst) + .utf8_empty(true); + let syntaxc = self.syntaxc.clone().utf8(true); + let pattern = Arc::from(self.pats[0].as_str()); + meta::Builder::new() + .configure(metac) + .syntax(syntaxc) + .build(&pattern) + .map(|meta| crate::Regex { meta, pattern }) + .map_err(Error::from_meta_build_error) + } + + fn build_one_bytes(&self) -> Result { + assert_eq!(1, self.pats.len()); + let metac = self + .metac + .clone() + .match_kind(MatchKind::LeftmostFirst) + .utf8_empty(false); + let syntaxc = self.syntaxc.clone().utf8(false); + let pattern = Arc::from(self.pats[0].as_str()); + meta::Builder::new() + .configure(metac) + .syntax(syntaxc) + .build(&pattern) + .map(|meta| crate::bytes::Regex { meta, pattern }) + .map_err(Error::from_meta_build_error) + } + + fn build_many_string(&self) -> Result { + let metac = self + .metac + .clone() + .match_kind(MatchKind::All) + .utf8_empty(true) + .which_captures(WhichCaptures::None); + let syntaxc = self.syntaxc.clone().utf8(true); + let patterns = Arc::from(self.pats.as_slice()); + meta::Builder::new() + .configure(metac) + .syntax(syntaxc) + .build_many(&patterns) + .map(|meta| crate::RegexSet { meta, patterns }) + .map_err(Error::from_meta_build_error) + } + + fn build_many_bytes(&self) -> Result { + let metac = self + .metac + .clone() + .match_kind(MatchKind::All) + .utf8_empty(false) + .which_captures(WhichCaptures::None); + let syntaxc = self.syntaxc.clone().utf8(false); + let patterns = Arc::from(self.pats.as_slice()); + meta::Builder::new() + .configure(metac) + .syntax(syntaxc) + .build_many(&patterns) + .map(|meta| crate::bytes::RegexSet { meta, patterns }) + .map_err(Error::from_meta_build_error) + } + + fn case_insensitive(&mut self, yes: bool) -> &mut Builder { + self.syntaxc = self.syntaxc.case_insensitive(yes); + self + } + + fn multi_line(&mut self, yes: bool) -> &mut Builder { + self.syntaxc = self.syntaxc.multi_line(yes); + self + } + + fn dot_matches_new_line(&mut self, yes: bool) -> &mut Builder { + self.syntaxc = self.syntaxc.dot_matches_new_line(yes); + self + } + + fn crlf(&mut self, yes: bool) -> &mut Builder { + self.syntaxc = self.syntaxc.crlf(yes); + self + } + + fn line_terminator(&mut self, byte: u8) -> &mut Builder { + self.metac = self.metac.clone().line_terminator(byte); + self.syntaxc = self.syntaxc.line_terminator(byte); + self + } + + fn swap_greed(&mut self, yes: bool) -> &mut Builder { + self.syntaxc = self.syntaxc.swap_greed(yes); + self + } + + fn ignore_whitespace(&mut self, yes: bool) -> &mut Builder { + self.syntaxc = self.syntaxc.ignore_whitespace(yes); + self + } + + fn unicode(&mut self, yes: bool) -> &mut Builder { + self.syntaxc = self.syntaxc.unicode(yes); + self + } + + fn octal(&mut self, yes: bool) -> &mut Builder { + self.syntaxc = self.syntaxc.octal(yes); + self + } + + fn size_limit(&mut self, limit: 
usize) -> &mut Builder { + self.metac = self.metac.clone().nfa_size_limit(Some(limit)); + self + } + + fn dfa_size_limit(&mut self, limit: usize) -> &mut Builder { + self.metac = self.metac.clone().hybrid_cache_capacity(limit); + self + } + + fn nest_limit(&mut self, limit: u32) -> &mut Builder { + self.syntaxc = self.syntaxc.nest_limit(limit); + self + } +} + +pub(crate) mod string { + use crate::{error::Error, Regex, RegexSet}; + + use super::Builder; + + /// A configurable builder for a [`Regex`]. + /// + /// This builder can be used to programmatically set flags such as `i` + /// (case insensitive) and `x` (for verbose mode). This builder can also be + /// used to configure things like the line terminator and a size limit on + /// the compiled regular expression. + #[derive(Clone, Debug)] + pub struct RegexBuilder { + builder: Builder, + } + + impl RegexBuilder { + /// Create a new builder with a default configuration for the given + /// pattern. + /// + /// If the pattern is invalid or exceeds the configured size limits, + /// then an error will be returned when [`RegexBuilder::build`] is + /// called. + pub fn new(pattern: &str) -> RegexBuilder { + RegexBuilder { builder: Builder::new([pattern]) } + } + + /// Compiles the pattern given to `RegexBuilder::new` with the + /// configuration set on this builder. + /// + /// If the pattern isn't a valid regex or if a configured size limit + /// was exceeded, then an error is returned. + pub fn build(&self) -> Result { + self.builder.build_one_string() + } + + /// This configures Unicode mode for the entire pattern. + /// + /// Enabling Unicode mode does a number of things: + /// + /// * Most fundamentally, it causes the fundamental atom of matching + /// to be a single codepoint. When Unicode mode is disabled, it's a + /// single byte. For example, when Unicode mode is enabled, `.` will + /// match `💩` once, where as it will match 4 times when Unicode mode + /// is disabled. (Since the UTF-8 encoding of `💩` is 4 bytes long.) + /// * Case insensitive matching uses Unicode simple case folding rules. + /// * Unicode character classes like `\p{Letter}` and `\p{Greek}` are + /// available. + /// * Perl character classes are Unicode aware. That is, `\w`, `\s` and + /// `\d`. + /// * The word boundary assertions, `\b` and `\B`, use the Unicode + /// definition of a word character. + /// + /// Note that if Unicode mode is disabled, then the regex will fail to + /// compile if it could match invalid UTF-8. For example, when Unicode + /// mode is disabled, then since `.` matches any byte (except for + /// `\n`), then it can match invalid UTF-8 and thus building a regex + /// from it will fail. Another example is `\w` and `\W`. Since `\w` can + /// only match ASCII bytes when Unicode mode is disabled, it's allowed. + /// But `\W` can match more than ASCII bytes, including invalid UTF-8, + /// and so it is not allowed. This restriction can be lifted only by + /// using a [`bytes::Regex`](crate::bytes::Regex). + /// + /// For more details on the Unicode support in this crate, see the + /// [Unicode section](crate#unicode) in this crate's top-level + /// documentation. + /// + /// The default for this is `true`. + /// + /// # Example + /// + /// ``` + /// use regex::RegexBuilder; + /// + /// let re = RegexBuilder::new(r"\w") + /// .unicode(false) + /// .build() + /// .unwrap(); + /// // Normally greek letters would be included in \w, but since + /// // Unicode mode is disabled, it only matches ASCII letters. 
+ /// assert!(!re.is_match("δ")); + /// + /// let re = RegexBuilder::new(r"s") + /// .case_insensitive(true) + /// .unicode(false) + /// .build() + /// .unwrap(); + /// // Normally 'ſ' is included when searching for 's' case + /// // insensitively due to Unicode's simple case folding rules. But + /// // when Unicode mode is disabled, only ASCII case insensitive rules + /// // are used. + /// assert!(!re.is_match("ſ")); + /// ``` + pub fn unicode(&mut self, yes: bool) -> &mut RegexBuilder { + self.builder.unicode(yes); + self + } + + /// This configures whether to enable case insensitive matching for the + /// entire pattern. + /// + /// This setting can also be configured using the inline flag `i` + /// in the pattern. For example, `(?i:foo)` matches `foo` case + /// insensitively while `(?-i:foo)` matches `foo` case sensitively. + /// + /// The default for this is `false`. + /// + /// # Example + /// + /// ``` + /// use regex::RegexBuilder; + /// + /// let re = RegexBuilder::new(r"foo(?-i:bar)quux") + /// .case_insensitive(true) + /// .build() + /// .unwrap(); + /// assert!(re.is_match("FoObarQuUx")); + /// // Even though case insensitive matching is enabled in the builder, + /// // it can be locally disabled within the pattern. In this case, + /// // `bar` is matched case sensitively. + /// assert!(!re.is_match("fooBARquux")); + /// ``` + pub fn case_insensitive(&mut self, yes: bool) -> &mut RegexBuilder { + self.builder.case_insensitive(yes); + self + } + + /// This configures multi-line mode for the entire pattern. + /// + /// Enabling multi-line mode changes the behavior of the `^` and `$` + /// anchor assertions. Instead of only matching at the beginning and + /// end of a haystack, respectively, multi-line mode causes them to + /// match at the beginning and end of a line *in addition* to the + /// beginning and end of a haystack. More precisely, `^` will match at + /// the position immediately following a `\n` and `$` will match at the + /// position immediately preceding a `\n`. + /// + /// The behavior of this option can be impacted by other settings too: + /// + /// * The [`RegexBuilder::line_terminator`] option changes `\n` above + /// to any ASCII byte. + /// * The [`RegexBuilder::crlf`] option changes the line terminator to + /// be either `\r` or `\n`, but never at the position between a `\r` + /// and `\n`. + /// + /// This setting can also be configured using the inline flag `m` in + /// the pattern. + /// + /// The default for this is `false`. + /// + /// # Example + /// + /// ``` + /// use regex::RegexBuilder; + /// + /// let re = RegexBuilder::new(r"^foo$") + /// .multi_line(true) + /// .build() + /// .unwrap(); + /// assert_eq!(Some(1..4), re.find("\nfoo\n").map(|m| m.range())); + /// ``` + pub fn multi_line(&mut self, yes: bool) -> &mut RegexBuilder { + self.builder.multi_line(yes); + self + } + + /// This configures dot-matches-new-line mode for the entire pattern. + /// + /// Perhaps surprisingly, the default behavior for `.` is not to match + /// any character, but rather, to match any character except for the + /// line terminator (which is `\n` by default). When this mode is + /// enabled, the behavior changes such that `.` truly matches any + /// character. + /// + /// This setting can also be configured using the inline flag `s` in + /// the pattern. For example, `(?s:.)` and `\p{any}` are equivalent + /// regexes. + /// + /// The default for this is `false`. 
+ /// + /// # Example + /// + /// ``` + /// use regex::RegexBuilder; + /// + /// let re = RegexBuilder::new(r"foo.bar") + /// .dot_matches_new_line(true) + /// .build() + /// .unwrap(); + /// let hay = "foo\nbar"; + /// assert_eq!(Some("foo\nbar"), re.find(hay).map(|m| m.as_str())); + /// ``` + pub fn dot_matches_new_line( + &mut self, + yes: bool, + ) -> &mut RegexBuilder { + self.builder.dot_matches_new_line(yes); + self + } + + /// This configures CRLF mode for the entire pattern. + /// + /// When CRLF mode is enabled, both `\r` ("carriage return" or CR for + /// short) and `\n` ("line feed" or LF for short) are treated as line + /// terminators. This results in the following: + /// + /// * Unless dot-matches-new-line mode is enabled, `.` will now match + /// any character except for `\n` and `\r`. + /// * When multi-line mode is enabled, `^` will match immediately + /// following a `\n` or a `\r`. Similarly, `$` will match immediately + /// preceding a `\n` or a `\r`. Neither `^` nor `$` will ever match + /// between `\r` and `\n`. + /// + /// This setting can also be configured using the inline flag `R` in + /// the pattern. + /// + /// The default for this is `false`. + /// + /// # Example + /// + /// ``` + /// use regex::RegexBuilder; + /// + /// let re = RegexBuilder::new(r"^foo$") + /// .multi_line(true) + /// .crlf(true) + /// .build() + /// .unwrap(); + /// let hay = "\r\nfoo\r\n"; + /// // If CRLF mode weren't enabled here, then '$' wouldn't match + /// // immediately after 'foo', and thus no match would be found. + /// assert_eq!(Some("foo"), re.find(hay).map(|m| m.as_str())); + /// ``` + /// + /// This example demonstrates that `^` will never match at a position + /// between `\r` and `\n`. (`$` will similarly not match between a `\r` + /// and a `\n`.) + /// + /// ``` + /// use regex::RegexBuilder; + /// + /// let re = RegexBuilder::new(r"^") + /// .multi_line(true) + /// .crlf(true) + /// .build() + /// .unwrap(); + /// let hay = "\r\n\r\n"; + /// let ranges: Vec<_> = re.find_iter(hay).map(|m| m.range()).collect(); + /// assert_eq!(ranges, vec![0..0, 2..2, 4..4]); + /// ``` + pub fn crlf(&mut self, yes: bool) -> &mut RegexBuilder { + self.builder.crlf(yes); + self + } + + /// Configures the line terminator to be used by the regex. + /// + /// The line terminator is relevant in two ways for a particular regex: + /// + /// * When dot-matches-new-line mode is *not* enabled (the default), + /// then `.` will match any character except for the configured line + /// terminator. + /// * When multi-line mode is enabled (not the default), then `^` and + /// `$` will match immediately after and before, respectively, a line + /// terminator. + /// + /// In both cases, if CRLF mode is enabled in a particular context, + /// then it takes precedence over any configured line terminator. + /// + /// This option cannot be configured from within the pattern. + /// + /// The default line terminator is `\n`. + /// + /// # Example + /// + /// This shows how to treat the NUL byte as a line terminator. This can + /// be a useful heuristic when searching binary data. 
+ /// + /// ``` + /// use regex::RegexBuilder; + /// + /// let re = RegexBuilder::new(r"^foo$") + /// .multi_line(true) + /// .line_terminator(b'\x00') + /// .build() + /// .unwrap(); + /// let hay = "\x00foo\x00"; + /// assert_eq!(Some(1..4), re.find(hay).map(|m| m.range())); + /// ``` + /// + /// This example shows that the behavior of `.` is impacted by this + /// setting as well: + /// + /// ``` + /// use regex::RegexBuilder; + /// + /// let re = RegexBuilder::new(r".") + /// .line_terminator(b'\x00') + /// .build() + /// .unwrap(); + /// assert!(re.is_match("\n")); + /// assert!(!re.is_match("\x00")); + /// ``` + /// + /// This shows that building a regex will fail if the byte given + /// is not ASCII and the pattern could result in matching invalid + /// UTF-8. This is because any singular non-ASCII byte is not valid + /// UTF-8, and it is not permitted for a [`Regex`] to match invalid + /// UTF-8. (It is permissible to use a non-ASCII byte when building a + /// [`bytes::Regex`](crate::bytes::Regex).) + /// + /// ``` + /// use regex::RegexBuilder; + /// + /// assert!(RegexBuilder::new(r".").line_terminator(0x80).build().is_err()); + /// // Note that using a non-ASCII byte isn't enough on its own to + /// // cause regex compilation to fail. You actually have to make use + /// // of it in the regex in a way that leads to matching invalid + /// // UTF-8. If you don't, then regex compilation will succeed! + /// assert!(RegexBuilder::new(r"a").line_terminator(0x80).build().is_ok()); + /// ``` + pub fn line_terminator(&mut self, byte: u8) -> &mut RegexBuilder { + self.builder.line_terminator(byte); + self + } + + /// This configures swap-greed mode for the entire pattern. + /// + /// When swap-greed mode is enabled, patterns like `a+` will become + /// non-greedy and patterns like `a+?` will become greedy. In other + /// words, the meanings of `a+` and `a+?` are switched. + /// + /// This setting can also be configured using the inline flag `U` in + /// the pattern. + /// + /// The default for this is `false`. + /// + /// # Example + /// + /// ``` + /// use regex::RegexBuilder; + /// + /// let re = RegexBuilder::new(r"a+") + /// .swap_greed(true) + /// .build() + /// .unwrap(); + /// assert_eq!(Some("a"), re.find("aaa").map(|m| m.as_str())); + /// ``` + pub fn swap_greed(&mut self, yes: bool) -> &mut RegexBuilder { + self.builder.swap_greed(yes); + self + } + + /// This configures verbose mode for the entire pattern. + /// + /// When enabled, whitespace will treated as insignificant in the + /// pattern and `#` can be used to start a comment until the next new + /// line. + /// + /// Normally, in most places in a pattern, whitespace is treated + /// literally. For example ` +` will match one or more ASCII whitespace + /// characters. + /// + /// When verbose mode is enabled, `\#` can be used to match a literal + /// `#` and `\ ` can be used to match a literal ASCII whitespace + /// character. + /// + /// Verbose mode is useful for permitting regexes to be formatted and + /// broken up more nicely. This may make them more easily readable. + /// + /// This setting can also be configured using the inline flag `x` in + /// the pattern. + /// + /// The default for this is `false`. + /// + /// # Example + /// + /// ``` + /// use regex::RegexBuilder; + /// + /// let pat = r" + /// \b + /// (?\p{Uppercase}\w*) # always start with uppercase letter + /// [\s--\n]+ # whitespace should separate names + /// (?: # middle name can be an initial! 
+ /// (?:(?\p{Uppercase})\.|(?\p{Uppercase}\w*)) + /// [\s--\n]+ + /// )? + /// (?\p{Uppercase}\w*) + /// \b + /// "; + /// let re = RegexBuilder::new(pat) + /// .ignore_whitespace(true) + /// .build() + /// .unwrap(); + /// + /// let caps = re.captures("Harry Potter").unwrap(); + /// assert_eq!("Harry", &caps["first"]); + /// assert_eq!("Potter", &caps["last"]); + /// + /// let caps = re.captures("Harry J. Potter").unwrap(); + /// assert_eq!("Harry", &caps["first"]); + /// // Since a middle name/initial isn't required for an overall match, + /// // we can't assume that 'initial' or 'middle' will be populated! + /// assert_eq!(Some("J"), caps.name("initial").map(|m| m.as_str())); + /// assert_eq!(None, caps.name("middle").map(|m| m.as_str())); + /// assert_eq!("Potter", &caps["last"]); + /// + /// let caps = re.captures("Harry James Potter").unwrap(); + /// assert_eq!("Harry", &caps["first"]); + /// // Since a middle name/initial isn't required for an overall match, + /// // we can't assume that 'initial' or 'middle' will be populated! + /// assert_eq!(None, caps.name("initial").map(|m| m.as_str())); + /// assert_eq!(Some("James"), caps.name("middle").map(|m| m.as_str())); + /// assert_eq!("Potter", &caps["last"]); + /// ``` + pub fn ignore_whitespace(&mut self, yes: bool) -> &mut RegexBuilder { + self.builder.ignore_whitespace(yes); + self + } + + /// This configures octal mode for the entire pattern. + /// + /// Octal syntax is a little-known way of uttering Unicode codepoints + /// in a pattern. For example, `a`, `\x61`, `\u0061` and `\141` are all + /// equivalent patterns, where the last example shows octal syntax. + /// + /// While supporting octal syntax isn't in and of itself a problem, + /// it does make good error messages harder. That is, in PCRE based + /// regex engines, syntax like `\1` invokes a backreference, which is + /// explicitly unsupported this library. However, many users expect + /// backreferences to be supported. Therefore, when octal support + /// is disabled, the error message will explicitly mention that + /// backreferences aren't supported. + /// + /// The default for this is `false`. + /// + /// # Example + /// + /// ``` + /// use regex::RegexBuilder; + /// + /// // Normally this pattern would not compile, with an error message + /// // about backreferences not being supported. But with octal mode + /// // enabled, octal escape sequences work. + /// let re = RegexBuilder::new(r"\141") + /// .octal(true) + /// .build() + /// .unwrap(); + /// assert!(re.is_match("a")); + /// ``` + pub fn octal(&mut self, yes: bool) -> &mut RegexBuilder { + self.builder.octal(yes); + self + } + + /// Sets the approximate size limit, in bytes, of the compiled regex. + /// + /// This roughly corresponds to the number of heap memory, in + /// bytes, occupied by a single regex. If the regex would otherwise + /// approximately exceed this limit, then compiling that regex will + /// fail. + /// + /// The main utility of a method like this is to avoid compiling + /// regexes that use an unexpected amount of resources, such as + /// time and memory. Even if the memory usage of a large regex is + /// acceptable, its search time may not be. Namely, worst case time + /// complexity for search is `O(m * n)`, where `m ~ len(pattern)` and + /// `n ~ len(haystack)`. That is, search time depends, in part, on the + /// size of the compiled regex. This means that putting a limit on the + /// size of the regex limits how much a regex can impact search time. 
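In practice the failure mode described above surfaces as an ordinary `Err` from `build()`, so callers compiling untrusted patterns can reject them gracefully. A small sketch, assuming a 10 KiB budget is appropriate for the application (the figure is arbitrary):

```rust
use regex::RegexBuilder;

fn main() {
    // Patterns that would compile into a large program become a normal,
    // recoverable error once a size limit is in place.
    let untrusted = r"\w{100}";
    match RegexBuilder::new(untrusted).size_limit(10 * 1024).build() {
        Ok(re) => println!("compiled: {}", re.as_str()),
        Err(err) => println!("rejected: {}", err),
    }
}
```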
+ /// + /// For more information about regex size limits, see the section on + /// [untrusted inputs](crate#untrusted-input) in the top-level crate + /// documentation. + /// + /// The default for this is some reasonable number that permits most + /// patterns to compile successfully. + /// + /// # Example + /// + /// ``` + /// # if !cfg!(target_pointer_width = "64") { return; } // see #1041 + /// use regex::RegexBuilder; + /// + /// // It may surprise you how big some seemingly small patterns can + /// // be! Since \w is Unicode aware, this generates a regex that can + /// // match approximately 140,000 distinct codepoints. + /// assert!(RegexBuilder::new(r"\w").size_limit(45_000).build().is_err()); + /// ``` + pub fn size_limit(&mut self, bytes: usize) -> &mut RegexBuilder { + self.builder.size_limit(bytes); + self + } + + /// Set the approximate capacity, in bytes, of the cache of transitions + /// used by the lazy DFA. + /// + /// While the lazy DFA isn't always used, in tends to be the most + /// commonly use regex engine in default configurations. It tends to + /// adopt the performance profile of a fully build DFA, but without the + /// downside of taking worst case exponential time to build. + /// + /// The downside is that it needs to keep a cache of transitions and + /// states that are built while running a search, and this cache + /// can fill up. When it fills up, the cache will reset itself. Any + /// previously generated states and transitions will then need to be + /// re-generated. If this happens too many times, then this library + /// will bail out of using the lazy DFA and switch to a different regex + /// engine. + /// + /// If your regex provokes this particular downside of the lazy DFA, + /// then it may be beneficial to increase its cache capacity. This will + /// potentially reduce the frequency of cache resetting (ideally to + /// `0`). While it won't fix all potential performance problems with + /// the lazy DFA, increasing the cache capacity does fix some. + /// + /// There is no easy way to determine, a priori, whether increasing + /// this cache capacity will help. In general, the larger your regex, + /// the more cache it's likely to use. But that isn't an ironclad rule. + /// For example, a regex like `[01]*1[01]{N}` would normally produce a + /// fully build DFA that is exponential in size with respect to `N`. + /// The lazy DFA will prevent exponential space blow-up, but it cache + /// is likely to fill up, even when it's large and even for smallish + /// values of `N`. + /// + /// If you aren't sure whether this helps or not, it is sensible to + /// set this to some arbitrarily large number in testing, such as + /// `usize::MAX`. Namely, this represents the amount of capacity that + /// *may* be used. It's probably not a good idea to use `usize::MAX` in + /// production though, since it implies there are no controls on heap + /// memory used by this library during a search. In effect, set it to + /// whatever you're willing to allocate for a single regex search. + pub fn dfa_size_limit(&mut self, bytes: usize) -> &mut RegexBuilder { + self.builder.dfa_size_limit(bytes); + self + } + + /// Set the nesting limit for this parser. + /// + /// The nesting limit controls how deep the abstract syntax tree is + /// allowed to be. If the AST exceeds the given limit (e.g., with too + /// many nested groups), then an error is returned by the parser. 
+ /// + /// The purpose of this limit is to act as a heuristic to prevent stack + /// overflow for consumers that do structural induction on an AST using + /// explicit recursion. While this crate never does this (instead using + /// constant stack space and moving the call stack to the heap), other + /// crates may. + /// + /// This limit is not checked until the entire AST is parsed. + /// Therefore, if callers want to put a limit on the amount of heap + /// space used, then they should impose a limit on the length, in + /// bytes, of the concrete pattern string. In particular, this is + /// viable since this parser implementation will limit itself to heap + /// space proportional to the length of the pattern string. See also + /// the [untrusted inputs](crate#untrusted-input) section in the + /// top-level crate documentation for more information about this. + /// + /// Note that a nest limit of `0` will return a nest limit error for + /// most patterns but not all. For example, a nest limit of `0` permits + /// `a` but not `ab`, since `ab` requires an explicit concatenation, + /// which results in a nest depth of `1`. In general, a nest limit is + /// not something that manifests in an obvious way in the concrete + /// syntax, therefore, it should not be used in a granular way. + /// + /// # Example + /// + /// ``` + /// use regex::RegexBuilder; + /// + /// assert!(RegexBuilder::new(r"a").nest_limit(0).build().is_ok()); + /// assert!(RegexBuilder::new(r"ab").nest_limit(0).build().is_err()); + /// ``` + pub fn nest_limit(&mut self, limit: u32) -> &mut RegexBuilder { + self.builder.nest_limit(limit); + self + } + } + + /// A configurable builder for a [`RegexSet`]. + /// + /// This builder can be used to programmatically set flags such as + /// `i` (case insensitive) and `x` (for verbose mode). This builder + /// can also be used to configure things like the line terminator + /// and a size limit on the compiled regular expression. + #[derive(Clone, Debug)] + pub struct RegexSetBuilder { + builder: Builder, + } + + impl RegexSetBuilder { + /// Create a new builder with a default configuration for the given + /// patterns. + /// + /// If the patterns are invalid or exceed the configured size limits, + /// then an error will be returned when [`RegexSetBuilder::build`] is + /// called. + pub fn new(patterns: I) -> RegexSetBuilder + where + I: IntoIterator, + S: AsRef, + { + RegexSetBuilder { builder: Builder::new(patterns) } + } + + /// Compiles the patterns given to `RegexSetBuilder::new` with the + /// configuration set on this builder. + /// + /// If the patterns aren't valid regexes or if a configured size limit + /// was exceeded, then an error is returned. + pub fn build(&self) -> Result { + self.builder.build_many_string() + } + + /// This configures Unicode mode for the all of the patterns. + /// + /// Enabling Unicode mode does a number of things: + /// + /// * Most fundamentally, it causes the fundamental atom of matching + /// to be a single codepoint. When Unicode mode is disabled, it's a + /// single byte. For example, when Unicode mode is enabled, `.` will + /// match `💩` once, where as it will match 4 times when Unicode mode + /// is disabled. (Since the UTF-8 encoding of `💩` is 4 bytes long.) + /// * Case insensitive matching uses Unicode simple case folding rules. + /// * Unicode character classes like `\p{Letter}` and `\p{Greek}` are + /// available. + /// * Perl character classes are Unicode aware. That is, `\w`, `\s` and + /// `\d`. 
+ /// * The word boundary assertions, `\b` and `\B`, use the Unicode + /// definition of a word character. + /// + /// Note that if Unicode mode is disabled, then the regex will fail to + /// compile if it could match invalid UTF-8. For example, when Unicode + /// mode is disabled, then since `.` matches any byte (except for + /// `\n`), then it can match invalid UTF-8 and thus building a regex + /// from it will fail. Another example is `\w` and `\W`. Since `\w` can + /// only match ASCII bytes when Unicode mode is disabled, it's allowed. + /// But `\W` can match more than ASCII bytes, including invalid UTF-8, + /// and so it is not allowed. This restriction can be lifted only by + /// using a [`bytes::RegexSet`](crate::bytes::RegexSet). + /// + /// For more details on the Unicode support in this crate, see the + /// [Unicode section](crate#unicode) in this crate's top-level + /// documentation. + /// + /// The default for this is `true`. + /// + /// # Example + /// + /// ``` + /// use regex::RegexSetBuilder; + /// + /// let re = RegexSetBuilder::new([r"\w"]) + /// .unicode(false) + /// .build() + /// .unwrap(); + /// // Normally greek letters would be included in \w, but since + /// // Unicode mode is disabled, it only matches ASCII letters. + /// assert!(!re.is_match("δ")); + /// + /// let re = RegexSetBuilder::new([r"s"]) + /// .case_insensitive(true) + /// .unicode(false) + /// .build() + /// .unwrap(); + /// // Normally 'ſ' is included when searching for 's' case + /// // insensitively due to Unicode's simple case folding rules. But + /// // when Unicode mode is disabled, only ASCII case insensitive rules + /// // are used. + /// assert!(!re.is_match("ſ")); + /// ``` + pub fn unicode(&mut self, yes: bool) -> &mut RegexSetBuilder { + self.builder.unicode(yes); + self + } + + /// This configures whether to enable case insensitive matching for all + /// of the patterns. + /// + /// This setting can also be configured using the inline flag `i` + /// in the pattern. For example, `(?i:foo)` matches `foo` case + /// insensitively while `(?-i:foo)` matches `foo` case sensitively. + /// + /// The default for this is `false`. + /// + /// # Example + /// + /// ``` + /// use regex::RegexSetBuilder; + /// + /// let re = RegexSetBuilder::new([r"foo(?-i:bar)quux"]) + /// .case_insensitive(true) + /// .build() + /// .unwrap(); + /// assert!(re.is_match("FoObarQuUx")); + /// // Even though case insensitive matching is enabled in the builder, + /// // it can be locally disabled within the pattern. In this case, + /// // `bar` is matched case sensitively. + /// assert!(!re.is_match("fooBARquux")); + /// ``` + pub fn case_insensitive(&mut self, yes: bool) -> &mut RegexSetBuilder { + self.builder.case_insensitive(yes); + self + } + + /// This configures multi-line mode for all of the patterns. + /// + /// Enabling multi-line mode changes the behavior of the `^` and `$` + /// anchor assertions. Instead of only matching at the beginning and + /// end of a haystack, respectively, multi-line mode causes them to + /// match at the beginning and end of a line *in addition* to the + /// beginning and end of a haystack. More precisely, `^` will match at + /// the position immediately following a `\n` and `$` will match at the + /// position immediately preceding a `\n`. + /// + /// The behavior of this option can be impacted by other settings too: + /// + /// * The [`RegexSetBuilder::line_terminator`] option changes `\n` + /// above to any ASCII byte. 
+ /// * The [`RegexSetBuilder::crlf`] option changes the line terminator + /// to be either `\r` or `\n`, but never at the position between a `\r` + /// and `\n`. + /// + /// This setting can also be configured using the inline flag `m` in + /// the pattern. + /// + /// The default for this is `false`. + /// + /// # Example + /// + /// ``` + /// use regex::RegexSetBuilder; + /// + /// let re = RegexSetBuilder::new([r"^foo$"]) + /// .multi_line(true) + /// .build() + /// .unwrap(); + /// assert!(re.is_match("\nfoo\n")); + /// ``` + pub fn multi_line(&mut self, yes: bool) -> &mut RegexSetBuilder { + self.builder.multi_line(yes); + self + } + + /// This configures dot-matches-new-line mode for the entire pattern. + /// + /// Perhaps surprisingly, the default behavior for `.` is not to match + /// any character, but rather, to match any character except for the + /// line terminator (which is `\n` by default). When this mode is + /// enabled, the behavior changes such that `.` truly matches any + /// character. + /// + /// This setting can also be configured using the inline flag `s` in + /// the pattern. For example, `(?s:.)` and `\p{any}` are equivalent + /// regexes. + /// + /// The default for this is `false`. + /// + /// # Example + /// + /// ``` + /// use regex::RegexSetBuilder; + /// + /// let re = RegexSetBuilder::new([r"foo.bar"]) + /// .dot_matches_new_line(true) + /// .build() + /// .unwrap(); + /// let hay = "foo\nbar"; + /// assert!(re.is_match(hay)); + /// ``` + pub fn dot_matches_new_line( + &mut self, + yes: bool, + ) -> &mut RegexSetBuilder { + self.builder.dot_matches_new_line(yes); + self + } + + /// This configures CRLF mode for all of the patterns. + /// + /// When CRLF mode is enabled, both `\r` ("carriage return" or CR for + /// short) and `\n` ("line feed" or LF for short) are treated as line + /// terminators. This results in the following: + /// + /// * Unless dot-matches-new-line mode is enabled, `.` will now match + /// any character except for `\n` and `\r`. + /// * When multi-line mode is enabled, `^` will match immediately + /// following a `\n` or a `\r`. Similarly, `$` will match immediately + /// preceding a `\n` or a `\r`. Neither `^` nor `$` will ever match + /// between `\r` and `\n`. + /// + /// This setting can also be configured using the inline flag `R` in + /// the pattern. + /// + /// The default for this is `false`. + /// + /// # Example + /// + /// ``` + /// use regex::RegexSetBuilder; + /// + /// let re = RegexSetBuilder::new([r"^foo$"]) + /// .multi_line(true) + /// .crlf(true) + /// .build() + /// .unwrap(); + /// let hay = "\r\nfoo\r\n"; + /// // If CRLF mode weren't enabled here, then '$' wouldn't match + /// // immediately after 'foo', and thus no match would be found. + /// assert!(re.is_match(hay)); + /// ``` + /// + /// This example demonstrates that `^` will never match at a position + /// between `\r` and `\n`. (`$` will similarly not match between a `\r` + /// and a `\n`.) + /// + /// ``` + /// use regex::RegexSetBuilder; + /// + /// let re = RegexSetBuilder::new([r"^\n"]) + /// .multi_line(true) + /// .crlf(true) + /// .build() + /// .unwrap(); + /// assert!(!re.is_match("\r\n")); + /// ``` + pub fn crlf(&mut self, yes: bool) -> &mut RegexSetBuilder { + self.builder.crlf(yes); + self + } + + /// Configures the line terminator to be used by the regex. 
+ /// + /// The line terminator is relevant in two ways for a particular regex: + /// + /// * When dot-matches-new-line mode is *not* enabled (the default), + /// then `.` will match any character except for the configured line + /// terminator. + /// * When multi-line mode is enabled (not the default), then `^` and + /// `$` will match immediately after and before, respectively, a line + /// terminator. + /// + /// In both cases, if CRLF mode is enabled in a particular context, + /// then it takes precedence over any configured line terminator. + /// + /// This option cannot be configured from within the pattern. + /// + /// The default line terminator is `\n`. + /// + /// # Example + /// + /// This shows how to treat the NUL byte as a line terminator. This can + /// be a useful heuristic when searching binary data. + /// + /// ``` + /// use regex::RegexSetBuilder; + /// + /// let re = RegexSetBuilder::new([r"^foo$"]) + /// .multi_line(true) + /// .line_terminator(b'\x00') + /// .build() + /// .unwrap(); + /// let hay = "\x00foo\x00"; + /// assert!(re.is_match(hay)); + /// ``` + /// + /// This example shows that the behavior of `.` is impacted by this + /// setting as well: + /// + /// ``` + /// use regex::RegexSetBuilder; + /// + /// let re = RegexSetBuilder::new([r"."]) + /// .line_terminator(b'\x00') + /// .build() + /// .unwrap(); + /// assert!(re.is_match("\n")); + /// assert!(!re.is_match("\x00")); + /// ``` + /// + /// This shows that building a regex will fail if the byte given + /// is not ASCII and the pattern could result in matching invalid + /// UTF-8. This is because any singular non-ASCII byte is not valid + /// UTF-8, and it is not permitted for a [`RegexSet`] to match invalid + /// UTF-8. (It is permissible to use a non-ASCII byte when building a + /// [`bytes::RegexSet`](crate::bytes::RegexSet).) + /// + /// ``` + /// use regex::RegexSetBuilder; + /// + /// assert!( + /// RegexSetBuilder::new([r"."]) + /// .line_terminator(0x80) + /// .build() + /// .is_err() + /// ); + /// // Note that using a non-ASCII byte isn't enough on its own to + /// // cause regex compilation to fail. You actually have to make use + /// // of it in the regex in a way that leads to matching invalid + /// // UTF-8. If you don't, then regex compilation will succeed! + /// assert!( + /// RegexSetBuilder::new([r"a"]) + /// .line_terminator(0x80) + /// .build() + /// .is_ok() + /// ); + /// ``` + pub fn line_terminator(&mut self, byte: u8) -> &mut RegexSetBuilder { + self.builder.line_terminator(byte); + self + } + + /// This configures swap-greed mode for all of the patterns. + /// + /// When swap-greed mode is enabled, patterns like `a+` will become + /// non-greedy and patterns like `a+?` will become greedy. In other + /// words, the meanings of `a+` and `a+?` are switched. + /// + /// This setting can also be configured using the inline flag `U` in + /// the pattern. + /// + /// Note that this is generally not useful for a `RegexSet` since a + /// `RegexSet` can only report whether a pattern matches or not. Since + /// greediness never impacts whether a match is found or not (only the + /// offsets of the match), it follows that whether parts of a pattern + /// are greedy or not doesn't matter for a `RegexSet`. + /// + /// The default for this is `false`. + pub fn swap_greed(&mut self, yes: bool) -> &mut RegexSetBuilder { + self.builder.swap_greed(yes); + self + } + + /// This configures verbose mode for all of the patterns. 
+ /// + /// When enabled, whitespace will treated as insignificant in the + /// pattern and `#` can be used to start a comment until the next new + /// line. + /// + /// Normally, in most places in a pattern, whitespace is treated + /// literally. For example ` +` will match one or more ASCII whitespace + /// characters. + /// + /// When verbose mode is enabled, `\#` can be used to match a literal + /// `#` and `\ ` can be used to match a literal ASCII whitespace + /// character. + /// + /// Verbose mode is useful for permitting regexes to be formatted and + /// broken up more nicely. This may make them more easily readable. + /// + /// This setting can also be configured using the inline flag `x` in + /// the pattern. + /// + /// The default for this is `false`. + /// + /// # Example + /// + /// ``` + /// use regex::RegexSetBuilder; + /// + /// let pat = r" + /// \b + /// (?\p{Uppercase}\w*) # always start with uppercase letter + /// [\s--\n]+ # whitespace should separate names + /// (?: # middle name can be an initial! + /// (?:(?\p{Uppercase})\.|(?\p{Uppercase}\w*)) + /// [\s--\n]+ + /// )? + /// (?\p{Uppercase}\w*) + /// \b + /// "; + /// let re = RegexSetBuilder::new([pat]) + /// .ignore_whitespace(true) + /// .build() + /// .unwrap(); + /// assert!(re.is_match("Harry Potter")); + /// assert!(re.is_match("Harry J. Potter")); + /// assert!(re.is_match("Harry James Potter")); + /// assert!(!re.is_match("harry J. Potter")); + /// ``` + pub fn ignore_whitespace( + &mut self, + yes: bool, + ) -> &mut RegexSetBuilder { + self.builder.ignore_whitespace(yes); + self + } + + /// This configures octal mode for all of the patterns. + /// + /// Octal syntax is a little-known way of uttering Unicode codepoints + /// in a pattern. For example, `a`, `\x61`, `\u0061` and `\141` are all + /// equivalent patterns, where the last example shows octal syntax. + /// + /// While supporting octal syntax isn't in and of itself a problem, + /// it does make good error messages harder. That is, in PCRE based + /// regex engines, syntax like `\1` invokes a backreference, which is + /// explicitly unsupported this library. However, many users expect + /// backreferences to be supported. Therefore, when octal support + /// is disabled, the error message will explicitly mention that + /// backreferences aren't supported. + /// + /// The default for this is `false`. + /// + /// # Example + /// + /// ``` + /// use regex::RegexSetBuilder; + /// + /// // Normally this pattern would not compile, with an error message + /// // about backreferences not being supported. But with octal mode + /// // enabled, octal escape sequences work. + /// let re = RegexSetBuilder::new([r"\141"]) + /// .octal(true) + /// .build() + /// .unwrap(); + /// assert!(re.is_match("a")); + /// ``` + pub fn octal(&mut self, yes: bool) -> &mut RegexSetBuilder { + self.builder.octal(yes); + self + } + + /// Sets the approximate size limit, in bytes, of the compiled regex. + /// + /// This roughly corresponds to the number of heap memory, in + /// bytes, occupied by a single regex. If the regex would otherwise + /// approximately exceed this limit, then compiling that regex will + /// fail. + /// + /// The main utility of a method like this is to avoid compiling + /// regexes that use an unexpected amount of resources, such as + /// time and memory. Even if the memory usage of a large regex is + /// acceptable, its search time may not be. 
Namely, worst case time + /// complexity for search is `O(m * n)`, where `m ~ len(pattern)` and + /// `n ~ len(haystack)`. That is, search time depends, in part, on the + /// size of the compiled regex. This means that putting a limit on the + /// size of the regex limits how much a regex can impact search time. + /// + /// For more information about regex size limits, see the section on + /// [untrusted inputs](crate#untrusted-input) in the top-level crate + /// documentation. + /// + /// The default for this is some reasonable number that permits most + /// patterns to compile successfully. + /// + /// # Example + /// + /// ``` + /// # if !cfg!(target_pointer_width = "64") { return; } // see #1041 + /// use regex::RegexSetBuilder; + /// + /// // It may surprise you how big some seemingly small patterns can + /// // be! Since \w is Unicode aware, this generates a regex that can + /// // match approximately 140,000 distinct codepoints. + /// assert!( + /// RegexSetBuilder::new([r"\w"]) + /// .size_limit(45_000) + /// .build() + /// .is_err() + /// ); + /// ``` + pub fn size_limit(&mut self, bytes: usize) -> &mut RegexSetBuilder { + self.builder.size_limit(bytes); + self + } + + /// Set the approximate capacity, in bytes, of the cache of transitions + /// used by the lazy DFA. + /// + /// While the lazy DFA isn't always used, in tends to be the most + /// commonly use regex engine in default configurations. It tends to + /// adopt the performance profile of a fully build DFA, but without the + /// downside of taking worst case exponential time to build. + /// + /// The downside is that it needs to keep a cache of transitions and + /// states that are built while running a search, and this cache + /// can fill up. When it fills up, the cache will reset itself. Any + /// previously generated states and transitions will then need to be + /// re-generated. If this happens too many times, then this library + /// will bail out of using the lazy DFA and switch to a different regex + /// engine. + /// + /// If your regex provokes this particular downside of the lazy DFA, + /// then it may be beneficial to increase its cache capacity. This will + /// potentially reduce the frequency of cache resetting (ideally to + /// `0`). While it won't fix all potential performance problems with + /// the lazy DFA, increasing the cache capacity does fix some. + /// + /// There is no easy way to determine, a priori, whether increasing + /// this cache capacity will help. In general, the larger your regex, + /// the more cache it's likely to use. But that isn't an ironclad rule. + /// For example, a regex like `[01]*1[01]{N}` would normally produce a + /// fully build DFA that is exponential in size with respect to `N`. + /// The lazy DFA will prevent exponential space blow-up, but it cache + /// is likely to fill up, even when it's large and even for smallish + /// values of `N`. + /// + /// If you aren't sure whether this helps or not, it is sensible to + /// set this to some arbitrarily large number in testing, such as + /// `usize::MAX`. Namely, this represents the amount of capacity that + /// *may* be used. It's probably not a good idea to use `usize::MAX` in + /// production though, since it implies there are no controls on heap + /// memory used by this library during a search. In effect, set it to + /// whatever you're willing to allocate for a single regex search. 
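The vendored documentation gives no example for this option; a minimal sketch of setting it on a `RegexSetBuilder` (the 10 MiB figure is an arbitrary illustration, not a recommendation):

```rust
use regex::RegexSetBuilder;

fn main() {
    // A larger lazy-DFA cache for a set of patterns that will be run over
    // large haystacks; the rest of the configuration is left at defaults.
    let set = RegexSetBuilder::new([r"\bfoo\w*", r"\bbar\w*"])
        .dfa_size_limit(10 * (1 << 20))
        .build()
        .unwrap();
    let hits: Vec<usize> = set.matches("foothold and barricade").into_iter().collect();
    assert_eq!(hits, vec![0, 1]);
}
```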
+ pub fn dfa_size_limit( + &mut self, + bytes: usize, + ) -> &mut RegexSetBuilder { + self.builder.dfa_size_limit(bytes); + self + } + + /// Set the nesting limit for this parser. + /// + /// The nesting limit controls how deep the abstract syntax tree is + /// allowed to be. If the AST exceeds the given limit (e.g., with too + /// many nested groups), then an error is returned by the parser. + /// + /// The purpose of this limit is to act as a heuristic to prevent stack + /// overflow for consumers that do structural induction on an AST using + /// explicit recursion. While this crate never does this (instead using + /// constant stack space and moving the call stack to the heap), other + /// crates may. + /// + /// This limit is not checked until the entire AST is parsed. + /// Therefore, if callers want to put a limit on the amount of heap + /// space used, then they should impose a limit on the length, in + /// bytes, of the concrete pattern string. In particular, this is + /// viable since this parser implementation will limit itself to heap + /// space proportional to the length of the pattern string. See also + /// the [untrusted inputs](crate#untrusted-input) section in the + /// top-level crate documentation for more information about this. + /// + /// Note that a nest limit of `0` will return a nest limit error for + /// most patterns but not all. For example, a nest limit of `0` permits + /// `a` but not `ab`, since `ab` requires an explicit concatenation, + /// which results in a nest depth of `1`. In general, a nest limit is + /// not something that manifests in an obvious way in the concrete + /// syntax, therefore, it should not be used in a granular way. + /// + /// # Example + /// + /// ``` + /// use regex::RegexSetBuilder; + /// + /// assert!(RegexSetBuilder::new([r"a"]).nest_limit(0).build().is_ok()); + /// assert!(RegexSetBuilder::new([r"ab"]).nest_limit(0).build().is_err()); + /// ``` + pub fn nest_limit(&mut self, limit: u32) -> &mut RegexSetBuilder { + self.builder.nest_limit(limit); + self + } + } +} + +pub(crate) mod bytes { + use crate::{ + bytes::{Regex, RegexSet}, + error::Error, + }; + + use super::Builder; + + /// A configurable builder for a [`Regex`]. + /// + /// This builder can be used to programmatically set flags such as `i` + /// (case insensitive) and `x` (for verbose mode). This builder can also be + /// used to configure things like the line terminator and a size limit on + /// the compiled regular expression. + #[derive(Clone, Debug)] + pub struct RegexBuilder { + builder: Builder, + } + + impl RegexBuilder { + /// Create a new builder with a default configuration for the given + /// pattern. + /// + /// If the pattern is invalid or exceeds the configured size limits, + /// then an error will be returned when [`RegexBuilder::build`] is + /// called. + pub fn new(pattern: &str) -> RegexBuilder { + RegexBuilder { builder: Builder::new([pattern]) } + } + + /// Compiles the pattern given to `RegexBuilder::new` with the + /// configuration set on this builder. + /// + /// If the pattern isn't a valid regex or if a configured size limit + /// was exceeded, then an error is returned. + pub fn build(&self) -> Result { + self.builder.build_one_bytes() + } + + /// This configures Unicode mode for the entire pattern. + /// + /// Enabling Unicode mode does a number of things: + /// + /// * Most fundamentally, it causes the fundamental atom of matching + /// to be a single codepoint. When Unicode mode is disabled, it's a + /// single byte. 
For example, when Unicode mode is enabled, `.` will + /// match `💩` once, where as it will match 4 times when Unicode mode + /// is disabled. (Since the UTF-8 encoding of `💩` is 4 bytes long.) + /// * Case insensitive matching uses Unicode simple case folding rules. + /// * Unicode character classes like `\p{Letter}` and `\p{Greek}` are + /// available. + /// * Perl character classes are Unicode aware. That is, `\w`, `\s` and + /// `\d`. + /// * The word boundary assertions, `\b` and `\B`, use the Unicode + /// definition of a word character. + /// + /// Note that unlike the top-level `Regex` for searching `&str`, it + /// is permitted to disable Unicode mode even if the resulting pattern + /// could match invalid UTF-8. For example, `(?-u:.)` is not a valid + /// pattern for a top-level `Regex`, but is valid for a `bytes::Regex`. + /// + /// For more details on the Unicode support in this crate, see the + /// [Unicode section](crate#unicode) in this crate's top-level + /// documentation. + /// + /// The default for this is `true`. + /// + /// # Example + /// + /// ``` + /// use regex::bytes::RegexBuilder; + /// + /// let re = RegexBuilder::new(r"\w") + /// .unicode(false) + /// .build() + /// .unwrap(); + /// // Normally greek letters would be included in \w, but since + /// // Unicode mode is disabled, it only matches ASCII letters. + /// assert!(!re.is_match("δ".as_bytes())); + /// + /// let re = RegexBuilder::new(r"s") + /// .case_insensitive(true) + /// .unicode(false) + /// .build() + /// .unwrap(); + /// // Normally 'ſ' is included when searching for 's' case + /// // insensitively due to Unicode's simple case folding rules. But + /// // when Unicode mode is disabled, only ASCII case insensitive rules + /// // are used. + /// assert!(!re.is_match("ſ".as_bytes())); + /// ``` + /// + /// Since this builder is for constructing a [`bytes::Regex`](Regex), + /// one can disable Unicode mode even if it would match invalid UTF-8: + /// + /// ``` + /// use regex::bytes::RegexBuilder; + /// + /// let re = RegexBuilder::new(r".") + /// .unicode(false) + /// .build() + /// .unwrap(); + /// // Normally greek letters would be included in \w, but since + /// // Unicode mode is disabled, it only matches ASCII letters. + /// assert!(re.is_match(b"\xFF")); + /// ``` + pub fn unicode(&mut self, yes: bool) -> &mut RegexBuilder { + self.builder.unicode(yes); + self + } + + /// This configures whether to enable case insensitive matching for the + /// entire pattern. + /// + /// This setting can also be configured using the inline flag `i` + /// in the pattern. For example, `(?i:foo)` matches `foo` case + /// insensitively while `(?-i:foo)` matches `foo` case sensitively. + /// + /// The default for this is `false`. + /// + /// # Example + /// + /// ``` + /// use regex::bytes::RegexBuilder; + /// + /// let re = RegexBuilder::new(r"foo(?-i:bar)quux") + /// .case_insensitive(true) + /// .build() + /// .unwrap(); + /// assert!(re.is_match(b"FoObarQuUx")); + /// // Even though case insensitive matching is enabled in the builder, + /// // it can be locally disabled within the pattern. In this case, + /// // `bar` is matched case sensitively. + /// assert!(!re.is_match(b"fooBARquux")); + /// ``` + pub fn case_insensitive(&mut self, yes: bool) -> &mut RegexBuilder { + self.builder.case_insensitive(yes); + self + } + + /// This configures multi-line mode for the entire pattern. + /// + /// Enabling multi-line mode changes the behavior of the `^` and `$` + /// anchor assertions. 
Instead of only matching at the beginning and + /// end of a haystack, respectively, multi-line mode causes them to + /// match at the beginning and end of a line *in addition* to the + /// beginning and end of a haystack. More precisely, `^` will match at + /// the position immediately following a `\n` and `$` will match at the + /// position immediately preceding a `\n`. + /// + /// The behavior of this option can be impacted by other settings too: + /// + /// * The [`RegexBuilder::line_terminator`] option changes `\n` above + /// to any ASCII byte. + /// * The [`RegexBuilder::crlf`] option changes the line terminator to + /// be either `\r` or `\n`, but never at the position between a `\r` + /// and `\n`. + /// + /// This setting can also be configured using the inline flag `m` in + /// the pattern. + /// + /// The default for this is `false`. + /// + /// # Example + /// + /// ``` + /// use regex::bytes::RegexBuilder; + /// + /// let re = RegexBuilder::new(r"^foo$") + /// .multi_line(true) + /// .build() + /// .unwrap(); + /// assert_eq!(Some(1..4), re.find(b"\nfoo\n").map(|m| m.range())); + /// ``` + pub fn multi_line(&mut self, yes: bool) -> &mut RegexBuilder { + self.builder.multi_line(yes); + self + } + + /// This configures dot-matches-new-line mode for the entire pattern. + /// + /// Perhaps surprisingly, the default behavior for `.` is not to match + /// any character, but rather, to match any character except for the + /// line terminator (which is `\n` by default). When this mode is + /// enabled, the behavior changes such that `.` truly matches any + /// character. + /// + /// This setting can also be configured using the inline flag `s` in + /// the pattern. For example, `(?s:.)` and `\p{any}` are equivalent + /// regexes. + /// + /// The default for this is `false`. + /// + /// # Example + /// + /// ``` + /// use regex::bytes::RegexBuilder; + /// + /// let re = RegexBuilder::new(r"foo.bar") + /// .dot_matches_new_line(true) + /// .build() + /// .unwrap(); + /// let hay = b"foo\nbar"; + /// assert_eq!(Some(&b"foo\nbar"[..]), re.find(hay).map(|m| m.as_bytes())); + /// ``` + pub fn dot_matches_new_line( + &mut self, + yes: bool, + ) -> &mut RegexBuilder { + self.builder.dot_matches_new_line(yes); + self + } + + /// This configures CRLF mode for the entire pattern. + /// + /// When CRLF mode is enabled, both `\r` ("carriage return" or CR for + /// short) and `\n` ("line feed" or LF for short) are treated as line + /// terminators. This results in the following: + /// + /// * Unless dot-matches-new-line mode is enabled, `.` will now match + /// any character except for `\n` and `\r`. + /// * When multi-line mode is enabled, `^` will match immediately + /// following a `\n` or a `\r`. Similarly, `$` will match immediately + /// preceding a `\n` or a `\r`. Neither `^` nor `$` will ever match + /// between `\r` and `\n`. + /// + /// This setting can also be configured using the inline flag `R` in + /// the pattern. + /// + /// The default for this is `false`. + /// + /// # Example + /// + /// ``` + /// use regex::bytes::RegexBuilder; + /// + /// let re = RegexBuilder::new(r"^foo$") + /// .multi_line(true) + /// .crlf(true) + /// .build() + /// .unwrap(); + /// let hay = b"\r\nfoo\r\n"; + /// // If CRLF mode weren't enabled here, then '$' wouldn't match + /// // immediately after 'foo', and thus no match would be found. 
+ /// assert_eq!(Some(&b"foo"[..]), re.find(hay).map(|m| m.as_bytes())); + /// ``` + /// + /// This example demonstrates that `^` will never match at a position + /// between `\r` and `\n`. (`$` will similarly not match between a `\r` + /// and a `\n`.) + /// + /// ``` + /// use regex::bytes::RegexBuilder; + /// + /// let re = RegexBuilder::new(r"^") + /// .multi_line(true) + /// .crlf(true) + /// .build() + /// .unwrap(); + /// let hay = b"\r\n\r\n"; + /// let ranges: Vec<_> = re.find_iter(hay).map(|m| m.range()).collect(); + /// assert_eq!(ranges, vec![0..0, 2..2, 4..4]); + /// ``` + pub fn crlf(&mut self, yes: bool) -> &mut RegexBuilder { + self.builder.crlf(yes); + self + } + + /// Configures the line terminator to be used by the regex. + /// + /// The line terminator is relevant in two ways for a particular regex: + /// + /// * When dot-matches-new-line mode is *not* enabled (the default), + /// then `.` will match any character except for the configured line + /// terminator. + /// * When multi-line mode is enabled (not the default), then `^` and + /// `$` will match immediately after and before, respectively, a line + /// terminator. + /// + /// In both cases, if CRLF mode is enabled in a particular context, + /// then it takes precedence over any configured line terminator. + /// + /// This option cannot be configured from within the pattern. + /// + /// The default line terminator is `\n`. + /// + /// # Example + /// + /// This shows how to treat the NUL byte as a line terminator. This can + /// be a useful heuristic when searching binary data. + /// + /// ``` + /// use regex::bytes::RegexBuilder; + /// + /// let re = RegexBuilder::new(r"^foo$") + /// .multi_line(true) + /// .line_terminator(b'\x00') + /// .build() + /// .unwrap(); + /// let hay = b"\x00foo\x00"; + /// assert_eq!(Some(1..4), re.find(hay).map(|m| m.range())); + /// ``` + /// + /// This example shows that the behavior of `.` is impacted by this + /// setting as well: + /// + /// ``` + /// use regex::bytes::RegexBuilder; + /// + /// let re = RegexBuilder::new(r".") + /// .line_terminator(b'\x00') + /// .build() + /// .unwrap(); + /// assert!(re.is_match(b"\n")); + /// assert!(!re.is_match(b"\x00")); + /// ``` + /// + /// This shows that building a regex will work even when the byte + /// given is not ASCII. This is unlike the top-level `Regex` API where + /// matching invalid UTF-8 is not allowed. + /// + /// Note though that you must disable Unicode mode. This is required + /// because Unicode mode requires matching one codepoint at a time, + /// and there is no way to match a non-ASCII byte as if it were a + /// codepoint. + /// + /// ``` + /// use regex::bytes::RegexBuilder; + /// + /// assert!( + /// RegexBuilder::new(r".") + /// .unicode(false) + /// .line_terminator(0x80) + /// .build() + /// .is_ok(), + /// ); + /// ``` + pub fn line_terminator(&mut self, byte: u8) -> &mut RegexBuilder { + self.builder.line_terminator(byte); + self + } + + /// This configures swap-greed mode for the entire pattern. + /// + /// When swap-greed mode is enabled, patterns like `a+` will become + /// non-greedy and patterns like `a+?` will become greedy. In other + /// words, the meanings of `a+` and `a+?` are switched. + /// + /// This setting can also be configured using the inline flag `U` in + /// the pattern. + /// + /// The default for this is `false`. 
+ /// + /// # Example + /// + /// ``` + /// use regex::bytes::RegexBuilder; + /// + /// let re = RegexBuilder::new(r"a+") + /// .swap_greed(true) + /// .build() + /// .unwrap(); + /// assert_eq!(Some(&b"a"[..]), re.find(b"aaa").map(|m| m.as_bytes())); + /// ``` + pub fn swap_greed(&mut self, yes: bool) -> &mut RegexBuilder { + self.builder.swap_greed(yes); + self + } + + /// This configures verbose mode for the entire pattern. + /// + /// When enabled, whitespace will treated as insignificant in the + /// pattern and `#` can be used to start a comment until the next new + /// line. + /// + /// Normally, in most places in a pattern, whitespace is treated + /// literally. For example ` +` will match one or more ASCII whitespace + /// characters. + /// + /// When verbose mode is enabled, `\#` can be used to match a literal + /// `#` and `\ ` can be used to match a literal ASCII whitespace + /// character. + /// + /// Verbose mode is useful for permitting regexes to be formatted and + /// broken up more nicely. This may make them more easily readable. + /// + /// This setting can also be configured using the inline flag `x` in + /// the pattern. + /// + /// The default for this is `false`. + /// + /// # Example + /// + /// ``` + /// use regex::bytes::RegexBuilder; + /// + /// let pat = r" + /// \b + /// (?\p{Uppercase}\w*) # always start with uppercase letter + /// [\s--\n]+ # whitespace should separate names + /// (?: # middle name can be an initial! + /// (?:(?\p{Uppercase})\.|(?\p{Uppercase}\w*)) + /// [\s--\n]+ + /// )? + /// (?\p{Uppercase}\w*) + /// \b + /// "; + /// let re = RegexBuilder::new(pat) + /// .ignore_whitespace(true) + /// .build() + /// .unwrap(); + /// + /// let caps = re.captures(b"Harry Potter").unwrap(); + /// assert_eq!(&b"Harry"[..], &caps["first"]); + /// assert_eq!(&b"Potter"[..], &caps["last"]); + /// + /// let caps = re.captures(b"Harry J. Potter").unwrap(); + /// assert_eq!(&b"Harry"[..], &caps["first"]); + /// // Since a middle name/initial isn't required for an overall match, + /// // we can't assume that 'initial' or 'middle' will be populated! + /// assert_eq!( + /// Some(&b"J"[..]), + /// caps.name("initial").map(|m| m.as_bytes()), + /// ); + /// assert_eq!(None, caps.name("middle").map(|m| m.as_bytes())); + /// assert_eq!(&b"Potter"[..], &caps["last"]); + /// + /// let caps = re.captures(b"Harry James Potter").unwrap(); + /// assert_eq!(&b"Harry"[..], &caps["first"]); + /// // Since a middle name/initial isn't required for an overall match, + /// // we can't assume that 'initial' or 'middle' will be populated! + /// assert_eq!(None, caps.name("initial").map(|m| m.as_bytes())); + /// assert_eq!( + /// Some(&b"James"[..]), + /// caps.name("middle").map(|m| m.as_bytes()), + /// ); + /// assert_eq!(&b"Potter"[..], &caps["last"]); + /// ``` + pub fn ignore_whitespace(&mut self, yes: bool) -> &mut RegexBuilder { + self.builder.ignore_whitespace(yes); + self + } + + /// This configures octal mode for the entire pattern. + /// + /// Octal syntax is a little-known way of uttering Unicode codepoints + /// in a pattern. For example, `a`, `\x61`, `\u0061` and `\141` are all + /// equivalent patterns, where the last example shows octal syntax. + /// + /// While supporting octal syntax isn't in and of itself a problem, + /// it does make good error messages harder. That is, in PCRE based + /// regex engines, syntax like `\1` invokes a backreference, which is + /// explicitly unsupported this library. However, many users expect + /// backreferences to be supported. 
Therefore, when octal support + /// is disabled, the error message will explicitly mention that + /// backreferences aren't supported. + /// + /// The default for this is `false`. + /// + /// # Example + /// + /// ``` + /// use regex::bytes::RegexBuilder; + /// + /// // Normally this pattern would not compile, with an error message + /// // about backreferences not being supported. But with octal mode + /// // enabled, octal escape sequences work. + /// let re = RegexBuilder::new(r"\141") + /// .octal(true) + /// .build() + /// .unwrap(); + /// assert!(re.is_match(b"a")); + /// ``` + pub fn octal(&mut self, yes: bool) -> &mut RegexBuilder { + self.builder.octal(yes); + self + } + + /// Sets the approximate size limit, in bytes, of the compiled regex. + /// + /// This roughly corresponds to the number of heap memory, in + /// bytes, occupied by a single regex. If the regex would otherwise + /// approximately exceed this limit, then compiling that regex will + /// fail. + /// + /// The main utility of a method like this is to avoid compiling + /// regexes that use an unexpected amount of resources, such as + /// time and memory. Even if the memory usage of a large regex is + /// acceptable, its search time may not be. Namely, worst case time + /// complexity for search is `O(m * n)`, where `m ~ len(pattern)` and + /// `n ~ len(haystack)`. That is, search time depends, in part, on the + /// size of the compiled regex. This means that putting a limit on the + /// size of the regex limits how much a regex can impact search time. + /// + /// For more information about regex size limits, see the section on + /// [untrusted inputs](crate#untrusted-input) in the top-level crate + /// documentation. + /// + /// The default for this is some reasonable number that permits most + /// patterns to compile successfully. + /// + /// # Example + /// + /// ``` + /// # if !cfg!(target_pointer_width = "64") { return; } // see #1041 + /// use regex::bytes::RegexBuilder; + /// + /// // It may surprise you how big some seemingly small patterns can + /// // be! Since \w is Unicode aware, this generates a regex that can + /// // match approximately 140,000 distinct codepoints. + /// assert!(RegexBuilder::new(r"\w").size_limit(45_000).build().is_err()); + /// ``` + pub fn size_limit(&mut self, bytes: usize) -> &mut RegexBuilder { + self.builder.size_limit(bytes); + self + } + + /// Set the approximate capacity, in bytes, of the cache of transitions + /// used by the lazy DFA. + /// + /// While the lazy DFA isn't always used, in tends to be the most + /// commonly use regex engine in default configurations. It tends to + /// adopt the performance profile of a fully build DFA, but without the + /// downside of taking worst case exponential time to build. + /// + /// The downside is that it needs to keep a cache of transitions and + /// states that are built while running a search, and this cache + /// can fill up. When it fills up, the cache will reset itself. Any + /// previously generated states and transitions will then need to be + /// re-generated. If this happens too many times, then this library + /// will bail out of using the lazy DFA and switch to a different regex + /// engine. + /// + /// If your regex provokes this particular downside of the lazy DFA, + /// then it may be beneficial to increase its cache capacity. This will + /// potentially reduce the frequency of cache resetting (ideally to + /// `0`). 
While it won't fix all potential performance problems with + /// the lazy DFA, increasing the cache capacity does fix some. + /// + /// There is no easy way to determine, a priori, whether increasing + /// this cache capacity will help. In general, the larger your regex, + /// the more cache it's likely to use. But that isn't an ironclad rule. + /// For example, a regex like `[01]*1[01]{N}` would normally produce a + /// fully build DFA that is exponential in size with respect to `N`. + /// The lazy DFA will prevent exponential space blow-up, but it cache + /// is likely to fill up, even when it's large and even for smallish + /// values of `N`. + /// + /// If you aren't sure whether this helps or not, it is sensible to + /// set this to some arbitrarily large number in testing, such as + /// `usize::MAX`. Namely, this represents the amount of capacity that + /// *may* be used. It's probably not a good idea to use `usize::MAX` in + /// production though, since it implies there are no controls on heap + /// memory used by this library during a search. In effect, set it to + /// whatever you're willing to allocate for a single regex search. + pub fn dfa_size_limit(&mut self, bytes: usize) -> &mut RegexBuilder { + self.builder.dfa_size_limit(bytes); + self + } + + /// Set the nesting limit for this parser. + /// + /// The nesting limit controls how deep the abstract syntax tree is + /// allowed to be. If the AST exceeds the given limit (e.g., with too + /// many nested groups), then an error is returned by the parser. + /// + /// The purpose of this limit is to act as a heuristic to prevent stack + /// overflow for consumers that do structural induction on an AST using + /// explicit recursion. While this crate never does this (instead using + /// constant stack space and moving the call stack to the heap), other + /// crates may. + /// + /// This limit is not checked until the entire AST is parsed. + /// Therefore, if callers want to put a limit on the amount of heap + /// space used, then they should impose a limit on the length, in + /// bytes, of the concrete pattern string. In particular, this is + /// viable since this parser implementation will limit itself to heap + /// space proportional to the length of the pattern string. See also + /// the [untrusted inputs](crate#untrusted-input) section in the + /// top-level crate documentation for more information about this. + /// + /// Note that a nest limit of `0` will return a nest limit error for + /// most patterns but not all. For example, a nest limit of `0` permits + /// `a` but not `ab`, since `ab` requires an explicit concatenation, + /// which results in a nest depth of `1`. In general, a nest limit is + /// not something that manifests in an obvious way in the concrete + /// syntax, therefore, it should not be used in a granular way. + /// + /// # Example + /// + /// ``` + /// use regex::bytes::RegexBuilder; + /// + /// assert!(RegexBuilder::new(r"a").nest_limit(0).build().is_ok()); + /// assert!(RegexBuilder::new(r"ab").nest_limit(0).build().is_err()); + /// ``` + pub fn nest_limit(&mut self, limit: u32) -> &mut RegexBuilder { + self.builder.nest_limit(limit); + self + } + } + + /// A configurable builder for a [`RegexSet`]. + /// + /// This builder can be used to programmatically set flags such as `i` + /// (case insensitive) and `x` (for verbose mode). This builder can also be + /// used to configure things like the line terminator and a size limit on + /// the compiled regular expression. 
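Before the method-by-method documentation that follows, a minimal usage sketch of this byte-oriented set builder (an editorial illustration; the patterns and haystack are arbitrary):

```rust
use regex::bytes::RegexSetBuilder;

fn main() {
    // Flags set on the builder apply to every pattern in the set; the
    // haystack does not need to be valid UTF-8.
    let set = RegexSetBuilder::new([r"magic", r"\x00\x00"])
        .case_insensitive(true)
        .build()
        .unwrap();
    let hay: &[u8] = b"MAGIC header\x00\x00\xFF";
    let hits: Vec<usize> = set.matches(hay).into_iter().collect();
    assert_eq!(hits, vec![0, 1]);
}
```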
+ #[derive(Clone, Debug)] + pub struct RegexSetBuilder { + builder: Builder, + } + + impl RegexSetBuilder { + /// Create a new builder with a default configuration for the given + /// patterns. + /// + /// If the patterns are invalid or exceed the configured size limits, + /// then an error will be returned when [`RegexSetBuilder::build`] is + /// called. + pub fn new(patterns: I) -> RegexSetBuilder + where + I: IntoIterator, + S: AsRef, + { + RegexSetBuilder { builder: Builder::new(patterns) } + } + + /// Compiles the patterns given to `RegexSetBuilder::new` with the + /// configuration set on this builder. + /// + /// If the patterns aren't valid regexes or if a configured size limit + /// was exceeded, then an error is returned. + pub fn build(&self) -> Result { + self.builder.build_many_bytes() + } + + /// This configures Unicode mode for the all of the patterns. + /// + /// Enabling Unicode mode does a number of things: + /// + /// * Most fundamentally, it causes the fundamental atom of matching + /// to be a single codepoint. When Unicode mode is disabled, it's a + /// single byte. For example, when Unicode mode is enabled, `.` will + /// match `💩` once, where as it will match 4 times when Unicode mode + /// is disabled. (Since the UTF-8 encoding of `💩` is 4 bytes long.) + /// * Case insensitive matching uses Unicode simple case folding rules. + /// * Unicode character classes like `\p{Letter}` and `\p{Greek}` are + /// available. + /// * Perl character classes are Unicode aware. That is, `\w`, `\s` and + /// `\d`. + /// * The word boundary assertions, `\b` and `\B`, use the Unicode + /// definition of a word character. + /// + /// Note that unlike the top-level `RegexSet` for searching `&str`, + /// it is permitted to disable Unicode mode even if the resulting + /// pattern could match invalid UTF-8. For example, `(?-u:.)` is not + /// a valid pattern for a top-level `RegexSet`, but is valid for a + /// `bytes::RegexSet`. + /// + /// For more details on the Unicode support in this crate, see the + /// [Unicode section](crate#unicode) in this crate's top-level + /// documentation. + /// + /// The default for this is `true`. + /// + /// # Example + /// + /// ``` + /// use regex::bytes::RegexSetBuilder; + /// + /// let re = RegexSetBuilder::new([r"\w"]) + /// .unicode(false) + /// .build() + /// .unwrap(); + /// // Normally greek letters would be included in \w, but since + /// // Unicode mode is disabled, it only matches ASCII letters. + /// assert!(!re.is_match("δ".as_bytes())); + /// + /// let re = RegexSetBuilder::new([r"s"]) + /// .case_insensitive(true) + /// .unicode(false) + /// .build() + /// .unwrap(); + /// // Normally 'ſ' is included when searching for 's' case + /// // insensitively due to Unicode's simple case folding rules. But + /// // when Unicode mode is disabled, only ASCII case insensitive rules + /// // are used. + /// assert!(!re.is_match("ſ".as_bytes())); + /// ``` + /// + /// Since this builder is for constructing a + /// [`bytes::RegexSet`](RegexSet), one can disable Unicode mode even if + /// it would match invalid UTF-8: + /// + /// ``` + /// use regex::bytes::RegexSetBuilder; + /// + /// let re = RegexSetBuilder::new([r"."]) + /// .unicode(false) + /// .build() + /// .unwrap(); + /// // Normally greek letters would be included in \w, but since + /// // Unicode mode is disabled, it only matches ASCII letters. 
+ /// assert!(re.is_match(b"\xFF")); + /// ``` + pub fn unicode(&mut self, yes: bool) -> &mut RegexSetBuilder { + self.builder.unicode(yes); + self + } + + /// This configures whether to enable case insensitive matching for all + /// of the patterns. + /// + /// This setting can also be configured using the inline flag `i` + /// in the pattern. For example, `(?i:foo)` matches `foo` case + /// insensitively while `(?-i:foo)` matches `foo` case sensitively. + /// + /// The default for this is `false`. + /// + /// # Example + /// + /// ``` + /// use regex::bytes::RegexSetBuilder; + /// + /// let re = RegexSetBuilder::new([r"foo(?-i:bar)quux"]) + /// .case_insensitive(true) + /// .build() + /// .unwrap(); + /// assert!(re.is_match(b"FoObarQuUx")); + /// // Even though case insensitive matching is enabled in the builder, + /// // it can be locally disabled within the pattern. In this case, + /// // `bar` is matched case sensitively. + /// assert!(!re.is_match(b"fooBARquux")); + /// ``` + pub fn case_insensitive(&mut self, yes: bool) -> &mut RegexSetBuilder { + self.builder.case_insensitive(yes); + self + } + + /// This configures multi-line mode for all of the patterns. + /// + /// Enabling multi-line mode changes the behavior of the `^` and `$` + /// anchor assertions. Instead of only matching at the beginning and + /// end of a haystack, respectively, multi-line mode causes them to + /// match at the beginning and end of a line *in addition* to the + /// beginning and end of a haystack. More precisely, `^` will match at + /// the position immediately following a `\n` and `$` will match at the + /// position immediately preceding a `\n`. + /// + /// The behavior of this option can be impacted by other settings too: + /// + /// * The [`RegexSetBuilder::line_terminator`] option changes `\n` + /// above to any ASCII byte. + /// * The [`RegexSetBuilder::crlf`] option changes the line terminator + /// to be either `\r` or `\n`, but never at the position between a `\r` + /// and `\n`. + /// + /// This setting can also be configured using the inline flag `m` in + /// the pattern. + /// + /// The default for this is `false`. + /// + /// # Example + /// + /// ``` + /// use regex::bytes::RegexSetBuilder; + /// + /// let re = RegexSetBuilder::new([r"^foo$"]) + /// .multi_line(true) + /// .build() + /// .unwrap(); + /// assert!(re.is_match(b"\nfoo\n")); + /// ``` + pub fn multi_line(&mut self, yes: bool) -> &mut RegexSetBuilder { + self.builder.multi_line(yes); + self + } + + /// This configures dot-matches-new-line mode for the entire pattern. + /// + /// Perhaps surprisingly, the default behavior for `.` is not to match + /// any character, but rather, to match any character except for the + /// line terminator (which is `\n` by default). When this mode is + /// enabled, the behavior changes such that `.` truly matches any + /// character. + /// + /// This setting can also be configured using the inline flag `s` in + /// the pattern. For example, `(?s:.)` and `\p{any}` are equivalent + /// regexes. + /// + /// The default for this is `false`. 
+ /// + /// # Example + /// + /// ``` + /// use regex::bytes::RegexSetBuilder; + /// + /// let re = RegexSetBuilder::new([r"foo.bar"]) + /// .dot_matches_new_line(true) + /// .build() + /// .unwrap(); + /// let hay = b"foo\nbar"; + /// assert!(re.is_match(hay)); + /// ``` + pub fn dot_matches_new_line( + &mut self, + yes: bool, + ) -> &mut RegexSetBuilder { + self.builder.dot_matches_new_line(yes); + self + } + + /// This configures CRLF mode for all of the patterns. + /// + /// When CRLF mode is enabled, both `\r` ("carriage return" or CR for + /// short) and `\n` ("line feed" or LF for short) are treated as line + /// terminators. This results in the following: + /// + /// * Unless dot-matches-new-line mode is enabled, `.` will now match + /// any character except for `\n` and `\r`. + /// * When multi-line mode is enabled, `^` will match immediately + /// following a `\n` or a `\r`. Similarly, `$` will match immediately + /// preceding a `\n` or a `\r`. Neither `^` nor `$` will ever match + /// between `\r` and `\n`. + /// + /// This setting can also be configured using the inline flag `R` in + /// the pattern. + /// + /// The default for this is `false`. + /// + /// # Example + /// + /// ``` + /// use regex::bytes::RegexSetBuilder; + /// + /// let re = RegexSetBuilder::new([r"^foo$"]) + /// .multi_line(true) + /// .crlf(true) + /// .build() + /// .unwrap(); + /// let hay = b"\r\nfoo\r\n"; + /// // If CRLF mode weren't enabled here, then '$' wouldn't match + /// // immediately after 'foo', and thus no match would be found. + /// assert!(re.is_match(hay)); + /// ``` + /// + /// This example demonstrates that `^` will never match at a position + /// between `\r` and `\n`. (`$` will similarly not match between a `\r` + /// and a `\n`.) + /// + /// ``` + /// use regex::bytes::RegexSetBuilder; + /// + /// let re = RegexSetBuilder::new([r"^\n"]) + /// .multi_line(true) + /// .crlf(true) + /// .build() + /// .unwrap(); + /// assert!(!re.is_match(b"\r\n")); + /// ``` + pub fn crlf(&mut self, yes: bool) -> &mut RegexSetBuilder { + self.builder.crlf(yes); + self + } + + /// Configures the line terminator to be used by the regex. + /// + /// The line terminator is relevant in two ways for a particular regex: + /// + /// * When dot-matches-new-line mode is *not* enabled (the default), + /// then `.` will match any character except for the configured line + /// terminator. + /// * When multi-line mode is enabled (not the default), then `^` and + /// `$` will match immediately after and before, respectively, a line + /// terminator. + /// + /// In both cases, if CRLF mode is enabled in a particular context, + /// then it takes precedence over any configured line terminator. + /// + /// This option cannot be configured from within the pattern. + /// + /// The default line terminator is `\n`. + /// + /// # Example + /// + /// This shows how to treat the NUL byte as a line terminator. This can + /// be a useful heuristic when searching binary data. 
+ /// + /// ``` + /// use regex::bytes::RegexSetBuilder; + /// + /// let re = RegexSetBuilder::new([r"^foo$"]) + /// .multi_line(true) + /// .line_terminator(b'\x00') + /// .build() + /// .unwrap(); + /// let hay = b"\x00foo\x00"; + /// assert!(re.is_match(hay)); + /// ``` + /// + /// This example shows that the behavior of `.` is impacted by this + /// setting as well: + /// + /// ``` + /// use regex::bytes::RegexSetBuilder; + /// + /// let re = RegexSetBuilder::new([r"."]) + /// .line_terminator(b'\x00') + /// .build() + /// .unwrap(); + /// assert!(re.is_match(b"\n")); + /// assert!(!re.is_match(b"\x00")); + /// ``` + /// + /// This shows that building a regex will work even when the byte given + /// is not ASCII. This is unlike the top-level `RegexSet` API where + /// matching invalid UTF-8 is not allowed. + /// + /// Note though that you must disable Unicode mode. This is required + /// because Unicode mode requires matching one codepoint at a time, + /// and there is no way to match a non-ASCII byte as if it were a + /// codepoint. + /// + /// ``` + /// use regex::bytes::RegexSetBuilder; + /// + /// assert!( + /// RegexSetBuilder::new([r"."]) + /// .unicode(false) + /// .line_terminator(0x80) + /// .build() + /// .is_ok(), + /// ); + /// ``` + pub fn line_terminator(&mut self, byte: u8) -> &mut RegexSetBuilder { + self.builder.line_terminator(byte); + self + } + + /// This configures swap-greed mode for all of the patterns. + /// + /// When swap-greed mode is enabled, patterns like `a+` will become + /// non-greedy and patterns like `a+?` will become greedy. In other + /// words, the meanings of `a+` and `a+?` are switched. + /// + /// This setting can also be configured using the inline flag `U` in + /// the pattern. + /// + /// Note that this is generally not useful for a `RegexSet` since a + /// `RegexSet` can only report whether a pattern matches or not. Since + /// greediness never impacts whether a match is found or not (only the + /// offsets of the match), it follows that whether parts of a pattern + /// are greedy or not doesn't matter for a `RegexSet`. + /// + /// The default for this is `false`. + pub fn swap_greed(&mut self, yes: bool) -> &mut RegexSetBuilder { + self.builder.swap_greed(yes); + self + } + + /// This configures verbose mode for all of the patterns. + /// + /// When enabled, whitespace will treated as insignificant in the + /// pattern and `#` can be used to start a comment until the next new + /// line. + /// + /// Normally, in most places in a pattern, whitespace is treated + /// literally. For example ` +` will match one or more ASCII whitespace + /// characters. + /// + /// When verbose mode is enabled, `\#` can be used to match a literal + /// `#` and `\ ` can be used to match a literal ASCII whitespace + /// character. + /// + /// Verbose mode is useful for permitting regexes to be formatted and + /// broken up more nicely. This may make them more easily readable. + /// + /// This setting can also be configured using the inline flag `x` in + /// the pattern. + /// + /// The default for this is `false`. + /// + /// # Example + /// + /// ``` + /// use regex::bytes::RegexSetBuilder; + /// + /// let pat = r" + /// \b + /// (?\p{Uppercase}\w*) # always start with uppercase letter + /// [\s--\n]+ # whitespace should separate names + /// (?: # middle name can be an initial! + /// (?:(?\p{Uppercase})\.|(?\p{Uppercase}\w*)) + /// [\s--\n]+ + /// )? 
+ /// (?\p{Uppercase}\w*) + /// \b + /// "; + /// let re = RegexSetBuilder::new([pat]) + /// .ignore_whitespace(true) + /// .build() + /// .unwrap(); + /// assert!(re.is_match(b"Harry Potter")); + /// assert!(re.is_match(b"Harry J. Potter")); + /// assert!(re.is_match(b"Harry James Potter")); + /// assert!(!re.is_match(b"harry J. Potter")); + /// ``` + pub fn ignore_whitespace( + &mut self, + yes: bool, + ) -> &mut RegexSetBuilder { + self.builder.ignore_whitespace(yes); + self + } + + /// This configures octal mode for all of the patterns. + /// + /// Octal syntax is a little-known way of uttering Unicode codepoints + /// in a pattern. For example, `a`, `\x61`, `\u0061` and `\141` are all + /// equivalent patterns, where the last example shows octal syntax. + /// + /// While supporting octal syntax isn't in and of itself a problem, + /// it does make good error messages harder. That is, in PCRE based + /// regex engines, syntax like `\1` invokes a backreference, which is + /// explicitly unsupported this library. However, many users expect + /// backreferences to be supported. Therefore, when octal support + /// is disabled, the error message will explicitly mention that + /// backreferences aren't supported. + /// + /// The default for this is `false`. + /// + /// # Example + /// + /// ``` + /// use regex::bytes::RegexSetBuilder; + /// + /// // Normally this pattern would not compile, with an error message + /// // about backreferences not being supported. But with octal mode + /// // enabled, octal escape sequences work. + /// let re = RegexSetBuilder::new([r"\141"]) + /// .octal(true) + /// .build() + /// .unwrap(); + /// assert!(re.is_match(b"a")); + /// ``` + pub fn octal(&mut self, yes: bool) -> &mut RegexSetBuilder { + self.builder.octal(yes); + self + } + + /// Sets the approximate size limit, in bytes, of the compiled regex. + /// + /// This roughly corresponds to the number of heap memory, in + /// bytes, occupied by a single regex. If the regex would otherwise + /// approximately exceed this limit, then compiling that regex will + /// fail. + /// + /// The main utility of a method like this is to avoid compiling + /// regexes that use an unexpected amount of resources, such as + /// time and memory. Even if the memory usage of a large regex is + /// acceptable, its search time may not be. Namely, worst case time + /// complexity for search is `O(m * n)`, where `m ~ len(pattern)` and + /// `n ~ len(haystack)`. That is, search time depends, in part, on the + /// size of the compiled regex. This means that putting a limit on the + /// size of the regex limits how much a regex can impact search time. + /// + /// For more information about regex size limits, see the section on + /// [untrusted inputs](crate#untrusted-input) in the top-level crate + /// documentation. + /// + /// The default for this is some reasonable number that permits most + /// patterns to compile successfully. + /// + /// # Example + /// + /// ``` + /// # if !cfg!(target_pointer_width = "64") { return; } // see #1041 + /// use regex::bytes::RegexSetBuilder; + /// + /// // It may surprise you how big some seemingly small patterns can + /// // be! Since \w is Unicode aware, this generates a regex that can + /// // match approximately 140,000 distinct codepoints. 
+ /// assert!( + /// RegexSetBuilder::new([r"\w"]) + /// .size_limit(45_000) + /// .build() + /// .is_err() + /// ); + /// ``` + pub fn size_limit(&mut self, bytes: usize) -> &mut RegexSetBuilder { + self.builder.size_limit(bytes); + self + } + + /// Set the approximate capacity, in bytes, of the cache of transitions + /// used by the lazy DFA. + /// + /// While the lazy DFA isn't always used, in tends to be the most + /// commonly use regex engine in default configurations. It tends to + /// adopt the performance profile of a fully build DFA, but without the + /// downside of taking worst case exponential time to build. + /// + /// The downside is that it needs to keep a cache of transitions and + /// states that are built while running a search, and this cache + /// can fill up. When it fills up, the cache will reset itself. Any + /// previously generated states and transitions will then need to be + /// re-generated. If this happens too many times, then this library + /// will bail out of using the lazy DFA and switch to a different regex + /// engine. + /// + /// If your regex provokes this particular downside of the lazy DFA, + /// then it may be beneficial to increase its cache capacity. This will + /// potentially reduce the frequency of cache resetting (ideally to + /// `0`). While it won't fix all potential performance problems with + /// the lazy DFA, increasing the cache capacity does fix some. + /// + /// There is no easy way to determine, a priori, whether increasing + /// this cache capacity will help. In general, the larger your regex, + /// the more cache it's likely to use. But that isn't an ironclad rule. + /// For example, a regex like `[01]*1[01]{N}` would normally produce a + /// fully build DFA that is exponential in size with respect to `N`. + /// The lazy DFA will prevent exponential space blow-up, but it cache + /// is likely to fill up, even when it's large and even for smallish + /// values of `N`. + /// + /// If you aren't sure whether this helps or not, it is sensible to + /// set this to some arbitrarily large number in testing, such as + /// `usize::MAX`. Namely, this represents the amount of capacity that + /// *may* be used. It's probably not a good idea to use `usize::MAX` in + /// production though, since it implies there are no controls on heap + /// memory used by this library during a search. In effect, set it to + /// whatever you're willing to allocate for a single regex search. + pub fn dfa_size_limit( + &mut self, + bytes: usize, + ) -> &mut RegexSetBuilder { + self.builder.dfa_size_limit(bytes); + self + } + + /// Set the nesting limit for this parser. + /// + /// The nesting limit controls how deep the abstract syntax tree is + /// allowed to be. If the AST exceeds the given limit (e.g., with too + /// many nested groups), then an error is returned by the parser. + /// + /// The purpose of this limit is to act as a heuristic to prevent stack + /// overflow for consumers that do structural induction on an AST using + /// explicit recursion. While this crate never does this (instead using + /// constant stack space and moving the call stack to the heap), other + /// crates may. + /// + /// This limit is not checked until the entire AST is parsed. + /// Therefore, if callers want to put a limit on the amount of heap + /// space used, then they should impose a limit on the length, in + /// bytes, of the concrete pattern string. 
In particular, this is + /// viable since this parser implementation will limit itself to heap + /// space proportional to the length of the pattern string. See also + /// the [untrusted inputs](crate#untrusted-input) section in the + /// top-level crate documentation for more information about this. + /// + /// Note that a nest limit of `0` will return a nest limit error for + /// most patterns but not all. For example, a nest limit of `0` permits + /// `a` but not `ab`, since `ab` requires an explicit concatenation, + /// which results in a nest depth of `1`. In general, a nest limit is + /// not something that manifests in an obvious way in the concrete + /// syntax, therefore, it should not be used in a granular way. + /// + /// # Example + /// + /// ``` + /// use regex::bytes::RegexSetBuilder; + /// + /// assert!(RegexSetBuilder::new([r"a"]).nest_limit(0).build().is_ok()); + /// assert!(RegexSetBuilder::new([r"ab"]).nest_limit(0).build().is_err()); + /// ``` + pub fn nest_limit(&mut self, limit: u32) -> &mut RegexSetBuilder { + self.builder.nest_limit(limit); + self + } + } +} diff --git a/vendor/regex/src/bytes.rs b/vendor/regex/src/bytes.rs new file mode 100644 index 00000000000000..383ac4a5b59b7a --- /dev/null +++ b/vendor/regex/src/bytes.rs @@ -0,0 +1,91 @@ +/*! +Search for regex matches in `&[u8]` haystacks. + +This module provides a nearly identical API via [`Regex`] to the one found in +the top-level of this crate. There are two important differences: + +1. Matching is done on `&[u8]` instead of `&str`. Additionally, `Vec` +is used where `String` would have been used in the top-level API. +2. Unicode support can be disabled even when disabling it would result in +matching invalid UTF-8 bytes. + +# Example: match null terminated string + +This shows how to find all null-terminated strings in a slice of bytes. This +works even if a C string contains invalid UTF-8. + +```rust +use regex::bytes::Regex; + +let re = Regex::new(r"(?-u)(?[^\x00]+)\x00").unwrap(); +let hay = b"foo\x00qu\xFFux\x00baz\x00"; + +// Extract all of the strings without the NUL terminator from each match. +// The unwrap is OK here since a match requires the `cstr` capture to match. +let cstrs: Vec<&[u8]> = + re.captures_iter(hay) + .map(|c| c.name("cstr").unwrap().as_bytes()) + .collect(); +assert_eq!(cstrs, vec![&b"foo"[..], &b"qu\xFFux"[..], &b"baz"[..]]); +``` + +# Example: selectively enable Unicode support + +This shows how to match an arbitrary byte pattern followed by a UTF-8 encoded +string (e.g., to extract a title from a Matroska file): + +```rust +use regex::bytes::Regex; + +let re = Regex::new( + r"(?-u)\x7b\xa9(?:[\x80-\xfe]|[\x40-\xff].)(?u:(.*))" +).unwrap(); +let hay = b"\x12\xd0\x3b\x5f\x7b\xa9\x85\xe2\x98\x83\x80\x98\x54\x76\x68\x65"; + +// Notice that despite the `.*` at the end, it will only match valid UTF-8 +// because Unicode mode was enabled with the `u` flag. Without the `u` flag, +// the `.*` would match the rest of the bytes regardless of whether they were +// valid UTF-8. +let (_, [title]) = re.captures(hay).unwrap().extract(); +assert_eq!(title, b"\xE2\x98\x83"); +// We can UTF-8 decode the title now. And the unwrap here +// is correct because the existence of a match guarantees +// that `title` is valid UTF-8. +let title = std::str::from_utf8(title).unwrap(); +assert_eq!(title, "☃"); +``` + +In general, if the Unicode flag is enabled in a capture group and that capture +is part of the overall match, then the capture is *guaranteed* to be valid +UTF-8. 
+ +# Syntax + +The supported syntax is pretty much the same as the syntax for Unicode +regular expressions with a few changes that make sense for matching arbitrary +bytes: + +1. The `u` flag can be disabled even when disabling it might cause the regex to +match invalid UTF-8. When the `u` flag is disabled, the regex is said to be in +"ASCII compatible" mode. +2. In ASCII compatible mode, Unicode character classes are not allowed. Literal +Unicode scalar values outside of character classes are allowed. +3. In ASCII compatible mode, Perl character classes (`\w`, `\d` and `\s`) +revert to their typical ASCII definition. `\w` maps to `[[:word:]]`, `\d` maps +to `[[:digit:]]` and `\s` maps to `[[:space:]]`. +4. In ASCII compatible mode, word boundaries use the ASCII compatible `\w` to +determine whether a byte is a word byte or not. +5. Hexadecimal notation can be used to specify arbitrary bytes instead of +Unicode codepoints. For example, in ASCII compatible mode, `\xFF` matches the +literal byte `\xFF`, while in Unicode mode, `\xFF` is the Unicode codepoint +`U+00FF` that matches its UTF-8 encoding of `\xC3\xBF`. Similarly for octal +notation when enabled. +6. In ASCII compatible mode, `.` matches any *byte* except for `\n`. When the +`s` flag is additionally enabled, `.` matches any byte. + +# Performance + +In general, one should expect performance on `&[u8]` to be roughly similar to +performance on `&str`. +*/ +pub use crate::{builders::bytes::*, regex::bytes::*, regexset::bytes::*}; diff --git a/vendor/regex/src/error.rs b/vendor/regex/src/error.rs new file mode 100644 index 00000000000000..9e90d5674283fc --- /dev/null +++ b/vendor/regex/src/error.rs @@ -0,0 +1,101 @@ +use alloc::string::{String, ToString}; + +use regex_automata::meta; + +/// An error that occurred during parsing or compiling a regular expression. +#[non_exhaustive] +#[derive(Clone, PartialEq)] +pub enum Error { + /// A syntax error. + Syntax(String), + /// The compiled program exceeded the set size + /// limit. The argument is the size limit imposed by + /// [`RegexBuilder::size_limit`](crate::RegexBuilder::size_limit). Even + /// when not configured explicitly, it defaults to a reasonable limit. + /// + /// If you're getting this error, it occurred because your regex has been + /// compiled to an intermediate state that is too big. It is important to + /// note that exceeding this limit does _not_ mean the regex is too big to + /// _work_, but rather, the regex is big enough that it may wind up being + /// surprisingly slow when used in a search. In other words, this error is + /// meant to be a practical heuristic for avoiding a performance footgun, + /// and especially so for the case where the regex pattern is coming from + /// an untrusted source. + /// + /// There are generally two ways to move forward if you hit this error. + /// The first is to find some way to use a smaller regex. The second is to + /// increase the size limit via `RegexBuilder::size_limit`. However, if + /// your regex pattern is not from a trusted source, then neither of these + /// approaches may be appropriate. Instead, you'll have to determine just + /// how big of a regex you want to allow. + CompiledTooBig(usize), +} + +impl Error { + pub(crate) fn from_meta_build_error(err: meta::BuildError) -> Error { + if let Some(size_limit) = err.size_limit() { + Error::CompiledTooBig(size_limit) + } else if let Some(ref err) = err.syntax_error() { + Error::Syntax(err.to_string()) + } else { + // This is a little suspect. 
Technically there are more ways for + // a meta regex to fail to build other than "exceeded size limit" + // and "syntax error." For example, if there are too many states + // or even too many patterns. But in practice this is probably + // good enough. The worst thing that happens is that Error::Syntax + // represents an error that isn't technically a syntax error, but + // the actual message will still be shown. So... it's not too bad. + // + // We really should have made the Error type in the regex crate + // completely opaque. Rookie mistake. + Error::Syntax(err.to_string()) + } + } +} + +#[cfg(feature = "std")] +impl std::error::Error for Error { + // TODO: Remove this method entirely on the next breaking semver release. + #[allow(deprecated)] + fn description(&self) -> &str { + match *self { + Error::Syntax(ref err) => err, + Error::CompiledTooBig(_) => "compiled program too big", + } + } +} + +impl core::fmt::Display for Error { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + match *self { + Error::Syntax(ref err) => err.fmt(f), + Error::CompiledTooBig(limit) => write!( + f, + "Compiled regex exceeds size limit of {limit} bytes.", + ), + } + } +} + +// We implement our own Debug implementation so that we show nicer syntax +// errors when people use `Regex::new(...).unwrap()`. It's a little weird, +// but the `Syntax` variant is already storing a `String` anyway, so we might +// as well format it nicely. +impl core::fmt::Debug for Error { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + match *self { + Error::Syntax(ref err) => { + let hr: String = core::iter::repeat('~').take(79).collect(); + writeln!(f, "Syntax(")?; + writeln!(f, "{hr}")?; + writeln!(f, "{err}")?; + writeln!(f, "{hr}")?; + write!(f, ")")?; + Ok(()) + } + Error::CompiledTooBig(limit) => { + f.debug_tuple("CompiledTooBig").field(&limit).finish() + } + } + } +} diff --git a/vendor/regex/src/find_byte.rs b/vendor/regex/src/find_byte.rs new file mode 100644 index 00000000000000..9c6915db40cf5e --- /dev/null +++ b/vendor/regex/src/find_byte.rs @@ -0,0 +1,17 @@ +/// Searches for the given needle in the given haystack. +/// +/// If the perf-literal feature is enabled, then this uses the super optimized +/// memchr crate. Otherwise, it uses the naive byte-at-a-time implementation. +pub(crate) fn find_byte(needle: u8, haystack: &[u8]) -> Option { + #[cfg(not(feature = "perf-literal"))] + fn imp(needle: u8, haystack: &[u8]) -> Option { + haystack.iter().position(|&b| b == needle) + } + + #[cfg(feature = "perf-literal")] + fn imp(needle: u8, haystack: &[u8]) -> Option { + memchr::memchr(needle, haystack) + } + + imp(needle, haystack) +} diff --git a/vendor/regex/src/lib.rs b/vendor/regex/src/lib.rs new file mode 100644 index 00000000000000..87e48b7e90b0c7 --- /dev/null +++ b/vendor/regex/src/lib.rs @@ -0,0 +1,1353 @@ +/*! +This crate provides routines for searching strings for matches of a [regular +expression] (aka "regex"). The regex syntax supported by this crate is similar +to other regex engines, but it lacks several features that are not known how to +implement efficiently. This includes, but is not limited to, look-around and +backreferences. In exchange, all regex searches in this crate have worst case +`O(m * n)` time complexity, where `m` is proportional to the size of the regex +and `n` is proportional to the size of the string being searched. 
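
A minimal sketch of what that trade-off looks like in practice (the specific patterns are illustrative): unsupported constructs such as backreferences are rejected when the regex is built, rather than being supported with worse complexity:

```rust
use regex::Regex;

// Backreferences are outside the supported syntax, so this pattern is
// rejected when the regex is built.
assert!(Regex::new(r"(a+)\1").is_err());
// An ordinary pattern compiles fine.
assert!(Regex::new(r"a+").is_ok());
```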
+ +[regular expression]: https://en.wikipedia.org/wiki/Regular_expression + +If you just want API documentation, then skip to the [`Regex`] type. Otherwise, +here's a quick example showing one way of parsing the output of a grep-like +program: + +```rust +use regex::Regex; + +let re = Regex::new(r"(?m)^([^:]+):([0-9]+):(.+)$").unwrap(); +let hay = "\ +path/to/foo:54:Blue Harvest +path/to/bar:90:Something, Something, Something, Dark Side +path/to/baz:3:It's a Trap! +"; + +let mut results = vec![]; +for (_, [path, lineno, line]) in re.captures_iter(hay).map(|c| c.extract()) { + results.push((path, lineno.parse::()?, line)); +} +assert_eq!(results, vec![ + ("path/to/foo", 54, "Blue Harvest"), + ("path/to/bar", 90, "Something, Something, Something, Dark Side"), + ("path/to/baz", 3, "It's a Trap!"), +]); +# Ok::<(), Box>(()) +``` + +# Overview + +The primary type in this crate is a [`Regex`]. Its most important methods are +as follows: + +* [`Regex::new`] compiles a regex using the default configuration. A +[`RegexBuilder`] permits setting a non-default configuration. (For example, +case insensitive matching, verbose mode and others.) +* [`Regex::is_match`] reports whether a match exists in a particular haystack. +* [`Regex::find`] reports the byte offsets of a match in a haystack, if one +exists. [`Regex::find_iter`] returns an iterator over all such matches. +* [`Regex::captures`] returns a [`Captures`], which reports both the byte +offsets of a match in a haystack and the byte offsets of each matching capture +group from the regex in the haystack. +[`Regex::captures_iter`] returns an iterator over all such matches. + +There is also a [`RegexSet`], which permits searching for multiple regex +patterns simultaneously in a single search. However, it currently only reports +which patterns match and *not* the byte offsets of a match. + +Otherwise, this top-level crate documentation is organized as follows: + +* [Usage](#usage) shows how to add the `regex` crate to your Rust project. +* [Examples](#examples) provides a limited selection of regex search examples. +* [Performance](#performance) provides a brief summary of how to optimize regex +searching speed. +* [Unicode](#unicode) discusses support for non-ASCII patterns. +* [Syntax](#syntax) enumerates the specific regex syntax supported by this +crate. +* [Untrusted input](#untrusted-input) discusses how this crate deals with regex +patterns or haystacks that are untrusted. +* [Crate features](#crate-features) documents the Cargo features that can be +enabled or disabled for this crate. +* [Other crates](#other-crates) links to other crates in the `regex` family. + +# Usage + +The `regex` crate is [on crates.io](https://crates.io/crates/regex) and can be +used by adding `regex` to your dependencies in your project's `Cargo.toml`. +Or more simply, just run `cargo add regex`. + +Here is a complete example that creates a new Rust project, adds a dependency +on `regex`, creates the source code for a regex search and then runs the +program. + +First, create the project in a new directory: + +```text +$ mkdir regex-example +$ cd regex-example +$ cargo init +``` + +Second, add a dependency on `regex`: + +```text +$ cargo add regex +``` + +Third, edit `src/main.rs`. 
Delete what's there and replace it with this: + +``` +use regex::Regex; + +fn main() { + let re = Regex::new(r"Hello (?\w+)!").unwrap(); + let Some(caps) = re.captures("Hello Murphy!") else { + println!("no match!"); + return; + }; + println!("The name is: {}", &caps["name"]); +} +``` + +Fourth, run it with `cargo run`: + +```text +$ cargo run + Compiling memchr v2.5.0 + Compiling regex-syntax v0.7.1 + Compiling aho-corasick v1.0.1 + Compiling regex v1.8.1 + Compiling regex-example v0.1.0 (/tmp/regex-example) + Finished dev [unoptimized + debuginfo] target(s) in 4.22s + Running `target/debug/regex-example` +The name is: Murphy +``` + +The first time you run the program will show more output like above. But +subsequent runs shouldn't have to re-compile the dependencies. + +# Examples + +This section provides a few examples, in tutorial style, showing how to +search a haystack with a regex. There are more examples throughout the API +documentation. + +Before starting though, it's worth defining a few terms: + +* A **regex** is a Rust value whose type is `Regex`. We use `re` as a +variable name for a regex. +* A **pattern** is the string that is used to build a regex. We use `pat` as +a variable name for a pattern. +* A **haystack** is the string that is searched by a regex. We use `hay` as a +variable name for a haystack. + +Sometimes the words "regex" and "pattern" are used interchangeably. + +General use of regular expressions in this crate proceeds by compiling a +**pattern** into a **regex**, and then using that regex to search, split or +replace parts of a **haystack**. + +### Example: find a middle initial + +We'll start off with a very simple example: a regex that looks for a specific +name but uses a wildcard to match a middle initial. Our pattern serves as +something like a template that will match a particular name with *any* middle +initial. + +```rust +use regex::Regex; + +// We use 'unwrap()' here because it would be a bug in our program if the +// pattern failed to compile to a regex. Panicking in the presence of a bug +// is okay. +let re = Regex::new(r"Homer (.)\. Simpson").unwrap(); +let hay = "Homer J. Simpson"; +let Some(caps) = re.captures(hay) else { return }; +assert_eq!("J", &caps[1]); +``` + +There are a few things worth noticing here in our first example: + +* The `.` is a special pattern meta character that means "match any single +character except for new lines." (More precisely, in this crate, it means +"match any UTF-8 encoding of any Unicode scalar value other than `\n`.") +* We can match an actual `.` literally by escaping it, i.e., `\.`. +* We use Rust's [raw strings] to avoid needing to deal with escape sequences in +both the regex pattern syntax and in Rust's string literal syntax. If we didn't +use raw strings here, we would have had to use `\\.` to match a literal `.` +character. That is, `r"\."` and `"\\."` are equivalent patterns. +* We put our wildcard `.` instruction in parentheses. These parentheses have a +special meaning that says, "make whatever part of the haystack matches within +these parentheses available as a capturing group." After finding a match, we +access this capture group with `&caps[1]`. + +[raw strings]: https://doc.rust-lang.org/stable/reference/tokens.html#raw-string-literals + +Otherwise, we execute a search using `re.captures(hay)` and return from our +function if no match occurred. We then reference the middle initial by asking +for the part of the haystack that matched the capture group indexed at `1`. 
+(The capture group at index 0 is implicit and always corresponds to the entire +match. In this case, that's `Homer J. Simpson`.) + +### Example: named capture groups + +Continuing from our middle initial example above, we can tweak the pattern +slightly to give a name to the group that matches the middle initial: + +```rust +use regex::Regex; + +// Note that (?P.) is a different way to spell the same thing. +let re = Regex::new(r"Homer (?.)\. Simpson").unwrap(); +let hay = "Homer J. Simpson"; +let Some(caps) = re.captures(hay) else { return }; +assert_eq!("J", &caps["middle"]); +``` + +Giving a name to a group can be useful when there are multiple groups in +a pattern. It makes the code referring to those groups a bit easier to +understand. + +### Example: validating a particular date format + +This examples shows how to confirm whether a haystack, in its entirety, matches +a particular date format: + +```rust +use regex::Regex; + +let re = Regex::new(r"^\d{4}-\d{2}-\d{2}$").unwrap(); +assert!(re.is_match("2010-03-14")); +``` + +Notice the use of the `^` and `$` anchors. In this crate, every regex search is +run with an implicit `(?s:.)*?` at the beginning of its pattern, which allows +the regex to match anywhere in a haystack. Anchors, as above, can be used to +ensure that the full haystack matches a pattern. + +This crate is also Unicode aware by default, which means that `\d` might match +more than you might expect it to. For example: + +```rust +use regex::Regex; + +let re = Regex::new(r"^\d{4}-\d{2}-\d{2}$").unwrap(); +assert!(re.is_match("𝟚𝟘𝟙𝟘-𝟘𝟛-𝟙𝟜")); +``` + +To only match an ASCII decimal digit, all of the following are equivalent: + +* `[0-9]` +* `(?-u:\d)` +* `[[:digit:]]` +* `[\d&&\p{ascii}]` + +### Example: finding dates in a haystack + +In the previous example, we showed how one might validate that a haystack, +in its entirety, corresponded to a particular date format. But what if we wanted +to extract all things that look like dates in a specific format from a haystack? +To do this, we can use an iterator API to find all matches (notice that we've +removed the anchors and switched to looking for ASCII-only digits): + +```rust +use regex::Regex; + +let re = Regex::new(r"[0-9]{4}-[0-9]{2}-[0-9]{2}").unwrap(); +let hay = "What do 1865-04-14, 1881-07-02, 1901-09-06 and 1963-11-22 have in common?"; +// 'm' is a 'Match', and 'as_str()' returns the matching part of the haystack. +let dates: Vec<&str> = re.find_iter(hay).map(|m| m.as_str()).collect(); +assert_eq!(dates, vec![ + "1865-04-14", + "1881-07-02", + "1901-09-06", + "1963-11-22", +]); +``` + +We can also iterate over [`Captures`] values instead of [`Match`] values, and +that in turn permits accessing each component of the date via capturing groups: + +```rust +use regex::Regex; + +let re = Regex::new(r"(?[0-9]{4})-(?[0-9]{2})-(?[0-9]{2})").unwrap(); +let hay = "What do 1865-04-14, 1881-07-02, 1901-09-06 and 1963-11-22 have in common?"; +// 'm' is a 'Match', and 'as_str()' returns the matching part of the haystack. +let dates: Vec<(&str, &str, &str)> = re.captures_iter(hay).map(|caps| { + // The unwraps are okay because every capture group must match if the whole + // regex matches, and in this context, we know we have a match. + // + // Note that we use `caps.name("y").unwrap().as_str()` instead of + // `&caps["y"]` because the lifetime of the former is the same as the + // lifetime of `hay` above, but the lifetime of the latter is tied to the + // lifetime of `caps` due to how the `Index` trait is defined. 
+ let year = caps.name("y").unwrap().as_str(); + let month = caps.name("m").unwrap().as_str(); + let day = caps.name("d").unwrap().as_str(); + (year, month, day) +}).collect(); +assert_eq!(dates, vec![ + ("1865", "04", "14"), + ("1881", "07", "02"), + ("1901", "09", "06"), + ("1963", "11", "22"), +]); +``` + +### Example: simpler capture group extraction + +One can use [`Captures::extract`] to make the code from the previous example a +bit simpler in this case: + +```rust +use regex::Regex; + +let re = Regex::new(r"([0-9]{4})-([0-9]{2})-([0-9]{2})").unwrap(); +let hay = "What do 1865-04-14, 1881-07-02, 1901-09-06 and 1963-11-22 have in common?"; +let dates: Vec<(&str, &str, &str)> = re.captures_iter(hay).map(|caps| { + let (_, [year, month, day]) = caps.extract(); + (year, month, day) +}).collect(); +assert_eq!(dates, vec![ + ("1865", "04", "14"), + ("1881", "07", "02"), + ("1901", "09", "06"), + ("1963", "11", "22"), +]); +``` + +`Captures::extract` works by ensuring that the number of matching groups match +the number of groups requested via the `[year, month, day]` syntax. If they do, +then the substrings for each corresponding capture group are automatically +returned in an appropriately sized array. Rust's syntax for pattern matching +arrays does the rest. + +### Example: replacement with named capture groups + +Building on the previous example, perhaps we'd like to rearrange the date +formats. This can be done by finding each match and replacing it with +something different. The [`Regex::replace_all`] routine provides a convenient +way to do this, including by supporting references to named groups in the +replacement string: + +```rust +use regex::Regex; + +let re = Regex::new(r"(?\d{4})-(?\d{2})-(?\d{2})").unwrap(); +let before = "1973-01-05, 1975-08-25 and 1980-10-18"; +let after = re.replace_all(before, "$m/$d/$y"); +assert_eq!(after, "01/05/1973, 08/25/1975 and 10/18/1980"); +``` + +The replace methods are actually polymorphic in the replacement, which +provides more flexibility than is seen here. (See the documentation for +[`Regex::replace`] for more details.) + +### Example: verbose mode + +When your regex gets complicated, you might consider using something other +than regex. But if you stick with regex, you can use the `x` flag to enable +insignificant whitespace mode or "verbose mode." In this mode, whitespace +is treated as insignificant and one may write comments. This may make your +patterns easier to comprehend. + +```rust +use regex::Regex; + +let re = Regex::new(r"(?x) + (?P\d{4}) # the year, including all Unicode digits + - + (?P\d{2}) # the month, including all Unicode digits + - + (?P\d{2}) # the day, including all Unicode digits +").unwrap(); + +let before = "1973-01-05, 1975-08-25 and 1980-10-18"; +let after = re.replace_all(before, "$m/$d/$y"); +assert_eq!(after, "01/05/1973, 08/25/1975 and 10/18/1980"); +``` + +If you wish to match against whitespace in this mode, you can still use `\s`, +`\n`, `\t`, etc. For escaping a single space character, you can escape it +directly with `\ `, use its hex character code `\x20` or temporarily disable +the `x` flag, e.g., `(?-x: )`. + +### Example: match multiple regular expressions simultaneously + +This demonstrates how to use a [`RegexSet`] to match multiple (possibly +overlapping) regexes in a single scan of a haystack: + +```rust +use regex::RegexSet; + +let set = RegexSet::new(&[ + r"\w+", + r"\d+", + r"\pL+", + r"foo", + r"bar", + r"barfoo", + r"foobar", +]).unwrap(); + +// Iterate over and collect all of the matches. 
Each match corresponds to the +// ID of the matching pattern. +let matches: Vec<_> = set.matches("foobar").into_iter().collect(); +assert_eq!(matches, vec![0, 2, 3, 4, 6]); + +// You can also test whether a particular regex matched: +let matches = set.matches("foobar"); +assert!(!matches.matched(5)); +assert!(matches.matched(6)); +``` + +# Performance + +This section briefly discusses a few concerns regarding the speed and resource +usage of regexes. + +### Only ask for what you need + +When running a search with a regex, there are generally three different types +of information one can ask for: + +1. Does a regex match in a haystack? +2. Where does a regex match in a haystack? +3. Where do each of the capturing groups match in a haystack? + +Generally speaking, this crate could provide a function to answer only #3, +which would subsume #1 and #2 automatically. However, it can be significantly +more expensive to compute the location of capturing group matches, so it's best +not to do it if you don't need to. + +Therefore, only ask for what you need. For example, don't use [`Regex::find`] +if you only need to test if a regex matches a haystack. Use [`Regex::is_match`] +instead. + +### Unicode can impact memory usage and search speed + +This crate has first class support for Unicode and it is **enabled by default**. +In many cases, the extra memory required to support it will be negligible and +it typically won't impact search speed. But it can in some cases. + +With respect to memory usage, the impact of Unicode principally manifests +through the use of Unicode character classes. Unicode character classes +tend to be quite large. For example, `\w` by default matches around 140,000 +distinct codepoints. This requires additional memory, and tends to slow down +regex compilation. While a `\w` here and there is unlikely to be noticed, +writing `\w{100}` will for example result in quite a large regex by default. +Indeed, `\w` is considerably larger than its ASCII-only version, so if your +requirements are satisfied by ASCII, it's probably a good idea to stick to +ASCII classes. The ASCII-only version of `\w` can be spelled in a number of +ways. All of the following are equivalent: + +* `[0-9A-Za-z_]` +* `(?-u:\w)` +* `[[:word:]]` +* `[\w&&\p{ascii}]` + +With respect to search speed, Unicode tends to be handled pretty well, even when +using large Unicode character classes. However, some of the faster internal +regex engines cannot handle a Unicode aware word boundary assertion. So if you +don't need Unicode-aware word boundary assertions, you might consider using +`(?-u:\b)` instead of `\b`, where the former uses an ASCII-only definition of +a word character. + +### Literals might accelerate searches + +This crate tends to be quite good at recognizing literals in a regex pattern +and using them to accelerate a search. If it is at all possible to include +some kind of literal in your pattern, then it might make search substantially +faster. For example, in the regex `\w+@\w+`, the engine will look for +occurrences of `@` and then try a reverse match for `\w+` to find the start +position. + +### Avoid re-compiling regexes, especially in a loop + +It is an anti-pattern to compile the same pattern in a loop since regex +compilation is typically expensive. (It takes anywhere from a few microseconds +to a few **milliseconds** depending on the size of the pattern.) Not only is +compilation itself expensive, but this also prevents optimizations that reuse +allocations internally to the regex engine. 
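
Concretely, the fix is to hoist compilation out of the loop and reuse the same `Regex` value; here is a minimal sketch with illustrative data:

```rust
use regex::Regex;

// Compile once, outside the loop; reuse the compiled regex for every item.
let re = Regex::new(r"^[0-9]+$").unwrap();
let lines = ["123", "abc", "42"];
let mut numeric = Vec::new();
for line in lines {
    if re.is_match(line) {
        numeric.push(line);
    }
}
assert_eq!(numeric, vec!["123", "42"]);
```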
+ +In Rust, it can sometimes be a pain to pass regular expressions around if +they're used from inside a helper function. Instead, we recommend using +[`std::sync::LazyLock`], or the [`once_cell`] crate, +if you can't use the standard library. + +This example shows how to use `std::sync::LazyLock`: + +```rust +use std::sync::LazyLock; + +use regex::Regex; + +fn some_helper_function(haystack: &str) -> bool { + static RE: LazyLock = LazyLock::new(|| Regex::new(r"...").unwrap()); + RE.is_match(haystack) +} + +fn main() { + assert!(some_helper_function("abc")); + assert!(!some_helper_function("ac")); +} +``` + +Specifically, in this example, the regex will be compiled when it is used for +the first time. On subsequent uses, it will reuse the previously built `Regex`. +Notice how one can define the `Regex` locally to a specific function. + +[`std::sync::LazyLock`]: https://doc.rust-lang.org/std/sync/struct.LazyLock.html +[`once_cell`]: https://crates.io/crates/once_cell + +### Sharing a regex across threads can result in contention + +While a single `Regex` can be freely used from multiple threads simultaneously, +there is a small synchronization cost that must be paid. Generally speaking, +one shouldn't expect to observe this unless the principal task in each thread +is searching with the regex *and* most searches are on short haystacks. In this +case, internal contention on shared resources can spike and increase latency, +which in turn may slow down each individual search. + +One can work around this by cloning each `Regex` before sending it to another +thread. The cloned regexes will still share the same internal read-only portion +of its compiled state (it's reference counted), but each thread will get +optimized access to the mutable space that is used to run a search. In general, +there is no additional cost in memory to doing this. The only cost is the added +code complexity required to explicitly clone the regex. (If you share the same +`Regex` across multiple threads, each thread still gets its own mutable space, +but accessing that space is slower.) + +# Unicode + +This section discusses what kind of Unicode support this regex library has. +Before showing some examples, we'll summarize the relevant points: + +* This crate almost fully implements "Basic Unicode Support" (Level 1) as +specified by the [Unicode Technical Standard #18][UTS18]. The full details +of what is supported are documented in [UNICODE.md] in the root of the regex +crate repository. There is virtually no support for "Extended Unicode Support" +(Level 2) from UTS#18. +* The top-level [`Regex`] runs searches *as if* iterating over each of the +codepoints in the haystack. That is, the fundamental atom of matching is a +single codepoint. +* [`bytes::Regex`], in contrast, permits disabling Unicode mode for part of all +of your pattern in all cases. When Unicode mode is disabled, then a search is +run *as if* iterating over each byte in the haystack. That is, the fundamental +atom of matching is a single byte. (A top-level `Regex` also permits disabling +Unicode and thus matching *as if* it were one byte at a time, but only when +doing so wouldn't permit matching invalid UTF-8.) +* When Unicode mode is enabled (the default), `.` will match an entire Unicode +scalar value, even when it is encoded using multiple bytes. When Unicode mode +is disabled (e.g., `(?-u:.)`), then `.` will match a single byte in all cases. +* The character classes `\w`, `\d` and `\s` are all Unicode-aware by default. 
+Use `(?-u:\w)`, `(?-u:\d)` and `(?-u:\s)` to get their ASCII-only definitions. +* Similarly, `\b` and `\B` use a Unicode definition of a "word" character. +To get ASCII-only word boundaries, use `(?-u:\b)` and `(?-u:\B)`. This also +applies to the special word boundary assertions. (That is, `\b{start}`, +`\b{end}`, `\b{start-half}`, `\b{end-half}`.) +* `^` and `$` are **not** Unicode-aware in multi-line mode. Namely, they only +recognize `\n` (assuming CRLF mode is not enabled) and not any of the other +forms of line terminators defined by Unicode. +* Case insensitive searching is Unicode-aware and uses simple case folding. +* Unicode general categories, scripts and many boolean properties are available +by default via the `\p{property name}` syntax. +* In all cases, matches are reported using byte offsets. Or more precisely, +UTF-8 code unit offsets. This permits constant time indexing and slicing of the +haystack. + +[UTS18]: https://unicode.org/reports/tr18/ +[UNICODE.md]: https://github.com/rust-lang/regex/blob/master/UNICODE.md + +Patterns themselves are **only** interpreted as a sequence of Unicode scalar +values. This means you can use Unicode characters directly in your pattern: + +```rust +use regex::Regex; + +let re = Regex::new(r"(?i)Δ+").unwrap(); +let m = re.find("ΔδΔ").unwrap(); +assert_eq!((0, 6), (m.start(), m.end())); +// alternatively: +assert_eq!(0..6, m.range()); +``` + +As noted above, Unicode general categories, scripts, script extensions, ages +and a smattering of boolean properties are available as character classes. For +example, you can match a sequence of numerals, Greek or Cherokee letters: + +```rust +use regex::Regex; + +let re = Regex::new(r"[\pN\p{Greek}\p{Cherokee}]+").unwrap(); +let m = re.find("abcΔᎠβⅠᏴγδⅡxyz").unwrap(); +assert_eq!(3..23, m.range()); +``` + +While not specific to Unicode, this library also supports character class set +operations. Namely, one can nest character classes arbitrarily and perform set +operations on them. Those set operations are union (the default), intersection, +difference and symmetric difference. These set operations tend to be most +useful with Unicode character classes. For example, to match any codepoint +that is both in the `Greek` script and in the `Letter` general category: + +```rust +use regex::Regex; + +let re = Regex::new(r"[\p{Greek}&&\pL]+").unwrap(); +let subs: Vec<&str> = re.find_iter("ΔδΔ𐅌ΔδΔ").map(|m| m.as_str()).collect(); +assert_eq!(subs, vec!["ΔδΔ", "ΔδΔ"]); + +// If we just matches on Greek, then all codepoints would match! +let re = Regex::new(r"\p{Greek}+").unwrap(); +let subs: Vec<&str> = re.find_iter("ΔδΔ𐅌ΔδΔ").map(|m| m.as_str()).collect(); +assert_eq!(subs, vec!["ΔδΔ𐅌ΔδΔ"]); +``` + +### Opt out of Unicode support + +The [`bytes::Regex`] type that can be used to search `&[u8]` haystacks. By +default, haystacks are conventionally treated as UTF-8 just like it is with the +main `Regex` type. However, this behavior can be disabled by turning off the +`u` flag, even if doing so could result in matching invalid UTF-8. For example, +when the `u` flag is disabled, `.` will match any byte instead of any Unicode +scalar value. + +Disabling the `u` flag is also possible with the standard `&str`-based `Regex` +type, but it is only allowed where the UTF-8 invariant is maintained. 
For +example, `(?-u:\w)` is an ASCII-only `\w` character class and is legal in an +`&str`-based `Regex`, but `(?-u:\W)` will attempt to match *any byte* that +isn't in `(?-u:\w)`, which in turn includes bytes that are invalid UTF-8. +Similarly, `(?-u:\xFF)` will attempt to match the raw byte `\xFF` (instead of +`U+00FF`), which is invalid UTF-8 and therefore is illegal in `&str`-based +regexes. + +Finally, since Unicode support requires bundling large Unicode data +tables, this crate exposes knobs to disable the compilation of those +data tables, which can be useful for shrinking binary size and reducing +compilation times. For details on how to do that, see the section on [crate +features](#crate-features). + +# Syntax + +The syntax supported in this crate is documented below. + +Note that the regular expression parser and abstract syntax are exposed in +a separate crate, [`regex-syntax`](https://docs.rs/regex-syntax). + +### Matching one character + +

, flags: raw::c_int) -> Result + where + P: AsRef, + { + let filename = match filename { + None => None, + Some(ref f) => Some(cstr_cow_from_bytes(f.as_ref().as_bytes())?), + }; + with_dlerror( + move || { + let result = dlopen( + match filename { + None => ptr::null(), + Some(ref f) => f.as_ptr(), + }, + flags, + ); + // ensure filename lives until dlopen completes + drop(filename); + if result.is_null() { + None + } else { + Some(Library { handle: result }) + } + }, + |desc| crate::Error::DlOpen { desc: desc.into() }, + ) + .map_err(|e| e.unwrap_or(crate::Error::DlOpenUnknown)) + } + + unsafe fn get_impl(&self, symbol: &[u8], on_null: F) -> Result, crate::Error> + where + F: FnOnce() -> Result, crate::Error>, + { + ensure_compatible_types::()?; + let symbol = cstr_cow_from_bytes(symbol)?; + // `dlsym` may return nullptr in two cases: when a symbol genuinely points to a null + // pointer or the symbol cannot be found. In order to detect this case a double dlerror + // pattern must be used, which is, sadly, a little bit racy. + // + // We try to leave as little space as possible for this to occur, but we can’t exactly + // fully prevent it. + let result = with_dlerror( + || { + dlerror(); + let symbol = dlsym(self.handle, symbol.as_ptr()); + if symbol.is_null() { + None + } else { + Some(Symbol { + pointer: symbol, + pd: marker::PhantomData, + }) + } + }, + |desc| crate::Error::DlSym { desc: desc.into() }, + ); + match result { + Err(None) => on_null(), + Err(Some(e)) => Err(e), + Ok(x) => Ok(x), + } + } + + /// Get a pointer to a function or static variable by symbol name. + /// + /// The `symbol` may not contain any null bytes, with the exception of the last byte. Providing a + /// null terminated `symbol` may help to avoid an allocation. + /// + /// Symbol is interpreted as-is; no mangling is done. This means that symbols like `x::y` are + /// most likely invalid. + /// + /// # Safety + /// + /// Users of this API must specify the correct type of the function or variable loaded. Using a + /// `Symbol` with a wrong type is undefined. + /// + /// # Platform-specific behaviour + /// + /// Implementation of thread local variables is extremely platform specific and uses of such + /// variables that work on e.g. Linux may have unintended behaviour on other targets. + /// + /// On POSIX implementations where the `dlerror` function is not confirmed to be MT-safe (such + /// as FreeBSD), this function will unconditionally return an error when the underlying `dlsym` + /// call returns a null pointer. There are rare situations where `dlsym` returns a genuine null + /// pointer without it being an error. If loading a null pointer is something you care about, + /// consider using the [`Library::get_singlethreaded`] call. + #[inline(always)] + pub unsafe fn get(&self, symbol: &[u8]) -> Result, crate::Error> { + extern crate cfg_if; + cfg_if::cfg_if! { + // These targets are known to have MT-safe `dlerror`. + if #[cfg(any( + target_os = "linux", + target_os = "android", + target_os = "openbsd", + target_os = "macos", + target_os = "ios", + target_os = "solaris", + target_os = "illumos", + target_os = "redox", + target_os = "fuchsia", + target_os = "cygwin", + ))] { + self.get_singlethreaded(symbol) + } else { + self.get_impl(symbol, || Err(crate::Error::DlSymUnknown)) + } + } + } + + /// Get a pointer to function or static variable by symbol name. + /// + /// The `symbol` may not contain any null bytes, with the exception of the last byte. 
Providing a + /// null terminated `symbol` may help to avoid an allocation. + /// + /// Symbol is interpreted as-is; no mangling is done. This means that symbols like `x::y` are + /// most likely invalid. + /// + /// # Safety + /// + /// Users of this API must specify the correct type of the function or variable loaded. + /// + /// It is up to the user of this library to ensure that no other calls to an MT-unsafe + /// implementation of `dlerror` occur during the execution of this function. Failing that, the + /// behaviour of this function is not defined. + /// + /// # Platform-specific behaviour + /// + /// The implementation of thread-local variables is extremely platform specific and uses of such + /// variables that work on e.g. Linux may have unintended behaviour on other targets. + #[inline(always)] + pub unsafe fn get_singlethreaded(&self, symbol: &[u8]) -> Result, crate::Error> { + self.get_impl(symbol, || { + Ok(Symbol { + pointer: ptr::null_mut(), + pd: marker::PhantomData, + }) + }) + } + + /// Convert the `Library` to a raw handle. + /// + /// The handle returned by this function shall be usable with APIs which accept handles + /// as returned by `dlopen`. + pub fn into_raw(self) -> *mut raw::c_void { + let handle = self.handle; + mem::forget(self); + handle + } + + /// Convert a raw handle returned by `dlopen`-family of calls to a `Library`. + /// + /// # Safety + /// + /// The pointer shall be a result of a successful call of the `dlopen`-family of functions or a + /// pointer previously returned by `Library::into_raw` call. It must be valid to call `dlclose` + /// with this pointer as an argument. + pub unsafe fn from_raw(handle: *mut raw::c_void) -> Library { + Library { handle } + } + + /// Unload the library. + /// + /// This method might be a no-op, depending on the flags with which the `Library` was opened, + /// what library was opened or other platform specifics. + /// + /// You only need to call this if you are interested in handling any errors that may arise when + /// library is unloaded. Otherwise the implementation of `Drop` for `Library` will close the + /// library and ignore the errors were they arise. + /// + /// The underlying data structures may still get leaked if an error does occur. + pub fn close(self) -> Result<(), crate::Error> { + let result = with_dlerror( + || { + if unsafe { dlclose(self.handle) } == 0 { + Some(()) + } else { + None + } + }, + |desc| crate::Error::DlClose { desc: desc.into() }, + ) + .map_err(|e| e.unwrap_or(crate::Error::DlCloseUnknown)); + // While the library is not free'd yet in case of an error, there is no reason to try + // dropping it again, because all that will do is try calling `dlclose` again. only + // this time it would ignore the return result, which we already seen failing… + std::mem::forget(self); + result + } +} + +impl Drop for Library { + fn drop(&mut self) { + unsafe { + dlclose(self.handle); + } + } +} + +impl fmt::Debug for Library { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.write_str(&format!("Library@{:p}", self.handle)) + } +} + +/// Symbol from a library. +/// +/// A major difference compared to the cross-platform `Symbol` is that this does not ensure that the +/// `Symbol` does not outlive the `Library` it comes from. +pub struct Symbol { + pointer: *mut raw::c_void, + pd: marker::PhantomData, +} + +impl Symbol { + /// Convert the loaded `Symbol` into a raw pointer. 
+ pub fn into_raw(self) -> *mut raw::c_void { + self.pointer + } + + /// Convert the loaded `Symbol` into a raw pointer. + /// For unix this does the same as into_raw. + pub fn as_raw_ptr(self) -> *mut raw::c_void { + self.pointer + } +} + +impl Symbol> { + /// Lift Option out of the symbol. + pub fn lift_option(self) -> Option> { + if self.pointer.is_null() { + None + } else { + Some(Symbol { + pointer: self.pointer, + pd: marker::PhantomData, + }) + } + } +} + +unsafe impl Send for Symbol {} +unsafe impl Sync for Symbol {} + +impl Clone for Symbol { + fn clone(&self) -> Symbol { + Symbol { ..*self } + } +} + +impl ::std::ops::Deref for Symbol { + type Target = T; + fn deref(&self) -> &T { + unsafe { + // Additional reference level for a dereference on `deref` return value. + &*(&self.pointer as *const *mut _ as *const T) + } + } +} + +impl fmt::Debug for Symbol { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + unsafe { + let mut info = mem::MaybeUninit::::uninit(); + if dladdr(self.pointer, info.as_mut_ptr()) != 0 { + let info = info.assume_init(); + if info.dli_sname.is_null() { + f.write_str(&format!( + "Symbol@{:p} from {:?}", + self.pointer, + CStr::from_ptr(info.dli_fname) + )) + } else { + f.write_str(&format!( + "Symbol {:?}@{:p} from {:?}", + CStr::from_ptr(info.dli_sname), + self.pointer, + CStr::from_ptr(info.dli_fname) + )) + } + } else { + f.write_str(&format!("Symbol@{:p}", self.pointer)) + } + } + } +} + +// Platform specific things +#[cfg_attr(any(target_os = "linux", target_os = "android"), link(name = "dl"))] +#[cfg_attr(any(target_os = "freebsd", target_os = "dragonfly"), link(name = "c"))] +extern "C" { + fn dlopen(filename: *const raw::c_char, flags: raw::c_int) -> *mut raw::c_void; + fn dlclose(handle: *mut raw::c_void) -> raw::c_int; + fn dlsym(handle: *mut raw::c_void, symbol: *const raw::c_char) -> *mut raw::c_void; + fn dlerror() -> *mut raw::c_char; + fn dladdr(addr: *mut raw::c_void, info: *mut DlInfo) -> raw::c_int; +} + +#[repr(C)] +struct DlInfo { + dli_fname: *const raw::c_char, + dli_fbase: *mut raw::c_void, + dli_sname: *const raw::c_char, + dli_saddr: *mut raw::c_void, +} diff --git a/vendor/libloading/src/os/windows/mod.rs b/vendor/libloading/src/os/windows/mod.rs new file mode 100644 index 00000000000000..fa6713138690a6 --- /dev/null +++ b/vendor/libloading/src/os/windows/mod.rs @@ -0,0 +1,590 @@ +// A hack for docs.rs to build documentation that has both windows and linux documentation in the +// same rustdoc build visible. 
+#[cfg(all(libloading_docs, not(windows)))] +mod windows_imports {} +#[cfg(any(not(libloading_docs), windows))] +mod windows_imports { + use super::{BOOL, DWORD, FARPROC, HANDLE, HMODULE}; + pub(super) use std::os::windows::ffi::{OsStrExt, OsStringExt}; + windows_link::link!("kernel32.dll" "system" fn GetLastError() -> DWORD); + windows_link::link!("kernel32.dll" "system" fn SetThreadErrorMode(new_mode: DWORD, old_mode: *mut DWORD) -> BOOL); + windows_link::link!("kernel32.dll" "system" fn GetModuleHandleExW(flags: u32, module_name: *const u16, module: *mut HMODULE) -> BOOL); + windows_link::link!("kernel32.dll" "system" fn FreeLibrary(module: HMODULE) -> BOOL); + windows_link::link!("kernel32.dll" "system" fn LoadLibraryExW(filename: *const u16, file: HANDLE, flags: DWORD) -> HMODULE); + windows_link::link!("kernel32.dll" "system" fn GetModuleFileNameW(module: HMODULE, filename: *mut u16, size: DWORD) -> DWORD); + windows_link::link!("kernel32.dll" "system" fn GetProcAddress(module: HMODULE, procname: *const u8) -> FARPROC); +} + +use self::windows_imports::*; +use std::ffi::{OsStr, OsString}; +use std::os::raw; +use std::{fmt, io, marker, mem, ptr}; +use util::{cstr_cow_from_bytes, ensure_compatible_types}; + +/// The platform-specific counterpart of the cross-platform [`Library`](crate::Library). +pub struct Library(HMODULE); + +unsafe impl Send for Library {} +// Now, this is sort-of-tricky. MSDN documentation does not really make any claims as to safety of +// the Win32 APIs. Sadly, whomever I asked, even current and former Microsoft employees, couldn’t +// say for sure whether the Win32 APIs used to implement `Library` are thread-safe or not. +// +// My investigation ended up with a question about thread-safety properties of the API involved +// being sent to an internal (to MS) general question mailing-list. The conclusion of the mail is +// as such: +// +// * Nobody inside MS (at least out of all of the people who have seen the question) knows for +// sure either; +// * However, the general consensus between MS developers is that one can rely on the API being +// thread-safe. In case it is not thread-safe it should be considered a bug on the Windows +// part. (NB: bugs filed at https://connect.microsoft.com/ against Windows Server) +unsafe impl Sync for Library {} + +impl Library { + /// Find and load a module. + /// + /// If the `filename` specifies a full path, the function only searches that path for the + /// module. Otherwise, if the `filename` specifies a relative path or a module name without a + /// path, the function uses a Windows-specific search strategy to find the module. For more + /// information, see the [Remarks on MSDN][msdn]. + /// + /// If the `filename` specifies a library filename without a path and with the extension omitted, + /// the `.dll` extension is implicitly added. This behaviour may be suppressed by appending a + /// trailing `.` to the `filename`. + /// + /// This is equivalent to [Library::load_with_flags](filename, 0). + /// + /// [msdn]: https://docs.microsoft.com/en-us/windows/win32/api/libloaderapi/nf-libloaderapi-loadlibraryw#remarks + /// + /// # Safety + /// + /// When a library is loaded, initialisation routines contained within the library are executed. + /// For the purposes of safety, the execution of these routines is conceptually the same calling an + /// unknown foreign function and may impose arbitrary requirements on the caller for the call + /// to be sound. 
+ /// + /// Additionally, the callers of this function must also ensure that execution of the + /// termination routines contained within the library is safe as well. These routines may be + /// executed when the library is unloaded. + #[inline] + pub unsafe fn new>(filename: P) -> Result { + Library::load_with_flags(filename, 0) + } + + /// Get the `Library` representing the original program executable. + /// + /// Note that the behaviour of the `Library` loaded with this method is different from + /// Libraries loaded with [`os::unix::Library::this`]. For more information refer to [MSDN]. + /// + /// Corresponds to `GetModuleHandleExW(0, NULL, _)`. + /// + /// [`os::unix::Library::this`]: crate::os::unix::Library::this + /// [MSDN]: https://docs.microsoft.com/en-us/windows/win32/api/libloaderapi/nf-libloaderapi-getmodulehandleexw + pub fn this() -> Result { + unsafe { + let mut handle: HMODULE = 0; + with_get_last_error( + |source| crate::Error::GetModuleHandleExW { source }, + || { + let result = GetModuleHandleExW(0, std::ptr::null_mut(), &mut handle); + if result == 0 { + None + } else { + Some(Library(handle)) + } + }, + ) + .map_err(|e| e.unwrap_or(crate::Error::GetModuleHandleExWUnknown)) + } + } + + /// Get a module that is already loaded by the program. + /// + /// This function returns a `Library` corresponding to a module with the given name that is + /// already mapped into the address space of the process. If the module isn't found, an error is + /// returned. + /// + /// If the `filename` does not include a full path and there are multiple different loaded + /// modules corresponding to the `filename`, it is impossible to predict which module handle + /// will be returned. For more information refer to [MSDN]. + /// + /// If the `filename` specifies a library filename without a path and with the extension omitted, + /// the `.dll` extension is implicitly added. This behaviour may be suppressed by appending a + /// trailing `.` to the `filename`. + /// + /// This is equivalent to `GetModuleHandleExW(0, filename, _)`. + /// + /// [MSDN]: https://docs.microsoft.com/en-us/windows/win32/api/libloaderapi/nf-libloaderapi-getmodulehandleexw + pub fn open_already_loaded>(filename: P) -> Result { + let wide_filename: Vec = filename.as_ref().encode_wide().chain(Some(0)).collect(); + + let ret = unsafe { + let mut handle: HMODULE = 0; + with_get_last_error( + |source| crate::Error::GetModuleHandleExW { source }, + || { + // Make sure no winapi calls as a result of drop happen inside this closure, because + // otherwise that might change the return value of the GetLastError. + let result = GetModuleHandleExW(0, wide_filename.as_ptr(), &mut handle); + if result == 0 { + None + } else { + Some(Library(handle)) + } + }, + ) + .map_err(|e| e.unwrap_or(crate::Error::GetModuleHandleExWUnknown)) + }; + + drop(wide_filename); // Drop wide_filename here to ensure it doesn’t get moved and dropped + // inside the closure by mistake. See comment inside the closure. + ret + } + + /// Find and load a module, additionally adjusting behaviour with flags. + /// + /// See [`Library::new`] for documentation on the handling of the `filename` argument. See the + /// [flag table on MSDN][flags] for information on applicable values for the `flags` argument. + /// + /// Corresponds to `LoadLibraryExW(filename, reserved: NULL, flags)`. 
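+ ///
+ /// A minimal usage sketch, not part of the upstream docs; `example.dll` is a
+ /// placeholder module name:
+ ///
+ /// ```no_run
+ /// # use libloading::os::windows::{Library, LOAD_LIBRARY_SEARCH_DEFAULT_DIRS};
+ /// // Restrict the search to the default directories instead of the legacy search order.
+ /// let lib = unsafe {
+ ///     Library::load_with_flags("example.dll", LOAD_LIBRARY_SEARCH_DEFAULT_DIRS).unwrap()
+ /// };
+ /// ```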
+ /// + /// [flags]: https://docs.microsoft.com/en-us/windows/win32/api/libloaderapi/nf-libloaderapi-loadlibraryexw#parameters + /// + /// # Safety + /// + /// When a library is loaded, initialisation routines contained within the library are executed. + /// For the purposes of safety, the execution of these routines is conceptually the same calling an + /// unknown foreign function and may impose arbitrary requirements on the caller for the call + /// to be sound. + /// + /// Additionally, the callers of this function must also ensure that execution of the + /// termination routines contained within the library is safe as well. These routines may be + /// executed when the library is unloaded. + pub unsafe fn load_with_flags>( + filename: P, + flags: LOAD_LIBRARY_FLAGS, + ) -> Result { + let wide_filename: Vec = filename.as_ref().encode_wide().chain(Some(0)).collect(); + let _guard = ErrorModeGuard::new(); + + let ret = with_get_last_error( + |source| crate::Error::LoadLibraryExW { source }, + || { + // Make sure no winapi calls as a result of drop happen inside this closure, because + // otherwise that might change the return value of the GetLastError. + let handle = LoadLibraryExW(wide_filename.as_ptr(), 0, flags); + if handle == 0 { + None + } else { + Some(Library(handle)) + } + }, + ) + .map_err(|e| e.unwrap_or(crate::Error::LoadLibraryExWUnknown)); + drop(wide_filename); // Drop wide_filename here to ensure it doesn’t get moved and dropped + // inside the closure by mistake. See comment inside the closure. + ret + } + + /// Attempts to pin the module represented by the current `Library` into memory. + /// + /// Calls `GetModuleHandleExW` with the flag `GET_MODULE_HANDLE_EX_FLAG_PIN` to pin the module. + /// See the [MSDN documentation][msdn] for more information. + /// + /// [msdn]: https://learn.microsoft.com/en-us/windows/win32/api/libloaderapi/nf-libloaderapi-getmodulehandleexw + /// + /// If successful, the module will remain in memory regardless of the refcount for this `Library` + pub fn pin(&self) -> Result<(), crate::Error> { + const GET_MODULE_HANDLE_EX_FLAG_PIN: u32 = 0x1; + const GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS: u32 = 0x4; + unsafe { + let mut handle: HMODULE = 0; + with_get_last_error( + |source| crate::Error::GetModuleHandleExW { source }, + || { + // Make sure no winapi calls as a result of drop happen inside this closure, because + // otherwise that might change the return value of the GetLastError. + + // We use our cached module handle of this `Library` instead of the module name. This works + // if we also pass the flag `GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS` because on Windows, module handles + // are the loaded base address of the module. + let result = GetModuleHandleExW( + GET_MODULE_HANDLE_EX_FLAG_PIN | GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS, + self.0 as *const u16, + &mut handle, + ); + if result == 0 { + None + } else { + Some(()) + } + }, + ) + .map_err(|e| e.unwrap_or(crate::Error::GetModuleHandleExWUnknown)) + } + } + + /// Get a pointer to a function or static variable by symbol name. + /// + /// The `symbol` may not contain any null bytes, with the exception of the last byte. A null + /// terminated `symbol` may avoid a string allocation in some cases. + /// + /// Symbol is interpreted as-is; no mangling is done. This means that symbols like `x::y` are + /// most likely invalid. + /// + /// # Safety + /// + /// Users of this API must specify the correct type of the function or variable loaded. 
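+ ///
+ /// A minimal usage sketch mirroring this crate's own tests; `GetLastError` from
+ /// `kernel32.dll` is used purely as an illustrative, well-known export:
+ ///
+ /// ```no_run
+ /// # use libloading::os::windows::{Library, Symbol};
+ /// unsafe {
+ ///     let lib = Library::new("kernel32.dll").unwrap();
+ ///     // The caller must spell out the correct function signature for the symbol.
+ ///     let gle: Symbol<unsafe extern "system" fn() -> u32> = lib.get(b"GetLastError\0").unwrap();
+ ///     let _code = gle();
+ /// }
+ /// ```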
+ pub unsafe fn get(&self, symbol: &[u8]) -> Result, crate::Error> { + ensure_compatible_types::()?; + let symbol = cstr_cow_from_bytes(symbol)?; + with_get_last_error( + |source| crate::Error::GetProcAddress { source }, + || { + let symbol = GetProcAddress(self.0, symbol.as_ptr().cast()); + if symbol.is_none() { + None + } else { + Some(Symbol { + pointer: symbol, + pd: marker::PhantomData, + }) + } + }, + ) + .map_err(|e| e.unwrap_or(crate::Error::GetProcAddressUnknown)) + } + + /// Get a pointer to a function or static variable by ordinal number. + /// + /// # Safety + /// + /// Users of this API must specify the correct type of the function or variable loaded. + pub unsafe fn get_ordinal(&self, ordinal: u16) -> Result, crate::Error> { + ensure_compatible_types::()?; + with_get_last_error( + |source| crate::Error::GetProcAddress { source }, + || { + let ordinal = ordinal as usize as *const _; + let symbol = GetProcAddress(self.0, ordinal); + if symbol.is_none() { + None + } else { + Some(Symbol { + pointer: symbol, + pd: marker::PhantomData, + }) + } + }, + ) + .map_err(|e| e.unwrap_or(crate::Error::GetProcAddressUnknown)) + } + + /// Convert the `Library` to a raw handle. + pub fn into_raw(self) -> HMODULE { + let handle = self.0; + mem::forget(self); + handle + } + + /// Convert a raw handle to a `Library`. + /// + /// # Safety + /// + /// The handle must be the result of a successful call of `LoadLibraryA`, `LoadLibraryW`, + /// `LoadLibraryExW`, or `LoadLibraryExA`, or a handle previously returned by the + /// `Library::into_raw` call. + pub unsafe fn from_raw(handle: HMODULE) -> Library { + Library(handle) + } + + /// Unload the library. + /// + /// You only need to call this if you are interested in handling any errors that may arise when + /// library is unloaded. Otherwise this will be done when `Library` is dropped. + /// + /// The underlying data structures may still get leaked if an error does occur. + pub fn close(self) -> Result<(), crate::Error> { + let result = with_get_last_error( + |source| crate::Error::FreeLibrary { source }, + || { + if unsafe { FreeLibrary(self.0) == 0 } { + None + } else { + Some(()) + } + }, + ) + .map_err(|e| e.unwrap_or(crate::Error::FreeLibraryUnknown)); + // While the library is not free'd yet in case of an error, there is no reason to try + // dropping it again, because all that will do is try calling `FreeLibrary` again. only + // this time it would ignore the return result, which we already seen failing... + std::mem::forget(self); + result + } +} + +impl Drop for Library { + fn drop(&mut self) { + unsafe { + FreeLibrary(self.0); + } + } +} + +impl fmt::Debug for Library { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + unsafe { + // FIXME: use Maybeuninit::uninit_array when stable + let mut buf = mem::MaybeUninit::<[mem::MaybeUninit; 1024]>::uninit().assume_init(); + let len = GetModuleFileNameW(self.0, buf[..].as_mut_ptr().cast(), 1024) as usize; + if len == 0 { + f.write_str(&format!("Library@{:#x}", self.0)) + } else { + let string: OsString = OsString::from_wide( + // FIXME: use Maybeuninit::slice_get_ref when stable + &*(&buf[..len] as *const [_] as *const [u16]), + ); + f.write_str(&format!("Library@{:#x} from {:?}", self.0, string)) + } + } + } +} + +/// A symbol from a library. +/// +/// A major difference compared to the cross-platform `Symbol` is that this does not ensure that the +/// `Symbol` does not outlive the `Library` that it comes from. 
+pub struct Symbol { + pointer: FARPROC, + pd: marker::PhantomData, +} + +impl Symbol { + /// Convert the loaded `Symbol` into a handle. + pub fn into_raw(self) -> FARPROC { + self.pointer + } + + /// Convert the loaded `Symbol` into a raw pointer. + pub fn as_raw_ptr(self) -> *mut raw::c_void { + self.pointer + .map(|raw| raw as *mut raw::c_void) + .unwrap_or(std::ptr::null_mut()) + } +} + +impl Symbol> { + /// Lift Option out of the symbol. + pub fn lift_option(self) -> Option> { + if self.pointer.is_none() { + None + } else { + Some(Symbol { + pointer: self.pointer, + pd: marker::PhantomData, + }) + } + } +} + +unsafe impl Send for Symbol {} +unsafe impl Sync for Symbol {} + +impl Clone for Symbol { + fn clone(&self) -> Symbol { + Symbol { ..*self } + } +} + +impl ::std::ops::Deref for Symbol { + type Target = T; + fn deref(&self) -> &T { + unsafe { &*((&self.pointer) as *const FARPROC as *const T) } + } +} + +impl fmt::Debug for Symbol { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self.pointer { + None => f.write_str("Symbol@0x0"), + Some(ptr) => f.write_str(&format!("Symbol@{:p}", ptr as *const ())), + } + } +} + +struct ErrorModeGuard(DWORD); + +impl ErrorModeGuard { + #[allow(clippy::if_same_then_else)] + fn new() -> Option { + unsafe { + let mut previous_mode = 0; + if SetThreadErrorMode(SEM_FAILCRITICALERRORS, &mut previous_mode) == 0 { + // How in the world is it possible for what is essentially a simple variable swap + // to fail? For now we just ignore the error -- the worst that can happen here is + // the previous mode staying on and user seeing a dialog error on older Windows + // machines. + None + } else if previous_mode == SEM_FAILCRITICALERRORS { + None + } else { + Some(ErrorModeGuard(previous_mode)) + } + } + } +} + +impl Drop for ErrorModeGuard { + fn drop(&mut self) { + unsafe { + SetThreadErrorMode(self.0, ptr::null_mut()); + } + } +} + +fn with_get_last_error( + wrap: fn(crate::error::WindowsError) -> crate::Error, + closure: F, +) -> Result> +where + F: FnOnce() -> Option, +{ + closure().ok_or_else(|| { + let error = unsafe { GetLastError() }; + if error == 0 { + None + } else { + Some(wrap(crate::error::WindowsError( + io::Error::from_raw_os_error(error as i32), + ))) + } + }) +} + +#[allow(clippy::upper_case_acronyms)] +type BOOL = i32; +#[allow(clippy::upper_case_acronyms)] +type DWORD = u32; +#[allow(clippy::upper_case_acronyms)] +type HANDLE = isize; +#[allow(clippy::upper_case_acronyms)] +type HMODULE = isize; +#[allow(clippy::upper_case_acronyms)] +type FARPROC = Option isize>; +#[allow(non_camel_case_types)] +type LOAD_LIBRARY_FLAGS = DWORD; + +const SEM_FAILCRITICALERRORS: DWORD = 1; + +/// Do not check AppLocker rules or apply Software Restriction Policies for the DLL. +/// +/// This action applies only to the DLL being loaded and not to its dependencies. This value is +/// recommended for use in setup programs that must run extracted DLLs during installation. +/// +/// See [flag documentation on MSDN](https://docs.microsoft.com/en-us/windows/win32/api/libloaderapi/nf-libloaderapi-loadlibraryexw#parameters). +pub const LOAD_IGNORE_CODE_AUTHZ_LEVEL: LOAD_LIBRARY_FLAGS = 0x00000010; + +/// Map the file into the calling process’ virtual address space as if it were a data file. +/// +/// Nothing is done to execute or prepare to execute the mapped file. Therefore, you cannot call +/// functions like [`Library::get`] with this DLL. Using this value causes writes to read-only +/// memory to raise an access violation. 
Use this flag when you want to load a DLL only to extract +/// messages or resources from it. +/// +/// See [flag documentation on MSDN](https://docs.microsoft.com/en-us/windows/win32/api/libloaderapi/nf-libloaderapi-loadlibraryexw#parameters). +pub const LOAD_LIBRARY_AS_DATAFILE: LOAD_LIBRARY_FLAGS = 0x00000002; + +/// Map the file into the calling process’ virtual address space as if it were a data file. +/// +/// Similar to [`LOAD_LIBRARY_AS_DATAFILE`], except that the DLL file is opened with exclusive +/// write access for the calling process. Other processes cannot open the DLL file for write access +/// while it is in use. However, the DLL can still be opened by other processes. +/// +/// See [flag documentation on MSDN](https://docs.microsoft.com/en-us/windows/win32/api/libloaderapi/nf-libloaderapi-loadlibraryexw#parameters). +pub const LOAD_LIBRARY_AS_DATAFILE_EXCLUSIVE: LOAD_LIBRARY_FLAGS = 0x00000040; + +/// Map the file into the process’ virtual address space as an image file. +/// +/// The loader does not load the static imports or perform the other usual initialisation steps. +/// Use this flag when you want to load a DLL only to extract messages or resources from it. +/// +/// Unless the application depends on the file having the in-memory layout of an image, this value +/// should be used with either [`LOAD_LIBRARY_AS_DATAFILE_EXCLUSIVE`] or +/// [`LOAD_LIBRARY_AS_DATAFILE`]. +/// +/// See [flag documentation on MSDN](https://docs.microsoft.com/en-us/windows/win32/api/libloaderapi/nf-libloaderapi-loadlibraryexw#parameters). +pub const LOAD_LIBRARY_AS_IMAGE_RESOURCE: LOAD_LIBRARY_FLAGS = 0x00000020; + +/// Search the application's installation directory for the DLL and its dependencies. +/// +/// Directories in the standard search path are not searched. This value cannot be combined with +/// [`LOAD_WITH_ALTERED_SEARCH_PATH`]. +/// +/// See [flag documentation on MSDN](https://docs.microsoft.com/en-us/windows/win32/api/libloaderapi/nf-libloaderapi-loadlibraryexw#parameters). +pub const LOAD_LIBRARY_SEARCH_APPLICATION_DIR: LOAD_LIBRARY_FLAGS = 0x00000200; + +/// Search default directories when looking for the DLL and its dependencies. +/// +/// This value is a combination of [`LOAD_LIBRARY_SEARCH_APPLICATION_DIR`], +/// [`LOAD_LIBRARY_SEARCH_SYSTEM32`], and [`LOAD_LIBRARY_SEARCH_USER_DIRS`]. Directories in the +/// standard search path are not searched. This value cannot be combined with +/// [`LOAD_WITH_ALTERED_SEARCH_PATH`]. +/// +/// See [flag documentation on MSDN](https://docs.microsoft.com/en-us/windows/win32/api/libloaderapi/nf-libloaderapi-loadlibraryexw#parameters). +pub const LOAD_LIBRARY_SEARCH_DEFAULT_DIRS: LOAD_LIBRARY_FLAGS = 0x00001000; + +/// Directory that contains the DLL is temporarily added to the beginning of the list of +/// directories that are searched for the DLL’s dependencies. +/// +/// Directories in the standard search path are not searched. +/// +/// The `filename` parameter must specify a fully qualified path. This value cannot be combined +/// with [`LOAD_WITH_ALTERED_SEARCH_PATH`]. +/// +/// See [flag documentation on MSDN](https://docs.microsoft.com/en-us/windows/win32/api/libloaderapi/nf-libloaderapi-loadlibraryexw#parameters). +pub const LOAD_LIBRARY_SEARCH_DLL_LOAD_DIR: LOAD_LIBRARY_FLAGS = 0x00000100; + +/// Search `%windows%\system32` for the DLL and its dependencies. +/// +/// Directories in the standard search path are not searched. This value cannot be combined with +/// [`LOAD_WITH_ALTERED_SEARCH_PATH`]. 
+/// +/// See [flag documentation on MSDN](https://docs.microsoft.com/en-us/windows/win32/api/libloaderapi/nf-libloaderapi-loadlibraryexw#parameters). +pub const LOAD_LIBRARY_SEARCH_SYSTEM32: LOAD_LIBRARY_FLAGS = 0x00000800; + +/// Directories added using the `AddDllDirectory` or the `SetDllDirectory` function are searched +/// for the DLL and its dependencies. +/// +/// If more than one directory has been added, the order in which the directories are searched is +/// unspecified. Directories in the standard search path are not searched. This value cannot be +/// combined with [`LOAD_WITH_ALTERED_SEARCH_PATH`]. +/// +/// See [flag documentation on MSDN](https://docs.microsoft.com/en-us/windows/win32/api/libloaderapi/nf-libloaderapi-loadlibraryexw#parameters). +pub const LOAD_LIBRARY_SEARCH_USER_DIRS: LOAD_LIBRARY_FLAGS = 0x00000400; + +/// If `filename` specifies an absolute path, the system uses the alternate file search strategy +/// discussed in the [Remarks section] to find associated executable modules that the specified +/// module causes to be loaded. +/// +/// If this value is used and `filename` specifies a relative path, the behaviour is undefined. +/// +/// If this value is not used, or if `filename` does not specify a path, the system uses the +/// standard search strategy discussed in the [Remarks section] to find associated executable +/// modules that the specified module causes to be loaded. +/// +/// See [flag documentation on MSDN](https://docs.microsoft.com/en-us/windows/win32/api/libloaderapi/nf-libloaderapi-loadlibraryexw#parameters). +/// +/// [Remarks]: https://docs.microsoft.com/en-us/windows/win32/api/libloaderapi/nf-libloaderapi-loadlibraryexw#remarks +pub const LOAD_WITH_ALTERED_SEARCH_PATH: LOAD_LIBRARY_FLAGS = 0x00000008; + +/// Specifies that the digital signature of the binary image must be checked at load time. +/// +/// See [flag documentation on MSDN](https://docs.microsoft.com/en-us/windows/win32/api/libloaderapi/nf-libloaderapi-loadlibraryexw#parameters). +pub const LOAD_LIBRARY_REQUIRE_SIGNED_TARGET: LOAD_LIBRARY_FLAGS = 0x00000080; + +/// Allow loading a DLL for execution from the current directory only if it is under a directory in +/// the Safe load list. +/// +/// See [flag documentation on MSDN](https://docs.microsoft.com/en-us/windows/win32/api/libloaderapi/nf-libloaderapi-loadlibraryexw#parameters). +pub const LOAD_LIBRARY_SAFE_CURRENT_DIRS: LOAD_LIBRARY_FLAGS = 0x00002000; diff --git a/vendor/libloading/src/safe.rs b/vendor/libloading/src/safe.rs new file mode 100644 index 00000000000000..e217ee394646c1 --- /dev/null +++ b/vendor/libloading/src/safe.rs @@ -0,0 +1,318 @@ +#[cfg(libloading_docs)] +use super::os::unix as imp; // the implementation used here doesn't matter particularly much... +#[cfg(all(not(libloading_docs), unix))] +use super::os::unix as imp; +#[cfg(all(not(libloading_docs), windows))] +use super::os::windows as imp; +use super::Error; +use std::ffi::OsStr; +use std::fmt; +use std::marker; +use std::ops; +use std::os::raw; + +/// A loaded dynamic library. +#[cfg_attr(libloading_docs, doc(cfg(any(unix, windows))))] +pub struct Library(imp::Library); + +impl Library { + /// Find and load a dynamic library. + /// + /// The `filename` argument may be either: + /// + /// * A library filename; + /// * The absolute path to the library; + /// * A relative (to the current working directory) path to the library. + /// + /// # Safety + /// + /// When a library is loaded, initialisation routines contained within it are executed. 
+ /// For the purposes of safety, the execution of these routines is conceptually the same calling an + /// unknown foreign function and may impose arbitrary requirements on the caller for the call + /// to be sound. + /// + /// Additionally, the callers of this function must also ensure that execution of the + /// termination routines contained within the library is safe as well. These routines may be + /// executed when the library is unloaded. + /// + /// # Thread-safety + /// + /// The implementation strives to be as MT-safe as sanely possible, however on certain + /// platforms the underlying error-handling related APIs not always MT-safe. This library + /// shares these limitations on those platforms. In particular, on certain UNIX targets + /// `dlerror` is not MT-safe, resulting in garbage error messages in certain MT-scenarios. + /// + /// Calling this function from multiple threads is not MT-safe if used in conjunction with + /// library filenames and the library search path is modified (`SetDllDirectory` function on + /// Windows, `{DY,}LD_LIBRARY_PATH` environment variable on UNIX). + /// + /// # Platform-specific behaviour + /// + /// When a plain library filename is supplied, the locations in which the library is searched are + /// platform specific and cannot be adjusted in a portable manner. See the documentation for + /// the platform specific [`os::unix::Library::new`] and [`os::windows::Library::new`] methods + /// for further information on library lookup behaviour. + /// + /// If the `filename` specifies a library filename without a path and with the extension omitted, + /// the `.dll` extension is implicitly added on Windows. + /// + /// [`os::unix::Library::new`]: crate::os::unix::Library::new + /// [`os::windows::Library::new`]: crate::os::windows::Library::new + /// + /// # Tips + /// + /// Distributing your dynamic libraries under a filename common to all platforms (e.g. + /// `awesome.module`) allows you to avoid code which has to account for platform’s conventional + /// library filenames. + /// + /// Strive to specify an absolute or at least a relative path to your library, unless + /// system-wide libraries are being loaded. Platform-dependent library search locations + /// combined with various quirks related to path-less filenames may cause flakiness in + /// programs. + /// + /// # Examples + /// + /// ```no_run + /// # use ::libloading::Library; + /// // Any of the following are valid. + /// unsafe { + /// let _ = Library::new("/path/to/awesome.module").unwrap(); + /// let _ = Library::new("../awesome.module").unwrap(); + /// let _ = Library::new("libsomelib.so.1").unwrap(); + /// } + /// ``` + pub unsafe fn new>(filename: P) -> Result { + imp::Library::new(filename).map(From::from) + } + + /// Get a pointer to a function or static variable by symbol name. + /// + /// The `symbol` may not contain any null bytes, with the exception of the last byte. Providing a + /// null-terminated `symbol` may help to avoid an allocation. + /// + /// The symbol is interpreted as-is; no mangling is done. This means that symbols like `x::y` are + /// most likely invalid. + /// + /// # Safety + /// + /// Users of this API must specify the correct type of the function or variable loaded. + /// + /// # Platform-specific behaviour + /// + /// The implementation of thread-local variables is extremely platform specific and uses of such + /// variables that work on e.g. Linux may have unintended behaviour on other targets. 
+ /// + /// On POSIX implementations where the `dlerror` function is not confirmed to be MT-safe (such + /// as FreeBSD), this function will unconditionally return an error when the underlying `dlsym` + /// call returns a null pointer. There are rare situations where `dlsym` returns a genuine null + /// pointer without it being an error. If loading a null pointer is something you care about, + /// consider using the [`os::unix::Library::get_singlethreaded`] call. + /// + /// [`os::unix::Library::get_singlethreaded`]: crate::os::unix::Library::get_singlethreaded + /// + /// # Examples + /// + /// Given a loaded library: + /// + /// ```no_run + /// # use ::libloading::Library; + /// let lib = unsafe { + /// Library::new("/path/to/awesome.module").unwrap() + /// }; + /// ``` + /// + /// Loading and using a function looks like this: + /// + /// ```no_run + /// # use ::libloading::{Library, Symbol}; + /// # let lib = unsafe { + /// # Library::new("/path/to/awesome.module").unwrap() + /// # }; + /// unsafe { + /// let awesome_function: Symbol f64> = + /// lib.get(b"awesome_function\0").unwrap(); + /// awesome_function(0.42); + /// } + /// ``` + /// + /// A static variable may also be loaded and inspected: + /// + /// ```no_run + /// # use ::libloading::{Library, Symbol}; + /// # let lib = unsafe { Library::new("/path/to/awesome.module").unwrap() }; + /// unsafe { + /// let awesome_variable: Symbol<*mut f64> = lib.get(b"awesome_variable\0").unwrap(); + /// **awesome_variable = 42.0; + /// }; + /// ``` + pub unsafe fn get(&self, symbol: &[u8]) -> Result, Error> { + self.0.get(symbol).map(|from| Symbol::from_raw(from, self)) + } + + /// Unload the library. + /// + /// This method might be a no-op, depending on the flags with which the `Library` was opened, + /// what library was opened or other platform specifics. + /// + /// You only need to call this if you are interested in handling any errors that may arise when + /// library is unloaded. Otherwise the implementation of `Drop` for `Library` will close the + /// library and ignore the errors were they arise. + /// + /// The underlying data structures may still get leaked if an error does occur. + pub fn close(self) -> Result<(), Error> { + self.0.close() + } +} + +impl fmt::Debug for Library { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + self.0.fmt(f) + } +} + +impl From for Library { + fn from(lib: imp::Library) -> Library { + Library(lib) + } +} + +impl From for imp::Library { + fn from(lib: Library) -> imp::Library { + lib.0 + } +} + +unsafe impl Send for Library {} +unsafe impl Sync for Library {} + +/// Symbol from a library. +/// +/// This type is a safeguard against using dynamically loaded symbols after a `Library` is +/// unloaded. The primary method to create an instance of a `Symbol` is via [`Library::get`]. +/// +/// The `Deref` trait implementation allows the use of `Symbol` as if it was a function or variable +/// itself, without taking care to “extract” the function or variable manually most of the time. +/// +/// [`Library::get`]: Library::get +#[cfg_attr(libloading_docs, doc(cfg(any(unix, windows))))] +pub struct Symbol<'lib, T: 'lib> { + inner: imp::Symbol, + pd: marker::PhantomData<&'lib T>, +} + +impl<'lib, T> Symbol<'lib, T> { + /// Extract the wrapped `os::platform::Symbol`. + /// + /// # Safety + /// + /// Using this function relinquishes all the lifetime guarantees. 
It is up to the developer to + /// ensure the resulting `Symbol` is not used past the lifetime of the `Library` this symbol + /// was loaded from. + /// + /// # Examples + /// + /// ```no_run + /// # use ::libloading::{Library, Symbol}; + /// unsafe { + /// let lib = Library::new("/path/to/awesome.module").unwrap(); + /// let symbol: Symbol<*mut u32> = lib.get(b"symbol\0").unwrap(); + /// let symbol = symbol.into_raw(); + /// } + /// ``` + pub unsafe fn into_raw(self) -> imp::Symbol { + self.inner + } + + /// Wrap the `os::platform::Symbol` into this safe wrapper. + /// + /// Note that, in order to create association between the symbol and the library this symbol + /// came from, this function requires a reference to the library. + /// + /// # Safety + /// + /// The `library` reference must be exactly the library `sym` was loaded from. + /// + /// # Examples + /// + /// ```no_run + /// # use ::libloading::{Library, Symbol}; + /// unsafe { + /// let lib = Library::new("/path/to/awesome.module").unwrap(); + /// let symbol: Symbol<*mut u32> = lib.get(b"symbol\0").unwrap(); + /// let symbol = symbol.into_raw(); + /// let symbol = Symbol::from_raw(symbol, &lib); + /// } + /// ``` + pub unsafe fn from_raw(sym: imp::Symbol, library: &'lib L) -> Symbol<'lib, T> { + let _ = library; // ignore here for documentation purposes. + Symbol { + inner: sym, + pd: marker::PhantomData, + } + } + + /// Try to convert the symbol into a raw pointer. + /// Success depends on the platform. Currently, this fn always succeeds and returns some. + /// + /// # Safety + /// + /// Using this function relinquishes all the lifetime guarantees. It is up to the developer to + /// ensure the resulting `Symbol` is not used past the lifetime of the `Library` this symbol + /// was loaded from. + pub unsafe fn try_as_raw_ptr(self) -> Option<*mut raw::c_void> { + Some( + unsafe { + // SAFE: the calling function has the same soundness invariants as this callee. + self.into_raw() + } + .as_raw_ptr(), + ) + } +} + +impl<'lib, T> Symbol<'lib, Option> { + /// Lift Option out of the symbol. + /// + /// # Examples + /// + /// ```no_run + /// # use ::libloading::{Library, Symbol}; + /// unsafe { + /// let lib = Library::new("/path/to/awesome.module").unwrap(); + /// let symbol: Symbol> = lib.get(b"symbol\0").unwrap(); + /// let symbol: Symbol<*mut u32> = symbol.lift_option().expect("static is not null"); + /// } + /// ``` + pub fn lift_option(self) -> Option> { + self.inner.lift_option().map(|is| Symbol { + inner: is, + pd: marker::PhantomData, + }) + } +} + +impl<'lib, T> Clone for Symbol<'lib, T> { + fn clone(&self) -> Symbol<'lib, T> { + Symbol { + inner: self.inner.clone(), + pd: marker::PhantomData, + } + } +} + +// FIXME: implement FnOnce for callable stuff instead. +impl ops::Deref for Symbol<'_, T> { + type Target = T; + fn deref(&self) -> &T { + ops::Deref::deref(&self.inner) + } +} + +impl fmt::Debug for Symbol<'_, T> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + self.inner.fmt(f) + } +} + +unsafe impl Send for Symbol<'_, T> {} +unsafe impl Sync for Symbol<'_, T> {} diff --git a/vendor/libloading/src/test_helpers.rs b/vendor/libloading/src/test_helpers.rs new file mode 100644 index 00000000000000..9e3e9924ff86b0 --- /dev/null +++ b/vendor/libloading/src/test_helpers.rs @@ -0,0 +1,37 @@ +//! This is a separate file containing helpers for tests of this library. It is built into a +//! dynamic library by the build.rs script. 
+#![crate_type="cdylib"] + +#[no_mangle] +pub static mut TEST_STATIC_U32: u32 = 0; + +#[no_mangle] +pub static mut TEST_STATIC_PTR: *mut () = 0 as *mut _; + +#[no_mangle] +pub extern "C" fn test_identity_u32(x: u32) -> u32 { + x +} + +#[repr(C)] +pub struct S { + a: u64, + b: u32, + c: u16, + d: u8 +} + +#[no_mangle] +pub extern "C" fn test_identity_struct(x: S) -> S { + x +} + +#[no_mangle] +pub unsafe extern "C" fn test_get_static_u32() -> u32 { + TEST_STATIC_U32 +} + +#[no_mangle] +pub unsafe extern "C" fn test_check_static_ptr() -> bool { + TEST_STATIC_PTR == (&mut TEST_STATIC_PTR as *mut *mut _ as *mut _) +} diff --git a/vendor/libloading/src/util.rs b/vendor/libloading/src/util.rs new file mode 100644 index 00000000000000..599e6c254eaa8c --- /dev/null +++ b/vendor/libloading/src/util.rs @@ -0,0 +1,34 @@ +use std::borrow::Cow; +use std::ffi::{CStr, CString}; +use std::os::raw; + +use crate::Error; + +/// Checks for the last byte and avoids allocating if it is zero. +/// +/// Non-last null bytes still result in an error. +pub(crate) fn cstr_cow_from_bytes(slice: &[u8]) -> Result, Error> { + static ZERO: raw::c_char = 0; + Ok(match slice.last() { + // Slice out of 0 elements + None => unsafe { Cow::Borrowed(CStr::from_ptr(&ZERO)) }, + // Slice with trailing 0 + Some(&0) => Cow::Borrowed( + CStr::from_bytes_with_nul(slice) + .map_err(|source| Error::CreateCStringWithTrailing { source })?, + ), + // Slice with no trailing 0 + Some(_) => { + Cow::Owned(CString::new(slice).map_err(|source| Error::CreateCString { source })?) + } + }) +} + +#[inline] +pub(crate) fn ensure_compatible_types() -> Result<(), Error> { + if ::std::mem::size_of::() != ::std::mem::size_of::() { + Err(Error::IncompatibleSize) + } else { + Ok(()) + } +} diff --git a/vendor/libloading/tests/constants.rs b/vendor/libloading/tests/constants.rs new file mode 100644 index 00000000000000..6ae5a8460aef5b --- /dev/null +++ b/vendor/libloading/tests/constants.rs @@ -0,0 +1,13 @@ +extern crate libc; +extern crate libloading; +extern crate static_assertions; + +#[cfg(all(test, unix))] +mod unix { + use super::static_assertions::const_assert_eq; + + const_assert_eq!(libloading::os::unix::RTLD_LOCAL, libc::RTLD_LOCAL); + const_assert_eq!(libloading::os::unix::RTLD_GLOBAL, libc::RTLD_GLOBAL); + const_assert_eq!(libloading::os::unix::RTLD_NOW, libc::RTLD_NOW); + const_assert_eq!(libloading::os::unix::RTLD_LAZY, libc::RTLD_LAZY); +} diff --git a/vendor/libloading/tests/functions.rs b/vendor/libloading/tests/functions.rs new file mode 100644 index 00000000000000..dc6b316e7d79c9 --- /dev/null +++ b/vendor/libloading/tests/functions.rs @@ -0,0 +1,312 @@ +#[cfg(windows)] +extern crate windows_sys; + +extern crate libloading; +use libloading::{Library, Symbol}; +use std::os::raw::c_void; + +const TARGET_DIR: Option<&'static str> = option_env!("CARGO_TARGET_DIR"); +const TARGET_TMPDIR: Option<&'static str> = option_env!("CARGO_TARGET_TMPDIR"); + +fn lib_path() -> std::path::PathBuf { + [ + TARGET_TMPDIR.unwrap_or(TARGET_DIR.unwrap_or("target")), + "libtest_helpers.module", + ] + .iter() + .collect() +} + +fn make_helpers() { + static ONCE: ::std::sync::Once = ::std::sync::Once::new(); + ONCE.call_once(|| { + let rustc = std::env::var_os("RUSTC").unwrap_or_else(|| "rustc".into()); + let mut cmd = ::std::process::Command::new(rustc); + cmd.arg("src/test_helpers.rs").arg("-o").arg(lib_path()); + if let Some(target) = std::env::var_os("TARGET") { + cmd.arg("--target").arg(target); + } else { + eprintln!("WARNING: $TARGET NOT SPECIFIED! 
BUILDING HELPER MODULE FOR NATIVE TARGET."); + } + assert!(cmd + .status() + .expect("could not compile the test helpers!") + .success()); + }); +} + +#[test] +fn test_id_u32() { + make_helpers(); + unsafe { + let lib = Library::new(lib_path()).unwrap(); + let f: Symbol u32> = lib.get(b"test_identity_u32\0").unwrap(); + assert_eq!(42, f(42)); + } +} + +#[test] +fn test_try_into_ptr() { + make_helpers(); + unsafe { + let lib = Library::new(lib_path()).unwrap(); + let f: Symbol u32> = lib.get(b"test_identity_u32\0").unwrap(); + let ptr: *mut c_void = f.try_as_raw_ptr().unwrap(); + assert!(!ptr.is_null()); + let ptr_casted: extern "C" fn(u32) -> u32 = std::mem::transmute(ptr); + assert_eq!(42, ptr_casted(42)); + } +} + +#[repr(C)] +#[derive(Clone, Copy, PartialEq, Debug)] +struct S { + a: u64, + b: u32, + c: u16, + d: u8, +} + +#[test] +fn test_id_struct() { + make_helpers(); + unsafe { + let lib = Library::new(lib_path()).unwrap(); + let f: Symbol S> = lib.get(b"test_identity_struct\0").unwrap(); + assert_eq!( + S { + a: 1, + b: 2, + c: 3, + d: 4 + }, + f(S { + a: 1, + b: 2, + c: 3, + d: 4 + }) + ); + } +} + +#[test] +fn test_0_no_0() { + make_helpers(); + unsafe { + let lib = Library::new(lib_path()).unwrap(); + let f: Symbol S> = lib.get(b"test_identity_struct\0").unwrap(); + let f2: Symbol S> = lib.get(b"test_identity_struct").unwrap(); + assert_eq!(*f, *f2); + } +} + +#[test] +fn wrong_name_fails() { + unsafe { + Library::new("target/this_location_is_definitely_non existent:^~") + .err() + .unwrap(); + } +} + +#[test] +fn missing_symbol_fails() { + make_helpers(); + unsafe { + let lib = Library::new(lib_path()).unwrap(); + lib.get::<*mut ()>(b"test_does_not_exist").err().unwrap(); + lib.get::<*mut ()>(b"test_does_not_exist\0").err().unwrap(); + } +} + +#[test] +fn interior_null_fails() { + make_helpers(); + unsafe { + let lib = Library::new(lib_path()).unwrap(); + lib.get::<*mut ()>(b"test_does\0_not_exist").err().unwrap(); + lib.get::<*mut ()>(b"test\0_does_not_exist\0") + .err() + .unwrap(); + } +} + +#[test] +fn test_incompatible_type() { + make_helpers(); + unsafe { + let lib = Library::new(lib_path()).unwrap(); + assert!(match lib.get::<()>(b"test_identity_u32\0") { + Err(libloading::Error::IncompatibleSize) => true, + _ => false, + }) + } +} + +#[test] +fn test_incompatible_type_named_fn() { + make_helpers(); + unsafe fn get<'a, T>(l: &'a Library, _: T) -> Result, libloading::Error> { + l.get::(b"test_identity_u32\0") + } + unsafe { + let lib = Library::new(lib_path()).unwrap(); + assert!(match get(&lib, test_incompatible_type_named_fn) { + Err(libloading::Error::IncompatibleSize) => true, + _ => false, + }) + } +} + +#[test] +fn test_static_u32() { + make_helpers(); + unsafe { + let lib = Library::new(lib_path()).unwrap(); + let var: Symbol<*mut u32> = lib.get(b"TEST_STATIC_U32\0").unwrap(); + **var = 42; + let help: Symbol u32> = + lib.get(b"test_get_static_u32\0").unwrap(); + assert_eq!(42, help()); + } +} + +#[test] +fn test_static_ptr() { + make_helpers(); + unsafe { + let lib = Library::new(lib_path()).unwrap(); + let var: Symbol<*mut *mut ()> = lib.get(b"TEST_STATIC_PTR\0").unwrap(); + **var = *var as *mut _; + let works: Symbol bool> = + lib.get(b"test_check_static_ptr\0").unwrap(); + assert!(works()); + } +} + +#[test] +// Something about i686-pc-windows-gnu, makes dll initialisation code call abort when it is loaded +// and unloaded many times. So far it seems like an issue with mingw, not libloading, so ignoring +// the target. 
Especially since it is very unlikely to be fixed given the state of support its +// support. +#[cfg(not(all(target_arch = "x86", target_os = "windows", target_env = "gnu")))] +// Cygwin returns errors on `close`. +#[cfg(not(target_os = "cygwin"))] +fn manual_close_many_times() { + make_helpers(); + let join_handles: Vec<_> = (0..16) + .map(|_| { + std::thread::spawn(|| unsafe { + for _ in 0..10000 { + let lib = Library::new(lib_path()).expect("open library"); + let _: Symbol u32> = + lib.get(b"test_identity_u32").expect("get fn"); + lib.close().expect("close is successful"); + } + }) + }) + .collect(); + for handle in join_handles { + handle.join().expect("thread should succeed"); + } +} + +#[cfg(unix)] +#[test] +fn library_this_get() { + use libloading::os::unix::Library; + make_helpers(); + // SAFE: functions are never called + unsafe { + let _lib = Library::new(lib_path()).unwrap(); + let this = Library::this(); + // Library we loaded in `_lib` (should be RTLD_LOCAL). + assert!(this + .get::(b"test_identity_u32") + .is_err()); + // Something obscure from libc... + // Cygwin behaves like Windows so ignore it. + #[cfg(not(target_os = "cygwin"))] + assert!(this.get::(b"freopen").is_ok()); + } +} + +#[cfg(windows)] +#[test] +fn library_this() { + use libloading::os::windows::Library; + make_helpers(); + unsafe { + // SAFE: well-known library without initialisers is loaded. + let _lib = Library::new(lib_path()).unwrap(); + let this = Library::this().expect("this library"); + // SAFE: functions are never called. + // Library we loaded in `_lib`. + assert!(this + .get::(b"test_identity_u32") + .is_err()); + // Something "obscure" from kernel32... + assert!(this.get::(b"GetLastError").is_err()); + } +} + +#[cfg(windows)] +#[test] +fn works_getlasterror() { + use libloading::os::windows::{Library, Symbol}; + use windows_sys::Win32::Foundation::{GetLastError, SetLastError}; + + unsafe { + let lib = Library::new("kernel32.dll").unwrap(); + let gle: Symbol u32> = lib.get(b"GetLastError").unwrap(); + SetLastError(42); + assert_eq!(GetLastError(), gle()) + } +} + +#[cfg(windows)] +#[test] +fn works_getlasterror0() { + use libloading::os::windows::{Library, Symbol}; + use windows_sys::Win32::Foundation::{GetLastError, SetLastError}; + + unsafe { + let lib = Library::new("kernel32.dll").unwrap(); + let gle: Symbol u32> = lib.get(b"GetLastError\0").unwrap(); + SetLastError(42); + assert_eq!(GetLastError(), gle()) + } +} + +#[cfg(windows)] +#[test] +fn works_pin_module() { + use libloading::os::windows::Library; + + unsafe { + let lib = Library::new("kernel32.dll").unwrap(); + lib.pin().unwrap(); + } +} + +#[cfg(windows)] +#[test] +fn library_open_already_loaded() { + use libloading::os::windows::Library; + + // Present on Windows systems and NOT used by any other tests to prevent races. + const LIBPATH: &str = "Msftedit.dll"; + + // Not loaded yet. + assert!(match Library::open_already_loaded(LIBPATH) { + Err(libloading::Error::GetModuleHandleExW { .. }) => true, + _ => false, + }); + + unsafe { + let _lib = Library::new(LIBPATH).unwrap(); + // Loaded now. 
+ assert!(Library::open_already_loaded(LIBPATH).is_ok()); + } +} diff --git a/vendor/libloading/tests/library_filename.rs b/vendor/libloading/tests/library_filename.rs new file mode 100644 index 00000000000000..4642ece0874853 --- /dev/null +++ b/vendor/libloading/tests/library_filename.rs @@ -0,0 +1,17 @@ +extern crate libloading; +use libloading::library_filename; +use std::path::Path; + +#[cfg(any(target_os = "windows", target_os = "cygwin"))] +const EXPECTED: &str = "audioengine.dll"; +#[cfg(target_os = "linux")] +const EXPECTED: &str = "libaudioengine.so"; +#[cfg(target_os = "macos")] +const EXPECTED: &str = "libaudioengine.dylib"; + +#[test] +fn test_library_filename() { + let name = "audioengine"; + let resolved = library_filename(name); + assert!(Path::new(&resolved).ends_with(EXPECTED)); +} diff --git a/vendor/libloading/tests/markers.rs b/vendor/libloading/tests/markers.rs new file mode 100644 index 00000000000000..330c034ad5f45c --- /dev/null +++ b/vendor/libloading/tests/markers.rs @@ -0,0 +1,96 @@ +extern crate libloading; + +#[cfg(test)] +fn assert_send() {} +#[cfg(test)] +fn assert_sync() {} +#[cfg(test)] +fn assert_display() {} + +#[test] +fn check_error_send() { + assert_send::(); +} + +#[test] +fn check_error_sync() { + assert_sync::(); +} + +#[test] +fn check_error_display() { + assert_display::(); +} + +#[test] +fn check_library_send() { + assert_send::(); +} + +#[cfg(unix)] +#[test] +fn check_unix_library_send() { + assert_send::(); +} + +#[cfg(windows)] +#[test] +fn check_windows_library_send() { + assert_send::(); +} + +#[test] +fn check_library_sync() { + assert_sync::(); +} + +#[cfg(unix)] +#[test] +fn check_unix_library_sync() { + assert_sync::(); +} + +#[cfg(windows)] +#[test] +fn check_windows_library_sync() { + assert_sync::(); +} + +#[test] +fn check_symbol_send() { + assert_send:: ()>>(); + // assert_not_send::>(); +} + +#[cfg(unix)] +#[test] +fn check_unix_symbol_send() { + assert_send:: ()>>(); + // assert_not_send::>(); +} + +#[cfg(windows)] +#[test] +fn check_windows_symbol_send() { + assert_send:: ()>>(); +} + +#[test] +fn check_symbol_sync() { + assert_sync:: ()>>(); + // assert_not_sync::>(); +} + +#[cfg(unix)] +#[test] +fn check_unix_symbol_sync() { + assert_sync:: ()>>(); + // assert_not_sync::>(); +} + +#[cfg(windows)] +#[test] +fn check_windows_symbol_sync() { + assert_sync:: ()>>(); + // assert_not_sync::>(); +} diff --git a/vendor/libloading/tests/windows.rs b/vendor/libloading/tests/windows.rs new file mode 100644 index 00000000000000..13a41450288494 --- /dev/null +++ b/vendor/libloading/tests/windows.rs @@ -0,0 +1,71 @@ +#![cfg(windows)] +extern crate libloading; +use libloading::os::windows::*; +use std::ffi::CStr; +use std::os::raw::c_void; +// The ordinal DLL contains exactly one function (other than DllMain, that is) with ordinal number +// 1. This function has the sugnature `fn() -> *const c_char` and returns a string "bunny\0" (in +// reference to WindowsBunny). +// +// Both x86_64 and x86 versions of the .dll are functionally the same. Ideally we would compile the +// dlls with well known ordinals from our own testing helpers library, but rustc does not allow +// specifying a custom .def file (https://github.com/rust-lang/rust/issues/35089) +// +// The DLLs were kindly compiled by WindowsBunny (aka. @retep998). 
+ +#[cfg(target_arch = "x86")] +fn load_ordinal_lib() -> Library { + unsafe { Library::new("tests/nagisa32.dll").expect("nagisa32.dll") } +} + +#[cfg(target_arch = "x86_64")] +fn load_ordinal_lib() -> Library { + unsafe { Library::new("tests/nagisa64.dll").expect("nagisa64.dll") } +} + +#[cfg(any(target_arch = "x86", target_arch = "x86_64"))] +#[test] +fn test_ordinal() { + let lib = load_ordinal_lib(); + unsafe { + let windows: Symbol *const i8> = lib.get_ordinal(1).expect("function"); + assert_eq!(CStr::from_ptr(windows()).to_bytes(), b"bunny"); + } +} + +#[cfg(any(target_arch = "x86", target_arch = "x86_64"))] +#[test] +fn test_try_into_ptr() { + let lib = load_ordinal_lib(); + unsafe { + let windows: Symbol *const i8> = lib.get_ordinal(1).expect("function"); + let ptr: *mut c_void = windows.as_raw_ptr(); + assert!(!ptr.is_null()); + } +} + +#[cfg(any(target_arch = "x86", target_arch = "x86_64"))] +#[test] +fn test_ordinal_missing_fails() { + let lib = load_ordinal_lib(); + unsafe { + let r: Result *const i8>, _> = lib.get_ordinal(2); + r.err().unwrap(); + let r: Result *const i8>, _> = lib.get_ordinal(!0); + r.err().unwrap(); + } +} + +#[test] +fn test_new_kernel23() { + unsafe { + Library::new("kernel23").err().unwrap(); + } +} + +#[test] +fn test_new_kernel32_no_ext() { + unsafe { + Library::new("kernel32").unwrap(); + } +} diff --git a/vendor/log/.cargo-checksum.json b/vendor/log/.cargo-checksum.json new file mode 100644 index 00000000000000..763945ad571b1b --- /dev/null +++ b/vendor/log/.cargo-checksum.json @@ -0,0 +1 @@ +{"files":{".cargo_vcs_info.json":"d0cb2c582cde22a9d66fb0765cd8370b1ad5f39c4cce9685ccec9e057b1c9e23",".github/workflows/main.yml":"df525d79c4f63dd708126c1379134490d7a02c1729f06486141b2b90316fd39a","CHANGELOG.md":"a52fd4f4ddd7ed2c62e584f62057a5265be24abb53283ea49b1eb46ceb18a701","Cargo.lock":"80665e8b018d0dfe482b1581a138ac4e3e562bde5fd53f889cd6e48e6a96c374","Cargo.toml":"53a23ba91b2b31fb42b2986e95e8a8107fe9cdab34cc85568aeea9ce872b51ae","Cargo.toml.orig":"3b352dad4bca34832854a14a1534988a7d78380556ea8a6106015fe009ce4584","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"6485b8ed310d3f0340bf1ad1f47645069ce4069dcc6bb46c7d5c6faf41de1fdb","README.md":"486c6cf85f99a2a3ea14dbd5fc6d6075fdc047a0dbf2b93682595db0de6a115f","benches/value.rs":"b613ff353d3cf0ef8cb98e4ca461ea929b8ba553fe299f2eb2942d77a5b1b6a0","src/__private_api.rs":"9f6f76ae924f884115ad52f552c13282e11b43e98ae7b5ffb631913f3cefa11f","src/kv/error.rs":"6dae12424164c33b93915f5e70bd6d99d616c969c8bfb543806721dd9b423981","src/kv/key.rs":"e63fd5b22b62f2bfacbd77fe0913c3667ed39de5eeb6d93292b77b1b1de4208a","src/kv/mod.rs":"e194d44e1e626f33c9a9bf90b4053eb98d7652c795ba811e5ccc24b340be3a6e","src/kv/source.rs":"73fbc180c824072d86f1f41f8c59c014db1d8988a86be38a9128d67d6aab06a5","src/kv/value.rs":"c7cd0faf06adb04aa53d7ba1c305874d5e69364d037b17b9ab4ecf4a3dde1d4e","src/lib.rs":"e0b09715dce40d961b138c66bfd0963f65e1b6aa002461fabbcc8da49036cddc","src/macros.rs":"34c367a645483e21eee4c7846d0efbf97c29a52156d56536c82cdfe1d226a54d","src/serde.rs":"1b261f9df7a97ace311e9ab9b6c951a17ff7e39227a352c7e09cb2731efd9a2f","tests/integration.rs":"0980b3bd85d36863bc9f355e80bc7cf7987d2599adbc87e8e0082861a08a1097","tests/macros.rs":"a94f3cc181c9ecb30af6b5ca8bd2b4e5accc93689c0eb19051b8479a298dc21b","triagebot.toml":"a135e10c777cd13459559bdf74fb704c1379af7c9b0f70bc49fa6f5a837daa81"},"package":"34080505efa8e45a4b816c349525ebe327ceaa8559756f0356cba97ef3bf7432"} \ No newline at end of file diff --git 
a/vendor/log/.cargo_vcs_info.json b/vendor/log/.cargo_vcs_info.json new file mode 100644 index 00000000000000..c581ca83614a3c --- /dev/null +++ b/vendor/log/.cargo_vcs_info.json @@ -0,0 +1,6 @@ +{ + "git": { + "sha1": "6e1735597bb21c5d979a077395df85e1d633e077" + }, + "path_in_vcs": "" +} \ No newline at end of file diff --git a/vendor/log/.github/workflows/main.yml b/vendor/log/.github/workflows/main.yml new file mode 100644 index 00000000000000..3f5988fbd961ad --- /dev/null +++ b/vendor/log/.github/workflows/main.yml @@ -0,0 +1,134 @@ +name: CI +on: [push, pull_request] + +# Ensure only read permission is granted +permissions: + contents: read + +jobs: + test: + name: Test + runs-on: ${{ matrix.os }} + strategy: + matrix: + include: + - build: stable + os: ubuntu-latest + rust: stable + - build: beta + os: ubuntu-latest + rust: beta + - build: nightly + os: ubuntu-latest + rust: nightly + - build: macos + os: macos-latest + rust: stable + - build: win32 + os: windows-latest + rust: stable-i686-pc-windows-msvc + - build: win64 + os: windows-latest + rust: stable-x86_64-pc-windows-msvc + - build: mingw + os: windows-latest + rust: stable-x86_64-pc-windows-gnu + steps: + - uses: actions/checkout@v4 + - name: Install toolchain + run: | + rustup update ${{ matrix.rust }} --no-self-update + rustup default ${{ matrix.rust }} + cargo +stable install cargo-hack --locked + - run: cargo hack test --feature-powerset --exclude-features max_level_off,max_level_error,max_level_warn,max_level_info,max_level_debug,max_level_trace,release_max_level_off,release_max_level_error,release_max_level_warn,release_max_level_info,release_max_level_debug,release_max_level_trace + - run: cargo run --verbose --manifest-path test_max_level_features/Cargo.toml + - run: cargo run --verbose --manifest-path test_max_level_features/Cargo.toml --release + + check: + name: Check Format and Clippy + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - name: Install toolchain + run: | + rustup update stable --no-self-update + rustup default stable + rustup component add clippy rustfmt + - run: cargo fmt -- --check + - run: cargo fmt --manifest-path test_max_level_features/Cargo.toml -- --check + - run: cargo clippy --verbose + - run: cargo clippy --verbose --manifest-path test_max_level_features/Cargo.toml + + doc: + name: Check Documentation + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - name: Install toolchain + run: | + rustup update stable --no-self-update + rustup default stable + rustup component add rust-docs + - name: Run rustdoc + env: + RUSTDOCFLAGS: "-D warnings" + run: cargo doc --verbose --features std,serde,sval,sval_ref,value-bag,kv,kv_std,kv_sval,kv_serde + + features: + name: Feature check + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - name: Install toolchain + run: | + rustup update nightly --no-self-update + rustup default nightly + - run: cargo build --verbose -Z avoid-dev-deps --features kv + - run: cargo build --verbose -Z avoid-dev-deps --features "kv std" + - run: cargo build --verbose -Z avoid-dev-deps --features "kv kv_sval" + - run: cargo build --verbose -Z avoid-dev-deps --features "kv kv_serde" + - run: cargo build --verbose -Z avoid-dev-deps --features "kv kv_std" + - run: cargo build --verbose -Z avoid-dev-deps --features "kv kv_sval kv_serde" + + minimalv: + name: Minimal versions + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - name: Install toolchain + run: | + rustup update nightly --no-self-update + rustup default 
nightly + - run: cargo build --verbose -Z minimal-versions --features kv + - run: cargo build --verbose -Z minimal-versions --features "kv std" + - run: cargo build --verbose -Z minimal-versions --features "kv kv_sval" + - run: cargo build --verbose -Z minimal-versions --features "kv kv_serde" + - run: cargo build --verbose -Z minimal-versions --features "kv kv_std" + - run: cargo build --verbose -Z minimal-versions --features "kv kv_sval kv_serde" + + msrv: + name: MSRV + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - name: Install toolchain + run: | + rustup update 1.61.0 --no-self-update + rustup default 1.61.0 + cargo +stable install cargo-hack --locked + - run: cargo hack test --feature-powerset --exclude-features max_level_off,max_level_error,max_level_warn,max_level_info,max_level_debug,max_level_trace,release_max_level_off,release_max_level_error,release_max_level_warn,release_max_level_info,release_max_level_debug,release_max_level_trace + - run: cargo run --verbose --manifest-path test_max_level_features/Cargo.toml + - run: cargo run --verbose --manifest-path test_max_level_features/Cargo.toml --release + + embedded: + name: Embedded + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - name: Install toolchain + run: | + rustup update stable --no-self-update + rustup default stable + - run: rustup target add thumbv6m-none-eabi riscv32imc-unknown-none-elf + - run: cargo build --verbose --target=thumbv6m-none-eabi + - run: cargo build --verbose --target=riscv32imc-unknown-none-elf diff --git a/vendor/log/CHANGELOG.md b/vendor/log/CHANGELOG.md new file mode 100644 index 00000000000000..48f6693342a698 --- /dev/null +++ b/vendor/log/CHANGELOG.md @@ -0,0 +1,410 @@ +# Change Log + +## [Unreleased] + +## [0.4.28] - 2025-09-02 + +## What's Changed +* ci: drop really old trick and ensure MSRV for all feature combo by @tisonkun in https://github.com/rust-lang/log/pull/676 +* Chore: delete compare_exchange method for AtomicUsize on platforms without atomics by @HaoliangXu in https://github.com/rust-lang/log/pull/690 +* Add `increment_severity()` and `decrement_severity()` methods for `Level` and `LevelFilter` by @nebkor in https://github.com/rust-lang/log/pull/692 + +## New Contributors +* @xixishidibei made their first contribution in https://github.com/rust-lang/log/pull/677 +* @ZylosLumen made their first contribution in https://github.com/rust-lang/log/pull/688 +* @HaoliangXu made their first contribution in https://github.com/rust-lang/log/pull/690 +* @nebkor made their first contribution in https://github.com/rust-lang/log/pull/692 + +**Full Changelog**: https://github.com/rust-lang/log/compare/0.4.27...0.4.28 + +### Notable Changes +* MSRV is bumped to 1.61.0 in https://github.com/rust-lang/log/pull/676 + +## [0.4.27] - 2025-03-24 + +### What's Changed +* A few minor lint fixes by @nyurik in https://github.com/rust-lang/log/pull/671 +* Enable clippy support for format-like macros by @nyurik in https://github.com/rust-lang/log/pull/665 +* Add an optional logger param by @tisonkun in https://github.com/rust-lang/log/pull/664 +* Pass global logger by value, supplied logger by ref by @KodrAus in https://github.com/rust-lang/log/pull/673 + + +**Full Changelog**: https://github.com/rust-lang/log/compare/0.4.26...0.4.27 + + +## [0.4.26] - 2025-02-18 + +### What's Changed +* Derive `Clone` for `kv::Value` by @SpriteOvO in https://github.com/rust-lang/log/pull/668 +* Add `spdlog-rs` link to crate doc by @SpriteOvO in https://github.com/rust-lang/log/pull/669 + 
+ +**Full Changelog**: https://github.com/rust-lang/log/compare/0.4.25...0.4.26 + +## [0.4.25] - 2025-01-14 + +### What's Changed +* Revert loosening of kv cargo features by @KodrAus in https://github.com/rust-lang/log/pull/662 + + +**Full Changelog**: https://github.com/rust-lang/log/compare/0.4.24...0.4.25 + +## [0.4.24] - 2025-01-11 + +### What's Changed +* Fix up kv feature activation by @KodrAus in https://github.com/rust-lang/log/pull/659 + + +**Full Changelog**: https://github.com/rust-lang/log/compare/0.4.23...0.4.24 + +## [0.4.23] - 2025-01-10 (yanked) + +### What's Changed +* Fix some typos by @Kleinmarb in https://github.com/rust-lang/log/pull/637 +* Add logforth to implementation by @tisonkun in https://github.com/rust-lang/log/pull/638 +* Add `spdlog-rs` link to README by @SpriteOvO in https://github.com/rust-lang/log/pull/639 +* Add correct lifetime to kv::Value::to_borrowed_str by @stevenroose in https://github.com/rust-lang/log/pull/643 +* docs: Add logforth as an impl by @tisonkun in https://github.com/rust-lang/log/pull/642 +* Add clang_log implementation by @DDAN-17 in https://github.com/rust-lang/log/pull/646 +* Bind lifetimes of &str returned from Key by the lifetime of 'k rather than the lifetime of the Key struct by @gbbosak in https://github.com/rust-lang/log/pull/648 +* Fix up key lifetimes and add method to try get a borrowed key by @KodrAus in https://github.com/rust-lang/log/pull/653 +* Add Ftail implementation by @tjardoo in https://github.com/rust-lang/log/pull/652 + +### New Contributors +* @Kleinmarb made their first contribution in https://github.com/rust-lang/log/pull/637 +* @tisonkun made their first contribution in https://github.com/rust-lang/log/pull/638 +* @SpriteOvO made their first contribution in https://github.com/rust-lang/log/pull/639 +* @stevenroose made their first contribution in https://github.com/rust-lang/log/pull/643 +* @DDAN-17 made their first contribution in https://github.com/rust-lang/log/pull/646 +* @gbbosak made their first contribution in https://github.com/rust-lang/log/pull/648 +* @tjardoo made their first contribution in https://github.com/rust-lang/log/pull/652 + +**Full Changelog**: https://github.com/rust-lang/log/compare/0.4.22...0.4.23 + +## [0.4.22] - 2024-06-27 + +### What's Changed +* Add some clarifications to the library docs by @KodrAus in https://github.com/rust-lang/log/pull/620 +* Add links to `colog` crate by @chrivers in https://github.com/rust-lang/log/pull/621 +* adding line_number test + updating some testing infrastructure by @DIvkov575 in https://github.com/rust-lang/log/pull/619 +* Clarify the actual set of functions that can race in _racy variants by @KodrAus in https://github.com/rust-lang/log/pull/623 +* Replace deprecated std::sync::atomic::spin_loop_hint() by @Catamantaloedis in https://github.com/rust-lang/log/pull/625 +* Check usage of max_level features by @Thomasdezeeuw in https://github.com/rust-lang/log/pull/627 +* Remove unneeded import by @Thomasdezeeuw in https://github.com/rust-lang/log/pull/628 +* Loosen orderings for logger initialization in https://github.com/rust-lang/log/pull/632. Originally by @pwoolcoc in https://github.com/rust-lang/log/pull/599 +* Use Location::caller() for file and line info in https://github.com/rust-lang/log/pull/633. 
Originally by @Cassy343 in https://github.com/rust-lang/log/pull/520 + +### New Contributors +* @chrivers made their first contribution in https://github.com/rust-lang/log/pull/621 +* @DIvkov575 made their first contribution in https://github.com/rust-lang/log/pull/619 +* @Catamantaloedis made their first contribution in https://github.com/rust-lang/log/pull/625 + +**Full Changelog**: https://github.com/rust-lang/log/compare/0.4.21...0.4.22 + +## [0.4.21] - 2024-02-27 + +### What's Changed +* Minor clippy nits by @nyurik in https://github.com/rust-lang/log/pull/578 +* Simplify Display impl by @nyurik in https://github.com/rust-lang/log/pull/579 +* Set all crates to 2021 edition by @nyurik in https://github.com/rust-lang/log/pull/580 +* Various changes based on review by @Thomasdezeeuw in https://github.com/rust-lang/log/pull/583 +* Fix typo in file_static() method doc by @dimo414 in https://github.com/rust-lang/log/pull/590 +* Specialize empty key value pairs by @EFanZh in https://github.com/rust-lang/log/pull/576 +* Fix incorrect lifetime in Value::to_str() by @peterjoel in https://github.com/rust-lang/log/pull/587 +* Remove some API of the key-value feature by @Thomasdezeeuw in https://github.com/rust-lang/log/pull/585 +* Add logcontrol-log and log-reload by @swsnr in https://github.com/rust-lang/log/pull/595 +* Add Serialization section to kv::Value docs by @Thomasdezeeuw in https://github.com/rust-lang/log/pull/593 +* Rename Value::to_str to to_cow_str by @Thomasdezeeuw in https://github.com/rust-lang/log/pull/592 +* Clarify documentation and simplify initialization of `STATIC_MAX_LEVEL` by @ptosi in https://github.com/rust-lang/log/pull/594 +* Update docs to 2021 edition, test by @nyurik in https://github.com/rust-lang/log/pull/577 +* Add "alterable_logger" link to README.md by @brummer-simon in https://github.com/rust-lang/log/pull/589 +* Normalize line ending by @EFanZh in https://github.com/rust-lang/log/pull/602 +* Remove `ok_or` in favor of `Option::ok_or` by @AngelicosPhosphoros in https://github.com/rust-lang/log/pull/607 +* Use `Acquire` ordering for initialization check by @AngelicosPhosphoros in https://github.com/rust-lang/log/pull/610 +* Get structured logging API ready for stabilization by @KodrAus in https://github.com/rust-lang/log/pull/613 + +### New Contributors +* @nyurik made their first contribution in https://github.com/rust-lang/log/pull/578 +* @dimo414 made their first contribution in https://github.com/rust-lang/log/pull/590 +* @peterjoel made their first contribution in https://github.com/rust-lang/log/pull/587 +* @ptosi made their first contribution in https://github.com/rust-lang/log/pull/594 +* @brummer-simon made their first contribution in https://github.com/rust-lang/log/pull/589 +* @AngelicosPhosphoros made their first contribution in https://github.com/rust-lang/log/pull/607 + +## [0.4.20] - 2023-07-11 + +* Remove rustversion dev-dependency by @Thomasdezeeuw in https://github.com/rust-lang/log/pull/568 +* Remove `local_inner_macros` usage by @EFanZh in https://github.com/rust-lang/log/pull/570 + +## [0.4.19] - 2023-06-10 + +* Use target_has_atomic instead of the old atomic_cas cfg by @GuillaumeGomez in https://github.com/rust-lang/log/pull/555 +* Put MSRV into Cargo.toml by @est31 in https://github.com/rust-lang/log/pull/557 + +## [0.4.18] - 2023-05-28 + +* fix Markdown links (again) by @hellow554 in https://github.com/rust-lang/log/pull/513 +* add cargo doc to workflow by @hellow554 in https://github.com/rust-lang/log/pull/515 +* Apply Clippy lints by 
@hellow554 in https://github.com/rust-lang/log/pull/516 +* Replace ad-hoc eq_ignore_ascii_case with slice::eq_ignore_ascii_case by @glandium in https://github.com/rust-lang/log/pull/519 +* fix up windows targets by @KodrAus in https://github.com/rust-lang/log/pull/528 +* typo fix by @jiangying000 in https://github.com/rust-lang/log/pull/529 +* Remove dependency on cfg_if by @EriKWDev in https://github.com/rust-lang/log/pull/536 +* GitHub Workflows security hardening by @sashashura in https://github.com/rust-lang/log/pull/538 +* Fix build status badge by @atouchet in https://github.com/rust-lang/log/pull/539 +* Add call_logger to the documentation by @a1ecbr0wn in https://github.com/rust-lang/log/pull/547 +* Use stable internals for key-value API by @KodrAus in https://github.com/rust-lang/log/pull/550 +* Change wording of list of implementations by @Thomasdezeeuw in https://github.com/rust-lang/log/pull/553 +* Add std-logger to list of implementations by @Thomasdezeeuw in https://github.com/rust-lang/log/pull/554 +* Add `set_max_level_racy` and gate `set_max_level` by @djkoloski in https://github.com/rust-lang/log/pull/544 +* [doc] src/lib.rs : prefix an unused variable with an underscore by @OccupyMars2025 in https://github.com/rust-lang/log/pull/561 +* [doc] src/macros.rs : correct grammar errors of an example in lib documentation by @OccupyMars2025 in https://github.com/rust-lang/log/pull/562 + +## [0.4.17] - 2022-04-29 + +* Update `kv_unstable` internal dependencies. + +## [0.4.16] - 2022-03-22 + +* Fix a conflict with unqualified `Option` use in macros. + +## [0.4.15] - 2022-02-23 + +* Silence a warning about the deprecated `spin_loop_hint`. +* Relax ordering in the atomic `set_max_level` call. +* Add thumbv4t-none-eabi to targets that don't support atomics +* Allow levels to be iterated over. +* Implement `Log` on some common wrapper types. +* Improvements to test coverage. +* Improvements to documentation. +* Add key-value support to the `log!` macros. +* Tighten `kv_unstable` internal dependencies, so they don't bump past their current alpha. +* Add a simple visit API to `kv_unstable`. +* Support `NonZero*` integers as values in structured logging +* Support static strings as keys in structured logging + +## [0.4.14] - 2021-01-27 + +* Remove the `__private_api_log_lit` special case. +* Fixed incorrect combination of `kv_unstable` and `std` features causing compile failures. +* Remove unstable `Value::to_*` conversions that were incorrectly using `as`. +* Rename unstable `Value::to_error` to `Value::to_borrowed_error`. + +## [0.4.13] - 2021-01-11 + +* This is the same as `0.4.11`, except with a `kv_unstable_std` feature added to aid migrating current dependents to `0.4.14` (which was originally going to be `0.4.13` until it was decided to create a patch from `0.4.11` to minimize disruption). + +## [0.4.12] - 2020-12-24 + +### New + +* Support platforms without atomics by racing instead of failing to compile +* Implement `Log` for `Box` +* Update `cfg-if` to `1.0` +* Internal reworks of the structured logging API. Removed the `Fill` API +and added `source::as_map` and `source::as_list` to easily serialize a `Source` +as either a map of `{key: value, ..}` or as a list of `[(key, value), ..]`. + +### Fixed + +* Fixed deserialization of `LevelFilter` to use their `u64` index variants + +## [0.4.11] - 2020-07-09 + +### New + +* Support coercing structured values into concrete types. +* Reference the `win_dbg_logger` in the readme. 
+ +### Fixed + +* Updates a few deprecated items used internally. +* Fixed issues in docs and expands sections. +* Show the correct build badge in the readme. +* Fix up a possible inference breakage with structured value errors. +* Respect formatting flags in structured value formatting. + +## [0.4.10] - 2019-12-16 (yanked) + +### Fixed + +* Fixed the `log!` macros, so they work in expression context (this regressed in `0.4.9`, which has been yanked). + +## [0.4.9] - 2019-12-12 (yanked) + +### Minimum Supported Rust Version + +This release bumps the minimum compiler version to `1.31.0`. This was mainly needed for `cfg-if`, +but between `1.16.0` and `1.31.0` there are a lot of language and library improvements we now +take advantage of. + +### New + +* Unstable support for capturing key-value pairs in a record using the `log!` macros + +### Improved + +* Better documentation for max level filters. +* Internal updates to line up with bumped MSRV + +## [0.4.8] - 2019-07-28 + +### New + +* Support attempting to get `Record` fields as static strings. + +## [0.4.7] - 2019-07-06 + +### New + +* Support for embedded environments with thread-unsafe initialization. +* Initial unstable support for capturing structured data under the `kv_unstable` +feature gate. This new API doesn't affect existing users and may change in future +patches (so those changes may not appear in the changelog until it stabilizes). + +### Improved + +* Docs for using `log` with the 2018 edition. +* Error messages for macros missing arguments. + +## [0.4.6] - 2018-10-27 + +### Improved + +* Support 2018-style macro import for the `log_enabled!` macro. + +## [0.4.5] - 2018-09-03 + +### Improved + +* Make `log`'s internal helper macros less likely to conflict with user-defined + macros. + +## [0.4.4] - 2018-08-17 + +### Improved + +* Support 2018-style imports of the log macros. + +## [0.4.3] - 2018-06-29 + +### Improved + +* More code generation improvements. + +## [0.4.2] - 2018-06-05 + +### Improved + +* Log invocations now generate less code. + +### Fixed + +* Example Logger implementations now properly set the max log level. + +## [0.4.1] - 2017-12-30 + +### Fixed + +* Some doc links were fixed. + +## [0.4.0] - 2017-12-24 + +The changes in this release include cleanup of some obscure functionality and a more robust public +API designed to support bridges to other logging systems, and provide more flexibility to new +features in the future. + +### Compatibility + +Vast portions of the Rust ecosystem use the 0.3.x release series of log, and we don't want to force +the community to go through the pain of upgrading every crate to 0.4.x at the exact same time. Along +with 0.4.0, we've published a new 0.3.9 release which acts as a "shim" over 0.4.0. This will allow +crates using either version to coexist without losing messages from one side or the other. + +There is one caveat - a log message generated by a crate using 0.4.x but consumed by a logging +implementation using 0.3.x will not have a file name or module path. Applications affected by this +can upgrade their logging implementations to one using 0.4.x to avoid losing this information. The +other direction does not lose any information, fortunately! + +**TL;DR** Libraries should feel comfortable upgrading to 0.4.0 without treating that as a breaking +change. Applications may need to update their logging implementation (e.g. env-logger) to a newer +version using log 0.4.x to avoid losing module and file information. + +### New + +* The crate is now `no_std` by default. 
+* `Level` and `LevelFilter` now implement `Serialize` and `Deserialize` when the `serde` feature is + enabled. +* The `Record` and `Metadata` types can now be constructed by third-party code via a builder API. +* The `logger` free function returns a reference to the logger implementation. This, along with the + ability to construct `Record`s, makes it possible to bridge from another logging framework to + this one without digging into the private internals of the crate. The standard `error!` `warn!`, + etc., macros now exclusively use the public API of the crate rather than "secret" internal APIs. +* `Log::flush` has been added to allow crates to tell the logging implementation to ensure that all + "in flight" log events have been persisted. This can be used, for example, just before an + application exits to ensure that asynchronous log sinks finish their work. + +### Removed + +* The `shutdown` and `shutdown_raw` functions have been removed. Supporting shutdown significantly + complicated the implementation and imposed a performance cost on each logging operation. +* The `log_panics` function and its associated `nightly` Cargo feature have been removed. Use the + [log-panics](https://crates.io/crates/log-panics) instead. + +### Changed + +* The `Log` prefix has been removed from type names. For example, `LogLevelFilter` is now + `LevelFilter`, and `LogRecord` is now `Record`. +* The `MaxLogLevelFilter` object has been removed in favor of a `set_max_level` free function. +* The `set_logger` free functions have been restructured. The logger is now directly passed to the + functions rather than a closure which returns the logger. `set_logger` now takes a `&'static + Log` and is usable in `no_std` contexts in place of the old `set_logger_raw`. `set_boxed_logger` + is a convenience function which takes a `Box` but otherwise acts like `set_logger`. It + requires the `std` feature. +* The `file` and `module_path` values in `Record` no longer have the `'static` lifetime to support + integration with other logging frameworks that don't provide a `'static` lifetime for the + equivalent values. +* The `file`, `line`, and `module_path` values in `Record` are now `Option`s to support integration + with other logging frameworks that don't provide those values. + +### In the Future + +* We're looking to add support for *structured* logging - the inclusion of extra key-value pairs of + information in a log event in addition to the normal string message. This should be able to be + added in a backwards compatible manner to the 0.4.x series when the design is worked out. + +## Older + +Look at the [release tags] for information about older releases. 
+ +[Unreleased]: https://github.com/rust-lang-nursery/log/compare/0.4.28...HEAD +[0.4.28]: https://github.com/rust-lang/log/compare/0.4.27...0.4.28 +[0.4.27]: https://github.com/rust-lang/log/compare/0.4.26...0.4.27 +[0.4.26]: https://github.com/rust-lang/log/compare/0.4.25...0.4.26 +[0.4.25]: https://github.com/rust-lang/log/compare/0.4.24...0.4.25 +[0.4.24]: https://github.com/rust-lang/log/compare/0.4.23...0.4.24 +[0.4.23]: https://github.com/rust-lang/log/compare/0.4.22...0.4.23 +[0.4.22]: https://github.com/rust-lang/log/compare/0.4.21...0.4.22 +[0.4.21]: https://github.com/rust-lang/log/compare/0.4.20...0.4.21 +[0.4.20]: https://github.com/rust-lang-nursery/log/compare/0.4.19...0.4.20 +[0.4.19]: https://github.com/rust-lang-nursery/log/compare/0.4.18...0.4.19 +[0.4.18]: https://github.com/rust-lang-nursery/log/compare/0.4.17...0.4.18 +[0.4.17]: https://github.com/rust-lang-nursery/log/compare/0.4.16...0.4.17 +[0.4.16]: https://github.com/rust-lang-nursery/log/compare/0.4.15...0.4.16 +[0.4.15]: https://github.com/rust-lang-nursery/log/compare/0.4.13...0.4.15 +[0.4.14]: https://github.com/rust-lang-nursery/log/compare/0.4.13...0.4.14 +[0.4.13]: https://github.com/rust-lang-nursery/log/compare/0.4.11...0.4.13 +[0.4.12]: https://github.com/rust-lang-nursery/log/compare/0.4.11...0.4.12 +[0.4.11]: https://github.com/rust-lang-nursery/log/compare/0.4.10...0.4.11 +[0.4.10]: https://github.com/rust-lang-nursery/log/compare/0.4.9...0.4.10 +[0.4.9]: https://github.com/rust-lang-nursery/log/compare/0.4.8...0.4.9 +[0.4.8]: https://github.com/rust-lang-nursery/log/compare/0.4.7...0.4.8 +[0.4.7]: https://github.com/rust-lang-nursery/log/compare/0.4.6...0.4.7 +[0.4.6]: https://github.com/rust-lang-nursery/log/compare/0.4.5...0.4.6 +[0.4.5]: https://github.com/rust-lang-nursery/log/compare/0.4.4...0.4.5 +[0.4.4]: https://github.com/rust-lang-nursery/log/compare/0.4.3...0.4.4 +[0.4.3]: https://github.com/rust-lang-nursery/log/compare/0.4.2...0.4.3 +[0.4.2]: https://github.com/rust-lang-nursery/log/compare/0.4.1...0.4.2 +[0.4.1]: https://github.com/rust-lang-nursery/log/compare/0.4.0...0.4.1 +[0.4.0]: https://github.com/rust-lang-nursery/log/compare/0.3.8...0.4.0 +[release tags]: https://github.com/rust-lang-nursery/log/releases diff --git a/vendor/log/Cargo.lock b/vendor/log/Cargo.lock new file mode 100644 index 00000000000000..349e97e7e8e342 --- /dev/null +++ b/vendor/log/Cargo.lock @@ -0,0 +1,270 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. 
+version = 3 + +[[package]] +name = "erased-serde" +version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e004d887f51fcb9fef17317a2f3525c887d8aa3f4f50fed920816a688284a5b7" +dependencies = [ + "serde", + "typeid", +] + +[[package]] +name = "itoa" +version = "1.0.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c" + +[[package]] +name = "log" +version = "0.4.28" +dependencies = [ + "proc-macro2", + "serde", + "serde_json", + "serde_test", + "sval", + "sval_derive", + "sval_ref", + "value-bag", +] + +[[package]] +name = "memchr" +version = "2.7.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32a282da65faaf38286cf3be983213fcf1d2e2a58700e808f83f4ea9a4804bc0" + +[[package]] +name = "proc-macro2" +version = "1.0.101" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "89ae43fd86e4158d6db51ad8e2b80f313af9cc74f5c0e03ccb87de09998732de" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "quote" +version = "1.0.40" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1885c039570dc00dcb4ff087a89e185fd56bae234ddc7f056a945bf36467248d" +dependencies = [ + "proc-macro2", +] + +[[package]] +name = "ryu" +version = "1.0.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "28d3b2b1366ec20994f1fd18c3c594f05c5dd4bc44d8bb0c1c632c8d6829481f" + +[[package]] +name = "serde" +version = "1.0.219" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f0e2c6ed6606019b4e29e69dbaba95b11854410e5347d525002456dbbb786b6" +dependencies = [ + "serde_derive", +] + +[[package]] +name = "serde_derive" +version = "1.0.219" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b0276cf7f2c73365f7157c8123c21cd9a50fbbd844757af28ca1f5925fc2a00" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "serde_fmt" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e1d4ddca14104cd60529e8c7f7ba71a2c8acd8f7f5cfcdc2faf97eeb7c3010a4" +dependencies = [ + "serde", +] + +[[package]] +name = "serde_json" +version = "1.0.143" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d401abef1d108fbd9cbaebc3e46611f4b1021f714a0597a71f41ee463f5f4a5a" +dependencies = [ + "itoa", + "memchr", + "ryu", + "serde", +] + +[[package]] +name = "serde_test" +version = "1.0.177" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f901ee573cab6b3060453d2d5f0bae4e6d628c23c0a962ff9b5f1d7c8d4f1ed" +dependencies = [ + "serde", +] + +[[package]] +name = "sval" +version = "2.14.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7cc9739f56c5d0c44a5ed45473ec868af02eb896af8c05f616673a31e1d1bb09" + +[[package]] +name = "sval_buffer" +version = "2.14.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f39b07436a8c271b34dad5070c634d1d3d76d6776e938ee97b4a66a5e8003d0b" +dependencies = [ + "sval", + "sval_ref", +] + +[[package]] +name = "sval_derive" +version = "2.14.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fcb59acf1048b0d0472a2393fc4bb3082217103245f51470313298ec7b7fbe6" +dependencies = [ + "sval_derive_macros", +] + +[[package]] +name = "sval_derive_macros" +version = "2.14.1" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "0b0dcdc2dad24659b85a75c0fe56a62e6d7d7ff8168195dc8117e6d98e528fc9" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "sval_dynamic" +version = "2.14.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ffcb072d857431bf885580dacecf05ed987bac931230736739a79051dbf3499b" +dependencies = [ + "sval", +] + +[[package]] +name = "sval_fmt" +version = "2.14.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f214f427ad94a553e5ca5514c95c6be84667cbc5568cce957f03f3477d03d5c" +dependencies = [ + "itoa", + "ryu", + "sval", +] + +[[package]] +name = "sval_json" +version = "2.14.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "389ed34b32e638dec9a99c8ac92d0aa1220d40041026b625474c2b6a4d6f4feb" +dependencies = [ + "itoa", + "ryu", + "sval", +] + +[[package]] +name = "sval_nested" +version = "2.14.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "14bae8fcb2f24fee2c42c1f19037707f7c9a29a0cda936d2188d48a961c4bb2a" +dependencies = [ + "sval", + "sval_buffer", + "sval_ref", +] + +[[package]] +name = "sval_ref" +version = "2.14.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2a4eaea3821d3046dcba81d4b8489421da42961889902342691fb7eab491d79e" +dependencies = [ + "sval", +] + +[[package]] +name = "sval_serde" +version = "2.14.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "172dd4aa8cb3b45c8ac8f3b4111d644cd26938b0643ede8f93070812b87fb339" +dependencies = [ + "serde", + "sval", + "sval_nested", +] + +[[package]] +name = "syn" +version = "2.0.106" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ede7c438028d4436d71104916910f5bb611972c5cfd7f89b8300a8186e6fada6" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "typeid" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bc7d623258602320d5c55d1bc22793b57daff0ec7efc270ea7d55ce1d5f5471c" + +[[package]] +name = "unicode-ident" +version = "1.0.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a5f39404a5da50712a4c1eecf25e90dd62b613502b7e925fd4e4d19b5c96512" + +[[package]] +name = "value-bag" +version = "1.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "943ce29a8a743eb10d6082545d861b24f9d1b160b7d741e0f2cdf726bec909c5" +dependencies = [ + "value-bag-serde1", + "value-bag-sval2", +] + +[[package]] +name = "value-bag-serde1" +version = "1.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "35540706617d373b118d550d41f5dfe0b78a0c195dc13c6815e92e2638432306" +dependencies = [ + "erased-serde", + "serde", + "serde_fmt", +] + +[[package]] +name = "value-bag-sval2" +version = "1.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6fe7e140a2658cc16f7ee7a86e413e803fc8f9b5127adc8755c19f9fefa63a52" +dependencies = [ + "sval", + "sval_buffer", + "sval_dynamic", + "sval_fmt", + "sval_json", + "sval_ref", + "sval_serde", +] diff --git a/vendor/log/Cargo.toml b/vendor/log/Cargo.toml new file mode 100644 index 00000000000000..cd0abc6ab8cb2c --- /dev/null +++ b/vendor/log/Cargo.toml @@ -0,0 +1,151 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite 
`path` dependencies +# to registry (e.g., crates.io) dependencies. +# +# If you are reading this file be aware that the original Cargo.toml +# will likely look very different (and much more reasonable). +# See Cargo.toml.orig for the original contents. + +[package] +edition = "2021" +rust-version = "1.61.0" +name = "log" +version = "0.4.28" +authors = ["The Rust Project Developers"] +build = false +exclude = ["rfcs/**/*"] +autolib = false +autobins = false +autoexamples = false +autotests = false +autobenches = false +description = """ +A lightweight logging facade for Rust +""" +documentation = "https://docs.rs/log" +readme = "README.md" +keywords = ["logging"] +categories = ["development-tools::debugging"] +license = "MIT OR Apache-2.0" +repository = "https://github.com/rust-lang/log" + +[package.metadata.docs.rs] +features = [ + "std", + "serde", + "kv_std", + "kv_sval", + "kv_serde", +] + +[features] +kv = [] +kv_serde = [ + "kv_std", + "value-bag/serde", + "serde", +] +kv_std = [ + "std", + "kv", + "value-bag/error", +] +kv_sval = [ + "kv", + "value-bag/sval", + "sval", + "sval_ref", +] +kv_unstable = [ + "kv", + "value-bag", +] +kv_unstable_serde = [ + "kv_serde", + "kv_unstable_std", +] +kv_unstable_std = [ + "kv_std", + "kv_unstable", +] +kv_unstable_sval = [ + "kv_sval", + "kv_unstable", +] +max_level_debug = [] +max_level_error = [] +max_level_info = [] +max_level_off = [] +max_level_trace = [] +max_level_warn = [] +release_max_level_debug = [] +release_max_level_error = [] +release_max_level_info = [] +release_max_level_off = [] +release_max_level_trace = [] +release_max_level_warn = [] +std = [] + +[lib] +name = "log" +path = "src/lib.rs" + +[[test]] +name = "integration" +path = "tests/integration.rs" + +[[test]] +name = "macros" +path = "tests/macros.rs" + +[[bench]] +name = "value" +path = "benches/value.rs" + +[dependencies.serde] +version = "1.0" +optional = true +default-features = false + +[dependencies.sval] +version = "2.14.1" +optional = true +default-features = false + +[dependencies.sval_ref] +version = "2.1" +optional = true +default-features = false + +[dependencies.value-bag] +version = "1.7" +features = ["inline-i128"] +optional = true +default-features = false + +[dev-dependencies.proc-macro2] +version = "1.0.63" +default-features = false + +[dev-dependencies.serde] +version = "1.0" +features = ["derive"] + +[dev-dependencies.serde_json] +version = "1.0" + +[dev-dependencies.serde_test] +version = "1.0" + +[dev-dependencies.sval] +version = "2.1" + +[dev-dependencies.sval_derive] +version = "2.1" + +[dev-dependencies.value-bag] +version = "1.7" +features = ["test"] diff --git a/vendor/log/LICENSE-APACHE b/vendor/log/LICENSE-APACHE new file mode 100644 index 00000000000000..16fe87b06e802f --- /dev/null +++ b/vendor/log/LICENSE-APACHE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
diff --git a/vendor/log/LICENSE-MIT b/vendor/log/LICENSE-MIT new file mode 100644 index 00000000000000..39d4bdb5acd313 --- /dev/null +++ b/vendor/log/LICENSE-MIT @@ -0,0 +1,25 @@ +Copyright (c) 2014 The Rust Project Developers + +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. diff --git a/vendor/log/README.md b/vendor/log/README.md new file mode 100644 index 00000000000000..9d5113d2402386 --- /dev/null +++ b/vendor/log/README.md @@ -0,0 +1,134 @@ +log +=== + +A Rust library providing a lightweight logging *facade*. + +[![Build status](https://img.shields.io/github/actions/workflow/status/rust-lang/log/main.yml?branch=master)](https://github.com/rust-lang/log/actions) +[![Latest version](https://img.shields.io/crates/v/log.svg)](https://crates.io/crates/log) +[![Documentation](https://docs.rs/log/badge.svg)](https://docs.rs/log) +![License](https://img.shields.io/crates/l/log.svg) + +* [`log` documentation](https://docs.rs/log) + +A logging facade provides a single logging API that abstracts over the actual +logging implementation. Libraries can use the logging API provided by this +crate, and the consumer of those libraries can choose the logging +implementation that is most suitable for its use case. + + +## Minimum supported `rustc` + +`1.61.0+` + +This version is explicitly tested in CI and may be bumped in any release as needed. Maintaining compatibility with older compilers is a priority though, so the bar for bumping the minimum supported version is set very high. Any changes to the supported minimum version will be called out in the release notes. + +## Usage + +### In libraries + +Libraries should link only to the `log` crate, and use the provided macros to +log whatever information will be useful to downstream consumers: + +```toml +[dependencies] +log = "0.4" +``` + +```rust +use log::{info, trace, warn}; + +pub fn shave_the_yak(yak: &mut Yak) { + trace!("Commencing yak shaving"); + + loop { + match find_a_razor() { + Ok(razor) => { + info!("Razor located: {razor}"); + yak.shave(razor); + break; + } + Err(err) => { + warn!("Unable to locate a razor: {err}, retrying"); + } + } + } +} +``` + +### In executables + +In order to produce log output, executables have to use a logger implementation compatible with the facade. 
+There are many available implementations to choose from, here are some options: + +* Simple minimal loggers: + * [`env_logger`](https://docs.rs/env_logger/*/env_logger/) + * [`colog`](https://docs.rs/colog/*/colog/) + * [`simple_logger`](https://docs.rs/simple_logger/*/simple_logger/) + * [`simplelog`](https://docs.rs/simplelog/*/simplelog/) + * [`pretty_env_logger`](https://docs.rs/pretty_env_logger/*/pretty_env_logger/) + * [`stderrlog`](https://docs.rs/stderrlog/*/stderrlog/) + * [`flexi_logger`](https://docs.rs/flexi_logger/*/flexi_logger/) + * [`call_logger`](https://docs.rs/call_logger/*/call_logger/) + * [`std-logger`](https://docs.rs/std-logger/*/std_logger/) + * [`structured-logger`](https://docs.rs/structured-logger/latest/structured_logger/) + * [`clang_log`](https://docs.rs/clang_log/latest/clang_log) + * [`ftail`](https://docs.rs/ftail/latest/ftail/) +* Complex configurable frameworks: + * [`log4rs`](https://docs.rs/log4rs/*/log4rs/) + * [`logforth`](https://docs.rs/logforth/*/logforth/) + * [`fern`](https://docs.rs/fern/*/fern/) + * [`spdlog-rs`](https://docs.rs/spdlog-rs/*/spdlog/) +* Adaptors for other facilities: + * [`syslog`](https://docs.rs/syslog/*/syslog/) + * [`systemd-journal-logger`](https://docs.rs/systemd-journal-logger/*/systemd_journal_logger/) + * [`slog-stdlog`](https://docs.rs/slog-stdlog/*/slog_stdlog/) + * [`android_log`](https://docs.rs/android_log/*/android_log/) + * [`win_dbg_logger`](https://docs.rs/win_dbg_logger/*/win_dbg_logger/) + * [`db_logger`](https://docs.rs/db_logger/*/db_logger/) + * [`log-to-defmt`](https://docs.rs/log-to-defmt/*/log_to_defmt/) + * [`logcontrol-log`](https://docs.rs/logcontrol-log/*/logcontrol_log/) +* For WebAssembly binaries: + * [`console_log`](https://docs.rs/console_log/*/console_log/) +* For dynamic libraries: + * You may need to construct [an FFI-safe wrapper over `log`](https://github.com/rust-lang/log/issues/421) to initialize in your libraries. +* Utilities: + * [`log_err`](https://docs.rs/log_err/*/log_err/) + * [`log-reload`](https://docs.rs/log-reload/*/log_reload/) + * [`alterable_logger`](https://docs.rs/alterable_logger/*/alterable_logger) + +Executables should choose a logger implementation and initialize it early in the +runtime of the program. Logger implementations will typically include a +function to do this. Any log messages generated before the logger is +initialized will be ignored. + +The executable itself may use the `log` crate to log as well. + +## Structured logging + +If you enable the `kv` feature, you can associate structured data with your log records: + +```rust +use log::{info, trace, warn}; + +pub fn shave_the_yak(yak: &mut Yak) { + // `yak:serde` will capture `yak` using its `serde::Serialize` impl + // + // You could also use `:?` for `Debug`, or `:%` for `Display`. 
For a + // full list, see the `log` crate documentation + trace!(target = "yak_events", yak:serde; "Commencing yak shaving"); + + loop { + match find_a_razor() { + Ok(razor) => { + info!(razor; "Razor located"); + yak.shave(razor); + break; + } + Err(e) => { + // `e:err` will capture `e` using its `std::error::Error` impl + warn!(e:err; "Unable to locate a razor, retrying"); + } + } + } +} +``` diff --git a/vendor/log/benches/value.rs b/vendor/log/benches/value.rs new file mode 100644 index 00000000000000..3d0f18bfe43e06 --- /dev/null +++ b/vendor/log/benches/value.rs @@ -0,0 +1,27 @@ +#![cfg(feature = "kv")] +#![feature(test)] + +use log::kv::Value; + +#[bench] +fn u8_to_value(b: &mut test::Bencher) { + b.iter(|| Value::from(1u8)); +} + +#[bench] +fn u8_to_value_debug(b: &mut test::Bencher) { + b.iter(|| Value::from_debug(&1u8)); +} + +#[bench] +fn str_to_value_debug(b: &mut test::Bencher) { + b.iter(|| Value::from_debug(&"a string")); +} + +#[bench] +fn custom_to_value_debug(b: &mut test::Bencher) { + #[derive(Debug)] + struct A; + + b.iter(|| Value::from_debug(&A)); +} diff --git a/vendor/log/src/__private_api.rs b/vendor/log/src/__private_api.rs new file mode 100644 index 00000000000000..58d4c0fab621dd --- /dev/null +++ b/vendor/log/src/__private_api.rs @@ -0,0 +1,151 @@ +//! WARNING: this is not part of the crate's public API and is subject to change at any time + +use self::sealed::KVs; +use crate::{logger, Level, Log, Metadata, Record}; +use std::fmt::Arguments; +use std::panic::Location; +pub use std::{format_args, module_path, stringify}; + +#[cfg(not(feature = "kv"))] +pub type Value<'a> = &'a str; + +mod sealed { + /// Types for the `kv` argument. + pub trait KVs<'a> { + fn into_kvs(self) -> Option<&'a [(&'a str, super::Value<'a>)]>; + } +} + +// Types for the `kv` argument. + +impl<'a> KVs<'a> for &'a [(&'a str, Value<'a>)] { + #[inline] + fn into_kvs(self) -> Option<&'a [(&'a str, Value<'a>)]> { + Some(self) + } +} + +impl<'a> KVs<'a> for () { + #[inline] + fn into_kvs(self) -> Option<&'a [(&'a str, Value<'a>)]> { + None + } +} + +// Log implementation. + +/// The global logger proxy. 
+#[derive(Debug)] +pub struct GlobalLogger; + +impl Log for GlobalLogger { + fn enabled(&self, metadata: &Metadata) -> bool { + logger().enabled(metadata) + } + + fn log(&self, record: &Record) { + logger().log(record) + } + + fn flush(&self) { + logger().flush() + } +} + +// Split from `log` to reduce generics and code size +fn log_impl<L: Log>( + logger: L, + args: Arguments, + level: Level, + &(target, module_path, loc): &(&str, &'static str, &'static Location), + kvs: Option<&[(&str, Value)]>, +) { + #[cfg(not(feature = "kv"))] + if kvs.is_some() { + panic!("key-value support is experimental and must be enabled using the `kv` feature") + } + + let mut builder = Record::builder(); + + builder + .args(args) + .level(level) + .target(target) + .module_path_static(Some(module_path)) + .file_static(Some(loc.file())) + .line(Some(loc.line())); + + #[cfg(feature = "kv")] + builder.key_values(&kvs); + + logger.log(&builder.build()); +} + +pub fn log<'a, K, L>( + logger: L, + args: Arguments, + level: Level, + target_module_path_and_loc: &(&str, &'static str, &'static Location), + kvs: K, +) where + K: KVs<'a>, + L: Log, +{ + log_impl( + logger, + args, + level, + target_module_path_and_loc, + kvs.into_kvs(), + ) +} + +pub fn enabled<L: Log>(logger: L, level: Level, target: &str) -> bool { + logger.enabled(&Metadata::builder().level(level).target(target).build()) +} + +#[track_caller] +pub fn loc() -> &'static Location<'static> { + Location::caller() +} + +#[cfg(feature = "kv")] +mod kv_support { + use crate::kv; + + pub type Value<'a> = kv::Value<'a>; + + // NOTE: Many functions here accept a double reference &&V + // This is so V itself can be ?Sized, while still letting us + // erase it to some dyn Trait (because &T is sized) + + pub fn capture_to_value<'a, V: kv::ToValue + ?Sized>(v: &'a &'a V) -> Value<'a> { + v.to_value() + } + + pub fn capture_debug<'a, V: core::fmt::Debug + ?Sized>(v: &'a &'a V) -> Value<'a> { + Value::from_debug(v) + } + + pub fn capture_display<'a, V: core::fmt::Display + ?Sized>(v: &'a &'a V) -> Value<'a> { + Value::from_display(v) + } + + #[cfg(feature = "kv_std")] + pub fn capture_error<'a>(v: &'a (dyn std::error::Error + 'static)) -> Value<'a> { + Value::from_dyn_error(v) + } + + #[cfg(feature = "kv_sval")] + pub fn capture_sval<'a, V: sval::Value + ?Sized>(v: &'a &'a V) -> Value<'a> { + Value::from_sval(v) + } + + #[cfg(feature = "kv_serde")] + pub fn capture_serde<'a, V: serde::Serialize + ?Sized>(v: &'a &'a V) -> Value<'a> { + Value::from_serde(v) + } +} + +#[cfg(feature = "kv")] +pub use self::kv_support::*; diff --git a/vendor/log/src/kv/error.rs b/vendor/log/src/kv/error.rs new file mode 100644 index 00000000000000..7efa5af3612605 --- /dev/null +++ b/vendor/log/src/kv/error.rs @@ -0,0 +1,94 @@ +use std::fmt; + +/// An error encountered while working with structured data. +#[derive(Debug)] +pub struct Error { + inner: Inner, +} + +#[derive(Debug)] +enum Inner { + #[cfg(feature = "std")] + Boxed(std_support::BoxedError), + Msg(&'static str), + #[cfg(feature = "value-bag")] + Value(crate::kv::value::inner::Error), + Fmt, +} + +impl Error { + /// Create an error from a message.
+ pub fn msg(msg: &'static str) -> Self { + Error { + inner: Inner::Msg(msg), + } + } + + // Not public so we don't leak the `crate::kv::value::inner` API + #[cfg(feature = "value-bag")] + pub(super) fn from_value(err: crate::kv::value::inner::Error) -> Self { + Error { + inner: Inner::Value(err), + } + } + + // Not public so we don't leak the `crate::kv::value::inner` API + #[cfg(feature = "value-bag")] + pub(super) fn into_value(self) -> crate::kv::value::inner::Error { + match self.inner { + Inner::Value(err) => err, + #[cfg(feature = "kv_std")] + _ => crate::kv::value::inner::Error::boxed(self), + #[cfg(not(feature = "kv_std"))] + _ => crate::kv::value::inner::Error::msg("error inspecting a value"), + } + } +} + +impl fmt::Display for Error { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + use self::Inner::*; + match &self.inner { + #[cfg(feature = "std")] + Boxed(err) => err.fmt(f), + #[cfg(feature = "value-bag")] + Value(err) => err.fmt(f), + Msg(msg) => msg.fmt(f), + Fmt => fmt::Error.fmt(f), + } + } +} + +impl From<fmt::Error> for Error { + fn from(_: fmt::Error) -> Self { + Error { inner: Inner::Fmt } + } +} + +#[cfg(feature = "std")] +mod std_support { + use super::*; + use std::{error, io}; + + pub(super) type BoxedError = Box<dyn error::Error + Send + Sync>; + + impl Error { + /// Create an error from a standard error type. + pub fn boxed<E>(err: E) -> Self + where + E: Into<BoxedError>, + { + Error { + inner: Inner::Boxed(err.into()), + } + } + } + + impl error::Error for Error {} + + impl From<io::Error> for Error { + fn from(err: io::Error) -> Self { + Error::boxed(err) + } + } +} diff --git a/vendor/log/src/kv/key.rs b/vendor/log/src/kv/key.rs new file mode 100644 index 00000000000000..6e00a2ca86a57f --- /dev/null +++ b/vendor/log/src/kv/key.rs @@ -0,0 +1,163 @@ +//! Structured keys. + +use std::borrow::Borrow; +use std::fmt; + +/// A type that can be converted into a [`Key`](struct.Key.html). +pub trait ToKey { + /// Perform the conversion. + fn to_key(&self) -> Key; +} + +impl<'a, T> ToKey for &'a T +where + T: ToKey + ?Sized, +{ + fn to_key(&self) -> Key { + (**self).to_key() + } +} + +impl<'k> ToKey for Key<'k> { + fn to_key(&self) -> Key { + Key { key: self.key } + } +} + +impl ToKey for str { + fn to_key(&self) -> Key { + Key::from_str(self) + } +} + +/// A key in a key-value. +// These impls must only be based on the as_str() representation of the key +// If a new field (such as an optional index) is added to the key they must not affect comparison +#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] +pub struct Key<'k> { + // NOTE: This may become `Cow<'k, str>` + key: &'k str, +} + +impl<'k> Key<'k> { + /// Get a key from a borrowed string. + pub fn from_str(key: &'k str) -> Self { + Key { key } + } + + /// Get a borrowed string from this key. + /// + /// The lifetime of the returned string is bound to the borrow of `self` rather + /// than to `'k`. + pub fn as_str(&self) -> &str { + self.key + } + + /// Try get a borrowed string for the lifetime `'k` from this key. + /// + /// If the key is a borrow of a longer lived string, this method will return `Some`. + /// If the key is internally buffered, this method will return `None`. + pub fn to_borrowed_str(&self) -> Option<&'k str> { + // NOTE: If the internals of `Key` support buffering this + // won't be unconditionally `Some` anymore.
We want to keep + // this option open + Some(self.key) + } +} + +impl<'k> fmt::Display for Key<'k> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + self.key.fmt(f) + } +} + +impl<'k> AsRef for Key<'k> { + fn as_ref(&self) -> &str { + self.as_str() + } +} + +impl<'k> Borrow for Key<'k> { + fn borrow(&self) -> &str { + self.as_str() + } +} + +impl<'k> From<&'k str> for Key<'k> { + fn from(s: &'k str) -> Self { + Key::from_str(s) + } +} + +#[cfg(feature = "std")] +mod std_support { + use super::*; + + use std::borrow::Cow; + + impl ToKey for String { + fn to_key(&self) -> Key { + Key::from_str(self) + } + } + + impl<'a> ToKey for Cow<'a, str> { + fn to_key(&self) -> Key { + Key::from_str(self) + } + } +} + +#[cfg(feature = "kv_sval")] +mod sval_support { + use super::*; + + use sval::Value; + use sval_ref::ValueRef; + + impl<'a> Value for Key<'a> { + fn stream<'sval, S: sval::Stream<'sval> + ?Sized>( + &'sval self, + stream: &mut S, + ) -> sval::Result { + self.key.stream(stream) + } + } + + impl<'a> ValueRef<'a> for Key<'a> { + fn stream_ref + ?Sized>(&self, stream: &mut S) -> sval::Result { + self.key.stream(stream) + } + } +} + +#[cfg(feature = "kv_serde")] +mod serde_support { + use super::*; + + use serde::{Serialize, Serializer}; + + impl<'a> Serialize for Key<'a> { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + self.key.serialize(serializer) + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn key_from_string() { + assert_eq!("a key", Key::from_str("a key").as_str()); + } + + #[test] + fn key_to_borrowed() { + assert_eq!("a key", Key::from_str("a key").to_borrowed_str().unwrap()); + } +} diff --git a/vendor/log/src/kv/mod.rs b/vendor/log/src/kv/mod.rs new file mode 100644 index 00000000000000..34e61c3ae5d59e --- /dev/null +++ b/vendor/log/src/kv/mod.rs @@ -0,0 +1,265 @@ +//! Structured logging. +//! +//! Add the `kv` feature to your `Cargo.toml` to enable +//! this module: +//! +//! ```toml +//! [dependencies.log] +//! features = ["kv"] +//! ``` +//! +//! # Structured logging in `log` +//! +//! Structured logging enhances traditional text-based log records with user-defined +//! attributes. Structured logs can be analyzed using a variety of data processing +//! techniques, without needing to find and parse attributes from unstructured text first. +//! +//! In `log`, user-defined attributes are part of a [`Source`] on the log record. +//! Each attribute is a key-value; a pair of [`Key`] and [`Value`]. Keys are strings +//! and values are a datum of any type that can be formatted or serialized. Simple types +//! like strings, booleans, and numbers are supported, as well as arbitrarily complex +//! structures involving nested objects and sequences. +//! +//! ## Adding key-values to log records +//! +//! Key-values appear before the message format in the `log!` macros: +//! +//! ``` +//! # use log::info; +//! info!(a = 1; "Something of interest"); +//! ``` +//! +//! Key-values support the same shorthand identifier syntax as `format_args`: +//! +//! ``` +//! # use log::info; +//! let a = 1; +//! +//! info!(a; "Something of interest"); +//! ``` +//! +//! Values are capturing using the [`ToValue`] trait by default. To capture a value +//! using a different trait implementation, use a modifier after its key. Here's how +//! the same example can capture `a` using its `Debug` implementation instead: +//! +//! ``` +//! # use log::info; +//! info!(a:? = 1; "Something of interest"); +//! ``` +//! +//! 
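+//! A value can likewise be captured using its `Display` implementation, via the
+//! `:%` modifier (a small additional sketch; like the examples above, it assumes
+//! the `kv` feature is enabled):
+//!
+//! ```
+//! # use log::info;
+//! info!(a:% = 1; "Something of interest");
+//! ```
+//!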
The following capturing modifiers are supported: +//! +//! - `:?` will capture the value using `Debug`. +//! - `:debug` will capture the value using `Debug`. +//! - `:%` will capture the value using `Display`. +//! - `:display` will capture the value using `Display`. +//! - `:err` will capture the value using `std::error::Error` (requires the `kv_std` feature). +//! - `:sval` will capture the value using `sval::Value` (requires the `kv_sval` feature). +//! - `:serde` will capture the value using `serde::Serialize` (requires the `kv_serde` feature). +//! +//! ## Working with key-values on log records +//! +//! Use the [`Record::key_values`](../struct.Record.html#method.key_values) method to access key-values. +//! +//! Individual values can be pulled from the source by their key: +//! +//! ``` +//! # fn main() -> Result<(), log::kv::Error> { +//! use log::kv::{Source, Key, Value}; +//! # let record = log::Record::builder().key_values(&[("a", 1)]).build(); +//! +//! // info!(a = 1; "Something of interest"); +//! +//! let a: Value = record.key_values().get(Key::from("a")).unwrap(); +//! assert_eq!(1, a.to_i64().unwrap()); +//! # Ok(()) +//! # } +//! ``` +//! +//! All key-values can also be enumerated using a [`VisitSource`]: +//! +//! ``` +//! # fn main() -> Result<(), log::kv::Error> { +//! use std::collections::BTreeMap; +//! +//! use log::kv::{self, Source, Key, Value, VisitSource}; +//! +//! struct Collect<'kvs>(BTreeMap, Value<'kvs>>); +//! +//! impl<'kvs> VisitSource<'kvs> for Collect<'kvs> { +//! fn visit_pair(&mut self, key: Key<'kvs>, value: Value<'kvs>) -> Result<(), kv::Error> { +//! self.0.insert(key, value); +//! +//! Ok(()) +//! } +//! } +//! +//! let mut visitor = Collect(BTreeMap::new()); +//! +//! # let record = log::Record::builder().key_values(&[("a", 1), ("b", 2), ("c", 3)]).build(); +//! // info!(a = 1, b = 2, c = 3; "Something of interest"); +//! +//! record.key_values().visit(&mut visitor)?; +//! +//! let collected = visitor.0; +//! +//! assert_eq!( +//! vec!["a", "b", "c"], +//! collected +//! .keys() +//! .map(|k| k.as_str()) +//! .collect::>(), +//! ); +//! # Ok(()) +//! # } +//! ``` +//! +//! [`Value`]s have methods for conversions to common types: +//! +//! ``` +//! # fn main() -> Result<(), log::kv::Error> { +//! use log::kv::{Source, Key}; +//! # let record = log::Record::builder().key_values(&[("a", 1)]).build(); +//! +//! // info!(a = 1; "Something of interest"); +//! +//! let a = record.key_values().get(Key::from("a")).unwrap(); +//! +//! assert_eq!(1, a.to_i64().unwrap()); +//! # Ok(()) +//! # } +//! ``` +//! +//! Values also have their own [`VisitValue`] type. Value visitors are a lightweight +//! API for working with primitives types: +//! +//! ``` +//! # fn main() -> Result<(), log::kv::Error> { +//! use log::kv::{self, Source, Key, VisitValue}; +//! # let record = log::Record::builder().key_values(&[("a", 1)]).build(); +//! +//! struct IsNumeric(bool); +//! +//! impl<'kvs> VisitValue<'kvs> for IsNumeric { +//! fn visit_any(&mut self, _value: kv::Value) -> Result<(), kv::Error> { +//! self.0 = false; +//! Ok(()) +//! } +//! +//! fn visit_u64(&mut self, _value: u64) -> Result<(), kv::Error> { +//! self.0 = true; +//! Ok(()) +//! } +//! +//! fn visit_i64(&mut self, _value: i64) -> Result<(), kv::Error> { +//! self.0 = true; +//! Ok(()) +//! } +//! +//! fn visit_u128(&mut self, _value: u128) -> Result<(), kv::Error> { +//! self.0 = true; +//! Ok(()) +//! } +//! +//! fn visit_i128(&mut self, _value: i128) -> Result<(), kv::Error> { +//! self.0 = true; +//! 
Ok(()) +//! } +//! +//! fn visit_f64(&mut self, _value: f64) -> Result<(), kv::Error> { +//! self.0 = true; +//! Ok(()) +//! } +//! } +//! +//! // info!(a = 1; "Something of interest"); +//! +//! let a = record.key_values().get(Key::from("a")).unwrap(); +//! +//! let mut visitor = IsNumeric(false); +//! +//! a.visit(&mut visitor)?; +//! +//! let is_numeric = visitor.0; +//! +//! assert!(is_numeric); +//! # Ok(()) +//! # } +//! ``` +//! +//! To serialize a value to a format like JSON, you can also use either `serde` or `sval`: +//! +//! ``` +//! # fn main() -> Result<(), Box> { +//! # #[cfg(feature = "serde")] +//! # { +//! # use log::kv::Key; +//! #[derive(serde::Serialize)] +//! struct Data { +//! a: i32, b: bool, +//! c: &'static str, +//! } +//! +//! let data = Data { a: 1, b: true, c: "Some data" }; +//! +//! # let source = [("a", log::kv::Value::from_serde(&data))]; +//! # let record = log::Record::builder().key_values(&source).build(); +//! // info!(a = data; "Something of interest"); +//! +//! let a = record.key_values().get(Key::from("a")).unwrap(); +//! +//! assert_eq!("{\"a\":1,\"b\":true,\"c\":\"Some data\"}", serde_json::to_string(&a)?); +//! # } +//! # Ok(()) +//! # } +//! ``` +//! +//! The choice of serialization framework depends on the needs of the consumer. +//! If you're in a no-std environment, you can use `sval`. In other cases, you can use `serde`. +//! Log producers and log consumers don't need to agree on the serialization framework. +//! A value can be captured using its `serde::Serialize` implementation and still be serialized +//! through `sval` without losing any structure or data. +//! +//! Values can also always be formatted using the standard `Debug` and `Display` +//! traits: +//! +//! ``` +//! # use log::kv::Key; +//! #[derive(Debug)] +//! struct Data { +//! a: i32, +//! b: bool, +//! c: &'static str, +//! } +//! +//! let data = Data { a: 1, b: true, c: "Some data" }; +//! +//! # let source = [("a", log::kv::Value::from_debug(&data))]; +//! # let record = log::Record::builder().key_values(&source).build(); +//! // info!(a = data; "Something of interest"); +//! +//! let a = record.key_values().get(Key::from("a")).unwrap(); +//! +//! assert_eq!("Data { a: 1, b: true, c: \"Some data\" }", format!("{a:?}")); +//! ``` + +mod error; +mod key; + +#[cfg(not(feature = "kv_unstable"))] +mod source; +#[cfg(not(feature = "kv_unstable"))] +mod value; + +pub use self::error::Error; +pub use self::key::{Key, ToKey}; +pub use self::source::{Source, VisitSource}; +pub use self::value::{ToValue, Value, VisitValue}; + +#[cfg(feature = "kv_unstable")] +pub mod source; +#[cfg(feature = "kv_unstable")] +pub mod value; + +#[cfg(feature = "kv_unstable")] +pub use self::source::Visitor; diff --git a/vendor/log/src/kv/source.rs b/vendor/log/src/kv/source.rs new file mode 100644 index 00000000000000..f463e6d2b68a82 --- /dev/null +++ b/vendor/log/src/kv/source.rs @@ -0,0 +1,514 @@ +//! Sources for key-values. +//! +//! This module defines the [`Source`] type and supporting APIs for +//! working with collections of key-values. + +use crate::kv::{Error, Key, ToKey, ToValue, Value}; +use std::fmt; + +/// A source of key-values. +/// +/// The source may be a single pair, a set of pairs, or a filter over a set of pairs. +/// Use the [`VisitSource`](trait.VisitSource.html) trait to inspect the structured data +/// in a source. +/// +/// A source is like an iterator over its key-values, except with a push-based API +/// instead of a pull-based one. 
+/// +/// # Examples +/// +/// Enumerating the key-values in a source: +/// +/// ``` +/// # fn main() -> Result<(), log::kv::Error> { +/// use log::kv::{self, Source, Key, Value, VisitSource}; +/// +/// // A `VisitSource` that prints all key-values +/// // VisitSources are fed the key-value pairs of each key-values +/// struct Printer; +/// +/// impl<'kvs> VisitSource<'kvs> for Printer { +/// fn visit_pair(&mut self, key: Key<'kvs>, value: Value<'kvs>) -> Result<(), kv::Error> { +/// println!("{key}: {value}"); +/// +/// Ok(()) +/// } +/// } +/// +/// // A source with 3 key-values +/// // Common collection types implement the `Source` trait +/// let source = &[ +/// ("a", 1), +/// ("b", 2), +/// ("c", 3), +/// ]; +/// +/// // Pass an instance of the `VisitSource` to a `Source` to visit it +/// source.visit(&mut Printer)?; +/// # Ok(()) +/// # } +/// ``` +pub trait Source { + /// Visit key-values. + /// + /// A source doesn't have to guarantee any ordering or uniqueness of key-values. + /// If the given visitor returns an error then the source may early-return with it, + /// even if there are more key-values. + /// + /// # Implementation notes + /// + /// A source should yield the same key-values to a subsequent visitor unless + /// that visitor itself fails. + fn visit<'kvs>(&'kvs self, visitor: &mut dyn VisitSource<'kvs>) -> Result<(), Error>; + + /// Get the value for a given key. + /// + /// If the key appears multiple times in the source then which key is returned + /// is implementation specific. + /// + /// # Implementation notes + /// + /// A source that can provide a more efficient implementation of this method + /// should override it. + fn get(&self, key: Key) -> Option> { + get_default(self, key) + } + + /// Count the number of key-values that can be visited. + /// + /// # Implementation notes + /// + /// A source that knows the number of key-values upfront may provide a more + /// efficient implementation. + /// + /// A subsequent call to `visit` should yield the same number of key-values. + fn count(&self) -> usize { + count_default(self) + } +} + +/// The default implementation of `Source::get` +fn get_default<'v>(source: &'v (impl Source + ?Sized), key: Key) -> Option> { + struct Get<'k, 'v> { + key: Key<'k>, + found: Option>, + } + + impl<'k, 'kvs> VisitSource<'kvs> for Get<'k, 'kvs> { + fn visit_pair(&mut self, key: Key<'kvs>, value: Value<'kvs>) -> Result<(), Error> { + if self.key == key { + self.found = Some(value); + } + + Ok(()) + } + } + + let mut get = Get { key, found: None }; + + let _ = source.visit(&mut get); + get.found +} + +/// The default implementation of `Source::count`. 
+fn count_default(source: impl Source) -> usize { + struct Count(usize); + + impl<'kvs> VisitSource<'kvs> for Count { + fn visit_pair(&mut self, _: Key<'kvs>, _: Value<'kvs>) -> Result<(), Error> { + self.0 += 1; + + Ok(()) + } + } + + let mut count = Count(0); + let _ = source.visit(&mut count); + count.0 +} + +impl<'a, T> Source for &'a T +where + T: Source + ?Sized, +{ + fn visit<'kvs>(&'kvs self, visitor: &mut dyn VisitSource<'kvs>) -> Result<(), Error> { + Source::visit(&**self, visitor) + } + + fn get(&self, key: Key) -> Option> { + Source::get(&**self, key) + } + + fn count(&self) -> usize { + Source::count(&**self) + } +} + +impl Source for (K, V) +where + K: ToKey, + V: ToValue, +{ + fn visit<'kvs>(&'kvs self, visitor: &mut dyn VisitSource<'kvs>) -> Result<(), Error> { + visitor.visit_pair(self.0.to_key(), self.1.to_value()) + } + + fn get(&self, key: Key) -> Option> { + if self.0.to_key() == key { + Some(self.1.to_value()) + } else { + None + } + } + + fn count(&self) -> usize { + 1 + } +} + +impl Source for [S] +where + S: Source, +{ + fn visit<'kvs>(&'kvs self, visitor: &mut dyn VisitSource<'kvs>) -> Result<(), Error> { + for source in self { + source.visit(visitor)?; + } + + Ok(()) + } + + fn get(&self, key: Key) -> Option> { + for source in self { + if let Some(found) = source.get(key.clone()) { + return Some(found); + } + } + + None + } + + fn count(&self) -> usize { + self.iter().map(Source::count).sum() + } +} + +impl Source for [S; N] +where + S: Source, +{ + fn visit<'kvs>(&'kvs self, visitor: &mut dyn VisitSource<'kvs>) -> Result<(), Error> { + Source::visit(self as &[_], visitor) + } + + fn get(&self, key: Key) -> Option> { + Source::get(self as &[_], key) + } + + fn count(&self) -> usize { + Source::count(self as &[_]) + } +} + +impl Source for Option +where + S: Source, +{ + fn visit<'kvs>(&'kvs self, visitor: &mut dyn VisitSource<'kvs>) -> Result<(), Error> { + if let Some(source) = self { + source.visit(visitor)?; + } + + Ok(()) + } + + fn get(&self, key: Key) -> Option> { + self.as_ref().and_then(|s| s.get(key)) + } + + fn count(&self) -> usize { + self.as_ref().map_or(0, Source::count) + } +} + +/// A visitor for the key-value pairs in a [`Source`](trait.Source.html). +pub trait VisitSource<'kvs> { + /// Visit a key-value pair. 
+ fn visit_pair(&mut self, key: Key<'kvs>, value: Value<'kvs>) -> Result<(), Error>; +} + +impl<'a, 'kvs, T> VisitSource<'kvs> for &'a mut T +where + T: VisitSource<'kvs> + ?Sized, +{ + fn visit_pair(&mut self, key: Key<'kvs>, value: Value<'kvs>) -> Result<(), Error> { + (**self).visit_pair(key, value) + } +} + +impl<'a, 'b: 'a, 'kvs> VisitSource<'kvs> for fmt::DebugMap<'a, 'b> { + fn visit_pair(&mut self, key: Key<'kvs>, value: Value<'kvs>) -> Result<(), Error> { + self.entry(&key, &value); + Ok(()) + } +} + +impl<'a, 'b: 'a, 'kvs> VisitSource<'kvs> for fmt::DebugList<'a, 'b> { + fn visit_pair(&mut self, key: Key<'kvs>, value: Value<'kvs>) -> Result<(), Error> { + self.entry(&(key, value)); + Ok(()) + } +} + +impl<'a, 'b: 'a, 'kvs> VisitSource<'kvs> for fmt::DebugSet<'a, 'b> { + fn visit_pair(&mut self, key: Key<'kvs>, value: Value<'kvs>) -> Result<(), Error> { + self.entry(&(key, value)); + Ok(()) + } +} + +impl<'a, 'b: 'a, 'kvs> VisitSource<'kvs> for fmt::DebugTuple<'a, 'b> { + fn visit_pair(&mut self, key: Key<'kvs>, value: Value<'kvs>) -> Result<(), Error> { + self.field(&key); + self.field(&value); + Ok(()) + } +} + +#[cfg(feature = "std")] +mod std_support { + use super::*; + use std::borrow::Borrow; + use std::collections::{BTreeMap, HashMap}; + use std::hash::{BuildHasher, Hash}; + use std::rc::Rc; + use std::sync::Arc; + + impl Source for Box + where + S: Source + ?Sized, + { + fn visit<'kvs>(&'kvs self, visitor: &mut dyn VisitSource<'kvs>) -> Result<(), Error> { + Source::visit(&**self, visitor) + } + + fn get(&self, key: Key) -> Option> { + Source::get(&**self, key) + } + + fn count(&self) -> usize { + Source::count(&**self) + } + } + + impl Source for Arc + where + S: Source + ?Sized, + { + fn visit<'kvs>(&'kvs self, visitor: &mut dyn VisitSource<'kvs>) -> Result<(), Error> { + Source::visit(&**self, visitor) + } + + fn get(&self, key: Key) -> Option> { + Source::get(&**self, key) + } + + fn count(&self) -> usize { + Source::count(&**self) + } + } + + impl Source for Rc + where + S: Source + ?Sized, + { + fn visit<'kvs>(&'kvs self, visitor: &mut dyn VisitSource<'kvs>) -> Result<(), Error> { + Source::visit(&**self, visitor) + } + + fn get(&self, key: Key) -> Option> { + Source::get(&**self, key) + } + + fn count(&self) -> usize { + Source::count(&**self) + } + } + + impl Source for Vec + where + S: Source, + { + fn visit<'kvs>(&'kvs self, visitor: &mut dyn VisitSource<'kvs>) -> Result<(), Error> { + Source::visit(&**self, visitor) + } + + fn get(&self, key: Key) -> Option> { + Source::get(&**self, key) + } + + fn count(&self) -> usize { + Source::count(&**self) + } + } + + impl<'kvs, V> VisitSource<'kvs> for Box + where + V: VisitSource<'kvs> + ?Sized, + { + fn visit_pair(&mut self, key: Key<'kvs>, value: Value<'kvs>) -> Result<(), Error> { + (**self).visit_pair(key, value) + } + } + + impl Source for HashMap + where + K: ToKey + Borrow + Eq + Hash, + V: ToValue, + S: BuildHasher, + { + fn visit<'kvs>(&'kvs self, visitor: &mut dyn VisitSource<'kvs>) -> Result<(), Error> { + for (key, value) in self { + visitor.visit_pair(key.to_key(), value.to_value())?; + } + Ok(()) + } + + fn get(&self, key: Key) -> Option> { + HashMap::get(self, key.as_str()).map(|v| v.to_value()) + } + + fn count(&self) -> usize { + self.len() + } + } + + impl Source for BTreeMap + where + K: ToKey + Borrow + Ord, + V: ToValue, + { + fn visit<'kvs>(&'kvs self, visitor: &mut dyn VisitSource<'kvs>) -> Result<(), Error> { + for (key, value) in self { + visitor.visit_pair(key.to_key(), value.to_value())?; + } 
+ Ok(()) + } + + fn get(&self, key: Key) -> Option> { + BTreeMap::get(self, key.as_str()).map(|v| v.to_value()) + } + + fn count(&self) -> usize { + self.len() + } + } + + #[cfg(test)] + mod tests { + use crate::kv::value; + + use super::*; + + #[test] + fn count() { + assert_eq!(1, Source::count(&Box::new(("a", 1)))); + assert_eq!(2, Source::count(&vec![("a", 1), ("b", 2)])); + } + + #[test] + fn get() { + let source = vec![("a", 1), ("b", 2), ("a", 1)]; + assert_eq!( + value::inner::Token::I64(1), + Source::get(&source, Key::from_str("a")).unwrap().to_token() + ); + + let source = Box::new(None::<(&str, i32)>); + assert!(Source::get(&source, Key::from_str("a")).is_none()); + } + + #[test] + fn hash_map() { + let mut map = HashMap::new(); + map.insert("a", 1); + map.insert("b", 2); + + assert_eq!(2, Source::count(&map)); + assert_eq!( + value::inner::Token::I64(1), + Source::get(&map, Key::from_str("a")).unwrap().to_token() + ); + } + + #[test] + fn btree_map() { + let mut map = BTreeMap::new(); + map.insert("a", 1); + map.insert("b", 2); + + assert_eq!(2, Source::count(&map)); + assert_eq!( + value::inner::Token::I64(1), + Source::get(&map, Key::from_str("a")).unwrap().to_token() + ); + } + } +} + +// NOTE: Deprecated; but aliases can't carry this attribute +#[cfg(feature = "kv_unstable")] +pub use VisitSource as Visitor; + +#[cfg(test)] +mod tests { + use crate::kv::value; + + use super::*; + + #[test] + fn source_is_object_safe() { + fn _check(_: &dyn Source) {} + } + + #[test] + fn visitor_is_object_safe() { + fn _check(_: &dyn VisitSource) {} + } + + #[test] + fn count() { + struct OnePair { + key: &'static str, + value: i32, + } + + impl Source for OnePair { + fn visit<'kvs>(&'kvs self, visitor: &mut dyn VisitSource<'kvs>) -> Result<(), Error> { + visitor.visit_pair(self.key.to_key(), self.value.to_value()) + } + } + + assert_eq!(1, Source::count(&("a", 1))); + assert_eq!(2, Source::count(&[("a", 1), ("b", 2)] as &[_])); + assert_eq!(0, Source::count(&None::<(&str, i32)>)); + assert_eq!(1, Source::count(&OnePair { key: "a", value: 1 })); + } + + #[test] + fn get() { + let source = &[("a", 1), ("b", 2), ("a", 1)] as &[_]; + assert_eq!( + value::inner::Token::I64(1), + Source::get(source, Key::from_str("a")).unwrap().to_token() + ); + assert_eq!( + value::inner::Token::I64(2), + Source::get(source, Key::from_str("b")).unwrap().to_token() + ); + assert!(Source::get(&source, Key::from_str("c")).is_none()); + + let source = None::<(&str, i32)>; + assert!(Source::get(&source, Key::from_str("a")).is_none()); + } +} diff --git a/vendor/log/src/kv/value.rs b/vendor/log/src/kv/value.rs new file mode 100644 index 00000000000000..e604c806c61041 --- /dev/null +++ b/vendor/log/src/kv/value.rs @@ -0,0 +1,1395 @@ +//! Structured values. +//! +//! This module defines the [`Value`] type and supporting APIs for +//! capturing and serializing them. + +use std::fmt; + +pub use crate::kv::Error; + +/// A type that can be converted into a [`Value`](struct.Value.html). +pub trait ToValue { + /// Perform the conversion. + fn to_value(&self) -> Value; +} + +impl<'a, T> ToValue for &'a T +where + T: ToValue + ?Sized, +{ + fn to_value(&self) -> Value { + (**self).to_value() + } +} + +impl<'v> ToValue for Value<'v> { + fn to_value(&self) -> Value { + Value { + inner: self.inner.clone(), + } + } +} + +/// A value in a key-value. +/// +/// Values are an anonymous bag containing some structured datum. 
+/// +/// # Capturing values +/// +/// There are a few ways to capture a value: +/// +/// - Using the `Value::from_*` methods. +/// - Using the `ToValue` trait. +/// - Using the standard `From` trait. +/// +/// ## Using the `Value::from_*` methods +/// +/// `Value` offers a few constructor methods that capture values of different kinds. +/// +/// ``` +/// use log::kv::Value; +/// +/// let value = Value::from_debug(&42i32); +/// +/// assert_eq!(None, value.to_i64()); +/// ``` +/// +/// ## Using the `ToValue` trait +/// +/// The `ToValue` trait can be used to capture values generically. +/// It's the bound used by `Source`. +/// +/// ``` +/// # use log::kv::ToValue; +/// let value = 42i32.to_value(); +/// +/// assert_eq!(Some(42), value.to_i64()); +/// ``` +/// +/// ## Using the standard `From` trait +/// +/// Standard types that implement `ToValue` also implement `From`. +/// +/// ``` +/// use log::kv::Value; +/// +/// let value = Value::from(42i32); +/// +/// assert_eq!(Some(42), value.to_i64()); +/// ``` +/// +/// # Data model +/// +/// Values can hold one of a number of types: +/// +/// - **Null:** The absence of any other meaningful value. Note that +/// `Some(Value::null())` is not the same as `None`. The former is +/// `null` while the latter is `undefined`. This is important to be +/// able to tell the difference between a key-value that was logged, +/// but its value was empty (`Some(Value::null())`) and a key-value +/// that was never logged at all (`None`). +/// - **Strings:** `str`, `char`. +/// - **Booleans:** `bool`. +/// - **Integers:** `u8`-`u128`, `i8`-`i128`, `NonZero*`. +/// - **Floating point numbers:** `f32`-`f64`. +/// - **Errors:** `dyn (Error + 'static)`. +/// - **`serde`:** Any type in `serde`'s data model. +/// - **`sval`:** Any type in `sval`'s data model. +/// +/// # Serialization +/// +/// Values provide a number of ways to be serialized. +/// +/// For basic types the [`Value::visit`] method can be used to extract the +/// underlying typed value. However, this is limited in the amount of types +/// supported (see the [`VisitValue`] trait methods). +/// +/// For more complex types one of the following traits can be used: +/// * `sval::Value`, requires the `kv_sval` feature. +/// * `serde::Serialize`, requires the `kv_serde` feature. +/// +/// You don't need a visitor to serialize values through `serde` or `sval`. +/// +/// A value can always be serialized using any supported framework, regardless +/// of how it was captured. If, for example, a value was captured using its +/// `Display` implementation, it will serialize through `serde` as a string. If it was +/// captured as a struct using `serde`, it will also serialize as a struct +/// through `sval`, or can be formatted using a `Debug`-compatible representation. +#[derive(Clone)] +pub struct Value<'v> { + inner: inner::Inner<'v>, +} + +impl<'v> Value<'v> { + /// Get a value from a type implementing `ToValue`. + pub fn from_any(value: &'v T) -> Self + where + T: ToValue, + { + value.to_value() + } + + /// Get a value from a type implementing `std::fmt::Debug`. + pub fn from_debug(value: &'v T) -> Self + where + T: fmt::Debug, + { + Value { + inner: inner::Inner::from_debug(value), + } + } + + /// Get a value from a type implementing `std::fmt::Display`. + pub fn from_display(value: &'v T) -> Self + where + T: fmt::Display, + { + Value { + inner: inner::Inner::from_display(value), + } + } + + /// Get a value from a type implementing `serde::Serialize`. 
+ #[cfg(feature = "kv_serde")] + pub fn from_serde(value: &'v T) -> Self + where + T: serde::Serialize, + { + Value { + inner: inner::Inner::from_serde1(value), + } + } + + /// Get a value from a type implementing `sval::Value`. + #[cfg(feature = "kv_sval")] + pub fn from_sval(value: &'v T) -> Self + where + T: sval::Value, + { + Value { + inner: inner::Inner::from_sval2(value), + } + } + + /// Get a value from a dynamic `std::fmt::Debug`. + pub fn from_dyn_debug(value: &'v dyn fmt::Debug) -> Self { + Value { + inner: inner::Inner::from_dyn_debug(value), + } + } + + /// Get a value from a dynamic `std::fmt::Display`. + pub fn from_dyn_display(value: &'v dyn fmt::Display) -> Self { + Value { + inner: inner::Inner::from_dyn_display(value), + } + } + + /// Get a value from a dynamic error. + #[cfg(feature = "kv_std")] + pub fn from_dyn_error(err: &'v (dyn std::error::Error + 'static)) -> Self { + Value { + inner: inner::Inner::from_dyn_error(err), + } + } + + /// Get a `null` value. + pub fn null() -> Self { + Value { + inner: inner::Inner::empty(), + } + } + + /// Get a value from an internal primitive. + fn from_inner(value: T) -> Self + where + T: Into>, + { + Value { + inner: value.into(), + } + } + + /// Inspect this value using a simple visitor. + /// + /// When the `kv_serde` or `kv_sval` features are enabled, you can also + /// serialize a value using its `Serialize` or `Value` implementation. + pub fn visit(&self, visitor: impl VisitValue<'v>) -> Result<(), Error> { + inner::visit(&self.inner, visitor) + } +} + +impl<'v> fmt::Debug for Value<'v> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fmt::Debug::fmt(&self.inner, f) + } +} + +impl<'v> fmt::Display for Value<'v> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fmt::Display::fmt(&self.inner, f) + } +} + +#[cfg(feature = "kv_serde")] +impl<'v> serde::Serialize for Value<'v> { + fn serialize(&self, s: S) -> Result + where + S: serde::Serializer, + { + self.inner.serialize(s) + } +} + +#[cfg(feature = "kv_sval")] +impl<'v> sval::Value for Value<'v> { + fn stream<'sval, S: sval::Stream<'sval> + ?Sized>(&'sval self, stream: &mut S) -> sval::Result { + sval::Value::stream(&self.inner, stream) + } +} + +#[cfg(feature = "kv_sval")] +impl<'v> sval_ref::ValueRef<'v> for Value<'v> { + fn stream_ref + ?Sized>(&self, stream: &mut S) -> sval::Result { + sval_ref::ValueRef::stream_ref(&self.inner, stream) + } +} + +impl ToValue for str { + fn to_value(&self) -> Value { + Value::from(self) + } +} + +impl<'v> From<&'v str> for Value<'v> { + fn from(value: &'v str) -> Self { + Value::from_inner(value) + } +} + +impl ToValue for () { + fn to_value(&self) -> Value { + Value::from_inner(()) + } +} + +impl ToValue for Option +where + T: ToValue, +{ + fn to_value(&self) -> Value { + match *self { + Some(ref value) => value.to_value(), + None => Value::from_inner(()), + } + } +} + +macro_rules! impl_to_value_primitive { + ($($into_ty:ty,)*) => { + $( + impl ToValue for $into_ty { + fn to_value(&self) -> Value { + Value::from(*self) + } + } + + impl<'v> From<$into_ty> for Value<'v> { + fn from(value: $into_ty) -> Self { + Value::from_inner(value) + } + } + + impl<'v> From<&'v $into_ty> for Value<'v> { + fn from(value: &'v $into_ty) -> Self { + Value::from_inner(*value) + } + } + )* + }; +} + +macro_rules! 
impl_to_value_nonzero_primitive { + ($($into_ty:ident,)*) => { + $( + impl ToValue for std::num::$into_ty { + fn to_value(&self) -> Value { + Value::from(self.get()) + } + } + + impl<'v> From for Value<'v> { + fn from(value: std::num::$into_ty) -> Self { + Value::from(value.get()) + } + } + + impl<'v> From<&'v std::num::$into_ty> for Value<'v> { + fn from(value: &'v std::num::$into_ty) -> Self { + Value::from(value.get()) + } + } + )* + }; +} + +macro_rules! impl_value_to_primitive { + ($(#[doc = $doc:tt] $into_name:ident -> $into_ty:ty,)*) => { + impl<'v> Value<'v> { + $( + #[doc = $doc] + pub fn $into_name(&self) -> Option<$into_ty> { + self.inner.$into_name() + } + )* + } + } +} + +impl_to_value_primitive![ + usize, u8, u16, u32, u64, u128, isize, i8, i16, i32, i64, i128, f32, f64, char, bool, +]; + +#[rustfmt::skip] +impl_to_value_nonzero_primitive![ + NonZeroUsize, NonZeroU8, NonZeroU16, NonZeroU32, NonZeroU64, NonZeroU128, + NonZeroIsize, NonZeroI8, NonZeroI16, NonZeroI32, NonZeroI64, NonZeroI128, +]; + +impl_value_to_primitive![ + #[doc = "Try convert this value into a `u64`."] + to_u64 -> u64, + #[doc = "Try convert this value into a `i64`."] + to_i64 -> i64, + #[doc = "Try convert this value into a `u128`."] + to_u128 -> u128, + #[doc = "Try convert this value into a `i128`."] + to_i128 -> i128, + #[doc = "Try convert this value into a `f64`."] + to_f64 -> f64, + #[doc = "Try convert this value into a `char`."] + to_char -> char, + #[doc = "Try convert this value into a `bool`."] + to_bool -> bool, +]; + +impl<'v> Value<'v> { + /// Try to convert this value into an error. + #[cfg(feature = "kv_std")] + pub fn to_borrowed_error(&self) -> Option<&(dyn std::error::Error + 'static)> { + self.inner.to_borrowed_error() + } + + /// Try to convert this value into a borrowed string. + pub fn to_borrowed_str(&self) -> Option<&'v str> { + self.inner.to_borrowed_str() + } +} + +#[cfg(feature = "kv_std")] +mod std_support { + use std::borrow::Cow; + use std::rc::Rc; + use std::sync::Arc; + + use super::*; + + impl ToValue for Box + where + T: ToValue + ?Sized, + { + fn to_value(&self) -> Value { + (**self).to_value() + } + } + + impl ToValue for Arc + where + T: ToValue + ?Sized, + { + fn to_value(&self) -> Value { + (**self).to_value() + } + } + + impl ToValue for Rc + where + T: ToValue + ?Sized, + { + fn to_value(&self) -> Value { + (**self).to_value() + } + } + + impl ToValue for String { + fn to_value(&self) -> Value { + Value::from(&**self) + } + } + + impl<'v> ToValue for Cow<'v, str> { + fn to_value(&self) -> Value { + Value::from(&**self) + } + } + + impl<'v> Value<'v> { + /// Try convert this value into a string. + pub fn to_cow_str(&self) -> Option> { + self.inner.to_str() + } + } + + impl<'v> From<&'v String> for Value<'v> { + fn from(v: &'v String) -> Self { + Value::from(&**v) + } + } +} + +/// A visitor for a [`Value`]. +/// +/// Also see [`Value`'s documentation on serialization]. Value visitors are a simple alternative +/// to a more fully-featured serialization framework like `serde` or `sval`. A value visitor +/// can differentiate primitive types through methods like [`VisitValue::visit_bool`] and +/// [`VisitValue::visit_str`], but more complex types like maps and sequences +/// will fallthrough to [`VisitValue::visit_any`]. +/// +/// If you're trying to serialize a value to a format like JSON, you can use either `serde` +/// or `sval` directly with the value. You don't need a visitor. 
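+///
+/// As a small sketch (the `CollectStr` type below is purely illustrative, and the
+/// `kv` feature is assumed to be enabled), a visitor that records string values and
+/// ignores everything else could look like this:
+///
+/// ```
+/// use log::kv::{Error, Value, VisitValue};
+///
+/// #[derive(Default)]
+/// struct CollectStr(Option<String>);
+///
+/// impl<'v> VisitValue<'v> for CollectStr {
+///     // Fallback for anything that isn't a string: ignore it.
+///     fn visit_any(&mut self, _value: Value) -> Result<(), Error> {
+///         Ok(())
+///     }
+///
+///     fn visit_str(&mut self, value: &str) -> Result<(), Error> {
+///         self.0 = Some(value.to_owned());
+///         Ok(())
+///     }
+/// }
+///
+/// let mut visitor = CollectStr::default();
+/// Value::from("a string").visit(&mut visitor).unwrap();
+/// assert_eq!(Some("a string".to_owned()), visitor.0);
+/// ```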
+/// +/// [`Value`'s documentation on serialization]: Value#serialization +pub trait VisitValue<'v> { + /// Visit a `Value`. + /// + /// This is the only required method on `VisitValue` and acts as a fallback for any + /// more specific methods that aren't overridden. + /// The `Value` may be formatted using its `fmt::Debug` or `fmt::Display` implementation, + /// or serialized using its `sval::Value` or `serde::Serialize` implementation. + fn visit_any(&mut self, value: Value) -> Result<(), Error>; + + /// Visit an empty value. + fn visit_null(&mut self) -> Result<(), Error> { + self.visit_any(Value::null()) + } + + /// Visit an unsigned integer. + fn visit_u64(&mut self, value: u64) -> Result<(), Error> { + self.visit_any(value.into()) + } + + /// Visit a signed integer. + fn visit_i64(&mut self, value: i64) -> Result<(), Error> { + self.visit_any(value.into()) + } + + /// Visit a big unsigned integer. + fn visit_u128(&mut self, value: u128) -> Result<(), Error> { + self.visit_any((value).into()) + } + + /// Visit a big signed integer. + fn visit_i128(&mut self, value: i128) -> Result<(), Error> { + self.visit_any((value).into()) + } + + /// Visit a floating point. + fn visit_f64(&mut self, value: f64) -> Result<(), Error> { + self.visit_any(value.into()) + } + + /// Visit a boolean. + fn visit_bool(&mut self, value: bool) -> Result<(), Error> { + self.visit_any(value.into()) + } + + /// Visit a string. + fn visit_str(&mut self, value: &str) -> Result<(), Error> { + self.visit_any(value.into()) + } + + /// Visit a string. + fn visit_borrowed_str(&mut self, value: &'v str) -> Result<(), Error> { + self.visit_str(value) + } + + /// Visit a Unicode character. + fn visit_char(&mut self, value: char) -> Result<(), Error> { + let mut b = [0; 4]; + self.visit_str(&*value.encode_utf8(&mut b)) + } + + /// Visit an error. + #[cfg(feature = "kv_std")] + fn visit_error(&mut self, err: &(dyn std::error::Error + 'static)) -> Result<(), Error> { + self.visit_any(Value::from_dyn_error(err)) + } + + /// Visit an error. 
+ #[cfg(feature = "kv_std")] + fn visit_borrowed_error( + &mut self, + err: &'v (dyn std::error::Error + 'static), + ) -> Result<(), Error> { + self.visit_any(Value::from_dyn_error(err)) + } +} + +impl<'a, 'v, T: ?Sized> VisitValue<'v> for &'a mut T +where + T: VisitValue<'v>, +{ + fn visit_any(&mut self, value: Value) -> Result<(), Error> { + (**self).visit_any(value) + } + + fn visit_null(&mut self) -> Result<(), Error> { + (**self).visit_null() + } + + fn visit_u64(&mut self, value: u64) -> Result<(), Error> { + (**self).visit_u64(value) + } + + fn visit_i64(&mut self, value: i64) -> Result<(), Error> { + (**self).visit_i64(value) + } + + fn visit_u128(&mut self, value: u128) -> Result<(), Error> { + (**self).visit_u128(value) + } + + fn visit_i128(&mut self, value: i128) -> Result<(), Error> { + (**self).visit_i128(value) + } + + fn visit_f64(&mut self, value: f64) -> Result<(), Error> { + (**self).visit_f64(value) + } + + fn visit_bool(&mut self, value: bool) -> Result<(), Error> { + (**self).visit_bool(value) + } + + fn visit_str(&mut self, value: &str) -> Result<(), Error> { + (**self).visit_str(value) + } + + fn visit_borrowed_str(&mut self, value: &'v str) -> Result<(), Error> { + (**self).visit_borrowed_str(value) + } + + fn visit_char(&mut self, value: char) -> Result<(), Error> { + (**self).visit_char(value) + } + + #[cfg(feature = "kv_std")] + fn visit_error(&mut self, err: &(dyn std::error::Error + 'static)) -> Result<(), Error> { + (**self).visit_error(err) + } + + #[cfg(feature = "kv_std")] + fn visit_borrowed_error( + &mut self, + err: &'v (dyn std::error::Error + 'static), + ) -> Result<(), Error> { + (**self).visit_borrowed_error(err) + } +} + +#[cfg(feature = "value-bag")] +pub(in crate::kv) mod inner { + /** + An implementation of `Value` based on a library called `value_bag`. + + `value_bag` was written specifically for use in `log`'s value, but was split out when it outgrew + the codebase here. It's a general-purpose type-erasure library that handles mapping between + more fully-featured serialization frameworks. 
+ */ + use super::*; + + pub use value_bag::ValueBag as Inner; + + pub use value_bag::Error; + + #[cfg(test)] + pub use value_bag::test::TestToken as Token; + + pub fn visit<'v>( + inner: &Inner<'v>, + visitor: impl VisitValue<'v>, + ) -> Result<(), crate::kv::Error> { + struct InnerVisitValue(V); + + impl<'v, V> value_bag::visit::Visit<'v> for InnerVisitValue + where + V: VisitValue<'v>, + { + fn visit_any(&mut self, value: value_bag::ValueBag) -> Result<(), Error> { + self.0 + .visit_any(Value { inner: value }) + .map_err(crate::kv::Error::into_value) + } + + fn visit_empty(&mut self) -> Result<(), Error> { + self.0.visit_null().map_err(crate::kv::Error::into_value) + } + + fn visit_u64(&mut self, value: u64) -> Result<(), Error> { + self.0 + .visit_u64(value) + .map_err(crate::kv::Error::into_value) + } + + fn visit_i64(&mut self, value: i64) -> Result<(), Error> { + self.0 + .visit_i64(value) + .map_err(crate::kv::Error::into_value) + } + + fn visit_u128(&mut self, value: u128) -> Result<(), Error> { + self.0 + .visit_u128(value) + .map_err(crate::kv::Error::into_value) + } + + fn visit_i128(&mut self, value: i128) -> Result<(), Error> { + self.0 + .visit_i128(value) + .map_err(crate::kv::Error::into_value) + } + + fn visit_f64(&mut self, value: f64) -> Result<(), Error> { + self.0 + .visit_f64(value) + .map_err(crate::kv::Error::into_value) + } + + fn visit_bool(&mut self, value: bool) -> Result<(), Error> { + self.0 + .visit_bool(value) + .map_err(crate::kv::Error::into_value) + } + + fn visit_str(&mut self, value: &str) -> Result<(), Error> { + self.0 + .visit_str(value) + .map_err(crate::kv::Error::into_value) + } + + fn visit_borrowed_str(&mut self, value: &'v str) -> Result<(), Error> { + self.0 + .visit_borrowed_str(value) + .map_err(crate::kv::Error::into_value) + } + + fn visit_char(&mut self, value: char) -> Result<(), Error> { + self.0 + .visit_char(value) + .map_err(crate::kv::Error::into_value) + } + + #[cfg(feature = "kv_std")] + fn visit_error( + &mut self, + err: &(dyn std::error::Error + 'static), + ) -> Result<(), Error> { + self.0 + .visit_error(err) + .map_err(crate::kv::Error::into_value) + } + + #[cfg(feature = "kv_std")] + fn visit_borrowed_error( + &mut self, + err: &'v (dyn std::error::Error + 'static), + ) -> Result<(), Error> { + self.0 + .visit_borrowed_error(err) + .map_err(crate::kv::Error::into_value) + } + } + + inner + .visit(&mut InnerVisitValue(visitor)) + .map_err(crate::kv::Error::from_value) + } +} + +#[cfg(not(feature = "value-bag"))] +pub(in crate::kv) mod inner { + /** + This is a dependency-free implementation of `Value` when there's no serialization frameworks involved. + In these simple cases a more fully featured solution like `value_bag` isn't needed, so we avoid pulling it in. + + There are a few things here that need to remain consistent with the `value_bag`-based implementation: + + 1. Conversions should always produce the same results. If a conversion here returns `Some`, then + the same `value_bag`-based conversion must also. Of particular note here are floats to ints; they're + based on the standard library's `TryInto` conversions, which need to be converted to `i32` or `u32`, + and then to `f64`. + 2. VisitValues should always be called in the same way. If a particular type of value calls `visit_i64`, + then the same `value_bag`-based visitor must also. 
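+
+    As a concrete illustration of point 1: `Value::from((u32::MAX as u64) + 1).to_f64()`
+    returns `None`, because the integer is first narrowed to `u32` (or `i32` for signed
+    values) and only then widened to `f64`; this is the same boundary the
+    `test_to_float` test below asserts on.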
+ */ + use super::*; + + #[derive(Clone)] + pub enum Inner<'v> { + None, + Bool(bool), + Str(&'v str), + Char(char), + I64(i64), + U64(u64), + F64(f64), + I128(i128), + U128(u128), + Debug(&'v dyn fmt::Debug), + Display(&'v dyn fmt::Display), + } + + impl<'v> From<()> for Inner<'v> { + fn from(_: ()) -> Self { + Inner::None + } + } + + impl<'v> From for Inner<'v> { + fn from(v: bool) -> Self { + Inner::Bool(v) + } + } + + impl<'v> From for Inner<'v> { + fn from(v: char) -> Self { + Inner::Char(v) + } + } + + impl<'v> From for Inner<'v> { + fn from(v: f32) -> Self { + Inner::F64(v as f64) + } + } + + impl<'v> From for Inner<'v> { + fn from(v: f64) -> Self { + Inner::F64(v) + } + } + + impl<'v> From for Inner<'v> { + fn from(v: i8) -> Self { + Inner::I64(v as i64) + } + } + + impl<'v> From for Inner<'v> { + fn from(v: i16) -> Self { + Inner::I64(v as i64) + } + } + + impl<'v> From for Inner<'v> { + fn from(v: i32) -> Self { + Inner::I64(v as i64) + } + } + + impl<'v> From for Inner<'v> { + fn from(v: i64) -> Self { + Inner::I64(v as i64) + } + } + + impl<'v> From for Inner<'v> { + fn from(v: isize) -> Self { + Inner::I64(v as i64) + } + } + + impl<'v> From for Inner<'v> { + fn from(v: u8) -> Self { + Inner::U64(v as u64) + } + } + + impl<'v> From for Inner<'v> { + fn from(v: u16) -> Self { + Inner::U64(v as u64) + } + } + + impl<'v> From for Inner<'v> { + fn from(v: u32) -> Self { + Inner::U64(v as u64) + } + } + + impl<'v> From for Inner<'v> { + fn from(v: u64) -> Self { + Inner::U64(v as u64) + } + } + + impl<'v> From for Inner<'v> { + fn from(v: usize) -> Self { + Inner::U64(v as u64) + } + } + + impl<'v> From for Inner<'v> { + fn from(v: i128) -> Self { + Inner::I128(v) + } + } + + impl<'v> From for Inner<'v> { + fn from(v: u128) -> Self { + Inner::U128(v) + } + } + + impl<'v> From<&'v str> for Inner<'v> { + fn from(v: &'v str) -> Self { + Inner::Str(v) + } + } + + impl<'v> fmt::Debug for Inner<'v> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + Inner::None => fmt::Debug::fmt(&None::<()>, f), + Inner::Bool(v) => fmt::Debug::fmt(v, f), + Inner::Str(v) => fmt::Debug::fmt(v, f), + Inner::Char(v) => fmt::Debug::fmt(v, f), + Inner::I64(v) => fmt::Debug::fmt(v, f), + Inner::U64(v) => fmt::Debug::fmt(v, f), + Inner::F64(v) => fmt::Debug::fmt(v, f), + Inner::I128(v) => fmt::Debug::fmt(v, f), + Inner::U128(v) => fmt::Debug::fmt(v, f), + Inner::Debug(v) => fmt::Debug::fmt(v, f), + Inner::Display(v) => fmt::Display::fmt(v, f), + } + } + } + + impl<'v> fmt::Display for Inner<'v> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + Inner::None => fmt::Debug::fmt(&None::<()>, f), + Inner::Bool(v) => fmt::Display::fmt(v, f), + Inner::Str(v) => fmt::Display::fmt(v, f), + Inner::Char(v) => fmt::Display::fmt(v, f), + Inner::I64(v) => fmt::Display::fmt(v, f), + Inner::U64(v) => fmt::Display::fmt(v, f), + Inner::F64(v) => fmt::Display::fmt(v, f), + Inner::I128(v) => fmt::Display::fmt(v, f), + Inner::U128(v) => fmt::Display::fmt(v, f), + Inner::Debug(v) => fmt::Debug::fmt(v, f), + Inner::Display(v) => fmt::Display::fmt(v, f), + } + } + } + + impl<'v> Inner<'v> { + pub fn from_debug(value: &'v T) -> Self { + Inner::Debug(value) + } + + pub fn from_display(value: &'v T) -> Self { + Inner::Display(value) + } + + pub fn from_dyn_debug(value: &'v dyn fmt::Debug) -> Self { + Inner::Debug(value) + } + + pub fn from_dyn_display(value: &'v dyn fmt::Display) -> Self { + Inner::Display(value) + } + + pub fn empty() -> Self { + Inner::None + } + + pub fn to_bool(&self) 
-> Option { + match self { + Inner::Bool(v) => Some(*v), + _ => None, + } + } + + pub fn to_char(&self) -> Option { + match self { + Inner::Char(v) => Some(*v), + _ => None, + } + } + + pub fn to_f64(&self) -> Option { + match self { + Inner::F64(v) => Some(*v), + Inner::I64(v) => { + let v: i32 = (*v).try_into().ok()?; + v.try_into().ok() + } + Inner::U64(v) => { + let v: u32 = (*v).try_into().ok()?; + v.try_into().ok() + } + Inner::I128(v) => { + let v: i32 = (*v).try_into().ok()?; + v.try_into().ok() + } + Inner::U128(v) => { + let v: u32 = (*v).try_into().ok()?; + v.try_into().ok() + } + _ => None, + } + } + + pub fn to_i64(&self) -> Option { + match self { + Inner::I64(v) => Some(*v), + Inner::U64(v) => (*v).try_into().ok(), + Inner::I128(v) => (*v).try_into().ok(), + Inner::U128(v) => (*v).try_into().ok(), + _ => None, + } + } + + pub fn to_u64(&self) -> Option { + match self { + Inner::U64(v) => Some(*v), + Inner::I64(v) => (*v).try_into().ok(), + Inner::I128(v) => (*v).try_into().ok(), + Inner::U128(v) => (*v).try_into().ok(), + _ => None, + } + } + + pub fn to_u128(&self) -> Option { + match self { + Inner::U128(v) => Some(*v), + Inner::I64(v) => (*v).try_into().ok(), + Inner::U64(v) => (*v).try_into().ok(), + Inner::I128(v) => (*v).try_into().ok(), + _ => None, + } + } + + pub fn to_i128(&self) -> Option { + match self { + Inner::I128(v) => Some(*v), + Inner::I64(v) => (*v).try_into().ok(), + Inner::U64(v) => (*v).try_into().ok(), + Inner::U128(v) => (*v).try_into().ok(), + _ => None, + } + } + + pub fn to_borrowed_str(&self) -> Option<&'v str> { + match self { + Inner::Str(v) => Some(v), + _ => None, + } + } + + #[cfg(test)] + pub fn to_test_token(&self) -> Token { + match self { + Inner::None => Token::None, + Inner::Bool(v) => Token::Bool(*v), + Inner::Str(v) => Token::Str(*v), + Inner::Char(v) => Token::Char(*v), + Inner::I64(v) => Token::I64(*v), + Inner::U64(v) => Token::U64(*v), + Inner::F64(v) => Token::F64(*v), + Inner::I128(_) => unimplemented!(), + Inner::U128(_) => unimplemented!(), + Inner::Debug(_) => unimplemented!(), + Inner::Display(_) => unimplemented!(), + } + } + } + + #[cfg(test)] + #[derive(Debug, PartialEq)] + pub enum Token<'v> { + None, + Bool(bool), + Char(char), + Str(&'v str), + F64(f64), + I64(i64), + U64(u64), + } + + pub fn visit<'v>( + inner: &Inner<'v>, + mut visitor: impl VisitValue<'v>, + ) -> Result<(), crate::kv::Error> { + match inner { + Inner::None => visitor.visit_null(), + Inner::Bool(v) => visitor.visit_bool(*v), + Inner::Str(v) => visitor.visit_borrowed_str(*v), + Inner::Char(v) => visitor.visit_char(*v), + Inner::I64(v) => visitor.visit_i64(*v), + Inner::U64(v) => visitor.visit_u64(*v), + Inner::F64(v) => visitor.visit_f64(*v), + Inner::I128(v) => visitor.visit_i128(*v), + Inner::U128(v) => visitor.visit_u128(*v), + Inner::Debug(v) => visitor.visit_any(Value::from_dyn_debug(*v)), + Inner::Display(v) => visitor.visit_any(Value::from_dyn_display(*v)), + } + } +} + +impl<'v> Value<'v> { + /// Get a value from a type implementing `std::fmt::Debug`. + #[cfg(feature = "kv_unstable")] + #[deprecated(note = "use `from_debug` instead")] + pub fn capture_debug(value: &'v T) -> Self + where + T: fmt::Debug + 'static, + { + Value::from_debug(value) + } + + /// Get a value from a type implementing `std::fmt::Display`. 
+ #[cfg(feature = "kv_unstable")] + #[deprecated(note = "use `from_display` instead")] + pub fn capture_display(value: &'v T) -> Self + where + T: fmt::Display + 'static, + { + Value::from_display(value) + } + + /// Get a value from an error. + #[cfg(feature = "kv_unstable_std")] + #[deprecated(note = "use `from_dyn_error` instead")] + pub fn capture_error(err: &'v T) -> Self + where + T: std::error::Error + 'static, + { + Value::from_dyn_error(err) + } + + /// Get a value from a type implementing `serde::Serialize`. + #[cfg(feature = "kv_unstable_serde")] + #[deprecated(note = "use `from_serde` instead")] + pub fn capture_serde(value: &'v T) -> Self + where + T: serde::Serialize + 'static, + { + Value::from_serde(value) + } + + /// Get a value from a type implementing `sval::Value`. + #[cfg(feature = "kv_unstable_sval")] + #[deprecated(note = "use `from_sval` instead")] + pub fn capture_sval(value: &'v T) -> Self + where + T: sval::Value + 'static, + { + Value::from_sval(value) + } + + /// Check whether this value can be downcast to `T`. + #[cfg(feature = "kv_unstable")] + #[deprecated( + note = "downcasting has been removed; log an issue at https://github.com/rust-lang/log/issues if this is something you rely on" + )] + pub fn is(&self) -> bool { + false + } + + /// Try downcast this value to `T`. + #[cfg(feature = "kv_unstable")] + #[deprecated( + note = "downcasting has been removed; log an issue at https://github.com/rust-lang/log/issues if this is something you rely on" + )] + pub fn downcast_ref(&self) -> Option<&T> { + None + } +} + +// NOTE: Deprecated; but aliases can't carry this attribute +#[cfg(feature = "kv_unstable")] +pub use VisitValue as Visit; + +/// Get a value from a type implementing `std::fmt::Debug`. +#[cfg(feature = "kv_unstable")] +#[deprecated(note = "use the `key:? = value` macro syntax instead")] +#[macro_export] +macro_rules! as_debug { + ($capture:expr) => { + $crate::kv::Value::from_debug(&$capture) + }; +} + +/// Get a value from a type implementing `std::fmt::Display`. +#[cfg(feature = "kv_unstable")] +#[deprecated(note = "use the `key:% = value` macro syntax instead")] +#[macro_export] +macro_rules! as_display { + ($capture:expr) => { + $crate::kv::Value::from_display(&$capture) + }; +} + +/// Get a value from an error. +#[cfg(feature = "kv_unstable_std")] +#[deprecated(note = "use the `key:err = value` macro syntax instead")] +#[macro_export] +macro_rules! as_error { + ($capture:expr) => { + $crate::kv::Value::from_dyn_error(&$capture) + }; +} + +#[cfg(feature = "kv_unstable_serde")] +#[deprecated(note = "use the `key:serde = value` macro syntax instead")] +/// Get a value from a type implementing `serde::Serialize`. +#[macro_export] +macro_rules! as_serde { + ($capture:expr) => { + $crate::kv::Value::from_serde(&$capture) + }; +} + +/// Get a value from a type implementing `sval::Value`. +#[cfg(feature = "kv_unstable_sval")] +#[deprecated(note = "use the `key:sval = value` macro syntax instead")] +#[macro_export] +macro_rules! 
as_sval { + ($capture:expr) => { + $crate::kv::Value::from_sval(&$capture) + }; +} + +#[cfg(test)] +pub(crate) mod tests { + use super::*; + + impl<'v> Value<'v> { + pub(crate) fn to_token(&self) -> inner::Token { + self.inner.to_test_token() + } + } + + fn unsigned() -> impl Iterator> { + vec![ + Value::from(8u8), + Value::from(16u16), + Value::from(32u32), + Value::from(64u64), + Value::from(1usize), + Value::from(std::num::NonZeroU8::new(8).unwrap()), + Value::from(std::num::NonZeroU16::new(16).unwrap()), + Value::from(std::num::NonZeroU32::new(32).unwrap()), + Value::from(std::num::NonZeroU64::new(64).unwrap()), + Value::from(std::num::NonZeroUsize::new(1).unwrap()), + ] + .into_iter() + } + + fn signed() -> impl Iterator> { + vec![ + Value::from(-8i8), + Value::from(-16i16), + Value::from(-32i32), + Value::from(-64i64), + Value::from(-1isize), + Value::from(std::num::NonZeroI8::new(-8).unwrap()), + Value::from(std::num::NonZeroI16::new(-16).unwrap()), + Value::from(std::num::NonZeroI32::new(-32).unwrap()), + Value::from(std::num::NonZeroI64::new(-64).unwrap()), + Value::from(std::num::NonZeroIsize::new(-1).unwrap()), + ] + .into_iter() + } + + fn float() -> impl Iterator> { + vec![Value::from(32.32f32), Value::from(64.64f64)].into_iter() + } + + fn bool() -> impl Iterator> { + vec![Value::from(true), Value::from(false)].into_iter() + } + + fn str() -> impl Iterator> { + vec![Value::from("a string"), Value::from("a loong string")].into_iter() + } + + fn char() -> impl Iterator> { + vec![Value::from('a'), Value::from('⛰')].into_iter() + } + + #[test] + fn test_to_value_display() { + assert_eq!(42u64.to_value().to_string(), "42"); + assert_eq!(42i64.to_value().to_string(), "42"); + assert_eq!(42.01f64.to_value().to_string(), "42.01"); + assert_eq!(true.to_value().to_string(), "true"); + assert_eq!('a'.to_value().to_string(), "a"); + assert_eq!("a loong string".to_value().to_string(), "a loong string"); + assert_eq!(Some(true).to_value().to_string(), "true"); + assert_eq!(().to_value().to_string(), "None"); + assert_eq!(None::.to_value().to_string(), "None"); + } + + #[test] + fn test_to_value_structured() { + assert_eq!(42u64.to_value().to_token(), inner::Token::U64(42)); + assert_eq!(42i64.to_value().to_token(), inner::Token::I64(42)); + assert_eq!(42.01f64.to_value().to_token(), inner::Token::F64(42.01)); + assert_eq!(true.to_value().to_token(), inner::Token::Bool(true)); + assert_eq!('a'.to_value().to_token(), inner::Token::Char('a')); + assert_eq!( + "a loong string".to_value().to_token(), + inner::Token::Str("a loong string".into()) + ); + assert_eq!(Some(true).to_value().to_token(), inner::Token::Bool(true)); + assert_eq!(().to_value().to_token(), inner::Token::None); + assert_eq!(None::.to_value().to_token(), inner::Token::None); + } + + #[test] + fn test_to_number() { + for v in unsigned() { + assert!(v.to_u64().is_some()); + assert!(v.to_i64().is_some()); + } + + for v in signed() { + assert!(v.to_i64().is_some()); + } + + for v in unsigned().chain(signed()).chain(float()) { + assert!(v.to_f64().is_some()); + } + + for v in bool().chain(str()).chain(char()) { + assert!(v.to_u64().is_none()); + assert!(v.to_i64().is_none()); + assert!(v.to_f64().is_none()); + } + } + + #[test] + fn test_to_float() { + // Only integers from i32::MIN..=u32::MAX can be converted into floats + assert!(Value::from(i32::MIN).to_f64().is_some()); + assert!(Value::from(u32::MAX).to_f64().is_some()); + + assert!(Value::from((i32::MIN as i64) - 1).to_f64().is_none()); + assert!(Value::from((u32::MAX as 
u64) + 1).to_f64().is_none()); + } + + #[test] + fn test_to_cow_str() { + for v in str() { + assert!(v.to_borrowed_str().is_some()); + + #[cfg(feature = "kv_std")] + assert!(v.to_cow_str().is_some()); + } + + let short_lived = String::from("short lived"); + let v = Value::from(&*short_lived); + + assert!(v.to_borrowed_str().is_some()); + + #[cfg(feature = "kv_std")] + assert!(v.to_cow_str().is_some()); + + for v in unsigned().chain(signed()).chain(float()).chain(bool()) { + assert!(v.to_borrowed_str().is_none()); + + #[cfg(feature = "kv_std")] + assert!(v.to_cow_str().is_none()); + } + } + + #[test] + fn test_to_bool() { + for v in bool() { + assert!(v.to_bool().is_some()); + } + + for v in unsigned() + .chain(signed()) + .chain(float()) + .chain(str()) + .chain(char()) + { + assert!(v.to_bool().is_none()); + } + } + + #[test] + fn test_to_char() { + for v in char() { + assert!(v.to_char().is_some()); + } + + for v in unsigned() + .chain(signed()) + .chain(float()) + .chain(str()) + .chain(bool()) + { + assert!(v.to_char().is_none()); + } + } + + #[test] + fn test_visit_integer() { + struct Extract(Option); + + impl<'v> VisitValue<'v> for Extract { + fn visit_any(&mut self, value: Value) -> Result<(), Error> { + unimplemented!("unexpected value: {value:?}") + } + + fn visit_u64(&mut self, value: u64) -> Result<(), Error> { + self.0 = Some(value); + + Ok(()) + } + } + + let mut extract = Extract(None); + Value::from(42u64).visit(&mut extract).unwrap(); + + assert_eq!(Some(42), extract.0); + } + + #[test] + fn test_visit_borrowed_str() { + struct Extract<'v>(Option<&'v str>); + + impl<'v> VisitValue<'v> for Extract<'v> { + fn visit_any(&mut self, value: Value) -> Result<(), Error> { + unimplemented!("unexpected value: {value:?}") + } + + fn visit_borrowed_str(&mut self, value: &'v str) -> Result<(), Error> { + self.0 = Some(value); + + Ok(()) + } + } + + let mut extract = Extract(None); + + let short_lived = String::from("A short-lived string"); + Value::from(&*short_lived).visit(&mut extract).unwrap(); + + assert_eq!(Some("A short-lived string"), extract.0); + } +} diff --git a/vendor/log/src/lib.rs b/vendor/log/src/lib.rs new file mode 100644 index 00000000000000..47f2cf13276ab2 --- /dev/null +++ b/vendor/log/src/lib.rs @@ -0,0 +1,2005 @@ +// Copyright 2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! A lightweight logging facade. +//! +//! The `log` crate provides a single logging API that abstracts over the +//! actual logging implementation. Libraries can use the logging API provided +//! by this crate, and the consumer of those libraries can choose the logging +//! implementation that is most suitable for its use case. +//! +//! If no logging implementation is selected, the facade falls back to a "noop" +//! implementation that ignores all log messages. The overhead in this case +//! is very small - just an integer load, comparison and jump. +//! +//! A log request consists of a _target_, a _level_, and a _body_. A target is a +//! string which defaults to the module path of the location of the log request, +//! though that default may be overridden. Logger implementations typically use +//! the target to filter requests based on some user configuration. +//! +//! # Usage +//! +//! 
The basic use of the log crate is through the five logging macros: [`error!`], +//! [`warn!`], [`info!`], [`debug!`] and [`trace!`] +//! where `error!` represents the highest-priority log messages +//! and `trace!` the lowest. The log messages are filtered by configuring +//! the log level to exclude messages with a lower priority. +//! Each of these macros accept format strings similarly to [`println!`]. +//! +//! +//! [`error!`]: ./macro.error.html +//! [`warn!`]: ./macro.warn.html +//! [`info!`]: ./macro.info.html +//! [`debug!`]: ./macro.debug.html +//! [`trace!`]: ./macro.trace.html +//! [`println!`]: https://doc.rust-lang.org/stable/std/macro.println.html +//! +//! Avoid writing expressions with side-effects in log statements. They may not be evaluated. +//! +//! ## In libraries +//! +//! Libraries should link only to the `log` crate, and use the provided +//! macros to log whatever information will be useful to downstream consumers. +//! +//! ### Examples +//! +//! ``` +//! # #[derive(Debug)] pub struct Yak(String); +//! # impl Yak { fn shave(&mut self, _: u32) {} } +//! # fn find_a_razor() -> Result { Ok(1) } +//! use log::{info, warn}; +//! +//! pub fn shave_the_yak(yak: &mut Yak) { +//! info!(target: "yak_events", "Commencing yak shaving for {yak:?}"); +//! +//! loop { +//! match find_a_razor() { +//! Ok(razor) => { +//! info!("Razor located: {razor}"); +//! yak.shave(razor); +//! break; +//! } +//! Err(err) => { +//! warn!("Unable to locate a razor: {err}, retrying"); +//! } +//! } +//! } +//! } +//! # fn main() {} +//! ``` +//! +//! ## In executables +//! +//! Executables should choose a logging implementation and initialize it early in the +//! runtime of the program. Logging implementations will typically include a +//! function to do this. Any log messages generated before +//! the implementation is initialized will be ignored. +//! +//! The executable itself may use the `log` crate to log as well. +//! +//! ### Warning +//! +//! The logging system may only be initialized once. +//! +//! ## Structured logging +//! +//! If you enable the `kv` feature you can associate structured values +//! with your log records. If we take the example from before, we can include +//! some additional context besides what's in the formatted message: +//! +//! ``` +//! # use serde::Serialize; +//! # #[derive(Debug, Serialize)] pub struct Yak(String); +//! # impl Yak { fn shave(&mut self, _: u32) {} } +//! # fn find_a_razor() -> Result { Ok(1) } +//! # #[cfg(feature = "kv_serde")] +//! # fn main() { +//! use log::{info, warn}; +//! +//! pub fn shave_the_yak(yak: &mut Yak) { +//! info!(target: "yak_events", yak:serde; "Commencing yak shaving"); +//! +//! loop { +//! match find_a_razor() { +//! Ok(razor) => { +//! info!(razor; "Razor located"); +//! yak.shave(razor); +//! break; +//! } +//! Err(e) => { +//! warn!(e:err; "Unable to locate a razor, retrying"); +//! } +//! } +//! } +//! } +//! # } +//! # #[cfg(not(feature = "kv_serde"))] +//! # fn main() {} +//! ``` +//! +//! See the [`kv`] module documentation for more details. +//! +//! # Available logging implementations +//! +//! In order to produce log output executables have to use +//! a logger implementation compatible with the facade. +//! There are many available implementations to choose from, +//! here are some of the most popular ones: +//! +//! * Simple minimal loggers: +//! * [env_logger] +//! * [colog] +//! * [simple_logger] +//! * [simplelog] +//! * [pretty_env_logger] +//! * [stderrlog] +//! * [flexi_logger] +//! 
* [call_logger] +//! * [structured-logger] +//! * [clang_log] +//! * [ftail] +//! * Complex configurable frameworks: +//! * [log4rs] +//! * [logforth] +//! * [fern] +//! * [spdlog-rs] +//! * Adaptors for other facilities: +//! * [syslog] +//! * [slog-stdlog] +//! * [systemd-journal-logger] +//! * [android_log] +//! * [win_dbg_logger] +//! * [db_logger] +//! * [log-to-defmt] +//! * [logcontrol-log] +//! * For WebAssembly binaries: +//! * [console_log] +//! * For dynamic libraries: +//! * You may need to construct an FFI-safe wrapper over `log` to initialize in your libraries +//! * Utilities: +//! * [log_err] +//! * [log-reload] +//! +//! # Implementing a Logger +//! +//! Loggers implement the [`Log`] trait. Here's a very basic example that simply +//! logs all messages at the [`Error`][level_link], [`Warn`][level_link] or +//! [`Info`][level_link] levels to stdout: +//! +//! ``` +//! use log::{Record, Level, Metadata}; +//! +//! struct SimpleLogger; +//! +//! impl log::Log for SimpleLogger { +//! fn enabled(&self, metadata: &Metadata) -> bool { +//! metadata.level() <= Level::Info +//! } +//! +//! fn log(&self, record: &Record) { +//! if self.enabled(record.metadata()) { +//! println!("{} - {}", record.level(), record.args()); +//! } +//! } +//! +//! fn flush(&self) {} +//! } +//! +//! # fn main() {} +//! ``` +//! +//! Loggers are installed by calling the [`set_logger`] function. The maximum +//! log level also needs to be adjusted via the [`set_max_level`] function. The +//! logging facade uses this as an optimization to improve performance of log +//! messages at levels that are disabled. It's important to set it, as it +//! defaults to [`Off`][filter_link], so no log messages will ever be captured! +//! In the case of our example logger, we'll want to set the maximum log level +//! to [`Info`][filter_link], since we ignore any [`Debug`][level_link] or +//! [`Trace`][level_link] level log messages. A logging implementation should +//! provide a function that wraps a call to [`set_logger`] and +//! [`set_max_level`], handling initialization of the logger: +//! +//! ``` +//! # use log::{Level, Metadata}; +//! # struct SimpleLogger; +//! # impl log::Log for SimpleLogger { +//! # fn enabled(&self, _: &Metadata) -> bool { false } +//! # fn log(&self, _: &log::Record) {} +//! # fn flush(&self) {} +//! # } +//! # fn main() {} +//! use log::{SetLoggerError, LevelFilter}; +//! +//! static LOGGER: SimpleLogger = SimpleLogger; +//! +//! pub fn init() -> Result<(), SetLoggerError> { +//! log::set_logger(&LOGGER) +//! .map(|()| log::set_max_level(LevelFilter::Info)) +//! } +//! ``` +//! +//! Implementations that adjust their configurations at runtime should take care +//! to adjust the maximum log level as well. +//! +//! # Use with `std` +//! +//! `set_logger` requires you to provide a `&'static Log`, which can be hard to +//! obtain if your logger depends on some runtime configuration. The +//! `set_boxed_logger` function is available with the `std` Cargo feature. It is +//! identical to `set_logger` except that it takes a `Box` rather than a +//! `&'static Log`: +//! +//! ``` +//! # use log::{Level, LevelFilter, Log, SetLoggerError, Metadata}; +//! # struct SimpleLogger; +//! # impl log::Log for SimpleLogger { +//! # fn enabled(&self, _: &Metadata) -> bool { false } +//! # fn log(&self, _: &log::Record) {} +//! # fn flush(&self) {} +//! # } +//! # fn main() {} +//! # #[cfg(feature = "std")] +//! pub fn init() -> Result<(), SetLoggerError> { +//! log::set_boxed_logger(Box::new(SimpleLogger)) +//! 
.map(|()| log::set_max_level(LevelFilter::Info)) +//! } +//! ``` +//! +//! # Compile time filters +//! +//! Log levels can be statically disabled at compile time by enabling one of these Cargo features: +//! +//! * `max_level_off` +//! * `max_level_error` +//! * `max_level_warn` +//! * `max_level_info` +//! * `max_level_debug` +//! * `max_level_trace` +//! +//! Log invocations at disabled levels will be skipped and will not even be present in the +//! resulting binary. These features control the value of the `STATIC_MAX_LEVEL` constant. The +//! logging macros check this value before logging a message. By default, no levels are disabled. +//! +//! It is possible to override this level for release builds only with the following features: +//! +//! * `release_max_level_off` +//! * `release_max_level_error` +//! * `release_max_level_warn` +//! * `release_max_level_info` +//! * `release_max_level_debug` +//! * `release_max_level_trace` +//! +//! Libraries should avoid using the max level features because they're global and can't be changed +//! once they're set. +//! +//! For example, a crate can disable trace level logs in debug builds and trace, debug, and info +//! level logs in release builds with the following configuration: +//! +//! ```toml +//! [dependencies] +//! log = { version = "0.4", features = ["max_level_debug", "release_max_level_warn"] } +//! ``` +//! # Crate Feature Flags +//! +//! The following crate feature flags are available in addition to the filters. They are +//! configured in your `Cargo.toml`. +//! +//! * `std` allows use of `std` crate instead of the default `core`. Enables using `std::error` and +//! `set_boxed_logger` functionality. +//! * `serde` enables support for serialization and deserialization of `Level` and `LevelFilter`. +//! +//! ```toml +//! [dependencies] +//! log = { version = "0.4", features = ["std", "serde"] } +//! ``` +//! +//! # Version compatibility +//! +//! The 0.3 and 0.4 versions of the `log` crate are almost entirely compatible. Log messages +//! made using `log` 0.3 will forward transparently to a logger implementation using `log` 0.4. Log +//! messages made using `log` 0.4 will forward to a logger implementation using `log` 0.3, but the +//! module path and file name information associated with the message will unfortunately be lost. +//! +//! [`Log`]: trait.Log.html +//! [level_link]: enum.Level.html +//! [filter_link]: enum.LevelFilter.html +//! [`set_logger`]: fn.set_logger.html +//! [`set_max_level`]: fn.set_max_level.html +//! [`try_set_logger_raw`]: fn.try_set_logger_raw.html +//! [`shutdown_logger_raw`]: fn.shutdown_logger_raw.html +//! [env_logger]: https://docs.rs/env_logger/*/env_logger/ +//! [colog]: https://docs.rs/colog/*/colog/ +//! [simple_logger]: https://github.com/borntyping/rust-simple_logger +//! [simplelog]: https://github.com/drakulix/simplelog.rs +//! [pretty_env_logger]: https://docs.rs/pretty_env_logger/*/pretty_env_logger/ +//! [stderrlog]: https://docs.rs/stderrlog/*/stderrlog/ +//! [flexi_logger]: https://docs.rs/flexi_logger/*/flexi_logger/ +//! [call_logger]: https://docs.rs/call_logger/*/call_logger/ +//! [syslog]: https://docs.rs/syslog/*/syslog/ +//! [slog-stdlog]: https://docs.rs/slog-stdlog/*/slog_stdlog/ +//! [log4rs]: https://docs.rs/log4rs/*/log4rs/ +//! [logforth]: https://docs.rs/logforth/*/logforth/ +//! [fern]: https://docs.rs/fern/*/fern/ +//! [spdlog-rs]: https://docs.rs/spdlog-rs/*/spdlog/ +//! [systemd-journal-logger]: https://docs.rs/systemd-journal-logger/*/systemd_journal_logger/ +//! 
[android_log]: https://docs.rs/android_log/*/android_log/ +//! [win_dbg_logger]: https://docs.rs/win_dbg_logger/*/win_dbg_logger/ +//! [db_logger]: https://docs.rs/db_logger/*/db_logger/ +//! [log-to-defmt]: https://docs.rs/log-to-defmt/*/log_to_defmt/ +//! [console_log]: https://docs.rs/console_log/*/console_log/ +//! [structured-logger]: https://docs.rs/structured-logger/latest/structured_logger/ +//! [logcontrol-log]: https://docs.rs/logcontrol-log/*/logcontrol_log/ +//! [log_err]: https://docs.rs/log_err/*/log_err/ +//! [log-reload]: https://docs.rs/log-reload/*/log_reload/ +//! [clang_log]: https://docs.rs/clang_log/latest/clang_log +//! [ftail]: https://docs.rs/ftail/latest/ftail + +#![doc( + html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png", + html_favicon_url = "https://www.rust-lang.org/favicon.ico", + html_root_url = "https://docs.rs/log/0.4.28" +)] +#![warn(missing_docs)] +#![deny(missing_debug_implementations, unconditional_recursion)] +#![cfg_attr(all(not(feature = "std"), not(test)), no_std)] + +#[cfg(any( + all(feature = "max_level_off", feature = "max_level_error"), + all(feature = "max_level_off", feature = "max_level_warn"), + all(feature = "max_level_off", feature = "max_level_info"), + all(feature = "max_level_off", feature = "max_level_debug"), + all(feature = "max_level_off", feature = "max_level_trace"), + all(feature = "max_level_error", feature = "max_level_warn"), + all(feature = "max_level_error", feature = "max_level_info"), + all(feature = "max_level_error", feature = "max_level_debug"), + all(feature = "max_level_error", feature = "max_level_trace"), + all(feature = "max_level_warn", feature = "max_level_info"), + all(feature = "max_level_warn", feature = "max_level_debug"), + all(feature = "max_level_warn", feature = "max_level_trace"), + all(feature = "max_level_info", feature = "max_level_debug"), + all(feature = "max_level_info", feature = "max_level_trace"), + all(feature = "max_level_debug", feature = "max_level_trace"), +))] +compile_error!("multiple max_level_* features set"); + +#[rustfmt::skip] +#[cfg(any( + all(feature = "release_max_level_off", feature = "release_max_level_error"), + all(feature = "release_max_level_off", feature = "release_max_level_warn"), + all(feature = "release_max_level_off", feature = "release_max_level_info"), + all(feature = "release_max_level_off", feature = "release_max_level_debug"), + all(feature = "release_max_level_off", feature = "release_max_level_trace"), + all(feature = "release_max_level_error", feature = "release_max_level_warn"), + all(feature = "release_max_level_error", feature = "release_max_level_info"), + all(feature = "release_max_level_error", feature = "release_max_level_debug"), + all(feature = "release_max_level_error", feature = "release_max_level_trace"), + all(feature = "release_max_level_warn", feature = "release_max_level_info"), + all(feature = "release_max_level_warn", feature = "release_max_level_debug"), + all(feature = "release_max_level_warn", feature = "release_max_level_trace"), + all(feature = "release_max_level_info", feature = "release_max_level_debug"), + all(feature = "release_max_level_info", feature = "release_max_level_trace"), + all(feature = "release_max_level_debug", feature = "release_max_level_trace"), +))] +compile_error!("multiple release_max_level_* features set"); + +#[cfg(all(not(feature = "std"), not(test)))] +extern crate core as std; + +use std::cfg; +#[cfg(feature = "std")] +use std::error; +use std::str::FromStr; +use std::{cmp, fmt, 
mem}; + +#[macro_use] +mod macros; +mod serde; + +#[cfg(feature = "kv")] +pub mod kv; + +#[cfg(target_has_atomic = "ptr")] +use std::sync::atomic::{AtomicUsize, Ordering}; + +#[cfg(not(target_has_atomic = "ptr"))] +use std::cell::Cell; +#[cfg(not(target_has_atomic = "ptr"))] +use std::sync::atomic::Ordering; + +#[cfg(not(target_has_atomic = "ptr"))] +struct AtomicUsize { + v: Cell, +} + +#[cfg(not(target_has_atomic = "ptr"))] +impl AtomicUsize { + const fn new(v: usize) -> AtomicUsize { + AtomicUsize { v: Cell::new(v) } + } + + fn load(&self, _order: Ordering) -> usize { + self.v.get() + } + + fn store(&self, val: usize, _order: Ordering) { + self.v.set(val) + } +} + +// Any platform without atomics is unlikely to have multiple cores, so +// writing via Cell will not be a race condition. +#[cfg(not(target_has_atomic = "ptr"))] +unsafe impl Sync for AtomicUsize {} + +// The LOGGER static holds a pointer to the global logger. It is protected by +// the STATE static which determines whether LOGGER has been initialized yet. +static mut LOGGER: &dyn Log = &NopLogger; + +static STATE: AtomicUsize = AtomicUsize::new(0); + +// There are three different states that we care about: the logger's +// uninitialized, the logger's initializing (set_logger's been called but +// LOGGER hasn't actually been set yet), or the logger's active. +const UNINITIALIZED: usize = 0; +const INITIALIZING: usize = 1; +const INITIALIZED: usize = 2; + +static MAX_LOG_LEVEL_FILTER: AtomicUsize = AtomicUsize::new(0); + +static LOG_LEVEL_NAMES: [&str; 6] = ["OFF", "ERROR", "WARN", "INFO", "DEBUG", "TRACE"]; + +static SET_LOGGER_ERROR: &str = "attempted to set a logger after the logging system \ + was already initialized"; +static LEVEL_PARSE_ERROR: &str = + "attempted to convert a string that doesn't match an existing log level"; + +/// An enum representing the available verbosity levels of the logger. +/// +/// Typical usage includes: checking if a certain `Level` is enabled with +/// [`log_enabled!`](macro.log_enabled.html), specifying the `Level` of +/// [`log!`](macro.log.html), and comparing a `Level` directly to a +/// [`LevelFilter`](enum.LevelFilter.html). +#[repr(usize)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Debug, Hash)] +pub enum Level { + /// The "error" level. + /// + /// Designates very serious errors. + // This way these line up with the discriminants for LevelFilter below + // This works because Rust treats field-less enums the same way as C does: + // https://doc.rust-lang.org/reference/items/enumerations.html#custom-discriminant-values-for-field-less-enumerations + Error = 1, + /// The "warn" level. + /// + /// Designates hazardous situations. + Warn, + /// The "info" level. + /// + /// Designates useful information. + Info, + /// The "debug" level. + /// + /// Designates lower priority information. + Debug, + /// The "trace" level. + /// + /// Designates very low priority, often extremely verbose, information. 
+ Trace, +} + +impl PartialEq for Level { + #[inline] + fn eq(&self, other: &LevelFilter) -> bool { + *self as usize == *other as usize + } +} + +impl PartialOrd for Level { + #[inline] + fn partial_cmp(&self, other: &LevelFilter) -> Option { + Some((*self as usize).cmp(&(*other as usize))) + } +} + +impl FromStr for Level { + type Err = ParseLevelError; + fn from_str(level: &str) -> Result { + LOG_LEVEL_NAMES + .iter() + .position(|&name| name.eq_ignore_ascii_case(level)) + .into_iter() + .filter(|&idx| idx != 0) + .map(|idx| Level::from_usize(idx).unwrap()) + .next() + .ok_or(ParseLevelError(())) + } +} + +impl fmt::Display for Level { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.pad(self.as_str()) + } +} + +impl Level { + fn from_usize(u: usize) -> Option { + match u { + 1 => Some(Level::Error), + 2 => Some(Level::Warn), + 3 => Some(Level::Info), + 4 => Some(Level::Debug), + 5 => Some(Level::Trace), + _ => None, + } + } + + /// Returns the most verbose logging level. + #[inline] + pub fn max() -> Level { + Level::Trace + } + + /// Converts the `Level` to the equivalent `LevelFilter`. + #[inline] + pub fn to_level_filter(&self) -> LevelFilter { + LevelFilter::from_usize(*self as usize).unwrap() + } + + /// Returns the string representation of the `Level`. + /// + /// This returns the same string as the `fmt::Display` implementation. + pub fn as_str(&self) -> &'static str { + LOG_LEVEL_NAMES[*self as usize] + } + + /// Iterate through all supported logging levels. + /// + /// The order of iteration is from more severe to less severe log messages. + /// + /// # Examples + /// + /// ``` + /// use log::Level; + /// + /// let mut levels = Level::iter(); + /// + /// assert_eq!(Some(Level::Error), levels.next()); + /// assert_eq!(Some(Level::Trace), levels.last()); + /// ``` + pub fn iter() -> impl Iterator { + (1..6).map(|i| Self::from_usize(i).unwrap()) + } + + /// Get the next-highest `Level` from this one. + /// + /// If the current `Level` is at the highest level, the returned `Level` will be the same as the + /// current one. + /// + /// # Examples + /// + /// ``` + /// use log::Level; + /// + /// let level = Level::Info; + /// + /// assert_eq!(Level::Debug, level.increment_severity()); + /// assert_eq!(Level::Trace, level.increment_severity().increment_severity()); + /// assert_eq!(Level::Trace, level.increment_severity().increment_severity().increment_severity()); // max level + /// ``` + pub fn increment_severity(&self) -> Self { + let current = *self as usize; + Self::from_usize(current + 1).unwrap_or(*self) + } + + /// Get the next-lowest `Level` from this one. + /// + /// If the current `Level` is at the lowest level, the returned `Level` will be the same as the + /// current one. + /// + /// # Examples + /// + /// ``` + /// use log::Level; + /// + /// let level = Level::Info; + /// + /// assert_eq!(Level::Warn, level.decrement_severity()); + /// assert_eq!(Level::Error, level.decrement_severity().decrement_severity()); + /// assert_eq!(Level::Error, level.decrement_severity().decrement_severity().decrement_severity()); // min level + /// ``` + pub fn decrement_severity(&self) -> Self { + let current = *self as usize; + Self::from_usize(current.saturating_sub(1)).unwrap_or(*self) + } +} + +/// An enum representing the available verbosity level filters of the logger. +/// +/// A `LevelFilter` may be compared directly to a [`Level`]. Use this type +/// to get and set the maximum log level with [`max_level()`] and [`set_max_level`]. 
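+///
+/// For example (an illustrative sketch, relying on the cross-type `PartialOrd`
+/// impls between `Level` and `LevelFilter`):
+///
+/// ```
+/// use log::{Level, LevelFilter};
+///
+/// log::set_max_level(LevelFilter::Warn);
+///
+/// assert!(Level::Error <= log::max_level());
+/// assert!(Level::Debug > log::max_level());
+/// ```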
+/// +/// [`Level`]: enum.Level.html +/// [`max_level()`]: fn.max_level.html +/// [`set_max_level`]: fn.set_max_level.html +#[repr(usize)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Debug, Hash)] +pub enum LevelFilter { + /// A level lower than all log levels. + Off, + /// Corresponds to the `Error` log level. + Error, + /// Corresponds to the `Warn` log level. + Warn, + /// Corresponds to the `Info` log level. + Info, + /// Corresponds to the `Debug` log level. + Debug, + /// Corresponds to the `Trace` log level. + Trace, +} + +impl PartialEq for LevelFilter { + #[inline] + fn eq(&self, other: &Level) -> bool { + other.eq(self) + } +} + +impl PartialOrd for LevelFilter { + #[inline] + fn partial_cmp(&self, other: &Level) -> Option { + Some((*self as usize).cmp(&(*other as usize))) + } +} + +impl FromStr for LevelFilter { + type Err = ParseLevelError; + fn from_str(level: &str) -> Result { + LOG_LEVEL_NAMES + .iter() + .position(|&name| name.eq_ignore_ascii_case(level)) + .map(|p| LevelFilter::from_usize(p).unwrap()) + .ok_or(ParseLevelError(())) + } +} + +impl fmt::Display for LevelFilter { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.pad(self.as_str()) + } +} + +impl LevelFilter { + fn from_usize(u: usize) -> Option { + match u { + 0 => Some(LevelFilter::Off), + 1 => Some(LevelFilter::Error), + 2 => Some(LevelFilter::Warn), + 3 => Some(LevelFilter::Info), + 4 => Some(LevelFilter::Debug), + 5 => Some(LevelFilter::Trace), + _ => None, + } + } + + /// Returns the most verbose logging level filter. + #[inline] + pub fn max() -> LevelFilter { + LevelFilter::Trace + } + + /// Converts `self` to the equivalent `Level`. + /// + /// Returns `None` if `self` is `LevelFilter::Off`. + #[inline] + pub fn to_level(&self) -> Option { + Level::from_usize(*self as usize) + } + + /// Returns the string representation of the `LevelFilter`. + /// + /// This returns the same string as the `fmt::Display` implementation. + pub fn as_str(&self) -> &'static str { + LOG_LEVEL_NAMES[*self as usize] + } + + /// Iterate through all supported filtering levels. + /// + /// The order of iteration is from less to more verbose filtering. + /// + /// # Examples + /// + /// ``` + /// use log::LevelFilter; + /// + /// let mut levels = LevelFilter::iter(); + /// + /// assert_eq!(Some(LevelFilter::Off), levels.next()); + /// assert_eq!(Some(LevelFilter::Trace), levels.last()); + /// ``` + pub fn iter() -> impl Iterator { + (0..6).map(|i| Self::from_usize(i).unwrap()) + } + + /// Get the next-highest `LevelFilter` from this one. + /// + /// If the current `LevelFilter` is at the highest level, the returned `LevelFilter` will be the + /// same as the current one. + /// + /// # Examples + /// + /// ``` + /// use log::LevelFilter; + /// + /// let level_filter = LevelFilter::Info; + /// + /// assert_eq!(LevelFilter::Debug, level_filter.increment_severity()); + /// assert_eq!(LevelFilter::Trace, level_filter.increment_severity().increment_severity()); + /// assert_eq!(LevelFilter::Trace, level_filter.increment_severity().increment_severity().increment_severity()); // max level + /// ``` + pub fn increment_severity(&self) -> Self { + let current = *self as usize; + Self::from_usize(current + 1).unwrap_or(*self) + } + + /// Get the next-lowest `LevelFilter` from this one. + /// + /// If the current `LevelFilter` is at the lowest level, the returned `LevelFilter` will be the + /// same as the current one. 
+ /// + /// # Examples + /// + /// ``` + /// use log::LevelFilter; + /// + /// let level_filter = LevelFilter::Info; + /// + /// assert_eq!(LevelFilter::Warn, level_filter.decrement_severity()); + /// assert_eq!(LevelFilter::Error, level_filter.decrement_severity().decrement_severity()); + /// assert_eq!(LevelFilter::Off, level_filter.decrement_severity().decrement_severity().decrement_severity()); + /// assert_eq!(LevelFilter::Off, level_filter.decrement_severity().decrement_severity().decrement_severity().decrement_severity()); // min level + /// ``` + pub fn decrement_severity(&self) -> Self { + let current = *self as usize; + Self::from_usize(current.saturating_sub(1)).unwrap_or(*self) + } +} + +#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash, Debug)] +enum MaybeStaticStr<'a> { + Static(&'static str), + Borrowed(&'a str), +} + +impl<'a> MaybeStaticStr<'a> { + #[inline] + fn get(&self) -> &'a str { + match *self { + MaybeStaticStr::Static(s) => s, + MaybeStaticStr::Borrowed(s) => s, + } + } +} + +/// The "payload" of a log message. +/// +/// # Use +/// +/// `Record` structures are passed as parameters to the [`log`][method.log] +/// method of the [`Log`] trait. Logger implementors manipulate these +/// structures in order to display log messages. `Record`s are automatically +/// created by the [`log!`] macro and so are not seen by log users. +/// +/// Note that the [`level()`] and [`target()`] accessors are equivalent to +/// `self.metadata().level()` and `self.metadata().target()` respectively. +/// These methods are provided as a convenience for users of this structure. +/// +/// # Example +/// +/// The following example shows a simple logger that displays the level, +/// module path, and message of any `Record` that is passed to it. +/// +/// ``` +/// struct SimpleLogger; +/// +/// impl log::Log for SimpleLogger { +/// fn enabled(&self, _metadata: &log::Metadata) -> bool { +/// true +/// } +/// +/// fn log(&self, record: &log::Record) { +/// if !self.enabled(record.metadata()) { +/// return; +/// } +/// +/// println!("{}:{} -- {}", +/// record.level(), +/// record.target(), +/// record.args()); +/// } +/// fn flush(&self) {} +/// } +/// ``` +/// +/// [method.log]: trait.Log.html#tymethod.log +/// [`Log`]: trait.Log.html +/// [`log!`]: macro.log.html +/// [`level()`]: struct.Record.html#method.level +/// [`target()`]: struct.Record.html#method.target +#[derive(Clone, Debug)] +pub struct Record<'a> { + metadata: Metadata<'a>, + args: fmt::Arguments<'a>, + module_path: Option>, + file: Option>, + line: Option, + #[cfg(feature = "kv")] + key_values: KeyValues<'a>, +} + +// This wrapper type is only needed so we can +// `#[derive(Debug)]` on `Record`. It also +// provides a useful `Debug` implementation for +// the underlying `Source`. +#[cfg(feature = "kv")] +#[derive(Clone)] +struct KeyValues<'a>(&'a dyn kv::Source); + +#[cfg(feature = "kv")] +impl<'a> fmt::Debug for KeyValues<'a> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let mut visitor = f.debug_map(); + self.0.visit(&mut visitor).map_err(|_| fmt::Error)?; + visitor.finish() + } +} + +impl<'a> Record<'a> { + /// Returns a new builder. + #[inline] + pub fn builder() -> RecordBuilder<'a> { + RecordBuilder::new() + } + + /// The message body. + #[inline] + pub fn args(&self) -> &fmt::Arguments<'a> { + &self.args + } + + /// Metadata about the log directive. + #[inline] + pub fn metadata(&self) -> &Metadata<'a> { + &self.metadata + } + + /// The verbosity level of the message. 
+ #[inline] + pub fn level(&self) -> Level { + self.metadata.level() + } + + /// The name of the target of the directive. + #[inline] + pub fn target(&self) -> &'a str { + self.metadata.target() + } + + /// The module path of the message. + #[inline] + pub fn module_path(&self) -> Option<&'a str> { + self.module_path.map(|s| s.get()) + } + + /// The module path of the message, if it is a `'static` string. + #[inline] + pub fn module_path_static(&self) -> Option<&'static str> { + match self.module_path { + Some(MaybeStaticStr::Static(s)) => Some(s), + _ => None, + } + } + + /// The source file containing the message. + #[inline] + pub fn file(&self) -> Option<&'a str> { + self.file.map(|s| s.get()) + } + + /// The source file containing the message, if it is a `'static` string. + #[inline] + pub fn file_static(&self) -> Option<&'static str> { + match self.file { + Some(MaybeStaticStr::Static(s)) => Some(s), + _ => None, + } + } + + /// The line containing the message. + #[inline] + pub fn line(&self) -> Option { + self.line + } + + /// The structured key-value pairs associated with the message. + #[cfg(feature = "kv")] + #[inline] + pub fn key_values(&self) -> &dyn kv::Source { + self.key_values.0 + } + + /// Create a new [`RecordBuilder`](struct.RecordBuilder.html) based on this record. + #[cfg(feature = "kv")] + #[inline] + pub fn to_builder(&self) -> RecordBuilder { + RecordBuilder { + record: Record { + metadata: Metadata { + level: self.metadata.level, + target: self.metadata.target, + }, + args: self.args, + module_path: self.module_path, + file: self.file, + line: self.line, + key_values: self.key_values.clone(), + }, + } + } +} + +/// Builder for [`Record`](struct.Record.html). +/// +/// Typically should only be used by log library creators or for testing and "shim loggers". +/// The `RecordBuilder` can set the different parameters of `Record` object, and returns +/// the created object when `build` is called. +/// +/// # Examples +/// +/// ``` +/// use log::{Level, Record}; +/// +/// let record = Record::builder() +/// .args(format_args!("Error!")) +/// .level(Level::Error) +/// .target("myApp") +/// .file(Some("server.rs")) +/// .line(Some(144)) +/// .module_path(Some("server")) +/// .build(); +/// ``` +/// +/// Alternatively, use [`MetadataBuilder`](struct.MetadataBuilder.html): +/// +/// ``` +/// use log::{Record, Level, MetadataBuilder}; +/// +/// let error_metadata = MetadataBuilder::new() +/// .target("myApp") +/// .level(Level::Error) +/// .build(); +/// +/// let record = Record::builder() +/// .metadata(error_metadata) +/// .args(format_args!("Error!")) +/// .line(Some(433)) +/// .file(Some("app.rs")) +/// .module_path(Some("server")) +/// .build(); +/// ``` +#[derive(Debug)] +pub struct RecordBuilder<'a> { + record: Record<'a>, +} + +impl<'a> RecordBuilder<'a> { + /// Construct new `RecordBuilder`. 
+ /// + /// The default options are: + /// + /// - `args`: [`format_args!("")`] + /// - `metadata`: [`Metadata::builder().build()`] + /// - `module_path`: `None` + /// - `file`: `None` + /// - `line`: `None` + /// + /// [`format_args!("")`]: https://doc.rust-lang.org/std/macro.format_args.html + /// [`Metadata::builder().build()`]: struct.MetadataBuilder.html#method.build + #[inline] + pub fn new() -> RecordBuilder<'a> { + RecordBuilder { + record: Record { + args: format_args!(""), + metadata: Metadata::builder().build(), + module_path: None, + file: None, + line: None, + #[cfg(feature = "kv")] + key_values: KeyValues(&None::<(kv::Key, kv::Value)>), + }, + } + } + + /// Set [`args`](struct.Record.html#method.args). + #[inline] + pub fn args(&mut self, args: fmt::Arguments<'a>) -> &mut RecordBuilder<'a> { + self.record.args = args; + self + } + + /// Set [`metadata`](struct.Record.html#method.metadata). Construct a `Metadata` object with [`MetadataBuilder`](struct.MetadataBuilder.html). + #[inline] + pub fn metadata(&mut self, metadata: Metadata<'a>) -> &mut RecordBuilder<'a> { + self.record.metadata = metadata; + self + } + + /// Set [`Metadata::level`](struct.Metadata.html#method.level). + #[inline] + pub fn level(&mut self, level: Level) -> &mut RecordBuilder<'a> { + self.record.metadata.level = level; + self + } + + /// Set [`Metadata::target`](struct.Metadata.html#method.target) + #[inline] + pub fn target(&mut self, target: &'a str) -> &mut RecordBuilder<'a> { + self.record.metadata.target = target; + self + } + + /// Set [`module_path`](struct.Record.html#method.module_path) + #[inline] + pub fn module_path(&mut self, path: Option<&'a str>) -> &mut RecordBuilder<'a> { + self.record.module_path = path.map(MaybeStaticStr::Borrowed); + self + } + + /// Set [`module_path`](struct.Record.html#method.module_path) to a `'static` string + #[inline] + pub fn module_path_static(&mut self, path: Option<&'static str>) -> &mut RecordBuilder<'a> { + self.record.module_path = path.map(MaybeStaticStr::Static); + self + } + + /// Set [`file`](struct.Record.html#method.file) + #[inline] + pub fn file(&mut self, file: Option<&'a str>) -> &mut RecordBuilder<'a> { + self.record.file = file.map(MaybeStaticStr::Borrowed); + self + } + + /// Set [`file`](struct.Record.html#method.file) to a `'static` string. + #[inline] + pub fn file_static(&mut self, file: Option<&'static str>) -> &mut RecordBuilder<'a> { + self.record.file = file.map(MaybeStaticStr::Static); + self + } + + /// Set [`line`](struct.Record.html#method.line) + #[inline] + pub fn line(&mut self, line: Option) -> &mut RecordBuilder<'a> { + self.record.line = line; + self + } + + /// Set [`key_values`](struct.Record.html#method.key_values) + #[cfg(feature = "kv")] + #[inline] + pub fn key_values(&mut self, kvs: &'a dyn kv::Source) -> &mut RecordBuilder<'a> { + self.record.key_values = KeyValues(kvs); + self + } + + /// Invoke the builder and return a `Record` + #[inline] + pub fn build(&self) -> Record<'a> { + self.record.clone() + } +} + +impl Default for RecordBuilder<'_> { + fn default() -> Self { + Self::new() + } +} + +/// Metadata about a log message. +/// +/// # Use +/// +/// `Metadata` structs are created when users of the library use +/// logging macros. +/// +/// They are consumed by implementations of the `Log` trait in the +/// `enabled` method. +/// +/// `Record`s use `Metadata` to determine the log message's severity +/// and target. 
+/// +/// Users should use the `log_enabled!` macro in their code to avoid +/// constructing expensive log messages. +/// +/// # Examples +/// +/// ``` +/// use log::{Record, Level, Metadata}; +/// +/// struct MyLogger; +/// +/// impl log::Log for MyLogger { +/// fn enabled(&self, metadata: &Metadata) -> bool { +/// metadata.level() <= Level::Info +/// } +/// +/// fn log(&self, record: &Record) { +/// if self.enabled(record.metadata()) { +/// println!("{} - {}", record.level(), record.args()); +/// } +/// } +/// fn flush(&self) {} +/// } +/// +/// # fn main(){} +/// ``` +#[derive(Clone, Eq, PartialEq, Ord, PartialOrd, Hash, Debug)] +pub struct Metadata<'a> { + level: Level, + target: &'a str, +} + +impl<'a> Metadata<'a> { + /// Returns a new builder. + #[inline] + pub fn builder() -> MetadataBuilder<'a> { + MetadataBuilder::new() + } + + /// The verbosity level of the message. + #[inline] + pub fn level(&self) -> Level { + self.level + } + + /// The name of the target of the directive. + #[inline] + pub fn target(&self) -> &'a str { + self.target + } +} + +/// Builder for [`Metadata`](struct.Metadata.html). +/// +/// Typically should only be used by log library creators or for testing and "shim loggers". +/// The `MetadataBuilder` can set the different parameters of a `Metadata` object, and returns +/// the created object when `build` is called. +/// +/// # Example +/// +/// ``` +/// let target = "myApp"; +/// use log::{Level, MetadataBuilder}; +/// let metadata = MetadataBuilder::new() +/// .level(Level::Debug) +/// .target(target) +/// .build(); +/// ``` +#[derive(Eq, PartialEq, Ord, PartialOrd, Hash, Debug)] +pub struct MetadataBuilder<'a> { + metadata: Metadata<'a>, +} + +impl<'a> MetadataBuilder<'a> { + /// Construct a new `MetadataBuilder`. + /// + /// The default options are: + /// + /// - `level`: `Level::Info` + /// - `target`: `""` + #[inline] + pub fn new() -> MetadataBuilder<'a> { + MetadataBuilder { + metadata: Metadata { + level: Level::Info, + target: "", + }, + } + } + + /// Setter for [`level`](struct.Metadata.html#method.level). + #[inline] + pub fn level(&mut self, arg: Level) -> &mut MetadataBuilder<'a> { + self.metadata.level = arg; + self + } + + /// Setter for [`target`](struct.Metadata.html#method.target). + #[inline] + pub fn target(&mut self, target: &'a str) -> &mut MetadataBuilder<'a> { + self.metadata.target = target; + self + } + + /// Returns a `Metadata` object. + #[inline] + pub fn build(&self) -> Metadata<'a> { + self.metadata.clone() + } +} + +impl Default for MetadataBuilder<'_> { + fn default() -> Self { + Self::new() + } +} + +/// A trait encapsulating the operations required of a logger. +pub trait Log: Sync + Send { + /// Determines if a log message with the specified metadata would be + /// logged. + /// + /// This is used by the `log_enabled!` macro to allow callers to avoid + /// expensive computation of log message arguments if the message would be + /// discarded anyway. + /// + /// # For implementors + /// + /// This method isn't called automatically by the `log!` macros. + /// It's up to an implementation of the `Log` trait to call `enabled` in its own + /// `log` method implementation to guarantee that filtering is applied. + fn enabled(&self, metadata: &Metadata) -> bool; + + /// Logs the `Record`. + /// + /// # For implementors + /// + /// Note that `enabled` is *not* necessarily called before this method. + /// Implementations of `log` should perform all necessary filtering + /// internally. 
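+    ///
+    /// A typical implementation therefore begins with a guard such as
+    /// `if !self.enabled(record.metadata()) { return; }` before doing any other
+    /// work, as the `SimpleLogger` examples elsewhere in this documentation do.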
+ fn log(&self, record: &Record); + + /// Flushes any buffered records. + /// + /// # For implementors + /// + /// This method isn't called automatically by the `log!` macros. + /// It can be called manually on shut-down to ensure any in-flight records are flushed. + fn flush(&self); +} + +/// A dummy initial value for LOGGER. +struct NopLogger; + +impl Log for NopLogger { + fn enabled(&self, _: &Metadata) -> bool { + false + } + + fn log(&self, _: &Record) {} + fn flush(&self) {} +} + +impl Log for &'_ T +where + T: ?Sized + Log, +{ + fn enabled(&self, metadata: &Metadata) -> bool { + (**self).enabled(metadata) + } + + fn log(&self, record: &Record) { + (**self).log(record); + } + fn flush(&self) { + (**self).flush(); + } +} + +#[cfg(feature = "std")] +impl Log for std::boxed::Box +where + T: ?Sized + Log, +{ + fn enabled(&self, metadata: &Metadata) -> bool { + self.as_ref().enabled(metadata) + } + + fn log(&self, record: &Record) { + self.as_ref().log(record); + } + fn flush(&self) { + self.as_ref().flush(); + } +} + +#[cfg(feature = "std")] +impl Log for std::sync::Arc +where + T: ?Sized + Log, +{ + fn enabled(&self, metadata: &Metadata) -> bool { + self.as_ref().enabled(metadata) + } + + fn log(&self, record: &Record) { + self.as_ref().log(record); + } + fn flush(&self) { + self.as_ref().flush(); + } +} + +/// Sets the global maximum log level. +/// +/// Generally, this should only be called by the active logging implementation. +/// +/// Note that `Trace` is the maximum level, because it provides the maximum amount of detail in the emitted logs. +#[inline] +#[cfg(target_has_atomic = "ptr")] +pub fn set_max_level(level: LevelFilter) { + MAX_LOG_LEVEL_FILTER.store(level as usize, Ordering::Relaxed); +} + +/// A thread-unsafe version of [`set_max_level`]. +/// +/// This function is available on all platforms, even those that do not have +/// support for atomics that is needed by [`set_max_level`]. +/// +/// In almost all cases, [`set_max_level`] should be preferred. +/// +/// # Safety +/// +/// This function is only safe to call when it cannot race with any other +/// calls to `set_max_level` or `set_max_level_racy`. +/// +/// This can be upheld by (for example) making sure that **there are no other +/// threads**, and (on embedded) that **interrupts are disabled**. +/// +/// It is safe to use all other logging functions while this function runs +/// (including all logging macros). +/// +/// [`set_max_level`]: fn.set_max_level.html +#[inline] +pub unsafe fn set_max_level_racy(level: LevelFilter) { + // `MAX_LOG_LEVEL_FILTER` uses a `Cell` as the underlying primitive when a + // platform doesn't support `target_has_atomic = "ptr"`, so even though this looks the same + // as `set_max_level` it may have different safety properties. + MAX_LOG_LEVEL_FILTER.store(level as usize, Ordering::Relaxed); +} + +/// Returns the current maximum log level. +/// +/// The [`log!`], [`error!`], [`warn!`], [`info!`], [`debug!`], and [`trace!`] macros check +/// this value and discard any message logged at a higher level. The maximum +/// log level is set by the [`set_max_level`] function. 
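+///
+/// Code that bypasses the macros can use the returned filter as a cheap guard
+/// before building an expensive message; a minimal sketch:
+///
+/// ```
+/// use log::LevelFilter;
+///
+/// if log::max_level() >= LevelFilter::Debug {
+///     // Only assemble the message when debug output is actually enabled.
+///     let summary = format!("cache entries: {}", 42);
+///     log::debug!("{summary}");
+/// }
+/// ```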
+/// +/// [`log!`]: macro.log.html +/// [`error!`]: macro.error.html +/// [`warn!`]: macro.warn.html +/// [`info!`]: macro.info.html +/// [`debug!`]: macro.debug.html +/// [`trace!`]: macro.trace.html +/// [`set_max_level`]: fn.set_max_level.html +#[inline(always)] +pub fn max_level() -> LevelFilter { + // Since `LevelFilter` is `repr(usize)`, + // this transmute is sound if and only if `MAX_LOG_LEVEL_FILTER` + // is set to a usize that is a valid discriminant for `LevelFilter`. + // Since `MAX_LOG_LEVEL_FILTER` is private, the only time it's set + // is by `set_max_level` above, i.e. by casting a `LevelFilter` to `usize`. + // So any usize stored in `MAX_LOG_LEVEL_FILTER` is a valid discriminant. + unsafe { mem::transmute(MAX_LOG_LEVEL_FILTER.load(Ordering::Relaxed)) } +} + +/// Sets the global logger to a `Box`. +/// +/// This is a simple convenience wrapper over `set_logger`, which takes a +/// `Box` rather than a `&'static Log`. See the documentation for +/// [`set_logger`] for more details. +/// +/// Requires the `std` feature. +/// +/// # Errors +/// +/// An error is returned if a logger has already been set. +/// +/// [`set_logger`]: fn.set_logger.html +#[cfg(all(feature = "std", target_has_atomic = "ptr"))] +pub fn set_boxed_logger(logger: Box) -> Result<(), SetLoggerError> { + set_logger_inner(|| Box::leak(logger)) +} + +/// Sets the global logger to a `&'static Log`. +/// +/// This function may only be called once in the lifetime of a program. Any log +/// events that occur before the call to `set_logger` completes will be ignored. +/// +/// This function does not typically need to be called manually. Logger +/// implementations should provide an initialization method that installs the +/// logger internally. +/// +/// # Availability +/// +/// This method is available even when the `std` feature is disabled. However, +/// it is currently unavailable on `thumbv6` targets, which lack support for +/// some atomic operations which are used by this function. Even on those +/// targets, [`set_logger_racy`] will be available. +/// +/// # Errors +/// +/// An error is returned if a logger has already been set. 
+/// +/// # Examples +/// +/// ``` +/// use log::{error, info, warn, Record, Level, Metadata, LevelFilter}; +/// +/// static MY_LOGGER: MyLogger = MyLogger; +/// +/// struct MyLogger; +/// +/// impl log::Log for MyLogger { +/// fn enabled(&self, metadata: &Metadata) -> bool { +/// metadata.level() <= Level::Info +/// } +/// +/// fn log(&self, record: &Record) { +/// if self.enabled(record.metadata()) { +/// println!("{} - {}", record.level(), record.args()); +/// } +/// } +/// fn flush(&self) {} +/// } +/// +/// # fn main(){ +/// log::set_logger(&MY_LOGGER).unwrap(); +/// log::set_max_level(LevelFilter::Info); +/// +/// info!("hello log"); +/// warn!("warning"); +/// error!("oops"); +/// # } +/// ``` +/// +/// [`set_logger_racy`]: fn.set_logger_racy.html +#[cfg(target_has_atomic = "ptr")] +pub fn set_logger(logger: &'static dyn Log) -> Result<(), SetLoggerError> { + set_logger_inner(|| logger) +} + +#[cfg(target_has_atomic = "ptr")] +fn set_logger_inner(make_logger: F) -> Result<(), SetLoggerError> +where + F: FnOnce() -> &'static dyn Log, +{ + match STATE.compare_exchange( + UNINITIALIZED, + INITIALIZING, + Ordering::Acquire, + Ordering::Relaxed, + ) { + Ok(UNINITIALIZED) => { + unsafe { + LOGGER = make_logger(); + } + STATE.store(INITIALIZED, Ordering::Release); + Ok(()) + } + Err(INITIALIZING) => { + while STATE.load(Ordering::Relaxed) == INITIALIZING { + std::hint::spin_loop(); + } + Err(SetLoggerError(())) + } + _ => Err(SetLoggerError(())), + } +} + +/// A thread-unsafe version of [`set_logger`]. +/// +/// This function is available on all platforms, even those that do not have +/// support for atomics that is needed by [`set_logger`]. +/// +/// In almost all cases, [`set_logger`] should be preferred. +/// +/// # Safety +/// +/// This function is only safe to call when it cannot race with any other +/// calls to `set_logger` or `set_logger_racy`. +/// +/// This can be upheld by (for example) making sure that **there are no other +/// threads**, and (on embedded) that **interrupts are disabled**. +/// +/// It is safe to use other logging functions while this function runs +/// (including all logging macros). +/// +/// [`set_logger`]: fn.set_logger.html +pub unsafe fn set_logger_racy(logger: &'static dyn Log) -> Result<(), SetLoggerError> { + match STATE.load(Ordering::Acquire) { + UNINITIALIZED => { + LOGGER = logger; + STATE.store(INITIALIZED, Ordering::Release); + Ok(()) + } + INITIALIZING => { + // This is just plain UB, since we were racing another initialization function + unreachable!("set_logger_racy must not be used with other initialization functions") + } + _ => Err(SetLoggerError(())), + } +} + +/// The type returned by [`set_logger`] if [`set_logger`] has already been called. +/// +/// [`set_logger`]: fn.set_logger.html +#[allow(missing_copy_implementations)] +#[derive(Debug)] +pub struct SetLoggerError(()); + +impl fmt::Display for SetLoggerError { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.write_str(SET_LOGGER_ERROR) + } +} + +// The Error trait is not available in libcore +#[cfg(feature = "std")] +impl error::Error for SetLoggerError {} + +/// The type returned by [`from_str`] when the string doesn't match any of the log levels. 
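+///
+/// Parsing is case-insensitive, so for example:
+///
+/// ```
+/// use log::Level;
+///
+/// assert_eq!("warn".parse::<Level>(), Ok(Level::Warn));
+/// assert_eq!("WARN".parse::<Level>(), Ok(Level::Warn));
+/// assert!("verbose".parse::<Level>().is_err());
+/// ```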
+/// +/// [`from_str`]: https://doc.rust-lang.org/std/str/trait.FromStr.html#tymethod.from_str +#[allow(missing_copy_implementations)] +#[derive(Debug, PartialEq, Eq)] +pub struct ParseLevelError(()); + +impl fmt::Display for ParseLevelError { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.write_str(LEVEL_PARSE_ERROR) + } +} + +// The Error trait is not available in libcore +#[cfg(feature = "std")] +impl error::Error for ParseLevelError {} + +/// Returns a reference to the logger. +/// +/// If a logger has not been set, a no-op implementation is returned. +pub fn logger() -> &'static dyn Log { + // Acquire memory ordering guarantees that current thread would see any + // memory writes that happened before store of the value + // into `STATE` with memory ordering `Release` or stronger. + // + // Since the value `INITIALIZED` is written only after `LOGGER` was + // initialized, observing it after `Acquire` load here makes both + // write to the `LOGGER` static and initialization of the logger + // internal state synchronized with current thread. + if STATE.load(Ordering::Acquire) != INITIALIZED { + static NOP: NopLogger = NopLogger; + &NOP + } else { + unsafe { LOGGER } + } +} + +// WARNING: this is not part of the crate's public API and is subject to change at any time +#[doc(hidden)] +pub mod __private_api; + +/// The statically resolved maximum log level. +/// +/// See the crate level documentation for information on how to configure this. +/// +/// This value is checked by the log macros, but not by the `Log`ger returned by +/// the [`logger`] function. Code that manually calls functions on that value +/// should compare the level against this value. +/// +/// [`logger`]: fn.logger.html +pub const STATIC_MAX_LEVEL: LevelFilter = match cfg!(debug_assertions) { + false if cfg!(feature = "release_max_level_off") => LevelFilter::Off, + false if cfg!(feature = "release_max_level_error") => LevelFilter::Error, + false if cfg!(feature = "release_max_level_warn") => LevelFilter::Warn, + false if cfg!(feature = "release_max_level_info") => LevelFilter::Info, + false if cfg!(feature = "release_max_level_debug") => LevelFilter::Debug, + false if cfg!(feature = "release_max_level_trace") => LevelFilter::Trace, + _ if cfg!(feature = "max_level_off") => LevelFilter::Off, + _ if cfg!(feature = "max_level_error") => LevelFilter::Error, + _ if cfg!(feature = "max_level_warn") => LevelFilter::Warn, + _ if cfg!(feature = "max_level_info") => LevelFilter::Info, + _ if cfg!(feature = "max_level_debug") => LevelFilter::Debug, + _ => LevelFilter::Trace, +}; + +#[cfg(test)] +mod tests { + use super::{Level, LevelFilter, ParseLevelError, STATIC_MAX_LEVEL}; + + #[test] + fn test_levelfilter_from_str() { + let tests = [ + ("off", Ok(LevelFilter::Off)), + ("error", Ok(LevelFilter::Error)), + ("warn", Ok(LevelFilter::Warn)), + ("info", Ok(LevelFilter::Info)), + ("debug", Ok(LevelFilter::Debug)), + ("trace", Ok(LevelFilter::Trace)), + ("OFF", Ok(LevelFilter::Off)), + ("ERROR", Ok(LevelFilter::Error)), + ("WARN", Ok(LevelFilter::Warn)), + ("INFO", Ok(LevelFilter::Info)), + ("DEBUG", Ok(LevelFilter::Debug)), + ("TRACE", Ok(LevelFilter::Trace)), + ("asdf", Err(ParseLevelError(()))), + ]; + for &(s, ref expected) in &tests { + assert_eq!(expected, &s.parse()); + } + } + + #[test] + fn test_level_from_str() { + let tests = [ + ("OFF", Err(ParseLevelError(()))), + ("error", Ok(Level::Error)), + ("warn", Ok(Level::Warn)), + ("info", Ok(Level::Info)), + ("debug", Ok(Level::Debug)), + ("trace", 
Ok(Level::Trace)), + ("ERROR", Ok(Level::Error)), + ("WARN", Ok(Level::Warn)), + ("INFO", Ok(Level::Info)), + ("DEBUG", Ok(Level::Debug)), + ("TRACE", Ok(Level::Trace)), + ("asdf", Err(ParseLevelError(()))), + ]; + for &(s, ref expected) in &tests { + assert_eq!(expected, &s.parse()); + } + } + + #[test] + fn test_level_as_str() { + let tests = &[ + (Level::Error, "ERROR"), + (Level::Warn, "WARN"), + (Level::Info, "INFO"), + (Level::Debug, "DEBUG"), + (Level::Trace, "TRACE"), + ]; + for (input, expected) in tests { + assert_eq!(*expected, input.as_str()); + } + } + + #[test] + fn test_level_show() { + assert_eq!("INFO", Level::Info.to_string()); + assert_eq!("ERROR", Level::Error.to_string()); + } + + #[test] + fn test_levelfilter_show() { + assert_eq!("OFF", LevelFilter::Off.to_string()); + assert_eq!("ERROR", LevelFilter::Error.to_string()); + } + + #[test] + fn test_cross_cmp() { + assert!(Level::Debug > LevelFilter::Error); + assert!(LevelFilter::Warn < Level::Trace); + assert!(LevelFilter::Off < Level::Error); + } + + #[test] + fn test_cross_eq() { + assert!(Level::Error == LevelFilter::Error); + assert!(LevelFilter::Off != Level::Error); + assert!(Level::Trace == LevelFilter::Trace); + } + + #[test] + fn test_to_level() { + assert_eq!(Some(Level::Error), LevelFilter::Error.to_level()); + assert_eq!(None, LevelFilter::Off.to_level()); + assert_eq!(Some(Level::Debug), LevelFilter::Debug.to_level()); + } + + #[test] + fn test_to_level_filter() { + assert_eq!(LevelFilter::Error, Level::Error.to_level_filter()); + assert_eq!(LevelFilter::Trace, Level::Trace.to_level_filter()); + } + + #[test] + fn test_level_filter_as_str() { + let tests = &[ + (LevelFilter::Off, "OFF"), + (LevelFilter::Error, "ERROR"), + (LevelFilter::Warn, "WARN"), + (LevelFilter::Info, "INFO"), + (LevelFilter::Debug, "DEBUG"), + (LevelFilter::Trace, "TRACE"), + ]; + for (input, expected) in tests { + assert_eq!(*expected, input.as_str()); + } + } + + #[test] + fn test_level_up() { + let info = Level::Info; + let up = info.increment_severity(); + assert_eq!(up, Level::Debug); + + let trace = Level::Trace; + let up = trace.increment_severity(); + // trace is already highest level + assert_eq!(up, trace); + } + + #[test] + fn test_level_filter_up() { + let info = LevelFilter::Info; + let up = info.increment_severity(); + assert_eq!(up, LevelFilter::Debug); + + let trace = LevelFilter::Trace; + let up = trace.increment_severity(); + // trace is already highest level + assert_eq!(up, trace); + } + + #[test] + fn test_level_down() { + let info = Level::Info; + let down = info.decrement_severity(); + assert_eq!(down, Level::Warn); + + let error = Level::Error; + let down = error.decrement_severity(); + // error is already lowest level + assert_eq!(down, error); + } + + #[test] + fn test_level_filter_down() { + let info = LevelFilter::Info; + let down = info.decrement_severity(); + assert_eq!(down, LevelFilter::Warn); + + let error = LevelFilter::Error; + let down = error.decrement_severity(); + assert_eq!(down, LevelFilter::Off); + // Off is already the lowest + assert_eq!(down.decrement_severity(), down); + } + + #[test] + #[cfg_attr(not(debug_assertions), ignore)] + fn test_static_max_level_debug() { + if cfg!(feature = "max_level_off") { + assert_eq!(STATIC_MAX_LEVEL, LevelFilter::Off); + } else if cfg!(feature = "max_level_error") { + assert_eq!(STATIC_MAX_LEVEL, LevelFilter::Error); + } else if cfg!(feature = "max_level_warn") { + assert_eq!(STATIC_MAX_LEVEL, LevelFilter::Warn); + } else if cfg!(feature = 
"max_level_info") { + assert_eq!(STATIC_MAX_LEVEL, LevelFilter::Info); + } else if cfg!(feature = "max_level_debug") { + assert_eq!(STATIC_MAX_LEVEL, LevelFilter::Debug); + } else { + assert_eq!(STATIC_MAX_LEVEL, LevelFilter::Trace); + } + } + + #[test] + #[cfg_attr(debug_assertions, ignore)] + fn test_static_max_level_release() { + if cfg!(feature = "release_max_level_off") { + assert_eq!(STATIC_MAX_LEVEL, LevelFilter::Off); + } else if cfg!(feature = "release_max_level_error") { + assert_eq!(STATIC_MAX_LEVEL, LevelFilter::Error); + } else if cfg!(feature = "release_max_level_warn") { + assert_eq!(STATIC_MAX_LEVEL, LevelFilter::Warn); + } else if cfg!(feature = "release_max_level_info") { + assert_eq!(STATIC_MAX_LEVEL, LevelFilter::Info); + } else if cfg!(feature = "release_max_level_debug") { + assert_eq!(STATIC_MAX_LEVEL, LevelFilter::Debug); + } else if cfg!(feature = "release_max_level_trace") { + assert_eq!(STATIC_MAX_LEVEL, LevelFilter::Trace); + } else if cfg!(feature = "max_level_off") { + assert_eq!(STATIC_MAX_LEVEL, LevelFilter::Off); + } else if cfg!(feature = "max_level_error") { + assert_eq!(STATIC_MAX_LEVEL, LevelFilter::Error); + } else if cfg!(feature = "max_level_warn") { + assert_eq!(STATIC_MAX_LEVEL, LevelFilter::Warn); + } else if cfg!(feature = "max_level_info") { + assert_eq!(STATIC_MAX_LEVEL, LevelFilter::Info); + } else if cfg!(feature = "max_level_debug") { + assert_eq!(STATIC_MAX_LEVEL, LevelFilter::Debug); + } else { + assert_eq!(STATIC_MAX_LEVEL, LevelFilter::Trace); + } + } + + #[test] + #[cfg(feature = "std")] + fn test_error_trait() { + use super::SetLoggerError; + let e = SetLoggerError(()); + assert_eq!( + &e.to_string(), + "attempted to set a logger after the logging system \ + was already initialized" + ); + } + + #[test] + fn test_metadata_builder() { + use super::MetadataBuilder; + let target = "myApp"; + let metadata_test = MetadataBuilder::new() + .level(Level::Debug) + .target(target) + .build(); + assert_eq!(metadata_test.level(), Level::Debug); + assert_eq!(metadata_test.target(), "myApp"); + } + + #[test] + fn test_metadata_convenience_builder() { + use super::Metadata; + let target = "myApp"; + let metadata_test = Metadata::builder() + .level(Level::Debug) + .target(target) + .build(); + assert_eq!(metadata_test.level(), Level::Debug); + assert_eq!(metadata_test.target(), "myApp"); + } + + #[test] + fn test_record_builder() { + use super::{MetadataBuilder, RecordBuilder}; + let target = "myApp"; + let metadata = MetadataBuilder::new().target(target).build(); + let fmt_args = format_args!("hello"); + let record_test = RecordBuilder::new() + .args(fmt_args) + .metadata(metadata) + .module_path(Some("foo")) + .file(Some("bar")) + .line(Some(30)) + .build(); + assert_eq!(record_test.metadata().target(), "myApp"); + assert_eq!(record_test.module_path(), Some("foo")); + assert_eq!(record_test.file(), Some("bar")); + assert_eq!(record_test.line(), Some(30)); + } + + #[test] + fn test_record_convenience_builder() { + use super::{Metadata, Record}; + let target = "myApp"; + let metadata = Metadata::builder().target(target).build(); + let fmt_args = format_args!("hello"); + let record_test = Record::builder() + .args(fmt_args) + .metadata(metadata) + .module_path(Some("foo")) + .file(Some("bar")) + .line(Some(30)) + .build(); + assert_eq!(record_test.target(), "myApp"); + assert_eq!(record_test.module_path(), Some("foo")); + assert_eq!(record_test.file(), Some("bar")); + assert_eq!(record_test.line(), Some(30)); + } + + #[test] + fn 
test_record_complete_builder() { + use super::{Level, Record}; + let target = "myApp"; + let record_test = Record::builder() + .module_path(Some("foo")) + .file(Some("bar")) + .line(Some(30)) + .target(target) + .level(Level::Error) + .build(); + assert_eq!(record_test.target(), "myApp"); + assert_eq!(record_test.level(), Level::Error); + assert_eq!(record_test.module_path(), Some("foo")); + assert_eq!(record_test.file(), Some("bar")); + assert_eq!(record_test.line(), Some(30)); + } + + #[test] + #[cfg(feature = "kv")] + fn test_record_key_values_builder() { + use super::Record; + use crate::kv::{self, VisitSource}; + + struct TestVisitSource { + seen_pairs: usize, + } + + impl<'kvs> VisitSource<'kvs> for TestVisitSource { + fn visit_pair( + &mut self, + _: kv::Key<'kvs>, + _: kv::Value<'kvs>, + ) -> Result<(), kv::Error> { + self.seen_pairs += 1; + Ok(()) + } + } + + let kvs: &[(&str, i32)] = &[("a", 1), ("b", 2)]; + let record_test = Record::builder().key_values(&kvs).build(); + + let mut visitor = TestVisitSource { seen_pairs: 0 }; + + record_test.key_values().visit(&mut visitor).unwrap(); + + assert_eq!(2, visitor.seen_pairs); + } + + #[test] + #[cfg(feature = "kv")] + fn test_record_key_values_get_coerce() { + use super::Record; + + let kvs: &[(&str, &str)] = &[("a", "1"), ("b", "2")]; + let record = Record::builder().key_values(&kvs).build(); + + assert_eq!( + "2", + record + .key_values() + .get("b".into()) + .expect("missing key") + .to_borrowed_str() + .expect("invalid value") + ); + } + + // Test that the `impl Log for Foo` blocks work + // This test mostly operates on a type level, so failures will be compile errors + #[test] + fn test_foreign_impl() { + use super::Log; + #[cfg(feature = "std")] + use std::sync::Arc; + + fn assert_is_log() {} + + assert_is_log::<&dyn Log>(); + + #[cfg(feature = "std")] + assert_is_log::>(); + + #[cfg(feature = "std")] + assert_is_log::>(); + + // Assert these statements for all T: Log + ?Sized + #[allow(unused)] + fn forall() { + #[cfg(feature = "std")] + assert_is_log::>(); + + assert_is_log::<&T>(); + + #[cfg(feature = "std")] + assert_is_log::>(); + } + } +} diff --git a/vendor/log/src/macros.rs b/vendor/log/src/macros.rs new file mode 100644 index 00000000000000..14e4ac64ba72a8 --- /dev/null +++ b/vendor/log/src/macros.rs @@ -0,0 +1,579 @@ +// Copyright 2014-2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +/// The standard logging macro. +/// +/// This macro will generically log with the specified `Level` and `format!` +/// based argument list. +/// +/// ``` +/// use log::{log, Level}; +/// +/// let data = (42, "Forty-two"); +/// let private_data = "private"; +/// +/// log!(Level::Error, "Received errors: {}, {}", data.0, data.1); +/// ``` +/// +/// Optionally, you can specify a `target` argument to attach a specific target +/// to the log record. By default, the target is the module path of the caller. 
+/// +/// ``` +/// use log::{log, Level}; +/// +/// let data = (42, "Forty-two"); +/// let private_data = "private"; +/// +/// log!( +/// target: "app_events", +/// Level::Error, +/// "Received errors: {}, {}", +/// data.0, data.1 +/// ); +/// ``` +/// +/// And optionally, you can specify a `logger` argument to use a specific logger +/// instead of the default global logger. +/// +/// ``` +/// # struct MyLogger {} +/// # impl Log for MyLogger { +/// # fn enabled(&self, _metadata: &log::Metadata) -> bool { +/// # false +/// # } +/// # fn log(&self, _record: &log::Record) {} +/// # fn flush(&self) {} +/// # } +/// use log::{log, Level, Log}; +/// +/// let data = (42, "Forty-two"); +/// let private_data = "private"; +/// +/// let my_logger = MyLogger {}; +/// log!( +/// logger: my_logger, +/// Level::Error, +/// "Received errors: {}, {}", +/// data.0, data.1 +/// ); +/// ``` +/// +/// The `logger` argument accepts a value that implements the `Log` trait. The value +/// will be borrowed within the macro. +/// +/// Note that the global level set via Cargo features, or through `set_max_level` will +/// still apply, even when a custom logger is supplied with the `logger` argument. +#[macro_export] +#[clippy::format_args] +macro_rules! log { + // log!(logger: my_logger, target: "my_target", Level::Info, "a {} event", "log"); + (logger: $logger:expr, target: $target:expr, $lvl:expr, $($arg:tt)+) => ({ + $crate::__log!( + logger: $crate::__log_logger!($logger), + target: $target, + $lvl, + $($arg)+ + ) + }); + + // log!(logger: my_logger, Level::Info, "a log event") + (logger: $logger:expr, $lvl:expr, $($arg:tt)+) => ({ + $crate::__log!( + logger: $crate::__log_logger!($logger), + target: $crate::__private_api::module_path!(), + $lvl, + $($arg)+ + ) + }); + + // log!(target: "my_target", Level::Info, "a log event") + (target: $target:expr, $lvl:expr, $($arg:tt)+) => ({ + $crate::__log!( + logger: $crate::__log_logger!(__log_global_logger), + target: $target, + $lvl, + $($arg)+ + ) + }); + + // log!(Level::Info, "a log event") + ($lvl:expr, $($arg:tt)+) => ({ + $crate::__log!( + logger: $crate::__log_logger!(__log_global_logger), + target: $crate::__private_api::module_path!(), + $lvl, + $($arg)+ + ) + }); +} + +#[doc(hidden)] +#[macro_export] +macro_rules! __log { + // log!(logger: my_logger, target: "my_target", Level::Info, key1:? = 42, key2 = true; "a {} event", "log"); + (logger: $logger:expr, target: $target:expr, $lvl:expr, $($key:tt $(:$capture:tt)? $(= $value:expr)?),+; $($arg:tt)+) => ({ + let lvl = $lvl; + if lvl <= $crate::STATIC_MAX_LEVEL && lvl <= $crate::max_level() { + $crate::__private_api::log( + $logger, + $crate::__private_api::format_args!($($arg)+), + lvl, + &($target, $crate::__private_api::module_path!(), $crate::__private_api::loc()), + &[$(($crate::__log_key!($key), $crate::__log_value!($key $(:$capture)* = $($value)*))),+] as &[_], + ); + } + }); + + // log!(logger: my_logger, target: "my_target", Level::Info, "a {} event", "log"); + (logger: $logger:expr, target: $target:expr, $lvl:expr, $($arg:tt)+) => ({ + let lvl = $lvl; + if lvl <= $crate::STATIC_MAX_LEVEL && lvl <= $crate::max_level() { + $crate::__private_api::log( + $logger, + $crate::__private_api::format_args!($($arg)+), + lvl, + &($target, $crate::__private_api::module_path!(), $crate::__private_api::loc()), + (), + ); + } + }); +} + +/// Logs a message at the error level. 
+/// +/// # Examples +/// +/// ``` +/// use log::error; +/// +/// # let my_logger = log::__private_api::GlobalLogger; +/// let (err_info, port) = ("No connection", 22); +/// +/// error!("Error: {err_info} on port {port}"); +/// error!(target: "app_events", "App Error: {err_info}, Port: {port}"); +/// error!(logger: my_logger, "App Error: {err_info}, Port: {port}"); +/// ``` +#[macro_export] +#[clippy::format_args] +macro_rules! error { + // error!(logger: my_logger, target: "my_target", key1 = 42, key2 = true; "a {} event", "log") + // error!(logger: my_logger, target: "my_target", "a {} event", "log") + (logger: $logger:expr, target: $target:expr, $($arg:tt)+) => ({ + $crate::log!(logger: $crate::__log_logger!($logger), target: $target, $crate::Level::Error, $($arg)+) + }); + + // error!(logger: my_logger, key1 = 42, key2 = true; "a {} event", "log") + // error!(logger: my_logger, "a {} event", "log") + (logger: $logger:expr, $($arg:tt)+) => ({ + $crate::log!(logger: $crate::__log_logger!($logger), $crate::Level::Error, $($arg)+) + }); + + // error!(target: "my_target", key1 = 42, key2 = true; "a {} event", "log") + // error!(target: "my_target", "a {} event", "log") + (target: $target:expr, $($arg:tt)+) => ({ + $crate::log!(target: $target, $crate::Level::Error, $($arg)+) + }); + + // error!("a {} event", "log") + ($($arg:tt)+) => ($crate::log!($crate::Level::Error, $($arg)+)) +} + +/// Logs a message at the warn level. +/// +/// # Examples +/// +/// ``` +/// use log::warn; +/// +/// # let my_logger = log::__private_api::GlobalLogger; +/// let warn_description = "Invalid Input"; +/// +/// warn!("Warning! {warn_description}!"); +/// warn!(target: "input_events", "App received warning: {warn_description}"); +/// warn!(logger: my_logger, "App received warning: {warn_description}"); +/// ``` +#[macro_export] +#[clippy::format_args] +macro_rules! warn { + // warn!(logger: my_logger, target: "my_target", key1 = 42, key2 = true; "a {} event", "log") + // warn!(logger: my_logger, target: "my_target", "a {} event", "log") + (logger: $logger:expr, target: $target:expr, $($arg:tt)+) => ({ + $crate::log!(logger: $crate::__log_logger!($logger), target: $target, $crate::Level::Warn, $($arg)+) + }); + + // warn!(logger: my_logger, key1 = 42, key2 = true; "a {} event", "log") + // warn!(logger: my_logger, "a {} event", "log") + (logger: $logger:expr, $($arg:tt)+) => ({ + $crate::log!(logger: $crate::__log_logger!($logger), $crate::Level::Warn, $($arg)+) + }); + + // warn!(target: "my_target", key1 = 42, key2 = true; "a {} event", "log") + // warn!(target: "my_target", "a {} event", "log") + (target: $target:expr, $($arg:tt)+) => ({ + $crate::log!(target: $target, $crate::Level::Warn, $($arg)+) + }); + + // warn!("a {} event", "log") + ($($arg:tt)+) => ($crate::log!($crate::Level::Warn, $($arg)+)) +} + +/// Logs a message at the info level. +/// +/// # Examples +/// +/// ``` +/// use log::info; +/// +/// # let my_logger = log::__private_api::GlobalLogger; +/// # struct Connection { port: u32, speed: f32 } +/// let conn_info = Connection { port: 40, speed: 3.20 }; +/// +/// info!("Connected to port {} at {} Mb/s", conn_info.port, conn_info.speed); +/// info!( +/// target: "connection_events", +/// "Successful connection, port: {}, speed: {}", +/// conn_info.port, conn_info.speed +/// ); +/// info!( +/// logger: my_logger, +/// "Successful connection, port: {}, speed: {}", +/// conn_info.port, conn_info.speed +/// ); +/// ``` +#[macro_export] +#[clippy::format_args] +macro_rules! 
info { + // info!(logger: my_logger, target: "my_target", key1 = 42, key2 = true; "a {} event", "log") + // info!(logger: my_logger, target: "my_target", "a {} event", "log") + (logger: $logger:expr, target: $target:expr, $($arg:tt)+) => ({ + $crate::log!(logger: $crate::__log_logger!($logger), target: $target, $crate::Level::Info, $($arg)+) + }); + + // info!(logger: my_logger, key1 = 42, key2 = true; "a {} event", "log") + // info!(logger: my_logger, "a {} event", "log") + (logger: $logger:expr, $($arg:tt)+) => ({ + $crate::log!(logger: $crate::__log_logger!($logger), $crate::Level::Info, $($arg)+) + }); + + // info!(target: "my_target", key1 = 42, key2 = true; "a {} event", "log") + // info!(target: "my_target", "a {} event", "log") + (target: $target:expr, $($arg:tt)+) => ({ + $crate::log!(target: $target, $crate::Level::Info, $($arg)+) + }); + + // info!("a {} event", "log") + ($($arg:tt)+) => ($crate::log!($crate::Level::Info, $($arg)+)) +} + +/// Logs a message at the debug level. +/// +/// # Examples +/// +/// ``` +/// use log::debug; +/// +/// # let my_logger = log::__private_api::GlobalLogger; +/// # struct Position { x: f32, y: f32 } +/// let pos = Position { x: 3.234, y: -1.223 }; +/// +/// debug!("New position: x: {}, y: {}", pos.x, pos.y); +/// debug!(target: "app_events", "New position: x: {}, y: {}", pos.x, pos.y); +/// debug!(logger: my_logger, "New position: x: {}, y: {}", pos.x, pos.y); +/// ``` +#[macro_export] +#[clippy::format_args] +macro_rules! debug { + // debug!(logger: my_logger, target: "my_target", key1 = 42, key2 = true; "a {} event", "log") + // debug!(logger: my_logger, target: "my_target", "a {} event", "log") + (logger: $logger:expr, target: $target:expr, $($arg:tt)+) => ({ + $crate::log!(logger: $crate::__log_logger!($logger), target: $target, $crate::Level::Debug, $($arg)+) + }); + + // debug!(logger: my_logger, key1 = 42, key2 = true; "a {} event", "log") + // debug!(logger: my_logger, "a {} event", "log") + (logger: $logger:expr, $($arg:tt)+) => ({ + $crate::log!(logger: $crate::__log_logger!($logger), $crate::Level::Debug, $($arg)+) + }); + + // debug!(target: "my_target", key1 = 42, key2 = true; "a {} event", "log") + // debug!(target: "my_target", "a {} event", "log") + (target: $target:expr, $($arg:tt)+) => ({ + $crate::log!(target: $target, $crate::Level::Debug, $($arg)+) + }); + + // debug!("a {} event", "log") + ($($arg:tt)+) => ($crate::log!($crate::Level::Debug, $($arg)+)) +} + +/// Logs a message at the trace level. +/// +/// # Examples +/// +/// ``` +/// use log::trace; +/// +/// # let my_logger = log::__private_api::GlobalLogger; +/// # struct Position { x: f32, y: f32 } +/// let pos = Position { x: 3.234, y: -1.223 }; +/// +/// trace!("Position is: x: {}, y: {}", pos.x, pos.y); +/// trace!(target: "app_events", "x is {} and y is {}", +/// if pos.x >= 0.0 { "positive" } else { "negative" }, +/// if pos.y >= 0.0 { "positive" } else { "negative" }); +/// trace!(logger: my_logger, "x is {} and y is {}", +/// if pos.x >= 0.0 { "positive" } else { "negative" }, +/// if pos.y >= 0.0 { "positive" } else { "negative" }); +/// ``` +#[macro_export] +#[clippy::format_args] +macro_rules! 
trace { + // trace!(logger: my_logger, target: "my_target", key1 = 42, key2 = true; "a {} event", "log") + // trace!(logger: my_logger, target: "my_target", "a {} event", "log") + (logger: $logger:expr, target: $target:expr, $($arg:tt)+) => ({ + $crate::log!(logger: $crate::__log_logger!($logger), target: $target, $crate::Level::Trace, $($arg)+) + }); + + // trace!(logger: my_logger, key1 = 42, key2 = true; "a {} event", "log") + // trace!(logger: my_logger, "a {} event", "log") + (logger: $logger:expr, $($arg:tt)+) => ({ + $crate::log!(logger: $crate::__log_logger!($logger), $crate::Level::Trace, $($arg)+) + }); + + // trace!(target: "my_target", key1 = 42, key2 = true; "a {} event", "log") + // trace!(target: "my_target", "a {} event", "log") + (target: $target:expr, $($arg:tt)+) => ({ + $crate::log!(target: $target, $crate::Level::Trace, $($arg)+) + }); + + // trace!("a {} event", "log") + ($($arg:tt)+) => ($crate::log!($crate::Level::Trace, $($arg)+)) +} + +/// Determines if a message logged at the specified level in that module will +/// be logged. +/// +/// This can be used to avoid expensive computation of log message arguments if +/// the message would be ignored anyway. +/// +/// # Examples +/// +/// ``` +/// use log::{debug, log_enabled, Level}; +/// +/// # struct Data { x: u32, y: u32 } +/// # fn expensive_call() -> Data { Data { x: 0, y: 0 } } +/// # let my_logger = log::__private_api::GlobalLogger; +/// if log_enabled!(Level::Debug) { +/// let data = expensive_call(); +/// debug!("expensive debug data: {} {}", data.x, data.y); +/// } +/// +/// if log_enabled!(target: "Global", Level::Debug) { +/// let data = expensive_call(); +/// debug!(target: "Global", "expensive debug data: {} {}", data.x, data.y); +/// } +/// +/// if log_enabled!(logger: my_logger, Level::Debug) { +/// let data = expensive_call(); +/// debug!(target: "Global", "expensive debug data: {} {}", data.x, data.y); +/// } +/// ``` +/// +/// This macro accepts the same `target` and `logger` arguments as [`macro@log`]. +#[macro_export] +macro_rules! log_enabled { + // log_enabled!(logger: my_logger, target: "my_target", Level::Info) + (logger: $logger:expr, target: $target:expr, $lvl:expr) => ({ + $crate::__log_enabled!(logger: $crate::__log_logger!($logger), target: $target, $lvl) + }); + + // log_enabled!(logger: my_logger, Level::Info) + (logger: $logger:expr, $lvl:expr) => ({ + $crate::__log_enabled!(logger: $crate::__log_logger!($logger), target: $crate::__private_api::module_path!(), $lvl) + }); + + // log_enabled!(target: "my_target", Level::Info) + (target: $target:expr, $lvl:expr) => ({ + $crate::__log_enabled!(logger: $crate::__log_logger!(__log_global_logger), target: $target, $lvl) + }); + + // log_enabled!(Level::Info) + ($lvl:expr) => ({ + $crate::__log_enabled!(logger: $crate::__log_logger!(__log_global_logger), target: $crate::__private_api::module_path!(), $lvl) + }); +} + +#[doc(hidden)] +#[macro_export] +macro_rules! __log_enabled { + // log_enabled!(logger: my_logger, target: "my_target", Level::Info) + (logger: $logger:expr, target: $target:expr, $lvl:expr) => {{ + let lvl = $lvl; + lvl <= $crate::STATIC_MAX_LEVEL + && lvl <= $crate::max_level() + && $crate::__private_api::enabled($logger, lvl, $target) + }}; +} + +// Determine the logger to use, and whether to take it by-value or by reference + +#[doc(hidden)] +#[macro_export] +macro_rules! 
__log_logger { + (__log_global_logger) => {{ + $crate::__private_api::GlobalLogger + }}; + + ($logger:expr) => {{ + &($logger) + }}; +} + +// These macros use a pattern of #[cfg]s to produce nicer error +// messages when log features aren't available + +#[doc(hidden)] +#[macro_export] +#[cfg(feature = "kv")] +macro_rules! __log_key { + // key1 = 42 + ($($args:ident)*) => { + $crate::__private_api::stringify!($($args)*) + }; + // "key1" = 42 + ($($args:expr)*) => { + $($args)* + }; +} + +#[doc(hidden)] +#[macro_export] +#[cfg(not(feature = "kv"))] +macro_rules! __log_key { + ($($args:tt)*) => { + compile_error!("key value support requires the `kv` feature of `log`") + }; +} + +#[doc(hidden)] +#[macro_export] +#[cfg(feature = "kv")] +macro_rules! __log_value { + // Entrypoint + ($key:tt = $args:expr) => { + $crate::__log_value!(($args):value) + }; + ($key:tt :$capture:tt = $args:expr) => { + $crate::__log_value!(($args):$capture) + }; + ($key:ident =) => { + $crate::__log_value!(($key):value) + }; + ($key:ident :$capture:tt =) => { + $crate::__log_value!(($key):$capture) + }; + // ToValue + (($args:expr):value) => { + $crate::__private_api::capture_to_value(&&$args) + }; + // Debug + (($args:expr):?) => { + $crate::__private_api::capture_debug(&&$args) + }; + (($args:expr):debug) => { + $crate::__private_api::capture_debug(&&$args) + }; + // Display + (($args:expr):%) => { + $crate::__private_api::capture_display(&&$args) + }; + (($args:expr):display) => { + $crate::__private_api::capture_display(&&$args) + }; + //Error + (($args:expr):err) => { + $crate::__log_value_error!($args) + }; + // sval::Value + (($args:expr):sval) => { + $crate::__log_value_sval!($args) + }; + // serde::Serialize + (($args:expr):serde) => { + $crate::__log_value_serde!($args) + }; +} + +#[doc(hidden)] +#[macro_export] +#[cfg(not(feature = "kv"))] +macro_rules! __log_value { + ($($args:tt)*) => { + compile_error!("key value support requires the `kv` feature of `log`") + }; +} + +#[doc(hidden)] +#[macro_export] +#[cfg(feature = "kv_sval")] +macro_rules! __log_value_sval { + ($args:expr) => { + $crate::__private_api::capture_sval(&&$args) + }; +} + +#[doc(hidden)] +#[macro_export] +#[cfg(not(feature = "kv_sval"))] +macro_rules! __log_value_sval { + ($args:expr) => { + compile_error!("capturing values as `sval::Value` requites the `kv_sval` feature of `log`") + }; +} + +#[doc(hidden)] +#[macro_export] +#[cfg(feature = "kv_serde")] +macro_rules! __log_value_serde { + ($args:expr) => { + $crate::__private_api::capture_serde(&&$args) + }; +} + +#[doc(hidden)] +#[macro_export] +#[cfg(not(feature = "kv_serde"))] +macro_rules! __log_value_serde { + ($args:expr) => { + compile_error!( + "capturing values as `serde::Serialize` requites the `kv_serde` feature of `log`" + ) + }; +} + +#[doc(hidden)] +#[macro_export] +#[cfg(feature = "kv_std")] +macro_rules! __log_value_error { + ($args:expr) => { + $crate::__private_api::capture_error(&$args) + }; +} + +#[doc(hidden)] +#[macro_export] +#[cfg(not(feature = "kv_std"))] +macro_rules! 
__log_value_error {
+    ($args:expr) => {
+        compile_error!(
+            "capturing values as `std::error::Error` requires the `kv_std` feature of `log`"
+        )
+    };
+}
diff --git a/vendor/log/src/serde.rs b/vendor/log/src/serde.rs
new file mode 100644
index 00000000000000..db732395bd1497
--- /dev/null
+++ b/vendor/log/src/serde.rs
@@ -0,0 +1,397 @@
+#![cfg(feature = "serde")]
+
+use serde::de::{
+    Deserialize, DeserializeSeed, Deserializer, EnumAccess, Error, Unexpected, VariantAccess,
+    Visitor,
+};
+use serde::ser::{Serialize, Serializer};
+
+use crate::{Level, LevelFilter, LOG_LEVEL_NAMES};
+
+use std::fmt;
+use std::str::{self, FromStr};
+
+// The Deserialize impls are handwritten to be case-insensitive using FromStr.
+
+impl Serialize for Level {
+    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
+    where
+        S: Serializer,
+    {
+        match *self {
+            Level::Error => serializer.serialize_unit_variant("Level", 0, "ERROR"),
+            Level::Warn => serializer.serialize_unit_variant("Level", 1, "WARN"),
+            Level::Info => serializer.serialize_unit_variant("Level", 2, "INFO"),
+            Level::Debug => serializer.serialize_unit_variant("Level", 3, "DEBUG"),
+            Level::Trace => serializer.serialize_unit_variant("Level", 4, "TRACE"),
+        }
+    }
+}
+
+impl<'de> Deserialize<'de> for Level {
+    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
+    where
+        D: Deserializer<'de>,
+    {
+        struct LevelIdentifier;
+
+        impl<'de> Visitor<'de> for LevelIdentifier {
+            type Value = Level;
+
+            fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
+                formatter.write_str("log level")
+            }
+
+            fn visit_u64<E>(self, v: u64) -> Result<Self::Value, E>
+            where
+                E: Error,
+            {
+                let variant = LOG_LEVEL_NAMES[1..]
+                    .get(v as usize)
+                    .ok_or_else(|| Error::invalid_value(Unexpected::Unsigned(v), &self))?;
+
+                self.visit_str(variant)
+            }
+
+            fn visit_str<E>(self, s: &str) -> Result<Self::Value, E>
+            where
+                E: Error,
+            {
+                // Case-insensitive.
+                FromStr::from_str(s).map_err(|_| Error::unknown_variant(s, &LOG_LEVEL_NAMES[1..]))
+            }
+
+            fn visit_bytes<E>(self, value: &[u8]) -> Result<Self::Value, E>
+            where
+                E: Error,
+            {
+                let variant = str::from_utf8(value)
+                    .map_err(|_| Error::invalid_value(Unexpected::Bytes(value), &self))?;
+
+                self.visit_str(variant)
+            }
+        }
+
+        impl<'de> DeserializeSeed<'de> for LevelIdentifier {
+            type Value = Level;
+
+            fn deserialize<D>(self, deserializer: D) -> Result<Self::Value, D::Error>
+            where
+                D: Deserializer<'de>,
+            {
+                deserializer.deserialize_identifier(LevelIdentifier)
+            }
+        }
+
+        struct LevelEnum;
+
+        impl<'de> Visitor<'de> for LevelEnum {
+            type Value = Level;
+
+            fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
+                formatter.write_str("log level")
+            }
+
+            fn visit_enum<A>(self, value: A) -> Result<Self::Value, A::Error>
+            where
+                A: EnumAccess<'de>,
+            {
+                let (level, variant) = value.variant_seed(LevelIdentifier)?;
+                // Every variant is a unit variant.
+                variant.unit_variant()?;
+                Ok(level)
+            }
+        }
+
+        deserializer.deserialize_enum("Level", &LOG_LEVEL_NAMES[1..], LevelEnum)
+    }
+}
+
+impl Serialize for LevelFilter {
+    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
+    where
+        S: Serializer,
+    {
+        match *self {
+            LevelFilter::Off => serializer.serialize_unit_variant("LevelFilter", 0, "OFF"),
+            LevelFilter::Error => serializer.serialize_unit_variant("LevelFilter", 1, "ERROR"),
+            LevelFilter::Warn => serializer.serialize_unit_variant("LevelFilter", 2, "WARN"),
+            LevelFilter::Info => serializer.serialize_unit_variant("LevelFilter", 3, "INFO"),
+            LevelFilter::Debug => serializer.serialize_unit_variant("LevelFilter", 4, "DEBUG"),
+            LevelFilter::Trace => serializer.serialize_unit_variant("LevelFilter", 5, "TRACE"),
+        }
+    }
+}
+
+impl<'de> Deserialize<'de> for LevelFilter {
+    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
+    where
+        D: Deserializer<'de>,
+    {
+        struct LevelFilterIdentifier;
+
+        impl<'de> Visitor<'de> for LevelFilterIdentifier {
+            type Value = LevelFilter;
+
+            fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
+                formatter.write_str("log level filter")
+            }
+
+            fn visit_u64<E>(self, v: u64) -> Result<Self::Value, E>
+            where
+                E: Error,
+            {
+                let variant = LOG_LEVEL_NAMES
+                    .get(v as usize)
+                    .ok_or_else(|| Error::invalid_value(Unexpected::Unsigned(v), &self))?;
+
+                self.visit_str(variant)
+            }
+
+            fn visit_str<E>(self, s: &str) -> Result<Self::Value, E>
+            where
+                E: Error,
+            {
+                // Case-insensitive.
+                FromStr::from_str(s).map_err(|_| Error::unknown_variant(s, &LOG_LEVEL_NAMES))
+            }
+
+            fn visit_bytes<E>(self, value: &[u8]) -> Result<Self::Value, E>
+            where
+                E: Error,
+            {
+                let variant = str::from_utf8(value)
+                    .map_err(|_| Error::invalid_value(Unexpected::Bytes(value), &self))?;
+
+                self.visit_str(variant)
+            }
+        }
+
+        impl<'de> DeserializeSeed<'de> for LevelFilterIdentifier {
+            type Value = LevelFilter;
+
+            fn deserialize<D>(self, deserializer: D) -> Result<Self::Value, D::Error>
+            where
+                D: Deserializer<'de>,
+            {
+                deserializer.deserialize_identifier(LevelFilterIdentifier)
+            }
+        }
+
+        struct LevelFilterEnum;
+
+        impl<'de> Visitor<'de> for LevelFilterEnum {
+            type Value = LevelFilter;
+
+            fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
+                formatter.write_str("log level filter")
+            }
+
+            fn visit_enum<A>(self, value: A) -> Result<Self::Value, A::Error>
+            where
+                A: EnumAccess<'de>,
+            {
+                let (level_filter, variant) = value.variant_seed(LevelFilterIdentifier)?;
+                // Every variant is a unit variant.
+ variant.unit_variant()?; + Ok(level_filter) + } + } + + deserializer.deserialize_enum("LevelFilter", &LOG_LEVEL_NAMES, LevelFilterEnum) + } +} + +#[cfg(test)] +mod tests { + use crate::{Level, LevelFilter}; + use serde_test::{assert_de_tokens, assert_de_tokens_error, assert_tokens, Token}; + + fn level_token(variant: &'static str) -> Token { + Token::UnitVariant { + name: "Level", + variant, + } + } + + fn level_bytes_tokens(variant: &'static [u8]) -> [Token; 3] { + [ + Token::Enum { name: "Level" }, + Token::Bytes(variant), + Token::Unit, + ] + } + + fn level_variant_tokens(variant: u32) -> [Token; 3] { + [ + Token::Enum { name: "Level" }, + Token::U32(variant), + Token::Unit, + ] + } + + fn level_filter_token(variant: &'static str) -> Token { + Token::UnitVariant { + name: "LevelFilter", + variant, + } + } + + fn level_filter_bytes_tokens(variant: &'static [u8]) -> [Token; 3] { + [ + Token::Enum { + name: "LevelFilter", + }, + Token::Bytes(variant), + Token::Unit, + ] + } + + fn level_filter_variant_tokens(variant: u32) -> [Token; 3] { + [ + Token::Enum { + name: "LevelFilter", + }, + Token::U32(variant), + Token::Unit, + ] + } + + #[test] + fn test_level_ser_de() { + let cases = &[ + (Level::Error, [level_token("ERROR")]), + (Level::Warn, [level_token("WARN")]), + (Level::Info, [level_token("INFO")]), + (Level::Debug, [level_token("DEBUG")]), + (Level::Trace, [level_token("TRACE")]), + ]; + + for (s, expected) in cases { + assert_tokens(s, expected); + } + } + + #[test] + fn test_level_case_insensitive() { + let cases = &[ + (Level::Error, [level_token("error")]), + (Level::Warn, [level_token("warn")]), + (Level::Info, [level_token("info")]), + (Level::Debug, [level_token("debug")]), + (Level::Trace, [level_token("trace")]), + ]; + + for (s, expected) in cases { + assert_de_tokens(s, expected); + } + } + + #[test] + fn test_level_de_bytes() { + let cases = &[ + (Level::Error, level_bytes_tokens(b"ERROR")), + (Level::Warn, level_bytes_tokens(b"WARN")), + (Level::Info, level_bytes_tokens(b"INFO")), + (Level::Debug, level_bytes_tokens(b"DEBUG")), + (Level::Trace, level_bytes_tokens(b"TRACE")), + ]; + + for (value, tokens) in cases { + assert_de_tokens(value, tokens); + } + } + + #[test] + fn test_level_de_variant_index() { + let cases = &[ + (Level::Error, level_variant_tokens(0)), + (Level::Warn, level_variant_tokens(1)), + (Level::Info, level_variant_tokens(2)), + (Level::Debug, level_variant_tokens(3)), + (Level::Trace, level_variant_tokens(4)), + ]; + + for (value, tokens) in cases { + assert_de_tokens(value, tokens); + } + } + + #[test] + fn test_level_de_error() { + let msg = "unknown variant `errorx`, expected one of \ + `ERROR`, `WARN`, `INFO`, `DEBUG`, `TRACE`"; + assert_de_tokens_error::(&[level_token("errorx")], msg); + } + + #[test] + fn test_level_filter_ser_de() { + let cases = &[ + (LevelFilter::Off, [level_filter_token("OFF")]), + (LevelFilter::Error, [level_filter_token("ERROR")]), + (LevelFilter::Warn, [level_filter_token("WARN")]), + (LevelFilter::Info, [level_filter_token("INFO")]), + (LevelFilter::Debug, [level_filter_token("DEBUG")]), + (LevelFilter::Trace, [level_filter_token("TRACE")]), + ]; + + for (s, expected) in cases { + assert_tokens(s, expected); + } + } + + #[test] + fn test_level_filter_case_insensitive() { + let cases = &[ + (LevelFilter::Off, [level_filter_token("off")]), + (LevelFilter::Error, [level_filter_token("error")]), + (LevelFilter::Warn, [level_filter_token("warn")]), + (LevelFilter::Info, [level_filter_token("info")]), + (LevelFilter::Debug, 
[level_filter_token("debug")]), + (LevelFilter::Trace, [level_filter_token("trace")]), + ]; + + for (s, expected) in cases { + assert_de_tokens(s, expected); + } + } + + #[test] + fn test_level_filter_de_bytes() { + let cases = &[ + (LevelFilter::Off, level_filter_bytes_tokens(b"OFF")), + (LevelFilter::Error, level_filter_bytes_tokens(b"ERROR")), + (LevelFilter::Warn, level_filter_bytes_tokens(b"WARN")), + (LevelFilter::Info, level_filter_bytes_tokens(b"INFO")), + (LevelFilter::Debug, level_filter_bytes_tokens(b"DEBUG")), + (LevelFilter::Trace, level_filter_bytes_tokens(b"TRACE")), + ]; + + for (value, tokens) in cases { + assert_de_tokens(value, tokens); + } + } + + #[test] + fn test_level_filter_de_variant_index() { + let cases = &[ + (LevelFilter::Off, level_filter_variant_tokens(0)), + (LevelFilter::Error, level_filter_variant_tokens(1)), + (LevelFilter::Warn, level_filter_variant_tokens(2)), + (LevelFilter::Info, level_filter_variant_tokens(3)), + (LevelFilter::Debug, level_filter_variant_tokens(4)), + (LevelFilter::Trace, level_filter_variant_tokens(5)), + ]; + + for (value, tokens) in cases { + assert_de_tokens(value, tokens); + } + } + + #[test] + fn test_level_filter_de_error() { + let msg = "unknown variant `errorx`, expected one of \ + `OFF`, `ERROR`, `WARN`, `INFO`, `DEBUG`, `TRACE`"; + assert_de_tokens_error::(&[level_filter_token("errorx")], msg); + } +} diff --git a/vendor/log/tests/integration.rs b/vendor/log/tests/integration.rs new file mode 100644 index 00000000000000..9bcb0469787984 --- /dev/null +++ b/vendor/log/tests/integration.rs @@ -0,0 +1,101 @@ +#![allow(dead_code, unused_imports)] + +use log::{debug, error, info, trace, warn, Level, LevelFilter, Log, Metadata, Record}; +use std::sync::{Arc, Mutex}; + +struct State { + last_log_level: Mutex>, + last_log_location: Mutex>, +} + +struct Logger(Arc); + +impl Log for Logger { + fn enabled(&self, _: &Metadata) -> bool { + true + } + + fn log(&self, record: &Record) { + *self.0.last_log_level.lock().unwrap() = Some(record.level()); + *self.0.last_log_location.lock().unwrap() = record.line(); + } + fn flush(&self) {} +} + +#[test] +fn test_integration() { + // These tests don't really make sense when static + // max level filtering is applied + #[cfg(not(any( + feature = "max_level_off", + feature = "max_level_error", + feature = "max_level_warn", + feature = "max_level_info", + feature = "max_level_debug", + feature = "max_level_trace", + feature = "release_max_level_off", + feature = "release_max_level_error", + feature = "release_max_level_warn", + feature = "release_max_level_info", + feature = "release_max_level_debug", + feature = "release_max_level_trace", + )))] + { + let me = Arc::new(State { + last_log_level: Mutex::new(None), + last_log_location: Mutex::new(None), + }); + let a = me.clone(); + let logger = Logger(me); + + test_filter(&logger, &a, LevelFilter::Off); + test_filter(&logger, &a, LevelFilter::Error); + test_filter(&logger, &a, LevelFilter::Warn); + test_filter(&logger, &a, LevelFilter::Info); + test_filter(&logger, &a, LevelFilter::Debug); + test_filter(&logger, &a, LevelFilter::Trace); + + test_line_numbers(&logger, &a); + } +} + +fn test_filter(logger: &dyn Log, a: &State, filter: LevelFilter) { + // tests to ensure logs with a level beneath 'max_level' are filtered out + log::set_max_level(filter); + error!(logger: logger, ""); + last(a, t(Level::Error, filter)); + warn!(logger: logger, ""); + last(a, t(Level::Warn, filter)); + info!(logger: logger, ""); + last(a, t(Level::Info, filter)); + 
debug!(logger: logger, ""); + last(a, t(Level::Debug, filter)); + trace!(logger: logger, ""); + last(a, t(Level::Trace, filter)); + + fn t(lvl: Level, filter: LevelFilter) -> Option { + if lvl <= filter { + Some(lvl) + } else { + None + } + } + fn last(state: &State, expected: Option) { + let lvl = state.last_log_level.lock().unwrap().take(); + assert_eq!(lvl, expected); + } +} + +fn test_line_numbers(logger: &dyn Log, state: &State) { + log::set_max_level(LevelFilter::Trace); + + info!(logger: logger, ""); // ensure check_line function follows log macro + check_log_location(state); + + #[track_caller] + fn check_log_location(state: &State) { + let location = std::panic::Location::caller().line(); // get function calling location + let line_number = state.last_log_location.lock().unwrap().take().unwrap(); // get location of most recent log + assert_eq!(line_number, location - 1); + } +} diff --git a/vendor/log/tests/macros.rs b/vendor/log/tests/macros.rs new file mode 100644 index 00000000000000..dded475c1c082d --- /dev/null +++ b/vendor/log/tests/macros.rs @@ -0,0 +1,429 @@ +use log::{log, log_enabled, Log, Metadata, Record}; + +macro_rules! all_log_macros { + ($($arg:tt)*) => ({ + ::log::trace!($($arg)*); + ::log::debug!($($arg)*); + ::log::info!($($arg)*); + ::log::warn!($($arg)*); + ::log::error!($($arg)*); + }); +} + +// Not `Copy` +struct Logger; + +impl Log for Logger { + fn enabled(&self, _: &Metadata) -> bool { + false + } + fn log(&self, _: &Record) {} + fn flush(&self) {} +} + +#[test] +fn no_args() { + let logger = Logger; + + for lvl in log::Level::iter() { + log!(lvl, "hello"); + log!(lvl, "hello",); + + log!(target: "my_target", lvl, "hello"); + log!(target: "my_target", lvl, "hello",); + + log!(logger: logger, lvl, "hello"); + log!(logger: logger, lvl, "hello",); + + log!(logger: logger, target: "my_target", lvl, "hello"); + log!(logger: logger, target: "my_target", lvl, "hello",); + } + + all_log_macros!("hello"); + all_log_macros!("hello",); + + all_log_macros!(target: "my_target", "hello"); + all_log_macros!(target: "my_target", "hello",); + + all_log_macros!(logger: logger, "hello"); + all_log_macros!(logger: logger, "hello",); + + all_log_macros!(logger: logger, target: "my_target", "hello"); + all_log_macros!(logger: logger, target: "my_target", "hello",); +} + +#[test] +fn anonymous_args() { + for lvl in log::Level::iter() { + log!(lvl, "hello {}", "world"); + log!(lvl, "hello {}", "world",); + + log!(target: "my_target", lvl, "hello {}", "world"); + log!(target: "my_target", lvl, "hello {}", "world",); + + log!(lvl, "hello {}", "world"); + log!(lvl, "hello {}", "world",); + } + + all_log_macros!("hello {}", "world"); + all_log_macros!("hello {}", "world",); + + all_log_macros!(target: "my_target", "hello {}", "world"); + all_log_macros!(target: "my_target", "hello {}", "world",); + + let logger = Logger; + + all_log_macros!(logger: logger, "hello {}", "world"); + all_log_macros!(logger: logger, "hello {}", "world",); + + all_log_macros!(logger: logger, target: "my_target", "hello {}", "world"); + all_log_macros!(logger: logger, target: "my_target", "hello {}", "world",); +} + +#[test] +fn named_args() { + for lvl in log::Level::iter() { + log!(lvl, "hello {world}", world = "world"); + log!(lvl, "hello {world}", world = "world",); + + log!(target: "my_target", lvl, "hello {world}", world = "world"); + log!(target: "my_target", lvl, "hello {world}", world = "world",); + + log!(lvl, "hello {world}", world = "world"); + log!(lvl, "hello {world}", world = "world",); + } 
+ + all_log_macros!("hello {world}", world = "world"); + all_log_macros!("hello {world}", world = "world",); + + all_log_macros!(target: "my_target", "hello {world}", world = "world"); + all_log_macros!(target: "my_target", "hello {world}", world = "world",); + + let logger = Logger; + + all_log_macros!(logger: logger, "hello {world}", world = "world"); + all_log_macros!(logger: logger, "hello {world}", world = "world",); + + all_log_macros!(logger: logger, target: "my_target", "hello {world}", world = "world"); + all_log_macros!(logger: logger, target: "my_target", "hello {world}", world = "world",); +} + +#[test] +fn inlined_args() { + let world = "world"; + + for lvl in log::Level::iter() { + log!(lvl, "hello {world}"); + log!(lvl, "hello {world}",); + + log!(target: "my_target", lvl, "hello {world}"); + log!(target: "my_target", lvl, "hello {world}",); + + log!(lvl, "hello {world}"); + log!(lvl, "hello {world}",); + } + + all_log_macros!("hello {world}"); + all_log_macros!("hello {world}",); + + all_log_macros!(target: "my_target", "hello {world}"); + all_log_macros!(target: "my_target", "hello {world}",); + + let logger = Logger; + + all_log_macros!(logger: logger, "hello {world}"); + all_log_macros!(logger: logger, "hello {world}",); + + all_log_macros!(logger: logger, target: "my_target", "hello {world}"); + all_log_macros!(logger: logger, target: "my_target", "hello {world}",); +} + +#[test] +fn enabled() { + let logger = Logger; + + for lvl in log::Level::iter() { + let _enabled = log_enabled!(lvl); + let _enabled = log_enabled!(target: "my_target", lvl); + let _enabled = log_enabled!(logger: logger, target: "my_target", lvl); + let _enabled = log_enabled!(logger: logger, lvl); + } +} + +#[test] +fn expr() { + let logger = Logger; + + for lvl in log::Level::iter() { + log!(lvl, "hello"); + + log!(logger: logger, lvl, "hello"); + } +} + +#[test] +#[cfg(feature = "kv")] +fn kv_no_args() { + let logger = Logger; + + for lvl in log::Level::iter() { + log!(target: "my_target", lvl, cat_1 = "chashu", cat_2 = "nori", cat_count = 2; "hello"); + log!(lvl, cat_1 = "chashu", cat_2 = "nori", cat_count = 2; "hello"); + + log!(logger: logger, target: "my_target", lvl, cat_1 = "chashu", cat_2 = "nori", cat_count = 2; "hello"); + log!(logger: logger, lvl, cat_1 = "chashu", cat_2 = "nori", cat_count = 2; "hello"); + } + + all_log_macros!(target: "my_target", cat_1 = "chashu", cat_2 = "nori", cat_count = 2; "hello"); + all_log_macros!(target = "my_target", cat_1 = "chashu", cat_2 = "nori", cat_count = 2; "hello"); + all_log_macros!(cat_1 = "chashu", cat_2 = "nori", cat_count = 2; "hello"); + + all_log_macros!(logger: logger, cat_1 = "chashu", cat_2 = "nori", cat_count = 2; "hello"); + all_log_macros!(logger: logger, target: "my_target", cat_1 = "chashu", cat_2 = "nori", cat_count = 2; "hello"); +} + +#[test] +#[cfg(feature = "kv")] +fn kv_expr_args() { + let logger = Logger; + + for lvl in log::Level::iter() { + log!(target: "my_target", lvl, cat_math = { let mut x = 0; x += 1; x + 1 }; "hello"); + + log!(lvl, target = "my_target", cat_math = { let mut x = 0; x += 1; x + 1 }; "hello"); + log!(lvl, cat_math = { let mut x = 0; x += 1; x + 1 }; "hello"); + + log!(logger: logger, target: "my_target", lvl, cat_math = { let mut x = 0; x += 1; x + 1 }; "hello"); + + log!(logger: logger, lvl, target = "my_target", cat_math = { let mut x = 0; x += 1; x + 1 }; "hello"); + log!(logger: logger, lvl, cat_math = { let mut x = 0; x += 1; x + 1 }; "hello"); + } + + all_log_macros!(target: "my_target", cat_math = { 
let mut x = 0; x += 1; x + 1 }; "hello"); + all_log_macros!(target = "my_target", cat_math = { let mut x = 0; x += 1; x + 1 }; "hello"); + all_log_macros!(cat_math = { let mut x = 0; x += 1; x + 1 }; "hello"); + + all_log_macros!(logger: logger, target: "my_target", cat_math = { let mut x = 0; x += 1; x + 1 }; "hello"); + all_log_macros!(logger: logger, target = "my_target", cat_math = { let mut x = 0; x += 1; x + 1 }; "hello"); + all_log_macros!(logger: logger, cat_math = { let mut x = 0; x += 1; x + 1 }; "hello"); +} + +#[test] +#[cfg(feature = "kv")] +fn kv_anonymous_args() { + let logger = Logger; + + for lvl in log::Level::iter() { + log!(target: "my_target", lvl, cat_1 = "chashu", cat_2 = "nori", cat_count = 2; "hello {}", "world"); + log!(lvl, target = "my_target", cat_1 = "chashu", cat_2 = "nori", cat_count = 2; "hello {}", "world"); + + log!(lvl, cat_1 = "chashu", cat_2 = "nori", cat_count = 2; "hello {}", "world"); + + log!(logger: logger, target: "my_target", lvl, cat_1 = "chashu", cat_2 = "nori", cat_count = 2; "hello {}", "world"); + log!(logger: logger, lvl, target = "my_target", cat_1 = "chashu", cat_2 = "nori", cat_count = 2; "hello {}", "world"); + + log!(logger: logger, lvl, cat_1 = "chashu", cat_2 = "nori", cat_count = 2; "hello {}", "world"); + } + + all_log_macros!(target: "my_target", cat_1 = "chashu", cat_2 = "nori", cat_count = 2; "hello {}", "world"); + all_log_macros!(target = "my_target", cat_1 = "chashu", cat_2 = "nori", cat_count = 2; "hello {}", "world"); + all_log_macros!(cat_1 = "chashu", cat_2 = "nori", cat_count = 2; "hello {}", "world"); + + all_log_macros!(logger: logger, target: "my_target", cat_1 = "chashu", cat_2 = "nori", cat_count = 2; "hello {}", "world"); + all_log_macros!(logger: logger, target = "my_target", cat_1 = "chashu", cat_2 = "nori", cat_count = 2; "hello {}", "world"); + all_log_macros!(logger: logger, cat_1 = "chashu", cat_2 = "nori", cat_count = 2; "hello {}", "world"); +} + +#[test] +#[cfg(feature = "kv")] +fn kv_named_args() { + let logger = Logger; + + for lvl in log::Level::iter() { + log!(target: "my_target", lvl, cat_1 = "chashu", cat_2 = "nori", cat_count = 2; "hello {world}", world = "world"); + log!(lvl, target = "my_target", cat_1 = "chashu", cat_2 = "nori", cat_count = 2; "hello {world}", world = "world"); + + log!(lvl, cat_1 = "chashu", cat_2 = "nori", cat_count = 2; "hello {world}", world = "world"); + + log!(logger: logger, target: "my_target", lvl, cat_1 = "chashu", cat_2 = "nori", cat_count = 2; "hello {world}", world = "world"); + log!(logger: logger, lvl, target = "my_target", cat_1 = "chashu", cat_2 = "nori", cat_count = 2; "hello {world}", world = "world"); + + log!(logger: logger, lvl, cat_1 = "chashu", cat_2 = "nori", cat_count = 2; "hello {world}", world = "world"); + } + + all_log_macros!(target: "my_target", cat_1 = "chashu", cat_2 = "nori", cat_count = 2; "hello {world}", world = "world"); + all_log_macros!(target = "my_target", cat_1 = "chashu", cat_2 = "nori", cat_count = 2; "hello {world}", world = "world"); + all_log_macros!(cat_1 = "chashu", cat_2 = "nori", cat_count = 2; "hello {world}", world = "world"); + + all_log_macros!(logger: logger, target: "my_target", cat_1 = "chashu", cat_2 = "nori", cat_count = 2; "hello {world}", world = "world"); + all_log_macros!(logger: logger, target = "my_target", cat_1 = "chashu", cat_2 = "nori", cat_count = 2; "hello {world}", world = "world"); + all_log_macros!(logger: logger, cat_1 = "chashu", cat_2 = "nori", cat_count = 2; "hello {world}", world = "world"); +} + 
+#[test] +#[cfg(feature = "kv")] +fn kv_ident() { + let cat_1 = "chashu"; + let cat_2 = "nori"; + + all_log_macros!(cat_1, cat_2:%, cat_count = 2; "hello {world}", world = "world"); +} + +#[test] +#[cfg(feature = "kv")] +fn kv_expr_context() { + match "chashu" { + cat_1 => { + log::info!(target: "target", cat_1 = cat_1, cat_2 = "nori"; "hello {}", "cats"); + } + }; +} + +#[test] +fn implicit_named_args() { + let world = "world"; + + for lvl in log::Level::iter() { + log!(lvl, "hello {world}"); + log!(lvl, "hello {world}",); + + log!(target: "my_target", lvl, "hello {world}"); + log!(target: "my_target", lvl, "hello {world}",); + + log!(lvl, "hello {world}"); + log!(lvl, "hello {world}",); + } + + all_log_macros!("hello {world}"); + all_log_macros!("hello {world}",); + + all_log_macros!(target: "my_target", "hello {world}"); + all_log_macros!(target: "my_target", "hello {world}",); + + #[cfg(feature = "kv")] + all_log_macros!(target = "my_target"; "hello {world}"); + #[cfg(feature = "kv")] + all_log_macros!(target = "my_target"; "hello {world}",); +} + +#[test] +#[cfg(feature = "kv")] +fn kv_implicit_named_args() { + let world = "world"; + + for lvl in log::Level::iter() { + log!(target: "my_target", lvl, cat_1 = "chashu", cat_2 = "nori", cat_count = 2; "hello {world}"); + + log!(lvl, cat_1 = "chashu", cat_2 = "nori", cat_count = 2; "hello {world}"); + } + + all_log_macros!(target: "my_target", cat_1 = "chashu", cat_2 = "nori", cat_count = 2; "hello {world}"); + all_log_macros!(target = "my_target", cat_1 = "chashu", cat_2 = "nori", cat_count = 2; "hello {world}"); + all_log_macros!(cat_1 = "chashu", cat_2 = "nori", cat_count = 2; "hello {world}"); +} + +#[test] +#[cfg(feature = "kv")] +fn kv_string_keys() { + for lvl in log::Level::iter() { + log!(target: "my_target", lvl, "also dogs" = "Fílos", "key/that-can't/be/an/ident" = "hi"; "hello {world}", world = "world"); + } + + all_log_macros!(target: "my_target", "also dogs" = "Fílos", "key/that-can't/be/an/ident" = "hi"; "hello {world}", world = "world"); +} + +#[test] +#[cfg(feature = "kv")] +fn kv_common_value_types() { + all_log_macros!( + u8 = 42u8, + u16 = 42u16, + u32 = 42u32, + u64 = 42u64, + u128 = 42u128, + i8 = -42i8, + i16 = -42i16, + i32 = -42i32, + i64 = -42i64, + i128 = -42i128, + f32 = 4.2f32, + f64 = -4.2f64, + bool = true, + str = "string"; + "hello world" + ); +} + +#[test] +#[cfg(feature = "kv")] +fn kv_debug() { + all_log_macros!( + a:? = 42, + b:debug = 42; + "hello world" + ); +} + +#[test] +#[cfg(feature = "kv")] +fn kv_display() { + all_log_macros!( + a:% = 42, + b:display = 42; + "hello world" + ); +} + +#[test] +#[cfg(feature = "kv_std")] +fn kv_error() { + all_log_macros!( + a:err = std::io::Error::new(std::io::ErrorKind::Other, "an error"); + "hello world" + ); +} + +#[test] +#[cfg(feature = "kv_sval")] +fn kv_sval() { + all_log_macros!( + a:sval = 42; + "hello world" + ); +} + +#[test] +#[cfg(feature = "kv_serde")] +fn kv_serde() { + all_log_macros!( + a:serde = 42; + "hello world" + ); +} + +#[test] +fn logger_short_lived() { + all_log_macros!(logger: Logger, "hello"); + all_log_macros!(logger: &Logger, "hello"); +} + +#[test] +fn logger_expr() { + all_log_macros!(logger: { + let logger = Logger; + logger + }, "hello"); +} + +/// Some and None (from Option) are used in the macros. 
+#[derive(Debug)] +enum Type { + Some, + None, +} + +#[test] +fn regression_issue_494() { + use self::Type::*; + all_log_macros!("some message: {:?}, {:?}", None, Some); +} diff --git a/vendor/log/triagebot.toml b/vendor/log/triagebot.toml new file mode 100644 index 00000000000000..fa0824ac53c0a9 --- /dev/null +++ b/vendor/log/triagebot.toml @@ -0,0 +1 @@ +[assign] diff --git a/vendor/memchr/.cargo-checksum.json b/vendor/memchr/.cargo-checksum.json new file mode 100644 index 00000000000000..2473bfd0ebc44e --- /dev/null +++ b/vendor/memchr/.cargo-checksum.json @@ -0,0 +1 @@ +{"files":{".cargo_vcs_info.json":"32184fbeeef54a11ecc1ee09a2bedd94706767377f431a528f48134f56cd3b6f",".ignore":"ae8b19032d4fc418b99ccae9e7cc3996b1386665d0bd5edc5634a158e7d2f6a2",".vim/coc-settings.json":"cdc5e2b88bddbdbd1b85f21389c4d882720e4c4488ad566c43fccd9124f2e3bf","COPYING":"01c266bced4a434da0051174d6bee16a4c82cf634e2679b6155d40d75012390f","Cargo.lock":"839877bbdcf9c1ee00d0b290c76d6adc590a2cc9e374eb2bb9dc494f803c0145","Cargo.toml":"5750ca97e8b2643f2ba1d7e98f54dcf54518c0176899e547876a59eb736198a5","Cargo.toml.orig":"b919c7322ecc6da819546cb677da938730e5df32fdfe9ef5d7c2dc54cc768526","LICENSE-MIT":"0f96a83840e146e43c0ec96a22ec1f392e0680e6c1226e6f3ba87e0740af850f","README.md":"92a74aaffe011bdaa06fbc34a01686a6eba58ca1322e976759417a547fddf734","UNLICENSE":"7e12e5df4bae12cb21581ba157ced20e1986a0508dd10d0e8a4ab9a4cf94e85c","rustfmt.toml":"1ca600239a27401c4a43f363cf3f38183a212affc1f31bff3ae93234bbaec228","src/arch/aarch64/memchr.rs":"5bb70f915084e629d940dbc322f5b9096b2e658cf63fea8a2f6e7550412e73a0","src/arch/aarch64/mod.rs":"44cd1a614bd66f1e66fc86c541d3c3b8d3a14a644c13e8bf816df3f555eac2d4","src/arch/aarch64/neon/memchr.rs":"e8c00b8fb2c7e2711832ae3cedefe59f32ebedd7dfa4d0ec6de2a566c979daea","src/arch/aarch64/neon/mod.rs":"eab6d56c2b2354db4ee395f40282cd49f97e2ab853547be5de6e65fbe1b2f634","src/arch/aarch64/neon/packedpair.rs":"32d3e4cd0dd9b6c8382e5308cbd896d20242c90b12862c44a5de6a8b4d6126df","src/arch/all/memchr.rs":"b0b1214aa573ed5d02ae62a77c42c773065566b50274d4096e37817d65ab1594","src/arch/all/mod.rs":"05f3fc2b069682eb1545fc6366d167bb620a454365dac8b8dd6cde6cd64de18a","src/arch/all/packedpair/default_rank.rs":"abffd1b5b8b7a3be95c03dd1105b905c246a379854dc56f1e846ea7c4408f2c7","src/arch/all/packedpair/mod.rs":"292b66042c5b5c78bba33db6526aeae6904db803d601fcdd29032b87b3eb3754","src/arch/all/rabinkarp.rs":"236f69c04b90c14c253ae6c8d9b78150b4a56df75bb50af6d63b15145668b7cc","src/arch/all/shiftor.rs":"0d79117f52a1e4795843603a3bb0b45397df4ad5e4184bbc923658dab9dc3b5f","src/arch/all/twoway.rs":"47c97a265bfbafde90a618946643d3e97dfd9a85f01aa4ac758cd4c1573a450d","src/arch/generic/memchr.rs":"cab4636bf8042c81ca1bcc49fe4214b362100992c0a850859ff445fa6a48f327","src/arch/generic/mod.rs":"1dd75f61e0ea2563b8205a08aaa7b55500130aa331d18b9e9f995724b66c7a39","src/arch/generic/packedpair.rs":"a4a6efb29877ced9cf4c4e5ae9f36a79f019a16b831f2b9424899a1513d458ad","src/arch/mod.rs":"ca3960b7e2ed28d1b3c121710a870430531aad792f64d4dcb4ca4709d6cbda30","src/arch/wasm32/memchr.rs":"d88ac79f891d8530f505f5035062d3da274a05d66c611480c75430d52709d052","src/arch/wasm32/mod.rs":"a20377aa8fe07d68594879101dc73061e4f51d9c8d812b593b1f376e3c8add79","src/arch/wasm32/simd128/memchr.rs":"bac2c4c43fe710c83a6f2b1118fede043be89dd821d4b532907f129f09fdb5cf","src/arch/wasm32/simd128/mod.rs":"c157b373faedbfd65323be432e25bc411d97aa1b7bc58e76048614c7b2bf3bf6","src/arch/wasm32/simd128/packedpair.rs":"288ba6e5eee6a7a8e5e45c64cff1aa5d72d996c2a6bc228be372c75789f08e45","src/arch/x86_64/avx2/memchr
.rs":"576ec0c30f49874f7fd9f6caeb490d56132c0fbbaa4d877b1aa532cafce19323","src/arch/x86_64/avx2/mod.rs":"0033d1b712d0b10f0f273ef9aa8caa53e05e49f4c56a64f39af0b9df97eec584","src/arch/x86_64/avx2/packedpair.rs":"87b69cb4301815906127db4f6370f572c7c5d5dad35c0946c00ad888dbcaec8c","src/arch/x86_64/memchr.rs":"7426e27c39a334d500a6803acdfd97ffc05fbf2d70ba8e74492a8ad3f22d20da","src/arch/x86_64/mod.rs":"61b2aa876942fd3e78714c2ae21e356c8634545c06995020f443fa50218df027","src/arch/x86_64/sse2/memchr.rs":"0de0444e26d885eaf866220578752aac871e03bebee7b4f5de7fe8a35f5fa97f","src/arch/x86_64/sse2/mod.rs":"38b70ae52a64ec974dbb91d04d6ca8013d9e06d1fe4af852206bbc2faf1c59aa","src/arch/x86_64/sse2/packedpair.rs":"241ea981d8eea6024769f1c9375f726a9bb9700160c5857781d4befd9f5ef55d","src/cow.rs":"34eddd02cb82cc2d5a2c640891d64efe332dabcc1eea5115764200d8f46b66f7","src/ext.rs":"210f89d1e32211bc64414cbd56e97b4f56ce8a8832d321d77a9fe519634e27ea","src/lib.rs":"614f778a41e88a29ea0ceb8e92c839dbb6b5a61c967f8bfd962975e18f932c71","src/macros.rs":"3e4b39252bfa471fad384160a43f113ebfec7bec46a85d16f006622881dd2081","src/memchr.rs":"6ae779ec5d00f443075316e0105edf30b489a38e2e96325bec14ccecd014145b","src/memmem/mod.rs":"1b0a9d6a681fd0887c677c4fc8d4c8f9719ddde250bdd5ea545365c1a7fb9094","src/memmem/searcher.rs":"7763472d43c66df596ca0697c07db0b4666d38a6a14f64f9f298aaf756c4a715","src/tests/memchr/mod.rs":"269f8e4b4f7f5ea458f27a3c174eb1020ffb2484eeba9464170beb51747df69b","src/tests/memchr/naive.rs":"6a0bee033e5edfb5b1d5769a5fa1c78388f7e9ff7bb91cb67f0ad029289e00e7","src/tests/memchr/prop.rs":"1854eea2338c405fe4635aac430f51e10d2069cd37a7489ddaff47da95f8720b","src/tests/mod.rs":"7cec8f809e279310a465c6a7725087970f219a676cc76c83de30c695bb490740","src/tests/packedpair.rs":"b02ec4fbb61a8653cb5f2268c31bc9168b8043347f2abdcc74081acf83b98e15","src/tests/substring/mod.rs":"c7660d10749363ac4687e7da2b5fda60768230425df8ba416c0c28b8d56a5c74","src/tests/substring/naive.rs":"df6f55d165382b8a53762ba4c324926cac13ebc62cde1805f4ce08740b326483","src/tests/substring/prop.rs":"38c15992609b5681a95d838ae6f2933e00a1219f2c971bfba245f96e0729fcdc","src/vector.rs":"e787c4ed2f499802e90910f7aedc7ca41acea39c8ef416b19d6d572c1a540422"},"package":"f52b00d39961fc5b2736ea853c9cc86238e165017a493d1d5c8eac6bdc4cc273"} \ No newline at end of file diff --git a/vendor/memchr/.cargo_vcs_info.json b/vendor/memchr/.cargo_vcs_info.json new file mode 100644 index 00000000000000..97ecb131fc6c45 --- /dev/null +++ b/vendor/memchr/.cargo_vcs_info.json @@ -0,0 +1,6 @@ +{ + "git": { + "sha1": "9ba486e4ba7e865c0510305c5dacba73988d9f31" + }, + "path_in_vcs": "" +} \ No newline at end of file diff --git a/vendor/memchr/.ignore b/vendor/memchr/.ignore new file mode 100644 index 00000000000000..47ec4742e04a74 --- /dev/null +++ b/vendor/memchr/.ignore @@ -0,0 +1 @@ +!.github diff --git a/vendor/memchr/.vim/coc-settings.json b/vendor/memchr/.vim/coc-settings.json new file mode 100644 index 00000000000000..38f35ced55fc24 --- /dev/null +++ b/vendor/memchr/.vim/coc-settings.json @@ -0,0 +1,16 @@ +{ + "rust-analyzer.cargo.allFeatures": false, + "rust-analyzer.linkedProjects": [ + "benchmarks/engines/libc/Cargo.toml", + "benchmarks/engines/rust-bytecount/Cargo.toml", + "benchmarks/engines/rust-jetscii/Cargo.toml", + "benchmarks/engines/rust-memchr/Cargo.toml", + "benchmarks/engines/rust-memchrold/Cargo.toml", + "benchmarks/engines/rust-sliceslice/Cargo.toml", + "benchmarks/engines/rust-std/Cargo.toml", + "benchmarks/engines/stringzilla/Cargo.toml", + "benchmarks/shared/Cargo.toml", + "fuzz/Cargo.toml", + 
"Cargo.toml" + ] +} diff --git a/vendor/memchr/COPYING b/vendor/memchr/COPYING new file mode 100644 index 00000000000000..bb9c20a094e41b --- /dev/null +++ b/vendor/memchr/COPYING @@ -0,0 +1,3 @@ +This project is dual-licensed under the Unlicense and MIT licenses. + +You may use this code under the terms of either license. diff --git a/vendor/memchr/Cargo.lock b/vendor/memchr/Cargo.lock new file mode 100644 index 00000000000000..55c1cd9105dfee --- /dev/null +++ b/vendor/memchr/Cargo.lock @@ -0,0 +1,80 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. +version = 3 + +[[package]] +name = "cfg-if" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" + +[[package]] +name = "getrandom" +version = "0.2.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "190092ea657667030ac6a35e305e62fc4dd69fd98ac98631e5d3a2b1575a12b5" +dependencies = [ + "cfg-if", + "libc", + "wasi", +] + +[[package]] +name = "libc" +version = "0.2.153" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c198f91728a82281a64e1f4f9eeb25d82cb32a5de251c6bd1b5154d63a8e7bd" + +[[package]] +name = "log" +version = "0.4.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b5e6163cb8c49088c2c36f57875e58ccd8c87c7427f7fbd50ea6710b2f3f2e8f" + +[[package]] +name = "memchr" +version = "2.7.6" +dependencies = [ + "log", + "quickcheck", + "rustc-std-workspace-core", +] + +[[package]] +name = "quickcheck" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "588f6378e4dd99458b60ec275b4477add41ce4fa9f64dcba6f15adccb19b50d6" +dependencies = [ + "rand", +] + +[[package]] +name = "rand" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" +dependencies = [ + "rand_core", +] + +[[package]] +name = "rand_core" +version = "0.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" +dependencies = [ + "getrandom", +] + +[[package]] +name = "rustc-std-workspace-core" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1956f5517128a2b6f23ab2dadf1a976f4f5b27962e7724c2bf3d45e539ec098c" + +[[package]] +name = "wasi" +version = "0.11.0+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" diff --git a/vendor/memchr/Cargo.toml b/vendor/memchr/Cargo.toml new file mode 100644 index 00000000000000..bd76618cb11fa3 --- /dev/null +++ b/vendor/memchr/Cargo.toml @@ -0,0 +1,89 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies. +# +# If you are reading this file be aware that the original Cargo.toml +# will likely look very different (and much more reasonable). +# See Cargo.toml.orig for the original contents. 
+ +[package] +edition = "2021" +rust-version = "1.61" +name = "memchr" +version = "2.7.6" +authors = [ + "Andrew Gallant ", + "bluss", +] +build = false +exclude = [ + "/.github", + "/benchmarks", + "/fuzz", + "/scripts", + "/tmp", +] +autolib = false +autobins = false +autoexamples = false +autotests = false +autobenches = false +description = """ +Provides extremely fast (uses SIMD on x86_64, aarch64 and wasm32) routines for +1, 2 or 3 byte search and single substring search. +""" +homepage = "https://github.com/BurntSushi/memchr" +documentation = "https://docs.rs/memchr/" +readme = "README.md" +keywords = [ + "memchr", + "memmem", + "substring", + "find", + "search", +] +license = "Unlicense OR MIT" +repository = "https://github.com/BurntSushi/memchr" + +[package.metadata.docs.rs] +rustdoc-args = ["--generate-link-to-definition"] + +[features] +alloc = [] +default = ["std"] +libc = [] +logging = ["dep:log"] +rustc-dep-of-std = ["core"] +std = ["alloc"] +use_std = ["std"] + +[lib] +name = "memchr" +path = "src/lib.rs" +bench = false + +[dependencies.core] +version = "1.0.0" +optional = true +package = "rustc-std-workspace-core" + +[dependencies.log] +version = "0.4.20" +optional = true + +[dev-dependencies.quickcheck] +version = "1.0.3" +default-features = false + +[profile.bench] +debug = 2 + +[profile.release] +debug = 2 + +[profile.test] +opt-level = 3 +debug = 2 diff --git a/vendor/memchr/LICENSE-MIT b/vendor/memchr/LICENSE-MIT new file mode 100644 index 00000000000000..3b0a5dc09c1e16 --- /dev/null +++ b/vendor/memchr/LICENSE-MIT @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2015 Andrew Gallant + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/memchr/README.md b/vendor/memchr/README.md new file mode 100644 index 00000000000000..db00ebbc935648 --- /dev/null +++ b/vendor/memchr/README.md @@ -0,0 +1,196 @@ +memchr +====== +This library provides heavily optimized routines for string search primitives. + +[![Build status](https://github.com/BurntSushi/memchr/workflows/ci/badge.svg)](https://github.com/BurntSushi/memchr/actions) +[![Crates.io](https://img.shields.io/crates/v/memchr.svg)](https://crates.io/crates/memchr) + +Dual-licensed under MIT or the [UNLICENSE](https://unlicense.org/). + + +### Documentation + +[https://docs.rs/memchr](https://docs.rs/memchr) + + +### Overview + +* The top-level module provides routines for searching for 1, 2 or 3 bytes + in the forward or reverse direction. 
When searching for more than one byte, + positions are considered a match if the byte at that position matches any + of the bytes. +* The `memmem` sub-module provides forward and reverse substring search + routines. + +In all such cases, routines operate on `&[u8]` without regard to encoding. This +is exactly what you want when searching either UTF-8 or arbitrary bytes. + +### Compiling without the standard library + +memchr links to the standard library by default, but you can disable the +`std` feature if you want to use it in a `#![no_std]` crate: + +```toml +[dependencies] +memchr = { version = "2", default-features = false } +``` + +On `x86_64` platforms, when the `std` feature is disabled, the SSE2 accelerated +implementations will be used. When `std` is enabled, AVX2 accelerated +implementations will be used if the CPU is determined to support it at runtime. + +SIMD accelerated routines are also available on the `wasm32` and `aarch64` +targets. The `std` feature is not required to use them. + +When a SIMD version is not available, then this crate falls back to +[SWAR](https://en.wikipedia.org/wiki/SWAR) techniques. + +### Minimum Rust version policy + +This crate's minimum supported `rustc` version is `1.61.0`. + +The current policy is that the minimum Rust version required to use this crate +can be increased in minor version updates. For example, if `crate 1.0` requires +Rust 1.20.0, then `crate 1.0.z` for all values of `z` will also require Rust +1.20.0 or newer. However, `crate 1.y` for `y > 0` may require a newer minimum +version of Rust. + +In general, this crate will be conservative with respect to the minimum +supported version of Rust. + + +### Testing strategy + +Given the complexity of the code in this crate, along with the pervasive use +of `unsafe`, this crate has an extensive testing strategy. It combines multiple +approaches: + +* Hand-written tests. +* Exhaustive-style testing meant to exercise all possible branching and offset + calculations. +* Property based testing through [`quickcheck`](https://github.com/BurntSushi/quickcheck). +* Fuzz testing through [`cargo fuzz`](https://github.com/rust-fuzz/cargo-fuzz). +* A huge suite of benchmarks that are also run as tests. Benchmarks always + confirm that the expected result occurs. + +Improvements to the testing infrastructure are very welcome. + + +### Algorithms used + +At time of writing, this crate's implementation of substring search actually +has a few different algorithms to choose from depending on the situation. + +* For very small haystacks, + [Rabin-Karp](https://en.wikipedia.org/wiki/Rabin%E2%80%93Karp_algorithm) + is used to reduce latency. Rabin-Karp has very small overhead and can often + complete before other searchers have even been constructed. +* For small needles, a variant of the + ["Generic SIMD"](http://0x80.pl/articles/simd-strfind.html#algorithm-1-generic-simd) + algorithm is used. Instead of using the first and last bytes, a heuristic is + used to select bytes based on a background distribution of byte frequencies. +* In all other cases, + [Two-Way](https://en.wikipedia.org/wiki/Two-way_string-matching_algorithm) + is used. If possible, a prefilter based on the "Generic SIMD" algorithm + linked above is used to find candidates quickly. A dynamic heuristic is used + to detect if the prefilter is ineffective, and if so, disables it. + + +### Why is the standard library's substring search so much slower? + +We'll start by establishing what the difference in performance actually +is. 
There are two relevant benchmark classes to consider: `prebuilt` and +`oneshot`. The `prebuilt` benchmarks are designed to measure---to the extent +possible---search time only. That is, the benchmark first starts by building a +searcher and then only tracking the time for _using_ the searcher: + +``` +$ rebar rank benchmarks/record/x86_64/2023-08-26.csv --intersection -e memchr/memmem/prebuilt -e std/memmem/prebuilt +Engine Version Geometric mean of speed ratios Benchmark count +------ ------- ------------------------------ --------------- +rust/memchr/memmem/prebuilt 2.5.0 1.03 53 +rust/std/memmem/prebuilt 1.73.0-nightly 180dffba1 6.50 53 +``` + +Conversely, the `oneshot` benchmark class measures the time it takes to both +build the searcher _and_ use it: + +``` +$ rebar rank benchmarks/record/x86_64/2023-08-26.csv --intersection -e memchr/memmem/oneshot -e std/memmem/oneshot +Engine Version Geometric mean of speed ratios Benchmark count +------ ------- ------------------------------ --------------- +rust/memchr/memmem/oneshot 2.5.0 1.04 53 +rust/std/memmem/oneshot 1.73.0-nightly 180dffba1 5.26 53 +``` + +**NOTE:** Replace `rebar rank` with `rebar cmp` in the above commands to +explore the specific benchmarks and their differences. + +So in both cases, this crate is quite a bit faster over a broad sampling of +benchmarks regardless of whether you measure only search time or search time +plus construction time. The difference is a little smaller when you include +construction time in your measurements. + +These two different types of benchmark classes make for a nice segue into +one reason why the standard library's substring search can be slower: API +design. In the standard library, the only APIs available to you require +one to re-construct the searcher for every search. While you can benefit +from building a searcher once and iterating over all matches in a single +string, you cannot reuse that searcher to search other strings. This might +come up when, for example, searching a file one line at a time. You'll need +to re-build the searcher for every line searched, and this can [really +matter][burntsushi-bstr-blog]. + +**NOTE:** The `prebuilt` benchmark for the standard library can't actually +avoid measuring searcher construction at some level, because there is no API +for it. Instead, the benchmark consists of building the searcher once and then +finding all matches in a single string via an iterator. This tends to +approximate a benchmark where searcher construction isn't measured, but it +isn't perfect. While this means the comparison is not strictly +apples-to-apples, it does reflect what is maximally possible with the standard +library, and thus reflects the best that one could do in a real world scenario. + +While there is more to the story than just API design here, it's important to +point out that even if the standard library's substring search were a precise +clone of this crate internally, it would still be at a disadvantage in some +workloads because of its API. (The same also applies to C's standard library +`memmem` function. There is no way to amortize construction of the searcher. +You need to pay for it on every call.) + +The other reason for the difference in performance is that +the standard library has trouble using SIMD. In particular, substring search +is implemented in the `core` library, where platform specific code generally +can't exist. 
That's an issue because in order to utilize SIMD beyond SSE2 +while maintaining portable binaries, one needs to use [dynamic CPU feature +detection][dynamic-cpu], and that in turn requires platform specific code. +While there is [an RFC for enabling target feature detection in +`core`][core-feature], it doesn't yet exist. + +The bottom line here is that `core`'s substring search implementation is +limited to making use of SSE2, but not AVX. + +Still though, this crate does accelerate substring search even when only SSE2 +is available. The standard library could therefore adopt the techniques in this +crate just for SSE2. The reason why that hasn't happened yet isn't totally +clear to me. It likely needs a champion to push it through. The standard +library tends to be more conservative in these things. With that said, the +standard library does use some [SSE2 acceleration on `x86-64`][std-sse2] added +in [this PR][std-sse2-pr]. However, at the time of writing, it is only used +for short needles and doesn't use the frequency based heuristics found in this +crate. + +**NOTE:** Another thing worth mentioning is that the standard library's +substring search routine requires that both the needle and haystack have type +`&str`. Unless you can assume that your data is valid UTF-8, building a `&str` +will come with the overhead of UTF-8 validation. This may in turn result in +overall slower searching depending on your workload. In contrast, the `memchr` +crate permits both the needle and the haystack to have type `&[u8]`, where +`&[u8]` can be created from a `&str` with zero cost. Therefore, the substring +search in this crate is strictly more flexible than what the standard library +provides. + +[burntsushi-bstr-blog]: https://blog.burntsushi.net/bstr/#motivation-based-on-performance +[dynamic-cpu]: https://doc.rust-lang.org/std/arch/index.html#dynamic-cpu-feature-detection +[core-feature]: https://github.com/rust-lang/rfcs/pull/3469 +[std-sse2]: https://github.com/rust-lang/rust/blob/bf9229a2e366b4c311f059014a4aa08af16de5d8/library/core/src/str/pattern.rs#L1719-L1857 +[std-sse2-pr]: https://github.com/rust-lang/rust/pull/103779 diff --git a/vendor/memchr/UNLICENSE b/vendor/memchr/UNLICENSE new file mode 100644 index 00000000000000..68a49daad8ff7e --- /dev/null +++ b/vendor/memchr/UNLICENSE @@ -0,0 +1,24 @@ +This is free and unencumbered software released into the public domain. + +Anyone is free to copy, modify, publish, use, compile, sell, or +distribute this software, either in source code form or as a compiled +binary, for any purpose, commercial or non-commercial, and by any +means. + +In jurisdictions that recognize copyright laws, the author or authors +of this software dedicate any and all copyright interest in the +software to the public domain. We make this dedication for the benefit +of the public at large and to the detriment of our heirs and +successors. We intend this dedication to be an overt act of +relinquishment in perpetuity of all present and future rights to this +software under copyright law. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR +OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, +ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +OTHER DEALINGS IN THE SOFTWARE. 
+ +For more information, please refer to diff --git a/vendor/memchr/rustfmt.toml b/vendor/memchr/rustfmt.toml new file mode 100644 index 00000000000000..aa37a218b97e5f --- /dev/null +++ b/vendor/memchr/rustfmt.toml @@ -0,0 +1,2 @@ +max_width = 79 +use_small_heuristics = "max" diff --git a/vendor/memchr/src/arch/aarch64/memchr.rs b/vendor/memchr/src/arch/aarch64/memchr.rs new file mode 100644 index 00000000000000..e0053b2a2205b7 --- /dev/null +++ b/vendor/memchr/src/arch/aarch64/memchr.rs @@ -0,0 +1,137 @@ +/*! +Wrapper routines for `memchr` and friends. + +These routines choose the best implementation at compile time. (This is +different from `x86_64` because it is expected that `neon` is almost always +available for `aarch64` targets.) +*/ + +macro_rules! defraw { + ($ty:ident, $find:ident, $start:ident, $end:ident, $($needles:ident),+) => {{ + #[cfg(target_feature = "neon")] + { + use crate::arch::aarch64::neon::memchr::$ty; + + debug!("chose neon for {}", stringify!($ty)); + debug_assert!($ty::is_available()); + // SAFETY: We know that wasm memchr is always available whenever + // code is compiled for `aarch64` with the `neon` target feature + // enabled. + $ty::new_unchecked($($needles),+).$find($start, $end) + } + #[cfg(not(target_feature = "neon"))] + { + use crate::arch::all::memchr::$ty; + + debug!( + "no neon feature available, using fallback for {}", + stringify!($ty), + ); + $ty::new($($needles),+).$find($start, $end) + } + }} +} + +/// memchr, but using raw pointers to represent the haystack. +/// +/// # Safety +/// +/// Pointers must be valid. See `One::find_raw`. +#[inline(always)] +pub(crate) unsafe fn memchr_raw( + n1: u8, + start: *const u8, + end: *const u8, +) -> Option<*const u8> { + defraw!(One, find_raw, start, end, n1) +} + +/// memrchr, but using raw pointers to represent the haystack. +/// +/// # Safety +/// +/// Pointers must be valid. See `One::rfind_raw`. +#[inline(always)] +pub(crate) unsafe fn memrchr_raw( + n1: u8, + start: *const u8, + end: *const u8, +) -> Option<*const u8> { + defraw!(One, rfind_raw, start, end, n1) +} + +/// memchr2, but using raw pointers to represent the haystack. +/// +/// # Safety +/// +/// Pointers must be valid. See `Two::find_raw`. +#[inline(always)] +pub(crate) unsafe fn memchr2_raw( + n1: u8, + n2: u8, + start: *const u8, + end: *const u8, +) -> Option<*const u8> { + defraw!(Two, find_raw, start, end, n1, n2) +} + +/// memrchr2, but using raw pointers to represent the haystack. +/// +/// # Safety +/// +/// Pointers must be valid. See `Two::rfind_raw`. +#[inline(always)] +pub(crate) unsafe fn memrchr2_raw( + n1: u8, + n2: u8, + start: *const u8, + end: *const u8, +) -> Option<*const u8> { + defraw!(Two, rfind_raw, start, end, n1, n2) +} + +/// memchr3, but using raw pointers to represent the haystack. +/// +/// # Safety +/// +/// Pointers must be valid. See `Three::find_raw`. +#[inline(always)] +pub(crate) unsafe fn memchr3_raw( + n1: u8, + n2: u8, + n3: u8, + start: *const u8, + end: *const u8, +) -> Option<*const u8> { + defraw!(Three, find_raw, start, end, n1, n2, n3) +} + +/// memrchr3, but using raw pointers to represent the haystack. +/// +/// # Safety +/// +/// Pointers must be valid. See `Three::rfind_raw`. +#[inline(always)] +pub(crate) unsafe fn memrchr3_raw( + n1: u8, + n2: u8, + n3: u8, + start: *const u8, + end: *const u8, +) -> Option<*const u8> { + defraw!(Three, rfind_raw, start, end, n1, n2, n3) +} + +/// Count all matching bytes, but using raw pointers to represent the haystack. 
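+/// (As with the wrappers above, the implementation is chosen at compile
+/// time: the NEON searcher is used when the `neon` target feature is
+/// enabled, and the portable fallback from `crate::arch::all::memchr` is
+/// used otherwise.)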
+/// +/// # Safety +/// +/// Pointers must be valid. See `One::count_raw`. +#[inline(always)] +pub(crate) unsafe fn count_raw( + n1: u8, + start: *const u8, + end: *const u8, +) -> usize { + defraw!(One, count_raw, start, end, n1) +} diff --git a/vendor/memchr/src/arch/aarch64/mod.rs b/vendor/memchr/src/arch/aarch64/mod.rs new file mode 100644 index 00000000000000..7b3291257b9360 --- /dev/null +++ b/vendor/memchr/src/arch/aarch64/mod.rs @@ -0,0 +1,7 @@ +/*! +Vector algorithms for the `aarch64` target. +*/ + +pub mod neon; + +pub(crate) mod memchr; diff --git a/vendor/memchr/src/arch/aarch64/neon/memchr.rs b/vendor/memchr/src/arch/aarch64/neon/memchr.rs new file mode 100644 index 00000000000000..5fcc76237bad13 --- /dev/null +++ b/vendor/memchr/src/arch/aarch64/neon/memchr.rs @@ -0,0 +1,1031 @@ +/*! +This module defines 128-bit vector implementations of `memchr` and friends. + +The main types in this module are [`One`], [`Two`] and [`Three`]. They are for +searching for one, two or three distinct bytes, respectively, in a haystack. +Each type also has corresponding double ended iterators. These searchers are +typically much faster than scalar routines accomplishing the same task. + +The `One` searcher also provides a [`One::count`] routine for efficiently +counting the number of times a single byte occurs in a haystack. This is +useful, for example, for counting the number of lines in a haystack. This +routine exists because it is usually faster, especially with a high match +count, then using [`One::find`] repeatedly. ([`OneIter`] specializes its +`Iterator::count` implementation to use this routine.) + +Only one, two and three bytes are supported because three bytes is about +the point where one sees diminishing returns. Beyond this point and it's +probably (but not necessarily) better to just use a simple `[bool; 256]` array +or similar. However, it depends mightily on the specific work-load and the +expected match frequency. +*/ + +use core::arch::aarch64::uint8x16_t; + +use crate::{arch::generic::memchr as generic, ext::Pointer, vector::Vector}; + +/// Finds all occurrences of a single byte in a haystack. +#[derive(Clone, Copy, Debug)] +pub struct One(generic::One); + +impl One { + /// Create a new searcher that finds occurrences of the needle byte given. + /// + /// This particular searcher is specialized to use neon vector instructions + /// that typically make it quite fast. + /// + /// If neon is unavailable in the current environment, then `None` is + /// returned. + #[inline] + pub fn new(needle: u8) -> Option { + if One::is_available() { + // SAFETY: we check that neon is available above. + unsafe { Some(One::new_unchecked(needle)) } + } else { + None + } + } + + /// Create a new finder specific to neon vectors and routines without + /// checking that neon is available. + /// + /// # Safety + /// + /// Callers must guarantee that it is safe to execute `neon` instructions + /// in the current environment. + /// + /// Note that it is a common misconception that if one compiles for an + /// `x86_64` target, then they therefore automatically have access to neon + /// instructions. While this is almost always the case, it isn't true in + /// 100% of cases. + #[target_feature(enable = "neon")] + #[inline] + pub unsafe fn new_unchecked(needle: u8) -> One { + One(generic::One::new(needle)) + } + + /// Returns true when this implementation is available in the current + /// environment. + /// + /// When this is true, it is guaranteed that [`One::new`] will return + /// a `Some` value. 
Similarly, when it is false, it is guaranteed that + /// `One::new` will return a `None` value. + /// + /// Note also that for the lifetime of a single program, if this returns + /// true then it will always return true. + #[inline] + pub fn is_available() -> bool { + #[cfg(target_feature = "neon")] + { + true + } + #[cfg(not(target_feature = "neon"))] + { + false + } + } + + /// Return the first occurrence of one of the needle bytes in the given + /// haystack. If no such occurrence exists, then `None` is returned. + /// + /// The occurrence is reported as an offset into `haystack`. Its maximum + /// value is `haystack.len() - 1`. + #[inline] + pub fn find(&self, haystack: &[u8]) -> Option { + // SAFETY: `find_raw` guarantees that if a pointer is returned, it + // falls within the bounds of the start and end pointers. + unsafe { + generic::search_slice_with_raw(haystack, |s, e| { + self.find_raw(s, e) + }) + } + } + + /// Return the last occurrence of one of the needle bytes in the given + /// haystack. If no such occurrence exists, then `None` is returned. + /// + /// The occurrence is reported as an offset into `haystack`. Its maximum + /// value is `haystack.len() - 1`. + #[inline] + pub fn rfind(&self, haystack: &[u8]) -> Option { + // SAFETY: `rfind_raw` guarantees that if a pointer is returned, it + // falls within the bounds of the start and end pointers. + unsafe { + generic::search_slice_with_raw(haystack, |s, e| { + self.rfind_raw(s, e) + }) + } + } + + /// Counts all occurrences of this byte in the given haystack. + #[inline] + pub fn count(&self, haystack: &[u8]) -> usize { + // SAFETY: All of our pointers are derived directly from a borrowed + // slice, which is guaranteed to be valid. + unsafe { + let start = haystack.as_ptr(); + let end = start.add(haystack.len()); + self.count_raw(start, end) + } + } + + /// Like `find`, but accepts and returns raw pointers. + /// + /// When a match is found, the pointer returned is guaranteed to be + /// `>= start` and `< end`. + /// + /// This routine is useful if you're already using raw pointers and would + /// like to avoid converting back to a slice before executing a search. + /// + /// # Safety + /// + /// * Both `start` and `end` must be valid for reads. + /// * Both `start` and `end` must point to an initialized value. + /// * Both `start` and `end` must point to the same allocated object and + /// must either be in bounds or at most one byte past the end of the + /// allocated object. + /// * Both `start` and `end` must be _derived from_ a pointer to the same + /// object. + /// * The distance between `start` and `end` must not overflow `isize`. + /// * The distance being in bounds must not rely on "wrapping around" the + /// address space. + /// + /// Note that callers may pass a pair of pointers such that `start >= end`. + /// In that case, `None` will always be returned. + #[inline] + pub unsafe fn find_raw( + &self, + start: *const u8, + end: *const u8, + ) -> Option<*const u8> { + if start >= end { + return None; + } + if end.distance(start) < uint8x16_t::BYTES { + // SAFETY: We require the caller to pass valid start/end pointers. + return generic::fwd_byte_by_byte(start, end, |b| { + b == self.0.needle1() + }); + } + // SAFETY: Building a `One` means it's safe to call 'neon' routines. + // Also, we've checked that our haystack is big enough to run on the + // vector routine. Pointer validity is caller's responsibility. + self.find_raw_impl(start, end) + } + + /// Like `rfind`, but accepts and returns raw pointers. 
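+    ///
+    /// As a rough illustration of the safe, slice-based counterpart (a
+    /// hedged sketch, not taken from the upstream docs; it assumes this
+    /// module is reachable as `memchr::arch::aarch64::neon::memchr`):
+    ///
+    /// ```ignore
+    /// use memchr::arch::aarch64::neon::memchr::One;
+    ///
+    /// // `rfind` wraps `rfind_raw` and reports the offset of the last match.
+    /// let searcher = One::new(b'o').unwrap();
+    /// assert_eq!(Some(10), searcher.rfind(b"foo bar foo"));
+    /// ```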
+ /// + /// When a match is found, the pointer returned is guaranteed to be + /// `>= start` and `< end`. + /// + /// This routine is useful if you're already using raw pointers and would + /// like to avoid converting back to a slice before executing a search. + /// + /// # Safety + /// + /// * Both `start` and `end` must be valid for reads. + /// * Both `start` and `end` must point to an initialized value. + /// * Both `start` and `end` must point to the same allocated object and + /// must either be in bounds or at most one byte past the end of the + /// allocated object. + /// * Both `start` and `end` must be _derived from_ a pointer to the same + /// object. + /// * The distance between `start` and `end` must not overflow `isize`. + /// * The distance being in bounds must not rely on "wrapping around" the + /// address space. + /// + /// Note that callers may pass a pair of pointers such that `start >= end`. + /// In that case, `None` will always be returned. + #[inline] + pub unsafe fn rfind_raw( + &self, + start: *const u8, + end: *const u8, + ) -> Option<*const u8> { + if start >= end { + return None; + } + if end.distance(start) < uint8x16_t::BYTES { + // SAFETY: We require the caller to pass valid start/end pointers. + return generic::rev_byte_by_byte(start, end, |b| { + b == self.0.needle1() + }); + } + // SAFETY: Building a `One` means it's safe to call 'neon' routines. + // Also, we've checked that our haystack is big enough to run on the + // vector routine. Pointer validity is caller's responsibility. + self.rfind_raw_impl(start, end) + } + + /// Like `count`, but accepts and returns raw pointers. + /// + /// This routine is useful if you're already using raw pointers and would + /// like to avoid converting back to a slice before executing a search. + /// + /// # Safety + /// + /// * Both `start` and `end` must be valid for reads. + /// * Both `start` and `end` must point to an initialized value. + /// * Both `start` and `end` must point to the same allocated object and + /// must either be in bounds or at most one byte past the end of the + /// allocated object. + /// * Both `start` and `end` must be _derived from_ a pointer to the same + /// object. + /// * The distance between `start` and `end` must not overflow `isize`. + /// * The distance being in bounds must not rely on "wrapping around" the + /// address space. + /// + /// Note that callers may pass a pair of pointers such that `start >= end`. + /// In that case, `None` will always be returned. + #[inline] + pub unsafe fn count_raw(&self, start: *const u8, end: *const u8) -> usize { + if start >= end { + return 0; + } + if end.distance(start) < uint8x16_t::BYTES { + // SAFETY: We require the caller to pass valid start/end pointers. + return generic::count_byte_by_byte(start, end, |b| { + b == self.0.needle1() + }); + } + // SAFETY: Building a `One` means it's safe to call 'neon' routines. + // Also, we've checked that our haystack is big enough to run on the + // vector routine. Pointer validity is caller's responsibility. + self.count_raw_impl(start, end) + } + + /// Execute a search using neon vectors and routines. + /// + /// # Safety + /// + /// Same as [`One::find_raw`], except the distance between `start` and + /// `end` must be at least the size of a neon vector (in bytes). + /// + /// (The target feature safety obligation is automatically fulfilled by + /// virtue of being a method on `One`, which can only be constructed + /// when it is safe to call `neon` routines.) 
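+    // (The `*_impl` helpers below add no search logic of their own; they
+    // only re-enable the `neon` target feature so that the shared routines
+    // in `crate::arch::generic::memchr` they delegate to are compiled as
+    // NEON code.)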
+ #[target_feature(enable = "neon")] + #[inline] + unsafe fn find_raw_impl( + &self, + start: *const u8, + end: *const u8, + ) -> Option<*const u8> { + self.0.find_raw(start, end) + } + + /// Execute a search using neon vectors and routines. + /// + /// # Safety + /// + /// Same as [`One::rfind_raw`], except the distance between `start` and + /// `end` must be at least the size of a neon vector (in bytes). + /// + /// (The target feature safety obligation is automatically fulfilled by + /// virtue of being a method on `One`, which can only be constructed + /// when it is safe to call `neon` routines.) + #[target_feature(enable = "neon")] + #[inline] + unsafe fn rfind_raw_impl( + &self, + start: *const u8, + end: *const u8, + ) -> Option<*const u8> { + self.0.rfind_raw(start, end) + } + + /// Execute a count using neon vectors and routines. + /// + /// # Safety + /// + /// Same as [`One::count_raw`], except the distance between `start` and + /// `end` must be at least the size of a neon vector (in bytes). + /// + /// (The target feature safety obligation is automatically fulfilled by + /// virtue of being a method on `One`, which can only be constructed + /// when it is safe to call `neon` routines.) + #[target_feature(enable = "neon")] + #[inline] + unsafe fn count_raw_impl( + &self, + start: *const u8, + end: *const u8, + ) -> usize { + self.0.count_raw(start, end) + } + + /// Returns an iterator over all occurrences of the needle byte in the + /// given haystack. + /// + /// The iterator returned implements `DoubleEndedIterator`. This means it + /// can also be used to find occurrences in reverse order. + #[inline] + pub fn iter<'a, 'h>(&'a self, haystack: &'h [u8]) -> OneIter<'a, 'h> { + OneIter { searcher: self, it: generic::Iter::new(haystack) } + } +} + +/// An iterator over all occurrences of a single byte in a haystack. +/// +/// This iterator implements `DoubleEndedIterator`, which means it can also be +/// used to find occurrences in reverse order. +/// +/// This iterator is created by the [`One::iter`] method. +/// +/// The lifetime parameters are as follows: +/// +/// * `'a` refers to the lifetime of the underlying [`One`] searcher. +/// * `'h` refers to the lifetime of the haystack being searched. +#[derive(Clone, Debug)] +pub struct OneIter<'a, 'h> { + searcher: &'a One, + it: generic::Iter<'h>, +} + +impl<'a, 'h> Iterator for OneIter<'a, 'h> { + type Item = usize; + + #[inline] + fn next(&mut self) -> Option { + // SAFETY: We rely on the generic iterator to provide valid start + // and end pointers, but we guarantee that any pointer returned by + // 'find_raw' falls within the bounds of the start and end pointer. + unsafe { self.it.next(|s, e| self.searcher.find_raw(s, e)) } + } + + #[inline] + fn count(self) -> usize { + self.it.count(|s, e| { + // SAFETY: We rely on our generic iterator to return valid start + // and end pointers. + unsafe { self.searcher.count_raw(s, e) } + }) + } + + #[inline] + fn size_hint(&self) -> (usize, Option) { + self.it.size_hint() + } +} + +impl<'a, 'h> DoubleEndedIterator for OneIter<'a, 'h> { + #[inline] + fn next_back(&mut self) -> Option { + // SAFETY: We rely on the generic iterator to provide valid start + // and end pointers, but we guarantee that any pointer returned by + // 'rfind_raw' falls within the bounds of the start and end pointer. + unsafe { self.it.next_back(|s, e| self.searcher.rfind_raw(s, e)) } + } +} + +impl<'a, 'h> core::iter::FusedIterator for OneIter<'a, 'h> {} + +/// Finds all occurrences of two bytes in a haystack. 
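+///
+/// (A rough usage sketch, illustrative only and not from the upstream docs;
+/// it assumes this module is reachable as
+/// `memchr::arch::aarch64::neon::memchr`:
+///
+/// ```ignore
+/// use memchr::arch::aarch64::neon::memchr::Two;
+///
+/// // Report the first position holding either needle byte.
+/// let searcher = Two::new(b'a', b'b').unwrap();
+/// assert_eq!(Some(1), searcher.find(b"xaybz"));
+/// ```
+/// )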
+/// +/// That is, this reports matches of one of two possible bytes. For example, +/// searching for `a` or `b` in `afoobar` would report matches at offsets `0`, +/// `4` and `5`. +#[derive(Clone, Copy, Debug)] +pub struct Two(generic::Two); + +impl Two { + /// Create a new searcher that finds occurrences of the needle bytes given. + /// + /// This particular searcher is specialized to use neon vector instructions + /// that typically make it quite fast. + /// + /// If neon is unavailable in the current environment, then `None` is + /// returned. + #[inline] + pub fn new(needle1: u8, needle2: u8) -> Option { + if Two::is_available() { + // SAFETY: we check that neon is available above. + unsafe { Some(Two::new_unchecked(needle1, needle2)) } + } else { + None + } + } + + /// Create a new finder specific to neon vectors and routines without + /// checking that neon is available. + /// + /// # Safety + /// + /// Callers must guarantee that it is safe to execute `neon` instructions + /// in the current environment. + /// + /// Note that it is a common misconception that if one compiles for an + /// `x86_64` target, then they therefore automatically have access to neon + /// instructions. While this is almost always the case, it isn't true in + /// 100% of cases. + #[target_feature(enable = "neon")] + #[inline] + pub unsafe fn new_unchecked(needle1: u8, needle2: u8) -> Two { + Two(generic::Two::new(needle1, needle2)) + } + + /// Returns true when this implementation is available in the current + /// environment. + /// + /// When this is true, it is guaranteed that [`Two::new`] will return + /// a `Some` value. Similarly, when it is false, it is guaranteed that + /// `Two::new` will return a `None` value. + /// + /// Note also that for the lifetime of a single program, if this returns + /// true then it will always return true. + #[inline] + pub fn is_available() -> bool { + #[cfg(target_feature = "neon")] + { + true + } + #[cfg(not(target_feature = "neon"))] + { + false + } + } + + /// Return the first occurrence of one of the needle bytes in the given + /// haystack. If no such occurrence exists, then `None` is returned. + /// + /// The occurrence is reported as an offset into `haystack`. Its maximum + /// value is `haystack.len() - 1`. + #[inline] + pub fn find(&self, haystack: &[u8]) -> Option { + // SAFETY: `find_raw` guarantees that if a pointer is returned, it + // falls within the bounds of the start and end pointers. + unsafe { + generic::search_slice_with_raw(haystack, |s, e| { + self.find_raw(s, e) + }) + } + } + + /// Return the last occurrence of one of the needle bytes in the given + /// haystack. If no such occurrence exists, then `None` is returned. + /// + /// The occurrence is reported as an offset into `haystack`. Its maximum + /// value is `haystack.len() - 1`. + #[inline] + pub fn rfind(&self, haystack: &[u8]) -> Option { + // SAFETY: `rfind_raw` guarantees that if a pointer is returned, it + // falls within the bounds of the start and end pointers. + unsafe { + generic::search_slice_with_raw(haystack, |s, e| { + self.rfind_raw(s, e) + }) + } + } + + /// Like `find`, but accepts and returns raw pointers. + /// + /// When a match is found, the pointer returned is guaranteed to be + /// `>= start` and `< end`. + /// + /// This routine is useful if you're already using raw pointers and would + /// like to avoid converting back to a slice before executing a search. + /// + /// # Safety + /// + /// * Both `start` and `end` must be valid for reads. 
+ /// * Both `start` and `end` must point to an initialized value. + /// * Both `start` and `end` must point to the same allocated object and + /// must either be in bounds or at most one byte past the end of the + /// allocated object. + /// * Both `start` and `end` must be _derived from_ a pointer to the same + /// object. + /// * The distance between `start` and `end` must not overflow `isize`. + /// * The distance being in bounds must not rely on "wrapping around" the + /// address space. + /// + /// Note that callers may pass a pair of pointers such that `start >= end`. + /// In that case, `None` will always be returned. + #[inline] + pub unsafe fn find_raw( + &self, + start: *const u8, + end: *const u8, + ) -> Option<*const u8> { + if start >= end { + return None; + } + if end.distance(start) < uint8x16_t::BYTES { + // SAFETY: We require the caller to pass valid start/end pointers. + return generic::fwd_byte_by_byte(start, end, |b| { + b == self.0.needle1() || b == self.0.needle2() + }); + } + // SAFETY: Building a `Two` means it's safe to call 'neon' routines. + // Also, we've checked that our haystack is big enough to run on the + // vector routine. Pointer validity is caller's responsibility. + self.find_raw_impl(start, end) + } + + /// Like `rfind`, but accepts and returns raw pointers. + /// + /// When a match is found, the pointer returned is guaranteed to be + /// `>= start` and `< end`. + /// + /// This routine is useful if you're already using raw pointers and would + /// like to avoid converting back to a slice before executing a search. + /// + /// # Safety + /// + /// * Both `start` and `end` must be valid for reads. + /// * Both `start` and `end` must point to an initialized value. + /// * Both `start` and `end` must point to the same allocated object and + /// must either be in bounds or at most one byte past the end of the + /// allocated object. + /// * Both `start` and `end` must be _derived from_ a pointer to the same + /// object. + /// * The distance between `start` and `end` must not overflow `isize`. + /// * The distance being in bounds must not rely on "wrapping around" the + /// address space. + /// + /// Note that callers may pass a pair of pointers such that `start >= end`. + /// In that case, `None` will always be returned. + #[inline] + pub unsafe fn rfind_raw( + &self, + start: *const u8, + end: *const u8, + ) -> Option<*const u8> { + if start >= end { + return None; + } + if end.distance(start) < uint8x16_t::BYTES { + // SAFETY: We require the caller to pass valid start/end pointers. + return generic::rev_byte_by_byte(start, end, |b| { + b == self.0.needle1() || b == self.0.needle2() + }); + } + // SAFETY: Building a `Two` means it's safe to call 'neon' routines. + // Also, we've checked that our haystack is big enough to run on the + // vector routine. Pointer validity is caller's responsibility. + self.rfind_raw_impl(start, end) + } + + /// Execute a search using neon vectors and routines. + /// + /// # Safety + /// + /// Same as [`Two::find_raw`], except the distance between `start` and + /// `end` must be at least the size of a neon vector (in bytes). + /// + /// (The target feature safety obligation is automatically fulfilled by + /// virtue of being a method on `Two`, which can only be constructed + /// when it is safe to call `neon` routines.) 
+ #[target_feature(enable = "neon")] + #[inline] + unsafe fn find_raw_impl( + &self, + start: *const u8, + end: *const u8, + ) -> Option<*const u8> { + self.0.find_raw(start, end) + } + + /// Execute a search using neon vectors and routines. + /// + /// # Safety + /// + /// Same as [`Two::rfind_raw`], except the distance between `start` and + /// `end` must be at least the size of a neon vector (in bytes). + /// + /// (The target feature safety obligation is automatically fulfilled by + /// virtue of being a method on `Two`, which can only be constructed + /// when it is safe to call `neon` routines.) + #[target_feature(enable = "neon")] + #[inline] + unsafe fn rfind_raw_impl( + &self, + start: *const u8, + end: *const u8, + ) -> Option<*const u8> { + self.0.rfind_raw(start, end) + } + + /// Returns an iterator over all occurrences of the needle bytes in the + /// given haystack. + /// + /// The iterator returned implements `DoubleEndedIterator`. This means it + /// can also be used to find occurrences in reverse order. + #[inline] + pub fn iter<'a, 'h>(&'a self, haystack: &'h [u8]) -> TwoIter<'a, 'h> { + TwoIter { searcher: self, it: generic::Iter::new(haystack) } + } +} + +/// An iterator over all occurrences of two possible bytes in a haystack. +/// +/// This iterator implements `DoubleEndedIterator`, which means it can also be +/// used to find occurrences in reverse order. +/// +/// This iterator is created by the [`Two::iter`] method. +/// +/// The lifetime parameters are as follows: +/// +/// * `'a` refers to the lifetime of the underlying [`Two`] searcher. +/// * `'h` refers to the lifetime of the haystack being searched. +#[derive(Clone, Debug)] +pub struct TwoIter<'a, 'h> { + searcher: &'a Two, + it: generic::Iter<'h>, +} + +impl<'a, 'h> Iterator for TwoIter<'a, 'h> { + type Item = usize; + + #[inline] + fn next(&mut self) -> Option { + // SAFETY: We rely on the generic iterator to provide valid start + // and end pointers, but we guarantee that any pointer returned by + // 'find_raw' falls within the bounds of the start and end pointer. + unsafe { self.it.next(|s, e| self.searcher.find_raw(s, e)) } + } + + #[inline] + fn size_hint(&self) -> (usize, Option) { + self.it.size_hint() + } +} + +impl<'a, 'h> DoubleEndedIterator for TwoIter<'a, 'h> { + #[inline] + fn next_back(&mut self) -> Option { + // SAFETY: We rely on the generic iterator to provide valid start + // and end pointers, but we guarantee that any pointer returned by + // 'rfind_raw' falls within the bounds of the start and end pointer. + unsafe { self.it.next_back(|s, e| self.searcher.rfind_raw(s, e)) } + } +} + +impl<'a, 'h> core::iter::FusedIterator for TwoIter<'a, 'h> {} + +/// Finds all occurrences of three bytes in a haystack. +/// +/// That is, this reports matches of one of three possible bytes. For example, +/// searching for `a`, `b` or `o` in `afoobar` would report matches at offsets +/// `0`, `2`, `3`, `4` and `5`. +#[derive(Clone, Copy, Debug)] +pub struct Three(generic::Three); + +impl Three { + /// Create a new searcher that finds occurrences of the needle bytes given. + /// + /// This particular searcher is specialized to use neon vector instructions + /// that typically make it quite fast. + /// + /// If neon is unavailable in the current environment, then `None` is + /// returned. + #[inline] + pub fn new(needle1: u8, needle2: u8, needle3: u8) -> Option { + if Three::is_available() { + // SAFETY: we check that neon is available above. 
+ unsafe { Some(Three::new_unchecked(needle1, needle2, needle3)) } + } else { + None + } + } + + /// Create a new finder specific to neon vectors and routines without + /// checking that neon is available. + /// + /// # Safety + /// + /// Callers must guarantee that it is safe to execute `neon` instructions + /// in the current environment. + /// + /// Note that it is a common misconception that if one compiles for an + /// `x86_64` target, then they therefore automatically have access to neon + /// instructions. While this is almost always the case, it isn't true in + /// 100% of cases. + #[target_feature(enable = "neon")] + #[inline] + pub unsafe fn new_unchecked( + needle1: u8, + needle2: u8, + needle3: u8, + ) -> Three { + Three(generic::Three::new(needle1, needle2, needle3)) + } + + /// Returns true when this implementation is available in the current + /// environment. + /// + /// When this is true, it is guaranteed that [`Three::new`] will return + /// a `Some` value. Similarly, when it is false, it is guaranteed that + /// `Three::new` will return a `None` value. + /// + /// Note also that for the lifetime of a single program, if this returns + /// true then it will always return true. + #[inline] + pub fn is_available() -> bool { + #[cfg(target_feature = "neon")] + { + true + } + #[cfg(not(target_feature = "neon"))] + { + false + } + } + + /// Return the first occurrence of one of the needle bytes in the given + /// haystack. If no such occurrence exists, then `None` is returned. + /// + /// The occurrence is reported as an offset into `haystack`. Its maximum + /// value is `haystack.len() - 1`. + #[inline] + pub fn find(&self, haystack: &[u8]) -> Option { + // SAFETY: `find_raw` guarantees that if a pointer is returned, it + // falls within the bounds of the start and end pointers. + unsafe { + generic::search_slice_with_raw(haystack, |s, e| { + self.find_raw(s, e) + }) + } + } + + /// Return the last occurrence of one of the needle bytes in the given + /// haystack. If no such occurrence exists, then `None` is returned. + /// + /// The occurrence is reported as an offset into `haystack`. Its maximum + /// value is `haystack.len() - 1`. + #[inline] + pub fn rfind(&self, haystack: &[u8]) -> Option { + // SAFETY: `rfind_raw` guarantees that if a pointer is returned, it + // falls within the bounds of the start and end pointers. + unsafe { + generic::search_slice_with_raw(haystack, |s, e| { + self.rfind_raw(s, e) + }) + } + } + + /// Like `find`, but accepts and returns raw pointers. + /// + /// When a match is found, the pointer returned is guaranteed to be + /// `>= start` and `< end`. + /// + /// This routine is useful if you're already using raw pointers and would + /// like to avoid converting back to a slice before executing a search. + /// + /// # Safety + /// + /// * Both `start` and `end` must be valid for reads. + /// * Both `start` and `end` must point to an initialized value. + /// * Both `start` and `end` must point to the same allocated object and + /// must either be in bounds or at most one byte past the end of the + /// allocated object. + /// * Both `start` and `end` must be _derived from_ a pointer to the same + /// object. + /// * The distance between `start` and `end` must not overflow `isize`. + /// * The distance being in bounds must not rely on "wrapping around" the + /// address space. + /// + /// Note that callers may pass a pair of pointers such that `start >= end`. + /// In that case, `None` will always be returned. 
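+    ///
+    /// For the safe, slice-based API, usage might look like this (an
+    /// illustrative sketch only; the module path is assumed, not taken from
+    /// the upstream docs):
+    ///
+    /// ```ignore
+    /// use memchr::arch::aarch64::neon::memchr::Three;
+    ///
+    /// // Offsets of every `a`, `b` or `o` in the haystack.
+    /// let searcher = Three::new(b'a', b'b', b'o').unwrap();
+    /// let offsets: Vec<usize> = searcher.iter(b"afoobar").collect();
+    /// assert_eq!(offsets, vec![0, 2, 3, 4, 5]);
+    /// ```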
+ #[inline] + pub unsafe fn find_raw( + &self, + start: *const u8, + end: *const u8, + ) -> Option<*const u8> { + if start >= end { + return None; + } + if end.distance(start) < uint8x16_t::BYTES { + // SAFETY: We require the caller to pass valid start/end pointers. + return generic::fwd_byte_by_byte(start, end, |b| { + b == self.0.needle1() + || b == self.0.needle2() + || b == self.0.needle3() + }); + } + // SAFETY: Building a `Three` means it's safe to call 'neon' routines. + // Also, we've checked that our haystack is big enough to run on the + // vector routine. Pointer validity is caller's responsibility. + self.find_raw_impl(start, end) + } + + /// Like `rfind`, but accepts and returns raw pointers. + /// + /// When a match is found, the pointer returned is guaranteed to be + /// `>= start` and `< end`. + /// + /// This routine is useful if you're already using raw pointers and would + /// like to avoid converting back to a slice before executing a search. + /// + /// # Safety + /// + /// * Both `start` and `end` must be valid for reads. + /// * Both `start` and `end` must point to an initialized value. + /// * Both `start` and `end` must point to the same allocated object and + /// must either be in bounds or at most one byte past the end of the + /// allocated object. + /// * Both `start` and `end` must be _derived from_ a pointer to the same + /// object. + /// * The distance between `start` and `end` must not overflow `isize`. + /// * The distance being in bounds must not rely on "wrapping around" the + /// address space. + /// + /// Note that callers may pass a pair of pointers such that `start >= end`. + /// In that case, `None` will always be returned. + #[inline] + pub unsafe fn rfind_raw( + &self, + start: *const u8, + end: *const u8, + ) -> Option<*const u8> { + if start >= end { + return None; + } + if end.distance(start) < uint8x16_t::BYTES { + // SAFETY: We require the caller to pass valid start/end pointers. + return generic::rev_byte_by_byte(start, end, |b| { + b == self.0.needle1() + || b == self.0.needle2() + || b == self.0.needle3() + }); + } + // SAFETY: Building a `Three` means it's safe to call 'neon' routines. + // Also, we've checked that our haystack is big enough to run on the + // vector routine. Pointer validity is caller's responsibility. + self.rfind_raw_impl(start, end) + } + + /// Execute a search using neon vectors and routines. + /// + /// # Safety + /// + /// Same as [`Three::find_raw`], except the distance between `start` and + /// `end` must be at least the size of a neon vector (in bytes). + /// + /// (The target feature safety obligation is automatically fulfilled by + /// virtue of being a method on `Three`, which can only be constructed + /// when it is safe to call `neon` routines.) + #[target_feature(enable = "neon")] + #[inline] + unsafe fn find_raw_impl( + &self, + start: *const u8, + end: *const u8, + ) -> Option<*const u8> { + self.0.find_raw(start, end) + } + + /// Execute a search using neon vectors and routines. + /// + /// # Safety + /// + /// Same as [`Three::rfind_raw`], except the distance between `start` and + /// `end` must be at least the size of a neon vector (in bytes). + /// + /// (The target feature safety obligation is automatically fulfilled by + /// virtue of being a method on `Three`, which can only be constructed + /// when it is safe to call `neon` routines.) 
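+    // (In `find_raw`/`rfind_raw` above, `uint8x16_t::BYTES` is 16, the width
+    // of one NEON vector: anything shorter is handed to the scalar
+    // byte-at-a-time helpers rather than the vector path.)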
+ #[target_feature(enable = "neon")] + #[inline] + unsafe fn rfind_raw_impl( + &self, + start: *const u8, + end: *const u8, + ) -> Option<*const u8> { + self.0.rfind_raw(start, end) + } + + /// Returns an iterator over all occurrences of the needle byte in the + /// given haystack. + /// + /// The iterator returned implements `DoubleEndedIterator`. This means it + /// can also be used to find occurrences in reverse order. + #[inline] + pub fn iter<'a, 'h>(&'a self, haystack: &'h [u8]) -> ThreeIter<'a, 'h> { + ThreeIter { searcher: self, it: generic::Iter::new(haystack) } + } +} + +/// An iterator over all occurrences of three possible bytes in a haystack. +/// +/// This iterator implements `DoubleEndedIterator`, which means it can also be +/// used to find occurrences in reverse order. +/// +/// This iterator is created by the [`Three::iter`] method. +/// +/// The lifetime parameters are as follows: +/// +/// * `'a` refers to the lifetime of the underlying [`Three`] searcher. +/// * `'h` refers to the lifetime of the haystack being searched. +#[derive(Clone, Debug)] +pub struct ThreeIter<'a, 'h> { + searcher: &'a Three, + it: generic::Iter<'h>, +} + +impl<'a, 'h> Iterator for ThreeIter<'a, 'h> { + type Item = usize; + + #[inline] + fn next(&mut self) -> Option { + // SAFETY: We rely on the generic iterator to provide valid start + // and end pointers, but we guarantee that any pointer returned by + // 'find_raw' falls within the bounds of the start and end pointer. + unsafe { self.it.next(|s, e| self.searcher.find_raw(s, e)) } + } + + #[inline] + fn size_hint(&self) -> (usize, Option) { + self.it.size_hint() + } +} + +impl<'a, 'h> DoubleEndedIterator for ThreeIter<'a, 'h> { + #[inline] + fn next_back(&mut self) -> Option { + // SAFETY: We rely on the generic iterator to provide valid start + // and end pointers, but we guarantee that any pointer returned by + // 'rfind_raw' falls within the bounds of the start and end pointer. 
+ unsafe { self.it.next_back(|s, e| self.searcher.rfind_raw(s, e)) } + } +} + +impl<'a, 'h> core::iter::FusedIterator for ThreeIter<'a, 'h> {} + +#[cfg(test)] +mod tests { + use super::*; + + define_memchr_quickcheck!(super); + + #[test] + fn forward_one() { + crate::tests::memchr::Runner::new(1).forward_iter( + |haystack, needles| { + Some(One::new(needles[0])?.iter(haystack).collect()) + }, + ) + } + + #[test] + fn reverse_one() { + crate::tests::memchr::Runner::new(1).reverse_iter( + |haystack, needles| { + Some(One::new(needles[0])?.iter(haystack).rev().collect()) + }, + ) + } + + #[test] + fn count_one() { + crate::tests::memchr::Runner::new(1).count_iter(|haystack, needles| { + Some(One::new(needles[0])?.iter(haystack).count()) + }) + } + + #[test] + fn forward_two() { + crate::tests::memchr::Runner::new(2).forward_iter( + |haystack, needles| { + let n1 = needles.get(0).copied()?; + let n2 = needles.get(1).copied()?; + Some(Two::new(n1, n2)?.iter(haystack).collect()) + }, + ) + } + + #[test] + fn reverse_two() { + crate::tests::memchr::Runner::new(2).reverse_iter( + |haystack, needles| { + let n1 = needles.get(0).copied()?; + let n2 = needles.get(1).copied()?; + Some(Two::new(n1, n2)?.iter(haystack).rev().collect()) + }, + ) + } + + #[test] + fn forward_three() { + crate::tests::memchr::Runner::new(3).forward_iter( + |haystack, needles| { + let n1 = needles.get(0).copied()?; + let n2 = needles.get(1).copied()?; + let n3 = needles.get(2).copied()?; + Some(Three::new(n1, n2, n3)?.iter(haystack).collect()) + }, + ) + } + + #[test] + fn reverse_three() { + crate::tests::memchr::Runner::new(3).reverse_iter( + |haystack, needles| { + let n1 = needles.get(0).copied()?; + let n2 = needles.get(1).copied()?; + let n3 = needles.get(2).copied()?; + Some(Three::new(n1, n2, n3)?.iter(haystack).rev().collect()) + }, + ) + } +} diff --git a/vendor/memchr/src/arch/aarch64/neon/mod.rs b/vendor/memchr/src/arch/aarch64/neon/mod.rs new file mode 100644 index 00000000000000..ccf9cf81f4bf47 --- /dev/null +++ b/vendor/memchr/src/arch/aarch64/neon/mod.rs @@ -0,0 +1,6 @@ +/*! +Algorithms for the `aarch64` target using 128-bit vectors via NEON. +*/ + +pub mod memchr; +pub mod packedpair; diff --git a/vendor/memchr/src/arch/aarch64/neon/packedpair.rs b/vendor/memchr/src/arch/aarch64/neon/packedpair.rs new file mode 100644 index 00000000000000..5cc2a029697b88 --- /dev/null +++ b/vendor/memchr/src/arch/aarch64/neon/packedpair.rs @@ -0,0 +1,236 @@ +/*! +A 128-bit vector implementation of the "packed pair" SIMD algorithm. + +The "packed pair" algorithm is based on the [generic SIMD] algorithm. The main +difference is that it (by default) uses a background distribution of byte +frequencies to heuristically select the pair of bytes to search for. + +[generic SIMD]: http://0x80.pl/articles/simd-strfind.html#first-and-last +*/ + +use core::arch::aarch64::uint8x16_t; + +use crate::arch::{all::packedpair::Pair, generic::packedpair}; + +/// A "packed pair" finder that uses 128-bit vector operations. +/// +/// This finder picks two bytes that it believes have high predictive power +/// for indicating an overall match of a needle. Depending on whether +/// `Finder::find` or `Finder::find_prefilter` is used, it reports offsets +/// where the needle matches or could match. In the prefilter case, candidates +/// are reported whenever the [`Pair`] of bytes given matches. +#[derive(Clone, Copy, Debug)] +pub struct Finder(packedpair::Finder); + +/// A "packed pair" finder that uses 128-bit vector operations. 
+/// +/// This finder picks two bytes that it believes have high predictive power +/// for indicating an overall match of a needle. Depending on whether +/// `Finder::find` or `Finder::find_prefilter` is used, it reports offsets +/// where the needle matches or could match. In the prefilter case, candidates +/// are reported whenever the [`Pair`] of bytes given matches. +impl Finder { + /// Create a new pair searcher. The searcher returned can either report + /// exact matches of `needle` or act as a prefilter and report candidate + /// positions of `needle`. + /// + /// If neon is unavailable in the current environment or if a [`Pair`] + /// could not be constructed from the needle given, then `None` is + /// returned. + #[inline] + pub fn new(needle: &[u8]) -> Option { + Finder::with_pair(needle, Pair::new(needle)?) + } + + /// Create a new "packed pair" finder using the pair of bytes given. + /// + /// This constructor permits callers to control precisely which pair of + /// bytes is used as a predicate. + /// + /// If neon is unavailable in the current environment, then `None` is + /// returned. + #[inline] + pub fn with_pair(needle: &[u8], pair: Pair) -> Option { + if Finder::is_available() { + // SAFETY: we check that NEON is available above. We are also + // guaranteed to have needle.len() > 1 because we have a valid + // Pair. + unsafe { Some(Finder::with_pair_impl(needle, pair)) } + } else { + None + } + } + + /// Create a new `Finder` specific to neon vectors and routines. + /// + /// # Safety + /// + /// Same as the safety for `packedpair::Finder::new`, and callers must also + /// ensure that neon is available. + #[target_feature(enable = "neon")] + #[inline] + unsafe fn with_pair_impl(needle: &[u8], pair: Pair) -> Finder { + let finder = packedpair::Finder::::new(needle, pair); + Finder(finder) + } + + /// Returns true when this implementation is available in the current + /// environment. + /// + /// When this is true, it is guaranteed that [`Finder::with_pair`] will + /// return a `Some` value. Similarly, when it is false, it is guaranteed + /// that `Finder::with_pair` will return a `None` value. Notice that this + /// does not guarantee that [`Finder::new`] will return a `Finder`. Namely, + /// even when `Finder::is_available` is true, it is not guaranteed that a + /// valid [`Pair`] can be found from the needle given. + /// + /// Note also that for the lifetime of a single program, if this returns + /// true then it will always return true. + #[inline] + pub fn is_available() -> bool { + #[cfg(target_feature = "neon")] + { + true + } + #[cfg(not(target_feature = "neon"))] + { + false + } + } + + /// Execute a search using neon vectors and routines. + /// + /// # Panics + /// + /// When `haystack.len()` is less than [`Finder::min_haystack_len`]. + #[inline] + pub fn find(&self, haystack: &[u8], needle: &[u8]) -> Option { + // SAFETY: Building a `Finder` means it's safe to call 'neon' routines. + unsafe { self.find_impl(haystack, needle) } + } + + /// Execute a search using neon vectors and routines. + /// + /// # Panics + /// + /// When `haystack.len()` is less than [`Finder::min_haystack_len`]. + #[inline] + pub fn find_prefilter(&self, haystack: &[u8]) -> Option { + // SAFETY: Building a `Finder` means it's safe to call 'neon' routines. + unsafe { self.find_prefilter_impl(haystack) } + } + + /// Execute a search using neon vectors and routines. + /// + /// # Panics + /// + /// When `haystack.len()` is less than [`Finder::min_haystack_len`]. 
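+    ///
+    /// (Callers are expected to consult [`Finder::min_haystack_len`] first
+    /// and route shorter haystacks to some other strategy; this type is a
+    /// low-level building block rather than a general-purpose searcher.)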
+ /// + /// # Safety + /// + /// (The target feature safety obligation is automatically fulfilled by + /// virtue of being a method on `Finder`, which can only be constructed + /// when it is safe to call `neon` routines.) + #[target_feature(enable = "neon")] + #[inline] + unsafe fn find_impl( + &self, + haystack: &[u8], + needle: &[u8], + ) -> Option { + self.0.find(haystack, needle) + } + + /// Execute a prefilter search using neon vectors and routines. + /// + /// # Panics + /// + /// When `haystack.len()` is less than [`Finder::min_haystack_len`]. + /// + /// # Safety + /// + /// (The target feature safety obligation is automatically fulfilled by + /// virtue of being a method on `Finder`, which can only be constructed + /// when it is safe to call `neon` routines.) + #[target_feature(enable = "neon")] + #[inline] + unsafe fn find_prefilter_impl(&self, haystack: &[u8]) -> Option { + self.0.find_prefilter(haystack) + } + + /// Returns the pair of offsets (into the needle) used to check as a + /// predicate before confirming whether a needle exists at a particular + /// position. + #[inline] + pub fn pair(&self) -> &Pair { + self.0.pair() + } + + /// Returns the minimum haystack length that this `Finder` can search. + /// + /// Using a haystack with length smaller than this in a search will result + /// in a panic. The reason for this restriction is that this finder is + /// meant to be a low-level component that is part of a larger substring + /// strategy. In that sense, it avoids trying to handle all cases and + /// instead only handles the cases that it can handle very well. + #[inline] + pub fn min_haystack_len(&self) -> usize { + self.0.min_haystack_len() + } +} + +#[cfg(test)] +mod tests { + use super::*; + + fn find(haystack: &[u8], needle: &[u8]) -> Option> { + let f = Finder::new(needle)?; + if haystack.len() < f.min_haystack_len() { + return None; + } + Some(f.find(haystack, needle)) + } + + define_substring_forward_quickcheck!(find); + + #[test] + fn forward_substring() { + crate::tests::substring::Runner::new().fwd(find).run() + } + + #[test] + fn forward_packedpair() { + fn find( + haystack: &[u8], + needle: &[u8], + index1: u8, + index2: u8, + ) -> Option> { + let pair = Pair::with_indices(needle, index1, index2)?; + let f = Finder::with_pair(needle, pair)?; + if haystack.len() < f.min_haystack_len() { + return None; + } + Some(f.find(haystack, needle)) + } + crate::tests::packedpair::Runner::new().fwd(find).run() + } + + #[test] + fn forward_packedpair_prefilter() { + fn find( + haystack: &[u8], + needle: &[u8], + index1: u8, + index2: u8, + ) -> Option> { + let pair = Pair::with_indices(needle, index1, index2)?; + let f = Finder::with_pair(needle, pair)?; + if haystack.len() < f.min_haystack_len() { + return None; + } + Some(f.find_prefilter(haystack)) + } + crate::tests::packedpair::Runner::new().fwd(find).run() + } +} diff --git a/vendor/memchr/src/arch/all/memchr.rs b/vendor/memchr/src/arch/all/memchr.rs new file mode 100644 index 00000000000000..7f327f86f45732 --- /dev/null +++ b/vendor/memchr/src/arch/all/memchr.rs @@ -0,0 +1,1022 @@ +/*! +Provides architecture independent implementations of `memchr` and friends. + +The main types in this module are [`One`], [`Two`] and [`Three`]. They are for +searching for one, two or three distinct bytes, respectively, in a haystack. +Each type also has corresponding double ended iterators. 
These searchers +are typically slower than hand-coded vector routines accomplishing the same +task, but are also typically faster than naive scalar code. These routines +effectively work by treating a `usize` as a vector of 8-bit lanes, and thus +achieves some level of data parallelism even without explicit vector support. + +The `One` searcher also provides a [`One::count`] routine for efficiently +counting the number of times a single byte occurs in a haystack. This is +useful, for example, for counting the number of lines in a haystack. This +routine exists because it is usually faster, especially with a high match +count, than using [`One::find`] repeatedly. ([`OneIter`] specializes its +`Iterator::count` implementation to use this routine.) + +Only one, two and three bytes are supported because three bytes is about +the point where one sees diminishing returns. Beyond this point and it's +probably (but not necessarily) better to just use a simple `[bool; 256]` array +or similar. However, it depends mightily on the specific work-load and the +expected match frequency. +*/ + +use crate::{arch::generic::memchr as generic, ext::Pointer}; + +/// The number of bytes in a single `usize` value. +const USIZE_BYTES: usize = (usize::BITS / 8) as usize; +/// The bits that must be zero for a `*const usize` to be properly aligned. +const USIZE_ALIGN: usize = USIZE_BYTES - 1; + +/// Finds all occurrences of a single byte in a haystack. +#[derive(Clone, Copy, Debug)] +pub struct One { + s1: u8, + v1: usize, +} + +impl One { + /// The number of bytes we examine per each iteration of our search loop. + const LOOP_BYTES: usize = 2 * USIZE_BYTES; + + /// Create a new searcher that finds occurrences of the byte given. + #[inline] + pub fn new(needle: u8) -> One { + One { s1: needle, v1: splat(needle) } + } + + /// A test-only routine so that we can bundle a bunch of quickcheck + /// properties into a single macro. Basically, this provides a constructor + /// that makes it identical to most other memchr implementations, which + /// have fallible constructors. + #[cfg(test)] + pub(crate) fn try_new(needle: u8) -> Option { + Some(One::new(needle)) + } + + /// Return the first occurrence of the needle in the given haystack. If no + /// such occurrence exists, then `None` is returned. + /// + /// The occurrence is reported as an offset into `haystack`. Its maximum + /// value for a non-empty haystack is `haystack.len() - 1`. + #[inline] + pub fn find(&self, haystack: &[u8]) -> Option { + // SAFETY: `find_raw` guarantees that if a pointer is returned, it + // falls within the bounds of the start and end pointers. + unsafe { + generic::search_slice_with_raw(haystack, |s, e| { + self.find_raw(s, e) + }) + } + } + + /// Return the last occurrence of the needle in the given haystack. If no + /// such occurrence exists, then `None` is returned. + /// + /// The occurrence is reported as an offset into `haystack`. Its maximum + /// value for a non-empty haystack is `haystack.len() - 1`. + #[inline] + pub fn rfind(&self, haystack: &[u8]) -> Option { + // SAFETY: `find_raw` guarantees that if a pointer is returned, it + // falls within the bounds of the start and end pointers. + unsafe { + generic::search_slice_with_raw(haystack, |s, e| { + self.rfind_raw(s, e) + }) + } + } + + /// Counts all occurrences of this byte in the given haystack. + #[inline] + pub fn count(&self, haystack: &[u8]) -> usize { + // SAFETY: All of our pointers are derived directly from a borrowed + // slice, which is guaranteed to be valid. 
+ unsafe { + let start = haystack.as_ptr(); + let end = start.add(haystack.len()); + self.count_raw(start, end) + } + } + + /// Like `find`, but accepts and returns raw pointers. + /// + /// When a match is found, the pointer returned is guaranteed to be + /// `>= start` and `< end`. + /// + /// This routine is useful if you're already using raw pointers and would + /// like to avoid converting back to a slice before executing a search. + /// + /// # Safety + /// + /// * Both `start` and `end` must be valid for reads. + /// * Both `start` and `end` must point to an initialized value. + /// * Both `start` and `end` must point to the same allocated object and + /// must either be in bounds or at most one byte past the end of the + /// allocated object. + /// * Both `start` and `end` must be _derived from_ a pointer to the same + /// object. + /// * The distance between `start` and `end` must not overflow `isize`. + /// * The distance being in bounds must not rely on "wrapping around" the + /// address space. + /// + /// Note that callers may pass a pair of pointers such that `start >= end`. + /// In that case, `None` will always be returned. + #[inline] + pub unsafe fn find_raw( + &self, + start: *const u8, + end: *const u8, + ) -> Option<*const u8> { + if start >= end { + return None; + } + let confirm = |b| self.confirm(b); + let len = end.distance(start); + if len < USIZE_BYTES { + return generic::fwd_byte_by_byte(start, end, confirm); + } + + // The start of the search may not be aligned to `*const usize`, + // so we do an unaligned load here. + let chunk = start.cast::().read_unaligned(); + if self.has_needle(chunk) { + return generic::fwd_byte_by_byte(start, end, confirm); + } + + // And now we start our search at a guaranteed aligned position. + // The first iteration of the loop below will overlap with the the + // unaligned chunk above in cases where the search starts at an + // unaligned offset, but that's okay as we're only here if that + // above didn't find a match. + let mut cur = + start.add(USIZE_BYTES - (start.as_usize() & USIZE_ALIGN)); + debug_assert!(cur > start); + if len <= One::LOOP_BYTES { + return generic::fwd_byte_by_byte(cur, end, confirm); + } + debug_assert!(end.sub(One::LOOP_BYTES) >= start); + while cur <= end.sub(One::LOOP_BYTES) { + debug_assert_eq!(0, cur.as_usize() % USIZE_BYTES); + + let a = cur.cast::().read(); + let b = cur.add(USIZE_BYTES).cast::().read(); + if self.has_needle(a) || self.has_needle(b) { + break; + } + cur = cur.add(One::LOOP_BYTES); + } + generic::fwd_byte_by_byte(cur, end, confirm) + } + + /// Like `rfind`, but accepts and returns raw pointers. + /// + /// When a match is found, the pointer returned is guaranteed to be + /// `>= start` and `< end`. + /// + /// This routine is useful if you're already using raw pointers and would + /// like to avoid converting back to a slice before executing a search. + /// + /// # Safety + /// + /// * Both `start` and `end` must be valid for reads. + /// * Both `start` and `end` must point to an initialized value. + /// * Both `start` and `end` must point to the same allocated object and + /// must either be in bounds or at most one byte past the end of the + /// allocated object. + /// * Both `start` and `end` must be _derived from_ a pointer to the same + /// object. + /// * The distance between `start` and `end` must not overflow `isize`. + /// * The distance being in bounds must not rely on "wrapping around" the + /// address space. 
+ /// + /// Note that callers may pass a pair of pointers such that `start >= end`. + /// In that case, `None` will always be returned. + #[inline] + pub unsafe fn rfind_raw( + &self, + start: *const u8, + end: *const u8, + ) -> Option<*const u8> { + if start >= end { + return None; + } + let confirm = |b| self.confirm(b); + let len = end.distance(start); + if len < USIZE_BYTES { + return generic::rev_byte_by_byte(start, end, confirm); + } + + let chunk = end.sub(USIZE_BYTES).cast::().read_unaligned(); + if self.has_needle(chunk) { + return generic::rev_byte_by_byte(start, end, confirm); + } + + let mut cur = end.sub(end.as_usize() & USIZE_ALIGN); + debug_assert!(start <= cur && cur <= end); + if len <= One::LOOP_BYTES { + return generic::rev_byte_by_byte(start, cur, confirm); + } + while cur >= start.add(One::LOOP_BYTES) { + debug_assert_eq!(0, cur.as_usize() % USIZE_BYTES); + + let a = cur.sub(2 * USIZE_BYTES).cast::().read(); + let b = cur.sub(1 * USIZE_BYTES).cast::().read(); + if self.has_needle(a) || self.has_needle(b) { + break; + } + cur = cur.sub(One::LOOP_BYTES); + } + generic::rev_byte_by_byte(start, cur, confirm) + } + + /// Counts all occurrences of this byte in the given haystack represented + /// by raw pointers. + /// + /// This routine is useful if you're already using raw pointers and would + /// like to avoid converting back to a slice before executing a search. + /// + /// # Safety + /// + /// * Both `start` and `end` must be valid for reads. + /// * Both `start` and `end` must point to an initialized value. + /// * Both `start` and `end` must point to the same allocated object and + /// must either be in bounds or at most one byte past the end of the + /// allocated object. + /// * Both `start` and `end` must be _derived from_ a pointer to the same + /// object. + /// * The distance between `start` and `end` must not overflow `isize`. + /// * The distance being in bounds must not rely on "wrapping around" the + /// address space. + /// + /// Note that callers may pass a pair of pointers such that `start >= end`. + /// In that case, `0` will always be returned. + #[inline] + pub unsafe fn count_raw(&self, start: *const u8, end: *const u8) -> usize { + if start >= end { + return 0; + } + // Sadly I couldn't get the SWAR approach to work here, so we just do + // one byte at a time for now. PRs to improve this are welcome. + let mut ptr = start; + let mut count = 0; + while ptr < end { + count += (ptr.read() == self.s1) as usize; + ptr = ptr.offset(1); + } + count + } + + /// Returns an iterator over all occurrences of the needle byte in the + /// given haystack. + /// + /// The iterator returned implements `DoubleEndedIterator`. This means it + /// can also be used to find occurrences in reverse order. + pub fn iter<'a, 'h>(&'a self, haystack: &'h [u8]) -> OneIter<'a, 'h> { + OneIter { searcher: self, it: generic::Iter::new(haystack) } + } + + #[inline(always)] + fn has_needle(&self, chunk: usize) -> bool { + has_zero_byte(self.v1 ^ chunk) + } + + #[inline(always)] + fn confirm(&self, haystack_byte: u8) -> bool { + self.s1 == haystack_byte + } +} + +/// An iterator over all occurrences of a single byte in a haystack. +/// +/// This iterator implements `DoubleEndedIterator`, which means it can also be +/// used to find occurrences in reverse order. +/// +/// This iterator is created by the [`One::iter`] method. +/// +/// The lifetime parameters are as follows: +/// +/// * `'a` refers to the lifetime of the underlying [`One`] searcher. 
+/// * `'h` refers to the lifetime of the haystack being searched. +#[derive(Clone, Debug)] +pub struct OneIter<'a, 'h> { + /// The underlying memchr searcher. + searcher: &'a One, + /// Generic iterator implementation. + it: generic::Iter<'h>, +} + +impl<'a, 'h> Iterator for OneIter<'a, 'h> { + type Item = usize; + + #[inline] + fn next(&mut self) -> Option { + // SAFETY: We rely on the generic iterator to provide valid start + // and end pointers, but we guarantee that any pointer returned by + // 'find_raw' falls within the bounds of the start and end pointer. + unsafe { self.it.next(|s, e| self.searcher.find_raw(s, e)) } + } + + #[inline] + fn count(self) -> usize { + self.it.count(|s, e| { + // SAFETY: We rely on our generic iterator to return valid start + // and end pointers. + unsafe { self.searcher.count_raw(s, e) } + }) + } + + #[inline] + fn size_hint(&self) -> (usize, Option) { + self.it.size_hint() + } +} + +impl<'a, 'h> DoubleEndedIterator for OneIter<'a, 'h> { + #[inline] + fn next_back(&mut self) -> Option { + // SAFETY: We rely on the generic iterator to provide valid start + // and end pointers, but we guarantee that any pointer returned by + // 'rfind_raw' falls within the bounds of the start and end pointer. + unsafe { self.it.next_back(|s, e| self.searcher.rfind_raw(s, e)) } + } +} + +/// Finds all occurrences of two bytes in a haystack. +/// +/// That is, this reports matches of one of two possible bytes. For example, +/// searching for `a` or `b` in `afoobar` would report matches at offsets `0`, +/// `4` and `5`. +#[derive(Clone, Copy, Debug)] +pub struct Two { + s1: u8, + s2: u8, + v1: usize, + v2: usize, +} + +impl Two { + /// Create a new searcher that finds occurrences of the two needle bytes + /// given. + #[inline] + pub fn new(needle1: u8, needle2: u8) -> Two { + Two { + s1: needle1, + s2: needle2, + v1: splat(needle1), + v2: splat(needle2), + } + } + + /// A test-only routine so that we can bundle a bunch of quickcheck + /// properties into a single macro. Basically, this provides a constructor + /// that makes it identical to most other memchr implementations, which + /// have fallible constructors. + #[cfg(test)] + pub(crate) fn try_new(needle1: u8, needle2: u8) -> Option { + Some(Two::new(needle1, needle2)) + } + + /// Return the first occurrence of one of the needle bytes in the given + /// haystack. If no such occurrence exists, then `None` is returned. + /// + /// The occurrence is reported as an offset into `haystack`. Its maximum + /// value for a non-empty haystack is `haystack.len() - 1`. + #[inline] + pub fn find(&self, haystack: &[u8]) -> Option { + // SAFETY: `find_raw` guarantees that if a pointer is returned, it + // falls within the bounds of the start and end pointers. + unsafe { + generic::search_slice_with_raw(haystack, |s, e| { + self.find_raw(s, e) + }) + } + } + + /// Return the last occurrence of one of the needle bytes in the given + /// haystack. If no such occurrence exists, then `None` is returned. + /// + /// The occurrence is reported as an offset into `haystack`. Its maximum + /// value for a non-empty haystack is `haystack.len() - 1`. + #[inline] + pub fn rfind(&self, haystack: &[u8]) -> Option { + // SAFETY: `find_raw` guarantees that if a pointer is returned, it + // falls within the bounds of the start and end pointers. + unsafe { + generic::search_slice_with_raw(haystack, |s, e| { + self.rfind_raw(s, e) + }) + } + } + + /// Like `find`, but accepts and returns raw pointers. 
+ /// + /// When a match is found, the pointer returned is guaranteed to be + /// `>= start` and `< end`. + /// + /// This routine is useful if you're already using raw pointers and would + /// like to avoid converting back to a slice before executing a search. + /// + /// # Safety + /// + /// * Both `start` and `end` must be valid for reads. + /// * Both `start` and `end` must point to an initialized value. + /// * Both `start` and `end` must point to the same allocated object and + /// must either be in bounds or at most one byte past the end of the + /// allocated object. + /// * Both `start` and `end` must be _derived from_ a pointer to the same + /// object. + /// * The distance between `start` and `end` must not overflow `isize`. + /// * The distance being in bounds must not rely on "wrapping around" the + /// address space. + /// + /// Note that callers may pass a pair of pointers such that `start >= end`. + /// In that case, `None` will always be returned. + #[inline] + pub unsafe fn find_raw( + &self, + start: *const u8, + end: *const u8, + ) -> Option<*const u8> { + if start >= end { + return None; + } + let confirm = |b| self.confirm(b); + let len = end.distance(start); + if len < USIZE_BYTES { + return generic::fwd_byte_by_byte(start, end, confirm); + } + + // The start of the search may not be aligned to `*const usize`, + // so we do an unaligned load here. + let chunk = start.cast::().read_unaligned(); + if self.has_needle(chunk) { + return generic::fwd_byte_by_byte(start, end, confirm); + } + + // And now we start our search at a guaranteed aligned position. + // The first iteration of the loop below will overlap with the + // unaligned chunk above in cases where the search starts at an + // unaligned offset, but that's okay as we're only here if that + // above didn't find a match. + let mut cur = + start.add(USIZE_BYTES - (start.as_usize() & USIZE_ALIGN)); + debug_assert!(cur > start); + debug_assert!(end.sub(USIZE_BYTES) >= start); + while cur <= end.sub(USIZE_BYTES) { + debug_assert_eq!(0, cur.as_usize() % USIZE_BYTES); + + let chunk = cur.cast::().read(); + if self.has_needle(chunk) { + break; + } + cur = cur.add(USIZE_BYTES); + } + generic::fwd_byte_by_byte(cur, end, confirm) + } + + /// Like `rfind`, but accepts and returns raw pointers. + /// + /// When a match is found, the pointer returned is guaranteed to be + /// `>= start` and `< end`. + /// + /// This routine is useful if you're already using raw pointers and would + /// like to avoid converting back to a slice before executing a search. + /// + /// # Safety + /// + /// * Both `start` and `end` must be valid for reads. + /// * Both `start` and `end` must point to an initialized value. + /// * Both `start` and `end` must point to the same allocated object and + /// must either be in bounds or at most one byte past the end of the + /// allocated object. + /// * Both `start` and `end` must be _derived from_ a pointer to the same + /// object. + /// * The distance between `start` and `end` must not overflow `isize`. + /// * The distance being in bounds must not rely on "wrapping around" the + /// address space. + /// + /// Note that callers may pass a pair of pointers such that `start >= end`. + /// In that case, `None` will always be returned. 
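+ ///
+ /// # Example
+ ///
+ /// The safe [`Two::rfind`] wrapper, which is built on top of this routine,
+ /// shows the search semantics (reporting the last occurrence of either
+ /// byte):
+ ///
+ /// ```
+ /// use memchr::arch::all::memchr::Two;
+ ///
+ /// let two = Two::new(b'a', b'b');
+ /// assert_eq!(Some(5), two.rfind(b"afoobar"));
+ /// ```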
+ #[inline] + pub unsafe fn rfind_raw( + &self, + start: *const u8, + end: *const u8, + ) -> Option<*const u8> { + if start >= end { + return None; + } + let confirm = |b| self.confirm(b); + let len = end.distance(start); + if len < USIZE_BYTES { + return generic::rev_byte_by_byte(start, end, confirm); + } + + let chunk = end.sub(USIZE_BYTES).cast::().read_unaligned(); + if self.has_needle(chunk) { + return generic::rev_byte_by_byte(start, end, confirm); + } + + let mut cur = end.sub(end.as_usize() & USIZE_ALIGN); + debug_assert!(start <= cur && cur <= end); + while cur >= start.add(USIZE_BYTES) { + debug_assert_eq!(0, cur.as_usize() % USIZE_BYTES); + + let chunk = cur.sub(USIZE_BYTES).cast::().read(); + if self.has_needle(chunk) { + break; + } + cur = cur.sub(USIZE_BYTES); + } + generic::rev_byte_by_byte(start, cur, confirm) + } + + /// Returns an iterator over all occurrences of one of the needle bytes in + /// the given haystack. + /// + /// The iterator returned implements `DoubleEndedIterator`. This means it + /// can also be used to find occurrences in reverse order. + pub fn iter<'a, 'h>(&'a self, haystack: &'h [u8]) -> TwoIter<'a, 'h> { + TwoIter { searcher: self, it: generic::Iter::new(haystack) } + } + + #[inline(always)] + fn has_needle(&self, chunk: usize) -> bool { + has_zero_byte(self.v1 ^ chunk) || has_zero_byte(self.v2 ^ chunk) + } + + #[inline(always)] + fn confirm(&self, haystack_byte: u8) -> bool { + self.s1 == haystack_byte || self.s2 == haystack_byte + } +} + +/// An iterator over all occurrences of two possible bytes in a haystack. +/// +/// This iterator implements `DoubleEndedIterator`, which means it can also be +/// used to find occurrences in reverse order. +/// +/// This iterator is created by the [`Two::iter`] method. +/// +/// The lifetime parameters are as follows: +/// +/// * `'a` refers to the lifetime of the underlying [`Two`] searcher. +/// * `'h` refers to the lifetime of the haystack being searched. +#[derive(Clone, Debug)] +pub struct TwoIter<'a, 'h> { + /// The underlying memchr searcher. + searcher: &'a Two, + /// Generic iterator implementation. + it: generic::Iter<'h>, +} + +impl<'a, 'h> Iterator for TwoIter<'a, 'h> { + type Item = usize; + + #[inline] + fn next(&mut self) -> Option { + // SAFETY: We rely on the generic iterator to provide valid start + // and end pointers, but we guarantee that any pointer returned by + // 'find_raw' falls within the bounds of the start and end pointer. + unsafe { self.it.next(|s, e| self.searcher.find_raw(s, e)) } + } + + #[inline] + fn size_hint(&self) -> (usize, Option) { + self.it.size_hint() + } +} + +impl<'a, 'h> DoubleEndedIterator for TwoIter<'a, 'h> { + #[inline] + fn next_back(&mut self) -> Option { + // SAFETY: We rely on the generic iterator to provide valid start + // and end pointers, but we guarantee that any pointer returned by + // 'rfind_raw' falls within the bounds of the start and end pointer. + unsafe { self.it.next_back(|s, e| self.searcher.rfind_raw(s, e)) } + } +} + +/// Finds all occurrences of three bytes in a haystack. +/// +/// That is, this reports matches of one of three possible bytes. For example, +/// searching for `a`, `b` or `o` in `afoobar` would report matches at offsets +/// `0`, `2`, `3`, `4` and `5`. +#[derive(Clone, Copy, Debug)] +pub struct Three { + s1: u8, + s2: u8, + s3: u8, + v1: usize, + v2: usize, + v3: usize, +} + +impl Three { + /// Create a new searcher that finds occurrences of the three needle bytes + /// given. 
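+ ///
+ /// # Example
+ ///
+ /// Searching `xyoabc` for any of `a`, `b` or `o`:
+ ///
+ /// ```
+ /// use memchr::arch::all::memchr::Three;
+ ///
+ /// let three = Three::new(b'a', b'b', b'o');
+ /// // The first occurrence of any of the three bytes is `o` at offset 2.
+ /// assert_eq!(Some(2), three.find(b"xyoabc"));
+ /// ```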
+ #[inline] + pub fn new(needle1: u8, needle2: u8, needle3: u8) -> Three { + Three { + s1: needle1, + s2: needle2, + s3: needle3, + v1: splat(needle1), + v2: splat(needle2), + v3: splat(needle3), + } + } + + /// A test-only routine so that we can bundle a bunch of quickcheck + /// properties into a single macro. Basically, this provides a constructor + /// that makes it identical to most other memchr implementations, which + /// have fallible constructors. + #[cfg(test)] + pub(crate) fn try_new( + needle1: u8, + needle2: u8, + needle3: u8, + ) -> Option { + Some(Three::new(needle1, needle2, needle3)) + } + + /// Return the first occurrence of one of the needle bytes in the given + /// haystack. If no such occurrence exists, then `None` is returned. + /// + /// The occurrence is reported as an offset into `haystack`. Its maximum + /// value for a non-empty haystack is `haystack.len() - 1`. + #[inline] + pub fn find(&self, haystack: &[u8]) -> Option { + // SAFETY: `find_raw` guarantees that if a pointer is returned, it + // falls within the bounds of the start and end pointers. + unsafe { + generic::search_slice_with_raw(haystack, |s, e| { + self.find_raw(s, e) + }) + } + } + + /// Return the last occurrence of one of the needle bytes in the given + /// haystack. If no such occurrence exists, then `None` is returned. + /// + /// The occurrence is reported as an offset into `haystack`. Its maximum + /// value for a non-empty haystack is `haystack.len() - 1`. + #[inline] + pub fn rfind(&self, haystack: &[u8]) -> Option { + // SAFETY: `find_raw` guarantees that if a pointer is returned, it + // falls within the bounds of the start and end pointers. + unsafe { + generic::search_slice_with_raw(haystack, |s, e| { + self.rfind_raw(s, e) + }) + } + } + + /// Like `find`, but accepts and returns raw pointers. + /// + /// When a match is found, the pointer returned is guaranteed to be + /// `>= start` and `< end`. + /// + /// This routine is useful if you're already using raw pointers and would + /// like to avoid converting back to a slice before executing a search. + /// + /// # Safety + /// + /// * Both `start` and `end` must be valid for reads. + /// * Both `start` and `end` must point to an initialized value. + /// * Both `start` and `end` must point to the same allocated object and + /// must either be in bounds or at most one byte past the end of the + /// allocated object. + /// * Both `start` and `end` must be _derived from_ a pointer to the same + /// object. + /// * The distance between `start` and `end` must not overflow `isize`. + /// * The distance being in bounds must not rely on "wrapping around" the + /// address space. + /// + /// Note that callers may pass a pair of pointers such that `start >= end`. + /// In that case, `None` will always be returned. + #[inline] + pub unsafe fn find_raw( + &self, + start: *const u8, + end: *const u8, + ) -> Option<*const u8> { + if start >= end { + return None; + } + let confirm = |b| self.confirm(b); + let len = end.distance(start); + if len < USIZE_BYTES { + return generic::fwd_byte_by_byte(start, end, confirm); + } + + // The start of the search may not be aligned to `*const usize`, + // so we do an unaligned load here. + let chunk = start.cast::().read_unaligned(); + if self.has_needle(chunk) { + return generic::fwd_byte_by_byte(start, end, confirm); + } + + // And now we start our search at a guaranteed aligned position. 
+ // The first iteration of the loop below will overlap with the + // unaligned chunk above in cases where the search starts at an + // unaligned offset, but that's okay as we're only here if that + // above didn't find a match. + let mut cur = + start.add(USIZE_BYTES - (start.as_usize() & USIZE_ALIGN)); + debug_assert!(cur > start); + debug_assert!(end.sub(USIZE_BYTES) >= start); + while cur <= end.sub(USIZE_BYTES) { + debug_assert_eq!(0, cur.as_usize() % USIZE_BYTES); + + let chunk = cur.cast::().read(); + if self.has_needle(chunk) { + break; + } + cur = cur.add(USIZE_BYTES); + } + generic::fwd_byte_by_byte(cur, end, confirm) + } + + /// Like `rfind`, but accepts and returns raw pointers. + /// + /// When a match is found, the pointer returned is guaranteed to be + /// `>= start` and `< end`. + /// + /// This routine is useful if you're already using raw pointers and would + /// like to avoid converting back to a slice before executing a search. + /// + /// # Safety + /// + /// * Both `start` and `end` must be valid for reads. + /// * Both `start` and `end` must point to an initialized value. + /// * Both `start` and `end` must point to the same allocated object and + /// must either be in bounds or at most one byte past the end of the + /// allocated object. + /// * Both `start` and `end` must be _derived from_ a pointer to the same + /// object. + /// * The distance between `start` and `end` must not overflow `isize`. + /// * The distance being in bounds must not rely on "wrapping around" the + /// address space. + /// + /// Note that callers may pass a pair of pointers such that `start >= end`. + /// In that case, `None` will always be returned. + #[inline] + pub unsafe fn rfind_raw( + &self, + start: *const u8, + end: *const u8, + ) -> Option<*const u8> { + if start >= end { + return None; + } + let confirm = |b| self.confirm(b); + let len = end.distance(start); + if len < USIZE_BYTES { + return generic::rev_byte_by_byte(start, end, confirm); + } + + let chunk = end.sub(USIZE_BYTES).cast::().read_unaligned(); + if self.has_needle(chunk) { + return generic::rev_byte_by_byte(start, end, confirm); + } + + let mut cur = end.sub(end.as_usize() & USIZE_ALIGN); + debug_assert!(start <= cur && cur <= end); + while cur >= start.add(USIZE_BYTES) { + debug_assert_eq!(0, cur.as_usize() % USIZE_BYTES); + + let chunk = cur.sub(USIZE_BYTES).cast::().read(); + if self.has_needle(chunk) { + break; + } + cur = cur.sub(USIZE_BYTES); + } + generic::rev_byte_by_byte(start, cur, confirm) + } + + /// Returns an iterator over all occurrences of one of the needle bytes in + /// the given haystack. + /// + /// The iterator returned implements `DoubleEndedIterator`. This means it + /// can also be used to find occurrences in reverse order. + pub fn iter<'a, 'h>(&'a self, haystack: &'h [u8]) -> ThreeIter<'a, 'h> { + ThreeIter { searcher: self, it: generic::Iter::new(haystack) } + } + + #[inline(always)] + fn has_needle(&self, chunk: usize) -> bool { + has_zero_byte(self.v1 ^ chunk) + || has_zero_byte(self.v2 ^ chunk) + || has_zero_byte(self.v3 ^ chunk) + } + + #[inline(always)] + fn confirm(&self, haystack_byte: u8) -> bool { + self.s1 == haystack_byte + || self.s2 == haystack_byte + || self.s3 == haystack_byte + } +} + +/// An iterator over all occurrences of three possible bytes in a haystack. +/// +/// This iterator implements `DoubleEndedIterator`, which means it can also be +/// used to find occurrences in reverse order. +/// +/// This iterator is created by the [`Three::iter`] method. 
+/// +/// The lifetime parameters are as follows: +/// +/// * `'a` refers to the lifetime of the underlying [`Three`] searcher. +/// * `'h` refers to the lifetime of the haystack being searched. +#[derive(Clone, Debug)] +pub struct ThreeIter<'a, 'h> { + /// The underlying memchr searcher. + searcher: &'a Three, + /// Generic iterator implementation. + it: generic::Iter<'h>, +} + +impl<'a, 'h> Iterator for ThreeIter<'a, 'h> { + type Item = usize; + + #[inline] + fn next(&mut self) -> Option { + // SAFETY: We rely on the generic iterator to provide valid start + // and end pointers, but we guarantee that any pointer returned by + // 'find_raw' falls within the bounds of the start and end pointer. + unsafe { self.it.next(|s, e| self.searcher.find_raw(s, e)) } + } + + #[inline] + fn size_hint(&self) -> (usize, Option) { + self.it.size_hint() + } +} + +impl<'a, 'h> DoubleEndedIterator for ThreeIter<'a, 'h> { + #[inline] + fn next_back(&mut self) -> Option { + // SAFETY: We rely on the generic iterator to provide valid start + // and end pointers, but we guarantee that any pointer returned by + // 'rfind_raw' falls within the bounds of the start and end pointer. + unsafe { self.it.next_back(|s, e| self.searcher.rfind_raw(s, e)) } + } +} + +/// Return `true` if `x` contains any zero byte. +/// +/// That is, this routine treats `x` as a register of 8-bit lanes and returns +/// true when any of those lanes is `0`. +/// +/// From "Matters Computational" by J. Arndt. +#[inline(always)] +fn has_zero_byte(x: usize) -> bool { + // "The idea is to subtract one from each of the bytes and then look for + // bytes where the borrow propagated all the way to the most significant + // bit." + const LO: usize = splat(0x01); + const HI: usize = splat(0x80); + + (x.wrapping_sub(LO) & !x & HI) != 0 +} + +/// Repeat the given byte into a word size number. That is, every 8 bits +/// is equivalent to the given byte. For example, if `b` is `\x4E` or +/// `01001110` in binary, then the returned value on a 32-bit system would be: +/// `01001110_01001110_01001110_01001110`. +#[inline(always)] +const fn splat(b: u8) -> usize { + // TODO: use `usize::from` once it can be used in const context. 
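+ // `usize::MAX / 255` is `0x01` repeated in every byte (for example,
+ // `0x0101_0101` on a 32-bit target), so multiplying it by `b` broadcasts
+ // `b` into every 8-bit lane without any carries.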
+ (b as usize) * (usize::MAX / 255) +} + +#[cfg(test)] +mod tests { + use super::*; + + define_memchr_quickcheck!(super, try_new); + + #[test] + fn forward_one() { + crate::tests::memchr::Runner::new(1).forward_iter( + |haystack, needles| { + Some(One::new(needles[0]).iter(haystack).collect()) + }, + ) + } + + #[test] + fn reverse_one() { + crate::tests::memchr::Runner::new(1).reverse_iter( + |haystack, needles| { + Some(One::new(needles[0]).iter(haystack).rev().collect()) + }, + ) + } + + #[test] + fn count_one() { + crate::tests::memchr::Runner::new(1).count_iter(|haystack, needles| { + Some(One::new(needles[0]).iter(haystack).count()) + }) + } + + #[test] + fn forward_two() { + crate::tests::memchr::Runner::new(2).forward_iter( + |haystack, needles| { + let n1 = needles.get(0).copied()?; + let n2 = needles.get(1).copied()?; + Some(Two::new(n1, n2).iter(haystack).collect()) + }, + ) + } + + #[test] + fn reverse_two() { + crate::tests::memchr::Runner::new(2).reverse_iter( + |haystack, needles| { + let n1 = needles.get(0).copied()?; + let n2 = needles.get(1).copied()?; + Some(Two::new(n1, n2).iter(haystack).rev().collect()) + }, + ) + } + + #[test] + fn forward_three() { + crate::tests::memchr::Runner::new(3).forward_iter( + |haystack, needles| { + let n1 = needles.get(0).copied()?; + let n2 = needles.get(1).copied()?; + let n3 = needles.get(2).copied()?; + Some(Three::new(n1, n2, n3).iter(haystack).collect()) + }, + ) + } + + #[test] + fn reverse_three() { + crate::tests::memchr::Runner::new(3).reverse_iter( + |haystack, needles| { + let n1 = needles.get(0).copied()?; + let n2 = needles.get(1).copied()?; + let n3 = needles.get(2).copied()?; + Some(Three::new(n1, n2, n3).iter(haystack).rev().collect()) + }, + ) + } + + // This was found by quickcheck in the course of refactoring this crate + // after memchr 2.5.0. + #[test] + fn regression_double_ended_iterator() { + let finder = One::new(b'a'); + let haystack = "a"; + let mut it = finder.iter(haystack.as_bytes()); + assert_eq!(Some(0), it.next()); + assert_eq!(None, it.next_back()); + } + + // This regression test was caught by ripgrep's test suite on i686 when + // upgrading to memchr 2.6. Namely, something about the \x0B bytes here + // screws with the SWAR counting approach I was using. This regression test + // prompted me to remove the SWAR counting approach and just replace it + // with a byte-at-a-time loop. + #[test] + fn regression_count_new_lines() { + let haystack = "01234567\x0b\n\x0b\n\x0b\n\x0b\nx"; + let count = One::new(b'\n').count(haystack.as_bytes()); + assert_eq!(4, count); + } + + // A test[1] that failed on some big endian targets after a perf + // improvement was merged[2]. + // + // At first it seemed like the test suite somehow missed the regression, + // but in actuality, CI was not running tests with `cross` but instead with + // `cargo` specifically. This is because those steps were using `cargo` + // instead of `${{ env.CARGO }}`. So adding this regression test doesn't + // really help catch that class of failure, but we add it anyway for good + // measure. + // + // [1]: https://github.com/BurntSushi/memchr/issues/152 + // [2]: https://github.com/BurntSushi/memchr/pull/151 + #[test] + fn regression_big_endian1() { + assert_eq!(One::new(b':').find(b"1:23"), Some(1)); + } + + // Interestingly, I couldn't get `regression_big_endian1` to fail for me + // on the `powerpc64-unknown-linux-gnu` target. But I found another case + // through quickcheck that does. 
+ #[test] + fn regression_big_endian2() { + let data = [0, 0, 0, 0, 0, 0, 0, 0]; + assert_eq!(One::new(b'\x00').find(&data), Some(0)); + } +} diff --git a/vendor/memchr/src/arch/all/mod.rs b/vendor/memchr/src/arch/all/mod.rs new file mode 100644 index 00000000000000..559cb75104d03a --- /dev/null +++ b/vendor/memchr/src/arch/all/mod.rs @@ -0,0 +1,234 @@ +/*! +Contains architecture independent routines. + +These routines are often used as a "fallback" implementation when the more +specialized architecture dependent routines are unavailable. +*/ + +pub mod memchr; +pub mod packedpair; +pub mod rabinkarp; +#[cfg(feature = "alloc")] +pub mod shiftor; +pub mod twoway; + +/// Returns true if and only if `needle` is a prefix of `haystack`. +/// +/// This uses a latency optimized variant of `memcmp` internally which *might* +/// make this faster for very short strings. +/// +/// # Inlining +/// +/// This routine is marked `inline(always)`. If you want to call this function +/// in a way that is not always inlined, you'll need to wrap a call to it in +/// another function that is marked as `inline(never)` or just `inline`. +#[inline(always)] +pub fn is_prefix(haystack: &[u8], needle: &[u8]) -> bool { + needle.len() <= haystack.len() + && is_equal(&haystack[..needle.len()], needle) +} + +/// Returns true if and only if `needle` is a suffix of `haystack`. +/// +/// This uses a latency optimized variant of `memcmp` internally which *might* +/// make this faster for very short strings. +/// +/// # Inlining +/// +/// This routine is marked `inline(always)`. If you want to call this function +/// in a way that is not always inlined, you'll need to wrap a call to it in +/// another function that is marked as `inline(never)` or just `inline`. +#[inline(always)] +pub fn is_suffix(haystack: &[u8], needle: &[u8]) -> bool { + needle.len() <= haystack.len() + && is_equal(&haystack[haystack.len() - needle.len()..], needle) +} + +/// Compare corresponding bytes in `x` and `y` for equality. +/// +/// That is, this returns true if and only if `x.len() == y.len()` and +/// `x[i] == y[i]` for all `0 <= i < x.len()`. +/// +/// # Inlining +/// +/// This routine is marked `inline(always)`. If you want to call this function +/// in a way that is not always inlined, you'll need to wrap a call to it in +/// another function that is marked as `inline(never)` or just `inline`. +/// +/// # Motivation +/// +/// Why not use slice equality instead? Well, slice equality usually results in +/// a call out to the current platform's `libc` which might not be inlineable +/// or have other overhead. This routine isn't guaranteed to be a win, but it +/// might be in some cases. +#[inline(always)] +pub fn is_equal(x: &[u8], y: &[u8]) -> bool { + if x.len() != y.len() { + return false; + } + // SAFETY: Our pointers are derived directly from borrowed slices which + // uphold all of our safety guarantees except for length. We account for + // length with the check above. + unsafe { is_equal_raw(x.as_ptr(), y.as_ptr(), x.len()) } +} + +/// Compare `n` bytes at the given pointers for equality. +/// +/// This returns true if and only if `*x.add(i) == *y.add(i)` for all +/// `0 <= i < n`. +/// +/// # Inlining +/// +/// This routine is marked `inline(always)`. If you want to call this function +/// in a way that is not always inlined, you'll need to wrap a call to it in +/// another function that is marked as `inline(never)` or just `inline`. +/// +/// # Motivation +/// +/// Why not use slice equality instead? 
Well, slice equality usually results in +/// a call out to the current platform's `libc` which might not be inlineable +/// or have other overhead. This routine isn't guaranteed to be a win, but it +/// might be in some cases. +/// +/// # Safety +/// +/// * Both `x` and `y` must be valid for reads of up to `n` bytes. +/// * Both `x` and `y` must point to an initialized value. +/// * Both `x` and `y` must each point to an allocated object and +/// must either be in bounds or at most one byte past the end of the +/// allocated object. `x` and `y` do not need to point to the same allocated +/// object, but they may. +/// * Both `x` and `y` must be _derived from_ a pointer to their respective +/// allocated objects. +/// * The distance between `x` and `x+n` must not overflow `isize`. Similarly +/// for `y` and `y+n`. +/// * The distance being in bounds must not rely on "wrapping around" the +/// address space. +#[inline(always)] +pub unsafe fn is_equal_raw( + mut x: *const u8, + mut y: *const u8, + mut n: usize, +) -> bool { + // When we have 4 or more bytes to compare, then proceed in chunks of 4 at + // a time using unaligned loads. + // + // Also, why do 4 byte loads instead of, say, 8 byte loads? The reason is + // that this particular version of memcmp is likely to be called with tiny + // needles. That means that if we do 8 byte loads, then a higher proportion + // of memcmp calls will use the slower variant above. With that said, this + // is a hypothesis and is only loosely supported by benchmarks. There's + // likely some improvement that could be made here. The main thing here + // though is to optimize for latency, not throughput. + + // SAFETY: The caller is responsible for ensuring the pointers we get are + // valid and readable for at least `n` bytes. We also do unaligned loads, + // so there's no need to ensure we're aligned. (This is justified by this + // routine being specifically for short strings.) + while n >= 4 { + let vx = x.cast::().read_unaligned(); + let vy = y.cast::().read_unaligned(); + if vx != vy { + return false; + } + x = x.add(4); + y = y.add(4); + n -= 4; + } + // If we don't have enough bytes to do 4-byte at a time loads, then + // do partial loads. Note that I used to have a byte-at-a-time + // loop here and that turned out to be quite a bit slower for the + // memmem/pathological/defeat-simple-vector-alphabet benchmark. 
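+ // At this point `n < 4`, so the tail is at most one 2-byte compare
+ // followed by at most one 1-byte compare (e.g. `n == 3` checks bytes
+ // 0..2 and then byte 2).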
+ if n >= 2 { + let vx = x.cast::().read_unaligned(); + let vy = y.cast::().read_unaligned(); + if vx != vy { + return false; + } + x = x.add(2); + y = y.add(2); + n -= 2; + } + if n > 0 { + if x.read() != y.read() { + return false; + } + } + true +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn equals_different_lengths() { + assert!(!is_equal(b"", b"a")); + assert!(!is_equal(b"a", b"")); + assert!(!is_equal(b"ab", b"a")); + assert!(!is_equal(b"a", b"ab")); + } + + #[test] + fn equals_mismatch() { + let one_mismatch = [ + (&b"a"[..], &b"x"[..]), + (&b"ab"[..], &b"ax"[..]), + (&b"abc"[..], &b"abx"[..]), + (&b"abcd"[..], &b"abcx"[..]), + (&b"abcde"[..], &b"abcdx"[..]), + (&b"abcdef"[..], &b"abcdex"[..]), + (&b"abcdefg"[..], &b"abcdefx"[..]), + (&b"abcdefgh"[..], &b"abcdefgx"[..]), + (&b"abcdefghi"[..], &b"abcdefghx"[..]), + (&b"abcdefghij"[..], &b"abcdefghix"[..]), + (&b"abcdefghijk"[..], &b"abcdefghijx"[..]), + (&b"abcdefghijkl"[..], &b"abcdefghijkx"[..]), + (&b"abcdefghijklm"[..], &b"abcdefghijklx"[..]), + (&b"abcdefghijklmn"[..], &b"abcdefghijklmx"[..]), + ]; + for (x, y) in one_mismatch { + assert_eq!(x.len(), y.len(), "lengths should match"); + assert!(!is_equal(x, y)); + assert!(!is_equal(y, x)); + } + } + + #[test] + fn equals_yes() { + assert!(is_equal(b"", b"")); + assert!(is_equal(b"a", b"a")); + assert!(is_equal(b"ab", b"ab")); + assert!(is_equal(b"abc", b"abc")); + assert!(is_equal(b"abcd", b"abcd")); + assert!(is_equal(b"abcde", b"abcde")); + assert!(is_equal(b"abcdef", b"abcdef")); + assert!(is_equal(b"abcdefg", b"abcdefg")); + assert!(is_equal(b"abcdefgh", b"abcdefgh")); + assert!(is_equal(b"abcdefghi", b"abcdefghi")); + } + + #[test] + fn prefix() { + assert!(is_prefix(b"", b"")); + assert!(is_prefix(b"a", b"")); + assert!(is_prefix(b"ab", b"")); + assert!(is_prefix(b"foo", b"foo")); + assert!(is_prefix(b"foobar", b"foo")); + + assert!(!is_prefix(b"foo", b"fob")); + assert!(!is_prefix(b"foobar", b"fob")); + } + + #[test] + fn suffix() { + assert!(is_suffix(b"", b"")); + assert!(is_suffix(b"a", b"")); + assert!(is_suffix(b"ab", b"")); + assert!(is_suffix(b"foo", b"foo")); + assert!(is_suffix(b"foobar", b"bar")); + + assert!(!is_suffix(b"foo", b"goo")); + assert!(!is_suffix(b"foobar", b"gar")); + } +} diff --git a/vendor/memchr/src/arch/all/packedpair/default_rank.rs b/vendor/memchr/src/arch/all/packedpair/default_rank.rs new file mode 100644 index 00000000000000..6aa3895e61ef77 --- /dev/null +++ b/vendor/memchr/src/arch/all/packedpair/default_rank.rs @@ -0,0 +1,258 @@ +pub(crate) const RANK: [u8; 256] = [ + 55, // '\x00' + 52, // '\x01' + 51, // '\x02' + 50, // '\x03' + 49, // '\x04' + 48, // '\x05' + 47, // '\x06' + 46, // '\x07' + 45, // '\x08' + 103, // '\t' + 242, // '\n' + 66, // '\x0b' + 67, // '\x0c' + 229, // '\r' + 44, // '\x0e' + 43, // '\x0f' + 42, // '\x10' + 41, // '\x11' + 40, // '\x12' + 39, // '\x13' + 38, // '\x14' + 37, // '\x15' + 36, // '\x16' + 35, // '\x17' + 34, // '\x18' + 33, // '\x19' + 56, // '\x1a' + 32, // '\x1b' + 31, // '\x1c' + 30, // '\x1d' + 29, // '\x1e' + 28, // '\x1f' + 255, // ' ' + 148, // '!' + 164, // '"' + 149, // '#' + 136, // '$' + 160, // '%' + 155, // '&' + 173, // "'" + 221, // '(' + 222, // ')' + 134, // '*' + 122, // '+' + 232, // ',' + 202, // '-' + 215, // '.' + 224, // '/' + 208, // '0' + 220, // '1' + 204, // '2' + 187, // '3' + 183, // '4' + 179, // '5' + 177, // '6' + 168, // '7' + 178, // '8' + 200, // '9' + 226, // ':' + 195, // ';' + 154, // '<' + 184, // '=' + 174, // '>' + 126, // '?' 
+ 120, // '@' + 191, // 'A' + 157, // 'B' + 194, // 'C' + 170, // 'D' + 189, // 'E' + 162, // 'F' + 161, // 'G' + 150, // 'H' + 193, // 'I' + 142, // 'J' + 137, // 'K' + 171, // 'L' + 176, // 'M' + 185, // 'N' + 167, // 'O' + 186, // 'P' + 112, // 'Q' + 175, // 'R' + 192, // 'S' + 188, // 'T' + 156, // 'U' + 140, // 'V' + 143, // 'W' + 123, // 'X' + 133, // 'Y' + 128, // 'Z' + 147, // '[' + 138, // '\\' + 146, // ']' + 114, // '^' + 223, // '_' + 151, // '`' + 249, // 'a' + 216, // 'b' + 238, // 'c' + 236, // 'd' + 253, // 'e' + 227, // 'f' + 218, // 'g' + 230, // 'h' + 247, // 'i' + 135, // 'j' + 180, // 'k' + 241, // 'l' + 233, // 'm' + 246, // 'n' + 244, // 'o' + 231, // 'p' + 139, // 'q' + 245, // 'r' + 243, // 's' + 251, // 't' + 235, // 'u' + 201, // 'v' + 196, // 'w' + 240, // 'x' + 214, // 'y' + 152, // 'z' + 182, // '{' + 205, // '|' + 181, // '}' + 127, // '~' + 27, // '\x7f' + 212, // '\x80' + 211, // '\x81' + 210, // '\x82' + 213, // '\x83' + 228, // '\x84' + 197, // '\x85' + 169, // '\x86' + 159, // '\x87' + 131, // '\x88' + 172, // '\x89' + 105, // '\x8a' + 80, // '\x8b' + 98, // '\x8c' + 96, // '\x8d' + 97, // '\x8e' + 81, // '\x8f' + 207, // '\x90' + 145, // '\x91' + 116, // '\x92' + 115, // '\x93' + 144, // '\x94' + 130, // '\x95' + 153, // '\x96' + 121, // '\x97' + 107, // '\x98' + 132, // '\x99' + 109, // '\x9a' + 110, // '\x9b' + 124, // '\x9c' + 111, // '\x9d' + 82, // '\x9e' + 108, // '\x9f' + 118, // '\xa0' + 141, // '¡' + 113, // '¢' + 129, // '£' + 119, // '¤' + 125, // '¥' + 165, // '¦' + 117, // '§' + 92, // '¨' + 106, // '©' + 83, // 'ª' + 72, // '«' + 99, // '¬' + 93, // '\xad' + 65, // '®' + 79, // '¯' + 166, // '°' + 237, // '±' + 163, // '²' + 199, // '³' + 190, // '´' + 225, // 'µ' + 209, // '¶' + 203, // '·' + 198, // '¸' + 217, // '¹' + 219, // 'º' + 206, // '»' + 234, // '¼' + 248, // '½' + 158, // '¾' + 239, // '¿' + 255, // 'À' + 255, // 'Á' + 255, // 'Â' + 255, // 'Ã' + 255, // 'Ä' + 255, // 'Å' + 255, // 'Æ' + 255, // 'Ç' + 255, // 'È' + 255, // 'É' + 255, // 'Ê' + 255, // 'Ë' + 255, // 'Ì' + 255, // 'Í' + 255, // 'Î' + 255, // 'Ï' + 255, // 'Ð' + 255, // 'Ñ' + 255, // 'Ò' + 255, // 'Ó' + 255, // 'Ô' + 255, // 'Õ' + 255, // 'Ö' + 255, // '×' + 255, // 'Ø' + 255, // 'Ù' + 255, // 'Ú' + 255, // 'Û' + 255, // 'Ü' + 255, // 'Ý' + 255, // 'Þ' + 255, // 'ß' + 255, // 'à' + 255, // 'á' + 255, // 'â' + 255, // 'ã' + 255, // 'ä' + 255, // 'å' + 255, // 'æ' + 255, // 'ç' + 255, // 'è' + 255, // 'é' + 255, // 'ê' + 255, // 'ë' + 255, // 'ì' + 255, // 'í' + 255, // 'î' + 255, // 'ï' + 255, // 'ð' + 255, // 'ñ' + 255, // 'ò' + 255, // 'ó' + 255, // 'ô' + 255, // 'õ' + 255, // 'ö' + 255, // '÷' + 255, // 'ø' + 255, // 'ù' + 255, // 'ú' + 255, // 'û' + 255, // 'ü' + 255, // 'ý' + 255, // 'þ' + 255, // 'ÿ' +]; diff --git a/vendor/memchr/src/arch/all/packedpair/mod.rs b/vendor/memchr/src/arch/all/packedpair/mod.rs new file mode 100644 index 00000000000000..148a985521d817 --- /dev/null +++ b/vendor/memchr/src/arch/all/packedpair/mod.rs @@ -0,0 +1,359 @@ +/*! +Provides an architecture independent implementation of the "packed pair" +algorithm. + +The "packed pair" algorithm is based on the [generic SIMD] algorithm. The main +difference is that it (by default) uses a background distribution of byte +frequencies to heuristically select the pair of bytes to search for. Note that +this module provides an architecture independent version that doesn't do as +good of a job keeping the search for candidates inside a SIMD hot path. 
It +however can be good enough in many circumstances. + +[generic SIMD]: http://0x80.pl/articles/simd-strfind.html#first-and-last +*/ + +use crate::memchr; + +mod default_rank; + +/// An architecture independent "packed pair" finder. +/// +/// This finder picks two bytes that it believes have high predictive power for +/// indicating an overall match of a needle. At search time, it reports offsets +/// where the needle could match based on whether the pair of bytes it chose +/// match. +/// +/// This is architecture independent because it utilizes `memchr` to find the +/// occurrence of one of the bytes in the pair, and then checks whether the +/// second byte matches. If it does, in the case of [`Finder::find_prefilter`], +/// the location at which the needle could match is returned. +/// +/// It is generally preferred to use architecture specific routines for a +/// "packed pair" prefilter, but this can be a useful fallback when the +/// architecture independent routines are unavailable. +#[derive(Clone, Copy, Debug)] +pub struct Finder { + pair: Pair, + byte1: u8, + byte2: u8, +} + +impl Finder { + /// Create a new prefilter that reports possible locations where the given + /// needle matches. + #[inline] + pub fn new(needle: &[u8]) -> Option { + Finder::with_pair(needle, Pair::new(needle)?) + } + + /// Create a new prefilter using the pair given. + /// + /// If the prefilter could not be constructed, then `None` is returned. + /// + /// This constructor permits callers to control precisely which pair of + /// bytes is used as a predicate. + #[inline] + pub fn with_pair(needle: &[u8], pair: Pair) -> Option { + let byte1 = needle[usize::from(pair.index1())]; + let byte2 = needle[usize::from(pair.index2())]; + // Currently this can never fail so we could just return a Finder, + // but it's conceivable this could change. + Some(Finder { pair, byte1, byte2 }) + } + + /// Run this finder on the given haystack as a prefilter. + /// + /// If a candidate match is found, then an offset where the needle *could* + /// begin in the haystack is returned. + #[inline] + pub fn find_prefilter(&self, haystack: &[u8]) -> Option { + let mut i = 0; + let index1 = usize::from(self.pair.index1()); + let index2 = usize::from(self.pair.index2()); + loop { + // Use a fast vectorized implementation to skip to the next + // occurrence of the rarest byte (heuristically chosen) in the + // needle. + i += memchr(self.byte1, &haystack[i..])?; + let found = i; + i += 1; + + // If we can't align our first byte match with the haystack, then a + // match is impossible. + let aligned1 = match found.checked_sub(index1) { + None => continue, + Some(aligned1) => aligned1, + }; + + // Now align the second byte match with the haystack. A mismatch + // means that a match is impossible. + let aligned2 = match aligned1.checked_add(index2) { + None => continue, + Some(aligned_index2) => aligned_index2, + }; + if haystack.get(aligned2).map_or(true, |&b| b != self.byte2) { + continue; + } + + // We've done what we can. There might be a match here. + return Some(aligned1); + } + } + + /// Returns the pair of offsets (into the needle) used to check as a + /// predicate before confirming whether a needle exists at a particular + /// position. + #[inline] + pub fn pair(&self) -> &Pair { + &self.pair + } +} + +/// A pair of byte offsets into a needle to use as a predicate. +/// +/// This pair is used as a predicate to quickly filter out positions in a +/// haystack in which a needle cannot match. 
In some cases, this pair can even +/// be used in vector algorithms such that the vector algorithm only switches +/// over to scalar code once this pair has been found. +/// +/// A pair of offsets can be used in both substring search implementations and +/// in prefilters. The former will report matches of a needle in a haystack +/// where as the latter will only report possible matches of a needle. +/// +/// The offsets are limited each to a maximum of 255 to keep memory usage low. +/// Moreover, it's rarely advantageous to create a predicate using offsets +/// greater than 255 anyway. +/// +/// The only guarantee enforced on the pair of offsets is that they are not +/// equivalent. It is not necessarily the case that `index1 < index2` for +/// example. By convention, `index1` corresponds to the byte in the needle +/// that is believed to be most the predictive. Note also that because of the +/// requirement that the indices be both valid for the needle used to build +/// the pair and not equal, it follows that a pair can only be constructed for +/// needles with length at least 2. +#[derive(Clone, Copy, Debug)] +pub struct Pair { + index1: u8, + index2: u8, +} + +impl Pair { + /// Create a new pair of offsets from the given needle. + /// + /// If a pair could not be created (for example, if the needle is too + /// short), then `None` is returned. + /// + /// This chooses the pair in the needle that is believed to be as + /// predictive of an overall match of the needle as possible. + #[inline] + pub fn new(needle: &[u8]) -> Option { + Pair::with_ranker(needle, DefaultFrequencyRank) + } + + /// Create a new pair of offsets from the given needle and ranker. + /// + /// This permits the caller to choose a background frequency distribution + /// with which bytes are selected. The idea is to select a pair of bytes + /// that is believed to strongly predict a match in the haystack. This + /// usually means selecting bytes that occur rarely in a haystack. + /// + /// If a pair could not be created (for example, if the needle is too + /// short), then `None` is returned. + #[inline] + pub fn with_ranker( + needle: &[u8], + ranker: R, + ) -> Option { + if needle.len() <= 1 { + return None; + } + // Find the rarest two bytes. We make them distinct indices by + // construction. (The actual byte value may be the same in degenerate + // cases, but that's OK.) + let (mut rare1, mut index1) = (needle[0], 0); + let (mut rare2, mut index2) = (needle[1], 1); + if ranker.rank(rare2) < ranker.rank(rare1) { + core::mem::swap(&mut rare1, &mut rare2); + core::mem::swap(&mut index1, &mut index2); + } + let max = usize::from(core::u8::MAX); + for (i, &b) in needle.iter().enumerate().take(max).skip(2) { + if ranker.rank(b) < ranker.rank(rare1) { + rare2 = rare1; + index2 = index1; + rare1 = b; + index1 = u8::try_from(i).unwrap(); + } else if b != rare1 && ranker.rank(b) < ranker.rank(rare2) { + rare2 = b; + index2 = u8::try_from(i).unwrap(); + } + } + // While not strictly required for how a Pair is normally used, we + // really don't want these to be equivalent. If they were, it would + // reduce the effectiveness of candidate searching using these rare + // bytes by increasing the rate of false positives. + assert_ne!(index1, index2); + Some(Pair { index1, index2 }) + } + + /// Create a new pair using the offsets given for the needle given. + /// + /// This bypasses any sort of heuristic process for choosing the offsets + /// and permits the caller to choose the offsets themselves. 
+ /// + /// Indices are limited to valid `u8` values so that a `Pair` uses less + /// memory. It is not possible to create a `Pair` with offsets bigger than + /// `u8::MAX`. It's likely that such a thing is not needed, but if it is, + /// it's suggested to build your own bespoke algorithm because you're + /// likely working on a very niche case. (File an issue if this suggestion + /// does not make sense to you.) + /// + /// If a pair could not be created (for example, if the needle is too + /// short), then `None` is returned. + #[inline] + pub fn with_indices( + needle: &[u8], + index1: u8, + index2: u8, + ) -> Option { + // While not strictly required for how a Pair is normally used, we + // really don't want these to be equivalent. If they were, it would + // reduce the effectiveness of candidate searching using these rare + // bytes by increasing the rate of false positives. + if index1 == index2 { + return None; + } + // Similarly, invalid indices means the Pair is invalid too. + if usize::from(index1) >= needle.len() { + return None; + } + if usize::from(index2) >= needle.len() { + return None; + } + Some(Pair { index1, index2 }) + } + + /// Returns the first offset of the pair. + #[inline] + pub fn index1(&self) -> u8 { + self.index1 + } + + /// Returns the second offset of the pair. + #[inline] + pub fn index2(&self) -> u8 { + self.index2 + } +} + +/// This trait allows the user to customize the heuristic used to determine the +/// relative frequency of a given byte in the dataset being searched. +/// +/// The use of this trait can have a dramatic impact on performance depending +/// on the type of data being searched. The details of why are explained in the +/// docs of [`crate::memmem::Prefilter`]. To summarize, the core algorithm uses +/// a prefilter to quickly identify candidate matches that are later verified +/// more slowly. This prefilter is implemented in terms of trying to find +/// `rare` bytes at specific offsets that will occur less frequently in the +/// dataset. While the concept of a `rare` byte is similar for most datasets, +/// there are some specific datasets (like binary executables) that have +/// dramatically different byte distributions. For these datasets customizing +/// the byte frequency heuristic can have a massive impact on performance, and +/// might even need to be done at runtime. +/// +/// The default implementation of `HeuristicFrequencyRank` reads from the +/// static frequency table defined in `src/memmem/byte_frequencies.rs`. This +/// is optimal for most inputs, so if you are unsure of the impact of using a +/// custom `HeuristicFrequencyRank` you should probably just use the default. +/// +/// # Example +/// +/// ``` +/// use memchr::{ +/// arch::all::packedpair::HeuristicFrequencyRank, +/// memmem::FinderBuilder, +/// }; +/// +/// /// A byte-frequency table that is good for scanning binary executables. 
+/// struct Binary; +/// +/// impl HeuristicFrequencyRank for Binary { +/// fn rank(&self, byte: u8) -> u8 { +/// const TABLE: [u8; 256] = [ +/// 255, 128, 61, 43, 50, 41, 27, 28, 57, 15, 21, 13, 24, 17, 17, +/// 89, 58, 16, 11, 7, 14, 23, 7, 6, 24, 9, 6, 5, 9, 4, 7, 16, +/// 68, 11, 9, 6, 88, 7, 4, 4, 23, 9, 4, 8, 8, 5, 10, 4, 30, 11, +/// 9, 24, 11, 5, 5, 5, 19, 11, 6, 17, 9, 9, 6, 8, +/// 48, 58, 11, 14, 53, 40, 9, 9, 254, 35, 3, 6, 52, 23, 6, 6, 27, +/// 4, 7, 11, 14, 13, 10, 11, 11, 5, 2, 10, 16, 12, 6, 19, +/// 19, 20, 5, 14, 16, 31, 19, 7, 14, 20, 4, 4, 19, 8, 18, 20, 24, +/// 1, 25, 19, 58, 29, 10, 5, 15, 20, 2, 2, 9, 4, 3, 5, +/// 51, 11, 4, 53, 23, 39, 6, 4, 13, 81, 4, 186, 5, 67, 3, 2, 15, +/// 0, 0, 1, 3, 2, 0, 0, 5, 0, 0, 0, 2, 0, 0, 0, +/// 12, 2, 1, 1, 3, 1, 1, 1, 6, 1, 2, 1, 3, 1, 1, 2, 9, 1, 1, 0, +/// 2, 2, 4, 4, 11, 6, 7, 3, 6, 9, 4, 5, +/// 46, 18, 8, 18, 17, 3, 8, 20, 16, 10, 3, 7, 175, 4, 6, 7, 13, +/// 3, 7, 3, 3, 1, 3, 3, 10, 3, 1, 5, 2, 0, 1, 2, +/// 16, 3, 5, 1, 6, 1, 1, 2, 58, 20, 3, 14, 12, 2, 1, 3, 16, 3, 5, +/// 8, 3, 1, 8, 6, 17, 6, 5, 3, 8, 6, 13, 175, +/// ]; +/// TABLE[byte as usize] +/// } +/// } +/// // Create a new finder with the custom heuristic. +/// let finder = FinderBuilder::new() +/// .build_forward_with_ranker(Binary, b"\x00\x00\xdd\xdd"); +/// // Find needle with custom heuristic. +/// assert!(finder.find(b"\x00\x00\x00\xdd\xdd").is_some()); +/// ``` +pub trait HeuristicFrequencyRank { + /// Return the heuristic frequency rank of the given byte. A lower rank + /// means the byte is believed to occur less frequently in the haystack. + /// + /// Some uses of this heuristic may treat arbitrary absolute rank values as + /// significant. For example, an implementation detail in this crate may + /// determine that heuristic prefilters are inappropriate if every byte in + /// the needle has a "high" rank. + fn rank(&self, byte: u8) -> u8; +} + +/// The default byte frequency heuristic that is good for most haystacks. +pub(crate) struct DefaultFrequencyRank; + +impl HeuristicFrequencyRank for DefaultFrequencyRank { + fn rank(&self, byte: u8) -> u8 { + self::default_rank::RANK[usize::from(byte)] + } +} + +/// This permits passing any implementation of `HeuristicFrequencyRank` as a +/// borrowed version of itself. +impl<'a, R> HeuristicFrequencyRank for &'a R +where + R: HeuristicFrequencyRank, +{ + fn rank(&self, byte: u8) -> u8 { + (**self).rank(byte) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn forward_packedpair() { + fn find( + haystack: &[u8], + needle: &[u8], + _index1: u8, + _index2: u8, + ) -> Option> { + // We ignore the index positions requested since it winds up making + // this test too slow overall. + let f = Finder::new(needle)?; + Some(f.find_prefilter(haystack)) + } + crate::tests::packedpair::Runner::new().fwd(find).run() + } +} diff --git a/vendor/memchr/src/arch/all/rabinkarp.rs b/vendor/memchr/src/arch/all/rabinkarp.rs new file mode 100644 index 00000000000000..e0bafbac982950 --- /dev/null +++ b/vendor/memchr/src/arch/all/rabinkarp.rs @@ -0,0 +1,390 @@ +/*! +An implementation of the [Rabin-Karp substring search algorithm][rabinkarp]. + +Rabin-Karp works by creating a hash of the needle provided and then computing +a rolling hash for each needle sized window in the haystack. When the rolling +hash matches the hash of the needle, a byte-wise comparison is done to check +if a match exists. The worst case time complexity of Rabin-Karp is `O(m * +n)` where `m ~ len(needle)` and `n ~ len(haystack)`. 
Its worst case space +complexity is constant. + +The main utility of Rabin-Karp is that the searcher can be constructed very +quickly with very little memory. This makes it especially useful when searching +for small needles in small haystacks, as it might finish its search before a +beefier algorithm (like Two-Way) even starts. + +[rabinkarp]: https://en.wikipedia.org/wiki/Rabin%E2%80%93Karp_algorithm +*/ + +/* +(This was the comment I wrote for this module originally when it was not +exposed. The comment still looks useful, but it's a bit in the weeds, so it's +not public itself.) + +This module implements the classical Rabin-Karp substring search algorithm, +with no extra frills. While its use would seem to break our time complexity +guarantee of O(m+n) (RK's time complexity is O(mn)), we are careful to only +ever use RK on a constant subset of haystacks. The main point here is that +RK has good latency properties for small needles/haystacks. It's very quick +to compute a needle hash and zip through the haystack when compared to +initializing Two-Way, for example. And this is especially useful for cases +where the haystack is just too short for vector instructions to do much good. + +The hashing function used here is the same one recommended by ESMAJ. + +Another choice instead of Rabin-Karp would be Shift-Or. But its latency +isn't quite as good since its preprocessing time is a bit more expensive +(both in practice and in theory). However, perhaps Shift-Or has a place +somewhere else for short patterns. I think the main problem is that it +requires space proportional to the alphabet and the needle. If we, for +example, supported needles up to length 16, then the total table size would be +len(alphabet)*size_of::()==512 bytes. Which isn't exactly small, and it's +probably bad to put that on the stack. So ideally, we'd throw it on the heap, +but we'd really like to write as much code without using alloc/std as possible. +But maybe it's worth the special casing. It's a TODO to benchmark. + +Wikipedia has a decent explanation, if a bit heavy on the theory: +https://en.wikipedia.org/wiki/Rabin%E2%80%93Karp_algorithm + +But ESMAJ provides something a bit more concrete: +http://www-igm.univ-mlv.fr/~lecroq/string/node5.html + +Finally, aho-corasick uses Rabin-Karp for multiple pattern match in some cases: +https://github.com/BurntSushi/aho-corasick/blob/3852632f10587db0ff72ef29e88d58bf305a0946/src/packed/rabinkarp.rs +*/ + +use crate::ext::Pointer; + +/// A forward substring searcher using the Rabin-Karp algorithm. +/// +/// Note that, as a lower level API, a `Finder` does not have access to the +/// needle it was constructed with. For this reason, executing a search +/// with a `Finder` requires passing both the needle and the haystack, +/// where the needle is exactly equivalent to the one given to the `Finder` +/// at construction time. This design was chosen so that callers can have +/// more precise control over where and how many times a needle is stored. +/// For example, in cases where Rabin-Karp is just one of several possible +/// substring search algorithms. +#[derive(Clone, Debug)] +pub struct Finder { + /// The actual hash. + hash: Hash, + /// The factor needed to multiply a byte by in order to subtract it from + /// the hash. It is defined to be 2^(n-1) (using wrapping exponentiation), + /// where n is the length of the needle. This is how we "remove" a byte + /// from the hash once the hash window rolls past it. 
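+ /// For example, a needle of length 4 gets a `hash_2pow` of `8` (2^3).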
+ hash_2pow: u32, +} + +impl Finder { + /// Create a new Rabin-Karp forward searcher for the given `needle`. + /// + /// The needle may be empty. The empty needle matches at every byte offset. + /// + /// Note that callers must pass the same needle to all search calls using + /// this `Finder`. + #[inline] + pub fn new(needle: &[u8]) -> Finder { + let mut s = Finder { hash: Hash::new(), hash_2pow: 1 }; + let first_byte = match needle.get(0) { + None => return s, + Some(&first_byte) => first_byte, + }; + s.hash.add(first_byte); + for b in needle.iter().copied().skip(1) { + s.hash.add(b); + s.hash_2pow = s.hash_2pow.wrapping_shl(1); + } + s + } + + /// Return the first occurrence of the `needle` in the `haystack` + /// given. If no such occurrence exists, then `None` is returned. + /// + /// The `needle` provided must match the needle given to this finder at + /// construction time. + /// + /// The maximum value this can return is `haystack.len()`, which can only + /// occur when the needle and haystack both have length zero. Otherwise, + /// for non-empty haystacks, the maximum value is `haystack.len() - 1`. + #[inline] + pub fn find(&self, haystack: &[u8], needle: &[u8]) -> Option { + unsafe { + let hstart = haystack.as_ptr(); + let hend = hstart.add(haystack.len()); + let nstart = needle.as_ptr(); + let nend = nstart.add(needle.len()); + let found = self.find_raw(hstart, hend, nstart, nend)?; + Some(found.distance(hstart)) + } + } + + /// Like `find`, but accepts and returns raw pointers. + /// + /// When a match is found, the pointer returned is guaranteed to be + /// `>= start` and `<= end`. The pointer returned is only ever equivalent + /// to `end` when both the needle and haystack are empty. (That is, the + /// empty string matches the empty string.) + /// + /// This routine is useful if you're already using raw pointers and would + /// like to avoid converting back to a slice before executing a search. + /// + /// # Safety + /// + /// Note that `start` and `end` below refer to both pairs of pointers given + /// to this routine. That is, the conditions apply to both `hstart`/`hend` + /// and `nstart`/`nend`. + /// + /// * Both `start` and `end` must be valid for reads. + /// * Both `start` and `end` must point to an initialized value. + /// * Both `start` and `end` must point to the same allocated object and + /// must either be in bounds or at most one byte past the end of the + /// allocated object. + /// * Both `start` and `end` must be _derived from_ a pointer to the same + /// object. + /// * The distance between `start` and `end` must not overflow `isize`. + /// * The distance being in bounds must not rely on "wrapping around" the + /// address space. + /// * It must be the case that `start <= end`. + #[inline] + pub unsafe fn find_raw( + &self, + hstart: *const u8, + hend: *const u8, + nstart: *const u8, + nend: *const u8, + ) -> Option<*const u8> { + let hlen = hend.distance(hstart); + let nlen = nend.distance(nstart); + if nlen > hlen { + return None; + } + let mut cur = hstart; + let end = hend.sub(nlen); + let mut hash = Hash::forward(cur, cur.add(nlen)); + loop { + if self.hash == hash && is_equal_raw(cur, nstart, nlen) { + return Some(cur); + } + if cur >= end { + return None; + } + hash.roll(self, cur.read(), cur.add(nlen).read()); + cur = cur.add(1); + } + } +} + +/// A reverse substring searcher using the Rabin-Karp algorithm. 
+#[derive(Clone, Debug)] +pub struct FinderRev(Finder); + +impl FinderRev { + /// Create a new Rabin-Karp reverse searcher for the given `needle`. + #[inline] + pub fn new(needle: &[u8]) -> FinderRev { + let mut s = FinderRev(Finder { hash: Hash::new(), hash_2pow: 1 }); + let last_byte = match needle.last() { + None => return s, + Some(&last_byte) => last_byte, + }; + s.0.hash.add(last_byte); + for b in needle.iter().rev().copied().skip(1) { + s.0.hash.add(b); + s.0.hash_2pow = s.0.hash_2pow.wrapping_shl(1); + } + s + } + + /// Return the last occurrence of the `needle` in the `haystack` + /// given. If no such occurrence exists, then `None` is returned. + /// + /// The `needle` provided must match the needle given to this finder at + /// construction time. + /// + /// The maximum value this can return is `haystack.len()`, which can only + /// occur when the needle and haystack both have length zero. Otherwise, + /// for non-empty haystacks, the maximum value is `haystack.len() - 1`. + #[inline] + pub fn rfind(&self, haystack: &[u8], needle: &[u8]) -> Option { + unsafe { + let hstart = haystack.as_ptr(); + let hend = hstart.add(haystack.len()); + let nstart = needle.as_ptr(); + let nend = nstart.add(needle.len()); + let found = self.rfind_raw(hstart, hend, nstart, nend)?; + Some(found.distance(hstart)) + } + } + + /// Like `rfind`, but accepts and returns raw pointers. + /// + /// When a match is found, the pointer returned is guaranteed to be + /// `>= start` and `<= end`. The pointer returned is only ever equivalent + /// to `end` when both the needle and haystack are empty. (That is, the + /// empty string matches the empty string.) + /// + /// This routine is useful if you're already using raw pointers and would + /// like to avoid converting back to a slice before executing a search. + /// + /// # Safety + /// + /// Note that `start` and `end` below refer to both pairs of pointers given + /// to this routine. That is, the conditions apply to both `hstart`/`hend` + /// and `nstart`/`nend`. + /// + /// * Both `start` and `end` must be valid for reads. + /// * Both `start` and `end` must point to an initialized value. + /// * Both `start` and `end` must point to the same allocated object and + /// must either be in bounds or at most one byte past the end of the + /// allocated object. + /// * Both `start` and `end` must be _derived from_ a pointer to the same + /// object. + /// * The distance between `start` and `end` must not overflow `isize`. + /// * The distance being in bounds must not rely on "wrapping around" the + /// address space. + /// * It must be the case that `start <= end`. + #[inline] + pub unsafe fn rfind_raw( + &self, + hstart: *const u8, + hend: *const u8, + nstart: *const u8, + nend: *const u8, + ) -> Option<*const u8> { + let hlen = hend.distance(hstart); + let nlen = nend.distance(nstart); + if nlen > hlen { + return None; + } + let mut cur = hend.sub(nlen); + let start = hstart; + let mut hash = Hash::reverse(cur, cur.add(nlen)); + loop { + if self.0.hash == hash && is_equal_raw(cur, nstart, nlen) { + return Some(cur); + } + if cur <= start { + return None; + } + cur = cur.sub(1); + hash.roll(&self.0, cur.add(nlen).read(), cur.read()); + } + } +} + +/// Whether RK is believed to be very fast for the given needle/haystack. +#[inline] +pub(crate) fn is_fast(haystack: &[u8], _needle: &[u8]) -> bool { + haystack.len() < 16 +} + +/// A Rabin-Karp hash. This might represent the hash of a needle, or the hash +/// of a rolling window in the haystack. 
+#[derive(Clone, Copy, Debug, Default, Eq, PartialEq)] +struct Hash(u32); + +impl Hash { + /// Create a new hash that represents the empty string. + #[inline(always)] + fn new() -> Hash { + Hash(0) + } + + /// Create a new hash from the bytes given for use in forward searches. + /// + /// # Safety + /// + /// The given pointers must be valid to read from within their range. + #[inline(always)] + unsafe fn forward(mut start: *const u8, end: *const u8) -> Hash { + let mut hash = Hash::new(); + while start < end { + hash.add(start.read()); + start = start.add(1); + } + hash + } + + /// Create a new hash from the bytes given for use in reverse searches. + /// + /// # Safety + /// + /// The given pointers must be valid to read from within their range. + #[inline(always)] + unsafe fn reverse(start: *const u8, mut end: *const u8) -> Hash { + let mut hash = Hash::new(); + while start < end { + end = end.sub(1); + hash.add(end.read()); + } + hash + } + + /// Add 'new' and remove 'old' from this hash. The given needle hash should + /// correspond to the hash computed for the needle being searched for. + /// + /// This is meant to be used when the rolling window of the haystack is + /// advanced. + #[inline(always)] + fn roll(&mut self, finder: &Finder, old: u8, new: u8) { + self.del(finder, old); + self.add(new); + } + + /// Add a byte to this hash. + #[inline(always)] + fn add(&mut self, byte: u8) { + self.0 = self.0.wrapping_shl(1).wrapping_add(u32::from(byte)); + } + + /// Remove a byte from this hash. The given needle hash should correspond + /// to the hash computed for the needle being searched for. + #[inline(always)] + fn del(&mut self, finder: &Finder, byte: u8) { + let factor = finder.hash_2pow; + self.0 = self.0.wrapping_sub(u32::from(byte).wrapping_mul(factor)); + } +} + +/// Returns true when `x[i] == y[i]` for all `0 <= i < n`. +/// +/// We forcefully don't inline this to hint at the compiler that it is unlikely +/// to be called. This causes the inner rabinkarp loop above to be a bit +/// tighter and leads to some performance improvement. See the +/// memmem/krate/prebuilt/sliceslice-words/words benchmark. +/// +/// # Safety +/// +/// Same as `crate::arch::all::is_equal_raw`. +#[cold] +#[inline(never)] +unsafe fn is_equal_raw(x: *const u8, y: *const u8, n: usize) -> bool { + crate::arch::all::is_equal_raw(x, y, n) +} + +#[cfg(test)] +mod tests { + use super::*; + + define_substring_forward_quickcheck!(|h, n| Some( + Finder::new(n).find(h, n) + )); + define_substring_reverse_quickcheck!(|h, n| Some( + FinderRev::new(n).rfind(h, n) + )); + + #[test] + fn forward() { + crate::tests::substring::Runner::new() + .fwd(|h, n| Some(Finder::new(n).find(h, n))) + .run(); + } + + #[test] + fn reverse() { + crate::tests::substring::Runner::new() + .rev(|h, n| Some(FinderRev::new(n).rfind(h, n))) + .run(); + } +} diff --git a/vendor/memchr/src/arch/all/shiftor.rs b/vendor/memchr/src/arch/all/shiftor.rs new file mode 100644 index 00000000000000..b690564a642e9d --- /dev/null +++ b/vendor/memchr/src/arch/all/shiftor.rs @@ -0,0 +1,89 @@ +/*! +An implementation of the [Shift-Or substring search algorithm][shiftor]. + +[shiftor]: https://en.wikipedia.org/wiki/Bitap_algorithm +*/ + +use alloc::boxed::Box; + +/// The type of our mask. +/// +/// While we don't expose anyway to configure this in the public API, if one +/// really needs less memory usage or support for longer needles, then it is +/// suggested to copy the code from this module and modify it to fit your +/// needs. 
The code below is written to be correct regardless of whether Mask +/// is a u8, u16, u32, u64 or u128. +type Mask = u16; + +/// A forward substring searcher using the Shift-Or algorithm. +#[derive(Debug)] +pub struct Finder { + masks: Box<[Mask; 256]>, + needle_len: usize, +} + +impl Finder { + const MAX_NEEDLE_LEN: usize = (Mask::BITS - 1) as usize; + + /// Create a new Shift-Or forward searcher for the given `needle`. + /// + /// The needle may be empty. The empty needle matches at every byte offset. + #[inline] + pub fn new(needle: &[u8]) -> Option { + let needle_len = needle.len(); + if needle_len > Finder::MAX_NEEDLE_LEN { + // A match is found when bit 7 is set in 'result' in the search + // routine below. So our needle can't be bigger than 7. We could + // permit bigger needles by using u16, u32 or u64 for our mask + // entries. But this is all we need for this example. + return None; + } + let mut searcher = Finder { masks: Box::from([!0; 256]), needle_len }; + for (i, &byte) in needle.iter().enumerate() { + searcher.masks[usize::from(byte)] &= !(1 << i); + } + Some(searcher) + } + + /// Return the first occurrence of the needle given to `Finder::new` in + /// the `haystack` given. If no such occurrence exists, then `None` is + /// returned. + /// + /// Unlike most other substring search implementations in this crate, this + /// finder does not require passing the needle at search time. A match can + /// be determined without the needle at all since the required information + /// is already encoded into this finder at construction time. + /// + /// The maximum value this can return is `haystack.len()`, which can only + /// occur when the needle and haystack both have length zero. Otherwise, + /// for non-empty haystacks, the maximum value is `haystack.len() - 1`. + #[inline] + pub fn find(&self, haystack: &[u8]) -> Option { + if self.needle_len == 0 { + return Some(0); + } + let mut result = !1; + for (i, &byte) in haystack.iter().enumerate() { + result |= self.masks[usize::from(byte)]; + result <<= 1; + if result & (1 << self.needle_len) == 0 { + return Some(i + 1 - self.needle_len); + } + } + None + } +} + +#[cfg(test)] +mod tests { + use super::*; + + define_substring_forward_quickcheck!(|h, n| Some(Finder::new(n)?.find(h))); + + #[test] + fn forward() { + crate::tests::substring::Runner::new() + .fwd(|h, n| Some(Finder::new(n)?.find(h))) + .run(); + } +} diff --git a/vendor/memchr/src/arch/all/twoway.rs b/vendor/memchr/src/arch/all/twoway.rs new file mode 100644 index 00000000000000..0df3b4a86e950c --- /dev/null +++ b/vendor/memchr/src/arch/all/twoway.rs @@ -0,0 +1,877 @@ +/*! +An implementation of the [Two-Way substring search algorithm][two-way]. + +[`Finder`] can be built for forward searches, while [`FinderRev`] can be built +for reverse searches. + +Two-Way makes for a nice general purpose substring search algorithm because of +its time and space complexity properties. It also performs well in practice. +Namely, with `m = len(needle)` and `n = len(haystack)`, Two-Way takes `O(m)` +time to create a finder, `O(1)` space and `O(n)` search time. In other words, +the preprocessing step is quick, doesn't require any heap memory and the worst +case search time is guaranteed to be linear in the haystack regardless of the +size of the needle. + +While vector algorithms will usually beat Two-Way handedly, vector algorithms +also usually have pathological or edge cases that are better handled by Two-Way. 
+Moreover, not all targets support vector algorithms or implementations for them +simply may not exist yet. + +Two-Way can be found in the `memmem` implementations in at least [GNU libc] and +[musl]. + +[two-way]: https://en.wikipedia.org/wiki/Two-way_string-matching_algorithm +[GNU libc]: https://www.gnu.org/software/libc/ +[musl]: https://www.musl-libc.org/ +*/ + +use core::cmp; + +use crate::{ + arch::all::{is_prefix, is_suffix}, + memmem::Pre, +}; + +/// A forward substring searcher that uses the Two-Way algorithm. +#[derive(Clone, Copy, Debug)] +pub struct Finder(TwoWay); + +/// A reverse substring searcher that uses the Two-Way algorithm. +#[derive(Clone, Copy, Debug)] +pub struct FinderRev(TwoWay); + +/// An implementation of the TwoWay substring search algorithm. +/// +/// This searcher supports forward and reverse search, although not +/// simultaneously. It runs in `O(n + m)` time and `O(1)` space, where +/// `n ~ len(needle)` and `m ~ len(haystack)`. +/// +/// The implementation here roughly matches that which was developed by +/// Crochemore and Perrin in their 1991 paper "Two-way string-matching." The +/// changes in this implementation are 1) the use of zero-based indices, 2) a +/// heuristic skip table based on the last byte (borrowed from Rust's standard +/// library) and 3) the addition of heuristics for a fast skip loop. For (3), +/// callers can pass any kind of prefilter they want, but usually it's one +/// based on a heuristic that uses an approximate background frequency of bytes +/// to choose rare bytes to quickly look for candidate match positions. Note +/// though that currently, this prefilter functionality is not exposed directly +/// in the public API. (File an issue if you want it and provide a use case +/// please.) +/// +/// The heuristic for fast skipping is automatically shut off if it's +/// detected to be ineffective at search time. Generally, this only occurs in +/// pathological cases. But this is generally necessary in order to preserve +/// a `O(n + m)` time bound. +/// +/// The code below is fairly complex and not obviously correct at all. It's +/// likely necessary to read the Two-Way paper cited above in order to fully +/// grok this code. The essence of it is: +/// +/// 1. Do something to detect a "critical" position in the needle. +/// 2. For the current position in the haystack, look if `needle[critical..]` +/// matches at that position. +/// 3. If so, look if `needle[..critical]` matches. +/// 4. If a mismatch occurs, shift the search by some amount based on the +/// critical position and a pre-computed shift. +/// +/// This type is wrapped in the forward and reverse finders that expose +/// consistent forward or reverse APIs. +#[derive(Clone, Copy, Debug)] +struct TwoWay { + /// A small bitset used as a quick prefilter (in addition to any prefilter + /// given by the caller). Namely, a bit `i` is set if and only if `b%64==i` + /// for any `b == needle[i]`. + /// + /// When used as a prefilter, if the last byte at the current candidate + /// position is NOT in this set, then we can skip that entire candidate + /// position (the length of the needle). This is essentially the shift + /// trick found in Boyer-Moore, but only applied to bytes that don't appear + /// in the needle. + /// + /// N.B. This trick was inspired by something similar in std's + /// implementation of Two-Way. + byteset: ApproximateByteSet, + /// A critical position in needle. 
Specifically, this position corresponds + /// to beginning of either the minimal or maximal suffix in needle. (N.B. + /// See SuffixType below for why "minimal" isn't quite the correct word + /// here.) + /// + /// This is the position at which every search begins. Namely, search + /// starts by scanning text to the right of this position, and only if + /// there's a match does the text to the left of this position get scanned. + critical_pos: usize, + /// The amount we shift by in the Two-Way search algorithm. This + /// corresponds to the "small period" and "large period" cases. + shift: Shift, +} + +impl Finder { + /// Create a searcher that finds occurrences of the given `needle`. + /// + /// An empty `needle` results in a match at every position in a haystack, + /// including at `haystack.len()`. + #[inline] + pub fn new(needle: &[u8]) -> Finder { + let byteset = ApproximateByteSet::new(needle); + let min_suffix = Suffix::forward(needle, SuffixKind::Minimal); + let max_suffix = Suffix::forward(needle, SuffixKind::Maximal); + let (period_lower_bound, critical_pos) = + if min_suffix.pos > max_suffix.pos { + (min_suffix.period, min_suffix.pos) + } else { + (max_suffix.period, max_suffix.pos) + }; + let shift = Shift::forward(needle, period_lower_bound, critical_pos); + Finder(TwoWay { byteset, critical_pos, shift }) + } + + /// Returns the first occurrence of `needle` in the given `haystack`, or + /// `None` if no such occurrence could be found. + /// + /// The `needle` given must be the same as the `needle` provided to + /// [`Finder::new`]. + /// + /// An empty `needle` results in a match at every position in a haystack, + /// including at `haystack.len()`. + #[inline] + pub fn find(&self, haystack: &[u8], needle: &[u8]) -> Option { + self.find_with_prefilter(None, haystack, needle) + } + + /// This is like [`Finder::find`], but it accepts a prefilter for + /// accelerating searches. + /// + /// Currently this is not exposed in the public API because, at the time + /// of writing, I didn't want to spend time thinking about how to expose + /// the prefilter infrastructure (if at all). If you have a compelling use + /// case for exposing this routine, please create an issue. Do *not* open + /// a PR that just exposes `Pre` and friends. Exporting this routine will + /// require API design. + #[inline(always)] + pub(crate) fn find_with_prefilter( + &self, + pre: Option>, + haystack: &[u8], + needle: &[u8], + ) -> Option { + match self.0.shift { + Shift::Small { period } => { + self.find_small_imp(pre, haystack, needle, period) + } + Shift::Large { shift } => { + self.find_large_imp(pre, haystack, needle, shift) + } + } + } + + // Each of the two search implementations below can be accelerated by a + // prefilter, but it is not always enabled. To avoid its overhead when + // its disabled, we explicitly inline each search implementation based on + // whether a prefilter will be used or not. The decision on which to use + // is made in the parent meta searcher. 
+ + #[inline(always)] + fn find_small_imp( + &self, + mut pre: Option>, + haystack: &[u8], + needle: &[u8], + period: usize, + ) -> Option { + let mut pos = 0; + let mut shift = 0; + let last_byte_pos = match needle.len().checked_sub(1) { + None => return Some(pos), + Some(last_byte) => last_byte, + }; + while pos + needle.len() <= haystack.len() { + let mut i = cmp::max(self.0.critical_pos, shift); + if let Some(pre) = pre.as_mut() { + if pre.is_effective() { + pos += pre.find(&haystack[pos..])?; + shift = 0; + i = self.0.critical_pos; + if pos + needle.len() > haystack.len() { + return None; + } + } + } + if !self.0.byteset.contains(haystack[pos + last_byte_pos]) { + pos += needle.len(); + shift = 0; + continue; + } + while i < needle.len() && needle[i] == haystack[pos + i] { + i += 1; + } + if i < needle.len() { + pos += i - self.0.critical_pos + 1; + shift = 0; + } else { + let mut j = self.0.critical_pos; + while j > shift && needle[j] == haystack[pos + j] { + j -= 1; + } + if j <= shift && needle[shift] == haystack[pos + shift] { + return Some(pos); + } + pos += period; + shift = needle.len() - period; + } + } + None + } + + #[inline(always)] + fn find_large_imp( + &self, + mut pre: Option>, + haystack: &[u8], + needle: &[u8], + shift: usize, + ) -> Option { + let mut pos = 0; + let last_byte_pos = match needle.len().checked_sub(1) { + None => return Some(pos), + Some(last_byte) => last_byte, + }; + 'outer: while pos + needle.len() <= haystack.len() { + if let Some(pre) = pre.as_mut() { + if pre.is_effective() { + pos += pre.find(&haystack[pos..])?; + if pos + needle.len() > haystack.len() { + return None; + } + } + } + + if !self.0.byteset.contains(haystack[pos + last_byte_pos]) { + pos += needle.len(); + continue; + } + let mut i = self.0.critical_pos; + while i < needle.len() && needle[i] == haystack[pos + i] { + i += 1; + } + if i < needle.len() { + pos += i - self.0.critical_pos + 1; + } else { + for j in (0..self.0.critical_pos).rev() { + if needle[j] != haystack[pos + j] { + pos += shift; + continue 'outer; + } + } + return Some(pos); + } + } + None + } +} + +impl FinderRev { + /// Create a searcher that finds occurrences of the given `needle`. + /// + /// An empty `needle` results in a match at every position in a haystack, + /// including at `haystack.len()`. + #[inline] + pub fn new(needle: &[u8]) -> FinderRev { + let byteset = ApproximateByteSet::new(needle); + let min_suffix = Suffix::reverse(needle, SuffixKind::Minimal); + let max_suffix = Suffix::reverse(needle, SuffixKind::Maximal); + let (period_lower_bound, critical_pos) = + if min_suffix.pos < max_suffix.pos { + (min_suffix.period, min_suffix.pos) + } else { + (max_suffix.period, max_suffix.pos) + }; + let shift = Shift::reverse(needle, period_lower_bound, critical_pos); + FinderRev(TwoWay { byteset, critical_pos, shift }) + } + + /// Returns the last occurrence of `needle` in the given `haystack`, or + /// `None` if no such occurrence could be found. + /// + /// The `needle` given must be the same as the `needle` provided to + /// [`FinderRev::new`]. + /// + /// An empty `needle` results in a match at every position in a haystack, + /// including at `haystack.len()`. + #[inline] + pub fn rfind(&self, haystack: &[u8], needle: &[u8]) -> Option { + // For the reverse case, we don't use a prefilter. It's plausible that + // perhaps we should, but it's a lot of additional code to do it, and + // it's not clear that it's actually worth it. If you have a really + // compelling use case for this, please file an issue. 
+ match self.0.shift { + Shift::Small { period } => { + self.rfind_small_imp(haystack, needle, period) + } + Shift::Large { shift } => { + self.rfind_large_imp(haystack, needle, shift) + } + } + } + + #[inline(always)] + fn rfind_small_imp( + &self, + haystack: &[u8], + needle: &[u8], + period: usize, + ) -> Option { + let nlen = needle.len(); + let mut pos = haystack.len(); + let mut shift = nlen; + let first_byte = match needle.get(0) { + None => return Some(pos), + Some(&first_byte) => first_byte, + }; + while pos >= nlen { + if !self.0.byteset.contains(haystack[pos - nlen]) { + pos -= nlen; + shift = nlen; + continue; + } + let mut i = cmp::min(self.0.critical_pos, shift); + while i > 0 && needle[i - 1] == haystack[pos - nlen + i - 1] { + i -= 1; + } + if i > 0 || first_byte != haystack[pos - nlen] { + pos -= self.0.critical_pos - i + 1; + shift = nlen; + } else { + let mut j = self.0.critical_pos; + while j < shift && needle[j] == haystack[pos - nlen + j] { + j += 1; + } + if j >= shift { + return Some(pos - nlen); + } + pos -= period; + shift = period; + } + } + None + } + + #[inline(always)] + fn rfind_large_imp( + &self, + haystack: &[u8], + needle: &[u8], + shift: usize, + ) -> Option { + let nlen = needle.len(); + let mut pos = haystack.len(); + let first_byte = match needle.get(0) { + None => return Some(pos), + Some(&first_byte) => first_byte, + }; + while pos >= nlen { + if !self.0.byteset.contains(haystack[pos - nlen]) { + pos -= nlen; + continue; + } + let mut i = self.0.critical_pos; + while i > 0 && needle[i - 1] == haystack[pos - nlen + i - 1] { + i -= 1; + } + if i > 0 || first_byte != haystack[pos - nlen] { + pos -= self.0.critical_pos - i + 1; + } else { + let mut j = self.0.critical_pos; + while j < nlen && needle[j] == haystack[pos - nlen + j] { + j += 1; + } + if j == nlen { + return Some(pos - nlen); + } + pos -= shift; + } + } + None + } +} + +/// A representation of the amount we're allowed to shift by during Two-Way +/// search. +/// +/// When computing a critical factorization of the needle, we find the position +/// of the critical factorization by finding the needle's maximal (or minimal) +/// suffix, along with the period of that suffix. It turns out that the period +/// of that suffix is a lower bound on the period of the needle itself. +/// +/// This lower bound is equivalent to the actual period of the needle in +/// some cases. To describe that case, we denote the needle as `x` where +/// `x = uv` and `v` is the lexicographic maximal suffix of `v`. The lower +/// bound given here is always the period of `v`, which is `<= period(x)`. The +/// case where `period(v) == period(x)` occurs when `len(u) < (len(x) / 2)` and +/// where `u` is a suffix of `v[0..period(v)]`. +/// +/// This case is important because the search algorithm for when the +/// periods are equivalent is slightly different than the search algorithm +/// for when the periods are not equivalent. In particular, when they aren't +/// equivalent, we know that the period of the needle is no less than half its +/// length. In this case, we shift by an amount less than or equal to the +/// period of the needle (determined by the maximum length of the components +/// of the critical factorization of `x`, i.e., `max(len(u), len(v))`).. +/// +/// The above two cases are represented by the variants below. Each entails +/// a different instantiation of the Two-Way search algorithm. +/// +/// N.B. 
If we could find a way to compute the exact period in all cases, +/// then we could collapse this case analysis and simplify the algorithm. The +/// Two-Way paper suggests this is possible, but more reading is required to +/// grok why the authors didn't pursue that path. +#[derive(Clone, Copy, Debug)] +enum Shift { + Small { period: usize }, + Large { shift: usize }, +} + +impl Shift { + /// Compute the shift for a given needle in the forward direction. + /// + /// This requires a lower bound on the period and a critical position. + /// These can be computed by extracting both the minimal and maximal + /// lexicographic suffixes, and choosing the right-most starting position. + /// The lower bound on the period is then the period of the chosen suffix. + fn forward( + needle: &[u8], + period_lower_bound: usize, + critical_pos: usize, + ) -> Shift { + let large = cmp::max(critical_pos, needle.len() - critical_pos); + if critical_pos * 2 >= needle.len() { + return Shift::Large { shift: large }; + } + + let (u, v) = needle.split_at(critical_pos); + if !is_suffix(&v[..period_lower_bound], u) { + return Shift::Large { shift: large }; + } + Shift::Small { period: period_lower_bound } + } + + /// Compute the shift for a given needle in the reverse direction. + /// + /// This requires a lower bound on the period and a critical position. + /// These can be computed by extracting both the minimal and maximal + /// lexicographic suffixes, and choosing the left-most starting position. + /// The lower bound on the period is then the period of the chosen suffix. + fn reverse( + needle: &[u8], + period_lower_bound: usize, + critical_pos: usize, + ) -> Shift { + let large = cmp::max(critical_pos, needle.len() - critical_pos); + if (needle.len() - critical_pos) * 2 >= needle.len() { + return Shift::Large { shift: large }; + } + + let (v, u) = needle.split_at(critical_pos); + if !is_prefix(&v[v.len() - period_lower_bound..], u) { + return Shift::Large { shift: large }; + } + Shift::Small { period: period_lower_bound } + } +} + +/// A suffix extracted from a needle along with its period. +#[derive(Debug)] +struct Suffix { + /// The starting position of this suffix. + /// + /// If this is a forward suffix, then `&bytes[pos..]` can be used. If this + /// is a reverse suffix, then `&bytes[..pos]` can be used. That is, for + /// forward suffixes, this is an inclusive starting position, where as for + /// reverse suffixes, this is an exclusive ending position. + pos: usize, + /// The period of this suffix. + /// + /// Note that this is NOT necessarily the period of the string from which + /// this suffix comes from. (It is always less than or equal to the period + /// of the original string.) + period: usize, +} + +impl Suffix { + fn forward(needle: &[u8], kind: SuffixKind) -> Suffix { + // suffix represents our maximal (or minimal) suffix, along with + // its period. + let mut suffix = Suffix { pos: 0, period: 1 }; + // The start of a suffix in `needle` that we are considering as a + // more maximal (or minimal) suffix than what's in `suffix`. + let mut candidate_start = 1; + // The current offset of our suffixes that we're comparing. + // + // When the characters at this offset are the same, then we mush on + // to the next position since no decision is possible. When the + // candidate's character is greater (or lesser) than the corresponding + // character than our current maximal (or minimal) suffix, then the + // current suffix is changed over to the candidate and we restart our + // search. 
Otherwise, the candidate suffix is no good and we restart + // our search on the next candidate. + // + // The three cases above correspond to the three cases in the loop + // below. + let mut offset = 0; + + while candidate_start + offset < needle.len() { + let current = needle[suffix.pos + offset]; + let candidate = needle[candidate_start + offset]; + match kind.cmp(current, candidate) { + SuffixOrdering::Accept => { + suffix = Suffix { pos: candidate_start, period: 1 }; + candidate_start += 1; + offset = 0; + } + SuffixOrdering::Skip => { + candidate_start += offset + 1; + offset = 0; + suffix.period = candidate_start - suffix.pos; + } + SuffixOrdering::Push => { + if offset + 1 == suffix.period { + candidate_start += suffix.period; + offset = 0; + } else { + offset += 1; + } + } + } + } + suffix + } + + fn reverse(needle: &[u8], kind: SuffixKind) -> Suffix { + // See the comments in `forward` for how this works. + let mut suffix = Suffix { pos: needle.len(), period: 1 }; + if needle.len() == 1 { + return suffix; + } + let mut candidate_start = match needle.len().checked_sub(1) { + None => return suffix, + Some(candidate_start) => candidate_start, + }; + let mut offset = 0; + + while offset < candidate_start { + let current = needle[suffix.pos - offset - 1]; + let candidate = needle[candidate_start - offset - 1]; + match kind.cmp(current, candidate) { + SuffixOrdering::Accept => { + suffix = Suffix { pos: candidate_start, period: 1 }; + candidate_start -= 1; + offset = 0; + } + SuffixOrdering::Skip => { + candidate_start -= offset + 1; + offset = 0; + suffix.period = suffix.pos - candidate_start; + } + SuffixOrdering::Push => { + if offset + 1 == suffix.period { + candidate_start -= suffix.period; + offset = 0; + } else { + offset += 1; + } + } + } + } + suffix + } +} + +/// The kind of suffix to extract. +#[derive(Clone, Copy, Debug)] +enum SuffixKind { + /// Extract the smallest lexicographic suffix from a string. + /// + /// Technically, this doesn't actually pick the smallest lexicographic + /// suffix. e.g., Given the choice between `a` and `aa`, this will choose + /// the latter over the former, even though `a < aa`. The reasoning for + /// this isn't clear from the paper, but it still smells like a minimal + /// suffix. + Minimal, + /// Extract the largest lexicographic suffix from a string. + /// + /// Unlike `Minimal`, this really does pick the maximum suffix. e.g., Given + /// the choice between `z` and `zz`, this will choose the latter over the + /// former. + Maximal, +} + +/// The result of comparing corresponding bytes between two suffixes. +#[derive(Clone, Copy, Debug)] +enum SuffixOrdering { + /// This occurs when the given candidate byte indicates that the candidate + /// suffix is better than the current maximal (or minimal) suffix. That is, + /// the current candidate suffix should supplant the current maximal (or + /// minimal) suffix. + Accept, + /// This occurs when the given candidate byte excludes the candidate suffix + /// from being better than the current maximal (or minimal) suffix. That + /// is, the current candidate suffix should be dropped and the next one + /// should be considered. + Skip, + /// This occurs when no decision to accept or skip the candidate suffix + /// can be made, e.g., when corresponding bytes are equivalent. In this + /// case, the next corresponding bytes should be compared. 
+ Push, +} + +impl SuffixKind { + /// Returns true if and only if the given candidate byte indicates that + /// it should replace the current suffix as the maximal (or minimal) + /// suffix. + fn cmp(self, current: u8, candidate: u8) -> SuffixOrdering { + use self::SuffixOrdering::*; + + match self { + SuffixKind::Minimal if candidate < current => Accept, + SuffixKind::Minimal if candidate > current => Skip, + SuffixKind::Minimal => Push, + SuffixKind::Maximal if candidate > current => Accept, + SuffixKind::Maximal if candidate < current => Skip, + SuffixKind::Maximal => Push, + } + } +} + +/// A bitset used to track whether a particular byte exists in a needle or not. +/// +/// Namely, bit 'i' is set if and only if byte%64==i for any byte in the +/// needle. If a particular byte in the haystack is NOT in this set, then one +/// can conclude that it is also not in the needle, and thus, one can advance +/// in the haystack by needle.len() bytes. +#[derive(Clone, Copy, Debug)] +struct ApproximateByteSet(u64); + +impl ApproximateByteSet { + /// Create a new set from the given needle. + fn new(needle: &[u8]) -> ApproximateByteSet { + let mut bits = 0; + for &b in needle { + bits |= 1 << (b % 64); + } + ApproximateByteSet(bits) + } + + /// Return true if and only if the given byte might be in this set. This + /// may return a false positive, but will never return a false negative. + #[inline(always)] + fn contains(&self, byte: u8) -> bool { + self.0 & (1 << (byte % 64)) != 0 + } +} + +#[cfg(test)] +mod tests { + use alloc::vec::Vec; + + use super::*; + + /// Convenience wrapper for computing the suffix as a byte string. + fn get_suffix_forward(needle: &[u8], kind: SuffixKind) -> (&[u8], usize) { + let s = Suffix::forward(needle, kind); + (&needle[s.pos..], s.period) + } + + /// Convenience wrapper for computing the reverse suffix as a byte string. + fn get_suffix_reverse(needle: &[u8], kind: SuffixKind) -> (&[u8], usize) { + let s = Suffix::reverse(needle, kind); + (&needle[..s.pos], s.period) + } + + /// Return all of the non-empty suffixes in the given byte string. + fn suffixes(bytes: &[u8]) -> Vec<&[u8]> { + (0..bytes.len()).map(|i| &bytes[i..]).collect() + } + + /// Return the lexicographically maximal suffix of the given byte string. + fn naive_maximal_suffix_forward(needle: &[u8]) -> &[u8] { + let mut sufs = suffixes(needle); + sufs.sort(); + sufs.pop().unwrap() + } + + /// Return the lexicographically maximal suffix of the reverse of the given + /// byte string. + fn naive_maximal_suffix_reverse(needle: &[u8]) -> Vec { + let mut reversed = needle.to_vec(); + reversed.reverse(); + let mut got = naive_maximal_suffix_forward(&reversed).to_vec(); + got.reverse(); + got + } + + define_substring_forward_quickcheck!(|h, n| Some( + Finder::new(n).find(h, n) + )); + define_substring_reverse_quickcheck!(|h, n| Some( + FinderRev::new(n).rfind(h, n) + )); + + #[test] + fn forward() { + crate::tests::substring::Runner::new() + .fwd(|h, n| Some(Finder::new(n).find(h, n))) + .run(); + } + + #[test] + fn reverse() { + crate::tests::substring::Runner::new() + .rev(|h, n| Some(FinderRev::new(n).rfind(h, n))) + .run(); + } + + #[test] + fn suffix_forward() { + macro_rules! assert_suffix_min { + ($given:expr, $expected:expr, $period:expr) => { + let (got_suffix, got_period) = + get_suffix_forward($given.as_bytes(), SuffixKind::Minimal); + let got_suffix = core::str::from_utf8(got_suffix).unwrap(); + assert_eq!(($expected, $period), (got_suffix, got_period)); + }; + } + + macro_rules! 
assert_suffix_max { + ($given:expr, $expected:expr, $period:expr) => { + let (got_suffix, got_period) = + get_suffix_forward($given.as_bytes(), SuffixKind::Maximal); + let got_suffix = core::str::from_utf8(got_suffix).unwrap(); + assert_eq!(($expected, $period), (got_suffix, got_period)); + }; + } + + assert_suffix_min!("a", "a", 1); + assert_suffix_max!("a", "a", 1); + + assert_suffix_min!("ab", "ab", 2); + assert_suffix_max!("ab", "b", 1); + + assert_suffix_min!("ba", "a", 1); + assert_suffix_max!("ba", "ba", 2); + + assert_suffix_min!("abc", "abc", 3); + assert_suffix_max!("abc", "c", 1); + + assert_suffix_min!("acb", "acb", 3); + assert_suffix_max!("acb", "cb", 2); + + assert_suffix_min!("cba", "a", 1); + assert_suffix_max!("cba", "cba", 3); + + assert_suffix_min!("abcabc", "abcabc", 3); + assert_suffix_max!("abcabc", "cabc", 3); + + assert_suffix_min!("abcabcabc", "abcabcabc", 3); + assert_suffix_max!("abcabcabc", "cabcabc", 3); + + assert_suffix_min!("abczz", "abczz", 5); + assert_suffix_max!("abczz", "zz", 1); + + assert_suffix_min!("zzabc", "abc", 3); + assert_suffix_max!("zzabc", "zzabc", 5); + + assert_suffix_min!("aaa", "aaa", 1); + assert_suffix_max!("aaa", "aaa", 1); + + assert_suffix_min!("foobar", "ar", 2); + assert_suffix_max!("foobar", "r", 1); + } + + #[test] + fn suffix_reverse() { + macro_rules! assert_suffix_min { + ($given:expr, $expected:expr, $period:expr) => { + let (got_suffix, got_period) = + get_suffix_reverse($given.as_bytes(), SuffixKind::Minimal); + let got_suffix = core::str::from_utf8(got_suffix).unwrap(); + assert_eq!(($expected, $period), (got_suffix, got_period)); + }; + } + + macro_rules! assert_suffix_max { + ($given:expr, $expected:expr, $period:expr) => { + let (got_suffix, got_period) = + get_suffix_reverse($given.as_bytes(), SuffixKind::Maximal); + let got_suffix = core::str::from_utf8(got_suffix).unwrap(); + assert_eq!(($expected, $period), (got_suffix, got_period)); + }; + } + + assert_suffix_min!("a", "a", 1); + assert_suffix_max!("a", "a", 1); + + assert_suffix_min!("ab", "a", 1); + assert_suffix_max!("ab", "ab", 2); + + assert_suffix_min!("ba", "ba", 2); + assert_suffix_max!("ba", "b", 1); + + assert_suffix_min!("abc", "a", 1); + assert_suffix_max!("abc", "abc", 3); + + assert_suffix_min!("acb", "a", 1); + assert_suffix_max!("acb", "ac", 2); + + assert_suffix_min!("cba", "cba", 3); + assert_suffix_max!("cba", "c", 1); + + assert_suffix_min!("abcabc", "abca", 3); + assert_suffix_max!("abcabc", "abcabc", 3); + + assert_suffix_min!("abcabcabc", "abcabca", 3); + assert_suffix_max!("abcabcabc", "abcabcabc", 3); + + assert_suffix_min!("abczz", "a", 1); + assert_suffix_max!("abczz", "abczz", 5); + + assert_suffix_min!("zzabc", "zza", 3); + assert_suffix_max!("zzabc", "zz", 1); + + assert_suffix_min!("aaa", "aaa", 1); + assert_suffix_max!("aaa", "aaa", 1); + } + + #[cfg(not(miri))] + quickcheck::quickcheck! { + fn qc_suffix_forward_maximal(bytes: Vec) -> bool { + if bytes.is_empty() { + return true; + } + + let (got, _) = get_suffix_forward(&bytes, SuffixKind::Maximal); + let expected = naive_maximal_suffix_forward(&bytes); + got == expected + } + + fn qc_suffix_reverse_maximal(bytes: Vec) -> bool { + if bytes.is_empty() { + return true; + } + + let (got, _) = get_suffix_reverse(&bytes, SuffixKind::Maximal); + let expected = naive_maximal_suffix_reverse(&bytes); + expected == got + } + } + + // This is a regression test caught by quickcheck that exercised a bug in + // the reverse small period handling. 
The bug was that we were using 'if j + // == shift' to determine if a match occurred, but the correct guard is 'if + // j >= shift', which matches the corresponding guard in the forward impl. + #[test] + fn regression_rev_small_period() { + let rfind = |h, n| FinderRev::new(n).rfind(h, n); + let haystack = "ababaz"; + let needle = "abab"; + assert_eq!(Some(0), rfind(haystack.as_bytes(), needle.as_bytes())); + } +} diff --git a/vendor/memchr/src/arch/generic/memchr.rs b/vendor/memchr/src/arch/generic/memchr.rs new file mode 100644 index 00000000000000..de61fd81d8b24e --- /dev/null +++ b/vendor/memchr/src/arch/generic/memchr.rs @@ -0,0 +1,1214 @@ +/*! +Generic crate-internal routines for the `memchr` family of functions. +*/ + +// What follows is a vector algorithm generic over the specific vector +// type to detect the position of one, two or three needles in a haystack. +// From what I know, this is a "classic" algorithm, although I don't +// believe it has been published in any peer reviewed journal. I believe +// it can be found in places like glibc and Go's standard library. It +// appears to be well known and is elaborated on in more detail here: +// https://gms.tf/stdfind-and-memchr-optimizations.html +// +// While the routine below is fairly long and perhaps intimidating, the basic +// idea is actually very simple and can be expressed straight-forwardly in +// pseudo code. The pseudo code below is written for 128 bit vectors, but the +// actual code below works for anything that implements the Vector trait. +// +// needle = (n1 << 15) | (n1 << 14) | ... | (n1 << 1) | n1 +// // Note: shift amount is in bytes +// +// while i <= haystack.len() - 16: +// // A 16 byte vector. Each byte in chunk corresponds to a byte in +// // the haystack. +// chunk = haystack[i:i+16] +// // Compare bytes in needle with bytes in chunk. The result is a 16 +// // byte chunk where each byte is 0xFF if the corresponding bytes +// // in needle and chunk were equal, or 0x00 otherwise. +// eqs = cmpeq(needle, chunk) +// // Return a 32 bit integer where the most significant 16 bits +// // are always 0 and the lower 16 bits correspond to whether the +// // most significant bit in the correspond byte in `eqs` is set. +// // In other words, `mask as u16` has bit i set if and only if +// // needle[i] == chunk[i]. +// mask = movemask(eqs) +// +// // Mask is 0 if there is no match, and non-zero otherwise. +// if mask != 0: +// // trailing_zeros tells us the position of the least significant +// // bit that is set. +// return i + trailing_zeros(mask) +// +// // haystack length may not be a multiple of 16, so search the rest. +// while i < haystack.len(): +// if haystack[i] == n1: +// return i +// +// // No match found. +// return NULL +// +// In fact, we could loosely translate the above code to Rust line-for-line +// and it would be a pretty fast algorithm. But, we pull out all the stops +// to go as fast as possible: +// +// 1. We use aligned loads. That is, we do some finagling to make sure our +// primary loop not only proceeds in increments of 16 bytes, but that +// the address of haystack's pointer that we dereference is aligned to +// 16 bytes. 16 is a magic number here because it is the size of SSE2 +// 128-bit vector. (For the AVX2 algorithm, 32 is the magic number.) +// Therefore, to get aligned loads, our pointer's address must be evenly +// divisible by 16. +// 2. Our primary loop proceeds 64 bytes at a time instead of 16. 
It's +// kind of like loop unrolling, but we combine the equality comparisons +// using a vector OR such that we only need to extract a single mask to +// determine whether a match exists or not. If so, then we do some +// book-keeping to determine the precise location but otherwise mush on. +// 3. We use our "chunk" comparison routine in as many places as possible, +// even if it means using unaligned loads. In particular, if haystack +// starts with an unaligned address, then we do an unaligned load to +// search the first 16 bytes. We then start our primary loop at the +// smallest subsequent aligned address, which will actually overlap with +// previously searched bytes. But we're OK with that. We do a similar +// dance at the end of our primary loop. Finally, to avoid a +// byte-at-a-time loop at the end, we do a final 16 byte unaligned load +// that may overlap with a previous load. This is OK because it converts +// a loop into a small number of very fast vector instructions. The overlap +// is OK because we know the place where the overlap occurs does not +// contain a match. +// +// And that's pretty all there is to it. Note that since the below is +// generic and since it's meant to be inlined into routines with a +// `#[target_feature(enable = "...")]` annotation, we must mark all routines as +// both unsafe and `#[inline(always)]`. +// +// The fact that the code below is generic does somewhat inhibit us. For +// example, I've noticed that introducing an unlineable `#[cold]` function to +// handle the match case in the loop generates tighter assembly, but there is +// no way to do this in the generic code below because the generic code doesn't +// know what `target_feature` annotation to apply to the unlineable function. +// We could make such functions part of the `Vector` trait, but we instead live +// with the slightly sub-optimal codegen for now since it doesn't seem to have +// a noticeable perf difference. + +use crate::{ + ext::Pointer, + vector::{MoveMask, Vector}, +}; + +/// Finds all occurrences of a single byte in a haystack. +#[derive(Clone, Copy, Debug)] +pub(crate) struct One { + s1: u8, + v1: V, +} + +impl One { + /// The number of bytes we examine per each iteration of our search loop. + const LOOP_SIZE: usize = 4 * V::BYTES; + + /// Create a new searcher that finds occurrences of the byte given. + #[inline(always)] + pub(crate) unsafe fn new(needle: u8) -> One { + One { s1: needle, v1: V::splat(needle) } + } + + /// Returns the needle given to `One::new`. + #[inline(always)] + pub(crate) fn needle1(&self) -> u8 { + self.s1 + } + + /// Return a pointer to the first occurrence of the needle in the given + /// haystack. If no such occurrence exists, then `None` is returned. + /// + /// When a match is found, the pointer returned is guaranteed to be + /// `>= start` and `< end`. + /// + /// # Safety + /// + /// * It must be the case that `start < end` and that the distance between + /// them is at least equal to `V::BYTES`. That is, it must always be valid + /// to do at least an unaligned load of `V` at `start`. + /// * Both `start` and `end` must be valid for reads. + /// * Both `start` and `end` must point to an initialized value. + /// * Both `start` and `end` must point to the same allocated object and + /// must either be in bounds or at most one byte past the end of the + /// allocated object. + /// * Both `start` and `end` must be _derived from_ a pointer to the same + /// object. + /// * The distance between `start` and `end` must not overflow `isize`. 
+ /// * The distance being in bounds must not rely on "wrapping around" the + /// address space. + #[inline(always)] + pub(crate) unsafe fn find_raw( + &self, + start: *const u8, + end: *const u8, + ) -> Option<*const u8> { + // If we want to support vectors bigger than 256 bits, we probably + // need to move up to using a u64 for the masks used below. Currently + // they are 32 bits, which means we're SOL for vectors that need masks + // bigger than 32 bits. Overall unclear until there's a use case. + debug_assert!(V::BYTES <= 32, "vector cannot be bigger than 32 bytes"); + + let topos = V::Mask::first_offset; + let len = end.distance(start); + debug_assert!( + len >= V::BYTES, + "haystack has length {}, but must be at least {}", + len, + V::BYTES + ); + + // Search a possibly unaligned chunk at `start`. This covers any part + // of the haystack prior to where aligned loads can start. + if let Some(cur) = self.search_chunk(start, topos) { + return Some(cur); + } + // Set `cur` to the first V-aligned pointer greater than `start`. + let mut cur = start.add(V::BYTES - (start.as_usize() & V::ALIGN)); + debug_assert!(cur > start && end.sub(V::BYTES) >= start); + if len >= Self::LOOP_SIZE { + while cur <= end.sub(Self::LOOP_SIZE) { + debug_assert_eq!(0, cur.as_usize() % V::BYTES); + + let a = V::load_aligned(cur); + let b = V::load_aligned(cur.add(1 * V::BYTES)); + let c = V::load_aligned(cur.add(2 * V::BYTES)); + let d = V::load_aligned(cur.add(3 * V::BYTES)); + let eqa = self.v1.cmpeq(a); + let eqb = self.v1.cmpeq(b); + let eqc = self.v1.cmpeq(c); + let eqd = self.v1.cmpeq(d); + let or1 = eqa.or(eqb); + let or2 = eqc.or(eqd); + let or3 = or1.or(or2); + if or3.movemask_will_have_non_zero() { + let mask = eqa.movemask(); + if mask.has_non_zero() { + return Some(cur.add(topos(mask))); + } + + let mask = eqb.movemask(); + if mask.has_non_zero() { + return Some(cur.add(1 * V::BYTES).add(topos(mask))); + } + + let mask = eqc.movemask(); + if mask.has_non_zero() { + return Some(cur.add(2 * V::BYTES).add(topos(mask))); + } + + let mask = eqd.movemask(); + debug_assert!(mask.has_non_zero()); + return Some(cur.add(3 * V::BYTES).add(topos(mask))); + } + cur = cur.add(Self::LOOP_SIZE); + } + } + // Handle any leftovers after the aligned loop above. We use unaligned + // loads here, but I believe we are guaranteed that they are aligned + // since `cur` is aligned. + while cur <= end.sub(V::BYTES) { + debug_assert!(end.distance(cur) >= V::BYTES); + if let Some(cur) = self.search_chunk(cur, topos) { + return Some(cur); + } + cur = cur.add(V::BYTES); + } + // Finally handle any remaining bytes less than the size of V. In this + // case, our pointer may indeed be unaligned and the load may overlap + // with the previous one. But that's okay since we know the previous + // load didn't lead to a match (otherwise we wouldn't be here). + if cur < end { + debug_assert!(end.distance(cur) < V::BYTES); + cur = cur.sub(V::BYTES - end.distance(cur)); + debug_assert_eq!(end.distance(cur), V::BYTES); + return self.search_chunk(cur, topos); + } + None + } + + /// Return a pointer to the last occurrence of the needle in the given + /// haystack. If no such occurrence exists, then `None` is returned. + /// + /// When a match is found, the pointer returned is guaranteed to be + /// `>= start` and `< end`. + /// + /// # Safety + /// + /// * It must be the case that `start < end` and that the distance between + /// them is at least equal to `V::BYTES`. 
That is, it must always be valid + /// to do at least an unaligned load of `V` at `start`. + /// * Both `start` and `end` must be valid for reads. + /// * Both `start` and `end` must point to an initialized value. + /// * Both `start` and `end` must point to the same allocated object and + /// must either be in bounds or at most one byte past the end of the + /// allocated object. + /// * Both `start` and `end` must be _derived from_ a pointer to the same + /// object. + /// * The distance between `start` and `end` must not overflow `isize`. + /// * The distance being in bounds must not rely on "wrapping around" the + /// address space. + #[inline(always)] + pub(crate) unsafe fn rfind_raw( + &self, + start: *const u8, + end: *const u8, + ) -> Option<*const u8> { + // If we want to support vectors bigger than 256 bits, we probably + // need to move up to using a u64 for the masks used below. Currently + // they are 32 bits, which means we're SOL for vectors that need masks + // bigger than 32 bits. Overall unclear until there's a use case. + debug_assert!(V::BYTES <= 32, "vector cannot be bigger than 32 bytes"); + + let topos = V::Mask::last_offset; + let len = end.distance(start); + debug_assert!( + len >= V::BYTES, + "haystack has length {}, but must be at least {}", + len, + V::BYTES + ); + + if let Some(cur) = self.search_chunk(end.sub(V::BYTES), topos) { + return Some(cur); + } + let mut cur = end.sub(end.as_usize() & V::ALIGN); + debug_assert!(start <= cur && cur <= end); + if len >= Self::LOOP_SIZE { + while cur >= start.add(Self::LOOP_SIZE) { + debug_assert_eq!(0, cur.as_usize() % V::BYTES); + + cur = cur.sub(Self::LOOP_SIZE); + let a = V::load_aligned(cur); + let b = V::load_aligned(cur.add(1 * V::BYTES)); + let c = V::load_aligned(cur.add(2 * V::BYTES)); + let d = V::load_aligned(cur.add(3 * V::BYTES)); + let eqa = self.v1.cmpeq(a); + let eqb = self.v1.cmpeq(b); + let eqc = self.v1.cmpeq(c); + let eqd = self.v1.cmpeq(d); + let or1 = eqa.or(eqb); + let or2 = eqc.or(eqd); + let or3 = or1.or(or2); + if or3.movemask_will_have_non_zero() { + let mask = eqd.movemask(); + if mask.has_non_zero() { + return Some(cur.add(3 * V::BYTES).add(topos(mask))); + } + + let mask = eqc.movemask(); + if mask.has_non_zero() { + return Some(cur.add(2 * V::BYTES).add(topos(mask))); + } + + let mask = eqb.movemask(); + if mask.has_non_zero() { + return Some(cur.add(1 * V::BYTES).add(topos(mask))); + } + + let mask = eqa.movemask(); + debug_assert!(mask.has_non_zero()); + return Some(cur.add(topos(mask))); + } + } + } + while cur >= start.add(V::BYTES) { + debug_assert!(cur.distance(start) >= V::BYTES); + cur = cur.sub(V::BYTES); + if let Some(cur) = self.search_chunk(cur, topos) { + return Some(cur); + } + } + if cur > start { + debug_assert!(cur.distance(start) < V::BYTES); + return self.search_chunk(start, topos); + } + None + } + + /// Return a count of all matching bytes in the given haystack. + /// + /// # Safety + /// + /// * It must be the case that `start < end` and that the distance between + /// them is at least equal to `V::BYTES`. That is, it must always be valid + /// to do at least an unaligned load of `V` at `start`. + /// * Both `start` and `end` must be valid for reads. + /// * Both `start` and `end` must point to an initialized value. + /// * Both `start` and `end` must point to the same allocated object and + /// must either be in bounds or at most one byte past the end of the + /// allocated object. 
+ /// * Both `start` and `end` must be _derived from_ a pointer to the same + /// object. + /// * The distance between `start` and `end` must not overflow `isize`. + /// * The distance being in bounds must not rely on "wrapping around" the + /// address space. + #[inline(always)] + pub(crate) unsafe fn count_raw( + &self, + start: *const u8, + end: *const u8, + ) -> usize { + debug_assert!(V::BYTES <= 32, "vector cannot be bigger than 32 bytes"); + + let confirm = |b| b == self.needle1(); + let len = end.distance(start); + debug_assert!( + len >= V::BYTES, + "haystack has length {}, but must be at least {}", + len, + V::BYTES + ); + + // Set `cur` to the first V-aligned pointer greater than `start`. + let mut cur = start.add(V::BYTES - (start.as_usize() & V::ALIGN)); + // Count any matching bytes before we start our aligned loop. + let mut count = count_byte_by_byte(start, cur, confirm); + debug_assert!(cur > start && end.sub(V::BYTES) >= start); + if len >= Self::LOOP_SIZE { + while cur <= end.sub(Self::LOOP_SIZE) { + debug_assert_eq!(0, cur.as_usize() % V::BYTES); + + let a = V::load_aligned(cur); + let b = V::load_aligned(cur.add(1 * V::BYTES)); + let c = V::load_aligned(cur.add(2 * V::BYTES)); + let d = V::load_aligned(cur.add(3 * V::BYTES)); + let eqa = self.v1.cmpeq(a); + let eqb = self.v1.cmpeq(b); + let eqc = self.v1.cmpeq(c); + let eqd = self.v1.cmpeq(d); + count += eqa.movemask().count_ones(); + count += eqb.movemask().count_ones(); + count += eqc.movemask().count_ones(); + count += eqd.movemask().count_ones(); + cur = cur.add(Self::LOOP_SIZE); + } + } + // Handle any leftovers after the aligned loop above. We use unaligned + // loads here, but I believe we are guaranteed that they are aligned + // since `cur` is aligned. + while cur <= end.sub(V::BYTES) { + debug_assert!(end.distance(cur) >= V::BYTES); + let chunk = V::load_unaligned(cur); + count += self.v1.cmpeq(chunk).movemask().count_ones(); + cur = cur.add(V::BYTES); + } + // And finally count any leftovers that weren't caught above. + count += count_byte_by_byte(cur, end, confirm); + count + } + + /// Search `V::BYTES` starting at `cur` via an unaligned load. + /// + /// `mask_to_offset` should be a function that converts a `movemask` to + /// an offset such that `cur.add(offset)` corresponds to a pointer to the + /// match location if one is found. Generally it is expected to use either + /// `mask_to_first_offset` or `mask_to_last_offset`, depending on whether + /// one is implementing a forward or reverse search, respectively. + /// + /// # Safety + /// + /// `cur` must be a valid pointer and it must be valid to do an unaligned + /// load of size `V::BYTES` at `cur`. + #[inline(always)] + unsafe fn search_chunk( + &self, + cur: *const u8, + mask_to_offset: impl Fn(V::Mask) -> usize, + ) -> Option<*const u8> { + let chunk = V::load_unaligned(cur); + let mask = self.v1.cmpeq(chunk).movemask(); + if mask.has_non_zero() { + Some(cur.add(mask_to_offset(mask))) + } else { + None + } + } +} + +/// Finds all occurrences of two bytes in a haystack. +/// +/// That is, this reports matches of one of two possible bytes. For example, +/// searching for `a` or `b` in `afoobar` would report matches at offsets `0`, +/// `4` and `5`. +#[derive(Clone, Copy, Debug)] +pub(crate) struct Two { + s1: u8, + s2: u8, + v1: V, + v2: V, +} + +impl Two { + /// The number of bytes we examine per each iteration of our search loop. + const LOOP_SIZE: usize = 2 * V::BYTES; + + /// Create a new searcher that finds occurrences of the byte given. 
+ #[inline(always)] + pub(crate) unsafe fn new(needle1: u8, needle2: u8) -> Two { + Two { + s1: needle1, + s2: needle2, + v1: V::splat(needle1), + v2: V::splat(needle2), + } + } + + /// Returns the first needle given to `Two::new`. + #[inline(always)] + pub(crate) fn needle1(&self) -> u8 { + self.s1 + } + + /// Returns the second needle given to `Two::new`. + #[inline(always)] + pub(crate) fn needle2(&self) -> u8 { + self.s2 + } + + /// Return a pointer to the first occurrence of one of the needles in the + /// given haystack. If no such occurrence exists, then `None` is returned. + /// + /// When a match is found, the pointer returned is guaranteed to be + /// `>= start` and `< end`. + /// + /// # Safety + /// + /// * It must be the case that `start < end` and that the distance between + /// them is at least equal to `V::BYTES`. That is, it must always be valid + /// to do at least an unaligned load of `V` at `start`. + /// * Both `start` and `end` must be valid for reads. + /// * Both `start` and `end` must point to an initialized value. + /// * Both `start` and `end` must point to the same allocated object and + /// must either be in bounds or at most one byte past the end of the + /// allocated object. + /// * Both `start` and `end` must be _derived from_ a pointer to the same + /// object. + /// * The distance between `start` and `end` must not overflow `isize`. + /// * The distance being in bounds must not rely on "wrapping around" the + /// address space. + #[inline(always)] + pub(crate) unsafe fn find_raw( + &self, + start: *const u8, + end: *const u8, + ) -> Option<*const u8> { + // If we want to support vectors bigger than 256 bits, we probably + // need to move up to using a u64 for the masks used below. Currently + // they are 32 bits, which means we're SOL for vectors that need masks + // bigger than 32 bits. Overall unclear until there's a use case. + debug_assert!(V::BYTES <= 32, "vector cannot be bigger than 32 bytes"); + + let topos = V::Mask::first_offset; + let len = end.distance(start); + debug_assert!( + len >= V::BYTES, + "haystack has length {}, but must be at least {}", + len, + V::BYTES + ); + + // Search a possibly unaligned chunk at `start`. This covers any part + // of the haystack prior to where aligned loads can start. + if let Some(cur) = self.search_chunk(start, topos) { + return Some(cur); + } + // Set `cur` to the first V-aligned pointer greater than `start`. + let mut cur = start.add(V::BYTES - (start.as_usize() & V::ALIGN)); + debug_assert!(cur > start && end.sub(V::BYTES) >= start); + if len >= Self::LOOP_SIZE { + while cur <= end.sub(Self::LOOP_SIZE) { + debug_assert_eq!(0, cur.as_usize() % V::BYTES); + + let a = V::load_aligned(cur); + let b = V::load_aligned(cur.add(V::BYTES)); + let eqa1 = self.v1.cmpeq(a); + let eqb1 = self.v1.cmpeq(b); + let eqa2 = self.v2.cmpeq(a); + let eqb2 = self.v2.cmpeq(b); + let or1 = eqa1.or(eqb1); + let or2 = eqa2.or(eqb2); + let or3 = or1.or(or2); + if or3.movemask_will_have_non_zero() { + let mask = eqa1.movemask().or(eqa2.movemask()); + if mask.has_non_zero() { + return Some(cur.add(topos(mask))); + } + + let mask = eqb1.movemask().or(eqb2.movemask()); + debug_assert!(mask.has_non_zero()); + return Some(cur.add(V::BYTES).add(topos(mask))); + } + cur = cur.add(Self::LOOP_SIZE); + } + } + // Handle any leftovers after the aligned loop above. We use unaligned + // loads here, but I believe we are guaranteed that they are aligned + // since `cur` is aligned. 
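A rough scalar model of the block-at-a-time structure used by `find_raw` above may help when reading the loop: test a whole block cheaply, and only locate the exact position once a block is known to contain a hit. The sketch below is illustrative only; `find_in_blocks` and its fixed block size are invented, and the real code uses aligned vector loads and movemasks rather than byte loops.

    // Illustrative scalar model of the unrolled search loop above; not part of the patch.
    fn find_in_blocks(haystack: &[u8], n1: u8, n2: u8) -> Option<usize> {
        const BLOCK: usize = 32; // stands in for the LOOP_SIZE used above
        let mut i = 0;
        while i + BLOCK <= haystack.len() {
            let block = &haystack[i..i + BLOCK];
            // Cheap "does this block contain any match?" test, analogous to
            // or-ing the vector comparisons and checking the combined movemask.
            if block.iter().any(|&b| b == n1 || b == n2) {
                let rel = block.iter().position(|&b| b == n1 || b == n2).unwrap();
                return Some(i + rel);
            }
            i += BLOCK;
        }
        // Tail smaller than one block, analogous to the epilogue handling below.
        haystack[i..].iter().position(|&b| b == n1 || b == n2).map(|rel| i + rel)
    }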
+ while cur <= end.sub(V::BYTES) { + debug_assert!(end.distance(cur) >= V::BYTES); + if let Some(cur) = self.search_chunk(cur, topos) { + return Some(cur); + } + cur = cur.add(V::BYTES); + } + // Finally handle any remaining bytes less than the size of V. In this + // case, our pointer may indeed be unaligned and the load may overlap + // with the previous one. But that's okay since we know the previous + // load didn't lead to a match (otherwise we wouldn't be here). + if cur < end { + debug_assert!(end.distance(cur) < V::BYTES); + cur = cur.sub(V::BYTES - end.distance(cur)); + debug_assert_eq!(end.distance(cur), V::BYTES); + return self.search_chunk(cur, topos); + } + None + } + + /// Return a pointer to the last occurrence of the needle in the given + /// haystack. If no such occurrence exists, then `None` is returned. + /// + /// When a match is found, the pointer returned is guaranteed to be + /// `>= start` and `< end`. + /// + /// # Safety + /// + /// * It must be the case that `start < end` and that the distance between + /// them is at least equal to `V::BYTES`. That is, it must always be valid + /// to do at least an unaligned load of `V` at `start`. + /// * Both `start` and `end` must be valid for reads. + /// * Both `start` and `end` must point to an initialized value. + /// * Both `start` and `end` must point to the same allocated object and + /// must either be in bounds or at most one byte past the end of the + /// allocated object. + /// * Both `start` and `end` must be _derived from_ a pointer to the same + /// object. + /// * The distance between `start` and `end` must not overflow `isize`. + /// * The distance being in bounds must not rely on "wrapping around" the + /// address space. + #[inline(always)] + pub(crate) unsafe fn rfind_raw( + &self, + start: *const u8, + end: *const u8, + ) -> Option<*const u8> { + // If we want to support vectors bigger than 256 bits, we probably + // need to move up to using a u64 for the masks used below. Currently + // they are 32 bits, which means we're SOL for vectors that need masks + // bigger than 32 bits. Overall unclear until there's a use case. 
+ debug_assert!(V::BYTES <= 32, "vector cannot be bigger than 32 bytes");
+
+ let topos = V::Mask::last_offset;
+ let len = end.distance(start);
+ debug_assert!(
+ len >= V::BYTES,
+ "haystack has length {}, but must be at least {}",
+ len,
+ V::BYTES
+ );
+
+ if let Some(cur) = self.search_chunk(end.sub(V::BYTES), topos) {
+ return Some(cur);
+ }
+ let mut cur = end.sub(end.as_usize() & V::ALIGN);
+ debug_assert!(start <= cur && cur <= end);
+ if len >= Self::LOOP_SIZE {
+ while cur >= start.add(Self::LOOP_SIZE) {
+ debug_assert_eq!(0, cur.as_usize() % V::BYTES);
+
+ cur = cur.sub(Self::LOOP_SIZE);
+ let a = V::load_aligned(cur);
+ let b = V::load_aligned(cur.add(V::BYTES));
+ let eqa1 = self.v1.cmpeq(a);
+ let eqb1 = self.v1.cmpeq(b);
+ let eqa2 = self.v2.cmpeq(a);
+ let eqb2 = self.v2.cmpeq(b);
+ let or1 = eqa1.or(eqb1);
+ let or2 = eqa2.or(eqb2);
+ let or3 = or1.or(or2);
+ if or3.movemask_will_have_non_zero() {
+ let mask = eqb1.movemask().or(eqb2.movemask());
+ if mask.has_non_zero() {
+ return Some(cur.add(V::BYTES).add(topos(mask)));
+ }
+
+ let mask = eqa1.movemask().or(eqa2.movemask());
+ debug_assert!(mask.has_non_zero());
+ return Some(cur.add(topos(mask)));
+ }
+ }
+ }
+ while cur >= start.add(V::BYTES) {
+ debug_assert!(cur.distance(start) >= V::BYTES);
+ cur = cur.sub(V::BYTES);
+ if let Some(cur) = self.search_chunk(cur, topos) {
+ return Some(cur);
+ }
+ }
+ if cur > start {
+ debug_assert!(cur.distance(start) < V::BYTES);
+ return self.search_chunk(start, topos);
+ }
+ None
+ }
+
+ /// Search `V::BYTES` starting at `cur` via an unaligned load.
+ ///
+ /// `mask_to_offset` should be a function that converts a `movemask` to
+ /// an offset such that `cur.add(offset)` corresponds to a pointer to the
+ /// match location if one is found. Generally it is expected to use either
+ /// `mask_to_first_offset` or `mask_to_last_offset`, depending on whether
+ /// one is implementing a forward or reverse search, respectively.
+ ///
+ /// # Safety
+ ///
+ /// `cur` must be a valid pointer and it must be valid to do an unaligned
+ /// load of size `V::BYTES` at `cur`.
+ #[inline(always)]
+ unsafe fn search_chunk(
+ &self,
+ cur: *const u8,
+ mask_to_offset: impl Fn(V::Mask) -> usize,
+ ) -> Option<*const u8> {
+ let chunk = V::load_unaligned(cur);
+ let eq1 = self.v1.cmpeq(chunk);
+ let eq2 = self.v2.cmpeq(chunk);
+ let mask = eq1.or(eq2).movemask();
+ if mask.has_non_zero() {
+ let mask1 = eq1.movemask();
+ let mask2 = eq2.movemask();
+ Some(cur.add(mask_to_offset(mask1.or(mask2))))
+ } else {
+ None
+ }
+ }
+}
+
+/// Finds all occurrences of three bytes in a haystack.
+///
+/// That is, this reports matches of one of three possible bytes. For example,
+/// searching for `a`, `b` or `o` in `afoobar` would report matches at offsets
+/// `0`, `2`, `3`, `4` and `5`.
+#[derive(Clone, Copy, Debug)]
+pub(crate) struct Three<V> {
+ s1: u8,
+ s2: u8,
+ s3: u8,
+ v1: V,
+ v2: V,
+ v3: V,
+}
+
+impl<V: Vector> Three<V> {
+ /// The number of bytes we examine per each iteration of our search loop.
+ const LOOP_SIZE: usize = 2 * V::BYTES;
+
+ /// Create a new searcher that finds occurrences of the needle bytes given.
+ #[inline(always)]
+ pub(crate) unsafe fn new(
+ needle1: u8,
+ needle2: u8,
+ needle3: u8,
+ ) -> Three<V> {
+ Three {
+ s1: needle1,
+ s2: needle2,
+ s3: needle3,
+ v1: V::splat(needle1),
+ v2: V::splat(needle2),
+ v3: V::splat(needle3),
+ }
+ }
+
+ /// Returns the first needle given to `Three::new`.
+ #[inline(always)]
+ pub(crate) fn needle1(&self) -> u8 {
+ self.s1
+ }
+
+ /// Returns the second needle given to `Three::new`.
+ #[inline(always)] + pub(crate) fn needle2(&self) -> u8 { + self.s2 + } + + /// Returns the third needle given to `Three::new`. + #[inline(always)] + pub(crate) fn needle3(&self) -> u8 { + self.s3 + } + + /// Return a pointer to the first occurrence of one of the needles in the + /// given haystack. If no such occurrence exists, then `None` is returned. + /// + /// When a match is found, the pointer returned is guaranteed to be + /// `>= start` and `< end`. + /// + /// # Safety + /// + /// * It must be the case that `start < end` and that the distance between + /// them is at least equal to `V::BYTES`. That is, it must always be valid + /// to do at least an unaligned load of `V` at `start`. + /// * Both `start` and `end` must be valid for reads. + /// * Both `start` and `end` must point to an initialized value. + /// * Both `start` and `end` must point to the same allocated object and + /// must either be in bounds or at most one byte past the end of the + /// allocated object. + /// * Both `start` and `end` must be _derived from_ a pointer to the same + /// object. + /// * The distance between `start` and `end` must not overflow `isize`. + /// * The distance being in bounds must not rely on "wrapping around" the + /// address space. + #[inline(always)] + pub(crate) unsafe fn find_raw( + &self, + start: *const u8, + end: *const u8, + ) -> Option<*const u8> { + // If we want to support vectors bigger than 256 bits, we probably + // need to move up to using a u64 for the masks used below. Currently + // they are 32 bits, which means we're SOL for vectors that need masks + // bigger than 32 bits. Overall unclear until there's a use case. + debug_assert!(V::BYTES <= 32, "vector cannot be bigger than 32 bytes"); + + let topos = V::Mask::first_offset; + let len = end.distance(start); + debug_assert!( + len >= V::BYTES, + "haystack has length {}, but must be at least {}", + len, + V::BYTES + ); + + // Search a possibly unaligned chunk at `start`. This covers any part + // of the haystack prior to where aligned loads can start. + if let Some(cur) = self.search_chunk(start, topos) { + return Some(cur); + } + // Set `cur` to the first V-aligned pointer greater than `start`. + let mut cur = start.add(V::BYTES - (start.as_usize() & V::ALIGN)); + debug_assert!(cur > start && end.sub(V::BYTES) >= start); + if len >= Self::LOOP_SIZE { + while cur <= end.sub(Self::LOOP_SIZE) { + debug_assert_eq!(0, cur.as_usize() % V::BYTES); + + let a = V::load_aligned(cur); + let b = V::load_aligned(cur.add(V::BYTES)); + let eqa1 = self.v1.cmpeq(a); + let eqb1 = self.v1.cmpeq(b); + let eqa2 = self.v2.cmpeq(a); + let eqb2 = self.v2.cmpeq(b); + let eqa3 = self.v3.cmpeq(a); + let eqb3 = self.v3.cmpeq(b); + let or1 = eqa1.or(eqb1); + let or2 = eqa2.or(eqb2); + let or3 = eqa3.or(eqb3); + let or4 = or1.or(or2); + let or5 = or3.or(or4); + if or5.movemask_will_have_non_zero() { + let mask = eqa1 + .movemask() + .or(eqa2.movemask()) + .or(eqa3.movemask()); + if mask.has_non_zero() { + return Some(cur.add(topos(mask))); + } + + let mask = eqb1 + .movemask() + .or(eqb2.movemask()) + .or(eqb3.movemask()); + debug_assert!(mask.has_non_zero()); + return Some(cur.add(V::BYTES).add(topos(mask))); + } + cur = cur.add(Self::LOOP_SIZE); + } + } + // Handle any leftovers after the aligned loop above. We use unaligned + // loads here, but I believe we are guaranteed that they are aligned + // since `cur` is aligned. 
+ while cur <= end.sub(V::BYTES) { + debug_assert!(end.distance(cur) >= V::BYTES); + if let Some(cur) = self.search_chunk(cur, topos) { + return Some(cur); + } + cur = cur.add(V::BYTES); + } + // Finally handle any remaining bytes less than the size of V. In this + // case, our pointer may indeed be unaligned and the load may overlap + // with the previous one. But that's okay since we know the previous + // load didn't lead to a match (otherwise we wouldn't be here). + if cur < end { + debug_assert!(end.distance(cur) < V::BYTES); + cur = cur.sub(V::BYTES - end.distance(cur)); + debug_assert_eq!(end.distance(cur), V::BYTES); + return self.search_chunk(cur, topos); + } + None + } + + /// Return a pointer to the last occurrence of the needle in the given + /// haystack. If no such occurrence exists, then `None` is returned. + /// + /// When a match is found, the pointer returned is guaranteed to be + /// `>= start` and `< end`. + /// + /// # Safety + /// + /// * It must be the case that `start < end` and that the distance between + /// them is at least equal to `V::BYTES`. That is, it must always be valid + /// to do at least an unaligned load of `V` at `start`. + /// * Both `start` and `end` must be valid for reads. + /// * Both `start` and `end` must point to an initialized value. + /// * Both `start` and `end` must point to the same allocated object and + /// must either be in bounds or at most one byte past the end of the + /// allocated object. + /// * Both `start` and `end` must be _derived from_ a pointer to the same + /// object. + /// * The distance between `start` and `end` must not overflow `isize`. + /// * The distance being in bounds must not rely on "wrapping around" the + /// address space. + #[inline(always)] + pub(crate) unsafe fn rfind_raw( + &self, + start: *const u8, + end: *const u8, + ) -> Option<*const u8> { + // If we want to support vectors bigger than 256 bits, we probably + // need to move up to using a u64 for the masks used below. Currently + // they are 32 bits, which means we're SOL for vectors that need masks + // bigger than 32 bits. Overall unclear until there's a use case. 
+ debug_assert!(V::BYTES <= 32, "vector cannot be bigger than 32 bytes"); + + let topos = V::Mask::last_offset; + let len = end.distance(start); + debug_assert!( + len >= V::BYTES, + "haystack has length {}, but must be at least {}", + len, + V::BYTES + ); + + if let Some(cur) = self.search_chunk(end.sub(V::BYTES), topos) { + return Some(cur); + } + let mut cur = end.sub(end.as_usize() & V::ALIGN); + debug_assert!(start <= cur && cur <= end); + if len >= Self::LOOP_SIZE { + while cur >= start.add(Self::LOOP_SIZE) { + debug_assert_eq!(0, cur.as_usize() % V::BYTES); + + cur = cur.sub(Self::LOOP_SIZE); + let a = V::load_aligned(cur); + let b = V::load_aligned(cur.add(V::BYTES)); + let eqa1 = self.v1.cmpeq(a); + let eqb1 = self.v1.cmpeq(b); + let eqa2 = self.v2.cmpeq(a); + let eqb2 = self.v2.cmpeq(b); + let eqa3 = self.v3.cmpeq(a); + let eqb3 = self.v3.cmpeq(b); + let or1 = eqa1.or(eqb1); + let or2 = eqa2.or(eqb2); + let or3 = eqa3.or(eqb3); + let or4 = or1.or(or2); + let or5 = or3.or(or4); + if or5.movemask_will_have_non_zero() { + let mask = eqb1 + .movemask() + .or(eqb2.movemask()) + .or(eqb3.movemask()); + if mask.has_non_zero() { + return Some(cur.add(V::BYTES).add(topos(mask))); + } + + let mask = eqa1 + .movemask() + .or(eqa2.movemask()) + .or(eqa3.movemask()); + debug_assert!(mask.has_non_zero()); + return Some(cur.add(topos(mask))); + } + } + } + while cur >= start.add(V::BYTES) { + debug_assert!(cur.distance(start) >= V::BYTES); + cur = cur.sub(V::BYTES); + if let Some(cur) = self.search_chunk(cur, topos) { + return Some(cur); + } + } + if cur > start { + debug_assert!(cur.distance(start) < V::BYTES); + return self.search_chunk(start, topos); + } + None + } + + /// Search `V::BYTES` starting at `cur` via an unaligned load. + /// + /// `mask_to_offset` should be a function that converts a `movemask` to + /// an offset such that `cur.add(offset)` corresponds to a pointer to the + /// match location if one is found. Generally it is expected to use either + /// `mask_to_first_offset` or `mask_to_last_offset`, depending on whether + /// one is implementing a forward or reverse search, respectively. + /// + /// # Safety + /// + /// `cur` must be a valid pointer and it must be valid to do an unaligned + /// load of size `V::BYTES` at `cur`. + #[inline(always)] + unsafe fn search_chunk( + &self, + cur: *const u8, + mask_to_offset: impl Fn(V::Mask) -> usize, + ) -> Option<*const u8> { + let chunk = V::load_unaligned(cur); + let eq1 = self.v1.cmpeq(chunk); + let eq2 = self.v2.cmpeq(chunk); + let eq3 = self.v3.cmpeq(chunk); + let mask = eq1.or(eq2).or(eq3).movemask(); + if mask.has_non_zero() { + let mask1 = eq1.movemask(); + let mask2 = eq2.movemask(); + let mask3 = eq3.movemask(); + Some(cur.add(mask_to_offset(mask1.or(mask2).or(mask3)))) + } else { + None + } + } +} + +/// An iterator over all occurrences of a set of bytes in a haystack. +/// +/// This iterator implements the routines necessary to provide a +/// `DoubleEndedIterator` impl, which means it can also be used to find +/// occurrences in reverse order. +/// +/// The lifetime parameters are as follows: +/// +/// * `'h` refers to the lifetime of the haystack being searched. +/// +/// This type is intended to be used to implement all iterators for the +/// `memchr` family of functions. It handles a tiny bit of marginally tricky +/// raw pointer math, but otherwise expects the caller to provide `find_raw` +/// and `rfind_raw` routines for each call of `next` and `next_back`, +/// respectively. 
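A safe-index analogue of that bookkeeping, kept deliberately simple, is sketched below. It is illustrative only: the `IndexIter` name is invented, and it scans with plain slice searches instead of the `find_raw`/`rfind_raw` callbacks used by the real iterator.

    // Illustrative sketch of the forward/backward window narrowing; not part of the patch.
    struct IndexIter {
        start: usize, // next forward search begins here
        end: usize,   // next reverse search ends here (exclusive)
    }

    impl IndexIter {
        fn new(haystack_len: usize) -> IndexIter {
            IndexIter { start: 0, end: haystack_len }
        }

        /// Forward step: search `haystack[start..end]`, then move `start` past the hit.
        fn next(&mut self, haystack: &[u8], needle: u8) -> Option<usize> {
            let rel = haystack[self.start..self.end].iter().position(|&b| b == needle)?;
            let abs = self.start + rel;
            self.start = abs + 1;
            Some(abs)
        }

        /// Backward step: search the same window from the back, then shrink `end` to the hit.
        fn next_back(&mut self, haystack: &[u8], needle: u8) -> Option<usize> {
            let rel = haystack[self.start..self.end].iter().rposition(|&b| b == needle)?;
            let abs = self.start + rel;
            self.end = abs;
            Some(abs)
        }
    }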
+#[derive(Clone, Debug)] +pub(crate) struct Iter<'h> { + /// The original starting point into the haystack. We use this to convert + /// pointers to offsets. + original_start: *const u8, + /// The current starting point into the haystack. That is, where the next + /// search will begin. + start: *const u8, + /// The current ending point into the haystack. That is, where the next + /// reverse search will begin. + end: *const u8, + /// A marker for tracking the lifetime of the start/cur_start/cur_end + /// pointers above, which all point into the haystack. + haystack: core::marker::PhantomData<&'h [u8]>, +} + +// SAFETY: Iter contains no shared references to anything that performs any +// interior mutations. Also, the lifetime guarantees that Iter will not outlive +// the haystack. +unsafe impl<'h> Send for Iter<'h> {} + +// SAFETY: Iter perform no interior mutations, therefore no explicit +// synchronization is necessary. Also, the lifetime guarantees that Iter will +// not outlive the haystack. +unsafe impl<'h> Sync for Iter<'h> {} + +impl<'h> Iter<'h> { + /// Create a new generic memchr iterator. + #[inline(always)] + pub(crate) fn new(haystack: &'h [u8]) -> Iter<'h> { + Iter { + original_start: haystack.as_ptr(), + start: haystack.as_ptr(), + end: haystack.as_ptr().wrapping_add(haystack.len()), + haystack: core::marker::PhantomData, + } + } + + /// Returns the next occurrence in the forward direction. + /// + /// # Safety + /// + /// Callers must ensure that if a pointer is returned from the closure + /// provided, then it must be greater than or equal to the start pointer + /// and less than the end pointer. + #[inline(always)] + pub(crate) unsafe fn next( + &mut self, + mut find_raw: impl FnMut(*const u8, *const u8) -> Option<*const u8>, + ) -> Option { + // SAFETY: Pointers are derived directly from the same &[u8] haystack. + // We only ever modify start/end corresponding to a matching offset + // found between start and end. Thus all changes to start/end maintain + // our safety requirements. + // + // The only other assumption we rely on is that the pointer returned + // by `find_raw` satisfies `self.start <= found < self.end`, and that + // safety contract is forwarded to the caller. + let found = find_raw(self.start, self.end)?; + let result = found.distance(self.original_start); + self.start = found.add(1); + Some(result) + } + + /// Returns the number of remaining elements in this iterator. + #[inline(always)] + pub(crate) fn count( + self, + mut count_raw: impl FnMut(*const u8, *const u8) -> usize, + ) -> usize { + // SAFETY: Pointers are derived directly from the same &[u8] haystack. + // We only ever modify start/end corresponding to a matching offset + // found between start and end. Thus all changes to start/end maintain + // our safety requirements. + count_raw(self.start, self.end) + } + + /// Returns the next occurrence in reverse. + /// + /// # Safety + /// + /// Callers must ensure that if a pointer is returned from the closure + /// provided, then it must be greater than or equal to the start pointer + /// and less than the end pointer. + #[inline(always)] + pub(crate) unsafe fn next_back( + &mut self, + mut rfind_raw: impl FnMut(*const u8, *const u8) -> Option<*const u8>, + ) -> Option { + // SAFETY: Pointers are derived directly from the same &[u8] haystack. + // We only ever modify start/end corresponding to a matching offset + // found between start and end. Thus all changes to start/end maintain + // our safety requirements. 
+ //
+ // The only other assumption we rely on is that the pointer returned
+ // by `rfind_raw` satisfies `self.start <= found < self.end`, and that
+ // safety contract is forwarded to the caller.
+ let found = rfind_raw(self.start, self.end)?;
+ let result = found.distance(self.original_start);
+ self.end = found;
+ Some(result)
+ }
+
+ /// Provides an implementation of `Iterator::size_hint`.
+ #[inline(always)]
+ pub(crate) fn size_hint(&self) -> (usize, Option<usize>) {
+ (0, Some(self.end.as_usize().saturating_sub(self.start.as_usize())))
+ }
+}
+
+/// Search a slice using a function that operates on raw pointers.
+///
+/// Given a function to search a contiguous sequence of memory for the location
+/// of a non-empty set of bytes, this will execute that search on a slice of
+/// bytes. The pointer returned by the given function will be converted to an
+/// offset relative to the starting point of the given slice. That is, if a
+/// match is found, the offset returned by this routine is guaranteed to be a
+/// valid index into `haystack`.
+///
+/// Callers may use this for a forward or reverse search.
+///
+/// # Safety
+///
+/// Callers must ensure that if a pointer is returned by `find_raw`, then the
+/// pointer must be greater than or equal to the starting pointer and less than
+/// the end pointer.
+#[inline(always)]
+pub(crate) unsafe fn search_slice_with_raw(
+ haystack: &[u8],
+ mut find_raw: impl FnMut(*const u8, *const u8) -> Option<*const u8>,
+) -> Option<usize> {
+ // SAFETY: We rely on `find_raw` to return a correct and valid pointer, but
+ // otherwise, `start` and `end` are valid due to the guarantees provided by
+ // a &[u8].
+ let start = haystack.as_ptr();
+ let end = start.add(haystack.len());
+ let found = find_raw(start, end)?;
+ Some(found.distance(start))
+}
+
+/// Performs a forward byte-at-a-time loop until either `ptr >= end_ptr` or
+/// until `confirm(*ptr)` returns `true`. If the former occurs, then `None` is
+/// returned. If the latter occurs, then the pointer at which `confirm` returns
+/// `true` is returned.
+///
+/// # Safety
+///
+/// Callers must provide valid pointers and they must satisfy `start_ptr <=
+/// ptr` and `ptr <= end_ptr`.
+#[inline(always)]
+pub(crate) unsafe fn fwd_byte_by_byte<F: Fn(u8) -> bool>(
+ start: *const u8,
+ end: *const u8,
+ confirm: F,
+) -> Option<*const u8> {
+ debug_assert!(start <= end);
+ let mut ptr = start;
+ while ptr < end {
+ if confirm(*ptr) {
+ return Some(ptr);
+ }
+ ptr = ptr.offset(1);
+ }
+ None
+}
+
+/// Performs a reverse byte-at-a-time loop until either `ptr < start_ptr` or
+/// until `confirm(*ptr)` returns `true`. If the former occurs, then `None` is
+/// returned. If the latter occurs, then the pointer at which `confirm` returns
+/// `true` is returned.
+///
+/// # Safety
+///
+/// Callers must provide valid pointers and they must satisfy `start_ptr <=
+/// ptr` and `ptr <= end_ptr`.
+#[inline(always)]
+pub(crate) unsafe fn rev_byte_by_byte<F: Fn(u8) -> bool>(
+ start: *const u8,
+ end: *const u8,
+ confirm: F,
+) -> Option<*const u8> {
+ debug_assert!(start <= end);
+
+ let mut ptr = end;
+ while ptr > start {
+ ptr = ptr.offset(-1);
+ if confirm(*ptr) {
+ return Some(ptr);
+ }
+ }
+ None
+}
+
+/// Performs a forward byte-at-a-time loop until `ptr >= end_ptr` and returns
+/// the number of times `confirm(*ptr)` returns `true`.
+///
+/// # Safety
+///
+/// Callers must provide valid pointers and they must satisfy `start_ptr <=
+/// ptr` and `ptr <= end_ptr`.
+#[inline(always)]
+pub(crate) unsafe fn count_byte_by_byte<F: Fn(u8) -> bool>(
+ start: *const u8,
+ end: *const u8,
+ confirm: F,
+) -> usize {
+ debug_assert!(start <= end);
+ let mut ptr = start;
+ let mut count = 0;
+ while ptr < end {
+ if confirm(*ptr) {
+ count += 1;
+ }
+ ptr = ptr.offset(1);
+ }
+ count
+}
diff --git a/vendor/memchr/src/arch/generic/mod.rs b/vendor/memchr/src/arch/generic/mod.rs
new file mode 100644
index 00000000000000..63ee3f0b34ed96
--- /dev/null
+++ b/vendor/memchr/src/arch/generic/mod.rs
@@ -0,0 +1,14 @@
+/*!
+This module defines "generic" routines that can be specialized to specific
+architectures.
+
+We don't expose this module primarily because it would require exposing all
+of the internal infrastructure required to write these generic routines.
+That infrastructure should be treated as an implementation detail so that
+it is allowed to evolve. Instead, what we expose are architecture specific
+instantiations of these generic implementations. The generic code just lets us
+write the code once (usually).
+*/
+
+pub(crate) mod memchr;
+pub(crate) mod packedpair;
diff --git a/vendor/memchr/src/arch/generic/packedpair.rs b/vendor/memchr/src/arch/generic/packedpair.rs
new file mode 100644
index 00000000000000..8d97cf28fad117
--- /dev/null
+++ b/vendor/memchr/src/arch/generic/packedpair.rs
@@ -0,0 +1,317 @@
+/*!
+Generic crate-internal routines for the "packed pair" SIMD algorithm.
+
+The "packed pair" algorithm is based on the [generic SIMD] algorithm. The main
+difference is that it (by default) uses a background distribution of byte
+frequencies to heuristically select the pair of bytes to search for.
+
+[generic SIMD]: http://0x80.pl/articles/simd-strfind.html#first-and-last
+*/
+
+use crate::{
+ arch::all::{is_equal_raw, packedpair::Pair},
+ ext::Pointer,
+ vector::{MoveMask, Vector},
+};
+
+/// A generic architecture dependent "packed pair" finder.
+///
+/// This finder picks two bytes that it believes have high predictive power
+/// for indicating an overall match of a needle. Depending on whether
+/// `Finder::find` or `Finder::find_prefilter` is used, it reports offsets
+/// where the needle matches or could match. In the prefilter case, candidates
+/// are reported whenever the [`Pair`] of bytes given matches.
+///
+/// This is architecture dependent because it uses specific vector operations
+/// to look for occurrences of the pair of bytes.
+///
+/// This type is not meant to be exported and is instead meant to be used as
+/// the implementation for architecture specific facades. Why? Because it's a
+/// bit of a quirky API that requires `inline(always)` annotations. And pretty
+/// much everything has safety obligations due (at least) to the caller needing
+/// to inline calls into routines marked with
+/// `#[target_feature(enable = "...")]`.
+#[derive(Clone, Copy, Debug)]
+pub(crate) struct Finder<V> {
+ pair: Pair,
+ v1: V,
+ v2: V,
+ min_haystack_len: usize,
+}
+
+impl<V: Vector> Finder<V> {
+ /// Create a new pair searcher. The searcher returned can either report
+ /// exact matches of `needle` or act as a prefilter and report candidate
+ /// positions of `needle`.
+ ///
+ /// # Safety
+ ///
+ /// Callers must ensure that whatever vector type this routine is called
+ /// with is supported by the current environment.
+ ///
+ /// Callers must also ensure that `needle.len() >= 2`.
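As a simplified scalar model of the search this finder performs, the sketch below shows the same two-step shape: a cheap check that the two chosen pair bytes line up at their offsets, followed by a full comparison only for candidates that survive. It is illustrative only; the `packed_pair_find` name and signature are invented and nothing here is vectorized.

    // Illustrative scalar sketch of the packed-pair prefilter; not part of the patch.
    fn packed_pair_find(
        haystack: &[u8],
        needle: &[u8],
        index1: usize,
        index2: usize,
    ) -> Option<usize> {
        assert!(needle.len() >= 2 && index1 < needle.len() && index2 < needle.len());
        let (b1, b2) = (needle[index1], needle[index2]);
        for i in 0..=haystack.len().checked_sub(needle.len())? {
            // Cheap pair test: do both chosen bytes line up at this candidate start?
            if haystack[i + index1] == b1 && haystack[i + index2] == b2 {
                // Only surviving candidates pay for the full comparison.
                if &haystack[i..i + needle.len()] == needle {
                    return Some(i);
                }
            }
        }
        None
    }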
+ #[inline(always)] + pub(crate) unsafe fn new(needle: &[u8], pair: Pair) -> Finder { + let max_index = pair.index1().max(pair.index2()); + let min_haystack_len = + core::cmp::max(needle.len(), usize::from(max_index) + V::BYTES); + let v1 = V::splat(needle[usize::from(pair.index1())]); + let v2 = V::splat(needle[usize::from(pair.index2())]); + Finder { pair, v1, v2, min_haystack_len } + } + + /// Searches the given haystack for the given needle. The needle given + /// should be the same as the needle that this finder was initialized + /// with. + /// + /// # Panics + /// + /// When `haystack.len()` is less than [`Finder::min_haystack_len`]. + /// + /// # Safety + /// + /// Since this is meant to be used with vector functions, callers need to + /// specialize this inside of a function with a `target_feature` attribute. + /// Therefore, callers must ensure that whatever target feature is being + /// used supports the vector functions that this function is specialized + /// for. (For the specific vector functions used, see the Vector trait + /// implementations.) + #[inline(always)] + pub(crate) unsafe fn find( + &self, + haystack: &[u8], + needle: &[u8], + ) -> Option { + assert!( + haystack.len() >= self.min_haystack_len, + "haystack too small, should be at least {} but got {}", + self.min_haystack_len, + haystack.len(), + ); + + let all = V::Mask::all_zeros_except_least_significant(0); + let start = haystack.as_ptr(); + let end = start.add(haystack.len()); + let max = end.sub(self.min_haystack_len); + let mut cur = start; + + // N.B. I did experiment with unrolling the loop to deal with size(V) + // bytes at a time and 2*size(V) bytes at a time. The double unroll + // was marginally faster while the quadruple unroll was unambiguously + // slower. In the end, I decided the complexity from unrolling wasn't + // worth it. I used the memmem/krate/prebuilt/huge-en/ benchmarks to + // compare. + while cur <= max { + if let Some(chunki) = self.find_in_chunk(needle, cur, end, all) { + return Some(matched(start, cur, chunki)); + } + cur = cur.add(V::BYTES); + } + if cur < end { + let remaining = end.distance(cur); + debug_assert!( + remaining < self.min_haystack_len, + "remaining bytes should be smaller than the minimum haystack \ + length of {}, but there are {} bytes remaining", + self.min_haystack_len, + remaining, + ); + if remaining < needle.len() { + return None; + } + debug_assert!( + max < cur, + "after main loop, cur should have exceeded max", + ); + let overlap = cur.distance(max); + debug_assert!( + overlap > 0, + "overlap ({}) must always be non-zero", + overlap, + ); + debug_assert!( + overlap < V::BYTES, + "overlap ({}) cannot possibly be >= than a vector ({})", + overlap, + V::BYTES, + ); + // The mask has all of its bits set except for the first N least + // significant bits, where N=overlap. This way, any matches that + // occur in find_in_chunk within the overlap are automatically + // ignored. + let mask = V::Mask::all_zeros_except_least_significant(overlap); + cur = max; + let m = self.find_in_chunk(needle, cur, end, mask); + if let Some(chunki) = m { + return Some(matched(start, cur, chunki)); + } + } + None + } + + /// Searches the given haystack for offsets that represent candidate + /// matches of the `needle` given to this finder's constructor. The offsets + /// returned, if they are a match, correspond to the starting offset of + /// `needle` in the given `haystack`. + /// + /// # Panics + /// + /// When `haystack.len()` is less than [`Finder::min_haystack_len`]. 
+ /// + /// # Safety + /// + /// Since this is meant to be used with vector functions, callers need to + /// specialize this inside of a function with a `target_feature` attribute. + /// Therefore, callers must ensure that whatever target feature is being + /// used supports the vector functions that this function is specialized + /// for. (For the specific vector functions used, see the Vector trait + /// implementations.) + #[inline(always)] + pub(crate) unsafe fn find_prefilter( + &self, + haystack: &[u8], + ) -> Option { + assert!( + haystack.len() >= self.min_haystack_len, + "haystack too small, should be at least {} but got {}", + self.min_haystack_len, + haystack.len(), + ); + + let start = haystack.as_ptr(); + let end = start.add(haystack.len()); + let max = end.sub(self.min_haystack_len); + let mut cur = start; + + // N.B. I did experiment with unrolling the loop to deal with size(V) + // bytes at a time and 2*size(V) bytes at a time. The double unroll + // was marginally faster while the quadruple unroll was unambiguously + // slower. In the end, I decided the complexity from unrolling wasn't + // worth it. I used the memmem/krate/prebuilt/huge-en/ benchmarks to + // compare. + while cur <= max { + if let Some(chunki) = self.find_prefilter_in_chunk(cur) { + return Some(matched(start, cur, chunki)); + } + cur = cur.add(V::BYTES); + } + if cur < end { + // This routine immediately quits if a candidate match is found. + // That means that if we're here, no candidate matches have been + // found at or before 'ptr'. Thus, we don't need to mask anything + // out even though we might technically search part of the haystack + // that we've already searched (because we know it can't match). + cur = max; + if let Some(chunki) = self.find_prefilter_in_chunk(cur) { + return Some(matched(start, cur, chunki)); + } + } + None + } + + /// Search for an occurrence of our byte pair from the needle in the chunk + /// pointed to by cur, with the end of the haystack pointed to by end. + /// When an occurrence is found, memcmp is run to check if a match occurs + /// at the corresponding position. + /// + /// `mask` should have bits set corresponding the positions in the chunk + /// in which matches are considered. This is only used for the last vector + /// load where the beginning of the vector might have overlapped with the + /// last load in the main loop. The mask lets us avoid visiting positions + /// that have already been discarded as matches. + /// + /// # Safety + /// + /// It must be safe to do an unaligned read of size(V) bytes starting at + /// both (cur + self.index1) and (cur + self.index2). It must also be safe + /// to do unaligned loads on cur up to (end - needle.len()). 
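The candidate loop in `find_in_chunk` below visits one set bit of the movemask at a time. With a plain `u32` standing in for `V::Mask`, the same pattern can be written as follows (an illustrative sketch: `first_offset` corresponds to `trailing_zeros`, and clearing the least significant set bit is `mask & (mask - 1)`).

    // Illustrative: visit the offset of every set bit in a 32-bit movemask,
    // from least to most significant, mirroring `first_offset` plus
    // `clear_least_significant_bit` in the vector code.
    fn for_each_candidate(mut mask: u32, mut visit: impl FnMut(usize)) {
        while mask != 0 {
            let offset = mask.trailing_zeros() as usize;
            visit(offset);
            mask &= mask - 1; // clear the least significant set bit
        }
    }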
+ #[inline(always)] + unsafe fn find_in_chunk( + &self, + needle: &[u8], + cur: *const u8, + end: *const u8, + mask: V::Mask, + ) -> Option { + let index1 = usize::from(self.pair.index1()); + let index2 = usize::from(self.pair.index2()); + let chunk1 = V::load_unaligned(cur.add(index1)); + let chunk2 = V::load_unaligned(cur.add(index2)); + let eq1 = chunk1.cmpeq(self.v1); + let eq2 = chunk2.cmpeq(self.v2); + + let mut offsets = eq1.and(eq2).movemask().and(mask); + while offsets.has_non_zero() { + let offset = offsets.first_offset(); + let cur = cur.add(offset); + if end.sub(needle.len()) < cur { + return None; + } + if is_equal_raw(needle.as_ptr(), cur, needle.len()) { + return Some(offset); + } + offsets = offsets.clear_least_significant_bit(); + } + None + } + + /// Search for an occurrence of our byte pair from the needle in the chunk + /// pointed to by cur, with the end of the haystack pointed to by end. + /// When an occurrence is found, memcmp is run to check if a match occurs + /// at the corresponding position. + /// + /// # Safety + /// + /// It must be safe to do an unaligned read of size(V) bytes starting at + /// both (cur + self.index1) and (cur + self.index2). It must also be safe + /// to do unaligned reads on cur up to (end - needle.len()). + #[inline(always)] + unsafe fn find_prefilter_in_chunk(&self, cur: *const u8) -> Option { + let index1 = usize::from(self.pair.index1()); + let index2 = usize::from(self.pair.index2()); + let chunk1 = V::load_unaligned(cur.add(index1)); + let chunk2 = V::load_unaligned(cur.add(index2)); + let eq1 = chunk1.cmpeq(self.v1); + let eq2 = chunk2.cmpeq(self.v2); + + let offsets = eq1.and(eq2).movemask(); + if !offsets.has_non_zero() { + return None; + } + Some(offsets.first_offset()) + } + + /// Returns the pair of offsets (into the needle) used to check as a + /// predicate before confirming whether a needle exists at a particular + /// position. + #[inline] + pub(crate) fn pair(&self) -> &Pair { + &self.pair + } + + /// Returns the minimum haystack length that this `Finder` can search. + /// + /// Providing a haystack to this `Finder` shorter than this length is + /// guaranteed to result in a panic. + #[inline(always)] + pub(crate) fn min_haystack_len(&self) -> usize { + self.min_haystack_len + } +} + +/// Accepts a chunk-relative offset and returns a haystack relative offset. +/// +/// This used to be marked `#[cold]` and `#[inline(never)]`, but I couldn't +/// observe a consistent measureable difference between that and just inlining +/// it. So we go with inlining it. +/// +/// # Safety +/// +/// Same at `ptr::offset_from` in addition to `cur >= start`. +#[inline(always)] +unsafe fn matched(start: *const u8, cur: *const u8, chunki: usize) -> usize { + cur.distance(start) + chunki +} + +// If you're looking for tests, those are run for each instantiation of the +// above code. So for example, see arch::x86_64::sse2::packedpair. diff --git a/vendor/memchr/src/arch/mod.rs b/vendor/memchr/src/arch/mod.rs new file mode 100644 index 00000000000000..10332b64cd5290 --- /dev/null +++ b/vendor/memchr/src/arch/mod.rs @@ -0,0 +1,16 @@ +/*! +A module with low-level architecture dependent routines. + +These routines are useful as primitives for tasks not covered by the higher +level crate API. 
+*/ + +pub mod all; +pub(crate) mod generic; + +#[cfg(target_arch = "aarch64")] +pub mod aarch64; +#[cfg(all(target_arch = "wasm32", target_feature = "simd128"))] +pub mod wasm32; +#[cfg(target_arch = "x86_64")] +pub mod x86_64; diff --git a/vendor/memchr/src/arch/wasm32/memchr.rs b/vendor/memchr/src/arch/wasm32/memchr.rs new file mode 100644 index 00000000000000..55c1c1bb472f10 --- /dev/null +++ b/vendor/memchr/src/arch/wasm32/memchr.rs @@ -0,0 +1,124 @@ +/*! +Wrapper routines for `memchr` and friends. + +These routines choose the best implementation at compile time. (This is +different from `x86_64` because it is expected that `simd128` is almost always +available for `wasm32` targets.) +*/ + +macro_rules! defraw { + ($ty:ident, $find:ident, $start:ident, $end:ident, $($needles:ident),+) => {{ + use crate::arch::wasm32::simd128::memchr::$ty; + + debug!("chose simd128 for {}", stringify!($ty)); + debug_assert!($ty::is_available()); + // SAFETY: We know that wasm memchr is always available whenever + // code is compiled for `wasm32` with the `simd128` target feature + // enabled. + $ty::new_unchecked($($needles),+).$find($start, $end) + }} +} + +/// memchr, but using raw pointers to represent the haystack. +/// +/// # Safety +/// +/// Pointers must be valid. See `One::find_raw`. +#[inline(always)] +pub(crate) unsafe fn memchr_raw( + n1: u8, + start: *const u8, + end: *const u8, +) -> Option<*const u8> { + defraw!(One, find_raw, start, end, n1) +} + +/// memrchr, but using raw pointers to represent the haystack. +/// +/// # Safety +/// +/// Pointers must be valid. See `One::rfind_raw`. +#[inline(always)] +pub(crate) unsafe fn memrchr_raw( + n1: u8, + start: *const u8, + end: *const u8, +) -> Option<*const u8> { + defraw!(One, rfind_raw, start, end, n1) +} + +/// memchr2, but using raw pointers to represent the haystack. +/// +/// # Safety +/// +/// Pointers must be valid. See `Two::find_raw`. +#[inline(always)] +pub(crate) unsafe fn memchr2_raw( + n1: u8, + n2: u8, + start: *const u8, + end: *const u8, +) -> Option<*const u8> { + defraw!(Two, find_raw, start, end, n1, n2) +} + +/// memrchr2, but using raw pointers to represent the haystack. +/// +/// # Safety +/// +/// Pointers must be valid. See `Two::rfind_raw`. +#[inline(always)] +pub(crate) unsafe fn memrchr2_raw( + n1: u8, + n2: u8, + start: *const u8, + end: *const u8, +) -> Option<*const u8> { + defraw!(Two, rfind_raw, start, end, n1, n2) +} + +/// memchr3, but using raw pointers to represent the haystack. +/// +/// # Safety +/// +/// Pointers must be valid. See `Three::find_raw`. +#[inline(always)] +pub(crate) unsafe fn memchr3_raw( + n1: u8, + n2: u8, + n3: u8, + start: *const u8, + end: *const u8, +) -> Option<*const u8> { + defraw!(Three, find_raw, start, end, n1, n2, n3) +} + +/// memrchr3, but using raw pointers to represent the haystack. +/// +/// # Safety +/// +/// Pointers must be valid. See `Three::rfind_raw`. +#[inline(always)] +pub(crate) unsafe fn memrchr3_raw( + n1: u8, + n2: u8, + n3: u8, + start: *const u8, + end: *const u8, +) -> Option<*const u8> { + defraw!(Three, rfind_raw, start, end, n1, n2, n3) +} + +/// Count all matching bytes, but using raw pointers to represent the haystack. +/// +/// # Safety +/// +/// Pointers must be valid. See `One::count_raw`. 
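For concreteness, the `defraw!` dispatch used by `memchr_raw` above behaves like the hand-written wrapper sketched below. This is illustrative only: `memchr_raw_expanded` is an invented name and the internal `debug!` logging line is omitted.

    // Illustrative hand-expansion of the `defraw!` dispatch; not part of the patch.
    unsafe fn memchr_raw_expanded(
        n1: u8,
        start: *const u8,
        end: *const u8,
    ) -> Option<*const u8> {
        use crate::arch::wasm32::simd128::memchr::One;
        // simd128 is assumed to be enabled at compile time for wasm32 targets.
        debug_assert!(One::is_available());
        // SAFETY: forwarded from the caller, as in `memchr_raw` above.
        unsafe { One::new_unchecked(n1).find_raw(start, end) }
    }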
+#[inline(always)]
+pub(crate) unsafe fn count_raw(
+ n1: u8,
+ start: *const u8,
+ end: *const u8,
+) -> usize {
+ defraw!(One, count_raw, start, end, n1)
+}
diff --git a/vendor/memchr/src/arch/wasm32/mod.rs b/vendor/memchr/src/arch/wasm32/mod.rs
new file mode 100644
index 00000000000000..209f876cb58376
--- /dev/null
+++ b/vendor/memchr/src/arch/wasm32/mod.rs
@@ -0,0 +1,7 @@
+/*!
+Vector algorithms for the `wasm32` target.
+*/
+
+pub mod simd128;
+
+pub(crate) mod memchr;
diff --git a/vendor/memchr/src/arch/wasm32/simd128/memchr.rs b/vendor/memchr/src/arch/wasm32/simd128/memchr.rs
new file mode 100644
index 00000000000000..fa314c9d18aa6f
--- /dev/null
+++ b/vendor/memchr/src/arch/wasm32/simd128/memchr.rs
@@ -0,0 +1,1020 @@
+/*!
+This module defines 128-bit vector implementations of `memchr` and friends.
+
+The main types in this module are [`One`], [`Two`] and [`Three`]. They are for
+searching for one, two or three distinct bytes, respectively, in a haystack.
+Each type also has corresponding double ended iterators. These searchers are
+typically much faster than scalar routines accomplishing the same task.
+
+The `One` searcher also provides a [`One::count`] routine for efficiently
+counting the number of times a single byte occurs in a haystack. This is
+useful, for example, for counting the number of lines in a haystack. This
+routine exists because it is usually faster, especially with a high match
+count, than using [`One::find`] repeatedly. ([`OneIter`] specializes its
+`Iterator::count` implementation to use this routine.)
+
+Only one, two and three bytes are supported because three bytes is about
+the point where one sees diminishing returns. Beyond that point, it's
+probably (but not necessarily) better to just use a simple `[bool; 256]` array
+or similar. However, it depends mightily on the specific work-load and the
+expected match frequency.
+*/
+
+use core::arch::wasm32::v128;
+
+use crate::{arch::generic::memchr as generic, ext::Pointer, vector::Vector};
+
+/// Finds all occurrences of a single byte in a haystack.
+#[derive(Clone, Copy, Debug)]
+pub struct One(generic::One<v128>);
+
+impl One {
+ /// Create a new searcher that finds occurrences of the needle byte given.
+ ///
+ /// This particular searcher is specialized to use simd128 vector
+ /// instructions that typically make it quite fast.
+ ///
+ /// If simd128 is unavailable in the current environment, then `None` is
+ /// returned.
+ #[inline]
+ pub fn new(needle: u8) -> Option<One> {
+ if One::is_available() {
+ // SAFETY: we check that simd128 is available above.
+ unsafe { Some(One::new_unchecked(needle)) }
+ } else {
+ None
+ }
+ }
+
+ /// Create a new finder specific to simd128 vectors and routines without
+ /// checking that simd128 is available.
+ ///
+ /// # Safety
+ ///
+ /// Callers must guarantee that it is safe to execute `simd128`
+ /// instructions in the current environment.
+ #[target_feature(enable = "simd128")]
+ #[inline]
+ pub unsafe fn new_unchecked(needle: u8) -> One {
+ One(generic::One::new(needle))
+ }
+
+ /// Returns true when this implementation is available in the current
+ /// environment.
+ ///
+ /// When this is true, it is guaranteed that [`One::new`] will return
+ /// a `Some` value. Similarly, when it is false, it is guaranteed that
+ /// `One::new` will return a `None` value.
+ ///
+ /// Note also that for the lifetime of a single program, if this returns
+ /// true then it will always return true.
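A minimal usage sketch of this searcher, assuming a wasm32 build: it relies on `One::new` above and the `count` method defined a little further down in this file, with a scalar fallback when simd128 is unavailable. The `count_lines` helper name is invented.

    // Illustrative usage only; not part of the patch.
    fn count_lines(haystack: &[u8]) -> usize {
        match One::new(b'\n') {
            // simd128 available: use the vectorized counter.
            Some(searcher) => searcher.count(haystack),
            // Scalar fallback when simd128 is not enabled at compile time.
            None => haystack.iter().filter(|&&b| b == b'\n').count(),
        }
    }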
+ #[inline] + pub fn is_available() -> bool { + #[cfg(target_feature = "simd128")] + { + true + } + #[cfg(not(target_feature = "simd128"))] + { + false + } + } + + /// Return the first occurrence of one of the needle bytes in the given + /// haystack. If no such occurrence exists, then `None` is returned. + /// + /// The occurrence is reported as an offset into `haystack`. Its maximum + /// value is `haystack.len() - 1`. + #[inline] + pub fn find(&self, haystack: &[u8]) -> Option { + // SAFETY: `find_raw` guarantees that if a pointer is returned, it + // falls within the bounds of the start and end pointers. + unsafe { + generic::search_slice_with_raw(haystack, |s, e| { + self.find_raw(s, e) + }) + } + } + + /// Return the last occurrence of one of the needle bytes in the given + /// haystack. If no such occurrence exists, then `None` is returned. + /// + /// The occurrence is reported as an offset into `haystack`. Its maximum + /// value is `haystack.len() - 1`. + #[inline] + pub fn rfind(&self, haystack: &[u8]) -> Option { + // SAFETY: `rfind_raw` guarantees that if a pointer is returned, it + // falls within the bounds of the start and end pointers. + unsafe { + generic::search_slice_with_raw(haystack, |s, e| { + self.rfind_raw(s, e) + }) + } + } + + /// Counts all occurrences of this byte in the given haystack. + #[inline] + pub fn count(&self, haystack: &[u8]) -> usize { + // SAFETY: All of our pointers are derived directly from a borrowed + // slice, which is guaranteed to be valid. + unsafe { + let start = haystack.as_ptr(); + let end = start.add(haystack.len()); + self.count_raw(start, end) + } + } + + /// Like `find`, but accepts and returns raw pointers. + /// + /// When a match is found, the pointer returned is guaranteed to be + /// `>= start` and `< end`. + /// + /// This routine is useful if you're already using raw pointers and would + /// like to avoid converting back to a slice before executing a search. + /// + /// # Safety + /// + /// * Both `start` and `end` must be valid for reads. + /// * Both `start` and `end` must point to an initialized value. + /// * Both `start` and `end` must point to the same allocated object and + /// must either be in bounds or at most one byte past the end of the + /// allocated object. + /// * Both `start` and `end` must be _derived from_ a pointer to the same + /// object. + /// * The distance between `start` and `end` must not overflow `isize`. + /// * The distance being in bounds must not rely on "wrapping around" the + /// address space. + /// + /// Note that callers may pass a pair of pointers such that `start >= end`. + /// In that case, `None` will always be returned. + #[inline] + pub unsafe fn find_raw( + &self, + start: *const u8, + end: *const u8, + ) -> Option<*const u8> { + if start >= end { + return None; + } + if end.distance(start) < v128::BYTES { + // SAFETY: We require the caller to pass valid start/end pointers. + return generic::fwd_byte_by_byte(start, end, |b| { + b == self.0.needle1() + }); + } + // SAFETY: Building a `One` means it's safe to call 'simd128' routines. + // Also, we've checked that our haystack is big enough to run on the + // vector routine. Pointer validity is caller's responsibility. + self.find_raw_impl(start, end) + } + + /// Like `rfind`, but accepts and returns raw pointers. + /// + /// When a match is found, the pointer returned is guaranteed to be + /// `>= start` and `< end`. 
+ /// + /// This routine is useful if you're already using raw pointers and would + /// like to avoid converting back to a slice before executing a search. + /// + /// # Safety + /// + /// * Both `start` and `end` must be valid for reads. + /// * Both `start` and `end` must point to an initialized value. + /// * Both `start` and `end` must point to the same allocated object and + /// must either be in bounds or at most one byte past the end of the + /// allocated object. + /// * Both `start` and `end` must be _derived from_ a pointer to the same + /// object. + /// * The distance between `start` and `end` must not overflow `isize`. + /// * The distance being in bounds must not rely on "wrapping around" the + /// address space. + /// + /// Note that callers may pass a pair of pointers such that `start >= end`. + /// In that case, `None` will always be returned. + #[inline] + pub unsafe fn rfind_raw( + &self, + start: *const u8, + end: *const u8, + ) -> Option<*const u8> { + if start >= end { + return None; + } + if end.distance(start) < v128::BYTES { + // SAFETY: We require the caller to pass valid start/end pointers. + return generic::rev_byte_by_byte(start, end, |b| { + b == self.0.needle1() + }); + } + // SAFETY: Building a `One` means it's safe to call 'simd128' routines. + // Also, we've checked that our haystack is big enough to run on the + // vector routine. Pointer validity is caller's responsibility. + self.rfind_raw_impl(start, end) + } + + /// Counts all occurrences of this byte in the given haystack represented + /// by raw pointers. + /// + /// When a match is found, the pointer returned is guaranteed to be + /// `>= start` and `< end`. + /// + /// This routine is useful if you're already using raw pointers and would + /// like to avoid converting back to a slice before executing a search. + /// + /// # Safety + /// + /// * Both `start` and `end` must be valid for reads. + /// * Both `start` and `end` must point to an initialized value. + /// * Both `start` and `end` must point to the same allocated object and + /// must either be in bounds or at most one byte past the end of the + /// allocated object. + /// * Both `start` and `end` must be _derived from_ a pointer to the same + /// object. + /// * The distance between `start` and `end` must not overflow `isize`. + /// * The distance being in bounds must not rely on "wrapping around" the + /// address space. + /// + /// Note that callers may pass a pair of pointers such that `start >= end`. + /// In that case, `None` will always be returned. + #[inline] + pub unsafe fn count_raw(&self, start: *const u8, end: *const u8) -> usize { + if start >= end { + return 0; + } + if end.distance(start) < v128::BYTES { + // SAFETY: We require the caller to pass valid start/end pointers. + return generic::count_byte_by_byte(start, end, |b| { + b == self.0.needle1() + }); + } + // SAFETY: Building a `One` means it's safe to call 'simd128' routines. + // Also, we've checked that our haystack is big enough to run on the + // vector routine. Pointer validity is caller's responsibility. + self.count_raw_impl(start, end) + } + + /// Execute a search using simd128 vectors and routines. + /// + /// # Safety + /// + /// Same as [`One::find_raw`], except the distance between `start` and + /// `end` must be at least the size of a simd128 vector (in bytes). 
+ /// + /// (The target feature safety obligation is automatically fulfilled by + /// virtue of being a method on `One`, which can only be constructed + /// when it is safe to call `simd128` routines.) + #[target_feature(enable = "simd128")] + #[inline] + unsafe fn find_raw_impl( + &self, + start: *const u8, + end: *const u8, + ) -> Option<*const u8> { + self.0.find_raw(start, end) + } + + /// Execute a search using simd128 vectors and routines. + /// + /// # Safety + /// + /// Same as [`One::rfind_raw`], except the distance between `start` and + /// `end` must be at least the size of a simd128 vector (in bytes). + /// + /// (The target feature safety obligation is automatically fulfilled by + /// virtue of being a method on `One`, which can only be constructed + /// when it is safe to call `simd128` routines.) + #[target_feature(enable = "simd128")] + #[inline] + unsafe fn rfind_raw_impl( + &self, + start: *const u8, + end: *const u8, + ) -> Option<*const u8> { + self.0.rfind_raw(start, end) + } + + /// Execute a count using simd128 vectors and routines. + /// + /// # Safety + /// + /// Same as [`One::count_raw`], except the distance between `start` and + /// `end` must be at least the size of a simd128 vector (in bytes). + /// + /// (The target feature safety obligation is automatically fulfilled by + /// virtue of being a method on `One`, which can only be constructed + /// when it is safe to call `simd128` routines.) + #[target_feature(enable = "simd128")] + #[inline] + unsafe fn count_raw_impl( + &self, + start: *const u8, + end: *const u8, + ) -> usize { + self.0.count_raw(start, end) + } + + /// Returns an iterator over all occurrences of the needle byte in the + /// given haystack. + /// + /// The iterator returned implements `DoubleEndedIterator`. This means it + /// can also be used to find occurrences in reverse order. + #[inline] + pub fn iter<'a, 'h>(&'a self, haystack: &'h [u8]) -> OneIter<'a, 'h> { + OneIter { searcher: self, it: generic::Iter::new(haystack) } + } +} + +/// An iterator over all occurrences of a single byte in a haystack. +/// +/// This iterator implements `DoubleEndedIterator`, which means it can also be +/// used to find occurrences in reverse order. +/// +/// This iterator is created by the [`One::iter`] method. +/// +/// The lifetime parameters are as follows: +/// +/// * `'a` refers to the lifetime of the underlying [`One`] searcher. +/// * `'h` refers to the lifetime of the haystack being searched. +#[derive(Clone, Debug)] +pub struct OneIter<'a, 'h> { + searcher: &'a One, + it: generic::Iter<'h>, +} + +impl<'a, 'h> Iterator for OneIter<'a, 'h> { + type Item = usize; + + #[inline] + fn next(&mut self) -> Option { + // SAFETY: We rely on the generic iterator to provide valid start + // and end pointers, but we guarantee that any pointer returned by + // 'find_raw' falls within the bounds of the start and end pointer. + unsafe { self.it.next(|s, e| self.searcher.find_raw(s, e)) } + } + + #[inline] + fn count(self) -> usize { + self.it.count(|s, e| { + // SAFETY: We rely on our generic iterator to return valid start + // and end pointers. 
+ unsafe { self.searcher.count_raw(s, e) } + }) + } + + #[inline] + fn size_hint(&self) -> (usize, Option) { + self.it.size_hint() + } +} + +impl<'a, 'h> DoubleEndedIterator for OneIter<'a, 'h> { + #[inline] + fn next_back(&mut self) -> Option { + // SAFETY: We rely on the generic iterator to provide valid start + // and end pointers, but we guarantee that any pointer returned by + // 'rfind_raw' falls within the bounds of the start and end pointer. + unsafe { self.it.next_back(|s, e| self.searcher.rfind_raw(s, e)) } + } +} + +impl<'a, 'h> core::iter::FusedIterator for OneIter<'a, 'h> {} + +/// Finds all occurrences of two bytes in a haystack. +/// +/// That is, this reports matches of one of two possible bytes. For example, +/// searching for `a` or `b` in `afoobar` would report matches at offsets `0`, +/// `4` and `5`. +#[derive(Clone, Copy, Debug)] +pub struct Two(generic::Two); + +impl Two { + /// Create a new searcher that finds occurrences of the needle bytes given. + /// + /// This particular searcher is specialized to use simd128 vector + /// instructions that typically make it quite fast. + /// + /// If simd128 is unavailable in the current environment, then `None` is + /// returned. + #[inline] + pub fn new(needle1: u8, needle2: u8) -> Option { + if Two::is_available() { + // SAFETY: we check that simd128 is available above. + unsafe { Some(Two::new_unchecked(needle1, needle2)) } + } else { + None + } + } + + /// Create a new finder specific to simd128 vectors and routines without + /// checking that simd128 is available. + /// + /// # Safety + /// + /// Callers must guarantee that it is safe to execute `simd128` + /// instructions in the current environment. + #[target_feature(enable = "simd128")] + #[inline] + pub unsafe fn new_unchecked(needle1: u8, needle2: u8) -> Two { + Two(generic::Two::new(needle1, needle2)) + } + + /// Returns true when this implementation is available in the current + /// environment. + /// + /// When this is true, it is guaranteed that [`Two::new`] will return + /// a `Some` value. Similarly, when it is false, it is guaranteed that + /// `Two::new` will return a `None` value. + /// + /// Note also that for the lifetime of a single program, if this returns + /// true then it will always return true. + #[inline] + pub fn is_available() -> bool { + #[cfg(target_feature = "simd128")] + { + true + } + #[cfg(not(target_feature = "simd128"))] + { + false + } + } + + /// Return the first occurrence of one of the needle bytes in the given + /// haystack. If no such occurrence exists, then `None` is returned. + /// + /// The occurrence is reported as an offset into `haystack`. Its maximum + /// value is `haystack.len() - 1`. + #[inline] + pub fn find(&self, haystack: &[u8]) -> Option { + // SAFETY: `find_raw` guarantees that if a pointer is returned, it + // falls within the bounds of the start and end pointers. + unsafe { + generic::search_slice_with_raw(haystack, |s, e| { + self.find_raw(s, e) + }) + } + } + + /// Return the last occurrence of one of the needle bytes in the given + /// haystack. If no such occurrence exists, then `None` is returned. + /// + /// The occurrence is reported as an offset into `haystack`. Its maximum + /// value is `haystack.len() - 1`. + #[inline] + pub fn rfind(&self, haystack: &[u8]) -> Option { + // SAFETY: `rfind_raw` guarantees that if a pointer is returned, it + // falls within the bounds of the start and end pointers. 
+ unsafe { + generic::search_slice_with_raw(haystack, |s, e| { + self.rfind_raw(s, e) + }) + } + } + + /// Like `find`, but accepts and returns raw pointers. + /// + /// When a match is found, the pointer returned is guaranteed to be + /// `>= start` and `< end`. + /// + /// This routine is useful if you're already using raw pointers and would + /// like to avoid converting back to a slice before executing a search. + /// + /// # Safety + /// + /// * Both `start` and `end` must be valid for reads. + /// * Both `start` and `end` must point to an initialized value. + /// * Both `start` and `end` must point to the same allocated object and + /// must either be in bounds or at most one byte past the end of the + /// allocated object. + /// * Both `start` and `end` must be _derived from_ a pointer to the same + /// object. + /// * The distance between `start` and `end` must not overflow `isize`. + /// * The distance being in bounds must not rely on "wrapping around" the + /// address space. + /// + /// Note that callers may pass a pair of pointers such that `start >= end`. + /// In that case, `None` will always be returned. + #[inline] + pub unsafe fn find_raw( + &self, + start: *const u8, + end: *const u8, + ) -> Option<*const u8> { + if start >= end { + return None; + } + if end.distance(start) < v128::BYTES { + // SAFETY: We require the caller to pass valid start/end pointers. + return generic::fwd_byte_by_byte(start, end, |b| { + b == self.0.needle1() || b == self.0.needle2() + }); + } + // SAFETY: Building a `Two` means it's safe to call 'simd128' routines. + // Also, we've checked that our haystack is big enough to run on the + // vector routine. Pointer validity is caller's responsibility. + self.find_raw_impl(start, end) + } + + /// Like `rfind`, but accepts and returns raw pointers. + /// + /// When a match is found, the pointer returned is guaranteed to be + /// `>= start` and `< end`. + /// + /// This routine is useful if you're already using raw pointers and would + /// like to avoid converting back to a slice before executing a search. + /// + /// # Safety + /// + /// * Both `start` and `end` must be valid for reads. + /// * Both `start` and `end` must point to an initialized value. + /// * Both `start` and `end` must point to the same allocated object and + /// must either be in bounds or at most one byte past the end of the + /// allocated object. + /// * Both `start` and `end` must be _derived from_ a pointer to the same + /// object. + /// * The distance between `start` and `end` must not overflow `isize`. + /// * The distance being in bounds must not rely on "wrapping around" the + /// address space. + /// + /// Note that callers may pass a pair of pointers such that `start >= end`. + /// In that case, `None` will always be returned. + #[inline] + pub unsafe fn rfind_raw( + &self, + start: *const u8, + end: *const u8, + ) -> Option<*const u8> { + if start >= end { + return None; + } + if end.distance(start) < v128::BYTES { + // SAFETY: We require the caller to pass valid start/end pointers. + return generic::rev_byte_by_byte(start, end, |b| { + b == self.0.needle1() || b == self.0.needle2() + }); + } + // SAFETY: Building a `Two` means it's safe to call 'simd128' routines. + // Also, we've checked that our haystack is big enough to run on the + // vector routine. Pointer validity is caller's responsibility. + self.rfind_raw_impl(start, end) + } + + /// Execute a search using simd128 vectors and routines. 
+    ///
+    /// # Safety
+    ///
+    /// Same as [`Two::find_raw`], except the distance between `start` and
+    /// `end` must be at least the size of a simd128 vector (in bytes).
+    ///
+    /// (The target feature safety obligation is automatically fulfilled by
+    /// virtue of being a method on `Two`, which can only be constructed
+    /// when it is safe to call `simd128` routines.)
+    #[target_feature(enable = "simd128")]
+    #[inline]
+    unsafe fn find_raw_impl(
+        &self,
+        start: *const u8,
+        end: *const u8,
+    ) -> Option<*const u8> {
+        self.0.find_raw(start, end)
+    }
+
+    /// Execute a search using simd128 vectors and routines.
+    ///
+    /// # Safety
+    ///
+    /// Same as [`Two::rfind_raw`], except the distance between `start` and
+    /// `end` must be at least the size of a simd128 vector (in bytes).
+    ///
+    /// (The target feature safety obligation is automatically fulfilled by
+    /// virtue of being a method on `Two`, which can only be constructed
+    /// when it is safe to call `simd128` routines.)
+    #[target_feature(enable = "simd128")]
+    #[inline]
+    unsafe fn rfind_raw_impl(
+        &self,
+        start: *const u8,
+        end: *const u8,
+    ) -> Option<*const u8> {
+        self.0.rfind_raw(start, end)
+    }
+
+    /// Returns an iterator over all occurrences of the needle bytes in the
+    /// given haystack.
+    ///
+    /// The iterator returned implements `DoubleEndedIterator`. This means it
+    /// can also be used to find occurrences in reverse order.
+    #[inline]
+    pub fn iter<'a, 'h>(&'a self, haystack: &'h [u8]) -> TwoIter<'a, 'h> {
+        TwoIter { searcher: self, it: generic::Iter::new(haystack) }
+    }
+}
+
+/// An iterator over all occurrences of two possible bytes in a haystack.
+///
+/// This iterator implements `DoubleEndedIterator`, which means it can also be
+/// used to find occurrences in reverse order.
+///
+/// This iterator is created by the [`Two::iter`] method.
+///
+/// The lifetime parameters are as follows:
+///
+/// * `'a` refers to the lifetime of the underlying [`Two`] searcher.
+/// * `'h` refers to the lifetime of the haystack being searched.
+#[derive(Clone, Debug)]
+pub struct TwoIter<'a, 'h> {
+    searcher: &'a Two,
+    it: generic::Iter<'h>,
+}
+
+impl<'a, 'h> Iterator for TwoIter<'a, 'h> {
+    type Item = usize;
+
+    #[inline]
+    fn next(&mut self) -> Option<usize> {
+        // SAFETY: We rely on the generic iterator to provide valid start
+        // and end pointers, but we guarantee that any pointer returned by
+        // 'find_raw' falls within the bounds of the start and end pointer.
+        unsafe { self.it.next(|s, e| self.searcher.find_raw(s, e)) }
+    }
+
+    #[inline]
+    fn size_hint(&self) -> (usize, Option<usize>) {
+        self.it.size_hint()
+    }
+}
+
+impl<'a, 'h> DoubleEndedIterator for TwoIter<'a, 'h> {
+    #[inline]
+    fn next_back(&mut self) -> Option<usize> {
+        // SAFETY: We rely on the generic iterator to provide valid start
+        // and end pointers, but we guarantee that any pointer returned by
+        // 'rfind_raw' falls within the bounds of the start and end pointer.
+        unsafe { self.it.next_back(|s, e| self.searcher.rfind_raw(s, e)) }
+    }
+}
+
+impl<'a, 'h> core::iter::FusedIterator for TwoIter<'a, 'h> {}
+
+/// Finds all occurrences of three bytes in a haystack.
+///
+/// That is, this reports matches of one of three possible bytes. For example,
+/// searching for `a`, `b` or `o` in `afoobar` would report matches at offsets
+/// `0`, `2`, `3`, `4` and `5`.
+#[derive(Clone, Copy, Debug)]
+pub struct Three(generic::Three<v128>);
+
+impl Three {
+    /// Create a new searcher that finds occurrences of the needle bytes given.
+    ///
+    /// This particular searcher is specialized to use simd128 vector
+    /// instructions that typically make it quite fast.
+    ///
+    /// If simd128 is unavailable in the current environment, then `None` is
+    /// returned.
+    #[inline]
+    pub fn new(needle1: u8, needle2: u8, needle3: u8) -> Option<Three> {
+        if Three::is_available() {
+            // SAFETY: we check that simd128 is available above.
+            unsafe { Some(Three::new_unchecked(needle1, needle2, needle3)) }
+        } else {
+            None
+        }
+    }
+
+    /// Create a new finder specific to simd128 vectors and routines without
+    /// checking that simd128 is available.
+    ///
+    /// # Safety
+    ///
+    /// Callers must guarantee that it is safe to execute `simd128`
+    /// instructions in the current environment.
+    #[target_feature(enable = "simd128")]
+    #[inline]
+    pub unsafe fn new_unchecked(
+        needle1: u8,
+        needle2: u8,
+        needle3: u8,
+    ) -> Three {
+        Three(generic::Three::new(needle1, needle2, needle3))
+    }
+
+    /// Returns true when this implementation is available in the current
+    /// environment.
+    ///
+    /// When this is true, it is guaranteed that [`Three::new`] will return
+    /// a `Some` value. Similarly, when it is false, it is guaranteed that
+    /// `Three::new` will return a `None` value.
+    ///
+    /// Note also that for the lifetime of a single program, if this returns
+    /// true then it will always return true.
+    #[inline]
+    pub fn is_available() -> bool {
+        #[cfg(target_feature = "simd128")]
+        {
+            true
+        }
+        #[cfg(not(target_feature = "simd128"))]
+        {
+            false
+        }
+    }
+
+    /// Return the first occurrence of one of the needle bytes in the given
+    /// haystack. If no such occurrence exists, then `None` is returned.
+    ///
+    /// The occurrence is reported as an offset into `haystack`. Its maximum
+    /// value is `haystack.len() - 1`.
+    #[inline]
+    pub fn find(&self, haystack: &[u8]) -> Option<usize> {
+        // SAFETY: `find_raw` guarantees that if a pointer is returned, it
+        // falls within the bounds of the start and end pointers.
+        unsafe {
+            generic::search_slice_with_raw(haystack, |s, e| {
+                self.find_raw(s, e)
+            })
+        }
+    }
+
+    /// Return the last occurrence of one of the needle bytes in the given
+    /// haystack. If no such occurrence exists, then `None` is returned.
+    ///
+    /// The occurrence is reported as an offset into `haystack`. Its maximum
+    /// value is `haystack.len() - 1`.
+    #[inline]
+    pub fn rfind(&self, haystack: &[u8]) -> Option<usize> {
+        // SAFETY: `rfind_raw` guarantees that if a pointer is returned, it
+        // falls within the bounds of the start and end pointers.
+        unsafe {
+            generic::search_slice_with_raw(haystack, |s, e| {
+                self.rfind_raw(s, e)
+            })
+        }
+    }
+
+    /// Like `find`, but accepts and returns raw pointers.
+    ///
+    /// When a match is found, the pointer returned is guaranteed to be
+    /// `>= start` and `< end`.
+    ///
+    /// This routine is useful if you're already using raw pointers and would
+    /// like to avoid converting back to a slice before executing a search.
+    ///
+    /// # Safety
+    ///
+    /// * Both `start` and `end` must be valid for reads.
+    /// * Both `start` and `end` must point to an initialized value.
+    /// * Both `start` and `end` must point to the same allocated object and
+    ///   must either be in bounds or at most one byte past the end of the
+    ///   allocated object.
+    /// * Both `start` and `end` must be _derived from_ a pointer to the same
+    ///   object.
+    /// * The distance between `start` and `end` must not overflow `isize`.
+    /// * The distance being in bounds must not rely on "wrapping around" the
+    ///   address space.
+ /// + /// Note that callers may pass a pair of pointers such that `start >= end`. + /// In that case, `None` will always be returned. + #[inline] + pub unsafe fn find_raw( + &self, + start: *const u8, + end: *const u8, + ) -> Option<*const u8> { + if start >= end { + return None; + } + if end.distance(start) < v128::BYTES { + // SAFETY: We require the caller to pass valid start/end pointers. + return generic::fwd_byte_by_byte(start, end, |b| { + b == self.0.needle1() + || b == self.0.needle2() + || b == self.0.needle3() + }); + } + // SAFETY: Building a `Three` means it's safe to call 'simd128' + // routines. Also, we've checked that our haystack is big enough to run + // on the vector routine. Pointer validity is caller's responsibility. + self.find_raw_impl(start, end) + } + + /// Like `rfind`, but accepts and returns raw pointers. + /// + /// When a match is found, the pointer returned is guaranteed to be + /// `>= start` and `< end`. + /// + /// This routine is useful if you're already using raw pointers and would + /// like to avoid converting back to a slice before executing a search. + /// + /// # Safety + /// + /// * Both `start` and `end` must be valid for reads. + /// * Both `start` and `end` must point to an initialized value. + /// * Both `start` and `end` must point to the same allocated object and + /// must either be in bounds or at most one byte past the end of the + /// allocated object. + /// * Both `start` and `end` must be _derived from_ a pointer to the same + /// object. + /// * The distance between `start` and `end` must not overflow `isize`. + /// * The distance being in bounds must not rely on "wrapping around" the + /// address space. + /// + /// Note that callers may pass a pair of pointers such that `start >= end`. + /// In that case, `None` will always be returned. + #[inline] + pub unsafe fn rfind_raw( + &self, + start: *const u8, + end: *const u8, + ) -> Option<*const u8> { + if start >= end { + return None; + } + if end.distance(start) < v128::BYTES { + // SAFETY: We require the caller to pass valid start/end pointers. + return generic::rev_byte_by_byte(start, end, |b| { + b == self.0.needle1() + || b == self.0.needle2() + || b == self.0.needle3() + }); + } + // SAFETY: Building a `Three` means it's safe to call 'simd128' + // routines. Also, we've checked that our haystack is big enough to run + // on the vector routine. Pointer validity is caller's responsibility. + self.rfind_raw_impl(start, end) + } + + /// Execute a search using simd128 vectors and routines. + /// + /// # Safety + /// + /// Same as [`Three::find_raw`], except the distance between `start` and + /// `end` must be at least the size of a simd128 vector (in bytes). + /// + /// (The target feature safety obligation is automatically fulfilled by + /// virtue of being a method on `Three`, which can only be constructed + /// when it is safe to call `simd128` routines.) + #[target_feature(enable = "simd128")] + #[inline] + unsafe fn find_raw_impl( + &self, + start: *const u8, + end: *const u8, + ) -> Option<*const u8> { + self.0.find_raw(start, end) + } + + /// Execute a search using simd128 vectors and routines. + /// + /// # Safety + /// + /// Same as [`Three::rfind_raw`], except the distance between `start` and + /// `end` must be at least the size of a simd128 vector (in bytes). + /// + /// (The target feature safety obligation is automatically fulfilled by + /// virtue of being a method on `Three`, which can only be constructed + /// when it is safe to call `simd128` routines.) 
+ #[target_feature(enable = "simd128")] + #[inline] + unsafe fn rfind_raw_impl( + &self, + start: *const u8, + end: *const u8, + ) -> Option<*const u8> { + self.0.rfind_raw(start, end) + } + + /// Returns an iterator over all occurrences of the needle byte in the + /// given haystack. + /// + /// The iterator returned implements `DoubleEndedIterator`. This means it + /// can also be used to find occurrences in reverse order. + #[inline] + pub fn iter<'a, 'h>(&'a self, haystack: &'h [u8]) -> ThreeIter<'a, 'h> { + ThreeIter { searcher: self, it: generic::Iter::new(haystack) } + } +} + +/// An iterator over all occurrences of three possible bytes in a haystack. +/// +/// This iterator implements `DoubleEndedIterator`, which means it can also be +/// used to find occurrences in reverse order. +/// +/// This iterator is created by the [`Three::iter`] method. +/// +/// The lifetime parameters are as follows: +/// +/// * `'a` refers to the lifetime of the underlying [`Three`] searcher. +/// * `'h` refers to the lifetime of the haystack being searched. +#[derive(Clone, Debug)] +pub struct ThreeIter<'a, 'h> { + searcher: &'a Three, + it: generic::Iter<'h>, +} + +impl<'a, 'h> Iterator for ThreeIter<'a, 'h> { + type Item = usize; + + #[inline] + fn next(&mut self) -> Option { + // SAFETY: We rely on the generic iterator to provide valid start + // and end pointers, but we guarantee that any pointer returned by + // 'find_raw' falls within the bounds of the start and end pointer. + unsafe { self.it.next(|s, e| self.searcher.find_raw(s, e)) } + } + + #[inline] + fn size_hint(&self) -> (usize, Option) { + self.it.size_hint() + } +} + +impl<'a, 'h> DoubleEndedIterator for ThreeIter<'a, 'h> { + #[inline] + fn next_back(&mut self) -> Option { + // SAFETY: We rely on the generic iterator to provide valid start + // and end pointers, but we guarantee that any pointer returned by + // 'rfind_raw' falls within the bounds of the start and end pointer. 
+        unsafe { self.it.next_back(|s, e| self.searcher.rfind_raw(s, e)) }
+    }
+}
+
+impl<'a, 'h> core::iter::FusedIterator for ThreeIter<'a, 'h> {}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    define_memchr_quickcheck!(super);
+
+    #[test]
+    fn forward_one() {
+        crate::tests::memchr::Runner::new(1).forward_iter(
+            |haystack, needles| {
+                Some(One::new(needles[0])?.iter(haystack).collect())
+            },
+        )
+    }
+
+    #[test]
+    fn reverse_one() {
+        crate::tests::memchr::Runner::new(1).reverse_iter(
+            |haystack, needles| {
+                Some(One::new(needles[0])?.iter(haystack).rev().collect())
+            },
+        )
+    }
+
+    #[test]
+    fn count_one() {
+        crate::tests::memchr::Runner::new(1).count_iter(|haystack, needles| {
+            Some(One::new(needles[0])?.iter(haystack).count())
+        })
+    }
+
+    #[test]
+    fn forward_two() {
+        crate::tests::memchr::Runner::new(2).forward_iter(
+            |haystack, needles| {
+                let n1 = needles.get(0).copied()?;
+                let n2 = needles.get(1).copied()?;
+                Some(Two::new(n1, n2)?.iter(haystack).collect())
+            },
+        )
+    }
+
+    #[test]
+    fn reverse_two() {
+        crate::tests::memchr::Runner::new(2).reverse_iter(
+            |haystack, needles| {
+                let n1 = needles.get(0).copied()?;
+                let n2 = needles.get(1).copied()?;
+                Some(Two::new(n1, n2)?.iter(haystack).rev().collect())
+            },
+        )
+    }
+
+    #[test]
+    fn forward_three() {
+        crate::tests::memchr::Runner::new(3).forward_iter(
+            |haystack, needles| {
+                let n1 = needles.get(0).copied()?;
+                let n2 = needles.get(1).copied()?;
+                let n3 = needles.get(2).copied()?;
+                Some(Three::new(n1, n2, n3)?.iter(haystack).collect())
+            },
+        )
+    }
+
+    #[test]
+    fn reverse_three() {
+        crate::tests::memchr::Runner::new(3).reverse_iter(
+            |haystack, needles| {
+                let n1 = needles.get(0).copied()?;
+                let n2 = needles.get(1).copied()?;
+                let n3 = needles.get(2).copied()?;
+                Some(Three::new(n1, n2, n3)?.iter(haystack).rev().collect())
+            },
+        )
+    }
+}
diff --git a/vendor/memchr/src/arch/wasm32/simd128/mod.rs b/vendor/memchr/src/arch/wasm32/simd128/mod.rs
new file mode 100644
index 00000000000000..b55d1f07b07406
--- /dev/null
+++ b/vendor/memchr/src/arch/wasm32/simd128/mod.rs
@@ -0,0 +1,6 @@
+/*!
+Algorithms for the `wasm32` target using 128-bit vectors via simd128.
+*/
+
+pub mod memchr;
+pub mod packedpair;
diff --git a/vendor/memchr/src/arch/wasm32/simd128/packedpair.rs b/vendor/memchr/src/arch/wasm32/simd128/packedpair.rs
new file mode 100644
index 00000000000000..e8cf745a8feb77
--- /dev/null
+++ b/vendor/memchr/src/arch/wasm32/simd128/packedpair.rs
@@ -0,0 +1,228 @@
+/*!
+A 128-bit vector implementation of the "packed pair" SIMD algorithm.
+
+The "packed pair" algorithm is based on the [generic SIMD] algorithm. The main
+difference is that it (by default) uses a background distribution of byte
+frequencies to heuristically select the pair of bytes to search for.
+
+[generic SIMD]: http://0x80.pl/articles/simd-strfind.html#first-and-last
+*/
+
+use core::arch::wasm32::v128;
+
+use crate::arch::{all::packedpair::Pair, generic::packedpair};
+
+/// A "packed pair" finder that uses 128-bit vector operations.
+///
+/// This finder picks two bytes that it believes have high predictive power
+/// for indicating an overall match of a needle. Depending on whether
+/// `Finder::find` or `Finder::find_prefilter` is used, it reports offsets
+/// where the needle matches or could match. In the prefilter case, candidates
+/// are reported whenever the [`Pair`] of bytes given matches.
+#[derive(Clone, Copy, Debug)]
+pub struct Finder(packedpair::Finder<v128>);
+
+impl Finder {
+    /// Create a new pair searcher. The searcher returned can either report
+    /// exact matches of `needle` or act as a prefilter and report candidate
+    /// positions of `needle`.
+    ///
+    /// If simd128 is unavailable in the current environment or if a [`Pair`]
+    /// could not be constructed from the needle given, then `None` is
+    /// returned.
+    #[inline]
+    pub fn new(needle: &[u8]) -> Option<Finder> {
+        Finder::with_pair(needle, Pair::new(needle)?)
+    }
+
+    /// Create a new "packed pair" finder using the pair of bytes given.
+    ///
+    /// This constructor permits callers to control precisely which pair of
+    /// bytes is used as a predicate.
+    ///
+    /// If simd128 is unavailable in the current environment, then `None` is
+    /// returned.
+    #[inline]
+    pub fn with_pair(needle: &[u8], pair: Pair) -> Option<Finder> {
+        if Finder::is_available() {
+            // SAFETY: we check that simd128 is available above. We are also
+            // guaranteed to have needle.len() > 1 because we have a valid
+            // Pair.
+            unsafe { Some(Finder::with_pair_impl(needle, pair)) }
+        } else {
+            None
+        }
+    }
+
+    /// Create a new `Finder` specific to simd128 vectors and routines.
+    ///
+    /// # Safety
+    ///
+    /// Same as the safety for `packedpair::Finder::new`, and callers must also
+    /// ensure that simd128 is available.
+    #[target_feature(enable = "simd128")]
+    #[inline]
+    unsafe fn with_pair_impl(needle: &[u8], pair: Pair) -> Finder {
+        let finder = packedpair::Finder::<v128>::new(needle, pair);
+        Finder(finder)
+    }
+
+    /// Returns true when this implementation is available in the current
+    /// environment.
+    ///
+    /// When this is true, it is guaranteed that [`Finder::with_pair`] will
+    /// return a `Some` value. Similarly, when it is false, it is guaranteed
+    /// that `Finder::with_pair` will return a `None` value. Notice that this
+    /// does not guarantee that [`Finder::new`] will return a `Finder`. Namely,
+    /// even when `Finder::is_available` is true, it is not guaranteed that a
+    /// valid [`Pair`] can be found from the needle given.
+    ///
+    /// Note also that for the lifetime of a single program, if this returns
+    /// true then it will always return true.
+    #[inline]
+    pub fn is_available() -> bool {
+        // We used to gate on `cfg(target_feature = "simd128")` here, but
+        // we've since required the feature to be enabled at compile time to
+        // even include this module at all. Therefore, it is always enabled
+        // in this context. See the linked issue for why this was changed.
+        //
+        // Ref: https://github.com/BurntSushi/memchr/issues/144
+        true
+    }
+
+    /// Execute a search using wasm32 v128 vectors and routines.
+    ///
+    /// # Panics
+    ///
+    /// When `haystack.len()` is less than [`Finder::min_haystack_len`].
+    #[inline]
+    pub fn find(&self, haystack: &[u8], needle: &[u8]) -> Option<usize> {
+        self.find_impl(haystack, needle)
+    }
+
+    /// Execute a search using wasm32 v128 vectors and routines.
+    ///
+    /// # Panics
+    ///
+    /// When `haystack.len()` is less than [`Finder::min_haystack_len`].
+    #[inline]
+    pub fn find_prefilter(&self, haystack: &[u8]) -> Option<usize> {
+        self.find_prefilter_impl(haystack)
+    }
+
+    /// Execute a search using wasm32 v128 vectors and routines.
+    ///
+    /// # Panics
+    ///
+    /// When `haystack.len()` is less than [`Finder::min_haystack_len`].
+    ///
+    /// # Safety
+    ///
+    /// (The target feature safety obligation is automatically fulfilled by
+    /// virtue of being a method on `Finder`, which can only be constructed
+    /// when it is safe to call `simd128` routines.)
+ #[target_feature(enable = "simd128")] + #[inline] + fn find_impl(&self, haystack: &[u8], needle: &[u8]) -> Option { + // SAFETY: The target feature safety obligation is automatically + // fulfilled by virtue of being a method on `Finder`, which can only be + // constructed when it is safe to call `simd128` routines. + unsafe { self.0.find(haystack, needle) } + } + + /// Execute a prefilter search using wasm32 v128 vectors and routines. + /// + /// # Panics + /// + /// When `haystack.len()` is less than [`Finder::min_haystack_len`]. + /// + /// # Safety + /// + /// (The target feature safety obligation is automatically fulfilled by + /// virtue of being a method on `Finder`, which can only be constructed + /// when it is safe to call `simd128` routines.) + #[target_feature(enable = "simd128")] + #[inline] + fn find_prefilter_impl(&self, haystack: &[u8]) -> Option { + // SAFETY: The target feature safety obligation is automatically + // fulfilled by virtue of being a method on `Finder`, which can only be + // constructed when it is safe to call `simd128` routines. + unsafe { self.0.find_prefilter(haystack) } + } + + /// Returns the pair of offsets (into the needle) used to check as a + /// predicate before confirming whether a needle exists at a particular + /// position. + #[inline] + pub fn pair(&self) -> &Pair { + self.0.pair() + } + + /// Returns the minimum haystack length that this `Finder` can search. + /// + /// Using a haystack with length smaller than this in a search will result + /// in a panic. The reason for this restriction is that this finder is + /// meant to be a low-level component that is part of a larger substring + /// strategy. In that sense, it avoids trying to handle all cases and + /// instead only handles the cases that it can handle very well. + #[inline] + pub fn min_haystack_len(&self) -> usize { + self.0.min_haystack_len() + } +} + +#[cfg(test)] +mod tests { + use super::*; + + fn find(haystack: &[u8], needle: &[u8]) -> Option> { + let f = Finder::new(needle)?; + if haystack.len() < f.min_haystack_len() { + return None; + } + Some(f.find(haystack, needle)) + } + + define_substring_forward_quickcheck!(find); + + #[test] + fn forward_substring() { + crate::tests::substring::Runner::new().fwd(find).run() + } + + #[test] + fn forward_packedpair() { + fn find( + haystack: &[u8], + needle: &[u8], + index1: u8, + index2: u8, + ) -> Option> { + let pair = Pair::with_indices(needle, index1, index2)?; + let f = Finder::with_pair(needle, pair)?; + if haystack.len() < f.min_haystack_len() { + return None; + } + Some(f.find(haystack, needle)) + } + crate::tests::packedpair::Runner::new().fwd(find).run() + } + + #[test] + fn forward_packedpair_prefilter() { + fn find( + haystack: &[u8], + needle: &[u8], + index1: u8, + index2: u8, + ) -> Option> { + let pair = Pair::with_indices(needle, index1, index2)?; + let f = Finder::with_pair(needle, pair)?; + if haystack.len() < f.min_haystack_len() { + return None; + } + Some(f.find_prefilter(haystack)) + } + crate::tests::packedpair::Runner::new().fwd(find).run() + } +} diff --git a/vendor/memchr/src/arch/x86_64/avx2/memchr.rs b/vendor/memchr/src/arch/x86_64/avx2/memchr.rs new file mode 100644 index 00000000000000..59f8c7f7382028 --- /dev/null +++ b/vendor/memchr/src/arch/x86_64/avx2/memchr.rs @@ -0,0 +1,1352 @@ +/*! +This module defines 256-bit vector implementations of `memchr` and friends. + +The main types in this module are [`One`], [`Two`] and [`Three`]. 
+searching for one, two or three distinct bytes, respectively, in a haystack.
+Each type also has corresponding double ended iterators. These searchers are
+typically much faster than scalar routines accomplishing the same task.
+
+The `One` searcher also provides a [`One::count`] routine for efficiently
+counting the number of times a single byte occurs in a haystack. This is
+useful, for example, for counting the number of lines in a haystack. This
+routine exists because it is usually faster, especially with a high match
+count, than using [`One::find`] repeatedly. ([`OneIter`] specializes its
+`Iterator::count` implementation to use this routine.)
+
+Only one, two and three bytes are supported because three bytes is about
+the point where one sees diminishing returns. Beyond this point and it's
+probably (but not necessarily) better to just use a simple `[bool; 256]` array
+or similar. However, it depends mightily on the specific work-load and the
+expected match frequency.
+*/
+
+use core::arch::x86_64::{__m128i, __m256i};
+
+use crate::{arch::generic::memchr as generic, ext::Pointer, vector::Vector};
+
+/// Finds all occurrences of a single byte in a haystack.
+#[derive(Clone, Copy, Debug)]
+pub struct One {
+    /// Used for haystacks less than 32 bytes.
+    sse2: generic::One<__m128i>,
+    /// Used for haystacks bigger than 32 bytes.
+    avx2: generic::One<__m256i>,
+}
+
+impl One {
+    /// Create a new searcher that finds occurrences of the needle byte given.
+    ///
+    /// This particular searcher is specialized to use AVX2 vector instructions
+    /// that typically make it quite fast. (SSE2 is used for haystacks that
+    /// are too short to accommodate an AVX2 vector.)
+    ///
+    /// If either SSE2 or AVX2 is unavailable in the current environment, then
+    /// `None` is returned.
+    #[inline]
+    pub fn new(needle: u8) -> Option<One> {
+        if One::is_available() {
+            // SAFETY: we check that sse2 and avx2 are available above.
+            unsafe { Some(One::new_unchecked(needle)) }
+        } else {
+            None
+        }
+    }
+
+    /// Create a new finder specific to AVX2 vectors and routines without
+    /// checking that either SSE2 or AVX2 is available.
+    ///
+    /// # Safety
+    ///
+    /// Callers must guarantee that it is safe to execute both `sse2` and
+    /// `avx2` instructions in the current environment.
+    ///
+    /// Note that it is a common misconception that if one compiles for an
+    /// `x86_64` target, then they therefore automatically have access to SSE2
+    /// instructions. While this is almost always the case, it isn't true in
+    /// 100% of cases.
+    #[target_feature(enable = "sse2", enable = "avx2")]
+    #[inline]
+    pub unsafe fn new_unchecked(needle: u8) -> One {
+        One {
+            sse2: generic::One::new(needle),
+            avx2: generic::One::new(needle),
+        }
+    }
+
+    /// Returns true when this implementation is available in the current
+    /// environment.
+    ///
+    /// When this is true, it is guaranteed that [`One::new`] will return
+    /// a `Some` value. Similarly, when it is false, it is guaranteed that
+    /// `One::new` will return a `None` value.
+    ///
+    /// Note also that for the lifetime of a single program, if this returns
+    /// true then it will always return true.
+ #[inline] + pub fn is_available() -> bool { + #[cfg(not(target_feature = "sse2"))] + { + false + } + #[cfg(target_feature = "sse2")] + { + #[cfg(target_feature = "avx2")] + { + true + } + #[cfg(not(target_feature = "avx2"))] + { + #[cfg(feature = "std")] + { + std::is_x86_feature_detected!("avx2") + } + #[cfg(not(feature = "std"))] + { + false + } + } + } + } + + /// Return the first occurrence of one of the needle bytes in the given + /// haystack. If no such occurrence exists, then `None` is returned. + /// + /// The occurrence is reported as an offset into `haystack`. Its maximum + /// value is `haystack.len() - 1`. + #[inline] + pub fn find(&self, haystack: &[u8]) -> Option { + // SAFETY: `find_raw` guarantees that if a pointer is returned, it + // falls within the bounds of the start and end pointers. + unsafe { + generic::search_slice_with_raw(haystack, |s, e| { + self.find_raw(s, e) + }) + } + } + + /// Return the last occurrence of one of the needle bytes in the given + /// haystack. If no such occurrence exists, then `None` is returned. + /// + /// The occurrence is reported as an offset into `haystack`. Its maximum + /// value is `haystack.len() - 1`. + #[inline] + pub fn rfind(&self, haystack: &[u8]) -> Option { + // SAFETY: `find_raw` guarantees that if a pointer is returned, it + // falls within the bounds of the start and end pointers. + unsafe { + generic::search_slice_with_raw(haystack, |s, e| { + self.rfind_raw(s, e) + }) + } + } + + /// Counts all occurrences of this byte in the given haystack. + #[inline] + pub fn count(&self, haystack: &[u8]) -> usize { + // SAFETY: All of our pointers are derived directly from a borrowed + // slice, which is guaranteed to be valid. + unsafe { + let start = haystack.as_ptr(); + let end = start.add(haystack.len()); + self.count_raw(start, end) + } + } + + /// Like `find`, but accepts and returns raw pointers. + /// + /// When a match is found, the pointer returned is guaranteed to be + /// `>= start` and `< end`. + /// + /// This routine is useful if you're already using raw pointers and would + /// like to avoid converting back to a slice before executing a search. + /// + /// # Safety + /// + /// * Both `start` and `end` must be valid for reads. + /// * Both `start` and `end` must point to an initialized value. + /// * Both `start` and `end` must point to the same allocated object and + /// must either be in bounds or at most one byte past the end of the + /// allocated object. + /// * Both `start` and `end` must be _derived from_ a pointer to the same + /// object. + /// * The distance between `start` and `end` must not overflow `isize`. + /// * The distance being in bounds must not rely on "wrapping around" the + /// address space. + /// + /// Note that callers may pass a pair of pointers such that `start >= end`. + /// In that case, `None` will always be returned. + #[inline] + pub unsafe fn find_raw( + &self, + start: *const u8, + end: *const u8, + ) -> Option<*const u8> { + if start >= end { + return None; + } + let len = end.distance(start); + if len < __m256i::BYTES { + return if len < __m128i::BYTES { + // SAFETY: We require the caller to pass valid start/end + // pointers. + generic::fwd_byte_by_byte(start, end, |b| { + b == self.sse2.needle1() + }) + } else { + // SAFETY: We require the caller to pass valid start/end + // pointers. + self.find_raw_sse2(start, end) + }; + } + // SAFETY: Building a `One` means it's safe to call both 'sse2' and + // 'avx2' routines. 
Also, we've checked that our haystack is big + // enough to run on the vector routine. Pointer validity is caller's + // responsibility. + // + // Note that we could call `self.avx2.find_raw` directly here. But that + // means we'd have to annotate this routine with `target_feature`. + // Which is fine, because this routine is `unsafe` anyway and the + // `target_feature` obligation is met by virtue of building a `One`. + // The real problem is that a routine with a `target_feature` + // annotation generally can't be inlined into caller code unless + // the caller code has the same target feature annotations. Namely, + // the common case (at time of writing) is for calling code to not + // have the `avx2` target feature enabled *at compile time*. Without + // `target_feature` on this routine, it can be inlined which will + // handle some of the short-haystack cases above without touching the + // architecture specific code. + self.find_raw_avx2(start, end) + } + + /// Like `rfind`, but accepts and returns raw pointers. + /// + /// When a match is found, the pointer returned is guaranteed to be + /// `>= start` and `< end`. + /// + /// This routine is useful if you're already using raw pointers and would + /// like to avoid converting back to a slice before executing a search. + /// + /// # Safety + /// + /// * Both `start` and `end` must be valid for reads. + /// * Both `start` and `end` must point to an initialized value. + /// * Both `start` and `end` must point to the same allocated object and + /// must either be in bounds or at most one byte past the end of the + /// allocated object. + /// * Both `start` and `end` must be _derived from_ a pointer to the same + /// object. + /// * The distance between `start` and `end` must not overflow `isize`. + /// * The distance being in bounds must not rely on "wrapping around" the + /// address space. + /// + /// Note that callers may pass a pair of pointers such that `start >= end`. + /// In that case, `None` will always be returned. + #[inline] + pub unsafe fn rfind_raw( + &self, + start: *const u8, + end: *const u8, + ) -> Option<*const u8> { + if start >= end { + return None; + } + let len = end.distance(start); + if len < __m256i::BYTES { + return if len < __m128i::BYTES { + // SAFETY: We require the caller to pass valid start/end + // pointers. + generic::rev_byte_by_byte(start, end, |b| { + b == self.sse2.needle1() + }) + } else { + // SAFETY: We require the caller to pass valid start/end + // pointers. + self.rfind_raw_sse2(start, end) + }; + } + // SAFETY: Building a `One` means it's safe to call both 'sse2' and + // 'avx2' routines. Also, we've checked that our haystack is big + // enough to run on the vector routine. Pointer validity is caller's + // responsibility. + // + // See note in forward routine above for why we don't just call + // `self.avx2.rfind_raw` directly here. + self.rfind_raw_avx2(start, end) + } + + /// Counts all occurrences of this byte in the given haystack represented + /// by raw pointers. + /// + /// This routine is useful if you're already using raw pointers and would + /// like to avoid converting back to a slice before executing a search. + /// + /// # Safety + /// + /// * Both `start` and `end` must be valid for reads. + /// * Both `start` and `end` must point to an initialized value. + /// * Both `start` and `end` must point to the same allocated object and + /// must either be in bounds or at most one byte past the end of the + /// allocated object. 
+ /// * Both `start` and `end` must be _derived from_ a pointer to the same + /// object. + /// * The distance between `start` and `end` must not overflow `isize`. + /// * The distance being in bounds must not rely on "wrapping around" the + /// address space. + /// + /// Note that callers may pass a pair of pointers such that `start >= end`. + /// In that case, `0` will always be returned. + #[inline] + pub unsafe fn count_raw(&self, start: *const u8, end: *const u8) -> usize { + if start >= end { + return 0; + } + let len = end.distance(start); + if len < __m256i::BYTES { + return if len < __m128i::BYTES { + // SAFETY: We require the caller to pass valid start/end + // pointers. + generic::count_byte_by_byte(start, end, |b| { + b == self.sse2.needle1() + }) + } else { + // SAFETY: We require the caller to pass valid start/end + // pointers. + self.count_raw_sse2(start, end) + }; + } + // SAFETY: Building a `One` means it's safe to call both 'sse2' and + // 'avx2' routines. Also, we've checked that our haystack is big + // enough to run on the vector routine. Pointer validity is caller's + // responsibility. + self.count_raw_avx2(start, end) + } + + /// Execute a search using SSE2 vectors and routines. + /// + /// # Safety + /// + /// Same as [`One::find_raw`], except the distance between `start` and + /// `end` must be at least the size of an SSE2 vector (in bytes). + /// + /// (The target feature safety obligation is automatically fulfilled by + /// virtue of being a method on `One`, which can only be constructed + /// when it is safe to call `sse2`/`avx2` routines.) + #[target_feature(enable = "sse2")] + #[inline] + unsafe fn find_raw_sse2( + &self, + start: *const u8, + end: *const u8, + ) -> Option<*const u8> { + self.sse2.find_raw(start, end) + } + + /// Execute a search using SSE2 vectors and routines. + /// + /// # Safety + /// + /// Same as [`One::rfind_raw`], except the distance between `start` and + /// `end` must be at least the size of an SSE2 vector (in bytes). + /// + /// (The target feature safety obligation is automatically fulfilled by + /// virtue of being a method on `One`, which can only be constructed + /// when it is safe to call `sse2`/`avx2` routines.) + #[target_feature(enable = "sse2")] + #[inline] + unsafe fn rfind_raw_sse2( + &self, + start: *const u8, + end: *const u8, + ) -> Option<*const u8> { + self.sse2.rfind_raw(start, end) + } + + /// Execute a count using SSE2 vectors and routines. + /// + /// # Safety + /// + /// Same as [`One::count_raw`], except the distance between `start` and + /// `end` must be at least the size of an SSE2 vector (in bytes). + /// + /// (The target feature safety obligation is automatically fulfilled by + /// virtue of being a method on `One`, which can only be constructed + /// when it is safe to call `sse2`/`avx2` routines.) + #[target_feature(enable = "sse2")] + #[inline] + unsafe fn count_raw_sse2( + &self, + start: *const u8, + end: *const u8, + ) -> usize { + self.sse2.count_raw(start, end) + } + + /// Execute a search using AVX2 vectors and routines. + /// + /// # Safety + /// + /// Same as [`One::find_raw`], except the distance between `start` and + /// `end` must be at least the size of an AVX2 vector (in bytes). + /// + /// (The target feature safety obligation is automatically fulfilled by + /// virtue of being a method on `One`, which can only be constructed + /// when it is safe to call `sse2`/`avx2` routines.) 
+ #[target_feature(enable = "avx2")] + #[inline] + unsafe fn find_raw_avx2( + &self, + start: *const u8, + end: *const u8, + ) -> Option<*const u8> { + self.avx2.find_raw(start, end) + } + + /// Execute a search using AVX2 vectors and routines. + /// + /// # Safety + /// + /// Same as [`One::rfind_raw`], except the distance between `start` and + /// `end` must be at least the size of an AVX2 vector (in bytes). + /// + /// (The target feature safety obligation is automatically fulfilled by + /// virtue of being a method on `One`, which can only be constructed + /// when it is safe to call `sse2`/`avx2` routines.) + #[target_feature(enable = "avx2")] + #[inline] + unsafe fn rfind_raw_avx2( + &self, + start: *const u8, + end: *const u8, + ) -> Option<*const u8> { + self.avx2.rfind_raw(start, end) + } + + /// Execute a count using AVX2 vectors and routines. + /// + /// # Safety + /// + /// Same as [`One::count_raw`], except the distance between `start` and + /// `end` must be at least the size of an AVX2 vector (in bytes). + /// + /// (The target feature safety obligation is automatically fulfilled by + /// virtue of being a method on `One`, which can only be constructed + /// when it is safe to call `sse2`/`avx2` routines.) + #[target_feature(enable = "avx2")] + #[inline] + unsafe fn count_raw_avx2( + &self, + start: *const u8, + end: *const u8, + ) -> usize { + self.avx2.count_raw(start, end) + } + + /// Returns an iterator over all occurrences of the needle byte in the + /// given haystack. + /// + /// The iterator returned implements `DoubleEndedIterator`. This means it + /// can also be used to find occurrences in reverse order. + #[inline] + pub fn iter<'a, 'h>(&'a self, haystack: &'h [u8]) -> OneIter<'a, 'h> { + OneIter { searcher: self, it: generic::Iter::new(haystack) } + } +} + +/// An iterator over all occurrences of a single byte in a haystack. +/// +/// This iterator implements `DoubleEndedIterator`, which means it can also be +/// used to find occurrences in reverse order. +/// +/// This iterator is created by the [`One::iter`] method. +/// +/// The lifetime parameters are as follows: +/// +/// * `'a` refers to the lifetime of the underlying [`One`] searcher. +/// * `'h` refers to the lifetime of the haystack being searched. +#[derive(Clone, Debug)] +pub struct OneIter<'a, 'h> { + searcher: &'a One, + it: generic::Iter<'h>, +} + +impl<'a, 'h> Iterator for OneIter<'a, 'h> { + type Item = usize; + + #[inline] + fn next(&mut self) -> Option { + // SAFETY: We rely on the generic iterator to provide valid start + // and end pointers, but we guarantee that any pointer returned by + // 'find_raw' falls within the bounds of the start and end pointer. + unsafe { self.it.next(|s, e| self.searcher.find_raw(s, e)) } + } + + #[inline] + fn count(self) -> usize { + self.it.count(|s, e| { + // SAFETY: We rely on our generic iterator to return valid start + // and end pointers. + unsafe { self.searcher.count_raw(s, e) } + }) + } + + #[inline] + fn size_hint(&self) -> (usize, Option) { + self.it.size_hint() + } +} + +impl<'a, 'h> DoubleEndedIterator for OneIter<'a, 'h> { + #[inline] + fn next_back(&mut self) -> Option { + // SAFETY: We rely on the generic iterator to provide valid start + // and end pointers, but we guarantee that any pointer returned by + // 'rfind_raw' falls within the bounds of the start and end pointer. 
+ unsafe { self.it.next_back(|s, e| self.searcher.rfind_raw(s, e)) } + } +} + +impl<'a, 'h> core::iter::FusedIterator for OneIter<'a, 'h> {} + +/// Finds all occurrences of two bytes in a haystack. +/// +/// That is, this reports matches of one of two possible bytes. For example, +/// searching for `a` or `b` in `afoobar` would report matches at offsets `0`, +/// `4` and `5`. +#[derive(Clone, Copy, Debug)] +pub struct Two { + /// Used for haystacks less than 32 bytes. + sse2: generic::Two<__m128i>, + /// Used for haystacks bigger than 32 bytes. + avx2: generic::Two<__m256i>, +} + +impl Two { + /// Create a new searcher that finds occurrences of the needle bytes given. + /// + /// This particular searcher is specialized to use AVX2 vector instructions + /// that typically make it quite fast. (SSE2 is used for haystacks that + /// are too short to accommodate an AVX2 vector.) + /// + /// If either SSE2 or AVX2 is unavailable in the current environment, then + /// `None` is returned. + #[inline] + pub fn new(needle1: u8, needle2: u8) -> Option { + if Two::is_available() { + // SAFETY: we check that sse2 and avx2 are available above. + unsafe { Some(Two::new_unchecked(needle1, needle2)) } + } else { + None + } + } + + /// Create a new finder specific to AVX2 vectors and routines without + /// checking that either SSE2 or AVX2 is available. + /// + /// # Safety + /// + /// Callers must guarantee that it is safe to execute both `sse2` and + /// `avx2` instructions in the current environment. + /// + /// Note that it is a common misconception that if one compiles for an + /// `x86_64` target, then they therefore automatically have access to SSE2 + /// instructions. While this is almost always the case, it isn't true in + /// 100% of cases. + #[target_feature(enable = "sse2", enable = "avx2")] + #[inline] + pub unsafe fn new_unchecked(needle1: u8, needle2: u8) -> Two { + Two { + sse2: generic::Two::new(needle1, needle2), + avx2: generic::Two::new(needle1, needle2), + } + } + + /// Returns true when this implementation is available in the current + /// environment. + /// + /// When this is true, it is guaranteed that [`Two::new`] will return + /// a `Some` value. Similarly, when it is false, it is guaranteed that + /// `Two::new` will return a `None` value. + /// + /// Note also that for the lifetime of a single program, if this returns + /// true then it will always return true. + #[inline] + pub fn is_available() -> bool { + #[cfg(not(target_feature = "sse2"))] + { + false + } + #[cfg(target_feature = "sse2")] + { + #[cfg(target_feature = "avx2")] + { + true + } + #[cfg(not(target_feature = "avx2"))] + { + #[cfg(feature = "std")] + { + std::is_x86_feature_detected!("avx2") + } + #[cfg(not(feature = "std"))] + { + false + } + } + } + } + + /// Return the first occurrence of one of the needle bytes in the given + /// haystack. If no such occurrence exists, then `None` is returned. + /// + /// The occurrence is reported as an offset into `haystack`. Its maximum + /// value is `haystack.len() - 1`. + #[inline] + pub fn find(&self, haystack: &[u8]) -> Option { + // SAFETY: `find_raw` guarantees that if a pointer is returned, it + // falls within the bounds of the start and end pointers. + unsafe { + generic::search_slice_with_raw(haystack, |s, e| { + self.find_raw(s, e) + }) + } + } + + /// Return the last occurrence of one of the needle bytes in the given + /// haystack. If no such occurrence exists, then `None` is returned. + /// + /// The occurrence is reported as an offset into `haystack`. 
Its maximum + /// value is `haystack.len() - 1`. + #[inline] + pub fn rfind(&self, haystack: &[u8]) -> Option { + // SAFETY: `find_raw` guarantees that if a pointer is returned, it + // falls within the bounds of the start and end pointers. + unsafe { + generic::search_slice_with_raw(haystack, |s, e| { + self.rfind_raw(s, e) + }) + } + } + + /// Like `find`, but accepts and returns raw pointers. + /// + /// When a match is found, the pointer returned is guaranteed to be + /// `>= start` and `< end`. + /// + /// This routine is useful if you're already using raw pointers and would + /// like to avoid converting back to a slice before executing a search. + /// + /// # Safety + /// + /// * Both `start` and `end` must be valid for reads. + /// * Both `start` and `end` must point to an initialized value. + /// * Both `start` and `end` must point to the same allocated object and + /// must either be in bounds or at most one byte past the end of the + /// allocated object. + /// * Both `start` and `end` must be _derived from_ a pointer to the same + /// object. + /// * The distance between `start` and `end` must not overflow `isize`. + /// * The distance being in bounds must not rely on "wrapping around" the + /// address space. + /// + /// Note that callers may pass a pair of pointers such that `start >= end`. + /// In that case, `None` will always be returned. + #[inline] + pub unsafe fn find_raw( + &self, + start: *const u8, + end: *const u8, + ) -> Option<*const u8> { + if start >= end { + return None; + } + let len = end.distance(start); + if len < __m256i::BYTES { + return if len < __m128i::BYTES { + // SAFETY: We require the caller to pass valid start/end + // pointers. + generic::fwd_byte_by_byte(start, end, |b| { + b == self.sse2.needle1() || b == self.sse2.needle2() + }) + } else { + // SAFETY: We require the caller to pass valid start/end + // pointers. + self.find_raw_sse2(start, end) + }; + } + // SAFETY: Building a `Two` means it's safe to call both 'sse2' and + // 'avx2' routines. Also, we've checked that our haystack is big + // enough to run on the vector routine. Pointer validity is caller's + // responsibility. + // + // Note that we could call `self.avx2.find_raw` directly here. But that + // means we'd have to annotate this routine with `target_feature`. + // Which is fine, because this routine is `unsafe` anyway and the + // `target_feature` obligation is met by virtue of building a `Two`. + // The real problem is that a routine with a `target_feature` + // annotation generally can't be inlined into caller code unless + // the caller code has the same target feature annotations. Namely, + // the common case (at time of writing) is for calling code to not + // have the `avx2` target feature enabled *at compile time*. Without + // `target_feature` on this routine, it can be inlined which will + // handle some of the short-haystack cases above without touching the + // architecture specific code. + self.find_raw_avx2(start, end) + } + + /// Like `rfind`, but accepts and returns raw pointers. + /// + /// When a match is found, the pointer returned is guaranteed to be + /// `>= start` and `< end`. + /// + /// This routine is useful if you're already using raw pointers and would + /// like to avoid converting back to a slice before executing a search. + /// + /// # Safety + /// + /// * Both `start` and `end` must be valid for reads. + /// * Both `start` and `end` must point to an initialized value. 
+ /// * Both `start` and `end` must point to the same allocated object and + /// must either be in bounds or at most one byte past the end of the + /// allocated object. + /// * Both `start` and `end` must be _derived from_ a pointer to the same + /// object. + /// * The distance between `start` and `end` must not overflow `isize`. + /// * The distance being in bounds must not rely on "wrapping around" the + /// address space. + /// + /// Note that callers may pass a pair of pointers such that `start >= end`. + /// In that case, `None` will always be returned. + #[inline] + pub unsafe fn rfind_raw( + &self, + start: *const u8, + end: *const u8, + ) -> Option<*const u8> { + if start >= end { + return None; + } + let len = end.distance(start); + if len < __m256i::BYTES { + return if len < __m128i::BYTES { + // SAFETY: We require the caller to pass valid start/end + // pointers. + generic::rev_byte_by_byte(start, end, |b| { + b == self.sse2.needle1() || b == self.sse2.needle2() + }) + } else { + // SAFETY: We require the caller to pass valid start/end + // pointers. + self.rfind_raw_sse2(start, end) + }; + } + // SAFETY: Building a `Two` means it's safe to call both 'sse2' and + // 'avx2' routines. Also, we've checked that our haystack is big + // enough to run on the vector routine. Pointer validity is caller's + // responsibility. + // + // See note in forward routine above for why we don't just call + // `self.avx2.rfind_raw` directly here. + self.rfind_raw_avx2(start, end) + } + + /// Execute a search using SSE2 vectors and routines. + /// + /// # Safety + /// + /// Same as [`Two::find_raw`], except the distance between `start` and + /// `end` must be at least the size of an SSE2 vector (in bytes). + /// + /// (The target feature safety obligation is automatically fulfilled by + /// virtue of being a method on `Two`, which can only be constructed + /// when it is safe to call `sse2`/`avx2` routines.) + #[target_feature(enable = "sse2")] + #[inline] + unsafe fn find_raw_sse2( + &self, + start: *const u8, + end: *const u8, + ) -> Option<*const u8> { + self.sse2.find_raw(start, end) + } + + /// Execute a search using SSE2 vectors and routines. + /// + /// # Safety + /// + /// Same as [`Two::rfind_raw`], except the distance between `start` and + /// `end` must be at least the size of an SSE2 vector (in bytes). + /// + /// (The target feature safety obligation is automatically fulfilled by + /// virtue of being a method on `Two`, which can only be constructed + /// when it is safe to call `sse2`/`avx2` routines.) + #[target_feature(enable = "sse2")] + #[inline] + unsafe fn rfind_raw_sse2( + &self, + start: *const u8, + end: *const u8, + ) -> Option<*const u8> { + self.sse2.rfind_raw(start, end) + } + + /// Execute a search using AVX2 vectors and routines. + /// + /// # Safety + /// + /// Same as [`Two::find_raw`], except the distance between `start` and + /// `end` must be at least the size of an AVX2 vector (in bytes). + /// + /// (The target feature safety obligation is automatically fulfilled by + /// virtue of being a method on `Two`, which can only be constructed + /// when it is safe to call `sse2`/`avx2` routines.) + #[target_feature(enable = "avx2")] + #[inline] + unsafe fn find_raw_avx2( + &self, + start: *const u8, + end: *const u8, + ) -> Option<*const u8> { + self.avx2.find_raw(start, end) + } + + /// Execute a search using AVX2 vectors and routines. 
+    ///
+    /// # Safety
+    ///
+    /// Same as [`Two::rfind_raw`], except the distance between `start` and
+    /// `end` must be at least the size of an AVX2 vector (in bytes).
+    ///
+    /// (The target feature safety obligation is automatically fulfilled by
+    /// virtue of being a method on `Two`, which can only be constructed
+    /// when it is safe to call `sse2`/`avx2` routines.)
+    #[target_feature(enable = "avx2")]
+    #[inline]
+    unsafe fn rfind_raw_avx2(
+        &self,
+        start: *const u8,
+        end: *const u8,
+    ) -> Option<*const u8> {
+        self.avx2.rfind_raw(start, end)
+    }
+
+    /// Returns an iterator over all occurrences of the needle bytes in the
+    /// given haystack.
+    ///
+    /// The iterator returned implements `DoubleEndedIterator`. This means it
+    /// can also be used to find occurrences in reverse order.
+    #[inline]
+    pub fn iter<'a, 'h>(&'a self, haystack: &'h [u8]) -> TwoIter<'a, 'h> {
+        TwoIter { searcher: self, it: generic::Iter::new(haystack) }
+    }
+}
+
+/// An iterator over all occurrences of two possible bytes in a haystack.
+///
+/// This iterator implements `DoubleEndedIterator`, which means it can also be
+/// used to find occurrences in reverse order.
+///
+/// This iterator is created by the [`Two::iter`] method.
+///
+/// The lifetime parameters are as follows:
+///
+/// * `'a` refers to the lifetime of the underlying [`Two`] searcher.
+/// * `'h` refers to the lifetime of the haystack being searched.
+#[derive(Clone, Debug)]
+pub struct TwoIter<'a, 'h> {
+    searcher: &'a Two,
+    it: generic::Iter<'h>,
+}
+
+impl<'a, 'h> Iterator for TwoIter<'a, 'h> {
+    type Item = usize;
+
+    #[inline]
+    fn next(&mut self) -> Option<usize> {
+        // SAFETY: We rely on the generic iterator to provide valid start
+        // and end pointers, but we guarantee that any pointer returned by
+        // 'find_raw' falls within the bounds of the start and end pointer.
+        unsafe { self.it.next(|s, e| self.searcher.find_raw(s, e)) }
+    }
+
+    #[inline]
+    fn size_hint(&self) -> (usize, Option<usize>) {
+        self.it.size_hint()
+    }
+}
+
+impl<'a, 'h> DoubleEndedIterator for TwoIter<'a, 'h> {
+    #[inline]
+    fn next_back(&mut self) -> Option<usize> {
+        // SAFETY: We rely on the generic iterator to provide valid start
+        // and end pointers, but we guarantee that any pointer returned by
+        // 'rfind_raw' falls within the bounds of the start and end pointer.
+        unsafe { self.it.next_back(|s, e| self.searcher.rfind_raw(s, e)) }
+    }
+}
+
+impl<'a, 'h> core::iter::FusedIterator for TwoIter<'a, 'h> {}
+
+/// Finds all occurrences of three bytes in a haystack.
+///
+/// That is, this reports matches of one of three possible bytes. For example,
+/// searching for `a`, `b` or `o` in `afoobar` would report matches at offsets
+/// `0`, `2`, `3`, `4` and `5`.
+#[derive(Clone, Copy, Debug)]
+pub struct Three {
+    /// Used for haystacks less than 32 bytes.
+    sse2: generic::Three<__m128i>,
+    /// Used for haystacks bigger than 32 bytes.
+    avx2: generic::Three<__m256i>,
+}
+
+impl Three {
+    /// Create a new searcher that finds occurrences of the needle bytes given.
+    ///
+    /// This particular searcher is specialized to use AVX2 vector instructions
+    /// that typically make it quite fast. (SSE2 is used for haystacks that
+    /// are too short to accommodate an AVX2 vector.)
+    ///
+    /// If either SSE2 or AVX2 is unavailable in the current environment, then
+    /// `None` is returned.
+    #[inline]
+    pub fn new(needle1: u8, needle2: u8, needle3: u8) -> Option<Three> {
+        if Three::is_available() {
+            // SAFETY: we check that sse2 and avx2 are available above.
+ unsafe { Some(Three::new_unchecked(needle1, needle2, needle3)) } + } else { + None + } + } + + /// Create a new finder specific to AVX2 vectors and routines without + /// checking that either SSE2 or AVX2 is available. + /// + /// # Safety + /// + /// Callers must guarantee that it is safe to execute both `sse2` and + /// `avx2` instructions in the current environment. + /// + /// Note that it is a common misconception that if one compiles for an + /// `x86_64` target, then they therefore automatically have access to SSE2 + /// instructions. While this is almost always the case, it isn't true in + /// 100% of cases. + #[target_feature(enable = "sse2", enable = "avx2")] + #[inline] + pub unsafe fn new_unchecked( + needle1: u8, + needle2: u8, + needle3: u8, + ) -> Three { + Three { + sse2: generic::Three::new(needle1, needle2, needle3), + avx2: generic::Three::new(needle1, needle2, needle3), + } + } + + /// Returns true when this implementation is available in the current + /// environment. + /// + /// When this is true, it is guaranteed that [`Three::new`] will return + /// a `Some` value. Similarly, when it is false, it is guaranteed that + /// `Three::new` will return a `None` value. + /// + /// Note also that for the lifetime of a single program, if this returns + /// true then it will always return true. + #[inline] + pub fn is_available() -> bool { + #[cfg(not(target_feature = "sse2"))] + { + false + } + #[cfg(target_feature = "sse2")] + { + #[cfg(target_feature = "avx2")] + { + true + } + #[cfg(not(target_feature = "avx2"))] + { + #[cfg(feature = "std")] + { + std::is_x86_feature_detected!("avx2") + } + #[cfg(not(feature = "std"))] + { + false + } + } + } + } + + /// Return the first occurrence of one of the needle bytes in the given + /// haystack. If no such occurrence exists, then `None` is returned. + /// + /// The occurrence is reported as an offset into `haystack`. Its maximum + /// value is `haystack.len() - 1`. + #[inline] + pub fn find(&self, haystack: &[u8]) -> Option { + // SAFETY: `find_raw` guarantees that if a pointer is returned, it + // falls within the bounds of the start and end pointers. + unsafe { + generic::search_slice_with_raw(haystack, |s, e| { + self.find_raw(s, e) + }) + } + } + + /// Return the last occurrence of one of the needle bytes in the given + /// haystack. If no such occurrence exists, then `None` is returned. + /// + /// The occurrence is reported as an offset into `haystack`. Its maximum + /// value is `haystack.len() - 1`. + #[inline] + pub fn rfind(&self, haystack: &[u8]) -> Option { + // SAFETY: `find_raw` guarantees that if a pointer is returned, it + // falls within the bounds of the start and end pointers. + unsafe { + generic::search_slice_with_raw(haystack, |s, e| { + self.rfind_raw(s, e) + }) + } + } + + /// Like `find`, but accepts and returns raw pointers. + /// + /// When a match is found, the pointer returned is guaranteed to be + /// `>= start` and `< end`. + /// + /// This routine is useful if you're already using raw pointers and would + /// like to avoid converting back to a slice before executing a search. + /// + /// # Safety + /// + /// * Both `start` and `end` must be valid for reads. + /// * Both `start` and `end` must point to an initialized value. + /// * Both `start` and `end` must point to the same allocated object and + /// must either be in bounds or at most one byte past the end of the + /// allocated object. + /// * Both `start` and `end` must be _derived from_ a pointer to the same + /// object. 
+ /// * The distance between `start` and `end` must not overflow `isize`. + /// * The distance being in bounds must not rely on "wrapping around" the + /// address space. + /// + /// Note that callers may pass a pair of pointers such that `start >= end`. + /// In that case, `None` will always be returned. + #[inline] + pub unsafe fn find_raw( + &self, + start: *const u8, + end: *const u8, + ) -> Option<*const u8> { + if start >= end { + return None; + } + let len = end.distance(start); + if len < __m256i::BYTES { + return if len < __m128i::BYTES { + // SAFETY: We require the caller to pass valid start/end + // pointers. + generic::fwd_byte_by_byte(start, end, |b| { + b == self.sse2.needle1() + || b == self.sse2.needle2() + || b == self.sse2.needle3() + }) + } else { + // SAFETY: We require the caller to pass valid start/end + // pointers. + self.find_raw_sse2(start, end) + }; + } + // SAFETY: Building a `Three` means it's safe to call both 'sse2' and + // 'avx2' routines. Also, we've checked that our haystack is big + // enough to run on the vector routine. Pointer validity is caller's + // responsibility. + // + // Note that we could call `self.avx2.find_raw` directly here. But that + // means we'd have to annotate this routine with `target_feature`. + // Which is fine, because this routine is `unsafe` anyway and the + // `target_feature` obligation is met by virtue of building a `Three`. + // The real problem is that a routine with a `target_feature` + // annotation generally can't be inlined into caller code unless + // the caller code has the same target feature annotations. Namely, + // the common case (at time of writing) is for calling code to not + // have the `avx2` target feature enabled *at compile time*. Without + // `target_feature` on this routine, it can be inlined which will + // handle some of the short-haystack cases above without touching the + // architecture specific code. + self.find_raw_avx2(start, end) + } + + /// Like `rfind`, but accepts and returns raw pointers. + /// + /// When a match is found, the pointer returned is guaranteed to be + /// `>= start` and `< end`. + /// + /// This routine is useful if you're already using raw pointers and would + /// like to avoid converting back to a slice before executing a search. + /// + /// # Safety + /// + /// * Both `start` and `end` must be valid for reads. + /// * Both `start` and `end` must point to an initialized value. + /// * Both `start` and `end` must point to the same allocated object and + /// must either be in bounds or at most one byte past the end of the + /// allocated object. + /// * Both `start` and `end` must be _derived from_ a pointer to the same + /// object. + /// * The distance between `start` and `end` must not overflow `isize`. + /// * The distance being in bounds must not rely on "wrapping around" the + /// address space. + /// + /// Note that callers may pass a pair of pointers such that `start >= end`. + /// In that case, `None` will always be returned. + #[inline] + pub unsafe fn rfind_raw( + &self, + start: *const u8, + end: *const u8, + ) -> Option<*const u8> { + if start >= end { + return None; + } + let len = end.distance(start); + if len < __m256i::BYTES { + return if len < __m128i::BYTES { + // SAFETY: We require the caller to pass valid start/end + // pointers. + generic::rev_byte_by_byte(start, end, |b| { + b == self.sse2.needle1() + || b == self.sse2.needle2() + || b == self.sse2.needle3() + }) + } else { + // SAFETY: We require the caller to pass valid start/end + // pointers. 
+ self.rfind_raw_sse2(start, end) + }; + } + // SAFETY: Building a `Three` means it's safe to call both 'sse2' and + // 'avx2' routines. Also, we've checked that our haystack is big + // enough to run on the vector routine. Pointer validity is caller's + // responsibility. + // + // See note in forward routine above for why we don't just call + // `self.avx2.rfind_raw` directly here. + self.rfind_raw_avx2(start, end) + } + + /// Execute a search using SSE2 vectors and routines. + /// + /// # Safety + /// + /// Same as [`Three::find_raw`], except the distance between `start` and + /// `end` must be at least the size of an SSE2 vector (in bytes). + /// + /// (The target feature safety obligation is automatically fulfilled by + /// virtue of being a method on `Three`, which can only be constructed + /// when it is safe to call `sse2`/`avx2` routines.) + #[target_feature(enable = "sse2")] + #[inline] + unsafe fn find_raw_sse2( + &self, + start: *const u8, + end: *const u8, + ) -> Option<*const u8> { + self.sse2.find_raw(start, end) + } + + /// Execute a search using SSE2 vectors and routines. + /// + /// # Safety + /// + /// Same as [`Three::rfind_raw`], except the distance between `start` and + /// `end` must be at least the size of an SSE2 vector (in bytes). + /// + /// (The target feature safety obligation is automatically fulfilled by + /// virtue of being a method on `Three`, which can only be constructed + /// when it is safe to call `sse2`/`avx2` routines.) + #[target_feature(enable = "sse2")] + #[inline] + unsafe fn rfind_raw_sse2( + &self, + start: *const u8, + end: *const u8, + ) -> Option<*const u8> { + self.sse2.rfind_raw(start, end) + } + + /// Execute a search using AVX2 vectors and routines. + /// + /// # Safety + /// + /// Same as [`Three::find_raw`], except the distance between `start` and + /// `end` must be at least the size of an AVX2 vector (in bytes). + /// + /// (The target feature safety obligation is automatically fulfilled by + /// virtue of being a method on `Three`, which can only be constructed + /// when it is safe to call `sse2`/`avx2` routines.) + #[target_feature(enable = "avx2")] + #[inline] + unsafe fn find_raw_avx2( + &self, + start: *const u8, + end: *const u8, + ) -> Option<*const u8> { + self.avx2.find_raw(start, end) + } + + /// Execute a search using AVX2 vectors and routines. + /// + /// # Safety + /// + /// Same as [`Three::rfind_raw`], except the distance between `start` and + /// `end` must be at least the size of an AVX2 vector (in bytes). + /// + /// (The target feature safety obligation is automatically fulfilled by + /// virtue of being a method on `Three`, which can only be constructed + /// when it is safe to call `sse2`/`avx2` routines.) + #[target_feature(enable = "avx2")] + #[inline] + unsafe fn rfind_raw_avx2( + &self, + start: *const u8, + end: *const u8, + ) -> Option<*const u8> { + self.avx2.rfind_raw(start, end) + } + + /// Returns an iterator over all occurrences of the needle bytes in the + /// given haystack. + /// + /// The iterator returned implements `DoubleEndedIterator`. This means it + /// can also be used to find occurrences in reverse order. + #[inline] + pub fn iter<'a, 'h>(&'a self, haystack: &'h [u8]) -> ThreeIter<'a, 'h> { + ThreeIter { searcher: self, it: generic::Iter::new(haystack) } + } +} + +/// An iterator over all occurrences of three possible bytes in a haystack. +/// +/// This iterator implements `DoubleEndedIterator`, which means it can also be +/// used to find occurrences in reverse order. 
+///
+/// This iterator is created by the [`Three::iter`] method.
+///
+/// The lifetime parameters are as follows:
+///
+/// * `'a` refers to the lifetime of the underlying [`Three`] searcher.
+/// * `'h` refers to the lifetime of the haystack being searched.
+#[derive(Clone, Debug)]
+pub struct ThreeIter<'a, 'h> {
+    searcher: &'a Three,
+    it: generic::Iter<'h>,
+}
+
+impl<'a, 'h> Iterator for ThreeIter<'a, 'h> {
+    type Item = usize;
+
+    #[inline]
+    fn next(&mut self) -> Option<usize> {
+        // SAFETY: We rely on the generic iterator to provide valid start
+        // and end pointers, but we guarantee that any pointer returned by
+        // 'find_raw' falls within the bounds of the start and end pointer.
+        unsafe { self.it.next(|s, e| self.searcher.find_raw(s, e)) }
+    }
+
+    #[inline]
+    fn size_hint(&self) -> (usize, Option<usize>) {
+        self.it.size_hint()
+    }
+}
+
+impl<'a, 'h> DoubleEndedIterator for ThreeIter<'a, 'h> {
+    #[inline]
+    fn next_back(&mut self) -> Option<usize> {
+        // SAFETY: We rely on the generic iterator to provide valid start
+        // and end pointers, but we guarantee that any pointer returned by
+        // 'rfind_raw' falls within the bounds of the start and end pointer.
+        unsafe { self.it.next_back(|s, e| self.searcher.rfind_raw(s, e)) }
+    }
+}
+
+impl<'a, 'h> core::iter::FusedIterator for ThreeIter<'a, 'h> {}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    define_memchr_quickcheck!(super);
+
+    #[test]
+    fn forward_one() {
+        crate::tests::memchr::Runner::new(1).forward_iter(
+            |haystack, needles| {
+                Some(One::new(needles[0])?.iter(haystack).collect())
+            },
+        )
+    }
+
+    #[test]
+    fn reverse_one() {
+        crate::tests::memchr::Runner::new(1).reverse_iter(
+            |haystack, needles| {
+                Some(One::new(needles[0])?.iter(haystack).rev().collect())
+            },
+        )
+    }
+
+    #[test]
+    fn count_one() {
+        crate::tests::memchr::Runner::new(1).count_iter(|haystack, needles| {
+            Some(One::new(needles[0])?.iter(haystack).count())
+        })
+    }
+
+    #[test]
+    fn forward_two() {
+        crate::tests::memchr::Runner::new(2).forward_iter(
+            |haystack, needles| {
+                let n1 = needles.get(0).copied()?;
+                let n2 = needles.get(1).copied()?;
+                Some(Two::new(n1, n2)?.iter(haystack).collect())
+            },
+        )
+    }
+
+    #[test]
+    fn reverse_two() {
+        crate::tests::memchr::Runner::new(2).reverse_iter(
+            |haystack, needles| {
+                let n1 = needles.get(0).copied()?;
+                let n2 = needles.get(1).copied()?;
+                Some(Two::new(n1, n2)?.iter(haystack).rev().collect())
+            },
+        )
+    }
+
+    #[test]
+    fn forward_three() {
+        crate::tests::memchr::Runner::new(3).forward_iter(
+            |haystack, needles| {
+                let n1 = needles.get(0).copied()?;
+                let n2 = needles.get(1).copied()?;
+                let n3 = needles.get(2).copied()?;
+                Some(Three::new(n1, n2, n3)?.iter(haystack).collect())
+            },
+        )
+    }
+
+    #[test]
+    fn reverse_three() {
+        crate::tests::memchr::Runner::new(3).reverse_iter(
+            |haystack, needles| {
+                let n1 = needles.get(0).copied()?;
+                let n2 = needles.get(1).copied()?;
+                let n3 = needles.get(2).copied()?;
+                Some(Three::new(n1, n2, n3)?.iter(haystack).rev().collect())
+            },
+        )
+    }
+}
diff --git a/vendor/memchr/src/arch/x86_64/avx2/mod.rs b/vendor/memchr/src/arch/x86_64/avx2/mod.rs
new file mode 100644
index 00000000000000..ee4097d6f4c34d
--- /dev/null
+++ b/vendor/memchr/src/arch/x86_64/avx2/mod.rs
@@ -0,0 +1,6 @@
+/*!
+Algorithms for the `x86_64` target using 256-bit vectors via AVX2.
+*/ + +pub mod memchr; +pub mod packedpair; diff --git a/vendor/memchr/src/arch/x86_64/avx2/packedpair.rs b/vendor/memchr/src/arch/x86_64/avx2/packedpair.rs new file mode 100644 index 00000000000000..efae7b66c72c5b --- /dev/null +++ b/vendor/memchr/src/arch/x86_64/avx2/packedpair.rs @@ -0,0 +1,272 @@ +/*! +A 256-bit vector implementation of the "packed pair" SIMD algorithm. + +The "packed pair" algorithm is based on the [generic SIMD] algorithm. The main +difference is that it (by default) uses a background distribution of byte +frequencies to heuristically select the pair of bytes to search for. + +[generic SIMD]: http://0x80.pl/articles/simd-strfind.html#first-and-last +*/ + +use core::arch::x86_64::{__m128i, __m256i}; + +use crate::arch::{all::packedpair::Pair, generic::packedpair}; + +/// A "packed pair" finder that uses 256-bit vector operations. +/// +/// This finder picks two bytes that it believes have high predictive power +/// for indicating an overall match of a needle. Depending on whether +/// `Finder::find` or `Finder::find_prefilter` is used, it reports offsets +/// where the needle matches or could match. In the prefilter case, candidates +/// are reported whenever the [`Pair`] of bytes given matches. +#[derive(Clone, Copy, Debug)] +pub struct Finder { + sse2: packedpair::Finder<__m128i>, + avx2: packedpair::Finder<__m256i>, +} + +impl Finder { + /// Create a new pair searcher. The searcher returned can either report + /// exact matches of `needle` or act as a prefilter and report candidate + /// positions of `needle`. + /// + /// If AVX2 is unavailable in the current environment or if a [`Pair`] + /// could not be constructed from the needle given, then `None` is + /// returned. + #[inline] + pub fn new(needle: &[u8]) -> Option { + Finder::with_pair(needle, Pair::new(needle)?) + } + + /// Create a new "packed pair" finder using the pair of bytes given. + /// + /// This constructor permits callers to control precisely which pair of + /// bytes is used as a predicate. + /// + /// If AVX2 is unavailable in the current environment, then `None` is + /// returned. + #[inline] + pub fn with_pair(needle: &[u8], pair: Pair) -> Option { + if Finder::is_available() { + // SAFETY: we check that sse2/avx2 is available above. We are also + // guaranteed to have needle.len() > 1 because we have a valid + // Pair. + unsafe { Some(Finder::with_pair_impl(needle, pair)) } + } else { + None + } + } + + /// Create a new `Finder` specific to SSE2 vectors and routines. + /// + /// # Safety + /// + /// Same as the safety for `packedpair::Finder::new`, and callers must also + /// ensure that both SSE2 and AVX2 are available. + #[target_feature(enable = "sse2", enable = "avx2")] + #[inline] + unsafe fn with_pair_impl(needle: &[u8], pair: Pair) -> Finder { + let sse2 = packedpair::Finder::<__m128i>::new(needle, pair); + let avx2 = packedpair::Finder::<__m256i>::new(needle, pair); + Finder { sse2, avx2 } + } + + /// Returns true when this implementation is available in the current + /// environment. + /// + /// When this is true, it is guaranteed that [`Finder::with_pair`] will + /// return a `Some` value. Similarly, when it is false, it is guaranteed + /// that `Finder::with_pair` will return a `None` value. Notice that this + /// does not guarantee that [`Finder::new`] will return a `Finder`. Namely, + /// even when `Finder::is_available` is true, it is not guaranteed that a + /// valid [`Pair`] can be found from the needle given. 
+    ///
+    /// Note also that for the lifetime of a single program, if this returns
+    /// true then it will always return true.
+    #[inline]
+    pub fn is_available() -> bool {
+        #[cfg(not(target_feature = "sse2"))]
+        {
+            false
+        }
+        #[cfg(target_feature = "sse2")]
+        {
+            #[cfg(target_feature = "avx2")]
+            {
+                true
+            }
+            #[cfg(not(target_feature = "avx2"))]
+            {
+                #[cfg(feature = "std")]
+                {
+                    std::is_x86_feature_detected!("avx2")
+                }
+                #[cfg(not(feature = "std"))]
+                {
+                    false
+                }
+            }
+        }
+    }
+
+    /// Execute a search using AVX2 vectors and routines.
+    ///
+    /// # Panics
+    ///
+    /// When `haystack.len()` is less than [`Finder::min_haystack_len`].
+    #[inline]
+    pub fn find(&self, haystack: &[u8], needle: &[u8]) -> Option<usize> {
+        // SAFETY: Building a `Finder` means it's safe to call 'sse2' routines.
+        unsafe { self.find_impl(haystack, needle) }
+    }
+
+    /// Run this finder on the given haystack as a prefilter.
+    ///
+    /// If a candidate match is found, then an offset where the needle *could*
+    /// begin in the haystack is returned.
+    ///
+    /// # Panics
+    ///
+    /// When `haystack.len()` is less than [`Finder::min_haystack_len`].
+    #[inline]
+    pub fn find_prefilter(&self, haystack: &[u8]) -> Option<usize> {
+        // SAFETY: Building a `Finder` means it's safe to call 'sse2' routines.
+        unsafe { self.find_prefilter_impl(haystack) }
+    }
+
+    /// Execute a search using AVX2 vectors and routines.
+    ///
+    /// # Panics
+    ///
+    /// When `haystack.len()` is less than [`Finder::min_haystack_len`].
+    ///
+    /// # Safety
+    ///
+    /// (The target feature safety obligation is automatically fulfilled by
+    /// virtue of being a method on `Finder`, which can only be constructed
+    /// when it is safe to call `sse2` and `avx2` routines.)
+    #[target_feature(enable = "sse2", enable = "avx2")]
+    #[inline]
+    unsafe fn find_impl(
+        &self,
+        haystack: &[u8],
+        needle: &[u8],
+    ) -> Option<usize> {
+        if haystack.len() < self.avx2.min_haystack_len() {
+            self.sse2.find(haystack, needle)
+        } else {
+            self.avx2.find(haystack, needle)
+        }
+    }
+
+    /// Execute a prefilter search using AVX2 vectors and routines.
+    ///
+    /// # Panics
+    ///
+    /// When `haystack.len()` is less than [`Finder::min_haystack_len`].
+    ///
+    /// # Safety
+    ///
+    /// (The target feature safety obligation is automatically fulfilled by
+    /// virtue of being a method on `Finder`, which can only be constructed
+    /// when it is safe to call `sse2` and `avx2` routines.)
+    #[target_feature(enable = "sse2", enable = "avx2")]
+    #[inline]
+    unsafe fn find_prefilter_impl(&self, haystack: &[u8]) -> Option<usize> {
+        if haystack.len() < self.avx2.min_haystack_len() {
+            self.sse2.find_prefilter(haystack)
+        } else {
+            self.avx2.find_prefilter(haystack)
+        }
+    }
+
+    /// Returns the pair of offsets (into the needle) used to check as a
+    /// predicate before confirming whether a needle exists at a particular
+    /// position.
+    #[inline]
+    pub fn pair(&self) -> &Pair {
+        self.avx2.pair()
+    }
+
+    /// Returns the minimum haystack length that this `Finder` can search.
+    ///
+    /// Using a haystack with length smaller than this in a search will result
+    /// in a panic. The reason for this restriction is that this finder is
+    /// meant to be a low-level component that is part of a larger substring
+    /// strategy. In that sense, it avoids trying to handle all cases and
+    /// instead only handles the cases that it can handle very well.
+    #[inline]
+    pub fn min_haystack_len(&self) -> usize {
+        // The caller doesn't need to care about AVX2's min_haystack_len
+        // since this implementation will automatically switch to the SSE2
+        // implementation if the haystack is too short for AVX2. Therefore, the
+        // caller only needs to care about SSE2's min_haystack_len.
+        //
+        // This does assume that SSE2's min_haystack_len is less than or
+        // equal to AVX2's min_haystack_len. In practice, this is true and
+        // there is no way it could be false based on how this Finder is
+        // implemented. Namely, both SSE2 and AVX2 use the same `Pair`. If
+        // they used different pairs, then it's possible (although perhaps
+        // pathological) for SSE2's min_haystack_len to be bigger than AVX2's.
+        self.sse2.min_haystack_len()
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    fn find(haystack: &[u8], needle: &[u8]) -> Option<Option<usize>> {
+        let f = Finder::new(needle)?;
+        if haystack.len() < f.min_haystack_len() {
+            return None;
+        }
+        Some(f.find(haystack, needle))
+    }
+
+    define_substring_forward_quickcheck!(find);
+
+    #[test]
+    fn forward_substring() {
+        crate::tests::substring::Runner::new().fwd(find).run()
+    }
+
+    #[test]
+    fn forward_packedpair() {
+        fn find(
+            haystack: &[u8],
+            needle: &[u8],
+            index1: u8,
+            index2: u8,
+        ) -> Option<Option<usize>> {
+            let pair = Pair::with_indices(needle, index1, index2)?;
+            let f = Finder::with_pair(needle, pair)?;
+            if haystack.len() < f.min_haystack_len() {
+                return None;
+            }
+            Some(f.find(haystack, needle))
+        }
+        crate::tests::packedpair::Runner::new().fwd(find).run()
+    }
+
+    #[test]
+    fn forward_packedpair_prefilter() {
+        fn find(
+            haystack: &[u8],
+            needle: &[u8],
+            index1: u8,
+            index2: u8,
+        ) -> Option<Option<usize>> {
+            if !cfg!(target_feature = "sse2") {
+                return None;
+            }
+            let pair = Pair::with_indices(needle, index1, index2)?;
+            let f = Finder::with_pair(needle, pair)?;
+            if haystack.len() < f.min_haystack_len() {
+                return None;
+            }
+            Some(f.find_prefilter(haystack))
+        }
+        crate::tests::packedpair::Runner::new().fwd(find).run()
+    }
+}
diff --git a/vendor/memchr/src/arch/x86_64/memchr.rs b/vendor/memchr/src/arch/x86_64/memchr.rs
new file mode 100644
index 00000000000000..edb6d431d97304
--- /dev/null
+++ b/vendor/memchr/src/arch/x86_64/memchr.rs
@@ -0,0 +1,335 @@
+/*!
+Wrapper routines for `memchr` and friends.
+
+These routines efficiently dispatch to the best implementation based on what
+the CPU supports.
+*/
+
+/// Provides a way to run a memchr-like function while amortizing the cost of
+/// runtime CPU feature detection.
+///
+/// This works by loading a function pointer from an atomic global. Initially,
+/// this global is set to a function that does CPU feature detection. For
+/// example, if AVX2 is enabled, then the AVX2 implementation is used.
+/// Otherwise, at least on x86_64, the SSE2 implementation is used. (And
+/// in some niche cases, if SSE2 isn't available, then the architecture
+/// independent fallback implementation is used.)
+///
+/// After the first call to this function, the atomic global is replaced with
+/// the specific AVX2, SSE2 or fallback routine chosen. Subsequent calls then
+/// will directly call the chosen routine instead of needing to go through the
+/// CPU feature detection branching again.
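+///
+/// As a rough, self-contained sketch of that pattern (an editorial
+/// illustration only: the names `FIND`, `find_avx2` and `find_fallback`, and
+/// the simplified slice-based signature below, are hypothetical and are not
+/// the ones used by this macro):
+///
+/// ```ignore
+/// use core::sync::atomic::{AtomicPtr, Ordering};
+///
+/// type RealFn = fn(haystack: &[u8], needle: u8) -> Option<usize>;
+///
+/// // Starts out pointing at `detect`; after the first call it points at
+/// // whichever routine `detect` selected.
+/// static FIND: AtomicPtr<()> = AtomicPtr::new(detect as RealFn as *mut ());
+///
+/// fn find_avx2(haystack: &[u8], needle: u8) -> Option<usize> {
+///     // Stand-in for a vectorized implementation.
+///     haystack.iter().position(|&b| b == needle)
+/// }
+///
+/// fn find_fallback(haystack: &[u8], needle: u8) -> Option<usize> {
+///     haystack.iter().position(|&b| b == needle)
+/// }
+///
+/// fn detect(haystack: &[u8], needle: u8) -> Option<usize> {
+///     // Run CPU feature detection once, cache the winner, then delegate.
+///     let chosen: RealFn = if std::is_x86_feature_detected!("avx2") {
+///         find_avx2
+///     } else {
+///         find_fallback
+///     };
+///     FIND.store(chosen as *mut (), Ordering::Relaxed);
+///     chosen(haystack, needle)
+/// }
+///
+/// fn find(haystack: &[u8], needle: u8) -> Option<usize> {
+///     let fun = FIND.load(Ordering::Relaxed);
+///     // SAFETY: `FIND` only ever holds a `RealFn` cast to `*mut ()`.
+///     let fun = unsafe { core::mem::transmute::<*mut (), RealFn>(fun) };
+///     fun(haystack, needle)
+/// }
+/// ```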
+/// +/// This particular macro is specifically written to provide the implementation +/// of functions with the following signature: +/// +/// ```ignore +/// fn memchr(needle1: u8, start: *const u8, end: *const u8) -> Option; +/// ``` +/// +/// Where you can also have `memchr2` and `memchr3`, but with `needle2` and +/// `needle3`, respectively. The `start` and `end` parameters correspond to the +/// start and end of the haystack, respectively. +/// +/// We use raw pointers here instead of the more obvious `haystack: &[u8]` so +/// that the function is compatible with our lower level iterator logic that +/// operates on raw pointers. We use this macro to implement "raw" memchr +/// routines with the signature above, and then define memchr routines using +/// regular slices on top of them. +/// +/// Note that we use `#[cfg(target_feature = "sse2")]` below even though +/// it shouldn't be strictly necessary because without it, it seems to +/// cause the compiler to blow up. I guess it can't handle a function +/// pointer being created with a sse target feature? Dunno. See the +/// `build-for-x86-64-but-non-sse-target` CI job if you want to experiment with +/// this. +/// +/// # Safety +/// +/// Primarily callers must ensure that `$fnty` is a correct function pointer +/// type and not something else. +/// +/// Callers must also ensure that `$memchrty::$memchrfind` corresponds to a +/// routine that returns a valid function pointer when a match is found. That +/// is, a pointer that is `>= start` and `< end`. +/// +/// Callers must also ensure that the `$hay_start` and `$hay_end` identifiers +/// correspond to valid pointers. +macro_rules! unsafe_ifunc { + ( + $memchrty:ident, + $memchrfind:ident, + $fnty:ty, + $retty:ty, + $hay_start:ident, + $hay_end:ident, + $($needle:ident),+ + ) => {{ + #![allow(unused_unsafe)] + + use core::sync::atomic::{AtomicPtr, Ordering}; + + type Fn = *mut (); + type RealFn = $fnty; + static FN: AtomicPtr<()> = AtomicPtr::new(detect as Fn); + + #[cfg(target_feature = "sse2")] + #[target_feature(enable = "sse2", enable = "avx2")] + unsafe fn find_avx2( + $($needle: u8),+, + $hay_start: *const u8, + $hay_end: *const u8, + ) -> $retty { + use crate::arch::x86_64::avx2::memchr::$memchrty; + $memchrty::new_unchecked($($needle),+) + .$memchrfind($hay_start, $hay_end) + } + + #[cfg(target_feature = "sse2")] + #[target_feature(enable = "sse2")] + unsafe fn find_sse2( + $($needle: u8),+, + $hay_start: *const u8, + $hay_end: *const u8, + ) -> $retty { + use crate::arch::x86_64::sse2::memchr::$memchrty; + $memchrty::new_unchecked($($needle),+) + .$memchrfind($hay_start, $hay_end) + } + + unsafe fn find_fallback( + $($needle: u8),+, + $hay_start: *const u8, + $hay_end: *const u8, + ) -> $retty { + use crate::arch::all::memchr::$memchrty; + $memchrty::new($($needle),+).$memchrfind($hay_start, $hay_end) + } + + unsafe fn detect( + $($needle: u8),+, + $hay_start: *const u8, + $hay_end: *const u8, + ) -> $retty { + let fun = { + #[cfg(not(target_feature = "sse2"))] + { + debug!( + "no sse2 feature available, using fallback for {}", + stringify!($memchrty), + ); + find_fallback as RealFn + } + #[cfg(target_feature = "sse2")] + { + use crate::arch::x86_64::{sse2, avx2}; + if avx2::memchr::$memchrty::is_available() { + debug!("chose AVX2 for {}", stringify!($memchrty)); + find_avx2 as RealFn + } else if sse2::memchr::$memchrty::is_available() { + debug!("chose SSE2 for {}", stringify!($memchrty)); + find_sse2 as RealFn + } else { + debug!("chose fallback for {}", 
stringify!($memchrty));
+                        find_fallback as RealFn
+                    }
+                }
+            };
+            FN.store(fun as Fn, Ordering::Relaxed);
+            // SAFETY: The only thing we need to uphold here is the
+            // `#[target_feature]` requirements. Since we check is_available
+            // above before using the corresponding implementation, we are
+            // guaranteed to only call code that is supported on the current
+            // CPU.
+            fun($($needle),+, $hay_start, $hay_end)
+        }
+
+        // SAFETY: By virtue of the caller contract, RealFn is a function
+        // pointer, which is always safe to transmute with a *mut (). Also,
+        // since we use $memchrty::is_available, it is guaranteed to be safe
+        // to call $memchrty::$memchrfind.
+        unsafe {
+            let fun = FN.load(Ordering::Relaxed);
+            core::mem::transmute::<Fn, RealFn>(fun)(
+                $($needle),+,
+                $hay_start,
+                $hay_end,
+            )
+        }
+    }};
+}
+
+// The routines below dispatch to AVX2, SSE2 or a fallback routine based on
+// what's available in the current environment. The secret sauce here is that
+// we only check for which one to use approximately once, and then "cache" that
+// choice into a global function pointer. Subsequent invocations then just call
+// the appropriate function directly.
+
+/// memchr, but using raw pointers to represent the haystack.
+///
+/// # Safety
+///
+/// Pointers must be valid. See `One::find_raw`.
+#[inline(always)]
+pub(crate) fn memchr_raw(
+    n1: u8,
+    start: *const u8,
+    end: *const u8,
+) -> Option<*const u8> {
+    // SAFETY: We provide a valid function pointer type.
+    unsafe_ifunc!(
+        One,
+        find_raw,
+        unsafe fn(u8, *const u8, *const u8) -> Option<*const u8>,
+        Option<*const u8>,
+        start,
+        end,
+        n1
+    )
+}
+
+/// memrchr, but using raw pointers to represent the haystack.
+///
+/// # Safety
+///
+/// Pointers must be valid. See `One::rfind_raw`.
+#[inline(always)]
+pub(crate) fn memrchr_raw(
+    n1: u8,
+    start: *const u8,
+    end: *const u8,
+) -> Option<*const u8> {
+    // SAFETY: We provide a valid function pointer type.
+    unsafe_ifunc!(
+        One,
+        rfind_raw,
+        unsafe fn(u8, *const u8, *const u8) -> Option<*const u8>,
+        Option<*const u8>,
+        start,
+        end,
+        n1
+    )
+}
+
+/// memchr2, but using raw pointers to represent the haystack.
+///
+/// # Safety
+///
+/// Pointers must be valid. See `Two::find_raw`.
+#[inline(always)]
+pub(crate) fn memchr2_raw(
+    n1: u8,
+    n2: u8,
+    start: *const u8,
+    end: *const u8,
+) -> Option<*const u8> {
+    // SAFETY: We provide a valid function pointer type.
+    unsafe_ifunc!(
+        Two,
+        find_raw,
+        unsafe fn(u8, u8, *const u8, *const u8) -> Option<*const u8>,
+        Option<*const u8>,
+        start,
+        end,
+        n1,
+        n2
+    )
+}
+
+/// memrchr2, but using raw pointers to represent the haystack.
+///
+/// # Safety
+///
+/// Pointers must be valid. See `Two::rfind_raw`.
+#[inline(always)]
+pub(crate) fn memrchr2_raw(
+    n1: u8,
+    n2: u8,
+    start: *const u8,
+    end: *const u8,
+) -> Option<*const u8> {
+    // SAFETY: We provide a valid function pointer type.
+    unsafe_ifunc!(
+        Two,
+        rfind_raw,
+        unsafe fn(u8, u8, *const u8, *const u8) -> Option<*const u8>,
+        Option<*const u8>,
+        start,
+        end,
+        n1,
+        n2
+    )
+}
+
+/// memchr3, but using raw pointers to represent the haystack.
+///
+/// # Safety
+///
+/// Pointers must be valid. See `Three::find_raw`.
+#[inline(always)]
+pub(crate) fn memchr3_raw(
+    n1: u8,
+    n2: u8,
+    n3: u8,
+    start: *const u8,
+    end: *const u8,
+) -> Option<*const u8> {
+    // SAFETY: We provide a valid function pointer type.
+ unsafe_ifunc!( + Three, + find_raw, + unsafe fn(u8, u8, u8, *const u8, *const u8) -> Option<*const u8>, + Option<*const u8>, + start, + end, + n1, + n2, + n3 + ) +} + +/// memrchr3, but using raw pointers to represent the haystack. +/// +/// # Safety +/// +/// Pointers must be valid. See `Three::rfind_raw`. +#[inline(always)] +pub(crate) fn memrchr3_raw( + n1: u8, + n2: u8, + n3: u8, + start: *const u8, + end: *const u8, +) -> Option<*const u8> { + // SAFETY: We provide a valid function pointer type. + unsafe_ifunc!( + Three, + rfind_raw, + unsafe fn(u8, u8, u8, *const u8, *const u8) -> Option<*const u8>, + Option<*const u8>, + start, + end, + n1, + n2, + n3 + ) +} + +/// Count all matching bytes, but using raw pointers to represent the haystack. +/// +/// # Safety +/// +/// Pointers must be valid. See `One::count_raw`. +#[inline(always)] +pub(crate) fn count_raw(n1: u8, start: *const u8, end: *const u8) -> usize { + // SAFETY: We provide a valid function pointer type. + unsafe_ifunc!( + One, + count_raw, + unsafe fn(u8, *const u8, *const u8) -> usize, + usize, + start, + end, + n1 + ) +} diff --git a/vendor/memchr/src/arch/x86_64/mod.rs b/vendor/memchr/src/arch/x86_64/mod.rs new file mode 100644 index 00000000000000..5dad7218216b79 --- /dev/null +++ b/vendor/memchr/src/arch/x86_64/mod.rs @@ -0,0 +1,8 @@ +/*! +Vector algorithms for the `x86_64` target. +*/ + +pub mod avx2; +pub mod sse2; + +pub(crate) mod memchr; diff --git a/vendor/memchr/src/arch/x86_64/sse2/memchr.rs b/vendor/memchr/src/arch/x86_64/sse2/memchr.rs new file mode 100644 index 00000000000000..79572b82b1c618 --- /dev/null +++ b/vendor/memchr/src/arch/x86_64/sse2/memchr.rs @@ -0,0 +1,1077 @@ +/*! +This module defines 128-bit vector implementations of `memchr` and friends. + +The main types in this module are [`One`], [`Two`] and [`Three`]. They are for +searching for one, two or three distinct bytes, respectively, in a haystack. +Each type also has corresponding double ended iterators. These searchers are +typically much faster than scalar routines accomplishing the same task. + +The `One` searcher also provides a [`One::count`] routine for efficiently +counting the number of times a single byte occurs in a haystack. This is +useful, for example, for counting the number of lines in a haystack. This +routine exists because it is usually faster, especially with a high match +count, than using [`One::find`] repeatedly. ([`OneIter`] specializes its +`Iterator::count` implementation to use this routine.) + +Only one, two and three bytes are supported because three bytes is about +the point where one sees diminishing returns. Beyond this point and it's +probably (but not necessarily) better to just use a simple `[bool; 256]` array +or similar. However, it depends mightily on the specific work-load and the +expected match frequency. +*/ + +use core::arch::x86_64::__m128i; + +use crate::{arch::generic::memchr as generic, ext::Pointer, vector::Vector}; + +/// Finds all occurrences of a single byte in a haystack. +#[derive(Clone, Copy, Debug)] +pub struct One(generic::One<__m128i>); + +impl One { + /// Create a new searcher that finds occurrences of the needle byte given. + /// + /// This particular searcher is specialized to use SSE2 vector instructions + /// that typically make it quite fast. + /// + /// If SSE2 is unavailable in the current environment, then `None` is + /// returned. + #[inline] + pub fn new(needle: u8) -> Option { + if One::is_available() { + // SAFETY: we check that sse2 is available above. 
+ unsafe { Some(One::new_unchecked(needle)) } + } else { + None + } + } + + /// Create a new finder specific to SSE2 vectors and routines without + /// checking that SSE2 is available. + /// + /// # Safety + /// + /// Callers must guarantee that it is safe to execute `sse2` instructions + /// in the current environment. + /// + /// Note that it is a common misconception that if one compiles for an + /// `x86_64` target, then they therefore automatically have access to SSE2 + /// instructions. While this is almost always the case, it isn't true in + /// 100% of cases. + #[target_feature(enable = "sse2")] + #[inline] + pub unsafe fn new_unchecked(needle: u8) -> One { + One(generic::One::new(needle)) + } + + /// Returns true when this implementation is available in the current + /// environment. + /// + /// When this is true, it is guaranteed that [`One::new`] will return + /// a `Some` value. Similarly, when it is false, it is guaranteed that + /// `One::new` will return a `None` value. + /// + /// Note also that for the lifetime of a single program, if this returns + /// true then it will always return true. + #[inline] + pub fn is_available() -> bool { + #[cfg(target_feature = "sse2")] + { + true + } + #[cfg(not(target_feature = "sse2"))] + { + false + } + } + + /// Return the first occurrence of one of the needle bytes in the given + /// haystack. If no such occurrence exists, then `None` is returned. + /// + /// The occurrence is reported as an offset into `haystack`. Its maximum + /// value is `haystack.len() - 1`. + #[inline] + pub fn find(&self, haystack: &[u8]) -> Option { + // SAFETY: `find_raw` guarantees that if a pointer is returned, it + // falls within the bounds of the start and end pointers. + unsafe { + generic::search_slice_with_raw(haystack, |s, e| { + self.find_raw(s, e) + }) + } + } + + /// Return the last occurrence of one of the needle bytes in the given + /// haystack. If no such occurrence exists, then `None` is returned. + /// + /// The occurrence is reported as an offset into `haystack`. Its maximum + /// value is `haystack.len() - 1`. + #[inline] + pub fn rfind(&self, haystack: &[u8]) -> Option { + // SAFETY: `rfind_raw` guarantees that if a pointer is returned, it + // falls within the bounds of the start and end pointers. + unsafe { + generic::search_slice_with_raw(haystack, |s, e| { + self.rfind_raw(s, e) + }) + } + } + + /// Counts all occurrences of this byte in the given haystack. + #[inline] + pub fn count(&self, haystack: &[u8]) -> usize { + // SAFETY: All of our pointers are derived directly from a borrowed + // slice, which is guaranteed to be valid. + unsafe { + let start = haystack.as_ptr(); + let end = start.add(haystack.len()); + self.count_raw(start, end) + } + } + + /// Like `find`, but accepts and returns raw pointers. + /// + /// When a match is found, the pointer returned is guaranteed to be + /// `>= start` and `< end`. + /// + /// This routine is useful if you're already using raw pointers and would + /// like to avoid converting back to a slice before executing a search. + /// + /// # Safety + /// + /// * Both `start` and `end` must be valid for reads. + /// * Both `start` and `end` must point to an initialized value. + /// * Both `start` and `end` must point to the same allocated object and + /// must either be in bounds or at most one byte past the end of the + /// allocated object. + /// * Both `start` and `end` must be _derived from_ a pointer to the same + /// object. 
+ /// * The distance between `start` and `end` must not overflow `isize`. + /// * The distance being in bounds must not rely on "wrapping around" the + /// address space. + /// + /// Note that callers may pass a pair of pointers such that `start >= end`. + /// In that case, `None` will always be returned. + #[inline] + pub unsafe fn find_raw( + &self, + start: *const u8, + end: *const u8, + ) -> Option<*const u8> { + if start >= end { + return None; + } + if end.distance(start) < __m128i::BYTES { + // SAFETY: We require the caller to pass valid start/end pointers. + return generic::fwd_byte_by_byte(start, end, |b| { + b == self.0.needle1() + }); + } + // SAFETY: Building a `One` means it's safe to call 'sse2' routines. + // Also, we've checked that our haystack is big enough to run on the + // vector routine. Pointer validity is caller's responsibility. + // + // Note that we could call `self.0.find_raw` directly here. But that + // means we'd have to annotate this routine with `target_feature`. + // Which is fine, because this routine is `unsafe` anyway and the + // `target_feature` obligation is met by virtue of building a `One`. + // The real problem is that a routine with a `target_feature` + // annotation generally can't be inlined into caller code unless the + // caller code has the same target feature annotations. Which is maybe + // okay for SSE2, but we do the same thing for AVX2 where caller code + // probably usually doesn't have AVX2 enabled. That means that this + // routine can be inlined which will handle some of the short-haystack + // cases above without touching the architecture specific code. + self.find_raw_impl(start, end) + } + + /// Like `rfind`, but accepts and returns raw pointers. + /// + /// When a match is found, the pointer returned is guaranteed to be + /// `>= start` and `< end`. + /// + /// This routine is useful if you're already using raw pointers and would + /// like to avoid converting back to a slice before executing a search. + /// + /// # Safety + /// + /// * Both `start` and `end` must be valid for reads. + /// * Both `start` and `end` must point to an initialized value. + /// * Both `start` and `end` must point to the same allocated object and + /// must either be in bounds or at most one byte past the end of the + /// allocated object. + /// * Both `start` and `end` must be _derived from_ a pointer to the same + /// object. + /// * The distance between `start` and `end` must not overflow `isize`. + /// * The distance being in bounds must not rely on "wrapping around" the + /// address space. + /// + /// Note that callers may pass a pair of pointers such that `start >= end`. + /// In that case, `None` will always be returned. + #[inline] + pub unsafe fn rfind_raw( + &self, + start: *const u8, + end: *const u8, + ) -> Option<*const u8> { + if start >= end { + return None; + } + if end.distance(start) < __m128i::BYTES { + // SAFETY: We require the caller to pass valid start/end pointers. + return generic::rev_byte_by_byte(start, end, |b| { + b == self.0.needle1() + }); + } + // SAFETY: Building a `One` means it's safe to call 'sse2' routines. + // Also, we've checked that our haystack is big enough to run on the + // vector routine. Pointer validity is caller's responsibility. + // + // See note in forward routine above for why we don't just call + // `self.0.rfind_raw` directly here. + self.rfind_raw_impl(start, end) + } + + /// Counts all occurrences of this byte in the given haystack represented + /// by raw pointers. 
+ /// + /// This routine is useful if you're already using raw pointers and would + /// like to avoid converting back to a slice before executing a search. + /// + /// # Safety + /// + /// * Both `start` and `end` must be valid for reads. + /// * Both `start` and `end` must point to an initialized value. + /// * Both `start` and `end` must point to the same allocated object and + /// must either be in bounds or at most one byte past the end of the + /// allocated object. + /// * Both `start` and `end` must be _derived from_ a pointer to the same + /// object. + /// * The distance between `start` and `end` must not overflow `isize`. + /// * The distance being in bounds must not rely on "wrapping around" the + /// address space. + /// + /// Note that callers may pass a pair of pointers such that `start >= end`. + /// In that case, `0` will always be returned. + #[inline] + pub unsafe fn count_raw(&self, start: *const u8, end: *const u8) -> usize { + if start >= end { + return 0; + } + if end.distance(start) < __m128i::BYTES { + // SAFETY: We require the caller to pass valid start/end pointers. + return generic::count_byte_by_byte(start, end, |b| { + b == self.0.needle1() + }); + } + // SAFETY: Building a `One` means it's safe to call 'sse2' routines. + // Also, we've checked that our haystack is big enough to run on the + // vector routine. Pointer validity is caller's responsibility. + self.count_raw_impl(start, end) + } + + /// Execute a search using SSE2 vectors and routines. + /// + /// # Safety + /// + /// Same as [`One::find_raw`], except the distance between `start` and + /// `end` must be at least the size of an SSE2 vector (in bytes). + /// + /// (The target feature safety obligation is automatically fulfilled by + /// virtue of being a method on `One`, which can only be constructed + /// when it is safe to call `sse2` routines.) + #[target_feature(enable = "sse2")] + #[inline] + unsafe fn find_raw_impl( + &self, + start: *const u8, + end: *const u8, + ) -> Option<*const u8> { + self.0.find_raw(start, end) + } + + /// Execute a search using SSE2 vectors and routines. + /// + /// # Safety + /// + /// Same as [`One::rfind_raw`], except the distance between `start` and + /// `end` must be at least the size of an SSE2 vector (in bytes). + /// + /// (The target feature safety obligation is automatically fulfilled by + /// virtue of being a method on `One`, which can only be constructed + /// when it is safe to call `sse2` routines.) + #[target_feature(enable = "sse2")] + #[inline] + unsafe fn rfind_raw_impl( + &self, + start: *const u8, + end: *const u8, + ) -> Option<*const u8> { + self.0.rfind_raw(start, end) + } + + /// Execute a count using SSE2 vectors and routines. + /// + /// # Safety + /// + /// Same as [`One::count_raw`], except the distance between `start` and + /// `end` must be at least the size of an SSE2 vector (in bytes). + /// + /// (The target feature safety obligation is automatically fulfilled by + /// virtue of being a method on `One`, which can only be constructed + /// when it is safe to call `sse2` routines.) + #[target_feature(enable = "sse2")] + #[inline] + unsafe fn count_raw_impl( + &self, + start: *const u8, + end: *const u8, + ) -> usize { + self.0.count_raw(start, end) + } + + /// Returns an iterator over all occurrences of the needle byte in the + /// given haystack. + /// + /// The iterator returned implements `DoubleEndedIterator`. This means it + /// can also be used to find occurrences in reverse order. 
+    #[inline]
+    pub fn iter<'a, 'h>(&'a self, haystack: &'h [u8]) -> OneIter<'a, 'h> {
+        OneIter { searcher: self, it: generic::Iter::new(haystack) }
+    }
+}
+
+/// An iterator over all occurrences of a single byte in a haystack.
+///
+/// This iterator implements `DoubleEndedIterator`, which means it can also be
+/// used to find occurrences in reverse order.
+///
+/// This iterator is created by the [`One::iter`] method.
+///
+/// The lifetime parameters are as follows:
+///
+/// * `'a` refers to the lifetime of the underlying [`One`] searcher.
+/// * `'h` refers to the lifetime of the haystack being searched.
+#[derive(Clone, Debug)]
+pub struct OneIter<'a, 'h> {
+    searcher: &'a One,
+    it: generic::Iter<'h>,
+}
+
+impl<'a, 'h> Iterator for OneIter<'a, 'h> {
+    type Item = usize;
+
+    #[inline]
+    fn next(&mut self) -> Option<usize> {
+        // SAFETY: We rely on the generic iterator to provide valid start
+        // and end pointers, but we guarantee that any pointer returned by
+        // 'find_raw' falls within the bounds of the start and end pointer.
+        unsafe { self.it.next(|s, e| self.searcher.find_raw(s, e)) }
+    }
+
+    #[inline]
+    fn count(self) -> usize {
+        self.it.count(|s, e| {
+            // SAFETY: We rely on our generic iterator to return valid start
+            // and end pointers.
+            unsafe { self.searcher.count_raw(s, e) }
+        })
+    }
+
+    #[inline]
+    fn size_hint(&self) -> (usize, Option<usize>) {
+        self.it.size_hint()
+    }
+}
+
+impl<'a, 'h> DoubleEndedIterator for OneIter<'a, 'h> {
+    #[inline]
+    fn next_back(&mut self) -> Option<usize> {
+        // SAFETY: We rely on the generic iterator to provide valid start
+        // and end pointers, but we guarantee that any pointer returned by
+        // 'rfind_raw' falls within the bounds of the start and end pointer.
+        unsafe { self.it.next_back(|s, e| self.searcher.rfind_raw(s, e)) }
+    }
+}
+
+impl<'a, 'h> core::iter::FusedIterator for OneIter<'a, 'h> {}
+
+/// Finds all occurrences of two bytes in a haystack.
+///
+/// That is, this reports matches of one of two possible bytes. For example,
+/// searching for `a` or `b` in `afoobar` would report matches at offsets `0`,
+/// `4` and `5`.
+#[derive(Clone, Copy, Debug)]
+pub struct Two(generic::Two<__m128i>);
+
+impl Two {
+    /// Create a new searcher that finds occurrences of the needle bytes given.
+    ///
+    /// This particular searcher is specialized to use SSE2 vector instructions
+    /// that typically make it quite fast.
+    ///
+    /// If SSE2 is unavailable in the current environment, then `None` is
+    /// returned.
+    #[inline]
+    pub fn new(needle1: u8, needle2: u8) -> Option<Two> {
+        if Two::is_available() {
+            // SAFETY: we check that sse2 is available above.
+            unsafe { Some(Two::new_unchecked(needle1, needle2)) }
+        } else {
+            None
+        }
+    }
+
+    /// Create a new finder specific to SSE2 vectors and routines without
+    /// checking that SSE2 is available.
+    ///
+    /// # Safety
+    ///
+    /// Callers must guarantee that it is safe to execute `sse2` instructions
+    /// in the current environment.
+    ///
+    /// Note that it is a common misconception that if one compiles for an
+    /// `x86_64` target, then they therefore automatically have access to SSE2
+    /// instructions. While this is almost always the case, it isn't true in
+    /// 100% of cases.
+    #[target_feature(enable = "sse2")]
+    #[inline]
+    pub unsafe fn new_unchecked(needle1: u8, needle2: u8) -> Two {
+        Two(generic::Two::new(needle1, needle2))
+    }
+
+    /// Returns true when this implementation is available in the current
+    /// environment.
+ /// + /// When this is true, it is guaranteed that [`Two::new`] will return + /// a `Some` value. Similarly, when it is false, it is guaranteed that + /// `Two::new` will return a `None` value. + /// + /// Note also that for the lifetime of a single program, if this returns + /// true then it will always return true. + #[inline] + pub fn is_available() -> bool { + #[cfg(target_feature = "sse2")] + { + true + } + #[cfg(not(target_feature = "sse2"))] + { + false + } + } + + /// Return the first occurrence of one of the needle bytes in the given + /// haystack. If no such occurrence exists, then `None` is returned. + /// + /// The occurrence is reported as an offset into `haystack`. Its maximum + /// value is `haystack.len() - 1`. + #[inline] + pub fn find(&self, haystack: &[u8]) -> Option { + // SAFETY: `find_raw` guarantees that if a pointer is returned, it + // falls within the bounds of the start and end pointers. + unsafe { + generic::search_slice_with_raw(haystack, |s, e| { + self.find_raw(s, e) + }) + } + } + + /// Return the last occurrence of one of the needle bytes in the given + /// haystack. If no such occurrence exists, then `None` is returned. + /// + /// The occurrence is reported as an offset into `haystack`. Its maximum + /// value is `haystack.len() - 1`. + #[inline] + pub fn rfind(&self, haystack: &[u8]) -> Option { + // SAFETY: `rfind_raw` guarantees that if a pointer is returned, it + // falls within the bounds of the start and end pointers. + unsafe { + generic::search_slice_with_raw(haystack, |s, e| { + self.rfind_raw(s, e) + }) + } + } + + /// Like `find`, but accepts and returns raw pointers. + /// + /// When a match is found, the pointer returned is guaranteed to be + /// `>= start` and `< end`. + /// + /// This routine is useful if you're already using raw pointers and would + /// like to avoid converting back to a slice before executing a search. + /// + /// # Safety + /// + /// * Both `start` and `end` must be valid for reads. + /// * Both `start` and `end` must point to an initialized value. + /// * Both `start` and `end` must point to the same allocated object and + /// must either be in bounds or at most one byte past the end of the + /// allocated object. + /// * Both `start` and `end` must be _derived from_ a pointer to the same + /// object. + /// * The distance between `start` and `end` must not overflow `isize`. + /// * The distance being in bounds must not rely on "wrapping around" the + /// address space. + /// + /// Note that callers may pass a pair of pointers such that `start >= end`. + /// In that case, `None` will always be returned. + #[inline] + pub unsafe fn find_raw( + &self, + start: *const u8, + end: *const u8, + ) -> Option<*const u8> { + if start >= end { + return None; + } + if end.distance(start) < __m128i::BYTES { + // SAFETY: We require the caller to pass valid start/end pointers. + return generic::fwd_byte_by_byte(start, end, |b| { + b == self.0.needle1() || b == self.0.needle2() + }); + } + // SAFETY: Building a `Two` means it's safe to call 'sse2' routines. + // Also, we've checked that our haystack is big enough to run on the + // vector routine. Pointer validity is caller's responsibility. + // + // Note that we could call `self.0.find_raw` directly here. But that + // means we'd have to annotate this routine with `target_feature`. + // Which is fine, because this routine is `unsafe` anyway and the + // `target_feature` obligation is met by virtue of building a `Two`. 
+ // The real problem is that a routine with a `target_feature` + // annotation generally can't be inlined into caller code unless the + // caller code has the same target feature annotations. Which is maybe + // okay for SSE2, but we do the same thing for AVX2 where caller code + // probably usually doesn't have AVX2 enabled. That means that this + // routine can be inlined which will handle some of the short-haystack + // cases above without touching the architecture specific code. + self.find_raw_impl(start, end) + } + + /// Like `rfind`, but accepts and returns raw pointers. + /// + /// When a match is found, the pointer returned is guaranteed to be + /// `>= start` and `< end`. + /// + /// This routine is useful if you're already using raw pointers and would + /// like to avoid converting back to a slice before executing a search. + /// + /// # Safety + /// + /// * Both `start` and `end` must be valid for reads. + /// * Both `start` and `end` must point to an initialized value. + /// * Both `start` and `end` must point to the same allocated object and + /// must either be in bounds or at most one byte past the end of the + /// allocated object. + /// * Both `start` and `end` must be _derived from_ a pointer to the same + /// object. + /// * The distance between `start` and `end` must not overflow `isize`. + /// * The distance being in bounds must not rely on "wrapping around" the + /// address space. + /// + /// Note that callers may pass a pair of pointers such that `start >= end`. + /// In that case, `None` will always be returned. + #[inline] + pub unsafe fn rfind_raw( + &self, + start: *const u8, + end: *const u8, + ) -> Option<*const u8> { + if start >= end { + return None; + } + if end.distance(start) < __m128i::BYTES { + // SAFETY: We require the caller to pass valid start/end pointers. + return generic::rev_byte_by_byte(start, end, |b| { + b == self.0.needle1() || b == self.0.needle2() + }); + } + // SAFETY: Building a `Two` means it's safe to call 'sse2' routines. + // Also, we've checked that our haystack is big enough to run on the + // vector routine. Pointer validity is caller's responsibility. + // + // See note in forward routine above for why we don't just call + // `self.0.rfind_raw` directly here. + self.rfind_raw_impl(start, end) + } + + /// Execute a search using SSE2 vectors and routines. + /// + /// # Safety + /// + /// Same as [`Two::find_raw`], except the distance between `start` and + /// `end` must be at least the size of an SSE2 vector (in bytes). + /// + /// (The target feature safety obligation is automatically fulfilled by + /// virtue of being a method on `Two`, which can only be constructed + /// when it is safe to call `sse2` routines.) + #[target_feature(enable = "sse2")] + #[inline] + unsafe fn find_raw_impl( + &self, + start: *const u8, + end: *const u8, + ) -> Option<*const u8> { + self.0.find_raw(start, end) + } + + /// Execute a search using SSE2 vectors and routines. + /// + /// # Safety + /// + /// Same as [`Two::rfind_raw`], except the distance between `start` and + /// `end` must be at least the size of an SSE2 vector (in bytes). + /// + /// (The target feature safety obligation is automatically fulfilled by + /// virtue of being a method on `Two`, which can only be constructed + /// when it is safe to call `sse2` routines.) 
+    #[target_feature(enable = "sse2")]
+    #[inline]
+    unsafe fn rfind_raw_impl(
+        &self,
+        start: *const u8,
+        end: *const u8,
+    ) -> Option<*const u8> {
+        self.0.rfind_raw(start, end)
+    }
+
+    /// Returns an iterator over all occurrences of the needle bytes in the
+    /// given haystack.
+    ///
+    /// The iterator returned implements `DoubleEndedIterator`. This means it
+    /// can also be used to find occurrences in reverse order.
+    #[inline]
+    pub fn iter<'a, 'h>(&'a self, haystack: &'h [u8]) -> TwoIter<'a, 'h> {
+        TwoIter { searcher: self, it: generic::Iter::new(haystack) }
+    }
+}
+
+/// An iterator over all occurrences of two possible bytes in a haystack.
+///
+/// This iterator implements `DoubleEndedIterator`, which means it can also be
+/// used to find occurrences in reverse order.
+///
+/// This iterator is created by the [`Two::iter`] method.
+///
+/// The lifetime parameters are as follows:
+///
+/// * `'a` refers to the lifetime of the underlying [`Two`] searcher.
+/// * `'h` refers to the lifetime of the haystack being searched.
+#[derive(Clone, Debug)]
+pub struct TwoIter<'a, 'h> {
+    searcher: &'a Two,
+    it: generic::Iter<'h>,
+}
+
+impl<'a, 'h> Iterator for TwoIter<'a, 'h> {
+    type Item = usize;
+
+    #[inline]
+    fn next(&mut self) -> Option<usize> {
+        // SAFETY: We rely on the generic iterator to provide valid start
+        // and end pointers, but we guarantee that any pointer returned by
+        // 'find_raw' falls within the bounds of the start and end pointer.
+        unsafe { self.it.next(|s, e| self.searcher.find_raw(s, e)) }
+    }
+
+    #[inline]
+    fn size_hint(&self) -> (usize, Option<usize>) {
+        self.it.size_hint()
+    }
+}
+
+impl<'a, 'h> DoubleEndedIterator for TwoIter<'a, 'h> {
+    #[inline]
+    fn next_back(&mut self) -> Option<usize> {
+        // SAFETY: We rely on the generic iterator to provide valid start
+        // and end pointers, but we guarantee that any pointer returned by
+        // 'rfind_raw' falls within the bounds of the start and end pointer.
+        unsafe { self.it.next_back(|s, e| self.searcher.rfind_raw(s, e)) }
+    }
+}
+
+impl<'a, 'h> core::iter::FusedIterator for TwoIter<'a, 'h> {}
+
+/// Finds all occurrences of three bytes in a haystack.
+///
+/// That is, this reports matches of one of three possible bytes. For example,
+/// searching for `a`, `b` or `o` in `afoobar` would report matches at offsets
+/// `0`, `2`, `3`, `4` and `5`.
+#[derive(Clone, Copy, Debug)]
+pub struct Three(generic::Three<__m128i>);
+
+impl Three {
+    /// Create a new searcher that finds occurrences of the needle bytes given.
+    ///
+    /// This particular searcher is specialized to use SSE2 vector instructions
+    /// that typically make it quite fast.
+    ///
+    /// If SSE2 is unavailable in the current environment, then `None` is
+    /// returned.
+    #[inline]
+    pub fn new(needle1: u8, needle2: u8, needle3: u8) -> Option<Three> {
+        if Three::is_available() {
+            // SAFETY: we check that sse2 is available above.
+            unsafe { Some(Three::new_unchecked(needle1, needle2, needle3)) }
+        } else {
+            None
+        }
+    }
+
+    /// Create a new finder specific to SSE2 vectors and routines without
+    /// checking that SSE2 is available.
+    ///
+    /// # Safety
+    ///
+    /// Callers must guarantee that it is safe to execute `sse2` instructions
+    /// in the current environment.
+    ///
+    /// Note that it is a common misconception that if one compiles for an
+    /// `x86_64` target, then they therefore automatically have access to SSE2
+    /// instructions. While this is almost always the case, it isn't true in
+    /// 100% of cases.
+ #[target_feature(enable = "sse2")] + #[inline] + pub unsafe fn new_unchecked( + needle1: u8, + needle2: u8, + needle3: u8, + ) -> Three { + Three(generic::Three::new(needle1, needle2, needle3)) + } + + /// Returns true when this implementation is available in the current + /// environment. + /// + /// When this is true, it is guaranteed that [`Three::new`] will return + /// a `Some` value. Similarly, when it is false, it is guaranteed that + /// `Three::new` will return a `None` value. + /// + /// Note also that for the lifetime of a single program, if this returns + /// true then it will always return true. + #[inline] + pub fn is_available() -> bool { + #[cfg(target_feature = "sse2")] + { + true + } + #[cfg(not(target_feature = "sse2"))] + { + false + } + } + + /// Return the first occurrence of one of the needle bytes in the given + /// haystack. If no such occurrence exists, then `None` is returned. + /// + /// The occurrence is reported as an offset into `haystack`. Its maximum + /// value is `haystack.len() - 1`. + #[inline] + pub fn find(&self, haystack: &[u8]) -> Option { + // SAFETY: `find_raw` guarantees that if a pointer is returned, it + // falls within the bounds of the start and end pointers. + unsafe { + generic::search_slice_with_raw(haystack, |s, e| { + self.find_raw(s, e) + }) + } + } + + /// Return the last occurrence of one of the needle bytes in the given + /// haystack. If no such occurrence exists, then `None` is returned. + /// + /// The occurrence is reported as an offset into `haystack`. Its maximum + /// value is `haystack.len() - 1`. + #[inline] + pub fn rfind(&self, haystack: &[u8]) -> Option { + // SAFETY: `rfind_raw` guarantees that if a pointer is returned, it + // falls within the bounds of the start and end pointers. + unsafe { + generic::search_slice_with_raw(haystack, |s, e| { + self.rfind_raw(s, e) + }) + } + } + + /// Like `find`, but accepts and returns raw pointers. + /// + /// When a match is found, the pointer returned is guaranteed to be + /// `>= start` and `< end`. + /// + /// This routine is useful if you're already using raw pointers and would + /// like to avoid converting back to a slice before executing a search. + /// + /// # Safety + /// + /// * Both `start` and `end` must be valid for reads. + /// * Both `start` and `end` must point to an initialized value. + /// * Both `start` and `end` must point to the same allocated object and + /// must either be in bounds or at most one byte past the end of the + /// allocated object. + /// * Both `start` and `end` must be _derived from_ a pointer to the same + /// object. + /// * The distance between `start` and `end` must not overflow `isize`. + /// * The distance being in bounds must not rely on "wrapping around" the + /// address space. + /// + /// Note that callers may pass a pair of pointers such that `start >= end`. + /// In that case, `None` will always be returned. + #[inline] + pub unsafe fn find_raw( + &self, + start: *const u8, + end: *const u8, + ) -> Option<*const u8> { + if start >= end { + return None; + } + if end.distance(start) < __m128i::BYTES { + // SAFETY: We require the caller to pass valid start/end pointers. + return generic::fwd_byte_by_byte(start, end, |b| { + b == self.0.needle1() + || b == self.0.needle2() + || b == self.0.needle3() + }); + } + // SAFETY: Building a `Three` means it's safe to call 'sse2' routines. + // Also, we've checked that our haystack is big enough to run on the + // vector routine. Pointer validity is caller's responsibility. 
+ // + // Note that we could call `self.0.find_raw` directly here. But that + // means we'd have to annotate this routine with `target_feature`. + // Which is fine, because this routine is `unsafe` anyway and the + // `target_feature` obligation is met by virtue of building a `Three`. + // The real problem is that a routine with a `target_feature` + // annotation generally can't be inlined into caller code unless the + // caller code has the same target feature annotations. Which is maybe + // okay for SSE2, but we do the same thing for AVX2 where caller code + // probably usually doesn't have AVX2 enabled. That means that this + // routine can be inlined which will handle some of the short-haystack + // cases above without touching the architecture specific code. + self.find_raw_impl(start, end) + } + + /// Like `rfind`, but accepts and returns raw pointers. + /// + /// When a match is found, the pointer returned is guaranteed to be + /// `>= start` and `< end`. + /// + /// This routine is useful if you're already using raw pointers and would + /// like to avoid converting back to a slice before executing a search. + /// + /// # Safety + /// + /// * Both `start` and `end` must be valid for reads. + /// * Both `start` and `end` must point to an initialized value. + /// * Both `start` and `end` must point to the same allocated object and + /// must either be in bounds or at most one byte past the end of the + /// allocated object. + /// * Both `start` and `end` must be _derived from_ a pointer to the same + /// object. + /// * The distance between `start` and `end` must not overflow `isize`. + /// * The distance being in bounds must not rely on "wrapping around" the + /// address space. + /// + /// Note that callers may pass a pair of pointers such that `start >= end`. + /// In that case, `None` will always be returned. + #[inline] + pub unsafe fn rfind_raw( + &self, + start: *const u8, + end: *const u8, + ) -> Option<*const u8> { + if start >= end { + return None; + } + if end.distance(start) < __m128i::BYTES { + // SAFETY: We require the caller to pass valid start/end pointers. + return generic::rev_byte_by_byte(start, end, |b| { + b == self.0.needle1() + || b == self.0.needle2() + || b == self.0.needle3() + }); + } + // SAFETY: Building a `Three` means it's safe to call 'sse2' routines. + // Also, we've checked that our haystack is big enough to run on the + // vector routine. Pointer validity is caller's responsibility. + // + // See note in forward routine above for why we don't just call + // `self.0.rfind_raw` directly here. + self.rfind_raw_impl(start, end) + } + + /// Execute a search using SSE2 vectors and routines. + /// + /// # Safety + /// + /// Same as [`Three::find_raw`], except the distance between `start` and + /// `end` must be at least the size of an SSE2 vector (in bytes). + /// + /// (The target feature safety obligation is automatically fulfilled by + /// virtue of being a method on `Three`, which can only be constructed + /// when it is safe to call `sse2` routines.) + #[target_feature(enable = "sse2")] + #[inline] + unsafe fn find_raw_impl( + &self, + start: *const u8, + end: *const u8, + ) -> Option<*const u8> { + self.0.find_raw(start, end) + } + + /// Execute a search using SSE2 vectors and routines. + /// + /// # Safety + /// + /// Same as [`Three::rfind_raw`], except the distance between `start` and + /// `end` must be at least the size of an SSE2 vector (in bytes). 
+ /// + /// (The target feature safety obligation is automatically fulfilled by + /// virtue of being a method on `Three`, which can only be constructed + /// when it is safe to call `sse2` routines.) + #[target_feature(enable = "sse2")] + #[inline] + unsafe fn rfind_raw_impl( + &self, + start: *const u8, + end: *const u8, + ) -> Option<*const u8> { + self.0.rfind_raw(start, end) + } + + /// Returns an iterator over all occurrences of the needle byte in the + /// given haystack. + /// + /// The iterator returned implements `DoubleEndedIterator`. This means it + /// can also be used to find occurrences in reverse order. + #[inline] + pub fn iter<'a, 'h>(&'a self, haystack: &'h [u8]) -> ThreeIter<'a, 'h> { + ThreeIter { searcher: self, it: generic::Iter::new(haystack) } + } +} + +/// An iterator over all occurrences of three possible bytes in a haystack. +/// +/// This iterator implements `DoubleEndedIterator`, which means it can also be +/// used to find occurrences in reverse order. +/// +/// This iterator is created by the [`Three::iter`] method. +/// +/// The lifetime parameters are as follows: +/// +/// * `'a` refers to the lifetime of the underlying [`Three`] searcher. +/// * `'h` refers to the lifetime of the haystack being searched. +#[derive(Clone, Debug)] +pub struct ThreeIter<'a, 'h> { + searcher: &'a Three, + it: generic::Iter<'h>, +} + +impl<'a, 'h> Iterator for ThreeIter<'a, 'h> { + type Item = usize; + + #[inline] + fn next(&mut self) -> Option { + // SAFETY: We rely on the generic iterator to provide valid start + // and end pointers, but we guarantee that any pointer returned by + // 'find_raw' falls within the bounds of the start and end pointer. + unsafe { self.it.next(|s, e| self.searcher.find_raw(s, e)) } + } + + #[inline] + fn size_hint(&self) -> (usize, Option) { + self.it.size_hint() + } +} + +impl<'a, 'h> DoubleEndedIterator for ThreeIter<'a, 'h> { + #[inline] + fn next_back(&mut self) -> Option { + // SAFETY: We rely on the generic iterator to provide valid start + // and end pointers, but we guarantee that any pointer returned by + // 'rfind_raw' falls within the bounds of the start and end pointer. 
+ unsafe { self.it.next_back(|s, e| self.searcher.rfind_raw(s, e)) } + } +} + +impl<'a, 'h> core::iter::FusedIterator for ThreeIter<'a, 'h> {} + +#[cfg(test)] +mod tests { + use super::*; + + define_memchr_quickcheck!(super); + + #[test] + fn forward_one() { + crate::tests::memchr::Runner::new(1).forward_iter( + |haystack, needles| { + Some(One::new(needles[0])?.iter(haystack).collect()) + }, + ) + } + + #[test] + fn reverse_one() { + crate::tests::memchr::Runner::new(1).reverse_iter( + |haystack, needles| { + Some(One::new(needles[0])?.iter(haystack).rev().collect()) + }, + ) + } + + #[test] + fn count_one() { + crate::tests::memchr::Runner::new(1).count_iter(|haystack, needles| { + Some(One::new(needles[0])?.iter(haystack).count()) + }) + } + + #[test] + fn forward_two() { + crate::tests::memchr::Runner::new(2).forward_iter( + |haystack, needles| { + let n1 = needles.get(0).copied()?; + let n2 = needles.get(1).copied()?; + Some(Two::new(n1, n2)?.iter(haystack).collect()) + }, + ) + } + + #[test] + fn reverse_two() { + crate::tests::memchr::Runner::new(2).reverse_iter( + |haystack, needles| { + let n1 = needles.get(0).copied()?; + let n2 = needles.get(1).copied()?; + Some(Two::new(n1, n2)?.iter(haystack).rev().collect()) + }, + ) + } + + #[test] + fn forward_three() { + crate::tests::memchr::Runner::new(3).forward_iter( + |haystack, needles| { + let n1 = needles.get(0).copied()?; + let n2 = needles.get(1).copied()?; + let n3 = needles.get(2).copied()?; + Some(Three::new(n1, n2, n3)?.iter(haystack).collect()) + }, + ) + } + + #[test] + fn reverse_three() { + crate::tests::memchr::Runner::new(3).reverse_iter( + |haystack, needles| { + let n1 = needles.get(0).copied()?; + let n2 = needles.get(1).copied()?; + let n3 = needles.get(2).copied()?; + Some(Three::new(n1, n2, n3)?.iter(haystack).rev().collect()) + }, + ) + } +} diff --git a/vendor/memchr/src/arch/x86_64/sse2/mod.rs b/vendor/memchr/src/arch/x86_64/sse2/mod.rs new file mode 100644 index 00000000000000..bcb830790fbbaa --- /dev/null +++ b/vendor/memchr/src/arch/x86_64/sse2/mod.rs @@ -0,0 +1,6 @@ +/*! +Algorithms for the `x86_64` target using 128-bit vectors via SSE2. +*/ + +pub mod memchr; +pub mod packedpair; diff --git a/vendor/memchr/src/arch/x86_64/sse2/packedpair.rs b/vendor/memchr/src/arch/x86_64/sse2/packedpair.rs new file mode 100644 index 00000000000000..c8b5b9999b63cd --- /dev/null +++ b/vendor/memchr/src/arch/x86_64/sse2/packedpair.rs @@ -0,0 +1,232 @@ +/*! +A 128-bit vector implementation of the "packed pair" SIMD algorithm. + +The "packed pair" algorithm is based on the [generic SIMD] algorithm. The main +difference is that it (by default) uses a background distribution of byte +frequencies to heuristically select the pair of bytes to search for. + +[generic SIMD]: http://0x80.pl/articles/simd-strfind.html#first-and-last +*/ + +use core::arch::x86_64::__m128i; + +use crate::arch::{all::packedpair::Pair, generic::packedpair}; + +/// A "packed pair" finder that uses 128-bit vector operations. +/// +/// This finder picks two bytes that it believes have high predictive power +/// for indicating an overall match of a needle. Depending on whether +/// `Finder::find` or `Finder::find_prefilter` is used, it reports offsets +/// where the needle matches or could match. In the prefilter case, candidates +/// are reported whenever the [`Pair`] of bytes given matches. +#[derive(Clone, Copy, Debug)] +pub struct Finder(packedpair::Finder<__m128i>); + +impl Finder { + /// Create a new pair searcher. 
The searcher returned can either report + /// exact matches of `needle` or act as a prefilter and report candidate + /// positions of `needle`. + /// + /// If SSE2 is unavailable in the current environment or if a [`Pair`] + /// could not be constructed from the needle given, then `None` is + /// returned. + #[inline] + pub fn new(needle: &[u8]) -> Option { + Finder::with_pair(needle, Pair::new(needle)?) + } + + /// Create a new "packed pair" finder using the pair of bytes given. + /// + /// This constructor permits callers to control precisely which pair of + /// bytes is used as a predicate. + /// + /// If SSE2 is unavailable in the current environment, then `None` is + /// returned. + #[inline] + pub fn with_pair(needle: &[u8], pair: Pair) -> Option { + if Finder::is_available() { + // SAFETY: we check that sse2 is available above. We are also + // guaranteed to have needle.len() > 1 because we have a valid + // Pair. + unsafe { Some(Finder::with_pair_impl(needle, pair)) } + } else { + None + } + } + + /// Create a new `Finder` specific to SSE2 vectors and routines. + /// + /// # Safety + /// + /// Same as the safety for `packedpair::Finder::new`, and callers must also + /// ensure that SSE2 is available. + #[target_feature(enable = "sse2")] + #[inline] + unsafe fn with_pair_impl(needle: &[u8], pair: Pair) -> Finder { + let finder = packedpair::Finder::<__m128i>::new(needle, pair); + Finder(finder) + } + + /// Returns true when this implementation is available in the current + /// environment. + /// + /// When this is true, it is guaranteed that [`Finder::with_pair`] will + /// return a `Some` value. Similarly, when it is false, it is guaranteed + /// that `Finder::with_pair` will return a `None` value. Notice that this + /// does not guarantee that [`Finder::new`] will return a `Finder`. Namely, + /// even when `Finder::is_available` is true, it is not guaranteed that a + /// valid [`Pair`] can be found from the needle given. + /// + /// Note also that for the lifetime of a single program, if this returns + /// true then it will always return true. + #[inline] + pub fn is_available() -> bool { + #[cfg(not(target_feature = "sse2"))] + { + false + } + #[cfg(target_feature = "sse2")] + { + true + } + } + + /// Execute a search using SSE2 vectors and routines. + /// + /// # Panics + /// + /// When `haystack.len()` is less than [`Finder::min_haystack_len`]. + #[inline] + pub fn find(&self, haystack: &[u8], needle: &[u8]) -> Option { + // SAFETY: Building a `Finder` means it's safe to call 'sse2' routines. + unsafe { self.find_impl(haystack, needle) } + } + + /// Run this finder on the given haystack as a prefilter. + /// + /// If a candidate match is found, then an offset where the needle *could* + /// begin in the haystack is returned. + /// + /// # Panics + /// + /// When `haystack.len()` is less than [`Finder::min_haystack_len`]. + #[inline] + pub fn find_prefilter(&self, haystack: &[u8]) -> Option { + // SAFETY: Building a `Finder` means it's safe to call 'sse2' routines. + unsafe { self.find_prefilter_impl(haystack) } + } + + /// Execute a search using SSE2 vectors and routines. + /// + /// # Panics + /// + /// When `haystack.len()` is less than [`Finder::min_haystack_len`]. + /// + /// # Safety + /// + /// (The target feature safety obligation is automatically fulfilled by + /// virtue of being a method on `Finder`, which can only be constructed + /// when it is safe to call `sse2` routines.) 
+ #[target_feature(enable = "sse2")] + #[inline] + unsafe fn find_impl( + &self, + haystack: &[u8], + needle: &[u8], + ) -> Option { + self.0.find(haystack, needle) + } + + /// Execute a prefilter search using SSE2 vectors and routines. + /// + /// # Panics + /// + /// When `haystack.len()` is less than [`Finder::min_haystack_len`]. + /// + /// # Safety + /// + /// (The target feature safety obligation is automatically fulfilled by + /// virtue of being a method on `Finder`, which can only be constructed + /// when it is safe to call `sse2` routines.) + #[target_feature(enable = "sse2")] + #[inline] + unsafe fn find_prefilter_impl(&self, haystack: &[u8]) -> Option { + self.0.find_prefilter(haystack) + } + + /// Returns the pair of offsets (into the needle) used to check as a + /// predicate before confirming whether a needle exists at a particular + /// position. + #[inline] + pub fn pair(&self) -> &Pair { + self.0.pair() + } + + /// Returns the minimum haystack length that this `Finder` can search. + /// + /// Using a haystack with length smaller than this in a search will result + /// in a panic. The reason for this restriction is that this finder is + /// meant to be a low-level component that is part of a larger substring + /// strategy. In that sense, it avoids trying to handle all cases and + /// instead only handles the cases that it can handle very well. + #[inline] + pub fn min_haystack_len(&self) -> usize { + self.0.min_haystack_len() + } +} + +#[cfg(test)] +mod tests { + use super::*; + + fn find(haystack: &[u8], needle: &[u8]) -> Option> { + let f = Finder::new(needle)?; + if haystack.len() < f.min_haystack_len() { + return None; + } + Some(f.find(haystack, needle)) + } + + define_substring_forward_quickcheck!(find); + + #[test] + fn forward_substring() { + crate::tests::substring::Runner::new().fwd(find).run() + } + + #[test] + fn forward_packedpair() { + fn find( + haystack: &[u8], + needle: &[u8], + index1: u8, + index2: u8, + ) -> Option> { + let pair = Pair::with_indices(needle, index1, index2)?; + let f = Finder::with_pair(needle, pair)?; + if haystack.len() < f.min_haystack_len() { + return None; + } + Some(f.find(haystack, needle)) + } + crate::tests::packedpair::Runner::new().fwd(find).run() + } + + #[test] + fn forward_packedpair_prefilter() { + fn find( + haystack: &[u8], + needle: &[u8], + index1: u8, + index2: u8, + ) -> Option> { + let pair = Pair::with_indices(needle, index1, index2)?; + let f = Finder::with_pair(needle, pair)?; + if haystack.len() < f.min_haystack_len() { + return None; + } + Some(f.find_prefilter(haystack)) + } + crate::tests::packedpair::Runner::new().fwd(find).run() + } +} diff --git a/vendor/memchr/src/cow.rs b/vendor/memchr/src/cow.rs new file mode 100644 index 00000000000000..f291645728932c --- /dev/null +++ b/vendor/memchr/src/cow.rs @@ -0,0 +1,107 @@ +use core::ops; + +/// A specialized copy-on-write byte string. +/// +/// The purpose of this type is to permit usage of a "borrowed or owned +/// byte string" in a way that keeps std/no-std compatibility. That is, in +/// no-std/alloc mode, this type devolves into a simple &[u8] with no owned +/// variant available. We can't just use a plain Cow because Cow is not in +/// core. +#[derive(Clone, Debug)] +pub struct CowBytes<'a>(Imp<'a>); + +// N.B. We don't use alloc::borrow::Cow here since we can get away with a +// Box<[u8]> for our use case, which is 1/3 smaller than the Vec that +// a Cow<[u8]> would use. 
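+//
+// Rough illustration of that size claim (informational sketch, not from the
+// upstream crate): on a 64-bit target, `Box<[u8]>` is a (pointer, length)
+// pair while `Vec<u8>` additionally carries a capacity, i.e.
+//
+//     assert_eq!(core::mem::size_of::<Box<[u8]>>(), 16);
+//     assert_eq!(core::mem::size_of::<Vec<u8>>(), 24);
+//
+// so the owned representation is two words instead of three.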
+#[cfg(feature = "alloc")] +#[derive(Clone, Debug)] +enum Imp<'a> { + Borrowed(&'a [u8]), + Owned(alloc::boxed::Box<[u8]>), +} + +#[cfg(not(feature = "alloc"))] +#[derive(Clone, Debug)] +struct Imp<'a>(&'a [u8]); + +impl<'a> ops::Deref for CowBytes<'a> { + type Target = [u8]; + + #[inline(always)] + fn deref(&self) -> &[u8] { + self.as_slice() + } +} + +impl<'a> CowBytes<'a> { + /// Create a new borrowed CowBytes. + #[inline(always)] + pub(crate) fn new>(bytes: &'a B) -> CowBytes<'a> { + CowBytes(Imp::new(bytes.as_ref())) + } + + /// Create a new owned CowBytes. + #[cfg(feature = "alloc")] + #[inline(always)] + fn new_owned(bytes: alloc::boxed::Box<[u8]>) -> CowBytes<'static> { + CowBytes(Imp::Owned(bytes)) + } + + /// Return a borrowed byte string, regardless of whether this is an owned + /// or borrowed byte string internally. + #[inline(always)] + pub(crate) fn as_slice(&self) -> &[u8] { + self.0.as_slice() + } + + /// Return an owned version of this copy-on-write byte string. + /// + /// If this is already an owned byte string internally, then this is a + /// no-op. Otherwise, the internal byte string is copied. + #[cfg(feature = "alloc")] + #[inline(always)] + pub(crate) fn into_owned(self) -> CowBytes<'static> { + match self.0 { + Imp::Borrowed(b) => { + CowBytes::new_owned(alloc::boxed::Box::from(b)) + } + Imp::Owned(b) => CowBytes::new_owned(b), + } + } +} + +impl<'a> Imp<'a> { + #[inline(always)] + pub fn new(bytes: &'a [u8]) -> Imp<'a> { + #[cfg(feature = "alloc")] + { + Imp::Borrowed(bytes) + } + #[cfg(not(feature = "alloc"))] + { + Imp(bytes) + } + } + + #[cfg(feature = "alloc")] + #[inline(always)] + pub fn as_slice(&self) -> &[u8] { + #[cfg(feature = "alloc")] + { + match self { + Imp::Owned(ref x) => x, + Imp::Borrowed(x) => x, + } + } + #[cfg(not(feature = "alloc"))] + { + self.0 + } + } + + #[cfg(not(feature = "alloc"))] + #[inline(always)] + pub fn as_slice(&self) -> &[u8] { + self.0 + } +} diff --git a/vendor/memchr/src/ext.rs b/vendor/memchr/src/ext.rs new file mode 100644 index 00000000000000..802697ab34cc0c --- /dev/null +++ b/vendor/memchr/src/ext.rs @@ -0,0 +1,54 @@ +/// A trait for adding some helper routines to pointers. +pub(crate) trait Pointer { + /// Returns the distance, in units of `T`, between `self` and `origin`. + /// + /// # Safety + /// + /// Same as `ptr::offset_from` in addition to `self >= origin`. + unsafe fn distance(self, origin: Self) -> usize; + + /// Casts this pointer to `usize`. + /// + /// Callers should not convert the `usize` back to a pointer if at all + /// possible. (And if you believe it's necessary, open an issue to discuss + /// why. Otherwise, it has the potential to violate pointer provenance.) + /// The purpose of this function is just to be able to do arithmetic, i.e., + /// computing offsets or alignments. + fn as_usize(self) -> usize; +} + +impl Pointer for *const T { + unsafe fn distance(self, origin: *const T) -> usize { + // TODO: Replace with `ptr::sub_ptr` once stabilized. + usize::try_from(self.offset_from(origin)).unwrap_unchecked() + } + + fn as_usize(self) -> usize { + self as usize + } +} + +impl Pointer for *mut T { + unsafe fn distance(self, origin: *mut T) -> usize { + (self as *const T).distance(origin as *const T) + } + + fn as_usize(self) -> usize { + (self as *const T).as_usize() + } +} + +/// A trait for adding some helper routines to raw bytes. +#[cfg(test)] +pub(crate) trait Byte { + /// Converts this byte to a `char` if it's ASCII. Otherwise panics. 
+ fn to_char(self) -> char; +} + +#[cfg(test)] +impl Byte for u8 { + fn to_char(self) -> char { + assert!(self.is_ascii()); + char::from(self) + } +} diff --git a/vendor/memchr/src/lib.rs b/vendor/memchr/src/lib.rs new file mode 100644 index 00000000000000..b3105169cc1dd8 --- /dev/null +++ b/vendor/memchr/src/lib.rs @@ -0,0 +1,221 @@ +/*! +This library provides heavily optimized routines for string search primitives. + +# Overview + +This section gives a brief high level overview of what this crate offers. + +* The top-level module provides routines for searching for 1, 2 or 3 bytes + in the forward or reverse direction. When searching for more than one byte, + positions are considered a match if the byte at that position matches any + of the bytes. +* The [`memmem`] sub-module provides forward and reverse substring search + routines. + +In all such cases, routines operate on `&[u8]` without regard to encoding. This +is exactly what you want when searching either UTF-8 or arbitrary bytes. + +# Example: using `memchr` + +This example shows how to use `memchr` to find the first occurrence of `z` in +a haystack: + +``` +use memchr::memchr; + +let haystack = b"foo bar baz quuz"; +assert_eq!(Some(10), memchr(b'z', haystack)); +``` + +# Example: matching one of three possible bytes + +This examples shows how to use `memrchr3` to find occurrences of `a`, `b` or +`c`, starting at the end of the haystack. + +``` +use memchr::memchr3_iter; + +let haystack = b"xyzaxyzbxyzc"; + +let mut it = memchr3_iter(b'a', b'b', b'c', haystack).rev(); +assert_eq!(Some(11), it.next()); +assert_eq!(Some(7), it.next()); +assert_eq!(Some(3), it.next()); +assert_eq!(None, it.next()); +``` + +# Example: iterating over substring matches + +This example shows how to use the [`memmem`] sub-module to find occurrences of +a substring in a haystack. + +``` +use memchr::memmem; + +let haystack = b"foo bar foo baz foo"; + +let mut it = memmem::find_iter(haystack, "foo"); +assert_eq!(Some(0), it.next()); +assert_eq!(Some(8), it.next()); +assert_eq!(Some(16), it.next()); +assert_eq!(None, it.next()); +``` + +# Example: repeating a search for the same needle + +It may be possible for the overhead of constructing a substring searcher to be +measurable in some workloads. In cases where the same needle is used to search +many haystacks, it is possible to do construction once and thus to avoid it for +subsequent searches. This can be done with a [`memmem::Finder`]: + +``` +use memchr::memmem; + +let finder = memmem::Finder::new("foo"); + +assert_eq!(Some(4), finder.find(b"baz foo quux")); +assert_eq!(None, finder.find(b"quux baz bar")); +``` + +# Why use this crate? + +At first glance, the APIs provided by this crate might seem weird. Why provide +a dedicated routine like `memchr` for something that could be implemented +clearly and trivially in one line: + +``` +fn memchr(needle: u8, haystack: &[u8]) -> Option { + haystack.iter().position(|&b| b == needle) +} +``` + +Or similarly, why does this crate provide substring search routines when Rust's +core library already provides them? + +``` +fn search(haystack: &str, needle: &str) -> Option { + haystack.find(needle) +} +``` + +The primary reason for both of them to exist is performance. 
When it comes to +performance, at a high level at least, there are two primary ways to look at +it: + +* **Throughput**: For this, think about it as, "given some very large haystack + and a byte that never occurs in that haystack, how long does it take to + search through it and determine that it, in fact, does not occur?" +* **Latency**: For this, think about it as, "given a tiny haystack---just a + few bytes---how long does it take to determine if a byte is in it?" + +The `memchr` routine in this crate has _slightly_ worse latency than the +solution presented above, however, its throughput can easily be over an +order of magnitude faster. This is a good general purpose trade off to make. +You rarely lose, but often gain big. + +**NOTE:** The name `memchr` comes from the corresponding routine in `libc`. A +key advantage of using this library is that its performance is not tied to its +quality of implementation in the `libc` you happen to be using, which can vary +greatly from platform to platform. + +But what about substring search? This one is a bit more complicated. The +primary reason for its existence is still indeed performance, but it's also +useful because Rust's core library doesn't actually expose any substring +search routine on arbitrary bytes. The only substring search routine that +exists works exclusively on valid UTF-8. + +So if you have valid UTF-8, is there a reason to use this over the standard +library substring search routine? Yes. This routine is faster on almost every +metric, including latency. The natural question then, is why isn't this +implementation in the standard library, even if only for searching on UTF-8? +The reason is that the implementation details for using SIMD in the standard +library haven't quite been worked out yet. + +**NOTE:** Currently, only `x86_64`, `wasm32` and `aarch64` targets have vector +accelerated implementations of `memchr` (and friends) and `memmem`. + +# Crate features + +* **std** - When enabled (the default), this will permit features specific to +the standard library. Currently, the only thing used from the standard library +is runtime SIMD CPU feature detection. This means that this feature must be +enabled to get AVX2 accelerated routines on `x86_64` targets without enabling +the `avx2` feature at compile time, for example. When `std` is not enabled, +this crate will still attempt to use SSE2 accelerated routines on `x86_64`. It +will also use AVX2 accelerated routines when the `avx2` feature is enabled at +compile time. In general, enable this feature if you can. +* **alloc** - When enabled (the default), APIs in this crate requiring some +kind of allocation will become available. For example, the +[`memmem::Finder::into_owned`](crate::memmem::Finder::into_owned) API and the +[`arch::all::shiftor`](crate::arch::all::shiftor) substring search +implementation. Otherwise, this crate is designed from the ground up to be +usable in core-only contexts, so the `alloc` feature doesn't add much +currently. Notably, disabling `std` but enabling `alloc` will **not** result +in the use of AVX2 on `x86_64` targets unless the `avx2` feature is enabled +at compile time. (With `std` enabled, AVX2 can be used even without the `avx2` +feature enabled at compile time by way of runtime CPU feature detection.) +* **logging** - When enabled (disabled by default), the `log` crate is used +to emit log messages about what kinds of `memchr` and `memmem` algorithms +are used. 
Namely, both `memchr` and `memmem` have a number of different +implementation choices depending on the target and CPU, and the log messages +can help show what specific implementations are being used. Generally, this is +useful for debugging performance issues. +* **libc** - **DEPRECATED**. Previously, this enabled the use of the target's +`memchr` function from whatever `libc` was linked into the program. This +feature is now a no-op because this crate's implementation of `memchr` should +now be sufficiently fast on a number of platforms that `libc` should no longer +be needed. (This feature is somewhat of a holdover from this crate's origins. +Originally, this crate was literally just a safe wrapper function around the +`memchr` function from `libc`.) +*/ + +#![deny(missing_docs)] +#![no_std] +// It's just not worth trying to squash all dead code warnings. Pretty +// unfortunate IMO. Not really sure how to fix this other than to either +// live with it or sprinkle a whole mess of `cfg` annotations everywhere. +#![cfg_attr( + not(any( + all(target_arch = "x86_64", target_feature = "sse2"), + all(target_arch = "wasm32", target_feature = "simd128"), + target_arch = "aarch64", + )), + allow(dead_code) +)] +// Same deal for miri. +#![cfg_attr(miri, allow(dead_code, unused_macros))] + +// Supporting 8-bit (or others) would be fine. If you need it, please submit a +// bug report at https://github.com/BurntSushi/memchr +#[cfg(not(any( + target_pointer_width = "16", + target_pointer_width = "32", + target_pointer_width = "64" +)))] +compile_error!("memchr currently not supported on non-{16,32,64}"); + +#[cfg(any(test, feature = "std"))] +extern crate std; + +#[cfg(any(test, feature = "alloc"))] +extern crate alloc; + +pub use crate::memchr::{ + memchr, memchr2, memchr2_iter, memchr3, memchr3_iter, memchr_iter, + memrchr, memrchr2, memrchr2_iter, memrchr3, memrchr3_iter, memrchr_iter, + Memchr, Memchr2, Memchr3, +}; + +#[macro_use] +mod macros; + +#[cfg(test)] +#[macro_use] +mod tests; + +pub mod arch; +mod cow; +mod ext; +mod memchr; +pub mod memmem; +mod vector; diff --git a/vendor/memchr/src/macros.rs b/vendor/memchr/src/macros.rs new file mode 100644 index 00000000000000..31b4ca3816ace2 --- /dev/null +++ b/vendor/memchr/src/macros.rs @@ -0,0 +1,20 @@ +// Some feature combinations result in some of these macros never being used. +// Which is fine. Just squash the warnings. +#![allow(unused_macros)] + +macro_rules! log { + ($($tt:tt)*) => { + #[cfg(feature = "logging")] + { + $($tt)* + } + } +} + +macro_rules! debug { + ($($tt:tt)*) => { log!(log::debug!($($tt)*)) } +} + +macro_rules! trace { + ($($tt:tt)*) => { log!(log::trace!($($tt)*)) } +} diff --git a/vendor/memchr/src/memchr.rs b/vendor/memchr/src/memchr.rs new file mode 100644 index 00000000000000..92a18bd5fa9c35 --- /dev/null +++ b/vendor/memchr/src/memchr.rs @@ -0,0 +1,903 @@ +use core::iter::Rev; + +use crate::arch::generic::memchr as generic; + +/// Search for the first occurrence of a byte in a slice. +/// +/// This returns the index corresponding to the first occurrence of `needle` in +/// `haystack`, or `None` if one is not found. If an index is returned, it is +/// guaranteed to be less than `haystack.len()`. +/// +/// While this is semantically the same as something like +/// `haystack.iter().position(|&b| b == needle)`, this routine will attempt to +/// use highly optimized vector operations that can be an order of magnitude +/// faster (or more). 
+/// +/// # Example +/// +/// This shows how to find the first position of a byte in a byte string. +/// +/// ``` +/// use memchr::memchr; +/// +/// let haystack = b"the quick brown fox"; +/// assert_eq!(memchr(b'k', haystack), Some(8)); +/// ``` +#[inline] +pub fn memchr(needle: u8, haystack: &[u8]) -> Option { + // SAFETY: memchr_raw, when a match is found, always returns a valid + // pointer between start and end. + unsafe { + generic::search_slice_with_raw(haystack, |start, end| { + memchr_raw(needle, start, end) + }) + } +} + +/// Search for the last occurrence of a byte in a slice. +/// +/// This returns the index corresponding to the last occurrence of `needle` in +/// `haystack`, or `None` if one is not found. If an index is returned, it is +/// guaranteed to be less than `haystack.len()`. +/// +/// While this is semantically the same as something like +/// `haystack.iter().rposition(|&b| b == needle)`, this routine will attempt to +/// use highly optimized vector operations that can be an order of magnitude +/// faster (or more). +/// +/// # Example +/// +/// This shows how to find the last position of a byte in a byte string. +/// +/// ``` +/// use memchr::memrchr; +/// +/// let haystack = b"the quick brown fox"; +/// assert_eq!(memrchr(b'o', haystack), Some(17)); +/// ``` +#[inline] +pub fn memrchr(needle: u8, haystack: &[u8]) -> Option { + // SAFETY: memrchr_raw, when a match is found, always returns a valid + // pointer between start and end. + unsafe { + generic::search_slice_with_raw(haystack, |start, end| { + memrchr_raw(needle, start, end) + }) + } +} + +/// Search for the first occurrence of two possible bytes in a haystack. +/// +/// This returns the index corresponding to the first occurrence of one of the +/// needle bytes in `haystack`, or `None` if one is not found. If an index is +/// returned, it is guaranteed to be less than `haystack.len()`. +/// +/// While this is semantically the same as something like +/// `haystack.iter().position(|&b| b == needle1 || b == needle2)`, this routine +/// will attempt to use highly optimized vector operations that can be an order +/// of magnitude faster (or more). +/// +/// # Example +/// +/// This shows how to find the first position of one of two possible bytes in a +/// haystack. +/// +/// ``` +/// use memchr::memchr2; +/// +/// let haystack = b"the quick brown fox"; +/// assert_eq!(memchr2(b'k', b'q', haystack), Some(4)); +/// ``` +#[inline] +pub fn memchr2(needle1: u8, needle2: u8, haystack: &[u8]) -> Option { + // SAFETY: memchr2_raw, when a match is found, always returns a valid + // pointer between start and end. + unsafe { + generic::search_slice_with_raw(haystack, |start, end| { + memchr2_raw(needle1, needle2, start, end) + }) + } +} + +/// Search for the last occurrence of two possible bytes in a haystack. +/// +/// This returns the index corresponding to the last occurrence of one of the +/// needle bytes in `haystack`, or `None` if one is not found. If an index is +/// returned, it is guaranteed to be less than `haystack.len()`. +/// +/// While this is semantically the same as something like +/// `haystack.iter().rposition(|&b| b == needle1 || b == needle2)`, this +/// routine will attempt to use highly optimized vector operations that can be +/// an order of magnitude faster (or more). +/// +/// # Example +/// +/// This shows how to find the last position of one of two possible bytes in a +/// haystack. 
+/// +/// ``` +/// use memchr::memrchr2; +/// +/// let haystack = b"the quick brown fox"; +/// assert_eq!(memrchr2(b'k', b'o', haystack), Some(17)); +/// ``` +#[inline] +pub fn memrchr2(needle1: u8, needle2: u8, haystack: &[u8]) -> Option { + // SAFETY: memrchr2_raw, when a match is found, always returns a valid + // pointer between start and end. + unsafe { + generic::search_slice_with_raw(haystack, |start, end| { + memrchr2_raw(needle1, needle2, start, end) + }) + } +} + +/// Search for the first occurrence of three possible bytes in a haystack. +/// +/// This returns the index corresponding to the first occurrence of one of the +/// needle bytes in `haystack`, or `None` if one is not found. If an index is +/// returned, it is guaranteed to be less than `haystack.len()`. +/// +/// While this is semantically the same as something like +/// `haystack.iter().position(|&b| b == needle1 || b == needle2 || b == needle3)`, +/// this routine will attempt to use highly optimized vector operations that +/// can be an order of magnitude faster (or more). +/// +/// # Example +/// +/// This shows how to find the first position of one of three possible bytes in +/// a haystack. +/// +/// ``` +/// use memchr::memchr3; +/// +/// let haystack = b"the quick brown fox"; +/// assert_eq!(memchr3(b'k', b'q', b'u', haystack), Some(4)); +/// ``` +#[inline] +pub fn memchr3( + needle1: u8, + needle2: u8, + needle3: u8, + haystack: &[u8], +) -> Option { + // SAFETY: memchr3_raw, when a match is found, always returns a valid + // pointer between start and end. + unsafe { + generic::search_slice_with_raw(haystack, |start, end| { + memchr3_raw(needle1, needle2, needle3, start, end) + }) + } +} + +/// Search for the last occurrence of three possible bytes in a haystack. +/// +/// This returns the index corresponding to the last occurrence of one of the +/// needle bytes in `haystack`, or `None` if one is not found. If an index is +/// returned, it is guaranteed to be less than `haystack.len()`. +/// +/// While this is semantically the same as something like +/// `haystack.iter().rposition(|&b| b == needle1 || b == needle2 || b == needle3)`, +/// this routine will attempt to use highly optimized vector operations that +/// can be an order of magnitude faster (or more). +/// +/// # Example +/// +/// This shows how to find the last position of one of three possible bytes in +/// a haystack. +/// +/// ``` +/// use memchr::memrchr3; +/// +/// let haystack = b"the quick brown fox"; +/// assert_eq!(memrchr3(b'k', b'o', b'n', haystack), Some(17)); +/// ``` +#[inline] +pub fn memrchr3( + needle1: u8, + needle2: u8, + needle3: u8, + haystack: &[u8], +) -> Option { + // SAFETY: memrchr3_raw, when a match is found, always returns a valid + // pointer between start and end. + unsafe { + generic::search_slice_with_raw(haystack, |start, end| { + memrchr3_raw(needle1, needle2, needle3, start, end) + }) + } +} + +/// Returns an iterator over all occurrences of the needle in a haystack. +/// +/// The iterator returned implements `DoubleEndedIterator`. This means it +/// can also be used to find occurrences in reverse order. +#[inline] +pub fn memchr_iter<'h>(needle: u8, haystack: &'h [u8]) -> Memchr<'h> { + Memchr::new(needle, haystack) +} + +/// Returns an iterator over all occurrences of the needle in a haystack, in +/// reverse. +#[inline] +pub fn memrchr_iter(needle: u8, haystack: &[u8]) -> Rev> { + Memchr::new(needle, haystack).rev() +} + +/// Returns an iterator over all occurrences of the needles in a haystack. 
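+///
+/// For example, searching for either `q` or `o` (an illustrative example in
+/// the style of the other doc examples in this module):
+///
+/// ```
+/// use memchr::memchr2_iter;
+///
+/// let haystack = b"the quick brown fox";
+/// let mut it = memchr2_iter(b'q', b'o', haystack);
+/// assert_eq!(Some(4), it.next());
+/// assert_eq!(Some(12), it.next());
+/// assert_eq!(Some(17), it.next());
+/// assert_eq!(None, it.next());
+/// ```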
+/// +/// The iterator returned implements `DoubleEndedIterator`. This means it +/// can also be used to find occurrences in reverse order. +#[inline] +pub fn memchr2_iter<'h>( + needle1: u8, + needle2: u8, + haystack: &'h [u8], +) -> Memchr2<'h> { + Memchr2::new(needle1, needle2, haystack) +} + +/// Returns an iterator over all occurrences of the needles in a haystack, in +/// reverse. +#[inline] +pub fn memrchr2_iter( + needle1: u8, + needle2: u8, + haystack: &[u8], +) -> Rev> { + Memchr2::new(needle1, needle2, haystack).rev() +} + +/// Returns an iterator over all occurrences of the needles in a haystack. +/// +/// The iterator returned implements `DoubleEndedIterator`. This means it +/// can also be used to find occurrences in reverse order. +#[inline] +pub fn memchr3_iter<'h>( + needle1: u8, + needle2: u8, + needle3: u8, + haystack: &'h [u8], +) -> Memchr3<'h> { + Memchr3::new(needle1, needle2, needle3, haystack) +} + +/// Returns an iterator over all occurrences of the needles in a haystack, in +/// reverse. +#[inline] +pub fn memrchr3_iter( + needle1: u8, + needle2: u8, + needle3: u8, + haystack: &[u8], +) -> Rev> { + Memchr3::new(needle1, needle2, needle3, haystack).rev() +} + +/// An iterator over all occurrences of a single byte in a haystack. +/// +/// This iterator implements `DoubleEndedIterator`, which means it can also be +/// used to find occurrences in reverse order. +/// +/// This iterator is created by the [`memchr_iter`] or `[memrchr_iter`] +/// functions. It can also be created with the [`Memchr::new`] method. +/// +/// The lifetime parameter `'h` refers to the lifetime of the haystack being +/// searched. +#[derive(Clone, Debug)] +pub struct Memchr<'h> { + needle1: u8, + it: crate::arch::generic::memchr::Iter<'h>, +} + +impl<'h> Memchr<'h> { + /// Returns an iterator over all occurrences of the needle byte in the + /// given haystack. + /// + /// The iterator returned implements `DoubleEndedIterator`. This means it + /// can also be used to find occurrences in reverse order. + #[inline] + pub fn new(needle1: u8, haystack: &'h [u8]) -> Memchr<'h> { + Memchr { + needle1, + it: crate::arch::generic::memchr::Iter::new(haystack), + } + } +} + +impl<'h> Iterator for Memchr<'h> { + type Item = usize; + + #[inline] + fn next(&mut self) -> Option { + // SAFETY: All of our implementations of memchr ensure that any + // pointers returns will fall within the start and end bounds, and this + // upholds the safety contract of `self.it.next`. + unsafe { + // NOTE: I attempted to define an enum of previously created + // searchers and then switch on those here instead of just + // calling `memchr_raw` (or `One::new(..).find_raw(..)`). But + // that turned out to have a fair bit of extra overhead when + // searching very small haystacks. + self.it.next(|s, e| memchr_raw(self.needle1, s, e)) + } + } + + #[inline] + fn count(self) -> usize { + self.it.count(|s, e| { + // SAFETY: We rely on our generic iterator to return valid start + // and end pointers. + unsafe { count_raw(self.needle1, s, e) } + }) + } + + #[inline] + fn size_hint(&self) -> (usize, Option) { + self.it.size_hint() + } +} + +impl<'h> DoubleEndedIterator for Memchr<'h> { + #[inline] + fn next_back(&mut self) -> Option { + // SAFETY: All of our implementations of memchr ensure that any + // pointers returns will fall within the start and end bounds, and this + // upholds the safety contract of `self.it.next_back`. 
+ unsafe { self.it.next_back(|s, e| memrchr_raw(self.needle1, s, e)) } + } +} + +impl<'h> core::iter::FusedIterator for Memchr<'h> {} + +/// An iterator over all occurrences of two possible bytes in a haystack. +/// +/// This iterator implements `DoubleEndedIterator`, which means it can also be +/// used to find occurrences in reverse order. +/// +/// This iterator is created by the [`memchr2_iter`] or `[memrchr2_iter`] +/// functions. It can also be created with the [`Memchr2::new`] method. +/// +/// The lifetime parameter `'h` refers to the lifetime of the haystack being +/// searched. +#[derive(Clone, Debug)] +pub struct Memchr2<'h> { + needle1: u8, + needle2: u8, + it: crate::arch::generic::memchr::Iter<'h>, +} + +impl<'h> Memchr2<'h> { + /// Returns an iterator over all occurrences of the needle bytes in the + /// given haystack. + /// + /// The iterator returned implements `DoubleEndedIterator`. This means it + /// can also be used to find occurrences in reverse order. + #[inline] + pub fn new(needle1: u8, needle2: u8, haystack: &'h [u8]) -> Memchr2<'h> { + Memchr2 { + needle1, + needle2, + it: crate::arch::generic::memchr::Iter::new(haystack), + } + } +} + +impl<'h> Iterator for Memchr2<'h> { + type Item = usize; + + #[inline] + fn next(&mut self) -> Option { + // SAFETY: All of our implementations of memchr ensure that any + // pointers returns will fall within the start and end bounds, and this + // upholds the safety contract of `self.it.next`. + unsafe { + self.it.next(|s, e| memchr2_raw(self.needle1, self.needle2, s, e)) + } + } + + #[inline] + fn size_hint(&self) -> (usize, Option) { + self.it.size_hint() + } +} + +impl<'h> DoubleEndedIterator for Memchr2<'h> { + #[inline] + fn next_back(&mut self) -> Option { + // SAFETY: All of our implementations of memchr ensure that any + // pointers returns will fall within the start and end bounds, and this + // upholds the safety contract of `self.it.next_back`. + unsafe { + self.it.next_back(|s, e| { + memrchr2_raw(self.needle1, self.needle2, s, e) + }) + } + } +} + +impl<'h> core::iter::FusedIterator for Memchr2<'h> {} + +/// An iterator over all occurrences of three possible bytes in a haystack. +/// +/// This iterator implements `DoubleEndedIterator`, which means it can also be +/// used to find occurrences in reverse order. +/// +/// This iterator is created by the [`memchr2_iter`] or `[memrchr2_iter`] +/// functions. It can also be created with the [`Memchr3::new`] method. +/// +/// The lifetime parameter `'h` refers to the lifetime of the haystack being +/// searched. +#[derive(Clone, Debug)] +pub struct Memchr3<'h> { + needle1: u8, + needle2: u8, + needle3: u8, + it: crate::arch::generic::memchr::Iter<'h>, +} + +impl<'h> Memchr3<'h> { + /// Returns an iterator over all occurrences of the needle bytes in the + /// given haystack. + /// + /// The iterator returned implements `DoubleEndedIterator`. This means it + /// can also be used to find occurrences in reverse order. + #[inline] + pub fn new( + needle1: u8, + needle2: u8, + needle3: u8, + haystack: &'h [u8], + ) -> Memchr3<'h> { + Memchr3 { + needle1, + needle2, + needle3, + it: crate::arch::generic::memchr::Iter::new(haystack), + } + } +} + +impl<'h> Iterator for Memchr3<'h> { + type Item = usize; + + #[inline] + fn next(&mut self) -> Option { + // SAFETY: All of our implementations of memchr ensure that any + // pointers returns will fall within the start and end bounds, and this + // upholds the safety contract of `self.it.next`. 
+ unsafe { + self.it.next(|s, e| { + memchr3_raw(self.needle1, self.needle2, self.needle3, s, e) + }) + } + } + + #[inline] + fn size_hint(&self) -> (usize, Option) { + self.it.size_hint() + } +} + +impl<'h> DoubleEndedIterator for Memchr3<'h> { + #[inline] + fn next_back(&mut self) -> Option { + // SAFETY: All of our implementations of memchr ensure that any + // pointers returns will fall within the start and end bounds, and this + // upholds the safety contract of `self.it.next_back`. + unsafe { + self.it.next_back(|s, e| { + memrchr3_raw(self.needle1, self.needle2, self.needle3, s, e) + }) + } + } +} + +impl<'h> core::iter::FusedIterator for Memchr3<'h> {} + +/// memchr, but using raw pointers to represent the haystack. +/// +/// # Safety +/// +/// Pointers must be valid. See `One::find_raw`. +#[inline] +unsafe fn memchr_raw( + needle: u8, + start: *const u8, + end: *const u8, +) -> Option<*const u8> { + #[cfg(target_arch = "x86_64")] + { + // x86_64 does CPU feature detection at runtime in order to use AVX2 + // instructions even when the `avx2` feature isn't enabled at compile + // time. This function also handles using a fallback if neither AVX2 + // nor SSE2 (unusual) are available. + crate::arch::x86_64::memchr::memchr_raw(needle, start, end) + } + #[cfg(all(target_arch = "wasm32", target_feature = "simd128"))] + { + crate::arch::wasm32::memchr::memchr_raw(needle, start, end) + } + #[cfg(target_arch = "aarch64")] + { + crate::arch::aarch64::memchr::memchr_raw(needle, start, end) + } + #[cfg(not(any( + target_arch = "x86_64", + all(target_arch = "wasm32", target_feature = "simd128"), + target_arch = "aarch64" + )))] + { + crate::arch::all::memchr::One::new(needle).find_raw(start, end) + } +} + +/// memrchr, but using raw pointers to represent the haystack. +/// +/// # Safety +/// +/// Pointers must be valid. See `One::rfind_raw`. +#[inline] +unsafe fn memrchr_raw( + needle: u8, + start: *const u8, + end: *const u8, +) -> Option<*const u8> { + #[cfg(target_arch = "x86_64")] + { + crate::arch::x86_64::memchr::memrchr_raw(needle, start, end) + } + #[cfg(all(target_arch = "wasm32", target_feature = "simd128"))] + { + crate::arch::wasm32::memchr::memrchr_raw(needle, start, end) + } + #[cfg(target_arch = "aarch64")] + { + crate::arch::aarch64::memchr::memrchr_raw(needle, start, end) + } + #[cfg(not(any( + target_arch = "x86_64", + all(target_arch = "wasm32", target_feature = "simd128"), + target_arch = "aarch64" + )))] + { + crate::arch::all::memchr::One::new(needle).rfind_raw(start, end) + } +} + +/// memchr2, but using raw pointers to represent the haystack. +/// +/// # Safety +/// +/// Pointers must be valid. See `Two::find_raw`. +#[inline] +unsafe fn memchr2_raw( + needle1: u8, + needle2: u8, + start: *const u8, + end: *const u8, +) -> Option<*const u8> { + #[cfg(target_arch = "x86_64")] + { + crate::arch::x86_64::memchr::memchr2_raw(needle1, needle2, start, end) + } + #[cfg(all(target_arch = "wasm32", target_feature = "simd128"))] + { + crate::arch::wasm32::memchr::memchr2_raw(needle1, needle2, start, end) + } + #[cfg(target_arch = "aarch64")] + { + crate::arch::aarch64::memchr::memchr2_raw(needle1, needle2, start, end) + } + #[cfg(not(any( + target_arch = "x86_64", + all(target_arch = "wasm32", target_feature = "simd128"), + target_arch = "aarch64" + )))] + { + crate::arch::all::memchr::Two::new(needle1, needle2) + .find_raw(start, end) + } +} + +/// memrchr2, but using raw pointers to represent the haystack. +/// +/// # Safety +/// +/// Pointers must be valid. 
See `Two::rfind_raw`. +#[inline] +unsafe fn memrchr2_raw( + needle1: u8, + needle2: u8, + start: *const u8, + end: *const u8, +) -> Option<*const u8> { + #[cfg(target_arch = "x86_64")] + { + crate::arch::x86_64::memchr::memrchr2_raw(needle1, needle2, start, end) + } + #[cfg(all(target_arch = "wasm32", target_feature = "simd128"))] + { + crate::arch::wasm32::memchr::memrchr2_raw(needle1, needle2, start, end) + } + #[cfg(target_arch = "aarch64")] + { + crate::arch::aarch64::memchr::memrchr2_raw( + needle1, needle2, start, end, + ) + } + #[cfg(not(any( + target_arch = "x86_64", + all(target_arch = "wasm32", target_feature = "simd128"), + target_arch = "aarch64" + )))] + { + crate::arch::all::memchr::Two::new(needle1, needle2) + .rfind_raw(start, end) + } +} + +/// memchr3, but using raw pointers to represent the haystack. +/// +/// # Safety +/// +/// Pointers must be valid. See `Three::find_raw`. +#[inline] +unsafe fn memchr3_raw( + needle1: u8, + needle2: u8, + needle3: u8, + start: *const u8, + end: *const u8, +) -> Option<*const u8> { + #[cfg(target_arch = "x86_64")] + { + crate::arch::x86_64::memchr::memchr3_raw( + needle1, needle2, needle3, start, end, + ) + } + #[cfg(all(target_arch = "wasm32", target_feature = "simd128"))] + { + crate::arch::wasm32::memchr::memchr3_raw( + needle1, needle2, needle3, start, end, + ) + } + #[cfg(target_arch = "aarch64")] + { + crate::arch::aarch64::memchr::memchr3_raw( + needle1, needle2, needle3, start, end, + ) + } + #[cfg(not(any( + target_arch = "x86_64", + all(target_arch = "wasm32", target_feature = "simd128"), + target_arch = "aarch64" + )))] + { + crate::arch::all::memchr::Three::new(needle1, needle2, needle3) + .find_raw(start, end) + } +} + +/// memrchr3, but using raw pointers to represent the haystack. +/// +/// # Safety +/// +/// Pointers must be valid. See `Three::rfind_raw`. +#[inline] +unsafe fn memrchr3_raw( + needle1: u8, + needle2: u8, + needle3: u8, + start: *const u8, + end: *const u8, +) -> Option<*const u8> { + #[cfg(target_arch = "x86_64")] + { + crate::arch::x86_64::memchr::memrchr3_raw( + needle1, needle2, needle3, start, end, + ) + } + #[cfg(all(target_arch = "wasm32", target_feature = "simd128"))] + { + crate::arch::wasm32::memchr::memrchr3_raw( + needle1, needle2, needle3, start, end, + ) + } + #[cfg(target_arch = "aarch64")] + { + crate::arch::aarch64::memchr::memrchr3_raw( + needle1, needle2, needle3, start, end, + ) + } + #[cfg(not(any( + target_arch = "x86_64", + all(target_arch = "wasm32", target_feature = "simd128"), + target_arch = "aarch64" + )))] + { + crate::arch::all::memchr::Three::new(needle1, needle2, needle3) + .rfind_raw(start, end) + } +} + +/// Count all matching bytes, but using raw pointers to represent the haystack. +/// +/// # Safety +/// +/// Pointers must be valid. See `One::count_raw`. 
+#[inline] +unsafe fn count_raw(needle: u8, start: *const u8, end: *const u8) -> usize { + #[cfg(target_arch = "x86_64")] + { + crate::arch::x86_64::memchr::count_raw(needle, start, end) + } + #[cfg(all(target_arch = "wasm32", target_feature = "simd128"))] + { + crate::arch::wasm32::memchr::count_raw(needle, start, end) + } + #[cfg(target_arch = "aarch64")] + { + crate::arch::aarch64::memchr::count_raw(needle, start, end) + } + #[cfg(not(any( + target_arch = "x86_64", + all(target_arch = "wasm32", target_feature = "simd128"), + target_arch = "aarch64" + )))] + { + crate::arch::all::memchr::One::new(needle).count_raw(start, end) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn forward1_iter() { + crate::tests::memchr::Runner::new(1).forward_iter( + |haystack, needles| { + Some(memchr_iter(needles[0], haystack).collect()) + }, + ) + } + + #[test] + fn forward1_oneshot() { + crate::tests::memchr::Runner::new(1).forward_oneshot( + |haystack, needles| Some(memchr(needles[0], haystack)), + ) + } + + #[test] + fn reverse1_iter() { + crate::tests::memchr::Runner::new(1).reverse_iter( + |haystack, needles| { + Some(memrchr_iter(needles[0], haystack).collect()) + }, + ) + } + + #[test] + fn reverse1_oneshot() { + crate::tests::memchr::Runner::new(1).reverse_oneshot( + |haystack, needles| Some(memrchr(needles[0], haystack)), + ) + } + + #[test] + fn count1_iter() { + crate::tests::memchr::Runner::new(1).count_iter(|haystack, needles| { + Some(memchr_iter(needles[0], haystack).count()) + }) + } + + #[test] + fn forward2_iter() { + crate::tests::memchr::Runner::new(2).forward_iter( + |haystack, needles| { + let n1 = needles.get(0).copied()?; + let n2 = needles.get(1).copied()?; + Some(memchr2_iter(n1, n2, haystack).collect()) + }, + ) + } + + #[test] + fn forward2_oneshot() { + crate::tests::memchr::Runner::new(2).forward_oneshot( + |haystack, needles| { + let n1 = needles.get(0).copied()?; + let n2 = needles.get(1).copied()?; + Some(memchr2(n1, n2, haystack)) + }, + ) + } + + #[test] + fn reverse2_iter() { + crate::tests::memchr::Runner::new(2).reverse_iter( + |haystack, needles| { + let n1 = needles.get(0).copied()?; + let n2 = needles.get(1).copied()?; + Some(memrchr2_iter(n1, n2, haystack).collect()) + }, + ) + } + + #[test] + fn reverse2_oneshot() { + crate::tests::memchr::Runner::new(2).reverse_oneshot( + |haystack, needles| { + let n1 = needles.get(0).copied()?; + let n2 = needles.get(1).copied()?; + Some(memrchr2(n1, n2, haystack)) + }, + ) + } + + #[test] + fn forward3_iter() { + crate::tests::memchr::Runner::new(3).forward_iter( + |haystack, needles| { + let n1 = needles.get(0).copied()?; + let n2 = needles.get(1).copied()?; + let n3 = needles.get(2).copied()?; + Some(memchr3_iter(n1, n2, n3, haystack).collect()) + }, + ) + } + + #[test] + fn forward3_oneshot() { + crate::tests::memchr::Runner::new(3).forward_oneshot( + |haystack, needles| { + let n1 = needles.get(0).copied()?; + let n2 = needles.get(1).copied()?; + let n3 = needles.get(2).copied()?; + Some(memchr3(n1, n2, n3, haystack)) + }, + ) + } + + #[test] + fn reverse3_iter() { + crate::tests::memchr::Runner::new(3).reverse_iter( + |haystack, needles| { + let n1 = needles.get(0).copied()?; + let n2 = needles.get(1).copied()?; + let n3 = needles.get(2).copied()?; + Some(memrchr3_iter(n1, n2, n3, haystack).collect()) + }, + ) + } + + #[test] + fn reverse3_oneshot() { + crate::tests::memchr::Runner::new(3).reverse_oneshot( + |haystack, needles| { + let n1 = needles.get(0).copied()?; + let n2 = needles.get(1).copied()?; 
+                let n3 = needles.get(2).copied()?;
+                Some(memrchr3(n1, n2, n3, haystack))
+            },
+        )
+    }
+
+    // Prior to memchr 2.6, the memchr iterators both implemented Send and
+    // Sync. But in memchr 2.6, the iterator changed to use raw pointers
+    // internally and I didn't add explicit Send/Sync impls. This ended up
+    // regressing the API. This test ensures we don't do that again.
+    //
+    // See: https://github.com/BurntSushi/memchr/issues/133
+    #[test]
+    fn sync_regression() {
+        use core::panic::{RefUnwindSafe, UnwindSafe};
+
+        fn assert_send_sync<T: Send + Sync + UnwindSafe + RefUnwindSafe>() {}
+        assert_send_sync::<Memchr>();
+        assert_send_sync::<Memchr2>();
+        assert_send_sync::<Memchr3>()
+    }
+}
diff --git a/vendor/memchr/src/memmem/mod.rs b/vendor/memchr/src/memmem/mod.rs
new file mode 100644
index 00000000000000..4f04943e6497c5
--- /dev/null
+++ b/vendor/memchr/src/memmem/mod.rs
@@ -0,0 +1,737 @@
+/*!
+This module provides forward and reverse substring search routines.
+
+Unlike the standard library's substring search routines, these work on
+arbitrary bytes. For all non-empty needles, these routines will report exactly
+the same values as the corresponding routines in the standard library. For
+the empty needle, the standard library reports matches only at valid UTF-8
+boundaries, whereas these routines will report matches at every position.
+
+Other than being able to work on arbitrary bytes, the primary reason to prefer
+these routines over the standard library routines is that these will generally
+be faster. In some cases, significantly so.
+
+# Example: iterating over substring matches
+
+This example shows how to use [`find_iter`] to find occurrences of a substring
+in a haystack.
+
+```
+use memchr::memmem;
+
+let haystack = b"foo bar foo baz foo";
+
+let mut it = memmem::find_iter(haystack, "foo");
+assert_eq!(Some(0), it.next());
+assert_eq!(Some(8), it.next());
+assert_eq!(Some(16), it.next());
+assert_eq!(None, it.next());
+```
+
+# Example: iterating over substring matches in reverse
+
+This example shows how to use [`rfind_iter`] to find occurrences of a substring
+in a haystack starting from the end of the haystack.
+
+**NOTE:** This module does not implement double ended iterators, so reverse
+searches aren't done by calling `rev` on a forward iterator.
+
+```
+use memchr::memmem;
+
+let haystack = b"foo bar foo baz foo";
+
+let mut it = memmem::rfind_iter(haystack, "foo");
+assert_eq!(Some(16), it.next());
+assert_eq!(Some(8), it.next());
+assert_eq!(Some(0), it.next());
+assert_eq!(None, it.next());
+```
+
+# Example: repeating a search for the same needle
+
+It may be possible for the overhead of constructing a substring searcher to be
+measurable in some workloads. In cases where the same needle is used to search
+many haystacks, it is possible to do construction once and thus to avoid it for
+subsequent searches. This can be done with a [`Finder`] (or a [`FinderRev`] for
+reverse searches).
+
+```
+use memchr::memmem;
+
+let finder = memmem::Finder::new("foo");
+
+assert_eq!(Some(4), finder.find(b"baz foo quux"));
+assert_eq!(None, finder.find(b"quux baz bar"));
+```
+*/
+
+pub use crate::memmem::searcher::PrefilterConfig as Prefilter;
+
+// This is exported here for use in the crate::arch::all::twoway
+// implementation. This is essentially an abstraction breaker. Namely, the
+// public API of twoway doesn't support providing a prefilter, but its crate
+// internal API does. The main reason for this is that I didn't want to do the
+// API design required to support it without a concrete use case.
+pub(crate) use crate::memmem::searcher::Pre; + +use crate::{ + arch::all::{ + packedpair::{DefaultFrequencyRank, HeuristicFrequencyRank}, + rabinkarp, + }, + cow::CowBytes, + memmem::searcher::{PrefilterState, Searcher, SearcherRev}, +}; + +mod searcher; + +/// Returns an iterator over all non-overlapping occurrences of a substring in +/// a haystack. +/// +/// # Complexity +/// +/// This routine is guaranteed to have worst case linear time complexity +/// with respect to both the needle and the haystack. That is, this runs +/// in `O(needle.len() + haystack.len())` time. +/// +/// This routine is also guaranteed to have worst case constant space +/// complexity. +/// +/// # Examples +/// +/// Basic usage: +/// +/// ``` +/// use memchr::memmem; +/// +/// let haystack = b"foo bar foo baz foo"; +/// let mut it = memmem::find_iter(haystack, b"foo"); +/// assert_eq!(Some(0), it.next()); +/// assert_eq!(Some(8), it.next()); +/// assert_eq!(Some(16), it.next()); +/// assert_eq!(None, it.next()); +/// ``` +#[inline] +pub fn find_iter<'h, 'n, N: 'n + ?Sized + AsRef<[u8]>>( + haystack: &'h [u8], + needle: &'n N, +) -> FindIter<'h, 'n> { + FindIter::new(haystack, Finder::new(needle)) +} + +/// Returns a reverse iterator over all non-overlapping occurrences of a +/// substring in a haystack. +/// +/// # Complexity +/// +/// This routine is guaranteed to have worst case linear time complexity +/// with respect to both the needle and the haystack. That is, this runs +/// in `O(needle.len() + haystack.len())` time. +/// +/// This routine is also guaranteed to have worst case constant space +/// complexity. +/// +/// # Examples +/// +/// Basic usage: +/// +/// ``` +/// use memchr::memmem; +/// +/// let haystack = b"foo bar foo baz foo"; +/// let mut it = memmem::rfind_iter(haystack, b"foo"); +/// assert_eq!(Some(16), it.next()); +/// assert_eq!(Some(8), it.next()); +/// assert_eq!(Some(0), it.next()); +/// assert_eq!(None, it.next()); +/// ``` +#[inline] +pub fn rfind_iter<'h, 'n, N: 'n + ?Sized + AsRef<[u8]>>( + haystack: &'h [u8], + needle: &'n N, +) -> FindRevIter<'h, 'n> { + FindRevIter::new(haystack, FinderRev::new(needle)) +} + +/// Returns the index of the first occurrence of the given needle. +/// +/// Note that if you're are searching for the same needle in many different +/// small haystacks, it may be faster to initialize a [`Finder`] once, +/// and reuse it for each search. +/// +/// # Complexity +/// +/// This routine is guaranteed to have worst case linear time complexity +/// with respect to both the needle and the haystack. That is, this runs +/// in `O(needle.len() + haystack.len())` time. +/// +/// This routine is also guaranteed to have worst case constant space +/// complexity. +/// +/// # Examples +/// +/// Basic usage: +/// +/// ``` +/// use memchr::memmem; +/// +/// let haystack = b"foo bar baz"; +/// assert_eq!(Some(0), memmem::find(haystack, b"foo")); +/// assert_eq!(Some(4), memmem::find(haystack, b"bar")); +/// assert_eq!(None, memmem::find(haystack, b"quux")); +/// ``` +#[inline] +pub fn find(haystack: &[u8], needle: &[u8]) -> Option { + if haystack.len() < 64 { + rabinkarp::Finder::new(needle).find(haystack, needle) + } else { + Finder::new(needle).find(haystack) + } +} + +/// Returns the index of the last occurrence of the given needle. +/// +/// Note that if you're are searching for the same needle in many different +/// small haystacks, it may be faster to initialize a [`FinderRev`] once, +/// and reuse it for each search. 
+/// +/// # Complexity +/// +/// This routine is guaranteed to have worst case linear time complexity +/// with respect to both the needle and the haystack. That is, this runs +/// in `O(needle.len() + haystack.len())` time. +/// +/// This routine is also guaranteed to have worst case constant space +/// complexity. +/// +/// # Examples +/// +/// Basic usage: +/// +/// ``` +/// use memchr::memmem; +/// +/// let haystack = b"foo bar baz"; +/// assert_eq!(Some(0), memmem::rfind(haystack, b"foo")); +/// assert_eq!(Some(4), memmem::rfind(haystack, b"bar")); +/// assert_eq!(Some(8), memmem::rfind(haystack, b"ba")); +/// assert_eq!(None, memmem::rfind(haystack, b"quux")); +/// ``` +#[inline] +pub fn rfind(haystack: &[u8], needle: &[u8]) -> Option { + if haystack.len() < 64 { + rabinkarp::FinderRev::new(needle).rfind(haystack, needle) + } else { + FinderRev::new(needle).rfind(haystack) + } +} + +/// An iterator over non-overlapping substring matches. +/// +/// Matches are reported by the byte offset at which they begin. +/// +/// `'h` is the lifetime of the haystack while `'n` is the lifetime of the +/// needle. +#[derive(Debug, Clone)] +pub struct FindIter<'h, 'n> { + haystack: &'h [u8], + prestate: PrefilterState, + finder: Finder<'n>, + pos: usize, +} + +impl<'h, 'n> FindIter<'h, 'n> { + #[inline(always)] + pub(crate) fn new( + haystack: &'h [u8], + finder: Finder<'n>, + ) -> FindIter<'h, 'n> { + let prestate = PrefilterState::new(); + FindIter { haystack, prestate, finder, pos: 0 } + } + + /// Convert this iterator into its owned variant, such that it no longer + /// borrows the finder and needle. + /// + /// If this is already an owned iterator, then this is a no-op. Otherwise, + /// this copies the needle. + /// + /// This is only available when the `alloc` feature is enabled. + #[cfg(feature = "alloc")] + #[inline] + pub fn into_owned(self) -> FindIter<'h, 'static> { + FindIter { + haystack: self.haystack, + prestate: self.prestate, + finder: self.finder.into_owned(), + pos: self.pos, + } + } +} + +impl<'h, 'n> Iterator for FindIter<'h, 'n> { + type Item = usize; + + fn next(&mut self) -> Option { + let needle = self.finder.needle(); + let haystack = self.haystack.get(self.pos..)?; + let idx = + self.finder.searcher.find(&mut self.prestate, haystack, needle)?; + + let pos = self.pos + idx; + self.pos = pos + needle.len().max(1); + + Some(pos) + } + + fn size_hint(&self) -> (usize, Option) { + // The largest possible number of non-overlapping matches is the + // quotient of the haystack and the needle (or the length of the + // haystack, if the needle is empty) + match self.haystack.len().checked_sub(self.pos) { + None => (0, Some(0)), + Some(haystack_len) => match self.finder.needle().len() { + // Empty needles always succeed and match at every point + // (including the very end) + 0 => ( + haystack_len.saturating_add(1), + haystack_len.checked_add(1), + ), + needle_len => (0, Some(haystack_len / needle_len)), + }, + } + } +} + +/// An iterator over non-overlapping substring matches in reverse. +/// +/// Matches are reported by the byte offset at which they begin. +/// +/// `'h` is the lifetime of the haystack while `'n` is the lifetime of the +/// needle. +#[derive(Clone, Debug)] +pub struct FindRevIter<'h, 'n> { + haystack: &'h [u8], + finder: FinderRev<'n>, + /// When searching with an empty needle, this gets set to `None` after + /// we've yielded the last element at `0`. 
+    pos: Option<usize>,
+}
+
+impl<'h, 'n> FindRevIter<'h, 'n> {
+    #[inline(always)]
+    pub(crate) fn new(
+        haystack: &'h [u8],
+        finder: FinderRev<'n>,
+    ) -> FindRevIter<'h, 'n> {
+        let pos = Some(haystack.len());
+        FindRevIter { haystack, finder, pos }
+    }
+
+    /// Convert this iterator into its owned variant, such that it no longer
+    /// borrows the finder and needle.
+    ///
+    /// If this is already an owned iterator, then this is a no-op. Otherwise,
+    /// this copies the needle.
+    ///
+    /// This is only available when the `std` feature is enabled.
+    #[cfg(feature = "alloc")]
+    #[inline]
+    pub fn into_owned(self) -> FindRevIter<'h, 'static> {
+        FindRevIter {
+            haystack: self.haystack,
+            finder: self.finder.into_owned(),
+            pos: self.pos,
+        }
+    }
+}
+
+impl<'h, 'n> Iterator for FindRevIter<'h, 'n> {
+    type Item = usize;
+
+    fn next(&mut self) -> Option<usize> {
+        let pos = match self.pos {
+            None => return None,
+            Some(pos) => pos,
+        };
+        let result = self.finder.rfind(&self.haystack[..pos]);
+        match result {
+            None => None,
+            Some(i) => {
+                if pos == i {
+                    self.pos = pos.checked_sub(1);
+                } else {
+                    self.pos = Some(i);
+                }
+                Some(i)
+            }
+        }
+    }
+}
+
+/// A single substring searcher fixed to a particular needle.
+///
+/// The purpose of this type is to permit callers to construct a substring
+/// searcher that can be used to search haystacks without the overhead of
+/// constructing the searcher in the first place. This is a somewhat niche
+/// concern when it's necessary to re-use the same needle to search multiple
+/// different haystacks with as little overhead as possible. In general, using
+/// [`find`] is good enough, but `Finder` is useful when you can meaningfully
+/// observe searcher construction time in a profile.
+///
+/// When the `std` feature is enabled, then this type has an `into_owned`
+/// version which permits building a `Finder` that is not connected to
+/// the lifetime of its needle.
+#[derive(Clone, Debug)]
+pub struct Finder<'n> {
+    needle: CowBytes<'n>,
+    searcher: Searcher,
+}
+
+impl<'n> Finder<'n> {
+    /// Create a new finder for the given needle.
+    #[inline]
+    pub fn new<B: ?Sized + AsRef<[u8]>>(needle: &'n B) -> Finder<'n> {
+        FinderBuilder::new().build_forward(needle)
+    }
+
+    /// Returns the index of the first occurrence of this needle in the given
+    /// haystack.
+    ///
+    /// # Complexity
+    ///
+    /// This routine is guaranteed to have worst case linear time complexity
+    /// with respect to both the needle and the haystack. That is, this runs
+    /// in `O(needle.len() + haystack.len())` time.
+    ///
+    /// This routine is also guaranteed to have worst case constant space
+    /// complexity.
+    ///
+    /// # Examples
+    ///
+    /// Basic usage:
+    ///
+    /// ```
+    /// use memchr::memmem::Finder;
+    ///
+    /// let haystack = b"foo bar baz";
+    /// assert_eq!(Some(0), Finder::new("foo").find(haystack));
+    /// assert_eq!(Some(4), Finder::new("bar").find(haystack));
+    /// assert_eq!(None, Finder::new("quux").find(haystack));
+    /// ```
+    #[inline]
+    pub fn find(&self, haystack: &[u8]) -> Option<usize> {
+        let mut prestate = PrefilterState::new();
+        let needle = self.needle.as_slice();
+        self.searcher.find(&mut prestate, haystack, needle)
+    }
+
+    /// Returns an iterator over all occurrences of a substring in a haystack.
+    ///
+    /// # Complexity
+    ///
+    /// This routine is guaranteed to have worst case linear time complexity
+    /// with respect to both the needle and the haystack. That is, this runs
+    /// in `O(needle.len() + haystack.len())` time.
+ /// + /// This routine is also guaranteed to have worst case constant space + /// complexity. + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// use memchr::memmem::Finder; + /// + /// let haystack = b"foo bar foo baz foo"; + /// let finder = Finder::new(b"foo"); + /// let mut it = finder.find_iter(haystack); + /// assert_eq!(Some(0), it.next()); + /// assert_eq!(Some(8), it.next()); + /// assert_eq!(Some(16), it.next()); + /// assert_eq!(None, it.next()); + /// ``` + #[inline] + pub fn find_iter<'a, 'h>( + &'a self, + haystack: &'h [u8], + ) -> FindIter<'h, 'a> { + FindIter::new(haystack, self.as_ref()) + } + + /// Convert this finder into its owned variant, such that it no longer + /// borrows the needle. + /// + /// If this is already an owned finder, then this is a no-op. Otherwise, + /// this copies the needle. + /// + /// This is only available when the `alloc` feature is enabled. + #[cfg(feature = "alloc")] + #[inline] + pub fn into_owned(self) -> Finder<'static> { + Finder { + needle: self.needle.into_owned(), + searcher: self.searcher.clone(), + } + } + + /// Convert this finder into its borrowed variant. + /// + /// This is primarily useful if your finder is owned and you'd like to + /// store its borrowed variant in some intermediate data structure. + /// + /// Note that the lifetime parameter of the returned finder is tied to the + /// lifetime of `self`, and may be shorter than the `'n` lifetime of the + /// needle itself. Namely, a finder's needle can be either borrowed or + /// owned, so the lifetime of the needle returned must necessarily be the + /// shorter of the two. + #[inline] + pub fn as_ref(&self) -> Finder<'_> { + Finder { + needle: CowBytes::new(self.needle()), + searcher: self.searcher.clone(), + } + } + + /// Returns the needle that this finder searches for. + /// + /// Note that the lifetime of the needle returned is tied to the lifetime + /// of the finder, and may be shorter than the `'n` lifetime. Namely, a + /// finder's needle can be either borrowed or owned, so the lifetime of the + /// needle returned must necessarily be the shorter of the two. + #[inline] + pub fn needle(&self) -> &[u8] { + self.needle.as_slice() + } +} + +/// A single substring reverse searcher fixed to a particular needle. +/// +/// The purpose of this type is to permit callers to construct a substring +/// searcher that can be used to search haystacks without the overhead of +/// constructing the searcher in the first place. This is a somewhat niche +/// concern when it's necessary to re-use the same needle to search multiple +/// different haystacks with as little overhead as possible. In general, +/// using [`rfind`] is good enough, but `FinderRev` is useful when you can +/// meaningfully observe searcher construction time in a profile. +/// +/// When the `std` feature is enabled, then this type has an `into_owned` +/// version which permits building a `FinderRev` that is not connected to +/// the lifetime of its needle. +#[derive(Clone, Debug)] +pub struct FinderRev<'n> { + needle: CowBytes<'n>, + searcher: SearcherRev, +} + +impl<'n> FinderRev<'n> { + /// Create a new reverse finder for the given needle. + #[inline] + pub fn new>(needle: &'n B) -> FinderRev<'n> { + FinderBuilder::new().build_reverse(needle) + } + + /// Returns the index of the last occurrence of this needle in the given + /// haystack. + /// + /// The haystack may be any type that can be cheaply converted into a + /// `&[u8]`. This includes, but is not limited to, `&str` and `&[u8]`. 
+ /// + /// # Complexity + /// + /// This routine is guaranteed to have worst case linear time complexity + /// with respect to both the needle and the haystack. That is, this runs + /// in `O(needle.len() + haystack.len())` time. + /// + /// This routine is also guaranteed to have worst case constant space + /// complexity. + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// use memchr::memmem::FinderRev; + /// + /// let haystack = b"foo bar baz"; + /// assert_eq!(Some(0), FinderRev::new("foo").rfind(haystack)); + /// assert_eq!(Some(4), FinderRev::new("bar").rfind(haystack)); + /// assert_eq!(None, FinderRev::new("quux").rfind(haystack)); + /// ``` + pub fn rfind>(&self, haystack: B) -> Option { + self.searcher.rfind(haystack.as_ref(), self.needle.as_slice()) + } + + /// Returns a reverse iterator over all occurrences of a substring in a + /// haystack. + /// + /// # Complexity + /// + /// This routine is guaranteed to have worst case linear time complexity + /// with respect to both the needle and the haystack. That is, this runs + /// in `O(needle.len() + haystack.len())` time. + /// + /// This routine is also guaranteed to have worst case constant space + /// complexity. + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// use memchr::memmem::FinderRev; + /// + /// let haystack = b"foo bar foo baz foo"; + /// let finder = FinderRev::new(b"foo"); + /// let mut it = finder.rfind_iter(haystack); + /// assert_eq!(Some(16), it.next()); + /// assert_eq!(Some(8), it.next()); + /// assert_eq!(Some(0), it.next()); + /// assert_eq!(None, it.next()); + /// ``` + #[inline] + pub fn rfind_iter<'a, 'h>( + &'a self, + haystack: &'h [u8], + ) -> FindRevIter<'h, 'a> { + FindRevIter::new(haystack, self.as_ref()) + } + + /// Convert this finder into its owned variant, such that it no longer + /// borrows the needle. + /// + /// If this is already an owned finder, then this is a no-op. Otherwise, + /// this copies the needle. + /// + /// This is only available when the `std` feature is enabled. + #[cfg(feature = "alloc")] + #[inline] + pub fn into_owned(self) -> FinderRev<'static> { + FinderRev { + needle: self.needle.into_owned(), + searcher: self.searcher.clone(), + } + } + + /// Convert this finder into its borrowed variant. + /// + /// This is primarily useful if your finder is owned and you'd like to + /// store its borrowed variant in some intermediate data structure. + /// + /// Note that the lifetime parameter of the returned finder is tied to the + /// lifetime of `self`, and may be shorter than the `'n` lifetime of the + /// needle itself. Namely, a finder's needle can be either borrowed or + /// owned, so the lifetime of the needle returned must necessarily be the + /// shorter of the two. + #[inline] + pub fn as_ref(&self) -> FinderRev<'_> { + FinderRev { + needle: CowBytes::new(self.needle()), + searcher: self.searcher.clone(), + } + } + + /// Returns the needle that this finder searches for. + /// + /// Note that the lifetime of the needle returned is tied to the lifetime + /// of the finder, and may be shorter than the `'n` lifetime. Namely, a + /// finder's needle can be either borrowed or owned, so the lifetime of the + /// needle returned must necessarily be the shorter of the two. + #[inline] + pub fn needle(&self) -> &[u8] { + self.needle.as_slice() + } +} + +/// A builder for constructing non-default forward or reverse memmem finders. +/// +/// A builder is primarily useful for configuring a substring searcher. 
+/// Currently, the only configuration exposed is the ability to disable +/// heuristic prefilters used to speed up certain searches. +#[derive(Clone, Debug, Default)] +pub struct FinderBuilder { + prefilter: Prefilter, +} + +impl FinderBuilder { + /// Create a new finder builder with default settings. + pub fn new() -> FinderBuilder { + FinderBuilder::default() + } + + /// Build a forward finder using the given needle from the current + /// settings. + pub fn build_forward<'n, B: ?Sized + AsRef<[u8]>>( + &self, + needle: &'n B, + ) -> Finder<'n> { + self.build_forward_with_ranker(DefaultFrequencyRank, needle) + } + + /// Build a forward finder using the given needle and a custom heuristic for + /// determining the frequency of a given byte in the dataset. + /// See [`HeuristicFrequencyRank`] for more details. + pub fn build_forward_with_ranker< + 'n, + R: HeuristicFrequencyRank, + B: ?Sized + AsRef<[u8]>, + >( + &self, + ranker: R, + needle: &'n B, + ) -> Finder<'n> { + let needle = needle.as_ref(); + Finder { + needle: CowBytes::new(needle), + searcher: Searcher::new(self.prefilter, ranker, needle), + } + } + + /// Build a reverse finder using the given needle from the current + /// settings. + pub fn build_reverse<'n, B: ?Sized + AsRef<[u8]>>( + &self, + needle: &'n B, + ) -> FinderRev<'n> { + let needle = needle.as_ref(); + FinderRev { + needle: CowBytes::new(needle), + searcher: SearcherRev::new(needle), + } + } + + /// Configure the prefilter setting for the finder. + /// + /// See the documentation for [`Prefilter`] for more discussion on why + /// you might want to configure this. + pub fn prefilter(&mut self, prefilter: Prefilter) -> &mut FinderBuilder { + self.prefilter = prefilter; + self + } +} + +#[cfg(test)] +mod tests { + use super::*; + + define_substring_forward_quickcheck!(|h, n| Some(Finder::new(n).find(h))); + define_substring_reverse_quickcheck!(|h, n| Some( + FinderRev::new(n).rfind(h) + )); + + #[test] + fn forward() { + crate::tests::substring::Runner::new() + .fwd(|h, n| Some(Finder::new(n).find(h))) + .run(); + } + + #[test] + fn reverse() { + crate::tests::substring::Runner::new() + .rev(|h, n| Some(FinderRev::new(n).rfind(h))) + .run(); + } +} diff --git a/vendor/memchr/src/memmem/searcher.rs b/vendor/memchr/src/memmem/searcher.rs new file mode 100644 index 00000000000000..2a533e02fcf6d8 --- /dev/null +++ b/vendor/memchr/src/memmem/searcher.rs @@ -0,0 +1,1030 @@ +use crate::arch::all::{ + packedpair::{HeuristicFrequencyRank, Pair}, + rabinkarp, twoway, +}; + +#[cfg(target_arch = "aarch64")] +use crate::arch::aarch64::neon::packedpair as neon; +#[cfg(all(target_arch = "wasm32", target_feature = "simd128"))] +use crate::arch::wasm32::simd128::packedpair as simd128; +#[cfg(all(target_arch = "x86_64", target_feature = "sse2"))] +use crate::arch::x86_64::{ + avx2::packedpair as avx2, sse2::packedpair as sse2, +}; + +/// A "meta" substring searcher. +/// +/// To a first approximation, this chooses what it believes to be the "best" +/// substring search implemnetation based on the needle at construction time. +/// Then, every call to `find` will execute that particular implementation. To +/// a second approximation, multiple substring search algorithms may be used, +/// depending on the haystack. For example, for supremely short haystacks, +/// Rabin-Karp is typically used. +/// +/// See the documentation on `Prefilter` for an explanation of the dispatching +/// mechanism. 
The quick summary is that an enum has too much overhead and +/// we can't use dynamic dispatch via traits because we need to work in a +/// core-only environment. (Dynamic dispatch works in core-only, but you +/// need `&dyn Trait` and we really need a `Box` here. The latter +/// requires `alloc`.) So instead, we use a union and an appropriately paired +/// free function to read from the correct field on the union and execute the +/// chosen substring search implementation. +#[derive(Clone)] +pub(crate) struct Searcher { + call: SearcherKindFn, + kind: SearcherKind, + rabinkarp: rabinkarp::Finder, +} + +impl Searcher { + /// Creates a new "meta" substring searcher that attempts to choose the + /// best algorithm based on the needle, heuristics and what the current + /// target supports. + #[inline] + pub(crate) fn new( + prefilter: PrefilterConfig, + ranker: R, + needle: &[u8], + ) -> Searcher { + let rabinkarp = rabinkarp::Finder::new(needle); + if needle.len() <= 1 { + return if needle.is_empty() { + trace!("building empty substring searcher"); + Searcher { + call: searcher_kind_empty, + kind: SearcherKind { empty: () }, + rabinkarp, + } + } else { + trace!("building one-byte substring searcher"); + debug_assert_eq!(1, needle.len()); + Searcher { + call: searcher_kind_one_byte, + kind: SearcherKind { one_byte: needle[0] }, + rabinkarp, + } + }; + } + let pair = match Pair::with_ranker(needle, &ranker) { + Some(pair) => pair, + None => return Searcher::twoway(needle, rabinkarp, None), + }; + debug_assert_ne!( + pair.index1(), + pair.index2(), + "pair offsets should not be equivalent" + ); + #[cfg(all(target_arch = "x86_64", target_feature = "sse2"))] + { + if let Some(pp) = avx2::Finder::with_pair(needle, pair) { + if do_packed_search(needle) { + trace!("building x86_64 AVX2 substring searcher"); + let kind = SearcherKind { avx2: pp }; + Searcher { call: searcher_kind_avx2, kind, rabinkarp } + } else if prefilter.is_none() { + Searcher::twoway(needle, rabinkarp, None) + } else { + let prestrat = Prefilter::avx2(pp, needle); + Searcher::twoway(needle, rabinkarp, Some(prestrat)) + } + } else if let Some(pp) = sse2::Finder::with_pair(needle, pair) { + if do_packed_search(needle) { + trace!("building x86_64 SSE2 substring searcher"); + let kind = SearcherKind { sse2: pp }; + Searcher { call: searcher_kind_sse2, kind, rabinkarp } + } else if prefilter.is_none() { + Searcher::twoway(needle, rabinkarp, None) + } else { + let prestrat = Prefilter::sse2(pp, needle); + Searcher::twoway(needle, rabinkarp, Some(prestrat)) + } + } else if prefilter.is_none() { + Searcher::twoway(needle, rabinkarp, None) + } else { + // We're pretty unlikely to get to this point, but it is + // possible to be running on x86_64 without SSE2. Namely, it's + // really up to the OS whether it wants to support vector + // registers or not. 
+ let prestrat = Prefilter::fallback(ranker, pair, needle); + Searcher::twoway(needle, rabinkarp, prestrat) + } + } + #[cfg(all(target_arch = "wasm32", target_feature = "simd128"))] + { + if let Some(pp) = simd128::Finder::with_pair(needle, pair) { + if do_packed_search(needle) { + trace!("building wasm32 simd128 substring searcher"); + let kind = SearcherKind { simd128: pp }; + Searcher { call: searcher_kind_simd128, kind, rabinkarp } + } else if prefilter.is_none() { + Searcher::twoway(needle, rabinkarp, None) + } else { + let prestrat = Prefilter::simd128(pp, needle); + Searcher::twoway(needle, rabinkarp, Some(prestrat)) + } + } else if prefilter.is_none() { + Searcher::twoway(needle, rabinkarp, None) + } else { + let prestrat = Prefilter::fallback(ranker, pair, needle); + Searcher::twoway(needle, rabinkarp, prestrat) + } + } + #[cfg(target_arch = "aarch64")] + { + if let Some(pp) = neon::Finder::with_pair(needle, pair) { + if do_packed_search(needle) { + trace!("building aarch64 neon substring searcher"); + let kind = SearcherKind { neon: pp }; + Searcher { call: searcher_kind_neon, kind, rabinkarp } + } else if prefilter.is_none() { + Searcher::twoway(needle, rabinkarp, None) + } else { + let prestrat = Prefilter::neon(pp, needle); + Searcher::twoway(needle, rabinkarp, Some(prestrat)) + } + } else if prefilter.is_none() { + Searcher::twoway(needle, rabinkarp, None) + } else { + let prestrat = Prefilter::fallback(ranker, pair, needle); + Searcher::twoway(needle, rabinkarp, prestrat) + } + } + #[cfg(not(any( + all(target_arch = "x86_64", target_feature = "sse2"), + all(target_arch = "wasm32", target_feature = "simd128"), + target_arch = "aarch64" + )))] + { + if prefilter.is_none() { + Searcher::twoway(needle, rabinkarp, None) + } else { + let prestrat = Prefilter::fallback(ranker, pair, needle); + Searcher::twoway(needle, rabinkarp, prestrat) + } + } + } + + /// Creates a new searcher that always uses the Two-Way algorithm. This is + /// typically used when vector algorithms are unavailable or inappropriate. + /// (For example, when the needle is "too long.") + /// + /// If a prefilter is given, then the searcher returned will be accelerated + /// by the prefilter. + #[inline] + fn twoway( + needle: &[u8], + rabinkarp: rabinkarp::Finder, + prestrat: Option, + ) -> Searcher { + let finder = twoway::Finder::new(needle); + match prestrat { + None => { + trace!("building scalar two-way substring searcher"); + let kind = SearcherKind { two_way: finder }; + Searcher { call: searcher_kind_two_way, kind, rabinkarp } + } + Some(prestrat) => { + trace!( + "building scalar two-way \ + substring searcher with a prefilter" + ); + let two_way_with_prefilter = + TwoWayWithPrefilter { finder, prestrat }; + let kind = SearcherKind { two_way_with_prefilter }; + Searcher { + call: searcher_kind_two_way_with_prefilter, + kind, + rabinkarp, + } + } + } + } + + /// Searches the given haystack for the given needle. The needle given + /// should be the same as the needle that this finder was initialized + /// with. + /// + /// Inlining this can lead to big wins for latency, and #[inline] doesn't + /// seem to be enough in some cases. + #[inline(always)] + pub(crate) fn find( + &self, + prestate: &mut PrefilterState, + haystack: &[u8], + needle: &[u8], + ) -> Option { + if haystack.len() < needle.len() { + None + } else { + // SAFETY: By construction, we've ensured that the function + // in `self.call` is properly paired with the union used in + // `self.kind`. 
+ unsafe { (self.call)(self, prestate, haystack, needle) } + } + } +} + +impl core::fmt::Debug for Searcher { + fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { + f.debug_struct("Searcher") + .field("call", &"") + .field("kind", &"") + .field("rabinkarp", &self.rabinkarp) + .finish() + } +} + +/// A union indicating one of several possible substring search implementations +/// that are in active use. +/// +/// This union should only be read by one of the functions prefixed with +/// `searcher_kind_`. Namely, the correct function is meant to be paired with +/// the union by the caller, such that the function always reads from the +/// designated union field. +#[derive(Clone, Copy)] +union SearcherKind { + empty: (), + one_byte: u8, + two_way: twoway::Finder, + two_way_with_prefilter: TwoWayWithPrefilter, + #[cfg(all(target_arch = "x86_64", target_feature = "sse2"))] + sse2: crate::arch::x86_64::sse2::packedpair::Finder, + #[cfg(all(target_arch = "x86_64", target_feature = "sse2"))] + avx2: crate::arch::x86_64::avx2::packedpair::Finder, + #[cfg(all(target_arch = "wasm32", target_feature = "simd128"))] + simd128: crate::arch::wasm32::simd128::packedpair::Finder, + #[cfg(target_arch = "aarch64")] + neon: crate::arch::aarch64::neon::packedpair::Finder, +} + +/// A two-way substring searcher with a prefilter. +#[derive(Copy, Clone, Debug)] +struct TwoWayWithPrefilter { + finder: twoway::Finder, + prestrat: Prefilter, +} + +/// The type of a substring search function. +/// +/// # Safety +/// +/// When using a function of this type, callers must ensure that the correct +/// function is paired with the value populated in `SearcherKind` union. +type SearcherKindFn = unsafe fn( + searcher: &Searcher, + prestate: &mut PrefilterState, + haystack: &[u8], + needle: &[u8], +) -> Option; + +/// Reads from the `empty` field of `SearcherKind` to handle the case of +/// searching for the empty needle. Works on all platforms. +/// +/// # Safety +/// +/// Callers must ensure that the `searcher.kind.empty` union field is set. +unsafe fn searcher_kind_empty( + _searcher: &Searcher, + _prestate: &mut PrefilterState, + _haystack: &[u8], + _needle: &[u8], +) -> Option { + Some(0) +} + +/// Reads from the `one_byte` field of `SearcherKind` to handle the case of +/// searching for a single byte needle. Works on all platforms. +/// +/// # Safety +/// +/// Callers must ensure that the `searcher.kind.one_byte` union field is set. +unsafe fn searcher_kind_one_byte( + searcher: &Searcher, + _prestate: &mut PrefilterState, + haystack: &[u8], + _needle: &[u8], +) -> Option { + let needle = searcher.kind.one_byte; + crate::memchr(needle, haystack) +} + +/// Reads from the `two_way` field of `SearcherKind` to handle the case of +/// searching for an arbitrary needle without prefilter acceleration. Works on +/// all platforms. +/// +/// # Safety +/// +/// Callers must ensure that the `searcher.kind.two_way` union field is set. +unsafe fn searcher_kind_two_way( + searcher: &Searcher, + _prestate: &mut PrefilterState, + haystack: &[u8], + needle: &[u8], +) -> Option { + if rabinkarp::is_fast(haystack, needle) { + searcher.rabinkarp.find(haystack, needle) + } else { + searcher.kind.two_way.find(haystack, needle) + } +} + +/// Reads from the `two_way_with_prefilter` field of `SearcherKind` to handle +/// the case of searching for an arbitrary needle with prefilter acceleration. +/// Works on all platforms. 
+/// +/// # Safety +/// +/// Callers must ensure that the `searcher.kind.two_way_with_prefilter` union +/// field is set. +unsafe fn searcher_kind_two_way_with_prefilter( + searcher: &Searcher, + prestate: &mut PrefilterState, + haystack: &[u8], + needle: &[u8], +) -> Option { + if rabinkarp::is_fast(haystack, needle) { + searcher.rabinkarp.find(haystack, needle) + } else { + let TwoWayWithPrefilter { ref finder, ref prestrat } = + searcher.kind.two_way_with_prefilter; + let pre = Pre { prestate, prestrat }; + finder.find_with_prefilter(Some(pre), haystack, needle) + } +} + +/// Reads from the `sse2` field of `SearcherKind` to execute the x86_64 SSE2 +/// vectorized substring search implementation. +/// +/// # Safety +/// +/// Callers must ensure that the `searcher.kind.sse2` union field is set. +#[cfg(all(target_arch = "x86_64", target_feature = "sse2"))] +unsafe fn searcher_kind_sse2( + searcher: &Searcher, + _prestate: &mut PrefilterState, + haystack: &[u8], + needle: &[u8], +) -> Option { + let finder = &searcher.kind.sse2; + if haystack.len() < finder.min_haystack_len() { + searcher.rabinkarp.find(haystack, needle) + } else { + finder.find(haystack, needle) + } +} + +/// Reads from the `avx2` field of `SearcherKind` to execute the x86_64 AVX2 +/// vectorized substring search implementation. +/// +/// # Safety +/// +/// Callers must ensure that the `searcher.kind.avx2` union field is set. +#[cfg(all(target_arch = "x86_64", target_feature = "sse2"))] +unsafe fn searcher_kind_avx2( + searcher: &Searcher, + _prestate: &mut PrefilterState, + haystack: &[u8], + needle: &[u8], +) -> Option { + let finder = &searcher.kind.avx2; + if haystack.len() < finder.min_haystack_len() { + searcher.rabinkarp.find(haystack, needle) + } else { + finder.find(haystack, needle) + } +} + +/// Reads from the `simd128` field of `SearcherKind` to execute the wasm32 +/// simd128 vectorized substring search implementation. +/// +/// # Safety +/// +/// Callers must ensure that the `searcher.kind.simd128` union field is set. +#[cfg(all(target_arch = "wasm32", target_feature = "simd128"))] +unsafe fn searcher_kind_simd128( + searcher: &Searcher, + _prestate: &mut PrefilterState, + haystack: &[u8], + needle: &[u8], +) -> Option { + let finder = &searcher.kind.simd128; + if haystack.len() < finder.min_haystack_len() { + searcher.rabinkarp.find(haystack, needle) + } else { + finder.find(haystack, needle) + } +} + +/// Reads from the `neon` field of `SearcherKind` to execute the aarch64 neon +/// vectorized substring search implementation. +/// +/// # Safety +/// +/// Callers must ensure that the `searcher.kind.neon` union field is set. +#[cfg(target_arch = "aarch64")] +unsafe fn searcher_kind_neon( + searcher: &Searcher, + _prestate: &mut PrefilterState, + haystack: &[u8], + needle: &[u8], +) -> Option { + let finder = &searcher.kind.neon; + if haystack.len() < finder.min_haystack_len() { + searcher.rabinkarp.find(haystack, needle) + } else { + finder.find(haystack, needle) + } +} + +/// A reverse substring searcher. +#[derive(Clone, Debug)] +pub(crate) struct SearcherRev { + kind: SearcherRevKind, + rabinkarp: rabinkarp::FinderRev, +} + +/// The kind of the reverse searcher. +/// +/// For the reverse case, we don't do any SIMD acceleration or prefilters. +/// There is no specific technical reason why we don't, but rather don't do it +/// because it's not clear it's worth the extra code to do so. If you have a +/// use case for it, please file an issue. 
+/// +/// We also don't do the union trick as we do with the forward case and +/// prefilters. Basically for the same reason we don't have prefilters or +/// vector algorithms for reverse searching: it's not clear it's worth doing. +/// Please file an issue if you have a compelling use case for fast reverse +/// substring search. +#[derive(Clone, Debug)] +enum SearcherRevKind { + Empty, + OneByte { needle: u8 }, + TwoWay { finder: twoway::FinderRev }, +} + +impl SearcherRev { + /// Creates a new searcher for finding occurrences of the given needle in + /// reverse. That is, it reports the last (instead of the first) occurrence + /// of a needle in a haystack. + #[inline] + pub(crate) fn new(needle: &[u8]) -> SearcherRev { + let kind = if needle.len() <= 1 { + if needle.is_empty() { + trace!("building empty reverse substring searcher"); + SearcherRevKind::Empty + } else { + trace!("building one-byte reverse substring searcher"); + debug_assert_eq!(1, needle.len()); + SearcherRevKind::OneByte { needle: needle[0] } + } + } else { + trace!("building scalar two-way reverse substring searcher"); + let finder = twoway::FinderRev::new(needle); + SearcherRevKind::TwoWay { finder } + }; + let rabinkarp = rabinkarp::FinderRev::new(needle); + SearcherRev { kind, rabinkarp } + } + + /// Searches the given haystack for the last occurrence of the given + /// needle. The needle given should be the same as the needle that this + /// finder was initialized with. + #[inline] + pub(crate) fn rfind( + &self, + haystack: &[u8], + needle: &[u8], + ) -> Option { + if haystack.len() < needle.len() { + return None; + } + match self.kind { + SearcherRevKind::Empty => Some(haystack.len()), + SearcherRevKind::OneByte { needle } => { + crate::memrchr(needle, haystack) + } + SearcherRevKind::TwoWay { ref finder } => { + if rabinkarp::is_fast(haystack, needle) { + self.rabinkarp.rfind(haystack, needle) + } else { + finder.rfind(haystack, needle) + } + } + } + } +} + +/// Prefilter controls whether heuristics are used to accelerate searching. +/// +/// A prefilter refers to the idea of detecting candidate matches very quickly, +/// and then confirming whether those candidates are full matches. This +/// idea can be quite effective since it's often the case that looking for +/// candidates can be a lot faster than running a complete substring search +/// over the entire input. Namely, looking for candidates can be done with +/// extremely fast vectorized code. +/// +/// The downside of a prefilter is that it assumes false positives (which are +/// candidates generated by a prefilter that aren't matches) are somewhat rare +/// relative to the frequency of full matches. That is, if a lot of false +/// positives are generated, then it's possible for search time to be worse +/// than if the prefilter wasn't enabled in the first place. +/// +/// Another downside of a prefilter is that it can result in highly variable +/// performance, where some cases are extraordinarily fast and others aren't. +/// Typically, variable performance isn't a problem, but it may be for your use +/// case. +/// +/// The use of prefilters in this implementation does use a heuristic to detect +/// when a prefilter might not be carrying its weight, and will dynamically +/// disable its use. Nevertheless, this configuration option gives callers +/// the ability to disable prefilters if you have knowledge that they won't be +/// useful. 
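+///
+/// For example (a minimal usage sketch, relying only on the [`FinderBuilder`]
+/// and [`Prefilter`] items defined in this module), a caller can disable the
+/// heuristic prefilter entirely when building a forward finder:
+///
+/// ```
+/// use memchr::memmem::{FinderBuilder, Prefilter};
+///
+/// // Build a forward searcher with prefilters turned off.
+/// let finder = FinderBuilder::new()
+///     .prefilter(Prefilter::None)
+///     .build_forward("foo");
+/// assert_eq!(Some(8), finder.find(b"bar baz foo quux"));
+/// ```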
+#[derive(Clone, Copy, Debug)] +#[non_exhaustive] +pub enum PrefilterConfig { + /// Never used a prefilter in substring search. + None, + /// Automatically detect whether a heuristic prefilter should be used. If + /// it is used, then heuristics will be used to dynamically disable the + /// prefilter if it is believed to not be carrying its weight. + Auto, +} + +impl Default for PrefilterConfig { + fn default() -> PrefilterConfig { + PrefilterConfig::Auto + } +} + +impl PrefilterConfig { + /// Returns true when this prefilter is set to the `None` variant. + fn is_none(&self) -> bool { + matches!(*self, PrefilterConfig::None) + } +} + +/// The implementation of a prefilter. +/// +/// This type encapsulates dispatch to one of several possible choices for a +/// prefilter. Generally speaking, all prefilters have the same approximate +/// algorithm: they choose a couple of bytes from the needle that are believed +/// to be rare, use a fast vector algorithm to look for those bytes and return +/// positions as candidates for some substring search algorithm (currently only +/// Two-Way) to confirm as a match or not. +/// +/// The differences between the algorithms are actually at the vector +/// implementation level. Namely, we need different routines based on both +/// which target architecture we're on and what CPU features are supported. +/// +/// The straight-forwardly obvious approach here is to use an enum, and make +/// `Prefilter::find` do case analysis to determine which algorithm was +/// selected and invoke it. However, I've observed that this leads to poor +/// codegen in some cases, especially in latency sensitive benchmarks. That is, +/// this approach comes with overhead that I wasn't able to eliminate. +/// +/// The second obvious approach is to use dynamic dispatch with traits. Doing +/// that in this context where `Prefilter` owns the selection generally +/// requires heap allocation, and this code is designed to run in core-only +/// environments. +/// +/// So we settle on using a union (that's `PrefilterKind`) and a function +/// pointer (that's `PrefilterKindFn`). We select the right function pointer +/// based on which field in the union we set, and that function in turn +/// knows which field of the union to access. The downside of this approach +/// is that it forces us to think about safety, but the upside is that +/// there are some nice latency improvements to benchmarks. (Especially the +/// `memmem/sliceslice/short` benchmark.) +/// +/// In cases where we've selected a vector algorithm and the haystack given +/// is too short, we fallback to the scalar version of `memchr` on the +/// `rarest_byte`. (The scalar version of `memchr` is still better than a naive +/// byte-at-a-time loop because it will read in `usize`-sized chunks at a +/// time.) +#[derive(Clone, Copy)] +struct Prefilter { + call: PrefilterKindFn, + kind: PrefilterKind, + rarest_byte: u8, + rarest_offset: u8, +} + +impl Prefilter { + /// Return a "fallback" prefilter, but only if it is believed to be + /// effective. + #[inline] + fn fallback( + ranker: R, + pair: Pair, + needle: &[u8], + ) -> Option { + /// The maximum frequency rank permitted for the fallback prefilter. + /// If the rarest byte in the needle has a frequency rank above this + /// value, then no prefilter is used if the fallback prefilter would + /// otherwise be selected. 
+ const MAX_FALLBACK_RANK: u8 = 250; + + trace!("building fallback prefilter"); + let rarest_offset = pair.index1(); + let rarest_byte = needle[usize::from(rarest_offset)]; + let rarest_rank = ranker.rank(rarest_byte); + if rarest_rank > MAX_FALLBACK_RANK { + None + } else { + let finder = crate::arch::all::packedpair::Finder::with_pair( + needle, + pair.clone(), + )?; + let call = prefilter_kind_fallback; + let kind = PrefilterKind { fallback: finder }; + Some(Prefilter { call, kind, rarest_byte, rarest_offset }) + } + } + + /// Return a prefilter using a x86_64 SSE2 vector algorithm. + #[cfg(all(target_arch = "x86_64", target_feature = "sse2"))] + #[inline] + fn sse2(finder: sse2::Finder, needle: &[u8]) -> Prefilter { + trace!("building x86_64 SSE2 prefilter"); + let rarest_offset = finder.pair().index1(); + let rarest_byte = needle[usize::from(rarest_offset)]; + Prefilter { + call: prefilter_kind_sse2, + kind: PrefilterKind { sse2: finder }, + rarest_byte, + rarest_offset, + } + } + + /// Return a prefilter using a x86_64 AVX2 vector algorithm. + #[cfg(all(target_arch = "x86_64", target_feature = "sse2"))] + #[inline] + fn avx2(finder: avx2::Finder, needle: &[u8]) -> Prefilter { + trace!("building x86_64 AVX2 prefilter"); + let rarest_offset = finder.pair().index1(); + let rarest_byte = needle[usize::from(rarest_offset)]; + Prefilter { + call: prefilter_kind_avx2, + kind: PrefilterKind { avx2: finder }, + rarest_byte, + rarest_offset, + } + } + + /// Return a prefilter using a wasm32 simd128 vector algorithm. + #[cfg(all(target_arch = "wasm32", target_feature = "simd128"))] + #[inline] + fn simd128(finder: simd128::Finder, needle: &[u8]) -> Prefilter { + trace!("building wasm32 simd128 prefilter"); + let rarest_offset = finder.pair().index1(); + let rarest_byte = needle[usize::from(rarest_offset)]; + Prefilter { + call: prefilter_kind_simd128, + kind: PrefilterKind { simd128: finder }, + rarest_byte, + rarest_offset, + } + } + + /// Return a prefilter using a aarch64 neon vector algorithm. + #[cfg(target_arch = "aarch64")] + #[inline] + fn neon(finder: neon::Finder, needle: &[u8]) -> Prefilter { + trace!("building aarch64 neon prefilter"); + let rarest_offset = finder.pair().index1(); + let rarest_byte = needle[usize::from(rarest_offset)]; + Prefilter { + call: prefilter_kind_neon, + kind: PrefilterKind { neon: finder }, + rarest_byte, + rarest_offset, + } + } + + /// Return a *candidate* position for a match. + /// + /// When this returns an offset, it implies that a match could begin at + /// that offset, but it may not. That is, it is possible for a false + /// positive to be returned. + /// + /// When `None` is returned, then it is guaranteed that there are no + /// matches for the needle in the given haystack. That is, it is impossible + /// for a false negative to be returned. + /// + /// The purpose of this routine is to look for candidate matching positions + /// as quickly as possible before running a (likely) slower confirmation + /// step. + #[inline] + fn find(&self, haystack: &[u8]) -> Option { + // SAFETY: By construction, we've ensured that the function in + // `self.call` is properly paired with the union used in `self.kind`. + unsafe { (self.call)(self, haystack) } + } + + /// A "simple" prefilter that just looks for the occurrence of the rarest + /// byte from the needle. This is generally only used for very small + /// haystacks. 
+ #[inline] + fn find_simple(&self, haystack: &[u8]) -> Option { + // We don't use crate::memchr here because the haystack should be small + // enough that memchr won't be able to use vector routines anyway. So + // we just skip straight to the fallback implementation which is likely + // faster. (A byte-at-a-time loop is only used when the haystack is + // smaller than `size_of::()`.) + crate::arch::all::memchr::One::new(self.rarest_byte) + .find(haystack) + .map(|i| i.saturating_sub(usize::from(self.rarest_offset))) + } +} + +impl core::fmt::Debug for Prefilter { + fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { + f.debug_struct("Prefilter") + .field("call", &"") + .field("kind", &"") + .field("rarest_byte", &self.rarest_byte) + .field("rarest_offset", &self.rarest_offset) + .finish() + } +} + +/// A union indicating one of several possible prefilters that are in active +/// use. +/// +/// This union should only be read by one of the functions prefixed with +/// `prefilter_kind_`. Namely, the correct function is meant to be paired with +/// the union by the caller, such that the function always reads from the +/// designated union field. +#[derive(Clone, Copy)] +union PrefilterKind { + fallback: crate::arch::all::packedpair::Finder, + #[cfg(all(target_arch = "x86_64", target_feature = "sse2"))] + sse2: crate::arch::x86_64::sse2::packedpair::Finder, + #[cfg(all(target_arch = "x86_64", target_feature = "sse2"))] + avx2: crate::arch::x86_64::avx2::packedpair::Finder, + #[cfg(all(target_arch = "wasm32", target_feature = "simd128"))] + simd128: crate::arch::wasm32::simd128::packedpair::Finder, + #[cfg(target_arch = "aarch64")] + neon: crate::arch::aarch64::neon::packedpair::Finder, +} + +/// The type of a prefilter function. +/// +/// # Safety +/// +/// When using a function of this type, callers must ensure that the correct +/// function is paired with the value populated in `PrefilterKind` union. +type PrefilterKindFn = + unsafe fn(strat: &Prefilter, haystack: &[u8]) -> Option; + +/// Reads from the `fallback` field of `PrefilterKind` to execute the fallback +/// prefilter. Works on all platforms. +/// +/// # Safety +/// +/// Callers must ensure that the `strat.kind.fallback` union field is set. +unsafe fn prefilter_kind_fallback( + strat: &Prefilter, + haystack: &[u8], +) -> Option { + strat.kind.fallback.find_prefilter(haystack) +} + +/// Reads from the `sse2` field of `PrefilterKind` to execute the x86_64 SSE2 +/// prefilter. +/// +/// # Safety +/// +/// Callers must ensure that the `strat.kind.sse2` union field is set. +#[cfg(all(target_arch = "x86_64", target_feature = "sse2"))] +unsafe fn prefilter_kind_sse2( + strat: &Prefilter, + haystack: &[u8], +) -> Option { + let finder = &strat.kind.sse2; + if haystack.len() < finder.min_haystack_len() { + strat.find_simple(haystack) + } else { + finder.find_prefilter(haystack) + } +} + +/// Reads from the `avx2` field of `PrefilterKind` to execute the x86_64 AVX2 +/// prefilter. +/// +/// # Safety +/// +/// Callers must ensure that the `strat.kind.avx2` union field is set. +#[cfg(all(target_arch = "x86_64", target_feature = "sse2"))] +unsafe fn prefilter_kind_avx2( + strat: &Prefilter, + haystack: &[u8], +) -> Option { + let finder = &strat.kind.avx2; + if haystack.len() < finder.min_haystack_len() { + strat.find_simple(haystack) + } else { + finder.find_prefilter(haystack) + } +} + +/// Reads from the `simd128` field of `PrefilterKind` to execute the wasm32 +/// simd128 prefilter. 
+/// +/// # Safety +/// +/// Callers must ensure that the `strat.kind.simd128` union field is set. +#[cfg(all(target_arch = "wasm32", target_feature = "simd128"))] +unsafe fn prefilter_kind_simd128( + strat: &Prefilter, + haystack: &[u8], +) -> Option { + let finder = &strat.kind.simd128; + if haystack.len() < finder.min_haystack_len() { + strat.find_simple(haystack) + } else { + finder.find_prefilter(haystack) + } +} + +/// Reads from the `neon` field of `PrefilterKind` to execute the aarch64 neon +/// prefilter. +/// +/// # Safety +/// +/// Callers must ensure that the `strat.kind.neon` union field is set. +#[cfg(target_arch = "aarch64")] +unsafe fn prefilter_kind_neon( + strat: &Prefilter, + haystack: &[u8], +) -> Option { + let finder = &strat.kind.neon; + if haystack.len() < finder.min_haystack_len() { + strat.find_simple(haystack) + } else { + finder.find_prefilter(haystack) + } +} + +/// PrefilterState tracks state associated with the effectiveness of a +/// prefilter. It is used to track how many bytes, on average, are skipped by +/// the prefilter. If this average dips below a certain threshold over time, +/// then the state renders the prefilter inert and stops using it. +/// +/// A prefilter state should be created for each search. (Where creating an +/// iterator is treated as a single search.) A prefilter state should only be +/// created from a `Freqy`. e.g., An inert `Freqy` will produce an inert +/// `PrefilterState`. +#[derive(Clone, Copy, Debug)] +pub(crate) struct PrefilterState { + /// The number of skips that has been executed. This is always 1 greater + /// than the actual number of skips. The special sentinel value of 0 + /// indicates that the prefilter is inert. This is useful to avoid + /// additional checks to determine whether the prefilter is still + /// "effective." Once a prefilter becomes inert, it should no longer be + /// used (according to our heuristics). + skips: u32, + /// The total number of bytes that have been skipped. + skipped: u32, +} + +impl PrefilterState { + /// The minimum number of skip attempts to try before considering whether + /// a prefilter is effective or not. + const MIN_SKIPS: u32 = 50; + + /// The minimum amount of bytes that skipping must average. + /// + /// This value was chosen based on varying it and checking + /// the microbenchmarks. In particular, this can impact the + /// pathological/repeated-{huge,small} benchmarks quite a bit if it's set + /// too low. + const MIN_SKIP_BYTES: u32 = 8; + + /// Create a fresh prefilter state. + #[inline] + pub(crate) fn new() -> PrefilterState { + PrefilterState { skips: 1, skipped: 0 } + } + + /// Update this state with the number of bytes skipped on the last + /// invocation of the prefilter. + #[inline] + fn update(&mut self, skipped: usize) { + self.skips = self.skips.saturating_add(1); + // We need to do this dance since it's technically possible for + // `skipped` to overflow a `u32`. (And we use a `u32` to reduce the + // size of a prefilter state.) + self.skipped = match u32::try_from(skipped) { + Err(_) => core::u32::MAX, + Ok(skipped) => self.skipped.saturating_add(skipped), + }; + } + + /// Return true if and only if this state indicates that a prefilter is + /// still effective. + #[inline] + fn is_effective(&mut self) -> bool { + if self.is_inert() { + return false; + } + if self.skips() < PrefilterState::MIN_SKIPS { + return true; + } + if self.skipped >= PrefilterState::MIN_SKIP_BYTES * self.skips() { + return true; + } + + // We're inert. 
+ self.skips = 0; + false + } + + /// Returns true if the prefilter this state represents should no longer + /// be used. + #[inline] + fn is_inert(&self) -> bool { + self.skips == 0 + } + + /// Returns the total number of times the prefilter has been used. + #[inline] + fn skips(&self) -> u32 { + // Remember, `0` is a sentinel value indicating inertness, so we + // always need to subtract `1` to get our actual number of skips. + self.skips.saturating_sub(1) + } +} + +/// A combination of prefilter effectiveness state and the prefilter itself. +#[derive(Debug)] +pub(crate) struct Pre<'a> { + /// State that tracks the effectiveness of a prefilter. + prestate: &'a mut PrefilterState, + /// The actual prefilter. + prestrat: &'a Prefilter, +} + +impl<'a> Pre<'a> { + /// Call this prefilter on the given haystack with the given needle. + #[inline] + pub(crate) fn find(&mut self, haystack: &[u8]) -> Option { + let result = self.prestrat.find(haystack); + self.prestate.update(result.unwrap_or(haystack.len())); + result + } + + /// Return true if and only if this prefilter should be used. + #[inline] + pub(crate) fn is_effective(&mut self) -> bool { + self.prestate.is_effective() + } +} + +/// Returns true if the needle has the right characteristics for a vector +/// algorithm to handle the entirety of substring search. +/// +/// Vector algorithms can be used for prefilters for other substring search +/// algorithms (like Two-Way), but they can also be used for substring search +/// on their own. When used for substring search, vector algorithms will +/// quickly identify candidate match positions (just like in the prefilter +/// case), but instead of returning the candidate position they will try to +/// confirm the match themselves. Confirmation happens via `memcmp`. This +/// works well for short needles, but can break down when many false candidate +/// positions are generated for large needles. Thus, we only permit vector +/// algorithms to own substring search when the needle is of a certain length. +#[inline] +fn do_packed_search(needle: &[u8]) -> bool { + /// The minimum length of a needle required for this algorithm. The minimum + /// is 2 since a length of 1 should just use memchr and a length of 0 isn't + /// a case handled by this searcher. + const MIN_LEN: usize = 2; + + /// The maximum length of a needle required for this algorithm. + /// + /// In reality, there is no hard max here. The code below can handle any + /// length needle. (Perhaps that suggests there are missing optimizations.) + /// Instead, this is a heuristic and a bound guaranteeing our linear time + /// complexity. + /// + /// It is a heuristic because when a candidate match is found, memcmp is + /// run. For very large needles with lots of false positives, memcmp can + /// make the code run quite slow. + /// + /// It is a bound because the worst case behavior with memcmp is + /// multiplicative in the size of the needle and haystack, and we want + /// to keep that additive. This bound ensures we still meet that bound + /// theoretically, since it's just a constant. We aren't acting in bad + /// faith here, memcmp on tiny needles is so fast that even in pathological + /// cases (see pathological vector benchmarks), this is still just as fast + /// or faster in practice. + /// + /// This specific number was chosen by tweaking a bit and running + /// benchmarks. The rare-medium-needle, for example, gets about 5% faster + /// by using this algorithm instead of a prefilter-accelerated Two-Way. 
+ /// There's also a theoretical desire to keep this number reasonably + /// low, to mitigate the impact of pathological cases. I did try 64, and + /// some benchmarks got a little better, and others (particularly the + /// pathological ones), got a lot worse. So... 32 it is? + const MAX_LEN: usize = 32; + MIN_LEN <= needle.len() && needle.len() <= MAX_LEN +} diff --git a/vendor/memchr/src/tests/memchr/mod.rs b/vendor/memchr/src/tests/memchr/mod.rs new file mode 100644 index 00000000000000..0564ad4fbb8a19 --- /dev/null +++ b/vendor/memchr/src/tests/memchr/mod.rs @@ -0,0 +1,307 @@ +use alloc::{ + string::{String, ToString}, + vec, + vec::Vec, +}; + +use crate::ext::Byte; + +pub(crate) mod naive; +#[macro_use] +pub(crate) mod prop; + +const SEEDS: &'static [Seed] = &[ + Seed { haystack: "a", needles: &[b'a'], positions: &[0] }, + Seed { haystack: "aa", needles: &[b'a'], positions: &[0, 1] }, + Seed { haystack: "aaa", needles: &[b'a'], positions: &[0, 1, 2] }, + Seed { haystack: "", needles: &[b'a'], positions: &[] }, + Seed { haystack: "z", needles: &[b'a'], positions: &[] }, + Seed { haystack: "zz", needles: &[b'a'], positions: &[] }, + Seed { haystack: "zza", needles: &[b'a'], positions: &[2] }, + Seed { haystack: "zaza", needles: &[b'a'], positions: &[1, 3] }, + Seed { haystack: "zzza", needles: &[b'a'], positions: &[3] }, + Seed { haystack: "\x00a", needles: &[b'a'], positions: &[1] }, + Seed { haystack: "\x00", needles: &[b'\x00'], positions: &[0] }, + Seed { haystack: "\x00\x00", needles: &[b'\x00'], positions: &[0, 1] }, + Seed { haystack: "\x00a\x00", needles: &[b'\x00'], positions: &[0, 2] }, + Seed { haystack: "zzzzzzzzzzzzzzzza", needles: &[b'a'], positions: &[16] }, + Seed { + haystack: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzza", + needles: &[b'a'], + positions: &[32], + }, + // two needles (applied to memchr2 + memchr3) + Seed { haystack: "az", needles: &[b'a', b'z'], positions: &[0, 1] }, + Seed { haystack: "az", needles: &[b'a', b'z'], positions: &[0, 1] }, + Seed { haystack: "az", needles: &[b'x', b'y'], positions: &[] }, + Seed { haystack: "az", needles: &[b'a', b'y'], positions: &[0] }, + Seed { haystack: "az", needles: &[b'x', b'z'], positions: &[1] }, + Seed { haystack: "yyyyaz", needles: &[b'a', b'z'], positions: &[4, 5] }, + Seed { haystack: "yyyyaz", needles: &[b'z', b'a'], positions: &[4, 5] }, + // three needles (applied to memchr3) + Seed { + haystack: "xyz", + needles: &[b'x', b'y', b'z'], + positions: &[0, 1, 2], + }, + Seed { + haystack: "zxy", + needles: &[b'x', b'y', b'z'], + positions: &[0, 1, 2], + }, + Seed { haystack: "zxy", needles: &[b'x', b'a', b'z'], positions: &[0, 1] }, + Seed { haystack: "zxy", needles: &[b't', b'a', b'z'], positions: &[0] }, + Seed { haystack: "yxz", needles: &[b't', b'a', b'z'], positions: &[2] }, +]; + +/// Runs a host of substring search tests. +/// +/// This has support for "partial" substring search implementations only work +/// for a subset of needles/haystacks. For example, the "packed pair" substring +/// search implementation only works for haystacks of some minimum length based +/// of the pair of bytes selected and the size of the vector used. +pub(crate) struct Runner { + needle_len: usize, +} + +impl Runner { + /// Create a new test runner for forward and reverse byte search + /// implementations. + /// + /// The `needle_len` given must be at most `3` and at least `1`. It + /// corresponds to the number of needle bytes to search for. 
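+    ///
+    /// For example, a `Runner::new(2)` exercises only the seeds that have
+    /// one or two needle bytes; seeds that carry three needles are skipped.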
+ pub(crate) fn new(needle_len: usize) -> Runner { + assert!(needle_len >= 1, "needle_len must be at least 1"); + assert!(needle_len <= 3, "needle_len must be at most 3"); + Runner { needle_len } + } + + /// Run all tests. This panics on the first failure. + /// + /// If the implementation being tested returns `None` for a particular + /// haystack/needle combination, then that test is skipped. + pub(crate) fn forward_iter(self, mut test: F) + where + F: FnMut(&[u8], &[u8]) -> Option> + 'static, + { + for seed in SEEDS.iter() { + if seed.needles.len() > self.needle_len { + continue; + } + for t in seed.generate() { + let results = match test(t.haystack.as_bytes(), &t.needles) { + None => continue, + Some(results) => results, + }; + assert_eq!( + t.expected, + results, + "needles: {:?}, haystack: {:?}", + t.needles + .iter() + .map(|&b| b.to_char()) + .collect::>(), + t.haystack, + ); + } + } + } + + /// Run all tests in the reverse direction. This panics on the first + /// failure. + /// + /// If the implementation being tested returns `None` for a particular + /// haystack/needle combination, then that test is skipped. + pub(crate) fn reverse_iter(self, mut test: F) + where + F: FnMut(&[u8], &[u8]) -> Option> + 'static, + { + for seed in SEEDS.iter() { + if seed.needles.len() > self.needle_len { + continue; + } + for t in seed.generate() { + let mut results = match test(t.haystack.as_bytes(), &t.needles) + { + None => continue, + Some(results) => results, + }; + results.reverse(); + assert_eq!( + t.expected, + results, + "needles: {:?}, haystack: {:?}", + t.needles + .iter() + .map(|&b| b.to_char()) + .collect::>(), + t.haystack, + ); + } + } + } + + /// Run all tests as counting tests. This panics on the first failure. + /// + /// That is, this only checks that the number of matches is correct and + /// not whether the offsets of each match are. + pub(crate) fn count_iter(self, mut test: F) + where + F: FnMut(&[u8], &[u8]) -> Option + 'static, + { + for seed in SEEDS.iter() { + if seed.needles.len() > self.needle_len { + continue; + } + for t in seed.generate() { + let got = match test(t.haystack.as_bytes(), &t.needles) { + None => continue, + Some(got) => got, + }; + assert_eq!( + t.expected.len(), + got, + "needles: {:?}, haystack: {:?}", + t.needles + .iter() + .map(|&b| b.to_char()) + .collect::>(), + t.haystack, + ); + } + } + } + + /// Like `Runner::forward`, but for a function that returns only the next + /// match and not all matches. + /// + /// If the function returns `None`, then it is skipped. + pub(crate) fn forward_oneshot(self, mut test: F) + where + F: FnMut(&[u8], &[u8]) -> Option> + 'static, + { + self.forward_iter(move |haystack, needles| { + let mut start = 0; + let mut results = vec![]; + while let Some(i) = test(&haystack[start..], needles)? { + results.push(start + i); + start += i + 1; + } + Some(results) + }) + } + + /// Like `Runner::reverse`, but for a function that returns only the last + /// match and not all matches. + /// + /// If the function returns `None`, then it is skipped. + pub(crate) fn reverse_oneshot(self, mut test: F) + where + F: FnMut(&[u8], &[u8]) -> Option> + 'static, + { + self.reverse_iter(move |haystack, needles| { + let mut end = haystack.len(); + let mut results = vec![]; + while let Some(i) = test(&haystack[..end], needles)? { + results.push(i); + end = i; + } + Some(results) + }) + } +} + +/// A single test for memr?chr{,2,3}. +#[derive(Clone, Debug)] +struct Test { + /// The string to search in. 
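+    /// Owned as a `String` so that `Seed::generate` can pad it on both ends.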
+ haystack: String, + /// The needles to look for. + needles: Vec, + /// The offsets that are expected to be found for all needles in the + /// forward direction. + expected: Vec, +} + +impl Test { + fn new(seed: &Seed) -> Test { + Test { + haystack: seed.haystack.to_string(), + needles: seed.needles.to_vec(), + expected: seed.positions.to_vec(), + } + } +} + +/// Data that can be expanded into many memchr tests by padding out the corpus. +#[derive(Clone, Debug)] +struct Seed { + /// The thing to search. We use `&str` instead of `&[u8]` because they + /// are nicer to write in tests, and we don't miss much since memchr + /// doesn't care about UTF-8. + /// + /// Corpora cannot contain either '%' or '#'. We use these bytes when + /// expanding test cases into many test cases, and we assume they are not + /// used. If they are used, `memchr_tests` will panic. + haystack: &'static str, + /// The needles to search for. This is intended to be an alternation of + /// needles. The number of needles may cause this test to be skipped for + /// some memchr variants. For example, a test with 2 needles cannot be used + /// to test `memchr`, but can be used to test `memchr2` and `memchr3`. + /// However, a test with only 1 needle can be used to test all of `memchr`, + /// `memchr2` and `memchr3`. We achieve this by filling in the needles with + /// bytes that we never used in the corpus (such as '#'). + needles: &'static [u8], + /// The positions expected to match for all of the needles. + positions: &'static [usize], +} + +impl Seed { + /// Controls how much we expand the haystack on either side for each test. + /// We lower this on Miri because otherwise running the tests would take + /// forever. + const EXPAND_LEN: usize = { + #[cfg(not(miri))] + { + 515 + } + #[cfg(miri)] + { + 6 + } + }; + + /// Expand this test into many variations of the same test. + /// + /// In particular, this will generate more tests with larger corpus sizes. + /// The expected positions are updated to maintain the integrity of the + /// test. + /// + /// This is important in testing a memchr implementation, because there are + /// often different cases depending on the length of the corpus. + /// + /// Note that we extend the corpus by adding `%` bytes, which we + /// don't otherwise use as a needle. + fn generate(&self) -> impl Iterator { + let mut more = vec![]; + + // Add bytes to the start of the corpus. + for i in 0..Seed::EXPAND_LEN { + let mut t = Test::new(self); + let mut new: String = core::iter::repeat('%').take(i).collect(); + new.push_str(&t.haystack); + t.haystack = new; + t.expected = t.expected.into_iter().map(|p| p + i).collect(); + more.push(t); + } + // Add bytes to the end of the corpus. 
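+        // Unlike the prefix padding above, trailing padding never shifts
+        // the match offsets, so `expected` is left untouched below.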
+        for i in 1..Seed::EXPAND_LEN {
+            let mut t = Test::new(self);
+            let padding: String = core::iter::repeat('%').take(i).collect();
+            t.haystack.push_str(&padding);
+            more.push(t);
+        }
+
+        more.into_iter()
+    }
+}
diff --git a/vendor/memchr/src/tests/memchr/naive.rs b/vendor/memchr/src/tests/memchr/naive.rs
new file mode 100644
index 00000000000000..6ebcdaea72a7fd
--- /dev/null
+++ b/vendor/memchr/src/tests/memchr/naive.rs
@@ -0,0 +1,33 @@
+pub(crate) fn memchr(n1: u8, haystack: &[u8]) -> Option<usize> {
+    haystack.iter().position(|&b| b == n1)
+}
+
+pub(crate) fn memchr2(n1: u8, n2: u8, haystack: &[u8]) -> Option<usize> {
+    haystack.iter().position(|&b| b == n1 || b == n2)
+}
+
+pub(crate) fn memchr3(
+    n1: u8,
+    n2: u8,
+    n3: u8,
+    haystack: &[u8],
+) -> Option<usize> {
+    haystack.iter().position(|&b| b == n1 || b == n2 || b == n3)
+}
+
+pub(crate) fn memrchr(n1: u8, haystack: &[u8]) -> Option<usize> {
+    haystack.iter().rposition(|&b| b == n1)
+}
+
+pub(crate) fn memrchr2(n1: u8, n2: u8, haystack: &[u8]) -> Option<usize> {
+    haystack.iter().rposition(|&b| b == n1 || b == n2)
+}
+
+pub(crate) fn memrchr3(
+    n1: u8,
+    n2: u8,
+    n3: u8,
+    haystack: &[u8],
+) -> Option<usize> {
+    haystack.iter().rposition(|&b| b == n1 || b == n2 || b == n3)
+}
diff --git a/vendor/memchr/src/tests/memchr/prop.rs b/vendor/memchr/src/tests/memchr/prop.rs
new file mode 100644
index 00000000000000..949ef1f15abc56
--- /dev/null
+++ b/vendor/memchr/src/tests/memchr/prop.rs
@@ -0,0 +1,323 @@
+/// Defines a host of quickcheck tests for the given memchr searcher.
+#[cfg(miri)]
+#[macro_export]
+macro_rules! define_memchr_quickcheck {
+    ($($tt:tt)*) => {};
+}
+
+/// Defines a host of quickcheck tests for the given memchr searcher.
+#[cfg(not(miri))]
+#[macro_export]
+macro_rules! define_memchr_quickcheck {
+    ($mod:ident) => {
+        define_memchr_quickcheck!($mod, new);
+    };
+    ($mod:ident, $cons:ident) => {
+        use alloc::vec::Vec;
+
+        use quickcheck::TestResult;
+
+        use crate::tests::memchr::{
+            naive,
+            prop::{double_ended_take, naive1_iter, naive2_iter, naive3_iter},
+        };
+
+        quickcheck::quickcheck!
{ + fn qc_memchr_matches_naive(n1: u8, corpus: Vec) -> TestResult { + let expected = naive::memchr(n1, &corpus); + let got = match $mod::One::$cons(n1) { + None => return TestResult::discard(), + Some(f) => f.find(&corpus), + }; + TestResult::from_bool(expected == got) + } + + fn qc_memrchr_matches_naive(n1: u8, corpus: Vec) -> TestResult { + let expected = naive::memrchr(n1, &corpus); + let got = match $mod::One::$cons(n1) { + None => return TestResult::discard(), + Some(f) => f.rfind(&corpus), + }; + TestResult::from_bool(expected == got) + } + + fn qc_memchr2_matches_naive(n1: u8, n2: u8, corpus: Vec) -> TestResult { + let expected = naive::memchr2(n1, n2, &corpus); + let got = match $mod::Two::$cons(n1, n2) { + None => return TestResult::discard(), + Some(f) => f.find(&corpus), + }; + TestResult::from_bool(expected == got) + } + + fn qc_memrchr2_matches_naive(n1: u8, n2: u8, corpus: Vec) -> TestResult { + let expected = naive::memrchr2(n1, n2, &corpus); + let got = match $mod::Two::$cons(n1, n2) { + None => return TestResult::discard(), + Some(f) => f.rfind(&corpus), + }; + TestResult::from_bool(expected == got) + } + + fn qc_memchr3_matches_naive( + n1: u8, n2: u8, n3: u8, + corpus: Vec + ) -> TestResult { + let expected = naive::memchr3(n1, n2, n3, &corpus); + let got = match $mod::Three::$cons(n1, n2, n3) { + None => return TestResult::discard(), + Some(f) => f.find(&corpus), + }; + TestResult::from_bool(expected == got) + } + + fn qc_memrchr3_matches_naive( + n1: u8, n2: u8, n3: u8, + corpus: Vec + ) -> TestResult { + let expected = naive::memrchr3(n1, n2, n3, &corpus); + let got = match $mod::Three::$cons(n1, n2, n3) { + None => return TestResult::discard(), + Some(f) => f.rfind(&corpus), + }; + TestResult::from_bool(expected == got) + } + + fn qc_memchr_double_ended_iter( + needle: u8, data: Vec, take_side: Vec + ) -> TestResult { + // make nonempty + let mut take_side = take_side; + if take_side.is_empty() { take_side.push(true) }; + + let finder = match $mod::One::$cons(needle) { + None => return TestResult::discard(), + Some(finder) => finder, + }; + let iter = finder.iter(&data); + let got = double_ended_take( + iter, + take_side.iter().cycle().cloned(), + ); + let expected = naive1_iter(needle, &data); + + TestResult::from_bool(got.iter().cloned().eq(expected)) + } + + fn qc_memchr2_double_ended_iter( + needle1: u8, needle2: u8, data: Vec, take_side: Vec + ) -> TestResult { + // make nonempty + let mut take_side = take_side; + if take_side.is_empty() { take_side.push(true) }; + + let finder = match $mod::Two::$cons(needle1, needle2) { + None => return TestResult::discard(), + Some(finder) => finder, + }; + let iter = finder.iter(&data); + let got = double_ended_take( + iter, + take_side.iter().cycle().cloned(), + ); + let expected = naive2_iter(needle1, needle2, &data); + + TestResult::from_bool(got.iter().cloned().eq(expected)) + } + + fn qc_memchr3_double_ended_iter( + needle1: u8, needle2: u8, needle3: u8, + data: Vec, take_side: Vec + ) -> TestResult { + // make nonempty + let mut take_side = take_side; + if take_side.is_empty() { take_side.push(true) }; + + let finder = match $mod::Three::$cons(needle1, needle2, needle3) { + None => return TestResult::discard(), + Some(finder) => finder, + }; + let iter = finder.iter(&data); + let got = double_ended_take( + iter, + take_side.iter().cycle().cloned(), + ); + let expected = naive3_iter(needle1, needle2, needle3, &data); + + TestResult::from_bool(got.iter().cloned().eq(expected)) + } + + fn qc_memchr1_iter(data: Vec) -> 
TestResult { + let needle = 0; + let finder = match $mod::One::$cons(needle) { + None => return TestResult::discard(), + Some(finder) => finder, + }; + let got = finder.iter(&data); + let expected = naive1_iter(needle, &data); + TestResult::from_bool(got.eq(expected)) + } + + fn qc_memchr1_rev_iter(data: Vec) -> TestResult { + let needle = 0; + + let finder = match $mod::One::$cons(needle) { + None => return TestResult::discard(), + Some(finder) => finder, + }; + let got = finder.iter(&data).rev(); + let expected = naive1_iter(needle, &data).rev(); + TestResult::from_bool(got.eq(expected)) + } + + fn qc_memchr2_iter(data: Vec) -> TestResult { + let needle1 = 0; + let needle2 = 1; + + let finder = match $mod::Two::$cons(needle1, needle2) { + None => return TestResult::discard(), + Some(finder) => finder, + }; + let got = finder.iter(&data); + let expected = naive2_iter(needle1, needle2, &data); + TestResult::from_bool(got.eq(expected)) + } + + fn qc_memchr2_rev_iter(data: Vec) -> TestResult { + let needle1 = 0; + let needle2 = 1; + + let finder = match $mod::Two::$cons(needle1, needle2) { + None => return TestResult::discard(), + Some(finder) => finder, + }; + let got = finder.iter(&data).rev(); + let expected = naive2_iter(needle1, needle2, &data).rev(); + TestResult::from_bool(got.eq(expected)) + } + + fn qc_memchr3_iter(data: Vec) -> TestResult { + let needle1 = 0; + let needle2 = 1; + let needle3 = 2; + + let finder = match $mod::Three::$cons(needle1, needle2, needle3) { + None => return TestResult::discard(), + Some(finder) => finder, + }; + let got = finder.iter(&data); + let expected = naive3_iter(needle1, needle2, needle3, &data); + TestResult::from_bool(got.eq(expected)) + } + + fn qc_memchr3_rev_iter(data: Vec) -> TestResult { + let needle1 = 0; + let needle2 = 1; + let needle3 = 2; + + let finder = match $mod::Three::$cons(needle1, needle2, needle3) { + None => return TestResult::discard(), + Some(finder) => finder, + }; + let got = finder.iter(&data).rev(); + let expected = naive3_iter(needle1, needle2, needle3, &data).rev(); + TestResult::from_bool(got.eq(expected)) + } + + fn qc_memchr1_iter_size_hint(data: Vec) -> TestResult { + // test that the size hint is within reasonable bounds + let needle = 0; + let finder = match $mod::One::$cons(needle) { + None => return TestResult::discard(), + Some(finder) => finder, + }; + let mut iter = finder.iter(&data); + let mut real_count = data + .iter() + .filter(|&&elt| elt == needle) + .count(); + + while let Some(index) = iter.next() { + real_count -= 1; + let (lower, upper) = iter.size_hint(); + assert!(lower <= real_count); + assert!(upper.unwrap() >= real_count); + assert!(upper.unwrap() <= data.len() - index); + } + TestResult::passed() + } + } + }; +} + +// take items from a DEI, taking front for each true and back for each false. +// Return a vector with the concatenation of the fronts and the reverse of the +// backs. 
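+//
+// The quickcheck properties above cycle `take_side`, so the iterator is
+// always drained. For a correct double-ended iterator, the fronts followed
+// by the reversed backs must then be exactly the in-order list of matches,
+// which is what gets compared against the naive iterators.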
+#[cfg(not(miri))] +pub(crate) fn double_ended_take( + mut iter: I, + take_side: J, +) -> alloc::vec::Vec +where + I: DoubleEndedIterator, + J: Iterator, +{ + let mut found_front = alloc::vec![]; + let mut found_back = alloc::vec![]; + + for take_front in take_side { + if take_front { + if let Some(pos) = iter.next() { + found_front.push(pos); + } else { + break; + } + } else { + if let Some(pos) = iter.next_back() { + found_back.push(pos); + } else { + break; + } + }; + } + + let mut all_found = found_front; + all_found.extend(found_back.into_iter().rev()); + all_found +} + +// return an iterator of the 0-based indices of haystack that match the needle +#[cfg(not(miri))] +pub(crate) fn naive1_iter<'a>( + n1: u8, + haystack: &'a [u8], +) -> impl DoubleEndedIterator + 'a { + haystack.iter().enumerate().filter(move |&(_, &b)| b == n1).map(|t| t.0) +} + +#[cfg(not(miri))] +pub(crate) fn naive2_iter<'a>( + n1: u8, + n2: u8, + haystack: &'a [u8], +) -> impl DoubleEndedIterator + 'a { + haystack + .iter() + .enumerate() + .filter(move |&(_, &b)| b == n1 || b == n2) + .map(|t| t.0) +} + +#[cfg(not(miri))] +pub(crate) fn naive3_iter<'a>( + n1: u8, + n2: u8, + n3: u8, + haystack: &'a [u8], +) -> impl DoubleEndedIterator + 'a { + haystack + .iter() + .enumerate() + .filter(move |&(_, &b)| b == n1 || b == n2 || b == n3) + .map(|t| t.0) +} diff --git a/vendor/memchr/src/tests/mod.rs b/vendor/memchr/src/tests/mod.rs new file mode 100644 index 00000000000000..259b67827a1422 --- /dev/null +++ b/vendor/memchr/src/tests/mod.rs @@ -0,0 +1,15 @@ +#[macro_use] +pub(crate) mod memchr; +pub(crate) mod packedpair; +#[macro_use] +pub(crate) mod substring; + +// For debugging, particularly in CI, print out the byte order of the current +// target. +#[test] +fn byte_order() { + #[cfg(target_endian = "little")] + std::eprintln!("LITTLE ENDIAN"); + #[cfg(target_endian = "big")] + std::eprintln!("BIG ENDIAN"); +} diff --git a/vendor/memchr/src/tests/packedpair.rs b/vendor/memchr/src/tests/packedpair.rs new file mode 100644 index 00000000000000..204635b83ea9c1 --- /dev/null +++ b/vendor/memchr/src/tests/packedpair.rs @@ -0,0 +1,216 @@ +use alloc::{boxed::Box, vec, vec::Vec}; + +/// A set of "packed pair" test seeds. Each seed serves as the base for the +/// generation of many other tests. In essence, the seed captures the pair of +/// bytes we used for a predicate and first byte among our needle. The tests +/// generated from each seed essentially vary the length of the needle and +/// haystack, while using the rare/first byte configuration from the seed. +/// +/// The purpose of this is to test many different needle/haystack lengths. +/// In particular, some of the vector optimizations might only have bugs +/// in haystacks of a certain size. +const SEEDS: &[Seed] = &[ + // Why not use different 'first' bytes? It seemed like a good idea to be + // able to configure it, but when I wrote the test generator below, it + // didn't seem necessary to use for reasons that I forget. + Seed { first: b'x', index1: b'y', index2: b'z' }, + Seed { first: b'x', index1: b'x', index2: b'z' }, + Seed { first: b'x', index1: b'y', index2: b'x' }, + Seed { first: b'x', index1: b'x', index2: b'x' }, + Seed { first: b'x', index1: b'y', index2: b'y' }, +]; + +/// Runs a host of "packed pair" search tests. +/// +/// These tests specifically look for the occurrence of a possible substring +/// match based on a pair of bytes matching at the right offsets. 
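+///
+/// Each implementation under test is handed the haystack, the needle and the
+/// two pair offsets (`index1`, `index2`), and reports the candidate position
+/// it found, if any; returning `None` for the whole call skips that test.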
+pub(crate) struct Runner { + fwd: Option< + Box< + dyn FnMut(&[u8], &[u8], u8, u8) -> Option> + 'static, + >, + >, +} + +impl Runner { + /// Create a new test runner for "packed pair" substring search. + pub(crate) fn new() -> Runner { + Runner { fwd: None } + } + + /// Run all tests. This panics on the first failure. + /// + /// If the implementation being tested returns `None` for a particular + /// haystack/needle combination, then that test is skipped. + /// + /// This runs tests on both the forward and reverse implementations given. + /// If either (or both) are missing, then tests for that implementation are + /// skipped. + pub(crate) fn run(self) { + if let Some(mut fwd) = self.fwd { + for seed in SEEDS.iter() { + for t in seed.generate() { + match fwd(&t.haystack, &t.needle, t.index1, t.index2) { + None => continue, + Some(result) => { + assert_eq!( + t.fwd, result, + "FORWARD, needle: {:?}, haystack: {:?}, \ + index1: {:?}, index2: {:?}", + t.needle, t.haystack, t.index1, t.index2, + ) + } + } + } + } + } + } + + /// Set the implementation for forward "packed pair" substring search. + /// + /// If the closure returns `None`, then it is assumed that the given + /// test cannot be applied to the particular implementation and it is + /// skipped. For example, if a particular implementation only supports + /// needles or haystacks for some minimum length. + /// + /// If this is not set, then forward "packed pair" search is not tested. + pub(crate) fn fwd( + mut self, + search: impl FnMut(&[u8], &[u8], u8, u8) -> Option> + 'static, + ) -> Runner { + self.fwd = Some(Box::new(search)); + self + } +} + +/// A test that represents the input and expected output to a "packed pair" +/// search function. The test should be able to run with any "packed pair" +/// implementation and get the expected output. +struct Test { + haystack: Vec, + needle: Vec, + index1: u8, + index2: u8, + fwd: Option, +} + +impl Test { + /// Create a new "packed pair" test from a seed and some given offsets to + /// the pair of bytes to use as a predicate in the seed's needle. + /// + /// If a valid test could not be constructed, then None is returned. + /// (Currently, we take the approach of massaging tests to be valid + /// instead of rejecting them outright.) + fn new( + seed: Seed, + index1: usize, + index2: usize, + haystack_len: usize, + needle_len: usize, + fwd: Option, + ) -> Option { + let mut index1: u8 = index1.try_into().unwrap(); + let mut index2: u8 = index2.try_into().unwrap(); + // The '#' byte is never used in a haystack (unless we're expecting + // a match), while the '@' byte is never used in a needle. + let mut haystack = vec![b'@'; haystack_len]; + let mut needle = vec![b'#'; needle_len]; + needle[0] = seed.first; + needle[index1 as usize] = seed.index1; + needle[index2 as usize] = seed.index2; + // If we're expecting a match, then make sure the needle occurs + // in the haystack at the expected position. + if let Some(i) = fwd { + haystack[i..i + needle.len()].copy_from_slice(&needle); + } + // If the operations above lead to rare offsets pointing to the + // non-first occurrence of a byte, then adjust it. This might lead + // to redundant tests, but it's simpler than trying to change the + // generation process I think. 
+ if let Some(i) = crate::memchr(seed.index1, &needle) { + index1 = u8::try_from(i).unwrap(); + } + if let Some(i) = crate::memchr(seed.index2, &needle) { + index2 = u8::try_from(i).unwrap(); + } + Some(Test { haystack, needle, index1, index2, fwd }) + } +} + +/// Data that describes a single prefilter test seed. +#[derive(Clone, Copy)] +struct Seed { + first: u8, + index1: u8, + index2: u8, +} + +impl Seed { + const NEEDLE_LENGTH_LIMIT: usize = { + #[cfg(not(miri))] + { + 33 + } + #[cfg(miri)] + { + 5 + } + }; + + const HAYSTACK_LENGTH_LIMIT: usize = { + #[cfg(not(miri))] + { + 65 + } + #[cfg(miri)] + { + 8 + } + }; + + /// Generate a series of prefilter tests from this seed. + fn generate(self) -> impl Iterator { + let len_start = 2; + // The iterator below generates *a lot* of tests. The number of + // tests was chosen somewhat empirically to be "bearable" when + // running the test suite. + // + // We use an iterator here because the collective haystacks of all + // these test cases add up to enough memory to OOM a conservative + // sandbox or a small laptop. + (len_start..=Seed::NEEDLE_LENGTH_LIMIT).flat_map(move |needle_len| { + let index_start = len_start - 1; + (index_start..needle_len).flat_map(move |index1| { + (index1..needle_len).flat_map(move |index2| { + (needle_len..=Seed::HAYSTACK_LENGTH_LIMIT).flat_map( + move |haystack_len| { + Test::new( + self, + index1, + index2, + haystack_len, + needle_len, + None, + ) + .into_iter() + .chain( + (0..=(haystack_len - needle_len)).flat_map( + move |output| { + Test::new( + self, + index1, + index2, + haystack_len, + needle_len, + Some(output), + ) + }, + ), + ) + }, + ) + }) + }) + }) + } +} diff --git a/vendor/memchr/src/tests/substring/mod.rs b/vendor/memchr/src/tests/substring/mod.rs new file mode 100644 index 00000000000000..dd10cbdd4b32c5 --- /dev/null +++ b/vendor/memchr/src/tests/substring/mod.rs @@ -0,0 +1,232 @@ +/*! +This module defines tests and test helpers for substring implementations. 
+*/ + +use alloc::{ + boxed::Box, + format, + string::{String, ToString}, +}; + +pub(crate) mod naive; +#[macro_use] +pub(crate) mod prop; + +const SEEDS: &'static [Seed] = &[ + Seed::new("", "", Some(0), Some(0)), + Seed::new("", "a", Some(0), Some(1)), + Seed::new("", "ab", Some(0), Some(2)), + Seed::new("", "abc", Some(0), Some(3)), + Seed::new("a", "", None, None), + Seed::new("a", "a", Some(0), Some(0)), + Seed::new("a", "aa", Some(0), Some(1)), + Seed::new("a", "ba", Some(1), Some(1)), + Seed::new("a", "bba", Some(2), Some(2)), + Seed::new("a", "bbba", Some(3), Some(3)), + Seed::new("a", "bbbab", Some(3), Some(3)), + Seed::new("a", "bbbabb", Some(3), Some(3)), + Seed::new("a", "bbbabbb", Some(3), Some(3)), + Seed::new("a", "bbbbbb", None, None), + Seed::new("ab", "", None, None), + Seed::new("ab", "a", None, None), + Seed::new("ab", "b", None, None), + Seed::new("ab", "ab", Some(0), Some(0)), + Seed::new("ab", "aab", Some(1), Some(1)), + Seed::new("ab", "aaab", Some(2), Some(2)), + Seed::new("ab", "abaab", Some(0), Some(3)), + Seed::new("ab", "baaab", Some(3), Some(3)), + Seed::new("ab", "acb", None, None), + Seed::new("ab", "abba", Some(0), Some(0)), + Seed::new("abc", "ab", None, None), + Seed::new("abc", "abc", Some(0), Some(0)), + Seed::new("abc", "abcz", Some(0), Some(0)), + Seed::new("abc", "abczz", Some(0), Some(0)), + Seed::new("abc", "zabc", Some(1), Some(1)), + Seed::new("abc", "zzabc", Some(2), Some(2)), + Seed::new("abc", "azbc", None, None), + Seed::new("abc", "abzc", None, None), + Seed::new("abczdef", "abczdefzzzzzzzzzzzzzzzzzzzz", Some(0), Some(0)), + Seed::new("abczdef", "zzzzzzzzzzzzzzzzzzzzabczdef", Some(20), Some(20)), + Seed::new( + "xyz", + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaxyz", + Some(32), + Some(32), + ), + Seed::new("\u{0}\u{15}", "\u{0}\u{15}\u{15}\u{0}", Some(0), Some(0)), + Seed::new("\u{0}\u{1e}", "\u{1e}\u{0}", None, None), +]; + +/// Runs a host of substring search tests. +/// +/// This has support for "partial" substring search implementations only work +/// for a subset of needles/haystacks. For example, the "packed pair" substring +/// search implementation only works for haystacks of some minimum length based +/// of the pair of bytes selected and the size of the vector used. +pub(crate) struct Runner { + fwd: Option< + Box Option> + 'static>, + >, + rev: Option< + Box Option> + 'static>, + >, +} + +impl Runner { + /// Create a new test runner for forward and reverse substring search + /// implementations. + pub(crate) fn new() -> Runner { + Runner { fwd: None, rev: None } + } + + /// Run all tests. This panics on the first failure. + /// + /// If the implementation being tested returns `None` for a particular + /// haystack/needle combination, then that test is skipped. + /// + /// This runs tests on both the forward and reverse implementations given. + /// If either (or both) are missing, then tests for that implementation are + /// skipped. 
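+    ///
+    /// For example, the naive implementations in this crate are checked with
+    /// `Runner::new().fwd(|h, n| Some(find(h, n))).run()` and the analogous
+    /// `rev` call.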
+ pub(crate) fn run(self) { + if let Some(mut fwd) = self.fwd { + for seed in SEEDS.iter() { + for t in seed.generate() { + match fwd(t.haystack.as_bytes(), t.needle.as_bytes()) { + None => continue, + Some(result) => { + assert_eq!( + t.fwd, result, + "FORWARD, needle: {:?}, haystack: {:?}", + t.needle, t.haystack, + ); + } + } + } + } + } + if let Some(mut rev) = self.rev { + for seed in SEEDS.iter() { + for t in seed.generate() { + match rev(t.haystack.as_bytes(), t.needle.as_bytes()) { + None => continue, + Some(result) => { + assert_eq!( + t.rev, result, + "REVERSE, needle: {:?}, haystack: {:?}", + t.needle, t.haystack, + ); + } + } + } + } + } + } + + /// Set the implementation for forward substring search. + /// + /// If the closure returns `None`, then it is assumed that the given + /// test cannot be applied to the particular implementation and it is + /// skipped. For example, if a particular implementation only supports + /// needles or haystacks for some minimum length. + /// + /// If this is not set, then forward substring search is not tested. + pub(crate) fn fwd( + mut self, + search: impl FnMut(&[u8], &[u8]) -> Option> + 'static, + ) -> Runner { + self.fwd = Some(Box::new(search)); + self + } + + /// Set the implementation for reverse substring search. + /// + /// If the closure returns `None`, then it is assumed that the given + /// test cannot be applied to the particular implementation and it is + /// skipped. For example, if a particular implementation only supports + /// needles or haystacks for some minimum length. + /// + /// If this is not set, then reverse substring search is not tested. + pub(crate) fn rev( + mut self, + search: impl FnMut(&[u8], &[u8]) -> Option> + 'static, + ) -> Runner { + self.rev = Some(Box::new(search)); + self + } +} + +/// A single substring test for forward and reverse searches. +#[derive(Clone, Debug)] +struct Test { + needle: String, + haystack: String, + fwd: Option, + rev: Option, +} + +/// A single substring test for forward and reverse searches. +/// +/// Each seed is valid on its own, but it also serves as a starting point +/// to generate more tests. Namely, we pad out the haystacks with other +/// characters so that we get more complete coverage. This is especially useful +/// for testing vector algorithms that tend to have weird special cases for +/// alignment and loop unrolling. +/// +/// Padding works by assuming certain characters never otherwise appear in a +/// needle or a haystack. Neither should contain a `#` character. +#[derive(Clone, Copy, Debug)] +struct Seed { + needle: &'static str, + haystack: &'static str, + fwd: Option, + rev: Option, +} + +impl Seed { + const MAX_PAD: usize = 34; + + const fn new( + needle: &'static str, + haystack: &'static str, + fwd: Option, + rev: Option, + ) -> Seed { + Seed { needle, haystack, fwd, rev } + } + + fn generate(self) -> impl Iterator { + assert!(!self.needle.contains('#'), "needle must not contain '#'"); + assert!(!self.haystack.contains('#'), "haystack must not contain '#'"); + (0..=Seed::MAX_PAD) + // Generate tests for padding at the beginning of haystack. 
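+            // A prefix pad of length `pad` shifts every expected offset by
+            // `pad`; the empty needle is special-cased to match at 0 going
+            // forward and at `haystack.len()` in reverse.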
+ .map(move |pad| { + let needle = self.needle.to_string(); + let prefix = "#".repeat(pad); + let haystack = format!("{}{}", prefix, self.haystack); + let fwd = if needle.is_empty() { + Some(0) + } else { + self.fwd.map(|i| pad + i) + }; + let rev = if needle.is_empty() { + Some(haystack.len()) + } else { + self.rev.map(|i| pad + i) + }; + Test { needle, haystack, fwd, rev } + }) + // Generate tests for padding at the end of haystack. + .chain((1..=Seed::MAX_PAD).map(move |pad| { + let needle = self.needle.to_string(); + let suffix = "#".repeat(pad); + let haystack = format!("{}{}", self.haystack, suffix); + let fwd = if needle.is_empty() { Some(0) } else { self.fwd }; + let rev = if needle.is_empty() { + Some(haystack.len()) + } else { + self.rev + }; + Test { needle, haystack, fwd, rev } + })) + } +} diff --git a/vendor/memchr/src/tests/substring/naive.rs b/vendor/memchr/src/tests/substring/naive.rs new file mode 100644 index 00000000000000..1bc6009849f12d --- /dev/null +++ b/vendor/memchr/src/tests/substring/naive.rs @@ -0,0 +1,45 @@ +/*! +This module defines "naive" implementations of substring search. + +These are sometimes useful to compare with "real" substring implementations. +The idea is that they are so simple that they are unlikely to be incorrect. +*/ + +/// Naively search forwards for the given needle in the given haystack. +pub(crate) fn find(haystack: &[u8], needle: &[u8]) -> Option { + let end = haystack.len().checked_sub(needle.len()).map_or(0, |i| i + 1); + for i in 0..end { + if needle == &haystack[i..i + needle.len()] { + return Some(i); + } + } + None +} + +/// Naively search in reverse for the given needle in the given haystack. +pub(crate) fn rfind(haystack: &[u8], needle: &[u8]) -> Option { + let end = haystack.len().checked_sub(needle.len()).map_or(0, |i| i + 1); + for i in (0..end).rev() { + if needle == &haystack[i..i + needle.len()] { + return Some(i); + } + } + None +} + +#[cfg(test)] +mod tests { + use crate::tests::substring; + + use super::*; + + #[test] + fn forward() { + substring::Runner::new().fwd(|h, n| Some(find(h, n))).run() + } + + #[test] + fn reverse() { + substring::Runner::new().rev(|h, n| Some(rfind(h, n))).run() + } +} diff --git a/vendor/memchr/src/tests/substring/prop.rs b/vendor/memchr/src/tests/substring/prop.rs new file mode 100644 index 00000000000000..a8352ec74c5acc --- /dev/null +++ b/vendor/memchr/src/tests/substring/prop.rs @@ -0,0 +1,126 @@ +/*! +This module defines a few quickcheck properties for substring search. + +It also provides a forward and reverse macro for conveniently defining +quickcheck tests that run these properties over any substring search +implementation. +*/ + +use crate::tests::substring::naive; + +/// $fwd is a `impl FnMut(haystack, needle) -> Option>`. When the +/// routine returns `None`, then it's skipped, which is useful for substring +/// implementations that don't work for all inputs. +#[macro_export] +macro_rules! define_substring_forward_quickcheck { + ($fwd:expr) => { + #[cfg(not(miri))] + quickcheck::quickcheck! 
{ + fn qc_fwd_prefix_is_substring(bs: alloc::vec::Vec) -> bool { + crate::tests::substring::prop::prefix_is_substring(&bs, $fwd) + } + + fn qc_fwd_suffix_is_substring(bs: alloc::vec::Vec) -> bool { + crate::tests::substring::prop::suffix_is_substring(&bs, $fwd) + } + + fn qc_fwd_matches_naive( + haystack: alloc::vec::Vec, + needle: alloc::vec::Vec + ) -> bool { + crate::tests::substring::prop::same_as_naive( + false, + &haystack, + &needle, + $fwd, + ) + } + } + }; +} + +/// $rev is a `impl FnMut(haystack, needle) -> Option>`. When the +/// routine returns `None`, then it's skipped, which is useful for substring +/// implementations that don't work for all inputs. +#[macro_export] +macro_rules! define_substring_reverse_quickcheck { + ($rev:expr) => { + #[cfg(not(miri))] + quickcheck::quickcheck! { + fn qc_rev_prefix_is_substring(bs: alloc::vec::Vec) -> bool { + crate::tests::substring::prop::prefix_is_substring(&bs, $rev) + } + + fn qc_rev_suffix_is_substring(bs: alloc::vec::Vec) -> bool { + crate::tests::substring::prop::suffix_is_substring(&bs, $rev) + } + + fn qc_rev_matches_naive( + haystack: alloc::vec::Vec, + needle: alloc::vec::Vec + ) -> bool { + crate::tests::substring::prop::same_as_naive( + true, + &haystack, + &needle, + $rev, + ) + } + } + }; +} + +/// Check that every prefix of the given byte string is a substring. +pub(crate) fn prefix_is_substring( + bs: &[u8], + mut search: impl FnMut(&[u8], &[u8]) -> Option>, +) -> bool { + for i in 0..bs.len().saturating_sub(1) { + let prefix = &bs[..i]; + let result = match search(bs, prefix) { + None => continue, + Some(result) => result, + }; + if !result.is_some() { + return false; + } + } + true +} + +/// Check that every suffix of the given byte string is a substring. +pub(crate) fn suffix_is_substring( + bs: &[u8], + mut search: impl FnMut(&[u8], &[u8]) -> Option>, +) -> bool { + for i in 0..bs.len().saturating_sub(1) { + let suffix = &bs[i..]; + let result = match search(bs, suffix) { + None => continue, + Some(result) => result, + }; + if !result.is_some() { + return false; + } + } + true +} + +/// Check that naive substring search matches the result of the given search +/// algorithm. +pub(crate) fn same_as_naive( + reverse: bool, + haystack: &[u8], + needle: &[u8], + mut search: impl FnMut(&[u8], &[u8]) -> Option>, +) -> bool { + let result = match search(haystack, needle) { + None => return true, + Some(result) => result, + }; + if reverse { + result == naive::rfind(haystack, needle) + } else { + result == naive::find(haystack, needle) + } +} diff --git a/vendor/memchr/src/vector.rs b/vendor/memchr/src/vector.rs new file mode 100644 index 00000000000000..69f2af01b46a44 --- /dev/null +++ b/vendor/memchr/src/vector.rs @@ -0,0 +1,501 @@ +/// A trait for describing vector operations used by vectorized searchers. +/// +/// The trait is highly constrained to low level vector operations needed. +/// In general, it was invented mostly to be generic over x86's __m128i and +/// __m256i types. At time of writing, it also supports wasm and aarch64 +/// 128-bit vector types as well. +/// +/// # Safety +/// +/// All methods are not safe since they are intended to be implemented using +/// vendor intrinsics, which are also not safe. Callers must ensure that the +/// appropriate target features are enabled in the calling function, and that +/// the current CPU supports them. 
All implementations should avoid marking the +/// routines with #[target_feature] and instead mark them as #[inline(always)] +/// to ensure they get appropriately inlined. (inline(always) cannot be used +/// with target_feature.) +pub(crate) trait Vector: Copy + core::fmt::Debug { + /// The number of bytes in the vector. That is, this is the size of the + /// vector in memory. + const BYTES: usize; + /// The bits that must be zero in order for a `*const u8` pointer to be + /// correctly aligned to read vector values. + const ALIGN: usize; + + /// The type of the value returned by `Vector::movemask`. + /// + /// This supports abstracting over the specific representation used in + /// order to accommodate different representations in different ISAs. + type Mask: MoveMask; + + /// Create a vector with 8-bit lanes with the given byte repeated into each + /// lane. + unsafe fn splat(byte: u8) -> Self; + + /// Read a vector-size number of bytes from the given pointer. The pointer + /// must be aligned to the size of the vector. + /// + /// # Safety + /// + /// Callers must guarantee that at least `BYTES` bytes are readable from + /// `data` and that `data` is aligned to a `BYTES` boundary. + unsafe fn load_aligned(data: *const u8) -> Self; + + /// Read a vector-size number of bytes from the given pointer. The pointer + /// does not need to be aligned. + /// + /// # Safety + /// + /// Callers must guarantee that at least `BYTES` bytes are readable from + /// `data`. + unsafe fn load_unaligned(data: *const u8) -> Self; + + /// _mm_movemask_epi8 or _mm256_movemask_epi8 + unsafe fn movemask(self) -> Self::Mask; + /// _mm_cmpeq_epi8 or _mm256_cmpeq_epi8 + unsafe fn cmpeq(self, vector2: Self) -> Self; + /// _mm_and_si128 or _mm256_and_si256 + unsafe fn and(self, vector2: Self) -> Self; + /// _mm_or or _mm256_or_si256 + unsafe fn or(self, vector2: Self) -> Self; + /// Returns true if and only if `Self::movemask` would return a mask that + /// contains at least one non-zero bit. + unsafe fn movemask_will_have_non_zero(self) -> bool { + self.movemask().has_non_zero() + } +} + +/// A trait that abstracts over a vector-to-scalar operation called +/// "move mask." +/// +/// On x86-64, this is `_mm_movemask_epi8` for SSE2 and `_mm256_movemask_epi8` +/// for AVX2. It takes a vector of `u8` lanes and returns a scalar where the +/// `i`th bit is set if and only if the most significant bit in the `i`th lane +/// of the vector is set. The simd128 ISA for wasm32 also supports this +/// exact same operation natively. +/// +/// ... But aarch64 doesn't. So we have to fake it with more instructions and +/// a slightly different representation. We could do extra work to unify the +/// representations, but then would require additional costs in the hot path +/// for `memchr` and `packedpair`. So instead, we abstraction over the specific +/// representation with this trait and define the operations we actually need. +pub(crate) trait MoveMask: Copy + core::fmt::Debug { + /// Return a mask that is all zeros except for the least significant `n` + /// lanes in a corresponding vector. + fn all_zeros_except_least_significant(n: usize) -> Self; + + /// Returns true if and only if this mask has a a non-zero bit anywhere. + fn has_non_zero(self) -> bool; + + /// Returns the number of bits set to 1 in this mask. + fn count_ones(self) -> usize; + + /// Does a bitwise `and` operation between `self` and `other`. + fn and(self, other: Self) -> Self; + + /// Does a bitwise `or` operation between `self` and `other`. 
+ fn or(self, other: Self) -> Self; + + /// Returns a mask that is equivalent to `self` but with the least + /// significant 1-bit set to 0. + fn clear_least_significant_bit(self) -> Self; + + /// Returns the offset of the first non-zero lane this mask represents. + fn first_offset(self) -> usize; + + /// Returns the offset of the last non-zero lane this mask represents. + fn last_offset(self) -> usize; +} + +/// This is a "sensible" movemask implementation where each bit represents +/// whether the most significant bit is set in each corresponding lane of a +/// vector. This is used on x86-64 and wasm, but such a mask is more expensive +/// to get on aarch64 so we use something a little different. +/// +/// We call this "sensible" because this is what we get using native sse/avx +/// movemask instructions. But neon has no such native equivalent. +#[derive(Clone, Copy, Debug)] +pub(crate) struct SensibleMoveMask(u32); + +impl SensibleMoveMask { + /// Get the mask in a form suitable for computing offsets. + /// + /// Basically, this normalizes to little endian. On big endian, this swaps + /// the bytes. + #[inline(always)] + fn get_for_offset(self) -> u32 { + #[cfg(target_endian = "big")] + { + self.0.swap_bytes() + } + #[cfg(target_endian = "little")] + { + self.0 + } + } +} + +impl MoveMask for SensibleMoveMask { + #[inline(always)] + fn all_zeros_except_least_significant(n: usize) -> SensibleMoveMask { + debug_assert!(n < 32); + SensibleMoveMask(!((1 << n) - 1)) + } + + #[inline(always)] + fn has_non_zero(self) -> bool { + self.0 != 0 + } + + #[inline(always)] + fn count_ones(self) -> usize { + self.0.count_ones() as usize + } + + #[inline(always)] + fn and(self, other: SensibleMoveMask) -> SensibleMoveMask { + SensibleMoveMask(self.0 & other.0) + } + + #[inline(always)] + fn or(self, other: SensibleMoveMask) -> SensibleMoveMask { + SensibleMoveMask(self.0 | other.0) + } + + #[inline(always)] + fn clear_least_significant_bit(self) -> SensibleMoveMask { + SensibleMoveMask(self.0 & (self.0 - 1)) + } + + #[inline(always)] + fn first_offset(self) -> usize { + // We are dealing with little endian here (and if we aren't, we swap + // the bytes so we are in practice), where the most significant byte + // is at a higher address. That means the least significant bit that + // is set corresponds to the position of our first matching byte. + // That position corresponds to the number of zeros after the least + // significant bit. + self.get_for_offset().trailing_zeros() as usize + } + + #[inline(always)] + fn last_offset(self) -> usize { + // We are dealing with little endian here (and if we aren't, we swap + // the bytes so we are in practice), where the most significant byte is + // at a higher address. That means the most significant bit that is set + // corresponds to the position of our last matching byte. The position + // from the end of the mask is therefore the number of leading zeros + // in a 32 bit integer, and the position from the start of the mask is + // therefore 32 - (leading zeros) - 1. 
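+        //
+        // For example, if only bit 3 is set, then there are 28 leading
+        // zeros and 32 - 28 - 1 recovers the matching lane offset of 3.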
+ 32 - self.get_for_offset().leading_zeros() as usize - 1 + } +} + +#[cfg(target_arch = "x86_64")] +mod x86sse2 { + use core::arch::x86_64::*; + + use super::{SensibleMoveMask, Vector}; + + impl Vector for __m128i { + const BYTES: usize = 16; + const ALIGN: usize = Self::BYTES - 1; + + type Mask = SensibleMoveMask; + + #[inline(always)] + unsafe fn splat(byte: u8) -> __m128i { + _mm_set1_epi8(byte as i8) + } + + #[inline(always)] + unsafe fn load_aligned(data: *const u8) -> __m128i { + _mm_load_si128(data as *const __m128i) + } + + #[inline(always)] + unsafe fn load_unaligned(data: *const u8) -> __m128i { + _mm_loadu_si128(data as *const __m128i) + } + + #[inline(always)] + unsafe fn movemask(self) -> SensibleMoveMask { + SensibleMoveMask(_mm_movemask_epi8(self) as u32) + } + + #[inline(always)] + unsafe fn cmpeq(self, vector2: Self) -> __m128i { + _mm_cmpeq_epi8(self, vector2) + } + + #[inline(always)] + unsafe fn and(self, vector2: Self) -> __m128i { + _mm_and_si128(self, vector2) + } + + #[inline(always)] + unsafe fn or(self, vector2: Self) -> __m128i { + _mm_or_si128(self, vector2) + } + } +} + +#[cfg(target_arch = "x86_64")] +mod x86avx2 { + use core::arch::x86_64::*; + + use super::{SensibleMoveMask, Vector}; + + impl Vector for __m256i { + const BYTES: usize = 32; + const ALIGN: usize = Self::BYTES - 1; + + type Mask = SensibleMoveMask; + + #[inline(always)] + unsafe fn splat(byte: u8) -> __m256i { + _mm256_set1_epi8(byte as i8) + } + + #[inline(always)] + unsafe fn load_aligned(data: *const u8) -> __m256i { + _mm256_load_si256(data as *const __m256i) + } + + #[inline(always)] + unsafe fn load_unaligned(data: *const u8) -> __m256i { + _mm256_loadu_si256(data as *const __m256i) + } + + #[inline(always)] + unsafe fn movemask(self) -> SensibleMoveMask { + SensibleMoveMask(_mm256_movemask_epi8(self) as u32) + } + + #[inline(always)] + unsafe fn cmpeq(self, vector2: Self) -> __m256i { + _mm256_cmpeq_epi8(self, vector2) + } + + #[inline(always)] + unsafe fn and(self, vector2: Self) -> __m256i { + _mm256_and_si256(self, vector2) + } + + #[inline(always)] + unsafe fn or(self, vector2: Self) -> __m256i { + _mm256_or_si256(self, vector2) + } + } +} + +#[cfg(target_arch = "aarch64")] +mod aarch64neon { + use core::arch::aarch64::*; + + use super::{MoveMask, Vector}; + + impl Vector for uint8x16_t { + const BYTES: usize = 16; + const ALIGN: usize = Self::BYTES - 1; + + type Mask = NeonMoveMask; + + #[inline(always)] + unsafe fn splat(byte: u8) -> uint8x16_t { + vdupq_n_u8(byte) + } + + #[inline(always)] + unsafe fn load_aligned(data: *const u8) -> uint8x16_t { + // I've tried `data.cast::().read()` instead, but + // couldn't observe any benchmark differences. + Self::load_unaligned(data) + } + + #[inline(always)] + unsafe fn load_unaligned(data: *const u8) -> uint8x16_t { + vld1q_u8(data) + } + + #[inline(always)] + unsafe fn movemask(self) -> NeonMoveMask { + let asu16s = vreinterpretq_u16_u8(self); + let mask = vshrn_n_u16(asu16s, 4); + let asu64 = vreinterpret_u64_u8(mask); + let scalar64 = vget_lane_u64(asu64, 0); + NeonMoveMask(scalar64 & 0x8888888888888888) + } + + #[inline(always)] + unsafe fn cmpeq(self, vector2: Self) -> uint8x16_t { + vceqq_u8(self, vector2) + } + + #[inline(always)] + unsafe fn and(self, vector2: Self) -> uint8x16_t { + vandq_u8(self, vector2) + } + + #[inline(always)] + unsafe fn or(self, vector2: Self) -> uint8x16_t { + vorrq_u8(self, vector2) + } + + /// This is the only interesting implementation of this routine. 
+ /// Basically, instead of doing the "shift right narrow" dance, we use + /// adjacent folding max to determine whether there are any non-zero + /// bytes in our mask. If there are, *then* we'll do the "shift right + /// narrow" dance. In benchmarks, this does lead to slightly better + /// throughput, but the win doesn't appear huge. + #[inline(always)] + unsafe fn movemask_will_have_non_zero(self) -> bool { + let low = vreinterpretq_u64_u8(vpmaxq_u8(self, self)); + vgetq_lane_u64(low, 0) != 0 + } + } + + /// Neon doesn't have a `movemask` that works like the one in x86-64, so we + /// wind up using a different method[1]. The different method also produces + /// a mask, but 4 bits are set in the neon case instead of a single bit set + /// in the x86-64 case. We do an extra step to zero out 3 of the 4 bits, + /// but we still wind up with at least 3 zeroes between each set bit. This + /// generally means that we need to do some division by 4 before extracting + /// offsets. + /// + /// In fact, the existence of this type is the entire reason that we have + /// the `MoveMask` trait in the first place. This basically lets us keep + /// the different representations of masks without being forced to unify + /// them into a single representation, which could result in extra and + /// unnecessary work. + /// + /// [1]: https://community.arm.com/arm-community-blogs/b/infrastructure-solutions-blog/posts/porting-x86-vector-bitmask-optimizations-to-arm-neon + #[derive(Clone, Copy, Debug)] + pub(crate) struct NeonMoveMask(u64); + + impl NeonMoveMask { + /// Get the mask in a form suitable for computing offsets. + /// + /// The mask is always already in host-endianness, so this is a no-op. + #[inline(always)] + fn get_for_offset(self) -> u64 { + self.0 + } + } + + impl MoveMask for NeonMoveMask { + #[inline(always)] + fn all_zeros_except_least_significant(n: usize) -> NeonMoveMask { + debug_assert!(n < 16); + NeonMoveMask(!(((1 << n) << 2) - 1)) + } + + #[inline(always)] + fn has_non_zero(self) -> bool { + self.0 != 0 + } + + #[inline(always)] + fn count_ones(self) -> usize { + self.0.count_ones() as usize + } + + #[inline(always)] + fn and(self, other: NeonMoveMask) -> NeonMoveMask { + NeonMoveMask(self.0 & other.0) + } + + #[inline(always)] + fn or(self, other: NeonMoveMask) -> NeonMoveMask { + NeonMoveMask(self.0 | other.0) + } + + #[inline(always)] + fn clear_least_significant_bit(self) -> NeonMoveMask { + NeonMoveMask(self.0 & (self.0 - 1)) + } + + #[inline(always)] + fn first_offset(self) -> usize { + // We are dealing with little endian here (and if we aren't, + // we swap the bytes so we are in practice), where the most + // significant byte is at a higher address. That means the least + // significant bit that is set corresponds to the position of our + // first matching byte. That position corresponds to the number of + // zeros after the least significant bit. + // + // Note that unlike `SensibleMoveMask`, this mask has its bits + // spread out over 64 bits instead of 16 bits (for a 128 bit + // vector). Namely, where as x86-64 will turn + // + // 0x00 0xFF 0x00 0x00 0xFF + // + // into 10010, our neon approach will turn it into + // + // 10000000000010000000 + // + // And this happens because neon doesn't have a native `movemask` + // instruction, so we kind of fake it[1]. Thus, we divide the + // number of trailing zeros by 4 to get the "real" offset. 
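+            //
+            // In the example above, the first matching lane is lane 1,
+            // which sets bit 7: trailing_zeros() returns 7 and 7 >> 2
+            // recovers the offset 1.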
+ // + // [1]: https://community.arm.com/arm-community-blogs/b/infrastructure-solutions-blog/posts/porting-x86-vector-bitmask-optimizations-to-arm-neon + (self.get_for_offset().trailing_zeros() >> 2) as usize + } + + #[inline(always)] + fn last_offset(self) -> usize { + // See comment in `first_offset` above. This is basically the same, + // but coming from the other direction. + 16 - (self.get_for_offset().leading_zeros() >> 2) as usize - 1 + } + } +} + +#[cfg(all(target_arch = "wasm32", target_feature = "simd128"))] +mod wasm_simd128 { + use core::arch::wasm32::*; + + use super::{SensibleMoveMask, Vector}; + + impl Vector for v128 { + const BYTES: usize = 16; + const ALIGN: usize = Self::BYTES - 1; + + type Mask = SensibleMoveMask; + + #[inline(always)] + unsafe fn splat(byte: u8) -> v128 { + u8x16_splat(byte) + } + + #[inline(always)] + unsafe fn load_aligned(data: *const u8) -> v128 { + *data.cast() + } + + #[inline(always)] + unsafe fn load_unaligned(data: *const u8) -> v128 { + v128_load(data.cast()) + } + + #[inline(always)] + unsafe fn movemask(self) -> SensibleMoveMask { + SensibleMoveMask(u8x16_bitmask(self).into()) + } + + #[inline(always)] + unsafe fn cmpeq(self, vector2: Self) -> v128 { + u8x16_eq(self, vector2) + } + + #[inline(always)] + unsafe fn and(self, vector2: Self) -> v128 { + v128_and(self, vector2) + } + + #[inline(always)] + unsafe fn or(self, vector2: Self) -> v128 { + v128_or(self, vector2) + } + } +} diff --git a/vendor/minimal-lexical/.cargo-checksum.json b/vendor/minimal-lexical/.cargo-checksum.json new file mode 100644 index 00000000000000..20c50c75217ea4 --- /dev/null +++ b/vendor/minimal-lexical/.cargo-checksum.json @@ -0,0 +1 @@ +{"files":{".cargo_vcs_info.json":"e819b814dde0c854395882e9f65856c3196961ceced92eeab6ade4d350e7cccc",".github/ISSUE_TEMPLATE/bug_report.md":"cce60fa26d7e6afb7aa84755d6bc6431afe1f390823033545ac3ac9d94740b19",".github/ISSUE_TEMPLATE/custom.md":"b52f73fd67ebd71d43f36d5d1a2f3a53d1f32e126f70ccf0126900ff9f2aec3c",".github/ISSUE_TEMPLATE/documentation.md":"986b9a1421dc15af628bdff8691eeb39d92e36bedb7742d2a4d8327f6cb921a3",".github/ISSUE_TEMPLATE/feature_request.md":"e7861c6047eb39fb4dead4198c141817215839fddb43d16cb6e679417428a73e",".github/ISSUE_TEMPLATE/question.md":"75d3de186382ff882e26e1aba65b4b207cbd3822b9491cd92886fa7987a6ba23",".github/PULL_REQUEST_TEMPLATE/bug_fix.md":"8d7bfb13212e583b9cb717ec39ac2d2070d85470bdf81a32f89e91796a14efcc",".github/PULL_REQUEST_TEMPLATE/custom.md":"88e332c54fe5a52842abdc33e129fa12b0b39c1aaa78da16bc3e1ccce0f3e643",".github/PULL_REQUEST_TEMPLATE/documentation.md":"ac8bae6001c6822dc6d2334c085018c38a8f121f0c580b33b770357170a59c76",".github/workflows/Cross.yml":"51cd10949a21f4aa734a45c06021b53f81cebddcde6723e69caf39d6b7a53cc7",".github/workflows/Features.yml":"4b7182995976d3872853555e989d49be03cfacf92a6807317c01623a1de59742",".github/workflows/OSX.yml":"1ffe8ad7703afb4bc67caf52550b53095861f7290e9b1cbd9f7f7e62de82b3b4",".github/workflows/Simple.yml":"6c681d49afdf74a85757fca4d6bfce076daebbb8816409f42345c2782ba5408d",".github/workflows/Valgrind.yml":"5beae6618e643ef75a6cdc6622bb64a586f3bc956401551920716564d4f3c1fe",".gitmodules":"6976207a02c7160a3a1d076c5fae10fe4b78f58cdc0aa66ae47f3855b3c392fb","CHANGELOG":"55ebcf7ee0fd10987829a98fb9757cbc6f68c62198bc70122384bedc08de9915","CODE_OF_CONDUCT.md":"0bd67c62d204ec67cb29969aaf5aac337a77c84b318937bc2d9dc7e3fcbcdcaa","Cargo.toml":"6c485fa605a0d3de6ec0af125b67c55224515354034f990334b1a1a86988c632","Cargo.toml.orig":"8f0cdacc663b3e6a07f803c2c436d18254a9ad55057782ff350aaf4bfb2
121e2","LICENSE-APACHE":"8173d5c29b4f956d532781d2b86e4e30f83e6b7878dce18c919451d6ba707c90","LICENSE-MIT":"23f18e03dc49df91622fe2a76176497404e46ced8a715d9d2b67a7446571cca3","LICENSE.md":"dbe1fff0fb1314b6af94f161511406275cf01c5a32441fbf24528a57a051d599","README.md":"00d521d93124e88edf58d42b3114786c0daaf437e73118eed27b3c716b7514ad","clippy.toml":"8d3aafbcf358ccf45cc148cd705f5fe71e777dabc391ed9a2bae32af73291fe8","rustfmt.toml":"ae46c79a84842907e151ca5d07e36b8d1932b61c6989465500c0f706507f42cc","src/bellerophon.rs":"bbabeccfa7a70ec314a894d968b424d1162df1b9e5e659baa1fd3a37d985fe75","src/bigint.rs":"93d3332d01bb7745bc569bc6b242c6c71c75eb78835b6a05bad91952b989daf0","src/extended_float.rs":"6040bdd49c03f11f8607b2da1703e1b7b5f57ddac9b02322c6958f9c1684496f","src/fpu.rs":"72d63bac2bbfc545128aa59dc35cfad3c553bac64bf575880775b3c4ace415bc","src/heapvec.rs":"836a49d40e5da90d195508e963d869afd78aaf9adb9cd60a0cb8f92b4e105c4f","src/lemire.rs":"100f3cb293deed0b0d1e7ca6b23152ba160f92f887f8924620b28c9ab77326db","src/lib.rs":"626dfc61992c42d4996dddcbfff5775ff2ffae44d116d2d70f6564a3209c0a9a","src/libm.rs":"ed5a3856eaa44a8a05aa123e27c2048b92ff42e4af1ef3f9fa1aff2a50190f4a","src/mask.rs":"63bcda92d14169a55ac54798f45365cef64a1aecd9625c3c3bc3deae202b2a07","src/num.rs":"dbcab14a5fe8e40e381829426dd75a7db672882592b5c4a08897a2fb6d2ae7ea","src/number.rs":"49d0880a99816ecf904fb88f607a821d6770ec270825b96e800a297ab1a01d78","src/parse.rs":"19559db67eddd17d331274cf87d6c4beeb0724dcdf859de9b8ab5995c4b8e682","src/rounding.rs":"8ba42d31618db1e6a381f8b60ffe1f9d216aaccb931b8fac5f279d8465e35cb7","src/slow.rs":"f096e7f83e8372e71568ec1724bc1c9d2c67ca39b80290ba062e60ae94b1f8d1","src/stackvec.rs":"0c921eb3adbd42cbe7be0f363e08ac85b6d5f1dabd4a7b077becddeff731da16","src/table.rs":"e4288891e9b1d8ba60dcc73edc639754cc2351d3219df8c625e694f3f0e58c5a","src/table_bellerophon.rs":"a2102292b27223a81e60a8a6607c42587efde3424526156921167742a0d5937c","src/table_lemire.rs":"c101c353c38b594f5b8987263b759927095b5dcd72e65607cc1c6a7de0bfd0c8","src/table_small.rs":"61b00e13eb3945622bf8bed374c88e29c63dfedb7384b31b35dd4e543cbe62c1","tests/bellerophon.rs":"b17b87b8963ebcd71f684e4d48c1ce619964e4fb719a5875b0ce4514ed528674","tests/bellerophon_tests.rs":"76b71efa2f4cec56a79535e2d292788a5e1b443c901ec7a234800782f36ddb68","tests/integration_tests.rs":"ed1a1fc46fc239eb4ea718057ad6e9869f633797ef72fc6f05b1757ab80e1641","tests/lemire_tests.rs":"6213bcd9b44def655b44a6b760cee0c0ad82d3bb494f48c2ff100698da34625b","tests/libm_tests.rs":"6941e74d7d0adf021edc93b9919275e82810687ff33147a10361613073b22669","tests/mask_tests.rs":"8c2a3daf434815389b6bf88837e3f382d74d97250104b925d70779366bd3d537","tests/number_tests.rs":"df4b4f0c65478f2f6193bd918fa4aad7163e40598c58df44547c4559e4a8b0c7","tests/parse_tests.rs":"bc0066b9257368f0365276fcffa2662c4699a033eaf9a4a7d6faa0e9b915094a","tests/rounding_tests.rs":"99f38b768ad15e726559c446825f9f1bad67935cdd28ffcc1cbcd3e031a901ea","tests/slow_tests.rs":"36c4c2538d2f5a1c1af5deb26ec4eba47f19f9a3c280a13d10704267a16d3b3f","tests/stackvec.rs":"f040611995bcd1bd2cb47694e74aa02ff4fabdffe007f712c9bb788d82dfb8a7","tests/vec_tests.rs":"09b561160df3b1385876db452bb5a67ef2c9fd2cc36b5687e1dfaf8c58947782"},"package":"68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a"} \ No newline at end of file diff --git a/vendor/minimal-lexical/.cargo_vcs_info.json b/vendor/minimal-lexical/.cargo_vcs_info.json new file mode 100644 index 00000000000000..33bf0d300b17dc --- /dev/null +++ b/vendor/minimal-lexical/.cargo_vcs_info.json @@ -0,0 +1,5 @@ +{ + "git": { + "sha1": 
"e997c46656ebe83e696b866bd954da1fa3f64eef" + } +} diff --git a/vendor/minimal-lexical/.github/ISSUE_TEMPLATE/bug_report.md b/vendor/minimal-lexical/.github/ISSUE_TEMPLATE/bug_report.md new file mode 100644 index 00000000000000..1839a7fa6bdfef --- /dev/null +++ b/vendor/minimal-lexical/.github/ISSUE_TEMPLATE/bug_report.md @@ -0,0 +1,41 @@ +--- +name: Bug report +about: Create a report to help us improve. +title: "[BUG]" +labels: bug +assignees: Alexhuszagh + +--- + +## Description + +Please include a clear and concise description of the bug. If the bug includes a security vulnerability, you may also privately report the issue to the [maintainer](mailto:ahuszagh@gmail.com). + +## Prerequisites + +Here are a few things you should provide to help me understand the issue: + +- Rust version: `rustc -V` +- minimal-lexical version: + +## Test case + +Please provide a short, complete (with crate import, etc) test case for +the issue, showing clearly the expected and obtained results. + +Example test case: + +``` +#[macro_use] +extern crate minimal_Lexical; + +fn main() { + let integer = b"1"; + let fraction = b"2345"; + let float: f64 = minimal_lexical::parse_float(integer.iter(), fraction.iter(), 0); + assert_eq!(value, 1.2345); +} +``` + +## Additional Context +Add any other context about the problem here. diff --git a/vendor/minimal-lexical/.github/ISSUE_TEMPLATE/custom.md b/vendor/minimal-lexical/.github/ISSUE_TEMPLATE/custom.md new file mode 100644 index 00000000000000..e12e02934a854b --- /dev/null +++ b/vendor/minimal-lexical/.github/ISSUE_TEMPLATE/custom.md @@ -0,0 +1,21 @@ +--- +name: Custom issue template +about: Issue template for miscellaneous issues. +title: "[OTHER]" +labels: '' +assignees: Alexhuszagh + +--- + +## Prerequisites + +If applicable to the issue, here are a few things you should provide to help me understand the issue: + +- Rust version: `rustc -V` +- minimal-lexical version: + +## Description +Please include a clear and concise description of the issue. + +## Additional Context +Add any other context or screenshots about the issue here. diff --git a/vendor/minimal-lexical/.github/ISSUE_TEMPLATE/documentation.md b/vendor/minimal-lexical/.github/ISSUE_TEMPLATE/documentation.md new file mode 100644 index 00000000000000..2d6b3e4ba1e5ba --- /dev/null +++ b/vendor/minimal-lexical/.github/ISSUE_TEMPLATE/documentation.md @@ -0,0 +1,16 @@ +--- +name: Documentation +about: Update the project's documentation. +title: "[DOC]" +labels: documentation +assignees: Alexhuszagh + +--- + +## Description +Please include a clear and concise description of the issue. + +Ex: Documentation for `parse_float` contains a typo. + +## Additional Context +Add any other context or screenshots about the issue here. diff --git a/vendor/minimal-lexical/.github/ISSUE_TEMPLATE/feature_request.md b/vendor/minimal-lexical/.github/ISSUE_TEMPLATE/feature_request.md new file mode 100644 index 00000000000000..ef7e7fe14fdef7 --- /dev/null +++ b/vendor/minimal-lexical/.github/ISSUE_TEMPLATE/feature_request.md @@ -0,0 +1,27 @@ +--- +name: Feature request +about: Suggest an idea for this project. +title: "[FEATURE]" +labels: enhancement +assignees: Alexhuszagh + +--- + +## Problem +A clear and concise description of what the problem is. Ex. minimal-lexical does not parse standard-conforming JSON numbers. + +## Solution +A clear and concise description of what you want to happen. 
+ +## Prerequisites + +If applicable to the feature request, here are a few things you should provide to help me understand the issue: + +- Rust version: `rustc -V` +- minimal-lexical version: + +## Alternatives +A clear and concise description of any alternative solutions or features you've considered. + +## Additional Context +Add any other context or screenshots about the feature request here. diff --git a/vendor/minimal-lexical/.github/ISSUE_TEMPLATE/question.md b/vendor/minimal-lexical/.github/ISSUE_TEMPLATE/question.md new file mode 100644 index 00000000000000..56ffb70dc2faac --- /dev/null +++ b/vendor/minimal-lexical/.github/ISSUE_TEMPLATE/question.md @@ -0,0 +1,11 @@ +--- +name: Question +about: Have a question how to use minimal-lexical? +title: "[QUESTION]" +labels: question +assignees: Alexhuszagh + +--- + +## Question +A clear and concise description of what the question is. Ex. how do I use minimal-lexical without a system allocator? diff --git a/vendor/minimal-lexical/.github/PULL_REQUEST_TEMPLATE/bug_fix.md b/vendor/minimal-lexical/.github/PULL_REQUEST_TEMPLATE/bug_fix.md new file mode 100644 index 00000000000000..c4b2874c3a2ab3 --- /dev/null +++ b/vendor/minimal-lexical/.github/PULL_REQUEST_TEMPLATE/bug_fix.md @@ -0,0 +1,27 @@ +--- +name: Bug fix +about: Fix a bug in minimal-lexical. +title: "[BUG]" +labels: bug +assignees: Alexhuszagh + +--- + +**NOTE:** +- If you have made non-trivial changes to the code, please make sure to run unittests prior to committing. +- If you have made any changes to parsing algorithms, please run at least `test-parse-golang` or `test-parse-unittests` with `feature = +comprehensive_float_test"` enabled prior to committing, to ensure there are no regressions. +- Please run `cargo fmt` on nightly prior to committing. + +## Optional Debugging Information + +If applicable to the issue, here are a few things you should provide to help me understand the issue: + +- Rust version: `rustc -V` +- minimal-lexical version: + +## Description +Please include a clear and concise description of the changes made. + +## Additional Context +Add any other context or screenshots about the bug fix here. diff --git a/vendor/minimal-lexical/.github/PULL_REQUEST_TEMPLATE/custom.md b/vendor/minimal-lexical/.github/PULL_REQUEST_TEMPLATE/custom.md new file mode 100644 index 00000000000000..b6cd5d97dac363 --- /dev/null +++ b/vendor/minimal-lexical/.github/PULL_REQUEST_TEMPLATE/custom.md @@ -0,0 +1,22 @@ +--- +name: Custom pull request template +about: Pull request template for miscellaneous changes. +title: "[OTHER]" +labels: '' +assignees: Alexhuszagh + +--- + +**NOTE:** +- If you have made non-trivial changes to the code, please make sure to run unittests prior to committing. +- Please run `cargo fmt` on nightly prior to committing. + +## Optional Debugging Information + +If applicable to the issue, here are a few things you should provide to help me understand the issue: + +- Rust version: `rustc -V` +- minimal-lexical version: + +## Description +Please include a clear and concise description of the changes. diff --git a/vendor/minimal-lexical/.github/PULL_REQUEST_TEMPLATE/documentation.md b/vendor/minimal-lexical/.github/PULL_REQUEST_TEMPLATE/documentation.md new file mode 100644 index 00000000000000..233f87d1b46d34 --- /dev/null +++ b/vendor/minimal-lexical/.github/PULL_REQUEST_TEMPLATE/documentation.md @@ -0,0 +1,21 @@ +--- +name: Documentation +about: Update the project's documentation. 
+title: "[DOC]" +labels: documentation +assignees: Alexhuszagh + +--- + +**NOTE:** +- If you have made any changes to doc comments, please run `cargo fmt` on nightly prior to committing. + +## Description +Please include a clear and concise description of fixes made to the documentation. + +Ex: Fixed a backtick leading to improper formatting in README. +Ex: Fixed code sample for `parse_partial` in README. +Ex: Updated outdated doc comments in `parse_float`. + +## Additional Context +Add any other context or screenshots about the issue here. diff --git a/vendor/minimal-lexical/.github/workflows/Cross.yml b/vendor/minimal-lexical/.github/workflows/Cross.yml new file mode 100644 index 00000000000000..d19d10427146a6 --- /dev/null +++ b/vendor/minimal-lexical/.github/workflows/Cross.yml @@ -0,0 +1,90 @@ +name: Cross + +on: + pull_request: + branches: [main] + workflow_dispatch: + +jobs: + cross: + name: Rust ${{matrix.target}} + runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + target: + # Android + - aarch64-linux-android + - arm-linux-androideabi + - armv7-linux-androideabi + - i686-linux-android + - x86_64-linux-android + + # Linux + - aarch64-unknown-linux-gnu + - arm-unknown-linux-gnueabi + - armv7-unknown-linux-gnueabihf + - i686-unknown-linux-gnu + - i686-unknown-linux-musl + - mips-unknown-linux-gnu + - mips64-unknown-linux-gnuabi64 + - mips64el-unknown-linux-gnuabi64 + - mipsel-unknown-linux-gnu + - powerpc64le-unknown-linux-gnu + - x86_64-unknown-linux-gnu + - x86_64-unknown-linux-musl + + # Windows + - x86_64-pc-windows-gnu + + steps: + - uses: actions/checkout@v2 + - uses: actions-rs/toolchain@v1 + with: + toolchain: stable + target: ${{matrix.target}} + override: true + - uses: actions-rs/cargo@v1 + with: + use-cross: true + command: check + args: --target ${{matrix.target}} + - uses: actions-rs/cargo@v1 + with: + use-cross: true + command: test + args: --target ${{matrix.target}} + - uses: actions-rs/cargo@v1 + with: + use-cross: true + command: test + args: --target ${{matrix.target}} --features=compact + + notest: + name: Rust ${{matrix.target}} + runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + target: + # Linux + - powerpc64-unknown-linux-gnu + - s390x-unknown-linux-gnu + + # FreeBSD + - i686-unknown-freebsd + - x86_64-unknown-freebsd + - x86_64-unknown-netbsd + + steps: + - uses: actions/checkout@v2 + - uses: actions-rs/toolchain@v1 + with: + toolchain: stable + target: ${{matrix.target}} + override: true + - uses: actions-rs/cargo@v1 + with: + use-cross: true + command: check + args: --target ${{matrix.target}} diff --git a/vendor/minimal-lexical/.github/workflows/Features.yml b/vendor/minimal-lexical/.github/workflows/Features.yml new file mode 100644 index 00000000000000..2a940e2a69381c --- /dev/null +++ b/vendor/minimal-lexical/.github/workflows/Features.yml @@ -0,0 +1,23 @@ +name: Features + +on: + pull_request: + branches: [main] + workflow_dispatch: + +jobs: + features: + name: Test Feature Combinations + runs-on: ubuntu-latest + strategy: + fail-fast: true + steps: + - uses: actions/checkout@v2 + - name: Install latest nightly + uses: actions-rs/toolchain@v1 + with: + toolchain: nightly + override: true + components: rustfmt, clippy + - run: ci/test.sh + - run: NIGHTLY=1 NO_STD=1 ci/test.sh diff --git a/vendor/minimal-lexical/.github/workflows/OSX.yml b/vendor/minimal-lexical/.github/workflows/OSX.yml new file mode 100644 index 00000000000000..e835250eca151e --- /dev/null +++ b/vendor/minimal-lexical/.github/workflows/OSX.yml @@ -0,0 
+1,40 @@ +name: OSX + +on: + pull_request: + branches: [main] + workflow_dispatch: + +jobs: + cross: + name: Rust ${{matrix.target}} + runs-on: macos-latest + strategy: + fail-fast: false + matrix: + target: + # iOS targets don't work, since rust-embedded doesn't provide images. + - x86_64-apple-darwin + + steps: + - uses: actions/checkout@v2 + - uses: actions-rs/toolchain@v1 + with: + toolchain: stable + target: ${{matrix.target}} + override: true + - uses: actions-rs/cargo@v1 + with: + use-cross: true + command: check + args: --target ${{matrix.target}} + - uses: actions-rs/cargo@v1 + with: + use-cross: true + command: test + args: --target ${{matrix.target}} + - uses: actions-rs/cargo@v1 + with: + use-cross: true + command: test + args: --target ${{matrix.target}} --features=compact diff --git a/vendor/minimal-lexical/.github/workflows/Simple.yml b/vendor/minimal-lexical/.github/workflows/Simple.yml new file mode 100644 index 00000000000000..02f63af306eec1 --- /dev/null +++ b/vendor/minimal-lexical/.github/workflows/Simple.yml @@ -0,0 +1,41 @@ +name: Simple + +on: + push: + branches: [main] + pull_request: + branches: [main] + +jobs: + test: + name: Rust ${{matrix.rust}} + runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + rust: [1.36.0, 1.41.0, 1.46.0, 1.51.0, stable, beta, nightly] + steps: + - uses: actions/checkout@v2 + with: + submodules: recursive + - uses: dtolnay/rust-toolchain@master + with: + toolchain: ${{matrix.rust}} + - run: cargo check + - run: cargo test + - run: cargo test --features=compact + + check: + name: Lint code + runs-on: ubuntu-latest + strategy: + fail-fast: true + steps: + - uses: actions/checkout@v2 + - name: Install latest nightly + uses: actions-rs/toolchain@v1 + with: + toolchain: nightly + override: true + components: rustfmt, clippy + - run: ci/check.sh diff --git a/vendor/minimal-lexical/.github/workflows/Valgrind.yml b/vendor/minimal-lexical/.github/workflows/Valgrind.yml new file mode 100644 index 00000000000000..298a5ce109e495 --- /dev/null +++ b/vendor/minimal-lexical/.github/workflows/Valgrind.yml @@ -0,0 +1,24 @@ +name: Valgrind + +on: + pull_request: + branches: [main] + workflow_dispatch: + +jobs: + valgrind: + name: Valgrind Tests + runs-on: ubuntu-latest + strategy: + fail-fast: true + steps: + - uses: actions/checkout@v2 + - name: Install latest nightly + uses: actions-rs/toolchain@v1 + with: + toolchain: nightly + override: true + - run: sudo apt-get install valgrind + - run: cargo +nightly install cargo-valgrind + - run: cargo +nightly valgrind test --release + - run: cargo +nightly valgrind test --all-features --release diff --git a/vendor/minimal-lexical/.gitmodules b/vendor/minimal-lexical/.gitmodules new file mode 100644 index 00000000000000..f06dee03cf651a --- /dev/null +++ b/vendor/minimal-lexical/.gitmodules @@ -0,0 +1,4 @@ +[submodule "data/test-parse-golang/parse-number-fxx-test-data"] + path = data/test-parse-golang/parse-number-fxx-test-data + url = https://github.com/nigeltao/parse-number-fxx-test-data + shallow = true diff --git a/vendor/minimal-lexical/CHANGELOG b/vendor/minimal-lexical/CHANGELOG new file mode 100644 index 00000000000000..b1fd38ac143a7f --- /dev/null +++ b/vendor/minimal-lexical/CHANGELOG @@ -0,0 +1,38 @@ +# Changelog + +Notes significant changes to minimal-lexical. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), +and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). 
+ +## [0.1.4] 2021-10-02 +### Added +- Missing license details for `src/bellerophon.rs`. + +## [0.2.0] 2021-09-10 +### Changed +- `no_alloc` feature flag was replaced with an `alloc` feature flag. + +## [0.1.3] 2021-09-04 +### Added +- Added the `compact` feature, which sacrifices performance for smaller binary sizes. +- Added the `nightly` feature, which adds inline ASM to use FPU instructions for to ensure proper rounding on x86 targets using the x87 FPU without SSE2. + +### Changed +- Removed stackvec dependent, even on `no_alloc`. +- Improved the algorithms for parsing. +- Simplified big-integer arithmetic, and the slow path algorithms. +- Reduced the binary sizes. +- Added optimizations for small floats. + +## [0.1.2] 2021-05-09 +### Added +- Remove cached_float and infer exponents rather than store them. + +## [0.1.1] 2021-05-08 +### Added +- Added the Eisel-Lemire algorithm. + +## [0.1.0] 2021-04-27 +### Added +- Initial version. diff --git a/vendor/minimal-lexical/CODE_OF_CONDUCT.md b/vendor/minimal-lexical/CODE_OF_CONDUCT.md new file mode 100644 index 00000000000000..74fd657be0273d --- /dev/null +++ b/vendor/minimal-lexical/CODE_OF_CONDUCT.md @@ -0,0 +1,141 @@ +# Code of Conduct + +## When Something Happens + +If you see a Code of Conduct violation, follow these steps: + +1. Let the person know that what they did is not appropriate and ask them to stop and/or edit their message(s) or commits. +2. That person should immediately stop the behavior and correct the issue. +3. If this doesn’t happen, or if you're uncomfortable speaking up, [contact the maintainers](#contacting-maintainers). +4. As soon as available, a maintainer will look into the issue, and take [further action (see below)](#further-enforcement), starting with a warning, then temporary block, then long-term repo or organization ban. + +When reporting, please include any relevant details, links, screenshots, context, or other information that may be used to better understand and resolve the situation. + +**The maintainer team will prioritize the well-being and comfort of the recipients of the violation over the comfort of the violator.** See [some examples below](#enforcement-examples). + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as contributors and maintainers of this project pledge to making participation in our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, technical preferences, nationality, personal appearance, race, religion, or sexual identity and orientation. + +## Our Standards + +Examples of behavior that contributes to creating a positive environment include: + + * Using welcoming and inclusive language. + * Being respectful of differing viewpoints and experiences. + * Gracefully accepting constructive feedback. + * Focusing on what is best for the community. + * Showing empathy and kindness towards other community members. + * Encouraging and raising up your peers in the project so you can all bask in hacks and glory. + +Examples of unacceptable behavior by participants include: + + * The use of sexualized language or imagery and unwelcome sexual attention or advances, including when simulated online. The only exception to sexual topics is channels/spaces specifically for topics of sexual identity. + * Casual mention of slavery or indentured servitude and/or false comparisons of one's occupation or situation to slavery. 
Please consider using or asking about alternate terminology when referring to such metaphors in technology. + * Making light of/making mocking comments about trigger warnings and content warnings. + * Trolling, insulting/derogatory comments, and personal or political attacks. + * Public or private harassment, deliberate intimidation, or threats. + * Publishing others' private information, such as a physical or electronic address, without explicit permission. This includes any sort of "outing" of any aspect of someone's identity without their consent. + * Publishing private screenshots or quotes of interactions in the context of this project without all quoted users' *explicit* consent. + * Publishing of private communication that doesn't have to do with reporting harrassment. + * Any of the above even when [presented as "ironic" or "joking"](https://en.wikipedia.org/wiki/Hipster_racism). + * Any attempt to present "reverse-ism" versions of the above as violations. Examples of reverse-isms are "reverse racism", "reverse sexism", "heterophobia", and "cisphobia". + * Unsolicited explanations under the assumption that someone doesn't already know it. Ask before you teach! Don't assume what people's knowledge gaps are. + * [Feigning or exaggerating surprise](https://www.recurse.com/manual#no-feigned-surprise) when someone admits to not knowing something. + * "[Well-actuallies](https://www.recurse.com/manual#no-well-actuallys)" + * Other conduct which could reasonably be considered inappropriate in a professional or community setting. + +## Scope + +This Code of Conduct applies both within spaces involving this project and in other spaces involving community members. This includes the repository, its Pull Requests and Issue tracker, private email communications in the context of the project, and any events where members of the project are participating, as well as adjacent communities and venues affecting the project's members. + +Depending on the violation, the maintainers may decide that violations of this code of conduct that have happened outside of the scope of the community may deem an individual unwelcome, and take appropriate action to maintain the comfort and safety of its members. + +### Other Community Standards + +As a project on GitHub, this project is additionally covered by the [GitHub Community Guidelines](https://help.github.com/articles/github-community-guidelines/). + +Enforcement of those guidelines after violations overlapping with the above are the responsibility of the entities, and enforcement may happen in any or all of the services/communities. + +## Maintainer Enforcement Process + +Once the maintainers get involved, they will follow a documented series of steps and do their best to preserve the well-being of project members. This section covers actual concrete steps. + +### Contacting Maintainers + +You may get in touch with the maintainer team through any of the following methods: + + Through email: + ahuszagh@gmail.com (Alex Huszagh) + +### Further Enforcement + +If you've already followed the [initial enforcement steps](#enforcement), these are the steps maintainers will take for further enforcement, as needed: + + 1. Repeat the request to stop. + 2. If the person doubles down, they will have offending messages removed or edited by a maintainers given an official warning. The PR or Issue may be locked. + 3. If the behavior continues or is repeated later, the person will be blocked from participating for 24 hours. + 4. 
If the behavior continues or is repeated after the temporary block, a long-term (6-12mo) ban will be used. + +On top of this, maintainers may remove any offending messages, images, contributions, etc, as they deem necessary. + +Maintainers reserve full rights to skip any of these steps, at their discretion, if the violation is considered to be a serious and/or immediate threat to the health and well-being of members of the community. These include any threats, serious physical or verbal attacks, and other such behavior that would be completely unacceptable in any social setting that puts our members at risk. + +Members expelled from events or venues with any sort of paid attendance will not be refunded. + +### Who Watches the Watchers? + +Maintainers and other leaders who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership. These may include anything from removal from the maintainer team to a permanent ban from the community. + +Additionally, as a project hosted on both GitHub and npm, [their own Codes of Conducts may be applied against maintainers of this project](#other-community-standards), externally of this project's procedures. + +### Enforcement Examples + +#### The Best Case + +The vast majority of situations work out like this. This interaction is common, and generally positive. + +> Alex: "Yeah I used X and it was really crazy!" + +> Patt (not a maintainer): "Hey, could you not use that word? What about 'ridiculous' instead?" + +> Alex: "oh sorry, sure." -> edits old comment to say "it was really confusing!" + +#### The Maintainer Case + +Sometimes, though, you need to get maintainers involved. Maintainers will do their best to resolve conflicts, but people who were harmed by something **will take priority**. + +> Patt: "Honestly, sometimes I just really hate using $library and anyone who uses it probably sucks at their job." + +> Alex: "Whoa there, could you dial it back a bit? There's a CoC thing about attacking folks' tech use like that." + +> Patt: "I'm not attacking anyone, what's your problem?" + +> Alex: "@maintainers hey uh. Can someone look at this issue? Patt is getting a bit aggro. I tried to nudge them about it, but nope." + +> KeeperOfCommitBits: (on issue) "Hey Patt, maintainer here. Could you tone it down? This sort of attack is really not okay in this space." + +> Patt: "Leave me alone I haven't said anything bad wtf is wrong with you." + +> KeeperOfCommitBits: (deletes user's comment), "@patt I mean it. Please refer to the CoC over at (URL to this CoC) if you have questions, but you can consider this an actual warning. I'd appreciate it if you reworded your messages in this thread, since they made folks there uncomfortable. Let's try and be kind, yeah?" + +> Patt: "@keeperofbits Okay sorry. I'm just frustrated and I'm kinda burnt out and I guess I got carried away. I'll DM Alex a note apologizing and edit my messages. Sorry for the trouble." + +> KeeperOfCommitBits: "@patt Thanks for that. I hear you on the stress. Burnout sucks :/. Have a good one!" + +#### The Nope Case + +> PepeTheFrog🐸: "Hi, I am a literal actual nazi and I think white supremacists are quite fashionable." + +> Patt: "NOOOOPE. OH NOPE NOPE." + +> Alex: "JFC NO. NOPE. @keeperofbits NOPE NOPE LOOK HERE" + +> KeeperOfCommitBits: "👀 Nope. NOPE NOPE NOPE. 🔥" + +> PepeTheFrog🐸 has been banned from all organization or user repositories belonging to KeeperOfCommitBits. 
+ +## Attribution + +This Code of Conduct was generated using [WeAllJS Code of Conduct Generator](https://npm.im/weallbehave), which is based on the [WeAllJS Code of Conduct](https://wealljs.org/code-of-conduct), which is itself based on +[Contributor Covenant](http://contributor-covenant.org), version 1.4, available at [http://contributor-covenant.org/version/1/4](http://contributor-covenant.org/version/1/4), and the LGBTQ in Technology Slack [Code of Conduct](http://lgbtq.technology/coc.html). diff --git a/vendor/minimal-lexical/Cargo.toml b/vendor/minimal-lexical/Cargo.toml new file mode 100644 index 00000000000000..b9b52cbe3ff5a7 --- /dev/null +++ b/vendor/minimal-lexical/Cargo.toml @@ -0,0 +1,33 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies. +# +# If you are reading this file be aware that the original Cargo.toml +# will likely look very different (and much more reasonable). +# See Cargo.toml.orig for the original contents. + +[package] +edition = "2018" +name = "minimal-lexical" +version = "0.2.1" +authors = ["Alex Huszagh "] +exclude = ["assets/*", "ci/*", "docs/*", "etc/*", "fuzz/*", "examples/*", "scripts/*"] +autoexamples = false +description = "Fast float parsing conversion routines." +documentation = "https://docs.rs/minimal-lexical" +readme = "README.md" +keywords = ["parsing", "no_std"] +categories = ["parsing", "no-std"] +license = "MIT/Apache-2.0" +repository = "https://github.com/Alexhuszagh/minimal-lexical" + +[features] +alloc = [] +compact = [] +default = ["std"] +lint = [] +nightly = [] +std = [] diff --git a/vendor/minimal-lexical/LICENSE-APACHE b/vendor/minimal-lexical/LICENSE-APACHE new file mode 100644 index 00000000000000..11069edd79019f --- /dev/null +++ b/vendor/minimal-lexical/LICENSE-APACHE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/vendor/minimal-lexical/LICENSE-MIT b/vendor/minimal-lexical/LICENSE-MIT new file mode 100644 index 00000000000000..31aa79387f27e7 --- /dev/null +++ b/vendor/minimal-lexical/LICENSE-MIT @@ -0,0 +1,23 @@ +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. diff --git a/vendor/minimal-lexical/LICENSE.md b/vendor/minimal-lexical/LICENSE.md new file mode 100644 index 00000000000000..2442bbfbf3cc80 --- /dev/null +++ b/vendor/minimal-lexical/LICENSE.md @@ -0,0 +1,37 @@ +Minimal-lexical is dual licensed under the Apache 2.0 license as well as the MIT +license. See the LICENCE-MIT and the LICENCE-APACHE files for the licenses. + +--- + +`src/bellerophon.rs` is loosely based off the Golang implementation, +found [here](https://github.com/golang/go/blob/b10849fbb97a2244c086991b4623ae9f32c212d0/src/strconv/extfloat.go). +That code (used if the `compact` feature is enabled) is subject to a +[3-clause BSD license](https://github.com/golang/go/blob/b10849fbb97a2244c086991b4623ae9f32c212d0/LICENSE): + +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/minimal-lexical/README.md b/vendor/minimal-lexical/README.md new file mode 100644 index 00000000000000..7805296510719b --- /dev/null +++ b/vendor/minimal-lexical/README.md @@ -0,0 +1,102 @@ +minimal-lexical +=============== + +This is a minimal version of [rust-lexical](https://github.com/Alexhuszagh/rust-lexical), meant to allow efficient round-trip float parsing. minimal-lexical implements a correct, fast float parser. + +Due to the small, stable nature of minimal-lexical, it is also well-adapted to private forks. If you do privately fork minimal-lexical, I recommend you contact me via [email](mailto:ahuszagh@gmail.com) or [Twitter](https://twitter.com/KardOnIce), so I can notify you of feature updates, bug fixes, or security vulnerabilities, as well as help you implement custom feature requests. I will not use your information for any other purpose, including, but not limited to disclosing your project or organization's use of minimal-lexical. 
+ +minimal-lexical is designed for fast compile times and small binary sizes, at the expense of a minor amount of performance. For improved performance, feel free to fork minimal-lexical with more aggressive inlining. + +**Similar Projects** + +For high-level, all-in-one number conversion routines, see [rust-lexical](https://github.com/Alexhuszagh/rust-lexical). + +**Table Of Contents** + +- [Getting Started](#getting-started) +- [Recipes](#recipes) +- [Algorithms](#algorithms) +- [Platform Support](#platform-support) +- [Minimum Version Support](#minimum-version-support) +- [Changelog](#changelog) +- [License](#license) +- [Contributing](#contributing) + +# Getting Started + +First, add the following to your `Cargo.toml`. + +```toml +[dependencies] +minimal-lexical = "0.2" +``` + +Next, to parse a simple float, use the following: + +```rust +extern crate minimal_lexical; + +// Let's say we want to parse "1.2345". +// First, we need an external parser to extract the integer digits ("1"), +// the fraction digits ("2345"), and then parse the exponent to a 32-bit +// integer (0). +// Warning: +// -------- +// Please note that leading zeros must be trimmed from the integer, +// and trailing zeros must be trimmed from the fraction. This cannot +// be handled by minimal-lexical, since we accept iterators. +let integer = b"1"; +let fraction = b"2345"; +let float: f64 = minimal_lexical::parse_float(integer.iter(), fraction.iter(), 0); +println!("float={:?}", float); // 1.2345 +``` + +# Recipes + +You may be asking: where is the actual parser? Due to variation in float formats, and the goal of integrating utility for various data-interchange language parsers, such functionality would be beyond the scope of this library. + +For example, the following float is valid in Rust strings, but is invalid in JSON or TOML: +```json +1.e7 +``` + +Therefore, to use the library, you need functionality that extracts the significant digits to pass to `parse_float`. Please see [simple-example](https://github.com/Alexhuszagh/minimal-lexical/blob/master/examples/simple.rs) for a simple, annotated example on how to use minimal-lexical as a parser. + +# Algorithms + +For an in-depth explanation on the algorithms minimal-lexical uses, please see [lexical-core#string-to-float](https://github.com/Alexhuszagh/rust-lexical/tree/master/lexical-core#string-to-float). + +# Platform Support + +minimal-lexical is tested on a wide variety of platforms, including big- and little-endian systems, to ensure portable code. Supported architectures include: +- x86_64 Linux, Windows, macOS, Android, iOS, FreeBSD, and NetBSD. +- x86 Linux, macOS, Android, iOS, and FreeBSD. +- aarch64 (ARMv8-A) Linux, Android, and iOS. +- armv7 (ARMv7-A) Linux, Android, and iOS. +- arm (ARMv6) Linux, and Android. +- mips (MIPS) Linux. +- mipsel (MIPS LE) Linux. +- mips64 (MIPS64 BE) Linux. +- mips64el (MIPS64 LE) Linux. +- powerpc (PowerPC) Linux. +- powerpc64 (PPC64) Linux. +- powerpc64le (PPC64LE) Linux. +- s390x (IBM Z) Linux. + +minimal-lexical should also work on a wide variety of other architectures and ISAs. If you have any issue compiling minimal-lexical on any architecture, please file a bug report. + +# Minimum Version Support + +Minimal-lexical is tested to support Rustc 1.36+, including stable, beta, and nightly. Please report any errors compiling a supported lexical version on a compatible Rustc version.
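To make the Recipes section above concrete, here is a minimal sketch of the kind of external digit-extraction front end that the README leaves to the caller. It is not part of the vendored crate: the accepted input shape (`digits[.digits][e<exp>]`), the helper name `parse_simple_float`, and the omission of error handling are assumptions made purely for illustration; only `minimal_lexical::parse_float` and the zero-trimming requirement come from the text above.

```rust
// Hypothetical front end (not part of minimal-lexical): split input of the
// form "digits[.digits][e<exp>]" into integer digits, fraction digits, and a
// decimal exponent, then let minimal_lexical::parse_float do the conversion.
// Signs, special values, and malformed input are ignored to keep the sketch short.
extern crate minimal_lexical;

fn parse_simple_float(s: &str) -> f64 {
    let bytes = s.as_bytes();
    // Split off an optional exponent ("e" or "E") and parse it as an i32.
    let (mantissa, exponent) = match bytes.iter().position(|&b| b == b'e' || b == b'E') {
        Some(i) => (
            &bytes[..i],
            std::str::from_utf8(&bytes[i + 1..]).unwrap().parse::<i32>().unwrap(),
        ),
        None => (bytes, 0),
    };
    // Split the mantissa into integer and fraction digits at the decimal point.
    let (integer, fraction) = match mantissa.iter().position(|&b| b == b'.') {
        Some(i) => (&mantissa[..i], &mantissa[i + 1..]),
        None => (mantissa, &mantissa[..0]),
    };
    // Trim leading zeros from the integer and trailing zeros from the fraction,
    // as required by the warning in the Getting Started snippet.
    let integer = &integer[integer.iter().take_while(|&&b| b == b'0').count()..];
    let trailing = fraction.iter().rev().take_while(|&&b| b == b'0').count();
    let fraction = &fraction[..fraction.len() - trailing];
    minimal_lexical::parse_float(integer.iter(), fraction.iter(), exponent)
}

fn main() {
    assert_eq!(parse_simple_float("1.2345"), 1.2345);
    assert_eq!(parse_simple_float("12.5e2"), 1250.0);
}
```

A real front end would also have to handle signs, special values such as `inf` and `NaN`, and input validation, which is exactly the per-format work the Recipes section defers to the calling parser.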
Please note we may increment the MSRV for compiler versions older than 18 months, to support at least the current Debian stable version, without breaking changes. + +# Changelog + +All changes are documented in [CHANGELOG](CHANGELOG). + +# License + +Minimal-lexical is dual licensed under the Apache 2.0 license as well as the MIT license. See the [LICENSE.md](LICENSE.md) file for full license details. + +# Contributing + +Unless you explicitly state otherwise, any contribution intentionally submitted for inclusion in minimal-lexical by you, as defined in the Apache-2.0 license, shall be dual licensed as above, without any additional terms or conditions. diff --git a/vendor/minimal-lexical/clippy.toml b/vendor/minimal-lexical/clippy.toml new file mode 100644 index 00000000000000..cda8d17eed44c7 --- /dev/null +++ b/vendor/minimal-lexical/clippy.toml @@ -0,0 +1 @@ +avoid-breaking-exported-api = false diff --git a/vendor/minimal-lexical/rustfmt.toml b/vendor/minimal-lexical/rustfmt.toml new file mode 100644 index 00000000000000..2361c9d479b295 --- /dev/null +++ b/vendor/minimal-lexical/rustfmt.toml @@ -0,0 +1,16 @@ +# Requires nightly to do proper formatting. +use_small_heuristics = "Off" +use_field_init_shorthand = true +trailing_semicolon = true +newline_style = "Unix" +match_block_trailing_comma = true +empty_item_single_line = false +enum_discrim_align_threshold = 40 +fn_args_layout = "Tall" +fn_single_line = false +format_macro_matchers = true +format_macro_bodies = true +imports_indent = "Block" +imports_layout = "HorizontalVertical" +indent_style = "Block" +match_arm_blocks = true diff --git a/vendor/minimal-lexical/src/bellerophon.rs b/vendor/minimal-lexical/src/bellerophon.rs new file mode 100644 index 00000000000000..86a2023d09e295 --- /dev/null +++ b/vendor/minimal-lexical/src/bellerophon.rs @@ -0,0 +1,391 @@ +//! An implementation of Clinger's Bellerophon algorithm. +//! +//! This is a moderate path algorithm that uses an extended-precision +//! float, represented in 80 bits, by calculating the bits of slop +//! and determining if those bits could prevent unambiguous rounding. +//! +//! This algorithm requires less static storage than the Lemire algorithm, +//! and has decent performance, and is therefore used when non-decimal, +//! non-power-of-two strings need to be parsed. Clinger's algorithm +//! is described in depth in "How to Read Floating Point Numbers Accurately.", +//! available online [here](http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.45.4152&rep=rep1&type=pdf). +//! +//! This implementation is loosely based off the Golang implementation, +//! found [here](https://github.com/golang/go/blob/b10849fbb97a2244c086991b4623ae9f32c212d0/src/strconv/extfloat.go). +//! This code is therefore subject to a 3-clause BSD license. + +#![cfg(feature = "compact")] +#![doc(hidden)] + +use crate::extended_float::ExtendedFloat; +use crate::mask::{lower_n_halfway, lower_n_mask}; +use crate::num::Float; +use crate::number::Number; +use crate::rounding::{round, round_nearest_tie_even}; +use crate::table::BASE10_POWERS; + +// ALGORITHM +// --------- + +/// Core implementation of the Bellerophon algorithm. +/// +/// Create an extended-precision float, scale it to the proper radix power, +/// calculate the bits of slop, and return the representation. The value +/// will always be guaranteed to be within 1 bit, rounded-down, of the real +/// value. If a negative exponent is returned, this represents we were +/// unable to unambiguously round the significant digits. 
+/// +/// This has been modified to return a biased, rather than unbiased exponent. +pub fn bellerophon(num: &Number) -> ExtendedFloat { + let fp_zero = ExtendedFloat { + mant: 0, + exp: 0, + }; + let fp_inf = ExtendedFloat { + mant: 0, + exp: F::INFINITE_POWER, + }; + + // Early short-circuit, in case of literal 0 or infinity. + // This allows us to avoid narrow casts causing numeric overflow, + // and is a quick check for any radix. + if num.mantissa == 0 || num.exponent <= -0x1000 { + return fp_zero; + } else if num.exponent >= 0x1000 { + return fp_inf; + } + + // Calculate our indexes for our extended-precision multiplication. + // This narrowing cast is safe, since exponent must be in a valid range. + let exponent = num.exponent as i32 + BASE10_POWERS.bias; + let small_index = exponent % BASE10_POWERS.step; + let large_index = exponent / BASE10_POWERS.step; + + if exponent < 0 { + // Guaranteed underflow (assign 0). + return fp_zero; + } + if large_index as usize >= BASE10_POWERS.large.len() { + // Overflow (assign infinity) + return fp_inf; + } + + // Within the valid exponent range, multiply by the large and small + // exponents and return the resulting value. + + // Track errors to as a factor of unit in last-precision. + let mut errors: u32 = 0; + if num.many_digits { + errors += error_halfscale(); + } + + // Multiply by the small power. + // Check if we can directly multiply by an integer, if not, + // use extended-precision multiplication. + let mut fp = ExtendedFloat { + mant: num.mantissa, + exp: 0, + }; + match fp.mant.overflowing_mul(BASE10_POWERS.get_small_int(small_index as usize)) { + // Overflow, multiplication unsuccessful, go slow path. + (_, true) => { + normalize(&mut fp); + fp = mul(&fp, &BASE10_POWERS.get_small(small_index as usize)); + errors += error_halfscale(); + }, + // No overflow, multiplication successful. + (mant, false) => { + fp.mant = mant; + normalize(&mut fp); + }, + } + + // Multiply by the large power. + fp = mul(&fp, &BASE10_POWERS.get_large(large_index as usize)); + if errors > 0 { + errors += 1; + } + errors += error_halfscale(); + + // Normalize the floating point (and the errors). + let shift = normalize(&mut fp); + errors <<= shift; + fp.exp += F::EXPONENT_BIAS; + + // Check for literal overflow, even with halfway cases. + if -fp.exp + 1 > 65 { + return fp_zero; + } + + // Too many errors accumulated, return an error. + if !error_is_accurate::(errors, &fp) { + // Bias the exponent so we know it's invalid. + fp.exp += F::INVALID_FP; + return fp; + } + + // Check if we have a literal 0 or overflow here. + // If we have an exponent of -63, we can still have a valid shift, + // giving a case where we have too many errors and need to round-up. + if -fp.exp + 1 == 65 { + // Have more than 64 bits below the minimum exponent, must be 0. + return fp_zero; + } + + round::(&mut fp, |f, s| { + round_nearest_tie_even(f, s, |is_odd, is_halfway, is_above| { + is_above || (is_odd && is_halfway) + }); + }); + fp +} + +// ERRORS +// ------ + +// Calculate if the errors in calculating the extended-precision float. +// +// Specifically, we want to know if we are close to a halfway representation, +// or halfway between `b` and `b+1`, or `b+h`. The halfway representation +// has the form: +// SEEEEEEEHMMMMMMMMMMMMMMMMMMMMMMM100... 
+// where: +// S = Sign Bit +// E = Exponent Bits +// H = Hidden Bit +// M = Mantissa Bits +// +// The halfway representation has a bit set 1-after the mantissa digits, +// and no bits set immediately afterward, making it impossible to +// round between `b` and `b+1` with this representation. + +/// Get the full error scale. +#[inline(always)] +const fn error_scale() -> u32 { + 8 +} + +/// Get the half error scale. +#[inline(always)] +const fn error_halfscale() -> u32 { + error_scale() / 2 +} + +/// Determine if the number of errors is tolerable for float precision. +fn error_is_accurate(errors: u32, fp: &ExtendedFloat) -> bool { + // Check we can't have a literal 0 denormal float. + debug_assert!(fp.exp >= -64); + + // Determine if extended-precision float is a good approximation. + // If the error has affected too many units, the float will be + // inaccurate, or if the representation is too close to halfway + // that any operations could affect this halfway representation. + // See the documentation for dtoa for more information. + + // This is always a valid u32, since `fp.exp >= -64` + // will always be positive and the significand size is {23, 52}. + let mantissa_shift = 64 - F::MANTISSA_SIZE - 1; + + // The unbiased exponent checks is `unbiased_exp <= F::MANTISSA_SIZE + // - F::EXPONENT_BIAS -64 + 1`, or `biased_exp <= F::MANTISSA_SIZE - 63`, + // or `biased_exp <= mantissa_shift`. + let extrabits = match fp.exp <= -mantissa_shift { + // Denormal, since shifting to the hidden bit still has a negative exponent. + // The unbiased check calculation for bits is `1 - F::EXPONENT_BIAS - unbiased_exp`, + // or `1 - biased_exp`. + true => 1 - fp.exp, + false => 64 - F::MANTISSA_SIZE - 1, + }; + + // Our logic is as follows: we want to determine if the actual + // mantissa and the errors during calculation differ significantly + // from the rounding point. The rounding point for round-nearest + // is the halfway point, IE, this when the truncated bits start + // with b1000..., while the rounding point for the round-toward + // is when the truncated bits are equal to 0. + // To do so, we can check whether the rounding point +/- the error + // are >/< the actual lower n bits. + // + // For whether we need to use signed or unsigned types for this + // analysis, see this example, using u8 rather than u64 to simplify + // things. + // + // # Comparisons + // cmp1 = (halfway - errors) < extra + // cmp1 = extra < (halfway + errors) + // + // # Large Extrabits, Low Errors + // + // extrabits = 8 + // halfway = 0b10000000 + // extra = 0b10000010 + // errors = 0b00000100 + // halfway - errors = 0b01111100 + // halfway + errors = 0b10000100 + // + // Unsigned: + // halfway - errors = 124 + // halfway + errors = 132 + // extra = 130 + // cmp1 = true + // cmp2 = true + // Signed: + // halfway - errors = 124 + // halfway + errors = -124 + // extra = -126 + // cmp1 = false + // cmp2 = true + // + // # Conclusion + // + // Since errors will always be small, and since we want to detect + // if the representation is accurate, we need to use an **unsigned** + // type for comparisons. + let maskbits = extrabits as u64; + let errors = errors as u64; + + // Round-to-nearest, need to use the halfway point. + if extrabits > 64 { + // Underflow, we have a shift larger than the mantissa. + // Representation is valid **only** if the value is close enough + // overflow to the next bit within errors. If it overflows, + // the representation is **not** valid. 
+ !fp.mant.overflowing_add(errors).1 + } else { + let mask = lower_n_mask(maskbits); + let extra = fp.mant & mask; + + // Round-to-nearest, need to check if we're close to halfway. + // IE, b10100 | 100000, where `|` signifies the truncation point. + let halfway = lower_n_halfway(maskbits); + let cmp1 = halfway.wrapping_sub(errors) < extra; + let cmp2 = extra < halfway.wrapping_add(errors); + + // If both comparisons are true, we have significant rounding error, + // and the value cannot be exactly represented. Otherwise, the + // representation is valid. + !(cmp1 && cmp2) + } +} + +// MATH +// ---- + +/// Normalize float-point number. +/// +/// Shift the mantissa so the number of leading zeros is 0, or the value +/// itself is 0. +/// +/// Get the number of bytes shifted. +pub fn normalize(fp: &mut ExtendedFloat) -> i32 { + // Note: + // Using the ctlz intrinsic via leading_zeros is way faster (~10x) + // than shifting 1-bit at a time, via while loop, and also way + // faster (~2x) than an unrolled loop that checks at 32, 16, 4, + // 2, and 1 bit. + // + // Using a modulus of pow2 (which will get optimized to a bitwise + // and with 0x3F or faster) is slightly slower than an if/then, + // however, removing the if/then will likely optimize more branched + // code as it removes conditional logic. + + // Calculate the number of leading zeros, and then zero-out + // any overflowing bits, to avoid shl overflow when self.mant == 0. + if fp.mant != 0 { + let shift = fp.mant.leading_zeros() as i32; + fp.mant <<= shift; + fp.exp -= shift; + shift + } else { + 0 + } +} + +/// Multiply two normalized extended-precision floats, as if by `a*b`. +/// +/// The precision is maximal when the numbers are normalized, however, +/// decent precision will occur as long as both values have high bits +/// set. The result is not normalized. +/// +/// Algorithm: +/// 1. Non-signed multiplication of mantissas (requires 2x as many bits as input). +/// 2. Normalization of the result (not done here). +/// 3. Addition of exponents. +pub fn mul(x: &ExtendedFloat, y: &ExtendedFloat) -> ExtendedFloat { + // Logic check, values must be decently normalized prior to multiplication. + debug_assert!(x.mant >> 32 != 0); + debug_assert!(y.mant >> 32 != 0); + + // Extract high-and-low masks. + // Mask is u32::MAX for older Rustc versions. + const LOMASK: u64 = 0xffff_ffff; + let x1 = x.mant >> 32; + let x0 = x.mant & LOMASK; + let y1 = y.mant >> 32; + let y0 = y.mant & LOMASK; + + // Get our products + let x1_y0 = x1 * y0; + let x0_y1 = x0 * y1; + let x0_y0 = x0 * y0; + let x1_y1 = x1 * y1; + + let mut tmp = (x1_y0 & LOMASK) + (x0_y1 & LOMASK) + (x0_y0 >> 32); + // round up + tmp += 1 << (32 - 1); + + ExtendedFloat { + mant: x1_y1 + (x1_y0 >> 32) + (x0_y1 >> 32) + (tmp >> 32), + exp: x.exp + y.exp + 64, + } +} + +// POWERS +// ------ + +/// Precalculated powers of base N for the Bellerophon algorithm. +pub struct BellerophonPowers { + // Pre-calculated small powers. + pub small: &'static [u64], + // Pre-calculated large powers. + pub large: &'static [u64], + /// Pre-calculated small powers as 64-bit integers + pub small_int: &'static [u64], + // Step between large powers and number of small powers. + pub step: i32, + // Exponent bias for the large powers. + pub bias: i32, + /// ceil(log2(radix)) scaled as a multiplier. + pub log2: i64, + /// Bitshift for the log2 multiplier. 
+ pub log2_shift: i32, +} + +/// Allow indexing of values without bounds checking +impl BellerophonPowers { + #[inline] + pub fn get_small(&self, index: usize) -> ExtendedFloat { + let mant = self.small[index]; + let exp = (1 - 64) + ((self.log2 * index as i64) >> self.log2_shift); + ExtendedFloat { + mant, + exp: exp as i32, + } + } + + #[inline] + pub fn get_large(&self, index: usize) -> ExtendedFloat { + let mant = self.large[index]; + let biased_e = index as i64 * self.step as i64 - self.bias as i64; + let exp = (1 - 64) + ((self.log2 * biased_e) >> self.log2_shift); + ExtendedFloat { + mant, + exp: exp as i32, + } + } + + #[inline] + pub fn get_small_int(&self, index: usize) -> u64 { + self.small_int[index] + } +} diff --git a/vendor/minimal-lexical/src/bigint.rs b/vendor/minimal-lexical/src/bigint.rs new file mode 100644 index 00000000000000..d1d5027a191ee3 --- /dev/null +++ b/vendor/minimal-lexical/src/bigint.rs @@ -0,0 +1,788 @@ +//! A simple big-integer type for slow path algorithms. +//! +//! This includes minimal stackvector for use in big-integer arithmetic. + +#![doc(hidden)] + +#[cfg(feature = "alloc")] +use crate::heapvec::HeapVec; +use crate::num::Float; +#[cfg(not(feature = "alloc"))] +use crate::stackvec::StackVec; +#[cfg(not(feature = "compact"))] +use crate::table::{LARGE_POW5, LARGE_POW5_STEP}; +use core::{cmp, ops, ptr}; + +/// Number of bits in a Bigint. +/// +/// This needs to be at least the number of bits required to store +/// a Bigint, which is `log2(radix**digits)`. +/// ≅ 3600 for base-10, rounded-up. +pub const BIGINT_BITS: usize = 4000; + +/// The number of limbs for the bigint. +pub const BIGINT_LIMBS: usize = BIGINT_BITS / LIMB_BITS; + +#[cfg(feature = "alloc")] +pub type VecType = HeapVec; + +#[cfg(not(feature = "alloc"))] +pub type VecType = StackVec; + +/// Storage for a big integer type. +/// +/// This is used for algorithms when we have a finite number of digits. +/// Specifically, it stores all the significant digits scaled to the +/// proper exponent, as an integral type, and then directly compares +/// these digits. +/// +/// This requires us to store the number of significant bits, plus the +/// number of exponent bits (required) since we scale everything +/// to the same exponent. +#[derive(Clone, PartialEq, Eq)] +pub struct Bigint { + /// Significant digits for the float, stored in a big integer in LE order. + /// + /// This is pretty much the same number of digits for any radix, since the + /// significant digits balances out the zeros from the exponent: + /// 1. Decimal is 1091 digits, 767 mantissa digits + 324 exponent zeros. + /// 2. Base 6 is 1097 digits, or 680 mantissa digits + 417 exponent zeros. + /// 3. Base 36 is 1086 digits, or 877 mantissa digits + 209 exponent zeros. + /// + /// However, the number of bytes required is larger for large radixes: + /// for decimal, we need `log2(10**1091) ≅ 3600`, while for base 36 + /// we need `log2(36**1086) ≅ 5600`. Since we use uninitialized data, + /// we avoid a major performance hit from the large buffer size. + pub data: VecType, +} + +#[allow(clippy::new_without_default)] +impl Bigint { + /// Construct a bigint representing 0. + #[inline(always)] + pub fn new() -> Self { + Self { + data: VecType::new(), + } + } + + /// Construct a bigint from an integer. 
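+    // For example (illustrative), on a build with 32-bit limbs,
+    // `from_u64(0x1_0000_0002)` is stored as the limbs `[2, 1]`,
+    // in little-endian limb order.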
+ #[inline(always)] + pub fn from_u64(value: u64) -> Self { + Self { + data: VecType::from_u64(value), + } + } + + #[inline(always)] + pub fn hi64(&self) -> (u64, bool) { + self.data.hi64() + } + + /// Multiply and assign as if by exponentiation by a power. + #[inline] + pub fn pow(&mut self, base: u32, exp: u32) -> Option<()> { + debug_assert!(base == 2 || base == 5 || base == 10); + if base % 5 == 0 { + pow(&mut self.data, exp)?; + } + if base % 2 == 0 { + shl(&mut self.data, exp as usize)?; + } + Some(()) + } + + /// Calculate the bit-length of the big-integer. + #[inline] + pub fn bit_length(&self) -> u32 { + bit_length(&self.data) + } +} + +impl ops::MulAssign<&Bigint> for Bigint { + fn mul_assign(&mut self, rhs: &Bigint) { + self.data *= &rhs.data; + } +} + +/// REVERSE VIEW + +/// Reverse, immutable view of a sequence. +pub struct ReverseView<'a, T: 'a> { + inner: &'a [T], +} + +impl<'a, T> ops::Index for ReverseView<'a, T> { + type Output = T; + + #[inline] + fn index(&self, index: usize) -> &T { + let len = self.inner.len(); + &(*self.inner)[len - index - 1] + } +} + +/// Create a reverse view of the vector for indexing. +#[inline] +pub fn rview(x: &[Limb]) -> ReverseView { + ReverseView { + inner: x, + } +} + +// COMPARE +// ------- + +/// Compare `x` to `y`, in little-endian order. +#[inline] +pub fn compare(x: &[Limb], y: &[Limb]) -> cmp::Ordering { + match x.len().cmp(&y.len()) { + cmp::Ordering::Equal => { + let iter = x.iter().rev().zip(y.iter().rev()); + for (&xi, yi) in iter { + match xi.cmp(yi) { + cmp::Ordering::Equal => (), + ord => return ord, + } + } + // Equal case. + cmp::Ordering::Equal + }, + ord => ord, + } +} + +// NORMALIZE +// --------- + +/// Normalize the integer, so any leading zero values are removed. +#[inline] +pub fn normalize(x: &mut VecType) { + // We don't care if this wraps: the index is bounds-checked. + while let Some(&value) = x.get(x.len().wrapping_sub(1)) { + if value == 0 { + unsafe { x.set_len(x.len() - 1) }; + } else { + break; + } + } +} + +/// Get if the big integer is normalized. +#[inline] +#[allow(clippy::match_like_matches_macro)] +pub fn is_normalized(x: &[Limb]) -> bool { + // We don't care if this wraps: the index is bounds-checked. + match x.get(x.len().wrapping_sub(1)) { + Some(&0) => false, + _ => true, + } +} + +// FROM +// ---- + +/// Create StackVec from u64 value. +#[inline(always)] +#[allow(clippy::branches_sharing_code)] +pub fn from_u64(x: u64) -> VecType { + let mut vec = VecType::new(); + debug_assert!(vec.capacity() >= 2); + if LIMB_BITS == 32 { + vec.try_push(x as Limb).unwrap(); + vec.try_push((x >> 32) as Limb).unwrap(); + } else { + vec.try_push(x as Limb).unwrap(); + } + vec.normalize(); + vec +} + +// HI +// -- + +/// Check if any of the remaining bits are non-zero. +/// +/// # Safety +/// +/// Safe as long as `rindex <= x.len()`. +#[inline] +pub fn nonzero(x: &[Limb], rindex: usize) -> bool { + debug_assert!(rindex <= x.len()); + + let len = x.len(); + let slc = &x[..len - rindex]; + slc.iter().rev().any(|&x| x != 0) +} + +// These return the high X bits and if the bits were truncated. + +/// Shift 32-bit integer to high 64-bits. +#[inline] +pub fn u32_to_hi64_1(r0: u32) -> (u64, bool) { + u64_to_hi64_1(r0 as u64) +} + +/// Shift 2 32-bit integers to high 64-bits. +#[inline] +pub fn u32_to_hi64_2(r0: u32, r1: u32) -> (u64, bool) { + let r0 = (r0 as u64) << 32; + let r1 = r1 as u64; + u64_to_hi64_1(r0 | r1) +} + +/// Shift 3 32-bit integers to high 64-bits. 
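+// As an illustration of these helpers: `u32_to_hi64_2(1, 0)` left-justifies
+// the combined value `0x0000_0001_0000_0000` to `0x8000_0000_0000_0000` and
+// reports that no set bits were truncated.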
+#[inline] +pub fn u32_to_hi64_3(r0: u32, r1: u32, r2: u32) -> (u64, bool) { + let r0 = r0 as u64; + let r1 = (r1 as u64) << 32; + let r2 = r2 as u64; + u64_to_hi64_2(r0, r1 | r2) +} + +/// Shift 64-bit integer to high 64-bits. +#[inline] +pub fn u64_to_hi64_1(r0: u64) -> (u64, bool) { + let ls = r0.leading_zeros(); + (r0 << ls, false) +} + +/// Shift 2 64-bit integers to high 64-bits. +#[inline] +pub fn u64_to_hi64_2(r0: u64, r1: u64) -> (u64, bool) { + let ls = r0.leading_zeros(); + let rs = 64 - ls; + let v = match ls { + 0 => r0, + _ => (r0 << ls) | (r1 >> rs), + }; + let n = r1 << ls != 0; + (v, n) +} + +/// Extract the hi bits from the buffer. +macro_rules! hi { + // # Safety + // + // Safe as long as the `stackvec.len() >= 1`. + (@1 $self:ident, $rview:ident, $t:ident, $fn:ident) => {{ + $fn($rview[0] as $t) + }}; + + // # Safety + // + // Safe as long as the `stackvec.len() >= 2`. + (@2 $self:ident, $rview:ident, $t:ident, $fn:ident) => {{ + let r0 = $rview[0] as $t; + let r1 = $rview[1] as $t; + $fn(r0, r1) + }}; + + // # Safety + // + // Safe as long as the `stackvec.len() >= 2`. + (@nonzero2 $self:ident, $rview:ident, $t:ident, $fn:ident) => {{ + let (v, n) = hi!(@2 $self, $rview, $t, $fn); + (v, n || nonzero($self, 2 )) + }}; + + // # Safety + // + // Safe as long as the `stackvec.len() >= 3`. + (@3 $self:ident, $rview:ident, $t:ident, $fn:ident) => {{ + let r0 = $rview[0] as $t; + let r1 = $rview[1] as $t; + let r2 = $rview[2] as $t; + $fn(r0, r1, r2) + }}; + + // # Safety + // + // Safe as long as the `stackvec.len() >= 3`. + (@nonzero3 $self:ident, $rview:ident, $t:ident, $fn:ident) => {{ + let (v, n) = hi!(@3 $self, $rview, $t, $fn); + (v, n || nonzero($self, 3)) + }}; +} + +/// Get the high 64 bits from the vector. +#[inline(always)] +pub fn hi64(x: &[Limb]) -> (u64, bool) { + let rslc = rview(x); + // SAFETY: the buffer must be at least length bytes long. + match x.len() { + 0 => (0, false), + 1 if LIMB_BITS == 32 => hi!(@1 x, rslc, u32, u32_to_hi64_1), + 1 => hi!(@1 x, rslc, u64, u64_to_hi64_1), + 2 if LIMB_BITS == 32 => hi!(@2 x, rslc, u32, u32_to_hi64_2), + 2 => hi!(@2 x, rslc, u64, u64_to_hi64_2), + _ if LIMB_BITS == 32 => hi!(@nonzero3 x, rslc, u32, u32_to_hi64_3), + _ => hi!(@nonzero2 x, rslc, u64, u64_to_hi64_2), + } +} + +// POWERS +// ------ + +/// MulAssign by a power of 5. +/// +/// Theoretically... +/// +/// Use an exponentiation by squaring method, since it reduces the time +/// complexity of the multiplication to ~`O(log(n))` for the squaring, +/// and `O(n*m)` for the result. Since `m` is typically a lower-order +/// factor, this significantly reduces the number of multiplications +/// we need to do. Iteratively multiplying by small powers follows +/// the nth triangular number series, which scales as `O(p^2)`, but +/// where `p` is `n+m`. In short, it scales very poorly. +/// +/// Practically.... +/// +/// Exponentiation by Squaring: +/// running 2 tests +/// test bigcomp_f32_lexical ... bench: 1,018 ns/iter (+/- 78) +/// test bigcomp_f64_lexical ... bench: 3,639 ns/iter (+/- 1,007) +/// +/// Exponentiation by Iterative Small Powers: +/// running 2 tests +/// test bigcomp_f32_lexical ... bench: 518 ns/iter (+/- 31) +/// test bigcomp_f64_lexical ... bench: 583 ns/iter (+/- 47) +/// +/// Exponentiation by Iterative Large Powers (of 2): +/// running 2 tests +/// test bigcomp_f32_lexical ... bench: 671 ns/iter (+/- 31) +/// test bigcomp_f64_lexical ... 
bench: 1,394 ns/iter (+/- 47) +/// +/// The following benchmarks were run on `1 * 5^300`, using native `pow`, +/// a version with only small powers, and one with pre-computed powers +/// of `5^(3 * max_exp)`, rather than `5^(5 * max_exp)`. +/// +/// However, using large powers is crucial for good performance for higher +/// powers. +/// pow/default time: [426.20 ns 427.96 ns 429.89 ns] +/// pow/small time: [2.9270 us 2.9411 us 2.9565 us] +/// pow/large:3 time: [838.51 ns 842.21 ns 846.27 ns] +/// +/// Even using worst-case scenarios, exponentiation by squaring is +/// significantly slower for our workloads. Just multiply by small powers, +/// in simple cases, and use precalculated large powers in other cases. +/// +/// Furthermore, using sufficiently big large powers is also crucial for +/// performance. This is a tradeoff of binary size and performance, and +/// using a single value at ~`5^(5 * max_exp)` seems optimal. +pub fn pow(x: &mut VecType, mut exp: u32) -> Option<()> { + // Minimize the number of iterations for large exponents: just + // do a few steps with a large powers. + #[cfg(not(feature = "compact"))] + { + while exp >= LARGE_POW5_STEP { + large_mul(x, &LARGE_POW5)?; + exp -= LARGE_POW5_STEP; + } + } + + // Now use our pre-computed small powers iteratively. + // This is calculated as `⌊log(2^BITS - 1, 5)⌋`. + let small_step = if LIMB_BITS == 32 { + 13 + } else { + 27 + }; + let max_native = (5 as Limb).pow(small_step); + while exp >= small_step { + small_mul(x, max_native)?; + exp -= small_step; + } + if exp != 0 { + // SAFETY: safe, since `exp < small_step`. + let small_power = unsafe { f64::int_pow_fast_path(exp as usize, 5) }; + small_mul(x, small_power as Limb)?; + } + Some(()) +} + +// SCALAR +// ------ + +/// Add two small integers and return the resulting value and if overflow happens. +#[inline(always)] +pub fn scalar_add(x: Limb, y: Limb) -> (Limb, bool) { + x.overflowing_add(y) +} + +/// Multiply two small integers (with carry) (and return the overflow contribution). +/// +/// Returns the (low, high) components. +#[inline(always)] +pub fn scalar_mul(x: Limb, y: Limb, carry: Limb) -> (Limb, Limb) { + // Cannot overflow, as long as wide is 2x as wide. This is because + // the following is always true: + // `Wide::MAX - (Narrow::MAX * Narrow::MAX) >= Narrow::MAX` + let z: Wide = (x as Wide) * (y as Wide) + (carry as Wide); + (z as Limb, (z >> LIMB_BITS) as Limb) +} + +// SMALL +// ----- + +/// Add small integer to bigint starting from offset. +#[inline] +pub fn small_add_from(x: &mut VecType, y: Limb, start: usize) -> Option<()> { + let mut index = start; + let mut carry = y; + while carry != 0 && index < x.len() { + let result = scalar_add(x[index], carry); + x[index] = result.0; + carry = result.1 as Limb; + index += 1; + } + // If we carried past all the elements, add to the end of the buffer. + if carry != 0 { + x.try_push(carry)?; + } + Some(()) +} + +/// Add small integer to bigint. +#[inline(always)] +pub fn small_add(x: &mut VecType, y: Limb) -> Option<()> { + small_add_from(x, y, 0) +} + +/// Multiply bigint by small integer. +#[inline] +pub fn small_mul(x: &mut VecType, y: Limb) -> Option<()> { + let mut carry = 0; + for xi in x.iter_mut() { + let result = scalar_mul(*xi, y, carry); + *xi = result.0; + carry = result.1; + } + // If we carried past all the elements, add to the end of the buffer. + if carry != 0 { + x.try_push(carry)?; + } + Some(()) +} + +// LARGE +// ----- + +/// Add bigint to bigint starting from offset. 
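+// Conceptually this computes `x += y << (LIMB_BITS * start)`: the offset is
+// measured in whole limbs, which is how `long_mul` below accumulates each
+// partial product at its correct position.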
+pub fn large_add_from(x: &mut VecType, y: &[Limb], start: usize) -> Option<()> { + // The effective x buffer is from `xstart..x.len()`, so we need to treat + // that as the current range. If the effective y buffer is longer, need + // to resize to that, + the start index. + if y.len() > x.len().saturating_sub(start) { + // Ensure we panic if we can't extend the buffer. + // This avoids any unsafe behavior afterwards. + x.try_resize(y.len() + start, 0)?; + } + + // Iteratively add elements from y to x. + let mut carry = false; + for (index, &yi) in y.iter().enumerate() { + // We panicked in `try_resize` if this wasn't true. + let xi = x.get_mut(start + index).unwrap(); + + // Only one op of the two ops can overflow, since we added at max + // Limb::max_value() + Limb::max_value(). Add the previous carry, + // and store the current carry for the next. + let result = scalar_add(*xi, yi); + *xi = result.0; + let mut tmp = result.1; + if carry { + let result = scalar_add(*xi, 1); + *xi = result.0; + tmp |= result.1; + } + carry = tmp; + } + + // Handle overflow. + if carry { + small_add_from(x, 1, y.len() + start)?; + } + Some(()) +} + +/// Add bigint to bigint. +#[inline(always)] +pub fn large_add(x: &mut VecType, y: &[Limb]) -> Option<()> { + large_add_from(x, y, 0) +} + +/// Grade-school multiplication algorithm. +/// +/// Slow, naive algorithm, using limb-bit bases and just shifting left for +/// each iteration. This could be optimized with numerous other algorithms, +/// but it's extremely simple, and works in O(n*m) time, which is fine +/// by me. Each iteration, of which there are `m` iterations, requires +/// `n` multiplications, and `n` additions, or grade-school multiplication. +/// +/// Don't use Karatsuba multiplication, since out implementation seems to +/// be slower asymptotically, which is likely just due to the small sizes +/// we deal with here. 
For example, running on the following data: +/// +/// ```text +/// const SMALL_X: &[u32] = &[ +/// 766857581, 3588187092, 1583923090, 2204542082, 1564708913, 2695310100, 3676050286, +/// 1022770393, 468044626, 446028186 +/// ]; +/// const SMALL_Y: &[u32] = &[ +/// 3945492125, 3250752032, 1282554898, 1708742809, 1131807209, 3171663979, 1353276095, +/// 1678845844, 2373924447, 3640713171 +/// ]; +/// const LARGE_X: &[u32] = &[ +/// 3647536243, 2836434412, 2154401029, 1297917894, 137240595, 790694805, 2260404854, +/// 3872698172, 690585094, 99641546, 3510774932, 1672049983, 2313458559, 2017623719, +/// 638180197, 1140936565, 1787190494, 1797420655, 14113450, 2350476485, 3052941684, +/// 1993594787, 2901001571, 4156930025, 1248016552, 848099908, 2660577483, 4030871206, +/// 692169593, 2835966319, 1781364505, 4266390061, 1813581655, 4210899844, 2137005290, +/// 2346701569, 3715571980, 3386325356, 1251725092, 2267270902, 474686922, 2712200426, +/// 197581715, 3087636290, 1379224439, 1258285015, 3230794403, 2759309199, 1494932094, +/// 326310242 +/// ]; +/// const LARGE_Y: &[u32] = &[ +/// 1574249566, 868970575, 76716509, 3198027972, 1541766986, 1095120699, 3891610505, +/// 2322545818, 1677345138, 865101357, 2650232883, 2831881215, 3985005565, 2294283760, +/// 3468161605, 393539559, 3665153349, 1494067812, 106699483, 2596454134, 797235106, +/// 705031740, 1209732933, 2732145769, 4122429072, 141002534, 790195010, 4014829800, +/// 1303930792, 3649568494, 308065964, 1233648836, 2807326116, 79326486, 1262500691, +/// 621809229, 2258109428, 3819258501, 171115668, 1139491184, 2979680603, 1333372297, +/// 1657496603, 2790845317, 4090236532, 4220374789, 601876604, 1828177209, 2372228171, +/// 2247372529 +/// ]; +/// ``` +/// +/// We get the following results: + +/// ```text +/// mul/small:long time: [220.23 ns 221.47 ns 222.81 ns] +/// Found 4 outliers among 100 measurements (4.00%) +/// 2 (2.00%) high mild +/// 2 (2.00%) high severe +/// mul/small:karatsuba time: [233.88 ns 234.63 ns 235.44 ns] +/// Found 11 outliers among 100 measurements (11.00%) +/// 8 (8.00%) high mild +/// 3 (3.00%) high severe +/// mul/large:long time: [1.9365 us 1.9455 us 1.9558 us] +/// Found 12 outliers among 100 measurements (12.00%) +/// 7 (7.00%) high mild +/// 5 (5.00%) high severe +/// mul/large:karatsuba time: [4.4250 us 4.4515 us 4.4812 us] +/// ``` +/// +/// In short, Karatsuba multiplication is never worthwhile for out use-case. +pub fn long_mul(x: &[Limb], y: &[Limb]) -> Option { + // Using the immutable value, multiply by all the scalars in y, using + // the algorithm defined above. Use a single buffer to avoid + // frequent reallocations. Handle the first case to avoid a redundant + // addition, since we know y.len() >= 1. + let mut z = VecType::try_from(x)?; + if !y.is_empty() { + let y0 = y[0]; + small_mul(&mut z, y0)?; + + for (index, &yi) in y.iter().enumerate().skip(1) { + if yi != 0 { + let mut zi = VecType::try_from(x)?; + small_mul(&mut zi, yi)?; + large_add_from(&mut z, &zi, index)?; + } + } + } + + z.normalize(); + Some(z) +} + +/// Multiply bigint by bigint using grade-school multiplication algorithm. +#[inline(always)] +pub fn large_mul(x: &mut VecType, y: &[Limb]) -> Option<()> { + // Karatsuba multiplication never makes sense, so just use grade school + // multiplication. + if y.len() == 1 { + // SAFETY: safe since `y.len() == 1`. + small_mul(x, y[0])?; + } else { + *x = long_mul(y, x)?; + } + Some(()) +} + +// SHIFT +// ----- + +/// Shift-left `n` bits inside a buffer. 
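+// The caller must pass `0 < n < LIMB_BITS` here; whole-limb shifts are
+// handled separately by `shl_limbs`, and `shl` splits a general shift into
+// the two parts.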
+#[inline] +pub fn shl_bits(x: &mut VecType, n: usize) -> Option<()> { + debug_assert!(n != 0); + + // Internally, for each item, we shift left by n, and add the previous + // right shifted limb-bits. + // For example, we transform (for u8) shifted left 2, to: + // b10100100 b01000010 + // b10 b10010001 b00001000 + debug_assert!(n < LIMB_BITS); + let rshift = LIMB_BITS - n; + let lshift = n; + let mut prev: Limb = 0; + for xi in x.iter_mut() { + let tmp = *xi; + *xi <<= lshift; + *xi |= prev >> rshift; + prev = tmp; + } + + // Always push the carry, even if it creates a non-normal result. + let carry = prev >> rshift; + if carry != 0 { + x.try_push(carry)?; + } + + Some(()) +} + +/// Shift-left `n` limbs inside a buffer. +#[inline] +pub fn shl_limbs(x: &mut VecType, n: usize) -> Option<()> { + debug_assert!(n != 0); + if n + x.len() > x.capacity() { + None + } else if !x.is_empty() { + let len = n + x.len(); + // SAFE: since x is not empty, and `x.len() + n <= x.capacity()`. + unsafe { + // Move the elements. + let src = x.as_ptr(); + let dst = x.as_mut_ptr().add(n); + ptr::copy(src, dst, x.len()); + // Write our 0s. + ptr::write_bytes(x.as_mut_ptr(), 0, n); + x.set_len(len); + } + Some(()) + } else { + Some(()) + } +} + +/// Shift-left buffer by n bits. +#[inline] +pub fn shl(x: &mut VecType, n: usize) -> Option<()> { + let rem = n % LIMB_BITS; + let div = n / LIMB_BITS; + if rem != 0 { + shl_bits(x, rem)?; + } + if div != 0 { + shl_limbs(x, div)?; + } + Some(()) +} + +/// Get number of leading zero bits in the storage. +#[inline] +pub fn leading_zeros(x: &[Limb]) -> u32 { + let length = x.len(); + // wrapping_sub is fine, since it'll just return None. + if let Some(&value) = x.get(length.wrapping_sub(1)) { + value.leading_zeros() + } else { + 0 + } +} + +/// Calculate the bit-length of the big-integer. +#[inline] +pub fn bit_length(x: &[Limb]) -> u32 { + let nlz = leading_zeros(x); + LIMB_BITS as u32 * x.len() as u32 - nlz +} + +// LIMB +// ---- + +// Type for a single limb of the big integer. +// +// A limb is analogous to a digit in base10, except, it stores 32-bit +// or 64-bit numbers instead. We want types where 64-bit multiplication +// is well-supported by the architecture, rather than emulated in 3 +// instructions. The quickest way to check this support is using a +// cross-compiler for numerous architectures, along with the following +// source file and command: +// +// Compile with `gcc main.c -c -S -O3 -masm=intel` +// +// And the source code is: +// ```text +// #include +// +// struct i128 { +// uint64_t hi; +// uint64_t lo; +// }; +// +// // Type your code here, or load an example. +// struct i128 square(uint64_t x, uint64_t y) { +// __int128 prod = (__int128)x * (__int128)y; +// struct i128 z; +// z.hi = (uint64_t)(prod >> 64); +// z.lo = (uint64_t)prod; +// return z; +// } +// ``` +// +// If the result contains `call __multi3`, then the multiplication +// is emulated by the compiler. Otherwise, it's natively supported. +// +// This should be all-known 64-bit platforms supported by Rust. +// https://forge.rust-lang.org/platform-support.html +// +// # Supported +// +// Platforms where native 128-bit multiplication is explicitly supported: +// - x86_64 (Supported via `MUL`). +// - mips64 (Supported via `DMULTU`, which `HI` and `LO` can be read-from). +// - s390x (Supported via `MLGR`). +// +// # Efficient +// +// Platforms where native 64-bit multiplication is supported and +// you can extract hi-lo for 64-bit multiplications. 
+// - aarch64 (Requires `UMULH` and `MUL` to capture high and low bits).
+// - powerpc64 (Requires `MULHDU` and `MULLD` to capture high and low bits).
+// - riscv64 (Requires `MUL` and `MULH` to capture high and low bits).
+//
+// # Unsupported
+//
+// Platforms where native 128-bit multiplication is not supported,
+// requiring software emulation.
+// - sparc64 (`UMUL` only supports double-word arguments).
+// - sparcv9 (Same as sparc64).
+//
+// These tests are run via `xcross`, my own library for C cross-compiling,
+// which supports numerous targets (far in excess of Rust's tier 1 support,
+// or rust-embedded/cross's list). xcross may be found here:
+// https://github.com/Alexhuszagh/xcross
+//
+// To compile for the given target, run:
+// `xcross gcc main.c -c -S -O3 --target $target`
+//
+// All 32-bit architectures inherently do not have support. That means
+// we can essentially look for 64-bit architectures that are not SPARC.
+
+#[cfg(all(target_pointer_width = "64", not(target_arch = "sparc")))]
+pub type Limb = u64;
+#[cfg(all(target_pointer_width = "64", not(target_arch = "sparc")))]
+pub type Wide = u128;
+#[cfg(all(target_pointer_width = "64", not(target_arch = "sparc")))]
+pub const LIMB_BITS: usize = 64;
+
+#[cfg(not(all(target_pointer_width = "64", not(target_arch = "sparc"))))]
+pub type Limb = u32;
+#[cfg(not(all(target_pointer_width = "64", not(target_arch = "sparc"))))]
+pub type Wide = u64;
+#[cfg(not(all(target_pointer_width = "64", not(target_arch = "sparc"))))]
+pub const LIMB_BITS: usize = 32;
diff --git a/vendor/minimal-lexical/src/extended_float.rs b/vendor/minimal-lexical/src/extended_float.rs
new file mode 100644
index 00000000000000..7397e199c84220
--- /dev/null
+++ b/vendor/minimal-lexical/src/extended_float.rs
@@ -0,0 +1,24 @@
+// FLOAT TYPE
+
+#![doc(hidden)]
+
+use crate::num::Float;
+
+/// Extended precision floating-point type.
+///
+/// Private implementation, exposed only for testing purposes.
+#[derive(Clone, Copy, Debug, PartialEq, Eq)]
+pub struct ExtendedFloat {
+    /// Mantissa for the extended-precision float.
+    pub mant: u64,
+    /// Binary exponent for the extended-precision float.
+    pub exp: i32,
+}
+
+/// Converts an `ExtendedFloat` to the closest machine float type.
+#[inline(always)]
+pub fn extended_to_float<F: Float>(x: ExtendedFloat) -> F {
+    let mut word = x.mant;
+    word |= (x.exp as u64) << F::MANTISSA_SIZE;
+    F::from_bits(word)
+}
diff --git a/vendor/minimal-lexical/src/fpu.rs b/vendor/minimal-lexical/src/fpu.rs
new file mode 100644
index 00000000000000..42059a080a5113
--- /dev/null
+++ b/vendor/minimal-lexical/src/fpu.rs
@@ -0,0 +1,98 @@
+//! Platform-specific assembly instructions to avoid
+//! intermediate rounding on architectures with FPUs.
+//!
+//! This is adapted from the implementation in the Rust core library;
+//! the original implementation can be found [here](https://github.com/rust-lang/rust/blob/master/library/core/src/num/dec2flt/fpu.rs).
+//!
+//! It is therefore also subject to an Apache-2.0/MIT license.
+
+#![cfg(feature = "nightly")]
+#![doc(hidden)]
+
+pub use fpu_precision::set_precision;
+
+// On x86, the x87 FPU is used for float operations if the SSE/SSE2 extensions are not available.
+// The x87 FPU operates with 80 bits of precision by default, which means that operations will
+// round to 80 bits causing double rounding to happen when values are eventually represented as
+// 32/64 bit float values. To overcome this, the FPU control word can be set so that the
+// computations are performed in the desired precision.
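+//
+// A typical way to use this guard (an illustrative sketch, not code from this
+// file) is to wrap the conversion in an RAII scope so the original control
+// word is restored on drop:
+//
+//     let _cw = fpu_precision::set_precision::<f64>();
+//     // ... perform the f64 arithmetic here ...
+//     // the control word is restored when `_cw` goes out of scope.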
+#[cfg(all(target_arch = "x86", not(target_feature = "sse2")))]
+mod fpu_precision {
+    use core::mem::size_of;
+
+    /// A structure used to preserve the original value of the FPU control word, so that it can be
+    /// restored when the structure is dropped.
+    ///
+    /// The x87 FPU is a 16-bits register whose fields are as follows:
+    ///
+    /// | 12-15 | 10-11 | 8-9 | 6-7 |  5 |  4 |  3 |  2 |  1 |  0 |
+    /// |------:|------:|----:|----:|---:|---:|---:|---:|---:|---:|
+    /// |       | RC    | PC  |     | PM | UM | OM | ZM | DM | IM |
+    ///
+    /// The documentation for all of the fields is available in the IA-32 Architectures Software
+    /// Developer's Manual (Volume 1).
+    ///
+    /// The only field which is relevant for the following code is PC, Precision Control. This
+    /// field determines the precision of the operations performed by the FPU. It can be set to:
+    ///  - 0b00, single precision i.e., 32-bits
+    ///  - 0b10, double precision i.e., 64-bits
+    ///  - 0b11, double extended precision i.e., 80-bits (default state)
+    /// The 0b01 value is reserved and should not be used.
+    pub struct FPUControlWord(u16);
+
+    fn set_cw(cw: u16) {
+        // SAFETY: the `fldcw` instruction has been audited to be able to work correctly with
+        // any `u16`
+        unsafe {
+            asm!(
+                "fldcw word ptr [{}]",
+                in(reg) &cw,
+                options(nostack),
+            )
+        }
+    }
+
+    /// Sets the precision field of the FPU to `T` and returns a `FPUControlWord`.
+    pub fn set_precision<T>() -> FPUControlWord {
+        let mut cw = 0_u16;
+
+        // Compute the value for the Precision Control field that is appropriate for `T`.
+        let cw_precision = match size_of::<T>() {
+            4 => 0x0000, // 32 bits
+            8 => 0x0200, // 64 bits
+            _ => 0x0300, // default, 80 bits
+        };
+
+        // Get the original value of the control word to restore it later, when the
+        // `FPUControlWord` structure is dropped
+        // SAFETY: the `fnstcw` instruction has been audited to be able to work correctly with
+        // any `u16`
+        unsafe {
+            asm!(
+                "fnstcw word ptr [{}]",
+                in(reg) &mut cw,
+                options(nostack),
+            )
+        }
+
+        // Set the control word to the desired precision. This is achieved by masking away the old
+        // precision (bits 8 and 9, 0x300) and replacing it with the precision flag computed above.
+        set_cw((cw & 0xFCFF) | cw_precision);
+
+        FPUControlWord(cw)
+    }
+
+    impl Drop for FPUControlWord {
+        fn drop(&mut self) {
+            set_cw(self.0)
+        }
+    }
+}
+
+// In most architectures, floating point operations have an explicit bit size, therefore the
+// precision of the computation is determined on a per-operation basis.
+#[cfg(any(not(target_arch = "x86"), target_feature = "sse2"))]
+mod fpu_precision {
+    pub fn set_precision<T>() {
+    }
+}
diff --git a/vendor/minimal-lexical/src/heapvec.rs b/vendor/minimal-lexical/src/heapvec.rs
new file mode 100644
index 00000000000000..035926018a41f8
--- /dev/null
+++ b/vendor/minimal-lexical/src/heapvec.rs
@@ -0,0 +1,190 @@
+//! Simple heap-allocated vector.
+
+#![cfg(feature = "alloc")]
+#![doc(hidden)]
+
+use crate::bigint;
+#[cfg(not(feature = "std"))]
+use alloc::vec::Vec;
+use core::{cmp, ops};
+#[cfg(feature = "std")]
+use std::vec::Vec;
+
+/// Simple heap vector implementation.
+#[derive(Clone)]
+pub struct HeapVec {
+    /// The heap-allocated buffer for the elements.
+    data: Vec<bigint::Limb>,
+}
+
+#[allow(clippy::new_without_default)]
+impl HeapVec {
+    /// Construct an empty vector.
+    #[inline]
+    pub fn new() -> Self {
+        Self {
+            data: Vec::with_capacity(bigint::BIGINT_LIMBS),
+        }
+    }
+
+    /// Construct a vector from an existing slice.
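+    // The `Option` return only mirrors the fallible `StackVec` API used in
+    // no-alloc builds; this heap-backed version always succeeds.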
+ #[inline] + pub fn try_from(x: &[bigint::Limb]) -> Option { + let mut vec = Self::new(); + vec.try_extend(x)?; + Some(vec) + } + + /// Sets the length of a vector. + /// + /// This will explicitly set the size of the vector, without actually + /// modifying its buffers, so it is up to the caller to ensure that the + /// vector is actually the specified size. + /// + /// # Safety + /// + /// Safe as long as `len` is less than `self.capacity()` and has been initialized. + #[inline] + pub unsafe fn set_len(&mut self, len: usize) { + debug_assert!(len <= bigint::BIGINT_LIMBS); + unsafe { self.data.set_len(len) }; + } + + /// The number of elements stored in the vector. + #[inline] + pub fn len(&self) -> usize { + self.data.len() + } + + /// If the vector is empty. + #[inline] + pub fn is_empty(&self) -> bool { + self.len() == 0 + } + + /// The number of items the vector can hold. + #[inline] + pub fn capacity(&self) -> usize { + self.data.capacity() + } + + /// Append an item to the vector. + #[inline] + pub fn try_push(&mut self, value: bigint::Limb) -> Option<()> { + self.data.push(value); + Some(()) + } + + /// Remove an item from the end of the vector and return it, or None if empty. + #[inline] + pub fn pop(&mut self) -> Option { + self.data.pop() + } + + /// Copy elements from a slice and append them to the vector. + #[inline] + pub fn try_extend(&mut self, slc: &[bigint::Limb]) -> Option<()> { + self.data.extend_from_slice(slc); + Some(()) + } + + /// Try to resize the buffer. + /// + /// If the new length is smaller than the current length, truncate + /// the input. If it's larger, then append elements to the buffer. + #[inline] + pub fn try_resize(&mut self, len: usize, value: bigint::Limb) -> Option<()> { + self.data.resize(len, value); + Some(()) + } + + // HI + + /// Get the high 64 bits from the vector. + #[inline(always)] + pub fn hi64(&self) -> (u64, bool) { + bigint::hi64(&self.data) + } + + // FROM + + /// Create StackVec from u64 value. + #[inline(always)] + pub fn from_u64(x: u64) -> Self { + bigint::from_u64(x) + } + + // MATH + + /// Normalize the integer, so any leading zero values are removed. + #[inline] + pub fn normalize(&mut self) { + bigint::normalize(self) + } + + /// Get if the big integer is normalized. + #[inline] + pub fn is_normalized(&self) -> bool { + bigint::is_normalized(self) + } + + /// AddAssign small integer. + #[inline] + pub fn add_small(&mut self, y: bigint::Limb) -> Option<()> { + bigint::small_add(self, y) + } + + /// MulAssign small integer. 
+    #[inline]
+    pub fn mul_small(&mut self, y: bigint::Limb) -> Option<()> {
+        bigint::small_mul(self, y)
+    }
+}
+
+impl PartialEq for HeapVec {
+    #[inline]
+    #[allow(clippy::op_ref)]
+    fn eq(&self, other: &Self) -> bool {
+        use core::ops::Deref;
+        self.len() == other.len() && self.deref() == other.deref()
+    }
+}
+
+impl Eq for HeapVec {
+}
+
+impl cmp::PartialOrd for HeapVec {
+    #[inline]
+    fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> {
+        Some(bigint::compare(self, other))
+    }
+}
+
+impl cmp::Ord for HeapVec {
+    #[inline]
+    fn cmp(&self, other: &Self) -> cmp::Ordering {
+        bigint::compare(self, other)
+    }
+}
+
+impl ops::Deref for HeapVec {
+    type Target = [bigint::Limb];
+    #[inline]
+    fn deref(&self) -> &[bigint::Limb] {
+        &self.data
+    }
+}
+
+impl ops::DerefMut for HeapVec {
+    #[inline]
+    fn deref_mut(&mut self) -> &mut [bigint::Limb] {
+        &mut self.data
+    }
+}
+
+impl ops::MulAssign<&[bigint::Limb]> for HeapVec {
+    #[inline]
+    fn mul_assign(&mut self, rhs: &[bigint::Limb]) {
+        bigint::large_mul(self, rhs).unwrap();
+    }
+}
diff --git a/vendor/minimal-lexical/src/lemire.rs b/vendor/minimal-lexical/src/lemire.rs
new file mode 100644
index 00000000000000..99b1ae705911da
--- /dev/null
+++ b/vendor/minimal-lexical/src/lemire.rs
@@ -0,0 +1,225 @@
+//! Implementation of the Eisel-Lemire algorithm.
+//!
+//! This is adapted from [fast-float-rust](https://github.com/aldanor/fast-float-rust),
+//! a port of [fast_float](https://github.com/fastfloat/fast_float) to Rust.
+
+#![cfg(not(feature = "compact"))]
+#![doc(hidden)]
+
+use crate::extended_float::ExtendedFloat;
+use crate::num::Float;
+use crate::number::Number;
+use crate::table::{LARGEST_POWER_OF_FIVE, POWER_OF_FIVE_128, SMALLEST_POWER_OF_FIVE};
+
+/// Ensure truncation of digits doesn't affect our computation, by doing 2 passes.
+#[inline]
+pub fn lemire<F: Float>(num: &Number) -> ExtendedFloat {
+    // If significant digits were truncated, then we can have rounding error
+    // only if `mantissa + 1` produces a different result. We also avoid
+    // redundantly using the Eisel-Lemire algorithm if it was unable to
+    // correctly round on the first pass.
+    let mut fp = compute_float::<F>(num.exponent, num.mantissa);
+    if num.many_digits && fp.exp >= 0 && fp != compute_float::<F>(num.exponent, num.mantissa + 1) {
+        // Need to re-calculate, since the previous values are rounded
+        // when the slow path algorithm expects a normalized extended float.
+        fp = compute_error::<F>(num.exponent, num.mantissa);
+    }
+    fp
+}
+
+/// Compute a float using an extended-precision representation.
+///
+/// Fast conversion of the significant digits and decimal exponent of
+/// a float to an extended representation with a binary float. This
+/// algorithm will accurately parse the vast majority of cases,
+/// and uses a 128-bit representation (with a fallback 192-bit
+/// representation).
+///
+/// This algorithm scales the exponent by the decimal exponent
+/// using pre-computed powers-of-5, and calculates if the
+/// representation can be unambiguously rounded to the nearest
+/// machine float. Near-halfway cases are not handled here,
+/// and are represented by a negative, biased binary exponent.
+///
+/// The algorithm is described in detail in "Daniel Lemire, Number Parsing
+/// at a Gigabyte per Second" in section 5, "Fast Algorithm", and
+/// section 6, "Exact Numbers And Ties", available online:
+/// <https://arxiv.org/abs/2101.11408>.
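+// Illustrative example: the decimal input "1.2345" would reach this routine
+// pre-parsed as significant digits `w = 12345` and decimal exponent `q = -4`,
+// and the result is the nearest binary float to `w * 10^q`.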
+pub fn compute_float<F: Float>(q: i32, mut w: u64) -> ExtendedFloat {
+    let fp_zero = ExtendedFloat {
+        mant: 0,
+        exp: 0,
+    };
+    let fp_inf = ExtendedFloat {
+        mant: 0,
+        exp: F::INFINITE_POWER,
+    };
+
+    // Short-circuit if the value can only be a literal 0 or infinity.
+    if w == 0 || q < F::SMALLEST_POWER_OF_TEN {
+        return fp_zero;
+    } else if q > F::LARGEST_POWER_OF_TEN {
+        return fp_inf;
+    }
+    // Normalize our significant digits, so the most-significant bit is set.
+    let lz = w.leading_zeros() as i32;
+    w <<= lz;
+    let (lo, hi) = compute_product_approx(q, w, F::MANTISSA_SIZE as usize + 3);
+    if lo == 0xFFFF_FFFF_FFFF_FFFF {
+        // If we have failed to approximate w x 5^-q with our 128-bit value.
+        // Since the addition of 1 could lead to an overflow which could then
+        // round up over the half-way point, this can lead to improper rounding
+        // of a float.
+        //
+        // However, this can only occur if q ∈ [-27, 55]. The upper bound of q
+        // is 55 because 5^55 < 2^128, however, this can only happen if 5^q > 2^64,
+        // since otherwise the product can be represented in 64-bits, producing
+        // an exact result. For negative exponents, rounding-to-even can
+        // only occur if 5^-q < 2^64.
+        //
+        // For detailed explanations of rounding for negative and positive
+        // exponents, see the rounding discussion in the paper linked above.
+        let inside_safe_exponent = (q >= -27) && (q <= 55);
+        if !inside_safe_exponent {
+            return compute_error_scaled::<F>(q, hi, lz);
+        }
+    }
+    let upperbit = (hi >> 63) as i32;
+    let mut mantissa = hi >> (upperbit + 64 - F::MANTISSA_SIZE - 3);
+    let mut power2 = power(q) + upperbit - lz - F::MINIMUM_EXPONENT;
+    if power2 <= 0 {
+        if -power2 + 1 >= 64 {
+            // Have more than 64 bits below the minimum exponent, must be 0.
+            return fp_zero;
+        }
+        // Have a subnormal value.
+        mantissa >>= -power2 + 1;
+        mantissa += mantissa & 1;
+        mantissa >>= 1;
+        power2 = (mantissa >= (1_u64 << F::MANTISSA_SIZE)) as i32;
+        return ExtendedFloat {
+            mant: mantissa,
+            exp: power2,
+        };
+    }
+    // Need to handle rounding ties. Normally, we need to round up,
+    // but if we fall right in between and we have an even basis, we
+    // need to round down.
+    //
+    // This will only occur if:
+    //  1. The lower 64 bits of the 128-bit representation is 0.
+    //     IE, 5^q fits in a single 64-bit word.
+    //  2. The least-significant bit prior to the truncated mantissa is odd.
+    //  3. All the bits truncated when shifting to mantissa bits + 1 are 0.
+    //
+    // Or, we may fall between two floats: we are exactly halfway.
+    if lo <= 1
+        && q >= F::MIN_EXPONENT_ROUND_TO_EVEN
+        && q <= F::MAX_EXPONENT_ROUND_TO_EVEN
+        && mantissa & 3 == 1
+        && (mantissa << (upperbit + 64 - F::MANTISSA_SIZE - 3)) == hi
+    {
+        // Zero the lowest bit, so we don't round up.
+        mantissa &= !1_u64;
+    }
+    // Round-to-even, then shift the significant digits into place.
+    mantissa += mantissa & 1;
+    mantissa >>= 1;
+    if mantissa >= (2_u64 << F::MANTISSA_SIZE) {
+        // Rounding up overflowed, so the carry bit is set. Set the
+        // mantissa to 1 (only the implicit, hidden bit is set) and
+        // increase the exponent.
+        mantissa = 1_u64 << F::MANTISSA_SIZE;
+        power2 += 1;
+    }
+    // Zero out the hidden bit.
+    mantissa &= !(1_u64 << F::MANTISSA_SIZE);
+    if power2 >= F::INFINITE_POWER {
+        // Exponent is above largest normal value, must be infinite.
+        return fp_inf;
+    }
+    ExtendedFloat {
+        mant: mantissa,
+        exp: power2,
+    }
+}
+
+/// Fallback algorithm to calculate the non-rounded representation.
+/// This calculates the extended representation, and then normalizes +/// the resulting representation, so the high bit is set. +#[inline] +pub fn compute_error(q: i32, mut w: u64) -> ExtendedFloat { + let lz = w.leading_zeros() as i32; + w <<= lz; + let hi = compute_product_approx(q, w, F::MANTISSA_SIZE as usize + 3).1; + compute_error_scaled::(q, hi, lz) +} + +/// Compute the error from a mantissa scaled to the exponent. +#[inline] +pub fn compute_error_scaled(q: i32, mut w: u64, lz: i32) -> ExtendedFloat { + // Want to normalize the float, but this is faster than ctlz on most architectures. + let hilz = (w >> 63) as i32 ^ 1; + w <<= hilz; + let power2 = power(q as i32) + F::EXPONENT_BIAS - hilz - lz - 62; + + ExtendedFloat { + mant: w, + exp: power2 + F::INVALID_FP, + } +} + +/// Calculate a base 2 exponent from a decimal exponent. +/// This uses a pre-computed integer approximation for +/// log2(10), where 217706 / 2^16 is accurate for the +/// entire range of non-finite decimal exponents. +#[inline] +fn power(q: i32) -> i32 { + (q.wrapping_mul(152_170 + 65536) >> 16) + 63 +} + +#[inline] +fn full_multiplication(a: u64, b: u64) -> (u64, u64) { + let r = (a as u128) * (b as u128); + (r as u64, (r >> 64) as u64) +} + +// This will compute or rather approximate w * 5**q and return a pair of 64-bit words +// approximating the result, with the "high" part corresponding to the most significant +// bits and the low part corresponding to the least significant bits. +fn compute_product_approx(q: i32, w: u64, precision: usize) -> (u64, u64) { + debug_assert!(q >= SMALLEST_POWER_OF_FIVE); + debug_assert!(q <= LARGEST_POWER_OF_FIVE); + debug_assert!(precision <= 64); + + let mask = if precision < 64 { + 0xFFFF_FFFF_FFFF_FFFF_u64 >> precision + } else { + 0xFFFF_FFFF_FFFF_FFFF_u64 + }; + + // 5^q < 2^64, then the multiplication always provides an exact value. + // That means whenever we need to round ties to even, we always have + // an exact value. + let index = (q - SMALLEST_POWER_OF_FIVE) as usize; + let (lo5, hi5) = POWER_OF_FIVE_128[index]; + // Only need one multiplication as long as there is 1 zero but + // in the explicit mantissa bits, +1 for the hidden bit, +1 to + // determine the rounding direction, +1 for if the computed + // product has a leading zero. + let (mut first_lo, mut first_hi) = full_multiplication(w, lo5); + if first_hi & mask == mask { + // Need to do a second multiplication to get better precision + // for the lower product. This will always be exact + // where q is < 55, since 5^55 < 2^128. If this wraps, + // then we need to need to round up the hi product. + let (_, second_hi) = full_multiplication(w, hi5); + first_lo = first_lo.wrapping_add(second_hi); + if second_hi > first_lo { + first_hi += 1; + } + } + (first_lo, first_hi) +} diff --git a/vendor/minimal-lexical/src/lib.rs b/vendor/minimal-lexical/src/lib.rs new file mode 100644 index 00000000000000..75f923475f21e2 --- /dev/null +++ b/vendor/minimal-lexical/src/lib.rs @@ -0,0 +1,68 @@ +//! Fast, minimal float-parsing algorithm. +//! +//! minimal-lexical has a simple, high-level API with a single +//! exported function: [`parse_float`]. +//! +//! [`parse_float`] expects a forward iterator for the integer +//! and fraction digits, as well as a parsed exponent as an [`i32`]. +//! +//! For more examples, please see [simple-example](https://github.com/Alexhuszagh/minimal-lexical/blob/master/examples/simple.rs). +//! +//! EXAMPLES +//! -------- +//! +//! ``` +//! extern crate minimal_lexical; +//! +//! 
// Let's say we want to parse "1.2345". +//! // First, we need an external parser to extract the integer digits ("1"), +//! // the fraction digits ("2345"), and then parse the exponent to a 32-bit +//! // integer (0). +//! // Warning: +//! // -------- +//! // Please note that leading zeros must be trimmed from the integer, +//! // and trailing zeros must be trimmed from the fraction. This cannot +//! // be handled by minimal-lexical, since we accept iterators. +//! let integer = b"1"; +//! let fraction = b"2345"; +//! let float: f64 = minimal_lexical::parse_float(integer.iter(), fraction.iter(), 0); +//! println!("float={:?}", float); // 1.235 +//! ``` +//! +//! [`parse_float`]: fn.parse_float.html +//! [`i32`]: https://doc.rust-lang.org/stable/std/primitive.i32.html + +// FEATURES + +// We want to have the same safety guarantees as Rust core, +// so we allow unused unsafe to clearly document safety guarantees. +#![allow(unused_unsafe)] +#![cfg_attr(feature = "lint", warn(unsafe_op_in_unsafe_fn))] +#![cfg_attr(not(feature = "std"), no_std)] + +#[cfg(all(feature = "alloc", not(feature = "std")))] +extern crate alloc; + +pub mod bellerophon; +pub mod bigint; +pub mod extended_float; +pub mod fpu; +pub mod heapvec; +pub mod lemire; +pub mod libm; +pub mod mask; +pub mod num; +pub mod number; +pub mod parse; +pub mod rounding; +pub mod slow; +pub mod stackvec; +pub mod table; + +mod table_bellerophon; +mod table_lemire; +mod table_small; + +// API +pub use self::num::Float; +pub use self::parse::parse_float; diff --git a/vendor/minimal-lexical/src/libm.rs b/vendor/minimal-lexical/src/libm.rs new file mode 100644 index 00000000000000..c9f93d36ac7bc1 --- /dev/null +++ b/vendor/minimal-lexical/src/libm.rs @@ -0,0 +1,1238 @@ +//! A small number of math routines for floats and doubles. +//! +//! These are adapted from libm, a port of musl libc's libm to Rust. +//! libm can be found online [here](https://github.com/rust-lang/libm), +//! and is similarly licensed under an Apache2.0/MIT license + +#![cfg(all(not(feature = "std"), feature = "compact"))] +#![doc(hidden)] + +/* origin: FreeBSD /usr/src/lib/msun/src/e_powf.c */ +/* + * Conversion to float by Ian Lance Taylor, Cygnus Support, ian@cygnus.com. + */ +/* + * ==================================================== + * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved. + * + * Developed at SunPro, a Sun Microsystems, Inc. business. + * Permission to use, copy, modify, and distribute this + * software is freely granted, provided that this notice + * is preserved. + * ==================================================== + */ + +/// # Safety +/// +/// Safe if `index < array.len()`. +macro_rules! i { + ($array:ident, $index:expr) => { + // SAFETY: safe if `index < array.len()`. 
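+        // `get_unchecked` elides the bounds check; the call sites below index
+        // small constant tables (e.g. `BP`, `DP_H`, `DP_L`) with `k` equal to
+        // 0 or 1, so the index is always in range.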
+ unsafe { *$array.get_unchecked($index) } + }; +} + +pub fn powf(x: f32, y: f32) -> f32 { + const BP: [f32; 2] = [1.0, 1.5]; + const DP_H: [f32; 2] = [0.0, 5.84960938e-01]; /* 0x3f15c000 */ + const DP_L: [f32; 2] = [0.0, 1.56322085e-06]; /* 0x35d1cfdc */ + const TWO24: f32 = 16777216.0; /* 0x4b800000 */ + const HUGE: f32 = 1.0e30; + const TINY: f32 = 1.0e-30; + const L1: f32 = 6.0000002384e-01; /* 0x3f19999a */ + const L2: f32 = 4.2857143283e-01; /* 0x3edb6db7 */ + const L3: f32 = 3.3333334327e-01; /* 0x3eaaaaab */ + const L4: f32 = 2.7272811532e-01; /* 0x3e8ba305 */ + const L5: f32 = 2.3066075146e-01; /* 0x3e6c3255 */ + const L6: f32 = 2.0697501302e-01; /* 0x3e53f142 */ + const P1: f32 = 1.6666667163e-01; /* 0x3e2aaaab */ + const P2: f32 = -2.7777778450e-03; /* 0xbb360b61 */ + const P3: f32 = 6.6137559770e-05; /* 0x388ab355 */ + const P4: f32 = -1.6533901999e-06; /* 0xb5ddea0e */ + const P5: f32 = 4.1381369442e-08; /* 0x3331bb4c */ + const LG2: f32 = 6.9314718246e-01; /* 0x3f317218 */ + const LG2_H: f32 = 6.93145752e-01; /* 0x3f317200 */ + const LG2_L: f32 = 1.42860654e-06; /* 0x35bfbe8c */ + const OVT: f32 = 4.2995665694e-08; /* -(128-log2(ovfl+.5ulp)) */ + const CP: f32 = 9.6179670095e-01; /* 0x3f76384f =2/(3ln2) */ + const CP_H: f32 = 9.6191406250e-01; /* 0x3f764000 =12b cp */ + const CP_L: f32 = -1.1736857402e-04; /* 0xb8f623c6 =tail of cp_h */ + const IVLN2: f32 = 1.4426950216e+00; + const IVLN2_H: f32 = 1.4426879883e+00; + const IVLN2_L: f32 = 7.0526075433e-06; + + let mut z: f32; + let mut ax: f32; + let z_h: f32; + let z_l: f32; + let mut p_h: f32; + let mut p_l: f32; + let y1: f32; + let mut t1: f32; + let t2: f32; + let mut r: f32; + let s: f32; + let mut sn: f32; + let mut t: f32; + let mut u: f32; + let mut v: f32; + let mut w: f32; + let i: i32; + let mut j: i32; + let mut k: i32; + let mut yisint: i32; + let mut n: i32; + let hx: i32; + let hy: i32; + let mut ix: i32; + let iy: i32; + let mut is: i32; + + hx = x.to_bits() as i32; + hy = y.to_bits() as i32; + + ix = hx & 0x7fffffff; + iy = hy & 0x7fffffff; + + /* x**0 = 1, even if x is NaN */ + if iy == 0 { + return 1.0; + } + + /* 1**y = 1, even if y is NaN */ + if hx == 0x3f800000 { + return 1.0; + } + + /* NaN if either arg is NaN */ + if ix > 0x7f800000 || iy > 0x7f800000 { + return x + y; + } + + /* determine if y is an odd int when x < 0 + * yisint = 0 ... y is not an integer + * yisint = 1 ... y is an odd int + * yisint = 2 ... 
y is an even int + */ + yisint = 0; + if hx < 0 { + if iy >= 0x4b800000 { + yisint = 2; /* even integer y */ + } else if iy >= 0x3f800000 { + k = (iy >> 23) - 0x7f; /* exponent */ + j = iy >> (23 - k); + if (j << (23 - k)) == iy { + yisint = 2 - (j & 1); + } + } + } + + /* special value of y */ + if iy == 0x7f800000 { + /* y is +-inf */ + if ix == 0x3f800000 { + /* (-1)**+-inf is 1 */ + return 1.0; + } else if ix > 0x3f800000 { + /* (|x|>1)**+-inf = inf,0 */ + return if hy >= 0 { + y + } else { + 0.0 + }; + } else { + /* (|x|<1)**+-inf = 0,inf */ + return if hy >= 0 { + 0.0 + } else { + -y + }; + } + } + if iy == 0x3f800000 { + /* y is +-1 */ + return if hy >= 0 { + x + } else { + 1.0 / x + }; + } + + if hy == 0x40000000 { + /* y is 2 */ + return x * x; + } + + if hy == 0x3f000000 + /* y is 0.5 */ + && hx >= 0 + { + /* x >= +0 */ + return sqrtf(x); + } + + ax = fabsf(x); + /* special value of x */ + if ix == 0x7f800000 || ix == 0 || ix == 0x3f800000 { + /* x is +-0,+-inf,+-1 */ + z = ax; + if hy < 0 { + /* z = (1/|x|) */ + z = 1.0 / z; + } + + if hx < 0 { + if ((ix - 0x3f800000) | yisint) == 0 { + z = (z - z) / (z - z); /* (-1)**non-int is NaN */ + } else if yisint == 1 { + z = -z; /* (x<0)**odd = -(|x|**odd) */ + } + } + return z; + } + + sn = 1.0; /* sign of result */ + if hx < 0 { + if yisint == 0 { + /* (x<0)**(non-int) is NaN */ + return (x - x) / (x - x); + } + + if yisint == 1 { + /* (x<0)**(odd int) */ + sn = -1.0; + } + } + + /* |y| is HUGE */ + if iy > 0x4d000000 { + /* if |y| > 2**27 */ + /* over/underflow if x is not close to one */ + if ix < 0x3f7ffff8 { + return if hy < 0 { + sn * HUGE * HUGE + } else { + sn * TINY * TINY + }; + } + + if ix > 0x3f800007 { + return if hy > 0 { + sn * HUGE * HUGE + } else { + sn * TINY * TINY + }; + } + + /* now |1-x| is TINY <= 2**-20, suffice to compute + log(x) by x-x^2/2+x^3/3-x^4/4 */ + t = ax - 1.; /* t has 20 trailing zeros */ + w = (t * t) * (0.5 - t * (0.333333333333 - t * 0.25)); + u = IVLN2_H * t; /* IVLN2_H has 16 sig. bits */ + v = t * IVLN2_L - w * IVLN2; + t1 = u + v; + is = t1.to_bits() as i32; + t1 = f32::from_bits(is as u32 & 0xfffff000); + t2 = v - (t1 - u); + } else { + let mut s2: f32; + let mut s_h: f32; + let s_l: f32; + let mut t_h: f32; + let mut t_l: f32; + + n = 0; + /* take care subnormal number */ + if ix < 0x00800000 { + ax *= TWO24; + n -= 24; + ix = ax.to_bits() as i32; + } + n += ((ix) >> 23) - 0x7f; + j = ix & 0x007fffff; + /* determine interval */ + ix = j | 0x3f800000; /* normalize ix */ + if j <= 0x1cc471 { + /* |x|> 1) & 0xfffff000) | 0x20000000) as i32; + t_h = f32::from_bits(is as u32 + 0x00400000 + ((k as u32) << 21)); + t_l = ax - (t_h - i!(BP, k as usize)); + s_l = v * ((u - s_h * t_h) - s_h * t_l); + /* compute log(ax) */ + s2 = s * s; + r = s2 * s2 * (L1 + s2 * (L2 + s2 * (L3 + s2 * (L4 + s2 * (L5 + s2 * L6))))); + r += s_l * (s_h + s); + s2 = s_h * s_h; + t_h = 3.0 + s2 + r; + is = t_h.to_bits() as i32; + t_h = f32::from_bits(is as u32 & 0xfffff000); + t_l = r - ((t_h - 3.0) - s2); + /* u+v = s*(1+...) */ + u = s_h * t_h; + v = s_l * t_h + t_l * s; + /* 2/(3log2)*(s+...) 
*/ + p_h = u + v; + is = p_h.to_bits() as i32; + p_h = f32::from_bits(is as u32 & 0xfffff000); + p_l = v - (p_h - u); + z_h = CP_H * p_h; /* cp_h+cp_l = 2/(3*log2) */ + z_l = CP_L * p_h + p_l * CP + i!(DP_L, k as usize); + /* log2(ax) = (s+..)*2/(3*log2) = n + dp_h + z_h + z_l */ + t = n as f32; + t1 = ((z_h + z_l) + i!(DP_H, k as usize)) + t; + is = t1.to_bits() as i32; + t1 = f32::from_bits(is as u32 & 0xfffff000); + t2 = z_l - (((t1 - t) - i!(DP_H, k as usize)) - z_h); + }; + + /* split up y into y1+y2 and compute (y1+y2)*(t1+t2) */ + is = y.to_bits() as i32; + y1 = f32::from_bits(is as u32 & 0xfffff000); + p_l = (y - y1) * t1 + y * t2; + p_h = y1 * t1; + z = p_l + p_h; + j = z.to_bits() as i32; + if j > 0x43000000 { + /* if z > 128 */ + return sn * HUGE * HUGE; /* overflow */ + } else if j == 0x43000000 { + /* if z == 128 */ + if p_l + OVT > z - p_h { + return sn * HUGE * HUGE; /* overflow */ + } + } else if (j & 0x7fffffff) > 0x43160000 { + /* z < -150 */ + // FIXME: check should be (uint32_t)j > 0xc3160000 + return sn * TINY * TINY; /* underflow */ + } else if j as u32 == 0xc3160000 + /* z == -150 */ + && p_l <= z - p_h + { + return sn * TINY * TINY; /* underflow */ + } + + /* + * compute 2**(p_h+p_l) + */ + i = j & 0x7fffffff; + k = (i >> 23) - 0x7f; + n = 0; + if i > 0x3f000000 { + /* if |z| > 0.5, set n = [z+0.5] */ + n = j + (0x00800000 >> (k + 1)); + k = ((n & 0x7fffffff) >> 23) - 0x7f; /* new k for n */ + t = f32::from_bits(n as u32 & !(0x007fffff >> k)); + n = ((n & 0x007fffff) | 0x00800000) >> (23 - k); + if j < 0 { + n = -n; + } + p_h -= t; + } + t = p_l + p_h; + is = t.to_bits() as i32; + t = f32::from_bits(is as u32 & 0xffff8000); + u = t * LG2_H; + v = (p_l - (t - p_h)) * LG2 + t * LG2_L; + z = u + v; + w = v - (z - u); + t = z * z; + t1 = z - t * (P1 + t * (P2 + t * (P3 + t * (P4 + t * P5)))); + r = (z * t1) / (t1 - 2.0) - (w + z * w); + z = 1.0 - (r - z); + j = z.to_bits() as i32; + j += n << 23; + if (j >> 23) <= 0 { + /* subnormal output */ + z = scalbnf(z, n); + } else { + z = f32::from_bits(j as u32); + } + sn * z +} + +/* origin: FreeBSD /usr/src/lib/msun/src/e_sqrtf.c */ +/* + * Conversion to float by Ian Lance Taylor, Cygnus Support, ian@cygnus.com. + */ +/* + * ==================================================== + * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved. + * + * Developed at SunPro, a Sun Microsystems, Inc. business. + * Permission to use, copy, modify, and distribute this + * software is freely granted, provided that this notice + * is preserved. + * ==================================================== + */ + +pub fn sqrtf(x: f32) -> f32 { + #[cfg(target_feature = "sse")] + { + // Note: This path is unlikely since LLVM will usually have already + // optimized sqrt calls into hardware instructions if sse is available, + // but if someone does end up here they'll apprected the speed increase. + #[cfg(target_arch = "x86")] + use core::arch::x86::*; + #[cfg(target_arch = "x86_64")] + use core::arch::x86_64::*; + // SAFETY: safe, since `_mm_set_ss` takes a 32-bit float, and returns + // a 128-bit type with the lowest 32-bits as `x`, `_mm_sqrt_ss` calculates + // the sqrt of this 128-bit vector, and `_mm_cvtss_f32` extracts the lower + // 32-bits as a 32-bit float. 
+ unsafe { + let m = _mm_set_ss(x); + let m_sqrt = _mm_sqrt_ss(m); + _mm_cvtss_f32(m_sqrt) + } + } + #[cfg(not(target_feature = "sse"))] + { + const TINY: f32 = 1.0e-30; + + let mut z: f32; + let sign: i32 = 0x80000000u32 as i32; + let mut ix: i32; + let mut s: i32; + let mut q: i32; + let mut m: i32; + let mut t: i32; + let mut i: i32; + let mut r: u32; + + ix = x.to_bits() as i32; + + /* take care of Inf and NaN */ + if (ix as u32 & 0x7f800000) == 0x7f800000 { + return x * x + x; /* sqrt(NaN)=NaN, sqrt(+inf)=+inf, sqrt(-inf)=sNaN */ + } + + /* take care of zero */ + if ix <= 0 { + if (ix & !sign) == 0 { + return x; /* sqrt(+-0) = +-0 */ + } + if ix < 0 { + return (x - x) / (x - x); /* sqrt(-ve) = sNaN */ + } + } + + /* normalize x */ + m = ix >> 23; + if m == 0 { + /* subnormal x */ + i = 0; + while ix & 0x00800000 == 0 { + ix <<= 1; + i = i + 1; + } + m -= i - 1; + } + m -= 127; /* unbias exponent */ + ix = (ix & 0x007fffff) | 0x00800000; + if m & 1 == 1 { + /* odd m, double x to make it even */ + ix += ix; + } + m >>= 1; /* m = [m/2] */ + + /* generate sqrt(x) bit by bit */ + ix += ix; + q = 0; + s = 0; + r = 0x01000000; /* r = moving bit from right to left */ + + while r != 0 { + t = s + r as i32; + if t <= ix { + s = t + r as i32; + ix -= t; + q += r as i32; + } + ix += ix; + r >>= 1; + } + + /* use floating add to find out rounding direction */ + if ix != 0 { + z = 1.0 - TINY; /* raise inexact flag */ + if z >= 1.0 { + z = 1.0 + TINY; + if z > 1.0 { + q += 2; + } else { + q += q & 1; + } + } + } + + ix = (q >> 1) + 0x3f000000; + ix += m << 23; + f32::from_bits(ix as u32) + } +} + +/// Absolute value (magnitude) (f32) +/// Calculates the absolute value (magnitude) of the argument `x`, +/// by direct manipulation of the bit representation of `x`. +pub fn fabsf(x: f32) -> f32 { + f32::from_bits(x.to_bits() & 0x7fffffff) +} + +pub fn scalbnf(mut x: f32, mut n: i32) -> f32 { + let x1p127 = f32::from_bits(0x7f000000); // 0x1p127f === 2 ^ 127 + let x1p_126 = f32::from_bits(0x800000); // 0x1p-126f === 2 ^ -126 + let x1p24 = f32::from_bits(0x4b800000); // 0x1p24f === 2 ^ 24 + + if n > 127 { + x *= x1p127; + n -= 127; + if n > 127 { + x *= x1p127; + n -= 127; + if n > 127 { + n = 127; + } + } + } else if n < -126 { + x *= x1p_126 * x1p24; + n += 126 - 24; + if n < -126 { + x *= x1p_126 * x1p24; + n += 126 - 24; + if n < -126 { + n = -126; + } + } + } + x * f32::from_bits(((0x7f + n) as u32) << 23) +} + +/* origin: FreeBSD /usr/src/lib/msun/src/e_pow.c */ +/* + * ==================================================== + * Copyright (C) 2004 by Sun Microsystems, Inc. All rights reserved. + * + * Permission to use, copy, modify, and distribute this + * software is freely granted, provided that this notice + * is preserved. + * ==================================================== + */ + +// pow(x,y) return x**y +// +// n +// Method: Let x = 2 * (1+f) +// 1. Compute and return log2(x) in two pieces: +// log2(x) = w1 + w2, +// where w1 has 53-24 = 29 bit trailing zeros. +// 2. Perform y*log2(x) = n+y' by simulating muti-precision +// arithmetic, where |y'|<=0.5. +// 3. Return x**y = 2**n*exp(y'*log2) +// +// Special cases: +// 1. (anything) ** 0 is 1 +// 2. 1 ** (anything) is 1 +// 3. (anything except 1) ** NAN is NAN +// 4. NAN ** (anything except 0) is NAN +// 5. +-(|x| > 1) ** +INF is +INF +// 6. +-(|x| > 1) ** -INF is +0 +// 7. +-(|x| < 1) ** +INF is +0 +// 8. +-(|x| < 1) ** -INF is +INF +// 9. -1 ** +-INF is 1 +// 10. +0 ** (+anything except 0, NAN) is +0 +// 11. 
-0 ** (+anything except 0, NAN, odd integer) is +0 +// 12. +0 ** (-anything except 0, NAN) is +INF, raise divbyzero +// 13. -0 ** (-anything except 0, NAN, odd integer) is +INF, raise divbyzero +// 14. -0 ** (+odd integer) is -0 +// 15. -0 ** (-odd integer) is -INF, raise divbyzero +// 16. +INF ** (+anything except 0,NAN) is +INF +// 17. +INF ** (-anything except 0,NAN) is +0 +// 18. -INF ** (+odd integer) is -INF +// 19. -INF ** (anything) = -0 ** (-anything), (anything except odd integer) +// 20. (anything) ** 1 is (anything) +// 21. (anything) ** -1 is 1/(anything) +// 22. (-anything) ** (integer) is (-1)**(integer)*(+anything**integer) +// 23. (-anything except 0 and inf) ** (non-integer) is NAN +// +// Accuracy: +// pow(x,y) returns x**y nearly rounded. In particular +// pow(integer,integer) +// always returns the correct integer provided it is +// representable. +// +// Constants : +// The hexadecimal values are the intended ones for the following +// constants. The decimal values may be used, provided that the +// compiler will convert from decimal to binary accurately enough +// to produce the hexadecimal values shown. + +pub fn powd(x: f64, y: f64) -> f64 { + const BP: [f64; 2] = [1.0, 1.5]; + const DP_H: [f64; 2] = [0.0, 5.84962487220764160156e-01]; /* 0x3fe2b803_40000000 */ + const DP_L: [f64; 2] = [0.0, 1.35003920212974897128e-08]; /* 0x3E4CFDEB, 0x43CFD006 */ + const TWO53: f64 = 9007199254740992.0; /* 0x43400000_00000000 */ + const HUGE: f64 = 1.0e300; + const TINY: f64 = 1.0e-300; + + // poly coefs for (3/2)*(log(x)-2s-2/3*s**3: + const L1: f64 = 5.99999999999994648725e-01; /* 0x3fe33333_33333303 */ + const L2: f64 = 4.28571428578550184252e-01; /* 0x3fdb6db6_db6fabff */ + const L3: f64 = 3.33333329818377432918e-01; /* 0x3fd55555_518f264d */ + const L4: f64 = 2.72728123808534006489e-01; /* 0x3fd17460_a91d4101 */ + const L5: f64 = 2.30660745775561754067e-01; /* 0x3fcd864a_93c9db65 */ + const L6: f64 = 2.06975017800338417784e-01; /* 0x3fca7e28_4a454eef */ + const P1: f64 = 1.66666666666666019037e-01; /* 0x3fc55555_5555553e */ + const P2: f64 = -2.77777777770155933842e-03; /* 0xbf66c16c_16bebd93 */ + const P3: f64 = 6.61375632143793436117e-05; /* 0x3f11566a_af25de2c */ + const P4: f64 = -1.65339022054652515390e-06; /* 0xbebbbd41_c5d26bf1 */ + const P5: f64 = 4.13813679705723846039e-08; /* 0x3e663769_72bea4d0 */ + const LG2: f64 = 6.93147180559945286227e-01; /* 0x3fe62e42_fefa39ef */ + const LG2_H: f64 = 6.93147182464599609375e-01; /* 0x3fe62e43_00000000 */ + const LG2_L: f64 = -1.90465429995776804525e-09; /* 0xbe205c61_0ca86c39 */ + const OVT: f64 = 8.0085662595372944372e-017; /* -(1024-log2(ovfl+.5ulp)) */ + const CP: f64 = 9.61796693925975554329e-01; /* 0x3feec709_dc3a03fd =2/(3ln2) */ + const CP_H: f64 = 9.61796700954437255859e-01; /* 0x3feec709_e0000000 =(float)cp */ + const CP_L: f64 = -7.02846165095275826516e-09; /* 0xbe3e2fe0_145b01f5 =tail of cp_h*/ + const IVLN2: f64 = 1.44269504088896338700e+00; /* 0x3ff71547_652b82fe =1/ln2 */ + const IVLN2_H: f64 = 1.44269502162933349609e+00; /* 0x3ff71547_60000000 =24b 1/ln2*/ + const IVLN2_L: f64 = 1.92596299112661746887e-08; /* 0x3e54ae0b_f85ddf44 =1/ln2 tail*/ + + let t1: f64; + let t2: f64; + + let (hx, lx): (i32, u32) = ((x.to_bits() >> 32) as i32, x.to_bits() as u32); + let (hy, ly): (i32, u32) = ((y.to_bits() >> 32) as i32, y.to_bits() as u32); + + let mut ix: i32 = (hx & 0x7fffffff) as i32; + let iy: i32 = (hy & 0x7fffffff) as i32; + + /* x**0 = 1, even if x is NaN */ + if ((iy as u32) | ly) == 0 { + return 1.0; + } + + /* 
1**y = 1, even if y is NaN */ + if hx == 0x3ff00000 && lx == 0 { + return 1.0; + } + + /* NaN if either arg is NaN */ + if ix > 0x7ff00000 + || (ix == 0x7ff00000 && lx != 0) + || iy > 0x7ff00000 + || (iy == 0x7ff00000 && ly != 0) + { + return x + y; + } + + /* determine if y is an odd int when x < 0 + * yisint = 0 ... y is not an integer + * yisint = 1 ... y is an odd int + * yisint = 2 ... y is an even int + */ + let mut yisint: i32 = 0; + let mut k: i32; + let mut j: i32; + if hx < 0 { + if iy >= 0x43400000 { + yisint = 2; /* even integer y */ + } else if iy >= 0x3ff00000 { + k = (iy >> 20) - 0x3ff; /* exponent */ + + if k > 20 { + j = (ly >> (52 - k)) as i32; + + if (j << (52 - k)) == (ly as i32) { + yisint = 2 - (j & 1); + } + } else if ly == 0 { + j = iy >> (20 - k); + + if (j << (20 - k)) == iy { + yisint = 2 - (j & 1); + } + } + } + } + + if ly == 0 { + /* special value of y */ + if iy == 0x7ff00000 { + /* y is +-inf */ + + return if ((ix - 0x3ff00000) | (lx as i32)) == 0 { + /* (-1)**+-inf is 1 */ + 1.0 + } else if ix >= 0x3ff00000 { + /* (|x|>1)**+-inf = inf,0 */ + if hy >= 0 { + y + } else { + 0.0 + } + } else { + /* (|x|<1)**+-inf = 0,inf */ + if hy >= 0 { + 0.0 + } else { + -y + } + }; + } + + if iy == 0x3ff00000 { + /* y is +-1 */ + return if hy >= 0 { + x + } else { + 1.0 / x + }; + } + + if hy == 0x40000000 { + /* y is 2 */ + return x * x; + } + + if hy == 0x3fe00000 { + /* y is 0.5 */ + if hx >= 0 { + /* x >= +0 */ + return sqrtd(x); + } + } + } + + let mut ax: f64 = fabsd(x); + if lx == 0 { + /* special value of x */ + if ix == 0x7ff00000 || ix == 0 || ix == 0x3ff00000 { + /* x is +-0,+-inf,+-1 */ + let mut z: f64 = ax; + + if hy < 0 { + /* z = (1/|x|) */ + z = 1.0 / z; + } + + if hx < 0 { + if ((ix - 0x3ff00000) | yisint) == 0 { + z = (z - z) / (z - z); /* (-1)**non-int is NaN */ + } else if yisint == 1 { + z = -z; /* (x<0)**odd = -(|x|**odd) */ + } + } + + return z; + } + } + + let mut s: f64 = 1.0; /* sign of result */ + if hx < 0 { + if yisint == 0 { + /* (x<0)**(non-int) is NaN */ + return (x - x) / (x - x); + } + + if yisint == 1 { + /* (x<0)**(odd int) */ + s = -1.0; + } + } + + /* |y| is HUGE */ + if iy > 0x41e00000 { + /* if |y| > 2**31 */ + if iy > 0x43f00000 { + /* if |y| > 2**64, must o/uflow */ + if ix <= 0x3fefffff { + return if hy < 0 { + HUGE * HUGE + } else { + TINY * TINY + }; + } + + if ix >= 0x3ff00000 { + return if hy > 0 { + HUGE * HUGE + } else { + TINY * TINY + }; + } + } + + /* over/underflow if x is not close to one */ + if ix < 0x3fefffff { + return if hy < 0 { + s * HUGE * HUGE + } else { + s * TINY * TINY + }; + } + if ix > 0x3ff00000 { + return if hy > 0 { + s * HUGE * HUGE + } else { + s * TINY * TINY + }; + } + + /* now |1-x| is TINY <= 2**-20, suffice to compute + log(x) by x-x^2/2+x^3/3-x^4/4 */ + let t: f64 = ax - 1.0; /* t has 20 trailing zeros */ + let w: f64 = (t * t) * (0.5 - t * (0.3333333333333333333333 - t * 0.25)); + let u: f64 = IVLN2_H * t; /* ivln2_h has 21 sig. 
bits */ + let v: f64 = t * IVLN2_L - w * IVLN2; + t1 = with_set_low_word(u + v, 0); + t2 = v - (t1 - u); + } else { + // double ss,s2,s_h,s_l,t_h,t_l; + let mut n: i32 = 0; + + if ix < 0x00100000 { + /* take care subnormal number */ + ax *= TWO53; + n -= 53; + ix = get_high_word(ax) as i32; + } + + n += (ix >> 20) - 0x3ff; + j = ix & 0x000fffff; + + /* determine interval */ + let k: i32; + ix = j | 0x3ff00000; /* normalize ix */ + if j <= 0x3988E { + /* |x|> 1) | 0x20000000) + 0x00080000 + ((k as u32) << 18), + ); + let t_l: f64 = ax - (t_h - i!(BP, k as usize)); + let s_l: f64 = v * ((u - s_h * t_h) - s_h * t_l); + + /* compute log(ax) */ + let s2: f64 = ss * ss; + let mut r: f64 = s2 * s2 * (L1 + s2 * (L2 + s2 * (L3 + s2 * (L4 + s2 * (L5 + s2 * L6))))); + r += s_l * (s_h + ss); + let s2: f64 = s_h * s_h; + let t_h: f64 = with_set_low_word(3.0 + s2 + r, 0); + let t_l: f64 = r - ((t_h - 3.0) - s2); + + /* u+v = ss*(1+...) */ + let u: f64 = s_h * t_h; + let v: f64 = s_l * t_h + t_l * ss; + + /* 2/(3log2)*(ss+...) */ + let p_h: f64 = with_set_low_word(u + v, 0); + let p_l = v - (p_h - u); + let z_h: f64 = CP_H * p_h; /* cp_h+cp_l = 2/(3*log2) */ + let z_l: f64 = CP_L * p_h + p_l * CP + i!(DP_L, k as usize); + + /* log2(ax) = (ss+..)*2/(3*log2) = n + dp_h + z_h + z_l */ + let t: f64 = n as f64; + t1 = with_set_low_word(((z_h + z_l) + i!(DP_H, k as usize)) + t, 0); + t2 = z_l - (((t1 - t) - i!(DP_H, k as usize)) - z_h); + } + + /* split up y into y1+y2 and compute (y1+y2)*(t1+t2) */ + let y1: f64 = with_set_low_word(y, 0); + let p_l: f64 = (y - y1) * t1 + y * t2; + let mut p_h: f64 = y1 * t1; + let z: f64 = p_l + p_h; + let mut j: i32 = (z.to_bits() >> 32) as i32; + let i: i32 = z.to_bits() as i32; + // let (j, i): (i32, i32) = ((z.to_bits() >> 32) as i32, z.to_bits() as i32); + + if j >= 0x40900000 { + /* z >= 1024 */ + if (j - 0x40900000) | i != 0 { + /* if z > 1024 */ + return s * HUGE * HUGE; /* overflow */ + } + + if p_l + OVT > z - p_h { + return s * HUGE * HUGE; /* overflow */ + } + } else if (j & 0x7fffffff) >= 0x4090cc00 { + /* z <= -1075 */ + // FIXME: instead of abs(j) use unsigned j + + if (((j as u32) - 0xc090cc00) | (i as u32)) != 0 { + /* z < -1075 */ + return s * TINY * TINY; /* underflow */ + } + + if p_l <= z - p_h { + return s * TINY * TINY; /* underflow */ + } + } + + /* compute 2**(p_h+p_l) */ + let i: i32 = j & (0x7fffffff as i32); + k = (i >> 20) - 0x3ff; + let mut n: i32 = 0; + + if i > 0x3fe00000 { + /* if |z| > 0.5, set n = [z+0.5] */ + n = j + (0x00100000 >> (k + 1)); + k = ((n & 0x7fffffff) >> 20) - 0x3ff; /* new k for n */ + let t: f64 = with_set_high_word(0.0, (n & !(0x000fffff >> k)) as u32); + n = ((n & 0x000fffff) | 0x00100000) >> (20 - k); + if j < 0 { + n = -n; + } + p_h -= t; + } + + let t: f64 = with_set_low_word(p_l + p_h, 0); + let u: f64 = t * LG2_H; + let v: f64 = (p_l - (t - p_h)) * LG2 + t * LG2_L; + let mut z: f64 = u + v; + let w: f64 = v - (z - u); + let t: f64 = z * z; + let t1: f64 = z - t * (P1 + t * (P2 + t * (P3 + t * (P4 + t * P5)))); + let r: f64 = (z * t1) / (t1 - 2.0) - (w + z * w); + z = 1.0 - (r - z); + j = get_high_word(z) as i32; + j += n << 20; + + if (j >> 20) <= 0 { + /* subnormal output */ + z = scalbnd(z, n); + } else { + z = with_set_high_word(z, j as u32); + } + + s * z +} + +/// Absolute value (magnitude) (f64) +/// Calculates the absolute value (magnitude) of the argument `x`, +/// by direct manipulation of the bit representation of `x`. 
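// --- Editorial aside, not part of the vendored file --------------------------
// A minimal sketch exercising a few of the special cases documented in the
// pow() comment block above, through the `powd` function defined in this file.
// The name `_powd_special_cases_demo` is illustrative only.
#[allow(dead_code)]
fn _powd_special_cases_demo() {
    assert_eq!(powd(123.456, 0.0), 1.0); // case 1: (anything) ** 0 is 1
    assert_eq!(powd(-1.0, f64::INFINITY), 1.0); // case 9: -1 ** +-INF is 1
    assert_eq!(powd(0.5, f64::NEG_INFINITY), f64::INFINITY); // case 8: (|x| < 1) ** -INF is +INF
    assert_eq!(powd(-2.0, 3.0), -8.0); // case 22: sign follows an odd integer exponent
    assert!(powd(-2.0, 0.5).is_nan()); // case 23: negative base with non-integer exponent
}
// ------------------------------------------------------------------------------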
+pub fn fabsd(x: f64) -> f64 { + f64::from_bits(x.to_bits() & (u64::MAX / 2)) +} + +pub fn scalbnd(x: f64, mut n: i32) -> f64 { + let x1p1023 = f64::from_bits(0x7fe0000000000000); // 0x1p1023 === 2 ^ 1023 + let x1p53 = f64::from_bits(0x4340000000000000); // 0x1p53 === 2 ^ 53 + let x1p_1022 = f64::from_bits(0x0010000000000000); // 0x1p-1022 === 2 ^ (-1022) + + let mut y = x; + + if n > 1023 { + y *= x1p1023; + n -= 1023; + if n > 1023 { + y *= x1p1023; + n -= 1023; + if n > 1023 { + n = 1023; + } + } + } else if n < -1022 { + /* make sure final n < -53 to avoid double + rounding in the subnormal range */ + y *= x1p_1022 * x1p53; + n += 1022 - 53; + if n < -1022 { + y *= x1p_1022 * x1p53; + n += 1022 - 53; + if n < -1022 { + n = -1022; + } + } + } + y * f64::from_bits(((0x3ff + n) as u64) << 52) +} + +/* origin: FreeBSD /usr/src/lib/msun/src/e_sqrt.c */ +/* + * ==================================================== + * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved. + * + * Developed at SunSoft, a Sun Microsystems, Inc. business. + * Permission to use, copy, modify, and distribute this + * software is freely granted, provided that this notice + * is preserved. + * ==================================================== + */ +/* sqrt(x) + * Return correctly rounded sqrt. + * ------------------------------------------ + * | Use the hardware sqrt if you have one | + * ------------------------------------------ + * Method: + * Bit by bit method using integer arithmetic. (Slow, but portable) + * 1. Normalization + * Scale x to y in [1,4) with even powers of 2: + * find an integer k such that 1 <= (y=x*2^(2k)) < 4, then + * sqrt(x) = 2^k * sqrt(y) + * 2. Bit by bit computation + * Let q = sqrt(y) truncated to i bit after binary point (q = 1), + * i 0 + * i+1 2 + * s = 2*q , and y = 2 * ( y - q ). (1) + * i i i i + * + * To compute q from q , one checks whether + * i+1 i + * + * -(i+1) 2 + * (q + 2 ) <= y. (2) + * i + * -(i+1) + * If (2) is false, then q = q ; otherwise q = q + 2 . + * i+1 i i+1 i + * + * With some algebraic manipulation, it is not difficult to see + * that (2) is equivalent to + * -(i+1) + * s + 2 <= y (3) + * i i + * + * The advantage of (3) is that s and y can be computed by + * i i + * the following recurrence formula: + * if (3) is false + * + * s = s , y = y ; (4) + * i+1 i i+1 i + * + * otherwise, + * -i -(i+1) + * s = s + 2 , y = y - s - 2 (5) + * i+1 i i+1 i i + * + * One may easily use induction to prove (4) and (5). + * Note. Since the left hand side of (3) contain only i+2 bits, + * it does not necessary to do a full (53-bit) comparison + * in (3). + * 3. Final rounding + * After generating the 53 bits result, we compute one more bit. + * Together with the remainder, we can decide whether the + * result is exact, bigger than 1/2ulp, or less than 1/2ulp + * (it will never equal to 1/2ulp). + * The rounding mode can be detected by checking whether + * huge + tiny is equal to huge, and whether huge - tiny is + * equal to huge for some floating point number "huge" and "tiny". + * + * Special cases: + * sqrt(+-0) = +-0 ... exact + * sqrt(inf) = inf + * sqrt(-ve) = NaN ... with invalid signal + * sqrt(NaN) = NaN ... with invalid signal for signaling NaN + */ + +pub fn sqrtd(x: f64) -> f64 { + #[cfg(target_feature = "sse2")] + { + // Note: This path is unlikely since LLVM will usually have already + // optimized sqrt calls into hardware instructions if sse2 is available, + // but if someone does end up here they'll apprected the speed increase. 
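// --- Editorial aside, not part of the vendored file --------------------------
// The software fallback in the non-SSE2 branch further below follows the
// bit-by-bit scheme described in the header comment above. As a short worked
// example for y = 2: starting from q = 1, each step tentatively sets the next
// bit and keeps it only if the square stays <= y, so q proceeds
//   1.0 -> 1.01b (1.25) -> 1.011b (1.375) -> 1.0110b (1.375) -> 1.01101b (1.40625),
// converging on sqrt(2) = 1.41421... at one bit per iteration.
// ------------------------------------------------------------------------------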
+ #[cfg(target_arch = "x86")] + use core::arch::x86::*; + #[cfg(target_arch = "x86_64")] + use core::arch::x86_64::*; + // SAFETY: safe, since `_mm_set_sd` takes a 64-bit float, and returns + // a 128-bit type with the lowest 64-bits as `x`, `_mm_sqrt_ss` calculates + // the sqrt of this 128-bit vector, and `_mm_cvtss_f64` extracts the lower + // 64-bits as a 64-bit float. + unsafe { + let m = _mm_set_sd(x); + let m_sqrt = _mm_sqrt_pd(m); + _mm_cvtsd_f64(m_sqrt) + } + } + #[cfg(not(target_feature = "sse2"))] + { + use core::num::Wrapping; + + const TINY: f64 = 1.0e-300; + + let mut z: f64; + let sign: Wrapping = Wrapping(0x80000000); + let mut ix0: i32; + let mut s0: i32; + let mut q: i32; + let mut m: i32; + let mut t: i32; + let mut i: i32; + let mut r: Wrapping; + let mut t1: Wrapping; + let mut s1: Wrapping; + let mut ix1: Wrapping; + let mut q1: Wrapping; + + ix0 = (x.to_bits() >> 32) as i32; + ix1 = Wrapping(x.to_bits() as u32); + + /* take care of Inf and NaN */ + if (ix0 & 0x7ff00000) == 0x7ff00000 { + return x * x + x; /* sqrt(NaN)=NaN, sqrt(+inf)=+inf, sqrt(-inf)=sNaN */ + } + /* take care of zero */ + if ix0 <= 0 { + if ((ix0 & !(sign.0 as i32)) | ix1.0 as i32) == 0 { + return x; /* sqrt(+-0) = +-0 */ + } + if ix0 < 0 { + return (x - x) / (x - x); /* sqrt(-ve) = sNaN */ + } + } + /* normalize x */ + m = ix0 >> 20; + if m == 0 { + /* subnormal x */ + while ix0 == 0 { + m -= 21; + ix0 |= (ix1 >> 11).0 as i32; + ix1 <<= 21; + } + i = 0; + while (ix0 & 0x00100000) == 0 { + i += 1; + ix0 <<= 1; + } + m -= i - 1; + ix0 |= (ix1 >> (32 - i) as usize).0 as i32; + ix1 = ix1 << i as usize; + } + m -= 1023; /* unbias exponent */ + ix0 = (ix0 & 0x000fffff) | 0x00100000; + if (m & 1) == 1 { + /* odd m, double x to make it even */ + ix0 += ix0 + ((ix1 & sign) >> 31).0 as i32; + ix1 += ix1; + } + m >>= 1; /* m = [m/2] */ + + /* generate sqrt(x) bit by bit */ + ix0 += ix0 + ((ix1 & sign) >> 31).0 as i32; + ix1 += ix1; + q = 0; /* [q,q1] = sqrt(x) */ + q1 = Wrapping(0); + s0 = 0; + s1 = Wrapping(0); + r = Wrapping(0x00200000); /* r = moving bit from right to left */ + + while r != Wrapping(0) { + t = s0 + r.0 as i32; + if t <= ix0 { + s0 = t + r.0 as i32; + ix0 -= t; + q += r.0 as i32; + } + ix0 += ix0 + ((ix1 & sign) >> 31).0 as i32; + ix1 += ix1; + r >>= 1; + } + + r = sign; + while r != Wrapping(0) { + t1 = s1 + r; + t = s0; + if t < ix0 || (t == ix0 && t1 <= ix1) { + s1 = t1 + r; + if (t1 & sign) == sign && (s1 & sign) == Wrapping(0) { + s0 += 1; + } + ix0 -= t; + if ix1 < t1 { + ix0 -= 1; + } + ix1 -= t1; + q1 += r; + } + ix0 += ix0 + ((ix1 & sign) >> 31).0 as i32; + ix1 += ix1; + r >>= 1; + } + + /* use floating add to find out rounding direction */ + if (ix0 as u32 | ix1.0) != 0 { + z = 1.0 - TINY; /* raise inexact flag */ + if z >= 1.0 { + z = 1.0 + TINY; + if q1.0 == 0xffffffff { + q1 = Wrapping(0); + q += 1; + } else if z > 1.0 { + if q1.0 == 0xfffffffe { + q += 1; + } + q1 += Wrapping(2); + } else { + q1 += q1 & Wrapping(1); + } + } + } + ix0 = (q >> 1) + 0x3fe00000; + ix1 = q1 >> 1; + if (q & 1) == 1 { + ix1 |= sign; + } + ix0 += m << 20; + f64::from_bits((ix0 as u64) << 32 | ix1.0 as u64) + } +} + +#[inline] +fn get_high_word(x: f64) -> u32 { + (x.to_bits() >> 32) as u32 +} + +#[inline] +fn with_set_high_word(f: f64, hi: u32) -> f64 { + let mut tmp = f.to_bits(); + tmp &= 0x00000000_ffffffff; + tmp |= (hi as u64) << 32; + f64::from_bits(tmp) +} + +#[inline] +fn with_set_low_word(f: f64, lo: u32) -> f64 { + let mut tmp = f.to_bits(); + tmp &= 0xffffffff_00000000; + tmp |= lo as u64; + 
f64::from_bits(tmp) +} diff --git a/vendor/minimal-lexical/src/mask.rs b/vendor/minimal-lexical/src/mask.rs new file mode 100644 index 00000000000000..1957c8be03e125 --- /dev/null +++ b/vendor/minimal-lexical/src/mask.rs @@ -0,0 +1,60 @@ +//! Utilities to generate bitmasks. + +#![doc(hidden)] + +/// Generate a bitwise mask for the lower `n` bits. +/// +/// # Examples +/// +/// ```rust +/// # use minimal_lexical::mask::lower_n_mask; +/// # pub fn main() { +/// assert_eq!(lower_n_mask(2), 0b11); +/// # } +/// ``` +#[inline] +pub fn lower_n_mask(n: u64) -> u64 { + debug_assert!(n <= 64, "lower_n_mask() overflow in shl."); + + match n == 64 { + // u64::MAX for older Rustc versions. + true => 0xffff_ffff_ffff_ffff, + false => (1 << n) - 1, + } +} + +/// Calculate the halfway point for the lower `n` bits. +/// +/// # Examples +/// +/// ```rust +/// # use minimal_lexical::mask::lower_n_halfway; +/// # pub fn main() { +/// assert_eq!(lower_n_halfway(2), 0b10); +/// # } +/// ``` +#[inline] +pub fn lower_n_halfway(n: u64) -> u64 { + debug_assert!(n <= 64, "lower_n_halfway() overflow in shl."); + + match n == 0 { + true => 0, + false => nth_bit(n - 1), + } +} + +/// Calculate a scalar factor of 2 above the halfway point. +/// +/// # Examples +/// +/// ```rust +/// # use minimal_lexical::mask::nth_bit; +/// # pub fn main() { +/// assert_eq!(nth_bit(2), 0b100); +/// # } +/// ``` +#[inline] +pub fn nth_bit(n: u64) -> u64 { + debug_assert!(n < 64, "nth_bit() overflow in shl."); + 1 << n +} diff --git a/vendor/minimal-lexical/src/num.rs b/vendor/minimal-lexical/src/num.rs new file mode 100644 index 00000000000000..9f682b9cbb29c3 --- /dev/null +++ b/vendor/minimal-lexical/src/num.rs @@ -0,0 +1,308 @@ +//! Utilities for Rust numbers. + +#![doc(hidden)] + +#[cfg(all(not(feature = "std"), feature = "compact"))] +use crate::libm::{powd, powf}; +#[cfg(not(feature = "compact"))] +use crate::table::{SMALL_F32_POW10, SMALL_F64_POW10, SMALL_INT_POW10, SMALL_INT_POW5}; +#[cfg(not(feature = "compact"))] +use core::hint; +use core::ops; + +/// Generic floating-point type, to be used in generic code for parsing. +/// +/// Although the trait is part of the public API, the trait provides methods +/// and constants that are effectively non-public: they may be removed +/// at any time without any breaking changes. +pub trait Float: + Sized + + Copy + + PartialEq + + PartialOrd + + Send + + Sync + + ops::Add + + ops::AddAssign + + ops::Div + + ops::DivAssign + + ops::Mul + + ops::MulAssign + + ops::Rem + + ops::RemAssign + + ops::Sub + + ops::SubAssign + + ops::Neg +{ + /// Maximum number of digits that can contribute in the mantissa. + /// + /// We can exactly represent a float in radix `b` from radix 2 if + /// `b` is divisible by 2. This function calculates the exact number of + /// digits required to exactly represent that float. + /// + /// According to the "Handbook of Floating Point Arithmetic", + /// for IEEE754, with emin being the min exponent, p2 being the + /// precision, and b being the radix, the number of digits follows as: + /// + /// `−emin + p2 + ⌊(emin + 1) log(2, b) − log(1 − 2^(−p2), b)⌋` + /// + /// For f32, this follows as: + /// emin = -126 + /// p2 = 24 + /// + /// For f64, this follows as: + /// emin = -1022 + /// p2 = 53 + /// + /// In Python: + /// `-emin + p2 + math.floor((emin+1)*math.log(2, b) - math.log(1-2**(-p2), b))` + /// + /// This was used to calculate the maximum number of digits for [2, 36]. + const MAX_DIGITS: usize; + + // MASKS + + /// Bitmask for the sign bit. 
+ const SIGN_MASK: u64; + /// Bitmask for the exponent, including the hidden bit. + const EXPONENT_MASK: u64; + /// Bitmask for the hidden bit in exponent, which is an implicit 1 in the fraction. + const HIDDEN_BIT_MASK: u64; + /// Bitmask for the mantissa (fraction), excluding the hidden bit. + const MANTISSA_MASK: u64; + + // PROPERTIES + + /// Size of the significand (mantissa) without hidden bit. + const MANTISSA_SIZE: i32; + /// Bias of the exponet + const EXPONENT_BIAS: i32; + /// Exponent portion of a denormal float. + const DENORMAL_EXPONENT: i32; + /// Maximum exponent value in float. + const MAX_EXPONENT: i32; + + // ROUNDING + + /// Mask to determine if a full-carry occurred (1 in bit above hidden bit). + const CARRY_MASK: u64; + + /// Bias for marking an invalid extended float. + // Value is `i16::MIN`, using hard-coded constants for older Rustc versions. + const INVALID_FP: i32 = -0x8000; + + // Maximum mantissa for the fast-path (`1 << 53` for f64). + const MAX_MANTISSA_FAST_PATH: u64 = 2_u64 << Self::MANTISSA_SIZE; + + // Largest exponent value `(1 << EXP_BITS) - 1`. + const INFINITE_POWER: i32 = Self::MAX_EXPONENT + Self::EXPONENT_BIAS; + + // Round-to-even only happens for negative values of q + // when q ≥ −4 in the 64-bit case and when q ≥ −17 in + // the 32-bitcase. + // + // When q ≥ 0,we have that 5^q ≤ 2m+1. In the 64-bit case,we + // have 5^q ≤ 2m+1 ≤ 2^54 or q ≤ 23. In the 32-bit case,we have + // 5^q ≤ 2m+1 ≤ 2^25 or q ≤ 10. + // + // When q < 0, we have w ≥ (2m+1)×5^−q. We must have that w < 2^64 + // so (2m+1)×5^−q < 2^64. We have that 2m+1 > 2^53 (64-bit case) + // or 2m+1 > 2^24 (32-bit case). Hence,we must have 2^53×5^−q < 2^64 + // (64-bit) and 2^24×5^−q < 2^64 (32-bit). Hence we have 5^−q < 2^11 + // or q ≥ −4 (64-bit case) and 5^−q < 2^40 or q ≥ −17 (32-bitcase). + // + // Thus we have that we only need to round ties to even when + // we have that q ∈ [−4,23](in the 64-bit case) or q∈[−17,10] + // (in the 32-bit case). In both cases,the power of five(5^|q|) + // fits in a 64-bit word. + const MIN_EXPONENT_ROUND_TO_EVEN: i32; + const MAX_EXPONENT_ROUND_TO_EVEN: i32; + + /// Minimum normal exponent value `-(1 << (EXPONENT_SIZE - 1)) + 1`. + const MINIMUM_EXPONENT: i32; + + /// Smallest decimal exponent for a non-zero value. + const SMALLEST_POWER_OF_TEN: i32; + + /// Largest decimal exponent for a non-infinite value. + const LARGEST_POWER_OF_TEN: i32; + + /// Minimum exponent that for a fast path case, or `-⌊(MANTISSA_SIZE+1)/log2(10)⌋` + const MIN_EXPONENT_FAST_PATH: i32; + + /// Maximum exponent that for a fast path case, or `⌊(MANTISSA_SIZE+1)/log2(5)⌋` + const MAX_EXPONENT_FAST_PATH: i32; + + /// Maximum exponent that can be represented for a disguised-fast path case. + /// This is `MAX_EXPONENT_FAST_PATH + ⌊(MANTISSA_SIZE+1)/log2(10)⌋` + const MAX_EXPONENT_DISGUISED_FAST_PATH: i32; + + /// Convert 64-bit integer to float. + fn from_u64(u: u64) -> Self; + + // Re-exported methods from std. + fn from_bits(u: u64) -> Self; + fn to_bits(self) -> u64; + + /// Get a small power-of-radix for fast-path multiplication. + /// + /// # Safety + /// + /// Safe as long as the exponent is smaller than the table size. + unsafe fn pow_fast_path(exponent: usize) -> Self; + + /// Get a small, integral power-of-radix for fast-path multiplication. + /// + /// # Safety + /// + /// Safe as long as the exponent is smaller than the table size. 
+ #[inline(always)] + unsafe fn int_pow_fast_path(exponent: usize, radix: u32) -> u64 { + // SAFETY: safe as long as the exponent is smaller than the radix table. + #[cfg(not(feature = "compact"))] + return match radix { + 5 => unsafe { *SMALL_INT_POW5.get_unchecked(exponent) }, + 10 => unsafe { *SMALL_INT_POW10.get_unchecked(exponent) }, + _ => unsafe { hint::unreachable_unchecked() }, + }; + + #[cfg(feature = "compact")] + return (radix as u64).pow(exponent as u32); + } + + /// Returns true if the float is a denormal. + #[inline] + fn is_denormal(self) -> bool { + self.to_bits() & Self::EXPONENT_MASK == 0 + } + + /// Get exponent component from the float. + #[inline] + fn exponent(self) -> i32 { + if self.is_denormal() { + return Self::DENORMAL_EXPONENT; + } + + let bits = self.to_bits(); + let biased_e: i32 = ((bits & Self::EXPONENT_MASK) >> Self::MANTISSA_SIZE) as i32; + biased_e - Self::EXPONENT_BIAS + } + + /// Get mantissa (significand) component from float. + #[inline] + fn mantissa(self) -> u64 { + let bits = self.to_bits(); + let s = bits & Self::MANTISSA_MASK; + if !self.is_denormal() { + s + Self::HIDDEN_BIT_MASK + } else { + s + } + } +} + +impl Float for f32 { + const MAX_DIGITS: usize = 114; + const SIGN_MASK: u64 = 0x80000000; + const EXPONENT_MASK: u64 = 0x7F800000; + const HIDDEN_BIT_MASK: u64 = 0x00800000; + const MANTISSA_MASK: u64 = 0x007FFFFF; + const MANTISSA_SIZE: i32 = 23; + const EXPONENT_BIAS: i32 = 127 + Self::MANTISSA_SIZE; + const DENORMAL_EXPONENT: i32 = 1 - Self::EXPONENT_BIAS; + const MAX_EXPONENT: i32 = 0xFF - Self::EXPONENT_BIAS; + const CARRY_MASK: u64 = 0x1000000; + const MIN_EXPONENT_ROUND_TO_EVEN: i32 = -17; + const MAX_EXPONENT_ROUND_TO_EVEN: i32 = 10; + const MINIMUM_EXPONENT: i32 = -127; + const SMALLEST_POWER_OF_TEN: i32 = -65; + const LARGEST_POWER_OF_TEN: i32 = 38; + const MIN_EXPONENT_FAST_PATH: i32 = -10; + const MAX_EXPONENT_FAST_PATH: i32 = 10; + const MAX_EXPONENT_DISGUISED_FAST_PATH: i32 = 17; + + #[inline(always)] + unsafe fn pow_fast_path(exponent: usize) -> Self { + // SAFETY: safe as long as the exponent is smaller than the radix table. + #[cfg(not(feature = "compact"))] + return unsafe { *SMALL_F32_POW10.get_unchecked(exponent) }; + + #[cfg(feature = "compact")] + return powf(10.0f32, exponent as f32); + } + + #[inline] + fn from_u64(u: u64) -> f32 { + u as _ + } + + #[inline] + fn from_bits(u: u64) -> f32 { + // Constant is `u32::MAX` for older Rustc versions. 
+ debug_assert!(u <= 0xffff_ffff); + f32::from_bits(u as u32) + } + + #[inline] + fn to_bits(self) -> u64 { + f32::to_bits(self) as u64 + } +} + +impl Float for f64 { + const MAX_DIGITS: usize = 769; + const SIGN_MASK: u64 = 0x8000000000000000; + const EXPONENT_MASK: u64 = 0x7FF0000000000000; + const HIDDEN_BIT_MASK: u64 = 0x0010000000000000; + const MANTISSA_MASK: u64 = 0x000FFFFFFFFFFFFF; + const MANTISSA_SIZE: i32 = 52; + const EXPONENT_BIAS: i32 = 1023 + Self::MANTISSA_SIZE; + const DENORMAL_EXPONENT: i32 = 1 - Self::EXPONENT_BIAS; + const MAX_EXPONENT: i32 = 0x7FF - Self::EXPONENT_BIAS; + const CARRY_MASK: u64 = 0x20000000000000; + const MIN_EXPONENT_ROUND_TO_EVEN: i32 = -4; + const MAX_EXPONENT_ROUND_TO_EVEN: i32 = 23; + const MINIMUM_EXPONENT: i32 = -1023; + const SMALLEST_POWER_OF_TEN: i32 = -342; + const LARGEST_POWER_OF_TEN: i32 = 308; + const MIN_EXPONENT_FAST_PATH: i32 = -22; + const MAX_EXPONENT_FAST_PATH: i32 = 22; + const MAX_EXPONENT_DISGUISED_FAST_PATH: i32 = 37; + + #[inline(always)] + unsafe fn pow_fast_path(exponent: usize) -> Self { + // SAFETY: safe as long as the exponent is smaller than the radix table. + #[cfg(not(feature = "compact"))] + return unsafe { *SMALL_F64_POW10.get_unchecked(exponent) }; + + #[cfg(feature = "compact")] + return powd(10.0f64, exponent as f64); + } + + #[inline] + fn from_u64(u: u64) -> f64 { + u as _ + } + + #[inline] + fn from_bits(u: u64) -> f64 { + f64::from_bits(u) + } + + #[inline] + fn to_bits(self) -> u64 { + f64::to_bits(self) + } +} + +#[inline(always)] +#[cfg(all(feature = "std", feature = "compact"))] +pub fn powf(x: f32, y: f32) -> f32 { + x.powf(y) +} + +#[inline(always)] +#[cfg(all(feature = "std", feature = "compact"))] +pub fn powd(x: f64, y: f64) -> f64 { + x.powf(y) +} diff --git a/vendor/minimal-lexical/src/number.rs b/vendor/minimal-lexical/src/number.rs new file mode 100644 index 00000000000000..5981f9dd79232f --- /dev/null +++ b/vendor/minimal-lexical/src/number.rs @@ -0,0 +1,83 @@ +//! Representation of a float as the significant digits and exponent. +//! +//! This is adapted from [fast-float-rust](https://github.com/aldanor/fast-float-rust), +//! a port of [fast_float](https://github.com/fastfloat/fast_float) to Rust. + +#![doc(hidden)] + +#[cfg(feature = "nightly")] +use crate::fpu::set_precision; +use crate::num::Float; + +/// Representation of a number as the significant digits and exponent. +/// +/// This is only used if the exponent base and the significant digit +/// radix are the same, since we need to be able to move powers in and +/// out of the exponent. +#[derive(Clone, Copy, Debug, Default, PartialEq, Eq)] +pub struct Number { + /// The exponent of the float, scaled to the mantissa. + pub exponent: i32, + /// The significant digits of the float. + pub mantissa: u64, + /// If the significant digits were truncated. + pub many_digits: bool, +} + +impl Number { + /// Detect if the float can be accurately reconstructed from native floats. + #[inline] + pub fn is_fast_path(&self) -> bool { + F::MIN_EXPONENT_FAST_PATH <= self.exponent + && self.exponent <= F::MAX_EXPONENT_DISGUISED_FAST_PATH + && self.mantissa <= F::MAX_MANTISSA_FAST_PATH + && !self.many_digits + } + + /// The fast path algorithmn using machine-sized integers and floats. + /// + /// This is extracted into a separate function so that it can be attempted before constructing + /// a Decimal. 
This only works if both the mantissa and the exponent + /// can be exactly represented as a machine float, since IEE-754 guarantees + /// no rounding will occur. + /// + /// There is an exception: disguised fast-path cases, where we can shift + /// powers-of-10 from the exponent to the significant digits. + pub fn try_fast_path(&self) -> Option { + // The fast path crucially depends on arithmetic being rounded to the correct number of bits + // without any intermediate rounding. On x86 (without SSE or SSE2) this requires the precision + // of the x87 FPU stack to be changed so that it directly rounds to 64/32 bit. + // The `set_precision` function takes care of setting the precision on architectures which + // require setting it by changing the global state (like the control word of the x87 FPU). + #[cfg(feature = "nightly")] + let _cw = set_precision::(); + + if self.is_fast_path::() { + let max_exponent = F::MAX_EXPONENT_FAST_PATH; + Some(if self.exponent <= max_exponent { + // normal fast path + let value = F::from_u64(self.mantissa); + if self.exponent < 0 { + // SAFETY: safe, since the `exponent <= max_exponent`. + value / unsafe { F::pow_fast_path((-self.exponent) as _) } + } else { + // SAFETY: safe, since the `exponent <= max_exponent`. + value * unsafe { F::pow_fast_path(self.exponent as _) } + } + } else { + // disguised fast path + let shift = self.exponent - max_exponent; + // SAFETY: safe, since `shift <= (max_disguised - max_exponent)`. + let int_power = unsafe { F::int_pow_fast_path(shift as usize, 10) }; + let mantissa = self.mantissa.checked_mul(int_power)?; + if mantissa > F::MAX_MANTISSA_FAST_PATH { + return None; + } + // SAFETY: safe, since the `table.len() - 1 == max_exponent`. + F::from_u64(mantissa) * unsafe { F::pow_fast_path(max_exponent as _) } + }) + } else { + None + } + } +} diff --git a/vendor/minimal-lexical/src/parse.rs b/vendor/minimal-lexical/src/parse.rs new file mode 100644 index 00000000000000..9349699eb35e53 --- /dev/null +++ b/vendor/minimal-lexical/src/parse.rs @@ -0,0 +1,201 @@ +//! Parse byte iterators to float. + +#![doc(hidden)] + +#[cfg(feature = "compact")] +use crate::bellerophon::bellerophon; +use crate::extended_float::{extended_to_float, ExtendedFloat}; +#[cfg(not(feature = "compact"))] +use crate::lemire::lemire; +use crate::num::Float; +use crate::number::Number; +use crate::slow::slow; + +/// Try to parse the significant digits quickly. +/// +/// This attempts a very quick parse, to deal with common cases. +/// +/// * `integer` - Slice containing the integer digits. +/// * `fraction` - Slice containing the fraction digits. +#[inline] +fn parse_number_fast<'a, Iter1, Iter2>( + integer: Iter1, + fraction: Iter2, + exponent: i32, +) -> Option +where + Iter1: Iterator, + Iter2: Iterator, +{ + let mut num = Number::default(); + let mut integer_count: usize = 0; + let mut fraction_count: usize = 0; + for &c in integer { + integer_count += 1; + let digit = c - b'0'; + num.mantissa = num.mantissa.wrapping_mul(10).wrapping_add(digit as u64); + } + for &c in fraction { + fraction_count += 1; + let digit = c - b'0'; + num.mantissa = num.mantissa.wrapping_mul(10).wrapping_add(digit as u64); + } + + if integer_count + fraction_count <= 19 { + // Can't overflow, since must be <= 19. + num.exponent = exponent.saturating_sub(fraction_count as i32); + Some(num) + } else { + None + } +} + +/// Parse the significant digits of the float and adjust the exponent. +/// +/// * `integer` - Slice containing the integer digits. 
+/// * `fraction` - Slice containing the fraction digits. +#[inline] +fn parse_number<'a, Iter1, Iter2>(mut integer: Iter1, mut fraction: Iter2, exponent: i32) -> Number +where + Iter1: Iterator + Clone, + Iter2: Iterator + Clone, +{ + // NOTE: for performance, we do this in 2 passes: + if let Some(num) = parse_number_fast(integer.clone(), fraction.clone(), exponent) { + return num; + } + + // Can only add 19 digits. + let mut num = Number::default(); + let mut count = 0; + while let Some(&c) = integer.next() { + count += 1; + if count == 20 { + // Only the integer digits affect the exponent. + num.many_digits = true; + num.exponent = exponent.saturating_add(into_i32(1 + integer.count())); + return num; + } else { + let digit = c - b'0'; + num.mantissa = num.mantissa * 10 + digit as u64; + } + } + + // Skip leading fraction zeros. + // This is required otherwise we might have a 0 mantissa and many digits. + let mut fraction_count: usize = 0; + if count == 0 { + for &c in &mut fraction { + fraction_count += 1; + if c != b'0' { + count += 1; + let digit = c - b'0'; + num.mantissa = num.mantissa * 10 + digit as u64; + break; + } + } + } + for c in fraction { + fraction_count += 1; + count += 1; + if count == 20 { + num.many_digits = true; + // This can't wrap, since we have at most 20 digits. + // We've adjusted the exponent too high by `fraction_count - 1`. + // Note: -1 is due to incrementing this loop iteration, which we + // didn't use. + num.exponent = exponent.saturating_sub(fraction_count as i32 - 1); + return num; + } else { + let digit = c - b'0'; + num.mantissa = num.mantissa * 10 + digit as u64; + } + } + + // No truncated digits: easy. + // Cannot overflow: <= 20 digits. + num.exponent = exponent.saturating_sub(fraction_count as i32); + num +} + +/// Parse float from extracted float components. +/// +/// * `integer` - Cloneable, forward iterator over integer digits. +/// * `fraction` - Cloneable, forward iterator over integer digits. +/// * `exponent` - Parsed, 32-bit exponent. +/// +/// # Preconditions +/// 1. The integer should not have leading zeros. +/// 2. The fraction should not have trailing zeros. +/// 3. All bytes in `integer` and `fraction` should be valid digits, +/// in the range [`b'0', b'9']. +/// +/// # Panics +/// +/// Although passing garbage input will not cause memory safety issues, +/// it is very likely to cause a panic with a large number of digits, or +/// in debug mode. The big-integer arithmetic without the `alloc` feature +/// assumes a maximum, fixed-width input, which assumes at maximum a +/// value of `10^(769 + 342)`, or ~4000 bits of storage. Passing in +/// nonsensical digits may require up to ~6000 bits of storage, which will +/// panic when attempting to add it to the big integer. It is therefore +/// up to the caller to validate this input. +/// +/// We cannot efficiently remove trailing zeros while only accepting a +/// forward iterator. +pub fn parse_float<'a, F, Iter1, Iter2>(integer: Iter1, fraction: Iter2, exponent: i32) -> F +where + F: Float, + Iter1: Iterator + Clone, + Iter2: Iterator + Clone, +{ + // Parse the mantissa and attempt the fast and moderate-path algorithms. + let num = parse_number(integer.clone(), fraction.clone(), exponent); + // Try the fast-path algorithm. + if let Some(value) = num.try_fast_path() { + return value; + } + + // Now try the moderate path algorithm. + let mut fp = moderate_path::(&num); + if fp.exp < 0 { + // Undo the invalid extended float biasing. 
+ fp.exp -= F::INVALID_FP; + fp = slow::(num, fp, integer, fraction); + } + + // Unable to correctly round the float using the fast or moderate algorithms. + // Fallback to a slower, but always correct algorithm. If we have + // lossy, we can't be here. + extended_to_float::(fp) +} + +/// Wrapper for different moderate-path algorithms. +/// A return exponent of `-1` indicates an invalid value. +#[inline] +pub fn moderate_path(num: &Number) -> ExtendedFloat { + #[cfg(not(feature = "compact"))] + return lemire::(num); + + #[cfg(feature = "compact")] + return bellerophon::(num); +} + +/// Convert usize into i32 without overflow. +/// +/// This is needed to ensure when adjusting the exponent relative to +/// the mantissa we do not overflow for comically-long exponents. +#[inline] +fn into_i32(value: usize) -> i32 { + if value > i32::max_value() as usize { + i32::max_value() + } else { + value as i32 + } +} + +// Add digit to mantissa. +#[inline] +pub fn add_digit(value: u64, digit: u8) -> Option { + value.checked_mul(10)?.checked_add(digit as u64) +} diff --git a/vendor/minimal-lexical/src/rounding.rs b/vendor/minimal-lexical/src/rounding.rs new file mode 100644 index 00000000000000..7c466dec4d18e5 --- /dev/null +++ b/vendor/minimal-lexical/src/rounding.rs @@ -0,0 +1,131 @@ +//! Defines rounding schemes for floating-point numbers. + +#![doc(hidden)] + +use crate::extended_float::ExtendedFloat; +use crate::mask::{lower_n_halfway, lower_n_mask}; +use crate::num::Float; + +// ROUNDING +// -------- + +/// Round an extended-precision float to the nearest machine float. +/// +/// Shifts the significant digits into place, adjusts the exponent, +/// so it can be easily converted to a native float. +#[cfg_attr(not(feature = "compact"), inline)] +pub fn round(fp: &mut ExtendedFloat, cb: Cb) +where + F: Float, + Cb: Fn(&mut ExtendedFloat, i32), +{ + let fp_inf = ExtendedFloat { + mant: 0, + exp: F::INFINITE_POWER, + }; + + // Calculate our shift in significant digits. + let mantissa_shift = 64 - F::MANTISSA_SIZE - 1; + + // Check for a denormal float, if after the shift the exponent is negative. + if -fp.exp >= mantissa_shift { + // Have a denormal float that isn't a literal 0. + // The extra 1 is to adjust for the denormal float, which is + // `1 - F::EXPONENT_BIAS`. This works as before, because our + // old logic rounded to `F::DENORMAL_EXPONENT` (now 1), and then + // checked if `exp == F::DENORMAL_EXPONENT` and no hidden mask + // bit was set. Here, we handle that here, rather than later. + // + // This might round-down to 0, but shift will be at **max** 65, + // for halfway cases rounding towards 0. + let shift = -fp.exp + 1; + debug_assert!(shift <= 65); + cb(fp, shift.min(64)); + // Check for round-up: if rounding-nearest carried us to the hidden bit. + fp.exp = (fp.mant >= F::HIDDEN_BIT_MASK) as i32; + return; + } + + // The float is normal, round to the hidden bit. + cb(fp, mantissa_shift); + + // Check if we carried, and if so, shift the bit to the hidden bit. + let carry_mask = F::CARRY_MASK; + if fp.mant & carry_mask == carry_mask { + fp.mant >>= 1; + fp.exp += 1; + } + + // Handle if we carried and check for overflow again. + if fp.exp >= F::INFINITE_POWER { + // Exponent is above largest normal value, must be infinite. + *fp = fp_inf; + return; + } + + // Remove the hidden bit. + fp.mant &= F::MANTISSA_MASK; +} + +/// Shift right N-bytes and round towards a direction. +/// +/// Callback should take the following parameters: +/// 1. is_odd +/// 1. is_halfway +/// 1. 
is_above +#[cfg_attr(not(feature = "compact"), inline)] +pub fn round_nearest_tie_even(fp: &mut ExtendedFloat, shift: i32, cb: Cb) +where + // is_odd, is_halfway, is_above + Cb: Fn(bool, bool, bool) -> bool, +{ + // Ensure we've already handled denormal values that underflow. + debug_assert!(shift <= 64); + + // Extract the truncated bits using mask. + // Calculate if the value of the truncated bits are either above + // the mid-way point, or equal to it. + // + // For example, for 4 truncated bytes, the mask would be 0b1111 + // and the midway point would be 0b1000. + let mask = lower_n_mask(shift as u64); + let halfway = lower_n_halfway(shift as u64); + let truncated_bits = fp.mant & mask; + let is_above = truncated_bits > halfway; + let is_halfway = truncated_bits == halfway; + + // Bit shift so the leading bit is in the hidden bit. + // This optimixes pretty well: + // ```text + // mov ecx, esi + // shr rdi, cl + // xor eax, eax + // cmp esi, 64 + // cmovne rax, rdi + // ret + // ``` + fp.mant = match shift == 64 { + true => 0, + false => fp.mant >> shift, + }; + fp.exp += shift; + + // Extract the last bit after shifting (and determine if it is odd). + let is_odd = fp.mant & 1 == 1; + + // Calculate if we need to roundup. + // We need to roundup if we are above halfway, or if we are odd + // and at half-way (need to tie-to-even). Avoid the branch here. + fp.mant += cb(is_odd, is_halfway, is_above) as u64; +} + +/// Round our significant digits into place, truncating them. +#[cfg_attr(not(feature = "compact"), inline)] +pub fn round_down(fp: &mut ExtendedFloat, shift: i32) { + // Might have a shift greater than 64 if we have an error. + fp.mant = match shift == 64 { + true => 0, + false => fp.mant >> shift, + }; + fp.exp += shift; +} diff --git a/vendor/minimal-lexical/src/slow.rs b/vendor/minimal-lexical/src/slow.rs new file mode 100644 index 00000000000000..59d526ba42343f --- /dev/null +++ b/vendor/minimal-lexical/src/slow.rs @@ -0,0 +1,403 @@ +//! Slow, fallback cases where we cannot unambiguously round a float. +//! +//! This occurs when we cannot determine the exact representation using +//! both the fast path (native) cases nor the Lemire/Bellerophon algorithms, +//! and therefore must fallback to a slow, arbitrary-precision representation. + +#![doc(hidden)] + +use crate::bigint::{Bigint, Limb, LIMB_BITS}; +use crate::extended_float::{extended_to_float, ExtendedFloat}; +use crate::num::Float; +use crate::number::Number; +use crate::rounding::{round, round_down, round_nearest_tie_even}; +use core::cmp; + +// ALGORITHM +// --------- + +/// Parse the significant digits and biased, binary exponent of a float. +/// +/// This is a fallback algorithm that uses a big-integer representation +/// of the float, and therefore is considerably slower than faster +/// approximations. However, it will always determine how to round +/// the significant digits to the nearest machine float, allowing +/// use to handle near half-way cases. +/// +/// Near half-way cases are halfway between two consecutive machine floats. +/// For example, the float `16777217.0` has a bitwise representation of +/// `100000000000000000000000 1`. Rounding to a single-precision float, +/// the trailing `1` is truncated. Using round-nearest, tie-even, any +/// value above `16777217.0` must be rounded up to `16777218.0`, while +/// any value before or equal to `16777217.0` must be rounded down +/// to `16777216.0`. 
These near-halfway conversions therefore may require +/// a large number of digits to unambiguously determine how to round. +#[inline] +pub fn slow<'a, F, Iter1, Iter2>( + num: Number, + fp: ExtendedFloat, + integer: Iter1, + fraction: Iter2, +) -> ExtendedFloat +where + F: Float, + Iter1: Iterator + Clone, + Iter2: Iterator + Clone, +{ + // Ensure our preconditions are valid: + // 1. The significant digits are not shifted into place. + debug_assert!(fp.mant & (1 << 63) != 0); + + // This assumes the sign bit has already been parsed, and we're + // starting with the integer digits, and the float format has been + // correctly validated. + let sci_exp = scientific_exponent(&num); + + // We have 2 major algorithms we use for this: + // 1. An algorithm with a finite number of digits and a positive exponent. + // 2. An algorithm with a finite number of digits and a negative exponent. + let (bigmant, digits) = parse_mantissa(integer, fraction, F::MAX_DIGITS); + let exponent = sci_exp + 1 - digits as i32; + if exponent >= 0 { + positive_digit_comp::(bigmant, exponent) + } else { + negative_digit_comp::(bigmant, fp, exponent) + } +} + +/// Generate the significant digits with a positive exponent relative to mantissa. +pub fn positive_digit_comp(mut bigmant: Bigint, exponent: i32) -> ExtendedFloat { + // Simple, we just need to multiply by the power of the radix. + // Now, we can calculate the mantissa and the exponent from this. + // The binary exponent is the binary exponent for the mantissa + // shifted to the hidden bit. + bigmant.pow(10, exponent as u32).unwrap(); + + // Get the exact representation of the float from the big integer. + // hi64 checks **all** the remaining bits after the mantissa, + // so it will check if **any** truncated digits exist. + let (mant, is_truncated) = bigmant.hi64(); + let exp = bigmant.bit_length() as i32 - 64 + F::EXPONENT_BIAS; + let mut fp = ExtendedFloat { + mant, + exp, + }; + + // Shift the digits into position and determine if we need to round-up. + round::(&mut fp, |f, s| { + round_nearest_tie_even(f, s, |is_odd, is_halfway, is_above| { + is_above || (is_halfway && is_truncated) || (is_odd && is_halfway) + }); + }); + fp +} + +/// Generate the significant digits with a negative exponent relative to mantissa. +/// +/// This algorithm is quite simple: we have the significant digits `m1 * b^N1`, +/// where `m1` is the bigint mantissa, `b` is the radix, and `N1` is the radix +/// exponent. We then calculate the theoretical representation of `b+h`, which +/// is `m2 * 2^N2`, where `m2` is the bigint mantissa and `N2` is the binary +/// exponent. If we had infinite, efficient floating precision, this would be +/// equal to `m1 / b^-N1` and then compare it to `m2 * 2^N2`. +/// +/// Since we cannot divide and keep precision, we must multiply the other: +/// if we want to do `m1 / b^-N1 >= m2 * 2^N2`, we can do +/// `m1 >= m2 * b^-N1 * 2^N2` Going to the decimal case, we can show and example +/// and simplify this further: `m1 >= m2 * 2^N2 * 10^-N1`. Since we can remove +/// a power-of-two, this is `m1 >= m2 * 2^(N2 - N1) * 5^-N1`. Therefore, if +/// `N2 - N1 > 0`, we need have `m1 >= m2 * 2^(N2 - N1) * 5^-N1`, otherwise, +/// we have `m1 * 2^(N1 - N2) >= m2 * 5^-N1`, where the resulting exponents +/// are all positive. +/// +/// This allows us to compare both floats using integers efficiently +/// without any loss of precision. 
+#[allow(clippy::comparison_chain)] +pub fn negative_digit_comp( + bigmant: Bigint, + mut fp: ExtendedFloat, + exponent: i32, +) -> ExtendedFloat { + // Ensure our preconditions are valid: + // 1. The significant digits are not shifted into place. + debug_assert!(fp.mant & (1 << 63) != 0); + + // Get the significant digits and radix exponent for the real digits. + let mut real_digits = bigmant; + let real_exp = exponent; + debug_assert!(real_exp < 0); + + // Round down our extended-precision float and calculate `b`. + let mut b = fp; + round::(&mut b, round_down); + let b = extended_to_float::(b); + + // Get the significant digits and the binary exponent for `b+h`. + let theor = bh(b); + let mut theor_digits = Bigint::from_u64(theor.mant); + let theor_exp = theor.exp; + + // We need to scale the real digits and `b+h` digits to be the same + // order. We currently have `real_exp`, in `radix`, that needs to be + // shifted to `theor_digits` (since it is negative), and `theor_exp` + // to either `theor_digits` or `real_digits` as a power of 2 (since it + // may be positive or negative). Try to remove as many powers of 2 + // as possible. All values are relative to `theor_digits`, that is, + // reflect the power you need to multiply `theor_digits` by. + // + // Both are on opposite-sides of equation, can factor out a + // power of two. + // + // Example: 10^-10, 2^-10 -> ( 0, 10, 0) + // Example: 10^-10, 2^-15 -> (-5, 10, 0) + // Example: 10^-10, 2^-5 -> ( 5, 10, 0) + // Example: 10^-10, 2^5 -> (15, 10, 0) + let binary_exp = theor_exp - real_exp; + let halfradix_exp = -real_exp; + if halfradix_exp != 0 { + theor_digits.pow(5, halfradix_exp as u32).unwrap(); + } + if binary_exp > 0 { + theor_digits.pow(2, binary_exp as u32).unwrap(); + } else if binary_exp < 0 { + real_digits.pow(2, (-binary_exp) as u32).unwrap(); + } + + // Compare our theoretical and real digits and round nearest, tie even. + let ord = real_digits.data.cmp(&theor_digits.data); + round::(&mut fp, |f, s| { + round_nearest_tie_even(f, s, |is_odd, _, _| { + // Can ignore `is_halfway` and `is_above`, since those were + // calculates using less significant digits. + match ord { + cmp::Ordering::Greater => true, + cmp::Ordering::Less => false, + cmp::Ordering::Equal if is_odd => true, + cmp::Ordering::Equal => false, + } + }); + }); + fp +} + +/// Add a digit to the temporary value. +macro_rules! add_digit { + ($c:ident, $value:ident, $counter:ident, $count:ident) => {{ + let digit = $c - b'0'; + $value *= 10 as Limb; + $value += digit as Limb; + + // Increment our counters. + $counter += 1; + $count += 1; + }}; +} + +/// Add a temporary value to our mantissa. +macro_rules! add_temporary { + // Multiply by the small power and add the native value. + (@mul $result:ident, $power:expr, $value:expr) => { + $result.data.mul_small($power).unwrap(); + $result.data.add_small($value).unwrap(); + }; + + // # Safety + // + // Safe is `counter <= step`, or smaller than the table size. + ($format:ident, $result:ident, $counter:ident, $value:ident) => { + if $counter != 0 { + // SAFETY: safe, since `counter <= step`, or smaller than the table size. + let small_power = unsafe { f64::int_pow_fast_path($counter, 10) }; + add_temporary!(@mul $result, small_power as Limb, $value); + $counter = 0; + $value = 0; + } + }; + + // Add a temporary where we won't read the counter results internally. + // + // # Safety + // + // Safe is `counter <= step`, or smaller than the table size. 
+ (@end $format:ident, $result:ident, $counter:ident, $value:ident) => { + if $counter != 0 { + // SAFETY: safe, since `counter <= step`, or smaller than the table size. + let small_power = unsafe { f64::int_pow_fast_path($counter, 10) }; + add_temporary!(@mul $result, small_power as Limb, $value); + } + }; + + // Add the maximum native value. + (@max $format:ident, $result:ident, $counter:ident, $value:ident, $max:ident) => { + add_temporary!(@mul $result, $max, $value); + $counter = 0; + $value = 0; + }; +} + +/// Round-up a truncated value. +macro_rules! round_up_truncated { + ($format:ident, $result:ident, $count:ident) => {{ + // Need to round-up. + // Can't just add 1, since this can accidentally round-up + // values to a halfway point, which can cause invalid results. + add_temporary!(@mul $result, 10, 1); + $count += 1; + }}; +} + +/// Check and round-up the fraction if any non-zero digits exist. +macro_rules! round_up_nonzero { + ($format:ident, $iter:expr, $result:ident, $count:ident) => {{ + for &digit in $iter { + if digit != b'0' { + round_up_truncated!($format, $result, $count); + return ($result, $count); + } + } + }}; +} + +/// Parse the full mantissa into a big integer. +/// +/// Returns the parsed mantissa and the number of digits in the mantissa. +/// The max digits is the maximum number of digits plus one. +pub fn parse_mantissa<'a, Iter1, Iter2>( + mut integer: Iter1, + mut fraction: Iter2, + max_digits: usize, +) -> (Bigint, usize) +where + Iter1: Iterator + Clone, + Iter2: Iterator + Clone, +{ + // Iteratively process all the data in the mantissa. + // We do this via small, intermediate values which once we reach + // the maximum number of digits we can process without overflow, + // we add the temporary to the big integer. + let mut counter: usize = 0; + let mut count: usize = 0; + let mut value: Limb = 0; + let mut result = Bigint::new(); + + // Now use our pre-computed small powers iteratively. + // This is calculated as `⌊log(2^BITS - 1, 10)⌋`. + let step: usize = if LIMB_BITS == 32 { + 9 + } else { + 19 + }; + let max_native = (10 as Limb).pow(step as u32); + + // Process the integer digits. + 'integer: loop { + // Parse a digit at a time, until we reach step. + while counter < step && count < max_digits { + if let Some(&c) = integer.next() { + add_digit!(c, value, counter, count); + } else { + break 'integer; + } + } + + // Check if we've exhausted our max digits. + if count == max_digits { + // Need to check if we're truncated, and round-up accordingly. + // SAFETY: safe since `counter <= step`. + add_temporary!(@end format, result, counter, value); + round_up_nonzero!(format, integer, result, count); + round_up_nonzero!(format, fraction, result, count); + return (result, count); + } else { + // Add our temporary from the loop. + // SAFETY: safe since `counter <= step`. + add_temporary!(@max format, result, counter, value, max_native); + } + } + + // Skip leading fraction zeros. + // Required to get an accurate count. + if count == 0 { + for &c in &mut fraction { + if c != b'0' { + add_digit!(c, value, counter, count); + break; + } + } + } + + // Process the fraction digits. + 'fraction: loop { + // Parse a digit at a time, until we reach step. + while counter < step && count < max_digits { + if let Some(&c) = fraction.next() { + add_digit!(c, value, counter, count); + } else { + break 'fraction; + } + } + + // Check if we've exhausted our max digits. + if count == max_digits { + // SAFETY: safe since `counter <= step`. 
+ add_temporary!(@end format, result, counter, value); + round_up_nonzero!(format, fraction, result, count); + return (result, count); + } else { + // Add our temporary from the loop. + // SAFETY: safe since `counter <= step`. + add_temporary!(@max format, result, counter, value, max_native); + } + } + + // We will always have a remainder, as long as we entered the loop + // once, or counter % step is 0. + // SAFETY: safe since `counter <= step`. + add_temporary!(@end format, result, counter, value); + + (result, count) +} + +// SCALING +// ------- + +/// Calculate the scientific exponent from a `Number` value. +/// Any other attempts would require slowdowns for faster algorithms. +#[inline] +pub fn scientific_exponent(num: &Number) -> i32 { + // Use power reduction to make this faster. + let mut mantissa = num.mantissa; + let mut exponent = num.exponent; + while mantissa >= 10000 { + mantissa /= 10000; + exponent += 4; + } + while mantissa >= 100 { + mantissa /= 100; + exponent += 2; + } + while mantissa >= 10 { + mantissa /= 10; + exponent += 1; + } + exponent as i32 +} + +/// Calculate `b` from a a representation of `b` as a float. +#[inline] +pub fn b(float: F) -> ExtendedFloat { + ExtendedFloat { + mant: float.mantissa(), + exp: float.exponent(), + } +} + +/// Calculate `b+h` from a a representation of `b` as a float. +#[inline] +pub fn bh(float: F) -> ExtendedFloat { + let fp = b(float); + ExtendedFloat { + mant: (fp.mant << 1) + 1, + exp: fp.exp - 1, + } +} diff --git a/vendor/minimal-lexical/src/stackvec.rs b/vendor/minimal-lexical/src/stackvec.rs new file mode 100644 index 00000000000000..d9bc259555be20 --- /dev/null +++ b/vendor/minimal-lexical/src/stackvec.rs @@ -0,0 +1,308 @@ +//! Simple stack-allocated vector. + +#![cfg(not(feature = "alloc"))] +#![doc(hidden)] + +use crate::bigint; +use core::{cmp, mem, ops, ptr, slice}; + +/// Simple stack vector implementation. +#[derive(Clone)] +pub struct StackVec { + /// The raw buffer for the elements. + data: [mem::MaybeUninit; bigint::BIGINT_LIMBS], + /// The number of elements in the array (we never need more than u16::MAX). + length: u16, +} + +#[allow(clippy::new_without_default)] +impl StackVec { + /// Construct an empty vector. + #[inline] + pub const fn new() -> Self { + Self { + length: 0, + data: [mem::MaybeUninit::uninit(); bigint::BIGINT_LIMBS], + } + } + + /// Construct a vector from an existing slice. + #[inline] + pub fn try_from(x: &[bigint::Limb]) -> Option { + let mut vec = Self::new(); + vec.try_extend(x)?; + Some(vec) + } + + /// Sets the length of a vector. + /// + /// This will explicitly set the size of the vector, without actually + /// modifying its buffers, so it is up to the caller to ensure that the + /// vector is actually the specified size. + /// + /// # Safety + /// + /// Safe as long as `len` is less than `BIGINT_LIMBS`. + #[inline] + pub unsafe fn set_len(&mut self, len: usize) { + // Constant is `u16::MAX` for older Rustc versions. + debug_assert!(len <= 0xffff); + debug_assert!(len <= bigint::BIGINT_LIMBS); + self.length = len as u16; + } + + /// The number of elements stored in the vector. + #[inline] + pub const fn len(&self) -> usize { + self.length as usize + } + + /// If the vector is empty. + #[inline] + pub const fn is_empty(&self) -> bool { + self.len() == 0 + } + + /// The number of items the vector can hold. + #[inline] + pub const fn capacity(&self) -> usize { + bigint::BIGINT_LIMBS as usize + } + + /// Append an item to the vector, without bounds checking. 
+ /// + /// # Safety + /// + /// Safe if `self.len() < self.capacity()`. + #[inline] + pub unsafe fn push_unchecked(&mut self, value: bigint::Limb) { + debug_assert!(self.len() < self.capacity()); + // SAFETY: safe, capacity is less than the current size. + unsafe { + ptr::write(self.as_mut_ptr().add(self.len()), value); + self.length += 1; + } + } + + /// Append an item to the vector. + #[inline] + pub fn try_push(&mut self, value: bigint::Limb) -> Option<()> { + if self.len() < self.capacity() { + // SAFETY: safe, capacity is less than the current size. + unsafe { self.push_unchecked(value) }; + Some(()) + } else { + None + } + } + + /// Remove an item from the end of a vector, without bounds checking. + /// + /// # Safety + /// + /// Safe if `self.len() > 0`. + #[inline] + pub unsafe fn pop_unchecked(&mut self) -> bigint::Limb { + debug_assert!(!self.is_empty()); + // SAFETY: safe if `self.length > 0`. + // We have a trivial drop and copy, so this is safe. + self.length -= 1; + unsafe { ptr::read(self.as_mut_ptr().add(self.len())) } + } + + /// Remove an item from the end of the vector and return it, or None if empty. + #[inline] + pub fn pop(&mut self) -> Option { + if self.is_empty() { + None + } else { + // SAFETY: safe, since `self.len() > 0`. + unsafe { Some(self.pop_unchecked()) } + } + } + + /// Add items from a slice to the vector, without bounds checking. + /// + /// # Safety + /// + /// Safe if `self.len() + slc.len() <= self.capacity()`. + #[inline] + pub unsafe fn extend_unchecked(&mut self, slc: &[bigint::Limb]) { + let index = self.len(); + let new_len = index + slc.len(); + debug_assert!(self.len() + slc.len() <= self.capacity()); + let src = slc.as_ptr(); + // SAFETY: safe if `self.len() + slc.len() <= self.capacity()`. + unsafe { + let dst = self.as_mut_ptr().add(index); + ptr::copy_nonoverlapping(src, dst, slc.len()); + self.set_len(new_len); + } + } + + /// Copy elements from a slice and append them to the vector. + #[inline] + pub fn try_extend(&mut self, slc: &[bigint::Limb]) -> Option<()> { + if self.len() + slc.len() <= self.capacity() { + // SAFETY: safe, since `self.len() + slc.len() <= self.capacity()`. + unsafe { self.extend_unchecked(slc) }; + Some(()) + } else { + None + } + } + + /// Truncate vector to new length, dropping any items after `len`. + /// + /// # Safety + /// + /// Safe as long as `len <= self.capacity()`. + unsafe fn truncate_unchecked(&mut self, len: usize) { + debug_assert!(len <= self.capacity()); + self.length = len as u16; + } + + /// Resize the buffer, without bounds checking. + /// + /// # Safety + /// + /// Safe as long as `len <= self.capacity()`. + #[inline] + pub unsafe fn resize_unchecked(&mut self, len: usize, value: bigint::Limb) { + debug_assert!(len <= self.capacity()); + let old_len = self.len(); + if len > old_len { + // We have a trivial drop, so there's no worry here. + // Just, don't set the length until all values have been written, + // so we don't accidentally read uninitialized memory. + + // SAFETY: safe if `len < self.capacity()`. + let count = len - old_len; + for index in 0..count { + unsafe { + let dst = self.as_mut_ptr().add(old_len + index); + ptr::write(dst, value); + } + } + self.length = len as u16; + } else { + // SAFETY: safe since `len < self.len()`. + unsafe { self.truncate_unchecked(len) }; + } + } + + /// Try to resize the buffer. + /// + /// If the new length is smaller than the current length, truncate + /// the input. If it's larger, then append elements to the buffer. 
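+    ///
+    /// For example, `try_resize(3, 1)` on an empty vector yields `[1, 1, 1]`,
+    /// while any length greater than `BIGINT_LIMBS` returns `None`.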
+ #[inline] + pub fn try_resize(&mut self, len: usize, value: bigint::Limb) -> Option<()> { + if len > self.capacity() { + None + } else { + // SAFETY: safe, since `len <= self.capacity()`. + unsafe { self.resize_unchecked(len, value) }; + Some(()) + } + } + + // HI + + /// Get the high 64 bits from the vector. + #[inline(always)] + pub fn hi64(&self) -> (u64, bool) { + bigint::hi64(self) + } + + // FROM + + /// Create StackVec from u64 value. + #[inline(always)] + pub fn from_u64(x: u64) -> Self { + bigint::from_u64(x) + } + + // MATH + + /// Normalize the integer, so any leading zero values are removed. + #[inline] + pub fn normalize(&mut self) { + bigint::normalize(self) + } + + /// Get if the big integer is normalized. + #[inline] + pub fn is_normalized(&self) -> bool { + bigint::is_normalized(self) + } + + /// AddAssign small integer. + #[inline] + pub fn add_small(&mut self, y: bigint::Limb) -> Option<()> { + bigint::small_add(self, y) + } + + /// MulAssign small integer. + #[inline] + pub fn mul_small(&mut self, y: bigint::Limb) -> Option<()> { + bigint::small_mul(self, y) + } +} + +impl PartialEq for StackVec { + #[inline] + #[allow(clippy::op_ref)] + fn eq(&self, other: &Self) -> bool { + use core::ops::Deref; + self.len() == other.len() && self.deref() == other.deref() + } +} + +impl Eq for StackVec { +} + +impl cmp::PartialOrd for StackVec { + #[inline] + fn partial_cmp(&self, other: &Self) -> Option { + Some(bigint::compare(self, other)) + } +} + +impl cmp::Ord for StackVec { + #[inline] + fn cmp(&self, other: &Self) -> cmp::Ordering { + bigint::compare(self, other) + } +} + +impl ops::Deref for StackVec { + type Target = [bigint::Limb]; + #[inline] + fn deref(&self) -> &[bigint::Limb] { + // SAFETY: safe since `self.data[..self.len()]` must be initialized + // and `self.len() <= self.capacity()`. + unsafe { + let ptr = self.data.as_ptr() as *const bigint::Limb; + slice::from_raw_parts(ptr, self.len()) + } + } +} + +impl ops::DerefMut for StackVec { + #[inline] + fn deref_mut(&mut self) -> &mut [bigint::Limb] { + // SAFETY: safe since `self.data[..self.len()]` must be initialized + // and `self.len() <= self.capacity()`. + unsafe { + let ptr = self.data.as_mut_ptr() as *mut bigint::Limb; + slice::from_raw_parts_mut(ptr, self.len()) + } + } +} + +impl ops::MulAssign<&[bigint::Limb]> for StackVec { + #[inline] + fn mul_assign(&mut self, rhs: &[bigint::Limb]) { + bigint::large_mul(self, rhs).unwrap(); + } +} diff --git a/vendor/minimal-lexical/src/table.rs b/vendor/minimal-lexical/src/table.rs new file mode 100644 index 00000000000000..7b1367e326a1bd --- /dev/null +++ b/vendor/minimal-lexical/src/table.rs @@ -0,0 +1,11 @@ +//! Pre-computed tables for parsing float strings. + +#![doc(hidden)] + +// Re-export all the feature-specific files. +#[cfg(feature = "compact")] +pub use crate::table_bellerophon::*; +#[cfg(not(feature = "compact"))] +pub use crate::table_lemire::*; +#[cfg(not(feature = "compact"))] +pub use crate::table_small::*; diff --git a/vendor/minimal-lexical/src/table_bellerophon.rs b/vendor/minimal-lexical/src/table_bellerophon.rs new file mode 100644 index 00000000000000..f85f8e6fb32277 --- /dev/null +++ b/vendor/minimal-lexical/src/table_bellerophon.rs @@ -0,0 +1,119 @@ +//! Cached exponents for basen values with 80-bit extended floats. +//! +//! Exact versions of base**n as an extended-precision float, with both +//! large and small powers. Use the large powers to minimize the amount +//! of compounded error. This is used in the Bellerophon algorithm. +//! +//! 
These values were calculated using Python, using the arbitrary-precision +//! integer to calculate exact extended-representation of each value. +//! These values are all normalized. +//! +//! DO NOT MODIFY: Generated by `etc/bellerophon_table.py` + +#![cfg(feature = "compact")] +#![doc(hidden)] + +use crate::bellerophon::BellerophonPowers; + +// HIGH LEVEL +// ---------- + +pub const BASE10_POWERS: BellerophonPowers = BellerophonPowers { + small: &BASE10_SMALL_MANTISSA, + large: &BASE10_LARGE_MANTISSA, + small_int: &BASE10_SMALL_INT_POWERS, + step: BASE10_STEP, + bias: BASE10_BIAS, + log2: BASE10_LOG2_MULT, + log2_shift: BASE10_LOG2_SHIFT, +}; + +// LOW-LEVEL +// --------- + +const BASE10_SMALL_MANTISSA: [u64; 10] = [ + 9223372036854775808, // 10^0 + 11529215046068469760, // 10^1 + 14411518807585587200, // 10^2 + 18014398509481984000, // 10^3 + 11258999068426240000, // 10^4 + 14073748835532800000, // 10^5 + 17592186044416000000, // 10^6 + 10995116277760000000, // 10^7 + 13743895347200000000, // 10^8 + 17179869184000000000, // 10^9 +]; +const BASE10_LARGE_MANTISSA: [u64; 66] = [ + 11555125961253852697, // 10^-350 + 13451937075301367670, // 10^-340 + 15660115838168849784, // 10^-330 + 18230774251475056848, // 10^-320 + 10611707258198326947, // 10^-310 + 12353653155963782858, // 10^-300 + 14381545078898527261, // 10^-290 + 16742321987285426889, // 10^-280 + 9745314011399999080, // 10^-270 + 11345038669416679861, // 10^-260 + 13207363278391631158, // 10^-250 + 15375394465392026070, // 10^-240 + 17899314949046850752, // 10^-230 + 10418772551374772303, // 10^-220 + 12129047596099288555, // 10^-210 + 14120069793541087484, // 10^-200 + 16437924692338667210, // 10^-190 + 9568131466127621947, // 10^-180 + 11138771039116687545, // 10^-170 + 12967236152753102995, // 10^-160 + 15095849699286165408, // 10^-150 + 17573882009934360870, // 10^-140 + 10229345649675443343, // 10^-130 + 11908525658859223294, // 10^-120 + 13863348470604074297, // 10^-110 + 16139061738043178685, // 10^-100 + 9394170331095332911, // 10^-90 + 10936253623915059621, // 10^-80 + 12731474852090538039, // 10^-70 + 14821387422376473014, // 10^-60 + 17254365866976409468, // 10^-50 + 10043362776618689222, // 10^-40 + 11692013098647223345, // 10^-30 + 13611294676837538538, // 10^-20 + 15845632502852867518, // 10^-10 + 9223372036854775808, // 10^0 + 10737418240000000000, // 10^10 + 12500000000000000000, // 10^20 + 14551915228366851806, // 10^30 + 16940658945086006781, // 10^40 + 9860761315262647567, // 10^50 + 11479437019748901445, // 10^60 + 13363823550460978230, // 10^70 + 15557538194652854267, // 10^80 + 18111358157653424735, // 10^90 + 10542197943230523224, // 10^100 + 12272733663244316382, // 10^110 + 14287342391028437277, // 10^120 + 16632655625031838749, // 10^130 + 9681479787123295682, // 10^140 + 11270725851789228247, // 10^150 + 13120851772591970218, // 10^160 + 15274681817498023410, // 10^170 + 17782069995880619867, // 10^180 + 10350527006597618960, // 10^190 + 12049599325514420588, // 10^200 + 14027579833653779454, // 10^210 + 16330252207878254650, // 10^220 + 9505457831475799117, // 10^230 + 11065809325636130661, // 10^240 + 12882297539194266616, // 10^250 + 14996968138956309548, // 10^260 + 17458768723248864463, // 10^270 + 10162340898095201970, // 10^280 + 11830521861667747109, // 10^290 + 13772540099066387756, // 10^300 +]; +const BASE10_SMALL_INT_POWERS: [u64; 10] = + [1, 10, 100, 1000, 10000, 100000, 1000000, 10000000, 100000000, 1000000000]; +const BASE10_STEP: i32 = 10; +const BASE10_BIAS: i32 = 350; +const 
BASE10_LOG2_MULT: i64 = 217706; +const BASE10_LOG2_SHIFT: i32 = 16; diff --git a/vendor/minimal-lexical/src/table_lemire.rs b/vendor/minimal-lexical/src/table_lemire.rs new file mode 100644 index 00000000000000..110e1dab2bbf96 --- /dev/null +++ b/vendor/minimal-lexical/src/table_lemire.rs @@ -0,0 +1,676 @@ +//! Pre-computed tables powers-of-5 for extended-precision representations. +//! +//! These tables enable fast scaling of the significant digits +//! of a float to the decimal exponent, with minimal rounding +//! errors, in a 128 or 192-bit representation. +//! +//! DO NOT MODIFY: Generated by `etc/lemire_table.py` +//! +//! This adapted from the Rust implementation, based on the fast-float-rust +//! implementation, and is similarly subject to an Apache2.0/MIT license. + +#![doc(hidden)] +#![cfg(not(feature = "compact"))] + +pub const SMALLEST_POWER_OF_FIVE: i32 = -342; +pub const LARGEST_POWER_OF_FIVE: i32 = 308; +pub const N_POWERS_OF_FIVE: usize = (LARGEST_POWER_OF_FIVE - SMALLEST_POWER_OF_FIVE + 1) as usize; + +// Use static to avoid long compile times: Rust compiler errors +// can have the entire table compiled multiple times, and then +// emit code multiple times, even if it's stripped out in +// the final binary. +#[rustfmt::skip] +pub static POWER_OF_FIVE_128: [(u64, u64); N_POWERS_OF_FIVE] = [ + (0xeef453d6923bd65a, 0x113faa2906a13b3f), // 5^-342 + (0x9558b4661b6565f8, 0x4ac7ca59a424c507), // 5^-341 + (0xbaaee17fa23ebf76, 0x5d79bcf00d2df649), // 5^-340 + (0xe95a99df8ace6f53, 0xf4d82c2c107973dc), // 5^-339 + (0x91d8a02bb6c10594, 0x79071b9b8a4be869), // 5^-338 + (0xb64ec836a47146f9, 0x9748e2826cdee284), // 5^-337 + (0xe3e27a444d8d98b7, 0xfd1b1b2308169b25), // 5^-336 + (0x8e6d8c6ab0787f72, 0xfe30f0f5e50e20f7), // 5^-335 + (0xb208ef855c969f4f, 0xbdbd2d335e51a935), // 5^-334 + (0xde8b2b66b3bc4723, 0xad2c788035e61382), // 5^-333 + (0x8b16fb203055ac76, 0x4c3bcb5021afcc31), // 5^-332 + (0xaddcb9e83c6b1793, 0xdf4abe242a1bbf3d), // 5^-331 + (0xd953e8624b85dd78, 0xd71d6dad34a2af0d), // 5^-330 + (0x87d4713d6f33aa6b, 0x8672648c40e5ad68), // 5^-329 + (0xa9c98d8ccb009506, 0x680efdaf511f18c2), // 5^-328 + (0xd43bf0effdc0ba48, 0x212bd1b2566def2), // 5^-327 + (0x84a57695fe98746d, 0x14bb630f7604b57), // 5^-326 + (0xa5ced43b7e3e9188, 0x419ea3bd35385e2d), // 5^-325 + (0xcf42894a5dce35ea, 0x52064cac828675b9), // 5^-324 + (0x818995ce7aa0e1b2, 0x7343efebd1940993), // 5^-323 + (0xa1ebfb4219491a1f, 0x1014ebe6c5f90bf8), // 5^-322 + (0xca66fa129f9b60a6, 0xd41a26e077774ef6), // 5^-321 + (0xfd00b897478238d0, 0x8920b098955522b4), // 5^-320 + (0x9e20735e8cb16382, 0x55b46e5f5d5535b0), // 5^-319 + (0xc5a890362fddbc62, 0xeb2189f734aa831d), // 5^-318 + (0xf712b443bbd52b7b, 0xa5e9ec7501d523e4), // 5^-317 + (0x9a6bb0aa55653b2d, 0x47b233c92125366e), // 5^-316 + (0xc1069cd4eabe89f8, 0x999ec0bb696e840a), // 5^-315 + (0xf148440a256e2c76, 0xc00670ea43ca250d), // 5^-314 + (0x96cd2a865764dbca, 0x380406926a5e5728), // 5^-313 + (0xbc807527ed3e12bc, 0xc605083704f5ecf2), // 5^-312 + (0xeba09271e88d976b, 0xf7864a44c633682e), // 5^-311 + (0x93445b8731587ea3, 0x7ab3ee6afbe0211d), // 5^-310 + (0xb8157268fdae9e4c, 0x5960ea05bad82964), // 5^-309 + (0xe61acf033d1a45df, 0x6fb92487298e33bd), // 5^-308 + (0x8fd0c16206306bab, 0xa5d3b6d479f8e056), // 5^-307 + (0xb3c4f1ba87bc8696, 0x8f48a4899877186c), // 5^-306 + (0xe0b62e2929aba83c, 0x331acdabfe94de87), // 5^-305 + (0x8c71dcd9ba0b4925, 0x9ff0c08b7f1d0b14), // 5^-304 + (0xaf8e5410288e1b6f, 0x7ecf0ae5ee44dd9), // 5^-303 + (0xdb71e91432b1a24a, 0xc9e82cd9f69d6150), // 5^-302 + 
(0x892731ac9faf056e, 0xbe311c083a225cd2), // 5^-301 + (0xab70fe17c79ac6ca, 0x6dbd630a48aaf406), // 5^-300 + (0xd64d3d9db981787d, 0x92cbbccdad5b108), // 5^-299 + (0x85f0468293f0eb4e, 0x25bbf56008c58ea5), // 5^-298 + (0xa76c582338ed2621, 0xaf2af2b80af6f24e), // 5^-297 + (0xd1476e2c07286faa, 0x1af5af660db4aee1), // 5^-296 + (0x82cca4db847945ca, 0x50d98d9fc890ed4d), // 5^-295 + (0xa37fce126597973c, 0xe50ff107bab528a0), // 5^-294 + (0xcc5fc196fefd7d0c, 0x1e53ed49a96272c8), // 5^-293 + (0xff77b1fcbebcdc4f, 0x25e8e89c13bb0f7a), // 5^-292 + (0x9faacf3df73609b1, 0x77b191618c54e9ac), // 5^-291 + (0xc795830d75038c1d, 0xd59df5b9ef6a2417), // 5^-290 + (0xf97ae3d0d2446f25, 0x4b0573286b44ad1d), // 5^-289 + (0x9becce62836ac577, 0x4ee367f9430aec32), // 5^-288 + (0xc2e801fb244576d5, 0x229c41f793cda73f), // 5^-287 + (0xf3a20279ed56d48a, 0x6b43527578c1110f), // 5^-286 + (0x9845418c345644d6, 0x830a13896b78aaa9), // 5^-285 + (0xbe5691ef416bd60c, 0x23cc986bc656d553), // 5^-284 + (0xedec366b11c6cb8f, 0x2cbfbe86b7ec8aa8), // 5^-283 + (0x94b3a202eb1c3f39, 0x7bf7d71432f3d6a9), // 5^-282 + (0xb9e08a83a5e34f07, 0xdaf5ccd93fb0cc53), // 5^-281 + (0xe858ad248f5c22c9, 0xd1b3400f8f9cff68), // 5^-280 + (0x91376c36d99995be, 0x23100809b9c21fa1), // 5^-279 + (0xb58547448ffffb2d, 0xabd40a0c2832a78a), // 5^-278 + (0xe2e69915b3fff9f9, 0x16c90c8f323f516c), // 5^-277 + (0x8dd01fad907ffc3b, 0xae3da7d97f6792e3), // 5^-276 + (0xb1442798f49ffb4a, 0x99cd11cfdf41779c), // 5^-275 + (0xdd95317f31c7fa1d, 0x40405643d711d583), // 5^-274 + (0x8a7d3eef7f1cfc52, 0x482835ea666b2572), // 5^-273 + (0xad1c8eab5ee43b66, 0xda3243650005eecf), // 5^-272 + (0xd863b256369d4a40, 0x90bed43e40076a82), // 5^-271 + (0x873e4f75e2224e68, 0x5a7744a6e804a291), // 5^-270 + (0xa90de3535aaae202, 0x711515d0a205cb36), // 5^-269 + (0xd3515c2831559a83, 0xd5a5b44ca873e03), // 5^-268 + (0x8412d9991ed58091, 0xe858790afe9486c2), // 5^-267 + (0xa5178fff668ae0b6, 0x626e974dbe39a872), // 5^-266 + (0xce5d73ff402d98e3, 0xfb0a3d212dc8128f), // 5^-265 + (0x80fa687f881c7f8e, 0x7ce66634bc9d0b99), // 5^-264 + (0xa139029f6a239f72, 0x1c1fffc1ebc44e80), // 5^-263 + (0xc987434744ac874e, 0xa327ffb266b56220), // 5^-262 + (0xfbe9141915d7a922, 0x4bf1ff9f0062baa8), // 5^-261 + (0x9d71ac8fada6c9b5, 0x6f773fc3603db4a9), // 5^-260 + (0xc4ce17b399107c22, 0xcb550fb4384d21d3), // 5^-259 + (0xf6019da07f549b2b, 0x7e2a53a146606a48), // 5^-258 + (0x99c102844f94e0fb, 0x2eda7444cbfc426d), // 5^-257 + (0xc0314325637a1939, 0xfa911155fefb5308), // 5^-256 + (0xf03d93eebc589f88, 0x793555ab7eba27ca), // 5^-255 + (0x96267c7535b763b5, 0x4bc1558b2f3458de), // 5^-254 + (0xbbb01b9283253ca2, 0x9eb1aaedfb016f16), // 5^-253 + (0xea9c227723ee8bcb, 0x465e15a979c1cadc), // 5^-252 + (0x92a1958a7675175f, 0xbfacd89ec191ec9), // 5^-251 + (0xb749faed14125d36, 0xcef980ec671f667b), // 5^-250 + (0xe51c79a85916f484, 0x82b7e12780e7401a), // 5^-249 + (0x8f31cc0937ae58d2, 0xd1b2ecb8b0908810), // 5^-248 + (0xb2fe3f0b8599ef07, 0x861fa7e6dcb4aa15), // 5^-247 + (0xdfbdcece67006ac9, 0x67a791e093e1d49a), // 5^-246 + (0x8bd6a141006042bd, 0xe0c8bb2c5c6d24e0), // 5^-245 + (0xaecc49914078536d, 0x58fae9f773886e18), // 5^-244 + (0xda7f5bf590966848, 0xaf39a475506a899e), // 5^-243 + (0x888f99797a5e012d, 0x6d8406c952429603), // 5^-242 + (0xaab37fd7d8f58178, 0xc8e5087ba6d33b83), // 5^-241 + (0xd5605fcdcf32e1d6, 0xfb1e4a9a90880a64), // 5^-240 + (0x855c3be0a17fcd26, 0x5cf2eea09a55067f), // 5^-239 + (0xa6b34ad8c9dfc06f, 0xf42faa48c0ea481e), // 5^-238 + (0xd0601d8efc57b08b, 0xf13b94daf124da26), // 5^-237 + (0x823c12795db6ce57, 0x76c53d08d6b70858), // 
5^-236 + (0xa2cb1717b52481ed, 0x54768c4b0c64ca6e), // 5^-235 + (0xcb7ddcdda26da268, 0xa9942f5dcf7dfd09), // 5^-234 + (0xfe5d54150b090b02, 0xd3f93b35435d7c4c), // 5^-233 + (0x9efa548d26e5a6e1, 0xc47bc5014a1a6daf), // 5^-232 + (0xc6b8e9b0709f109a, 0x359ab6419ca1091b), // 5^-231 + (0xf867241c8cc6d4c0, 0xc30163d203c94b62), // 5^-230 + (0x9b407691d7fc44f8, 0x79e0de63425dcf1d), // 5^-229 + (0xc21094364dfb5636, 0x985915fc12f542e4), // 5^-228 + (0xf294b943e17a2bc4, 0x3e6f5b7b17b2939d), // 5^-227 + (0x979cf3ca6cec5b5a, 0xa705992ceecf9c42), // 5^-226 + (0xbd8430bd08277231, 0x50c6ff782a838353), // 5^-225 + (0xece53cec4a314ebd, 0xa4f8bf5635246428), // 5^-224 + (0x940f4613ae5ed136, 0x871b7795e136be99), // 5^-223 + (0xb913179899f68584, 0x28e2557b59846e3f), // 5^-222 + (0xe757dd7ec07426e5, 0x331aeada2fe589cf), // 5^-221 + (0x9096ea6f3848984f, 0x3ff0d2c85def7621), // 5^-220 + (0xb4bca50b065abe63, 0xfed077a756b53a9), // 5^-219 + (0xe1ebce4dc7f16dfb, 0xd3e8495912c62894), // 5^-218 + (0x8d3360f09cf6e4bd, 0x64712dd7abbbd95c), // 5^-217 + (0xb080392cc4349dec, 0xbd8d794d96aacfb3), // 5^-216 + (0xdca04777f541c567, 0xecf0d7a0fc5583a0), // 5^-215 + (0x89e42caaf9491b60, 0xf41686c49db57244), // 5^-214 + (0xac5d37d5b79b6239, 0x311c2875c522ced5), // 5^-213 + (0xd77485cb25823ac7, 0x7d633293366b828b), // 5^-212 + (0x86a8d39ef77164bc, 0xae5dff9c02033197), // 5^-211 + (0xa8530886b54dbdeb, 0xd9f57f830283fdfc), // 5^-210 + (0xd267caa862a12d66, 0xd072df63c324fd7b), // 5^-209 + (0x8380dea93da4bc60, 0x4247cb9e59f71e6d), // 5^-208 + (0xa46116538d0deb78, 0x52d9be85f074e608), // 5^-207 + (0xcd795be870516656, 0x67902e276c921f8b), // 5^-206 + (0x806bd9714632dff6, 0xba1cd8a3db53b6), // 5^-205 + (0xa086cfcd97bf97f3, 0x80e8a40eccd228a4), // 5^-204 + (0xc8a883c0fdaf7df0, 0x6122cd128006b2cd), // 5^-203 + (0xfad2a4b13d1b5d6c, 0x796b805720085f81), // 5^-202 + (0x9cc3a6eec6311a63, 0xcbe3303674053bb0), // 5^-201 + (0xc3f490aa77bd60fc, 0xbedbfc4411068a9c), // 5^-200 + (0xf4f1b4d515acb93b, 0xee92fb5515482d44), // 5^-199 + (0x991711052d8bf3c5, 0x751bdd152d4d1c4a), // 5^-198 + (0xbf5cd54678eef0b6, 0xd262d45a78a0635d), // 5^-197 + (0xef340a98172aace4, 0x86fb897116c87c34), // 5^-196 + (0x9580869f0e7aac0e, 0xd45d35e6ae3d4da0), // 5^-195 + (0xbae0a846d2195712, 0x8974836059cca109), // 5^-194 + (0xe998d258869facd7, 0x2bd1a438703fc94b), // 5^-193 + (0x91ff83775423cc06, 0x7b6306a34627ddcf), // 5^-192 + (0xb67f6455292cbf08, 0x1a3bc84c17b1d542), // 5^-191 + (0xe41f3d6a7377eeca, 0x20caba5f1d9e4a93), // 5^-190 + (0x8e938662882af53e, 0x547eb47b7282ee9c), // 5^-189 + (0xb23867fb2a35b28d, 0xe99e619a4f23aa43), // 5^-188 + (0xdec681f9f4c31f31, 0x6405fa00e2ec94d4), // 5^-187 + (0x8b3c113c38f9f37e, 0xde83bc408dd3dd04), // 5^-186 + (0xae0b158b4738705e, 0x9624ab50b148d445), // 5^-185 + (0xd98ddaee19068c76, 0x3badd624dd9b0957), // 5^-184 + (0x87f8a8d4cfa417c9, 0xe54ca5d70a80e5d6), // 5^-183 + (0xa9f6d30a038d1dbc, 0x5e9fcf4ccd211f4c), // 5^-182 + (0xd47487cc8470652b, 0x7647c3200069671f), // 5^-181 + (0x84c8d4dfd2c63f3b, 0x29ecd9f40041e073), // 5^-180 + (0xa5fb0a17c777cf09, 0xf468107100525890), // 5^-179 + (0xcf79cc9db955c2cc, 0x7182148d4066eeb4), // 5^-178 + (0x81ac1fe293d599bf, 0xc6f14cd848405530), // 5^-177 + (0xa21727db38cb002f, 0xb8ada00e5a506a7c), // 5^-176 + (0xca9cf1d206fdc03b, 0xa6d90811f0e4851c), // 5^-175 + (0xfd442e4688bd304a, 0x908f4a166d1da663), // 5^-174 + (0x9e4a9cec15763e2e, 0x9a598e4e043287fe), // 5^-173 + (0xc5dd44271ad3cdba, 0x40eff1e1853f29fd), // 5^-172 + (0xf7549530e188c128, 0xd12bee59e68ef47c), // 5^-171 + (0x9a94dd3e8cf578b9, 
0x82bb74f8301958ce), // 5^-170 + (0xc13a148e3032d6e7, 0xe36a52363c1faf01), // 5^-169 + (0xf18899b1bc3f8ca1, 0xdc44e6c3cb279ac1), // 5^-168 + (0x96f5600f15a7b7e5, 0x29ab103a5ef8c0b9), // 5^-167 + (0xbcb2b812db11a5de, 0x7415d448f6b6f0e7), // 5^-166 + (0xebdf661791d60f56, 0x111b495b3464ad21), // 5^-165 + (0x936b9fcebb25c995, 0xcab10dd900beec34), // 5^-164 + (0xb84687c269ef3bfb, 0x3d5d514f40eea742), // 5^-163 + (0xe65829b3046b0afa, 0xcb4a5a3112a5112), // 5^-162 + (0x8ff71a0fe2c2e6dc, 0x47f0e785eaba72ab), // 5^-161 + (0xb3f4e093db73a093, 0x59ed216765690f56), // 5^-160 + (0xe0f218b8d25088b8, 0x306869c13ec3532c), // 5^-159 + (0x8c974f7383725573, 0x1e414218c73a13fb), // 5^-158 + (0xafbd2350644eeacf, 0xe5d1929ef90898fa), // 5^-157 + (0xdbac6c247d62a583, 0xdf45f746b74abf39), // 5^-156 + (0x894bc396ce5da772, 0x6b8bba8c328eb783), // 5^-155 + (0xab9eb47c81f5114f, 0x66ea92f3f326564), // 5^-154 + (0xd686619ba27255a2, 0xc80a537b0efefebd), // 5^-153 + (0x8613fd0145877585, 0xbd06742ce95f5f36), // 5^-152 + (0xa798fc4196e952e7, 0x2c48113823b73704), // 5^-151 + (0xd17f3b51fca3a7a0, 0xf75a15862ca504c5), // 5^-150 + (0x82ef85133de648c4, 0x9a984d73dbe722fb), // 5^-149 + (0xa3ab66580d5fdaf5, 0xc13e60d0d2e0ebba), // 5^-148 + (0xcc963fee10b7d1b3, 0x318df905079926a8), // 5^-147 + (0xffbbcfe994e5c61f, 0xfdf17746497f7052), // 5^-146 + (0x9fd561f1fd0f9bd3, 0xfeb6ea8bedefa633), // 5^-145 + (0xc7caba6e7c5382c8, 0xfe64a52ee96b8fc0), // 5^-144 + (0xf9bd690a1b68637b, 0x3dfdce7aa3c673b0), // 5^-143 + (0x9c1661a651213e2d, 0x6bea10ca65c084e), // 5^-142 + (0xc31bfa0fe5698db8, 0x486e494fcff30a62), // 5^-141 + (0xf3e2f893dec3f126, 0x5a89dba3c3efccfa), // 5^-140 + (0x986ddb5c6b3a76b7, 0xf89629465a75e01c), // 5^-139 + (0xbe89523386091465, 0xf6bbb397f1135823), // 5^-138 + (0xee2ba6c0678b597f, 0x746aa07ded582e2c), // 5^-137 + (0x94db483840b717ef, 0xa8c2a44eb4571cdc), // 5^-136 + (0xba121a4650e4ddeb, 0x92f34d62616ce413), // 5^-135 + (0xe896a0d7e51e1566, 0x77b020baf9c81d17), // 5^-134 + (0x915e2486ef32cd60, 0xace1474dc1d122e), // 5^-133 + (0xb5b5ada8aaff80b8, 0xd819992132456ba), // 5^-132 + (0xe3231912d5bf60e6, 0x10e1fff697ed6c69), // 5^-131 + (0x8df5efabc5979c8f, 0xca8d3ffa1ef463c1), // 5^-130 + (0xb1736b96b6fd83b3, 0xbd308ff8a6b17cb2), // 5^-129 + (0xddd0467c64bce4a0, 0xac7cb3f6d05ddbde), // 5^-128 + (0x8aa22c0dbef60ee4, 0x6bcdf07a423aa96b), // 5^-127 + (0xad4ab7112eb3929d, 0x86c16c98d2c953c6), // 5^-126 + (0xd89d64d57a607744, 0xe871c7bf077ba8b7), // 5^-125 + (0x87625f056c7c4a8b, 0x11471cd764ad4972), // 5^-124 + (0xa93af6c6c79b5d2d, 0xd598e40d3dd89bcf), // 5^-123 + (0xd389b47879823479, 0x4aff1d108d4ec2c3), // 5^-122 + (0x843610cb4bf160cb, 0xcedf722a585139ba), // 5^-121 + (0xa54394fe1eedb8fe, 0xc2974eb4ee658828), // 5^-120 + (0xce947a3da6a9273e, 0x733d226229feea32), // 5^-119 + (0x811ccc668829b887, 0x806357d5a3f525f), // 5^-118 + (0xa163ff802a3426a8, 0xca07c2dcb0cf26f7), // 5^-117 + (0xc9bcff6034c13052, 0xfc89b393dd02f0b5), // 5^-116 + (0xfc2c3f3841f17c67, 0xbbac2078d443ace2), // 5^-115 + (0x9d9ba7832936edc0, 0xd54b944b84aa4c0d), // 5^-114 + (0xc5029163f384a931, 0xa9e795e65d4df11), // 5^-113 + (0xf64335bcf065d37d, 0x4d4617b5ff4a16d5), // 5^-112 + (0x99ea0196163fa42e, 0x504bced1bf8e4e45), // 5^-111 + (0xc06481fb9bcf8d39, 0xe45ec2862f71e1d6), // 5^-110 + (0xf07da27a82c37088, 0x5d767327bb4e5a4c), // 5^-109 + (0x964e858c91ba2655, 0x3a6a07f8d510f86f), // 5^-108 + (0xbbe226efb628afea, 0x890489f70a55368b), // 5^-107 + (0xeadab0aba3b2dbe5, 0x2b45ac74ccea842e), // 5^-106 + (0x92c8ae6b464fc96f, 0x3b0b8bc90012929d), // 5^-105 + 
(0xb77ada0617e3bbcb, 0x9ce6ebb40173744), // 5^-104 + (0xe55990879ddcaabd, 0xcc420a6a101d0515), // 5^-103 + (0x8f57fa54c2a9eab6, 0x9fa946824a12232d), // 5^-102 + (0xb32df8e9f3546564, 0x47939822dc96abf9), // 5^-101 + (0xdff9772470297ebd, 0x59787e2b93bc56f7), // 5^-100 + (0x8bfbea76c619ef36, 0x57eb4edb3c55b65a), // 5^-99 + (0xaefae51477a06b03, 0xede622920b6b23f1), // 5^-98 + (0xdab99e59958885c4, 0xe95fab368e45eced), // 5^-97 + (0x88b402f7fd75539b, 0x11dbcb0218ebb414), // 5^-96 + (0xaae103b5fcd2a881, 0xd652bdc29f26a119), // 5^-95 + (0xd59944a37c0752a2, 0x4be76d3346f0495f), // 5^-94 + (0x857fcae62d8493a5, 0x6f70a4400c562ddb), // 5^-93 + (0xa6dfbd9fb8e5b88e, 0xcb4ccd500f6bb952), // 5^-92 + (0xd097ad07a71f26b2, 0x7e2000a41346a7a7), // 5^-91 + (0x825ecc24c873782f, 0x8ed400668c0c28c8), // 5^-90 + (0xa2f67f2dfa90563b, 0x728900802f0f32fa), // 5^-89 + (0xcbb41ef979346bca, 0x4f2b40a03ad2ffb9), // 5^-88 + (0xfea126b7d78186bc, 0xe2f610c84987bfa8), // 5^-87 + (0x9f24b832e6b0f436, 0xdd9ca7d2df4d7c9), // 5^-86 + (0xc6ede63fa05d3143, 0x91503d1c79720dbb), // 5^-85 + (0xf8a95fcf88747d94, 0x75a44c6397ce912a), // 5^-84 + (0x9b69dbe1b548ce7c, 0xc986afbe3ee11aba), // 5^-83 + (0xc24452da229b021b, 0xfbe85badce996168), // 5^-82 + (0xf2d56790ab41c2a2, 0xfae27299423fb9c3), // 5^-81 + (0x97c560ba6b0919a5, 0xdccd879fc967d41a), // 5^-80 + (0xbdb6b8e905cb600f, 0x5400e987bbc1c920), // 5^-79 + (0xed246723473e3813, 0x290123e9aab23b68), // 5^-78 + (0x9436c0760c86e30b, 0xf9a0b6720aaf6521), // 5^-77 + (0xb94470938fa89bce, 0xf808e40e8d5b3e69), // 5^-76 + (0xe7958cb87392c2c2, 0xb60b1d1230b20e04), // 5^-75 + (0x90bd77f3483bb9b9, 0xb1c6f22b5e6f48c2), // 5^-74 + (0xb4ecd5f01a4aa828, 0x1e38aeb6360b1af3), // 5^-73 + (0xe2280b6c20dd5232, 0x25c6da63c38de1b0), // 5^-72 + (0x8d590723948a535f, 0x579c487e5a38ad0e), // 5^-71 + (0xb0af48ec79ace837, 0x2d835a9df0c6d851), // 5^-70 + (0xdcdb1b2798182244, 0xf8e431456cf88e65), // 5^-69 + (0x8a08f0f8bf0f156b, 0x1b8e9ecb641b58ff), // 5^-68 + (0xac8b2d36eed2dac5, 0xe272467e3d222f3f), // 5^-67 + (0xd7adf884aa879177, 0x5b0ed81dcc6abb0f), // 5^-66 + (0x86ccbb52ea94baea, 0x98e947129fc2b4e9), // 5^-65 + (0xa87fea27a539e9a5, 0x3f2398d747b36224), // 5^-64 + (0xd29fe4b18e88640e, 0x8eec7f0d19a03aad), // 5^-63 + (0x83a3eeeef9153e89, 0x1953cf68300424ac), // 5^-62 + (0xa48ceaaab75a8e2b, 0x5fa8c3423c052dd7), // 5^-61 + (0xcdb02555653131b6, 0x3792f412cb06794d), // 5^-60 + (0x808e17555f3ebf11, 0xe2bbd88bbee40bd0), // 5^-59 + (0xa0b19d2ab70e6ed6, 0x5b6aceaeae9d0ec4), // 5^-58 + (0xc8de047564d20a8b, 0xf245825a5a445275), // 5^-57 + (0xfb158592be068d2e, 0xeed6e2f0f0d56712), // 5^-56 + (0x9ced737bb6c4183d, 0x55464dd69685606b), // 5^-55 + (0xc428d05aa4751e4c, 0xaa97e14c3c26b886), // 5^-54 + (0xf53304714d9265df, 0xd53dd99f4b3066a8), // 5^-53 + (0x993fe2c6d07b7fab, 0xe546a8038efe4029), // 5^-52 + (0xbf8fdb78849a5f96, 0xde98520472bdd033), // 5^-51 + (0xef73d256a5c0f77c, 0x963e66858f6d4440), // 5^-50 + (0x95a8637627989aad, 0xdde7001379a44aa8), // 5^-49 + (0xbb127c53b17ec159, 0x5560c018580d5d52), // 5^-48 + (0xe9d71b689dde71af, 0xaab8f01e6e10b4a6), // 5^-47 + (0x9226712162ab070d, 0xcab3961304ca70e8), // 5^-46 + (0xb6b00d69bb55c8d1, 0x3d607b97c5fd0d22), // 5^-45 + (0xe45c10c42a2b3b05, 0x8cb89a7db77c506a), // 5^-44 + (0x8eb98a7a9a5b04e3, 0x77f3608e92adb242), // 5^-43 + (0xb267ed1940f1c61c, 0x55f038b237591ed3), // 5^-42 + (0xdf01e85f912e37a3, 0x6b6c46dec52f6688), // 5^-41 + (0x8b61313bbabce2c6, 0x2323ac4b3b3da015), // 5^-40 + (0xae397d8aa96c1b77, 0xabec975e0a0d081a), // 5^-39 + (0xd9c7dced53c72255, 0x96e7bd358c904a21), // 5^-38 + 
(0x881cea14545c7575, 0x7e50d64177da2e54), // 5^-37 + (0xaa242499697392d2, 0xdde50bd1d5d0b9e9), // 5^-36 + (0xd4ad2dbfc3d07787, 0x955e4ec64b44e864), // 5^-35 + (0x84ec3c97da624ab4, 0xbd5af13bef0b113e), // 5^-34 + (0xa6274bbdd0fadd61, 0xecb1ad8aeacdd58e), // 5^-33 + (0xcfb11ead453994ba, 0x67de18eda5814af2), // 5^-32 + (0x81ceb32c4b43fcf4, 0x80eacf948770ced7), // 5^-31 + (0xa2425ff75e14fc31, 0xa1258379a94d028d), // 5^-30 + (0xcad2f7f5359a3b3e, 0x96ee45813a04330), // 5^-29 + (0xfd87b5f28300ca0d, 0x8bca9d6e188853fc), // 5^-28 + (0x9e74d1b791e07e48, 0x775ea264cf55347e), // 5^-27 + (0xc612062576589dda, 0x95364afe032a819e), // 5^-26 + (0xf79687aed3eec551, 0x3a83ddbd83f52205), // 5^-25 + (0x9abe14cd44753b52, 0xc4926a9672793543), // 5^-24 + (0xc16d9a0095928a27, 0x75b7053c0f178294), // 5^-23 + (0xf1c90080baf72cb1, 0x5324c68b12dd6339), // 5^-22 + (0x971da05074da7bee, 0xd3f6fc16ebca5e04), // 5^-21 + (0xbce5086492111aea, 0x88f4bb1ca6bcf585), // 5^-20 + (0xec1e4a7db69561a5, 0x2b31e9e3d06c32e6), // 5^-19 + (0x9392ee8e921d5d07, 0x3aff322e62439fd0), // 5^-18 + (0xb877aa3236a4b449, 0x9befeb9fad487c3), // 5^-17 + (0xe69594bec44de15b, 0x4c2ebe687989a9b4), // 5^-16 + (0x901d7cf73ab0acd9, 0xf9d37014bf60a11), // 5^-15 + (0xb424dc35095cd80f, 0x538484c19ef38c95), // 5^-14 + (0xe12e13424bb40e13, 0x2865a5f206b06fba), // 5^-13 + (0x8cbccc096f5088cb, 0xf93f87b7442e45d4), // 5^-12 + (0xafebff0bcb24aafe, 0xf78f69a51539d749), // 5^-11 + (0xdbe6fecebdedd5be, 0xb573440e5a884d1c), // 5^-10 + (0x89705f4136b4a597, 0x31680a88f8953031), // 5^-9 + (0xabcc77118461cefc, 0xfdc20d2b36ba7c3e), // 5^-8 + (0xd6bf94d5e57a42bc, 0x3d32907604691b4d), // 5^-7 + (0x8637bd05af6c69b5, 0xa63f9a49c2c1b110), // 5^-6 + (0xa7c5ac471b478423, 0xfcf80dc33721d54), // 5^-5 + (0xd1b71758e219652b, 0xd3c36113404ea4a9), // 5^-4 + (0x83126e978d4fdf3b, 0x645a1cac083126ea), // 5^-3 + (0xa3d70a3d70a3d70a, 0x3d70a3d70a3d70a4), // 5^-2 + (0xcccccccccccccccc, 0xcccccccccccccccd), // 5^-1 + (0x8000000000000000, 0x0), // 5^0 + (0xa000000000000000, 0x0), // 5^1 + (0xc800000000000000, 0x0), // 5^2 + (0xfa00000000000000, 0x0), // 5^3 + (0x9c40000000000000, 0x0), // 5^4 + (0xc350000000000000, 0x0), // 5^5 + (0xf424000000000000, 0x0), // 5^6 + (0x9896800000000000, 0x0), // 5^7 + (0xbebc200000000000, 0x0), // 5^8 + (0xee6b280000000000, 0x0), // 5^9 + (0x9502f90000000000, 0x0), // 5^10 + (0xba43b74000000000, 0x0), // 5^11 + (0xe8d4a51000000000, 0x0), // 5^12 + (0x9184e72a00000000, 0x0), // 5^13 + (0xb5e620f480000000, 0x0), // 5^14 + (0xe35fa931a0000000, 0x0), // 5^15 + (0x8e1bc9bf04000000, 0x0), // 5^16 + (0xb1a2bc2ec5000000, 0x0), // 5^17 + (0xde0b6b3a76400000, 0x0), // 5^18 + (0x8ac7230489e80000, 0x0), // 5^19 + (0xad78ebc5ac620000, 0x0), // 5^20 + (0xd8d726b7177a8000, 0x0), // 5^21 + (0x878678326eac9000, 0x0), // 5^22 + (0xa968163f0a57b400, 0x0), // 5^23 + (0xd3c21bcecceda100, 0x0), // 5^24 + (0x84595161401484a0, 0x0), // 5^25 + (0xa56fa5b99019a5c8, 0x0), // 5^26 + (0xcecb8f27f4200f3a, 0x0), // 5^27 + (0x813f3978f8940984, 0x4000000000000000), // 5^28 + (0xa18f07d736b90be5, 0x5000000000000000), // 5^29 + (0xc9f2c9cd04674ede, 0xa400000000000000), // 5^30 + (0xfc6f7c4045812296, 0x4d00000000000000), // 5^31 + (0x9dc5ada82b70b59d, 0xf020000000000000), // 5^32 + (0xc5371912364ce305, 0x6c28000000000000), // 5^33 + (0xf684df56c3e01bc6, 0xc732000000000000), // 5^34 + (0x9a130b963a6c115c, 0x3c7f400000000000), // 5^35 + (0xc097ce7bc90715b3, 0x4b9f100000000000), // 5^36 + (0xf0bdc21abb48db20, 0x1e86d40000000000), // 5^37 + (0x96769950b50d88f4, 0x1314448000000000), // 5^38 + 
(0xbc143fa4e250eb31, 0x17d955a000000000), // 5^39 + (0xeb194f8e1ae525fd, 0x5dcfab0800000000), // 5^40 + (0x92efd1b8d0cf37be, 0x5aa1cae500000000), // 5^41 + (0xb7abc627050305ad, 0xf14a3d9e40000000), // 5^42 + (0xe596b7b0c643c719, 0x6d9ccd05d0000000), // 5^43 + (0x8f7e32ce7bea5c6f, 0xe4820023a2000000), // 5^44 + (0xb35dbf821ae4f38b, 0xdda2802c8a800000), // 5^45 + (0xe0352f62a19e306e, 0xd50b2037ad200000), // 5^46 + (0x8c213d9da502de45, 0x4526f422cc340000), // 5^47 + (0xaf298d050e4395d6, 0x9670b12b7f410000), // 5^48 + (0xdaf3f04651d47b4c, 0x3c0cdd765f114000), // 5^49 + (0x88d8762bf324cd0f, 0xa5880a69fb6ac800), // 5^50 + (0xab0e93b6efee0053, 0x8eea0d047a457a00), // 5^51 + (0xd5d238a4abe98068, 0x72a4904598d6d880), // 5^52 + (0x85a36366eb71f041, 0x47a6da2b7f864750), // 5^53 + (0xa70c3c40a64e6c51, 0x999090b65f67d924), // 5^54 + (0xd0cf4b50cfe20765, 0xfff4b4e3f741cf6d), // 5^55 + (0x82818f1281ed449f, 0xbff8f10e7a8921a4), // 5^56 + (0xa321f2d7226895c7, 0xaff72d52192b6a0d), // 5^57 + (0xcbea6f8ceb02bb39, 0x9bf4f8a69f764490), // 5^58 + (0xfee50b7025c36a08, 0x2f236d04753d5b4), // 5^59 + (0x9f4f2726179a2245, 0x1d762422c946590), // 5^60 + (0xc722f0ef9d80aad6, 0x424d3ad2b7b97ef5), // 5^61 + (0xf8ebad2b84e0d58b, 0xd2e0898765a7deb2), // 5^62 + (0x9b934c3b330c8577, 0x63cc55f49f88eb2f), // 5^63 + (0xc2781f49ffcfa6d5, 0x3cbf6b71c76b25fb), // 5^64 + (0xf316271c7fc3908a, 0x8bef464e3945ef7a), // 5^65 + (0x97edd871cfda3a56, 0x97758bf0e3cbb5ac), // 5^66 + (0xbde94e8e43d0c8ec, 0x3d52eeed1cbea317), // 5^67 + (0xed63a231d4c4fb27, 0x4ca7aaa863ee4bdd), // 5^68 + (0x945e455f24fb1cf8, 0x8fe8caa93e74ef6a), // 5^69 + (0xb975d6b6ee39e436, 0xb3e2fd538e122b44), // 5^70 + (0xe7d34c64a9c85d44, 0x60dbbca87196b616), // 5^71 + (0x90e40fbeea1d3a4a, 0xbc8955e946fe31cd), // 5^72 + (0xb51d13aea4a488dd, 0x6babab6398bdbe41), // 5^73 + (0xe264589a4dcdab14, 0xc696963c7eed2dd1), // 5^74 + (0x8d7eb76070a08aec, 0xfc1e1de5cf543ca2), // 5^75 + (0xb0de65388cc8ada8, 0x3b25a55f43294bcb), // 5^76 + (0xdd15fe86affad912, 0x49ef0eb713f39ebe), // 5^77 + (0x8a2dbf142dfcc7ab, 0x6e3569326c784337), // 5^78 + (0xacb92ed9397bf996, 0x49c2c37f07965404), // 5^79 + (0xd7e77a8f87daf7fb, 0xdc33745ec97be906), // 5^80 + (0x86f0ac99b4e8dafd, 0x69a028bb3ded71a3), // 5^81 + (0xa8acd7c0222311bc, 0xc40832ea0d68ce0c), // 5^82 + (0xd2d80db02aabd62b, 0xf50a3fa490c30190), // 5^83 + (0x83c7088e1aab65db, 0x792667c6da79e0fa), // 5^84 + (0xa4b8cab1a1563f52, 0x577001b891185938), // 5^85 + (0xcde6fd5e09abcf26, 0xed4c0226b55e6f86), // 5^86 + (0x80b05e5ac60b6178, 0x544f8158315b05b4), // 5^87 + (0xa0dc75f1778e39d6, 0x696361ae3db1c721), // 5^88 + (0xc913936dd571c84c, 0x3bc3a19cd1e38e9), // 5^89 + (0xfb5878494ace3a5f, 0x4ab48a04065c723), // 5^90 + (0x9d174b2dcec0e47b, 0x62eb0d64283f9c76), // 5^91 + (0xc45d1df942711d9a, 0x3ba5d0bd324f8394), // 5^92 + (0xf5746577930d6500, 0xca8f44ec7ee36479), // 5^93 + (0x9968bf6abbe85f20, 0x7e998b13cf4e1ecb), // 5^94 + (0xbfc2ef456ae276e8, 0x9e3fedd8c321a67e), // 5^95 + (0xefb3ab16c59b14a2, 0xc5cfe94ef3ea101e), // 5^96 + (0x95d04aee3b80ece5, 0xbba1f1d158724a12), // 5^97 + (0xbb445da9ca61281f, 0x2a8a6e45ae8edc97), // 5^98 + (0xea1575143cf97226, 0xf52d09d71a3293bd), // 5^99 + (0x924d692ca61be758, 0x593c2626705f9c56), // 5^100 + (0xb6e0c377cfa2e12e, 0x6f8b2fb00c77836c), // 5^101 + (0xe498f455c38b997a, 0xb6dfb9c0f956447), // 5^102 + (0x8edf98b59a373fec, 0x4724bd4189bd5eac), // 5^103 + (0xb2977ee300c50fe7, 0x58edec91ec2cb657), // 5^104 + (0xdf3d5e9bc0f653e1, 0x2f2967b66737e3ed), // 5^105 + (0x8b865b215899f46c, 0xbd79e0d20082ee74), // 5^106 + 
(0xae67f1e9aec07187, 0xecd8590680a3aa11), // 5^107 + (0xda01ee641a708de9, 0xe80e6f4820cc9495), // 5^108 + (0x884134fe908658b2, 0x3109058d147fdcdd), // 5^109 + (0xaa51823e34a7eede, 0xbd4b46f0599fd415), // 5^110 + (0xd4e5e2cdc1d1ea96, 0x6c9e18ac7007c91a), // 5^111 + (0x850fadc09923329e, 0x3e2cf6bc604ddb0), // 5^112 + (0xa6539930bf6bff45, 0x84db8346b786151c), // 5^113 + (0xcfe87f7cef46ff16, 0xe612641865679a63), // 5^114 + (0x81f14fae158c5f6e, 0x4fcb7e8f3f60c07e), // 5^115 + (0xa26da3999aef7749, 0xe3be5e330f38f09d), // 5^116 + (0xcb090c8001ab551c, 0x5cadf5bfd3072cc5), // 5^117 + (0xfdcb4fa002162a63, 0x73d9732fc7c8f7f6), // 5^118 + (0x9e9f11c4014dda7e, 0x2867e7fddcdd9afa), // 5^119 + (0xc646d63501a1511d, 0xb281e1fd541501b8), // 5^120 + (0xf7d88bc24209a565, 0x1f225a7ca91a4226), // 5^121 + (0x9ae757596946075f, 0x3375788de9b06958), // 5^122 + (0xc1a12d2fc3978937, 0x52d6b1641c83ae), // 5^123 + (0xf209787bb47d6b84, 0xc0678c5dbd23a49a), // 5^124 + (0x9745eb4d50ce6332, 0xf840b7ba963646e0), // 5^125 + (0xbd176620a501fbff, 0xb650e5a93bc3d898), // 5^126 + (0xec5d3fa8ce427aff, 0xa3e51f138ab4cebe), // 5^127 + (0x93ba47c980e98cdf, 0xc66f336c36b10137), // 5^128 + (0xb8a8d9bbe123f017, 0xb80b0047445d4184), // 5^129 + (0xe6d3102ad96cec1d, 0xa60dc059157491e5), // 5^130 + (0x9043ea1ac7e41392, 0x87c89837ad68db2f), // 5^131 + (0xb454e4a179dd1877, 0x29babe4598c311fb), // 5^132 + (0xe16a1dc9d8545e94, 0xf4296dd6fef3d67a), // 5^133 + (0x8ce2529e2734bb1d, 0x1899e4a65f58660c), // 5^134 + (0xb01ae745b101e9e4, 0x5ec05dcff72e7f8f), // 5^135 + (0xdc21a1171d42645d, 0x76707543f4fa1f73), // 5^136 + (0x899504ae72497eba, 0x6a06494a791c53a8), // 5^137 + (0xabfa45da0edbde69, 0x487db9d17636892), // 5^138 + (0xd6f8d7509292d603, 0x45a9d2845d3c42b6), // 5^139 + (0x865b86925b9bc5c2, 0xb8a2392ba45a9b2), // 5^140 + (0xa7f26836f282b732, 0x8e6cac7768d7141e), // 5^141 + (0xd1ef0244af2364ff, 0x3207d795430cd926), // 5^142 + (0x8335616aed761f1f, 0x7f44e6bd49e807b8), // 5^143 + (0xa402b9c5a8d3a6e7, 0x5f16206c9c6209a6), // 5^144 + (0xcd036837130890a1, 0x36dba887c37a8c0f), // 5^145 + (0x802221226be55a64, 0xc2494954da2c9789), // 5^146 + (0xa02aa96b06deb0fd, 0xf2db9baa10b7bd6c), // 5^147 + (0xc83553c5c8965d3d, 0x6f92829494e5acc7), // 5^148 + (0xfa42a8b73abbf48c, 0xcb772339ba1f17f9), // 5^149 + (0x9c69a97284b578d7, 0xff2a760414536efb), // 5^150 + (0xc38413cf25e2d70d, 0xfef5138519684aba), // 5^151 + (0xf46518c2ef5b8cd1, 0x7eb258665fc25d69), // 5^152 + (0x98bf2f79d5993802, 0xef2f773ffbd97a61), // 5^153 + (0xbeeefb584aff8603, 0xaafb550ffacfd8fa), // 5^154 + (0xeeaaba2e5dbf6784, 0x95ba2a53f983cf38), // 5^155 + (0x952ab45cfa97a0b2, 0xdd945a747bf26183), // 5^156 + (0xba756174393d88df, 0x94f971119aeef9e4), // 5^157 + (0xe912b9d1478ceb17, 0x7a37cd5601aab85d), // 5^158 + (0x91abb422ccb812ee, 0xac62e055c10ab33a), // 5^159 + (0xb616a12b7fe617aa, 0x577b986b314d6009), // 5^160 + (0xe39c49765fdf9d94, 0xed5a7e85fda0b80b), // 5^161 + (0x8e41ade9fbebc27d, 0x14588f13be847307), // 5^162 + (0xb1d219647ae6b31c, 0x596eb2d8ae258fc8), // 5^163 + (0xde469fbd99a05fe3, 0x6fca5f8ed9aef3bb), // 5^164 + (0x8aec23d680043bee, 0x25de7bb9480d5854), // 5^165 + (0xada72ccc20054ae9, 0xaf561aa79a10ae6a), // 5^166 + (0xd910f7ff28069da4, 0x1b2ba1518094da04), // 5^167 + (0x87aa9aff79042286, 0x90fb44d2f05d0842), // 5^168 + (0xa99541bf57452b28, 0x353a1607ac744a53), // 5^169 + (0xd3fa922f2d1675f2, 0x42889b8997915ce8), // 5^170 + (0x847c9b5d7c2e09b7, 0x69956135febada11), // 5^171 + (0xa59bc234db398c25, 0x43fab9837e699095), // 5^172 + (0xcf02b2c21207ef2e, 0x94f967e45e03f4bb), // 5^173 + 
(0x8161afb94b44f57d, 0x1d1be0eebac278f5), // 5^174 + (0xa1ba1ba79e1632dc, 0x6462d92a69731732), // 5^175 + (0xca28a291859bbf93, 0x7d7b8f7503cfdcfe), // 5^176 + (0xfcb2cb35e702af78, 0x5cda735244c3d43e), // 5^177 + (0x9defbf01b061adab, 0x3a0888136afa64a7), // 5^178 + (0xc56baec21c7a1916, 0x88aaa1845b8fdd0), // 5^179 + (0xf6c69a72a3989f5b, 0x8aad549e57273d45), // 5^180 + (0x9a3c2087a63f6399, 0x36ac54e2f678864b), // 5^181 + (0xc0cb28a98fcf3c7f, 0x84576a1bb416a7dd), // 5^182 + (0xf0fdf2d3f3c30b9f, 0x656d44a2a11c51d5), // 5^183 + (0x969eb7c47859e743, 0x9f644ae5a4b1b325), // 5^184 + (0xbc4665b596706114, 0x873d5d9f0dde1fee), // 5^185 + (0xeb57ff22fc0c7959, 0xa90cb506d155a7ea), // 5^186 + (0x9316ff75dd87cbd8, 0x9a7f12442d588f2), // 5^187 + (0xb7dcbf5354e9bece, 0xc11ed6d538aeb2f), // 5^188 + (0xe5d3ef282a242e81, 0x8f1668c8a86da5fa), // 5^189 + (0x8fa475791a569d10, 0xf96e017d694487bc), // 5^190 + (0xb38d92d760ec4455, 0x37c981dcc395a9ac), // 5^191 + (0xe070f78d3927556a, 0x85bbe253f47b1417), // 5^192 + (0x8c469ab843b89562, 0x93956d7478ccec8e), // 5^193 + (0xaf58416654a6babb, 0x387ac8d1970027b2), // 5^194 + (0xdb2e51bfe9d0696a, 0x6997b05fcc0319e), // 5^195 + (0x88fcf317f22241e2, 0x441fece3bdf81f03), // 5^196 + (0xab3c2fddeeaad25a, 0xd527e81cad7626c3), // 5^197 + (0xd60b3bd56a5586f1, 0x8a71e223d8d3b074), // 5^198 + (0x85c7056562757456, 0xf6872d5667844e49), // 5^199 + (0xa738c6bebb12d16c, 0xb428f8ac016561db), // 5^200 + (0xd106f86e69d785c7, 0xe13336d701beba52), // 5^201 + (0x82a45b450226b39c, 0xecc0024661173473), // 5^202 + (0xa34d721642b06084, 0x27f002d7f95d0190), // 5^203 + (0xcc20ce9bd35c78a5, 0x31ec038df7b441f4), // 5^204 + (0xff290242c83396ce, 0x7e67047175a15271), // 5^205 + (0x9f79a169bd203e41, 0xf0062c6e984d386), // 5^206 + (0xc75809c42c684dd1, 0x52c07b78a3e60868), // 5^207 + (0xf92e0c3537826145, 0xa7709a56ccdf8a82), // 5^208 + (0x9bbcc7a142b17ccb, 0x88a66076400bb691), // 5^209 + (0xc2abf989935ddbfe, 0x6acff893d00ea435), // 5^210 + (0xf356f7ebf83552fe, 0x583f6b8c4124d43), // 5^211 + (0x98165af37b2153de, 0xc3727a337a8b704a), // 5^212 + (0xbe1bf1b059e9a8d6, 0x744f18c0592e4c5c), // 5^213 + (0xeda2ee1c7064130c, 0x1162def06f79df73), // 5^214 + (0x9485d4d1c63e8be7, 0x8addcb5645ac2ba8), // 5^215 + (0xb9a74a0637ce2ee1, 0x6d953e2bd7173692), // 5^216 + (0xe8111c87c5c1ba99, 0xc8fa8db6ccdd0437), // 5^217 + (0x910ab1d4db9914a0, 0x1d9c9892400a22a2), // 5^218 + (0xb54d5e4a127f59c8, 0x2503beb6d00cab4b), // 5^219 + (0xe2a0b5dc971f303a, 0x2e44ae64840fd61d), // 5^220 + (0x8da471a9de737e24, 0x5ceaecfed289e5d2), // 5^221 + (0xb10d8e1456105dad, 0x7425a83e872c5f47), // 5^222 + (0xdd50f1996b947518, 0xd12f124e28f77719), // 5^223 + (0x8a5296ffe33cc92f, 0x82bd6b70d99aaa6f), // 5^224 + (0xace73cbfdc0bfb7b, 0x636cc64d1001550b), // 5^225 + (0xd8210befd30efa5a, 0x3c47f7e05401aa4e), // 5^226 + (0x8714a775e3e95c78, 0x65acfaec34810a71), // 5^227 + (0xa8d9d1535ce3b396, 0x7f1839a741a14d0d), // 5^228 + (0xd31045a8341ca07c, 0x1ede48111209a050), // 5^229 + (0x83ea2b892091e44d, 0x934aed0aab460432), // 5^230 + (0xa4e4b66b68b65d60, 0xf81da84d5617853f), // 5^231 + (0xce1de40642e3f4b9, 0x36251260ab9d668e), // 5^232 + (0x80d2ae83e9ce78f3, 0xc1d72b7c6b426019), // 5^233 + (0xa1075a24e4421730, 0xb24cf65b8612f81f), // 5^234 + (0xc94930ae1d529cfc, 0xdee033f26797b627), // 5^235 + (0xfb9b7cd9a4a7443c, 0x169840ef017da3b1), // 5^236 + (0x9d412e0806e88aa5, 0x8e1f289560ee864e), // 5^237 + (0xc491798a08a2ad4e, 0xf1a6f2bab92a27e2), // 5^238 + (0xf5b5d7ec8acb58a2, 0xae10af696774b1db), // 5^239 + (0x9991a6f3d6bf1765, 0xacca6da1e0a8ef29), // 5^240 + 
(0xbff610b0cc6edd3f, 0x17fd090a58d32af3), // 5^241 + (0xeff394dcff8a948e, 0xddfc4b4cef07f5b0), // 5^242 + (0x95f83d0a1fb69cd9, 0x4abdaf101564f98e), // 5^243 + (0xbb764c4ca7a4440f, 0x9d6d1ad41abe37f1), // 5^244 + (0xea53df5fd18d5513, 0x84c86189216dc5ed), // 5^245 + (0x92746b9be2f8552c, 0x32fd3cf5b4e49bb4), // 5^246 + (0xb7118682dbb66a77, 0x3fbc8c33221dc2a1), // 5^247 + (0xe4d5e82392a40515, 0xfabaf3feaa5334a), // 5^248 + (0x8f05b1163ba6832d, 0x29cb4d87f2a7400e), // 5^249 + (0xb2c71d5bca9023f8, 0x743e20e9ef511012), // 5^250 + (0xdf78e4b2bd342cf6, 0x914da9246b255416), // 5^251 + (0x8bab8eefb6409c1a, 0x1ad089b6c2f7548e), // 5^252 + (0xae9672aba3d0c320, 0xa184ac2473b529b1), // 5^253 + (0xda3c0f568cc4f3e8, 0xc9e5d72d90a2741e), // 5^254 + (0x8865899617fb1871, 0x7e2fa67c7a658892), // 5^255 + (0xaa7eebfb9df9de8d, 0xddbb901b98feeab7), // 5^256 + (0xd51ea6fa85785631, 0x552a74227f3ea565), // 5^257 + (0x8533285c936b35de, 0xd53a88958f87275f), // 5^258 + (0xa67ff273b8460356, 0x8a892abaf368f137), // 5^259 + (0xd01fef10a657842c, 0x2d2b7569b0432d85), // 5^260 + (0x8213f56a67f6b29b, 0x9c3b29620e29fc73), // 5^261 + (0xa298f2c501f45f42, 0x8349f3ba91b47b8f), // 5^262 + (0xcb3f2f7642717713, 0x241c70a936219a73), // 5^263 + (0xfe0efb53d30dd4d7, 0xed238cd383aa0110), // 5^264 + (0x9ec95d1463e8a506, 0xf4363804324a40aa), // 5^265 + (0xc67bb4597ce2ce48, 0xb143c6053edcd0d5), // 5^266 + (0xf81aa16fdc1b81da, 0xdd94b7868e94050a), // 5^267 + (0x9b10a4e5e9913128, 0xca7cf2b4191c8326), // 5^268 + (0xc1d4ce1f63f57d72, 0xfd1c2f611f63a3f0), // 5^269 + (0xf24a01a73cf2dccf, 0xbc633b39673c8cec), // 5^270 + (0x976e41088617ca01, 0xd5be0503e085d813), // 5^271 + (0xbd49d14aa79dbc82, 0x4b2d8644d8a74e18), // 5^272 + (0xec9c459d51852ba2, 0xddf8e7d60ed1219e), // 5^273 + (0x93e1ab8252f33b45, 0xcabb90e5c942b503), // 5^274 + (0xb8da1662e7b00a17, 0x3d6a751f3b936243), // 5^275 + (0xe7109bfba19c0c9d, 0xcc512670a783ad4), // 5^276 + (0x906a617d450187e2, 0x27fb2b80668b24c5), // 5^277 + (0xb484f9dc9641e9da, 0xb1f9f660802dedf6), // 5^278 + (0xe1a63853bbd26451, 0x5e7873f8a0396973), // 5^279 + (0x8d07e33455637eb2, 0xdb0b487b6423e1e8), // 5^280 + (0xb049dc016abc5e5f, 0x91ce1a9a3d2cda62), // 5^281 + (0xdc5c5301c56b75f7, 0x7641a140cc7810fb), // 5^282 + (0x89b9b3e11b6329ba, 0xa9e904c87fcb0a9d), // 5^283 + (0xac2820d9623bf429, 0x546345fa9fbdcd44), // 5^284 + (0xd732290fbacaf133, 0xa97c177947ad4095), // 5^285 + (0x867f59a9d4bed6c0, 0x49ed8eabcccc485d), // 5^286 + (0xa81f301449ee8c70, 0x5c68f256bfff5a74), // 5^287 + (0xd226fc195c6a2f8c, 0x73832eec6fff3111), // 5^288 + (0x83585d8fd9c25db7, 0xc831fd53c5ff7eab), // 5^289 + (0xa42e74f3d032f525, 0xba3e7ca8b77f5e55), // 5^290 + (0xcd3a1230c43fb26f, 0x28ce1bd2e55f35eb), // 5^291 + (0x80444b5e7aa7cf85, 0x7980d163cf5b81b3), // 5^292 + (0xa0555e361951c366, 0xd7e105bcc332621f), // 5^293 + (0xc86ab5c39fa63440, 0x8dd9472bf3fefaa7), // 5^294 + (0xfa856334878fc150, 0xb14f98f6f0feb951), // 5^295 + (0x9c935e00d4b9d8d2, 0x6ed1bf9a569f33d3), // 5^296 + (0xc3b8358109e84f07, 0xa862f80ec4700c8), // 5^297 + (0xf4a642e14c6262c8, 0xcd27bb612758c0fa), // 5^298 + (0x98e7e9cccfbd7dbd, 0x8038d51cb897789c), // 5^299 + (0xbf21e44003acdd2c, 0xe0470a63e6bd56c3), // 5^300 + (0xeeea5d5004981478, 0x1858ccfce06cac74), // 5^301 + (0x95527a5202df0ccb, 0xf37801e0c43ebc8), // 5^302 + (0xbaa718e68396cffd, 0xd30560258f54e6ba), // 5^303 + (0xe950df20247c83fd, 0x47c6b82ef32a2069), // 5^304 + (0x91d28b7416cdd27e, 0x4cdc331d57fa5441), // 5^305 + (0xb6472e511c81471d, 0xe0133fe4adf8e952), // 5^306 + (0xe3d8f9e563a198e5, 0x58180fddd97723a6), // 5^307 + 
(0x8e679c2f5e44ff8f, 0x570f09eaa7ea7648), // 5^308 +]; diff --git a/vendor/minimal-lexical/src/table_small.rs b/vendor/minimal-lexical/src/table_small.rs new file mode 100644 index 00000000000000..9da69916fba041 --- /dev/null +++ b/vendor/minimal-lexical/src/table_small.rs @@ -0,0 +1,90 @@ +//! Pre-computed small tables for parsing decimal strings. + +#![doc(hidden)] +#![cfg(not(feature = "compact"))] + +/// Pre-computed, small powers-of-5. +pub const SMALL_INT_POW5: [u64; 28] = [ + 1, + 5, + 25, + 125, + 625, + 3125, + 15625, + 78125, + 390625, + 1953125, + 9765625, + 48828125, + 244140625, + 1220703125, + 6103515625, + 30517578125, + 152587890625, + 762939453125, + 3814697265625, + 19073486328125, + 95367431640625, + 476837158203125, + 2384185791015625, + 11920928955078125, + 59604644775390625, + 298023223876953125, + 1490116119384765625, + 7450580596923828125, +]; + +/// Pre-computed, small powers-of-10. +pub const SMALL_INT_POW10: [u64; 20] = [ + 1, + 10, + 100, + 1000, + 10000, + 100000, + 1000000, + 10000000, + 100000000, + 1000000000, + 10000000000, + 100000000000, + 1000000000000, + 10000000000000, + 100000000000000, + 1000000000000000, + 10000000000000000, + 100000000000000000, + 1000000000000000000, + 10000000000000000000, +]; + +/// Pre-computed, small powers-of-10. +pub const SMALL_F32_POW10: [f32; 16] = + [1e0, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e9, 1e10, 0., 0., 0., 0., 0.]; + +/// Pre-computed, small powers-of-10. +pub const SMALL_F64_POW10: [f64; 32] = [ + 1e0, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e9, 1e10, 1e11, 1e12, 1e13, 1e14, 1e15, 1e16, + 1e17, 1e18, 1e19, 1e20, 1e21, 1e22, 0., 0., 0., 0., 0., 0., 0., 0., 0., +]; + +/// Pre-computed large power-of-5 for 32-bit limbs. +#[cfg(not(all(target_pointer_width = "64", not(target_arch = "sparc"))))] +pub const LARGE_POW5: [u32; 10] = [ + 4279965485, 329373468, 4020270615, 2137533757, 4287402176, 1057042919, 1071430142, 2440757623, + 381945767, 46164893, +]; + +/// Pre-computed large power-of-5 for 64-bit limbs. +#[cfg(all(target_pointer_width = "64", not(target_arch = "sparc")))] +pub const LARGE_POW5: [u64; 5] = [ + 1414648277510068013, + 9180637584431281687, + 4539964771860779200, + 10482974169319127550, + 198276706040285095, +]; + +/// Step for large power-of-5 for 32-bit limbs. +pub const LARGE_POW5_STEP: u32 = 135; diff --git a/vendor/minimal-lexical/tests/bellerophon.rs b/vendor/minimal-lexical/tests/bellerophon.rs new file mode 100644 index 00000000000000..99cd89acfc8241 --- /dev/null +++ b/vendor/minimal-lexical/tests/bellerophon.rs @@ -0,0 +1,59 @@ +#![cfg(feature = "compact")] +#![allow(dead_code)] + +use minimal_lexical::bellerophon::bellerophon; +use minimal_lexical::extended_float::{extended_to_float, ExtendedFloat}; +use minimal_lexical::num::Float; +use minimal_lexical::number::Number; + +pub fn bellerophon_test( + xmant: u64, + xexp: i32, + many_digits: bool, + ymant: u64, + yexp: i32, +) { + let num = Number { + exponent: xexp, + mantissa: xmant, + many_digits, + }; + let xfp = bellerophon::(&num); + let yfp = ExtendedFloat { + mant: ymant, + exp: yexp, + }; + // Given us useful error messages if the floats are valid. 
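+    // Expected results built with the `INVALID_FP` bias have negative exponents,
+    // so the decoded float values are only included in the assertion message
+    // when both exponents are non-negative (i.e. both encodings are valid).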
+ if xfp.exp >= 0 && yfp.exp >= 0 { + assert!( + xfp == yfp, + "x != y, xfp={:?}, yfp={:?}, x={:?}, y={:?}", + xfp, + yfp, + extended_to_float::(xfp), + extended_to_float::(yfp) + ); + } else { + assert_eq!(xfp, yfp); + } +} + +pub fn compute_float32(q: i32, w: u64) -> (i32, u64) { + let num = Number { + exponent: q, + mantissa: w, + many_digits: false, + }; + let fp = bellerophon::(&num); + (fp.exp, fp.mant) +} + +pub fn compute_float64(q: i32, w: u64) -> (i32, u64) { + let num = Number { + exponent: q, + mantissa: w, + many_digits: false, + }; + let fp = bellerophon::(&num); + (fp.exp, fp.mant) +} diff --git a/vendor/minimal-lexical/tests/bellerophon_tests.rs b/vendor/minimal-lexical/tests/bellerophon_tests.rs new file mode 100644 index 00000000000000..f5826c615d6d0b --- /dev/null +++ b/vendor/minimal-lexical/tests/bellerophon_tests.rs @@ -0,0 +1,231 @@ +#![cfg(feature = "compact")] + +mod bellerophon; + +use bellerophon::{bellerophon_test, compute_float32, compute_float64}; +use minimal_lexical::num::Float; + +#[test] +fn halfway_round_down_test() { + // Halfway, round-down tests + bellerophon_test::(9007199254740992, 0, false, 0, 1076); + bellerophon_test::( + 9007199254740993, + 0, + false, + 9223372036854776832, + 1065 + f64::INVALID_FP, + ); + bellerophon_test::(9007199254740994, 0, false, 1, 1076); + + bellerophon_test::(18014398509481984, 0, false, 0, 1077); + bellerophon_test::( + 18014398509481986, + 0, + false, + 9223372036854776832, + 1066 + f64::INVALID_FP, + ); + bellerophon_test::(18014398509481988, 0, false, 1, 1077); + + bellerophon_test::(9223372036854775808, 0, false, 0, 1086); + bellerophon_test::( + 9223372036854776832, + 0, + false, + 9223372036854776832, + 1075 + f64::INVALID_FP, + ); + bellerophon_test::(9223372036854777856, 0, false, 1, 1086); + + // Add a 0 but say we're truncated. + bellerophon_test::(9007199254740992000, -3, true, 0, 1076); + bellerophon_test::( + 9007199254740993000, + -3, + true, + 9223372036854776832, + 1065 + f64::INVALID_FP, + ); + bellerophon_test::(9007199254740994000, -3, true, 1, 1076); +} + +#[test] +fn halfway_round_up_test() { + // Halfway, round-up tests + bellerophon_test::(9007199254740994, 0, false, 1, 1076); + bellerophon_test::( + 9007199254740995, + 0, + false, + 9223372036854778880, + 1065 + f64::INVALID_FP, + ); + bellerophon_test::(9007199254740996, 0, false, 2, 1076); + + bellerophon_test::(18014398509481988, 0, false, 1, 1077); + bellerophon_test::( + 18014398509481990, + 0, + false, + 9223372036854778880, + 1066 + f64::INVALID_FP, + ); + bellerophon_test::(18014398509481992, 0, false, 2, 1077); + + bellerophon_test::(9223372036854777856, 0, false, 1, 1086); + bellerophon_test::( + 9223372036854778880, + 0, + false, + 9223372036854778880, + 1075 + f64::INVALID_FP, + ); + bellerophon_test::(9223372036854779904, 0, false, 2, 1086); + + // Add a 0 but say we're truncated. 
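+    // The mantissas here are shifted up by three decimal digits (and the
+    // exponent lowered by 3) with `many_digits` set, simulating longer,
+    // truncated inputs at and around the halfway value.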
+ bellerophon_test::(9007199254740994000, -3, true, 1, 1076); + bellerophon_test::( + 9007199254740994990, + -3, + true, + 9223372036854778869, + 1065 + f64::INVALID_FP, + ); + bellerophon_test::( + 9007199254740995000, + -3, + true, + 9223372036854778879, + 1065 + f64::INVALID_FP, + ); + bellerophon_test::( + 9007199254740995010, + -3, + true, + 9223372036854778890, + 1065 + f64::INVALID_FP, + ); + bellerophon_test::(9007199254740995050, -3, true, 2, 1076); + bellerophon_test::(9007199254740996000, -3, true, 2, 1076); +} + +#[test] +fn extremes_test() { + // Need to check we get proper results with rounding for near-infinity + // and near-zero and/or denormal floats. + bellerophon_test::(5, -324, false, 1, 0); + bellerophon_test::(10, -324, false, 2, 0); + // This is very close to 2.4703282292062327206e-342. + bellerophon_test::( + 2470328229206232720, + -342, + false, + 18446744073709551608, + -64 + f64::INVALID_FP, + ); + bellerophon_test::(2470328229206232721, -342, false, 9223372036854775808, -32831); + bellerophon_test::(2470328229206232725, -342, false, 9223372036854775824, -32831); + bellerophon_test::(2470328229206232726, -342, false, 1, 0); + bellerophon_test::(2470328229206232730, -342, false, 1, 0); + // Check very close to literal infinity. + // 17.976931348623155 + // 1.797693134862315508561243283845062402343434371574593359244049e+308 + // 1.797693134862315708145274237317043567980705675258449965989175e+308 + bellerophon_test::(17976931348623155, 292, false, 4503599627370494, 2046); + bellerophon_test::(17976931348623156, 292, false, 4503599627370494, 2046); + bellerophon_test::(1797693134862315605, 290, false, 4503599627370494, 2046); + bellerophon_test::(1797693134862315607, 290, false, 4503599627370494, 2046); + bellerophon_test::(1797693134862315608, 290, false, 18446744073709548540, -30733); + bellerophon_test::(1797693134862315609, 290, false, 18446744073709548550, -30733); + bellerophon_test::(179769313486231561, 291, false, 4503599627370495, 2046); + bellerophon_test::(17976931348623157, 292, false, 4503599627370495, 2046); + + // Check existing issues and underflow. + bellerophon_test::(2470328229206232726, -343, false, 0, 0); + bellerophon_test::(2470328229206232726, -342, false, 1, 0); + bellerophon_test::(1, -250, false, 1945308223406668, 192); + bellerophon_test::(1, -150, false, 2867420733609077, 524); + bellerophon_test::(1, -45, false, 1924152549665465, 873); + bellerophon_test::(1, -40, false, 400386103400348, 890); + bellerophon_test::(1, -20, false, 2142540351554083, 956); + bellerophon_test::(1, 0, false, 0, 1023); + bellerophon_test::(1, 20, false, 1599915997629504, 1089); + bellerophon_test::(1, 40, false, 3768206498159781, 1155); + bellerophon_test::(1, 150, false, 999684479948463, 1521); + bellerophon_test::(1, 250, false, 1786584717939204, 1853); + // Minimum positive normal float. + bellerophon_test::(22250738585072014, -324, false, 0, 1); + // Maximum positive subnormal float. + bellerophon_test::(2225073858507201, -323, false, 4503599627370495, 0); + // Next highest subnormal float. 
+ bellerophon_test::(22250738585072004, -324, false, 4503599627370494, 0); + bellerophon_test::(22250738585072006, -324, false, 4503599627370494, 0); + bellerophon_test::(22250738585072007, -324, false, 4503599627370495, 0); + bellerophon_test::(222507385850720062, -325, false, 4503599627370494, 0); + bellerophon_test::(222507385850720063, -325, false, 4503599627370494, 0); + bellerophon_test::(222507385850720064, -325, false, 4503599627370494, 0); + bellerophon_test::(2225073858507200641, -326, false, 18446744073709545462, -32779); + bellerophon_test::(2225073858507200642, -326, false, 18446744073709545472, -32779); + bellerophon_test::(222507385850720065, -325, false, 4503599627370495, 0); +} + +#[test] +fn compute_float_f32_test() { + // These test near-halfway cases for single-precision floats. + assert_eq!(compute_float32(0, 16777216), (151, 0)); + assert_eq!(compute_float32(0, 16777217), (111 + f32::INVALID_FP, 9223372586610589696)); + assert_eq!(compute_float32(0, 16777218), (151, 1)); + assert_eq!(compute_float32(0, 16777219), (111 + f32::INVALID_FP, 9223373686122217472)); + assert_eq!(compute_float32(0, 16777220), (151, 2)); + + // These are examples of the above tests, with + // digits from the exponent shifted to the mantissa. + assert_eq!(compute_float32(-10, 167772160000000000), (151, 0)); + assert_eq!( + compute_float32(-10, 167772170000000000), + (111 + f32::INVALID_FP, 9223372586610589696) + ); + assert_eq!(compute_float32(-10, 167772180000000000), (151, 1)); + // Let's check the lines to see if anything is different in table... + assert_eq!( + compute_float32(-10, 167772190000000000), + (111 + f32::INVALID_FP, 9223373686122217472) + ); + assert_eq!(compute_float32(-10, 167772200000000000), (151, 2)); +} + +#[test] +fn compute_float_f64_test() { + // These test near-halfway cases for double-precision floats. + assert_eq!(compute_float64(0, 9007199254740992), (1076, 0)); + assert_eq!(compute_float64(0, 9007199254740993), (1065 + f64::INVALID_FP, 9223372036854776832)); + assert_eq!(compute_float64(0, 9007199254740994), (1076, 1)); + assert_eq!(compute_float64(0, 9007199254740995), (1065 + f64::INVALID_FP, 9223372036854778880)); + assert_eq!(compute_float64(0, 9007199254740996), (1076, 2)); + assert_eq!(compute_float64(0, 18014398509481984), (1077, 0)); + assert_eq!( + compute_float64(0, 18014398509481986), + (1066 + f64::INVALID_FP, 9223372036854776832) + ); + assert_eq!(compute_float64(0, 18014398509481988), (1077, 1)); + assert_eq!( + compute_float64(0, 18014398509481990), + (1066 + f64::INVALID_FP, 9223372036854778880) + ); + assert_eq!(compute_float64(0, 18014398509481992), (1077, 2)); + + // These are examples of the above tests, with + // digits from the exponent shifted to the mantissa. + assert_eq!(compute_float64(-3, 9007199254740992000), (1076, 0)); + assert_eq!( + compute_float64(-3, 9007199254740993000), + (1065 + f64::INVALID_FP, 9223372036854776832) + ); + assert_eq!(compute_float64(-3, 9007199254740994000), (1076, 1)); + assert_eq!( + compute_float64(-3, 9007199254740995000), + (1065 + f64::INVALID_FP, 9223372036854778879) + ); + assert_eq!(compute_float64(-3, 9007199254740996000), (1076, 2)); +} diff --git a/vendor/minimal-lexical/tests/integration_tests.rs b/vendor/minimal-lexical/tests/integration_tests.rs new file mode 100644 index 00000000000000..a8f2ff8a0ec63b --- /dev/null +++ b/vendor/minimal-lexical/tests/integration_tests.rs @@ -0,0 +1,228 @@ +/// Find and parse sign and get remaining bytes. 
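+///
+/// For example, `parse_sign(b"-12")` returns `(false, b"12")` and
+/// `parse_sign(b"12")` returns `(true, b"12")`.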
+#[inline] +fn parse_sign<'a>(bytes: &'a [u8]) -> (bool, &'a [u8]) { + match bytes.get(0) { + Some(&b'+') => (true, &bytes[1..]), + Some(&b'-') => (false, &bytes[1..]), + _ => (true, bytes), + } +} + +// Convert u8 to digit. +#[inline] +fn to_digit(c: u8) -> Option { + (c as char).to_digit(10) +} + +// Add digit from exponent. +#[inline] +fn add_digit_i32(value: i32, digit: u32) -> Option { + return value.checked_mul(10)?.checked_add(digit as i32); +} + +// Subtract digit from exponent. +#[inline] +fn sub_digit_i32(value: i32, digit: u32) -> Option { + return value.checked_mul(10)?.checked_sub(digit as i32); +} + +// Convert character to digit. +#[inline] +fn is_digit(c: u8) -> bool { + to_digit(c).is_some() +} + +// Split buffer at index. +#[inline] +fn split_at_index<'a>(digits: &'a [u8], index: usize) -> (&'a [u8], &'a [u8]) { + (&digits[..index], &digits[index..]) +} + +/// Consume until a an invalid digit is found. +/// +/// - `digits` - Slice containing 0 or more digits. +#[inline] +fn consume_digits<'a>(digits: &'a [u8]) -> (&'a [u8], &'a [u8]) { + // Consume all digits. + let mut index = 0; + while index < digits.len() && is_digit(digits[index]) { + index += 1; + } + split_at_index(digits, index) +} + +// Trim leading 0s. +#[inline] +fn ltrim_zero<'a>(bytes: &'a [u8]) -> &'a [u8] { + let count = bytes.iter().take_while(|&&si| si == b'0').count(); + &bytes[count..] +} + +// Trim trailing 0s. +#[inline] +fn rtrim_zero<'a>(bytes: &'a [u8]) -> &'a [u8] { + let count = bytes.iter().rev().take_while(|&&si| si == b'0').count(); + let index = bytes.len() - count; + &bytes[..index] +} + +// PARSERS +// ------- + +/// Parse the exponent of the float. +/// +/// * `exponent` - Slice containing the exponent digits. +/// * `is_positive` - If the exponent sign is positive. +fn parse_exponent(exponent: &[u8], is_positive: bool) -> i32 { + // Parse the sign bit or current data. + let mut value: i32 = 0; + match is_positive { + true => { + for c in exponent { + value = match add_digit_i32(value, to_digit(*c).unwrap()) { + Some(v) => v, + None => return i32::max_value(), + }; + } + }, + false => { + for c in exponent { + value = match sub_digit_i32(value, to_digit(*c).unwrap()) { + Some(v) => v, + None => return i32::min_value(), + }; + } + }, + } + + value +} + +pub fn case_insensitive_starts_with<'a, 'b, Iter1, Iter2>(mut x: Iter1, mut y: Iter2) -> bool +where + Iter1: Iterator, + Iter2: Iterator, +{ + // We use a faster optimization here for ASCII letters, which NaN + // and infinite strings **must** be. [A-Z] is 0x41-0x5A, while + // [a-z] is 0x61-0x7A. Therefore, the xor must be 0 or 32 if they + // are case-insensitive equal, but only if at least 1 of the inputs + // is an ASCII letter. + loop { + let yi = y.next(); + if yi.is_none() { + return true; + } + let yi = *yi.unwrap(); + let is_not_equal = x.next().map_or(true, |&xi| { + let xor = xi ^ yi; + xor != 0 && xor != 0x20 + }); + if is_not_equal { + return false; + } + } +} + +/// Parse float from input bytes, returning the float and the remaining bytes. +/// +/// * `bytes` - Array of bytes leading with float-data. +pub fn parse_float<'a, F>(bytes: &'a [u8]) -> (F, &'a [u8]) +where + F: minimal_lexical::Float, +{ + let start = bytes; + + // Parse the sign. 
+ let (is_positive, bytes) = parse_sign(bytes); + + // Check NaN, Inf, Infinity + if case_insensitive_starts_with(bytes.iter(), b"NaN".iter()) { + let mut float = F::from_bits(F::EXPONENT_MASK | (F::HIDDEN_BIT_MASK >> 1)); + if !is_positive { + float = -float; + } + return (float, &bytes[3..]); + } else if case_insensitive_starts_with(bytes.iter(), b"Infinity".iter()) { + let mut float = F::from_bits(F::EXPONENT_MASK); + if !is_positive { + float = -float; + } + return (float, &bytes[8..]); + } else if case_insensitive_starts_with(bytes.iter(), b"inf".iter()) { + let mut float = F::from_bits(F::EXPONENT_MASK); + if !is_positive { + float = -float; + } + return (float, &bytes[3..]); + } + + // Extract and parse the float components: + // 1. Integer + // 2. Fraction + // 3. Exponent + let (integer_slc, bytes) = consume_digits(bytes); + let (fraction_slc, bytes) = match bytes.first() { + Some(&b'.') => consume_digits(&bytes[1..]), + _ => (&bytes[..0], bytes), + }; + let (exponent, bytes) = match bytes.first() { + Some(&b'e') | Some(&b'E') => { + // Extract and parse the exponent. + let (is_positive, bytes) = parse_sign(&bytes[1..]); + let (exponent, bytes) = consume_digits(bytes); + (parse_exponent(exponent, is_positive), bytes) + }, + _ => (0, bytes), + }; + + if bytes.len() == start.len() { + return (F::from_u64(0), bytes); + } + + // Note: You may want to check and validate the float data here: + // 1). Many floats require integer or fraction digits, if a fraction + // is present. + // 2). All floats require either integer or fraction digits. + // 3). Some floats do not allow a '+' sign before the significant digits. + // 4). Many floats require exponent digits after the exponent symbol. + // 5). Some floats do not allow a '+' sign before the exponent. + + // We now need to trim leading and trailing 0s from the integer + // and fraction, respectively. This is required to make the + // fast and moderate paths more efficient, and for the slow + // path. + let integer_slc = ltrim_zero(integer_slc); + let fraction_slc = rtrim_zero(fraction_slc); + + // Create the float and return our data. + let mut float: F = + minimal_lexical::parse_float(integer_slc.iter(), fraction_slc.iter(), exponent); + if !is_positive { + float = -float; + } + + (float, bytes) +} + +macro_rules! b { + ($x:literal) => { + $x.as_bytes() + }; +} + +#[test] +fn f32_test() { + assert_eq!( + (184467440000000000000.0, b!("\x00\x00006")), + parse_float::(b"000184467440737095516150\x00\x00006") + ); +} + +#[test] +fn f64_test() { + assert_eq!( + (184467440737095500000.0, b!("\x00\x00006")), + parse_float::(b"000184467440737095516150\x00\x00006") + ); +} diff --git a/vendor/minimal-lexical/tests/lemire_tests.rs b/vendor/minimal-lexical/tests/lemire_tests.rs new file mode 100644 index 00000000000000..0523ca5b2abfc6 --- /dev/null +++ b/vendor/minimal-lexical/tests/lemire_tests.rs @@ -0,0 +1,378 @@ +//! These tests are adapted from the Rust core library's unittests. 
+ +#![cfg(not(feature = "compact"))] + +use minimal_lexical::lemire; +use minimal_lexical::num::Float; + +fn compute_error32(q: i32, w: u64) -> (i32, u64) { + let fp = lemire::compute_error::(q, w); + (fp.exp, fp.mant) +} + +fn compute_error64(q: i32, w: u64) -> (i32, u64) { + let fp = lemire::compute_error::(q, w); + (fp.exp, fp.mant) +} + +fn compute_error_scaled32(q: i32, w: u64, lz: i32) -> (i32, u64) { + let fp = lemire::compute_error_scaled::(q, w, lz); + (fp.exp, fp.mant) +} + +fn compute_error_scaled64(q: i32, w: u64, lz: i32) -> (i32, u64) { + let fp = lemire::compute_error_scaled::(q, w, lz); + (fp.exp, fp.mant) +} + +fn compute_float32(q: i32, w: u64) -> (i32, u64) { + let fp = lemire::compute_float::(q, w); + (fp.exp, fp.mant) +} + +fn compute_float64(q: i32, w: u64) -> (i32, u64) { + let fp = lemire::compute_float::(q, w); + (fp.exp, fp.mant) +} + +#[test] +fn compute_error32_test() { + // These test near-halfway cases for single-precision floats. + assert_eq!(compute_error32(0, 16777216), (111 + f32::INVALID_FP, 9223372036854775808)); + assert_eq!(compute_error32(0, 16777217), (111 + f32::INVALID_FP, 9223372586610589696)); + assert_eq!(compute_error32(0, 16777218), (111 + f32::INVALID_FP, 9223373136366403584)); + assert_eq!(compute_error32(0, 16777219), (111 + f32::INVALID_FP, 9223373686122217472)); + assert_eq!(compute_error32(0, 16777220), (111 + f32::INVALID_FP, 9223374235878031360)); + + // These are examples of the above tests, with + // digits from the exponent shifted to the mantissa. + assert_eq!( + compute_error32(-10, 167772160000000000), + (111 + f32::INVALID_FP, 9223372036854775808) + ); + assert_eq!( + compute_error32(-10, 167772170000000000), + (111 + f32::INVALID_FP, 9223372586610589696) + ); + assert_eq!( + compute_error32(-10, 167772180000000000), + (111 + f32::INVALID_FP, 9223373136366403584) + ); + // Let's check the lines to see if anything is different in table... + assert_eq!( + compute_error32(-10, 167772190000000000), + (111 + f32::INVALID_FP, 9223373686122217472) + ); + assert_eq!( + compute_error32(-10, 167772200000000000), + (111 + f32::INVALID_FP, 9223374235878031360) + ); +} + +#[test] +fn compute_error64_test() { + // These test near-halfway cases for double-precision floats. + assert_eq!(compute_error64(0, 9007199254740992), (1065 + f64::INVALID_FP, 9223372036854775808)); + assert_eq!(compute_error64(0, 9007199254740993), (1065 + f64::INVALID_FP, 9223372036854776832)); + assert_eq!(compute_error64(0, 9007199254740994), (1065 + f64::INVALID_FP, 9223372036854777856)); + assert_eq!(compute_error64(0, 9007199254740995), (1065 + f64::INVALID_FP, 9223372036854778880)); + assert_eq!(compute_error64(0, 9007199254740996), (1065 + f64::INVALID_FP, 9223372036854779904)); + assert_eq!( + compute_error64(0, 18014398509481984), + (1066 + f64::INVALID_FP, 9223372036854775808) + ); + assert_eq!( + compute_error64(0, 18014398509481986), + (1066 + f64::INVALID_FP, 9223372036854776832) + ); + assert_eq!( + compute_error64(0, 18014398509481988), + (1066 + f64::INVALID_FP, 9223372036854777856) + ); + assert_eq!( + compute_error64(0, 18014398509481990), + (1066 + f64::INVALID_FP, 9223372036854778880) + ); + assert_eq!( + compute_error64(0, 18014398509481992), + (1066 + f64::INVALID_FP, 9223372036854779904) + ); + + // Test a much closer set of examples. 
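+ // Editor's note (added): the near-halfway inputs above sit at and just above
+ // the f64 integer-precision limit (2^53); a plain std cast shows the same
+ // ties-to-even rounding that the compute_error helpers probe. Illustrative
+ // std-only check, not an upstream test.
+ assert_eq!(9007199254740993_u64 as f64, 9007199254740992.0); // 2^53 + 1 is an exact tie
+ assert_eq!(9007199254740995_u64 as f64, 9007199254740996.0); // ties round to the even neighbour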
+ assert_eq!( + compute_error64(0, 9007199254740991), + (1064 + f64::INVALID_FP, 18446744073709549568) + ); + assert_eq!( + compute_error64(0, 9223372036854776831), + (1075 + f64::INVALID_FP, 9223372036854776830) + ); + assert_eq!( + compute_error64(0, 9223372036854776832), + (1075 + f64::INVALID_FP, 9223372036854776832) + ); + assert_eq!( + compute_error64(0, 9223372036854776833), + (1075 + f64::INVALID_FP, 9223372036854776832) + ); + assert_eq!( + compute_error64(-42, 9123456727292927), + (925 + f64::INVALID_FP, 13021432563531497894) + ); + assert_eq!( + compute_error64(-43, 91234567272929275), + (925 + f64::INVALID_FP, 13021432563531498606) + ); + assert_eq!( + compute_error64(-42, 9123456727292928), + (925 + f64::INVALID_FP, 13021432563531499320) + ); + + // These are examples of the above tests, with + // digits from the exponent shifted to the mantissa. + assert_eq!( + compute_error64(-3, 9007199254740992000), + (1065 + f64::INVALID_FP, 9223372036854775808) + ); + assert_eq!( + compute_error64(-3, 9007199254740993000), + (1065 + f64::INVALID_FP, 9223372036854776832) + ); + assert_eq!( + compute_error64(-3, 9007199254740994000), + (1065 + f64::INVALID_FP, 9223372036854777856) + ); + assert_eq!( + compute_error64(-3, 9007199254740995000), + (1065 + f64::INVALID_FP, 9223372036854778880) + ); + assert_eq!( + compute_error64(-3, 9007199254740996000), + (1065 + f64::INVALID_FP, 9223372036854779904) + ); + + // Test from errors in atof. + assert_eq!( + compute_error64(-18, 1000000178813934326), + (1012 + f64::INVALID_FP, 9223373686122217470) + ); + + // Check edge-cases from previous errors. + assert_eq!( + compute_error64(-342, 2470328229206232720), + (-64 + f64::INVALID_FP, 18446744073709551608) + ); +} + +#[test] +fn compute_error_scaled32_test() { + // These are the same examples above, just using pre-computed scaled values. + + // These test near-halfway cases for single-precision floats. + assert_eq!( + compute_error_scaled32(0, 4611686018427387904, 39), + (111 + f32::INVALID_FP, 9223372036854775808) + ); + assert_eq!( + compute_error_scaled32(0, 4611686293305294848, 39), + (111 + f32::INVALID_FP, 9223372586610589696) + ); + assert_eq!( + compute_error_scaled32(0, 4611686568183201792, 39), + (111 + f32::INVALID_FP, 9223373136366403584) + ); + assert_eq!( + compute_error_scaled32(0, 4611686843061108736, 39), + (111 + f32::INVALID_FP, 9223373686122217472) + ); + assert_eq!( + compute_error_scaled32(0, 4611687117939015680, 39), + (111 + f32::INVALID_FP, 9223374235878031360) + ); + + assert_eq!( + compute_error_scaled32(-10, 9223372036854775808, 6), + (111 + f32::INVALID_FP, 9223372036854775808) + ); + assert_eq!( + compute_error_scaled32(-10, 9223372586610589696, 6), + (111 + f32::INVALID_FP, 9223372586610589696) + ); + assert_eq!( + compute_error_scaled32(-10, 9223373136366403584, 6), + (111 + f32::INVALID_FP, 9223373136366403584) + ); + assert_eq!( + compute_error_scaled32(-10, 9223373686122217472, 6), + (111 + f32::INVALID_FP, 9223373686122217472) + ); + assert_eq!( + compute_error_scaled32(-10, 9223374235878031360, 6), + (111 + f32::INVALID_FP, 9223374235878031360) + ); +} + +#[test] +fn compute_error_scaled64_test() { + // These are the same examples above, just using pre-computed scaled values. + + // These test near-halfway cases for double-precision floats. 
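+ // Editor's note (added): arithmetic cross-check of the pre-computed inputs
+ // used below; 2^53 has 10 leading zero bits as a u64, and the first scaled
+ // mantissa below equals 2^53 << 9 (i.e. 2^62). Illustrative only.
+ assert_eq!(9007199254740992_u64.leading_zeros(), 10);
+ assert_eq!(9007199254740992_u64 << 9, 4611686018427387904_u64);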
+ assert_eq!( + compute_error_scaled64(0, 4611686018427387904, 10), + (1065 + f64::INVALID_FP, 9223372036854775808) + ); + assert_eq!( + compute_error_scaled64(0, 4611686018427388416, 10), + (1065 + f64::INVALID_FP, 9223372036854776832) + ); + assert_eq!( + compute_error_scaled64(0, 4611686018427388928, 10), + (1065 + f64::INVALID_FP, 9223372036854777856) + ); + assert_eq!( + compute_error_scaled64(0, 4611686018427389440, 10), + (1065 + f64::INVALID_FP, 9223372036854778880) + ); + assert_eq!( + compute_error_scaled64(0, 4611686018427389952, 10), + (1065 + f64::INVALID_FP, 9223372036854779904) + ); + assert_eq!( + compute_error_scaled64(0, 4611686018427387904, 9), + (1066 + f64::INVALID_FP, 9223372036854775808) + ); + assert_eq!( + compute_error_scaled64(0, 4611686018427388416, 9), + (1066 + f64::INVALID_FP, 9223372036854776832) + ); + assert_eq!( + compute_error_scaled64(0, 4611686018427388928, 9), + (1066 + f64::INVALID_FP, 9223372036854777856) + ); + assert_eq!( + compute_error_scaled64(0, 4611686018427389440, 9), + (1066 + f64::INVALID_FP, 9223372036854778880) + ); + assert_eq!( + compute_error_scaled64(0, 4611686018427389952, 9), + (1066 + f64::INVALID_FP, 9223372036854779904) + ); + + // Test a much closer set of examples. + assert_eq!( + compute_error_scaled64(0, 9223372036854774784, 11), + (1064 + f64::INVALID_FP, 18446744073709549568) + ); + assert_eq!( + compute_error_scaled64(0, 4611686018427388415, 0), + (1075 + f64::INVALID_FP, 9223372036854776830) + ); + assert_eq!( + compute_error_scaled64(0, 4611686018427388416, 0), + (1075 + f64::INVALID_FP, 9223372036854776832) + ); + assert_eq!( + compute_error_scaled64(0, 4611686018427388416, 0), + (1075 + f64::INVALID_FP, 9223372036854776832) + ); + assert_eq!( + compute_error_scaled64(-42, 6510716281765748947, 10), + (925 + f64::INVALID_FP, 13021432563531497894) + ); + assert_eq!( + compute_error_scaled64(-43, 6510716281765749303, 7), + (925 + f64::INVALID_FP, 13021432563531498606) + ); + assert_eq!( + compute_error_scaled64(-42, 6510716281765749660, 10), + (925 + f64::INVALID_FP, 13021432563531499320) + ); + + // These are examples of the above tests, with + // digits from the exponent shifted to the mantissa. + assert_eq!( + compute_error_scaled64(-3, 9223372036854775808, 1), + (1065 + f64::INVALID_FP, 9223372036854775808) + ); + assert_eq!( + compute_error_scaled64(-3, 9223372036854776832, 1), + (1065 + f64::INVALID_FP, 9223372036854776832) + ); + assert_eq!( + compute_error_scaled64(-3, 9223372036854777856, 1), + (1065 + f64::INVALID_FP, 9223372036854777856) + ); + assert_eq!( + compute_error_scaled64(-3, 9223372036854778880, 1), + (1065 + f64::INVALID_FP, 9223372036854778880) + ); + assert_eq!( + compute_error_scaled64(-3, 9223372036854779904, 1), + (1065 + f64::INVALID_FP, 9223372036854779904) + ); + + // Test from errors in atof. + assert_eq!( + compute_error_scaled64(-18, 9223373686122217470, 4), + (1012 + f64::INVALID_FP, 9223373686122217470) + ); + + // Check edge-cases from previous errors. + assert_eq!( + compute_error_scaled64(-342, 9223372036854775804, 2), + (-64 + f64::INVALID_FP, 18446744073709551608) + ); +} + +#[test] +fn compute_float_f32_rounding() { + // These test near-halfway cases for single-precision floats. 
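+ // Editor's note (added): the same halfway behaviour is visible with a plain
+ // std cast: 2^24 + 1 and 2^24 + 3 are exact ties for f32 and round to the
+ // even neighbour. Illustrative only, not an upstream test.
+ assert_eq!(16777217_u32 as f32, 16777216.0);
+ assert_eq!(16777219_u32 as f32, 16777220.0);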
+ assert_eq!(compute_float32(0, 16777216), (151, 0)); + assert_eq!(compute_float32(0, 16777217), (151, 0)); + assert_eq!(compute_float32(0, 16777218), (151, 1)); + assert_eq!(compute_float32(0, 16777219), (151, 2)); + assert_eq!(compute_float32(0, 16777220), (151, 2)); + + // These are examples of the above tests, with + // digits from the exponent shifted to the mantissa. + assert_eq!(compute_float32(-10, 167772160000000000), (151, 0)); + assert_eq!(compute_float32(-10, 167772170000000000), (151, 0)); + assert_eq!(compute_float32(-10, 167772180000000000), (151, 1)); + // Let's check the lines to see if anything is different in table... + assert_eq!(compute_float32(-10, 167772190000000000), (151, 2)); + assert_eq!(compute_float32(-10, 167772200000000000), (151, 2)); +} + +#[test] +fn compute_float_f64_rounding() { + // Also need to check halfway cases **inside** that exponent range. + + // These test near-halfway cases for double-precision floats. + assert_eq!(compute_float64(0, 9007199254740992), (1076, 0)); + assert_eq!(compute_float64(0, 9007199254740993), (1076, 0)); + assert_eq!(compute_float64(0, 9007199254740994), (1076, 1)); + assert_eq!(compute_float64(0, 9007199254740995), (1076, 2)); + assert_eq!(compute_float64(0, 9007199254740996), (1076, 2)); + assert_eq!(compute_float64(0, 18014398509481984), (1077, 0)); + assert_eq!(compute_float64(0, 18014398509481986), (1077, 0)); + assert_eq!(compute_float64(0, 18014398509481988), (1077, 1)); + assert_eq!(compute_float64(0, 18014398509481990), (1077, 2)); + assert_eq!(compute_float64(0, 18014398509481992), (1077, 2)); + + // Test a much closer set of examples. + assert_eq!(compute_float64(0, 9007199254740991), (1075, 4503599627370495)); + assert_eq!(compute_float64(0, 9223372036854776831), (1086, 0)); + assert_eq!(compute_float64(0, 9223372036854776832), (1086, 0)); + assert_eq!(compute_float64(0, 9223372036854776833), (1086, 1)); + assert_eq!(compute_float64(-42, 9123456727292927), (936, 1854521741541368)); + assert_eq!(compute_float64(-43, 91234567272929275), (936, 1854521741541369)); + assert_eq!(compute_float64(-42, 9123456727292928), (936, 1854521741541369)); + + // These are examples of the above tests, with + // digits from the exponent shifted to the mantissa. + assert_eq!(compute_float64(-3, 9007199254740992000), (1076, 0)); + assert_eq!(compute_float64(-3, 9007199254740993000), (1076, 0)); + assert_eq!(compute_float64(-3, 9007199254740994000), (1076, 1)); + assert_eq!(compute_float64(-3, 9007199254740995000), (1076, 2)); + assert_eq!(compute_float64(-3, 9007199254740996000), (1076, 2)); +} diff --git a/vendor/minimal-lexical/tests/libm_tests.rs b/vendor/minimal-lexical/tests/libm_tests.rs new file mode 100644 index 00000000000000..7f5352e1938234 --- /dev/null +++ b/vendor/minimal-lexical/tests/libm_tests.rs @@ -0,0 +1,289 @@ +#![cfg(all(not(feature = "std"), feature = "compact"))] + +// These are adapted from libm, a port of musl libc's libm to Rust. 
+// libm can be found online [here](https://github.com/rust-lang/libm), +// and is similarly licensed under an Apache2.0/MIT license + +use core::f64; +use minimal_lexical::libm; + +#[test] +fn fabsf_sanity_test() { + assert_eq!(libm::fabsf(-1.0), 1.0); + assert_eq!(libm::fabsf(2.8), 2.8); +} + +/// The spec: https://en.cppreference.com/w/cpp/numeric/math/fabs +#[test] +fn fabsf_spec_test() { + assert!(libm::fabsf(f32::NAN).is_nan()); + for f in [0.0, -0.0].iter().copied() { + assert_eq!(libm::fabsf(f), 0.0); + } + for f in [f32::INFINITY, f32::NEG_INFINITY].iter().copied() { + assert_eq!(libm::fabsf(f), f32::INFINITY); + } +} + +#[test] +fn sqrtf_sanity_test() { + assert_eq!(libm::sqrtf(100.0), 10.0); + assert_eq!(libm::sqrtf(4.0), 2.0); +} + +/// The spec: https://en.cppreference.com/w/cpp/numeric/math/sqrt +#[test] +fn sqrtf_spec_test() { + // Not Asserted: FE_INVALID exception is raised if argument is negative. + assert!(libm::sqrtf(-1.0).is_nan()); + assert!(libm::sqrtf(f32::NAN).is_nan()); + for f in [0.0, -0.0, f32::INFINITY].iter().copied() { + assert_eq!(libm::sqrtf(f), f); + } +} + +const POS_ZERO: &[f64] = &[0.0]; +const NEG_ZERO: &[f64] = &[-0.0]; +const POS_ONE: &[f64] = &[1.0]; +const NEG_ONE: &[f64] = &[-1.0]; +const POS_FLOATS: &[f64] = &[99.0 / 70.0, f64::consts::E, f64::consts::PI]; +const NEG_FLOATS: &[f64] = &[-99.0 / 70.0, -f64::consts::E, -f64::consts::PI]; +const POS_SMALL_FLOATS: &[f64] = &[(1.0 / 2.0), f64::MIN_POSITIVE, f64::EPSILON]; +const NEG_SMALL_FLOATS: &[f64] = &[-(1.0 / 2.0), -f64::MIN_POSITIVE, -f64::EPSILON]; +const POS_EVENS: &[f64] = &[2.0, 6.0, 8.0, 10.0, 22.0, 100.0, f64::MAX]; +const NEG_EVENS: &[f64] = &[f64::MIN, -100.0, -22.0, -10.0, -8.0, -6.0, -2.0]; +const POS_ODDS: &[f64] = &[3.0, 7.0]; +const NEG_ODDS: &[f64] = &[-7.0, -3.0]; +const NANS: &[f64] = &[f64::NAN]; +const POS_INF: &[f64] = &[f64::INFINITY]; +const NEG_INF: &[f64] = &[f64::NEG_INFINITY]; + +const ALL: &[&[f64]] = &[ + POS_ZERO, + NEG_ZERO, + NANS, + NEG_SMALL_FLOATS, + POS_SMALL_FLOATS, + NEG_FLOATS, + POS_FLOATS, + NEG_EVENS, + POS_EVENS, + NEG_ODDS, + POS_ODDS, + NEG_INF, + POS_INF, + NEG_ONE, + POS_ONE, +]; +const POS: &[&[f64]] = &[POS_ZERO, POS_ODDS, POS_ONE, POS_FLOATS, POS_EVENS, POS_INF]; +const NEG: &[&[f64]] = &[NEG_ZERO, NEG_ODDS, NEG_ONE, NEG_FLOATS, NEG_EVENS, NEG_INF]; + +fn powd(base: f64, exponent: f64, expected: f64) { + let res = libm::powd(base, exponent); + assert!( + if expected.is_nan() { + res.is_nan() + } else { + libm::powd(base, exponent) == expected + }, + "{} ** {} was {} instead of {}", + base, + exponent, + res, + expected + ); +} + +fn powd_test_sets_as_base(sets: &[&[f64]], exponent: f64, expected: f64) { + sets.iter().for_each(|s| s.iter().for_each(|val| powd(*val, exponent, expected))); +} + +fn powd_test_sets_as_exponent(base: f64, sets: &[&[f64]], expected: f64) { + sets.iter().for_each(|s| s.iter().for_each(|val| powd(base, *val, expected))); +} + +fn powd_test_sets(sets: &[&[f64]], computed: &dyn Fn(f64) -> f64, expected: &dyn Fn(f64) -> f64) { + sets.iter().for_each(|s| { + s.iter().for_each(|val| { + let exp = expected(*val); + let res = computed(*val); + + assert!( + if exp.is_nan() { + res.is_nan() + } else { + exp == res + }, + "test for {} was {} instead of {}", + val, + res, + exp + ); + }) + }); +} + +#[test] +fn powd_zero_as_exponent() { + powd_test_sets_as_base(ALL, 0.0, 1.0); + powd_test_sets_as_base(ALL, -0.0, 1.0); +} + +#[test] +fn powd_one_as_base() { + powd_test_sets_as_exponent(1.0, ALL, 1.0); +} + +#[test] +fn 
powd_nan_inputs() { + // NAN as the base: + // (NAN ^ anything *but 0* should be NAN) + powd_test_sets_as_exponent(f64::NAN, &ALL[2..], f64::NAN); + + // NAN as the exponent: + // (anything *but 1* ^ NAN should be NAN) + powd_test_sets_as_base(&ALL[..(ALL.len() - 2)], f64::NAN, f64::NAN); +} + +#[test] +fn powd_infinity_as_base() { + // Positive Infinity as the base: + // (+Infinity ^ positive anything but 0 and NAN should be +Infinity) + powd_test_sets_as_exponent(f64::INFINITY, &POS[1..], f64::INFINITY); + + // (+Infinity ^ negative anything except 0 and NAN should be 0.0) + powd_test_sets_as_exponent(f64::INFINITY, &NEG[1..], 0.0); + + // Negative Infinity as the base: + // (-Infinity ^ positive odd ints should be -Infinity) + powd_test_sets_as_exponent(f64::NEG_INFINITY, &[POS_ODDS], f64::NEG_INFINITY); + + // (-Infinity ^ anything but odd ints should be == -0 ^ (-anything)) + // We can lump in pos/neg odd ints here because they don't seem to + // cause panics (div by zero) in release mode (I think). + powd_test_sets(ALL, &|v: f64| libm::powd(f64::NEG_INFINITY, v), &|v: f64| libm::powd(-0.0, -v)); +} + +#[test] +fn infinity_as_exponent() { + // Positive/Negative base greater than 1: + // (pos/neg > 1 ^ Infinity should be Infinity - note this excludes NAN as the base) + powd_test_sets_as_base(&ALL[5..(ALL.len() - 2)], f64::INFINITY, f64::INFINITY); + + // (pos/neg > 1 ^ -Infinity should be 0.0) + powd_test_sets_as_base(&ALL[5..ALL.len() - 2], f64::NEG_INFINITY, 0.0); + + // Positive/Negative base less than 1: + let base_below_one = &[POS_ZERO, NEG_ZERO, NEG_SMALL_FLOATS, POS_SMALL_FLOATS]; + + // (pos/neg < 1 ^ Infinity should be 0.0 - this also excludes NAN as the base) + powd_test_sets_as_base(base_below_one, f64::INFINITY, 0.0); + + // (pos/neg < 1 ^ -Infinity should be Infinity) + powd_test_sets_as_base(base_below_one, f64::NEG_INFINITY, f64::INFINITY); + + // Positive/Negative 1 as the base: + // (pos/neg 1 ^ Infinity should be 1) + powd_test_sets_as_base(&[NEG_ONE, POS_ONE], f64::INFINITY, 1.0); + + // (pos/neg 1 ^ -Infinity should be 1) + powd_test_sets_as_base(&[NEG_ONE, POS_ONE], f64::NEG_INFINITY, 1.0); +} + +#[test] +fn powd_zero_as_base() { + // Positive Zero as the base: + // (+0 ^ anything positive but 0 and NAN should be +0) + powd_test_sets_as_exponent(0.0, &POS[1..], 0.0); + + // (+0 ^ anything negative but 0 and NAN should be Infinity) + // (this should panic because we're dividing by zero) + powd_test_sets_as_exponent(0.0, &NEG[1..], f64::INFINITY); + + // Negative Zero as the base: + // (-0 ^ anything positive but 0, NAN, and odd ints should be +0) + powd_test_sets_as_exponent(-0.0, &POS[3..], 0.0); + + // (-0 ^ anything negative but 0, NAN, and odd ints should be Infinity) + // (should panic because of divide by zero) + powd_test_sets_as_exponent(-0.0, &NEG[3..], f64::INFINITY); + + // (-0 ^ positive odd ints should be -0) + powd_test_sets_as_exponent(-0.0, &[POS_ODDS], -0.0); + + // (-0 ^ negative odd ints should be -Infinity) + // (should panic because of divide by zero) + powd_test_sets_as_exponent(-0.0, &[NEG_ODDS], f64::NEG_INFINITY); +} + +#[test] +fn special_cases() { + // One as the exponent: + // (anything ^ 1 should be anything - i.e. 
the base) + powd_test_sets(ALL, &|v: f64| libm::powd(v, 1.0), &|v: f64| v); + + // Negative One as the exponent: + // (anything ^ -1 should be 1/anything) + powd_test_sets(ALL, &|v: f64| libm::powd(v, -1.0), &|v: f64| 1.0 / v); + + // Factoring -1 out: + // (negative anything ^ integer should be (-1 ^ integer) * (positive anything ^ integer)) + [POS_ZERO, NEG_ZERO, POS_ONE, NEG_ONE, POS_EVENS, NEG_EVENS].iter().for_each(|int_set| { + int_set.iter().for_each(|int| { + powd_test_sets(ALL, &|v: f64| libm::powd(-v, *int), &|v: f64| { + libm::powd(-1.0, *int) * libm::powd(v, *int) + }); + }) + }); + + // Negative base (imaginary results): + // (-anything except 0 and Infinity ^ non-integer should be NAN) + NEG[1..(NEG.len() - 1)].iter().for_each(|set| { + set.iter().for_each(|val| { + powd_test_sets(&ALL[3..7], &|v: f64| libm::powd(*val, v), &|_| f64::NAN); + }) + }); +} + +#[test] +fn normal_cases() { + assert_eq!(libm::powd(2.0, 20.0), (1 << 20) as f64); + assert_eq!(libm::powd(-1.0, 9.0), -1.0); + assert!(libm::powd(-1.0, 2.2).is_nan()); + assert!(libm::powd(-1.0, -1.14).is_nan()); +} + +#[test] +fn fabsd_sanity_test() { + assert_eq!(libm::fabsd(-1.0), 1.0); + assert_eq!(libm::fabsd(2.8), 2.8); +} + +/// The spec: https://en.cppreference.com/w/cpp/numeric/math/fabs +#[test] +fn fabsd_spec_test() { + assert!(libm::fabsd(f64::NAN).is_nan()); + for f in [0.0, -0.0].iter().copied() { + assert_eq!(libm::fabsd(f), 0.0); + } + for f in [f64::INFINITY, f64::NEG_INFINITY].iter().copied() { + assert_eq!(libm::fabsd(f), f64::INFINITY); + } +} + +#[test] +fn sqrtd_sanity_test() { + assert_eq!(libm::sqrtd(100.0), 10.0); + assert_eq!(libm::sqrtd(4.0), 2.0); +} + +/// The spec: https://en.cppreference.com/w/cpp/numeric/math/sqrt +#[test] +fn sqrtd_spec_test() { + // Not Asserted: FE_INVALID exception is raised if argument is negative. 
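+ // Editor's note (added): std's f64::sqrt follows the same IEEE special-case
+ // rules exercised below, so it can serve as a reference point here.
+ // Illustrative aside, not an upstream test.
+ assert!((-1.0_f64).sqrt().is_nan());
+ assert_eq!(f64::INFINITY.sqrt(), f64::INFINITY);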
+ assert!(libm::sqrtd(-1.0).is_nan()); + assert!(libm::sqrtd(f64::NAN).is_nan()); + for f in [0.0, -0.0, f64::INFINITY].iter().copied() { + assert_eq!(libm::sqrtd(f), f); + } +} diff --git a/vendor/minimal-lexical/tests/mask_tests.rs b/vendor/minimal-lexical/tests/mask_tests.rs new file mode 100644 index 00000000000000..97e70a72b8f232 --- /dev/null +++ b/vendor/minimal-lexical/tests/mask_tests.rs @@ -0,0 +1,16 @@ +use minimal_lexical::mask; + +#[test] +fn lower_n_mask_test() { + assert_eq!(mask::lower_n_mask(2), 0b11); +} + +#[test] +fn lower_n_halfway_test() { + assert_eq!(mask::lower_n_halfway(2), 0b10); +} + +#[test] +fn nth_bit_test() { + assert_eq!(mask::nth_bit(2), 0b100); +} diff --git a/vendor/minimal-lexical/tests/number_tests.rs b/vendor/minimal-lexical/tests/number_tests.rs new file mode 100644 index 00000000000000..947be394c94f19 --- /dev/null +++ b/vendor/minimal-lexical/tests/number_tests.rs @@ -0,0 +1,88 @@ +use minimal_lexical::number::Number; + +#[test] +fn is_fast_path_test() { + let mut number = Number { + exponent: -4, + mantissa: 12345, + many_digits: false, + }; + assert_eq!(number.is_fast_path::(), true); + assert_eq!(number.is_fast_path::(), true); + + number.exponent = -15; + assert_eq!(number.is_fast_path::(), false); + assert_eq!(number.is_fast_path::(), true); + + number.exponent = -25; + assert_eq!(number.is_fast_path::(), false); + assert_eq!(number.is_fast_path::(), false); + + number.exponent = 25; + assert_eq!(number.is_fast_path::(), false); + assert_eq!(number.is_fast_path::(), true); + + number.exponent = 36; + assert_eq!(number.is_fast_path::(), false); + assert_eq!(number.is_fast_path::(), true); + + number.exponent = 38; + assert_eq!(number.is_fast_path::(), false); + assert_eq!(number.is_fast_path::(), false); + + number.mantissa = 1 << 25; + number.exponent = 0; + assert_eq!(number.is_fast_path::(), false); + assert_eq!(number.is_fast_path::(), true); + + number.mantissa = 1 << 54; + assert_eq!(number.is_fast_path::(), false); + assert_eq!(number.is_fast_path::(), false); + + number.mantissa = 1 << 52; + assert_eq!(number.is_fast_path::(), false); + assert_eq!(number.is_fast_path::(), true); + + number.many_digits = true; + assert_eq!(number.is_fast_path::(), false); + assert_eq!(number.is_fast_path::(), false); +} + +#[test] +fn try_fast_path_test() { + let mut number = Number { + exponent: -4, + mantissa: 12345, + many_digits: false, + }; + assert_eq!(number.try_fast_path::(), Some(1.2345)); + assert_eq!(number.try_fast_path::(), Some(1.2345)); + + number.exponent = -10; + assert_eq!(number.try_fast_path::(), Some(1.2345e-6)); + assert_eq!(number.try_fast_path::(), Some(1.2345e-6)); + + number.exponent = -20; + assert_eq!(number.try_fast_path::(), None); + assert_eq!(number.try_fast_path::(), Some(1.2345e-16)); + + number.exponent = -25; + assert_eq!(number.try_fast_path::(), None); + assert_eq!(number.try_fast_path::(), None); + + number.exponent = 12; + assert_eq!(number.try_fast_path::(), Some(1.2345e16)); + assert_eq!(number.try_fast_path::(), Some(1.2345e16)); + + number.exponent = 25; + assert_eq!(number.try_fast_path::(), None); + assert_eq!(number.try_fast_path::(), Some(1.2345e29)); + + number.exponent = 32; + assert_eq!(number.try_fast_path::(), None); + assert_eq!(number.try_fast_path::(), Some(1.2345e36)); + + number.exponent = 36; + assert_eq!(number.try_fast_path::(), None); + assert_eq!(number.try_fast_path::(), None); +} diff --git a/vendor/minimal-lexical/tests/parse_tests.rs b/vendor/minimal-lexical/tests/parse_tests.rs new file 
mode 100644 index 00000000000000..48856fd1cca365 --- /dev/null +++ b/vendor/minimal-lexical/tests/parse_tests.rs @@ -0,0 +1,189 @@ +use core::f64; +use minimal_lexical::{num, parse}; + +fn check_parse_float(integer: &str, fraction: &str, exponent: i32, expected: F) { + let integer = integer.as_bytes().iter(); + let fraction = fraction.as_bytes().iter(); + assert!(expected == parse::parse_float::(integer, fraction, exponent)); +} + +#[test] +fn parse_f32_test() { + check_parse_float("", "", 0, 0.0_f32); + check_parse_float("1", "2345", 0, 1.2345_f32); + check_parse_float("12", "345", 0, 12.345_f32); + check_parse_float("12345", "6789", 0, 12345.6789_f32); + check_parse_float("1", "2345", 10, 1.2345e10_f32); + check_parse_float("1", "2345", -38, 1.2345e-38_f32); + + // Check expected rounding, using borderline cases. + // Round-down, halfway + check_parse_float("16777216", "", 0, 16777216.0_f32); + check_parse_float("16777217", "", 0, 16777216.0_f32); + check_parse_float("16777218", "", 0, 16777218.0_f32); + check_parse_float("33554432", "", 0, 33554432.0_f32); + check_parse_float("33554434", "", 0, 33554432.0_f32); + check_parse_float("33554436", "", 0, 33554436.0_f32); + check_parse_float("17179869184", "", 0, 17179869184.0_f32); + check_parse_float("17179870208", "", 0, 17179869184.0_f32); + check_parse_float("17179871232", "", 0, 17179871232.0_f32); + + // Round-up, halfway + check_parse_float("16777218", "", 0, 16777218.0_f32); + check_parse_float("16777219", "", 0, 16777220.0_f32); + check_parse_float("16777220", "", 0, 16777220.0_f32); + + check_parse_float("33554436", "", 0, 33554436.0_f32); + check_parse_float("33554438", "", 0, 33554440.0_f32); + check_parse_float("33554440", "", 0, 33554440.0_f32); + + check_parse_float("17179871232", "", 0, 17179871232.0_f32); + check_parse_float("17179872256", "", 0, 17179873280.0_f32); + check_parse_float("17179873280", "", 0, 17179873280.0_f32); + + // Round-up, above halfway + check_parse_float("33554435", "", 0, 33554436.0_f32); + check_parse_float("17179870209", "", 0, 17179871232.0_f32); + + // Check exactly halfway, round-up at halfway + check_parse_float("1", "00000017881393432617187499", 0, 1.0000001_f32); + check_parse_float("1", "000000178813934326171875", 0, 1.0000002_f32); + check_parse_float("1", "00000017881393432617187501", 0, 1.0000002_f32); + + check_parse_float("", "000000000000000000000000000000000000011754943508222875079687365372222456778186655567720875215087517062784172594547271728515625", 0, 1.1754943508222875e-38f32); +} + +#[test] +fn parse_f64_test() { + check_parse_float("", "", 0, 0.0_f64); + check_parse_float("1", "2345", 0, 1.2345_f64); + check_parse_float("12", "345", 0, 12.345_f64); + check_parse_float("12345", "6789", 0, 12345.6789_f64); + check_parse_float("1", "2345", 10, 1.2345e10_f64); + check_parse_float("1", "2345", -308, 1.2345e-308_f64); + + // Check expected rounding, using borderline cases. 
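+ // Editor's note (added): the borderline strings below are the classic 2^53
+ // halfway cases; plain f64 literals exhibit the same rounding. Illustrative
+ // std-only check, not an upstream test.
+ assert_eq!(9007199254740993.0_f64, 9007199254740992.0);
+ assert_eq!(9007199254740995.0_f64, 9007199254740996.0);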
+ // Round-down, halfway + check_parse_float("9007199254740992", "", 0, 9007199254740992.0_f64); + check_parse_float("9007199254740993", "", 0, 9007199254740992.0_f64); + check_parse_float("9007199254740994", "", 0, 9007199254740994.0_f64); + + check_parse_float("18014398509481984", "", 0, 18014398509481984.0_f64); + check_parse_float("18014398509481986", "", 0, 18014398509481984.0_f64); + check_parse_float("18014398509481988", "", 0, 18014398509481988.0_f64); + + check_parse_float("9223372036854775808", "", 0, 9223372036854775808.0_f64); + check_parse_float("9223372036854776832", "", 0, 9223372036854775808.0_f64); + check_parse_float("9223372036854777856", "", 0, 9223372036854777856.0_f64); + + check_parse_float( + "11417981541647679048466287755595961091061972992", + "", + 0, + 11417981541647679048466287755595961091061972992.0_f64, + ); + check_parse_float( + "11417981541647680316116887983825362587765178368", + "", + 0, + 11417981541647679048466287755595961091061972992.0_f64, + ); + check_parse_float( + "11417981541647681583767488212054764084468383744", + "", + 0, + 11417981541647681583767488212054764084468383744.0_f64, + ); + + // Round-up, halfway + check_parse_float("9007199254740994", "", 0, 9007199254740994.0_f64); + check_parse_float("9007199254740995", "", 0, 9007199254740996.0_f64); + check_parse_float("9007199254740996", "", 0, 9007199254740996.0_f64); + + check_parse_float("18014398509481988", "", 0, 18014398509481988.0_f64); + check_parse_float("18014398509481990", "", 0, 18014398509481992.0_f64); + check_parse_float("18014398509481992", "", 0, 18014398509481992.0_f64); + + check_parse_float("9223372036854777856", "", 0, 9223372036854777856.0_f64); + check_parse_float("9223372036854778880", "", 0, 9223372036854779904.0_f64); + check_parse_float("9223372036854779904", "", 0, 9223372036854779904.0_f64); + + check_parse_float( + "11417981541647681583767488212054764084468383744", + "", + 0, + 11417981541647681583767488212054764084468383744.0_f64, + ); + check_parse_float( + "11417981541647682851418088440284165581171589120", + "", + 0, + 11417981541647684119068688668513567077874794496.0_f64, + ); + check_parse_float( + "11417981541647684119068688668513567077874794496", + "", + 0, + 11417981541647684119068688668513567077874794496.0_f64, + ); + + // Round-up, above halfway + check_parse_float("9223372036854776833", "", 0, 9223372036854777856.0_f64); + check_parse_float( + "11417981541647680316116887983825362587765178369", + "", + 0, + 11417981541647681583767488212054764084468383744.0_f64, + ); + + // Rounding error + // Adapted from failures in strtod. 
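+ // Editor's note (added): the strtod-derived inputs below straddle the
+ // boundary between the largest subnormal (2.225073858507201e-308) and the
+ // smallest normal f64 (2.2250738585072014e-308). Illustrative check only.
+ assert!(2.225073858507201e-308_f64 < 2.2250738585072014e-308_f64);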
+ check_parse_float("2", "2250738585072014", -308, 2.2250738585072014e-308_f64); + check_parse_float("2", "2250738585072011360574097967091319759348195463516456480234261097248222220210769455165295239081350879141491589130396211068700864386945946455276572074078206217433799881410632673292535522868813721490129811224514518898490572223072852551331557550159143974763979834118019993239625482890171070818506906306666559949382757725720157630626906633326475653000092458883164330377797918696120494973903778297049050510806099407302629371289589500035837999672072543043602840788957717961509455167482434710307026091446215722898802581825451803257070188608721131280795122334262883686223215037756666225039825343359745688844239002654981983854879482922068947216898310996983658468140228542433306603398508864458040010349339704275671864433837704860378616227717385456230658746790140867233276367187499", -308, 2.225073858507201e-308_f64); + check_parse_float("2", "22507385850720113605740979670913197593481954635164564802342610972482222202107694551652952390813508791414915891303962110687008643869459464552765720740782062174337998814106326732925355228688137214901298112245145188984905722230728525513315575501591439747639798341180199932396254828901710708185069063066665599493827577257201576306269066333264756530000924588831643303777979186961204949739037782970490505108060994073026293712895895000358379996720725430436028407889577179615094551674824347103070260914462157228988025818254518032570701886087211312807951223342628836862232150377566662250398253433597456888442390026549819838548794829220689472168983109969836584681402285424333066033985088644580400103493397042756718644338377048603786162277173854562306587467901408672332763671875", -308, 2.2250738585072014e-308_f64); + check_parse_float("2", "2250738585072011360574097967091319759348195463516456480234261097248222220210769455165295239081350879141491589130396211068700864386945946455276572074078206217433799881410632673292535522868813721490129811224514518898490572223072852551331557550159143974763979834118019993239625482890171070818506906306666559949382757725720157630626906633326475653000092458883164330377797918696120494973903778297049050510806099407302629371289589500035837999672072543043602840788957717961509455167482434710307026091446215722898802581825451803257070188608721131280795122334262883686223215037756666225039825343359745688844239002654981983854879482922068947216898310996983658468140228542433306603398508864458040010349339704275671864433837704860378616227717385456230658746790140867233276367187501", -308, 2.2250738585072014e-308_f64); + check_parse_float("179769313486231580793728971405303415079934132710037826936173778980444968292764750946649017977587207096330286416692887910946555547851940402630657488671505820681908902000708383676273854845817711531764475730270069855571366959622842914819860834936475292719074168444365510704342711559699508093042880177904174497791", "9999999999999999999999999999999999999999999999999999999999999999999999", 0, 1.7976931348623157e+308_f64); + check_parse_float("7", 
"4109846876186981626485318930233205854758970392148714663837852375101326090531312779794975454245398856969484704316857659638998506553390969459816219401617281718945106978546710679176872575177347315553307795408549809608457500958111373034747658096871009590975442271004757307809711118935784838675653998783503015228055934046593739791790738723868299395818481660169122019456499931289798411362062484498678713572180352209017023903285791732520220528974020802906854021606612375549983402671300035812486479041385743401875520901590172592547146296175134159774938718574737870961645638908718119841271673056017045493004705269590165763776884908267986972573366521765567941072508764337560846003984904972149117463085539556354188641513168478436313080237596295773983001708984374999", -324, 5.0e-324_f64); + check_parse_float("7", "4109846876186981626485318930233205854758970392148714663837852375101326090531312779794975454245398856969484704316857659638998506553390969459816219401617281718945106978546710679176872575177347315553307795408549809608457500958111373034747658096871009590975442271004757307809711118935784838675653998783503015228055934046593739791790738723868299395818481660169122019456499931289798411362062484498678713572180352209017023903285791732520220528974020802906854021606612375549983402671300035812486479041385743401875520901590172592547146296175134159774938718574737870961645638908718119841271673056017045493004705269590165763776884908267986972573366521765567941072508764337560846003984904972149117463085539556354188641513168478436313080237596295773983001708984375", -324, 1.0e-323_f64); + check_parse_float("7", "4109846876186981626485318930233205854758970392148714663837852375101326090531312779794975454245398856969484704316857659638998506553390969459816219401617281718945106978546710679176872575177347315553307795408549809608457500958111373034747658096871009590975442271004757307809711118935784838675653998783503015228055934046593739791790738723868299395818481660169122019456499931289798411362062484498678713572180352209017023903285791732520220528974020802906854021606612375549983402671300035812486479041385743401875520901590172592547146296175134159774938718574737870961645638908718119841271673056017045493004705269590165763776884908267986972573366521765567941072508764337560846003984904972149117463085539556354188641513168478436313080237596295773983001708984375001", -324, 1.0e-323_f64); + check_parse_float("", "0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000024703282292062327208828439643411068618252990130716238221279284125033775363510437593264991818081799618989828234772285886546332835517796989819938739800539093906315035659515570226392290858392449105184435931802849936536152500319370457678249219365623669863658480757001585769269903706311928279558551332927834338409351978015531246597263579574622766465272827220056374006485499977096599470454020828166226237857393450736339007967761930577506740176324673600968951340535537458516661134223766678604162159680461914467291840300530057530849048765391711386591646239524912623653881879636239373280423891018672348497668235089863388587925628302755995657524455507255189313690836254779186948667994968324049705821028513185451396213837722826145437693412532098591327667236328125", 0, 0.0_f64); + + // Rounding error + // Adapted from: + // 
https://www.exploringbinary.com/how-glibc-strtod-works/ + check_parse_float("", "000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000022250738585072008890245868760858598876504231122409594654935248025624400092282356951787758888037591552642309780950434312085877387158357291821993020294379224223559819827501242041788969571311791082261043971979604000454897391938079198936081525613113376149842043271751033627391549782731594143828136275113838604094249464942286316695429105080201815926642134996606517803095075913058719846423906068637102005108723282784678843631944515866135041223479014792369585208321597621066375401613736583044193603714778355306682834535634005074073040135602968046375918583163124224521599262546494300836851861719422417646455137135420132217031370496583210154654068035397417906022589503023501937519773030945763173210852507299305089761582519159720757232455434770912461317493580281734466552734375", 0, 2.2250738585072011e-308_f64); + + // Rounding error + // Adapted from test-parse-random failures. + check_parse_float("1009", "", -31, 1.009e-28_f64); + check_parse_float("18294", "", 304, f64::INFINITY); + + // Rounding error + // Adapted from a @dangrabcad's issue #20. + check_parse_float("7", "689539722041643", 164, 7.689539722041643e164_f64); + check_parse_float("768953972204164300000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", "", 0, 7.689539722041643e164_f64); + check_parse_float("768953972204164300000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", "0", 0, 7.689539722041643e164_f64); + + // Check other cases similar to @dangrabcad's issue #20. + check_parse_float("9223372036854776833", "0", 0, 9223372036854777856.0_f64); + check_parse_float( + "11417981541647680316116887983825362587765178369", + "0", + 0, + 11417981541647681583767488212054764084468383744.0_f64, + ); + check_parse_float("9007199254740995", "0", 0, 9007199254740996.0_f64); + check_parse_float("18014398509481990", "0", 0, 18014398509481992.0_f64); + check_parse_float("9223372036854778880", "0", 0, 9223372036854779904.0_f64); + check_parse_float( + "11417981541647682851418088440284165581171589120", + "0", + 0, + 11417981541647684119068688668513567077874794496.0_f64, + ); + + // Check other cases ostensibly identified via proptest. 
+ check_parse_float("71610528364411830000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", "0", 0, 71610528364411830000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000.0_f64); + check_parse_float("126769393745745060000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", "0", 0, 126769393745745060000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000.0_f64); + check_parse_float("38652960461239320000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", "0", 0, 38652960461239320000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000.0_f64); +} diff --git a/vendor/minimal-lexical/tests/rounding_tests.rs b/vendor/minimal-lexical/tests/rounding_tests.rs new file mode 100644 index 00000000000000..794d696fb827d7 --- /dev/null +++ b/vendor/minimal-lexical/tests/rounding_tests.rs @@ -0,0 +1,64 @@ +use minimal_lexical::extended_float::ExtendedFloat; +use minimal_lexical::rounding; + +#[test] +fn round_test() { + let mut fp = ExtendedFloat { + mant: 9223372036854776832, + exp: -10, + }; + rounding::round::(&mut fp, |f, s| { + f.mant >>= s; + f.exp += s; + }); + assert_eq!(fp.mant, 0); + assert_eq!(fp.exp, 1); + + let mut fp = ExtendedFloat { + mant: 9223372036854776832, + exp: -10, + }; + rounding::round::(&mut fp, |f, s| { + f.mant >>= s; + f.exp += s; + // Round-up. 
+ f.mant += 1; + }); + assert_eq!(fp.mant, 1); + assert_eq!(fp.exp, 1); + + // Round-down + let mut fp = ExtendedFloat { + mant: 9223372036854776832, + exp: -10, + }; + rounding::round::(&mut fp, |f, s| { + rounding::round_nearest_tie_even(f, s, |is_odd, is_halfway, is_above| { + is_above || (is_odd && is_halfway) + }); + }); + assert_eq!(fp.mant, 0); + assert_eq!(fp.exp, 1); + + // Round up + let mut fp = ExtendedFloat { + mant: 9223372036854778880, + exp: -10, + }; + rounding::round::(&mut fp, |f, s| { + rounding::round_nearest_tie_even(f, s, |is_odd, is_halfway, is_above| { + is_above || (is_odd && is_halfway) + }); + }); + assert_eq!(fp.mant, 2); + assert_eq!(fp.exp, 1); + + // Round down + let mut fp = ExtendedFloat { + mant: 9223372036854778880, + exp: -10, + }; + rounding::round::(&mut fp, rounding::round_down); + assert_eq!(fp.mant, 1); + assert_eq!(fp.exp, 1); +} diff --git a/vendor/minimal-lexical/tests/slow_tests.rs b/vendor/minimal-lexical/tests/slow_tests.rs new file mode 100644 index 00000000000000..2afea69e908a73 --- /dev/null +++ b/vendor/minimal-lexical/tests/slow_tests.rs @@ -0,0 +1,337 @@ +mod stackvec; + +use minimal_lexical::bigint::Bigint; +use minimal_lexical::extended_float::ExtendedFloat; +use minimal_lexical::num::Float; +use minimal_lexical::number::Number; +use minimal_lexical::slow; +use stackvec::vec_from_u32; + +fn b(float: F) -> (u64, i32) { + let fp = slow::b(float); + (fp.mant, fp.exp) +} + +fn bh(float: F) -> (u64, i32) { + let fp = slow::bh(float); + (fp.mant, fp.exp) +} + +#[test] +fn b_test() { + assert_eq!(b(1e-45_f32), (1, -149)); + assert_eq!(b(5e-324_f64), (1, -1074)); + assert_eq!(b(1e-323_f64), (2, -1074)); + assert_eq!(b(2e-323_f64), (4, -1074)); + assert_eq!(b(3e-323_f64), (6, -1074)); + assert_eq!(b(4e-323_f64), (8, -1074)); + assert_eq!(b(5e-323_f64), (10, -1074)); + assert_eq!(b(6e-323_f64), (12, -1074)); + assert_eq!(b(7e-323_f64), (14, -1074)); + assert_eq!(b(8e-323_f64), (16, -1074)); + assert_eq!(b(9e-323_f64), (18, -1074)); + assert_eq!(b(1_f32), (8388608, -23)); + assert_eq!(b(1_f64), (4503599627370496, -52)); + assert_eq!(b(1e38_f32), (9860761, 103)); + assert_eq!(b(1e308_f64), (5010420900022432, 971)); +} + +#[test] +fn bh_test() { + assert_eq!(bh(1e-45_f32), (3, -150)); + assert_eq!(bh(5e-324_f64), (3, -1075)); + assert_eq!(bh(1_f32), (16777217, -24)); + assert_eq!(bh(1_f64), (9007199254740993, -53)); + assert_eq!(bh(1e38_f32), (19721523, 102)); + assert_eq!(bh(1e308_f64), (10020841800044865, 970)); +} + +#[test] +fn slow_test() { + // 5e-324, round-down. 
+ let integer = b"2"; + let fraction = b"4703282292062327208828439643411068618252990130716238221279284125033775363510437593264991818081799618989828234772285886546332835517796989819938739800539093906315035659515570226392290858392449105184435931802849936536152500319370457678249219365623669863658480757001585769269903706311928279558551332927834338409351978015531246597263579574622766465272827220056374006485499977096599470454020828166226237857393450736339007967761930577506740176324673600968951340535537458516661134223766678604162159680461914467291840300530057530849048765391711386591646239524912623653881879636239373280423891018672348497668235089863388587925628302755995657524455507255189313690836254779186948667994968324049705821028513185451396213837722826145437693412532098591327667236328124999"; + let num = Number { + mantissa: 2470328229206232720, + exponent: -342, + many_digits: true, + }; + let fp = ExtendedFloat { + mant: 1 << 63, + exp: -63, + }; + let result = slow::slow::(num.clone(), fp, integer.iter(), fraction.iter()); + assert_eq!(result.mant, 0); + assert_eq!(result.exp, 0); + + // 5e-324, round-up. + let fraction = b"47032822920623272088284396434110686182529901307162382212792841250337753635104375932649918180817996189898282347722858865463328355177969898199387398005390939063150356595155702263922908583924491051844359318028499365361525003193704576782492193656236698636584807570015857692699037063119282795585513329278343384093519780155312465972635795746227664652728272200563740064854999770965994704540208281662262378573934507363390079677619305775067401763246736009689513405355374585166611342237666786041621596804619144672918403005300575308490487653917113865916462395249126236538818796362393732804238910186723484976682350898633885879256283027559956575244555072551893136908362547791869486679949683240497058210285131854513962138377228261454376934125320985913276672363281251"; + let result = slow::slow::(num.clone(), fp, integer.iter(), fraction.iter()); + assert_eq!(result.mant, 1); + assert_eq!(result.exp, 0); + + // 8.98846567431158e+307 + let integer = b"8"; + let fraction = b"9884656743115805365666807213050294962762414131308158973971342756154045415486693752413698006024096935349884403114202125541629105369684531108613657287705365884742938136589844238179474556051429647415148697857438797685859063890851407391008830874765563025951597582513936655578157348020066364210154316532161708032"; + let num = Number { + mantissa: 8988465674311580536, + exponent: 289, + many_digits: true, + }; + let fp = ExtendedFloat { + mant: 9223372036854776832, + exp: 2035, + }; + let result = slow::slow::(num.clone(), fp, integer.iter(), fraction.iter()); + assert_eq!(result.mant, 0); + assert_eq!(result.exp, 2046); + + // 8.988465674311582e+307 + let fraction = b"98846567431158053656668072130502949627624141313081589739713427561540454154866937524136980060240969353498844031142021255416291053696845311086136572877053658847429381365898442381794745560514296474151486978574387976858590638908514073910088308747655630259515975825139366555781573480200663642101543165321617080321"; + let result = slow::slow::(num.clone(), fp, integer.iter(), fraction.iter()); + assert_eq!(result.mant, 1); + assert_eq!(result.exp, 2046); +} + +#[test] +fn positive_digit_comp_test() { + // 8.98846567431158e+307 + let bigmant = Bigint { + data: vec_from_u32(&[ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 1024, 2147483648, + ]), + }; + let exponent = 307 + 1 - 308; + let result = slow::positive_digit_comp::(bigmant, 
exponent); + assert_eq!(result.mant, 0); + assert_eq!(result.exp, 2046); + + // 8.988465674311582e+307 + let bigmant = Bigint { + data: vec_from_u32(&[ + 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 1024, 2147483648, + ]), + }; + let exponent = 307 + 1 - 308; + let result = slow::positive_digit_comp::(bigmant, exponent); + assert_eq!(result.mant, 1); + assert_eq!(result.exp, 2046); +} + +#[test] +fn negative_digit_comp_test() { + // 5e-324, below halfway, round-down to 0.0. + let bigmant = Bigint { + data: vec_from_u32(&[ + 1727738439, 330069557, 3509095598, 686205316, 156923684, 750687444, 2688855918, + 28211928, 1887482096, 3222998811, 913348873, 1652282845, 1600735541, 1664240266, + 84454144, 1487769792, 1855966778, 2832488299, 507030148, 1410055467, 2513359584, + 3453963205, 779237894, 3456088326, 3671009895, 3094451696, 1250165638, 2682979794, + 357925323, 1713890438, 3271046672, 3485897285, 3934710962, 1813530592, 199705026, + 976390839, 2805488572, 2194288220, 2094065006, 2592523639, 3798974617, 586957244, + 1409218821, 3442050171, 3789534764, 1380190380, 2055222457, 3535299831, 429482276, + 389342206, 133558576, 721875297, 3013586570, 540178306, 2389746866, 2313334501, + 422440635, 1288499129, 864978311, 842263325, 3016323856, 2282442263, 1440906063, + 3931458696, 3511314276, 1884879882, 946366824, 4260548261, 1073379659, 1732329252, + 3828972211, 1915607049, 3665440937, 1844358779, 3735281178, 2646335050, 1457460927, + 2940016422, 1051, + ]), + }; + let fp = ExtendedFloat { + mant: 1 << 63, + exp: -63, + }; + let exponent = -324 + 1 - 755; + let result = slow::negative_digit_comp::(bigmant, fp, exponent); + assert_eq!(result.mant, 0); + assert_eq!(result.exp, 0); + + // 5e-324, halfway, round-down to 0.0. + let bigmant = Bigint { + data: vec_from_u32(&[ + 2084786877, 507136210, 2666388819, 3110242527, 3178432722, 541916566, 208847286, + 3092404665, 83491860, 2893735989, 3973758097, 2600107496, 147629623, 1754010897, + 4226332273, 2587058081, 942453804, 88731834, 1319061990, 173208747, 1982493283, + 3808794987, 3874839738, 1854586992, 3508364323, 2021729080, 1899625710, 2420749567, + 816401711, 3059730605, 1570934109, 3138812023, 1756281367, 3205859133, 2985201975, + 1014588672, 3799556578, 577719905, 4052248225, 3649019757, 398935965, 56421532, + 976366795, 1876047791, 3147705595, 4025764546, 1097271882, 1910500779, 2397021233, + 1340419138, 2753207595, 3067328524, 2210626776, 1280440432, 3940874757, 4172726578, + 1035509558, 1062145421, 1465448826, 2990139501, 1785427751, 2093931515, 4055890033, + 3388365687, 2245484242, 3609657408, 3527114516, 1013577862, 2389075196, 426934091, + 3237939346, 1071362463, 4070999470, 250952461, 2280067948, 1097862995, 2226250520, + 221983348, 1, + ]), + }; + let exponent = -324 + 1 - 752; + let result = slow::negative_digit_comp::(bigmant, fp, exponent); + assert_eq!(result.mant, 0); + assert_eq!(result.exp, 0); + + // 5e-324, above halfway, round-up to 5e-324. 
+ let bigmant = Bigint { + data: vec_from_u32(&[ + 3667999587, 776394808, 894084415, 1037654204, 1719556155, 1124198371, 2088472861, + 859275578, 834918607, 3167556114, 1082875312, 231271193, 1476296236, 360239786, + 3608617070, 100777043, 834603454, 887318342, 305718012, 1732087473, 2645063646, + 3728211506, 93691724, 1366000745, 723904866, 3037421624, 1816387920, 2732659194, + 3869049819, 532534979, 2824439209, 1323349161, 382944493, 1993820262, 4082215981, + 1555952134, 3635827414, 1482231762, 1867776587, 2130459211, 3989359658, 564215320, + 1173733358, 1580608728, 1412284882, 1602939803, 2382784237, 1925138608, 2495375854, + 519289497, 1762272177, 608514174, 631431287, 4214469733, 754041908, 3072560125, + 1765160997, 2031519620, 1769586374, 4131591237, 674408332, 3759445970, 1904194670, + 3818885807, 980005947, 1736835717, 911406800, 1545844036, 2415915482, 4269340915, + 2314622388, 2123690045, 2055289038, 2509524619, 1325843000, 2388695363, 787668722, + 2219833485, 10, + ]), + }; + let exponent = -324 + 1 - 753; + let result = slow::negative_digit_comp::(bigmant, fp, exponent); + assert_eq!(result.mant, 1); + assert_eq!(result.exp, 0); + + // 1e-323, below halfway, round-down to 5e-324. + let bigmant = Bigint { + data: vec_from_u32(&[ + 888248023, 990208672, 1937352202, 2058615950, 470771052, 2252062332, 3771600458, + 84635785, 1367478992, 1079061842, 2740046621, 661881239, 507239328, 697753503, + 253362433, 168342080, 1272933039, 4202497602, 1521090445, 4230166401, 3245111456, + 1771955024, 2337713684, 1778330386, 2423095095, 693420498, 3750496916, 3753972086, + 1073775970, 846704018, 1223205425, 1867757265, 3214198296, 1145624482, 599115079, + 2929172517, 4121498420, 2287897365, 1987227723, 3482603622, 2806989260, 1760871734, + 4227656463, 1736215921, 2778669702, 4140571142, 1870700075, 2015964902, 1288446830, + 1168026618, 400675728, 2165625891, 450825118, 1620534920, 2874273302, 2645036208, + 1267321906, 3865497387, 2594934933, 2526789975, 459036976, 2552359495, 27750894, + 3204441497, 1944008238, 1359672352, 2839100473, 4191710191, 3220138979, 902020460, + 2896982042, 1451853853, 2406388220, 1238109043, 2615908943, 3644037856, 77415486, + 230114675, 3155, + ]), + }; + let fp = ExtendedFloat { + mant: 1 << 63, + exp: -62, + }; + let exponent = -324 + 1 - 755; + let result = slow::negative_digit_comp::(bigmant, fp, exponent); + assert_eq!(result.mant, 1); + assert_eq!(result.exp, 0); + + // 1e-323, halfway, round-up to 1e-323. 
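+    // Exactly halfway between 5e-324 (mantissa 1) and 1e-323 (mantissa 2): ties
+    // round to even, and 2 is the even mantissa, so this case rounds up (the
+    // 5e-324 halfway case above rounds down to the even mantissa 0 for the same reason).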
+ let bigmant = Bigint { + data: vec_from_u32(&[ + 1959393335, 1521408631, 3704199161, 740792990, 945363576, 1625749700, 626541858, + 687279403, 250475582, 91273375, 3331339701, 3505355194, 442888870, 967065395, + 4089062228, 3466206949, 2827361413, 266195502, 3957185970, 519626241, 1652512553, + 2836450370, 3034584624, 1268793682, 1935158378, 1770219946, 1403909835, 2967281406, + 2449205134, 589257223, 417835033, 826501478, 973876807, 1027642808, 365671335, + 3043766018, 2808735142, 1733159717, 3566810083, 2357124681, 1196807897, 169264596, + 2929100385, 1333176077, 853182194, 3487359048, 3291815648, 1436535041, 2896096404, + 4021257415, 3964655489, 612050981, 2336913034, 3841321297, 3232689679, 3928245144, + 3106528676, 3186436263, 101379182, 380483912, 1061315959, 1986827250, 3577735508, + 1575162471, 2441485432, 2239037633, 1991408958, 3040733588, 2872258292, 1280802274, + 1123883446, 3214087391, 3623063818, 752857385, 2545236548, 3293588986, 2383784264, + 665950045, 3, + ]), + }; + let exponent = -324 + 1 - 752; + let result = slow::negative_digit_comp::(bigmant, fp, exponent); + assert_eq!(result.mant, 2); + assert_eq!(result.exp, 0); + + // 1e-323, above halfway, round-up to 1e-323. + let bigmant = Bigint { + data: vec_from_u32(&[ + 2414064167, 2329184426, 2682253245, 3112962612, 863701169, 3372595114, 1970451287, + 2577826735, 2504755821, 912733750, 3248625938, 693813579, 133921412, 1080719359, + 2235916618, 302331131, 2503810362, 2661955026, 917154036, 901295123, 3640223643, + 2594699927, 281075174, 4098002235, 2171714598, 522330280, 1154196466, 3903010287, + 3017214866, 1597604939, 4178350331, 3970047484, 1148833479, 1686493490, 3656713352, + 372889108, 2317547651, 151727992, 1308362466, 2096410338, 3378144383, 1692645962, + 3521200074, 446858888, 4236854647, 513852113, 2853385416, 1480448529, 3191160267, + 1557868492, 991849235, 1825542523, 1894293861, 4053474607, 2262125726, 627745783, + 1000515697, 1799591565, 1013791827, 3804839120, 2023224998, 2688403318, 1417616716, + 2866722830, 2940017843, 915539855, 2734220401, 342564812, 2952779151, 4218088154, + 2648899870, 2076102840, 1870899819, 3233606562, 3977529001, 2871118793, 2363006167, + 2364533159, 31, + ]), + }; + let exponent = -324 + 1 - 753; + let result = slow::negative_digit_comp::(bigmant, fp, exponent); + assert_eq!(result.mant, 2); + assert_eq!(result.exp, 0); +} + +#[test] +fn parse_mantissa_test() { + let max_digits = f64::MAX_DIGITS; + + // Large number of digits. 
+ let integer = b"2"; + let fraction = b"4703282292062327208828439643411068618252990130716238221279284125033775363510437593264991818081799618989828234772285886546332835517796989819938739800539093906315035659515570226392290858392449105184435931802849936536152500319370457678249219365623669863658480757001585769269903706311928279558551332927834338409351978015531246597263579574622766465272827220056374006485499977096599470454020828166226237857393450736339007967761930577506740176324673600968951340535537458516661134223766678604162159680461914467291840300530057530849048765391711386591646239524912623653881879636239373280423891018672348497668235089863388587925628302755995657524455507255189313690836254779186948667994968324049705821028513185451396213837722826145437693412532098591327667236328124999"; + let (bigmant, count) = slow::parse_mantissa(integer.iter(), fraction.iter(), max_digits); + let expected = vec_from_u32(&[ + 1727738439, 330069557, 3509095598, 686205316, 156923684, 750687444, 2688855918, 28211928, + 1887482096, 3222998811, 913348873, 1652282845, 1600735541, 1664240266, 84454144, + 1487769792, 1855966778, 2832488299, 507030148, 1410055467, 2513359584, 3453963205, + 779237894, 3456088326, 3671009895, 3094451696, 1250165638, 2682979794, 357925323, + 1713890438, 3271046672, 3485897285, 3934710962, 1813530592, 199705026, 976390839, + 2805488572, 2194288220, 2094065006, 2592523639, 3798974617, 586957244, 1409218821, + 3442050171, 3789534764, 1380190380, 2055222457, 3535299831, 429482276, 389342206, + 133558576, 721875297, 3013586570, 540178306, 2389746866, 2313334501, 422440635, 1288499129, + 864978311, 842263325, 3016323856, 2282442263, 1440906063, 3931458696, 3511314276, + 1884879882, 946366824, 4260548261, 1073379659, 1732329252, 3828972211, 1915607049, + 3665440937, 1844358779, 3735281178, 2646335050, 1457460927, 2940016422, 1051, + ]); + assert_eq!(&*bigmant.data, &*expected); + assert_eq!(count, 755); + + // Truncation. 
+ let integer = b"7"; + let fraction = b"4109846876186981626485318930233205854758970392148714663837852375101326090531312779794975454245398856969484704316857659638998506553390969459816219401617281718945106978546710679176872575177347315553307795408549809608457500958111373034747658096871009590975442271004757307809711118935784838675653998783503015228055934046593739791790738723868299395818481660169122019456499931289798411362062484498678713572180352209017023903285791732520220528974020802906854021606612375549983402671300035812486479041385743401875520901590172592547146296175134159774938718574737870961645638908718119841271673056017045493004705269590165763776884908267986972573366521765567941072508764337560846003984904972149117463085539556354188641513168478436313080237596295773983001708984375332669816033062329967789262837"; + let (bigmant, count) = slow::parse_mantissa(integer.iter(), fraction.iter(), max_digits); + let expected = vec_from_u32(&[ + 983641521, 2202462645, 4170685875, 1591772364, 529830014, 803977727, 126733331, 1695971390, + 4089590927, 1532849076, 2705586665, 4046282448, 4076195232, 3230469892, 3059053929, + 79035789, 744229654, 2026438108, 3570486781, 2818088662, 3485839733, 3653138023, + 2857937689, 602717004, 3689362390, 283607819, 1783392475, 2053068939, 1888214698, + 550023429, 296880187, 1046779059, 1285361259, 84614934, 1627922685, 2023868765, 1987523901, + 743493573, 3897769089, 2210613570, 2261081349, 3015057659, 3949711644, 3346092916, + 2433639051, 36411806, 1050442, 269209477, 2649742673, 1494221829, 2763524503, 2514491481, + 2325312415, 1741242814, 2479923579, 1098250122, 2416211509, 3612906464, 403420662, + 3663250314, 1993722098, 365907183, 4270226312, 3962131185, 432952495, 2963635838, + 2996289227, 3200289391, 2753231690, 2780286109, 884373163, 1418533204, 3382415762, + 499541562, 3369625401, 3421327641, 3526770155, 3109983188, 1157439767, 734593155, + ]); + assert_eq!(&*bigmant.data, &*expected); + assert_eq!(count, max_digits + 1); + + // No fraction digits. + let integer = b"74109846876186981626485318930233205854758970392148714663837852375101326090531312779794975454245398856969484704316857659638998506553390969459816219401617281718945106978546710679176872575177347315553307795408549809608457500958111373034747658096871009590975442271004757307809711118935784838675653998783503015228055934046593739791790738723868299395818481660169122019456499931289798411362062484498678713572180352209017023903285791732520220528974020802906854021606612375549983402671300035812486479041385743401875520901590172592547146296175134159774938718574737870961645638908718119841271673056017045493004705269590165763776884908267986972573366521765567941072508764337560846003984904972149117463085539556354188641513168478436313080237596295773983001708984375332669816033062329967789262837"; + let fraction = b""; + let (bigmant, count) = slow::parse_mantissa(integer.iter(), fraction.iter(), max_digits); + assert_eq!(&*bigmant.data, &*expected); + assert_eq!(count, max_digits + 1); + + // Multiple of step (check we add our temporary correctly). 
+ let integer = b"7410984687618698162648531893023320585475897039214871466383785237510132609053131277979497545424539885696948470431685765963899850655339096945981621940161728171894510697854671067917687257517734731555330779540854980960845750095811137303474765809687100959097544227100475730780971111893578483867565399878350301522805593404659373979179073872386829939581848166016912201945649993128979841136206248449867871357218035220901702390328579173252022052897402080290685402160661237554998340267130003581248647904138574340187552090159017259254714629617513415977493871857473787096164563890871811984127167305601704549300470526959016576377688490826798697257336652176556794107250876433756084600398490497214911746308553955635418864151316847843631308023759629577398300170898437533266981"; + let fraction = b""; + let (bigmant, count) = slow::parse_mantissa(integer.iter(), fraction.iter(), max_digits); + let expected = vec_from_u32(&[ + 617018405, 396211401, 2130402383, 3812547827, 4263683770, 3918012496, 1787721490, + 2493014694, 435464626, 3720854431, 2928509507, 2677932436, 369049650, 3606588290, + 231237141, 2231172875, 3358152367, 95217925, 2777810007, 1016185079, 596681915, 2331711780, + 593487272, 4212730845, 339602972, 4097829793, 262427536, 4182115035, 3414687403, + 3711518952, 4168896929, 483727327, 1657080031, 2785588628, 1009114769, 482126749, + 485376744, 1123705337, 3225501941, 2939050108, 1338451005, 2104263947, 3425461126, + 1834224928, 4061025704, 792093815, 2707019125, 3610271203, 4254101529, 1026215278, + 4117890107, 1748110416, 2535111606, 80965120, 3823822115, 2354910057, 590658512, + 2682089507, 159300272, 1776569442, 3382166479, 3222978591, 540586210, 934713382, + 2014123057, 1455555790, 4119131465, 3685912982, 3019947291, 3437891678, 2660105801, + 2605860762, 394373515, 4177081532, 1616198650, 1580399082, 2017617452, 3327697130, + 315505357, + ]); + assert_eq!(&*bigmant.data, &*expected); + assert_eq!(count, 760); +} diff --git a/vendor/minimal-lexical/tests/stackvec.rs b/vendor/minimal-lexical/tests/stackvec.rs new file mode 100644 index 00000000000000..d5587f23f8a6aa --- /dev/null +++ b/vendor/minimal-lexical/tests/stackvec.rs @@ -0,0 +1,32 @@ +use minimal_lexical::bigint; +#[cfg(feature = "alloc")] +pub use minimal_lexical::heapvec::HeapVec as VecType; +#[cfg(not(feature = "alloc"))] +pub use minimal_lexical::stackvec::StackVec as VecType; + +pub fn vec_from_u32(x: &[u32]) -> VecType { + let mut vec = VecType::new(); + #[cfg(not(all(target_pointer_width = "64", not(target_arch = "sparc"))))] + { + for &xi in x { + vec.try_push(xi as bigint::Limb).unwrap(); + } + } + + #[cfg(all(target_pointer_width = "64", not(target_arch = "sparc")))] + { + for xi in x.chunks(2) { + match xi.len() { + 1 => vec.try_push(xi[0] as bigint::Limb).unwrap(), + 2 => { + let xi0 = xi[0] as bigint::Limb; + let xi1 = xi[1] as bigint::Limb; + vec.try_push((xi1 << 32) | xi0).unwrap() + }, + _ => unreachable!(), + } + } + } + + vec +} diff --git a/vendor/minimal-lexical/tests/vec_tests.rs b/vendor/minimal-lexical/tests/vec_tests.rs new file mode 100644 index 00000000000000..3a5f5886ad6b6e --- /dev/null +++ b/vendor/minimal-lexical/tests/vec_tests.rs @@ -0,0 +1,395 @@ +mod stackvec; + +use core::cmp; +use minimal_lexical::bigint; +use stackvec::{vec_from_u32, VecType}; + +// u64::MAX and Limb::MAX for older Rustc versions. 
+const U64_MAX: u64 = 0xffff_ffff_ffff_ffff; +// LIMB_MAX +#[cfg(all(target_pointer_width = "64", not(target_arch = "sparc")))] +const LIMB_MAX: u64 = U64_MAX; +#[cfg(not(all(target_pointer_width = "64", not(target_arch = "sparc"))))] +const LIMB_MAX: u32 = 0xffff_ffff; + +#[test] +fn simple_test() { + // Test the simple properties of the stack vector. + let mut x = VecType::from_u64(1); + assert_eq!(x.len(), 1); + assert_eq!(x.is_empty(), false); + assert_eq!(x.capacity(), bigint::BIGINT_LIMBS); + x.try_push(5).unwrap(); + assert_eq!(x.len(), 2); + assert_eq!(x.pop(), Some(5)); + assert_eq!(x.len(), 1); + assert_eq!(&*x, &[1]); + x.try_extend(&[2, 3, 4]).unwrap(); + assert_eq!(x.len(), 4); + assert_eq!(&*x, &[1, 2, 3, 4]); + x.try_resize(6, 0).unwrap(); + assert_eq!(x.len(), 6); + assert_eq!(&*x, &[1, 2, 3, 4, 0, 0]); + x.try_resize(0, 0).unwrap(); + assert_eq!(x.len(), 0); + assert_eq!(x.is_empty(), true); + + let x = VecType::try_from(&[5, 1]).unwrap(); + assert_eq!(x.len(), 2); + assert_eq!(x.is_empty(), false); + if bigint::LIMB_BITS == 32 { + assert_eq!(x.hi64(), (0x8000000280000000, false)); + } else { + assert_eq!(x.hi64(), (0x8000000000000002, true)); + } + let rview = bigint::rview(&x); + assert_eq!(x[0], 5); + assert_eq!(x[1], 1); + assert_eq!(rview[0], 1); + assert_eq!(rview[1], 5); + assert_eq!(x.len(), 2); + + assert_eq!(VecType::from_u64(U64_MAX).hi64(), (U64_MAX, false)); +} + +#[test] +fn hi64_test() { + assert_eq!(VecType::from_u64(0xA).hi64(), (0xA000000000000000, false)); + assert_eq!(VecType::from_u64(0xAB).hi64(), (0xAB00000000000000, false)); + assert_eq!(VecType::from_u64(0xAB00000000).hi64(), (0xAB00000000000000, false)); + assert_eq!(VecType::from_u64(0xA23456789A).hi64(), (0xA23456789A000000, false)); +} + +#[test] +fn cmp_test() { + // Simple + let x = VecType::from_u64(1); + let y = VecType::from_u64(2); + assert_eq!(x.partial_cmp(&x), Some(cmp::Ordering::Equal)); + assert_eq!(x.cmp(&x), cmp::Ordering::Equal); + assert_eq!(x.cmp(&y), cmp::Ordering::Less); + + // Check asymmetric + let x = VecType::try_from(&[5, 1]).unwrap(); + let y = VecType::from_u64(2); + assert_eq!(x.cmp(&x), cmp::Ordering::Equal); + assert_eq!(x.cmp(&y), cmp::Ordering::Greater); + + // Check when we use reverse ordering properly. + let x = VecType::try_from(&[5, 1, 9]).unwrap(); + let y = VecType::try_from(&[6, 2, 8]).unwrap(); + assert_eq!(x.cmp(&x), cmp::Ordering::Equal); + assert_eq!(x.cmp(&y), cmp::Ordering::Greater); + + // Complex scenario, check it properly uses reverse ordering. 
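+    // Words are given least-significant first, so the comparison effectively runs
+    // from the most significant end: x ([0, 1, 9]) and y ([4294967295, 0, 9]) share
+    // the top word 9, and x's next word (1) exceeds y's (0), so x > y even though
+    // x's lowest word is smaller.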
+ let x = VecType::try_from(&[0, 1, 9]).unwrap(); + let y = VecType::try_from(&[4294967295, 0, 9]).unwrap(); + assert_eq!(x.cmp(&x), cmp::Ordering::Equal); + assert_eq!(x.cmp(&y), cmp::Ordering::Greater); +} + +#[test] +fn math_test() { + let mut x = VecType::try_from(&[0, 1, 9]).unwrap(); + assert_eq!(x.is_normalized(), true); + x.try_push(0).unwrap(); + assert_eq!(&*x, &[0, 1, 9, 0]); + assert_eq!(x.is_normalized(), false); + x.normalize(); + assert_eq!(&*x, &[0, 1, 9]); + assert_eq!(x.is_normalized(), true); + + x.add_small(1); + assert_eq!(&*x, &[1, 1, 9]); + x.add_small(LIMB_MAX); + assert_eq!(&*x, &[0, 2, 9]); + + x.mul_small(3); + assert_eq!(&*x, &[0, 6, 27]); + x.mul_small(LIMB_MAX); + let expected: VecType = if bigint::LIMB_BITS == 32 { + vec_from_u32(&[0, 4294967290, 4294967274, 26]) + } else { + vec_from_u32(&[0, 0, 4294967290, 4294967295, 4294967274, 4294967295, 26]) + }; + assert_eq!(&*x, &*expected); + + let mut x = VecType::from_u64(0xFFFFFFFF); + let y = VecType::from_u64(5); + x *= &y; + let expected: VecType = vec_from_u32(&[0xFFFFFFFB, 0x4]); + assert_eq!(&*x, &*expected); + + // Test with carry + let mut x = VecType::from_u64(1); + assert_eq!(&*x, &[1]); + x.add_small(LIMB_MAX); + assert_eq!(&*x, &[0, 1]); +} + +#[test] +fn scalar_add_test() { + assert_eq!(bigint::scalar_add(5, 5), (10, false)); + assert_eq!(bigint::scalar_add(LIMB_MAX, 1), (0, true)); +} + +#[test] +fn scalar_mul_test() { + assert_eq!(bigint::scalar_mul(5, 5, 0), (25, 0)); + assert_eq!(bigint::scalar_mul(5, 5, 1), (26, 0)); + assert_eq!(bigint::scalar_mul(LIMB_MAX, 2, 0), (LIMB_MAX - 1, 1)); +} + +#[test] +fn small_add_test() { + let mut x = VecType::from_u64(4294967295); + bigint::small_add(&mut x, 5); + let expected: VecType = vec_from_u32(&[4, 1]); + assert_eq!(&*x, &*expected); + + let mut x = VecType::from_u64(5); + bigint::small_add(&mut x, 7); + let expected = VecType::from_u64(12); + assert_eq!(&*x, &*expected); + + // Single carry, internal overflow + let mut x = VecType::from_u64(0x80000000FFFFFFFF); + bigint::small_add(&mut x, 7); + let expected: VecType = vec_from_u32(&[6, 0x80000001]); + assert_eq!(&*x, &*expected); + + // Double carry, overflow + let mut x = VecType::from_u64(0xFFFFFFFFFFFFFFFF); + bigint::small_add(&mut x, 7); + let expected: VecType = vec_from_u32(&[6, 0, 1]); + assert_eq!(&*x, &*expected); +} + +#[test] +fn small_mul_test() { + // No overflow check, 1-int. + let mut x = VecType::from_u64(5); + bigint::small_mul(&mut x, 7); + let expected = VecType::from_u64(35); + assert_eq!(&*x, &*expected); + + // No overflow check, 2-ints. + let mut x = VecType::from_u64(0x4000000040000); + bigint::small_mul(&mut x, 5); + let expected: VecType = vec_from_u32(&[0x00140000, 0x140000]); + assert_eq!(&*x, &*expected); + + // Overflow, 1 carry. + let mut x = VecType::from_u64(0x33333334); + bigint::small_mul(&mut x, 5); + let expected: VecType = vec_from_u32(&[4, 1]); + assert_eq!(&*x, &*expected); + + // Overflow, 1 carry, internal. + let mut x = VecType::from_u64(0x133333334); + bigint::small_mul(&mut x, 5); + let expected: VecType = vec_from_u32(&[4, 6]); + assert_eq!(&*x, &*expected); + + // Overflow, 2 carries. 
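+    // 0x3333333333333334 * 5 == 0x1_0000_0000_0000_0004 (2^64 + 4), which no longer
+    // fits in 64 bits; expressed as 32-bit words the result is [4, 0, 1].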
+ let mut x = VecType::from_u64(0x3333333333333334); + bigint::small_mul(&mut x, 5); + let expected: VecType = vec_from_u32(&[4, 0, 1]); + assert_eq!(&*x, &*expected); +} + +#[test] +fn pow_test() { + let mut x = VecType::from_u64(1); + bigint::pow(&mut x, 2); + let expected = VecType::from_u64(25); + assert_eq!(&*x, &*expected); + + let mut x = VecType::from_u64(1); + bigint::pow(&mut x, 15); + let expected: VecType = vec_from_u32(&[452807053, 7]); + assert_eq!(&*x, &*expected); + + let mut x = VecType::from_u64(1); + bigint::pow(&mut x, 16); + let expected: VecType = vec_from_u32(&[2264035265, 35]); + assert_eq!(&*x, &*expected); + + let mut x = VecType::from_u64(1); + bigint::pow(&mut x, 17); + let expected: VecType = vec_from_u32(&[2730241733, 177]); + assert_eq!(&*x, &*expected); + + let mut x = VecType::from_u64(1); + bigint::pow(&mut x, 302); + let expected: VecType = vec_from_u32(&[ + 2443090281, 2149694430, 2297493928, 1584384001, 1279504719, 1930002239, 3312868939, + 3735173465, 3523274756, 2025818732, 1641675015, 2431239749, 4292780461, 3719612855, + 4174476133, 3296847770, 2677357556, 638848153, 2198928114, 3285049351, 2159526706, + 626302612, + ]); + assert_eq!(&*x, &*expected); +} + +#[test] +fn large_add_test() { + // Overflow, both single values + let mut x = VecType::from_u64(4294967295); + let y = VecType::from_u64(5); + bigint::large_add(&mut x, &y); + let expected: VecType = vec_from_u32(&[4, 1]); + assert_eq!(&*x, &*expected); + + // No overflow, single value + let mut x = VecType::from_u64(5); + let y = VecType::from_u64(7); + bigint::large_add(&mut x, &y); + let expected = VecType::from_u64(12); + assert_eq!(&*x, &*expected); + + // Single carry, internal overflow + let mut x = VecType::from_u64(0x80000000FFFFFFFF); + let y = VecType::from_u64(7); + bigint::large_add(&mut x, &y); + let expected: VecType = vec_from_u32(&[6, 0x80000001]); + assert_eq!(&*x, &*expected); + + // 1st overflows, 2nd doesn't. + let mut x = VecType::from_u64(0x7FFFFFFFFFFFFFFF); + let y = VecType::from_u64(0x7FFFFFFFFFFFFFFF); + bigint::large_add(&mut x, &y); + let expected: VecType = vec_from_u32(&[0xFFFFFFFE, 0xFFFFFFFF]); + assert_eq!(&*x, &*expected); + + // Both overflow. + let mut x = VecType::from_u64(0x8FFFFFFFFFFFFFFF); + let y = VecType::from_u64(0x7FFFFFFFFFFFFFFF); + bigint::large_add(&mut x, &y); + let expected: VecType = vec_from_u32(&[0xFFFFFFFE, 0x0FFFFFFF, 1]); + assert_eq!(&*x, &*expected); +} + +#[test] +fn large_mul_test() { + // Test by empty + let mut x = VecType::from_u64(0xFFFFFFFF); + let y = VecType::new(); + bigint::large_mul(&mut x, &y); + let expected = VecType::new(); + assert_eq!(&*x, &*expected); + + // Simple case + let mut x = VecType::from_u64(0xFFFFFFFF); + let y = VecType::from_u64(5); + bigint::large_mul(&mut x, &y); + let expected: VecType = vec_from_u32(&[0xFFFFFFFB, 0x4]); + assert_eq!(&*x, &*expected); + + // Large u32, but still just as easy. + let mut x = VecType::from_u64(0xFFFFFFFF); + let y = VecType::from_u64(0xFFFFFFFE); + bigint::large_mul(&mut x, &y); + let expected: VecType = vec_from_u32(&[0x2, 0xFFFFFFFD]); + assert_eq!(&*x, &*expected); + + // Let's multiply two large values together. 
+ let mut x: VecType = vec_from_u32(&[0xFFFFFFFE, 0x0FFFFFFF, 1]); + let y: VecType = vec_from_u32(&[0x99999999, 0x99999999, 0xCCCD9999, 0xCCCC]); + bigint::large_mul(&mut x, &y); + let expected: VecType = + vec_from_u32(&[0xCCCCCCCE, 0x5CCCCCCC, 0x9997FFFF, 0x33319999, 0x999A7333, 0xD999]); + assert_eq!(&*x, &*expected); +} + +#[test] +fn very_large_mul_test() { + // Test cases triggered to that would normally use `karatsuba_mul`. + // Karatsuba multiplication was ripped out, however, these are useful + // test cases. + let mut x: VecType = vec_from_u32(&[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]); + let y: VecType = vec_from_u32(&[4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19]); + bigint::large_mul(&mut x, &y); + let expected: VecType = vec_from_u32(&[ + 4, 13, 28, 50, 80, 119, 168, 228, 300, 385, 484, 598, 728, 875, 1040, 1224, 1340, 1435, + 1508, 1558, 1584, 1585, 1560, 1508, 1428, 1319, 1180, 1010, 808, 573, 304, + ]); + assert_eq!(&*x, &*expected); + + // Test cases triggered to that would normally use `karatsuba_uneven_mul`. + let mut x: VecType = vec_from_u32(&[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]); + let y: VecType = vec_from_u32(&[ + 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, + 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, + ]); + bigint::large_mul(&mut x, &y); + let expected: VecType = vec_from_u32(&[ + 4, 13, 28, 50, 80, 119, 168, 228, 300, 385, 484, 598, 728, 875, 1040, 1224, 1360, 1496, + 1632, 1768, 1904, 2040, 2176, 2312, 2448, 2584, 2720, 2856, 2992, 3128, 3264, 3400, 3536, + 3672, 3770, 3829, 3848, 3826, 3762, 3655, 3504, 3308, 3066, 2777, 2440, 2054, 1618, 1131, + 592, + ]); + assert_eq!(&*x, &*expected); +} + +#[test] +fn bit_length_test() { + let x: VecType = vec_from_u32(&[0, 0, 0, 1]); + assert_eq!(bigint::bit_length(&x), 97); + + let x: VecType = vec_from_u32(&[0, 0, 0, 3]); + assert_eq!(bigint::bit_length(&x), 98); + + let x = VecType::from_u64(1 << 31); + assert_eq!(bigint::bit_length(&x), 32); +} + +#[test] +fn shl_bits_test() { + let mut x = VecType::from_u64(0xD2210408); + bigint::shl_bits(&mut x, 5); + let expected: VecType = vec_from_u32(&[0x44208100, 0x1A]); + assert_eq!(&*x, &*expected); +} + +#[test] +fn shl_limbs_test() { + let mut x = VecType::from_u64(0xD2210408); + bigint::shl_limbs(&mut x, 2); + let expected: VecType = if bigint::LIMB_BITS == 32 { + vec_from_u32(&[0, 0, 0xD2210408]) + } else { + vec_from_u32(&[0, 0, 0, 0, 0xD2210408]) + }; + assert_eq!(&*x, &*expected); +} + +#[test] +fn shl_test() { + // Pattern generated via `''.join(["1" +"0"*i for i in range(20)])` + let mut x = VecType::from_u64(0xD2210408); + bigint::shl(&mut x, 5); + let expected: VecType = vec_from_u32(&[0x44208100, 0x1A]); + assert_eq!(&*x, &*expected); + + bigint::shl(&mut x, 32); + let expected: VecType = vec_from_u32(&[0, 0x44208100, 0x1A]); + assert_eq!(&*x, &*expected); + + bigint::shl(&mut x, 27); + let expected: VecType = vec_from_u32(&[0, 0, 0xD2210408]); + assert_eq!(&*x, &*expected); + + // 96-bits of previous pattern + let mut x: VecType = vec_from_u32(&[0x20020010, 0x8040100, 0xD2210408]); + bigint::shl(&mut x, 5); + let expected: VecType = vec_from_u32(&[0x400200, 0x802004, 0x44208101, 0x1A]); + assert_eq!(&*x, &*expected); + + bigint::shl(&mut x, 32); + let expected: VecType = vec_from_u32(&[0, 0x400200, 0x802004, 0x44208101, 0x1A]); + assert_eq!(&*x, &*expected); + + bigint::shl(&mut x, 27); + let expected: VecType = vec_from_u32(&[0, 0, 0x20020010, 0x8040100, 0xD2210408]); + 
assert_eq!(&*x, &*expected); +} diff --git a/vendor/nom/.cargo-checksum.json b/vendor/nom/.cargo-checksum.json new file mode 100644 index 00000000000000..89ee5e722e5fd9 --- /dev/null +++ b/vendor/nom/.cargo-checksum.json @@ -0,0 +1 @@ +{"files":{".cargo_vcs_info.json":"375660cfef21494aaa1a90768e0054f2c9e6293e2085a28ea3ad0b70efd5bad6","CHANGELOG.md":"fd79f8ab367d50fd57136744fa2e0a198c279c9707c2a45f00ad1b62d0a637ab","Cargo.lock":"3b95f09a31e1fc281ef890f9623ac84f0a58bc3454a96b652d2f0682963f29b5","Cargo.toml":"9d07fd5cc339d8f7c0c164b2568164b808dee1d441b17631cb7b7d44d39ed778","Cargo.toml.orig":"c9cb8a6ee98d4c60dbdd5f9789aed2b8296a74be3f3d5eac265e032008ad34db","LICENSE":"4dbda04344456f09a7a588140455413a9ac59b6b26a1ef7cdf9c800c012d87f0","README.md":"3cbdcb07f9ef189ad617f40423a17f1e48ee5aba3f118f261886c139d64d26ae","doc/nom_recipes.md":"a903a6d8f9e5c935f2a4cbd632f67bc46e41934d1cc8517da9b9e7f3840c9955","src/bits/complete.rs":"640bdcad311a05d94e4b3a1a8b2105c540f80864edb75de0a570f03d4055e5ed","src/bits/mod.rs":"1c6aa26887d379384c1c823684e6f8e91b59df72664eefd8ddf0b6ca5517e669","src/bits/streaming.rs":"304cc5d565cfa30075827c1d6cb173b7cb92b101d8ebe9bc50476a00c92fd5dc","src/branch/mod.rs":"dbe1ed1bb0230310adf8e8d15e6afcf8826c7a111f8429e13fe3e9ebd3fbeae0","src/branch/tests.rs":"9a4a7b0c38fc28881d904f8ad6757a23e543f47a4c2d6fd6a9589eeb97209088","src/bytes/complete.rs":"666fa037c63292b1616cbc04c5b414a53c705d0d2ccd8d84399bbe78f573b7e9","src/bytes/mod.rs":"055e264f71a9fa2245be652cc747cfb2c3e34c3c2ba3b75e9611be51fcebea0b","src/bytes/streaming.rs":"e716e6555fbde14bfc2d7358a3edc2191df0522bc55b1f7735f9809ceb235829","src/bytes/tests.rs":"f0d9eb90d72873346e47e5592d30093eb38cbbb5fbf2e769cda776ccfff4f887","src/character/complete.rs":"7eeb5f00baab7aeaf7f392e5872d141d352951a146c0396721dab71e29b4c87b","src/character/mod.rs":"2fc6a3b19b766a3c37328d62eedbc0c9cb9612aa1d38ececd5cc579b61725fa2","src/character/streaming.rs":"de67ec5663475bc5ffa16f12d121ce9181353b16656b90216704197fca3010fc","src/character/tests.rs":"38958a709f96f077f93a72b32d8ded0a2ad6e488d9aadbe3cf1cfd8adaec06c8","src/combinator/mod.rs":"f7b9c35734f10a4b46d2e2ae874628d48fa1fe0bfc9f44325a89a14b3cfaea02","src/combinator/tests.rs":"1e56e2c1263d93bfbd244d24160a0bea41731e5158d57382e69c215427770b94","src/error.rs":"9d9bf87e76b47cfd9170f8ae50b6deeb02ff1c296aac3eb4f71ee1474dc0fba5","src/internal.rs":"5e670e0f5955af13da4b9a98d444fc721d38835b4637fe8170871fefef4e04cb","src/lib.rs":"9e05f2447ef1e5e9418953300c97d297f26f8f33c0528733a7681d8cb458346e","src/macros.rs":"11ac3dee346e5bf61015f328975cf6ea6f438683eb3b8b6c7c8e6383b0c5b7af","src/multi/mod.rs":"6093bd5909ddae76309442eba3a33716d279d219e5933a0dedef977312d6c5f8","src/multi/tests.rs":"806f89f5f347978c22e9b8cc7f8a49ad1d1fe23bff5974725b643a2ceffe8cb0","src/number/complete.rs":"a4f312c200710a446986142d19ebc78727265cf2c3b07b70dd84339051040bdd","src/number/mod.rs":"ba6eb439ee0befcc0872be7ce43b4836622af45c3dc2fc003b0d909ee42d7b20","src/number/streaming.rs":"1c2137235f093857e545069d687f50712ea457fac03961f3f6ac15c0f7e40c2a","src/sequence/mod.rs":"2dff114a950965e321cafdb215a441990f35d28756b12b2891179f348268fca2","src/sequence/tests.rs":"8dc4ca519b274d4e0694b373b2710c2e947e6074d43ec6a738a74113a09379f5","src/str.rs":"f26aa11f43e8a4a300ea0f310d599fab3f809102cfb29033ddf84f299ee8010c","src/traits.rs":"01934f8a61fc3cc5a03438a10751d3b074c89e5f3bcc96df8e43cf6b09be2308","tests/arithmetic.rs":"725efba4fc6cc811f542f3bcc8c7afd52702a66f64319d4f2796225e2e75d0ca","tests/arithmetic_ast.rs":"c7c28c988640405dd250c86045bbda75fc6ead2a769fb05eafbfbe74d
97e0485","tests/css.rs":"36a2198e42e601efc611ebd6b3c6861f3ccb6a63525829ae6a2603bcdc4c2b11","tests/custom_errors.rs":"354d5a82a4f5a24b97901a3b411b4eab038c4d034047971956b9cdc12538e50d","tests/escaped.rs":"c25987ea6d9a7dde74d58a49c332d223da3a495569cb79e3fe921bce51729ead","tests/float.rs":"cdac92fb14afb75cba9d6b8f568e272a630b2cfb9f096b76c91909a3cd016869","tests/fnmut.rs":"dc9b6140eb3405d1497b05675fc4d3050785771a2afa81990d684b2edd0c9746","tests/ini.rs":"f0ce38b90057e9e0fd2329819395c420cbf1400457f9c4279414301faa38b19c","tests/ini_str.rs":"4c8f6ce3a2a245e8365837b873c25d2d8f24887313b791e2edd09a76a2d98947","tests/issues.rs":"1322ffc270ba1bedf39b295eb622ead2715ab7d60db0181af5305a0429c7819e","tests/json.rs":"8672fca70b889d6243a2f0f4c99389e22200e4363f253e83a3f26620b92f765e","tests/mp4.rs":"db6568ee9ccad70a7b567295831b961b369011f66dc2dd406851208007588600","tests/multiline.rs":"aef9768beaf5042b8629599b2094712646abb23eb11fa662b5a9bf3dfa432547","tests/overflow.rs":"a249ebeebfc5228faf9bfd5241a54a8181df476c4699ef87bb7d8a2161b9fc72","tests/reborrow_fold.rs":"66230bacd8d36e1559f1dc919ae8eab3515963c4aef85a079ec56218c9a6e676"},"package":"d273983c5a657a70a3e8f2a01329822f3b8c8172b73826411a55751e404a0a4a"} \ No newline at end of file diff --git a/vendor/nom/.cargo_vcs_info.json b/vendor/nom/.cargo_vcs_info.json new file mode 100644 index 00000000000000..c356c323b4ecf5 --- /dev/null +++ b/vendor/nom/.cargo_vcs_info.json @@ -0,0 +1,6 @@ +{ + "git": { + "sha1": "869f8972a4383b13cf89574fda28cb7dbfd56517" + }, + "path_in_vcs": "" +} \ No newline at end of file diff --git a/vendor/nom/CHANGELOG.md b/vendor/nom/CHANGELOG.md new file mode 100644 index 00000000000000..a8f4c02c27dac7 --- /dev/null +++ b/vendor/nom/CHANGELOG.md @@ -0,0 +1,1551 @@ +# Change Log + +## [Unreleased][unreleased] + +### Thanks + +### Changed + +## 7.1.3 - 2023-01-15 + +### Thanks + +- @Shadow53 + +### Fixed + +- panic in `many` and `count` combinators when the output type is zero sized + +## 7.1.2 - 2023-01-01 + +### Thanks + +- @joubs +- @Fyko +- @LoganDark +- @darnuria +- @jkugelman +- @barower +- @puzzlewolf +- @epage +- @cky +- @wolthom +- @w1ll-i-code + +### Changed + +- documentation fixes +- tests fixes +- limit the initial capacity of the result vector of `many_m_n` to 64kiB +- bits parser now accept `Parser` implementors instead of only functions + +### Added + +- implement `Tuple` parsing for the unit type as a special case +- implement `ErrorConvert` on the unit type to make it usable as error type for bits parsers +- bool parser for bits input + +## 7.1.1 - 2022-03-14 + +### Thanks + +- @ThomasdenH +- @@SphinxKnight +- @irevoire +- @doehyunbaek +- @pxeger +- @punkeel +- @max-sixty +- @Xiretza +- @5c077m4n +- @erihsu +- @TheNeikos +- @LoganDark +- @nickelc +- @chotchki +- @ctrlcctrlv + + +### Changed + +- documentation fixes +- more examples + +## 7.1.0 - 2021-11-04 + +### Thanks + +- @nickelc +- @Stargateur +- @NilsIrl +- @clonejo +- @Strytyp +- @schubart +- @jihchi +- @nipunn1313 +- @Gungy2 +- @Drumato +- @Alexhuszagh +- @Aehmlo +- @homersimpsons +- @dne +- @epage +- @saiintbrisson +- @pymongo + +### Changed + +- documentation fixes +- Ci fixes +- the move to minimal-lexical for float parsing introduced bugs that cannot be resolved right now, so this version moves back to using the standard lib' parser. *This is a performance regression**. 
If you have specific requirements around float parsing, you are strongly encouraged to use [recognize_float](https://docs.rs/nom/latest/nom/number/complete/fn.recognize_float.html) and another library to convert to a f32 or f64 + +### Added + +- alt now works with 1 elment tuples + +## 7.0.0 - 2021-08-21 + +This release fixes dependency compilation issues and strengthen the minimum supported Rust version (MSRV) policy. This is also the first release without the macros that were used since nom's beginning. + +### Thanks + +- @djc +- @homersimpsons +- @lo48576 +- @myrrlyn +- @RalXYZ +- @nickelc +- @cenodis + +### Added + +- `take_until1` combinator +- more `to_owned` implementations +- `fail`: a parser that always fail, useful as default condition in other combinators +- text to number parsers: in the `character::streaming` and `character::complete` modules, there are parsers named `i8, u16, u32, u64, u128` and `u8 ,u16, u32, u64, u128` that recognize decimal digits and directly convert to a number in the target size (checking for max int size) + +### Removed + +- now that function combinators are the main way to write parsers, the old macro combinators are confusing newcomers. THey have been removed +- the `BitSlice` input type from bitvec has been moved into the [nom-bitvec](https://crates.io/crates/nom-bitvec) crate. nom does not depend on bitvec now +- regex parsers have been moved into the [nom-regex](https://crates.io/crates/nom-regex) crate. nom does not depend on regex now +- `ErrorKind::PArseTo` was not needed anymore + +### Changed + +- relax trait bounds +- some performance fixes +- `split_at_position*` functions should now be guaranteed panic free +- the `lexical-core` crate used for float parsing has now been replaced with `minimal-lexical`: the new crate is faster to compile, faster to parse, and has no dependencies + +### Fixed + +- infinite loop in `escaped` combinator +- `many_m_n` now fails if min > max + + +## 6.2.1 - 2021-06-23 + +### Thanks + +This release was done thanks to the hard work of (by order of appearance in the commit list): + +- @homersimpsons + +### Fixed + +- fix documentation building + +## 6.2.0 - 2021-02-15 + +### Thanks + +This release was done thanks to the hard work of (by order of appearance in the commit list): + +- @DavidKorczynski +- @homersimpsons +- @kornelski +- @lf- +- @lewisbelcher +- @ronan-d +- @weirane +- @heymind +- @marcianx +- @Nukesor + +### Added + +- nom is now regularly fuzzed through the OSSFuzz project + +### Changed + +- lots of documentation fixes +- relax trait bounds +- workarounds for dependency issues with bitvec and memchr + +## 6.1.2 - 2021-02-15 + +### Changed + +- Fix cargo feature usage in previous release + +## 6.1.1 - 2021-02-15 + +### Thanks + +This release was done thanks to the hard work of (by order of appearance in the commit list): + +- @nickelc + +### Changed + +- Fix dependenciy incompatibilities: Restrict the bitvec->funty dependency to <=1.1 + +## 6.1.0 - 2021-01-23 + +### Thanks + +This release was done thanks to the hard work of (by order of appearance in the commit list): + +- @sachaarbonel +- @vallentin +- @Lucretiel +- @meiomorphism +- @jufajardini +- @neithernut +- @drwilco + +### Changed + +- readme and documentation fixes +- rewrite of fold_many_m_n +- relax trait bounds on some parsers +- implement `std::error::Error` on `VerboseError` + + +## 6.0.1 - 2020-11-24 + +### Thanks + +This release was done thanks to the hard work of (by order of appearance in the commit list): + +- @Leonqn +- @nickelc +- 
@toshokan +- @juchiast +- @shssoichiro +- @jlkiri +- @chifflier +- @fkloiber +- @Kaoet +- @Matthew Plant + +### Added + +- `ErrorConvert` implementation for `VerboseError` + +### Changed + +- CI fixes +- `fold_many*` now accept `FnMut` for the accumulation function +- relaxed input bounds on `length_count` + +# Fixed + +- documentation fixes +- the `#[deprecated]` attribute was removed from traits because it does not compile anymore on nightly +- bits and bytes combinators from the bits modules are now converted to use `FnMut` + +## 6.0.0 - 2020-10-31 + +### Thanks + +This release was done thanks to the hard work of (by order of appearance in the commit list): +- @chifflier +- @shepmaster +- @amerelo +- @razican +- @Palladinium +- @0ndorio +- Sebastian Zivota +- @keruspe +- @devonhollowood +- @parasyte +- @nnt0 +- @AntoineCezar +- @GuillaumeGomez +- @eijebong +- @stadelmanma +- @sphynx +- @snawaz +- @fosskers +- @JamesHarrison +- @calebsander +- @jthornber +- @ahmedcharles +- @rljacobson +- @benkay86 +- @georgeclaghorn +- @TianyiShi2001 +- @shnewto +- @alfriadox +- @resistor +- @myrrlyn +- @chipsenkbeil +- @ruza-net +- @fanf2 +- @jameysharp +- @FallenWarrior2k +- @jmg-duarte +- @ericseppanen +- @hbina +- Andreas Molzer +- @nickelc +- @bgourlie + +## Notable changes + +This release is a more polished version of nom 5, that came with a focus on +function parsers, by relaxing the requirements: combinators will return a +`impl FnMut` instead of `impl Fn`, allowing closures that change their context, +and parsers can be any type now, as long as they implement the new `Parser` trait. +That parser trait also comes with a few helper methods. + +Error management was often a pain point, so a lot of work went into making it easier. +Now it integrates with `std:error::Error`, the `IResult::finish()` method allows you +to convert to a more usable type, the `into` combinator can convert the error type +if there's a `From` implementation, and there are more specific error traits like +`ContextError` for the `context` combinator, and `FromExternalError` for `map_res`. +While the `VerboseError` type and its `convert_error` function saw some changes, +not many features ill be added to it, instead you are encouraged to build the error +type that corresponds to your needs if you are building a language parser. + +This version also integrates with the excellent [bitvec](https://crates.io/crates/bitvec) +crate for better bit level parsing. This part of nom was not great and a bit of a hack, +so this will give better options for those parsers. + +At last, documentation! There are now more code examples, functions and macros that require +specific cargo features are now clearly indicated, and there's a new `recipes` module +containing example patterns. + +### Breaking changes + +- the minimal Rust version is now 1.44 (1.37 if building without the `alloc` or `std` features) +- streaming parsers return the number of additional bytes they need, not the total. This was supposed to be the case everywhere, but some parsers were forgotten +- removed the `regexp_macros` cargo feature +- the `context` combinator is not linked to `ParseError` anymore, instead it come with its own `ContextError` trait +- `Needed::Size` now contains a `NonZeroUsize`, so we can reduce the structure's size by 8 bytes. When upgrading, `Needed::Size(number)` can be replaced with `Needed::new(number)` +- there is now a more general `Parser` trait, so parsers can be something else than a function. 
This trait also comes with combinator methods like `map`, `flat_map`, `or`. Since it is implemented on `Fn*` traits, it should not affect existing code too much +- combinators that returned a `impl Fn` now return a `impl FnMut` to allow parser closures that capture some mutable value from the context +- `separated_list` is now `separated_list0` +- removed the deprecated `methods` module +- removed the deprecated `whitespace` module +- the default error type is now a struct (`nom::error::Error`) instead of a tuple +- the `FromExternalError` allows wrapping the error returned by the function in the `map_res` combinator +- renamed the `dbg!` macro to avoid conflicts with `std::dbg!` +- `separated_list` now allows empty elements + + +### Added + +- function version of regex parsers +- `fill`: attempts to fill the output slice passed as argument +- `success`: returns a value without consuming the input +- `satisfy`: checks a predicate over the next character +- `eof` function combinator +- `consumed`: returns the produced value and the consumed input +- `length_count` function combinator +- `into`: converts a parser's output and error values if `From` implementations are available +- `IResult::finish()`: converts a parser's result to `Result<(I, O), E>` by removing the distinction between `Error` and `Failure` and panicking on `Incomplete` +- non macro versions of `u16`, `i32`, etc, with configurable endianness +- `is_newline` function +- `std::error::Error` implementation for nom's error types +- recipes section of the documentation, outlining common patterns in nom +- custom errors example +- bitstream parsing with the `BitSlice` type from the bitvec crate +- native endianness parsers +- github actions for CI + +### Changed + +- allows lexical-core 0.7 +- number parsers are now generic over the input type +- stabilized the `alloc` feature +- `convert_error` accepts a type that derefs to `&str` +- the JSON example now follows the spec better + +### Fixed +- use `fold_many0c` in the `fold_many0` macro + +## 5.1.1 - 2020-02-24 + +### Thanks + +- @Alexhuszagh for float fixes +- @AlexanderEkdahl, @JoshOrndorff, @akitsu-sanae for docs fixes +- @ignatenkobrain: dependency update +- @derekdreery: `map` implementation for errors +- @Lucretiel for docs fixes and compilation fixes +- adytzu2007: warning fixes +- @lo48576: error management fixes + +### Fixed + +- C symbols compilation errors due to old lexical-core version + +### Added + +- `Err` now has a `map` function + +### Changed + +- Make `error::context()` available without `alloc` feature + +## 5.1.0 - 2020-01-07 + +### Thanks + +- @Hywan, @nickmooney, @jplatte, @ngortheone, @ejmg, @SirWindfield, @demurgos, @spazm, @nyarly, @guedou, @adamnemecek, for docs fixes +- @Alxandr for error management bugfixes +- @Lucretiel for example fixes and optimizations +- @adytzu2007 for optimizations +- @audunhalland for utf8 fixes + +### Fixed + +- panic in `convert_error` +- `compile_error` macro usage + +### Added + +- `std::error::Error`, `std::fmt::Display`, `Eq`, `ToOwned` implementations for errors +- inline attribute for `ToUsize` + +### Changed + +- `convert_error` optimization +- `alt` optimization + +## 5.0.1 - 2019-08-22 + +### Thanks + +- @waywardmonkeys, @phaazon, @dalance for docs fixes +- @kali for `many0_m_n` fixes +- @ia0 for macros fixes + +### Fixed + +- `many0_m_n` now supports the n=1 case +- relaxed trait requirements in `cut` +- `peek!` macro reimplementation +- type inference in `value!` + +## 5.0.0 - 2019-06-24 + +This version comes 
with a complete rewrite of nom internals to use functions as a base +for parsers, instead of macros. Macros have been updated to use functions under +the hood, so that most existing parsers will work directly or require minimal changes. + +The `CompleteByteSlice` and `CompleteStr` input types were removed. To get different +behaviour related to streaming or complete input, there are different versions of some +parsers in different submodules, like `nom::character::streaming::alpha0` and +`nom::character::complete::alpha0`. + +The `verbose-errors` feature is gone, now the error type is decided through a generic +bound. To get equivalent behaviour to `verbose-errors`, check out `nom::error::VerboseError` + +### Thanks + +- @lowenheim helped in refactoring and error management +- @Keruspe helped in refactoring and fixing tests +- @pingiun, @Songbird0, @jeremystucki, @BeatButton, @NamsooCho, @Waelwindows, @rbtcollins, @MarkMcCaskey for a lot of help in rewriting the documentation and adding code examples +- @GuillaumeGomez for documentation rewriting and checking +- @iosmanthus for bug fixes +- @lo48576 for error management fixes +- @vaffeine for macros visibility fixes +- @webholik and @Havvy for `escaped` and `escaped_transform` fixes +- @proman21 for help on porting bits parsers + +### Added + +- the `VerboseError` type accumulates position info and error codes, and can generate a trace with span information +- the `lexical-core` crate is now used by default (through the `lexical` compilation feature) to parse floats from text +- documentation and code examples for all functions and macros + +### Changed + +- nom now uses functions instead of macros to generate parsers +- macros now use the functions under the hood +- the minimal Rust version is now 1.31 +- the verify combinator's condition function now takes its argument by reference +- `cond` will now return the error of the parser instead of None +- `alpha*`, `digit*`, `hex_digit*`, `alphanumeric*` now recognize only ASCII characters + +### Removed + +- deprecated string parsers (with the `_s` suffix), the normal version can be used instead +- `verbose-errors` is not needed anymore, now the error type can be decided when writing the parsers, and parsers provided by nom are generic over the error type +- `AtEof`, `CompleteByteSlice` and `CompleteStr` are gone, instead some parsers are specialized to work on streaming or complete input, and provided in different modules +- character parsers that were aliases to their `*1` version: eol, alpha, digit, hex_digit, oct_digit, alphanumeric, space, multispace +- `count_fixed` macro +- `whitespace::sp` can be replaced by `character::complete::multispace0` +- method combinators are now in the nom-methods crate +- `take_until_either`, `take_until_either1`, `take_until_either_and_consume` and `take_until_either_and_consume1`: they can be replaced with `is_not` (possibly combined with something else) +- `take_until_and_consume`, `take_until_and_consume1`: they can be replaced with `take_until` combined with `take` +- `sized_buffer` and `length_bytes!`: they can be replaced with the `length_data` function +- `non_empty`, `begin` and `rest_s` function +- `cond_reduce!`, `cond_with_error!`, `closure!`, `apply`, `map_res_err!`, `expr_opt!`, `expr_res!` +- `alt_complete`, `separated_list_complete`, `separated_nonempty_list_complete` + +## 4.2.3 - 2019-03-23 + +### Fixed + +- add missing `build.rs` file to the package +- fix code comparison links in changelog + +## 4.2.2 - 2019-03-04 + +### Fixed + +- 
regression in do_parse macro import for edition 2018 + +## 4.2.1 - 2019-02-27 + +### Fixed + +- macro expansion error in `do_parse` due to `compile_error` macro usage + +## 4.2.0 - 2019-01-29 + +### Thanks + +- @JoshMcguigan for unit test fixes +- @oza for documentation fixes +- @wackywendell for better error conversion +- @Zebradil for documentation fixes +- @tsraom for new combinators +- @hcpl for minimum Rust version tests +- @KellerFuchs for removing some unsafe uses in float parsing + +### Changed + +- macro import in edition 2018 code should work without importing internal macros now +- the regex parsers do not require the calling code to have imported the regex crate anymore +- error conversions are more ergonomic +- method combinators are now deprecated. They might be moved to a separate crate +- nom now specifies Rust 1.24.1 as minimum version. This was already the case before, now it is made explicit + +### Added + +- `many0_count` and `many1_count` to count applications of a parser instead of +accumulating its results in a `Vec` + +### Fixed + +- overflow in the byte wrapper for bit level parsers +- `f64` parsing does not use `transmute` anymore + +## 4.1.1 - 2018-10-14 + +### Fixed + +- compilation issue in verbose-errors mode for `add_return_error` + +## 4.1.0 - 2018-10-06 + +### Thanks + +- @xfix for fixing warnings, simplifying examples and performance fixes +- @dvberkel for documentation fixes +- @chifflier for fixing warnings +- @myrrlyn for dead code elimination +- @petrochenkov for removing redundant test macros +- @tbelaire for documentation fixes +- @khernyo for fixing warnings +- @linkmauve for documentation fixes +- @ProgVal for documentation fixes, warning fixes and error management +- @Nemo157 for compilation fixes +- @RReverser for documentation fixes +- @xpayn for fixing warnings +- Blas Rodriguez Irizar for documentation fixes +- @badboy for documentation fixes +- @kyrias for compilation fixes +- @kurnevsky for the `rest_len` parser +- @hjr3 for new documentation examples +- @fengalin for error management +- @ithinuel for the pcap example project +- @phaazon for documentation fixes +- @juchiast for documentation fixes +- @jrakow for the `u128` and `i128` parsers +- @smarnach for documentation fixes +- @derekdreery for `pub(crate)` support +- @YaLTeR for `map_res_err!` + +### Added + +- `rest_len` parser, returns the length of the remaining input +- `parse_to` has its own error code now +- `u128` and `i128` parsers in big and little endian modes +- support for `pub(crate)` syntax +- `map_res_err!` combinator that appends the error of its argument function in verbose errors mode + +### Fixed + +- lots of unused imports warnings were removed +- the `bytes` combinator was not compiling in some cases +- the big and little endian combinators now work without external imports +- CI is now faster and uses less cache +- in `add_return_error`, the provided error code is now evaluated only once + +### Changed + +- `fold_many1` will now transmit a `Failure` instead of transforming it to an `Error` +- `float` and `double` now work on all of nom's input types (`&[u8]`, `&str`, `CompleteByteSlice`, `CompleteStr` and any type that implements the required traits). 
`float_s` and `double_s` got the same modification, but are now deprecated +- `CompleteByteSlice` and `CompleteStr` get a small optimization by inlining some functions + + +## 4.0.0 - 2018-05-14 + +### Thanks + +- @jsgf for the new `AtEof` trait +- @tmccombs for fixes on `escaped*` combinators +- @s3bk for fixes around non Copy input types and documentation help +- @kamarkiewicz for fixes to no_std and CI +- @bheisler for documentation and examples +- @target-san for simplifying the `InputIter` trait for `&[u8]` +- @willmurphyscode for documentation and examples +- @Chaitanya1416 for typo fixes +- @fflorent for `input_len()` usage fixes +- @dbrgn for typo fixes +- @iBelieve for no_std fixes +- @kpp for warning fixes and clippy fixes +- @keruspe for fixes on FindToken +- @dtrebbien for fixes on take_until_and_consume1 +- @Henning-K for typo fixes +- @vthriller for documentation fixes +- @federicomenaquintero and @veprbl for their help fixing the float parsers +- @vmchale for new named_args versions +- @hywan for documentation fixes +- @fbenkstein for typo fixes +- @CAD97 for catching missing trait implementations +- @goldenlentils for &str optimizations +- @passy for typo fixes +- @ayrat555 for typo fixes +- @GuillaumeGomez for documentation fixes +- @jrakow for documentation fixes and fixes for `switch!` +- @phlosioneer for documentation fixes +- @creativcoder for typo fixes +- @derekdreery for typo fixes +- @lucasem for implementing `Deref` on `CompleteStr` and `CompleteByteSlice` +- @lowenheim for `parse_to!` fixes +- @myrrlyn for trait fixes around `CompleteStr` and `CompleteByteSlice` +- @NotBad4U for fixing code coverage analysis +- @murarth for code formatting +- @glandium for fixing build in no_std +- @csharad for regex compatibility with `CompleteStr` +- @FauxFaux for implementing `AsRef` on `CompleteStr` +- @jaje for implementing `std::Error` on `nom:Err` +- @fengalin for warning fixes +- @@khernyo for doc formatting + +Special thanks to @corkami for the logo :) + +### Breaking changes + +- the `IResult` type now becomes a `Result` from the standard library +- `Incomplete` now returns the additional data size needed, not the total data size needed +- verbose-errors is now a superset of basic errors +- all the errors now include the related input slice +- the arguments from `error_position` and other such macros were swapped to be more consistent with the rest of nom +- automatic error conversion: to fix error type inference issues, a custom error type must now implement `std::convert::From` +- the `not!` combinator returns unit `()` +- FindToken's calling convention was swapped +- the `take_*` combinators are now more coherent and stricter, see commit 484f6724ea3ccb for more information +- `many0` and other related parsers will now return `Incomplete` if the reach the end of input without an error of the child parser. They will also return `Incomplete` on an empty input +- the `sep!` combinator for whitespace only consumes whitespace in the prefix, while the `ws!` combinator takes care of consuming the remaining whitespace + +### Added + +- the `AtEof` trait for input type: indicate if we can get more input data later (related to streaming parsers and `Incomplete` handling) +- the `escaped*` parsers now support the `&str`input type +- the `Failure` error variant represents an unrecoverable error, for which `alt` and other combinators will not try other branches. 
This error means we got in the right part of the code (like, a prefix was checked correctly), but there was an error in the following parts +- the `CompleteByteSlice` and `CompleteStr` input types consider there will be no more refill of the input. They fixed the `Incomplete` related issues when we have all of the data +- the `exact!()` combinator will fail if we did not consume the whole input +- the `take_while_m_n!` combinator will match a specified number of characters +- `ErrorKind::TakeUntilAndConsume1` +- the `recognize_float` parser will match a float number's characters, but will not transform to a `f32` or `f64` +- `alpha` and other basic parsers are now much stricter about partial inputs. We also introduce the `*0` and `*1` versions of those parsers +- `named_args` can now specify the input type as well +- `HexDisplay` is now implemented for `&str` +- `alloc` feature +- the `InputTakeAtposition` trait allows specialized implementations of parsers like `take_while!` + +### Removed + +- the producers and consumers were removed +- the `error_code` and `error_node` macros are not used anymore + +### Fixed + +- `anychar!` now works correctly with multibyte characters +- `take_until_and_consume1!` no longer results in "no method named \`find_substring\`" and "no method named \`slice\`" compilation errors +- `take_until_and_consume1!` returns the correct Incomplete(Needed) amount +- `no_std` compiles properly, and nom can work with `alloc` too +- `parse_to!` now consumes its input + +### Changed + +- `alt` and other combinators will now clone the input if necessary. If the input is already `Copy` there is no performance impact +- the `rest` parser now works on various input types +- `InputIter::Item` for `&[u8]` is now a `u8` directly, not a reference +- we now use the `compile_error` macro to return a compile time error if there was a syntax issue +- the permutation combinator now supports optional child parsers +- the float numbers parsers have been refactored to use one common implementation that is nearly 2 times faster than the previous one +- the float number parsers now accept more variants + + +## 3.2.1 - 2017-10-27 + +### Thanks + +- @ordian for `alt_complete` fixes +- @friedm for documentation fixes +- @kali for improving error management + +### Fixed + +- there were cases where `alt_complete` could return `Incomplete` + +### Added + +- an `into_error_kind` method can be used to transform any error to a common value. 
This helps when the library is included multiple times as dependency with different feature sets + + +## 3.2.0 - 2017-07-24 + +### Thanks + +- @jedireza for documentation fixes +- @gmorenz for the `bytes` combinator +- @meh for character combinator fixes for UTF-8 +- @jethrogb for avoiding move issues in `separated_list` + +### Changed + +- new layout for the main page of documentation +- `anychar` can now work on any input type +- `length_bytes` is now an alias for `length_data` + +### Fixed + +- `one_of`, `none_of` and `char` will now index correctly UTF-8 characters +- the `compiler_error` macro is now correctly exported + + +### Added + +- the `bytes` combinator transforms a bit stream back to a byte slice for child parsers + +## 3.1.0 - 2017-06-16 + +### Thanks + +- @sdroege: implementing be_i24 and le_i24 +- @Hywan: integrating faster substring search using memchr +- @nizox: fixing type issues in bit stream parsing +- @grissiom: documentation fixes +- @doomrobo: implementing separated_list_complete and separated_nonempty_list_complete +- @CWood1: fixing memchr integration in no_std +- @lu_zero: integrating the compiler_error crate +- @dtolnay: helping debug a type inference issue in map + +### Changed + +- memchr is used for substring search if possible +- if building on nightly, some common syntax errors will display a specific error message. If building no stable, display the documentation to activate those messages +- `count` no longer preallocates its vector + +### Fixed + +- better type inference in alt_complete +- `alt` should now work with whitespace parsing +- `map` should not make type inference errors anymore + +### Added + +- be_i24 and le_i24, parsing big endian and little endian signed 24 bit integers +- `separated_list_complete` and `separated_nonempty_list_complete` will treat incomplete from sub parsers as error + +## 3.0.0 - 2017-05-12 + +### Thanks + +- Chris Pick for some `Incomplete` related refactors +- @dbrgn for documentation fixes +- @valarauca for adding `be_u24` +- @ithinuel for usability fixes +- @evuez for README readability fixes and improvements to `IResult` +- @s3bk for allowing non-`Copy` types as input +- @keruspe for documentation fixes +- @0xd34d10cc for trait fixes on `InputIter` +- @sdleffler for lifetime shenanigans on `named_args` +- @chengsun for type inference fixes in `alt` +- @iBelieve for adding str to no_std +- @Hywan for simplifying code in input traits +- @azerupi for extensive documentation of `alt` and `alt_complete` + +### Breaking Changes + +- `escaped`, `separated_list` and `separated_nonempty_list` can now return `Incomplete` when necessary +- `InputIter` does not require `AsChar` on its `Item` type anymore +- the `core` feature that was putting nom in `no_std` mode has been removed. There is now a `std` feature, activated by default. 
If it is not activated, nom is in `no_std` +- in `verbose-errors` mode, the error list is now stored in a `Vec` instead of a box based linked list +- `chain!` has finally been removed + +### Changed + +- `Endianness` now implements `Debug`, `PartialEq`, `Eq`, `Clone` and `Copy` +- custom input types can now be cloned if they're not `Copy` +- the infamous 'Cannot infer type for E' error should happen less often now +- `str` is now available in `no_std` mode + +### Fixed + +- `FileProducer` will be marked as `Eof` on full buffer +- `named_args!` now has lifetimes that cannot conflict with the lifetimes from other arguments + +### Added + +- `be_u24`: big endian 24 bit unsigned integer parsing +- `IResult` now has a `unwrap_or` method + + +## 2.2.1 - 2017-04-03 + +### Thanks + +- @Victor-Savu for formatting fixes in the README +- @chifflier for detecting and fixing integer overflows +- @utkarshkukreti for some performance improvements in benchmarks + +### Changed + +- when calculating how much data is needed in `IResult::Incomplete`, the addition could overflow (it is stored as a usize). This would apparently not result in any security vulnerability on release code + +## 2.2.0 - 2017-03-20 + +### Thanks + +- @seppo0010 for fixing `named_args` +- @keruspe for implementing or() on `IResult`, adding the option of default cases in `switch!`, adding support for `cargo-travis` +- @timlyo for documentation fixes +- @JayKickliter for extending `hex_u32` +- @1011X for fixing regex integration +- @Kerollmops for actually marking `chain!` as deprecated +- @joliss for documentation fixes +- @utkarshkukreti for tests refactoring and performance improvement +- @tmccombs for documentation fixes + +### Added + +- `IResult` gets an `or()` method +- `take_until1`, `take_until_and_consume1`, `take_till1!` and `take_till1_s!` require at least 1 character + +### Changed + +- `hex_u32` accepts uppercase digits as well +- the character based combinators leverage the input traits +- the whitespace parsers now work on &str and other types +- `take_while1` returns `Incomplete` on empty input +- `switch!` can now take a default case + +### Fixed + +- `named_args!` now imports `IResult` directly +- the upgrade to regex 0.2 broke the regex combinators, they work now + +## 2.1.0 - 2017-01-27 + +### Thanks + +- @nickbabcock for documentation fixes +- @derekdreery for documentation fixes +- @DirkyJerky for documentation fixes +- @saschagrunert for documentation fixes +- @lucab for documentation fixes +- @hyone for documentation fixes +- @tstorch for factoring `Slice` +- @shepmaster for adding crate categories +- @antoyo for adding `named_args!` + +### Added + +- `verify!` uses a first parser, then applies a function to check that its result satisfies some conditions +- `named_args!` creates a parser function that can accept other arguments along with the input +- `parse_to!` will use the `parse` method from `FromStr` to parse a value. It will automatically translate the input to a string if necessary +- `float`, `float_s`, `double`, `double_s` can recognize floating point numbers in text + +### Changed + +- `escaped!` will now return `Incomplete` if needed +- `permutation!` supports up to 20 child parsers + +## 2.0.1 - 2016-12-10 + +Bugfix release + +*Warning*: there is a small breaking change, `add_error!` is renamed to `add_return_error!`. This was planned for the 2.0 release but was forgotten. 
This is a small change in a feature that not many people use, for a release that is not yet widely in use, so there will be no 3.0 release for that change. + +### Thanks + +- @nickbabcock for catching and fixing the `add_error!` mixup +- @lucab for documentation fixes +- @jtdowney for noticing that `tag_no_case!` was not working at all for byte slices + +### Fixed + +- `add_error!` has been renamed to `add_return_error!` +- the `not!` combinator now accepts functions +- `tag_no_case!` is now working as accepted (before, it accepted everything) + + +## 2.0 - 2016-11-25 + +The 2.0 release is one of the biggest yet. It was a good opportunity to clean up some badly named combinators and fix invalid behaviours. + +Since this version introduces a few breaking changes, an [upgrade documentation](https://github.com/Geal/nom/blob/main/doc/upgrading_to_nom_2.md) is available, detailing the steps to fix the most common migration issues. After testing on a set of 30 crates, most of them will build directly, a large part will just need to activate the "verbose-errors" compilation feature. The remaining fixes are documented. + +This version also adds a lot of interesting features, like the permutation combinator or whitespace separated formats support. + +### Thanks + +- @lu-zero for license help +- @adamgreig for type inference fixes +- @keruspe for documentation and example fixes, for the `IResult => Result` conversion work, making `AsChar`'s method more consistent, and adding `many_till!` +- @jdeeny for implementing `Offset` on `&str` +- @vickenty for documentation fixes and his refactoring of `length_value!` and `length_bytes!` +- @overdrivenpotato for refactoring some combinators +- @taralx for documentation fixes +- @keeperofdakeys for fixing eol behaviour, writing documentation and adding `named_attr!` +- @jturner314 for writing documentation +- @bozaro for fixing compilation errors +- @uniphil for adding a `crates.io` badge +- @badboy for documentation fixes +- @jugglerchris for fixing `take_s!` +- @AndyShiue for implementing `Error` and `Display` on `ErrorKind` and detecting incorrect UTF-8 string indexing + +### Added + +- the "simple" error management system does not accumulates errors when backtracking. This is a big perf gain, and is activated by default in nom 2.0 +- nom can now work on any type that implement the traits defined in `src/traits.rs`: `InputLength`, `InputIter`, `InputTake`, `Compare`, `FindToken`, `FindSubstring`, `Slice` +- the documentation from Github's wiki has been moved to the `doc/` directory. They are markdown files that you can build with [cargo-external-doc](https://crates.io/crates/cargo-external-doc) +- whitespace separated format support: with the `ws!` combinator, you can automatically introduce whitespace parsers between all parsers and combinators +- the `permutation!` combinator applies its child parsers in any order, as long as they all succeed once, and return a tuple of the results +- `do_parse!` is a simpler alternative to `chain!`, which is now deprecated +- you can now transform an `IResult` in a `std::result::Result` +- `length_data!` parses a length, and returns a subslice of that length +- `tag_no_case!` provides case independent comparison. 
It works nicely, without any allocation, for ASCII strings, but for UTF-8 strings, it defaults to an unsatisfying (and incorrect) comparison by lowercasing both strings +- `named_attr!` creates functions like `named!` but can add attributes like documentation +- `many_till!` applies repeatedly its first child parser until the second succeeds + +### Changed + +- the "verbose" error management that was available in previous versions is now activated by the "verbose-errors" compilation feature +- code reorganization: most of the parsers were moved in separate files to make the source easier to navigate +- most of the combinators are now independent from the input type +- the `eof` function was replaced with the `eof!` macro +- `error!` and `add_error!` were replaced with `return_error!` and `add_return_error!` to fix the name conflict with the log crate +- the `offset()` method is now in the `Offset` trait +- `length_value!` has been renamed to `length_count!`. The new `length_value!` selects a slice and applies the second parser once on that slice +- `AsChar::is_0_to_9` is now `AsChar::is_dec_digit` +- the combinators with configurable endianness now take an enum instead of a boolean as parameter + +### Fixed +- the `count!`, `count_fixed!` and `length_*!` combinator calculate incomplete data needs correctly +- `eol`, `line_ending` and `not_line_ending` now have a consistent behaviour that works correctly with incomplete data +- `take_s!` didn't correctly handle the case when the slice is exactly the right length + +## 1.2.4 - 2016-07-20 + +### Thanks +- @Phlosioneer for documentation fixes +- @sourrust for fixing offsets in `take_bits!` +- @ChrisMacNaughton for the XFS crate +- @pwoolcoc for `rest_s` +- @fitzgen for more `IResult` methods +- @gtors for the negative lookahead feature +- @frk1 and @jeandudey for little endian float parsing +- @jethrogb for fixing input usage in `many1` +- @acatton for beating me at nom golf :D + +### Added +- the `rest_s` method on `IResult` returns the remaining `&str` input +- `unwrap_err` and `unwrap_inc` methods on `IResult` +- `not!` will peek at the input and return `Done` if the underlying parser returned `Error` or `Incomplete`, without consuming the input +- `le_f32` and `le_f64` parse little endian floating point numbers (IEEE 754) +- + +### Fixed +- documentation fixes +- `take_bits!` is now more precise +- `many1` inccorectly used the `len` function instead of `input_len` +- the INI parser is simpler +- `recognize!` had an early `return` that is removed now + +## 1.2.3 - 2016-05-10 + +### Thanks +- @lu-zero for the contribution guidelines +- @GuillaumeGomez for fixes on `length_bytes` and some documentation +- @Hywan for documentation and test fixes +- @Xirdus for correct trait import issues +- @mspiegel for the new AST example +- @cholcombe973 for adding the `cond_with_error!` combinator +- @tstorch for refactoring `many0!` +- @panicbit for the folding combinators +- @evestera for `separated_list!` fixes +- @DanielKeep for correcting some enum imports + +### Added +- Regular expression combinators starting with `re_bytes_` work on byte slices +- example parsing arithmetic expressions to an AST +- `cond_with_error!` works like `cond!` but will return `None` if the condition is false, and `Some(value)` if the underlying parser succeeded +- `fold_many0!`, `fold_many1!` and `fold_many_m_n!` will take a parser, an initial value and a combining function, and fold over the successful applications of the parser + +### Fixed +- `length_bytes!` converts 
the result of its child parser to usize
+- `take_till!` now imports `InputLength` instead of assuming it's in scope
+- `separated_list!` and `separated_nonempty_list!` will not consume the separator if there's no following successfully parsed value
+- no more warnings on build
+
+### Changed
+- simpler implementation of `many0!`
+
+## 1.2.2 - 2016-03-09
+
+### Thanks
+- @conradev for fixing `take_until_s!`
+- @GuillaumeGomez for some documentation fixes
+- @frewsxcv for some documentation fixes
+- @tstorch for some test refactorings
+
+### Added
+- `nom::Err` now implements `std::error::Error`
+
+### Fixed
+- `hex_u32` does not parse more than 8 chars now
+- `take_while!` and `take_while1!` will not perturb the behaviour of `recognize!` anymore
+
+## 1.2.1 - 2016-02-23
+
+### Thanks
+- @sourrust for adding methods to `IResult`
+- @tstorch for the test refactoring, and for adding methods to `IResult` and `Needed`
+- @joelself for fixing the method system
+
+### Added
+
+- mapping methods over `IResult` and `Needed`
+
+### Changed
+
+- `apply_rf` is renamed to `apply_m`. This will not warrant a major version, since it is a missing part of the methods feature added in the 1.2.0 release
+- the `regexp_macros` feature that used `regex!` to precompile regular expressions has been replaced by the normal regex engine combined with `lazy_static`
+
+### Fixed
+
+- when a parser or combinator was returning an empty buffer as remaining part, it was generating one from a static empty string. This was messing with buffer offset calculation. Now, that empty slice is taken like this: `&input[input.len()..]`.
+- The `regexp_macros` and `no_std` feature build again and are now tested with Travis CI
+
+## 1.2.0 - 2016-02-08
+
+### Thanks
+- @zentner-kyle for type inference fixes
+- @joelself for his work on `&str` parsing and method parsers
+- @GuillaumeGomez for implementing methods on `IResult`
+- @dirk for the `alt_complete!` combinator
+- @tstorch for a lot of refactoring work and unit tests additions
+- @jansegre for the hex digit parsers
+- @belgum for some documentation fixes
+- @lwandrebeck for some documentation fixes and code fixes in `hex_digit`
+
+### Added
+- `take_until_and_consume_s!` for consumption of string data until a tag
+- more function patterns in `named!`. The error type can now be specified
+- `alt_complete!` works like the `alt!` combinator, but tries the next branch if the current one returned `Incomplete`, instead of returning directly
+- more unit tests for a lot of combinators
+- hexadecimal digit parsers
+- the `tuple!` combinator takes a list of parsers as argument, and applies them serially on the input. If all of them are successful, it will return a tuple accumulating all the values. This combinator will (hopefully) replace most uses of `chain!`
+- parsers can now be implemented as a method for a struct thanks to the `method!`, `call_m!` and `apply_rf!` combinators
+
+### Fixed
+- there were type inference issues in a few combinators. They will now be easier to compile
+- `peek!` compilation with bare functions
+- `&str` parsers were splitting data at the byte level, not at the char level, which can result in inconsistencies in parsing UTF-8 characters. They now use character indexes
+- some method implementations were missing on `IResult` (with specified error type instead of implicit)
+
+## 1.1.0 - 2016-01-01
+
+This release adds a lot of features related to `&str` parsing. The previous versions
+were focused on `&[u8]` and bit streams parsing, but there's a need for more text
+parsing with nom. The parsing functions like `alpha`, `digit` and others will now
+accept either a `&[u8]` or a `&str`, so there is no breaking change on that part.
+
+There are also a few performance improvements and documentation fixes.
+
+### Thanks
+- @Binero for pushing the work on `&str` parsing
+- @meh for fixing `Option` and `Vec` imports
+- @hoodie for a documentation fix
+- @joelself for some documentation fixes
+- @vberger for his traits magic making nom functions more generic
+
+### Added
+
+- string related parsers: `tag_s!`, `take_s!`, `is_a_s!`, `is_not_s!`, `take_while_s!`, `take_while1_s!`, `take_till_s!`
+- `value!` is a combinator that always returns the same value. If a child parser is passed as second argument, that value is returned when the child parser succeeds
+
+### Changed
+
+- `tag!` will now compare even on partial input. If it expects "abcd" but receives "ef", it will now return an `Error` instead of `Incomplete`
+- `many0!` and others will preallocate a larger vector to avoid some copies and reallocations
+- `alpha`, `digit`, `alphanumeric`, `space` and `multispace` now accept as input a `&[u8]` or a `&str`. Additionally, they return an error if they receive an empty input
+- `take_while!`, `take_while1!`, `take_while_s!`, `take_while1_s!` will return an error on empty input
+
+### Fixed
+
+- if the child parser of `many0!` or `many1!` returns `Incomplete`, it will return `Incomplete` too, possibly updating the needed size
+- `Option`, `Some`, `None` and `Vec` are now used with full path imports
+
+## 1.0.1 - 2015-11-22
+
+This release makes the 1.0 version compatible with Rust 1.2 and 1.3
+
+### Thanks
+- @steveklabnik for fixing lifetime issues in Producers and Consumers
+
+## 1.0.0 - 2015-11-16
+
+Stable release for nom. A lot of new features, a few breaking changes
+
+### Thanks
+- @ahenry for macro fixes
+- @bluss for fixing documentation
+- @sourrust for cleaning code and debugging the new streaming utilities
+- @meh for inline optimizations
+- @ccmtaylor for fixing function imports
+- @soro for improvements to the streaming utilities
+- @breard-r for catching my typos
+- @nelsonjchen for catching my typos too
+- @divarvel for hex string parsers
+- @mrordinaire for the `length_bytes!` combinator
+
+### Breaking changes
+- `IResult::Error` can now use custom error types, and is generic over the input type
+- Producers and consumers have been replaced. The new implementation uses less memory and integrates more with parsers
+- `nom::ErrorCode` is now `nom::ErrorKind`
+- `filter!` has been renamed to `take_while!`
+- `chain!` will count how much data is consumed and use that number to calculate how much data is needed if a parser returned `Incomplete`
+- `alt!` returns `Incomplete` if a child parser returned `Incomplete`, instead of skipping to the next parser
+- `IResult` does not require a lifetime tag anymore, yay!
+
+### Added
+
+- `complete!` will return an error if the child parser returned `Incomplete`
+- `add_error!` will wrap an error, but allow backtracking
+- `hex_u32` parser
+
+### Fixed
+- the behaviour around `Incomplete` is better for most parsers now
+
+## 0.5.0 - 2015-10-16
+
+This release fixes a few issues and stabilizes the code.
+ +### Thanks +- @nox for documentation fixes +- @daboross for linting fixes +- @ahenry for fixing `tap!` and extending `dbg!` and `dbg_dmp!` +- @bluss for tracking down and fixing issues with unsafe code +- @meh for inlining parser functions +- @ccmtaylor for fixing import of `str::from_utf8` + +### Fixed +- `tap!`, `dbg!` and `dbg_dmp!` now accept function parameters + +### Changed +- the type used in `count_fixed!` must be `Copy` +- `chain!` calculates how much data is needed if one of the parsers returns `Incomplete +- optional parsers in `chain!` can return `Incomplete` + +## 0.4.0 - 2015-09-08 + +Considering the number of changes since the last release, this version can contain breaking changes, so the version number becomes 0.4.0. A lot of new features and performance improvements! + +### Thanks +- @frewsxcv for documentation fixes +- @ngrewe for his work on producers and consumers +- @meh for fixes on `chain!` and for the `rest` parser +- @daboross for refactoring `many0!` and `many1!` +- @aleksander for the `switch!` combinator idea +- @TechnoMancer for his help with bit level parsing +- @sxeraverx for pointing out a bug in `is_a!` + +### Fixed +- `count_fixed!` must take an explicit type as argument to generate the fixed-size array +- optional parsing behaviour in `chain!` +- `count!` can take 0 elements +- `is_a!` and `is_not!` can now consume the whole input + +### Added +- it is now possible to seek to the end of a `MemProducer` +- `opt!` returns `Done(input, None)` if `the child parser returned `Incomplete` +- `rest` will return the remaining input +- consumers can now seek to and from the end of input +- `switch!` applies a first parser then matches on its result to choose the next parser +- bit-level parsers +- character-level parsers +- regular expression parsers +- implementation of `take_till!`, `take_while!` and `take_while1!` + +### Changed +- `alt!` can return `Incomplete` +- the error analysis functions will now take references to functions instead of moving them +- performance improvements on producers +- performance improvement for `filter!` +- performance improvement for `count!`: a `Vec` of the right size is directly allocated + +## 0.3.11 - 2015-08-04 + +### Thanks +- @bluss for remarking that the crate included random junk lying non committed in my local repository + +### Fixed +- cleanup of my local repository will ship less files in the crates, resulting in a smaller download + +## 0.3.10 - 2015-08-03 + +### Added + +- `bits!` for bit level parsing. It indicates that all child parsers will take a `(&[u8], usize)`as input, with the second parameter indicating the bit offset in the first byte. This allows viewing a byte slice as a bit stream. Most combinators can be used directly under `bits!` +- `take_bits!` takes an integer type and a number of bits, consumes that number of bits and updates the offset, possibly by crossing byte boundaries +- bit level parsers are all written in `src/bits.rs` + +### Changed + +- Parsers that specifically handle bytes have been moved to src/bytes.rs`. This applies to `tag!`, `is_not!`, `is_a!`, `filter!`, `take!`, `take_str!`, `take_until_and_consume!`, `take_until!`, `take_until_either_and_consume!`, `take_until_either!` + +## 0.3.9 - 2015-07-20 + +### Thanks +- @badboy for fixing `filter!` +- @idmit for some documentation fixes + +### Added +- `opt_res!` applies a parser and transform its result in a Result. 
This parser never fails +- `cond_reduce!` takes an expression as parameter, applies the parser if the expression is true, and returns an error if the expression is false +- `tap!` pass the result of a parser to a block to manipulate it, but do not affect the parser's result +- `AccReader` is a Read+BufRead that supports data accumulation and partial consumption. The `consume` method must be called afterwardsto indicate how much was consumed +- Arithmetic expression evaluation and parsing example +- `u16!`, `u32!`, `u64!`, `i16!`, `i32!`, `i64!` take an expression as parameter, if the expression is true, apply the big endian integer parser, if false, the little endian version +- type information for combinators. This will make the documentation a bit easier to navigate + +### Fixed +- `map_opt!` and `map_res!` had issues with argument order due to bad macros +- `delimited!` did not compile for certain combinations of arguments +- `filter!` did not return a byte slice but a fixed array + +## 0.3.8 - 2015-07-03 + +### Added +- code coverage is now calculated automatically on Travis CI +- `Stepper`: wrap a `Producer`, and call the method `step` with a parser. This method will buffer data if there is not enough, apply the parser if there is, and keep the rest of the input in memory for the next call +- `ReadProducer`: takes something implementing `Read`, and makes a `Producer` out of it + +### Fixed +- the combinators `separated_pair!` and `delimited!` did not work because an implementation macro was not exported +- if a `MemProducer` reached its end, it should always return `Eof` +- `map!` had issues with argument matching + +## 0.3.7 - 2015-06-24 + +### Added +- `expr_res!` and `expr_opt!` evaluate an expression returning a Result or Opt and convert it to IResult +- `AsBytes` is implemented for fixed size arrays. This allows `tag!([41u8, 42u8])` + +### Fixed +- `count_fixed!` argument parsing works again + +## 0.3.6 - 2015-06-15 + +### Added +- documentation for a few functions +- the consumer trait now requires the `failed(&self, error_code)` method in case of parsing error +- `named!` now handles the alternative `named!(pub fun_name, ...)` + +### Fixed +- `filter!` now returns the whole input if the filter function never returned false +- `take!` casts its argument as usize, so it can accepts any integer type now + +## 0.3.5 - 2015-06-10 + +### Thanks +- @cmr for some documentation fixes + +### Added +- `count_fixed!` returns a fixed array + +### Fixed +- `count!` is back to the previous behaviour, returning a `Vec` for sizes known at runtime + +### Changed +- functions and traits exported from `nom::util` are now directly in `nom::` + +## 0.3.4 - 2015-06-09 + +### Thanks +- @andrew-d for fixes on `cond!` +- @keruspe for features in `chain!` + +### Added +- `chain!` can now have mutable fields + +### Fixed +- `cond!` had an infinite macro recursion + +### Changed +- `chain!` generates less code now. 
No apparent compilation time improvement
+
+## 0.3.3 - 2015-06-09
+
+### Thanks
+- @andrew-d for the little endian signed integer parsers
+- @keruspe for fixes on `count!`
+
+### Added
+- `le_i8`, `le_i16`, `le_i32`, `le_i64`: little endian signed integer parsers
+
+### Changed
+- the `alt!` parser compiles much faster, even with more than 8 branches
+- `count!` can now return a fixed size array instead of a growable vector
+
+## 0.3.2 - 2015-05-31
+
+### Thanks
+- @keruspe for the `take_str` parser and the function application combinator
+
+### Added
+- `take_str!`: takes the specified number of bytes and return a UTF-8 string
+- `apply!`: do partial application on the parameters of a function
+
+### Changed
+- `Needed::Size` now contains a `usize` instead of a `u32`
+
+## 0.3.1 - 2015-05-21
+
+### Thanks
+- @divarvel for the big endian signed integer parsers
+
+### Added
+- `be_i8`, `be_i16`, `be_i32`, `be_i64`: big endian signed integer parsers
+- the `core` feature can be passed to cargo to build with `no_std`
+- colored hexdump can be generated from error chains
+
+## 0.3.0 - 2015-05-07
+
+### Thanks
+- @filipegoncalves for some documentation and the new eof parser
+- @CrimsonVoid for putting fully qualified types in the macros
+- @lu_zero for some documentation fixes
+
+### Added
+- new error types that can contain an error code, an input slice, and a list of following errors
+- `error!` will cut backtracking and return directly from the parser, with a specified error code
+- `eof` parser, successful if there is no more input
+- specific error codes for the parsers provided by nom
+
+### Changed
+- fully qualified types in macros. A lot of imports are not needed anymore
+
+### Removed
+- `FlatMap`, `FlatMapOpt` and `Functor` traits (replaced by `map!`, `map_opt!` and `map_res!`)
+
+## 0.2.2 - 2015-04-12
+
+### Thanks
+- @filipegoncalves and @thehydroimpulse for debugging an infinite loop in many0 and many1
+- @thehydroimpulse for suggesting public named parsers
+- @skade for removing the dependency on the collections gate
+
+### Added
+- `named!` can now declare public functions like this: `named!(pub tst, tag!("abcd"));`
+- `pair!(X,Y)` returns a tuple `(x, y)`
+- `separated_pair!(X, sep, Y)` returns a tuple `(x, y)`
+- `preceded!(opening, X)` returns `x`
+- `terminated!(X, closing)` returns `x`
+- `delimited(opening, X, closing)` returns `x`
+- `separated_list(sep, X)` returns a `Vec`
+- `separated_nonempty_list(sep, X)` returns a `Vec` of at least one element
+
+### Changed
+- `many0!` and `many1!` forbid parsers that do not consume input
+- `is_a!`, `is_not!`, `alpha`, `digit`, `space`, `multispace` will now return an error if they do not consume at least one byte
+
+## 0.2.1 - 2015-04-04
+
+### Thanks
+- @mtsr for catching the remaining debug println!
+- @jag426 who killed a lot of warnings
+- @skade for removing the dependency on the core feature gate
+
+
+### Added
+- little endian unsigned int parsers le_u8, le_u16, le_u32, le_u64
+- `count!` to apply a parser a specified number of times
+- `cond!` applies a parser if the condition is met
+- more parser development tools in `util::*`
+
+### Fixed
+- in one case, `opt!` would not compile
+
+### Removed
+- most of the feature gates are now removed. The only one still needed is `collections`
+
+## 0.2.0 - 2015-03-24
+*works with `rustc 1.0.0-dev (81e2396c7 2015-03-19) (built 2015-03-19)`*
+
+### Thanks
+- Ryman for the AsBytes implementation
+- jag426 and jaredly for documentation fixes
+- eternaleye on #rust IRC for his help on the new macro syntax
+
+### Changed
+- the AsBytes trait improves readability, no more b"...", but "..." instead
+- Incomplete will now hold either Needed::Unknown, or Needed::Size(u32). Matching on Incomplete without caring for the value is done with `Incomplete(_)`, but if more granularity is mandatory, `Needed` can be matched too
+- `alt!` can pass the result of the parser to a closure
+- the `take_*` macros changed behaviour, the default case is now not to consume the separator. The macros have been renamed as follows: `take_until!` -> `take_until_and_consume!`, `take_until_and_leave!` -> `take_until!`, `take_until_either_and_leave!` -> `take_until_either!`, `take_until_either!` -> `take_until_either_and_consume!`
+
+### Added
+- `peek!` macro: matches the future input but does not consume it
+- `length_value!` macro: the first argument is a parser returning a `n` that can be cast to usize, then applies the second parser `n` times. The macro has a variant with a third argument indicating the expected input size for the second parser
+- benchmarks are available at https://github.com/Geal/nom_benchmarks
+- more documentation
+- **Unnamed parser syntax**: warning, this is a breaking change. With this new syntax, the macro combinators do not generate functions anymore, they create blocks. That way, they can be nested, for better readability. The `named!` macro is provided to create functions from parsers. Please be aware that nesting parsers comes with a small cost of compilation time, negligible in most cases, but can quickly get to the minutes scale if not careful. If this happens, separate your parsers in multiple subfunctions.
+- `named!`, `closure!` and `call!` macros used to support the unnamed syntax
+- `map!`, `map_opt!` and `map_res!` to combine a parser with a normal function, transforming the input directly, or returning an `Option` or `Result`
+
+### Fixed
+- `is_a!` is now working properly
+
+### Removed
+- the `o!` macro does less than `chain!`, so it has been removed
+- the `fold0!` and `fold1!` macros were too complex and awkward to use, the `many*` combinators will be useful for most uses for now
+
+## 0.1.6 - 2015-02-24
+### Changed
+- consumers must have an end method that will be called after parsing
+
+### Added
+- big endian unsigned int and float parsers: be_u8, be_u16, be_u32, be_u64, be_f32, be_f64
+- producers can seek
+- function and macros documentation
+- README documentation
+### Fixed
+- lifetime declarations
+- tag! can return Incomplete
+
+## 0.1.5 - 2015-02-17
+### Changed
+- traits were renamed: FlatMapper -> FlatMap, Mapper -> FlatMapOpt, Mapper2 -> Functor
+
+### Fixed
+- works with rustc f1bb6c2f4
+
+## 0.1.4 - 2015-02-17
+### Changed
+- the chaining macro can take optional arguments with '?'
+ +## 0.1.3 - 2015-02-16 +### Changed +- the chaining macro now takes the closure at the end of the argument list + +## 0.1.2 - 2015-02-16 +### Added +- flat_map implementation for <&[u8], &[u8]> +- chaining macro +- partial MP4 parser example + + +## 0.1.1 - 2015-02-06 +### Fixed +- closure syntax change + +## Compare code + +* [unreleased](https://github.com/Geal/nom/compare/7.1.3...HEAD) +* [7.1.2](https://github.com/Geal/nom/compare/7.1.2...7.1.3) +* [7.1.2](https://github.com/Geal/nom/compare/7.1.1...7.1.2) +* [7.1.1](https://github.com/Geal/nom/compare/7.1.0...7.1.1) +* [7.1.0](https://github.com/Geal/nom/compare/7.0.0...7.1.0) +* [7.0.0](https://github.com/Geal/nom/compare/6.2.1...7.0.0) +* [6.2.1](https://github.com/Geal/nom/compare/6.2.0...6.2.1) +* [6.2.0](https://github.com/Geal/nom/compare/6.1.2...6.2.0) +* [6.1.2](https://github.com/Geal/nom/compare/6.1.1...6.1.2) +* [6.1.1](https://github.com/Geal/nom/compare/6.1.0...6.1.1) +* [6.1.0](https://github.com/Geal/nom/compare/6.0.1...6.1.0) +* [6.0.1](https://github.com/Geal/nom/compare/6.0.0...6.0.1) +* [6.0.0](https://github.com/Geal/nom/compare/5.1.1...6.0.0) +* [5.1.1](https://github.com/Geal/nom/compare/5.1.0...5.1.1) +* [5.1.0](https://github.com/Geal/nom/compare/5.0.1...5.1.0) +* [5.0.1](https://github.com/Geal/nom/compare/5.0.0...5.0.1) +* [5.0.0](https://github.com/Geal/nom/compare/4.2.3...5.0.0) +* [4.2.3](https://github.com/Geal/nom/compare/4.2.2...4.2.3) +* [4.2.2](https://github.com/Geal/nom/compare/4.2.1...4.2.2) +* [4.2.1](https://github.com/Geal/nom/compare/4.2.0...4.2.1) +* [4.2.0](https://github.com/Geal/nom/compare/4.1.1...4.2.0) +* [4.1.1](https://github.com/Geal/nom/compare/4.1.0...4.1.1) +* [4.1.0](https://github.com/Geal/nom/compare/4.0.0...4.1.0) +* [4.0.0](https://github.com/Geal/nom/compare/3.2.1...4.0.0) +* [3.2.1](https://github.com/Geal/nom/compare/3.2.0...3.2.1) +* [3.2.0](https://github.com/Geal/nom/compare/3.1.0...3.2.0) +* [3.1.0](https://github.com/Geal/nom/compare/3.0.0...3.1.0) +* [3.0.0](https://github.com/Geal/nom/compare/2.2.1...3.0.0) +* [2.2.1](https://github.com/Geal/nom/compare/2.2.0...2.2.1) +* [2.2.0](https://github.com/Geal/nom/compare/2.1.0...2.2.0) +* [2.1.0](https://github.com/Geal/nom/compare/2.0.1...2.1.0) +* [2.0.1](https://github.com/Geal/nom/compare/2.0.0...2.0.1) +* [2.0.0](https://github.com/Geal/nom/compare/1.2.4...2.0.0) +* [1.2.4](https://github.com/Geal/nom/compare/1.2.3...1.2.4) +* [1.2.3](https://github.com/Geal/nom/compare/1.2.2...1.2.3) +* [1.2.2](https://github.com/Geal/nom/compare/1.2.1...1.2.2) +* [1.2.1](https://github.com/Geal/nom/compare/1.2.0...1.2.1) +* [1.2.0](https://github.com/Geal/nom/compare/1.1.0...1.2.0) +* [1.1.0](https://github.com/Geal/nom/compare/1.0.1...1.1.0) +* [1.0.1](https://github.com/Geal/nom/compare/1.0.0...1.0.1) +* [1.0.0](https://github.com/Geal/nom/compare/0.5.0...1.0.0) +* [0.5.0](https://github.com/geal/nom/compare/0.4.0...0.5.0) +* [0.4.0](https://github.com/geal/nom/compare/0.3.11...0.4.0) +* [0.3.11](https://github.com/geal/nom/compare/0.3.10...0.3.11) +* [0.3.10](https://github.com/geal/nom/compare/0.3.9...0.3.10) +* [0.3.9](https://github.com/geal/nom/compare/0.3.8...0.3.9) +* [0.3.8](https://github.com/Geal/nom/compare/0.3.7...0.3.8) +* [0.3.7](https://github.com/Geal/nom/compare/0.3.6...0.3.7) +* [0.3.6](https://github.com/Geal/nom/compare/0.3.5...0.3.6) +* [0.3.5](https://github.com/Geal/nom/compare/0.3.4...0.3.5) +* [0.3.4](https://github.com/Geal/nom/compare/0.3.3...0.3.4) +* 
[0.3.3](https://github.com/Geal/nom/compare/0.3.2...0.3.3) +* [0.3.2](https://github.com/Geal/nom/compare/0.3.1...0.3.2) +* [0.3.1](https://github.com/Geal/nom/compare/0.3.0...0.3.1) +* [0.3.0](https://github.com/Geal/nom/compare/0.2.2...0.3.0) +* [0.2.2](https://github.com/Geal/nom/compare/0.2.1...0.2.2) +* [0.2.1](https://github.com/Geal/nom/compare/0.2.0...0.2.1) +* [0.2.0](https://github.com/Geal/nom/compare/0.1.6...0.2.0) +* [0.1.6](https://github.com/Geal/nom/compare/0.1.5...0.1.6) +* [0.1.5](https://github.com/Geal/nom/compare/0.1.4...0.1.5) +* [0.1.4](https://github.com/Geal/nom/compare/0.1.3...0.1.4) +* [0.1.3](https://github.com/Geal/nom/compare/0.1.2...0.1.3) +* [0.1.2](https://github.com/Geal/nom/compare/0.1.1...0.1.2) +* [0.1.1](https://github.com/Geal/nom/compare/0.1.0...0.1.1) diff --git a/vendor/nom/Cargo.lock b/vendor/nom/Cargo.lock new file mode 100644 index 00000000000000..c7168342e7d5cd --- /dev/null +++ b/vendor/nom/Cargo.lock @@ -0,0 +1,282 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. +version = 3 + +[[package]] +name = "autocfg" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a" + +[[package]] +name = "bit-set" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6e11e16035ea35e4e5997b393eacbf6f63983188f7a2ad25bfb13465f5ad59de" +dependencies = [ + "bit-vec", +] + +[[package]] +name = "bit-vec" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "349f9b6a179ed607305526ca489b34ad0a41aed5f7980fa90eb03160b69598fb" + +[[package]] +name = "bitflags" +version = "1.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" + +[[package]] +name = "byteorder" +version = "1.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610" + +[[package]] +name = "cfg-if" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" + +[[package]] +name = "doc-comment" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fea41bba32d969b513997752735605054bc0dfa92b4c56bf1189f2e174be7a10" + +[[package]] +name = "fnv" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" + +[[package]] +name = "getrandom" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7fcd999463524c52659517fe2cea98493cfe485d10565e7b0fb07dbba7ad2753" +dependencies = [ + "cfg-if", + "libc", + "wasi", +] + +[[package]] +name = "lazy_static" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" + +[[package]] +name = "libc" +version = "0.2.106" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a60553f9a9e039a333b4e9b20573b9e9b9c0bb3a11e201ccc48ef4283456d673" + +[[package]] +name = "memchr" +version = "2.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "308cc39be01b73d0d18f82a0e7b2a3df85245f84af96fdddc5d202d27e47b86a" + 
+[[package]] +name = "minimal-lexical" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" + +[[package]] +name = "nom" +version = "7.1.3" +dependencies = [ + "doc-comment", + "memchr", + "minimal-lexical", + "proptest", +] + +[[package]] +name = "num-traits" +version = "0.2.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a64b1ec5cda2586e284722486d802acf1f7dbdc623e2bfc57e65ca1cd099290" +dependencies = [ + "autocfg", +] + +[[package]] +name = "ppv-lite86" +version = "0.2.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ed0cfbc8191465bed66e1718596ee0b0b35d5ee1f41c5df2189d0fe8bde535ba" + +[[package]] +name = "proptest" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e0d9cc07f18492d879586c92b485def06bc850da3118075cd45d50e9c95b0e5" +dependencies = [ + "bit-set", + "bitflags", + "byteorder", + "lazy_static", + "num-traits", + "quick-error 2.0.1", + "rand", + "rand_chacha", + "rand_xorshift", + "regex-syntax", + "rusty-fork", + "tempfile", +] + +[[package]] +name = "quick-error" +version = "1.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" + +[[package]] +name = "quick-error" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a993555f31e5a609f617c12db6250dedcac1b0a85076912c436e6fc9b2c8e6a3" + +[[package]] +name = "rand" +version = "0.8.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2e7573632e6454cf6b99d7aac4ccca54be06da05aca2ef7423d22d27d4d4bcd8" +dependencies = [ + "libc", + "rand_chacha", + "rand_core", + "rand_hc", +] + +[[package]] +name = "rand_chacha" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" +dependencies = [ + "ppv-lite86", + "rand_core", +] + +[[package]] +name = "rand_core" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d34f1408f55294453790c48b2f1ebbb1c5b4b7563eb1f418bcfcfdbb06ebb4e7" +dependencies = [ + "getrandom", +] + +[[package]] +name = "rand_hc" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d51e9f596de227fda2ea6c84607f5558e196eeaf43c986b724ba4fb8fdf497e7" +dependencies = [ + "rand_core", +] + +[[package]] +name = "rand_xorshift" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d25bf25ec5ae4a3f1b92f929810509a2f53d7dca2f50b794ff57e3face536c8f" +dependencies = [ + "rand_core", +] + +[[package]] +name = "redox_syscall" +version = "0.2.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8383f39639269cde97d255a32bdb68c047337295414940c68bdd30c2e13203ff" +dependencies = [ + "bitflags", +] + +[[package]] +name = "regex-syntax" +version = "0.6.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f497285884f3fcff424ffc933e56d7cbca511def0c9831a7f9b5f6153e3cc89b" + +[[package]] +name = "remove_dir_all" +version = "0.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3acd125665422973a33ac9d3dd2df85edad0f4ae9b00dafb1a05e43a9f5ef8e7" +dependencies = [ + "winapi", +] + +[[package]] +name = "rusty-fork" +version = "0.3.0" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cb3dcc6e454c328bb824492db107ab7c0ae8fcffe4ad210136ef014458c1bc4f" +dependencies = [ + "fnv", + "quick-error 1.2.3", + "tempfile", + "wait-timeout", +] + +[[package]] +name = "tempfile" +version = "3.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dac1c663cfc93810f88aed9b8941d48cabf856a1b111c29a40439018d870eb22" +dependencies = [ + "cfg-if", + "libc", + "rand", + "redox_syscall", + "remove_dir_all", + "winapi", +] + +[[package]] +name = "wait-timeout" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9f200f5b12eb75f8c1ed65abd4b2db8a6e1b138a20de009dacee265a2498f3f6" +dependencies = [ + "libc", +] + +[[package]] +name = "wasi" +version = "0.10.2+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fd6fbd9a79829dd1ad0cc20627bf1ed606756a7f77edff7b66b7064f9cb327c6" + +[[package]] +name = "winapi" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" +dependencies = [ + "winapi-i686-pc-windows-gnu", + "winapi-x86_64-pc-windows-gnu", +] + +[[package]] +name = "winapi-i686-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" + +[[package]] +name = "winapi-x86_64-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" diff --git a/vendor/nom/Cargo.toml b/vendor/nom/Cargo.toml new file mode 100644 index 00000000000000..2388b4ceea350c --- /dev/null +++ b/vendor/nom/Cargo.toml @@ -0,0 +1,168 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies. +# +# If you are reading this file be aware that the original Cargo.toml +# will likely look very different (and much more reasonable). +# See Cargo.toml.orig for the original contents. 
+ +[package] +edition = "2018" +rust-version = "1.48" +name = "nom" +version = "7.1.3" +authors = ["contact@geoffroycouprie.com"] +include = [ + "CHANGELOG.md", + "LICENSE", + "README.md", + ".gitignore", + "Cargo.toml", + "src/*.rs", + "src/*/*.rs", + "tests/*.rs", + "doc/nom_recipes.md", +] +autoexamples = false +description = "A byte-oriented, zero-copy, parser combinators library" +documentation = "https://docs.rs/nom" +readme = "README.md" +keywords = [ + "parser", + "parser-combinators", + "parsing", + "streaming", + "bit", +] +categories = ["parsing"] +license = "MIT" +repository = "https://github.com/Geal/nom" + +[package.metadata.docs.rs] +features = [ + "alloc", + "std", + "docsrs", +] +all-features = true + +[profile.bench] +lto = true +codegen-units = 1 +debug = true + +[[example]] +name = "custom_error" +path = "examples/custom_error.rs" +required-features = ["alloc"] + +[[example]] +name = "json" +path = "examples/json.rs" +required-features = ["alloc"] + +[[example]] +name = "json_iterator" +path = "examples/json_iterator.rs" +required-features = ["alloc"] + +[[example]] +name = "iterator" +path = "examples/iterator.rs" + +[[example]] +name = "s_expression" +path = "examples/s_expression.rs" +required-features = ["alloc"] + +[[example]] +name = "string" +path = "examples/string.rs" +required-features = ["alloc"] + +[[test]] +name = "arithmetic" + +[[test]] +name = "arithmetic_ast" +required-features = ["alloc"] + +[[test]] +name = "css" + +[[test]] +name = "custom_errors" + +[[test]] +name = "float" + +[[test]] +name = "ini" +required-features = ["alloc"] + +[[test]] +name = "ini_str" +required-features = ["alloc"] + +[[test]] +name = "issues" +required-features = ["alloc"] + +[[test]] +name = "json" + +[[test]] +name = "mp4" +required-features = ["alloc"] + +[[test]] +name = "multiline" +required-features = ["alloc"] + +[[test]] +name = "overflow" + +[[test]] +name = "reborrow_fold" + +[[test]] +name = "fnmut" +required-features = ["alloc"] + +[dependencies.memchr] +version = "2.3" +default-features = false + +[dependencies.minimal-lexical] +version = "0.2.0" +default-features = false + +[dev-dependencies.doc-comment] +version = "0.3" + +[dev-dependencies.proptest] +version = "1.0.0" + +[features] +alloc = [] +default = ["std"] +docsrs = [] +std = [ + "alloc", + "memchr/std", + "minimal-lexical/std", +] + +[badges.coveralls] +branch = "main" +repository = "Geal/nom" +service = "github" + +[badges.maintenance] +status = "actively-developed" + +[badges.travis-ci] +repository = "Geal/nom" diff --git a/vendor/nom/LICENSE b/vendor/nom/LICENSE new file mode 100644 index 00000000000000..88557e44e34e24 --- /dev/null +++ b/vendor/nom/LICENSE @@ -0,0 +1,20 @@ +Copyright (c) 2014-2019 Geoffroy Couprie + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/nom/README.md b/vendor/nom/README.md new file mode 100644 index 00000000000000..f2c1b052863714 --- /dev/null +++ b/vendor/nom/README.md @@ -0,0 +1,331 @@ +# nom, eating data byte by byte + +[![LICENSE](https://img.shields.io/badge/license-MIT-blue.svg)](LICENSE) +[![Join the chat at https://gitter.im/Geal/nom](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/Geal/nom?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) +[![Build Status](https://github.com/Geal/nom/actions/workflows/ci.yml/badge.svg)](https://github.com/Geal/nom/actions/workflows/ci.yml) +[![Coverage Status](https://coveralls.io/repos/github/Geal/nom/badge.svg?branch=main)](https://coveralls.io/github/Geal/nom?branch=main) +[![Crates.io Version](https://img.shields.io/crates/v/nom.svg)](https://crates.io/crates/nom) +[![Minimum rustc version](https://img.shields.io/badge/rustc-1.48.0+-lightgray.svg)](#rust-version-requirements-msrv) + +nom is a parser combinators library written in Rust. Its goal is to provide tools +to build safe parsers without compromising the speed or memory consumption. To +that end, it uses extensively Rust's *strong typing* and *memory safety* to produce +fast and correct parsers, and provides functions, macros and traits to abstract most of the +error prone plumbing. + +![nom logo in CC0 license, by Ange Albertini](https://raw.githubusercontent.com/Geal/nom/main/assets/nom.png) + +*nom will happily take a byte out of your files :)* + + + +- [Example](#example) +- [Documentation](#documentation) +- [Why use nom?](#why-use-nom) + - [Binary format parsers](#binary-format-parsers) + - [Text format parsers](#text-format-parsers) + - [Programming language parsers](#programming-language-parsers) + - [Streaming formats](#streaming-formats) +- [Parser combinators](#parser-combinators) +- [Technical features](#technical-features) +- [Rust version requirements](#rust-version-requirements-msrv) +- [Installation](#installation) +- [Related projects](#related-projects) +- [Parsers written with nom](#parsers-written-with-nom) +- [Contributors](#contributors) + + + +## Example + +[Hexadecimal color](https://developer.mozilla.org/en-US/docs/Web/CSS/color) parser: + +```rust +extern crate nom; +use nom::{ + IResult, + bytes::complete::{tag, take_while_m_n}, + combinator::map_res, + sequence::tuple +}; + +#[derive(Debug,PartialEq)] +pub struct Color { + pub red: u8, + pub green: u8, + pub blue: u8, +} + +fn from_hex(input: &str) -> Result { + u8::from_str_radix(input, 16) +} + +fn is_hex_digit(c: char) -> bool { + c.is_digit(16) +} + +fn hex_primary(input: &str) -> IResult<&str, u8> { + map_res( + take_while_m_n(2, 2, is_hex_digit), + from_hex + )(input) +} + +fn hex_color(input: &str) -> IResult<&str, Color> { + let (input, _) = tag("#")(input)?; + let (input, (red, green, blue)) = tuple((hex_primary, hex_primary, hex_primary))(input)?; + + Ok((input, Color { red, green, blue })) +} + +fn main() {} + +#[test] +fn parse_color() { + assert_eq!(hex_color("#2F14DF"), Ok(("", Color { + red: 47, + green: 20, + blue: 223, + }))); +} +``` + +## Documentation + +- [Reference documentation](https://docs.rs/nom) +- [Various design documents and tutorials](https://github.com/Geal/nom/tree/main/doc) +- [List of 
combinators and their behaviour](https://github.com/Geal/nom/blob/main/doc/choosing_a_combinator.md) + +If you need any help developing your parsers, please ping `geal` on IRC (libera, geeknode, oftc), go to `#nom-parsers` on Libera IRC, or on the [Gitter chat room](https://gitter.im/Geal/nom). + +## Why use nom + +If you want to write: + +### Binary format parsers + +nom was designed to properly parse binary formats from the beginning. Compared +to the usual handwritten C parsers, nom parsers are just as fast, free from +buffer overflow vulnerabilities, and handle common patterns for you: + +- [TLV](https://en.wikipedia.org/wiki/Type-length-value) +- Bit level parsing +- Hexadecimal viewer in the debugging macros for easy data analysis +- Streaming parsers for network formats and huge files + +Example projects: + +- [FLV parser](https://github.com/rust-av/flavors) +- [Matroska parser](https://github.com/rust-av/matroska) +- [tar parser](https://github.com/Keruspe/tar-parser.rs) + +### Text format parsers + +While nom was made for binary format at first, it soon grew to work just as +well with text formats. From line based formats like CSV, to more complex, nested +formats such as JSON, nom can manage it, and provides you with useful tools: + +- Fast case insensitive comparison +- Recognizers for escaped strings +- Regular expressions can be embedded in nom parsers to represent complex character patterns succinctly +- Special care has been given to managing non ASCII characters properly + +Example projects: + +- [HTTP proxy](https://github.com/sozu-proxy/sozu/tree/main/lib/src/protocol/http/parser) +- [TOML parser](https://github.com/joelself/tomllib) + +### Programming language parsers + +While programming language parsers are usually written manually for more +flexibility and performance, nom can be (and has been successfully) used +as a prototyping parser for a language. + +nom will get you started quickly with powerful custom error types, that you +can leverage with [nom_locate](https://github.com/fflorent/nom_locate) to +pinpoint the exact line and column of the error. No need for separate +tokenizing, lexing and parsing phases: nom can automatically handle whitespace +parsing, and construct an AST in place. + +Example projects: + +- [PHP VM](https://github.com/tagua-vm/parser) +- eve language prototype +- [xshade shading language](https://github.com/xshade-lang/xshade/) + +### Streaming formats + +While a lot of formats (and the code handling them) assume that they can fit +the complete data in memory, there are formats for which we only get a part +of the data at once, like network formats, or huge files. +nom has been designed for a correct behaviour with partial data: If there is +not enough data to decide, nom will tell you it needs more instead of silently +returning a wrong result. Whether your data comes entirely or in chunks, the +result should be the same. + +It allows you to build powerful, deterministic state machines for your protocols. + +Example projects: + +- [HTTP proxy](https://github.com/sozu-proxy/sozu/tree/main/lib/src/protocol/http/parser) +- [Using nom with generators](https://github.com/Geal/generator_nom) + +## Parser combinators + +Parser combinators are an approach to parsers that is very different from +software like [lex](https://en.wikipedia.org/wiki/Lex_(software)) and +[yacc](https://en.wikipedia.org/wiki/Yacc). 
Instead of writing the grammar +in a separate file and generating the corresponding code, you use very +small functions with very specific purpose, like "take 5 bytes", or +"recognize the word 'HTTP'", and assemble them in meaningful patterns +like "recognize 'HTTP', then a space, then a version". +The resulting code is small, and looks like the grammar you would have +written with other parser approaches. + +This has a few advantages: + +- The parsers are small and easy to write +- The parsers components are easy to reuse (if they're general enough, please add them to nom!) +- The parsers components are easy to test separately (unit tests and property-based tests) +- The parser combination code looks close to the grammar you would have written +- You can build partial parsers, specific to the data you need at the moment, and ignore the rest + +## Technical features + +nom parsers are for: +- [x] **byte-oriented**: The basic type is `&[u8]` and parsers will work as much as possible on byte array slices (but are not limited to them) +- [x] **bit-oriented**: nom can address a byte slice as a bit stream +- [x] **string-oriented**: The same kind of combinators can apply on UTF-8 strings as well +- [x] **zero-copy**: If a parser returns a subset of its input data, it will return a slice of that input, without copying +- [x] **streaming**: nom can work on partial data and detect when it needs more data to produce a correct result +- [x] **descriptive errors**: The parsers can aggregate a list of error codes with pointers to the incriminated input slice. Those error lists can be pattern matched to provide useful messages. +- [x] **custom error types**: You can provide a specific type to improve errors returned by parsers +- [x] **safe parsing**: nom leverages Rust's safe memory handling and powerful types, and parsers are routinely fuzzed and tested with real world data. So far, the only flaws found by fuzzing were in code written outside of nom +- [x] **speed**: Benchmarks have shown that nom parsers often outperform many parser combinators library like Parsec and attoparsec, some regular expression engines and even handwritten C parsers + +Some benchmarks are available on [Github](https://github.com/Geal/nom_benchmarks). + +## Rust version requirements (MSRV) + +The 7.0 series of nom supports **Rustc version 1.48 or greater**. It is known to work properly on Rust 1.41.1 but there is no guarantee it will stay the case through this major release. + +The current policy is that this will only be updated in the next major nom release. + +## Installation + +nom is available on [crates.io](https://crates.io/crates/nom) and can be included in your Cargo enabled project like this: + +```toml +[dependencies] +nom = "7" +``` + +There are a few compilation features: + +* `alloc`: (activated by default) if disabled, nom can work in `no_std` builds without memory allocators. 
If enabled, combinators that allocate (like `many0`) will be available +* `std`: (activated by default, activates `alloc` too) if disabled, nom can work in `no_std` builds + +You can configure those features like this: + +```toml +[dependencies.nom] +version = "7" +default-features = false +features = ["alloc"] +``` + +# Related projects + +- [Get line and column info in nom's input type](https://github.com/fflorent/nom_locate) +- [Using nom as lexer and parser](https://github.com/Rydgel/monkey-rust) + +# Parsers written with nom + +Here is a (non exhaustive) list of known projects using nom: + +- Text file formats: [Ceph Crush](https://github.com/cholcombe973/crushtool), +[Cronenberg](https://github.com/ayrat555/cronenberg), +[XFS Runtime Stats](https://github.com/ChrisMacNaughton/xfs-rs), +[CSV](https://github.com/GuillaumeGomez/csv-parser), +[FASTA](https://github.com/TianyiShi2001/nom-fasta), +[FASTQ](https://github.com/elij/fastq.rs), +[INI](https://github.com/Geal/nom/blob/main/tests/ini.rs), +[ISO 8601 dates](https://github.com/badboy/iso8601), +[libconfig-like configuration file format](https://github.com/filipegoncalves/rust-config), +[Web archive](https://github.com/sbeckeriv/warc_nom_parser), +[PDB](https://github.com/TianyiShi2001/nom-pdb), +[proto files](https://github.com/tafia/protobuf-parser), +[Fountain screenplay markup](https://github.com/adamchalmers/fountain-rs), +[vimwiki](https://github.com/chipsenkbeil/vimwiki-server/tree/master/vimwiki) & [vimwiki_macros](https://github.com/chipsenkbeil/vimwiki-server/tree/master/vimwiki_macros) +- Programming languages: +[PHP](https://github.com/tagua-vm/parser), +[Basic Calculator](https://github.com/balajisivaraman/basic_calculator_rs), +[GLSL](https://github.com/phaazon/glsl), +[Lua](https://github.com/doomrobo/nom-lua53), +[Python](https://github.com/ProgVal/rust-python-parser), +[SQL](https://github.com/ms705/nom-sql), +[Elm](https://github.com/cout970/Elm-interpreter), +[SystemVerilog](https://github.com/dalance/sv-parser), +[Turtle](https://github.com/vandenoever/rome/tree/master/src/io/turtle), +[CSML](https://github.com/CSML-by-Clevy/csml-interpreter), +[Wasm](https://github.com/Strytyp/wasm-nom), +[Pseudocode](https://github.com/Gungy2/pseudocode) +[Filter for MeiliSearch](https://github.com/meilisearch/meilisearch) +- Interface definition formats: [Thrift](https://github.com/thehydroimpulse/thrust) +- Audio, video and image formats: +[GIF](https://github.com/Geal/gif.rs), +[MagicaVoxel .vox](https://github.com/davidedmonds/dot_vox), +[midi](https://github.com/derekdreery/nom-midi-rs), +[SWF](https://github.com/open-flash/swf-parser), +[WAVE](http://github.com/noise-Labs/wave), +[Matroska (MKV)](https://github.com/rust-av/matroska) +- Document formats: +[TAR](https://github.com/Keruspe/tar-parser.rs), +[GZ](https://github.com/nharward/nom-gzip), +[GDSII](https://github.com/erihsu/gds2-io) +- Cryptographic formats: +[X.509](https://github.com/rusticata/x509-parser) +- Network protocol formats: +[Bencode](https://github.com/jbaum98/bencode.rs), +[D-Bus](https://github.com/toshokan/misato), +[DHCP](https://github.com/rusticata/dhcp-parser), +[HTTP](https://github.com/sozu-proxy/sozu/tree/main/lib/src/protocol/http), +[URI](https://github.com/santifa/rrp/blob/master/src/uri.rs), +[IMAP](https://github.com/djc/tokio-imap), +[IRC](https://github.com/Detegr/RBot-parser), +[Pcap-NG](https://github.com/richo/pcapng-rs), +[Pcap](https://github.com/ithinuel/pcap-rs), +[Pcap + PcapNG](https://github.com/rusticata/pcap-parser), 
+[IKEv2](https://github.com/rusticata/ipsec-parser), +[NTP](https://github.com/rusticata/ntp-parser), +[SNMP](https://github.com/rusticata/snmp-parser), +[Kerberos v5](https://github.com/rusticata/kerberos-parser), +[DER](https://github.com/rusticata/der-parser), +[TLS](https://github.com/rusticata/tls-parser), +[IPFIX / Netflow v10](https://github.com/dominotree/rs-ipfix), +[GTP](https://github.com/fuerstenau/gorrosion-gtp), +[SIP](https://github.com/armatusmiles/sipcore/tree/master/crates/sipmsg), +[Prometheus](https://github.com/timberio/vector/blob/master/lib/prometheus-parser/src/line.rs) +- Language specifications: +[BNF](https://github.com/snewt/bnf) +- Misc formats: +[Gameboy ROM](https://github.com/MarkMcCaskey/gameboy-rom-parser), +[ANT FIT](https://github.com/stadelmanma/fitparse-rs), +[Version Numbers](https://github.com/fosskers/rs-versions), +[Telcordia/Bellcore SR-4731 SOR OTDR files](https://github.com/JamesHarrison/otdrs), +[MySQL binary log](https://github.com/PrivateRookie/boxercrab), +[URI](https://github.com/Skasselbard/nom-uri), +[Furigana](https://github.com/sachaarbonel/furigana.rs), +[Wordle Result](https://github.com/Fyko/wordle-stats/tree/main/parser) + +Want to create a new parser using `nom`? A list of not yet implemented formats is available [here](https://github.com/Geal/nom/issues/14). + +Want to add your parser here? Create a pull request for it! + +# Contributors + +nom is the fruit of the work of many contributors over the years, many thanks for your help! + + + + diff --git a/vendor/nom/doc/nom_recipes.md b/vendor/nom/doc/nom_recipes.md new file mode 100644 index 00000000000000..e8626344a7fcf3 --- /dev/null +++ b/vendor/nom/doc/nom_recipes.md @@ -0,0 +1,395 @@ +# Nom Recipes + +These are short recipes for accomplishing common tasks with nom. + +* [Whitespace](#whitespace) + + [Wrapper combinators that eat whitespace before and after a parser](#wrapper-combinators-that-eat-whitespace-before-and-after-a-parser) +* [Comments](#comments) + + [`// C++/EOL-style comments`](#-ceol-style-comments) + + [`/* C-style comments */`](#-c-style-comments-) +* [Identifiers](#identifiers) + + [`Rust-Style Identifiers`](#rust-style-identifiers) +* [Literal Values](#literal-values) + + [Escaped Strings](#escaped-strings) + + [Integers](#integers) + - [Hexadecimal](#hexadecimal) + - [Octal](#octal) + - [Binary](#binary) + - [Decimal](#decimal) + + [Floating Point Numbers](#floating-point-numbers) + +## Whitespace + + + +### Wrapper combinators that eat whitespace before and after a parser + +```rust +use nom::{ + IResult, + error::ParseError, + combinator::value, + sequence::delimited, + character::complete::multispace0, +}; + +/// A combinator that takes a parser `inner` and produces a parser that also consumes both leading and +/// trailing whitespace, returning the output of `inner`. +fn ws<'a, F: 'a, O, E: ParseError<&'a str>>(inner: F) -> impl FnMut(&'a str) -> IResult<&'a str, O, E> + where + F: Fn(&'a str) -> IResult<&'a str, O, E>, +{ + delimited( + multispace0, + inner, + multispace0 + ) +} +``` + +To eat only trailing whitespace, replace `delimited(...)` with `terminated(&inner, multispace0)`. +Likewise, the eat only leading whitespace, replace `delimited(...)` with `preceded(multispace0, +&inner)`. You can use your own parser instead of `multispace0` if you want to skip a different set +of lexemes. + +## Comments + +### `// C++/EOL-style comments` + +This version uses `%` to start a comment, does not consume the newline character, and returns an +output of `()`. 
+
+```rust
+use nom::{
+  IResult,
+  error::ParseError,
+  combinator::value,
+  sequence::pair,
+  bytes::complete::is_not,
+  character::complete::char,
+};
+
+pub fn peol_comment<'a, E: ParseError<&'a str>>(i: &'a str) -> IResult<&'a str, (), E>
+{
+  value(
+    (), // Output is thrown away.
+    pair(char('%'), is_not("\n\r"))
+  )(i)
+}
+```
+
+### `/* C-style comments */`
+
+Inline comments surrounded with sentinel tags `(*` and `*)`. This version returns an output of `()`
+and does not handle nested comments.
+
+```rust
+use nom::{
+  IResult,
+  error::ParseError,
+  combinator::value,
+  sequence::tuple,
+  bytes::complete::{tag, take_until},
+};
+
+pub fn pinline_comment<'a, E: ParseError<&'a str>>(i: &'a str) -> IResult<&'a str, (), E> {
+  value(
+    (), // Output is thrown away.
+    tuple((
+      tag("(*"),
+      take_until("*)"),
+      tag("*)")
+    ))
+  )(i)
+}
+```
+
+## Identifiers
+
+### `Rust-Style Identifiers`
+
+Identifiers that start with a letter (or underscore) and may contain underscores,
+letters, and numbers can be parsed like this:
+
+```rust
+use nom::{
+  IResult,
+  branch::alt,
+  multi::many0_count,
+  combinator::recognize,
+  sequence::pair,
+  character::complete::{alpha1, alphanumeric1},
+  bytes::complete::tag,
+};
+
+pub fn identifier(input: &str) -> IResult<&str, &str> {
+  recognize(
+    pair(
+      alt((alpha1, tag("_"))),
+      many0_count(alt((alphanumeric1, tag("_"))))
+    )
+  )(input)
+}
+```
+
+Let's say we apply this to the identifier `hello_world123abc`. The first `alt` parser would
+recognize `h`. The `pair` combinator ensures that `ello_world123abc` will be piped to the next
+`many0_count` parser, which recognizes every remaining character. However, the `pair` combinator
+returns a tuple of the results of its sub-parsers. The `recognize` parser produces a `&str` of the
+input text that was parsed, which in this case is the entire `&str` `hello_world123abc`.
+
+## Literal Values
+
+### Escaped Strings
+
+This is [one of the examples](https://github.com/Geal/nom/blob/main/examples/string.rs) in the
+examples directory.
+
+### Integers
+
+The following recipes all return string slices rather than integer values. How to obtain an
+integer value instead is demonstrated for hexadecimal integers. The others are similar.
+
+The parsers allow the grouping character `_`, which allows one to group the digits by byte, for
+example: `0xA4_3F_11_28`. If you prefer to exclude the `_` character, the lambda to convert from a
+string slice to an integer value is slightly simpler. You can also strip the `_` from the string
+slice that is returned, which is demonstrated in the second hexadecimal number parser.
+
+If you wish to limit the number of digits in a valid integer literal, replace `many1` with
+`many_m_n` in the recipes.
+
+#### Hexadecimal
+
+The parser outputs the string slice of the digits without the leading `0x`/`0X`.
+ +```rust +use nom::{ + IResult, + branch::alt, + multi::{many0, many1}, + combinator::recognize, + sequence::{preceded, terminated}, + character::complete::{char, one_of}, + bytes::complete::tag, +}; + +fn hexadecimal(input: &str) -> IResult<&str, &str> { // <'a, E: ParseError<&'a str>> + preceded( + alt((tag("0x"), tag("0X"))), + recognize( + many1( + terminated(one_of("0123456789abcdefABCDEF"), many0(char('_'))) + ) + ) + )(input) +} +``` + +If you want it to return the integer value instead, use map: + +```rust +use nom::{ + IResult, + branch::alt, + multi::{many0, many1}, + combinator::{map_res, recognize}, + sequence::{preceded, terminated}, + character::complete::{char, one_of}, + bytes::complete::tag, +}; + +fn hexadecimal_value(input: &str) -> IResult<&str, i64> { + map_res( + preceded( + alt((tag("0x"), tag("0X"))), + recognize( + many1( + terminated(one_of("0123456789abcdefABCDEF"), many0(char('_'))) + ) + ) + ), + |out: &str| i64::from_str_radix(&str::replace(&out, "_", ""), 16) + )(input) +} +``` + +#### Octal + +```rust +use nom::{ + IResult, + branch::alt, + multi::{many0, many1}, + combinator::recognize, + sequence::{preceded, terminated}, + character::complete::{char, one_of}, + bytes::complete::tag, +}; + +fn octal(input: &str) -> IResult<&str, &str> { + preceded( + alt((tag("0o"), tag("0O"))), + recognize( + many1( + terminated(one_of("01234567"), many0(char('_'))) + ) + ) + )(input) +} +``` + +#### Binary + +```rust +use nom::{ + IResult, + branch::alt, + multi::{many0, many1}, + combinator::recognize, + sequence::{preceded, terminated}, + character::complete::{char, one_of}, + bytes::complete::tag, +}; + +fn binary(input: &str) -> IResult<&str, &str> { + preceded( + alt((tag("0b"), tag("0B"))), + recognize( + many1( + terminated(one_of("01"), many0(char('_'))) + ) + ) + )(input) +} +``` + +#### Decimal + +```rust +use nom::{ + IResult, + multi::{many0, many1}, + combinator::recognize, + sequence::terminated, + character::complete::{char, one_of}, +}; + +fn decimal(input: &str) -> IResult<&str, &str> { + recognize( + many1( + terminated(one_of("0123456789"), many0(char('_'))) + ) + )(input) +} +``` + +### Floating Point Numbers + +The following is adapted from [the Python parser by Valentin Lorentz (ProgVal)](https://github.com/ProgVal/rust-python-parser/blob/master/src/numbers.rs). + +```rust +use nom::{ + IResult, + branch::alt, + multi::{many0, many1}, + combinator::{opt, recognize}, + sequence::{preceded, terminated, tuple}, + character::complete::{char, one_of}, +}; + +fn float(input: &str) -> IResult<&str, &str> { + alt(( + // Case one: .42 + recognize( + tuple(( + char('.'), + decimal, + opt(tuple(( + one_of("eE"), + opt(one_of("+-")), + decimal + ))) + )) + ) + , // Case two: 42e42 and 42.42e42 + recognize( + tuple(( + decimal, + opt(preceded( + char('.'), + decimal, + )), + one_of("eE"), + opt(one_of("+-")), + decimal + )) + ) + , // Case three: 42. and 42.42 + recognize( + tuple(( + decimal, + char('.'), + opt(decimal) + )) + ) + ))(input) +} + +fn decimal(input: &str) -> IResult<&str, &str> { + recognize( + many1( + terminated(one_of("0123456789"), many0(char('_'))) + ) + )(input) +} +``` + +# implementing FromStr + +The [FromStr trait](https://doc.rust-lang.org/std/str/trait.FromStr.html) provides +a common interface to parse from a string. + +```rust +use nom::{ + IResult, Finish, error::Error, + bytes::complete::{tag, take_while}, +}; +use std::str::FromStr; + +// will recognize the name in "Hello, name!" 
+fn parse_name(input: &str) -> IResult<&str, &str> {
+  let (i, _) = tag("Hello, ")(input)?;
+  let (i, name) = take_while(|c:char| c.is_alphabetic())(i)?;
+  let (i, _) = tag("!")(i)?;
+
+  Ok((i, name))
+}
+
+// with FromStr, the result cannot be a reference to the input, it must be owned
+#[derive(Debug)]
+pub struct Name(pub String);
+
+impl FromStr for Name {
+  // the error must be owned as well
+  type Err = Error<String>;
+
+  fn from_str(s: &str) -> Result<Self, Self::Err> {
+    match parse_name(s).finish() {
+      Ok((_remaining, name)) => Ok(Name(name.to_string())),
+      Err(Error { input, code }) => Err(Error {
+        input: input.to_string(),
+        code,
+      })
+    }
+  }
+}
+
+fn main() {
+  // parsed: Ok(Name("nom"))
+  println!("parsed: {:?}", "Hello, nom!".parse::<Name>());
+
+  // parsed: Err(Error { input: "123!", code: Tag })
+  println!("parsed: {:?}", "Hello, 123!".parse::<Name>());
+}
+```
+
diff --git a/vendor/nom/src/bits/complete.rs b/vendor/nom/src/bits/complete.rs
new file mode 100644
index 00000000000000..bf36dcc2aae007
--- /dev/null
+++ b/vendor/nom/src/bits/complete.rs
@@ -0,0 +1,197 @@
+//! Bit level parsers
+//!
+
+use crate::error::{ErrorKind, ParseError};
+use crate::internal::{Err, IResult};
+use crate::lib::std::ops::{AddAssign, Div, RangeFrom, Shl, Shr};
+use crate::traits::{InputIter, InputLength, Slice, ToUsize};
+
+/// Generates a parser taking `count` bits
+///
+/// # Example
+/// ```rust
+/// # use nom::bits::complete::take;
+/// # use nom::IResult;
+/// # use nom::error::{Error, ErrorKind};
+/// // Input is a tuple of (input: I, bit_offset: usize)
+/// fn parser(input: (&[u8], usize), count: usize) -> IResult<(&[u8], usize), u8> {
+///  take(count)(input)
+/// }
+///
+/// // Consumes 0 bits, returns 0
+/// assert_eq!(parser(([0b00010010].as_ref(), 0), 0), Ok((([0b00010010].as_ref(), 0), 0)));
+///
+/// // Consumes 4 bits, returns their value and increases the offset to 4
+/// assert_eq!(parser(([0b00010010].as_ref(), 0), 4), Ok((([0b00010010].as_ref(), 4), 0b00000001)));
+///
+/// // Consumes 4 bits, offset is 4, returns their value and increases the offset to 0 of the next byte
+/// assert_eq!(parser(([0b00010010].as_ref(), 4), 4), Ok((([].as_ref(), 0), 0b00000010)));
+///
+/// // Tries to consume 12 bits but only 8 are available
+/// assert_eq!(parser(([0b00010010].as_ref(), 0), 12), Err(nom::Err::Error(Error{input: ([0b00010010].as_ref(), 0), code: ErrorKind::Eof })));
+/// ```
+pub fn take<I, O, C, E: ParseError<(I, usize)>>(
+  count: C,
+) -> impl Fn((I, usize)) -> IResult<(I, usize), O, E>
+where
+  I: Slice<RangeFrom<usize>> + InputIter<Item = u8> + InputLength,
+  C: ToUsize,
+  O: From<u8> + AddAssign + Shl<usize, Output = O> + Shr<usize, Output = O>,
+{
+  let count = count.to_usize();
+  move |(input, bit_offset): (I, usize)| {
+    if count == 0 {
+      Ok(((input, bit_offset), 0u8.into()))
+    } else {
+      let cnt = (count + bit_offset).div(8);
+      if input.input_len() * 8 < count + bit_offset {
+        Err(Err::Error(E::from_error_kind(
+          (input, bit_offset),
+          ErrorKind::Eof,
+        )))
+      } else {
+        let mut acc: O = 0_u8.into();
+        let mut offset: usize = bit_offset;
+        let mut remaining: usize = count;
+        let mut end_offset: usize = 0;
+
+        for byte in input.iter_elements().take(cnt + 1) {
+          if remaining == 0 {
+            break;
+          }
+          let val: O = if offset == 0 {
+            byte.into()
+          } else {
+            ((byte << offset) as u8 >> offset).into()
+          };
+
+          if remaining < 8 - offset {
+            acc += val >> (8 - offset - remaining);
+            end_offset = remaining + offset;
+            break;
+          } else {
+            acc += val << (remaining - (8 - offset));
+            remaining -= 8 - offset;
+            offset = 0;
+          }
+        }
+        Ok(((input.slice(cnt..), end_offset), acc))
+      }
+    }
+  }
+}
+
+/// Generates a parser taking `count` 
bits and comparing them to `pattern` +pub fn tag>( + pattern: O, + count: C, +) -> impl Fn((I, usize)) -> IResult<(I, usize), O, E> +where + I: Slice> + InputIter + InputLength + Clone, + C: ToUsize, + O: From + AddAssign + Shl + Shr + PartialEq, +{ + let count = count.to_usize(); + move |input: (I, usize)| { + let inp = input.clone(); + + take(count)(input).and_then(|(i, o)| { + if pattern == o { + Ok((i, o)) + } else { + Err(Err::Error(error_position!(inp, ErrorKind::TagBits))) + } + }) + } +} + +/// Parses one specific bit as a bool. +/// +/// # Example +/// ```rust +/// # use nom::bits::complete::bool; +/// # use nom::IResult; +/// # use nom::error::{Error, ErrorKind}; +/// +/// fn parse(input: (&[u8], usize)) -> IResult<(&[u8], usize), bool> { +/// bool(input) +/// } +/// +/// assert_eq!(parse(([0b10000000].as_ref(), 0)), Ok((([0b10000000].as_ref(), 1), true))); +/// assert_eq!(parse(([0b10000000].as_ref(), 1)), Ok((([0b10000000].as_ref(), 2), false))); +/// ``` +pub fn bool>(input: (I, usize)) -> IResult<(I, usize), bool, E> +where + I: Slice> + InputIter + InputLength, +{ + let (res, bit): (_, u32) = take(1usize)(input)?; + Ok((res, bit != 0)) +} + +#[cfg(test)] +mod test { + use super::*; + + #[test] + fn test_take_0() { + let input = [0b00010010].as_ref(); + let count = 0usize; + assert_eq!(count, 0usize); + let offset = 0usize; + + let result: crate::IResult<(&[u8], usize), usize> = take(count)((input, offset)); + + assert_eq!(result, Ok(((input, offset), 0))); + } + + #[test] + fn test_take_eof() { + let input = [0b00010010].as_ref(); + + let result: crate::IResult<(&[u8], usize), usize> = take(1usize)((input, 8)); + + assert_eq!( + result, + Err(crate::Err::Error(crate::error::Error { + input: (input, 8), + code: ErrorKind::Eof + })) + ) + } + + #[test] + fn test_take_span_over_multiple_bytes() { + let input = [0b00010010, 0b00110100, 0b11111111, 0b11111111].as_ref(); + + let result: crate::IResult<(&[u8], usize), usize> = take(24usize)((input, 4)); + + assert_eq!( + result, + Ok((([0b11111111].as_ref(), 4), 0b1000110100111111111111)) + ); + } + + #[test] + fn test_bool_0() { + let input = [0b10000000].as_ref(); + + let result: crate::IResult<(&[u8], usize), bool> = bool((input, 0)); + + assert_eq!(result, Ok(((input, 1), true))); + } + + #[test] + fn test_bool_eof() { + let input = [0b10000000].as_ref(); + + let result: crate::IResult<(&[u8], usize), bool> = bool((input, 8)); + + assert_eq!( + result, + Err(crate::Err::Error(crate::error::Error { + input: (input, 8), + code: ErrorKind::Eof + })) + ); + } +} diff --git a/vendor/nom/src/bits/mod.rs b/vendor/nom/src/bits/mod.rs new file mode 100644 index 00000000000000..0d3f73db25d1a8 --- /dev/null +++ b/vendor/nom/src/bits/mod.rs @@ -0,0 +1,179 @@ +//! Bit level parsers +//! + +pub mod complete; +pub mod streaming; + +use crate::error::{ErrorKind, ParseError}; +use crate::internal::{Err, IResult, Needed, Parser}; +use crate::lib::std::ops::RangeFrom; +use crate::traits::{ErrorConvert, Slice}; + +/// Converts a byte-level input to a bit-level input, for consumption by a parser that uses bits. +/// +/// Afterwards, the input is converted back to a byte-level parser, with any remaining bits thrown +/// away. 
+/// +/// # Example +/// ``` +/// use nom::bits::{bits, streaming::take}; +/// use nom::error::Error; +/// use nom::sequence::tuple; +/// use nom::IResult; +/// +/// fn parse(input: &[u8]) -> IResult<&[u8], (u8, u8)> { +/// bits::<_, _, Error<(&[u8], usize)>, _, _>(tuple((take(4usize), take(8usize))))(input) +/// } +/// +/// let input = &[0x12, 0x34, 0xff, 0xff]; +/// +/// let output = parse(input).expect("We take 1.5 bytes and the input is longer than 2 bytes"); +/// +/// // The first byte is consumed, the second byte is partially consumed and dropped. +/// let remaining = output.0; +/// assert_eq!(remaining, [0xff, 0xff]); +/// +/// let parsed = output.1; +/// assert_eq!(parsed.0, 0x01); +/// assert_eq!(parsed.1, 0x23); +/// ``` +pub fn bits(mut parser: P) -> impl FnMut(I) -> IResult +where + E1: ParseError<(I, usize)> + ErrorConvert, + E2: ParseError, + I: Slice>, + P: Parser<(I, usize), O, E1>, +{ + move |input: I| match parser.parse((input, 0)) { + Ok(((rest, offset), result)) => { + // If the next byte has been partially read, it will be sliced away as well. + // The parser functions might already slice away all fully read bytes. + // That's why `offset / 8` isn't necessarily needed at all times. + let remaining_bytes_index = offset / 8 + if offset % 8 == 0 { 0 } else { 1 }; + Ok((rest.slice(remaining_bytes_index..), result)) + } + Err(Err::Incomplete(n)) => Err(Err::Incomplete(n.map(|u| u.get() / 8 + 1))), + Err(Err::Error(e)) => Err(Err::Error(e.convert())), + Err(Err::Failure(e)) => Err(Err::Failure(e.convert())), + } +} + +/// Counterpart to `bits`, `bytes` transforms its bit stream input into a byte slice for the underlying +/// parser, allowing byte-slice parsers to work on bit streams. +/// +/// A partial byte remaining in the input will be ignored and the given parser will start parsing +/// at the next full byte. +/// +/// ``` +/// use nom::bits::{bits, bytes, streaming::take}; +/// use nom::combinator::rest; +/// use nom::error::Error; +/// use nom::sequence::tuple; +/// use nom::IResult; +/// +/// fn parse(input: &[u8]) -> IResult<&[u8], (u8, u8, &[u8])> { +/// bits::<_, _, Error<(&[u8], usize)>, _, _>(tuple(( +/// take(4usize), +/// take(8usize), +/// bytes::<_, _, Error<&[u8]>, _, _>(rest) +/// )))(input) +/// } +/// +/// let input = &[0x12, 0x34, 0xff, 0xff]; +/// +/// assert_eq!(parse( input ), Ok(( &[][..], (0x01, 0x23, &[0xff, 0xff][..]) ))); +/// ``` +pub fn bytes(mut parser: P) -> impl FnMut((I, usize)) -> IResult<(I, usize), O, E2> +where + E1: ParseError + ErrorConvert, + E2: ParseError<(I, usize)>, + I: Slice> + Clone, + P: Parser, +{ + move |(input, offset): (I, usize)| { + let inner = if offset % 8 != 0 { + input.slice((1 + offset / 8)..) + } else { + input.slice((offset / 8)..) 
+ }; + let i = (input, offset); + match parser.parse(inner) { + Ok((rest, res)) => Ok(((rest, 0), res)), + Err(Err::Incomplete(Needed::Unknown)) => Err(Err::Incomplete(Needed::Unknown)), + Err(Err::Incomplete(Needed::Size(sz))) => Err(match sz.get().checked_mul(8) { + Some(v) => Err::Incomplete(Needed::new(v)), + None => Err::Failure(E2::from_error_kind(i, ErrorKind::TooLarge)), + }), + Err(Err::Error(e)) => Err(Err::Error(e.convert())), + Err(Err::Failure(e)) => Err(Err::Failure(e.convert())), + } + } +} + +#[cfg(test)] +mod test { + use super::*; + use crate::bits::streaming::take; + use crate::error::Error; + use crate::sequence::tuple; + + #[test] + /// Take the `bits` function and assert that remaining bytes are correctly returned, if the + /// previous bytes are fully consumed + fn test_complete_byte_consumption_bits() { + let input = &[0x12, 0x34, 0x56, 0x78]; + + // Take 3 bit slices with sizes [4, 8, 4]. + let result: IResult<&[u8], (u8, u8, u8)> = + bits::<_, _, Error<(&[u8], usize)>, _, _>(tuple((take(4usize), take(8usize), take(4usize))))( + input, + ); + + let output = result.expect("We take 2 bytes and the input is longer than 2 bytes"); + + let remaining = output.0; + assert_eq!(remaining, [0x56, 0x78]); + + let parsed = output.1; + assert_eq!(parsed.0, 0x01); + assert_eq!(parsed.1, 0x23); + assert_eq!(parsed.2, 0x04); + } + + #[test] + /// Take the `bits` function and assert that remaining bytes are correctly returned, if the + /// previous bytes are NOT fully consumed. Partially consumed bytes are supposed to be dropped. + /// I.e. if we consume 1.5 bytes of 4 bytes, 2 bytes will be returned, bits 13-16 will be + /// dropped. + fn test_partial_byte_consumption_bits() { + let input = &[0x12, 0x34, 0x56, 0x78]; + + // Take bit slices with sizes [4, 8]. + let result: IResult<&[u8], (u8, u8)> = + bits::<_, _, Error<(&[u8], usize)>, _, _>(tuple((take(4usize), take(8usize))))(input); + + let output = result.expect("We take 1.5 bytes and the input is longer than 2 bytes"); + + let remaining = output.0; + assert_eq!(remaining, [0x56, 0x78]); + + let parsed = output.1; + assert_eq!(parsed.0, 0x01); + assert_eq!(parsed.1, 0x23); + } + + #[test] + #[cfg(feature = "std")] + /// Ensure that in Incomplete error is thrown, if too few bytes are passed for a given parser. + fn test_incomplete_bits() { + let input = &[0x12]; + + // Take bit slices with sizes [4, 8]. + let result: IResult<&[u8], (u8, u8)> = + bits::<_, _, Error<(&[u8], usize)>, _, _>(tuple((take(4usize), take(8usize))))(input); + + assert!(result.is_err()); + let error = result.err().unwrap(); + assert_eq!("Parsing requires 2 bytes/chars", error.to_string()); + } +} diff --git a/vendor/nom/src/bits/streaming.rs b/vendor/nom/src/bits/streaming.rs new file mode 100644 index 00000000000000..a7c8d0a67b9f26 --- /dev/null +++ b/vendor/nom/src/bits/streaming.rs @@ -0,0 +1,170 @@ +//! Bit level parsers +//! 
+ +use crate::error::{ErrorKind, ParseError}; +use crate::internal::{Err, IResult, Needed}; +use crate::lib::std::ops::{AddAssign, Div, RangeFrom, Shl, Shr}; +use crate::traits::{InputIter, InputLength, Slice, ToUsize}; + +/// Generates a parser taking `count` bits +pub fn take>( + count: C, +) -> impl Fn((I, usize)) -> IResult<(I, usize), O, E> +where + I: Slice> + InputIter + InputLength, + C: ToUsize, + O: From + AddAssign + Shl + Shr, +{ + let count = count.to_usize(); + move |(input, bit_offset): (I, usize)| { + if count == 0 { + Ok(((input, bit_offset), 0u8.into())) + } else { + let cnt = (count + bit_offset).div(8); + if input.input_len() * 8 < count + bit_offset { + Err(Err::Incomplete(Needed::new(count as usize))) + } else { + let mut acc: O = 0_u8.into(); + let mut offset: usize = bit_offset; + let mut remaining: usize = count; + let mut end_offset: usize = 0; + + for byte in input.iter_elements().take(cnt + 1) { + if remaining == 0 { + break; + } + let val: O = if offset == 0 { + byte.into() + } else { + ((byte << offset) as u8 >> offset).into() + }; + + if remaining < 8 - offset { + acc += val >> (8 - offset - remaining); + end_offset = remaining + offset; + break; + } else { + acc += val << (remaining - (8 - offset)); + remaining -= 8 - offset; + offset = 0; + } + } + Ok(((input.slice(cnt..), end_offset), acc)) + } + } + } +} + +/// Generates a parser taking `count` bits and comparing them to `pattern` +pub fn tag>( + pattern: O, + count: C, +) -> impl Fn((I, usize)) -> IResult<(I, usize), O, E> +where + I: Slice> + InputIter + InputLength + Clone, + C: ToUsize, + O: From + AddAssign + Shl + Shr + PartialEq, +{ + let count = count.to_usize(); + move |input: (I, usize)| { + let inp = input.clone(); + + take(count)(input).and_then(|(i, o)| { + if pattern == o { + Ok((i, o)) + } else { + Err(Err::Error(error_position!(inp, ErrorKind::TagBits))) + } + }) + } +} + +/// Parses one specific bit as a bool. 
+/// +/// # Example +/// ```rust +/// # use nom::bits::complete::bool; +/// # use nom::IResult; +/// # use nom::error::{Error, ErrorKind}; +/// +/// fn parse(input: (&[u8], usize)) -> IResult<(&[u8], usize), bool> { +/// bool(input) +/// } +/// +/// assert_eq!(parse(([0b10000000].as_ref(), 0)), Ok((([0b10000000].as_ref(), 1), true))); +/// assert_eq!(parse(([0b10000000].as_ref(), 1)), Ok((([0b10000000].as_ref(), 2), false))); +/// ``` +pub fn bool>(input: (I, usize)) -> IResult<(I, usize), bool, E> +where + I: Slice> + InputIter + InputLength, +{ + let (res, bit): (_, u32) = take(1usize)(input)?; + Ok((res, bit != 0)) +} + +#[cfg(test)] +mod test { + use super::*; + + #[test] + fn test_take_0() { + let input = [].as_ref(); + let count = 0usize; + assert_eq!(count, 0usize); + let offset = 0usize; + + let result: crate::IResult<(&[u8], usize), usize> = take(count)((input, offset)); + + assert_eq!(result, Ok(((input, offset), 0))); + } + + #[test] + fn test_tag_ok() { + let input = [0b00011111].as_ref(); + let offset = 0usize; + let bits_to_take = 4usize; + let value_to_tag = 0b0001; + + let result: crate::IResult<(&[u8], usize), usize> = + tag(value_to_tag, bits_to_take)((input, offset)); + + assert_eq!(result, Ok(((input, bits_to_take), value_to_tag))); + } + + #[test] + fn test_tag_err() { + let input = [0b00011111].as_ref(); + let offset = 0usize; + let bits_to_take = 4usize; + let value_to_tag = 0b1111; + + let result: crate::IResult<(&[u8], usize), usize> = + tag(value_to_tag, bits_to_take)((input, offset)); + + assert_eq!( + result, + Err(crate::Err::Error(crate::error::Error { + input: (input, offset), + code: ErrorKind::TagBits + })) + ); + } + + #[test] + fn test_bool_0() { + let input = [0b10000000].as_ref(); + + let result: crate::IResult<(&[u8], usize), bool> = bool((input, 0)); + + assert_eq!(result, Ok(((input, 1), true))); + } + + #[test] + fn test_bool_eof() { + let input = [0b10000000].as_ref(); + + let result: crate::IResult<(&[u8], usize), bool> = bool((input, 8)); + + assert_eq!(result, Err(crate::Err::Incomplete(Needed::new(1)))); + } +} diff --git a/vendor/nom/src/branch/mod.rs b/vendor/nom/src/branch/mod.rs new file mode 100644 index 00000000000000..e03622cb0c833d --- /dev/null +++ b/vendor/nom/src/branch/mod.rs @@ -0,0 +1,267 @@ +//! Choice combinators + +#[cfg(test)] +mod tests; + +use crate::error::ErrorKind; +use crate::error::ParseError; +use crate::internal::{Err, IResult, Parser}; + +/// Helper trait for the [alt()] combinator. +/// +/// This trait is implemented for tuples of up to 21 elements +pub trait Alt { + /// Tests each parser in the tuple and returns the result of the first one that succeeds + fn choice(&mut self, input: I) -> IResult; +} + +/// Tests a list of parsers one by one until one succeeds. +/// +/// It takes as argument a tuple of parsers. There is a maximum of 21 +/// parsers. 
If you need more, it is possible to nest them in other `alt` calls, +/// like this: `alt(parser_a, alt(parser_b, parser_c))` +/// +/// ```rust +/// # use nom::error_position; +/// # use nom::{Err,error::ErrorKind, Needed, IResult}; +/// use nom::character::complete::{alpha1, digit1}; +/// use nom::branch::alt; +/// # fn main() { +/// fn parser(input: &str) -> IResult<&str, &str> { +/// alt((alpha1, digit1))(input) +/// }; +/// +/// // the first parser, alpha1, recognizes the input +/// assert_eq!(parser("abc"), Ok(("", "abc"))); +/// +/// // the first parser returns an error, so alt tries the second one +/// assert_eq!(parser("123456"), Ok(("", "123456"))); +/// +/// // both parsers failed, and with the default error type, alt will return the last error +/// assert_eq!(parser(" "), Err(Err::Error(error_position!(" ", ErrorKind::Digit)))); +/// # } +/// ``` +/// +/// With a custom error type, it is possible to have alt return the error of the parser +/// that went the farthest in the input data +pub fn alt, List: Alt>( + mut l: List, +) -> impl FnMut(I) -> IResult { + move |i: I| l.choice(i) +} + +/// Helper trait for the [permutation()] combinator. +/// +/// This trait is implemented for tuples of up to 21 elements +pub trait Permutation { + /// Tries to apply all parsers in the tuple in various orders until all of them succeed + fn permutation(&mut self, input: I) -> IResult; +} + +/// Applies a list of parsers in any order. +/// +/// Permutation will succeed if all of the child parsers succeeded. +/// It takes as argument a tuple of parsers, and returns a +/// tuple of the parser results. +/// +/// ```rust +/// # use nom::{Err,error::{Error, ErrorKind}, Needed, IResult}; +/// use nom::character::complete::{alpha1, digit1}; +/// use nom::branch::permutation; +/// # fn main() { +/// fn parser(input: &str) -> IResult<&str, (&str, &str)> { +/// permutation((alpha1, digit1))(input) +/// } +/// +/// // permutation recognizes alphabetic characters then digit +/// assert_eq!(parser("abc123"), Ok(("", ("abc", "123")))); +/// +/// // but also in inverse order +/// assert_eq!(parser("123abc"), Ok(("", ("abc", "123")))); +/// +/// // it will fail if one of the parsers failed +/// assert_eq!(parser("abc;"), Err(Err::Error(Error::new(";", ErrorKind::Digit)))); +/// # } +/// ``` +/// +/// The parsers are applied greedily: if there are multiple unapplied parsers +/// that could parse the next slice of input, the first one is used. +/// ```rust +/// # use nom::{Err, error::{Error, ErrorKind}, IResult}; +/// use nom::branch::permutation; +/// use nom::character::complete::{anychar, char}; +/// +/// fn parser(input: &str) -> IResult<&str, (char, char)> { +/// permutation((anychar, char('a')))(input) +/// } +/// +/// // anychar parses 'b', then char('a') parses 'a' +/// assert_eq!(parser("ba"), Ok(("", ('b', 'a')))); +/// +/// // anychar parses 'a', then char('a') fails on 'b', +/// // even though char('a') followed by anychar would succeed +/// assert_eq!(parser("ab"), Err(Err::Error(Error::new("b", ErrorKind::Char)))); +/// ``` +/// +pub fn permutation, List: Permutation>( + mut l: List, +) -> impl FnMut(I) -> IResult { + move |i: I| l.permutation(i) +} + +macro_rules! 
alt_trait( + ($first:ident $second:ident $($id: ident)+) => ( + alt_trait!(__impl $first $second; $($id)+); + ); + (__impl $($current:ident)*; $head:ident $($id: ident)+) => ( + alt_trait_impl!($($current)*); + + alt_trait!(__impl $($current)* $head; $($id)+); + ); + (__impl $($current:ident)*; $head:ident) => ( + alt_trait_impl!($($current)*); + alt_trait_impl!($($current)* $head); + ); +); + +macro_rules! alt_trait_impl( + ($($id:ident)+) => ( + impl< + Input: Clone, Output, Error: ParseError, + $($id: Parser),+ + > Alt for ( $($id),+ ) { + + fn choice(&mut self, input: Input) -> IResult { + match self.0.parse(input.clone()) { + Err(Err::Error(e)) => alt_trait_inner!(1, self, input, e, $($id)+), + res => res, + } + } + } + ); +); + +macro_rules! alt_trait_inner( + ($it:tt, $self:expr, $input:expr, $err:expr, $head:ident $($id:ident)+) => ( + match $self.$it.parse($input.clone()) { + Err(Err::Error(e)) => { + let err = $err.or(e); + succ!($it, alt_trait_inner!($self, $input, err, $($id)+)) + } + res => res, + } + ); + ($it:tt, $self:expr, $input:expr, $err:expr, $head:ident) => ( + Err(Err::Error(Error::append($input, ErrorKind::Alt, $err))) + ); +); + +alt_trait!(A B C D E F G H I J K L M N O P Q R S T U); + +// Manually implement Alt for (A,), the 1-tuple type +impl, A: Parser> + Alt for (A,) +{ + fn choice(&mut self, input: Input) -> IResult { + self.0.parse(input) + } +} + +macro_rules! permutation_trait( + ( + $name1:ident $ty1:ident $item1:ident + $name2:ident $ty2:ident $item2:ident + $($name3:ident $ty3:ident $item3:ident)* + ) => ( + permutation_trait!(__impl $name1 $ty1 $item1, $name2 $ty2 $item2; $($name3 $ty3 $item3)*); + ); + ( + __impl $($name:ident $ty:ident $item:ident),+; + $name1:ident $ty1:ident $item1:ident $($name2:ident $ty2:ident $item2:ident)* + ) => ( + permutation_trait_impl!($($name $ty $item),+); + permutation_trait!(__impl $($name $ty $item),+ , $name1 $ty1 $item1; $($name2 $ty2 $item2)*); + ); + (__impl $($name:ident $ty:ident $item:ident),+;) => ( + permutation_trait_impl!($($name $ty $item),+); + ); +); + +macro_rules! permutation_trait_impl( + ($($name:ident $ty:ident $item:ident),+) => ( + impl< + Input: Clone, $($ty),+ , Error: ParseError, + $($name: Parser),+ + > Permutation for ( $($name),+ ) { + + fn permutation(&mut self, mut input: Input) -> IResult { + let mut res = ($(Option::<$ty>::None),+); + + loop { + let mut err: Option = None; + permutation_trait_inner!(0, self, input, res, err, $($name)+); + + // If we reach here, every iterator has either been applied before, + // or errored on the remaining input + if let Some(err) = err { + // There are remaining parsers, and all errored on the remaining input + return Err(Err::Error(Error::append(input, ErrorKind::Permutation, err))); + } + + // All parsers were applied + match res { + ($(Some($item)),+) => return Ok((input, ($($item),+))), + _ => unreachable!(), + } + } + } + } + ); +); + +macro_rules! 
permutation_trait_inner( + ($it:tt, $self:expr, $input:ident, $res:expr, $err:expr, $head:ident $($id:ident)*) => ( + if $res.$it.is_none() { + match $self.$it.parse($input.clone()) { + Ok((i, o)) => { + $input = i; + $res.$it = Some(o); + continue; + } + Err(Err::Error(e)) => { + $err = Some(match $err { + Some(err) => err.or(e), + None => e, + }); + } + Err(e) => return Err(e), + }; + } + succ!($it, permutation_trait_inner!($self, $input, $res, $err, $($id)*)); + ); + ($it:tt, $self:expr, $input:ident, $res:expr, $err:expr,) => (); +); + +permutation_trait!( + FnA A a + FnB B b + FnC C c + FnD D d + FnE E e + FnF F f + FnG G g + FnH H h + FnI I i + FnJ J j + FnK K k + FnL L l + FnM M m + FnN N n + FnO O o + FnP P p + FnQ Q q + FnR R r + FnS S s + FnT T t + FnU U u +); diff --git a/vendor/nom/src/branch/tests.rs b/vendor/nom/src/branch/tests.rs new file mode 100644 index 00000000000000..ecd44407e93f92 --- /dev/null +++ b/vendor/nom/src/branch/tests.rs @@ -0,0 +1,142 @@ +use crate::branch::{alt, permutation}; +use crate::bytes::streaming::tag; +use crate::error::ErrorKind; +use crate::internal::{Err, IResult, Needed}; +#[cfg(feature = "alloc")] +use crate::{ + error::ParseError, + lib::std::{ + fmt::Debug, + string::{String, ToString}, + }, +}; + +#[cfg(feature = "alloc")] +#[derive(Debug, Clone, PartialEq)] +pub struct ErrorStr(String); + +#[cfg(feature = "alloc")] +impl From for ErrorStr { + fn from(i: u32) -> Self { + ErrorStr(format!("custom error code: {}", i)) + } +} + +#[cfg(feature = "alloc")] +impl<'a> From<&'a str> for ErrorStr { + fn from(i: &'a str) -> Self { + ErrorStr(format!("custom error message: {}", i)) + } +} + +#[cfg(feature = "alloc")] +impl ParseError for ErrorStr { + fn from_error_kind(input: I, kind: ErrorKind) -> Self { + ErrorStr(format!("custom error message: ({:?}, {:?})", input, kind)) + } + + fn append(input: I, kind: ErrorKind, other: Self) -> Self { + ErrorStr(format!( + "custom error message: ({:?}, {:?}) - {:?}", + input, kind, other + )) + } +} + +#[cfg(feature = "alloc")] +#[test] +fn alt_test() { + fn work(input: &[u8]) -> IResult<&[u8], &[u8], ErrorStr> { + Ok((&b""[..], input)) + } + + #[allow(unused_variables)] + fn dont_work(input: &[u8]) -> IResult<&[u8], &[u8], ErrorStr> { + Err(Err::Error(ErrorStr("abcd".to_string()))) + } + + fn work2(input: &[u8]) -> IResult<&[u8], &[u8], ErrorStr> { + Ok((input, &b""[..])) + } + + fn alt1(i: &[u8]) -> IResult<&[u8], &[u8], ErrorStr> { + alt((dont_work, dont_work))(i) + } + fn alt2(i: &[u8]) -> IResult<&[u8], &[u8], ErrorStr> { + alt((dont_work, work))(i) + } + fn alt3(i: &[u8]) -> IResult<&[u8], &[u8], ErrorStr> { + alt((dont_work, dont_work, work2, dont_work))(i) + } + //named!(alt1, alt!(dont_work | dont_work)); + //named!(alt2, alt!(dont_work | work)); + //named!(alt3, alt!(dont_work | dont_work | work2 | dont_work)); + + let a = &b"abcd"[..]; + assert_eq!( + alt1(a), + Err(Err::Error(error_node_position!( + a, + ErrorKind::Alt, + ErrorStr("abcd".to_string()) + ))) + ); + assert_eq!(alt2(a), Ok((&b""[..], a))); + assert_eq!(alt3(a), Ok((a, &b""[..]))); + + fn alt4(i: &[u8]) -> IResult<&[u8], &[u8]> { + alt((tag("abcd"), tag("efgh")))(i) + } + let b = &b"efgh"[..]; + assert_eq!(alt4(a), Ok((&b""[..], a))); + assert_eq!(alt4(b), Ok((&b""[..], b))); +} + +#[test] +fn alt_incomplete() { + fn alt1(i: &[u8]) -> IResult<&[u8], &[u8]> { + alt((tag("a"), tag("bc"), tag("def")))(i) + } + + let a = &b""[..]; + assert_eq!(alt1(a), Err(Err::Incomplete(Needed::new(1)))); + let a = &b"b"[..]; + assert_eq!(alt1(a), 
Err(Err::Incomplete(Needed::new(1)))); + let a = &b"bcd"[..]; + assert_eq!(alt1(a), Ok((&b"d"[..], &b"bc"[..]))); + let a = &b"cde"[..]; + assert_eq!(alt1(a), Err(Err::Error(error_position!(a, ErrorKind::Tag)))); + let a = &b"de"[..]; + assert_eq!(alt1(a), Err(Err::Incomplete(Needed::new(1)))); + let a = &b"defg"[..]; + assert_eq!(alt1(a), Ok((&b"g"[..], &b"def"[..]))); +} + +#[test] +fn permutation_test() { + fn perm(i: &[u8]) -> IResult<&[u8], (&[u8], &[u8], &[u8])> { + permutation((tag("abcd"), tag("efg"), tag("hi")))(i) + } + + let expected = (&b"abcd"[..], &b"efg"[..], &b"hi"[..]); + + let a = &b"abcdefghijk"[..]; + assert_eq!(perm(a), Ok((&b"jk"[..], expected))); + let b = &b"efgabcdhijk"[..]; + assert_eq!(perm(b), Ok((&b"jk"[..], expected))); + let c = &b"hiefgabcdjk"[..]; + assert_eq!(perm(c), Ok((&b"jk"[..], expected))); + + let d = &b"efgxyzabcdefghi"[..]; + assert_eq!( + perm(d), + Err(Err::Error(error_node_position!( + &b"efgxyzabcdefghi"[..], + ErrorKind::Permutation, + error_position!(&b"xyzabcdefghi"[..], ErrorKind::Tag) + ))) + ); + + let e = &b"efgabc"[..]; + assert_eq!(perm(e), Err(Err::Incomplete(Needed::new(1)))); +} diff --git a/vendor/nom/src/bytes/complete.rs b/vendor/nom/src/bytes/complete.rs new file mode 100644 index 00000000000000..a5442b53f7428b --- /dev/null +++ b/vendor/nom/src/bytes/complete.rs @@ -0,0 +1,756 @@ +//! Parsers recognizing bytes streams, complete input version + +use crate::error::ErrorKind; +use crate::error::ParseError; +use crate::internal::{Err, IResult, Parser}; +use crate::lib::std::ops::RangeFrom; +use crate::lib::std::result::Result::*; +use crate::traits::{ + Compare, CompareResult, FindSubstring, FindToken, InputIter, InputLength, InputTake, + InputTakeAtPosition, Slice, ToUsize, +}; + +/// Recognizes a pattern +/// +/// The input data will be compared to the tag combinator's argument and will return the part of +/// the input that matches the argument +/// +/// It will return `Err(Err::Error((_, ErrorKind::Tag)))` if the input doesn't match the pattern +/// # Example +/// ```rust +/// # use nom::{Err, error::{Error, ErrorKind}, Needed, IResult}; +/// use nom::bytes::complete::tag; +/// +/// fn parser(s: &str) -> IResult<&str, &str> { +/// tag("Hello")(s) +/// } +/// +/// assert_eq!(parser("Hello, World!"), Ok((", World!", "Hello"))); +/// assert_eq!(parser("Something"), Err(Err::Error(Error::new("Something", ErrorKind::Tag)))); +/// assert_eq!(parser(""), Err(Err::Error(Error::new("", ErrorKind::Tag)))); +/// ``` +pub fn tag>( + tag: T, +) -> impl Fn(Input) -> IResult +where + Input: InputTake + Compare, + T: InputLength + Clone, +{ + move |i: Input| { + let tag_len = tag.input_len(); + let t = tag.clone(); + let res: IResult<_, _, Error> = match i.compare(t) { + CompareResult::Ok => Ok(i.take_split(tag_len)), + _ => { + let e: ErrorKind = ErrorKind::Tag; + Err(Err::Error(Error::from_error_kind(i, e))) + } + }; + res + } +} + +/// Recognizes a case insensitive pattern. +/// +/// The input data will be compared to the tag combinator's argument and will return the part of +/// the input that matches the argument with no regard to case. +/// +/// It will return `Err(Err::Error((_, ErrorKind::Tag)))` if the input doesn't match the pattern. 
+/// # Example +/// ```rust +/// # use nom::{Err, error::{Error, ErrorKind}, Needed, IResult}; +/// use nom::bytes::complete::tag_no_case; +/// +/// fn parser(s: &str) -> IResult<&str, &str> { +/// tag_no_case("hello")(s) +/// } +/// +/// assert_eq!(parser("Hello, World!"), Ok((", World!", "Hello"))); +/// assert_eq!(parser("hello, World!"), Ok((", World!", "hello"))); +/// assert_eq!(parser("HeLlO, World!"), Ok((", World!", "HeLlO"))); +/// assert_eq!(parser("Something"), Err(Err::Error(Error::new("Something", ErrorKind::Tag)))); +/// assert_eq!(parser(""), Err(Err::Error(Error::new("", ErrorKind::Tag)))); +/// ``` +pub fn tag_no_case>( + tag: T, +) -> impl Fn(Input) -> IResult +where + Input: InputTake + Compare, + T: InputLength + Clone, +{ + move |i: Input| { + let tag_len = tag.input_len(); + let t = tag.clone(); + + let res: IResult<_, _, Error> = match (i).compare_no_case(t) { + CompareResult::Ok => Ok(i.take_split(tag_len)), + _ => { + let e: ErrorKind = ErrorKind::Tag; + Err(Err::Error(Error::from_error_kind(i, e))) + } + }; + res + } +} + +/// Parse till certain characters are met. +/// +/// The parser will return the longest slice till one of the characters of the combinator's argument are met. +/// +/// It doesn't consume the matched character. +/// +/// It will return a `Err::Error(("", ErrorKind::IsNot))` if the pattern wasn't met. +/// # Example +/// ```rust +/// # use nom::{Err, error::{Error, ErrorKind}, Needed, IResult}; +/// use nom::bytes::complete::is_not; +/// +/// fn not_space(s: &str) -> IResult<&str, &str> { +/// is_not(" \t\r\n")(s) +/// } +/// +/// assert_eq!(not_space("Hello, World!"), Ok((" World!", "Hello,"))); +/// assert_eq!(not_space("Sometimes\t"), Ok(("\t", "Sometimes"))); +/// assert_eq!(not_space("Nospace"), Ok(("", "Nospace"))); +/// assert_eq!(not_space(""), Err(Err::Error(Error::new("", ErrorKind::IsNot)))); +/// ``` +pub fn is_not>( + arr: T, +) -> impl Fn(Input) -> IResult +where + Input: InputTakeAtPosition, + T: FindToken<::Item>, +{ + move |i: Input| { + let e: ErrorKind = ErrorKind::IsNot; + i.split_at_position1_complete(|c| arr.find_token(c), e) + } +} + +/// Returns the longest slice of the matches the pattern. +/// +/// The parser will return the longest slice consisting of the characters in provided in the +/// combinator's argument. +/// +/// It will return a `Err(Err::Error((_, ErrorKind::IsA)))` if the pattern wasn't met. +/// # Example +/// ```rust +/// # use nom::{Err, error::{Error, ErrorKind}, Needed, IResult}; +/// use nom::bytes::complete::is_a; +/// +/// fn hex(s: &str) -> IResult<&str, &str> { +/// is_a("1234567890ABCDEF")(s) +/// } +/// +/// assert_eq!(hex("123 and voila"), Ok((" and voila", "123"))); +/// assert_eq!(hex("DEADBEEF and others"), Ok((" and others", "DEADBEEF"))); +/// assert_eq!(hex("BADBABEsomething"), Ok(("something", "BADBABE"))); +/// assert_eq!(hex("D15EA5E"), Ok(("", "D15EA5E"))); +/// assert_eq!(hex(""), Err(Err::Error(Error::new("", ErrorKind::IsA)))); +/// ``` +pub fn is_a>( + arr: T, +) -> impl Fn(Input) -> IResult +where + Input: InputTakeAtPosition, + T: FindToken<::Item>, +{ + move |i: Input| { + let e: ErrorKind = ErrorKind::IsA; + i.split_at_position1_complete(|c| !arr.find_token(c), e) + } +} + +/// Returns the longest input slice (if any) that matches the predicate. +/// +/// The parser will return the longest slice that matches the given predicate *(a function that +/// takes the input and returns a bool)*. 
+/// # Example +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed, IResult}; +/// use nom::bytes::complete::take_while; +/// use nom::character::is_alphabetic; +/// +/// fn alpha(s: &[u8]) -> IResult<&[u8], &[u8]> { +/// take_while(is_alphabetic)(s) +/// } +/// +/// assert_eq!(alpha(b"latin123"), Ok((&b"123"[..], &b"latin"[..]))); +/// assert_eq!(alpha(b"12345"), Ok((&b"12345"[..], &b""[..]))); +/// assert_eq!(alpha(b"latin"), Ok((&b""[..], &b"latin"[..]))); +/// assert_eq!(alpha(b""), Ok((&b""[..], &b""[..]))); +/// ``` +pub fn take_while>( + cond: F, +) -> impl Fn(Input) -> IResult +where + Input: InputTakeAtPosition, + F: Fn(::Item) -> bool, +{ + move |i: Input| i.split_at_position_complete(|c| !cond(c)) +} + +/// Returns the longest (at least 1) input slice that matches the predicate. +/// +/// The parser will return the longest slice that matches the given predicate *(a function that +/// takes the input and returns a bool)*. +/// +/// It will return an `Err(Err::Error((_, ErrorKind::TakeWhile1)))` if the pattern wasn't met. +/// # Example +/// ```rust +/// # use nom::{Err, error::{Error, ErrorKind}, Needed, IResult}; +/// use nom::bytes::complete::take_while1; +/// use nom::character::is_alphabetic; +/// +/// fn alpha(s: &[u8]) -> IResult<&[u8], &[u8]> { +/// take_while1(is_alphabetic)(s) +/// } +/// +/// assert_eq!(alpha(b"latin123"), Ok((&b"123"[..], &b"latin"[..]))); +/// assert_eq!(alpha(b"latin"), Ok((&b""[..], &b"latin"[..]))); +/// assert_eq!(alpha(b"12345"), Err(Err::Error(Error::new(&b"12345"[..], ErrorKind::TakeWhile1)))); +/// ``` +pub fn take_while1>( + cond: F, +) -> impl Fn(Input) -> IResult +where + Input: InputTakeAtPosition, + F: Fn(::Item) -> bool, +{ + move |i: Input| { + let e: ErrorKind = ErrorKind::TakeWhile1; + i.split_at_position1_complete(|c| !cond(c), e) + } +} + +/// Returns the longest (m <= len <= n) input slice that matches the predicate. +/// +/// The parser will return the longest slice that matches the given predicate *(a function that +/// takes the input and returns a bool)*. +/// +/// It will return an `Err::Error((_, ErrorKind::TakeWhileMN))` if the pattern wasn't met or is out +/// of range (m <= len <= n). 
+/// # Example +/// ```rust +/// # use nom::{Err, error::{Error, ErrorKind}, Needed, IResult}; +/// use nom::bytes::complete::take_while_m_n; +/// use nom::character::is_alphabetic; +/// +/// fn short_alpha(s: &[u8]) -> IResult<&[u8], &[u8]> { +/// take_while_m_n(3, 6, is_alphabetic)(s) +/// } +/// +/// assert_eq!(short_alpha(b"latin123"), Ok((&b"123"[..], &b"latin"[..]))); +/// assert_eq!(short_alpha(b"lengthy"), Ok((&b"y"[..], &b"length"[..]))); +/// assert_eq!(short_alpha(b"latin"), Ok((&b""[..], &b"latin"[..]))); +/// assert_eq!(short_alpha(b"ed"), Err(Err::Error(Error::new(&b"ed"[..], ErrorKind::TakeWhileMN)))); +/// assert_eq!(short_alpha(b"12345"), Err(Err::Error(Error::new(&b"12345"[..], ErrorKind::TakeWhileMN)))); +/// ``` +pub fn take_while_m_n>( + m: usize, + n: usize, + cond: F, +) -> impl Fn(Input) -> IResult +where + Input: InputTake + InputIter + InputLength + Slice>, + F: Fn(::Item) -> bool, +{ + move |i: Input| { + let input = i; + + match input.position(|c| !cond(c)) { + Some(idx) => { + if idx >= m { + if idx <= n { + let res: IResult<_, _, Error> = if let Ok(index) = input.slice_index(idx) { + Ok(input.take_split(index)) + } else { + Err(Err::Error(Error::from_error_kind( + input, + ErrorKind::TakeWhileMN, + ))) + }; + res + } else { + let res: IResult<_, _, Error> = if let Ok(index) = input.slice_index(n) { + Ok(input.take_split(index)) + } else { + Err(Err::Error(Error::from_error_kind( + input, + ErrorKind::TakeWhileMN, + ))) + }; + res + } + } else { + let e = ErrorKind::TakeWhileMN; + Err(Err::Error(Error::from_error_kind(input, e))) + } + } + None => { + let len = input.input_len(); + if len >= n { + match input.slice_index(n) { + Ok(index) => Ok(input.take_split(index)), + Err(_needed) => Err(Err::Error(Error::from_error_kind( + input, + ErrorKind::TakeWhileMN, + ))), + } + } else if len >= m && len <= n { + let res: IResult<_, _, Error> = Ok((input.slice(len..), input)); + res + } else { + let e = ErrorKind::TakeWhileMN; + Err(Err::Error(Error::from_error_kind(input, e))) + } + } + } + } +} + +/// Returns the longest input slice (if any) till a predicate is met. +/// +/// The parser will return the longest slice till the given predicate *(a function that +/// takes the input and returns a bool)*. +/// # Example +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed, IResult}; +/// use nom::bytes::complete::take_till; +/// +/// fn till_colon(s: &str) -> IResult<&str, &str> { +/// take_till(|c| c == ':')(s) +/// } +/// +/// assert_eq!(till_colon("latin:123"), Ok((":123", "latin"))); +/// assert_eq!(till_colon(":empty matched"), Ok((":empty matched", ""))); //allowed +/// assert_eq!(till_colon("12345"), Ok(("", "12345"))); +/// assert_eq!(till_colon(""), Ok(("", ""))); +/// ``` +pub fn take_till>( + cond: F, +) -> impl Fn(Input) -> IResult +where + Input: InputTakeAtPosition, + F: Fn(::Item) -> bool, +{ + move |i: Input| i.split_at_position_complete(|c| cond(c)) +} + +/// Returns the longest (at least 1) input slice till a predicate is met. +/// +/// The parser will return the longest slice till the given predicate *(a function that +/// takes the input and returns a bool)*. +/// +/// It will return `Err(Err::Error((_, ErrorKind::TakeTill1)))` if the input is empty or the +/// predicate matches the first input. 
+/// # Example +/// ```rust +/// # use nom::{Err, error::{Error, ErrorKind}, Needed, IResult}; +/// use nom::bytes::complete::take_till1; +/// +/// fn till_colon(s: &str) -> IResult<&str, &str> { +/// take_till1(|c| c == ':')(s) +/// } +/// +/// assert_eq!(till_colon("latin:123"), Ok((":123", "latin"))); +/// assert_eq!(till_colon(":empty matched"), Err(Err::Error(Error::new(":empty matched", ErrorKind::TakeTill1)))); +/// assert_eq!(till_colon("12345"), Ok(("", "12345"))); +/// assert_eq!(till_colon(""), Err(Err::Error(Error::new("", ErrorKind::TakeTill1)))); +/// ``` +pub fn take_till1>( + cond: F, +) -> impl Fn(Input) -> IResult +where + Input: InputTakeAtPosition, + F: Fn(::Item) -> bool, +{ + move |i: Input| { + let e: ErrorKind = ErrorKind::TakeTill1; + i.split_at_position1_complete(|c| cond(c), e) + } +} + +/// Returns an input slice containing the first N input elements (Input[..N]). +/// +/// It will return `Err(Err::Error((_, ErrorKind::Eof)))` if the input is shorter than the argument. +/// # Example +/// ```rust +/// # use nom::{Err, error::{Error, ErrorKind}, Needed, IResult}; +/// use nom::bytes::complete::take; +/// +/// fn take6(s: &str) -> IResult<&str, &str> { +/// take(6usize)(s) +/// } +/// +/// assert_eq!(take6("1234567"), Ok(("7", "123456"))); +/// assert_eq!(take6("things"), Ok(("", "things"))); +/// assert_eq!(take6("short"), Err(Err::Error(Error::new("short", ErrorKind::Eof)))); +/// assert_eq!(take6(""), Err(Err::Error(Error::new("", ErrorKind::Eof)))); +/// ``` +/// +/// The units that are taken will depend on the input type. For example, for a +/// `&str` it will take a number of `char`'s, whereas for a `&[u8]` it will +/// take that many `u8`'s: +/// +/// ```rust +/// use nom::error::Error; +/// use nom::bytes::complete::take; +/// +/// assert_eq!(take::<_, _, Error<_>>(1usize)("💙"), Ok(("", "💙"))); +/// assert_eq!(take::<_, _, Error<_>>(1usize)("💙".as_bytes()), Ok((b"\x9F\x92\x99".as_ref(), b"\xF0".as_ref()))); +/// ``` +pub fn take>( + count: C, +) -> impl Fn(Input) -> IResult +where + Input: InputIter + InputTake, + C: ToUsize, +{ + let c = count.to_usize(); + move |i: Input| match i.slice_index(c) { + Err(_needed) => Err(Err::Error(Error::from_error_kind(i, ErrorKind::Eof))), + Ok(index) => Ok(i.take_split(index)), + } +} + +/// Returns the input slice up to the first occurrence of the pattern. +/// +/// It doesn't consume the pattern. It will return `Err(Err::Error((_, ErrorKind::TakeUntil)))` +/// if the pattern wasn't met. +/// # Example +/// ```rust +/// # use nom::{Err, error::{Error, ErrorKind}, Needed, IResult}; +/// use nom::bytes::complete::take_until; +/// +/// fn until_eof(s: &str) -> IResult<&str, &str> { +/// take_until("eof")(s) +/// } +/// +/// assert_eq!(until_eof("hello, worldeof"), Ok(("eof", "hello, world"))); +/// assert_eq!(until_eof("hello, world"), Err(Err::Error(Error::new("hello, world", ErrorKind::TakeUntil)))); +/// assert_eq!(until_eof(""), Err(Err::Error(Error::new("", ErrorKind::TakeUntil)))); +/// assert_eq!(until_eof("1eof2eof"), Ok(("eof2eof", "1"))); +/// ``` +pub fn take_until>( + tag: T, +) -> impl Fn(Input) -> IResult +where + Input: InputTake + FindSubstring, + T: InputLength + Clone, +{ + move |i: Input| { + let t = tag.clone(); + let res: IResult<_, _, Error> = match i.find_substring(t) { + None => Err(Err::Error(Error::from_error_kind(i, ErrorKind::TakeUntil))), + Some(index) => Ok(i.take_split(index)), + }; + res + } +} + +/// Returns the non empty input slice up to the first occurrence of the pattern. 
+/// +/// It doesn't consume the pattern. It will return `Err(Err::Error((_, ErrorKind::TakeUntil)))` +/// if the pattern wasn't met. +/// # Example +/// ```rust +/// # use nom::{Err, error::{Error, ErrorKind}, Needed, IResult}; +/// use nom::bytes::complete::take_until1; +/// +/// fn until_eof(s: &str) -> IResult<&str, &str> { +/// take_until1("eof")(s) +/// } +/// +/// assert_eq!(until_eof("hello, worldeof"), Ok(("eof", "hello, world"))); +/// assert_eq!(until_eof("hello, world"), Err(Err::Error(Error::new("hello, world", ErrorKind::TakeUntil)))); +/// assert_eq!(until_eof(""), Err(Err::Error(Error::new("", ErrorKind::TakeUntil)))); +/// assert_eq!(until_eof("1eof2eof"), Ok(("eof2eof", "1"))); +/// assert_eq!(until_eof("eof"), Err(Err::Error(Error::new("eof", ErrorKind::TakeUntil)))); +/// ``` +pub fn take_until1>( + tag: T, +) -> impl Fn(Input) -> IResult +where + Input: InputTake + FindSubstring, + T: InputLength + Clone, +{ + move |i: Input| { + let t = tag.clone(); + let res: IResult<_, _, Error> = match i.find_substring(t) { + None => Err(Err::Error(Error::from_error_kind(i, ErrorKind::TakeUntil))), + Some(0) => Err(Err::Error(Error::from_error_kind(i, ErrorKind::TakeUntil))), + Some(index) => Ok(i.take_split(index)), + }; + res + } +} + +/// Matches a byte string with escaped characters. +/// +/// * The first argument matches the normal characters (it must not accept the control character) +/// * The second argument is the control character (like `\` in most languages) +/// * The third argument matches the escaped characters +/// # Example +/// ``` +/// # use nom::{Err, error::ErrorKind, Needed, IResult}; +/// # use nom::character::complete::digit1; +/// use nom::bytes::complete::escaped; +/// use nom::character::complete::one_of; +/// +/// fn esc(s: &str) -> IResult<&str, &str> { +/// escaped(digit1, '\\', one_of(r#""n\"#))(s) +/// } +/// +/// assert_eq!(esc("123;"), Ok((";", "123"))); +/// assert_eq!(esc(r#"12\"34;"#), Ok((";", r#"12\"34"#))); +/// ``` +/// +pub fn escaped<'a, Input: 'a, Error, F, G, O1, O2>( + mut normal: F, + control_char: char, + mut escapable: G, +) -> impl FnMut(Input) -> IResult +where + Input: Clone + + crate::traits::Offset + + InputLength + + InputTake + + InputTakeAtPosition + + Slice> + + InputIter, + ::Item: crate::traits::AsChar, + F: Parser, + G: Parser, + Error: ParseError, +{ + use crate::traits::AsChar; + + move |input: Input| { + let mut i = input.clone(); + + while i.input_len() > 0 { + let current_len = i.input_len(); + + match normal.parse(i.clone()) { + Ok((i2, _)) => { + // return if we consumed everything or if the normal parser + // does not consume anything + if i2.input_len() == 0 { + return Ok((input.slice(input.input_len()..), input)); + } else if i2.input_len() == current_len { + let index = input.offset(&i2); + return Ok(input.take_split(index)); + } else { + i = i2; + } + } + Err(Err::Error(_)) => { + // unwrap() should be safe here since index < $i.input_len() + if i.iter_elements().next().unwrap().as_char() == control_char { + let next = control_char.len_utf8(); + if next >= i.input_len() { + return Err(Err::Error(Error::from_error_kind( + input, + ErrorKind::Escaped, + ))); + } else { + match escapable.parse(i.slice(next..)) { + Ok((i2, _)) => { + if i2.input_len() == 0 { + return Ok((input.slice(input.input_len()..), input)); + } else { + i = i2; + } + } + Err(e) => return Err(e), + } + } + } else { + let index = input.offset(&i); + if index == 0 { + return Err(Err::Error(Error::from_error_kind( + input, + ErrorKind::Escaped, + 
))); + } + return Ok(input.take_split(index)); + } + } + Err(e) => { + return Err(e); + } + } + } + + Ok((input.slice(input.input_len()..), input)) + } +} + +/// Matches a byte string with escaped characters. +/// +/// * The first argument matches the normal characters (it must not match the control character) +/// * The second argument is the control character (like `\` in most languages) +/// * The third argument matches the escaped characters and transforms them +/// +/// As an example, the chain `abc\tdef` could be `abc def` (it also consumes the control character) +/// +/// ``` +/// # use nom::{Err, error::ErrorKind, Needed, IResult}; +/// # use std::str::from_utf8; +/// use nom::bytes::complete::{escaped_transform, tag}; +/// use nom::character::complete::alpha1; +/// use nom::branch::alt; +/// use nom::combinator::value; +/// +/// fn parser(input: &str) -> IResult<&str, String> { +/// escaped_transform( +/// alpha1, +/// '\\', +/// alt(( +/// value("\\", tag("\\")), +/// value("\"", tag("\"")), +/// value("\n", tag("n")), +/// )) +/// )(input) +/// } +/// +/// assert_eq!(parser("ab\\\"cd"), Ok(("", String::from("ab\"cd")))); +/// assert_eq!(parser("ab\\ncd"), Ok(("", String::from("ab\ncd")))); +/// ``` +#[cfg(feature = "alloc")] +#[cfg_attr(feature = "docsrs", doc(cfg(feature = "alloc")))] +pub fn escaped_transform( + mut normal: F, + control_char: char, + mut transform: G, +) -> impl FnMut(Input) -> IResult +where + Input: Clone + + crate::traits::Offset + + InputLength + + InputTake + + InputTakeAtPosition + + Slice> + + InputIter, + Input: crate::traits::ExtendInto, + O1: crate::traits::ExtendInto, + O2: crate::traits::ExtendInto, + ::Item: crate::traits::AsChar, + F: Parser, + G: Parser, + Error: ParseError, +{ + use crate::traits::AsChar; + + move |input: Input| { + let mut index = 0; + let mut res = input.new_builder(); + + let i = input.clone(); + + while index < i.input_len() { + let current_len = i.input_len(); + let remainder = i.slice(index..); + match normal.parse(remainder.clone()) { + Ok((i2, o)) => { + o.extend_into(&mut res); + if i2.input_len() == 0 { + return Ok((i.slice(i.input_len()..), res)); + } else if i2.input_len() == current_len { + return Ok((remainder, res)); + } else { + index = input.offset(&i2); + } + } + Err(Err::Error(_)) => { + // unwrap() should be safe here since index < $i.input_len() + if remainder.iter_elements().next().unwrap().as_char() == control_char { + let next = index + control_char.len_utf8(); + let input_len = input.input_len(); + + if next >= input_len { + return Err(Err::Error(Error::from_error_kind( + remainder, + ErrorKind::EscapedTransform, + ))); + } else { + match transform.parse(i.slice(next..)) { + Ok((i2, o)) => { + o.extend_into(&mut res); + if i2.input_len() == 0 { + return Ok((i.slice(i.input_len()..), res)); + } else { + index = input.offset(&i2); + } + } + Err(e) => return Err(e), + } + } + } else { + if index == 0 { + return Err(Err::Error(Error::from_error_kind( + remainder, + ErrorKind::EscapedTransform, + ))); + } + return Ok((remainder, res)); + } + } + Err(e) => return Err(e), + } + } + Ok((input.slice(index..), res)) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn complete_take_while_m_n_utf8_all_matching() { + let result: IResult<&str, &str> = + super::take_while_m_n(1, 4, |c: char| c.is_alphabetic())("øn"); + assert_eq!(result, Ok(("", "øn"))); + } + + #[test] + fn complete_take_while_m_n_utf8_all_matching_substring() { + let result: IResult<&str, &str> = + super::take_while_m_n(1, 1, |c: 
char| c.is_alphabetic())("øn"); + assert_eq!(result, Ok(("n", "ø"))); + } + + // issue #1336 "escaped hangs if normal parser accepts empty" + fn escaped_string(input: &str) -> IResult<&str, &str> { + use crate::character::complete::{alpha0, one_of}; + escaped(alpha0, '\\', one_of("n"))(input) + } + + // issue #1336 "escaped hangs if normal parser accepts empty" + #[test] + fn escaped_hang() { + escaped_string("7").unwrap(); + escaped_string("a7").unwrap(); + } + + // issue ##1118 escaped does not work with empty string + fn unquote<'a>(input: &'a str) -> IResult<&'a str, &'a str> { + use crate::bytes::complete::*; + use crate::character::complete::*; + use crate::combinator::opt; + use crate::sequence::delimited; + + delimited( + char('"'), + escaped(opt(none_of(r#"\""#)), '\\', one_of(r#"\"rnt"#)), + char('"'), + )(input) + } + + #[test] + fn escaped_hang_1118() { + assert_eq!(unquote(r#""""#), Ok(("", ""))); + } +} diff --git a/vendor/nom/src/bytes/mod.rs b/vendor/nom/src/bytes/mod.rs new file mode 100644 index 00000000000000..7bc2d15a79cb99 --- /dev/null +++ b/vendor/nom/src/bytes/mod.rs @@ -0,0 +1,6 @@ +//! Parsers recognizing bytes streams + +pub mod complete; +pub mod streaming; +#[cfg(test)] +mod tests; diff --git a/vendor/nom/src/bytes/streaming.rs b/vendor/nom/src/bytes/streaming.rs new file mode 100644 index 00000000000000..e972760e21e47e --- /dev/null +++ b/vendor/nom/src/bytes/streaming.rs @@ -0,0 +1,700 @@ +//! Parsers recognizing bytes streams, streaming version + +use crate::error::ErrorKind; +use crate::error::ParseError; +use crate::internal::{Err, IResult, Needed, Parser}; +use crate::lib::std::ops::RangeFrom; +use crate::lib::std::result::Result::*; +use crate::traits::{ + Compare, CompareResult, FindSubstring, FindToken, InputIter, InputLength, InputTake, + InputTakeAtPosition, Slice, ToUsize, +}; + +/// Recognizes a pattern. +/// +/// The input data will be compared to the tag combinator's argument and will return the part of +/// the input that matches the argument. +/// # Example +/// ```rust +/// # use nom::{Err, error::{Error, ErrorKind}, Needed, IResult}; +/// use nom::bytes::streaming::tag; +/// +/// fn parser(s: &str) -> IResult<&str, &str> { +/// tag("Hello")(s) +/// } +/// +/// assert_eq!(parser("Hello, World!"), Ok((", World!", "Hello"))); +/// assert_eq!(parser("Something"), Err(Err::Error(Error::new("Something", ErrorKind::Tag)))); +/// assert_eq!(parser("S"), Err(Err::Error(Error::new("S", ErrorKind::Tag)))); +/// assert_eq!(parser("H"), Err(Err::Incomplete(Needed::new(4)))); +/// ``` +pub fn tag>( + tag: T, +) -> impl Fn(Input) -> IResult +where + Input: InputTake + InputLength + Compare, + T: InputLength + Clone, +{ + move |i: Input| { + let tag_len = tag.input_len(); + let t = tag.clone(); + + let res: IResult<_, _, Error> = match i.compare(t) { + CompareResult::Ok => Ok(i.take_split(tag_len)), + CompareResult::Incomplete => Err(Err::Incomplete(Needed::new(tag_len - i.input_len()))), + CompareResult::Error => { + let e: ErrorKind = ErrorKind::Tag; + Err(Err::Error(Error::from_error_kind(i, e))) + } + }; + res + } +} + +/// Recognizes a case insensitive pattern. +/// +/// The input data will be compared to the tag combinator's argument and will return the part of +/// the input that matches the argument with no regard to case. 
+/// # Example
+/// ```rust
+/// # use nom::{Err, error::{Error, ErrorKind}, Needed, IResult};
+/// use nom::bytes::streaming::tag_no_case;
+///
+/// fn parser(s: &str) -> IResult<&str, &str> {
+///   tag_no_case("hello")(s)
+/// }
+///
+/// assert_eq!(parser("Hello, World!"), Ok((", World!", "Hello")));
+/// assert_eq!(parser("hello, World!"), Ok((", World!", "hello")));
+/// assert_eq!(parser("HeLlO, World!"), Ok((", World!", "HeLlO")));
+/// assert_eq!(parser("Something"), Err(Err::Error(Error::new("Something", ErrorKind::Tag))));
+/// assert_eq!(parser(""), Err(Err::Incomplete(Needed::new(5))));
+/// ```
+pub fn tag_no_case<T, Input, Error: ParseError<Input>>(
+  tag: T,
+) -> impl Fn(Input) -> IResult<Input, Input, Error>
+where
+  Input: InputTake + InputLength + Compare<T>,
+  T: InputLength + Clone,
+{
+  move |i: Input| {
+    let tag_len = tag.input_len();
+    let t = tag.clone();
+
+    let res: IResult<_, _, Error> = match (i).compare_no_case(t) {
+      CompareResult::Ok => Ok(i.take_split(tag_len)),
+      CompareResult::Incomplete => Err(Err::Incomplete(Needed::new(tag_len - i.input_len()))),
+      CompareResult::Error => {
+        let e: ErrorKind = ErrorKind::Tag;
+        Err(Err::Error(Error::from_error_kind(i, e)))
+      }
+    };
+    res
+  }
+}
+
+/// Parse till certain characters are met.
+///
+/// The parser will return the longest slice till one of the characters of the combinator's argument are met.
+///
+/// It doesn't consume the matched character.
+///
+/// It will return a `Err::Incomplete(Needed::new(1))` if the pattern wasn't met.
+/// # Example
+/// ```rust
+/// # use nom::{Err, error::ErrorKind, Needed, IResult};
+/// use nom::bytes::streaming::is_not;
+///
+/// fn not_space(s: &str) -> IResult<&str, &str> {
+///   is_not(" \t\r\n")(s)
+/// }
+///
+/// assert_eq!(not_space("Hello, World!"), Ok((" World!", "Hello,")));
+/// assert_eq!(not_space("Sometimes\t"), Ok(("\t", "Sometimes")));
+/// assert_eq!(not_space("Nospace"), Err(Err::Incomplete(Needed::new(1))));
+/// assert_eq!(not_space(""), Err(Err::Incomplete(Needed::new(1))));
+/// ```
+pub fn is_not<T, Input, Error: ParseError<Input>>(
+  arr: T,
+) -> impl Fn(Input) -> IResult<Input, Input, Error>
+where
+  Input: InputTakeAtPosition,
+  T: FindToken<<Input as InputTakeAtPosition>::Item>,
+{
+  move |i: Input| {
+    let e: ErrorKind = ErrorKind::IsNot;
+    i.split_at_position1(|c| arr.find_token(c), e)
+  }
+}
+
+/// Returns the longest slice of the matches the pattern.
+///
+/// The parser will return the longest slice consisting of the characters in provided in the
+/// combinator's argument.
+///
+/// # Streaming specific
+/// *Streaming version* will return a `Err::Incomplete(Needed::new(1))` if the pattern wasn't met
+/// or if the pattern reaches the end of the input.
+/// # Example
+/// ```rust
+/// # use nom::{Err, error::ErrorKind, Needed, IResult};
+/// use nom::bytes::streaming::is_a;
+///
+/// fn hex(s: &str) -> IResult<&str, &str> {
+///   is_a("1234567890ABCDEF")(s)
+/// }
+///
+/// assert_eq!(hex("123 and voila"), Ok((" and voila", "123")));
+/// assert_eq!(hex("DEADBEEF and others"), Ok((" and others", "DEADBEEF")));
+/// assert_eq!(hex("BADBABEsomething"), Ok(("something", "BADBABE")));
+/// assert_eq!(hex("D15EA5E"), Err(Err::Incomplete(Needed::new(1))));
+/// assert_eq!(hex(""), Err(Err::Incomplete(Needed::new(1))));
+/// ```
+pub fn is_a<T, Input, Error: ParseError<Input>>(
+  arr: T,
+) -> impl Fn(Input) -> IResult<Input, Input, Error>
+where
+  Input: InputTakeAtPosition,
+  T: FindToken<<Input as InputTakeAtPosition>::Item>,
+{
+  move |i: Input| {
+    let e: ErrorKind = ErrorKind::IsA;
+    i.split_at_position1(|c| !arr.find_token(c), e)
+  }
+}
+
+/// Returns the longest input slice (if any) that matches the predicate.
+///
+/// The parser will return the longest slice that matches the given predicate *(a function that
+/// takes the input and returns a bool)*.
+///
+/// # Streaming Specific
+/// *Streaming version* will return a `Err::Incomplete(Needed::new(1))` if the pattern reaches the end of the input.
+/// # Example
+/// ```rust
+/// # use nom::{Err, error::ErrorKind, Needed, IResult};
+/// use nom::bytes::streaming::take_while;
+/// use nom::character::is_alphabetic;
+///
+/// fn alpha(s: &[u8]) -> IResult<&[u8], &[u8]> {
+///   take_while(is_alphabetic)(s)
+/// }
+///
+/// assert_eq!(alpha(b"latin123"), Ok((&b"123"[..], &b"latin"[..])));
+/// assert_eq!(alpha(b"12345"), Ok((&b"12345"[..], &b""[..])));
+/// assert_eq!(alpha(b"latin"), Err(Err::Incomplete(Needed::new(1))));
+/// assert_eq!(alpha(b""), Err(Err::Incomplete(Needed::new(1))));
+/// ```
+pub fn take_while<F, Input, Error: ParseError<Input>>(
+  cond: F,
+) -> impl Fn(Input) -> IResult<Input, Input, Error>
+where
+  Input: InputTakeAtPosition,
+  F: Fn(<Input as InputTakeAtPosition>::Item) -> bool,
+{
+  move |i: Input| i.split_at_position(|c| !cond(c))
+}
+
+/// Returns the longest (at least 1) input slice that matches the predicate.
+///
+/// The parser will return the longest slice that matches the given predicate *(a function that
+/// takes the input and returns a bool)*.
+///
+/// It will return an `Err(Err::Error((_, ErrorKind::TakeWhile1)))` if the pattern wasn't met.
+///
+/// # Streaming Specific
+/// *Streaming version* will return a `Err::Incomplete(Needed::new(1))` or if the pattern reaches the end of the input.
+///
+/// # Example
+/// ```rust
+/// # use nom::{Err, error::{Error, ErrorKind}, Needed, IResult};
+/// use nom::bytes::streaming::take_while1;
+/// use nom::character::is_alphabetic;
+///
+/// fn alpha(s: &[u8]) -> IResult<&[u8], &[u8]> {
+///   take_while1(is_alphabetic)(s)
+/// }
+///
+/// assert_eq!(alpha(b"latin123"), Ok((&b"123"[..], &b"latin"[..])));
+/// assert_eq!(alpha(b"latin"), Err(Err::Incomplete(Needed::new(1))));
+/// assert_eq!(alpha(b"12345"), Err(Err::Error(Error::new(&b"12345"[..], ErrorKind::TakeWhile1))));
+/// ```
+pub fn take_while1<F, Input, Error: ParseError<Input>>(
+  cond: F,
+) -> impl Fn(Input) -> IResult<Input, Input, Error>
+where
+  Input: InputTakeAtPosition,
+  F: Fn(<Input as InputTakeAtPosition>::Item) -> bool,
+{
+  move |i: Input| {
+    let e: ErrorKind = ErrorKind::TakeWhile1;
+    i.split_at_position1(|c| !cond(c), e)
+  }
+}
+
+/// Returns the longest (m <= len <= n) input slice that matches the predicate.
+///
+/// The parser will return the longest slice that matches the given predicate *(a function that
+/// takes the input and returns a bool)*.
+///
+/// It will return an `Err::Error((_, ErrorKind::TakeWhileMN))` if the pattern wasn't met.
+/// # Streaming Specific
+/// *Streaming version* will return a `Err::Incomplete(Needed::new(1))` if the pattern reaches the end of the input or is too short.
+/// +/// # Example +/// ```rust +/// # use nom::{Err, error::{Error, ErrorKind}, Needed, IResult}; +/// use nom::bytes::streaming::take_while_m_n; +/// use nom::character::is_alphabetic; +/// +/// fn short_alpha(s: &[u8]) -> IResult<&[u8], &[u8]> { +/// take_while_m_n(3, 6, is_alphabetic)(s) +/// } +/// +/// assert_eq!(short_alpha(b"latin123"), Ok((&b"123"[..], &b"latin"[..]))); +/// assert_eq!(short_alpha(b"lengthy"), Ok((&b"y"[..], &b"length"[..]))); +/// assert_eq!(short_alpha(b"latin"), Err(Err::Incomplete(Needed::new(1)))); +/// assert_eq!(short_alpha(b"ed"), Err(Err::Incomplete(Needed::new(1)))); +/// assert_eq!(short_alpha(b"12345"), Err(Err::Error(Error::new(&b"12345"[..], ErrorKind::TakeWhileMN)))); +/// ``` +pub fn take_while_m_n>( + m: usize, + n: usize, + cond: F, +) -> impl Fn(Input) -> IResult +where + Input: InputTake + InputIter + InputLength, + F: Fn(::Item) -> bool, +{ + move |i: Input| { + let input = i; + + match input.position(|c| !cond(c)) { + Some(idx) => { + if idx >= m { + if idx <= n { + let res: IResult<_, _, Error> = if let Ok(index) = input.slice_index(idx) { + Ok(input.take_split(index)) + } else { + Err(Err::Error(Error::from_error_kind( + input, + ErrorKind::TakeWhileMN, + ))) + }; + res + } else { + let res: IResult<_, _, Error> = if let Ok(index) = input.slice_index(n) { + Ok(input.take_split(index)) + } else { + Err(Err::Error(Error::from_error_kind( + input, + ErrorKind::TakeWhileMN, + ))) + }; + res + } + } else { + let e = ErrorKind::TakeWhileMN; + Err(Err::Error(Error::from_error_kind(input, e))) + } + } + None => { + let len = input.input_len(); + if len >= n { + match input.slice_index(n) { + Ok(index) => Ok(input.take_split(index)), + Err(_needed) => Err(Err::Error(Error::from_error_kind( + input, + ErrorKind::TakeWhileMN, + ))), + } + } else { + let needed = if m > len { m - len } else { 1 }; + Err(Err::Incomplete(Needed::new(needed))) + } + } + } + } +} + +/// Returns the longest input slice (if any) till a predicate is met. +/// +/// The parser will return the longest slice till the given predicate *(a function that +/// takes the input and returns a bool)*. +/// +/// # Streaming Specific +/// *Streaming version* will return a `Err::Incomplete(Needed::new(1))` if the match reaches the +/// end of input or if there was not match. +/// +/// # Example +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed, IResult}; +/// use nom::bytes::streaming::take_till; +/// +/// fn till_colon(s: &str) -> IResult<&str, &str> { +/// take_till(|c| c == ':')(s) +/// } +/// +/// assert_eq!(till_colon("latin:123"), Ok((":123", "latin"))); +/// assert_eq!(till_colon(":empty matched"), Ok((":empty matched", ""))); //allowed +/// assert_eq!(till_colon("12345"), Err(Err::Incomplete(Needed::new(1)))); +/// assert_eq!(till_colon(""), Err(Err::Incomplete(Needed::new(1)))); +/// ``` +pub fn take_till>( + cond: F, +) -> impl Fn(Input) -> IResult +where + Input: InputTakeAtPosition, + F: Fn(::Item) -> bool, +{ + move |i: Input| i.split_at_position(|c| cond(c)) +} + +/// Returns the longest (at least 1) input slice till a predicate is met. +/// +/// The parser will return the longest slice till the given predicate *(a function that +/// takes the input and returns a bool)*. +/// +/// # Streaming Specific +/// *Streaming version* will return a `Err::Incomplete(Needed::new(1))` if the match reaches the +/// end of input or if there was not match. 
+/// # Example +/// ```rust +/// # use nom::{Err, error::{Error, ErrorKind}, Needed, IResult}; +/// use nom::bytes::streaming::take_till1; +/// +/// fn till_colon(s: &str) -> IResult<&str, &str> { +/// take_till1(|c| c == ':')(s) +/// } +/// +/// assert_eq!(till_colon("latin:123"), Ok((":123", "latin"))); +/// assert_eq!(till_colon(":empty matched"), Err(Err::Error(Error::new(":empty matched", ErrorKind::TakeTill1)))); +/// assert_eq!(till_colon("12345"), Err(Err::Incomplete(Needed::new(1)))); +/// assert_eq!(till_colon(""), Err(Err::Incomplete(Needed::new(1)))); +/// ``` +pub fn take_till1>( + cond: F, +) -> impl Fn(Input) -> IResult +where + Input: InputTakeAtPosition, + F: Fn(::Item) -> bool, +{ + move |i: Input| { + let e: ErrorKind = ErrorKind::TakeTill1; + i.split_at_position1(|c| cond(c), e) + } +} + +/// Returns an input slice containing the first N input elements (Input[..N]). +/// +/// # Streaming Specific +/// *Streaming version* if the input has less than N elements, `take` will +/// return a `Err::Incomplete(Needed::new(M))` where M is the number of +/// additional bytes the parser would need to succeed. +/// It is well defined for `&[u8]` as the number of elements is the byte size, +/// but for types like `&str`, we cannot know how many bytes correspond for +/// the next few chars, so the result will be `Err::Incomplete(Needed::Unknown)` +/// +/// # Example +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed, IResult}; +/// use nom::bytes::streaming::take; +/// +/// fn take6(s: &str) -> IResult<&str, &str> { +/// take(6usize)(s) +/// } +/// +/// assert_eq!(take6("1234567"), Ok(("7", "123456"))); +/// assert_eq!(take6("things"), Ok(("", "things"))); +/// assert_eq!(take6("short"), Err(Err::Incomplete(Needed::Unknown))); +/// ``` +pub fn take>( + count: C, +) -> impl Fn(Input) -> IResult +where + Input: InputIter + InputTake + InputLength, + C: ToUsize, +{ + let c = count.to_usize(); + move |i: Input| match i.slice_index(c) { + Err(i) => Err(Err::Incomplete(i)), + Ok(index) => Ok(i.take_split(index)), + } +} + +/// Returns the input slice up to the first occurrence of the pattern. +/// +/// It doesn't consume the pattern. +/// +/// # Streaming Specific +/// *Streaming version* will return a `Err::Incomplete(Needed::new(N))` if the input doesn't +/// contain the pattern or if the input is smaller than the pattern. +/// # Example +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed, IResult}; +/// use nom::bytes::streaming::take_until; +/// +/// fn until_eof(s: &str) -> IResult<&str, &str> { +/// take_until("eof")(s) +/// } +/// +/// assert_eq!(until_eof("hello, worldeof"), Ok(("eof", "hello, world"))); +/// assert_eq!(until_eof("hello, world"), Err(Err::Incomplete(Needed::Unknown))); +/// assert_eq!(until_eof("hello, worldeo"), Err(Err::Incomplete(Needed::Unknown))); +/// assert_eq!(until_eof("1eof2eof"), Ok(("eof2eof", "1"))); +/// ``` +pub fn take_until>( + tag: T, +) -> impl Fn(Input) -> IResult +where + Input: InputTake + InputLength + FindSubstring, + T: Clone, +{ + move |i: Input| { + let t = tag.clone(); + + let res: IResult<_, _, Error> = match i.find_substring(t) { + None => Err(Err::Incomplete(Needed::Unknown)), + Some(index) => Ok(i.take_split(index)), + }; + res + } +} + +/// Returns the non empty input slice up to the first occurrence of the pattern. +/// +/// It doesn't consume the pattern. 
+/// +/// # Streaming Specific +/// *Streaming version* will return a `Err::Incomplete(Needed::new(N))` if the input doesn't +/// contain the pattern or if the input is smaller than the pattern. +/// # Example +/// ```rust +/// # use nom::{Err, error::{Error, ErrorKind}, Needed, IResult}; +/// use nom::bytes::streaming::take_until1; +/// +/// fn until_eof(s: &str) -> IResult<&str, &str> { +/// take_until1("eof")(s) +/// } +/// +/// assert_eq!(until_eof("hello, worldeof"), Ok(("eof", "hello, world"))); +/// assert_eq!(until_eof("hello, world"), Err(Err::Incomplete(Needed::Unknown))); +/// assert_eq!(until_eof("hello, worldeo"), Err(Err::Incomplete(Needed::Unknown))); +/// assert_eq!(until_eof("1eof2eof"), Ok(("eof2eof", "1"))); +/// assert_eq!(until_eof("eof"), Err(Err::Error(Error::new("eof", ErrorKind::TakeUntil)))); +/// ``` +pub fn take_until1>( + tag: T, +) -> impl Fn(Input) -> IResult +where + Input: InputTake + InputLength + FindSubstring, + T: Clone, +{ + move |i: Input| { + let t = tag.clone(); + + let res: IResult<_, _, Error> = match i.find_substring(t) { + None => Err(Err::Incomplete(Needed::Unknown)), + Some(0) => Err(Err::Error(Error::from_error_kind(i, ErrorKind::TakeUntil))), + Some(index) => Ok(i.take_split(index)), + }; + res + } +} + +/// Matches a byte string with escaped characters. +/// +/// * The first argument matches the normal characters (it must not accept the control character) +/// * The second argument is the control character (like `\` in most languages) +/// * The third argument matches the escaped characters +/// # Example +/// ``` +/// # use nom::{Err, error::ErrorKind, Needed, IResult}; +/// # use nom::character::complete::digit1; +/// use nom::bytes::streaming::escaped; +/// use nom::character::streaming::one_of; +/// +/// fn esc(s: &str) -> IResult<&str, &str> { +/// escaped(digit1, '\\', one_of("\"n\\"))(s) +/// } +/// +/// assert_eq!(esc("123;"), Ok((";", "123"))); +/// assert_eq!(esc("12\\\"34;"), Ok((";", "12\\\"34"))); +/// ``` +/// +pub fn escaped( + mut normal: F, + control_char: char, + mut escapable: G, +) -> impl FnMut(Input) -> IResult +where + Input: Clone + + crate::traits::Offset + + InputLength + + InputTake + + InputTakeAtPosition + + Slice> + + InputIter, + ::Item: crate::traits::AsChar, + F: Parser, + G: Parser, + Error: ParseError, +{ + use crate::traits::AsChar; + + move |input: Input| { + let mut i = input.clone(); + + while i.input_len() > 0 { + let current_len = i.input_len(); + + match normal.parse(i.clone()) { + Ok((i2, _)) => { + if i2.input_len() == 0 { + return Err(Err::Incomplete(Needed::Unknown)); + } else if i2.input_len() == current_len { + let index = input.offset(&i2); + return Ok(input.take_split(index)); + } else { + i = i2; + } + } + Err(Err::Error(_)) => { + // unwrap() should be safe here since index < $i.input_len() + if i.iter_elements().next().unwrap().as_char() == control_char { + let next = control_char.len_utf8(); + if next >= i.input_len() { + return Err(Err::Incomplete(Needed::new(1))); + } else { + match escapable.parse(i.slice(next..)) { + Ok((i2, _)) => { + if i2.input_len() == 0 { + return Err(Err::Incomplete(Needed::Unknown)); + } else { + i = i2; + } + } + Err(e) => return Err(e), + } + } + } else { + let index = input.offset(&i); + return Ok(input.take_split(index)); + } + } + Err(e) => { + return Err(e); + } + } + } + + Err(Err::Incomplete(Needed::Unknown)) + } +} + +/// Matches a byte string with escaped characters. 
+/// +/// * The first argument matches the normal characters (it must not match the control character) +/// * The second argument is the control character (like `\` in most languages) +/// * The third argument matches the escaped characters and transforms them +/// +/// As an example, the chain `abc\tdef` could be `abc def` (it also consumes the control character) +/// +/// ``` +/// # use nom::{Err, error::ErrorKind, Needed, IResult}; +/// # use std::str::from_utf8; +/// use nom::bytes::streaming::{escaped_transform, tag}; +/// use nom::character::streaming::alpha1; +/// use nom::branch::alt; +/// use nom::combinator::value; +/// +/// fn parser(input: &str) -> IResult<&str, String> { +/// escaped_transform( +/// alpha1, +/// '\\', +/// alt(( +/// value("\\", tag("\\")), +/// value("\"", tag("\"")), +/// value("\n", tag("n")), +/// )) +/// )(input) +/// } +/// +/// assert_eq!(parser("ab\\\"cd\""), Ok(("\"", String::from("ab\"cd")))); +/// ``` +#[cfg(feature = "alloc")] +#[cfg_attr(feature = "docsrs", doc(cfg(feature = "alloc")))] +pub fn escaped_transform( + mut normal: F, + control_char: char, + mut transform: G, +) -> impl FnMut(Input) -> IResult +where + Input: Clone + + crate::traits::Offset + + InputLength + + InputTake + + InputTakeAtPosition + + Slice> + + InputIter, + Input: crate::traits::ExtendInto, + O1: crate::traits::ExtendInto, + O2: crate::traits::ExtendInto, + ::Item: crate::traits::AsChar, + F: Parser, + G: Parser, + Error: ParseError, +{ + use crate::traits::AsChar; + + move |input: Input| { + let mut index = 0; + let mut res = input.new_builder(); + + let i = input.clone(); + + while index < i.input_len() { + let current_len = i.input_len(); + let remainder = i.slice(index..); + match normal.parse(remainder.clone()) { + Ok((i2, o)) => { + o.extend_into(&mut res); + if i2.input_len() == 0 { + return Err(Err::Incomplete(Needed::Unknown)); + } else if i2.input_len() == current_len { + return Ok((remainder, res)); + } else { + index = input.offset(&i2); + } + } + Err(Err::Error(_)) => { + // unwrap() should be safe here since index < $i.input_len() + if remainder.iter_elements().next().unwrap().as_char() == control_char { + let next = index + control_char.len_utf8(); + let input_len = input.input_len(); + + if next >= input_len { + return Err(Err::Incomplete(Needed::Unknown)); + } else { + match transform.parse(i.slice(next..)) { + Ok((i2, o)) => { + o.extend_into(&mut res); + if i2.input_len() == 0 { + return Err(Err::Incomplete(Needed::Unknown)); + } else { + index = input.offset(&i2); + } + } + Err(e) => return Err(e), + } + } + } else { + return Ok((remainder, res)); + } + } + Err(e) => return Err(e), + } + } + Err(Err::Incomplete(Needed::Unknown)) + } +} diff --git a/vendor/nom/src/bytes/tests.rs b/vendor/nom/src/bytes/tests.rs new file mode 100644 index 00000000000000..159c4b4ffcc693 --- /dev/null +++ b/vendor/nom/src/bytes/tests.rs @@ -0,0 +1,636 @@ +use crate::character::is_alphabetic; +use crate::character::streaming::{ + alpha1 as alpha, alphanumeric1 as alphanumeric, digit1 as digit, hex_digit1 as hex_digit, + multispace1 as multispace, oct_digit1 as oct_digit, space1 as space, +}; +use crate::error::ErrorKind; +use crate::internal::{Err, IResult, Needed}; +#[cfg(feature = "alloc")] +use crate::{ + branch::alt, + bytes::complete::{escaped, escaped_transform, tag}, + combinator::{map, value}, + lib::std::string::String, + lib::std::vec::Vec, +}; + +#[test] +fn is_a() { + use crate::bytes::streaming::is_a; + + fn a_or_b(i: &[u8]) -> IResult<&[u8], &[u8]> { + 
is_a("ab")(i) + } + + let a = &b"abcd"[..]; + assert_eq!(a_or_b(a), Ok((&b"cd"[..], &b"ab"[..]))); + + let b = &b"bcde"[..]; + assert_eq!(a_or_b(b), Ok((&b"cde"[..], &b"b"[..]))); + + let c = &b"cdef"[..]; + assert_eq!( + a_or_b(c), + Err(Err::Error(error_position!(c, ErrorKind::IsA))) + ); + + let d = &b"bacdef"[..]; + assert_eq!(a_or_b(d), Ok((&b"cdef"[..], &b"ba"[..]))); +} + +#[test] +fn is_not() { + use crate::bytes::streaming::is_not; + + fn a_or_b(i: &[u8]) -> IResult<&[u8], &[u8]> { + is_not("ab")(i) + } + + let a = &b"cdab"[..]; + assert_eq!(a_or_b(a), Ok((&b"ab"[..], &b"cd"[..]))); + + let b = &b"cbde"[..]; + assert_eq!(a_or_b(b), Ok((&b"bde"[..], &b"c"[..]))); + + let c = &b"abab"[..]; + assert_eq!( + a_or_b(c), + Err(Err::Error(error_position!(c, ErrorKind::IsNot))) + ); + + let d = &b"cdefba"[..]; + assert_eq!(a_or_b(d), Ok((&b"ba"[..], &b"cdef"[..]))); + + let e = &b"e"[..]; + assert_eq!(a_or_b(e), Err(Err::Incomplete(Needed::new(1)))); +} + +#[cfg(feature = "alloc")] +#[allow(unused_variables)] +#[test] +fn escaping() { + use crate::character::streaming::one_of; + + fn esc(i: &[u8]) -> IResult<&[u8], &[u8]> { + escaped(alpha, '\\', one_of("\"n\\"))(i) + } + assert_eq!(esc(&b"abcd;"[..]), Ok((&b";"[..], &b"abcd"[..]))); + assert_eq!(esc(&b"ab\\\"cd;"[..]), Ok((&b";"[..], &b"ab\\\"cd"[..]))); + assert_eq!(esc(&b"\\\"abcd;"[..]), Ok((&b";"[..], &b"\\\"abcd"[..]))); + assert_eq!(esc(&b"\\n;"[..]), Ok((&b";"[..], &b"\\n"[..]))); + assert_eq!(esc(&b"ab\\\"12"[..]), Ok((&b"12"[..], &b"ab\\\""[..]))); + assert_eq!( + esc(&b"AB\\"[..]), + Err(Err::Error(error_position!( + &b"AB\\"[..], + ErrorKind::Escaped + ))) + ); + assert_eq!( + esc(&b"AB\\A"[..]), + Err(Err::Error(error_node_position!( + &b"AB\\A"[..], + ErrorKind::Escaped, + error_position!(&b"A"[..], ErrorKind::OneOf) + ))) + ); + + fn esc2(i: &[u8]) -> IResult<&[u8], &[u8]> { + escaped(digit, '\\', one_of("\"n\\"))(i) + } + assert_eq!(esc2(&b"12\\nnn34"[..]), Ok((&b"nn34"[..], &b"12\\n"[..]))); +} + +#[cfg(feature = "alloc")] +#[test] +fn escaping_str() { + use crate::character::streaming::one_of; + + fn esc(i: &str) -> IResult<&str, &str> { + escaped(alpha, '\\', one_of("\"n\\"))(i) + } + assert_eq!(esc("abcd;"), Ok((";", "abcd"))); + assert_eq!(esc("ab\\\"cd;"), Ok((";", "ab\\\"cd"))); + assert_eq!(esc("\\\"abcd;"), Ok((";", "\\\"abcd"))); + assert_eq!(esc("\\n;"), Ok((";", "\\n"))); + assert_eq!(esc("ab\\\"12"), Ok(("12", "ab\\\""))); + assert_eq!( + esc("AB\\"), + Err(Err::Error(error_position!("AB\\", ErrorKind::Escaped))) + ); + assert_eq!( + esc("AB\\A"), + Err(Err::Error(error_node_position!( + "AB\\A", + ErrorKind::Escaped, + error_position!("A", ErrorKind::OneOf) + ))) + ); + + fn esc2(i: &str) -> IResult<&str, &str> { + escaped(digit, '\\', one_of("\"n\\"))(i) + } + assert_eq!(esc2("12\\nnn34"), Ok(("nn34", "12\\n"))); + + fn esc3(i: &str) -> IResult<&str, &str> { + escaped(alpha, '\u{241b}', one_of("\"n"))(i) + } + assert_eq!(esc3("ab␛ncd;"), Ok((";", "ab␛ncd"))); +} + +#[cfg(feature = "alloc")] +fn to_s(i: Vec) -> String { + String::from_utf8_lossy(&i).into_owned() +} + +#[cfg(feature = "alloc")] +#[test] +fn escape_transform() { + fn esc(i: &[u8]) -> IResult<&[u8], String> { + map( + escaped_transform( + alpha, + '\\', + alt(( + value(&b"\\"[..], tag("\\")), + value(&b"\""[..], tag("\"")), + value(&b"\n"[..], tag("n")), + )), + ), + to_s, + )(i) + } + + assert_eq!(esc(&b"abcd;"[..]), Ok((&b";"[..], String::from("abcd")))); + assert_eq!( + esc(&b"ab\\\"cd;"[..]), + Ok((&b";"[..], String::from("ab\"cd"))) + ); + 
assert_eq!( + esc(&b"\\\"abcd;"[..]), + Ok((&b";"[..], String::from("\"abcd"))) + ); + assert_eq!(esc(&b"\\n;"[..]), Ok((&b";"[..], String::from("\n")))); + assert_eq!( + esc(&b"ab\\\"12"[..]), + Ok((&b"12"[..], String::from("ab\""))) + ); + assert_eq!( + esc(&b"AB\\"[..]), + Err(Err::Error(error_position!( + &b"\\"[..], + ErrorKind::EscapedTransform + ))) + ); + assert_eq!( + esc(&b"AB\\A"[..]), + Err(Err::Error(error_node_position!( + &b"AB\\A"[..], + ErrorKind::EscapedTransform, + error_position!(&b"A"[..], ErrorKind::Tag) + ))) + ); + + fn esc2(i: &[u8]) -> IResult<&[u8], String> { + map( + escaped_transform( + alpha, + '&', + alt(( + value("è".as_bytes(), tag("egrave;")), + value("à".as_bytes(), tag("agrave;")), + )), + ), + to_s, + )(i) + } + assert_eq!( + esc2(&b"abèDEF;"[..]), + Ok((&b";"[..], String::from("abèDEF"))) + ); + assert_eq!( + esc2(&b"abèDàEF;"[..]), + Ok((&b";"[..], String::from("abèDàEF"))) + ); +} + +#[cfg(feature = "std")] +#[test] +fn escape_transform_str() { + fn esc(i: &str) -> IResult<&str, String> { + escaped_transform( + alpha, + '\\', + alt(( + value("\\", tag("\\")), + value("\"", tag("\"")), + value("\n", tag("n")), + )), + )(i) + } + + assert_eq!(esc("abcd;"), Ok((";", String::from("abcd")))); + assert_eq!(esc("ab\\\"cd;"), Ok((";", String::from("ab\"cd")))); + assert_eq!(esc("\\\"abcd;"), Ok((";", String::from("\"abcd")))); + assert_eq!(esc("\\n;"), Ok((";", String::from("\n")))); + assert_eq!(esc("ab\\\"12"), Ok(("12", String::from("ab\"")))); + assert_eq!( + esc("AB\\"), + Err(Err::Error(error_position!( + "\\", + ErrorKind::EscapedTransform + ))) + ); + assert_eq!( + esc("AB\\A"), + Err(Err::Error(error_node_position!( + "AB\\A", + ErrorKind::EscapedTransform, + error_position!("A", ErrorKind::Tag) + ))) + ); + + fn esc2(i: &str) -> IResult<&str, String> { + escaped_transform( + alpha, + '&', + alt((value("è", tag("egrave;")), value("à", tag("agrave;")))), + )(i) + } + assert_eq!(esc2("abèDEF;"), Ok((";", String::from("abèDEF")))); + assert_eq!( + esc2("abèDàEF;"), + Ok((";", String::from("abèDàEF"))) + ); + + fn esc3(i: &str) -> IResult<&str, String> { + escaped_transform( + alpha, + '␛', + alt((value("\0", tag("0")), value("\n", tag("n")))), + )(i) + } + assert_eq!(esc3("a␛0bc␛n"), Ok(("", String::from("a\0bc\n")))); +} + +#[test] +fn take_until_incomplete() { + use crate::bytes::streaming::take_until; + fn y(i: &[u8]) -> IResult<&[u8], &[u8]> { + take_until("end")(i) + } + assert_eq!(y(&b"nd"[..]), Err(Err::Incomplete(Needed::Unknown))); + assert_eq!(y(&b"123"[..]), Err(Err::Incomplete(Needed::Unknown))); + assert_eq!(y(&b"123en"[..]), Err(Err::Incomplete(Needed::Unknown))); +} + +#[test] +fn take_until_incomplete_s() { + use crate::bytes::streaming::take_until; + fn ys(i: &str) -> IResult<&str, &str> { + take_until("end")(i) + } + assert_eq!(ys("123en"), Err(Err::Incomplete(Needed::Unknown))); +} + +#[test] +fn recognize() { + use crate::bytes::streaming::{tag, take}; + use crate::combinator::recognize; + use crate::sequence::delimited; + + fn x(i: &[u8]) -> IResult<&[u8], &[u8]> { + recognize(delimited(tag("")))(i) + } + let r = x(&b" aaa"[..]); + assert_eq!(r, Ok((&b" aaa"[..], &b""[..]))); + + let semicolon = &b";"[..]; + + fn ya(i: &[u8]) -> IResult<&[u8], &[u8]> { + recognize(alpha)(i) + } + let ra = ya(&b"abc;"[..]); + assert_eq!(ra, Ok((semicolon, &b"abc"[..]))); + + fn yd(i: &[u8]) -> IResult<&[u8], &[u8]> { + recognize(digit)(i) + } + let rd = yd(&b"123;"[..]); + assert_eq!(rd, Ok((semicolon, &b"123"[..]))); + + fn yhd(i: &[u8]) -> 
IResult<&[u8], &[u8]> { + recognize(hex_digit)(i) + } + let rhd = yhd(&b"123abcDEF;"[..]); + assert_eq!(rhd, Ok((semicolon, &b"123abcDEF"[..]))); + + fn yod(i: &[u8]) -> IResult<&[u8], &[u8]> { + recognize(oct_digit)(i) + } + let rod = yod(&b"1234567;"[..]); + assert_eq!(rod, Ok((semicolon, &b"1234567"[..]))); + + fn yan(i: &[u8]) -> IResult<&[u8], &[u8]> { + recognize(alphanumeric)(i) + } + let ran = yan(&b"123abc;"[..]); + assert_eq!(ran, Ok((semicolon, &b"123abc"[..]))); + + fn ys(i: &[u8]) -> IResult<&[u8], &[u8]> { + recognize(space)(i) + } + let rs = ys(&b" \t;"[..]); + assert_eq!(rs, Ok((semicolon, &b" \t"[..]))); + + fn yms(i: &[u8]) -> IResult<&[u8], &[u8]> { + recognize(multispace)(i) + } + let rms = yms(&b" \t\r\n;"[..]); + assert_eq!(rms, Ok((semicolon, &b" \t\r\n"[..]))); +} + +#[test] +fn take_while() { + use crate::bytes::streaming::take_while; + + fn f(i: &[u8]) -> IResult<&[u8], &[u8]> { + take_while(is_alphabetic)(i) + } + let a = b""; + let b = b"abcd"; + let c = b"abcd123"; + let d = b"123"; + + assert_eq!(f(&a[..]), Err(Err::Incomplete(Needed::new(1)))); + assert_eq!(f(&b[..]), Err(Err::Incomplete(Needed::new(1)))); + assert_eq!(f(&c[..]), Ok((&d[..], &b[..]))); + assert_eq!(f(&d[..]), Ok((&d[..], &a[..]))); +} + +#[test] +fn take_while1() { + use crate::bytes::streaming::take_while1; + + fn f(i: &[u8]) -> IResult<&[u8], &[u8]> { + take_while1(is_alphabetic)(i) + } + let a = b""; + let b = b"abcd"; + let c = b"abcd123"; + let d = b"123"; + + assert_eq!(f(&a[..]), Err(Err::Incomplete(Needed::new(1)))); + assert_eq!(f(&b[..]), Err(Err::Incomplete(Needed::new(1)))); + assert_eq!(f(&c[..]), Ok((&b"123"[..], &b[..]))); + assert_eq!( + f(&d[..]), + Err(Err::Error(error_position!(&d[..], ErrorKind::TakeWhile1))) + ); +} + +#[test] +fn take_while_m_n() { + use crate::bytes::streaming::take_while_m_n; + + fn x(i: &[u8]) -> IResult<&[u8], &[u8]> { + take_while_m_n(2, 4, is_alphabetic)(i) + } + let a = b""; + let b = b"a"; + let c = b"abc"; + let d = b"abc123"; + let e = b"abcde"; + let f = b"123"; + + assert_eq!(x(&a[..]), Err(Err::Incomplete(Needed::new(2)))); + assert_eq!(x(&b[..]), Err(Err::Incomplete(Needed::new(1)))); + assert_eq!(x(&c[..]), Err(Err::Incomplete(Needed::new(1)))); + assert_eq!(x(&d[..]), Ok((&b"123"[..], &c[..]))); + assert_eq!(x(&e[..]), Ok((&b"e"[..], &b"abcd"[..]))); + assert_eq!( + x(&f[..]), + Err(Err::Error(error_position!(&f[..], ErrorKind::TakeWhileMN))) + ); +} + +#[test] +fn take_till() { + use crate::bytes::streaming::take_till; + + fn f(i: &[u8]) -> IResult<&[u8], &[u8]> { + take_till(is_alphabetic)(i) + } + let a = b""; + let b = b"abcd"; + let c = b"123abcd"; + let d = b"123"; + + assert_eq!(f(&a[..]), Err(Err::Incomplete(Needed::new(1)))); + assert_eq!(f(&b[..]), Ok((&b"abcd"[..], &b""[..]))); + assert_eq!(f(&c[..]), Ok((&b"abcd"[..], &b"123"[..]))); + assert_eq!(f(&d[..]), Err(Err::Incomplete(Needed::new(1)))); +} + +#[test] +fn take_till1() { + use crate::bytes::streaming::take_till1; + + fn f(i: &[u8]) -> IResult<&[u8], &[u8]> { + take_till1(is_alphabetic)(i) + } + let a = b""; + let b = b"abcd"; + let c = b"123abcd"; + let d = b"123"; + + assert_eq!(f(&a[..]), Err(Err::Incomplete(Needed::new(1)))); + assert_eq!( + f(&b[..]), + Err(Err::Error(error_position!(&b[..], ErrorKind::TakeTill1))) + ); + assert_eq!(f(&c[..]), Ok((&b"abcd"[..], &b"123"[..]))); + assert_eq!(f(&d[..]), Err(Err::Incomplete(Needed::new(1)))); +} + +#[test] +fn take_while_utf8() { + use crate::bytes::streaming::take_while; + + fn f(i: &str) -> IResult<&str, &str> { + 
take_while(|c| c != '點')(i) + } + + assert_eq!(f(""), Err(Err::Incomplete(Needed::new(1)))); + assert_eq!(f("abcd"), Err(Err::Incomplete(Needed::new(1)))); + assert_eq!(f("abcd點"), Ok(("點", "abcd"))); + assert_eq!(f("abcd點a"), Ok(("點a", "abcd"))); + + fn g(i: &str) -> IResult<&str, &str> { + take_while(|c| c == '點')(i) + } + + assert_eq!(g(""), Err(Err::Incomplete(Needed::new(1)))); + assert_eq!(g("點abcd"), Ok(("abcd", "點"))); + assert_eq!(g("點點點a"), Ok(("a", "點點點"))); +} + +#[test] +fn take_till_utf8() { + use crate::bytes::streaming::take_till; + + fn f(i: &str) -> IResult<&str, &str> { + take_till(|c| c == '點')(i) + } + + assert_eq!(f(""), Err(Err::Incomplete(Needed::new(1)))); + assert_eq!(f("abcd"), Err(Err::Incomplete(Needed::new(1)))); + assert_eq!(f("abcd點"), Ok(("點", "abcd"))); + assert_eq!(f("abcd點a"), Ok(("點a", "abcd"))); + + fn g(i: &str) -> IResult<&str, &str> { + take_till(|c| c != '點')(i) + } + + assert_eq!(g(""), Err(Err::Incomplete(Needed::new(1)))); + assert_eq!(g("點abcd"), Ok(("abcd", "點"))); + assert_eq!(g("點點點a"), Ok(("a", "點點點"))); +} + +#[test] +fn take_utf8() { + use crate::bytes::streaming::{take, take_while}; + + fn f(i: &str) -> IResult<&str, &str> { + take(3_usize)(i) + } + + assert_eq!(f(""), Err(Err::Incomplete(Needed::Unknown))); + assert_eq!(f("ab"), Err(Err::Incomplete(Needed::Unknown))); + assert_eq!(f("點"), Err(Err::Incomplete(Needed::Unknown))); + assert_eq!(f("ab點cd"), Ok(("cd", "ab點"))); + assert_eq!(f("a點bcd"), Ok(("cd", "a點b"))); + assert_eq!(f("a點b"), Ok(("", "a點b"))); + + fn g(i: &str) -> IResult<&str, &str> { + take_while(|c| c == '點')(i) + } + + assert_eq!(g(""), Err(Err::Incomplete(Needed::new(1)))); + assert_eq!(g("點abcd"), Ok(("abcd", "點"))); + assert_eq!(g("點點點a"), Ok(("a", "點點點"))); +} + +#[test] +fn take_while_m_n_utf8() { + use crate::bytes::streaming::take_while_m_n; + + fn parser(i: &str) -> IResult<&str, &str> { + take_while_m_n(1, 1, |c| c == 'A' || c == '😃')(i) + } + assert_eq!(parser("A!"), Ok(("!", "A"))); + assert_eq!(parser("😃!"), Ok(("!", "😃"))); +} + +#[test] +fn take_while_m_n_utf8_full_match() { + use crate::bytes::streaming::take_while_m_n; + + fn parser(i: &str) -> IResult<&str, &str> { + take_while_m_n(1, 1, |c: char| c.is_alphabetic())(i) + } + assert_eq!(parser("øn"), Ok(("n", "ø"))); +} + +#[test] +#[cfg(feature = "std")] +fn recognize_take_while() { + use crate::bytes::streaming::take_while; + use crate::character::is_alphanumeric; + use crate::combinator::recognize; + + fn x(i: &[u8]) -> IResult<&[u8], &[u8]> { + take_while(is_alphanumeric)(i) + } + fn y(i: &[u8]) -> IResult<&[u8], &[u8]> { + recognize(x)(i) + } + assert_eq!(x(&b"ab."[..]), Ok((&b"."[..], &b"ab"[..]))); + println!("X: {:?}", x(&b"ab"[..])); + assert_eq!(y(&b"ab."[..]), Ok((&b"."[..], &b"ab"[..]))); +} + +#[test] +fn length_bytes() { + use crate::{bytes::streaming::tag, multi::length_data, number::streaming::le_u8}; + + fn x(i: &[u8]) -> IResult<&[u8], &[u8]> { + length_data(le_u8)(i) + } + assert_eq!(x(b"\x02..>>"), Ok((&b">>"[..], &b".."[..]))); + assert_eq!(x(b"\x02.."), Ok((&[][..], &b".."[..]))); + assert_eq!(x(b"\x02."), Err(Err::Incomplete(Needed::new(1)))); + assert_eq!(x(b"\x02"), Err(Err::Incomplete(Needed::new(2)))); + + fn y(i: &[u8]) -> IResult<&[u8], &[u8]> { + let (i, _) = tag("magic")(i)?; + length_data(le_u8)(i) + } + assert_eq!(y(b"magic\x02..>>"), Ok((&b">>"[..], &b".."[..]))); + assert_eq!(y(b"magic\x02.."), Ok((&[][..], &b".."[..]))); + assert_eq!(y(b"magic\x02."), Err(Err::Incomplete(Needed::new(1)))); + assert_eq!(y(b"magic\x02"), 
Err(Err::Incomplete(Needed::new(2)))); +} + +#[cfg(feature = "alloc")] +#[test] +fn case_insensitive() { + use crate::bytes::streaming::tag_no_case; + + fn test(i: &[u8]) -> IResult<&[u8], &[u8]> { + tag_no_case("ABcd")(i) + } + assert_eq!(test(&b"aBCdefgh"[..]), Ok((&b"efgh"[..], &b"aBCd"[..]))); + assert_eq!(test(&b"abcdefgh"[..]), Ok((&b"efgh"[..], &b"abcd"[..]))); + assert_eq!(test(&b"ABCDefgh"[..]), Ok((&b"efgh"[..], &b"ABCD"[..]))); + assert_eq!(test(&b"ab"[..]), Err(Err::Incomplete(Needed::new(2)))); + assert_eq!( + test(&b"Hello"[..]), + Err(Err::Error(error_position!(&b"Hello"[..], ErrorKind::Tag))) + ); + assert_eq!( + test(&b"Hel"[..]), + Err(Err::Error(error_position!(&b"Hel"[..], ErrorKind::Tag))) + ); + + fn test2(i: &str) -> IResult<&str, &str> { + tag_no_case("ABcd")(i) + } + assert_eq!(test2("aBCdefgh"), Ok(("efgh", "aBCd"))); + assert_eq!(test2("abcdefgh"), Ok(("efgh", "abcd"))); + assert_eq!(test2("ABCDefgh"), Ok(("efgh", "ABCD"))); + assert_eq!(test2("ab"), Err(Err::Incomplete(Needed::new(2)))); + assert_eq!( + test2("Hello"), + Err(Err::Error(error_position!(&"Hello"[..], ErrorKind::Tag))) + ); + assert_eq!( + test2("Hel"), + Err(Err::Error(error_position!(&"Hel"[..], ErrorKind::Tag))) + ); +} + +#[test] +fn tag_fixed_size_array() { + use crate::bytes::streaming::tag; + + fn test(i: &[u8]) -> IResult<&[u8], &[u8]> { + tag([0x42])(i) + } + fn test2(i: &[u8]) -> IResult<&[u8], &[u8]> { + tag(&[0x42])(i) + } + let input = [0x42, 0x00]; + assert_eq!(test(&input), Ok((&b"\x00"[..], &b"\x42"[..]))); + assert_eq!(test2(&input), Ok((&b"\x00"[..], &b"\x42"[..]))); +} diff --git a/vendor/nom/src/character/complete.rs b/vendor/nom/src/character/complete.rs new file mode 100644 index 00000000000000..7cb760a68361bc --- /dev/null +++ b/vendor/nom/src/character/complete.rs @@ -0,0 +1,1227 @@ +//! Character specific parsers and combinators, complete input version. +//! +//! Functions recognizing specific characters. + +use crate::branch::alt; +use crate::combinator::opt; +use crate::error::ErrorKind; +use crate::error::ParseError; +use crate::internal::{Err, IResult}; +use crate::lib::std::ops::{Range, RangeFrom, RangeTo}; +use crate::traits::{ + AsChar, FindToken, InputIter, InputLength, InputTake, InputTakeAtPosition, Slice, +}; +use crate::traits::{Compare, CompareResult}; + +/// Recognizes one character. +/// +/// *Complete version*: Will return an error if there's not enough input data. +/// # Example +/// +/// ``` +/// # use nom::{Err, error::{ErrorKind, Error}, IResult}; +/// # use nom::character::complete::char; +/// fn parser(i: &str) -> IResult<&str, char> { +/// char('a')(i) +/// } +/// assert_eq!(parser("abc"), Ok(("bc", 'a'))); +/// assert_eq!(parser(" abc"), Err(Err::Error(Error::new(" abc", ErrorKind::Char)))); +/// assert_eq!(parser("bc"), Err(Err::Error(Error::new("bc", ErrorKind::Char)))); +/// assert_eq!(parser(""), Err(Err::Error(Error::new("", ErrorKind::Char)))); +/// ``` +pub fn char>(c: char) -> impl Fn(I) -> IResult +where + I: Slice> + InputIter, + ::Item: AsChar, +{ + move |i: I| match (i).iter_elements().next().map(|t| { + let b = t.as_char() == c; + (&c, b) + }) { + Some((c, true)) => Ok((i.slice(c.len()..), c.as_char())), + _ => Err(Err::Error(Error::from_char(i, c))), + } +} + +/// Recognizes one character and checks that it satisfies a predicate +/// +/// *Complete version*: Will return an error if there's not enough input data. 
+/// # Example +/// +/// ``` +/// # use nom::{Err, error::{ErrorKind, Error}, Needed, IResult}; +/// # use nom::character::complete::satisfy; +/// fn parser(i: &str) -> IResult<&str, char> { +/// satisfy(|c| c == 'a' || c == 'b')(i) +/// } +/// assert_eq!(parser("abc"), Ok(("bc", 'a'))); +/// assert_eq!(parser("cd"), Err(Err::Error(Error::new("cd", ErrorKind::Satisfy)))); +/// assert_eq!(parser(""), Err(Err::Error(Error::new("", ErrorKind::Satisfy)))); +/// ``` +pub fn satisfy>(cond: F) -> impl Fn(I) -> IResult +where + I: Slice> + InputIter, + ::Item: AsChar, + F: Fn(char) -> bool, +{ + move |i: I| match (i).iter_elements().next().map(|t| { + let c = t.as_char(); + let b = cond(c); + (c, b) + }) { + Some((c, true)) => Ok((i.slice(c.len()..), c)), + _ => Err(Err::Error(Error::from_error_kind(i, ErrorKind::Satisfy))), + } +} + +/// Recognizes one of the provided characters. +/// +/// *Complete version*: Will return an error if there's not enough input data. +/// # Example +/// +/// ``` +/// # use nom::{Err, error::ErrorKind}; +/// # use nom::character::complete::one_of; +/// assert_eq!(one_of::<_, _, (&str, ErrorKind)>("abc")("b"), Ok(("", 'b'))); +/// assert_eq!(one_of::<_, _, (&str, ErrorKind)>("a")("bc"), Err(Err::Error(("bc", ErrorKind::OneOf)))); +/// assert_eq!(one_of::<_, _, (&str, ErrorKind)>("a")(""), Err(Err::Error(("", ErrorKind::OneOf)))); +/// ``` +pub fn one_of>(list: T) -> impl Fn(I) -> IResult +where + I: Slice> + InputIter, + ::Item: AsChar + Copy, + T: FindToken<::Item>, +{ + move |i: I| match (i).iter_elements().next().map(|c| (c, list.find_token(c))) { + Some((c, true)) => Ok((i.slice(c.len()..), c.as_char())), + _ => Err(Err::Error(Error::from_error_kind(i, ErrorKind::OneOf))), + } +} + +/// Recognizes a character that is not in the provided characters. +/// +/// *Complete version*: Will return an error if there's not enough input data. +/// # Example +/// +/// ``` +/// # use nom::{Err, error::ErrorKind}; +/// # use nom::character::complete::none_of; +/// assert_eq!(none_of::<_, _, (&str, ErrorKind)>("abc")("z"), Ok(("", 'z'))); +/// assert_eq!(none_of::<_, _, (&str, ErrorKind)>("ab")("a"), Err(Err::Error(("a", ErrorKind::NoneOf)))); +/// assert_eq!(none_of::<_, _, (&str, ErrorKind)>("a")(""), Err(Err::Error(("", ErrorKind::NoneOf)))); +/// ``` +pub fn none_of>(list: T) -> impl Fn(I) -> IResult +where + I: Slice> + InputIter, + ::Item: AsChar + Copy, + T: FindToken<::Item>, +{ + move |i: I| match (i).iter_elements().next().map(|c| (c, !list.find_token(c))) { + Some((c, true)) => Ok((i.slice(c.len()..), c.as_char())), + _ => Err(Err::Error(Error::from_error_kind(i, ErrorKind::NoneOf))), + } +} + +/// Recognizes the string "\r\n". +/// +/// *Complete version*: Will return an error if there's not enough input data. +/// # Example +/// +/// ``` +/// # use nom::{Err, error::{Error, ErrorKind}, IResult}; +/// # use nom::character::complete::crlf; +/// fn parser(input: &str) -> IResult<&str, &str> { +/// crlf(input) +/// } +/// +/// assert_eq!(parser("\r\nc"), Ok(("c", "\r\n"))); +/// assert_eq!(parser("ab\r\nc"), Err(Err::Error(Error::new("ab\r\nc", ErrorKind::CrLf)))); +/// assert_eq!(parser(""), Err(Err::Error(Error::new("", ErrorKind::CrLf)))); +/// ``` +pub fn crlf>(input: T) -> IResult +where + T: Slice> + Slice>, + T: InputIter, + T: Compare<&'static str>, +{ + match input.compare("\r\n") { + //FIXME: is this the right index? 
+ CompareResult::Ok => Ok((input.slice(2..), input.slice(0..2))), + _ => { + let e: ErrorKind = ErrorKind::CrLf; + Err(Err::Error(E::from_error_kind(input, e))) + } + } +} + +//FIXME: there's still an incomplete +/// Recognizes a string of any char except '\r\n' or '\n'. +/// +/// *Complete version*: Will return an error if there's not enough input data. +/// # Example +/// +/// ``` +/// # use nom::{Err, error::{Error, ErrorKind}, IResult, Needed}; +/// # use nom::character::complete::not_line_ending; +/// fn parser(input: &str) -> IResult<&str, &str> { +/// not_line_ending(input) +/// } +/// +/// assert_eq!(parser("ab\r\nc"), Ok(("\r\nc", "ab"))); +/// assert_eq!(parser("ab\nc"), Ok(("\nc", "ab"))); +/// assert_eq!(parser("abc"), Ok(("", "abc"))); +/// assert_eq!(parser(""), Ok(("", ""))); +/// assert_eq!(parser("a\rb\nc"), Err(Err::Error(Error { input: "a\rb\nc", code: ErrorKind::Tag }))); +/// assert_eq!(parser("a\rbc"), Err(Err::Error(Error { input: "a\rbc", code: ErrorKind::Tag }))); +/// ``` +pub fn not_line_ending>(input: T) -> IResult +where + T: Slice> + Slice> + Slice>, + T: InputIter + InputLength, + T: Compare<&'static str>, + ::Item: AsChar, + ::Item: AsChar, +{ + match input.position(|item| { + let c = item.as_char(); + c == '\r' || c == '\n' + }) { + None => Ok((input.slice(input.input_len()..), input)), + Some(index) => { + let mut it = input.slice(index..).iter_elements(); + let nth = it.next().unwrap().as_char(); + if nth == '\r' { + let sliced = input.slice(index..); + let comp = sliced.compare("\r\n"); + match comp { + //FIXME: calculate the right index + CompareResult::Ok => Ok((input.slice(index..), input.slice(..index))), + _ => { + let e: ErrorKind = ErrorKind::Tag; + Err(Err::Error(E::from_error_kind(input, e))) + } + } + } else { + Ok((input.slice(index..), input.slice(..index))) + } + } + } +} + +/// Recognizes an end of line (both '\n' and '\r\n'). +/// +/// *Complete version*: Will return an error if there's not enough input data. +/// # Example +/// +/// ``` +/// # use nom::{Err, error::{Error, ErrorKind}, IResult, Needed}; +/// # use nom::character::complete::line_ending; +/// fn parser(input: &str) -> IResult<&str, &str> { +/// line_ending(input) +/// } +/// +/// assert_eq!(parser("\r\nc"), Ok(("c", "\r\n"))); +/// assert_eq!(parser("ab\r\nc"), Err(Err::Error(Error::new("ab\r\nc", ErrorKind::CrLf)))); +/// assert_eq!(parser(""), Err(Err::Error(Error::new("", ErrorKind::CrLf)))); +/// ``` +pub fn line_ending>(input: T) -> IResult +where + T: Slice> + Slice> + Slice>, + T: InputIter + InputLength, + T: Compare<&'static str>, +{ + match input.compare("\n") { + CompareResult::Ok => Ok((input.slice(1..), input.slice(0..1))), + CompareResult::Incomplete => Err(Err::Error(E::from_error_kind(input, ErrorKind::CrLf))), + CompareResult::Error => { + match input.compare("\r\n") { + //FIXME: is this the right index? + CompareResult::Ok => Ok((input.slice(2..), input.slice(0..2))), + _ => Err(Err::Error(E::from_error_kind(input, ErrorKind::CrLf))), + } + } + } +} + +/// Matches a newline character '\n'. +/// +/// *Complete version*: Will return an error if there's not enough input data. 
+/// # Example +/// +/// ``` +/// # use nom::{Err, error::{Error, ErrorKind}, IResult, Needed}; +/// # use nom::character::complete::newline; +/// fn parser(input: &str) -> IResult<&str, char> { +/// newline(input) +/// } +/// +/// assert_eq!(parser("\nc"), Ok(("c", '\n'))); +/// assert_eq!(parser("\r\nc"), Err(Err::Error(Error::new("\r\nc", ErrorKind::Char)))); +/// assert_eq!(parser(""), Err(Err::Error(Error::new("", ErrorKind::Char)))); +/// ``` +pub fn newline>(input: I) -> IResult +where + I: Slice> + InputIter, + ::Item: AsChar, +{ + char('\n')(input) +} + +/// Matches a tab character '\t'. +/// +/// *Complete version*: Will return an error if there's not enough input data. +/// # Example +/// +/// ``` +/// # use nom::{Err, error::{Error, ErrorKind}, IResult, Needed}; +/// # use nom::character::complete::tab; +/// fn parser(input: &str) -> IResult<&str, char> { +/// tab(input) +/// } +/// +/// assert_eq!(parser("\tc"), Ok(("c", '\t'))); +/// assert_eq!(parser("\r\nc"), Err(Err::Error(Error::new("\r\nc", ErrorKind::Char)))); +/// assert_eq!(parser(""), Err(Err::Error(Error::new("", ErrorKind::Char)))); +/// ``` +pub fn tab>(input: I) -> IResult +where + I: Slice> + InputIter, + ::Item: AsChar, +{ + char('\t')(input) +} + +/// Matches one byte as a character. Note that the input type will +/// accept a `str`, but not a `&[u8]`, unlike many other nom parsers. +/// +/// *Complete version*: Will return an error if there's not enough input data. +/// # Example +/// +/// ``` +/// # use nom::{character::complete::anychar, Err, error::{Error, ErrorKind}, IResult}; +/// fn parser(input: &str) -> IResult<&str, char> { +/// anychar(input) +/// } +/// +/// assert_eq!(parser("abc"), Ok(("bc",'a'))); +/// assert_eq!(parser(""), Err(Err::Error(Error::new("", ErrorKind::Eof)))); +/// ``` +pub fn anychar>(input: T) -> IResult +where + T: InputIter + InputLength + Slice>, + ::Item: AsChar, +{ + let mut it = input.iter_indices(); + match it.next() { + None => Err(Err::Error(E::from_error_kind(input, ErrorKind::Eof))), + Some((_, c)) => match it.next() { + None => Ok((input.slice(input.input_len()..), c.as_char())), + Some((idx, _)) => Ok((input.slice(idx..), c.as_char())), + }, + } +} + +/// Recognizes zero or more lowercase and uppercase ASCII alphabetic characters: a-z, A-Z +/// +/// *Complete version*: Will return the whole input if no terminating token is found (a non +/// alphabetic character). +/// # Example +/// +/// ``` +/// # use nom::{Err, error::ErrorKind, IResult, Needed}; +/// # use nom::character::complete::alpha0; +/// fn parser(input: &str) -> IResult<&str, &str> { +/// alpha0(input) +/// } +/// +/// assert_eq!(parser("ab1c"), Ok(("1c", "ab"))); +/// assert_eq!(parser("1c"), Ok(("1c", ""))); +/// assert_eq!(parser(""), Ok(("", ""))); +/// ``` +pub fn alpha0>(input: T) -> IResult +where + T: InputTakeAtPosition, + ::Item: AsChar, +{ + input.split_at_position_complete(|item| !item.is_alpha()) +} + +/// Recognizes one or more lowercase and uppercase ASCII alphabetic characters: a-z, A-Z +/// +/// *Complete version*: Will return an error if there's not enough input data, +/// or the whole input if no terminating token is found (a non alphabetic character). 
+/// # Example +/// +/// ``` +/// # use nom::{Err, error::{Error, ErrorKind}, IResult, Needed}; +/// # use nom::character::complete::alpha1; +/// fn parser(input: &str) -> IResult<&str, &str> { +/// alpha1(input) +/// } +/// +/// assert_eq!(parser("aB1c"), Ok(("1c", "aB"))); +/// assert_eq!(parser("1c"), Err(Err::Error(Error::new("1c", ErrorKind::Alpha)))); +/// assert_eq!(parser(""), Err(Err::Error(Error::new("", ErrorKind::Alpha)))); +/// ``` +pub fn alpha1>(input: T) -> IResult +where + T: InputTakeAtPosition, + ::Item: AsChar, +{ + input.split_at_position1_complete(|item| !item.is_alpha(), ErrorKind::Alpha) +} + +/// Recognizes zero or more ASCII numerical characters: 0-9 +/// +/// *Complete version*: Will return an error if there's not enough input data, +/// or the whole input if no terminating token is found (a non digit character). +/// # Example +/// +/// ``` +/// # use nom::{Err, error::ErrorKind, IResult, Needed}; +/// # use nom::character::complete::digit0; +/// fn parser(input: &str) -> IResult<&str, &str> { +/// digit0(input) +/// } +/// +/// assert_eq!(parser("21c"), Ok(("c", "21"))); +/// assert_eq!(parser("21"), Ok(("", "21"))); +/// assert_eq!(parser("a21c"), Ok(("a21c", ""))); +/// assert_eq!(parser(""), Ok(("", ""))); +/// ``` +pub fn digit0>(input: T) -> IResult +where + T: InputTakeAtPosition, + ::Item: AsChar, +{ + input.split_at_position_complete(|item| !item.is_dec_digit()) +} + +/// Recognizes one or more ASCII numerical characters: 0-9 +/// +/// *Complete version*: Will return an error if there's not enough input data, +/// or the whole input if no terminating token is found (a non digit character). +/// # Example +/// +/// ``` +/// # use nom::{Err, error::{Error, ErrorKind}, IResult, Needed}; +/// # use nom::character::complete::digit1; +/// fn parser(input: &str) -> IResult<&str, &str> { +/// digit1(input) +/// } +/// +/// assert_eq!(parser("21c"), Ok(("c", "21"))); +/// assert_eq!(parser("c1"), Err(Err::Error(Error::new("c1", ErrorKind::Digit)))); +/// assert_eq!(parser(""), Err(Err::Error(Error::new("", ErrorKind::Digit)))); +/// ``` +/// +/// ## Parsing an integer +/// You can use `digit1` in combination with [`map_res`] to parse an integer: +/// +/// ``` +/// # use nom::{Err, error::{Error, ErrorKind}, IResult, Needed}; +/// # use nom::combinator::map_res; +/// # use nom::character::complete::digit1; +/// fn parser(input: &str) -> IResult<&str, u32> { +/// map_res(digit1, str::parse)(input) +/// } +/// +/// assert_eq!(parser("416"), Ok(("", 416))); +/// assert_eq!(parser("12b"), Ok(("b", 12))); +/// assert!(parser("b").is_err()); +/// ``` +/// +/// [`map_res`]: crate::combinator::map_res +pub fn digit1>(input: T) -> IResult +where + T: InputTakeAtPosition, + ::Item: AsChar, +{ + input.split_at_position1_complete(|item| !item.is_dec_digit(), ErrorKind::Digit) +} + +/// Recognizes zero or more ASCII hexadecimal numerical characters: 0-9, A-F, a-f +/// +/// *Complete version*: Will return the whole input if no terminating token is found (a non hexadecimal digit character). 
+/// # Example +/// +/// ``` +/// # use nom::{Err, error::ErrorKind, IResult, Needed}; +/// # use nom::character::complete::hex_digit0; +/// fn parser(input: &str) -> IResult<&str, &str> { +/// hex_digit0(input) +/// } +/// +/// assert_eq!(parser("21cZ"), Ok(("Z", "21c"))); +/// assert_eq!(parser("Z21c"), Ok(("Z21c", ""))); +/// assert_eq!(parser(""), Ok(("", ""))); +/// ``` +pub fn hex_digit0>(input: T) -> IResult +where + T: InputTakeAtPosition, + ::Item: AsChar, +{ + input.split_at_position_complete(|item| !item.is_hex_digit()) +} +/// Recognizes one or more ASCII hexadecimal numerical characters: 0-9, A-F, a-f +/// +/// *Complete version*: Will return an error if there's not enough input data, +/// or the whole input if no terminating token is found (a non hexadecimal digit character). +/// # Example +/// +/// ``` +/// # use nom::{Err, error::{Error, ErrorKind}, IResult, Needed}; +/// # use nom::character::complete::hex_digit1; +/// fn parser(input: &str) -> IResult<&str, &str> { +/// hex_digit1(input) +/// } +/// +/// assert_eq!(parser("21cZ"), Ok(("Z", "21c"))); +/// assert_eq!(parser("H2"), Err(Err::Error(Error::new("H2", ErrorKind::HexDigit)))); +/// assert_eq!(parser(""), Err(Err::Error(Error::new("", ErrorKind::HexDigit)))); +/// ``` +pub fn hex_digit1>(input: T) -> IResult +where + T: InputTakeAtPosition, + ::Item: AsChar, +{ + input.split_at_position1_complete(|item| !item.is_hex_digit(), ErrorKind::HexDigit) +} + +/// Recognizes zero or more octal characters: 0-7 +/// +/// *Complete version*: Will return the whole input if no terminating token is found (a non octal +/// digit character). +/// # Example +/// +/// ``` +/// # use nom::{Err, error::ErrorKind, IResult, Needed}; +/// # use nom::character::complete::oct_digit0; +/// fn parser(input: &str) -> IResult<&str, &str> { +/// oct_digit0(input) +/// } +/// +/// assert_eq!(parser("21cZ"), Ok(("cZ", "21"))); +/// assert_eq!(parser("Z21c"), Ok(("Z21c", ""))); +/// assert_eq!(parser(""), Ok(("", ""))); +/// ``` +pub fn oct_digit0>(input: T) -> IResult +where + T: InputTakeAtPosition, + ::Item: AsChar, +{ + input.split_at_position_complete(|item| !item.is_oct_digit()) +} + +/// Recognizes one or more octal characters: 0-7 +/// +/// *Complete version*: Will return an error if there's not enough input data, +/// or the whole input if no terminating token is found (a non octal digit character). +/// # Example +/// +/// ``` +/// # use nom::{Err, error::{Error, ErrorKind}, IResult, Needed}; +/// # use nom::character::complete::oct_digit1; +/// fn parser(input: &str) -> IResult<&str, &str> { +/// oct_digit1(input) +/// } +/// +/// assert_eq!(parser("21cZ"), Ok(("cZ", "21"))); +/// assert_eq!(parser("H2"), Err(Err::Error(Error::new("H2", ErrorKind::OctDigit)))); +/// assert_eq!(parser(""), Err(Err::Error(Error::new("", ErrorKind::OctDigit)))); +/// ``` +pub fn oct_digit1>(input: T) -> IResult +where + T: InputTakeAtPosition, + ::Item: AsChar, +{ + input.split_at_position1_complete(|item| !item.is_oct_digit(), ErrorKind::OctDigit) +} + +/// Recognizes zero or more ASCII numerical and alphabetic characters: 0-9, a-z, A-Z +/// +/// *Complete version*: Will return the whole input if no terminating token is found (a non +/// alphanumerical character). 
+/// # Example +/// +/// ``` +/// # use nom::{Err, error::ErrorKind, IResult, Needed}; +/// # use nom::character::complete::alphanumeric0; +/// fn parser(input: &str) -> IResult<&str, &str> { +/// alphanumeric0(input) +/// } +/// +/// assert_eq!(parser("21cZ%1"), Ok(("%1", "21cZ"))); +/// assert_eq!(parser("&Z21c"), Ok(("&Z21c", ""))); +/// assert_eq!(parser(""), Ok(("", ""))); +/// ``` +pub fn alphanumeric0>(input: T) -> IResult +where + T: InputTakeAtPosition, + ::Item: AsChar, +{ + input.split_at_position_complete(|item| !item.is_alphanum()) +} + +/// Recognizes one or more ASCII numerical and alphabetic characters: 0-9, a-z, A-Z +/// +/// *Complete version*: Will return an error if there's not enough input data, +/// or the whole input if no terminating token is found (a non alphanumerical character). +/// # Example +/// +/// ``` +/// # use nom::{Err, error::{Error, ErrorKind}, IResult, Needed}; +/// # use nom::character::complete::alphanumeric1; +/// fn parser(input: &str) -> IResult<&str, &str> { +/// alphanumeric1(input) +/// } +/// +/// assert_eq!(parser("21cZ%1"), Ok(("%1", "21cZ"))); +/// assert_eq!(parser("&H2"), Err(Err::Error(Error::new("&H2", ErrorKind::AlphaNumeric)))); +/// assert_eq!(parser(""), Err(Err::Error(Error::new("", ErrorKind::AlphaNumeric)))); +/// ``` +pub fn alphanumeric1>(input: T) -> IResult +where + T: InputTakeAtPosition, + ::Item: AsChar, +{ + input.split_at_position1_complete(|item| !item.is_alphanum(), ErrorKind::AlphaNumeric) +} + +/// Recognizes zero or more spaces and tabs. +/// +/// *Complete version*: Will return the whole input if no terminating token is found (a non space +/// character). +/// # Example +/// +/// ``` +/// # use nom::{Err, error::ErrorKind, IResult, Needed}; +/// # use nom::character::complete::space0; +/// fn parser(input: &str) -> IResult<&str, &str> { +/// space0(input) +/// } +/// +/// assert_eq!(parser(" \t21c"), Ok(("21c", " \t"))); +/// assert_eq!(parser("Z21c"), Ok(("Z21c", ""))); +/// assert_eq!(parser(""), Ok(("", ""))); +/// ``` +pub fn space0>(input: T) -> IResult +where + T: InputTakeAtPosition, + ::Item: AsChar + Clone, +{ + input.split_at_position_complete(|item| { + let c = item.as_char(); + !(c == ' ' || c == '\t') + }) +} + +/// Recognizes one or more spaces and tabs. +/// +/// *Complete version*: Will return an error if there's not enough input data, +/// or the whole input if no terminating token is found (a non space character). +/// # Example +/// +/// ``` +/// # use nom::{Err, error::{Error, ErrorKind}, IResult, Needed}; +/// # use nom::character::complete::space1; +/// fn parser(input: &str) -> IResult<&str, &str> { +/// space1(input) +/// } +/// +/// assert_eq!(parser(" \t21c"), Ok(("21c", " \t"))); +/// assert_eq!(parser("H2"), Err(Err::Error(Error::new("H2", ErrorKind::Space)))); +/// assert_eq!(parser(""), Err(Err::Error(Error::new("", ErrorKind::Space)))); +/// ``` +pub fn space1>(input: T) -> IResult +where + T: InputTakeAtPosition, + ::Item: AsChar + Clone, +{ + input.split_at_position1_complete( + |item| { + let c = item.as_char(); + !(c == ' ' || c == '\t') + }, + ErrorKind::Space, + ) +} + +/// Recognizes zero or more spaces, tabs, carriage returns and line feeds. +/// +/// *Complete version*: will return the whole input if no terminating token is found (a non space +/// character). 
+/// # Example +/// +/// ``` +/// # use nom::{Err, error::ErrorKind, IResult, Needed}; +/// # use nom::character::complete::multispace0; +/// fn parser(input: &str) -> IResult<&str, &str> { +/// multispace0(input) +/// } +/// +/// assert_eq!(parser(" \t\n\r21c"), Ok(("21c", " \t\n\r"))); +/// assert_eq!(parser("Z21c"), Ok(("Z21c", ""))); +/// assert_eq!(parser(""), Ok(("", ""))); +/// ``` +pub fn multispace0>(input: T) -> IResult +where + T: InputTakeAtPosition, + ::Item: AsChar + Clone, +{ + input.split_at_position_complete(|item| { + let c = item.as_char(); + !(c == ' ' || c == '\t' || c == '\r' || c == '\n') + }) +} + +/// Recognizes one or more spaces, tabs, carriage returns and line feeds. +/// +/// *Complete version*: will return an error if there's not enough input data, +/// or the whole input if no terminating token is found (a non space character). +/// # Example +/// +/// ``` +/// # use nom::{Err, error::{Error, ErrorKind}, IResult, Needed}; +/// # use nom::character::complete::multispace1; +/// fn parser(input: &str) -> IResult<&str, &str> { +/// multispace1(input) +/// } +/// +/// assert_eq!(parser(" \t\n\r21c"), Ok(("21c", " \t\n\r"))); +/// assert_eq!(parser("H2"), Err(Err::Error(Error::new("H2", ErrorKind::MultiSpace)))); +/// assert_eq!(parser(""), Err(Err::Error(Error::new("", ErrorKind::MultiSpace)))); +/// ``` +pub fn multispace1>(input: T) -> IResult +where + T: InputTakeAtPosition, + ::Item: AsChar + Clone, +{ + input.split_at_position1_complete( + |item| { + let c = item.as_char(); + !(c == ' ' || c == '\t' || c == '\r' || c == '\n') + }, + ErrorKind::MultiSpace, + ) +} + +pub(crate) fn sign>(input: T) -> IResult +where + T: Clone + InputTake, + T: for<'a> Compare<&'a [u8]>, +{ + use crate::bytes::complete::tag; + use crate::combinator::value; + + let (i, opt_sign) = opt(alt(( + value(false, tag(&b"-"[..])), + value(true, tag(&b"+"[..])), + )))(input)?; + let sign = opt_sign.unwrap_or(true); + + Ok((i, sign)) +} + +#[doc(hidden)] +macro_rules! ints { + ($($t:tt)+) => { + $( + /// will parse a number in text form to a number + /// + /// *Complete version*: can parse until the end of input. + pub fn $t>(input: T) -> IResult + where + T: InputIter + Slice> + InputLength + InputTake + Clone, + ::Item: AsChar, + T: for <'a> Compare<&'a[u8]>, + { + let (i, sign) = sign(input.clone())?; + + if i.input_len() == 0 { + return Err(Err::Error(E::from_error_kind(input, ErrorKind::Digit))); + } + + let mut value: $t = 0; + if sign { + for (pos, c) in i.iter_indices() { + match c.as_char().to_digit(10) { + None => { + if pos == 0 { + return Err(Err::Error(E::from_error_kind(input, ErrorKind::Digit))); + } else { + return Ok((i.slice(pos..), value)); + } + }, + Some(d) => match value.checked_mul(10).and_then(|v| v.checked_add(d as $t)) { + None => return Err(Err::Error(E::from_error_kind(input, ErrorKind::Digit))), + Some(v) => value = v, + } + } + } + } else { + for (pos, c) in i.iter_indices() { + match c.as_char().to_digit(10) { + None => { + if pos == 0 { + return Err(Err::Error(E::from_error_kind(input, ErrorKind::Digit))); + } else { + return Ok((i.slice(pos..), value)); + } + }, + Some(d) => match value.checked_mul(10).and_then(|v| v.checked_sub(d as $t)) { + None => return Err(Err::Error(E::from_error_kind(input, ErrorKind::Digit))), + Some(v) => value = v, + } + } + } + } + + Ok((i.slice(i.input_len()..), value)) + } + )+ + } +} + +ints! { i8 i16 i32 i64 i128 } + +#[doc(hidden)] +macro_rules! 
uints { + ($($t:tt)+) => { + $( + /// will parse a number in text form to a number + /// + /// *Complete version*: can parse until the end of input. + pub fn $t>(input: T) -> IResult + where + T: InputIter + Slice> + InputLength, + ::Item: AsChar, + { + let i = input; + + if i.input_len() == 0 { + return Err(Err::Error(E::from_error_kind(i, ErrorKind::Digit))); + } + + let mut value: $t = 0; + for (pos, c) in i.iter_indices() { + match c.as_char().to_digit(10) { + None => { + if pos == 0 { + return Err(Err::Error(E::from_error_kind(i, ErrorKind::Digit))); + } else { + return Ok((i.slice(pos..), value)); + } + }, + Some(d) => match value.checked_mul(10).and_then(|v| v.checked_add(d as $t)) { + None => return Err(Err::Error(E::from_error_kind(i, ErrorKind::Digit))), + Some(v) => value = v, + } + } + } + + Ok((i.slice(i.input_len()..), value)) + } + )+ + } +} + +uints! { u8 u16 u32 u64 u128 } + +#[cfg(test)] +mod tests { + use super::*; + use crate::internal::Err; + use crate::traits::ParseTo; + use proptest::prelude::*; + + macro_rules! assert_parse( + ($left: expr, $right: expr) => { + let res: $crate::IResult<_, _, (_, ErrorKind)> = $left; + assert_eq!(res, $right); + }; + ); + + #[test] + fn character() { + let empty: &[u8] = b""; + let a: &[u8] = b"abcd"; + let b: &[u8] = b"1234"; + let c: &[u8] = b"a123"; + let d: &[u8] = "azé12".as_bytes(); + let e: &[u8] = b" "; + let f: &[u8] = b" ;"; + //assert_eq!(alpha1::<_, (_, ErrorKind)>(a), Err(Err::Incomplete(Needed::Size(1)))); + assert_parse!(alpha1(a), Ok((empty, a))); + assert_eq!(alpha1(b), Err(Err::Error((b, ErrorKind::Alpha)))); + assert_eq!(alpha1::<_, (_, ErrorKind)>(c), Ok((&c[1..], &b"a"[..]))); + assert_eq!( + alpha1::<_, (_, ErrorKind)>(d), + Ok(("é12".as_bytes(), &b"az"[..])) + ); + assert_eq!(digit1(a), Err(Err::Error((a, ErrorKind::Digit)))); + assert_eq!(digit1::<_, (_, ErrorKind)>(b), Ok((empty, b))); + assert_eq!(digit1(c), Err(Err::Error((c, ErrorKind::Digit)))); + assert_eq!(digit1(d), Err(Err::Error((d, ErrorKind::Digit)))); + assert_eq!(hex_digit1::<_, (_, ErrorKind)>(a), Ok((empty, a))); + assert_eq!(hex_digit1::<_, (_, ErrorKind)>(b), Ok((empty, b))); + assert_eq!(hex_digit1::<_, (_, ErrorKind)>(c), Ok((empty, c))); + assert_eq!( + hex_digit1::<_, (_, ErrorKind)>(d), + Ok(("zé12".as_bytes(), &b"a"[..])) + ); + assert_eq!(hex_digit1(e), Err(Err::Error((e, ErrorKind::HexDigit)))); + assert_eq!(oct_digit1(a), Err(Err::Error((a, ErrorKind::OctDigit)))); + assert_eq!(oct_digit1::<_, (_, ErrorKind)>(b), Ok((empty, b))); + assert_eq!(oct_digit1(c), Err(Err::Error((c, ErrorKind::OctDigit)))); + assert_eq!(oct_digit1(d), Err(Err::Error((d, ErrorKind::OctDigit)))); + assert_eq!(alphanumeric1::<_, (_, ErrorKind)>(a), Ok((empty, a))); + //assert_eq!(fix_error!(b,(), alphanumeric), Ok((empty, b))); + assert_eq!(alphanumeric1::<_, (_, ErrorKind)>(c), Ok((empty, c))); + assert_eq!( + alphanumeric1::<_, (_, ErrorKind)>(d), + Ok(("é12".as_bytes(), &b"az"[..])) + ); + assert_eq!(space1::<_, (_, ErrorKind)>(e), Ok((empty, e))); + assert_eq!(space1::<_, (_, ErrorKind)>(f), Ok((&b";"[..], &b" "[..]))); + } + + #[cfg(feature = "alloc")] + #[test] + fn character_s() { + let empty = ""; + let a = "abcd"; + let b = "1234"; + let c = "a123"; + let d = "azé12"; + let e = " "; + assert_eq!(alpha1::<_, (_, ErrorKind)>(a), Ok((empty, a))); + assert_eq!(alpha1(b), Err(Err::Error((b, ErrorKind::Alpha)))); + assert_eq!(alpha1::<_, (_, ErrorKind)>(c), Ok((&c[1..], &"a"[..]))); + assert_eq!(alpha1::<_, (_, ErrorKind)>(d), Ok(("é12", &"az"[..]))); + 
assert_eq!(digit1(a), Err(Err::Error((a, ErrorKind::Digit)))); + assert_eq!(digit1::<_, (_, ErrorKind)>(b), Ok((empty, b))); + assert_eq!(digit1(c), Err(Err::Error((c, ErrorKind::Digit)))); + assert_eq!(digit1(d), Err(Err::Error((d, ErrorKind::Digit)))); + assert_eq!(hex_digit1::<_, (_, ErrorKind)>(a), Ok((empty, a))); + assert_eq!(hex_digit1::<_, (_, ErrorKind)>(b), Ok((empty, b))); + assert_eq!(hex_digit1::<_, (_, ErrorKind)>(c), Ok((empty, c))); + assert_eq!(hex_digit1::<_, (_, ErrorKind)>(d), Ok(("zé12", &"a"[..]))); + assert_eq!(hex_digit1(e), Err(Err::Error((e, ErrorKind::HexDigit)))); + assert_eq!(oct_digit1(a), Err(Err::Error((a, ErrorKind::OctDigit)))); + assert_eq!(oct_digit1::<_, (_, ErrorKind)>(b), Ok((empty, b))); + assert_eq!(oct_digit1(c), Err(Err::Error((c, ErrorKind::OctDigit)))); + assert_eq!(oct_digit1(d), Err(Err::Error((d, ErrorKind::OctDigit)))); + assert_eq!(alphanumeric1::<_, (_, ErrorKind)>(a), Ok((empty, a))); + //assert_eq!(fix_error!(b,(), alphanumeric), Ok((empty, b))); + assert_eq!(alphanumeric1::<_, (_, ErrorKind)>(c), Ok((empty, c))); + assert_eq!(alphanumeric1::<_, (_, ErrorKind)>(d), Ok(("é12", "az"))); + assert_eq!(space1::<_, (_, ErrorKind)>(e), Ok((empty, e))); + } + + use crate::traits::Offset; + #[test] + fn offset() { + let a = &b"abcd;"[..]; + let b = &b"1234;"[..]; + let c = &b"a123;"[..]; + let d = &b" \t;"[..]; + let e = &b" \t\r\n;"[..]; + let f = &b"123abcDEF;"[..]; + + match alpha1::<_, (_, ErrorKind)>(a) { + Ok((i, _)) => { + assert_eq!(a.offset(i) + i.len(), a.len()); + } + _ => panic!("wrong return type in offset test for alpha"), + } + match digit1::<_, (_, ErrorKind)>(b) { + Ok((i, _)) => { + assert_eq!(b.offset(i) + i.len(), b.len()); + } + _ => panic!("wrong return type in offset test for digit"), + } + match alphanumeric1::<_, (_, ErrorKind)>(c) { + Ok((i, _)) => { + assert_eq!(c.offset(i) + i.len(), c.len()); + } + _ => panic!("wrong return type in offset test for alphanumeric"), + } + match space1::<_, (_, ErrorKind)>(d) { + Ok((i, _)) => { + assert_eq!(d.offset(i) + i.len(), d.len()); + } + _ => panic!("wrong return type in offset test for space"), + } + match multispace1::<_, (_, ErrorKind)>(e) { + Ok((i, _)) => { + assert_eq!(e.offset(i) + i.len(), e.len()); + } + _ => panic!("wrong return type in offset test for multispace"), + } + match hex_digit1::<_, (_, ErrorKind)>(f) { + Ok((i, _)) => { + assert_eq!(f.offset(i) + i.len(), f.len()); + } + _ => panic!("wrong return type in offset test for hex_digit"), + } + match oct_digit1::<_, (_, ErrorKind)>(f) { + Ok((i, _)) => { + assert_eq!(f.offset(i) + i.len(), f.len()); + } + _ => panic!("wrong return type in offset test for oct_digit"), + } + } + + #[test] + fn is_not_line_ending_bytes() { + let a: &[u8] = b"ab12cd\nefgh"; + assert_eq!( + not_line_ending::<_, (_, ErrorKind)>(a), + Ok((&b"\nefgh"[..], &b"ab12cd"[..])) + ); + + let b: &[u8] = b"ab12cd\nefgh\nijkl"; + assert_eq!( + not_line_ending::<_, (_, ErrorKind)>(b), + Ok((&b"\nefgh\nijkl"[..], &b"ab12cd"[..])) + ); + + let c: &[u8] = b"ab12cd\r\nefgh\nijkl"; + assert_eq!( + not_line_ending::<_, (_, ErrorKind)>(c), + Ok((&b"\r\nefgh\nijkl"[..], &b"ab12cd"[..])) + ); + + let d: &[u8] = b"ab12cd"; + assert_eq!( + not_line_ending::<_, (_, ErrorKind)>(d), + Ok((&[][..], &d[..])) + ); + } + + #[test] + fn is_not_line_ending_str() { + /* + let a: &str = "ab12cd\nefgh"; + assert_eq!(not_line_ending(a), Ok((&"\nefgh"[..], &"ab12cd"[..]))); + + let b: &str = "ab12cd\nefgh\nijkl"; + assert_eq!(not_line_ending(b), Ok((&"\nefgh\nijkl"[..], 
&"ab12cd"[..]))); + + let c: &str = "ab12cd\r\nefgh\nijkl"; + assert_eq!(not_line_ending(c), Ok((&"\r\nefgh\nijkl"[..], &"ab12cd"[..]))); + + let d = "βèƒôřè\nÂßÇáƒƭèř"; + assert_eq!(not_line_ending(d), Ok((&"\nÂßÇáƒƭèř"[..], &"βèƒôřè"[..]))); + + let e = "βèƒôřè\r\nÂßÇáƒƭèř"; + assert_eq!(not_line_ending(e), Ok((&"\r\nÂßÇáƒƭèř"[..], &"βèƒôřè"[..]))); + */ + + let f = "βèƒôřè\rÂßÇáƒƭèř"; + assert_eq!(not_line_ending(f), Err(Err::Error((f, ErrorKind::Tag)))); + + let g2: &str = "ab12cd"; + assert_eq!(not_line_ending::<_, (_, ErrorKind)>(g2), Ok(("", g2))); + } + + #[test] + fn hex_digit_test() { + let i = &b"0123456789abcdefABCDEF;"[..]; + assert_parse!(hex_digit1(i), Ok((&b";"[..], &i[..i.len() - 1]))); + + let i = &b"g"[..]; + assert_parse!( + hex_digit1(i), + Err(Err::Error(error_position!(i, ErrorKind::HexDigit))) + ); + + let i = &b"G"[..]; + assert_parse!( + hex_digit1(i), + Err(Err::Error(error_position!(i, ErrorKind::HexDigit))) + ); + + assert!(crate::character::is_hex_digit(b'0')); + assert!(crate::character::is_hex_digit(b'9')); + assert!(crate::character::is_hex_digit(b'a')); + assert!(crate::character::is_hex_digit(b'f')); + assert!(crate::character::is_hex_digit(b'A')); + assert!(crate::character::is_hex_digit(b'F')); + assert!(!crate::character::is_hex_digit(b'g')); + assert!(!crate::character::is_hex_digit(b'G')); + assert!(!crate::character::is_hex_digit(b'/')); + assert!(!crate::character::is_hex_digit(b':')); + assert!(!crate::character::is_hex_digit(b'@')); + assert!(!crate::character::is_hex_digit(b'\x60')); + } + + #[test] + fn oct_digit_test() { + let i = &b"01234567;"[..]; + assert_parse!(oct_digit1(i), Ok((&b";"[..], &i[..i.len() - 1]))); + + let i = &b"8"[..]; + assert_parse!( + oct_digit1(i), + Err(Err::Error(error_position!(i, ErrorKind::OctDigit))) + ); + + assert!(crate::character::is_oct_digit(b'0')); + assert!(crate::character::is_oct_digit(b'7')); + assert!(!crate::character::is_oct_digit(b'8')); + assert!(!crate::character::is_oct_digit(b'9')); + assert!(!crate::character::is_oct_digit(b'a')); + assert!(!crate::character::is_oct_digit(b'A')); + assert!(!crate::character::is_oct_digit(b'/')); + assert!(!crate::character::is_oct_digit(b':')); + assert!(!crate::character::is_oct_digit(b'@')); + assert!(!crate::character::is_oct_digit(b'\x60')); + } + + #[test] + fn full_line_windows() { + use crate::sequence::pair; + fn take_full_line(i: &[u8]) -> IResult<&[u8], (&[u8], &[u8])> { + pair(not_line_ending, line_ending)(i) + } + let input = b"abc\r\n"; + let output = take_full_line(input); + assert_eq!(output, Ok((&b""[..], (&b"abc"[..], &b"\r\n"[..])))); + } + + #[test] + fn full_line_unix() { + use crate::sequence::pair; + fn take_full_line(i: &[u8]) -> IResult<&[u8], (&[u8], &[u8])> { + pair(not_line_ending, line_ending)(i) + } + let input = b"abc\n"; + let output = take_full_line(input); + assert_eq!(output, Ok((&b""[..], (&b"abc"[..], &b"\n"[..])))); + } + + #[test] + fn check_windows_lineending() { + let input = b"\r\n"; + let output = line_ending(&input[..]); + assert_parse!(output, Ok((&b""[..], &b"\r\n"[..]))); + } + + #[test] + fn check_unix_lineending() { + let input = b"\n"; + let output = line_ending(&input[..]); + assert_parse!(output, Ok((&b""[..], &b"\n"[..]))); + } + + #[test] + fn cr_lf() { + assert_parse!(crlf(&b"\r\na"[..]), Ok((&b"a"[..], &b"\r\n"[..]))); + assert_parse!( + crlf(&b"\r"[..]), + Err(Err::Error(error_position!(&b"\r"[..], ErrorKind::CrLf))) + ); + assert_parse!( + crlf(&b"\ra"[..]), + Err(Err::Error(error_position!(&b"\ra"[..], 
ErrorKind::CrLf))) + ); + + assert_parse!(crlf("\r\na"), Ok(("a", "\r\n"))); + assert_parse!( + crlf("\r"), + Err(Err::Error(error_position!(&"\r"[..], ErrorKind::CrLf))) + ); + assert_parse!( + crlf("\ra"), + Err(Err::Error(error_position!("\ra", ErrorKind::CrLf))) + ); + } + + #[test] + fn end_of_line() { + assert_parse!(line_ending(&b"\na"[..]), Ok((&b"a"[..], &b"\n"[..]))); + assert_parse!(line_ending(&b"\r\na"[..]), Ok((&b"a"[..], &b"\r\n"[..]))); + assert_parse!( + line_ending(&b"\r"[..]), + Err(Err::Error(error_position!(&b"\r"[..], ErrorKind::CrLf))) + ); + assert_parse!( + line_ending(&b"\ra"[..]), + Err(Err::Error(error_position!(&b"\ra"[..], ErrorKind::CrLf))) + ); + + assert_parse!(line_ending("\na"), Ok(("a", "\n"))); + assert_parse!(line_ending("\r\na"), Ok(("a", "\r\n"))); + assert_parse!( + line_ending("\r"), + Err(Err::Error(error_position!(&"\r"[..], ErrorKind::CrLf))) + ); + assert_parse!( + line_ending("\ra"), + Err(Err::Error(error_position!("\ra", ErrorKind::CrLf))) + ); + } + + fn digit_to_i16(input: &str) -> IResult<&str, i16> { + let i = input; + let (i, opt_sign) = opt(alt((char('+'), char('-'))))(i)?; + let sign = match opt_sign { + Some('+') => true, + Some('-') => false, + _ => true, + }; + + let (i, s) = match digit1::<_, crate::error::Error<_>>(i) { + Ok((i, s)) => (i, s), + Err(_) => { + return Err(Err::Error(crate::error::Error::from_error_kind( + input, + ErrorKind::Digit, + ))) + } + }; + + match s.parse_to() { + Some(n) => { + if sign { + Ok((i, n)) + } else { + Ok((i, -n)) + } + } + None => Err(Err::Error(crate::error::Error::from_error_kind( + i, + ErrorKind::Digit, + ))), + } + } + + fn digit_to_u32(i: &str) -> IResult<&str, u32> { + let (i, s) = digit1(i)?; + match s.parse_to() { + Some(n) => Ok((i, n)), + None => Err(Err::Error(crate::error::Error::from_error_kind( + i, + ErrorKind::Digit, + ))), + } + } + + proptest! { + #[test] + fn ints(s in "\\PC*") { + let res1 = digit_to_i16(&s); + let res2 = i16(s.as_str()); + assert_eq!(res1, res2); + } + + #[test] + fn uints(s in "\\PC*") { + let res1 = digit_to_u32(&s); + let res2 = u32(s.as_str()); + assert_eq!(res1, res2); + } + } +} diff --git a/vendor/nom/src/character/mod.rs b/vendor/nom/src/character/mod.rs new file mode 100644 index 00000000000000..2c5d3bc4ad833e --- /dev/null +++ b/vendor/nom/src/character/mod.rs @@ -0,0 +1,116 @@ +//! Character specific parsers and combinators +//! +//! 
Functions recognizing specific characters + +#[cfg(test)] +mod tests; + +pub mod complete; +pub mod streaming; + +/// Tests if byte is ASCII alphabetic: A-Z, a-z +/// +/// # Example +/// +/// ``` +/// # use nom::character::is_alphabetic; +/// assert_eq!(is_alphabetic(b'9'), false); +/// assert_eq!(is_alphabetic(b'a'), true); +/// ``` +#[inline] +pub fn is_alphabetic(chr: u8) -> bool { + (chr >= 0x41 && chr <= 0x5A) || (chr >= 0x61 && chr <= 0x7A) +} + +/// Tests if byte is ASCII digit: 0-9 +/// +/// # Example +/// +/// ``` +/// # use nom::character::is_digit; +/// assert_eq!(is_digit(b'a'), false); +/// assert_eq!(is_digit(b'9'), true); +/// ``` +#[inline] +pub fn is_digit(chr: u8) -> bool { + chr >= 0x30 && chr <= 0x39 +} + +/// Tests if byte is ASCII hex digit: 0-9, A-F, a-f +/// +/// # Example +/// +/// ``` +/// # use nom::character::is_hex_digit; +/// assert_eq!(is_hex_digit(b'a'), true); +/// assert_eq!(is_hex_digit(b'9'), true); +/// assert_eq!(is_hex_digit(b'A'), true); +/// assert_eq!(is_hex_digit(b'x'), false); +/// ``` +#[inline] +pub fn is_hex_digit(chr: u8) -> bool { + (chr >= 0x30 && chr <= 0x39) || (chr >= 0x41 && chr <= 0x46) || (chr >= 0x61 && chr <= 0x66) +} + +/// Tests if byte is ASCII octal digit: 0-7 +/// +/// # Example +/// +/// ``` +/// # use nom::character::is_oct_digit; +/// assert_eq!(is_oct_digit(b'a'), false); +/// assert_eq!(is_oct_digit(b'9'), false); +/// assert_eq!(is_oct_digit(b'6'), true); +/// ``` +#[inline] +pub fn is_oct_digit(chr: u8) -> bool { + chr >= 0x30 && chr <= 0x37 +} + +/// Tests if byte is ASCII alphanumeric: A-Z, a-z, 0-9 +/// +/// # Example +/// +/// ``` +/// # use nom::character::is_alphanumeric; +/// assert_eq!(is_alphanumeric(b'-'), false); +/// assert_eq!(is_alphanumeric(b'a'), true); +/// assert_eq!(is_alphanumeric(b'9'), true); +/// assert_eq!(is_alphanumeric(b'A'), true); +/// ``` +#[inline] +pub fn is_alphanumeric(chr: u8) -> bool { + is_alphabetic(chr) || is_digit(chr) +} + +/// Tests if byte is ASCII space or tab +/// +/// # Example +/// +/// ``` +/// # use nom::character::is_space; +/// assert_eq!(is_space(b'\n'), false); +/// assert_eq!(is_space(b'\r'), false); +/// assert_eq!(is_space(b' '), true); +/// assert_eq!(is_space(b'\t'), true); +/// ``` +#[inline] +pub fn is_space(chr: u8) -> bool { + chr == b' ' || chr == b'\t' +} + +/// Tests if byte is ASCII newline: \n +/// +/// # Example +/// +/// ``` +/// # use nom::character::is_newline; +/// assert_eq!(is_newline(b'\n'), true); +/// assert_eq!(is_newline(b'\r'), false); +/// assert_eq!(is_newline(b' '), false); +/// assert_eq!(is_newline(b'\t'), false); +/// ``` +#[inline] +pub fn is_newline(chr: u8) -> bool { + chr == b'\n' +} diff --git a/vendor/nom/src/character/streaming.rs b/vendor/nom/src/character/streaming.rs new file mode 100644 index 00000000000000..88aabba3560596 --- /dev/null +++ b/vendor/nom/src/character/streaming.rs @@ -0,0 +1,1182 @@ +//! Character specific parsers and combinators, streaming version +//! +//! Functions recognizing specific characters + +use crate::branch::alt; +use crate::combinator::opt; +use crate::error::ErrorKind; +use crate::error::ParseError; +use crate::internal::{Err, IResult, Needed}; +use crate::lib::std::ops::{Range, RangeFrom, RangeTo}; +use crate::traits::{ + AsChar, FindToken, InputIter, InputLength, InputTake, InputTakeAtPosition, Slice, +}; +use crate::traits::{Compare, CompareResult}; + +/// Recognizes one character. +/// +/// *Streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there's not enough input data. 
+/// # Example +/// +/// ``` +/// # use nom::{Err, error::{ErrorKind, Error}, Needed, IResult}; +/// # use nom::character::streaming::char; +/// fn parser(i: &str) -> IResult<&str, char> { +/// char('a')(i) +/// } +/// assert_eq!(parser("abc"), Ok(("bc", 'a'))); +/// assert_eq!(parser("bc"), Err(Err::Error(Error::new("bc", ErrorKind::Char)))); +/// assert_eq!(parser(""), Err(Err::Incomplete(Needed::new(1)))); +/// ``` +pub fn char>(c: char) -> impl Fn(I) -> IResult +where + I: Slice> + InputIter + InputLength, + ::Item: AsChar, +{ + move |i: I| match (i).iter_elements().next().map(|t| { + let b = t.as_char() == c; + (&c, b) + }) { + None => Err(Err::Incomplete(Needed::new(c.len() - i.input_len()))), + Some((_, false)) => Err(Err::Error(Error::from_char(i, c))), + Some((c, true)) => Ok((i.slice(c.len()..), c.as_char())), + } +} + +/// Recognizes one character and checks that it satisfies a predicate +/// +/// *Streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there's not enough input data. +/// # Example +/// +/// ``` +/// # use nom::{Err, error::{ErrorKind, Error}, Needed, IResult}; +/// # use nom::character::streaming::satisfy; +/// fn parser(i: &str) -> IResult<&str, char> { +/// satisfy(|c| c == 'a' || c == 'b')(i) +/// } +/// assert_eq!(parser("abc"), Ok(("bc", 'a'))); +/// assert_eq!(parser("cd"), Err(Err::Error(Error::new("cd", ErrorKind::Satisfy)))); +/// assert_eq!(parser(""), Err(Err::Incomplete(Needed::Unknown))); +/// ``` +pub fn satisfy>(cond: F) -> impl Fn(I) -> IResult +where + I: Slice> + InputIter, + ::Item: AsChar, + F: Fn(char) -> bool, +{ + move |i: I| match (i).iter_elements().next().map(|t| { + let c = t.as_char(); + let b = cond(c); + (c, b) + }) { + None => Err(Err::Incomplete(Needed::Unknown)), + Some((_, false)) => Err(Err::Error(Error::from_error_kind(i, ErrorKind::Satisfy))), + Some((c, true)) => Ok((i.slice(c.len()..), c)), + } +} + +/// Recognizes one of the provided characters. +/// +/// *Streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there's not enough input data. +/// # Example +/// +/// ``` +/// # use nom::{Err, error::ErrorKind, Needed}; +/// # use nom::character::streaming::one_of; +/// assert_eq!(one_of::<_, _, (_, ErrorKind)>("abc")("b"), Ok(("", 'b'))); +/// assert_eq!(one_of::<_, _, (_, ErrorKind)>("a")("bc"), Err(Err::Error(("bc", ErrorKind::OneOf)))); +/// assert_eq!(one_of::<_, _, (_, ErrorKind)>("a")(""), Err(Err::Incomplete(Needed::new(1)))); +/// ``` +pub fn one_of>(list: T) -> impl Fn(I) -> IResult +where + I: Slice> + InputIter, + ::Item: AsChar + Copy, + T: FindToken<::Item>, +{ + move |i: I| match (i).iter_elements().next().map(|c| (c, list.find_token(c))) { + None => Err(Err::Incomplete(Needed::new(1))), + Some((_, false)) => Err(Err::Error(Error::from_error_kind(i, ErrorKind::OneOf))), + Some((c, true)) => Ok((i.slice(c.len()..), c.as_char())), + } +} + +/// Recognizes a character that is not in the provided characters. +/// +/// *Streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there's not enough input data. 
+/// # Example +/// +/// ``` +/// # use nom::{Err, error::ErrorKind, Needed}; +/// # use nom::character::streaming::none_of; +/// assert_eq!(none_of::<_, _, (_, ErrorKind)>("abc")("z"), Ok(("", 'z'))); +/// assert_eq!(none_of::<_, _, (_, ErrorKind)>("ab")("a"), Err(Err::Error(("a", ErrorKind::NoneOf)))); +/// assert_eq!(none_of::<_, _, (_, ErrorKind)>("a")(""), Err(Err::Incomplete(Needed::new(1)))); +/// ``` +pub fn none_of>(list: T) -> impl Fn(I) -> IResult +where + I: Slice> + InputIter, + ::Item: AsChar + Copy, + T: FindToken<::Item>, +{ + move |i: I| match (i).iter_elements().next().map(|c| (c, !list.find_token(c))) { + None => Err(Err::Incomplete(Needed::new(1))), + Some((_, false)) => Err(Err::Error(Error::from_error_kind(i, ErrorKind::NoneOf))), + Some((c, true)) => Ok((i.slice(c.len()..), c.as_char())), + } +} + +/// Recognizes the string "\r\n". +/// +/// *Streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there's not enough input data. +/// # Example +/// +/// ``` +/// # use nom::{Err, error::ErrorKind, IResult, Needed}; +/// # use nom::character::streaming::crlf; +/// assert_eq!(crlf::<_, (_, ErrorKind)>("\r\nc"), Ok(("c", "\r\n"))); +/// assert_eq!(crlf::<_, (_, ErrorKind)>("ab\r\nc"), Err(Err::Error(("ab\r\nc", ErrorKind::CrLf)))); +/// assert_eq!(crlf::<_, (_, ErrorKind)>(""), Err(Err::Incomplete(Needed::new(2)))); +/// ``` +pub fn crlf>(input: T) -> IResult +where + T: Slice> + Slice> + Slice>, + T: InputIter, + T: Compare<&'static str>, +{ + match input.compare("\r\n") { + //FIXME: is this the right index? + CompareResult::Ok => Ok((input.slice(2..), input.slice(0..2))), + CompareResult::Incomplete => Err(Err::Incomplete(Needed::new(2))), + CompareResult::Error => { + let e: ErrorKind = ErrorKind::CrLf; + Err(Err::Error(E::from_error_kind(input, e))) + } + } +} + +/// Recognizes a string of any char except '\r\n' or '\n'. +/// +/// *Streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there's not enough input data. 
+/// # Example +/// +/// ``` +/// # use nom::{Err, error::{Error, ErrorKind}, IResult, Needed}; +/// # use nom::character::streaming::not_line_ending; +/// assert_eq!(not_line_ending::<_, (_, ErrorKind)>("ab\r\nc"), Ok(("\r\nc", "ab"))); +/// assert_eq!(not_line_ending::<_, (_, ErrorKind)>("abc"), Err(Err::Incomplete(Needed::Unknown))); +/// assert_eq!(not_line_ending::<_, (_, ErrorKind)>(""), Err(Err::Incomplete(Needed::Unknown))); +/// assert_eq!(not_line_ending::<_, (_, ErrorKind)>("a\rb\nc"), Err(Err::Error(("a\rb\nc", ErrorKind::Tag )))); +/// assert_eq!(not_line_ending::<_, (_, ErrorKind)>("a\rbc"), Err(Err::Error(("a\rbc", ErrorKind::Tag )))); +/// ``` +pub fn not_line_ending>(input: T) -> IResult +where + T: Slice> + Slice> + Slice>, + T: InputIter + InputLength, + T: Compare<&'static str>, + ::Item: AsChar, + ::Item: AsChar, +{ + match input.position(|item| { + let c = item.as_char(); + c == '\r' || c == '\n' + }) { + None => Err(Err::Incomplete(Needed::Unknown)), + Some(index) => { + let mut it = input.slice(index..).iter_elements(); + let nth = it.next().unwrap().as_char(); + if nth == '\r' { + let sliced = input.slice(index..); + let comp = sliced.compare("\r\n"); + match comp { + //FIXME: calculate the right index + CompareResult::Incomplete => Err(Err::Incomplete(Needed::Unknown)), + CompareResult::Error => { + let e: ErrorKind = ErrorKind::Tag; + Err(Err::Error(E::from_error_kind(input, e))) + } + CompareResult::Ok => Ok((input.slice(index..), input.slice(..index))), + } + } else { + Ok((input.slice(index..), input.slice(..index))) + } + } + } +} + +/// Recognizes an end of line (both '\n' and '\r\n'). +/// +/// *Streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there's not enough input data. +/// # Example +/// +/// ``` +/// # use nom::{Err, error::ErrorKind, IResult, Needed}; +/// # use nom::character::streaming::line_ending; +/// assert_eq!(line_ending::<_, (_, ErrorKind)>("\r\nc"), Ok(("c", "\r\n"))); +/// assert_eq!(line_ending::<_, (_, ErrorKind)>("ab\r\nc"), Err(Err::Error(("ab\r\nc", ErrorKind::CrLf)))); +/// assert_eq!(line_ending::<_, (_, ErrorKind)>(""), Err(Err::Incomplete(Needed::new(1)))); +/// ``` +pub fn line_ending>(input: T) -> IResult +where + T: Slice> + Slice> + Slice>, + T: InputIter + InputLength, + T: Compare<&'static str>, +{ + match input.compare("\n") { + CompareResult::Ok => Ok((input.slice(1..), input.slice(0..1))), + CompareResult::Incomplete => Err(Err::Incomplete(Needed::new(1))), + CompareResult::Error => { + match input.compare("\r\n") { + //FIXME: is this the right index? + CompareResult::Ok => Ok((input.slice(2..), input.slice(0..2))), + CompareResult::Incomplete => Err(Err::Incomplete(Needed::new(2))), + CompareResult::Error => Err(Err::Error(E::from_error_kind(input, ErrorKind::CrLf))), + } + } + } +} + +/// Matches a newline character '\\n'. +/// +/// *Streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there's not enough input data. +/// # Example +/// +/// ``` +/// # use nom::{Err, error::ErrorKind, IResult, Needed}; +/// # use nom::character::streaming::newline; +/// assert_eq!(newline::<_, (_, ErrorKind)>("\nc"), Ok(("c", '\n'))); +/// assert_eq!(newline::<_, (_, ErrorKind)>("\r\nc"), Err(Err::Error(("\r\nc", ErrorKind::Char)))); +/// assert_eq!(newline::<_, (_, ErrorKind)>(""), Err(Err::Incomplete(Needed::new(1)))); +/// ``` +pub fn newline>(input: I) -> IResult +where + I: Slice> + InputIter + InputLength, + ::Item: AsChar, +{ + char('\n')(input) +} + +/// Matches a tab character '\t'. 
+/// +/// *Streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there's not enough input data. +/// # Example +/// +/// ``` +/// # use nom::{Err, error::ErrorKind, IResult, Needed}; +/// # use nom::character::streaming::tab; +/// assert_eq!(tab::<_, (_, ErrorKind)>("\tc"), Ok(("c", '\t'))); +/// assert_eq!(tab::<_, (_, ErrorKind)>("\r\nc"), Err(Err::Error(("\r\nc", ErrorKind::Char)))); +/// assert_eq!(tab::<_, (_, ErrorKind)>(""), Err(Err::Incomplete(Needed::new(1)))); +/// ``` +pub fn tab>(input: I) -> IResult +where + I: Slice> + InputIter + InputLength, + ::Item: AsChar, +{ + char('\t')(input) +} + +/// Matches one byte as a character. Note that the input type will +/// accept a `str`, but not a `&[u8]`, unlike many other nom parsers. +/// +/// *Streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there's not enough input data. +/// # Example +/// +/// ``` +/// # use nom::{character::streaming::anychar, Err, error::ErrorKind, IResult, Needed}; +/// assert_eq!(anychar::<_, (_, ErrorKind)>("abc"), Ok(("bc",'a'))); +/// assert_eq!(anychar::<_, (_, ErrorKind)>(""), Err(Err::Incomplete(Needed::new(1)))); +/// ``` +pub fn anychar>(input: T) -> IResult +where + T: InputIter + InputLength + Slice>, + ::Item: AsChar, +{ + let mut it = input.iter_indices(); + match it.next() { + None => Err(Err::Incomplete(Needed::new(1))), + Some((_, c)) => match it.next() { + None => Ok((input.slice(input.input_len()..), c.as_char())), + Some((idx, _)) => Ok((input.slice(idx..), c.as_char())), + }, + } +} + +/// Recognizes zero or more lowercase and uppercase ASCII alphabetic characters: a-z, A-Z +/// +/// *Streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there's not enough input data, +/// or if no terminating token is found (a non alphabetic character). +/// # Example +/// +/// ``` +/// # use nom::{Err, error::ErrorKind, IResult, Needed}; +/// # use nom::character::streaming::alpha0; +/// assert_eq!(alpha0::<_, (_, ErrorKind)>("ab1c"), Ok(("1c", "ab"))); +/// assert_eq!(alpha0::<_, (_, ErrorKind)>("1c"), Ok(("1c", ""))); +/// assert_eq!(alpha0::<_, (_, ErrorKind)>(""), Err(Err::Incomplete(Needed::new(1)))); +/// ``` +pub fn alpha0>(input: T) -> IResult +where + T: InputTakeAtPosition, + ::Item: AsChar, +{ + input.split_at_position(|item| !item.is_alpha()) +} + +/// Recognizes one or more lowercase and uppercase ASCII alphabetic characters: a-z, A-Z +/// +/// *Streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there's not enough input data, +/// or if no terminating token is found (a non alphabetic character). +/// # Example +/// +/// ``` +/// # use nom::{Err, error::ErrorKind, IResult, Needed}; +/// # use nom::character::streaming::alpha1; +/// assert_eq!(alpha1::<_, (_, ErrorKind)>("aB1c"), Ok(("1c", "aB"))); +/// assert_eq!(alpha1::<_, (_, ErrorKind)>("1c"), Err(Err::Error(("1c", ErrorKind::Alpha)))); +/// assert_eq!(alpha1::<_, (_, ErrorKind)>(""), Err(Err::Incomplete(Needed::new(1)))); +/// ``` +pub fn alpha1>(input: T) -> IResult +where + T: InputTakeAtPosition, + ::Item: AsChar, +{ + input.split_at_position1(|item| !item.is_alpha(), ErrorKind::Alpha) +} + +/// Recognizes zero or more ASCII numerical characters: 0-9 +/// +/// *Streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there's not enough input data, +/// or if no terminating token is found (a non digit character). 
+/// # Example +/// +/// ``` +/// # use nom::{Err, error::ErrorKind, IResult, Needed}; +/// # use nom::character::streaming::digit0; +/// assert_eq!(digit0::<_, (_, ErrorKind)>("21c"), Ok(("c", "21"))); +/// assert_eq!(digit0::<_, (_, ErrorKind)>("a21c"), Ok(("a21c", ""))); +/// assert_eq!(digit0::<_, (_, ErrorKind)>(""), Err(Err::Incomplete(Needed::new(1)))); +/// ``` +pub fn digit0>(input: T) -> IResult +where + T: InputTakeAtPosition, + ::Item: AsChar, +{ + input.split_at_position(|item| !item.is_dec_digit()) +} + +/// Recognizes one or more ASCII numerical characters: 0-9 +/// +/// *Streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there's not enough input data, +/// or if no terminating token is found (a non digit character). +/// # Example +/// +/// ``` +/// # use nom::{Err, error::ErrorKind, IResult, Needed}; +/// # use nom::character::streaming::digit1; +/// assert_eq!(digit1::<_, (_, ErrorKind)>("21c"), Ok(("c", "21"))); +/// assert_eq!(digit1::<_, (_, ErrorKind)>("c1"), Err(Err::Error(("c1", ErrorKind::Digit)))); +/// assert_eq!(digit1::<_, (_, ErrorKind)>(""), Err(Err::Incomplete(Needed::new(1)))); +/// ``` +pub fn digit1>(input: T) -> IResult +where + T: InputTakeAtPosition, + ::Item: AsChar, +{ + input.split_at_position1(|item| !item.is_dec_digit(), ErrorKind::Digit) +} + +/// Recognizes zero or more ASCII hexadecimal numerical characters: 0-9, A-F, a-f +/// +/// *Streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there's not enough input data, +/// or if no terminating token is found (a non hexadecimal digit character). +/// # Example +/// +/// ``` +/// # use nom::{Err, error::ErrorKind, IResult, Needed}; +/// # use nom::character::streaming::hex_digit0; +/// assert_eq!(hex_digit0::<_, (_, ErrorKind)>("21cZ"), Ok(("Z", "21c"))); +/// assert_eq!(hex_digit0::<_, (_, ErrorKind)>("Z21c"), Ok(("Z21c", ""))); +/// assert_eq!(hex_digit0::<_, (_, ErrorKind)>(""), Err(Err::Incomplete(Needed::new(1)))); +/// ``` +pub fn hex_digit0>(input: T) -> IResult +where + T: InputTakeAtPosition, + ::Item: AsChar, +{ + input.split_at_position(|item| !item.is_hex_digit()) +} + +/// Recognizes one or more ASCII hexadecimal numerical characters: 0-9, A-F, a-f +/// +/// *Streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there's not enough input data, +/// or if no terminating token is found (a non hexadecimal digit character). +/// # Example +/// +/// ``` +/// # use nom::{Err, error::ErrorKind, IResult, Needed}; +/// # use nom::character::streaming::hex_digit1; +/// assert_eq!(hex_digit1::<_, (_, ErrorKind)>("21cZ"), Ok(("Z", "21c"))); +/// assert_eq!(hex_digit1::<_, (_, ErrorKind)>("H2"), Err(Err::Error(("H2", ErrorKind::HexDigit)))); +/// assert_eq!(hex_digit1::<_, (_, ErrorKind)>(""), Err(Err::Incomplete(Needed::new(1)))); +/// ``` +pub fn hex_digit1>(input: T) -> IResult +where + T: InputTakeAtPosition, + ::Item: AsChar, +{ + input.split_at_position1(|item| !item.is_hex_digit(), ErrorKind::HexDigit) +} + +/// Recognizes zero or more octal characters: 0-7 +/// +/// *Streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there's not enough input data, +/// or if no terminating token is found (a non octal digit character). 
+/// # Example +/// +/// ``` +/// # use nom::{Err, error::ErrorKind, IResult, Needed}; +/// # use nom::character::streaming::oct_digit0; +/// assert_eq!(oct_digit0::<_, (_, ErrorKind)>("21cZ"), Ok(("cZ", "21"))); +/// assert_eq!(oct_digit0::<_, (_, ErrorKind)>("Z21c"), Ok(("Z21c", ""))); +/// assert_eq!(oct_digit0::<_, (_, ErrorKind)>(""), Err(Err::Incomplete(Needed::new(1)))); +/// ``` +pub fn oct_digit0>(input: T) -> IResult +where + T: InputTakeAtPosition, + ::Item: AsChar, +{ + input.split_at_position(|item| !item.is_oct_digit()) +} + +/// Recognizes one or more octal characters: 0-7 +/// +/// *Streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there's not enough input data, +/// or if no terminating token is found (a non octal digit character). +/// # Example +/// +/// ``` +/// # use nom::{Err, error::ErrorKind, IResult, Needed}; +/// # use nom::character::streaming::oct_digit1; +/// assert_eq!(oct_digit1::<_, (_, ErrorKind)>("21cZ"), Ok(("cZ", "21"))); +/// assert_eq!(oct_digit1::<_, (_, ErrorKind)>("H2"), Err(Err::Error(("H2", ErrorKind::OctDigit)))); +/// assert_eq!(oct_digit1::<_, (_, ErrorKind)>(""), Err(Err::Incomplete(Needed::new(1)))); +/// ``` +pub fn oct_digit1>(input: T) -> IResult +where + T: InputTakeAtPosition, + ::Item: AsChar, +{ + input.split_at_position1(|item| !item.is_oct_digit(), ErrorKind::OctDigit) +} + +/// Recognizes zero or more ASCII numerical and alphabetic characters: 0-9, a-z, A-Z +/// +/// *Streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there's not enough input data, +/// or if no terminating token is found (a non alphanumerical character). +/// # Example +/// +/// ``` +/// # use nom::{Err, error::ErrorKind, IResult, Needed}; +/// # use nom::character::streaming::alphanumeric0; +/// assert_eq!(alphanumeric0::<_, (_, ErrorKind)>("21cZ%1"), Ok(("%1", "21cZ"))); +/// assert_eq!(alphanumeric0::<_, (_, ErrorKind)>("&Z21c"), Ok(("&Z21c", ""))); +/// assert_eq!(alphanumeric0::<_, (_, ErrorKind)>(""), Err(Err::Incomplete(Needed::new(1)))); +/// ``` +pub fn alphanumeric0>(input: T) -> IResult +where + T: InputTakeAtPosition, + ::Item: AsChar, +{ + input.split_at_position(|item| !item.is_alphanum()) +} + +/// Recognizes one or more ASCII numerical and alphabetic characters: 0-9, a-z, A-Z +/// +/// *Streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there's not enough input data, +/// or if no terminating token is found (a non alphanumerical character). +/// # Example +/// +/// ``` +/// # use nom::{Err, error::ErrorKind, IResult, Needed}; +/// # use nom::character::streaming::alphanumeric1; +/// assert_eq!(alphanumeric1::<_, (_, ErrorKind)>("21cZ%1"), Ok(("%1", "21cZ"))); +/// assert_eq!(alphanumeric1::<_, (_, ErrorKind)>("&H2"), Err(Err::Error(("&H2", ErrorKind::AlphaNumeric)))); +/// assert_eq!(alphanumeric1::<_, (_, ErrorKind)>(""), Err(Err::Incomplete(Needed::new(1)))); +/// ``` +pub fn alphanumeric1>(input: T) -> IResult +where + T: InputTakeAtPosition, + ::Item: AsChar, +{ + input.split_at_position1(|item| !item.is_alphanum(), ErrorKind::AlphaNumeric) +} + +/// Recognizes zero or more spaces and tabs. +/// +/// *Streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there's not enough input data, +/// or if no terminating token is found (a non space character). 
+/// # Example +/// +/// ``` +/// # use nom::{Err, error::ErrorKind, IResult, Needed}; +/// # use nom::character::streaming::space0; +/// assert_eq!(space0::<_, (_, ErrorKind)>(" \t21c"), Ok(("21c", " \t"))); +/// assert_eq!(space0::<_, (_, ErrorKind)>("Z21c"), Ok(("Z21c", ""))); +/// assert_eq!(space0::<_, (_, ErrorKind)>(""), Err(Err::Incomplete(Needed::new(1)))); +/// ``` +pub fn space0>(input: T) -> IResult +where + T: InputTakeAtPosition, + ::Item: AsChar + Clone, +{ + input.split_at_position(|item| { + let c = item.as_char(); + !(c == ' ' || c == '\t') + }) +} +/// Recognizes one or more spaces and tabs. +/// +/// *Streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there's not enough input data, +/// or if no terminating token is found (a non space character). +/// # Example +/// +/// ``` +/// # use nom::{Err, error::ErrorKind, IResult, Needed}; +/// # use nom::character::streaming::space1; +/// assert_eq!(space1::<_, (_, ErrorKind)>(" \t21c"), Ok(("21c", " \t"))); +/// assert_eq!(space1::<_, (_, ErrorKind)>("H2"), Err(Err::Error(("H2", ErrorKind::Space)))); +/// assert_eq!(space1::<_, (_, ErrorKind)>(""), Err(Err::Incomplete(Needed::new(1)))); +/// ``` +pub fn space1>(input: T) -> IResult +where + T: InputTakeAtPosition, + ::Item: AsChar + Clone, +{ + input.split_at_position1( + |item| { + let c = item.as_char(); + !(c == ' ' || c == '\t') + }, + ErrorKind::Space, + ) +} + +/// Recognizes zero or more spaces, tabs, carriage returns and line feeds. +/// +/// *Streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there's not enough input data, +/// or if no terminating token is found (a non space character). +/// # Example +/// +/// ``` +/// # use nom::{Err, error::ErrorKind, IResult, Needed}; +/// # use nom::character::streaming::multispace0; +/// assert_eq!(multispace0::<_, (_, ErrorKind)>(" \t\n\r21c"), Ok(("21c", " \t\n\r"))); +/// assert_eq!(multispace0::<_, (_, ErrorKind)>("Z21c"), Ok(("Z21c", ""))); +/// assert_eq!(multispace0::<_, (_, ErrorKind)>(""), Err(Err::Incomplete(Needed::new(1)))); +/// ``` +pub fn multispace0>(input: T) -> IResult +where + T: InputTakeAtPosition, + ::Item: AsChar + Clone, +{ + input.split_at_position(|item| { + let c = item.as_char(); + !(c == ' ' || c == '\t' || c == '\r' || c == '\n') + }) +} + +/// Recognizes one or more spaces, tabs, carriage returns and line feeds. +/// +/// *Streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there's not enough input data, +/// or if no terminating token is found (a non space character). 
+/// # Example +/// +/// ``` +/// # use nom::{Err, error::ErrorKind, IResult, Needed}; +/// # use nom::character::streaming::multispace1; +/// assert_eq!(multispace1::<_, (_, ErrorKind)>(" \t\n\r21c"), Ok(("21c", " \t\n\r"))); +/// assert_eq!(multispace1::<_, (_, ErrorKind)>("H2"), Err(Err::Error(("H2", ErrorKind::MultiSpace)))); +/// assert_eq!(multispace1::<_, (_, ErrorKind)>(""), Err(Err::Incomplete(Needed::new(1)))); +/// ``` +pub fn multispace1>(input: T) -> IResult +where + T: InputTakeAtPosition, + ::Item: AsChar + Clone, +{ + input.split_at_position1( + |item| { + let c = item.as_char(); + !(c == ' ' || c == '\t' || c == '\r' || c == '\n') + }, + ErrorKind::MultiSpace, + ) +} + +pub(crate) fn sign>(input: T) -> IResult +where + T: Clone + InputTake + InputLength, + T: for<'a> Compare<&'a [u8]>, +{ + use crate::bytes::streaming::tag; + use crate::combinator::value; + + let (i, opt_sign) = opt(alt(( + value(false, tag(&b"-"[..])), + value(true, tag(&b"+"[..])), + )))(input)?; + let sign = opt_sign.unwrap_or(true); + + Ok((i, sign)) +} + +#[doc(hidden)] +macro_rules! ints { + ($($t:tt)+) => { + $( + /// will parse a number in text form to a number + /// + /// *Complete version*: can parse until the end of input. + pub fn $t>(input: T) -> IResult + where + T: InputIter + Slice> + InputLength + InputTake + Clone, + ::Item: AsChar, + T: for <'a> Compare<&'a[u8]>, + { + let (i, sign) = sign(input.clone())?; + + if i.input_len() == 0 { + return Err(Err::Incomplete(Needed::new(1))); + } + + let mut value: $t = 0; + if sign { + for (pos, c) in i.iter_indices() { + match c.as_char().to_digit(10) { + None => { + if pos == 0 { + return Err(Err::Error(E::from_error_kind(input, ErrorKind::Digit))); + } else { + return Ok((i.slice(pos..), value)); + } + }, + Some(d) => match value.checked_mul(10).and_then(|v| v.checked_add(d as $t)) { + None => return Err(Err::Error(E::from_error_kind(input, ErrorKind::Digit))), + Some(v) => value = v, + } + } + } + } else { + for (pos, c) in i.iter_indices() { + match c.as_char().to_digit(10) { + None => { + if pos == 0 { + return Err(Err::Error(E::from_error_kind(input, ErrorKind::Digit))); + } else { + return Ok((i.slice(pos..), value)); + } + }, + Some(d) => match value.checked_mul(10).and_then(|v| v.checked_sub(d as $t)) { + None => return Err(Err::Error(E::from_error_kind(input, ErrorKind::Digit))), + Some(v) => value = v, + } + } + } + } + + Err(Err::Incomplete(Needed::new(1))) + } + )+ + } +} + +ints! { i8 i16 i32 i64 i128 } + +#[doc(hidden)] +macro_rules! uints { + ($($t:tt)+) => { + $( + /// will parse a number in text form to a number + /// + /// *Complete version*: can parse until the end of input. + pub fn $t>(input: T) -> IResult + where + T: InputIter + Slice> + InputLength, + ::Item: AsChar, + { + let i = input; + + if i.input_len() == 0 { + return Err(Err::Incomplete(Needed::new(1))); + } + + let mut value: $t = 0; + for (pos, c) in i.iter_indices() { + match c.as_char().to_digit(10) { + None => { + if pos == 0 { + return Err(Err::Error(E::from_error_kind(i, ErrorKind::Digit))); + } else { + return Ok((i.slice(pos..), value)); + } + }, + Some(d) => match value.checked_mul(10).and_then(|v| v.checked_add(d as $t)) { + None => return Err(Err::Error(E::from_error_kind(i, ErrorKind::Digit))), + Some(v) => value = v, + } + } + } + + Err(Err::Incomplete(Needed::new(1))) + } + )+ + } +} + +uints! 
{ u8 u16 u32 u64 u128 } + +#[cfg(test)] +mod tests { + use super::*; + use crate::error::ErrorKind; + use crate::internal::{Err, Needed}; + use crate::sequence::pair; + use crate::traits::ParseTo; + use proptest::prelude::*; + + macro_rules! assert_parse( + ($left: expr, $right: expr) => { + let res: $crate::IResult<_, _, (_, ErrorKind)> = $left; + assert_eq!(res, $right); + }; + ); + + #[test] + fn anychar_str() { + use super::anychar; + assert_eq!(anychar::<_, (&str, ErrorKind)>("Ә"), Ok(("", 'Ә'))); + } + + #[test] + fn character() { + let a: &[u8] = b"abcd"; + let b: &[u8] = b"1234"; + let c: &[u8] = b"a123"; + let d: &[u8] = "azé12".as_bytes(); + let e: &[u8] = b" "; + let f: &[u8] = b" ;"; + //assert_eq!(alpha1::<_, (_, ErrorKind)>(a), Err(Err::Incomplete(Needed::new(1)))); + assert_parse!(alpha1(a), Err(Err::Incomplete(Needed::new(1)))); + assert_eq!(alpha1(b), Err(Err::Error((b, ErrorKind::Alpha)))); + assert_eq!(alpha1::<_, (_, ErrorKind)>(c), Ok((&c[1..], &b"a"[..]))); + assert_eq!( + alpha1::<_, (_, ErrorKind)>(d), + Ok(("é12".as_bytes(), &b"az"[..])) + ); + assert_eq!(digit1(a), Err(Err::Error((a, ErrorKind::Digit)))); + assert_eq!( + digit1::<_, (_, ErrorKind)>(b), + Err(Err::Incomplete(Needed::new(1))) + ); + assert_eq!(digit1(c), Err(Err::Error((c, ErrorKind::Digit)))); + assert_eq!(digit1(d), Err(Err::Error((d, ErrorKind::Digit)))); + assert_eq!( + hex_digit1::<_, (_, ErrorKind)>(a), + Err(Err::Incomplete(Needed::new(1))) + ); + assert_eq!( + hex_digit1::<_, (_, ErrorKind)>(b), + Err(Err::Incomplete(Needed::new(1))) + ); + assert_eq!( + hex_digit1::<_, (_, ErrorKind)>(c), + Err(Err::Incomplete(Needed::new(1))) + ); + assert_eq!( + hex_digit1::<_, (_, ErrorKind)>(d), + Ok(("zé12".as_bytes(), &b"a"[..])) + ); + assert_eq!(hex_digit1(e), Err(Err::Error((e, ErrorKind::HexDigit)))); + assert_eq!(oct_digit1(a), Err(Err::Error((a, ErrorKind::OctDigit)))); + assert_eq!( + oct_digit1::<_, (_, ErrorKind)>(b), + Err(Err::Incomplete(Needed::new(1))) + ); + assert_eq!(oct_digit1(c), Err(Err::Error((c, ErrorKind::OctDigit)))); + assert_eq!(oct_digit1(d), Err(Err::Error((d, ErrorKind::OctDigit)))); + assert_eq!( + alphanumeric1::<_, (_, ErrorKind)>(a), + Err(Err::Incomplete(Needed::new(1))) + ); + //assert_eq!(fix_error!(b,(), alphanumeric1), Ok((empty, b))); + assert_eq!( + alphanumeric1::<_, (_, ErrorKind)>(c), + Err(Err::Incomplete(Needed::new(1))) + ); + assert_eq!( + alphanumeric1::<_, (_, ErrorKind)>(d), + Ok(("é12".as_bytes(), &b"az"[..])) + ); + assert_eq!( + space1::<_, (_, ErrorKind)>(e), + Err(Err::Incomplete(Needed::new(1))) + ); + assert_eq!(space1::<_, (_, ErrorKind)>(f), Ok((&b";"[..], &b" "[..]))); + } + + #[cfg(feature = "alloc")] + #[test] + fn character_s() { + let a = "abcd"; + let b = "1234"; + let c = "a123"; + let d = "azé12"; + let e = " "; + assert_eq!( + alpha1::<_, (_, ErrorKind)>(a), + Err(Err::Incomplete(Needed::new(1))) + ); + assert_eq!(alpha1(b), Err(Err::Error((b, ErrorKind::Alpha)))); + assert_eq!(alpha1::<_, (_, ErrorKind)>(c), Ok((&c[1..], &"a"[..]))); + assert_eq!(alpha1::<_, (_, ErrorKind)>(d), Ok(("é12", &"az"[..]))); + assert_eq!(digit1(a), Err(Err::Error((a, ErrorKind::Digit)))); + assert_eq!( + digit1::<_, (_, ErrorKind)>(b), + Err(Err::Incomplete(Needed::new(1))) + ); + assert_eq!(digit1(c), Err(Err::Error((c, ErrorKind::Digit)))); + assert_eq!(digit1(d), Err(Err::Error((d, ErrorKind::Digit)))); + assert_eq!( + hex_digit1::<_, (_, ErrorKind)>(a), + Err(Err::Incomplete(Needed::new(1))) + ); + assert_eq!( + hex_digit1::<_, (_, ErrorKind)>(b), + 
Err(Err::Incomplete(Needed::new(1))) + ); + assert_eq!( + hex_digit1::<_, (_, ErrorKind)>(c), + Err(Err::Incomplete(Needed::new(1))) + ); + assert_eq!(hex_digit1::<_, (_, ErrorKind)>(d), Ok(("zé12", &"a"[..]))); + assert_eq!(hex_digit1(e), Err(Err::Error((e, ErrorKind::HexDigit)))); + assert_eq!(oct_digit1(a), Err(Err::Error((a, ErrorKind::OctDigit)))); + assert_eq!( + oct_digit1::<_, (_, ErrorKind)>(b), + Err(Err::Incomplete(Needed::new(1))) + ); + assert_eq!(oct_digit1(c), Err(Err::Error((c, ErrorKind::OctDigit)))); + assert_eq!(oct_digit1(d), Err(Err::Error((d, ErrorKind::OctDigit)))); + assert_eq!( + alphanumeric1::<_, (_, ErrorKind)>(a), + Err(Err::Incomplete(Needed::new(1))) + ); + //assert_eq!(fix_error!(b,(), alphanumeric1), Ok((empty, b))); + assert_eq!( + alphanumeric1::<_, (_, ErrorKind)>(c), + Err(Err::Incomplete(Needed::new(1))) + ); + assert_eq!(alphanumeric1::<_, (_, ErrorKind)>(d), Ok(("é12", "az"))); + assert_eq!( + space1::<_, (_, ErrorKind)>(e), + Err(Err::Incomplete(Needed::new(1))) + ); + } + + use crate::traits::Offset; + #[test] + fn offset() { + let a = &b"abcd;"[..]; + let b = &b"1234;"[..]; + let c = &b"a123;"[..]; + let d = &b" \t;"[..]; + let e = &b" \t\r\n;"[..]; + let f = &b"123abcDEF;"[..]; + + match alpha1::<_, (_, ErrorKind)>(a) { + Ok((i, _)) => { + assert_eq!(a.offset(i) + i.len(), a.len()); + } + _ => panic!("wrong return type in offset test for alpha"), + } + match digit1::<_, (_, ErrorKind)>(b) { + Ok((i, _)) => { + assert_eq!(b.offset(i) + i.len(), b.len()); + } + _ => panic!("wrong return type in offset test for digit"), + } + match alphanumeric1::<_, (_, ErrorKind)>(c) { + Ok((i, _)) => { + assert_eq!(c.offset(i) + i.len(), c.len()); + } + _ => panic!("wrong return type in offset test for alphanumeric"), + } + match space1::<_, (_, ErrorKind)>(d) { + Ok((i, _)) => { + assert_eq!(d.offset(i) + i.len(), d.len()); + } + _ => panic!("wrong return type in offset test for space"), + } + match multispace1::<_, (_, ErrorKind)>(e) { + Ok((i, _)) => { + assert_eq!(e.offset(i) + i.len(), e.len()); + } + _ => panic!("wrong return type in offset test for multispace"), + } + match hex_digit1::<_, (_, ErrorKind)>(f) { + Ok((i, _)) => { + assert_eq!(f.offset(i) + i.len(), f.len()); + } + _ => panic!("wrong return type in offset test for hex_digit"), + } + match oct_digit1::<_, (_, ErrorKind)>(f) { + Ok((i, _)) => { + assert_eq!(f.offset(i) + i.len(), f.len()); + } + _ => panic!("wrong return type in offset test for oct_digit"), + } + } + + #[test] + fn is_not_line_ending_bytes() { + let a: &[u8] = b"ab12cd\nefgh"; + assert_eq!( + not_line_ending::<_, (_, ErrorKind)>(a), + Ok((&b"\nefgh"[..], &b"ab12cd"[..])) + ); + + let b: &[u8] = b"ab12cd\nefgh\nijkl"; + assert_eq!( + not_line_ending::<_, (_, ErrorKind)>(b), + Ok((&b"\nefgh\nijkl"[..], &b"ab12cd"[..])) + ); + + let c: &[u8] = b"ab12cd\r\nefgh\nijkl"; + assert_eq!( + not_line_ending::<_, (_, ErrorKind)>(c), + Ok((&b"\r\nefgh\nijkl"[..], &b"ab12cd"[..])) + ); + + let d: &[u8] = b"ab12cd"; + assert_eq!( + not_line_ending::<_, (_, ErrorKind)>(d), + Err(Err::Incomplete(Needed::Unknown)) + ); + } + + #[test] + fn is_not_line_ending_str() { + /* + let a: &str = "ab12cd\nefgh"; + assert_eq!(not_line_ending(a), Ok((&"\nefgh"[..], &"ab12cd"[..]))); + + let b: &str = "ab12cd\nefgh\nijkl"; + assert_eq!(not_line_ending(b), Ok((&"\nefgh\nijkl"[..], &"ab12cd"[..]))); + + let c: &str = "ab12cd\r\nefgh\nijkl"; + assert_eq!(not_line_ending(c), Ok((&"\r\nefgh\nijkl"[..], &"ab12cd"[..]))); + + let d = "βèƒôřè\nÂßÇáƒƭèř"; + 
assert_eq!(not_line_ending(d), Ok((&"\nÂßÇáƒƭèř"[..], &"βèƒôřè"[..]))); + + let e = "βèƒôřè\r\nÂßÇáƒƭèř"; + assert_eq!(not_line_ending(e), Ok((&"\r\nÂßÇáƒƭèř"[..], &"βèƒôřè"[..]))); + */ + + let f = "βèƒôřè\rÂßÇáƒƭèř"; + assert_eq!(not_line_ending(f), Err(Err::Error((f, ErrorKind::Tag)))); + + let g2: &str = "ab12cd"; + assert_eq!( + not_line_ending::<_, (_, ErrorKind)>(g2), + Err(Err::Incomplete(Needed::Unknown)) + ); + } + + #[test] + fn hex_digit_test() { + let i = &b"0123456789abcdefABCDEF;"[..]; + assert_parse!(hex_digit1(i), Ok((&b";"[..], &i[..i.len() - 1]))); + + let i = &b"g"[..]; + assert_parse!( + hex_digit1(i), + Err(Err::Error(error_position!(i, ErrorKind::HexDigit))) + ); + + let i = &b"G"[..]; + assert_parse!( + hex_digit1(i), + Err(Err::Error(error_position!(i, ErrorKind::HexDigit))) + ); + + assert!(crate::character::is_hex_digit(b'0')); + assert!(crate::character::is_hex_digit(b'9')); + assert!(crate::character::is_hex_digit(b'a')); + assert!(crate::character::is_hex_digit(b'f')); + assert!(crate::character::is_hex_digit(b'A')); + assert!(crate::character::is_hex_digit(b'F')); + assert!(!crate::character::is_hex_digit(b'g')); + assert!(!crate::character::is_hex_digit(b'G')); + assert!(!crate::character::is_hex_digit(b'/')); + assert!(!crate::character::is_hex_digit(b':')); + assert!(!crate::character::is_hex_digit(b'@')); + assert!(!crate::character::is_hex_digit(b'\x60')); + } + + #[test] + fn oct_digit_test() { + let i = &b"01234567;"[..]; + assert_parse!(oct_digit1(i), Ok((&b";"[..], &i[..i.len() - 1]))); + + let i = &b"8"[..]; + assert_parse!( + oct_digit1(i), + Err(Err::Error(error_position!(i, ErrorKind::OctDigit))) + ); + + assert!(crate::character::is_oct_digit(b'0')); + assert!(crate::character::is_oct_digit(b'7')); + assert!(!crate::character::is_oct_digit(b'8')); + assert!(!crate::character::is_oct_digit(b'9')); + assert!(!crate::character::is_oct_digit(b'a')); + assert!(!crate::character::is_oct_digit(b'A')); + assert!(!crate::character::is_oct_digit(b'/')); + assert!(!crate::character::is_oct_digit(b':')); + assert!(!crate::character::is_oct_digit(b'@')); + assert!(!crate::character::is_oct_digit(b'\x60')); + } + + #[test] + fn full_line_windows() { + fn take_full_line(i: &[u8]) -> IResult<&[u8], (&[u8], &[u8])> { + pair(not_line_ending, line_ending)(i) + } + let input = b"abc\r\n"; + let output = take_full_line(input); + assert_eq!(output, Ok((&b""[..], (&b"abc"[..], &b"\r\n"[..])))); + } + + #[test] + fn full_line_unix() { + fn take_full_line(i: &[u8]) -> IResult<&[u8], (&[u8], &[u8])> { + pair(not_line_ending, line_ending)(i) + } + let input = b"abc\n"; + let output = take_full_line(input); + assert_eq!(output, Ok((&b""[..], (&b"abc"[..], &b"\n"[..])))); + } + + #[test] + fn check_windows_lineending() { + let input = b"\r\n"; + let output = line_ending(&input[..]); + assert_parse!(output, Ok((&b""[..], &b"\r\n"[..]))); + } + + #[test] + fn check_unix_lineending() { + let input = b"\n"; + let output = line_ending(&input[..]); + assert_parse!(output, Ok((&b""[..], &b"\n"[..]))); + } + + #[test] + fn cr_lf() { + assert_parse!(crlf(&b"\r\na"[..]), Ok((&b"a"[..], &b"\r\n"[..]))); + assert_parse!(crlf(&b"\r"[..]), Err(Err::Incomplete(Needed::new(2)))); + assert_parse!( + crlf(&b"\ra"[..]), + Err(Err::Error(error_position!(&b"\ra"[..], ErrorKind::CrLf))) + ); + + assert_parse!(crlf("\r\na"), Ok(("a", "\r\n"))); + assert_parse!(crlf("\r"), Err(Err::Incomplete(Needed::new(2)))); + assert_parse!( + crlf("\ra"), + Err(Err::Error(error_position!("\ra", 
ErrorKind::CrLf))) + ); + } + + #[test] + fn end_of_line() { + assert_parse!(line_ending(&b"\na"[..]), Ok((&b"a"[..], &b"\n"[..]))); + assert_parse!(line_ending(&b"\r\na"[..]), Ok((&b"a"[..], &b"\r\n"[..]))); + assert_parse!( + line_ending(&b"\r"[..]), + Err(Err::Incomplete(Needed::new(2))) + ); + assert_parse!( + line_ending(&b"\ra"[..]), + Err(Err::Error(error_position!(&b"\ra"[..], ErrorKind::CrLf))) + ); + + assert_parse!(line_ending("\na"), Ok(("a", "\n"))); + assert_parse!(line_ending("\r\na"), Ok(("a", "\r\n"))); + assert_parse!(line_ending("\r"), Err(Err::Incomplete(Needed::new(2)))); + assert_parse!( + line_ending("\ra"), + Err(Err::Error(error_position!("\ra", ErrorKind::CrLf))) + ); + } + + fn digit_to_i16(input: &str) -> IResult<&str, i16> { + let i = input; + let (i, opt_sign) = opt(alt((char('+'), char('-'))))(i)?; + let sign = match opt_sign { + Some('+') => true, + Some('-') => false, + _ => true, + }; + + let (i, s) = match digit1::<_, crate::error::Error<_>>(i) { + Ok((i, s)) => (i, s), + Err(Err::Incomplete(i)) => return Err(Err::Incomplete(i)), + Err(_) => { + return Err(Err::Error(crate::error::Error::from_error_kind( + input, + ErrorKind::Digit, + ))) + } + }; + match s.parse_to() { + Some(n) => { + if sign { + Ok((i, n)) + } else { + Ok((i, -n)) + } + } + None => Err(Err::Error(crate::error::Error::from_error_kind( + i, + ErrorKind::Digit, + ))), + } + } + + fn digit_to_u32(i: &str) -> IResult<&str, u32> { + let (i, s) = digit1(i)?; + match s.parse_to() { + Some(n) => Ok((i, n)), + None => Err(Err::Error(crate::error::Error::from_error_kind( + i, + ErrorKind::Digit, + ))), + } + } + + proptest! { + #[test] + fn ints(s in "\\PC*") { + let res1 = digit_to_i16(&s); + let res2 = i16(s.as_str()); + assert_eq!(res1, res2); + } + + #[test] + fn uints(s in "\\PC*") { + let res1 = digit_to_u32(&s); + let res2 = u32(s.as_str()); + assert_eq!(res1, res2); + } + } +} diff --git a/vendor/nom/src/character/tests.rs b/vendor/nom/src/character/tests.rs new file mode 100644 index 00000000000000..64c2a1c8a7c8c4 --- /dev/null +++ b/vendor/nom/src/character/tests.rs @@ -0,0 +1,62 @@ +use super::streaming::*; +use crate::error::ErrorKind; +use crate::internal::{Err, IResult}; + +#[test] +fn one_of_test() { + fn f(i: &[u8]) -> IResult<&[u8], char> { + one_of("ab")(i) + } + + let a = &b"abcd"[..]; + assert_eq!(f(a), Ok((&b"bcd"[..], 'a'))); + + let b = &b"cde"[..]; + assert_eq!(f(b), Err(Err::Error(error_position!(b, ErrorKind::OneOf)))); + + fn utf8(i: &str) -> IResult<&str, char> { + one_of("+\u{FF0B}")(i) + } + + assert!(utf8("+").is_ok()); + assert!(utf8("\u{FF0B}").is_ok()); +} + +#[test] +fn none_of_test() { + fn f(i: &[u8]) -> IResult<&[u8], char> { + none_of("ab")(i) + } + + let a = &b"abcd"[..]; + assert_eq!(f(a), Err(Err::Error(error_position!(a, ErrorKind::NoneOf)))); + + let b = &b"cde"[..]; + assert_eq!(f(b), Ok((&b"de"[..], 'c'))); +} + +#[test] +fn char_byteslice() { + fn f(i: &[u8]) -> IResult<&[u8], char> { + char('c')(i) + } + + let a = &b"abcd"[..]; + assert_eq!(f(a), Err(Err::Error(error_position!(a, ErrorKind::Char)))); + + let b = &b"cde"[..]; + assert_eq!(f(b), Ok((&b"de"[..], 'c'))); +} + +#[test] +fn char_str() { + fn f(i: &str) -> IResult<&str, char> { + char('c')(i) + } + + let a = &"abcd"[..]; + assert_eq!(f(a), Err(Err::Error(error_position!(a, ErrorKind::Char)))); + + let b = &"cde"[..]; + assert_eq!(f(b), Ok((&"de"[..], 'c'))); +} diff --git a/vendor/nom/src/combinator/mod.rs b/vendor/nom/src/combinator/mod.rs new file mode 100644 index 
00000000000000..fe08d4a1050b48 --- /dev/null +++ b/vendor/nom/src/combinator/mod.rs @@ -0,0 +1,809 @@ +//! General purpose combinators + +#![allow(unused_imports)] + +#[cfg(feature = "alloc")] +use crate::lib::std::boxed::Box; + +use crate::error::{ErrorKind, FromExternalError, ParseError}; +use crate::internal::*; +use crate::lib::std::borrow::Borrow; +use crate::lib::std::convert::Into; +#[cfg(feature = "std")] +use crate::lib::std::fmt::Debug; +use crate::lib::std::mem::transmute; +use crate::lib::std::ops::{Range, RangeFrom, RangeTo}; +use crate::traits::{AsChar, InputIter, InputLength, InputTakeAtPosition, ParseTo}; +use crate::traits::{Compare, CompareResult, Offset, Slice}; + +#[cfg(test)] +mod tests; + +/// Return the remaining input. +/// +/// ```rust +/// # use nom::error::ErrorKind; +/// use nom::combinator::rest; +/// assert_eq!(rest::<_,(_, ErrorKind)>("abc"), Ok(("", "abc"))); +/// assert_eq!(rest::<_,(_, ErrorKind)>(""), Ok(("", ""))); +/// ``` +#[inline] +pub fn rest>(input: T) -> IResult +where + T: Slice>, + T: InputLength, +{ + Ok((input.slice(input.input_len()..), input)) +} + +/// Return the length of the remaining input. +/// +/// ```rust +/// # use nom::error::ErrorKind; +/// use nom::combinator::rest_len; +/// assert_eq!(rest_len::<_,(_, ErrorKind)>("abc"), Ok(("abc", 3))); +/// assert_eq!(rest_len::<_,(_, ErrorKind)>(""), Ok(("", 0))); +/// ``` +#[inline] +pub fn rest_len>(input: T) -> IResult +where + T: InputLength, +{ + let len = input.input_len(); + Ok((input, len)) +} + +/// Maps a function on the result of a parser. +/// +/// ```rust +/// use nom::{Err,error::ErrorKind, IResult,Parser}; +/// use nom::character::complete::digit1; +/// use nom::combinator::map; +/// # fn main() { +/// +/// let mut parser = map(digit1, |s: &str| s.len()); +/// +/// // the parser will count how many characters were returned by digit1 +/// assert_eq!(parser.parse("123456"), Ok(("", 6))); +/// +/// // this will fail if digit1 fails +/// assert_eq!(parser.parse("abc"), Err(Err::Error(("abc", ErrorKind::Digit)))); +/// # } +/// ``` +pub fn map(mut parser: F, mut f: G) -> impl FnMut(I) -> IResult +where + F: Parser, + G: FnMut(O1) -> O2, +{ + move |input: I| { + let (input, o1) = parser.parse(input)?; + Ok((input, f(o1))) + } +} + +/// Applies a function returning a `Result` over the result of a parser. +/// +/// ```rust +/// # use nom::{Err,error::ErrorKind, IResult}; +/// use nom::character::complete::digit1; +/// use nom::combinator::map_res; +/// # fn main() { +/// +/// let mut parse = map_res(digit1, |s: &str| s.parse::()); +/// +/// // the parser will convert the result of digit1 to a number +/// assert_eq!(parse("123"), Ok(("", 123))); +/// +/// // this will fail if digit1 fails +/// assert_eq!(parse("abc"), Err(Err::Error(("abc", ErrorKind::Digit)))); +/// +/// // this will fail if the mapped function fails (a `u8` is too small to hold `123456`) +/// assert_eq!(parse("123456"), Err(Err::Error(("123456", ErrorKind::MapRes)))); +/// # } +/// ``` +pub fn map_res, E2, F, G>( + mut parser: F, + mut f: G, +) -> impl FnMut(I) -> IResult +where + F: Parser, + G: FnMut(O1) -> Result, +{ + move |input: I| { + let i = input.clone(); + let (input, o1) = parser.parse(input)?; + match f(o1) { + Ok(o2) => Ok((input, o2)), + Err(e) => Err(Err::Error(E::from_external_error(i, ErrorKind::MapRes, e))), + } + } +} + +/// Applies a function returning an `Option` over the result of a parser. 
+/// +/// ```rust +/// # use nom::{Err,error::ErrorKind, IResult}; +/// use nom::character::complete::digit1; +/// use nom::combinator::map_opt; +/// # fn main() { +/// +/// let mut parse = map_opt(digit1, |s: &str| s.parse::().ok()); +/// +/// // the parser will convert the result of digit1 to a number +/// assert_eq!(parse("123"), Ok(("", 123))); +/// +/// // this will fail if digit1 fails +/// assert_eq!(parse("abc"), Err(Err::Error(("abc", ErrorKind::Digit)))); +/// +/// // this will fail if the mapped function fails (a `u8` is too small to hold `123456`) +/// assert_eq!(parse("123456"), Err(Err::Error(("123456", ErrorKind::MapOpt)))); +/// # } +/// ``` +pub fn map_opt, F, G>( + mut parser: F, + mut f: G, +) -> impl FnMut(I) -> IResult +where + F: Parser, + G: FnMut(O1) -> Option, +{ + move |input: I| { + let i = input.clone(); + let (input, o1) = parser.parse(input)?; + match f(o1) { + Some(o2) => Ok((input, o2)), + None => Err(Err::Error(E::from_error_kind(i, ErrorKind::MapOpt))), + } + } +} + +/// Applies a parser over the result of another one. +/// +/// ```rust +/// # use nom::{Err,error::ErrorKind, IResult}; +/// use nom::character::complete::digit1; +/// use nom::bytes::complete::take; +/// use nom::combinator::map_parser; +/// # fn main() { +/// +/// let mut parse = map_parser(take(5u8), digit1); +/// +/// assert_eq!(parse("12345"), Ok(("", "12345"))); +/// assert_eq!(parse("123ab"), Ok(("", "123"))); +/// assert_eq!(parse("123"), Err(Err::Error(("123", ErrorKind::Eof)))); +/// # } +/// ``` +pub fn map_parser, F, G>( + mut parser: F, + mut applied_parser: G, +) -> impl FnMut(I) -> IResult +where + F: Parser, + G: Parser, +{ + move |input: I| { + let (input, o1) = parser.parse(input)?; + let (_, o2) = applied_parser.parse(o1)?; + Ok((input, o2)) + } +} + +/// Creates a new parser from the output of the first parser, then apply that parser over the rest of the input. +/// +/// ```rust +/// # use nom::{Err,error::ErrorKind, IResult}; +/// use nom::bytes::complete::take; +/// use nom::number::complete::u8; +/// use nom::combinator::flat_map; +/// # fn main() { +/// +/// let mut parse = flat_map(u8, take); +/// +/// assert_eq!(parse(&[2, 0, 1, 2][..]), Ok((&[2][..], &[0, 1][..]))); +/// assert_eq!(parse(&[4, 0, 1, 2][..]), Err(Err::Error((&[0, 1, 2][..], ErrorKind::Eof)))); +/// # } +/// ``` +pub fn flat_map, F, G, H>( + mut parser: F, + mut applied_parser: G, +) -> impl FnMut(I) -> IResult +where + F: Parser, + G: FnMut(O1) -> H, + H: Parser, +{ + move |input: I| { + let (input, o1) = parser.parse(input)?; + applied_parser(o1).parse(input) + } +} + +/// Optional parser, will return `None` on [`Err::Error`]. +/// +/// To chain an error up, see [`cut`]. +/// +/// ```rust +/// # use nom::{Err,error::ErrorKind, IResult}; +/// use nom::combinator::opt; +/// use nom::character::complete::alpha1; +/// # fn main() { +/// +/// fn parser(i: &str) -> IResult<&str, Option<&str>> { +/// opt(alpha1)(i) +/// } +/// +/// assert_eq!(parser("abcd;"), Ok((";", Some("abcd")))); +/// assert_eq!(parser("123;"), Ok(("123;", None))); +/// # } +/// ``` +pub fn opt, F>(mut f: F) -> impl FnMut(I) -> IResult, E> +where + F: Parser, +{ + move |input: I| { + let i = input.clone(); + match f.parse(input) { + Ok((i, o)) => Ok((i, Some(o))), + Err(Err::Error(_)) => Ok((i, None)), + Err(e) => Err(e), + } + } +} + +/// Calls the parser if the condition is met. 
+/// +/// ```rust +/// # use nom::{Err, error::{Error, ErrorKind}, IResult}; +/// use nom::combinator::cond; +/// use nom::character::complete::alpha1; +/// # fn main() { +/// +/// fn parser(b: bool, i: &str) -> IResult<&str, Option<&str>> { +/// cond(b, alpha1)(i) +/// } +/// +/// assert_eq!(parser(true, "abcd;"), Ok((";", Some("abcd")))); +/// assert_eq!(parser(false, "abcd;"), Ok(("abcd;", None))); +/// assert_eq!(parser(true, "123;"), Err(Err::Error(Error::new("123;", ErrorKind::Alpha)))); +/// assert_eq!(parser(false, "123;"), Ok(("123;", None))); +/// # } +/// ``` +pub fn cond, F>( + b: bool, + mut f: F, +) -> impl FnMut(I) -> IResult, E> +where + F: Parser, +{ + move |input: I| { + if b { + match f.parse(input) { + Ok((i, o)) => Ok((i, Some(o))), + Err(e) => Err(e), + } + } else { + Ok((input, None)) + } + } +} + +/// Tries to apply its parser without consuming the input. +/// +/// ```rust +/// # use nom::{Err,error::ErrorKind, IResult}; +/// use nom::combinator::peek; +/// use nom::character::complete::alpha1; +/// # fn main() { +/// +/// let mut parser = peek(alpha1); +/// +/// assert_eq!(parser("abcd;"), Ok(("abcd;", "abcd"))); +/// assert_eq!(parser("123;"), Err(Err::Error(("123;", ErrorKind::Alpha)))); +/// # } +/// ``` +pub fn peek, F>(mut f: F) -> impl FnMut(I) -> IResult +where + F: Parser, +{ + move |input: I| { + let i = input.clone(); + match f.parse(input) { + Ok((_, o)) => Ok((i, o)), + Err(e) => Err(e), + } + } +} + +/// returns its input if it is at the end of input data +/// +/// When we're at the end of the data, this combinator +/// will succeed +/// +/// ``` +/// # use std::str; +/// # use nom::{Err, error::ErrorKind, IResult}; +/// # use nom::combinator::eof; +/// +/// # fn main() { +/// let parser = eof; +/// assert_eq!(parser("abc"), Err(Err::Error(("abc", ErrorKind::Eof)))); +/// assert_eq!(parser(""), Ok(("", ""))); +/// # } +/// ``` +pub fn eof>(input: I) -> IResult { + if input.input_len() == 0 { + let clone = input.clone(); + Ok((input, clone)) + } else { + Err(Err::Error(E::from_error_kind(input, ErrorKind::Eof))) + } +} + +/// Transforms Incomplete into `Error`. +/// +/// ```rust +/// # use nom::{Err,error::ErrorKind, IResult}; +/// use nom::bytes::streaming::take; +/// use nom::combinator::complete; +/// # fn main() { +/// +/// let mut parser = complete(take(5u8)); +/// +/// assert_eq!(parser("abcdefg"), Ok(("fg", "abcde"))); +/// assert_eq!(parser("abcd"), Err(Err::Error(("abcd", ErrorKind::Complete)))); +/// # } +/// ``` +pub fn complete, F>(mut f: F) -> impl FnMut(I) -> IResult +where + F: Parser, +{ + move |input: I| { + let i = input.clone(); + match f.parse(input) { + Err(Err::Incomplete(_)) => Err(Err::Error(E::from_error_kind(i, ErrorKind::Complete))), + rest => rest, + } + } +} + +/// Succeeds if all the input has been consumed by its child parser. 
+/// +/// ```rust +/// # use nom::{Err,error::ErrorKind, IResult}; +/// use nom::combinator::all_consuming; +/// use nom::character::complete::alpha1; +/// # fn main() { +/// +/// let mut parser = all_consuming(alpha1); +/// +/// assert_eq!(parser("abcd"), Ok(("", "abcd"))); +/// assert_eq!(parser("abcd;"),Err(Err::Error((";", ErrorKind::Eof)))); +/// assert_eq!(parser("123abcd;"),Err(Err::Error(("123abcd;", ErrorKind::Alpha)))); +/// # } +/// ``` +pub fn all_consuming, F>(mut f: F) -> impl FnMut(I) -> IResult +where + I: InputLength, + F: Parser, +{ + move |input: I| { + let (input, res) = f.parse(input)?; + if input.input_len() == 0 { + Ok((input, res)) + } else { + Err(Err::Error(E::from_error_kind(input, ErrorKind::Eof))) + } + } +} + +/// Returns the result of the child parser if it satisfies a verification function. +/// +/// The verification function takes as argument a reference to the output of the +/// parser. +/// +/// ```rust +/// # use nom::{Err,error::ErrorKind, IResult}; +/// use nom::combinator::verify; +/// use nom::character::complete::alpha1; +/// # fn main() { +/// +/// let mut parser = verify(alpha1, |s: &str| s.len() == 4); +/// +/// assert_eq!(parser("abcd"), Ok(("", "abcd"))); +/// assert_eq!(parser("abcde"), Err(Err::Error(("abcde", ErrorKind::Verify)))); +/// assert_eq!(parser("123abcd;"),Err(Err::Error(("123abcd;", ErrorKind::Alpha)))); +/// # } +/// ``` +pub fn verify, F, G>( + mut first: F, + second: G, +) -> impl FnMut(I) -> IResult +where + F: Parser, + G: Fn(&O2) -> bool, + O1: Borrow, + O2: ?Sized, +{ + move |input: I| { + let i = input.clone(); + let (input, o) = first.parse(input)?; + + if second(o.borrow()) { + Ok((input, o)) + } else { + Err(Err::Error(E::from_error_kind(i, ErrorKind::Verify))) + } + } +} + +/// Returns the provided value if the child parser succeeds. +/// +/// ```rust +/// # use nom::{Err,error::ErrorKind, IResult}; +/// use nom::combinator::value; +/// use nom::character::complete::alpha1; +/// # fn main() { +/// +/// let mut parser = value(1234, alpha1); +/// +/// assert_eq!(parser("abcd"), Ok(("", 1234))); +/// assert_eq!(parser("123abcd;"), Err(Err::Error(("123abcd;", ErrorKind::Alpha)))); +/// # } +/// ``` +pub fn value, F>( + val: O1, + mut parser: F, +) -> impl FnMut(I) -> IResult +where + F: Parser, +{ + move |input: I| parser.parse(input).map(|(i, _)| (i, val.clone())) +} + +/// Succeeds if the child parser returns an error. +/// +/// ```rust +/// # use nom::{Err,error::ErrorKind, IResult}; +/// use nom::combinator::not; +/// use nom::character::complete::alpha1; +/// # fn main() { +/// +/// let mut parser = not(alpha1); +/// +/// assert_eq!(parser("123"), Ok(("123", ()))); +/// assert_eq!(parser("abcd"), Err(Err::Error(("abcd", ErrorKind::Not)))); +/// # } +/// ``` +pub fn not, F>(mut parser: F) -> impl FnMut(I) -> IResult +where + F: Parser, +{ + move |input: I| { + let i = input.clone(); + match parser.parse(input) { + Ok(_) => Err(Err::Error(E::from_error_kind(i, ErrorKind::Not))), + Err(Err::Error(_)) => Ok((i, ())), + Err(e) => Err(e), + } + } +} + +/// If the child parser was successful, return the consumed input as produced value. 
+/// +/// ```rust +/// # use nom::{Err,error::ErrorKind, IResult}; +/// use nom::combinator::recognize; +/// use nom::character::complete::{char, alpha1}; +/// use nom::sequence::separated_pair; +/// # fn main() { +/// +/// let mut parser = recognize(separated_pair(alpha1, char(','), alpha1)); +/// +/// assert_eq!(parser("abcd,efgh"), Ok(("", "abcd,efgh"))); +/// assert_eq!(parser("abcd;"),Err(Err::Error((";", ErrorKind::Char)))); +/// # } +/// ``` +pub fn recognize>, O, E: ParseError, F>( + mut parser: F, +) -> impl FnMut(I) -> IResult +where + F: Parser, +{ + move |input: I| { + let i = input.clone(); + match parser.parse(i) { + Ok((i, _)) => { + let index = input.offset(&i); + Ok((i, input.slice(..index))) + } + Err(e) => Err(e), + } + } +} + +/// if the child parser was successful, return the consumed input with the output +/// as a tuple. Functions similarly to [recognize](fn.recognize.html) except it +/// returns the parser output as well. +/// +/// This can be useful especially in cases where the output is not the same type +/// as the input, or the input is a user defined type. +/// +/// Returned tuple is of the format `(consumed input, produced output)`. +/// +/// ```rust +/// # use nom::{Err,error::ErrorKind, IResult}; +/// use nom::combinator::{consumed, value, recognize, map}; +/// use nom::character::complete::{char, alpha1}; +/// use nom::bytes::complete::tag; +/// use nom::sequence::separated_pair; +/// +/// fn inner_parser(input: &str) -> IResult<&str, bool> { +/// value(true, tag("1234"))(input) +/// } +/// +/// # fn main() { +/// +/// let mut consumed_parser = consumed(value(true, separated_pair(alpha1, char(','), alpha1))); +/// +/// assert_eq!(consumed_parser("abcd,efgh1"), Ok(("1", ("abcd,efgh", true)))); +/// assert_eq!(consumed_parser("abcd;"),Err(Err::Error((";", ErrorKind::Char)))); +/// +/// +/// // the first output (representing the consumed input) +/// // should be the same as that of the `recognize` parser. +/// let mut recognize_parser = recognize(inner_parser); +/// let mut consumed_parser = map(consumed(inner_parser), |(consumed, output)| consumed); +/// +/// assert_eq!(recognize_parser("1234"), consumed_parser("1234")); +/// assert_eq!(recognize_parser("abcd"), consumed_parser("abcd")); +/// # } +/// ``` +pub fn consumed(mut parser: F) -> impl FnMut(I) -> IResult +where + I: Clone + Offset + Slice>, + E: ParseError, + F: Parser, +{ + move |input: I| { + let i = input.clone(); + match parser.parse(i) { + Ok((remaining, result)) => { + let index = input.offset(&remaining); + let consumed = input.slice(..index); + Ok((remaining, (consumed, result))) + } + Err(e) => Err(e), + } + } +} + +/// Transforms an [`Err::Error`] (recoverable) to [`Err::Failure`] (unrecoverable) +/// +/// This commits the parse result, preventing alternative branch paths like with +/// [`nom::branch::alt`][crate::branch::alt]. 
+/// +/// # Example +/// +/// Without `cut`: +/// ```rust +/// # use nom::{Err,error::ErrorKind, IResult}; +/// # use nom::character::complete::{one_of, digit1}; +/// # use nom::combinator::rest; +/// # use nom::branch::alt; +/// # use nom::sequence::preceded; +/// # fn main() { +/// +/// fn parser(input: &str) -> IResult<&str, &str> { +/// alt(( +/// preceded(one_of("+-"), digit1), +/// rest +/// ))(input) +/// } +/// +/// assert_eq!(parser("+10 ab"), Ok((" ab", "10"))); +/// assert_eq!(parser("ab"), Ok(("", "ab"))); +/// assert_eq!(parser("+"), Ok(("", "+"))); +/// # } +/// ``` +/// +/// With `cut`: +/// ```rust +/// # use nom::{Err,error::ErrorKind, IResult, error::Error}; +/// # use nom::character::complete::{one_of, digit1}; +/// # use nom::combinator::rest; +/// # use nom::branch::alt; +/// # use nom::sequence::preceded; +/// use nom::combinator::cut; +/// # fn main() { +/// +/// fn parser(input: &str) -> IResult<&str, &str> { +/// alt(( +/// preceded(one_of("+-"), cut(digit1)), +/// rest +/// ))(input) +/// } +/// +/// assert_eq!(parser("+10 ab"), Ok((" ab", "10"))); +/// assert_eq!(parser("ab"), Ok(("", "ab"))); +/// assert_eq!(parser("+"), Err(Err::Failure(Error { input: "", code: ErrorKind::Digit }))); +/// # } +/// ``` +pub fn cut, F>(mut parser: F) -> impl FnMut(I) -> IResult +where + F: Parser, +{ + move |input: I| match parser.parse(input) { + Err(Err::Error(e)) => Err(Err::Failure(e)), + rest => rest, + } +} + +/// automatically converts the child parser's result to another type +/// +/// it will be able to convert the output value and the error value +/// as long as the `Into` implementations are available +/// +/// ```rust +/// # use nom::IResult; +/// use nom::combinator::into; +/// use nom::character::complete::alpha1; +/// # fn main() { +/// +/// fn parser1(i: &str) -> IResult<&str, &str> { +/// alpha1(i) +/// } +/// +/// let mut parser2 = into(parser1); +/// +/// // the parser converts the &str output of the child parser into a Vec +/// let bytes: IResult<&str, Vec> = parser2("abcd"); +/// assert_eq!(bytes, Ok(("", vec![97, 98, 99, 100]))); +/// # } +/// ``` +pub fn into(mut parser: F) -> impl FnMut(I) -> IResult +where + O1: Into, + E1: Into, + E1: ParseError, + E2: ParseError, + F: Parser, +{ + //map(parser, Into::into) + move |input: I| match parser.parse(input) { + Ok((i, o)) => Ok((i, o.into())), + Err(Err::Error(e)) => Err(Err::Error(e.into())), + Err(Err::Failure(e)) => Err(Err::Failure(e.into())), + Err(Err::Incomplete(e)) => Err(Err::Incomplete(e)), + } +} + +/// Creates an iterator from input data and a parser. +/// +/// Call the iterator's [ParserIterator::finish] method to get the remaining input if successful, +/// or the error value if we encountered an error. +/// +/// On [`Err::Error`], iteration will stop. To instead chain an error up, see [`cut`]. 
+/// +/// ```rust +/// use nom::{combinator::iterator, IResult, bytes::complete::tag, character::complete::alpha1, sequence::terminated}; +/// use std::collections::HashMap; +/// +/// let data = "abc|defg|hijkl|mnopqr|123"; +/// let mut it = iterator(data, terminated(alpha1, tag("|"))); +/// +/// let parsed = it.map(|v| (v, v.len())).collect::>(); +/// let res: IResult<_,_> = it.finish(); +/// +/// assert_eq!(parsed, [("abc", 3usize), ("defg", 4), ("hijkl", 5), ("mnopqr", 6)].iter().cloned().collect()); +/// assert_eq!(res, Ok(("123", ()))); +/// ``` +pub fn iterator(input: Input, f: F) -> ParserIterator +where + F: Parser, + Error: ParseError, +{ + ParserIterator { + iterator: f, + input, + state: Some(State::Running), + } +} + +/// Main structure associated to the [iterator] function. +pub struct ParserIterator { + iterator: F, + input: I, + state: Option>, +} + +impl ParserIterator { + /// Returns the remaining input if parsing was successful, or the error if we encountered an error. + pub fn finish(mut self) -> IResult { + match self.state.take().unwrap() { + State::Running | State::Done => Ok((self.input, ())), + State::Failure(e) => Err(Err::Failure(e)), + State::Incomplete(i) => Err(Err::Incomplete(i)), + } + } +} + +impl<'a, Input, Output, Error, F> core::iter::Iterator for &'a mut ParserIterator +where + F: FnMut(Input) -> IResult, + Input: Clone, +{ + type Item = Output; + + fn next(&mut self) -> Option { + if let State::Running = self.state.take().unwrap() { + let input = self.input.clone(); + + match (self.iterator)(input) { + Ok((i, o)) => { + self.input = i; + self.state = Some(State::Running); + Some(o) + } + Err(Err::Error(_)) => { + self.state = Some(State::Done); + None + } + Err(Err::Failure(e)) => { + self.state = Some(State::Failure(e)); + None + } + Err(Err::Incomplete(i)) => { + self.state = Some(State::Incomplete(i)); + None + } + } + } else { + None + } + } +} + +enum State { + Running, + Done, + Failure(E), + Incomplete(Needed), +} + +/// a parser which always succeeds with given value without consuming any input. +/// +/// It can be used for example as the last alternative in `alt` to +/// specify the default case. +/// +/// ```rust +/// # use nom::{Err,error::ErrorKind, IResult}; +/// use nom::branch::alt; +/// use nom::combinator::{success, value}; +/// use nom::character::complete::char; +/// # fn main() { +/// +/// let mut parser = success::<_,_,(_,ErrorKind)>(10); +/// assert_eq!(parser("xyz"), Ok(("xyz", 10))); +/// +/// let mut sign = alt((value(-1, char('-')), value(1, char('+')), success::<_,_,(_,ErrorKind)>(1))); +/// assert_eq!(sign("+10"), Ok(("10", 1))); +/// assert_eq!(sign("-10"), Ok(("10", -1))); +/// assert_eq!(sign("10"), Ok(("10", 1))); +/// # } +/// ``` +pub fn success>(val: O) -> impl Fn(I) -> IResult { + move |input: I| Ok((input, val.clone())) +} + +/// A parser which always fails. 
+/// +/// ```rust +/// # use nom::{Err, error::ErrorKind, IResult}; +/// use nom::combinator::fail; +/// +/// let s = "string"; +/// assert_eq!(fail::<_, &str, _>(s), Err(Err::Error((s, ErrorKind::Fail)))); +/// ``` +pub fn fail>(i: I) -> IResult { + Err(Err::Error(E::from_error_kind(i, ErrorKind::Fail))) +} diff --git a/vendor/nom/src/combinator/tests.rs b/vendor/nom/src/combinator/tests.rs new file mode 100644 index 00000000000000..15d32b8aae3322 --- /dev/null +++ b/vendor/nom/src/combinator/tests.rs @@ -0,0 +1,275 @@ +use super::*; +use crate::bytes::complete::take; +use crate::bytes::streaming::tag; +use crate::error::ErrorKind; +use crate::error::ParseError; +use crate::internal::{Err, IResult, Needed}; +#[cfg(feature = "alloc")] +use crate::lib::std::boxed::Box; +use crate::number::complete::u8; + +macro_rules! assert_parse( + ($left: expr, $right: expr) => { + let res: $crate::IResult<_, _, (_, ErrorKind)> = $left; + assert_eq!(res, $right); + }; +); + +/*#[test] +fn t1() { + let v1:Vec = vec![1,2,3]; + let v2:Vec = vec![4,5,6]; + let d = Ok((&v1[..], &v2[..])); + let res = d.flat_map(print); + assert_eq!(res, Ok((&v2[..], ()))); +}*/ + +#[test] +fn eof_on_slices() { + let not_over: &[u8] = &b"Hello, world!"[..]; + let is_over: &[u8] = &b""[..]; + + let res_not_over = eof(not_over); + assert_parse!( + res_not_over, + Err(Err::Error(error_position!(not_over, ErrorKind::Eof))) + ); + + let res_over = eof(is_over); + assert_parse!(res_over, Ok((is_over, is_over))); +} + +#[test] +fn eof_on_strs() { + let not_over: &str = "Hello, world!"; + let is_over: &str = ""; + + let res_not_over = eof(not_over); + assert_parse!( + res_not_over, + Err(Err::Error(error_position!(not_over, ErrorKind::Eof))) + ); + + let res_over = eof(is_over); + assert_parse!(res_over, Ok((is_over, is_over))); +} + +/* +#[test] +fn end_of_input() { + let not_over = &b"Hello, world!"[..]; + let is_over = &b""[..]; + named!(eof_test, eof!()); + + let res_not_over = eof_test(not_over); + assert_eq!(res_not_over, Err(Err::Error(error_position!(not_over, ErrorKind::Eof)))); + + let res_over = eof_test(is_over); + assert_eq!(res_over, Ok((is_over, is_over))); +} +*/ + +#[test] +fn rest_on_slices() { + let input: &[u8] = &b"Hello, world!"[..]; + let empty: &[u8] = &b""[..]; + assert_parse!(rest(input), Ok((empty, input))); +} + +#[test] +fn rest_on_strs() { + let input: &str = "Hello, world!"; + let empty: &str = ""; + assert_parse!(rest(input), Ok((empty, input))); +} + +#[test] +fn rest_len_on_slices() { + let input: &[u8] = &b"Hello, world!"[..]; + assert_parse!(rest_len(input), Ok((input, input.len()))); +} + +use crate::lib::std::convert::From; +impl From for CustomError { + fn from(_: u32) -> Self { + CustomError + } +} + +impl ParseError for CustomError { + fn from_error_kind(_: I, _: ErrorKind) -> Self { + CustomError + } + + fn append(_: I, _: ErrorKind, _: CustomError) -> Self { + CustomError + } +} + +struct CustomError; +#[allow(dead_code)] +fn custom_error(input: &[u8]) -> IResult<&[u8], &[u8], CustomError> { + //fix_error!(input, CustomError, alphanumeric) + crate::character::streaming::alphanumeric1(input) +} + +#[test] +fn test_flat_map() { + let input: &[u8] = &[3, 100, 101, 102, 103, 104][..]; + assert_parse!( + flat_map(u8, take)(input), + Ok((&[103, 104][..], &[100, 101, 102][..])) + ); +} + +#[test] +fn test_map_opt() { + let input: &[u8] = &[50][..]; + assert_parse!( + map_opt(u8, |u| if u < 20 { Some(u) } else { None })(input), + Err(Err::Error((&[50][..], ErrorKind::MapOpt))) + ); + assert_parse!( + 
map_opt(u8, |u| if u > 20 { Some(u) } else { None })(input), + Ok((&[][..], 50)) + ); +} + +#[test] +fn test_map_parser() { + let input: &[u8] = &[100, 101, 102, 103, 104][..]; + assert_parse!( + map_parser(take(4usize), take(2usize))(input), + Ok((&[104][..], &[100, 101][..])) + ); +} + +#[test] +fn test_all_consuming() { + let input: &[u8] = &[100, 101, 102][..]; + assert_parse!( + all_consuming(take(2usize))(input), + Err(Err::Error((&[102][..], ErrorKind::Eof))) + ); + assert_parse!( + all_consuming(take(3usize))(input), + Ok((&[][..], &[100, 101, 102][..])) + ); +} + +#[test] +#[allow(unused)] +fn test_verify_ref() { + use crate::bytes::complete::take; + + let mut parser1 = verify(take(3u8), |s: &[u8]| s == &b"abc"[..]); + + assert_eq!(parser1(&b"abcd"[..]), Ok((&b"d"[..], &b"abc"[..]))); + assert_eq!( + parser1(&b"defg"[..]), + Err(Err::Error((&b"defg"[..], ErrorKind::Verify))) + ); + + fn parser2(i: &[u8]) -> IResult<&[u8], u32> { + verify(crate::number::streaming::be_u32, |val: &u32| *val < 3)(i) + } +} + +#[test] +#[cfg(feature = "alloc")] +fn test_verify_alloc() { + use crate::bytes::complete::take; + let mut parser1 = verify(map(take(3u8), |s: &[u8]| s.to_vec()), |s: &[u8]| { + s == &b"abc"[..] + }); + + assert_eq!(parser1(&b"abcd"[..]), Ok((&b"d"[..], (&b"abc").to_vec()))); + assert_eq!( + parser1(&b"defg"[..]), + Err(Err::Error((&b"defg"[..], ErrorKind::Verify))) + ); +} + +#[test] +#[cfg(feature = "std")] +fn test_into() { + use crate::bytes::complete::take; + use crate::{ + error::{Error, ParseError}, + Err, + }; + + let mut parser = into(take::<_, _, Error<_>>(3u8)); + let result: IResult<&[u8], Vec> = parser(&b"abcdefg"[..]); + + assert_eq!(result, Ok((&b"defg"[..], vec![97, 98, 99]))); +} + +#[test] +fn opt_test() { + fn opt_abcd(i: &[u8]) -> IResult<&[u8], Option<&[u8]>> { + opt(tag("abcd"))(i) + } + + let a = &b"abcdef"[..]; + let b = &b"bcdefg"[..]; + let c = &b"ab"[..]; + assert_eq!(opt_abcd(a), Ok((&b"ef"[..], Some(&b"abcd"[..])))); + assert_eq!(opt_abcd(b), Ok((&b"bcdefg"[..], None))); + assert_eq!(opt_abcd(c), Err(Err::Incomplete(Needed::new(2)))); +} + +#[test] +fn peek_test() { + fn peek_tag(i: &[u8]) -> IResult<&[u8], &[u8]> { + peek(tag("abcd"))(i) + } + + assert_eq!(peek_tag(&b"abcdef"[..]), Ok((&b"abcdef"[..], &b"abcd"[..]))); + assert_eq!(peek_tag(&b"ab"[..]), Err(Err::Incomplete(Needed::new(2)))); + assert_eq!( + peek_tag(&b"xxx"[..]), + Err(Err::Error(error_position!(&b"xxx"[..], ErrorKind::Tag))) + ); +} + +#[test] +fn not_test() { + fn not_aaa(i: &[u8]) -> IResult<&[u8], ()> { + not(tag("aaa"))(i) + } + + assert_eq!( + not_aaa(&b"aaa"[..]), + Err(Err::Error(error_position!(&b"aaa"[..], ErrorKind::Not))) + ); + assert_eq!(not_aaa(&b"aa"[..]), Err(Err::Incomplete(Needed::new(1)))); + assert_eq!(not_aaa(&b"abcd"[..]), Ok((&b"abcd"[..], ()))); +} + +#[test] +fn verify_test() { + use crate::bytes::streaming::take; + + fn test(i: &[u8]) -> IResult<&[u8], &[u8]> { + verify(take(5u8), |slice: &[u8]| slice[0] == b'a')(i) + } + assert_eq!(test(&b"bcd"[..]), Err(Err::Incomplete(Needed::new(2)))); + assert_eq!( + test(&b"bcdefg"[..]), + Err(Err::Error(error_position!( + &b"bcdefg"[..], + ErrorKind::Verify + ))) + ); + assert_eq!(test(&b"abcdefg"[..]), Ok((&b"fg"[..], &b"abcde"[..]))); +} + +#[test] +fn fail_test() { + let a = "string"; + let b = "another string"; + + assert_eq!(fail::<_, &str, _>(a), Err(Err::Error((a, ErrorKind::Fail)))); + assert_eq!(fail::<_, &str, _>(b), Err(Err::Error((b, ErrorKind::Fail)))); +} diff --git a/vendor/nom/src/error.rs 
b/vendor/nom/src/error.rs new file mode 100644 index 00000000000000..498b5e135a0973 --- /dev/null +++ b/vendor/nom/src/error.rs @@ -0,0 +1,831 @@ +//! Error management +//! +//! Parsers are generic over their error type, requiring that it implements +//! the `error::ParseError` trait. + +use crate::internal::Parser; +use crate::lib::std::fmt; + +/// This trait must be implemented by the error type of a nom parser. +/// +/// There are already implementations of it for `(Input, ErrorKind)` +/// and `VerboseError`. +/// +/// It provides methods to create an error from some combinators, +/// and combine existing errors in combinators like `alt`. +pub trait ParseError: Sized { + /// Creates an error from the input position and an [ErrorKind] + fn from_error_kind(input: I, kind: ErrorKind) -> Self; + + /// Combines an existing error with a new one created from the input + /// position and an [ErrorKind]. This is useful when backtracking + /// through a parse tree, accumulating error context on the way + fn append(input: I, kind: ErrorKind, other: Self) -> Self; + + /// Creates an error from an input position and an expected character + fn from_char(input: I, _: char) -> Self { + Self::from_error_kind(input, ErrorKind::Char) + } + + /// Combines two existing errors. This function is used to compare errors + /// generated in various branches of `alt`. + fn or(self, other: Self) -> Self { + other + } +} + +/// This trait is required by the `context` combinator to add a static string +/// to an existing error +pub trait ContextError: Sized { + /// Creates a new error from an input position, a static string and an existing error. + /// This is used mainly in the [context] combinator, to add user friendly information + /// to errors when backtracking through a parse tree + fn add_context(_input: I, _ctx: &'static str, other: Self) -> Self { + other + } +} + +/// This trait is required by the `map_res` combinator to integrate +/// error types from external functions, like [std::str::FromStr] +pub trait FromExternalError { + /// Creates a new error from an input position, an [ErrorKind] indicating the + /// wrapping parser, and an external error + fn from_external_error(input: I, kind: ErrorKind, e: E) -> Self; +} + +/// default error type, only contains the error' location and code +#[derive(Debug, PartialEq)] +pub struct Error { + /// position of the error in the input data + pub input: I, + /// nom error code + pub code: ErrorKind, +} + +impl Error { + /// creates a new basic error + pub fn new(input: I, code: ErrorKind) -> Error { + Error { input, code } + } +} + +impl ParseError for Error { + fn from_error_kind(input: I, kind: ErrorKind) -> Self { + Error { input, code: kind } + } + + fn append(_: I, _: ErrorKind, other: Self) -> Self { + other + } +} + +impl ContextError for Error {} + +impl FromExternalError for Error { + /// Create a new error from an input position and an external error + fn from_external_error(input: I, kind: ErrorKind, _e: E) -> Self { + Error { input, code: kind } + } +} + +/// The Display implementation allows the std::error::Error implementation +impl fmt::Display for Error { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "error {:?} at: {}", self.code, self.input) + } +} + +#[cfg(feature = "std")] +impl std::error::Error for Error {} + +// for backward compatibility, keep those trait implementations +// for the previously used error type +impl ParseError for (I, ErrorKind) { + fn from_error_kind(input: I, kind: ErrorKind) -> Self { + (input, 
kind) + } + + fn append(_: I, _: ErrorKind, other: Self) -> Self { + other + } +} + +impl ContextError for (I, ErrorKind) {} + +impl FromExternalError for (I, ErrorKind) { + fn from_external_error(input: I, kind: ErrorKind, _e: E) -> Self { + (input, kind) + } +} + +impl ParseError for () { + fn from_error_kind(_: I, _: ErrorKind) -> Self {} + + fn append(_: I, _: ErrorKind, _: Self) -> Self {} +} + +impl ContextError for () {} + +impl FromExternalError for () { + fn from_external_error(_input: I, _kind: ErrorKind, _e: E) -> Self {} +} + +/// Creates an error from the input position and an [ErrorKind] +pub fn make_error>(input: I, kind: ErrorKind) -> E { + E::from_error_kind(input, kind) +} + +/// Combines an existing error with a new one created from the input +/// position and an [ErrorKind]. This is useful when backtracking +/// through a parse tree, accumulating error context on the way +pub fn append_error>(input: I, kind: ErrorKind, other: E) -> E { + E::append(input, kind, other) +} + +/// This error type accumulates errors and their position when backtracking +/// through a parse tree. With some post processing (cf `examples/json.rs`), +/// it can be used to display user friendly error messages +#[cfg(feature = "alloc")] +#[cfg_attr(feature = "docsrs", doc(cfg(feature = "alloc")))] +#[derive(Clone, Debug, PartialEq)] +pub struct VerboseError { + /// List of errors accumulated by `VerboseError`, containing the affected + /// part of input data, and some context + pub errors: crate::lib::std::vec::Vec<(I, VerboseErrorKind)>, +} + +#[cfg(feature = "alloc")] +#[cfg_attr(feature = "docsrs", doc(cfg(feature = "alloc")))] +#[derive(Clone, Debug, PartialEq)] +/// Error context for `VerboseError` +pub enum VerboseErrorKind { + /// Static string added by the `context` function + Context(&'static str), + /// Indicates which character was expected by the `char` function + Char(char), + /// Error kind given by various nom parsers + Nom(ErrorKind), +} + +#[cfg(feature = "alloc")] +#[cfg_attr(feature = "docsrs", doc(cfg(feature = "alloc")))] +impl ParseError for VerboseError { + fn from_error_kind(input: I, kind: ErrorKind) -> Self { + VerboseError { + errors: vec![(input, VerboseErrorKind::Nom(kind))], + } + } + + fn append(input: I, kind: ErrorKind, mut other: Self) -> Self { + other.errors.push((input, VerboseErrorKind::Nom(kind))); + other + } + + fn from_char(input: I, c: char) -> Self { + VerboseError { + errors: vec![(input, VerboseErrorKind::Char(c))], + } + } +} + +#[cfg(feature = "alloc")] +#[cfg_attr(feature = "docsrs", doc(cfg(feature = "alloc")))] +impl ContextError for VerboseError { + fn add_context(input: I, ctx: &'static str, mut other: Self) -> Self { + other.errors.push((input, VerboseErrorKind::Context(ctx))); + other + } +} + +#[cfg(feature = "alloc")] +#[cfg_attr(feature = "docsrs", doc(cfg(feature = "alloc")))] +impl FromExternalError for VerboseError { + /// Create a new error from an input position and an external error + fn from_external_error(input: I, kind: ErrorKind, _e: E) -> Self { + Self::from_error_kind(input, kind) + } +} + +#[cfg(feature = "alloc")] +impl fmt::Display for VerboseError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + writeln!(f, "Parse error:")?; + for (input, error) in &self.errors { + match error { + VerboseErrorKind::Nom(e) => writeln!(f, "{:?} at: {}", e, input)?, + VerboseErrorKind::Char(c) => writeln!(f, "expected '{}' at: {}", c, input)?, + VerboseErrorKind::Context(s) => writeln!(f, "in section '{}', at: {}", s, input)?, + 
} + } + + Ok(()) + } +} + +#[cfg(feature = "std")] +impl std::error::Error for VerboseError {} + +use crate::internal::{Err, IResult}; + +/// Create a new error from an input position, a static string and an existing error. +/// This is used mainly in the [context] combinator, to add user friendly information +/// to errors when backtracking through a parse tree +pub fn context, F, O>( + context: &'static str, + mut f: F, +) -> impl FnMut(I) -> IResult +where + F: Parser, +{ + move |i: I| match f.parse(i.clone()) { + Ok(o) => Ok(o), + Err(Err::Incomplete(i)) => Err(Err::Incomplete(i)), + Err(Err::Error(e)) => Err(Err::Error(E::add_context(i, context, e))), + Err(Err::Failure(e)) => Err(Err::Failure(E::add_context(i, context, e))), + } +} + +/// Transforms a `VerboseError` into a trace with input position information +#[cfg(feature = "alloc")] +#[cfg_attr(feature = "docsrs", doc(cfg(feature = "alloc")))] +pub fn convert_error>( + input: I, + e: VerboseError, +) -> crate::lib::std::string::String { + use crate::lib::std::fmt::Write; + use crate::traits::Offset; + + let mut result = crate::lib::std::string::String::new(); + + for (i, (substring, kind)) in e.errors.iter().enumerate() { + let offset = input.offset(substring); + + if input.is_empty() { + match kind { + VerboseErrorKind::Char(c) => { + write!(&mut result, "{}: expected '{}', got empty input\n\n", i, c) + } + VerboseErrorKind::Context(s) => write!(&mut result, "{}: in {}, got empty input\n\n", i, s), + VerboseErrorKind::Nom(e) => write!(&mut result, "{}: in {:?}, got empty input\n\n", i, e), + } + } else { + let prefix = &input.as_bytes()[..offset]; + + // Count the number of newlines in the first `offset` bytes of input + let line_number = prefix.iter().filter(|&&b| b == b'\n').count() + 1; + + // Find the line that includes the subslice: + // Find the *last* newline before the substring starts + let line_begin = prefix + .iter() + .rev() + .position(|&b| b == b'\n') + .map(|pos| offset - pos) + .unwrap_or(0); + + // Find the full line after that newline + let line = input[line_begin..] + .lines() + .next() + .unwrap_or(&input[line_begin..]) + .trim_end(); + + // The (1-indexed) column number is the offset of our substring into that line + let column_number = line.offset(substring) + 1; + + match kind { + VerboseErrorKind::Char(c) => { + if let Some(actual) = substring.chars().next() { + write!( + &mut result, + "{i}: at line {line_number}:\n\ + {line}\n\ + {caret:>column$}\n\ + expected '{expected}', found {actual}\n\n", + i = i, + line_number = line_number, + line = line, + caret = '^', + column = column_number, + expected = c, + actual = actual, + ) + } else { + write!( + &mut result, + "{i}: at line {line_number}:\n\ + {line}\n\ + {caret:>column$}\n\ + expected '{expected}', got end of input\n\n", + i = i, + line_number = line_number, + line = line, + caret = '^', + column = column_number, + expected = c, + ) + } + } + VerboseErrorKind::Context(s) => write!( + &mut result, + "{i}: at line {line_number}, in {context}:\n\ + {line}\n\ + {caret:>column$}\n\n", + i = i, + line_number = line_number, + context = s, + line = line, + caret = '^', + column = column_number, + ), + VerboseErrorKind::Nom(e) => write!( + &mut result, + "{i}: at line {line_number}, in {nom_err:?}:\n\ + {line}\n\ + {caret:>column$}\n\n", + i = i, + line_number = line_number, + nom_err = e, + line = line, + caret = '^', + column = column_number, + ), + } + } + // Because `write!` to a `String` is infallible, this `unwrap` is fine. 
+ .unwrap(); + } + + result +} + +/// Indicates which parser returned an error +#[rustfmt::skip] +#[derive(Debug,PartialEq,Eq,Hash,Clone,Copy)] +#[allow(deprecated,missing_docs)] +pub enum ErrorKind { + Tag, + MapRes, + MapOpt, + Alt, + IsNot, + IsA, + SeparatedList, + SeparatedNonEmptyList, + Many0, + Many1, + ManyTill, + Count, + TakeUntil, + LengthValue, + TagClosure, + Alpha, + Digit, + HexDigit, + OctDigit, + AlphaNumeric, + Space, + MultiSpace, + LengthValueFn, + Eof, + Switch, + TagBits, + OneOf, + NoneOf, + Char, + CrLf, + RegexpMatch, + RegexpMatches, + RegexpFind, + RegexpCapture, + RegexpCaptures, + TakeWhile1, + Complete, + Fix, + Escaped, + EscapedTransform, + NonEmpty, + ManyMN, + Not, + Permutation, + Verify, + TakeTill1, + TakeWhileMN, + TooLarge, + Many0Count, + Many1Count, + Float, + Satisfy, + Fail, +} + +#[rustfmt::skip] +#[allow(deprecated)] +/// Converts an ErrorKind to a number +pub fn error_to_u32(e: &ErrorKind) -> u32 { + match *e { + ErrorKind::Tag => 1, + ErrorKind::MapRes => 2, + ErrorKind::MapOpt => 3, + ErrorKind::Alt => 4, + ErrorKind::IsNot => 5, + ErrorKind::IsA => 6, + ErrorKind::SeparatedList => 7, + ErrorKind::SeparatedNonEmptyList => 8, + ErrorKind::Many1 => 9, + ErrorKind::Count => 10, + ErrorKind::TakeUntil => 12, + ErrorKind::LengthValue => 15, + ErrorKind::TagClosure => 16, + ErrorKind::Alpha => 17, + ErrorKind::Digit => 18, + ErrorKind::AlphaNumeric => 19, + ErrorKind::Space => 20, + ErrorKind::MultiSpace => 21, + ErrorKind::LengthValueFn => 22, + ErrorKind::Eof => 23, + ErrorKind::Switch => 27, + ErrorKind::TagBits => 28, + ErrorKind::OneOf => 29, + ErrorKind::NoneOf => 30, + ErrorKind::Char => 40, + ErrorKind::CrLf => 41, + ErrorKind::RegexpMatch => 42, + ErrorKind::RegexpMatches => 43, + ErrorKind::RegexpFind => 44, + ErrorKind::RegexpCapture => 45, + ErrorKind::RegexpCaptures => 46, + ErrorKind::TakeWhile1 => 47, + ErrorKind::Complete => 48, + ErrorKind::Fix => 49, + ErrorKind::Escaped => 50, + ErrorKind::EscapedTransform => 51, + ErrorKind::NonEmpty => 56, + ErrorKind::ManyMN => 57, + ErrorKind::HexDigit => 59, + ErrorKind::OctDigit => 61, + ErrorKind::Many0 => 62, + ErrorKind::Not => 63, + ErrorKind::Permutation => 64, + ErrorKind::ManyTill => 65, + ErrorKind::Verify => 66, + ErrorKind::TakeTill1 => 67, + ErrorKind::TakeWhileMN => 69, + ErrorKind::TooLarge => 70, + ErrorKind::Many0Count => 71, + ErrorKind::Many1Count => 72, + ErrorKind::Float => 73, + ErrorKind::Satisfy => 74, + ErrorKind::Fail => 75, + } +} + +impl ErrorKind { + #[rustfmt::skip] + #[allow(deprecated)] + /// Converts an ErrorKind to a text description + pub fn description(&self) -> &str { + match *self { + ErrorKind::Tag => "Tag", + ErrorKind::MapRes => "Map on Result", + ErrorKind::MapOpt => "Map on Option", + ErrorKind::Alt => "Alternative", + ErrorKind::IsNot => "IsNot", + ErrorKind::IsA => "IsA", + ErrorKind::SeparatedList => "Separated list", + ErrorKind::SeparatedNonEmptyList => "Separated non empty list", + ErrorKind::Many0 => "Many0", + ErrorKind::Many1 => "Many1", + ErrorKind::Count => "Count", + ErrorKind::TakeUntil => "Take until", + ErrorKind::LengthValue => "Length followed by value", + ErrorKind::TagClosure => "Tag closure", + ErrorKind::Alpha => "Alphabetic", + ErrorKind::Digit => "Digit", + ErrorKind::AlphaNumeric => "AlphaNumeric", + ErrorKind::Space => "Space", + ErrorKind::MultiSpace => "Multiple spaces", + ErrorKind::LengthValueFn => "LengthValueFn", + ErrorKind::Eof => "End of file", + ErrorKind::Switch => "Switch", + ErrorKind::TagBits => "Tag on 
bitstream", + ErrorKind::OneOf => "OneOf", + ErrorKind::NoneOf => "NoneOf", + ErrorKind::Char => "Char", + ErrorKind::CrLf => "CrLf", + ErrorKind::RegexpMatch => "RegexpMatch", + ErrorKind::RegexpMatches => "RegexpMatches", + ErrorKind::RegexpFind => "RegexpFind", + ErrorKind::RegexpCapture => "RegexpCapture", + ErrorKind::RegexpCaptures => "RegexpCaptures", + ErrorKind::TakeWhile1 => "TakeWhile1", + ErrorKind::Complete => "Complete", + ErrorKind::Fix => "Fix", + ErrorKind::Escaped => "Escaped", + ErrorKind::EscapedTransform => "EscapedTransform", + ErrorKind::NonEmpty => "NonEmpty", + ErrorKind::ManyMN => "Many(m, n)", + ErrorKind::HexDigit => "Hexadecimal Digit", + ErrorKind::OctDigit => "Octal digit", + ErrorKind::Not => "Negation", + ErrorKind::Permutation => "Permutation", + ErrorKind::ManyTill => "ManyTill", + ErrorKind::Verify => "predicate verification", + ErrorKind::TakeTill1 => "TakeTill1", + ErrorKind::TakeWhileMN => "TakeWhileMN", + ErrorKind::TooLarge => "Needed data size is too large", + ErrorKind::Many0Count => "Count occurrence of >=0 patterns", + ErrorKind::Many1Count => "Count occurrence of >=1 patterns", + ErrorKind::Float => "Float", + ErrorKind::Satisfy => "Satisfy", + ErrorKind::Fail => "Fail", + } + } +} + +/// Creates a parse error from a `nom::ErrorKind` +/// and the position in the input +#[allow(unused_variables)] +#[macro_export(local_inner_macros)] +macro_rules! error_position( + ($input:expr, $code:expr) => ({ + $crate::error::make_error($input, $code) + }); +); + +/// Creates a parse error from a `nom::ErrorKind`, +/// the position in the input and the next error in +/// the parsing tree +#[allow(unused_variables)] +#[macro_export(local_inner_macros)] +macro_rules! error_node_position( + ($input:expr, $code:expr, $next:expr) => ({ + $crate::error::append_error($input, $code, $next) + }); +); + +/// Prints a message and the input if the parser fails. +/// +/// The message prints the `Error` or `Incomplete` +/// and the parser's calling code. +/// +/// It also displays the input in hexdump format +/// +/// ```rust +/// use nom::{IResult, error::dbg_dmp, bytes::complete::tag}; +/// +/// fn f(i: &[u8]) -> IResult<&[u8], &[u8]> { +/// dbg_dmp(tag("abcd"), "tag")(i) +/// } +/// +/// let a = &b"efghijkl"[..]; +/// +/// // Will print the following message: +/// // Error(Position(0, [101, 102, 103, 104, 105, 106, 107, 108])) at l.5 by ' tag ! 
( "abcd" ) ' +/// // 00000000 65 66 67 68 69 6a 6b 6c efghijkl +/// f(a); +/// ``` +#[cfg(feature = "std")] +#[cfg_attr(feature = "docsrs", doc(cfg(feature = "std")))] +pub fn dbg_dmp<'a, F, O, E: std::fmt::Debug>( + f: F, + context: &'static str, +) -> impl Fn(&'a [u8]) -> IResult<&'a [u8], O, E> +where + F: Fn(&'a [u8]) -> IResult<&'a [u8], O, E>, +{ + use crate::HexDisplay; + move |i: &'a [u8]| match f(i) { + Err(e) => { + println!("{}: Error({:?}) at:\n{}", context, e, i.to_hex(8)); + Err(e) + } + a => a, + } +} + +#[cfg(test)] +#[cfg(feature = "alloc")] +mod tests { + use super::*; + use crate::character::complete::char; + + #[test] + fn convert_error_panic() { + let input = ""; + + let _result: IResult<_, _, VerboseError<&str>> = char('x')(input); + } +} + +/* +#[cfg(feature = "alloc")] +use lib::std::{vec::Vec, collections::HashMap}; + +#[cfg(feature = "std")] +use lib::std::hash::Hash; + +#[cfg(feature = "std")] +pub fn add_error_pattern<'a, I: Clone + Hash + Eq, O, E: Clone + Hash + Eq>( + h: &mut HashMap, &'a str>, + e: VerboseError, + message: &'a str, +) -> bool { + h.insert(e, message); + true +} + +pub fn slice_to_offsets(input: &[u8], s: &[u8]) -> (usize, usize) { + let start = input.as_ptr(); + let off1 = s.as_ptr() as usize - start as usize; + let off2 = off1 + s.len(); + (off1, off2) +} + +#[cfg(feature = "std")] +pub fn prepare_errors(input: &[u8], e: VerboseError<&[u8]>) -> Option> { + let mut v: Vec<(ErrorKind, usize, usize)> = Vec::new(); + + for (p, kind) in e.errors.drain(..) { + let (o1, o2) = slice_to_offsets(input, p); + v.push((kind, o1, o2)); + } + + v.reverse(); + Some(v) +} + +#[cfg(feature = "std")] +pub fn print_error(input: &[u8], res: VerboseError<&[u8]>) { + if let Some(v) = prepare_errors(input, res) { + let colors = generate_colors(&v); + println!("parser codes: {}", print_codes(&colors, &HashMap::new())); + println!("{}", print_offsets(input, 0, &v)); + } else { + println!("not an error"); + } +} + +#[cfg(feature = "std")] +pub fn generate_colors(v: &[(ErrorKind, usize, usize)]) -> HashMap { + let mut h: HashMap = HashMap::new(); + let mut color = 0; + + for &(ref c, _, _) in v.iter() { + h.insert(error_to_u32(c), color + 31); + color = color + 1 % 7; + } + + h +} + +pub fn code_from_offset(v: &[(ErrorKind, usize, usize)], offset: usize) -> Option { + let mut acc: Option<(u32, usize, usize)> = None; + for &(ref ek, s, e) in v.iter() { + let c = error_to_u32(ek); + if s <= offset && offset <= e { + if let Some((_, start, end)) = acc { + if start <= s && e <= end { + acc = Some((c, s, e)); + } + } else { + acc = Some((c, s, e)); + } + } + } + if let Some((code, _, _)) = acc { + return Some(code); + } else { + return None; + } +} + +#[cfg(feature = "alloc")] +pub fn reset_color(v: &mut Vec) { + v.push(0x1B); + v.push(b'['); + v.push(0); + v.push(b'm'); +} + +#[cfg(feature = "alloc")] +pub fn write_color(v: &mut Vec, color: u8) { + v.push(0x1B); + v.push(b'['); + v.push(1); + v.push(b';'); + let s = color.to_string(); + let bytes = s.as_bytes(); + v.extend(bytes.iter().cloned()); + v.push(b'm'); +} + +#[cfg(feature = "std")] +#[cfg_attr(feature = "cargo-clippy", allow(implicit_hasher))] +pub fn print_codes(colors: &HashMap, names: &HashMap) -> String { + let mut v = Vec::new(); + for (code, &color) in colors { + if let Some(&s) = names.get(code) { + let bytes = s.as_bytes(); + write_color(&mut v, color); + v.extend(bytes.iter().cloned()); + } else { + let s = code.to_string(); + let bytes = s.as_bytes(); + write_color(&mut v, color); + 
v.extend(bytes.iter().cloned()); + } + reset_color(&mut v); + v.push(b' '); + } + reset_color(&mut v); + + String::from_utf8_lossy(&v[..]).into_owned() +} + +#[cfg(feature = "std")] +pub fn print_offsets(input: &[u8], from: usize, offsets: &[(ErrorKind, usize, usize)]) -> String { + let mut v = Vec::with_capacity(input.len() * 3); + let mut i = from; + let chunk_size = 8; + let mut current_code: Option = None; + let mut current_code2: Option = None; + + let colors = generate_colors(&offsets); + + for chunk in input.chunks(chunk_size) { + let s = format!("{:08x}", i); + for &ch in s.as_bytes().iter() { + v.push(ch); + } + v.push(b'\t'); + + let mut k = i; + let mut l = i; + for &byte in chunk { + if let Some(code) = code_from_offset(&offsets, k) { + if let Some(current) = current_code { + if current != code { + reset_color(&mut v); + current_code = Some(code); + if let Some(&color) = colors.get(&code) { + write_color(&mut v, color); + } + } + } else { + current_code = Some(code); + if let Some(&color) = colors.get(&code) { + write_color(&mut v, color); + } + } + } + v.push(CHARS[(byte >> 4) as usize]); + v.push(CHARS[(byte & 0xf) as usize]); + v.push(b' '); + k = k + 1; + } + + reset_color(&mut v); + + if chunk_size > chunk.len() { + for _ in 0..(chunk_size - chunk.len()) { + v.push(b' '); + v.push(b' '); + v.push(b' '); + } + } + v.push(b'\t'); + + for &byte in chunk { + if let Some(code) = code_from_offset(&offsets, l) { + if let Some(current) = current_code2 { + if current != code { + reset_color(&mut v); + current_code2 = Some(code); + if let Some(&color) = colors.get(&code) { + write_color(&mut v, color); + } + } + } else { + current_code2 = Some(code); + if let Some(&color) = colors.get(&code) { + write_color(&mut v, color); + } + } + } + if (byte >= 32 && byte <= 126) || byte >= 128 { + v.push(byte); + } else { + v.push(b'.'); + } + l = l + 1; + } + reset_color(&mut v); + + v.push(b'\n'); + i = i + chunk_size; + } + + String::from_utf8_lossy(&v[..]).into_owned() +} +*/ diff --git a/vendor/nom/src/internal.rs b/vendor/nom/src/internal.rs new file mode 100644 index 00000000000000..b7572fbd0a9429 --- /dev/null +++ b/vendor/nom/src/internal.rs @@ -0,0 +1,489 @@ +//! Basic types to build the parsers + +use self::Needed::*; +use crate::error::{self, ErrorKind}; +use crate::lib::std::fmt; +use core::num::NonZeroUsize; + +/// Holds the result of parsing functions +/// +/// It depends on the input type `I`, the output type `O`, and the error type `E` +/// (by default `(I, nom::ErrorKind)`) +/// +/// The `Ok` side is a pair containing the remainder of the input (the part of the data that +/// was not parsed) and the produced value. The `Err` side contains an instance of `nom::Err`. +/// +/// Outside of the parsing code, you can use the [Finish::finish] method to convert +/// it to a more common result type +pub type IResult> = Result<(I, O), Err>; + +/// Helper trait to convert a parser's result to a more manageable type +pub trait Finish { + /// converts the parser's result to a type that is more consumable by error + /// management libraries. It keeps the same `Ok` branch, and merges `Err::Error` + /// and `Err::Failure` into the `Err` side. + /// + /// *warning*: if the result is `Err(Err::Incomplete(_))`, this method will panic. + /// - "complete" parsers: It will not be an issue, `Incomplete` is never used + /// - "streaming" parsers: `Incomplete` will be returned if there's not enough data + /// for the parser to decide, and you should gather more data before parsing again. 
+ /// Once the parser returns either `Ok(_)`, `Err(Err::Error(_))` or `Err(Err::Failure(_))`, + /// you can get out of the parsing loop and call `finish()` on the parser's result + fn finish(self) -> Result<(I, O), E>; +} + +impl Finish for IResult { + fn finish(self) -> Result<(I, O), E> { + match self { + Ok(res) => Ok(res), + Err(Err::Error(e)) | Err(Err::Failure(e)) => Err(e), + Err(Err::Incomplete(_)) => { + panic!("Cannot call `finish()` on `Err(Err::Incomplete(_))`: this result means that the parser does not have enough data to decide, you should gather more data and try to reapply the parser instead") + } + } + } +} + +/// Contains information on needed data if a parser returned `Incomplete` +#[derive(Debug, PartialEq, Eq, Clone, Copy)] +#[cfg_attr(nightly, warn(rustdoc::missing_doc_code_examples))] +pub enum Needed { + /// Needs more data, but we do not know how much + Unknown, + /// Contains the required data size in bytes + Size(NonZeroUsize), +} + +impl Needed { + /// Creates `Needed` instance, returns `Needed::Unknown` if the argument is zero + pub fn new(s: usize) -> Self { + match NonZeroUsize::new(s) { + Some(sz) => Needed::Size(sz), + None => Needed::Unknown, + } + } + + /// Indicates if we know how many bytes we need + pub fn is_known(&self) -> bool { + *self != Unknown + } + + /// Maps a `Needed` to `Needed` by applying a function to a contained `Size` value. + #[inline] + pub fn map usize>(self, f: F) -> Needed { + match self { + Unknown => Unknown, + Size(n) => Needed::new(f(n)), + } + } +} + +/// The `Err` enum indicates the parser was not successful +/// +/// It has three cases: +/// +/// * `Incomplete` indicates that more data is needed to decide. The `Needed` enum +/// can contain how many additional bytes are necessary. If you are sure your parser +/// is working on full data, you can wrap your parser with the `complete` combinator +/// to transform that case in `Error` +/// * `Error` means some parser did not succeed, but another one might (as an example, +/// when testing different branches of an `alt` combinator) +/// * `Failure` indicates an unrecoverable error. 
As an example, if you recognize a prefix +/// to decide on the next parser to apply, and that parser fails, you know there's no need +/// to try other parsers, you were already in the right branch, so the data is invalid +/// +#[derive(Debug, Clone, PartialEq)] +#[cfg_attr(nightly, warn(rustdoc::missing_doc_code_examples))] +pub enum Err { + /// There was not enough data + Incomplete(Needed), + /// The parser had an error (recoverable) + Error(E), + /// The parser had an unrecoverable error: we got to the right + /// branch and we know other branches won't work, so backtrack + /// as fast as possible + Failure(E), +} + +impl Err { + /// Tests if the result is Incomplete + pub fn is_incomplete(&self) -> bool { + if let Err::Incomplete(_) = self { + true + } else { + false + } + } + + /// Applies the given function to the inner error + pub fn map(self, f: F) -> Err + where + F: FnOnce(E) -> E2, + { + match self { + Err::Incomplete(n) => Err::Incomplete(n), + Err::Failure(t) => Err::Failure(f(t)), + Err::Error(t) => Err::Error(f(t)), + } + } + + /// Automatically converts between errors if the underlying type supports it + pub fn convert(e: Err) -> Self + where + E: From, + { + e.map(crate::lib::std::convert::Into::into) + } +} + +impl Err<(T, ErrorKind)> { + /// Maps `Err<(T, ErrorKind)>` to `Err<(U, ErrorKind)>` with the given `F: T -> U` + pub fn map_input(self, f: F) -> Err<(U, ErrorKind)> + where + F: FnOnce(T) -> U, + { + match self { + Err::Incomplete(n) => Err::Incomplete(n), + Err::Failure((input, k)) => Err::Failure((f(input), k)), + Err::Error((input, k)) => Err::Error((f(input), k)), + } + } +} + +impl Err> { + /// Maps `Err>` to `Err>` with the given `F: T -> U` + pub fn map_input(self, f: F) -> Err> + where + F: FnOnce(T) -> U, + { + match self { + Err::Incomplete(n) => Err::Incomplete(n), + Err::Failure(error::Error { input, code }) => Err::Failure(error::Error { + input: f(input), + code, + }), + Err::Error(error::Error { input, code }) => Err::Error(error::Error { + input: f(input), + code, + }), + } + } +} + +#[cfg(feature = "alloc")] +use crate::lib::std::{borrow::ToOwned, string::String, vec::Vec}; +#[cfg(feature = "alloc")] +impl Err<(&[u8], ErrorKind)> { + /// Obtaining ownership + #[cfg_attr(feature = "docsrs", doc(cfg(feature = "alloc")))] + pub fn to_owned(self) -> Err<(Vec, ErrorKind)> { + self.map_input(ToOwned::to_owned) + } +} + +#[cfg(feature = "alloc")] +impl Err<(&str, ErrorKind)> { + /// Obtaining ownership + #[cfg_attr(feature = "docsrs", doc(cfg(feature = "alloc")))] + pub fn to_owned(self) -> Err<(String, ErrorKind)> { + self.map_input(ToOwned::to_owned) + } +} + +#[cfg(feature = "alloc")] +impl Err> { + /// Obtaining ownership + #[cfg_attr(feature = "docsrs", doc(cfg(feature = "alloc")))] + pub fn to_owned(self) -> Err>> { + self.map_input(ToOwned::to_owned) + } +} + +#[cfg(feature = "alloc")] +impl Err> { + /// Obtaining ownership + #[cfg_attr(feature = "docsrs", doc(cfg(feature = "alloc")))] + pub fn to_owned(self) -> Err> { + self.map_input(ToOwned::to_owned) + } +} + +impl Eq for Err {} + +impl fmt::Display for Err +where + E: fmt::Debug, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + Err::Incomplete(Needed::Size(u)) => write!(f, "Parsing requires {} bytes/chars", u), + Err::Incomplete(Needed::Unknown) => write!(f, "Parsing requires more data"), + Err::Failure(c) => write!(f, "Parsing Failure: {:?}", c), + Err::Error(c) => write!(f, "Parsing Error: {:?}", c), + } + } +} + +#[cfg(feature = "std")] +use 
std::error::Error; + +#[cfg(feature = "std")] +impl Error for Err +where + E: fmt::Debug, +{ + fn source(&self) -> Option<&(dyn Error + 'static)> { + None // no underlying error + } +} + +/// All nom parsers implement this trait +pub trait Parser { + /// A parser takes in input type, and returns a `Result` containing + /// either the remaining input and the output value, or an error + fn parse(&mut self, input: I) -> IResult; + + /// Maps a function over the result of a parser + fn map(self, g: G) -> Map + where + G: Fn(O) -> O2, + Self: core::marker::Sized, + { + Map { + f: self, + g, + phantom: core::marker::PhantomData, + } + } + + /// Creates a second parser from the output of the first one, then apply over the rest of the input + fn flat_map(self, g: G) -> FlatMap + where + G: FnMut(O) -> H, + H: Parser, + Self: core::marker::Sized, + { + FlatMap { + f: self, + g, + phantom: core::marker::PhantomData, + } + } + + /// Applies a second parser over the output of the first one + fn and_then(self, g: G) -> AndThen + where + G: Parser, + Self: core::marker::Sized, + { + AndThen { + f: self, + g, + phantom: core::marker::PhantomData, + } + } + + /// Applies a second parser after the first one, return their results as a tuple + fn and(self, g: G) -> And + where + G: Parser, + Self: core::marker::Sized, + { + And { f: self, g } + } + + /// Applies a second parser over the input if the first one failed + fn or(self, g: G) -> Or + where + G: Parser, + Self: core::marker::Sized, + { + Or { f: self, g } + } + + /// automatically converts the parser's output and error values to another type, as long as they + /// implement the `From` trait + fn into, E2: From>(self) -> Into + where + Self: core::marker::Sized, + { + Into { + f: self, + phantom_out1: core::marker::PhantomData, + phantom_err1: core::marker::PhantomData, + phantom_out2: core::marker::PhantomData, + phantom_err2: core::marker::PhantomData, + } + } +} + +impl<'a, I, O, E, F> Parser for F +where + F: FnMut(I) -> IResult + 'a, +{ + fn parse(&mut self, i: I) -> IResult { + self(i) + } +} + +#[cfg(feature = "alloc")] +use alloc::boxed::Box; + +#[cfg(feature = "alloc")] +impl<'a, I, O, E> Parser for Box + 'a> { + fn parse(&mut self, input: I) -> IResult { + (**self).parse(input) + } +} + +/// Implementation of `Parser::map` +#[cfg_attr(nightly, warn(rustdoc::missing_doc_code_examples))] +pub struct Map { + f: F, + g: G, + phantom: core::marker::PhantomData, +} + +impl<'a, I, O1, O2, E, F: Parser, G: Fn(O1) -> O2> Parser for Map { + fn parse(&mut self, i: I) -> IResult { + match self.f.parse(i) { + Err(e) => Err(e), + Ok((i, o)) => Ok((i, (self.g)(o))), + } + } +} + +/// Implementation of `Parser::flat_map` +#[cfg_attr(nightly, warn(rustdoc::missing_doc_code_examples))] +pub struct FlatMap { + f: F, + g: G, + phantom: core::marker::PhantomData, +} + +impl<'a, I, O1, O2, E, F: Parser, G: Fn(O1) -> H, H: Parser> Parser + for FlatMap +{ + fn parse(&mut self, i: I) -> IResult { + let (i, o1) = self.f.parse(i)?; + (self.g)(o1).parse(i) + } +} + +/// Implementation of `Parser::and_then` +#[cfg_attr(nightly, warn(rustdoc::missing_doc_code_examples))] +pub struct AndThen { + f: F, + g: G, + phantom: core::marker::PhantomData, +} + +impl<'a, I, O1, O2, E, F: Parser, G: Parser> Parser + for AndThen +{ + fn parse(&mut self, i: I) -> IResult { + let (i, o1) = self.f.parse(i)?; + let (_, o2) = self.g.parse(o1)?; + Ok((i, o2)) + } +} + +/// Implementation of `Parser::and` +#[cfg_attr(nightly, warn(rustdoc::missing_doc_code_examples))] +pub struct And { + 
f: F, + g: G, +} + +impl<'a, I, O1, O2, E, F: Parser, G: Parser> Parser + for And +{ + fn parse(&mut self, i: I) -> IResult { + let (i, o1) = self.f.parse(i)?; + let (i, o2) = self.g.parse(i)?; + Ok((i, (o1, o2))) + } +} + +/// Implementation of `Parser::or` +#[cfg_attr(nightly, warn(rustdoc::missing_doc_code_examples))] +pub struct Or { + f: F, + g: G, +} + +impl<'a, I: Clone, O, E: crate::error::ParseError, F: Parser, G: Parser> + Parser for Or +{ + fn parse(&mut self, i: I) -> IResult { + match self.f.parse(i.clone()) { + Err(Err::Error(e1)) => match self.g.parse(i) { + Err(Err::Error(e2)) => Err(Err::Error(e1.or(e2))), + res => res, + }, + res => res, + } + } +} + +/// Implementation of `Parser::into` +#[cfg_attr(nightly, warn(rustdoc::missing_doc_code_examples))] +pub struct Into, E1, E2: From> { + f: F, + phantom_out1: core::marker::PhantomData, + phantom_err1: core::marker::PhantomData, + phantom_out2: core::marker::PhantomData, + phantom_err2: core::marker::PhantomData, +} + +impl< + 'a, + I: Clone, + O1, + O2: From, + E1, + E2: crate::error::ParseError + From, + F: Parser, + > Parser for Into +{ + fn parse(&mut self, i: I) -> IResult { + match self.f.parse(i) { + Ok((i, o)) => Ok((i, o.into())), + Err(Err::Error(e)) => Err(Err::Error(e.into())), + Err(Err::Failure(e)) => Err(Err::Failure(e.into())), + Err(Err::Incomplete(e)) => Err(Err::Incomplete(e)), + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::error::ErrorKind; + + #[doc(hidden)] + #[macro_export] + macro_rules! assert_size ( + ($t:ty, $sz:expr) => ( + assert_eq!(crate::lib::std::mem::size_of::<$t>(), $sz); + ); + ); + + #[test] + #[cfg(target_pointer_width = "64")] + fn size_test() { + assert_size!(IResult<&[u8], &[u8], (&[u8], u32)>, 40); + //FIXME: since rust 1.65, this is now 32 bytes, likely thanks to https://github.com/rust-lang/rust/pull/94075 + // deactivating that test for now because it'll have different values depending on the rust version + // assert_size!(IResult<&str, &str, u32>, 40); + assert_size!(Needed, 8); + assert_size!(Err, 16); + assert_size!(ErrorKind, 1); + } + + #[test] + fn err_map_test() { + let e = Err::Error(1); + assert_eq!(e.map(|v| v + 1), Err::Error(2)); + } +} diff --git a/vendor/nom/src/lib.rs b/vendor/nom/src/lib.rs new file mode 100644 index 00000000000000..3beb2f4179d489 --- /dev/null +++ b/vendor/nom/src/lib.rs @@ -0,0 +1,464 @@ +//! # nom, eating data byte by byte +//! +//! nom is a parser combinator library with a focus on safe parsing, +//! streaming patterns, and as much as possible zero copy. +//! +//! ## Example +//! +//! ```rust +//! use nom::{ +//! IResult, +//! bytes::complete::{tag, take_while_m_n}, +//! combinator::map_res, +//! sequence::tuple}; +//! +//! #[derive(Debug,PartialEq)] +//! pub struct Color { +//! pub red: u8, +//! pub green: u8, +//! pub blue: u8, +//! } +//! +//! fn from_hex(input: &str) -> Result { +//! u8::from_str_radix(input, 16) +//! } +//! +//! fn is_hex_digit(c: char) -> bool { +//! c.is_digit(16) +//! } +//! +//! fn hex_primary(input: &str) -> IResult<&str, u8> { +//! map_res( +//! take_while_m_n(2, 2, is_hex_digit), +//! from_hex +//! )(input) +//! } +//! +//! fn hex_color(input: &str) -> IResult<&str, Color> { +//! let (input, _) = tag("#")(input)?; +//! let (input, (red, green, blue)) = tuple((hex_primary, hex_primary, hex_primary))(input)?; +//! +//! Ok((input, Color { red, green, blue })) +//! } +//! +//! fn main() { +//! assert_eq!(hex_color("#2F14DF"), Ok(("", Color { +//! red: 47, +//! green: 20, +//! blue: 223, +//! 
}))); +//! } +//! ``` +//! +//! The code is available on [Github](https://github.com/Geal/nom) +//! +//! There are a few [guides](https://github.com/Geal/nom/tree/main/doc) with more details +//! about [how to write parsers](https://github.com/Geal/nom/blob/main/doc/making_a_new_parser_from_scratch.md), +//! or the [error management system](https://github.com/Geal/nom/blob/main/doc/error_management.md). +//! You can also check out the [recipes] module that contains examples of common patterns. +//! +//! **Looking for a specific combinator? Read the +//! ["choose a combinator" guide](https://github.com/Geal/nom/blob/main/doc/choosing_a_combinator.md)** +//! +//! If you are upgrading to nom 5.0, please read the +//! [migration document](https://github.com/Geal/nom/blob/main/doc/upgrading_to_nom_5.md). +//! +//! ## Parser combinators +//! +//! Parser combinators are an approach to parsers that is very different from +//! software like [lex](https://en.wikipedia.org/wiki/Lex_(software)) and +//! [yacc](https://en.wikipedia.org/wiki/Yacc). Instead of writing the grammar +//! in a separate syntax and generating the corresponding code, you use very small +//! functions with very specific purposes, like "take 5 bytes", or "recognize the +//! word 'HTTP'", and assemble them in meaningful patterns like "recognize +//! 'HTTP', then a space, then a version". +//! The resulting code is small, and looks like the grammar you would have +//! written with other parser approaches. +//! +//! This gives us a few advantages: +//! +//! - The parsers are small and easy to write +//! - The parsers components are easy to reuse (if they're general enough, please add them to nom!) +//! - The parsers components are easy to test separately (unit tests and property-based tests) +//! - The parser combination code looks close to the grammar you would have written +//! - You can build partial parsers, specific to the data you need at the moment, and ignore the rest +//! +//! Here is an example of one such parser, to recognize text between parentheses: +//! +//! ```rust +//! use nom::{ +//! IResult, +//! sequence::delimited, +//! // see the "streaming/complete" paragraph lower for an explanation of these submodules +//! character::complete::char, +//! bytes::complete::is_not +//! }; +//! +//! fn parens(input: &str) -> IResult<&str, &str> { +//! delimited(char('('), is_not(")"), char(')'))(input) +//! } +//! ``` +//! +//! It defines a function named `parens` which will recognize a sequence of the +//! character `(`, the longest byte array not containing `)`, then the character +//! `)`, and will return the byte array in the middle. +//! +//! Here is another parser, written without using nom's combinators this time: +//! +//! ```rust +//! use nom::{IResult, Err, Needed}; +//! +//! # fn main() { +//! fn take4(i: &[u8]) -> IResult<&[u8], &[u8]>{ +//! if i.len() < 4 { +//! Err(Err::Incomplete(Needed::new(4))) +//! } else { +//! Ok((&i[4..], &i[0..4])) +//! } +//! } +//! # } +//! ``` +//! +//! This function takes a byte array as input, and tries to consume 4 bytes. +//! Writing all the parsers manually, like this, is dangerous, despite Rust's +//! safety features. There are still a lot of mistakes one can make. That's why +//! nom provides a list of functions to help in developing parsers. +//! +//! With functions, you would write it like this: +//! +//! ```rust +//! use nom::{IResult, bytes::streaming::take}; +//! fn take4(input: &str) -> IResult<&str, &str> { +//! take(4u8)(input) +//! } +//! ``` +//! +//! 
A parser in nom is a function which, for an input type `I`, an output type `O` +//! and an optional error type `E`, will have the following signature: +//! +//! ```rust,compile_fail +//! fn parser(input: I) -> IResult; +//! ``` +//! +//! Or like this, if you don't want to specify a custom error type (it will be `(I, ErrorKind)` by default): +//! +//! ```rust,compile_fail +//! fn parser(input: I) -> IResult; +//! ``` +//! +//! `IResult` is an alias for the `Result` type: +//! +//! ```rust +//! use nom::{Needed, error::Error}; +//! +//! type IResult> = Result<(I, O), Err>; +//! +//! enum Err { +//! Incomplete(Needed), +//! Error(E), +//! Failure(E), +//! } +//! ``` +//! +//! It can have the following values: +//! +//! - A correct result `Ok((I,O))` with the first element being the remaining of the input (not parsed yet), and the second the output value; +//! - An error `Err(Err::Error(c))` with `c` an error that can be built from the input position and a parser specific error +//! - An error `Err(Err::Incomplete(Needed))` indicating that more input is necessary. `Needed` can indicate how much data is needed +//! - An error `Err(Err::Failure(c))`. It works like the `Error` case, except it indicates an unrecoverable error: We cannot backtrack and test another parser +//! +//! Please refer to the ["choose a combinator" guide](https://github.com/Geal/nom/blob/main/doc/choosing_a_combinator.md) for an exhaustive list of parsers. +//! See also the rest of the documentation [here](https://github.com/Geal/nom/blob/main/doc). +//! +//! ## Making new parsers with function combinators +//! +//! nom is based on functions that generate parsers, with a signature like +//! this: `(arguments) -> impl Fn(Input) -> IResult`. +//! The arguments of a combinator can be direct values (like `take` which uses +//! a number of bytes or character as argument) or even other parsers (like +//! `delimited` which takes as argument 3 parsers, and returns the result of +//! the second one if all are successful). +//! +//! Here are some examples: +//! +//! ```rust +//! use nom::IResult; +//! use nom::bytes::complete::{tag, take}; +//! fn abcd_parser(i: &str) -> IResult<&str, &str> { +//! tag("abcd")(i) // will consume bytes if the input begins with "abcd" +//! } +//! +//! fn take_10(i: &[u8]) -> IResult<&[u8], &[u8]> { +//! take(10u8)(i) // will consume and return 10 bytes of input +//! } +//! ``` +//! +//! ## Combining parsers +//! +//! There are higher level patterns, like the **`alt`** combinator, which +//! provides a choice between multiple parsers. If one branch fails, it tries +//! the next, and returns the result of the first parser that succeeds: +//! +//! ```rust +//! use nom::IResult; +//! use nom::branch::alt; +//! use nom::bytes::complete::tag; +//! +//! let mut alt_tags = alt((tag("abcd"), tag("efgh"))); +//! +//! assert_eq!(alt_tags(&b"abcdxxx"[..]), Ok((&b"xxx"[..], &b"abcd"[..]))); +//! assert_eq!(alt_tags(&b"efghxxx"[..]), Ok((&b"xxx"[..], &b"efgh"[..]))); +//! assert_eq!(alt_tags(&b"ijklxxx"[..]), Err(nom::Err::Error((&b"ijklxxx"[..], nom::error::ErrorKind::Tag)))); +//! ``` +//! +//! The **`opt`** combinator makes a parser optional. If the child parser returns +//! an error, **`opt`** will still succeed and return None: +//! +//! ```rust +//! use nom::{IResult, combinator::opt, bytes::complete::tag}; +//! fn abcd_opt(i: &[u8]) -> IResult<&[u8], Option<&[u8]>> { +//! opt(tag("abcd"))(i) +//! } +//! +//! assert_eq!(abcd_opt(&b"abcdxxx"[..]), Ok((&b"xxx"[..], Some(&b"abcd"[..])))); +//! 
assert_eq!(abcd_opt(&b"efghxxx"[..]), Ok((&b"efghxxx"[..], None))); +//! ``` +//! +//! **`many0`** applies a parser 0 or more times, and returns a vector of the aggregated results: +//! +//! ```rust +//! # #[cfg(feature = "alloc")] +//! # fn main() { +//! use nom::{IResult, multi::many0, bytes::complete::tag}; +//! use std::str; +//! +//! fn multi(i: &str) -> IResult<&str, Vec<&str>> { +//! many0(tag("abcd"))(i) +//! } +//! +//! let a = "abcdef"; +//! let b = "abcdabcdef"; +//! let c = "azerty"; +//! assert_eq!(multi(a), Ok(("ef", vec!["abcd"]))); +//! assert_eq!(multi(b), Ok(("ef", vec!["abcd", "abcd"]))); +//! assert_eq!(multi(c), Ok(("azerty", Vec::new()))); +//! # } +//! # #[cfg(not(feature = "alloc"))] +//! # fn main() {} +//! ``` +//! +//! Here are some basic combinators available: +//! +//! - **`opt`**: Will make the parser optional (if it returns the `O` type, the new parser returns `Option`) +//! - **`many0`**: Will apply the parser 0 or more times (if it returns the `O` type, the new parser returns `Vec`) +//! - **`many1`**: Will apply the parser 1 or more times +//! +//! There are more complex (and more useful) parsers like `tuple`, which is +//! used to apply a series of parsers then assemble their results. +//! +//! Example with `tuple`: +//! +//! ```rust +//! # fn main() { +//! use nom::{error::ErrorKind, Needed, +//! number::streaming::be_u16, +//! bytes::streaming::{tag, take}, +//! sequence::tuple}; +//! +//! let mut tpl = tuple((be_u16, take(3u8), tag("fg"))); +//! +//! assert_eq!( +//! tpl(&b"abcdefgh"[..]), +//! Ok(( +//! &b"h"[..], +//! (0x6162u16, &b"cde"[..], &b"fg"[..]) +//! )) +//! ); +//! assert_eq!(tpl(&b"abcde"[..]), Err(nom::Err::Incomplete(Needed::new(2)))); +//! let input = &b"abcdejk"[..]; +//! assert_eq!(tpl(input), Err(nom::Err::Error((&input[5..], ErrorKind::Tag)))); +//! # } +//! ``` +//! +//! But you can also use a sequence of combinators written in imperative style, +//! thanks to the `?` operator: +//! +//! ```rust +//! # fn main() { +//! use nom::{IResult, bytes::complete::tag}; +//! +//! #[derive(Debug, PartialEq)] +//! struct A { +//! a: u8, +//! b: u8 +//! } +//! +//! fn ret_int1(i:&[u8]) -> IResult<&[u8], u8> { Ok((i,1)) } +//! fn ret_int2(i:&[u8]) -> IResult<&[u8], u8> { Ok((i,2)) } +//! +//! fn f(i: &[u8]) -> IResult<&[u8], A> { +//! // if successful, the parser returns `Ok((remaining_input, output_value))` that we can destructure +//! let (i, _) = tag("abcd")(i)?; +//! let (i, a) = ret_int1(i)?; +//! let (i, _) = tag("efgh")(i)?; +//! let (i, b) = ret_int2(i)?; +//! +//! Ok((i, A { a, b })) +//! } +//! +//! let r = f(b"abcdefghX"); +//! assert_eq!(r, Ok((&b"X"[..], A{a: 1, b: 2}))); +//! # } +//! ``` +//! +//! ## Streaming / Complete +//! +//! Some of nom's modules have `streaming` or `complete` submodules. They hold +//! different variants of the same combinators. +//! +//! A streaming parser assumes that we might not have all of the input data. +//! This can happen with some network protocol or large file parsers, where the +//! input buffer can be full and need to be resized or refilled. +//! +//! A complete parser assumes that we already have all of the input data. +//! This will be the common case with small files that can be read entirely to +//! memory. +//! +//! Here is how it works in practice: +//! +//! ```rust +//! use nom::{IResult, Err, Needed, error::{Error, ErrorKind}, bytes, character}; +//! +//! fn take_streaming(i: &[u8]) -> IResult<&[u8], &[u8]> { +//! bytes::streaming::take(4u8)(i) +//! } +//! +//! 
fn take_complete(i: &[u8]) -> IResult<&[u8], &[u8]> { +//! bytes::complete::take(4u8)(i) +//! } +//! +//! // both parsers will take 4 bytes as expected +//! assert_eq!(take_streaming(&b"abcde"[..]), Ok((&b"e"[..], &b"abcd"[..]))); +//! assert_eq!(take_complete(&b"abcde"[..]), Ok((&b"e"[..], &b"abcd"[..]))); +//! +//! // if the input is smaller than 4 bytes, the streaming parser +//! // will return `Incomplete` to indicate that we need more data +//! assert_eq!(take_streaming(&b"abc"[..]), Err(Err::Incomplete(Needed::new(1)))); +//! +//! // but the complete parser will return an error +//! assert_eq!(take_complete(&b"abc"[..]), Err(Err::Error(Error::new(&b"abc"[..], ErrorKind::Eof)))); +//! +//! // the alpha0 function recognizes 0 or more alphabetic characters +//! fn alpha0_streaming(i: &str) -> IResult<&str, &str> { +//! character::streaming::alpha0(i) +//! } +//! +//! fn alpha0_complete(i: &str) -> IResult<&str, &str> { +//! character::complete::alpha0(i) +//! } +//! +//! // if there's a clear limit to the recognized characters, both parsers work the same way +//! assert_eq!(alpha0_streaming("abcd;"), Ok((";", "abcd"))); +//! assert_eq!(alpha0_complete("abcd;"), Ok((";", "abcd"))); +//! +//! // but when there's no limit, the streaming version returns `Incomplete`, because it cannot +//! // know if more input data should be recognized. The whole input could be "abcd;", or +//! // "abcde;" +//! assert_eq!(alpha0_streaming("abcd"), Err(Err::Incomplete(Needed::new(1)))); +//! +//! // while the complete version knows that all of the data is there +//! assert_eq!(alpha0_complete("abcd"), Ok(("", "abcd"))); +//! ``` +//! **Going further:** Read the [guides](https://github.com/Geal/nom/tree/main/doc), +//! check out the [recipes]! +#![cfg_attr(not(feature = "std"), no_std)] +#![cfg_attr(feature = "cargo-clippy", allow(clippy::doc_markdown))] +#![cfg_attr(feature = "docsrs", feature(doc_cfg))] +#![cfg_attr(feature = "docsrs", feature(extended_key_value_attributes))] +#![deny(missing_docs)] +#[cfg_attr(nightly, warn(rustdoc::missing_doc_code_examples))] +#[cfg(feature = "alloc")] +#[macro_use] +extern crate alloc; +#[cfg(doctest)] +extern crate doc_comment; + +#[cfg(doctest)] +doc_comment::doctest!("../README.md"); + +/// Lib module to re-export everything needed from `std` or `core`/`alloc`. This is how `serde` does +/// it, albeit there it is not public. +#[cfg_attr(nightly, allow(rustdoc::missing_doc_code_examples))] +pub mod lib { + /// `std` facade allowing `std`/`core` to be interchangeable. 
Reexports `alloc` crate optionally, + /// as well as `core` or `std` + #[cfg(not(feature = "std"))] + #[cfg_attr(nightly, allow(rustdoc::missing_doc_code_examples))] + /// internal std exports for no_std compatibility + pub mod std { + #[doc(hidden)] + #[cfg(not(feature = "alloc"))] + pub use core::borrow; + + #[cfg(feature = "alloc")] + #[doc(hidden)] + pub use alloc::{borrow, boxed, string, vec}; + + #[doc(hidden)] + pub use core::{cmp, convert, fmt, iter, mem, ops, option, result, slice, str}; + + /// internal reproduction of std prelude + #[doc(hidden)] + pub mod prelude { + pub use core::prelude as v1; + } + } + + #[cfg(feature = "std")] + #[cfg_attr(nightly, allow(rustdoc::missing_doc_code_examples))] + /// internal std exports for no_std compatibility + pub mod std { + #[doc(hidden)] + pub use std::{ + alloc, borrow, boxed, cmp, collections, convert, fmt, hash, iter, mem, ops, option, result, + slice, str, string, vec, + }; + + /// internal reproduction of std prelude + #[doc(hidden)] + pub mod prelude { + pub use std::prelude as v1; + } + } +} + +pub use self::bits::*; +pub use self::internal::*; +pub use self::traits::*; + +pub use self::str::*; + +#[macro_use] +mod macros; +#[macro_use] +pub mod error; + +pub mod branch; +pub mod combinator; +mod internal; +pub mod multi; +pub mod sequence; +mod traits; + +pub mod bits; +pub mod bytes; + +pub mod character; + +mod str; + +pub mod number; + +#[cfg(feature = "docsrs")] +#[cfg_attr(feature = "docsrs", cfg_attr(feature = "docsrs", doc = include_str!("../doc/nom_recipes.md")))] +pub mod recipes {} diff --git a/vendor/nom/src/macros.rs b/vendor/nom/src/macros.rs new file mode 100644 index 00000000000000..980d2d90ed28a5 --- /dev/null +++ b/vendor/nom/src/macros.rs @@ -0,0 +1,23 @@ +macro_rules! succ ( + (0, $submac:ident ! ($($rest:tt)*)) => ($submac!(1, $($rest)*)); + (1, $submac:ident ! ($($rest:tt)*)) => ($submac!(2, $($rest)*)); + (2, $submac:ident ! ($($rest:tt)*)) => ($submac!(3, $($rest)*)); + (3, $submac:ident ! ($($rest:tt)*)) => ($submac!(4, $($rest)*)); + (4, $submac:ident ! ($($rest:tt)*)) => ($submac!(5, $($rest)*)); + (5, $submac:ident ! ($($rest:tt)*)) => ($submac!(6, $($rest)*)); + (6, $submac:ident ! ($($rest:tt)*)) => ($submac!(7, $($rest)*)); + (7, $submac:ident ! ($($rest:tt)*)) => ($submac!(8, $($rest)*)); + (8, $submac:ident ! ($($rest:tt)*)) => ($submac!(9, $($rest)*)); + (9, $submac:ident ! ($($rest:tt)*)) => ($submac!(10, $($rest)*)); + (10, $submac:ident ! ($($rest:tt)*)) => ($submac!(11, $($rest)*)); + (11, $submac:ident ! ($($rest:tt)*)) => ($submac!(12, $($rest)*)); + (12, $submac:ident ! ($($rest:tt)*)) => ($submac!(13, $($rest)*)); + (13, $submac:ident ! ($($rest:tt)*)) => ($submac!(14, $($rest)*)); + (14, $submac:ident ! ($($rest:tt)*)) => ($submac!(15, $($rest)*)); + (15, $submac:ident ! ($($rest:tt)*)) => ($submac!(16, $($rest)*)); + (16, $submac:ident ! ($($rest:tt)*)) => ($submac!(17, $($rest)*)); + (17, $submac:ident ! ($($rest:tt)*)) => ($submac!(18, $($rest)*)); + (18, $submac:ident ! ($($rest:tt)*)) => ($submac!(19, $($rest)*)); + (19, $submac:ident ! ($($rest:tt)*)) => ($submac!(20, $($rest)*)); + (20, $submac:ident ! ($($rest:tt)*)) => ($submac!(21, $($rest)*)); +); diff --git a/vendor/nom/src/multi/mod.rs b/vendor/nom/src/multi/mod.rs new file mode 100644 index 00000000000000..73129084e2a903 --- /dev/null +++ b/vendor/nom/src/multi/mod.rs @@ -0,0 +1,1049 @@ +//! 
Combinators applying their child parser multiple times + +#[cfg(test)] +mod tests; + +use crate::error::ErrorKind; +use crate::error::ParseError; +use crate::internal::{Err, IResult, Needed, Parser}; +#[cfg(feature = "alloc")] +use crate::lib::std::vec::Vec; +use crate::traits::{InputLength, InputTake, ToUsize}; +use core::num::NonZeroUsize; + +/// Don't pre-allocate more than 64KiB when calling `Vec::with_capacity`. +/// +/// Pre-allocating memory is a nice optimization but count fields can't +/// always be trusted. We should clamp initial capacities to some reasonable +/// amount. This reduces the risk of a bogus count value triggering a panic +/// due to an OOM error. +/// +/// This does not affect correctness. Nom will always read the full number +/// of elements regardless of the capacity cap. +#[cfg(feature = "alloc")] +const MAX_INITIAL_CAPACITY_BYTES: usize = 65536; + +/// Repeats the embedded parser, gathering the results in a `Vec`. +/// +/// This stops on [`Err::Error`] and returns the results that were accumulated. To instead chain an error up, see +/// [`cut`][crate::combinator::cut]. +/// +/// # Arguments +/// * `f` The parser to apply. +/// +/// *Note*: if the parser passed in accepts empty inputs (like `alpha0` or `digit0`), `many0` will +/// return an error, to prevent going into an infinite loop +/// +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed, IResult}; +/// use nom::multi::many0; +/// use nom::bytes::complete::tag; +/// +/// fn parser(s: &str) -> IResult<&str, Vec<&str>> { +/// many0(tag("abc"))(s) +/// } +/// +/// assert_eq!(parser("abcabc"), Ok(("", vec!["abc", "abc"]))); +/// assert_eq!(parser("abc123"), Ok(("123", vec!["abc"]))); +/// assert_eq!(parser("123123"), Ok(("123123", vec![]))); +/// assert_eq!(parser(""), Ok(("", vec![]))); +/// ``` +#[cfg(feature = "alloc")] +#[cfg_attr(feature = "docsrs", doc(cfg(feature = "alloc")))] +pub fn many0(mut f: F) -> impl FnMut(I) -> IResult, E> +where + I: Clone + InputLength, + F: Parser, + E: ParseError, +{ + move |mut i: I| { + let mut acc = crate::lib::std::vec::Vec::with_capacity(4); + loop { + let len = i.input_len(); + match f.parse(i.clone()) { + Err(Err::Error(_)) => return Ok((i, acc)), + Err(e) => return Err(e), + Ok((i1, o)) => { + // infinite loop check: the parser must always consume + if i1.input_len() == len { + return Err(Err::Error(E::from_error_kind(i, ErrorKind::Many0))); + } + + i = i1; + acc.push(o); + } + } + } + } +} + +/// Runs the embedded parser, gathering the results in a `Vec`. +/// +/// This stops on [`Err::Error`] if there is at least one result, and returns the results that were accumulated. To instead chain an error up, +/// see [`cut`][crate::combinator::cut]. +/// +/// # Arguments +/// * `f` The parser to apply. +/// +/// *Note*: If the parser passed to `many1` accepts empty inputs +/// (like `alpha0` or `digit0`), `many1` will return an error, +/// to prevent going into an infinite loop. 
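+///
+/// A small extra sketch (an editorial addition, not from the upstream nom docs)
+/// of that guard in action, assuming the default `Error` type: `digit0` can
+/// succeed without consuming anything, so `many1(digit0)` reports
+/// `ErrorKind::Many1` instead of looping forever. The helper name `digits` is
+/// only illustrative.
+///
+/// ```rust
+/// use nom::{Err, error::{Error, ErrorKind}, IResult};
+/// use nom::{character::complete::digit0, multi::many1};
+///
+/// fn digits(s: &str) -> IResult<&str, Vec<&str>> {
+///   many1(digit0)(s)
+/// }
+///
+/// // `digit0` matches zero digits without consuming, so the guard fires
+/// assert_eq!(digits("abc"), Err(Err::Error(Error::new("abc", ErrorKind::Many1))));
+/// ```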
+/// +/// ```rust +/// # use nom::{Err, error::{Error, ErrorKind}, Needed, IResult}; +/// use nom::multi::many1; +/// use nom::bytes::complete::tag; +/// +/// fn parser(s: &str) -> IResult<&str, Vec<&str>> { +/// many1(tag("abc"))(s) +/// } +/// +/// assert_eq!(parser("abcabc"), Ok(("", vec!["abc", "abc"]))); +/// assert_eq!(parser("abc123"), Ok(("123", vec!["abc"]))); +/// assert_eq!(parser("123123"), Err(Err::Error(Error::new("123123", ErrorKind::Tag)))); +/// assert_eq!(parser(""), Err(Err::Error(Error::new("", ErrorKind::Tag)))); +/// ``` +#[cfg(feature = "alloc")] +#[cfg_attr(feature = "docsrs", doc(cfg(feature = "alloc")))] +pub fn many1(mut f: F) -> impl FnMut(I) -> IResult, E> +where + I: Clone + InputLength, + F: Parser, + E: ParseError, +{ + move |mut i: I| match f.parse(i.clone()) { + Err(Err::Error(err)) => Err(Err::Error(E::append(i, ErrorKind::Many1, err))), + Err(e) => Err(e), + Ok((i1, o)) => { + let mut acc = crate::lib::std::vec::Vec::with_capacity(4); + acc.push(o); + i = i1; + + loop { + let len = i.input_len(); + match f.parse(i.clone()) { + Err(Err::Error(_)) => return Ok((i, acc)), + Err(e) => return Err(e), + Ok((i1, o)) => { + // infinite loop check: the parser must always consume + if i1.input_len() == len { + return Err(Err::Error(E::from_error_kind(i, ErrorKind::Many1))); + } + + i = i1; + acc.push(o); + } + } + } + } + } +} + +/// Applies the parser `f` until the parser `g` produces a result. +/// +/// Returns a tuple of the results of `f` in a `Vec` and the result of `g`. +/// +/// `f` keeps going so long as `g` produces [`Err::Error`]. To instead chain an error up, see [`cut`][crate::combinator::cut]. +/// +/// ```rust +/// # use nom::{Err, error::{Error, ErrorKind}, Needed, IResult}; +/// use nom::multi::many_till; +/// use nom::bytes::complete::tag; +/// +/// fn parser(s: &str) -> IResult<&str, (Vec<&str>, &str)> { +/// many_till(tag("abc"), tag("end"))(s) +/// }; +/// +/// assert_eq!(parser("abcabcend"), Ok(("", (vec!["abc", "abc"], "end")))); +/// assert_eq!(parser("abc123end"), Err(Err::Error(Error::new("123end", ErrorKind::Tag)))); +/// assert_eq!(parser("123123end"), Err(Err::Error(Error::new("123123end", ErrorKind::Tag)))); +/// assert_eq!(parser(""), Err(Err::Error(Error::new("", ErrorKind::Tag)))); +/// assert_eq!(parser("abcendefg"), Ok(("efg", (vec!["abc"], "end")))); +/// ``` +#[cfg(feature = "alloc")] +#[cfg_attr(feature = "docsrs", doc(cfg(feature = "alloc")))] +pub fn many_till( + mut f: F, + mut g: G, +) -> impl FnMut(I) -> IResult, P), E> +where + I: Clone + InputLength, + F: Parser, + G: Parser, + E: ParseError, +{ + move |mut i: I| { + let mut res = crate::lib::std::vec::Vec::new(); + loop { + let len = i.input_len(); + match g.parse(i.clone()) { + Ok((i1, o)) => return Ok((i1, (res, o))), + Err(Err::Error(_)) => { + match f.parse(i.clone()) { + Err(Err::Error(err)) => return Err(Err::Error(E::append(i, ErrorKind::ManyTill, err))), + Err(e) => return Err(e), + Ok((i1, o)) => { + // infinite loop check: the parser must always consume + if i1.input_len() == len { + return Err(Err::Error(E::from_error_kind(i1, ErrorKind::ManyTill))); + } + + res.push(o); + i = i1; + } + } + } + Err(e) => return Err(e), + } + } + } +} + +/// Alternates between two parsers to produce a list of elements. +/// +/// This stops when either parser returns [`Err::Error`] and returns the results that were accumulated. To instead chain an error up, see +/// [`cut`][crate::combinator::cut]. +/// +/// # Arguments +/// * `sep` Parses the separator between list elements. 
+/// * `f` Parses the elements of the list. +/// +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed, IResult}; +/// use nom::multi::separated_list0; +/// use nom::bytes::complete::tag; +/// +/// fn parser(s: &str) -> IResult<&str, Vec<&str>> { +/// separated_list0(tag("|"), tag("abc"))(s) +/// } +/// +/// assert_eq!(parser("abc|abc|abc"), Ok(("", vec!["abc", "abc", "abc"]))); +/// assert_eq!(parser("abc123abc"), Ok(("123abc", vec!["abc"]))); +/// assert_eq!(parser("abc|def"), Ok(("|def", vec!["abc"]))); +/// assert_eq!(parser(""), Ok(("", vec![]))); +/// assert_eq!(parser("def|abc"), Ok(("def|abc", vec![]))); +/// ``` +#[cfg(feature = "alloc")] +#[cfg_attr(feature = "docsrs", doc(cfg(feature = "alloc")))] +pub fn separated_list0( + mut sep: G, + mut f: F, +) -> impl FnMut(I) -> IResult, E> +where + I: Clone + InputLength, + F: Parser, + G: Parser, + E: ParseError, +{ + move |mut i: I| { + let mut res = Vec::new(); + + match f.parse(i.clone()) { + Err(Err::Error(_)) => return Ok((i, res)), + Err(e) => return Err(e), + Ok((i1, o)) => { + res.push(o); + i = i1; + } + } + + loop { + let len = i.input_len(); + match sep.parse(i.clone()) { + Err(Err::Error(_)) => return Ok((i, res)), + Err(e) => return Err(e), + Ok((i1, _)) => { + // infinite loop check: the parser must always consume + if i1.input_len() == len { + return Err(Err::Error(E::from_error_kind(i1, ErrorKind::SeparatedList))); + } + + match f.parse(i1.clone()) { + Err(Err::Error(_)) => return Ok((i, res)), + Err(e) => return Err(e), + Ok((i2, o)) => { + res.push(o); + i = i2; + } + } + } + } + } + } +} + +/// Alternates between two parsers to produce a list of elements until [`Err::Error`]. +/// +/// Fails if the element parser does not produce at least one element.$ +/// +/// This stops when either parser returns [`Err::Error`] and returns the results that were accumulated. To instead chain an error up, see +/// [`cut`][crate::combinator::cut]. +/// +/// # Arguments +/// * `sep` Parses the separator between list elements. +/// * `f` Parses the elements of the list. 
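+///
+/// A typical use (an editorial sketch, not part of the upstream docs) is parsing a
+/// non-empty, comma-separated list of integers; the helper name `numbers` and the
+/// `u32` target type are only illustrative:
+///
+/// ```rust
+/// use nom::{IResult, character::complete::{char, digit1}, combinator::map_res, multi::separated_list1};
+///
+/// fn numbers(input: &str) -> IResult<&str, Vec<u32>> {
+///   separated_list1(char(','), map_res(digit1, |s: &str| s.parse::<u32>()))(input)
+/// }
+///
+/// assert_eq!(numbers("1,2,30"), Ok(("", vec![1, 2, 30])));
+/// // a trailing separator is left in the remaining input
+/// assert_eq!(numbers("1,2,"), Ok((",", vec![1, 2])));
+/// ```
+///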
+/// ```rust +/// # use nom::{Err, error::{Error, ErrorKind}, Needed, IResult}; +/// use nom::multi::separated_list1; +/// use nom::bytes::complete::tag; +/// +/// fn parser(s: &str) -> IResult<&str, Vec<&str>> { +/// separated_list1(tag("|"), tag("abc"))(s) +/// } +/// +/// assert_eq!(parser("abc|abc|abc"), Ok(("", vec!["abc", "abc", "abc"]))); +/// assert_eq!(parser("abc123abc"), Ok(("123abc", vec!["abc"]))); +/// assert_eq!(parser("abc|def"), Ok(("|def", vec!["abc"]))); +/// assert_eq!(parser(""), Err(Err::Error(Error::new("", ErrorKind::Tag)))); +/// assert_eq!(parser("def|abc"), Err(Err::Error(Error::new("def|abc", ErrorKind::Tag)))); +/// ``` +#[cfg(feature = "alloc")] +#[cfg_attr(feature = "docsrs", doc(cfg(feature = "alloc")))] +pub fn separated_list1( + mut sep: G, + mut f: F, +) -> impl FnMut(I) -> IResult, E> +where + I: Clone + InputLength, + F: Parser, + G: Parser, + E: ParseError, +{ + move |mut i: I| { + let mut res = Vec::new(); + + // Parse the first element + match f.parse(i.clone()) { + Err(e) => return Err(e), + Ok((i1, o)) => { + res.push(o); + i = i1; + } + } + + loop { + let len = i.input_len(); + match sep.parse(i.clone()) { + Err(Err::Error(_)) => return Ok((i, res)), + Err(e) => return Err(e), + Ok((i1, _)) => { + // infinite loop check: the parser must always consume + if i1.input_len() == len { + return Err(Err::Error(E::from_error_kind(i1, ErrorKind::SeparatedList))); + } + + match f.parse(i1.clone()) { + Err(Err::Error(_)) => return Ok((i, res)), + Err(e) => return Err(e), + Ok((i2, o)) => { + res.push(o); + i = i2; + } + } + } + } + } + } +} + +/// Repeats the embedded parser `m..=n` times +/// +/// This stops before `n` when the parser returns [`Err::Error`] and returns the results that were accumulated. To instead chain an error up, see +/// [`cut`][crate::combinator::cut]. +/// +/// # Arguments +/// * `m` The minimum number of iterations. +/// * `n` The maximum number of iterations. +/// * `f` The parser to apply. +/// +/// *Note*: If the parser passed to `many1` accepts empty inputs +/// (like `alpha0` or `digit0`), `many1` will return an error, +/// to prevent going into an infinite loop. 
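+///
+/// One more detail worth showing (an editorial sketch, not upstream documentation):
+/// if `m > n` the combinator rejects the input outright with a non-recoverable
+/// [`Err::Failure`] rather than a plain [`Err::Error`]. The helper name `reversed`
+/// is only illustrative.
+///
+/// ```rust
+/// use nom::{Err, error::{Error, ErrorKind}, IResult};
+/// use nom::{bytes::complete::tag, multi::many_m_n};
+///
+/// fn reversed(s: &str) -> IResult<&str, Vec<&str>> {
+///   many_m_n(4, 2, tag("ab"))(s)
+/// }
+///
+/// // reversed bounds fail hard, before any input is consumed
+/// assert_eq!(reversed("ababab"), Err(Err::Failure(Error::new("ababab", ErrorKind::ManyMN))));
+/// ```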
+/// +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed, IResult}; +/// use nom::multi::many_m_n; +/// use nom::bytes::complete::tag; +/// +/// fn parser(s: &str) -> IResult<&str, Vec<&str>> { +/// many_m_n(0, 2, tag("abc"))(s) +/// } +/// +/// assert_eq!(parser("abcabc"), Ok(("", vec!["abc", "abc"]))); +/// assert_eq!(parser("abc123"), Ok(("123", vec!["abc"]))); +/// assert_eq!(parser("123123"), Ok(("123123", vec![]))); +/// assert_eq!(parser(""), Ok(("", vec![]))); +/// assert_eq!(parser("abcabcabc"), Ok(("abc", vec!["abc", "abc"]))); +/// ``` +#[cfg(feature = "alloc")] +#[cfg_attr(feature = "docsrs", doc(cfg(feature = "alloc")))] +pub fn many_m_n( + min: usize, + max: usize, + mut parse: F, +) -> impl FnMut(I) -> IResult, E> +where + I: Clone + InputLength, + F: Parser, + E: ParseError, +{ + move |mut input: I| { + if min > max { + return Err(Err::Failure(E::from_error_kind(input, ErrorKind::ManyMN))); + } + + let max_initial_capacity = + MAX_INITIAL_CAPACITY_BYTES / crate::lib::std::mem::size_of::().max(1); + let mut res = crate::lib::std::vec::Vec::with_capacity(min.min(max_initial_capacity)); + for count in 0..max { + let len = input.input_len(); + match parse.parse(input.clone()) { + Ok((tail, value)) => { + // infinite loop check: the parser must always consume + if tail.input_len() == len { + return Err(Err::Error(E::from_error_kind(input, ErrorKind::ManyMN))); + } + + res.push(value); + input = tail; + } + Err(Err::Error(e)) => { + if count < min { + return Err(Err::Error(E::append(input, ErrorKind::ManyMN, e))); + } else { + return Ok((input, res)); + } + } + Err(e) => { + return Err(e); + } + } + } + + Ok((input, res)) + } +} + +/// Repeats the embedded parser, counting the results +/// +/// This stops on [`Err::Error`]. To instead chain an error up, see +/// [`cut`][crate::combinator::cut]. +/// +/// # Arguments +/// * `f` The parser to apply. +/// +/// *Note*: if the parser passed in accepts empty inputs (like `alpha0` or `digit0`), `many0` will +/// return an error, to prevent going into an infinite loop +/// +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed, IResult}; +/// use nom::multi::many0_count; +/// use nom::bytes::complete::tag; +/// +/// fn parser(s: &str) -> IResult<&str, usize> { +/// many0_count(tag("abc"))(s) +/// } +/// +/// assert_eq!(parser("abcabc"), Ok(("", 2))); +/// assert_eq!(parser("abc123"), Ok(("123", 1))); +/// assert_eq!(parser("123123"), Ok(("123123", 0))); +/// assert_eq!(parser(""), Ok(("", 0))); +/// ``` +pub fn many0_count(mut f: F) -> impl FnMut(I) -> IResult +where + I: Clone + InputLength, + F: Parser, + E: ParseError, +{ + move |i: I| { + let mut input = i; + let mut count = 0; + + loop { + let input_ = input.clone(); + let len = input.input_len(); + match f.parse(input_) { + Ok((i, _)) => { + // infinite loop check: the parser must always consume + if i.input_len() == len { + return Err(Err::Error(E::from_error_kind(input, ErrorKind::Many0Count))); + } + + input = i; + count += 1; + } + + Err(Err::Error(_)) => return Ok((input, count)), + + Err(e) => return Err(e), + } + } + } +} + +/// Runs the embedded parser, counting the results. +/// +/// This stops on [`Err::Error`] if there is at least one result. To instead chain an error up, +/// see [`cut`][crate::combinator::cut]. +/// +/// # Arguments +/// * `f` The parser to apply. +/// +/// *Note*: If the parser passed to `many1` accepts empty inputs +/// (like `alpha0` or `digit0`), `many1` will return an error, +/// to prevent going into an infinite loop. 
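+///
+/// Unlike `many1`, this combinator only counts matches instead of collecting them,
+/// which is why it is not gated on the `alloc` feature in this file. A small usage
+/// sketch (an editorial addition; the helper name `words` is illustrative):
+///
+/// ```rust
+/// use nom::{IResult, bytes::complete::tag, character::complete::alpha1, multi::many1_count, sequence::terminated};
+///
+/// fn words(s: &str) -> IResult<&str, usize> {
+///   many1_count(terminated(alpha1, tag(",")))(s)
+/// }
+///
+/// assert_eq!(words("abc,def,"), Ok(("", 2)));
+/// // the second item has no trailing comma, so it is left unconsumed
+/// assert_eq!(words("abc,def"), Ok(("def", 1)));
+/// ```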
+/// +/// ```rust +/// # use nom::{Err, error::{Error, ErrorKind}, Needed, IResult}; +/// use nom::multi::many1_count; +/// use nom::bytes::complete::tag; +/// +/// fn parser(s: &str) -> IResult<&str, usize> { +/// many1_count(tag("abc"))(s) +/// } +/// +/// assert_eq!(parser("abcabc"), Ok(("", 2))); +/// assert_eq!(parser("abc123"), Ok(("123", 1))); +/// assert_eq!(parser("123123"), Err(Err::Error(Error::new("123123", ErrorKind::Many1Count)))); +/// assert_eq!(parser(""), Err(Err::Error(Error::new("", ErrorKind::Many1Count)))); +/// ``` +pub fn many1_count(mut f: F) -> impl FnMut(I) -> IResult +where + I: Clone + InputLength, + F: Parser, + E: ParseError, +{ + move |i: I| { + let i_ = i.clone(); + match f.parse(i_) { + Err(Err::Error(_)) => Err(Err::Error(E::from_error_kind(i, ErrorKind::Many1Count))), + Err(i) => Err(i), + Ok((i1, _)) => { + let mut count = 1; + let mut input = i1; + + loop { + let len = input.input_len(); + let input_ = input.clone(); + match f.parse(input_) { + Err(Err::Error(_)) => return Ok((input, count)), + Err(e) => return Err(e), + Ok((i, _)) => { + // infinite loop check: the parser must always consume + if i.input_len() == len { + return Err(Err::Error(E::from_error_kind(i, ErrorKind::Many1Count))); + } + + count += 1; + input = i; + } + } + } + } + } + } +} + +/// Runs the embedded parser `count` times, gathering the results in a `Vec` +/// +/// # Arguments +/// * `f` The parser to apply. +/// * `count` How often to apply the parser. +/// ```rust +/// # use nom::{Err, error::{Error, ErrorKind}, Needed, IResult}; +/// use nom::multi::count; +/// use nom::bytes::complete::tag; +/// +/// fn parser(s: &str) -> IResult<&str, Vec<&str>> { +/// count(tag("abc"), 2)(s) +/// } +/// +/// assert_eq!(parser("abcabc"), Ok(("", vec!["abc", "abc"]))); +/// assert_eq!(parser("abc123"), Err(Err::Error(Error::new("123", ErrorKind::Tag)))); +/// assert_eq!(parser("123123"), Err(Err::Error(Error::new("123123", ErrorKind::Tag)))); +/// assert_eq!(parser(""), Err(Err::Error(Error::new("", ErrorKind::Tag)))); +/// assert_eq!(parser("abcabcabc"), Ok(("abc", vec!["abc", "abc"]))); +/// ``` +#[cfg(feature = "alloc")] +#[cfg_attr(feature = "docsrs", doc(cfg(feature = "alloc")))] +pub fn count(mut f: F, count: usize) -> impl FnMut(I) -> IResult, E> +where + I: Clone + PartialEq, + F: Parser, + E: ParseError, +{ + move |i: I| { + let mut input = i.clone(); + let max_initial_capacity = + MAX_INITIAL_CAPACITY_BYTES / crate::lib::std::mem::size_of::().max(1); + let mut res = crate::lib::std::vec::Vec::with_capacity(count.min(max_initial_capacity)); + + for _ in 0..count { + let input_ = input.clone(); + match f.parse(input_) { + Ok((i, o)) => { + res.push(o); + input = i; + } + Err(Err::Error(e)) => { + return Err(Err::Error(E::append(i, ErrorKind::Count, e))); + } + Err(e) => { + return Err(e); + } + } + } + + Ok((input, res)) + } +} + +/// Runs the embedded parser repeatedly, filling the given slice with results. +/// +/// This parser fails if the input runs out before the given slice is full. +/// +/// # Arguments +/// * `f` The parser to apply. 
+/// * `buf` The slice to fill +/// ```rust +/// # use nom::{Err, error::{Error, ErrorKind}, Needed, IResult}; +/// use nom::multi::fill; +/// use nom::bytes::complete::tag; +/// +/// fn parser(s: &str) -> IResult<&str, [&str; 2]> { +/// let mut buf = ["", ""]; +/// let (rest, ()) = fill(tag("abc"), &mut buf)(s)?; +/// Ok((rest, buf)) +/// } +/// +/// assert_eq!(parser("abcabc"), Ok(("", ["abc", "abc"]))); +/// assert_eq!(parser("abc123"), Err(Err::Error(Error::new("123", ErrorKind::Tag)))); +/// assert_eq!(parser("123123"), Err(Err::Error(Error::new("123123", ErrorKind::Tag)))); +/// assert_eq!(parser(""), Err(Err::Error(Error::new("", ErrorKind::Tag)))); +/// assert_eq!(parser("abcabcabc"), Ok(("abc", ["abc", "abc"]))); +/// ``` +pub fn fill<'a, I, O, E, F>(f: F, buf: &'a mut [O]) -> impl FnMut(I) -> IResult + 'a +where + I: Clone + PartialEq, + F: Fn(I) -> IResult + 'a, + E: ParseError, +{ + move |i: I| { + let mut input = i.clone(); + + for elem in buf.iter_mut() { + let input_ = input.clone(); + match f(input_) { + Ok((i, o)) => { + *elem = o; + input = i; + } + Err(Err::Error(e)) => { + return Err(Err::Error(E::append(i, ErrorKind::Count, e))); + } + Err(e) => { + return Err(e); + } + } + } + + Ok((input, ())) + } +} + +/// Repeats the embedded parser, calling `g` to gather the results. +/// +/// This stops on [`Err::Error`]. To instead chain an error up, see +/// [`cut`][crate::combinator::cut]. +/// +/// # Arguments +/// * `f` The parser to apply. +/// * `init` A function returning the initial value. +/// * `g` The function that combines a result of `f` with +/// the current accumulator. +/// +/// *Note*: if the parser passed in accepts empty inputs (like `alpha0` or `digit0`), `many0` will +/// return an error, to prevent going into an infinite loop +/// +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed, IResult}; +/// use nom::multi::fold_many0; +/// use nom::bytes::complete::tag; +/// +/// fn parser(s: &str) -> IResult<&str, Vec<&str>> { +/// fold_many0( +/// tag("abc"), +/// Vec::new, +/// |mut acc: Vec<_>, item| { +/// acc.push(item); +/// acc +/// } +/// )(s) +/// } +/// +/// assert_eq!(parser("abcabc"), Ok(("", vec!["abc", "abc"]))); +/// assert_eq!(parser("abc123"), Ok(("123", vec!["abc"]))); +/// assert_eq!(parser("123123"), Ok(("123123", vec![]))); +/// assert_eq!(parser(""), Ok(("", vec![]))); +/// ``` +pub fn fold_many0( + mut f: F, + mut init: H, + mut g: G, +) -> impl FnMut(I) -> IResult +where + I: Clone + InputLength, + F: Parser, + G: FnMut(R, O) -> R, + H: FnMut() -> R, + E: ParseError, +{ + move |i: I| { + let mut res = init(); + let mut input = i; + + loop { + let i_ = input.clone(); + let len = input.input_len(); + match f.parse(i_) { + Ok((i, o)) => { + // infinite loop check: the parser must always consume + if i.input_len() == len { + return Err(Err::Error(E::from_error_kind(input, ErrorKind::Many0))); + } + + res = g(res, o); + input = i; + } + Err(Err::Error(_)) => { + return Ok((input, res)); + } + Err(e) => { + return Err(e); + } + } + } + } +} + +/// Repeats the embedded parser, calling `g` to gather the results. +/// +/// This stops on [`Err::Error`] if there is at least one result. To instead chain an error up, +/// see [`cut`][crate::combinator::cut]. +/// +/// # Arguments +/// * `f` The parser to apply. +/// * `init` A function returning the initial value. +/// * `g` The function that combines a result of `f` with +/// the current accumulator. 
+/// +/// *Note*: If the parser passed to `many1` accepts empty inputs +/// (like `alpha0` or `digit0`), `many1` will return an error, +/// to prevent going into an infinite loop. +/// +/// ```rust +/// # use nom::{Err, error::{Error, ErrorKind}, Needed, IResult}; +/// use nom::multi::fold_many1; +/// use nom::bytes::complete::tag; +/// +/// fn parser(s: &str) -> IResult<&str, Vec<&str>> { +/// fold_many1( +/// tag("abc"), +/// Vec::new, +/// |mut acc: Vec<_>, item| { +/// acc.push(item); +/// acc +/// } +/// )(s) +/// } +/// +/// assert_eq!(parser("abcabc"), Ok(("", vec!["abc", "abc"]))); +/// assert_eq!(parser("abc123"), Ok(("123", vec!["abc"]))); +/// assert_eq!(parser("123123"), Err(Err::Error(Error::new("123123", ErrorKind::Many1)))); +/// assert_eq!(parser(""), Err(Err::Error(Error::new("", ErrorKind::Many1)))); +/// ``` +pub fn fold_many1( + mut f: F, + mut init: H, + mut g: G, +) -> impl FnMut(I) -> IResult +where + I: Clone + InputLength, + F: Parser, + G: FnMut(R, O) -> R, + H: FnMut() -> R, + E: ParseError, +{ + move |i: I| { + let _i = i.clone(); + let init = init(); + match f.parse(_i) { + Err(Err::Error(_)) => Err(Err::Error(E::from_error_kind(i, ErrorKind::Many1))), + Err(e) => Err(e), + Ok((i1, o1)) => { + let mut acc = g(init, o1); + let mut input = i1; + + loop { + let _input = input.clone(); + let len = input.input_len(); + match f.parse(_input) { + Err(Err::Error(_)) => { + break; + } + Err(e) => return Err(e), + Ok((i, o)) => { + // infinite loop check: the parser must always consume + if i.input_len() == len { + return Err(Err::Failure(E::from_error_kind(i, ErrorKind::Many1))); + } + + acc = g(acc, o); + input = i; + } + } + } + + Ok((input, acc)) + } + } + } +} + +/// Repeats the embedded parser `m..=n` times, calling `g` to gather the results +/// +/// This stops before `n` when the parser returns [`Err::Error`]. To instead chain an error up, see +/// [`cut`][crate::combinator::cut]. +/// +/// # Arguments +/// * `m` The minimum number of iterations. +/// * `n` The maximum number of iterations. +/// * `f` The parser to apply. +/// * `init` A function returning the initial value. +/// * `g` The function that combines a result of `f` with +/// the current accumulator. +/// +/// *Note*: If the parser passed to `many1` accepts empty inputs +/// (like `alpha0` or `digit0`), `many1` will return an error, +/// to prevent going into an infinite loop. 
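+///
+/// A folding sketch (an editorial addition, not from the upstream docs): summing
+/// one to three comma-terminated integers without collecting them into a `Vec`.
+/// The helper name `sum` is only illustrative.
+///
+/// ```rust
+/// use nom::{IResult, character::complete::{char, digit1}, combinator::map_res, multi::fold_many_m_n, sequence::terminated};
+///
+/// fn sum(input: &str) -> IResult<&str, u32> {
+///   fold_many_m_n(
+///     1, 3,
+///     terminated(map_res(digit1, |s: &str| s.parse::<u32>()), char(',')),
+///     || 0u32,
+///     |acc, n| acc + n,
+///   )(input)
+/// }
+///
+/// // only the first three integers are folded; the rest is left over
+/// assert_eq!(sum("1,2,3,4,"), Ok(("4,", 6)));
+/// ```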
+/// +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed, IResult}; +/// use nom::multi::fold_many_m_n; +/// use nom::bytes::complete::tag; +/// +/// fn parser(s: &str) -> IResult<&str, Vec<&str>> { +/// fold_many_m_n( +/// 0, +/// 2, +/// tag("abc"), +/// Vec::new, +/// |mut acc: Vec<_>, item| { +/// acc.push(item); +/// acc +/// } +/// )(s) +/// } +/// +/// assert_eq!(parser("abcabc"), Ok(("", vec!["abc", "abc"]))); +/// assert_eq!(parser("abc123"), Ok(("123", vec!["abc"]))); +/// assert_eq!(parser("123123"), Ok(("123123", vec![]))); +/// assert_eq!(parser(""), Ok(("", vec![]))); +/// assert_eq!(parser("abcabcabc"), Ok(("abc", vec!["abc", "abc"]))); +/// ``` +pub fn fold_many_m_n( + min: usize, + max: usize, + mut parse: F, + mut init: H, + mut fold: G, +) -> impl FnMut(I) -> IResult +where + I: Clone + InputLength, + F: Parser, + G: FnMut(R, O) -> R, + H: FnMut() -> R, + E: ParseError, +{ + move |mut input: I| { + if min > max { + return Err(Err::Failure(E::from_error_kind(input, ErrorKind::ManyMN))); + } + + let mut acc = init(); + for count in 0..max { + let len = input.input_len(); + match parse.parse(input.clone()) { + Ok((tail, value)) => { + // infinite loop check: the parser must always consume + if tail.input_len() == len { + return Err(Err::Error(E::from_error_kind(tail, ErrorKind::ManyMN))); + } + + acc = fold(acc, value); + input = tail; + } + //FInputXMError: handle failure properly + Err(Err::Error(err)) => { + if count < min { + return Err(Err::Error(E::append(input, ErrorKind::ManyMN, err))); + } else { + break; + } + } + Err(e) => return Err(e), + } + } + + Ok((input, acc)) + } +} + +/// Gets a number from the parser and returns a +/// subslice of the input of that size. +/// If the parser returns `Incomplete`, +/// `length_data` will return an error. +/// # Arguments +/// * `f` The parser to apply. +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed, IResult}; +/// use nom::number::complete::be_u16; +/// use nom::multi::length_data; +/// use nom::bytes::complete::tag; +/// +/// fn parser(s: &[u8]) -> IResult<&[u8], &[u8]> { +/// length_data(be_u16)(s) +/// } +/// +/// assert_eq!(parser(b"\x00\x03abcefg"), Ok((&b"efg"[..], &b"abc"[..]))); +/// assert_eq!(parser(b"\x00\x03a"), Err(Err::Incomplete(Needed::new(2)))); +/// ``` +pub fn length_data(mut f: F) -> impl FnMut(I) -> IResult +where + I: InputLength + InputTake, + N: ToUsize, + F: Parser, + E: ParseError, +{ + move |i: I| { + let (i, length) = f.parse(i)?; + + let length: usize = length.to_usize(); + + if let Some(needed) = length + .checked_sub(i.input_len()) + .and_then(NonZeroUsize::new) + { + Err(Err::Incomplete(Needed::Size(needed))) + } else { + Ok(i.take_split(length)) + } + } +} + +/// Gets a number from the first parser, +/// takes a subslice of the input of that size, +/// then applies the second parser on that subslice. +/// If the second parser returns `Incomplete`, +/// `length_value` will return an error. +/// # Arguments +/// * `f` The parser to apply. +/// * `g` The parser to apply on the subslice. 
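+///
+/// A further sketch (an editorial addition): the second parser only ever sees the
+/// length-delimited subslice, so if it reports `Incomplete` the result is turned
+/// into an error with `ErrorKind::Complete`. The helper name `field` is only
+/// illustrative.
+///
+/// ```rust
+/// use nom::{Err, error::{Error, ErrorKind}, IResult};
+/// use nom::{bytes::streaming::tag, multi::length_value, number::complete::be_u8};
+///
+/// fn field(i: &[u8]) -> IResult<&[u8], &[u8]> {
+///   length_value(be_u8, tag("abcd"))(i)
+/// }
+///
+/// // the length byte says 2, so `tag("abcd")` runs on just `b"ab"` and asks for more
+/// assert_eq!(field(b"\x02abcd"), Err(Err::Error(Error::new(&b"ab"[..], ErrorKind::Complete))));
+/// ```
+///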
+/// ```rust +/// # use nom::{Err, error::{Error, ErrorKind}, Needed, IResult}; +/// use nom::number::complete::be_u16; +/// use nom::multi::length_value; +/// use nom::bytes::complete::tag; +/// +/// fn parser(s: &[u8]) -> IResult<&[u8], &[u8]> { +/// length_value(be_u16, tag("abc"))(s) +/// } +/// +/// assert_eq!(parser(b"\x00\x03abcefg"), Ok((&b"efg"[..], &b"abc"[..]))); +/// assert_eq!(parser(b"\x00\x03123123"), Err(Err::Error(Error::new(&b"123"[..], ErrorKind::Tag)))); +/// assert_eq!(parser(b"\x00\x03a"), Err(Err::Incomplete(Needed::new(2)))); +/// ``` +pub fn length_value(mut f: F, mut g: G) -> impl FnMut(I) -> IResult +where + I: Clone + InputLength + InputTake, + N: ToUsize, + F: Parser, + G: Parser, + E: ParseError, +{ + move |i: I| { + let (i, length) = f.parse(i)?; + + let length: usize = length.to_usize(); + + if let Some(needed) = length + .checked_sub(i.input_len()) + .and_then(NonZeroUsize::new) + { + Err(Err::Incomplete(Needed::Size(needed))) + } else { + let (rest, i) = i.take_split(length); + match g.parse(i.clone()) { + Err(Err::Incomplete(_)) => Err(Err::Error(E::from_error_kind(i, ErrorKind::Complete))), + Err(e) => Err(e), + Ok((_, o)) => Ok((rest, o)), + } + } + } +} + +/// Gets a number from the first parser, +/// then applies the second parser that many times. +/// # Arguments +/// * `f` The parser to apply to obtain the count. +/// * `g` The parser to apply repeatedly. +/// ```rust +/// # use nom::{Err, error::{Error, ErrorKind}, Needed, IResult}; +/// use nom::number::complete::u8; +/// use nom::multi::length_count; +/// use nom::bytes::complete::tag; +/// use nom::combinator::map; +/// +/// fn parser(s: &[u8]) -> IResult<&[u8], Vec<&[u8]>> { +/// length_count(map(u8, |i| { +/// println!("got number: {}", i); +/// i +/// }), tag("abc"))(s) +/// } +/// +/// assert_eq!(parser(&b"\x02abcabcabc"[..]), Ok(((&b"abc"[..], vec![&b"abc"[..], &b"abc"[..]])))); +/// assert_eq!(parser(b"\x03123123123"), Err(Err::Error(Error::new(&b"123123123"[..], ErrorKind::Tag)))); +/// ``` +#[cfg(feature = "alloc")] +pub fn length_count(mut f: F, mut g: G) -> impl FnMut(I) -> IResult, E> +where + I: Clone, + N: ToUsize, + F: Parser, + G: Parser, + E: ParseError, +{ + move |i: I| { + let (i, count) = f.parse(i)?; + let mut input = i.clone(); + let mut res = Vec::new(); + + for _ in 0..count.to_usize() { + let input_ = input.clone(); + match g.parse(input_) { + Ok((i, o)) => { + res.push(o); + input = i; + } + Err(Err::Error(e)) => { + return Err(Err::Error(E::append(i, ErrorKind::Count, e))); + } + Err(e) => { + return Err(e); + } + } + } + + Ok((input, res)) + } +} diff --git a/vendor/nom/src/multi/tests.rs b/vendor/nom/src/multi/tests.rs new file mode 100644 index 00000000000000..96a65181764e7f --- /dev/null +++ b/vendor/nom/src/multi/tests.rs @@ -0,0 +1,534 @@ +use super::{length_data, length_value, many0_count, many1_count}; +use crate::{ + bytes::streaming::tag, + character::streaming::digit1 as digit, + error::{ErrorKind, ParseError}, + internal::{Err, IResult, Needed}, + lib::std::str::{self, FromStr}, + number::streaming::{be_u16, be_u8}, + sequence::{pair, tuple}, +}; +#[cfg(feature = "alloc")] +use crate::{ + lib::std::vec::Vec, + multi::{ + count, fold_many0, fold_many1, fold_many_m_n, length_count, many0, many1, many_m_n, many_till, + separated_list0, separated_list1, + }, +}; + +#[test] +#[cfg(feature = "alloc")] +fn separated_list0_test() { + fn multi(i: &[u8]) -> IResult<&[u8], Vec<&[u8]>> { + separated_list0(tag(","), tag("abcd"))(i) + } + fn multi_empty(i: &[u8]) -> 
IResult<&[u8], Vec<&[u8]>> { + separated_list0(tag(","), tag(""))(i) + } + fn empty_sep(i: &[u8]) -> IResult<&[u8], Vec<&[u8]>> { + separated_list0(tag(""), tag("abc"))(i) + } + fn multi_longsep(i: &[u8]) -> IResult<&[u8], Vec<&[u8]>> { + separated_list0(tag(".."), tag("abcd"))(i) + } + + let a = &b"abcdef"[..]; + let b = &b"abcd,abcdef"[..]; + let c = &b"azerty"[..]; + let d = &b",,abc"[..]; + let e = &b"abcd,abcd,ef"[..]; + let f = &b"abc"[..]; + let g = &b"abcd."[..]; + let h = &b"abcd,abc"[..]; + let i = &b"abcabc"[..]; + + let res1 = vec![&b"abcd"[..]]; + assert_eq!(multi(a), Ok((&b"ef"[..], res1))); + let res2 = vec![&b"abcd"[..], &b"abcd"[..]]; + assert_eq!(multi(b), Ok((&b"ef"[..], res2))); + assert_eq!(multi(c), Ok((&b"azerty"[..], Vec::new()))); + let res3 = vec![&b""[..], &b""[..], &b""[..]]; + assert_eq!(multi_empty(d), Ok((&b"abc"[..], res3))); + let i_err_pos = &i[3..]; + assert_eq!( + empty_sep(i), + Err(Err::Error(error_position!( + i_err_pos, + ErrorKind::SeparatedList + ))) + ); + let res4 = vec![&b"abcd"[..], &b"abcd"[..]]; + assert_eq!(multi(e), Ok((&b",ef"[..], res4))); + + assert_eq!(multi(f), Err(Err::Incomplete(Needed::new(1)))); + assert_eq!(multi_longsep(g), Err(Err::Incomplete(Needed::new(1)))); + assert_eq!(multi(h), Err(Err::Incomplete(Needed::new(1)))); +} + +#[test] +#[cfg(feature = "alloc")] +fn separated_list1_test() { + fn multi(i: &[u8]) -> IResult<&[u8], Vec<&[u8]>> { + separated_list1(tag(","), tag("abcd"))(i) + } + fn multi_longsep(i: &[u8]) -> IResult<&[u8], Vec<&[u8]>> { + separated_list1(tag(".."), tag("abcd"))(i) + } + + let a = &b"abcdef"[..]; + let b = &b"abcd,abcdef"[..]; + let c = &b"azerty"[..]; + let d = &b"abcd,abcd,ef"[..]; + + let f = &b"abc"[..]; + let g = &b"abcd."[..]; + let h = &b"abcd,abc"[..]; + + let res1 = vec![&b"abcd"[..]]; + assert_eq!(multi(a), Ok((&b"ef"[..], res1))); + let res2 = vec![&b"abcd"[..], &b"abcd"[..]]; + assert_eq!(multi(b), Ok((&b"ef"[..], res2))); + assert_eq!( + multi(c), + Err(Err::Error(error_position!(c, ErrorKind::Tag))) + ); + let res3 = vec![&b"abcd"[..], &b"abcd"[..]]; + assert_eq!(multi(d), Ok((&b",ef"[..], res3))); + + assert_eq!(multi(f), Err(Err::Incomplete(Needed::new(1)))); + assert_eq!(multi_longsep(g), Err(Err::Incomplete(Needed::new(1)))); + assert_eq!(multi(h), Err(Err::Incomplete(Needed::new(1)))); +} + +#[test] +#[cfg(feature = "alloc")] +fn many0_test() { + fn multi(i: &[u8]) -> IResult<&[u8], Vec<&[u8]>> { + many0(tag("abcd"))(i) + } + fn multi_empty(i: &[u8]) -> IResult<&[u8], Vec<&[u8]>> { + many0(tag(""))(i) + } + + assert_eq!(multi(&b"abcdef"[..]), Ok((&b"ef"[..], vec![&b"abcd"[..]]))); + assert_eq!( + multi(&b"abcdabcdefgh"[..]), + Ok((&b"efgh"[..], vec![&b"abcd"[..], &b"abcd"[..]])) + ); + assert_eq!(multi(&b"azerty"[..]), Ok((&b"azerty"[..], Vec::new()))); + assert_eq!(multi(&b"abcdab"[..]), Err(Err::Incomplete(Needed::new(2)))); + assert_eq!(multi(&b"abcd"[..]), Err(Err::Incomplete(Needed::new(4)))); + assert_eq!(multi(&b""[..]), Err(Err::Incomplete(Needed::new(4)))); + assert_eq!( + multi_empty(&b"abcdef"[..]), + Err(Err::Error(error_position!( + &b"abcdef"[..], + ErrorKind::Many0 + ))) + ); +} + +#[test] +#[cfg(feature = "alloc")] +fn many1_test() { + fn multi(i: &[u8]) -> IResult<&[u8], Vec<&[u8]>> { + many1(tag("abcd"))(i) + } + + let a = &b"abcdef"[..]; + let b = &b"abcdabcdefgh"[..]; + let c = &b"azerty"[..]; + let d = &b"abcdab"[..]; + + let res1 = vec![&b"abcd"[..]]; + assert_eq!(multi(a), Ok((&b"ef"[..], res1))); + let res2 = vec![&b"abcd"[..], &b"abcd"[..]]; + 
assert_eq!(multi(b), Ok((&b"efgh"[..], res2))); + assert_eq!( + multi(c), + Err(Err::Error(error_position!(c, ErrorKind::Tag))) + ); + assert_eq!(multi(d), Err(Err::Incomplete(Needed::new(2)))); +} + +#[test] +#[cfg(feature = "alloc")] +fn many_till_test() { + fn multi(i: &[u8]) -> IResult<&[u8], (Vec<&[u8]>, &[u8])> { + many_till(tag("abcd"), tag("efgh"))(i) + } + + let a = b"abcdabcdefghabcd"; + let b = b"efghabcd"; + let c = b"azerty"; + + let res_a = (vec![&b"abcd"[..], &b"abcd"[..]], &b"efgh"[..]); + let res_b: (Vec<&[u8]>, &[u8]) = (Vec::new(), &b"efgh"[..]); + assert_eq!(multi(&a[..]), Ok((&b"abcd"[..], res_a))); + assert_eq!(multi(&b[..]), Ok((&b"abcd"[..], res_b))); + assert_eq!( + multi(&c[..]), + Err(Err::Error(error_node_position!( + &c[..], + ErrorKind::ManyTill, + error_position!(&c[..], ErrorKind::Tag) + ))) + ); +} + +#[test] +#[cfg(feature = "std")] +fn infinite_many() { + fn tst(input: &[u8]) -> IResult<&[u8], &[u8]> { + println!("input: {:?}", input); + Err(Err::Error(error_position!(input, ErrorKind::Tag))) + } + + // should not go into an infinite loop + fn multi0(i: &[u8]) -> IResult<&[u8], Vec<&[u8]>> { + many0(tst)(i) + } + let a = &b"abcdef"[..]; + assert_eq!(multi0(a), Ok((a, Vec::new()))); + + fn multi1(i: &[u8]) -> IResult<&[u8], Vec<&[u8]>> { + many1(tst)(i) + } + let a = &b"abcdef"[..]; + assert_eq!( + multi1(a), + Err(Err::Error(error_position!(a, ErrorKind::Tag))) + ); +} + +#[test] +#[cfg(feature = "alloc")] +fn many_m_n_test() { + fn multi(i: &[u8]) -> IResult<&[u8], Vec<&[u8]>> { + many_m_n(2, 4, tag("Abcd"))(i) + } + + let a = &b"Abcdef"[..]; + let b = &b"AbcdAbcdefgh"[..]; + let c = &b"AbcdAbcdAbcdAbcdefgh"[..]; + let d = &b"AbcdAbcdAbcdAbcdAbcdefgh"[..]; + let e = &b"AbcdAb"[..]; + + assert_eq!( + multi(a), + Err(Err::Error(error_position!(&b"ef"[..], ErrorKind::Tag))) + ); + let res1 = vec![&b"Abcd"[..], &b"Abcd"[..]]; + assert_eq!(multi(b), Ok((&b"efgh"[..], res1))); + let res2 = vec![&b"Abcd"[..], &b"Abcd"[..], &b"Abcd"[..], &b"Abcd"[..]]; + assert_eq!(multi(c), Ok((&b"efgh"[..], res2))); + let res3 = vec![&b"Abcd"[..], &b"Abcd"[..], &b"Abcd"[..], &b"Abcd"[..]]; + assert_eq!(multi(d), Ok((&b"Abcdefgh"[..], res3))); + assert_eq!(multi(e), Err(Err::Incomplete(Needed::new(2)))); +} + +#[test] +#[cfg(feature = "alloc")] +fn count_test() { + const TIMES: usize = 2; + fn cnt_2(i: &[u8]) -> IResult<&[u8], Vec<&[u8]>> { + count(tag("abc"), TIMES)(i) + } + + assert_eq!( + cnt_2(&b"abcabcabcdef"[..]), + Ok((&b"abcdef"[..], vec![&b"abc"[..], &b"abc"[..]])) + ); + assert_eq!(cnt_2(&b"ab"[..]), Err(Err::Incomplete(Needed::new(1)))); + assert_eq!(cnt_2(&b"abcab"[..]), Err(Err::Incomplete(Needed::new(1)))); + assert_eq!( + cnt_2(&b"xxx"[..]), + Err(Err::Error(error_position!(&b"xxx"[..], ErrorKind::Tag))) + ); + assert_eq!( + cnt_2(&b"xxxabcabcdef"[..]), + Err(Err::Error(error_position!( + &b"xxxabcabcdef"[..], + ErrorKind::Tag + ))) + ); + assert_eq!( + cnt_2(&b"abcxxxabcdef"[..]), + Err(Err::Error(error_position!( + &b"xxxabcdef"[..], + ErrorKind::Tag + ))) + ); +} + +#[test] +#[cfg(feature = "alloc")] +fn count_zero() { + const TIMES: usize = 0; + fn counter_2(i: &[u8]) -> IResult<&[u8], Vec<&[u8]>> { + count(tag("abc"), TIMES)(i) + } + + let done = &b"abcabcabcdef"[..]; + let parsed_done = Vec::new(); + let rest = done; + let incomplete_1 = &b"ab"[..]; + let parsed_incompl_1 = Vec::new(); + let incomplete_2 = &b"abcab"[..]; + let parsed_incompl_2 = Vec::new(); + let error = &b"xxx"[..]; + let error_remain = &b"xxx"[..]; + let parsed_err = Vec::new(); + let 
error_1 = &b"xxxabcabcdef"[..]; + let parsed_err_1 = Vec::new(); + let error_1_remain = &b"xxxabcabcdef"[..]; + let error_2 = &b"abcxxxabcdef"[..]; + let parsed_err_2 = Vec::new(); + let error_2_remain = &b"abcxxxabcdef"[..]; + + assert_eq!(counter_2(done), Ok((rest, parsed_done))); + assert_eq!( + counter_2(incomplete_1), + Ok((incomplete_1, parsed_incompl_1)) + ); + assert_eq!( + counter_2(incomplete_2), + Ok((incomplete_2, parsed_incompl_2)) + ); + assert_eq!(counter_2(error), Ok((error_remain, parsed_err))); + assert_eq!(counter_2(error_1), Ok((error_1_remain, parsed_err_1))); + assert_eq!(counter_2(error_2), Ok((error_2_remain, parsed_err_2))); +} + +#[derive(Debug, Clone, PartialEq)] +pub struct NilError; + +impl From<(I, ErrorKind)> for NilError { + fn from(_: (I, ErrorKind)) -> Self { + NilError + } +} + +impl ParseError for NilError { + fn from_error_kind(_: I, _: ErrorKind) -> NilError { + NilError + } + fn append(_: I, _: ErrorKind, _: NilError) -> NilError { + NilError + } +} + +fn number(i: &[u8]) -> IResult<&[u8], u32> { + use crate::combinator::map_res; + + map_res(map_res(digit, str::from_utf8), FromStr::from_str)(i) +} + +#[test] +#[cfg(feature = "alloc")] +fn length_count_test() { + fn cnt(i: &[u8]) -> IResult<&[u8], Vec<&[u8]>> { + length_count(number, tag("abc"))(i) + } + + assert_eq!( + cnt(&b"2abcabcabcdef"[..]), + Ok((&b"abcdef"[..], vec![&b"abc"[..], &b"abc"[..]])) + ); + assert_eq!(cnt(&b"2ab"[..]), Err(Err::Incomplete(Needed::new(1)))); + assert_eq!(cnt(&b"3abcab"[..]), Err(Err::Incomplete(Needed::new(1)))); + assert_eq!( + cnt(&b"xxx"[..]), + Err(Err::Error(error_position!(&b"xxx"[..], ErrorKind::Digit))) + ); + assert_eq!( + cnt(&b"2abcxxx"[..]), + Err(Err::Error(error_position!(&b"xxx"[..], ErrorKind::Tag))) + ); +} + +#[test] +fn length_data_test() { + fn take(i: &[u8]) -> IResult<&[u8], &[u8]> { + length_data(number)(i) + } + + assert_eq!( + take(&b"6abcabcabcdef"[..]), + Ok((&b"abcdef"[..], &b"abcabc"[..])) + ); + assert_eq!(take(&b"3ab"[..]), Err(Err::Incomplete(Needed::new(1)))); + assert_eq!( + take(&b"xxx"[..]), + Err(Err::Error(error_position!(&b"xxx"[..], ErrorKind::Digit))) + ); + assert_eq!(take(&b"2abcxxx"[..]), Ok((&b"cxxx"[..], &b"ab"[..]))); +} + +#[test] +fn length_value_test() { + fn length_value_1(i: &[u8]) -> IResult<&[u8], u16> { + length_value(be_u8, be_u16)(i) + } + fn length_value_2(i: &[u8]) -> IResult<&[u8], (u8, u8)> { + length_value(be_u8, tuple((be_u8, be_u8)))(i) + } + + let i1 = [0, 5, 6]; + assert_eq!( + length_value_1(&i1), + Err(Err::Error(error_position!(&b""[..], ErrorKind::Complete))) + ); + assert_eq!( + length_value_2(&i1), + Err(Err::Error(error_position!(&b""[..], ErrorKind::Complete))) + ); + + let i2 = [1, 5, 6, 3]; + assert_eq!( + length_value_1(&i2), + Err(Err::Error(error_position!(&i2[1..2], ErrorKind::Complete))) + ); + assert_eq!( + length_value_2(&i2), + Err(Err::Error(error_position!(&i2[1..2], ErrorKind::Complete))) + ); + + let i3 = [2, 5, 6, 3, 4, 5, 7]; + assert_eq!(length_value_1(&i3), Ok((&i3[3..], 1286))); + assert_eq!(length_value_2(&i3), Ok((&i3[3..], (5, 6)))); + + let i4 = [3, 5, 6, 3, 4, 5]; + assert_eq!(length_value_1(&i4), Ok((&i4[4..], 1286))); + assert_eq!(length_value_2(&i4), Ok((&i4[4..], (5, 6)))); +} + +#[test] +#[cfg(feature = "alloc")] +fn fold_many0_test() { + fn fold_into_vec(mut acc: Vec, item: T) -> Vec { + acc.push(item); + acc + } + fn multi(i: &[u8]) -> IResult<&[u8], Vec<&[u8]>> { + fold_many0(tag("abcd"), Vec::new, fold_into_vec)(i) + } + fn multi_empty(i: &[u8]) -> IResult<&[u8], 
Vec<&[u8]>> { + fold_many0(tag(""), Vec::new, fold_into_vec)(i) + } + + assert_eq!(multi(&b"abcdef"[..]), Ok((&b"ef"[..], vec![&b"abcd"[..]]))); + assert_eq!( + multi(&b"abcdabcdefgh"[..]), + Ok((&b"efgh"[..], vec![&b"abcd"[..], &b"abcd"[..]])) + ); + assert_eq!(multi(&b"azerty"[..]), Ok((&b"azerty"[..], Vec::new()))); + assert_eq!(multi(&b"abcdab"[..]), Err(Err::Incomplete(Needed::new(2)))); + assert_eq!(multi(&b"abcd"[..]), Err(Err::Incomplete(Needed::new(4)))); + assert_eq!(multi(&b""[..]), Err(Err::Incomplete(Needed::new(4)))); + assert_eq!( + multi_empty(&b"abcdef"[..]), + Err(Err::Error(error_position!( + &b"abcdef"[..], + ErrorKind::Many0 + ))) + ); +} + +#[test] +#[cfg(feature = "alloc")] +fn fold_many1_test() { + fn fold_into_vec(mut acc: Vec, item: T) -> Vec { + acc.push(item); + acc + } + fn multi(i: &[u8]) -> IResult<&[u8], Vec<&[u8]>> { + fold_many1(tag("abcd"), Vec::new, fold_into_vec)(i) + } + + let a = &b"abcdef"[..]; + let b = &b"abcdabcdefgh"[..]; + let c = &b"azerty"[..]; + let d = &b"abcdab"[..]; + + let res1 = vec![&b"abcd"[..]]; + assert_eq!(multi(a), Ok((&b"ef"[..], res1))); + let res2 = vec![&b"abcd"[..], &b"abcd"[..]]; + assert_eq!(multi(b), Ok((&b"efgh"[..], res2))); + assert_eq!( + multi(c), + Err(Err::Error(error_position!(c, ErrorKind::Many1))) + ); + assert_eq!(multi(d), Err(Err::Incomplete(Needed::new(2)))); +} + +#[test] +#[cfg(feature = "alloc")] +fn fold_many_m_n_test() { + fn fold_into_vec(mut acc: Vec, item: T) -> Vec { + acc.push(item); + acc + } + fn multi(i: &[u8]) -> IResult<&[u8], Vec<&[u8]>> { + fold_many_m_n(2, 4, tag("Abcd"), Vec::new, fold_into_vec)(i) + } + + let a = &b"Abcdef"[..]; + let b = &b"AbcdAbcdefgh"[..]; + let c = &b"AbcdAbcdAbcdAbcdefgh"[..]; + let d = &b"AbcdAbcdAbcdAbcdAbcdefgh"[..]; + let e = &b"AbcdAb"[..]; + + assert_eq!( + multi(a), + Err(Err::Error(error_position!(&b"ef"[..], ErrorKind::Tag))) + ); + let res1 = vec![&b"Abcd"[..], &b"Abcd"[..]]; + assert_eq!(multi(b), Ok((&b"efgh"[..], res1))); + let res2 = vec![&b"Abcd"[..], &b"Abcd"[..], &b"Abcd"[..], &b"Abcd"[..]]; + assert_eq!(multi(c), Ok((&b"efgh"[..], res2))); + let res3 = vec![&b"Abcd"[..], &b"Abcd"[..], &b"Abcd"[..], &b"Abcd"[..]]; + assert_eq!(multi(d), Ok((&b"Abcdefgh"[..], res3))); + assert_eq!(multi(e), Err(Err::Incomplete(Needed::new(2)))); +} + +#[test] +fn many0_count_test() { + fn count0_nums(i: &[u8]) -> IResult<&[u8], usize> { + many0_count(pair(digit, tag(",")))(i) + } + + assert_eq!(count0_nums(&b"123,junk"[..]), Ok((&b"junk"[..], 1))); + + assert_eq!(count0_nums(&b"123,45,junk"[..]), Ok((&b"junk"[..], 2))); + + assert_eq!( + count0_nums(&b"1,2,3,4,5,6,7,8,9,0,junk"[..]), + Ok((&b"junk"[..], 10)) + ); + + assert_eq!(count0_nums(&b"hello"[..]), Ok((&b"hello"[..], 0))); +} + +#[test] +fn many1_count_test() { + fn count1_nums(i: &[u8]) -> IResult<&[u8], usize> { + many1_count(pair(digit, tag(",")))(i) + } + + assert_eq!(count1_nums(&b"123,45,junk"[..]), Ok((&b"junk"[..], 2))); + + assert_eq!( + count1_nums(&b"1,2,3,4,5,6,7,8,9,0,junk"[..]), + Ok((&b"junk"[..], 10)) + ); + + assert_eq!( + count1_nums(&b"hello"[..]), + Err(Err::Error(error_position!( + &b"hello"[..], + ErrorKind::Many1Count + ))) + ); +} diff --git a/vendor/nom/src/number/complete.rs b/vendor/nom/src/number/complete.rs new file mode 100644 index 00000000000000..98b8b3abf836c9 --- /dev/null +++ b/vendor/nom/src/number/complete.rs @@ -0,0 +1,2126 @@ +//! 
Parsers recognizing numbers, complete input version + +use crate::branch::alt; +use crate::bytes::complete::tag; +use crate::character::complete::{char, digit1, sign}; +use crate::combinator::{cut, map, opt, recognize}; +use crate::error::ParseError; +use crate::error::{make_error, ErrorKind}; +use crate::internal::*; +use crate::lib::std::ops::{Range, RangeFrom, RangeTo}; +use crate::sequence::{pair, tuple}; +use crate::traits::{ + AsBytes, AsChar, Compare, InputIter, InputLength, InputTake, InputTakeAtPosition, Offset, Slice, +}; + +/// Recognizes an unsigned 1 byte integer. +/// +/// *Complete version*: Returns an error if there is not enough input data. +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// # use nom::Needed::Size; +/// use nom::number::complete::be_u8; +/// +/// let parser = |s| { +/// be_u8(s) +/// }; +/// +/// assert_eq!(parser(&b"\x00\x03abcefg"[..]), Ok((&b"\x03abcefg"[..], 0x00))); +/// assert_eq!(parser(&b""[..]), Err(Err::Error((&[][..], ErrorKind::Eof)))); +/// ``` +#[inline] +pub fn be_u8>(input: I) -> IResult +where + I: Slice> + InputIter + InputLength, +{ + let bound: usize = 1; + if input.input_len() < bound { + Err(Err::Error(make_error(input, ErrorKind::Eof))) + } else { + let res = input.iter_elements().next().unwrap(); + + Ok((input.slice(bound..), res)) + } +} + +/// Recognizes a big endian unsigned 2 bytes integer. +/// +/// *Complete version*: Returns an error if there is not enough input data. +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// # use nom::Needed::Size; +/// use nom::number::complete::be_u16; +/// +/// let parser = |s| { +/// be_u16(s) +/// }; +/// +/// assert_eq!(parser(&b"\x00\x03abcefg"[..]), Ok((&b"abcefg"[..], 0x0003))); +/// assert_eq!(parser(&b"\x01"[..]), Err(Err::Error((&[0x01][..], ErrorKind::Eof)))); +/// ``` +#[inline] +pub fn be_u16>(input: I) -> IResult +where + I: Slice> + InputIter + InputLength, +{ + let bound: usize = 2; + if input.input_len() < bound { + Err(Err::Error(make_error(input, ErrorKind::Eof))) + } else { + let mut res = 0u16; + for byte in input.iter_elements().take(bound) { + res = (res << 8) + byte as u16; + } + + Ok((input.slice(bound..), res)) + } +} + +/// Recognizes a big endian unsigned 3 byte integer. +/// +/// *Complete version*: Returns an error if there is not enough input data. +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// # use nom::Needed::Size; +/// use nom::number::complete::be_u24; +/// +/// let parser = |s| { +/// be_u24(s) +/// }; +/// +/// assert_eq!(parser(&b"\x00\x03\x05abcefg"[..]), Ok((&b"abcefg"[..], 0x000305))); +/// assert_eq!(parser(&b"\x01"[..]), Err(Err::Error((&[0x01][..], ErrorKind::Eof)))); +/// ``` +#[inline] +pub fn be_u24>(input: I) -> IResult +where + I: Slice> + InputIter + InputLength, +{ + let bound: usize = 3; + if input.input_len() < bound { + Err(Err::Error(make_error(input, ErrorKind::Eof))) + } else { + let mut res = 0u32; + for byte in input.iter_elements().take(bound) { + res = (res << 8) + byte as u32; + } + + Ok((input.slice(bound..), res)) + } +} + +/// Recognizes a big endian unsigned 4 bytes integer. +/// +/// *Complete version*: Returns an error if there is not enough input data. 
+/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// # use nom::Needed::Size; +/// use nom::number::complete::be_u32; +/// +/// let parser = |s| { +/// be_u32(s) +/// }; +/// +/// assert_eq!(parser(&b"\x00\x03\x05\x07abcefg"[..]), Ok((&b"abcefg"[..], 0x00030507))); +/// assert_eq!(parser(&b"\x01"[..]), Err(Err::Error((&[0x01][..], ErrorKind::Eof)))); +/// ``` +#[inline] +pub fn be_u32>(input: I) -> IResult +where + I: Slice> + InputIter + InputLength, +{ + let bound: usize = 4; + if input.input_len() < bound { + Err(Err::Error(make_error(input, ErrorKind::Eof))) + } else { + let mut res = 0u32; + for byte in input.iter_elements().take(bound) { + res = (res << 8) + byte as u32; + } + + Ok((input.slice(bound..), res)) + } +} + +/// Recognizes a big endian unsigned 8 bytes integer. +/// +/// *Complete version*: Returns an error if there is not enough input data. +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// # use nom::Needed::Size; +/// use nom::number::complete::be_u64; +/// +/// let parser = |s| { +/// be_u64(s) +/// }; +/// +/// assert_eq!(parser(&b"\x00\x01\x02\x03\x04\x05\x06\x07abcefg"[..]), Ok((&b"abcefg"[..], 0x0001020304050607))); +/// assert_eq!(parser(&b"\x01"[..]), Err(Err::Error((&[0x01][..], ErrorKind::Eof)))); +/// ``` +#[inline] +pub fn be_u64>(input: I) -> IResult +where + I: Slice> + InputIter + InputLength, +{ + let bound: usize = 8; + if input.input_len() < bound { + Err(Err::Error(make_error(input, ErrorKind::Eof))) + } else { + let mut res = 0u64; + for byte in input.iter_elements().take(bound) { + res = (res << 8) + byte as u64; + } + + Ok((input.slice(bound..), res)) + } +} + +/// Recognizes a big endian unsigned 16 bytes integer. +/// +/// *Complete version*: Returns an error if there is not enough input data. +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// # use nom::Needed::Size; +/// use nom::number::complete::be_u128; +/// +/// let parser = |s| { +/// be_u128(s) +/// }; +/// +/// assert_eq!(parser(&b"\x00\x01\x02\x03\x04\x05\x06\x07\x00\x01\x02\x03\x04\x05\x06\x07abcefg"[..]), Ok((&b"abcefg"[..], 0x00010203040506070001020304050607))); +/// assert_eq!(parser(&b"\x01"[..]), Err(Err::Error((&[0x01][..], ErrorKind::Eof)))); +/// ``` +#[inline] +pub fn be_u128>(input: I) -> IResult +where + I: Slice> + InputIter + InputLength, +{ + let bound: usize = 16; + if input.input_len() < bound { + Err(Err::Error(make_error(input, ErrorKind::Eof))) + } else { + let mut res = 0u128; + for byte in input.iter_elements().take(bound) { + res = (res << 8) + byte as u128; + } + + Ok((input.slice(bound..), res)) + } +} + +/// Recognizes a signed 1 byte integer. +/// +/// *Complete version*: Returns an error if there is not enough input data. +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// # use nom::Needed::Size; +/// use nom::number::complete::be_i8; +/// +/// let parser = |s| { +/// be_i8(s) +/// }; +/// +/// assert_eq!(parser(&b"\x00\x03abcefg"[..]), Ok((&b"\x03abcefg"[..], 0x00))); +/// assert_eq!(parser(&b""[..]), Err(Err::Error((&[][..], ErrorKind::Eof)))); +/// ``` +#[inline] +pub fn be_i8>(input: I) -> IResult +where + I: Slice> + InputIter + InputLength, +{ + be_u8.map(|x| x as i8).parse(input) +} + +/// Recognizes a big endian signed 2 bytes integer. +/// +/// *Complete version*: Returns an error if there is not enough input data. 
+/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// # use nom::Needed::Size; +/// use nom::number::complete::be_i16; +/// +/// let parser = |s| { +/// be_i16(s) +/// }; +/// +/// assert_eq!(parser(&b"\x00\x03abcefg"[..]), Ok((&b"abcefg"[..], 0x0003))); +/// assert_eq!(parser(&b"\x01"[..]), Err(Err::Error((&[0x01][..], ErrorKind::Eof)))); +/// ``` +#[inline] +pub fn be_i16>(input: I) -> IResult +where + I: Slice> + InputIter + InputLength, +{ + be_u16.map(|x| x as i16).parse(input) +} + +/// Recognizes a big endian signed 3 bytes integer. +/// +/// *Complete version*: Returns an error if there is not enough input data. +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// # use nom::Needed::Size; +/// use nom::number::complete::be_i24; +/// +/// let parser = |s| { +/// be_i24(s) +/// }; +/// +/// assert_eq!(parser(&b"\x00\x03\x05abcefg"[..]), Ok((&b"abcefg"[..], 0x000305))); +/// assert_eq!(parser(&b"\x01"[..]), Err(Err::Error((&[0x01][..], ErrorKind::Eof)))); +/// ``` +#[inline] +pub fn be_i24>(input: I) -> IResult +where + I: Slice> + InputIter + InputLength, +{ + // Same as the unsigned version but we need to sign-extend manually here + be_u24 + .map(|x| { + if x & 0x80_00_00 != 0 { + (x | 0xff_00_00_00) as i32 + } else { + x as i32 + } + }) + .parse(input) +} + +/// Recognizes a big endian signed 4 bytes integer. +/// +/// *Complete version*: Returns an error if there is not enough input data. +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// # use nom::Needed::Size; +/// use nom::number::complete::be_i32; +/// +/// let parser = |s| { +/// be_i32(s) +/// }; +/// +/// assert_eq!(parser(&b"\x00\x03\x05\x07abcefg"[..]), Ok((&b"abcefg"[..], 0x00030507))); +/// assert_eq!(parser(&b"\x01"[..]), Err(Err::Error((&[0x01][..], ErrorKind::Eof)))); +/// ``` +#[inline] +pub fn be_i32>(input: I) -> IResult +where + I: Slice> + InputIter + InputLength, +{ + be_u32.map(|x| x as i32).parse(input) +} + +/// Recognizes a big endian signed 8 bytes integer. +/// +/// *Complete version*: Returns an error if there is not enough input data. +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// # use nom::Needed::Size; +/// use nom::number::complete::be_i64; +/// +/// let parser = |s| { +/// be_i64(s) +/// }; +/// +/// assert_eq!(parser(&b"\x00\x01\x02\x03\x04\x05\x06\x07abcefg"[..]), Ok((&b"abcefg"[..], 0x0001020304050607))); +/// assert_eq!(parser(&b"\x01"[..]), Err(Err::Error((&[0x01][..], ErrorKind::Eof)))); +/// ``` +#[inline] +pub fn be_i64>(input: I) -> IResult +where + I: Slice> + InputIter + InputLength, +{ + be_u64.map(|x| x as i64).parse(input) +} + +/// Recognizes a big endian signed 16 bytes integer. +/// +/// *Complete version*: Returns an error if there is not enough input data. +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// # use nom::Needed::Size; +/// use nom::number::complete::be_i128; +/// +/// let parser = |s| { +/// be_i128(s) +/// }; +/// +/// assert_eq!(parser(&b"\x00\x01\x02\x03\x04\x05\x06\x07\x00\x01\x02\x03\x04\x05\x06\x07abcefg"[..]), Ok((&b"abcefg"[..], 0x00010203040506070001020304050607))); +/// assert_eq!(parser(&b"\x01"[..]), Err(Err::Error((&[0x01][..], ErrorKind::Eof)))); +/// ``` +#[inline] +pub fn be_i128>(input: I) -> IResult +where + I: Slice> + InputIter + InputLength, +{ + be_u128.map(|x| x as i128).parse(input) +} + +/// Recognizes an unsigned 1 byte integer. +/// +/// *Complete version*: Returns an error if there is not enough input data. 
+/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// # use nom::Needed::Size; +/// use nom::number::complete::le_u8; +/// +/// let parser = |s| { +/// le_u8(s) +/// }; +/// +/// assert_eq!(parser(&b"\x00\x03abcefg"[..]), Ok((&b"\x03abcefg"[..], 0x00))); +/// assert_eq!(parser(&b""[..]), Err(Err::Error((&[][..], ErrorKind::Eof)))); +/// ``` +#[inline] +pub fn le_u8>(input: I) -> IResult +where + I: Slice> + InputIter + InputLength, +{ + let bound: usize = 1; + if input.input_len() < bound { + Err(Err::Error(make_error(input, ErrorKind::Eof))) + } else { + let res = input.iter_elements().next().unwrap(); + + Ok((input.slice(bound..), res)) + } +} + +/// Recognizes a little endian unsigned 2 bytes integer. +/// +/// *Complete version*: Returns an error if there is not enough input data. +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// # use nom::Needed::Size; +/// use nom::number::complete::le_u16; +/// +/// let parser = |s| { +/// le_u16(s) +/// }; +/// +/// assert_eq!(parser(&b"\x00\x03abcefg"[..]), Ok((&b"abcefg"[..], 0x0300))); +/// assert_eq!(parser(&b"\x01"[..]), Err(Err::Error((&[0x01][..], ErrorKind::Eof)))); +/// ``` +#[inline] +pub fn le_u16>(input: I) -> IResult +where + I: Slice> + InputIter + InputLength, +{ + let bound: usize = 2; + if input.input_len() < bound { + Err(Err::Error(make_error(input, ErrorKind::Eof))) + } else { + let mut res = 0u16; + for (index, byte) in input.iter_indices().take(bound) { + res += (byte as u16) << (8 * index); + } + + Ok((input.slice(bound..), res)) + } +} + +/// Recognizes a little endian unsigned 3 byte integer. +/// +/// *Complete version*: Returns an error if there is not enough input data. +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// # use nom::Needed::Size; +/// use nom::number::complete::le_u24; +/// +/// let parser = |s| { +/// le_u24(s) +/// }; +/// +/// assert_eq!(parser(&b"\x00\x03\x05abcefg"[..]), Ok((&b"abcefg"[..], 0x050300))); +/// assert_eq!(parser(&b"\x01"[..]), Err(Err::Error((&[0x01][..], ErrorKind::Eof)))); +/// ``` +#[inline] +pub fn le_u24>(input: I) -> IResult +where + I: Slice> + InputIter + InputLength, +{ + let bound: usize = 3; + if input.input_len() < bound { + Err(Err::Error(make_error(input, ErrorKind::Eof))) + } else { + let mut res = 0u32; + for (index, byte) in input.iter_indices().take(bound) { + res += (byte as u32) << (8 * index); + } + + Ok((input.slice(bound..), res)) + } +} + +/// Recognizes a little endian unsigned 4 bytes integer. +/// +/// *Complete version*: Returns an error if there is not enough input data. +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// # use nom::Needed::Size; +/// use nom::number::complete::le_u32; +/// +/// let parser = |s| { +/// le_u32(s) +/// }; +/// +/// assert_eq!(parser(&b"\x00\x03\x05\x07abcefg"[..]), Ok((&b"abcefg"[..], 0x07050300))); +/// assert_eq!(parser(&b"\x01"[..]), Err(Err::Error((&[0x01][..], ErrorKind::Eof)))); +/// ``` +#[inline] +pub fn le_u32>(input: I) -> IResult +where + I: Slice> + InputIter + InputLength, +{ + let bound: usize = 4; + if input.input_len() < bound { + Err(Err::Error(make_error(input, ErrorKind::Eof))) + } else { + let mut res = 0u32; + for (index, byte) in input.iter_indices().take(bound) { + res += (byte as u32) << (8 * index); + } + + Ok((input.slice(bound..), res)) + } +} + +/// Recognizes a little endian unsigned 8 bytes integer. +/// +/// *Complete version*: Returns an error if there is not enough input data. 
+/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// # use nom::Needed::Size; +/// use nom::number::complete::le_u64; +/// +/// let parser = |s| { +/// le_u64(s) +/// }; +/// +/// assert_eq!(parser(&b"\x00\x01\x02\x03\x04\x05\x06\x07abcefg"[..]), Ok((&b"abcefg"[..], 0x0706050403020100))); +/// assert_eq!(parser(&b"\x01"[..]), Err(Err::Error((&[0x01][..], ErrorKind::Eof)))); +/// ``` +#[inline] +pub fn le_u64>(input: I) -> IResult +where + I: Slice> + InputIter + InputLength, +{ + let bound: usize = 8; + if input.input_len() < bound { + Err(Err::Error(make_error(input, ErrorKind::Eof))) + } else { + let mut res = 0u64; + for (index, byte) in input.iter_indices().take(bound) { + res += (byte as u64) << (8 * index); + } + + Ok((input.slice(bound..), res)) + } +} + +/// Recognizes a little endian unsigned 16 bytes integer. +/// +/// *Complete version*: Returns an error if there is not enough input data. +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// # use nom::Needed::Size; +/// use nom::number::complete::le_u128; +/// +/// let parser = |s| { +/// le_u128(s) +/// }; +/// +/// assert_eq!(parser(&b"\x00\x01\x02\x03\x04\x05\x06\x07\x00\x01\x02\x03\x04\x05\x06\x07abcefg"[..]), Ok((&b"abcefg"[..], 0x07060504030201000706050403020100))); +/// assert_eq!(parser(&b"\x01"[..]), Err(Err::Error((&[0x01][..], ErrorKind::Eof)))); +/// ``` +#[inline] +pub fn le_u128>(input: I) -> IResult +where + I: Slice> + InputIter + InputLength, +{ + let bound: usize = 16; + if input.input_len() < bound { + Err(Err::Error(make_error(input, ErrorKind::Eof))) + } else { + let mut res = 0u128; + for (index, byte) in input.iter_indices().take(bound) { + res += (byte as u128) << (8 * index); + } + + Ok((input.slice(bound..), res)) + } +} + +/// Recognizes a signed 1 byte integer. +/// +/// *Complete version*: Returns an error if there is not enough input data. +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// # use nom::Needed::Size; +/// use nom::number::complete::le_i8; +/// +/// let parser = |s| { +/// le_i8(s) +/// }; +/// +/// assert_eq!(parser(&b"\x00\x03abcefg"[..]), Ok((&b"\x03abcefg"[..], 0x00))); +/// assert_eq!(parser(&b""[..]), Err(Err::Error((&[][..], ErrorKind::Eof)))); +/// ``` +#[inline] +pub fn le_i8>(input: I) -> IResult +where + I: Slice> + InputIter + InputLength, +{ + be_u8.map(|x| x as i8).parse(input) +} + +/// Recognizes a little endian signed 2 bytes integer. +/// +/// *Complete version*: Returns an error if there is not enough input data. +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// # use nom::Needed::Size; +/// use nom::number::complete::le_i16; +/// +/// let parser = |s| { +/// le_i16(s) +/// }; +/// +/// assert_eq!(parser(&b"\x00\x03abcefg"[..]), Ok((&b"abcefg"[..], 0x0300))); +/// assert_eq!(parser(&b"\x01"[..]), Err(Err::Error((&[0x01][..], ErrorKind::Eof)))); +/// ``` +#[inline] +pub fn le_i16>(input: I) -> IResult +where + I: Slice> + InputIter + InputLength, +{ + le_u16.map(|x| x as i16).parse(input) +} + +/// Recognizes a little endian signed 3 bytes integer. +/// +/// *Complete version*: Returns an error if there is not enough input data. 
+/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// # use nom::Needed::Size; +/// use nom::number::complete::le_i24; +/// +/// let parser = |s| { +/// le_i24(s) +/// }; +/// +/// assert_eq!(parser(&b"\x00\x03\x05abcefg"[..]), Ok((&b"abcefg"[..], 0x050300))); +/// assert_eq!(parser(&b"\x01"[..]), Err(Err::Error((&[0x01][..], ErrorKind::Eof)))); +/// ``` +#[inline] +pub fn le_i24>(input: I) -> IResult +where + I: Slice> + InputIter + InputLength, +{ + // Same as the unsigned version but we need to sign-extend manually here + le_u24 + .map(|x| { + if x & 0x80_00_00 != 0 { + (x | 0xff_00_00_00) as i32 + } else { + x as i32 + } + }) + .parse(input) +} + +/// Recognizes a little endian signed 4 bytes integer. +/// +/// *Complete version*: Returns an error if there is not enough input data. +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// # use nom::Needed::Size; +/// use nom::number::complete::le_i32; +/// +/// let parser = |s| { +/// le_i32(s) +/// }; +/// +/// assert_eq!(parser(&b"\x00\x03\x05\x07abcefg"[..]), Ok((&b"abcefg"[..], 0x07050300))); +/// assert_eq!(parser(&b"\x01"[..]), Err(Err::Error((&[0x01][..], ErrorKind::Eof)))); +/// ``` +#[inline] +pub fn le_i32>(input: I) -> IResult +where + I: Slice> + InputIter + InputLength, +{ + le_u32.map(|x| x as i32).parse(input) +} + +/// Recognizes a little endian signed 8 bytes integer. +/// +/// *Complete version*: Returns an error if there is not enough input data. +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// # use nom::Needed::Size; +/// use nom::number::complete::le_i64; +/// +/// let parser = |s| { +/// le_i64(s) +/// }; +/// +/// assert_eq!(parser(&b"\x00\x01\x02\x03\x04\x05\x06\x07abcefg"[..]), Ok((&b"abcefg"[..], 0x0706050403020100))); +/// assert_eq!(parser(&b"\x01"[..]), Err(Err::Error((&[0x01][..], ErrorKind::Eof)))); +/// ``` +#[inline] +pub fn le_i64>(input: I) -> IResult +where + I: Slice> + InputIter + InputLength, +{ + le_u64.map(|x| x as i64).parse(input) +} + +/// Recognizes a little endian signed 16 bytes integer. +/// +/// *Complete version*: Returns an error if there is not enough input data. +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// # use nom::Needed::Size; +/// use nom::number::complete::le_i128; +/// +/// let parser = |s| { +/// le_i128(s) +/// }; +/// +/// assert_eq!(parser(&b"\x00\x01\x02\x03\x04\x05\x06\x07\x00\x01\x02\x03\x04\x05\x06\x07abcefg"[..]), Ok((&b"abcefg"[..], 0x07060504030201000706050403020100))); +/// assert_eq!(parser(&b"\x01"[..]), Err(Err::Error((&[0x01][..], ErrorKind::Eof)))); +/// ``` +#[inline] +pub fn le_i128>(input: I) -> IResult +where + I: Slice> + InputIter + InputLength, +{ + le_u128.map(|x| x as i128).parse(input) +} + +/// Recognizes an unsigned 1 byte integer +/// +/// Note that endianness does not apply to 1 byte numbers. 
+/// *complete version*: returns an error if there is not enough input data +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// # use nom::Needed::Size; +/// use nom::number::complete::u8; +/// +/// let parser = |s| { +/// u8(s) +/// }; +/// +/// assert_eq!(parser(&b"\x00\x03abcefg"[..]), Ok((&b"\x03abcefg"[..], 0x00))); +/// assert_eq!(parser(&b""[..]), Err(Err::Error((&[][..], ErrorKind::Eof)))); +/// ``` +#[inline] +pub fn u8>(input: I) -> IResult +where + I: Slice> + InputIter + InputLength, +{ + let bound: usize = 1; + if input.input_len() < bound { + Err(Err::Error(make_error(input, ErrorKind::Eof))) + } else { + let res = input.iter_elements().next().unwrap(); + + Ok((input.slice(bound..), res)) + } +} + +/// Recognizes an unsigned 2 bytes integer +/// +/// If the parameter is `nom::number::Endianness::Big`, parse a big endian u16 integer, +/// otherwise if `nom::number::Endianness::Little` parse a little endian u16 integer. +/// *complete version*: returns an error if there is not enough input data +/// +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// # use nom::Needed::Size; +/// use nom::number::complete::u16; +/// +/// let be_u16 = |s| { +/// u16(nom::number::Endianness::Big)(s) +/// }; +/// +/// assert_eq!(be_u16(&b"\x00\x03abcefg"[..]), Ok((&b"abcefg"[..], 0x0003))); +/// assert_eq!(be_u16(&b"\x01"[..]), Err(Err::Error((&[0x01][..], ErrorKind::Eof)))); +/// +/// let le_u16 = |s| { +/// u16(nom::number::Endianness::Little)(s) +/// }; +/// +/// assert_eq!(le_u16(&b"\x00\x03abcefg"[..]), Ok((&b"abcefg"[..], 0x0300))); +/// assert_eq!(le_u16(&b"\x01"[..]), Err(Err::Error((&[0x01][..], ErrorKind::Eof)))); +/// ``` +#[inline] +pub fn u16>(endian: crate::number::Endianness) -> fn(I) -> IResult +where + I: Slice> + InputIter + InputLength, +{ + match endian { + crate::number::Endianness::Big => be_u16, + crate::number::Endianness::Little => le_u16, + #[cfg(target_endian = "big")] + crate::number::Endianness::Native => be_u16, + #[cfg(target_endian = "little")] + crate::number::Endianness::Native => le_u16, + } +} + +/// Recognizes an unsigned 3 byte integer +/// +/// If the parameter is `nom::number::Endianness::Big`, parse a big endian u24 integer, +/// otherwise if `nom::number::Endianness::Little` parse a little endian u24 integer. 
+/// *complete version*: returns an error if there is not enough input data +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// # use nom::Needed::Size; +/// use nom::number::complete::u24; +/// +/// let be_u24 = |s| { +/// u24(nom::number::Endianness::Big)(s) +/// }; +/// +/// assert_eq!(be_u24(&b"\x00\x03\x05abcefg"[..]), Ok((&b"abcefg"[..], 0x000305))); +/// assert_eq!(be_u24(&b"\x01"[..]), Err(Err::Error((&[0x01][..], ErrorKind::Eof)))); +/// +/// let le_u24 = |s| { +/// u24(nom::number::Endianness::Little)(s) +/// }; +/// +/// assert_eq!(le_u24(&b"\x00\x03\x05abcefg"[..]), Ok((&b"abcefg"[..], 0x050300))); +/// assert_eq!(le_u24(&b"\x01"[..]), Err(Err::Error((&[0x01][..], ErrorKind::Eof)))); +/// ``` +#[inline] +pub fn u24>(endian: crate::number::Endianness) -> fn(I) -> IResult +where + I: Slice> + InputIter + InputLength, +{ + match endian { + crate::number::Endianness::Big => be_u24, + crate::number::Endianness::Little => le_u24, + #[cfg(target_endian = "big")] + crate::number::Endianness::Native => be_u24, + #[cfg(target_endian = "little")] + crate::number::Endianness::Native => le_u24, + } +} + +/// Recognizes an unsigned 4 byte integer +/// +/// If the parameter is `nom::number::Endianness::Big`, parse a big endian u32 integer, +/// otherwise if `nom::number::Endianness::Little` parse a little endian u32 integer. +/// *complete version*: returns an error if there is not enough input data +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// # use nom::Needed::Size; +/// use nom::number::complete::u32; +/// +/// let be_u32 = |s| { +/// u32(nom::number::Endianness::Big)(s) +/// }; +/// +/// assert_eq!(be_u32(&b"\x00\x03\x05\x07abcefg"[..]), Ok((&b"abcefg"[..], 0x00030507))); +/// assert_eq!(be_u32(&b"\x01"[..]), Err(Err::Error((&[0x01][..], ErrorKind::Eof)))); +/// +/// let le_u32 = |s| { +/// u32(nom::number::Endianness::Little)(s) +/// }; +/// +/// assert_eq!(le_u32(&b"\x00\x03\x05\x07abcefg"[..]), Ok((&b"abcefg"[..], 0x07050300))); +/// assert_eq!(le_u32(&b"\x01"[..]), Err(Err::Error((&[0x01][..], ErrorKind::Eof)))); +/// ``` +#[inline] +pub fn u32>(endian: crate::number::Endianness) -> fn(I) -> IResult +where + I: Slice> + InputIter + InputLength, +{ + match endian { + crate::number::Endianness::Big => be_u32, + crate::number::Endianness::Little => le_u32, + #[cfg(target_endian = "big")] + crate::number::Endianness::Native => be_u32, + #[cfg(target_endian = "little")] + crate::number::Endianness::Native => le_u32, + } +} + +/// Recognizes an unsigned 8 byte integer +/// +/// If the parameter is `nom::number::Endianness::Big`, parse a big endian u64 integer, +/// otherwise if `nom::number::Endianness::Little` parse a little endian u64 integer. 
+/// *complete version*: returns an error if there is not enough input data +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// # use nom::Needed::Size; +/// use nom::number::complete::u64; +/// +/// let be_u64 = |s| { +/// u64(nom::number::Endianness::Big)(s) +/// }; +/// +/// assert_eq!(be_u64(&b"\x00\x01\x02\x03\x04\x05\x06\x07abcefg"[..]), Ok((&b"abcefg"[..], 0x0001020304050607))); +/// assert_eq!(be_u64(&b"\x01"[..]), Err(Err::Error((&[0x01][..], ErrorKind::Eof)))); +/// +/// let le_u64 = |s| { +/// u64(nom::number::Endianness::Little)(s) +/// }; +/// +/// assert_eq!(le_u64(&b"\x00\x01\x02\x03\x04\x05\x06\x07abcefg"[..]), Ok((&b"abcefg"[..], 0x0706050403020100))); +/// assert_eq!(le_u64(&b"\x01"[..]), Err(Err::Error((&[0x01][..], ErrorKind::Eof)))); +/// ``` +#[inline] +pub fn u64>(endian: crate::number::Endianness) -> fn(I) -> IResult +where + I: Slice> + InputIter + InputLength, +{ + match endian { + crate::number::Endianness::Big => be_u64, + crate::number::Endianness::Little => le_u64, + #[cfg(target_endian = "big")] + crate::number::Endianness::Native => be_u64, + #[cfg(target_endian = "little")] + crate::number::Endianness::Native => le_u64, + } +} + +/// Recognizes an unsigned 16 byte integer +/// +/// If the parameter is `nom::number::Endianness::Big`, parse a big endian u128 integer, +/// otherwise if `nom::number::Endianness::Little` parse a little endian u128 integer. +/// *complete version*: returns an error if there is not enough input data +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// # use nom::Needed::Size; +/// use nom::number::complete::u128; +/// +/// let be_u128 = |s| { +/// u128(nom::number::Endianness::Big)(s) +/// }; +/// +/// assert_eq!(be_u128(&b"\x00\x01\x02\x03\x04\x05\x06\x07\x00\x01\x02\x03\x04\x05\x06\x07abcefg"[..]), Ok((&b"abcefg"[..], 0x00010203040506070001020304050607))); +/// assert_eq!(be_u128(&b"\x01"[..]), Err(Err::Error((&[0x01][..], ErrorKind::Eof)))); +/// +/// let le_u128 = |s| { +/// u128(nom::number::Endianness::Little)(s) +/// }; +/// +/// assert_eq!(le_u128(&b"\x00\x01\x02\x03\x04\x05\x06\x07\x00\x01\x02\x03\x04\x05\x06\x07abcefg"[..]), Ok((&b"abcefg"[..], 0x07060504030201000706050403020100))); +/// assert_eq!(le_u128(&b"\x01"[..]), Err(Err::Error((&[0x01][..], ErrorKind::Eof)))); +/// ``` +#[inline] +pub fn u128>(endian: crate::number::Endianness) -> fn(I) -> IResult +where + I: Slice> + InputIter + InputLength, +{ + match endian { + crate::number::Endianness::Big => be_u128, + crate::number::Endianness::Little => le_u128, + #[cfg(target_endian = "big")] + crate::number::Endianness::Native => be_u128, + #[cfg(target_endian = "little")] + crate::number::Endianness::Native => le_u128, + } +} + +/// Recognizes a signed 1 byte integer +/// +/// Note that endianness does not apply to 1 byte numbers. 
+/// *complete version*: returns an error if there is not enough input data +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// # use nom::Needed::Size; +/// use nom::number::complete::i8; +/// +/// let parser = |s| { +/// i8(s) +/// }; +/// +/// assert_eq!(parser(&b"\x00\x03abcefg"[..]), Ok((&b"\x03abcefg"[..], 0x00))); +/// assert_eq!(parser(&b""[..]), Err(Err::Error((&[][..], ErrorKind::Eof)))); +/// ``` +#[inline] +pub fn i8>(i: I) -> IResult +where + I: Slice> + InputIter + InputLength, +{ + u8.map(|x| x as i8).parse(i) +} + +/// Recognizes a signed 2 byte integer +/// +/// If the parameter is `nom::number::Endianness::Big`, parse a big endian i16 integer, +/// otherwise if `nom::number::Endianness::Little` parse a little endian i16 integer. +/// *complete version*: returns an error if there is not enough input data +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// # use nom::Needed::Size; +/// use nom::number::complete::i16; +/// +/// let be_i16 = |s| { +/// i16(nom::number::Endianness::Big)(s) +/// }; +/// +/// assert_eq!(be_i16(&b"\x00\x03abcefg"[..]), Ok((&b"abcefg"[..], 0x0003))); +/// assert_eq!(be_i16(&b"\x01"[..]), Err(Err::Error((&[0x01][..], ErrorKind::Eof)))); +/// +/// let le_i16 = |s| { +/// i16(nom::number::Endianness::Little)(s) +/// }; +/// +/// assert_eq!(le_i16(&b"\x00\x03abcefg"[..]), Ok((&b"abcefg"[..], 0x0300))); +/// assert_eq!(le_i16(&b"\x01"[..]), Err(Err::Error((&[0x01][..], ErrorKind::Eof)))); +/// ``` +#[inline] +pub fn i16>(endian: crate::number::Endianness) -> fn(I) -> IResult +where + I: Slice> + InputIter + InputLength, +{ + match endian { + crate::number::Endianness::Big => be_i16, + crate::number::Endianness::Little => le_i16, + #[cfg(target_endian = "big")] + crate::number::Endianness::Native => be_i16, + #[cfg(target_endian = "little")] + crate::number::Endianness::Native => le_i16, + } +} + +/// Recognizes a signed 3 byte integer +/// +/// If the parameter is `nom::number::Endianness::Big`, parse a big endian i24 integer, +/// otherwise if `nom::number::Endianness::Little` parse a little endian i24 integer. +/// *complete version*: returns an error if there is not enough input data +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// # use nom::Needed::Size; +/// use nom::number::complete::i24; +/// +/// let be_i24 = |s| { +/// i24(nom::number::Endianness::Big)(s) +/// }; +/// +/// assert_eq!(be_i24(&b"\x00\x03\x05abcefg"[..]), Ok((&b"abcefg"[..], 0x000305))); +/// assert_eq!(be_i24(&b"\x01"[..]), Err(Err::Error((&[0x01][..], ErrorKind::Eof)))); +/// +/// let le_i24 = |s| { +/// i24(nom::number::Endianness::Little)(s) +/// }; +/// +/// assert_eq!(le_i24(&b"\x00\x03\x05abcefg"[..]), Ok((&b"abcefg"[..], 0x050300))); +/// assert_eq!(le_i24(&b"\x01"[..]), Err(Err::Error((&[0x01][..], ErrorKind::Eof)))); +/// ``` +#[inline] +pub fn i24>(endian: crate::number::Endianness) -> fn(I) -> IResult +where + I: Slice> + InputIter + InputLength, +{ + match endian { + crate::number::Endianness::Big => be_i24, + crate::number::Endianness::Little => le_i24, + #[cfg(target_endian = "big")] + crate::number::Endianness::Native => be_i24, + #[cfg(target_endian = "little")] + crate::number::Endianness::Native => le_i24, + } +} + +/// Recognizes a signed 4 byte integer +/// +/// If the parameter is `nom::number::Endianness::Big`, parse a big endian i32 integer, +/// otherwise if `nom::number::Endianness::Little` parse a little endian i32 integer. 
+/// *complete version*: returns an error if there is not enough input data +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// # use nom::Needed::Size; +/// use nom::number::complete::i32; +/// +/// let be_i32 = |s| { +/// i32(nom::number::Endianness::Big)(s) +/// }; +/// +/// assert_eq!(be_i32(&b"\x00\x03\x05\x07abcefg"[..]), Ok((&b"abcefg"[..], 0x00030507))); +/// assert_eq!(be_i32(&b"\x01"[..]), Err(Err::Error((&[0x01][..], ErrorKind::Eof)))); +/// +/// let le_i32 = |s| { +/// i32(nom::number::Endianness::Little)(s) +/// }; +/// +/// assert_eq!(le_i32(&b"\x00\x03\x05\x07abcefg"[..]), Ok((&b"abcefg"[..], 0x07050300))); +/// assert_eq!(le_i32(&b"\x01"[..]), Err(Err::Error((&[0x01][..], ErrorKind::Eof)))); +/// ``` +#[inline] +pub fn i32>(endian: crate::number::Endianness) -> fn(I) -> IResult +where + I: Slice> + InputIter + InputLength, +{ + match endian { + crate::number::Endianness::Big => be_i32, + crate::number::Endianness::Little => le_i32, + #[cfg(target_endian = "big")] + crate::number::Endianness::Native => be_i32, + #[cfg(target_endian = "little")] + crate::number::Endianness::Native => le_i32, + } +} + +/// Recognizes a signed 8 byte integer +/// +/// If the parameter is `nom::number::Endianness::Big`, parse a big endian i64 integer, +/// otherwise if `nom::number::Endianness::Little` parse a little endian i64 integer. +/// *complete version*: returns an error if there is not enough input data +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// # use nom::Needed::Size; +/// use nom::number::complete::i64; +/// +/// let be_i64 = |s| { +/// i64(nom::number::Endianness::Big)(s) +/// }; +/// +/// assert_eq!(be_i64(&b"\x00\x01\x02\x03\x04\x05\x06\x07abcefg"[..]), Ok((&b"abcefg"[..], 0x0001020304050607))); +/// assert_eq!(be_i64(&b"\x01"[..]), Err(Err::Error((&[0x01][..], ErrorKind::Eof)))); +/// +/// let le_i64 = |s| { +/// i64(nom::number::Endianness::Little)(s) +/// }; +/// +/// assert_eq!(le_i64(&b"\x00\x01\x02\x03\x04\x05\x06\x07abcefg"[..]), Ok((&b"abcefg"[..], 0x0706050403020100))); +/// assert_eq!(le_i64(&b"\x01"[..]), Err(Err::Error((&[0x01][..], ErrorKind::Eof)))); +/// ``` +#[inline] +pub fn i64>(endian: crate::number::Endianness) -> fn(I) -> IResult +where + I: Slice> + InputIter + InputLength, +{ + match endian { + crate::number::Endianness::Big => be_i64, + crate::number::Endianness::Little => le_i64, + #[cfg(target_endian = "big")] + crate::number::Endianness::Native => be_i64, + #[cfg(target_endian = "little")] + crate::number::Endianness::Native => le_i64, + } +} + +/// Recognizes a signed 16 byte integer +/// +/// If the parameter is `nom::number::Endianness::Big`, parse a big endian i128 integer, +/// otherwise if `nom::number::Endianness::Little` parse a little endian i128 integer. 
+/// *complete version*: returns an error if there is not enough input data +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// # use nom::Needed::Size; +/// use nom::number::complete::i128; +/// +/// let be_i128 = |s| { +/// i128(nom::number::Endianness::Big)(s) +/// }; +/// +/// assert_eq!(be_i128(&b"\x00\x01\x02\x03\x04\x05\x06\x07\x00\x01\x02\x03\x04\x05\x06\x07abcefg"[..]), Ok((&b"abcefg"[..], 0x00010203040506070001020304050607))); +/// assert_eq!(be_i128(&b"\x01"[..]), Err(Err::Error((&[0x01][..], ErrorKind::Eof)))); +/// +/// let le_i128 = |s| { +/// i128(nom::number::Endianness::Little)(s) +/// }; +/// +/// assert_eq!(le_i128(&b"\x00\x01\x02\x03\x04\x05\x06\x07\x00\x01\x02\x03\x04\x05\x06\x07abcefg"[..]), Ok((&b"abcefg"[..], 0x07060504030201000706050403020100))); +/// assert_eq!(le_i128(&b"\x01"[..]), Err(Err::Error((&[0x01][..], ErrorKind::Eof)))); +/// ``` +#[inline] +pub fn i128>(endian: crate::number::Endianness) -> fn(I) -> IResult +where + I: Slice> + InputIter + InputLength, +{ + match endian { + crate::number::Endianness::Big => be_i128, + crate::number::Endianness::Little => le_i128, + #[cfg(target_endian = "big")] + crate::number::Endianness::Native => be_i128, + #[cfg(target_endian = "little")] + crate::number::Endianness::Native => le_i128, + } +} + +/// Recognizes a big endian 4 bytes floating point number. +/// +/// *Complete version*: Returns an error if there is not enough input data. +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// # use nom::Needed::Size; +/// use nom::number::complete::be_f32; +/// +/// let parser = |s| { +/// be_f32(s) +/// }; +/// +/// assert_eq!(parser(&[0x41, 0x48, 0x00, 0x00][..]), Ok((&b""[..], 12.5))); +/// assert_eq!(parser(&b"abc"[..]), Err(Err::Error((&b"abc"[..], ErrorKind::Eof)))); +/// ``` +#[inline] +pub fn be_f32>(input: I) -> IResult +where + I: Slice> + InputIter + InputLength, +{ + match be_u32(input) { + Err(e) => Err(e), + Ok((i, o)) => Ok((i, f32::from_bits(o))), + } +} + +/// Recognizes a big endian 8 bytes floating point number. +/// +/// *Complete version*: Returns an error if there is not enough input data. +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// # use nom::Needed::Size; +/// use nom::number::complete::be_f64; +/// +/// let parser = |s| { +/// be_f64(s) +/// }; +/// +/// assert_eq!(parser(&[0x40, 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00][..]), Ok((&b""[..], 12.5))); +/// assert_eq!(parser(&b"abc"[..]), Err(Err::Error((&b"abc"[..], ErrorKind::Eof)))); +/// ``` +#[inline] +pub fn be_f64>(input: I) -> IResult +where + I: Slice> + InputIter + InputLength, +{ + match be_u64(input) { + Err(e) => Err(e), + Ok((i, o)) => Ok((i, f64::from_bits(o))), + } +} + +/// Recognizes a little endian 4 bytes floating point number. +/// +/// *Complete version*: Returns an error if there is not enough input data. +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// # use nom::Needed::Size; +/// use nom::number::complete::le_f32; +/// +/// let parser = |s| { +/// le_f32(s) +/// }; +/// +/// assert_eq!(parser(&[0x00, 0x00, 0x48, 0x41][..]), Ok((&b""[..], 12.5))); +/// assert_eq!(parser(&b"abc"[..]), Err(Err::Error((&b"abc"[..], ErrorKind::Eof)))); +/// ``` +#[inline] +pub fn le_f32>(input: I) -> IResult +where + I: Slice> + InputIter + InputLength, +{ + match le_u32(input) { + Err(e) => Err(e), + Ok((i, o)) => Ok((i, f32::from_bits(o))), + } +} + +/// Recognizes a little endian 8 bytes floating point number. 
+/// +/// *Complete version*: Returns an error if there is not enough input data. +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// # use nom::Needed::Size; +/// use nom::number::complete::le_f64; +/// +/// let parser = |s| { +/// le_f64(s) +/// }; +/// +/// assert_eq!(parser(&[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x29, 0x40][..]), Ok((&b""[..], 12.5))); +/// assert_eq!(parser(&b"abc"[..]), Err(Err::Error((&b"abc"[..], ErrorKind::Eof)))); +/// ``` +#[inline] +pub fn le_f64>(input: I) -> IResult +where + I: Slice> + InputIter + InputLength, +{ + match le_u64(input) { + Err(e) => Err(e), + Ok((i, o)) => Ok((i, f64::from_bits(o))), + } +} + +/// Recognizes a 4 byte floating point number +/// +/// If the parameter is `nom::number::Endianness::Big`, parse a big endian f32 float, +/// otherwise if `nom::number::Endianness::Little` parse a little endian f32 float. +/// *complete version*: returns an error if there is not enough input data +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// # use nom::Needed::Size; +/// use nom::number::complete::f32; +/// +/// let be_f32 = |s| { +/// f32(nom::number::Endianness::Big)(s) +/// }; +/// +/// assert_eq!(be_f32(&[0x41, 0x48, 0x00, 0x00][..]), Ok((&b""[..], 12.5))); +/// assert_eq!(be_f32(&b"abc"[..]), Err(Err::Error((&b"abc"[..], ErrorKind::Eof)))); +/// +/// let le_f32 = |s| { +/// f32(nom::number::Endianness::Little)(s) +/// }; +/// +/// assert_eq!(le_f32(&[0x00, 0x00, 0x48, 0x41][..]), Ok((&b""[..], 12.5))); +/// assert_eq!(le_f32(&b"abc"[..]), Err(Err::Error((&b"abc"[..], ErrorKind::Eof)))); +/// ``` +#[inline] +pub fn f32>(endian: crate::number::Endianness) -> fn(I) -> IResult +where + I: Slice> + InputIter + InputLength, +{ + match endian { + crate::number::Endianness::Big => be_f32, + crate::number::Endianness::Little => le_f32, + #[cfg(target_endian = "big")] + crate::number::Endianness::Native => be_f32, + #[cfg(target_endian = "little")] + crate::number::Endianness::Native => le_f32, + } +} + +/// Recognizes an 8 byte floating point number +/// +/// If the parameter is `nom::number::Endianness::Big`, parse a big endian f64 float, +/// otherwise if `nom::number::Endianness::Little` parse a little endian f64 float. +/// *complete version*: returns an error if there is not enough input data +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// # use nom::Needed::Size; +/// use nom::number::complete::f64; +/// +/// let be_f64 = |s| { +/// f64(nom::number::Endianness::Big)(s) +/// }; +/// +/// assert_eq!(be_f64(&[0x40, 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00][..]), Ok((&b""[..], 12.5))); +/// assert_eq!(be_f64(&b"abc"[..]), Err(Err::Error((&b"abc"[..], ErrorKind::Eof)))); +/// +/// let le_f64 = |s| { +/// f64(nom::number::Endianness::Little)(s) +/// }; +/// +/// assert_eq!(le_f64(&[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x29, 0x40][..]), Ok((&b""[..], 12.5))); +/// assert_eq!(le_f64(&b"abc"[..]), Err(Err::Error((&b"abc"[..], ErrorKind::Eof)))); +/// ``` +#[inline] +pub fn f64>(endian: crate::number::Endianness) -> fn(I) -> IResult +where + I: Slice> + InputIter + InputLength, +{ + match endian { + crate::number::Endianness::Big => be_f64, + crate::number::Endianness::Little => le_f64, + #[cfg(target_endian = "big")] + crate::number::Endianness::Native => be_f64, + #[cfg(target_endian = "little")] + crate::number::Endianness::Native => le_f64, + } +} + +/// Recognizes a hex-encoded integer. +/// +/// *Complete version*: Will parse until the end of input if it has less than 8 bytes. 
+/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// # use nom::Needed::Size; +/// use nom::number::complete::hex_u32; +/// +/// let parser = |s| { +/// hex_u32(s) +/// }; +/// +/// assert_eq!(parser(&b"01AE"[..]), Ok((&b""[..], 0x01AE))); +/// assert_eq!(parser(&b"abc"[..]), Ok((&b""[..], 0x0ABC))); +/// assert_eq!(parser(&b"ggg"[..]), Err(Err::Error((&b"ggg"[..], ErrorKind::IsA)))); +/// ``` +#[inline] +pub fn hex_u32<'a, E: ParseError<&'a [u8]>>(input: &'a [u8]) -> IResult<&'a [u8], u32, E> { + let (i, o) = crate::bytes::complete::is_a(&b"0123456789abcdefABCDEF"[..])(input)?; + // Do not parse more than 8 characters for a u32 + let (parsed, remaining) = if o.len() <= 8 { + (o, i) + } else { + (&input[..8], &input[8..]) + }; + + let res = parsed + .iter() + .rev() + .enumerate() + .map(|(k, &v)| { + let digit = v as char; + digit.to_digit(16).unwrap_or(0) << (k * 4) + }) + .sum(); + + Ok((remaining, res)) +} + +/// Recognizes floating point number in a byte string and returns the corresponding slice. +/// +/// *Complete version*: Can parse until the end of input. +/// +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// # use nom::Needed::Size; +/// use nom::number::complete::recognize_float; +/// +/// let parser = |s| { +/// recognize_float(s) +/// }; +/// +/// assert_eq!(parser("11e-1"), Ok(("", "11e-1"))); +/// assert_eq!(parser("123E-02"), Ok(("", "123E-02"))); +/// assert_eq!(parser("123K-01"), Ok(("K-01", "123"))); +/// assert_eq!(parser("abc"), Err(Err::Error(("abc", ErrorKind::Char)))); +/// ``` +#[rustfmt::skip] +pub fn recognize_float>(input: T) -> IResult +where + T: Slice> + Slice>, + T: Clone + Offset, + T: InputIter, + ::Item: AsChar, + T: InputTakeAtPosition, + ::Item: AsChar, +{ + recognize( + tuple(( + opt(alt((char('+'), char('-')))), + alt(( + map(tuple((digit1, opt(pair(char('.'), opt(digit1))))), |_| ()), + map(tuple((char('.'), digit1)), |_| ()) + )), + opt(tuple(( + alt((char('e'), char('E'))), + opt(alt((char('+'), char('-')))), + cut(digit1) + ))) + )) + )(input) +} + +// workaround until issues with minimal-lexical are fixed +#[doc(hidden)] +pub fn recognize_float_or_exceptions>(input: T) -> IResult +where + T: Slice> + Slice>, + T: Clone + Offset, + T: InputIter + InputTake + Compare<&'static str>, + ::Item: AsChar, + T: InputTakeAtPosition, + ::Item: AsChar, +{ + alt(( + |i: T| { + recognize_float::<_, E>(i.clone()).map_err(|e| match e { + crate::Err::Error(_) => crate::Err::Error(E::from_error_kind(i, ErrorKind::Float)), + crate::Err::Failure(_) => crate::Err::Failure(E::from_error_kind(i, ErrorKind::Float)), + crate::Err::Incomplete(needed) => crate::Err::Incomplete(needed), + }) + }, + |i: T| { + crate::bytes::complete::tag_no_case::<_, _, E>("nan")(i.clone()) + .map_err(|_| crate::Err::Error(E::from_error_kind(i, ErrorKind::Float))) + }, + |i: T| { + crate::bytes::complete::tag_no_case::<_, _, E>("inf")(i.clone()) + .map_err(|_| crate::Err::Error(E::from_error_kind(i, ErrorKind::Float))) + }, + |i: T| { + crate::bytes::complete::tag_no_case::<_, _, E>("infinity")(i.clone()) + .map_err(|_| crate::Err::Error(E::from_error_kind(i, ErrorKind::Float))) + }, + ))(input) +} + +/// Recognizes a floating point number in text format +/// +/// It returns a tuple of (`sign`, `integer part`, `fraction part` and `exponent`) of the input +/// data. +/// +/// *Complete version*: Can parse until the end of input. 
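+///
+/// A minimal usage sketch (not taken from the upstream nom docs; the tuple shape
+/// `(sign, integer part, fraction part, exponent)` and the `true` = non-negative
+/// sign convention are assumed from the description above):
+///
+/// ```rust,ignore
+/// # use nom::IResult;
+/// use nom::number::complete::recognize_float_parts;
+///
+/// // "12.34e2xyz" splits into sign `true` (non-negative), integer part "12",
+/// // fraction part "34", exponent 2, with "xyz" left unparsed
+/// let res: IResult<&[u8], (bool, &[u8], &[u8], i32)> =
+///   recognize_float_parts(&b"12.34e2xyz"[..]);
+/// assert_eq!(res, Ok((&b"xyz"[..], (true, &b"12"[..], &b"34"[..], 2))));
+/// ```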
+/// +pub fn recognize_float_parts>(input: T) -> IResult +where + T: Slice> + Slice> + Slice>, + T: Clone + Offset, + T: InputIter + InputTake, + ::Item: AsChar + Copy, + T: InputTakeAtPosition + InputLength, + ::Item: AsChar, + T: for<'a> Compare<&'a [u8]>, + T: AsBytes, +{ + let (i, sign) = sign(input.clone())?; + + //let (i, zeroes) = take_while(|c: ::Item| c.as_char() == '0')(i)?; + let (i, zeroes) = match i.as_bytes().iter().position(|c| *c != b'0') { + Some(index) => i.take_split(index), + None => i.take_split(i.input_len()), + }; + //let (i, mut integer) = digit0(i)?; + let (i, mut integer) = match i + .as_bytes() + .iter() + .position(|c| !(*c >= b'0' && *c <= b'9')) + { + Some(index) => i.take_split(index), + None => i.take_split(i.input_len()), + }; + + if integer.input_len() == 0 && zeroes.input_len() > 0 { + // keep the last zero if integer is empty + integer = zeroes.slice(zeroes.input_len() - 1..); + } + + let (i, opt_dot) = opt(tag(&b"."[..]))(i)?; + let (i, fraction) = if opt_dot.is_none() { + let i2 = i.clone(); + (i2, i.slice(..0)) + } else { + // match number, trim right zeroes + let mut zero_count = 0usize; + let mut position = None; + for (pos, c) in i.as_bytes().iter().enumerate() { + if *c >= b'0' && *c <= b'9' { + if *c == b'0' { + zero_count += 1; + } else { + zero_count = 0; + } + } else { + position = Some(pos); + break; + } + } + + let position = position.unwrap_or(i.input_len()); + + let index = if zero_count == 0 { + position + } else if zero_count == position { + position - zero_count + 1 + } else { + position - zero_count + }; + + (i.slice(position..), i.slice(..index)) + }; + + if integer.input_len() == 0 && fraction.input_len() == 0 { + return Err(Err::Error(E::from_error_kind(input, ErrorKind::Float))); + } + + let i2 = i.clone(); + let (i, e) = match i.as_bytes().iter().next() { + Some(b'e') => (i.slice(1..), true), + Some(b'E') => (i.slice(1..), true), + _ => (i, false), + }; + + let (i, exp) = if e { + cut(crate::character::complete::i32)(i)? + } else { + (i2, 0) + }; + + Ok((i, (sign, integer, fraction, exp))) +} + +use crate::traits::ParseTo; + +/// Recognizes floating point number in text format and returns a f32. +/// +/// *Complete version*: Can parse until the end of input. +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// # use nom::Needed::Size; +/// use nom::number::complete::float; +/// +/// let parser = |s| { +/// float(s) +/// }; +/// +/// assert_eq!(parser("11e-1"), Ok(("", 1.1))); +/// assert_eq!(parser("123E-02"), Ok(("", 1.23))); +/// assert_eq!(parser("123K-01"), Ok(("K-01", 123.0))); +/// assert_eq!(parser("abc"), Err(Err::Error(("abc", ErrorKind::Float)))); +/// ``` +pub fn float>(input: T) -> IResult +where + T: Slice> + Slice> + Slice>, + T: Clone + Offset + ParseTo + Compare<&'static str>, + T: InputIter + InputLength + InputTake, + ::Item: AsChar + Copy, + ::IterElem: Clone, + T: InputTakeAtPosition, + ::Item: AsChar, + T: AsBytes, + T: for<'a> Compare<&'a [u8]>, +{ + /* + let (i, (sign, integer, fraction, exponent)) = recognize_float_parts(input)?; + + let mut float: f32 = minimal_lexical::parse_float( + integer.as_bytes().iter(), + fraction.as_bytes().iter(), + exponent, + ); + if !sign { + float = -float; + } + + Ok((i, float)) + */ + let (i, s) = recognize_float_or_exceptions(input)?; + match s.parse_to() { + Some(f) => Ok((i, f)), + None => Err(crate::Err::Error(E::from_error_kind( + i, + crate::error::ErrorKind::Float, + ))), + } +} + +/// Recognizes floating point number in text format and returns a f64. 
+/// +/// *Complete version*: Can parse until the end of input. +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// # use nom::Needed::Size; +/// use nom::number::complete::double; +/// +/// let parser = |s| { +/// double(s) +/// }; +/// +/// assert_eq!(parser("11e-1"), Ok(("", 1.1))); +/// assert_eq!(parser("123E-02"), Ok(("", 1.23))); +/// assert_eq!(parser("123K-01"), Ok(("K-01", 123.0))); +/// assert_eq!(parser("abc"), Err(Err::Error(("abc", ErrorKind::Float)))); +/// ``` +pub fn double>(input: T) -> IResult +where + T: Slice> + Slice> + Slice>, + T: Clone + Offset + ParseTo + Compare<&'static str>, + T: InputIter + InputLength + InputTake, + ::Item: AsChar + Copy, + ::IterElem: Clone, + T: InputTakeAtPosition, + ::Item: AsChar, + T: AsBytes, + T: for<'a> Compare<&'a [u8]>, +{ + /* + let (i, (sign, integer, fraction, exponent)) = recognize_float_parts(input)?; + + let mut float: f64 = minimal_lexical::parse_float( + integer.as_bytes().iter(), + fraction.as_bytes().iter(), + exponent, + ); + if !sign { + float = -float; + } + + Ok((i, float)) + */ + let (i, s) = recognize_float_or_exceptions(input)?; + match s.parse_to() { + Some(f) => Ok((i, f)), + None => Err(crate::Err::Error(E::from_error_kind( + i, + crate::error::ErrorKind::Float, + ))), + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::error::ErrorKind; + use crate::internal::Err; + use proptest::prelude::*; + + macro_rules! assert_parse( + ($left: expr, $right: expr) => { + let res: $crate::IResult<_, _, (_, ErrorKind)> = $left; + assert_eq!(res, $right); + }; + ); + + #[test] + fn i8_tests() { + assert_parse!(i8(&[0x00][..]), Ok((&b""[..], 0))); + assert_parse!(i8(&[0x7f][..]), Ok((&b""[..], 127))); + assert_parse!(i8(&[0xff][..]), Ok((&b""[..], -1))); + assert_parse!(i8(&[0x80][..]), Ok((&b""[..], -128))); + } + + #[test] + fn be_i8_tests() { + assert_parse!(be_i8(&[0x00][..]), Ok((&b""[..], 0))); + assert_parse!(be_i8(&[0x7f][..]), Ok((&b""[..], 127))); + assert_parse!(be_i8(&[0xff][..]), Ok((&b""[..], -1))); + assert_parse!(be_i8(&[0x80][..]), Ok((&b""[..], -128))); + } + + #[test] + fn be_i16_tests() { + assert_parse!(be_i16(&[0x00, 0x00][..]), Ok((&b""[..], 0))); + assert_parse!(be_i16(&[0x7f, 0xff][..]), Ok((&b""[..], 32_767_i16))); + assert_parse!(be_i16(&[0xff, 0xff][..]), Ok((&b""[..], -1))); + assert_parse!(be_i16(&[0x80, 0x00][..]), Ok((&b""[..], -32_768_i16))); + } + + #[test] + fn be_u24_tests() { + assert_parse!(be_u24(&[0x00, 0x00, 0x00][..]), Ok((&b""[..], 0))); + assert_parse!(be_u24(&[0x00, 0xFF, 0xFF][..]), Ok((&b""[..], 65_535_u32))); + assert_parse!( + be_u24(&[0x12, 0x34, 0x56][..]), + Ok((&b""[..], 1_193_046_u32)) + ); + } + + #[test] + fn be_i24_tests() { + assert_parse!(be_i24(&[0xFF, 0xFF, 0xFF][..]), Ok((&b""[..], -1_i32))); + assert_parse!(be_i24(&[0xFF, 0x00, 0x00][..]), Ok((&b""[..], -65_536_i32))); + assert_parse!( + be_i24(&[0xED, 0xCB, 0xAA][..]), + Ok((&b""[..], -1_193_046_i32)) + ); + } + + #[test] + fn be_i32_tests() { + assert_parse!(be_i32(&[0x00, 0x00, 0x00, 0x00][..]), Ok((&b""[..], 0))); + assert_parse!( + be_i32(&[0x7f, 0xff, 0xff, 0xff][..]), + Ok((&b""[..], 2_147_483_647_i32)) + ); + assert_parse!(be_i32(&[0xff, 0xff, 0xff, 0xff][..]), Ok((&b""[..], -1))); + assert_parse!( + be_i32(&[0x80, 0x00, 0x00, 0x00][..]), + Ok((&b""[..], -2_147_483_648_i32)) + ); + } + + #[test] + fn be_i64_tests() { + assert_parse!( + be_i64(&[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00][..]), + Ok((&b""[..], 0)) + ); + assert_parse!( + be_i64(&[0x7f, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff][..]), + Ok((&b""[..], 9_223_372_036_854_775_807_i64)) + ); + assert_parse!( + be_i64(&[0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff][..]), + Ok((&b""[..], -1)) + ); + assert_parse!( + be_i64(&[0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00][..]), + Ok((&b""[..], -9_223_372_036_854_775_808_i64)) + ); + } + + #[test] + fn be_i128_tests() { + assert_parse!( + be_i128( + &[ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00 + ][..] + ), + Ok((&b""[..], 0)) + ); + assert_parse!( + be_i128( + &[ + 0x7f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff + ][..] + ), + Ok(( + &b""[..], + 170_141_183_460_469_231_731_687_303_715_884_105_727_i128 + )) + ); + assert_parse!( + be_i128( + &[ + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff + ][..] + ), + Ok((&b""[..], -1)) + ); + assert_parse!( + be_i128( + &[ + 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00 + ][..] + ), + Ok(( + &b""[..], + -170_141_183_460_469_231_731_687_303_715_884_105_728_i128 + )) + ); + } + + #[test] + fn le_i8_tests() { + assert_parse!(le_i8(&[0x00][..]), Ok((&b""[..], 0))); + assert_parse!(le_i8(&[0x7f][..]), Ok((&b""[..], 127))); + assert_parse!(le_i8(&[0xff][..]), Ok((&b""[..], -1))); + assert_parse!(le_i8(&[0x80][..]), Ok((&b""[..], -128))); + } + + #[test] + fn le_i16_tests() { + assert_parse!(le_i16(&[0x00, 0x00][..]), Ok((&b""[..], 0))); + assert_parse!(le_i16(&[0xff, 0x7f][..]), Ok((&b""[..], 32_767_i16))); + assert_parse!(le_i16(&[0xff, 0xff][..]), Ok((&b""[..], -1))); + assert_parse!(le_i16(&[0x00, 0x80][..]), Ok((&b""[..], -32_768_i16))); + } + + #[test] + fn le_u24_tests() { + assert_parse!(le_u24(&[0x00, 0x00, 0x00][..]), Ok((&b""[..], 0))); + assert_parse!(le_u24(&[0xFF, 0xFF, 0x00][..]), Ok((&b""[..], 65_535_u32))); + assert_parse!( + le_u24(&[0x56, 0x34, 0x12][..]), + Ok((&b""[..], 1_193_046_u32)) + ); + } + + #[test] + fn le_i24_tests() { + assert_parse!(le_i24(&[0xFF, 0xFF, 0xFF][..]), Ok((&b""[..], -1_i32))); + assert_parse!(le_i24(&[0x00, 0x00, 0xFF][..]), Ok((&b""[..], -65_536_i32))); + assert_parse!( + le_i24(&[0xAA, 0xCB, 0xED][..]), + Ok((&b""[..], -1_193_046_i32)) + ); + } + + #[test] + fn le_i32_tests() { + assert_parse!(le_i32(&[0x00, 0x00, 0x00, 0x00][..]), Ok((&b""[..], 0))); + assert_parse!( + le_i32(&[0xff, 0xff, 0xff, 0x7f][..]), + Ok((&b""[..], 2_147_483_647_i32)) + ); + assert_parse!(le_i32(&[0xff, 0xff, 0xff, 0xff][..]), Ok((&b""[..], -1))); + assert_parse!( + le_i32(&[0x00, 0x00, 0x00, 0x80][..]), + Ok((&b""[..], -2_147_483_648_i32)) + ); + } + + #[test] + fn le_i64_tests() { + assert_parse!( + le_i64(&[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00][..]), + Ok((&b""[..], 0)) + ); + assert_parse!( + le_i64(&[0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f][..]), + Ok((&b""[..], 9_223_372_036_854_775_807_i64)) + ); + assert_parse!( + le_i64(&[0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff][..]), + Ok((&b""[..], -1)) + ); + assert_parse!( + le_i64(&[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80][..]), + Ok((&b""[..], -9_223_372_036_854_775_808_i64)) + ); + } + + #[test] + fn le_i128_tests() { + assert_parse!( + le_i128( + &[ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00 + ][..] 
+ ), + Ok((&b""[..], 0)) + ); + assert_parse!( + le_i128( + &[ + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0x7f + ][..] + ), + Ok(( + &b""[..], + 170_141_183_460_469_231_731_687_303_715_884_105_727_i128 + )) + ); + assert_parse!( + le_i128( + &[ + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff + ][..] + ), + Ok((&b""[..], -1)) + ); + assert_parse!( + le_i128( + &[ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x80 + ][..] + ), + Ok(( + &b""[..], + -170_141_183_460_469_231_731_687_303_715_884_105_728_i128 + )) + ); + } + + #[test] + fn be_f32_tests() { + assert_parse!(be_f32(&[0x00, 0x00, 0x00, 0x00][..]), Ok((&b""[..], 0_f32))); + assert_parse!( + be_f32(&[0x4d, 0x31, 0x1f, 0xd8][..]), + Ok((&b""[..], 185_728_392_f32)) + ); + } + + #[test] + fn be_f64_tests() { + assert_parse!( + be_f64(&[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00][..]), + Ok((&b""[..], 0_f64)) + ); + assert_parse!( + be_f64(&[0x41, 0xa6, 0x23, 0xfb, 0x10, 0x00, 0x00, 0x00][..]), + Ok((&b""[..], 185_728_392_f64)) + ); + } + + #[test] + fn le_f32_tests() { + assert_parse!(le_f32(&[0x00, 0x00, 0x00, 0x00][..]), Ok((&b""[..], 0_f32))); + assert_parse!( + le_f32(&[0xd8, 0x1f, 0x31, 0x4d][..]), + Ok((&b""[..], 185_728_392_f32)) + ); + } + + #[test] + fn le_f64_tests() { + assert_parse!( + le_f64(&[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00][..]), + Ok((&b""[..], 0_f64)) + ); + assert_parse!( + le_f64(&[0x00, 0x00, 0x00, 0x10, 0xfb, 0x23, 0xa6, 0x41][..]), + Ok((&b""[..], 185_728_392_f64)) + ); + } + + #[test] + fn hex_u32_tests() { + assert_parse!( + hex_u32(&b";"[..]), + Err(Err::Error(error_position!(&b";"[..], ErrorKind::IsA))) + ); + assert_parse!(hex_u32(&b"ff;"[..]), Ok((&b";"[..], 255))); + assert_parse!(hex_u32(&b"1be2;"[..]), Ok((&b";"[..], 7138))); + assert_parse!(hex_u32(&b"c5a31be2;"[..]), Ok((&b";"[..], 3_315_801_058))); + assert_parse!(hex_u32(&b"C5A31be2;"[..]), Ok((&b";"[..], 3_315_801_058))); + assert_parse!(hex_u32(&b"00c5a31be2;"[..]), Ok((&b"e2;"[..], 12_952_347))); + assert_parse!( + hex_u32(&b"c5a31be201;"[..]), + Ok((&b"01;"[..], 3_315_801_058)) + ); + assert_parse!(hex_u32(&b"ffffffff;"[..]), Ok((&b";"[..], 4_294_967_295))); + assert_parse!(hex_u32(&b"0x1be2;"[..]), Ok((&b"x1be2;"[..], 0))); + assert_parse!(hex_u32(&b"12af"[..]), Ok((&b""[..], 0x12af))); + } + + #[test] + #[cfg(feature = "std")] + fn float_test() { + let mut test_cases = vec![ + "+3.14", + "3.14", + "-3.14", + "0", + "0.0", + "1.", + ".789", + "-.5", + "1e7", + "-1E-7", + ".3e-2", + "1.e4", + "1.2e4", + "12.34", + "-1.234E-12", + "-1.234e-12", + "0.00000000000000000087", + ]; + + for test in test_cases.drain(..) 
{ + let expected32 = str::parse::(test).unwrap(); + let expected64 = str::parse::(test).unwrap(); + + println!("now parsing: {} -> {}", test, expected32); + + let larger = format!("{}", test); + assert_parse!(recognize_float(&larger[..]), Ok(("", test))); + + assert_parse!(float(larger.as_bytes()), Ok((&b""[..], expected32))); + assert_parse!(float(&larger[..]), Ok(("", expected32))); + + assert_parse!(double(larger.as_bytes()), Ok((&b""[..], expected64))); + assert_parse!(double(&larger[..]), Ok(("", expected64))); + } + + let remaining_exponent = "-1.234E-"; + assert_parse!( + recognize_float(remaining_exponent), + Err(Err::Failure(("", ErrorKind::Digit))) + ); + + let (_i, nan) = float::<_, ()>("NaN").unwrap(); + assert!(nan.is_nan()); + + let (_i, inf) = float::<_, ()>("inf").unwrap(); + assert!(inf.is_infinite()); + let (_i, inf) = float::<_, ()>("infinite").unwrap(); + assert!(inf.is_infinite()); + } + + #[test] + fn configurable_endianness() { + use crate::number::Endianness; + + fn be_tst16(i: &[u8]) -> IResult<&[u8], u16> { + u16(Endianness::Big)(i) + } + fn le_tst16(i: &[u8]) -> IResult<&[u8], u16> { + u16(Endianness::Little)(i) + } + assert_eq!(be_tst16(&[0x80, 0x00]), Ok((&b""[..], 32_768_u16))); + assert_eq!(le_tst16(&[0x80, 0x00]), Ok((&b""[..], 128_u16))); + + fn be_tst32(i: &[u8]) -> IResult<&[u8], u32> { + u32(Endianness::Big)(i) + } + fn le_tst32(i: &[u8]) -> IResult<&[u8], u32> { + u32(Endianness::Little)(i) + } + assert_eq!( + be_tst32(&[0x12, 0x00, 0x60, 0x00]), + Ok((&b""[..], 302_014_464_u32)) + ); + assert_eq!( + le_tst32(&[0x12, 0x00, 0x60, 0x00]), + Ok((&b""[..], 6_291_474_u32)) + ); + + fn be_tst64(i: &[u8]) -> IResult<&[u8], u64> { + u64(Endianness::Big)(i) + } + fn le_tst64(i: &[u8]) -> IResult<&[u8], u64> { + u64(Endianness::Little)(i) + } + assert_eq!( + be_tst64(&[0x12, 0x00, 0x60, 0x00, 0x12, 0x00, 0x80, 0x00]), + Ok((&b""[..], 1_297_142_246_100_992_000_u64)) + ); + assert_eq!( + le_tst64(&[0x12, 0x00, 0x60, 0x00, 0x12, 0x00, 0x80, 0x00]), + Ok((&b""[..], 36_028_874_334_666_770_u64)) + ); + + fn be_tsti16(i: &[u8]) -> IResult<&[u8], i16> { + i16(Endianness::Big)(i) + } + fn le_tsti16(i: &[u8]) -> IResult<&[u8], i16> { + i16(Endianness::Little)(i) + } + assert_eq!(be_tsti16(&[0x00, 0x80]), Ok((&b""[..], 128_i16))); + assert_eq!(le_tsti16(&[0x00, 0x80]), Ok((&b""[..], -32_768_i16))); + + fn be_tsti32(i: &[u8]) -> IResult<&[u8], i32> { + i32(Endianness::Big)(i) + } + fn le_tsti32(i: &[u8]) -> IResult<&[u8], i32> { + i32(Endianness::Little)(i) + } + assert_eq!( + be_tsti32(&[0x00, 0x12, 0x60, 0x00]), + Ok((&b""[..], 1_204_224_i32)) + ); + assert_eq!( + le_tsti32(&[0x00, 0x12, 0x60, 0x00]), + Ok((&b""[..], 6_296_064_i32)) + ); + + fn be_tsti64(i: &[u8]) -> IResult<&[u8], i64> { + i64(Endianness::Big)(i) + } + fn le_tsti64(i: &[u8]) -> IResult<&[u8], i64> { + i64(Endianness::Little)(i) + } + assert_eq!( + be_tsti64(&[0x00, 0xFF, 0x60, 0x00, 0x12, 0x00, 0x80, 0x00]), + Ok((&b""[..], 71_881_672_479_506_432_i64)) + ); + assert_eq!( + le_tsti64(&[0x00, 0xFF, 0x60, 0x00, 0x12, 0x00, 0x80, 0x00]), + Ok((&b""[..], 36_028_874_334_732_032_i64)) + ); + } + + #[cfg(feature = "std")] + fn parse_f64(i: &str) -> IResult<&str, f64, ()> { + match recognize_float_or_exceptions(i) { + Err(e) => Err(e), + Ok((i, s)) => { + if s.is_empty() { + return Err(Err::Error(())); + } + match s.parse_to() { + Some(n) => Ok((i, n)), + None => Err(Err::Error(())), + } + } + } + } + + proptest! 
{ + #[test] + #[cfg(feature = "std")] + fn floats(s in "\\PC*") { + println!("testing {}", s); + let res1 = parse_f64(&s); + let res2 = double::<_, ()>(s.as_str()); + assert_eq!(res1, res2); + } + } +} diff --git a/vendor/nom/src/number/mod.rs b/vendor/nom/src/number/mod.rs new file mode 100644 index 00000000000000..58c3d51b0bdb6a --- /dev/null +++ b/vendor/nom/src/number/mod.rs @@ -0,0 +1,15 @@ +//! Parsers recognizing numbers + +pub mod complete; +pub mod streaming; + +/// Configurable endianness +#[derive(Debug, PartialEq, Eq, Clone, Copy)] +pub enum Endianness { + /// Big endian + Big, + /// Little endian + Little, + /// Will match the host's endianness + Native, +} diff --git a/vendor/nom/src/number/streaming.rs b/vendor/nom/src/number/streaming.rs new file mode 100644 index 00000000000000..b4e856d2984424 --- /dev/null +++ b/vendor/nom/src/number/streaming.rs @@ -0,0 +1,2206 @@ +//! Parsers recognizing numbers, streaming version + +use crate::branch::alt; +use crate::bytes::streaming::tag; +use crate::character::streaming::{char, digit1, sign}; +use crate::combinator::{cut, map, opt, recognize}; +use crate::error::{ErrorKind, ParseError}; +use crate::internal::*; +use crate::lib::std::ops::{RangeFrom, RangeTo}; +use crate::sequence::{pair, tuple}; +use crate::traits::{ + AsBytes, AsChar, Compare, InputIter, InputLength, InputTake, InputTakeAtPosition, Offset, Slice, +}; + +/// Recognizes an unsigned 1 byte integer. +/// +/// *Streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there is not enough data. +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// use nom::number::streaming::be_u8; +/// +/// let parser = |s| { +/// be_u8::<_, (_, ErrorKind)>(s) +/// }; +/// +/// assert_eq!(parser(&b"\x00\x01abcd"[..]), Ok((&b"\x01abcd"[..], 0x00))); +/// assert_eq!(parser(&b""[..]), Err(Err::Incomplete(Needed::new(1)))); +/// ``` +#[inline] +pub fn be_u8>(input: I) -> IResult +where + I: Slice> + InputIter + InputLength, +{ + let bound: usize = 1; + if input.input_len() < bound { + Err(Err::Incomplete(Needed::new(1))) + } else { + let res = input.iter_elements().next().unwrap(); + + Ok((input.slice(bound..), res)) + } +} + +/// Recognizes a big endian unsigned 2 bytes integer. +/// +/// *Streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there is not enough data. +/// +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// use nom::number::streaming::be_u16; +/// +/// let parser = |s| { +/// be_u16::<_, (_, ErrorKind)>(s) +/// }; +/// +/// assert_eq!(parser(&b"\x00\x01abcd"[..]), Ok((&b"abcd"[..], 0x0001))); +/// assert_eq!(parser(&b"\x01"[..]), Err(Err::Incomplete(Needed::new(1)))); +/// ``` +#[inline] +pub fn be_u16>(input: I) -> IResult +where + I: Slice> + InputIter + InputLength, +{ + let bound: usize = 2; + if input.input_len() < bound { + Err(Err::Incomplete(Needed::new(bound - input.input_len()))) + } else { + let mut res = 0u16; + for byte in input.iter_elements().take(bound) { + res = (res << 8) + byte as u16; + } + + Ok((input.slice(bound..), res)) + } +} + +/// Recognizes a big endian unsigned 3 byte integer. +/// +/// *Streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there is not enough data. 
+/// +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// use nom::number::streaming::be_u24; +/// +/// let parser = |s| { +/// be_u24::<_, (_, ErrorKind)>(s) +/// }; +/// +/// assert_eq!(parser(&b"\x00\x01\x02abcd"[..]), Ok((&b"abcd"[..], 0x000102))); +/// assert_eq!(parser(&b"\x01"[..]), Err(Err::Incomplete(Needed::new(2)))); +/// ``` +#[inline] +pub fn be_u24>(input: I) -> IResult +where + I: Slice> + InputIter + InputLength, +{ + let bound: usize = 3; + if input.input_len() < bound { + Err(Err::Incomplete(Needed::new(bound - input.input_len()))) + } else { + let mut res = 0u32; + for byte in input.iter_elements().take(bound) { + res = (res << 8) + byte as u32; + } + + Ok((input.slice(bound..), res)) + } +} + +/// Recognizes a big endian unsigned 4 bytes integer. +/// +/// *Streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there is not enough data. +/// +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// use nom::number::streaming::be_u32; +/// +/// let parser = |s| { +/// be_u32::<_, (_, ErrorKind)>(s) +/// }; +/// +/// assert_eq!(parser(&b"\x00\x01\x02\x03abcd"[..]), Ok((&b"abcd"[..], 0x00010203))); +/// assert_eq!(parser(&b"\x01"[..]), Err(Err::Incomplete(Needed::new(3)))); +/// ``` +#[inline] +pub fn be_u32>(input: I) -> IResult +where + I: Slice> + InputIter + InputLength, +{ + let bound: usize = 4; + if input.input_len() < bound { + Err(Err::Incomplete(Needed::new(bound - input.input_len()))) + } else { + let mut res = 0u32; + for byte in input.iter_elements().take(bound) { + res = (res << 8) + byte as u32; + } + + Ok((input.slice(bound..), res)) + } +} + +/// Recognizes a big endian unsigned 8 bytes integer. +/// +/// *Streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there is not enough data. +/// +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// use nom::number::streaming::be_u64; +/// +/// let parser = |s| { +/// be_u64::<_, (_, ErrorKind)>(s) +/// }; +/// +/// assert_eq!(parser(&b"\x00\x01\x02\x03\x04\x05\x06\x07abcd"[..]), Ok((&b"abcd"[..], 0x0001020304050607))); +/// assert_eq!(parser(&b"\x01"[..]), Err(Err::Incomplete(Needed::new(7)))); +/// ``` +#[inline] +pub fn be_u64>(input: I) -> IResult +where + I: Slice> + InputIter + InputLength, +{ + let bound: usize = 8; + if input.input_len() < bound { + Err(Err::Incomplete(Needed::new(bound - input.input_len()))) + } else { + let mut res = 0u64; + for byte in input.iter_elements().take(bound) { + res = (res << 8) + byte as u64; + } + + Ok((input.slice(bound..), res)) + } +} + +/// Recognizes a big endian unsigned 16 bytes integer. +/// +/// *Streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there is not enough data. 
+/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// use nom::number::streaming::be_u128; +/// +/// let parser = |s| { +/// be_u128::<_, (_, ErrorKind)>(s) +/// }; +/// +/// assert_eq!(parser(&b"\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x10\x11\x12\x13\x14\x15abcd"[..]), Ok((&b"abcd"[..], 0x00010203040506070809101112131415))); +/// assert_eq!(parser(&b"\x01"[..]), Err(Err::Incomplete(Needed::new(15)))); +/// ``` +#[inline] +pub fn be_u128>(input: I) -> IResult +where + I: Slice> + InputIter + InputLength, +{ + let bound: usize = 16; + if input.input_len() < bound { + Err(Err::Incomplete(Needed::new(bound - input.input_len()))) + } else { + let mut res = 0u128; + for byte in input.iter_elements().take(bound) { + res = (res << 8) + byte as u128; + } + + Ok((input.slice(bound..), res)) + } +} + +/// Recognizes a signed 1 byte integer. +/// +/// *Streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there is not enough data. +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// use nom::number::streaming::be_i8; +/// +/// let parser = be_i8::<_, (_, ErrorKind)>; +/// +/// assert_eq!(parser(&b"\x00\x01abcd"[..]), Ok((&b"\x01abcd"[..], 0x00))); +/// assert_eq!(parser(&b""[..]), Err(Err::Incomplete(Needed::new(1)))); +/// ``` +#[inline] +pub fn be_i8>(input: I) -> IResult +where + I: Slice> + InputIter + InputLength, +{ + be_u8.map(|x| x as i8).parse(input) +} + +/// Recognizes a big endian signed 2 bytes integer. +/// +/// *Streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there is not enough data. +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// use nom::number::streaming::be_i16; +/// +/// let parser = be_i16::<_, (_, ErrorKind)>; +/// +/// assert_eq!(parser(&b"\x00\x01abcd"[..]), Ok((&b"abcd"[..], 0x0001))); +/// assert_eq!(parser(&b""[..]), Err(Err::Incomplete(Needed::new(2)))); +/// ``` +#[inline] +pub fn be_i16>(input: I) -> IResult +where + I: Slice> + InputIter + InputLength, +{ + be_u16.map(|x| x as i16).parse(input) +} + +/// Recognizes a big endian signed 3 bytes integer. +/// +/// *Streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there is not enough data. +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// use nom::number::streaming::be_i24; +/// +/// let parser = be_i24::<_, (_, ErrorKind)>; +/// +/// assert_eq!(parser(&b"\x00\x01\x02abcd"[..]), Ok((&b"abcd"[..], 0x000102))); +/// assert_eq!(parser(&b""[..]), Err(Err::Incomplete(Needed::new(3)))); +/// ``` +#[inline] +pub fn be_i24>(input: I) -> IResult +where + I: Slice> + InputIter + InputLength, +{ + // Same as the unsigned version but we need to sign-extend manually here + be_u24 + .map(|x| { + if x & 0x80_00_00 != 0 { + (x | 0xff_00_00_00) as i32 + } else { + x as i32 + } + }) + .parse(input) +} + +/// Recognizes a big endian signed 4 bytes integer. +/// +/// *Streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there is not enough data. +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// use nom::number::streaming::be_i32; +/// +/// let parser = be_i32::<_, (_, ErrorKind)>; +/// +/// assert_eq!(parser(&b"\x00\x01\x02\x03abcd"[..]), Ok((&b"abcd"[..], 0x00010203))); +/// assert_eq!(parser(&b""[..]), Err(Err::Incomplete(Needed::new(4)))); +/// ``` +#[inline] +pub fn be_i32>(input: I) -> IResult +where + I: Slice> + InputIter + InputLength, +{ + be_u32.map(|x| x as i32).parse(input) +} + +/// Recognizes a big endian signed 8 bytes integer. 
+/// +/// *Streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there is not enough data. +/// +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// use nom::number::streaming::be_i64; +/// +/// let parser = be_i64::<_, (_, ErrorKind)>; +/// +/// assert_eq!(parser(&b"\x00\x01\x02\x03\x04\x05\x06\x07abcd"[..]), Ok((&b"abcd"[..], 0x0001020304050607))); +/// assert_eq!(parser(&b"\x01"[..]), Err(Err::Incomplete(Needed::new(7)))); +/// ``` +#[inline] +pub fn be_i64>(input: I) -> IResult +where + I: Slice> + InputIter + InputLength, +{ + be_u64.map(|x| x as i64).parse(input) +} + +/// Recognizes a big endian signed 16 bytes integer. +/// +/// *Streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there is not enough data. +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// use nom::number::streaming::be_i128; +/// +/// let parser = be_i128::<_, (_, ErrorKind)>; +/// +/// assert_eq!(parser(&b"\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x10\x11\x12\x13\x14\x15abcd"[..]), Ok((&b"abcd"[..], 0x00010203040506070809101112131415))); +/// assert_eq!(parser(&b"\x01"[..]), Err(Err::Incomplete(Needed::new(15)))); +/// ``` +#[inline] +pub fn be_i128>(input: I) -> IResult +where + I: Slice> + InputIter + InputLength, +{ + be_u128.map(|x| x as i128).parse(input) +} + +/// Recognizes an unsigned 1 byte integer. +/// +/// *Streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there is not enough data. +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// use nom::number::streaming::le_u8; +/// +/// let parser = le_u8::<_, (_, ErrorKind)>; +/// +/// assert_eq!(parser(&b"\x00\x01abcd"[..]), Ok((&b"\x01abcd"[..], 0x00))); +/// assert_eq!(parser(&b""[..]), Err(Err::Incomplete(Needed::new(1)))); +/// ``` +#[inline] +pub fn le_u8>(input: I) -> IResult +where + I: Slice> + InputIter + InputLength, +{ + let bound: usize = 1; + if input.input_len() < bound { + Err(Err::Incomplete(Needed::new(1))) + } else { + let res = input.iter_elements().next().unwrap(); + + Ok((input.slice(bound..), res)) + } +} + +/// Recognizes a little endian unsigned 2 bytes integer. +/// +/// *Streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there is not enough data. +/// +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// use nom::number::streaming::le_u16; +/// +/// let parser = |s| { +/// le_u16::<_, (_, ErrorKind)>(s) +/// }; +/// +/// assert_eq!(parser(&b"\x00\x01abcd"[..]), Ok((&b"abcd"[..], 0x0100))); +/// assert_eq!(parser(&b"\x01"[..]), Err(Err::Incomplete(Needed::new(1)))); +/// ``` +#[inline] +pub fn le_u16>(input: I) -> IResult +where + I: Slice> + InputIter + InputLength, +{ + let bound: usize = 2; + if input.input_len() < bound { + Err(Err::Incomplete(Needed::new(bound - input.input_len()))) + } else { + let mut res = 0u16; + for (index, byte) in input.iter_indices().take(bound) { + res += (byte as u16) << (8 * index); + } + + Ok((input.slice(bound..), res)) + } +} + +/// Recognizes a little endian unsigned 3 bytes integer. +/// +/// *Streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there is not enough data. 
+/// +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// use nom::number::streaming::le_u24; +/// +/// let parser = |s| { +/// le_u24::<_, (_, ErrorKind)>(s) +/// }; +/// +/// assert_eq!(parser(&b"\x00\x01\x02abcd"[..]), Ok((&b"abcd"[..], 0x020100))); +/// assert_eq!(parser(&b"\x01"[..]), Err(Err::Incomplete(Needed::new(2)))); +/// ``` +#[inline] +pub fn le_u24>(input: I) -> IResult +where + I: Slice> + InputIter + InputLength, +{ + let bound: usize = 3; + if input.input_len() < bound { + Err(Err::Incomplete(Needed::new(bound - input.input_len()))) + } else { + let mut res = 0u32; + for (index, byte) in input.iter_indices().take(bound) { + res += (byte as u32) << (8 * index); + } + + Ok((input.slice(bound..), res)) + } +} + +/// Recognizes a little endian unsigned 4 bytes integer. +/// +/// *Streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there is not enough data. +/// +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// use nom::number::streaming::le_u32; +/// +/// let parser = |s| { +/// le_u32::<_, (_, ErrorKind)>(s) +/// }; +/// +/// assert_eq!(parser(&b"\x00\x01\x02\x03abcd"[..]), Ok((&b"abcd"[..], 0x03020100))); +/// assert_eq!(parser(&b"\x01"[..]), Err(Err::Incomplete(Needed::new(3)))); +/// ``` +#[inline] +pub fn le_u32>(input: I) -> IResult +where + I: Slice> + InputIter + InputLength, +{ + let bound: usize = 4; + if input.input_len() < bound { + Err(Err::Incomplete(Needed::new(bound - input.input_len()))) + } else { + let mut res = 0u32; + for (index, byte) in input.iter_indices().take(bound) { + res += (byte as u32) << (8 * index); + } + + Ok((input.slice(bound..), res)) + } +} + +/// Recognizes a little endian unsigned 8 bytes integer. +/// +/// *Streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there is not enough data. +/// +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// use nom::number::streaming::le_u64; +/// +/// let parser = |s| { +/// le_u64::<_, (_, ErrorKind)>(s) +/// }; +/// +/// assert_eq!(parser(&b"\x00\x01\x02\x03\x04\x05\x06\x07abcd"[..]), Ok((&b"abcd"[..], 0x0706050403020100))); +/// assert_eq!(parser(&b"\x01"[..]), Err(Err::Incomplete(Needed::new(7)))); +/// ``` +#[inline] +pub fn le_u64>(input: I) -> IResult +where + I: Slice> + InputIter + InputLength, +{ + let bound: usize = 8; + if input.input_len() < bound { + Err(Err::Incomplete(Needed::new(bound - input.input_len()))) + } else { + let mut res = 0u64; + for (index, byte) in input.iter_indices().take(bound) { + res += (byte as u64) << (8 * index); + } + + Ok((input.slice(bound..), res)) + } +} + +/// Recognizes a little endian unsigned 16 bytes integer. +/// +/// *Streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there is not enough data. 
+/// +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// use nom::number::streaming::le_u128; +/// +/// let parser = |s| { +/// le_u128::<_, (_, ErrorKind)>(s) +/// }; +/// +/// assert_eq!(parser(&b"\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x10\x11\x12\x13\x14\x15abcd"[..]), Ok((&b"abcd"[..], 0x15141312111009080706050403020100))); +/// assert_eq!(parser(&b"\x01"[..]), Err(Err::Incomplete(Needed::new(15)))); +/// ``` +#[inline] +pub fn le_u128>(input: I) -> IResult +where + I: Slice> + InputIter + InputLength, +{ + let bound: usize = 16; + if input.input_len() < bound { + Err(Err::Incomplete(Needed::new(bound - input.input_len()))) + } else { + let mut res = 0u128; + for (index, byte) in input.iter_indices().take(bound) { + res += (byte as u128) << (8 * index); + } + + Ok((input.slice(bound..), res)) + } +} + +/// Recognizes a signed 1 byte integer. +/// +/// *Streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there is not enough data. +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// use nom::number::streaming::le_i8; +/// +/// let parser = le_i8::<_, (_, ErrorKind)>; +/// +/// assert_eq!(parser(&b"\x00\x01abcd"[..]), Ok((&b"\x01abcd"[..], 0x00))); +/// assert_eq!(parser(&b""[..]), Err(Err::Incomplete(Needed::new(1)))); +/// ``` +#[inline] +pub fn le_i8>(input: I) -> IResult +where + I: Slice> + InputIter + InputLength, +{ + le_u8.map(|x| x as i8).parse(input) +} + +/// Recognizes a little endian signed 2 bytes integer. +/// +/// *Streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there is not enough data. +/// +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// use nom::number::streaming::le_i16; +/// +/// let parser = |s| { +/// le_i16::<_, (_, ErrorKind)>(s) +/// }; +/// +/// assert_eq!(parser(&b"\x00\x01abcd"[..]), Ok((&b"abcd"[..], 0x0100))); +/// assert_eq!(parser(&b"\x01"[..]), Err(Err::Incomplete(Needed::new(1)))); +/// ``` +#[inline] +pub fn le_i16>(input: I) -> IResult +where + I: Slice> + InputIter + InputLength, +{ + le_u16.map(|x| x as i16).parse(input) +} + +/// Recognizes a little endian signed 3 bytes integer. +/// +/// *Streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there is not enough data. +/// +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// use nom::number::streaming::le_i24; +/// +/// let parser = |s| { +/// le_i24::<_, (_, ErrorKind)>(s) +/// }; +/// +/// assert_eq!(parser(&b"\x00\x01\x02abcd"[..]), Ok((&b"abcd"[..], 0x020100))); +/// assert_eq!(parser(&b"\x01"[..]), Err(Err::Incomplete(Needed::new(2)))); +/// ``` +#[inline] +pub fn le_i24>(input: I) -> IResult +where + I: Slice> + InputIter + InputLength, +{ + // Same as the unsigned version but we need to sign-extend manually here + le_u24 + .map(|x| { + if x & 0x80_00_00 != 0 { + (x | 0xff_00_00_00) as i32 + } else { + x as i32 + } + }) + .parse(input) +} + +/// Recognizes a little endian signed 4 bytes integer. +/// +/// *Streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there is not enough data. 
+/// +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// use nom::number::streaming::le_i32; +/// +/// let parser = |s| { +/// le_i32::<_, (_, ErrorKind)>(s) +/// }; +/// +/// assert_eq!(parser(&b"\x00\x01\x02\x03abcd"[..]), Ok((&b"abcd"[..], 0x03020100))); +/// assert_eq!(parser(&b"\x01"[..]), Err(Err::Incomplete(Needed::new(3)))); +/// ``` +#[inline] +pub fn le_i32>(input: I) -> IResult +where + I: Slice> + InputIter + InputLength, +{ + le_u32.map(|x| x as i32).parse(input) +} + +/// Recognizes a little endian signed 8 bytes integer. +/// +/// *Streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there is not enough data. +/// +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// use nom::number::streaming::le_i64; +/// +/// let parser = |s| { +/// le_i64::<_, (_, ErrorKind)>(s) +/// }; +/// +/// assert_eq!(parser(&b"\x00\x01\x02\x03\x04\x05\x06\x07abcd"[..]), Ok((&b"abcd"[..], 0x0706050403020100))); +/// assert_eq!(parser(&b"\x01"[..]), Err(Err::Incomplete(Needed::new(7)))); +/// ``` +#[inline] +pub fn le_i64>(input: I) -> IResult +where + I: Slice> + InputIter + InputLength, +{ + le_u64.map(|x| x as i64).parse(input) +} + +/// Recognizes a little endian signed 16 bytes integer. +/// +/// *Streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there is not enough data. +/// +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// use nom::number::streaming::le_i128; +/// +/// let parser = |s| { +/// le_i128::<_, (_, ErrorKind)>(s) +/// }; +/// +/// assert_eq!(parser(&b"\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x10\x11\x12\x13\x14\x15abcd"[..]), Ok((&b"abcd"[..], 0x15141312111009080706050403020100))); +/// assert_eq!(parser(&b"\x01"[..]), Err(Err::Incomplete(Needed::new(15)))); +/// ``` +#[inline] +pub fn le_i128>(input: I) -> IResult +where + I: Slice> + InputIter + InputLength, +{ + le_u128.map(|x| x as i128).parse(input) +} + +/// Recognizes an unsigned 1 byte integer +/// +/// Note that endianness does not apply to 1 byte numbers. +/// *Streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there is not enough data. +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// # use nom::Needed::Size; +/// use nom::number::streaming::u8; +/// +/// let parser = |s| { +/// u8::<_, (_, ErrorKind)>(s) +/// }; +/// +/// assert_eq!(parser(&b"\x00\x03abcefg"[..]), Ok((&b"\x03abcefg"[..], 0x00))); +/// assert_eq!(parser(&b""[..]), Err(Err::Incomplete(Needed::new(1)))); +/// ``` +#[inline] +pub fn u8>(input: I) -> IResult +where + I: Slice> + InputIter + InputLength, +{ + let bound: usize = 1; + if input.input_len() < bound { + Err(Err::Incomplete(Needed::new(1))) + } else { + let res = input.iter_elements().next().unwrap(); + + Ok((input.slice(bound..), res)) + } +} + +/// Recognizes an unsigned 2 bytes integer +/// +/// If the parameter is `nom::number::Endianness::Big`, parse a big endian u16 integer, +/// otherwise if `nom::number::Endianness::Little` parse a little endian u16 integer. +/// *Streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there is not enough data. 
+/// +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// # use nom::Needed::Size; +/// use nom::number::streaming::u16; +/// +/// let be_u16 = |s| { +/// u16::<_, (_, ErrorKind)>(nom::number::Endianness::Big)(s) +/// }; +/// +/// assert_eq!(be_u16(&b"\x00\x03abcefg"[..]), Ok((&b"abcefg"[..], 0x0003))); +/// assert_eq!(be_u16(&b"\x01"[..]), Err(Err::Incomplete(Needed::new(1)))); +/// +/// let le_u16 = |s| { +/// u16::<_, (_, ErrorKind)>(nom::number::Endianness::Little)(s) +/// }; +/// +/// assert_eq!(le_u16(&b"\x00\x03abcefg"[..]), Ok((&b"abcefg"[..], 0x0300))); +/// assert_eq!(le_u16(&b"\x01"[..]), Err(Err::Incomplete(Needed::new(1)))); +/// ``` +#[inline] +pub fn u16>(endian: crate::number::Endianness) -> fn(I) -> IResult +where + I: Slice> + InputIter + InputLength, +{ + match endian { + crate::number::Endianness::Big => be_u16, + crate::number::Endianness::Little => le_u16, + #[cfg(target_endian = "big")] + crate::number::Endianness::Native => be_u16, + #[cfg(target_endian = "little")] + crate::number::Endianness::Native => le_u16, + } +} + +/// Recognizes an unsigned 3 byte integer +/// +/// If the parameter is `nom::number::Endianness::Big`, parse a big endian u24 integer, +/// otherwise if `nom::number::Endianness::Little` parse a little endian u24 integer. +/// *Streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there is not enough data. +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// # use nom::Needed::Size; +/// use nom::number::streaming::u24; +/// +/// let be_u24 = |s| { +/// u24::<_,(_, ErrorKind)>(nom::number::Endianness::Big)(s) +/// }; +/// +/// assert_eq!(be_u24(&b"\x00\x03\x05abcefg"[..]), Ok((&b"abcefg"[..], 0x000305))); +/// assert_eq!(be_u24(&b"\x01"[..]), Err(Err::Incomplete(Needed::new(2)))); +/// +/// let le_u24 = |s| { +/// u24::<_, (_, ErrorKind)>(nom::number::Endianness::Little)(s) +/// }; +/// +/// assert_eq!(le_u24(&b"\x00\x03\x05abcefg"[..]), Ok((&b"abcefg"[..], 0x050300))); +/// assert_eq!(le_u24(&b"\x01"[..]), Err(Err::Incomplete(Needed::new(2)))); +/// ``` +#[inline] +pub fn u24>(endian: crate::number::Endianness) -> fn(I) -> IResult +where + I: Slice> + InputIter + InputLength, +{ + match endian { + crate::number::Endianness::Big => be_u24, + crate::number::Endianness::Little => le_u24, + #[cfg(target_endian = "big")] + crate::number::Endianness::Native => be_u24, + #[cfg(target_endian = "little")] + crate::number::Endianness::Native => le_u24, + } +} + +/// Recognizes an unsigned 4 byte integer +/// +/// If the parameter is `nom::number::Endianness::Big`, parse a big endian u32 integer, +/// otherwise if `nom::number::Endianness::Little` parse a little endian u32 integer. +/// *Streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there is not enough data. 
+/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// # use nom::Needed::Size; +/// use nom::number::streaming::u32; +/// +/// let be_u32 = |s| { +/// u32::<_, (_, ErrorKind)>(nom::number::Endianness::Big)(s) +/// }; +/// +/// assert_eq!(be_u32(&b"\x00\x03\x05\x07abcefg"[..]), Ok((&b"abcefg"[..], 0x00030507))); +/// assert_eq!(be_u32(&b"\x01"[..]), Err(Err::Incomplete(Needed::new(3)))); +/// +/// let le_u32 = |s| { +/// u32::<_, (_, ErrorKind)>(nom::number::Endianness::Little)(s) +/// }; +/// +/// assert_eq!(le_u32(&b"\x00\x03\x05\x07abcefg"[..]), Ok((&b"abcefg"[..], 0x07050300))); +/// assert_eq!(le_u32(&b"\x01"[..]), Err(Err::Incomplete(Needed::new(3)))); +/// ``` +#[inline] +pub fn u32>(endian: crate::number::Endianness) -> fn(I) -> IResult +where + I: Slice> + InputIter + InputLength, +{ + match endian { + crate::number::Endianness::Big => be_u32, + crate::number::Endianness::Little => le_u32, + #[cfg(target_endian = "big")] + crate::number::Endianness::Native => be_u32, + #[cfg(target_endian = "little")] + crate::number::Endianness::Native => le_u32, + } +} + +/// Recognizes an unsigned 8 byte integer +/// +/// If the parameter is `nom::number::Endianness::Big`, parse a big endian u64 integer, +/// otherwise if `nom::number::Endianness::Little` parse a little endian u64 integer. +/// *Streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there is not enough data. +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// # use nom::Needed::Size; +/// use nom::number::streaming::u64; +/// +/// let be_u64 = |s| { +/// u64::<_, (_, ErrorKind)>(nom::number::Endianness::Big)(s) +/// }; +/// +/// assert_eq!(be_u64(&b"\x00\x01\x02\x03\x04\x05\x06\x07abcefg"[..]), Ok((&b"abcefg"[..], 0x0001020304050607))); +/// assert_eq!(be_u64(&b"\x01"[..]), Err(Err::Incomplete(Needed::new(7)))); +/// +/// let le_u64 = |s| { +/// u64::<_, (_, ErrorKind)>(nom::number::Endianness::Little)(s) +/// }; +/// +/// assert_eq!(le_u64(&b"\x00\x01\x02\x03\x04\x05\x06\x07abcefg"[..]), Ok((&b"abcefg"[..], 0x0706050403020100))); +/// assert_eq!(le_u64(&b"\x01"[..]), Err(Err::Incomplete(Needed::new(7)))); +/// ``` +#[inline] +pub fn u64>(endian: crate::number::Endianness) -> fn(I) -> IResult +where + I: Slice> + InputIter + InputLength, +{ + match endian { + crate::number::Endianness::Big => be_u64, + crate::number::Endianness::Little => le_u64, + #[cfg(target_endian = "big")] + crate::number::Endianness::Native => be_u64, + #[cfg(target_endian = "little")] + crate::number::Endianness::Native => le_u64, + } +} + +/// Recognizes an unsigned 16 byte integer +/// +/// If the parameter is `nom::number::Endianness::Big`, parse a big endian u128 integer, +/// otherwise if `nom::number::Endianness::Little` parse a little endian u128 integer. +/// *Streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there is not enough data. 
+/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// # use nom::Needed::Size; +/// use nom::number::streaming::u128; +/// +/// let be_u128 = |s| { +/// u128::<_, (_, ErrorKind)>(nom::number::Endianness::Big)(s) +/// }; +/// +/// assert_eq!(be_u128(&b"\x00\x01\x02\x03\x04\x05\x06\x07\x00\x01\x02\x03\x04\x05\x06\x07abcefg"[..]), Ok((&b"abcefg"[..], 0x00010203040506070001020304050607))); +/// assert_eq!(be_u128(&b"\x01"[..]), Err(Err::Incomplete(Needed::new(15)))); +/// +/// let le_u128 = |s| { +/// u128::<_, (_, ErrorKind)>(nom::number::Endianness::Little)(s) +/// }; +/// +/// assert_eq!(le_u128(&b"\x00\x01\x02\x03\x04\x05\x06\x07\x00\x01\x02\x03\x04\x05\x06\x07abcefg"[..]), Ok((&b"abcefg"[..], 0x07060504030201000706050403020100))); +/// assert_eq!(le_u128(&b"\x01"[..]), Err(Err::Incomplete(Needed::new(15)))); +/// ``` +#[inline] +pub fn u128>(endian: crate::number::Endianness) -> fn(I) -> IResult +where + I: Slice> + InputIter + InputLength, +{ + match endian { + crate::number::Endianness::Big => be_u128, + crate::number::Endianness::Little => le_u128, + #[cfg(target_endian = "big")] + crate::number::Endianness::Native => be_u128, + #[cfg(target_endian = "little")] + crate::number::Endianness::Native => le_u128, + } +} + +/// Recognizes a signed 1 byte integer +/// +/// Note that endianness does not apply to 1 byte numbers. +/// *Streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there is not enough data. +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// # use nom::Needed::Size; +/// use nom::number::streaming::i8; +/// +/// let parser = |s| { +/// i8::<_, (_, ErrorKind)>(s) +/// }; +/// +/// assert_eq!(parser(&b"\x00\x03abcefg"[..]), Ok((&b"\x03abcefg"[..], 0x00))); +/// assert_eq!(parser(&b""[..]), Err(Err::Incomplete(Needed::new(1)))); +/// ``` +#[inline] +pub fn i8>(i: I) -> IResult +where + I: Slice> + InputIter + InputLength, +{ + u8.map(|x| x as i8).parse(i) +} + +/// Recognizes a signed 2 byte integer +/// +/// If the parameter is `nom::number::Endianness::Big`, parse a big endian i16 integer, +/// otherwise if `nom::number::Endianness::Little` parse a little endian i16 integer. +/// *Streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there is not enough data. 
+/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// # use nom::Needed::Size; +/// use nom::number::streaming::i16; +/// +/// let be_i16 = |s| { +/// i16::<_, (_, ErrorKind)>(nom::number::Endianness::Big)(s) +/// }; +/// +/// assert_eq!(be_i16(&b"\x00\x03abcefg"[..]), Ok((&b"abcefg"[..], 0x0003))); +/// assert_eq!(be_i16(&b"\x01"[..]), Err(Err::Incomplete(Needed::new(1)))); +/// +/// let le_i16 = |s| { +/// i16::<_, (_, ErrorKind)>(nom::number::Endianness::Little)(s) +/// }; +/// +/// assert_eq!(le_i16(&b"\x00\x03abcefg"[..]), Ok((&b"abcefg"[..], 0x0300))); +/// assert_eq!(le_i16(&b"\x01"[..]), Err(Err::Incomplete(Needed::new(1)))); +/// ``` +#[inline] +pub fn i16>(endian: crate::number::Endianness) -> fn(I) -> IResult +where + I: Slice> + InputIter + InputLength, +{ + match endian { + crate::number::Endianness::Big => be_i16, + crate::number::Endianness::Little => le_i16, + #[cfg(target_endian = "big")] + crate::number::Endianness::Native => be_i16, + #[cfg(target_endian = "little")] + crate::number::Endianness::Native => le_i16, + } +} + +/// Recognizes a signed 3 byte integer +/// +/// If the parameter is `nom::number::Endianness::Big`, parse a big endian i24 integer, +/// otherwise if `nom::number::Endianness::Little` parse a little endian i24 integer. +/// *Streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there is not enough data. +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// # use nom::Needed::Size; +/// use nom::number::streaming::i24; +/// +/// let be_i24 = |s| { +/// i24::<_, (_, ErrorKind)>(nom::number::Endianness::Big)(s) +/// }; +/// +/// assert_eq!(be_i24(&b"\x00\x03\x05abcefg"[..]), Ok((&b"abcefg"[..], 0x000305))); +/// assert_eq!(be_i24(&b"\x01"[..]), Err(Err::Incomplete(Needed::new(2)))); +/// +/// let le_i24 = |s| { +/// i24::<_, (_, ErrorKind)>(nom::number::Endianness::Little)(s) +/// }; +/// +/// assert_eq!(le_i24(&b"\x00\x03\x05abcefg"[..]), Ok((&b"abcefg"[..], 0x050300))); +/// assert_eq!(le_i24(&b"\x01"[..]), Err(Err::Incomplete(Needed::new(2)))); +/// ``` +#[inline] +pub fn i24>(endian: crate::number::Endianness) -> fn(I) -> IResult +where + I: Slice> + InputIter + InputLength, +{ + match endian { + crate::number::Endianness::Big => be_i24, + crate::number::Endianness::Little => le_i24, + #[cfg(target_endian = "big")] + crate::number::Endianness::Native => be_i24, + #[cfg(target_endian = "little")] + crate::number::Endianness::Native => le_i24, + } +} + +/// Recognizes a signed 4 byte integer +/// +/// If the parameter is `nom::number::Endianness::Big`, parse a big endian i32 integer, +/// otherwise if `nom::number::Endianness::Little` parse a little endian i32 integer. +/// *Streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there is not enough data. 
+/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// # use nom::Needed::Size; +/// use nom::number::streaming::i32; +/// +/// let be_i32 = |s| { +/// i32::<_, (_, ErrorKind)>(nom::number::Endianness::Big)(s) +/// }; +/// +/// assert_eq!(be_i32(&b"\x00\x03\x05\x07abcefg"[..]), Ok((&b"abcefg"[..], 0x00030507))); +/// assert_eq!(be_i32(&b"\x01"[..]), Err(Err::Incomplete(Needed::new(3)))); +/// +/// let le_i32 = |s| { +/// i32::<_, (_, ErrorKind)>(nom::number::Endianness::Little)(s) +/// }; +/// +/// assert_eq!(le_i32(&b"\x00\x03\x05\x07abcefg"[..]), Ok((&b"abcefg"[..], 0x07050300))); +/// assert_eq!(le_i32(&b"\x01"[..]), Err(Err::Incomplete(Needed::new(3)))); +/// ``` +#[inline] +pub fn i32>(endian: crate::number::Endianness) -> fn(I) -> IResult +where + I: Slice> + InputIter + InputLength, +{ + match endian { + crate::number::Endianness::Big => be_i32, + crate::number::Endianness::Little => le_i32, + #[cfg(target_endian = "big")] + crate::number::Endianness::Native => be_i32, + #[cfg(target_endian = "little")] + crate::number::Endianness::Native => le_i32, + } +} + +/// Recognizes a signed 8 byte integer +/// +/// If the parameter is `nom::number::Endianness::Big`, parse a big endian i64 integer, +/// otherwise if `nom::number::Endianness::Little` parse a little endian i64 integer. +/// *Streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there is not enough data. +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// # use nom::Needed::Size; +/// use nom::number::streaming::i64; +/// +/// let be_i64 = |s| { +/// i64::<_, (_, ErrorKind)>(nom::number::Endianness::Big)(s) +/// }; +/// +/// assert_eq!(be_i64(&b"\x00\x01\x02\x03\x04\x05\x06\x07abcefg"[..]), Ok((&b"abcefg"[..], 0x0001020304050607))); +/// assert_eq!(be_i64(&b"\x01"[..]), Err(Err::Incomplete(Needed::new(7)))); +/// +/// let le_i64 = |s| { +/// i64::<_, (_, ErrorKind)>(nom::number::Endianness::Little)(s) +/// }; +/// +/// assert_eq!(le_i64(&b"\x00\x01\x02\x03\x04\x05\x06\x07abcefg"[..]), Ok((&b"abcefg"[..], 0x0706050403020100))); +/// assert_eq!(le_i64(&b"\x01"[..]), Err(Err::Incomplete(Needed::new(7)))); +/// ``` +#[inline] +pub fn i64>(endian: crate::number::Endianness) -> fn(I) -> IResult +where + I: Slice> + InputIter + InputLength, +{ + match endian { + crate::number::Endianness::Big => be_i64, + crate::number::Endianness::Little => le_i64, + #[cfg(target_endian = "big")] + crate::number::Endianness::Native => be_i64, + #[cfg(target_endian = "little")] + crate::number::Endianness::Native => le_i64, + } +} + +/// Recognizes a signed 16 byte integer +/// +/// If the parameter is `nom::number::Endianness::Big`, parse a big endian i128 integer, +/// otherwise if `nom::number::Endianness::Little` parse a little endian i128 integer. +/// *Streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there is not enough data. 
+/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// # use nom::Needed::Size; +/// use nom::number::streaming::i128; +/// +/// let be_i128 = |s| { +/// i128::<_, (_, ErrorKind)>(nom::number::Endianness::Big)(s) +/// }; +/// +/// assert_eq!(be_i128(&b"\x00\x01\x02\x03\x04\x05\x06\x07\x00\x01\x02\x03\x04\x05\x06\x07abcefg"[..]), Ok((&b"abcefg"[..], 0x00010203040506070001020304050607))); +/// assert_eq!(be_i128(&b"\x01"[..]), Err(Err::Incomplete(Needed::new(15)))); +/// +/// let le_i128 = |s| { +/// i128::<_, (_, ErrorKind)>(nom::number::Endianness::Little)(s) +/// }; +/// +/// assert_eq!(le_i128(&b"\x00\x01\x02\x03\x04\x05\x06\x07\x00\x01\x02\x03\x04\x05\x06\x07abcefg"[..]), Ok((&b"abcefg"[..], 0x07060504030201000706050403020100))); +/// assert_eq!(le_i128(&b"\x01"[..]), Err(Err::Incomplete(Needed::new(15)))); +/// ``` +#[inline] +pub fn i128>(endian: crate::number::Endianness) -> fn(I) -> IResult +where + I: Slice> + InputIter + InputLength, +{ + match endian { + crate::number::Endianness::Big => be_i128, + crate::number::Endianness::Little => le_i128, + #[cfg(target_endian = "big")] + crate::number::Endianness::Native => be_i128, + #[cfg(target_endian = "little")] + crate::number::Endianness::Native => le_i128, + } +} + +/// Recognizes a big endian 4 bytes floating point number. +/// +/// *Streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there is not enough data. +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// use nom::number::streaming::be_f32; +/// +/// let parser = |s| { +/// be_f32::<_, (_, ErrorKind)>(s) +/// }; +/// +/// assert_eq!(parser(&[0x40, 0x29, 0x00, 0x00][..]), Ok((&b""[..], 2.640625))); +/// assert_eq!(parser(&[0x01][..]), Err(Err::Incomplete(Needed::new(3)))); +/// ``` +#[inline] +pub fn be_f32>(input: I) -> IResult +where + I: Slice> + InputIter + InputLength, +{ + match be_u32(input) { + Err(e) => Err(e), + Ok((i, o)) => Ok((i, f32::from_bits(o))), + } +} + +/// Recognizes a big endian 8 bytes floating point number. +/// +/// *Streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there is not enough data. +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// use nom::number::streaming::be_f64; +/// +/// let parser = |s| { +/// be_f64::<_, (_, ErrorKind)>(s) +/// }; +/// +/// assert_eq!(parser(&[0x40, 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00][..]), Ok((&b""[..], 12.5))); +/// assert_eq!(parser(&[0x01][..]), Err(Err::Incomplete(Needed::new(7)))); +/// ``` +#[inline] +pub fn be_f64>(input: I) -> IResult +where + I: Slice> + InputIter + InputLength, +{ + match be_u64(input) { + Err(e) => Err(e), + Ok((i, o)) => Ok((i, f64::from_bits(o))), + } +} + +/// Recognizes a little endian 4 bytes floating point number. +/// +/// *Streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there is not enough data. +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// use nom::number::streaming::le_f32; +/// +/// let parser = |s| { +/// le_f32::<_, (_, ErrorKind)>(s) +/// }; +/// +/// assert_eq!(parser(&[0x00, 0x00, 0x48, 0x41][..]), Ok((&b""[..], 12.5))); +/// assert_eq!(parser(&[0x01][..]), Err(Err::Incomplete(Needed::new(3)))); +/// ``` +#[inline] +pub fn le_f32>(input: I) -> IResult +where + I: Slice> + InputIter + InputLength, +{ + match le_u32(input) { + Err(e) => Err(e), + Ok((i, o)) => Ok((i, f32::from_bits(o))), + } +} + +/// Recognizes a little endian 8 bytes floating point number. +/// +/// *Streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there is not enough data. 
+/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// use nom::number::streaming::le_f64; +/// +/// let parser = |s| { +/// le_f64::<_, (_, ErrorKind)>(s) +/// }; +/// +/// assert_eq!(parser(&[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x48, 0x41][..]), Ok((&b""[..], 3145728.0))); +/// assert_eq!(parser(&[0x01][..]), Err(Err::Incomplete(Needed::new(7)))); +/// ``` +#[inline] +pub fn le_f64>(input: I) -> IResult +where + I: Slice> + InputIter + InputLength, +{ + match le_u64(input) { + Err(e) => Err(e), + Ok((i, o)) => Ok((i, f64::from_bits(o))), + } +} + +/// Recognizes a 4 byte floating point number +/// +/// If the parameter is `nom::number::Endianness::Big`, parse a big endian f32 float, +/// otherwise if `nom::number::Endianness::Little` parse a little endian f32 float. +/// *Streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there is not enough data. +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// # use nom::Needed::Size; +/// use nom::number::streaming::f32; +/// +/// let be_f32 = |s| { +/// f32::<_, (_, ErrorKind)>(nom::number::Endianness::Big)(s) +/// }; +/// +/// assert_eq!(be_f32(&[0x41, 0x48, 0x00, 0x00][..]), Ok((&b""[..], 12.5))); +/// assert_eq!(be_f32(&b"abc"[..]), Err(Err::Incomplete(Needed::new(1)))); +/// +/// let le_f32 = |s| { +/// f32::<_, (_, ErrorKind)>(nom::number::Endianness::Little)(s) +/// }; +/// +/// assert_eq!(le_f32(&[0x00, 0x00, 0x48, 0x41][..]), Ok((&b""[..], 12.5))); +/// assert_eq!(le_f32(&b"abc"[..]), Err(Err::Incomplete(Needed::new(1)))); +/// ``` +#[inline] +pub fn f32>(endian: crate::number::Endianness) -> fn(I) -> IResult +where + I: Slice> + InputIter + InputLength, +{ + match endian { + crate::number::Endianness::Big => be_f32, + crate::number::Endianness::Little => le_f32, + #[cfg(target_endian = "big")] + crate::number::Endianness::Native => be_f32, + #[cfg(target_endian = "little")] + crate::number::Endianness::Native => le_f32, + } +} + +/// Recognizes an 8 byte floating point number +/// +/// If the parameter is `nom::number::Endianness::Big`, parse a big endian f64 float, +/// otherwise if `nom::number::Endianness::Little` parse a little endian f64 float. +/// *Streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there is not enough data. +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// # use nom::Needed::Size; +/// use nom::number::streaming::f64; +/// +/// let be_f64 = |s| { +/// f64::<_, (_, ErrorKind)>(nom::number::Endianness::Big)(s) +/// }; +/// +/// assert_eq!(be_f64(&[0x40, 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00][..]), Ok((&b""[..], 12.5))); +/// assert_eq!(be_f64(&b"abc"[..]), Err(Err::Incomplete(Needed::new(5)))); +/// +/// let le_f64 = |s| { +/// f64::<_, (_, ErrorKind)>(nom::number::Endianness::Little)(s) +/// }; +/// +/// assert_eq!(le_f64(&[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x29, 0x40][..]), Ok((&b""[..], 12.5))); +/// assert_eq!(le_f64(&b"abc"[..]), Err(Err::Incomplete(Needed::new(5)))); +/// ``` +#[inline] +pub fn f64>(endian: crate::number::Endianness) -> fn(I) -> IResult +where + I: Slice> + InputIter + InputLength, +{ + match endian { + crate::number::Endianness::Big => be_f64, + crate::number::Endianness::Little => le_f64, + #[cfg(target_endian = "big")] + crate::number::Endianness::Native => be_f64, + #[cfg(target_endian = "little")] + crate::number::Endianness::Native => le_f64, + } +} + +/// Recognizes a hex-encoded integer. +/// +/// *Streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there is not enough data. 
+/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// use nom::number::streaming::hex_u32; +/// +/// let parser = |s| { +/// hex_u32(s) +/// }; +/// +/// assert_eq!(parser(b"01AE;"), Ok((&b";"[..], 0x01AE))); +/// assert_eq!(parser(b"abc"), Err(Err::Incomplete(Needed::new(1)))); +/// assert_eq!(parser(b"ggg"), Err(Err::Error((&b"ggg"[..], ErrorKind::IsA)))); +/// ``` +#[inline] +pub fn hex_u32<'a, E: ParseError<&'a [u8]>>(input: &'a [u8]) -> IResult<&'a [u8], u32, E> { + let (i, o) = crate::bytes::streaming::is_a(&b"0123456789abcdefABCDEF"[..])(input)?; + + // Do not parse more than 8 characters for a u32 + let (parsed, remaining) = if o.len() <= 8 { + (o, i) + } else { + (&input[..8], &input[8..]) + }; + + let res = parsed + .iter() + .rev() + .enumerate() + .map(|(k, &v)| { + let digit = v as char; + digit.to_digit(16).unwrap_or(0) << (k * 4) + }) + .sum(); + + Ok((remaining, res)) +} + +/// Recognizes a floating point number in text format and returns the corresponding part of the input. +/// +/// *Streaming version*: Will return `Err(nom::Err::Incomplete(_))` if it reaches the end of input. +/// +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// use nom::number::streaming::recognize_float; +/// +/// let parser = |s| { +/// recognize_float(s) +/// }; +/// +/// assert_eq!(parser("11e-1;"), Ok((";", "11e-1"))); +/// assert_eq!(parser("123E-02;"), Ok((";", "123E-02"))); +/// assert_eq!(parser("123K-01"), Ok(("K-01", "123"))); +/// assert_eq!(parser("abc"), Err(Err::Error(("abc", ErrorKind::Char)))); +/// ``` +#[rustfmt::skip] +pub fn recognize_float>(input: T) -> IResult +where + T: Slice> + Slice>, + T: Clone + Offset, + T: InputIter, + ::Item: AsChar, + T: InputTakeAtPosition + InputLength, + ::Item: AsChar +{ + recognize( + tuple(( + opt(alt((char('+'), char('-')))), + alt(( + map(tuple((digit1, opt(pair(char('.'), opt(digit1))))), |_| ()), + map(tuple((char('.'), digit1)), |_| ()) + )), + opt(tuple(( + alt((char('e'), char('E'))), + opt(alt((char('+'), char('-')))), + cut(digit1) + ))) + )) + )(input) +} + +// workaround until issues with minimal-lexical are fixed +#[doc(hidden)] +pub fn recognize_float_or_exceptions>(input: T) -> IResult +where + T: Slice> + Slice>, + T: Clone + Offset, + T: InputIter + InputTake + InputLength + Compare<&'static str>, + ::Item: AsChar, + T: InputTakeAtPosition, + ::Item: AsChar, +{ + alt(( + |i: T| { + recognize_float::<_, E>(i.clone()).map_err(|e| match e { + crate::Err::Error(_) => crate::Err::Error(E::from_error_kind(i, ErrorKind::Float)), + crate::Err::Failure(_) => crate::Err::Failure(E::from_error_kind(i, ErrorKind::Float)), + crate::Err::Incomplete(needed) => crate::Err::Incomplete(needed), + }) + }, + |i: T| { + crate::bytes::streaming::tag_no_case::<_, _, E>("nan")(i.clone()) + .map_err(|_| crate::Err::Error(E::from_error_kind(i, ErrorKind::Float))) + }, + |i: T| { + crate::bytes::streaming::tag_no_case::<_, _, E>("inf")(i.clone()) + .map_err(|_| crate::Err::Error(E::from_error_kind(i, ErrorKind::Float))) + }, + |i: T| { + crate::bytes::streaming::tag_no_case::<_, _, E>("infinity")(i.clone()) + .map_err(|_| crate::Err::Error(E::from_error_kind(i, ErrorKind::Float))) + }, + ))(input) +} + +/// Recognizes a floating point number in text format +/// +/// It returns a tuple of (`sign`, `integer part`, `fraction part` and `exponent`) of the input +/// data. +/// +/// *Streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there is not enough data. 
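+///
+/// The example below is an illustrative sketch added for clarity (it is not part of
+/// the upstream nom documentation); the concrete values assume a byte-slice input:
+///
+/// ```rust,ignore
+/// # use nom::error::ErrorKind;
+/// use nom::number::streaming::recognize_float_parts;
+///
+/// // sign = true (positive), integer part "1234", fraction "56", exponent 7
+/// assert_eq!(
+///   recognize_float_parts::<_, (_, ErrorKind)>(&b"1234.56e+07;"[..]),
+///   Ok((&b";"[..], (true, &b"1234"[..], &b"56"[..], 7)))
+/// );
+/// ```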
+/// +pub fn recognize_float_parts>(input: T) -> IResult +where + T: Slice> + Slice>, + T: Clone + Offset, + T: InputIter + crate::traits::ParseTo, + ::Item: AsChar, + T: InputTakeAtPosition + InputTake + InputLength, + ::Item: AsChar, + T: for<'a> Compare<&'a [u8]>, + T: AsBytes, +{ + let (i, sign) = sign(input.clone())?; + + //let (i, zeroes) = take_while(|c: ::Item| c.as_char() == '0')(i)?; + let (i, zeroes) = match i.as_bytes().iter().position(|c| *c != b'0') { + Some(index) => i.take_split(index), + None => i.take_split(i.input_len()), + }; + + //let (i, mut integer) = digit0(i)?; + let (i, mut integer) = match i + .as_bytes() + .iter() + .position(|c| !(*c >= b'0' && *c <= b'9')) + { + Some(index) => i.take_split(index), + None => i.take_split(i.input_len()), + }; + + if integer.input_len() == 0 && zeroes.input_len() > 0 { + // keep the last zero if integer is empty + integer = zeroes.slice(zeroes.input_len() - 1..); + } + + let (i, opt_dot) = opt(tag(&b"."[..]))(i)?; + let (i, fraction) = if opt_dot.is_none() { + let i2 = i.clone(); + (i2, i.slice(..0)) + } else { + // match number, trim right zeroes + let mut zero_count = 0usize; + let mut position = None; + for (pos, c) in i.as_bytes().iter().enumerate() { + if *c >= b'0' && *c <= b'9' { + if *c == b'0' { + zero_count += 1; + } else { + zero_count = 0; + } + } else { + position = Some(pos); + break; + } + } + + let position = match position { + Some(p) => p, + None => return Err(Err::Incomplete(Needed::new(1))), + }; + + let index = if zero_count == 0 { + position + } else if zero_count == position { + position - zero_count + 1 + } else { + position - zero_count + }; + + (i.slice(position..), i.slice(..index)) + }; + + if integer.input_len() == 0 && fraction.input_len() == 0 { + return Err(Err::Error(E::from_error_kind(input, ErrorKind::Float))); + } + + let i2 = i.clone(); + let (i, e) = match i.as_bytes().iter().next() { + Some(b'e') => (i.slice(1..), true), + Some(b'E') => (i.slice(1..), true), + _ => (i, false), + }; + + let (i, exp) = if e { + cut(crate::character::streaming::i32)(i)? + } else { + (i2, 0) + }; + + Ok((i, (sign, integer, fraction, exp))) +} + +/// Recognizes floating point number in text format and returns a f32. +/// +/// *Streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there is not enough data. 
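+///
+/// Note: as the commented-out block in the implementation below shows, this currently
+/// recognizes the textual form with `recognize_float_or_exceptions` and converts it via
+/// `ParseTo`, as a temporary workaround for `minimal-lexical` issues.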
+/// +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// # use nom::Needed::Size; +/// use nom::number::complete::float; +/// +/// let parser = |s| { +/// float(s) +/// }; +/// +/// assert_eq!(parser("11e-1"), Ok(("", 1.1))); +/// assert_eq!(parser("123E-02"), Ok(("", 1.23))); +/// assert_eq!(parser("123K-01"), Ok(("K-01", 123.0))); +/// assert_eq!(parser("abc"), Err(Err::Error(("abc", ErrorKind::Float)))); +/// ``` +pub fn float>(input: T) -> IResult +where + T: Slice> + Slice>, + T: Clone + Offset, + T: InputIter + InputLength + InputTake + crate::traits::ParseTo + Compare<&'static str>, + ::Item: AsChar, + ::IterElem: Clone, + T: InputTakeAtPosition, + ::Item: AsChar, + T: AsBytes, + T: for<'a> Compare<&'a [u8]>, +{ + /* + let (i, (sign, integer, fraction, exponent)) = recognize_float_parts(input)?; + + let mut float: f32 = minimal_lexical::parse_float( + integer.as_bytes().iter(), + fraction.as_bytes().iter(), + exponent, + ); + if !sign { + float = -float; + } + + Ok((i, float)) + */ + let (i, s) = recognize_float_or_exceptions(input)?; + match s.parse_to() { + Some(f) => Ok((i, f)), + None => Err(crate::Err::Error(E::from_error_kind( + i, + crate::error::ErrorKind::Float, + ))), + } +} + +/// Recognizes floating point number in text format and returns a f64. +/// +/// *Streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there is not enough data. +/// +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// # use nom::Needed::Size; +/// use nom::number::complete::double; +/// +/// let parser = |s| { +/// double(s) +/// }; +/// +/// assert_eq!(parser("11e-1"), Ok(("", 1.1))); +/// assert_eq!(parser("123E-02"), Ok(("", 1.23))); +/// assert_eq!(parser("123K-01"), Ok(("K-01", 123.0))); +/// assert_eq!(parser("abc"), Err(Err::Error(("abc", ErrorKind::Float)))); +/// ``` +pub fn double>(input: T) -> IResult +where + T: Slice> + Slice>, + T: Clone + Offset, + T: InputIter + InputLength + InputTake + crate::traits::ParseTo + Compare<&'static str>, + ::Item: AsChar, + ::IterElem: Clone, + T: InputTakeAtPosition, + ::Item: AsChar, + T: AsBytes, + T: for<'a> Compare<&'a [u8]>, +{ + /* + let (i, (sign, integer, fraction, exponent)) = recognize_float_parts(input)?; + + let mut float: f64 = minimal_lexical::parse_float( + integer.as_bytes().iter(), + fraction.as_bytes().iter(), + exponent, + ); + if !sign { + float = -float; + } + + Ok((i, float)) + */ + let (i, s) = recognize_float_or_exceptions(input)?; + match s.parse_to() { + Some(f) => Ok((i, f)), + None => Err(crate::Err::Error(E::from_error_kind( + i, + crate::error::ErrorKind::Float, + ))), + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::error::ErrorKind; + use crate::internal::{Err, Needed}; + use proptest::prelude::*; + + macro_rules! 
assert_parse( + ($left: expr, $right: expr) => { + let res: $crate::IResult<_, _, (_, ErrorKind)> = $left; + assert_eq!(res, $right); + }; + ); + + #[test] + fn i8_tests() { + assert_parse!(be_i8(&[0x00][..]), Ok((&b""[..], 0))); + assert_parse!(be_i8(&[0x7f][..]), Ok((&b""[..], 127))); + assert_parse!(be_i8(&[0xff][..]), Ok((&b""[..], -1))); + assert_parse!(be_i8(&[0x80][..]), Ok((&b""[..], -128))); + assert_parse!(be_i8(&[][..]), Err(Err::Incomplete(Needed::new(1)))); + } + + #[test] + fn i16_tests() { + assert_parse!(be_i16(&[0x00, 0x00][..]), Ok((&b""[..], 0))); + assert_parse!(be_i16(&[0x7f, 0xff][..]), Ok((&b""[..], 32_767_i16))); + assert_parse!(be_i16(&[0xff, 0xff][..]), Ok((&b""[..], -1))); + assert_parse!(be_i16(&[0x80, 0x00][..]), Ok((&b""[..], -32_768_i16))); + assert_parse!(be_i16(&[][..]), Err(Err::Incomplete(Needed::new(2)))); + assert_parse!(be_i16(&[0x00][..]), Err(Err::Incomplete(Needed::new(1)))); + } + + #[test] + fn u24_tests() { + assert_parse!(be_u24(&[0x00, 0x00, 0x00][..]), Ok((&b""[..], 0))); + assert_parse!(be_u24(&[0x00, 0xFF, 0xFF][..]), Ok((&b""[..], 65_535_u32))); + assert_parse!( + be_u24(&[0x12, 0x34, 0x56][..]), + Ok((&b""[..], 1_193_046_u32)) + ); + assert_parse!(be_u24(&[][..]), Err(Err::Incomplete(Needed::new(3)))); + assert_parse!(be_u24(&[0x00][..]), Err(Err::Incomplete(Needed::new(2)))); + assert_parse!( + be_u24(&[0x00, 0x00][..]), + Err(Err::Incomplete(Needed::new(1))) + ); + } + + #[test] + fn i24_tests() { + assert_parse!(be_i24(&[0xFF, 0xFF, 0xFF][..]), Ok((&b""[..], -1_i32))); + assert_parse!(be_i24(&[0xFF, 0x00, 0x00][..]), Ok((&b""[..], -65_536_i32))); + assert_parse!( + be_i24(&[0xED, 0xCB, 0xAA][..]), + Ok((&b""[..], -1_193_046_i32)) + ); + assert_parse!(be_i24(&[][..]), Err(Err::Incomplete(Needed::new(3)))); + assert_parse!(be_i24(&[0x00][..]), Err(Err::Incomplete(Needed::new(2)))); + assert_parse!( + be_i24(&[0x00, 0x00][..]), + Err(Err::Incomplete(Needed::new(1))) + ); + } + + #[test] + fn i32_tests() { + assert_parse!(be_i32(&[0x00, 0x00, 0x00, 0x00][..]), Ok((&b""[..], 0))); + assert_parse!( + be_i32(&[0x7f, 0xff, 0xff, 0xff][..]), + Ok((&b""[..], 2_147_483_647_i32)) + ); + assert_parse!(be_i32(&[0xff, 0xff, 0xff, 0xff][..]), Ok((&b""[..], -1))); + assert_parse!( + be_i32(&[0x80, 0x00, 0x00, 0x00][..]), + Ok((&b""[..], -2_147_483_648_i32)) + ); + assert_parse!(be_i32(&[][..]), Err(Err::Incomplete(Needed::new(4)))); + assert_parse!(be_i32(&[0x00][..]), Err(Err::Incomplete(Needed::new(3)))); + assert_parse!( + be_i32(&[0x00, 0x00][..]), + Err(Err::Incomplete(Needed::new(2))) + ); + assert_parse!( + be_i32(&[0x00, 0x00, 0x00][..]), + Err(Err::Incomplete(Needed::new(1))) + ); + } + + #[test] + fn i64_tests() { + assert_parse!( + be_i64(&[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00][..]), + Ok((&b""[..], 0)) + ); + assert_parse!( + be_i64(&[0x7f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff][..]), + Ok((&b""[..], 9_223_372_036_854_775_807_i64)) + ); + assert_parse!( + be_i64(&[0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff][..]), + Ok((&b""[..], -1)) + ); + assert_parse!( + be_i64(&[0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00][..]), + Ok((&b""[..], -9_223_372_036_854_775_808_i64)) + ); + assert_parse!(be_i64(&[][..]), Err(Err::Incomplete(Needed::new(8)))); + assert_parse!(be_i64(&[0x00][..]), Err(Err::Incomplete(Needed::new(7)))); + assert_parse!( + be_i64(&[0x00, 0x00][..]), + Err(Err::Incomplete(Needed::new(6))) + ); + assert_parse!( + be_i64(&[0x00, 0x00, 0x00][..]), + Err(Err::Incomplete(Needed::new(5))) + ); + assert_parse!( + 
be_i64(&[0x00, 0x00, 0x00, 0x00][..]), + Err(Err::Incomplete(Needed::new(4))) + ); + assert_parse!( + be_i64(&[0x00, 0x00, 0x00, 0x00, 0x00][..]), + Err(Err::Incomplete(Needed::new(3))) + ); + assert_parse!( + be_i64(&[0x00, 0x00, 0x00, 0x00, 0x00, 0x00][..]), + Err(Err::Incomplete(Needed::new(2))) + ); + assert_parse!( + be_i64(&[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00][..]), + Err(Err::Incomplete(Needed::new(1))) + ); + } + + #[test] + fn i128_tests() { + assert_parse!( + be_i128( + &[ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00 + ][..] + ), + Ok((&b""[..], 0)) + ); + assert_parse!( + be_i128( + &[ + 0x7f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff + ][..] + ), + Ok(( + &b""[..], + 170_141_183_460_469_231_731_687_303_715_884_105_727_i128 + )) + ); + assert_parse!( + be_i128( + &[ + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff + ][..] + ), + Ok((&b""[..], -1)) + ); + assert_parse!( + be_i128( + &[ + 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00 + ][..] + ), + Ok(( + &b""[..], + -170_141_183_460_469_231_731_687_303_715_884_105_728_i128 + )) + ); + assert_parse!(be_i128(&[][..]), Err(Err::Incomplete(Needed::new(16)))); + assert_parse!(be_i128(&[0x00][..]), Err(Err::Incomplete(Needed::new(15)))); + assert_parse!( + be_i128(&[0x00, 0x00][..]), + Err(Err::Incomplete(Needed::new(14))) + ); + assert_parse!( + be_i128(&[0x00, 0x00, 0x00][..]), + Err(Err::Incomplete(Needed::new(13))) + ); + assert_parse!( + be_i128(&[0x00, 0x00, 0x00, 0x00][..]), + Err(Err::Incomplete(Needed::new(12))) + ); + assert_parse!( + be_i128(&[0x00, 0x00, 0x00, 0x00, 0x00][..]), + Err(Err::Incomplete(Needed::new(11))) + ); + assert_parse!( + be_i128(&[0x00, 0x00, 0x00, 0x00, 0x00, 0x00][..]), + Err(Err::Incomplete(Needed::new(10))) + ); + assert_parse!( + be_i128(&[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00][..]), + Err(Err::Incomplete(Needed::new(9))) + ); + assert_parse!( + be_i128(&[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00][..]), + Err(Err::Incomplete(Needed::new(8))) + ); + assert_parse!( + be_i128(&[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00][..]), + Err(Err::Incomplete(Needed::new(7))) + ); + assert_parse!( + be_i128(&[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00][..]), + Err(Err::Incomplete(Needed::new(6))) + ); + assert_parse!( + be_i128(&[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00][..]), + Err(Err::Incomplete(Needed::new(5))) + ); + assert_parse!( + be_i128(&[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00][..]), + Err(Err::Incomplete(Needed::new(4))) + ); + assert_parse!( + be_i128(&[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00][..]), + Err(Err::Incomplete(Needed::new(3))) + ); + assert_parse!( + be_i128( + &[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00][..] + ), + Err(Err::Incomplete(Needed::new(2))) + ); + assert_parse!( + be_i128( + &[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00] + [..] 
+ ), + Err(Err::Incomplete(Needed::new(1))) + ); + } + + #[test] + fn le_i8_tests() { + assert_parse!(le_i8(&[0x00][..]), Ok((&b""[..], 0))); + assert_parse!(le_i8(&[0x7f][..]), Ok((&b""[..], 127))); + assert_parse!(le_i8(&[0xff][..]), Ok((&b""[..], -1))); + assert_parse!(le_i8(&[0x80][..]), Ok((&b""[..], -128))); + } + + #[test] + fn le_i16_tests() { + assert_parse!(le_i16(&[0x00, 0x00][..]), Ok((&b""[..], 0))); + assert_parse!(le_i16(&[0xff, 0x7f][..]), Ok((&b""[..], 32_767_i16))); + assert_parse!(le_i16(&[0xff, 0xff][..]), Ok((&b""[..], -1))); + assert_parse!(le_i16(&[0x00, 0x80][..]), Ok((&b""[..], -32_768_i16))); + } + + #[test] + fn le_u24_tests() { + assert_parse!(le_u24(&[0x00, 0x00, 0x00][..]), Ok((&b""[..], 0))); + assert_parse!(le_u24(&[0xFF, 0xFF, 0x00][..]), Ok((&b""[..], 65_535_u32))); + assert_parse!( + le_u24(&[0x56, 0x34, 0x12][..]), + Ok((&b""[..], 1_193_046_u32)) + ); + } + + #[test] + fn le_i24_tests() { + assert_parse!(le_i24(&[0xFF, 0xFF, 0xFF][..]), Ok((&b""[..], -1_i32))); + assert_parse!(le_i24(&[0x00, 0x00, 0xFF][..]), Ok((&b""[..], -65_536_i32))); + assert_parse!( + le_i24(&[0xAA, 0xCB, 0xED][..]), + Ok((&b""[..], -1_193_046_i32)) + ); + } + + #[test] + fn le_i32_tests() { + assert_parse!(le_i32(&[0x00, 0x00, 0x00, 0x00][..]), Ok((&b""[..], 0))); + assert_parse!( + le_i32(&[0xff, 0xff, 0xff, 0x7f][..]), + Ok((&b""[..], 2_147_483_647_i32)) + ); + assert_parse!(le_i32(&[0xff, 0xff, 0xff, 0xff][..]), Ok((&b""[..], -1))); + assert_parse!( + le_i32(&[0x00, 0x00, 0x00, 0x80][..]), + Ok((&b""[..], -2_147_483_648_i32)) + ); + } + + #[test] + fn le_i64_tests() { + assert_parse!( + le_i64(&[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00][..]), + Ok((&b""[..], 0)) + ); + assert_parse!( + le_i64(&[0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f][..]), + Ok((&b""[..], 9_223_372_036_854_775_807_i64)) + ); + assert_parse!( + le_i64(&[0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff][..]), + Ok((&b""[..], -1)) + ); + assert_parse!( + le_i64(&[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80][..]), + Ok((&b""[..], -9_223_372_036_854_775_808_i64)) + ); + } + + #[test] + fn le_i128_tests() { + assert_parse!( + le_i128( + &[ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00 + ][..] + ), + Ok((&b""[..], 0)) + ); + assert_parse!( + le_i128( + &[ + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0x7f + ][..] + ), + Ok(( + &b""[..], + 170_141_183_460_469_231_731_687_303_715_884_105_727_i128 + )) + ); + assert_parse!( + le_i128( + &[ + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff + ][..] + ), + Ok((&b""[..], -1)) + ); + assert_parse!( + le_i128( + &[ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x80 + ][..] 
+ ), + Ok(( + &b""[..], + -170_141_183_460_469_231_731_687_303_715_884_105_728_i128 + )) + ); + } + + #[test] + fn be_f32_tests() { + assert_parse!(be_f32(&[0x00, 0x00, 0x00, 0x00][..]), Ok((&b""[..], 0_f32))); + assert_parse!( + be_f32(&[0x4d, 0x31, 0x1f, 0xd8][..]), + Ok((&b""[..], 185_728_392_f32)) + ); + } + + #[test] + fn be_f64_tests() { + assert_parse!( + be_f64(&[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00][..]), + Ok((&b""[..], 0_f64)) + ); + assert_parse!( + be_f64(&[0x41, 0xa6, 0x23, 0xfb, 0x10, 0x00, 0x00, 0x00][..]), + Ok((&b""[..], 185_728_392_f64)) + ); + } + + #[test] + fn le_f32_tests() { + assert_parse!(le_f32(&[0x00, 0x00, 0x00, 0x00][..]), Ok((&b""[..], 0_f32))); + assert_parse!( + le_f32(&[0xd8, 0x1f, 0x31, 0x4d][..]), + Ok((&b""[..], 185_728_392_f32)) + ); + } + + #[test] + fn le_f64_tests() { + assert_parse!( + le_f64(&[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00][..]), + Ok((&b""[..], 0_f64)) + ); + assert_parse!( + le_f64(&[0x00, 0x00, 0x00, 0x10, 0xfb, 0x23, 0xa6, 0x41][..]), + Ok((&b""[..], 185_728_392_f64)) + ); + } + + #[test] + fn hex_u32_tests() { + assert_parse!( + hex_u32(&b";"[..]), + Err(Err::Error(error_position!(&b";"[..], ErrorKind::IsA))) + ); + assert_parse!(hex_u32(&b"ff;"[..]), Ok((&b";"[..], 255))); + assert_parse!(hex_u32(&b"1be2;"[..]), Ok((&b";"[..], 7138))); + assert_parse!(hex_u32(&b"c5a31be2;"[..]), Ok((&b";"[..], 3_315_801_058))); + assert_parse!(hex_u32(&b"C5A31be2;"[..]), Ok((&b";"[..], 3_315_801_058))); + assert_parse!(hex_u32(&b"00c5a31be2;"[..]), Ok((&b"e2;"[..], 12_952_347))); + assert_parse!( + hex_u32(&b"c5a31be201;"[..]), + Ok((&b"01;"[..], 3_315_801_058)) + ); + assert_parse!(hex_u32(&b"ffffffff;"[..]), Ok((&b";"[..], 4_294_967_295))); + assert_parse!(hex_u32(&b"0x1be2;"[..]), Ok((&b"x1be2;"[..], 0))); + assert_parse!(hex_u32(&b"12af"[..]), Err(Err::Incomplete(Needed::new(1)))); + } + + #[test] + #[cfg(feature = "std")] + fn float_test() { + let mut test_cases = vec![ + "+3.14", + "3.14", + "-3.14", + "0", + "0.0", + "1.", + ".789", + "-.5", + "1e7", + "-1E-7", + ".3e-2", + "1.e4", + "1.2e4", + "12.34", + "-1.234E-12", + "-1.234e-12", + "0.00000000000000000087", + ]; + + for test in test_cases.drain(..) 
{ + let expected32 = str::parse::(test).unwrap(); + let expected64 = str::parse::(test).unwrap(); + + println!("now parsing: {} -> {}", test, expected32); + + let larger = format!("{};", test); + assert_parse!(recognize_float(&larger[..]), Ok((";", test))); + + assert_parse!(float(larger.as_bytes()), Ok((&b";"[..], expected32))); + assert_parse!(float(&larger[..]), Ok((";", expected32))); + + assert_parse!(double(larger.as_bytes()), Ok((&b";"[..], expected64))); + assert_parse!(double(&larger[..]), Ok((";", expected64))); + } + + let remaining_exponent = "-1.234E-"; + assert_parse!( + recognize_float(remaining_exponent), + Err(Err::Incomplete(Needed::new(1))) + ); + + let (_i, nan) = float::<_, ()>("NaN").unwrap(); + assert!(nan.is_nan()); + + let (_i, inf) = float::<_, ()>("inf").unwrap(); + assert!(inf.is_infinite()); + let (_i, inf) = float::<_, ()>("infinite").unwrap(); + assert!(inf.is_infinite()); + } + + #[test] + fn configurable_endianness() { + use crate::number::Endianness; + + fn be_tst16(i: &[u8]) -> IResult<&[u8], u16> { + u16(Endianness::Big)(i) + } + fn le_tst16(i: &[u8]) -> IResult<&[u8], u16> { + u16(Endianness::Little)(i) + } + assert_eq!(be_tst16(&[0x80, 0x00]), Ok((&b""[..], 32_768_u16))); + assert_eq!(le_tst16(&[0x80, 0x00]), Ok((&b""[..], 128_u16))); + + fn be_tst32(i: &[u8]) -> IResult<&[u8], u32> { + u32(Endianness::Big)(i) + } + fn le_tst32(i: &[u8]) -> IResult<&[u8], u32> { + u32(Endianness::Little)(i) + } + assert_eq!( + be_tst32(&[0x12, 0x00, 0x60, 0x00]), + Ok((&b""[..], 302_014_464_u32)) + ); + assert_eq!( + le_tst32(&[0x12, 0x00, 0x60, 0x00]), + Ok((&b""[..], 6_291_474_u32)) + ); + + fn be_tst64(i: &[u8]) -> IResult<&[u8], u64> { + u64(Endianness::Big)(i) + } + fn le_tst64(i: &[u8]) -> IResult<&[u8], u64> { + u64(Endianness::Little)(i) + } + assert_eq!( + be_tst64(&[0x12, 0x00, 0x60, 0x00, 0x12, 0x00, 0x80, 0x00]), + Ok((&b""[..], 1_297_142_246_100_992_000_u64)) + ); + assert_eq!( + le_tst64(&[0x12, 0x00, 0x60, 0x00, 0x12, 0x00, 0x80, 0x00]), + Ok((&b""[..], 36_028_874_334_666_770_u64)) + ); + + fn be_tsti16(i: &[u8]) -> IResult<&[u8], i16> { + i16(Endianness::Big)(i) + } + fn le_tsti16(i: &[u8]) -> IResult<&[u8], i16> { + i16(Endianness::Little)(i) + } + assert_eq!(be_tsti16(&[0x00, 0x80]), Ok((&b""[..], 128_i16))); + assert_eq!(le_tsti16(&[0x00, 0x80]), Ok((&b""[..], -32_768_i16))); + + fn be_tsti32(i: &[u8]) -> IResult<&[u8], i32> { + i32(Endianness::Big)(i) + } + fn le_tsti32(i: &[u8]) -> IResult<&[u8], i32> { + i32(Endianness::Little)(i) + } + assert_eq!( + be_tsti32(&[0x00, 0x12, 0x60, 0x00]), + Ok((&b""[..], 1_204_224_i32)) + ); + assert_eq!( + le_tsti32(&[0x00, 0x12, 0x60, 0x00]), + Ok((&b""[..], 6_296_064_i32)) + ); + + fn be_tsti64(i: &[u8]) -> IResult<&[u8], i64> { + i64(Endianness::Big)(i) + } + fn le_tsti64(i: &[u8]) -> IResult<&[u8], i64> { + i64(Endianness::Little)(i) + } + assert_eq!( + be_tsti64(&[0x00, 0xFF, 0x60, 0x00, 0x12, 0x00, 0x80, 0x00]), + Ok((&b""[..], 71_881_672_479_506_432_i64)) + ); + assert_eq!( + le_tsti64(&[0x00, 0xFF, 0x60, 0x00, 0x12, 0x00, 0x80, 0x00]), + Ok((&b""[..], 36_028_874_334_732_032_i64)) + ); + } + + #[cfg(feature = "std")] + fn parse_f64(i: &str) -> IResult<&str, f64, ()> { + use crate::traits::ParseTo; + match recognize_float_or_exceptions(i) { + Err(e) => Err(e), + Ok((i, s)) => { + if s.is_empty() { + return Err(Err::Error(())); + } + match s.parse_to() { + Some(n) => Ok((i, n)), + None => Err(Err::Error(())), + } + } + } + } + + proptest! 
{ + #[test] + #[cfg(feature = "std")] + fn floats(s in "\\PC*") { + println!("testing {}", s); + let res1 = parse_f64(&s); + let res2 = double::<_, ()>(s.as_str()); + assert_eq!(res1, res2); + } + } +} diff --git a/vendor/nom/src/sequence/mod.rs b/vendor/nom/src/sequence/mod.rs new file mode 100644 index 00000000000000..735ab45cc73485 --- /dev/null +++ b/vendor/nom/src/sequence/mod.rs @@ -0,0 +1,279 @@ +//! Combinators applying parsers in sequence + +#[cfg(test)] +mod tests; + +use crate::error::ParseError; +use crate::internal::{IResult, Parser}; + +/// Gets an object from the first parser, +/// then gets another object from the second parser. +/// +/// # Arguments +/// * `first` The first parser to apply. +/// * `second` The second parser to apply. +/// +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// # use nom::Needed::Size; +/// use nom::sequence::pair; +/// use nom::bytes::complete::tag; +/// +/// let mut parser = pair(tag("abc"), tag("efg")); +/// +/// assert_eq!(parser("abcefg"), Ok(("", ("abc", "efg")))); +/// assert_eq!(parser("abcefghij"), Ok(("hij", ("abc", "efg")))); +/// assert_eq!(parser(""), Err(Err::Error(("", ErrorKind::Tag)))); +/// assert_eq!(parser("123"), Err(Err::Error(("123", ErrorKind::Tag)))); +/// ``` +pub fn pair, F, G>( + mut first: F, + mut second: G, +) -> impl FnMut(I) -> IResult +where + F: Parser, + G: Parser, +{ + move |input: I| { + let (input, o1) = first.parse(input)?; + second.parse(input).map(|(i, o2)| (i, (o1, o2))) + } +} + +/// Matches an object from the first parser and discards it, +/// then gets an object from the second parser. +/// +/// # Arguments +/// * `first` The opening parser. +/// * `second` The second parser to get object. +/// +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// # use nom::Needed::Size; +/// use nom::sequence::preceded; +/// use nom::bytes::complete::tag; +/// +/// let mut parser = preceded(tag("abc"), tag("efg")); +/// +/// assert_eq!(parser("abcefg"), Ok(("", "efg"))); +/// assert_eq!(parser("abcefghij"), Ok(("hij", "efg"))); +/// assert_eq!(parser(""), Err(Err::Error(("", ErrorKind::Tag)))); +/// assert_eq!(parser("123"), Err(Err::Error(("123", ErrorKind::Tag)))); +/// ``` +pub fn preceded, F, G>( + mut first: F, + mut second: G, +) -> impl FnMut(I) -> IResult +where + F: Parser, + G: Parser, +{ + move |input: I| { + let (input, _) = first.parse(input)?; + second.parse(input) + } +} + +/// Gets an object from the first parser, +/// then matches an object from the second parser and discards it. +/// +/// # Arguments +/// * `first` The first parser to apply. +/// * `second` The second parser to match an object. 
+/// +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// # use nom::Needed::Size; +/// use nom::sequence::terminated; +/// use nom::bytes::complete::tag; +/// +/// let mut parser = terminated(tag("abc"), tag("efg")); +/// +/// assert_eq!(parser("abcefg"), Ok(("", "abc"))); +/// assert_eq!(parser("abcefghij"), Ok(("hij", "abc"))); +/// assert_eq!(parser(""), Err(Err::Error(("", ErrorKind::Tag)))); +/// assert_eq!(parser("123"), Err(Err::Error(("123", ErrorKind::Tag)))); +/// ``` +pub fn terminated, F, G>( + mut first: F, + mut second: G, +) -> impl FnMut(I) -> IResult +where + F: Parser, + G: Parser, +{ + move |input: I| { + let (input, o1) = first.parse(input)?; + second.parse(input).map(|(i, _)| (i, o1)) + } +} + +/// Gets an object from the first parser, +/// then matches an object from the sep_parser and discards it, +/// then gets another object from the second parser. +/// +/// # Arguments +/// * `first` The first parser to apply. +/// * `sep` The separator parser to apply. +/// * `second` The second parser to apply. +/// +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// # use nom::Needed::Size; +/// use nom::sequence::separated_pair; +/// use nom::bytes::complete::tag; +/// +/// let mut parser = separated_pair(tag("abc"), tag("|"), tag("efg")); +/// +/// assert_eq!(parser("abc|efg"), Ok(("", ("abc", "efg")))); +/// assert_eq!(parser("abc|efghij"), Ok(("hij", ("abc", "efg")))); +/// assert_eq!(parser(""), Err(Err::Error(("", ErrorKind::Tag)))); +/// assert_eq!(parser("123"), Err(Err::Error(("123", ErrorKind::Tag)))); +/// ``` +pub fn separated_pair, F, G, H>( + mut first: F, + mut sep: G, + mut second: H, +) -> impl FnMut(I) -> IResult +where + F: Parser, + G: Parser, + H: Parser, +{ + move |input: I| { + let (input, o1) = first.parse(input)?; + let (input, _) = sep.parse(input)?; + second.parse(input).map(|(i, o2)| (i, (o1, o2))) + } +} + +/// Matches an object from the first parser and discards it, +/// then gets an object from the second parser, +/// and finally matches an object from the third parser and discards it. +/// +/// # Arguments +/// * `first` The first parser to apply and discard. +/// * `second` The second parser to apply. +/// * `third` The third parser to apply and discard. +/// +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// # use nom::Needed::Size; +/// use nom::sequence::delimited; +/// use nom::bytes::complete::tag; +/// +/// let mut parser = delimited(tag("("), tag("abc"), tag(")")); +/// +/// assert_eq!(parser("(abc)"), Ok(("", "abc"))); +/// assert_eq!(parser("(abc)def"), Ok(("def", "abc"))); +/// assert_eq!(parser(""), Err(Err::Error(("", ErrorKind::Tag)))); +/// assert_eq!(parser("123"), Err(Err::Error(("123", ErrorKind::Tag)))); +/// ``` +pub fn delimited, F, G, H>( + mut first: F, + mut second: G, + mut third: H, +) -> impl FnMut(I) -> IResult +where + F: Parser, + G: Parser, + H: Parser, +{ + move |input: I| { + let (input, _) = first.parse(input)?; + let (input, o2) = second.parse(input)?; + third.parse(input).map(|(i, _)| (i, o2)) + } +} + +/// Helper trait for the tuple combinator. +/// +/// This trait is implemented for tuples of parsers of up to 21 elements. +pub trait Tuple { + /// Parses the input and returns a tuple of results of each parser. + fn parse(&mut self, input: I) -> IResult; +} + +impl, F: Parser> + Tuple for (F,) +{ + fn parse(&mut self, input: Input) -> IResult { + self.0.parse(input).map(|(i, o)| (i, (o,))) + } +} + +macro_rules! 
tuple_trait(
+  ($name1:ident $ty1:ident, $name2: ident $ty2:ident, $($name:ident $ty:ident),*) => (
+    tuple_trait!(__impl $name1 $ty1, $name2 $ty2; $($name $ty),*);
+  );
+  (__impl $($name:ident $ty: ident),+; $name1:ident $ty1:ident, $($name2:ident $ty2:ident),*) => (
+    tuple_trait_impl!($($name $ty),+);
+    tuple_trait!(__impl $($name $ty),+ , $name1 $ty1; $($name2 $ty2),*);
+  );
+  (__impl $($name:ident $ty: ident),+; $name1:ident $ty1:ident) => (
+    tuple_trait_impl!($($name $ty),+);
+    tuple_trait_impl!($($name $ty),+, $name1 $ty1);
+  );
+);
+
+macro_rules! tuple_trait_impl(
+  ($($name:ident $ty: ident),+) => (
+    impl<
+      Input: Clone, $($ty),+ , Error: ParseError<Input>,
+      $($name: Parser<Input, $ty, Error>),+
+    > Tuple<Input, ( $($ty),+ ), Error> for ( $($name),+ ) {
+
+      fn parse(&mut self, input: Input) -> IResult<Input, ( $($ty),+ ), Error> {
+        tuple_trait_inner!(0, self, input, (), $($name)+)
+
+      }
+    }
+  );
+);
+
+macro_rules! tuple_trait_inner(
+  ($it:tt, $self:expr, $input:expr, (), $head:ident $($id:ident)+) => ({
+    let (i, o) = $self.$it.parse($input.clone())?;
+
+    succ!($it, tuple_trait_inner!($self, i, ( o ), $($id)+))
+  });
+  ($it:tt, $self:expr, $input:expr, ($($parsed:tt)*), $head:ident $($id:ident)+) => ({
+    let (i, o) = $self.$it.parse($input.clone())?;
+
+    succ!($it, tuple_trait_inner!($self, i, ($($parsed)* , o), $($id)+))
+  });
+  ($it:tt, $self:expr, $input:expr, ($($parsed:tt)*), $head:ident) => ({
+    let (i, o) = $self.$it.parse($input.clone())?;
+
+    Ok((i, ($($parsed)* , o)))
+  });
+);
+
+tuple_trait!(FnA A, FnB B, FnC C, FnD D, FnE E, FnF F, FnG G, FnH H, FnI I, FnJ J, FnK K, FnL L,
+  FnM M, FnN N, FnO O, FnP P, FnQ Q, FnR R, FnS S, FnT T, FnU U);
+
+// Special case: implement `Tuple` for `()`, the unit type.
+// This can come up in macros which accept a variable number of arguments.
+// Literally, `()` is an empty tuple, so it should simply parse nothing.
+impl<I, E: ParseError<I>> Tuple<I, (), E> for () {
+  fn parse(&mut self, input: I) -> IResult<I, (), E> {
+    Ok((input, ()))
+  }
+}
+
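+// Illustrative sketch (not upstream nom code): for a two-element tuple, the
+// macro-generated `Tuple` impl above expands to roughly
+//
+//   fn parse(&mut self, input: Input) -> IResult<Input, (A, B), Error> {
+//     let (i, a) = self.0.parse(input.clone())?;
+//     let (i, b) = self.1.parse(i.clone())?;
+//     Ok((i, (a, b)))
+//   }
+//
+// i.e. each parser consumes from where the previous one stopped, and the
+// outputs are collected positionally into a tuple.
+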
+///Applies a tuple of parsers one by one and returns their results as a tuple.
+///There is a maximum of 21 parsers
+/// ```rust
+/// # use nom::{Err, error::ErrorKind};
+/// use nom::sequence::tuple;
+/// use nom::character::complete::{alpha1, digit1};
+/// let mut parser = tuple((alpha1, digit1, alpha1));
+///
+/// assert_eq!(parser("abc123def"), Ok(("", ("abc", "123", "def"))));
+/// assert_eq!(parser("123def"), Err(Err::Error(("123def", ErrorKind::Alpha))));
+/// ```
+pub fn tuple<I, O, E: ParseError<I>, List: Tuple<I, O, E>>(
+  mut l: List,
+) -> impl FnMut(I) -> IResult<I, O, E> {
+  move |i: I| l.parse(i)
+}
diff --git a/vendor/nom/src/sequence/tests.rs b/vendor/nom/src/sequence/tests.rs
new file mode 100644
index 00000000000000..30ad0d67833d81
--- /dev/null
+++ b/vendor/nom/src/sequence/tests.rs
@@ -0,0 +1,290 @@
+use super::*;
+use crate::bytes::streaming::{tag, take};
+use crate::error::{Error, ErrorKind};
+use crate::internal::{Err, IResult, Needed};
+use crate::number::streaming::be_u16;
+
+#[test]
+fn single_element_tuples() {
+  use crate::character::complete::alpha1;
+  use crate::{error::ErrorKind, Err};
+
+  let mut parser = tuple((alpha1,));
+  assert_eq!(parser("abc123def"), Ok(("123def", ("abc",))));
+  assert_eq!(
+    parser("123def"),
+    Err(Err::Error(("123def", ErrorKind::Alpha)))
+  );
+}
+
+#[derive(PartialEq, Eq, Debug)]
+struct B {
+  a: u8,
+  b: u8,
+}
+
+#[derive(PartialEq, Eq, Debug)]
+struct C {
+  a: u8,
+  b: Option<u8>,
+}
+
+/*FIXME: convert code examples to new error management
+use util::{add_error_pattern, error_to_list, print_error};
+
+#[cfg(feature = "std")]
+#[rustfmt::skip]
+fn error_to_string<P>(e: &Context<P, u32>) -> &'static str {
+  let v: Vec<(P, ErrorKind<u32>)> = error_to_list(e);
+  // do it this way if you can use slice patterns
+  //match &v[..] {
+  //  [ErrorKind::Custom(42), ErrorKind::Tag] => "missing `ijkl` tag",
+  //  [ErrorKind::Custom(42), ErrorKind::Custom(128), ErrorKind::Tag] => "missing `mnop` tag after `ijkl`",
+  //  _ => "unrecognized error"
+  //}
+
+  let collected: Vec<ErrorKind<u32>> = v.iter().map(|&(_, ref e)| e.clone()).collect();
+  if &collected[..] == [ErrorKind::Custom(42), ErrorKind::Tag] {
+    "missing `ijkl` tag"
+  } else if &collected[..]
== [ErrorKind::Custom(42), ErrorKind::Custom(128), ErrorKind::Tag] { + "missing `mnop` tag after `ijkl`" + } else { + "unrecognized error" + } +} + +// do it this way if you can use box patterns +//use $crate::lib::std::str; +//fn error_to_string(e:Err) -> String +// match e { +// NodePosition(ErrorKind::Custom(42), i1, box Position(ErrorKind::Tag, i2)) => { +// format!("missing `ijkl` tag, found '{}' instead", str::from_utf8(i2).unwrap()) +// }, +// NodePosition(ErrorKind::Custom(42), i1, box NodePosition(ErrorKind::Custom(128), i2, box Position(ErrorKind::Tag, i3))) => { +// format!("missing `mnop` tag after `ijkl`, found '{}' instead", str::from_utf8(i3).unwrap()) +// }, +// _ => "unrecognized error".to_string() +// } +//} +*/ + +#[test] +fn complete() { + use crate::bytes::complete::tag; + fn err_test(i: &[u8]) -> IResult<&[u8], &[u8]> { + let (i, _) = tag("ijkl")(i)?; + tag("mnop")(i) + } + let a = &b"ijklmn"[..]; + + let res_a = err_test(a); + assert_eq!( + res_a, + Err(Err::Error(error_position!(&b"mn"[..], ErrorKind::Tag))) + ); +} + +#[test] +fn pair_test() { + fn pair_abc_def(i: &[u8]) -> IResult<&[u8], (&[u8], &[u8])> { + pair(tag("abc"), tag("def"))(i) + } + + assert_eq!( + pair_abc_def(&b"abcdefghijkl"[..]), + Ok((&b"ghijkl"[..], (&b"abc"[..], &b"def"[..]))) + ); + assert_eq!( + pair_abc_def(&b"ab"[..]), + Err(Err::Incomplete(Needed::new(1))) + ); + assert_eq!( + pair_abc_def(&b"abcd"[..]), + Err(Err::Incomplete(Needed::new(2))) + ); + assert_eq!( + pair_abc_def(&b"xxx"[..]), + Err(Err::Error(error_position!(&b"xxx"[..], ErrorKind::Tag))) + ); + assert_eq!( + pair_abc_def(&b"xxxdef"[..]), + Err(Err::Error(error_position!(&b"xxxdef"[..], ErrorKind::Tag))) + ); + assert_eq!( + pair_abc_def(&b"abcxxx"[..]), + Err(Err::Error(error_position!(&b"xxx"[..], ErrorKind::Tag))) + ); +} + +#[test] +fn separated_pair_test() { + fn sep_pair_abc_def(i: &[u8]) -> IResult<&[u8], (&[u8], &[u8])> { + separated_pair(tag("abc"), tag(","), tag("def"))(i) + } + + assert_eq!( + sep_pair_abc_def(&b"abc,defghijkl"[..]), + Ok((&b"ghijkl"[..], (&b"abc"[..], &b"def"[..]))) + ); + assert_eq!( + sep_pair_abc_def(&b"ab"[..]), + Err(Err::Incomplete(Needed::new(1))) + ); + assert_eq!( + sep_pair_abc_def(&b"abc,d"[..]), + Err(Err::Incomplete(Needed::new(2))) + ); + assert_eq!( + sep_pair_abc_def(&b"xxx"[..]), + Err(Err::Error(error_position!(&b"xxx"[..], ErrorKind::Tag))) + ); + assert_eq!( + sep_pair_abc_def(&b"xxx,def"[..]), + Err(Err::Error(error_position!(&b"xxx,def"[..], ErrorKind::Tag))) + ); + assert_eq!( + sep_pair_abc_def(&b"abc,xxx"[..]), + Err(Err::Error(error_position!(&b"xxx"[..], ErrorKind::Tag))) + ); +} + +#[test] +fn preceded_test() { + fn preceded_abcd_efgh(i: &[u8]) -> IResult<&[u8], &[u8]> { + preceded(tag("abcd"), tag("efgh"))(i) + } + + assert_eq!( + preceded_abcd_efgh(&b"abcdefghijkl"[..]), + Ok((&b"ijkl"[..], &b"efgh"[..])) + ); + assert_eq!( + preceded_abcd_efgh(&b"ab"[..]), + Err(Err::Incomplete(Needed::new(2))) + ); + assert_eq!( + preceded_abcd_efgh(&b"abcde"[..]), + Err(Err::Incomplete(Needed::new(3))) + ); + assert_eq!( + preceded_abcd_efgh(&b"xxx"[..]), + Err(Err::Error(error_position!(&b"xxx"[..], ErrorKind::Tag))) + ); + assert_eq!( + preceded_abcd_efgh(&b"xxxxdef"[..]), + Err(Err::Error(error_position!(&b"xxxxdef"[..], ErrorKind::Tag))) + ); + assert_eq!( + preceded_abcd_efgh(&b"abcdxxx"[..]), + Err(Err::Error(error_position!(&b"xxx"[..], ErrorKind::Tag))) + ); +} + +#[test] +fn terminated_test() { + fn terminated_abcd_efgh(i: &[u8]) -> IResult<&[u8], &[u8]> { + 
terminated(tag("abcd"), tag("efgh"))(i) + } + + assert_eq!( + terminated_abcd_efgh(&b"abcdefghijkl"[..]), + Ok((&b"ijkl"[..], &b"abcd"[..])) + ); + assert_eq!( + terminated_abcd_efgh(&b"ab"[..]), + Err(Err::Incomplete(Needed::new(2))) + ); + assert_eq!( + terminated_abcd_efgh(&b"abcde"[..]), + Err(Err::Incomplete(Needed::new(3))) + ); + assert_eq!( + terminated_abcd_efgh(&b"xxx"[..]), + Err(Err::Error(error_position!(&b"xxx"[..], ErrorKind::Tag))) + ); + assert_eq!( + terminated_abcd_efgh(&b"xxxxdef"[..]), + Err(Err::Error(error_position!(&b"xxxxdef"[..], ErrorKind::Tag))) + ); + assert_eq!( + terminated_abcd_efgh(&b"abcdxxxx"[..]), + Err(Err::Error(error_position!(&b"xxxx"[..], ErrorKind::Tag))) + ); +} + +#[test] +fn delimited_test() { + fn delimited_abc_def_ghi(i: &[u8]) -> IResult<&[u8], &[u8]> { + delimited(tag("abc"), tag("def"), tag("ghi"))(i) + } + + assert_eq!( + delimited_abc_def_ghi(&b"abcdefghijkl"[..]), + Ok((&b"jkl"[..], &b"def"[..])) + ); + assert_eq!( + delimited_abc_def_ghi(&b"ab"[..]), + Err(Err::Incomplete(Needed::new(1))) + ); + assert_eq!( + delimited_abc_def_ghi(&b"abcde"[..]), + Err(Err::Incomplete(Needed::new(1))) + ); + assert_eq!( + delimited_abc_def_ghi(&b"abcdefgh"[..]), + Err(Err::Incomplete(Needed::new(1))) + ); + assert_eq!( + delimited_abc_def_ghi(&b"xxx"[..]), + Err(Err::Error(error_position!(&b"xxx"[..], ErrorKind::Tag))) + ); + assert_eq!( + delimited_abc_def_ghi(&b"xxxdefghi"[..]), + Err(Err::Error(error_position!( + &b"xxxdefghi"[..], + ErrorKind::Tag + ),)) + ); + assert_eq!( + delimited_abc_def_ghi(&b"abcxxxghi"[..]), + Err(Err::Error(error_position!(&b"xxxghi"[..], ErrorKind::Tag))) + ); + assert_eq!( + delimited_abc_def_ghi(&b"abcdefxxx"[..]), + Err(Err::Error(error_position!(&b"xxx"[..], ErrorKind::Tag))) + ); +} + +#[test] +fn tuple_test() { + fn tuple_3(i: &[u8]) -> IResult<&[u8], (u16, &[u8], &[u8])> { + tuple((be_u16, take(3u8), tag("fg")))(i) + } + + assert_eq!( + tuple_3(&b"abcdefgh"[..]), + Ok((&b"h"[..], (0x6162u16, &b"cde"[..], &b"fg"[..]))) + ); + assert_eq!(tuple_3(&b"abcd"[..]), Err(Err::Incomplete(Needed::new(1)))); + assert_eq!(tuple_3(&b"abcde"[..]), Err(Err::Incomplete(Needed::new(2)))); + assert_eq!( + tuple_3(&b"abcdejk"[..]), + Err(Err::Error(error_position!(&b"jk"[..], ErrorKind::Tag))) + ); +} + +#[test] +fn unit_type() { + assert_eq!( + tuple::<&'static str, (), Error<&'static str>, ()>(())("abxsbsh"), + Ok(("abxsbsh", ())) + ); + assert_eq!( + tuple::<&'static str, (), Error<&'static str>, ()>(())("sdfjakdsas"), + Ok(("sdfjakdsas", ())) + ); + assert_eq!( + tuple::<&'static str, (), Error<&'static str>, ()>(())(""), + Ok(("", ())) + ); +} diff --git a/vendor/nom/src/str.rs b/vendor/nom/src/str.rs new file mode 100644 index 00000000000000..1a8b8ba2d4952c --- /dev/null +++ b/vendor/nom/src/str.rs @@ -0,0 +1,536 @@ +#[cfg(test)] +mod test { + #[cfg(feature = "alloc")] + use crate::{branch::alt, bytes::complete::tag_no_case, combinator::recognize, multi::many1}; + use crate::{ + bytes::complete::{is_a, is_not, tag, take, take_till, take_until}, + error::{self, ErrorKind}, + Err, IResult, + }; + + #[test] + fn tagtr_succeed() { + const INPUT: &str = "Hello World!"; + const TAG: &str = "Hello"; + fn test(input: &str) -> IResult<&str, &str> { + tag(TAG)(input) + } + + match test(INPUT) { + Ok((extra, output)) => { + assert!(extra == " World!", "Parser `tag` consumed leftover input."); + assert!( + output == TAG, + "Parser `tag` doesn't return the tag it matched on success. 
\ + Expected `{}`, got `{}`.", + TAG, + output + ); + } + other => panic!( + "Parser `tag` didn't succeed when it should have. \ + Got `{:?}`.", + other + ), + }; + } + + #[test] + fn tagtr_incomplete() { + use crate::bytes::streaming::tag; + + const INPUT: &str = "Hello"; + const TAG: &str = "Hello World!"; + + let res: IResult<_, _, error::Error<_>> = tag(TAG)(INPUT); + match res { + Err(Err::Incomplete(_)) => (), + other => { + panic!( + "Parser `tag` didn't require more input when it should have. \ + Got `{:?}`.", + other + ); + } + }; + } + + #[test] + fn tagtr_error() { + const INPUT: &str = "Hello World!"; + const TAG: &str = "Random"; // TAG must be closer than INPUT. + + let res: IResult<_, _, error::Error<_>> = tag(TAG)(INPUT); + match res { + Err(Err::Error(_)) => (), + other => { + panic!( + "Parser `tag` didn't fail when it should have. Got `{:?}`.`", + other + ); + } + }; + } + + #[test] + fn take_s_succeed() { + const INPUT: &str = "βèƒôřèÂßÇáƒƭèř"; + const CONSUMED: &str = "βèƒôřèÂßÇ"; + const LEFTOVER: &str = "áƒƭèř"; + + let res: IResult<_, _, error::Error<_>> = take(9_usize)(INPUT); + match res { + Ok((extra, output)) => { + assert!( + extra == LEFTOVER, + "Parser `take_s` consumed leftover input. Leftover `{}`.", + extra + ); + assert!( + output == CONSUMED, + "Parser `take_s` doesn't return the string it consumed on success. Expected `{}`, got `{}`.", + CONSUMED, + output + ); + } + other => panic!( + "Parser `take_s` didn't succeed when it should have. \ + Got `{:?}`.", + other + ), + }; + } + + #[test] + fn take_until_succeed() { + const INPUT: &str = "βèƒôřèÂßÇ∂áƒƭèř"; + const FIND: &str = "ÂßÇ∂"; + const CONSUMED: &str = "βèƒôřè"; + const LEFTOVER: &str = "ÂßÇ∂áƒƭèř"; + + let res: IResult<_, _, (_, ErrorKind)> = take_until(FIND)(INPUT); + match res { + Ok((extra, output)) => { + assert!( + extra == LEFTOVER, + "Parser `take_until`\ + consumed leftover input. Leftover `{}`.", + extra + ); + assert!( + output == CONSUMED, + "Parser `take_until`\ + doesn't return the string it consumed on success. Expected `{}`, got `{}`.", + CONSUMED, + output + ); + } + other => panic!( + "Parser `take_until` didn't succeed when it should have. \ + Got `{:?}`.", + other + ), + }; + } + + #[test] + fn take_s_incomplete() { + use crate::bytes::streaming::take; + + const INPUT: &str = "βèƒôřèÂßÇá"; + + let res: IResult<_, _, (_, ErrorKind)> = take(13_usize)(INPUT); + match res { + Err(Err::Incomplete(_)) => (), + other => panic!( + "Parser `take` didn't require more input when it should have. 
\ + Got `{:?}`.", + other + ), + } + } + + use crate::internal::Needed; + + fn is_alphabetic(c: char) -> bool { + (c as u8 >= 0x41 && c as u8 <= 0x5A) || (c as u8 >= 0x61 && c as u8 <= 0x7A) + } + + #[test] + fn take_while() { + use crate::bytes::streaming::take_while; + + fn f(i: &str) -> IResult<&str, &str> { + take_while(is_alphabetic)(i) + } + let a = ""; + let b = "abcd"; + let c = "abcd123"; + let d = "123"; + + assert_eq!(f(&a[..]), Err(Err::Incomplete(Needed::new(1)))); + assert_eq!(f(&b[..]), Err(Err::Incomplete(Needed::new(1)))); + assert_eq!(f(&c[..]), Ok((&d[..], &b[..]))); + assert_eq!(f(&d[..]), Ok((&d[..], &a[..]))); + } + + #[test] + fn take_while1() { + use crate::bytes::streaming::take_while1; + + fn f(i: &str) -> IResult<&str, &str> { + take_while1(is_alphabetic)(i) + } + let a = ""; + let b = "abcd"; + let c = "abcd123"; + let d = "123"; + + assert_eq!(f(&a[..]), Err(Err::Incomplete(Needed::new(1)))); + assert_eq!(f(&b[..]), Err(Err::Incomplete(Needed::new(1)))); + assert_eq!(f(&c[..]), Ok((&"123"[..], &b[..]))); + assert_eq!( + f(&d[..]), + Err(Err::Error(error_position!(&d[..], ErrorKind::TakeWhile1))) + ); + } + + #[test] + fn take_till_s_succeed() { + const INPUT: &str = "βèƒôřèÂßÇáƒƭèř"; + const CONSUMED: &str = "βèƒôřèÂßÇ"; + const LEFTOVER: &str = "áƒƭèř"; + fn till_s(c: char) -> bool { + c == 'á' + } + fn test(input: &str) -> IResult<&str, &str> { + take_till(till_s)(input) + } + match test(INPUT) { + Ok((extra, output)) => { + assert!( + extra == LEFTOVER, + "Parser `take_till` consumed leftover input." + ); + assert!( + output == CONSUMED, + "Parser `take_till` doesn't return the string it consumed on success. \ + Expected `{}`, got `{}`.", + CONSUMED, + output + ); + } + other => panic!( + "Parser `take_till` didn't succeed when it should have. \ + Got `{:?}`.", + other + ), + }; + } + + #[test] + fn take_while_succeed_none() { + use crate::bytes::complete::take_while; + + const INPUT: &str = "βèƒôřèÂßÇáƒƭèř"; + const CONSUMED: &str = ""; + const LEFTOVER: &str = "βèƒôřèÂßÇáƒƭèř"; + fn while_s(c: char) -> bool { + c == '9' + } + fn test(input: &str) -> IResult<&str, &str> { + take_while(while_s)(input) + } + match test(INPUT) { + Ok((extra, output)) => { + assert!( + extra == LEFTOVER, + "Parser `take_while` consumed leftover input." + ); + assert!( + output == CONSUMED, + "Parser `take_while` doesn't return the string it consumed on success. \ + Expected `{}`, got `{}`.", + CONSUMED, + output + ); + } + other => panic!( + "Parser `take_while` didn't succeed when it should have. \ + Got `{:?}`.", + other + ), + }; + } + + #[test] + fn is_not_succeed() { + const INPUT: &str = "βèƒôřèÂßÇáƒƭèř"; + const AVOID: &str = "£úçƙ¥á"; + const CONSUMED: &str = "βèƒôřèÂßÇ"; + const LEFTOVER: &str = "áƒƭèř"; + fn test(input: &str) -> IResult<&str, &str> { + is_not(AVOID)(input) + } + match test(INPUT) { + Ok((extra, output)) => { + assert!( + extra == LEFTOVER, + "Parser `is_not` consumed leftover input. Leftover `{}`.", + extra + ); + assert!( + output == CONSUMED, + "Parser `is_not` doesn't return the string it consumed on success. Expected `{}`, got `{}`.", + CONSUMED, + output + ); + } + other => panic!( + "Parser `is_not` didn't succeed when it should have. 
\ + Got `{:?}`.", + other + ), + }; + } + + #[test] + fn take_while_succeed_some() { + use crate::bytes::complete::take_while; + + const INPUT: &str = "βèƒôřèÂßÇáƒƭèř"; + const CONSUMED: &str = "βèƒôřèÂßÇ"; + const LEFTOVER: &str = "áƒƭèř"; + fn while_s(c: char) -> bool { + c == 'β' + || c == 'è' + || c == 'ƒ' + || c == 'ô' + || c == 'ř' + || c == 'è' + || c == 'Â' + || c == 'ß' + || c == 'Ç' + } + fn test(input: &str) -> IResult<&str, &str> { + take_while(while_s)(input) + } + match test(INPUT) { + Ok((extra, output)) => { + assert!( + extra == LEFTOVER, + "Parser `take_while` consumed leftover input." + ); + assert!( + output == CONSUMED, + "Parser `take_while` doesn't return the string it consumed on success. \ + Expected `{}`, got `{}`.", + CONSUMED, + output + ); + } + other => panic!( + "Parser `take_while` didn't succeed when it should have. \ + Got `{:?}`.", + other + ), + }; + } + + #[test] + fn is_not_fail() { + const INPUT: &str = "βèƒôřèÂßÇáƒƭèř"; + const AVOID: &str = "βúçƙ¥"; + fn test(input: &str) -> IResult<&str, &str> { + is_not(AVOID)(input) + } + match test(INPUT) { + Err(Err::Error(_)) => (), + other => panic!( + "Parser `is_not` didn't fail when it should have. Got `{:?}`.", + other + ), + }; + } + + #[test] + fn take_while1_succeed() { + use crate::bytes::complete::take_while1; + + const INPUT: &str = "βèƒôřèÂßÇáƒƭèř"; + const CONSUMED: &str = "βèƒôřèÂßÇ"; + const LEFTOVER: &str = "áƒƭèř"; + fn while1_s(c: char) -> bool { + c == 'β' + || c == 'è' + || c == 'ƒ' + || c == 'ô' + || c == 'ř' + || c == 'è' + || c == 'Â' + || c == 'ß' + || c == 'Ç' + } + fn test(input: &str) -> IResult<&str, &str> { + take_while1(while1_s)(input) + } + match test(INPUT) { + Ok((extra, output)) => { + assert!( + extra == LEFTOVER, + "Parser `take_while1` consumed leftover input." + ); + assert!( + output == CONSUMED, + "Parser `take_while1` doesn't return the string it consumed on success. \ + Expected `{}`, got `{}`.", + CONSUMED, + output + ); + } + other => panic!( + "Parser `take_while1` didn't succeed when it should have. \ + Got `{:?}`.", + other + ), + }; + } + + #[test] + fn take_until_incomplete() { + use crate::bytes::streaming::take_until; + + const INPUT: &str = "βèƒôřè"; + const FIND: &str = "βèƒôřèÂßÇ"; + + let res: IResult<_, _, (_, ErrorKind)> = take_until(FIND)(INPUT); + match res { + Err(Err::Incomplete(_)) => (), + other => panic!( + "Parser `take_until` didn't require more input when it should have. \ + Got `{:?}`.", + other + ), + }; + } + + #[test] + fn is_a_succeed() { + const INPUT: &str = "βèƒôřèÂßÇáƒƭèř"; + const MATCH: &str = "βèƒôřèÂßÇ"; + const CONSUMED: &str = "βèƒôřèÂßÇ"; + const LEFTOVER: &str = "áƒƭèř"; + fn test(input: &str) -> IResult<&str, &str> { + is_a(MATCH)(input) + } + match test(INPUT) { + Ok((extra, output)) => { + assert!( + extra == LEFTOVER, + "Parser `is_a` consumed leftover input. Leftover `{}`.", + extra + ); + assert!( + output == CONSUMED, + "Parser `is_a` doesn't return the string it consumed on success. Expected `{}`, got `{}`.", + CONSUMED, + output + ); + } + other => panic!( + "Parser `is_a` didn't succeed when it should have. 
\ + Got `{:?}`.", + other + ), + }; + } + + #[test] + fn take_while1_fail() { + use crate::bytes::complete::take_while1; + + const INPUT: &str = "βèƒôřèÂßÇáƒƭèř"; + fn while1_s(c: char) -> bool { + c == '9' + } + fn test(input: &str) -> IResult<&str, &str> { + take_while1(while1_s)(input) + } + match test(INPUT) { + Err(Err::Error(_)) => (), + other => panic!( + "Parser `take_while1` didn't fail when it should have. \ + Got `{:?}`.", + other + ), + }; + } + + #[test] + fn is_a_fail() { + const INPUT: &str = "βèƒôřèÂßÇáƒƭèř"; + const MATCH: &str = "Ûñℓúçƙ¥"; + fn test(input: &str) -> IResult<&str, &str> { + is_a(MATCH)(input) + } + match test(INPUT) { + Err(Err::Error(_)) => (), + other => panic!( + "Parser `is_a` didn't fail when it should have. Got `{:?}`.", + other + ), + }; + } + + #[test] + fn take_until_error() { + use crate::bytes::streaming::take_until; + + const INPUT: &str = "βèƒôřèÂßÇáƒƭèř"; + const FIND: &str = "Ráñδô₥"; + + let res: IResult<_, _, (_, ErrorKind)> = take_until(FIND)(INPUT); + match res { + Err(Err::Incomplete(_)) => (), + other => panic!( + "Parser `take_until` didn't fail when it should have. \ + Got `{:?}`.", + other + ), + }; + } + + #[test] + #[cfg(feature = "alloc")] + fn recognize_is_a() { + let a = "aabbab"; + let b = "ababcd"; + + fn f(i: &str) -> IResult<&str, &str> { + recognize(many1(alt((tag("a"), tag("b")))))(i) + } + + assert_eq!(f(&a[..]), Ok((&a[6..], &a[..]))); + assert_eq!(f(&b[..]), Ok((&b[4..], &b[..4]))); + } + + #[test] + fn utf8_indexing() { + fn dot(i: &str) -> IResult<&str, &str> { + tag(".")(i) + } + + let _ = dot("點"); + } + + #[cfg(feature = "alloc")] + #[test] + fn case_insensitive() { + fn test(i: &str) -> IResult<&str, &str> { + tag_no_case("ABcd")(i) + } + assert_eq!(test("aBCdefgh"), Ok(("efgh", "aBCd"))); + assert_eq!(test("abcdefgh"), Ok(("efgh", "abcd"))); + assert_eq!(test("ABCDefgh"), Ok(("efgh", "ABCD"))); + } +} diff --git a/vendor/nom/src/traits.rs b/vendor/nom/src/traits.rs new file mode 100644 index 00000000000000..394e5bc3a59e04 --- /dev/null +++ b/vendor/nom/src/traits.rs @@ -0,0 +1,1441 @@ +//! 
Traits input types have to implement to work with nom combinators +use crate::error::{ErrorKind, ParseError}; +use crate::internal::{Err, IResult, Needed}; +use crate::lib::std::iter::{Copied, Enumerate}; +use crate::lib::std::ops::{Range, RangeFrom, RangeFull, RangeTo}; +use crate::lib::std::slice::Iter; +use crate::lib::std::str::from_utf8; +use crate::lib::std::str::CharIndices; +use crate::lib::std::str::Chars; +use crate::lib::std::str::FromStr; + +#[cfg(feature = "alloc")] +use crate::lib::std::string::String; +#[cfg(feature = "alloc")] +use crate::lib::std::vec::Vec; + +/// Abstract method to calculate the input length +pub trait InputLength { + /// Calculates the input length, as indicated by its name, + /// and the name of the trait itself + fn input_len(&self) -> usize; +} + +impl<'a, T> InputLength for &'a [T] { + #[inline] + fn input_len(&self) -> usize { + self.len() + } +} + +impl<'a> InputLength for &'a str { + #[inline] + fn input_len(&self) -> usize { + self.len() + } +} + +impl<'a> InputLength for (&'a [u8], usize) { + #[inline] + fn input_len(&self) -> usize { + //println!("bit input length for ({:?}, {}):", self.0, self.1); + //println!("-> {}", self.0.len() * 8 - self.1); + self.0.len() * 8 - self.1 + } +} + +/// Useful functions to calculate the offset between slices and show a hexdump of a slice +pub trait Offset { + /// Offset between the first byte of self and the first byte of the argument + fn offset(&self, second: &Self) -> usize; +} + +impl Offset for [u8] { + fn offset(&self, second: &Self) -> usize { + let fst = self.as_ptr(); + let snd = second.as_ptr(); + + snd as usize - fst as usize + } +} + +impl<'a> Offset for &'a [u8] { + fn offset(&self, second: &Self) -> usize { + let fst = self.as_ptr(); + let snd = second.as_ptr(); + + snd as usize - fst as usize + } +} + +impl Offset for str { + fn offset(&self, second: &Self) -> usize { + let fst = self.as_ptr(); + let snd = second.as_ptr(); + + snd as usize - fst as usize + } +} + +impl<'a> Offset for &'a str { + fn offset(&self, second: &Self) -> usize { + let fst = self.as_ptr(); + let snd = second.as_ptr(); + + snd as usize - fst as usize + } +} + +/// Helper trait for types that can be viewed as a byte slice +pub trait AsBytes { + /// Casts the input type to a byte slice + fn as_bytes(&self) -> &[u8]; +} + +impl<'a> AsBytes for &'a str { + #[inline(always)] + fn as_bytes(&self) -> &[u8] { + (*self).as_bytes() + } +} + +impl AsBytes for str { + #[inline(always)] + fn as_bytes(&self) -> &[u8] { + self.as_ref() + } +} + +impl<'a> AsBytes for &'a [u8] { + #[inline(always)] + fn as_bytes(&self) -> &[u8] { + *self + } +} + +impl AsBytes for [u8] { + #[inline(always)] + fn as_bytes(&self) -> &[u8] { + self + } +} + +macro_rules! as_bytes_array_impls { + ($($N:expr)+) => { + $( + impl<'a> AsBytes for &'a [u8; $N] { + #[inline(always)] + fn as_bytes(&self) -> &[u8] { + *self + } + } + + impl AsBytes for [u8; $N] { + #[inline(always)] + fn as_bytes(&self) -> &[u8] { + self + } + } + )+ + }; +} + +as_bytes_array_impls! 
{ + 0 1 2 3 4 5 6 7 8 9 + 10 11 12 13 14 15 16 17 18 19 + 20 21 22 23 24 25 26 27 28 29 + 30 31 32 +} + +/// Transforms common types to a char for basic token parsing +pub trait AsChar { + /// makes a char from self + fn as_char(self) -> char; + + /// Tests that self is an alphabetic character + /// + /// Warning: for `&str` it recognizes alphabetic + /// characters outside of the 52 ASCII letters + fn is_alpha(self) -> bool; + + /// Tests that self is an alphabetic character + /// or a decimal digit + fn is_alphanum(self) -> bool; + /// Tests that self is a decimal digit + fn is_dec_digit(self) -> bool; + /// Tests that self is an hex digit + fn is_hex_digit(self) -> bool; + /// Tests that self is an octal digit + fn is_oct_digit(self) -> bool; + /// Gets the len in bytes for self + fn len(self) -> usize; +} + +impl AsChar for u8 { + #[inline] + fn as_char(self) -> char { + self as char + } + #[inline] + fn is_alpha(self) -> bool { + (self >= 0x41 && self <= 0x5A) || (self >= 0x61 && self <= 0x7A) + } + #[inline] + fn is_alphanum(self) -> bool { + self.is_alpha() || self.is_dec_digit() + } + #[inline] + fn is_dec_digit(self) -> bool { + self >= 0x30 && self <= 0x39 + } + #[inline] + fn is_hex_digit(self) -> bool { + (self >= 0x30 && self <= 0x39) + || (self >= 0x41 && self <= 0x46) + || (self >= 0x61 && self <= 0x66) + } + #[inline] + fn is_oct_digit(self) -> bool { + self >= 0x30 && self <= 0x37 + } + #[inline] + fn len(self) -> usize { + 1 + } +} +impl<'a> AsChar for &'a u8 { + #[inline] + fn as_char(self) -> char { + *self as char + } + #[inline] + fn is_alpha(self) -> bool { + (*self >= 0x41 && *self <= 0x5A) || (*self >= 0x61 && *self <= 0x7A) + } + #[inline] + fn is_alphanum(self) -> bool { + self.is_alpha() || self.is_dec_digit() + } + #[inline] + fn is_dec_digit(self) -> bool { + *self >= 0x30 && *self <= 0x39 + } + #[inline] + fn is_hex_digit(self) -> bool { + (*self >= 0x30 && *self <= 0x39) + || (*self >= 0x41 && *self <= 0x46) + || (*self >= 0x61 && *self <= 0x66) + } + #[inline] + fn is_oct_digit(self) -> bool { + *self >= 0x30 && *self <= 0x37 + } + #[inline] + fn len(self) -> usize { + 1 + } +} + +impl AsChar for char { + #[inline] + fn as_char(self) -> char { + self + } + #[inline] + fn is_alpha(self) -> bool { + self.is_ascii_alphabetic() + } + #[inline] + fn is_alphanum(self) -> bool { + self.is_alpha() || self.is_dec_digit() + } + #[inline] + fn is_dec_digit(self) -> bool { + self.is_ascii_digit() + } + #[inline] + fn is_hex_digit(self) -> bool { + self.is_ascii_hexdigit() + } + #[inline] + fn is_oct_digit(self) -> bool { + self.is_digit(8) + } + #[inline] + fn len(self) -> usize { + self.len_utf8() + } +} + +impl<'a> AsChar for &'a char { + #[inline] + fn as_char(self) -> char { + *self + } + #[inline] + fn is_alpha(self) -> bool { + self.is_ascii_alphabetic() + } + #[inline] + fn is_alphanum(self) -> bool { + self.is_alpha() || self.is_dec_digit() + } + #[inline] + fn is_dec_digit(self) -> bool { + self.is_ascii_digit() + } + #[inline] + fn is_hex_digit(self) -> bool { + self.is_ascii_hexdigit() + } + #[inline] + fn is_oct_digit(self) -> bool { + self.is_digit(8) + } + #[inline] + fn len(self) -> usize { + self.len_utf8() + } +} + +/// Abstracts common iteration operations on the input type +pub trait InputIter { + /// The current input type is a sequence of that `Item` type. + /// + /// Example: `u8` for `&[u8]` or `char` for `&str` + type Item; + /// An iterator over the input type, producing the item and its position + /// for use with [Slice]. 
If we're iterating over `&str`, the position
+  /// corresponds to the byte index of the character
+  type Iter: Iterator<Item = (usize, Self::Item)>;
+
+  /// An iterator over the input type, producing the item
+  type IterElem: Iterator<Item = Self::Item>;
+
+  /// Returns an iterator over the elements and their byte offsets
+  fn iter_indices(&self) -> Self::Iter;
+  /// Returns an iterator over the elements
+  fn iter_elements(&self) -> Self::IterElem;
+  /// Finds the byte position of the element
+  fn position